diff --git a/.gitignore b/.gitignore index 97ba6b79834c6d20d9612e6b383fc5c69d196e48..e18bc625d330fb177299dd35b8de8bcc7414ba0e 100644 --- a/.gitignore +++ b/.gitignore @@ -57,6 +57,7 @@ modules.builtin /vmlinuz /System.map /Module.markers +/modules.builtin.modinfo # # RPM spec file (make rpm-pkg) diff --git a/Documentation/ABI/obsolete/sysfs-class-dax b/Documentation/ABI/obsolete/sysfs-class-dax new file mode 100644 index 0000000000000000000000000000000000000000..2cb9fc5e8bd1420e5c82a11109a1867d98a95ba1 --- /dev/null +++ b/Documentation/ABI/obsolete/sysfs-class-dax @@ -0,0 +1,22 @@ +What: /sys/class/dax/ +Date: May, 2016 +KernelVersion: v4.7 +Contact: linux-nvdimm@lists.01.org +Description: Device DAX is the device-centric analogue of Filesystem + DAX (CONFIG_FS_DAX). It allows memory ranges to be + allocated and mapped without need of an intervening file + system. Device DAX is strict, precise and predictable. + Specifically this interface: + + 1/ Guarantees fault granularity with respect to a given + page size (pte, pmd, or pud) set at configuration time. + + 2/ Enforces deterministic behavior by being strict about + what fault scenarios are supported. + + The /sys/class/dax/ interface enumerates all the + device-dax instances in the system. The ABI is + deprecated and will be removed after 2020. It is + replaced with the DAX bus interface /sys/bus/dax/ where + device-dax instances can be found under + /sys/bus/dax/devices/ diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node index 3e90e1f3bf0a004edbcd2d5b89eed1d05b7aa784..f7ce68fbd4b9a77d2ba353becedfb025c3152d72 100644 --- a/Documentation/ABI/stable/sysfs-devices-node +++ b/Documentation/ABI/stable/sysfs-devices-node @@ -90,4 +90,89 @@ Date: December 2009 Contact: Lee Schermerhorn Description: The node's huge page size control/query attributes. - See Documentation/admin-guide/mm/hugetlbpage.rst \ No newline at end of file + See Documentation/admin-guide/mm/hugetlbpage.rst + +What: /sys/devices/system/node/nodeX/accessY/ +Date: December 2018 +Contact: Keith Busch +Description: + The node's relationship to other nodes for access class "Y". + +What: /sys/devices/system/node/nodeX/accessY/initiators/ +Date: December 2018 +Contact: Keith Busch +Description: + The directory containing symlinks to memory initiator + nodes that have class "Y" access to this target node's + memory. CPUs and other memory initiators in nodes not in + the list accessing this node's memory may have different + performance. + +What: /sys/devices/system/node/nodeX/accessY/targets/ +Date: December 2018 +Contact: Keith Busch +Description: + The directory containing symlinks to memory targets that + this initiator node has class "Y" access. + +What: /sys/devices/system/node/nodeX/accessY/initiators/read_bandwidth +Date: December 2018 +Contact: Keith Busch +Description: + This node's read bandwidth in MB/s when accessed from + nodes found in this access class's linked initiators. + +What: /sys/devices/system/node/nodeX/accessY/initiators/read_latency +Date: December 2018 +Contact: Keith Busch +Description: + This node's read latency in nanoseconds when accessed + from nodes found in this access class's linked initiators. + +What: /sys/devices/system/node/nodeX/accessY/initiators/write_bandwidth +Date: December 2018 +Contact: Keith Busch +Description: + This node's write bandwidth in MB/s when accessed from + found in this access class's linked initiators. 
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/write_latency
+Date: December 2018
+Contact: Keith Busch
+Description:
+ This node's write latency in nanoseconds when accessed
+ from nodes found in this class's linked initiators.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/
+Date: December 2018
+Contact: Keith Busch
+Description:
+ The directory containing attributes for the memory-side cache
+ level 'Y'.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/indexing
+Date: December 2018
+Contact: Keith Busch
+Description:
+ The cache's associativity indexing: 0 for direct mapped,
+ non-zero if indexed.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/line_size
+Date: December 2018
+Contact: Keith Busch
+Description:
+ The number of bytes accessed from the next cache level on a
+ cache miss.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/size
+Date: December 2018
+Contact: Keith Busch
+Description:
+ The size of this memory side cache in bytes.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/write_policy
+Date: December 2018
+Contact: Keith Busch
+Description:
+ The cache write policy: 0 for write-back, 1 for write-through,
+ other or unknown.
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 201d10319fa18b588a42246814b13b67047a7dd3..1df1177df68adf03606dc7289adc796500a62e4e 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -42,8 +42,30 @@ Description: modification of EVM-protected metadata and disable all further modification of policy
- Note that once a key has been loaded, it will no longer be
- possible to enable metadata modification.
+ Echoing a value is additive; the new value is added to the
+ existing initialization flags.
+
+ For example, after::
+
+ echo 2 >/evm
+
+ another echo can be performed::
+
+ echo 1 >/evm
+
+ and the resulting value will be 3.
+
+ Note that once an HMAC key has been loaded, it will no longer
+ be possible to enable metadata modification. Signaling that an
+ HMAC key has been loaded will clear the corresponding flag.
+ For example, if the current value is 6 (2 and 4 set)::
+
+ echo 1 >/evm
+
+ will set the new value to 3 (4 cleared).
+
+ Loading an HMAC key is the only way to disable metadata
+ modification.
Until key loading has been signaled EVM can not create or validate the 'security.evm' xattr, but returns
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
index b940c5d91cf7a37e32cbf92690cf830712c671de..456cb62b384c2a138d8076dfc3c4fa0325b170db 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -12,7 +12,8 @@ Description: (RW) Configure MSC operating mode: - "single", for contiguous buffer mode (high-order alloc); - "multi", for multiblock mode; - "ExI", for DCI handler mode; - - "debug", for debug mode. + - "debug", for debug mode; + - any of the currently loaded buffer sinks.
If operating mode changes, existing buffer is deallocated, provided there are no active users and tracing is not enabled, otherwise the write will fail.
@@ -30,4 +31,12 @@ Description: (RW) Configure MSC buffer size for "single" or "multi" modes. there are no active users and tracing is not enabled) and then allocates a new one.
+What: /sys/bus/intel_th/devices/-msc/win_switch +Date: May 2019 +KernelVersion: 5.2 +Contact: Alexander Shishkin +Description: (RW) Trigger window switch for the MSC's buffer, in + multi-window mode. In "multi" mode, accepts writes of "1", thereby + triggering a window switch for the buffer. Returns an error in any + other operating mode or attempts to write something other than "1". diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi index 7122d6264c49d6c02c2c0074e145f19b93fc63dd..c310db4ccbc2eeb2aa7250438fd9f7b308b8fdbd 100644 --- a/Documentation/ABI/testing/sysfs-class-net-qmi +++ b/Documentation/ABI/testing/sysfs-class-net-qmi @@ -29,7 +29,7 @@ Contact: Bjørn Mork Description: Unsigned integer. - Write a number ranging from 1 to 127 to add a qmap mux + Write a number ranging from 1 to 254 to add a qmap mux based network device, supported by recent Qualcomm based modems. @@ -46,5 +46,5 @@ Contact: Bjørn Mork Description: Unsigned integer. - Write a number ranging from 1 to 127 to delete a previously + Write a number ranging from 1 to 254 to delete a previously created qmap mux based network device. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 73318225a3681b3473abe87d0fe1bb4c3447e7ed..a96ad30130eb3191ccd2ed50fe12978d87f747c2 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -188,6 +188,12 @@ Description: does not reflect it. Likewise, if one enables a deep state but a lighter state still is disabled, then this has no effect. +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/default_status +Date: December 2019 +KernelVersion: v5.6 +Contact: Linux power management list +Description: + (RO) The default status of this state, "enabled" or "disabled". What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency Date: March 2014 @@ -472,11 +478,17 @@ Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpu#. 
What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 - /sys/devices/system/cpu/vulnerabilities/spec_store_bypass - /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities @@ -489,8 +501,7 @@ Description: Information about CPU vulnerabilities "Vulnerable" CPU is affected and no mitigation in effect "Mitigation: $M" CPU is affected and mitigation $M is in effect - Details about the l1tf file can be found in - Documentation/admin-guide/l1tf.rst + See also: Documentation/admin-guide/hw-vuln/index.rst What: /sys/devices/system/cpu/smt /sys/devices/system/cpu/smt/active diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt index 688b69121e8294ca81d47689c25345e97d5de720..0b6bb3ef449ee7a8ee45afe9b4be61a2859ac8e3 100644 --- a/Documentation/PCI/pci-error-recovery.txt +++ b/Documentation/PCI/pci-error-recovery.txt @@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error event will be platform-dependent, but will follow the general sequence described below. -STEP 0: Error Event: ERR_NONFATAL +STEP 0: Error Event ------------------- A PCI bus error is detected by the PCI hardware. On powerpc, the slot is isolated, in that all I/O is blocked: all reads return 0xffffffff, @@ -228,7 +228,13 @@ proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations). If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform proceeds to STEP 4 (Slot Reset) -STEP 3: Slot Reset +STEP 3: Link Reset +------------------ +The platform resets the link. This is a PCI-Express specific step +and is done whenever a fatal error has been detected that can be +"solved" by resetting the link. + +STEP 4: Slot Reset ------------------ In response to a return value of PCI_ERS_RESULT_NEED_RESET, the @@ -314,7 +320,7 @@ Failure). >>> However, it probably should. -STEP 4: Resume Operations +STEP 5: Resume Operations ------------------------- The platform will call the resume() callback on all affected device drivers if all drivers on the segment have returned @@ -326,7 +332,7 @@ a result code. At this point, if a new error happens, the platform will restart a new error recovery sequence. -STEP 5: Permanent Failure +STEP 6: Permanent Failure ------------------------- A "permanent failure" has occurred, and the platform cannot recover the device. The platform will call error_detected() with a @@ -349,27 +355,6 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt for additional detail on real-life experience of the causes of software errors. -STEP 0: Error Event: ERR_FATAL -------------------- -PCI bus error is detected by the PCI hardware. On powerpc, the slot is -isolated, in that all I/O is blocked: all reads return 0xffffffff, all -writes are ignored. 
-
-STEP 1: Remove devices
---------------------
-Platform removes the devices depending on the error agent, it could be
-this port for all subordinates or upstream component (likely downstream
-port)
-
-STEP 2: Reset link
---------------------
-The platform resets the link. This is a PCI-Express specific step and is
-done whenever a fatal error has been detected that can be "solved" by
-resetting the link.
-
-STEP 3: Re-enumerate the devices
---------------------
-Initiates the re-enumeration.
Conclusion; General Remarks
---------------------------
diff --git a/Documentation/admin-guide/bcache.rst b/Documentation/admin-guide/bcache.rst
index c0ce64d75bbf7cfe02fa0a99af136f3ae02ba60c..44ae47ea5f43a5c2cefc6bd11dbe087528896636 100644
--- a/Documentation/admin-guide/bcache.rst
+++ b/Documentation/admin-guide/bcache.rst
@@ -434,6 +434,10 @@ sequential_cutoff most recent 128 IOs are tracked so sequential IO can be detected even when it isn't all done at once.
+read_bypass
+ If enabled, all IO will bypass the cache. This option can be useful when
+ userspace prefetch is enabled and the cache device has low capacity.
+
sequential_merge If non zero, bcache keeps a list of the last 128 requests submitted to compare against all new requests to determine which new requests are sequential
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 184193bcb262ac908f1f5a7a7c2c662dec0ea4b8..9c194bc8637407acf9e3e8a2e7111f1b10634c46 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1177,6 +1177,10 @@ PAGE_SIZE multiple when read back. Amount of cached filesystem data that was modified and is currently being written back to disk
+ anon_thp
+ Amount of memory used in anonymous mappings backed by
+ transparent hugepages
+
inactive_anon, active_anon, inactive_file, active_file, unevictable Amount of memory, swap-backed and filesystem-backed, on the internal memory management lists used by the
@@ -1236,6 +1240,18 @@ PAGE_SIZE multiple when read back. Amount of reclaimed lazyfree pages
+ thp_fault_alloc
+
+ Number of transparent hugepages which were allocated to satisfy
+ a page fault, including COW faults. This counter is not present
+ when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
+ thp_collapse_alloc
+
+ Number of transparent hugepages which were allocated to allow
+ collapsing an existing range of pages. This counter is not
+ present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
memory.swap.current A read-only single value file which exists on non-root cgroups.
diff --git a/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst
new file mode 100644
index 0000000000000000000000000000000000000000..875616d675fe6beaaca9529e4123965d61f19ca2
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst
@@ -0,0 +1,91 @@
+
+.. SPDX-License-Identifier: GPL-2.0
+
+Cross-Thread Return Address Predictions
+=======================================
+
+Certain AMD and Hygon processors are subject to a cross-thread return address
+predictions vulnerability. When running in SMT mode and one sibling thread
+transitions out of C0 state, the other sibling thread could use return target
+predictions from the sibling thread that transitioned out of C0.
+
+The Spectre v2 mitigations protect the Linux kernel, as it fills the return
+address prediction entries with safe targets when context switching to the idle
+thread.
However, KVM does allow a VMM to prevent exiting guest mode when +transitioning out of C0. This could result in a guest-controlled return target +being consumed by the sibling thread. + +Affected processors +------------------- + +The following CPUs are vulnerable: + + - AMD Family 17h processors + - Hygon Family 18h processors + +Related CVEs +------------ + +The following CVE entry is related to this issue: + + ============== ======================================= + CVE-2022-27672 Cross-Thread Return Address Predictions + ============== ======================================= + +Problem +------- + +Affected SMT-capable processors support 1T and 2T modes of execution when SMT +is enabled. In 2T mode, both threads in a core are executing code. For the +processor core to enter 1T mode, it is required that one of the threads +requests to transition out of the C0 state. This can be communicated with the +HLT instruction or with an MWAIT instruction that requests non-C0. +When the thread re-enters the C0 state, the processor transitions back +to 2T mode, assuming the other thread is also still in C0 state. + +In affected processors, the return address predictor (RAP) is partitioned +depending on the SMT mode. For instance, in 2T mode each thread uses a private +16-entry RAP, but in 1T mode, the active thread uses a 32-entry RAP. Upon +transition between 1T/2T mode, the RAP contents are not modified but the RAP +pointers (which control the next return target to use for predictions) may +change. This behavior may result in return targets from one SMT thread being +used by RET predictions in the sibling thread following a 1T/2T switch. In +particular, a RET instruction executed immediately after a transition to 1T may +use a return target from the thread that just became idle. In theory, this +could lead to information disclosure if the return targets used do not come +from trustworthy code. + +Attack scenarios +---------------- + +An attack can be mounted on affected processors by performing a series of CALL +instructions with targeted return locations and then transitioning out of C0 +state. + +Mitigation mechanism +-------------------- + +Before entering idle state, the kernel context switches to the idle thread. The +context switch fills the RAP entries (referred to as the RSB in Linux) with safe +targets by performing a sequence of CALL instructions. + +Prevent a guest VM from directly putting the processor into an idle state by +intercepting HLT and MWAIT instructions. + +Both mitigations are required to fully address this issue. + +Mitigation control on the kernel command line +--------------------------------------------- + +Use existing Spectre v2 mitigations that will fill the RSB on context switch. + +Mitigation control for KVM - module parameter +--------------------------------------------- + +By default, the KVM hypervisor mitigates this issue by intercepting guest +attempts to transition out of C0. A VMM can use the KVM_CAP_X86_DISABLE_EXITS +capability to override those interceptions, but since this is not common, the +mitigation that covers this path is not enabled by default. + +The mitigation for the KVM_CAP_X86_DISABLE_EXITS capability can be turned on +using the boolean module parameter mitigate_smt_rsb, e.g. ``kvm.mitigate_smt_rsb=1``. 
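+
+As a rough illustration of how this is controlled from userspace (assuming KVM
+is built as a module; the ``mitigate_smt_rsb`` parameter name comes from this
+document, while ``/sys/module`` is the kernel's standard location for module
+parameters)::
+
+    # Check the current value of the parameter, if KVM is already loaded
+    cat /sys/module/kvm/parameters/mitigate_smt_rsb
+
+    # Reload KVM with the extra mitigation enabled; on affected AMD/Hygon
+    # systems the vendor module (kvm_amd) must be removed and re-inserted too
+    modprobe -r kvm_amd kvm
+    modprobe kvm mitigate_smt_rsb=1
+    modprobe kvm_amd
+
+The same effect can be had by booting with ``kvm.mitigate_smt_rsb=1`` as noted
+above.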
diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst new file mode 100644 index 0000000000000000000000000000000000000000..264bfa937f7de126bdc1622420f459a8170cea86 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -0,0 +1,109 @@ +.. SPDX-License-Identifier: GPL-2.0 + +GDS - Gather Data Sampling +========================== + +Gather Data Sampling is a hardware vulnerability which allows unprivileged +speculative access to data which was previously stored in vector registers. + +Problem +------- +When a gather instruction performs loads from memory, different data elements +are merged into the destination vector register. However, when a gather +instruction that is transiently executed encounters a fault, stale data from +architectural or internal vector registers may get transiently forwarded to the +destination vector register instead. This will allow a malicious attacker to +infer stale data using typical side channel techniques like cache timing +attacks. GDS is a purely sampling-based attack. + +The attacker uses gather instructions to infer the stale vector register data. +The victim does not need to do anything special other than use the vector +registers. The victim does not need to use gather instructions to be +vulnerable. + +Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks +are possible. + +Attack scenarios +---------------- +Without mitigation, GDS can infer stale data across virtually all +permission boundaries: + + Non-enclaves can infer SGX enclave data + Userspace can infer kernel data + Guests can infer data from hosts + Guest can infer guest from other guests + Users can infer data from other users + +Because of this, it is important to ensure that the mitigation stays enabled in +lower-privilege contexts like guests and when running outside SGX enclaves. + +The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure +that guests are not allowed to disable the GDS mitigation. If a host erred and +allowed this, a guest could theoretically disable GDS mitigation, mount an +attack, and re-enable it. + +Mitigation mechanism +-------------------- +This issue is mitigated in microcode. The microcode defines the following new +bits: + + ================================ === ============================ + IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability + and mitigation support. + IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. + IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation + 0 by default. + IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes + to GDS_MITG_DIS are ignored + Can't be cleared once set. + ================================ === ============================ + +GDS can also be mitigated on systems that don't have updated microcode by +disabling AVX. This can be done by setting gather_data_sampling="force" or +"clearcpuid=avx" on the kernel command-line. + +If used, these options will disable AVX use by turning off XSAVE YMM support. +However, the processor will still enumerate AVX support. Userspace that +does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM +support will break. + +Mitigation control on the kernel command line +--------------------------------------------- +The mitigation can be disabled by setting "gather_data_sampling=off" or +"mitigations=off" on the kernel command line. 
Not specifying either will default +to the mitigation being enabled. Specifying "gather_data_sampling=force" will +use the microcode mitigation when available or disable AVX on affected systems +where the microcode hasn't been updated to include the mitigation. + +GDS System Information +------------------------ +The kernel provides vulnerability status information through sysfs. For +GDS this can be accessed by the following sysfs file: + +/sys/devices/system/cpu/vulnerabilities/gather_data_sampling + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable. + Vulnerable Processor vulnerable and mitigation disabled. + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation. + Mitigation: AVX disabled, + no microcode Processor is vulnerable and microcode is missing + mitigation. AVX disabled as mitigation. + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in + effect and cannot be disabled. + Unknown: Dependent on + hypervisor status Running on a virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. + ============================== ============================================= + +GDS Default mitigation +---------------------- +The updated microcode will enable the mitigation by default. The kernel's +default action is to leave the mitigation enabled. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..ec29d36ffa6565d022ddb6c55b1a3e5e9f36b04f --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -0,0 +1,20 @@ +======================== +Hardware vulnerabilities +======================== + +This section describes CPU vulnerabilities and provides an overview of the +possible mitigations along with guidance for selecting mitigations if they +are configurable at compile, boot or run time. + +.. toctree:: + :maxdepth: 1 + + spectre + l1tf + mds + tsx_async_abort + multihit.rst + special-register-buffer-data-sampling.rst + processor_mmio_stale_data.rst + cross-thread-rsb.rst + gather_data_sampling.rst diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/hw-vuln/l1tf.rst similarity index 99% rename from Documentation/admin-guide/l1tf.rst rename to Documentation/admin-guide/hw-vuln/l1tf.rst index bae52b845de0b93af644ea55103d5a912dfca753..31653a9f0e1b3496c468e5e7c82ffccaf0420018 100644 --- a/Documentation/admin-guide/l1tf.rst +++ b/Documentation/admin-guide/hw-vuln/l1tf.rst @@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are: off Disables hypervisor mitigations and doesn't emit any warnings. + It also drops the swap size and available RAM limit restrictions + on both hypervisor and bare metal. + ============ ============================================================= The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`. @@ -442,6 +445,7 @@ The default is 'cond'. If 'l1tf=full,force' is given on the kernel command line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush module parameter is ignored and writes to the sysfs file are rejected. +.. 
_mitigation_selection: Mitigation selection guide -------------------------- @@ -553,7 +557,7 @@ When nested virtualization is in use, three operating systems are involved: the bare metal hypervisor, the nested hypervisor and the nested virtual machine. VMENTER operations from the nested hypervisor into the nested guest will always be processed by the bare metal hypervisor. If KVM is the -bare metal hypervisor it wiil: +bare metal hypervisor it will: - Flush the L1D cache on every switch from the nested hypervisor to the nested virtual machine, so that the nested hypervisor's secrets are not @@ -576,7 +580,8 @@ Default mitigations The kernel default mitigations for vulnerable processors are: - PTE inversion to protect against malicious user space. This is done - unconditionally and cannot be controlled. + unconditionally and cannot be controlled. The swap storage is limited + to ~16TB. - L1D conditional flushing on VMENTER when EPT is enabled for a guest. diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst new file mode 100644 index 0000000000000000000000000000000000000000..2d19c9f4c1fec13752884021be630bd0423708c5 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/mds.rst @@ -0,0 +1,311 @@ +MDS - Microarchitectural Data Sampling +====================================== + +Microarchitectural Data Sampling is a hardware vulnerability which allows +unprivileged speculative access to data which is available in various CPU +internal buffers. + +Affected processors +------------------- + +This vulnerability affects a wide range of Intel processors. The +vulnerability is not present on: + + - Processors from AMD, Centaur and other non Intel vendors + + - Older processor models, where the CPU family is < 6 + + - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus) + + - Intel processors which have the ARCH_CAP_MDS_NO bit set in the + IA32_ARCH_CAPABILITIES MSR. + +Whether a processor is affected or not can be read out from the MDS +vulnerability file in sysfs. See :ref:`mds_sys_info`. + +Not all processors are affected by all variants of MDS, but the mitigation +is identical for all of them so the kernel treats them as a single +vulnerability. + +Related CVEs +------------ + +The following CVE entries are related to the MDS vulnerability: + + ============== ===== =================================================== + CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling + CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling + CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling + CVE-2019-11091 MDSUM Microarchitectural Data Sampling Uncacheable Memory + ============== ===== =================================================== + +Problem +------- + +When performing store, load, L1 refill operations, processors write data +into temporary microarchitectural structures (buffers). The data in the +buffer can be forwarded to load operations as an optimization. + +Under certain conditions, usually a fault/assist caused by a load +operation, data unrelated to the load memory address can be speculatively +forwarded from the buffers. Because the load operation causes a fault or +assist and its result will be discarded, the forwarded data will not cause +incorrect program execution or state changes. But a malicious operation +may be able to forward this speculative data to a disclosure gadget which +allows in turn to infer the value via a cache side channel attack. 
+ +Because the buffers are potentially shared between Hyper-Threads cross +Hyper-Thread attacks are possible. + +Deeper technical information is available in the MDS specific x86 +architecture section: :ref:`Documentation/x86/mds.rst `. + + +Attack scenarios +---------------- + +Attacks against the MDS vulnerabilities can be mounted from malicious non +priviledged user space applications running on hosts or guest. Malicious +guest OSes can obviously mount attacks as well. + +Contrary to other speculation based vulnerabilities the MDS vulnerability +does not allow the attacker to control the memory target address. As a +consequence the attacks are purely sampling based, but as demonstrated with +the TLBleed attack samples can be postprocessed successfully. + +Web-Browsers +^^^^^^^^^^^^ + + It's unclear whether attacks through Web-Browsers are possible at + all. The exploitation through Java-Script is considered very unlikely, + but other widely used web technologies like Webassembly could possibly be + abused. + + +.. _mds_sys_info: + +MDS system information +----------------------- + +The Linux kernel provides a sysfs interface to enumerate the current MDS +status of the system: whether the system is vulnerable, and which +mitigations are active. The relevant sysfs file is: + +/sys/devices/system/cpu/vulnerabilities/mds + +The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable + * - 'Vulnerable' + - The processor is vulnerable, but no mitigation enabled + * - 'Vulnerable: Clear CPU buffers attempted, no microcode' + - The processor is vulnerable but microcode is not updated. + + The mitigation is enabled on a best effort basis. See :ref:`vmwerv` + * - 'Mitigation: Clear CPU buffers' + - The processor is vulnerable and the CPU buffer clearing mitigation is + enabled. + +If the processor is vulnerable then the following information is appended +to the above information: + + ======================== ============================================ + 'SMT vulnerable' SMT is enabled + 'SMT mitigated' SMT is enabled and mitigated + 'SMT disabled' SMT is disabled + 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown + ======================== ============================================ + +.. _vmwerv: + +Best effort mitigation mode +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If the processor is vulnerable, but the availability of the microcode based + mitigation mechanism is not advertised via CPUID the kernel selects a best + effort mitigation mode. This mode invokes the mitigation instructions + without a guarantee that they clear the CPU buffers. + + This is done to address virtualization scenarios where the host has the + microcode update applied, but the hypervisor is not yet updated to expose + the CPUID to the guest. If the host has updated microcode the protection + takes effect otherwise a few cpu cycles are wasted pointlessly. + + The state in the mds sysfs file reflects this situation accordingly. + + +Mitigation mechanism +------------------------- + +The kernel detects the affected CPUs and the presence of the microcode +which is required. + +If a CPU is affected and the microcode is available, then the kernel +enables the mitigation by default. The mitigation can be controlled at boot +time via a kernel command line option. See +:ref:`mds_mitigation_control_command_line`. + +.. 
_cpu_buffer_clear: + +CPU buffer clearing +^^^^^^^^^^^^^^^^^^^ + + The mitigation for MDS clears the affected CPU buffers on return to user + space and when entering a guest. + + If SMT is enabled it also clears the buffers on idle entry when the CPU + is only affected by MSBDS and not any other MDS variant, because the + other variants cannot be protected against cross Hyper-Thread attacks. + + For CPUs which are only affected by MSBDS the user space, guest and idle + transition mitigations are sufficient and SMT is not affected. + +.. _virt_mechanism: + +Virtualization mitigation +^^^^^^^^^^^^^^^^^^^^^^^^^ + + The protection for host to guest transition depends on the L1TF + vulnerability of the CPU: + + - CPU is affected by L1TF: + + If the L1D flush mitigation is enabled and up to date microcode is + available, the L1D flush mitigation is automatically protecting the + guest transition. + + If the L1D flush mitigation is disabled then the MDS mitigation is + invoked explicit when the host MDS mitigation is enabled. + + For details on L1TF and virtualization see: + :ref:`Documentation/admin-guide/hw-vuln//l1tf.rst `. + + - CPU is not affected by L1TF: + + CPU buffers are flushed before entering the guest when the host MDS + mitigation is enabled. + + The resulting MDS protection matrix for the host to guest transition: + + ============ ===== ============= ============ ================= + L1TF MDS VMX-L1FLUSH Host MDS MDS-State + + Don't care No Don't care N/A Not affected + + Yes Yes Disabled Off Vulnerable + + Yes Yes Disabled Full Mitigated + + Yes Yes Enabled Don't care Mitigated + + No Yes N/A Off Vulnerable + + No Yes N/A Full Mitigated + ============ ===== ============= ============ ================= + + This only covers the host to guest transition, i.e. prevents leakage from + host to guest, but does not protect the guest internally. Guests need to + have their own protections. + +.. _xeon_phi: + +XEON PHI specific considerations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The XEON PHI processor family is affected by MSBDS which can be exploited + cross Hyper-Threads when entering idle states. Some XEON PHI variants allow + to use MWAIT in user space (Ring 3) which opens an potential attack vector + for malicious user space. The exposure can be disabled on the kernel + command line with the 'ring3mwait=disable' command line option. + + XEON PHI is not affected by the other MDS variants and MSBDS is mitigated + before the CPU enters a idle state. As XEON PHI is not affected by L1TF + either disabling SMT is not required for full protection. + +.. _mds_smt_control: + +SMT control +^^^^^^^^^^^ + + All MDS variants except MSBDS can be attacked cross Hyper-Threads. That + means on CPUs which are affected by MFBDS or MLPDS it is necessary to + disable SMT for full protection. These are most of the affected CPUs; the + exception is XEON PHI, see :ref:`xeon_phi`. + + Disabling SMT can have a significant performance impact, but the impact + depends on the type of workloads. + + See the relevant chapter in the L1TF mitigation documentation for details: + :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst `. + + +.. _mds_mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +The kernel command line allows to control the MDS mitigations at boot +time with the option "mds=". 
The valid arguments for this option are: + + ============ ============================================================= + full If the CPU is vulnerable, enable all available mitigations + for the MDS vulnerability, CPU buffer clearing on exit to + userspace and when entering a VM. Idle transitions are + protected as well if SMT is enabled. + + It does not automatically disable SMT. + + full,nosmt The same as mds=full, with SMT disabled on vulnerable + CPUs. This is the complete mitigation. + + off Disables MDS mitigations completely. + + ============ ============================================================= + +Not specifying this option is equivalent to "mds=full". For processors +that are affected by both TAA (TSX Asynchronous Abort) and MDS, +specifying just "mds=off" without an accompanying "tsx_async_abort=off" +will have no effect as the same mitigation is used for both +vulnerabilities. + +Mitigation selection guide +-------------------------- + +1. Trusted userspace +^^^^^^^^^^^^^^^^^^^^ + + If all userspace applications are from a trusted source and do not + execute untrusted code which is supplied externally, then the mitigation + can be disabled. + + +2. Virtualization with trusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The same considerations as above versus trusted user space apply. + +3. Virtualization with untrusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The protection depends on the state of the L1TF mitigations. + See :ref:`virt_mechanism`. + + If the MDS mitigation is enabled and SMT is disabled, guest to host and + guest to guest attacks are prevented. + +.. _mds_default_mitigations: + +Default mitigations +------------------- + + The kernel default mitigations for vulnerable processors are: + + - Enable CPU buffer clearing + + The kernel does not by default enforce the disabling of SMT, which leaves + SMT systems vulnerable when running untrusted code. The same rationale as + for L1TF applies. + See :ref:`Documentation/admin-guide/hw-vuln//l1tf.rst `. diff --git a/Documentation/admin-guide/hw-vuln/multihit.rst b/Documentation/admin-guide/hw-vuln/multihit.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba9988d8bce500af602492ee571b35d24b9678b3 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/multihit.rst @@ -0,0 +1,163 @@ +iTLB multihit +============= + +iTLB multihit is an erratum where some processors may incur a machine check +error, possibly resulting in an unrecoverable CPU lockup, when an +instruction fetch hits multiple entries in the instruction TLB. This can +occur when the page size is changed along with either the physical address +or cache type. A malicious guest running on a virtualized system can +exploit this erratum to perform a denial of service attack. + + +Affected processors +------------------- + +Variations of this erratum are present on most Intel Core and Xeon processor +models. The erratum is not present on: + + - non-Intel processors + + - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont) + + - Intel processors that have the PSCHANGE_MC_NO bit set in the + IA32_ARCH_CAPABILITIES MSR. 
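+
+The PSCHANGE_MC_NO bit mentioned above can also be inspected directly with the
+``rdmsr`` utility from msr-tools. This is only a sketch: it assumes the ``msr``
+kernel module is available, and the MSR address (0x10a) and bit position (6)
+are taken from Intel's architectural definition of IA32_ARCH_CAPABILITIES, not
+from this document::
+
+    # Make MSRs readable from userspace, then dump IA32_ARCH_CAPABILITIES
+    modprobe msr
+    rdmsr -p 0 0x10a
+
+    # Print only the PSCHANGE_MC_NO field; 1 means the CPU is not vulnerable
+    rdmsr -p 0 -f 6:6 0x10a
+
+The sysfs interface described later in this document reports the kernel's own
+assessment and is the preferred way to check a running system.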
+ + +Related CVEs +------------ + +The following CVE entry is related to this issue: + + ============== ================================================= + CVE-2018-12207 Machine Check Error Avoidance on Page Size Change + ============== ================================================= + + +Problem +------- + +Privileged software, including OS and virtual machine managers (VMM), are in +charge of memory management. A key component in memory management is the control +of the page tables. Modern processors use virtual memory, a technique that creates +the illusion of a very large memory for processors. This virtual space is split +into pages of a given size. Page tables translate virtual addresses to physical +addresses. + +To reduce latency when performing a virtual to physical address translation, +processors include a structure, called TLB, that caches recent translations. +There are separate TLBs for instruction (iTLB) and data (dTLB). + +Under this errata, instructions are fetched from a linear address translated +using a 4 KB translation cached in the iTLB. Privileged software modifies the +paging structure so that the same linear address using large page size (2 MB, 4 +MB, 1 GB) with a different physical address or memory type. After the page +structure modification but before the software invalidates any iTLB entries for +the linear address, a code fetch that happens on the same linear address may +cause a machine-check error which can result in a system hang or shutdown. + + +Attack scenarios +---------------- + +Attacks against the iTLB multihit erratum can be mounted from malicious +guests in a virtualized system. + + +iTLB multihit system information +-------------------------------- + +The Linux kernel provides a sysfs interface to enumerate the current iTLB +multihit status of the system:whether the system is vulnerable and which +mitigations are active. The relevant sysfs file is: + +/sys/devices/system/cpu/vulnerabilities/itlb_multihit + +The possible values in this file are: + +.. list-table:: + + * - Not affected + - The processor is not vulnerable. + * - KVM: Mitigation: Split huge pages + - Software changes mitigate this issue. + * - KVM: Vulnerable + - The processor is vulnerable, but no mitigation enabled + + +Enumeration of the erratum +-------------------------------- + +A new bit has been allocated in the IA32_ARCH_CAPABILITIES (PSCHANGE_MC_NO) msr +and will be set on CPU's which are mitigated against this issue. + + ======================================= =========== =============================== + IA32_ARCH_CAPABILITIES MSR Not present Possibly vulnerable,check model + IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO] '0' Likely vulnerable,check model + IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO] '1' Not vulnerable + ======================================= =========== =============================== + + +Mitigation mechanism +------------------------- + +This erratum can be mitigated by restricting the use of large page sizes to +non-executable pages. This forces all iTLB entries to be 4K, and removes +the possibility of multiple hits. + +In order to mitigate the vulnerability, KVM initially marks all huge pages +as non-executable. If the guest attempts to execute in one of those pages, +the page is broken down into 4K pages, which are then marked executable. + +If EPT is disabled or not available on the host, KVM is in control of TLB +flushes and the problematic situation cannot happen. 
However, the shadow +EPT paging mechanism used by nested virtualization is vulnerable, because +the nested guest can trigger multiple iTLB hits by modifying its own +(non-nested) page tables. For simplicity, KVM will make large pages +non-executable in all shadow paging modes. + +Mitigation control on the kernel command line and KVM - module parameter +------------------------------------------------------------------------ + +The KVM hypervisor mitigation mechanism for marking huge pages as +non-executable can be controlled with a module parameter "nx_huge_pages=". +The kernel command line allows to control the iTLB multihit mitigations at +boot time with the option "kvm.nx_huge_pages=". + +The valid arguments for these options are: + + ========== ================================================================ + force Mitigation is enabled. In this case, the mitigation implements + non-executable huge pages in Linux kernel KVM module. All huge + pages in the EPT are marked as non-executable. + If a guest attempts to execute in one of those pages, the page is + broken down into 4K pages, which are then marked executable. + + off Mitigation is disabled. + + auto Enable mitigation only if the platform is affected and the kernel + was not booted with the "mitigations=off" command line parameter. + This is the default option. + ========== ================================================================ + + +Mitigation selection guide +-------------------------- + +1. No virtualization in use +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The system is protected by the kernel unconditionally and no further + action is required. + +2. Virtualization with trusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If the guest comes from a trusted source, you may assume that the guest will + not attempt to maliciously exploit these errata and no further action is + required. + +3. Virtualization with untrusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + If the guest comes from an untrusted source, the guest host kernel will need + to apply iTLB multihit mitigation via the kernel command line or kvm + module parameter. diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst new file mode 100644 index 0000000000000000000000000000000000000000..9393c50b5afc9c9fe8b9ac90ed9fe4774e1d1550 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -0,0 +1,246 @@ +========================================= +Processor MMIO Stale Data Vulnerabilities +========================================= + +Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O +(MMIO) vulnerabilities that can expose data. The sequences of operations for +exposing data range from simple to very complex. Because most of the +vulnerabilities require the attacker to have access to MMIO, many environments +are not affected. System environments using virtualization where MMIO access is +provided to untrusted guests may need mitigation. These vulnerabilities are +not transient execution attacks. However, these vulnerabilities may propagate +stale data into core fill buffers where the data can subsequently be inferred +by an unmitigated transient execution attack. Mitigation for these +vulnerabilities includes a combination of microcode update and software +changes, depending on the platform and usage model. 
Some of these mitigations +are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or +those used to mitigate Special Register Buffer Data Sampling (SRBDS). + +Data Propagators +================ +Propagators are operations that result in stale data being copied or moved from +one microarchitectural buffer or register to another. Processor MMIO Stale Data +Vulnerabilities are operations that may result in stale data being directly +read into an architectural, software-visible state or sampled from a buffer or +register. + +Fill Buffer Stale Data Propagator (FBSDP) +----------------------------------------- +Stale data may propagate from fill buffers (FB) into the non-coherent portion +of the uncore on some non-coherent writes. Fill buffer propagation by itself +does not make stale data architecturally visible. Stale data must be propagated +to a location where it is subject to reading or sampling. + +Sideband Stale Data Propagator (SSDP) +------------------------------------- +The sideband stale data propagator (SSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. The sideband response buffer is +shared by all client cores. For non-coherent reads that go to sideband +destinations, the uncore logic returns 64 bytes of data to the core, including +both requested data and unrequested stale data, from a transaction buffer and +the sideband response buffer. As a result, stale data from the sideband +response and transaction buffers may now reside in a core fill buffer. + +Primary Stale Data Propagator (PSDP) +------------------------------------ +The primary stale data propagator (PSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. Similar to the sideband response +buffer, the primary response buffer is shared by all client cores. For some +processors, MMIO primary reads will return 64 bytes of data to the core fill +buffer including both requested data and unrequested stale data. This is +similar to the sideband stale data propagator. + +Vulnerabilities +=============== +Device Register Partial Write (DRPW) (CVE-2022-21166) +----------------------------------------------------- +Some endpoint MMIO registers incorrectly handle writes that are smaller than +the register size. Instead of aborting the write or only copying the correct +subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than +specified by the write transaction may be written to the register. On +processors affected by FBSDP, this may expose stale data from the fill buffers +of the core that created the write transaction. + +Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) +---------------------------------------------------- +After propagators may have moved data around the uncore and copied stale data +into client core fill buffers, processors affected by MFBDS can leak data from +the fill buffer. It is limited to the client (including Intel Xeon server E3) +uncore implementation. + +Shared Buffers Data Read (SBDR) (CVE-2022-21123) +------------------------------------------------ +It is similar to Shared Buffer Data Sampling (SBDS) except that the data is +directly read into the architectural software-visible state. It is limited to +the client (including Intel Xeon server E3) uncore implementation. + +Affected Processors +=================== +Not all the CPUs are affected by all the variants. 
For instance, most +processors for the server market (excluding Intel Xeon E3 processors) are +impacted by only Device Register Partial Write (DRPW). + +Below is the list of affected Intel processors [#f1]_: + + =================== ============ ========= + Common name Family_Model Steppings + =================== ============ ========= + HASWELL_X 06_3FH 2,4 + SKYLAKE_L 06_4EH 3 + BROADWELL_X 06_4FH All + SKYLAKE_X 06_55H 3,4,6,7,11 + BROADWELL_D 06_56H 3,4,5 + SKYLAKE 06_5EH 3 + ICELAKE_X 06_6AH 4,5,6 + ICELAKE_D 06_6CH 1 + ICELAKE_L 06_7EH 5 + ATOM_TREMONT_D 06_86H All + LAKEFIELD 06_8AH 1 + KABYLAKE_L 06_8EH 9 to 12 + ATOM_TREMONT 06_96H 1 + ATOM_TREMONT_L 06_9CH 0 + KABYLAKE 06_9EH 9 to 13 + COMETLAKE 06_A5H 2,3,5 + COMETLAKE_L 06_A6H 0,1 + ROCKETLAKE 06_A7H 1 + =================== ============ ========= + +If a CPU is in the affected processor list, but not affected by a variant, it +is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later +section, mitigation largely remains the same for all the variants, i.e. to +clear the CPU fill buffers via VERW instruction. + +New bits in MSRs +================ +Newer processors and microcode update on existing affected processors added new +bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate +specific variants of Processor MMIO Stale Data vulnerabilities and mitigation +capability. + +MSR IA32_ARCH_CAPABILITIES +-------------------------- +Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the + Shared Buffers Data Read (SBDR) vulnerability or the sideband stale + data propagator (SSDP). +Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer + Stale Data Propagator (FBSDP). +Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data + Propagator (PSDP). +Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer + values as part of MD_CLEAR operations. Processors that do not + enumerate MDS_NO (meaning they are affected by MDS) but that do + enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate + FB_CLEAR as part of their MD_CLEAR support. +Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR + IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS + bit can be set to cause the VERW instruction to not perform the + FB_CLEAR action. Not all processors that support FB_CLEAR will support + FB_CLEAR_CTRL. + +MSR IA32_MCU_OPT_CTRL +--------------------- +Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR +action. This may be useful to reduce the performance impact of FB_CLEAR in +cases where system software deems it warranted (for example, when performance +is more critical, or the untrusted software has no MMIO access). Note that +FB_CLEAR_DIS has no impact on enumeration (for example, it does not change +FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors +that enumerate FB_CLEAR. + +Mitigation +========== +Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the +same mitigation strategy to force the CPU to clear the affected buffers before +an attacker can extract the secrets. + +This is achieved by using the otherwise unused and obsolete VERW instruction in +combination with a microcode update. The microcode clears the affected CPU +buffers when the VERW instruction is executed. 
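+
+Whether the loaded microcode enumerates FB_CLEAR (bit 17 of
+IA32_ARCH_CAPABILITIES, as listed in the table above) can be checked from
+userspace. A minimal sketch using ``rdmsr`` from msr-tools; the MSR address
+(0x10a) is the architectural one and is an assumption not stated in this
+document::
+
+    modprobe msr
+    # Prints 1 when VERW will also clear the CPU fill buffers
+    rdmsr -p 0 -f 17:17 0x10a
+
+The sysfs file described under "Mitigation status information" below reports
+the kernel's combined view of enumeration and the selected mitigation.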
+ +Kernel reuses the MDS function to invoke the buffer clearing: + + mds_clear_cpu_buffers() + +On MDS affected CPUs, the kernel already invokes CPU buffer clear on +kernel/userspace, hypervisor/guest and C-state (idle) transitions. No +additional mitigation is needed on such CPUs. + +For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker +with MMIO capability. Therefore, VERW is not required for kernel/userspace. For +virtualization case, VERW is only needed at VMENTER for a guest with MMIO +capability. + +Mitigation points +----------------- +Return to user space +^^^^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation +needed. + +C-State transition +^^^^^^^^^^^^^^^^^^ +Control register writes by CPU during C-state transition can propagate data +from fill buffer to uncore buffers. Execute VERW before C-state transition to +clear CPU fill buffers. + +Guest entry point +^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise +execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by +MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO +Stale Data vulnerabilities, so there is no need to execute VERW for such guests. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows to control the Processor MMIO Stale Data +mitigations at boot time with the option "mmio_stale_data=". The valid +arguments for this option are: + + ========== ================================================================= + full If the CPU is vulnerable, enable mitigation; CPU buffer clearing + on exit to userspace and when entering a VM. Idle transitions are + protected as well. It does not automatically disable SMT. + full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the + complete mitigation. + off Disables mitigation completely. + ========== ================================================================= + +If the CPU is affected and mmio_stale_data=off is not supplied on the kernel +command line, then the kernel selects the appropriate mitigation. + +Mitigation status information +----------------------------- +The Linux kernel provides a sysfs interface to enumerate the current +vulnerability status of the system: whether the system is vulnerable, and +which mitigations are active. The relevant sysfs file is: + + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + +The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable + * - 'Vulnerable' + - The processor is vulnerable, but no mitigation enabled + * - 'Vulnerable: Clear CPU buffers attempted, no microcode' + - The processor is vulnerable, but microcode is not updated. The + mitigation is enabled on a best effort basis. + * - 'Mitigation: Clear CPU buffers' + - The processor is vulnerable and the CPU buffer clearing mitigation is + enabled. + +If the processor is vulnerable then the following information is appended to +the above information: + + ======================== =========================================== + 'SMT vulnerable' SMT is enabled + 'SMT disabled' SMT is disabled + 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown + ======================== =========================================== + +References +---------- +.. 
[#f1] Affected Processors + https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst new file mode 100644 index 0000000000000000000000000000000000000000..47b1b3afac994beb278d4923e262a1da7bad5a23 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst @@ -0,0 +1,149 @@ +.. SPDX-License-Identifier: GPL-2.0 + +SRBDS - Special Register Buffer Data Sampling +============================================= + +SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to +infer values returned from special register accesses. Special register +accesses are accesses to off core registers. According to Intel's evaluation, +the special register reads that have a security expectation of privacy are +RDRAND, RDSEED and SGX EGETKEY. + +When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved +to the core through the special register mechanism that is susceptible +to MDS attacks. + +Affected processors +-------------------- +Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may +be affected. + +A processor is affected by SRBDS if its Family_Model and stepping is +in the following list, with the exception of the listed processors +exporting MDS_NO while Intel TSX is available yet not enabled. The +latter class of processors are only affected when Intel TSX is enabled +by software using TSX_CTRL_MSR otherwise they are not affected. + + ============= ============ ======== + common name Family_Model Stepping + ============= ============ ======== + IvyBridge 06_3AH All + + Haswell 06_3CH All + Haswell_L 06_45H All + Haswell_G 06_46H All + + Broadwell_G 06_47H All + Broadwell 06_3DH All + + Skylake_L 06_4EH All + Skylake 06_5EH All + + Kabylake_L 06_8EH <= 0xC + Kabylake 06_9EH <= 0xD + ============= ============ ======== + +Related CVEs +------------ + +The following CVE entry is related to this SRBDS issue: + + ============== ===== ===================================== + CVE-2020-0543 SRBDS Special Register Buffer Data Sampling + ============== ===== ===================================== + +Attack scenarios +---------------- +An unprivileged user can extract values returned from RDRAND and RDSEED +executed on another core or sibling thread using MDS techniques. + + +Mitigation mechanism +------------------- +Intel will release microcode updates that modify the RDRAND, RDSEED, and +EGETKEY instructions to overwrite secret special register data in the shared +staging buffer before the secret data can be accessed by another logical +processor. + +During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core +accesses from other logical processors will be delayed until the special +register read is complete and the secret data in the shared staging buffer is +overwritten. + +This has three effects on performance: + +#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. + +#. Executing RDRAND at the same time on multiple logical processors will be + serialized, resulting in an overall reduction in the maximum RDRAND + bandwidth. + +#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other + logical processors that miss their core caches, with an impact similar to + legacy locked cache-line-split accesses. 
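+
+Whether a given system ends up with the mitigation in effect can be verified
+from userspace; the sysfs file used here is the one described in the "SRBDS
+System Information" section later in this document::
+
+    cat /sys/devices/system/cpu/vulnerabilities/srbds
+
+A value of "Mitigation: Microcode" means RDRAND, RDSEED and EGETKEY execute
+with the serialization described above, including its performance effects.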
+
+The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable
+the mitigation for RDRAND and RDSEED instructions executed outside of Intel
+Software Guard Extensions (Intel SGX) enclaves. On logical processors that
+disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not
+take longer to execute and do not impact performance of sibling logical
+processors' memory accesses. The opt-out mechanism does not affect Intel SGX
+enclaves (including execution of RDRAND or RDSEED inside an enclave, as well
+as EGETKEY execution).
+
+IA32_MCU_OPT_CTRL MSR Definition
+--------------------------------
+Along with the mitigation for this issue, Intel added a new thread-scope
+IA32_MCU_OPT_CTRL MSR (address 0x123). The presence of this MSR and
+RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL =
+9]==1. This MSR is introduced through the microcode update.
+
+Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor
+disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX
+enclave on that logical processor. Opting out of the mitigation for a
+particular logical processor does not affect the RDRAND and RDSEED mitigations
+for other logical processors.
+
+Note that inside of an Intel SGX enclave, the mitigation is applied regardless
+of the value of RNGDS_MITG_DIS.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows control over the SRBDS mitigation at boot time
+with the option "srbds=". The option for this is:
+
+ ============= =============================================================
+ off This option disables SRBDS mitigation for RDRAND and RDSEED on
+ affected platforms.
+ ============= =============================================================
+
+SRBDS System Information
+------------------------
+The Linux kernel provides vulnerability status information through sysfs. For
+SRBDS this can be accessed by the following sysfs file:
+/sys/devices/system/cpu/vulnerabilities/srbds
+
+The possible values contained in this file are:
+
+ ============================== =============================================
+ Not affected Processor not vulnerable
+ Vulnerable Processor vulnerable and mitigation disabled
+ Vulnerable: No microcode Processor vulnerable and microcode is missing
+ mitigation
+ Mitigation: Microcode Processor is vulnerable and mitigation is in
+ effect.
+ Mitigation: TSX disabled Processor is only vulnerable when TSX is
+ enabled while this system was booted with TSX
+ disabled.
+ Unknown: Dependent on
+ hypervisor status Running on virtual guest processor that is
+ affected but with no way to know if host
+ processor is mitigated or vulnerable.
+ ============================== =============================================
+
+SRBDS Default mitigation
+------------------------
+The updated microcode serializes processor accesses during execution of RDRAND
+or RDSEED and ensures that the shared buffer is overwritten before it is
+released for reuse. Use the "srbds=off" kernel command line option to disable
+the mitigation for RDRAND and RDSEED.
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a4cb79d0dd576d3bbdba013b4403f524abfeb23c
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -0,0 +1,793 @@
+..
SPDX-License-Identifier: GPL-2.0 + +Spectre Side Channels +===================== + +Spectre is a class of side channel attacks that exploit branch prediction +and speculative execution on modern CPUs to read memory, possibly +bypassing access controls. Speculative execution side channel exploits +do not modify memory but attempt to infer privileged data in the memory. + +This document covers Spectre variant 1 and Spectre variant 2. + +Affected processors +------------------- + +Speculative execution side channel methods affect a wide range of modern +high performance processors, since most modern high speed processors +use branch prediction and speculative execution. + +The following CPUs are vulnerable: + + - Intel Core, Atom, Pentium, and Xeon processors + + - AMD Phenom, EPYC, and Zen processors + + - IBM POWER and zSeries processors + + - Higher end ARM processors + + - Apple CPUs + + - Higher end MIPS CPUs + + - Likely most other high performance CPUs. Contact your CPU vendor for details. + +Whether a processor is affected or not can be read out from the Spectre +vulnerability files in sysfs. See :ref:`spectre_sys_info`. + +Related CVEs +------------ + +The following CVE entries describe Spectre variants: + + ============= ======================= ========================== + CVE-2017-5753 Bounds check bypass Spectre variant 1 + CVE-2017-5715 Branch target injection Spectre variant 2 + CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs) + ============= ======================= ========================== + +Problem +------- + +CPUs use speculative operations to improve performance. That may leave +traces of memory accesses or computations in the processor's caches, +buffers, and branch predictors. Malicious software may be able to +influence the speculative execution paths, and then use the side effects +of the speculative execution in the CPUs' caches and buffers to infer +privileged data touched during the speculative execution. + +Spectre variant 1 attacks take advantage of speculative execution of +conditional branches, while Spectre variant 2 attacks use speculative +execution of indirect branches to leak privileged memory. +See :ref:`[1] ` :ref:`[5] ` :ref:`[6] ` +:ref:`[7] ` :ref:`[10] ` :ref:`[11] `. + +Spectre variant 1 (Bounds Check Bypass) +--------------------------------------- + +The bounds check bypass attack :ref:`[2] ` takes advantage +of speculative execution that bypasses conditional branch instructions +used for memory access bounds check (e.g. checking if the index of an +array results in memory access within a valid range). This results in +memory accesses to invalid memory (with out-of-bound index) that are +done speculatively before validation checks resolve. Such speculative +memory accesses can leave side effects, creating side channels which +leak information to the attacker. + +There are some extensions of Spectre variant 1 attacks for reading data +over the network, see :ref:`[12] `. However such attacks +are difficult, low bandwidth, fragile, and are considered low risk. + +Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not +only about user-controlled array bounds checks. It can affect any +conditional checks. The kernel entry code interrupt, exception, and NMI +handlers all have conditional swapgs checks. Those may be problematic +in the context of Spectre v1, as kernel code can speculatively run with +a user GS. 
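+
+As an illustration only (a user-space C sketch with hypothetical array names,
+not kernel code), the example below shows the classic variant 1 gadget pattern
+and a branch-free index clamp in the spirit of the kernel's
+array_index_nospec() helper. Like the kernel's generic helper, the clamp
+relies on arithmetic right shift of a negative value::
+
+   #include <stddef.h>
+   #include <stdint.h>
+
+   #define ARRAY1_SIZE 16
+
+   static uint8_t array1[ARRAY1_SIZE];
+   static uint8_t array2[256 * 512];
+
+   /* All-ones when index < size, zero otherwise, computed without a
+    * conditional branch (sketch of the generic array_index_mask_nospec()
+    * idea). */
+   static size_t index_mask(size_t index, size_t size)
+   {
+       return (size_t)(~(long)(index | (size - 1 - index)) >>
+                       (sizeof(long) * 8 - 1));
+   }
+
+   /* Classic gadget: the bounds check may be bypassed speculatively and the
+    * dependent array2 load leaves a cache footprint keyed on the
+    * speculatively (possibly out-of-bounds) loaded byte. */
+   uint8_t gadget_unsafe(size_t index)
+   {
+       if (index < ARRAY1_SIZE)
+           return array2[array1[index] * 512];
+       return 0;
+   }
+
+   /* Bounds clipping: on a mis-speculated path the index is forced to zero,
+    * so no attacker-chosen address is dereferenced. */
+   uint8_t gadget_clipped(size_t index)
+   {
+       if (index < ARRAY1_SIZE) {
+           index &= index_mask(index, ARRAY1_SIZE);
+           return array2[array1[index] * 512];
+       }
+       return 0;
+   }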
+ +Spectre variant 2 (Branch Target Injection) +------------------------------------------- + +The branch target injection attack takes advantage of speculative +execution of indirect branches :ref:`[3] `. The indirect +branch predictors inside the processor used to guess the target of +indirect branches can be influenced by an attacker, causing gadget code +to be speculatively executed, thus exposing sensitive data touched by +the victim. The side effects left in the CPU's caches during speculative +execution can be measured to infer data values. + +.. _poison_btb: + +In Spectre variant 2 attacks, the attacker can steer speculative indirect +branches in the victim to gadget code by poisoning the branch target +buffer of a CPU used for predicting indirect branch addresses. Such +poisoning could be done by indirect branching into existing code, +with the address offset of the indirect branch under the attacker's +control. Since the branch prediction on impacted hardware does not +fully disambiguate branch address and uses the offset for prediction, +this could cause privileged code's indirect branch to jump to a gadget +code with the same offset. + +The most useful gadgets take an attacker-controlled input parameter (such +as a register value) so that the memory read can be controlled. Gadgets +without input parameters might be possible, but the attacker would have +very little control over what memory can be read, reducing the risk of +the attack revealing useful data. + +One other variant 2 attack vector is for the attacker to poison the +return stack buffer (RSB) :ref:`[13] ` to cause speculative +subroutine return instruction execution to go to a gadget. An attacker's +imbalanced subroutine call instructions might "poison" entries in the +return stack buffer which are later consumed by a victim's subroutine +return instructions. This attack can be mitigated by flushing the return +stack buffer on context switch, or virtual machine (VM) exit. + +On systems with simultaneous multi-threading (SMT), attacks are possible +from the sibling thread, as level 1 cache and branch target buffer +(BTB) may be shared between hardware threads in a CPU core. A malicious +program running on the sibling thread may influence its peer's BTB to +steer its indirect branch speculations to gadget code, and measure the +speculative execution's side effects left in level 1 cache to infer the +victim's data. + +Yet another variant 2 attack vector is for the attacker to poison the +Branch History Buffer (BHB) to speculatively steer an indirect branch +to a specific Branch Target Buffer (BTB) entry, even if the entry isn't +associated with the source address of the indirect branch. Specifically, +the BHB might be shared across privilege levels even in the presence of +Enhanced IBRS. + +Currently the only known real-world BHB attack vector is via +unprivileged eBPF. Therefore, it's highly recommended to not enable +unprivileged eBPF, especially when eIBRS is used (without retpolines). +For a full mitigation against BHB attacks, it's recommended to use +retpolines (or eIBRS combined with retpolines). + +Attack scenarios +---------------- + +The following list of attack scenarios have been anticipated, but may +not cover all possible attack vectors. + +1. A user process attacking the kernel +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Spectre variant 1 +~~~~~~~~~~~~~~~~~ + + The attacker passes a parameter to the kernel via a register or + via a known address in memory during a syscall. 
Such parameter may + be used later by the kernel as an index to an array or to derive + a pointer for a Spectre variant 1 attack. The index or pointer + is invalid, but bound checks are bypassed in the code branch taken + for speculative execution. This could cause privileged memory to be + accessed and leaked. + + For kernel code that has been identified where data pointers could + potentially be influenced for Spectre attacks, new "nospec" accessor + macros are used to prevent speculative loading of data. + +Spectre variant 1 (swapgs) +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + An attacker can train the branch predictor to speculatively skip the + swapgs path for an interrupt or exception. If they initialize + the GS register to a user-space value, if the swapgs is speculatively + skipped, subsequent GS-related percpu accesses in the speculation + window will be done with the attacker-controlled GS value. This + could cause privileged memory to be accessed and leaked. + + For example: + + :: + + if (coming from user space) + swapgs + mov %gs:, %reg + mov (%reg), %reg1 + + When coming from user space, the CPU can speculatively skip the + swapgs, and then do a speculative percpu load using the user GS + value. So the user can speculatively force a read of any kernel + value. If a gadget exists which uses the percpu value as an address + in another load/store, then the contents of the kernel value may + become visible via an L1 side channel attack. + + A similar attack exists when coming from kernel space. The CPU can + speculatively do the swapgs, causing the user GS to get used for the + rest of the speculative window. + +Spectre variant 2 +~~~~~~~~~~~~~~~~~ + + A spectre variant 2 attacker can :ref:`poison ` the branch + target buffer (BTB) before issuing syscall to launch an attack. + After entering the kernel, the kernel could use the poisoned branch + target buffer on indirect jump and jump to gadget code in speculative + execution. + + If an attacker tries to control the memory addresses leaked during + speculative execution, he would also need to pass a parameter to the + gadget, either through a register or a known address in memory. After + the gadget has executed, he can measure the side effect. + + The kernel can protect itself against consuming poisoned branch + target buffer entries by using return trampolines (also known as + "retpoline") :ref:`[3] ` :ref:`[9] ` for all + indirect branches. Return trampolines trap speculative execution paths + to prevent jumping to gadget code during speculative execution. + x86 CPUs with Enhanced Indirect Branch Restricted Speculation + (Enhanced IBRS) available in hardware should use the feature to + mitigate Spectre variant 2 instead of retpoline. Enhanced IBRS is + more efficient than retpoline. + + There may be gadget code in firmware which could be exploited with + Spectre variant 2 attack by a rogue user process. To mitigate such + attacks on x86, Indirect Branch Restricted Speculation (IBRS) feature + is turned on before the kernel invokes any firmware code. + +2. A user process attacking another user process +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A malicious user process can try to attack another user process, + either via a context switch on the same hardware thread, or from the + sibling hyperthread sharing a physical processor core on simultaneous + multi-threading (SMT) system. 
+ + Spectre variant 1 attacks generally require passing parameters + between the processes, which needs a data passing relationship, such + as remote procedure calls (RPC). Those parameters are used in gadget + code to derive invalid data pointers accessing privileged memory in + the attacked process. + + Spectre variant 2 attacks can be launched from a rogue process by + :ref:`poisoning ` the branch target buffer. This can + influence the indirect branch targets for a victim process that either + runs later on the same hardware thread, or running concurrently on + a sibling hardware thread sharing the same physical core. + + A user process can protect itself against Spectre variant 2 attacks + by using the prctl() syscall to disable indirect branch speculation + for itself. An administrator can also cordon off an unsafe process + from polluting the branch target buffer by disabling the process's + indirect branch speculation. This comes with a performance cost + from not using indirect branch speculation and clearing the branch + target buffer. When SMT is enabled on x86, for a process that has + indirect branch speculation disabled, Single Threaded Indirect Branch + Predictors (STIBP) :ref:`[4] ` are turned on to prevent the + sibling thread from controlling branch target buffer. In addition, + the Indirect Branch Prediction Barrier (IBPB) is issued to clear the + branch target buffer when context switching to and from such process. + + On x86, the return stack buffer is stuffed on context switch. + This prevents the branch target buffer from being used for branch + prediction when the return stack buffer underflows while switching to + a deeper call stack. Any poisoned entries in the return stack buffer + left by the previous process will also be cleared. + + User programs should use address space randomization to make attacks + more difficult (Set /proc/sys/kernel/randomize_va_space = 1 or 2). + +3. A virtualized guest attacking the host +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The attack mechanism is similar to how user processes attack the + kernel. The kernel is entered via hyper-calls or other virtualization + exit paths. + + For Spectre variant 1 attacks, rogue guests can pass parameters + (e.g. in registers) via hyper-calls to derive invalid pointers to + speculate into privileged memory after entering the kernel. For places + where such kernel code has been identified, nospec accessor macros + are used to stop speculative memory access. + + For Spectre variant 2 attacks, rogue guests can :ref:`poison + ` the branch target buffer or return stack buffer, causing + the kernel to jump to gadget code in the speculative execution paths. + + To mitigate variant 2, the host kernel can use return trampolines + for indirect branches to bypass the poisoned branch target buffer, + and flushing the return stack buffer on VM exit. This prevents rogue + guests from affecting indirect branching in the host kernel. + + To protect host processes from rogue guests, host processes can have + indirect branch speculation disabled via prctl(). The branch target + buffer is cleared before context switching to such processes. + +4. A virtualized guest attacking other guest +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + A rogue guest may attack another guest to get data accessible by the + other guest. + + Spectre variant 1 attacks are possible if parameters can be passed + between guests. This may be done via mechanisms such as shared memory + or message passing. 
Such parameters could be used to derive data + pointers to privileged data in guest. The privileged data could be + accessed by gadget code in the victim's speculation paths. + + Spectre variant 2 attacks can be launched from a rogue guest by + :ref:`poisoning ` the branch target buffer or the return + stack buffer. Such poisoned entries could be used to influence + speculation execution paths in the victim guest. + + Linux kernel mitigates attacks to other guests running in the same + CPU hardware thread by flushing the return stack buffer on VM exit, + and clearing the branch target buffer before switching to a new guest. + + If SMT is used, Spectre variant 2 attacks from an untrusted guest + in the sibling hyperthread can be mitigated by the administrator, + by turning off the unsafe guest's indirect branch speculation via + prctl(). A guest can also protect itself by turning on microcode + based mitigations (such as IBPB or STIBP on x86) within the guest. + +.. _spectre_sys_info: + +Spectre system information +-------------------------- + +The Linux kernel provides a sysfs interface to enumerate the current +mitigation status of the system for Spectre: whether the system is +vulnerable, and which mitigations are active. + +The sysfs file showing Spectre variant 1 mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + +The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable. + * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers' + - The swapgs protections are disabled; otherwise it has + protection in the kernel on a case by case base with explicit + pointer sanitation and usercopy LFENCE barriers. + * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization' + - Protection in the kernel on a case by case base with explicit + pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE + barriers. + +However, the protections are put in place on a case by case basis, +and there is no guarantee that all possible attack vectors for Spectre +variant 1 are covered. + +The spectre_v2 kernel file reports if the kernel has been compiled with +retpoline mitigation or if the CPU has hardware mitigation, and if the +CPU has support for additional process-specific mitigation. + +This file also reports CPU features enabled by microcode to mitigate +attack between user processes: + +1. Indirect Branch Prediction Barrier (IBPB) to add additional + isolation between processes of different users. +2. Single Thread Indirect Branch Predictors (STIBP) to add additional + isolation between CPU threads running on the same core. + +These CPU features may impact performance when used and can be enabled +per process on a case-by-case base. 
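+
+These status files are plain text and can be read like any other sysfs
+attribute. For example, a minimal C sketch (error handling abbreviated) that
+prints the Spectre variant 1 status::
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+       char line[256];
+       FILE *f = fopen(
+           "/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");
+
+       if (!f)
+           return 1;
+       if (fgets(line, sizeof(line), f))
+           printf("spectre_v1: %s", line);
+       fclose(f);
+       return 0;
+   }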
+ +The sysfs file showing Spectre variant 2 mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spectre_v2 + +The possible values in this file are: + + - Kernel status: + + ======================================== ================================= + 'Not affected' The processor is not vulnerable + 'Mitigation: None' Vulnerable, no mitigation + 'Mitigation: Retpolines' Use Retpoline thunks + 'Mitigation: LFENCE' Use LFENCE instructions + 'Mitigation: Enhanced IBRS' Hardware-focused mitigation + 'Mitigation: Enhanced IBRS + Retpolines' Hardware-focused + Retpolines + 'Mitigation: Enhanced IBRS + LFENCE' Hardware-focused + LFENCE + ======================================== ================================= + + - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is + used to protect against Spectre variant 2 attacks when calling firmware (x86 only). + + ========== ============================================================= + 'IBRS_FW' Protection against user program attacks when calling firmware + ========== ============================================================= + + - Indirect branch prediction barrier (IBPB) status for protection between + processes of different users. This feature can be controlled through + prctl() per process, or through kernel command line options. This is + an x86 only feature. For more details see below. + + =================== ======================================================== + 'IBPB: disabled' IBPB unused + 'IBPB: always-on' Use IBPB on all tasks + 'IBPB: conditional' Use IBPB on SECCOMP or indirect branch restricted tasks + =================== ======================================================== + + - Single threaded indirect branch prediction (STIBP) status for protection + between different hyper threads. This feature can be controlled through + prctl per process, or through kernel command line options. This is x86 + only feature. For more details see below. + + ==================== ======================================================== + 'STIBP: disabled' STIBP unused + 'STIBP: forced' Use STIBP on all tasks + 'STIBP: conditional' Use STIBP on SECCOMP or indirect branch restricted tasks + ==================== ======================================================== + + - Return stack buffer (RSB) protection status: + + ============= =========================================== + 'RSB filling' Protection of RSB on context switch enabled + ============= =========================================== + + - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status: + + =========================== ======================================================= + 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled + 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable + 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB + =========================== ======================================================= + +Full mitigation might require a microcode update from the CPU +vendor. When the necessary microcode is not available, the kernel will +report vulnerability. + +Turning on mitigation for Spectre variant 1 and Spectre variant 2 +----------------------------------------------------------------- + +1. 
Kernel mitigation +^^^^^^^^^^^^^^^^^^^^ + +Spectre variant 1 +~~~~~~~~~~~~~~~~~ + + For the Spectre variant 1, vulnerable kernel code (as determined + by code audit or scanning tools) is annotated on a case by case + basis to use nospec accessor macros for bounds clipping :ref:`[2] + ` to avoid any usable disclosure gadgets. However, it may + not cover all attack vectors for Spectre variant 1. + + Copy-from-user code has an LFENCE barrier to prevent the access_ok() + check from being mis-speculated. The barrier is done by the + barrier_nospec() macro. + + For the swapgs variant of Spectre variant 1, LFENCE barriers are + added to interrupt, exception and NMI entry where needed. These + barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and + FENCE_SWAPGS_USER_ENTRY macros. + +Spectre variant 2 +~~~~~~~~~~~~~~~~~ + + For Spectre variant 2 mitigation, the compiler turns indirect calls or + jumps in the kernel into equivalent return trampolines (retpolines) + :ref:`[3] ` :ref:`[9] ` to go to the target + addresses. Speculative execution paths under retpolines are trapped + in an infinite loop to prevent any speculative execution jumping to + a gadget. + + To turn on retpoline mitigation on a vulnerable CPU, the kernel + needs to be compiled with a gcc compiler that supports the + -mindirect-branch=thunk-extern -mindirect-branch-register options. + If the kernel is compiled with a Clang compiler, the compiler needs + to support -mretpoline-external-thunk option. The kernel config + CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with + the latest updated microcode. + + On Intel Skylake-era systems the mitigation covers most, but not all, + cases. See :ref:`[3] ` for more details. + + On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced + IBRS on x86), retpoline is automatically disabled at run time. + + The retpoline mitigation is turned on by default on vulnerable + CPUs. It can be forced on or off by the administrator + via the kernel command line and sysfs control files. See + :ref:`spectre_mitigation_control_command_line`. + + On x86, indirect branch restricted speculation is turned on by default + before invoking any firmware code to prevent Spectre variant 2 exploits + using the firmware. + + Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y + and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes + attacks on the kernel generally more difficult. + +2. User program mitigation +^^^^^^^^^^^^^^^^^^^^^^^^^^ + + User programs can mitigate Spectre variant 1 using LFENCE or "bounds + clipping". For more details see :ref:`[2] `. + + For Spectre variant 2 mitigation, individual user programs + can be compiled with return trampolines for indirect branches. + This protects them from consuming poisoned entries in the branch + target buffer left by malicious software. Alternatively, the + programs can disable their indirect branch speculation via prctl() + (See :ref:`Documentation/userspace-api/spec_ctrl.rst `). + On x86, this will turn on STIBP to guard against attacks from the + sibling thread when the user program is running, and use IBPB to + flush the branch target buffer when switching to/from the program. + + Restricting indirect branch speculation on a user program will + also prevent the program from launching a variant 2 attack + on x86. All sand-boxed SECCOMP programs have indirect branch + speculation restricted by default. Administrators can change + that behavior via the kernel command line and sysfs control files. 
+ See :ref:`spectre_mitigation_control_command_line`. + + Programs that disable their indirect branch speculation will have + more overhead and run slower. + + User programs should use address space randomization + (/proc/sys/kernel/randomize_va_space = 1 or 2) to make attacks more + difficult. + +3. VM mitigation +^^^^^^^^^^^^^^^^ + + Within the kernel, Spectre variant 1 attacks from rogue guests are + mitigated on a case by case basis in VM exit paths. Vulnerable code + uses nospec accessor macros for "bounds clipping", to avoid any + usable disclosure gadgets. However, this may not cover all variant + 1 attack vectors. + + For Spectre variant 2 attacks from rogue guests to the kernel, the + Linux kernel uses retpoline or Enhanced IBRS to prevent consumption of + poisoned entries in branch target buffer left by rogue guests. It also + flushes the return stack buffer on every VM exit to prevent a return + stack buffer underflow so poisoned branch target buffer could be used, + or attacker guests leaving poisoned entries in the return stack buffer. + + To mitigate guest-to-guest attacks in the same CPU hardware thread, + the branch target buffer is sanitized by flushing before switching + to a new guest on a CPU. + + The above mitigations are turned on by default on vulnerable CPUs. + + To mitigate guest-to-guest attacks from sibling thread when SMT is + in use, an untrusted guest running in the sibling thread can have + its indirect branch speculation disabled by administrator via prctl(). + + The kernel also allows guests to use any microcode based mitigation + they choose to use (such as IBPB or STIBP on x86) to protect themselves. + +.. _spectre_mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +Spectre variant 2 mitigation can be disabled or force enabled at the +kernel command line. + + nospectre_v1 + + [X86,PPC] Disable mitigations for Spectre Variant 1 + (bounds check bypass). With this option data leaks are + possible in the system. + + nospectre_v2 + + [X86] Disable all mitigations for the Spectre variant 2 + (indirect branch prediction) vulnerability. System may + allow data leaks with this option, which is equivalent + to spectre_v2=off. + + + spectre_v2= + + [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability. + The default operation protects the kernel from + user space attacks. + + on + unconditionally enable, implies + spectre_v2_user=on + off + unconditionally disable, implies + spectre_v2_user=off + auto + kernel detects whether your CPU model is + vulnerable + + Selecting 'on' will, and 'auto' may, choose a + mitigation method at run time according to the + CPU, the available microcode, the setting of the + CONFIG_RETPOLINE configuration option, and the + compiler with which the kernel was built. + + Selecting 'on' will also enable the mitigation + against user space to user space task attacks. + + Selecting 'off' will disable both the kernel and + the user space protections. + + Specific mitigations can also be selected manually: + + retpoline auto pick between generic,lfence + retpoline,generic Retpolines + retpoline,lfence LFENCE; indirect branch + retpoline,amd alias for retpoline,lfence + eibrs enhanced IBRS + eibrs,retpoline enhanced IBRS + Retpolines + eibrs,lfence enhanced IBRS + LFENCE + + Not specifying this option is equivalent to + spectre_v2=auto. 
+ +For user space mitigation: + + spectre_v2_user= + + [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability between + user space tasks + + on + Unconditionally enable mitigations. Is + enforced by spectre_v2=on + + off + Unconditionally disable mitigations. Is + enforced by spectre_v2=off + + prctl + Indirect branch speculation is enabled, + but mitigation can be enabled via prctl + per thread. The mitigation control state + is inherited on fork. + + prctl,ibpb + Like "prctl" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different user + space processes. + + seccomp + Same as "prctl" above, but all seccomp + threads will enable the mitigation unless + they explicitly opt out. + + seccomp,ibpb + Like "seccomp" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different + user space processes. + + auto + Kernel selects the mitigation depending on + the available CPU features and vulnerability. + + Default mitigation: + If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl" + + Not specifying this option is equivalent to + spectre_v2_user=auto. + + In general the kernel by default selects + reasonable mitigations for the current CPU. To + disable Spectre variant 2 mitigations, boot with + spectre_v2=off. Spectre variant 1 mitigations + cannot be disabled. + +Mitigation selection guide +-------------------------- + +1. Trusted userspace +^^^^^^^^^^^^^^^^^^^^ + + If all userspace applications are from trusted sources and do not + execute externally supplied untrusted code, then the mitigations can + be disabled. + +2. Protect sensitive programs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + For security-sensitive programs that have secrets (e.g. crypto + keys), protection against Spectre variant 2 can be put in place by + disabling indirect branch speculation when the program is running + (See :ref:`Documentation/userspace-api/spec_ctrl.rst `). + +3. Sandbox untrusted programs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Untrusted programs that could be a source of attacks can be cordoned + off by disabling their indirect branch speculation when they are run + (See :ref:`Documentation/userspace-api/spec_ctrl.rst `). + This prevents untrusted programs from polluting the branch target + buffer. All programs running in SECCOMP sandboxes have indirect + branch speculation restricted by default. This behavior can be + changed via the kernel command line and sysfs control files. See + :ref:`spectre_mitigation_control_command_line`. + +3. High security mode +^^^^^^^^^^^^^^^^^^^^^ + + All Spectre variant 2 mitigations can be forced on + at boot time for all programs (See the "on" option in + :ref:`spectre_mitigation_control_command_line`). This will add + overhead as indirect branch speculations for all programs will be + restricted. + + On x86, branch target buffer will be flushed with IBPB when switching + to a new program. STIBP is left on all the time to protect programs + against variant 2 attacks originating from programs running on + sibling threads. + + Alternatively, STIBP can be used only when running programs + whose indirect branch speculation is explicitly disabled, + while IBPB is still used all the time when switching to a new + program to clear the branch target buffer (See "ibpb" option in + :ref:`spectre_mitigation_control_command_line`). This "ibpb" option + has less performance cost than the "on" option, which leaves STIBP + on all the time. 
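+
+For the per-process control referenced above, a minimal user-space sketch of
+the speculation control prctl() interface (see
+Documentation/userspace-api/spec_ctrl.rst; this assumes a libc whose
+<sys/prctl.h> exposes the PR_SPEC_* constants, and error handling is
+abbreviated)::
+
+   #include <stdio.h>
+   #include <sys/prctl.h>
+
+   int main(void)
+   {
+       /* Query the current indirect branch speculation state of this task. */
+       int state = prctl(PR_GET_SPECULATION_CTRL,
+                         PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+       if (state < 0)
+           perror("PR_GET_SPECULATION_CTRL");
+
+       /* Disable indirect branch speculation for this task. On x86 this
+        * turns on STIBP while the task runs, and IBPB is used when
+        * switching to/from it. */
+       if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+                 PR_SPEC_DISABLE, 0, 0))
+           perror("PR_SET_SPECULATION_CTRL");
+
+       /* ... run the security-sensitive work here ... */
+       return 0;
+   }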
+ +References on Spectre +--------------------- + +Intel white papers: + +.. _spec_ref1: + +[1] `Intel analysis of speculative execution side channels `_. + +.. _spec_ref2: + +[2] `Bounds check bypass `_. + +.. _spec_ref3: + +[3] `Deep dive: Retpoline: A branch target injection mitigation `_. + +.. _spec_ref4: + +[4] `Deep Dive: Single Thread Indirect Branch Predictors `_. + +AMD white papers: + +.. _spec_ref5: + +[5] `AMD64 technology indirect branch control extension `_. + +.. _spec_ref6: + +[6] `Software techniques for managing speculation on AMD processors `_. + +ARM white papers: + +.. _spec_ref7: + +[7] `Cache speculation side-channels `_. + +.. _spec_ref8: + +[8] `Cache speculation issues update `_. + +Google white paper: + +.. _spec_ref9: + +[9] `Retpoline: a software construct for preventing branch-target-injection `_. + +MIPS white paper: + +.. _spec_ref10: + +[10] `MIPS: response on speculative execution and side channel vulnerabilities `_. + +Academic papers: + +.. _spec_ref11: + +[11] `Spectre Attacks: Exploiting Speculative Execution `_. + +.. _spec_ref12: + +[12] `NetSpectre: Read Arbitrary Memory over Network `_. + +.. _spec_ref13: + +[13] `Spectre Returns! Speculation Attacks using the Return Stack Buffer `_. diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst new file mode 100644 index 0000000000000000000000000000000000000000..af6865b822d21d18946016c1458a4dc3567050e9 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst @@ -0,0 +1,279 @@ +.. SPDX-License-Identifier: GPL-2.0 + +TAA - TSX Asynchronous Abort +====================================== + +TAA is a hardware vulnerability that allows unprivileged speculative access to +data which is available in various CPU internal buffers by using asynchronous +aborts within an Intel TSX transactional region. + +Affected processors +------------------- + +This vulnerability only affects Intel processors that support Intel +Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8) +is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit +(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations +also mitigate against TAA. + +Whether a processor is affected or not can be read out from the TAA +vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`. + +Related CVEs +------------ + +The following CVE entry is related to this TAA issue: + + ============== ===== =================================================== + CVE-2019-11135 TAA TSX Asynchronous Abort (TAA) condition on some + microprocessors utilizing speculative execution may + allow an authenticated user to potentially enable + information disclosure via a side channel with + local access. + ============== ===== =================================================== + +Problem +------- + +When performing store, load or L1 refill operations, processors write +data into temporary microarchitectural structures (buffers). The data in +those buffers can be forwarded to load operations as an optimization. + +Intel TSX is an extension to the x86 instruction set architecture that adds +hardware transactional memory support to improve performance of multi-threaded +software. TSX lets the processor expose and exploit concurrency hidden in an +application due to dynamically avoiding unnecessary synchronization. + +TSX supports atomic memory transactions that are either committed (success) or +aborted. 
During an abort, operations that happened within the transactional region
+are rolled back. An asynchronous abort takes place, among other options, when a
+different thread accesses a cache line that is also used within the transactional
+region when that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can then be used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst `.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - 'Vulnerable'
+ - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The system tries to clear the buffers but the microcode might not support the operation.
+ * - 'Mitigation: Clear CPU buffers'
+ - The microcode has been updated to clear the buffers. TSX is still enabled.
+ * - 'Mitigation: TSX disabled'
+ - TSX is disabled.
+ * - 'Not affected'
+ - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`. + +.. _virt_mechanism: + +Virtualization mitigation +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Affected systems where the host has TAA microcode and TAA is mitigated by +having disabled TSX previously, are not vulnerable regardless of the status +of the VMs. + +In all other cases, if the host either does not have the TAA microcode or +the kernel is not mitigated, the system might be vulnerable. + + +.. _taa_mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +The kernel command line allows to control the TAA mitigations at boot time with +the option "tsx_async_abort=". The valid arguments for this option are: + + ============ ============================================================= + off This option disables the TAA mitigation on affected platforms. + If the system has TSX enabled (see next parameter) and the CPU + is affected, the system is vulnerable. + + full TAA mitigation is enabled. If TSX is enabled, on an affected + system it will clear CPU buffers on ring transitions. On + systems which are MDS-affected and deploy MDS mitigation, + TAA is also mitigated. Specifying this option on those + systems will have no effect. + + full,nosmt The same as tsx_async_abort=full, with SMT disabled on + vulnerable CPUs that have TSX enabled. This is the complete + mitigation. When TSX is disabled, SMT is not disabled because + CPU is not vulnerable to cross-thread TAA attacks. + ============ ============================================================= + +Not specifying this option is equivalent to "tsx_async_abort=full". For +processors that are affected by both TAA and MDS, specifying just +"tsx_async_abort=off" without an accompanying "mds=off" will have no +effect as the same mitigation is used for both vulnerabilities. + +The kernel command line also allows to control the TSX feature using the +parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used +to control the TSX feature and the enumeration of the TSX feature bits (RTM +and HLE) in CPUID. + +The valid options are: + + ============ ============================================================= + off Disables TSX on the system. + + Note that this option takes effect only on newer CPUs which are + not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 + and which get the new IA32_TSX_CTRL MSR through a microcode + update. This new MSR allows for the reliable deactivation of + the TSX functionality. + + on Enables TSX. + + Although there are mitigations for all known security + vulnerabilities, TSX has been known to be an accelerator for + several previous speculation-related CVEs, and so there may be + unknown security risks associated with leaving it enabled. + + auto Disables TSX if X86_BUG_TAA is present, otherwise enables TSX + on the system. + ============ ============================================================= + +Not specifying this option is equivalent to "tsx=off". + +The following combinations of the "tsx_async_abort" and "tsx" are possible. For +affected platforms tsx=auto is equivalent to tsx=off and the result will be: + + ========= ========================== ========================================= + tsx=on tsx_async_abort=full The system will use VERW to clear CPU + buffers. Cross-thread attacks are still + possible on SMT machines. + tsx=on tsx_async_abort=full,nosmt As above, cross-thread attacks on SMT + mitigated. 
+ tsx=on tsx_async_abort=off The system is vulnerable. + tsx=off tsx_async_abort=full TSX might be disabled if microcode + provides a TSX control MSR. If so, + system is not vulnerable. + tsx=off tsx_async_abort=full,nosmt Ditto + tsx=off tsx_async_abort=off ditto + ========= ========================== ========================================= + + +For unaffected platforms "tsx=on" and "tsx_async_abort=full" does not clear CPU +buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0) +"tsx" command line argument has no effect. + +For the affected platforms below table indicates the mitigation status for the +combinations of CPUID bit MD_CLEAR and IA32_ARCH_CAPABILITIES MSR bits MDS_NO +and TSX_CTRL_MSR. + + ======= ========= ============= ======================================== + MDS_NO MD_CLEAR TSX_CTRL_MSR Status + ======= ========= ============= ======================================== + 0 0 0 Vulnerable (needs microcode) + 0 1 0 MDS and TAA mitigated via VERW + 1 1 0 MDS fixed, TAA vulnerable if TSX enabled + because MD_CLEAR has no meaning and + VERW is not guaranteed to clear buffers + 1 X 1 MDS fixed, TAA can be mitigated by + VERW or TSX_CTRL_MSR + ======= ========= ============= ======================================== + +Mitigation selection guide +-------------------------- + +1. Trusted userspace and guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If all user space applications are from a trusted source and do not execute +untrusted code which is supplied externally, then the mitigation can be +disabled. The same applies to virtualized environments with trusted guests. + + +2. Untrusted userspace and guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If there are untrusted applications or guests on the system, enabling TSX +might allow a malicious actor to leak data from the host or from other +processes running on the same physical core. + +If the microcode is available and the TSX is disabled on the host, attacks +are prevented in a virtualized environment as well, even if the VMs do not +explicitly enable the mitigation. + + +.. _taa_default_mitigations: + +Default mitigations +------------------- + +The kernel's default action for vulnerable processors is: + + - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off). diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst index 0873685bab0fc278b1c7938ab588796ff253f6ef..89abc5057349b3971238c6310b519dc9969fe9fd 100644 --- a/Documentation/admin-guide/index.rst +++ b/Documentation/admin-guide/index.rst @@ -17,14 +17,12 @@ etc. kernel-parameters devices -This section describes CPU vulnerabilities and provides an overview of the -possible mitigations along with guidance for selecting mitigations if they -are configurable at compile, boot or run time. +This section describes CPU vulnerabilities and their mitigations. .. toctree:: :maxdepth: 1 - l1tf + hw-vuln/index Here is a set of documents aimed at users who are trying to track down problems and bugs in particular. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 92eb1f42240d7168354dc7129898e2500ef95c1a..81c3e5e6447f7fa062dd9b0fc11b8db8112a8a3e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -113,7 +113,7 @@ the GPE dispatcher. This facility can be used to prevent such uncontrolled GPE floodings. 
- Format: + Format: acpi_no_auto_serialize [HW,ACPI] Disable auto-serialization of AML methods @@ -475,6 +475,12 @@ ccw_timeout_log [S390] See Documentation/s390/CommonIO for details. + cdm-nodes= [KNL] + Format: hexadecimal expression + One bit express one node, if the node is HBM, set the + bit to 1. Then transform Binary to hexadecimal. + Example: node1, node2 is HBM, cdm-nodes=0x06. + cgroup_disable= [KNL] Disable a particular controller Format: {name of the controller(s) to disable} The effects of cgroup_disable=foo are: @@ -482,9 +488,10 @@ a single hierarchy - foo isn't visible as an individually mountable subsystem - {Currently only "memory" controller deal with this and - cut the overhead, others just disable the usage. So - only cgroup_disable=memory is actually worthy} + {Currently "memory" and "files" controller deal with + this and cut the overhead, others just disable the usage. + So cgroup_disable=memory and cgroup_disable=files are + actually worthy} cgroup_no_v1= [KNL] Disable one, multiple, all cgroup controllers in v1 Format: { controller[,controller...] | "all" } @@ -495,6 +502,7 @@ Format: nosocket -- Disable socket memory accounting. nokmem -- Disable kernel memory accounting. + kmem -- Enable kernel memory accounting. checkreqprot [SELINUX] Set initial checkreqprot flag value. Format: { "0" | "1" } @@ -508,6 +516,10 @@ cio_ignore= [S390] See Documentation/s390/CommonIO for details. + + clear_freelist + Enable clear_freelist feature. + clk_ignore_unused [CLK] Prevents the clock framework from automatically gating @@ -674,6 +686,9 @@ cpuidle.off=1 [CPU_IDLE] disable the cpuidle sub-system + cpuidle.governor= + [CPU_IDLE] Name of the cpuidle governor to use. + cpufreq.off=1 [CPU_FREQ] disable the cpufreq sub-system @@ -703,14 +718,14 @@ Documentation/kdump/kdump.txt for an example. crashkernel=size[KMG],high - [KNL, x86_64] range could be above 4G. Allow kernel + [KNL, x86_64, arm64] range could be above 4G. Allow kernel to allocate physical memory region from top, so could be above 4G if system have more than 4G ram installed. Otherwise memory region will be allocated below 4G, if available. It will be ignored if crashkernel=X is specified. crashkernel=size[KMG],low - [KNL, x86_64] range under 4G. When crashkernel=X,high + [KNL, x86_64, arm64] range under 4G. When crashkernel=X,high is passed, kernel could allocate physical memory region above 4G, that cause second kernel crash on system that require some amount of low memory, e.g. swiotlb @@ -800,10 +815,6 @@ debugpat [X86] Enable PAT debugging - decnet.addr= [HW,NET] - Format: [,] - See also Documentation/networking/decnet.txt. - default_hugepagesz= [same as hugepagesz=] The size of the default HugeTLB page size. This is the size represented by @@ -833,6 +844,21 @@ disable= [IPV6] See Documentation/networking/ipv6.txt. + disable_tlbflush_is= [page,range,switch,]mm + [ARM64] Disable using TLB instruction to flush + all PE within the same inner shareable domain. + + NOTE(Important) + This feature is used for learning and debugging + only. Please don't enable it on commercial products. + If you know exactly what the impact of the feature is, + you can configure it as you do. + + range use tlb invalidation ipi for flush_tlb_range + page use tlb invalidation ipi for flush_tlb_page + switch don't local_flush_tlb_mm when switch_mm + mm use tlb invalidation ipi for flush_tlb_mm + hardened_usercopy= [KNL] Under CONFIG_HARDENED_USERCOPY, whether hardening is enabled for this boot. 
Hardened @@ -1063,7 +1089,7 @@ earlyprintk=serial[,0x...[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] - earlyprintk=pciserial,bus:device.function[,baudrate] + earlyprintk=pciserial[,force],bus:device.function[,baudrate] earlyprintk=xdbc[xhciController#] earlyprintk is useful when the kernel crashes before @@ -1095,6 +1121,10 @@ The sclp output can only be used on s390. + The optional "force" to "pciserial" enables use of a + PCI device even when its classcode is not of the + UART class. + edac_report= [HW,EDAC] Control how to report EDAC event Format: {"on" | "off" | "force"} on: enable EDAC to report H/W event. May be overridden @@ -1129,7 +1159,7 @@ you are really sure that your UEFI does sane gc and fulfills the spec otherwise your board may brick. - efi_fake_mem= nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..] [EFI; X86] + efi_fake_mem= nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..] [EFI; X86; ARM64] Add arbitrary attribute to specific memory range by updating original EFI memory map. Region of memory which aa attribute is added to is @@ -1224,6 +1254,15 @@ Warning: use of this parameter will taint the kernel and may cause unknown problems. + fpi_to_tail=off + [KNL] Place pages to front in __free_pages_core(). + This kernel start-up parameter can be just used as + "fpi_to_tail=off", which means memory is online to + the front of freelist. Dynamic open is not supportted, + in other words, "fpi_to_tail=on" will be ignored by + kernel. This is just an ugly solution for circumvention + for some latent bugs revealed by "place pages to tail". + ftrace=[tracer] [FTRACE] will set and start the specified tracer as early as possible in order to facilitate early @@ -1280,6 +1319,26 @@ Format: off | on default: on + gather_data_sampling= + [X86,INTEL] Control the Gather Data Sampling (GDS) + mitigation. + + Gather Data Sampling is a hardware vulnerability which + allows unprivileged speculative access to data which was + previously stored in vector registers. + + This issue is mitigated by default in updated microcode. + The mitigation may have a performance impact but can be + disabled. On systems without the microcode mitigation + disabling AVX serves as a mitigation. + + force: Disable AVX to mitigate systems without + microcode mitigation. No effect if the microcode + mitigation is present. Known to cause crashes in + userspace with buggy AVX enumeration. + + off: Disable GDS mitigation. + gcov_persist= [GCOV] When non-zero (default), profiling data for kernel modules is saved and remains accessible via debugfs, even when the module is unloaded/reloaded. @@ -1362,7 +1421,19 @@ hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET registers. Default set by CONFIG_HPET_MMAP_DEFAULT. + hugepage_prohibit_sz= + [HW] HugeTLB pages should not alloc when the rest of + the normal pages less than hugepage_prohibit_sz. This + setting is to make sure a system can start even when + part of physical memory is broken, admin users can + adjust this according to typical environment. + hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot. + If using node format, the number of pages to allocate + per-node can be specified. + Format: or (node format) + :[,:] + hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages. On x86-64 and powerpc, this option can be specified multiple times interleaved with hugepages= to reserve @@ -1370,6 +1441,9 @@ x86-64 are 2M (when the CPU supports "pse") and 1G (when the CPU supports the "pdpe1gb" cpuinfo flag). 
+ hugetlb_hwpoison_full + [HW] Enable memory error handling of 1GB hugepage. + hung_task_panic= [KNL] Should the hung task detector generate panics. Format: @@ -1749,6 +1823,18 @@ nobypass [PPC/POWERNV] Disable IOMMU bypass, using IOMMU for PCI devices. + iommu.strict= [ARM64] Configure TLB invalidation behaviour + Format: { "0" | "1" } + 0 - Lazy mode (default). + Request that DMA unmap operations use deferred + invalidation of hardware TLBs, for increased + throughput at the cost of reduced device isolation. + Will fall back to strict mode if not supported by + the relevant IOMMU driver. + 1 - Strict mode. + DMA unmap operations invalidate IOMMU hardware TLBs + synchronously. + iommu.passthrough= [ARM64] Configure DMA to bypass the IOMMU by default. Format: { "0" | "1" } @@ -1791,6 +1877,11 @@ to let secondary kernels in charge of setting up LPIs. + irqchip.gicv3_pseudo_nmi= [ARM64] + Enables support for pseudo-NMIs in the kernel. This + requires the kernel to be built with + CONFIG_ARM64_PSEUDO_NMI. + irqfixup [HW] When an interrupt is not handled search all handlers for it. Intended to get systems with badly broken @@ -1883,8 +1974,8 @@ keepinitrd [HW,ARM] - kernelcore= [KNL,X86,IA-64,PPC] - Format: nn[KMGTPE] | nn% | "mirror" + kernelcore= [KNL,X86,IA-64,PPC,ARM64] + Format: nn[KMGTPE] | nn% | "mirror" | "reliable" This parameter specifies the amount of memory usable by the kernel for non-movable allocations. The requested amount is spread evenly throughout all nodes in the @@ -1908,6 +1999,20 @@ for Movable pages. "nn[KMGTPE]", "nn%", and "mirror" are exclusive, so you cannot specify multiple forms. + Option "reliable" is base on option "mirror", but make + some extension. These two features are alternatives. + + reliable_debug= [ARM64] + Format: [F][,S][,P] + Only works with CONFIG_MEMORY_RELIABLE and + "kernelcore=reliable" is configured. + F: User tasks with PF_RELIABLE will not allocate + memory from non-mirrored region if this allocation + from mirrored region failed. + Pagecache and tmpfs will follow this rule too. + S: The shmem does not use the reliable memory. + P: Page cache does not use the reliable memory. + kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port. Format: [,poll interval] The controller # is the number of the ehci usb debug @@ -1942,6 +2047,12 @@ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the default is off. + kpti= [ARM64] Control page table isolation of user + and kernel address spaces. + Default: enabled on cores which need mitigation. + 0: force disabled + 1: force enabled + kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) @@ -1952,6 +2063,25 @@ KVM MMU at runtime. Default is 0 (off) + kvm.nx_huge_pages= + [KVM] Controls the software workaround for the + X86_BUG_ITLB_MULTIHIT bug. + force : Always deploy workaround. + off : Never deploy workaround. + auto : Deploy workaround based on the presence of + X86_BUG_ITLB_MULTIHIT. + + Default is 'auto'. + + If the software workaround is enabled for the host, + guests do need not to enable it for nested guests. + + kvm.nx_huge_pages_recovery_ratio= + [KVM] Controls how many 4KiB pages are periodically zapped + back to huge pages. 0 disables the recovery, otherwise if + the value is N KVM will zap 1/Nth of the 4KiB pages every + minute. The default is 60. + kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM. Default is 1 (enabled) @@ -2069,10 +2199,13 @@ off Disables hypervisor mitigations and doesn't emit any warnings. 
+ It also drops the swap size and available + RAM limit restriction on both hypervisor and + bare metal. Default is 'flush'. - For details see: Documentation/admin-guide/l1tf.rst + For details see: Documentation/admin-guide/hw-vuln/l1tf.rst l2cr= [PPC] @@ -2312,6 +2445,38 @@ Format: , Specifies range of consoles to be captured by the MDA. + mds= [X86,INTEL] + Control mitigation for the Micro-architectural Data + Sampling (MDS) vulnerability. + + Certain CPUs are vulnerable to an exploit against CPU + internal buffers which can forward information to a + disclosure gadget under certain conditions. + + In vulnerable processors, the speculatively + forwarded data can be used in a cache side channel + attack, to access data to which the attacker does + not have direct access. + + This parameter controls the MDS mitigation. The + options are: + + full - Enable MDS mitigation on vulnerable CPUs + full,nosmt - Enable MDS mitigation and disable + SMT on vulnerable CPUs + off - Unconditionally disable MDS mitigation + + On TAA-affected machines, mds=off can be prevented by + an active TAA mitigation as both vulnerabilities are + mitigated with the same mechanism so in order to disable + this mitigation, you need to specify tsx_async_abort=off + too. + + Not specifying this option is equivalent to + mds=full. + + For details see: Documentation/admin-guide/hw-vuln/mds.rst + mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory Amount of memory to be used when the kernel is not able to see the whole system memory or for test. @@ -2469,6 +2634,56 @@ in the "bleeding edge" mini2440 support kernel at http://repo.or.cz/w/linux-2.6/mini2440.git + mitigations= + [X86,PPC,S390,ARM64] Control optional mitigations for + CPU vulnerabilities. This is a set of curated, + arch-independent options, each of which is an + aggregation of existing arch-specific options. + + off + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. + Equivalent to: gather_data_sampling=off [X86] + kpti=0 [ARM64] + kvm.nx_huge_pages=off [X86] + l1tf=off [X86] + mds=off [X86] + mmio_stale_data=off [X86] + no_entry_flush [PPC] + no_uaccess_flush [PPC] + nobp=0 [S390] + nopti [X86,PPC] + nospectre_v1 [PPC] + nospectre_v1 [X86] + nospectre_v2 [X86,PPC,S390,ARM64] + spec_store_bypass_disable=off [X86,PPC] + spectre_v2_user=off [X86] + ssbd=force-off [ARM64] + tsx_async_abort=off [X86] + + Exceptions: + This does not have any effect on + kvm.nx_huge_pages when + kvm.nx_huge_pages=force. + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT + enabled, even if it's vulnerable. This is for + users who don't want to be surprised by SMT + getting disabled across kernel upgrades, or who + have other ways of avoiding SMT-based attacks. + Equivalent to: (default behavior) + + auto,nosmt + Mitigate all CPU vulnerabilities, disabling SMT + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. + Equivalent to: l1tf=flush,nosmt [X86] + mds=full,nosmt [X86] + tsx_async_abort=full,nosmt [X86] + mmio_stale_data=full,nosmt [X86] + mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this parameter allows control of the logging verbosity for @@ -2477,6 +2692,40 @@ log everything. Information is printed at KERN_DEBUG so loglevel=8 may also need to be specified. + mmio_stale_data= + [X86,INTEL] Control mitigation for the Processor + MMIO Stale Data vulnerabilities. 
+ + Processor MMIO Stale Data is a class of + vulnerabilities that may expose data after an MMIO + operation. Exposed data could originate or end in + the same CPU buffers as affected by MDS and TAA. + Therefore, similar to MDS and TAA, the mitigation + is to clear the affected CPU buffers. + + This parameter controls the mitigation. The + options are: + + full - Enable mitigation on vulnerable CPUs + + full,nosmt - Enable mitigation and disable SMT on + vulnerable CPUs. + + off - Unconditionally disable mitigation + + On MDS or TAA affected machines, + mmio_stale_data=off can be prevented by an active + MDS or TAA mitigation as these vulnerabilities are + mitigated with the same mechanism so in order to + disable this mitigation, you need to specify + mds=off and tsx_async_abort=off too. + + Not specifying this option is equivalent to + mmio_stale_data=full. + + For details see: + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + module.sig_enforce [KNL] When CONFIG_MODULE_SIG is set, this means that modules without (valid) signatures will fail to load. @@ -2757,6 +3006,8 @@ noefi Disable EFI runtime services support. + no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel. + noexec [IA-64] noexec [X86] @@ -2794,18 +3045,21 @@ nosmt=force: Force disable SMT, cannot be undone via the sysfs control file. - nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds - check bypass). With this option data leaks are possible - in the system. + nospectre_v1 [X66, PPC] Disable mitigations for Spectre Variant 1 + (bounds check bypass). With this option data leaks + are possible in the system. - nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 - (indirect branch prediction) vulnerability. System may - allow data leaks with this option, which is equivalent - to spectre_v2=off. + nospectre_v2 [X86,ARM64,PPC_FSL_BOOK3E] Disable all mitigations for + the Spectre variant 2 (indirect branch prediction) + vulnerability. System may allow data leaks with this + option. nospec_store_bypass_disable [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + no_uaccess_flush + [PPC] Don't flush the L1-D cache after accessing user data. + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -2967,6 +3221,14 @@ If the dependencies are under your control, you can turn on cpu0_hotplug. + arm64_cpu0_hotplug [ARM64] Turn on CPU0 hotplug feature when + CONFIG_ARM64_BOOTPARAM_HOTPLUG_CPU0 is off. + Some features depend on CPU0. Known dependency is: + MegaRAID Tri-Mode SAS3508 may block the reboot process + after offline CPU0. + If the dependencies are under your control, you can + turn on arm64_cpu0_hotplug. + nps_mtm_hs_ctr= [KNL,ARC] This parameter sets the maximum duration, in cycles, each HW thread of the CTOP can run @@ -3053,6 +3315,14 @@ timeout < 0: reboot immediately Format: + panic_print= Bitmask for printing system info when panic happens. + User can chose combination of the following bits: + bit 0: print all tasks info + bit 1: print system memory info + bit 2: print timer info + bit 3: print locks info if CONFIG_LOCKDEP is on + bit 4: print ftrace buffer + panic_on_warn panic() instead of WARN(). Useful to cause kdump on a WARN(). @@ -3323,6 +3593,8 @@ even if the platform doesn't give the OS permission to use them. This may cause conflicts if the platform also tries to use these services. 
+ dpc-native Use native PCIe service for DPC only. May + cause conflicts if firmware uses AER or DPC. compat Disable native PCIe services (PME, AER, DPC, PCIe hotplug). @@ -3880,6 +4152,13 @@ Run specified binary instead of /init from the ramdisk, used for early userspace startup. See initrd. + rdrand= [X86] + force - Override the decision by the kernel to hide the + advertisement of RDRAND support (this affects + certain AMD processors because of buggy BIOS + support, specifically around the suspend/resume + path). + rdt= [HW,X86,RDT] Turn on/off individual RDT features. List is: cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp, @@ -3951,6 +4230,18 @@ retain_initrd [RAM] Keep initrd memory after extraction + retbleed= [X86] Control mitigation of RETBleed (Arbitrary + Speculative Code Execution with Return Instructions) + vulnerability. + + off - unconditionally disable + auto - automatically select a migitation + + Selecting 'auto' will choose a mitigation method at run + time according to the CPU. + + Not specifying this option is equivalent to retbleed=auto. + rfkill.default_state= 0 "airplane mode". All wifi, bluetooth, wimax, gps, fm, etc. communication is blocked by default. @@ -4165,9 +4456,13 @@ spectre_v2= [X86] Control mitigation of Spectre variant 2 (indirect branch speculation) vulnerability. + The default operation protects the kernel from + user space attacks. - on - unconditionally enable - off - unconditionally disable + on - unconditionally enable, implies + spectre_v2_user=on + off - unconditionally disable, implies + spectre_v2_user=off auto - kernel detects whether your CPU model is vulnerable @@ -4177,15 +4472,68 @@ CONFIG_RETPOLINE configuration option, and the compiler with which the kernel was built. + Selecting 'on' will also enable the mitigation + against user space to user space task attacks. + + Selecting 'off' will disable both the kernel and + the user space protections. + Specific mitigations can also be selected manually: retpoline - replace indirect branches - retpoline,generic - google's original retpoline - retpoline,amd - AMD-specific minimal thunk + retpoline,generic - Retpolines + retpoline,lfence - LFENCE; indirect branch + retpoline,amd - alias for retpoline,lfence + eibrs - enhanced IBRS + eibrs,retpoline - enhanced IBRS + Retpolines + eibrs,lfence - enhanced IBRS + LFENCE + ibrs - use IBRS to protect kernel Not specifying this option is equivalent to spectre_v2=auto. + spectre_v2_user= + [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability between + user space tasks + + on - Unconditionally enable mitigations. Is + enforced by spectre_v2=on + + off - Unconditionally disable mitigations. Is + enforced by spectre_v2=off + + prctl - Indirect branch speculation is enabled, + but mitigation can be enabled via prctl + per thread. The mitigation control state + is inherited on fork. + + prctl,ibpb + - Like "prctl" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different user + space processes. + + seccomp + - Same as "prctl" above, but all seccomp + threads will enable the mitigation unless + they explicitly opt out. + + seccomp,ibpb + - Like "seccomp" above, but only STIBP is + controlled per thread. IBPB is issued + always when switching between different + user space processes. + + auto - Kernel selects the mitigation depending on + the available CPU features and vulnerability. 
+ + Default mitigation: + If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl" + + Not specifying this option is equivalent to + spectre_v2_user=auto. + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) @@ -4243,6 +4591,26 @@ spia_pedr= spia_peddr= + srbds= [X86,INTEL] + Control the Special Register Buffer Data Sampling + (SRBDS) mitigation. + + Certain CPUs are vulnerable to an MDS-like + exploit which can leak bits from the random + number generator. + + By default, this issue is mitigated by + microcode. However, the microcode fix can cause + the RDRAND and RDSEED instructions to become + much slower. Among other effects, this will + result in reduced throughput from /dev/urandom. + + The microcode mitigation can be disabled with + the following option: + + off: Disable mitigation and remove + performance impact to RDRAND and RDSEED + srcutree.counter_wrap_check [KNL] Specifies how frequently to check for grace-period sequence counter wrap for the @@ -4561,6 +4929,76 @@ marks the TSC unconditionally unstable at bootup and avoids any further wobbles once the TSC watchdog notices. + tsx= [X86] Control Transactional Synchronization + Extensions (TSX) feature in Intel processors that + support TSX control. + + This parameter controls the TSX feature. The options are: + + on - Enable TSX on the system. Although there are + mitigations for all known security vulnerabilities, + TSX has been known to be an accelerator for + several previous speculation-related CVEs, and + so there may be unknown security risks associated + with leaving it enabled. + + off - Disable TSX on the system. (Note that this + option takes effect only on newer CPUs which are + not vulnerable to MDS, i.e., have + MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get + the new IA32_TSX_CTRL MSR through a microcode + update. This new MSR allows for the reliable + deactivation of the TSX functionality.) + + auto - Disable TSX if X86_BUG_TAA is present, + otherwise enable TSX on the system. + + Not specifying this option is equivalent to tsx=off. + + See Documentation/admin-guide/hw-vuln/tsx_async_abort.rst + for more details. + + tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async + Abort (TAA) vulnerability. + + Similar to Micro-architectural Data Sampling (MDS) + certain CPUs that support Transactional + Synchronization Extensions (TSX) are vulnerable to an + exploit against CPU internal buffers which can forward + information to a disclosure gadget under certain + conditions. + + In vulnerable processors, the speculatively forwarded + data can be used in a cache side channel attack, to + access data to which the attacker does not have direct + access. + + This parameter controls the TAA mitigation. The + options are: + + full - Enable TAA mitigation on vulnerable CPUs + if TSX is enabled. + + full,nosmt - Enable TAA mitigation and disable SMT on + vulnerable CPUs. If TSX is disabled, SMT + is not disabled because CPU is not + vulnerable to cross-thread TAA attacks. + off - Unconditionally disable TAA mitigation + + On MDS-affected machines, tsx_async_abort=off can be + prevented by an active MDS mitigation as both vulnerabilities + are mitigated with the same mechanism so in order to disable + this mitigation, you need to specify mds=off too. + + Not specifying this option is equivalent to + tsx_async_abort=full. 
On CPUs which are MDS affected + and deploy MDS mitigation, TAA mitigation is not + required and doesn't provide any additional + mitigation. + + For details see: + Documentation/admin-guide/hw-vuln/tsx_async_abort.rst + turbografx.map[2|3]= [HW,JOY] TurboGraFX parallel port interface Format: @@ -4683,6 +5121,8 @@ prevent spurious wakeup); n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a pause after every control message); + o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra + delay after resetting its port); Example: quirks=0781:5580:bk,0a5c:5834:gij usbhid.mousepoll= @@ -4707,13 +5147,13 @@ Flags is a set of characters, each corresponding to a common usb-storage quirk flag as follows: a = SANE_SENSE (collect more than 18 bytes - of sense data); + of sense data, not on uas); b = BAD_SENSE (don't collect more than 18 - bytes of sense data); + bytes of sense data, not on uas); c = FIX_CAPACITY (decrease the reported device capacity by one sector); d = NO_READ_DISC_INFO (don't use - READ_DISC_INFO command); + READ_DISC_INFO command, not on uas); e = NO_READ_CAPACITY_16 (don't use READ_CAPACITY_16 command); f = NO_REPORT_OPCODES (don't use report opcodes @@ -4728,17 +5168,18 @@ j = NO_REPORT_LUNS (don't use report luns command, uas only); l = NOT_LOCKABLE (don't try to lock and - unlock ejectable media); + unlock ejectable media, not on uas); m = MAX_SECTORS_64 (don't transfer more - than 64 sectors = 32 KB at a time); + than 64 sectors = 32 KB at a time, + not on uas); n = INITIAL_READ10 (force a retry of the - initial READ(10) command); + initial READ(10) command, not on uas); o = CAPACITY_OK (accept the capacity - reported by the device); + reported by the device, not on uas); p = WRITE_CACHE (the device cache is ON - by default); + by default, not on uas); r = IGNORE_RESIDUE (the device reports - bogus residue values); + bogus residue values, not on uas); s = SINGLE_LUN (the device has only one Logical Unit); t = NO_ATA_1X (don't allow ATA(12) and ATA(16) @@ -4747,7 +5188,8 @@ w = NO_WP_DETECT (don't test whether the medium is write-protected). y = ALWAYS_SYNC (issue a SYNCHRONIZE_CACHE - even if the device claims no cache) + even if the device claims no cache, + not on uas) Example: quirks=0419:aaf5:rl,0421:0433:rc user_debug= [KNL,ARM] @@ -4855,12 +5297,6 @@ emulate [default] Vsyscalls turn into traps and are emulated reasonably safely. - native Vsyscalls are native syscall instructions. - This is a little bit faster than trapping - and makes a few dynamic recompilers work - better than they would in emulation mode. - It also makes exploits much easier to write. - none Vsyscalls don't work at all. This makes them quite hard to use for exploits but might break your system. @@ -4992,10 +5428,22 @@ the unplug protocol never -- do not unplug even if version check succeeds + xen_legacy_crash [X86,XEN] + Crash from Xen panic notifier, without executing late + panic() code such as dumping handler. + xen_nopvspin [X86,XEN] Disables the ticketlock slowpath using Xen PV optimizations. + xen.event_eoi_delay= [XEN] + How long to delay EOI handling in case of event + storms (jiffies). Default is 10. + + xen.event_loop_timeout= [XEN] + After which time (jiffies) the event handling loop + should start to delay EOI handling. Default is 2. + xen_nopv [X86] Disables the PV optimizations forcing the HVM guest to run as generic HVM guest with no PV drivers. @@ -5006,6 +5454,11 @@ with /sys/devices/system/xen_memory/xen_memory0/scrub_pages. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT. 
+ nopvspin [X86,KVM] + Disables the qspinlock slow path using PV optimizations + which allow the hypervisor to 'idle' the guest on lock + contention. + xirc2ps_cs= [NET,PCMCIA] Format: ,,,,,[,[,[,]]] diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst index ceead68c2df744818021c8dc40b4efc7d4065bb9..0cabbba2f885dee50f5af902215a05b8d9de8fea 100644 --- a/Documentation/admin-guide/mm/index.rst +++ b/Documentation/admin-guide/mm/index.rst @@ -30,6 +30,7 @@ the Linux memory management. idle_page_tracking ksm numa_memory_policy + numaperf pagemap soft-dirty transhuge diff --git a/Documentation/admin-guide/mm/numaperf.rst b/Documentation/admin-guide/mm/numaperf.rst new file mode 100644 index 0000000000000000000000000000000000000000..c067ed145158d9b4a408fbe62d205e81dacf66f3 --- /dev/null +++ b/Documentation/admin-guide/mm/numaperf.rst @@ -0,0 +1,169 @@ +.. _numaperf: + +============= +NUMA Locality +============= + +Some platforms may have multiple types of memory attached to a compute +node. These disparate memory ranges may share some characteristics, such +as CPU cache coherence, but may have different performance. For example, +different media types and buses affect bandwidth and latency. + +A system supports such heterogeneous memory by grouping each memory type +under different domains, or "nodes", based on locality and performance +characteristics. Some memory may share the same node as a CPU, and others +are provided as memory only nodes. While memory only nodes do not provide +CPUs, they may still be local to one or more compute nodes relative to +other nodes. The following diagram shows one such example of two compute +nodes with local memory and a memory only node for each of compute node:: + + +------------------+ +------------------+ + | Compute Node 0 +-----+ Compute Node 1 | + | Local Node0 Mem | | Local Node1 Mem | + +--------+---------+ +--------+---------+ + | | + +--------+---------+ +--------+---------+ + | Slower Node2 Mem | | Slower Node3 Mem | + +------------------+ +--------+---------+ + +A "memory initiator" is a node containing one or more devices such as +CPUs or separate memory I/O devices that can initiate memory requests. +A "memory target" is a node containing one or more physical address +ranges accessible from one or more memory initiators. + +When multiple memory initiators exist, they may not all have the same +performance when accessing a given memory target. Each initiator-target +pair may be organized into different ranked access classes to represent +this relationship. The highest performing initiator to a given target +is considered to be one of that target's local initiators, and given +the highest access class, 0. Any given target may have one or more +local initiators, and any given initiator may have multiple local +memory targets. + +To aid applications matching memory targets with their initiators, the +kernel provides symlinks to each other. The following example lists the +relationship for the access class "0" memory initiators and targets:: + + # symlinks -v /sys/devices/system/node/nodeX/access0/targets/ + relative: /sys/devices/system/node/nodeX/access0/targets/nodeY -> ../../nodeY + + # symlinks -v /sys/devices/system/node/nodeY/access0/initiators/ + relative: /sys/devices/system/node/nodeY/access0/initiators/nodeX -> ../../nodeX + +A memory initiator may have multiple memory targets in the same access +class. 
The target memory's initiators in a given class indicate that the +nodes' access characteristics share the same performance relative to other +linked initiator nodes. Each target within an initiator's access class, +though, does not necessarily perform the same as the others. + +================ +NUMA Performance +================ + +Applications may wish to consider which node they want their memory to +be allocated from based on the node's performance characteristics. If +the system provides these attributes, the kernel exports them under the +node sysfs hierarchy by appending the attributes directory under the +memory node's access class 0 initiators as follows:: + + /sys/devices/system/node/nodeY/access0/initiators/ + +These attributes apply only when accessed from nodes that are linked +under this access class's initiators. + +The performance characteristics the kernel provides for the local initiators +are exported as follows:: + + # tree -P "read*|write*" /sys/devices/system/node/nodeY/access0/initiators/ + /sys/devices/system/node/nodeY/access0/initiators/ + |-- read_bandwidth + |-- read_latency + |-- write_bandwidth + `-- write_latency + +The bandwidth attributes are provided in MiB/second. + +The latency attributes are provided in nanoseconds. + +The values reported here correspond to the rated latency and bandwidth +for the platform. + +========== +NUMA Cache +========== + +System memory may be constructed in a hierarchy of elements with various +performance characteristics in order to provide a large address space of +slower performing memory cached by smaller, higher performing memory. The +system physical addresses that memory initiators are aware of are provided +by the last memory level in the hierarchy. The system meanwhile uses +higher performing memory to transparently cache access to progressively +slower levels. + +The term "far memory" is used to denote the last level memory in the +hierarchy. Each increasing cache level provides higher performing +initiator access, and the term "near memory" represents the fastest +cache provided by the system. + +This numbering is different from that of CPU caches, where the cache level +(ex: L1, L2, L3) uses the CPU-side view in which each increased level is +lower performing. In contrast, the memory cache level is centric to the last +level memory, so the higher numbered cache level corresponds to memory +nearer to the CPU, and further from far memory. + +The memory-side caches are not directly addressable by software. When +software accesses a system address, the system will return it from the +near memory cache if it is present. If it is not present, the system +accesses the next level of memory until there is either a hit in that +cache level, or it reaches far memory. + +An application does not need to know about caching attributes in order +to use the system. Software may optionally query the memory cache +attributes in order to maximize the performance out of such a setup. +If the system provides a way for the kernel to discover this information, +for example with ACPI HMAT (Heterogeneous Memory Attribute Table), +the kernel will append these attributes to the NUMA node memory target. + +When the kernel first registers a memory cache with a node, the kernel +will create the following directory:: + + /sys/devices/system/node/nodeX/memory_side_cache/ + +If that directory is not present, the system either does not provide +a memory-side cache, or that information is not accessible to the kernel.
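+As an illustration only (this sketch is not part of the kernel documentation;
+the node number node1 and the reduced error handling are assumptions made for
+the example), an application might read the access class 0 performance
+attributes described above and probe for a memory-side cache like this::
+
+	/* hypothetical user-space example, reading the sysfs attributes above */
+	#include <stdio.h>
+	#include <sys/stat.h>
+
+	static long read_attr(const char *path)
+	{
+		FILE *f = fopen(path, "r");
+		long val = -1;
+
+		if (f) {
+			if (fscanf(f, "%ld", &val) != 1)
+				val = -1;
+			fclose(f);
+		}
+		return val;
+	}
+
+	int main(void)
+	{
+		const char *base = "/sys/devices/system/node/node1/access0/initiators";
+		char path[256];
+		struct stat st;
+
+		snprintf(path, sizeof(path), "%s/read_latency", base);
+		printf("read latency: %ld ns\n", read_attr(path));
+
+		snprintf(path, sizeof(path), "%s/read_bandwidth", base);
+		printf("read bandwidth: %ld MiB/s\n", read_attr(path));
+
+		/* a missing directory means no memory-side cache was reported */
+		if (stat("/sys/devices/system/node/node1/memory_side_cache", &st) != 0)
+			printf("node1: no memory-side cache reported\n");
+
+		return 0;
+	}
+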
+ +The attributes for each level of cache are provided under its cache +level index:: + + /sys/devices/system/node/nodeX/memory_side_cache/indexA/ + /sys/devices/system/node/nodeX/memory_side_cache/indexB/ + /sys/devices/system/node/nodeX/memory_side_cache/indexC/ + +Each cache level's directory provides its attributes. For example, the +following shows a single cache level and the attributes available for +software to query:: + + # tree sys/devices/system/node/node0/memory_side_cache/ + /sys/devices/system/node/node0/memory_side_cache/ + |-- index1 + | |-- indexing + | |-- line_size + | |-- size + | `-- write_policy + +The "indexing" will be 0 if it is a direct-mapped cache, and non-zero +for any other indexing (multi-way set-associative caches). + +The "line_size" is the number of bytes accessed from the next cache +level on a miss. + +The "size" is the number of bytes provided by this cache level. + +The "write_policy" will be 0 for write-back, and non-zero for +write-through caching. + +======== +See Also +======== +.. [1] https://www.uefi.org/sites/default/files/resources/ACPI_6_2.pdf + Section 5.2.27 diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst index 47153e64dfb530465ca01d28272e058293eb08b5..219cf42535d2d8937995317164063cf46cc33721 100644 --- a/Documentation/admin-guide/pm/cpufreq.rst +++ b/Documentation/admin-guide/pm/cpufreq.rst @@ -587,6 +587,13 @@ This governor exposes the following tunables: It effectively causes the frequency to go down ``sampling_down_factor`` times slower than it ramps up. +``fast_mode`` + Frequency scaling strategy switch. When enabled, the `conservative` + governor changes the frequency somewhat more aggressively. + + Set it to 1 (default 0) to use fast mode and reduce performance loss; + leave it at 0 to use slow mode and favor power savings. Frequency Boost Support ======================= diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst new file mode 100644 index 0000000000000000000000000000000000000000..afbf778035f820656fe8cfa3354c0fab71a1342f --- /dev/null +++ b/Documentation/admin-guide/pm/intel_idle.rst @@ -0,0 +1,246 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. include:: <isonum.txt> + +============================================== +``intel_idle`` CPU Idle Time Management Driver +============================================== + +:Copyright: |copy| 2020 Intel Corporation + +:Author: Rafael J. Wysocki + + +General Information +=================== + +``intel_idle`` is a part of the +:doc:`CPU idle time management subsystem <cpuidle>` in the Linux kernel +(``CPUIdle``). It is the default CPU idle time management driver for the +Nehalem and later generations of Intel processors, but the level of support for +a particular processor model in it depends on whether or not it recognizes that +processor model and may also depend on information coming from the platform +firmware. [To understand ``intel_idle`` it is necessary to know how ``CPUIdle`` +works in general, so this is the time to get familiar with :doc:`cpuidle` if you +have not done that yet.] + +``intel_idle`` uses the ``MWAIT`` instruction to inform the processor that the +logical CPU executing it is idle and so it may be possible to put some of the +processor's functional blocks into low-power states.
That instruction takes two +arguments (passed in the ``EAX`` and ``ECX`` registers of the target CPU), the +first of which, referred to as a *hint*, can be used by the processor to +determine what can be done (for details refer to Intel Software Developer’s +Manual [1]_). Accordingly, ``intel_idle`` refuses to work with processors in +which the support for the ``MWAIT`` instruction has been disabled (for example, +via the platform firmware configuration menu) or which do not support that +instruction at all. + +``intel_idle`` is not modular, so it cannot be unloaded, which means that the +only way to pass early-configuration-time parameters to it is via the kernel +command line. + + +.. _intel-idle-enumeration-of-states: + +Enumeration of Idle States +========================== + +Each ``MWAIT`` hint value is interpreted by the processor as a license to +reconfigure itself in a certain way in order to save energy. The processor +configurations (with reduced power draw) resulting from that are referred to +as C-states (in the ACPI terminology) or idle states. The list of meaningful +``MWAIT`` hint values and idle states (i.e. low-power configurations of the +processor) corresponding to them depends on the processor model and it may also +depend on the configuration of the platform. + +In order to create a list of available idle states required by the ``CPUIdle`` +subsystem (see :ref:`idle-states-representation` in :doc:`cpuidle`), +``intel_idle`` can use two sources of information: static tables of idle states +for different processor models included in the driver itself and the ACPI tables +of the system. The former are always used if the processor model at hand is +recognized by ``intel_idle`` and the latter are used if that is required for +the given processor model (which is the case for all server processor models +recognized by ``intel_idle``) or if the processor model is not recognized. + +If the ACPI tables are going to be used for building the list of available idle +states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI +objects corresponding to the CPUs in the system (refer to the ACPI specification +[2]_ for the description of ``_CST`` and its output package). Because the +``CPUIdle`` subsystem expects that the list of idle states supplied by the +driver will be suitable for all of the CPUs handled by it and ``intel_idle`` is +registered as the ``CPUIdle`` driver for all of the CPUs in the system, the +driver looks for the first ``_CST`` object returning at least one valid idle +state description and such that all of the idle states included in its return +package are of the FFH (Functional Fixed Hardware) type, which means that the +``MWAIT`` instruction is expected to be used to tell the processor that it can +enter one of them. The return package of that ``_CST`` is then assumed to be +applicable to all of the other CPUs in the system and the idle state +descriptions extracted from it are stored in a preliminary list of idle states +coming from the ACPI tables. [This step is skipped if ``intel_idle`` is +configured to ignore the ACPI tables; see `below `_.] + +Next, the first (index 0) entry in the list of available idle states is +initialized to represent a "polling idle state" (a pseudo-idle state in which +the target CPU continuously fetches and executes instructions), and the +subsequent (real) idle state entries are populated as follows. 
+ +If the processor model at hand is recognized by ``intel_idle``, there is a +(static) table of idle state descriptions for it in the driver. In that case, +the "internal" table is the primary source of information on idle states and the +information from it is copied to the final list of available idle states. If +using the ACPI tables for the enumeration of idle states is not required +(depending on the processor model), all of the listed idle state are enabled by +default (so all of them will be taken into consideration by ``CPUIdle`` +governors during CPU idle state selection). Otherwise, some of the listed idle +states may not be enabled by default if there are no matching entries in the +preliminary list of idle states coming from the ACPI tables. In that case user +space still can enable them later (on a per-CPU basis) with the help of +the ``disable`` idle state attribute in ``sysfs`` (see +:ref:`idle-states-representation` in :doc:`cpuidle`). This basically means that +the idle states "known" to the driver may not be enabled by default if they have +not been exposed by the platform firmware (through the ACPI tables). + +If the given processor model is not recognized by ``intel_idle``, but it +supports ``MWAIT``, the preliminary list of idle states coming from the ACPI +tables is used for building the final list that will be supplied to the +``CPUIdle`` core during driver registration. For each idle state in that list, +the description, ``MWAIT`` hint and exit latency are copied to the corresponding +entry in the final list of idle states. The name of the idle state represented +by it (to be returned by the ``name`` idle state attribute in ``sysfs``) is +"CX_ACPI", where X is the index of that idle state in the final list (note that +the minimum value of X is 1, because 0 is reserved for the "polling" state), and +its target residency is based on the exit latency value. Specifically, for +C1-type idle states the exit latency value is also used as the target residency +(for compatibility with the majority of the "internal" tables of idle states for +various processor models recognized by ``intel_idle``) and for the other idle +state types (C2 and C3) the target residency value is 3 times the exit latency +(again, that is because it reflects the target residency to exit latency ratio +in the majority of cases for the processor models recognized by ``intel_idle``). +All of the idle states in the final list are enabled by default in this case. + + +.. _intel-idle-initialization: + +Initialization +============== + +The initialization of ``intel_idle`` starts with checking if the kernel command +line options forbid the use of the ``MWAIT`` instruction. If that is the case, +an error code is returned right away. + +The next step is to check whether or not the processor model is known to the +driver, which determines the idle states enumeration method (see +`above `_), and whether or not the processor +supports ``MWAIT`` (the initialization fails if that is not the case). Then, +the ``MWAIT`` support in the processor is enumerated through ``CPUID`` and the +driver initialization fails if the level of support is not as expected (for +example, if the total number of ``MWAIT`` substates returned is 0). + +Next, if the driver is not configured to ignore the ACPI tables (see +`below `_), the idle states information provided by the +platform firmware is extracted from them. 
+ +Then, ``CPUIdle`` device objects are allocated for all CPUs and the list of +available idle states is created as explained +`above `_. + +Finally, ``intel_idle`` is registered with the help of cpuidle_register_driver() +as the ``CPUIdle`` driver for all CPUs in the system and a CPU online callback +for configuring individual CPUs is registered via cpuhp_setup_state(), which +(among other things) causes the callback routine to be invoked for all of the +CPUs present in the system at that time (each CPU executes its own instance of +the callback routine). That routine registers a ``CPUIdle`` device for the CPU +running it (which enables the ``CPUIdle`` subsystem to operate that CPU) and +optionally performs some CPU-specific initialization actions that may be +required for the given processor model. + + +.. _intel-idle-parameters: + +Kernel Command Line Options and Module Parameters +================================================= + +The *x86* architecture support code recognizes three kernel command line +options related to CPU idle time management: ``idle=poll``, ``idle=halt``, +and ``idle=nomwait``. If any of them is present in the kernel command line, the +``MWAIT`` instruction is not allowed to be used, so the initialization of +``intel_idle`` will fail. + +Apart from that there are two module parameters recognized by ``intel_idle`` +itself that can be set via the kernel command line (they cannot be updated via +sysfs, so that is the only way to change their values). + +The ``max_cstate`` parameter value is the maximum idle state index in the list +of idle states supplied to the ``CPUIdle`` core during the registration of the +driver. It is also the maximum number of regular (non-polling) idle states that +can be used by ``intel_idle``, so the enumeration of idle states is terminated +after finding that number of usable idle states (the other idle states that +potentially might have been used if ``max_cstate`` had been greater are not +taken into consideration at all). Setting ``max_cstate`` can prevent +``intel_idle`` from exposing idle states that are regarded as "too deep" for +some reason to the ``CPUIdle`` core, but it does so by making them effectively +invisible until the system is shut down and started again which may not always +be desirable. In practice, it is only really necessary to do that if the idle +states in question cannot be enabled during system startup, because in the +working state of the system the CPU power management quality of service (PM +QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states +even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`). +Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail. + +The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the +kernel has been configured with ACPI support), can be set to make the driver +ignore the system's ACPI tables entirely (it is unset by default). + + +.. _intel-idle-core-and-package-idle-states: + +Core and Package Levels of Idle States +====================================== + +Typically, in a processor supporting the ``MWAIT`` instruction there are (at +least) two levels of idle states (or C-states). One level, referred to as +"core C-states", covers individual cores in the processor, whereas the other +level, referred to as "package C-states", covers the entire processor package +and it may also involve other components of the system (GPUs, memory +controllers, I/O hubs etc.). 
+ +Some of the ``MWAIT`` hint values allow the processor to use core C-states only +(most importantly, that is the case for the ``MWAIT`` hint value corresponding +to the ``C1`` idle state), but the majority of them give it a license to put +the target core (i.e. the core containing the logical CPU executing ``MWAIT`` +with the given hint value) into a specific core C-state and then (if possible) +to enter a specific package C-state at the deeper level. For example, the +``MWAIT`` hint value representing the ``C3`` idle state allows the processor to +put the target core into the low-power state referred to as "core ``C3``" (or +``CC3``), which happens if all of the logical CPUs (SMT siblings) in that core +have executed ``MWAIT`` with the ``C3`` hint value (or with a hint value +representing a deeper idle state), and in addition to that (in the majority of +cases) it gives the processor a license to put the entire package (possibly +including some non-CPU components such as a GPU or a memory controller) into the +low-power state referred to as "package ``C3``" (or ``PC3``), which happens if +all of the cores have gone into the ``CC3`` state and (possibly) some additional +conditions are satisfied (for instance, if the GPU is covered by ``PC3``, it may +be required to be in a certain GPU-specific low-power state for ``PC3`` to be +reachable). + +As a rule, there is no simple way to make the processor use core C-states only +if the conditions for entering the corresponding package C-states are met, so +the logical CPU executing ``MWAIT`` with a hint value that is not core-level +only (like for ``C1``) must always assume that this may cause the processor to +enter a package C-state. [That is why the exit latency and target residency +values corresponding to the majority of ``MWAIT`` hint values in the "internal" +tables of idle states in ``intel_idle`` reflect the properties of package +C-states.] If using package C-states is not desirable at all, either +:ref:`PM QoS ` or the ``max_cstate`` module parameter of +``intel_idle`` described `above `_ must be used to +restrict the range of permissible idle states to the ones with core-level only +``MWAIT`` hint values (like ``C1``). + + +References +========== + +.. [1] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 2B*, + https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2b-manual.html + +.. [2] *Advanced Configuration and Power Interface (ACPI) Specification*, + https://uefi.org/specifications diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst index fa01bf083dfe67877b43bfa3a94e2f7b78d48cec..ec695d3c7782079980b490198d2b7e2ed22e69cd 100644 --- a/Documentation/admin-guide/pm/working-state.rst +++ b/Documentation/admin-guide/pm/working-state.rst @@ -5,5 +5,7 @@ Working-State Power Management .. toctree:: :maxdepth: 2 + cpuidle + intel_idle cpufreq intel_pstate diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst index 30491d91e93d7f9801ec2740e09383ed7a3ee623..30187d49dc2c7d38869c4073e60093d315c3fc4e 100644 --- a/Documentation/admin-guide/security-bugs.rst +++ b/Documentation/admin-guide/security-bugs.rst @@ -26,23 +26,35 @@ information is helpful. Any exploit code is very helpful and will not be released without consent from the reporter unless it has already been made public. 
-Disclosure ----------- - -The goal of the Linux kernel security team is to work with the bug -submitter to understand and fix the bug. We prefer to publish the fix as -soon as possible, but try to avoid public discussion of the bug itself -and leave that to others. - -Publishing the fix may be delayed when the bug or the fix is not yet -fully understood, the solution is not well-tested or for vendor -coordination. However, we expect these delays to be short, measurable in -days, not weeks or months. A release date is negotiated by the security -team working with the bug submitter as well as vendors. However, the -kernel security team holds the final say when setting a timeframe. The -timeframe varies from immediate (esp. if it's already publicly known bug) -to a few weeks. As a basic default policy, we expect report date to -release date to be on the order of 7 days. +Disclosure and embargoed information +------------------------------------ + +The security list is not a disclosure channel. For that, see Coordination +below. + +Once a robust fix has been developed, the release process starts. Fixes +for publicly known bugs are released immediately. + +Although our preference is to release fixes for publicly undisclosed bugs +as soon as they become available, this may be postponed at the request of +the reporter or an affected party for up to 7 calendar days from the start +of the release process, with an exceptional extension to 14 calendar days +if it is agreed that the criticality of the bug requires more time. The +only valid reason for deferring the publication of a fix is to accommodate +the logistics of QA and large scale rollouts which require release +coordination. + +Whilst embargoed information may be shared with trusted individuals in +order to develop a fix, such information will not be published alongside +the fix or on any other disclosure channel without the permission of the +reporter. This includes but is not limited to the original bug report +and followup discussions (if any), exploits, CVE information or the +identity of the reporter. + +In other words our only interest is in getting bugs fixed. All other +information submitted to the security list and any followup discussions +of the report are treated confidentially even after the embargo has been +lifted, in perpetuity. Coordination ------------ @@ -68,7 +80,7 @@ may delay the bug handling. If a reporter wishes to have a CVE identifier assigned ahead of public disclosure, they will need to contact the private linux-distros list, described above. When such a CVE identifier is known before a patch is provided, it is desirable to mention it in the commit -message, though. +message if the reporter agrees. 
Non-disclosure agreements ------------------------- diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt index 525452726d31e94c43d7c8c7c05c4431b006912a..b9e060c5b61e08c1491c710adc560530b8bae03e 100644 --- a/Documentation/arm/kernel_mode_neon.txt +++ b/Documentation/arm/kernel_mode_neon.txt @@ -6,7 +6,7 @@ TL;DR summary * Use only NEON instructions, or VFP instructions that don't rely on support code * Isolate your NEON code in a separate compilation unit, and compile it with - '-mfpu=neon -mfloat-abi=softfp' + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp' * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your NEON code * Don't sleep in your NEON code, and be aware that it will be executed with @@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken. Therefore, the recommended and only supported way of using NEON/VFP in the kernel is by adhering to the following rules: * isolate the NEON code in a separate compilation unit and compile it with - '-mfpu=neon -mfloat-abi=softfp'; + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'; * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls into the unit containing the NEON code from a compilation unit which is *not* built with the GCC flag '-mfpu=neon' set. diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt index 8d0df62c3fe025fa963e0add0e25a33be43898e2..2890ece8625f174b48decb4996b78c554c361e53 100644 --- a/Documentation/arm64/booting.txt +++ b/Documentation/arm64/booting.txt @@ -188,10 +188,18 @@ Before jumping into the kernel, the following conditions must be met: the kernel image will be entered must be initialised by software at a higher exception level to prevent execution in an UNKNOWN state. + - SCR_EL3.FIQ must have the same value across all CPUs the kernel is + executing on. + - The value of SCR_EL3.FIQ must be the same as the one present at boot + time whenever the kernel is executing. + For systems with a GICv3 interrupt controller to be used in v3 mode: - If EL3 is present: ICC_SRE_EL3.Enable (bit 3) must be initialiased to 0b1. ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. + ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across + all CPUs the kernel is executing on, and must stay constant + for the lifetime of the kernel. - If the kernel is entered at EL1: ICC.SRE_EL2.Enable (bit 3) must be initialised to 0b1 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt index d6aff2c5e9e2d5f923f25aad9ad71bd2fb8642f1..6feaffe90e22cb59ae54df2a3c936098b4e4bd64 100644 --- a/Documentation/arm64/elf_hwcaps.txt +++ b/Documentation/arm64/elf_hwcaps.txt @@ -178,3 +178,7 @@ HWCAP_ILRCPC HWCAP_FLAGM Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001. + +HWCAP_SSBS + + Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010. diff --git a/Documentation/arm64/ilp32.txt b/Documentation/arm64/ilp32.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f01a61c92af599ebc73fcd5cd225dc417bef16b --- /dev/null +++ b/Documentation/arm64/ilp32.txt @@ -0,0 +1,52 @@ +ILP32 AARCH64 SYSCALL ABI +========================= + +This document describes the ILP32 syscall ABI and where it differs +from the generic compat linux syscall interface. + +ILP32 is acronym for memory model which stands for "Integers, Longs and +Pointers are 32-bit". 
The main purpose of ILP32 in Linux kernel is providing +compatibility with 32-bit legacy code. Also, ILP32 binaries look better in some +performance tests. ARM has AN490 document which coves ILP32 details for ARM64 +platform: +http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0490a/ar01s01.html + +AARCH64/ILP32 userspace may pass garbage in the top halve of w0-w7 registers +(syscall arguments). So top 32 bits are zeroed for them. + +Comparing to AARCH32, AARCH64/ILP32 has 64-bit length of following types: +ino_t is u64 type. +off_t is s64 type. +blkcnt_t is s64 type. +fsblkcnt_t is u64 type. +fsfilcnt_t is u64 type. +rlim_t is u64 type. + +AARCH64/ILP32 ABI uses standard syscall table which can be found at +include/uapi/asm-generic/unistd.h, with the exceptions listed below. + +Syscalls which pass 64-bit values are handled by the code shared from +AARCH32 and pass that value as a pair. Following syscalls are affected: +fadvise64_64() +fallocate() +ftruncate64() +pread64() +pwrite64() +readahead() +sync_file_range() +truncate64() + +ptrace() syscall is handled by compat version. + +shmat() syscall is handled by non-compat handler as aarch64/ilp32 has no +limitation on 4-pages alignment for shared memory. + +statfs() and fstatfs() take the size of struct statfs as an argument. +It is calculated differently in kernel and user spaces. So AARCH32 handlers +are taken to handle it. + +struct rt_sigframe is redefined and contains struct compat_siginfo, +as compat syscalls expect, and struct ilp32_ucontext, to handle +AARCH64 register set and 32-bit userspace register representation. + +elf_gregset_t is taken from lp64 to handle registers properly. diff --git a/Documentation/arm64/perf.txt b/Documentation/arm64/perf.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d6a7d87d49e0d93c376fd80fedaa1780b87d4c7 --- /dev/null +++ b/Documentation/arm64/perf.txt @@ -0,0 +1,85 @@ +Perf Event Attributes +===================== + +Author: Andrew Murray +Date: 2019-03-06 + +exclude_user +------------ + +This attribute excludes userspace. + +Userspace always runs at EL0 and thus this attribute will exclude EL0. + + +exclude_kernel +-------------- + +This attribute excludes the kernel. + +The kernel runs at EL2 with VHE and EL1 without. Guest kernels always run +at EL1. + +For the host this attribute will exclude EL1 and additionally EL2 on a VHE +system. + +For the guest this attribute will exclude EL1. Please note that EL2 is +never counted within a guest. + + +exclude_hv +---------- + +This attribute excludes the hypervisor. + +For a VHE host this attribute is ignored as we consider the host kernel to +be the hypervisor. + +For a non-VHE host this attribute will exclude EL2 as we consider the +hypervisor to be any code that runs at EL2 which is predominantly used for +guest/host transitions. + +For the guest this attribute has no effect. Please note that EL2 is +never counted within a guest. + + +exclude_host / exclude_guest +---------------------------- + +These attributes exclude the KVM host and guest, respectively. + +The KVM host may run at EL0 (userspace), EL1 (non-VHE kernel) and EL2 (VHE +kernel or non-VHE hypervisor). + +The KVM guest may run at EL0 (userspace) and EL1 (kernel). + +Due to the overlapping exception levels between host and guests we cannot +exclusively rely on the PMU's hardware exception filtering - therefore we +must enable/disable counting on the entry and exit to the guest. This is +performed differently on VHE and non-VHE systems. 
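+As an illustration only (this example is not part of the original text and its
+error handling is deliberately minimal), the exclude_guest / exclude_host
+attributes discussed above are set through struct perf_event_attr when opening
+an event, here counting CPU cycles for host execution only; the VHE and
+non-VHE specifics follow below::
+
+	/* illustrative sketch: count cycles, excluding time spent in guests */
+	#include <stdio.h>
+	#include <string.h>
+	#include <unistd.h>
+	#include <sys/syscall.h>
+	#include <linux/perf_event.h>
+
+	int main(void)
+	{
+		struct perf_event_attr attr;
+		long long count;
+		int fd;
+
+		memset(&attr, 0, sizeof(attr));
+		attr.size = sizeof(attr);
+		attr.type = PERF_TYPE_HARDWARE;
+		attr.config = PERF_COUNT_HW_CPU_CYCLES;
+		attr.exclude_guest = 1;	/* do not count while a KVM guest runs */
+
+		/* measure the calling thread on any CPU */
+		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
+		if (fd < 0) {
+			perror("perf_event_open");
+			return 1;
+		}
+
+		sleep(1);
+		if (read(fd, &count, sizeof(count)) == sizeof(count))
+			printf("cycles: %lld\n", count);
+		close(fd);
+		return 0;
+	}
+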
+ +For non-VHE systems we exclude EL2 for exclude_host - upon entering and +exiting the guest we disable/enable the event as appropriate based on the +exclude_host and exclude_guest attributes. + +For VHE systems we exclude EL1 for exclude_guest and exclude both EL0,EL2 +for exclude_host. Upon entering and exiting the guest we modify the event +to include/exclude EL0 as appropriate based on the exclude_host and +exclude_guest attributes. + +The statements above also apply when these attributes are used within a +non-VHE guest however please note that EL2 is never counted within a guest. + + +Accuracy +-------- + +On non-VHE hosts we enable/disable counters on the entry/exit of host/guest +transition at EL2 - however there is a period of time between +enabling/disabling the counters and entering/exiting the guest. We are +able to eliminate counters counting host events on the boundaries of guest +entry/exit when counting guest events by filtering out EL2 for +exclude_host. However when using !exclude_hv there is a small blackout +window at the guest entry/exit where host events are not captured. + +On VHE systems there are no blackout windows. diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 3b2f2dd82225aee697cc1b6baa1fcbd0092094a2..5016158f5e679917084a257d1b3ff8b49e12bc5d 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -44,6 +44,8 @@ stable kernels. | Implementor | Component | Erratum ID | Kconfig | +----------------+-----------------+-----------------+-----------------------------+ +| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | +| | | | | | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 | | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 | | ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 | @@ -53,9 +55,12 @@ stable kernels. | ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 | | ARM | Cortex-A57 | #852523 | N/A | | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 | +| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 | | ARM | Cortex-A72 | #853709 | N/A | +| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 | | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | +| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 | | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | @@ -72,6 +77,8 @@ stable kernels. | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 | +| Hisilicon | TSV{110,200} | #1980005 | HISILICON_ERRATUM_1980005 | +| Hisilicon | Hip09 | #162100801 | HISILICON_ERRATUM_161600801 | | | | | | | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index 7169a0ec41d86911ad4a9c7fc34488841dc7c1e6..2001d84384ca8834c6b0d32246e95f903c8d47f6 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt @@ -39,6 +39,18 @@ model features for SVE is included in Appendix A. is to connect to a target process first and then attempt a ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov). 
+* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory + between userspace and the kernel, the register value is encoded in memory in + an endianness-invariant layout, with bits [(8 * i + 7) : (8 * i)] encoded at + byte offset i from the start of the memory representation. This affects for + example the signal frame (struct sve_context) and ptrace interface + (struct user_sve_header) and associated data. + + Beware that on big-endian systems this results in a different byte order than + for the FPSIMD V-registers, which are stored as single host-endian 128-bit + values, with bits [(127 - 8 * i) : (120 - 8 * i)] of the register encoded at + byte offset i. (struct fpsimd_context, struct user_fpsimd_state). + 2. Vector length terminology ----------------------------- @@ -107,6 +119,10 @@ the SVE instruction set architecture. size and layout. Macros SVE_SIG_* are defined [1] to facilitate access to the members. +* Each scalable register (Zn, Pn, FFR) is stored in an endianness-invariant + layout, with bits [(8 * i + 7) : (8 * i)] stored at byte offset i from the + start of the register's representation in memory. + * If the SVE context is too big to fit in sigcontext.__reserved[], then extra space is allocated on the stack, an extra_context record is written in __reserved[] referencing this space. sve_context is then written in the diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt index be70b32c95d918066ffa72dfa4a69e8b4e51a225..bc3fac8e1db3af511e64c428734d20f5380353fc 100644 --- a/Documentation/atomic_bitops.txt +++ b/Documentation/atomic_bitops.txt @@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is: - RMW operations that have a return value are fully ordered. - RMW operations that are conditional are unordered on FAILURE, - otherwise the above rules apply. In the case of test_and_{}_bit() operations, + otherwise the above rules apply. In the case of test_and_set_bit_lock(), if the bit in memory is unchanged by the operation then it is deemed to have failed. diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt index 913396ac582431cb3acbdc96a1bd7f4293661d0b..ed0d814df7e06a25daf0b381f9929d83e152a57c 100644 --- a/Documentation/atomic_t.txt +++ b/Documentation/atomic_t.txt @@ -177,6 +177,9 @@ These helper barriers exist because architectures have varying implicit ordering on their SMP atomic primitives. For example our TSO architectures provide full ordered atomics and these barriers are no-ops. +NOTE: when the atomic RmW ops are fully ordered, they should also imply a +compiler barrier. + Thus: atomic_fetch_add(); diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst index 6780a6d8174580ea1caeac4a13fb4f8dae6bd91b..bc405f7772111af0ec16a5ad909f92fb8753fad2 100644 --- a/Documentation/bpf/bpf_design_QA.rst +++ b/Documentation/bpf/bpf_design_QA.rst @@ -164,6 +164,12 @@ kernels. The union bpf_attr -> kern_version is checked at load time to prevent accidentally loading kprobe-based bpf programs written for a different kernel. Networking programs don't do kern_version check. +Q: Are tracepoints part of the stable ABI? +------------------------------------------ +A: NO. Tracepoints are tied to internal implementation details hence they are +subject to change and can break with newer kernels. BPF programs need to change +accordingly when this happens. + Q: How much stack space a BPF program uses? 
------------------------------------------- A: Currently all program types are limited to 512 bytes of stack @@ -202,17 +208,6 @@ program is loaded the kernel will print warning message, so this helper is only useful for experiments and prototypes. Tracing BPF programs are root only. -Q: bpf_trace_printk() helper warning ------------------------------------- -Q: When bpf_trace_printk() helper is used the kernel prints nasty -warning message. Why is that? - -A: This is done to nudge program authors into better interfaces when -programs need to pass data to user space. Like bpf_perf_event_output() -can be used to efficiently stream data via perf ring buffer. -BPF maps can be used for asynchronous data sharing between kernel -and user space. bpf_trace_printk() should only be used for debugging. - Q: New functionality via kernel modules? ---------------------------------------- Q: Can BPF functionality such as new program or map types, new diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt index 3682e99234c2c6652ac4990504dfb14bd3873618..d31b7cd5a81c83dcf6a27edabb0d2221f5713eec 100644 --- a/Documentation/cgroup-v1/memory.txt +++ b/Documentation/cgroup-v1/memory.txt @@ -79,6 +79,9 @@ Brief summary of control files. memory.numa_stat # show the number of memory usage per numa node memory.kmem.limit_in_bytes # set/show hard limit for kernel memory + This knob is deprecated and shouldn't be + used. It is planned that this be removed in + the foreseeable future. memory.kmem.usage_in_bytes # show current kernel memory allocation memory.kmem.failcnt # show the number of kernel memory usage hits limits memory.kmem.max_usage_in_bytes # show max kernel memory usage recorded @@ -280,9 +283,9 @@ the amount of kernel memory used by the system. Kernel memory is fundamentally different than user memory, since it can't be swapped out, which makes it possible to DoS the system by consuming too much of this precious resource. -Kernel memory accounting is enabled for all memory cgroups by default. But -it can be disabled system-wide by passing cgroup.memory=nokmem to the kernel -at boot time. In this case, kernel memory will not be accounted at all. +Kernel memory accounting is disabled for all memory cgroups by default. But +it can be enabled system-wide by passing cgroup.memory=kmem to the kernel +at boot time. In this case, kernel memory will all be accounted. Kernel memory limits are not imposed for the root cgroup. Usage for the root cgroup may or may not be accounted. 
The memory used is accumulated into diff --git a/Documentation/conf.py b/Documentation/conf.py index b691af4831fadcae3b43e51c498a2469a5b6a8da..22c1a6d96f9eaa7dd44095030c4105861e2c51fe 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -37,7 +37,7 @@ needs_sphinx = '1.3' extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain', 'kfigure', 'sphinx.ext.ifconfig'] # The name of the math extension changed on Sphinx 1.4 -if major == 1 and minor > 3: +if (major == 1 and minor > 3) or (major > 1): extensions.append("sphinx.ext.imgmath") else: extensions.append("sphinx.ext.pngmath") diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index 6eb9d3f090cdf5d9a82afa3bd46cec5554ca116a..a2fed63e692903a76c6742a3870e6be3cd60a573 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -223,7 +223,7 @@ Here are the routines, one by one: there will be no entries in the cache for the kernel address space for virtual addresses in the range 'start' to 'end-1'. - The first of these two routines is invoked after map_vm_area() + The first of these two routines is invoked after map_kernel_range() has installed the page table entries. The second is invoked before unmap_kernel_range() deletes the page table entries. diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 26b735cefb932fe5ec4bb861f48bf9339c59cd08..1f5b47113c4cc757f9441ee1a695fdd60e9494b8 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -18,6 +18,7 @@ Core utilities refcount-vs-atomic cpu_hotplug idr + ktask local_ops workqueue genericirq diff --git a/Documentation/core-api/ktask.rst b/Documentation/core-api/ktask.rst new file mode 100644 index 0000000000000000000000000000000000000000..c3c00e1f802ffbc52bfbc087a88d6b147e6f10c3 --- /dev/null +++ b/Documentation/core-api/ktask.rst @@ -0,0 +1,213 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +============================================ +ktask: parallelize CPU-intensive kernel work +============================================ + +:Date: November, 2018 +:Author: Daniel Jordan + + +Introduction +============ + +ktask is a generic framework for parallelizing CPU-intensive work in the +kernel. The intended use is for big machines that can use their CPU power to +speed up large tasks that can't otherwise be multithreaded in userland. The +API is generic enough to add concurrency to many different kinds of tasks--for +example, page clearing over an address range or freeing a list of pages--and +aims to save its clients the trouble of splitting up the work, choosing the +number of helper threads to use, maintaining an efficient concurrency level, +starting these threads, and load balancing the work between them. + + +Motivation +========== + +A single CPU can spend an excessive amount of time in the kernel operating on +large amounts of data. Often these situations arise during initialization- and +destruction-related tasks, where the data involved scales with system size. +These long-running jobs can slow startup and shutdown of applications and the +system itself while extra CPUs sit idle. + +To ensure that applications and the kernel continue to perform well as core +counts and memory sizes increase, the kernel harnesses these idle CPUs to +complete such jobs more quickly. 
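+
+As a rough preview of the client-side shape described under Concept and
+Interface below (the names and signatures here follow this document's
+description and are illustrative only; the authoritative definitions live in
+include/linux/ktask.h)::
+
+	/* Thread function: completes one chunk, delimited by [start, end). */
+	static int clear_mem_chunk(void *start, void *end, void *arg)
+	{
+		memset(start, 0, end - start);
+		return KTASK_RETURN_SUCCESS;
+	}
+
+	static int clear_mem(void *addr, size_t size)
+	{
+		/* SZ_1M stands in for a task-specific minimum chunk size. */
+		DEFINE_KTASK_CTL(ctl, clear_mem_chunk, NULL, SZ_1M);
+
+		return ktask_run(addr, size, &ctl);
+	}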
+ +For example, when booting a large NUMA machine, ktask uses additional CPUs that +would otherwise be idle until the machine is fully up to avoid a needless +bottleneck during system boot and allow the kernel to take advantage of unused +memory bandwidth. Similarly, when starting a large VM using VFIO, ktask takes +advantage of the VM's idle CPUs during VFIO page pinning rather than have the +VM's boot blocked on one thread doing all the work. + +ktask is not a substitute for single-threaded optimization. However, there is +a point where a single CPU hits a wall despite performance tuning, so +parallelize! + + +Concept +======= + +ktask is built on unbound workqueues to take advantage of the thread management +facilities it provides: creation, destruction, flushing, priority setting, and +NUMA affinity. + +A little terminology up front: A 'task' is the total work there is to do and a +'chunk' is a unit of work given to a thread. + +To complete a task using the ktask framework, a client provides a thread +function that is responsible for completing one chunk. The thread function is +defined in a standard way, with start and end arguments that delimit the chunk +as well as an argument that the client uses to pass data specific to the task. + +In addition, the client supplies an object representing the start of the task +and an iterator function that knows how to advance some number of units in the +task to yield another object representing the new task position. The framework +uses the start object and iterator internally to divide the task into chunks. + +Finally, the client passes the total task size and a minimum chunk size to +indicate the minimum amount of work that's appropriate to do in one chunk. The +sizes are given in task-specific units (e.g. pages, inodes, bytes). The +framework uses these sizes, along with the number of online CPUs and an +internal maximum number of threads, to decide how many threads to start and how +many chunks to divide the task into. + +For example, consider the task of clearing a gigantic page. This used to be +done in a single thread with a for loop that calls a page clearing function for +each constituent base page. To parallelize with ktask, the client first moves +the for loop to the thread function, adapting it to operate on the range passed +to the function. In this simple case, the thread function's start and end +arguments are just addresses delimiting the portion of the gigantic page to +clear. Then, where the for loop used to be, the client calls into ktask with +the start address of the gigantic page, the total size of the gigantic page, +and the thread function. Internally, ktask will divide the address range into +an appropriate number of chunks and start an appropriate number of threads to +complete these chunks. + + +Configuration +============= + +To use ktask, configure the kernel with CONFIG_KTASK=y. + +If CONFIG_KTASK=n, calls to the ktask API are simply #define'd to run the +thread function that the client provides so that the task is completed without +concurrency in the current thread. + + +Interface +========= + +.. kernel-doc:: include/linux/ktask.h + + +Resource Limits +=============== + +ktask has resource limits on the number of work items it sends to workqueue. +In ktask, a workqueue item is a thread that runs chunks of the task until the +task is finished. + +These limits support the different ways ktask uses workqueues: + - ktask_run to run threads on the calling thread's node. 
+ - ktask_run_numa to run threads on the node(s) specified. + - ktask_run_numa with nid=NUMA_NO_NODE to run threads on any node in the + system. + +To support these different ways of queueing work while maintaining an efficient +concurrency level, we need both system-wide and per-node limits on the number +of threads. Without per-node limits, a node might become oversubscribed +despite ktask staying within the system-wide limit, and without a system-wide +limit, we can't properly account for work that can run on any node. + +The system-wide limit is based on the total number of CPUs, and the per-node +limit on the CPU count for each node. A per-node work item counts against the +system-wide limit. Workqueue's max_active can't accommodate both types of +limit, no matter how many workqueues are used, so ktask implements its own. + +If a per-node limit is reached, the work item is allowed to run anywhere on the +machine to avoid overwhelming the node. If the global limit is also reached, +ktask won't queue additional work items until we fall below the limit again. + +These limits apply only to workqueue items--that is, helper threads beyond the +one starting the task. That way, one thread per task is always allowed to run. + + +Scheduler Interaction +===================== + +Even within the resource limits, ktask must take care to run a number of +threads appropriate for the system's current CPU load. Under high CPU usage, +starting excessive helper threads may disturb other tasks, unfairly taking CPU +time away from them for the sake of an optimized kernel code path. + +ktask plays nicely in this case by setting helper threads to the lowest +scheduling priority on the system (MAX_NICE). This way, helpers' CPU time is +appropriately throttled on a busy system and other tasks are not disturbed. + +The main thread initiating the task remains at its original priority so that it +still makes progress on a busy system. + +It is possible for a helper thread to start running and then be forced off-CPU +by a higher priority thread. With the helper's CPU time curtailed by MAX_NICE, +the main thread may wait longer for the task to finish than it would have had +it not started any helpers, so to ensure forward progress at a single-threaded +pace, once the main thread is finished with all outstanding work in the task, +the main thread wills its priority to one helper thread at a time. At least +one thread will then always be running at the priority of the calling thread. + + +Cgroup Awareness +================ + +Given the potentially large amount of CPU time ktask threads may consume, they +should be aware of the cgroup of the task that called into ktask and +appropriately throttled. + +TODO: Implement cgroup-awareness in unbound workqueues. + + +Power Management +================ + +Starting additional helper threads may cause the system to consume more energy, +which is undesirable on energy-conscious devices. Therefore ktask needs to be +aware of cpufreq policies and scaling governors. + +If an energy-conscious policy is in use (e.g. powersave, conservative) on any +part of the system, that is a signal that the user has strong power management +preferences, in which case ktask is disabled. + +TODO: Implement this. + + +Backward Compatibility +====================== + +ktask is written so that existing calls to the API will be backwards compatible +should the API gain new features in the future. 
This is accomplished by +restricting API changes to members of struct ktask_ctl and having clients make +an opaque initialization call (DEFINE_KTASK_CTL). This initialization can then +be modified to include any new arguments so that existing call sites stay the +same. + + +Error Handling +============== + +Calls to ktask fail only if the provided thread function fails. In particular, +ktask avoids allocating memory internally during a task, so it's safe to use in +sensitive contexts. + +Tasks can fail midway through their work. To recover, the finished chunks of +work need to be undone in a task-specific way, so ktask allows clients to pass +an "undo" callback that is responsible for undoing one chunk of work. To avoid +multiple levels of error handling, this "undo" callback should not be allowed +to fail. For simplicity and because it's a slow path, undoing is not +multithreaded. + +Each call to ktask_run and ktask_run_numa returns a single value, +KTASK_RETURN_SUCCESS or a client-specific value. Since threads can fail for +different reasons, however, ktask may need the ability to return +thread-specific error information. This can be added later if needed. diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst index 25dc591cb1108790229e18e33da2291eb0317c74..9e5edfe5f0c1c47ccbaf18c52ce8594b5f3244e8 100644 --- a/Documentation/core-api/printk-formats.rst +++ b/Documentation/core-api/printk-formats.rst @@ -412,6 +412,24 @@ Examples:: Passed by reference. +Time and date (struct rtc_time) +------------------------------- + +:: + + %ptR YYYY-mm-ddTHH:MM:SS + %ptRd YYYY-mm-dd + %ptRt HH:MM:SS + %ptR[dt][r] + +For printing date and time as represented by struct rtc_time structure in +human readable format. + +By default year will be incremented by 1900 and month by 1. Use %ptRr (raw) +to suppress this behaviour. + +Passed by reference. + struct clk ---------- @@ -420,9 +438,8 @@ struct clk %pC pll1 %pCn pll1 -For printing struct clk structures. %pC and %pCn print the name -(Common Clock Framework) or address (legacy clock framework) of the -structure. +For printing struct clk structures. %pC and %pCn print the name of the clock +(Common Clock Framework) or a unique 32-bit ID (legacy clock framework). Passed by reference. diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst index 322851bada16742330c8a2cf4457e5ee30698a97..976e85adffe8ea4a5820fbea647540208ac357dd 100644 --- a/Documentation/core-api/refcount-vs-atomic.rst +++ b/Documentation/core-api/refcount-vs-atomic.rst @@ -54,6 +54,13 @@ must propagate to all other CPUs before the release operation (A-cumulative property). This is implemented using :c:func:`smp_store_release`. +An ACQUIRE memory ordering guarantees that all post loads and +stores (all po-later instructions) on the same CPU are +completed after the acquire operation. It also guarantees that all +po-later stores on the same CPU must propagate to all other CPUs +after the acquire operation executes. This is implemented using +:c:func:`smp_acquire__after_ctrl_dep`. + A control dependency (on success) for refcounters guarantees that if a reference for an object was successfully obtained (reference counter increment or addition happened, function returned true), @@ -119,13 +126,24 @@ Memory ordering guarantees changes: result of obtaining pointer to the object! 
-case 5) - decrement-based RMW ops that return a value ------------------------------------------------------ +case 5) - generic dec/sub decrement-based RMW ops that return a value +--------------------------------------------------------------------- Function changes: * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test` * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test` + +Memory ordering guarantees changes: + + * fully ordered --> RELEASE ordering + ACQUIRE ordering on success + + +case 6) other decrement-based RMW ops that return a value +--------------------------------------------------------- + +Function changes: + * no atomic counterpart --> :c:func:`refcount_dec_if_one` * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)`` @@ -136,7 +154,7 @@ Memory ordering guarantees changes: .. note:: :c:func:`atomic_add_unless` only provides full order on success. -case 6) - lock-based RMW +case 7) - lock-based RMW ------------------------ Function changes: diff --git a/Documentation/cpuidle/core.txt b/Documentation/cpuidle/core.txt deleted file mode 100644 index 63ecc5dc9d8a6c23e4b9f098d33eeec2262d63c8..0000000000000000000000000000000000000000 --- a/Documentation/cpuidle/core.txt +++ /dev/null @@ -1,23 +0,0 @@ - - Supporting multiple CPU idle levels in kernel - - cpuidle - -General Information: - -Various CPUs today support multiple idle levels that are differentiated -by varying exit latencies and power consumption during idle. -cpuidle is a generic in-kernel infrastructure that separates -idle policy (governor) from idle mechanism (driver) and provides a -standardized infrastructure to support independent development of -governors and drivers. - -cpuidle resides under drivers/cpuidle. - -Boot options: -"cpuidle_sysfs_switch" -enables current_governor interface in /sys/devices/system/cpu/cpuidle/, -which can be used to switch governors at run time. This boot option -is meant for developer testing only. In normal usage, kernel picks the -best governor based on governor ratings. -SEE ALSO: sysfs.txt in this directory. diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt deleted file mode 100644 index d1587f434e7bb6de8125a4601cee1bdcb24cae50..0000000000000000000000000000000000000000 --- a/Documentation/cpuidle/sysfs.txt +++ /dev/null @@ -1,98 +0,0 @@ - - - Supporting multiple CPU idle levels in kernel - - cpuidle sysfs - -System global cpuidle related information and tunables are under -/sys/devices/system/cpu/cpuidle - -The current interfaces in this directory has self-explanatory names: -* current_driver -* current_governor_ro - -With cpuidle_sysfs_switch boot option (meant for developer testing) -following objects are visible instead. -* current_driver -* available_governors -* current_governor -In this case users can switch the governor at run time by writing -to current_governor. 
- - -Per logical CPU specific cpuidle information are under -/sys/devices/system/cpu/cpuX/cpuidle -for each online cpu X - --------------------------------------------------------------------------------- -# ls -lR /sys/devices/system/cpu/cpu0/cpuidle/ -/sys/devices/system/cpu/cpu0/cpuidle/: -total 0 -drwxr-xr-x 2 root root 0 Feb 8 10:42 state0 -drwxr-xr-x 2 root root 0 Feb 8 10:42 state1 -drwxr-xr-x 2 root root 0 Feb 8 10:42 state2 -drwxr-xr-x 2 root root 0 Feb 8 10:42 state3 - -/sys/devices/system/cpu/cpu0/cpuidle/state0: -total 0 --r--r--r-- 1 root root 4096 Feb 8 10:42 desc --rw-r--r-- 1 root root 4096 Feb 8 10:42 disable --r--r--r-- 1 root root 4096 Feb 8 10:42 latency --r--r--r-- 1 root root 4096 Feb 8 10:42 name --r--r--r-- 1 root root 4096 Feb 8 10:42 power --r--r--r-- 1 root root 4096 Feb 8 10:42 residency --r--r--r-- 1 root root 4096 Feb 8 10:42 time --r--r--r-- 1 root root 4096 Feb 8 10:42 usage - -/sys/devices/system/cpu/cpu0/cpuidle/state1: -total 0 --r--r--r-- 1 root root 4096 Feb 8 10:42 desc --rw-r--r-- 1 root root 4096 Feb 8 10:42 disable --r--r--r-- 1 root root 4096 Feb 8 10:42 latency --r--r--r-- 1 root root 4096 Feb 8 10:42 name --r--r--r-- 1 root root 4096 Feb 8 10:42 power --r--r--r-- 1 root root 4096 Feb 8 10:42 residency --r--r--r-- 1 root root 4096 Feb 8 10:42 time --r--r--r-- 1 root root 4096 Feb 8 10:42 usage - -/sys/devices/system/cpu/cpu0/cpuidle/state2: -total 0 --r--r--r-- 1 root root 4096 Feb 8 10:42 desc --rw-r--r-- 1 root root 4096 Feb 8 10:42 disable --r--r--r-- 1 root root 4096 Feb 8 10:42 latency --r--r--r-- 1 root root 4096 Feb 8 10:42 name --r--r--r-- 1 root root 4096 Feb 8 10:42 power --r--r--r-- 1 root root 4096 Feb 8 10:42 residency --r--r--r-- 1 root root 4096 Feb 8 10:42 time --r--r--r-- 1 root root 4096 Feb 8 10:42 usage - -/sys/devices/system/cpu/cpu0/cpuidle/state3: -total 0 --r--r--r-- 1 root root 4096 Feb 8 10:42 desc --rw-r--r-- 1 root root 4096 Feb 8 10:42 disable --r--r--r-- 1 root root 4096 Feb 8 10:42 latency --r--r--r-- 1 root root 4096 Feb 8 10:42 name --r--r--r-- 1 root root 4096 Feb 8 10:42 power --r--r--r-- 1 root root 4096 Feb 8 10:42 residency --r--r--r-- 1 root root 4096 Feb 8 10:42 time --r--r--r-- 1 root root 4096 Feb 8 10:42 usage --------------------------------------------------------------------------------- - - -* desc : Small description about the idle state (string) -* disable : Option to disable this idle state (bool) -> see note below -* latency : Latency to exit out of this idle state (in microseconds) -* residency : Time after which a state becomes more effecient than any - shallower state (in microseconds) -* name : Name of the idle state (string) -* power : Power consumed while in this idle state (in milliwatts) -* time : Total time spent in this idle state (in microseconds) -* usage : Number of times this state was entered (count) - -Note: -The behavior and the effect of the disable variable depends on the -implementation of a particular governor. In the ladder governor, for -example, it is not coherent, i.e. if one is disabling a light state, -then all deeper states are disabled as well, but the disable variable -does not reflect it. Likewise, if one enables a deep state but a lighter -state still is disabled, then this has no effect. 
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt index c6e7e9196a8b41cdd983ac1452c0552c88a9c981..2ff8a1e9a2db0180db58b4d84ecf6f1f50a5f367 100644 --- a/Documentation/cputopology.txt +++ b/Documentation/cputopology.txt @@ -3,84 +3,91 @@ How CPU topology info is exported via sysfs =========================================== Export CPU topology info via sysfs. Items (attributes) are similar -to /proc/cpuinfo output of some architectures: +to /proc/cpuinfo output of some architectures. They reside in +/sys/devices/system/cpu/cpuX/topology/: -1) /sys/devices/system/cpu/cpuX/topology/physical_package_id: +physical_package_id: physical package id of cpuX. Typically corresponds to a physical socket number, but the actual value is architecture and platform dependent. -2) /sys/devices/system/cpu/cpuX/topology/core_id: +die_id: + + the CPU die ID of cpuX. Typically it is the hardware platform's + identifier (rather than the kernel's). The actual value is + architecture and platform dependent. + +core_id: the CPU core ID of cpuX. Typically it is the hardware platform's identifier (rather than the kernel's). The actual value is architecture and platform dependent. -3) /sys/devices/system/cpu/cpuX/topology/book_id: +book_id: the book ID of cpuX. Typically it is the hardware platform's identifier (rather than the kernel's). The actual value is architecture and platform dependent. -4) /sys/devices/system/cpu/cpuX/topology/drawer_id: +drawer_id: the drawer ID of cpuX. Typically it is the hardware platform's identifier (rather than the kernel's). The actual value is architecture and platform dependent. -5) /sys/devices/system/cpu/cpuX/topology/thread_siblings: +thread_siblings: internal kernel map of cpuX's hardware threads within the same core as cpuX. -6) /sys/devices/system/cpu/cpuX/topology/thread_siblings_list: +thread_siblings_list: human-readable list of cpuX's hardware threads within the same core as cpuX. -7) /sys/devices/system/cpu/cpuX/topology/core_siblings: +core_siblings: internal kernel map of cpuX's hardware threads within the same physical_package_id. -8) /sys/devices/system/cpu/cpuX/topology/core_siblings_list: +core_siblings_list: human-readable list of cpuX's hardware threads within the same physical_package_id. -9) /sys/devices/system/cpu/cpuX/topology/book_siblings: +book_siblings: internal kernel map of cpuX's hardware threads within the same book_id. -10) /sys/devices/system/cpu/cpuX/topology/book_siblings_list: +book_siblings_list: human-readable list of cpuX's hardware threads within the same book_id. -11) /sys/devices/system/cpu/cpuX/topology/drawer_siblings: +drawer_siblings: internal kernel map of cpuX's hardware threads within the same drawer_id. -12) /sys/devices/system/cpu/cpuX/topology/drawer_siblings_list: +drawer_siblings_list: human-readable list of cpuX's hardware threads within the same drawer_id. -To implement it in an architecture-neutral way, a new source file, -drivers/base/topology.c, is to export the 6 to 12 attributes. The book -and drawer related sysfs files will only be created if CONFIG_SCHED_BOOK -and CONFIG_SCHED_DRAWER are selected. +Architecture-neutral, drivers/base/topology.c, exports these attributes. +However, the book and drawer related sysfs files will only be created if +CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are selected, respectively. -CONFIG_SCHED_BOOK and CONFIG_DRAWER are currently only used on s390, where -they reflect the cpu and cache hierarchy. 
+CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are currently only used on s390,
+where they reflect the cpu and cache hierarchy.
 
 For an architecture to support this feature, it must define some of these
 macros in include/asm-XXX/topology.h::
 
 	#define topology_physical_package_id(cpu)
+	#define topology_die_id(cpu)
 	#define topology_core_id(cpu)
 	#define topology_book_id(cpu)
 	#define topology_drawer_id(cpu)
@@ -98,10 +105,12 @@ To be consistent on all architectures, include/linux/topology.h provides
 default definitions for any of the above macros that are not defined by
 include/asm-XXX/topology.h:
 
-1) physical_package_id: -1
-2) core_id: 0
-3) sibling_cpumask: just the given CPU
-4) core_cpumask: just the given CPU
+1) topology_physical_package_id: -1
+2) topology_die_id: -1
+3) topology_core_id: 0
+4) topology_sibling_cpumask: just the given CPU
+5) topology_core_cpumask: just the given CPU
+6) topology_die_cpumask: just the given CPU
 
 For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
 default definitions for topology_book_id() and topology_book_cpumask().
diff --git a/Documentation/device-mapper/dm-integrity.txt b/Documentation/device-mapper/dm-integrity.txt
index 297251b0d2d5715872449d0b4c556a0fb72cd4dc..bf6af2ade0a670cbfca452ee1f85986b7ca27bae 100644
--- a/Documentation/device-mapper/dm-integrity.txt
+++ b/Documentation/device-mapper/dm-integrity.txt
@@ -146,6 +146,13 @@ block_size:number
 	Supported values are 512, 1024, 2048 and 4096 bytes. If not
 	specified the default block size is 512 bytes.
 
+legacy_recalculate
+	Allow recalculating of volumes with HMAC keys. This is disabled by
+	default for security reasons - an attacker could modify the volume,
+	set recalc_sector to zero, and the kernel would not detect the
+	modification.
+
+
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
 be changed when reloading the target (load an inactive table and swap the
 tables with suspend and resume). The other arguments should not be changed
diff --git a/Documentation/devicetree/bindings/arm/arm,mpam.txt b/Documentation/devicetree/bindings/arm/arm,mpam.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e9ba09bb3159136f7290ea57927f276482afb048
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,mpam.txt
@@ -0,0 +1,57 @@
+Memory System Resource Partitioning and Monitoring (MPAM), for Armv8-A
+----------------------------------------------------------
+
+MPAM is used to limit memory bandwidth and cache usage on ARM platforms.
+The required properties for the driver are:
+	compatible = "arm,mpam";	/* MPAM for Arm */
+	reg = <>;			/* mpam device base register */
+
+The property type must be included; it is used to indicate the type of mpam
+device for the node. There are several types of mpam device:
+	MPAM_CLASS_SMMU = 0,
+	MPAM_CLASS_CACHE,	/* Well known caches, e.g. L2 */
+	MPAM_CLASS_MEMORY,	/* Main memory */
+	MPAM_CLASS_UNKNOWN,	/* Everything else, e.g. TLBs etc */
+
+The type of memory is set as:
+	type = <2>;
+The type of cache is set as:
+	type = <1>;
+
+MPAM supports interrupts for error and overflow; the error-interrupt and
+overflow-interrupt are defined in "Memory System Resource Partitioning
+and Monitoring (MPAM), for Armv8-A", MPAM interrupts (section 8.8).
+ overflow-interrupt = <0>; + overflow-flags = <0>; + error-interrupt = <0>; + error-interrupt-flags = <0>; + +Example: + +mpam { + compatible = "arm,mpam"; + + mpam_memory0 { + reg = <0x0 0x10000000 0x0 0x10000>; + type = <2>; /* memory type */ + numa-node-id = <0>; + overflow-interrupt = <0>; + overflow-flags = <0>; + error-interrupt = <0>; + error-interrupt-flags = <0>; + not-ready-max = <0>; + }; + + mpam_cache0 { + reg = <0x0 0x20000000 0x0 0x10000>; + type = <1>; /* cache type */ + cache-id = <0>; + cache-level = <3>; + overflow-interrupt = <0>; + overflow-flags = <0>; + error-interrupt = <0>; + error-interrupt-flags = <0>; + not-ready-max = <0>; + }; + +}; diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt index e96e085271c134f4889d08febddbab10141f6235..83f6c6a7c41c76cafd697b9b2e84e3b9f313135d 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt @@ -46,7 +46,7 @@ Required properties: Example (R-Car H3): usb2_clksel: clock-controller@e6590630 { - compatible = "renesas,r8a77950-rcar-usb2-clock-sel", + compatible = "renesas,r8a7795-rcar-usb2-clock-sel", "renesas,rcar-gen3-usb2-clock-sel"; reg = <0 0xe6590630 0 0x02>; clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>; diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt new file mode 100644 index 0000000000000000000000000000000000000000..a30d63db3c8f7e6811e0cbf97485c69aa287ad58 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt @@ -0,0 +1,9 @@ +Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with +an adapter board. + +Required properties: +- compatible: "armadeus,st0700-adapt" +- power-supply: see panel-common.txt + +Optional properties: +- backlight: see panel-common.txt diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt index aededdbc262b241304a0651be8d418c061f89e05..f9a7c984274ce739c392a3af08c0775199d6e32e 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.txt +++ b/Documentation/devicetree/bindings/eeprom/at24.txt @@ -27,6 +27,7 @@ Required properties: "atmel,24c256", "atmel,24c512", "atmel,24c1024", + "atmel,24c2048", If is not "atmel", then a fallback must be used with the same and "atmel" as manufacturer. diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt new file mode 100644 index 0000000000000000000000000000000000000000..bcc857592da57e583b6e4e1e23faca4371766ffb --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt @@ -0,0 +1,31 @@ +* Phytium SGPIO controller + +This SGPIO controller is for Phytium Pe220x SoCs, which supports up to +96 (32x3) Serial GPIOs. + +Required properties: +- compatible : Should contain "phytium,gpio" +- reg : Address and length of the register set for the device. +- interrupts: Interrupt mapping for GPIO IRQ. +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 2. 
The first cell is the pin number and + the second cell is used to specify the gpio polarity: + 0 = active high + 1 = active low +- ngpios: number of GPIO lines, see gpio.txt + (should be multiple of 32, up to 96 pins) +- bus-frequency: SGPIO CLK frequency +- clocks: A phandle to the APB clock for SGPIO clock division + +Example: + +sgpio: sgpio@2807d000 { + compatible = "phytium,sgpio"; + reg = <0x0 0x2807d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + ngpios = <96>; + bus-frequency = <48000>; + gpio-controller; + #gpio-cells = <2>; +}; diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..77d4c6c03d0037ab37f7783f7ee074cae1301df4 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt @@ -0,0 +1,47 @@ +* Phytium GPIO controller + +Required properties: +- compatible : Should contain "phytium,gpio" +- reg : Address and length of the register set for the device. +- interrupts: Interrupt mapping for GPIO IRQ. +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 2. The first cell is the pin number and + the second cell is used to specify the gpio polarity: + 0 = active high + 1 = active low +- #address-cells : should be 1 (for addressing port subnodes). +- #size-cells : should be 0 (port subnodes). + +The GPIO controller has two ports, each of which are represented as child +nodes with the following properties: + +Required properties: +- compatible : "phytium,gpio-port" +- reg : The integer port index of the port, a single cell. + +Optional properties: +- nr-gpios : The number of pins in the port, a single cell. + +Example: + +gpio: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; +}; diff --git a/Documentation/devicetree/bindings/gpu/phytium-display.txt b/Documentation/devicetree/bindings/gpu/phytium-display.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff2435967e1d6d14381a6d56d27d08874a466bc3 --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/phytium-display.txt @@ -0,0 +1,23 @@ +* Phytium Display Engine + +Required properties: + - compatible : value should be "phytium,dc". + - reg : first reg: Physical base address of the registers and length of memory + mapped region. + second reg (optional): Physical base address and length of video memory which is reserved system memory. + - interrupts : interrupt number. + - pipe_mask : specify which pipe is enabled, each bit corresponds to a pipe. + - edp_mask : specify which pipe is edp port, each bit corresponds to a pipe (0:dp, 1:edp). 
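+
+For illustration only (this is not part of the binding), a driver could test
+pipe N against the two masks as follows:
+
+	enabled = pipe_mask & BIT(N);	/* bit N set: pipe N is enabled */
+	is_edp  = edp_mask & BIT(N);	/* bit N set: pipe N drives eDP, else DP */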
+ +Example: + /memreserve/ 0xf4000000 0x4000000; // (optional) + + dc0@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>, + <0x0 0xf4000000 0x0 0x4000000>; // (optional) + interrupts = ; + pipe_mask = 0x3 + edp_mask = 0x0; + }; + diff --git a/Documentation/devicetree/bindings/i2c/i2c-phytium.txt b/Documentation/devicetree/bindings/i2c/i2c-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1da97b7c60dcdec68f9ff002d62d39f2c14df9a --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-phytium.txt @@ -0,0 +1,24 @@ +* Phytium I2C/SMBus controller + +Required properties : + + - compatible : should be "phytium,i2c" + - reg : Offset and length of the register set for the device + - interrupts : where IRQ is the interrupt number. + - clock-frequency : desired I2C bus clock frequency in Hz. + +Optional properties: + + - interrupt-names: should be "smbus_alert" if SMBus alert + interrupt is supported. + +Examples : + + i2c0: i2c@28011000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28011000 0x0 0x1000>; + interrupts = ; + interrupt-names = "smbus_alert"; + clocks = <&sysclk_48mhz>; + }; + diff --git a/Documentation/devicetree/bindings/iio/adc/phytium,adc.txt b/Documentation/devicetree/bindings/iio/adc/phytium,adc.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fc793a857648a1085bd5037c2e6e7ccf2325346 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/phytium,adc.txt @@ -0,0 +1,57 @@ +Phytium ADC + +This device is a 10-bit converter for 8 voltage channels. All inputs are +single ended. + +Required properties: +- compatible: Should be "phytium,adc" +- reg: memory window mapping address and length +- interrupts: Interrupt for the ADC control interface. +- clocks: Input clock used to derive the sample clock. +- #address-cells: Should be <1> (settings for the subnodes). +- #size-cells: Should be <0> (settings for the subnodes). + +Required subnodes: + +The ADC channels are configured as subnodes of the ADC. + +Required channel node properties: + +- reg: should contain the hardware channel number. + +Examples: + + adc0: adc@2807b000 { + compatible = "phytium,adc"; + reg = <0x0 0x2807b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + channel@1 { + reg = <1>; + }; + channel@2 { + reg = <2>; + }; + channel@3 { + reg = <3>; + }; + channel@4 { + reg = <4>; + }; + channel@5 { + reg = <5>; + }; + channel@6 { + reg = <5>; + }; + channel@7 { + reg = <7>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt index 6c49db7f8ad2597128b644316fe60936e994198c..e1fe02f3e3e9c421b170ffebc699085e47f9f0ec 100644 --- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt +++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt @@ -11,11 +11,13 @@ New driver handles the following Required properties: - compatible: Must be "samsung,exynos-adc-v1" - for exynos4412/5250 and s5pv210 controllers. + for Exynos5250 controllers. Must be "samsung,exynos-adc-v2" for future controllers. Must be "samsung,exynos3250-adc" for controllers compatible with ADC of Exynos3250. + Must be "samsung,exynos4212-adc" for + controllers compatible with ADC of Exynos4212 and Exynos4412. 
Must be "samsung,exynos7-adc" for the ADC in Exynos7 and compatibles Must be "samsung,s3c2410-adc" for @@ -28,6 +30,8 @@ Required properties: the ADC in s3c2443 and compatibles Must be "samsung,s3c6410-adc" for the ADC in s3c6410 and compatibles + Must be "samsung,s5pv210-adc" for + the ADC in s5pv210 and compatibles - reg: List of ADC register address range - The base address and range of ADC register - The base address and range of ADC_PHY register (every diff --git a/Documentation/devicetree/bindings/input/phytium-keypad.txt b/Documentation/devicetree/bindings/input/phytium-keypad.txt new file mode 100644 index 0000000000000000000000000000000000000000..b34e6a1b367821aa575c7400090fcf0b759a50cd --- /dev/null +++ b/Documentation/devicetree/bindings/input/phytium-keypad.txt @@ -0,0 +1,41 @@ +* Phytium Keypad Port device tree bindings + +The keypad port is designed to interface with a keypad matrix, which +simplify the software task of scanning a keypad matrix. It is capable +of detecting, debouncing, and decoding one or multiple keys pressed +simultaneously on a keypad. + +Required SoC Specific Properties: +- compatible: Should be "phytium,keypad". +- reg: Physical base address and length of memory mapped region. +- interrupts: Interrupt number to the CPU(s). + +Required Board Specific Properties: +- linux,keymap: The definition can be found at +bindings/input/matrix-keymap.txt. + +Example: + +keypad: keypad@2807a000 { + compatible = "phytium,keypad"; + reg = <0x 0x2807a000 0x0 0x1000>; + interrupts = ; + keypad,num-rows = <4>; + keypad,num-columns = <4>; + linux,keymap = <0x00000067 /* KEY_UP */ + 0x0001006c /* KEY_DOWN */ + 0x00020072 /* KEY_VOLUMEDOWN */ + 0x00030066 /* KEY_HOME */ + 0x0100006a /* KEY_RIGHT */ + 0x01010069 /* KEY_LEFT */ + 0x0102001c /* KEY_ENTER */ + 0x01030073 /* KEY_VOLUMEUP */ + 0x02000040 /* KEY_F6 */ + 0x02010042 /* KEY_F8 */ + 0x02020043 /* KEY_F9 */ + 0x02030044 /* KEY_F10 */ + 0x0300003b /* KEY_F1 */ + 0x0301003c /* KEY_F2 */ + 0x0302003d /* KEY_F3 */ + 0x03030074>; /* KEY_POWER */ +}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt index 3ea78c4ef887c9be43a48a287b9cabc77c1a2db4..9f4fe47d9d54cb3991350cc672feaec11a5b46b1 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt @@ -71,6 +71,10 @@ Optional region containing only the {SET,CLR}SPI registers to be used if isolation is required, and if supported by the HW. +- enable-init-all-gicr: Boolean property. Identifies kernel initializes + message interrupt functionality for other GICR not managed by this + operating system. + Sub-nodes: PPI affinity can be expressed as a single "ppi-partitions" node, diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt index c9abbf3e4f68238faaa287dc9fb2222af0e4c236..322f958939fb464c30b4bbe972e260522303a502 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt @@ -61,6 +61,14 @@ the PCIe specification. Set for Cavium ThunderX2 silicon that doesn't support SMMU page1 register space. +- hisilicon,message-based-spi + : Message based SPI is used for Ascend310 silicon. The addr + of GICD_SETSPIR needs to be configured in the CFG_REG of + SMMU. 
+ +- iommu-spi-base + : The addr of GICD_SETSPI + ** Example smmu@2b400000 { diff --git a/Documentation/devicetree/bindings/iommu/iommu.txt b/Documentation/devicetree/bindings/iommu/iommu.txt index 5a8b4624defcb981fc630b2b88ba88957efe6868..ec3ea3a97b77f12b366d9680a788b9d6cc12c6fd 100644 --- a/Documentation/devicetree/bindings/iommu/iommu.txt +++ b/Documentation/devicetree/bindings/iommu/iommu.txt @@ -86,6 +86,30 @@ have a means to turn off translation. But it is invalid in such cases to disable the IOMMU's device tree node in the first place because it would prevent any driver from properly setting up the translations. +Optional properties: +-------------------- +- dma-can-stall: When present, the master can wait for a transaction to + complete for an indefinite amount of time. Upon translation fault some + IOMMUs, instead of aborting the translation immediately, may first + notify the driver and keep the transaction in flight. This allows the OS + to inspect the fault and, for example, make physical pages resident + before updating the mappings and completing the transaction. Such IOMMU + accepts a limited number of simultaneous stalled transactions before + having to either put back-pressure on the master, or abort new faulting + transactions. + + Firmware has to opt-in stalling, because most buses and masters don't + support it. In particular it isn't compatible with PCI, where + transactions have to complete before a time limit. More generally it + won't work in systems and masters that haven't been designed for + stalling. For example the OS, in order to handle a stalled transaction, + may attempt to retrieve pages from secondary storage in a stalled + domain, leading to a deadlock. + +- pasid-num-bits: Some masters support multiple address spaces for DMA, by + tagging DMA transactions with an address space identifier. By default, + this is 0, which means that the device only has one address space. + Notes: ====== diff --git a/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d6f5a44f6e4bb961d4df5be5befa5cabe91010d --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt @@ -0,0 +1,32 @@ +Phytium Mailbox Driver +====================== + +The Phytium mailbox controller that has a channel/link to communicate +with the remote end. A link raises interrupt for any received data. However, +there is no specified way of knowing if the sent data has been read by the +remote. This driver assumes the sender polls STAT register and the remote +clears it after having read the data. + +Mailbox Device Node: +==================== + +Required properties: +-------------------- +- compatible: Shall be "phytium,mbox" +- reg: Contains the mailbox register address range (base + address and length) +- #mbox-cells Shall be 1 - the index of the channel needed. +- interrupts: Contains the interrupt information corresponding to + the link. 
+ +Example: +-------- + +mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + #mbox-cells = <1>; + interrupts = <0 48 4>; + clocks = <&sycclk>; + clock-names = "apb_pclk"; +}; diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.txt b/Documentation/devicetree/bindings/media/i2c/adv748x.txt index 21ffb5ed818302ff4fb709a213aebfc1d4980b1f..54d1d3bc186949fa2cb1ac0a1da198c4cdd41971 100644 --- a/Documentation/devicetree/bindings/media/i2c/adv748x.txt +++ b/Documentation/devicetree/bindings/media/i2c/adv748x.txt @@ -73,7 +73,7 @@ Example: }; }; - port@10 { + port@a { reg = <10>; adv7482_txa: endpoint { @@ -83,7 +83,7 @@ Example: }; }; - port@11 { + port@b { reg = <11>; adv7482_txb: endpoint { diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt index f5a0923b34ca1e5dfd11f3c9ba03792a6963b5c7..c269dbe384feab159be98e713a328585499e473a 100644 --- a/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/Documentation/devicetree/bindings/mmc/mmc.txt @@ -62,6 +62,10 @@ Optional properties: be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay waiting for I/O signalling and card power supply to be stable, regardless of whether pwrseq-simple is used. Default to 10ms if no available. +- supports-cqe : The presence of this property indicates that the corresponding + MMC host controller supports HW command queue feature. +- disable-cqe-dcmd: This property indicates that the MMC controller's command + queue engine (CQE) does not support direct commands (DCMDs). *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line polarity properties, we have to fix the meaning of the "normal" and "inverted" diff --git a/Documentation/devicetree/bindings/mmc/phytium,sdci.txt b/Documentation/devicetree/bindings/mmc/phytium,sdci.txt new file mode 100644 index 0000000000000000000000000000000000000000..a304f60f6d8e9cef6f9549db4b0c4d2c56bcd341 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/phytium,sdci.txt @@ -0,0 +1,22 @@ +Phytium SD Host controller + +The SD Host Controller on Phytium SoCs provides an interface +for SD and SDIO types of memory cards. + +Required properties: +- compatible : should be "phytium,mci". +- reg: mmc controller base registers. +- interrupts : mmc controller interrupt. +- clocks : phandles to input clocks. +- clock-names : should be "phytium_mci_clk". + +Examples: + sdci: sdci@28207c00 { + compatible = "phytium,sdci"; + reg = <0x0 0x28207c00 0x0 0x100>; + interrupts = , + , + ; + clocks = <&sysclk_600mhz>; + clock-names = "phytium_sdc_clk"; + }; diff --git a/Documentation/devicetree/bindings/mmc/phytium-mci.txt b/Documentation/devicetree/bindings/mmc/phytium-mci.txt new file mode 100644 index 0000000000000000000000000000000000000000..b225ff8c47c3518a9322d41dc36f41d09afc9d25 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/phytium-mci.txt @@ -0,0 +1,35 @@ +Phytium Multimedia Card Interface controller + +The highspeed MMC host controller on Phytium SoCs provides an interface +for MMC, SD and SDIO types of memory cards. + +Required properties: +- compatible : should be "phytium,mci". +- reg: mmc controller base registers. +- clocks : phandles to input clocks. +- clock-names : should be "phytium_mci_clk". +- interrupts : mmc controller interrupt. 
+ +Examples: + - Within .dtsi: + mmc0: mmc@28000000 { + compatible = "phytium,mci"; + reg = <0x0 0x28000000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_1200mhz>; + clock-names = "phytium_mci_clk"; + status = "disabled"; + }; + + - Within dts: + &mmc0 { + bus-width = <4>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + no-mmc; + status = "ok"; + }; diff --git a/Documentation/devicetree/bindings/mtd/phytium,qspi.txt b/Documentation/devicetree/bindings/mtd/phytium,qspi.txt new file mode 100644 index 0000000000000000000000000000000000000000..ea9ab44e58e202e9e4bbb4c3132d223a9f596e32 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/phytium,qspi.txt @@ -0,0 +1,40 @@ +* Phytium Quad Serial Peripheral Interface (QSPI) + +Required properties: +- compatible: should be "phytium,qspi" +- reg: the first contains the register + the second contains the memory mapping region +- reg-names: should contain the reg names "qspi" "qspi_mm" +- clocks: the phandle of the clock needed by the QSPI controller + +Optional property: +- reg: chip-Select number +- spi-max-frequency: max frequency of spi bus +- spi-rx-bus-width: see ../spi/spi-bus.txt for the description + +Example: + +qspi@28014000 { + compatible = "phytium,qspi"; + reg = <0x0 0x28014000 0x0 0x1000 0x0 0x0 0x0 0x10000000>; + reg-names = "qspi", "qspi_mm"; + clocks = <0x4>; + status = "ok"; + + flash0@0 { + spi-rx-bus-width = <0x1>; + spi-max-frequency = <0x23c34600>; + }; + + flash1@0 { + reg = <0x1>; + spi-rx-bus-width = <0x1>; + spi-max-frequency = <0x23c34600>; + }; + + flash2@0 { + reg = <0x2>; + spi-rx-bus-width = <0x1>; + spi-max-frequency = <0x23c34600>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt index 4648948f7c3b8f26391292b06aa01743100e8796..e15589f47787633b8d5abeb44ccfa78e01994583 100644 --- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt +++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt @@ -19,6 +19,9 @@ Optional properties: - interrupt-names: must be "mdio_done_error" when there is a share interrupt fed to this hardware block, or must be "mdio_done" for the first interrupt and "mdio_error" for the second when there are separate interrupts +- clocks: A reference to the clock supplying the MDIO bus controller +- clock-frequency: the MDIO bus clock that must be output by the MDIO bus + hardware, if absent, the default hardware values are used Child nodes of this MDIO bus controller node are standard Ethernet PHY device nodes as described in Documentation/devicetree/bindings/net/phy.txt diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt index 903a78da65be288cf750af872584b60e9f42c06f..3a9926f99937039022d283817beac8e9bcbbc926 100644 --- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt +++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt @@ -17,7 +17,7 @@ Example: reg = <1>; clocks = <&clk32m>; interrupt-parent = <&gpio4>; - interrupts = <13 IRQ_TYPE_EDGE_RISING>; + interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <®5v0>; xceiver-supply = <®5v0>; }; diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt index 188c8bd4eb67709bdc05bc1014fe86d692256be4..5a0111d4de58c2e546ff257708c3ae5c07cf08e1 100644 --- 
a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt @@ -4,6 +4,7 @@ Required properties: - compatible: Should be one of the following: - "microchip,mcp2510" for MCP2510. - "microchip,mcp2515" for MCP2515. + - "microchip,mcp25625" for MCP25625. - reg: SPI chip select. - clocks: The clock feeding the CAN controller. - interrupts: Should contain IRQ line for the CAN controller. diff --git a/Documentation/devicetree/bindings/net/can/phytium,can.yaml b/Documentation/devicetree/bindings/net/can/phytium,can.yaml new file mode 100644 index 0000000000000000000000000000000000000000..729bdfdd18fdf3a4412b7b2c55458c7441798d85 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/phytium,can.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/can/phytium,can.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium CAN Controller + +maintainers: + - Chen Baozi + +allOf: + - $ref: /schemas/net/can/phytium/phytium,can.yaml# + +properties: + compatible: + oneOf: + - item: + -const: phytium,can + - const: phytium,canfd + + reg: + minItems: 1 + items: + - description: Should contain CANFD controller registers location and length + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: CLocks used by the controller + + clock-names: + items: + - const: can_clk + +required: + - compatible + - reg + - interrupts + +examples: + - | + can0: can@2800a000{ + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + }; +... diff --git a/Documentation/devicetree/bindings/net/can/phytium.can.yaml b/Documentation/devicetree/bindings/net/can/phytium.can.yaml new file mode 100644 index 0000000000000000000000000000000000000000..729bdfdd18fdf3a4412b7b2c55458c7441798d85 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/phytium.can.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/can/phytium,can.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium CAN Controller + +maintainers: + - Chen Baozi + +allOf: + - $ref: /schemas/net/can/phytium/phytium,can.yaml# + +properties: + compatible: + oneOf: + - item: + -const: phytium,can + - const: phytium,canfd + + reg: + minItems: 1 + items: + - description: Should contain CANFD controller registers location and length + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: CLocks used by the controller + + clock-names: + items: + - const: can_clk + +required: + - compatible + - reg + - interrupts + +examples: + - | + can0: can@2800a000{ + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + }; +... diff --git a/Documentation/devicetree/bindings/net/dwmac-phytium.txt b/Documentation/devicetree/bindings/net/dwmac-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..c8f691b4f2b5a86a415e0ea1d18c6312bd243062 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dwmac-phytium.txt @@ -0,0 +1,28 @@ +Phytium 10/100/1000 Ethernet driver(GMAC) + +The device node has following properties. 
+
+Required properties:
+ - compatible: should be "phytium,gmac"
+ - reg: addresses and length of the register sets for the device.
+ - interrupts: Should contain the GMAC interrupts.
+ - phy-mode: See ethernet.txt file in the same directory.
+ - clock-frequency: the frequency of the clock.
+
+Example:
+gmac0: eth@2820c000 {
+	compatible = "phytium,gmac";
+	reg = <0x0 0x2820c000 0x0 0x2000>;
+	interrupts = ;
+	status = "disabled";
+	phy-mode = "rgmii-txid";
+	clock-frequency = <250000000>;
+
+	snps,pbl = <16>;
+	snps,fixed-burst;
+	snps,multicast-filter-bins = <64>;
+	snps,perfect-filter-entries = <128>;
+	tx-fifo-depth = <4096>;
+	rx-fifo-depth = <4096>;
+	max-frame-size = <9000>;
+};
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index 42cd81090a2c77f959020328aee5f953b02703db..3f3cfc1d8d4d855485c048c85b330b52970cae11 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -16,7 +16,7 @@ Required properties:
 
 Optional properties:
 - interrupts: interrupt line number for the SMI error/done interrupt
-- clocks: phandle for up to three required clocks for the MDIO instance
+- clocks: phandle for up to four required clocks for the MDIO instance
 
 The child nodes of the MDIO driver are the individual PHY devices
 connected to this MDIO bus. They must have a "reg" property given the
diff --git a/Documentation/devicetree/bindings/perf/hisi-l3t-pmu.txt b/Documentation/devicetree/bindings/perf/hisi-l3t-pmu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f747d0e8cff361995a0b4fc687978a24e7011902
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/hisi-l3t-pmu.txt
@@ -0,0 +1,17 @@
+Hisilicon L3T PMU controllers
+
+Required properties:
+ - compatible : should be "hisilicon,l3t-pmu".
+ - reg : should contain at least address and length of the L3T PMU
+   register set for the device.
+ - interrupts : one L3T interrupt should be described here.
+
+Example
+	l3t0@81170000 {
+		compatible = "hisilicon,l3t-pmu";
+		hisilicon,scl-id = <1>;
+		hisilicon,ccl-id = <0>;
+		hisilicon,index-id = <1>;
+		reg = <0x0 0x81170000 0x0 0x10000>;
+		interrupts = <0x0 316 0x4>;
+	};
diff --git a/Documentation/devicetree/bindings/perf/hisi-lpddrc-pmu.txt b/Documentation/devicetree/bindings/perf/hisi-lpddrc-pmu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..89ebc7e75bc49c3197f565a57f5bb1a02337e99e
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/hisi-lpddrc-pmu.txt
@@ -0,0 +1,16 @@
+Hisilicon LPDDRC PMU controllers
+
+Required properties:
+ - compatible : should be "hisilicon,lpddrc-pmu".
+ - reg : should contain at least address and length of the LPDDRC PMU
+   register set for the device.
+ - interrupts : one LPDDRC interrupt should be described here.
+
+Example:
+	lpddrc0@A5800000 {
+		compatible = "hisilicon,lpddrc-pmu";
+		hisilicon,ch-id = <0>;
+		hisilicon,scl-id = <1>;
+		reg = <0x0 0xA5800000 0x0 0x10000>;
+		interrupts = <0x0 32 0x4>;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
index be789685a1c24256e8b9846121ccb407d9f39e94..18b892d010d87772c49db004cd48e68c8195a614 100644
--- a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -27,4 +27,4 @@ and valid to enable charging:
 
  - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
  - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
-   resistor, the other values are in ohm.
+   resistor, the other values are in kOhm.
diff --git a/Documentation/devicetree/bindings/rtc/phytium,rtc.txt b/Documentation/devicetree/bindings/rtc/phytium,rtc.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8eefd3024f0c42846cdc70ecb0355054801f9b3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/phytium,rtc.txt
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/phytium,rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Real Time Clock
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: phytium,rtc
+  required:
+    - compatible
+
+allOf:
+  - $ref: rtc.yaml#
+
+maintainers:
+  - Chen Baozi
+
+properties:
+  compatible:
+    items:
+      - const: phytium,rtc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  start-year: true
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+    rtc@2800d000 {
+      compatible = "phytium,rtc";
+      reg = <0x0 0x2800d000 0x0 0x1000>;
+      interrupts = ;
+      clocks = <&sysclk_48mhz>;
+    };
diff --git a/Documentation/devicetree/bindings/sound/phytium-hda.txt b/Documentation/devicetree/bindings/sound/phytium-hda.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d0a7dbbdcd69a9217622ab07f70655e9cc62425
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/phytium-hda.txt
@@ -0,0 +1,22 @@
+* Phytium HDA controller
+
+The HDA bus (High Definition Audio sound bus) is a serial link for digital
+audio data transfer between devices in the system.
+
+Required properties:
+
+- compatible: should be "phytium,hda"
+- reg: physical base address and length of HDA controller.
+- interrupts: interrupt for the hda controller.
+- clocks: phandle to clock provider with the clock number in the second cell.
+- clock-names: the name of the device clock.
+ +Example for D2000 HDA controller: + +hda: hda@28206000 { + compatible = "phytium,hda"; + reg = <0 0x28206000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + clock-names = "phytium_hda_clk"; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-phytium.txt b/Documentation/devicetree/bindings/spi/spi-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..a674d192132c0c5f5ddb5ea34481d19b2bfc8676 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-phytium.txt @@ -0,0 +1,24 @@ +Phytium SPI controller + +Required properties: +- compatible: should be "phytium,spi" +- #address-cells: see spi-bus.txt +- #size-cells: see spi-bus.txt +- reg: address and length of the spi master registers +- interrupts: should contain one interrupt +- clocks: spi clock phandle +- num-cs: see spi-bus.txt + +Optional properties: +- cs-gpios: see spi-bus.txt + +Example: + + +spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt index 504a4ecfc7b16869192c666e903a9d884d9f052d..b04e66a52de5dfc4ca66a3efc5339e4986275033 100644 --- a/Documentation/devicetree/bindings/spi/spi-uniphier.txt +++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt @@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel. Required properties: - compatible: should be "socionext,uniphier-scssi" - reg: address and length of the spi master registers - - #address-cells: must be <1>, see spi-bus.txt - - #size-cells: must be <0>, see spi-bus.txt - - clocks: A phandle to the clock for the device. - - resets: A phandle to the reset control for the device. + - interrupts: a single interrupt specifier + - pinctrl-names: should be "default" + - pinctrl-0: pin control state for the default mode + - clocks: a phandle to the clock for the device + - resets: a phandle to the reset control for the device Example: spi0: spi@54006000 { compatible = "socionext,uniphier-scssi"; reg = <0x54006000 0x100>; - #address-cells = <1>; - #size-cells = <0>; + interrupts = <0 39 4>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi0>; clocks = <&peri_clk 11>; resets = <&peri_rst 11>; }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 2c3fc512e7466fe0d7577db7d7c74b803739585c..3334d8399a36103b270a163e77b1cad606d473ba 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -292,6 +292,7 @@ parade Parade Technologies Inc. pericom Pericom Technology Inc. pervasive Pervasive Displays, Inc. phytec PHYTEC Messtechnik GmbH +phytium Phytium Technology Co., Ltd. picochip Picochip Ltd pine64 Pine64 pixcir PIXCIR MICROELECTRONICS Co., Ltd diff --git a/Documentation/devicetree/bindings/w1/phytium,w1.txt b/Documentation/devicetree/bindings/w1/phytium,w1.txt new file mode 100644 index 0000000000000000000000000000000000000000..43784c3aa6de16524a216add43a1fc984b21fe62 --- /dev/null +++ b/Documentation/devicetree/bindings/w1/phytium,w1.txt @@ -0,0 +1,14 @@ +* Phytium 1-wire bus master controller + +Required properties: +- compatible : should be "phytium,w1" +- reg : Address and length of the register set for the device +- interrupts : interrupt line. 
+ +Example: + + onewire0: onewire@2803f000 { + compatible = "phytium,w1"; + reg = <0x0 0x2803f000 0x0 0x1000>; + interrupts = ; + }; diff --git a/Documentation/dontdiff b/Documentation/dontdiff index 2228fcc8e29f40ed02bfd0d1d5329f2e61d9b9a2..3d4d5a402b8bec2df6dd8ac9a6fc3484b3f6fecc 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -179,6 +179,7 @@ mktables mktree modpost modules.builtin +modules.builtin.modinfo modules.order modversions.h* nconf diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst index d6763272e747c80361fafe85a69c4720c01eb227..e8b0a8fd1ae0d0edf20d5910a6a6d8b3b64d0450 100644 --- a/Documentation/driver-api/device_link.rst +++ b/Documentation/driver-api/device_link.rst @@ -25,8 +25,8 @@ suspend/resume and shutdown ordering. Device links allow representation of such dependencies in the driver core. -In its standard form, a device link combines *both* dependency types: -It guarantees correct suspend/resume and shutdown ordering between a +In its standard or *managed* form, a device link combines *both* dependency +types: It guarantees correct suspend/resume and shutdown ordering between a "supplier" device and its "consumer" devices, and it guarantees driver presence on the supplier. The consumer devices are not probed before the supplier is bound to a driver, and they're unbound before the supplier @@ -59,18 +59,24 @@ device ``->probe`` callback or a boot-time PCI quirk. Another example for an inconsistent state would be a device link that represents a driver presence dependency, yet is added from the consumer's -``->probe`` callback while the supplier hasn't probed yet: Had the driver -core known about the device link earlier, it wouldn't have probed the +``->probe`` callback while the supplier hasn't started to probe yet: Had the +driver core known about the device link earlier, it wouldn't have probed the consumer in the first place. The onus is thus on the consumer to check presence of the supplier after adding the link, and defer probing on -non-presence. - -If a device link is added in the ``->probe`` callback of the supplier or -consumer driver, it is typically deleted in its ``->remove`` callback for -symmetry. That way, if the driver is compiled as a module, the device -link is added on module load and orderly deleted on unload. The same -restrictions that apply to device link addition (e.g. exclusion of a -parallel suspend/resume transition) apply equally to deletion. +non-presence. [Note that it is valid to create a link from the consumer's +``->probe`` callback while the supplier is still probing, but the consumer must +know that the supplier is functional already at the link creation time (that is +the case, for instance, if the consumer has just acquired some resources that +would not have been available had the supplier not been functional then).] + +If a device link with ``DL_FLAG_STATELESS`` set (i.e. a stateless device link) +is added in the ``->probe`` callback of the supplier or consumer driver, it is +typically deleted in its ``->remove`` callback for symmetry. That way, if the +driver is compiled as a module, the device link is added on module load and +orderly deleted on unload. The same restrictions that apply to device link +addition (e.g. exclusion of a parallel suspend/resume transition) apply equally +to deletion. Device links managed by the driver core are deleted automatically +by it. 
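To make the lifetime rules concrete, a minimal sketch of a consumer driver follows: it creates a stateless link in its ``->probe()`` callback and deletes it again in ``->remove()``. The supplier lookup ``foo_get_supplier_dev()`` is a hypothetical helper and the flag combination is only illustrative.

.. code-block:: C

    /* Minimal sketch: a stateless device link owned by the consumer driver. */
    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_priv {
            struct device_link *link;
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *supplier = foo_get_supplier_dev(&pdev->dev); /* hypothetical */
            struct foo_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* Stateless: the caller, not the driver core, must delete it again. */
            priv->link = device_link_add(&pdev->dev, supplier,
                                         DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
            if (!priv->link)
                    return -EINVAL;

            platform_set_drvdata(pdev, priv);
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            struct foo_priv *priv = platform_get_drvdata(pdev);

            device_link_del(priv->link);    /* symmetric to the addition in ->probe() */
            return 0;
    }

A managed link would instead pass a flag such as ``DL_FLAG_AUTOREMOVE_CONSUMER`` and omit the ``device_link_del()`` call, leaving deletion to the driver core.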
Several flags may be specified on device link addition, two of which have already been mentioned above: ``DL_FLAG_STATELESS`` to express that no @@ -83,22 +89,37 @@ link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE`` can be specified to runtime resume the supplier upon addition of the device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be automatically purged when the consumer fails to probe or later unbinds. -This obviates the need to explicitly delete the link in the ``->remove`` -callback or in the error path of the ``->probe`` callback. Similarly, when the device link is added from supplier's ``->probe`` callback, ``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically purged when the supplier fails to probe or later unbinds. +If neither ``DL_FLAG_AUTOREMOVE_CONSUMER`` nor ``DL_FLAG_AUTOREMOVE_SUPPLIER`` +is set, ``DL_FLAG_AUTOPROBE_CONSUMER`` can be used to request the driver core +to probe for a driver for the consumer driver on the link automatically after +a driver has been bound to the supplier device. + +Note, however, that any combinations of ``DL_FLAG_AUTOREMOVE_CONSUMER``, +``DL_FLAG_AUTOREMOVE_SUPPLIER`` or ``DL_FLAG_AUTOPROBE_CONSUMER`` with +``DL_FLAG_STATELESS`` are invalid and cannot be used. + Limitations =========== -Driver authors should be aware that a driver presence dependency (i.e. when -``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of -the consumer to be deferred indefinitely. This can become a problem if the -consumer is required to probe before a certain initcall level is reached. -Worse, if the supplier driver is blacklisted or missing, the consumer will -never be probed. +Driver authors should be aware that a driver presence dependency for managed +device links (i.e. when ``DL_FLAG_STATELESS`` is not specified on link addition) +may cause probing of the consumer to be deferred indefinitely. This can become +a problem if the consumer is required to probe before a certain initcall level +is reached. Worse, if the supplier driver is blacklisted or missing, the +consumer will never be probed. + +Moreover, managed device links cannot be deleted directly. They are deleted +by the driver core when they are not necessary any more in accordance with the +``DL_FLAG_AUTOREMOVE_CONSUMER`` and ``DL_FLAG_AUTOREMOVE_SUPPLIER`` flags. +However, stateless device links (i.e. device links with ``DL_FLAG_STATELESS`` +set) are expected to be removed by whoever called :c:func:`device_link_add()` +to add them with the help of either :c:func:`device_link_del()` or +:c:func:`device_link_remove()`. Sometimes drivers depend on optional resources. They are able to operate in a degraded mode (reduced feature set or performance) when those resources @@ -282,4 +303,4 @@ API === .. kernel-doc:: drivers/base/core.c - :functions: device_link_add device_link_del + :functions: device_link_add device_link_del device_link_remove diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst index 79beb807996b7a3a17e08b5f1d6e31d4176d3fc2..4a74cf6f2797274b96510685a44f4066fac558d3 100644 --- a/Documentation/driver-api/usb/power-management.rst +++ b/Documentation/driver-api/usb/power-management.rst @@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0 then the interface is considered to be idle, and the kernel may autosuspend the device. 
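A minimal sketch of the usage-counter API described here, bracketing a single I/O operation; ``foo_do_io()`` is a hypothetical driver helper.

.. code-block:: C

    #include <linux/usb.h>

    static int foo_send_command(struct usb_interface *intf)
    {
            int ret;

            /* Increment the usage counter; resumes the device if it was suspended. */
            ret = usb_autopm_get_interface(intf);
            if (ret)
                    return ret;

            ret = foo_do_io(intf);          /* hypothetical I/O helper */

            /* Decrement the counter; the device may autosuspend once it reaches 0. */
            usb_autopm_put_interface(intf);
            return ret;
    }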
-Drivers need not be concerned about balancing changes to the usage -counter; the USB core will undo any remaining "get"s when a driver -is unbound from its interface. As a corollary, drivers must not call -any of the ``usb_autopm_*`` functions after their ``disconnect`` -routine has returned. +Drivers must be careful to balance their overall changes to the usage +counter. Unbalanced "get"s will remain in effect when a driver is +unbound from its interface, preventing the device from going into +runtime suspend should the interface be bound to a driver again. On +the other hand, drivers are allowed to achieve this balance by calling +the ``usb_autopm_*`` functions even after their ``disconnect`` routine +has returned -- say from within a work-queue routine -- provided they +retain an active reference to the interface (via ``usb_get_intf`` and +``usb_put_intf``). Drivers using the async routines are responsible for their own synchronization and mutual exclusion. diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 48b424de85bbca87f840774883132671aec50f02..cfbc18f0d9c985a16f6ff9dcd665f6e8d017a067 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported: - AES-256-XTS for contents and AES-256-CTS-CBC for filenames - AES-128-CBC for contents and AES-128-CTS-CBC for filenames -- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames It is strongly recommended to use AES-256-XTS for contents encryption. AES-128-CBC was added only for low-powered embedded devices with crypto accelerators such as CAAM or CESA that do not support XTS. -Similarly, Speck128/256 support was only added for older or low-end -CPUs which cannot do AES fast enough -- especially ARM CPUs which have -NEON instructions but not the Cryptography Extensions -- and for which -it would not otherwise be feasible to use encryption at all. It is -not recommended to use Speck on CPUs that have AES instructions. -Speck support is only available if it has been enabled in the crypto -API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get -acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled. - New encryption modes can be added relatively easily, without changes to individual filesystems. However, authenticated encryption (AE) modes are not currently supported because of the difficulty of dealing diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 51c136c821bfb0a190e7daa67ebdc2e6faaf878b..d7dc9c818b830d45316a2b9a5a11275390939253 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible for untrusted layers like from a pen drive. +Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and +results in an error. + +(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is +given. + Sharing and copying layers -------------------------- @@ -296,7 +302,7 @@ beneath or above the path of another overlay lower layer path. Using an upper layer path and/or a workdir path that are already used by another overlay mount is not allowed and may fail with EBUSY. Using -partially overlapping paths is not allowed but will not fail with EBUSY. 
+partially overlapping paths is not allowed and may fail with EBUSY. If files are accessed from two overlayfs mounts which share or overlap the upper layer and/or workdir path the behavior of the overlay is undefined, though it will not result in a crash or deadlock. diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 7b7b845c490a4b187183b5acc7dead9e0141c45d..041b0ded8b4422a3bcaf330d43be7513e888968c 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -622,3 +622,15 @@ in your dentry operations instead. alloc_file_clone(file, flags, ops) does not affect any caller's references. On success you get a new struct file sharing the mount/dentry with the original, on failure - ERR_PTR(). +-- +[mandatory] + DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the + default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any + business doing so. +-- +[mandatory] + + [should've been added in 2016] stale comment in finish_open() + nonwithstanding, failure exits in ->atomic_open() instances should + *NOT* fput() the file, no matter what. Everything is handled by the + caller. diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 22b4b00dee31290bde37b3abc63f4a0807f973cc..7329e8a4281f7d06ef3d7413831ae54f1dab2e75 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -181,6 +181,7 @@ read the file /proc/PID/status: VmPTE: 20 kb VmSwap: 0 kB HugetlbPages: 0 kB + Reliable: 1608 KB CoreDumping: 0 Threads: 1 SigQ: 0/28578 @@ -254,6 +255,7 @@ Table 1-2: Contents of the status files (as of 4.8) VmSwap amount of swap used by anonymous private data (shmem swap usage is not included) HugetlbPages size of hugetlb memory portions + Reliable size of reliable memory used CoreDumping process's memory is currently being dumped (killing the process may lead to a corrupted core) Threads number of threads @@ -425,6 +427,7 @@ SwapPss: 0 kB KernelPageSize: 4 kB MMUPageSize: 4 kB Locked: 0 kB +THPeligible: 0 VmFlags: rd ex mr mw me dw the first of these lines shows the same information as is displayed for the @@ -462,6 +465,8 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. "SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this does not take into account swapped out page of underlying shmem objects. "Locked" indicates whether the mapping is locked in memory or not. +"THPeligible" indicates whether the mapping is eligible for THP pages - 1 if +true, 0 otherwise. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter encoded @@ -496,7 +501,9 @@ manner. The codes are the following: Note that there is no guarantee that every flag and associated mnemonic will be present in all further kernel releases. Things get changed, the flags may -be vanished or the reverse -- new added. +be vanished or the reverse -- new added. Interpretation of their meaning +might change in future as well. So each consumer of these flags has to +follow each specific kernel version for the exact semantic. This file is only present if the CONFIG_MMU kernel configuration option is enabled. 
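A minimal userspace sketch that pulls the per-mapping ``THPeligible`` field described above out of ``/proc/self/smaps``; the header-line detection is a crude heuristic that assumes mapping lines begin with a hex address range.

.. code-block:: C

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* A mapping header looks like "559e...-559e... r-xp ...": hex start, '-' before ' '. */
    static int is_vma_header(const char *line)
    {
            const char *dash = strchr(line, '-');
            const char *space = strchr(line, ' ');

            return isxdigit((unsigned char)line[0]) && dash && space && dash < space;
    }

    int main(void)
    {
            char line[512], header[512] = "";
            FILE *f = fopen("/proc/self/smaps", "r");

            if (!f) {
                    perror("/proc/self/smaps");
                    return 1;
            }

            while (fgets(line, sizeof(line), f)) {
                    if (is_vma_header(line))
                            snprintf(header, sizeof(header), "%s", line);
                    else if (!strncmp(line, "THPeligible:", 12))
                            printf("%s%s", header, line);   /* both lines keep their newlines */
            }

            fclose(f);
            return 0;
    }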
@@ -875,6 +882,11 @@ HardwareCorrupted: 0 kB AnonHugePages: 49152 kB ShmemHugePages: 0 kB ShmemPmdMapped: 0 kB +ReliableTotal: 7340032 kB +ReliableUsed: 418824 kB +ReliableTaskUsed: 418824 kB +ReliableBuddyMem: 418824 kB +ReliableShmem: 96 kB MemTotal: Total usable ram (i.e. physical ram minus a few reserved @@ -965,6 +977,12 @@ VmallocTotal: total size of vmalloc memory area VmallocChunk: largest contiguous block of vmalloc area which is free Percpu: Memory allocated to the percpu allocator used to back percpu allocations. This stat excludes the cost of metadata. +ReliableTotal: Total reliable memory size +ReliableUsed: The used amount of reliable memory +ReliableTaskUsed: Reliable memory used by special user tasks and global + init process +ReliableBuddyMem: Total mirrored memory size in buddy system +ReliableShmem: Reliable memory used by shmem .............................................................................. diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index a1426cabcef12c814ed3527e0d8663eddea8277b..2e38fafc1b6329ceef4dcf47b88b54e923c1a93d 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -211,12 +211,10 @@ Other notes: is 4096. - show() methods should return the number of bytes printed into the - buffer. This is the return value of scnprintf(). + buffer. -- show() must not use snprintf() when formatting the value to be - returned to user space. If you can guarantee that an overflow - will never happen you can use sprintf() otherwise you must use - scnprintf(). +- show() should only use sysfs_emit() or sysfs_emit_at() when formatting + the value to be returned to user space. - store() should return the number of bytes used from the buffer. If the entire buffer has been used, just return the count argument. diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt index d06e9a59a9f4a82964f39c3d93c0c5d557f81326..c72536fa78ae5db65997fa5396adc70dbe8de8d4 100644 --- a/Documentation/filesystems/tmpfs.txt +++ b/Documentation/filesystems/tmpfs.txt @@ -135,6 +135,21 @@ gid: The group id These options do not have any effect on remount. You can change these parameters with chmod(1), chown(1) and chgrp(1) on a mounted filesystem. +tmpfs has a mount option to select whether it will wrap at 32- or 64-bit inode +numbers: + +======= ======================== +inode64 Use 64-bit inode numbers +inode32 Use 32-bit inode numbers +======= ======================== + +On a 32-bit kernel, inode32 is implicit, and inode64 is refused at mount time. +On a 64-bit kernel, CONFIG_TMPFS_INODE64 sets the default. inode64 avoids the +possibility of multiple files with the same inode number on a single device; +but risks glibc failing with EOVERFLOW once 33-bit inode numbers are reached - +if a long-lived tmpfs is accessed by 32-bit applications so ancient that +opening a file larger than 2GiB fails with EINVAL. 
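The same inode64 behaviour can be requested from C through mount(2); a minimal sketch follows, with ``/mytmpfs`` as an illustrative mount point and error handling reduced to ``perror()``.

.. code-block:: C

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Equivalent to: mount -t tmpfs -o size=10G,inode64 tmpfs /mytmpfs */
            if (mount("tmpfs", "/mytmpfs", "tmpfs", 0, "size=10G,inode64")) {
                    perror("mount");   /* EINVAL here can mean inode64 was refused, e.g. 32-bit kernel */
                    return 1;
            }
            return 0;
    }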
+ So 'mount -t tmpfs -o size=10G,nr_inodes=10k,mode=700 tmpfs /mytmpfs' will give you tmpfs instance on /mytmpfs which can allocate 10GB @@ -147,3 +162,5 @@ Updated: Hugh Dickins, 4 June 2007 Updated: KOSAKI Motohiro, 16 Mar 2010 +Updated: + Chris Down, 13 July 2020 diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index a6c6a8af48a296cf9b7197c8f065370814efd90d..0fe9c0dd3269cb7ca941d185db7058b7b54a5893 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -857,6 +857,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *kiocb, bool spin); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); @@ -901,6 +902,8 @@ otherwise noted. write_iter: possibly asynchronous write with iov_iter as source + iopoll: called when aio wants to poll for completions on HIPRI iocbs + iterate: called when the VFS needs to read the directory contents iterate_shared: called when the VFS needs to read the directory contents diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt index c8656dd029a910251bc9f6ffec70781d3a093f90..958fff945304488d60c0558347ad78198366138b 100644 --- a/Documentation/hid/uhid.txt +++ b/Documentation/hid/uhid.txt @@ -160,7 +160,7 @@ them but you should handle them according to your needs. UHID_OUTPUT: This is sent if the HID device driver wants to send raw data to the I/O device on the interrupt channel. You should read the payload and forward it to - the device. The payload is of type "struct uhid_data_req". + the device. The payload is of type "struct uhid_output_req". This may be received even though you haven't received UHID_OPEN, yet. UHID_GET_REPORT: diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.txt index eb7a78aebb3801f43463d92d990d1efbf8e642e4..8bdefb41be3071c4349b0ccbd0a3a791a618a913 100644 --- a/Documentation/hwmon/hwmon-kernel-api.txt +++ b/Documentation/hwmon/hwmon-kernel-api.txt @@ -299,17 +299,25 @@ functions is used. The header file linux/hwmon-sysfs.h provides a number of useful macros to declare and use hardware monitoring sysfs attributes. -In many cases, you can use the exsting define DEVICE_ATTR to declare such -attributes. This is feasible if an attribute has no additional context. However, -in many cases there will be additional information such as a sensor index which -will need to be passed to the sysfs attribute handling function. +In many cases, you can use the exsting define DEVICE_ATTR or its variants +DEVICE_ATTR_{RW,RO,WO} to declare such attributes. This is feasible if an +attribute has no additional context. However, in many cases there will be +additional information such as a sensor index which will need to be passed +to the sysfs attribute handling function. SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 can be used to define attributes which need such additional context information. SENSOR_DEVICE_ATTR requires one additional argument, SENSOR_DEVICE_ATTR_2 requires two. -SENSOR_DEVICE_ATTR defines a struct sensor_device_attribute variable. -This structure has the following fields. 
+Simplified variants of SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 are available +and should be used if standard attribute permissions and function names are +feasible. Standard permissions are 0644 for SENSOR_DEVICE_ATTR[_2]_RW, +0444 for SENSOR_DEVICE_ATTR[_2]_RO, and 0200 for SENSOR_DEVICE_ATTR[_2]_WO. +Standard functions, similar to DEVICE_ATTR_{RW,RO,WO}, have _show and _store +appended to the provided function name. + +SENSOR_DEVICE_ATTR and its variants define a struct sensor_device_attribute +variable. This structure has the following fields. struct sensor_device_attribute { struct device_attribute dev_attr; @@ -320,8 +328,8 @@ You can use to_sensor_dev_attr to get the pointer to this structure from the attribute read or write function. Its parameter is the device to which the attribute is attached. -SENSOR_DEVICE_ATTR_2 defines a struct sensor_device_attribute_2 variable, -which is defined as follows. +SENSOR_DEVICE_ATTR_2 and its variants define a struct sensor_device_attribute_2 +variable, which is defined as follows. struct sensor_device_attribute_2 { struct device_attribute dev_attr; diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index d1ee484a787d1b476cf13bcf7d7b53ac084fb63e..ee9984f3586897c870bd42b854f5d883b245621e 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 @@ -36,6 +36,7 @@ Supported adapters: * Intel Cannon Lake (PCH) * Intel Cedar Fork (PCH) * Intel Ice Lake (PCH) + * Intel Comet Lake (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4 index aa959fd22450de6793e02d73f2fa657ac31c4bce..2703bc3acad07ba5e1e61b2607ae9c6954e2dfea 100644 --- a/Documentation/i2c/busses/i2c-piix4 +++ b/Documentation/i2c/busses/i2c-piix4 @@ -15,6 +15,8 @@ Supported adapters: http://support.amd.com/us/Embedded_TechDocs/44413.pdf * AMD Hudson-2, ML, CZ Datasheet: Not publicly available + * Hygon CZ + Datasheet: Not publicly available * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge Datasheet: Publicly available at the SMSC website http://www.smsc.com diff --git a/Documentation/index.rst b/Documentation/index.rst index 5db7e87c7cb1d615e6a361983f4ea545dd054c81..1cdc139adb40753b144fe17cbc0cf51c7d280170 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -104,6 +104,7 @@ implementation. :maxdepth: 2 sh/index + x86/index Filesystem Documentation ------------------------ diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 13a7c999c04ab761f81b0e84663b8f38e7bec622..a2ca3325c78cd2cf884d0bf00d9fe99237590275 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -108,7 +108,7 @@ Code Seq#(hex) Include File Comments 'C' 01-2F linux/capi.h conflict! 'C' F0-FF drivers/net/wan/cosa.h conflict! 'D' all arch/s390/include/asm/dasd.h -'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h +'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022 'D' 05 drivers/scsi/pmcraid.h 'E' all linux/input.h conflict! 'E' 00-0F xen/evtchn.h conflict! @@ -208,7 +208,6 @@ Code Seq#(hex) Include File Comments 'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver 'b' 00-FF conflict! bit3 vme host bridge -'c' all linux/cm4000_cs.h conflict! 'c' 00-7F linux/comstats.h conflict! 'c' 00-7F linux/coda.h conflict! 'c' 00-1F linux/chio.h conflict! 
@@ -290,7 +289,6 @@ Code Seq#(hex) Include File Comments 0x89 00-06 arch/x86/include/asm/sockios.h 0x89 0B-DF linux/sockios.h 0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range -0x89 E0-EF linux/dn.h PROTOPRIVATE range 0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range 0x8B all linux/wireless.h 0x8C 00-3F WiNRADiO driver @@ -346,3 +344,4 @@ Code Seq#(hex) Include File Comments 0xF6 all LTTng Linux Trace Toolkit Next Generation 0xFD all linux/dm-ioctl.h +0xFE all linux/isst_if.h diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index 8390c360d4b350c960f1eebaeaeec6012dacb9b5..7f48e48f3fd2724dcc0527b50de5d3ebc4504f16 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -11,6 +11,11 @@ modules.builtin This file lists all modules that are built into the kernel. This is used by modprobe to not fail when trying to load something builtin. +modules.builtin.modinfo +-------------------------------------------------- +This file contains modinfo from all modules that are built into the kernel. +Unlike modinfo of a separate module, all fields are prefixed with module name. + Environment variables diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst index e964074cd15b757a00dd4c3cd6e519b920f46879..b25e48afaa0877199953e0de0a9f0fcbd2676b91 100644 --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst @@ -16,10 +16,10 @@ CEC_RECEIVE, CEC_TRANSMIT - Receive or transmit a CEC message Synopsis ======== -.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg *argp ) +.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg \*argp ) :name: CEC_RECEIVE -.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg *argp ) +.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg \*argp ) :name: CEC_TRANSMIT Arguments @@ -272,6 +272,19 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). - The transmit failed after one or more retries. This status bit is mutually exclusive with :ref:`CEC_TX_STATUS_OK `. Other bits can still be set to explain which failures were seen. + * .. _`CEC-TX-STATUS-ABORTED`: + + - ``CEC_TX_STATUS_ABORTED`` + - 0x40 + - The transmit was aborted due to an HDMI disconnect, or the adapter + was unconfigured, or a transmit was interrupted, or the driver + returned an error when attempting to start a transmit. + * .. _`CEC-TX-STATUS-TIMEOUT`: + + - ``CEC_TX_STATUS_TIMEOUT`` + - 0x80 + - The transmit timed out. This should not normally happen and this + indicates a driver problem. .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}| @@ -300,6 +313,14 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV'). - The message was received successfully but the reply was ``CEC_MSG_FEATURE_ABORT``. This status is only set if this message was the reply to an earlier transmitted message. + * .. _`CEC-RX-STATUS-ABORTED`: + + - ``CEC_RX_STATUS_ABORTED`` + - 0x08 + - The wait for a reply to an earlier transmitted message was aborted + because the HDMI cable was disconnected, the adapter was unconfigured + or the :ref:`CEC_TRANSMIT ` that waited for a + reply was interrupted. 
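A minimal userspace sketch showing how the new transmit status bits are consumed: it sends the 'Image View On' message used in the example above and inspects ``msg.tx_status``. It assumes a uapi ``<linux/cec.h>`` recent enough to define ``CEC_TX_STATUS_ABORTED`` and ``CEC_TX_STATUS_TIMEOUT``; ``/dev/cec0`` is an illustrative device node.

.. code-block:: C

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    int main(void)
    {
            struct cec_msg msg;
            int fd = open("/dev/cec0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&msg, 0, sizeof(msg));
            msg.msg[0] = (0xf << 4) | 0x0;  /* initiator Unregistered -> destination TV */
            msg.msg[1] = 0x04;              /* Image View On opcode */
            msg.len = 2;

            if (ioctl(fd, CEC_TRANSMIT, &msg)) {
                    perror("CEC_TRANSMIT");
                    return 1;
            }

            if (msg.tx_status & CEC_TX_STATUS_ABORTED)
                    printf("transmit aborted (disconnect, unconfigure or interrupt)\n");
            if (msg.tx_status & CEC_TX_STATUS_TIMEOUT)
                    printf("transmit timed out (likely a driver problem)\n");
            if (msg.tx_status & CEC_TX_STATUS_OK)
                    printf("transmit OK\n");

            close(fd);
            return 0;
    }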
diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst index 1cedcfc043273d9fa16c5d39c25a29b4f8f3667a..386d6cf83e9cab74e741b2251c8e48e26f35418c 100644 --- a/Documentation/media/uapi/v4l/biblio.rst +++ b/Documentation/media/uapi/v4l/biblio.rst @@ -226,16 +226,6 @@ xvYCC :author: International Electrotechnical Commission (http://www.iec.ch) -.. _adobergb: - -AdobeRGB -======== - - -:title: Adobe© RGB (1998) Color Image Encoding Version 2005-05 - -:author: Adobe Systems Incorporated (http://www.adobe.com) - .. _oprgb: opRGB diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst index 410907fe9415e729be39d6ee9bfc978ea9278b33..f24615544792b2c20ee2325e815dfe52fb79e275 100644 --- a/Documentation/media/uapi/v4l/colorspaces-defs.rst +++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst @@ -51,8 +51,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum - See :ref:`col-rec709`. * - ``V4L2_COLORSPACE_SRGB`` - See :ref:`col-srgb`. - * - ``V4L2_COLORSPACE_ADOBERGB`` - - See :ref:`col-adobergb`. + * - ``V4L2_COLORSPACE_OPRGB`` + - See :ref:`col-oprgb`. * - ``V4L2_COLORSPACE_BT2020`` - See :ref:`col-bt2020`. * - ``V4L2_COLORSPACE_DCI_P3`` @@ -90,8 +90,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum - Use the Rec. 709 transfer function. * - ``V4L2_XFER_FUNC_SRGB`` - Use the sRGB transfer function. - * - ``V4L2_XFER_FUNC_ADOBERGB`` - - Use the AdobeRGB transfer function. + * - ``V4L2_XFER_FUNC_OPRGB`` + - Use the opRGB transfer function. * - ``V4L2_XFER_FUNC_SMPTE240M`` - Use the SMPTE 240M transfer function. * - ``V4L2_XFER_FUNC_NONE`` diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst index b5d551b9cc8f819fc7ee8c70299da3b307eed686..09fabf4cd4126b320c4e26d311e064cdaee17841 100644 --- a/Documentation/media/uapi/v4l/colorspaces-details.rst +++ b/Documentation/media/uapi/v4l/colorspaces-details.rst @@ -290,15 +290,14 @@ Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range 170M/BT.601. The Y'CbCr quantization is limited range. -.. _col-adobergb: +.. _col-oprgb: -Colorspace Adobe RGB (V4L2_COLORSPACE_ADOBERGB) +Colorspace opRGB (V4L2_COLORSPACE_OPRGB) =============================================== -The :ref:`adobergb` standard defines the colorspace used by computer -graphics that use the AdobeRGB colorspace. This is also known as the -:ref:`oprgb` standard. The default transfer function is -``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is +The :ref:`oprgb` standard defines the colorspace used by computer +graphics that use the opRGB colorspace. The default transfer function is +``V4L2_XFER_FUNC_OPRGB``. The default Y'CbCr encoding is ``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range. @@ -312,7 +311,7 @@ The chromaticities of the primary colors and the white reference are: .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| -.. flat-table:: Adobe RGB Chromaticities +.. 
flat-table:: opRGB Chromaticities :header-rows: 1 :stub-columns: 0 :widths: 1 1 2 diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions index ca9f0edc579e664a3cd18c3d7c6b6a45e9d65d0e..e420a39f1ebfe600f4a34bf5bc459ab4aac5aa1f 100644 --- a/Documentation/media/videodev2.h.rst.exceptions +++ b/Documentation/media/videodev2.h.rst.exceptions @@ -56,7 +56,8 @@ replace symbol V4L2_MEMORY_USERPTR :c:type:`v4l2_memory` # Documented enum v4l2_colorspace replace symbol V4L2_COLORSPACE_470_SYSTEM_BG :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_470_SYSTEM_M :c:type:`v4l2_colorspace` -replace symbol V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace` +replace symbol V4L2_COLORSPACE_OPRGB :c:type:`v4l2_colorspace` +replace define V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_BT2020 :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_DCI_P3 :c:type:`v4l2_colorspace` replace symbol V4L2_COLORSPACE_DEFAULT :c:type:`v4l2_colorspace` @@ -69,7 +70,8 @@ replace symbol V4L2_COLORSPACE_SRGB :c:type:`v4l2_colorspace` # Documented enum v4l2_xfer_func replace symbol V4L2_XFER_FUNC_709 :c:type:`v4l2_xfer_func` -replace symbol V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func` +replace symbol V4L2_XFER_FUNC_OPRGB :c:type:`v4l2_xfer_func` +replace define V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_DCI_P3 :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_DEFAULT :c:type:`v4l2_xfer_func` replace symbol V4L2_XFER_FUNC_NONE :c:type:`v4l2_xfer_func` diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index d3e5dd26db12d75bc09d25cacbf0f775003cd527..b020e6ce6dd49b6c50513953653ec4a5cf8ed7c4 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -191,11 +191,12 @@ ad_actor_sys_prio ad_actor_system In an AD system, this specifies the mac-address for the actor in - protocol packet exchanges (LACPDUs). The value cannot be NULL or - multicast. It is preferred to have the local-admin bit set for this - mac but driver does not enforce it. If the value is not given then - system defaults to using the masters' mac address as actors' system - address. + protocol packet exchanges (LACPDUs). The value cannot be a multicast + address. If the all-zeroes MAC is specified, bonding will internally + use the MAC of the bond itself. It is preferred to have the + local-admin bit set for this mac but driver does not enforce it. If + the value is not given then system defaults to using the masters' + mac address as actors' system address. This parameter has effect only in 802.3ad mode and is available through SysFs interface. @@ -706,9 +707,9 @@ num_unsol_na unsolicited IPv6 Neighbor Advertisements) to be issued after a failover event. As soon as the link is up on the new slave (possibly immediately) a peer notification is sent on the - bonding device and each VLAN sub-device. This is repeated at - each link monitor interval (arp_interval or miimon, whichever - is active) if the number is greater than 1. + bonding device and each VLAN sub-device. This is repeated at + the rate specified by peer_notif_delay if the number is + greater than 1. The valid range is 0 - 255; the default value is 1. These options affect only the active-backup mode. These options were added for @@ -727,6 +728,16 @@ packets_per_slave The valid range is 0 - 65535; the default value is 1. This option has effect only in balance-rr mode. 
+peer_notif_delay + + Specify the delay, in milliseconds, between each peer + notification (gratuitous ARP and unsolicited IPv6 Neighbor + Advertisement) when they are issued after a failover event. + This delay should be a multiple of the link monitor interval + (arp_interval or miimon, whichever is active). The default + value is 0 which means to match the value of the link monitor + interval. + primary A string (eth0, eth2, etc) specifying which slave is the diff --git a/Documentation/networking/decnet.txt b/Documentation/networking/decnet.txt deleted file mode 100644 index e12a4900cf72cb00b1ade4c0257a23c93d2d8f21..0000000000000000000000000000000000000000 --- a/Documentation/networking/decnet.txt +++ /dev/null @@ -1,232 +0,0 @@ - Linux DECnet Networking Layer Information - =========================================== - -1) Other documentation.... - - o Project Home Pages - http://www.chygwyn.com/ - Kernel info - http://linux-decnet.sourceforge.net/ - Userland tools - http://www.sourceforge.net/projects/linux-decnet/ - Status page - -2) Configuring the kernel - -Be sure to turn on the following options: - - CONFIG_DECNET (obviously) - CONFIG_PROC_FS (to see what's going on) - CONFIG_SYSCTL (for easy configuration) - -if you want to try out router support (not properly debugged yet) -you'll need the following options as well... - - CONFIG_DECNET_ROUTER (to be able to add/delete routes) - CONFIG_NETFILTER (will be required for the DECnet routing daemon) - - CONFIG_DECNET_ROUTE_FWMARK is optional - -Don't turn on SIOCGIFCONF support for DECnet unless you are really sure -that you need it, in general you won't and it can cause ifconfig to -malfunction. - -Run time configuration has changed slightly from the 2.4 system. If you -want to configure an endnode, then the simplified procedure is as follows: - - o Set the MAC address on your ethernet card before starting _any_ other - network protocols. - -As soon as your network card is brought into the UP state, DECnet should -start working. If you need something more complicated or are unsure how -to set the MAC address, see the next section. Also all configurations which -worked with 2.4 will work under 2.5 with no change. - -3) Command line options - -You can set a DECnet address on the kernel command line for compatibility -with the 2.4 configuration procedure, but in general it's not needed any more. -If you do st a DECnet address on the command line, it has only one purpose -which is that its added to the addresses on the loopback device. - -With 2.4 kernels, DECnet would only recognise addresses as local if they -were added to the loopback device. In 2.5, any local interface address -can be used to loop back to the local machine. Of course this does not -prevent you adding further addresses to the loopback device if you -want to. - -N.B. Since the address list of an interface determines the addresses for -which "hello" messages are sent, if you don't set an address on the loopback -interface then you won't see any entries in /proc/net/neigh for the local -host until such time as you start a connection. This doesn't affect the -operation of the local communications in any other way though. - -The kernel command line takes options looking like the following: - - decnet.addr=1,2 - -the two numbers are the node address 1,2 = 1.2 For 2.2.xx kernels -and early 2.3.xx kernels, you must use a comma when specifying the -DECnet address like this. 
For more recent 2.3.xx kernels, you may -use almost any character except space, although a `.` would be the most -obvious choice :-) - -There used to be a third number specifying the node type. This option -has gone away in favour of a per interface node type. This is now set -using /proc/sys/net/decnet/conf//forwarding. This file can be -set with a single digit, 0=EndNode, 1=L1 Router and 2=L2 Router. - -There are also equivalent options for modules. The node address can -also be set through the /proc/sys/net/decnet/ files, as can other system -parameters. - -Currently the only supported devices are ethernet and ip_gre. The -ethernet address of your ethernet card has to be set according to the DECnet -address of the node in order for it to be autoconfigured (and then appear in -/proc/net/decnet_dev). There is a utility available at the above -FTP sites called dn2ethaddr which can compute the correct ethernet -address to use. The address can be set by ifconfig either before or -at the time the device is brought up. If you are using RedHat you can -add the line: - - MACADDR=AA:00:04:00:03:04 - -or something similar, to /etc/sysconfig/network-scripts/ifcfg-eth0 or -wherever your network card's configuration lives. Setting the MAC address -of your ethernet card to an address starting with "hi-ord" will cause a -DECnet address which matches to be added to the interface (which you can -verify with iproute2). - -The default device for routing can be set through the /proc filesystem -by setting /proc/sys/net/decnet/default_device to the -device you want DECnet to route packets out of when no specific route -is available. Usually this will be eth0, for example: - - echo -n "eth0" >/proc/sys/net/decnet/default_device - -If you don't set the default device, then it will default to the first -ethernet card which has been autoconfigured as described above. You can -confirm that by looking in the default_device file of course. - -There is a list of what the other files under /proc/sys/net/decnet/ do -on the kernel patch web site (shown above). - -4) Run time kernel configuration - -This is either done through the sysctl/proc interface (see the kernel web -pages for details on what the various options do) or through the iproute2 -package in the same way as IPv4/6 configuration is performed. - -Documentation for iproute2 is included with the package, although there is -as yet no specific section on DECnet, most of the features apply to both -IP and DECnet, albeit with DECnet addresses instead of IP addresses and -a reduced functionality. - -If you want to configure a DECnet router you'll need the iproute2 package -since its the _only_ way to add and delete routes currently. Eventually -there will be a routing daemon to send and receive routing messages for -each interface and update the kernel routing tables accordingly. The -routing daemon will use netfilter to listen to routing packets, and -rtnetlink to update the kernels routing tables. - -The DECnet raw socket layer has been removed since it was there purely -for use by the routing daemon which will now use netfilter (a much cleaner -and more generic solution) instead. - -5) How can I tell if its working ? - -Here is a quick guide of what to look for in order to know if your DECnet -kernel subsystem is working. - - - Is the node address set (see /proc/sys/net/decnet/node_address) - - Is the node of the correct type - (see /proc/sys/net/decnet/conf//forwarding) - - Is the Ethernet MAC address of each Ethernet card set to match - the DECnet address. 
If in doubt use the dn2ethaddr utility available - at the ftp archive. - - If the previous two steps are satisfied, and the Ethernet card is up, - you should find that it is listed in /proc/net/decnet_dev and also - that it appears as a directory in /proc/sys/net/decnet/conf/. The - loopback device (lo) should also appear and is required to communicate - within a node. - - If you have any DECnet routers on your network, they should appear - in /proc/net/decnet_neigh, otherwise this file will only contain the - entry for the node itself (if it doesn't check to see if lo is up). - - If you want to send to any node which is not listed in the - /proc/net/decnet_neigh file, you'll need to set the default device - to point to an Ethernet card with connection to a router. This is - again done with the /proc/sys/net/decnet/default_device file. - - Try starting a simple server and client, like the dnping/dnmirror - over the loopback interface. With luck they should communicate. - For this step and those after, you'll need the DECnet library - which can be obtained from the above ftp sites as well as the - actual utilities themselves. - - If this seems to work, then try talking to a node on your local - network, and see if you can obtain the same results. - - At this point you are on your own... :-) - -6) How to send a bug report - -If you've found a bug and want to report it, then there are several things -you can do to help me work out exactly what it is that is wrong. Useful -information (_most_ of which _is_ _essential_) includes: - - - What kernel version are you running ? - - What version of the patch are you running ? - - How far though the above set of tests can you get ? - - What is in the /proc/decnet* files and /proc/sys/net/decnet/* files ? - - Which services are you running ? - - Which client caused the problem ? - - How much data was being transferred ? - - Was the network congested ? - - How can the problem be reproduced ? - - Can you use tcpdump to get a trace ? (N.B. Most (all?) versions of - tcpdump don't understand how to dump DECnet properly, so including - the hex listing of the packet contents is _essential_, usually the -x flag. - You may also need to increase the length grabbed with the -s flag. The - -e flag also provides very useful information (ethernet MAC addresses)) - -7) MAC FAQ - -A quick FAQ on ethernet MAC addresses to explain how Linux and DECnet -interact and how to get the best performance from your hardware. - -Ethernet cards are designed to normally only pass received network frames -to a host computer when they are addressed to it, or to the broadcast address. - -Linux has an interface which allows the setting of extra addresses for -an ethernet card to listen to. If the ethernet card supports it, the -filtering operation will be done in hardware, if not the extra unwanted packets -received will be discarded by the host computer. In the latter case, -significant processor time and bus bandwidth can be used up on a busy -network (see the NAPI documentation for a longer explanation of these -effects). - -DECnet makes use of this interface to allow running DECnet on an ethernet -card which has already been configured using TCP/IP (presumably using the -built in MAC address of the card, as usual) and/or to allow multiple DECnet -addresses on each physical interface. If you do this, be aware that if your -ethernet card doesn't support perfect hashing in its MAC address filter -then your computer will be doing more work than required. 
Some cards -will simply set themselves into promiscuous mode in order to receive -packets from the DECnet specified addresses. So if you have one of these -cards its better to set the MAC address of the card as described above -to gain the best efficiency. Better still is to use a card which supports -NAPI as well. - - -8) Mailing list - -If you are keen to get involved in development, or want to ask questions -about configuration, or even just report bugs, then there is a mailing -list that you can join, details are at: - -http://sourceforge.net/mail/?group_id=4993 - -9) Legal Info - -The Linux DECnet project team have placed their code under the GPL. The -software is provided "as is" and without warranty express or implied. -DECnet is a trademark of Compaq. This software is not a product of -Compaq. We acknowledge the help of people at Compaq in providing extra -documentation above and beyond what was previously publicly available. - -Steve Whitehouse - diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index fcd710f2cc7ade7f89ce81b138ff531bb54a0a11..3786f3b7274c595109e9181bb08375c5215ec3c8 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -14,6 +14,7 @@ Contents: dpaa2/index e100 e1000 + j1939 kapi z8530book msg_zerocopy diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 960de8fe3f401c7ce4ceee0d5d3d61cb46102319..563558de360e788400488961d42ca57de2fb203d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -96,6 +96,9 @@ route/max_size - INTEGER From linux kernel 3.6 onwards, this is deprecated for ipv4 as route cache is no longer used. + From linux kernel 6.3 onwards, this is deprecated for ipv6 + as garbage collection manages cached route entries. + neigh/default/gc_thresh1 - INTEGER Minimum number of entries to keep. Garbage collector will not purge entries if there are fewer than this number. @@ -228,6 +231,9 @@ tcp_allowed_congestion_control - STRING tcp_app_win - INTEGER Reserve max(window/2^tcp_app_win, mss) of window for application buffer. Value 0 is special, it means that nothing is reserved. + + Possible values are [0, 31], inclusive. + Default: 31 tcp_autocorking - BOOLEAN @@ -250,6 +256,14 @@ tcp_base_mss - INTEGER Path MTU discovery (MTU probing). If MTU probing is enabled, this is the initial MSS used by the connection. +tcp_min_snd_mss - INTEGER + TCP SYN and SYNACK messages usually advertise an ADVMSS option, + as described in RFC 1122 and RFC 6691. + If this ADVMSS option is smaller than tcp_min_snd_mss, + it is silently capped to tcp_min_snd_mss. + + Default : 48 (at least 8 bytes of payload per segment) + tcp_congestion_control - STRING Set the congestion control algorithm to be used for new connections. The algorithm "reno" is always available, but @@ -359,6 +373,7 @@ tcp_l3mdev_accept - BOOLEAN derived from the listen socket to be bound to the L3 domain in which the packets originated. Only valid when the kernel was compiled with CONFIG_NET_L3_MASTER_DEV. + Default: 0 (disabled) tcp_low_latency - BOOLEAN This is a legacy option, it has no effect anymore. @@ -410,6 +425,7 @@ tcp_min_rtt_wlen - INTEGER minimum RTT when it is moved to a longer path (e.g., due to traffic engineering). A longer window makes the filter more resistant to RTT inflations such as transient congestion. The unit is seconds. 
+ Possible values: 0 - 86400 (1 day) Default: 300 tcp_moderate_rcvbuf - BOOLEAN @@ -762,6 +778,7 @@ udp_l3mdev_accept - BOOLEAN being received regardless of the L3 domain in which they originated. Only valid when the kernel was compiled with CONFIG_NET_L3_MASTER_DEV. + Default: 0 (disabled) udp_mem - vector of 3 INTEGERs: min, pressure, max Number of pages allowed for queueing by all UDP sockets. @@ -788,6 +805,16 @@ udp_wmem_min - INTEGER total pages of UDP sockets exceed udp_mem pressure. The unit is byte. Default: 4K +RAW variables: + +raw_l3mdev_accept - BOOLEAN + Enabling this option allows a "global" bound socket to work + across L3 master domains (e.g., VRFs) with packets capable of + being received regardless of the L3 domain in which they + originated. Only valid when the kernel was compiled with + CONFIG_NET_L3_MASTER_DEV. + Default: 1 (enabled) + CIPSOv4 Variables: cipso_cache_enable - BOOLEAN @@ -878,7 +905,7 @@ ip_nonlocal_bind - BOOLEAN which can be quite useful - but may break some applications. Default: 0 -ip_dynaddr - BOOLEAN +ip_dynaddr - INTEGER If set non-zero, enables support for dynamic addresses. If set to a non-zero value larger than 1, a kernel log message will be printed when dynamic address rewriting @@ -925,12 +952,14 @@ icmp_ratelimit - INTEGER icmp_msgs_per_sec - INTEGER Limit maximal number of ICMP packets sent per second from this host. Only messages whose type matches icmp_ratemask (see below) are - controlled by this limit. + controlled by this limit. For security reasons, the precise count + of messages per second is randomized. Default: 1000 icmp_msgs_burst - INTEGER icmp_msgs_per_sec controls number of ICMP packets sent per second, while icmp_msgs_burst controls the burst size of these packets. + For security reasons, the precise burst size is randomized. Default: 50 icmp_ratemask - INTEGER @@ -2182,3 +2211,12 @@ max_dgram_qlen - INTEGER Default: 10 +/proc/sys/net/bonding/* Variables: + +broadcast_arp_or_nd - INTEGER + Control broadcasting ARP or ND messages to all slaves + + 0: Not broadcasting + 1: Broadcasting + + Default: 0 diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst new file mode 100644 index 0000000000000000000000000000000000000000..659e938977bf4281c1ff903ec4680b9ab1b1101d --- /dev/null +++ b/Documentation/networking/j1939.rst @@ -0,0 +1,422 @@ +.. SPDX-License-Identifier: (GPL-2.0 OR MIT) + +=================== +J1939 Documentation +=================== + +Overview / What Is J1939 +======================== + +SAE J1939 defines a higher layer protocol on CAN. It implements a more +sophisticated addressing scheme and extends the maximum packet size above 8 +bytes. Several derived specifications exist, which differ from the original +J1939 on the application level, like MilCAN A, NMEA2000 and especially +ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended +Transport Protocol) which is has been included in this implementation. This +results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB. + +Specifications used +------------------- + +* SAE J1939-21 : data link layer +* SAE J1939-81 : network management +* ISO 11783-6 : Virtual Terminal (Extended Transport Protocol) + +.. _j1939-motivation: + +Motivation +========== + +Given the fact there's something like SocketCAN with an API similar to BSD +sockets, we found some reasons to justify a kernel implementation for the +addressing and transport methods used by J1939. 
+ +* **Addressing:** when a process on an ECU communicates via J1939, it should + not necessarily know its source address. Although at least one process per + ECU should know the source address. Other processes should be able to reuse + that address. This way, address parameters for different processes + cooperating for the same ECU, are not duplicated. This way of working is + closely related to the UNIX concept where programs do just one thing, and do + it well. + +* **Dynamic addressing:** Address Claiming in J1939 is time critical. + Furthermore data transport should be handled properly during the address + negotiation. Putting this functionality in the kernel eliminates it as a + requirement for _every_ user space process that communicates via J1939. This + results in a consistent J1939 bus with proper addressing. + +* **Transport:** both TP & ETP reuse some PGNs to relay big packets over them. + Different processes may thus use the same TP & ETP PGNs without actually + knowing it. The individual TP & ETP sessions _must_ be serialized + (synchronized) between different processes. The kernel solves this problem + properly and eliminates the serialization (synchronization) as a requirement + for _every_ user space process that communicates via J1939. + +J1939 defines some other features (relaying, gateway, fast packet transport, +...). In-kernel code for these would not contribute to protocol stability. +Therefore, these parts are left to user space. + +The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939 +user space library operating on CAN raw sockets will still operate properly. +Since such library does not communicate with the in-kernel implementation, care +must be taken that these two do not interfere. In practice, this means they +cannot share ECU addresses. A single ECU (or virtual ECU) address is used by +the library exclusively, or by the in-kernel system exclusively. + +J1939 concepts +============== + +PGN +--- + +The PGN (Parameter Group Number) is a number to identify a packet. The PGN +is composed as follows: +1 bit : Reserved Bit +1 bit : Data Page +8 bits : PF (PDU Format) +8 bits : PS (PDU Specific) + +In J1939-21 distinction is made between PDU1 format (where PF < 240) and PDU2 +format (where PF >= 240). Furthermore, when using PDU2 format, the PS-field +contains a so-called Group Extension, which is part of the PGN. When using PDU2 +format, the Group Extension is set in the PS-field. + +On the other hand, when using PDU1 format, the PS-field contains a so-called +Destination Address, which is _not_ part of the PGN. When communicating a PGN +from user space to kernel (or visa versa) and PDU2 format is used, the PS-field +of the PGN shall be set to zero. The Destination Address shall be set +elsewhere. + +Regarding PGN mapping to 29-bit CAN identifier, the Destination Address shall +be get/set from/to the appropriate bits of the identifier by the kernel. + + +Addressing +---------- + +Both static and dynamic addressing methods can be used. + +For static addresses, no extra checks are made by the kernel, and provided +addresses are considered right. This responsibility is for the OEM or system +integrator. + +For dynamic addressing, so-called Address Claiming, extra support is foreseen +in the kernel. In J1939 any ECU is known by it's 64-bit NAME. At the moment of +a successful address claim, the kernel keeps track of both NAME and source +address being claimed. This serves as a base for filter schemes. 
By default,
+packets with a destination that is not local will be rejected.
+
+Mixed mode packets (from a static to a dynamic address or vice versa) are
+allowed. The BSD sockets define separate API calls for getting/setting the
+local & remote address and are applicable for J1939 sockets.
+
+Filtering
+---------
+
+J1939 defines white list filters per socket that a user can set in order to
+receive a subset of the J1939 traffic. Filtering can be based on:
+
+* SA
+* SOURCE_NAME
+* PGN
+
+When multiple filters are in place for a single socket, and a packet comes in
+that matches several of those filters, the packet is only received once for
+that socket.
+
+How to Use J1939
+================
+
+API Calls
+---------
+
+On CAN, you first need to open a socket for communicating over a CAN network.
+To use J1939, #include <linux/can/j1939.h>. From there, <linux/can.h> will be
+included too. To open a socket, use:
+
+.. code-block:: C
+
+    s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
+
+J1939 does use SOCK_DGRAM sockets. In the J1939 specification, connections are
+mentioned in the context of transport protocol sessions. These still deliver
+packets to the other end (using several CAN packets). SOCK_STREAM is not
+supported.
+
+After the successful creation of the socket, you would normally use the bind(2)
+and/or connect(2) system call to bind the socket to a CAN interface. After
+binding and/or connecting the socket, you can read(2) and write(2) from/to the
+socket or use send(2), sendto(2), sendmsg(2) and the recv*() counterpart
+operations on the socket as usual. There are also J1939 specific socket options
+described below.
+
+In order to send data, a bind(2) must have been successful. bind(2) assigns a
+local address to a socket.
+
+Different from CAN is that the payload data is just the data that gets sent,
+without its header info. The header info is derived from the sockaddr supplied
+to bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will
+result in a packet with 4 bytes.
+
+The sockaddr structure has extensions for use with J1939 as specified below:
+
+.. code-block:: C
+
+    struct sockaddr_can {
+        sa_family_t can_family;
+        int can_ifindex;
+        union {
+            struct {
+                __u64 name;
+                /* pgn:
+                 * 8 bit: PS in PDU2 case, else 0
+                 * 8 bit: PF
+                 * 1 bit: DP
+                 * 1 bit: reserved
+                 */
+                __u32 pgn;
+                __u8 addr;
+            } j1939;
+        } can_addr;
+    };
+
+can_family & can_ifindex serve the same purpose as for other SocketCAN sockets.
+
+can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are
+specified above.
+
+can_addr.j1939.name contains the 64-bit J1939 NAME.
+
+can_addr.j1939.addr contains the address.
+
+The bind(2) system call assigns the local address, i.e. the source address when
+sending packets. If a PGN during bind(2) is set, it is used as an RX filter.
+I.e. only packets with a matching PGN are received. If an ADDR or NAME is set
+it is used as a receive filter, too. It will match the destination NAME or ADDR
+of the incoming packet. The NAME filter will work only if appropriate Address
+Claiming for this name was done on the CAN bus and registered/cached by the
+kernel.
+
+On the other hand connect(2) assigns the remote address, i.e. the destination
+address. The PGN from connect(2) is used as the default PGN when sending
+packets. If ADDR or NAME is set it will be used as the default destination ADDR
+or NAME. Further, a set ADDR or NAME during connect(2) is used as a receive
+filter. It will match the source NAME or ADDR of the incoming packet.
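+
+As an illustration only (the interface name "can0", the addresses 0x80 and
+0x40, and PGN 0x12300 below are arbitrary example values, not values taken
+from the specification), a minimal bind(2)/connect(2) sequence for a
+statically addressed socket could look like this:
+
+.. code-block:: C
+
+    struct sockaddr_can local = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .addr = 0x80,           /* our source address */
+            .pgn = J1939_NO_PGN,    /* no PGN RX filter */
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+    struct sockaddr_can remote = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .addr = 0x40,           /* default destination address */
+            .pgn = 0x12300,         /* default PGN when sending */
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+
+    bind(sock, (struct sockaddr *)&local, sizeof(local));
+    connect(sock, (struct sockaddr *)&remote, sizeof(remote));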
+ +Both write(2) and send(2) will send a packet with local address from bind(2) and +the remote address from connect(2). Use sendto(2) to overwrite the destination +address. + +If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and +the corresponding ADDR is used. If can_addr.j1939.name is not set (== 0), +can_addr.j1939.addr is used. + +When creating a socket, reasonable defaults are set. Some options can be +modified with setsockopt(2) & getsockopt(2). + +RX path related options: + +- SO_J1939_FILTER - configure array of filters +- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2) + +By default no broadcast packets can be send or received. To enable sending or +receiving broadcast packets use the socket option SO_BROADCAST: + +.. code-block:: C + + int value = 1; + setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value)); + +The following diagram illustrates the RX path: + +.. code:: + + +--------------------+ + | incoming packet | + +--------------------+ + | + V + +--------------------+ + | SO_J1939_PROMISC? | + +--------------------+ + | | + no | | yes + | | + .---------' `---------. + | | + +---------------------------+ | + | bind() + connect() + | | + | SOCK_BROADCAST filter | | + +---------------------------+ | + | | + |<---------------------' + V + +---------------------------+ + | SO_J1939_FILTER | + +---------------------------+ + | + V + +---------------------------+ + | socket recv() | + +---------------------------+ + +TX path related options: +SO_J1939_SEND_PRIO - change default send priority for the socket + +Message Flags during send() and Related System Calls +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently +supported flags are: + +* MSG_DONTWAIT, i.e. non-blocking operation. + +recvmsg(2) +^^^^^^^^^ + +In most cases recvmsg(2) is needed if you want to extract more information than +recvfrom(2) can provide. For example package priority and timestamp. The +Destination Address, name and packet priority (if applicable) are attached to +the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3) macros, +with cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR, +SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for +priority and dst_addr, and uint64_t for dst_name. + +.. code-block:: C + + uint8_t priority, dst_addr; + uint64_t dst_name; + + for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) { + switch (cmsg->cmsg_level) { + case SOL_CAN_J1939: + if (cmsg->cmsg_type == SCM_J1939_DEST_ADDR) + dst_addr = *CMSG_DATA(cmsg); + else if (cmsg->cmsg_type == SCM_J1939_DEST_NAME) + memcpy(&dst_name, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0)); + else if (cmsg->cmsg_type == SCM_J1939_PRIO) + priority = *CMSG_DATA(cmsg); + break; + } + } + +Dynamic Addressing +------------------ + +Distinction has to be made between using the claimed address and doing an +address claim. To use an already claimed address, one has to fill in the +j1939.name member and provide it to bind(2). If the name had claimed an address +earlier, all further messages being sent will use that address. And the +j1939.addr member will be ignored. + +An exception on this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim +Address" message and the kernel will use the j1939.addr member for that PGN if +necessary. + +To claim an address following code example can be used: + +.. 
code-block:: C
+
+    struct sockaddr_can baddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = name,
+            .addr = J1939_IDLE_ADDR,
+            .pgn = J1939_NO_PGN,    /* to disable bind() rx filter for PGN */
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+
+    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
+
+    /* for Address Claiming broadcast must be allowed */
+    int value = 1;
+    setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
+
+    /* configure an advanced RX filter with the PGNs needed for Address Claiming */
+    const struct j1939_filter filt[] = {
+        {
+            .pgn = J1939_PGN_ADDRESS_CLAIMED,
+            .pgn_mask = J1939_PGN_PDU1_MAX,
+        }, {
+            .pgn = J1939_PGN_REQUEST,
+            .pgn_mask = J1939_PGN_PDU1_MAX,
+        }, {
+            .pgn = J1939_PGN_ADDRESS_COMMANDED,
+            .pgn_mask = J1939_PGN_MAX,
+        },
+    };
+
+    setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));
+
+    uint64_t dat = htole64(name);
+    const struct sockaddr_can saddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .pgn = J1939_PGN_ADDRESS_CLAIMED,
+            .addr = J1939_NO_ADDR,
+        },
+    };
+
+    /* Afterwards do a sendto(2) with the data set to the NAME (Little Endian).
+     * If the NAME provided does not match the j1939.name provided to bind(2),
+     * EPROTO will be returned.
+     */
+    sendto(sock, &dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr));
+
+If no one else contests the address claim within 250ms after transmission, the
+kernel marks the NAME-SA assignment as valid. The valid assignment will be kept
+among other valid NAME-SA assignments. From that point, any socket bound to the
+NAME can send packets.
+
+If another ECU claims the address, the kernel will mark the NAME-SA expired.
+No socket bound to the NAME can send packets (other than address claims). To
+claim another address, some socket bound to the NAME must bind(2) again, but
+with only j1939.addr changed to the new SA, and must then send a valid address
+claim packet. This restarts the state machine in the kernel (and any other
+participant on the bus) for this NAME.
+
+can-utils also includes the jacd tool, which can be used as a code example or
+as a default Address Claiming daemon.
+
+Send Examples
+-------------
+
+Static Addressing
+^^^^^^^^^^^^^^^^^
+
+This example will send a PGN (0x12300) from SA 0x20 to DA 0x30.
+
+Bind:
+
+.. code-block:: C
+
+    struct sockaddr_can baddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .addr = 0x20,
+            .pgn = J1939_NO_PGN,
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+
+    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
+
+Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was called,
+at this point we can use only sendto(2) or sendmsg(2).
+
+Send:
+
+.. code-block:: C
+
+    const struct sockaddr_can saddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .pgn = 0x12300,
+            .addr = 0x30,
+        },
+    };
+
+    sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr));
diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
index 54128c50d508ef27e5c6f2026fc5dddd0df47ead..b01c91893481fd44e42835ced658773778ac11ed 100644
--- a/Documentation/networking/nf_flowtable.txt
+++ b/Documentation/networking/nf_flowtable.txt
@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
table inet x { flowtable f { - hook ingress priority 0 devices = { eth0, eth1 }; + hook ingress priority 0; devices = { eth0, eth1 }; } chain y { type filter hook forward priority 0; policy accept; diff --git a/Documentation/networking/sssnic.rst b/Documentation/networking/sssnic.rst new file mode 100644 index 0000000000000000000000000000000000000000..624baebea4700eabd59538743df799162425c972 --- /dev/null +++ b/Documentation/networking/sssnic.rst @@ -0,0 +1,75 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================================================== +Linux Kernel Driver for 3SNIC Intelligent NIC family +==================================================== + +Contents +======== + +- `Overview`_ +- `Supported PCI vendor ID/device IDs`_ +- `Supported features`_ +- `Product specification`_ +- `Support`_ + +Overview: +========= +SSSNIC is a network interface card that can meet the demand of a range +of application scenarios,such as the Data Center Area,cloud computing +and Financial industry,etc. + +The construction of SSSNIC card facilities mainly depends on servers and +switches. 3S910, 920 and 930 are PCIe standard cards adapted to servers, +which provide extended external business interfaces for servers. + +The driver supports a range of link-speed devices (100GE (40GE +compatible) and 25GE (10GE compatible)).A negotiated and extendable +feature set also supported. + +Supported PCI vendor ID/device IDs: +=================================== + +1f3f:9020 - SSSNIC PF + +Supported features: +=================== + +1. Support single-root I/O virtualization (SR-IOV) +2. Support virtual machine multi queue (VMMQ) +3. Support receive side scaling (RSS) +4. Support physical function (PF) passthrough VMs +5. Support the PF promiscuous mode,unicast or multicast MAC filtering, and +all multicast mode +6. Support IPv4/IPv6, checksum offload,TCP Segmentation Offload (TSO), and +Large Receive Offload (LRO) +7. Support in-band one-click logs collection +8. Support loopback tests +9. Support port location indicators +10. Support firmware update and active +11. Support online log collection +12. Network card information query: Gemini information, version, +Optical module information, tx/rx queue information, +Serdes information, bond information, etc +13. Support Relevant parameter settings: tx speed limit, +port rate, port adaptive mode, setting lro aggregation +time and number, Factory reset, etc + +Product specification +===================== + + =================== ======= ============================= =============================================== + PCI ID (pci.ids) OEM Product PCIe port + =================== ======= ============================= =============================================== + 1F3F:9020 3SNIC 3S910(2 x 25GE SFP28 ports) PCIe Gen3 x8(compatible with Gen2/ Gen1) + 1F3F:9020 3SNIC 3S920(4 x 25GE SFP28 ports) PCIe Gen4 x16, compatible with Gen3/ Gen2/ Gen1 + 1F3F:9020 3SNIC 3S930(2 x 100GE QSFP28 ports) PCIe Gen4 x16, compatible with Gen3/ Gen2/ Gen1 + =================== ======= ============================= =============================================== + + +Support +======= + +If an issue is identified with the released source code on the supported kernel +with a supported adapter, email the specific information related to the issue to +https://www.3snic.com. 
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt index 8ff7b4c8f91bc45da4f4c50792e0dc5587b62875..a5f103b083a0a02f6e33f79c3fe7f90da24aa589 100644 --- a/Documentation/networking/vrf.txt +++ b/Documentation/networking/vrf.txt @@ -103,19 +103,33 @@ VRF device: or to specify the output device using cmsg and IP_PKTINFO. +By default the scope of the port bindings for unbound sockets is +limited to the default VRF. That is, it will not be matched by packets +arriving on interfaces enslaved to an l3mdev and processes may bind to +the same port if they bind to an l3mdev. + TCP & UDP services running in the default VRF context (ie., not bound to any VRF device) can work across all VRF domains by enabling the tcp_l3mdev_accept and udp_l3mdev_accept sysctl options: + sysctl -w net.ipv4.tcp_l3mdev_accept=1 sysctl -w net.ipv4.udp_l3mdev_accept=1 +These options are disabled by default so that a socket in a VRF is only +selected for packets in that VRF. There is a similar option for RAW +sockets, which is enabled by default for reasons of backwards compatibility. +This is so as to specify the output device with cmsg and IP_PKTINFO, but +using a socket not bound to the corresponding VRF. This allows e.g. older ping +implementations to be run with specifying the device but without executing it +in the VRF. This option can be disabled so that packets received in a VRF +context are only handled by a raw socket bound to the VRF, and packets in the +default VRF are only handled by a socket not bound to any VRF: + + sysctl -w net.ipv4.raw_l3mdev_accept=0 + netfilter rules on the VRF device can be used to limit access to services running in the default VRF context as well. -The default VRF does not have limited scope with respect to port bindings. -That is, if a process does a wildcard bind to a port in the default VRF it -owns the port across all VRF domains within the network namespace. 
- ################################################################################ Using iproute2 for VRFs diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst index 633be1043690dde2bbd45a09387843cdb5e13b84..2ba1f0809a1cefd5dfab03a667310b3558184ee8 100644 --- a/Documentation/process/magic-number.rst +++ b/Documentation/process/magic-number.rst @@ -89,7 +89,6 @@ AX25_MAGIC 0x5316 ax_disp ``drivers/net/mk TTY_MAGIC 0x5401 tty_struct ``include/linux/tty.h`` MGSL_MAGIC 0x5401 mgsl_info ``drivers/char/synclink.c`` TTY_DRIVER_MAGIC 0x5402 tty_driver ``include/linux/tty_driver.h`` -MGSLPC_MAGIC 0x5402 mgslpc_info ``drivers/char/pcmcia/synclink_cs.c`` TTY_LDISC_MAGIC 0x5403 tty_ldisc ``include/linux/tty_ldisc.h`` USB_SERIAL_MAGIC 0x6702 usb_serial ``drivers/usb/serial/usb-serial.h`` FULL_DUPLEX_MAGIC 0x6969 ``drivers/net/ethernet/dec/tulip/de2104x.c`` diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 0de6f6145cc6deb10f862b139c0d2ae0add29199..7ba8cd567f843cc7126d6f9eab9b56c67bb82078 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree - If the patch covers files in net/ or drivers/net please follow netdev stable submission guidelines as described in :ref:`Documentation/networking/netdev-FAQ.rst ` + after first checking the stable networking queue at + https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive= + to ensure the requested patch is not already queued up. - Security patches should not be handled (solely) by the -stable review process but should follow the procedures in :ref:`Documentation/admin-guide/security-bugs.rst `. diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt index 6c42c75103eb6db715cf8a03b49e85b38070e0ae..6361fb01c9c1e836abb3616de36f8ccf3bf86561 100644 --- a/Documentation/robust-futexes.txt +++ b/Documentation/robust-futexes.txt @@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have the new syscalls yet. Architectures need to implement the new futex_atomic_cmpxchg_inatomic() -inline function before writing up the syscalls (that function returns --ENOSYS right now). +inline function before writing up the syscalls. diff --git a/Documentation/scheduler/sched-bwc.txt b/Documentation/scheduler/sched-bwc.txt index f6b1873f68abc695d0e10ddac6e94c4b4042bda8..de583fbbfe4238e782352d0534a3030fe459a9a8 100644 --- a/Documentation/scheduler/sched-bwc.txt +++ b/Documentation/scheduler/sched-bwc.txt @@ -90,6 +90,51 @@ There are two ways in which a group may become throttled: In case b) above, even though the child may have runtime remaining it will not be allowed to until the parent's runtime is refreshed. +CFS Bandwidth Quota Caveats +--------------------------- +Once a slice is assigned to a cpu it does not expire. However all but 1ms of +the slice may be returned to the global pool if all threads on that cpu become +unrunnable. This is configured at compile time by the min_cfs_rq_runtime +variable. This is a performance tweak that helps prevent added contention on +the global lock. + +The fact that cpu-local slices do not expire results in some interesting corner +cases that should be understood. 
+ +For cgroup cpu constrained applications that are cpu limited this is a +relatively moot point because they will naturally consume the entirety of their +quota as well as the entirety of each cpu-local slice in each period. As a +result it is expected that nr_periods roughly equal nr_throttled, and that +cpuacct.usage will increase roughly equal to cfs_quota_us in each period. + +For highly-threaded, non-cpu bound applications this non-expiration nuance +allows applications to briefly burst past their quota limits by the amount of +unused slice on each cpu that the task group is running on (typically at most +1ms per cpu or as defined by min_cfs_rq_runtime). This slight burst only +applies if quota had been assigned to a cpu and then not fully used or returned +in previous periods. This burst amount will not be transferred between cores. +As a result, this mechanism still strictly limits the task group to quota +average usage, albeit over a longer time window than a single period. This +also limits the burst ability to no more than 1ms per cpu. This provides +better more predictable user experience for highly threaded applications with +small quota limits on high core count machines. It also eliminates the +propensity to throttle these applications while simultanously using less than +quota amounts of cpu. Another way to say this, is that by allowing the unused +portion of a slice to remain valid across periods we have decreased the +possibility of wastefully expiring quota on cpu-local silos that don't need a +full slice's amount of cpu time. + +The interaction between cpu-bound and non-cpu-bound-interactive applications +should also be considered, especially when single core usage hits 100%. If you +gave each of these applications half of a cpu-core and they both got scheduled +on the same CPU it is theoretically possible that the non-cpu bound application +will use up to 1ms additional quota in some periods, thereby preventing the +cpu-bound application from fully using its quota by that same amount. In these +instances it will be up to the CFS algorithm (see sched-design-CFS.rst) to +decide which application is chosen to run, as they will both be runnable and +have remaining quota. This runtime discrepancy will be made up in the following +periods when the interactive application idles. + Examples -------- 1. Limit a group to 1 CPU worth of runtime. diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c index e4219139386ae805575a7c1cae5dc83f5b3e52ca..7238b355919c757cc7b8bc38b905470b1a69a805 100644 --- a/Documentation/scheduler/sched-pelt.c +++ b/Documentation/scheduler/sched-pelt.c @@ -20,7 +20,8 @@ void calc_runnable_avg_yN_inv(void) int i; unsigned int x; - printf("static const u32 runnable_avg_yN_inv[] = {"); + /* To silence -Wunused-but-set-variable warnings. */ + printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {"); for (i = 0; i < HALFLIFE; i++) { x = ((1UL<<32)-1)*pow(y, i); diff --git a/Documentation/scsi/hisi_raid.rst b/Documentation/scsi/hisi_raid.rst new file mode 100644 index 0000000000000000000000000000000000000000..523a6763a7fda0e871ccd12104b3fe3921d0e072 --- /dev/null +++ b/Documentation/scsi/hisi_raid.rst @@ -0,0 +1,84 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================================== +hisi_raid - HUAWEI SCSI RAID Controller driver +============================================== + +This file describes the hisi_raid SCSI driver for HUAWEI +RAID controllers. 
The hisi_raid driver is the first +generation RAID driver. + +For hisi_raid controller support, enable the hisi_raid driver +when configuring the kernel. + +hisi_raid specific entries in /sys +================================= + +hisi_raid host attributes +------------------------ + - /sys/class/scsi_host/host*/csts_pp + - /sys/class/scsi_host/host*/csts_shst + - /sys/class/scsi_host/host*/csts_cfs + - /sys/class/scsi_host/host*/csts_rdy + - /sys/class/scsi_host/host*/fw_version + + The host csts_pp attribute is a read only attribute. This attribute + indicates whether the controller is processing commands. If this attribute + is set to ‘1’, then the controller is processing commands normally. If + this attribute is cleared to ‘0’, then the controller has temporarily stopped + processing commands in order to handle an event (e.g., firmware activation). + + The host csts_shst attribute is a read only attribute. This attribute + indicates status of shutdown processing.The shutdown status values are defined + as: + ====== ============================== + Value Definition + ====== ============================== + 00b Normal operation + 01b Shutdown processing occurring + 10b Shutdown processing complete + 11b Reserved + ====== ============================== + The host csts_cfs attribute is a read only attribute. This attribute is set to + ’1’ when a fatal controller error occurred that could not be communicated in the + appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller + error has not occurred. + + The host csts_rdy attribute is a read only attribute. This attribute is set to + ‘1’ when the controller is ready to process submission queue entries. + + The fw_version attribute is read-only and will return the driver version and the + controller firmware version. + +hisi_raid scsi device attributes +------------------------------ + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync + + The device raid_level attribute is a read only attribute. This attribute indicates + RAID level of scsi device(will dispaly "NA" if scsi device is not virtual disk type). + + The device raid_state attribute is read-only and indicates RAID status of scsi + device(will dispaly "NA" if scsi device is not virtual disk type). + + The device raid_resync attribute is read-only and indicates RAID rebuild processing + of scsi device(will dispaly "NA" if scsi device is not virtual disk type). + +Supported devices +================= + + =================== ======= ======================================= + PCI ID (pci.ids) OEM Product + =================== ======= ======================================= + 19E5:3858 HUAWEI SP186-M-8i(HBA:8Ports) + 19E5:3858 HUAWEI SP186-M-16i(HBA:16Ports) + 19E5:3858 HUAWEI SP186-M-32i(HBA:32Ports) + 19E5:3858 HUAWEI SP186-M-40i(HBA:40Ports) + 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,2G cache) + 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,4G cache) + 19E5:3758 HUAWEI SP686C-MH-32i(RAID:32Ports,4G cache) + 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,2G cache) + 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,4G cache) + =================== ======= ======================================= + diff --git a/Documentation/scsi/sssraid.rst b/Documentation/scsi/sssraid.rst new file mode 100644 index 0000000000000000000000000000000000000000..d56e5f7c478cd51acf314233b491f75b74948519 --- /dev/null +++ b/Documentation/scsi/sssraid.rst @@ -0,0 +1,83 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +============================================== +SSSRAID - 3SNIC SCSI RAID Controller driver +============================================== + +This file describes the SSSRAID SCSI driver for 3SNIC +(http://www.3snic.com) RAID controllers. The SSSRAID +driver is the first generation RAID driver for 3SNIC Corp. + +For 3SNIC SSSRAID controller support, enable the SSSRAID driver +when configuring the kernel. + +SSSRAID specific entries in /sys +================================= + +SSSRAID host attributes +------------------------ + - /sys/class/scsi_host/host*/csts_pp + - /sys/class/scsi_host/host*/csts_shst + - /sys/class/scsi_host/host*/csts_cfs + - /sys/class/scsi_host/host*/csts_rdy + - /sys/class/scsi_host/host*/fw_version + + The host csts_pp attribute is a read only attribute. This attribute + indicates whether the controller is processing commands. If this attribute + is set to ‘1’, then the controller is processing commands normally. If + this attribute is cleared to ‘0’, then the controller has temporarily stopped + processing commands in order to handle an event (e.g., firmware activation). + + The host csts_shst attribute is a read only attribute. This attribute + indicates status of shutdown processing.The shutdown status values are defined + as: + ====== ============================== + Value Definition + ====== ============================== + 00b Normal operation + 01b Shutdown processing occurring + 10b Shutdown processing complete + 11b Reserved + ====== ============================== + The host csts_cfs attribute is a read only attribute. This attribute is set to + ’1’ when a fatal controller error occurred that could not be communicated in the + appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller + error has not occurred. + + The host csts_rdy attribute is a read only attribute. This attribute is set to + ‘1’ when the controller is ready to process submission queue entries. + + The fw_version attribute is read-only and will return the driver version and the + controller firmware version. + +SSSRAID scsi device attributes +------------------------------ + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state + - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync + + The device raid_level attribute is a read only attribute. This attribute indicates + RAID level of scsi device(will dispaly "NA" if scsi device is not virtual disk type). + + The device raid_state attribute is read-only and indicates RAID status of scsi + device(will dispaly "NA" if scsi device is not virtual disk type). + + The device raid_resync attribute is read-only and indicates RAID rebuild processing + of scsi device(will dispaly "NA" if scsi device is not virtual disk type). 
+ +Supported devices +================= + + =================== ======= ======================================= + PCI ID (pci.ids) OEM Product + =================== ======= ======================================= + 1F3F:2100 3SNIC 3S510(HBA:8Ports,1G DDR) + 1F3F:2100 3SNIC 3S520(HBA:16Ports,1G DDR) + 1F3F:2100 3SNIC 3S530(HBA:32Ports,1G DDR) + 1F3F:2100 3SNIC 3S540(HBA:40Ports,1G DDR) + 1F3F:2200 3SNIC 3S580(RAID:16Ports,2G cache) + 1F3F:2200 3SNIC 3S585(RAID:16Ports,4G cache) + 1F3F:2200 3SNIC 3S590(RAID:32Ports,4G cache) + 1F3F:2200 3SNIC 3S5A0(RAID:40Ports,2G cache) + 1F3F:2200 3SNIC 3S5A5(RAID:40Ports,4G cache) + =================== ======= ======================================= diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py index 9d0a7f08f93bfc046b54e34a42a901b3ecc6f33e..1159405cb920cef29c2f6f0128180641d3df7bc3 100644 --- a/Documentation/sphinx/kerneldoc.py +++ b/Documentation/sphinx/kerneldoc.py @@ -37,7 +37,19 @@ import glob from docutils import nodes, statemachine from docutils.statemachine import ViewList from docutils.parsers.rst import directives, Directive -from sphinx.ext.autodoc import AutodocReporter + +# +# AutodocReporter is only good up to Sphinx 1.7 +# +import sphinx + +Use_SSI = sphinx.__version__[:3] >= '1.7' +if Use_SSI: + from sphinx.util.docutils import switch_source_input +else: + from sphinx.ext.autodoc import AutodocReporter + +import kernellog __version__ = '1.0' @@ -90,7 +102,8 @@ class KernelDocDirective(Directive): cmd += [filename] try: - env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd))) + kernellog.verbose(env.app, + 'calling kernel-doc \'%s\'' % (" ".join(cmd))) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() @@ -100,7 +113,8 @@ class KernelDocDirective(Directive): if p.returncode != 0: sys.stderr.write(err) - env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) + kernellog.warn(env.app, + 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode)) return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] elif env.config.kerneldoc_verbosity > 0: sys.stderr.write(err) @@ -121,20 +135,28 @@ class KernelDocDirective(Directive): lineoffset += 1 node = nodes.section() - buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter + self.do_parse(result, node) + + return node.children + + except Exception as e: # pylint: disable=W0703 + kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' % + (" ".join(cmd), str(e))) + return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))] + + def do_parse(self, result, node): + if Use_SSI: + with switch_source_input(self.state, result): + self.state.nested_parse(result, 0, node, match_titles=1) + else: + save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter) self.state.memo.title_styles, self.state.memo.section_level = [], 0 try: self.state.nested_parse(result, 0, node, match_titles=1) finally: - self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf + self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save - return node.children - - except Exception as e: # pylint: disable=W0703 - env.app.warn('kernel-doc \'%s\' processing failed with: %s' % - (" ".join(cmd), str(e))) - return [nodes.error(None, 
nodes.paragraph(text = "kernel-doc missing"))] def setup(app): app.add_config_value('kerneldoc_bin', None, 'env') diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py new file mode 100644 index 0000000000000000000000000000000000000000..af924f51a7dcc5838c80a11c3ff4447e500936e2 --- /dev/null +++ b/Documentation/sphinx/kernellog.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Sphinx has deprecated its older logging interface, but the replacement +# only goes back to 1.6. So here's a wrapper layer to keep around for +# as long as we support 1.4. +# +import sphinx + +if sphinx.__version__[:3] >= '1.6': + UseLogging = True + from sphinx.util import logging + logger = logging.getLogger('kerneldoc') +else: + UseLogging = False + +def warn(app, message): + if UseLogging: + logger.warning(message) + else: + app.warn(message) + +def verbose(app, message): + if UseLogging: + logger.verbose(message) + else: + app.verbose(message) + + diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py index b97228d2cc0eef84c156f06cc1515602ddf4a24d..fbfe6693bb60aa36157c60d33e6a2d0c1c630154 100644 --- a/Documentation/sphinx/kfigure.py +++ b/Documentation/sphinx/kfigure.py @@ -60,6 +60,8 @@ import sphinx from sphinx.util.nodes import clean_astext from six import iteritems +import kernellog + PY3 = sys.version_info[0] == 3 if PY3: @@ -171,20 +173,20 @@ def setupTools(app): This function is called once, when the builder is initiated. """ global dot_cmd, convert_cmd # pylint: disable=W0603 - app.verbose("kfigure: check installed tools ...") + kernellog.verbose(app, "kfigure: check installed tools ...") dot_cmd = which('dot') convert_cmd = which('convert') if dot_cmd: - app.verbose("use dot(1) from: " + dot_cmd) + kernellog.verbose(app, "use dot(1) from: " + dot_cmd) else: - app.warn("dot(1) not found, for better output quality install " - "graphviz from http://www.graphviz.org") + kernellog.warn(app, "dot(1) not found, for better output quality install " + "graphviz from http://www.graphviz.org") if convert_cmd: - app.verbose("use convert(1) from: " + convert_cmd) + kernellog.verbose(app, "use convert(1) from: " + convert_cmd) else: - app.warn( + kernellog.warn(app, "convert(1) not found, for SVG to PDF conversion install " "ImageMagick (https://www.imagemagick.org)") @@ -220,12 +222,13 @@ def convert_image(img_node, translator, src_fname=None): # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages - app.verbose('assert best format for: ' + img_node['uri']) + kernellog.verbose(app, 'assert best format for: ' + img_node['uri']) if in_ext == '.dot': if not dot_cmd: - app.verbose("dot from graphviz not available / include DOT raw.") + kernellog.verbose(app, + "dot from graphviz not available / include DOT raw.") img_node.replace_self(file2literal(src_fname)) elif translator.builder.format == 'latex': @@ -252,7 +255,8 @@ def convert_image(img_node, translator, src_fname=None): if translator.builder.format == 'latex': if convert_cmd is None: - app.verbose("no SVG to PDF conversion available / include SVG raw.") + kernellog.verbose(app, + "no SVG to PDF conversion available / include SVG raw.") img_node.replace_self(file2literal(src_fname)) else: dst_fname = path.join(translator.builder.outdir, fname + '.pdf') @@ -265,18 +269,19 @@ def convert_image(img_node, translator, src_fname=None): _name = dst_fname[len(translator.builder.outdir) + 1:] if isNewer(dst_fname, src_fname): - app.verbose("convert: {out}/%s already exists and is newer" % _name) + 
kernellog.verbose(app, + "convert: {out}/%s already exists and is newer" % _name) else: ok = False mkdir(path.dirname(dst_fname)) if in_ext == '.dot': - app.verbose('convert DOT to: {out}/' + _name) + kernellog.verbose(app, 'convert DOT to: {out}/' + _name) ok = dot2format(app, src_fname, dst_fname) elif in_ext == '.svg': - app.verbose('convert SVG to: {out}/' + _name) + kernellog.verbose(app, 'convert SVG to: {out}/' + _name) ok = svg2pdf(app, src_fname, dst_fname) if not ok: @@ -305,7 +310,8 @@ def dot2format(app, dot_fname, out_fname): with open(out_fname, "w") as out: exit_code = subprocess.call(cmd, stdout = out) if exit_code != 0: - app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + kernellog.warn(app, + "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) def svg2pdf(app, svg_fname, pdf_fname): @@ -322,7 +328,7 @@ def svg2pdf(app, svg_fname, pdf_fname): # use stdout and stderr from parent exit_code = subprocess.call(cmd) if exit_code != 0: - app.warn("Error #%d when calling: %s" % (exit_code, " ".join(cmd))) + kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd))) return bool(exit_code == 0) @@ -415,15 +421,15 @@ def visit_kernel_render(self, node): app = self.builder.app srclang = node.get('srclang') - app.verbose('visit kernel-render node lang: "%s"' % (srclang)) + kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang)) tmp_ext = RENDER_MARKUP_EXT.get(srclang, None) if tmp_ext is None: - app.warn('kernel-render: "%s" unknown / include raw.' % (srclang)) + kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang)) return if not dot_cmd and tmp_ext == '.dot': - app.verbose("dot from graphviz not available / include raw.") + kernellog.verbose(app, "dot from graphviz not available / include raw.") return literal_block = node[0] diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 37a679501ddc68bc0ab26c58444794c0d30c8f40..1fa03508ff5ac6b85106628e20255a5a51ed4fde 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -60,6 +60,7 @@ show up in /proc/sys/kernel: - panic_on_stackoverflow - panic_on_unrecovered_nmi - panic_on_warn +- panic_print - panic_on_rcu_stall - perf_cpu_time_max_percent - perf_event_paranoid @@ -94,6 +95,7 @@ show up in /proc/sys/kernel: - sysctl_writes_strict - tainted - threads-max +- unprivileged_bpf_disabled - unknown_nmi_panic - watchdog - watchdog_thresh @@ -336,6 +338,20 @@ Default value is "/sbin/hotplug". ============================================================== +hung_task_all_cpu_backtrace: + +If this option is set, the kernel will send an NMI to all CPUs to dump +their backtraces when a hung task is detected. This file shows up if +CONFIG_DETECT_HUNG_TASK and CONFIG_SMP are enabled. + +0: Won't show all CPUs backtraces when a hung task is detected. +This is the default behavior. + +1: Will non-maskably interrupt all CPUs and dump their backtraces when +a hung task is detected. + +============================================================== + hung_task_panic: Controls the kernel's behavior when a hung task is detected. @@ -653,6 +669,22 @@ a kernel rebuild when attempting to kdump at the location of a WARN(). ============================================================== +panic_print: + +Bitmask for printing system info when panic happens. 
User can chose +combination of the following bits: + +bit 0: print all tasks info +bit 1: print system memory info +bit 2: print timer info +bit 3: print locks info if CONFIG_LOCKDEP is on +bit 4: print ftrace buffer + +So for example to print tasks and memory info on panic, user can: + echo 3 > /proc/sys/kernel/panic_print + +============================================================== + panic_on_rcu_stall: When set to 1, calls panic() after RCU stall detection messages. This @@ -1041,6 +1073,26 @@ available RAM pages threads-max is reduced accordingly. ============================================================== +unprivileged_bpf_disabled: + +Writing 1 to this entry will disable unprivileged calls to bpf(); +once disabled, calling bpf() without CAP_SYS_ADMIN will return +-EPERM. Once set to 1, this can't be cleared from the running kernel +anymore. + +Writing 2 to this entry will also disable unprivileged calls to bpf(), +however, an admin can still change this setting later on, if needed, by +writing 0 or 1 to this entry. + +If BPF_UNPRIV_DEFAULT_OFF is enabled in the kernel config, then this +entry will default to 2 instead of 0. + + 0 - Unprivileged calls to bpf() are enabled + 1 - Unprivileged calls to bpf() are disabled without recovery + 2 - Unprivileged calls to bpf() are disabled + +============================================================== + unknown_nmi_panic: The value in this file affects behavior of handling NMI. When the diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 9ecde517728c317ac7428efd616536a0a90f301e..57e08f16b4e3860dd1ba240dd46535ce109cc756 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -25,7 +25,6 @@ Table : Subdirectories in /proc/sys/net ethernet Ethernet protocol rose X.25 PLP layer ipv4 IP version 4 x25 X.25 protocol ipx IPX token-ring IBM token ring - bridge Bridging decnet DEC net ipv6 IP version 6 tipc TIPC .............................................................................. @@ -92,6 +91,14 @@ Values : 0 - disable JIT kallsyms export (default value) 1 - enable JIT kallsyms export for privileged users only +bpf_jit_limit +------------- + +This enforces a global limit for memory allocations to the BPF JIT +compiler in order to reject unprivileged JIT requests once it has +been surpassed. bpf_jit_limit contains the value of the global limit +in bytes. + dev_weight -------------- diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 7d73882e2c273c57d1277c701b71a1774b22763c..b3808f1236f8966a677524926536a04384a5a8fa 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -20,6 +20,7 @@ Currently, these files are in /proc/sys/vm: - admin_reserve_kbytes - block_dump +- clear_freelist_pages - compact_memory - compact_unevictable_allowed - dirty_background_bytes @@ -65,6 +66,7 @@ Currently, these files are in /proc/sys/vm: - vfs_cache_pressure - watermark_scale_factor - zone_reclaim_mode +- reliable_debug ============================================================== @@ -104,6 +106,18 @@ information on block I/O debugging is in Documentation/laptops/laptop-mode.txt. ============================================================== +clear_freelist_pages + +Available only when CONFIG_CLEAR_FREELIST_PAGE is set. When 1 is written to the +file, all pages in free lists will be written with 0. + +Zone lock is held during clear_freelist_pages, if the execution time is too +long, RCU CPU Stall warnings will be print. 
For each NUMA node, +clear_freelist_pages is performed on a "random" CPU of the NUMA node. +The time consuming is related to the hardware. + +============================================================== + compact_memory Available only when CONFIG_COMPACTION is set. When 1 is written to the file, @@ -910,4 +924,24 @@ Allowing regular swap effectively restricts allocations to the local node unless explicitly overridden by memory policies or cpuset configurations. +============================================================== + +reliable_debug: + +reliable_debug is used to control memory reliable features and can only be +disabled via this interface. + +Four bits are used to represent the following features +- bit 0: memory reliable feature +- bit 1: reliable fallback feature +- bit 2: tmpfs use reliable memory feature +- bit 3: pagecache use reliable memory feature + +Bit 1~3 are valid if and only if the bit 0 is 1. If the first bit is 0, all +other features will be closed no matter other bits's status. + +For example, you can disable all features by + + $ echo 0 > /proc/sys/vm/reliable_debug + ============ End of Document ================================= diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt index 89ab09e78e8de289270145eea7dcdd308f75fa1e..f07e38094b407110580986f1fee71c0908ee08f6 100644 --- a/Documentation/trace/coresight-cpu-debug.txt +++ b/Documentation/trace/coresight-cpu-debug.txt @@ -165,7 +165,7 @@ Do some work... The same can also be done from an application program. Disable specific CPU's specific idle state from cpuidle sysfs (see -Documentation/cpuidle/sysfs.txt): +Documentation/admin-guide/pm/cpuidle.rst): # echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index 5ac724baea7d93aeb15760de5bb1722bffb461b9..c14dab13a47e8224e84a790f5687b84741c420f6 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -191,7 +191,7 @@ Documentation written by Tom Zanussi with the event, in nanoseconds. May be modified by .usecs to have timestamps interpreted as microseconds. - cpu int the cpu on which the event occurred. + common_cpu int the cpu on which the event occurred. ====================== ==== ======================================= Extended error information diff --git a/Documentation/trace/tracepoints.rst b/Documentation/trace/tracepoints.rst index 6e3ce3bf3593acb224d6ed8d7709fe0f4d9cc52f..0cb8d9ca3d608d771387b11fd80c78220d99cd0a 100644 --- a/Documentation/trace/tracepoints.rst +++ b/Documentation/trace/tracepoints.rst @@ -146,3 +146,30 @@ with jump labels and avoid conditional branches. define tracepoints. Check http://lwn.net/Articles/379903, http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362 for a series of articles with more details. + +If you require calling a tracepoint from a header file, it is not +recommended to call one directly or to use the trace__enabled() +function call, as tracepoints in header files can have side effects if a +header is included from a file that has CREATE_TRACE_POINTS set, as +well as the trace_() is not that small of an inline +and can bloat the kernel if used by other inlined functions. Instead, +include tracepoint-defs.h and use tracepoint_enabled(). 
+ +In a C file:: + + void do_trace_foo_bar_wrapper(args) + { + trace_foo_bar(args); + } + +In the header file:: + + DECLARE_TRACEPOINT(foo_bar); + + static inline void some_inline_function() + { + [..] + if (tracepoint_enabled(foo_bar)) + do_trace_foo_bar_wrapper(args); + [..] + } diff --git a/Documentation/translations/zh_CN/magic-number.txt b/Documentation/translations/zh_CN/magic-number.txt index 7159cec04090d8f0243090b18a53f227a942e4e1..a91270b02df303b88202db2fc02cef6283b4ecea 100644 --- a/Documentation/translations/zh_CN/magic-number.txt +++ b/Documentation/translations/zh_CN/magic-number.txt @@ -83,7 +83,6 @@ AX25_MAGIC 0x5316 ax_disp drivers/net/mkiss.h TTY_MAGIC 0x5401 tty_struct include/linux/tty.h MGSL_MAGIC 0x5401 mgsl_info drivers/char/synclink.c TTY_DRIVER_MAGIC 0x5402 tty_driver include/linux/tty_driver.h -MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h FULL_DUPLEX_MAGIC 0x6969 drivers/net/ethernet/dec/tulip/de2104x.c diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt deleted file mode 100644 index aee715af7db741be230d754dc468bbb9709e81d6..0000000000000000000000000000000000000000 --- a/Documentation/usb/rio.txt +++ /dev/null @@ -1,138 +0,0 @@ -Copyright (C) 1999, 2000 Bruce Tenison -Portions Copyright (C) 1999, 2000 David Nelson -Thanks to David Nelson for guidance and the usage of the scanner.txt -and scanner.c files to model our driver and this informative file. - -Mar. 2, 2000 - -CHANGES - -- Initial Revision - - -OVERVIEW - -This README will address issues regarding how to configure the kernel -to access a RIO 500 mp3 player. -Before I explain how to use this to access the Rio500 please be warned: - -W A R N I N G: --------------- - -Please note that this software is still under development. The authors -are in no way responsible for any damage that may occur, no matter how -inconsequential. - -It seems that the Rio has a problem when sending .mp3 with low batteries. -I suggest when the batteries are low and you want to transfer stuff that you -replace it with a fresh one. In my case, what happened is I lost two 16kb -blocks (they are no longer usable to store information to it). But I don't -know if that's normal or not; it could simply be a problem with the flash -memory. - -In an extreme case, I left my Rio playing overnight and the batteries wore -down to nothing and appear to have corrupted the flash memory. My RIO -needed to be replaced as a result. Diamond tech support is aware of the -problem. Do NOT allow your batteries to wear down to nothing before -changing them. It appears RIO 500 firmware does not handle low battery -power well at all. - -On systems with OHCI controllers, the kernel OHCI code appears to have -power on problems with some chipsets. If you are having problems -connecting to your RIO 500, try turning it on first and then plugging it -into the USB cable. - -Contact information: --------------------- - - The main page for the project is hosted at sourceforge.net in the following - URL: . You can also go to the project's - sourceforge home page at: . - There is also a mailing list: rio500-users@lists.sourceforge.net - -Authors: -------- - -Most of the code was written by Cesar Miquel . Keith -Clayton is incharge of the PPC port and making sure -things work there. Bruce Tenison is adding support -for .fon files and also does testing. 
The program will mostly sure be -re-written and Pete Ikusz along with the rest will re-design it. I would -also like to thank Tri Nguyen who provided use -with some important information regarding the communication with the Rio. - -ADDITIONAL INFORMATION and Userspace tools - -http://rio500.sourceforge.net/ - - -REQUIREMENTS - -A host with a USB port. Ideally, either a UHCI (Intel) or OHCI -(Compaq and others) hardware port should work. - -A Linux development kernel (2.3.x) with USB support enabled or a -backported version to linux-2.2.x. See http://www.linux-usb.org for -more information on accomplishing this. - -A Linux kernel with RIO 500 support enabled. - -'lspci' which is only needed to determine the type of USB hardware -available in your machine. - -CONFIGURATION - -Using `lspci -v`, determine the type of USB hardware available. - - If you see something like: - - USB Controller: ...... - Flags: ..... - I/O ports at .... - - Then you have a UHCI based controller. - - If you see something like: - - USB Controller: ..... - Flags: .... - Memory at ..... - - Then you have a OHCI based controller. - -Using `make menuconfig` or your preferred method for configuring the -kernel, select 'Support for USB', 'OHCI/UHCI' depending on your -hardware (determined from the steps above), 'USB Diamond Rio500 support', and -'Preliminary USB device filesystem'. Compile and install the modules -(you may need to execute `depmod -a` to update the module -dependencies). - -Add a device for the USB rio500: - `mknod /dev/usb/rio500 c 180 64` - -Set appropriate permissions for /dev/usb/rio500 (don't forget about -group and world permissions). Both read and write permissions are -required for proper operation. - -Load the appropriate modules (if compiled as modules): - - OHCI: - modprobe usbcore - modprobe usb-ohci - modprobe rio500 - - UHCI: - modprobe usbcore - modprobe usb-uhci (or uhci) - modprobe rio500 - -That's it. The Rio500 Utils at: http://rio500.sourceforge.net should -be able to access the rio500. - -BUGS - -If you encounter any problems feel free to drop me an email. - -Bruce Tenison -btenison@dibbs.net - diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst index 32f3d55c54b75e1c6642a3d328a1dc404dfd4dc9..0fda8f6141105d71a04a3a3c8de02fcaaef148e8 100644 --- a/Documentation/userspace-api/spec_ctrl.rst +++ b/Documentation/userspace-api/spec_ctrl.rst @@ -47,6 +47,8 @@ If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation misfeature will fail. +.. 
_set_spec_ctrl: + PR_SET_SPECULATION_CTRL ----------------------- @@ -92,3 +94,12 @@ Speculation misfeature controls * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); + +- PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes + (Mitigate Spectre V2 style attacks against user processes) + + Invocations: + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0); diff --git a/Documentation/virtual/guest-halt-polling.txt b/Documentation/virtual/guest-halt-polling.txt new file mode 100644 index 0000000000000000000000000000000000000000..b3a2a294532da0729a101b9ebb2751f6676ddf4b --- /dev/null +++ b/Documentation/virtual/guest-halt-polling.txt @@ -0,0 +1,78 @@ +Guest halt polling +================== + +The cpuidle_haltpoll driver, with the haltpoll governor, allows +the guest vcpus to poll for a specified amount of time before +halting. +This provides the following benefits to host side polling: + + 1) The POLL flag is set while polling is performed, which allows + a remote vCPU to avoid sending an IPI (and the associated + cost of handling the IPI) when performing a wakeup. + + 2) The VM-exit cost can be avoided. + +The downside of guest side polling is that polling is performed +even with other runnable tasks in the host. + +The basic logic as follows: A global value, guest_halt_poll_ns, +is configured by the user, indicating the maximum amount of +time polling is allowed. This value is fixed. + +Each vcpu has an adjustable guest_halt_poll_ns +("per-cpu guest_halt_poll_ns"), which is adjusted by the algorithm +in response to events (explained below). + +Module Parameters +================= + +The haltpoll governor has 5 tunable module parameters: + +1) guest_halt_poll_ns: +Maximum amount of time, in nanoseconds, that polling is +performed before halting. + +Default: 200000 + +2) guest_halt_poll_shrink: +Division factor used to shrink per-cpu guest_halt_poll_ns when +wakeup event occurs after the global guest_halt_poll_ns. + +Default: 2 + +3) guest_halt_poll_grow: +Multiplication factor used to grow per-cpu guest_halt_poll_ns +when event occurs after per-cpu guest_halt_poll_ns +but before global guest_halt_poll_ns. + +Default: 2 + +4) guest_halt_poll_grow_start: +The per-cpu guest_halt_poll_ns eventually reaches zero +in case of an idle system. This value sets the initial +per-cpu guest_halt_poll_ns when growing. This can +be increased from 10000, to avoid misses during the initial +growth stage: + +10k, 20k, 40k, ... (example assumes guest_halt_poll_grow=2). + +Default: 50000 + +5) guest_halt_poll_allow_shrink: + +Bool parameter which allows shrinking. Set to N +to avoid it (per-cpu guest_halt_poll_ns will remain +high once achieves global guest_halt_poll_ns value). + +Default: Y + +The module parameters can be set from the debugfs files in: + + /sys/module/haltpoll/parameters/ + +Further Notes +============= + +- Care should be taken when setting the guest_halt_poll_ns parameter as a +large value has the potential to drive the cpu usage to 100% on a machine which +would be almost entirely idle otherwise. 
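+
+As a rough illustration of how the per-cpu guest_halt_poll_ns described above
+is adjusted (a sketch only, not the actual governor code; the function and its
+arguments are made up for this example), the value evolves roughly as follows:
+
+  /* 'cur' is the current per-cpu value, 'block_ns' is how long the vcpu
+   * actually stayed blocked before the wakeup event.
+   */
+  u64 adjust_poll_ns(u64 cur, u64 block_ns)
+  {
+          if (block_ns <= cur)
+                  return cur;        /* woke up while polling: keep the value */
+
+          if (block_ns <= guest_halt_poll_ns) {
+                  /* missed, but within the global limit: grow
+                   * (capped at guest_halt_poll_ns in practice)
+                   */
+                  if (!cur)
+                          return guest_halt_poll_grow_start;
+                  return cur * guest_halt_poll_grow;
+          }
+
+          /* wakeup came after the global limit: shrink, if allowed */
+          if (guest_halt_poll_allow_shrink)
+                  return guest_halt_poll_shrink ? cur / guest_halt_poll_shrink : 0;
+          return cur;
+  }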
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 647f94128a85e47183f0ba7ffd6cbfc90fe1e6f5..f03924c4353ad4de32f15a174bda6055de6f1bc1 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes - VM ioctls: These query and set attributes that affect an entire virtual machine, for example memory layout. In addition a VM ioctl is used to - create virtual cpus (vcpus). + create virtual cpus (vcpus) and devices. Only run VM ioctls from the same process (address space) that was used to create the VM. @@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes Only run vcpu ioctls from the same thread that was used to create the vcpu. + - device ioctls: These query and set attributes that control the operation + of a single device. + + device ioctls must be issued from the same process (address space) that + was used to create the VM. 2. File descriptors ------------------- @@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial open("/dev/kvm") obtains a handle to the kvm subsystem; this handle can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this handle will create a VM file descriptor which can be used to issue VM -ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu -and return a file descriptor pointing to it. Finally, ioctls on a vcpu -fd can be used to control the vcpu, including the important task of -actually running guest code. +ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will +create a virtual cpu or device and return a file descriptor pointing to +the new resource. Finally, ioctls on a vcpu or device fd can be used +to control the vcpu or device. For vcpus, this includes the important +task of actually running guest code. In general file descriptors can be migrated among processes by means of fork() and the SCM_RIGHTS facility of unix domain socket. These @@ -123,6 +129,37 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the flag KVM_VM_MIPS_VZ. +On arm64, the physical address size for a VM (IPA Size limit) is limited +to 40bits by default. The limit can be configured if the host supports the +extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use +KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type +identifier, where IPA_Bits is the maximum width of any physical +address used by the VM. The IPA_Bits is encoded in bits[7-0] of the +machine type identifier. + +e.g, to configure a guest to use 48bit physical address size : + + vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48)); + +The requested size (IPA_Bits) must be : + 0 - Implies default size, 40bits (for backward compatibility) + + or + + N - Implies N bits, where N is a positive integer such that, + 32 <= N <= Host_IPA_Limit + +Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and +is dependent on the CPU capability and the kernel configuration. The limit can +be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION +ioctl() at run-time. + +Please note that configuring the IPA size does not affect the capability +exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects +size of the address translated by the stage2 level (guest physical to +host physical address translations). 
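A hedged example of putting this together from userspace: probe Host_IPA_Limit
with KVM_CHECK_EXTENSION and pass it to KVM_CREATE_VM. This assumes an arm64
host whose <linux/kvm.h> provides KVM_CAP_ARM_VM_IPA_SIZE and the
KVM_VM_TYPE_ARM_IPA_SIZE() macro::

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int create_vm_with_max_ipa(void)
    {
        int kvm_fd, vm_fd, ipa_bits;

        kvm_fd = open("/dev/kvm", O_RDWR);
        if (kvm_fd < 0)
            return -1;

        /* Host_IPA_Limit; 0 means the extension is not supported. */
        ipa_bits = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
        if (ipa_bits > 0)
            vm_fd = ioctl(kvm_fd, KVM_CREATE_VM,
                          KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
        else
            vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0); /* default 40-bit IPA */

        close(kvm_fd);
        return vm_fd;
    }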
+ + 4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST @@ -274,6 +311,9 @@ the address space for which you want to return the dirty bitmap. They must be less than the value that KVM_CHECK_EXTENSION returns for the KVM_CAP_MULTI_ADDRESS_SPACE capability. +The bits in the dirty bitmap are cleared before the ioctl returns, unless +KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information, +see the description of the capability. 4.9 KVM_SET_MEMORY_ALIAS @@ -691,8 +731,8 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for specific cpus. The irq field is interpreted like this: -  bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 | - field: | irq_type | vcpu_index | irq_id | + bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 | + field: | vcpu2_index | irq_type | vcpu_index | irq_id | The irq_type field has the following values: - irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ @@ -704,6 +744,14 @@ The irq_type field has the following values: In both cases, level is used to assert/deassert the line. +When KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 is supported, the target vcpu is +identified as (256 * vcpu2_index + vcpu_index). Otherwise, vcpu2_index +must be zero. + +Note that on arm/arm64, the KVM_CAP_IRQCHIP capability only conditions +injection of interrupts for the in-kernel irqchip. KVM_IRQ_LINE can always +be used for a userspace interrupt controller. + struct kvm_irq_level { union { __u32 irq; /* GSI */ @@ -1085,10 +1133,15 @@ documentation when it pops into existence). 4.37 KVM_ENABLE_CAP -Capability: KVM_CAP_ENABLE_CAP, KVM_CAP_ENABLE_CAP_VM -Architectures: x86 (only KVM_CAP_ENABLE_CAP_VM), - mips (only KVM_CAP_ENABLE_CAP), ppc, s390 -Type: vcpu ioctl, vm ioctl (with KVM_CAP_ENABLE_CAP_VM) +Capability: KVM_CAP_ENABLE_CAP +Architectures: mips, ppc, s390 +Type: vcpu ioctl +Parameters: struct kvm_enable_cap (in) +Returns: 0 on success; -1 on error + +Capability: KVM_CAP_ENABLE_CAP_VM +Architectures: all +Type: vcpu ioctl Parameters: struct kvm_enable_cap (in) Returns: 0 on success; -1 on error @@ -3676,6 +3729,47 @@ Returns: 0 on success, -1 on error This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE. +4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl) + +Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_dirty_log (in) +Returns: 0 on success, -1 on error + +/* for KVM_CLEAR_DIRTY_LOG */ +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void __user *dirty_bitmap; /* one bit per page */ + __u64 padding; + }; +}; + +The ioctl clears the dirty status of pages in a memory slot, according to +the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap +field. Bit 0 of the bitmap corresponds to page "first_page" in the +memory slot, and num_pages is the size in bits of the input bitmap. +first_page must be a multiple of 64; num_pages must also be a multiple of +64 unless first_page + num_pages is the size of the memory slot. For each +bit that is set in the input bitmap, the corresponding page is marked "clean" +in KVM's dirty bitmap, and dirty tracking is re-enabled for that page +(for example via write-protection, or by clearing the dirty bit in +a page table entry). 
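For illustration, a minimal invocation might look as follows; it assumes the
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 capability has already been enabled on the
VM and that the caller supplies a suitably sized bitmap::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
                          __u32 num_pages, void *bitmap)
    {
        struct kvm_clear_dirty_log log = {
            .slot = slot,             /* memslot (address space id in bits 16-31) */
            .first_page = first_page, /* must be a multiple of 64 */
            .num_pages = num_pages,   /* size of the bitmap, in bits */
            .dirty_bitmap = bitmap,   /* one bit per page; set bits are cleared */
        };

        return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
    }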
+ +If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies +the address space for which you want to return the dirty bitmap. +They must be less than the value that KVM_CHECK_EXTENSION returns for +the KVM_CAP_MULTI_ADDRESS_SPACE capability. + +This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 +is enabled; for more information, see the description of the capability. +However, it can always be used as long as KVM_CHECK_EXTENSION confirms +that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present. + + 5. The kvm_run structure ------------------------ @@ -4531,6 +4625,47 @@ With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise, a #GP would be raised when the guest tries to access. Currently, this capability does not enable write permissions of this MSR for the guest. +7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 + +Architectures: all +Parameters: args[0] whether feature should be enabled or not + +Valid flags are:: + + #define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0) + #define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1) + +With KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE set, KVM_GET_DIRTY_LOG will not +automatically clear and write-protect all pages that are returned as dirty. +Rather, userspace will have to do this operation separately using +KVM_CLEAR_DIRTY_LOG. + +At the cost of a slightly more complicated operation, this provides better +scalability and responsiveness for two reasons. First, +KVM_CLEAR_DIRTY_LOG ioctl can operate on a 64-page granularity rather +than requiring to sync a full memslot; this ensures that KVM does not +take spinlocks for an extended period of time. Second, in some cases a +large amount of time can pass between a call to KVM_GET_DIRTY_LOG and +userspace actually using the data in the page. Pages can be modified +during this time, which is inefficient for both the guest and userspace: +the guest will incur a higher penalty due to write protection faults, +while userspace can see false reports of dirty pages. Manual reprotection +helps reducing this time, improving guest performance and reducing the +number of dirty log false positives. + +With KVM_DIRTY_LOG_INITIALLY_SET set, all the bits of the dirty bitmap +will be initialized to 1 when created. This also improves performance because +dirty logging can be enabled gradually in small chunks on the first call +to KVM_CLEAR_DIRTY_LOG. KVM_DIRTY_LOG_INITIALLY_SET depends on +KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on +x86 and arm64 for now). + +KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name +KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make +it hard or impossible to use it correctly. The availability of +KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed. +Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT. + 8. Other capabilities. ---------------------- diff --git a/Documentation/virtual/kvm/arm/pvsched.txt b/Documentation/virtual/kvm/arm/pvsched.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d5aefc290ec54d214d7ce1ee44e75f61ba30433 --- /dev/null +++ b/Documentation/virtual/kvm/arm/pvsched.txt @@ -0,0 +1,56 @@ +Paravirtualized sched support for arm64 +======================================= + +KVM/arm64 provides some hypervisor service calls to support a paravirtualized +sched. 
+ +Some SMCCC compatible hypercalls are defined: + +* PV_SCHED_FEATURES: 0xC5000090 +* PV_SCHED_IPA_INIT: 0xC5000091 +* PV_SCHED_IPA_RELEASE: 0xC5000092 + +The existence of the PV_SCHED hypercall should be probed using the SMCCC 1.1 +ARCH_FEATURES mechanism before calling it. + +PV_SCHED_FEATURES + ============= ======== ========== + Function ID: (uint32) 0xC5000090 + PV_call_id: (uint32) The function to query for support. + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant + PV-sched feature is supported by the hypervisor. + ============= ======== ========== + +PV_SCHED_IPA_INIT + ============= ======== ========== + Function ID: (uint32) 0xC5000091 + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the IPA of + this vCPU's PV data structure is shared to the + hypervisor. + ============= ======== ========== + +PV_SCHED_IPA_RELEASE + ============= ======== ========== + Function ID: (uint32) 0xC5000092 + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the IPA of + this vCPU's PV data structure is released. + ============= ======== ========== + +PV sched state +-------------- + +The structure pointed to by the PV_SCHED_IPA hypercall is as follows: + ++-----------+-------------+-------------+-----------------------------------+ +| Field | Byte Length | Byte Offset | Description | ++===========+=============+=============+===================================+ +| preempted | 4 | 0 | Indicates that the vCPU that owns | +| | | | this struct is running or not. | +| | | | Non-zero values mean the vCPU has | +| | | | been preempted. Zero means the | +| | | | vCPU is not preempted. | ++-----------+-------------+-------------+-----------------------------------+ + +The preempted field will be updated to 0 by the hypervisor prior to scheduling +a vCPU. When the vCPU is scheduled out, the preempted field will be updated +to 1 by the hypervisor. diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt index 1bb8bcaf8497703f7cdd61538ca1374f0e8ac622..635cd6eaf71495e081de44774e489d622323fcf4 100644 --- a/Documentation/virtual/kvm/locking.txt +++ b/Documentation/virtual/kvm/locking.txt @@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows: On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock. -For spinlocks, kvm_lock is taken outside kvm->mmu_lock. - Everything else is a leaf: no other lock is taken inside the critical sections. @@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above. ------------ Name: kvm_lock -Type: spinlock_t +Type: mutex Arch: any Protects: - vm_list diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index e507a9e0421ed22e630425074e053303f5e990bf..c843bee035acf1a093e2627ff8a0d6bb0a289062 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -152,8 +152,8 @@ Shadow pages contain the following information: shadow pages) so role.quadrant takes values in the range 0..3. Each quadrant maps 1GB virtual address space. role.access: - Inherited guest access permissions in the form uwx. Note execute - permission is positive, not negative. + Inherited guest access permissions from the parent ptes in the form uwx. + Note execute permission is positive, not negative. role.invalid: The page is invalid and should not be used. It is a root page that is currently pinned (by a cpu hardware register pointing to it); once it is @@ -452,13 +452,16 @@ stored into the MMIO spte. 
Thus, the MMIO spte might be created based on out-of-date information, but with an up-to-date generation number. To avoid this, the generation number is incremented again after synchronize_srcu -returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a +returns; thus, bit 63 of kvm_memslots(kvm)->generation set to 1 only during a memslot update, while some SRCU readers might be using the old copy. We do not want to use an MMIO sptes created with an odd generation number, and we can do -this without losing a bit in the MMIO spte. The low bit of the generation -is not stored in MMIO spte, and presumed zero when it is extracted out of the -spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1, -the next access to the spte will always be a cache miss. +this without losing a bit in the MMIO spte. The "update in-progress" bit of the +generation is not stored in MMIO spte, and is so is implicitly zero when the +generation is extracted out of the spte. If KVM is unlucky and creates an MMIO +spte while an update is in-progress, the next access to the spte will always be +a cache miss. For example, a subsequent access during the update window will +miss due to the in-progress flag diverging, while an access after the update +window closes will have a higher generation number (as compared to the spte). Further reading diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index f3f0d57ced8e1827fe8e8e6dc49808b18c9253fb..30ff2ac58d79a168813e371060edd71e6768ba81 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt @@ -273,3 +273,11 @@ MSR_KVM_EOI_EN: 0x4b564d04 guest must both read the least significant bit in the memory area and clear it using a single CPU instruction, such as test and clear, or compare and exchange. + +MSR_KVM_POLL_CONTROL: 0x4b564d05 + Control host-side polling. + + data: Bit 0 enables (1) or disables (0) host-side HLT polling logic. + + KVM guests can request the host not to poll on HLT, for example if + they are performing polling themselves. diff --git a/Documentation/vm/memcg_memfs_info.rst b/Documentation/vm/memcg_memfs_info.rst new file mode 100644 index 0000000000000000000000000000000000000000..aff432d125e52f7e4baaa1af490a4c9fe57eddd0 --- /dev/null +++ b/Documentation/vm/memcg_memfs_info.rst @@ -0,0 +1,40 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +================ +Memcg Memfs Info +================ + +Overview +======== + +Support to print rootfs files and tmpfs files that having pages charged +in given memory cgroup. The files infomations can be printed through +interface "memory.memfs_files_info" or printed when OOM is triggered. + +User control +============ + +1. /sys/kernel/mm/memcg_memfs_info/enable +----------------------------------------- + +Boolean type. The default value is 0, set it to 1 to enable the feature. + +2. /sys/kernel/mm/memcg_memfs_info/max_print_files_in_oom +--------------------------------------------------------- + +Unsigned long type. The default value is 500, indicating that the maximum of +files can be print to console when OOM is triggered. + +3. /sys/kernel/mm/memcg_memfs_info/size_threshold +------------------------------------------------- + +Unsigned long type. The default value is 0, indicating that the minimum size of +files that can be printed. + +4. /sys/fs/cgroup/memory//memory.memfs_files_info +--------------------------------------------------------- + +Outputs the files who use memory in this memory cgroup. 
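As a rough usage sketch (the cgroup name below is only a placeholder), the
feature can be enabled and the per-cgroup file list read back like this::

    #include <stdio.h>

    int main(void)
    {
        FILE *f;
        char line[256];

        f = fopen("/sys/kernel/mm/memcg_memfs_info/enable", "w");
        if (f) {
            fputs("1", f);          /* turn the feature on */
            fclose(f);
        }

        /* "mygroup" stands in for the memory cgroup of interest. */
        f = fopen("/sys/fs/cgroup/memory/mygroup/memory.memfs_files_info", "r");
        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);    /* rootfs/tmpfs files charged to this cgroup */
        fclose(f);
        return 0;
    }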
+ +--- +Liu Shixin, Jan 2022 diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..33c5c3142e201972309a2e4e0afdaba77a7c88d0 --- /dev/null +++ b/Documentation/x86/conf.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8; mode: python -*- + +project = "X86 architecture specific documentation" + +tags.add("subproject") + +latex_documents = [ + ('index', 'x86.tex', project, + 'The kernel development community', 'manual'), +] diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..0780d55c5aa82835600973f16e51f3195d374259 --- /dev/null +++ b/Documentation/x86/index.rst @@ -0,0 +1,9 @@ +========================== +x86 architecture specifics +========================== + +.. toctree:: + :maxdepth: 1 + + mds + tsx_async_abort diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d4330be200f980cf4ea7a8e17b9e89012dcc381 --- /dev/null +++ b/Documentation/x86/mds.rst @@ -0,0 +1,193 @@ +Microarchitectural Data Sampling (MDS) mitigation +================================================= + +.. _mds: + +Overview +-------- + +Microarchitectural Data Sampling (MDS) is a family of side channel attacks +on internal buffers in Intel CPUs. The variants are: + + - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) + - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) + - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) + - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091) + +MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a +dependent load (store-to-load forwarding) as an optimization. The forward +can also happen to a faulting or assisting load operation for a different +memory address, which can be exploited under certain conditions. Store +buffers are partitioned between Hyper-Threads so cross thread forwarding is +not possible. But if a thread enters or exits a sleep state the store +buffer is repartitioned which can expose data from one thread to the other. + +MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage +L1 miss situations and to hold data which is returned or sent in response +to a memory or I/O operation. Fill buffers can forward data to a load +operation and also write data to the cache. When the fill buffer is +deallocated it can retain the stale data of the preceding operations which +can then be forwarded to a faulting or assisting load operation, which can +be exploited under certain conditions. Fill buffers are shared between +Hyper-Threads so cross thread leakage is possible. + +MLPDS leaks Load Port Data. Load ports are used to perform load operations +from memory or I/O. The received data is then forwarded to the register +file or a subsequent operation. In some implementations the Load Port can +contain stale data from a previous operation which can be forwarded to +faulting or assisting loads under certain conditions, which again can be +exploited eventually. Load ports are shared between Hyper-Threads so cross +thread leakage is possible. + +MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from +memory that takes a fault or assist can leave data in a microarchitectural +structure that may later be observed using one of the same methods used by +MSBDS, MFBDS or MLPDS. 
+ +Exposure assumptions +-------------------- + +It is assumed that attack code resides in user space or in a guest with one +exception. The rationale behind this assumption is that the code construct +needed for exploiting MDS requires: + + - to control the load to trigger a fault or assist + + - to have a disclosure gadget which exposes the speculatively accessed + data for consumption through a side channel. + + - to control the pointer through which the disclosure gadget exposes the + data + +The existence of such a construct in the kernel cannot be excluded with +100% certainty, but the complexity involved makes it extremly unlikely. + +There is one exception, which is untrusted BPF. The functionality of +untrusted BPF is limited, but it needs to be thoroughly investigated +whether it can be used to create such a construct. + + +Mitigation strategy +------------------- + +All variants have the same mitigation strategy at least for the single CPU +thread case (SMT off): Force the CPU to clear the affected buffers. + +This is achieved by using the otherwise unused and obsolete VERW +instruction in combination with a microcode update. The microcode clears +the affected CPU buffers when the VERW instruction is executed. + +For virtualization there are two ways to achieve CPU buffer +clearing. Either the modified VERW instruction or via the L1D Flush +command. The latter is issued when L1TF mitigation is enabled so the extra +VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to +be issued. + +If the VERW instruction with the supplied segment selector argument is +executed on a CPU without the microcode update there is no side effect +other than a small number of pointlessly wasted CPU cycles. + +This does not protect against cross Hyper-Thread attacks except for MSBDS +which is only exploitable cross Hyper-thread when one of the Hyper-Threads +enters a C-state. + +The kernel provides a function to invoke the buffer clearing: + + mds_clear_cpu_buffers() + +The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state +(idle) transitions. + +As a special quirk to address virtualization scenarios where the host has +the microcode updated, but the hypervisor does not (yet) expose the +MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the +hope that it might actually clear the buffers. The state is reflected +accordingly. + +According to current knowledge additional mitigations inside the kernel +itself are not required because the necessary gadgets to expose the leaked +data cannot be controlled in a way which allows exploitation from malicious +user space or VM guests. + +Kernel internal mitigation modes +-------------------------------- + + ======= ============================================================ + off Mitigation is disabled. Either the CPU is not affected or + mds=off is supplied on the kernel command line + + full Mitigation is enabled. CPU is affected and MD_CLEAR is + advertised in CPUID. + + vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not + advertised in CPUID. That is mainly for virtualization + scenarios where the host has the updated microcode but the + hypervisor does not expose MD_CLEAR in CPUID. It's a best + effort approach without guarantee. 
+ ======= ============================================================ + +If the CPU is affected and mds=off is not supplied on the kernel command +line then the kernel selects the appropriate mitigation mode depending on +the availability of the MD_CLEAR CPUID bit. + +Mitigation points +----------------- + +1. Return to user space +^^^^^^^^^^^^^^^^^^^^^^^ + + When transitioning from kernel to user space the CPU buffers are flushed + on affected CPUs when the mitigation is not disabled on the kernel + command line. The migitation is enabled through the static key + mds_user_clear. + + The mitigation is invoked in prepare_exit_to_usermode() which covers + all but one of the kernel to user space transitions. The exception + is when we return from a Non Maskable Interrupt (NMI), which is + handled directly in do_nmi(). + + (The reason that NMI is special is that prepare_exit_to_usermode() can + enable IRQs. In NMI context, NMIs are blocked, and we don't want to + enable IRQs with NMIs blocked.) + + +2. C-State transition +^^^^^^^^^^^^^^^^^^^^^ + + When a CPU goes idle and enters a C-State the CPU buffers need to be + cleared on affected CPUs when SMT is active. This addresses the + repartitioning of the store buffer when one of the Hyper-Threads enters + a C-State. + + When SMT is inactive, i.e. either the CPU does not support it or all + sibling threads are offline CPU buffer clearing is not required. + + The idle clearing is enabled on CPUs which are only affected by MSBDS + and not by any other MDS variant. The other MDS variants cannot be + protected against cross Hyper-Thread attacks because the Fill Buffer and + the Load Ports are shared. So on CPUs affected by other variants, the + idle clearing would be a window dressing exercise and is therefore not + activated. + + The invocation is controlled by the static key mds_idle_clear which is + switched depending on the chosen mitigation mode and the SMT state of + the system. + + The buffer clear is only invoked before entering the C-State to prevent + that stale data from the idling CPU from spilling to the Hyper-Thread + sibling after the store buffer got repartitioned and all entries are + available to the non idle sibling. + + When coming out of idle the store buffer is partitioned again so each + sibling has half of it available. The back from idle CPU could be then + speculatively exposed to contents of the sibling. The buffers are + flushed either on exit to user space or on VMENTER so malicious code + in user space or the guest cannot speculatively access them. + + The mitigation is hooked into all variants of halt()/mwait(), but does + not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver + has been superseded by the intel_idle driver around 2010 and is + preferred on all affected CPUs which are expected to gain the MD_CLEAR + functionality in microcode. Aside of that the IO-Port mechanism is a + legacy interface which is only used on older systems which are either + not affected or do not receive microcode updates anymore. 
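For reference, the buffer clear described above boils down to a single VERW
executed with a valid writable segment selector. The following is only a
sketch in kernel context; the actual mds_clear_cpu_buffers() implementation
under arch/x86 may differ in detail::

    /*
     * Sketch of a VERW-based buffer clear (kernel context, x86).
     * With the microcode update, VERW on a valid segment selector
     * clears the affected CPU buffers; without it, the instruction
     * only wastes a few cycles.
     */
    static inline void clear_cpu_buffers_sketch(void)
    {
        static const unsigned short ds = __KERNEL_DS;

        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }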
diff --git a/Documentation/x86/microcode.txt b/Documentation/x86/microcode.txt index 79fdb4a8148ad102528823ff99cc634e5876b8f1..07a6ff021e06df091e931ef1893f9fc9c57cf281 100644 --- a/Documentation/x86/microcode.txt +++ b/Documentation/x86/microcode.txt @@ -28,6 +28,7 @@ The microcode files in cpio name space are: on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -62,6 +63,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -113,7 +118,8 @@ currently supported. Here's an example: -CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" +CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally: @@ -123,6 +129,10 @@ This basically means, you have the following tree structure locally: ... | |-- microcode_amd_fam15h.bin ... +|-- hygon-ucode +... +| |-- microcode_hygon_fam18h.bin +... |-- intel-ucode ... | |-- 06-3a-09 diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt index 2953e3ec9a0259f102ce40b9c28340f761c2df4e..d84b7d2384cf79675c70b536fcca72e2687b8427 100644 --- a/Documentation/x86/topology.txt +++ b/Documentation/x86/topology.txt @@ -38,6 +38,8 @@ The topology of a system is described in the units of: Packages contain a number of cores plus shared resources, e.g. DRAM controller, shared caches etc. + Modern systems may also use the term 'Die' for package. + AMD nomenclature for package is 'Node'. Package-related topology information in the kernel: @@ -46,11 +48,22 @@ The topology of a system is described in the units of: The number of cores in a package. This information is retrieved via CPUID. + - cpuinfo_x86.x86_max_dies: + + The number of dies in a package. This information is retrieved via CPUID. + + - cpuinfo_x86.cpu_die_id: + + The physical ID of the die. This information is retrieved via CPUID. + - cpuinfo_x86.phys_proc_id: The physical ID of the package. This information is retrieved via CPUID and deduced from the APIC IDs of the cores in the package. + Modern systems use this value for the socket. There may be multiple + packages within a socket. This value may differ from cpu_die_id. + - cpuinfo_x86.logical_id: The logical ID of the package. As we do not trust BIOSes to enumerate the diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst new file mode 100644 index 0000000000000000000000000000000000000000..583ddc185ba220276cd3316b7bc1c9bbe115c206 --- /dev/null +++ b/Documentation/x86/tsx_async_abort.rst @@ -0,0 +1,117 @@ +.. SPDX-License-Identifier: GPL-2.0 + +TSX Async Abort (TAA) mitigation +================================ + +.. _tsx_async_abort: + +Overview +-------- + +TSX Async Abort (TAA) is a side channel attack on internal buffers in some +Intel processors similar to Microachitectural Data Sampling (MDS). 
In this +case certain loads may speculatively pass invalid data to dependent operations +when an asynchronous abort condition is pending in a Transactional +Synchronization Extensions (TSX) transaction. This includes loads with no +fault or assist condition. Such loads may speculatively expose stale data from +the same uarch data structures as in MDS, with same scope of exposure i.e. +same-thread and cross-thread. This issue affects all current processors that +support TSX. + +Mitigation strategy +------------------- + +a) TSX disable - one of the mitigations is to disable TSX. A new MSR +IA32_TSX_CTRL will be available in future and current processors after +microcode update which can be used to disable TSX. In addition, it +controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID. + +b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this +vulnerability. More details on this approach can be found in +:ref:`Documentation/admin-guide/hw-vuln/mds.rst `. + +Kernel internal mitigation modes +-------------------------------- + + ============= ============================================================ + off Mitigation is disabled. Either the CPU is not affected or + tsx_async_abort=off is supplied on the kernel command line. + + tsx disabled Mitigation is enabled. TSX feature is disabled by default at + bootup on processors that support TSX control. + + verw Mitigation is enabled. CPU is affected and MD_CLEAR is + advertised in CPUID. + + ucode needed Mitigation is enabled. CPU is affected and MD_CLEAR is not + advertised in CPUID. That is mainly for virtualization + scenarios where the host has the updated microcode but the + hypervisor does not expose MD_CLEAR in CPUID. It's a best + effort approach without guarantee. + ============= ============================================================ + +If the CPU is affected and the "tsx_async_abort" kernel command line parameter is +not provided then the kernel selects an appropriate mitigation depending on the +status of RTM and MD_CLEAR CPUID bits. + +Below tables indicate the impact of tsx=on|off|auto cmdline options on state of +TAA mitigation, VERW behavior and TSX feature for various combinations of +MSR_IA32_ARCH_CAPABILITIES bits. + +1. "tsx=off" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=off +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Disabled Yes TSX disabled TSX disabled + 1 X 1 Disabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +2. 
"tsx=on" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=on +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Enabled Yes None Same as MDS + 1 X 1 Enabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +3. "tsx=auto" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=auto +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Disabled Yes TSX disabled TSX disabled + 1 X 1 Enabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that +indicates whether MSR_IA32_TSX_CTRL is supported. + +There are two control bits in IA32_TSX_CTRL MSR: + + Bit 0: When set it disables the Restricted Transactional Memory (RTM) + sub-feature of TSX (will force all transactions to abort on the + XBEGIN instruction). + + Bit 1: When set it disables the enumeration of the RTM and HLE feature + (i.e. it will make CPUID(EAX=7).EBX{bit4} and + CPUID(EAX=7).EBX{bit11} read as 0). diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 5432a96d31ffd9938a58ab43e7b34fecd2ab35e7..05ef53d83a41ef69ba6db68bbf64afac44a13308 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -4,8 +4,9 @@ Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [47:63] sign extension ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor -ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory -ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole +ffff880000000000 - ffff887fffffffff (=39 bits) LDT remap for PTI +ffff888000000000 - ffffc87fffffffff (=64 TB) direct mapping of all phys. 
memory +ffffc88000000000 - ffffc8ffffffffff (=39 bits) hole ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) @@ -30,8 +31,9 @@ Virtual memory map with 5 level page tables: 0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor -ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI +ff10000000000000 - ff10ffffffffffff (=48 bits) LDT remap for PTI +ff11000000000000 - ff90ffffffffffff (=55 bits) direct mapping of all phys. memory +ff91000000000000 - ff9fffffffffffff (=3840 TB) hole ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) diff --git a/Kconfig b/Kconfig index 48a80beab6853d1461f277229625aa066d75a4e7..e484b19dc55560f302dcff365ce0e94ca946cdad 100644 --- a/Kconfig +++ b/Kconfig @@ -5,8 +5,6 @@ # mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration" -comment "Compiler: $(CC_VERSION_TEXT)" - source "scripts/Kconfig.include" source "init/Kconfig" diff --git a/MAINTAINERS b/MAINTAINERS index b2f710eee67a7cb340969c6cc91fab7f1940e5d7..986bb37e8cbc48078fcab6cf6128a5f49d54c037 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1160,10 +1160,8 @@ M: Will Deacon R: Robin Murphy L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/iommu/arm-smmu.c -F: drivers/iommu/arm-smmu-v3.c -F: drivers/iommu/io-pgtable-arm.c -F: drivers/iommu/io-pgtable-arm-v7s.c +F: drivers/iommu/arm-smmu* +F: drivers/iommu/io-pgtable-arm* ARM SUB-ARCHITECTURES L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -3315,6 +3313,16 @@ F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h F: include/uapi/linux/can/gw.h +CAN-J1939 NETWORK LAYER +M: Robin van der Gracht +M: Oleksij Rempel +R: Pengutronix Kernel Team +L: linux-can@vger.kernel.org +S: Maintained +F: Documentation/networking/j1939.txt +F: net/can/j1939/ +F: include/uapi/linux/can/j1939.h + CAPABILITIES M: Serge Hallyn L: linux-security-module@vger.kernel.org @@ -4143,13 +4151,6 @@ F: include/uapi/linux/dccp.h F: include/linux/tfrc.h F: net/dccp/ -DECnet NETWORK LAYER -W: http://linux-decnet.sourceforge.net -L: linux-decnet-user@lists.sourceforge.net -S: Orphan -F: Documentation/networking/decnet.txt -F: net/decnet/ - DECSTATION PLATFORM SUPPORT M: "Maciej W. 
Rozycki" L: linux-mips@linux-mips.org @@ -4558,14 +4559,6 @@ L: linux-kernel@vger.kernel.org S: Maintained F: drivers/staging/fsl-dpaa2/rtc -DPT_I2O SCSI RAID DRIVER -M: Adaptec OEM Raid Solutions -L: linux-scsi@vger.kernel.org -W: http://www.adaptec.com/ -S: Maintained -F: drivers/scsi/dpt* -F: drivers/scsi/dpt/ - DRBD DRIVER M: Philipp Reisner M: Lars Ellenberg @@ -5271,6 +5264,12 @@ L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/ghes_edac.c +EDAC-I10NM +M: Tony Luck +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/edac/i10nm_base.c + EDAC-I3000 L: linux-edac@vger.kernel.org S: Orphan @@ -5352,7 +5351,7 @@ EDAC-SKYLAKE M: Tony Luck L: linux-edac@vger.kernel.org S: Maintained -F: drivers/edac/skx_edac.c +F: drivers/edac/skx_*.c EDAC-TI M: Tero Kristo @@ -6628,6 +6627,21 @@ W: http://www.hisilicon.com S: Maintained F: drivers/net/ethernet/hisilicon/hns3/ +HISILICON NETWORK SUBSYSTEM 3 CAE DRIVER (HNS3_CAE) +M: Weiwei Deng +M: Zhaohui Zhong +S: Maintained +F: drivers/net/ethernet/hisilicon/hns3/hns-customer/ +F: drivers/net/ethernet/hisilicon/hns3/hns3_cae/ +F: drivers/net/ethernet/hisilicon/hns3/Makefile + +HISILICON I2C CONTROLLER DRIVER +M: Yicong Yang +L: linux-i2c@vger.kernel.org +S: Maintained +W: https://www.hisilicon.com +F: drivers/i2c/busses/i2c-hisi.c + HISILICON LPC BUS DRIVER M: john.garry@huawei.com W: http://www.hisilicon.com @@ -6659,6 +6673,20 @@ S: Maintained F: drivers/infiniband/hw/hns/ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt +HISILICON ROCE DFX DRIVER +M: Huang Dongdong +S: Maintained +F: drivers/infiniband/hw/hns/Makefile +F: drivers/infiniband/hw/hns/roce-customer/ + +HISILICON WARPDRIVE USER +M: Zaibo Xu +M: Zhou Wang +M: Hao Fang +L: linuxarm@huawei.com +S: Maintained +F: samples/warpdrive/ + HISILICON SAS Controller M: John Garry W: http://www.hisilicon.com @@ -6666,6 +6694,12 @@ S: Supported F: drivers/scsi/hisi_sas/ F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt +HISI_RAID SCSI RAID DRIVERS +M: Zhang Lei +S: Maintained +F: Documentation/scsi/hisi_raid.rst +F: drivers/scsi/hisi_raid/ + HMM - Heterogeneous Memory Management M: Jérôme Glisse L: linux-mm@kvack.org @@ -6776,6 +6810,12 @@ S: Maintained F: mm/memory-failure.c F: mm/hwpoison-inject.c +HYGON PROCESSOR SUPPORT +M: Pu Wen +L: linux-kernel@vger.kernel.org +S: Maintained +F: arch/x86/kernel/cpu/hygon.c + Hyper-V CORE AND DRIVERS M: "K. Y. 
Srinivasan" M: Haiyang Zhang @@ -7320,6 +7360,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/soc/intel/ +INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER +M: Hans de Goede +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: drivers/platform/x86/intel_atomisp2_pm.c + INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support M: Artur Paszkiewicz @@ -7534,14 +7580,6 @@ S: Supported F: drivers/infiniband/hw/i40iw/ F: include/uapi/rdma/i40iw-abi.h -INTEL SHA MULTIBUFFER DRIVER -M: Megha Dey -R: Tim Chen -L: linux-crypto@vger.kernel.org -S: Supported -F: arch/x86/crypto/sha*-mb/ -F: crypto/mcryptd.c - INTEL TELEMETRY DRIVER M: Souvik Kumar Chakravarty L: platform-driver-x86@vger.kernel.org @@ -7592,6 +7630,7 @@ M: Alexander Shishkin S: Supported F: Documentation/trace/intel_th.rst F: drivers/hwtracing/intel_th/ +F: include/linux/intel_th.h INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) M: Ning Sun @@ -9712,6 +9751,19 @@ S: Maintained F: arch/arm/boot/dts/mmp* F: arch/arm/mach-mmp/ +MMU GATHER AND TLB INVALIDATION +M: Will Deacon +M: "Aneesh Kumar K.V" +M: Andrew Morton +M: Nick Piggin +M: Peter Zijlstra +L: linux-arch@vger.kernel.org +L: linux-mm@kvack.org +S: Maintained +F: arch/*/include/asm/tlb.h +F: include/asm-generic/tlb.h +F: mm/mmu_gather.c + MN88472 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org @@ -10657,18 +10709,6 @@ S: Maintained F: Documentation/filesystems/omfs.txt F: fs/omfs/ -OMNIKEY CARDMAN 4000 DRIVER -M: Harald Welte -S: Maintained -F: drivers/char/pcmcia/cm4000_cs.c -F: include/linux/cm4000_cs.h -F: include/uapi/linux/cm4000_cs.h - -OMNIKEY CARDMAN 4040 DRIVER -M: Harald Welte -S: Maintained -F: drivers/char/pcmcia/cm4040_cs.* - OMNIVISION OV13858 SENSOR DRIVER M: Sakari Ailus L: linux-media@vger.kernel.org @@ -11316,6 +11356,11 @@ S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c +PCIE DFX DRIVER FOR HISILICON +M: Liu Yanshi +S: Maintained +F: drivers/pci/controller/hisi-pcie-customer/ + PCIE DRIVER FOR MEDIATEK M: Ryder Lee L: linux-pci@vger.kernel.org @@ -11629,6 +11674,7 @@ F: drivers/base/power/ F: include/linux/pm.h F: include/linux/pm_* F: include/linux/powercap.h +F: include/linux/intel_rapl.h F: drivers/powercap/ F: kernel/configs/nopm.config @@ -12895,11 +12941,6 @@ F: include/linux/sched.h F: include/uapi/linux/sched.h F: include/linux/wait.h -SCR24X CHIP CARD INTERFACE DRIVER -M: Lubomir Rintel -S: Supported -F: drivers/char/pcmcia/scr24x_cs.c - SCSI CDROM DRIVER M: Jens Axboe L: linux-scsi@vger.kernel.org @@ -13761,6 +13802,19 @@ M: Jan-Benedict Glaw S: Maintained F: arch/alpha/kernel/srm_env.c +SSSNIC Ethernet Controller DRIVERS +M: Steven Song +S: Maintained +F: Documentation/networking/sssnic.rst +F: drivers/net/ethernet/3snic/sssnic + +SSSRAID SCSI/Raid DRIVERS +M: Steven Song +L: linux-scsi@vger.kernel.org +S: Maintained +F: Documentation/scsi/sssraid.rst +F: drivers/scsi/sssraid/ + ST STM32 I2C/SMBUS DRIVER M: Pierre-Yves MORDRET L: linux-i2c@vger.kernel.org @@ -13769,6 +13823,7 @@ F: drivers/i2c/busses/i2c-stm32* STABLE BRANCH M: Greg Kroah-Hartman +M: Sasha Levin L: stable@vger.kernel.org S: Supported F: Documentation/process/stable-kernel-rules.rst @@ -15110,13 +15165,6 @@ W: http://www.linux-usb.org/usbnet S: Maintained F: drivers/net/usb/dm9601.c -USB DIAMOND RIO500 DRIVER -M: Cesar Miquel -L: rio500-users@lists.sourceforge.net -W: http://rio500.sourceforge.net -S: Maintained -F: drivers/usb/misc/rio500* - USB 
EHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org @@ -15953,7 +16001,7 @@ M: Tony Luck M: Borislav Petkov L: linux-edac@vger.kernel.org S: Maintained -F: arch/x86/kernel/cpu/mcheck/* +F: arch/x86/kernel/cpu/mce/* X86 MICROCODE UPDATE SUPPORT M: Borislav Petkov @@ -16205,6 +16253,12 @@ Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/zd1301_demod* +ZHAOXIN PROCESSOR SUPPORT +M: Tony W Wang-oc +L: linux-kernel@vger.kernel.org +S: Maintained +F: arch/x86/kernel/cpu/zhaoxin.c + ZPOOL COMPRESSED PAGE STORAGE API M: Dan Streetman L: linux-mm@kvack.org @@ -16259,3 +16313,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git S: Buried alive in reporters F: * F: */ + +HISILICON IO_MGMT SUBSYSTEM RDE DRIVER +M: hucheng Hu(Cheng) +S: Maintained +F: drivers/crypto/hisilicon/rde + +HISILICON TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER +M: Weili Qian +M: Zaibo Xu +S: Maintained +F: drivers/char/hw_random/hisi-trng-v2.c diff --git a/Makefile b/Makefile index 69fa5c0310d834f6cb7f58a25cb5b47f926b665e..2e92f2e50a7b7c246e007bc87f4c4ae984713282 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 0 +SUBLEVEL = 90 EXTRAVERSION = NAME = "People's Front" @@ -430,6 +430,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := +CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS @@ -482,18 +483,19 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) -CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) -CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) +CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) -KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) -KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) -KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) +CLANG_FLAGS += -no-integrated-as +CLANG_FLAGS += -Werror=unknown-warning-option +KBUILD_CFLAGS += $(CLANG_FLAGS) +KBUILD_AFLAGS += $(CLANG_FLAGS) +export CLANG_FLAGS endif RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register @@ -508,13 +510,6 @@ export RETPOLINE_VDSO_CFLAGS KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS += $(call cc-option,-fno-PIE) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - CC_HAVE_ASM_GOTO := 1 - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. 
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@ -601,7 +596,12 @@ endif # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ +ifeq ($(CONFIG_PGO_KERNEL),y) +CFLAGS_GCOV := -fprofile-generate +else +CFLAGS_GCOV := -fprofile-arcs +endif +CFLAGS_GCOV += -ftest-coverage \ $(call cc-option,-fno-tree-loop-im) \ $(call cc-disable-warning,maybe-uninitialized,) export CFLAGS_GCOV @@ -623,15 +623,18 @@ ifeq ($(may-sync-config),1) # Read in dependencies to all Kconfig* files, make sure to run syncconfig if # changes are detected. This should be included after arch/$(SRCARCH)/Makefile # because some architectures define CROSS_COMPILE there. --include include/config/auto.conf.cmd +include include/config/auto.conf.cmd # To avoid any implicit rule to kick in, define an empty command -$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; +$(KCONFIG_CONFIG): ; # The actual configuration files used during the build are stored in # include/generated/ and include/config/. Update them if .config is newer than # include/config/auto.conf (which mirrors .config). -include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd +# +# This exploits the 'multi-target pattern rule' trick. +# The syncconfig should be executed only once to make all the targets. +%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG) $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig else # External modules and some install targets need include/generated/autoconf.h @@ -656,10 +659,10 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) +KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) else ifdef CONFIG_PROFILE_ALL_BRANCHES KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) @@ -701,7 +704,6 @@ ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) -KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the @@ -838,6 +840,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) # change __FILE__ to the relative path from the srctree KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) +# ensure -fcf-protection is disabled when using retpoline as it is +# incompatible with -mindirect-branch=thunk-extern +ifdef CONFIG_RETPOLINE +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +endif + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) @@ -948,17 +956,14 @@ mod_sign_cmd = true endif export mod_sign_cmd +HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) + ifdef CONFIG_STACK_VALIDATION has_libelf := $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null 
$(HOST_LIBELF_LIBS) -,1,0) ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else - ifdef CONFIG_UNWINDER_ORC - $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") - else - $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") - endif SKIP_STACK_VALIDATION := 1 export SKIP_STACK_VALIDATION endif @@ -1115,6 +1120,14 @@ uapi-asm-generic: PHONY += prepare-objtool prepare-objtool: $(objtool_target) +ifeq ($(SKIP_STACK_VALIDATION),1) +ifdef CONFIG_UNWINDER_ORC + @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 + @false +else + @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 +endif +endif # Generate some files # --------------------------------------------------------------------------- @@ -1255,6 +1268,7 @@ _modinst_: fi @cp -f $(objtree)/modules.order $(MODLIB)/ @cp -f $(objtree)/modules.builtin $(MODLIB)/ + @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst # This depmod is only for convenience to give the initial @@ -1295,6 +1309,7 @@ endif # CONFIG_MODULES # Directories & files removed with 'make clean' CLEAN_DIRS += $(MODVERDIR) include/ksym +CLEAN_FILES += modules.builtin.modinfo # Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config usr/include include/generated \ @@ -1499,9 +1514,6 @@ else # KBUILD_EXTMOD # We are always building modules KBUILD_MODULES := 1 -PHONY += crmodverdir -crmodverdir: - $(cmd_crmodverdir) PHONY += $(objtree)/Module.symvers $(objtree)/Module.symvers: @@ -1513,7 +1525,7 @@ $(objtree)/Module.symvers: module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD)) PHONY += $(module-dirs) modules -$(module-dirs): crmodverdir $(objtree)/Module.symvers +$(module-dirs): prepare $(objtree)/Module.symvers $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) modules: $(module-dirs) @@ -1554,7 +1566,8 @@ help: # Dummies... PHONY += prepare scripts -prepare: ; +prepare: + $(cmd_crmodverdir) scripts: ; endif # KBUILD_EXTMOD @@ -1681,17 +1694,14 @@ endif # Modules /: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) # Make sure the latest headers are built for Documentation Documentation/ samples/: headers_install %/: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) %.ko: prepare scripts FORCE - $(cmd_crmodverdir) $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ $(build)=$(build-dir) $(@:.ko=.o) $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost diff --git a/arch/Kconfig b/arch/Kconfig index 6801123932a503ba64bcf1c9dfbb7877fff0f094..00f55932ba781313a9b89496ccd9af407e36eac3 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -18,6 +18,15 @@ config KEXEC_CORE select CRASH_CORE bool +config QUICK_KEXEC + bool "Support for quick kexec" + depends on KEXEC_CORE + help + It uses pre-reserved memory to accelerate kexec, just like + crash kexec, loads new kernel and initrd to reserved memory, + and boots new kernel on that memory. It will save the time + of relocating kernel. 
+ config HAVE_IMA_KEXEC bool @@ -71,6 +80,7 @@ config KPROBES config JUMP_LABEL bool "Optimize very unlikely/likely branches" depends on HAVE_ARCH_JUMP_LABEL + depends on CC_HAS_ASM_GOTO help This option enables a transparent branch optimization that makes certain almost-always-true or almost-always-false branch @@ -275,6 +285,21 @@ config ARCH_THREAD_STACK_ALLOCATOR config ARCH_WANTS_DYNAMIC_TASK_STRUCT bool +config ARCH_32BIT_OFF_T + bool + depends on !64BIT + help + All new 32-bit architectures should have 64-bit off_t type on + userspace side which corresponds to the loff_t kernel type. This + is the requirement for modern ABIs. Some existing architectures + already have 32-bit off_t. This option is enabled for all such + architectures explicitly. Namely: arc, arm, blackfin, cris, frv, + h8300, hexagon, m32r, m68k, metag, microblaze, mips32, mn10300, + nios2, openrisc, parisc32, powerpc32, score, sh, sparc, tile32, + unicore32, x86_32 and xtensa. This is the complete list. Any + new 32-bit architecture should declare 64-bit off_t type on user + side and so should not enable this option. + config HAVE_REGS_AND_STACK_ACCESS_API bool help @@ -365,6 +390,13 @@ config HAVE_RCU_TABLE_FREE config HAVE_RCU_TABLE_INVALIDATE bool +config ARCH_WANT_IRQS_OFF_ACTIVATE_MM + bool + help + Temporary select until all architectures can be converted to have + irqs disabled over activate_mm. Architectures that do IPI based TLB + shootdowns should enable this. + config ARCH_HAVE_NMI_SAFE_CMPXCHG bool @@ -527,6 +559,10 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD config HAVE_ARCH_HUGE_VMAP bool +config HAVE_ARCH_HUGE_VMALLOC + depends on HAVE_ARCH_HUGE_VMAP + bool + config HAVE_ARCH_SOFT_DIRTY bool @@ -862,6 +898,9 @@ config HAVE_ARCH_PREL32_RELOCATIONS architectures, and don't require runtime relocation on relocatable kernels. 
+config ARCH_USE_MEMREMAP_PROT + bool + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index ca3322536f7247d706bd70d7b7f044f3c3807bea..bfd3c01038f83bac87462029595c7d502bb123d3 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -68,7 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0, cmp; u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd146222c7c099eb1af0611eb68ec59c91..432402c8e47f5b8e1126fae0e2971d74d9d95f76 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h @@ -56,15 +56,15 @@ #elif defined(CONFIG_ALPHA_DP264) || \ defined(CONFIG_ALPHA_LYNX) || \ - defined(CONFIG_ALPHA_SHARK) || \ - defined(CONFIG_ALPHA_EIGER) + defined(CONFIG_ALPHA_SHARK) # define NR_IRQS 64 #elif defined(CONFIG_ALPHA_TITAN) #define NR_IRQS 80 #elif defined(CONFIG_ALPHA_RAWHIDE) || \ - defined(CONFIG_ALPHA_TAKARA) + defined(CONFIG_ALPHA_TAKARA) || \ + defined(CONFIG_ALPHA_EIGER) # define NR_IRQS 128 #elif defined(CONFIG_ALPHA_WILDFIRE) diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h index 6a8c53dec57e6e3aa22a5be371b922ebb1bd154d..b7c77bb1bfd20368a8ff95a93d5493353e58023a 100644 --- a/arch/alpha/include/asm/termios.h +++ b/arch/alpha/include/asm/termios.h @@ -73,9 +73,15 @@ }) #define user_termios_to_kernel_termios(k, u) \ - copy_from_user(k, u, sizeof(struct termios)) + copy_from_user(k, u, sizeof(struct termios2)) #define kernel_termios_to_user_termios(u, k) \ + copy_to_user(u, k, sizeof(struct termios2)) + +#define user_termios_to_kernel_termios_1(k, u) \ + copy_from_user(k, u, sizeof(struct termios)) + +#define kernel_termios_to_user_termios_1(u, k) \ copy_to_user(u, k, sizeof(struct termios)) #endif /* _ALPHA_TERMIOS_H */ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 87d8c4f0307d11539c15df2aa8dace09656a6560..cf4ac791a592fdf8dc595e9407182c85bdac4243 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -30,13 +30,15 @@ * Address valid if: * - "addr" doesn't have any high-bits set * - AND "size" doesn't have any high-bits set - * - AND "addr+size" doesn't have any high-bits set + * - AND "addr+size-(size != 0)" doesn't have any high-bits set * - OR we are in kernel mode. 
*/ -#define __access_ok(addr, size) \ - ((get_fs().seg & (addr | size | (addr+size))) == 0) +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) -#define access_ok(type, addr, size) \ +#define access_ok(addr, size) \ ({ \ __chk_user_ptr(addr); \ __access_ok(((unsigned long)(addr)), (size)); \ diff --git a/arch/alpha/include/asm/vmalloc.h b/arch/alpha/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..0a9a366a4d344effdb41fabf28b2d8d5bb3aee9c --- /dev/null +++ b/arch/alpha/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_ALPHA_VMALLOC_H +#define _ASM_ALPHA_VMALLOC_H + +#endif /* _ASM_ALPHA_VMALLOC_H */ diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h index 3729d92d3fa854599a99ba57c56aa33b40b4e0c8..dc8c20ac7191f2457137a6b53220557caf3fb941 100644 --- a/arch/alpha/include/uapi/asm/ioctls.h +++ b/arch/alpha/include/uapi/asm/ioctls.h @@ -32,6 +32,11 @@ #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) + #define TIOCSWINSZ _IOW('t', 103, struct winsize) #define TIOCGWINSZ _IOR('t', 104, struct winsize) #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index f9d4e6b6d4bd1665d3fe491c98358312dc2c727f..977df8306905f7a4506c36e103573dd441b1ce25 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -33,6 +33,9 @@ #define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x100000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x200000/* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ +#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_SYNC 2 /* synchronous memory sync */ diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h index de6c8360fbe3657e3ddf7cd6bb648a3d8b0fdb71..4575ba34a0eaeecb9b17cb9f3b6b18a698bafdfb 100644 --- a/arch/alpha/include/uapi/asm/termbits.h +++ b/arch/alpha/include/uapi/asm/termbits.h @@ -26,6 +26,19 @@ struct termios { speed_t c_ospeed; /* output speed */ }; +/* Alpha has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + /* Alpha has matching termios and ktermios */ struct ktermios { @@ -152,6 +165,7 @@ struct ktermios { #define B3000000 00034 #define B3500000 00035 #define B4000000 00036 +#define BOTHER 00037 #define CSIZE 00001400 #define CS5 00000000 @@ -169,6 +183,9 @@ struct ktermios { #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ +#define CIBAUD 07600000 +#define IBSHIFT 16 + /* c_lflag bits */ 
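Editor's note (illustrative sketch, not part of the patch): the TCGETS2/TCSETS2 ioctls, struct termios2 and the BOTHER/CIBAUD bits added for alpha above mirror the interface other architectures already expose. A minimal userspace use of it, setting an arbitrary baud rate; the device path and rate are placeholders:

/* Open a serial port and program a non-standard baud rate via termios2.
 * Include <asm/termbits.h> here, not <termios.h>, to get struct termios2. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/termbits.h>

int open_custom_baud(const char *dev, int rate)
{
	struct termios2 tio;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	if (ioctl(fd, TCGETS2, &tio))
		goto err;

	tio.c_cflag &= ~CBAUD;		/* drop any Bnnn constant */
	tio.c_cflag |= BOTHER;		/* rate comes from c_ispeed/c_ospeed */
	tio.c_ispeed = rate;
	tio.c_ospeed = rate;

	if (ioctl(fd, TCSETS2, &tio))
		goto err;

	return fd;			/* e.g. open_custom_baud("/dev/ttyS0", 2000000) */
err:
	close(fd);
	return -1;
}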
#define ISIG 0x00000080 #define ICANON 0x00000100 diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 8c0c4ee0be6edb62d90be4ccbffdb4210a09b89c..33e904a05881797481c1d95b68e56332040bf809 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -65,7 +65,7 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, if (act) { old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || + if (!access_ok(act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) @@ -77,7 +77,7 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || + if (!access_ok(oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) @@ -207,7 +207,7 @@ do_sigreturn(struct sigcontext __user *sc) sigset_t set; /* Verify that it's a good sigcontext before using it */ - if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) + if (!access_ok(sc, sizeof(*sc))) goto give_sigsegv; if (__get_user(set.sig[0], &sc->sc_mask)) goto give_sigsegv; @@ -235,7 +235,7 @@ do_rt_sigreturn(struct rt_sigframe __user *frame) sigset_t set; /* Verify that it's a good ucontext_t before using it */ - if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc))) + if (!access_ok(&frame->uc, sizeof(frame->uc))) goto give_sigsegv; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; @@ -332,7 +332,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp); @@ -377,7 +377,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c index 438b10c44d732355888e856668fc09c5a9685e85..2b7a314b84522f87f8696246cb20e8b76714afc8 100644 --- a/arch/alpha/kernel/srmcons.c +++ b/arch/alpha/kernel/srmcons.c @@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port) } while((result.bits.status & 1) && (++loops < 10)); if (count) - tty_schedule_flip(port); + tty_flip_buffer_push(port); return count; } diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index ddb9c2f376fa2965035311f48b77e049581ed982..e53f96e8aa6d2bbe60bed40d0d549f3b428cdbc5 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -333,7 +333,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len, unsigned long doff = 7 & (unsigned long) dst; if (len) { - if (!access_ok(VERIFY_READ, src, len)) { + if (!access_ok(src, len)) { if (errp) *errp = -EFAULT; memset(dst, 0, len); return sum; diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9432f61d6f1383224a6aff1f67249..0ea6b40223cc0f890e899f95a708ae0d28ff12f7 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -78,7 +78,7 @@ 
__load_new_mm_context(struct mm_struct *next_mm) /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? (r)+8 : (r)-10]) + (r) <= 18 ? (r)+10 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, @@ -169,7 +169,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index a045f30860477bd60e84e50d215684da38edbc19..4c4e85a87a384109a50f788239591f8372cbbc0d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -14,7 +14,8 @@ config ARC select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SG_CHAIN select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC - select BUILDTIME_EXTABLE_SORT + select ARCH_32BIT_OFF_T + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select COMMON_CLK select DMA_NONCOHERENT_OPS @@ -26,6 +27,7 @@ config ARC select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_PENDING_IRQ if SMP + select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK @@ -111,7 +113,7 @@ endmenu choice prompt "ARC Instruction Set" - default ISA_ARCOMPACT + default ISA_ARCV2 config ISA_ARCOMPACT bool "ARCompact ISA" @@ -198,7 +200,6 @@ config NR_CPUS config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" - default y if ARC_UBOOT_SUPPORT help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non @@ -419,6 +420,14 @@ config ARC_HAS_ACCL_REGS (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process +config ARC_IRQ_NO_AUTOSAVE + bool "Disable hardware autosave regfile on interrupts" + default n + help + On HS cores, taken interrupt auto saves the regfile on stack. + This is programmable and can be optionally disabled in which case + software INTERRUPT_PROLOGUE/EPILGUE do the needed work + endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" @@ -530,18 +539,6 @@ config ARC_DBG_TLB_PARANOIA endif -config ARC_UBOOT_SUPPORT - bool "Support uboot arg Handling" - default n - help - ARC Linux by default checks for uboot provided args as pointers to - external cmdline or DTB. This however breaks in absence of uboot, - when booting from Metaware debugger directly, as the registers are - not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus - registers look like uboot args to kernel which then chokes. - So only enable the uboot arg checking/processing if users are sure - of uboot being in play. - config ARC_BUILTIN_DTB_NAME string "Built in DTB" help diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 644815c0516e75d2ed850f08d9dffd2c65f131c2..16e6cc22e25ccf6adafba6620f3f222d2d6e78d2 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -6,7 +6,7 @@ # published by the Free Software Foundation. 
# -KBUILD_DEFCONFIG := nsim_700_defconfig +KBUILD_DEFCONFIG := nsim_hs_defconfig cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index ef149f59929ae394a30695fa0940060acef15817..d131c54acd3ec07e43499d1cc46522281b6edbf2 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -175,6 +175,7 @@ interrupt-names = "macirq"; phy-mode = "rgmii"; snps,pbl = <32>; + snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; clock-names = "stmmaceth"; phy-handle = <&phy0>; @@ -183,6 +184,9 @@ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ dma-coherent; + tx-fifo-depth = <4096>; + rx-fifo-depth = <4096>; + mdio { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 41bc08be6a3b4202bbe27f74fdc8e01a56e4c3cd..020d4493edfd0530423659a4f401258ecf7bc0be 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y @@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 1e1c4a8011b523dc88b89fb39e90dfeab5a3154b..666314fffc601be8c455664152f9111814cdd446 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 6b0c0cfd5c304fd6ae58fc3fd92d9cb53e086d2d..429832b8560b878b65be199f69eb740a6b362054 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1dec2b4bc5e6ea70696249d6815dfe69e73eb21c..651fa7978e510f1f0608870fae414d2e123deb59 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_RAM=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set @@ -65,6 +66,7 @@ CONFIG_EXT3_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 31ba224bbfb474985b49930dea193c6bbb1a5f37..621f59407d7693057f642d64cfe31dbdb7cd7d9d 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y @@ -30,7 +31,6 @@ 
CONFIG_ARC_CACHE_LINE_SHIFT=5 # CONFIG_ARC_HAS_LLSC is not set CONFIG_ARC_KVADDR_SIZE=402 CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_PREEMPT=y CONFIG_NET=y CONFIG_UNIX=y @@ -73,6 +73,7 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 8e0b8b134cd9ed89652b88aea3bade03881e95c9..219c2a65294b82176400c9833e3606cd79f87a1c 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_LBDAF is not set diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index f14eeff7d3084948c16d8905677ec25a629ccdcc..35dfc6491a09486ef0176b8ced1c080efa870ec0 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_LBDAF is not set @@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 025298a483056b1ca782e83056f8b0a44d193809..1638e5bc967246686735bd6629ce9d7087caffd4 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index df7b77b13b823dc0c8d41f543181b12a20212cbd..11cfbdb0f441567ee93d6283e9c8265454c818cf 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FTRACE=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index a7f65313f84a56a3ddc0307c669bbfbcf4c0386f..e71ade3cf9c809398a8c51bffd5bdff3f39c465a 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y # CONFIG_AIO is not set CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set +CONFIG_ISA_ARCOMPACT=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index db47c3541f15931b2927fd1bd27749f2568e9761..e447ace6fa1cab14f00f6ebadb7b15d2812c616a 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" CONFIG_PREEMPT=y CONFIG_NET=y @@ -85,6 +84,7 @@ CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y 
CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index a8ac5e917d9a5895a4bc3ba30be01fd222ecec71..c82cdb10aaf4fba577b43188809a395298ee3c5e 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -15,8 +15,6 @@ CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set -# CONFIG_ARC_SMP_HALT_ON_RESET is not set -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y CONFIG_NET=y @@ -90,6 +88,7 @@ CONFIG_NTFS_FS=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y # CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 49bfbd879caa6ffa08553e9b0f49b542739bb95b..bdbdaef902ebd6df1104c8203f5d4fa971f12082 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { #endif }; +struct bcr_uarch_build_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:8, prod:8, maj:8, min:8; +#else + unsigned int min:8, maj:8, prod:8, pad:8; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 8da87feec59aab58a1309fbd5f92fb12b72d8799..99e6d8948f4acf01e963f4c696fb424c7d543934 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x) /* * __ffs: Similar to ffs, but zero based (0-31) */ -static inline __attribute__ ((const)) int __ffs(unsigned long word) +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) { if (!word) return word; @@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x) /* * __ffs: Similar to ffs, but zero based (0-31) */ -static inline __attribute__ ((const)) int __ffs(unsigned long x) +static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) { - int n; + unsigned long n; asm volatile( " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index ff7d3232764a29a41503a213d3bd385e232acf42..2ad77fb43639cd89fe2a00033790d565a2ff1935 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -52,6 +52,17 @@ #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES +/* + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit + * alignment for any atomic64_t embedded in buffer. + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed + * value of 4 (and not 8) in ARC ABI. 
+ */ +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) +#define ARCH_SLAB_MINALIGN 8 +#endif + extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); @@ -113,7 +124,9 @@ extern unsigned long perip_base, perip_end; /* IO coherency related Auxiliary registers */ #define ARC_REG_IO_COH_ENABLE 0x500 +#define ARC_IO_COH_ENABLE_BIT BIT(0) #define ARC_REG_IO_COH_PARTIAL 0x501 +#define ARC_IO_COH_PARTIAL_BIT BIT(0) #define ARC_REG_IO_COH_AP0_BASE 0x508 #define ARC_REG_IO_COH_AP0_SIZE 0x509 diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index d819de1c5d10ed21d951a4cf18a7cef75370e1de..3ea4112c8302a8b71b3dc7e473fd7e78d3992b1d 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) #endif /* CONFIG_ARC_HAS_LLSC */ -#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \ - (unsigned long)(o), (unsigned long)(n))) +#define cmpxchg(ptr, o, n) ({ \ + (typeof(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n)); \ +}) /* * atomic_cmpxchg is same as cmpxchg @@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, return __xchg_bad_pointer(); } -#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \ - sizeof(*(ptr)))) +#define xchg(ptr, with) ({ \ + (typeof(*(ptr)))__xchg((unsigned long)(with), \ + (ptr), \ + sizeof(*(ptr))); \ +}) #endif /* CONFIG_ARC_PLAT_EZNPS */ diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3e22829847f88a4da884fdc9edf93..225e7df2d8ed8f71932ea5abccaa3390a624c16d 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -17,6 +17,33 @@ ; ; Now manually save: r12, sp, fp, gp, r25 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + st.as r9, [sp, -10] ; save r9 in it's final stack slot + sub sp, sp, 12 ; skip JLI, LDI, EI + + PUSH lp_count + PUSHAX lp_start + PUSHAX lp_end + PUSH blink + + PUSH r11 + PUSH r10 + + sub sp, sp, 4 ; skip r9 + + PUSH r8 + PUSH r7 + PUSH r6 + PUSH r5 + PUSH r4 + PUSH r3 + PUSH r2 + PUSH r1 + PUSH r0 +.endif +#endif + #ifdef CONFIG_ARC_HAS_ACCL_REGS PUSH r59 PUSH r58 @@ -86,6 +113,33 @@ POP r59 #endif +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + POP r0 + POP r1 + POP r2 + POP r3 + POP r4 + POP r5 + POP r6 + POP r7 + POP r8 + POP r9 + POP r10 + POP r11 + + POP blink + POPAX lp_end + POPAX lp_start + + POP r9 + mov lp_count, r9 + + add sp, sp, 12 ; skip JLI, LDI, EI + ld.as r9, [sp, -10] ; reload r9 which got clobbered +.endif +#endif + .endm /*------------------------------------------------------------------------*/ diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h index eb887dd13e74862b9bfbba21c2d2d9f09e3b113c..c29c3fae68549b5d84a230a17094698db3037994 100644 --- a/arch/arc/include/asm/futex.h +++ b/arch/arc/include/asm/futex.h @@ -126,7 +126,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval, int ret = 0; u32 existval; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; #ifndef CONFIG_ARC_HAS_LLSC diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index c22b181e8206f3162c4e0e19214f8b303f13c576..2f39d9b3886e4fc638dfa6a8a9b2fc45453d6c69 100644 --- a/arch/arc/include/asm/io.h +++ 
b/arch/arc/include/asm/io.h @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_ISA_ARCV2 #include @@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) return w; } +/* + * {read,write}s{b,w,l}() repeatedly access the same IO address in + * native endianness in 8-, 16-, 32-bit chunks {into,from} memory, + * @count times + */ +#define __raw_readsx(t,f) \ +static inline void __raw_reads##f(const volatile void __iomem *addr, \ + void *ptr, unsigned int count) \ +{ \ + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ + u##t *buf = ptr; \ + \ + if (!count) \ + return; \ + \ + /* Some ARC CPU's don't support unaligned accesses */ \ + if (is_aligned) { \ + do { \ + u##t x = __raw_read##f(addr); \ + *buf++ = x; \ + } while (--count); \ + } else { \ + do { \ + u##t x = __raw_read##f(addr); \ + put_unaligned(x, buf++); \ + } while (--count); \ + } \ +} + +#define __raw_readsb __raw_readsb +__raw_readsx(8, b) +#define __raw_readsw __raw_readsw +__raw_readsx(16, w) +#define __raw_readsl __raw_readsl +__raw_readsx(32, l) + #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 b, volatile void __iomem *addr) { @@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) } +#define __raw_writesx(t,f) \ +static inline void __raw_writes##f(volatile void __iomem *addr, \ + const void *ptr, unsigned int count) \ +{ \ + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \ + const u##t *buf = ptr; \ + \ + if (!count) \ + return; \ + \ + /* Some ARC CPU's don't support unaligned accesses */ \ + if (is_aligned) { \ + do { \ + __raw_write##f(*buf++, addr); \ + } while (--count); \ + } else { \ + do { \ + __raw_write##f(get_unaligned(buf++), addr); \ + } while (--count); \ + } \ +} + +#define __raw_writesb __raw_writesb +__raw_writesx(8, b) +#define __raw_writesw __raw_writesw +__raw_writesx(16, w) +#define __raw_writesl __raw_writesl +__raw_writesx(32, l) + /* * MMIO can also get buffered/optimized in micro-arch, so barriers needed * Based on ARM model for the typical use case @@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); }) +#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); }) +#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) +#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); }) +#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); }) +#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); }) /* * Relaxed API for drivers which can handle barrier ordering themselves diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 9185541035cc3a716b59eb12a159850b18c5a7ea..6958545390f0f847ed3a7745b7325964d7f23f17 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ + /* All jump instructions that are taken */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 
"ijmptak", [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ #ifdef CONFIG_ISA_ARCV2 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c0c3c81e136e3ae226562f5505b8e..eabc3efa6c6ddf9ba97a3f1ca7cd7379d144e2d9 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } diff --git a/arch/arc/include/asm/vmalloc.h b/arch/arc/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..973095aad665ad5b01c54d06c9d28823dc3b2b65 --- /dev/null +++ b/arch/arc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_ARC_VMALLOC_H +#define _ASM_ARC_VMALLOC_H + +#endif /* _ASM_ARC_VMALLOC_H */ diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 517178b1daef30f5c6c89fbcb3362ad38c12543f..660dbb2e799cb61592e40631946afce5321a8b81 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -17,6 +17,7 @@ #define _UAPI_ASM_ARC_UNISTD_H #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa690d1c72afed97f80161e4167db6..562089d62d9d68cf6fd0be3e9c1e607dfb1decc5 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -209,7 +209,9 @@ restore_regs: ;####### Return from Intr ####### debug_marker_l1: - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + btst r0, STATUS_DE_BIT ; Z flag set if bit clear + bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cca8ebd334402848d98aa22f07b8bf..a72bbda2f7aad0099860ef3f5e99af12722defae 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -17,6 +17,7 @@ #include #include #include +#include .macro CPU_EARLY_SETUP @@ -47,6 +48,15 @@ sr r5, [ARC_REG_DC_CTRL] 1: + +#ifdef CONFIG_ISA_ARCV2 + ; Unaligned access is disabled at reset, so re-enable early as + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access + ; by default + lr r5, [status32] + bset r5, r5, STATUS_AD_BIT + kflag r5 +#endif .endm .section .init.text, "ax",@progbits @@ -90,15 +100,14 @@ ENTRY(stext) st.ab 0, [r5, 4] 1: -#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline 
in r2, [2] DTB in r2 - ; r1 = magic number (board identity, unused as of now + ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in setup_arch() + ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] + st r1, [@uboot_magic] st r2, [@uboot_arg] -#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3efc3bc3a9217aaf197763eafb275e..cf18b3e5a934d34c684edcc7aa84533a10f932bf 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -49,11 +49,13 @@ void arc_init_IRQ(void) *(unsigned int *)&ictrl = 0; +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ ictrl.save_blink = 1; ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ +#endif WRITE_AUX(AUX_IRQ_CTRL, ictrl); diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 8aec462d90fbe8f0aa88847272d02004a863f2db..30f66b12354183a708abe7fe816c8a3c3aa799f9 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -490,8 +490,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev) /* loop thru all available h/w condition indexes */ for (j = 0; j < cc_bcr.c; j++) { write_aux_reg(ARC_REG_CC_INDEX, j); - cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); - cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); + cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0)); + cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1)); /* See if it has been mapped to a perf event_id */ for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 8ce6e723591556fc12765a19e08090632bb9d0ba..641c364fc232f01fe0763423894130047286dd35 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -61,7 +61,7 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) /* Z indicates to userspace if operation succeded */ regs->status32 &= ~STATUS_Z_MASK; - ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)); + ret = access_ok(uaddr, sizeof(*uaddr)); if (!ret) goto fail; diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index b2cae79a25d716165eaf65060cb8ed0be11f3b6c..89c97dcfa3602b3f5e1de180c9a9b7f47a76f23b 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -35,6 +35,7 @@ unsigned int intr_to_DE_cnt; /* Part of U-boot ABI: see head.S */ int __initdata uboot_tag; +int __initdata uboot_magic; char __initdata *uboot_arg; const struct machine_desc *machine_desc; @@ -196,13 +197,29 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.num_pred = 2048 << bpu.pte; if (cpu->core.family >= 0x54) { - unsigned int exec_ctrl; - READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = !(exec_ctrl & 1); + struct bcr_uarch_build_arcv2 uarch; - /* dual issue always present for this core */ - cpu->extn.dual = 1; + /* + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was + * dual issue only (HS4x). 
But next uarch rev (1:0) + * allows it be configured for single issue (HS3x) + * Ensure we fiddle with dual issue only on HS4x + */ + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); + + if (uarch.prod == 4) { + unsigned int exec_ctrl; + + /* dual issue hardware always present */ + cpu->extn.dual = 1; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + + /* dual issue hardware enabled ? */ + cpu->extn.dual_enb = !(exec_ctrl & 1); + + } } } @@ -449,43 +466,85 @@ void setup_processor(void) arc_chk_core_config(); } -static inline int is_kernel(unsigned long addr) +static inline bool uboot_arg_invalid(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return 0; + /* + * Check that it is a untranslated address (although MMU is not enabled + * yet, it being a high address ensures this is not by fluke) + */ + if (addr < PAGE_OFFSET) + return true; + + /* Check that address doesn't clobber resident kernel image */ + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; } -void __init setup_arch(char **cmdline_p) +#define IGNORE_ARGS "Ignore U-boot args: " + +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ +#define UBOOT_TAG_NONE 0 +#define UBOOT_TAG_CMDLINE 1 +#define UBOOT_TAG_DTB 2 +/* We always pass 0 as magic from U-boot */ +#define UBOOT_MAGIC_VALUE 0 + +void __init handle_uboot_args(void) { -#ifdef CONFIG_ARC_UBOOT_SUPPORT - /* make sure that uboot passed pointer to cmdline/dtb is valid */ - if (uboot_tag && is_kernel((unsigned long)uboot_arg)) - panic("Invalid uboot arg\n"); - - /* See if u-boot passed an external Device Tree blob */ - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) -#endif - { - /* No, so try the embedded one */ + bool use_embedded_dtb = true; + bool append_cmdline = false; + + /* check that we know this tag */ + if (uboot_tag != UBOOT_TAG_NONE && + uboot_tag != UBOOT_TAG_CMDLINE && + uboot_tag != UBOOT_TAG_DTB) { + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); + goto ignore_uboot_args; + } + + if (uboot_magic != UBOOT_MAGIC_VALUE) { + pr_warn(IGNORE_ARGS "non zero uboot magic\n"); + goto ignore_uboot_args; + } + + if (uboot_tag != UBOOT_TAG_NONE && + uboot_arg_invalid((unsigned long)uboot_arg)) { + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); + goto ignore_uboot_args; + } + + /* see if U-boot passed an external Device Tree blob */ + if (uboot_tag == UBOOT_TAG_DTB) { + machine_desc = setup_machine_fdt((void *)uboot_arg); + + /* external Device Tree blob is invalid - use embedded one */ + use_embedded_dtb = !machine_desc; + } + + if (uboot_tag == UBOOT_TAG_CMDLINE) + append_cmdline = true; + +ignore_uboot_args: + + if (use_embedded_dtb) { machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) panic("Embedded DT invalid\n"); + } - /* - * If we are here, it is established that @uboot_arg didn't - * point to DT blob. Instead if u-boot says it is cmdline, - * append to embedded DT cmdline. - * setup_machine_fdt() would have populated @boot_command_line - */ - if (uboot_tag == 1) { - /* Ensure a whitespace between the 2 cmdlines */ - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, uboot_arg, - COMMAND_LINE_SIZE); - } + /* + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this + * append processing can only happen after. 
+ */ + if (append_cmdline) { + /* Ensure a whitespace between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); } +} + +void __init setup_arch(char **cmdline_p) +{ + handle_uboot_args(); /* Save unparsed command line copy for /proc/cmdline */ *cmdline_p = boot_command_line; diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 48685445002e77ee55a2fe24c40a9c63319b4489..1bfb7de696bd67361a098be6a705df21d3c0ff00 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -169,7 +169,7 @@ SYSCALL_DEFINE0(rt_sigreturn) sf = (struct rt_sigframe __force __user *)(regs->sp); - if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) + if (!access_ok(sf, sizeof(*sf))) goto badframe; if (__get_user(magic, &sf->sigret_magic)) @@ -219,7 +219,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, frame = (void __user *)((sp - framesize) & ~7); /* Check that we can actually write to the signal frame */ - if (!access_ok(VERIFY_WRITE, frame, framesize)) + if (!access_ok(frame, framesize)) frame = NULL; return frame; diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index e8d9fb4523462a9807358fea19c4e7668cc1126d..215f515442e03d53ee3a18ade4c62e2a06987b3b 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -18,6 +18,8 @@ #include #include +#define ARC_PATH_MAX 256 + /* * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) * -Prints 3 regs per line and a CR. @@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs) print_reg_file(&(cregs->r13), 13); } -static void print_task_path_n_nm(struct task_struct *tsk, char *buf) +static void print_task_path_n_nm(struct task_struct *tsk) { char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; + char buf[ARC_PATH_MAX]; mm = get_task_mm(tsk); if (!mm) @@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) mmput(mm); if (exe_file) { - path_nm = file_path(exe_file, buf, 255); + path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1); fput(exe_file); } @@ -80,10 +83,9 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) pr_info("Path: %s\n", !IS_ERR(path_nm) ? 
path_nm : "?"); } -static void show_faulting_vma(unsigned long address, char *buf) +static void show_faulting_vma(unsigned long address) { struct vm_area_struct *vma; - char *nm = buf; struct mm_struct *active_mm = current->active_mm; /* can't use print_vma_addr() yet as it doesn't check for @@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf) * if the container VMA is not found */ if (vma && (vma->vm_start <= address)) { + char buf[ARC_PATH_MAX]; + char *nm = "?"; + if (vma->vm_file) { - nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); + nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1); if (IS_ERR(nm)) nm = "?"; } @@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs) { struct task_struct *tsk = current; struct callee_regs *cregs; - char *buf; - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return; + /* + * generic code calls us with preemption disabled, but some calls + * here could sleep, so re-enable to avoid lockdep splat + */ + preempt_enable(); - print_task_path_n_nm(tsk, buf); + print_task_path_n_nm(tsk); show_regs_print_info(KERN_INFO); show_ecr_verbose(regs); @@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs) (void *)regs->blink, (void *)regs->ret); if (user_mode(regs)) - show_faulting_vma(regs->ret, buf); /* faulting code, not data */ + show_faulting_vma(regs->ret); /* faulting code, not data */ pr_info("[STAT32]: 0x%08lx", regs->status32); @@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs) if (cregs) show_callee_regs(cregs); - free_page((unsigned long)buf); + preempt_disable(); } void show_kernel_fault_diag(const char *str, struct pt_regs *regs, diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 183391d4d33a4138d04418da4b90d61efe38c6c4..9cf2ee8b434937e14e03f4ef29090d56dd0cbdbb 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz) MAX_DMA_ADDRESS); } -static void *unw_hdr_alloc(unsigned long sz) -{ - return kmalloc(sz, GFP_KERNEL); -} - static void init_unwind_table(struct unwind_table *table, const char *name, const void *core_start, unsigned long core_size, const void *init_start, unsigned long init_size, @@ -370,6 +365,10 @@ static void init_unwind_hdr(struct unwind_table *table, } #ifdef CONFIG_MODULES +static void *unw_hdr_alloc(unsigned long sz) +{ + return kmalloc(sz, GFP_KERNEL); +} static struct unwind_table *last_table; diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58e0e6620984468b2c5acc1fb2870b..ea14b0bf3116dfecb2655bd4a22816e05df13c4d 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -25,15 +25,11 @@ #endif #ifdef CONFIG_ARC_HAS_LL64 -# define PREFETCH_READ(RX) prefetch [RX, 56] -# define PREFETCH_WRITE(RX) prefetchw [RX, 64] # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else -# define PREFETCH_READ(RX) prefetch [RX, 28] -# define PREFETCH_WRITE(RX) prefetchw [RX, 32] # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 @@ -41,8 +37,6 @@ #endif ENTRY_CFI(memcpy) - prefetch [r1] ; Prefetch the read location - prefetchw [r0] ; Prefetch the write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) - PREFETCH_READ (r1) - PREFETCH_WRITE (r3) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) @@ -117,9 +109,7 @@ 
ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 24) or r7, r7, r5 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 16) or r7, r7, r5 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 62ad4bcb841aa70811a637c3621d80190ed0c352..f230bb7092fdb3d7d98883ab7310db1b4bc56654 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -7,11 +7,39 @@ */ #include +#include -#undef PREALLOC_NOT_AVAIL +/* + * The memset implementation below is optimized to use prefetchw and prealloc + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) + * If you want to implement optimized memset for other possible L1 data cache + * line lengths (32B and 128B) you should rewrite code carefully checking + * we don't call any prefetchw/prealloc instruction for L1 cache lines which + * don't belongs to memset area. + */ + +#if L1_CACHE_SHIFT == 6 + +.macro PREALLOC_INSTR reg, off + prealloc [\reg, \off] +.endm + +.macro PREFETCHW_INSTR reg, off + prefetchw [\reg, \off] +.endm + +#else + +.macro PREALLOC_INSTR +.endm + +.macro PREFETCHW_INSTR +.endm + +#endif ENTRY_CFI(memset) - prefetchw [r0] ; Prefetch the write location + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -48,11 +76,8 @@ ENTRY_CFI(memset) lpnz @.Lset64bytes ;; LOOP START -#ifdef PREALLOC_NOT_AVAIL - prefetchw [r3, 64] ;Prefetch the next write location -#else - prealloc [r3, 64] -#endif + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching + #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -85,7 +110,6 @@ ENTRY_CFI(memset) lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START - prefetchw [r3, 32] ;Prefetch the next write location #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index f2701c13a66b209571ff89b71ac6c93cabb9835d..cf9619d4efb4f86d68cb2417558fe3327c55c408 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -1144,6 +1144,20 @@ noinline void __init arc_ioc_setup(void) { unsigned int ioc_base, mem_sz; + /* + * If IOC was already enabled (due to bootloader) it technically needs to + * be reconfigured with aperture base,size corresponding to Linux memory map + * which will certainly be different than uboot's. But disabling and + * reenabling IOC when DMA might be potentially active is tricky business. + * To avoid random memory issues later, just panic here and ask user to + * upgrade bootloader to one which doesn't enable IOC + */ + if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT) + panic("IOC already enabled, please upgrade bootloader!\n"); + + if (!ioc_enable) + return; + /* * As for today we don't support both IOC and ZONE_HIGHMEM enabled * simultaneously. 
This happens because as of today IOC aperture covers @@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void) panic("IOC Aperture start must be aligned to the size of the aperture"); write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); - write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); - write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); + write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT); + write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT); /* Re-enable L1 dcache */ __dc_enable(); @@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void) if (is_isa_arcv2() && l2_line_sz && !slc_enable) arc_slc_disable(); - if (is_isa_arcv2() && ioc_enable) + if (is_isa_arcv2() && ioc_exists) arc_ioc_setup(); if (is_isa_arcv2() && l2_line_sz && slc_enable) { diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index db6913094be3c9b2bc9ad87e91f7aab779da728d..66db88cb8c62786b41953c69e1684c6b68be64d8 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; - siginfo_t info; + int si_code = SEGV_MAPERR; int ret; vm_fault_t fault; int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; - clear_siginfo(&info); - /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. @@ -83,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) * only copy the information from the master page table, * nothing more. */ - if (address >= VMALLOC_START) { + if (address >= VMALLOC_START && !user_mode(regs)) { ret = handle_kernel_vaddr_fault(address); if (unlikely(ret)) - goto bad_area_nosemaphore; + goto no_context; else return; } - info.si_code = SEGV_MAPERR; - /* * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -119,7 +115,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) * we can handle it.. 
*/ good_area: - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; /* Handle protection violation, execute on heap or stack */ @@ -143,12 +139,17 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) */ fault = handle_mm_fault(vma, address, flags); - /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */ if (unlikely(fatal_signal_pending(current))) { - if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY)) - up_read(&mm->mmap_sem); - if (user_mode(regs)) + + /* + * if fault retry, mmap_sem already relinquished by core mm + * so OK to return to user mode (with signal handled first) + */ + if (fault & VM_FAULT_RETRY) { + if (!user_mode(regs)) + goto no_context; return; + } } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); @@ -167,7 +168,6 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) } if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; goto retry; } @@ -195,15 +195,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) bad_area: up_read(&mm->mmap_sem); -bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { tsk->thread.fault_address = address; - info.si_signo = SIGSEGV; - info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void __user *)address; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); return; } @@ -238,9 +233,5 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) goto no_context; tsk->thread.fault_address = address; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); } diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index ba145065c579bfff5af1d0ca4e569a8b8c49da32..f890b2f9f82f0ead84eaba798ccd8876faee1fd7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -138,7 +138,8 @@ void __init setup_arch_memory(void) */ memblock_add_node(low_mem_start, low_mem_sz, 0); - memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); + memblock_reserve(CONFIG_LINUX_LINK_BASE, + __pa(_end) - CONFIG_LINUX_LINK_BASE); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 4097764fea23499a828a559f70a62a29daba14c8..fa18c00b0cfd7de6afc7985ff5a67161608f89b0 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, struct pt_regs *regs) { struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; - unsigned int pd0[mmu->ways]; unsigned long flags; - int set; + int set, n_ways = mmu->ways; + + n_ways = min(n_ways, 4); + BUG_ON(mmu->ways > 4); local_irq_save(flags); @@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, for (set = 0; set < mmu->sets; set++) { int is_valid, way; + unsigned int pd0[4]; /* read out all the ways of current set */ - for (way = 0, is_valid = 0; way < mmu->ways; way++) { + for (way = 0, is_valid = 0; way < n_ways; way++) { write_aux_reg(ARC_REG_TLBINDEX, SET_WAY_TO_IDX(mmu, set, way)); write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); @@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, continue; /* Scan the set for duplicate ways: needs a nested loop */ - for (way = 0; way < mmu->ways - 1; way++) { + for (way = 0; way < n_ways - 1; 
way++) { int n; if (!pd0[way]) continue; - for (n = way + 1; n < mmu->ways; n++) { + for (n = way + 1; n < n_ways; n++) { if (pd0[way] != pd0[n]) continue; diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index 9356753c2ed83fc8f9ee7dbf55f173401499e2e4..c285a83cbf08ff1373d40cbe1804eb18cac0544b 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_HSDK select MIGHT_HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e8cd55a5b04c0570a4ed66b522446b8eee4d202d..b9455d212c028a047f5657db1b0d6eb3c53239ba 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2,6 +2,7 @@ config ARM bool default y + select ARCH_32BIT_OFF_T select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC select ARCH_HAS_DEBUG_VIRTUAL if MMU @@ -25,7 +26,7 @@ config ARM select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION - select BUILDTIME_EXTABLE_SORT if MMU + select BUILDTIME_TABLE_SORT if MMU select CLONE_BACKWARDS select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -612,6 +613,7 @@ config ARCH_DAVINCI select HAVE_IDE select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF + select REGMAP_MMIO select RESET_CONTROLLER select USE_OF select ZONE_DMA @@ -1444,6 +1446,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. @@ -1584,8 +1587,9 @@ config ARM_PATCH_IDIV code to do integer division. config AEABI - bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K - default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K + bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \ + !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG + default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG help This option allows for the kernel to be compiled using the latest ARM ABI (aka EABI). This is only useful if you are using a user @@ -1634,6 +1638,7 @@ config ARCH_SELECT_MEMORY_MODEL config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM + select HAVE_MEMBLOCK_PFN_VALID config HAVE_GENERIC_GUP def_bool y diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f6fcb8a7988902cdbc34b5fcbbec1e36e2116abf..bee0ba1d1cfb721255ab7736cd98f1faadcdb506 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1079,14 +1079,21 @@ choice Say Y here if you want kernel low-level debugging support on SOCFPGA(Cyclone 5 and Arria 5) based platforms. - config DEBUG_SOCFPGA_UART1 + config DEBUG_SOCFPGA_ARRIA10_UART1 depends on ARCH_SOCFPGA - bool "Use SOCFPGA UART1 for low-level debug" + bool "Use SOCFPGA Arria10 UART1 for low-level debug" select DEBUG_UART_8250 help Say Y here if you want kernel low-level debugging support on SOCFPGA(Arria 10) based platforms. + config DEBUG_SOCFPGA_CYCLONE5_UART1 + depends on ARCH_SOCFPGA + bool "Use SOCFPGA Cyclone 5 UART1 for low-level debug" + select DEBUG_UART_8250 + help + Say Y here if you want kernel low-level debugging support + on SOCFPGA(Cyclone 5 and Arria 5) based platforms. 
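Editor's note (illustrative sketch, not part of the patch): the do_tlb_overlap_fault() hunk above removes a variable-length array by sizing the buffer to the architectural maximum of four ways and clamping the loop bound. The same pattern in isolation; MAX_WAYS and probe_way() are invented for illustration:

#include <stdio.h>

#define MAX_WAYS 4			/* architectural maximum, asserted upstream with BUG_ON() */

static unsigned int probe_way(unsigned int way)
{
	return 0x1000 + way;		/* stand-in for reading one TLB way */
}

static void scan_set(unsigned int hw_ways)
{
	/* was: unsigned int pd0[hw_ways];  -- a VLA on the kernel stack */
	unsigned int pd0[MAX_WAYS];
	unsigned int n_ways = hw_ways < MAX_WAYS ? hw_ways : MAX_WAYS;
	unsigned int way;

	for (way = 0; way < n_ways; way++)
		pd0[way] = probe_way(way);

	for (way = 0; way < n_ways; way++)
		printf("way %u: %#x\n", way, pd0[way]);
}

int main(void)
{
	scan_set(4);
	return 0;
}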
config DEBUG_SUN9I_UART0 bool "Kernel low-level debugging messages via sun9i UART0" @@ -1432,21 +1439,21 @@ config DEBUG_OMAP2PLUS_UART depends on ARCH_OMAP2PLUS config DEBUG_IMX_UART_PORT - int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \ - DEBUG_IMX25_UART || \ - DEBUG_IMX21_IMX27_UART || \ - DEBUG_IMX31_UART || \ - DEBUG_IMX35_UART || \ - DEBUG_IMX50_UART || \ - DEBUG_IMX51_UART || \ - DEBUG_IMX53_UART || \ - DEBUG_IMX6Q_UART || \ - DEBUG_IMX6SL_UART || \ - DEBUG_IMX6SX_UART || \ - DEBUG_IMX6UL_UART || \ - DEBUG_IMX7D_UART + int "i.MX Debug UART Port Selection" + depends on DEBUG_IMX1_UART || \ + DEBUG_IMX25_UART || \ + DEBUG_IMX21_IMX27_UART || \ + DEBUG_IMX31_UART || \ + DEBUG_IMX35_UART || \ + DEBUG_IMX50_UART || \ + DEBUG_IMX51_UART || \ + DEBUG_IMX53_UART || \ + DEBUG_IMX6Q_UART || \ + DEBUG_IMX6SL_UART || \ + DEBUG_IMX6SX_UART || \ + DEBUG_IMX6UL_UART || \ + DEBUG_IMX7D_UART default 1 - depends on ARCH_MXC help Choose UART port on which kernel low-level debug messages should be output. @@ -1647,7 +1654,8 @@ config DEBUG_UART_PHYS default 0xfe800000 if ARCH_IOP32X default 0xff690000 if DEBUG_RK32_UART2 default 0xffc02000 if DEBUG_SOCFPGA_UART0 - default 0xffc02100 if DEBUG_SOCFPGA_UART1 + default 0xffc02100 if DEBUG_SOCFPGA_ARRIA10_UART1 + default 0xffc03000 if DEBUG_SOCFPGA_CYCLONE5_UART1 default 0xffd82340 if ARCH_IOP13XX default 0xffe40000 if DEBUG_RCAR_GEN1_SCIF0 default 0xffe42000 if DEBUG_RCAR_GEN1_SCIF2 @@ -1754,7 +1762,8 @@ config DEBUG_UART_VIRT default 0xfeb30c00 if DEBUG_KEYSTONE_UART0 default 0xfeb31000 if DEBUG_KEYSTONE_UART1 default 0xfec02000 if DEBUG_SOCFPGA_UART0 - default 0xfec02100 if DEBUG_SOCFPGA_UART1 + default 0xfec02100 if DEBUG_SOCFPGA_ARRIA10_UART1 + default 0xfec03000 if DEBUG_SOCFPGA_CYCLONE5_UART1 default 0xfec12000 if (DEBUG_MVEBU_UART0 || DEBUG_MVEBU_UART0_ALTERNATE) && ARCH_MVEBU default 0xfec12100 if DEBUG_MVEBU_UART1_ALTERNATE default 0xfec10000 if DEBUG_SIRFATLAS7_UART0 @@ -1803,9 +1812,9 @@ config DEBUG_UART_8250_WORD depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250 depends on DEBUG_UART_8250_SHIFT >= 2 default y if DEBUG_PICOXCELL_UART || \ - DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_UART1 || \ - DEBUG_KEYSTONE_UART0 || DEBUG_KEYSTONE_UART1 || \ - DEBUG_ALPINE_UART0 || \ + DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_ARRIA10_UART1 || \ + DEBUG_SOCFPGA_CYCLONE5_UART1 || DEBUG_KEYSTONE_UART0 || \ + DEBUG_KEYSTONE_UART1 || DEBUG_ALPINE_UART0 || \ DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \ DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_BCM_IPROC_UART3 || \ DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 517e0e18f0b8307855447abfec63f0827cb72cec..e205bbbe2794941a051a4dac323cc024a7786d33 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -1395,7 +1395,21 @@ ENTRY(efi_stub_entry) @ Preserve return value of efi_entry() in r4 mov r4, r0 - bl cache_clean_flush + + @ our cache maintenance code relies on CP15 barrier instructions + @ but since we arrived here with the MMU and caches configured + @ by UEFI, we must check that the CP15BEN bit is set in SCTLR. + @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in + @ the enable path will be executed on v7+ only. + mrc p15, 0, r1, c1, c0, 0 @ read SCTLR + tst r1, #(1 << 5) @ CP15BEN bit set? 
+ bne 0f + orr r1, r1, #(1 << 5) @ CP15 barrier instructions + mcr p15, 0, r1, c1, c0, 0 @ write SCTLR + ARM( .inst 0xf57ff06f @ v7+ isb ) + THUMB( isb ) + +0: bl cache_clean_flush bl cache_off @ Set parameters for booting zImage according to boot protocol diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h index 07437816e0986876079aa64e9ae6450c2d45d763..6a0f1f524466efaa2344fbaead3e0c2e8ee2acb7 100644 --- a/arch/arm/boot/compressed/libfdt_env.h +++ b/arch/arm/boot/compressed/libfdt_env.h @@ -2,10 +2,14 @@ #ifndef _ARM_LIBFDT_ENV_H #define _ARM_LIBFDT_ENV_H +#include #include #include #include +#define INT32_MAX S32_MAX +#define UINT32_MAX U32_MAX + typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; diff --git a/arch/arm/boot/dts/am335x-boneblack-common.dtsi b/arch/arm/boot/dts/am335x-boneblack-common.dtsi index 325daae40278a11fca64fa96d74fd64e991da546..21bc1173fa6b9f6c1ea5aa107535a1e9d191b416 100644 --- a/arch/arm/boot/dts/am335x-boneblack-common.dtsi +++ b/arch/arm/boot/dts/am335x-boneblack-common.dtsi @@ -88,7 +88,7 @@ }; &i2c0 { - tda19988: tda19988 { + tda19988: tda19988@70 { compatible = "nxp,tda998x"; reg = <0x70>; diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index c87d01297a013b60b8752594b13b89392ce95e7a..cc59e42c91342e8e716ed62c5dbde08c48ffe583 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -57,6 +57,24 @@ enable-active-high; }; + /* TPS79501 */ + v1_8d_reg: fixedregulator-v1_8d { + compatible = "regulator-fixed"; + regulator-name = "v1_8d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + /* TPS79501 */ + v3_3d_reg: fixedregulator-v3_3d { + compatible = "regulator-fixed"; + regulator-name = "v3_3d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + matrix_keypad: matrix_keypad0 { compatible = "gpio-matrix-keypad"; debounce-delay-ms = <5>; @@ -499,10 +517,10 @@ status = "okay"; /* Regulators */ - AVDD-supply = <&vaux2_reg>; - IOVDD-supply = <&vaux2_reg>; - DRVDD-supply = <&vaux2_reg>; - DVDD-supply = <&vbat>; + AVDD-supply = <&v3_3d_reg>; + IOVDD-supply = <&v3_3d_reg>; + DRVDD-supply = <&v3_3d_reg>; + DVDD-supply = <&v1_8d_reg>; }; }; @@ -713,6 +731,7 @@ pinctrl-0 = <&cpsw_default>; pinctrl-1 = <&cpsw_sleep>; status = "okay"; + slaves = <1>; }; &davinci_mdio { @@ -720,15 +739,14 @@ pinctrl-0 = <&davinci_mdio_default>; pinctrl-1 = <&davinci_mdio_sleep>; status = "okay"; -}; -&cpsw_emac0 { - phy_id = <&davinci_mdio>, <0>; - phy-mode = "rgmii-txid"; + ethphy0: ethernet-phy@0 { + reg = <0>; + }; }; -&cpsw_emac1 { - phy_id = <&davinci_mdio>, <1>; +&cpsw_emac0 { + phy-handle = <ðphy0>; phy-mode = "rgmii-txid"; }; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index bf1a40e45c97b1b3a0cbbfa6ea583509f4e5bb8a..ba589bc41a57dc3d54a914173ca64ee0aa9eec68 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -73,6 +73,24 @@ enable-active-high; }; + /* TPS79518 */ + v1_8d_reg: fixedregulator-v1_8d { + compatible = "regulator-fixed"; + regulator-name = "v1_8d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + /* TPS78633 */ + v3_3d_reg: fixedregulator-v3_3d { + compatible = "regulator-fixed"; + regulator-name = "v3_3d"; + vin-supply = <&vbat>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = 
<3300000>; + }; + leds { pinctrl-names = "default"; pinctrl-0 = <&user_leds_s0>; @@ -501,10 +519,10 @@ status = "okay"; /* Regulators */ - AVDD-supply = <&vaux2_reg>; - IOVDD-supply = <&vaux2_reg>; - DRVDD-supply = <&vaux2_reg>; - DVDD-supply = <&vbat>; + AVDD-supply = <&v3_3d_reg>; + IOVDD-supply = <&v3_3d_reg>; + DRVDD-supply = <&v3_3d_reg>; + DVDD-supply = <&v1_8d_reg>; }; }; diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts index 4d969013f99a6180a1e841de8e736e61c6cf3b7e..d9e92671055bdeeadecccf5cb3890ab470f004f8 100644 --- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts +++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts @@ -161,7 +161,7 @@ invensense,key = [4e cc 7e eb f6 1e 35 22 00 34 0d 65 32 e9 94 89];*/ }; - bmp280: pressure@78 { + bmp280: pressure@76 { compatible = "bosch,bmp280"; reg = <0x76>; }; diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index 1ec8e0d801912fbb080b7f3ff880d017f60bf320..572fbd2546905698ac514a98f6b706940c000ac4 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi +++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -197,7 +197,7 @@ bus-width = <4>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-pdu001.dts b/arch/arm/boot/dts/am335x-pdu001.dts index 1ad530a39a957228e6e334ea9b04515789edec19..f56798efddff3a963a05bb21a20421349d6a2d4b 100644 --- a/arch/arm/boot/dts/am335x-pdu001.dts +++ b/arch/arm/boot/dts/am335x-pdu001.dts @@ -373,7 +373,7 @@ ti,pindir-d0-out-d1-in; status = "okay"; - cfaf240320a032t { + display-controller@0 { compatible = "orisetech,otm3225a"; reg = <0>; spi-max-frequency = <1000000>; @@ -577,7 +577,7 @@ bus-width = <4>; pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; - cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; }; &sham { diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi index 8ce541739b24f08bb4a4a0677caee390ba4f899e..83e4fe595e3713e7a78b1a49921dc587d2454ec9 100644 --- a/arch/arm/boot/dts/am335x-wega.dtsi +++ b/arch/arm/boot/dts/am335x-wega.dtsi @@ -157,7 +157,7 @@ bus-width = <4>; pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index 1d158cfda15f2653fe829aa4faf610f5a926f883..c45aef806870068e5807b7263de70a198d1f7f52 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -227,7 +227,7 @@ vmmc-supply = <&vmmc_fixed>; bus-width = <4>; wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ - cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */ + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */ }; &mmc3 { diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi index dae6e458e59fe7e4b49c65c78ed5224169a4407c..b1c988eed87c681d65bf5ec57d3b1b3bbcdf94d9 100644 --- a/arch/arm/boot/dts/am3517-som.dtsi +++ b/arch/arm/boot/dts/am3517-som.dtsi @@ -163,7 +163,7 @@ compatible = "ti,wl1271"; reg = <2>; interrupt-parent = <&gpio6>; - interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */ + interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */ ref-clock-frequency = <26000000>; tcxo-clock-frequency = <26000000>; }; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 
d4b7c59eec6853f2f836b5b3ffd686988ad0f6b1..09e58fb810d95db0e54932013862d027c813aa90 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -1101,7 +1101,7 @@ }; }; - qspi: qspi@47900000 { + qspi: spi@47900000 { compatible = "ti,am4372-qspi"; reg = <0x47900000 0x100>, <0x30000000 0x4000000>; @@ -1142,6 +1142,8 @@ ti,hwmods = "dss_dispc"; clocks = <&disp_clk>; clock-names = "fck"; + + max-memory-bandwidth = <230000000>; }; rfbi: rfbi@4832a800 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index 5b97c20c5ed49c2de9432742442a322e4e127728..8a17eca2bc9764719f16f7064d45aab50ed814a6 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -83,7 +83,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 6502d33976532ceddf12c8f780dead8b703d0dfc..12735cf9674bb67d24c91a6da6724fca7b5a7d87 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -45,7 +45,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index d9a2049a1ea8ad49163c8028d85ab9af45a0af0b..6bebedfc0f35a68243bf7bc1927638aedce6afd8 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -98,14 +98,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; }; &mmc2 { diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts index 3ef9111d0e8baafd530c7b09f6db1c42895d61cc..9235173edbd3a4a26e9d6a41ea9b6f36a4d9c677 100644 --- a/arch/arm/boot/dts/am572x-idk.dts +++ b/arch/arm/boot/dts/am572x-idk.dts @@ -20,14 +20,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; }; &mmc2 { diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts index 378dfa780ac17a69a1c9420dd69c7c58690cb0d3..ae43de3297f4f9dfea3cbbb2617061ec9e870ca1 100644 --- a/arch/arm/boot/dts/am574x-idk.dts +++ b/arch/arm/boot/dts/am574x-idk.dts @@ -24,14 +24,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_default>; - pinctrl-3 = <&mmc1_pins_hs>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>; - pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>; }; &mmc2 { diff --git 
a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index ad953113cefbda10f32bf03c6b11e0551dad8834..d53532b479475212b62b17472cb843e82a6f3bff 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -433,6 +433,7 @@ bus-width = <4>; cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ + no-1-8-v; }; &mmc2 { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts index 5a77b334923d051f6943730b8d51849fe6161596..34c69965821bb5d20fe941d2cd5f95343f777338 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts @@ -19,14 +19,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>; vmmc-supply = <&vdd_3v3>; vqmmc-supply = <&ldo1_reg>; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts index 17c41da3b55f1c7d94460f98647f0fdb084a9dbe..ccd99160bbdfb869451f1a53001068369eb98936 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts @@ -19,14 +19,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; vmmc-supply = <&vdd_3v3>; vqmmc-supply = <&ldo1_reg>; }; diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts index 203266f884807e3a2cced5c35c56e6d92fc69bf9..52ae8eef60fc328bbc1b0e1117f9d86959335ee7 100644 --- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts +++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts @@ -518,7 +518,7 @@ }; /* touch controller */ - ads7846@0 { + touchscreen@1 { pinctrl-names = "default"; pinctrl-0 = <&ads7846_pins>; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index c9063ffca524c8022f97930964d7b03f4de38c4c..3fd9a1676d881adf19b30c6abe56e4b01711cf31 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -410,6 +410,7 @@ vqmmc-supply = <&ldo1_reg>; bus-width = <4>; cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ + no-1-8-v; }; &mmc2 { diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi index a917cf8825ca8b43bc3cd02a5395d6fbe77a2721..0e4c7c4c8c0930c81a6e8f1925a9cf0343a4b76e 100644 --- a/arch/arm/boot/dts/arm-realview-eb.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb.dtsi @@ -371,7 +371,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp: ssp@1000d000 { + ssp: spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; clocks = <&sspclk>, <&pclk>; diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index f935b72d3d96458a99f1a101d98ec7d620f60c4e..83e0fbc4a1a10cf4e4b9418b9646d7f06aaea181 
100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -45,7 +45,7 @@ }; /* The voltage to the MMC card is hardwired at 3.3V */ - vmmc: fixedregulator@0 { + vmmc: regulator-vmmc { compatible = "regulator-fixed"; regulator-name = "vmmc"; regulator-min-microvolt = <3300000>; @@ -53,7 +53,7 @@ regulator-boot-on; }; - veth: fixedregulator@0 { + veth: regulator-veth { compatible = "regulator-fixed"; regulator-name = "veth"; regulator-min-microvolt = <3300000>; @@ -380,7 +380,7 @@ clock-names = "apb_pclk"; }; - pb1176_ssp: ssp@1010b000 { + pb1176_ssp: spi@1010b000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1010b000 0x1000>; interrupt-parent = <&intc_dc1176>; diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts index 36203288de4267d6b41d01375027da7d099b473e..2f6aa24a0b67c707068bba9fb7902525ff1158c0 100644 --- a/arch/arm/boot/dts/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts @@ -145,7 +145,7 @@ }; /* The voltage to the MMC card is hardwired at 3.3V */ - vmmc: fixedregulator@0 { + vmmc: regulator-vmmc { compatible = "regulator-fixed"; regulator-name = "vmmc"; regulator-min-microvolt = <3300000>; @@ -153,7 +153,7 @@ regulator-boot-on; }; - veth: fixedregulator@0 { + veth: regulator-veth { compatible = "regulator-fixed"; regulator-name = "veth"; regulator-min-microvolt = <3300000>; @@ -523,7 +523,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp@1000d000 { + spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; interrupt-parent = <&intc_pb11mp>; diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi index 10868ba3277f52d1eef2be37df09b101187d9fcf..916a97734f84cc7b72d7087d09e429020c8c957c 100644 --- a/arch/arm/boot/dts/arm-realview-pbx.dtsi +++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi @@ -44,7 +44,7 @@ }; /* The voltage to the MMC card is hardwired at 3.3V */ - vmmc: fixedregulator@0 { + vmmc: regulator-vmmc { compatible = "regulator-fixed"; regulator-name = "vmmc"; regulator-min-microvolt = <3300000>; @@ -52,7 +52,7 @@ regulator-boot-on; }; - veth: fixedregulator@0 { + veth: regulator-veth { compatible = "regulator-fixed"; regulator-name = "veth"; regulator-min-microvolt = <3300000>; @@ -362,7 +362,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp: ssp@1000d000 { + ssp: spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; clocks = <&sspclk>, <&pclk>; @@ -567,4 +567,3 @@ }; }; }; - diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi index 7c6ad2afb0947afc85ea34ff5d470df9bfbd3ef9..1b0d0680c8b6207ddb9717dfd320e8069c6d6d65 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dtsi +++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi @@ -48,7 +48,7 @@ &clearfog_sdhci_cd_pins>; pinctrl-names = "default"; status = "okay"; - vmmc = <®_3p3v>; + vmmc-supply = <®_3p3v>; wp-inverted; }; diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi index 8d708cc224958865a3c208b1ec29c03118c0852f..3e7d093d7a9a244e81a6d0640638740b6260870f 100644 --- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi +++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi @@ -336,3 +336,11 @@ status = "disabled"; }; +&uart0 { + compatible = "marvell,armada-38x-uart"; +}; + +&uart1 { + compatible = "marvell,armada-38x-uart"; +}; + diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index 
f3ac7483afed0997ff527e8c8cef68c3a7307367..5d04dc68cf5795ef8e61533cbf0b0f8bb9dc0546 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -144,30 +144,32 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "U-Boot"; - reg = <0 0x800000>; - }; - partition@800000 { - label = "Linux"; - reg = <0x800000 0x800000>; - }; - partition@1000000 { - label = "Filesystem"; - reg = <0x1000000 0x3f000000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "U-Boot"; + reg = <0 0x800000>; + }; + partition@800000 { + label = "Linux"; + reg = <0x800000 0x800000>; + }; + partition@1000000 { + label = "Filesystem"; + reg = <0x1000000 0x3f000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83792efc102ff2c8dd375d1c5591b3..b4cca507cf1361b6c534a68f1961b660ef0fece8 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -160,12 +160,15 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb89db8ef5b1814873b7285341e7590..87dcb502f72da5fdab843d38516f8fc85d91387b 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -81,49 +81,52 @@ }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x000e0000>; - read-only; - }; - - partition@e0000 { - label = "u-boot-env"; - reg = <0x000e0000 0x00020000>; - read-only; - }; - - partition@100000 { - label = "u-boot-env2"; - reg = <0x00100000 0x00020000>; - read-only; - }; - - partition@120000 { - label = "zImage"; - reg = <0x00120000 0x00400000>; - }; - - partition@520000 { - label = "initrd"; - reg = <0x00520000 0x00400000>; - }; - partition@e00000 { - label = "boot"; - reg = <0x00e00000 0x3f200000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "u-boot"; + reg = <0x00000000 0x000e0000>; + read-only; + }; + + partition@e0000 { + label = "u-boot-env"; + reg = <0x000e0000 0x00020000>; + read-only; + }; + + partition@100000 { + label = "u-boot-env2"; + reg = <0x00100000 0x00020000>; + read-only; + }; + + partition@120000 { + label = "zImage"; + reg = <0x00120000 0x00400000>; + }; + + partition@520000 { + label = "initrd"; + reg = <0x00520000 0x00400000>; + }; + + partition@e00000 { + label = "boot"; + reg = <0x00e00000 0x3f200000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts 
b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts index df1227613d48e3430eadda82220fc58dcc1f7936..c2ece0b91885e19a582fd38c8aac6b3b7ef6c985 100644 --- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts +++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts @@ -13,7 +13,7 @@ bootargs = "console=ttyS4,115200 earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts index 7a291de02543d0bf1d3d712fbbb0fd903de87558..22dade6393d063c275fd79955c93aa61647be846 100644 --- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts +++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts @@ -13,7 +13,7 @@ bootargs = "earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts index d598b63913625a34e36b0ba0ef1cf5266441e1c9..024e52a6cd0f8f8e781c7f62a7811cecf8e5d7b6 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts @@ -14,7 +14,7 @@ bootargs = "console=ttyS4,115200 earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x40000000>; }; @@ -322,4 +322,3 @@ &adc { status = "okay"; }; - diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts index 43ed13963d35424e05bc1d44d12b6f9dce44e8db..33d704541de6298daeede6ec9a6d20f8d4735af2 100644 --- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts +++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts @@ -17,7 +17,7 @@ bootargs = "console=ttyS4,115200 earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index b23a983f95a5379f9224d3dd602a1e2ee0df094d..69f6b9d2e7e7de67ad887ff844bd6d315a87e7a6 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -350,7 +350,7 @@ status = "disabled"; }; - i2c: i2c@1e78a000 { + i2c: bus@1e78a000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi index 87fdc146ff525af94c8db648f8f46b8c3c1084d4..d107459fc0f89417f7d7adde30d0155da86dc929 100644 --- a/arch/arm/boot/dts/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed-g5.dtsi @@ -410,7 +410,7 @@ status = "disabled"; }; - i2c: i2c@1e78a000 { + i2c: bus@1e78a000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi index bb86f17ed5ed1ba476056c434039ebf142f2bc6b..21876da7c44250c752c28983e894fa2f536352cc 100644 --- a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi +++ b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi @@ -70,9 +70,9 @@ &i2c1 { status = "okay"; - eeprom@87 { + eeprom@57 { compatible = "giantec,gt24c32a", "atmel,24c32"; - reg = <87>; + reg = <0x57>; pagesize = <32>; }; }; diff --git a/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi b/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi index 4b9176dc5d029ff025f400b434e89868da7b1191..df0f0cc575c181006936cba2a78f03a61d3f5e9a 100644 --- a/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi +++ b/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi @@ -59,9 +59,9 @@ &i2c1 { status = "okay"; - ft5426@56 { + ft5426@38 { compatible = "focaltech,ft5426", "edt,edt-ft5406"; - reg = <56>; + reg = <0x38>; pinctrl-names = "default"; pinctrl-0 = 
<&pinctrl_lcd_ctp_int>; diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts index 3b1baa8605a77e8f724724550e5ec123df608732..2214bfe7aa205f624c7ad485ae88f0d3ef8a15fa 100644 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts @@ -92,13 +92,13 @@ reg = <0x40000 0xc0000>; }; - bootloaderenv@0x100000 { - label = "bootloader env"; + bootloaderenvred@0x100000 { + label = "bootloader env redundant"; reg = <0x100000 0x40000>; }; - bootloaderenvred@0x140000 { - label = "bootloader env redundant"; + bootloaderenv@0x140000 { + label = "bootloader env"; reg = <0x140000 0x40000>; }; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index 4b7c762d5f2236678f769808e60233ba6b66aa3a..7d554b9ab27fd73762f46578a396e0cc77cb1037 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -252,7 +252,7 @@ rootfs@800000 { label = "rootfs"; - reg = <0x800000 0x0f800000>; + reg = <0x800000 0x1f800000>; }; }; }; diff --git a/arch/arm/boot/dts/at91-vinco.dts b/arch/arm/boot/dts/at91-vinco.dts index 1be9889a2b3a1eb7c40a7c74287910b4f4c616ed..430277291e025fb17c31cebf49173ffaadab55ea 100644 --- a/arch/arm/boot/dts/at91-vinco.dts +++ b/arch/arm/boot/dts/at91-vinco.dts @@ -128,7 +128,7 @@ i2c2: i2c@f8024000 { status = "okay"; - rtc1: rtc@64 { + rtc1: rtc@32 { compatible = "epson,rx8900"; reg = <0x32>; }; diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts index d2b865f6029322e296133e97e7df4aa3b5127c3c..07d1b571e6017b27a29b7acb7d5e72499baba1c4 100644 --- a/arch/arm/boot/dts/at91sam9260ek.dts +++ b/arch/arm/boot/dts/at91sam9260ek.dts @@ -127,7 +127,7 @@ spi0: spi@fffc8000 { cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; - mtd_dataflash@0 { + mtd_dataflash@1 { compatible = "atmel,at45", "atmel,dataflash"; spi-max-frequency = <50000000>; reg = <1>; diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts index a29fc0494076244576166cc8bf40c123296fac9e..a57f2d435dcae8324c7001942098e55e0aabe267 100644 --- a/arch/arm/boot/dts/at91sam9261ek.dts +++ b/arch/arm/boot/dts/at91sam9261ek.dts @@ -160,7 +160,7 @@ spi-max-frequency = <15000000>; }; - tsc2046@0 { + tsc2046@2 { reg = <2>; compatible = "ti,ads7843"; interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 71df3adfc7ca1b5684b39947588f6831de90111d..ec1f17ab6753b64211e0030b6725a0099d2c5590 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -109,7 +109,7 @@ spi0: spi@fffc8000 { cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; - mtd_dataflash@0 { + mtd_dataflash@1 { compatible = "atmel,at45", "atmel,dataflash"; spi-max-frequency = <50000000>; reg = <1>; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 1ee25a475be87ff452287d1adfda8e1644a71a27..d16db1fa7e15c69dba0060dfc647211e440f607d 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -570,7 +570,7 @@ }; }; - uart1 { + usart1 { pinctrl_usart1: usart1-0 { atmel,pins = ; + reg = <0x800000 0x0f800000>; }; }; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 253df7170a4e263bec22d589ddf6d34d0bccdacf..887a60c317e9dfae7f3bd68c907abc099b1e07ff 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ 
b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -169,8 +169,8 @@ mdio: mdio@18002000 { compatible = "brcm,iproc-mdio"; reg = <0x18002000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; status = "disabled"; gphy0: ethernet-phy@0 { diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index 3084a7c957339f0edc2fef97d203b08635c96790..e4d49731287f693ae94c39239b5d51bac1bfc46c 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -216,7 +216,7 @@ reg = <0x33000 0x14>; }; - qspi: qspi@27200 { + qspi: spi@27200 { compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 09ba8504632284532e3b17c6d1531e2d732fadc4..2b219addeb4496e18fd5948113bd654db51eaae7 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -273,7 +273,7 @@ brcm,nand-has-wp; }; - qspi: qspi@27200 { + qspi: spi@27200 { compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts index 5641d162dfdb0c106eed6f7f4dc4f7c120930970..28e7513ce61713a084bc5f91f96cc2426d3f50a8 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts @@ -93,7 +93,7 @@ }; &hdmi { - hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; + hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; }; &pwm { diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 4adb85e66be3f975894cb712211478439709f274..93762244be7f469a64d9158b2a904e0b8cde1fdd 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts @@ -31,7 +31,7 @@ wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; - reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; + reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index c318bcbc6ba7e327bcf164fd543cd47c04c52d2c..89e6fd547c7572f6bc7d243e5055da4e33dc94a2 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts @@ -26,7 +26,7 @@ wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; - reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>; + reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 31b29646b14cf0725cf312002f23ffe2074006e6..c9322a56300dc45bde2e7fc55953186031440645 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -39,7 +39,7 @@ trips { cpu-crit { - temperature = <80000>; + temperature = <90000>; hysteresis = <0>; type = "critical"; }; diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts index 36efe410dcd71a54ae6f802e405fb9b60296b58f..9e33c41f541125f1d5d4a497f0f7ef06bd1d8e06 100644 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts @@ -125,6 +125,9 @@ }; mdio-bus-mux { + #address-cells = <1>; + #size-cells = <0>; + /* BIT(9) = 1 => external mdio */ mdio_ext: mdio@200 { reg = <0x200>; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index bc607d11eef8e958f654ced7c22e7a409a5fbdff..a678fb7c9e3b28a2635de242968de79f72c246d4 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ 
b/arch/arm/boot/dts/bcm5301x.dtsi @@ -350,8 +350,8 @@ mdio: mdio@18003000 { compatible = "brcm,iproc-mdio"; reg = <0x18003000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; }; mdio-bus-mux { diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index f9b75790584503a70a4a4a95c2cc04b70cf6e9e0..016616cc036cf8898ffb7b93682fff73fbf2cde0 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts @@ -94,6 +94,28 @@ regulator-boot-on; }; + baseboard_3v3: fixedregulator-3v3 { + /* TPS73701DCQ */ + compatible = "regulator-fixed"; + regulator-name = "baseboard_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vbat>; + regulator-always-on; + regulator-boot-on; + }; + + baseboard_1v8: fixedregulator-1v8 { + /* TPS73701DCQ */ + compatible = "regulator-fixed"; + regulator-name = "baseboard_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vbat>; + regulator-always-on; + regulator-boot-on; + }; + backlight_lcd: backlight-regulator { compatible = "regulator-fixed"; regulator-name = "lcd_backlight_pwr"; @@ -105,7 +127,7 @@ sound { compatible = "simple-audio-card"; - simple-audio-card,name = "DA850/OMAP-L138 EVM"; + simple-audio-card,name = "DA850-OMAPL138 EVM"; simple-audio-card,widgets = "Line", "Line In", "Line", "Line Out"; @@ -210,10 +232,9 @@ /* Regulators */ IOVDD-supply = <&vdcdc2_reg>; - /* Derived from VBAT: Baseboard 3.3V / 1.8V */ - AVDD-supply = <&vbat>; - DRVDD-supply = <&vbat>; - DVDD-supply = <&vbat>; + AVDD-supply = <&baseboard_3v3>; + DRVDD-supply = <&baseboard_3v3>; + DVDD-supply = <&baseboard_1v8>; }; tca6416: gpio@20 { compatible = "ti,tca6416"; diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts index 0177e3ed20febdf3d2aa820ccd0bfd3e2a708099..3a2fa6e035a38509edfaad62dba2ea220637b234 100644 --- a/arch/arm/boot/dts/da850-lcdk.dts +++ b/arch/arm/boot/dts/da850-lcdk.dts @@ -39,9 +39,39 @@ }; }; + vcc_5vd: fixedregulator-vcc_5vd { + compatible = "regulator-fixed"; + regulator-name = "vcc_5vd"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + }; + + vcc_3v3d: fixedregulator-vcc_3v3d { + /* TPS650250 - VDCDC1 */ + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3d"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc_5vd>; + regulator-always-on; + regulator-boot-on; + }; + + vcc_1v8d: fixedregulator-vcc_1v8d { + /* TPS650250 - VDCDC2 */ + compatible = "regulator-fixed"; + regulator-name = "vcc_1v8d"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + vin-supply = <&vcc_5vd>; + regulator-always-on; + regulator-boot-on; + }; + sound { compatible = "simple-audio-card"; - simple-audio-card,name = "DA850/OMAP-L138 LCDK"; + simple-audio-card,name = "DA850-OMAPL138 LCDK"; simple-audio-card,widgets = "Line", "Line In", "Line", "Line Out"; @@ -221,6 +251,12 @@ compatible = "ti,tlv320aic3106"; reg = <0x18>; status = "okay"; + + /* Regulators */ + IOVDD-supply = <&vcc_3v3d>; + AVDD-supply = <&vcc_3v3d>; + DRVDD-supply = <&vcc_3v3d>; + DVDD-supply = <&vcc_1v8d>; }; }; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92236d4616992aeea113a81bfb8c97..559659b399d04d6d9642d673d335772bc8274b5f 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -476,7 +476,7 @@ clocksource: 
timer@20000 { compatible = "ti,da830-timer"; reg = <0x20000 0x1000>; - interrupts = <12>, <13>; + interrupts = <21>, <22>; interrupt-names = "tint12", "tint34"; clocks = <&pll0_auxclk>; }; diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts index 580e3cbcfbf7cf8fe83a6479a147d763bafa46b3..3e1584e787aec8c59e02cf0d4e05a6d240dffc0a 100644 --- a/arch/arm/boot/dts/dove-cubox.dts +++ b/arch/arm/boot/dts/dove-cubox.dts @@ -87,7 +87,7 @@ status = "okay"; clock-frequency = <100000>; - si5351: clock-generator { + si5351: clock-generator@60 { compatible = "silabs,si5351a-msop"; reg = <0x60>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 4a0a5115b298436dc76180bc74f1d1372d3163a5..250ad0535e8cc642429dc45a2c9401f987bebf41 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -155,7 +155,7 @@ 0xffffe000 MBUS_ID(0x03, 0x01) 0 0x0000800 /* CESA SRAM 2k */ 0xfffff000 MBUS_ID(0x0d, 0x00) 0 0x0000800>; /* PMU SRAM 2k */ - spi0: spi-ctrl@10600 { + spi0: spi@10600 { compatible = "marvell,orion-spi"; #address-cells = <1>; #size-cells = <0>; @@ -168,7 +168,7 @@ status = "disabled"; }; - i2c: i2c-ctrl@11000 { + i2c: i2c@11000 { compatible = "marvell,mv64xxx-i2c"; reg = <0x11000 0x20>; #address-cells = <1>; @@ -218,7 +218,7 @@ status = "disabled"; }; - spi1: spi-ctrl@14600 { + spi1: spi@14600 { compatible = "marvell,orion-spi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index a0ddf497e8cddcd2fd1c906244167bc373be93e3..7ce24b282d421a585b2b264f219d0c541576492c 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -336,6 +336,7 @@ <0 0 0 2 &pcie1_intc 2>, <0 0 0 3 &pcie1_intc 3>, <0 0 0 4 &pcie1_intc 4>; + ti,syscon-unaligned-access = <&scm_conf1 0x14 1>; status = "disabled"; pcie1_intc: interrupt-controller { interrupt-controller; @@ -354,7 +355,7 @@ ti,hwmods = "pcie1"; phys = <&pcie1_phy>; phy-names = "pcie-phy0"; - ti,syscon-unaligned-access = <&scm_conf1 0x14 2>; + ti,syscon-unaligned-access = <&scm_conf1 0x14 1>; status = "disabled"; }; }; @@ -387,6 +388,7 @@ <0 0 0 2 &pcie2_intc 2>, <0 0 0 3 &pcie2_intc 3>, <0 0 0 4 &pcie2_intc 4>; + ti,syscon-unaligned-access = <&scm_conf1 0x14 2>; pcie2_intc: interrupt-controller { interrupt-controller; #address-cells = <0>; @@ -1369,7 +1371,7 @@ status = "disabled"; }; - qspi: qspi@4b300000 { + qspi: spi@4b300000 { compatible = "ti,dra7xxx-qspi"; reg = <0x4b300000 0x100>, <0x5c000000 0x4000000>; diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi index 28ebb4eb884a9450f51508c576e2b37d3d8a0808..214b9e6de2c356d1c6a3650ba9fc92bcc17b8240 100644 --- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi +++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi @@ -32,7 +32,7 @@ * * Datamanual Revisions: * - * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 + * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 * */ @@ -229,45 +229,45 @@ mmc3_pins_default: mmc3_pins_default { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | 
MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_hs: mmc3_pins_hs { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr12: mmc3_pins_sdr12 { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr25: mmc3_pins_sdr25 { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - 
DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; diff --git a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi index baba7b00eca7976c92847fe664e7b599364d83a7..fdca48186916097692406f8518e2af96765a5ce6 100644 --- a/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi +++ b/arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi @@ -22,7 +22,7 @@ * * Datamanual Revisions: * - * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017 + * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018 * */ @@ -169,25 +169,25 @@ /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */ mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf { pinctrl-pin-array = < - 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ - 0x194 A_DELAY_PS(0) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */ - 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ - 0x1ac A_DELAY_PS(85) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ - 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ - 0x1b8 A_DELAY_PS(139) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ - 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ - 0x1c4 A_DELAY_PS(69) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ - 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ - 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ - 0x1dc A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ - 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ - 0x1e8 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ - 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ - 0x1f4 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ - 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ - 0x200 A_DELAY_PS(36) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ - 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN */ - 0x368 A_DELAY_PS(72) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ + 0x190 A_DELAY_PS(384) G_DELAY_PS(0) /* CFG_GPMC_A19_OEN */ + 0x194 A_DELAY_PS(350) G_DELAY_PS(174) /* CFG_GPMC_A19_OUT */ + 0x1a8 A_DELAY_PS(410) G_DELAY_PS(0) /* CFG_GPMC_A20_OEN */ + 0x1ac A_DELAY_PS(335) G_DELAY_PS(0) /* CFG_GPMC_A20_OUT */ + 0x1b4 A_DELAY_PS(468) G_DELAY_PS(0) /* CFG_GPMC_A21_OEN */ + 0x1b8 A_DELAY_PS(339) G_DELAY_PS(0) /* CFG_GPMC_A21_OUT */ + 0x1c0 A_DELAY_PS(676) G_DELAY_PS(0) /* CFG_GPMC_A22_OEN */ + 0x1c4 A_DELAY_PS(219) G_DELAY_PS(0) /* CFG_GPMC_A22_OUT */ + 0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154) /* CFG_GPMC_A23_OUT */ + 0x1d8 A_DELAY_PS(640) G_DELAY_PS(0) /* CFG_GPMC_A24_OEN */ + 0x1dc A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A24_OUT */ + 0x1e4 A_DELAY_PS(356) G_DELAY_PS(0) /* CFG_GPMC_A25_OEN */ + 0x1e8 A_DELAY_PS(150) G_DELAY_PS(0) /* CFG_GPMC_A25_OUT */ + 0x1f0 A_DELAY_PS(579) G_DELAY_PS(0) /* CFG_GPMC_A26_OEN */ + 0x1f4 A_DELAY_PS(200) G_DELAY_PS(0) /* CFG_GPMC_A26_OUT */ + 0x1fc A_DELAY_PS(435) G_DELAY_PS(0) /* CFG_GPMC_A27_OEN */ + 0x200 A_DELAY_PS(236) G_DELAY_PS(0) /* CFG_GPMC_A27_OUT */ + 0x364 A_DELAY_PS(759) G_DELAY_PS(0) /* CFG_GPMC_CS1_OEN 
*/ + 0x368 A_DELAY_PS(372) G_DELAY_PS(0) /* CFG_GPMC_CS1_OUT */ >; }; diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi index 620b50c19ead93b65ef43794ec3f50d732a0a2db..7c22cbf6f3d41f1d2e1e82538f23899de60be426 100644 --- a/arch/arm/boot/dts/exynos3250-artik5.dtsi +++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi @@ -69,6 +69,8 @@ compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx3>; interrupts = <5 IRQ_TYPE_NONE>; + pinctrl-names = "default"; + pinctrl-0 = <&s2mps14_irq>; reg = <0x66>; s2mps14_osc: clocks { @@ -350,6 +352,11 @@ samsung,pin-drv = ; samsung,pin-val = <1>; }; + + s2mps14_irq: s2mps14-irq { + samsung,pins = "gpx3-5"; + samsung,pin-pud = ; + }; }; &rtc { diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 27a1ee28c3bb8a1b13b46808df12d4739cc94c22..5892a9f7622faba0b7f4b27a372d6be64af0f3e1 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -168,6 +168,9 @@ interrupt-controller; #interrupt-cells = <3>; interrupt-parent = <&gic>; + clock-names = "clkout8"; + clocks = <&cmu CLK_FIN_PLL>; + #clock-cells = <1>; }; mipi_phy: video-phy { @@ -357,7 +360,7 @@ }; hsotg: hsotg@12480000 { - compatible = "snps,dwc2"; + compatible = "samsung,s3c6400-hsotg", "snps,dwc2"; reg = <0x12480000 0x20000>; interrupts = ; clocks = <&cmu CLK_USBOTG>; diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index 2ab99f9f3d0ac2769bf1c504f3926707f59cc012..dd9ec05eb0f795437999b505253f82d1d375b479 100644 --- a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -151,6 +151,8 @@ reg = <0x66>; interrupt-parent = <&gpx0>; interrupts = <4 IRQ_TYPE_NONE>, <3 IRQ_TYPE_NONE>; + pinctrl-names = "default"; + pinctrl-0 = <&max8997_irq>; max8997,pmic-buck1-dvs-voltage = <1350000>; max8997,pmic-buck2-dvs-voltage = <1100000>; @@ -288,6 +290,13 @@ }; }; +&pinctrl_1 { + max8997_irq: max8997-irq { + samsung,pins = "gpx0-3", "gpx0-4"; + samsung,pin-pud = ; + }; +}; + &sdhci_0 { bus-width = <4>; pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>; diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index a09e46c9dbc04cbedfc626604339d9f4343f242c..00820d2397537b1165f0ee6803697b8f67d0a80b 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -49,7 +49,7 @@ }; emmc_pwrseq: pwrseq { - pinctrl-0 = <&sd1_cd>; + pinctrl-0 = <&emmc_rstn>; pinctrl-names = "default"; compatible = "mmc-pwrseq-emmc"; reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>; @@ -161,12 +161,6 @@ cpu0-supply = <&buck2_reg>; }; -/* RSTN signal for eMMC */ -&sd1_cd { - samsung,pin-pud = ; - samsung,pin-drv = ; -}; - &pinctrl_1 { gpio_power_key: power_key { samsung,pins = "gpx1-3"; @@ -184,6 +178,11 @@ samsung,pins = "gpx3-7"; samsung,pin-pud = ; }; + + emmc_rstn: emmc-rstn { + samsung,pins = "gpk1-2"; + samsung,pin-pud = ; + }; }; &ehci { diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 7a8a5c55701a894359c748562d156c9c52109ff5..4ab1f1c66c27f407faadc897e332007c6ed80239 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -149,9 +149,11 @@ }; &hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_hpd>; status = "okay"; - ddc = <&i2c_2>; - hpd-gpios = <&gpx3 7 GPIO_ACTIVE_LOW>; + ddc = <&i2c_ddc>; + hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; vdd_osc-supply 
= <&ldo10_reg>; vdd_pll-supply = <&ldo8_reg>; vdd-supply = <&ldo8_reg>; @@ -168,6 +170,8 @@ reg = <0x66>; interrupt-parent = <&gpx3>; interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&s5m8767_irq>; vinb1-supply = <&main_dc_reg>; vinb2-supply = <&main_dc_reg>; @@ -452,13 +456,6 @@ }; }; -&i2c_2 { - status = "okay"; - /* used by HDMI DDC */ - samsung,i2c-sda-delay = <100>; - samsung,i2c-max-bus-freq = <66000>; -}; - &i2c_3 { status = "okay"; @@ -535,6 +532,13 @@ cap-sd-highspeed; }; +&pinctrl_0 { + s5m8767_irq: s5m8767-irq { + samsung,pins = "gpx3-2"; + samsung,pin-pud = ; + }; +}; + &rtc { status = "okay"; }; @@ -547,3 +551,22 @@ status = "okay"; samsung,exynos-sataphy-i2c-phandle = <&sata_phy_i2c>; }; + +&soc { + /* + * For unknown reasons HDMI-DDC does not work with Exynos I2C + * controllers. Lets use software I2C over GPIO pins as a workaround. + */ + i2c_ddc: i2c-gpio { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_gpio_bus>; + status = "okay"; + compatible = "i2c-gpio"; + gpios = <&gpa0 6 0 /* sda */ + &gpa0 7 0 /* scl */ + >; + i2c-gpio,delay-us = <2>; + #address-cells = <1>; + #size-cells = <0>; + }; +}; diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index 6ff6dea29d4490f13bdf457b1ee7ecfb96929cfa..d31a68672bfacb3a2f6575c26790db05b5498d6c 100644 --- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi @@ -225,6 +225,12 @@ samsung,pin-drv = ; }; + i2c2_gpio_bus: i2c2-gpio-bus { + samsung,pins = "gpa0-6", "gpa0-7"; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + uart2_data: uart2-data { samsung,pins = "gpa1-0", "gpa1-1"; samsung,pin-function = ; @@ -593,6 +599,11 @@ samsung,pin-pud = ; samsung,pin-drv = ; }; + + hdmi_hpd: hdmi-hpd { + samsung,pins = "gpx3-7"; + samsung,pin-pud = ; + }; }; &pinctrl_1 { diff --git a/arch/arm/boot/dts/exynos5250-snow-rev5.dts b/arch/arm/boot/dts/exynos5250-snow-rev5.dts index 0348b1c49a691d373792d2433cf26463803c7b70..7cbfc6f1f4b8fde1c52d9ad3203cfe4504c2aa42 100644 --- a/arch/arm/boot/dts/exynos5250-snow-rev5.dts +++ b/arch/arm/boot/dts/exynos5250-snow-rev5.dts @@ -20,6 +20,14 @@ samsung,model = "Snow-I2S-MAX98090"; samsung,audio-codec = <&max98090>; + + cpu { + sound-dai = <&i2s0 0>; + }; + + codec { + sound-dai = <&max98090 0>, <&hdmi>; + }; }; }; @@ -31,6 +39,9 @@ interrupt-parent = <&gpx0>; pinctrl-names = "default"; pinctrl-0 = <&max98090_irq>; + clocks = <&pmu_system_controller 0>; + clock-names = "mclk"; + #sound-dai-cells = <1>; }; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index da163a40af1598c5f2755d588ccf0a5598b578fe..b85527faa6ea4da77fa992c9707da18485add861 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -54,62 +54,109 @@ device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <0>; - clock-frequency = <1700000000>; clocks = <&clock CLK_ARM_CLK>; clock-names = "cpu"; - clock-latency = <140000>; - - operating-points = < - 1700000 1300000 - 1600000 1250000 - 1500000 1225000 - 1400000 1200000 - 1300000 1150000 - 1200000 1125000 - 1100000 1100000 - 1000000 1075000 - 900000 1050000 - 800000 1025000 - 700000 1012500 - 600000 1000000 - 500000 975000 - 400000 950000 - 300000 937500 - 200000 925000 - >; + operating-points-v2 = <&cpu0_opp_table>; #cooling-cells = <2>; /* min followed by max */ }; cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <1>; - clock-frequency = <1700000000>; clocks = <&clock CLK_ARM_CLK>; clock-names 
= "cpu"; - clock-latency = <140000>; - - operating-points = < - 1700000 1300000 - 1600000 1250000 - 1500000 1225000 - 1400000 1200000 - 1300000 1150000 - 1200000 1125000 - 1100000 1100000 - 1000000 1075000 - 900000 1050000 - 800000 1025000 - 700000 1012500 - 600000 1000000 - 500000 975000 - 400000 950000 - 300000 937500 - 200000 925000 - >; + operating-points-v2 = <&cpu0_opp_table>; #cooling-cells = <2>; /* min followed by max */ }; }; + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <925000>; + clock-latency-ns = <140000>; + }; + opp-300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <937500>; + clock-latency-ns = <140000>; + }; + opp-400000000 { + opp-hz = /bits/ 64 <400000000>; + opp-microvolt = <950000>; + clock-latency-ns = <140000>; + }; + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <975000>; + clock-latency-ns = <140000>; + }; + opp-600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1000000>; + clock-latency-ns = <140000>; + }; + opp-700000000 { + opp-hz = /bits/ 64 <700000000>; + opp-microvolt = <1012500>; + clock-latency-ns = <140000>; + }; + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1025000>; + clock-latency-ns = <140000>; + }; + opp-900000000 { + opp-hz = /bits/ 64 <900000000>; + opp-microvolt = <1050000>; + clock-latency-ns = <140000>; + }; + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1075000>; + clock-latency-ns = <140000>; + opp-suspend; + }; + opp-1100000000 { + opp-hz = /bits/ 64 <1100000000>; + opp-microvolt = <1100000>; + clock-latency-ns = <140000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <1125000>; + clock-latency-ns = <140000>; + }; + opp-1300000000 { + opp-hz = /bits/ 64 <1300000000>; + opp-microvolt = <1150000>; + clock-latency-ns = <140000>; + }; + opp-1400000000 { + opp-hz = /bits/ 64 <1400000000>; + opp-microvolt = <1200000>; + clock-latency-ns = <140000>; + }; + opp-1500000000 { + opp-hz = /bits/ 64 <1500000000>; + opp-microvolt = <1225000>; + clock-latency-ns = <140000>; + }; + opp-1600000000 { + opp-hz = /bits/ 64 <1600000000>; + opp-microvolt = <1250000>; + clock-latency-ns = <140000>; + }; + opp-1700000000 { + opp-hz = /bits/ 64 <1700000000>; + opp-microvolt = <1300000>; + clock-latency-ns = <140000>; + }; + }; + soc: soc { sysram@2020000 { compatible = "mmio-sram"; diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi index 55167850619cb0738cfac1a99f7696219aed2b45..33a085ffc44718c17d04c8c6fe2fcfaafb83fd22 100644 --- a/arch/arm/boot/dts/exynos5260.dtsi +++ b/arch/arm/boot/dts/exynos5260.dtsi @@ -223,7 +223,7 @@ wakeup-interrupt-controller { compatible = "samsung,exynos4210-wakeup-eint"; interrupt-parent = <&gic>; - interrupts = ; + interrupts = ; }; }; diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index cdda614e417e74334a177e386c8bb20820f16d83..a370857beac0d3b290b01d1d72201b35a4aed8a1 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -106,6 +106,7 @@ regulator-name = "PVDD_APIO_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo3_reg: LDO3 { @@ -144,6 +145,7 @@ regulator-name = "PVDD_ABB_1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo9_reg: LDO9 
{ diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index 57c2332bf28247b354592c0dbe839fa5a8ed6560..9eb48cabcca450878f829e15562effaa476bf777 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -153,7 +153,7 @@ &clock_audss { assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>; - assigned-clock-parents = <&clock CLK_FOUT_EPLL>; + assigned-clock-parents = <&clock CLK_MAU_EPLL>; }; &cpu0 { @@ -312,6 +312,7 @@ regulator-name = "vdd_1v35"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <1350000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -333,6 +334,7 @@ regulator-name = "vdd_2v"; regulator-min-microvolt = <2000000>; regulator-max-microvolt = <2000000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -343,6 +345,7 @@ regulator-name = "vdd_1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -437,6 +440,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi index 2f4f40882daba05c1436091f332f80f7f65d8244..d476ba0f07b6be90788cf075c074708395d4ee50 100644 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi @@ -224,7 +224,7 @@ ldo13_reg: LDO13 { regulator-name = "vddq_mmc2"; - regulator-min-microvolt = <2800000>; + regulator-min-microvolt = <1800000>; regulator-max-microvolt = <2800000>; }; @@ -334,7 +334,7 @@ buck8_reg: BUCK8 { regulator-name = "vdd_1.8v_ldo"; regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1500000>; + regulator-max-microvolt = <2000000>; regulator-always-on; regulator-boot-on; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi index 03611d50c5a9ea218eb9ae69491c43def82788d3..b90cea8b73687eff3051f5278374192125753a14 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi @@ -22,12 +22,11 @@ "Headphone Jack", "HPL", "Headphone Jack", "HPR", "Headphone Jack", "MICBIAS", - "IN1", "Headphone Jack", + "IN12", "Headphone Jack", "Speakers", "SPKL", "Speakers", "SPKR"; - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, - <&clock CLK_MOUT_EPLL>, + assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MOUT_USER_MAU_EPLL>, <&clock_audss EXYNOS_MOUT_AUDSS>, @@ -36,8 +35,7 @@ <&clock_audss EXYNOS_DOUT_AUD_BUS>, <&clock_audss EXYNOS_DOUT_I2S>; - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, - <&clock CLK_FOUT_EPLL>, + assigned-clock-parents = <&clock CLK_FOUT_EPLL>, <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MAU_EPLL>, @@ -48,7 +46,6 @@ <0>, <0>, <0>, - <0>, <196608001>, <(196608002 / 2)>, <196608000>; @@ -84,4 +81,6 @@ &i2s0 { status = "okay"; + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts index 4a30cc849b00a2d76b6651adbc5f6ae531991429..122174ea9e0a3d06eab9cb5edf04cb5877e6b15c 100644 --- 
a/arch/arm/boot/dts/exynos5422-odroidxu4.dts +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts @@ -33,8 +33,7 @@ compatible = "samsung,odroid-xu3-audio"; model = "Odroid-XU4"; - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>, - <&clock CLK_MOUT_EPLL>, + assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MOUT_USER_MAU_EPLL>, <&clock_audss EXYNOS_MOUT_AUDSS>, @@ -43,8 +42,7 @@ <&clock_audss EXYNOS_DOUT_AUD_BUS>, <&clock_audss EXYNOS_DOUT_I2S>; - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>, - <&clock CLK_FOUT_EPLL>, + assigned-clock-parents = <&clock CLK_FOUT_EPLL>, <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MAU_EPLL>, @@ -55,7 +53,6 @@ <0>, <0>, <0>, - <0>, <196608001>, <(196608002 / 2)>, <196608000>; @@ -79,6 +76,8 @@ &i2s0 { status = "okay"; + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>; + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>; }; &pwm { diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index d80ab9085da19330b877643345e24cb50439d89c..4398f2d1fe88171a989f2b928b78570f7169de12 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -153,7 +153,7 @@ &clock_audss { assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>; - assigned-clock-parents = <&clock CLK_FOUT_EPLL>; + assigned-clock-parents = <&clock CLK_MAU_EPLL>; }; &cpu0 { @@ -312,6 +312,7 @@ regulator-name = "vdd_1v35"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <1350000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -333,6 +334,7 @@ regulator-name = "vdd_2v"; regulator-min-microvolt = <2000000>; regulator-max-microvolt = <2000000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -343,6 +345,7 @@ regulator-name = "vdd_1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -437,6 +440,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index 6f258b50eb44262bcc6f5549ddfc168cb3e9dfa9..15d6157b661dbbbfd3d14d7c6e342a53078b7cb0 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -65,7 +65,7 @@ gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>; gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>; /* Collides with pflash CE1, not so cool */ - cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>; + cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>; num-chipselects = <1>; panel: display@0 { @@ -274,20 +274,16 @@ read-only; }; /* - * Between the boot loader and the rootfs is the kernel - * in a custom Storlink format flashed from the boot - * menu. The rootfs is in squashfs format. + * This firmware image contains the kernel catenated + * with the squashfs root filesystem. For some reason + * this is called "upgrade" on the vendor system. */ - partition@1800c0 { - label = "rootfs"; - reg = <0x001800c0 0x01dbff40>; - read-only; - }; - partition@1f40000 { + partition@40000 { label = "upgrade"; - reg = <0x01f40000 0x00040000>; + reg = <0x00040000 0x01f40000>; read-only; }; + /* RGDB, Residental Gateway Database? 
*/ partition@1f80000 { label = "rgdb"; reg = <0x01f80000 0x00040000>; diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts index d1329322b968540825459b040c96006b9f21eea8..361dccd6c7eeeecce84399a7553c6b43d4089110 100644 --- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts +++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts @@ -11,7 +11,7 @@ / { model = "D-Link DNS-313 1-Bay Network Storage Enclosure"; - compatible = "dlink,dir-313", "cortina,gemini"; + compatible = "dlink,dns-313", "cortina,gemini"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts index 3787cf3763c41f50e4edc474aeba9de49503c6bc..e9e4a8a02600b913c50779810e32d089ada29c3d 100644 --- a/arch/arm/boot/dts/gemini-sq201.dts +++ b/arch/arm/boot/dts/gemini-sq201.dts @@ -20,7 +20,7 @@ }; chosen { - bootargs = "console=ttyS0,115200n8"; + bootargs = "console=ttyS0,115200n8 root=/dev/mtdblock2 rw rootfstype=squashfs,jffs2 rootwait"; stdout-path = &uart0; }; @@ -138,37 +138,10 @@ /* 16MB of flash */ reg = <0x30000000 0x01000000>; - partition@0 { - label = "RedBoot"; - reg = <0x00000000 0x00120000>; - read-only; - }; - partition@120000 { - label = "Kernel"; - reg = <0x00120000 0x00200000>; - }; - partition@320000 { - label = "Ramdisk"; - reg = <0x00320000 0x00600000>; - }; - partition@920000 { - label = "Application"; - reg = <0x00920000 0x00600000>; - }; - partition@f20000 { - label = "VCTL"; - reg = <0x00f20000 0x00020000>; - read-only; - }; - partition@f40000 { - label = "CurConf"; - reg = <0x00f40000 0x000a0000>; - read-only; - }; - partition@fe0000 { - label = "FIS directory"; - reg = <0x00fe0000 0x00020000>; - read-only; + partitions { + compatible = "redboot-fis"; + /* Eraseblock at 0xfe0000 */ + fis-index-block = <0x1fc>; }; }; diff --git a/arch/arm/boot/dts/imx1-ads.dts b/arch/arm/boot/dts/imx1-ads.dts index a1d81badb5c8ad5b801915be447ee095c90d8ca7..119b19ba53b6d8d66c9683f1ffdef3571c0a2af7 100644 --- a/arch/arm/boot/dts/imx1-ads.dts +++ b/arch/arm/boot/dts/imx1-ads.dts @@ -21,6 +21,7 @@ }; memory@8000000 { + device_type = "memory"; reg = <0x08000000 0x04000000>; }; }; diff --git a/arch/arm/boot/dts/imx1-apf9328.dts b/arch/arm/boot/dts/imx1-apf9328.dts index 11515c0cb195c414580de3e2f2e1170b263d1d21..ee4b1b106b1ae955554f3f75bcd96b64bc93fb68 100644 --- a/arch/arm/boot/dts/imx1-apf9328.dts +++ b/arch/arm/boot/dts/imx1-apf9328.dts @@ -21,6 +21,7 @@ }; memory@8000000 { + device_type = "memory"; reg = <0x08000000 0x00800000>; }; }; diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi index 3edc7b5550d88d67d21132231dba61d9bcdc1fcc..2b6e77029de4dfcc936543b6248f6bf98fd6de85 100644 --- a/arch/arm/boot/dts/imx1.dtsi +++ b/arch/arm/boot/dts/imx1.dtsi @@ -15,10 +15,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { gpio0 = &gpio1; diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index ad2ae25b7b4dbeb5fb714dee5af9182166d51457..aca27aa2d44bd3f5f36875b92e0b0c120c20a547 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -10,6 +10,7 @@ compatible = "fsl,imx23-evk", "fsl,imx23"; memory@40000000 { + device_type = "memory"; reg = <0x40000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts index e9351774c61999ce25e7eede7928b270292f1b65..109f51603d45ee0db851dc59ec336885fab22698 100644 --- a/arch/arm/boot/dts/imx23-olinuxino.dts +++ b/arch/arm/boot/dts/imx23-olinuxino.dts @@ -20,6 +20,7 @@ compatible = "olimex,imx23-olinuxino", "fsl,imx23"; memory@40000000 { + device_type = "memory"; reg = <0x40000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/imx23-sansa.dts b/arch/arm/boot/dts/imx23-sansa.dts index 67de7863ad795718d03cabd590e070a70d508f7b..fa22fd9b24129ca85d0bad2aa53439fd155f1774 100644 --- a/arch/arm/boot/dts/imx23-sansa.dts +++ b/arch/arm/boot/dts/imx23-sansa.dts @@ -50,6 +50,7 @@ compatible = "sandisk,sansa_fuze_plus", "fsl,imx23"; memory@40000000 { + device_type = "memory"; reg = <0x40000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/imx23-stmp378x_devb.dts b/arch/arm/boot/dts/imx23-stmp378x_devb.dts index 95c7b918f6d6093145fd6b26f15021bdf8217327..aab029349420dd1a0975bdb359f48be23846d9c0 100644 --- a/arch/arm/boot/dts/imx23-stmp378x_devb.dts +++ b/arch/arm/boot/dts/imx23-stmp378x_devb.dts @@ -17,6 +17,7 @@ compatible = "fsl,stmp378x-devb", "fsl,imx23"; memory@40000000 { + device_type = "memory"; reg = <0x40000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/imx23-xfi3.dts b/arch/arm/boot/dts/imx23-xfi3.dts index 9616e500b9961657c8c21f8293a8e91e1f5a1e77..2b5df8dfd3ff3b84802e2ca23ec169bdb86e574e 100644 --- a/arch/arm/boot/dts/imx23-xfi3.dts +++ b/arch/arm/boot/dts/imx23-xfi3.dts @@ -49,6 +49,7 @@ compatible = "creative,x-fi3", "fsl,imx23"; memory@40000000 { + device_type = "memory"; reg = <0x40000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index 71bfd2b15609ae5a1b48b9b309b421f88abbd058..aaaa987d8eff9edd4b746698ca3156dbd6de4887 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -13,10 +13,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { gpio0 = &gpio0; diff --git a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi index e316fe08837a3e551e8320ed0d08c8d71fdd07ff..e4d7da267532deafecebdd19c28613ea683e92de 100644 --- a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi +++ b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi @@ -18,6 +18,7 @@ compatible = "eukrea,cpuimx25", "fsl,imx25"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x4000000>; /* 64M */ }; }; diff --git a/arch/arm/boot/dts/imx25-karo-tx25.dts b/arch/arm/boot/dts/imx25-karo-tx25.dts index 5cb6967866c0af7bf1c782686de695971def117b..f37e9a75a3ca7b3553ed1294906e47df4da6cf50 100644 --- a/arch/arm/boot/dts/imx25-karo-tx25.dts +++ b/arch/arm/boot/dts/imx25-karo-tx25.dts @@ -37,6 +37,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x02000000 0x90000000 0x02000000>; }; }; diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts index a5626b46ac4e11baf20eaf3d02bd3f1b32c78486..f8544a9e46330fd756395eaff80f7290dca4af44 100644 --- a/arch/arm/boot/dts/imx25-pdk.dts +++ b/arch/arm/boot/dts/imx25-pdk.dts @@ -12,6 +12,7 @@ compatible = "fsl,imx25-pdk", "fsl,imx25"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x4000000>; }; diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index 85c15ee63272775f2b0025ef2175f64e2b4c36d3..8c8ad80de461400eae6d5f60473410d387197dd1 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -12,10 +12,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; diff --git a/arch/arm/boot/dts/imx27-apf27.dts b/arch/arm/boot/dts/imx27-apf27.dts index 3eddd805a793a3614c9f4fdd14483c93c9081a76..f635d5c5029c4443f845f231f357f1630303324d 100644 --- a/arch/arm/boot/dts/imx27-apf27.dts +++ b/arch/arm/boot/dts/imx27-apf27.dts @@ -20,6 +20,7 @@ compatible = "armadeus,imx27-apf27", "fsl,imx27"; memory@a0000000 { + device_type = "memory"; reg = <0xa0000000 0x04000000>; }; }; diff --git a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi index 9c455dcbe6ebf9b3c30d3d05d9a1e76291bf0f8d..c85f9d01768a105b48fb38cae6d077c62bf16845 100644 --- a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi +++ b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi @@ -17,6 +17,7 @@ compatible = "eukrea,cpuimx27", "fsl,imx27"; memory@a0000000 { + device_type = "memory"; reg = <0xa0000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/imx27-pdk.dts b/arch/arm/boot/dts/imx27-pdk.dts index f9a882d99132918b5f0bc364eec2f093d717c678..35123b7cb6b3ed87e72679e8dcdfd4dde532e9ac 100644 --- a/arch/arm/boot/dts/imx27-pdk.dts +++ b/arch/arm/boot/dts/imx27-pdk.dts @@ -10,6 +10,7 @@ compatible = "fsl,imx27-pdk", "fsl,imx27"; memory@a0000000 { + device_type = "memory"; reg = <0xa0000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi index cbad7c88c58cc60f59a3dfac35aabe8e6e8ddee0..b0b4f7c00246d7769939f1cca739302d714bea8a 100644 --- a/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi +++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi @@ -18,6 +18,7 @@ compatible = "phytec,imx27-pca100", "fsl,imx27"; memory@a0000000 { + device_type = "memory"; reg = <0xa0000000 0x08000000>; /* 128MB */ }; }; diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi index ec466b4bfd4107d61e7f754bc4879ed94ca17fb0..0935e1400e5d29f661bc6fecaade83d1c1af0aaa 100644 --- a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi +++ b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi @@ -17,6 +17,7 @@ compatible = "phytec,imx27-pcm038", "fsl,imx27"; memory@a0000000 { + device_type = "memory"; reg = <0xa0000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 753d88df1627405953c75116599c7235bd39095b..39e75b997bdc8157d42346ec32b0c7a789bb7f97 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -16,10 +16,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; diff --git a/arch/arm/boot/dts/imx31-bug.dts b/arch/arm/boot/dts/imx31-bug.dts index 6ee4ff8e4e8f0f470a77a3878c4eac392efebbe6..9eb960cc02cc514f8abccaf579280fc18fe9694a 100644 --- a/arch/arm/boot/dts/imx31-bug.dts +++ b/arch/arm/boot/dts/imx31-bug.dts @@ -17,6 +17,7 @@ compatible = "buglabs,imx31-bug", "fsl,imx31"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x8000000>; /* 128M */ }; }; diff --git a/arch/arm/boot/dts/imx31-lite.dts b/arch/arm/boot/dts/imx31-lite.dts index db52ddccabc3338a87490e4c02d21bb2a1d8e01e..d17abdfb6330c1f6ea919f5d224c782e9df8267e 100644 --- a/arch/arm/boot/dts/imx31-lite.dts +++ b/arch/arm/boot/dts/imx31-lite.dts @@ -18,6 +18,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x8000000>; }; diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi index ca1419ca303c3d5994a73b5bdddc9e69428dac55..2fc64d2c7c88e53a2ec1368c38c98b65d64063a1 100644 --- a/arch/arm/boot/dts/imx31.dtsi +++ b/arch/arm/boot/dts/imx31.dtsi @@ -10,10 +10,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. */ chosen {}; - memory { device_type = "memory"; }; aliases { gpio0 = &gpio1; diff --git a/arch/arm/boot/dts/imx35-eukrea-cpuimx35.dtsi b/arch/arm/boot/dts/imx35-eukrea-cpuimx35.dtsi index ba39d938f2891a3485e950cc3491578910a1b306..5f8a47a9fcd4071f7ce697111d6e4170ab9c1b19 100644 --- a/arch/arm/boot/dts/imx35-eukrea-cpuimx35.dtsi +++ b/arch/arm/boot/dts/imx35-eukrea-cpuimx35.dtsi @@ -18,6 +18,7 @@ compatible = "eukrea,cpuimx35", "fsl,imx35"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x8000000>; /* 128M */ }; }; diff --git a/arch/arm/boot/dts/imx35-pdk.dts b/arch/arm/boot/dts/imx35-pdk.dts index df613e88fd2c19622aee24020d215c4add0341e5..ddce0a844758b3a5836c3d240f0f13bff6a68f4a 100644 --- a/arch/arm/boot/dts/imx35-pdk.dts +++ b/arch/arm/boot/dts/imx35-pdk.dts @@ -11,6 +11,7 @@ compatible = "fsl,imx35-pdk", "fsl,imx35"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x8000000>, <0x90000000 0x8000000>; }; diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index 1c50b785cad473afc0d4ef28c266664ac8a63ba9..b36b97b655dda346be60c0aa3bfa0892a9256c2c 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -13,10 +13,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts index 682a99783ee69b24485315576f3852e278ac3081..a25da415cb02ec8a1595bdbf3138f3e5de05d740 100644 --- a/arch/arm/boot/dts/imx50-evk.dts +++ b/arch/arm/boot/dts/imx50-evk.dts @@ -12,6 +12,7 @@ compatible = "fsl,imx50-evk", "fsl,imx50"; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x80000000>; }; }; diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi index 7fae2ffb76fe2e63ecb4fe8a515b61a65dd99421..9e9e92acceb27ea9f3e0bd233cf69b07f85372e0 100644 --- a/arch/arm/boot/dts/imx50.dtsi +++ b/arch/arm/boot/dts/imx50.dtsi @@ -22,10 +22,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. */ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; @@ -420,7 +418,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin"; diff --git a/arch/arm/boot/dts/imx51-apf51.dts b/arch/arm/boot/dts/imx51-apf51.dts index 79d80036f74de643f3c0e210bcb363fd239a9526..1eddf2908b3f2962d46d32245d618f243cb794ef 100644 --- a/arch/arm/boot/dts/imx51-apf51.dts +++ b/arch/arm/boot/dts/imx51-apf51.dts @@ -22,6 +22,7 @@ compatible = "armadeus,imx51-apf51", "fsl,imx51"; memory@90000000 { + device_type = "memory"; reg = <0x90000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts index ba60b0cb3cc13ba08c9c414d7b8c317cc936ad7e..99191466a80852fc9e92386c34d4f9ea15337cbf 100644 --- a/arch/arm/boot/dts/imx51-babbage.dts +++ b/arch/arm/boot/dts/imx51-babbage.dts @@ -15,6 +15,7 @@ }; memory@90000000 { + device_type = "memory"; reg = <0x90000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi index 5761a66e8a0d3736f9c4776690174883bdb2dd14..82d8df097ef1fbb6296640fe1631bd90c604db92 100644 --- a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi +++ b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi @@ -17,6 +17,7 @@ compatible = "digi,connectcore-ccxmx51-som", "fsl,imx51"; memory@90000000 { + device_type = "memory"; reg = <0x90000000 0x08000000>; }; }; diff --git a/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi b/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi index f8902a338e49a40b27814230d0228fd675e62649..2e3125391bc49a78e3a3be67fa567cd3c9705672 100644 --- a/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi +++ b/arch/arm/boot/dts/imx51-eukrea-cpuimx51.dtsi @@ -23,6 +23,7 @@ compatible = "eukrea,cpuimx51", "fsl,imx51"; memory@90000000 { + device_type = "memory"; reg = <0x90000000 0x10000000>; /* 256M */ }; }; diff --git a/arch/arm/boot/dts/imx51-ts4800.dts b/arch/arm/boot/dts/imx51-ts4800.dts index 39eb067904c3ded221952cc1770f397d90cd019d..4344632f794002e054c86f40121d7059347f70e0 100644 --- a/arch/arm/boot/dts/imx51-ts4800.dts +++ b/arch/arm/boot/dts/imx51-ts4800.dts @@ -18,6 +18,7 @@ }; memory@90000000 { + device_type = "memory"; reg = <0x90000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts index 469cce2c03573b5f32d6534ef753b34b4547cd5d..9235fd45a824ec1b552a8a6f7aacb30b91b11330 100644 --- 
a/arch/arm/boot/dts/imx51-zii-rdu1.dts +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts @@ -53,6 +53,7 @@ /* Will be filled by the bootloader */ memory@90000000 { + device_type = "memory"; reg = <0x90000000 0>; }; @@ -477,6 +478,15 @@ }; &gpio1 { + gpio-line-names = "", "", "", "", + "", "", "", "", + "", "hp-amp-shutdown-b", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", ""; + unused-sd3-wp-gpio { /* * See pinctrl_esdhc1 below for more details on this @@ -501,14 +511,11 @@ hpa1: amp@60 { compatible = "ti,tpa6130a2"; reg = <0x60>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ampgpio>; - power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>; Vdd-supply = <&reg_3p3v>; }; ds1341: rtc@68 { - compatible = "maxim,ds1341"; + compatible = "dallas,ds1341"; reg = <0x68>; }; @@ -677,7 +684,10 @@ }; &iomuxc { - pinctrl_ampgpio: ampgpiogrp { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_hog: hoggrp { fsl,pins = < MX51_PAD_GPIO1_9__GPIO1_9 0x5e >; diff --git a/arch/arm/boot/dts/imx51-zii-scu2-mezz.dts b/arch/arm/boot/dts/imx51-zii-scu2-mezz.dts index 26cf08549df401e61325ceb017fea2a7848cefa8..f5b2d768fe47f45f27f0f193f93cc1722eed6bdc 100644 --- a/arch/arm/boot/dts/imx51-zii-scu2-mezz.dts +++ b/arch/arm/boot/dts/imx51-zii-scu2-mezz.dts @@ -18,6 +18,7 @@ /* Will be filled by the bootloader */ memory@90000000 { + device_type = "memory"; reg = <0x90000000 0>; }; diff --git a/arch/arm/boot/dts/imx51-zii-scu3-esb.dts b/arch/arm/boot/dts/imx51-zii-scu3-esb.dts index e6ebac8f43e4fbfe64a18d8254ebe80d374ebf7a..ad90d66ccca6cbcdb1425bf721bf43c794412284 100644 --- a/arch/arm/boot/dts/imx51-zii-scu3-esb.dts +++ b/arch/arm/boot/dts/imx51-zii-scu3-esb.dts @@ -18,6 +18,7 @@ /* Will be filled by the bootloader */ memory@90000000 { + device_type = "memory"; reg = <0x90000000 0>; }; diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 5c4ba91e43ba4b2e797d3d34dea32093f9a1f0e6..81f60c96a2e410f2dddce0557aa2c6229db0f8b9 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -16,10 +16,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node.
*/ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; @@ -481,7 +479,7 @@ reg = <0x83fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin"; diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts index 117bd002dd1d11bb942ec811ed9dbad4339dd5fa..7d5a48250f86732d1bc0393eda1b0f89f55ced90 100644 --- a/arch/arm/boot/dts/imx53-ard.dts +++ b/arch/arm/boot/dts/imx53-ard.dts @@ -19,6 +19,7 @@ compatible = "fsl,imx53-ard", "fsl,imx53"; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/imx53-cx9020.dts b/arch/arm/boot/dts/imx53-cx9020.dts index cf70ebc4399a21bf5b4074aca4eea99b71c6e250..c875e23ee45fb936e025abbac515e30d6bc0eae7 100644 --- a/arch/arm/boot/dts/imx53-cx9020.dts +++ b/arch/arm/boot/dts/imx53-cx9020.dts @@ -22,6 +22,7 @@ }; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x20000000>, <0xb0000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx53-m53.dtsi b/arch/arm/boot/dts/imx53-m53.dtsi index ce45f08e30514b4a61f1c336b0964071bc40aaa6..db2e5bce9b6a1dad7eb3b58520868ddf1099fcc0 100644 --- a/arch/arm/boot/dts/imx53-m53.dtsi +++ b/arch/arm/boot/dts/imx53-m53.dtsi @@ -16,6 +16,7 @@ compatible = "aries,imx53-m53", "denx,imx53-m53", "fsl,imx53"; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x20000000>, <0xb0000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index cdb90bee7b4a2b3d4d68681cc68c0c0efea7efae..f202396e3f2a8cb0920bc6897101f958b1c71ab8 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -55,7 +55,7 @@ }; chosen { - stdout-path = "&uart1:115200n8"; + stdout-path = "serial0:115200n8"; }; memory@70000000 { diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi index 50dde84b72ed762ea87e4f21ed5aa260a548bd1b..f00dda334976abf53e37b367ab76938bdc06de4c 100644 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi @@ -11,6 +11,7 @@ }; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x20000000>, <0xb0000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts index 462071c9ddd7399a1234347d2f5db8b69e8a1292..09071ca11c6cfa1d0ef5adbed75289847d00caab 100644 --- a/arch/arm/boot/dts/imx53-smd.dts +++ b/arch/arm/boot/dts/imx53-smd.dts @@ -12,6 +12,7 @@ compatible = "fsl,imx53-smd", "fsl,imx53"; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi index a72b8981fc3bd800d047596ca8732160a003861b..c77d58f06c94924e86a1edc99c6b090f53313fc4 100644 --- a/arch/arm/boot/dts/imx53-tqma53.dtsi +++ b/arch/arm/boot/dts/imx53-tqma53.dtsi @@ -17,6 +17,7 @@ compatible = "tq,tqma53", "fsl,imx53"; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x40000000>; /* Up to 1GiB */ }; diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi index 54cf3e67069a9c7771d24b93e47a9368f6a6053f..4ab135906949f274ccfa592fa7aabaaa5ab8cc81 100644 --- a/arch/arm/boot/dts/imx53-tx53.dtsi +++ b/arch/arm/boot/dts/imx53-tx53.dtsi @@ -51,6 +51,7 @@ /* Will be filled by the bootloader */ memory@70000000 { + device_type = "memory"; reg = <0x70000000 0>; }; diff --git 
a/arch/arm/boot/dts/imx53-usbarmory.dts b/arch/arm/boot/dts/imx53-usbarmory.dts index f6268d0ded2966fc173674e147e40f6d54937c8c..ee6263d1c2d3ddf4020a769a662fbb2abe726699 100644 --- a/arch/arm/boot/dts/imx53-usbarmory.dts +++ b/arch/arm/boot/dts/imx53-usbarmory.dts @@ -58,6 +58,7 @@ }; memory@70000000 { + device_type = "memory"; reg = <0x70000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi index f83a8c62ea531d144d2f7a51d5d02503714a42f1..d595034f3f1bfee6258a31d84681b00d647df025 100644 --- a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi +++ b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi @@ -17,12 +17,8 @@ memory@70000000 { device_type = "memory"; - reg = <0x70000000 0x20000000>; - }; - - memory@b0000000 { - device_type = "memory"; - reg = <0xb0000000 0x20000000>; + reg = <0x70000000 0x20000000>, + <0xb0000000 0x20000000>; }; regulators { diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 6386185ae23403089e4cb9a5a7e6c559d5637750..8accbe16b7584c0a266ca721f5a041be6a89f9d6 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -23,10 +23,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. */ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; @@ -701,7 +699,7 @@ reg = <0x63fb0000 0x4000>; interrupts = <6>; clocks = <&clks IMX5_CLK_SDMA_GATE>, - <&clks IMX5_CLK_SDMA_GATE>; + <&clks IMX5_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin"; diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi index acc3b11fba2a77e440a1afae95eae28b3438a650..cde3025d96033122ccf82919bbc05a0532171d29 100644 --- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi @@ -298,7 +298,7 @@ pinctrl-2 = <&pinctrl_usdhc3_200mhz>; vmcc-supply = <®_sd3_vmmc>; cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; - bus-witdh = <4>; + bus-width = <4>; no-1-8-v; status = "okay"; }; @@ -309,7 +309,7 @@ pinctrl-1 = <&pinctrl_usdhc4_100mhz>; pinctrl-2 = <&pinctrl_usdhc4_200mhz>; vmcc-supply = <®_sd4_vmmc>; - bus-witdh = <8>; + bus-width = <8>; no-1-8-v; non-removable; status = "okay"; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index ed1aafd569735a4dcd6e308767158ae809c27442..fe4e89d773f54aa1788c1e5aeb776bb805f05c9d 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -89,6 +89,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rgmii"; + phy-reset-duration = <10>; /* in msecs */ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; phy-supply = <&vdd_eth_io_reg>; status = "disabled"; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index 9f11f1fcc3e6caac35c38c5b81038b3a1a152078..9d086a3b5ffc0ca0ebbe4de55b919794ee3967c8 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -177,6 +177,8 @@ accelerometer@1c { compatible = "fsl,mma8451"; reg = <0x1c>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_mma8451_int>; interrupt-parent = <&gpio6>; interrupts = <31 IRQ_TYPE_LEVEL_LOW>; }; @@ -522,6 +524,12 @@ >; }; + pinctrl_mma8451_int: mma8451intgrp { + fsl,pins = < + 
MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0xb0b1 + >; + }; + pinctrl_pwm3: pwm1grp { fsl,pins = < MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1 diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi index 7fff3717cf7c09ae29d130756b3f33856d7ef819..315d0e7615f335efd3e7e120ce2360b149de7acf 100644 --- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi @@ -609,13 +609,14 @@ }; touchscreen@2a { - compatible = "eeti,egalax_ts"; + compatible = "eeti,exc3000"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ts>; reg = <0x2a>; interrupt-parent = <&gpio1>; interrupts = <8 IRQ_TYPE_LEVEL_LOW>; - wakeup-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; + touchscreen-inverted-x; + touchscreen-swapped-x-y; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 61d2d26afbf4d90474dcae54930bb036ba13412b..00d44a60972f7afa122be1079593227ae39fcc21 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -905,7 +905,7 @@ compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6QDL_CLK_SDMA>, + clocks = <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 679b4482ab13aca4bcaa0506f73b3c3a04233134..f7a48e4622e1bc85a7984312173ad3f3abd7d69d 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -17,6 +17,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/imx6sl-warp.dts b/arch/arm/boot/dts/imx6sl-warp.dts index 404e602e67813f05c043f6414601937c23a4573b..408da704c459b5d4d7d5e9ef90fdadda17b63162 100644 --- a/arch/arm/boot/dts/imx6sl-warp.dts +++ b/arch/arm/boot/dts/imx6sl-warp.dts @@ -55,6 +55,7 @@ compatible = "warp,imx6sl-warp", "fsl,imx6sl"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 7a4f5dace9026b0075507f46107c95bba1ab6397..55d1872aa81a8aa7f67aab41987dc629d0e18187 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -13,10 +13,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec; @@ -739,7 +737,7 @@ reg = <0x020ec000 0x4000>; interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SL_CLK_SDMA>, - <&clks IMX6SL_CLK_SDMA>; + <&clks IMX6SL_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; /* imx6sl reuses imx6q sdma firmware */ diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index c8e115564ba2c85df585ee772d923f52230a2481..0c2406ac8a638dd2cadd919238315e6f96ae3179 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -20,6 +20,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x80000000>; }; diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 000e6136a9d6c1bfbe2cd825b5a1d19cc9ee07a6..7c7d5c47578e201a86d8895c0b1bde5d43411f79 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -591,7 +591,7 @@ compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SLL_CLK_SDMA>, + clocks = <&clks IMX6SLL_CLK_IPG>, <&clks IMX6SLL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; @@ -709,7 +709,7 @@ i2c1: i2c@21a0000 { #address-cells = <1>; #size-cells = <0>; - compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c"; + compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c"; reg = <0x021a0000 0x4000>; interrupts = ; clocks = <&clks IMX6SLL_CLK_I2C1>; diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts index adb5cc7d8ce2fc3feb189a525ca9061d59b5e56e..832b5c5d7441a64a79e685f4f51ff4f83e6a39a2 100644 --- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts +++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts @@ -12,6 +12,7 @@ compatible = "boundary,imx6sx-nitrogen6sx", "fsl,imx6sx"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 841a27f3198ff88b4f58e9fd3286ce080ad3117c..48aede543612b76b892fa3f8356287ededdd3f26 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -11,6 +11,7 @@ compatible = "fsl,imx6sx-sabreauto", "fsl,imx6sx"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x80000000>; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index f8f31872fa144d83b29c227a921125c482ebc4bf..91f809ed1370a6220ac881b6aec386b80dc0dde3 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -21,6 +21,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; @@ -115,7 +116,9 @@ regulator-name = "enet_3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; + gpio = <&gpio2 6 GPIO_ACTIVE_LOW>; + regulator-boot-on; + regulator-always-on; }; reg_pcie_gpio: regulator-pcie-gpio { @@ -178,6 +181,7 @@ phy-supply = <&reg_enet_3v3>; phy-mode = "rgmii"; phy-handle = <&ethphy1>; + phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; status = "okay"; mdio { @@ -371,6 +375,8 @@ MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081 MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081 MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91 + /* phy reset */ + MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0 >; }; diff --git a/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts b/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts index 252175b592475743b77a765607534f9cffbe803c..2bc51623a8060d08042c3e8cf601ec65e937e35e
100644 --- a/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts +++ b/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts @@ -21,6 +21,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts index 40ccdf43dffc56109dedb9cbca391c2a7ca2c711..db0feb9b9f5d7ef3dfce9181566287e14d6a1b75 100644 --- a/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts +++ b/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts @@ -49,6 +49,7 @@ compatible = "udoo,neobasic", "fsl,imx6sx"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts index 42bfc8f8f7f6b56eae4071ee48ef11a797d324d3..5c7a2bb9141cba474375434f1f6fa84bd7a047e5 100644 --- a/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts +++ b/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts @@ -49,6 +49,7 @@ compatible = "udoo,neoextended", "fsl,imx6sx"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts index c84c877f09d499fff1665d7005c3845207c18e79..13dfe2afaba563ce5648a78d6756a0300370133e 100644 --- a/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts +++ b/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts @@ -49,6 +49,7 @@ compatible = "udoo,neofull", "fsl,imx6sx"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 844caa39364ffeaabc18093bcf3cf87d6b4c7c32..ae0728df542e930b8ab3abfaf2185d9a8875f799 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -15,10 +15,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. 
*/ chosen {}; - memory { device_type = "memory"; }; aliases { can0 = &flexcan1; @@ -462,7 +460,7 @@ }; gpt: gpt@2098000 { - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; reg = <0x02098000 0x4000>; interrupts = ; clocks = <&clks IMX6SX_CLK_GPT_BUS>, @@ -803,7 +801,7 @@ compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6SX_CLK_SDMA>, + clocks = <&clks IMX6SX_CLK_IPG>, <&clks IMX6SX_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index 32a07232c0345ae3a275e50dac3d095fe9acd017..6953034350034ab6dc285ae6976256cb73d9fbfa 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -12,6 +12,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; @@ -174,7 +175,7 @@ flash0: n25q256a@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "micron,n25q256a"; + compatible = "micron,n25q256a", "jedec,spi-nor"; spi-max-frequency = <29000000>; reg = <0>; }; diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts index d81d20f8fc8ddae18938210eddd846d354fea800..85cfad080f15c4c791bae9ffb7334aa01a2ac7f2 100644 --- a/arch/arm/boot/dts/imx6ul-geam.dts +++ b/arch/arm/boot/dts/imx6ul-geam.dts @@ -51,6 +51,7 @@ compatible = "engicam,imx6ul-geam", "fsl,imx6ul"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi index cd99285511544cc7862dd27d1da71ecf28de1a0c..1cb52744f58adeebc476571f6e211cc25b633d21 100644 --- a/arch/arm/boot/dts/imx6ul-isiot.dtsi +++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi @@ -46,6 +46,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx6ul-litesom.dtsi b/arch/arm/boot/dts/imx6ul-litesom.dtsi index 8f775f6974d1ceb01b426b1cd80edfad5f707b80..8d6893210842bed98518f7fd862f1bcb2d23c863 100644 --- a/arch/arm/boot/dts/imx6ul-litesom.dtsi +++ b/arch/arm/boot/dts/imx6ul-litesom.dtsi @@ -48,6 +48,7 @@ compatible = "grinn,imx6ul-litesom", "fsl,imx6ul"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/imx6ul-opos6ul.dtsi b/arch/arm/boot/dts/imx6ul-opos6ul.dtsi index a031bee311df4e7134c5715f2f4754291a372f9b..cf7faf4b9c47ef76f5c537bb1e6e894e4122b4fe 100644 --- a/arch/arm/boot/dts/imx6ul-opos6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul-opos6ul.dtsi @@ -49,6 +49,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0>; /* will be filled by U-Boot */ }; diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts index 0c09420f995125d49bba630931185a179a40020d..797262d2f27fdbdd1f3098282ebafd5a8615cf2b 100644 --- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts +++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts @@ -53,6 +53,7 @@ /* Will be filled by the bootloader */ memory@80000000 { + device_type = "memory"; reg = <0x80000000 0>; }; diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi index 02b5ba42cd5911cdc95f61987d0641aed6e37039..bb6dbfd5546b4b178cc31626b6d66ef42417e107 100644 --- a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi @@ -71,6 +71,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0>; /* will be filled by U-Boot */ }; diff 
--git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 6dc0b569acdf4c803cf0b13667788716028dd159..adecd6e08468cb0e97633261d307658774c72824 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -15,10 +15,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. */ chosen {}; - memory { device_type = "memory"; }; aliases { ethernet0 = &fec1; @@ -89,6 +87,8 @@ "pll1_sys"; arm-supply = <®_arm>; soc-supply = <®_soc>; + nvmem-cells = <&cpu_speed_grade>; + nvmem-cell-names = "speed_grade"; }; }; @@ -359,7 +359,7 @@ pwm1: pwm@2080000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02080000 0x4000>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6UL_CLK_PWM1>, <&clks IMX6UL_CLK_PWM1>; clock-names = "ipg", "per"; @@ -370,7 +370,7 @@ pwm2: pwm@2084000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02084000 0x4000>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6UL_CLK_PWM2>, <&clks IMX6UL_CLK_PWM2>; clock-names = "ipg", "per"; @@ -381,7 +381,7 @@ pwm3: pwm@2088000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x02088000 0x4000>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6UL_CLK_PWM3>, <&clks IMX6UL_CLK_PWM3>; clock-names = "ipg", "per"; @@ -392,7 +392,7 @@ pwm4: pwm@208c000 { compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; reg = <0x0208c000 0x4000>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6UL_CLK_PWM4>, <&clks IMX6UL_CLK_PWM4>; clock-names = "ipg", "per"; @@ -707,7 +707,7 @@ "fsl,imx35-sdma"; reg = <0x020ec000 0x4000>; interrupts = ; - clocks = <&clks IMX6UL_CLK_SDMA>, + clocks = <&clks IMX6UL_CLK_IPG>, <&clks IMX6UL_CLK_SDMA>; clock-names = "ipg", "ahb"; #dma-cells = <3>; @@ -932,6 +932,10 @@ tempmon_temp_grade: temp-grade@20 { reg = <0x20 4>; }; + + cpu_speed_grade: speed-grade@10 { + reg = <0x10 4>; + }; }; lcdif: lcdif@21c8000 { diff --git a/arch/arm/boot/dts/imx6ull-colibri-nonwifi.dtsi b/arch/arm/boot/dts/imx6ull-colibri-nonwifi.dtsi index 10ab4697950f51e001a8c483e09e9c8cd0240c27..fb213bec465435a80d1ab44f71592cba1aee71ba 100644 --- a/arch/arm/boot/dts/imx6ull-colibri-nonwifi.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri-nonwifi.dtsi @@ -7,6 +7,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x10000000>; }; }; diff --git a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi index 183193e8580dd12a99f1d14d365fceff892e0853..038d8c90f6dfeafd52cba37b3d5c076bbc67320e 100644 --- a/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri-wifi.dtsi @@ -7,6 +7,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h index fdc46bb09cc1afe647ac2e931caace08f0e0655b..3c12a6fb0b618aef5a1e47c21fcf249c13f33789 100644 --- a/arch/arm/boot/dts/imx6ull-pinfunc.h +++ b/arch/arm/boot/dts/imx6ull-pinfunc.h @@ -14,14 +14,23 @@ * The pin function ID is a tuple of * */ +/* signals common for i.MX6UL and i.MX6ULL */ +#undef MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX +#define MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 +#undef MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX +#define MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 +#undef MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS +#define 
MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 +#undef MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS +#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 +#undef MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS +#define MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 + +/* signals for i.MX6ULL only */ #define MX6ULL_PAD_UART1_TX_DATA__UART5_DTE_RX 0x0084 0x0310 0x0644 0x9 0x4 #define MX6ULL_PAD_UART1_RX_DATA__UART5_DCE_RX 0x0088 0x0314 0x0644 0x9 0x5 #define MX6ULL_PAD_UART1_CTS_B__UART5_DCE_RTS 0x008C 0x0318 0x0640 0x9 0x3 #define MX6ULL_PAD_UART1_RTS_B__UART5_DTE_RTS 0x0090 0x031C 0x0640 0x9 0x4 -#define MX6ULL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 -#define MX6ULL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 -#define MX6ULL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 -#define MX6ULL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 #define MX6ULL_PAD_ENET2_RX_DATA0__EPDC_SDDO08 0x00E4 0x0370 0x0000 0x9 0x0 #define MX6ULL_PAD_ENET2_RX_DATA1__EPDC_SDDO09 0x00E8 0x0374 0x0000 0x9 0x0 #define MX6ULL_PAD_ENET2_RX_EN__EPDC_SDDO10 0x00EC 0x0378 0x0000 0x9 0x0 @@ -55,7 +64,6 @@ #define MX6ULL_PAD_CSI_DATA00__ESAI_TX_HF_CLK 0x01E4 0x0470 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA01__ESAI_RX_HF_CLK 0x01E8 0x0474 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA02__ESAI_RX_FS 0x01EC 0x0478 0x0000 0x9 0x0 -#define MX6ULL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 #define MX6ULL_PAD_CSI_DATA03__ESAI_RX_CLK 0x01F0 0x047C 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0 diff --git a/arch/arm/boot/dts/imx6ull.dtsi b/arch/arm/boot/dts/imx6ull.dtsi index cd1776a7015ac0dc6d10630eb45bc93f816b0aae..796ed35d4ac9ae194cce80414f1eb09fd718ba02 100644 --- a/arch/arm/boot/dts/imx6ull.dtsi +++ b/arch/arm/boot/dts/imx6ull.dtsi @@ -22,7 +22,7 @@ >; fsl,soc-operating-points = < /* KHz uV */ - 900000 1175000 + 900000 1250000 792000 1175000 528000 1175000 396000 1175000 diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index 895fbde4d4333a3d5f37397feac66163911e4833..c1ed83131b495b7f2811c03f783a086ec48c7800 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -323,6 +323,7 @@ vmmc-supply = <&reg_module_3v3>; vqmmc-supply = <&reg_DCDC3>; non-removable; + sdhci-caps-mask = <0x80000000 0x0>; }; &iomuxc { diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts index 8bf365d28cacfd3731648922e705b011cb81b7b1..62d5e9a4a7818cf6648e5378d32d20eeaf6fed54 100644 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts @@ -19,6 +19,7 @@ compatible = "compulab,cl-som-imx7", "fsl,imx7d"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x10000000>; /* 256 MB - minimal configuration */ }; @@ -43,7 +44,7 @@ <&clks IMX7D_ENET1_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ethphy0>; fsl,magic-packet; status = "okay"; @@ -69,7 +70,7 @@ <&clks IMX7D_ENET2_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ethphy1>; fsl,magic-packet; status = "okay"; @@ -284,4 +285,4 @@ MX7D_PAD_LPSR_GPIO1_IO05__GPIO1_IO5
0x14 /* OTG PWREN */ >; }; -}; \ No newline at end of file +}; diff --git a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi index 04d24ee17b142f498fe781070f729cf8ad44490c..898f4b8d7421fbe50f6c58c00cdfd644c25a2f12 100644 --- a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi +++ b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi @@ -8,6 +8,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/imx7d-colibri.dtsi b/arch/arm/boot/dts/imx7d-colibri.dtsi index d9f8fb69511b6bd98e6aba5c047b833c1d276e77..e2e327f437e3569058378c8279d5be4faa22b587 100644 --- a/arch/arm/boot/dts/imx7d-colibri.dtsi +++ b/arch/arm/boot/dts/imx7d-colibri.dtsi @@ -45,6 +45,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts index d8aac4a2d02a2489d1843e2d22f2f4cb317eb481..6b4acea1ef7955d4c07a68e7b3ea24014e05dcb8 100644 --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts @@ -12,6 +12,7 @@ compatible = "boundary,imx7d-nitrogen7", "fsl,imx7d"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; @@ -86,13 +87,17 @@ compatible = "regulator-fixed"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; - clock-names = "slow"; regulator-name = "reg_wlan"; startup-delay-us = <70000>; gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; enable-active-high; }; + + usdhc2_pwrseq: usdhc2_pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; + clock-names = "ext_clock"; + }; }; &adc1 { @@ -375,6 +380,7 @@ bus-width = <4>; non-removable; vmmc-supply = <&reg_wlan>; + mmc-pwrseq = <&usdhc2_pwrseq>; cap-power-off-card; keep-power-in-suspend; status = "okay"; diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi index 21973eb55671920148e25e6a6b0e1c469093bc8e..934a019f341e420dabb11785a6cb92c449819cb2 100644 --- a/arch/arm/boot/dts/imx7d-pico.dtsi +++ b/arch/arm/boot/dts/imx7d-pico.dtsi @@ -49,6 +49,7 @@ compatible = "technexion,imx7d-pico", "fsl,imx7d"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x80000000>; }; @@ -100,6 +101,19 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; }; + + usdhc2_pwrseq: usdhc2_pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&clks IMX7D_CLKO2_ROOT_DIV>; + clock-names = "ext_clock"; + }; +}; + +&clks { + assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>, + <&clks IMX7D_CLKO2_ROOT_DIV>; + assigned-clock-parents = <&clks IMX7D_CKIL>; + assigned-clock-rates = <0>, <32768>; }; &i2c4 { @@ -199,12 +213,13 @@ &usdhc2 { /* Wifi SDIO */ pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc2>; + pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>; no-1-8-v; non-removable; keep-power-in-suspend; wakeup-source; vmmc-supply = <&reg_ap6212>; + mmc-pwrseq = <&usdhc2_pwrseq>; status = "okay"; }; @@ -301,6 +316,12 @@ }; &iomuxc_lpsr { + pinctrl_wifi_clk: wificlkgrp { + fsl,pins = < + MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d + >; + }; + pinctrl_wdog: wdoggrp { fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74 diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index c9b3c60b0eb22fe3ac4f8ab05f1a17209e96ee5d..317f1bcc56e2a56cc64f90e57392babc33fc299a 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -15,6 +15,7 @@ }; memory@80000000 { +
device_type = "memory"; reg = <0x80000000 0x80000000>; }; diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi index fe8344cee8641debe183a206e8db0afb78fa8a20..1fb1ec5d3d70717f08026f47e42d9bfee0c81ca8 100644 --- a/arch/arm/boot/dts/imx7s-colibri.dtsi +++ b/arch/arm/boot/dts/imx7s-colibri.dtsi @@ -45,6 +45,7 @@ / { memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x10000000>; }; }; diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts index fa390da636de761fabe196c9c16e2b41b26ba41d..97d5c711eb0ca7dd6cd886ea2276a35ddade285c 100644 --- a/arch/arm/boot/dts/imx7s-warp.dts +++ b/arch/arm/boot/dts/imx7s-warp.dts @@ -51,6 +51,7 @@ compatible = "warp,imx7s-warp", "fsl,imx7s"; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index a052198f6e9631484a7e6d08907110a427db3dc1..7eaf96b425bed4dc48fd647a598a0ce3923c9906 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -17,10 +17,8 @@ * The decompressor and also some bootloaders rely on a * pre-existing /chosen node to be available to insert the * command line and merge other ATAGS info. - * Also for U-Boot there must be a pre-existing /memory node. */ chosen {}; - memory { device_type = "memory"; }; aliases { gpio0 = &gpio1; @@ -443,7 +441,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302d0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT1_ROOT_CLK>, <&clks IMX7D_GPT1_ROOT_CLK>; clock-names = "ipg", "per"; }; @@ -452,7 +450,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302e0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT2_ROOT_CLK>, <&clks IMX7D_GPT2_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; @@ -462,7 +460,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302f0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT3_ROOT_CLK>, <&clks IMX7D_GPT3_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; @@ -472,7 +470,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x30300000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT4_ROOT_CLK>, <&clks IMX7D_GPT4_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; @@ -1050,8 +1048,8 @@ compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma"; reg = <0x30bd0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_SDMA_CORE_CLK>, - <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>; + clocks = <&clks IMX7D_IPG_ROOT_CLK>, + <&clks IMX7D_SDMA_CORE_CLK>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi index 738b44cf2b0bbdd5eb398af5fff2d6eb60095b53..1c833105d6c5495f6ec2116d888f82b44dce4371 100644 --- a/arch/arm/boot/dts/keystone-k2g.dtsi +++ b/arch/arm/boot/dts/keystone-k2g.dtsi @@ -416,7 +416,7 @@ clock-names = "fck", "mmchsdb_fck"; }; - qspi: qspi@2940000 { + qspi: spi@2940000 { compatible = "ti,k2g-qspi", "cdns,qspi-nor"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi index cbaf06f2f78e25abf993dead9ecebba23b8b2b2b..eb917462b219b996ba7ca3a7680a8f36347d225a 100644 --- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi +++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi @@ -36,8 +36,8 @@ compatible = 
"gpio-fan"; pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; pinctrl-names = "default"; - gpios = <&gpio1 14 GPIO_ACTIVE_LOW - &gpio1 13 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH + &gpio1 13 GPIO_ACTIVE_HIGH>; gpio-fan,speed-map = <0 0 3000 1 6000 2>; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index ac343330d0c83f203526ba458f6ddad1a5357b0d..98b682a8080cc334b40f44cc643f15b191e7a336 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -129,7 +129,7 @@ }; &mmc3 { - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; pinctrl-0 = <&mmc3_pins &wl127x_gpio>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 9d5d53fbe9c0c0212684cae4738a461062a6ed3a..c39cf2ca54da8d34d15e73dadc48bac3bace4918 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -35,7 +35,7 @@ * jumpering combinations for the long run. */ &mmc3 { - interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; + interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 7d2302e8706c969f5ba4da112b206f54baa5cf44..9354da4efe0939e6de254f7ca9133bb02ea79a21 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -196,3 +196,7 @@ &twl_gpio { ti,use-leds; }; + +&twl_keypad { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi index abff7ef7c9cd6a571a5966ff75f473215c737233..ed0d6fb20122a4e3c50ac17685cf0617467ea6d5 100644 --- a/arch/arm/boot/dts/lpc32xx.dtsi +++ b/arch/arm/boot/dts/lpc32xx.dtsi @@ -179,7 +179,7 @@ * ssp0 and spi1 are shared pins; * enable one in your board dts, as needed. */ - ssp0: ssp@20084000 { + ssp0: spi@20084000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x20084000 0x1000>; interrupts = <20 IRQ_TYPE_LEVEL_HIGH>; @@ -199,7 +199,7 @@ * ssp1 and spi2 are shared pins; * enable one in your board dts, as needed. 
*/ - ssp1: ssp@2008c000 { + ssp1: spi@2008c000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x2008c000 0x1000>; interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; @@ -230,7 +230,7 @@ status = "disabled"; }; - i2s1: i2s@2009C000 { + i2s1: i2s@2009c000 { compatible = "nxp,lpc3220-i2s"; reg = <0x2009C000 0x1000>; }; @@ -273,7 +273,7 @@ status = "disabled"; }; - i2c1: i2c@400A0000 { + i2c1: i2c@400a0000 { compatible = "nxp,pnx-i2c"; reg = <0x400A0000 0x100>; interrupt-parent = <&sic1>; @@ -284,7 +284,7 @@ clocks = <&clk LPC32XX_CLK_I2C1>; }; - i2c2: i2c@400A8000 { + i2c2: i2c@400a8000 { compatible = "nxp,pnx-i2c"; reg = <0x400A8000 0x100>; interrupt-parent = <&sic1>; @@ -295,7 +295,7 @@ clocks = <&clk LPC32XX_CLK_I2C2>; }; - mpwm: mpwm@400E8000 { + mpwm: mpwm@400e8000 { compatible = "nxp,lpc3220-motor-pwm"; reg = <0x400E8000 0x78>; status = "disabled"; @@ -394,7 +394,7 @@ #gpio-cells = <3>; /* bank, pin, flags */ }; - timer4: timer@4002C000 { + timer4: timer@4002c000 { compatible = "nxp,lpc3220-timer"; reg = <0x4002C000 0x1000>; interrupts = <3 IRQ_TYPE_LEVEL_LOW>; @@ -412,7 +412,7 @@ status = "disabled"; }; - watchdog: watchdog@4003C000 { + watchdog: watchdog@4003c000 { compatible = "nxp,pnx4008-wdt"; reg = <0x4003C000 0x1000>; clocks = <&clk LPC32XX_CLK_WDOG>; @@ -451,7 +451,7 @@ status = "disabled"; }; - timer1: timer@4004C000 { + timer1: timer@4004c000 { compatible = "nxp,lpc3220-timer"; reg = <0x4004C000 0x1000>; interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -475,7 +475,7 @@ status = "disabled"; }; - pwm1: pwm@4005C000 { + pwm1: pwm@4005c000 { compatible = "nxp,lpc3220-pwm"; reg = <0x4005C000 0x4>; clocks = <&clk LPC32XX_CLK_PWM1>; @@ -484,7 +484,7 @@ status = "disabled"; }; - pwm2: pwm@4005C004 { + pwm2: pwm@4005c004 { compatible = "nxp,lpc3220-pwm"; reg = <0x4005C004 0x4>; clocks = <&clk LPC32XX_CLK_PWM2>; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 0d9faf1a51eac0cca63b54cb272bb1c57e8261d2..a86b890863347bc4b0b623bfb33e9f4090e1439b 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -263,7 +263,7 @@ compatible = "amlogic,meson6-dwmac", "snps,dwmac"; reg = <0xc9410000 0x10000 0xc1108108 0x4>; - interrupts = ; + interrupts = ; interrupt-names = "macirq"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index d77dcf890cfc82b5c05c88149694f8687e14072c..7162e0ca05b0a5d477fd1369f127e8bef67e8413 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -194,7 +194,7 @@ #clock-cells = <1>; #reset-cells = <1>; compatible = "amlogic,meson8-clkc"; - reg = <0x8000 0x4>, <0x4000 0x460>; + reg = <0x8000 0x4>, <0x4000 0x400>; }; reset: reset-controller@4404 { diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index ef3177d3da3dc85d5cb2d2d786a59d368c4189cd..8fdeeffecbdbc296258566fec603472ba5455d22 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts @@ -125,7 +125,6 @@ /* Realtek RTL8211F (0x001cc916) */ eth_phy: ethernet-phy@0 { reg = <0>; - eee-broken-1000t; interrupt-parent = <&gpio_intc>; /* GPIOH_3 */ interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -172,8 +171,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 
08f7f6be7254e5e54e0f9bd5ff6790fbfa108dea..4293047a4b76baf9d9235b390cf59e3e8de02165 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -163,7 +163,7 @@ #clock-cells = <1>; #reset-cells = <1>; compatible = "amlogic,meson8b-clkc"; - reg = <0x8000 0x4>, <0x4000 0x460>; + reg = <0x8000 0x4>, <0x4000 0x400>; }; reset: reset-controller@4404 { @@ -207,9 +207,7 @@ groups = "eth_tx_clk", "eth_tx_en", "eth_txd1_0", - "eth_txd1_1", "eth_txd0_0", - "eth_txd0_1", "eth_rx_clk", "eth_rx_dv", "eth_rxd1", @@ -218,7 +216,9 @@ "eth_mdc", "eth_ref_clk", "eth_txd2", - "eth_txd3"; + "eth_txd3", + "eth_rxd3", + "eth_rxd2"; function = "ethernet"; }; }; diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b804170e941c5b24d2b50b94a1d62..6ac02beb5fa724c34706dcbf522312eedf09d618 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts @@ -206,8 +206,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi index 766bbb8495b60d796ccd857c4965ab4f8d09721c..e95deed6a7973ddff8b4a96a91dc078c70e23576 100644 --- a/arch/arm/boot/dts/mmp2.dtsi +++ b/arch/arm/boot/dts/mmp2.dtsi @@ -180,7 +180,7 @@ clocks = <&soc_clocks MMP2_CLK_GPIO>; resets = <&soc_clocks MMP2_CLK_GPIO>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; ranges; gcb0: gpio@d4019000 { @@ -220,12 +220,15 @@ status = "disabled"; }; - twsi2: i2c@d4025000 { + twsi2: i2c@d4031000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4025000 0x1000>; - interrupts = <58>; + reg = <0xd4031000 0x1000>; + interrupt-parent = <&intcmux17>; + interrupts = <0>; clocks = <&soc_clocks MMP2_CLK_TWSI1>; resets = <&soc_clocks MMP2_CLK_TWSI1>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0de974a74ecf754f0a4644c5fc46b..f57acf8f66b95d1a2f1e17899e97b61a433c43da 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -105,7 +105,7 @@ interrupts-extended = < &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 - &cpcap 48 1 + &cpcap 48 0 >; interrupt-names = "id_ground", "id_float", "se0conn", "vbusvld", diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index f1d6de8b3c193eee0d88b0465f33de4e122ba266..000bf16de6517df2ec0e3713429ca6efde32e285 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -114,7 +114,7 @@ dma-names = "tx", "rx"; }; - mcspi1: mcspi@48098000 { + mcspi1: spi@48098000 { compatible = "ti,omap2-mcspi"; ti,hwmods = "mcspi1"; reg = <0x48098000 0x100>; @@ -125,7 +125,7 @@ "tx2", "rx2", "tx3", "rx3"; }; - mcspi2: mcspi@4809a000 { + mcspi2: spi@4809a000 { compatible = "ti,omap2-mcspi"; ti,hwmods = "mcspi2"; reg = <0x4809a000 0x100>; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index 84635eeb99cd46ae4d7240dfffbe551cb20a6af8..7f57af2f10acb6f02d42fff742d8be754101a042 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -285,7 +285,7 @@ ti,timer-alwon; }; - mcspi3: mcspi@480b8000 { + mcspi3: spi@480b8000 { compatible = "ti,omap2-mcspi"; ti,hwmods = "mcspi3"; reg = <0x480b8000 0x100>; 
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index ac830b9177763d32a70b8807e02f35cb039efc17..0c39a2340030b566137d3b7c665ec40dc6f8068a 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -28,6 +28,7 @@ aliases { display0 = &lcd; + display1 = &tv0; }; /* fixed 26MHz oscillator */ @@ -78,7 +79,7 @@ #sound-dai-cells = <0>; }; - spi_lcd { + spi_lcd: spi_lcd { compatible = "spi-gpio"; #address-cells = <0x1>; #size-cells = <0x0>; @@ -131,7 +132,7 @@ }; tv0: connector { - compatible = "svideo-connector"; + compatible = "composite-video-connector"; label = "tv"; port { @@ -143,7 +144,7 @@ tv_amp: opa362 { compatible = "ti,opa362"; - enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; + enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; /* GPIO_23 to enable video out amplifier */ ports { #address-cells = <1>; @@ -282,6 +283,13 @@ OMAP3_CORE1_IOPAD(0x2134, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio112 */ >; }; + + penirq_pins: pinmux_penirq_pins { + pinctrl-single,pins = < + /* here we could enable to wakeup the cpu from suspend by a pen touch */ + OMAP3_CORE1_IOPAD(0x2194, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio160 */ + >; + }; }; &omap3_pmx_core2 { @@ -422,10 +430,19 @@ tsc2007@48 { compatible = "ti,tsc2007"; reg = <0x48>; + pinctrl-names = "default"; + pinctrl-0 = <&penirq_pins>; interrupt-parent = <&gpio6>; interrupts = <0 IRQ_TYPE_EDGE_FALLING>; /* GPIO_160 */ - gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* GPIO_160 */ ti,x-plate-ohms = <600>; + touchscreen-size-x = <480>; + touchscreen-size-y = <640>; + touchscreen-max-pressure = <1000>; + touchscreen-fuzz-x = <3>; + touchscreen-fuzz-y = <8>; + touchscreen-fuzz-pressure = <10>; + touchscreen-inverted-y; }; /* RFID EEPROM */ @@ -531,6 +548,12 @@ regulator-max-microvolt = <3150000>; }; +/* Needed to power the DPI pins */ + +&vpll2 { + regulator-always-on; +}; + &dss { pinctrl-names = "default"; pinctrl-0 = < &dss_dpi_pins >; @@ -551,10 +574,14 @@ vdda-supply = <&vdac>; + #address-cells = <1>; + #size-cells = <0>; + port { + reg = <0>; venc_out: endpoint { remote-endpoint = <&opa_in>; - ti,channels = <2>; + ti,channels = <1>; ti,invert-polarity; }; }; @@ -598,22 +625,22 @@ bootloaders@80000 { label = "U-Boot"; - reg = <0x80000 0x1e0000>; + reg = <0x80000 0x1c0000>; }; - bootloaders_env@260000 { + bootloaders_env@240000 { label = "U-Boot Env"; - reg = <0x260000 0x20000>; + reg = <0x240000 0x40000>; }; kernel@280000 { label = "Kernel"; - reg = <0x280000 0x400000>; + reg = <0x280000 0x600000>; }; - filesystem@680000 { + filesystem@880000 { label = "File System"; - reg = <0x680000 0xf980000>; + reg = <0x880000 0>; /* 0 = MTDPART_SIZ_FULL */ }; }; }; diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts index ded5fcf084eb786d5c3ef521ec1ffbf535915b3f..1f91646b895162c7771174797ce50bb3194f9220 100644 --- a/arch/arm/boot/dts/omap3-n9.dts +++ b/arch/arm/boot/dts/omap3-n9.dts @@ -40,7 +40,7 @@ }; &i2c3 { - ak8975@0f { + ak8975@f { compatible = "asahi-kasei,ak8975"; reg = <0x0f>; }; diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529b317ce9b8788c8cdbde14c7c53c7..e142e6c70a59fa7deb3ffee947b212ec50ba0fac 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -370,6 +370,19 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set 
values when booted with v4.19 using both N950 + * and N9 devices (OneNAND Manufacturer: Samsung): + * + * gpmc cs0 before gpmc_cs_program_settings: + * cs0 GPMC_CS_CONFIG1: 0xfd001202 + * cs0 GPMC_CS_CONFIG2: 0x00181800 + * cs0 GPMC_CS_CONFIG3: 0x00030300 + * cs0 GPMC_CS_CONFIG4: 0x18001804 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d + * cs0 GPMC_CS_CONFIG6: 0x97080000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; @@ -379,26 +392,27 @@ gpmc,device-width = <2>; gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <122>; + gpmc,cs-wr-off-ns = <122>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <15>; + gpmc,adv-wr-off-ns = <15>; + gpmc,oe-on-ns = <20>; + gpmc,oe-off-ns = <122>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; + gpmc,we-off-ns = <122>; + gpmc,rd-cycle-ns = <148>; + gpmc,wr-cycle-ns = <148>; + gpmc,access-ns = <117>; gpmc,page-burst-access-ns = <15>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; - gpmc,sync-clk-ps = <15000>; + gpmc,clk-activation-ns = <10>; + gpmc,wr-data-mux-bus-ns = <40>; + gpmc,wr-access-ns = <117>; + + gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */ /* * MTD partition table corresponding to Nokia's MeeGo 1.2 diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi index 90c98f95b2b3ac8b572655a18e04eaa2f4c95801..a51081de7144862e6acaab83362806bb65ce181a 100644 --- a/arch/arm/boot/dts/omap3-pandora-common.dtsi +++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi @@ -229,6 +229,17 @@ gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* GPIO_164 */ }; + /* wl1251 wifi+bt module */ + wlan_en: fixed-regulator-wg7210_en { + compatible = "regulator-fixed"; + regulator-name = "vwlan"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + startup-delay-us = <50000>; + enable-active-high; + gpio = <&gpio1 23 GPIO_ACTIVE_HIGH>; + }; + /* wg7210 (wifi+bt module) 32k clock buffer */ wg7210_32k: fixed-regulator-wg7210_32k { compatible = "regulator-fixed"; @@ -525,9 +536,30 @@ /*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/ /* GPIO_127 */ }; -/* mmc3 is probed using pdata-quirks to pass wl1251 card data */ &mmc3 { - status = "disabled"; + vmmc-supply = <&wlan_en>; + + bus-width = <4>; + non-removable; + ti,non-removable; + cap-power-off-card; + + pinctrl-names = "default"; + pinctrl-0 = <&mmc3_pins>; + + #address-cells = <1>; + #size-cells = <0>; + + wlan: wifi@1 { + compatible = "ti,wl1251"; + + reg = <1>; + + interrupt-parent = <&gpio1>; + interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */ + + ti,wl1251-has-eeprom; + }; }; /* bluetooth*/ diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi index 6f5bd027b71753c8508acc93a56f211a95cd12be..7b4ec2c4004206b819b2901ffc81df52f6b60f3f 100644 --- a/arch/arm/boot/dts/omap3-tao3530.dtsi +++ b/arch/arm/boot/dts/omap3-tao3530.dtsi @@ -225,7 +225,7 @@ pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; vqmmc-supply = <&vsim>; - cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>; bus-width = <8>; }; diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 
04758a2a87f031eb42df555cc85a85bd0bf63816..67d77eee9433c655e0bd8f0c1dbf7c25aba3ecce 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -644,6 +644,17 @@ }; }; +/* Configure pwm clock source for timers 8 & 9 */ +&timer8 { + assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + +&timer9 { + assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + /* * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for * uart1 wakeirq. diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi index 6eb26b837446c7d51c3f3e7a1bf86ad3ad9f3ea1..5059ecac44787cd6711b869b03cdb92ab0da5be8 100644 --- a/arch/arm/boot/dts/omap4-l4.dtsi +++ b/arch/arm/boot/dts/omap4-l4.dtsi @@ -196,12 +196,12 @@ clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; - ranges = <0x0 0x58000 0x4000>; + ranges = <0x0 0x58000 0x5000>; hsi: hsi@0 { compatible = "ti,omap4-hsi"; reg = <0x0 0x4000>, - <0x4a05c000 0x1000>; + <0x5000 0x1000>; reg-names = "sys", "gdd"; clocks = <&l3_init_clkctrl OMAP4_HSI_CLKCTRL 0>; diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts index 490726b522162bd0a11654291889e51d40793ee6..9dc7ec7655cb87776d691d75ef465e978b90e7d5 100644 --- a/arch/arm/boot/dts/omap4-sdp.dts +++ b/arch/arm/boot/dts/omap4-sdp.dts @@ -33,6 +33,7 @@ gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */ enable-active-high; regulator-boot-on; + startup-delay-us = <25000>; }; vbat: fixedregulator-vbat { diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index ab6f640b282bfc903f115bdb56796447b862b8f8..61a06f6add3ca52a9d7c4851080c53d0a5d18ac9 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -317,7 +317,8 @@ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) >; }; @@ -385,7 +386,8 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = ; /* IRQ_SYS_1N */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; reg = <0x48>; interrupt-controller; #interrupt-cells = <2>; @@ -651,7 +653,8 @@ pinctrl-names = "default"; pinctrl-0 = <&twl6040_pins>; - interrupts = ; /* IRQ_SYS_2N cascaded to gic */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; /* audpwron gpio defined in the board specific dts */ @@ -700,6 +703,11 @@ vbus-supply = <&smps10_out1_reg>; }; +&dwc3 { + extcon = <&extcon_usb3>; + dr_mode = "otg"; +}; + &mcspi1 { }; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65daa8e29a1ca90a404389d9dc99a9..e78d3718f145d544dee0625ada9c3bfe7ca9327b 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -181,6 +181,13 @@ OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ >; }; + + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { + pinctrl-single,pins = < + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) + >; + }; }; &omap5_pmx_core { @@ -414,8 +421,11 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = ; /* IRQ_SYS_1N */ reg = <0x48>; + 
pinctrl-0 = <&palmas_sys_nirq_pins>; + pinctrl-names = "default"; + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; interrupt-controller; #interrupt-cells = <2>; ti,system-power-controller; diff --git a/arch/arm/boot/dts/orion5x-linkstation.dtsi b/arch/arm/boot/dts/orion5x-linkstation.dtsi index ebd93df5d07a8a90218c9bc92e062e962e27db3d..b6c9b85951ea695a2b24f6a68e5b039ee7d556fa 100644 --- a/arch/arm/boot/dts/orion5x-linkstation.dtsi +++ b/arch/arm/boot/dts/orion5x-linkstation.dtsi @@ -156,7 +156,7 @@ &i2c { status = "okay"; - rtc { + rtc@32 { compatible = "ricoh,rs5c372a"; reg = <0x32>; }; diff --git a/arch/arm/boot/dts/pxa25x.dtsi b/arch/arm/boot/dts/pxa25x.dtsi index 95d59be97213e0392505c4e7afd67d2f99e5e4a6..8494b578717090a6401090ce3c59b25f17f7dab1 100644 --- a/arch/arm/boot/dts/pxa25x.dtsi +++ b/arch/arm/boot/dts/pxa25x.dtsi @@ -80,6 +80,10 @@ #pwm-cells = <1>; clocks = <&clks CLK_PWM1>; }; + + rtc@40900000 { + clocks = <&clks CLK_OSC32k768>; + }; }; timer@40a00000 { diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi index 747f750f675d96dd351d8ad5a02213dae10fb6bf..ccbecad9c5c7c45c67d6fab36daa38a4806c3c97 100644 --- a/arch/arm/boot/dts/pxa27x.dtsi +++ b/arch/arm/boot/dts/pxa27x.dtsi @@ -35,7 +35,7 @@ clocks = <&clks CLK_NONE>; }; - pxa27x_ohci: usb@4c000000 { + usb0: usb@4c000000 { compatible = "marvell,pxa-ohci"; reg = <0x4c000000 0x10000>; interrupts = <3>; @@ -71,7 +71,7 @@ clocks = <&clks CLK_PWM1>; }; - pwri2c: i2c@40f000180 { + pwri2c: i2c@40f00180 { compatible = "mrvl,pxa-i2c"; reg = <0x40f00180 0x24>; interrupts = <6>; @@ -113,6 +113,10 @@ status = "disabled"; }; + + rtc@40900000 { + clocks = <&clks CLK_OSC32k768>; + }; }; clocks { diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi index a520b4c14ea9f5023b11672c17cb438f256f9067..0a0e837dc79cb7b8c44a438eaf4da9b9fa879e76 100644 --- a/arch/arm/boot/dts/pxa2xx.dtsi +++ b/arch/arm/boot/dts/pxa2xx.dtsi @@ -117,13 +117,6 @@ status = "disabled"; }; - usb0: ohci@4c000000 { - compatible = "marvell,pxa-ohci"; - reg = <0x4c000000 0x10000>; - interrupts = <3>; - status = "disabled"; - }; - mmc0: mmc@41100000 { compatible = "marvell,pxa-mmc"; reg = <0x41100000 0x1000>; diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi index 3a8f0edc3af99dea946c13417946e5a9c0456245..53009dbd3615893b638285e76304e9e0293593a0 100644 --- a/arch/arm/boot/dts/pxa3xx.dtsi +++ b/arch/arm/boot/dts/pxa3xx.dtsi @@ -204,7 +204,7 @@ status = "disabled"; }; - pxa3xx_ohci: usb@4c000000 { + usb0: usb@4c000000 { compatible = "marvell,pxa-ohci"; reg = <0x4c000000 0x10000>; interrupts = <3>; diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts index 76b56eafaab90caa6e1fef91297c8b0d857b9119..f714a20649d7498d84188f5f289d21f5f4980a67 100644 --- a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts +++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts @@ -387,6 +387,11 @@ hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>; ports { + port@0 { + endpoint { + remote-endpoint = <&mdp_dtv_out>; + }; + }; port@1 { endpoint { remote-endpoint = <&hdmi_con>; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 78db67337ed4a3ce90a8962f183444296c27fc53..8328ad589e2bac0281b3973cbbe6f49cdda86546 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -313,7 +313,7 @@ saw0: regulator@b089000 { compatible = "qcom,saw2"; - reg = <0x02089000 0x1000>, 
<0x0b009000 0x1000>; + reg = <0x0b089000 0x1000>, <0x0b009000 0x1000>; regulator; }; @@ -386,10 +386,10 @@ #address-cells = <3>; #size-cells = <2>; - ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000 - 0x82000000 0 0x48000000 0x48000000 0 0x10000000>; + ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>, + <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>; - interrupts = ; + interrupts = ; interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi index 6b997bc016ee8a9e989a38ce476bb1c4f62d9f26..03919714645ae56d22c8e9e59a6836beea8f74f8 100644 --- a/arch/arm/boot/dts/r8a7779.dtsi +++ b/arch/arm/boot/dts/r8a7779.dtsi @@ -344,7 +344,7 @@ sata: sata@fc600000 { compatible = "renesas,sata-r8a7779", "renesas,rcar-sata"; - reg = <0xfc600000 0x2000>; + reg = <0xfc600000 0x200000>; interrupts = ; clocks = <&mstp1_clks R8A7779_CLK_SATA>; power-domains = <&sysc R8A7779_PD_ALWAYS_ON>; diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts index 50312e752e2fae3dc42c6a0c6b45b78b0db7dcf1..7b9508e83d46c54df2d2b424a63f3ae9db1cdc87 100644 --- a/arch/arm/boot/dts/r8a7790-lager.dts +++ b/arch/arm/boot/dts/r8a7790-lager.dts @@ -489,8 +489,6 @@ }; &lvds1 { - status = "okay"; - ports { port@1 { lvds_connector: endpoint { diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 0925bdca438feedaa8ee956f8109fcca75dbbe1f..52a757f47bf08f57f4d2895b5735d03d74ea73f9 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -1559,7 +1559,7 @@ sata0: sata@ee300000 { compatible = "renesas,sata-r8a7790", "renesas,rcar-gen2-sata"; - reg = <0 0xee300000 0 0x2000>; + reg = <0 0xee300000 0 0x200000>; interrupts = ; clocks = <&cpg CPG_MOD 815>; power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; @@ -1570,7 +1570,7 @@ sata1: sata@ee500000 { compatible = "renesas,sata-r8a7790", "renesas,rcar-gen2-sata"; - reg = <0 0xee500000 0 0x2000>; + reg = <0 0xee500000 0 0x200000>; interrupts = ; clocks = <&cpg CPG_MOD 814>; power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index ce22db01fbbaafb4689cc736cc357f8ec7f7f335..e6580aa0cea3573fd3c7b52e560f4a076f6e58cb 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -479,8 +479,6 @@ }; &lvds0 { - status = "okay"; - ports { port@1 { lvds_connector: endpoint { diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index f02036e5de015a95feec05c47ead06e91cb3daee..fefdf8238bbe900226f338c60753e0f68919258e 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts @@ -482,8 +482,6 @@ }; &lvds0 { - status = "okay"; - ports { port@1 { lvds_connector: endpoint { diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 991ac6feedd5beb6123f7a12f98c8799d89f9565..25b6a99dd87a22c64519187e3cd10555330706ed 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -1543,7 +1543,7 @@ sata0: sata@ee300000 { compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata"; - reg = <0 0xee300000 0 0x2000>; + reg = <0 0xee300000 0 0x200000>; interrupts = ; clocks = <&cpg CPG_MOD 815>; power-domains = <&sysc R8A7791_PD_ALWAYS_ON>; @@ -1554,7 +1554,7 @@ sata1: sata@ee500000 { compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata"; - reg = <0 0xee500000 0 0x2000>; + reg = <0 
0xee500000 0 0x200000>; interrupts = ; clocks = <&cpg CPG_MOD 814>; power-domains = <&sysc R8A7791_PD_ALWAYS_ON>; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 67f57200d9a06ba1a2dde2b123aca62a1571230e..d560fc4051c5f7a355a0896f0e5d0e76e41acbeb 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -733,7 +733,7 @@ /* no rts / cts for uart2 */ }; - spi { + spi-pins { spi_txd:spi-txd { rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>; }; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index 45fd2b302dda1d1c3c995149f2386c1d1db97af9..4a2890618f6fcf8d914eef78c4a027e1f70b3ac1 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -93,6 +93,8 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio3 RK_PA1 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_pwr>; startup-delay-us = <100000>; vin-supply = <&vcc_io>; }; @@ -315,6 +317,12 @@ }; }; + sd0 { + sdmmc_pwr: sdmmc-pwr { + rockchip,pins = ; + }; + }; + usb { host_vbus_drv: host-vbus-drv { rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi index 50325489c0ced4d21306801d2fabaf1e0261a97f..32e1ab33666294e048ecb1314c89c10d013c9dfd 100644 --- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi +++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi @@ -25,7 +25,7 @@ vcc_flash: flash-regulator { compatible = "regulator-fixed"; - regulator-name = "vcc_sys"; + regulator-name = "vcc_flash"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; startup-delay-us = <150>; diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts index 1e0158acf895d99f8bde234891011e019e172a92..a593d0a998fc8bcb7cbf75960af21b4a2bac44ca 100644 --- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts +++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts @@ -124,10 +124,6 @@ }; }; -&emmc { - /delete-property/mmc-hs200-1_8v; -}; - &i2c2 { status = "disabled"; }; diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts index f95d0c5fcf71263f044cb84a7efd6599878895be..6e8946052c78b12d688d4ec4a7621576f0cb4ab7 100644 --- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts +++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts @@ -90,10 +90,6 @@ pwm-off-delay-ms = <200>; }; -&emmc { - /delete-property/mmc-hs200-1_8v; -}; - &gpio_keys { pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 2075120cfc4d780482a89b4cd8203419f3860a6d..d8bf939a3aff9d0e0ee1c909237efd55c716dbd5 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -10,7 +10,11 @@ #include "rk3288.dtsi" / { - memory@0 { + /* + * The default coreboot on veyron devices ignores memory@0 nodes + * and would instead create another memory node. 
+ */ + memory { device_type = "memory"; reg = <0x0 0x0 0x0 0x80000000>; }; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 0840ffb3205cebb7cc7932228b74dad0b920b1c7..440d6783faca55ad6073bfd2fdaae5fb3a2c9957 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -70,7 +70,7 @@ compatible = "arm,cortex-a12"; reg = <0x501>; resets = <&cru SRST_CORE1>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; @@ -80,7 +80,7 @@ compatible = "arm,cortex-a12"; reg = <0x502>; resets = <&cru SRST_CORE2>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; @@ -90,7 +90,7 @@ compatible = "arm,cortex-a12"; reg = <0x503>; resets = <&cru SRST_CORE3>; - operating-points = <&cpu_opp_table>; + operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; /* min followed by max */ clock-latency = <40000>; clocks = <&cru ARMCLK>; @@ -227,6 +227,7 @@ , ; clock-frequency = <24000000>; + arm,no-tick-in-suspend; }; timer: timer@ff810000 { @@ -1261,27 +1262,27 @@ gpu_opp_table: gpu-opp-table { compatible = "operating-points-v2"; - opp@100000000 { + opp-100000000 { opp-hz = /bits/ 64 <100000000>; opp-microvolt = <950000>; }; - opp@200000000 { + opp-200000000 { opp-hz = /bits/ 64 <200000000>; opp-microvolt = <950000>; }; - opp@300000000 { + opp-300000000 { opp-hz = /bits/ 64 <300000000>; opp-microvolt = <1000000>; }; - opp@400000000 { + opp-400000000 { opp-hz = /bits/ 64 <400000000>; opp-microvolt = <1100000>; }; - opp@500000000 { + opp-500000000 { opp-hz = /bits/ 64 <500000000>; opp-microvolt = <1200000>; }; - opp@600000000 { + opp-600000000 { opp-hz = /bits/ 64 <600000000>; opp-microvolt = <1250000>; }; diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi index ed8f6ca52c5bc979777aa54a6d2e011593c9f92f..a9f053dfdc06885a6a4fcff445e74834ac62aad7 100644 --- a/arch/arm/boot/dts/rv1108.dtsi +++ b/arch/arm/boot/dts/rv1108.dtsi @@ -66,7 +66,7 @@ arm-pmu { compatible = "arm,cortex-a7-pmu"; - interrupts = ; + interrupts = ; }; timer { @@ -541,7 +541,7 @@ compatible = "rockchip,gpio-bank"; reg = <0x20030000 0x100>; interrupts = ; - clocks = <&xin24m>; + clocks = <&cru PCLK_GPIO0_PMU>; gpio-controller; #gpio-cells = <2>; @@ -554,7 +554,7 @@ compatible = "rockchip,gpio-bank"; reg = <0x10310000 0x100>; interrupts = ; - clocks = <&xin24m>; + clocks = <&cru PCLK_GPIO1>; gpio-controller; #gpio-cells = <2>; @@ -567,7 +567,7 @@ compatible = "rockchip,gpio-bank"; reg = <0x10320000 0x100>; interrupts = ; - clocks = <&xin24m>; + clocks = <&cru PCLK_GPIO2>; gpio-controller; #gpio-cells = <2>; @@ -580,7 +580,7 @@ compatible = "rockchip,gpio-bank"; reg = <0x10330000 0x100>; interrupts = ; - clocks = <&xin24m>; + clocks = <&cru PCLK_GPIO3>; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts index 0e159c884f972240914d319aeeccdccdecdf2a93..1aeac33b0d341bea18c0dddf46d6feca5f1c8b5d 100644 --- a/arch/arm/boot/dts/s3c6410-mini6410.dts +++ b/arch/arm/boot/dts/s3c6410-mini6410.dts @@ -165,6 +165,10 @@ }; }; +&clocks { + clocks = <&fin_pll>; +}; + &sdhci0 { pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts index 
a9a5689dc462db0b3f6abe56be1dea2dd6aedbbd..3bf6c450a26e58f2f0902356b19d3c587dfba181 100644 --- a/arch/arm/boot/dts/s3c6410-smdk6410.dts +++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts @@ -69,6 +69,10 @@ }; }; +&clocks { + clocks = <&fin_pll>; +}; + &sdhci0 { pinctrl-names = "default"; pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>; diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h index 1c01a6f843d8a43c07ab25dd18ffb57acd044c0b..28a2e45752fea34eb2efb576439409c0611d9d90 100644 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h @@ -518,7 +518,7 @@ #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0) #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3) #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1) -#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1) +#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1) #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2) #define PIN_PC10 74 #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0) diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 61f68e5c48e96324eb1c6cfbc26d90ade0d4a89c..b405992eb60166292983f2016c865734974645f4 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -308,7 +308,7 @@ 0x1 0x0 0x60000000 0x10000000 0x2 0x0 0x70000000 0x10000000 0x3 0x0 0x80000000 0x10000000>; - clocks = <&mck>; + clocks = <&h32ck>; status = "disabled"; nand_controller: nand-controller { diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index a4dcb68f4322e2c96dd8bad240bb94b7eb307c9e..b4dd3846e8cc986f31a7cf23aa30e6aa30e49e51 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -613,7 +613,7 @@ status = "disabled"; }; - sdr: sdr@ffc25000 { + sdr: sdr@ffcfb100 { compatible = "altr,sdr-ctl", "syscon"; reg = <0xffcfb100 0x80>; }; diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts index b280e6494193885aba2ca44e9ad3eb9902a2bb57..31b01a998b2ed7e4f0e7c7b1ab1cda05595d39c7 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts @@ -88,7 +88,7 @@ status = "okay"; clock-frequency = <100000>; - adxl345: adxl345@0 { + adxl345: adxl345@53 { compatible = "adi,adxl345"; reg = <0x53>; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index 2310a4e97768c222ca19fe8f125c5aede36be8d1..986767735e24925a5f2ba7b6b738c0ce255d0c6d 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -197,7 +197,7 @@ <0xa0410100 0x100>; }; - scu@a04100000 { + scu@a0410000 { compatible = "arm,cortex-a9-scu"; reg = <0xa0410000 0x100>; }; @@ -878,7 +878,7 @@ power-domains = <&pm_domains DOMAIN_VAPE>; }; - ssp@80002000 { + spi@80002000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x80002000 0x1000>; interrupts = ; @@ -892,7 +892,7 @@ power-domains = <&pm_domains DOMAIN_VAPE>; }; - ssp@80003000 { + spi@80003000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x80003000 0x1000>; interrupts = ; diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi index 5c5cea232743d111f154cd0faa0517b44f6a70ab..1ec193b0c5065b794838705702a99bd48c95586c 100644 --- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi +++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi @@ -607,16 +607,20 @@ mcde { lcd_default_mode: lcd_default { - default_mux { + default_mux1 { /* Mux in VSI0 and 
all the data lines */ function = "lcd"; groups = "lcdvsi0_a_1", /* VSI0 for LCD */ "lcd_d0_d7_a_1", /* Data lines */ "lcd_d8_d11_a_1", /* TV-out */ - "lcdaclk_b_1", /* Clock line for TV-out */ "lcdvsi1_a_1"; /* VSI1 for HDMI */ }; + default_mux2 { + function = "lcda"; + groups = + "lcdaclk_b_1"; /* Clock line for TV-out */ + }; default_cfg1 { pins = "GPIO68_E1", /* VSI0 */ diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi index 3f14b4df69b4e4d1ab27bd821eaeeb8d819ef40f..94eeb7f1c947863956561ce5e507bcbc75574bbb 100644 --- a/arch/arm/boot/dts/ste-hrefprev60.dtsi +++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi @@ -57,7 +57,7 @@ }; }; - ssp@80002000 { + spi@80002000 { /* * On the first generation boards, this SSP/SPI port was connected * to the AB8500. diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index b0b94d05309855f4816a05295cbeb19251bc89e5..603890461ae0f33a2ff4e4ab4f3c7f8a2fb8a761 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -376,7 +376,7 @@ pinctrl-1 = <&i2c3_sleep_mode>; }; - ssp@80002000 { + spi@80002000 { pinctrl-names = "default"; pinctrl-0 = <&ssp0_snowball_mode>; }; diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts index 62ecb6a2fa39e14aa99daf35782d7b2071852e93..1bd1aba3322f111c67cf672019bb3aac9599d6cd 100644 --- a/arch/arm/boot/dts/ste-u300.dts +++ b/arch/arm/boot/dts/ste-u300.dts @@ -442,7 +442,7 @@ dma-names = "rx"; }; - spi: ssp@c0006000 { + spi: spi@c0006000 { compatible = "arm,pl022", "arm,primecell"; reg = <0xc0006000 0x1000>; interrupt-parent = <&vica>; diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts index 372bc2ea6b92192422368bf8413fe62849321d08..063ee8ac5dcbd12d763a9d32a62f3185d819ae50 100644 --- a/arch/arm/boot/dts/stm32mp157c-ev1.dts +++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts @@ -6,6 +6,7 @@ /dts-v1/; #include "stm32mp157c-ed1.dts" +#include / { model = "STMicroelectronics STM32MP157C eval daughter on eval mother"; @@ -19,6 +20,58 @@ serial0 = &uart4; ethernet0 = ðernet0; }; + + panel_backlight: panel-backlight { + compatible = "gpio-backlight"; + gpios = <&gpiod 13 GPIO_ACTIVE_LOW>; + default-on; + status = "okay"; + }; +}; + +&cec { + pinctrl-names = "default"; + pinctrl-0 = <&cec_pins_a>; + status = "okay"; +}; + +&dsi { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_in: endpoint { + remote-endpoint = <<dc_ep0_out>; + }; + }; + + port@1 { + reg = <1>; + dsi_out: endpoint { + remote-endpoint = <&dsi_panel_in>; + }; + }; + }; + + panel-dsi@0 { + compatible = "raydium,rm68200"; + reg = <0>; + reset-gpios = <&gpiof 15 GPIO_ACTIVE_LOW>; + backlight = <&panel_backlight>; + status = "okay"; + + port { + dsi_panel_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; }; ðernet0 { @@ -40,12 +93,6 @@ }; }; -&cec { - pinctrl-names = "default"; - pinctrl-0 = <&cec_pins_a>; - status = "okay"; -}; - &i2c2 { pinctrl-names = "default"; pinctrl-0 = <&i2c2_pins_a>; @@ -62,6 +109,20 @@ status = "okay"; }; +<dc { + status = "okay"; + + port { + #address-cells = <1>; + #size-cells = <0>; + + ltdc_ep0_out: endpoint@0 { + reg = <0>; + remote-endpoint = <&dsi_in>; + }; + }; +}; + &m_can1 { pinctrl-names = "default"; pinctrl-0 = <&m_can1_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi index 
185541a5b69fb58127136284f86341845b963af3..c50c36baba758f4364aac78e7f973c2c0b1a65b0 100644 --- a/arch/arm/boot/dts/stm32mp157c.dtsi +++ b/arch/arm/boot/dts/stm32mp157c.dtsi @@ -947,7 +947,7 @@ dma-requests = <48>; }; - qspi: qspi@58003000 { + qspi: spi@58003000 { compatible = "st,stm32f469-qspi"; reg = <0x58003000 0x1000>, <0x70000000 0x10000000>; reg-names = "qspi", "qspi_mm"; diff --git a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts index 221acd10f6c8418ccacbdd996e9aa579de4ccf7f..2f0d966f39ad8313b507a5e537ee5edd177e1412 100644 --- a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts +++ b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts @@ -63,8 +63,6 @@ compatible = "gpio-keys-polled"; pinctrl-names = "default"; pinctrl-0 = <&key_pins_inet9f>; - #address-cells = <1>; - #size-cells = <0>; poll-interval = <20>; left-joystick-left { diff --git a/arch/arm/boot/dts/sun4i-a10-pcduino.dts b/arch/arm/boot/dts/sun4i-a10-pcduino.dts index b97a0f2f20b97634b239bf01f3878b5b7d7665a8..d82a604f3d9c76b7332bfa172e3c1afeff338cbd 100644 --- a/arch/arm/boot/dts/sun4i-a10-pcduino.dts +++ b/arch/arm/boot/dts/sun4i-a10-pcduino.dts @@ -76,8 +76,6 @@ gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; back { label = "Key Back"; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 3d62a895072071c57ca69a531190d729492a5044..5d46bb0139fadce8055e7e1223455b78d96a18bd 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -530,8 +530,6 @@ }; hdmi_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; reg = <1>; }; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 316cb8b2945b114224d2c75ffa65a3d6a07f3300..a66d9f92f58f5dee4d3746cdf4f0b0dcaf481413 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -104,8 +104,6 @@ }; hdmi_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; reg = <1>; }; }; diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi index 8acbaab14fe5179a5649d9b39cb35ae1ea143f3c..d2a2eb8b3f2624a4ac974db1a19c7c120dc1f4be 100644 --- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi @@ -92,7 +92,8 @@ */ clock-frequency = <400000>; - touchscreen: touchscreen { + touchscreen: touchscreen@40 { + reg = <0x40>; interrupt-parent = <&pio>; interrupts = <6 11 IRQ_TYPE_EDGE_FALLING>; /* EINT11 (PG11) */ pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index debc0bf22ea3b063192e458140bcab68ad10e6a5..76924fa42bbc33b152bb2a87bba0035239414649 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -201,7 +201,7 @@ }; pmu { - compatible = "arm,cortex-a7-pmu", "arm,cortex-a15-pmu"; + compatible = "arm,cortex-a7-pmu"; interrupts = , , , diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 9c52712af24111daeb7e94a8be2ebabcc94c55c9..355619dce79944ad66f52e68579810448ae52bb9 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -183,7 +183,7 @@ }; pmu { - compatible = "arm,cortex-a7-pmu", "arm,cortex-a15-pmu"; + compatible = "arm,cortex-a7-pmu"; interrupts = , ; }; @@ -639,8 +639,6 @@ }; hdmi_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; reg = <1>; }; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts 
b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index c7ce4158d6c8bccc3eb1b5322d8fe18a2d3aa34c..f250b20af493785373f1385ed47ec31ec689cd48 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -309,8 +309,8 @@ ®_dldo3 { regulator-always-on; - regulator-min-microvolt = <2500000>; - regulator-max-microvolt = <2500000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-name = "vcc-pd"; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 1537ce148cc1918a92db88c5352840ade011b9c2..49547a43cc90ad0e865266350bcb2608cab62cfc 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -171,6 +171,7 @@ vqmmc-supply = <®_dldo1>; non-removable; wakeup-source; + keep-power-in-suspend; status = "okay"; brcmf: wifi@1 { diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts index 30540dc8e0c5fd5c5657ca7ad725769e316bd185..bdda0d99128e56d1519d2047685c5a8965a40fb5 100644 --- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts +++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts @@ -140,7 +140,7 @@ &external_mdio { ext_rgmii_phy: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; - reg = <0>; + reg = <1>; }; }; diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2e72f3fc557c0fa387c69b45ac3a2..25540b7694d590dea1c3c54c5453d4a7217d9f37 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -53,7 +53,7 @@ aliases { serial0 = &uart0; - /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ + ethernet0 = &emac; ethernet1 = &sdiowifi; }; diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi index f0096074a46786cf36c6a824aa6a7ce8bba824ac..9233ba30a857c7b2f77da960da00aab19b0eaff2 100644 --- a/arch/arm/boot/dts/sun8i-h3.dtsi +++ b/arch/arm/boot/dts/sun8i-h3.dtsi @@ -47,19 +47,19 @@ compatible = "operating-points-v2"; opp-shared; - opp@648000000 { + opp-648000000 { opp-hz = /bits/ 64 <648000000>; opp-microvolt = <1040000 1040000 1300000>; clock-latency-ns = <244144>; /* 8 32k periods */ }; - opp@816000000 { + opp-816000000 { opp-hz = /bits/ 64 <816000000>; opp-microvolt = <1100000 1100000 1300000>; clock-latency-ns = <244144>; /* 8 32k periods */ }; - opp@1008000000 { + opp-1008000000 { opp-hz = /bits/ 64 <1008000000>; opp-microvolt = <1200000 1200000 1300000>; clock-latency-ns = <244144>; /* 8 32k periods */ @@ -122,7 +122,7 @@ soc { system-control@1c00000 { compatible = "allwinner,sun8i-h3-system-control"; - reg = <0x01c00000 0x30>; + reg = <0x01c00000 0x1000>; #address-cells = <1>; #size-cells = <1>; ranges; diff --git a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts index 0dbdb29a8fff9b1132b7011f725adc3e35b0a93b..ee7ce3752581b59160e0c6f62315cfbd27ebdb41 100644 --- a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts +++ b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts @@ -103,13 +103,13 @@ }; &cpu0_opp_table { - opp@1104000000 { + opp-1104000000 { opp-hz = /bits/ 64 <1104000000>; opp-microvolt = <1320000>; clock-latency-ns = <244144>; /* 8 32k periods */ }; - opp@1200000000 { + opp-1200000000 { opp-hz = /bits/ 64 <1200000000>; opp-microvolt = <1320000>; clock-latency-ns = <244144>; /* 8 32k periods */ diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi 
b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi index 880096c7e2523aee4174a9ce7fce4d853cf8e882..5e8a95af89b8c3539ff39f902059bdd256a0df43 100644 --- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi @@ -69,7 +69,8 @@ */ clock-frequency = <400000>; - touchscreen: touchscreen@0 { + touchscreen: touchscreen@40 { + reg = <0x40>; interrupt-parent = <&pio>; interrupts = <1 5 IRQ_TYPE_EDGE_FALLING>; /* PB5 */ pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts index 387fc2aa546d660ad52a9373862a95d2610c053c..333df90e8037cd511b5159dc7587e1102ed67a56 100644 --- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts +++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts @@ -78,7 +78,7 @@ }; &mmc0 { - pinctrl-0 = <&mmc0_pins_a>; + pinctrl-0 = <&mmc0_pins>; pinctrl-names = "default"; broken-cd; bus-width = <4>; @@ -87,7 +87,7 @@ }; &uart0 { - pinctrl-0 = <&uart0_pins_a>; + pinctrl-0 = <&uart0_pb_pins>; pinctrl-names = "default"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi index 443b083c6adc9a1c9f605554fcada283e8d90872..92fcb756a08a9b605d5f63365f93fe61df1434fe 100644 --- a/arch/arm/boot/dts/sun8i-v3s.dtsi +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi @@ -292,17 +292,17 @@ interrupt-controller; #interrupt-cells = <3>; - i2c0_pins: i2c0 { + i2c0_pins: i2c0-pins { pins = "PB6", "PB7"; function = "i2c0"; }; - uart0_pins_a: uart0@0 { + uart0_pb_pins: uart0-pb-pins { pins = "PB8", "PB9"; function = "uart0"; }; - mmc0_pins_a: mmc0@0 { + mmc0_pins: mmc0-pins { pins = "PF0", "PF1", "PF2", "PF3", "PF4", "PF5"; function = "mmc0"; @@ -310,7 +310,7 @@ bias-pull-up; }; - mmc1_pins: mmc1 { + mmc1_pins: mmc1-pins { pins = "PG0", "PG1", "PG2", "PG3", "PG4", "PG5"; function = "mmc1"; @@ -318,7 +318,7 @@ bias-pull-up; }; - spi0_pins: spi0 { + spi0_pins: spi0-pins { pins = "PC0", "PC1", "PC2", "PC3"; function = "spi0"; }; diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts index 35859d8f3267fd2a1d6fa7e716e97094895f3530..bf97f6244c233f802393133d50456735d18766e7 100644 --- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts +++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts @@ -95,7 +95,7 @@ &i2c0 { status = "okay"; - axp22x: pmic@68 { + axp22x: pmic@34 { compatible = "x-powers,axp221"; reg = <0x34>; interrupt-parent = <&nmi_intc>; diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index 25591d6883ef2feb1fa89e28360bdeb14a048d13..d9532fb1ef65071936c047a25274b2b299c30dce 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -1196,7 +1196,7 @@ }; }; - r_rsb: i2c@8003400 { + r_rsb: rsb@8003400 { compatible = "allwinner,sun8i-a23-rsb"; reg = <0x08003400 0x400>; interrupts = ; diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index fc6131315c47ffe695a4db6cbf0f7e38a8b89221..4b1530ebe4272887f33c85e00669e4781618848e 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -816,7 +816,7 @@ clock-names = "apb", "ir"; resets = <&r_ccu RST_APB0_IR>; interrupts = ; - reg = <0x01f02000 0x40>; + reg = <0x01f02000 0x400>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987ea52236b113b49e761b329c948e08..bc85b6a166c79e9b079d184e4ddac0d3e26d7067 100644 --- 
a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -13,10 +13,25 @@ stdout-path = "serial0:115200n8"; }; - memory@80000000 { + /* + * Note that recent version of the device tree compiler (starting with + * version 1.4.2) warn about this node containing a reg property, but + * missing a unit-address. However, the bootloader on these Chromebook + * devices relies on the full name of this node to be exactly /memory. + * Adding the unit-address causes the bootloader to create a /memory + * node and write the memory bank configuration to that node, which in + * turn leads the kernel to believe that the device has 2 GiB of + * memory instead of the amount detected by the bootloader. + * + * The name of this node is effectively ABI and must not be changed. + */ + memory { + device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; }; + /delete-node/ memory@80000000; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index ef245291924f076d73aec01d5ad0f68db0e92589..4f9b4a889febe80ae8e9a1a5becd253f9caeefc4 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -524,10 +524,10 @@ gpio-keys { compatible = "gpio-keys"; - power { - label = "Power"; + wakeup { + label = "Wakeup"; gpios = <&gpio TEGRA_GPIO(J, 7) GPIO_ACTIVE_LOW>; - linux,code = ; + linux,code = ; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 15b73bd377f0408b948f80b36a4afaff82f14688..80854f7de765c571a628a8f5c1846a47a377079f 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -419,19 +419,6 @@ status = "disabled"; }; - gmi@70009000 { - compatible = "nvidia,tegra20-gmi"; - reg = <0x70009000 0x1000>; - #address-cells = <2>; - #size-cells = <1>; - ranges = <0 0 0xd0000000 0xfffffff>; - clocks = <&tegra_car TEGRA20_CLK_NOR>; - clock-names = "gmi"; - resets = <&tegra_car 42>; - reset-names = "gmi"; - status = "disabled"; - }; - nand-controller@70008000 { compatible = "nvidia,tegra20-nand"; reg = <0x70008000 0x100>; @@ -447,6 +434,19 @@ status = "disabled"; }; + gmi@70009000 { + compatible = "nvidia,tegra20-gmi"; + reg = <0x70009000 0x1000>; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0xd0000000 0xfffffff>; + clocks = <&tegra_car TEGRA20_CLK_NOR>; + clock-names = "gmi"; + resets = <&tegra_car 42>; + reset-names = "gmi"; + status = "disabled"; + }; + pwm: pwm@7000a000 { compatible = "nvidia,tegra20-pwm"; reg = <0x7000a000 0x100>; diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi index 2f807d40c1b792bd35c662bcd243588b7fa91d0b..f810bbf8212bda115ae5e3f587d2f7b980da682b 100644 --- a/arch/arm/boot/dts/tegra30-apalis.dtsi +++ b/arch/arm/boot/dts/tegra30-apalis.dtsi @@ -171,14 +171,14 @@ /* Apalis MMC1 */ sdmmc3_clk_pa6 { - nvidia,pins = "sdmmc3_clk_pa6", - "sdmmc3_cmd_pa7"; + nvidia,pins = "sdmmc3_clk_pa6"; nvidia,function = "sdmmc3"; nvidia,pull = ; nvidia,tristate = ; }; sdmmc3_dat0_pb7 { - nvidia,pins = "sdmmc3_dat0_pb7", + nvidia,pins = "sdmmc3_cmd_pa7", + "sdmmc3_dat0_pb7", "sdmmc3_dat1_pb6", "sdmmc3_dat2_pb5", "sdmmc3_dat3_pb4", @@ -659,7 +659,7 @@ reg = <1>; clocks = <&clk16m>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; spi-max-frequency = <10000000>; }; }; @@ -674,7 +674,7 @@ reg = <0>; clocks = <&clk16m>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; spi-max-frequency = <10000000>; }; }; diff --git 
a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts index 16e1f387aa6db65aad52d814bd4e663522f81048..a0c550e26738f4e48b87380400f61b32893d756c 100644 --- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts @@ -79,7 +79,8 @@ reg = <0>; clocks = <&clk16m>; interrupt-parent = <&gpio>; - interrupts = ; + /* CAN_INT */ + interrupts = ; spi-max-frequency = <10000000>; }; spidev0: spi@1 { diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index a6781f6533105e7e27a7bb1e9da8f066cc1814a8..5a04ddefb71f62013d2ce5991fb56beaf47b184e 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -896,7 +896,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <1>; nvidia,xcvr-lsrslew = <1>; nvidia,xcvr-hsslew = <32>; @@ -933,7 +933,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <2>; nvidia,xcvr-lsrslew = <2>; nvidia,xcvr-hsslew = <32>; @@ -969,7 +969,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <2>; nvidia,xcvr-lsrslew = <2>; nvidia,xcvr-hsslew = <32>; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 5f61d36090270131ed6c8f91a2f93f3ee92ef5e3..6f4f60ba5429c8dedcd3a9863c5558a5e0b8199d 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -373,7 +373,7 @@ clock-names = "apb_pclk"; }; - ssp@101f4000 { + spi@101f4000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x101f4000 0x1000>; interrupts = <11>; diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts index 41ec66a969907d492dabec284e6a101f592e0216..ca62495587602f44d3e514fb2df910edfc584ea1 100644 --- a/arch/arm/boot/dts/vf610m4-colibri.dts +++ b/arch/arm/boot/dts/vf610m4-colibri.dts @@ -50,8 +50,8 @@ compatible = "fsl,vf610m4"; chosen { - bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw"; - stdout-path = "&uart2"; + bootargs = "clk_ignore_unused init=/linuxrc rw"; + stdout-path = "serial2:115200"; }; memory@8c000000 { diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts index cc5a3dc2b4a08dc3ceca92112bc6e0206bfd21d1..27cd6cb52f1ba33607db41ce80c6fb502a55837f 100644 --- a/arch/arm/boot/dts/zynq-zc702.dts +++ b/arch/arm/boot/dts/zynq-zc702.dts @@ -174,17 +174,17 @@ #address-cells = <1>; #size-cells = <0>; reg = <7>; - hwmon@52 { + hwmon@34 { compatible = "ti,ucd9248"; - reg = <52>; + reg = <0x34>; }; - hwmon@53 { + hwmon@35 { compatible = "ti,ucd9248"; - reg = <53>; + reg = <0x35>; }; - hwmon@54 { + hwmon@36 { compatible = "ti,ucd9248"; - reg = <54>; + reg = <0x36>; }; }; }; diff --git a/arch/arm/boot/dts/zynq-zc770-xm010.dts b/arch/arm/boot/dts/zynq-zc770-xm010.dts index 0e1bfdd3421ff04f31266051546b23f5b9e3cfd0..0dd352289a45e58150f75896a590873f66773174 100644 --- a/arch/arm/boot/dts/zynq-zc770-xm010.dts +++ b/arch/arm/boot/dts/zynq-zc770-xm010.dts @@ -68,7 +68,7 @@ status = "okay"; num-cs = <4>; is-decoded-cs = <0>; - flash@0 { + flash@1 { compatible = "sst25wf080", "jedec,spi-nor"; reg = <1>; spi-max-frequency = <1000000>; diff --git a/arch/arm/boot/dts/zynq-zc770-xm013.dts 
b/arch/arm/boot/dts/zynq-zc770-xm013.dts index 651913f1afa2a06647addfdb6ae415b567031f66..4ae2c85df3a0078111f1b4aa9a24b695265cee16 100644 --- a/arch/arm/boot/dts/zynq-zc770-xm013.dts +++ b/arch/arm/boot/dts/zynq-zc770-xm013.dts @@ -62,7 +62,7 @@ status = "okay"; num-cs = <4>; is-decoded-cs = <0>; - eeprom: eeprom@0 { + eeprom: eeprom@2 { at25,byte-len = <8192>; at25,addr-mode = <2>; at25,page-size = <32>; diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig index 5ae5b5228467adb793e4a847dbbec838c4537300..ef484c4cfd1a252f30aba80e459beacf01ecb199 100644 --- a/arch/arm/configs/badge4_defconfig +++ b/arch/arm/configs/badge4_defconfig @@ -91,7 +91,6 @@ CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_RIO500=m CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m CONFIG_MSDOS_FS=y diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig index 09e1672777c9b0e76ea42acde31cb7e011e159ff..0ba8df0d48b9702a9d89a226bc820b1de939733e 100644 --- a/arch/arm/configs/corgi_defconfig +++ b/arch/arm/configs/corgi_defconfig @@ -197,7 +197,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_CYTHERM=m diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 3a7938f244e56606f427d5d940f39fd3f2b3b48c..2aa3ebeb89d7fb5dea69774a3802f9131e8aa3b9 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -7,7 +7,6 @@ CONFIG_EXPERT=y CONFIG_MODULES=y CONFIG_ARCH_FOOTBRIDGE=y CONFIG_ARCH_CATS=y -CONFIG_ARCH_PERSONAL_SERVER=y CONFIG_ARCH_EBSA285_HOST=y CONFIG_ARCH_NETWINDER=y CONFIG_LEDS=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 7eca43ff69bbed1f1d5d431a968826612812ed18..f4c2e993bba3a778d5b0de167e0c40ec7d2ba515 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -409,6 +409,7 @@ CONFIG_ZISOFS=y CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y CONFIG_JFFS2_FS=y CONFIG_UBIFS_FS=y CONFIG_NFS_FS=y diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 6bb506edb1f53a4867e810b8d86e53957aa79ddb..cc63d09a1f86693dc63455c0d51daa1ada4bd923 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -588,7 +588,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_CYTHERM=m diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 2afb359f3168d50d2fb2a7ed47bf3f515ff3e7af..bd71d5bf98c911ed391adb01437b6bd97573931e 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -334,7 +334,6 @@ CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_CYPRESS_CY7C63=m diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index 9ea82c118661b179dea1284ac55b7ac7fde12d1e..3aff4ca2a94e2c6ba9683db626c6591506a0318b 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig @@ -191,7 +191,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m 
CONFIG_USB_CYTHERM=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 925d1364727a5dd0e35888091eb5dc73ee2413d9..b8e69fe282b8db8338abd8c4405d8031022aa448 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -121,10 +121,4 @@ config CRYPTO_CHACHA20_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8de542c48adea528d4726deeee7d1080e63f974d..bd5bceef0605f1c22d158bdca6b875a4b58311c0 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index 07e31941dc674352d13ea68ad661998879ec4c1e..617c2c99ebfb30bd3862542af311fa5c93258043 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req, int err; err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 96e62ec105d061864e516eacefcbd55e983bfedd..cd9e93b46c2ddf21d1722ce228729e61d2253ed3 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -236,7 +236,7 @@ static void __exit crc32_pmull_mod_exit(void) ARRAY_SIZE(crc32_pmull_algs)); } -static const struct cpu_feature crc32_cpu_feature[] = { +static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = { { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } }; MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S index ce45ba0c06879b8c748ae763603cff14ef68aa60..16019b5961e7890709eb29d7f76e4b0b8a13000d 100644 --- a/arch/arm/crypto/crct10dif-ce-core.S +++ b/arch/arm/crypto/crct10dif-ce-core.S @@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull) vext.8 q10, qzr, q0, #4 // receive the initial 64B data, xor the initial crc value - vld1.64 {q0-q1}, [arg2, :128]! - vld1.64 {q2-q3}, [arg2, :128]! - vld1.64 {q4-q5}, [arg2, :128]! - vld1.64 {q6-q7}, [arg2, :128]! + vld1.64 {q0-q1}, [arg2]! + vld1.64 {q2-q3}, [arg2]! + vld1.64 {q4-q5}, [arg2]! + vld1.64 {q6-q7}, [arg2]! CPU_LE( vrev64.8 q0, q0 ) CPU_LE( vrev64.8 q1, q1 ) CPU_LE( vrev64.8 q2, q2 ) @@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 ) _fold_64_B_loop: .macro fold64, reg1, reg2 - vld1.64 {q11-q12}, [arg2, :128]! + vld1.64 {q11-q12}, [arg2]! vmull.p64 q8, \reg1\()h, d21 vmull.p64 \reg1, \reg1\()l, d20 @@ -238,7 +238,7 @@ _16B_reduction_loop: vmull.p64 q7, d15, d21 veor.8 q7, q7, q8 - vld1.64 {q0}, [arg2, :128]! 
+ vld1.64 {q0}, [arg2]! CPU_LE( vrev64.8 q0, q0 ) vswp d0, d1 veor.8 q7, q7, q0 @@ -335,7 +335,7 @@ _less_than_128: vmov.i8 q0, #0 vmov s3, arg1_low32 // get the initial crc value - vld1.64 {q7}, [arg2, :128]! + vld1.64 {q7}, [arg2]! CPU_LE( vrev64.8 q7, q7 ) vswp d14, d15 veor.8 q7, q7, q0 diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c index d428355cf38d9b848c88e5024d8d4d03badeb754..14c19c70a8416bec170eebef2ee7f3fb1fb6b7ba 100644 --- a/arch/arm/crypto/crct10dif-ce-glue.c +++ b/arch/arm/crypto/crct10dif-ce-glue.c @@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (!may_use_simd()) { - *crc = crc_t10dif_generic(*crc, data, length); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); } else { - if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); - - *crc = crc_t10dif_generic(*crc, data, l); - - length -= l; - data += l; - } - if (length > 0) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } + *crc = crc_t10dif_generic(*crc, data, length); } + return 0; } diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index b9ec44060ed313dac5c469e4e6e39e905d0206cc..a03cf4dfb7818d1275ee0d06ecf633b149adf1cd 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -212,10 +212,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 3b58300d611cf4c94f9014901d0a3204acb75f13..054aae0edfce5628715d607fd7d194ac5cc81938 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -93,10 +93,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index fb5d15048c0b2d2ea2d8c8c9a2bdbb15a6c14052..788c17b56ecceb5e607382539faec13b8fbc2886 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index b1c334a49cdaa61f019b7367bfa4e20d4ac4a078..710ea309769e71628d1d4a166c834ad92033b658 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ 
b/arch/arm/crypto/sha512-core.S_shipped @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S deleted file mode 100644 index 57caa742016ed59bc8d3755fd6b9526f0c05f860..0000000000000000000000000000000000000000 --- a/arch/arm/crypto/speck-neon-core.S +++ /dev/null @@ -1,434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers - */ - -#include - - .text - .fpu neon - - // arguments - ROUND_KEYS .req r0 // const {u64,u32} *round_keys - NROUNDS .req r1 // int nrounds - DST .req r2 // void *dst - SRC .req r3 // const void *src - NBYTES .req r4 // unsigned int nbytes - TWEAK .req r5 // void *tweak - - // registers which hold the data being encrypted/decrypted - X0 .req q0 - X0_L .req d0 - X0_H .req d1 - Y0 .req q1 - Y0_H .req d3 - X1 .req q2 - X1_L .req d4 - X1_H .req d5 - Y1 .req q3 - Y1_H .req d7 - X2 .req q4 - X2_L .req d8 - X2_H .req d9 - Y2 .req q5 - Y2_H .req d11 - X3 .req q6 - X3_L .req d12 - X3_H .req d13 - Y3 .req q7 - Y3_H .req d15 - - // the round key, duplicated in all lanes - ROUND_KEY .req q8 - ROUND_KEY_L .req d16 - ROUND_KEY_H .req d17 - - // index vector for vtbl-based 8-bit rotates - ROTATE_TABLE .req d18 - - // multiplication table for updating XTS tweaks - GF128MUL_TABLE .req d19 - GF64MUL_TABLE .req d19 - - // current XTS tweak value(s) - TWEAKV .req q10 - TWEAKV_L .req d20 - TWEAKV_H .req d21 - - TMP0 .req q12 - TMP0_L .req d24 - TMP0_H .req d25 - TMP1 .req q13 - TMP2 .req q14 - TMP3 .req q15 - - .align 4 -.Lror64_8_table: - .byte 1, 2, 3, 4, 5, 6, 7, 0 -.Lror32_8_table: - .byte 1, 2, 3, 0, 5, 6, 7, 4 -.Lrol64_8_table: - .byte 7, 0, 1, 2, 3, 4, 5, 6 -.Lrol32_8_table: - .byte 3, 0, 1, 2, 7, 4, 5, 6 -.Lgf128mul_table: - .byte 0, 0x87 - .fill 14 -.Lgf64mul_table: - .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b - .fill 12 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * - * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because - * the vtbl approach is faster on some processors and the same speed on others. 
- */ -.macro _speck_round_128bytes n - - // x = ror(x, 8) - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE - - // x += y - vadd.u\n X0, Y0 - vadd.u\n X1, Y1 - vadd.u\n X2, Y2 - vadd.u\n X3, Y3 - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // y = rol(y, 3) - vshl.u\n TMP0, Y0, #3 - vshl.u\n TMP1, Y1, #3 - vshl.u\n TMP2, Y2, #3 - vshl.u\n TMP3, Y3, #3 - vsri.u\n TMP0, Y0, #(\n - 3) - vsri.u\n TMP1, Y1, #(\n - 3) - vsri.u\n TMP2, Y2, #(\n - 3) - vsri.u\n TMP3, Y3, #(\n - 3) - - // y ^= x - veor Y0, TMP0, X0 - veor Y1, TMP1, X1 - veor Y2, TMP2, X2 - veor Y3, TMP3, X3 -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). - */ -.macro _speck_unround_128bytes n - - // y ^= x - veor TMP0, Y0, X0 - veor TMP1, Y1, X1 - veor TMP2, Y2, X2 - veor TMP3, Y3, X3 - - // y = ror(y, 3) - vshr.u\n Y0, TMP0, #3 - vshr.u\n Y1, TMP1, #3 - vshr.u\n Y2, TMP2, #3 - vshr.u\n Y3, TMP3, #3 - vsli.u\n Y0, TMP0, #(\n - 3) - vsli.u\n Y1, TMP1, #(\n - 3) - vsli.u\n Y2, TMP2, #(\n - 3) - vsli.u\n Y3, TMP3, #(\n - 3) - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // x -= y - vsub.u\n X0, Y0 - vsub.u\n X1, Y1 - vsub.u\n X2, Y2 - vsub.u\n X3, Y3 - - // x = rol(x, 8); - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE -.endm - -.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp - - // Load the next source block - vld1.8 {\dst_reg}, [SRC]! - - // Save the current tweak in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! - - // XOR the next source block with the current tweak - veor \dst_reg, TWEAKV - - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. - */ - vshr.u64 \tmp, TWEAKV, #63 - vshl.u64 TWEAKV, #1 - veor TWEAKV_H, \tmp\()_L - vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H - veor TWEAKV_L, \tmp\()_H -.endm - -.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp - - // Load the next two source blocks - vld1.8 {\dst_reg}, [SRC]! - - // Save the current two tweaks in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! - - // XOR the next two source blocks with the current two tweaks - veor \dst_reg, TWEAKV - - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. - */ - vshr.u64 \tmp, TWEAKV, #62 - vshl.u64 TWEAKV, #2 - vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L - vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H - veor TWEAKV, \tmp -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. 
- */ -.macro _speck_xts_crypt n, decrypting - push {r4-r7} - mov r7, sp - - /* - * The first four parameters were passed in registers r0-r3. Load the - * additional parameters, which were passed on the stack. - */ - ldr NBYTES, [sp, #16] - ldr TWEAK, [sp, #20] - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3 - sub ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2 - sub ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for vtbl-based 8-bit rotates -.if \decrypting - ldr r12, =.Lrol\n\()_8_table -.else - ldr r12, =.Lror\n\()_8_table -.endif - vld1.8 {ROTATE_TABLE}, [r12:64] - - // One-time XTS preparation - - /* - * Allocate stack space to store 128 bytes worth of tweaks. For - * performance, this space is aligned to a 16-byte boundary so that we - * can use the load/store instructions that declare 16-byte alignment. - * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'. - */ - sub r12, sp, #128 - bic r12, #0xf - mov sp, r12 - -.if \n == 64 - // Load first tweak - vld1.8 {TWEAKV}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr r12, =.Lgf128mul_table - vld1.8 {GF128MUL_TABLE}, [r12:64] -.else - // Load first tweak - vld1.8 {TWEAKV_L}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr r12, =.Lgf64mul_table - vld1.8 {GF64MUL_TABLE}, [r12:64] - - // Calculate second tweak, packing it together with the first - vshr.u64 TMP0_L, TWEAKV_L, #63 - vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L - vshl.u64 TWEAKV_H, TWEAKV_L, #1 - veor TWEAKV_H, TMP0_L -.endif - -.Lnext_128bytes_\@: - - /* - * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak - * values, and save the tweaks on the stack for later. Then - * de-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) - */ - mov r12, sp -.if \n == 64 - _xts128_precrypt_one X0, r12, TMP0 - _xts128_precrypt_one Y0, r12, TMP0 - _xts128_precrypt_one X1, r12, TMP0 - _xts128_precrypt_one Y1, r12, TMP0 - _xts128_precrypt_one X2, r12, TMP0 - _xts128_precrypt_one Y2, r12, TMP0 - _xts128_precrypt_one X3, r12, TMP0 - _xts128_precrypt_one Y3, r12, TMP0 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - _xts64_precrypt_two X0, r12, TMP0 - _xts64_precrypt_two Y0, r12, TMP0 - _xts64_precrypt_two X1, r12, TMP0 - _xts64_precrypt_two Y1, r12, TMP0 - _xts64_precrypt_two X2, r12, TMP0 - _xts64_precrypt_two Y2, r12, TMP0 - _xts64_precrypt_two X3, r12, TMP0 - _xts64_precrypt_two Y3, r12, TMP0 - vuzp.32 Y0, X0 - vuzp.32 Y1, X1 - vuzp.32 Y2, X2 - vuzp.32 Y3, X3 -.endif - - // Do the cipher rounds - - mov r12, ROUND_KEYS - mov r6, NROUNDS - -.Lnext_round_\@: -.if \decrypting -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12] - sub r12, #8 - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12] - sub r12, #4 -.endif - _speck_unround_128bytes \n -.else -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12]! - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]! 
-.endif - _speck_round_128bytes \n -.endif - subs r6, r6, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block -.if \n == 64 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - vzip.32 Y0, X0 - vzip.32 Y1, X1 - vzip.32 Y2, X2 - vzip.32 Y3, X3 -.endif - - // XOR the encrypted/decrypted blocks with the tweaks we saved earlier - mov r12, sp - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X0, TMP0 - veor Y0, TMP1 - veor X1, TMP2 - veor Y1, TMP3 - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X2, TMP0 - veor Y2, TMP1 - veor X3, TMP2 - veor Y3, TMP3 - - // Store the ciphertext in the destination buffer - vst1.8 {X0, Y0}, [DST]! - vst1.8 {X1, Y1}, [DST]! - vst1.8 {X2, Y2}, [DST]! - vst1.8 {X3, Y3}, [DST]! - - // Continue if there are more 128-byte chunks remaining, else return - subs NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak -.if \n == 64 - vst1.8 {TWEAKV}, [TWEAK] -.else - vst1.8 {TWEAKV_L}, [TWEAK] -.endif - - mov sp, r7 - pop {r4-r7} - bx lr -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - -ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c deleted file mode 100644 index f012c3ea998fb4f741f6063842cdc9b1c8c154c0..0000000000000000000000000000000000000000 --- a/arch/arm/crypto/speck-neon-glue.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Note: the NIST recommendation for XTS only specifies a 128-bit block size, - * but a 64-bit version (needed for Speck64) is fairly straightforward; the math - * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial - * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004: - * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes - * OCB and PMAC"), represented as 0x1B. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, &tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - 
-static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? - 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_NEON)) - return -ENODEV; - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - 
-module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index 0bd530702118f3ce1fa2462afeaacec2202dc215..f6f485f4744e034679f5b4483daa74d98a3907d3 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -34,6 +34,7 @@ #define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) #define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) #define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3) +#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3) #define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x) #define ICC_AP0R0 __ICC_AP0Rx(0) @@ -245,6 +246,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg(val, ICC_BPR1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg(ICC_PMR); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg(val, ICC_PMR); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg(ICC_RPR); +} + /* * Even in 32bit systems that use LPAE, there is no guarantee that the I/O * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't @@ -347,5 +363,22 @@ static inline void gits_write_vpendbaser(u64 val, void * __iomem addr) #define gits_read_vpendbaser(c) __gic_readq_nonatomic(c) +static inline bool gic_prio_masking_enabled(void) +{ + return false; +} + +static inline void gic_pmr_mask_irqs(void) +{ + /* Should not get called. */ + WARN_ON_ONCE(true); +} + +static inline void gic_arch_enable_irqs(void) +{ + /* Should not get called. 
*/ + WARN_ON_ONCE(true); +} + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index b17ee03d280b6ff9ff706f04ab4d3be57f20bfe8..f903e1040b36b769b4bb7c476185c6ed8c43619f 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -110,6 +110,16 @@ .endm #endif +#if __LINUX_ARM_ARCH__ < 7 + .macro dsb, args + mcr p15, 0, r0, c7, c10, 4 + .endm + + .macro isb, args + mcr p15, 0, r0, c7, c5, 4 + .endm +#endif + .macro asm_trace_hardirqs_off, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) .if \save @@ -467,6 +477,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +#ifdef CONFIG_CPU_SPECTRE + sub \tmp, \limit, #1 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { + subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } + movlo \addr, #0 @ if (tmp < 0) addr = NULL + csdb +#endif + .endm + .macro uaccess_disable, tmp, isb=1 #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 69772e742a0acdc16dbf76f2130a8025af41b6c0..83ae97c049d9bd48b474f0127164c71628bf05c0 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -11,6 +11,8 @@ #define sev() __asm__ __volatile__ ("sev" : : : "memory") #define wfe() __asm__ __volatile__ ("wfe" : : : "memory") #define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#else +#define wfe() do { } while (0) #endif #if __LINUX_ARM_ARCH__ >= 7 diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 07e27f212dc754b42b0361536e33ed8bed144f5a..d2453e2d3f1f3804db034c945f268ae614ff3c3f 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -68,6 +68,8 @@ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6) #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) +#define CNTVCT __ACCESS_CP15_64(1, c14) + extern unsigned long cr_alignment; /* defined in entry-armv.S */ static inline unsigned long get_cr(void) diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 0d289240b6ca110ab961a280ddd20fc1c567f2a4..775cac3c02bb0a31facb970e16feef83f86c6632 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -111,6 +111,7 @@ #include extern unsigned int processor_id; +struct proc_info_list *lookup_processor(u32 midr); #ifdef CONFIG_CPU_CP15 #define read_cpuid(reg) \ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index ffebe7b7a5b743682c071fc14a7dd30f75ebfac9..0a46676b4245b3d15292eac1c03f5915033d8cae 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -50,7 +50,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret; u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; smp_mb(); @@ -104,7 +104,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; preempt_disable(); diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index cba23eaa607215aaaab9342c6a661b06ebd07db8..7a88f160b1fbe9b84a6f39c04a311b02d996b447 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -6,6 +6,7 @@ #include #include +/* number of IPIS _not_ including IPI_CPU_BACKTRACE */ #define NR_IPI 7 typedef 
struct { diff --git a/arch/arm/include/asm/hisi_cpu_model.h b/arch/arm/include/asm/hisi_cpu_model.h new file mode 100644 index 0000000000000000000000000000000000000000..54cc3c236df0e4b1c813a565268b5825a618a404 --- /dev/null +++ b/arch/arm/include/asm/hisi_cpu_model.h @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_CPU_MODEL_H__ +#define __HISI_CPU_MODEL_H__ + +enum hisi_cpu_type { + UNKNOWN_HI_TYPE +}; + +extern enum hisi_cpu_type hi_cpu_type; +extern bool kvm_ncsnp_support; + +void probe_hisi_cpu_type(void); +void probe_hisi_ncsnp_support(void); +#endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b67ef68bfc18a6e48d4ec53c37cdd0..46d41140df27dd9c4f15c713189db2199c1352a2 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -25,7 +25,6 @@ #ifndef __ASSEMBLY__ struct irqaction; struct pt_regs; -extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 3ab8b3781bfeca7264989b813209a99d115d35f2..289ed7bef01ffe56c7d2d31ee45defe7e0c5cc83 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -133,8 +133,7 @@ * space. */ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + #define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30)) /* Virtualization Translation Control Register (VTCR) bits */ @@ -186,6 +185,7 @@ #define FSC_FAULT (0x04) #define FSC_ACCESS (0x08) #define FSC_PERM (0x0c) +#define FSC_IGNORE (-1) #define FSC_SEA (0x10) #define FSC_SEA_TTW0 (0x14) #define FSC_SEA_TTW1 (0x15) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 231e87ad45d5660f97800e90b39951d8f7c88180..51c9f9836befa8ea446b1faf25630dd4a72bfcb7 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -23,6 +23,10 @@ #define ARM_EXIT_WITH_ABORT_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT)) +#define ARM_EXCEPTION_IS_TRAP(x) \ + (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \ + ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \ + ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC) #define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)) #define ARM_EXCEPTION_RESET 0 @@ -62,9 +66,9 @@ extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; extern void __kvm_flush_vm_context(void); +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 3ad482d2f1eb91c8bfe6b597788e1e70b9521234..46a2e8636f868cabb677c07b53050454d0c1a083 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -19,6 +19,7 @@ #ifndef __ARM_KVM_HOST_H__ #define __ARM_KVM_HOST_H__ +#include #include #include #include @@ -26,7 +27,9 @@ #include #include #include +#include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -48,6 +51,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | 
KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -145,7 +149,24 @@ struct kvm_cpu_context { u32 cp15[NR_CP15_REGS]; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; +}; + +typedef struct kvm_host_data kvm_host_data_t; + +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) +{ + /* The host's MPIDR is immutable, so let's set it up at boot time */ + cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr(); +} + +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -163,7 +184,7 @@ struct kvm_vcpu_arch { struct kvm_vcpu_fault_info fault; /* Host FP context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; /* VGIC state */ struct vgic_cpu vgic_cpu; @@ -186,6 +207,8 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; + struct vcpu_reset_state reset_state; + /* Detect first run of a vcpu */ bool has_run_once; }; @@ -281,6 +304,25 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) int kvm_perf_init(void); int kvm_perf_teardown(void); +static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) +{ +} + +static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch) +{ + return false; +} + +static inline void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, + u32 preempted) +{ +} + +static inline int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu) +{ + return SMCCC_RET_NOT_SUPPORTED; +} + void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); @@ -317,6 +359,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} + static inline void kvm_arm_vhe_guest_enter(void) {} static inline void kvm_arm_vhe_guest_exit(void) {} @@ -354,4 +399,17 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {} struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +static inline int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + return 0; +} + +static inline int kvm_arm_get_spectre_bhb_state(void) +{ + /* 32bit guests don't need firmware for this */ + return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */ +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 265ea9cf7df773bf7d49926d3eed34be25032463..b713db7903c2bcb37b044754256bad5dd4c49f28 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -35,16 +35,12 @@ addr; \ }) -/* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. 
- */ -#define KVM_MMU_CACHE_MIN_PAGES 2 - #ifndef __ASSEMBLY__ #include #include #include +#include #include #include #include @@ -52,6 +48,13 @@ /* Ensure compatibility with arm64 */ #define VA_BITS 32 +#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT +#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL) +#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK + +#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) + int create_hyp_mappings(void *from, void *to, pgprot_t prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, @@ -79,6 +82,67 @@ void kvm_clear_hyp_idmap(void); #define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE) #define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; }) +#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot) +#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot) +#define kvm_pfn_pud(pfn, prot) (__pud(0)) + +#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; }) + + +#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd) +/* No support for pud hugepages */ +#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; }) + +/* + * The following kvm_*pud*() functions are provided strictly to allow + * sharing code with arm64. They should never be called in practice. + */ +static inline void kvm_set_s2pud_readonly(pud_t *pud) +{ + WARN_ON(1); +} + +static inline bool kvm_s2pud_readonly(pud_t *pud) +{ + WARN_ON(1); + return false; +} + +static inline void kvm_set_pud(pud_t *pud, pud_t new_pud) +{ + WARN_ON(1); +} + +static inline pud_t kvm_s2pud_mkwrite(pud_t pud) +{ + WARN_ON(1); + return pud; +} + +static inline pud_t kvm_s2pud_mkexec(pud_t pud) +{ + WARN_ON(1); + return pud; +} + +static inline bool kvm_s2pud_exec(pud_t *pud) +{ + WARN_ON(1); + return false; +} + +static inline pud_t kvm_s2pud_mkyoung(pud_t pud) +{ + BUG(); + return pud; +} + +static inline bool kvm_s2pud_young(pud_t pud) +{ + WARN_ON(1); + return false; +} + static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { pte_val(pte) |= L_PTE_S2_RDWR; @@ -317,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, return ret; } +static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_write_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + static inline void *kvm_get_hyp_vector(void) { switch(read_cpuid_part()) { @@ -355,6 +430,8 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) (addr) +static inline void kvm_set_ipa_limit(void) {} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h index 529d2cf4d06f4adf88170ca6c84f3e39d3305188..ae6d763477f4f115045b512784bb81c43387d9bc 100644 --- a/arch/arm/include/asm/mcs_spinlock.h +++ b/arch/arm/include/asm/mcs_spinlock.h @@ -14,9 +14,9 @@ do { \ wfe(); \ } while (0) \ -#define arch_mcs_spin_unlock_contended(lock) \ +#define arch_mcs_spin_unlock_contended(lock, val) \ do { \ - smp_store_release(lock, 1); \ + smp_store_release(lock, (val)); \ dsb_sev(); \ } while (0) diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index a89b4076cde4769db52d0ec3fbc83aed9346361e..72821b4721addd78e917b607e6b52b7377909816 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we 
store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 92fd2c8a9af0638834d6c2b5814b9a88911f33fe..3d3103ff6e14dffe93231a9398480bc1697b1818 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -10,7 +10,7 @@ #ifndef _ASM_PGTABLE_2LEVEL_H #define _ASM_PGTABLE_2LEVEL_H -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 /* * Hardware-wise, we have a two level page table structure, where the first @@ -191,6 +191,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) return (pmd_t *)pud; } +#define pud_page(pud) NULL #define pmd_large(pmd) (pmd_val(pmd) & 2) #define pmd_bad(pmd) (pmd_val(pmd) & 2) #define pmd_present(pmd) (pmd_val(pmd)) diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index e25f4392e1b2868446de858701d408aaaee26eab..e1b6f280ab088fb0b8ac59b6ceb3543606c97e01 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -23,7 +23,7 @@ struct mm_struct; /* * Don't change this structure - ASM code relies on it. */ -extern struct processor { +struct processor { /* MISC * get data abort address/flags */ @@ -79,9 +79,13 @@ extern struct processor { unsigned int suspend_size; void (*do_suspend)(void *); void (*do_resume)(void *); -} processor; +}; #ifndef MULTI_CPU +static inline void init_proc_vtable(const struct processor *p) +{ +} + extern void cpu_proc_init(void); extern void cpu_proc_fin(void); extern int cpu_do_idle(void); @@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn)); extern void cpu_do_suspend(void *); extern void cpu_do_resume(void *); #else -#define cpu_proc_init processor._proc_init -#define cpu_proc_fin processor._proc_fin -#define cpu_reset processor.reset -#define cpu_do_idle processor._do_idle -#define cpu_dcache_clean_area processor.dcache_clean_area -#define cpu_set_pte_ext processor.set_pte_ext -#define cpu_do_switch_mm processor.switch_mm -/* These three are private to arch/arm/kernel/suspend.c */ -#define cpu_do_suspend processor.do_suspend -#define cpu_do_resume processor.do_resume +extern struct processor processor; +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) +#include +/* + * This can't be a per-cpu variable because we need to access it before + * per-cpu has been initialised. We have a couple of functions that are + * called in a pre-emptible context, and so can't use smp_processor_id() + * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the + * function pointers for these are identical across all CPUs. 
+ */ +extern struct processor *cpu_vtable[]; +#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f +#define PROC_TABLE(f) cpu_vtable[0]->f +static inline void init_proc_vtable(const struct processor *p) +{ + unsigned int cpu = smp_processor_id(); + *cpu_vtable[cpu] = *p; + WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area != + cpu_vtable[0]->dcache_clean_area); + WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext != + cpu_vtable[0]->set_pte_ext); +} +#else +#define PROC_VTABLE(f) processor.f +#define PROC_TABLE(f) processor.f +static inline void init_proc_vtable(const struct processor *p) +{ + processor = *p; +} +#endif + +#define cpu_proc_init PROC_VTABLE(_proc_init) +#define cpu_check_bugs PROC_VTABLE(check_bugs) +#define cpu_proc_fin PROC_VTABLE(_proc_fin) +#define cpu_reset PROC_VTABLE(reset) +#define cpu_do_idle PROC_VTABLE(_do_idle) +#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area) +#define cpu_set_pte_ext PROC_TABLE(set_pte_ext) +#define cpu_do_switch_mm PROC_VTABLE(switch_mm) + +/* These two are private to arch/arm/kernel/suspend.c */ +#define cpu_do_suspend PROC_VTABLE(do_suspend) +#define cpu_do_resume PROC_VTABLE(do_resume) #endif extern void cpu_resume(void); diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 1bf65b47808a1b99cc574d4340c3a41491fd76a6..cb2a3423b7148cfaa5f0cedc8b8e8611c9e9beb7 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -95,7 +95,11 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) -#define cpu_relax() smp_mb() +#define cpu_relax() \ + do { \ + smp_mb(); \ + __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \ + } while (0) #else #define cpu_relax() barrier() #endif diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h new file mode 100644 index 0000000000000000000000000000000000000000..85f9e538fb325730613f78419f52826a68b8d76c --- /dev/null +++ b/arch/arm/include/asm/spectre.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_SPECTRE_H +#define __ASM_SPECTRE_H + +enum { + SPECTRE_UNAFFECTED, + SPECTRE_MITIGATED, + SPECTRE_VULNERABLE, +}; + +enum { + __SPECTRE_V2_METHOD_BPIALL, + __SPECTRE_V2_METHOD_ICIALLU, + __SPECTRE_V2_METHOD_SMC, + __SPECTRE_V2_METHOD_HVC, + __SPECTRE_V2_METHOD_LOOP8, +}; + +enum { + SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL), + SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU), + SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC), + SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC), + SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8), +}; + +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES +void spectre_v2_update_state(unsigned int state, unsigned int methods); +#else +static inline void spectre_v2_update_state(unsigned int state, + unsigned int methods) +{} +#endif + +int spectre_bhb_update_vectors(unsigned int method); + +#endif diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index 460d616bb2d6dab9859fb244b31eb325a2247c85..9e11dce55e06f4e7359b7b779cc7814ae752c813 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -19,43 +19,68 @@ #ifndef __ARM_S2_PGTABLE_H_ #define __ARM_S2_PGTABLE_H_ -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define 
stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) - -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) - -#define stage2_pud_huge(pud) pud_huge(pud) +/* + * kvm_mmu_cache_min_pages() is the number of pages required + * to install a stage-2 translation. We pre-allocate the entry + * level table at VM creation. Since we have a 3 level page-table, + * we need only two pages to add a new mapping. + */ +#define kvm_mmu_cache_min_pages(kvm) 2 + +#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) +#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) +#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) +#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) +#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) +#define stage2_pud_free(kvm, pud) pud_free(NULL, pud) + +#define stage2_pud_none(kvm, pud) pud_none(pud) +#define stage2_pud_clear(kvm, pud) pud_clear(pud) +#define stage2_pud_present(kvm, pud) pud_present(pud) +#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) +#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) +#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) + +#define stage2_pud_huge(kvm, pud) pud_huge(pud) /* Open coded p*d_addr_end that can deal with 64bit addresses */ -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK; return (boundary - 1 < end - 1) ? boundary : end; } -#define stage2_pud_addr_end(addr, end) (end) +#define stage2_pud_addr_end(kvm, addr, end) (end) -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK; return (boundary - 1 < end - 1) ? 
boundary : end; } -#define stage2_pgd_index(addr) pgd_index(addr) +#define stage2_pgd_index(kvm, addr) pgd_index(addr) + +#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) +#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) +#define stage2_pud_table_empty(kvm, pudp) false + +static inline bool kvm_stage2_has_pud(struct kvm *kvm) +{ + return false; +} + +#define S2_PMD_MASK PMD_MASK +#define S2_PMD_SIZE PMD_SIZE +#define S2_PUD_MASK PUD_MASK +#define S2_PUD_SIZE PUD_SIZE -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) -#define stage2_pud_table_empty(pudp) false +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return true; +} #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 9b37b6ab27fe052eb8225c811688700c6ae99bb3..8f55dc520a3e5512cd1474f4a5c3c2d7e72698e8 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *); struct user_vfp; struct user_vfp_exc; -extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, - struct user_vfp_exc __user *); +extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, + struct user_vfp_exc *); extern int vfp_restore_user_hwstate(struct user_vfp *, struct user_vfp_exc *); #endif diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f854148c8d7c258927b031d0c87e8aa8a142e309..00baa13c158d7f0114003e970e418d36d76988d9 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr tlb_add_flush(tlb, addr); } +static inline void +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, + unsigned long size) +{ + tlb_add_flush(tlb, address); + tlb_add_flush(tlb, address + size - PMD_SIZE); +} + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 5451e1f05a193c002f3b30af921975a28c232833..42aa4a22803c2760d7799bdd17b4434d9cc387bd 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -69,6 +69,14 @@ extern int __put_user_bad(void); static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); } @@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs) #define __inttype(x) \ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) +/* + * Sanitise a uaccess pointer such that it becomes NULL if addr+size + * is above the current addr_limit. 
+ */ +#define uaccess_mask_range_ptr(ptr, size) \ + ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size)) +static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, + size_t size) +{ + void __user *safe_ptr = (void __user *)ptr; + unsigned long tmp; + + asm volatile( + " sub %1, %3, #1\n" + " subs %1, %1, %0\n" + " addhs %1, %1, #1\n" + " subhss %1, %1, %2\n" + " movlo %0, #0\n" + : "+r" (safe_ptr), "=&r" (tmp) + : "r" (size), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + /* * Single-value transfer routines. They automatically use the right * size if we just have the right pointer type. Note that the functions @@ -245,7 +279,7 @@ static inline void set_fs(mm_segment_t fs) #endif /* CONFIG_MMU */ -#define access_ok(type, addr, size) (__range_ok(addr, size) == 0) +#define access_ok(addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ (uaccess_kernel() ? ~0UL : get_fs()) @@ -315,6 +349,13 @@ do { \ #define __get_user_asm_byte(x, addr, err) \ __get_user_asm(x, addr, err, ldrb) +#if __LINUX_ARM_ARCH__ >= 6 + +#define __get_user_asm_half(x, addr, err) \ + __get_user_asm(x, addr, err, ldrh) + +#else + #ifndef __ARMEB__ #define __get_user_asm_half(x, __gu_addr, err) \ ({ \ @@ -333,6 +374,8 @@ do { \ }) #endif +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + #define __get_user_asm_word(x, addr, err) \ __get_user_asm(x, addr, err, ldr) #endif @@ -362,6 +405,14 @@ do { \ __pu_err; \ }) +#ifdef CONFIG_CPU_SPECTRE +/* + * When mitigating Spectre variant 1.1, all accessors need to include + * verification of the address space. + */ +#define __put_user(x, ptr) put_user(x, ptr) + +#else #define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ @@ -369,12 +420,6 @@ do { \ __pu_err; \ }) -#define __put_user_error(x, ptr, err) \ -({ \ - __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ - (void) 0; \ -}) - #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ do { \ unsigned long __pu_addr = (unsigned long)__pu_ptr; \ @@ -406,6 +451,13 @@ do { \ #define __put_user_asm_byte(x, __pu_addr, err) \ __put_user_asm(x, __pu_addr, err, strb) +#if __LINUX_ARM_ARCH__ >= 6 + +#define __put_user_asm_half(x, __pu_addr, err) \ + __put_user_asm(x, __pu_addr, err, strh) + +#else + #ifndef __ARMEB__ #define __put_user_asm_half(x, __pu_addr, err) \ ({ \ @@ -422,6 +474,8 @@ do { \ }) #endif +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + #define __put_user_asm_word(x, __pu_addr, err) \ __put_user_asm(x, __pu_addr, err, str) @@ -454,6 +508,7 @@ do { \ : "r" (x), "i" (-EFAULT) \ : "cc") +#endif /* !CONFIG_CPU_SPECTRE */ #ifdef CONFIG_MMU extern unsigned long __must_check @@ -523,7 +578,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(to, n)) n = __clear_user(to, n); return n; } diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h index 187ccf6496ad61c222dc6e53102ec8a5b6ccf881..2cb00d15831b93e9e10164134d1f72cdb64c4bb0 100644 --- a/arch/arm/include/asm/v7m.h +++ b/arch/arm/include/asm/v7m.h @@ -49,7 +49,7 @@ * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01. 
*/ #define EXC_RET_STACK_MASK 0x00000004 -#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd +#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2) /* Cache related definitions */ diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..a9b3718b8600b789894e784b277a2b9e93f64a60 --- /dev/null +++ b/arch/arm/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_ARM_VMALLOC_H +#define _ASM_ARM_VMALLOC_H + +#endif /* _ASM_ARM_VMALLOC_H */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 4602464ebdfbfccd9296593ed5dd8b15f55305ff..86db092e4c2f3bf7ba02771b869d43569f7b7846 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -254,8 +254,10 @@ struct kvm_vcpu_events { #define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 8cad59465af39ac44eafecc881e0ed9a6afc9893..70fd896c132a67766a3d91827f28a7c4a7401689 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -102,4 +102,6 @@ endif obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o +obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c index 7be5113101915cd81a5558f45238041138fb5a58..d41d3598e5e541115c08f9b81b26fd187a7fe7af 100644 --- a/arch/arm/kernel/bugs.c +++ b/arch/arm/kernel/bugs.c @@ -6,8 +6,8 @@ void check_other_bugs(void) { #ifdef MULTI_CPU - if (processor.check_bugs) - processor.check_bugs(); + if (cpu_check_bugs) + cpu_check_bugs(); #endif } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index e85a3af9ddeb5694b793363f8245ba1ad5f99899..8de1e1da9144ef2b24363c7f91951467b37dfec8 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -1038,12 +1038,11 @@ vector_\name: sub lr, lr, #\correction .endif - @ - @ Save r0, lr_ (parent PC) and spsr_ - @ (parent CPSR) - @ + @ Save r0, lr_ (parent PC) stmia sp, {r0, lr} @ save r0, lr - mrs lr, spsr + + @ Save spsr_ (parent CPSR) +2: mrs lr, spsr str lr, [sp, #8] @ save spsr @ @@ -1064,6 +1063,44 @@ vector_\name: movs pc, lr @ branch to handler in SVC mode ENDPROC(vector_\name) +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .subsection 1 + .align 5 +vector_bhb_loop8_\name: + .if \correction + sub lr, lr, #\correction + .endif + + @ Save r0, lr_ (parent PC) + stmia sp, {r0, lr} + + @ bhb workaround + mov r0, #8 +3: W(b) . + 4 + subs r0, r0, #1 + bne 3b + dsb + isb + b 2b +ENDPROC(vector_bhb_loop8_\name) + +vector_bhb_bpiall_\name: + .if \correction + sub lr, lr, #\correction + .endif + + @ Save r0, lr_ (parent PC) + stmia sp, {r0, lr} + + @ bhb workaround + mcr p15, 0, r0, c7, c5, 6 @ BPIALL + @ isb not needed due to "movs pc, lr" in the vector stub + @ which gives a "context synchronisation". 
+ b 2b +ENDPROC(vector_bhb_bpiall_\name) + .previous +#endif + .align 2 @ handler addresses follow this label 1: @@ -1072,6 +1109,10 @@ ENDPROC(vector_\name) .section .stubs, "ax", %progbits @ This must be the first word .word vector_swi +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .word vector_bhb_loop8_swi + .word vector_bhb_bpiall_swi +#endif vector_rst: ARM( swi SYS_ERROR0 ) @@ -1186,8 +1227,10 @@ vector_addrexcptn: * FIQ "NMI" handler *----------------------------------------------------------------------------- * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86 - * systems. + * systems. This must be the last vector stub, so lets place it in its own + * subsection. */ + .subsection 2 vector_stub fiq, FIQ_MODE, 4 .long __fiq_usr @ 0 (USR_26 / USR_32) @@ -1220,6 +1263,30 @@ vector_addrexcptn: W(b) vector_irq W(b) vector_fiq +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .section .vectors.bhb.loop8, "ax", %progbits +.L__vectors_bhb_loop8_start: + W(b) vector_rst + W(b) vector_bhb_loop8_und + W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004 + W(b) vector_bhb_loop8_pabt + W(b) vector_bhb_loop8_dabt + W(b) vector_addrexcptn + W(b) vector_bhb_loop8_irq + W(b) vector_bhb_loop8_fiq + + .section .vectors.bhb.bpiall, "ax", %progbits +.L__vectors_bhb_bpiall_start: + W(b) vector_rst + W(b) vector_bhb_bpiall_und + W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008 + W(b) vector_bhb_bpiall_pabt + W(b) vector_bhb_bpiall_dabt + W(b) vector_addrexcptn + W(b) vector_bhb_bpiall_irq + W(b) vector_bhb_bpiall_fiq +#endif + .data .align 2 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 746565a876dcdd362522d1e546c8404faacfbfe2..e27fc2df523167cd6dbbeeb5c65bd7d23dd21402 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -165,6 +165,29 @@ ENDPROC(ret_from_fork) *----------------------------------------------------------------------------- */ + .align 5 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +ENTRY(vector_bhb_loop8_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mov r8, #8 +1: b 2f +2: subs r8, r8, #1 + bne 1b + dsb + isb + b 3f +ENDPROC(vector_bhb_loop8_swi) + + .align 5 +ENTRY(vector_bhb_bpiall_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mcr p15, 0, r8, c7, c5, 6 @ BPIALL + isb + b 3f +ENDPROC(vector_bhb_bpiall_swi) +#endif .align 5 ENTRY(vector_swi) #ifdef CONFIG_CPU_V7M @@ -172,6 +195,7 @@ ENTRY(vector_swi) #else sub sp, sp, #PT_REGS_SIZE stmia sp, {r0 - r12} @ Calling r0 - r12 +3: ARM( add r8, sp, #S_PC ) ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr THUMB( mov r8, sp ) @@ -296,16 +320,15 @@ __sys_trace: cmp scno, #-1 @ skip the syscall? bne 2b add sp, sp, #S_OFF @ restore stack - b ret_slow_syscall -__sys_trace_return: - str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 +__sys_trace_return_nosave: + enable_irq_notrace mov r0, sp bl syscall_trace_exit b ret_slow_syscall -__sys_trace_return_nosave: - enable_irq_notrace +__sys_trace_return: + str r0, [sp, #S_R0 + S_OFF]! 
@ save returned r0 mov r0, sp bl syscall_trace_exit b ret_slow_syscall diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 773424843d6efcc2ebeb0ec0cfa88d67643213cc..62db1c9746cbc83607c9eaeb45b52a294b94e36e 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -127,7 +127,8 @@ */ .macro v7m_exception_slow_exit ret_r0 cpsid i - ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK + ldr lr, =exc_ret + ldr lr, [lr] @ read original r12, sp, lr, pc and xPSR add r12, sp, #S_IP diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index abcf4784852593397daf3b1e6cf5d70cf47660e0..19d2dcd6530dc351188bd6c7785705e36e9e64d7 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -146,3 +146,7 @@ ENTRY(vector_table) .rept CONFIG_CPU_V7M_NUM_IRQ .long __irq_entry @ External Interrupts .endr + .align 2 + .globl exc_ret +exc_ret: + .space 4 diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 5617932a83dfaab416a05e429642b2d19b0a4438..ee673c09aa6c0357efb4ad510a48d9eb419668a0 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -227,9 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; - struct ftrace_graph_ent trace; unsigned long old; - int err; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; @@ -237,21 +235,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, old = *parent; *parent = return_hooker; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { + if (function_graph_enter(old, self_addr, frame_pointer, NULL)) *parent = old; - return; - } - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, NULL); - if (err == -EBUSY) { - *parent = old; - return; - } } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 6e0375e7db055bc82cf0674b37b74646e2d64ff0..9328f2010bc199198612120bde11a7a0c8d2c729 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -72,7 +72,7 @@ ENDPROC(__vet_atags) * The following fragment of code is executed with the MMU on in MMU mode, * and uses absolute addresses; this is not position independent. * - * r0 = cp#15 control register + * r0 = cp#15 control register (exc_ret for M-class) * r1 = machine ID * r2 = atags/dtb pointer * r9 = processor ID @@ -141,10 +141,14 @@ __mmap_switched_data: #ifdef CONFIG_CPU_CP15 .long cr_alignment @ r3 #else - .long 0 @ r3 +M_CLASS(.long exc_ret) @ r3 +AR_CLASS(.long 0) @ r3 #endif .size __mmap_switched_data, . - __mmap_switched_data + __FINIT + .text + /* * This provides a C-API version of __lookup_processor_type */ @@ -156,9 +160,6 @@ ENTRY(lookup_processor_type) ldmfd sp!, {r4 - r6, r9, pc} ENDPROC(lookup_processor_type) - __FINIT - .text - /* * Read processor ID register (CP#15, CR0), and look up in the linker-built * supported processor list.
Note that we can't use the absolute addresses diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index ec29de2500764e11ad839bda165c135f12b6a6e5..326a97aa3ea0cfaf390d06a267cbcdc280d18164 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -133,9 +133,9 @@ __secondary_data: */ .text __after_proc_init: -#ifdef CONFIG_ARM_MPU M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB) M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB) +#ifdef CONFIG_ARM_MPU M_CLASS(ldr r3, [r12, 0x50]) AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0 and r3, r3, #(MMFR0_PMSA) @ PMSA field @@ -205,6 +205,8 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1]) bic r0, r0, #V7M_SCB_CCR_IC #endif str r0, [r12, V7M_SCB_CCR] + /* Pass exc_ret to __mmap_switched */ + mov r0, r10 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */ ret lr ENDPROC(__after_proc_init) diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 1d5fbf1d1c675770dde85c75912b692827375e1c..3f29de45fb906a94c56ac5689cd8d284856e9e8e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -830,7 +830,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (!bp->overflow_handler) + if (is_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229fbfa694ceebdfb2ed1b534c3f522..844861368cd5c236a113adaeab8a26d43d8ac419 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) return nr_irqs; } #endif - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. 
- */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 90bce3d9928e2f679c01d0bf13eabf2dead216f2..303b3ab87f7e892a4607055eae136582372745cf 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -4,8 +4,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - static void __arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type, bool is_static) @@ -35,5 +33,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __arch_jump_label_transform(entry, type, true); } - -#endif diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index dd2eb5f76b9f0a7d64f50169dd0d04a402b2ae67..76300f3813e89bc48a76d83dc3076d9f7b79ee84 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused) set_cpu_online(smp_processor_id(), false); atomic_dec(&waiting_for_crash_ipi); - while (1) + + while (1) { cpu_relax(); + wfe(); + } } void crash_smp_send_stop(void) diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c index a50dc00d79a273fac9e5d5c3f8be75f37231f766..d0a05a3bdb9652450ea4a6e1cdc6036c945ab42a 100644 --- a/arch/arm/kernel/patch.c +++ b/arch/arm/kernel/patch.c @@ -16,7 +16,7 @@ struct patch { unsigned int insn; }; -static DEFINE_SPINLOCK(patch_lock); +static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) __acquires(&patch_lock) @@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) return addr; if (flags) - spin_lock_irqsave(&patch_lock, *flags); + raw_spin_lock_irqsave(&patch_lock, *flags); else __acquire(&patch_lock); @@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) clear_fixmap(fixmap); if (flags) - spin_unlock_irqrestore(&patch_lock, *flags); + raw_spin_unlock_irqrestore(&patch_lock, *flags); else __release(&patch_lock); } diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c index 08e43a32a693bd810f98d366e19c03e1e3767d63..3b69a76d341e784075a1f8ef053f0d308177feee 100644 --- a/arch/arm/kernel/perf_callchain.c +++ b/arch/arm/kernel/perf_callchain.c @@ -37,7 +37,7 @@ user_backtrace(struct frame_tail __user *tail, struct frame_tail buftail; unsigned long err; - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + if (!access_ok(tail, sizeof(buftail))) return NULL; pagefault_disable(); diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 1ae99deeec5491ebddfd212acd8de384e8969d0f..c6e44bfe1b14e7124711f71d9cb2be51adbb27ed 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -68,6 +68,12 @@ enum armv6_counters { ARMV6_COUNTER1, }; +/* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ +DEFINE_PER_CPU(raw_spinlock_t, pmu_lock); + /* * The hardware events that we support. 
We do support cache operations but * we have harvard caches and no way to combine instruction and data @@ -272,6 +278,7 @@ static void armv6pmu_enable_event(struct perf_event *event) struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + struct raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { @@ -294,12 +301,18 @@ static void armv6pmu_enable_event(struct perf_event *event) * Mask out the current event and set the counter to count the event * that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_lock_irqsave(lock, flags); + else + raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_unlock_irqrestore(lock, flags); + else + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t @@ -364,24 +377,38 @@ static void armv6pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock); - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_lock_irqsave(lock, flags); + else + raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val |= ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_unlock_irqrestore(lock, flags); + else + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv6pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock); - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_lock_irqsave(lock, flags); + else + raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (pmu_nmi_enable) + raw_spin_unlock_irqrestore(lock, flags); + else + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int @@ -502,6 +529,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->stop = armv6pmu_stop; cpu_pmu->map_event = armv6_map_event; cpu_pmu->num_events = 3; + + raw_spin_lock_init(this_cpu_ptr(&pmu_lock)); } static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index a4fb0f8b8f84a96544977bf0c31b49d6e93c714a..c01ed569590aeeecea37d4e798e993b781705866 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -736,10 +736,27 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); } -static inline void armv7_pmnc_select_counter(int idx) +static inline u32 armv7_pmsel_read(void) +{ + u32 pmsel; + + asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=&r" (pmsel)); + return pmsel; +} + +static inline void armv7_pmsel_write(u32 counter) { - u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); +} + +static inline void armv7_pmnc_select_counter(int idx) +{ + if (pmu_nmi_enable) { + armv7_pmsel_write(ARMV7_IDX_TO_COUNTER(idx)); + } else { + u32 counter = 
ARMV7_IDX_TO_COUNTER(idx); + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); + } isb(); } @@ -886,7 +903,8 @@ static void armv7pmu_enable_event(struct perf_event *event) * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -911,7 +929,8 @@ static void armv7pmu_enable_event(struct perf_event *event) */ armv7_pmnc_enable_counter(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv7pmu_disable_event(struct perf_event *event) @@ -931,7 +950,8 @@ static void armv7pmu_disable_event(struct perf_event *event) /* * Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -943,7 +963,8 @@ static void armv7pmu_disable_event(struct perf_event *event) */ armv7_pmnc_disable_intens(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) @@ -952,8 +973,12 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; + u32 pmsel; int idx; + if (pmu_nmi_enable) + pmsel = armv7_pmsel_read(); + /* * Get and reset the IRQ flags */ @@ -1004,29 +1029,46 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) */ irq_work_run(); + if (pmu_nmi_enable) + armv7_pmsel_write(pmsel); + return IRQ_HANDLED; } static void armv7pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + if (pmu_nmi_enable) { + preempt_disable(); + /* Enable all counters */ + armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); + preempt_enable(); + } else { + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Enable all counters */ - armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Enable all counters */ + armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + } } static void armv7pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + if (pmu_nmi_enable) { + preempt_disable(); + /* Disable all counters */ + armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); + preempt_enable(); + } else { + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Disable all counters */ - armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Disable all counters */ + armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + } } static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, @@ -1499,7 +1541,8 @@ static void krait_pmu_disable_event(struct perf_event *event) struct pmu_hw_events *events = 
this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1513,7 +1556,8 @@ static void krait_pmu_disable_event(struct perf_event *event) /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_enable_event(struct perf_event *event) @@ -1528,7 +1572,8 @@ static void krait_pmu_enable_event(struct perf_event *event) * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1549,7 +1594,8 @@ static void krait_pmu_enable_event(struct perf_event *event) /* Enable counter */ armv7_pmnc_enable_counter(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_reset(void *info) @@ -1832,7 +1878,8 @@ static void scorpion_pmu_disable_event(struct perf_event *event) struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1846,7 +1893,8 @@ static void scorpion_pmu_disable_event(struct perf_event *event) /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_enable_event(struct perf_event *event) @@ -1861,7 +1909,8 @@ static void scorpion_pmu_enable_event(struct perf_event *event) * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1882,7 +1931,8 @@ static void scorpion_pmu_enable_event(struct perf_event *event) /* Enable counter */ armv7_pmnc_enable_counter(idx); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_reset(void *info) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 4c249cb261f3913112792cd6cad0a7e2df17ff4f..7bbaa293a38ce959367811f7fa59c4b27db7cef8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2); #ifdef MULTI_CPU struct processor processor __ro_after_init; +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) +struct processor *cpu_vtable[NR_CPUS] = { + [0] = &processor, +}; +#endif #endif #ifdef MULTI_TLB struct cpu_tlb_fns cpu_tlb __ro_after_init; @@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void) } #endif -static void __init setup_processor(void) +/* + * locate processor in the list of supported processor types. 
The linker + * builds this table for us from the entries in arch/arm/mm/proc-*.S + */ +struct proc_info_list *lookup_processor(u32 midr) { - struct proc_info_list *list; + struct proc_info_list *list = lookup_processor_type(midr); - /* - * locate processor in the list of supported processor - * types. The linker builds this table for us from the - * entries in arch/arm/mm/proc-*.S - */ - list = lookup_processor_type(read_cpuid_id()); if (!list) { - pr_err("CPU configuration botched (ID %08x), unable to continue.\n", - read_cpuid_id()); - while (1); + pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", + smp_processor_id(), midr); + while (1) + /* can't use cpu_relax() here as it may require MMU setup */; } + return list; +} + +static void __init setup_processor(void) +{ + unsigned int midr = read_cpuid_id(); + struct proc_info_list *list = lookup_processor(midr); + cpu_name = list->cpu_name; __cpu_architecture = __get_cpu_architecture(); -#ifdef MULTI_CPU - processor = *list->proc; -#endif + init_proc_vtable(list->proc); #ifdef MULTI_TLB cpu_tlb = *list->tlb; #endif @@ -700,7 +710,7 @@ static void __init setup_processor(void) #endif pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", - cpu_name, read_cpuid_id(), read_cpuid_id() & 15, + list->cpu_name, midr, midr & 15, proc_arch[cpu_architecture()], get_cr()); snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index b8f766cf3a905d5c226f8caa2f0cc367b73671f6..dfe24883cc93aa7550f782a4e3ea38bf985f1e94 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) kframe->magic = IWMMXT_MAGIC; kframe->size = IWMMXT_STORAGE_SIZE; iwmmxt_task_copy(current_thread_info(), &kframe->storage); - - err = __copy_to_user(frame, kframe, sizeof(*frame)); } else { /* * For bug-compatibility with older kernels, some space @@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) * Set the magic and size appropriately so that properly * written userspace can skip it reliably: */ - __put_user_error(DUMMY_MAGIC, &frame->magic, err); - __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err); + *kframe = (struct iwmmxt_sigframe) { + .magic = DUMMY_MAGIC, + .size = IWMMXT_STORAGE_SIZE, + }; } + err = __copy_to_user(frame, kframe, sizeof(*kframe)); + return err; } @@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp) static int preserve_vfp_context(struct vfp_sigframe __user *frame) { - const unsigned long magic = VFP_MAGIC; - const unsigned long size = VFP_STORAGE_SIZE; + struct vfp_sigframe kframe; int err = 0; - __put_user_error(magic, &frame->magic, err); - __put_user_error(size, &frame->size, err); + memset(&kframe, 0, sizeof(kframe)); + kframe.magic = VFP_MAGIC; + kframe.size = VFP_STORAGE_SIZE; + err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); if (err) - return -EFAULT; + return err; - return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); + return __copy_to_user(frame, &kframe, sizeof(kframe)); } static int restore_vfp_context(char __user **auxp) @@ -238,7 +241,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) frame = (struct sigframe __user *)regs->ARM_sp; - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, frame)) @@ -268,7 +271,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) 
frame = (struct rt_sigframe __user *)regs->ARM_sp; - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) @@ -288,30 +291,35 @@ static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct aux_sigframe __user *aux; + struct sigcontext context; int err = 0; - __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); - __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); - __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); - __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); - __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); - __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); - __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); - __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); - __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); - __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); - __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); - __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); - __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); - __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); - __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); - __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); - __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); - - __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); - __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); - __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); - __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); + context = (struct sigcontext) { + .arm_r0 = regs->ARM_r0, + .arm_r1 = regs->ARM_r1, + .arm_r2 = regs->ARM_r2, + .arm_r3 = regs->ARM_r3, + .arm_r4 = regs->ARM_r4, + .arm_r5 = regs->ARM_r5, + .arm_r6 = regs->ARM_r6, + .arm_r7 = regs->ARM_r7, + .arm_r8 = regs->ARM_r8, + .arm_r9 = regs->ARM_r9, + .arm_r10 = regs->ARM_r10, + .arm_fp = regs->ARM_fp, + .arm_ip = regs->ARM_ip, + .arm_sp = regs->ARM_sp, + .arm_lr = regs->ARM_lr, + .arm_pc = regs->ARM_pc, + .arm_cpsr = regs->ARM_cpsr, + + .trap_no = current->thread.trap_no, + .error_code = current->thread.error_code, + .fault_address = current->thread.address, + .oldmask = set->sig[0], + }; + + err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); @@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) if (err == 0) err |= preserve_vfp_context(&aux->vfp); #endif - __put_user_error(0, &aux->end_magic, err); + err |= __put_user(0, &aux->end_magic); return err; } @@ -347,7 +355,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize) /* * Check that we can actually write to the signal frame. */ - if (!access_ok(VERIFY_WRITE, frame, framesize)) + if (!access_ok(frame, framesize)) frame = NULL; return frame; @@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) /* * Set uc.uc_flags to a value which sc.trap_no would never have. 
*/ - __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); + err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); err |= setup_sigframe(frame, regs, set); if (err == 0) @@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) err |= copy_siginfo_to_user(&frame->info, &ksig->info); - __put_user_error(0, &frame->sig.uc.uc_flags, err); - __put_user_error(NULL, &frame->sig.uc.uc_link, err); + err |= __put_user(0, &frame->sig.uc.uc_flags); + err |= __put_user(NULL, &frame->sig.uc.uc_link); err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); err |= setup_sigframe(&frame->sig, regs, set); @@ -689,18 +697,20 @@ struct page *get_signal_page(void) addr = page_address(page); + /* Poison the entire page */ + memset32(addr, __opcode_to_mem_arm(0xe7fddef1), + PAGE_SIZE / sizeof(u32)); + /* Give the signal return code some randomness */ offset = 0x200 + (get_random_int() & 0x7fc); signal_return_offset = offset; - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. - */ + /* Copy signal return handlers into the page */ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); - ptr = (unsigned long)addr + offset; - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); + /* Flush out all instructions in this page */ + ptr = (unsigned long)addr; + flush_icache_range(ptr, ptr + PAGE_SIZE); return page; } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 0978282d5fc27a7c4a5e6b0e274da8bfc4c14c8d..bada66ef4419344a456c83bdf4288b6da7bcc9cf 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -75,6 +76,10 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + /* + * CPU_BACKTRACE is special and not included in NR_IPI + * or tracable with trace_ipi_* + */ IPI_CPU_BACKTRACE, /* * SGI8-15 can be reserved by secure firmware, and thus may @@ -102,6 +107,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd) #endif } +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) +static int secondary_biglittle_prepare(unsigned int cpu) +{ + if (!cpu_vtable[cpu]) + cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); + + return cpu_vtable[cpu] ? 0 : -ENOMEM; +} + +static void secondary_biglittle_init(void) +{ + init_proc_vtable(lookup_processor(read_cpuid_id())->proc); +} +#else +static int secondary_biglittle_prepare(unsigned int cpu) +{ + return 0; +} + +static void secondary_biglittle_init(void) +{ +} +#endif + int __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret; @@ -109,6 +138,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) if (!smp_ops.smp_boot_secondary) return -ENOSYS; + ret = secondary_biglittle_prepare(cpu); + if (ret) + return ret; + /* * We need to tell the secondary core where to find * its stack and the page tables. @@ -225,7 +258,7 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU @@ -359,6 +392,8 @@ asmlinkage void secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu; + secondary_biglittle_init(); + /* * The identity mapping is uncached (strongly ordered), so * switch away from it before attempting any exclusive accesses. 
@@ -573,8 +608,10 @@ static void ipi_cpu_stop(unsigned int cpu) local_fiq_disable(); local_irq_disable(); - while (1) + while (1) { cpu_relax(); + wfe(); + } } static DEFINE_PER_CPU(struct completion *, cpu_completion); @@ -693,6 +730,21 @@ void smp_send_stop(void) pr_warn("SMP: failed to stop secondary CPUs\n"); } +/* In case panic() and panic() called at the same time on CPU1 and CPU2, + * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop() + * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online, + * kdump fails. So split out the panic_smp_self_stop() and add + * set_cpu_online(smp_processor_id(), false). + */ +void panic_smp_self_stop(void) +{ + pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", + smp_processor_id()); + set_cpu_online(smp_processor_id(), false); + while (1) + cpu_relax(); +} + /* * not supported here */ @@ -755,7 +807,7 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { - smp_cross_call(mask, IPI_CPU_BACKTRACE); + __smp_cross_call(mask, IPI_CPU_BACKTRACE); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c new file mode 100644 index 0000000000000000000000000000000000000000..0dcefc36fb7a08af113ef923af1fe9c826da662d --- /dev/null +++ b/arch/arm/kernel/spectre.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include + +#include + +static bool _unprivileged_ebpf_enabled(void) +{ +#ifdef CONFIG_BPF_SYSCALL + return !sysctl_unprivileged_bpf_disabled; +#else + return false; +#endif +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +static unsigned int spectre_v2_state; +static unsigned int spectre_v2_methods; + +void spectre_v2_update_state(unsigned int state, unsigned int method) +{ + if (state > spectre_v2_state) + spectre_v2_state = state; + spectre_v2_methods |= method; +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const char *method; + + if (spectre_v2_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "%s\n", "Not affected"); + + if (spectre_v2_state != SPECTRE_MITIGATED) + return sprintf(buf, "%s\n", "Vulnerable"); + + if (_unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + switch (spectre_v2_methods) { + case SPECTRE_V2_METHOD_BPIALL: + method = "Branch predictor hardening"; + break; + + case SPECTRE_V2_METHOD_ICIALLU: + method = "I-cache invalidation"; + break; + + case SPECTRE_V2_METHOD_SMC: + case SPECTRE_V2_METHOD_HVC: + method = "Firmware call"; + break; + + case SPECTRE_V2_METHOD_LOOP8: + method = "History overwrite"; + break; + + default: + method = "Multiple mitigations"; + break; + } + + return sprintf(buf, "Mitigation: %s\n", method); +} diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 80517f293eb9c84c58935214174033bfe80ea892..cbb79437876fb3433eabb9495d609c6785dca266 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -200,7 +200,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ - if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { + if (!access_ok((address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p 
not allowed!\n", (void *)address); res = -EFAULT; diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index f0dd4b6ebb6330e5007e883d2ba2d099a4e1df94..92ab36f3879512979c4b0ba558b92599d3df89ab 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, int maxevents, int timeout) { struct epoll_event *kbuf; + struct oabi_epoll_event e; mm_segment_t fs; long ret, err, i; @@ -284,7 +285,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, maxevents > (INT_MAX/sizeof(*kbuf)) || maxevents > (INT_MAX/sizeof(*events))) return -EINVAL; - if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) + if (!access_ok(events, sizeof(*events) * maxevents)) return -EFAULT; kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL); if (!kbuf) @@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, set_fs(fs); err = 0; for (i = 0; i < ret; i++) { - __put_user_error(kbuf[i].events, &events->events, err); - __put_user_error(kbuf[i].data, &events->data, err); + e.events = kbuf[i].events; + e.data = kbuf[i].data; + err = __copy_to_user(events, &e, sizeof(e)); + if (err) + break; events++; } kfree(kbuf); @@ -322,7 +326,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, if (nsops < 1 || nsops > SEMOPM) return -EINVAL; - if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) + if (!access_ok(tsops, sizeof(*tsops) * nsops)) return -EFAULT; sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (!sops) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index badf02ca369384f3fb9522139aefb0c66bff55e5..4c61ac4f3d3555b8a3765ad48e2a2845be838ee7 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -594,7 +595,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end < start || flags) return -EINVAL; - if (!access_ok(VERIFY_READ, start, end - start)) + if (!access_ok(start, end - start)) return -EFAULT; return __do_cache_op(start, end); @@ -828,10 +829,59 @@ static inline void __init kuser_init(void *vectors) } #endif +#ifndef CONFIG_CPU_V7M +static void copy_from_lma(void *vma, void *lma_start, void *lma_end) +{ + memcpy(vma, lma_start, lma_end - lma_start); +} + +static void flush_vectors(void *vma, size_t offset, size_t size) +{ + unsigned long start = (unsigned long)vma + offset; + unsigned long end = start + size; + + flush_icache_range(start, end); +} + +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +int spectre_bhb_update_vectors(unsigned int method) +{ + extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; + extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; + void *vec_start, *vec_end; + + if (system_state > SYSTEM_SCHEDULING) { + pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", + smp_processor_id()); + return SPECTRE_VULNERABLE; + } + + switch (method) { + case SPECTRE_V2_METHOD_LOOP8: + vec_start = __vectors_bhb_loop8_start; + vec_end = __vectors_bhb_loop8_end; + break; + + case SPECTRE_V2_METHOD_BPIALL: + vec_start = __vectors_bhb_bpiall_start; + vec_end = __vectors_bhb_bpiall_end; + break; + + default: + pr_err("CPU%u: unknown Spectre BHB state %d\n", + smp_processor_id(), method); + return SPECTRE_VULNERABLE; + } + + copy_from_lma(vectors_page, vec_start, vec_end); + flush_vectors(vectors_page, 0, vec_end - vec_start); + + return SPECTRE_MITIGATED; +} +#endif + void __init 
early_trap_init(void *vectors_base) { -#ifndef CONFIG_CPU_V7M - unsigned long vectors = (unsigned long)vectors_base; extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; unsigned i; @@ -852,17 +902,20 @@ void __init early_trap_init(void *vectors_base) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. */ - memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); - memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); + copy_from_lma(vectors_base, __vectors_start, __vectors_end); + copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); kuser_init(vectors_base); - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); + flush_vectors(vectors_base, 0, PAGE_SIZE * 2); +} #else /* ifndef CONFIG_CPU_V7M */ +void __init early_trap_init(void *vectors_base) +{ /* * on V7-M there is no need to copy the vector table to a dedicated * memory area. The address is configurable and so a table in the kernel * image can be used. */ -#endif } +#endif diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 0bee233fef9a30bc92df73da7beee4e3f2966f2f..314cfb232a6353165dc899f35e5747a27a7ef617 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[]; static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; -static DEFINE_SPINLOCK(unwind_lock); +static DEFINE_RAW_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); /* Convert a prel31 symbol to an absolute address */ @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) /* module unwind tables */ struct unwind_table *table; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->begin_addr && addr < table->end_addr) { @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) break; } } - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); } pr_debug("%s: idx = %p\n", __func__, idx); @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); return tab; } @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab) if (!tab) return; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_del(&tab->list); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); kfree(tab); } diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h index 8247bc15addc419d2b6f4bf222d4f37e172ca0a7..78d156e4f008847486fa052bb74f4372038aa3a6 100644 --- a/arch/arm/kernel/vmlinux.lds.h +++ b/arch/arm/kernel/vmlinux.lds.h @@ -25,6 +25,19 @@ #define ARM_MMU_DISCARD(x) x #endif +/* + * ld.lld does not support NOCROSSREFS: + * https://github.com/ClangBuiltLinux/linux/issues/1609 + */ +#ifdef CONFIG_LD_IS_LLD +#define NOCROSSREFS +#endif + +/* Set start/end symbol names to the LMA for the section */ +#define ARM_LMA(sym, section) \ + sym##_start = LOADADDR(section); \ + 
sym##_end = LOADADDR(section) + SIZEOF(section) + #define PROC_INFO \ . = ALIGN(4); \ __proc_info_begin = .; \ @@ -100,19 +113,31 @@ * only thing that matters is their relative offsets */ #define ARM_VECTORS \ - __vectors_start = .; \ - .vectors 0xffff0000 : AT(__vectors_start) { \ - *(.vectors) \ + __vectors_lma = .; \ + OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ + .vectors { \ + *(.vectors) \ + } \ + .vectors.bhb.loop8 { \ + *(.vectors.bhb.loop8) \ + } \ + .vectors.bhb.bpiall { \ + *(.vectors.bhb.bpiall) \ + } \ } \ - . = __vectors_start + SIZEOF(.vectors); \ - __vectors_end = .; \ + ARM_LMA(__vectors, .vectors); \ + ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \ + ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \ + . = __vectors_lma + SIZEOF(.vectors) + \ + SIZEOF(.vectors.bhb.loop8) + \ + SIZEOF(.vectors.bhb.bpiall); \ \ - __stubs_start = .; \ - .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \ + __stubs_lma = .; \ + .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \ *(.stubs) \ } \ - . = __stubs_start + SIZEOF(.stubs); \ - __stubs_end = .; \ + ARM_LMA(__stubs, .stubs); \ + . = __stubs_lma + SIZEOF(.stubs); \ \ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors)); diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index 48de846f22464637be95c64e0a1ff9357b6e5a65..5994f3b3d375e7da643d745c36d862ec43f8b441 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -24,7 +24,7 @@ obj-y += kvm-arm.o init.o interrupts.o obj-y += handle_exit.o guest.o emulate.o reset.o obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o -obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o +obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o obj-y += $(KVM)/arm/aarch32.o obj-y += $(KVM)/arm/vgic/vgic.o diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index cb094e55dc5f12cacd25bde0b5b9eb853c6f3eeb..cac13ef4bba163ad8ad3e25bdf47da70b8771836 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -109,6 +109,8 @@ int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); + vcpu->stat.cp14_ls_exit_stat++; + return 1; } @@ -602,8 +604,8 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, } } else { /* If access function fails, it should complain. 
*/ - kvm_err("Unsupported guest CP15 access at: %08lx\n", - *vcpu_pc(vcpu)); + kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_cp_instr(params); kvm_inject_undefined(vcpu); } @@ -637,6 +639,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct coproc_params params = decode_64bit_hsr(vcpu); + vcpu->stat.cp15_64_exit_stat++; + return emulate_cp15(vcpu, &params); } @@ -654,17 +658,28 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) /* handled */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + vcpu->stat.cp14_64_exit_stat++; + return 1; } static void reset_coproc_regs(struct kvm_vcpu *vcpu, - const struct coproc_reg *table, size_t num) + const struct coproc_reg *table, size_t num, + unsigned long *bmap) { unsigned long i; for (i = 0; i < num; i++) - if (table[i].reset) + if (table[i].reset) { + int reg = table[i].reg; + table[i].reset(vcpu, &table[i]); + if (reg > 0 && reg < NR_CP15_REGS) { + set_bit(reg, bmap); + if (table[i].is_64bit) + set_bit(reg + 1, bmap); + } + } } static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) @@ -692,6 +707,9 @@ static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct coproc_params params = decode_32bit_hsr(vcpu); + + vcpu->stat.cp15_32_exit_stat++; + return emulate_cp15(vcpu, &params); } @@ -709,6 +727,8 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) /* handled */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + vcpu->stat.cp14_mr_exit_stat++; + return 1; } @@ -1439,17 +1459,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) { size_t num; const struct coproc_reg *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); + DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, }; /* Generic chip reset first (so target could override).
*/ - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); + reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap); table = get_target_table(vcpu->arch.target, &num); - reset_coproc_regs(vcpu, table, num); + reset_coproc_regs(vcpu, table, num, bmap); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu_cp15(vcpu, num) == 0x42424242) - panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); + WARN(!test_bit(num, bmap), + "Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 910bd8dabb3c038932abec060bc66da8834aeb68..404c67a06a2fa1977e19d6b9269054a53efc95f1 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "trace.h" diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index d2b5ec9c4b9293758626d35ce49b6ba226140b73..ba88b1eca93c4c638c0243acede841235e4c16d3 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,6 +11,7 @@ CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c index c4782812714cf4cbdbd532c8b975cd5cd20ee4cf..8bf895ec6e04231f9849ef3ca4bf88d45eb3f74f 100644 --- a/arch/arm/kvm/hyp/cp15-sr.c +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) { - ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR); ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index c0edd450e10459612e37cc292ad8585494d12773..e8c45d088d0b1c97f0b0fd3829780c3ee7badcfd 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -56,7 +56,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) __kvm_tlb_flush_vmid(kvm); } -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d66b3c8263d28d28da756693b4c935..e53327912adc67e80a93f6f4130df5b81313f902 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. 
+ */ + if (READ_ONCE(vcpu->arch.reset_state.reset)) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (target_pc & 1) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset arch_timer context */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index ad25fd1872c7d7dd5c6c837e9f579b284dec2c15..0bff0176db2c4f1bb31dd9cdaa3b0eceaa26dcd9 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S ifeq ($(CONFIG_KERNEL_MODE_NEON),y) - NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon + NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon CFLAGS_xor-neon.o += $(NEON_FLAGS) obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o endif diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index a826df3d3814bfef44d6e6f71b38c8131d9fe25b..6709a8d33963b679b0a0f6ec683e3291a7f07a71 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user) #ifdef CONFIG_CPU_SPECTRE get_thread_info r3 ldr r3, [r3, #TI_ADDR_LIMIT] - adds ip, r1, r2 @ ip=addr+size - sub r3, r3, #1 @ addr_limit - 1 - cmpcc ip, r3 @ if (addr+size > addr_limit - 1) - movcs r1, #0 @ addr = NULL - csdb + uaccess_mask_range_ptr r1, r2, r3, ip #endif #include "copy_template.S" diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index caf5019d8161e2f1914a797a4c6800844a27d570..970abe521197fa1881473c2f7b1423bcea77227c 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -94,6 +94,11 @@ ENTRY(__copy_to_user_std) WEAK(arm_copy_to_user) +#ifdef CONFIG_CPU_SPECTRE + get_thread_info r3 + ldr r3, [r3, #TI_ADDR_LIMIT] + uaccess_mask_range_ptr r0, r2, r3, ip +#endif #include "copy_template.S" @@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std) rsb r0, r0, r2 copy_abort_end .popsection - diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 746e7801dcdf70fed9e339c2d6800b3f275c49b7..b2e4bc3a635e22002fd90eaf4d802b528f6dab32 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -42,6 +42,12 @@ _ASM_NOKPROBE(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad +#if __LINUX_ARM_ARCH__ >= 6 + +2: TUSER(ldrh) r2, [r0] + +#else + #ifdef CONFIG_CPU_USE_DOMAINS rb .req ip 2: ldrbt r2, [r0], #1 @@ -56,6 +62,9 @@ rb .req r0 #else orr r2, rb, r2, lsl #8 #endif + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + mov r0, #0 ret lr ENDPROC(__get_user_2) @@ -145,7 +154,9 @@ _ASM_NOKPROBE(__get_user_bad8) .pushsection __ex_table, "a" .long 1b, __get_user_bad .long 2b, __get_user_bad +#if __LINUX_ARM_ARCH__ < 6 .long 3b, __get_user_bad +#endif .long 4b, __get_user_bad .long 5b, __get_user_bad8 .long 6b, __get_user_bad8 diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 38d660d3705f4f259c5299d2cc8c1126f0a1dbb4..515eeaa9975c6cbf75b8aba01d85df7deb063b25 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -41,16 +41,13 @@ ENDPROC(__put_user_1) ENTRY(__put_user_2) check_uaccess r0, 2, r1, ip, __put_user_bad - mov ip, r2, lsr #8 -#ifdef CONFIG_THUMB2_KERNEL -#ifndef __ARMEB__ -2: TUSER(strb) r2, [r0] -3: TUSER(strb) ip, [r0, #1] +#if 
__LINUX_ARM_ARCH__ >= 6 + +2: TUSER(strh) r2, [r0] + #else -2: TUSER(strb) ip, [r0] -3: TUSER(strb) r2, [r0, #1] -#endif -#else /* !CONFIG_THUMB2_KERNEL */ + + mov ip, r2, lsr #8 #ifndef __ARMEB__ 2: TUSER(strb) r2, [r0], #1 3: TUSER(strb) ip, [r0] @@ -58,7 +55,8 @@ ENTRY(__put_user_2) 2: TUSER(strb) ip, [r0], #1 3: TUSER(strb) r2, [r0] #endif -#endif /* CONFIG_THUMB2_KERNEL */ + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ mov r0, #0 ret lr ENDPROC(__put_user_2) @@ -91,7 +89,9 @@ ENDPROC(__put_user_bad) .pushsection __ex_table, "a" .long 1b, __put_user_bad .long 2b, __put_user_bad +#if __LINUX_ARM_ARCH__ < 6 .long 3b, __put_user_bad +#endif .long 4b, __put_user_bad .long 5b, __put_user_bad .long 6b, __put_user_bad diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 9b4ed172861615fa94f628fb3eea067176ef3bae..73dc7360cbdd58c76e7e230417e83d902d3a2a3b 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) n = __copy_to_user_std(to, from, n); uaccess_restore(ua_flags); } else { - n = __copy_to_user_memcpy(to, from, n); + n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n), + from, n); } return n; } diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c index 2c40aeab3eaae8cb038a283b6fa2dc422d744d08..c691b901092f55a8f251c186a6938ba19d79f6ec 100644 --- a/arch/arm/lib/xor-neon.c +++ b/arch/arm/lib/xor-neon.c @@ -14,7 +14,7 @@ MODULE_LICENSE("GPL"); #ifndef __ARM_NEON__ -#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon' +#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon' #endif /* diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 32fae4dbd63bac62391928de43831520c0bb39bc..e2e4df3d11e53dfc38d8456340efc6d109c417a6 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -143,15 +143,15 @@ static int at91_pm_config_ws(unsigned int pm_mode, bool set) /* Check if enabled on SHDWC. 
*/ if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit)) - goto put_node; + goto put_device; mode |= wsi->pmc_fsmr_bit; if (wsi->set_polarity) polarity |= wsi->pmc_fsmr_bit; } -put_node: - of_node_put(np); +put_device: + put_device(&pdev->dev); } if (mode) { @@ -594,13 +594,13 @@ static int __init at91_pm_backup_init(void) np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); if (!np) - goto securam_fail; + goto securam_fail_no_ref_dev; pdev = of_find_device_by_node(np); of_node_put(np); if (!pdev) { pr_warn("%s: failed to find securam device!\n", __func__); - goto securam_fail; + goto securam_fail_no_ref_dev; } sram_pool = gen_pool_get(&pdev->dev, NULL); @@ -623,6 +623,8 @@ static int __init at91_pm_backup_init(void) return 0; securam_fail: + put_device(&pdev->dev); +securam_fail_no_ref_dev: iounmap(pm_data.sfrbu); pm_data.sfrbu = NULL; return ret; diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 318394ed5c7a97c2923c8c35134b88cb188ef238..5e11ad3164e08928bbfac37bb583312b2345ea77 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c @@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus, } else /* remote PCI bus */ base = cnspci->cfg1_regs + ((busno & 0xf) << 20); - return base + (where & 0xffc) + (devfn << 12); + return base + where + (devfn << 12); } static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index e1a949b47306d2eaf93a14b76e0ac720373e1c10..774a3e535ad0874779e4e46cb4d5a102e0fcde6c 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -1472,6 +1472,8 @@ static __init void da850_evm_init(void) if (ret) pr_warn("%s: dsp/rproc registration failed: %d\n", __func__, ret); + + regulator_has_full_constraints(); } #ifdef CONFIG_SERIAL_8250_CONSOLE diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 1fd3619f6a09f1311eeb8153ab627cedd80d3149..708931b470909fa12a8e6a3c0877321cb614c81c 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -685,6 +685,9 @@ static struct platform_device da8xx_lcdc_device = { .id = 0, .num_resources = ARRAY_SIZE(da8xx_lcdc_resources), .resource = da8xx_lcdc_resources, + .dev = { + .coherent_dma_mask = DMA_BIT_MASK(32), + } }; int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata) @@ -701,6 +704,46 @@ static struct resource da8xx_gpio_resources[] = { }, { /* interrupt */ .start = IRQ_DA8XX_GPIO0, + .end = IRQ_DA8XX_GPIO0, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO1, + .end = IRQ_DA8XX_GPIO1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO2, + .end = IRQ_DA8XX_GPIO2, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO3, + .end = IRQ_DA8XX_GPIO3, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO4, + .end = IRQ_DA8XX_GPIO4, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO5, + .end = IRQ_DA8XX_GPIO5, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO6, + .end = IRQ_DA8XX_GPIO6, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO7, + .end = IRQ_DA8XX_GPIO7, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DA8XX_GPIO8, .end = IRQ_DA8XX_GPIO8, .flags = IORESOURCE_IRQ, }, diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 
9f7d38d12c8886134a0b4d149b6593bf228efc4a..2b0f5d97ab7c1acd694c0edab73120ff408f7338 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -548,6 +548,36 @@ static struct resource dm355_gpio_resources[] = { }, { /* interrupt */ .start = IRQ_DM355_GPIOBNK0, + .end = IRQ_DM355_GPIOBNK0, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK1, + .end = IRQ_DM355_GPIOBNK1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK2, + .end = IRQ_DM355_GPIOBNK2, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK3, + .end = IRQ_DM355_GPIOBNK3, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK4, + .end = IRQ_DM355_GPIOBNK4, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK5, + .end = IRQ_DM355_GPIOBNK5, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM355_GPIOBNK6, .end = IRQ_DM355_GPIOBNK6, .flags = IORESOURCE_IRQ, }, diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index abcf2a5ed89b5e4780430911b1ef584711cf0142..83ca89a353002a9a02955bc8e1410c8b55edc461 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -267,6 +267,41 @@ static struct resource dm365_gpio_resources[] = { }, { /* interrupt */ .start = IRQ_DM365_GPIO0, + .end = IRQ_DM365_GPIO0, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO1, + .end = IRQ_DM365_GPIO1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO2, + .end = IRQ_DM365_GPIO2, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO3, + .end = IRQ_DM365_GPIO3, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO4, + .end = IRQ_DM365_GPIO4, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO5, + .end = IRQ_DM365_GPIO5, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO6, + .end = IRQ_DM365_GPIO6, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM365_GPIO7, .end = IRQ_DM365_GPIO7, .flags = IORESOURCE_IRQ, }, @@ -423,8 +458,8 @@ static s8 dm365_queue_priority_mapping[][2] = { }; static const struct dma_slave_map dm365_edma_map[] = { - { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) }, - { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) }, + { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) }, + { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) }, { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) }, { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) }, { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) }, diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 0720da7809a693eee06c22a80b0449a1cad06e17..de1ec6dc01e949cf1ee27fe89a375424f5f545f8 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -492,6 +492,26 @@ static struct resource dm644_gpio_resources[] = { }, { /* interrupt */ .start = IRQ_GPIOBNK0, + .end = IRQ_GPIOBNK0, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_GPIOBNK1, + .end = IRQ_GPIOBNK1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_GPIOBNK2, + .end = IRQ_GPIOBNK2, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_GPIOBNK3, + .end = IRQ_GPIOBNK3, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_GPIOBNK4, .end = IRQ_GPIOBNK4, .flags = IORESOURCE_IRQ, }, diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 6bd2ed069d0d7491a28b5af9665838e030e41ed3..d9b93e2806d222e7959a9d2b626e192205e60c2d 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -442,6 +442,16 @@ static struct resource dm646x_gpio_resources[] = { }, { /* interrupt 
*/ .start = IRQ_DM646X_GPIOBNK0, + .end = IRQ_DM646X_GPIOBNK0, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM646X_GPIOBNK1, + .end = IRQ_DM646X_GPIOBNK1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_DM646X_GPIOBNK2, .end = IRQ_DM646X_GPIOBNK2, .flags = IORESOURCE_IRQ, }, diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S index cd350dee4df376a3452299df86ba53815b50649c..efcd400b2abb3a876d9b36a7918ff6d0d3bf93cd 100644 --- a/arch/arm/mach-davinci/sleep.S +++ b/arch/arm/mach-davinci/sleep.S @@ -37,6 +37,7 @@ #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31) .text + .arch armv5te /* * Move DaVinci into deep sleep state * diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index faf48a3b1fea913813681430bc55f3697a2c0028..b82afe4d26fbbc9c4b9f37a660154b2b4219c5c1 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -330,6 +330,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), GPIO_LOOKUP_IDX("G", 0, NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), + { } }, }; diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c index be1f20fe28f448bee2b03c35dcd30d726ec34a69..fbe1db61115fa38ccdb21129d09b3f2cad54bc44 100644 --- a/arch/arm/mach-exynos/firmware.c +++ b/arch/arm/mach-exynos/firmware.c @@ -196,6 +196,7 @@ void __init exynos_firmware_init(void) return; addr = of_get_address(nd, 0, NULL, NULL); + of_node_put(nd); if (!addr) { pr_err("%s: No address specified.\n", __func__); return; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 7ead3acd6fa4d4f14463cb3606222894dbc991c4..088c34e99b02ff2ade29a74b1db899cb136959d5 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -434,8 +434,27 @@ static void exynos3250_pm_resume(void) static void exynos5420_prepare_pm_resume(void) { + unsigned int mpidr, cluster; + + mpidr = read_cpuid_mpidr(); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) WARN_ON(mcpm_cpu_powered_up()); + + if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { + /* + * When system is resumed on the LITTLE/KFC core (cluster 1), + * the DSCR is not properly updated until the power is turned + * on also for the cluster 0. Enable it for a while to + * propagate the SPNIDEN and SPIDEN signals from Secure JTAG + * block and avoid undefined instruction issue on CP14 reset. + */ + pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, + EXYNOS_COMMON_CONFIGURATION(0)); + pmu_raw_writel(0, + EXYNOS_COMMON_CONFIGURATION(0)); + } } static void exynos5420_pm_resume(void) @@ -639,8 +658,10 @@ void __init exynos_pm_init(void) if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + of_node_put(np); return; } + of_node_put(np); pm_data = (const struct exynos_pm_data *) match->data; diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index cbbdd84cf49ad0f70b464f79a336f07f24f5da82..84c400f96aa21162535e705d2a9454cb368da983 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -15,27 +15,6 @@ config ARCH_CATS Saying N will reduce the size of the Footbridge kernel. -config ARCH_PERSONAL_SERVER - bool "Compaq Personal Server" - select FOOTBRIDGE_HOST - select ISA - select ISA_DMA - select PCI - ---help--- - Say Y here if you intend to run this kernel on the Compaq - Personal Server. - - Saying N will reduce the size of the Footbridge kernel. 
- - The Compaq Personal Server is not available for purchase. - There are no product plans beyond the current research - prototypes at this time. Information is available at: - - - - If you have any questions or comments about the Compaq Personal - Server, send e-mail to . - config ARCH_EBSA285_ADDIN bool "EBSA285 (addin mode)" select ARCH_EBSA285 diff --git a/arch/arm/mach-footbridge/Makefile b/arch/arm/mach-footbridge/Makefile index a09f1041f14134fe7eb4918059089818e9cb0848..6262993c05558592ff9c3d6467bb6bd4f950b6e3 100644 --- a/arch/arm/mach-footbridge/Makefile +++ b/arch/arm/mach-footbridge/Makefile @@ -11,12 +11,10 @@ pci-y += dc21285.o pci-$(CONFIG_ARCH_CATS) += cats-pci.o pci-$(CONFIG_ARCH_EBSA285_HOST) += ebsa285-pci.o pci-$(CONFIG_ARCH_NETWINDER) += netwinder-pci.o -pci-$(CONFIG_ARCH_PERSONAL_SERVER) += personal-pci.o obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o obj-$(CONFIG_ARCH_EBSA285) += ebsa285.o dc21285-timer.o obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o -obj-$(CONFIG_ARCH_PERSONAL_SERVER) += personal.o dc21285-timer.o obj-$(CONFIG_PCI) +=$(pci-y) diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c index 0b2fd7e2e9b429fd40ecef879a76f8f7cbb3bacb..90b1e9be430e97e779e63a506174de7c26ad4bd5 100644 --- a/arch/arm/mach-footbridge/cats-pci.c +++ b/arch/arm/mach-footbridge/cats-pci.c @@ -15,14 +15,14 @@ #include /* cats host-specific stuff */ -static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; +static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 }; static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin) { return 0; } -static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->irq >= 255) return -1; /* not a valid interrupt. */ diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c index 6f28aaa9ca79b2c849b892535c192cb3fc290f5f..c3f280d08fa7fde7fa55c35a8b71908744adc95b 100644 --- a/arch/arm/mach-footbridge/ebsa285-pci.c +++ b/arch/arm/mach-footbridge/ebsa285-pci.c @@ -14,9 +14,9 @@ #include #include -static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; +static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI }; -static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (dev->vendor == PCI_VENDOR_ID_CONTAQ && dev->device == PCI_DEVICE_ID_CONTAQ_82C693) diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c index 9473aa0305e5f77883bb65e1f343ce7a8b9452e1..e8304392074b845d8ddfbbab20aac596ca7ba067 100644 --- a/arch/arm/mach-footbridge/netwinder-pci.c +++ b/arch/arm/mach-footbridge/netwinder-pci.c @@ -18,7 +18,7 @@ * We now use the slot ID instead of the device identifiers to select * which interrupt is routed where. 
*/ -static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 0: /* host bridge */ diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c deleted file mode 100644 index 4391e433a4b2fc3f9bd0843fe1a4a68077818809..0000000000000000000000000000000000000000 --- a/arch/arm/mach-footbridge/personal-pci.c +++ /dev/null @@ -1,58 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal-pci.c - * - * PCI bios-type initialisation for PCI machines - * - * Bits taken from various places. - */ -#include -#include -#include - -#include -#include -#include - -static int irqmap_personal_server[] __initdata = { - IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0, - IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI -}; - -static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot, - u8 pin) -{ - unsigned char line; - - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); - - if (line > 0x40 && line <= 0x5f) { - /* line corresponds to the bit controlling this interrupt - * in the footbridge. Ignore the first 8 interrupt bits, - * look up the rest in the map. IN0 is bit number 8 - */ - return irqmap_personal_server[(line & 0x1f) - 8]; - } else if (line == 0) { - /* no interrupt */ - return 0; - } else - return irqmap_personal_server[(line - 1) & 3]; -} - -static struct hw_pci personal_server_pci __initdata = { - .map_irq = personal_server_map_irq, - .nr_controllers = 1, - .ops = &dc21285_ops, - .setup = dc21285_setup, - .preinit = dc21285_preinit, - .postinit = dc21285_postinit, -}; - -static int __init personal_pci_init(void) -{ - if (machine_is_personal_server()) - pci_common_init(&personal_server_pci); - return 0; -} - -subsys_initcall(personal_pci_init); diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c deleted file mode 100644 index ca715754fc0077044601361ce5594fc0a8fb49de..0000000000000000000000000000000000000000 --- a/arch/arm/mach-footbridge/personal.c +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal.c - * - * Personal server (Skiff) machine fixup - */ -#include -#include - -#include -#include - -#include - -#include "common.h" - -MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer") - /* Maintainer: Jamey Hicks / George France */ - .atag_offset = 0x100, - .map_io = footbridge_map_io, - .init_irq = footbridge_init_irq, - .init_time = footbridge_timer_init, - .restart = footbridge_restart, -MACHINE_END - diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index bfeb25aaf9a2a7a48857a3896fb682d7d94568a8..326e870d712394fad445033defd8e3ff5975ebdd 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -16,30 +16,23 @@ #include "cpuidle.h" #include "hardware.h" -static atomic_t master = ATOMIC_INIT(0); -static DEFINE_SPINLOCK(master_lock); +static int num_idle_cpus = 0; +static DEFINE_SPINLOCK(cpuidle_lock); static int imx6q_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - if (atomic_inc_return(&master) == num_online_cpus()) { - /* - * With this lock, we prevent other cpu to exit and enter - * this function again and become the master. 
- */ - if (!spin_trylock(&master_lock)) - goto idle; + spin_lock(&cpuidle_lock); + if (++num_idle_cpus == num_online_cpus()) imx6_set_lpm(WAIT_UNCLOCKED); - cpu_do_idle(); - imx6_set_lpm(WAIT_CLOCKED); - spin_unlock(&master_lock); - goto done; - } + spin_unlock(&cpuidle_lock); -idle: cpu_do_idle(); -done: - atomic_dec(&master); + + spin_lock(&cpuidle_lock); + if (num_idle_cpus-- == num_online_cpus()) + imx6_set_lpm(WAIT_CLOCKED); + spin_unlock(&cpuidle_lock); return index; } diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c index 243a108a940b46c9c0d9b13d2802beb11f84d2d9..3708a71f30e62bce60661e9527d22fd307838c60 100644 --- a/arch/arm/mach-imx/cpuidle-imx6sx.c +++ b/arch/arm/mach-imx/cpuidle-imx6sx.c @@ -15,6 +15,7 @@ #include "common.h" #include "cpuidle.h" +#include "hardware.h" static int imx6sx_idle_finish(unsigned long val) { @@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void) * except for power up sw2iso which need to be * larger than LDO ramp up time. */ - imx_gpc_set_arm_power_up_timing(2, 1); + imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1); imx_gpc_set_arm_power_down_timing(1, 1); return cpuidle_register(&imx6sx_cpuidle_driver, NULL); diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c index c7169c2f94c4fd8cc018caa790c7b170e778eaf3..08c7892866c2df48732d15b9aa64329d0b009b75 100644 --- a/arch/arm/mach-imx/mach-imx51.c +++ b/arch/arm/mach-imx/mach-imx51.c @@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void) return; m4if_base = of_iomap(np, 0); + of_node_put(np); if (!m4if_base) { pr_err("Unable to map M4IF registers\n"); return; diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index b08e407d8d96f4f7a61d107ee877a64571a8acaf..529f4b5bbd3a7addfbe7492d3b83d516b96fd567 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -618,6 +618,28 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata IMX6Q_GPR1_GINT); } +static void imx6_pm_stby_poweroff(void) +{ + imx6_set_lpm(STOP_POWER_OFF); + imx6q_suspend_finish(0); + + mdelay(1000); + + pr_emerg("Unable to poweroff system\n"); +} + +static int imx6_pm_stby_poweroff_probe(void) +{ + if (pm_power_off) { + pr_warn("%s: pm_power_off already claimed %p %pf!\n", + __func__, pm_power_off, pm_power_off); + return -EBUSY; + } + + pm_power_off = imx6_pm_stby_poweroff; + return 0; +} + void __init imx6_pm_ccm_init(const char *ccm_compat) { struct device_node *np; @@ -634,6 +656,9 @@ void __init imx6_pm_ccm_init(const char *ccm_compat) val = readl_relaxed(ccm_base + CLPCR); val &= ~BM_CLPCR_LPM; writel_relaxed(val, ccm_base + CLPCR); + + if (of_property_read_bool(np, "fsl,pmic-stby-poweroff")) + imx6_pm_stby_poweroff_probe(); } void __init imx6q_pm_init(void) diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index a109f6482413660bf455e9573d8e59d4e7a8a0e7..0f916c245a2e9c75c97f42e3590e21b675f0a614 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c @@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev) sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), GFP_KERNEL); chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); - mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); + mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL, + "lm%x:00700", dev->id); + if (!lookup || !chipname || !mmciname) + return -ENOMEM; + lookup->dev_id = mmciname; /* * Offsets on GPIO block 1: diff --git a/arch/arm/mach-iop13xx/setup.c 
b/arch/arm/mach-iop13xx/setup.c index 53c316f7301e69fcbebbfe5d73bb48664180f5b6..fe4932fda01d7d0bc819c0ca4e6dcedb6b061081 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = { } }; -static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); +static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32); static struct iop_adma_platform_data iop13xx_adma_0_data = { .hw_id = 0, .pool_size = PAGE_SIZE, @@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = { .resource = iop13xx_adma_0_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_0_data, }, }; @@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = { .resource = iop13xx_adma_1_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_1_data, }, }; @@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = { .resource = iop13xx_adma_2_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop13xx_adma_2_data, }, }; diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c index db511ec2b1df6824cb6d3d24659cfebe2428d5ec..116feb6b261eb7b0e08ee7ce248e44682e537898 100644 --- a/arch/arm/mach-iop13xx/tpmi.c +++ b/arch/arm/mach-iop13xx/tpmi.c @@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = { } }; -u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64); +u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32); static struct platform_device iop13xx_tpmi_0_device = { .name = "iop-tpmi", .id = 0, @@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = { .resource = iop13xx_tpmi_0_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = { .resource = iop13xx_tpmi_1_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = { .resource = iop13xx_tpmi_2_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = { .resource = iop13xx_tpmi_3_resources, .dev = { .dma_mask = &iop13xx_tpmi_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b0434f93c85bf4256ae70049de8ae2c..23e8c93515d4ec5d0648a5be2f1a900e0f97ca8d 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -75,8 +75,7 @@ void __init n2100_map_io(void) /* * N2100 PCI. 
*/ -static int __init -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c index ef835d82cdb95ecb7f43d36a80f1dc7c9870a5b6..5783062224c393254c82ee45807e90c486ed95bb 100644 --- a/arch/arm/mach-ks8695/board-acs5k.c +++ b/arch/arm/mach-ks8695/board-acs5k.c @@ -100,7 +100,7 @@ static struct i2c_board_info acs5k_i2c_devs[] __initdata = { }, }; -static void acs5k_i2c_init(void) +static void __init acs5k_i2c_init(void) { /* The gpio interface */ gpiod_add_lookup_table(&acs5k_i2c_gpiod_table); diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h index 446edaeb78a71d07a8c719732455589ffa67b49e..a96abcf521b4b095a13658e51f409884d89b35b5 100644 --- a/arch/arm/mach-mmp/cputype.h +++ b/arch/arm/mach-mmp/cputype.h @@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void) #define cpu_is_pxa910() (0) #endif -#ifdef CONFIG_CPU_MMP2 +#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT) static inline int cpu_is_mmp2(void) { - return (((read_cpuid_id() >> 8) & 0xff) == 0x58); + return (((read_cpuid_id() >> 8) & 0xff) == 0x58) && + (((mmp_chip_id & 0xfff) == 0x410) || + ((mmp_chip_id & 0xfff) == 0x610)); } #else #define cpu_is_mmp2() (0) diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index e8ccf51c6f292959c4f373d7a802c19ddf278c09..ec0235899de20e63eb4d351775a1675817aafb6a 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile @@ -25,7 +25,7 @@ obj-y += $(i2c-omap-m) $(i2c-omap-y) led-y := leds.o -usb-fs-$(CONFIG_USB) := usb.o +usb-fs-$(CONFIG_USB_SUPPORT) := usb.o obj-y += $(usb-fs-m) $(usb-fs-y) # Specific board support diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S index ddc27638ba2a5e7807b9a904df874c5e913ef812..017c792be0a076469fa9231039f6115e91a20ed6 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S +++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S @@ -135,6 +135,8 @@ restart: orr r11, r11, r13 @ mask all requested interrupts str r11, [r12, #OMAP1510_GPIO_INT_MASK] + str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts + ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? beq hksw @ no - try next source @@ -142,7 +144,6 @@ restart: @@@@@@@@@@@@@@@@@@@@@@ @ Keyboard clock FIQ mode interrupt handler @ r10 now contains KEYBRD_CLK_MASK, use it - str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt bic r11, r11, r10 @ unmask it str r11, [r12, #OMAP1510_GPIO_INT_MASK] diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index b0dc7ddf5877d70eeda21df28b331acdf99a4cdb..b8ba763fe10863293378ba15d17744e44fdeb589 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -73,9 +73,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) * interrupts default to since commit 80ac93c27441 * requires interrupt already acked and unmasked. 
*/ - if (irq_chip->irq_ack) - irq_chip->irq_ack(d); - if (irq_chip->irq_unmask) + if (!WARN_ON_ONCE(!irq_chip->irq_unmask)) irq_chip->irq_unmask(d); } for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index dd28d2614d7fecc13a49bd06a2d437ec2c88c70c..d10d8831f5274bb1ed8ad6021a84286b4764e4af 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) struct modem_private_data *priv = port->private_data; int ret; + if (!priv) + return; + if (IS_ERR(priv->regulator)) return; diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c index 52de382fc8047148f272dd57d7dd01354ec33801..7e49dfda3d2f4491645abfbd9af4830c132d7dca 100644 --- a/arch/arm/mach-omap1/id.c +++ b/arch/arm/mach-omap1/id.c @@ -200,10 +200,10 @@ void __init omap_check_revision(void) printk(KERN_INFO "Unknown OMAP cpu type: 0x%02x\n", cpu_type); } - printk(KERN_INFO "OMAP%04x", omap_revision >> 16); + pr_info("OMAP%04x", omap_revision >> 16); if ((omap_revision >> 8) & 0xff) - printk(KERN_INFO "%x", (omap_revision >> 8) & 0xff); - printk(KERN_INFO " revision %i handled as %02xxx id: %08x%08x\n", + pr_cont("%x", (omap_revision >> 8) & 0xff); + pr_cont(" revision %i handled as %02xxx id: %08x%08x\n", die_rev, omap_revision & 0xff, system_serial_low, system_serial_high); } diff --git a/arch/arm/mach-omap1/include/mach/usb.h b/arch/arm/mach-omap1/include/mach/usb.h index 77867778d4ec700844fefe14146303a9f35f17fb..5429d86c7190d805ea003729837997751a772eaa 100644 --- a/arch/arm/mach-omap1/include/mach/usb.h +++ b/arch/arm/mach-omap1/include/mach/usb.h @@ -11,7 +11,7 @@ #include -#if IS_ENABLED(CONFIG_USB) +#if IS_ENABLED(CONFIG_USB_SUPPORT) void omap1_usb_init(struct omap_usb_config *pdata); #else static inline void omap1_usb_init(struct omap_usb_config *pdata) diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109c653c05d47ab49abb7c9c306b238..dae514c8276aac6218fd4a7f5df711769c048769 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); + /* Enter broadcast mode for periodic timers */ + tick_broadcast_enable(); + + /* Enter broadcast mode for one-shot timers */ tick_broadcast_enter(); /* @@ -218,15 +222,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, return index; } -/* - * For each cpu, setup the broadcast timer because local timers - * stops for the states above C1. 
- */ -static void omap_setup_broadcast_timer(void *arg) -{ - tick_broadcast_enable(); -} - static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -319,8 +314,5 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; - /* Configure the broadcast timer on each cpu */ - on_each_cpu(omap_setup_broadcast_timer, NULL, 1); - return cpuidle_register(idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 9500b6e2738019a4fb53e50c8150a2972ca8c391..5d73f2c0b117ebaad7f37035f7109ce34dd107ff 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) u32 enable_mask, enable_shift; u32 pipd_mask, pipd_shift; u32 reg; + int ret; if (dsi_id == 0) { enable_mask = OMAP4_DSI1_LANEENABLE_MASK; @@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) return -ENODEV; } - regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg); + ret = regmap_read(omap4_dsi_mux_syscon, + OMAP4_DSIPHY_SYSCON_OFFSET, + &reg); + if (ret) + return ret; reg &= ~enable_mask; reg &= ~pipd_mask; diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 68ba5f472f6badd30ccc1408361ce5ff20c54a3a..859c71c4e93244c050979916f83eca19f754a2a1 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -199,8 +199,8 @@ void __init omap2xxx_check_revision(void) pr_info("%s", soc_name); if ((omap_rev() >> 8) & 0x0f) - pr_info("%s", soc_rev); - pr_info("\n"); + pr_cont("%s", soc_rev); + pr_cont("\n"); } #define OMAP3_SHOW_FEATURE(feat) \ diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a7101234bd64da673815d10a0b75f0f2..17558be4bf0a528700939684e4f39c002e0215c7 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -50,6 +50,9 @@ #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 +#define SYS_NIRQ1_EXT_SYS_IRQ_1 7 +#define SYS_NIRQ2_EXT_SYS_IRQ_2 119 + static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); @@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) irq_chip_unmask_parent(d); } +/* + * The sys_nirq pins bypass peripheral modules and are wired directly + * to MPUSS wakeupgen. They get automatically inverted for GIC. 
+ */ +static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) +{ + bool inverted = false; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type &= ~IRQ_TYPE_LEVEL_MASK; + type |= IRQ_TYPE_LEVEL_HIGH; + inverted = true; + break; + case IRQ_TYPE_EDGE_FALLING: + type &= ~IRQ_TYPE_EDGE_BOTH; + type |= IRQ_TYPE_EDGE_RISING; + inverted = true; + break; + default: + break; + } + + if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && + d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) + pr_warn("wakeupgen: irq%li polarity inverted in dts\n", + d->hwirq); + + return irq_chip_set_type_parent(d, type); +} + #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); @@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { .irq_mask = wakeupgen_mask, .irq_unmask = wakeupgen_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_type = irq_chip_set_type_parent, + .irq_set_type = wakeupgen_irq_set_type, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index b226c8aaf8b1c94c687cadbbad55c89e845cf5cd..7074cfd1ff413b57ded78cf2c4bc150ecef77775 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -131,6 +131,9 @@ static int __init omap4_sram_init(void) struct device_node *np; struct gen_pool *sram_pool; + if (!soc_is_omap44xx() && !soc_is_omap54xx()) + return 0; + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); if (!np) pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index cd65ea4e9c54e633bd66a0178ca3f06ad16e8db9..ec3789ba17b8b7a73e88e2724dc48c54fba55e73 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2397,7 +2397,7 @@ static int __init _init(struct omap_hwmod *oh, void *data) * a stub; implementing this properly requires iclk autoidle usecounting in * the clock code. No return value. */ -static void __init _setup_iclk_autoidle(struct omap_hwmod *oh) +static void _setup_iclk_autoidle(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; @@ -2428,7 +2428,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh) * reset. Returns 0 upon success or a negative error code upon * failure. */ -static int __init _setup_reset(struct omap_hwmod *oh) +static int _setup_reset(struct omap_hwmod *oh) { int r; @@ -2489,7 +2489,7 @@ static int __init _setup_reset(struct omap_hwmod *oh) * * No return value. 
*/ -static void __init _setup_postsetup(struct omap_hwmod *oh) +static void _setup_postsetup(struct omap_hwmod *oh) { u8 postsetup_state; diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index 9ded7bf972e714dba0fc12e7b261b512823fe2a7..3b8fe014a3e94635408dcc47cbf8fc41d6719f24 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -946,7 +946,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e6c7061a8e73679695f7816c0461ccccf66151c6..3547f32822b6449d4a2bd630996d896641cd8f94 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -385,7 +385,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = { static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { .rev_offs = 0x0, .sysc_offs = 0x4, - .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type2, }; diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 7f02743edbe4c7880bb12feccb4d6a1683ca276d..dae726228770421bea84353400dda87e390fcb0e 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -305,108 +305,15 @@ static void __init omap3_logicpd_torpedo_init(void) } /* omap3pandora legacy devices */ -#define PANDORA_WIFI_IRQ_GPIO 21 -#define PANDORA_WIFI_NRESET_GPIO 23 static struct platform_device pandora_backlight = { .name = "pandora-backlight", .id = -1, }; -static struct regulator_consumer_supply pandora_vmmc3_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"), -}; - -static struct regulator_init_data pandora_vmmc3 = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply), - .consumer_supplies = pandora_vmmc3_supply, -}; - -static struct fixed_voltage_config pandora_vwlan = { - .supply_name = "vwlan", - .microvolts = 1800000, /* 1.8V */ - .gpio = PANDORA_WIFI_NRESET_GPIO, - .startup_delay = 50000, /* 50ms */ - .enable_high = 1, - .init_data = &pandora_vmmc3, -}; - -static struct platform_device pandora_vwlan_device = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &pandora_vwlan, - }, -}; - -static void pandora_wl1251_init_card(struct mmc_card *card) -{ - /* - * We have TI wl1251 attached to MMC3. Pass this information to - * SDIO core because it can't be probed by normal methods. 
- */ - if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) { - card->quirks |= MMC_QUIRK_NONSTD_SDIO; - card->cccr.wide_bus = 1; - card->cis.vendor = 0x104c; - card->cis.device = 0x9066; - card->cis.blksize = 512; - card->cis.max_dtr = 24000000; - card->ocr = 0x80; - } -} - -static struct omap2_hsmmc_info pandora_mmc3[] = { - { - .mmc = 3, - .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, - .gpio_cd = -EINVAL, - .gpio_wp = -EINVAL, - .init_card = pandora_wl1251_init_card, - }, - {} /* Terminator */ -}; - -static void __init pandora_wl1251_init(void) -{ - struct wl1251_platform_data pandora_wl1251_pdata; - int ret; - - memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); - - pandora_wl1251_pdata.power_gpio = -1; - - ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq"); - if (ret < 0) - goto fail; - - pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); - if (pandora_wl1251_pdata.irq < 0) - goto fail_irq; - - pandora_wl1251_pdata.use_eeprom = true; - ret = wl1251_set_platform_data(&pandora_wl1251_pdata); - if (ret < 0) - goto fail_irq; - - return; - -fail_irq: - gpio_free(PANDORA_WIFI_IRQ_GPIO); -fail: - pr_err("wl1251 board initialisation failed\n"); -} - static void __init omap3_pandora_legacy_init(void) { platform_device_register(&pandora_backlight); - platform_device_register(&pandora_vwlan_device); - omap_hsmmc_init(pandora_mmc3); - omap_hsmmc_late_init(pandora_mmc3); - pandora_wl1251_init(); } #endif /* CONFIG_ARCH_OMAP3 */ diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index ca03af8fe43ffc21a0252233e4cc2e1bb38cd771..ddf96adf65ab38ac13b957a0d70b57f621e6ad8e 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -77,83 +77,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused) return 0; } -/* - * This API is to be called during init to set the various voltage - * domains to the voltage as per the opp table. Typically we boot up - * at the nominal voltage. So this function finds out the rate of - * the clock associated with the voltage domain, finds out the correct - * opp entry and sets the voltage domain to the voltage specified - * in the opp entry - */ -static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, - const char *oh_name) -{ - struct voltagedomain *voltdm; - struct clk *clk; - struct dev_pm_opp *opp; - unsigned long freq, bootup_volt; - struct device *dev; - - if (!vdd_name || !clk_name || !oh_name) { - pr_err("%s: invalid parameters\n", __func__); - goto exit; - } - - if (!strncmp(oh_name, "mpu", 3)) - /* - * All current OMAPs share voltage rail and clock - * source, so CPU0 is used to represent the MPU-SS. 
- */ - dev = get_cpu_device(0); - else - dev = omap_device_get_by_hwmod_name(oh_name); - - if (IS_ERR(dev)) { - pr_err("%s: Unable to get dev pointer for hwmod %s\n", - __func__, oh_name); - goto exit; - } - - voltdm = voltdm_lookup(vdd_name); - if (!voltdm) { - pr_err("%s: unable to get vdd pointer for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - clk = clk_get(NULL, clk_name); - if (IS_ERR(clk)) { - pr_err("%s: unable to get clk %s\n", __func__, clk_name); - goto exit; - } - - freq = clk_get_rate(clk); - clk_put(clk); - - opp = dev_pm_opp_find_freq_ceil(dev, &freq); - if (IS_ERR(opp)) { - pr_err("%s: unable to find boot up OPP for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - bootup_volt = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - if (!bootup_volt) { - pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - voltdm_scale(voltdm, bootup_volt); - return 0; - -exit: - pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name); - return -EINVAL; -} - #ifdef CONFIG_SUSPEND static int omap_pm_enter(suspend_state_t suspend_state) { @@ -211,25 +134,6 @@ void omap_common_suspend_init(void *pm_suspend) } #endif /* CONFIG_SUSPEND */ -static void __init omap3_init_voltages(void) -{ - if (!soc_is_omap34xx()) - return; - - omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu"); - omap2_set_init_voltage("core", "l3_ick", "l3_main"); -} - -static void __init omap4_init_voltages(void) -{ - if (!soc_is_omap44xx()) - return; - - omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu"); - omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1"); - omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva"); -} - int __maybe_unused omap_pm_nop_init(void) { return 0; @@ -249,10 +153,6 @@ int __init omap2_common_pm_late_init(void) omap4_twl_init(); omap_voltage_late_init(); - /* Initialize the voltages */ - omap3_init_voltages(); - omap4_init_voltages(); - /* Smartreflex device init */ omap_devinit_smartreflex(); diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index f4971e4a86b26893578badccb6c0828a4c8821ab..ca7026958d425ae3f35f1ea849a4a7ce0e09b1a2 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -51,10 +51,12 @@ static int amx3_common_init(void) /* CEFUSE domain can be turned off post bootup */ cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); - if (cefuse_pwrdm) - omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); - else + if (!cefuse_pwrdm) pr_err("PM: Failed to get cefuse_pwrdm\n"); + else if (omap_type() != OMAP2_DEVICE_TYPE_GP) + pr_info("PM: Leaving EFUSE power domain active\n"); + else + omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); return 0; } diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 05858f966f7d9443f776fe2ee924c05e3e7b99a9..dfa65fc2c82bc14dbf69de2ccd27b9de9e83f037 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void) * registers, and omap3xxx_prm_reconfigure_io_chain() must be called. * No return value. 
*/ -static void __init omap3xxx_prm_enable_io_wakeup(void) +static void omap3xxx_prm_enable_io_wakeup(void) { if (prm_features & PRM_HAS_IO_WAKEUP) omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 7b95729e83594d330e4f0d5e3205a05b4d0751e1..38a1be6c3694f2922280c6a74cfcb792b1e4a8b6 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void) * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and * omap44xx_prm_reconfigure_io_chain() must be called. No return value. */ -static void __init omap44xx_prm_enable_io_wakeup(void) +static void omap44xx_prm_enable_io_wakeup(void) { s32 inst = omap4_prmst_get_prm_dev_inst(); diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 058a37e6d11c34955ab37f4df9833cdb0166fb6c..fd6e0671f957342e06e0a1601837f221969a01af 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -523,8 +523,10 @@ void omap_prm_reset_system(void) prm_ll_data->reset_system(); - while (1) + while (1) { cpu_relax(); + wfe(); + } } /** diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index c5c0ab8ac9f91991a0cf6791ebb065cd7e055395..024c1fbcc55aee5816526b5574528e6c9532a3aa 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -558,7 +558,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = { .exit = cm_x300_u2d_exit, }; -static void cm_x300_init_u2d(void) +static void __init cm_x300_init_u2d(void) { pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data); } diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 9e132b3e48c68ef767ef7055b8d37cddc9d6453c..9960ea158829b88b2bf572b4979f27bc0f53f5ac 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -184,7 +184,7 @@ static struct pxafb_mach_info littleton_lcd_info = { .lcd_conn = LCD_COLOR_TFT_16BPP, }; -static void littleton_init_lcd(void) +static void __init littleton_init_lcd(void) { pxa_set_fb_info(NULL, &littleton_lcd_info); } diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index e3851795d6d7d5127679093f8f318f39ee976234..68a536de542df75b8acc7594fb86ac29fefd2217 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -559,7 +559,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = { .flags = ENABLE_PORT_ALL | POWER_SENSE_LOW, }; -static void zeus_register_ohci(void) +static void __init zeus_register_ohci(void) { /* Port 2 is shared between host and client interface. 
*/ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE; diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c index fb48f3141fb4d7cd2403aaece876ae338a308bf2..c4c96661eb89ae2d60ba3eecf4c84e17a401243f 100644 --- a/arch/arm/mach-rpc/dma.c +++ b/arch/arm/mach-rpc/dma.c @@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id) } while (1); idma->state = ~DMA_ST_AB; - disable_irq(irq); + disable_irq_nosync(irq); return IRQ_HANDLED; } @@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma) DMA_FROM_DEVICE : DMA_TO_DEVICE); } + idma->dma_addr = idma->dma.sg->dma_address; + idma->dma_len = idma->dma.sg->length; + iomd_writeb(DMA_CR_C, dma_base + CR); idma->state = DMA_ST_AB; } diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c index 058ce73137e80bb92926e430ae794129392cdbb7..5d819b6ea428525414e71317b5d9c0f63b8a3164 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c @@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb, switch (val) { case CPUFREQ_PRECHANGE: - if (old_dvs & !new_dvs || - cur_dvs & !new_dvs) { + if ((old_dvs && !new_dvs) || + (cur_dvs && !new_dvs)) { pr_debug("%s: exiting dvs\n", __func__); cur_dvs = false; gpio_set_value(OSIRIS_GPIO_DVS, 1); } break; case CPUFREQ_POSTCHANGE: - if (!old_dvs & new_dvs || - !cur_dvs & new_dvs) { + if ((!old_dvs && new_dvs) || + (!cur_dvs && new_dvs)) { pr_debug("entering dvs\n"); cur_dvs = true; gpio_set_value(OSIRIS_GPIO_DVS, 0); diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c index b4037b603897d62e15eeafb099c4ef1163328d29..ff173e67eed2173eec8007be24985f45b32a5bd6 100644 --- a/arch/arm/mach-sunxi/mc_smp.c +++ b/arch/arm/mach-sunxi/mc_smp.c @@ -478,14 +478,18 @@ static void sunxi_mc_smp_cpu_die(unsigned int l_cpu) static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster) { u32 reg; + int gating_bit = cpu; pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) return -EINVAL; + if (is_a83t && cpu == 0) + gating_bit = 4; + /* gate processor power */ reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); - reg |= PRCM_PWROFF_GATING_REG_CORE(cpu); + reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit); writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); udelay(20); diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 028e50c6383fa4b1a15b36e23f961161ffb76dbe..a32c3b631484a9f2751fe1750a1750284aa419d4 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c @@ -3,6 +3,7 @@ #include #include #include "smc.h" +#include "pm.h" static int tango_pm_powerdown(unsigned long arg) { @@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = { .valid = suspend_valid_only_mem, }; -static int __init tango_pm_init(void) +void __init tango_pm_init(void) { suspend_set_ops(&tango_pm_ops); - return 0; } - -late_initcall(tango_pm_init); diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 0000000000000000000000000000000000000000..35ea705a0ee23370948a21029214b43fc4584959 --- /dev/null +++ b/arch/arm/mach-tango/pm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SUSPEND +void __init tango_pm_init(void); +#else +#define tango_pm_init NULL +#endif diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 
677dd7b5efd9007412a5e7047573f02594337ef4..824f90737b044145378137cbecd795168cc09a4f 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c @@ -2,6 +2,7 @@ #include #include #include "smc.h" +#include "pm.h" static void tango_l2c_write(unsigned long val, unsigned int reg) { @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") .dt_compat = tango_dt_compat, .l2c_aux_mask = ~0, .l2c_write_sec = tango_l2c_write, + .init_late = tango_pm_init, MACHINE_END diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S index 805f306fa6f707f055878a31f00a2f412a89f9c5..e31f167a8199443689a78f97de477d6f929db2c6 100644 --- a/arch/arm/mach-tegra/reset-handler.S +++ b/arch/arm/mach-tegra/reset-handler.S @@ -56,16 +56,16 @@ ENTRY(tegra_resume) cmp r6, #TEGRA20 beq 1f @ Yes /* Clear the flow controller flags for this CPU. */ - cpu_to_csr_reg r1, r0 + cpu_to_csr_reg r3, r0 mov32 r2, TEGRA_FLOW_CTRL_BASE - ldr r1, [r2, r1] + ldr r1, [r2, r3] /* Clear event & intr flag */ orr r1, r1, \ #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps @ & ext flags for CPU power mgnt bic r1, r1, r0 - str r1, [r2] + str r1, [r2, r3] 1: mov32 r9, 0xc09 diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 0f5381d1349418c5de2723be59811be30a402f8e..55bbbc3b328f0339733474c2d40704f732463b6d 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) static int __init ve_spc_clk_init(void) { - int cpu; + int cpu, cluster; struct clk *clk; + bool init_opp_table[MAX_CLUSTERS] = { false }; if (!info) return 0; /* Continue only if SPC is initialised */ @@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void) continue; } + cluster = topology_physical_package_id(cpu_dev->id); + if (init_opp_table[cluster]) + continue; + if (ve_init_opp_table(cpu_dev)) pr_warn("failed to initialise cpu%d opp table\n", cpu); + else if (dev_pm_opp_set_sharing_cpus(cpu_dev, + topology_core_cpumask(cpu_dev->id))) + pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); + else + init_opp_table[cluster] = true; } platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c index caa6d5fe9078326ea65d29d8d6359e1a82306969..b296ada974091b4a8f3fc10be453034e807918ac 100644 --- a/arch/arm/mach-zynq/platsmp.c +++ b/arch/arm/mach-zynq/platsmp.c @@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu) * 0x4: Jump by mov instruction * 0x8: Jumping address */ - memcpy((__force void *)zero, &zynq_secondary_trampoline, + memcpy_toio(zero, &zynq_secondary_trampoline, trampoline_size); writel(address, zero + trampoline_size); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index b169e580bf8298026c8ae5791193adef3bbef342..096afa16b5317b87797e6fbf53b98e46f8f32609 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -823,6 +823,7 @@ config CPU_BPREDICT_DISABLE config CPU_SPECTRE bool + select GENERIC_CPU_VULNERABILITIES config HARDEN_BRANCH_PREDICTOR bool "Harden the branch predictor against aliasing attacks" if EXPERT @@ -843,6 +844,16 @@ config HARDEN_BRANCH_PREDICTOR If unsure, say Y. 
+config HARDEN_BRANCH_HISTORY + bool "Harden Spectre style attacks against branch history" if EXPERT + depends on CPU_SPECTRE + default y + help + Speculation attacks against some high-performance processors can + make use of branch history to influence future speculation. When + taking an exception, a sequence of branches overwrites the branch + history, or branch history is invalidated. + config TLS_REG_EMUL bool select NEED_KUSER_HELPERS diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index bd2c739d80839bac052d4af850afed32c246358c..84a6bbaf8cb200c46963569fa3d92fca65cdc992 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -768,6 +768,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, return NULL; } +static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst) +{ + u32 instr = 0; + int fault; + + if (user_mode(regs)) + fault = get_user(instr, ip); + else + fault = probe_kernel_address(ip, instr); + + *inst = __mem_to_opcode_arm(instr); + + return fault; +} + +static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst) +{ + u16 instr = 0; + int fault; + + if (user_mode(regs)) + fault = get_user(instr, ip); + else + fault = probe_kernel_address(ip, instr); + + *inst = __mem_to_opcode_thumb16(instr); + + return fault; +} + static int do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -775,10 +805,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; - unsigned int fault; u16 tinstr = 0; int isize = 4; int thumb2_32b = 0; + int fault; if (interrupts_enabled(regs)) local_irq_enable(); @@ -787,15 +817,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (thumb_mode(regs)) { u16 *ptr = (u16 *)(instrptr & ~1); - fault = probe_kernel_address(ptr, tinstr); - tinstr = __mem_to_opcode_thumb16(tinstr); + + fault = alignment_get_thumb(regs, ptr, &tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ - u16 tinst2 = 0; - fault = probe_kernel_address(ptr + 1, tinst2); - tinst2 = __mem_to_opcode_thumb16(tinst2); + u16 tinst2; + fault = alignment_get_thumb(regs, ptr + 1, &tinst2); instr = __opcode_thumb32_compose(tinstr, tinst2); thumb2_32b = 1; } else { @@ -804,8 +833,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } } else { - fault = probe_kernel_address((void *)instrptr, instr); - instr = __mem_to_opcode_arm(instr); + fault = alignment_get_arm(regs, (void *)instrptr, &instr); } if (fault) { diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 215df435bfb9881f347d59f90aa0a98765d304b4..2149b47a0c5ace25958929ca44692df779950fbf 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -360,14 +360,16 @@ v7_dma_inv_range: ALT_UP(W(nop)) #endif mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line + addne r0, r0, r2 tst r1, r3 bic r1, r1, r3 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line -1: - mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line - add r0, r0, r2 cmp r0, r1 +1: + mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index 788486e830d3e644bbf4c608af6c75e64b5bb84a..32aa2a2aa260cb59eb10557f2c4159588e350b3c 100644 --- a/arch/arm/mm/cache-v7m.S +++ 
b/arch/arm/mm/cache-v7m.S @@ -73,9 +73,11 @@ /* * dcimvac: Invalidate data cache line by MVA to PoC */ -.macro dcimvac, rt, tmp - v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC +.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo +.macro dcimvac\c, rt, tmp + v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c .endm +.endr /* * dccmvau: Clean data cache line by MVA to PoU @@ -369,14 +371,16 @@ v7m_dma_inv_range: tst r0, r3 bic r0, r0, r3 dccimvacne r0, r3 + addne r0, r0, r2 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac tst r1, r3 bic r1, r1, r3 dccimvacne r1, r3 -1: - dcimvac r0, r3 - add r0, r0, r2 cmp r0, r1 +1: + dcimvaclo r0, r3 + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 66566472c15384c6eb9fc0bea6045b8c8287e972..8211cf45ece17f46762191cf45853db0998eba0f 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -830,7 +830,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - int ret; + int ret = -ENXIO; unsigned long nr_vma_pages = vma_pages(vma); unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); @@ -2400,4 +2400,6 @@ void arch_teardown_dma_ops(struct device *dev) return; arm_teardown_iommu_dma_ops(dev); + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ + set_dma_ops(dev, NULL); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 3232afb6fdc00be7da29c521068d3ba08e08e500..f49b996aebdb12cce83800fdefeb40090331706b 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) { unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; - if (fsr & FSR_WRITE) + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) mask = VM_WRITE; if (fsr & FSR_LNX_PF) mask = VM_EXEC; @@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if (fsr & FSR_WRITE) + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) flags |= FAULT_FLAG_WRITE; /* @@ -344,9 +344,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) regs, addr); } if (fault & VM_FAULT_RETRY) { - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; goto retry; } diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index c063708fa5032a5b4393be25ecfdc886d534aa31..9ecc2097a87a07e0c03bb5c915a1e175664db961 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -6,6 +6,7 @@ * Fault status register encodings. We steal bit 31 for our own purposes. 
*/ #define FSR_LNX_PF (1 << 31) +#define FSR_CM (1 << 13) #define FSR_WRITE (1 << 11) #define FSR_FS4 (1 << 10) #define FSR_FS3_0 (15) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 0cc8e04295a40dc1d16f308396afdfb7540aa48c..e1d330a269212e3b0176166a8b1be370ec01a3d8 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -196,6 +196,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { + phys_addr_t addr = __pfn_to_phys(pfn); + + if (__phys_to_pfn(addr) != pfn) + return 0; + return memblock_is_map_memory(__pfn_to_phys(pfn)); } EXPORT_SYMBOL(pfn_valid); @@ -713,7 +718,8 @@ static void update_sections_early(struct section_perm perms[], int n) if (t->flags & PF_KTHREAD) continue; for_each_thread(t, s) - set_section_perms(perms, n, true, s->mm); + if (s->mm) + set_section_perms(perms, n, true, s->mm); } set_section_perms(perms, n, true, current->active_mm); set_section_perms(perms, n, true, &init_mm); diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f866870db749c4bf2b0e5ff03f687cda5569e651..0b94b674aa91fa5b8ac994d143c96cece97f02c6 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -18,8 +18,9 @@ (((pgoff)<> (PAGE_SHIFT - 12)) static int mmap_is_legacy(struct rlimit *rlim_stack) { @@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; + unsigned long pad = stack_guard_gap; + + /* Account for stack randomization if necessary */ + if (current->flags & PF_RANDOMIZE) + pad += (STACK_RND_MASK << PAGE_SHIFT); + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - rnd); + return PAGE_ALIGN(STACK_TOP - gap - rnd); } /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e46a6a446cdd27126869bb97574d5bf51075e9e4..d8cbe772f6901bd4eab5edc0ae20960b85dc5b67 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1175,10 +1175,29 @@ void __init adjust_lowmem_bounds(void) */ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; + /* + * The first usable region must be PMD aligned. Mark its start + * as MEMBLOCK_NOMAP if it isn't + */ + for_each_memblock(memory, reg) { + if (!memblock_is_nomap(reg)) { + if (!IS_ALIGNED(reg->base, PMD_SIZE)) { + phys_addr_t len; + + len = round_up(reg->base, PMD_SIZE) - reg->base; + memblock_mark_nomap(reg->base, len); + } + break; + } + } + for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; + if (memblock_is_nomap(reg)) + continue; + if (reg->base < vmalloc_limit) { if (block_end > lowmem_limit) /* diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 81d0efb055c66080e976f9504c69866f7699b1a6..5461d589a1e25e7b63207f83968f63429860263b 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -274,6 +274,13 @@ .endm .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 +/* + * If we are building for big.Little with branch predictor hardening, + * we need the processor function tables to remain available after boot. 
+ */ +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .section ".rodata" +#endif .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) @@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions) .endif .size \name\()_processor_functions, . - \name\()_processor_functions +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .previous +#endif .endm .macro define_cache_functions name:req diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index 5544b82a2e7a553d015e23d77a9017682dd91f11..8c7d00cb63ef682ea7b85a640d7a02af58430166 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -7,8 +7,36 @@ #include #include #include +#include #include +#ifdef CONFIG_ARM_PSCI +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + + switch ((int)res.a0) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + + default: + return SPECTRE_VULNERABLE; + } +} +#else +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) +{ + return SPECTRE_VULNERABLE; +} +#endif + #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); @@ -37,13 +65,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static void cpu_v7_spectre_init(void) +static unsigned int spectre_v2_install_workaround(unsigned int method) { const char *spectre_v2_method = NULL; int cpu = smp_processor_id(); if (per_cpu(harden_branch_predictor_fn, cpu)) - return; + return SPECTRE_MITIGATED; + + switch (method) { + case SPECTRE_V2_METHOD_BPIALL: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_bpiall; + spectre_v2_method = "BPIALL"; + break; + + case SPECTRE_V2_METHOD_ICIALLU: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_iciallu; + spectre_v2_method = "ICIALLU"; + break; + + case SPECTRE_V2_METHOD_HVC: + per_cpu(harden_branch_predictor_fn, cpu) = + call_hvc_arch_workaround_1; + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; + spectre_v2_method = "hypervisor"; + break; + + case SPECTRE_V2_METHOD_SMC: + per_cpu(harden_branch_predictor_fn, cpu) = + call_smc_arch_workaround_1; + cpu_do_switch_mm = cpu_v7_smc_switch_mm; + spectre_v2_method = "firmware"; + break; + } + + if (spectre_v2_method) + pr_info("CPU%u: Spectre v2: using %s workaround\n", + smp_processor_id(), spectre_v2_method); + + return SPECTRE_MITIGATED; +} +#else +static unsigned int spectre_v2_install_workaround(unsigned int method) +{ + pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", + smp_processor_id()); + + return SPECTRE_VULNERABLE; +} +#endif + +static void cpu_v7_spectre_v2_init(void) +{ + unsigned int state, method = 0; switch (read_cpuid_part()) { case ARM_CPU_PART_CORTEX_A8: @@ -52,85 +128,142 @@ static void cpu_v7_spectre_init(void) case ARM_CPU_PART_CORTEX_A17: case ARM_CPU_PART_CORTEX_A73: case ARM_CPU_PART_CORTEX_A75: - if (processor.switch_mm != cpu_v7_bpiall_switch_mm) - goto bl_error; - per_cpu(harden_branch_predictor_fn, cpu) = - harden_branch_predictor_bpiall; - spectre_v2_method = "BPIALL"; + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_BPIALL; break; case ARM_CPU_PART_CORTEX_A15: 
case ARM_CPU_PART_BRAHMA_B15: - if (processor.switch_mm != cpu_v7_iciallu_switch_mm) - goto bl_error; - per_cpu(harden_branch_predictor_fn, cpu) = - harden_branch_predictor_iciallu; - spectre_v2_method = "ICIALLU"; + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_ICIALLU; + break; + + case ARM_CPU_PART_BRAHMA_B53: + /* Requires no workaround */ + state = SPECTRE_UNAFFECTED; break; -#ifdef CONFIG_ARM_PSCI default: /* Other ARM CPUs require no workaround */ - if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { + state = SPECTRE_UNAFFECTED; break; + } /* fallthrough */ - /* Cortex A57/A72 require firmware workaround */ + /* Cortex A57/A72 require firmware workaround */ case ARM_CPU_PART_CORTEX_A57: case ARM_CPU_PART_CORTEX_A72: { struct arm_smccc_res res; + state = spectre_v2_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + break; + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) break; + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 != 0) + return; + switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 != 0) - break; - if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) - goto bl_error; - per_cpu(harden_branch_predictor_fn, cpu) = - call_hvc_arch_workaround_1; - processor.switch_mm = cpu_v7_hvc_switch_mm; - spectre_v2_method = "hypervisor"; + method = SPECTRE_V2_METHOD_HVC; break; case PSCI_CONDUIT_SMC: - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 != 0) - break; - if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) - goto bl_error; - per_cpu(harden_branch_predictor_fn, cpu) = - call_smc_arch_workaround_1; - processor.switch_mm = cpu_v7_smc_switch_mm; - spectre_v2_method = "firmware"; + method = SPECTRE_V2_METHOD_SMC; break; default: + state = SPECTRE_VULNERABLE; break; } } -#endif } - if (spectre_v2_method) - pr_info("CPU%u: Spectre v2: using %s workaround\n", - smp_processor_id(), spectre_v2_method); - return; + if (state == SPECTRE_MITIGATED) + state = spectre_v2_install_workaround(method); -bl_error: - pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", - cpu); + spectre_v2_update_state(state, method); +} + +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +static int spectre_bhb_method; + +static const char *spectre_bhb_method_name(int method) +{ + switch (method) { + case SPECTRE_V2_METHOD_LOOP8: + return "loop"; + + case SPECTRE_V2_METHOD_BPIALL: + return "BPIALL"; + + default: + return "unknown"; + } +} + +static int spectre_bhb_install_workaround(int method) +{ + if (spectre_bhb_method != method) { + if (spectre_bhb_method) { + pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n", + smp_processor_id()); + + return SPECTRE_VULNERABLE; + } + + if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE) + return SPECTRE_VULNERABLE; + + spectre_bhb_method = method; + } + + pr_info("CPU%u: Spectre BHB: using %s workaround\n", + smp_processor_id(), spectre_bhb_method_name(method)); + + return SPECTRE_MITIGATED; } #else -static void cpu_v7_spectre_init(void) +static int spectre_bhb_install_workaround(int method) { + return SPECTRE_VULNERABLE; } #endif +static void cpu_v7_spectre_bhb_init(void) +{ + unsigned int state, method = 0; + + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_BRAHMA_B15: + case 
ARM_CPU_PART_CORTEX_A57: + case ARM_CPU_PART_CORTEX_A72: + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_LOOP8; + break; + + case ARM_CPU_PART_CORTEX_A73: + case ARM_CPU_PART_CORTEX_A75: + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_BPIALL; + break; + + default: + state = SPECTRE_UNAFFECTED; + break; + } + + if (state == SPECTRE_MITIGATED) + state = spectre_bhb_install_workaround(method); + + spectre_v2_update_state(state, method); +} + static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, u32 mask, const char *msg) { @@ -159,16 +292,18 @@ static bool check_spectre_auxcr(bool *warned, u32 bit) void cpu_v7_ca8_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); } void cpu_v7_ca15_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); + cpu_v7_spectre_bhb_init(); } void cpu_v7_bugs_init(void) { - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); + cpu_v7_spectre_bhb_init(); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6fe52819e0148c6f3f04b11c75e278cd0b04a1f9..339eb17c9808e2c04a043485e42e5d29a49de347 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm) hvc #0 ldmfd sp!, {r0 - r3} b cpu_v7_switch_mm -ENDPROC(cpu_v7_smc_switch_mm) +ENDPROC(cpu_v7_hvc_switch_mm) #endif ENTRY(cpu_v7_iciallu_switch_mm) mov r3, #0 diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index 47a5acc644333f7f995293ef6b3dc6fb3527270a..9c2978c128d97cf24ba3f5aff70dc14179ae17b3 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -135,10 +135,11 @@ __v7m_setup_cont: dsb mov r6, lr @ save LR ldr sp, =init_thread_union + THREAD_START_SP - stmia sp, {r0-r3, r12} cpsie i svc #0 1: cpsid i + /* Calculate exc_ret */ + orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK ldmia sp, {r0-r3, r12} str r5, [r12, #11 * 4] @ restore the original SVC vector entry mov lr, r6 @ restore LR diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 25b3ee85066e16e95652b9963645dab61bcb7bbd..8fcdb65a4c68b19ecfcd7642ff4b4d3dbf26cea9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -39,6 +39,10 @@ * +-----+ * |RSVD | JIT scratchpad * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) + * | ... | caller-saved registers + * +-----+ + * | ... | arguments passed on stack + * ARM_SP during call => +-----| * | | * | ... | Function call stack * | | @@ -66,6 +70,12 @@ * * When popping registers off the stack at the end of a BPF function, we * reference them via the current ARM_FP register. + * + * Some eBPF operations are implemented via a call to a helper function. + * Such calls are "invisible" in the eBPF code, so it is up to the calling + * program to preserve any caller-saved ARM registers during the call. The + * JIT emits code to push and pop those registers onto the stack, immediately + * above the callee stack frame. 
*/ #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ @@ -73,6 +83,8 @@ #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) +#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3) + enum { /* Stack layout - these are offsets from (top of stack - 4) */ BPF_R2_HI, @@ -467,6 +479,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) { + const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1); const s8 *tmp = bpf2a32[TMP_REG_1]; #if __LINUX_ARM_ARCH__ == 7 @@ -498,11 +511,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) emit(ARM_MOV_R(ARM_R0, rm), ctx); } + /* Push caller-saved registers on stack */ + emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx); + /* Call appropriate function */ emit_mov_i(ARM_IP, op == BPF_DIV ? (u32)jit_udiv32 : (u32)jit_mod32, ctx); emit_blx_r(ARM_IP, ctx); + /* Restore caller-saved registers from stack */ + emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx); + /* Save return value */ if (rd != ARM_R0) emit(ARM_MOV_R(rd, ARM_R0), ctx); @@ -1562,6 +1581,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index cc649a1e46da27ae45488d743597edcc251430ff..7cb3e0453fcd928eb48bbd80dec6b98f38179820 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -88,7 +88,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail) struct frame_tail buftail[2]; /* Also check accessibility of one struct frame_tail beyond */ - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + if (!access_ok(tail, sizeof(buftail))) return NULL; if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) return NULL; diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index a4d1f8de3b5b23453ee4738723164a5ba8405424..d9612221e4848971f4ea27cf4f5d4c319073e439 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = { .resource = iop3xx_dma_0_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_dma_0_data, }, }; @@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = { .resource = iop3xx_dma_1_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_dma_1_data, }, }; @@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = { .resource = iop3xx_aau_resources, .dev = { .dma_mask = &iop3xx_adma_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = (void *) &iop3xx_aau_data, }, }; diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index d4012d6c0dcb22deb2452857041483a99264bf4d..5ca4c5fd627a5e0a985b57f10314b11696736814 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -1449,7 +1449,6 @@ static void __exit omap_system_dma_exit(void) MODULE_DESCRIPTION("OMAP 
SYSTEM DMA DRIVER"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Texas Instruments Inc"); /* diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a2399fd66e97cef3db011508dc73c718c9456bc9..1e970873439cdd9812ba9d6ad974ea150d173b31 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = { .resource = orion_xor0_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor0_pdata, }, }; @@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = { .resource = orion_xor1_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor1_pdata, }, }; diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1e7fb1a89cb41bc05ed71a1afd66f..f519199741837664df922fc60e31c42de8eab145 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) if (ssp == NULL) return -ENODEV; - iounmap(ssp->mmio_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); @@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) list_del(&ssp->node); mutex_unlock(&ssp_lock); - kfree(ssp); return 0; } diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index b600e38364eb64ccd99ce165e4d4a13825408937..377ff9cda667a11135fef4f12d84d160a3b08aaa 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -256,7 +256,7 @@ config S3C_PM_DEBUG_LED_SMDK config SAMSUNG_PM_CHECK bool "S3C2410 PM Suspend Memory CRC" - depends on PM + depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210) select CRC32 help Enable the PM code's memory area checksum over sleep. This option diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c index ce42cc640a61a32b66ad84fdb8e2f82af976f4ef..71d85ff323f73d0e13d98ad5803287c1aba38e18 100644 --- a/arch/arm/plat-samsung/watchdog-reset.c +++ b/arch/arm/plat-samsung/watchdog-reset.c @@ -62,6 +62,7 @@ void samsung_wdt_reset(void) #ifdef CONFIG_OF static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt" }, + { .compatible = "samsung,s3c6410-wdt" }, {}, }; diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index b2aa9b32bff2b5e9d2e6d102a4cd58f6cf8c5676..0dc23fc227ed2745215eeda46965dcac1524281b 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, &optprobe_template_entry, + memcpy(code, (unsigned long *)&optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. 
*/ diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c index a9dd619c6c290042d052f03c351d1dae4760f7e5..7bdbf5d5c47d3c0864ca9d48026989b94d06ffb5 100644 --- a/arch/arm/vdso/vgettimeofday.c +++ b/arch/arm/vdso/vgettimeofday.c @@ -18,9 +18,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata) u64 cycle_now; u64 nsec; - cycle_now = arch_counter_get_cntvct(); + isb(); + cycle_now = read_sysreg(CNTVCT); cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask; diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index dc7e6b50ef674839a21480c69e12bec144d19194..66c5e693428ab37bd3d4c23f6ef97ab598323e32 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread) * Save the current VFP state into the provided structures and prepare * for entry into a new function (signal handler). */ -int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, - struct user_vfp_exc __user *ufp_exc) +int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, + struct user_vfp_exc *ufp_exc) { struct thread_info *thread = current_thread_info(); struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; - int err = 0; /* Ensure that the saved hwstate is up-to-date. */ vfp_sync_hwstate(thread); @@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. */ - err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, - sizeof(hwstate->fpregs)); + memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); + /* * Copy the status and control register. */ - __put_user_error(hwstate->fpscr, &ufp->fpscr, err); + ufp->fpscr = hwstate->fpscr; /* * Copy the exception registers. */ - __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); - __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); - __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); - - if (err) - return -EFAULT; + ufp_exc->fpexc = hwstate->fpexc; + ufp_exc->fpinst = hwstate->fpinst; + ufp_exc->fpinst2 = hwstate->fpinst2; /* Ensure that VFP is disabled. 
*/ vfp_flush_hwstate(thread); diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c index b4d78959cadf08df3cd3f983c162fdbe0dab46e3..bc9a37b3cecd6247c92c98c03221240898241bc8 100644 --- a/arch/arm/xen/efi.c +++ b/arch/arm/xen/efi.c @@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void) efi.get_variable = xen_efi_get_variable; efi.get_next_variable = xen_efi_get_next_variable; efi.set_variable = xen_efi_set_variable; + efi.set_variable_nonblocking = xen_efi_set_variable; efi.query_variable_info = xen_efi_query_variable_info; + efi.query_variable_info_nonblocking = xen_efi_query_variable_info; efi.update_capsule = xen_efi_update_capsule; efi.query_capsule_caps = xen_efi_query_capsule_caps; efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count; diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index 0641ba54ab62ae9786cb12ab88b2666b5c783b7a..e04b5044f597339dc9275f24e15dc6966a0d6cf2 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -61,11 +61,12 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) unsigned long __pfn_to_mfn(unsigned long pfn) { - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; struct xen_p2m_entry *entry; unsigned long irqflags; read_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (entry->pfn <= pfn && @@ -91,10 +92,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i; for (i = 0; i < count; i++) { + struct gnttab_unmap_grant_ref unmap; + int rc; + if (map_ops[i].status) continue; - set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); + if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap.host_addr = map_ops[i].host_addr, + unmap.handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap.dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap.dev_bus_addr = 0; + + /* + * Pre-populate the status field, to be recognizable in + * the log message below. 
+ */ + unmap.status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + &unmap, 1); + if (rc || unmap.status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st=%d\n", + rc, unmap.status); } return 0; @@ -122,10 +152,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, int rc; unsigned long irqflags; struct xen_p2m_entry *p2m_entry; - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; if (mfn == INVALID_P2M_ENTRY) { write_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b1a0e95c7511b9256f1953c00d0ca32994b2160..6498279140fc43c333fbbe8c8ed2270f1b41feea 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -71,10 +71,11 @@ config ARM64 select ARM_GIC_V3 select ARM_GIC_V3_ITS if PCI select ARM_PSCI_FW - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select COMMON_CLK select CPU_PM if (SUSPEND || CPU_IDLE) + select CRC32 select DCACHE_WORD_ACCESS select DMA_DIRECT_OPS select EDAC_SUPPORT @@ -84,6 +85,7 @@ config ARM64 select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP select GENERIC_IRQ_MULTI_HANDLER @@ -103,6 +105,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_HUGE_VMAP + select HAVE_ARCH_HUGE_VMALLOC select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KGDB @@ -141,7 +144,9 @@ config ARM64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_RCU_TABLE_FREE + select HAVE_RCU_TABLE_INVALIDATE select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS @@ -166,6 +171,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_LIVEPATCH_WO_FTRACE help ARM 64-bit (AArch64) Linux support. @@ -251,11 +257,15 @@ config GENERIC_CALIBRATE_DELAY def_bool y config ZONE_DMA32 - def_bool y + bool "Support DMA32 zone" if EXPERT + default y config HAVE_GENERIC_GUP def_bool y +config ARCH_ENABLE_MEMORY_HOTPLUG + def_bool y + config SMP def_bool y @@ -280,8 +290,13 @@ config ARCH_SUPPORTS_UPROBES config ARCH_PROC_KCORE_TEXT def_bool y +config ARCH_HAS_CPU_RELAX + def_bool y + source "arch/arm64/Kconfig.platforms" +source "kernel/livepatch/Kconfig" + menu "Bus support" config PCI @@ -434,7 +449,7 @@ config ARM64_ERRATUM_834220 config ARM64_ERRATUM_845719 bool "Cortex-A53: 845719: a load might read incorrect data" - depends on COMPAT + depends on AARCH32_EL0 default y help This option adds an alternative code sequence to work around ARM @@ -479,6 +494,40 @@ config ARM64_ERRATUM_1024718 If unsure, say Y. +config ARM64_ERRATUM_1463225 + bool "Cortex-A76: Software Step might prevent interrupt recognition" + default y + help + This option adds a workaround for Arm Cortex-A76 erratum 1463225. + + On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping + of a system call instruction (SVC) can prevent recognition of + subsequent interrupts when software stepping is disabled in the + exception handler of the system call and either kernel debugging + is enabled or VHE is in use. 
+ + Work around the erratum by triggering a dummy step exception + when handling a system call from a task that is being stepped + in a VHE configuration of the kernel. + + If unsure, say Y. + +config ARM64_ERRATUM_1742098 + bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence" + depends on COMPAT + default y + help + This option removes the AES hwcap for aarch32 user-space to + workaround erratum 1742098 on Cortex-A57 and Cortex-A72. + + Affected parts may corrupt the AES state if an interrupt is + taken between a pair of AES instructions. These instructions + are only present if the cryptography extensions are present. + All software should have a fallback implementation for CPUs + that don't implement the cryptography extensions. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -537,6 +586,16 @@ config CAVIUM_ERRATUM_30115 If unsure, say Y. +config HISILICON_ERRATUM_162100801 + bool "Hip09 162100801 erratum support" + default y + help + When enabled GICv4.1 in hip09, there are some invalid vPE config + in configuration tables for some situation, which will cause vSGI + interrupts lost. So fix it by sending vinvall commands after vmovp. + + If unsure, say Y. + config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y @@ -587,6 +646,15 @@ config HISILICON_ERRATUM_161600802 If unsure, say Y. +config HISILICON_ERRATUM_1980005 + bool "Hisilicon erratum IDC support" + default n + help + The HiSilicon TSV100/200 SoC support idc but report wrong value to + kernel. + + If unsure, say N. + config QCOM_FALKOR_ERRATUM_E1041 bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" default y @@ -597,6 +665,24 @@ config QCOM_FALKOR_ERRATUM_E1041 If unsure, say Y. +config HISILICON_ERRATUM_HIP08_RU_PREFETCH + bool "HIP08 RU: HiSilicon HIP08 cache readunique might cause performance drop" + default y + help + The HiSilicon HIP08 cache readunique might compromise performance, + use cmdline "readunique_prefetch_disable" to disable RU prefetch. + + If unsure, say Y. + +config HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF + bool "HIP08 RU: disable HiSilicon HIP08 cache readunique by default" + depends on HISILICON_ERRATUM_HIP08_RU_PREFETCH + default n + help + Disable HiSilicon HIP08 cache readunique by default. + + If unsure, say N. + endmenu @@ -697,8 +783,11 @@ config ARM64_PA_BITS default 52 if ARM64_PA_BITS_52 config CPU_BIG_ENDIAN - bool "Build big-endian kernel" - help + bool "Build big-endian kernel" + depends on !LD_IS_LLD || LLD_VERSION >= 130000 + # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c + depends on AS_IS_GNU || AS_VERSION >= 150000 + help Say Y if you plan on running a kernel in big-endian mode. config SCHED_MC @@ -728,6 +817,55 @@ config HOTPLUG_CPU Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. +config ARM64_BOOTPARAM_HOTPLUG_CPU0 + bool "Set default setting of arm64_cpu0_hotpluggable" + default n + depends on HOTPLUG_CPU + help + Set whether default state of arm64_cpu0_hotpluggable is on or off. + + Say Y here to enable CPU0 hotplug by default. If this switch + is turned on, there is no need to give arm64_cpu0_hotplug kernel + parameter and the CPU0 hotplug feature is enabled by default. 
+ + Please note: there may be some CPU0 dependencies if you want + to enable the CPU0 hotplug feature either by this switch or by + the arm64_cpu0_hotplug kernel parameter. + + For example: + We found the following issue related to CPU0 dependency: + 1. echo 0 > /sys/devices/system/cpu/cpu0/online + 2. reboot + MegaRAID Tri-Mode SAS3508 may block the reboot process. + + Please make sure the dependencies are under your control before + you enable this feature. + + Say N if you don't want to enable the CPU0 hotplug feature by default. + You can still enable the CPU0 hotplug feature at boot via the kernel + parameter arm64_cpu0_hotplug. + +config ARM64_ERR_RECOV + bool "Support arm64 RAS error recovery" + depends on ACPI_APEI_SEA && MEMORY_FAILURE + help + With the ARM v8.2 RAS Extension, SEAs are usually triggered when memory errors + are consumed. In some cases, if the error address is in a clean page or a + read-only page, there is a chance to recover. For example, if the error occurs in an + instruction page, we can reread that page from disk instead of killing the process. + + Say Y if unsure. + +config MPAM + bool "Support Memory Partitioning and Monitoring" + default n + depends on ACPI + select RESCTRL + select ACPI_MPAM if ACPI + help + Memory Partitioning and Monitoring; more exactly, Memory system + performance resource Partitioning and Monitoring. + # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -749,6 +887,20 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. +config NUMA_AWARE_SPINLOCKS + bool "Numa-aware spinlocks" + depends on NUMA && QUEUED_SPINLOCKS + default n + help + Introduce NUMA (Non Uniform Memory Access) awareness into + the slow path of spinlocks. + + The kernel will try to keep the lock on the same node, + thus reducing the number of remote cache misses, while + trading some of the short-term fairness for better performance. + + Say N if you want absolute first-come, first-served fairness. + config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA @@ -787,6 +939,7 @@ config ARCH_FLATMEM_ENABLE config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM + select HAVE_MEMBLOCK_PFN_VALID config HW_PERF_EVENTS def_bool y @@ -801,6 +954,15 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_LLC_128_LINE_SIZE + bool "Force 128 bytes alignment for fitting LLC cacheline" + depends on ARM64 + default n + help + As some machines' LLC cacheline size may be up to + 128 bytes, performance can improve when hot data is + aligned to 128-byte LLC cachelines. + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- @@ -854,6 +1016,18 @@ config CRASH_DUMP For more details see Documentation/kdump/kdump.txt +config ARM64_CPU_PARK + bool "Support CPU PARK on kexec" + depends on SMP + depends on KEXEC_CORE + help + This enables support for the CPU PARK feature in + order to save the time of taking CPUs down and back up across kexec. + CPU park is a state entered through kexec: a CPU spins in a loop + instead of dying before jumping to the new kernel, and it + jumps out of the loop to the new kernel entry in + smp_init. + config XEN_DOM0 def_bool y depends on XEN @@ -943,9 +1117,18 @@ config ARM64_SSBD If unsure, say Y. 
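(A minimal illustrative sketch, not part of the patch: the ARCH_LLC_128_LINE_SIZE help above is about fitting hot data to 128-byte last-level-cache lines. The structure below is hypothetical and only shows the alignment idea, not the mechanism the option itself implements.)

	/*
	 * Keep two hot, independently written counters on separate 128-byte
	 * LLC lines so that writers on different CPUs do not false-share.
	 */
	struct hot_stats {
		unsigned long enqueued __attribute__((aligned(128)));
		unsigned long dequeued __attribute__((aligned(128)));
	};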
+config MITIGATE_SPECTRE_BRANCH_HISTORY + bool "Mitigate Spectre style attacks against branch history" if EXPERT + default y + help + Speculation attacks against some high-performance processors can + make use of branch history to influence future speculation. + When taking an exception from user-space, a sequence of branches + or a firmware call overwrites the branch history. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" - depends on COMPAT + depends on AARCH32_EL0 depends on SYSCTL help Legacy software support may require certain instructions @@ -1173,6 +1356,32 @@ config ARM64_MODULE_PLTS bool select HAVE_MOD_ARCH_SPECIFIC +config ARM64_PSEUDO_NMI + bool "Support for NMI-like interrupts" + select CONFIG_ARM_GIC_V3 + select HAVE_PERF_EVENTS_NMI + help + Adds support for mimicking Non-Maskable Interrupts through the use of + GIC interrupt priority. This support requires version 3 or later of + Arm GIC. + + This high priority configuration for interrupts needs to be + explicitly enabled by setting the kernel parameter + "irqchip.gicv3_pseudo_nmi" to 1. + + If unsure, say N + +if ARM64_PSEUDO_NMI +config ARM64_DEBUG_PRIORITY_MASKING + bool "Debug interrupt priority masking" + help + This adds runtime checks to functions enabling/disabling + interrupts when using priority masking. The additional checks verify + the validity of ICC_PMR_EL1 when calling concerned functions. + + If unsure, say N +endif + config RELOCATABLE bool help @@ -1218,6 +1427,114 @@ config RANDOMIZE_MODULE_REGION_FULL a limited range that contains the [_stext, _etext] interval of the core kernel, so branch relocations are always in range. +config ARCH_GET_PREFERRED_SIBLING_CPUMASK + bool "Get preferred sibling cpumask from mpidr" + depends on ARM64 + default n + help + For some architectures, masking the underlying processor topology + differences can make software unable to identify the CPU distance, + which results in performance fluctuations. + + So we provide an additional interface for getting the preferred siblings' + cpumask supported by the platform. This siblings' cpumask indicates those + CPUs which are clustered at relatively short distances. NOTE: this + depends heavily on the specific implementation of the specific platform. + + +menuconfig ASCEND_FEATURES + bool "Support Ascend Features" + depends on ARM64 + help + The Ascend chips use the HiSilicon DaVinci architecture and mainly + focus on the AI and machine learning area; they contain many external features. + + Enable this config to enable a selective list of these features. + + If unsure, say Y + +if ASCEND_FEATURES + +config ASCEND_DVPP_MMAP + bool "Enable support for the DvPP mmap" + default y + help + The DvPP is the DaVinci Video Pre-Processor; it mainly consists of the VDEC + (Video Decode), VENC (Video Encode), JPEG D/E (Decode/Encode), PNGD + (PNG Decode) and VPC (Video Process) processors. + + The DvPP can only use a limited range of virtual addresses, just as the + Ascend310/910 can only use a limited range of virtual addresses (default + 4 GB), so add a new mmap flag named MAP_DVPP to allocate the + special memory for the DvPP processor; the new flag is only valid for the Ascend + platform. + +config ASCEND_OOM + bool "Enable support for disabling the OOM killer" + default y + help + In some cases we hope that the OOM killer will not kill the process when OOM occurs, + that we are able to notify the black box to report the event, and that we are able to trigger + a panic to locate the problem.
+ vm.enable_oom_killer: + 0: disable the OOM killer + 1: enable the OOM killer (default, compatible with mainline) + 2: disable the OOM killer and panic on OOM + +config ASCEND_IOPF_HIPRI + bool "Enable support for highpri iopf workqueue" + default y + depends on IOMMU_PAGE_FAULT + help + The iopf workqueue, that is the IO Page Fault workqueue, is used for device + page faults. + + This option enables high priority for the iopf workqueue. If enabled, the + CPU which processes IOPF work is the same as that which processes IOPF + event interrupts. + +config ASCEND_CHARGE_MIGRATE_HUGEPAGES + bool "Enable support for migrate hugepages" + depends on HUGETLBFS + default y + help + When reserved hugepages are used up, we attempt to apply for migrate + hugepages. We expect that the migrated hugepages that are applied for + can be charged in memcg to limit the memory usage. + + This option enables the feature to charge migrate hugepages to the memory + cgroup. + +config ASCEND_WATCHDOG_SYSFS_CONFIGURE + bool "Configure watchdog timeout and pretimeout via sysfs" + depends on WATCHDOG_SYSFS + help + Add an interface for the user to configure the timeout and pretimeout through + sysfs. Enable this config carefully since a user could change the + timeout value without notifying the userspace process that pings the + watchdog. The kernel thread can be notified, so it is OK to make that + change when the watchdog is pinged by a kernel thread. + +config ASCEND_SHARE_POOL + bool "Enable support for the Share Pool Memory" + default n + select ARCH_USES_HIGH_VMA_FLAGS + select MM_OWNER + help + This feature allows multiple processes to share virtual memory both + at kernel and user level; it is only enabled for the Ascend platform. + +config ASCEND_CLEAN_CDM + bool "Move the management structure for HBM to DDR" + def_bool n + depends on COHERENT_DEVICE + help + The cdm nodes are sometimes more likely to raise an ECC error, which may + crash the kernel if the essential structures go wrong. So move + the management structures for HBM nodes to the DDR nodes of the same + partition to reduce the probability of kernel crashes. +endif + endmenu menu "Boot options" @@ -1283,9 +1600,13 @@ config DMI endmenu config COMPAT + def_bool y + depends on AARCH32_EL0 || ARM64_ILP32 + +config AARCH32_EL0 bool "Kernel support for 32-bit EL0" + def_bool y depends on ARM64_4K_PAGES || EXPERT - select COMPAT_BINFMT_ELF if BINFMT_ELF select HAVE_UID16 select OLD_SIGSUSPEND3 select COMPAT_OLD_SIGACTION @@ -1301,6 +1622,13 @@ config COMPAT If you want to execute 32-bit userspace applications, say Y. +config ARM64_ILP32 + bool "Kernel support for ILP32" + help + This option enables support for AArch64 ILP32 user space. ILP32 + is an ABI where longs and pointers are 32 bits but it uses the AArch64 + instruction set. + config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC @@ -1320,6 +1648,44 @@ config ARCH_HIBERNATION_HEADER config ARCH_SUSPEND_POSSIBLE def_bool y +config UCE_KERNEL_RECOVERY + bool "UCE kernel recovery from special scenarios" + def_bool y + depends on ARM64_ERR_RECOV + help + With the ARM v8.2 RAS Extension, SEAs are usually triggered when memory errors + are consumed. In some cases, if the error address is in a user page there + is a chance to recover. For example, when the error occurs in a COW or pagecache-read + scenario, we can isolate the page and kill the process instead of dying.
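(A minimal illustrative sketch, not part of the patch: the ASCEND_OOM help above describes the vendor sysctl vm.enable_oom_killer with values 0/1/2; this user-space snippet assumes the usual /proc/sys mapping for that sysctl name.)

	#include <stdio.h>

	int main(void)
	{
		/* 0: disable the OOM killer, 1: default, 2: disable and panic on OOM */
		FILE *f = fopen("/proc/sys/vm/enable_oom_killer", "w");

		if (!f)
			return 1;
		fputs("2\n", f);
		return fclose(f) ? 1 : 0;
	}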
+ +endmenu + +menu "TLB options" + +config ARM64_TLBI_IPI + bool "IPI based ARM64 TLB invalidation(EXPERIMENTAL)" + depends on ARM64 + default n + help + adds new boot parameter 'disable_tlbflush_is' to disable TLB flush + within the same inner shareable domain for performance tuning. + + When this new parameter is specified, TLB entry is invalidated by + __tlbi(aside1, asid) only on the CPUs specified by mm_cpumask(mm). + + By using TLB.IS, all CPUs within the same inner shareable domain + check if there are TLB entries which have this ASID, this causes + performance noise, especially at large-scale HPC environment, which + has more than thousand nodes with low latency interconnect. + + NOTE(Important) + This feature is used for learning and debugging only. Please don't + enable it on commercial products. + If you know exactly what the impact of the feature is, you can + configure it as you do. + + If unsure, say N. + endmenu menu "CPU Power Management" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 393d2b524284e79ab4a344779089432eb4ef31bd..ecc3e6d369142b2d7b601ac18a22976d5f0e48a8 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -139,6 +139,12 @@ config ARCH_MVEBU - Armada 7K SoC Family - Armada 8K SoC Family +config ARCH_PHYTIUM + bool "Phytium SoC Family" + help + This enables support for Phytium ARMv8 SoC family. + select ARM_GIC_PHYTIUM_2500 + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 106039d25e2f7a708b81db27d7c95ae82aaa54dc..e20e8c082448e91e94955bfbd1a5a1756c87bc74 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour # for relative relocs, since this leads to better Image compression # with the relocation offsets always being zero. -LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ +LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \ $(call ld-option, --no-apply-dynamic-relocs) endif @@ -51,6 +51,7 @@ endif KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) @@ -147,6 +148,7 @@ archclean: $(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=$(boot)/dts +ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. 
# In order to do that, we should use the archprepare target, but we can't since # asm-offsets.h is included in some files used to generate vdso-offsets.h, and @@ -156,6 +158,10 @@ archclean: prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h +ifeq ($(CONFIG_ARM64_ILP32), y) + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso-ilp32 include/generated/vdso-ilp32-offsets.h +endif +endif define archhelp echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts index 98dbff19f5cccd6db3711cbe611f2fe3f0f8128f..5caba225b4f78760b4d8d316ae706daab0fa44e5 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts @@ -125,9 +125,9 @@ ®_dcdc1 { regulator-always-on; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-name = "vcc-3v"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; }; ®_dcdc2 { diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index 3f531393eaee9a8cbae54ad5e1f5fd0e1b3aafbb..b3f186434f363f36834e0d3bccd1d671d2ba0fe5 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -142,10 +142,14 @@ /* DCDC3 is polyphased with DCDC2 */ +/* + * The board uses DDR3L DRAM chips. 1.36V is the closest to the nominal + * 1.35V that the PMIC can drive. + */ ®_dcdc5 { regulator-always-on; - regulator-min-microvolt = <1500000>; - regulator-max-microvolt = <1500000>; + regulator-min-microvolt = <1360000>; + regulator-max-microvolt = <1360000>; regulator-name = "vcc-ddr3"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index 1221764f5719cfe19a97b63c440ff8465e2b5dec..667016815cf32081d44efb139f5f1c8e94bbefd6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -67,7 +67,9 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; vmmc-supply = <®_dcdc1>; - cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ + disable-wp; + bus-width = <4>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts index 24f1aac366d64355f5b6b37bb8e263bcce7f2e2d..d5b6e8159a335a0fde372e68f84e2101fc448560 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts @@ -63,3 +63,12 @@ reg = <1>; }; }; + +®_dc1sw { + /* + * Ethernet PHY needs 30ms to properly power up and some more + * to initialize. 100ms should be plenty of time to finish + * whole process. 
+ */ + regulator-enable-ramp-delay = <100000>; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts index c21f2331add60255d0cb97b49319a3e30c22d354..285cb7143b96c9311606663885b722a523d70af5 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts @@ -113,6 +113,12 @@ }; ®_dc1sw { + /* + * Ethernet PHY needs 30ms to properly power up and some more + * to initialize. 100ms should be plenty of time to finish + * whole process. + */ + regulator-enable-ramp-delay = <100000>; regulator-name = "vcc-phy"; }; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d033da401c268a79290708569a3ba92566657edd..9a1ea8a464057a970c4eefbca99999e283043143 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -137,6 +137,10 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC0_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + altr,sysmgr-syscon = <&sysmgr 0x44 0>; status = "disabled"; }; @@ -150,6 +154,10 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC1_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + altr,sysmgr-syscon = <&sysmgr 0x48 0>; status = "disabled"; }; @@ -163,6 +171,10 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC2_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; + snps,multicast-filter-bins = <256>; + altr,sysmgr-syscon = <&sysmgr 0x4c 0>; status = "disabled"; }; @@ -335,7 +347,7 @@ sysmgr: sysmgr@ffd12000 { compatible = "altr,sys-mgr", "syscon"; - reg = <0xffd12000 0x1000>; + reg = <0xffd12000 0x228>; }; /* Local timer */ diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 6edc4fa9fd42683e12f1de2eb4ac3d9732255e44..faa017d4cd56b3960b33b0469590e9876c71c613 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -76,7 +76,7 @@ phy-mode = "rgmii"; phy-handle = <&phy0>; - max-frame-size = <3800>; + max-frame-size = <9000>; mdio0 { #address-cells = <1>; @@ -124,6 +124,8 @@ &i2c1 { status = "okay"; clock-frequency = <100000>; + i2c-sda-falling-time-ns = <890>; /* hcnt */ + i2c-sdl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi index 125f4deb52fe9e0c08f866050788a941ca710b11..b664e7af74eb3a99953796816ed2e5525b169832 100644 --- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi +++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi @@ -107,7 +107,7 @@ clock-names = "uartclk", "apb_pclk"; }; - spi0: ssp@e1020000 { + spi0: spi@e1020000 { status = "disabled"; compatible = "arm,pl022", "arm,primecell"; reg = <0 0xe1020000 0 0x1000>; @@ -117,7 +117,7 @@ clock-names = "apb_pclk"; }; - spi1: ssp@e1030000 { + spi1: spi@e1030000 { status = "disabled"; compatible = "arm,pl022", "arm,primecell"; reg = <0 0xe1030000 0 0x1000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index 
c518130e5ce730e8267456cee4a6062d4527b772..3c34f14fa508602a7f3678d328e3ecdb7d47847c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -458,7 +458,7 @@ }; ethmac: ethernet@ff3f0000 { - compatible = "amlogic,meson-gxbb-dwmac", "snps,dwmac"; + compatible = "amlogic,meson-axg-dwmac", "snps,dwmac"; reg = <0x0 0xff3f0000 0x0 0x10000 0x0 0xff634540 0x0 0x8>; interrupts = ; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index cbe99bd4e06d2c43934a23848bbe788ee69e0390..8cd50b75171de6f2d64f9732d36e4b280510b48f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -191,7 +191,7 @@ pinctrl-names = "default"; }; -&pinctrl_aobus { +&gpio_ao { gpio-line-names = "UART TX", "UART RX", "Power Control", "Power Key In", "VCCK En", "CON1 Header Pin31", "I2S Header Pin6", "IR In", "I2S Header Pin7", @@ -201,7 +201,7 @@ ""; }; -&pinctrl_periphs { +&gpio { gpio-line-names = /* Bank GPIOZ */ "Eth MDIO", "Eth MDC", "Eth RGMII RX Clk", "Eth RX DV", "Eth RX D0", "Eth RX D1", "Eth RX D2", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 54954b314a452b7929aa0ce30a031927c2832b92..09a27e066d8ec2113ac3a2f9fcbb97f2067fd544 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -187,7 +187,7 @@ pinctrl-names = "default"; }; -&pinctrl_aobus { +&gpio_ao { gpio-line-names = "UART TX", "UART RX", "VCCK En", "TF 3V3/1V8 En", "USB HUB nRESET", "USB OTG Power En", "J7 Header Pin2", "IR In", "J7 Header Pin4", @@ -197,7 +197,7 @@ ""; }; -&pinctrl_periphs { +&gpio { gpio-line-names = /* Bank GPIOZ */ "Eth MDIO", "Eth MDC", "Eth RGMII RX Clk", "Eth RX DV", "Eth RX D0", "Eth RX D1", "Eth RX D2", @@ -293,7 +293,7 @@ }; &usb0_phy { - status = "okay"; + status = "disabled"; phy-supply = <&usb_otg_pwr>; }; @@ -303,7 +303,7 @@ }; &usb0 { - status = "okay"; + status = "disabled"; }; &usb1 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index 98cbba6809caa17e2fa4f6b630bf8b02e26f32ab..1ade7e486828c2db082a121e856456e5562d3445 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -390,7 +390,7 @@ }; }; - spi_pins: spi { + spi_pins: spi-pins { mux { groups = "spi_miso", "spi_mosi", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts index d32cf384637022b8ee36287283be31c1091273ee..864ef0111b01ac93025edacdf0ffd048e73bcb07 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts @@ -112,7 +112,7 @@ linux,rc-map-name = "rc-geekbox"; }; -&pinctrl_aobus { +&gpio_ao { gpio-line-names = "UART TX", "UART RX", "Power Key In", @@ -127,7 +127,7 @@ ""; }; -&pinctrl_periphs { +&gpio { gpio-line-names = /* Bank GPIOZ */ "", "", "", "", "", "", "", "", "", "", "", "", "", "", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index f63bceb88caafa249d84de963c3daa034fb842b7..b4dfb9afdef86926d8fec69bf034cdb630ccde8a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -13,7 
+13,7 @@ / { compatible = "libretech,cc", "amlogic,s905x", "amlogic,meson-gxl"; - model = "Libre Technology CC"; + model = "Libre Computer Board AML-S905X-CC"; aliases { serial0 = &uart_AO; @@ -163,7 +163,7 @@ }; }; -&pinctrl_aobus { +&gpio_ao { gpio-line-names = "UART TX", "UART RX", "Blue LED", @@ -178,7 +178,7 @@ "7J1 Header Pin15"; }; -&pinctrl_periphs { +&gpio { gpio-line-names = /* Bank GPIOZ */ "", "", "", "", "", "", "", "", "", "", "", "", "", "", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index c87a80e9bcc6a80bc0f8a59c43a32d6485facafe..8f0bb3c44bd6d05a11e6dea2ed390f0c88bdc9cc 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -337,7 +337,7 @@ }; }; - spi_pins: spi { + spi_pins: spi-pins { mux { groups = "spi_miso", "spi_mosi", diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 1a406a76c86a2ae7ae465c192b08739009e9d891..ea854f689fda89fe8c19526ce99c7724e41c0bd5 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -639,7 +639,7 @@ status = "disabled"; }; - ssp0: ssp@66180000 { + ssp0: spi@66180000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x66180000 0x1000>; interrupts = ; @@ -650,7 +650,7 @@ status = "disabled"; }; - ssp1: ssp@66190000 { + ssp1: spi@66190000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x66190000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi index bc299c3d90683b02e168b9c06cbfd0b26c66ee2f..a9b92e52d50e8a1d4175ece1de5dc2050c34eea6 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi @@ -138,7 +138,7 @@ &i2c1 { status = "okay"; - pcf8574: pcf8574@20 { + pcf8574: pcf8574@27 { compatible = "nxp,pcf8574a"; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi index 8a3a770e8f2ce62bb99fc9fd74461073cfa7780a..56789ccf94545f39cde28c34f9dba8af495322a7 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi @@ -42,13 +42,14 @@ pinmux: pinmux@14029c { compatible = "pinctrl-single"; - reg = <0x0014029c 0x250>; + reg = <0x0014029c 0x26c>; #address-cells = <1>; #size-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0xf>; pinctrl-single,gpio-range = < - &range 0 154 MODE_GPIO + &range 0 91 MODE_GPIO + &range 95 60 MODE_GPIO >; range: gpio-range { #pinctrl-single,gpio-range-cells = <3>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index e283480bfc7e5d50701b7d62bde1b9b3e53ee5a5..ff714fcbac68d816287d85cf7d5535042222c144 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -463,8 +463,7 @@ <&pinmux 108 16 27>, <&pinmux 135 77 6>, <&pinmux 141 67 4>, - <&pinmux 145 149 6>, - <&pinmux 151 91 4>; + <&pinmux 145 149 6>; }; i2c1: i2c@e0000 { @@ -521,7 +520,7 @@ status = "disabled"; }; - ssp0: ssp@180000 { + ssp0: spi@180000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x00180000 0x1000>; interrupts = ; @@ -533,7 +532,7 @@ status = "disabled"; }; - ssp1: 
ssp@190000 { + ssp1: spi@190000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x00190000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index 2131f12364cb23906047495bbfae29e5084c3420..6e20415b061e419e95baeebf4b98b04581dcb4e6 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -18,8 +18,8 @@ / { compatible = "samsung,exynos5433"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; interrupt-parent = <&gic>; @@ -235,7 +235,7 @@ compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; - ranges; + ranges = <0x0 0x0 0x0 0x18000000>; arm_a53_pmu { compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3"; diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 75ad724c487ec863c3e409a09cb6cb70fddb5f66..31b1a606cb664435dff6fda2fe3979ebe0410e4b 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -12,8 +12,8 @@ / { compatible = "samsung,exynos7"; interrupt-parent = <&gic>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; aliases { pinctrl0 = &pinctrl_alive; @@ -70,7 +70,7 @@ compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; - ranges; + ranges = <0 0 0 0x18000000>; chipid@10000000 { compatible = "samsung,exynos4210-chipid"; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index 68ac78c4564dc74cdfd0f40697c1b599bee72add..5da732f82fa0cb0fc844b73bfdd20d347a786628 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -337,7 +337,7 @@ status = "disabled"; }; - dspi: dspi@2100000 { + dspi: spi@2100000 { compatible = "fsl,ls1012a-dspi", "fsl,ls1021a-v1.0-dspi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index 7881e3d81a9aba6134fc66a7d35e6d02daa4009a..b9c0f2de8f12c4ed9be5f25d56a62634c216a221 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -284,7 +284,7 @@ interrupts = <0 43 0x4>; }; - qspi: quadspi@1550000 { + qspi: spi@1550000 { compatible = "fsl,ls1043a-qspi", "fsl,ls1021a-qspi"; #address-cells = <1>; #size-cells = <0>; @@ -382,7 +382,7 @@ ranges = <0x0 0x5 0x00000000 0x8000000>; }; - dspi0: dspi@2100000 { + dspi0: spi@2100000 { compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; #address-cells = <1>; #size-cells = <0>; @@ -395,7 +395,7 @@ status = "disabled"; }; - dspi1: dspi@2110000 { + dspi1: spi@2110000 { compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts index 440e111651d53d9bba85763f0a04cde2796022b3..a59b48203688a47db12732f019255800b9880def 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts @@ -57,12 +57,12 @@ reg = <0x4c>; }; - eeprom@56 { + eeprom@52 { compatible = "atmel,24c512"; reg = <0x52>; }; - eeprom@57 { + eeprom@53 { compatible = "atmel,24c512"; reg = <0x53>; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 
ef83786b8b905d57852ad6637bd2d4dfb5683c43..de6af453a6e168520e2b875521114d5b10ea0444 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -202,7 +202,7 @@ interrupts = ; }; - qspi: quadspi@1550000 { + qspi: spi@1550000 { compatible = "fsl,ls1021a-qspi"; #address-cells = <1>; #size-cells = <0>; @@ -361,7 +361,7 @@ #thermal-sensor-cells = <1>; }; - dspi: dspi@2100000 { + dspi: spi@2100000 { compatible = "fsl,ls1021a-v1.0-dspi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 8cb78dd9967281d06c1cd10c4a55b0bebe337e6e..ebe0cd4bf2b7e2f8539741d61ec14def4565562e 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -469,7 +469,7 @@ mmu-masters = <&fsl_mc 0x300 0>; }; - dspi: dspi@2100000 { + dspi: spi@2100000 { status = "disabled"; compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi"; #address-cells = <1>; @@ -595,7 +595,7 @@ 3 0 0x5 0x20000000 0x00010000>; }; - qspi: quadspi@20c0000 { + qspi: spi@20c0000 { status = "disabled"; compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi"; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index f4964bee6a1a9b7b7e0ab2063f3c2987253c9096..e80a792827edbfb6f4681f6726489d7eebcb90be 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -118,6 +118,7 @@ reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; clocks = <&pmic>; clock-names = "ext_clock"; + post-power-on-delay-ms = <10>; power-off-delay-us = <10>; }; @@ -300,7 +301,6 @@ dwmmc_0: dwmmc0@f723d000 { cap-mmc-highspeed; - mmc-hs200-1_8v; non-removable; bus-width = <0x8>; vmmc-supply = <&ldo19>; diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi index d78a6a755d03dfb1c2ac6ffacd1a990fec9fa7b8..29b5739955a3b4c8328fcac9c9b072afcb9fc997 100644 --- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi @@ -298,6 +298,14 @@ #interrupt-cells = <2>; num-pins = <10>; }; + + mbigen_smmu_pcie_intc: intc_smmu_pcie { + msi-parent = <&its_dsa 0x40b0c>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; + }; mbigen_dsa@c0080000 { @@ -317,6 +325,13 @@ #interrupt-cells = <2>; num-pins = <128>; }; + + mbigen_smmu_dsa_intc: intc_smmu_dsa { + msi-parent = <&its_dsa 0x40b20>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; }; /** @@ -344,6 +359,20 @@ status = "disabled"; }; + smmu1: smmu_dsa { + compatible = "arm,smmu-v3"; + reg = <0x0 0xc0040000 0x0 0x20000>; + interrupt-parent = <&mbigen_smmu_dsa_intc>; + interrupts = <733 1>, + <734 1>, + <735 1>; + interrupt-names = "eventq", "gerror", "priq"; + #iommu-cells = <1>; + dma-coherent; + smmu-cb-memtype = <0x0 0x1>; + hisilicon,broken-prefetch-cmd; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; @@ -377,6 +406,11 @@ #clock-cells = <0>; }; + lpc@a01b0000 { + compatible = "hisilicon,low-pin-count"; + reg = <0x0 0xa01b0000 0x0 0x1000>; + }; + usb_ohci: ohci@a7030000 { compatible = "generic-ohci"; reg = <0x0 0xa7030000 0x0 0x10000>; @@ -443,6 +477,7 @@ reg-names = "ppe-base", "dsaf-base"; interrupt-parent = <&mbigen_dsaf0>; subctrl-syscon = <&dsa_subctrl>; + iommus = <&smmu1 0x0>; reset-field-offset = <0>; interrupts = <576 1>, <577 1>, <578 1>, <579 1>, <580 1>, diff --git 
a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts index 21147e8e3f94410f3f5e1634302cd7b296f8ff3c..e8ab4027b4356fc3c7287746f5667a7e6e37d0f7 100644 --- a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts +++ b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts @@ -89,6 +89,10 @@ status = "ok"; }; +&ipmi0 { + status = "ok"; +}; + &p0_pcie2_a { status = "ok"; }; diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi index 860c8fb10795011f6e6f9894f0ad5e8d4e9444d4..4bde7b6f2b113ccd541c68c9e2c0dee6efe5f568 100644 --- a/arch/arm64/boot/dts/lg/lg1312.dtsi +++ b/arch/arm64/boot/dts/lg/lg1312.dtsi @@ -168,14 +168,14 @@ clock-names = "apb_pclk"; status="disabled"; }; - spi0: ssp@fe800000 { + spi0: spi@fe800000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe800000 0x1000>; interrupts = ; clocks = <&clk_bus>; clock-names = "apb_pclk"; }; - spi1: ssp@fe900000 { + spi1: spi@fe900000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe900000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi index 1887af654a7db96685581b9f6953f8906073da11..16ced1ff1ad36754977dd41c5a1d39c8d8f81866 100644 --- a/arch/arm64/boot/dts/lg/lg1313.dtsi +++ b/arch/arm64/boot/dts/lg/lg1313.dtsi @@ -168,14 +168,14 @@ clock-names = "apb_pclk"; status="disabled"; }; - spi0: ssp@fe800000 { + spi0: spi@fe800000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe800000 0x1000>; interrupts = ; clocks = <&clk_bus>; clock-names = "apb_pclk"; }; - spi1: ssp@fe900000 { + spi1: spi@fe900000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe900000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 176e38d548727ddbac73e0c88a6ba01f6ace1e37..ec0da5b3d7fd7e4bd8b359e85837ca1313431870 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi @@ -27,6 +27,23 @@ method = "smc"; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* + * This area matches the mapping done with a + * mainline U-Boot, and should be updated by the + * bootloader. 
+ */ + + psci-area@4000000 { + reg = <0x0 0x4000000 0x0 0x200000>; + no-map; + }; + }; + ap806 { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index a747b7bf132d1dafe8ff4d6b5ebcaaa5b7021f1e..387be39d40cddae3b8d05b32e7c241134ea40a23 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -17,8 +17,13 @@ model = "MediaTek MT7622 RFB1 board"; compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; + aliases { + serial0 = &uart0; + }; + chosen { - bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512"; + stdout-path = "serial0:115200n8"; + bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512"; }; cpus { diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index a4dfcd19b9e88965187cd24e23116103b0f7e652..9fc14bb9a0affc7dea710afa5bae74b90a264adb 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -118,7 +118,7 @@ }; gen1_i2c: i2c@3160000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x03160000 0x10000>; interrupts = ; #address-cells = <1>; @@ -143,7 +143,7 @@ }; cam_i2c: i2c@3180000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x03180000 0x10000>; interrupts = ; #address-cells = <1>; @@ -157,7 +157,7 @@ /* shares pads with dpaux1 */ dp_aux_ch1_i2c: i2c@3190000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x03190000 0x10000>; interrupts = ; #address-cells = <1>; @@ -171,7 +171,7 @@ /* shares pads with dpaux0 */ dp_aux_ch0_i2c: i2c@31b0000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x031b0000 0x10000>; interrupts = ; #address-cells = <1>; @@ -184,7 +184,7 @@ }; gen7_i2c: i2c@31c0000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x031c0000 0x10000>; interrupts = ; #address-cells = <1>; @@ -197,7 +197,7 @@ }; gen9_i2c: i2c@31e0000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x031e0000 0x10000>; interrupts = ; #address-cells = <1>; @@ -264,7 +264,7 @@ }; gen2_i2c: i2c@c240000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x0c240000 0x10000>; interrupts = ; #address-cells = <1>; @@ -277,7 +277,7 @@ }; gen8_i2c: i2c@c250000 { - compatible = "nvidia,tegra194-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra194-i2c"; reg = <0x0c250000 0x10000>; interrupts = ; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi index 212e6634c9baa5173efd128eb9e37c28a6714468..ccaa555180dc0f03fe3b73d318d00902bc8ee599 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi @@ -282,6 +282,7 @@ status = "okay"; bus-width = <8>; non-removable; + vqmmc-supply = <&vdd_1v8>; }; clocks { @@ -330,7 +331,8 @@ regulator-max-microvolt = <1320000>; enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>; regulator-ramp-delay = <80>; - regulator-enable-ramp-delay = <1000>; + regulator-enable-ramp-delay = <2000>; + regulator-settling-time-us = <160>; }; }; }; diff --git 
a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index 9d5a0e6b2ca4f9b69413e84f8513a7c2f31a5d8f..68af663757d0b3870f53906d7fa9711d73bcb2b9 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1589,7 +1589,7 @@ regulator-name = "VDD_HDMI_5V0"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; - gpio = <&exp1 12 GPIO_ACTIVE_LOW>; + gpio = <&exp1 12 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_5v0_sys>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 3be920efee823a2913f7695bbd09a4ca10f03eb8..6597c0894137a471546ab9a6b02d44ec04d32211 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -1119,7 +1119,7 @@ compatible = "nvidia,tegra210-agic"; #interrupt-cells = <3>; interrupt-controller; - reg = <0x702f9000 0x2000>, + reg = <0x702f9000 0x1000>, <0x702fa000 0x2000>; interrupts = ; clocks = <&tegra_car TEGRA210_CLK_APE>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index cd3865e7a270b97e1913c779fa7ad3448d3e5660..8c86c41a0d25fb3728bc10b2caa518dad5ea7354 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -399,7 +399,7 @@ }; intc: interrupt-controller@9bc0000 { - compatible = "arm,gic-v3"; + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; #interrupt-cells = <3>; interrupt-controller; #redistributor-regions = <1>; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index 6d651f314193724e024d099a2f7a9ae32fc26f5f..6921f8dc5ebbcf057e46044ac03a2715b48cce36 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts @@ -31,6 +31,10 @@ status = "okay"; }; +&tlmm { + gpio-reserved-ranges = <0 4>, <81 4>; +}; + &uart9 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index fb9d08ad7659da7938b8cffecba6b41ada7b07dc..c87eed77de2c110189e0789b488aa63ec3dad589 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi @@ -662,7 +662,7 @@ clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac1 0x35>, <&dmac1 0x34>, <&dmac2 0x35>, <&dmac2 0x34>; - dma-names = "tx", "rx"; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 518>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index cbd35c00b4af6e2bde98edc29b67a2fedb4a323d..33cb0281c39c8fc166ee16d52653befeb33e5ff5 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi @@ -1161,6 +1161,9 @@ <&cpg CPG_CORE R8A7796_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 0cd44461a0bd218b830309f1e9b9f61509084492..f1dfd17413b9efb6aa19b78a46dbc75f0b86ba04 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi @@ -545,7 +545,7 @@ }; hsusb: usb@e6590000 { - compatible = "renesas,usbhs-r8a7796", + compatible = 
"renesas,usbhs-r8a77965", "renesas,rcar-gen3-usbhs"; reg = <0 0xe6590000 0 0x100>; interrupts = ; @@ -634,6 +634,14 @@ resets = <&cpg 219>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>, + <&ipmmu_ds0 2>, <&ipmmu_ds0 3>, + <&ipmmu_ds0 4>, <&ipmmu_ds0 5>, + <&ipmmu_ds0 6>, <&ipmmu_ds0 7>, + <&ipmmu_ds0 8>, <&ipmmu_ds0 9>, + <&ipmmu_ds0 10>, <&ipmmu_ds0 11>, + <&ipmmu_ds0 12>, <&ipmmu_ds0 13>, + <&ipmmu_ds0 14>, <&ipmmu_ds0 15>; }; dmac1: dma-controller@e7300000 { @@ -668,6 +676,14 @@ resets = <&cpg 218>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>, + <&ipmmu_ds1 2>, <&ipmmu_ds1 3>, + <&ipmmu_ds1 4>, <&ipmmu_ds1 5>, + <&ipmmu_ds1 6>, <&ipmmu_ds1 7>, + <&ipmmu_ds1 8>, <&ipmmu_ds1 9>, + <&ipmmu_ds1 10>, <&ipmmu_ds1 11>, + <&ipmmu_ds1 12>, <&ipmmu_ds1 13>, + <&ipmmu_ds1 14>, <&ipmmu_ds1 15>; }; dmac2: dma-controller@e7310000 { @@ -702,6 +718,14 @@ resets = <&cpg 217>; #dma-cells = <1>; dma-channels = <16>; + iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>, + <&ipmmu_ds1 18>, <&ipmmu_ds1 19>, + <&ipmmu_ds1 20>, <&ipmmu_ds1 21>, + <&ipmmu_ds1 22>, <&ipmmu_ds1 23>, + <&ipmmu_ds1 24>, <&ipmmu_ds1 25>, + <&ipmmu_ds1 26>, <&ipmmu_ds1 27>, + <&ipmmu_ds1 28>, <&ipmmu_ds1 29>, + <&ipmmu_ds1 30>, <&ipmmu_ds1 31>; }; ipmmu_ds0: mmu@e6740000 { @@ -951,6 +975,9 @@ <&cpg CPG_CORE R8A77965_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; @@ -1452,9 +1479,9 @@ compatible = "renesas,usb2-phy-r8a77965", "renesas,rcar-gen3-usb2-phy"; reg = <0 0xee0a0200 0 0x700>; - clocks = <&cpg CPG_MOD 703>; + clocks = <&cpg CPG_MOD 702>; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; - resets = <&cpg 703>; + resets = <&cpg 702>; #phy-cells = <0>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts index 9f25c407dfd711741d14e2b19521c60097ad5006..e830b6162375dff225b697c16cb864fdd2ab4772 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts +++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts @@ -15,7 +15,7 @@ aliases { serial0 = &scif0; - ethernet0 = &avb; + ethernet0 = &gether; }; chosen { @@ -47,23 +47,6 @@ }; }; -&avb { - pinctrl-0 = <&avb_pins>; - pinctrl-names = "default"; - - phy-mode = "rgmii-id"; - phy-handle = <&phy0>; - renesas,no-ether-link; - status = "okay"; - - phy0: ethernet-phy@0 { - rxc-skew-ps = <1500>; - reg = <0>; - interrupt-parent = <&gpio1>; - interrupts = <17 IRQ_TYPE_LEVEL_LOW>; - }; -}; - &canfd { pinctrl-0 = <&canfd0_pins>; pinctrl-names = "default"; @@ -82,6 +65,23 @@ clock-frequency = <32768>; }; +&gether { + pinctrl-0 = <&gether_pins>; + pinctrl-names = "default"; + + phy-mode = "rgmii-id"; + phy-handle = <&phy0>; + renesas,no-ether-link; + status = "okay"; + + phy0: ethernet-phy@0 { + rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio4>; + interrupts = <23 IRQ_TYPE_LEVEL_LOW>; + }; +}; + &i2c0 { pinctrl-0 = <&i2c0_pins>; pinctrl-names = "default"; @@ -118,16 +118,17 @@ }; &pfc { - avb_pins: avb { - groups = "avb_mdio", "avb_rgmii"; - function = "avb"; - }; - canfd0_pins: canfd0 { groups = "canfd0_data_a"; function = "canfd0"; }; + gether_pins: gether { + groups = "gether_mdio_a", "gether_rgmii", + "gether_txcrefclk", "gether_txcrefclk_mega"; + function = "gether"; + }; + i2c0_pins: i2c0 { groups = "i2c0"; function = 
"i2c0"; diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts index a8e8f2669d4c53ae7492dc489107d3fbac30eebe..1b8f19ee257f0fff48634dbd9585a5dc130c0faf 100644 --- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts +++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts @@ -188,7 +188,7 @@ compatible = "adi,adv7180cp"; reg = <0x20>; - port { + ports { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index 7d3d866a006352ac196b52002c9c9ce8899f5208..3b90f816dfefcbd8cbddc513ed5a534504643197 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -420,7 +420,10 @@ video-receiver@70 { compatible = "adi,adv7482"; - reg = <0x70>; + reg = <0x70 0x71 0x72 0x73 0x74 0x75 + 0x60 0x61 0x62 0x63 0x64 0x65>; + reg-names = "main", "dpll", "cp", "hdmi", "edid", "repeater", + "infoframe", "cbus", "cec", "sdp", "txa", "txb" ; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 246c317f6a6822990fc8851b48c2009bc2cc53a9..91061d9cf78bc42b059512f2b9870fb1a12b3f43 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -94,8 +94,8 @@ snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - tx_delay = <0x25>; - rx_delay = <0x11>; + tx_delay = <0x24>; + rx_delay = <0x18>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 5272e887a434ec46d52afdffaf76164ad182cdaa..e9147e35b7396d1aedaba08280e62ffb58c7e14e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -40,17 +40,18 @@ pinctrl-0 = <&usb30_host_drv>; regulator-name = "vcc_host_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>; + gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&usb20_host_drv>; regulator-name = "vcc_host1_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; @@ -238,7 +239,7 @@ usb2 { usb20_host_drv: usb20-host-drv { - rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>; + rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 3f5a2944300fe2441f87ec1ac144d771f2cddbfe..92186edefeb96e751ad25df2f2d2bdca5c2f7119 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -708,6 +708,7 @@ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; @@ -719,6 +720,7 @@ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; @@ -730,6 +732,7 @@ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; @@ -1356,11 +1359,11 @@ sdmmc0 { sdmmc0_clk: 
sdmmc0-clk { - rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>; + rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>; }; sdmmc0_cmd: sdmmc0-cmd { - rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>; }; sdmmc0_dectn: sdmmc0-dectn { @@ -1372,14 +1375,14 @@ }; sdmmc0_bus1: sdmmc0-bus1 { - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>; }; sdmmc0_bus4: sdmmc0-bus4 { - rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>, - <1 RK_PA1 1 &pcfg_pull_up_4ma>, - <1 RK_PA2 1 &pcfg_pull_up_4ma>, - <1 RK_PA3 1 &pcfg_pull_up_4ma>; + rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>, + <1 RK_PA1 1 &pcfg_pull_up_8ma>, + <1 RK_PA2 1 &pcfg_pull_up_8ma>, + <1 RK_PA3 1 &pcfg_pull_up_8ma>; }; sdmmc0_gpio: sdmmc0-gpio { @@ -1553,50 +1556,50 @@ rgmiim1_pins: rgmiim1-pins { rockchip,pins = /* mac_txclk */ - <1 RK_PB4 2 &pcfg_pull_none_12ma>, + <1 RK_PB4 2 &pcfg_pull_none_8ma>, /* mac_rxclk */ - <1 RK_PB5 2 &pcfg_pull_none_2ma>, + <1 RK_PB5 2 &pcfg_pull_none_4ma>, /* mac_mdio */ - <1 RK_PC3 2 &pcfg_pull_none_2ma>, + <1 RK_PC3 2 &pcfg_pull_none_4ma>, /* mac_txen */ - <1 RK_PD1 2 &pcfg_pull_none_12ma>, + <1 RK_PD1 2 &pcfg_pull_none_8ma>, /* mac_clk */ - <1 RK_PC5 2 &pcfg_pull_none_2ma>, + <1 RK_PC5 2 &pcfg_pull_none_4ma>, /* mac_rxdv */ - <1 RK_PC6 2 &pcfg_pull_none_2ma>, + <1 RK_PC6 2 &pcfg_pull_none_4ma>, /* mac_mdc */ - <1 RK_PC7 2 &pcfg_pull_none_2ma>, + <1 RK_PC7 2 &pcfg_pull_none_4ma>, /* mac_rxd1 */ - <1 RK_PB2 2 &pcfg_pull_none_2ma>, + <1 RK_PB2 2 &pcfg_pull_none_4ma>, /* mac_rxd0 */ - <1 RK_PB3 2 &pcfg_pull_none_2ma>, + <1 RK_PB3 2 &pcfg_pull_none_4ma>, /* mac_txd1 */ - <1 RK_PB0 2 &pcfg_pull_none_12ma>, + <1 RK_PB0 2 &pcfg_pull_none_8ma>, /* mac_txd0 */ - <1 RK_PB1 2 &pcfg_pull_none_12ma>, + <1 RK_PB1 2 &pcfg_pull_none_8ma>, /* mac_rxd3 */ - <1 RK_PB6 2 &pcfg_pull_none_2ma>, + <1 RK_PB6 2 &pcfg_pull_none_4ma>, /* mac_rxd2 */ - <1 RK_PB7 2 &pcfg_pull_none_2ma>, + <1 RK_PB7 2 &pcfg_pull_none_4ma>, /* mac_txd3 */ - <1 RK_PC0 2 &pcfg_pull_none_12ma>, + <1 RK_PC0 2 &pcfg_pull_none_8ma>, /* mac_txd2 */ - <1 RK_PC1 2 &pcfg_pull_none_12ma>, + <1 RK_PC1 2 &pcfg_pull_none_8ma>, /* mac_txclk */ - <0 RK_PB0 1 &pcfg_pull_none>, + <0 RK_PB0 1 &pcfg_pull_none_8ma>, /* mac_txen */ - <0 RK_PB4 1 &pcfg_pull_none>, + <0 RK_PB4 1 &pcfg_pull_none_8ma>, /* mac_clk */ - <0 RK_PD0 1 &pcfg_pull_none>, + <0 RK_PD0 1 &pcfg_pull_none_4ma>, /* mac_txd1 */ - <0 RK_PC0 1 &pcfg_pull_none>, + <0 RK_PC0 1 &pcfg_pull_none_8ma>, /* mac_txd0 */ - <0 RK_PC1 1 &pcfg_pull_none>, + <0 RK_PC1 1 &pcfg_pull_none_8ma>, /* mac_txd3 */ - <0 RK_PC7 1 &pcfg_pull_none>, + <0 RK_PC7 1 &pcfg_pull_none_8ma>, /* mac_txd2 */ - <0 RK_PC6 1 &pcfg_pull_none>; + <0 RK_PC6 1 &pcfg_pull_none_8ma>; }; rmiim1_pins: rmiim1-pins { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts index 8978d924eb83ec3d4dea337e683b2d570bc00581..85cf0b6bdda9e850c8683bfd57c6a972cb2051c5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts @@ -75,18 +75,6 @@ regulator-always-on; vin-supply = <&vcc_sys>; }; - - vdd_log: vdd-log { - compatible = "pwm-regulator"; - pwms = <&pwm2 0 25000 0>; - regulator-name = "vdd_log"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1400000>; - regulator-always-on; - regulator-boot-on; - vin-supply = <&vcc_sys>; - }; - }; &cpu_l0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts 
index 1ee0dc0d9f10ff9641f02bdad4aae75fc225d078..d1cf404b87084a00b18d55b26d50681b68ce48d5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -22,7 +22,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 2cc7c47d6a85d79312a519dd0a0c02964f9223ff..65637a5a4b21be6e1fec5d510f31164f2a1ca07d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts @@ -43,7 +43,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index e0d64f862322e507798c4fd5d590963164da4c4e..1e6a71066c163fd7bd2493b286e83a0596942930 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -131,7 +131,7 @@ status = "okay"; clock-frequency = <400000>; - sgtl5000: codec@0a { + sgtl5000: codec@a { compatible = "fsl,sgtl5000"; reg = <0x0a>; clocks = <&sgtl5000_clk>; @@ -153,7 +153,7 @@ }; &pcie0 { - ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; + ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; pinctrl-0 = <&pcie_clkreqn_cpm>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index fef2c06089993c38a76096df356fdf1406fa34f1..b14d83919f14ce1b90668fc7467645edfd9b2dec 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -50,7 +50,7 @@ pinctrl-0 = <&lcd_panel_reset>; power-supply = <&vcc3v3_s0>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 36b60791c156d2ace112850571418d0410f81bd7..6062cc8250b110d988a12b80b27257f24fe125af 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -93,6 +93,19 @@ vin-supply = <&vcc_1v8>; }; + vcc3v0_sd: vcc3v0-sd { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_pwr_h>; + regulator-always-on; + regulator-max-microvolt = <3000000>; + regulator-min-microvolt = <3000000>; + regulator-name = "vcc3v0_sd"; + vin-supply = <&vcc3v3_sys>; + }; + vcc3v3_sys: vcc3v3-sys { compatible = "regulator-fixed"; regulator-name = "vcc3v3_sys"; @@ -116,7 +129,7 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; enable-active-high; - gpio = <&gpio1 RK_PD1 GPIO_ACTIVE_HIGH>; + gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; @@ -310,7 +323,7 @@ regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; regulator-state-mem { regulator-on-in-suspend; regulator-suspend-microvolt = <3000000>; @@ -469,6 +482,13 @@ }; }; + sd { + sdmmc0_pwr_h: sdmmc0-pwr-h { + rockchip,pins = + ; + }; + }; + usb2 { 
vcc5v0_host_en: vcc5v0-host-en { rockchip,pins = @@ -499,6 +519,7 @@ }; &sdmmc { + broken-cd; bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; @@ -507,6 +528,7 @@ max-frequency = <150000000>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; + vmmc-supply = <&vcc3v0_sd>; vqmmc-supply = <&vcc_sdio>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index c88e603396f610615e4070bc8af62dfa9d389b0b..cea44a7c7cf998566f2c9f99d3b400d1f11acc5a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -305,6 +305,7 @@ phys = <&emmc_phy>; phy-names = "phy_arasan"; power-domains = <&power RK3399_PD_EMMC>; + disable-cqe-dcmd; status = "disabled"; }; @@ -1642,11 +1643,11 @@ reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>; interrupts = ; interrupt-names = "isp0_mmu"; - clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>; + clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>; clock-names = "aclk", "iface"; #iommu-cells = <0>; + power-domains = <&power RK3399_PD_ISP0>; rockchip,disable-mmu-reset; - status = "disabled"; }; isp1_mmu: iommu@ff924000 { @@ -1654,11 +1655,11 @@ reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>; interrupts = ; interrupt-names = "isp1_mmu"; - clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>; + clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>; clock-names = "aclk", "iface"; #iommu-cells = <0>; + power-domains = <&power RK3399_PD_ISP1>; rockchip,disable-mmu-reset; - status = "disabled"; }; hdmi_sound: hdmi-sound { diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 2409344df4fa2f50ee9e91feddc483e0a4a10279..2e3917171b17f9a557869454cbb746f6bf8296f9 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -8,22 +8,22 @@ &cbass_main { gic500: interrupt-controller@1800000 { compatible = "arm,gic-v3"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; #interrupt-cells = <3>; interrupt-controller; - reg = <0x01800000 0x10000>, /* GICD */ - <0x01880000 0x90000>; /* GICR */ + reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ + <0x00 0x01880000 0x00 0x90000>; /* GICR */ /* * vcpumntirq: * virtual CPU interface maintenance interrupt */ interrupts = ; - gic_its: gic-its@18200000 { + gic_its: gic-its@1820000 { compatible = "arm,gic-v3-its"; - reg = <0x01820000 0x10000>; + reg = <0x00 0x01820000 0x00 0x10000>; msi-controller; #msi-cells = <1>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index cede1fa0983c9321511649251cff09c1cdaa4e63..ded364d208351f959262df4e57339c7189fcfea7 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -46,38 +46,38 @@ cbass_main: interconnect@100000 { compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x00100000 0x00 0x00100000 0x00020000>, /* ctrl mmr */ - <0x00600000 0x00 0x00600000 0x00001100>, /* GPIO */ - <0x00900000 0x00 0x00900000 0x00012000>, /* serdes */ - <0x01000000 0x00 0x01000000 0x0af02400>, /* Most peripherals */ - <0x30800000 0x00 0x30800000 0x0bc00000>, /* MAIN NAVSS */ + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */ + <0x00 0x00600000 0x00 0x00600000 0x00 0x00001100>, /* GPIO */ + <0x00 0x00900000 0x00 0x00900000 0x00 
0x00012000>, /* serdes */ + <0x00 0x01000000 0x00 0x01000000 0x00 0x0af02400>, /* Most peripherals */ + <0x00 0x30800000 0x00 0x30800000 0x00 0x0bc00000>, /* MAIN NAVSS */ /* MCUSS Range */ - <0x28380000 0x00 0x28380000 0x03880000>, - <0x40200000 0x00 0x40200000 0x00900100>, - <0x42040000 0x00 0x42040000 0x03ac2400>, - <0x45100000 0x00 0x45100000 0x00c24000>, - <0x46000000 0x00 0x46000000 0x00200000>, - <0x47000000 0x00 0x47000000 0x00068400>; + <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, + <0x00 0x40200000 0x00 0x40200000 0x00 0x00900100>, + <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, + <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, + <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, + <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>; cbass_mcu: interconnect@28380000 { compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x28380000 0x28380000 0x03880000>, /* MCU NAVSS*/ - <0x40200000 0x40200000 0x00900100>, /* First peripheral window */ - <0x42040000 0x42040000 0x03ac2400>, /* WKUP */ - <0x45100000 0x45100000 0x00c24000>, /* MMRs, remaining NAVSS */ - <0x46000000 0x46000000 0x00200000>, /* CPSW */ - <0x47000000 0x47000000 0x00068400>; /* OSPI space 1 */ + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>, /* MCU NAVSS*/ + <0x00 0x40200000 0x00 0x40200000 0x00 0x00900100>, /* First peripheral window */ + <0x00 0x42040000 0x00 0x42040000 0x00 0x03ac2400>, /* WKUP */ + <0x00 0x45100000 0x00 0x45100000 0x00 0x00c24000>, /* MMRs, remaining NAVSS */ + <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, /* CPSW */ + <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>; /* OSPI space 1 */ cbass_wakeup: interconnect@42040000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; /* WKUP Basic peripherals */ - ranges = <0x42040000 0x42040000 0x03ac2400>; + ranges = <0x42040000 0x00 0x42040000 0x03ac2400>; }; }; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi index 9c09baca7dd78ce9283e535d90bcbb12c1c8da29..306ad2157c9882b5ce4a5a9c5bc24e5d25d75c4b 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi @@ -58,13 +58,13 @@ clock-accuracy = <100>; }; - dpdma_clk: dpdma_clk { + dpdma_clk: dpdma-clk { compatible = "fixed-clock"; #clock-cells = <0x0>; clock-frequency = <533000000>; }; - drm_clock: drm_clock { + drm_clock: drm-clock { compatible = "fixed-clock"; #clock-cells = <0x0>; clock-frequency = <262750000>; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts index eb5e8bddb610adf4d1781d6b4646eef6c1bef657..14062b4535dd71b29d56a0361b556c2c46a2a1a6 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts @@ -82,7 +82,7 @@ linux,default-trigger = "bluetooth-power"; }; - vbus_det { /* U5 USB5744 VBUS detection via MIO25 */ + vbus-det { /* U5 USB5744 VBUS detection via MIO25 */ label = "vbus_det"; gpios = <&gpio 25 GPIO_ACTIVE_HIGH>; default-state = "on"; @@ -98,9 +98,10 @@ regulator-boot-on; }; - sdio_pwrseq: sdio_pwrseq { + sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */ + post-power-on-delay-ms = <10>; }; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts index 
25dd57485323507888ebfc66cebdcfc858f5a3a3..d3b8e1a9c07616f76e41976351b28332efc96e0d 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts @@ -53,7 +53,7 @@ leds { compatible = "gpio-leds"; - heartbeat_led { + heartbeat-led { label = "heartbeat"; gpios = <&gpio 23 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; @@ -139,25 +139,25 @@ * 7, 10 - 17 - not connected */ - gtr_sel0 { + gtr-sel0 { gpio-hog; gpios = <0 0>; output-low; /* PCIE = 0, DP = 1 */ line-name = "sel0"; }; - gtr_sel1 { + gtr-sel1 { gpio-hog; gpios = <1 0>; output-high; /* PCIE = 0, DP = 1 */ line-name = "sel1"; }; - gtr_sel2 { + gtr-sel2 { gpio-hog; gpios = <2 0>; output-high; /* PCIE = 0, USB0 = 1 */ line-name = "sel2"; }; - gtr_sel3 { + gtr-sel3 { gpio-hog; gpios = <3 0>; output-high; /* PCIE = 0, SATA = 1 */ diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts index 259f21b0c0014690ded2d21a417d0cffb2d2b271..28dee4dad82c236c6e2b5fd974b71f707a15a0f2 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts @@ -53,7 +53,7 @@ leds { compatible = "gpio-leds"; - heartbeat_led { + heartbeat-led { label = "heartbeat"; gpios = <&gpio 23 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts index a61b3cc6f4c95ecf8396b1ff253fa1f794f10dd0..47b5989035e4e2d3e1069a7776b06bbbe6c88fc4 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts @@ -53,7 +53,7 @@ leds { compatible = "gpio-leds"; - heartbeat_led { + heartbeat-led { label = "heartbeat"; gpios = <&gpio 23 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 29ce23422acf221e0725e93949b99c79f975ccf7..a516c0e01429a82c1ddb21c2d214484e4604bf72 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -71,7 +71,7 @@ }; }; - cpu_opp_table: cpu_opp_table { + cpu_opp_table: cpu-opp-table { compatible = "operating-points-v2"; opp-shared; opp00 { @@ -124,7 +124,7 @@ <1 10 0xf08>; }; - amba_apu: amba_apu@0 { + amba_apu: amba-apu@0 { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <1>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index db8d364f84768b669333dbe21334fcbb3d3e81c3..e319985987367f11ddc480d27dd1c2c75d3324fe 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -706,8 +706,23 @@ CONFIG_CRYPTO_SHA3_ARM64=m CONFIG_CRYPTO_SM3_ARM64_CE=m CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -CONFIG_CRYPTO_CRC32_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCSI_ENCLOSURE=y +CONFIG_ENCLOSURE_SERVICES=y +CONFIG_CHR_DEV_SG=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID456=y +CONFIG_MD_RAID10=y +CONFIG_MD_MULTIPATH=y +CONFIG_MD_FAULTY=y +CONFIG_MD_CLUSTER=y + diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..06dfc89b644fe846ac0387961c5b379ca77bdf35 --- /dev/null +++ 
b/arch/arm64/configs/hulk_defconfig @@ -0,0 +1,5763 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.19.25 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 6.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=60300 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +# CONFIG_KTASK is not set + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_MEMCG_MEMFS_INFO=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_QOS_SCHED_DYNAMIC_AFFINITY=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_FILES=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y 
+CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y 
+CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +CONFIG_LIVEPATCH_STACK=y +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +CONFIG_HISILICON_PCIE_CAE=m +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +# CONFIG_ARM64_ERRATUM_826319 is not set +# CONFIG_ARM64_ERRATUM_827319 is not set +# CONFIG_ARM64_ERRATUM_824069 is not set +# CONFIG_ARM64_ERRATUM_819472 is not set +# CONFIG_ARM64_ERRATUM_832075 is not set +# CONFIG_ARM64_ERRATUM_834220 is not set +CONFIG_ARM64_ERRATUM_845719=y +# CONFIG_ARM64_ERRATUM_843419 is not set +# CONFIG_ARM64_ERRATUM_1024718 is not set +# CONFIG_ARM64_ERRATUM_1463225 is not set +# CONFIG_CAVIUM_ERRATUM_22375 is not set +# CONFIG_CAVIUM_ERRATUM_23144 is not set +# CONFIG_CAVIUM_ERRATUM_23154 is not set +# CONFIG_CAVIUM_ERRATUM_27456 is not set +# CONFIG_CAVIUM_ERRATUM_30115 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set +# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set +# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_HISILICON_ERRATUM_1980005=y +# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set +CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH=y +# CONFIG_HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF is not set +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +# CONFIG_ARM64_BOOTPARAM_HOTPLUG_CPU0 is not set +CONFIG_ARM64_ERR_RECOV=y +CONFIG_MPAM=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=4 +CONFIG_NUMA_AWARE_SPINLOCKS=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y 
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_COHERENT_DEVICE=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_ARM64_CPU_PARK=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +CONFIG_ARM64_PSEUDO_NMI=y +CONFIG_ARM64_DEBUG_PRIORITY_MASKING=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_ASCEND_FEATURES=y +CONFIG_ASCEND_DVPP_MMAP=y +CONFIG_ASCEND_OOM=y +CONFIG_ASCEND_IOPF_HIPRI=y +CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES=y +CONFIG_ASCEND_WATCHDOG_SYSFS_CONFIGURE=y +CONFIG_ASCEND_SHARE_POOL=y +CONFIG_ASCEND_CLEAN_CDM=y + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +CONFIG_COMPAT=y +CONFIG_AARCH32_EL0=y +CONFIG_ARM64_ILP32=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_UCE_KERNEL_RECOVERY=y + +# +# TLB options +# +CONFIG_ARM64_TLBI_IPI=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y +# CONFIG_HALTPOLL_CPUIDLE is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y 
+CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_HISILICON_CPPC_CPUFREQ_WORKAROUND=y +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_QORIQ_CPUFREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_ARM_SCMI_PROTOCOL is not set +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_FAKE_MEMMAP=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y + +# +# Tegra firmware driver +# +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_ACPI_HMAT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m 
+CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_QUICK_KEXEC=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_STATIC_KEYS_SELFTEST=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_NMI_WATCHDOG=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y 
+CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# 
+CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MM_OWNER=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_USERSWAP=y +CONFIG_MEMCG_QOS=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_PIN_MEMORY=y +CONFIG_PID_RESERVE=y +CONFIG_MEMORY_RELIABLE=y +CONFIG_CLEAR_FREELIST_PAGE=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m 
+CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_TCP_COMP=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=y +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m 
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m 
+CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=y +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y 
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +# CONFIG_TIPC_MEDIA_IB is not set +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +# CONFIG_IEEE802154_6LOWPAN is not set +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=m 
+CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=m +# CONFIG_HAMRADIO is not set +CONFIG_CAN=y +CONFIG_CAN_RAW=y +CONFIG_CAN_BCM=y +CONFIG_CAN_GW=y +CONFIG_CAN_J1939=y + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_XILINXCAN is not set +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +CONFIG_CAN_DEBUG_DEVICES=y +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set 
+CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_NAND_ECC=m +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_ECC_BCH is not set +# CONFIG_MTD_NAND_DENALI_PCI is not set +# CONFIG_MTD_NAND_DENALI_DT is not set +# CONFIG_MTD_NAND_GPIO is not set +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_DOCG4 is not set +# CONFIG_MTD_NAND_CAFE is not set +CONFIG_MTD_NAND_NANDSIM=m +# CONFIG_MTD_NAND_BRCMNAND is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_NAND_HISI504 is not set +# CONFIG_MTD_NAND_QCOM is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# 
CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m 
+CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +CONFIG_RAMAXEL_SPRAID=m +# CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=y +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is 
not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=y +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# 
CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=y +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_NET_XGENE=y +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +# CONFIG_BNXT_HWMON is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_HNS3_CAE=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_BMA=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_NET_VENDOR_NETSWIFT=y +CONFIG_TXGBE=m +# CONFIG_NGBE is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +# CONFIG_MLX5_CORE is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +# CONFIG_MLXSW_SWITCHIB is not set +# CONFIG_MLXSW_SWITCHX2 is not set +# CONFIG_MLXSW_SPECTRUM is not set +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# 
CONFIG_MSCC_OCELOT_SWITCH is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_QLGE=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_THUNDER=m +CONFIG_MDIO_XGENE=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m 
+CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_IEEE802154_DRIVERS is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not 
set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +# CONFIG_MOUSE_ELAN_I2C is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=8 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y 
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_ATTACHED_MBIGEN=y +CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISI_V2=m +CONFIG_HW_RANDOM_XGENE=y +CONFIG_HW_RANDOM_CAVIUM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set +CONFIG_HISI_SVM=y +CONFIG_PIN_MEMORY_DEV=m + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# 
CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +CONFIG_I2C_HISI=m +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_QUP=y +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=y +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=y +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SDM845 is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +CONFIG_GPIO_HISI=m +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y 
+CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +# CONFIG_SENSORS_I5K_AMB is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m 
+CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# 
CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# + +# +# Qualcomm thermal drivers +# +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER=y +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not 
set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VM=y + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON_USERPTR=y +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# 
CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set 
+CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +# CONFIG_SND is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=y +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=y +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=y +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set 
+CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=m +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +# CONFIG_USB_SERIAL_UPD78F0730 is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# 
CONFIG_USB_RIO500 is not set +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +# CONFIG_TYPEC_TCPM is not set +CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +# CONFIG_USB_ROLE_SWITCH is not set +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +# CONFIG_MMC_DW_IDMAC is not set +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_HI3XXX is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m 
+CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP06=m +CONFIG_INFINIBAND_HNS_HIP08=m +CONFIG_INFINIBAND_HNS_DFX=m +CONFIG_INFINIBAND_HNS_TEST=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# 
CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +# CONFIG_RTC_DS1685_PROC_REGS is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_HT16K33 is not set +CONFIG_UIO=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=y +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=y +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=y +CONFIG_VFIO_VIRQFD=y +CONFIG_VFIO=y +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=y +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VFIO_SPIMDEV=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m 
+CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set + +# +# Generic PASID table support +# +CONFIG_IOMMU_PASID_TABLE=y +CONFIG_ARM_SMMU_V3_CONTEXT=y +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_IOMMU_PAGE_FAULT=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_QCOM_IOMMU is not set +CONFIG_SMMU_BYPASS_DEV=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# 
CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_ASCEND_INIT_ALL_GICR=y +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_FMC=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m +CONFIG_FMC_CHARDEV=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +# CONFIG_ARM_DSU_PMU is not set +CONFIG_HISI_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=y +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_OF_PMEM=m +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +# CONFIG_QCOM_QFPROM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m + +# +# TEE drivers +# +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +CONFIG_UACCE=y +# CONFIG_WD_DUMMY_DEV is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_EXT4_PARALLEL_DIO_READ=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_BTRFS_FS_CHECK_INTEGRITY=y +CONFIG_BTRFS_FS_RUN_SANITY_TESTS=y +CONFIG_BTRFS_DEBUG=y +CONFIG_BTRFS_ASSERT=y +CONFIG_BTRFS_FS_REF_VERIFY=y +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set 
+CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +CONFIG_DYNAMIC_HUGETLB=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +CONFIG_UBIFS_FS=m +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +# CONFIG_UBIFS_FS_ENCRYPTION is not set +CONFIG_UBIFS_FS_SECURITY=y +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# 
CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# 
CONFIG_DLM is not set +CONFIG_RESCTRL=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +CONFIG_EVM_ADD_XATTRS=y +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# 
+CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_QM_UACCE=y +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_HISI_SGL=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_RDE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_INDIRECT_PIO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not 
set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_DEBUG_RODATA_TEST=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +# CONFIG_DEBUG_KMEMLEAK_TEST is not set +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# 
CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_KASAN=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +# CONFIG_TEST_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_SDEI_WATCHDOG=y +# CONFIG_PMU_WATCHDOG is not set +CONFIG_CORELOCKUP_DETECTOR=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set 
+CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +CONFIG_TEST_USER_COPY=m +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +CONFIG_TEST_SYSCTL=y +# CONFIG_TEST_UDELAY is not set +CONFIG_TEST_STATIC_KEYS=m +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN_ALIGNMENT is not set +# CONFIG_TEST_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set +CONFIG_ETMEM_SCAN=m +CONFIG_ETMEM_SWAP=m diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..71e12eb64467e6792a7ad44f74ed61836c016448 --- /dev/null +++ b/arch/arm64/configs/openeuler_defconfig @@ -0,0 +1,6695 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 4.19.90 Kernel Configuration +# +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +# CONFIG_KTASK is not set + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_MEMCG_MEMFS_INFO=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_QOS_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_QOS_SCHED_DYNAMIC_AFFINITY=y +CONFIG_QOS_SCHED_SMART_GRID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_FILES=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_STEAL=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y 
+CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_HAVE_FUTEX_CMPXCHG=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=16 +CONFIG_ARM64_CONT_SHIFT=5 +CONFIG_ARCH_MMAP_RND_BITS_MIN=14 +CONFIG_ARCH_MMAP_RND_BITS_MAX=29 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=7 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ARCH_HAS_CPU_RELAX=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZX is not set +# 
CONFIG_ARCH_ZYNQMP is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +CONFIG_HISILICON_PCIE_CAE=m + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +# CONFIG_ARM64_ERRATUM_1463225 is not set +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_HISILICON_ERRATUM_162100801=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +# CONFIG_HISILICON_ERRATUM_1980005 is not set +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH=y +# CONFIG_HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF is not set +# CONFIG_ARM64_4K_PAGES is not set +# CONFIG_ARM64_16K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y +# CONFIG_ARM64_VA_BITS_42 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +# CONFIG_ARM64_PA_BITS_52 is not set +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +# CONFIG_ARM64_BOOTPARAM_HOTPLUG_CPU0 is not set +CONFIG_ARM64_ERR_RECOV=y +CONFIG_MPAM=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=4 +# CONFIG_NUMA_AWARE_SPINLOCKS is not set 
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +# CONFIG_ARCH_LLC_128_LINE_SIZE is not set +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_ARM64_CPU_PARK=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_ARM64_PSEUDO_NMI is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +# CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK is not set +# CONFIG_ASCEND_FEATURES is not set + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +CONFIG_COMPAT=y +CONFIG_AARCH32_EL0=y +# CONFIG_ARM64_ILP32 is not set +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_UCE_KERNEL_RECOVERY=y + +# +# TLB options +# +CONFIG_ARM64_TLBI_IPI=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y +CONFIG_HALTPOLL_CPUIDLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y 
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_QORIQ_CPUFREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_ARM_SCMI_PROTOCOL is not set +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y + +# +# Tegra firmware driver +# +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_ACPI_HMAT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_MPAM=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +# CONFIG_CRYPTO_SM3_ARM64_CE is not set +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m 
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_QUICK_KEXEC=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_STATIC_KEYS_SELFTEST=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_NMI_WATCHDOG=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_BIO_DISPATCH_ASYNC is not set +# CONFIG_BLK_IO_HIERARCHY_STATS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not 
set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y 
+CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MEMBLOCK_PFN_VALID=y +# CONFIG_COHERENT_DEVICE is not set +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MM_OWNER=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_USERSWAP=y +CONFIG_MEMCG_QOS=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_FRAME_VECTOR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_PIN_MEMORY=y +CONFIG_PID_RESERVE=y +# CONFIG_MEMORY_RELIABLE is not set +# CONFIG_CLEAR_FREELIST_PAGE is not set +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set 
+CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_TCP_COMP=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m 
+CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m 
+CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +# CONFIG_IEEE802154_6LOWPAN is not set +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR 
is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=m +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_XILINXCAN is not set +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_PHYTIUM=m +CONFIG_CAN_PHYTIUM_PLATFORM=m +CONFIG_CAN_PHYTIUM_PCI=m +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +# CONFIG_MAC80211_RC_MINSTREL_VHT is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# 
CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_WANT_DEV_COREDUMP=y +# CONFIG_ALLOW_DEV_COREDUMP is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_MAP_BANK_WIDTH_8=y +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +CONFIG_MTD_SPI_NOR=m +CONFIG_SPI_PHYTIUM_QUADSPI=m +CONFIG_MTD_MT81xx_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_SPI_CADENCE_QUADSPI is not set +# CONFIG_SPI_HISI_SFC is not set 
+CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +CONFIG_MTD_UBI_GLUEBI=m +# CONFIG_MTD_UBI_BLOCK is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# 
+CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_3SNIC_SSSRAID=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_HISI_RAID=m +# CONFIG_SCSI_UFSHCD is not set +CONFIG_RAMAXEL_SPRAID=m +# CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=m +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# 
CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m 
+CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_NET_VENDOR_3SNIC=y +CONFIG_SSSNIC=m +CONFIG_SSSNIC_HW=m +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_NET_XGENE=y +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +# CONFIG_BNXT_HWMON is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_HNS3_CAE=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_BMA=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_NET_VENDOR_NETSWIFT=y +CONFIG_TXGBE=m +CONFIG_NGBE=m +# CONFIG_NGBE_HWMON is not set +# CONFIG_NGBE_PROCFS is not set +# CONFIG_NGBE_NO_LLI is not set +# CONFIG_NGBE_DEBUG_FS is not set +# CONFIG_NGBE_POLL_LINK_STATUS is not set +# CONFIG_NGBE_SYSFS is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y 
+CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +# CONFIG_MLX5_ESWITCH is not set +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +# CONFIG_MLXSW_SWITCHIB is not set +# CONFIG_MLXSW_SWITCHX2 is not set +# CONFIG_MLXSW_SPECTRUM is not set +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=m +CONFIG_STMMAC_PLATFORM=m +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=m +CONFIG_DWMAC_IPQ806X=m +CONFIG_DWMAC_PHYTIUM=m +# CONFIG_STMMAC_PCI is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_THUNDER=m +CONFIG_MDIO_XGENE=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# 
CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +# CONFIG_ATH9K is not set +# CONFIG_ATH9K_HTC is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_SNOC is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +# CONFIG_RT2800PCI is not set +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +# CONFIG_RT2800USB_RT3573 is not set +CONFIG_RT2800USB_RT53XX=y 
+# CONFIG_RT2800USB_RT55XX is not set +# CONFIG_RT2800USB_UNKNOWN is not set +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_LIB_DEBUGFS is not set +# CONFIG_RT2X00_DEBUG is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_IEEE802154_DRIVERS is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MATRIXKMAP=m + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_KEYBOARD_PHYTIUM=m +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +# CONFIG_MOUSE_ELAN_I2C is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set 
+CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_INPUT_HISI_POWERKEY is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F54 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is 
not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISI_V2=y +CONFIG_HW_RANDOM_XGENE=y +CONFIG_HW_RANDOM_CAVIUM=y +CONFIG_HW_RANDOM_PHYTIUM=m +# CONFIG_APPLICOM is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_TCG_TPM=m +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=m +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set +CONFIG_HISI_SVM=y +CONFIG_PIN_MEMORY_DEV=m + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +CONFIG_I2C_ZHAOXIN=m + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +CONFIG_I2C_PHYTIUM_CORE=m +CONFIG_I2C_PHYTIUM_PCI=m +CONFIG_I2C_PHYTIUM_PLATFORM=m +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +CONFIG_I2C_HISI=m +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers 
+# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_PCI=m +# CONFIG_SPI_DW_MID_DMA is not set +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PHYTIUM=m +CONFIG_SPI_PHYTIUM_PLAT=m +CONFIG_SPI_PHYTIUM_PCI=m +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SDM845 is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y +CONFIG_GPIO_PHYTIUM_CORE=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +CONFIG_GPIO_HISI=m +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PHYTIUM_PLAT=m +CONFIG_GPIO_PHYTIUM_SGPIO=m +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP 
is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +CONFIG_GPIO_PHYTIUM_PCI=m +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +CONFIG_W1=m +CONFIG_W1_CON=y + +# +# 1-wire Bus Masters +# +# CONFIG_W1_MASTER_MATROX is not set +# CONFIG_W1_MASTER_DS2490 is not set +# CONFIG_W1_MASTER_DS2482 is not set +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set +CONFIG_W1_MASTER_PHYTIUM=m + +# +# 1-wire Slaves +# +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +# CONFIG_W1_SLAVE_DS2405 is not set +# CONFIG_W1_SLAVE_DS2408 is not set +# CONFIG_W1_SLAVE_DS2413 is not set +# CONFIG_W1_SLAVE_DS2406 is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2805 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2438 is not set +# CONFIG_W1_SLAVE_DS2780 is not set +# CONFIG_W1_SLAVE_DS2781 is not set +# CONFIG_W1_SLAVE_DS28E04 is not set +# CONFIG_W1_SLAVE_DS28E17 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2760 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_LEGO_EV3 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_BATTERY_MAX1721X is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m 
+CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +# CONFIG_SENSORS_I5K_AMB is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m 
+CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_GENERIC_ADC_THERMAL is not set + +# +# Qualcomm thermal drivers +# +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# 
CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +# CONFIG_VIDEO_PCI_SKELETON is not set +# CONFIG_V4L2_FLASH_LED_CLASS is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +# CONFIG_USB_GSPCA is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +# CONFIG_VIDEO_USBTV is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_MEDIA_PCI_SUPPORT is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CAFE_CCIC is not set +# CONFIG_VIDEO_CADENCE is not set +# CONFIG_VIDEO_MUX is not set +# CONFIG_VIDEO_QCOM_CAMSS is not set +# CONFIG_SOC_CAMERA is not set +# CONFIG_VIDEO_XILINX is not set +CONFIG_VIDEO_PHYTIUM_JPEG=m +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS 
is not set + +# +# Supported MMC/SDIO adapters +# +# CONFIG_CYPRESS_FIRMWARE is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +# +# I2C Encoders, decoders, sensors and other helper chips +# + +# +# Audio decoders, processors and mixers +# +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_MSP3400 is not set +# CONFIG_VIDEO_CS3308 is not set +# CONFIG_VIDEO_CS5345 is not set +# CONFIG_VIDEO_CS53L32A is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_UDA1342 is not set +# CONFIG_VIDEO_WM8775 is not set +# CONFIG_VIDEO_WM8739 is not set +# CONFIG_VIDEO_VP27SMPX is not set +# CONFIG_VIDEO_SONY_BTF_MPX is not set + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# CONFIG_VIDEO_SAA7110 is not set +# CONFIG_VIDEO_SAA711X is not set +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +# CONFIG_VIDEO_SAA717X is not set +# CONFIG_VIDEO_CX25840 is not set + +# +# Video encoders +# +# CONFIG_VIDEO_SAA7127 is not set +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AD9389B is not set +# CONFIG_VIDEO_AK881X is not set +# CONFIG_VIDEO_THS8200 is not set + +# +# Camera sensor devices +# +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_VS6624 is not set +# CONFIG_VIDEO_MT9M032 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T001 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_SR030PC30 is not set +# CONFIG_VIDEO_NOON010PC30 is not set +# CONFIG_VIDEO_M5MOLS is not set +# CONFIG_VIDEO_RJ54N1 is 
not set +# CONFIG_VIDEO_S5K6AA is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_S5K4ECGX is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_SMIAPP is not set +# CONFIG_VIDEO_ET8EK8 is not set +# CONFIG_VIDEO_S5C73M3 is not set + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set + +# +# Video improvement chips +# +# CONFIG_VIDEO_UPD64031A is not set +# CONFIG_VIDEO_UPD64083 is not set + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_THS7303 is not set +# CONFIG_VIDEO_M52790 is not set +# CONFIG_VIDEO_I2C is not set + +# +# Sensors used on soc_camera driver +# + +# +# SPI helper chips +# +# CONFIG_VIDEO_GS1662 is not set + +# +# Media SPI Adapters +# + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +# CONFIG_DEBUG_KERNEL_DC is not set + +# +# AMD Library routines +# +CONFIG_CHASH=m +# CONFIG_CHASH_STATS is not set +# CONFIG_CHASH_SELFTEST is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is 
not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=y +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +CONFIG_DRM_PHYTIUM=m +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y 
+CONFIG_SOUND=m +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +# CONFIG_SND_OSSEMUL is not set +CONFIG_SND_PCM_TIMER=y +# CONFIG_SND_HRTIMER is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +# CONFIG_SND_SEQUENCER is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_PCI=y +# CONFIG_SND_AD1889 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AW2 is not set +# CONFIG_SND_BT87X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_OXYGEN is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set +# CONFIG_SND_DARLA20 is not set +# CONFIG_SND_GINA20 is not set +# CONFIG_SND_LAYLA20 is not set +# CONFIG_SND_DARLA24 is not set +# CONFIG_SND_GINA24 is not set +# CONFIG_SND_LAYLA24 is not set +# CONFIG_SND_MONA is not set +# CONFIG_SND_MIA is not set +# CONFIG_SND_ECHO3G is not set +# CONFIG_SND_INDIGO is not set +# CONFIG_SND_INDIGOIO is not set +# CONFIG_SND_INDIGODJ is not set +# CONFIG_SND_INDIGOIOX is not set +# CONFIG_SND_INDIGODJX is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_HDSPM is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LOLA is not set +# CONFIG_SND_LX6464ES is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_PCXHR is not set +# CONFIG_SND_RIPTIDE is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_SE6X is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VIRTUOSO is not set +# CONFIG_SND_VX222 is not set +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +# CONFIG_SND_HDA_INTEL is not set +CONFIG_SND_HDA_PHYTIUM=m +# CONFIG_SND_HDA_HWDEP is not set +# CONFIG_SND_HDA_RECONFIG is not set +# CONFIG_SND_HDA_INPUT_BEEP is not set +# CONFIG_SND_HDA_PATCH_LOADER is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +# CONFIG_SND_HDA_CODEC_ANALOG is not set +# CONFIG_SND_HDA_CODEC_SIGMATEL is not set +# CONFIG_SND_HDA_CODEC_VIA is not set +# CONFIG_SND_HDA_CODEC_HDMI is not set +# CONFIG_SND_HDA_CODEC_CIRRUS is not set +# CONFIG_SND_HDA_CODEC_CONEXANT is not set +# CONFIG_SND_HDA_CODEC_CA0110 is not set +# CONFIG_SND_HDA_CODEC_CA0132 is not set +# CONFIG_SND_HDA_CODEC_CMEDIA is not set +# CONFIG_SND_HDA_CODEC_SI3054 is not set +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +# CONFIG_SND_USB_HIFACE is not set +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set 
+# CONFIG_SND_SOC is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=y +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=y +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=y +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# 
CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=m +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +# CONFIG_USB_SERIAL_UPD78F0730 is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m 
+CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +# CONFIG_TYPEC_TCPM is not set +CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +# CONFIG_USB_ROLE_SWITCH is not set +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +CONFIG_MMC_SDHCI_CADENCE=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +CONFIG_MMC_SDHCI_XENON=m +# CONFIG_MMC_SDHCI_OMAP is not set +CONFIG_MMC_PHYTIUM_SDCI=m +CONFIG_MMC_PHYTIUM_MCI_PCI=m +CONFIG_MMC_PHYTIUM_MCI_PLTFM=m +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# 
CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP06=m +CONFIG_INFINIBAND_HNS_HIP08=m +# CONFIG_INFINIBAND_HNS_DFX is not set +# CONFIG_INFINIBAND_HNS_TEST is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is 
not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +# CONFIG_RTC_DS1685_PROC_REGS is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set +CONFIG_RTC_DRV_PHYTIUM=m + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_HT16K33 is not set +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m 
+CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_COMEDI is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_R8188EU is not set +# CONFIG_R8822BE is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7606 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7152 is not set +# CONFIG_AD7746 is not set + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set + +# +# Active energy metering IC +# +# CONFIG_ADE7854 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1210 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_GREYBUS is not set +# CONFIG_PI433 is not set +# CONFIG_MTK_MMC is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_EROFS_FS is not set + +# +# GMJS TCM support +# +CONFIG_GMJS_TCM=y +CONFIG_GMJS_TCM_CORE=m +CONFIG_GMJS_TCM_SPI=m +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y 
+CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +CONFIG_PHYTIUM_MBOX=m +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set + +# +# Generic PASID table support +# +CONFIG_IOMMU_PASID_TABLE=y +CONFIG_ARM_SMMU_V3_CONTEXT=y +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_IOMMU_PAGE_FAULT=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_QCOM_IOMMU is not set +CONFIG_SMMU_BYPASS_DEV=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_ADC_JACK is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_HID_SENSOR_ACCEL_3D is not set +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not 
set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VF610_ADC is not set +CONFIG_PHYTIUM_ADC=m + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# +# CONFIG_HID_SENSOR_IIO_COMMON is not set + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_VF610_DAC is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_HID_SENSOR_GYRO_3D is not set +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HID_SENSOR_HUMIDITY is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# 
CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +# CONFIG_HID_SENSOR_ALS is not set +# CONFIG_HID_SENSOR_PROX is not set +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +# CONFIG_HID_SENSOR_MAGNETOMETER_3D is not set +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set + +# +# Inclinometer sensors +# +# CONFIG_HID_SENSOR_INCLINOMETER_3D is not set +# CONFIG_HID_SENSOR_DEVICE_ROTATION is not set + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_HID_SENSOR_PRESS is not set +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_HID_SENSOR_TEMP is not set +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_NTB is not set +# 
CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_FMC=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m +CONFIG_FMC_CHARDEV=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +# CONFIG_ARM_DSU_PMU is not set +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=y +CONFIG_HISI_PMU=m +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_OF_PMEM=m +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +# CONFIG_QCOM_QFPROM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m + +# +# TEE drivers +# +CONFIG_OPTEE=m +CONFIG_OPTEE_SHM_NUM_PRIV_PAGES=1 +CONFIG_OPTEE_DEFAULT_METHOD_NONE=y +# CONFIG_OPTEE_DEFAULT_METHOD_HVC is not set +# CONFIG_OPTEE_DEFAULT_METHOD_SMC is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_EXT4_PARALLEL_DIO_READ=y +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y 
+CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m 
+CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set +CONFIG_IO_WQ=y +CONFIG_RESCTRL=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=m +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y 
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_INTEGRITY is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m +# CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y 
+CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_RDE is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_INDIRECT_PIO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y 
+CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_SBITMAP=y +CONFIG_ETMEM_SCAN=m +CONFIG_ETMEM_SWAP=m +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_SDEI_WATCHDOG=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_CORELOCKUP_DETECTOR is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# 
CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set diff --git a/arch/arm64/configs/storage_ci_defconfig b/arch/arm64/configs/storage_ci_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..941f76b15c099146acf6e8f2133befb444c22387 --- /dev/null +++ b/arch/arm64/configs/storage_ci_defconfig @@ -0,0 +1,3063 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.19.44 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 7.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70300 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +CONFIG_KTASK=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +# CONFIG_TICK_CPU_ACCOUNTING is not set +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y 
+CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=25 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=20 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_FILES is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y 
+CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_QCOM is not set +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_SEATTLE is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +# CONFIG_ARCH_THUNDER is not set +# CONFIG_ARCH_THUNDER2 is not set +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_XGENE is not set +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +# CONFIG_LIVEPATCH is not set + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +# CONFIG_HOTPLUG_PCI_PCIE is not set +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEBUG=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_ACPI is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCI_HOST_THUNDER_PEM is not set +# CONFIG_PCI_HOST_THUNDER_ECAM is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_HISI is not set +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set + +CONFIG_HISILICON_PCIE_CAE=m +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +# CONFIG_ARM64_ERRATUM_1463225 is not set +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y 
+CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +CONFIG_ARM64_VA_BITS_39=y +# CONFIG_ARM64_VA_BITS_48 is not set +CONFIG_ARM64_VA_BITS=39 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +# CONFIG_MPAM is not set +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=2 +CONFIG_NUMA_AWARE_SPINLOCKS=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +# CONFIG_SECCOMP is not set +# CONFIG_PARAVIRT is not set +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_KEXEC=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +# CONFIG_ARM64_LSE_ATOMICS is not set +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_ARM64_PSEUDO_NMI is not set +# CONFIG_RANDOMIZE_BASE is not set + +# +# Boot options +# +# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +CONFIG_COMPAT=y +CONFIG_AARCH32_EL0=y +# CONFIG_ARM64_ILP32 is not set +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_ARM_SDE_INTERFACE is not set +CONFIG_DMIID=y +# CONFIG_DMI_SYSFS is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_ISCSI_IBFT is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y 
+CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set + +# +# Tegra firmware driver +# +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +# CONFIG_ACPI_BUTTON is not set +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_MCFG=y +# CONFIG_ACPI_PROCESSOR is not set +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_TABLE_UPGRADE is not set +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_HAVE_ACPI_APEI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=y +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +# CONFIG_CRYPTO_SM3_ARM64_CE is not set +# CONFIG_CRYPTO_SM4_ARM64_CE is not set +CONFIG_CRYPTO_GHASH_ARM64_CE=y +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +# CONFIG_CRYPTO_CRC32_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y 
+CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +# CONFIG_BLK_DEV_INTEGRITY is not set +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_WBT_SQ=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +# CONFIG_COHERENT_DEVICE is not set +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_SHRINK_PAGECACHE is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +# CONFIG_PERCPU_STATS is not set +# 
CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +# CONFIG_NET_KEY is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_FOU is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +# CONFIG_NETFILTER is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +# CONFIG_BRIDGE is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# 
CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_HISILICON_LPC is not set +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_BCH=y +CONFIG_MTD_NAND_ECC_BCH=y +# CONFIG_MTD_NAND_DENALI_PCI is not set +# CONFIG_MTD_NAND_DENALI_DT is not set +# CONFIG_MTD_NAND_GPIO is not set +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_DOCG4 is not set +# CONFIG_MTD_NAND_CAFE is not set +CONFIG_MTD_NAND_NANDSIM=m +# CONFIG_MTD_NAND_BRCMNAND is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_NAND_HISI504 is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_FASTMAP=y +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_MTD_UBI_BLOCK=y +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y 
+CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=2 +CONFIG_BLK_DEV_RAM_SIZE=1048576 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# 
CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_HISI_SAS is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_ISCSI=y +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +# CONFIG_ATA_ACPI is not set +# CONFIG_SATA_PMP is not set + +# +# Controllers with non-SFF native interface +# +# CONFIG_SATA_AHCI is not set +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_XGENE is not set +# CONFIG_AHCI_QORIQ is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +# CONFIG_MD_LINEAR is not set +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +# CONFIG_MD_MULTIPATH is not set +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=y +CONFIG_DM_PERSISTENT_DATA=y +# CONFIG_DM_UNSTRIPED is not set +# CONFIG_DM_CRYPT is not set +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=y +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +CONFIG_DM_MIRROR=y +CONFIG_DM_LOG_USERSPACE=y +CONFIG_DM_RAID=y +CONFIG_DM_ZERO=y +CONFIG_DM_MULTIPATH=y +CONFIG_DM_MULTIPATH_QL=y +CONFIG_DM_MULTIPATH_ST=y +CONFIG_DM_DELAY=y +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=y +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +CONFIG_DM_LOG_WRITES=y +# CONFIG_DM_INTEGRITY is not set +CONFIG_DM_ZONED=y +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set 
+CONFIG_TUN=y +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# CONFIG_ETHERNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +# CONFIG_MDIO_DEVICE is not set +# CONFIG_PHYLIB is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Host-side USB support is needed for USB Network Adapter support +# +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=16 +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +# 
CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_VERSATILE is not set +# CONFIG_I2C_THUNDERX is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +# CONFIG_PPS is not set + +# +# PTP clock support +# +# CONFIG_PTP_1588_CLOCK is not set + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_XGENE=y +# CONFIG_GPIO_XILINX is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_HISI is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_RESET_VEXPRESS=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# 
CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +CONFIG_MFD_VEXPRESS_SYSREG=y +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_DRM is not set +# CONFIG_DRM_DP_CEC is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_EFI is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not 
set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_CMEDIA is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_REDRAGON=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# 
CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI 
is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +CONFIG_COMMON_CLK_HI6220=y +CONFIG_RESET_HISI=y +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_CLKSRC_VERSATILE=y +# CONFIG_MAILBOX is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_HI6220_USB is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +# CONFIG_ARM_CCN is not set +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +# CONFIG_ARM_DSU_PMU is not set +# CONFIG_HISI_PMU is not set +# CONFIG_ARM_SPE_PMU is not set 
+CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_OF_PMEM=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_NVMEM=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_TEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +# CONFIG_XFS_POSIX_ACL is not set +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# 
CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_UBIFS_FS_XATTR=y +# CONFIG_UBIFS_FS_ENCRYPTION is not set +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +CONFIG_VXFS_FS=y +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +# CONFIG_NFS_V4_1 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=y +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_SUNRPC_DEBUG=y +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +CONFIG_CIFS_STATS2=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +CONFIG_CIFS_DEBUG2=y +CONFIG_CIFS_DEBUG_DUMP_KEYS=y +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set 
+# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_SEED is not set +# 
CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CAVIUM_ZIP is not set +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_RDE is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +# CONFIG_INDIRECT_PIO is not set +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=y +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_BCH=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +# CONFIG_IRQ_POLL is not set +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not 
set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_TRACE_IRQFLAGS=y +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +CONFIG_FAIL_FUTEX=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACE_PREEMPT_TOGGLE=y +CONFIG_PREEMPTIRQ_EVENTS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +CONFIG_TRACE_EVAL_MAP_FILE=y +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# 
CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +# CONFIG_KGDB_KDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set diff --git a/arch/arm64/configs/syzkaller_defconfig b/arch/arm64/configs/syzkaller_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..6d1eca7cca3a1869c7ff8c5303572e694c95b111 --- /dev/null +++ b/arch/arm64/configs/syzkaller_defconfig @@ -0,0 +1,5650 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.19.33 Kernel Configuration +# + +# +# Compiler: aarch64-linux-gnu-gcc (Linaro GCC 7.3-2018.05) 7.3.1 20180425 [linaro-7.3-2018.05 revision d29120a424ecfbc167ef90065c0eeb7f91977701] +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70301 +CONFIG_CLANG_VERSION=0 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# 
CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_FILES=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +CONFIG_LIVEPATCH_STACK=y +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +# CONFIG_ARM64_ERRATUM_826319 is not set +# CONFIG_ARM64_ERRATUM_827319 is not set +# CONFIG_ARM64_ERRATUM_824069 is not 
set +# CONFIG_ARM64_ERRATUM_819472 is not set +# CONFIG_ARM64_ERRATUM_832075 is not set +# CONFIG_ARM64_ERRATUM_834220 is not set +CONFIG_ARM64_ERRATUM_845719=y +# CONFIG_ARM64_ERRATUM_843419 is not set +# CONFIG_ARM64_ERRATUM_1024718 is not set +# CONFIG_ARM64_ERRATUM_1463225 is not set +# CONFIG_CAVIUM_ERRATUM_22375 is not set +# CONFIG_CAVIUM_ERRATUM_23144 is not set +# CONFIG_CAVIUM_ERRATUM_23154 is not set +# CONFIG_CAVIUM_ERRATUM_27456 is not set +# CONFIG_CAVIUM_ERRATUM_30115 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set +# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set +# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set +CONFIG_HISILICON_ERRATUM_161600802=y +# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_ARM64_ERR_RECOV=y +CONFIG_MPAM=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=3 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_COHERENT_DEVICE=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +CONFIG_ARM64_PSEUDO_NMI=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_ASCEND_FEATURES=y +CONFIG_ASCEND_DVPP_MMAP=y +CONFIG_ASCEND_OOM=y +CONFIG_ASCEND_IOPF_HIPRI=y +CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES=y +# CONFIG_ASCEND_WATCHDOG_SYSFS_CONFIGURE is not set +CONFIG_ASCEND_SHARE_POOL=y + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +CONFIG_COMPAT=y +CONFIG_AARCH32_EL0=y +CONFIG_ARM64_ILP32=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# 
CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_HISILICON_CPPC_CPUFREQ_WORKAROUND=y +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_QORIQ_CPUFREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_ARM_SCMI_PROTOCOL is not set +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y + +# +# Tegra firmware driver +# +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_NFIT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y 
+CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_STATIC_KEYS_SELFTEST=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# 
CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 
+CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MM_OWNER=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO 
is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=y +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m 
+CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=y +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +# 
CONFIG_TIPC_MEDIA_IB is not set +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +# CONFIG_IEEE802154_6LOWPAN is not set +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +# CONFIG_MPLS_ROUTING is not set 
+CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=m +# CONFIG_HAMRADIO is not set +CONFIG_CAN=y +CONFIG_CAN_RAW=y +CONFIG_CAN_BCM=y +CONFIG_CAN_GW=y +CONFIG_CAN_J1939=y + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_XILINXCAN is not set +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is 
not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX 
is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID 
is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=y +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set 
+# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=y +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=y +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_NET_XGENE=y 
+CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +# CONFIG_BNXT_HWMON is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_HNS3_CAE=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_BMA=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +# CONFIG_MLX5_CORE is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +# CONFIG_MLXSW_SWITCHIB is not set +# CONFIG_MLXSW_SWITCHX2 is not set +# CONFIG_MLXSW_SPECTRUM is not set +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_QLGE=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set 
+CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_THUNDER=m +CONFIG_MDIO_XGENE=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +# CONFIG_ASIX_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m 
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_IEEE802154_DRIVERS is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y 
+CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +# CONFIG_MOUSE_ELAN_I2C is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=8 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m 
+CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_XGENE=y +CONFIG_HW_RANDOM_CAVIUM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set +CONFIG_HISI_SVM=y +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_QUP=y +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# 
CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=y +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=y +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SDM845 is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y 
+CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +# CONFIG_SENSORS_I5K_AMB is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m 
+CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# + +# +# Qualcomm thermal drivers +# +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m 
+CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI 
is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VM=y + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON_USERPTR=y +# CONFIG_DRM_AMDGPU is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# 
CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +# CONFIG_SND is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=y +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=y +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=y +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m 
+CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=m +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m 
+CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +# CONFIG_USB_SERIAL_UPD78F0730 is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# CONFIG_USB_RIO500 is not set +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +# CONFIG_TYPEC_TCPM is not set 
+CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +# CONFIG_USB_ROLE_SWITCH is not set +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m 
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP06=m +CONFIG_INFINIBAND_HNS_HIP08=m +CONFIG_INFINIBAND_HNS_DFX=m +CONFIG_INFINIBAND_HNS_TEST=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# 
CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +# CONFIG_RTC_DS1685_PROC_REGS is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_HT16K33 is not set +CONFIG_UIO=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=y +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=y +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=y +CONFIG_VFIO_VIRQFD=y +CONFIG_VFIO=y +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=y +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VFIO_SPIMDEV=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y 
+CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set + +# +# Generic PASID table support +# +CONFIG_IOMMU_PASID_TABLE=y +CONFIG_ARM_SMMU_V3_CONTEXT=y +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_IOMMU_PAGE_FAULT=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_QCOM_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_FMC=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m +CONFIG_FMC_CHARDEV=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# 
CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +# CONFIG_ARM_DSU_PMU is not set +CONFIG_HISI_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=y +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_OF_PMEM=m +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_NVMEM=y +# CONFIG_QCOM_QFPROM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m + +# +# TEE drivers +# +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +CONFIG_UACCE=y +# CONFIG_WD_DUMMY_DEV is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo 
filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# 
CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set +CONFIG_RESCTRL=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +CONFIG_EVM_ADD_XATTRS=y +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y 
+CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# 
CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_QM_UACCE=y +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_HISI_SGL=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_RDE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_INDIRECT_PIO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# 
Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +# CONFIG_DEBUG_KMEMLEAK_TEST is not set +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_VM_VMACACHE=y +CONFIG_DEBUG_VM_RB=y +CONFIG_DEBUG_VM_PGFLAGS=y +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_KASAN=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +# CONFIG_TEST_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +CONFIG_KCOV=y +CONFIG_KCOV_INSTRUMENT_ALL=y +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_PMU_WATCHDOG=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_TRACE_IRQFLAGS=y +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=120 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +CONFIG_FAIL_FUTEX=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAIL_MMC_REQUEST=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# 
CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set diff --git a/arch/arm64/configs/test_defconfig b/arch/arm64/configs/test_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..b0c4decd2d3adffed7e3f45e8ca1c05ea7528d44 --- /dev/null +++ b/arch/arm64/configs/test_defconfig @@ -0,0 +1,5634 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 4.19.29 Kernel Configuration +# + +# +# Compiler: aarch64-linux-gnu-gcc (Linaro GCC 7.3-2018.05) 7.3.1 20180425 [linaro-7.3-2018.05 revision d29120a424ecfbc167ef90065c0eeb7f91977701] +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70301 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y 
+CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y 
+CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA32=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +CONFIG_LIVEPATCH_STACK=y +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y + +# +# Bus support +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_HOST is not set +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_QCOM is not set +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +# CONFIG_ARM64_ERRATUM_826319 is not set +# CONFIG_ARM64_ERRATUM_827319 is not set +# CONFIG_ARM64_ERRATUM_824069 is not set +# CONFIG_ARM64_ERRATUM_819472 is not set +# CONFIG_ARM64_ERRATUM_832075 is not set +# CONFIG_ARM64_ERRATUM_834220 is not 
set +CONFIG_ARM64_ERRATUM_845719=y +# CONFIG_ARM64_ERRATUM_843419 is not set +# CONFIG_ARM64_ERRATUM_1024718 is not set +# CONFIG_CAVIUM_ERRATUM_22375 is not set +# CONFIG_CAVIUM_ERRATUM_23144 is not set +# CONFIG_CAVIUM_ERRATUM_23154 is not set +# CONFIG_CAVIUM_ERRATUM_27456 is not set +# CONFIG_CAVIUM_ERRATUM_30115 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set +# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set +# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set +# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set +CONFIG_HISILICON_ERRATUM_161600802=y +# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_ARM64_ERR_RECOV=y +CONFIG_MPAM=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=3 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +CONFIG_ARM64_PSEUDO_NMI=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +CONFIG_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# 
+CONFIG_ARM_CPUIDLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_HISILICON_CPPC_CPUFREQ_WORKAROUND=y +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_QORIQ_CPUFREQ is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_SCMI_PROTOCOL is not set +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y + +# +# Tegra firmware driver +# +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_NFIT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m 
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=m +# CONFIG_CRYPTO_CRCT10DIF_ARM64_CE is not set +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_AES_ARM64_CE_BLK=m +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m +# CONFIG_CRYPTO_CHACHA20_NEON is not set +# CONFIG_CRYPTO_AES_ARM64_BS is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_STATIC_KEYS_SELFTEST=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_NMI_WATCHDOG=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y 
+CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y 
+CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_IDLE_PAGE_TRACKING=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m 
+CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=y +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m 
+CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +# CONFIG_IP_SET_HASH_IPMAC is not set +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS 
scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=y +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set 
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=y +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=y +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +# CONFIG_TIPC_MEDIA_IB is not set +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +# CONFIG_IEEE802154_6LOWPAN is not set +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m 
+CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=m +# CONFIG_HAMRADIO is not set +CONFIG_CAN=y +CONFIG_CAN_RAW=y +CONFIG_CAN_BCM=y +CONFIG_CAN_GW=y + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_XILINXCAN is not set +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# 
+CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +CONFIG_HISILICON_LPC=y +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +# CONFIG_MTD_PHYSMAP_OF_VERSATILE is not set +# CONFIG_MTD_PHYSMAP_OF_GEMINI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y 
+CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_BLK_SCSI=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=y +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m 
+CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_CHELSIO_FCOE=m +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=y +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set 
+# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=y +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is 
not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=y +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_NET_XGENE=y +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +# CONFIG_BNXT_HWMON is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +# CONFIG_MLX5_CORE is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +# CONFIG_MLXSW_SWITCHIB is not set +# CONFIG_MLXSW_SWITCHX2 is not set +# CONFIG_MLXSW_SPECTRUM is not set +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_QLGE=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m 
+CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +CONFIG_MDIO_OCTEON=m +CONFIG_MDIO_THUNDER=m +CONFIG_MDIO_XGENE=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +# CONFIG_ASIX_PHY is not set +CONFIG_AT803X_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +# CONFIG_CORTINA_PHY is not set +CONFIG_DAVICOM_PHY=m +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +# CONFIG_INTEL_XWAY_PHY is not set +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +# CONFIG_MARVELL_10G_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set 
+CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_IEEE802154_DRIVERS is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y 
+CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +# CONFIG_MOUSE_ELAN_I2C is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F55 is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=8 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +# CONFIG_SERIAL_8250_MOXA is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST=y +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART 
is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_XGENE=y +CONFIG_HW_RANDOM_CAVIUM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_DEVPORT is not set +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_QUP=y +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_XGENE_SLIMPRO=m +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set 
+CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=y +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=y +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SDM845 is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=y +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is 
not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +# CONFIG_SENSORS_I5K_AMB is not set +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m 
+CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC3815=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_PWM_FAN=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +CONFIG_SENSORS_ADC128D818=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +CONFIG_CPU_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# + +# +# Qualcomm thermal drivers +# +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# 
CONFIG_QCOM_WDT is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is 
not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_DRM=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +# CONFIG_DEBUG_KERNEL_DC is not set + +# +# AMD Library routines +# +CONFIG_CHASH=m +# CONFIG_CHASH_STATS is not set +# CONFIG_CHASH_SELFTEST is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not 
set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +# CONFIG_SND is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=y +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=y +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not 
set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=y +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=m +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# 
CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +# CONFIG_USB_SERIAL_MXUPORT is not set +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +# CONFIG_USB_SERIAL_UPD78F0730 is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# CONFIG_USB_RIO500 is not set +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m 
+# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +# CONFIG_TYPEC_TCPM is not set +CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +# CONFIG_USB_ROLE_SWITCH is not set +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# 
CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP06=m +CONFIG_INFINIBAND_HNS_HIP08=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=y +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# 
+CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +# CONFIG_RTC_DS1685_PROC_REGS is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_HT16K33 is not set +CONFIG_UIO=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=y +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=y +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO_IOMMU_TYPE1=y +CONFIG_VFIO_VIRQFD=y +CONFIG_VFIO=y +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=y +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_VFIO_SPIMDEV=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# 
CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +# CONFIG_QCOM_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +CONFIG_COMMON_RESET_HI6220=y +CONFIG_FMC=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m 
+CONFIG_FMC_CHARDEV=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_TUSB1210 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +# CONFIG_ARM_DSU_PMU is not set +CONFIG_HISI_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=y +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_OF_PMEM=m +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_NVMEM=y +# CONFIG_QCOM_QFPROM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m + +# +# TEE drivers +# +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y 
+CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y 
+CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set +CONFIG_RESCTRL=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +CONFIG_EVM_ADD_XATTRS=y +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y 
+CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=m +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# 
CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISILICON=m +CONFIG_CRYPTO_DEV_HISI_SPIMDEV=y +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_DEV_HISI_ZIP=m +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_INDIRECT_PIO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +CONFIG_CRC8=m +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y 
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +# CONFIG_DEBUG_KMEMLEAK_TEST is not set +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_SDEI_WATCHDOG=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=y +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +CONFIG_TORTURE_TEST=y +# CONFIG_RCU_PERF_TEST is not set +CONFIG_RCU_TORTURE_TEST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +CONFIG_TEST_PRINTF=m +CONFIG_TEST_BITMAP=m +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +CONFIG_TEST_USER_COPY=m +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +CONFIG_TEST_SYSCTL=y +# 
CONFIG_TEST_UDELAY is not set +CONFIG_TEST_STATIC_KEYS=m +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index e3fdb0fd6f700c77a0ee0a9b8fa625a15a3d329e..a5606823ed4da3ccbe72ee141859ef506a9e9526 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -66,11 +66,6 @@ config CRYPTO_CRCT10DIF_ARM64_CE depends on KERNEL_MODE_NEON && CRC_T10DIF select CRYPTO_HASH -config CRYPTO_CRC32_ARM64_CE - tristate "CRC32 and CRC32C digest algorithms using ARMv8 extensions" - depends on CRC32 - select CRYPTO_HASH - config CRYPTO_AES_ARM64 tristate "AES core cipher using scalar instructions" select CRYPTO_AES @@ -119,10 +114,4 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index bcafd016618eabee517a505096ff3f3c37ec0130..9af4dd81f22b1802bc99ef4b0f08864ab9f19c47 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -30,10 +30,8 @@ obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o -crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o - -obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o -crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o +crct10dif-ce-y := crct10dif-neon-asm_64.o crct10dif-neon_glue.o +AFLAGS_crct10dif-neon-asm_64.o := -march=armv8-a+crypto obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o @@ -56,9 +54,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o - obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index e3a375c4cb83c383242ac6b9cc8b3247939e0947..1b151442dac1f26593d3d44f52c066e6f8b89d44 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data) beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w8 +8: cbz w8, 91f + mov w7, w8 add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b - eor v0.16b, v0.16b, v1.16b +91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: str w8, [x3] ret diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c 
b/arch/arm64/crypto/aes-ce-ccm-glue.c index 68b11aa690e476d1011c50480da444b8bde91479..5fc6f51908fdd916f80d08d2931185b37dcb1d50 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], abytes -= added; } - while (abytes > AES_BLOCK_SIZE) { + while (abytes >= AES_BLOCK_SIZE) { __aes_arm64_encrypt(key->key_enc, mac, mac, num_rounds(key)); crypto_xor(mac, in, AES_BLOCK_SIZE); @@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], num_rounds(key)); crypto_xor(mac, in, abytes); *macp = abytes; - } else { - *macp = 0; } } } @@ -255,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { @@ -313,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S index e613a87f8b53ffed85bdb9cf9f26c063e8755691..8432c8d0dea66ddc19f6061c20cab8bd66429b5b 100644 --- a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 ) 8: next_ctr v0 st1 {v0.16b}, [x24] - cbz x23, 0f + cbz x23, .Lctr_done cond_yield_neon 98b b 99b -0: frame_pop +.Lctr_done: + frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. */ +0: cbz x25, 8b + st1 {v0.16b}, [x25] + b 8b 1: cbz x25, 8b st1 {v1.16b}, [x25] b 8b diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index e7a95a566462f259c227d924ae61eaa5697a23d4..5cc24896738719bdf57f98a24017914b2cf09e93 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req, int err; err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; kernel_neon_begin(); neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1); diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S deleted file mode 100644 index 8061bf0f9c66ab052e9bcd0b118872f49e7bf14c..0000000000000000000000000000000000000000 --- a/arch/arm64/crypto/crc32-ce-core.S +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions - * - * Copyright (C) 2016 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - * - * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 - * calculation. - * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) - * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found - * at: - * http://www.intel.com/products/processor/manuals/ - * Intel(R) 64 and IA-32 Architectures Software Developer's Manual - * Volume 2B: Instruction Set Reference, N-Z - * - * Authors: Gregory Prestas - * Alexander Boyko - */ - -#include -#include - - .section ".rodata", "a" - .align 6 - .cpu generic+crypto+crc - -.Lcrc32_constants: - /* - * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 - * #define CONSTANT_R1 0x154442bd4LL - * - * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596 - * #define CONSTANT_R2 0x1c6e41596LL - */ - .octa 0x00000001c6e415960000000154442bd4 - - /* - * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0 - * #define CONSTANT_R3 0x1751997d0LL - * - * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e - * #define CONSTANT_R4 0x0ccaa009eLL - */ - .octa 0x00000000ccaa009e00000001751997d0 - - /* - * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124 - * #define CONSTANT_R5 0x163cd6124LL - */ - .quad 0x0000000163cd6124 - .quad 0x00000000FFFFFFFF - - /* - * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL - * - * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` - * = 0x1F7011641LL - * #define CONSTANT_RU 0x1F7011641LL - */ - .octa 0x00000001F701164100000001DB710641 - -.Lcrc32c_constants: - .octa 0x000000009e4addf800000000740eef02 - .octa 0x000000014cd00bd600000000f20c0dfe - .quad 0x00000000dd45aab8 - .quad 0x00000000FFFFFFFF - .octa 0x00000000dea713f10000000105ec76f0 - - vCONSTANT .req v0 - dCONSTANT .req d0 - qCONSTANT .req q0 - - BUF .req x19 - LEN .req x20 - CRC .req x21 - CONST .req x22 - - vzr .req v9 - - /** - * Calculate crc32 - * BUF - buffer - * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63 - * CRC - initial crc32 - * return %eax crc32 - * uint crc32_pmull_le(unsigned char const *buffer, - * size_t len, uint crc32) - */ - .text -ENTRY(crc32_pmull_le) - adr_l x3, .Lcrc32_constants - b 0f - -ENTRY(crc32c_pmull_le) - adr_l x3, .Lcrc32c_constants - -0: frame_push 4, 64 - - mov BUF, x0 - mov LEN, x1 - mov CRC, x2 - mov CONST, x3 - - bic LEN, LEN, #15 - ld1 {v1.16b-v4.16b}, [BUF], #0x40 - movi vzr.16b, #0 - fmov dCONSTANT, CRC - eor v1.16b, v1.16b, vCONSTANT.16b - sub LEN, LEN, #0x40 - cmp LEN, #0x40 - b.lt less_64 - - ldr qCONSTANT, [CONST] - -loop_64: /* 64 bytes Full cache line folding */ - sub LEN, LEN, #0x40 - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull2 v6.1q, v2.2d, vCONSTANT.2d - pmull2 v7.1q, v3.2d, vCONSTANT.2d - pmull2 v8.1q, v4.2d, vCONSTANT.2d - - pmull v1.1q, v1.1d, vCONSTANT.1d - pmull v2.1q, v2.1d, vCONSTANT.1d - pmull v3.1q, v3.1d, vCONSTANT.1d - pmull v4.1q, v4.1d, vCONSTANT.1d - - eor v1.16b, v1.16b, v5.16b - ld1 {v5.16b}, [BUF], #0x10 - eor v2.16b, 
v2.16b, v6.16b - ld1 {v6.16b}, [BUF], #0x10 - eor v3.16b, v3.16b, v7.16b - ld1 {v7.16b}, [BUF], #0x10 - eor v4.16b, v4.16b, v8.16b - ld1 {v8.16b}, [BUF], #0x10 - - eor v1.16b, v1.16b, v5.16b - eor v2.16b, v2.16b, v6.16b - eor v3.16b, v3.16b, v7.16b - eor v4.16b, v4.16b, v8.16b - - cmp LEN, #0x40 - b.lt less_64 - - if_will_cond_yield_neon - stp q1, q2, [sp, #.Lframe_local_offset] - stp q3, q4, [sp, #.Lframe_local_offset + 32] - do_cond_yield_neon - ldp q1, q2, [sp, #.Lframe_local_offset] - ldp q3, q4, [sp, #.Lframe_local_offset + 32] - ldr qCONSTANT, [CONST] - movi vzr.16b, #0 - endif_yield_neon - b loop_64 - -less_64: /* Folding cache line into 128bit */ - ldr qCONSTANT, [CONST, #16] - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v2.16b - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v3.16b - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v4.16b - - cbz LEN, fold_64 - -loop_16: /* Folding rest buffer into 128bit */ - subs LEN, LEN, #0x10 - - ld1 {v2.16b}, [BUF], #0x10 - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v2.16b - - b.ne loop_16 - -fold_64: - /* perform the last 64 bit fold, also adds 32 zeroes - * to the input stream */ - ext v2.16b, v1.16b, v1.16b, #8 - pmull2 v2.1q, v2.2d, vCONSTANT.2d - ext v1.16b, v1.16b, vzr.16b, #8 - eor v1.16b, v1.16b, v2.16b - - /* final 32-bit fold */ - ldr dCONSTANT, [CONST, #32] - ldr d3, [CONST, #40] - - ext v2.16b, v1.16b, vzr.16b, #4 - and v1.16b, v1.16b, v3.16b - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v2.16b - - /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ - ldr qCONSTANT, [CONST, #48] - - and v2.16b, v1.16b, v3.16b - ext v2.16b, vzr.16b, v2.16b, #8 - pmull2 v2.1q, v2.2d, vCONSTANT.2d - and v2.16b, v2.16b, v3.16b - pmull v2.1q, v2.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v2.16b - mov w0, v1.s[1] - - frame_pop - ret -ENDPROC(crc32_pmull_le) -ENDPROC(crc32c_pmull_le) - - .macro __crc32, c -0: subs x2, x2, #16 - b.mi 8f - ldp x3, x4, [x1], #16 -CPU_BE( rev x3, x3 ) -CPU_BE( rev x4, x4 ) - crc32\c\()x w0, w0, x3 - crc32\c\()x w0, w0, x4 - b.ne 0b - ret - -8: tbz x2, #3, 4f - ldr x3, [x1], #8 -CPU_BE( rev x3, x3 ) - crc32\c\()x w0, w0, x3 -4: tbz x2, #2, 2f - ldr w3, [x1], #4 -CPU_BE( rev w3, w3 ) - crc32\c\()w w0, w0, w3 -2: tbz x2, #1, 1f - ldrh w3, [x1], #2 -CPU_BE( rev16 w3, w3 ) - crc32\c\()h w0, w0, w3 -1: tbz x2, #0, 0f - ldrb w3, [x1] - crc32\c\()b w0, w0, w3 -0: ret - .endm - - .align 5 -ENTRY(crc32_armv8_le) - __crc32 -ENDPROC(crc32_armv8_le) - - .align 5 -ENTRY(crc32c_armv8_le) - __crc32 c -ENDPROC(crc32c_armv8_le) diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c deleted file mode 100644 index 34b4e3d46aab41b2d05d7e68e58d3cc1d7f4d304..0000000000000000000000000000000000000000 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions - * - * Copyright (C) 2016 - 2017 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -#define PMULL_MIN_LEN 64L /* minimum size of buffer - * for crc32_pmull_le_16 */ -#define SCALE_F 16L /* size of NEON register */ - -asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc); -asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len); - -asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc); -asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len); - -static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len); -static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len); - -static int crc32_pmull_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 0; - return 0; -} - -static int crc32c_pmull_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - return 0; -} - -static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = le32_to_cpup((__le32 *)key); - return 0; -} - -static int crc32_pmull_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crc = shash_desc_ctx(desc); - - *crc = *mctx; - return 0; -} - -static int crc32_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - - *crc = crc32_armv8_le(*crc, data, length); - return 0; -} - -static int crc32c_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - - *crc = crc32c_armv8_le(*crc, data, length); - return 0; -} - -static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - unsigned int l; - - if ((u64)data % SCALE_F) { - l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); - - *crc = fallback_crc32(*crc, data, l); - - data += l; - length -= l; - } - - if (length >= PMULL_MIN_LEN && may_use_simd()) { - l = round_down(length, SCALE_F); - - kernel_neon_begin(); - *crc = crc32_pmull_le(data, l, *crc); - kernel_neon_end(); - - data += l; - length -= l; - } - - if (length > 0) - *crc = fallback_crc32(*crc, data, length); - - return 0; -} - -static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - unsigned int l; - - if ((u64)data % SCALE_F) { - l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); - - *crc = fallback_crc32c(*crc, data, l); - - data += l; - length -= l; - } - - if (length >= PMULL_MIN_LEN && may_use_simd()) { - l = round_down(length, SCALE_F); - - kernel_neon_begin(); - *crc = crc32c_pmull_le(data, l, *crc); - kernel_neon_end(); - - data += l; - length -= l; - } - - if (length > 0) { - *crc = fallback_crc32c(*crc, data, length); - } - - return 0; -} - -static int crc32_pmull_final(struct shash_desc *desc, u8 *out) -{ - u32 *crc = shash_desc_ctx(desc); - - put_unaligned_le32(*crc, out); - return 0; -} - -static int crc32c_pmull_final(struct shash_desc *desc, u8 *out) -{ - u32 *crc = shash_desc_ctx(desc); - - put_unaligned_le32(~*crc, out); - return 0; -} - -static struct shash_alg crc32_pmull_algs[] = { { - .setkey = crc32_pmull_setkey, - .init = crc32_pmull_init, - .update = crc32_update, - .final = crc32_pmull_final, - .descsize = sizeof(u32), - 
.digestsize = sizeof(u32), - - .base.cra_ctxsize = sizeof(u32), - .base.cra_init = crc32_pmull_cra_init, - .base.cra_name = "crc32", - .base.cra_driver_name = "crc32-arm64-ce", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = 1, - .base.cra_module = THIS_MODULE, -}, { - .setkey = crc32_pmull_setkey, - .init = crc32_pmull_init, - .update = crc32c_update, - .final = crc32c_pmull_final, - .descsize = sizeof(u32), - .digestsize = sizeof(u32), - - .base.cra_ctxsize = sizeof(u32), - .base.cra_init = crc32c_pmull_cra_init, - .base.cra_name = "crc32c", - .base.cra_driver_name = "crc32c-arm64-ce", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = 1, - .base.cra_module = THIS_MODULE, -} }; - -static int __init crc32_pmull_mod_init(void) -{ - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) { - crc32_pmull_algs[0].update = crc32_pmull_update; - crc32_pmull_algs[1].update = crc32c_pmull_update; - - if (elf_hwcap & HWCAP_CRC32) { - fallback_crc32 = crc32_armv8_le; - fallback_crc32c = crc32c_armv8_le; - } else { - fallback_crc32 = crc32_le; - fallback_crc32c = __crc32c_le; - } - } else if (!(elf_hwcap & HWCAP_CRC32)) { - return -ENODEV; - } - return crypto_register_shashes(crc32_pmull_algs, - ARRAY_SIZE(crc32_pmull_algs)); -} - -static void __exit crc32_pmull_mod_exit(void) -{ - crypto_unregister_shashes(crc32_pmull_algs, - ARRAY_SIZE(crc32_pmull_algs)); -} - -static const struct cpu_feature crc32_cpu_feature[] = { - { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } -}; -MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); - -module_init(crc32_pmull_mod_init); -module_exit(crc32_pmull_mod_exit); - -MODULE_AUTHOR("Ard Biesheuvel "); -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index 96f0cae4a02258bad9cf63639da45190499d1696..617bcfc1b0804e6e543c14b7da08ac4f54028801 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -36,26 +36,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); - - *crc = crc_t10dif_generic(*crc, data, l); - - length -= l; - data += l; - } - - if (length > 0) { - if (may_use_simd()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } else { - *crc = crc_t10dif_generic(*crc, data, length); - } + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); } return 0; diff --git a/arch/arm64/crypto/crct10dif-neon-asm_64.S b/arch/arm64/crypto/crct10dif-neon-asm_64.S new file mode 100644 index 0000000000000000000000000000000000000000..a37204bf5a7a2f7b233a6c5056289ff81515c9d0 --- /dev/null +++ b/arch/arm64/crypto/crct10dif-neon-asm_64.S @@ -0,0 +1,752 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */
+
+#include
+#include
+
+.global crc_t10dif_neon
+.text
+
+/* X0 is the initial CRC value
+ * X1 is the data buffer
+ * X2 is the length of the buffer
+ * X3 is a backup buffer (reserved for extension)
+ * X4 is reserved for other extension parameters
+ * Q0, Q1, Q2, Q3 may be used as parameters for other functions;
+ * the values of Q0, Q1, Q2, Q3 may be modified.
+ *
+ * Suggestions:
+ * 1. don't use general-purpose registers for the calculation
+ * 2. set data endianness outside of the kernel
+ * 3. use EXT for shifting/rotation
+ * 4. don't use LD3/LD4 or ST3/ST4
+ */
+
+crc_t10dif_neon:
+ /* push the registers that the CRC16 code will use onto the stack */
+ STP X5, X6, [sp, #-0x10]!
+ STP X7, X8, [sp, #-0x10]!
+ STP X9, X10, [sp, #-0x10]!
+ STP X11, X12, [sp, #-0x10]!
+ STP X13, X14, [sp, #-0x10]!
+ STP Q10, Q11, [sp, #-0x20]!
+ STP Q12, Q13, [sp, #-0x20]!
+ STP Q4, Q5, [sp, #-0x20]!
+ STP Q6, Q7, [sp, #-0x20]!
+ STP Q8, Q9, [sp, #-0x20]!
+ STP Q14, Q15, [sp, #-0x20]!
+ STP Q16, Q17, [sp, #-0x20]!
+ STP Q18, Q19, [sp, #-0x20]!
+
+ SUB sp,sp,#0x20
+
+ MOV X11, #0 // PUSH STACK FLAG
+
+ CMP X2, #0x80
+ B.LT 2f // _less_than_128, <128
+
+ /* V10/V11/V12/V13 are 128-bit registers;
+ * we fetch 512 bits of data (one cache line) at a time
+ */
+ LDP Q10, Q11, [X1], #0x20
+ LDP Q12, Q13, [X1], #0x20
+
+ /* move the initial value into the V6 register */
+ LSL X0, X0, #48
+ EOR V6.16B, V6.16B, V6.16B
+ MOV V6.D[1], X0
+
+ /* endianness swap: the data in memory is little-endian,
+ * so convert it for big-endian processing
+ */
+
+ REV64 V10.16B, V10.16B
+ REV64 V11.16B, V11.16B
+ REV64 V12.16B, V12.16B
+ REV64 V13.16B, V13.16B
+ EXT V10.16B, V10.16B, V10.16B, #8
+ EXT V11.16B, V11.16B, V11.16B, #8
+ EXT V12.16B, V12.16B, V12.16B, #8
+ EXT V13.16B, V13.16B, V13.16B, #8
+
+ EOR V10.16B, V10.16B, V6.16B
+
+ SUB X2, X2, #0x80
+ ADD X5, X1, #0x20
+
+ /* process the data when the buffer is larger than 128 bytes */
+ /* _fold_64_B_loop */
+ LDR Q6,=0xe658000000000000044c000000000000
+1:
+
+ LDP Q16, Q17, [X1] ,#0x40
+ LDP Q18, Q19, [X5], #0x40
+
+ /* carry-less multiply.
+ * V10 high-64bits carry-less multiply + * V6 high-64bits(PMULL2) + * V11 low-64bits carry-less multiply V6 low-64bits(PMULL) + */ + + PMULL2 V4.1Q, V10.2D, V6.2D + PMULL V10.1Q, V10.1D, V6.1D + PMULL2 V5.1Q, V11.2D, V6.2D + PMULL V11.1Q, V11.1D, V6.1D + + REV64 V16.16B, V16.16B + REV64 V17.16B, V17.16B + REV64 V18.16B, V18.16B + REV64 V19.16B, V19.16B + + PMULL2 V14.1Q, V12.2D, V6.2D + PMULL V12.1Q, V12.1D, V6.1D + PMULL2 V15.1Q, V13.2D, V6.2D + PMULL V13.1Q, V13.1D, V6.1D + + EXT V16.16B, V16.16B, V16.16B, #8 + EOR V10.16B, V10.16B, V4.16B + + EXT V17.16B, V17.16B, V17.16B, #8 + EOR V11.16B, V11.16B, V5.16B + + EXT V18.16B, V18.16B, V18.16B, #8 + EOR V12.16B, V12.16B, V14.16B + + EXT V19.16B, V19.16B, V19.16B, #8 + EOR V13.16B, V13.16B, V15.16B + + SUB X2, X2, #0x40 + + + EOR V10.16B, V10.16B, V16.16B + EOR V11.16B, V11.16B, V17.16B + + EOR V12.16B, V12.16B, V18.16B + EOR V13.16B, V13.16B, V19.16B + + CMP X2, #0x0 + B.GE 1b // >=0 + + LDR Q6, =0x06df0000000000002d56000000000000 + MOV V4.16B, V10.16B + /* V10 carry-less 0x06df000000000000([127:64]*[127:64]) */ + PMULL V4.1Q, V4.1D, V6.1D //switch PMULL & PMULL2 order + PMULL2 V10.1Q, V10.2D, V6.2D + EOR V11.16B, V11.16B, V4.16B + EOR V11.16B, V11.16B, V10.16B + + MOV V4.16B, V11.16B + PMULL V4.1Q, V4.1D, V6.1D //switch PMULL & PMULL2 order + PMULL2 V11.1Q, V11.2D, V6.2D + EOR V12.16B, V12.16B, V4.16B + EOR V12.16B, V12.16B, V11.16B + + MOV V4.16B, V12.16B + PMULL V4.1Q, V4.1D, V6.1D //switch PMULL & PMULL2 order + PMULL2 V12.1Q, V12.2D, V6.2D + EOR V13.16B, V13.16B, V4.16B + EOR V13.16B, V13.16B, V12.16B + + ADD X2, X2, #48 + CMP X2, #0x0 + B.LT 3f // _final_reduction_for_128, <0 + + /* _16B_reduction_loop */ +4: + /* unrelated load as early as possible*/ + LDR Q10, [X1], #0x10 + + MOV V4.16B, V13.16B + PMULL2 V13.1Q, V13.2D, V6.2D + PMULL V4.1Q, V4.1D, V6.1D + EOR V13.16B, V13.16B, V4.16B + + REV64 V10.16B, V10.16B + EXT V10.16B, V10.16B, V10.16B, #8 + + EOR V13.16B, V13.16B, V10.16B + + SUB X2, X2, #0x10 + CMP X2, #0x0 + B.GE 4b // _16B_reduction_loop, >=0 + + /* _final_reduction_for_128 */ +3: ADD X2, X2, #0x10 + CMP X2, #0x0 + B.EQ 5f // _128_done, ==0 + + /* _get_last_two_xmms */ +6: MOV V12.16B, V13.16B + SUB X1, X1, #0x10 + ADD X1, X1, X2 + LDR Q11, [X1], #0x10 + REV64 V11.16B, V11.16B + EXT V11.16B, V11.16B, V11.16B, #8 + + CMP X2, #8 + B.EQ 50f + B.LT 51f + B.GT 52f + +50: + /* dont use X register as temp one */ + FMOV D14, D12 + MOVI D12, #0 + MOV V12.D[1],V14.D[0] + B 53f +51: + MOV X9, #64 + LSL X13, X2, #3 // <<3 equal x8 + SUB X9, X9, X13 + MOV X5, V12.D[0] // low 64-bit + MOV X6, V12.D[1] // high 64-bit + LSR X10, X5, X9 // high bit of low 64-bit + LSL X7, X5, X13 + LSL X8, X6, X13 + ORR X8, X8, X10 // combination of high 64-bit + MOV V12.D[1], X8 + MOV V12.D[0], X7 + + B 53f +52: + LSL X13, X2, #3 // <<3 equal x8 + SUB X13, X13, #64 + + DUP V18.2D, X13 + FMOV D16, D12 + USHL D16, D16, D18 + EXT V12.16B, V16.16B, V16.16B, #8 + +53: + MOVI D14, #0 //add one zero constant + + CMP X2, #0 + B.EQ 30f + CMP X2, #1 + B.EQ 31f + CMP X2, #2 + B.EQ 32f + CMP X2, #3 + B.EQ 33f + CMP X2, #4 + B.EQ 34f + CMP X2, #5 + B.EQ 35f + CMP X2, #6 + B.EQ 36f + CMP X2, #7 + B.EQ 37f + CMP X2, #8 + B.EQ 38f + CMP X2, #9 + B.EQ 39f + CMP X2, #10 + B.EQ 40f + CMP X2, #11 + B.EQ 41f + CMP X2, #12 + B.EQ 42f + CMP X2, #13 + B.EQ 43f + CMP X2, #14 + B.EQ 44f + CMP X2, #15 + B.EQ 45f + + // >> 128bit +30: + EOR V13.16B, V13.16B, V13.16B + EOR V8.16B, V8.16B, V8.16B + LDR Q9,=0xffffffffffffffffffffffffffffffff + B 46f + + // >> 120bit +31: + USHR 
V13.2D, V13.2D, #56 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xff + LDR Q9,=0xffffffffffffffffffffffffffffff00 + B 46f + + // >> 112bit +32: + USHR V13.2D, V13.2D, #48 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffff + LDR Q9,=0xffffffffffffffffffffffffffff0000 + B 46f + + // >> 104bit +33: + USHR V13.2D, V13.2D, #40 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffff + LDR Q9,=0xffffffffffffffffffffffffff000000 + B 46f + + // >> 96bit +34: + USHR V13.2D, V13.2D, #32 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffffff + LDR Q9,=0xffffffffffffffffffffffff00000000 + B 46f + + // >> 88bit +35: + USHR V13.2D, V13.2D, #24 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffffffff + LDR Q9,=0xffffffffffffffffffffff0000000000 + B 46f + + // >> 80bit +36: + USHR V13.2D, V13.2D, #16 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffffffffff + LDR Q9,=0xffffffffffffffffffff000000000000 + B 46f + + // >> 72bit +37: + USHR V13.2D, V13.2D, #8 + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffffffffffff + LDR Q9,=0xffffffffffffffffff00000000000000 + B 46f + + // >> 64bit +38: + EXT V13.16B, V13.16B, V14.16B, #8 + LDR Q8,=0xffffffffffffffff + LDR Q9,=0xffffffffffffffff0000000000000000 + B 46f + + // >> 56bit +39: + EXT V13.16B, V13.16B, V13.16B, #7 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + MOV V13.B[9], V14.B[0] + + LDR Q8,=0xffffffffffffffffff + LDR Q9,=0xffffffffffffff000000000000000000 + B 46f + + // >> 48bit +40: + EXT V13.16B, V13.16B, V13.16B, #6 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + + LDR Q8,=0xffffffffffffffffffff + LDR Q9,=0xffffffffffff00000000000000000000 + B 46f + + // >> 40bit +41: + EXT V13.16B, V13.16B, V13.16B, #5 + MOV V13.S[3], V14.S[0] + MOV V13.B[11], V14.B[0] + + LDR Q8,=0xffffffffffffffffffffff + LDR Q9,=0xffffffffff0000000000000000000000 + B 46f + + // >> 32bit +42: + EXT V13.16B, V13.16B, V13.16B, #4 + MOV V13.S[3], V14.S[0] + + LDR Q8,=0xffffffffffffffffffffffff + LDR Q9,=0xffffffff000000000000000000000000 + B 46f + + // >> 24bit +43: + EXT V13.16B, V13.16B, V13.16B, #3 + MOV V13.H[7], V14.H[0] + MOV V13.B[13], V14.B[0] + + LDR Q8,=0xffffffffffffffffffffffffff + LDR Q9,=0xffffff00000000000000000000000000 + B 46f + + // >> 16bit +44: + EXT V13.16B, V13.16B, V13.16B, #2 + MOV V13.H[7], V14.H[0] + + LDR Q8,=0xffffffffffffffffffffffffffff + LDR Q9,=0xffff0000000000000000000000000000 + B 46f + + // >> 8bit +45: + EXT V13.16B, V13.16B, V13.16B, #1 + MOV V13.B[15], V14.B[0] + + LDR Q8,=0xffffffffffffffffffffffffffffff + LDR Q9,=0xff000000000000000000000000000000 + + // backup V12 first + // pblendvb xmm1, xmm2 +46: + AND V12.16B, V12.16B, V9.16B + AND V11.16B, V11.16B, V8.16B + ORR V11.16B, V11.16B, V12.16B + + MOV V12.16B, V11.16B + MOV V4.16B, V13.16B + PMULL2 V13.1Q, V13.2D, V6.2D + PMULL V4.1Q, V4.1D, V6.1D + EOR V13.16B, V13.16B, V4.16B + EOR V13.16B, V13.16B, V12.16B + + /* _128_done. 
we change the Q6 D[0] and D[1] */ +5: LDR Q6, =0x2d560000000000001368000000000000 + MOVI D14, #0 + MOV V10.16B, V13.16B + PMULL2 V13.1Q, V13.2D, V6.2D + + MOV V10.D[1], V10.D[0] + MOV V10.D[0], V14.D[0] //set zero + + EOR V13.16B, V13.16B, V10.16B + + MOV V10.16B, V13.16B + LDR Q7, =0x00000000FFFFFFFFFFFFFFFFFFFFFFFF + AND V10.16B, V10.16B, V7.16B + + MOV S13, V13.S[3] + + PMULL V13.1Q, V13.1D, V6.1D + EOR V13.16B, V13.16B, V10.16B + + /* _barrett */ +7: LDR Q6, =0x00000001f65a57f8000000018bb70000 + MOVI D14, #0 + MOV V10.16B, V13.16B + PMULL2 V13.1Q, V13.2D, V6.2D + + EXT V13.16B, V13.16B, V13.16B, #12 + MOV V13.S[0], V14.S[0] + + EXT V6.16B, V6.16B, V6.16B, #8 + PMULL2 V13.1Q, V13.2D, V6.2D + + EXT V13.16B, V13.16B, V13.16B, #12 + MOV V13.S[0], V14.S[0] + + EOR V13.16B, V13.16B, V10.16B + MOV X0, V13.D[0] + + /* _cleanup */ +8: MOV X14, #48 + LSR X0, X0, X14 +99: + ADD sp, sp, #0x20 + + LDP Q18, Q19, [sp], #0x20 + LDP Q16, Q17, [sp], #0x20 + LDP Q14, Q15, [sp], #0x20 + + LDP Q8, Q9, [sp], #0x20 + LDP Q6, Q7, [sp], #0x20 + LDP Q4, Q5, [sp], #0x20 + LDP Q12, Q13, [sp], #0x20 + LDP Q10, Q11, [sp], #0x20 + LDP X13, X14, [sp], #0x10 + LDP X11, X12, [sp], #0x10 + LDP X9, X10, [sp], #0x10 + LDP X7, X8, [sp], #0x10 + LDP X5, X6, [sp], #0x10 + + RET + + /* _less_than_128 */ +2: CMP X2, #32 + B.LT 9f // _less_than_32 + LDR Q6, =0x06df0000000000002d56000000000000 + + LSL X0, X0, #48 + LDR Q10, =0x0 + MOV V10.D[1], X0 + LDR Q13, [X1], #0x10 + REV64 V13.16B, V13.16B + EXT V13.16B, V13.16B, V13.16B, #8 + + EOR V13.16B, V13.16B, V10.16B + + SUB X2, X2, #32 + B 4b + + /* _less_than_32 */ +9: CMP X2, #0 + B.EQ 99b // _cleanup + LSL X0, X0, #48 + LDR Q10,=0x0 + MOV V10.D[1], X0 + + CMP X2, #16 + B.EQ 10f // _exact_16_left + B.LE 11f // _less_than_16_left + LDR Q13, [X1], #0x10 + + REV64 V13.16B, V13.16B + EXT V13.16B, V13.16B, V13.16B, #8 + + EOR V13.16B, V13.16B, V10.16B + SUB X2, X2, #16 + LDR Q6, =0x06df0000000000002d56000000000000 + B 6b // _get_last_two_xmms + + /* _less_than_16_left */ +11: CMP X2, #4 + B.LT 13f // _only_less_than_4 + + /* backup the length of data, we used in _less_than_2_left */ + MOV X8, X2 + CMP X2, #8 + B.LT 14f // _less_than_8_left + + LDR X14, [X1], #8 + /* push the data to stack, we backup the data to V10 */ + STR X14, [sp, #0] + SUB X2, X2, #8 + ADD X11, X11, #8 + + /* _less_than_8_left */ +14: CMP X2, #4 + B.LT 15f // _less_than_4_left + + /* get 32bit data */ + LDR W5, [X1], #4 + + /* push the data to stack */ + STR W5, [sp, X11] + SUB X2, X2, #4 + ADD X11, X11, #4 + + /* _less_than_4_left */ +15: CMP X2, #2 + B.LT 16f // _less_than_2_left + + /* get 16bits data */ + LDRH W6, [X1], #2 + + /* push the data to stack */ + STRH W6, [sp, X11] + SUB X2, X2, #2 + ADD X11, X11, #2 + + /* _less_than_2_left */ +16: + /* get 8bits data */ + LDRB W7, [X1], #1 + STRB W7, [sp, X11] + ADD X11, X11, #1 + + /* POP data from stack, store to V13 */ + LDR Q13, [sp] + MOVI D14, #0 + REV64 V13.16B, V13.16B + MOV V8.16B, V13.16B + MOV V13.D[1], V8.D[0] + MOV V13.D[0], V8.D[1] + + EOR V13.16B, V13.16B, V10.16B + CMP X8, #15 + B.EQ 80f + CMP X8, #14 + B.EQ 81f + CMP X8, #13 + B.EQ 82f + CMP X8, #12 + B.EQ 83f + CMP X8, #11 + B.EQ 84f + CMP X8, #10 + B.EQ 85f + CMP X8, #9 + B.EQ 86f + CMP X8, #8 + B.EQ 87f + CMP X8, #7 + B.EQ 88f + CMP X8, #6 + B.EQ 89f + CMP X8, #5 + B.EQ 90f + CMP X8, #4 + B.EQ 91f + CMP X8, #3 + B.EQ 92f + CMP X8, #2 + B.EQ 93f + CMP X8, #1 + B.EQ 94f + CMP X8, #0 + B.EQ 95f + +80: + EXT V13.16B, V13.16B, V13.16B, #1 + MOV V13.B[15], V14.B[0] + B 5b + +81: + EXT V13.16B, 
V13.16B, V13.16B, #2 + MOV V13.H[7], V14.H[0] + B 5b + +82: + EXT V13.16B, V13.16B, V13.16B, #3 + MOV V13.H[7], V14.H[0] + MOV V13.B[13], V14.B[0] + B 5b +83: + + EXT V13.16B, V13.16B, V13.16B, #4 + MOV V13.S[3], V14.S[0] + B 5b + +84: + EXT V13.16B, V13.16B, V13.16B, #5 + MOV V13.S[3], V14.S[0] + MOV V13.B[11], V14.B[0] + B 5b + +85: + EXT V13.16B, V13.16B, V13.16B, #6 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + B 5b + +86: + EXT V13.16B, V13.16B, V13.16B, #7 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + MOV V13.B[9], V14.B[0] + B 5b + +87: + MOV V13.D[0], V13.D[1] + MOV V13.D[1], V14.D[0] + B 5b + +88: + EXT V13.16B, V13.16B, V13.16B, #9 + MOV V13.D[1], V14.D[0] + MOV V13.B[7], V14.B[0] + B 5b + +89: + EXT V13.16B, V13.16B, V13.16B, #10 + MOV V13.D[1], V14.D[0] + MOV V13.H[3], V14.H[0] + B 5b + +90: + EXT V13.16B, V13.16B, V13.16B, #11 + MOV V13.D[1], V14.D[0] + MOV V13.H[3], V14.H[0] + MOV V13.B[5], V14.B[0] + B 5b + +91: + MOV V13.S[0], V13.S[3] + MOV V13.D[1], V14.D[0] + MOV V13.S[1], V14.S[0] + B 5b + +92: + EXT V13.16B, V13.16B, V13.16B, #13 + MOV V13.D[1], V14.D[0] + MOV V13.S[1], V14.S[0] + MOV V13.B[3], V14.B[0] + B 5b + +93: + MOV V15.H[0], V13.H[7] + MOV V13.16B, V14.16B + MOV V13.H[0], V15.H[0] + B 5b + +94: + MOV V15.B[0], V13.B[15] + MOV V13.16B, V14.16B + MOV V13.B[0], V15.B[0] + B 5b + +95: + LDR Q13,=0x0 + B 5b // _128_done + + /* _exact_16_left */ +10: + LD1 { V13.2D }, [X1], #0x10 + + REV64 V13.16B, V13.16B + EXT V13.16B, V13.16B, V13.16B, #8 + EOR V13.16B, V13.16B, V10.16B + B 5b // _128_done + + /* _only_less_than_4 */ +13: CMP X2, #3 + MOVI D14, #0 + B.LT 17f //_only_less_than_3 + + LDR S13, [X1], #4 + MOV V13.B[15], V13.B[0] + MOV V13.B[14], V13.B[1] + MOV V13.B[13], V13.B[2] + MOV V13.S[0], V13.S[1] + + EOR V13.16B, V13.16B, V10.16B + + EXT V13.16B, V13.16B, V13.16B, #5 + + MOV V13.S[3], V14.S[0] + MOV V13.B[11], V14.B[0] + + B 7b // _barrett + /* _only_less_than_3 */ +17: + CMP X2, #2 + B.LT 18f // _only_less_than_2 + + LDR H13, [X1], #2 + MOV V13.B[15], V13.B[0] + MOV V13.B[14], V13.B[1] + MOV V13.H[0], V13.H[1] + + EOR V13.16B, V13.16B, V10.16B + + EXT V13.16B, V13.16B, V13.16B, #6 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + + B 7b // _barrett + + /* _only_less_than_2 */ +18: + LDRB W7, [X1], #1 + LDR Q13, = 0x0 + MOV V13.B[15], W7 + + EOR V13.16B, V13.16B, V10.16B + + EXT V13.16B, V13.16B, V13.16B, #7 + MOV V13.S[3], V14.S[0] + MOV V13.H[5], V14.H[0] + MOV V13.B[9], V14.B[0] + + B 7b // _barrett diff --git a/arch/arm64/crypto/crct10dif-neon_glue.c b/arch/arm64/crypto/crct10dif-neon_glue.c new file mode 100644 index 0000000000000000000000000000000000000000..e0c4a9acee279171a9853b596709d0f4df4bd57c --- /dev/null +++ b/arch/arm64/crypto/crct10dif-neon_glue.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + + +#include +#include +#include +#include +#include +#include +#include + +asmlinkage __u16 crc_t10dif_neon(__u16 crc, const unsigned char *buf, + size_t len); + +struct chksum_desc_ctx { + __u16 crc; +}; + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. 
+ */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc_t10dif_neon(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__u16 *)out = ctx->crc; + return 0; +} + +static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + *(__u16 *)out = crc_t10dif_neon(*crcp, data, len); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crct10dif", + .cra_driver_name = "crct10dif-neon", + .cra_priority = 200, + .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init crct10dif_arm64_mod_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crct10dif_arm64_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crct10dif_arm64_mod_init); +module_exit(crct10dif_arm64_mod_fini); + +MODULE_AUTHOR("YueHaibing "); +MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with ARM64 NEON instruction."); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-neon"); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 067d8937d5af1e74a69ae7b14b1b0306a53fe087..1ed227bf61066c2b7c9b487ced6ab6497508fe51 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -418,9 +418,11 @@ static int gcm_encrypt(struct aead_request *req) put_unaligned_be32(2, iv + GCM_IV_SIZE); while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + const int blocks = + walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; + int remaining = blocks; do { __aes_arm64_encrypt(ctx->aes_key.key_enc, @@ -430,9 +432,9 @@ static int gcm_encrypt(struct aead_request *req) dst += AES_BLOCK_SIZE; src += AES_BLOCK_SIZE; - } while (--blocks > 0); + } while (--remaining > 0); - ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, + ghash_do_update(blocks, dg, walk.dst.virt.addr, &ctx->ghash_key, NULL); @@ -553,7 +555,7 @@ static int gcm_decrypt(struct aead_request *req) put_unaligned_be32(2, iv + GCM_IV_SIZE); while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 17fac2889f56ea99dbcca0c87e8c6c6b45934235..d8c521c757e836717b5a80b70e1e83f293c9bc2c 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ 
b/arch/arm64/crypto/sha1-ce-glue.c @@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct sha1_ce_state *sctx = shash_desc_ctx(desc); - bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); + bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len; if (!may_use_simd()) return crypto_sha1_finup(desc, data, len, out); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 261f5195cab74b2952ee2158daf08ab968939701..c47d1a28ff6bb6274180c6063e2034be1b991eca 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct sha256_ce_state *sctx = shash_desc_ctx(desc); - bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); + bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len; if (!may_use_simd()) { if (len) diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S deleted file mode 100644 index b14463438b0966b6bc37f2f7784b0285c51ce290..0000000000000000000000000000000000000000 --- a/arch/arm64/crypto/speck-neon-core.S +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers - */ - -#include - - .text - - // arguments - ROUND_KEYS .req x0 // const {u64,u32} *round_keys - NROUNDS .req w1 // int nrounds - NROUNDS_X .req x1 - DST .req x2 // void *dst - SRC .req x3 // const void *src - NBYTES .req w4 // unsigned int nbytes - TWEAK .req x5 // void *tweak - - // registers which hold the data being encrypted/decrypted - // (underscores avoid a naming collision with ARM64 registers x0-x3) - X_0 .req v0 - Y_0 .req v1 - X_1 .req v2 - Y_1 .req v3 - X_2 .req v4 - Y_2 .req v5 - X_3 .req v6 - Y_3 .req v7 - - // the round key, duplicated in all lanes - ROUND_KEY .req v8 - - // index vector for tbl-based 8-bit rotates - ROTATE_TABLE .req v9 - ROTATE_TABLE_Q .req q9 - - // temporary registers - TMP0 .req v10 - TMP1 .req v11 - TMP2 .req v12 - TMP3 .req v13 - - // multiplication table for updating XTS tweaks - GFMUL_TABLE .req v14 - GFMUL_TABLE_Q .req q14 - - // next XTS tweak value(s) - TWEAKV_NEXT .req v15 - - // XTS tweaks for the blocks currently being encrypted/decrypted - TWEAKV0 .req v16 - TWEAKV1 .req v17 - TWEAKV2 .req v18 - TWEAKV3 .req v19 - TWEAKV4 .req v20 - TWEAKV5 .req v21 - TWEAKV6 .req v22 - TWEAKV7 .req v23 - - .align 4 -.Lror64_8_table: - .octa 0x080f0e0d0c0b0a090007060504030201 -.Lror32_8_table: - .octa 0x0c0f0e0d080b0a090407060500030201 -.Lrol64_8_table: - .octa 0x0e0d0c0b0a09080f0605040302010007 -.Lrol32_8_table: - .octa 0x0e0d0c0f0a09080b0605040702010003 -.Lgf128mul_table: - .octa 0x00000000000000870000000000000001 -.Lgf64mul_table: - .octa 0x0000000000000000000000002d361b00 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. 
- */ -.macro _speck_round_128bytes n, lanes - - // x = ror(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b - - // x += y - add X_0.\lanes, X_0.\lanes, Y_0.\lanes - add X_1.\lanes, X_1.\lanes, Y_1.\lanes - add X_2.\lanes, X_2.\lanes, Y_2.\lanes - add X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // y = rol(y, 3) - shl TMP0.\lanes, Y_0.\lanes, #3 - shl TMP1.\lanes, Y_1.\lanes, #3 - shl TMP2.\lanes, Y_2.\lanes, #3 - shl TMP3.\lanes, Y_3.\lanes, #3 - sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) - sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) - sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) - sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) - - // y ^= x - eor Y_0.16b, TMP0.16b, X_0.16b - eor Y_1.16b, TMP1.16b, X_1.16b - eor Y_2.16b, TMP2.16b, X_2.16b - eor Y_3.16b, TMP3.16b, X_3.16b -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). - */ -.macro _speck_unround_128bytes n, lanes - - // y ^= x - eor TMP0.16b, Y_0.16b, X_0.16b - eor TMP1.16b, Y_1.16b, X_1.16b - eor TMP2.16b, Y_2.16b, X_2.16b - eor TMP3.16b, Y_3.16b, X_3.16b - - // y = ror(y, 3) - ushr Y_0.\lanes, TMP0.\lanes, #3 - ushr Y_1.\lanes, TMP1.\lanes, #3 - ushr Y_2.\lanes, TMP2.\lanes, #3 - ushr Y_3.\lanes, TMP3.\lanes, #3 - sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) - sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) - sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) - sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // x -= y - sub X_0.\lanes, X_0.\lanes, Y_0.\lanes - sub X_1.\lanes, X_1.\lanes, Y_1.\lanes - sub X_2.\lanes, X_2.\lanes, Y_2.\lanes - sub X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x = rol(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b -.endm - -.macro _next_xts_tweak next, cur, tmp, n -.if \n == 64 - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. - */ - sshr \tmp\().2d, \cur\().2d, #63 - and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b - shl \next\().2d, \cur\().2d, #1 - ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 - eor \next\().16b, \next\().16b, \tmp\().16b -.else - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. - */ - ushr \tmp\().2d, \cur\().2d, #62 - shl \next\().2d, \cur\().2d, #2 - tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b - eor \next\().16b, \next\().16b, \tmp\().16b -.endif -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. 
- */ -.macro _speck_xts_crypt n, lanes, decrypting - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting - mov NROUNDS, NROUNDS /* zero the high 32 bits */ -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 - sub ROUND_KEYS, ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 - sub ROUND_KEYS, ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for tbl-based 8-bit rotates -.if \decrypting - ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table -.else - ldr ROTATE_TABLE_Q, .Lror\n\()_8_table -.endif - - // One-time XTS preparation -.if \n == 64 - // Load first tweak - ld1 {TWEAKV0.16b}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr GFMUL_TABLE_Q, .Lgf128mul_table -.else - // Load first tweak - ld1 {TWEAKV0.8b}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr GFMUL_TABLE_Q, .Lgf64mul_table - - // Calculate second tweak, packing it together with the first - ushr TMP0.2d, TWEAKV0.2d, #63 - shl TMP1.2d, TWEAKV0.2d, #1 - tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b - eor TMP0.8b, TMP0.8b, TMP1.8b - mov TWEAKV0.d[1], TMP0.d[0] -.endif - -.Lnext_128bytes_\@: - - // Calculate XTS tweaks for next 128 bytes - _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n - _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n - _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n - _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n - _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n - _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n - _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n - _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n - - // Load the next source blocks into {X,Y}[0-3] - ld1 {X_0.16b-Y_1.16b}, [SRC], #64 - ld1 {X_2.16b-Y_3.16b}, [SRC], #64 - - // XOR the source blocks with their XTS tweaks - eor TMP0.16b, X_0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor TMP1.16b, X_1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor TMP2.16b, X_2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor TMP3.16b, X_3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - - /* - * De-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
- */ - uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes - uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes - - // Do the cipher rounds - mov x6, ROUND_KEYS - mov w7, NROUNDS -.Lnext_round_\@: -.if \decrypting - ld1r {ROUND_KEY.\lanes}, [x6] - sub x6, x6, #( \n / 8 ) - _speck_unround_128bytes \n, \lanes -.else - ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) - _speck_round_128bytes \n, \lanes -.endif - subs w7, w7, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block - zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes - zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes - zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes - zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes - zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes - zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes - zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes - zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes - - // XOR the encrypted/decrypted blocks with the tweaks calculated earlier - eor X_0.16b, TMP0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor X_1.16b, TMP1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor X_2.16b, TMP2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor X_3.16b, TMP3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - mov TWEAKV0.16b, TWEAKV_NEXT.16b - - // Store the ciphertext in the destination buffer - st1 {X_0.16b-Y_1.16b}, [DST], #64 - st1 {X_2.16b-Y_3.16b}, [DST], #64 - - // Continue if there are more 128-byte chunks remaining - subs NBYTES, NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak and return -.if \n == 64 - st1 {TWEAKV_NEXT.16b}, [TWEAK] -.else - st1 {TWEAKV_NEXT.8b}, [TWEAK] -.endif - ret -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - -ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c deleted file mode 100644 index 6e233aeb4ff48b8eebed3966800ffe385658a06d..0000000000000000000000000000000000000000 --- a/arch/arm64/crypto/speck-neon-glue.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * (64-bit version; based on the 32-bit version) - * - * Copyright (c) 2018 Google, Inc - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef 
void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, &tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - 
kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? - 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_ASIMD)) - return -ENODEV; - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 6cd5d77b6b44b372ca8f2f3ec206e0123292f215..22bd68ce50d2b61e77cff84a8b18906d8996ed16 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += preempt.h generic-y += qrwlock.h -generic-y += qspinlock.h 
generic-y += rwsem.h generic-y += segment.h generic-y += serial.h @@ -27,4 +26,3 @@ generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h -generic-y += xor.h diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 709208dfdc8b03b43748009421aefda7d31fa0ab..348f12447ecd2c5a918e881fc6307dadf9624e50 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -22,12 +23,26 @@ #include /* Macros for consistency checks of the GICC subtable of MADT */ -#define ACPI_MADT_GICC_LENGTH \ - (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) + +/* + * MADT GICC minimum length refers to the MADT GICC structure table length as + * defined in the earliest ACPI version supported on arm64, ie ACPI 5.1. + * + * The efficiency_class member was added to the + * struct acpi_madt_generic_interrupt to represent the MADT GICC structure + * "Processor Power Efficiency Class" field, added in ACPI 6.0 whose offset + * is therefore used to delimit the MADT GICC structure minimum length + * appropriately. + */ +#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \ + struct acpi_madt_generic_interrupt, efficiency_class) #define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ - (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) + (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \ + (unsigned long)(entry) + (entry)->header.length > (end)) + +#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \ + spe_interrupt) + sizeof(u16)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI @@ -99,6 +114,10 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu) static inline void arch_fix_phys_package_id(int num, u32 slot) { } void __init acpi_init_cpus(void); +void acpi_pptt_find_min_physid_cpu_node(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + phys_cpuid_t *min_physid, + struct acpi_pptt_processor **min_cpu_node); #else static inline void acpi_init_cpus(void) { } diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4b650ec1d7dd1aa8d4418b6b896f81de4a2187ab..c3e4273c140224ee23fd64e9cb2c6950a6ff0a1e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -14,8 +14,6 @@ #include #include -extern int alternatives_applied; - struct alt_instr { s32 orig_offset; /* offset to original instruction */ s32 alt_offset; /* offset to replacement instruction */ @@ -27,7 +25,9 @@ struct alt_instr { typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); +void __init apply_boot_alternatives(void); void __init apply_alternatives_all(void); +bool alternative_is_applied(u16 cpufeature); #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length); @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature,cb) \ +#define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ " 
.word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -62,33 +65,40 @@ static inline void apply_alternatives_module(void *start, size_t length) { } * * Alternatives with callbacks do not generate replacement instructions. */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ + ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ - ".pushsection .altinstr_replacement, \"a\"\n" \ + ".subsection 1\n" \ "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ + ".org . - (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ "663:\n\t" \ "664:\n\t" \ - ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) #else #include @@ -107,11 +117,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { } 662: .pushsection .altinstructions, "a" altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 663: \insn2 -664: .popsection - .org . - (664b-663b) + (662b-661b) +664: .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) + .previous .endif .endm @@ -150,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .pushsection .altinstructions, "a" altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 .align 2 /* So GAS knows label 661 is suitably aligned */ 661: .endm @@ -169,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .macro alternative_else 662: .if .Lasm_alt_mode==0 - .pushsection .altinstr_replacement, "ax" + .subsection 1 .else - .popsection + .previous .endif 663: .endm @@ -181,11 +191,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { } */ .macro alternative_endif 664: - .if .Lasm_alt_mode==0 - .popsection - .endif .org . - (664b-663b) + (662b-661b) .org . 
- (662b-661b) + (664b-663b) + .if .Lasm_alt_mode==0 + .previous + .endif .endm /* diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index e278f94df0c935d3f9c5b2ec2a683eeaee228d26..8a7ed2fd68519d54df8c6f2751d7300929db11bc 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -22,6 +22,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include @@ -114,6 +115,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg_s(val, SYS_ICC_BPR1_EL1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg_s(val, SYS_ICC_PMR_EL1); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg_s(SYS_ICC_RPR_EL1); +} + #define gic_read_typer(c) readq_relaxed(c) #define gic_write_irouter(v, c) writeq_relaxed(v, c) #define gic_read_lpir(c) readq_relaxed(c) @@ -140,5 +156,42 @@ static inline void gic_write_bpr1(u32 val) #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) #define gits_read_vpendbaser(c) readq_relaxed(c) +bool gic_supports_pseudo_nmis(void); + +static inline bool gic_prio_masking_enabled(void) +{ + return system_uses_irq_prio_masking(); +} + +static inline void gic_pmr_mask_irqs(void) +{ + BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF | + GIC_PRIO_PSR_I_SET)); + BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); + /* + * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared + * and non-secure PMR accesses are not subject to the shifts that + * are applied to IRQ priorities + */ + BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON); + gic_write_pmr(GIC_PRIO_IRQOFF); +} + +static inline void gic_arch_enable_irqs(void) +{ + asm volatile ("msr daifclr, #2" : : : "memory"); +} + +static inline void gic_arch_disable_irqs(void) +{ + asm volatile ("msr daifset, #2" : : : "memory"); +} + +static inline void gic_arch_restore_irqs(unsigned long flags) +{ + if (gic_supports_pseudo_nmis()) + asm volatile ("msr daif, %0" : : "r" (flags >> 32) + : "memory"); +} #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index f2a234d6516cf5b537b80134b137729dd9abb798..837648870002d255400b091b2a54ab766e7de60b 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -148,21 +148,53 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) isb(); } +/* + * Ensure that reads of the counter are treated the same as memory reads + * for the purposes of ordering by subsequent memory barriers. + * + * This insanity brought to you by speculative system register reads, + * out-of-order memory accesses, sequence locks and Thomas Gleixner. 
+ * + * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html + */ +#define arch_counter_enforce_ordering(val) do { \ + u64 tmp, _val = (val); \ + \ + asm volatile( \ + " eor %0, %1, %1\n" \ + " add %0, sp, %0\n" \ + " ldr xzr, [%0]" \ + : "=r" (tmp) : "r" (_val)); \ +} while (0) + static inline u64 arch_counter_get_cntpct(void) { + u64 cnt; + isb(); - return arch_timer_reg_read_stable(cntpct_el0); + cnt = arch_timer_reg_read_stable(cntpct_el0); + arch_counter_enforce_ordering(cnt); + return cnt; } static inline u64 arch_counter_get_cntvct(void) { + u64 cnt; + isb(); - return arch_timer_reg_read_stable(cntvct_el0); + cnt = arch_timer_reg_read_stable(cntvct_el0); + arch_counter_enforce_ordering(cnt); + return cnt; } +#undef arch_counter_enforce_ordering + static inline int arch_timer_arch_init(void) { return 0; } +typedef void (*clock_access_fn)(struct timespec64 *); +extern int register_persistent_clock(clock_access_fn read_persistent); + #endif diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h index b3552c4a405f28a2c2791d30190bdc7540bcd2ab..04e5be18acb16530c63122c8cd83078e81a13fad 100644 --- a/arch/arm64/include/asm/asm-bug.h +++ b/arch/arm64/include/asm/asm-bug.h @@ -39,6 +39,7 @@ 14470: .long 14471f - 14470b; \ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ .short flags; \ + .align 2; \ .popsection; \ 14471: #else diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 0bcc98dbba565f1727995b5ec16ff3e978720174..21431950531303abe4002fe24f878f08597eae8b 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -60,16 +60,8 @@ .endm /* - * Enable and disable interrupts. + * Save/restore interrupts. */ - .macro disable_irq - msr daifset, #2 - .endm - - .macro enable_irq - msr daifclr, #2 - .endm - .macro save_and_disable_irq, flags mrs \flags, daif msr daifset, #2 @@ -112,7 +104,11 @@ * RAS Error Synchronization barrier */ .macro esb +#ifdef CONFIG_ARM64_RAS_EXTN hint #16 +#else + nop +#endif .endm /* @@ -122,6 +118,13 @@ hint #20 .endm +/* + * Clear Branch History instruction + */ + .macro clearbhb + hint #22 + .endm + /* * Sanitise a 64-bit bounded index wrt speculation, returning zero if out * of bounds. 
@@ -378,27 +381,33 @@ alternative_endif * size: size of the region * Corrupts: kaddr, size, tmp1, tmp2 */ + .macro __dcache_op_workaround_clean_cache, op, kaddr +alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE + dc \op, \kaddr +alternative_else + dc civac, \kaddr +alternative_endif + .endm + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 dcache_line_size \tmp1, \tmp2 add \size, \kaddr, \size sub \tmp2, \tmp1, #1 bic \kaddr, \kaddr, \tmp2 9998: - .if (\op == cvau || \op == cvac) -alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE - dc \op, \kaddr -alternative_else - dc civac, \kaddr -alternative_endif - .elseif (\op == cvap) -alternative_if ARM64_HAS_DCPOP - sys 3, c7, c12, 1, \kaddr // dc cvap -alternative_else - dc cvac, \kaddr -alternative_endif + .ifc \op, cvau + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvac + __dcache_op_workaround_clean_cache \op, \kaddr + .else + .ifc \op, cvap + sys 3, c7, c12, 1, \kaddr // dc cvap .else dc \op, \kaddr .endif + .endif + .endif add \kaddr, \kaddr, \tmp1 cmp \kaddr, \size b.lo 9998b @@ -701,4 +710,31 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .Lyield_out_\@ : .endm + .macro __mitigate_spectre_bhb_loop tmp +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +alternative_cb spectre_bhb_patch_loop_iter + mov \tmp, #32 // Patched to correct the immediate +alternative_cb_end +.Lspectre_bhb_loop\@: + b . + 4 + subs \tmp, \tmp, #1 + b.ne .Lspectre_bhb_loop\@ + dsb nsh + isb +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm + + /* Save/restores x0-x3 to the stack */ + .macro __mitigate_spectre_bhb_fw +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 +alternative_cb arm64_update_smccc_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index f5a2d09afb3841bd5ac7d40764ef48b2e108de0d..817a043a85f646ce68111ceec492cfc5da022520 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -314,7 +314,7 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ " cbnz %w0, 1b\n" \ " " #mb "\n" \ "2:" \ - : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ + : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : cl); \ \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index f9b0b09153e0eaa3b15728fd42471c77c2d1955a..d1e77f843d88b9af3bdef77ac1cc36d683590299 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \ " " #asm_op " %w[i], %[v]\n") \ : [i] "+r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ @@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC(fetch_##op##name), \ /* LSE atomics */ \ @@ -84,7 +88,9 
@@ static inline int atomic_add_return##name(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC(add_return##name) \ __nops(1), \ @@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v) register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; - asm volatile(ARM64_LSE_ATOMIC_INSN( + asm volatile( + __LSE_PREAMBLE + ARM64_LSE_ATOMIC_INSN( /* LL/SC */ __LL_SC_ATOMIC(and) __nops(1), @@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC(fetch_and##name) \ __nops(1), \ @@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v) register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; - asm volatile(ARM64_LSE_ATOMIC_INSN( + asm volatile( + __LSE_PREAMBLE + ARM64_LSE_ATOMIC_INSN( /* LL/SC */ __LL_SC_ATOMIC(sub) __nops(1), @@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC(sub_return##name) \ __nops(2), \ @@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC(fetch_sub##name) \ __nops(1), \ @@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \ " " #asm_op " %[i], %[v]\n") \ : [i] "+r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ @@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC64(fetch_##op##name), \ /* LSE atomics */ \ @@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC64(add_return##name) \ __nops(1), \ @@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v) register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; - asm volatile(ARM64_LSE_ATOMIC_INSN( + asm volatile( + __LSE_PREAMBLE + ARM64_LSE_ATOMIC_INSN( /* LL/SC */ __LL_SC_ATOMIC64(and) __nops(1), @@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC64(fetch_and##name) \ __nops(1), \ @@ -351,7 +377,9 @@ 
static inline void atomic64_sub(long i, atomic64_t *v) register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; - asm volatile(ARM64_LSE_ATOMIC_INSN( + asm volatile( + __LSE_PREAMBLE + ARM64_LSE_ATOMIC_INSN( /* LL/SC */ __LL_SC_ATOMIC64(sub) __nops(1), @@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC64(sub_return##name) \ __nops(2), \ @@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_ATOMIC64(fetch_sub##name) \ __nops(1), \ @@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) { register long x0 asm ("x0") = (long)v; - asm volatile(ARM64_LSE_ATOMIC_INSN( + asm volatile( + __LSE_PREAMBLE + ARM64_LSE_ATOMIC_INSN( /* LL/SC */ __LL_SC_ATOMIC64(dec_if_positive) __nops(6), @@ -455,7 +489,9 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ register unsigned long x1 asm ("x1") = old; \ register unsigned long x2 asm ("x2") = new; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_CMPXCHG(name) \ __nops(2), \ @@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ register unsigned long x3 asm ("x3") = new2; \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ + asm volatile( \ + __LSE_PREAMBLE \ + ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ __LL_SC_CMPXCHG_DBL(name) \ __nops(3), \ @@ -517,7 +555,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ " eor %[old2], %[old2], %[oldval2]\n" \ " orr %[old1], %[old1], %[old2]") \ : [old1] "+&r" (x0), [old2] "+&r" (x1), \ - [v] "+Q" (*(unsigned long *)ptr) \ + [v] "+Q" (*(__uint128_t *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ : __LL_SC_CLOBBERS, ##cl); \ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 822a9192c55118ddc624687d3917f086cad1637f..3cae78c1ce33b772f6cd18dbe03cfb30c3329c17 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -34,6 +34,18 @@ #define psb_csync() asm volatile("hint #17" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") +#ifdef CONFIG_ARM64_PSEUDO_NMI +#define pmr_sync() \ + do { \ + extern struct static_key_false gic_pmr_sync; \ + \ + if (static_branch_unlikely(&gic_pmr_sync)) \ + dsb(sy); \ + } while(0) +#else +#define pmr_sync() do {} while (0) +#endif + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 5ee5bca8c24b1ba777ee3c9bd19667af0d1d90cb..ccb013f822ba75ef95329ceb79e431d8767a6376 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -40,6 +40,21 @@ #define L1_CACHE_SHIFT (6) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE +#ifndef ____cacheline_aligned_128 +#define ____cacheline_aligned_128 __attribute__((__aligned__(128))) +#endif +#endif + + +#define CLIDR_LOUU_SHIFT 27 +#define 
CLIDR_LOC_SHIFT 24 +#define CLIDR_LOUIS_SHIFT 21 + +#define CLIDR_LOUU(clidr) (((clidr) >> CLIDR_LOUU_SHIFT) & 0x7) +#define CLIDR_LOC(clidr) (((clidr) >> CLIDR_LOC_SHIFT) & 0x7) +#define CLIDR_LOUIS(clidr) (((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7) + /* * Memory returned by kmalloc() may be used for DMA, so we must make * sure that all such allocations are cache aligned. Otherwise, @@ -78,10 +93,46 @@ static inline u32 cache_type_cwg(void) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -static inline int cache_line_size(void) +int cache_line_size(void); + +/* + * Read the effective value of CTR_EL0. + * + * According to ARM ARM for ARMv8-A (ARM DDI 0487C.a), + * section D10.2.33 "CTR_EL0, Cache Type Register" : + * + * CTR_EL0.IDC reports the data cache clean requirements for + * instruction to data coherence. + * + * 0 - dcache clean to PoU is required unless : + * (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0) + * 1 - dcache clean to PoU is not required for i-to-d coherence. + * + * This routine provides the CTR_EL0 with the IDC field updated to the + * effective state. + */ +static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void) { - u32 cwg = cache_type_cwg(); - return cwg ? 4 << cwg : ARCH_DMA_MINALIGN; + u32 ctr = read_cpuid_cachetype(); +#ifdef CONFIG_HISILICON_ERRATUM_1980005 + static const struct midr_range idc_support_list[] = { + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_REV(MIDR_HISI_TSV200, 1, 0), + { /* sentinel */ } + }; + if (is_midr_in_range_list(read_cpuid_id(), idc_support_list)) + ctr |= BIT(CTR_IDC_SHIFT); +#endif + + if (!(ctr & BIT(CTR_IDC_SHIFT))) { + u64 clidr = read_sysreg(clidr_el1); + + if (CLIDR_LOC(clidr) == 0 || + (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0)) + ctr |= BIT(CTR_IDC_SHIFT); + } + + return ctr; } #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 0b6f5a7d4027c56fcd868e3aecc0f93ef1f10432..1672ed418bc53b8971f5aeb21c6407f6108834d9 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -30,22 +30,26 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); } #define ip_fast_csum ip_fast_csum +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + #include #endif /* __ASM_CHECKSUM_H */ diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h index 0ece64a26c8c94d04c7aac8a35b7bc8dd0000b85..734428342ebfe76a913993969546acf89adfe8d9 100644 --- a/arch/arm64/include/asm/clocksource.h +++ b/arch/arm64/include/asm/clocksource.h @@ -4,6 +4,7 @@ struct arch_clocksource_data { bool vdso_direct; /* Usable for direct VDSO access? */ + bool vdso_fix; /* Need to avoid the clock bug in VDSO? 
*/ }; #endif diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3b0938281541912aae9fbe94ae97f799a0b342e4..d8b01c7c9cd3fa4d2aa58a0048bb582f37f74870 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -74,7 +74,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") #undef __XCHG_CASE #define __XCHG_GEN(sfx) \ -static inline unsigned long __xchg##sfx(unsigned long x, \ +static __always_inline unsigned long __xchg##sfx(unsigned long x, \ volatile void *ptr, \ int size) \ { \ @@ -116,7 +116,7 @@ __XCHG_GEN(_mb) #define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) #define __CMPXCHG_GEN(sfx) \ -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ unsigned long new, \ int size) \ @@ -223,7 +223,7 @@ __CMPWAIT_CASE( , , 8); #undef __CMPWAIT_CASE #define __CMPWAIT_GEN(sfx) \ -static inline void __cmpwait##sfx(volatile void *ptr, \ +static __always_inline void __cmpwait##sfx(volatile void *ptr, \ unsigned long val, \ int size) \ { \ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 1a037b94eba10d481866063bfcc8c5f59adf2e35..b924cddb5014f96f8f957df82fb290ecc863cf30 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -25,6 +25,8 @@ #include #include +#include + #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ #define COMPAT_UTS_MACHINE "armv8b\0\0" @@ -159,6 +161,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) } #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) +#define COMPAT_MINSIGSTKSZ 2048 static inline void __user *arch_compat_alloc_user_space(long len) { @@ -223,23 +226,6 @@ struct compat_shmid64_ds { compat_ulong_t __unused5; }; -static inline int is_compat_task(void) -{ - return test_thread_flag(TIF_32BIT); -} - -static inline int is_compat_thread(struct thread_info *thread) -{ - return test_ti_thread_flag(thread, TIF_32BIT); -} - -#else /* !CONFIG_COMPAT */ - -static inline int is_compat_thread(struct thread_info *thread) -{ - return 0; -} - #endif /* CONFIG_COMPAT */ #endif /* __KERNEL__ */ #endif /* __ASM_COMPAT_H */ diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 88392272250e8ab3e263cd1cedefec542f87a70c..3a9908a0121904c522b6ce87f0438c22c5f731c9 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -36,6 +36,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ae1f70450fb2129b5c195be0678ae9935bb3a450..5edcd00ee6df74e5c2971bddd1dd85f7944de4e3 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -51,7 +51,20 @@ #define ARM64_SSBD 30 #define ARM64_MISMATCHED_CACHE_TYPE 31 #define ARM64_HAS_STAGE2_FWB 32 +#define ARM64_HAS_MPAM 33 +#define ARM64_HAS_IRQ_PRIO_MASKING 34 +#define ARM64_WORKAROUND_1463225 35 +#define ARM64_HAS_CRC32 36 +#define ARM64_SSBS 37 +#ifdef CONFIG_HISILICON_ERRATUM_1980005 +#define ARM64_WORKAROUND_HISILICON_1980005 38 -#define ARM64_NCAPS 33 +#define ARM64_NCAPS 39 +#else +#define ARM64_NCAPS 38 +#endif + +#define ARM64_SPECTRE_BHB 40 +#define ARM64_WORKAROUND_1742098 41 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h 
b/arch/arm64/include/asm/cpufeature.h index 1717ba1db35ddb935720c20ec46c318d59ca9b83..12b8f9180abb5cfb1cc4b96ce5ca5c71d3501414 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -45,9 +45,10 @@ */ enum ftr_type { - FTR_EXACT, /* Use a predefined safe value */ - FTR_LOWER_SAFE, /* Smaller value is safe */ - FTR_HIGHER_SAFE,/* Bigger value is safe */ + FTR_EXACT, /* Use a predefined safe value */ + FTR_LOWER_SAFE, /* Smaller value is safe */ + FTR_HIGHER_SAFE, /* Bigger value is safe */ + FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */ }; #define FTR_STRICT true /* SANITY check strict matching required */ @@ -88,6 +89,12 @@ struct arm64_ftr_reg { extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; +int arm64_cpu_ftr_regs_traverse(int (*op)(u32, u64, void *), void *argp); + +#ifdef CONFIG_ARM64_PSEUDO_NMI +extern bool enable_pseudo_nmi; +#endif + /* * CPU capabilities: * @@ -262,7 +269,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; /* * CPU feature detected at boot time based on system-wide value of a * feature. It is safe for a late CPU to have this feature even though - * the system hasn't enabled it, although the featuer will not be used + * the system hasn't enabled it, although the feature will not be used * by Linux in this case. If the system has enabled this feature already, * then every late CPU must have it. */ @@ -357,6 +364,12 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; +/* ARM64 CAPS + alternative_cb */ +#define ARM64_NPATCHABLE (ARM64_NCAPS + 1) +extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + +extern bool set_cap_spectre_bhb; + bool this_cpu_has_cap(unsigned int cap); static inline bool cpu_have_feature(unsigned int num) @@ -365,21 +378,29 @@ static inline bool cpu_have_feature(unsigned int num) } /* System capability check for constant caps */ -static inline bool __cpus_have_const_cap(int num) +static __always_inline bool __cpus_have_const_cap(int num) { + if (num == ARM64_SPECTRE_BHB) + return set_cap_spectre_bhb; + if (num >= ARM64_NCAPS) return false; + return static_branch_unlikely(&cpu_hwcap_keys[num]); } static inline bool cpus_have_cap(unsigned int num) { + if (num == ARM64_SPECTRE_BHB) + return set_cap_spectre_bhb; + if (num >= ARM64_NCAPS) return false; + return test_bit(num, cpu_hwcaps); } -static inline bool cpus_have_const_cap(int num) +static __always_inline bool cpus_have_const_cap(int num) { if (static_branch_likely(&arm64_const_caps_ready)) return __cpus_have_const_cap(num); @@ -389,6 +410,11 @@ static inline bool cpus_have_const_cap(int num) static inline void cpus_set_cap(unsigned int num) { + if (num == ARM64_SPECTRE_BHB) { + set_cap_spectre_bhb = true; + return; + } + if (num >= ARM64_NCAPS) { pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n", num, ARM64_NCAPS); @@ -421,6 +447,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field) return cpuid_feature_extract_unsigned_field_width(features, field, 4); } +/* + * Fields that identify the version of the Performance Monitors Extension do + * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825, + * "Alternative ID scheme used for the Performance Monitors Extension version". 
+ */ +static inline u64 __attribute_const__ +cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) +{ + u64 val = cpuid_feature_extract_unsigned_field(features, field); + u64 mask = GENMASK_ULL(field + 3, field); + + /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ + if (val == 0xf) + val = 0; + + if (val > cap) { + features &= ~mask; + features |= (cap << field) & mask; + } + + return features; +} + static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); @@ -481,6 +530,34 @@ static inline bool cpu_supports_mixed_endian_el0(void) return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); } +static inline bool supports_csv2p3(int scope) +{ + u64 pfr0; + u8 csv2_val; + + if (scope == SCOPE_LOCAL_CPU) + pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); + else + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + csv2_val = cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV2_SHIFT); + return csv2_val == 3; +} + +static inline bool supports_clearbhb(int scope) +{ + u64 isar2; + + if (scope == SCOPE_LOCAL_CPU) + isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); + else + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); + + return cpuid_feature_extract_unsigned_field(isar2, + ID_AA64ISAR2_CLEARBHB_SHIFT); +} + static inline bool system_supports_32bit_el0(void) { return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); @@ -508,6 +585,18 @@ static inline bool system_supports_sve(void) cpus_have_const_cap(ARM64_SVE); } +static inline bool system_uses_irq_prio_masking(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); +} + +static inline bool system_has_prio_mask_debugging(void) +{ + return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) && + system_uses_irq_prio_masking(); +} + #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 @@ -524,11 +613,40 @@ static inline int arm64_get_ssbd_state(void) #endif } -#ifdef CONFIG_ARM64_SSBD void arm64_set_ssbd_mitigation(bool state); -#else -static inline void arm64_set_ssbd_mitigation(bool state) {} -#endif + +/* Watch out, ordering is important here. */ +enum mitigation_state { + SPECTRE_UNAFFECTED, + SPECTRE_MITIGATED, + SPECTRE_VULNERABLE, +}; + +enum mitigation_state arm64_get_spectre_bhb_state(void); +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); +u8 spectre_bhb_loop_affected(int scope); +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); + +static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) +{ + switch (parange) { + case 0: return 32; + case 1: return 36; + case 2: return 40; + case 3: return 42; + case 4: return 44; + case 5: return 48; + case 6: return 52; + /* + * A future PE could use a value unknown to the kernel. + * However, by the "D10.1.4 Principles of the ID scheme + * for fields in ID registers", ARM DDI 0487C.a, any new + * value is guaranteed to be higher than what we know already. + * As a safe limit, we return the limit supported by the kernel. 
+ */ + default: return CONFIG_ARM64_PA_BITS; + } +} #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ea690b3562afb20773ce81cf3ea48f897f5998b1..bdec502910171ce87bd7688c90da24b902f678e1 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -62,20 +62,15 @@ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ -({ \ - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ - \ - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ - }) - #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E +#define ARM_CPU_IMP_HISI 0x48 +#define ARM_CPU_IMP_AMPERE 0xC0 +#define ARM_CPU_IMP_PHYTIUM 0x70 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -86,6 +81,11 @@ #define ARM_CPU_PART_CORTEX_A75 0xD0A #define ARM_CPU_PART_CORTEX_A35 0xD04 #define ARM_CPU_PART_CORTEX_A55 0xD05 +#define ARM_CPU_PART_CORTEX_A76 0xD0B +#define ARM_CPU_PART_NEOVERSE_V1 0xD40 +#define ARM_CPU_PART_CORTEX_A78 0xD41 +#define ARM_CPU_PART_CORTEX_X1 0xD44 +#define ARM_CPU_PART_CORTEX_A78C 0xD4B #define APM_CPU_PART_POTENZA 0x000 @@ -103,6 +103,17 @@ #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 +#define HISI_CPU_PART_TSV110 0xD01 +#define HISI_CPU_PART_TSV200 0xD02 + +#define PHYTIUM_CPU_PART_1500A 0X660 +#define PHYTIUM_CPU_PART_2000AHK 0X661 +#define PHYTIUM_CPU_PART_2000PLUS 0X662 +#define PHYTIUM_CPU_PART_2004 0X663 +#define PHYTIUM_CPU_PART_2500 0X663 + +#define AMPERE_CPU_PART_AMPERE1 0xAC3 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -110,6 +121,12 @@ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) +#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) +#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) +#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) +#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) +#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -120,6 +137,14 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) +#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) +#define MIDR_HISI_TSV200 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV200) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK 
MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) +#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) #ifndef __ASSEMBLY__ @@ -149,12 +174,23 @@ struct midr_range { .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \ } +#define MIDR_REV_RANGE(m, v, r_min, r_max) MIDR_RANGE(m, v, r_min, v, r_max) +#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min, + u32 rv_max) +{ + u32 _model = midr & MIDR_CPU_MODEL_MASK; + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); + + return _model == model && rv >= rv_min && rv <= rv_max; +} + static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) { - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, - range->rv_min, range->rv_max); + return midr_is_cpu_model_range(midr, range->model, + range->rv_min, range->rv_max); } static inline bool diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 22e4c83de5a5c34b8c9dcfcaee723edcd740fc4a..1230923b032dd48e6f872762be3a0d124c97562c 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -18,17 +18,31 @@ #include +#include +#include +#include + #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT +#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) /* mask/save/unmask/restore all exceptions, including interrupts. */ static inline void local_daif_mask(void) { + WARN_ON(system_has_prio_mask_debugging() && + (read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF | + GIC_PRIO_PSR_I_SET))); + asm volatile( "msr daifset, #0xf // local_daif_mask\n" : : : "memory"); + + /* Don't really care for a dsb here, we don't intend to enable IRQs */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + trace_hardirqs_off(); } @@ -36,36 +50,72 @@ static inline unsigned long local_daif_save(void) { unsigned long flags; - asm volatile( - "mrs %0, daif // local_daif_save\n" - : "=r" (flags) - : - : "memory"); + flags = read_sysreg(daif); + + if (system_uses_irq_prio_masking()) { + /* If IRQs are masked with PMR, reflect it in the flags */ + if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON) + flags |= PSR_I_BIT; + } + local_daif_mask(); return flags; } -static inline void local_daif_unmask(void) -{ - trace_hardirqs_on(); - asm volatile( - "msr daifclr, #0xf // local_daif_unmask" - : - : - : "memory"); -} - static inline void local_daif_restore(unsigned long flags) { - if (!arch_irqs_disabled_flags(flags)) + bool irq_disabled = flags & PSR_I_BIT; + + WARN_ON(system_has_prio_mask_debugging() && + !(read_sysreg(daif) & PSR_I_BIT)); + + if (!irq_disabled) { trace_hardirqs_on(); - asm volatile( - "msr daif, %0 // local_daif_restore" - : - : "r" (flags) - : "memory"); - if (arch_irqs_disabled_flags(flags)) + + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + pmr_sync(); + } + } else if (system_uses_irq_prio_masking()) { + u64 pmr; + + if (!(flags & PSR_A_BIT)) { + /* + * If interrupts are disabled but we can take + * asynchronous errors, we can take NMIs + */ + flags &= ~PSR_I_BIT; + pmr = GIC_PRIO_IRQOFF; + } else { + pmr = GIC_PRIO_IRQON | 
GIC_PRIO_PSR_I_SET; + } + + /* + * There has been concern that the write to daif + * might be reordered before this write to PMR. + * From the ARM ARM DDI 0487D.a, section D1.7.1 + * "Accessing PSTATE fields": + * Writes to the PSTATE fields have side-effects on + * various aspects of the PE operation. All of these + * side-effects are guaranteed: + * - Not to be visible to earlier instructions in + * the execution stream. + * - To be visible to later instructions in the + * execution stream + * + * Also, writes to PMR are self-synchronizing, so no + * interrupts with a lower priority than PMR is signaled + * to the PE after the write. + * + * So we don't need additional synchronization here. + */ + gic_write_pmr(pmr); + } + + write_sysreg(flags, daif); + + if (irq_disabled) trace_hardirqs_off(); } diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 7ed320895d1f463d1e95cd9ec6328a49eed765ae..d3cb42fd51ec2d33d4085c25f132b79875550ed8 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) +/* + * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not. + * And EFI shouldn't really play around with priority masking as it is not aware + * which priorities the OS has assigned to its interrupts. + */ +#define arch_efi_save_flags(state_flags) \ + ((void)((state_flags) = read_sysreg(daif))) + +#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif) + + /* arch specific definitions used by the stub code */ /* @@ -94,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) #define alloc_screen_info(x...) &screen_info -#define free_screen_info(x...) + +static inline void free_screen_info(efi_system_table_t *sys_table_arg, + struct screen_info *si) +{ +} /* redeclare as 'hidden' so the compiler will generate relative references */ extern struct screen_info screen_info __attribute__((__visibility__("hidden"))); diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 433b9554c6a19c1e72b2be8f827861c14f5856fb..9f5203bda3b9aa52c282f126edf5337564d3e234 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -142,6 +142,7 @@ typedef struct user_fpsimd_state elf_fpregset_t; #define SET_PERSONALITY(ex) \ ({ \ + clear_thread_flag(TIF_32BIT_AARCH64); \ clear_thread_flag(TIF_32BIT); \ current->personality &= ~READ_IMPLIES_EXEC; \ }) @@ -168,14 +169,16 @@ struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); +#ifndef CONFIG_COMPAT +#ifndef is_compat_task +#define is_compat_task() (0) +#endif +#endif + /* 1GB of VA */ -#ifdef CONFIG_COMPAT -#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ +#define STACK_RND_MASK (is_compat_task() ? \ 0x7ff >> (PAGE_SHIFT - 12) : \ 0x3ffff >> (PAGE_SHIFT - 12)) -#else -#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) -#endif #ifdef __AARCH64EB__ #define COMPAT_ELF_PLATFORM ("v8b") @@ -187,35 +190,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ #define COMPAT_ELF_ET_DYN_BASE 0x000400000UL +#endif /*CONFIG_COMPAT */ +#ifdef CONFIG_AARCH32_EL0 /* AArch32 registers. 
*/ #define COMPAT_ELF_NGREG 18 typedef unsigned int compat_elf_greg_t; typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; - -/* AArch32 EABI. */ -#define EF_ARM_EABI_MASK 0xff000000 -#define compat_elf_check_arch(x) (system_supports_32bit_el0() && \ - ((x)->e_machine == EM_ARM) && \ - ((x)->e_flags & EF_ARM_EABI_MASK)) - -#define compat_start_thread compat_start_thread -/* - * Unlike the native SET_PERSONALITY macro, the compat version maintains - * READ_IMPLIES_EXEC across an execve() since this is the behaviour on - * arch/arm/. - */ -#define COMPAT_SET_PERSONALITY(ex) \ -({ \ - set_thread_flag(TIF_32BIT); \ - }) -#define COMPAT_ARCH_DLINFO extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp); -#define compat_arch_setup_additional_pages \ - aarch32_setup_vectors_page - -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index bc30429d8e91eb288bb81217380b67fc614e2db2..d8f96d07dbc004688626be26e7deb95f175fa69d 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -41,4 +41,23 @@ static inline u32 disr_to_esr(u64 disr) return esr; } +#ifdef CONFIG_UCE_KERNEL_RECOVERY +/* Need set task state when trigger uce */ +#define KR_SET_TASK_STATE 0x00000001 + +struct uce_kernel_recovery_info { + int (*fn)(void); + const char *name; + unsigned long addr; + unsigned long size; + unsigned int flags; +}; + +extern int copy_page_cow_sea_fallback(void); +extern int copy_generic_read_sea_fallback(void); +extern int copy_from_user_sea_fallback(void); +extern int get_user_sea_fallback(void); +extern int memcpy_mc_sea_fallback(void); +#endif + #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index ec1e6d6fa14ccd4c14a735a8732a7a468c709276..3c962ef081f840ef1dcf796f90aa73d65d863640 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -59,9 +59,11 @@ enum fixed_addresses { #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_TEXT3, + FIX_ENTRY_TRAMP_TEXT2, + FIX_ENTRY_TRAMP_TEXT1, FIX_ENTRY_TRAMP_DATA, - FIX_ENTRY_TRAMP_TEXT, -#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dd1ad3950ef5dfbe270768d27da0aca82d534c8c..097c8d4966b1c4915be3d278fe5f2118acb35d00 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -29,7 +29,7 @@ #include #include -#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +#if defined(__KERNEL__) && defined(CONFIG_AARCH32_EL0) /* Masks for extracting the FPSR and FPCR from the FPSCR */ #define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_CTRL_MASK 0x07f79f00 diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index caa955f10e19509adf568e4a44bc357f2340b562..ce4934ccbb496c60f283608ad568708bca67e230 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -54,7 +54,20 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) { - return is_compat_task(); + return is_a32_compat_task(); +} + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME + 
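Illustration only, not part of the patch: the helper added just below compares past the 8-character "__arm64_" prefix (the "sym + 8" in the new hunk). A minimal userspace sketch, with example symbol names:

#include <stdio.h>
#include <string.h>

/* Mirrors the sym + 8 comparison in the helper below; "__arm64_" is 8 chars. */
static int arm64_syscall_match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym + 8, name);
}

int main(void)
{
        printf("%d\n", arm64_syscall_match_sym_name("__arm64_sys_openat", "sys_openat")); /* 1 */
        printf("%d\n", arm64_syscall_match_sym_name("__arm64_sys_read", "sys_write"));    /* 0 */
        return 0;
}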
+static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + /* + * Since all syscall functions have __arm64_ prefix, we must skip it. + * However, as we described above, we decided to ignore compat + * syscalls, so we don't care about __arm64_compat_ prefix here. + */ + return !strcmp(sym + 8, name); } #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 07fe2479d3105da29feafcb57a209d3942e6dca3..a56efb5626fa25264d3de7d34c06ba024cc2c131 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -23,26 +23,34 @@ #include +#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ + #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ do { \ + unsigned int loops = FUTEX_MAX_LOOPS; \ + \ uaccess_enable(); \ asm volatile( \ " prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ insn "\n" \ -"2: stlxr %w3, %w0, %2\n" \ -" cbnz %w3, 1b\n" \ -" dmb ish\n" \ +"2: stlxr %w0, %w3, %2\n" \ +" cbz %w0, 3f\n" \ +" sub %w4, %w4, %w0\n" \ +" cbnz %w4, 1b\n" \ +" mov %w0, %w7\n" \ "3:\n" \ +" dmb ish\n" \ " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ -"4: mov %w0, %w5\n" \ +"4: mov %w0, %w6\n" \ " b 3b\n" \ " .popsection\n" \ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ - : "r" (oparg), "Ir" (-EFAULT) \ + : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ + "+r" (loops) \ + : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ : "memory"); \ uaccess_disable(); \ } while (0) @@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("mov %w0, %w4", + __futex_atomic_op("mov %w3, %w5", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op("add %w0, %w1, %w4", + __futex_atomic_op("add %w3, %w1, %w5", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_OR: - __futex_atomic_op("orr %w0, %w1, %w4", + __futex_atomic_op("orr %w3, %w1, %w5", ret, oldval, uaddr, tmp, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %w0, %w1, %w4", + __futex_atomic_op("and %w3, %w1, %w5", ret, oldval, uaddr, tmp, ~oparg); break; case FUTEX_OP_XOR: - __futex_atomic_op("eor %w0, %w1, %w4", + __futex_atomic_op("eor %w3, %w1, %w5", ret, oldval, uaddr, tmp, oparg); break; default: @@ -93,10 +101,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { int ret = 0; + unsigned int loops = FUTEX_MAX_LOOPS; u32 val, tmp; u32 __user *uaddr; - if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) + if (!access_ok(_uaddr, sizeof(u32))) return -EFAULT; uaddr = __uaccess_mask_ptr(_uaddr); @@ -104,24 +113,30 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" "1: ldxr %w1, %2\n" -" sub %w3, %w1, %w4\n" -" cbnz %w3, 3f\n" -"2: stlxr %w3, %w5, %2\n" -" cbnz %w3, 1b\n" -" dmb ish\n" +" sub %w3, %w1, %w5\n" +" cbnz %w3, 4f\n" +"2: stlxr %w3, %w6, %2\n" +" cbz %w3, 3f\n" +" sub %w4, %w4, %w3\n" +" cbnz %w4, 1b\n" +" mov %w0, %w8\n" "3:\n" +" dmb ish\n" +"4:\n" " .pushsection .fixup,\"ax\"\n" -"4: mov %w0, %w6\n" -" b 3b\n" +"5: mov %w0, %w7\n" +" b 4b\n" " .popsection\n" - _ASM_EXTABLE(1b, 4b) - _ASM_EXTABLE(2b, 4b) - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) - : "r" (oldval), "r" (newval), "Ir" (-EFAULT) + _ASM_EXTABLE(1b, 5b) + _ASM_EXTABLE(2b, 5b) + : "+r" (ret), "=&r" (val), "+Q" 
(*uaddr), "=&r" (tmp), "+r" (loops) + : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) : "memory"); uaccess_disable(); - *uval = val; + if (!ret) + *uval = val; + return ret; } diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 1473fc2f7ab7537d7953d53c4a8550b44c126b0a..a5d94aa59c7cdee29e9f763b6bada608b34dfcc8 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -17,10 +17,14 @@ #define __ASM_HARDIRQ_H #include +#include #include +#include #include +#include +#include -#define NR_IPI 7 +#define NR_IPI 8 typedef struct { unsigned int __softirq_pending; @@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu); #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 +struct nmi_ctx { + u64 hcr; +}; + +DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts); + +#define arch_nmi_enter() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + nmi_ctx->hcr = read_sysreg(hcr_el2); \ + if (!(nmi_ctx->hcr & HCR_TGE)) { \ + write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \ + isb(); \ + } \ + } \ + } while (0) + +#define arch_nmi_exit() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + if (!(nmi_ctx->hcr & HCR_TGE)) \ + write_sysreg(nmi_ctx->hcr, hcr_el2); \ + } \ + } while (0) + static inline void ack_bad_irq(unsigned int irq) { extern unsigned long irq_err_count; diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h new file mode 100644 index 0000000000000000000000000000000000000000..e0da0ef6161358d7a029c4653d02a7d68ab1ab37 --- /dev/null +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_CPU_MODEL_H__ +#define __HISI_CPU_MODEL_H__ + +enum hisi_cpu_type { + HI_1612, + HI_1616, + HI_1620, + UNKNOWN_HI_TYPE +}; + +extern enum hisi_cpu_type hi_cpu_type; +extern bool kvm_ncsnp_support; + +void probe_hisi_cpu_type(void); +void probe_hisi_ncsnp_support(void); +#endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 400b80b49595dc147fb5a2110ff347405f58bd0b..428b745b538606046acf4c2065aa1b801d701510 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -46,15 +46,13 @@ */ #define ELF_HWCAP (elf_hwcap) -#ifdef CONFIG_COMPAT -#define COMPAT_ELF_HWCAP (compat_elf_hwcap) -#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2) -extern unsigned int compat_elf_hwcap, compat_elf_hwcap2; +#ifdef CONFIG_AARCH32_EL0 +extern unsigned int a32_elf_hwcap, a32_elf_hwcap2; #endif enum { CAP_HWCAP = 1, -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 CAP_COMPAT_HWCAP, CAP_COMPAT_HWCAP2, #endif diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index c6802dea6cabd39cc271fee36eb75eccbdd0eaa9..310e47d54d81a9a2bc8da40df7686fbf21b331e1 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -272,6 +272,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000) __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000) __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) +__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000) __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) @@ -389,6 +390,13 @@ u32 
aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, enum aarch64_insn_register state, enum aarch64_insn_size_type size, enum aarch64_insn_ldst_type type); +u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result, + enum aarch64_insn_register address, + enum aarch64_insn_register value, + enum aarch64_insn_size_type size); +u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address, + enum aarch64_insn_register value, + enum aarch64_insn_size_type size); u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 35b2e50f17fbfedc220f50c5162d42cec73160b5..09f240586c2a3fd77f6665fa8179dec043587953 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -106,7 +106,24 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) } /* IO barriers */ -#define __iormb() rmb() +#define __iormb(v) \ +({ \ + unsigned long tmp; \ + \ + rmb(); \ + \ + /* \ + * Create a dummy control dependency from the IO read to any \ + * later instructions. This ensures that a subsequent call to \ + * udelay() will be ordered due to the ISB in get_cycles(). \ + */ \ + asm volatile("eor %0, %1, %1\n" \ + "cbnz %0, ." \ + : "=r" (tmp) : "r" ((unsigned long)(v)) \ + : "memory"); \ +}) + +#define __io_par(v) __iormb(v) #define __iowmb() wmb() #define mmiowb() do { } while (0) @@ -131,10 +148,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * following Normal memory access. Writes are ordered relative to any prior * Normal memory access. */ -#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) -#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) -#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) -#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; }) #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) @@ -185,9 +202,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); /* * io{read,write}{16,32,64}be() macros */ -#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) -#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; }) +#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; }) +#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; }) +#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; }) #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index b2b0c6405eb082fea7c99e607e7f2c9d9cca4ee7..28471df488c00291c155f916200de54e396c6afe 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -13,5 +13,11 @@ static inline int 
nr_legacy_irqs(void) return 0; } +#ifdef CONFIG_SMP +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace +#endif + #endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 24692edf1a691a95d623208dc9b8641b404c2248..6074bf2ef58dd2cb37c7d6417ab8f967a2f89219 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -18,7 +18,10 @@ #ifdef __KERNEL__ +#include +#include #include +#include /* * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and @@ -36,33 +39,39 @@ /* * CPU interrupt mask handling. */ -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_irq_save\n" - "msr daifset, #2" - : "=r" (flags) - : - : "memory"); - return flags; -} - static inline void arch_local_irq_enable(void) { - asm volatile( - "msr daifclr, #2 // arch_local_irq_enable" - : + if (system_has_prio_mask_debugging()) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + + WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); + } + + asm volatile(ALTERNATIVE( + "msr daifclr, #2 // arch_local_irq_enable", + __msr_s(SYS_ICC_PMR_EL1, "%0"), + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); + + pmr_sync(); } static inline void arch_local_irq_disable(void) { - asm volatile( - "msr daifset, #2 // arch_local_irq_disable" - : + if (system_has_prio_mask_debugging()) { + u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); + + WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF); + } + + asm volatile(ALTERNATIVE( + "msr daifset, #2 // arch_local_irq_disable", + __msr_s(SYS_ICC_PMR_EL1, "%0"), + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); } @@ -72,11 +81,46 @@ static inline void arch_local_irq_disable(void) static inline unsigned long arch_local_save_flags(void) { unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_save_flags" - : "=r" (flags) + + asm volatile(ALTERNATIVE( + "mrs %0, daif", + __mrs_s("%0", SYS_ICC_PMR_EL1), + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (flags) : : "memory"); + + return flags; +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + int res; + + asm volatile(ALTERNATIVE( + "and %w0, %w1, #" __stringify(PSR_I_BIT), + "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON), + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (res) + : "r" ((int) flags) + : "memory"); + + return res; +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = arch_local_save_flags(); + + /* + * There are too many states with IRQs disabled, just keep the current + * state if interrupts are already disabled/masked. 
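A plain-C sketch of what the PMR-based alternatives above boil down to; illustration only, and the GIC_PRIO_* values are assumed here (they live in asm/arch_gicv3.h, which is not part of this hunk):

#include <stdio.h>

/* Assumed values, for illustration only. */
#define GIC_PRIO_IRQON  0xe0
#define GIC_PRIO_IRQOFF 0x60

/*
 * With ARM64_HAS_IRQ_PRIO_MASKING, "flags" carries the saved PMR value and
 * IRQs count as disabled whenever PMR != GIC_PRIO_IRQON, which is what the
 * "eor %w0, %w1, #GIC_PRIO_IRQON" alternative computes.
 */
static int pmr_irqs_disabled_flags(unsigned long flags)
{
        return (flags ^ GIC_PRIO_IRQON) != 0;
}

int main(void)
{
        printf("PMR=IRQON  -> disabled=%d\n", pmr_irqs_disabled_flags(GIC_PRIO_IRQON));
        printf("PMR=IRQOFF -> disabled=%d\n", pmr_irqs_disabled_flags(GIC_PRIO_IRQOFF));
        return 0;
}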
+ */ + if (!arch_irqs_disabled_flags(flags)) + arch_local_irq_disable(); + return flags; } @@ -85,16 +129,16 @@ static inline unsigned long arch_local_save_flags(void) */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile( - "msr daif, %0 // arch_local_irq_restore" - : - : "r" (flags) - : "memory"); -} + asm volatile(ALTERNATIVE( + "msr daif, %0", + __msr_s(SYS_ICC_PMR_EL1, "%0"), + ARM64_HAS_IRQ_PRIO_MASKING) + : + : "r" (flags) + : "memory"); -static inline int arch_irqs_disabled_flags(unsigned long flags) -{ - return flags & PSR_I_BIT; + pmr_sync(); } + #endif #endif diff --git a/arch/arm64/include/asm/is_compat.h b/arch/arm64/include/asm/is_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..484c01def030ca466db3b92ed90d0ac3c866aa17 --- /dev/null +++ b/arch/arm64/include/asm/is_compat.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __ASM_IS_COMPAT_H +#define __ASM_IS_COMPAT_H +#ifndef __ASSEMBLY__ + +#include + +#ifdef CONFIG_AARCH32_EL0 + +static inline int is_a32_compat_task(void) +{ + return test_thread_flag(TIF_32BIT); +} + +static inline int is_a32_compat_thread(struct thread_info *thread) +{ + return test_ti_thread_flag(thread, TIF_32BIT); +} + +#else + +static inline int is_a32_compat_task(void) + +{ + return 0; +} + +static inline int is_a32_compat_thread(struct thread_info *thread) +{ + return 0; +} + +#endif /* CONFIG_AARCH32_EL0 */ + +#ifdef CONFIG_ARM64_ILP32 + +static inline int is_ilp32_compat_task(void) +{ + return test_thread_flag(TIF_32BIT_AARCH64); +} + +static inline int is_ilp32_compat_thread(struct thread_info *thread) +{ + return test_ti_thread_flag(thread, TIF_32BIT_AARCH64); +} + +#else + +static inline int is_ilp32_compat_task(void) +{ + return 0; +} + +static inline int is_ilp32_compat_thread(struct thread_info *thread) +{ + return 0; +} + +#endif /* CONFIG_ARM64_ILP32 */ + +#ifdef CONFIG_COMPAT + +static inline int is_compat_task(void) +{ + return is_a32_compat_task() || is_ilp32_compat_task(); +} + +#endif /* CONFIG_COMPAT */ + +static inline int is_compat_thread(struct thread_info *thread) +{ + return is_a32_compat_thread(thread) || is_ilp32_compat_thread(thread); +} + + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_IS_COMPAT_H */ diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index a780f6714b44585b7375b3a2749714e9210b7533..74ab40b76ad53b96a6f66fd3ccb5498eff172c61 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -76,8 +76,8 @@ #define EARLY_KASLR (0) #endif -#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \ - - ((vstart) >> (shift)) + 1 + EARLY_KASLR) +#define EARLY_ENTRIES(vstart, vend, shift) \ + ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR) #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT)) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index e17f0529a882a11c4ad0fb1206cb198a80108acd..30ab1fa346cd358476edc1d34c9ccd1a7d42ae37 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -28,6 +28,14 @@ #define KEXEC_ARCH KEXEC_ARCH_AARCH64 +/* 2M alignment for crash kernel regions */ +#define CRASH_ALIGN SZ_2M + +#ifdef CONFIG_ARM64_CPU_PARK +/* CPU park state flag: "park" */ +#define PARK_MAGIC 0x7061726b +#endif + #ifndef __ASSEMBLY__ /** @@ -93,6 +101,8 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +void 
machine_kexec_mask_interrupts(void); + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index aa45df752a166a56a3ace4808db6b985ac8d2898..fd85e5424206ac0277212ff37b966de659fd7100 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -24,6 +24,8 @@ /* Hyp Configuration Register (HCR) bits */ #define HCR_FWB (UL(1) << 46) +#define HCR_API (UL(1) << 41) +#define HCR_APK (UL(1) << 40) #define HCR_TEA (UL(1) << 37) #define HCR_TERR (UL(1) << 36) #define HCR_TLOR (UL(1) << 35) @@ -87,6 +89,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ @@ -104,9 +107,10 @@ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) /* VTCR_EL2 Registers bits */ -#define VTCR_EL2_RES1 (1 << 31) +#define VTCR_EL2_RES1 (1U << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) +#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK #define VTCR_EL2_TG0_MASK TCR_TG0_MASK #define VTCR_EL2_TG0_4K TCR_TG0_4K @@ -120,62 +124,149 @@ #define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA #define VTCR_EL2_SL0_SHIFT 6 #define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) -#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_T0SZ_MASK 0x3f -#define VTCR_EL2_T0SZ_40B 24 #define VTCR_EL2_VS_SHIFT 19 #define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) #define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) +#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x) + /* * We configure the Stage-2 page tables to always restrict the IPA space to be * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * - * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time - * (see hyp-init.S). + * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_config_vm(). * * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. * - * The magic numbers used for VTTBR_X in this patch can be found in Tables - * D4-23 and D4-25 in ARM DDI 0487A.b. */ -#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B #define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1) -#ifdef CONFIG_ARM64_64K_PAGES /* - * Stage2 translation configuration: - * 64kB pages (TG0 = 1) - * 2 level page tables (SL = 1) + * VTCR_EL2:SL0 indicates the entry level for Stage2 translation. + * Interestingly, it depends on the page size. 
+ * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a + * + * ----------------------------------------- + * | Entry level | 4K | 16K/64K | + * ------------------------------------------ + * | Level: 0 | 2 | - | + * ------------------------------------------ + * | Level: 1 | 1 | 2 | + * ------------------------------------------ + * | Level: 2 | 0 | 1 | + * ------------------------------------------ + * | Level: 3 | - | 0 | + * ------------------------------------------ + * + * The table roughly translates to : + * + * SL0(PAGE_SIZE, Entry_level) = TGRAN_SL0_BASE - Entry_Level + * + * Where TGRAN_SL0_BASE is a magic number depending on the page size: + * TGRAN_SL0_BASE(4K) = 2 + * TGRAN_SL0_BASE(16K) = 3 + * TGRAN_SL0_BASE(64K) = 3 + * provided we take care of ruling out the unsupported cases and + * Entry_Level = 4 - Number_of_levels. + * */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 38 +#ifdef CONFIG_ARM64_64K_PAGES + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #elif defined(CONFIG_ARM64_16K_PAGES) -/* - * Stage2 translation configuration: - * 16kB pages (TG0 = 2) - * 2 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 42 + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #else /* 4K */ -/* - * Stage2 translation configuration: - * 4kB pages (TG0 = 0) - * 3 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 37 + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K +#define VTCR_EL2_TGRAN_SL0_BASE 2UL + #endif -#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) -#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) +#define VTCR_EL2_LVLS_TO_SL0(levels) \ + ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT) +#define VTCR_EL2_SL0_TO_LVLS(sl0) \ + ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE) +#define VTCR_EL2_LVLS(vtcr) \ + VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT) + +#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN) +#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK)) + +/* + * ARM VMSAv8-64 defines an algorithm for finding the translation table + * descriptors in section D4.2.8 in ARM DDI 0487C.a. + * + * The algorithm defines the expectations on the translation table + * addresses for each level, based on PAGE_SIZE, entry level + * and the translation table size (T0SZ). The variable "x" in the + * algorithm determines the alignment of a table base address at a given + * level and thus determines the alignment of VTTBR:BADDR for stage2 + * page table entry level. + * Since the number of bits resolved at the entry level could vary + * depending on the T0SZ, the value of "x" is defined based on a + * Magic constant for a given PAGE_SIZE and Entry Level. The + * intermediate levels must be always aligned to the PAGE_SIZE (i.e, + * x = PAGE_SHIFT). 
+ * + * The value of "x" for entry level is calculated as : + * x = Magic_N - T0SZ + * + * where Magic_N is an integer depending on the page size and the entry + * level of the page table as below: + * + * -------------------------------------------- + * | Entry level | 4K 16K 64K | + * -------------------------------------------- + * | Level: 0 (4 levels) | 28 | - | - | + * -------------------------------------------- + * | Level: 1 (3 levels) | 37 | 31 | 25 | + * -------------------------------------------- + * | Level: 2 (2 levels) | 46 | 42 | 38 | + * -------------------------------------------- + * | Level: 3 (1 level) | - | 53 | 51 | + * -------------------------------------------- + * + * We have a magic formula for the Magic_N below: + * + * Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels) + * + * where Number_of_levels = (4 - Level). We are only interested in the + * value for Entry_Level for the stage2 page table. + * + * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows: + * + * x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT) + * = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number of levels) + * + * Here is one way to explain the Magic Formula: + * + * x = log2(Size_of_Entry_Level_Table) + * + * Since, we can resolve (PAGE_SHIFT - 3) bits at each level, and another + * PAGE_SHIFT bits in the PTE, we have : + * + * Bits_Entry_level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT) + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3 + * where n = number of levels, and since each pointer is 8bytes, we have: + * + * x = Bits_Entry_Level + 3 + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n + * + * The only constraint here is that, we have to find the number of page table + * levels for a given IPA size (which we do, see stage2_pt_levels()) + */ +#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3))) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) @@ -210,6 +301,7 @@ #define FSC_FAULT ESR_ELx_FSC_FAULT #define FSC_ACCESS ESR_ELx_FSC_ACCESS #define FSC_PERM ESR_ELx_FSC_PERM +#define FSC_IGNORE (-1) #define FSC_SEA ESR_ELx_FSC_EXTABT #define FSC_SEA_TTW0 (0x14) #define FSC_SEA_TTW1 (0x15) @@ -223,10 +315,13 @@ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) - -#define kvm_arm_exception_type \ - {0, "IRQ" }, \ - {1, "TRAP" } +/* + * We have + * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12] + * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12] + */ +#define PAR_TO_HPFAR(par) \ + (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) #define ECN(x) { ESR_ELx_EC_##x, #x } diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 102b5a5c47b6cb4a00040e7efb295d357b20c8a3..ab8dec4eb3aa834f470a1255db682b0738cbd706 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -25,14 +25,22 @@ #define ARM_EXIT_WITH_SERROR_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) +#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP) #define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)) #define ARM_EXCEPTION_IRQ 0 #define ARM_EXCEPTION_EL1_SERROR 1 #define ARM_EXCEPTION_TRAP 2 +#define ARM_EXCEPTION_IL 3 /* The hyp-stub will return this for any kvm_call_hyp() call */ #define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR +#define 
kvm_arm_exception_type \ + {ARM_EXCEPTION_IRQ, "IRQ" }, \ + {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \ + {ARM_EXCEPTION_TRAP, "TRAP" }, \ + {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" } + #ifndef __ASSEMBLY__ #include @@ -55,9 +63,9 @@ extern char __kvm_hyp_init_end[]; extern char __kvm_hyp_vector[]; extern void __kvm_flush_vm_context(void); +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); @@ -72,8 +80,6 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); -extern u32 __init_stage2_translation(void); - /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ #define __hyp_this_cpu_ptr(sym) \ ({ \ @@ -102,7 +108,8 @@ extern u32 __init_stage2_translation(void); .endm .macro get_host_ctxt reg, tmp - hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp + hyp_adr_this_cpu \reg, kvm_host_data, \tmp + add \reg, \reg, #HOST_DATA_CONTEXT .endm .macro get_vcpu_ptr vcpu, ctxt diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 6106a85ae0be70f91f8ad7b64bd7723e236843e5..9ee37c0e763b2f84977a919f333ddf2d2694c94b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -52,6 +52,8 @@ static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) return !(vcpu->arch.hcr_el2 & HCR_RW); } +extern bool kvm_hcr_nofb; + static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; @@ -76,6 +78,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) */ if (!vcpu_el1_is_32bit(vcpu)) vcpu->arch.hcr_el2 |= HCR_TID3; + + if (unlikely(kvm_hcr_nofb)) + vcpu->arch.hcr_el2 &= ~HCR_FB; } static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d6d7336f871221fd29bcc3bc4faa2cee0a7765f..6eeaec218d9f7637d35e8d36ee89f493efdc0cd9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -24,13 +24,16 @@ #include #include +#include #include +#include #include #include #include #include #include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -48,6 +51,10 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) + +#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ + KVM_DIRTY_LOG_INITIALLY_SET) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -61,11 +68,13 @@ struct kvm_arch { u64 vmid_gen; u32 vmid; - /* 1-level 2nd stage table, protected by kvm->mmu_lock */ + /* stage2 entry level table */ pgd_t *pgd; /* VTTBR value associated with above pgd and vmid */ u64 vttbr; + /* VTCR_EL2 value for this VM */ + u64 vtcr; /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; @@ -204,7 +213,24 @@ struct kvm_cpu_context { struct kvm_vcpu *__hyp_running_vcpu; }; -typedef struct kvm_cpu_context kvm_cpu_context_t; +struct kvm_pmu_events { + u32 events_host; + u32 events_guest; +}; + +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; + struct kvm_pmu_events pmu_events; +}; + +typedef struct kvm_host_data kvm_host_data_t; + +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; 
+}; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -240,7 +266,7 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch external_debug_state; /* Pointer to host CPU context */ - kvm_cpu_context_t *host_cpu_context; + struct kvm_cpu_context *host_cpu_context; struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ @@ -295,9 +321,19 @@ struct kvm_vcpu_arch { /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; + /* Additional reset state */ + struct vcpu_reset_state reset_state; + /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; + + /* Guest PV sched state */ + struct { + gpa_t base; + } pvsched; + + struct id_registers idregs; }; /* vcpu_arch flags field values: */ @@ -317,7 +353,7 @@ struct kvm_vcpu_arch { */ #define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) -u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg); +u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg); void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); /* @@ -332,6 +368,7 @@ struct kvm_vm_stat { }; struct kvm_vcpu_stat { + u64 pid; u64 halt_successful_poll; u64 halt_attempted_poll; u64 halt_poll_invalid; @@ -342,6 +379,26 @@ struct kvm_vcpu_stat { u64 mmio_exit_user; u64 mmio_exit_kernel; u64 exits; + u64 fp_asimd_exit_stat; + u64 irq_exit_stat; + u64 sys64_exit_stat; + u64 mabt_exit_stat; + u64 fail_entry_exit_stat; + u64 internal_error_exit_stat; + u64 unknown_ec_exit_stat; + u64 cp15_32_exit_stat; + u64 cp15_64_exit_stat; + u64 cp14_mr_exit_stat; + u64 cp14_ls_exit_stat; + u64 cp14_64_exit_stat; + u64 smc_exit_stat; + u64 sve_exit_stat; + u64 debug_exit_stat; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; }; int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); @@ -381,11 +438,33 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run, int kvm_perf_init(void); int kvm_perf_teardown(void); +static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) +{ + vcpu_arch->pvsched.base = GPA_INVALID; +} + +static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch) +{ + return (vcpu_arch->pvsched.base != GPA_INVALID); +} + +void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted); +int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu); + void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data); + +void __kvm_enable_ssbs(void); + +static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) +{ + /* The host's MPIDR is immutable, so let's set it up at boot time */ + cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr(); +} + static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, @@ -396,8 +475,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * kernel's mapping to the linear mapping, and store it in tpidr_el2 * so that we can use adr_l to access per-cpu variables in EL2. */ - u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) - - (u64)kvm_ksym_ref(kvm_host_cpu_state)); + u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) - + (u64)kvm_ksym_ref(kvm_host_data)); /* * Call initialization code, and switch to the full blown HYP code. 
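A toy model of the per-CPU addressing used above: __cpu_init_hyp_mode stashes the delta between this CPU's kvm_host_data and the symbol address in TPIDR_EL2, and get_host_ctxt adds it back (plus the HOST_DATA_CONTEXT member offset). Userspace sketch, with the array layout and CPU count made up for illustration:

#include <stdio.h>

struct kvm_host_data { int host_ctxt; int pmu_events; };

#define NR_CPUS 4
/* stand-in for the real per-cpu area */
static struct kvm_host_data kvm_host_data[NR_CPUS];

int main(void)
{
        int cpu = 2;

        /* what __cpu_init_hyp_mode computes and stashes in tpidr_el2 */
        unsigned long tpidr_el2 = (unsigned long)&kvm_host_data[cpu]
                                - (unsigned long)&kvm_host_data[0];

        /* what get_host_ctxt does: symbol address + tpidr_el2 (+ member offset) */
        struct kvm_host_data *hd = (struct kvm_host_data *)
                        ((unsigned long)&kvm_host_data[0] + tpidr_el2);

        printf("cpu%d lookup ok: %d\n", cpu, hd == &kvm_host_data[cpu]);
        return 0;
}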
@@ -407,6 +486,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, */ BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); + + /* + * Disabling SSBD on a non-VHE system requires us to enable SSBS + * at EL2. + */ + if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) && + arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { + kvm_call_hyp(__kvm_enable_ssbs); + } } static inline bool kvm_arch_check_sve_has_vhe(void) @@ -440,13 +528,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -static inline void __cpu_init_stage2(void) -{ - u32 parange = kvm_call_hyp(__init_stage2_translation); - - WARN_ONCE(parange < 40, - "PARange is %d bits, unsupported configuration!", parange); -} +static inline void __cpu_init_stage2(void) {} /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); @@ -454,20 +536,49 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) +{ + return (!has_vhe() && attr->exclude_host); +} + #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { return kvm_arch_vcpu_run_map_fp(vcpu); } + +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); +void kvm_clr_pmu_events(u32 clr); + +void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); +#else +static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} +static inline void kvm_clr_pmu_events(u32 clr) {} #endif static inline void kvm_arm_vhe_guest_enter(void) { local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + * + * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a + * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. + */ + pmr_sync(); } static inline void kvm_arm_vhe_guest_exit(void) { + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. 
+ */ local_daif_restore(DAIF_PROCCTX_NOIRQ); /* @@ -509,8 +620,17 @@ static inline int kvm_arm_have_ssbd(void) void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); +void kvm_set_ipa_limit(void); + #define __KVM_HAVE_ARCH_VM_ALLOC struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +int kvm_arm_config_vm(struct kvm *kvm, unsigned long type); + +static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) +{ + return arm64_get_spectre_bhb_state(); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 384c343976198dd11ec241b0a958ce855eaba9d7..c7113bfe1a0efb1f62b83448310d6ac33e9b11d0 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -28,7 +28,7 @@ ({ \ u64 reg; \ asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ - "mrs_s %0, " __stringify(r##vh),\ + __mrs_s("%0", r##vh), \ ARM64_HAS_VIRT_HOST_EXTN) \ : "=r" (reg)); \ reg; \ @@ -38,7 +38,7 @@ do { \ u64 __val = (u64)(v); \ asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ - "msr_s " __stringify(r##vh) ", %x0",\ + __msr_s(r##vh, "%x0"), \ ARM64_HAS_VIRT_HOST_EXTN) \ : : "rZ" (__val)); \ } while (0) @@ -155,5 +155,15 @@ void deactivate_traps_vhe_put(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); void __noreturn __hyp_do_panic(unsigned long, ...); +/* + * Must be called from hyp code running at EL2 with an updated VTTBR + * and interrupts disabled. + */ +static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) +{ + write_sysreg(kvm->arch.vtcr, vtcr_el2); + write_sysreg(kvm->arch.vttbr, vttbr_el2); +} + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index d6fff7de5539f22ffa91202c587e13ee48d7d4de..f8f00c054776983f8c6811a75ffaef54e3bf588f 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -141,8 +141,16 @@ static inline unsigned long __kern_hyp_va(unsigned long v) * We currently only support a 40bit IPA. 
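Putting the stage-2 geometry formulas from the kvm_arm.h hunks above into numbers; illustration only, assuming 4K pages and the default 40-bit IPA with a 3-level stage-2 table (the two concatenated entry-level tables mentioned earlier):

#include <stdio.h>

#define PAGE_SHIFT              12      /* 4K pages, assumed for this example */
#define VTCR_EL2_TGRAN_SL0_BASE 2UL     /* 4K value from the hunk above */

/* SL0(PAGE_SIZE, entry_level) = TGRAN_SL0_BASE - entry_level,
 * with entry_level = 4 - number_of_levels */
static unsigned long sl0_for_levels(unsigned int levels)
{
        return VTCR_EL2_TGRAN_SL0_BASE - (4 - levels);
}

/* ARM64_VTTBR_X(ipa, levels) = ipa - levels * (PAGE_SHIFT - 3) */
static unsigned int vttbr_x(unsigned int ipa, unsigned int levels)
{
        return ipa - levels * (PAGE_SHIFT - 3);
}

int main(void)
{
        /* x = 13 means VTTBR:BADDR must be 8K aligned, i.e. the two
         * concatenated 4K entry-level tables mentioned earlier. */
        printf("SL0 field = %lu\n", sl0_for_levels(3)); /* 1 */
        printf("x         = %u\n", vttbr_x(40, 3));     /* 13 */
        return 0;
}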
*/ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) + +#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr) +#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) + +static inline bool kvm_page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} #include @@ -176,6 +184,17 @@ void kvm_clear_hyp_idmap(void); #define kvm_mk_pgd(pudp) \ __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE) +#define kvm_set_pud(pudp, pud) set_pud(pudp, pud) + +#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot) +#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot) +#define kvm_pfn_pud(pfn, prot) pfn_pud(pfn, prot) + +#define kvm_pud_pfn(pud) pud_pfn(pud) + +#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd) +#define kvm_pud_mkhuge(pud) pud_mkhuge(pud) + static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { pte_val(pte) |= PTE_S2_RDWR; @@ -188,6 +207,12 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd) return pmd; } +static inline pud_t kvm_s2pud_mkwrite(pud_t pud) +{ + pud_val(pud) |= PUD_S2_RDWR; + return pud; +} + static inline pte_t kvm_s2pte_mkexec(pte_t pte) { pte_val(pte) &= ~PTE_S2_XN; @@ -200,6 +225,12 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd) return pmd; } +static inline pud_t kvm_s2pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~PUD_S2_XN; + return pud; +} + static inline void kvm_set_s2pte_readonly(pte_t *ptep) { pteval_t old_pteval, pteval; @@ -238,10 +269,29 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp) return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN); } -static inline bool kvm_page_empty(void *ptr) +static inline void kvm_set_s2pud_readonly(pud_t *pudp) { - struct page *ptr_page = virt_to_page(ptr); - return page_count(ptr_page) == 1; + kvm_set_s2pte_readonly((pte_t *)pudp); +} + +static inline bool kvm_s2pud_readonly(pud_t *pudp) +{ + return kvm_s2pte_readonly((pte_t *)pudp); +} + +static inline bool kvm_s2pud_exec(pud_t *pudp) +{ + return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); +} + +static inline pud_t kvm_s2pud_mkyoung(pud_t pud) +{ + return pud_mkyoung(pud); +} + +static inline bool kvm_s2pud_young(pud_t pud) +{ + return pud_young(pud); } #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) @@ -394,6 +444,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, return ret; } +static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_write_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + #ifdef CONFIG_KVM_INDIRECT_VECTORS /* * EL2 vectors can be mapped and rerouted in a number of ways, @@ -427,7 +488,8 @@ static inline void *kvm_get_hyp_vector(void) void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); int slot = -1; - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) { + if ((cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) || + cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) { vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start)); slot = data->hyp_vectors_slot; } @@ -456,7 +518,8 @@ static inline int kvm_map_vectors(void) * !HBP + HEL2 -> allocate one vector slot and use exec mapping * HBP + HEL2 -> use hardened vertors and use exec mapping */ - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) { + if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) || + 
cpus_have_const_cap(ARM64_SPECTRE_BHB)) { __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start); __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); } @@ -517,5 +580,29 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) +/* + * Get the magic number 'x' for VTTBR:BADDR of this KVM instance. + * With v8.2 LVA extensions, 'x' should be a minimum of 6 with + * 52bit IPS. + */ +static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels) +{ + int x = ARM64_VTTBR_X(ipa_shift, levels); + + return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x; +} + +static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels) +{ + unsigned int x = arm64_vttbr_x(ipa_shift, levels); + + return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x); +} + +static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) +{ + return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_para.h b/arch/arm64/include/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..e1ecc089ee9b8414f0dfe12658238773e62cb819 --- /dev/null +++ b/arch/arm64/include/asm/kvm_para.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_KVM_PARA_H +#define _ASM_ARM64_KVM_PARA_H + +#define KVM_HINTS_REALTIME 0 + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} + +static inline bool kvm_para_available(void) +{ + return false; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +#endif /* _ASM_ARM64_KVM_PARA_H */ diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5817b1e6aef5ffc401357a8c41e92c76ac5f472d --- /dev/null +++ b/arch/arm64/include/asm/livepatch.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014-2019, Huawei. + * Author: Li Bin + * Author: Cheng Jian + * + * livepatch.h - arm64-specific Kernel Live Patching Core + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef _ASM_ARM64_LIVEPATCH_H +#define _ASM_ARM64_LIVEPATCH_H + +#include +#include + + +#ifdef CONFIG_LIVEPATCH + +struct klp_patch; +struct klp_func; + +#define klp_smp_isb() isb() + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +int klp_check_calltrace(struct klp_patch *patch, int enable); +#else +#error Live patching support is disabled; check CONFIG_LIVEPATCH +#endif + +#endif /* _ASM_ARM64_LIVEPATCH_H */ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 8262325e2fc66ec3d42f83ae6621ea4ec7e19651..13536c4da2c26116d74cea10739ed030334bf2b2 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -4,6 +4,8 @@ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#define __LSE_PREAMBLE ".arch_extension lse\n" + #include #include #include @@ -20,8 +22,6 @@ #else /* __ASSEMBLER__ */ -__asm__(".arch_extension lse"); - /* Move the ll/sc atomics out-of-line */ #define __LL_SC_INLINE notrace #define __LL_SC_PREFIX(x) __ll_sc_##x @@ -33,7 +33,7 @@ __asm__(".arch_extension lse"); /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS) #endif /* __ASSEMBLER__ */ #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b96442960aead1692e94d28dd35d33e7954d84b3..eeaf48163f7b5fcf02d11dadce48b176be620ac3 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -76,7 +76,8 @@ /* * KASAN requires 1/8th of the kernel virtual address space for the shadow * region. KASAN can bloat the stack significantly, so double the (minimum) - * stack size when KASAN is in use. + * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is + * on. */ #ifdef CONFIG_KASAN #define KASAN_SHADOW_SCALE_SHIFT 3 @@ -191,10 +192,12 @@ extern u64 kimage_vaddr; /* the offset between the kernel virtual and physical mappings */ extern u64 kimage_voffset; +#ifndef __ILP32__ static inline unsigned long kaslr_offset(void) { return kimage_vaddr - KIMAGE_VADDR; } +#endif /* * Allow all memory at the discovery stage. We will clip it later. @@ -254,6 +257,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) +#ifndef __ILP32__ /* * Note: Drivers should NOT use these. They are the wrong * translation for translating DMA addresses. Use the driver @@ -270,6 +274,7 @@ static inline void *phys_to_virt(phys_addr_t x) { return (void *)(__phys_to_virt(x)); } +#endif /* * Drivers should NOT use these either. 
@@ -307,6 +312,17 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \ _virt_addr_valid(kaddr)) +/* + * Given that the GIC architecture permits ITS implementations that can only be + * configured with a LPI table address once, GICv3 systems with many CPUs may + * end up reserving a lot of different regions after a kexec for their LPI + * tables (one per CPU), as we are forced to reuse the same memory after kexec + * (and thus reserve it persistently with EFI beforehand) + */ +#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) +# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) +#endif + #include #endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index dd320df0d026900d10c26f523a87c91b81a53569..fac1a43f4ac36ac20ec25fd04a80332987643153 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -27,10 +27,22 @@ typedef struct { atomic64_t id; + unsigned long pinned; void *vdso; + unsigned long refcount; unsigned long flags; } mm_context_t; +#define MAX_RES_REGIONS 32 + +struct res_mem { + phys_addr_t base; + phys_addr_t size; +}; + +extern struct res_mem res_mem[MAX_RES_REGIONS]; +extern int res_mem_count; + /* * This macro is only used by the TLBI code, which cannot race with an * ASID change and therefore doesn't need to reload the counter using @@ -38,7 +50,7 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -static inline bool arm64_kernel_unmapped_at_el0(void) +static __always_inline bool arm64_kernel_unmapped_at_el0(void) { return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); @@ -49,6 +61,12 @@ typedef void (*bp_hardening_cb_t)(void); struct bp_hardening_data { int hyp_vectors_slot; bp_hardening_cb_t fn; + + /* + * template_start is only used by the BHB mitigation to identify the + * hyp_vectors_slot sequence. 
+ */ + const char *template_start; }; #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 39ec0b8a689eea3e495029685bed047737d64c5e..04a7700109a8869d81fa74aaa7207f9d0307f825 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -168,7 +168,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp) #define destroy_context(mm) do { } while(0) void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); -#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + atomic64_set(&mm->context.id, 0); + mm->context.pinned = 0; + return 0; +} #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -223,8 +229,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) + unsigned int __maybe_unused cpu = smp_processor_id(); + + if (prev != next) { __switch_mm(next); +#ifdef CONFIG_ARM64_TLBI_IPI + if (unlikely(test_tlbi_ipi_switch())) { + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + local_flush_tlb_mm(prev); + } +#endif + } /* * Update the saved TTBR0_EL1 of the scheduled-in task as the previous @@ -238,9 +253,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, current) +unsigned long mm_context_get(struct mm_struct *mm); +void mm_context_put(struct mm_struct *mm); + void verify_cpu_asid_bits(void); void post_ttbr_update_workaround(void); +unsigned long mm_context_get(struct mm_struct *mm); +void mm_context_put(struct mm_struct *mm); + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_MMU_CONTEXT_H */ diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 97d0ef12e2ff561938acdf96196a1a213664c522..b14c1beb98c31b1bb08361d02dc5f3804d7ea8e7 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -33,6 +33,11 @@ struct mod_arch_specific { /* for CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampoline; + +#ifdef CONFIG_LIVEPATCH + struct plt_entry *core_plts; + bool have_plts; +#endif }; #endif @@ -41,6 +46,9 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym); + #ifdef CONFIG_RANDOMIZE_BASE extern u64 module_alloc_base; #else diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h new file mode 100644 index 0000000000000000000000000000000000000000..6338eab817e752cc15353541df5a5d4360b20f13 --- /dev/null +++ b/arch/arm64/include/asm/mpam.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_MPAM_H +#define _ASM_ARM64_MPAM_H + +#ifdef CONFIG_MPAM +extern int mpam_rmid_to_partid_pmg(int rmid, int *partid, int *pmg); +#endif + +#endif /* _ASM_ARM64_MPAM_H */ diff --git a/arch/arm64/include/asm/mpam_sched.h b/arch/arm64/include/asm/mpam_sched.h new file mode 100644 index 0000000000000000000000000000000000000000..08ed349b6efa1a7a345500e16d301883f0a8a47e --- /dev/null +++ b/arch/arm64/include/asm/mpam_sched.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_MPAM_SCHED_H +#define _ASM_ARM64_MPAM_SCHED_H + +#ifdef 
CONFIG_MPAM + +#include +#include + +/** + * struct intel_pqr_state - State cache for the PQR MSR + * @cur_rmid: The cached Resource Monitoring ID + * @cur_closid: The cached Class Of Service ID + * @default_rmid: The user assigned Resource Monitoring ID + * @default_closid: The user assigned cached Class Of Service ID + * + * The upper 32 bits of IA32_PQR_ASSOC contain closid and the + * lower 10 bits rmid. The update to IA32_PQR_ASSOC always + * contains both parts, so we need to cache them. This also + * stores the user configured per cpu CLOSID and RMID. + * + * The cache also helps to avoid pointless updates if the value does + * not change. + */ +struct intel_pqr_state { + u32 cur_rmid; + u32 cur_closid; + u32 default_rmid; + u32 default_closid; +}; + +DECLARE_PER_CPU(struct intel_pqr_state, pqr_state); + +extern void __mpam_sched_in(void); +DECLARE_STATIC_KEY_FALSE(resctrl_enable_key); + +static inline void mpam_sched_in(void) +{ + if (static_branch_likely(&resctrl_enable_key)) + __mpam_sched_in(); +} + +#else + +static inline void mpam_sched_in(void) {} + +#endif /* CONFIG_MPAM */ + +#endif diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..71abfc7612b2ff59f3abde8064e56ebf2434cbd2 --- /dev/null +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2018 Linaro, Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_NEON_INTRINSICS_H +#define __ASM_NEON_INTRINSICS_H + +#include + +/* + * In the kernel, u64/s64 are [un]signed long long, not [un]signed long. + * So by redefining these macros to the former, we can force gcc-stdint.h + * to define uint64_t / in64_t in a compatible manner. + */ + +#ifdef __INT64_TYPE__ +#undef __INT64_TYPE__ +#define __INT64_TYPE__ long long +#endif + +#ifdef __UINT64_TYPE__ +#undef __UINT64_TYPE__ +#define __UINT64_TYPE__ unsigned long long +#endif + +/* + * genksyms chokes on the ARM NEON instrinsics system header, but we + * don't export anything it defines anyway, so just disregard when + * genksyms execute. + */ +#ifndef __GENKSYMS__ +#include +#endif + +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + +#endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/nmi.h b/arch/arm64/include/asm/nmi.h new file mode 100644 index 0000000000000000000000000000000000000000..b9258085ac134c7fe1fc1b4815be49bc5b5d0410 --- /dev/null +++ b/arch/arm64/include/asm/nmi.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if !defined(CONFIG_HARDLOCKUP_DETECTOR) +static inline void arch_touch_nmi_watchdog(void) { } +#endif diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index 626ad01e83bf01a947c2bd9e087c2501338a4bec..43bfff72a32f1527c4d9ca45c3b07974051b6e3b 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -19,12 +19,18 @@ extern bool numa_off; extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; void numa_clear_node(unsigned int cpu); +#ifdef CONFIG_COHERENT_DEVICE +extern nodemask_t cdmmask; +#endif #ifdef CONFIG_DEBUG_PER_CPU_MAPS const struct cpumask *cpumask_of_node(int node); #else /* Returns a pointer to the cpumask of CPUs on Node 'node'. 
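 *
 * With the NUMA_NO_NODE fallback added below, callers may pass an
 * unassigned node id and still get a usable mask, e.g. (illustrative
 * sketch only, setup_on_cpu() is a placeholder):
 *
 *	for_each_cpu(cpu, cpumask_of_node(dev_to_node(dev)))
 *		setup_on_cpu(cpu);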
*/ static inline const struct cpumask *cpumask_of_node(int node) { + if (node == NUMA_NO_NODE) + return cpu_all_mask; + return node_to_cpumask_map[node]; } #endif @@ -42,6 +48,7 @@ void numa_remove_cpu(unsigned int cpu); static inline void numa_store_cpu_info(unsigned int cpu) { } static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_clear_node(unsigned int cpu) { } static inline void numa_remove_cpu(unsigned int cpu) { } static inline void arm64_numa_init(void) { } static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 60d02c81a3a2c02a6d41de1721a529a4881b507e..725bff5bc0ad365688dc158a8d7b95b52f66e9e7 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -35,6 +35,14 @@ extern void clear_page(void *to); #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) +#ifdef CONFIG_UCE_KERNEL_RECOVERY +extern int copy_page_cow(void *to, const void *from); +extern int __cpu_copy_user_page_cow(void *to, const void *from, + unsigned long user); +#define copy_user_page_cow(to, from, vaddr, pg) \ + __cpu_copy_user_page_cow(to, from, vaddr) +#endif + typedef struct page *pgtable_t; #ifdef CONFIG_HAVE_ARCH_PFN_VALID diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h index bb5dcea42003d64dccfe9e6bb11dd0ff9586562d..68b25d0eefe1f96fca3f9294d6a1f763924d997b 100644 --- a/arch/arm64/include/asm/paravirt.h +++ b/arch/arm64/include/asm/paravirt.h @@ -10,12 +10,35 @@ extern struct static_key paravirt_steal_rq_enabled; struct pv_time_ops { unsigned long long (*steal_clock)(int cpu); }; + +struct pv_sched_ops { + bool (*vcpu_is_preempted)(int cpu); +}; + +struct paravirt_patch_template { + struct pv_sched_ops sched; +}; + extern struct pv_time_ops pv_time_ops; +extern struct paravirt_patch_template pv_ops; static inline u64 paravirt_steal_clock(int cpu) { return pv_time_ops.steal_clock(cpu); } -#endif + +int __init pv_sched_init(void); + +__visible bool __native_vcpu_is_preempted(int cpu); +static inline bool pv_vcpu_is_preempted(int cpu) +{ + return pv_ops.sched.vcpu_is_preempted(cpu); +} + +#else + +#define pv_sched_init() do {} while (0) + +#endif /* CONFIG_PARAVIRT */ #endif diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 9234013e759e56a9ebd5c326cab49bd7c66df323..21a81b59a0ccd5419be92ec6e661a3e05e5820ff 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \ : [val] "Ir" (val)); \ break; \ default: \ + ret = 0; \ BUILD_BUG(); \ } \ \ @@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size) ret = READ_ONCE(*(u64 *)ptr); break; default: + ret = 0; BUILD_BUG(); } @@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, : [val] "r" (val)); break; default: + ret = 0; BUILD_BUG(); } diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index fd208eac9f2a92bc8a09bb66c2f2ae19226aedf4..e327665e94d14e2315177e25a022397e0452ff16 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -193,6 +193,10 @@ #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ +#define PUD_S2_RDONLY (_AT(pudval_t, 1) << 6) /* HAP[2:1] 
*/ +#define PUD_S2_RDWR (_AT(pudval_t, 3) << 6) /* HAP[2:1] */ +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ + /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 78b942c1bea437c5a0e9124b86afd4c48503d999..5be015e2133a282e5d508d8df72f25e1dc62c4c1 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -43,11 +43,11 @@ #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) -#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) -#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) -#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) +#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) +#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) +#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) @@ -91,17 +91,17 @@ #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) -#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) -#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) +/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */ +#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) +#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -110,7 +110,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 
1bdeca8918a684814f84ca3841b88a3123749cbb..69b395670ac1d27ba5b26610cf48126e25e2cbb5 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -22,6 +22,7 @@ #include #include #include +#include /* * VMALLOC range. @@ -105,14 +106,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. - */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) -#define pte_valid_young(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) @@ -120,14 +115,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; * Could the pte be present in the TLB? We must check mm_tlb_flush_pending * so that we don't erroneously return false for pages that have been * remapped as PROT_NONE but are yet to be flushed from the TLB. + * Note that we can't make any assumptions based on the state of the access + * flag, since ptep_clear_flush_young() elides a DSB when invalidating the + * TLB. */ #define pte_accessible(mm, pte) \ - (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. */ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) @@ -148,13 +146,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) return pte; } -static inline pte_t pte_wrprotect(pte_t pte) -{ - pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); - pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); - return pte; -} - static inline pte_t pte_mkwrite(pte_t pte) { pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); @@ -180,6 +171,20 @@ static inline pte_t pte_mkdirty(pte_t pte) return pte; } +static inline pte_t pte_wrprotect(pte_t pte) +{ + /* + * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY + * clear), set the PTE_DIRTY bit. + */ + if (pte_hw_dirty(pte)) + pte = pte_mkdirty(pte); + + pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + return pte; +} + static inline pte_t pte_mkold(pte_t pte) { return clear_pte_bit(pte, __pgprot(PTE_AF)); @@ -224,8 +229,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. 
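 *
 * The dsb(ishst) makes the updated entry visible to the table walker; the
 * isb() now paired with it is a context synchronization event, so by the
 * time set_pte() returns, later accesses on this CPU translate with the
 * new entry rather than with a walk started before the update. As noted
 * above, this only matters for valid kernel mappings that may be used
 * immediately.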
*/ - if (pte_valid_not_user(pte)) + if (pte_valid_not_user(pte)) { dsb(ishst); + isb(); + } } extern void __sync_icache_dcache(pte_t pteval); @@ -272,23 +279,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -#define __HAVE_ARCH_PTE_SAME -static inline int pte_same(pte_t pte_a, pte_t pte_b) -{ - pteval_t lhs, rhs; - - lhs = pte_val(pte_a); - rhs = pte_val(pte_b); - - if (pte_present(pte_a)) - lhs &= ~PTE_RDONLY; - - if (pte_present(pte_b)) - rhs &= ~PTE_RDONLY; - - return (lhs == rhs); -} - /* * Huge pte definitions. */ @@ -314,6 +304,11 @@ static inline pte_t pud_pte(pud_t pud) return __pte(pud_val(pud)); } +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} + static inline pmd_t pud_pmd(pud_t pud) { return __pmd(pud_val(pud)); @@ -360,6 +355,7 @@ static inline int pmd_protnone(pmd_t pmd) #define pmd_present(pmd) pte_present(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) +#define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) @@ -380,8 +376,12 @@ static inline int pmd_protnone(pmd_t pmd) #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) +#define pud_young(pud) pte_young(pud_pte(pud)) +#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) #define pud_write(pud) pte_write(pud_pte(pud)) +#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT)) + #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) @@ -417,10 +417,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, PMD_TYPE_TABLE) #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) +#define pmd_leaf(pmd) pmd_sect(pmd) #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 -#define pud_sect(pud) (0) -#define pud_table(pud) (1) +static inline bool pud_sect(pud_t pud) { return false; } +static inline bool pud_table(pud_t pud) { return true; } #else #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ PUD_TYPE_SECT) @@ -431,7 +432,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { WRITE_ONCE(*pmdp, pmd); - dsb(ishst); + + if (pmd_valid(pmd)) { + dsb(ishst); + isb(); + } } static inline void pmd_clear(pmd_t *pmdp) @@ -444,6 +449,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) return __pmd_to_phys(pmd); } +static inline void pte_unmap(pte_t *pte) { } + /* Find an entry in the third-level page table. 
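 *
 * For example, with 4K pages (PAGE_SHIFT == 12, PTRS_PER_PTE == 512), an
 * address whose low 32 bits are 0x12345678 gives
 * pte_index() == (0x12345678 >> 12) & 0x1ff == 0x145.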
*/ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) @@ -452,7 +459,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) @@ -477,11 +483,17 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) #define pud_present(pud) pte_present(pud_pte(pud)) +#define pud_leaf(pud) pud_sect(pud) +#define pud_valid(pud) pte_valid(pud_pte(pud)) static inline void set_pud(pud_t *pudp, pud_t pud) { WRITE_ONCE(*pudp, pud); - dsb(ishst); + + if (pud_valid(pud)) { + dsb(ishst); + isb(); + } } static inline void pud_clear(pud_t *pudp) @@ -534,6 +546,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { WRITE_ONCE(*pgdp, pgd); dsb(ishst); + isb(); } static inline void pgd_clear(pgd_t *pgdp) @@ -597,6 +610,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); + /* + * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware + * dirtiness again. + */ + if (pte_sw_dirty(pte)) + pte = pte_mkdirty(pte); return pte; } @@ -646,6 +665,27 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, return __ptep_test_and_clear_young(ptep); } +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + int young = ptep_test_and_clear_young(vma, address, ptep); + + if (young) { + /* + * We can elide the trailing DSB here since the worst that can + * happen is that a CPU continues to use the young entry in its + * TLB and we mistakenly reclaim the associated page. The + * window for such an event is bounded by the next + * context-switch, which provides a DSB to complete the TLB + * invalidation. + */ + flush_tlb_page_nosync(vma, address); + } + + return young; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, @@ -684,12 +724,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres pte = READ_ONCE(*ptep); do { old_pte = pte; - /* - * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY - * clear), set the PTE_DIRTY bit. - */ - if (pte_hw_dirty(pte)) - pte = pte_mkdirty(pte); pte = pte_wrprotect(pte); pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), pte_val(old_pte), pte_val(pte)); diff --git a/arch/arm64/include/asm/phytium_machine_types.h b/arch/arm64/include/asm/phytium_machine_types.h new file mode 100644 index 0000000000000000000000000000000000000000..fb791988f0cee34adb4f5f34032c43305f991ec2 --- /dev/null +++ b/arch/arm64/include/asm/phytium_machine_types.h @@ -0,0 +1,37 @@ +/* + * Authors: Wang Yinfeng + * + * Copyright (C) 2021, PHYTIUM Information Technology Co., Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef __PHYTIUM_MACHINE_TYPES_H__ +#define __PHYTIUM_MACHINE_TYPES_H__ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 79657ad91397fb0017ffd2e5d3390c8a2efa13c3..7695a5117ff20d07ae97b827c1fa895d61e1ecec 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -24,6 +24,14 @@ #define KERNEL_DS UL(-1) #define USER_DS (TASK_SIZE_64 - 1) +/* + * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is + * no point in shifting all network buffers by 2 bytes just to make some IP + * header fields appear aligned in memory, potentially sacrificing some DMA + * performance on some platforms. + */ +#define NET_IP_ALIGN 0 + #ifndef __ASSEMBLY__ /* @@ -42,6 +50,7 @@ #include #include +#include #include #include #include @@ -53,10 +62,18 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #ifdef CONFIG_COMPAT +#ifdef CONFIG_ARM64_64K_PAGES +/* + * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied + * by the compat vectors page. + */ #define TASK_SIZE_32 UL(0x100000000) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ +#else +#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE) +#endif /* CONFIG_ARM64_64K_PAGES */ +#define TASK_SIZE (is_compat_task() ? \ TASK_SIZE_32 : TASK_SIZE_64) -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ +#define TASK_SIZE_OF(tsk) (is_compat_thread(tsk) ? \ TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 @@ -67,7 +84,7 @@ #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 -#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ +#define STACK_TOP (is_compat_task() ? 
\ AARCH32_VECTORS_BASE : STACK_TOP_MAX) #else #define STACK_TOP STACK_TOP_MAX @@ -141,11 +158,11 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, *size = sizeof_field(struct thread_struct, uw); } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 #define task_user_tls(t) \ ({ \ unsigned long *__tls; \ - if (is_compat_thread(task_thread_info(t))) \ + if (is_a32_compat_thread(task_thread_info(t))) \ __tls = &(t)->thread.uw.tp2_value; \ else \ __tls = &(t)->thread.uw.tp_value; \ @@ -167,6 +184,19 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) memset(regs, 0, sizeof(*regs)); forget_syscall(regs); regs->pc = pc; + + if (system_uses_irq_prio_masking()) + regs->pmr_save = GIC_PRIO_IRQON; +} + +static inline void set_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_SSBS_BIT; +} + +static inline void set_compat_ssbs_bit(struct pt_regs *regs) +{ + regs->pstate |= PSR_AA32_SSBS_BIT; } static inline void start_thread(struct pt_regs *regs, unsigned long pc, @@ -174,10 +204,14 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, { start_thread_common(regs, pc); regs->pstate = PSR_MODE_EL0t; + + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) + set_ssbs_bit(regs); + regs->sp = sp; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { @@ -190,6 +224,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, regs->pstate |= PSR_AA32_E_BIT; #endif + if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) + set_compat_ssbs_bit(regs); + regs->compat_sp = sp; } #endif @@ -244,10 +281,6 @@ static inline void spin_lock_prefetch(const void *ptr) #endif -void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused); -void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused); -void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused); - extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ extern void __init minsigstksz_setup(void); diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 177b851ca6d997741580e73c89e448f274ac3ba7..f1662df255caf45d024b429055dc48eee2e90cb4 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -19,12 +19,35 @@ #ifndef __ASM_PTRACE_H #define __ASM_PTRACE_H +#include + #include /* Current Exception Level values, as contained in CurrentEL */ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* + * PMR values used to mask/unmask interrupts. + * + * GIC priority masking works as follows: if an IRQ's priority is a higher value + * than the value held in PMR, that IRQ is masked. Lowering the value of PMR + * means masking more IRQs (or at least that the same IRQs remain masked). + * + * To mask interrupts, we clear the most significant bit of PMR. + * + * Some code sections either automatically switch back to PSR.I or explicitly + * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included + * in the the priority mask, it indicates that PSR.I should be set and + * interrupt disabling temporarily does not rely on IRQ priorities. 
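+ *
+ * As a worked example with the values defined below: an IRQ of priority
+ * 0x70 is delivered while PMR == GIC_PRIO_IRQON (0x70 < 0xe0), but is
+ * masked once PMR is lowered to GIC_PRIO_IRQOFF (0x70 >= 0x60).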
+ */ +#define GIC_PRIO_IRQON 0xe0 +#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) +#define GIC_PRIO_PSR_I_SET (1 << 4) + +/* Additional SPSR bits not exposed in the UABI */ +#define PSR_IL_BIT (1 << 20) + /* AArch32-specific ptrace requests */ #define COMPAT_PTRACE_GETREGS 12 #define COMPAT_PTRACE_SETREGS 13 @@ -50,6 +73,7 @@ #define PSR_AA32_I_BIT 0x00000080 #define PSR_AA32_A_BIT 0x00000100 #define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_SSBS_BIT 0x00800000 #define PSR_AA32_DIT_BIT 0x01000000 #define PSR_AA32_Q_BIT 0x08000000 #define PSR_AA32_V_BIT 0x10000000 @@ -163,7 +187,8 @@ struct pt_regs { #endif u64 orig_addr_limit; - u64 unused; // maintain 16 byte alignment + /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ + u64 pmr_save; u64 stackframe[2]; }; @@ -181,34 +206,43 @@ static inline void forget_syscall(struct pt_regs *regs) #define arch_has_single_step() (1) -#ifdef CONFIG_COMPAT -#define compat_thumb_mode(regs) \ +#ifdef CONFIG_AARCH32_EL0 +#define a32_thumb_mode(regs) \ (((regs)->pstate & PSR_AA32_T_BIT)) #else -#define compat_thumb_mode(regs) (0) +#define a32_thumb_mode(regs) (0) #endif +#define compat_thumb_mode(regs) a32_thumb_mode(regs) + #define user_mode(regs) \ (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) -#define compat_user_mode(regs) \ +#define a32_user_mode(regs) \ (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ (PSR_MODE32_BIT | PSR_MODE_EL0t)) +#define compat_user_mode(regs) a32_user_mode(regs) + #define processor_mode(regs) \ ((regs)->pstate & PSR_MODE_MASK) -#define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_I_BIT)) +#define irqs_priority_unmasked(regs) \ + (system_uses_irq_prio_masking() ? \ + (regs)->pmr_save == GIC_PRIO_IRQON : \ + true) + +#define interrupts_enabled(regs) \ + (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ (!((regs)->pstate & PSR_F_BIT)) #define GET_USP(regs) \ - (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) + (!a32_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) #define SET_USP(ptregs, value) \ - (!compat_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value)) + (!a32_user_mode(regs) ? 
((regs)->sp = value) : ((regs)->compat_sp = value)) extern int regs_query_register_offset(const char *name); extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, diff --git a/arch/arm64/include/asm/pvsched-abi.h b/arch/arm64/include/asm/pvsched-abi.h new file mode 100644 index 0000000000000000000000000000000000000000..80e50e7a1a3179eb4104262cd0cf18dd1cd603ec --- /dev/null +++ b/arch/arm64/include/asm/pvsched-abi.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Author: Zengruan Ye + */ + +#ifndef __ASM_PVSCHED_ABI_H +#define __ASM_PVSCHED_ABI_H + +struct pvsched_vcpu_state { + __le32 preempted; + /* Structure must be 64 byte aligned, pad to that size */ + u8 padding[60]; +} __packed; + +#endif diff --git a/arch/arm64/include/asm/qspinlock.h b/arch/arm64/include/asm/qspinlock.h new file mode 100644 index 0000000000000000000000000000000000000000..fbe176fd4b3f707495f8083285eb15da9ba76e1e --- /dev/null +++ b/arch/arm64/include/asm/qspinlock.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_QSPINLOCK_H +#define _ASM_ARM64_QSPINLOCK_H + +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +#include + +extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val); + +#define queued_spin_unlock queued_spin_unlock +/** + * queued_spin_unlock - release a queued spinlock + * @lock : Pointer to queued spinlock structure + * + * A smp_store_release() on the least-significant byte. + */ +static inline void native_queued_spin_unlock(struct qspinlock *lock) +{ + smp_store_release(&lock->locked, 0); +} + +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + cna_queued_spin_lock_slowpath(lock, val); +} + +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + native_queued_spin_unlock(lock); +} +#endif + +#include + +#endif /* _ASM_ARM64_QSPINLOCK_H */ diff --git a/arch/arm64/include/asm/ras.h b/arch/arm64/include/asm/ras.h new file mode 100644 index 0000000000000000000000000000000000000000..f0f18da2692b2fda25a63dbc245a99d453ce24ec --- /dev/null +++ b/arch/arm64/include/asm/ras.h @@ -0,0 +1,23 @@ +/* + * ARM64 SEA error recoery support + * + * Copyright 2017 Huawei Technologies Co., Ltd. + * Author: Xie XiuQi + * Author: Wang Xiongfeng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ASM_RAS_H +#define _ASM_RAS_H + +extern void sea_notify_process(void); + +#endif /*_ASM_RAS_H*/ diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..bdf52f104d2e719031af41b33e1ffdad26808a34 --- /dev/null +++ b/arch/arm64/include/asm/resctrl.h @@ -0,0 +1,527 @@ +#ifndef _ASM_ARM64_RESCTRL_H +#define _ASM_ARM64_RESCTRL_H + +#include +#include +#include + +#if defined(CONFIG_RESCTRL) && defined(CONFIG_MPAM) + +#define resctrl_group rdtgroup +#define resctrl_alloc_capable rdt_alloc_capable +#define resctrl_mon_capable rdt_mon_capable + +enum resctrl_resource_level { + RDT_RESOURCE_SMMU, + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MC, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + +enum rdt_event_id { + QOS_L3_OCCUP_EVENT_ID = 0x01, + QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, + QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, + + QOS_CAT_CPBM_EVENT_ID = 0x04, + QOS_CAT_CMAX_EVENT_ID = 0x05, + QOS_CAT_INTPRI_EVENT_ID = 0x06, + QOS_CAT_DSPRI_EVENT_ID = 0x07, + QOS_MBA_MAX_EVENT_ID = 0x08, + QOS_MBA_MIN_EVENT_ID = 0x09, + QOS_MBA_PBM_EVENT_ID = 0x0a, + QOS_MBA_INTPRI_EVENT_ID = 0x0b, + QOS_MBA_DSPRI_EVENT_ID = 0x0c, + QOS_MBA_HDL_EVENT_ID = 0x0d, + /* Must be the last */ + RESCTRL_NUM_EVENT_IDS, +}; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP, + RDT_NUM_GROUP, +}; + +/** + * struct resctrl_cache - Cache allocation related data + * @cbm_len: Length of the cache bit mask + * @min_cbm_bits: Minimum number of consecutive bits to be set + * @shareable_bits: Bitmask of shareable resource with other + * executing entities + */ +struct resctrl_cache { + u32 cbm_len; + u32 shareable_bits; + u32 min_cbm_bits; +}; + +/** + * struct resctrl_membw - Memory bandwidth allocation related data + * @min_bw: Minimum memory bandwidth percentage user can request + * @bw_gran: Granularity at which the memory bandwidth is allocated + * @delay_linear: True if memory B/W delay is in linear scale + * @ctrl_extend_bits: Indicates if there are extra ctrl capabilities supported. + * e.g. priority/hardlimit. + */ +struct resctrl_membw { + u32 min_bw; + u32 bw_gran; + u32 delay_linear; +}; + +/** + * struct resctrl_resource - attributes of an RDT resource + * @rid: The index of the resource + * @alloc_enabled: Is allocation enabled on this machine + * @mon_enabled: Is monitoring enabled for this feature + * @alloc_capable: Is allocation available on this machine + * @mon_capable: Is monitor feature available on this machine + * @name: Name to use in "schemata" file + * @domains: All domains for this resource + * @cache: Cache allocation related data + * @mbw: Memory Bandwidth allocation related data + * @evt_list: List of monitoring events + * @fflags: flags to choose base and info files + */ +struct resctrl_resource { + int rid; + bool alloc_enabled; + bool mon_enabled; + bool alloc_capable; + bool mon_capable; + char *name; + struct list_head domains; + u32 dom_num; + struct list_head evt_list; + unsigned long fflags; + + struct resctrl_cache cache; + struct resctrl_membw mbw; + + bool cdp_capable; + bool cdp_enable; + u32 *default_ctrl; + + u32 ctrl_extend_bits; + + void *res; +}; + +/* List of all resource groups */ +extern struct list_head resctrl_all_groups; + +/** + * struct mongroup - store mon group's data in resctrl fs. 
+ * @mon_data_kn kernlfs node for the mon_data directory + * @parent: parent rdtgrp + * @crdtgrp_list: child rdtgroup node list + * @rmid: rmid for this rdtgroup + * @init: init flag + */ +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; + int init; +}; + +/** + * struct sd_closid - software defined closid + * @intpartid: closid for this rdtgroup only for allocation + * @reqpartid: closid for synchronizing configuration and monitoring + */ +struct sd_closid { + u32 intpartid; + u32 reqpartid; +}; + +/** + * struct rdtgroup - store rdtgroup's data in resctrl file system. + * @kn: kernfs node + * @resctrl_group_list: linked list for all rdtgroups + * @closid: software defined closid + * @cpu_mask: CPUs assigned to this rdtgroup + * @flags: status bits + * @waitcount: how many cpus expect to find this + * group when they acquire resctrl_group_mutex + * @type: indicates type of this rdtgroup - either + * monitor only or ctrl_mon group + * @mon: mongroup related data + */ +struct rdtgroup { + struct kernfs_node *kn; + struct list_head resctrl_group_list; + struct sd_closid closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + int resync; +}; + +enum resctrl_ctrl_type { + SCHEMA_COMM = 0, + SCHEMA_PRI, + SCHEMA_HDL, + SCHEMA_PBM, + SCHEMA_MAX, + SCHEMA_MIN, + SCHEMA_NUM_CTRL_TYPE +}; + +#define for_each_ctrl_type(t) \ + for (t = SCHEMA_COMM; t != SCHEMA_NUM_CTRL_TYPE; t++) + +#define for_each_extend_ctrl_type(t) \ + for (t = SCHEMA_PRI; t != SCHEMA_NUM_CTRL_TYPE; t++) + +/** + * struct resctrl_ctrl_feature - ctrl feature member live in schema list + * @flags: Does what ctrl types can this feature server for + * @name: Name of this ctrl feature + * @max_wd: Max width of this feature can be input from outter space + * @base: Base of integer from outter space + * @evt: rdt_event_id event owned for applying configuration + * @capable: Does this feature support + * @enabled: Enabled or not. + * @default_ctrl: Default ctrl value of this feature + */ +struct resctrl_ctrl_feature { + enum resctrl_ctrl_type type; + int flags; + const char *name; + u32 max_wd; + int base; + enum rdt_event_id evt; + int default_ctrl; + bool capable; + bool enabled; + + const char *ctrl_suffix; +}; + +struct msr_param { + enum resctrl_ctrl_type type; + struct sd_closid *closid; +}; + +enum resctrl_conf_type { + CDP_BOTH = 0, + CDP_CODE, + CDP_DATA, + CDP_NUM_CONF_TYPE, +}; + +static inline int conf_name_to_conf_type(char *name) +{ + enum resctrl_conf_type t; + + if (!strcmp(name, "L3CODE") || !strcmp(name, "L2CODE")) + t = CDP_CODE; + else if (!strcmp(name, "L3DATA") || !strcmp(name, "L2DATA")) + t = CDP_DATA; + else + t = CDP_BOTH; + return t; +} + +#define for_each_conf_type(t) \ + for (t = CDP_BOTH; t < CDP_NUM_CONF_TYPE; t++) + +typedef struct { u16 val; } hw_mpamid_t; +typedef hw_mpamid_t hw_closid_t; + +#define hw_mpamid_val(__x) (__x.val) +#define hw_closid_val(__x) (__x.val) + +#define as_hw_mpamid_t(__x) ((hw_mpamid_t){(__x)}) + +/** + * When cdp enabled, give (closid + 1) to Cache LxDATA. 
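+ *
+ * For example, with CDP enabled a resctrl group holding closid 4 programs
+ * PARTID 4 for its CODE configuration and PARTID 5 for its DATA
+ * configuration; with CDP disabled both use PARTID 4.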
+ */ +#define resctrl_cdp_mpamid_map(__id, __type, __hw_mpamid) \ +do { \ + if (__type == CDP_CODE) \ + __hw_mpamid = as_hw_mpamid_t(__id); \ + else if (__type == CDP_DATA) \ + __hw_mpamid = as_hw_mpamid_t(__id + 1); \ + else \ + __hw_mpamid = as_hw_mpamid_t(__id); \ +} while (0) + +#define resctrl_cdp_mpamid_map_val(__id, __type, __hw_mpamid_val) \ +do { \ + if (__type == CDP_CODE) \ + __hw_mpamid_val = __id; \ + else if (__type == CDP_DATA) \ + __hw_mpamid_val = __id + 1; \ + else \ + __hw_mpamid_val = __id; \ +} while (0) + +bool is_resctrl_cdp_enabled(void); + +#define hw_alloc_validate(__flag) \ +do { \ + if (is_resctrl_cdp_enabled()) \ + __flag = true; \ + else \ + __flag = false; \ +} while (0) + +#define hw_alloc_times_validate(__times, __flag) \ +do { \ + hw_alloc_validate(__flag); \ + if (__flag) \ + __times = 2; \ + else \ + __times = 1; \ +} while (0) + +/** + * struct resctrl_staged_config - parsed configuration to be applied + * @hw_closid: raw closid for this configuration, regardless of CDP + * @new_ctrl: new ctrl value to be loaded + * @have_new_ctrl: did user provide new_ctrl for this domain + * @new_ctrl_type: CDP property of the new ctrl + * @cdp_both_ctrl: did cdp both control if cdp enabled + */ +struct resctrl_staged_config { + hw_closid_t hw_closid; + u32 new_ctrl[SCHEMA_NUM_CTRL_TYPE]; + bool ctrl_updated[SCHEMA_NUM_CTRL_TYPE]; + bool have_new_ctrl; + enum resctrl_conf_type conf_type; + enum resctrl_ctrl_type ctrl_type; + bool cdp_both_ctrl; +}; + +/* later move to resctrl common directory */ +#define RESCTRL_NAME_LEN 15 + +struct resctrl_schema_ctrl { + struct list_head list; + char name[RESCTRL_NAME_LEN]; + enum resctrl_ctrl_type ctrl_type; +}; + +/** + * @list: Member of resctrl's schema list + * @name: Name visible in the schemata file + * @conf_type: Type of configuration, e.g. code/data/both + * @res: The rdt_resource for this entry + * @schemata_ctrl_list: Type of ctrl configuration. e.g. priority/hardlimit + * @cdp_mc_both: did cdp both mon/ctrl if cdp enabled + */ +struct resctrl_schema { + struct list_head list; + char name[RESCTRL_NAME_LEN]; + enum resctrl_conf_type conf_type; + struct resctrl_resource *res; + struct list_head schema_ctrl_list; + bool cdp_mc_both; +}; + +int schemata_list_init(void); + +void schemata_list_destroy(void); + +/** + * struct rdt_domain - group of cpus sharing an RDT resource + * @list: all instances of this resource + * @id: unique id for this instance + * @cpu_mask: which cpus share this resource + * @base MMIO base address + * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID) + * @have_new_ctrl: did user provide new_ctrl for this domain + */ +struct rdt_domain { + struct list_head list; + int id; + struct cpumask cpu_mask; + void __iomem *base; + + /* arch specific fields */ + u32 *ctrl_val[SCHEMA_NUM_CTRL_TYPE]; + bool have_new_ctrl; + + /* for debug */ + char *cpus_list; + + struct resctrl_staged_config staged_cfg[CDP_NUM_CONF_TYPE]; +}; + +/* + * Internal struct of resctrl_resource structure, + * for static initialization. 
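+ *
+ * The msr_update/msr_read and mon_read/mon_write callbacks below are the
+ * per-resource backends; struct msr_param tells them which control type
+ * and which sd_closid to operate on.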
+ */ +struct raw_resctrl_resource { + u16 num_partid; + u16 num_intpartid; + u16 num_pmg; + + u16 extend_ctrls_wd[SCHEMA_NUM_CTRL_TYPE]; + + void (*msr_update)(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para); + u64 (*msr_read)(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para); + + int data_width; + const char *format_str; + int (*parse_ctrlval)(char *buf, struct resctrl_resource *r, + struct resctrl_staged_config *cfg, enum resctrl_ctrl_type ctrl_type); + + u16 num_mon; + u64 (*mon_read)(struct rdt_domain *d, void *md_priv); + int (*mon_write)(struct rdt_domain *d, void *md_priv); + unsigned long fflags; + + struct resctrl_ctrl_feature ctrl_features[SCHEMA_NUM_CTRL_TYPE]; +}; + +int rmid_alloc(int entry_idx); +void rmid_free(int rmid); + +int resctrl_id_init(void); +int closid_alloc(void); +void closid_free(int closid); + +void update_cpu_closid_rmid(void *info); +void update_closid_rmid(const struct cpumask *cpu_mask, + struct resctrl_group *r); +int __resctrl_group_move_task(struct task_struct *tsk, + struct resctrl_group *rdtgrp); + +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; + +/* rdtgroup.flags */ +#define RDT_DELETED BIT(0) + +void rdt_last_cmd_clear(void); +void rdt_last_cmd_puts(const char *s); +void rdt_last_cmd_printf(const char *fmt, ...); + +void resctrl_resource_reset(void); + +#define release_resctrl_group_fs_options release_rdtgroupfs_options +#define parse_resctrl_group_fs_options parse_rdtgroupfs_options + +int resctrl_group_init_alloc(struct rdtgroup *rdtgrp); + +int __resctrl_group_show_options(struct seq_file *seq); + +int resctrl_update_groups_config(struct rdtgroup *rdtgrp); + +#define RESCTRL_MAX_CLOSID 32 + +int resctrl_group_init(void); + +void post_resctrl_mount(void); + +extern struct mutex resctrl_group_mutex; +DECLARE_STATIC_KEY_FALSE(resctrl_alloc_enable_key); +extern struct rdtgroup resctrl_group_default; +int resctrl_mkdir_mondata_all_subdir(struct kernfs_node *parent_kn, + struct resctrl_group *prgrp); + +int resctrl_group_create_info_dir(struct kernfs_node *parent_kn, + struct kernfs_node **kn_info); + +int register_resctrl_specific_files(struct rftype *files, size_t len); +extern struct kernfs_ops resctrl_group_kf_single_ops; + +extern struct rdtgroup *resctrl_group_kn_lock_live(struct kernfs_node *kn); +void resctrl_group_kn_unlock(struct kernfs_node *kn); + +void release_rdtgroupfs_options(void); +int parse_rdtgroupfs_options(char *data); + +int resctrl_group_add_files(struct kernfs_node *kn, unsigned long fflags); + +static inline void resctrl_cdp_update_cpus_state(struct resctrl_group *rdtgrp) +{ + int cpu; + + /* + * If cdp on, tasks in resctrl default group with closid=0 + * and rmid=0 don't know how to fill proper partid_i/pmg_i + * and partid_d/pmg_d into MPAMx_ELx sysregs by mpam_sched_in() + * called by __switch_to(), it's because current cpu's default + * closid and rmid are also equal to 0 and make the operation + * modifying configuration passed. Update per cpu default closid + * of none-zero value, call update_closid_rmid() to update each + * cpu's mpam proper MPAMx_ELx sysregs for setting partid and + * pmg when mounting resctrl sysfs, which is a practical method; + * Besides, to support cpu online and offline we should set + * cur_closid to 0. 
+ */ + for_each_cpu(cpu, &rdtgrp->cpu_mask) { + per_cpu(pqr_state.default_closid, cpu) = ~0; + per_cpu(pqr_state.cur_closid, cpu) = 0; + } + + update_closid_rmid(&rdtgrp->cpu_mask, NULL); +} + +#define RESCTRL_MAX_CBM 32 + +/* + * This is only for avoiding unnecessary cost in mpam_sched_in() + * called by __switch_to() if using mpam_rmid_to_partid_pmg() + * to get partid and pmg, we just simply shift and get their + * two easily when we want. + */ +static inline void resctrl_navie_rmid_partid_pmg(u32 rmid, int *partid, int *pmg) +{ + *partid = rmid >> 16; + *pmg = (rmid << 16) >> 16; +} + +static inline u32 resctrl_navie_rmid(u32 rmid) +{ + int ret, partid, pmg; + + ret = mpam_rmid_to_partid_pmg(rmid, (int *)&partid, (int *)&pmg); + if (ret) + return 0; + + return (partid << 16) | pmg; +} + +/* + * closid.reqpartid is used as part of mapping to rmid, now + * we only need to map intpartid to closid. + */ +static inline u32 resctrl_navie_closid(struct sd_closid closid) +{ + return closid.intpartid; +} + +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. + * + * Return: void + */ +static inline void rdtgroup_remove(struct rdtgroup *rdtgrp) +{ + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} + +#endif +#endif /* _ASM_ARM64_RESCTRL_H */ diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h index c76fac9796290031a4cd5f612fec26c9e6175b8f..b7c986c3dbcab097e25c02833b21e9186cc582d1 100644 --- a/arch/arm64/include/asm/seccomp.h +++ b/arch/arm64/include/asm/seccomp.h @@ -13,13 +13,43 @@ #include -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 #define __NR_seccomp_read_32 __NR_compat_read #define __NR_seccomp_write_32 __NR_compat_write #define __NR_seccomp_exit_32 __NR_compat_exit #define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn #endif /* CONFIG_COMPAT */ +#ifdef CONFIG_COMPAT +#ifndef __COMPAT_SYSCALL_NR + +static inline const int *get_compat_mode1_syscalls(void) +{ +#ifdef CONFIG_AARCH32_EL0 + static const int mode1_syscalls_a32[] = { + __NR_compat_read, __NR_compat_write, + __NR_compat_read, __NR_compat_sigreturn, + 0, /* null terminated */ + }; +#endif + static const int mode1_syscalls_ilp32[] = { + __NR_read, __NR_write, + __NR_exit, __NR_rt_sigreturn, + 0, /* null terminated */ + }; + +#ifdef CONFIG_AARCH32_EL0 + if (is_a32_compat_task()) + return mode1_syscalls_a32; +#endif + return mode1_syscalls_ilp32; +} + +#define get_compat_mode1_syscalls get_compat_mode1_syscalls + +#endif +#endif + #include #endif /* _ASM_SECCOMP_H */ diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index caab039d63055ad7180b6a029cd80d8c572c92eb..8d3f1eab58e049bdc39c62fac562bb65f09fcdd9 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -30,4 +30,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; +static inline size_t entry_tramp_text_size(void) +{ + return __entry_tramp_text_end - __entry_tramp_text_start; +} + #endif /* __ASM_SECTIONS_H */ diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index 
81abea0b7650867d86385fc0c4fc145fefd24c46..92f48828b13a17e2a2c56a4ffd9fd864237fcee1 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h @@ -17,34 +17,37 @@ #define __ASM_SIGNAL32_H #ifdef __KERNEL__ -#ifdef CONFIG_COMPAT + +#ifdef CONFIG_AARCH32_EL0 + #include #define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500 -int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, +int a32_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs); -int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, + +int a32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs); -void compat_setup_restart_syscall(struct pt_regs *regs); +void a32_setup_restart_syscall(struct pt_regs *regs); #else -static inline int compat_setup_frame(int usid, struct ksignal *ksig, +static inline int a32_setup_frame(int usid, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { return -ENOSYS; } -static inline int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, +static inline int a32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { return -ENOSYS; } -static inline void compat_setup_restart_syscall(struct pt_regs *regs) +static inline void a32_setup_restart_syscall(struct pt_regs *regs) { } -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ #endif /* __KERNEL__ */ #endif /* __ASM_SIGNAL32_H */ diff --git a/arch/arm64/include/asm/signal32_common.h b/arch/arm64/include/asm/signal32_common.h new file mode 100644 index 0000000000000000000000000000000000000000..10bcdf6b8b4c25cb636e4f5ff225186deb886817 --- /dev/null +++ b/arch/arm64/include/asm/signal32_common.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __ASM_SIGNAL32_COMMON_H +#define __ASM_SIGNAL32_COMMON_H + +#ifdef CONFIG_COMPAT + +int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set); +int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset); + +#endif /* CONFIG_COMPAT*/ + +#endif /* __ASM_SIGNAL32_COMMON_H */ diff --git a/arch/arm64/include/asm/signal_common.h b/arch/arm64/include/asm/signal_common.h new file mode 100644 index 0000000000000000000000000000000000000000..4045faab34b4c51d54fe7bbc0ae33a60c3a751c2 --- /dev/null +++ b/arch/arm64/include/asm/signal_common.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +/* + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2018 Cavium Networks. + */ + +#ifndef __ASM_SIGNAL_COMMON_H +#define __ASM_SIGNAL_COMMON_H + +#include +#include +#include + +#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) +#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) +#define SIGCONTEXT_RESERVED_SIZE sizeof(((struct sigcontext *)0)->__reserved) +#define RT_SIGFRAME_RESERVED_OFFSET \ + offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved) + +/* + * Sanity limit on the approximate maximum size of signal frame we'll + * try to generate. Stack alignment padding and the frame record are + * not taken into account. This limit is not a guarantee and is + * NOT ABI. 
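+ *
+ * (SZ_64K is 65536 bytes. Even the largest architected SVE vector length,
+ * 2048 bits, needs well under 16K of record space, so this ceiling is
+ * generous while still bounding how much user stack one frame may consume.)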
+ */ +#define SIGFRAME_MAXSZ SZ_64K + +struct rt_sigframe_user_layout { + void __user *sigframe; + struct frame_record __user *next_frame; + + unsigned long size; /* size of allocated sigframe data */ + unsigned long limit; /* largest allowed size */ + + unsigned long fpsimd_offset; + unsigned long esr_offset; + unsigned long sve_offset; + unsigned long extra_offset; + unsigned long end_offset; +}; + +struct user_ctxs { + struct fpsimd_context __user *fpsimd; + struct sve_context __user *sve; +}; + +struct frame_record { + u64 fp; + u64 lr; +}; + +void __user *apply_user_offset(struct rt_sigframe_user_layout const *user, + unsigned long offset); + +int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all); +int setup_extra_context(char __user *sfp, unsigned long sf_size, + char __user *exprap); +int __parse_user_sigcontext(struct user_ctxs *user, + struct sigcontext __user const *sc, + void __user const *sigframe_base); +#define parse_user_sigcontext(user, sf) \ + __parse_user_sigcontext(user, &(sf)->uc.uc_mcontext, sf) + +int preserve_fpsimd_context(struct fpsimd_context __user *ctx); +int restore_fpsimd_context(struct fpsimd_context __user *ctx); + +#ifdef CONFIG_ARM64_SVE +int preserve_sve_context(struct sve_context __user *ctx); +int restore_sve_fpsimd_context(struct user_ctxs *user); +#else /* ! CONFIG_ARM64_SVE */ + +/* Turn any non-optimised out attempts to use these into a link error: */ +extern int preserve_sve_context(void __user *ctx); +extern int restore_sve_fpsimd_context(struct user_ctxs *user); + +#endif /* ! CONFIG_ARM64_SVE */ + +int sigframe_alloc(struct rt_sigframe_user_layout *user, + unsigned long *offset, size_t size); +int sigframe_alloc_end(struct rt_sigframe_user_layout *user); + +void __setup_return(struct pt_regs *regs, struct k_sigaction *ka, + struct rt_sigframe_user_layout *user, int usig); + +static void init_user_layout(struct rt_sigframe_user_layout *user) +{ + memset(user, 0, sizeof(*user)); + user->size = RT_SIGFRAME_RESERVED_OFFSET; + + user->limit = user->size + SIGCONTEXT_RESERVED_SIZE; + + user->limit -= TERMINATOR_SIZE; + user->limit -= EXTRA_CONTEXT_SIZE; + /* Reserve space for extension and terminator ^ */ +} + +static size_t sigframe_size(struct rt_sigframe_user_layout const *user) +{ + return round_up(max(user->size, sizeof(struct rt_sigframe)), 16); +} + +static int get_sigframe(struct rt_sigframe_user_layout *user, + struct ksignal *ksig, struct pt_regs *regs) +{ + unsigned long sp, sp_top; + int err; + + init_user_layout(user); + err = setup_sigframe_layout(user, false); + if (err) + return err; + + sp = sp_top = sigsp(regs->sp, ksig); + + sp = round_down(sp - sizeof(struct frame_record), 16); + user->next_frame = (struct frame_record __user *)sp; + + sp = round_down(sp, 16) - sigframe_size(user); + user->sigframe = (void __user *)sp; + + /* + * Check that we can actually write to the signal frame. 
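+ * The range checked runs from the 16-byte aligned sigframe base all the
+ * way up to sp_top, so it covers the frame record and any alignment
+ * padding as well as the sigframe itself.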
+ */ + if (!access_ok(user->sigframe, sp_top - sp)) + return -EFAULT; + + return 0; +} + +static int restore_sigframe(struct pt_regs *regs, + struct rt_sigframe __user *sf) +{ + sigset_t set; + int i, err; + struct user_ctxs user; + + err = get_sigset(&set, &sf->uc.uc_sigmask); + if (err == 0) + set_current_blocked(&set); + + for (i = 0; i < 31; i++) + __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); + __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); + __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); + __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); + + /* + * Avoid sys_rt_sigreturn() restarting. + */ + forget_syscall(regs); + + err |= !valid_user_regs(®s->user_regs, current); + if (err == 0) + err = parse_user_sigcontext(&user, sf); + + if (err == 0) { + if (!user.fpsimd) + return -EINVAL; + + if (user.sve) { + if (!system_supports_sve()) + return -EINVAL; + + err = restore_sve_fpsimd_context(&user); + } else { + err = restore_fpsimd_context(user.fpsimd); + } + } + + return err; +} + +static int setup_sigframe(struct rt_sigframe_user_layout *user, + struct pt_regs *regs, sigset_t *set) +{ + int i, err = 0; + struct rt_sigframe __user *sf = user->sigframe; + + /* set up the stack frame for unwinding */ + __put_user_error(regs->regs[29], &user->next_frame->fp, err); + __put_user_error(regs->regs[30], &user->next_frame->lr, err); + + for (i = 0; i < 31; i++) + __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); + __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); + __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); + __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); + + __put_user_error(current->thread.fault_address, + &sf->uc.uc_mcontext.fault_address, err); + + err |= put_sigset(set, &sf->uc.uc_sigmask); + + if (err == 0) { + struct fpsimd_context __user *fpsimd_ctx = + apply_user_offset(user, user->fpsimd_offset); + err |= preserve_fpsimd_context(fpsimd_ctx); + } + + /* fault information, if valid */ + if (err == 0 && user->esr_offset) { + struct esr_context __user *esr_ctx = + apply_user_offset(user, user->esr_offset); + + __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err); + __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err); + __put_user_error(current->thread.fault_code, + &esr_ctx->esr, err); + } + + /* Scalable Vector Extension state, if present */ + if (system_supports_sve() && err == 0 && user->sve_offset) { + struct sve_context __user *sve_ctx = + apply_user_offset(user, user->sve_offset); + err |= preserve_sve_context(sve_ctx); + } + + if (err == 0 && user->extra_offset) + setup_extra_context((char __user *)user->sigframe, user->size, + (char __user *)apply_user_offset(user, + user->extra_offset)); + + /* set the "end" magic */ + if (err == 0) { + struct _aarch64_ctx __user *end = + apply_user_offset(user, user->end_offset); + + __put_user_error(0, &end->magic, err); + __put_user_error(0, &end->size, err); + } + + return err; +} + +static long __sys_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* + * Since we stacked the signal on a 128-bit boundary, then 'sp' should + * be word aligned here. 
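+ * (128 bits is 16 bytes, so all four low bits of sp must be clear;
+ * anything else cannot be a frame this kernel set up and is rejected.)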
+ */ + if (regs->sp & 15) + goto badframe; + + frame = (struct rt_sigframe __user *)regs->sp; + + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + + if (restore_sigframe(regs, frame)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->regs[0]; + +badframe: + arm64_notify_segfault(regs->sp); + return 0; +} + +static int __setup_rt_frame(int usig, struct ksignal *ksig, + sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe_user_layout user; + struct rt_sigframe __user *frame; + int err = 0; + + fpsimd_signal_preserve_current_state(); + + if (get_sigframe(&user, ksig, regs)) + return 1; + + frame = user.sigframe; + + __put_user_error(0, &frame->uc.uc_flags, err); + __put_user_error((typeof(frame->uc.uc_link)) 0, + &frame->uc.uc_link, err); + + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); + err |= setup_sigframe(&user, regs, set); + if (err == 0) { + setup_return(regs, &ksig->ka, &user, usig); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + regs->regs[1] = (unsigned long)&frame->info; + regs->regs[2] = (unsigned long)&frame->uc; + } + } + + return err; +} + +#endif /* __ASM_SIGNAL_COMMON_H */ diff --git a/arch/arm64/include/asm/signal_ilp32.h b/arch/arm64/include/asm/signal_ilp32.h new file mode 100644 index 0000000000000000000000000000000000000000..7ee97c133605679c150aa82f510f869253868cd4 --- /dev/null +++ b/arch/arm64/include/asm/signal_ilp32.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __ASM_SIGNAL_ILP32_H +#define __ASM_SIGNAL_ILP32_H + +#ifdef CONFIG_ARM64_ILP32 + +#include + +int ilp32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs); + +#else + +static inline int ilp32_setup_rt_frame(int usig, struct ksignal *ksig, + sigset_t *set, struct pt_regs *regs) +{ + return -ENOSYS; +} + +#endif /* CONFIG_ARM64_ILP32 */ + +#endif /* __ASM_SIGNAL_ILP32_H */ diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index f82b447bd34f03c94998a5ac105fb9adeb1ee7b0..f0898dc3d3805356a576541a8862377a01e2b5ba 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -151,6 +151,24 @@ bool cpus_are_stuck_in_kernel(void); extern void crash_smp_send_stop(void); extern bool smp_crash_stop_failed(void); +void ipi_set_nmi_prio(void __iomem *base, u8 prio); + +#ifdef CONFIG_ARM64_CPU_PARK +#define PARK_SECTION_SIZE 1024 +struct cpu_park_info { + /* Physical address of reserved park memory. */ + unsigned long start; + /* park reserve mem len should be PARK_SECTION_SIZE * NR_CPUS */ + unsigned long len; + /* Virtual address of reserved park memory. 
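 * (The reservation as a whole is PARK_SECTION_SIZE bytes per possible CPU,
 * e.g. 64 * 1024 bytes == 64KiB when NR_CPUS is 64.)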
*/ + unsigned long start_v; +}; +extern struct cpu_park_info park_info; +extern void enter_cpu_park(unsigned long text, unsigned long exit); +extern void do_cpu_park(unsigned long exit); +extern int kexec_smp_send_park(void); +#endif + #endif /* ifndef __ASSEMBLY__ */ #endif /* ifndef __ASM_SMP_H */ diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index af58dcdefb21a03aafd752d49b0bcbaefc793089..63e29335f426cc1e9e6f6328e15caa1a15074d8a 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -56,4 +56,18 @@ static inline int get_logical_index(u64 mpidr) return -EINVAL; } +#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK +void update_mpidr_siblings_masks(unsigned int cpu, bool remove); + +static inline void mpidr_siblings_add_cpu(unsigned int cpu) +{ + update_mpidr_siblings_masks(cpu, false); +} + +static inline void mpidr_siblings_remove_cpu(unsigned int cpu) +{ + update_mpidr_siblings_masks(cpu, true); +} +#endif + #endif /* __ASM_SMP_PLAT_H */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 38116008d18b316c06a0cd2c5f53d6e9fcc4ff48..4a668995014c5daf616ebea371e19b9bc2ffb3e2 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -18,8 +18,31 @@ #include #include +#include /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() +/* + * Changing this will break osq_lock() thanks to the call inside + * smp_cond_load_relaxed(). + * + * See: + * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net + */ +#define vcpu_is_preempted vcpu_is_preempted +#ifdef CONFIG_PARAVIRT +static inline bool vcpu_is_preempted(int cpu) +{ + return pv_vcpu_is_preempted(cpu); +} + +#else + +static inline bool vcpu_is_preempted(int cpu) +{ + return false; +} +#endif /* CONFIG_PARAVIRT */ + #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 8b68099348e5a305d53bb1c26a9fe1ed5fe70c8c..5412fa40825e83d892b19d379083d3b8cf479bb7 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -19,122 +19,224 @@ #ifndef __ARM64_S2_PGTABLE_H_ #define __ARM64_S2_PGTABLE_H_ +#include #include /* - * The hardware supports concatenation of up to 16 tables at stage2 entry level - * and we use the feature whenever possible. - * - * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3). - * On arm64, the smallest PAGE_SIZE supported is 4k, which means - * (PAGE_SHIFT - 3) > 4 holds for all page sizes. - * This implies, the total number of page table levels at stage2 expected - * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4) - * in normal translations(e.g, stage1), since we cannot have another level in - * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4). + * PGDIR_SHIFT determines the size a top-level page table entry can map + * and depends on the number of levels in the page table. Compute the + * PGDIR_SHIFT for a given number of levels. 
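+ *
+ * For illustration only (assuming 4K pages, i.e. PAGE_SHIFT == 12, so each
+ * level resolves 9 bits of address): pt_levels_pgdir_shift(4) == 39,
+ * pt_levels_pgdir_shift(3) == 30 and pt_levels_pgdir_shift(2) == 21.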
*/ -#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4) +#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls)) /* - * With all the supported VA_BITs and 40bit guest IPA, the following condition - * is always true: - * - * STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS + * The hardware supports concatenation of up to 16 tables at stage2 entry + * level and we use the feature whenever possible, which means we resolve 4 + * additional bits of address at the entry level. * - * We base our stage-2 page table walker helpers on this assumption and - * fall back to using the host version of the helper wherever possible. - * i.e, if a particular level is not folded (e.g, PUD) at stage2, we fall back - * to using the host version, since it is guaranteed it is not folded at host. - * - * If the condition breaks in the future, we can rearrange the host level - * definitions and reuse them for stage2. Till then... + * This implies, the total number of page table levels required for + * IPA_SHIFT at stage2 expected by the hardware can be calculated using + * the same logic used for the (non-collapsable) stage1 page tables but for + * (IPA_SHIFT - 4). */ -#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS -#error "Unsupported combination of guest IPA and host VA_BITS." -#endif +#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) +#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) -/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */ -#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS) -#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT) -#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1)) +/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */ +#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm)) +#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm)) +#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1) /* * The number of PTRS across all concatenated stage2 tables given by the * number of bits resolved at the initial level. + * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA), + * in which case, stage2_pgd_ptrs will have one entry. */ -#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT)) +#define pgd_ptrs_shift(ipa, pgdir_shift) \ + ((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0) +#define __s2_pgd_ptrs(ipa, lvls) \ + (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls)))) +#define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t)) + +#define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) +#define stage2_pgd_size(kvm) __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) /* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation - * levels in addition to the PGD. + * kvm_mmmu_cache_min_pages() is the number of pages required to install + * a stage-2 translation. We pre-allocate the entry level page table at + * the VM creation. 
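+ *
+ * As a worked example (assuming 4K pages and a 40-bit IPA): stage2 then
+ * needs three translation levels, with the entry level concatenated, so
+ * kvm_mmu_cache_min_pages() evaluates to 2.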
*/ -#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1) +#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1) - -#if STAGE2_PGTABLE_LEVELS > 3 +/* Stage2 PUD definitions when the level is present */ +static inline bool kvm_stage2_has_pud(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3); +} #define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) -#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) +#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE - 1)) -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) +static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) +{ + if (kvm_stage2_has_pud(kvm)) + return pgd_none(pgd); + else + return 0; +} -#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp) +static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp) +{ + if (kvm_stage2_has_pud(kvm)) + pgd_clear(pgdp); +} -static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end) +static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) { - phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; + if (kvm_stage2_has_pud(kvm)) + return pgd_present(pgd); + else + return 1; +} - return (boundary - 1 < end - 1) ? boundary : end; +static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud) +{ + if (kvm_stage2_has_pud(kvm)) + pgd_populate(NULL, pgd, pud); } -#endif /* STAGE2_PGTABLE_LEVELS > 3 */ +static inline pud_t *stage2_pud_offset(struct kvm *kvm, + pgd_t *pgd, unsigned long address) +{ + if (kvm_stage2_has_pud(kvm)) + return pud_offset(pgd, address); + else + return (pud_t *)pgd; +} +static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) +{ + if (kvm_stage2_has_pud(kvm)) + pud_free(NULL, pud); +} -#if STAGE2_PGTABLE_LEVELS > 2 +static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) +{ + if (kvm_stage2_has_pud(kvm)) + return kvm_page_empty(pudp); + else + return false; +} + +static inline phys_addr_t +stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + if (kvm_stage2_has_pud(kvm)) { + phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; + + return (boundary - 1 < end - 1) ? 
boundary : end; + } else { + return end; + } +} + +/* Stage2 PMD definitions when the level is present */ +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2); +} #define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) -#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT) +#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE - 1)) -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) +static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) +{ + if (kvm_stage2_has_pmd(kvm)) + return pud_none(pud); + else + return 0; +} -#define stage2_pud_huge(pud) pud_huge(pud) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) +static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud) +{ + if (kvm_stage2_has_pmd(kvm)) + pud_clear(pud); +} -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) { - phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; + if (kvm_stage2_has_pmd(kvm)) + return pud_present(pud); + else + return 1; +} - return (boundary - 1 < end - 1) ? boundary : end; +static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd) +{ + if (kvm_stage2_has_pmd(kvm)) + pud_populate(NULL, pud, pmd); } -#endif /* STAGE2_PGTABLE_LEVELS > 2 */ +static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, + pud_t *pud, unsigned long address) +{ + if (kvm_stage2_has_pmd(kvm)) + return pmd_offset(pud, address); + else + return (pmd_t *)pud; +} -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) +static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) +{ + if (kvm_stage2_has_pmd(kvm)) + pmd_free(NULL, pmd); +} -#if STAGE2_PGTABLE_LEVELS == 2 -#include -#elif STAGE2_PGTABLE_LEVELS == 3 -#include -#endif +static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) +{ + if (kvm_stage2_has_pmd(kvm)) + return pud_huge(pud); + else + return 0; +} + +static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) +{ + if (kvm_stage2_has_pmd(kvm)) + return kvm_page_empty(pmdp); + else + return 0; +} + +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + if (kvm_stage2_has_pmd(kvm)) { + phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; + return (boundary - 1 < end - 1) ? boundary : end; + } else { + return end; + } +} + +static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep) +{ + return kvm_page_empty(ptep); +} -#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) +static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr) +{ + return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1)); +} -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK; + phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm); return (boundary - 1 < end - 1) ? 
boundary : end; } diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index dd95d33a5bd5d652647955c08ab927fb3d8d78f8..941c8c446f4ad675a848175da424dff88bcb263b 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -16,6 +16,7 @@ #ifndef __ASM_STRING_H #define __ASM_STRING_H +#ifndef CONFIG_KASAN #define __HAVE_ARCH_STRRCHR extern char *strrchr(const char *, int c); @@ -34,24 +35,27 @@ extern __kernel_size_t strlen(const char *); #define __HAVE_ARCH_STRNLEN extern __kernel_size_t strnlen(const char *, __kernel_size_t); +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, size_t); + +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *, int, __kernel_size_t); +#endif + #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); +extern void *memcpy_mc(void *, const void *, __kernel_size_t); +extern void *__memcpy_mc(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); extern void *__memmove(void *, const void *, __kernel_size_t); -#define __HAVE_ARCH_MEMCHR -extern void *memchr(const void *, int, __kernel_size_t); - #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); extern void *__memset(void *, int, __kernel_size_t); -#define __HAVE_ARCH_MEMCMP -extern int memcmp(const void *, const void *, size_t); - #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE #define __HAVE_ARCH_MEMCPY_FLUSHCACHE void memcpy_flushcache(void *dst, const void *src, size_t cnt); @@ -63,7 +67,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt); * For files that are not instrumented (e.g. mm/slub.c) we * should use not instrumented version of mem* functions. 
*/ - +#define memcpy_mc(dst, src, len) __memcpy_mc(dst, src, len) #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9d18bdbd406f522c02432529c4cf6..b9bfc8e9790eb14af27ceb214005ab398ea2ba61 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -20,12 +20,16 @@ #include #include -typedef long (*syscall_fn_t)(struct pt_regs *regs); +typedef long (*syscall_fn_t)(const struct pt_regs *regs); extern const syscall_fn_t sys_call_table[]; -#ifdef CONFIG_COMPAT -extern const syscall_fn_t compat_sys_call_table[]; +#ifdef CONFIG_AARCH32_EL0 +extern const syscall_fn_t a32_sys_call_table[]; +#endif + +#ifdef CONFIG_ARM64_ILP32 +extern const syscall_fn_t ilp32_sys_call_table[]; #endif static inline int syscall_get_nr(struct task_struct *task, @@ -119,7 +123,7 @@ static inline void syscall_set_arguments(struct task_struct *task, */ static inline int syscall_get_arch(void) { - if (is_compat_task()) + if (is_a32_compat_task()) return AUDIT_ARCH_ARM; return AUDIT_ARCH_AARCH64; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index a4477e515b798d3788c210ac184f9284e059e989..8523ac1281f9e6553cd34ea1bf07bdd7333615eb 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -30,10 +30,10 @@ } \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#define COMPAT_SYSCALL_DEFINE0(sname) \ - asmlinkage long __arm64_compat_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ - asmlinkage long __arm64_compat_sys_##sname(void) +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL_COMPAT(name) \ cond_syscall(__arm64_compat_sys_##name); @@ -62,11 +62,11 @@ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #ifndef SYSCALL_DEFINE0 -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __arm64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ - asmlinkage long __arm64_sys_##sname(void) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) #endif #ifndef COND_SYSCALL @@ -77,4 +77,9 @@ #define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); #endif +struct pt_regs; +asmlinkage long __arm64_sys_io_uring_setup(const struct pt_regs *regs); +asmlinkage long __arm64_sys_io_uring_enter(const struct pt_regs *regs); +asmlinkage long __arm64_sys_io_uring_register(const struct pt_regs *regs); + #endif /* __ASM_SYSCALL_WRAPPER_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c1470931b8974936ed2a86fb231c15764d08f573..242273a04af9913139b5cbf427064278afa21f82 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -86,11 +86,14 @@ #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4) #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3) +#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 
1) #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \ (!!x)<<8 | 0x1f) #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \ (!!x)<<8 | 0x1f) +#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \ + (!!x)<<8 | 0x1f) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) @@ -156,6 +159,7 @@ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) @@ -189,6 +193,9 @@ #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) +#define SYS_PAR_EL1_F BIT(0) +#define SYS_PAR_EL1_FST GENMASK(6, 1) + /*** Statistical Profiling Extension ***/ /* ID registers */ #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) @@ -419,6 +426,7 @@ #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) /* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_DSSBS (1UL << 44) #define SCTLR_ELx_EE (1 << 25) #define SCTLR_ELx_IESB (1 << 21) #define SCTLR_ELx_WXN (1 << 19) @@ -439,7 +447,7 @@ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \ (1 << 27) | (1 << 30) | (1 << 31) | \ - (0xffffffffUL << 32)) + (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE @@ -453,7 +461,7 @@ #define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) #define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ - ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) + SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff #error "Inconsistent SCTLR_EL2 set/clear bits" @@ -477,7 +485,7 @@ (1 << 29)) #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \ (1 << 27) | (1 << 30) | (1 << 31) | \ - (0xffffffffUL << 32)) + (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) @@ -494,7 +502,7 @@ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) #define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ - SCTLR_EL1_RES0) + SCTLR_ELx_DSSBS | SCTLR_EL1_RES0) #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff #error "Inconsistent SCTLR_EL1 set/clear bits" @@ -520,10 +528,14 @@ #define ID_AA64ISAR1_JSCVT_SHIFT 12 #define ID_AA64ISAR1_DPB_SHIFT 0 +/* id_aa64isar2 */ +#define ID_AA64ISAR2_CLEARBHB_SHIFT 28 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_DIT_SHIFT 48 +#define ID_AA64PFR0_MPAM_SHIFT 40 #define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_GIC_SHIFT 24 @@ -534,6 +546,7 @@ #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL0_SHIFT 0 +#define ID_AA64PFR0_MPAM 0x1 #define ID_AA64PFR0_SVE 0x1 #define ID_AA64PFR0_RAS_V1 0x1 #define ID_AA64PFR0_FP_NI 0xf @@ -544,6 +557,13 @@ #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1 #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2 +/* id_aa64pfr1 */ +#define ID_AA64PFR1_SSBS_SHIFT 4 + +#define ID_AA64PFR1_SSBS_PSTATE_NI 0 +#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 +#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 + /* id_aa64mmfr0 */ #define ID_AA64MMFR0_TGRAN4_SHIFT 28 #define ID_AA64MMFR0_TGRAN64_SHIFT 24 @@ -570,6 +590,7 @@ #endif /* id_aa64mmfr1 */ +#define ID_AA64MMFR1_ECBHB_SHIFT 60 #define 
ID_AA64MMFR1_PAN_SHIFT 20 #define ID_AA64MMFR1_LOR_SHIFT 16 #define ID_AA64MMFR1_HPD_SHIFT 12 @@ -598,6 +619,12 @@ #define ID_AA64DFR0_TRACEVER_SHIFT 4 #define ID_AA64DFR0_DEBUGVER_SHIFT 0 +#define ID_AA64DFR0_PMUVER_8_1 0x4 + +#define ID_DFR0_PERFMON_SHIFT 24 + +#define ID_DFR0_PERFMON_8_1 0x4 + #define ID_ISAR5_RDM_SHIFT 24 #define ID_ISAR5_CRC32_SHIFT 16 #define ID_ISAR5_SHA2_SHIFT 12 @@ -684,20 +711,39 @@ #include #include -asm( -" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" -" .equ .L__reg_num_x\\num, \\num\n" -" .endr\n" +#define __DEFINE_MRS_MSR_S_REGNUM \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__reg_num_x\\num, \\num\n" \ +" .endr\n" \ " .equ .L__reg_num_xzr, 31\n" -"\n" -" .macro mrs_s, rt, sreg\n" - __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) + +#define DEFINE_MRS_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro mrs_s, rt, sreg\n" \ + __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ " .endm\n" -"\n" -" .macro msr_s, sreg, rt\n" - __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) + +#define DEFINE_MSR_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro msr_s, sreg, rt\n" \ + __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ " .endm\n" -); + +#define UNDEFINE_MRS_S \ +" .purgem mrs_s\n" + +#define UNDEFINE_MSR_S \ +" .purgem msr_s\n" + +#define __mrs_s(v, r) \ + DEFINE_MRS_S \ +" mrs_s " v ", " __stringify(r) "\n" \ + UNDEFINE_MRS_S + +#define __msr_s(r, v) \ + DEFINE_MSR_S \ +" msr_s " __stringify(r) ", " v "\n" \ + UNDEFINE_MSR_S /* * Unlike read_cpuid, calls to read_sysreg are never expected to be @@ -725,13 +771,13 @@ asm( */ #define read_sysreg_s(r) ({ \ u64 __val; \ - asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \ + asm volatile(__mrs_s("%0", r) : "=r" (__val)); \ __val; \ }) #define write_sysreg_s(v, r) do { \ u64 __val = (u64)(v); \ - asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ + asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ } while (0) /* diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index cb2c10a8f0a8517edc4460809f9e3d5c5e6be178..6bc5fe80fd46f7709726bc2e1b1b54b490d36b82 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -69,6 +69,7 @@ void arch_release_task_struct(struct task_struct *tsk); * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) + * TIF_POLLING_NRFLAG - idle is polling for TIF_NEED_RESCHED */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 @@ -76,25 +77,33 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ +#define TIF_SEA_NOTIFY 6 /* notify to do an error recovery */ #define TIF_NOHZ 7 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 #define TIF_SECCOMP 11 +#define TIF_POLLING_NRFLAG 16 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 #define TIF_SINGLESTEP 21 -#define TIF_32BIT 22 /* 32bit process */ +#define TIF_32BIT 22 /* AARCH32 process */ #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB 
mitigation */ +#define TIF_32BIT_AARCH64 26 /* 32 bit process on AArch64(ILP32) */ + +#ifdef CONFIG_UCE_KERNEL_RECOVERY +#define TIF_UCE_KERNEL_RECOVERY 27 +#endif #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) #define _TIF_NOHZ (1 << TIF_NOHZ) +#define _TIF_SEA_NOTIFY (1 << TIF_SEA_NOTIFY) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) @@ -103,10 +112,16 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_FSCHECK (1 << TIF_FSCHECK) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) +#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) + +#ifdef CONFIG_UCE_KERNEL_RECOVERY +#define _TIF_UCE_KERNEL_RECOVERY (1 << TIF_UCE_KERNEL_RECOVERY) +#endif #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK) + _TIF_UPROBE | _TIF_FSCHECK | _TIF_SEA_NOTIFY) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index a3233167be60226fa1e15e76767db26a527e5436..106fdc951b6eefdda0a97c877c2493b7bdfac1f8 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -22,16 +22,10 @@ #include #include -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - -#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry) static inline void __tlb_remove_table(void *_table) { free_page_and_swap_cache((struct page *)_table); } -#else -#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) -#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ static void tlb_flush(struct mmu_gather *tlb); @@ -40,36 +34,35 @@ static void tlb_flush(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb) { struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); + bool last_level = !tlb->freed_tables; + unsigned long stride = tlb_get_unmap_size(tlb); /* - * The ASID allocator will either invalidate the ASID or mark - * it as used. + * If we're tearing down the address space then we only care about + * invalidating the walk-cache, since the ASID allocator won't + * reallocate our ASID without invalidating the entire TLB. */ - if (tlb->fullmm) + if (tlb->fullmm) { + if (!last_level) + flush_tlb_mm(tlb->mm); return; + } - /* - * The intermediate page table levels are already handled by - * the __(pte|pmd|pud)_free_tlb() functions, so last level - * TLBI is sufficient here. 
- */ - __flush_tlb_range(&vma, tlb->start, tlb->end, true); + __flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level); } static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { - __flush_tlb_pgtable(tlb->mm, addr); pgtable_page_dtor(pte); - tlb_remove_entry(tlb, pte); + tlb_remove_table(tlb, pte); } #if CONFIG_PGTABLE_LEVELS > 2 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { - __flush_tlb_pgtable(tlb->mm, addr); - tlb_remove_entry(tlb, virt_to_page(pmdp)); + tlb_remove_table(tlb, virt_to_page(pmdp)); } #endif @@ -77,8 +70,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { - __flush_tlb_pgtable(tlb->mm, addr); - tlb_remove_entry(tlb, virt_to_page(pudp)); + tlb_remove_table(tlb, virt_to_page(pudp)); } #endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index a4a1901140ee98d21863f4f2978a7b627c5d0014..2051277ebc33d8e2e55a744032683a2b912de91b 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -21,6 +21,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include @@ -70,43 +71,73 @@ }) /* - * TLB Management - * ============== + * TLB Invalidation + * ================ * - * The TLB specific code is expected to perform whatever tests it needs - * to determine if it should invalidate the TLB for each call. Start - * addresses are inclusive and end addresses are exclusive; it is safe to - * round these addresses down. + * This header file implements the low-level TLB invalidation routines + * (sometimes referred to as "flushing" in the kernel) for arm64. + * + * Every invalidation operation uses the following template: + * + * DSB ISHST // Ensure prior page-table updates have completed + * TLBI ... // Invalidate the TLB + * DSB ISH // Ensure the TLB invalidation has completed + * if (invalidated kernel mappings) + * ISB // Discard any instructions fetched from the old mapping * - * flush_tlb_all() * - * Invalidate the entire TLB. + * The following functions form part of the "core" TLB invalidation API, + * as documented in Documentation/core-api/cachetlb.rst: + * + * flush_tlb_all() + * Invalidate the entire TLB (kernel + user) on all CPUs * * flush_tlb_mm(mm) + * Invalidate an entire user address space on all CPUs. + * The 'mm' argument identifies the ASID to invalidate. + * + * flush_tlb_range(vma, start, end) + * Invalidate the virtual-address range '[start, end)' on all + * CPUs for the user address space corresponding to 'vma->mm'. + * Note that this operation also invalidates any walk-cache + * entries associated with translations for the specified address + * range. * - * Invalidate all TLB entries in a particular address space. - * - mm - mm_struct describing address space + * flush_tlb_kernel_range(start, end) + * Same as flush_tlb_range(..., start, end), but applies to + * kernel mappings rather than a particular user address space. + * Whilst not explicitly documented, this function is used when + * unmapping pages from vmalloc/io space. * - * flush_tlb_range(mm,start,end) + * flush_tlb_page(vma, addr) + * Invalidate a single user mapping for address 'addr' in the + * address space corresponding to 'vma->mm'. Note that this + * operation only invalidates a single, last-level page-table + * entry and therefore does not affect any walk-caches. 
* - * Invalidate a range of TLB entries in the specified address - * space. - * - mm - mm_struct describing address space - * - start - start address (may not be aligned) - * - end - end address (exclusive, may not be aligned) * - * flush_tlb_page(vaddr,vma) + * Next, we have some undocumented invalidation routines that you probably + * don't want to call unless you know what you're doing: * - * Invalidate the specified page in the specified address range. - * - vaddr - virtual address (may not be aligned) - * - vma - vma_struct describing address range + * local_flush_tlb_all() + * Same as flush_tlb_all(), but only applies to the calling CPU. * - * flush_kern_tlb_page(kaddr) + * __flush_tlb_kernel_pgtable(addr) + * Invalidate a single kernel mapping for address 'addr' on all + * CPUs, ensuring that any walk-cache entries associated with the + * translation are also invalidated. * - * Invalidate the TLB entry for the specified page. The address - * will be in the kernels virtual memory space. Current uses - * only require the D-TLB to be invalidated. - * - kaddr - Kernel virtual memory address + * __flush_tlb_range(vma, start, end, stride, last_level) + * Invalidate the virtual-address range '[start, end)' on all + * CPUs for the user address space corresponding to 'vma->mm'. + * The invalidation operations are issued at a granularity + * determined by 'stride' and only affect any walk-cache entries + * if 'last_level' is equal to false. + * + * + * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented + * on top of these routines, since that is our interface to the mmu_gather + * API as used by munmap() and friends. */ static inline void local_flush_tlb_all(void) { @@ -124,6 +155,33 @@ static inline void flush_tlb_all(void) isb(); } +/* + * This is meant to avoid soft lock-ups on large TLB flushing ranges and not + * necessarily a performance improvement. + */ +#define MAX_TLBI_OPS PTRS_PER_PTE + +#ifdef CONFIG_ARM64_TLBI_IPI + +void flush_tlb_mm(struct mm_struct *mm); +void flush_tlb_page_nosync(struct vm_area_struct *vma, + unsigned long uaddr); +void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long stride, bool last_level); +bool test_tlbi_ipi_switch(void); + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + unsigned long asid = __TLBI_VADDR(0, ASID(mm)); + + dsb(nshst); + __tlbi(aside1, asid); + __tlbi_user(aside1, asid); + dsb(nsh); +} + +#else /* CONFIG_ARM64_TLBI_IPI */ + static inline void flush_tlb_mm(struct mm_struct *mm) { unsigned long asid = __TLBI_VADDR(0, ASID(mm)); @@ -134,40 +192,39 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ish); } -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long uaddr) +static inline void flush_tlb_page_nosync(struct vm_area_struct *vma, + unsigned long uaddr) { unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); dsb(ishst); __tlbi(vale1is, addr); __tlbi_user(vale1is, addr); - dsb(ish); } -/* - * This is meant to avoid soft lock-ups on large TLB flushing ranges and not - * necessarily a performance improvement. 
- */ -#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT) - static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, - bool last_level) + unsigned long stride, bool last_level) { unsigned long asid = ASID(vma->vm_mm); unsigned long addr; - if ((end - start) > MAX_TLB_RANGE) { + start = round_down(start, stride); + end = round_up(end, stride); + + if ((end - start) >= (MAX_TLBI_OPS * stride)) { flush_tlb_mm(vma->vm_mm); return; } + /* Convert the stride into units of 4k */ + stride >>= 12; + start = __TLBI_VADDR(start, asid); end = __TLBI_VADDR(end, asid); dsb(ishst); - for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { + for (addr = start; addr < end; addr += stride) { if (last_level) { __tlbi(vale1is, addr); __tlbi_user(vale1is, addr); @@ -178,18 +235,30 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, } dsb(ish); } +#endif /* CONFIG_ARM64_TLBI_IPI */ + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long uaddr) +{ + flush_tlb_page_nosync(vma, uaddr); + dsb(ish); +} static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __flush_tlb_range(vma, start, end, false); + /* + * We cannot use leaf-only invalidation here, since we may be invalidating + * table entries as part of collapsing hugepages or moving page tables. + */ + __flush_tlb_range(vma, start, end, PAGE_SIZE, false); } static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { unsigned long addr; - if ((end - start) > MAX_TLB_RANGE) { + if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) { flush_tlb_all(); return; } @@ -199,7 +268,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) - __tlbi(vaae1is, addr); + __tlbi(vaale1is, addr); dsb(ish); isb(); } @@ -208,22 +277,14 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end * Used to invalidate the TLB (walk caches) corresponding to intermediate page * table levels (pgd/pud/pmd). 
*/ -static inline void __flush_tlb_pgtable(struct mm_struct *mm, - unsigned long uaddr) -{ - unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm)); - - __tlbi(vae1is, addr); - __tlbi_user(vae1is, addr); - dsb(ish); -} - static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) { unsigned long addr = __TLBI_VADDR(kaddr, 0); + dsb(ishst); __tlbi(vaae1is, addr); dsb(ish); + isb(); } #endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index e66b0fca99c2f9e500788db6fa2e24693ade11c3..e9d3f4c88a766c0f927d22696f61540a6ae716af 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -104,7 +104,7 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si */ #define untagged_addr(addr) sign_extend64(addr, 55) -#define access_ok(type, addr, size) __range_ok(addr, size) +#define access_ok(addr, size) __range_ok(addr, size) #define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ @@ -310,7 +310,7 @@ do { \ ({ \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ __get_user_err((x), __p, (err)); \ } else { \ @@ -331,7 +331,74 @@ do { \ __gu_err; \ }) +#ifdef CONFIG_UCE_KERNEL_RECOVERY +extern void get_user_func(long *p, const long __user *addr, int size, int *err); +extern int is_get_user_kernel_recovery_enable(void); + +#define __get_user_uce_err(x, ptr, size, err) \ +do { \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + switch ((size)) { \ + case 1: \ + __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO); \ + break; \ + case 2: \ + __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO); \ + break; \ + case 4: \ + __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO); \ + break; \ + case 8: \ + __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO); \ + break; \ + default: \ + __gu_val = 0; (err) = -EFAULT; \ + break; \ + } \ + uaccess_disable_not_uao(); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ +} while (0) + +#define __get_user_uce_check(x, ptr, size, err) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(__p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __get_user_uce_err((x), __p, (size), (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ +}) + +/* + * uce kernel recovery use kallsyms_lookup_size_offset to confirm the + * location of triggering uce which based on function, so here needs to + * be implemented based on function. 
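+ * In other words: when the recovery feature is enabled, get_user() is
+ * routed through the out-of-line helper get_user_func(), so that a UCE
+ * raised by the user access lands in one dedicated symbol that
+ * kallsyms_lookup_size_offset() can recognise.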
+ */ +#define get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + \ + if (!is_get_user_kernel_recovery_enable()) { \ + __get_user_check((x), (ptr), __gu_err); \ + } else { \ + long __t; \ + const long *__s = (const long *)(ptr); \ + get_user_func(&__t, __s, sizeof(*(ptr)), &__gu_err); \ + (x) = (__typeof__(x))__t; \ + } \ + __gu_err; \ +}) +#else #define get_user __get_user +#endif #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ @@ -379,7 +446,7 @@ do { \ ({ \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ __put_user_err((x), __p, (err)); \ } else { \ @@ -408,6 +475,14 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ }) +#ifdef CONFIG_UCE_KERNEL_RECOVERY +extern unsigned long __must_check __arch_copy_to_user_generic_read(void __user *to, const void *from, unsigned long n); +#define raw_copy_to_user_generic_read(to, from, n) \ +({ \ + __arch_copy_to_user_generic_read(__uaccess_mask_ptr(to), (from), (n)); \ +}) +#endif + extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); #define raw_copy_to_user(to, from, n) \ ({ \ @@ -427,7 +502,7 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(to, n)) n = __arch_clear_user(__uaccess_mask_ptr(to), n); return n; } diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e0d0f5b856e74589404de44c8ca4323e454973ec..f3401de12dc9db49da3eec4d086bdbf70ebe0889 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -13,12 +13,16 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ + #ifdef CONFIG_COMPAT #define __ARCH_WANT_COMPAT_STAT64 +#define __ARCH_WANT_SYS_LLSEEK +#endif + +#ifdef CONFIG_AARCH32_EL0 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK @@ -40,8 +44,9 @@ * The following SVCs are ARM private. 
*/ #define __ARM_NR_COMPAT_BASE 0x0f0000 -#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) -#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2) +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) +#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) #define __NR_compat_syscalls 399 #endif diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h index 8d004073d0e8e32b61426e539bfd97d0d6bfa094..f57c96ac042f1dbb6e2474468e44e2222a00c6b4 100644 --- a/arch/arm64/include/asm/uprobes.h +++ b/arch/arm64/include/asm/uprobes.h @@ -13,21 +13,19 @@ #include #include -#define MAX_UINSN_BYTES AARCH64_INSN_SIZE - -#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES +#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES) #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE -#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES +#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE -typedef u32 uprobe_opcode_t; +typedef __le32 uprobe_opcode_t; struct arch_uprobe_task { }; struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; + __le32 insn; + __le32 ixol; }; struct arch_probe_insn api; bool simulate; diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 839ce0031bd58a076893e844aa12c9a8ed4d33aa..33a4e10014aad31c8fd1257eccf3350c7b5f9135 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -29,6 +29,12 @@ #include +#ifdef CONFIG_ARM64_ILP32 +#include +#else +#define vdso_offset_sigtramp_ilp32 ({ BUILD_BUG(); 0; }) +#endif + #define VDSO_SYMBOL(base, name) \ ({ \ (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \ diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index 2b9a63771eda8c81b12ec4279a9e279543739ecc..6c466a041d75781ac9d483faa6037cfe3cfeca99 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h @@ -38,6 +38,8 @@ struct vdso_data { __u32 tz_minuteswest; /* Whacky timezone stuff */ __u32 tz_dsttime; __u32 use_syscall; + __u32 hrtimer_res; + __u32 vdso_fix; /* Avoid the clock bug in VDSO */ }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h new file mode 100644 index 0000000000000000000000000000000000000000..695583b9a145b79b539295db8cea221da8ea47a4 --- /dev/null +++ b/arch/arm64/include/asm/vectors.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 ARM Ltd. + */ +#ifndef __ASM_VECTORS_H +#define __ASM_VECTORS_H + +#include +#include + +#include +#include + +extern char vectors[]; +extern char tramp_vectors[]; +extern char __bp_harden_el1_vectors[]; + +/* + * Note: the order of this enum corresponds to two arrays in entry.S: + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical + * 'full fat' vectors are used directly. + */ +enum arm64_bp_harden_el1_vectors { +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + /* + * Perform the BHB loop mitigation, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_LOOP, + + /* + * Make the SMC call for firmware mitigation, before branching to the + * canonical vectors. + */ + EL1_VECTOR_BHB_FW, + + /* + * Use the ClearBHB instruction, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_CLEAR_INSN, +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + + /* + * Remap the kernel before branching to the canonical vectors. 
+ */ + EL1_VECTOR_KPTI, +}; + +#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +#define EL1_VECTOR_BHB_LOOP -1 +#define EL1_VECTOR_BHB_FW -1 +#define EL1_VECTOR_BHB_CLEAR_INSN -1 +#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + +/* The vectors to use on return from EL0. e.g. to remap the kernel */ +DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); + +#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_VALIAS 0 +#endif + +static inline const char * +arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) +{ + if (arm64_kernel_unmapped_at_el0()) + return (char *)TRAMP_VALIAS + SZ_2K * slot; + + WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); + + return __bp_harden_el1_vectors + SZ_2K * slot; +} + +#endif /* __ASM_VECTORS_H */ diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..fc9a12d6cc1ac64fd268737923837686e6089c7b --- /dev/null +++ b/arch/arm64/include/asm/vmalloc.h @@ -0,0 +1,29 @@ +#ifndef _ASM_ARM64_VMALLOC_H +#define _ASM_ARM64_VMALLOC_H + +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +static inline bool arch_vmap_p4d_supported(pgprot_t prot) +{ + return false; +} + +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + /* + * Only 4k granule supports level 1 block mappings. + * SW table walks can't handle removal of intermediate entries. + */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + /* See arch_vmap_pud_supported() */ + return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} +#endif + +#endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h index b0d708ff7f4e57523908fff7b5aeedcefd4fe243..a2601c1ccf43558e40e661de2dcee0ca527f805e 100644 --- a/arch/arm64/include/asm/word-at-a-time.h +++ b/arch/arm64/include/asm/word-at-a-time.h @@ -64,7 +64,7 @@ static inline unsigned long find_zero(unsigned long mask) */ static inline unsigned long load_unaligned_zeropad(const void *addr) { - unsigned long ret, offset; + unsigned long ret, tmp; /* Load word from unaligned pointer addr */ asm( @@ -72,9 +72,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) "2:\n" " .pushsection .fixup,\"ax\"\n" " .align 2\n" - "3: and %1, %2, #0x7\n" - " bic %2, %2, #0x7\n" - " ldr %0, [%2]\n" + "3: bic %1, %2, #0x7\n" + " ldr %0, [%1]\n" + " and %1, %2, #0x7\n" " lsl %1, %1, #0x3\n" #ifndef __AARCH64EB__ " lsr %0, %0, %1\n" @@ -84,7 +84,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) " b 2b\n" " .popsection\n" _ASM_EXTABLE(1b, 3b) - : "=&r" (ret), "=&r" (offset) + : "=&r" (ret), "=&r" (tmp) : "r" (addr), "Q" (*(unsigned long *)addr)); return ret; diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h index 4e22b7a8c0388c96d6558efd8993f7a30eb7ed03..2788e95d0ff022512dfc112460fc5337eca1ad3f 100644 --- a/arch/arm64/include/asm/xen/events.h +++ b/arch/arm64/include/asm/xen/events.h @@ -14,7 +14,7 @@ enum ipi_vector { static inline int xen_irqs_disabled(struct pt_regs *regs) { - return raw_irqs_disabled_flags((unsigned long) regs->pstate); + return !interrupts_enabled(regs); } #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h new file mode 100644 index 0000000000000000000000000000000000000000..856386ad076c6e7eebdda416dcf4044359c2fdc2 --- /dev/null +++ 
b/arch/arm64/include/asm/xor.h @@ -0,0 +1,73 @@ +/* + * arch/arm64/include/asm/xor.h + * + * Authors: Jackie Liu + * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#ifdef CONFIG_KERNEL_MODE_NEON + +extern struct xor_block_template const xor_block_inner_neon; + +static void +xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_2(bytes, p1, p2); + kernel_neon_end(); +} + +static void +xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_3(bytes, p1, p2, p3); + kernel_neon_end(); +} + +static void +xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4); + kernel_neon_end(); +} + +static void +xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + kernel_neon_begin(); + xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5); + kernel_neon_end(); +} + +static struct xor_block_template xor_block_arm64 = { + .name = "arm64_neon", + .do_2 = xor_neon_2, + .do_3 = xor_neon_3, + .do_4 = xor_neon_4, + .do_5 = xor_neon_5 +}; +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + if (cpu_has_neon()) { \ + xor_speed(&xor_block_arm64);\ + } \ + } while (0) + +#endif /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h index 485d60bee26ca313ad15797f230efe10072befc9..9a05a9659e761a14d215d0dd75200d0a66e1bb32 100644 --- a/arch/arm64/include/uapi/asm/bitsperlong.h +++ b/arch/arm64/include/uapi/asm/bitsperlong.h @@ -17,7 +17,14 @@ #ifndef __ASM_BITSPERLONG_H #define __ASM_BITSPERLONG_H -#define __BITS_PER_LONG 64 +#if defined(__LP64__) +/* Assuming __LP64__ will be defined for native ELF64's and not for ILP32. 
*/ +# define __BITS_PER_LONG 64 +#elif defined(__ILP32__) +# define __BITS_PER_LONG 32 +#else +# error "Neither LP64 nor ILP32: unsupported ABI in asm/bitsperlong.h" +#endif #include diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 17c65c8f33cb6073acda182e7b1c943e08730870..2bcd6e4f34740337c0d122fef5a2b513dcb75dd1 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -48,5 +48,6 @@ #define HWCAP_USCAT (1 << 25) #define HWCAP_ILRCPC (1 << 26) #define HWCAP_FLAGM (1 << 27) +#define HWCAP_SSBS (1 << 28) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97c3478ee6e718c8ac2de4c01edee6a0dd4cd27e..307d13612399d14dd49369ed2f7d0f7bc1ab0e48 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -265,8 +265,10 @@ struct kvm_vcpu_events { #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 98c4ce55d9c360518c2a527b26153403be5c5463..010223459d5195bb9e997037885a18459e925bd8 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -46,6 +46,7 @@ #define PSR_I_BIT 0x00000080 #define PSR_A_BIT 0x00000100 #define PSR_D_BIT 0x00000200 +#define PSR_SSBS_BIT 0x00001000 #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 #define PSR_V_BIT 0x10000000 @@ -64,8 +65,6 @@ #ifndef __ASSEMBLY__ -#include - /* * User structures for general purpose, floating point and debug registers. */ @@ -112,10 +111,10 @@ struct user_sve_header { /* * Common SVE_PT_* flags: - * These must be kept in sync with prctl interface in + * These must be kept in sync with prctl interface in */ -#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16) -#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16) +#define SVE_PT_VL_INHERIT ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16) +#define SVE_PT_VL_ONEXEC ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16) /* @@ -130,7 +129,7 @@ struct user_sve_header { /* Offset from the start of struct user_sve_header to the register data */ #define SVE_PT_REGS_OFFSET \ - ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ + ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \ / SVE_VQ_BYTES * SVE_VQ_BYTES) /* @@ -175,6 +174,10 @@ struct user_sve_header { * FPCR uint32_t FPCR * * Additional data might be appended in the future. + * + * The Z-, P- and FFR registers are represented in memory in an endianness- + * invariant layout which differs from the layout used for the FPSIMD + * V-registers on big-endian systems: see sigcontext.h for more explanation. 
*/ #define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index dca8f8b5168b44828b30d6048660a2d69e198b71..7498e41bf453cfd0ed7d23734e18fe71f6aa3401 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -77,6 +77,15 @@ struct fpsimd_context { __uint128_t vregs[32]; }; +/* + * Note: similarly to all other integer fields, each V-register is stored in an + * endianness-dependent format, with the byte at offset i from the start of the + * in-memory representation of the register value containing + * + * bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or + * bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts. + */ + /* ESR_EL1 context */ #define ESR_MAGIC 0x45535201 @@ -203,6 +212,11 @@ struct sve_context { * FFR uint16_t[vq] first-fault status register * * Additional data might be appended in the future. + * + * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR) + * is encoded in memory in an endianness-invariant format, with the byte at + * offset i from the start of the in-memory representation containing bits + * [(7 + 8 * i) : (8 * i)] of the register value. */ #define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES) diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 5072cbd15c82955ce3fcf1a3ca828103b882248d..80f1cb4ae2e13c42d17f36b46bf219524bcb7d34 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -15,6 +15,19 @@ * along with this program. If not, see . */ +/* + * Use AARCH32 interface for sys_sync_file_range() as it passes 64-bit arguments. + */ +#if defined(__ILP32__) || defined(__SYSCALL_COMPAT) +#define __ARCH_WANT_SYNC_FILE_RANGE2 +#endif + +/* + * AARCH64/ILP32 is introduced after next syscalls were deprecated. 
+ */ +#if !(defined(__ILP32__) || defined(__SYSCALL_COMPAT)) #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#endif #include diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 4c8b13bede80f98195ba1090fea11601a6d3df22..e34b9b5969bfbcadf7ad8f43b57039af7d6b308c 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -21,14 +21,19 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ syscall.o +arm64-obj-$(CONFIG_ARM64_TLBI_IPI) += tlbflush.o + extra-$(CONFIG_EFI) := efi-entry.o OBJCOPYFLAGS := --prefix-symbols=__efistub_ $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,objcopy) -arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ - sys_compat.o +arm64-obj-$(CONFIG_AARCH32_EL0) += sys32.o kuser32.o signal32.o \ + sys_compat.o binfmt_elf32.o +arm64-obj-$(CONFIG_ARM64_ILP32) += binfmt_ilp32.o sys_ilp32.o \ + signal_ilp32.o +arm64-obj-$(CONFIG_COMPAT) += sys32_common.o signal32_common.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o @@ -37,6 +42,7 @@ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o +arm64-obj-$(CONFIG_LIVEPATCH) += livepatch.o arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o arm64-obj-$(CONFIG_KGDB) += kgdb.o arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ @@ -44,21 +50,26 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o arm64-obj-$(CONFIG_ACPI) += acpi.o +arm64-obj-$(CONFIG_ARM64_ERR_RECOV) += ras.o arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o -arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o +arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ cpu-reset.o +arm64-obj-$(CONFIG_ARM64_CPU_PARK) += cpu-park.o arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o +arm64-obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o +arm64-obj-$(CONFIG_MPAM) += mpam/ obj-y += $(arm64-obj-y) vdso/ probes/ +obj-$(CONFIG_ARM64_ILP32) += vdso-ilp32/ obj-m += $(arm64-obj-m) head-y := head.o extra-y += $(head-y) vmlinux.lds diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index ed46dc188b225d2d0aec587f435b5b2ce774c5cd..cc0ab74e7838f489eb4e7b11d7db471a7e351a45 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -28,8 +28,10 @@ #include #include +#include #include #include +#include #include #include @@ -154,10 +156,14 @@ static int __init acpi_fadt_sanity_check(void) */ if (table->revision < 5 || (table->revision == 5 && fadt->minor_revision < 1)) { - pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n", + pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n", table->revision, fadt->minor_revision); - ret = -EINVAL; - goto out; + + if (!fadt->arm_boot_flags) { + ret = 
-EINVAL; + goto out; + } + pr_err("FADT has ARM boot flags set, assuming 5.1\n"); } if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) { @@ -257,3 +263,34 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) return __pgprot(PROT_NORMAL_NC); return __pgprot(PROT_DEVICE_nGnRnE); } + +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, + int *pcpu) +{ + int cpu, nid; + + cpu = acpi_map_cpuid(physid, acpi_id); + if (cpu < 0) { + pr_info("Unable to map GICC to logical cpu number\n"); + return cpu; + } + nid = acpi_get_node(handle); + if (nid != NUMA_NO_NODE) { + set_cpu_numa_node(cpu, nid); + } + + *pcpu = cpu; + set_cpu_present(cpu, true); + + return 0; +} +EXPORT_SYMBOL(acpi_map_cpu); + +int acpi_unmap_cpu(int cpu) +{ + set_cpu_present(cpu, false); + set_cpu_numa_node(cpu, NUMA_NO_NODE); + + return 0; +} +EXPORT_SYMBOL(acpi_unmap_cpu); diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index 4f4f1815e0471e0eb2e242b8462a7e4eb8e8b137..fc1ccf2b3b5f0717037ae167930e58a5ea6ea6bb 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -28,7 +28,7 @@ #include -static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; +static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE }; int __init acpi_numa_get_nid(unsigned int cpu) { @@ -46,7 +46,7 @@ static inline int get_cpu_for_acpi_id(u32 uid) return -EINVAL; } -static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header, +static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_gicc_affinity *pa; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index b5d603992d4012201fbd0f655a9bde73eb77e5cb..ce5a26080b466174a617c0bf7e36ae8d39d4abfa 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -32,32 +32,35 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) -int alternatives_applied; +static int all_alternatives_applied; + +static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS); struct alt_region { struct alt_instr *begin; struct alt_instr *end; }; +bool set_cap_spectre_bhb; + +bool alternative_is_applied(u16 cpufeature) +{ + if (cpufeature == ARM64_SPECTRE_BHB) + return set_cap_spectre_bhb; + + if (WARN_ON(cpufeature >= ARM64_NCAPS)) + return false; + + return test_bit(cpufeature, applied_alternatives); +} + /* * Check if the target PC is within an alternative block. */ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { - unsigned long replptr; - - if (kernel_text_address(pc)) - return true; - - replptr = (unsigned long)ALT_REPL_PTR(alt); - if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return false; - - /* - * Branching into *another* alternate sequence is doomed, and - * we're not even trying to fix it up. 
- */ - BUG(); + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); } #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) @@ -145,7 +148,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __apply_alternatives(void *alt_region, bool is_module) +static void __apply_alternatives(void *alt_region, bool is_module, + unsigned long *feature_mask) { struct alt_instr *alt; struct alt_region *region = alt_region; @@ -155,6 +159,9 @@ static void __apply_alternatives(void *alt_region, bool is_module) for (alt = region->begin; alt < region->end; alt++) { int nr_inst; + if (!test_bit(alt->cpufeature, feature_mask)) + continue; + /* Use ARM64_CB_PATCH as an unconditional patch */ if (alt->cpufeature < ARM64_CB_PATCH && !cpus_have_cap(alt->cpufeature)) @@ -192,6 +199,12 @@ static void __apply_alternatives(void *alt_region, bool is_module) dsb(ish); __flush_icache_all(); isb(); + + /* Ignore ARM64_CB bit from feature mask */ + bitmap_or(applied_alternatives, applied_alternatives, + feature_mask, ARM64_NCAPS); + bitmap_and(applied_alternatives, applied_alternatives, + cpu_hwcaps, ARM64_NCAPS); } } @@ -208,14 +221,19 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(alternatives_applied)) + while (!READ_ONCE(all_alternatives_applied)) cpu_relax(); isb(); } else { - BUG_ON(alternatives_applied); - __apply_alternatives(®ion, false); + DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE); + + bitmap_complement(remaining_capabilities, boot_capabilities, + ARM64_NPATCHABLE); + + BUG_ON(all_alternatives_applied); + __apply_alternatives(®ion, false, remaining_capabilities); /* Barriers provided by the cache flushing */ - WRITE_ONCE(alternatives_applied, 1); + WRITE_ONCE(all_alternatives_applied, 1); } return 0; @@ -227,6 +245,65 @@ void __init apply_alternatives_all(void) stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) +/* + * Constant (boot-param configurable) flag selecting the NUMA-aware variant + * of spinlock. Possible values: -1 (off, default) / 0 (auto) / 1 (on). + */ +static int numa_spinlock_flag = -1; + +static int __init numa_spinlock_setup(char *str) +{ + if (!strcmp(str, "auto")) { + numa_spinlock_flag = 0; + return 0; + } else if (!strcmp(str, "on")) { + numa_spinlock_flag = 1; + return 0; + } else if (!strcmp(str, "off")) { + numa_spinlock_flag = -1; + return 0; + } + + return -EINVAL; +} + +early_param("numa_spinlock", numa_spinlock_setup); + +#endif + +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. + */ +void __init apply_boot_alternatives(void) +{ + struct alt_region region = { + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, + }; + + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + __apply_alternatives(®ion, false, &boot_capabilities[0]); + +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) + /* + * If numa_spinlock=auto, switch to the NUMA-friendly slow path for + * spinlocks when we have multiple NUMA nodes in native environment. 
+ */ + if ((numa_spinlock_flag == 1) || + (numa_spinlock_flag == 0 && nr_node_ids > 1 && + cna_queued_spin_lock_slowpath == + native_queued_spin_lock_slowpath)) { + cna_queued_spin_lock_slowpath = + __cna_queued_spin_lock_slowpath; + } +#endif +} + #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length) { @@ -234,7 +311,10 @@ void apply_alternatives_module(void *start, size_t length) .begin = start, .end = start + length, }; + DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + + bitmap_fill(all_capabilities, ARM64_NPATCHABLE); - __apply_alternatives(®ion, true); + __apply_alternatives(®ion, true, &all_capabilities[0]); } #endif diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index d894a20b70b28f709f776d0330edb598283aecad..72f63a59b0088ac0f825b815ea692e44ad05004f 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user); EXPORT_SYMBOL(memstart_addr); /* string / mem functions */ +#ifndef CONFIG_KASAN EXPORT_SYMBOL(strchr); EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strnlen); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memchr); +#endif + EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); -EXPORT_SYMBOL(memchr); -EXPORT_SYMBOL(memcmp); /* atomic bitops */ EXPORT_SYMBOL(set_bit); diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 92be1d12d59080d06472e05e2f292f812a38d224..07a0095956f16b1382b323028290c185ca0b769c 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -62,6 +62,7 @@ struct insn_emulation { static LIST_HEAD(insn_emulation); static int nr_insn_emulated __initdata; static DEFINE_RAW_SPINLOCK(insn_emulation_lock); +static DEFINE_MUTEX(insn_emulation_mutex); static void register_emulation_hooks(struct insn_emulation_ops *ops) { @@ -177,6 +178,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops) struct insn_emulation *insn; insn = kzalloc(sizeof(*insn), GFP_KERNEL); + if (!insn) + return; + insn->ops = ops; insn->min = INSN_UNDEF; @@ -207,10 +211,12 @@ static int emulation_proc_handler(struct ctl_table *table, int write, loff_t *ppos) { int ret = 0; - struct insn_emulation *insn = (struct insn_emulation *) table->data; - enum insn_emulation_mode prev_mode = insn->current_mode; + struct insn_emulation *insn; + enum insn_emulation_mode prev_mode; - table->data = &insn->current_mode; + mutex_lock(&insn_emulation_mutex); + insn = container_of(table->data, struct insn_emulation, current_mode); + prev_mode = insn->current_mode; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write || prev_mode == insn->current_mode) @@ -223,7 +229,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write, update_insn_emulation_mode(insn, INSN_UNDEF); } ret: - table->data = insn; + mutex_unlock(&insn_emulation_mutex); return ret; } @@ -236,6 +242,8 @@ static void __init register_insn_emulation_sysctl(void) insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl), GFP_KERNEL); + if (!insns_sysctl) + return; raw_spin_lock_irqsave(&insn_emulation_lock, flags); list_for_each_entry(insn, &insn_emulation, node) { @@ -245,7 +253,7 @@ static void __init register_insn_emulation_sysctl(void) sysctl->maxlen = sizeof(int); sysctl->procname = insn->ops->name; - sysctl->data = insn; 
+ sysctl->data = &insn->current_mode; sysctl->extra1 = &insn->min; sysctl->extra2 = &insn->max; sysctl->proc_handler = emulation_proc_handler; @@ -402,7 +410,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) /* Check access in reasonable access range for both SWP and SWPB */ user_ptr = (const void __user *)(unsigned long)(address & ~3); - if (!access_ok(VERIFY_WRITE, user_ptr, 4)) { + if (!access_ok(user_ptr, 4)) { pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", address); goto fault; @@ -554,7 +562,7 @@ static int setend_set_hw_mode(bool enable) return 0; } -static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) +static int __a32_setend_handler(struct pt_regs *regs, u32 big_endian) { char *insn; @@ -577,14 +585,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) static int a32_setend_handler(struct pt_regs *regs, u32 instr) { - int rc = compat_setend_handler(regs, (instr >> 9) & 1); + int rc = __a32_setend_handler(regs, (instr >> 9) & 1); arm64_skip_faulting_instruction(regs, 4); return rc; } static int t16_setend_handler(struct pt_regs *regs, u32 instr) { - int rc = compat_setend_handler(regs, (instr >> 3) & 1); + int rc = __a32_setend_handler(regs, (instr >> 3) & 1); arm64_skip_faulting_instruction(regs, 2); return rc; } diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 323aeb5f2fe62c743d1855a0968a836c5f8e6094..6e1847fb4411563f68e7949067466c19c291c360 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -70,7 +70,7 @@ int main(void) DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); #endif DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); @@ -78,6 +78,7 @@ int main(void) DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); @@ -99,7 +100,7 @@ int main(void) DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW); - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); + DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res)); DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE); DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); @@ -121,12 +122,20 @@ int main(void) DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); + DEFINE(VDSO_FIX, offsetof(struct vdso_data, vdso_fix)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); BLANK(); +#ifdef CONFIG_COMPAT + DEFINE(COMPAT_TVAL_TV_SEC, offsetof(struct compat_timeval, tv_sec)); + DEFINE(COMPAT_TVAL_TV_USEC, offsetof(struct compat_timeval, tv_usec)); + DEFINE(COMPAT_TSPEC_TV_SEC, offsetof(struct compat_timespec, tv_sec)); + DEFINE(COMPAT_TSPEC_TV_NSEC, 
offsetof(struct compat_timespec, tv_nsec)); + BLANK(); +#endif DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); BLANK(); @@ -143,6 +152,7 @@ int main(void) DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); + DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); diff --git a/arch/arm64/kernel/binfmt_elf32.c b/arch/arm64/kernel/binfmt_elf32.c new file mode 100644 index 0000000000000000000000000000000000000000..2b49d2a40d8bdfed666c8f8024edfffd69de0f6f --- /dev/null +++ b/arch/arm64/kernel/binfmt_elf32.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Support for AArch32 Linux ELF binaries. + */ + +/* AArch32 EABI. */ +#define EF_ARM_EABI_MASK 0xff000000 + +#define compat_start_thread compat_start_thread +/* + * Unlike the native SET_PERSONALITY macro, the compat version inherits + * READ_IMPLIES_EXEC across a fork() since this is the behaviour on + * arch/arm/. + */ +#define COMPAT_SET_PERSONALITY(ex) \ +({ \ + clear_thread_flag(TIF_32BIT_AARCH64); \ + set_thread_flag(TIF_32BIT); \ +}) + +#define COMPAT_ARCH_DLINFO +#define COMPAT_ELF_HWCAP (a32_elf_hwcap) +#define COMPAT_ELF_HWCAP2 (a32_elf_hwcap2) + +#define compat_arch_setup_additional_pages \ + aarch32_setup_vectors_page + +/* AArch32 EABI. */ +#define compat_elf_check_arch(x) (system_supports_32bit_el0() && \ + ((x)->e_machine == EM_ARM) && \ + ((x)->e_flags & EF_ARM_EABI_MASK)) + + +#include "../../../fs/compat_binfmt_elf.c" diff --git a/arch/arm64/kernel/binfmt_ilp32.c b/arch/arm64/kernel/binfmt_ilp32.c new file mode 100644 index 0000000000000000000000000000000000000000..26b2477d190dbd9994de0ff2ef8288fba1ecf082 --- /dev/null +++ b/arch/arm64/kernel/binfmt_ilp32.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Support for ILP32 Linux/aarch64 ELF binaries. + */ +#undef CONFIG_AARCH32_EL0 +#define compat_elf_gregset_t elf_gregset_t + +#include +#include + +#undef ELF_CLASS +#define ELF_CLASS ELFCLASS32 + +#undef elfhdr +#undef elf_phdr +#undef elf_shdr +#undef elf_note +#undef elf_addr_t +#define elfhdr elf32_hdr +#define elf_phdr elf32_phdr +#define elf_shdr elf32_shdr +#define elf_note elf32_note +#define elf_addr_t Elf32_Addr + +/* + * Some data types as stored in coredump. + */ +#define user_long_t compat_long_t +#define user_siginfo_t compat_siginfo_t +#define copy_siginfo_to_user copy_siginfo_to_user32 + +/* + * The machine-dependent core note format types are defined in elfcore-compat.h, + * which requires asm/elf.h to define compat_elf_gregset_t et al. + */ +#define elf_prstatus compat_elf_prstatus +#define elf_prpsinfo compat_elf_prpsinfo + +/* AARCH64 ILP32 EABI. 
*/ +#undef elf_check_arch +#define elf_check_arch(x) (((x)->e_machine == EM_AARCH64) \ + && (x)->e_ident[EI_CLASS] == ELFCLASS32) + +#undef SET_PERSONALITY +#define SET_PERSONALITY(ex) \ +do { \ + set_bit(TIF_32BIT, ¤t->mm->context.flags); \ + set_thread_flag(TIF_32BIT_AARCH64); \ + clear_thread_flag(TIF_32BIT); \ +} while (0) + +#undef ARCH_DLINFO +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)(long)current->mm->context.vdso); \ +} while (0) + +#undef ELF_PLATFORM +#ifdef __AARCH64EB__ +#define ELF_PLATFORM ("aarch64_be:ilp32") +#else +#define ELF_PLATFORM ("aarch64:ilp32") +#endif + +#undef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE COMPAT_ELF_ET_DYN_BASE + +#undef ELF_HWCAP +#undef ELF_HWCAP2 +#define ELF_HWCAP ((u32) elf_hwcap) +#define ELF_HWCAP2 ((u32) (elf_hwcap >> 32)) + +/* + * Rename a few of the symbols that binfmt_elf.c will define. + * These are all local so the names don't really matter, but it + * might make some debugging less confusing not to duplicate them. + */ +#define elf_format compat_elf_format +#define init_elf_binfmt init_compat_elf_binfmt +#define exit_elf_binfmt exit_compat_elf_binfmt + +#undef ns_to_timeval +#define ns_to_timeval ns_to_compat_timeval + +#include "../../../fs/binfmt_elf.c" diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 0bf0a835122f8d64b4e90e736c277e357d4f1084..0c0cd4d26b875366bda231dca92c743bf93cec6b 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -28,6 +28,17 @@ #define CLIDR_CTYPE(clidr, level) \ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level)) +int cache_line_size(void) +{ + u32 cwg = cache_type_cwg(); + + if (coherency_max_size != 0) + return coherency_max_size; + + return cwg ? 4 << cwg : ARCH_DMA_MINALIGN; +} +EXPORT_SYMBOL_GPL(cache_line_size); + static inline enum cache_type get_cache_type(int level) { u64 clidr; diff --git a/arch/arm64/kernel/cpu-park.S b/arch/arm64/kernel/cpu-park.S new file mode 100644 index 0000000000000000000000000000000000000000..fc8f61e9bd58972b6b74912aaa2f224b24f1ef85 --- /dev/null +++ b/arch/arm64/kernel/cpu-park.S @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CPU park routines + * + * Copyright (C) 2020 Huawei Technologies., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +.text +.pushsection .idmap.text, "awx" + +/* cpu park helper in idmap section */ +ENTRY(enter_cpu_park) + /* Clear sctlr_el1 flags. 
*/ + mrs x12, sctlr_el1 + mov_q x13, SCTLR_ELx_FLAGS + bic x12, x12, x13 + pre_disable_mmu_workaround + msr sctlr_el1, x12 /* disable mmu */ + isb + + mov x18, x0 + mov x0, x1 /* secondary_entry addr */ + br x18 /* call do_cpu_park of each cpu */ +ENDPROC(enter_cpu_park) + +.popsection + +ENTRY(do_cpu_park) + ldr x18, =PARK_MAGIC /* magic number "park" */ + add x1, x0, #8 + str x18, [x1] /* set on-park flag */ + dc civac, x1 /* flush cache of "park" */ + dsb nsh + isb + +.Lloop: + wfe + isb + ldr x19, [x0] + cmp x19, #0 /* test secondary_entry */ + b.eq .Lloop + + ic iallu /* invalidate the local I-cache */ + dsb nsh + isb + + br x19 /* jump to secondary_entry */ +ENDPROC(do_cpu_park) + diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index dec10898d68861ec114681cf15dff0799b01232b..3b911e8964a702f2e66e06e95637c3b81644b25f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -19,9 +19,17 @@ #include #include #include +#include #include #include #include +#include +#include +#ifdef CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH +#include +#include +#include +#endif static bool __maybe_unused is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) @@ -64,19 +72,64 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) return model == entry->midr_range.model; } +#ifdef CONFIG_HISILICON_ERRATUM_1980005 +static bool +hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry, + int scope) +{ + static const struct midr_range idc_support_list[] = { + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_REV(MIDR_HISI_TSV200, 1, 0), + { /* sentinel */ } + }; + + return is_midr_in_range_list(read_cpuid_id(), idc_support_list); +} + +static void +hisilicon_1980005_enable(const struct arm64_cpu_capabilities *__unused) +{ + cpus_set_cap(ARM64_HAS_CACHE_IDC); + arm64_ftr_reg_ctrel0.sys_val |= BIT(CTR_IDC_SHIFT); + arm64_ftr_reg_ctrel0.strict_mask &= ~BIT(CTR_IDC_SHIFT); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); +} +#endif + static bool has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, int scope) { - u64 mask = CTR_CACHE_MINLINE_MASK; + u64 mask = arm64_ftr_reg_ctrel0.strict_mask; + u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask; + u64 ctr_raw, ctr_real; /* Skip matching the min line sizes for cache type check */ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE) mask ^= arm64_ftr_reg_ctrel0.strict_mask; WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - return (read_cpuid_cachetype() & mask) != - (arm64_ftr_reg_ctrel0.sys_val & mask); + + /* + * We want to make sure that all the CPUs in the system expose + * a consistent CTR_EL0 to make sure that applications behave + * correctly with migration. + * + * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0: + * + * 1) It is safe if the system doesn't support IDC, as CPU anyway + * reports IDC = 0, consistent with the rest. + * + * 2) If the system has IDC, it is still safe as we trap CTR_EL0 + * access on this CPU via the ARM64_HAS_CACHE_IDC capability. + * + * So, we need to make sure either the raw CTR_EL0 or the effective + * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+ */ + ctr_raw = read_cpuid_cachetype() & mask; + ctr_real = read_cpuid_effective_cachetype() & mask; + + return (ctr_real != sys) && (ctr_raw != sys); } static void @@ -87,7 +140,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #include #include @@ -96,6 +148,16 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM_INDIRECT_VECTORS extern char __smccc_workaround_1_smc_start[]; extern char __smccc_workaround_1_smc_end[]; +extern char __smccc_workaround_3_smc_start[]; +extern char __smccc_workaround_3_smc_end[]; +extern char __spectre_bhb_loop_k8_start[]; +extern char __spectre_bhb_loop_k8_end[]; +extern char __spectre_bhb_loop_k24_start[]; +extern char __spectre_bhb_loop_k24_end[]; +extern char __spectre_bhb_loop_k32_start[]; +extern char __spectre_bhb_loop_k32_end[]; +extern char __spectre_bhb_clearbhb_start[]; +extern char __spectre_bhb_clearbhb_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -109,11 +171,11 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) +static DEFINE_SPINLOCK(bp_lock); +static void install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) { - static DEFINE_SPINLOCK(bp_lock); int cpu, slot = -1; spin_lock(&bp_lock); @@ -130,15 +192,19 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); } - __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); - __this_cpu_write(bp_hardening_data.fn, fn); + if (fn != __this_cpu_read(bp_hardening_data.fn)) { + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.fn, fn); + __this_cpu_write(bp_hardening_data.template_start, + hyp_vecs_start); + } spin_unlock(&bp_lock); } #else #define __smccc_workaround_1_smc_start NULL #define __smccc_workaround_1_smc_end NULL -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, +static void install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) { @@ -146,23 +212,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, } #endif /* CONFIG_KVM_INDIRECT_VECTORS */ -static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, - bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - u64 pfr0; - - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return; - - pfr0 = read_cpuid(ID_AA64PFR0_EL1); - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) - return; - - __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); -} - #include #include #include @@ -189,26 +238,44 @@ static void qcom_link_stack_sanitization(void) : "=&r" (tmp)); } -static void -enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +static bool __nospectre_v2; +static int __init parse_nospectre_v2(char *str) +{ + __nospectre_v2 = true; + return 0; +} +early_param("nospectre_v2", parse_nospectre_v2); + +/* + * -1: No workaround + * 0: No workaround required + * 1: Workaround installed + */ +static int detect_harden_bp_fw(void) { bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; u32 
midr = read_cpuid_id(); - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return; - if (psci_ops.smccc_version == SMCCC_VERSION_1_0) - return; + return -1; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + + switch ((int)res.a0) { + case 1: + /* Firmware says we're just fine */ + return 0; + case 0: + break; + default: + return -1; + } switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) - return; cb = call_hvc_arch_workaround_1; /* This is a guest, no need to patch KVM vectors */ smccc_start = NULL; @@ -216,33 +283,29 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) break; case PSCI_CONDUIT_SMC: - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) - return; cb = call_smc_arch_workaround_1; smccc_start = __smccc_workaround_1_smc_start; smccc_end = __smccc_workaround_1_smc_end; break; default: - return; + return -1; } if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) cb = qcom_link_stack_sanitization; - install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) + install_bp_hardening_cb(cb, smccc_start, smccc_end); - return; + return 1; } -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ -#ifdef CONFIG_ARM64_SSBD DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; +static bool __ssb_safe = true; static const struct ssbd_options { const char *str; @@ -312,19 +375,24 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt, void arm64_set_ssbd_mitigation(bool state) { - switch (psci_ops.conduit) { - case PSCI_CONDUIT_HVC: - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); - break; + int conduit; - case PSCI_CONDUIT_SMC: - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); - break; + if (!IS_ENABLED(CONFIG_ARM64_SSBD)) { + pr_info_once("SSBD disabled by kernel configuration\n"); + return; + } - default: - WARN_ON_ONCE(1); - break; + if (this_cpu_has_cap(ARM64_SSBS)) { + if (state) + asm volatile(SET_PSTATE_SSBS(0)); + else + asm volatile(SET_PSTATE_SSBS(1)); + return; } + + conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state, + NULL); + WARN_ON_ONCE(conduit == PSCI_CONDUIT_NONE); } static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, @@ -333,27 +401,39 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, struct arm_smccc_res res; bool required = true; s32 val; + bool this_cpu_safe = false; + int conduit; WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + if (cpu_mitigations_off()) + ssbd_state = ARM64_SSBD_FORCE_DISABLE; + + /* delay setting __ssb_safe until we get a firmware response */ + if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) + this_cpu_safe = true; + + if (this_cpu_has_cap(ARM64_SSBS)) { + if (!this_cpu_safe) + __ssb_safe = false; + required = false; + goto out_printmsg; + } + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } - switch (psci_ops.conduit) { - case PSCI_CONDUIT_HVC: - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_2, &res); - break; + conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + 
ARM_SMCCC_ARCH_WORKAROUND_2, &res); - case PSCI_CONDUIT_SMC: - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_2, &res); - break; - - default: + if (conduit == PSCI_CONDUIT_NONE) { ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -362,14 +442,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, switch (val) { case SMCCC_RET_NOT_SUPPORTED: ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; + /* machines with mixed mitigation requirements must not return this */ case SMCCC_RET_NOT_REQUIRED: pr_info_once("%s mitigation not required\n", entry->desc); ssbd_state = ARM64_SSBD_MITIGATED; return false; case SMCCC_RET_SUCCESS: + __ssb_safe = false; required = true; break; @@ -379,12 +463,13 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, default: WARN_ON(1); + if (!this_cpu_safe) + __ssb_safe = false; return false; } switch (ssbd_state) { case ARM64_SSBD_FORCE_DISABLE: - pr_info_once("%s disabled from command-line\n", entry->desc); arm64_set_ssbd_mitigation(false); required = false; break; @@ -397,7 +482,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, break; case ARM64_SSBD_FORCE_ENABLE: - pr_info_once("%s forced from command-line\n", entry->desc); arm64_set_ssbd_mitigation(true); required = true; break; @@ -407,9 +491,111 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, break; } +out_printmsg: + switch (ssbd_state) { + case ARM64_SSBD_FORCE_DISABLE: + pr_info_once("%s disabled from command-line\n", entry->desc); + break; + + case ARM64_SSBD_FORCE_ENABLE: + pr_info_once("%s forced from command-line\n", entry->desc); + break; + } + return required; } -#endif /* CONFIG_ARM64_SSBD */ + +static void __maybe_unused +cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) +{ + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); +} + +#ifdef CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH +# ifdef CONFIG_HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF +static bool readunique_prefetch_enabled; +# else +static bool readunique_prefetch_enabled = true; +# endif +static int __init readunique_prefetch_switch(char *data) +{ + if (!data) + return -EINVAL; + + if (strcmp(data, "off") == 0) + readunique_prefetch_enabled = false; + else if (strcmp(data, "on") == 0) + readunique_prefetch_enabled = true; + else + return -EINVAL; + + return 0; +} +early_param("readunique_prefetch", readunique_prefetch_switch); + +static const struct midr_range readunique_prefetch_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + {}, +}; + +static bool +should_disable_hisi_hip08_ru_prefetch(void) +{ + u64 el; + + if (readunique_prefetch_enabled) + return false; + + if (!is_midr_in_range_list(read_cpuid_id(), readunique_prefetch_cpus)) + return false; + + el = read_sysreg(CurrentEL); + return el == CurrentEL_EL2; +} + +#define CTLR_HISI_HIP08_RU_PREFETCH (1L << 40) +static void __maybe_unused +__hisi_hip08_ru_prefetch_disable(void* unused) +{ + sysreg_clear_set(S3_1_c15_c6_4, 0, CTLR_HISI_HIP08_RU_PREFETCH); +} + +static int hisi_hip08_ru_prefetch_disable(void) +{ + if (should_disable_hisi_hip08_ru_prefetch()) { + on_each_cpu(__hisi_hip08_ru_prefetch_disable, NULL, 1); + pr_info("CPU erratum: HiSilicon HIP08 Cache Readunique Prefetch Disable"); + } + + return 0; +} + +late_initcall(hisi_hip08_ru_prefetch_disable); +#endif + +/* known invulnerable cores */ +static const struct midr_range 
arm64_ssb_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + {}, +}; + +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static bool +has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + /* Cortex-A76 r0p0 - r3p1 */ + struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode(); +} +#endif #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ .matches = is_affected_midr_range, \ @@ -448,6 +634,10 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ CAP_MIDR_RANGE_LIST(midr_list) +/* Track overall mitigation state. We are only mitigated if all cores are ok */ +static bool __hardenbp_enab = true; +static bool __spectrev2_safe = true; + /* * Generic helper for handling capabilties with multiple (match,enable) pairs * of call backs, sharing the same capability bit. @@ -480,26 +670,94 @@ multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) caps->cpu_enable(caps); } -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +/* + * List of CPUs that do not need any Spectre-v2 mitigation at all. + */ +static const struct midr_range spectre_v2_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + { /* sentinel */ } +}; /* - * List of CPUs where we need to issue a psci call to - * harden the branch predictor. + * Track overall bp hardening for all heterogeneous cores in the machine. + * We are only considered "safe" if all booted cores are known safe. 
*/ -static const struct midr_range arm64_bp_harden_smccc_cpus[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), +static bool __maybe_unused +check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) +{ + int need_wa; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + /* If the CPU has CSV2 set, we're safe */ + if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), + ID_AA64PFR0_CSV2_SHIFT)) + return false; + + /* Alternatively, we have a list of unaffected CPUs */ + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) + return false; + + /* Fallback to firmware detection */ + need_wa = detect_harden_bp_fw(); + if (!need_wa) + return false; + + __spectrev2_safe = false; + + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { + pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); + __hardenbp_enab = false; + return false; + } + + /* forced off */ + if (__nospectre_v2 || cpu_mitigations_off()) { + pr_info_once("spectrev2 mitigation disabled by command line option\n"); + __hardenbp_enab = false; + return false; + } + + if (need_wa < 0) { + pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); + __hardenbp_enab = false; + } + + return (need_wa > 0); +} + +static void +cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap) +{ + cap->matches(cap, SCOPE_LOCAL_CPU); +} + +static const __maybe_unused struct midr_range tx2_family_cpus[] = { MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), - MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER), {}, }; -#endif +static bool __maybe_unused +needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, + int scope) +{ + int i; + + if (!is_affected_midr_range_list(entry, scope) || + !is_hyp_mode_available()) + return false; + + for_each_possible_cpu(i) { + if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) + return true; + } + + return false; +} #ifdef CONFIG_HARDEN_EL2_VECTORS @@ -511,6 +769,15 @@ static const struct midr_range arm64_harden_el2_vectors[] = { #endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 +static struct midr_range broken_aarch32_aes[] = { + MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + {}, +}; +#endif + + const struct arm64_cpu_capabilities arm64_errata[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \ @@ -629,6 +896,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .cpu_enable = cpu_enable_trap_ctr_access, }, +#ifdef CONFIG_HISILICON_ERRATUM_1980005 + { + .desc = "Taishan IDC coherence workaround", + .capability = ARM64_WORKAROUND_HISILICON_1980005, + .matches = hisilicon_1980005_match, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .cpu_enable = hisilicon_1980005_enable, + }, +#endif #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 { .desc = "Qualcomm Technologies Falkor erratum 1003", @@ -658,13 +934,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, #endif -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR { + .desc = "Branch predictor hardening", .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - .cpu_enable = enable_smccc_arch_workaround_1, - ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = 
check_branch_predictor, + .cpu_enable = cpu_enable_branch_predictor_hardening, }, -#endif #ifdef CONFIG_HARDEN_EL2_VECTORS { .desc = "EL2 vector hardening", @@ -672,14 +948,443 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), }, #endif -#ifdef CONFIG_ARM64_SSBD { .desc = "Speculative Store Bypass Disable", .capability = ARM64_SSBD, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_ssbd_mitigation, + .midr_range_list = arm64_ssb_cpus, + }, + { + .desc = "Spectre-BHB", + .capability = ARM64_SPECTRE_BHB, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = is_spectre_bhb_affected, + .cpu_enable = spectre_bhb_enable_mitigation, + }, +#ifdef CONFIG_ARM64_ERRATUM_1463225 + { + .desc = "ARM erratum 1463225", + .capability = ARM64_WORKAROUND_1463225, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_cortex_a76_erratum_1463225, }, +#endif +#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 + { + .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + .matches = needs_tx2_tvm_workaround, + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 + { + .desc = "ARM erratum 1742098", + .capability = ARM64_WORKAROUND_1742098, + CAP_MIDR_RANGE_LIST(broken_aarch32_aes), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + }, #endif { } }; + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +static const char *get_bhb_affected_string(enum mitigation_state bhb_state) +{ + switch (bhb_state) { + case SPECTRE_UNAFFECTED: + return ""; + default: + case SPECTRE_VULNERABLE: + return ", but not BHB"; + case SPECTRE_MITIGATED: + return ", BHB"; + } +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); + const char *bhb_str = get_bhb_affected_string(bhb_state); + const char *v2_str = "Branch predictor hardening"; + + if (__spectrev2_safe) { + if (bhb_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "Not affected\n"); + + /* + * Platforms affected by Spectre-BHB can't report + * "Not affected" for Spectre-v2. + */ + v2_str = "CSV2"; + } + + if (__hardenbp_enab) + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (__ssb_safe) + return sprintf(buf, "Not affected\n"); + + switch (ssbd_state) { + case ARM64_SSBD_KERNEL: + case ARM64_SSBD_FORCE_ENABLE: + if (IS_ENABLED(CONFIG_ARM64_SSBD)) + return sprintf(buf, + "Mitigation: Speculative Store Bypass disabled via prctl\n"); + } + + return sprintf(buf, "Vulnerable\n"); +} + +/* + * We try to ensure that the mitigation state can never change as the result of + * onlining a late CPU. + */ +static void update_mitigation_state(enum mitigation_state *oldp, + enum mitigation_state new) +{ + enum mitigation_state state; + + do { + state = READ_ONCE(*oldp); + if (new <= state) + break; + } while (cmpxchg_relaxed(oldp, state, new) != state); +} + +/* + * Spectre BHB. + * + * A CPU is either: + * - Mitigated by a branchy loop a CPU specific number of times, and listed + * in our "loop mitigated list". + * - Mitigated in software by the firmware Spectre v2 call. + * - Has the ClearBHB instruction to perform the mitigation. 
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no + * software mitigation in the vectors is needed. + * - Has CSV2.3, so is unaffected. + */ +static enum mitigation_state spectre_bhb_state; + +enum mitigation_state arm64_get_spectre_bhb_state(void) +{ + return spectre_bhb_state; +} + +/* + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any + * SCOPE_SYSTEM call will give the right answer. + */ +u8 spectre_bhb_loop_affected(int scope) +{ + u8 k = 0; + static u8 max_bhb_k; + + if (scope == SCOPE_LOCAL_CPU) { + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + {}, + }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; + + max_bhb_k = max(max_bhb_k, k); + } else { + k = max_bhb_k; + } + + return k; +} + +static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return SPECTRE_VULNERABLE; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + break; + + default: + return SPECTRE_VULNERABLE; + } + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +static bool is_spectre_bhb_fw_affected(int scope) +{ + static bool system_affected; + enum mitigation_state fw_state; + bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1); + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + {}, + }; + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), + spectre_bhb_firmware_mitigated_list); + + if (scope != SCOPE_LOCAL_CPU) + return system_affected; + + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { + system_affected = true; + return true; + } + + return false; +} + +static bool supports_ecbhb(int scope) +{ + u64 mmfr1; + + if (scope == SCOPE_LOCAL_CPU) + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); + else + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_ECBHB_SHIFT); +} + +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (supports_csv2p3(scope)) + return false; + + if (supports_clearbhb(scope)) + 
return true; + + if (spectre_bhb_loop_affected(scope)) + return true; + + if (is_spectre_bhb_fw_affected(scope)) + return true; + + return false; +} + +static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) +{ + const char *v = arm64_get_bp_hardening_vector(slot); + + if (slot < 0) + return; + + __this_cpu_write(this_cpu_vector, v); + + /* + * When KPTI is in use, the vectors are switched when exiting to + * user-space. + */ + if (arm64_kernel_unmapped_at_el0()) + return; + + write_sysreg(v, vbar_el1); + isb(); +} + +#ifdef CONFIG_KVM_INDIRECT_VECTORS +static const char *kvm_bhb_get_vecs_end(const char *start) +{ + if (start == __smccc_workaround_3_smc_start) + return __smccc_workaround_3_smc_end; + else if (start == __spectre_bhb_loop_k8_start) + return __spectre_bhb_loop_k8_end; + else if (start == __spectre_bhb_loop_k24_start) + return __spectre_bhb_loop_k24_end; + else if (start == __spectre_bhb_loop_k32_start) + return __spectre_bhb_loop_k32_end; + else if (start == __spectre_bhb_clearbhb_start) + return __spectre_bhb_clearbhb_end; + + return NULL; +} + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) +{ + int cpu, slot = -1; + const char *hyp_vecs_end; + + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return; + + hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start); + if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end)) + return; + + spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; + } + } + + if (slot == -1) { + slot = atomic_inc_return(&arm64_el2_vector_last_slot); + BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) { + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.template_start, + hyp_vecs_start); + } + spin_unlock(&bp_lock); +} +#else +#define __smccc_workaround_3_smc_start NULL +#define __spectre_bhb_loop_k8_start NULL +#define __spectre_bhb_loop_k24_start NULL +#define __spectre_bhb_loop_k32_start NULL +#define __spectre_bhb_clearbhb_start NULL + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }; +#endif + +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) +{ + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) + return; + + if (!__spectrev2_safe && !__hardenbp_enab) { + /* No point mitigating Spectre-BHB alone. */ + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); + } else if (cpu_mitigations_off()) { + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { + state = SPECTRE_MITIGATED; + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { + kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start); + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); + + state = SPECTRE_MITIGATED; + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) { + case 8: + /* + * A57/A72-r0 will already have selected the + * spectre-indirect vector, which is sufficient + * for BHB too. 
+ */ + if (!__this_cpu_read(bp_hardening_data.fn)) + kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start); + break; + case 24: + kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start); + break; + case 32: + kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start); + break; + default: + WARN_ON_ONCE(1); + } + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); + + state = SPECTRE_MITIGATED; + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (fw_state == SPECTRE_MITIGATED) { + kvm_setup_bhb_slot(__smccc_workaround_3_smc_start); + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + + /* + * With WA3 in the vectors, the WA1 calls can be + * removed. + */ + __this_cpu_write(bp_hardening_data.fn, NULL); + + state = SPECTRE_MITIGATED; + } + } + + update_mitigation_state(&spectre_bhb_state, state); +} + +/* Patched to correct the immediate */ +void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + u8 rd; + u32 insn; + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); + + BUG_ON(nr_inst != 1); /* MOV -> MOV */ + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) + return; + + insn = le32_to_cpu(*origptr); + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); + insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_MOVEWIDE_ZERO); + *updptr++ = cpu_to_le32(insn); +} diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index ea001241bdd470ab4a0a13ba4dad9bdb5a818bae..00f8b8612b69f87bd3b9dbaa4f7c9a08254d2bca 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu) pr_err("%pOF: missing enable-method property\n", dn); } + of_node_put(dn); } else { enable_method = acpi_get_enable_method(cpu); if (!enable_method) { diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index e238b7932096d5641de25a6f16a9292cc1f3d517..2c8db192e4a9c3fa5e9fc469b1924f42930dd75e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -20,38 +20,46 @@ #include #include +#include #include #include #include #include +#include + #include #include #include #include +#include #include #include #include #include +#include #include unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); -#ifdef CONFIG_COMPAT -#define COMPAT_ELF_HWCAP_DEFAULT \ +#ifdef CONFIG_AARCH32_EL0 +#define AARCH32_EL0_ELF_HWCAP_DEFAULT \ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ - COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ - COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ COMPAT_HWCAP_LPAE) -unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; -unsigned int compat_elf_hwcap2 __read_mostly; +unsigned int a32_elf_hwcap __read_mostly = AARCH32_EL0_ELF_HWCAP_DEFAULT; +unsigned int a32_elf_hwcap2 __read_mostly; #endif DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); +/* Need also bit for ARM64_CB_PATCH */ +DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + +DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; + /* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. 
This @@ -146,9 +154,15 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), @@ -164,10 +178,23 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + /* + * We already refuse to boot CPUs that don't support our configured + * page size, so we can only detect mismatches for a page size other + * than the one we're currently using. Unfortunately, SoCs like this + * exist in the wild so, even though we don't like it, we'll have to go + * along with it and treat them as non-strict. + */ + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), /* Linux shouldn't care about secure memory */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), @@ -206,8 +233,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), /* * Linux can handle differing I-cache policies. 
Userspace JITs will @@ -252,6 +279,30 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_mvfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0), + ARM64_FTR_END, +}; + +static const struct arm64_ftr_bits ftr_mvfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_mvfr2[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */ @@ -267,10 +318,10 @@ static const struct arm64_ftr_bits ftr_dczid[] = { static const struct arm64_ftr_bits ftr_id_isar5[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -289,7 +340,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_dfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + /* [31:28] TraceFilt */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), @@ -310,7 +361,7 @@ static const struct arm64_ftr_bits ftr_zcr[] = { * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of * 0. 
Covers the following 32bit registers: - * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1] + * id_isar[1-3], id_mmfr[1-3] */ static const struct arm64_ftr_bits ftr_generic_32bits[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), @@ -365,13 +416,13 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), /* Op1 = 0, CRn = 0, CRm = 3 */ - ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits), - ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits), + ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0), + ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1), ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), /* Op1 = 0, CRn = 0, CRm = 4 */ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0), - ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz), + ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1), ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz), /* Op1 = 0, CRn = 0, CRm = 5 */ @@ -381,6 +432,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 6 */ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1), + ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), @@ -449,6 +501,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, case FTR_LOWER_SAFE: ret = new < cur ? new : cur; break; + case FTR_HIGHER_OR_ZERO_SAFE: + if (!cur || !new) + break; + /* Fallthrough */ case FTR_HIGHER_SAFE: ret = new > cur ? new : cur; break; @@ -525,6 +581,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); @@ -642,6 +699,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64isar0, boot->reg_id_aa64isar0); taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); /* * Differing PARange support is fine as long as all peripherals and @@ -657,7 +716,6 @@ void update_cpu_features(int cpu, /* * EL3 is not our concern. - * ID_AA64PFR1 is currently RES0. 
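As a rough illustration of the FTR_HIGHER_OR_ZERO_SAFE arbitration added to arm64_ftr_safe_value() in the hunk above: a zero field means "unknown", so the sanitised value falls back to zero unless both CPUs report non-zero values, in which case the larger one wins (as for CTR_EL0.CWG/ERG). A minimal standalone sketch in userspace C follows; the enum and function names are simplified stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's FTR_* types (illustrative only). */
enum ftr_type { LOWER_SAFE, HIGHER_SAFE, HIGHER_OR_ZERO_SAFE };

/* Pick the value that is safe to advertise when two CPUs disagree. */
static int64_t safe_value(enum ftr_type type, int64_t new, int64_t cur)
{
	int64_t ret = 0;

	switch (type) {
	case LOWER_SAFE:
		ret = new < cur ? new : cur;	/* the smaller capability is safe */
		break;
	case HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;			/* 0 means "unknown": result stays 0 */
		/* fall through */
	case HIGHER_SAFE:
		ret = new > cur ? new : cur;	/* the larger value is safe, e.g. CWG/ERG */
		break;
	}
	return ret;
}

int main(void)
{
	printf("%lld\n", (long long)safe_value(HIGHER_OR_ZERO_SAFE, 0, 4)); /* -> 0 */
	printf("%lld\n", (long long)safe_value(HIGHER_OR_ZERO_SAFE, 3, 4)); /* -> 4 */
	return 0;
}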
*/ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); @@ -743,6 +801,19 @@ u64 read_sanitised_ftr_reg(u32 id) return regp->sys_val; } +int arm64_cpu_ftr_regs_traverse(int (*op)(u32, u64, void *), void *argp) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) { + ret = (*op)(arm64_ftr_regs[i].sys_id, + arm64_ftr_regs[i].reg->sys_val, argp); + if (ret < 0) + return ret; + } + return 0; +} + #define read_sysreg_case(r) \ case r: return read_sysreg_s(r) @@ -779,6 +850,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64MMFR2_EL1); read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); + read_sysreg_case(SYS_ID_AA64ISAR2_EL1); read_sysreg_case(SYS_CNTFRQ_EL0); read_sysreg_case(SYS_CTR_EL0); @@ -792,6 +864,20 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) #include +static bool lse_disabled; + +static int __init parse_lse(char *str) +{ + if (str == NULL) + return 1; + + if (!strncmp(str, "off", 3)) + lse_disabled = true; + + return 0; +} +early_param("lse", parse_lse); + static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) { @@ -800,20 +886,56 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) return val >= entry->min_field_value; } -static bool -has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +static u64 +read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope) { - u64 val; - WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); if (scope == SCOPE_SYSTEM) - val = read_sanitised_ftr_reg(entry->sys_reg); + return read_sanitised_ftr_reg(entry->sys_reg); else - val = __read_sysreg_by_encoding(entry->sys_reg); + return __read_sysreg_by_encoding(entry->sys_reg); +} + +static bool +has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + int mask; + struct arm64_ftr_reg *regp; + u64 val = read_scoped_sysreg(entry, scope); + + regp = get_arm64_ftr_reg(entry->sys_reg); + if (!regp) + return false; + + mask = cpuid_feature_extract_unsigned_field(regp->user_mask, + entry->field_pos); + if (!mask) + return false; return feature_matches(val, entry); } +static bool +has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) +{ + u64 val = read_scoped_sysreg(entry, scope); + return feature_matches(val, entry); +} + +#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +static bool has_cpuid_feature_lse(const struct arm64_cpu_capabilities *entry, + int scope) +{ + if (lse_disabled) { + pr_info_once("%s forced OFF by command line option\n", + entry->desc); + return false; + } + + return has_cpuid_feature(entry, scope); +} +#endif + static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) { bool has_sre; @@ -834,7 +956,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ u32 midr = read_cpuid_id(); /* Cavium ThunderX pass 1.x and 2.x */ - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, + return midr_is_cpu_model_range(midr, MIDR_THUNDERX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); } @@ -848,18 +970,57 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus } static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, - int __unused) + int scope) +{ + u64 ctr; + +#ifndef CONFIG_HISILICON_ERRATUM_1980005 + /* Fix kABI compatible for CONFIG_HISILICON_ERRATUM_1980005 */ + static const struct midr_range idc_support_list[] 
= { + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_REV(MIDR_HISI_TSV200, 1, 0), + { /* sentinel */ } + }; + if (is_midr_in_range_list(read_cpuid_id(), idc_support_list)) { + pr_info("CPU features: detected: Taishan IDC coherence workaround\n"); + return true; + } +#endif + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_effective_cachetype(); + + return ctr & BIT(CTR_IDC_SHIFT); +} + +static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused) { - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT); + /* + * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively + * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses + * to the CTR_EL0 on this CPU and emulate it with the real/safe + * value. + */ + if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT))) + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, - int __unused) + int scope) { - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT); + u64 ctr; + + if (scope == SCOPE_SYSTEM) + ctr = arm64_ftr_reg_ctrel0.sys_val; + else + ctr = read_cpuid_cachetype(); + + return ctr & BIT(CTR_DIC_SHIFT); } -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static bool __meltdown_safe = true; static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, @@ -869,9 +1030,26 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static const struct midr_range kpti_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), { /* sentinel */ } }; - char const *str = "command line option"; + char const *str = "kpti command line option"; + bool meltdown_safe; + + meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); + + /* Defer to CPU feature registers */ + if (has_cpuid_feature(entry, scope)) + meltdown_safe = true; + + if (!meltdown_safe) + __meltdown_safe = false; /* * For reasons that aren't entirely clear, enabling KPTI on Cavium @@ -883,6 +1061,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, __kpti_forced = -1; } + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { + if (!__kpti_forced) { + str = "KASLR"; + __kpti_forced = 1; + } + } + + if (cpu_mitigations_off() && !__kpti_forced) { + str = "mitigations=off"; + __kpti_forced = -1; + } + + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { + pr_info_once("kernel page table isolation disabled by kernel configuration\n"); + return false; + } + /* Forced? 
*/ if (__kpti_forced) { pr_info_once("kernel page table isolation forced %s by %s\n", @@ -890,18 +1086,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, return __kpti_forced > 0; } - /* Useful for KASLR robustness */ - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return true; - - /* Don't force KPTI for CPUs that are not vulnerable */ - if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) - return false; - - /* Defer to CPU feature registers */ - return !has_cpuid_feature(entry, scope); + return !meltdown_safe; } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static void kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) { @@ -912,6 +1100,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) static bool kpti_applied = false; int cpu = smp_processor_id(); + if (__this_cpu_read(this_cpu_vector) == vectors) { + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); + + __this_cpu_write(this_cpu_vector, v); + } + if (kpti_applied) return; @@ -926,6 +1120,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) return; } +#else +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) +{ +} +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ static int __init parse_kpti(char *str) { @@ -939,7 +1139,6 @@ static int __init parse_kpti(char *str) return 0; } early_param("kpti", parse_kpti); -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #ifdef CONFIG_ARM64_HW_AFDBM static inline void __cpu_enable_hw_dbm(void) @@ -1022,7 +1221,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to * do anything here. */ - if (!alternatives_applied) + if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); } #endif @@ -1035,11 +1234,111 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused) WARN_ON(val & (7 << 27 | 7 << 21)); } +#ifdef CONFIG_ARM64_SSBD +static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) +{ + if (user_mode(regs)) + return 1; + + if (instr & BIT(CRm_shift)) + regs->pstate |= PSR_SSBS_BIT; + else + regs->pstate &= ~PSR_SSBS_BIT; + + arm64_skip_faulting_instruction(regs, 4); + return 0; +} + +static struct undef_hook ssbs_emulation_hook = { + .instr_mask = ~(1U << CRm_shift), + .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM, + .fn = ssbs_emulation_handler, +}; + +static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused) +{ + static bool undef_hook_registered = false; + static DEFINE_SPINLOCK(hook_lock); + + spin_lock(&hook_lock); + if (!undef_hook_registered) { + register_undef_hook(&ssbs_emulation_hook); + undef_hook_registered = true; + } + spin_unlock(&hook_lock); + + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); + arm64_set_ssbd_mitigation(false); + } else { + arm64_set_ssbd_mitigation(true); + } +} +#endif /* CONFIG_ARM64_SSBD */ + +#ifdef CONFIG_ARM64_PAN +static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) +{ + /* + * We modify PSTATE. This won't work from irq context as the PSTATE + * is discarded once we return from the exception. 
+ */ + WARN_ON_ONCE(in_interrupt()); + + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); + asm(SET_PSTATE_PAN(1)); +} +#endif /* CONFIG_ARM64_PAN */ + +#ifdef CONFIG_ARM64_RAS_EXTN +static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) +{ + /* Firmware may have left a deferred SError in this register. */ + write_sysreg_s(0, SYS_DISR_EL1); +} +#endif /* CONFIG_ARM64_RAS_EXTN */ + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, + char *buf) +{ + if (__meltdown_safe) + return sprintf(buf, "Not affected\n"); + + if (arm64_kernel_unmapped_at_el0()) + return sprintf(buf, "Mitigation: PTI\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +#ifdef CONFIG_ARM64_PSEUDO_NMI +bool enable_pseudo_nmi; + +static int __init early_enable_pseudo_nmi(char *p) +{ + return strtobool(p, &enable_pseudo_nmi); +} +early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); + +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); +} +#endif + +static void elf_hwcap_fixup(void) +{ +#ifdef CONFIG_ARM64_ERRATUM_1742098 + if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) + a32_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; +#endif /* ARM64_ERRATUM_1742098 */ +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, .field_pos = ID_AA64PFR0_GIC_SHIFT, @@ -1064,7 +1363,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .desc = "LSE atomic instructions", .capability = ARM64_HAS_LSE_ATOMICS, .type = ARM64_CPUCAP_SYSTEM_FEATURE, - .matches = has_cpuid_feature, + .matches = has_cpuid_feature_lse, .sys_reg = SYS_ID_AA64ISAR0_EL1, .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, .sign = FTR_UNSIGNED, @@ -1118,7 +1417,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64PFR0_EL0_SHIFT, .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, }, -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 { .desc = "Kernel page table isolation (KPTI)", .capability = ARM64_UNMAP_KERNEL_AT_EL0, @@ -1134,11 +1432,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = unmap_kernel_at_el0, .cpu_enable = kpti_install_ng_mappings, }, -#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, .min_field_value = 0, .matches = has_no_fpsimd, }, @@ -1179,11 +1476,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_clear_disr, }, #endif /* CONFIG_ARM64_RAS_EXTN */ +#ifdef CONFIG_MPAM + { + .desc = "ARM64 MPAM Extension Support", + .capability = ARM64_HAS_MPAM, + .type = ARM64_CPUCAP_SCOPE_SYSTEM, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64PFR0_MPAM_SHIFT, + .min_field_value = ID_AA64PFR0_MPAM, + }, +#endif /* CONFIG_MPAM */ { .desc = "Data cache clean to the PoU not required for I/D coherence", .capability = ARM64_HAS_CACHE_IDC, .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cache_idc, + .cpu_enable = cpu_emulate_effective_ctr, }, { .desc = "Instruction cache invalidation not required for I/D coherence", @@ -1221,21 +1531,70 @@ static const struct arm64_cpu_capabilities 
arm64_features[] = { .matches = has_hw_dbm, .cpu_enable = cpu_enable_hw_dbm, }, +#endif +#ifdef CONFIG_ARM64_PSEUDO_NMI + { + /* + * Depends on having GICv3 + */ + .desc = "IRQ priority masking", + .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = can_use_gic_priorities, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_GIC_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, +#endif + { + .desc = "CRC32 instructions", + .capability = ARM64_HAS_CRC32, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR0_EL1, + .field_pos = ID_AA64ISAR0_CRC32_SHIFT, + .min_field_value = 1, + }, +#ifdef CONFIG_ARM64_SSBD + { + .desc = "Speculative Store Bypassing Safe (SSBS)", + .capability = ARM64_SSBS, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .field_pos = ID_AA64PFR1_SSBS_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, + .cpu_enable = cpu_enable_ssbs, + }, #endif {}, }; -#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ - { \ - .desc = #cap, \ - .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ - .matches = has_cpuid_feature, \ +#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ + .matches = has_user_cpuid_feature, \ .sys_reg = reg, \ .field_pos = field, \ .sign = s, \ .min_field_value = min_value, \ + +#define __HWCAP_CAP(name, cap_type, cap) \ + .desc = name, \ + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ .hwcap_type = cap_type, \ .hwcap = cap, \ + +#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + HWCAP_CPUID_MATCH(reg, field, s, min_value) \ + } + +#define HWCAP_CAP_MATCH(match, cap_type, cap) \ + { \ + __HWCAP_CAP(#cap, cap_type, cap) \ + .matches = match, \ } static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { @@ -1267,11 +1626,39 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { #ifdef CONFIG_ARM64_SVE HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), #endif + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS), {}, }; -static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 +static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) +{ + /* + * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, + * in line with that of arm32 as in vfp_init(). We make sure that the + * check is future proof, by making sure value is non-zero. + */ + u32 mvfr1; + + WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); + if (scope == SCOPE_SYSTEM) + mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); + else + mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) && + cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT); +} +#endif + +static const struct arm64_cpu_capabilities a32_elf_hwcaps[] = { +#ifdef CONFIG_AARCH32_EL0 + HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), + HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), + /* Arm v8 mandates MVFR0.FPDP == {0, 2}. 
So, piggy back on this for the presence of VFP support */ + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), + HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), @@ -1287,12 +1674,12 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) case CAP_HWCAP: elf_hwcap |= cap->hwcap; break; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 case CAP_COMPAT_HWCAP: - compat_elf_hwcap |= (u32)cap->hwcap; + a32_elf_hwcap |= (u32)cap->hwcap; break; case CAP_COMPAT_HWCAP2: - compat_elf_hwcap2 |= (u32)cap->hwcap; + a32_elf_hwcap2 |= (u32)cap->hwcap; break; #endif default: @@ -1310,12 +1697,12 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) case CAP_HWCAP: rc = (elf_hwcap & cap->hwcap) != 0; break; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 case CAP_COMPAT_HWCAP: - rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; + rc = (a32_elf_hwcap & (u32)cap->hwcap) != 0; break; case CAP_COMPAT_HWCAP2: - rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; + rc = (a32_elf_hwcap2 & (u32)cap->hwcap) != 0; break; #endif default: @@ -1366,6 +1753,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, if (!cpus_have_cap(caps->capability) && caps->desc) pr_info("%s %s\n", info, caps->desc); cpus_set_cap(caps->capability); + + if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) + set_bit(caps->capability, boot_capabilities); } } @@ -1396,6 +1786,11 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, for (; caps->matches; caps++) { unsigned int num = caps->capability; + if (num == ARM64_SPECTRE_BHB) { + set_cap_spectre_bhb = true; + continue; + } + if (!(caps->type & scope_mask) || !cpus_have_cap(num)) continue; @@ -1561,7 +1956,7 @@ static void verify_local_cpu_capabilities(void) verify_local_elf_hwcaps(arm64_elf_hwcaps); if (system_supports_32bit_el0()) - verify_local_elf_hwcaps(compat_elf_hwcaps); + verify_local_elf_hwcaps(a32_elf_hwcaps); if (system_supports_sve()) verify_sve_features(); @@ -1631,8 +2026,10 @@ void __init setup_cpu_features(void) mark_const_caps_ready(); setup_elf_hwcaps(arm64_elf_hwcaps); - if (system_supports_32bit_el0()) - setup_elf_hwcaps(compat_elf_hwcaps); + if (system_supports_32bit_el0()) { + setup_elf_hwcaps(a32_elf_hwcaps); + elf_hwcap_fixup(); + } if (system_uses_ttbr0_pan()) pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); @@ -1660,7 +2057,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) /* * We emulate only the following system register space. - * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7] + * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7] * See Table C5-6 System instruction encodings for System register accesses, * ARMv8 ARM(ARM DDI 0487A.f) for more details. 
*/ @@ -1670,7 +2067,7 @@ static inline bool __attribute_const__ is_emulated(u32 id) sys_reg_CRn(id) == 0x0 && sys_reg_Op1(id) == 0x0 && (sys_reg_CRm(id) == 0 || - ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7)))); + ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7)))); } /* @@ -1755,9 +2152,3 @@ static int __init enable_mrs_emulation(void) } core_initcall(enable_mrs_emulation); - -void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) -{ - /* Firmware may have left a deferred SError in this register. */ - write_sysreg_s(0, SYS_DISR_EL1); -} diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index e9ab7b3ed31765e2a915c9841d515679f8e8749a..005d88db1082ab8080dd7136c991e3ac382de48d 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -81,10 +81,11 @@ static const char *const hwcap_str[] = { "uscat", "ilrcpc", "flagm", + "ssbs", NULL }; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 static const char *const compat_hwcap_str[] = { "swp", "half", @@ -119,12 +120,12 @@ static const char *const compat_hwcap2_str[] = { "crc32", NULL }; -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ static int c_show(struct seq_file *m, void *v) { int i, j; - bool compat = personality(current->personality) == PER_LINUX32; + bool aarch32 = personality(current->personality) == PER_LINUX32; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); @@ -136,7 +137,7 @@ static int c_show(struct seq_file *m, void *v) * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); - if (compat) + if (aarch32) seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); @@ -151,16 +152,16 @@ static int c_show(struct seq_file *m, void *v) * software which does already (at least for 32-bit). */ seq_puts(m, "Features\t:"); - if (compat) { -#ifdef CONFIG_COMPAT + if (aarch32) { +#ifdef CONFIG_AARCH32_EL0 for (j = 0; compat_hwcap_str[j]; j++) - if (compat_elf_hwcap & (1 << j)) + if (a32_elf_hwcap & (1 << j)) seq_printf(m, " %s", compat_hwcap_str[j]); for (j = 0; compat_hwcap2_str[j]; j++) - if (compat_elf_hwcap2 & (1 << j)) + if (a32_elf_hwcap2 & (1 << j)) seq_printf(m, " %s", compat_hwcap2_str[j]); -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ } else { for (j = 0; hwcap_str[j]; j++) if (elf_hwcap & (1 << j)) @@ -324,7 +325,15 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); - info->reg_ctr = read_cpuid_cachetype(); + /* + * Use the effective value of the CTR_EL0 rather than the raw value + * exposed by the CPU. CTR_EL0.IDC field value must be interpreted + * with the CLIDR_EL1 fields to avoid triggering false warnings + * when there is a mismatch across the CPUs. Keep track of the + * effective value of the CTR_EL0 in our internal records for + * accurate sanity checks and feature enablement.
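For illustration, the effective-CTR_EL0 derivation referred to in the comment above can be modelled roughly as below. This is a sketch, not the kernel helper itself: the IDC bit position and the CLIDR_EL1 LoC/LoUU/LoUIS field offsets used here are assumptions taken from the architecture description.

#include <stdint.h>

#define CTR_IDC_BIT		(1u << 28)	/* assumed IDC position in CTR_EL0 */
#define CLIDR_FIELD(reg, shift)	(((reg) >> (shift)) & 0x7)
#define CLIDR_LOUIS_SHIFT	21		/* assumed CLIDR_EL1 field offsets */
#define CLIDR_LOC_SHIFT		24
#define CLIDR_LOUU_SHIFT	27

/*
 * If the raw CTR_EL0 claims D-cache cleaning is needed for I/D coherence
 * (IDC == 0) but CLIDR_EL1 shows no cache level that must be cleaned to
 * the PoU, report IDC as set in the effective value.
 */
static uint32_t effective_cachetype(uint32_t ctr, uint64_t clidr)
{
	if (!(ctr & CTR_IDC_BIT) &&
	    (CLIDR_FIELD(clidr, CLIDR_LOC_SHIFT) == 0 ||
	     (CLIDR_FIELD(clidr, CLIDR_LOUU_SHIFT) == 0 &&
	      CLIDR_FIELD(clidr, CLIDR_LOUIS_SHIFT) == 0)))
		ctr |= CTR_IDC_BIT;

	return ctr;
}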
+ */ + info->reg_ctr = read_cpuid_effective_cachetype(); info->reg_dczid = read_cpuid(DCZID_EL0); info->reg_midr = read_cpuid_id(); info->reg_revidr = read_cpuid(REVIDR_EL1); @@ -333,6 +342,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c index f46d57c31443062c626e6062f4925d6206ee832b..76905a2585505fffb6783e9c2c4b1e795adb4e34 100644 --- a/arch/arm64/kernel/crash_dump.c +++ b/arch/arm64/kernel/crash_dump.c @@ -67,5 +67,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count); + *ppos += count; + return count; } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 06ca574495af9f6c67e841a188b1f6d13f5f1f9d..3beb8847e296cbaf2e56e05a3ca19c1d9d4ea9d8 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors); */ static int clear_os_lock(unsigned int cpu) { + write_sysreg(0, osdlr_el1); write_sysreg(0, oslar_el1); isb(); return 0; @@ -339,10 +340,10 @@ int aarch32_break_handler(struct pt_regs *regs) bool bp = false; void __user *pc = (void __user *)instruction_pointer(regs); - if (!compat_user_mode(regs)) + if (!a32_user_mode(regs)) return -EFAULT; - if (compat_thumb_mode(regs)) { + if (a32_thumb_mode(regs)) { /* get 16-bit Thumb instruction */ __le16 instr; get_user(instr, (__le16 __user *)pc); diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 1175f5827ae17ffabc9f071e46b942c62b530a1f..295951f3172ea69ab8cbe3e188a4fa6935635ff1 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -79,7 +79,6 @@ .macro mcount_get_lr reg ldr \reg, [x29] ldr \reg, [\reg, #8] - mcount_adjust_addr \reg, \reg .endm .macro mcount_get_lr_addr reg diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 09dbea221a2744cb23f6652fd476bd253c5f935d..7c231eb211a427ff321a502653dfb6893b98ce9a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -70,18 +70,21 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -alternative_if ARM64_UNMAP_KERNEL_AT_EL0 +.Lventry_start\@: .if \el == 0 + /* + * This must be the first instruction of the EL0 vector entries. It is + * skipped by the trampoline vectors, to trigger the cleanup. + */ + b .Lskip_tramp_vectors_cleanup\@ .if \regsize == 64 mrs x30, tpidrro_el0 msr tpidrro_el0, xzr .else mov x30, xzr .endif +.Lskip_tramp_vectors_cleanup\@: .endif -alternative_else_nop_endif -#endif sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK @@ -127,11 +130,15 @@ alternative_else_nop_endif mrs x0, tpidrro_el0 #endif b el\()\el\()_\label +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? 
.endm - .macro tramp_alias, dst, sym + .macro tramp_alias, dst, sym, tmp mov_q \dst, TRAMP_VALIAS - add \dst, \dst, #(\sym - .entry.tramp.text) + adr_l \tmp, \sym + add \dst, \dst, \tmp + adr_l \tmp, .entry.tramp.text + sub \dst, \dst, \tmp .endm // This macro corrupts x0-x3. It is the caller's duty @@ -249,9 +256,16 @@ alternative_else_nop_endif msr sp_el0, tsk .endif + /* Save pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] +alternative_else_nop_endif + /* * Registers that may be useful after this macro is invoked: * + * x20 - ICC_PMR_EL1 * x21 - aborted SP * x22 - aborted PC * x23 - aborted PSTATE @@ -269,6 +283,16 @@ alternative_else_nop_endif /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif + /* Restore pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + mrs_s x21, SYS_ICC_CTLR_EL1 + tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE + dsb sy // Ensure priority change is seen by redistributor +.L__skip_pmr_sync\@: +alternative_else_nop_endif + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter @@ -342,25 +366,29 @@ alternative_else_nop_endif ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] - ldr lr, [sp, #S_LR] - add sp, sp, #S_FRAME_SIZE // restore sp /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization * when returning from IPI handler, and when returning to user-space. */ .if \el == 0 -alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp + eret +alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 bne 4f - msr far_el1, x30 - tramp_alias x30, tramp_exit_native + msr far_el1, x29 + tramp_alias x30, tramp_exit_native, x29 br x30 4: - tramp_alias x30, tramp_exit_compat + tramp_alias x30, tramp_exit_compat, x29 br x30 #endif .else + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp eret .endif .endm @@ -419,6 +447,38 @@ tsk .req x28 // current thread_info irq_stack_exit .endm +#ifdef CONFIG_ARM64_PSEUDO_NMI + /* + * Set res to 0 if irqs were unmasked in interrupted context. + * Otherwise set res to non-0 value. 
+ */ + .macro test_irqs_unmasked res:req, pmr:req +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + sub \res, \pmr, #GIC_PRIO_IRQON +alternative_else + mov \res, xzr +alternative_endif + .endm +#endif + + .macro gic_prio_kentry_setup, tmp:req +#ifdef CONFIG_ARM64_PSEUDO_NMI + alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON) + msr_s SYS_ICC_PMR_EL1, \tmp + alternative_else_nop_endif +#endif + .endm + + .macro gic_prio_irq_setup, pmr:req, tmp:req +#ifdef CONFIG_ARM64_PSEUDO_NMI + alternative_if ARM64_HAS_IRQ_PRIO_MASKING + orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET + msr_s SYS_ICC_PMR_EL1, \tmp + alternative_else_nop_endif +#endif + .endm + .text /* @@ -443,7 +503,7 @@ ENTRY(vectors) kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 kernel_ventry 0, error // Error 64-bit EL0 -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 @@ -512,7 +572,7 @@ el0_error_invalid: inv_entry 0, BAD_ERROR ENDPROC(el0_error_invalid) -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 el0_fiq_invalid_compat: inv_entry 0, BAD_FIQ, 32 ENDPROC(el0_fiq_invalid_compat) @@ -548,10 +608,8 @@ el1_sync: b.eq el1_ia cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el1_undef - cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception - b.eq el1_sp_pc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el1_sp_pc + b.eq el1_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 b.eq el1_undef cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 @@ -573,9 +631,11 @@ el1_da: bl do_mem_abort kernel_exit 1 -el1_sp_pc: +el1_pc: /* - * Stack or PC alignment exception handling + * PC alignment exception handling. We don't handle SP alignment faults, + * since we will have hit a recursive exception when trying to push the + * initial pt_regs. */ mrs x0, far_el1 inherit_daif pstate=x23, tmp=x2 @@ -589,7 +649,7 @@ el1_undef: inherit_daif pstate=x23, tmp=x2 mov x0, sp bl do_undefinstr - ASM_BUG() + kernel_exit 1 el1_dbg: /* * Debug exception handling @@ -597,6 +657,7 @@ el1_dbg: cmp x24, #ESR_ELx_EC_BRK64 // if BRK64 cinc x24, x24, eq // set bit '0' tbz x24, #0, el1_inv // EL1 only + gic_prio_kentry_setup tmp=x3 mrs x0, far_el1 mov x2, sp // struct pt_regs bl do_debug_exception @@ -614,7 +675,16 @@ ENDPROC(el1_sync) .align 6 el1_irq: kernel_entry 1 + gic_prio_irq_setup pmr=x20, tmp=x1 enable_da_f + +#ifdef CONFIG_ARM64_PSEUDO_NMI + test_irqs_unmasked res=x0, pmr=x20 + cbz x0, 1f + bl asm_nmi_enter +1: +#endif + #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif @@ -623,15 +693,42 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz w24, 1f // preempt count != 0 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + /* + * DA_F were cleared at start of handling. If anything is set in DAIF, + * we come back from an NMI, so skip preemption + */ + mrs x0, daif + orr w24, w24, w0 +alternative_else_nop_endif + cbnz w24, 1f // preempt count != 0 || NMI return path ldr x0, [tsk, #TSK_TI_FLAGS] // get flags tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? bl el1_preempt 1: #endif + +#ifdef CONFIG_ARM64_PSEUDO_NMI + /* + * When using IRQ priority masking, we can get spurious interrupts while + * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a + * section with interrupts disabled. Skip tracing in those cases. 
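A small model of what test_irqs_unmasked computes: the subtraction yields zero exactly when the saved PMR equals GIC_PRIO_IRQON, i.e. interrupts were unmasked in the interrupted context. The sketch below uses placeholder priority values; the real constants live in the arm64 headers.

#include <stdbool.h>

/* Placeholder priority values; the real constants come from asm/ptrace.h. */
#define GIC_PRIO_IRQON	0xe0
#define GIC_PRIO_IRQOFF	0x60

/* Mirrors "sub res, pmr, #GIC_PRIO_IRQON": a zero result means unmasked. */
static bool irqs_were_unmasked(unsigned int saved_pmr)
{
	return (saved_pmr - GIC_PRIO_IRQON) == 0;
}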
+ */ + test_irqs_unmasked res=x0, pmr=x20 + cbz x0, 1f + bl asm_nmi_exit +1: +#endif + #ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_ARM64_PSEUDO_NMI + test_irqs_unmasked res=x0, pmr=x20 + cbnz x0, 1f +#endif bl trace_hardirqs_on +1: #endif + kernel_exit 1 ENDPROC(el1_irq) @@ -667,16 +764,16 @@ el0_sync: cmp x24, #ESR_ELx_EC_SYS64 // configurable trap b.eq el0_sys cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception - b.eq el0_sp_pc + b.eq el0_sp cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_sp_pc + b.eq el0_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 b.ge el0_dbg b el0_inv -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 .align 6 el0_sync_compat: kernel_entry 0, 32 @@ -693,7 +790,7 @@ el0_sync_compat: cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception b.eq el0_fpsimd_exc cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception - b.eq el0_sp_pc + b.eq el0_pc cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 b.eq el0_undef cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap @@ -710,6 +807,7 @@ el0_sync_compat: b.ge el0_dbg b el0_inv el0_svc_compat: + gic_prio_kentry_setup tmp=x1 mov x0, sp bl el0_svc_compat_handler b ret_to_user @@ -741,6 +839,7 @@ el0_ia: * Instruction abort handling */ mrs x26, far_el1 + gic_prio_kentry_setup tmp=x0 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off @@ -781,11 +880,16 @@ el0_fpsimd_exc: mov x1, sp bl do_fpsimd_exc b ret_to_user +el0_sp: + ldr x26, [sp, #S_SP] + b el0_sp_pc +el0_pc: + mrs x26, far_el1 el0_sp_pc: /* * Stack or PC alignment exception handling */ - mrs x26, far_el1 + gic_prio_kentry_setup tmp=x0 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off @@ -820,11 +924,12 @@ el0_dbg: * Debug exception handling */ tbnz x24, #0, el0_inv // EL0 only + gic_prio_kentry_setup tmp=x3 mrs x0, far_el1 mov x1, x25 mov x2, sp bl do_debug_exception - enable_daif + enable_da_f ct_user_exit b ret_to_user el0_inv: @@ -841,7 +946,9 @@ ENDPROC(el0_sync) el0_irq: kernel_entry 0 el0_irq_naked: + gic_prio_irq_setup pmr=x20, tmp=x0 enable_da_f + #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif @@ -863,6 +970,7 @@ ENDPROC(el0_irq) el1_error: kernel_entry 1 mrs x1, esr_el1 + gic_prio_kentry_setup tmp=x2 enable_dbg mov x0, sp bl do_serror @@ -873,10 +981,11 @@ el0_error: kernel_entry 0 el0_error_naked: mrs x1, esr_el1 + gic_prio_kentry_setup tmp=x2 enable_dbg mov x0, sp bl do_serror - enable_daif + enable_da_f ct_user_exit b ret_to_user ENDPROC(el0_error) @@ -897,6 +1006,7 @@ work_pending: */ ret_to_user: disable_daif + gic_prio_kentry_setup tmp=x3 ldr x1, [tsk, #TSK_TI_FLAGS] and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending @@ -913,6 +1023,7 @@ ENDPROC(ret_to_user) */ .align 6 el0_svc: + gic_prio_kentry_setup tmp=x1 mov x0, sp bl el0_svc_handler b ret_to_user @@ -920,12 +1031,7 @@ ENDPROC(el0_svc) .popsection // .entry.text -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -/* - * Exception vectors trampoline. 
- */ - .pushsection ".entry.tramp.text", "ax" - + // Move from tramp_pg_dir to swapper_pg_dir .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE) @@ -957,12 +1063,47 @@ alternative_else_nop_endif */ .endm - .macro tramp_ventry, regsize = 64 + .macro tramp_data_page dst + adr_l \dst, .entry.tramp.text + sub \dst, \dst, PAGE_SIZE + .endm + + .macro tramp_data_read_var dst, var +#ifdef CONFIG_RANDOMIZE_BASE + tramp_data_page \dst + add \dst, \dst, #:lo12:__entry_tramp_data_\var + ldr \dst, [\dst] +#else + ldr \dst, =\var +#endif + .endm + +#define BHB_MITIGATION_NONE 0 +#define BHB_MITIGATION_LOOP 1 +#define BHB_MITIGATION_FW 2 +#define BHB_MITIGATION_INSN 3 + + .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif + + .if \bhb == BHB_MITIGATION_LOOP + /* + * This sequence must appear before the first indirect branch. i.e. the + * ret out of tramp_ventry. It appears here because x30 is free. + */ + __mitigate_spectre_bhb_loop x30 + .endif // \bhb == BHB_MITIGATION_LOOP + + .if \bhb == BHB_MITIGATION_INSN + clearbhb + isb + .endif // \bhb == BHB_MITIGATION_INSN + + .if \kpti == 1 /* * Defend against branch aliasing attacks by pushing a dummy * entry onto the return stack and using a RET instruction to @@ -972,43 +1113,75 @@ alternative_else_nop_endif b . 2: tramp_map_kernel x30 -#ifdef CONFIG_RANDOMIZE_BASE - adr x30, tramp_vectors + PAGE_SIZE alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 - ldr x30, [x30] -#else - ldr x30, =vectors -#endif - prfm plil1strm, [x30, #(1b - tramp_vectors)] + tramp_data_read_var x30, vectors + prfm plil1strm, [x30, #(1b - \vector_start)] msr vbar_el1, x30 - add x30, x30, #(1b - tramp_vectors) isb + .else + ldr x30, =vectors + .endif // \kpti == 1 + + .if \bhb == BHB_MITIGATION_FW + /* + * The firmware sequence must appear before the first indirect branch. + * i.e. the ret out of tramp_ventry. But it also needs the stack to be + * mapped to save/restore the registers the SMC clobbers. + */ + __mitigate_spectre_bhb_fw + .endif // \bhb == BHB_MITIGATION_FW + + add x30, x30, #(1b - \vector_start + 4) ret +.org 1b + 128 // Did we overflow the ventry slot? .endm .macro tramp_exit, regsize = 64 - adr x30, tramp_vectors + tramp_data_read_var x30, this_cpu_vector +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + mrs x29, tpidr_el1 +alternative_else + mrs x29, tpidr_el2 +alternative_endif + ldr x30, [x30, x29] + msr vbar_el1, x30 - tramp_unmap_kernel x30 + ldr lr, [sp, #S_LR] + tramp_unmap_kernel x29 .if \regsize == 64 - mrs x30, far_el1 + mrs x29, far_el1 .endif + add sp, sp, #S_FRAME_SIZE // restore sp eret .endm - .align 11 -ENTRY(tramp_vectors) + .macro generate_tramp_vector, kpti, bhb +.Lvector_start\@: .space 0x400 - tramp_ventry - tramp_ventry - tramp_ventry - tramp_ventry + .rept 4 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb + .endr + .endm - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + * The order must match __bp_harden_el1_vectors and the + * arm64_bp_harden_el1_vectors enum. 
+ */ + .pushsection ".entry.tramp.text", "ax" + .align 11 +ENTRY(tramp_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE END(tramp_vectors) ENTRY(tramp_exit_native) @@ -1026,11 +1199,55 @@ END(tramp_exit_compat) .align PAGE_SHIFT .globl __entry_tramp_data_start __entry_tramp_data_start: +__entry_tramp_data_vectors: .quad vectors +#ifdef CONFIG_ARM_SDE_INTERFACE +__entry_tramp_data___sdei_asm_handler: + .quad __sdei_asm_handler +#endif /* CONFIG_ARM_SDE_INTERFACE */ +__entry_tramp_data_this_cpu_vector: + .quad this_cpu_vector .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +/* + * Exception vectors for spectre mitigations on entry from EL1 when + * kpti is not in use. + */ + .macro generate_el1_vector, bhb +.Lvector_start\@: + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t + + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error // Error EL1h + + .rept 4 + tramp_ventry .Lvector_start\@, 64, 0, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, 0, \bhb + .endr + .endm + +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ + .pushsection ".entry.text", "ax" + .align 11 +ENTRY(__bp_harden_el1_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_el1_vector bhb=BHB_MITIGATION_LOOP + generate_el1_vector bhb=BHB_MITIGATION_FW + generate_el1_vector bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ +END(__bp_harden_el1_vectors) + .popsection + + /* * Register switch for AArch64. The callee-saved registers need to be saved * and restored. 
On entry: @@ -1117,13 +1334,7 @@ ENTRY(__sdei_asm_entry_trampoline) */ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] -#ifdef CONFIG_RANDOMIZE_BASE - adr x4, tramp_vectors + PAGE_SIZE - add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler - ldr x4, [x4] -#else - ldr x4, =__sdei_asm_handler -#endif + tramp_data_read_var x4, __sdei_asm_handler br x4 ENDPROC(__sdei_asm_entry_trampoline) NOKPROBE(__sdei_asm_entry_trampoline) @@ -1146,12 +1357,6 @@ ENDPROC(__sdei_asm_exit_trampoline) NOKPROBE(__sdei_asm_exit_trampoline) .ltorg .popsection // .entry.tramp.text -#ifdef CONFIG_RANDOMIZE_BASE -.pushsection ".rodata", "a" -__sdei_asm_trampoline_next_handler: - .quad __sdei_asm_handler -.popsection // .rodata -#endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* @@ -1247,7 +1452,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 br x5 #endif ENDPROC(__sdei_asm_handler) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 58c53bc969289ac9dc85951286986db4d58af38b..bb048144c3bd10c5a4882fcf3ade6ffd09876560 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -218,6 +219,7 @@ static void sve_free(struct task_struct *task) static void task_fpsimd_load(void) { WARN_ON(!in_softirq() && !irqs_disabled()); + WARN_ON(!system_supports_fpsimd()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) sve_load_state(sve_pffr(¤t->thread), @@ -238,6 +240,7 @@ void fpsimd_save(void) struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + WARN_ON(!system_supports_fpsimd()); WARN_ON(!in_softirq() && !irqs_disabled()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { @@ -355,6 +358,23 @@ static int __init sve_sysctl_init(void) { return 0; } #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) +#ifdef CONFIG_CPU_BIG_ENDIAN +static __uint128_t arm64_cpu_to_le128(__uint128_t x) +{ + u64 a = swab64(x); + u64 b = swab64(x >> 64); + + return ((__uint128_t)a << 64) | b; +} +#else +static __uint128_t arm64_cpu_to_le128(__uint128_t x) +{ + return x; +} +#endif + +#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) + /* * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to * task->thread.sve_state. 
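The arm64_cpu_to_le128() helper added above (used on big-endian builds only; little-endian builds get an identity function) byte-swaps each 64-bit half and exchanges their positions. A standalone sketch of the same construction, assuming the GCC/Clang __uint128_t extension and glibc's byteswap.h:

#include <stdint.h>
#include <byteswap.h>

/* Build the little-endian image of a 128-bit value on a big-endian host. */
static __uint128_t cpu_to_le128(__uint128_t x)
{
	uint64_t lo = bswap_64((uint64_t)x);		/* low 64 bits, byte-swapped  */
	uint64_t hi = bswap_64((uint64_t)(x >> 64));	/* high 64 bits, byte-swapped */

	/* The swapped halves also change places, as in the kernel helper. */
	return ((__uint128_t)lo << 64) | hi;
}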
@@ -372,14 +392,16 @@ static void fpsimd_to_sve(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; unsigned int i; + __uint128_t *p; if (!system_supports_sve()) return; vq = sve_vq_from_vl(task->thread.sve_vl); - for (i = 0; i < 32; ++i) - memcpy(ZREG(sst, vq, i), &fst->vregs[i], - sizeof(fst->vregs[i])); + for (i = 0; i < 32; ++i) { + p = (__uint128_t *)ZREG(sst, vq, i); + *p = arm64_cpu_to_le128(fst->vregs[i]); + } } /* @@ -398,14 +420,16 @@ static void sve_to_fpsimd(struct task_struct *task) void const *sst = task->thread.sve_state; struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; unsigned int i; + __uint128_t const *p; if (!system_supports_sve()) return; vq = sve_vq_from_vl(task->thread.sve_vl); - for (i = 0; i < 32; ++i) - memcpy(&fst->vregs[i], ZREG(sst, vq, i), - sizeof(fst->vregs[i])); + for (i = 0; i < 32; ++i) { + p = (__uint128_t const *)ZREG(sst, vq, i); + fst->vregs[i] = arm64_le128_to_cpu(*p); + } } #ifdef CONFIG_ARM64_SVE @@ -432,7 +456,7 @@ size_t sve_state_size(struct task_struct const *task) void sve_alloc(struct task_struct *task) { if (task->thread.sve_state) { - memset(task->thread.sve_state, 0, sve_state_size(current)); + memset(task->thread.sve_state, 0, sve_state_size(task)); return; } @@ -494,6 +518,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) void *sst = task->thread.sve_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; unsigned int i; + __uint128_t *p; if (!test_tsk_thread_flag(task, TIF_SVE)) return; @@ -502,9 +527,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task) memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); - for (i = 0; i < 32; ++i) - memcpy(ZREG(sst, vq, i), &fst->vregs[i], - sizeof(fst->vregs[i])); + for (i = 0; i < 32; ++i) { + p = (__uint128_t *)ZREG(sst, vq, i); + *p = arm64_cpu_to_le128(fst->vregs[i]); + } } int sve_set_vector_length(struct task_struct *task, @@ -977,6 +1003,7 @@ void fpsimd_bind_task_to_cpu(void) struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); last->st = ¤t->thread.uw.fpsimd_state; current->thread.fpsimd_cpu = smp_processor_id(); @@ -996,6 +1023,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); + WARN_ON(!system_supports_fpsimd()); WARN_ON(!in_softirq() && !irqs_disabled()); last->st = st; @@ -1008,8 +1036,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) */ void fpsimd_restore_current_state(void) { - if (!system_supports_fpsimd()) + /* + * For the tasks that were created before we detected the absence of + * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), + * e.g, init. This could be then inherited by the children processes. + * If we later detect that the system doesn't support FP/SIMD, + * we must clear the flag for all the tasks to indicate that the + * FPSTATE is clean (as we can't have one) to avoid looping for ever in + * do_notify_resume(). 
+ */ + if (!system_supports_fpsimd()) { + clear_thread_flag(TIF_FOREIGN_FPSTATE); return; + } local_bh_disable(); @@ -1028,7 +1067,7 @@ void fpsimd_restore_current_state(void) */ void fpsimd_update_current_state(struct user_fpsimd_state const *state) { - if (!system_supports_fpsimd()) + if (WARN_ON(!system_supports_fpsimd())) return; local_bh_disable(); @@ -1055,6 +1094,7 @@ void fpsimd_flush_task_state(struct task_struct *t) void fpsimd_flush_cpu_state(void) { + WARN_ON(!system_supports_fpsimd()); __this_cpu_write(fpsimd_last_state.st, NULL); set_thread_flag(TIF_FOREIGN_FPSTATE); } diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 50986e388d2b27e92f6984914af4ce756ea0ee46..4e511d0663b630aecd5881f02505463f2112903e 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -76,9 +76,21 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - struct plt_entry trampoline; struct module *mod; + /* + * There is only one ftrace trampoline per module. For now, + * this is not a problem since on arm64, all dynamic ftrace + * invocations are routed via ftrace_caller(). This will need + * to be revisited if support for multiple ftrace entry points + * is added in the future, but for now, the pr_err() below + * deals with a theoretical issue only. + */ + if (addr != FTRACE_ADDR) { + pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); + return -EINVAL; + } + /* * On kernels that support module PLTs, the offset between the * branch instruction and its target may legally exceed the @@ -96,32 +108,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (WARN_ON(!mod)) return -EINVAL; - /* - * There is only one ftrace trampoline per module. For now, - * this is not a problem since on arm64, all dynamic ftrace - * invocations are routed via ftrace_caller(). This will need - * to be revisited if support for multiple ftrace entry points - * is added in the future, but for now, the pr_err() below - * deals with a theoretical issue only. 
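The range check in ftrace_make_call() above decides whether a direct branch can be patched in: an AArch64 BL encodes a signed 26-bit immediate scaled by 4, so the target must lie within +/-128 MiB of the call site, otherwise the module's PLT trampoline is used. A minimal sketch of that check in standalone C, with SZ_128M written out since the kernel macro is not available here:

#include <stdint.h>
#include <stdbool.h>

#define SZ_128M	(128UL * 1024 * 1024)

/* An AArch64 BL has a signed 26-bit immediate scaled by 4: reach is +/-128 MiB. */
static bool bl_can_reach(uintptr_t pc, uintptr_t target)
{
	intptr_t offset = (intptr_t)(target - pc);

	return offset >= -(intptr_t)SZ_128M && offset < (intptr_t)SZ_128M;
}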
- */ - trampoline = get_plt_entry(addr); - if (!plt_entries_equal(mod->arch.ftrace_trampoline, - &trampoline)) { - if (!plt_entries_equal(mod->arch.ftrace_trampoline, - &(struct plt_entry){})) { - pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); - return -EINVAL; - } - - /* point the trampoline to our ftrace entry point */ - module_disable_ro(mod); - *mod->arch.ftrace_trampoline = trampoline; - module_enable_ro(mod, true); - - /* update trampoline before patching in the branch */ - smp_wmb(); - } - addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; + addr = (unsigned long)mod->arch.ftrace_trampoline; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ @@ -216,8 +203,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, { unsigned long return_hooker = (unsigned long)&return_to_handler; unsigned long old; - struct ftrace_graph_ent trace; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; @@ -229,18 +214,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, NULL); - if (err == -EBUSY) - return; - else + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) *parent = return_hooker; } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index b0853069702f73b1597b3b44d3d5282373a9c47c..9f083b11efe255855e99fe625bc6717002363d93 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -202,7 +202,7 @@ ENDPROC(preserve_boot_args) * to be composed of multiple pages. (This effectively scales the end index). * * vstart: virtual address of start of range - * vend: virtual address of end of range + * vend: virtual address of end of range - we map [vstart, vend] * shift: shift used to transform virtual address into index * ptrs: number of entries in page table * istart: index in table corresponding to vstart @@ -239,17 +239,18 @@ ENDPROC(preserve_boot_args) * * tbl: location of page table * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE) - * vstart: start address to map - * vend: end address to map - we map [vstart, vend] + * vstart: virtual address of start of range + * vend: virtual address of end of range - we map [vstart, vend - 1] * flags: flags to use to map last level entries * phys: physical address corresponding to vstart - physical memory is contiguous * pgds: the number of pgd entries * * Temporaries: istart, iend, tmp, count, sv - these need to be different registers - * Preserves: vstart, vend, flags - * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv + * Preserves: vstart, flags + * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv */ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv + sub \vend, \vend, #1 add \rtbl, \tbl, #PAGE_SIZE mov \sv, \rtbl mov \count, #0 @@ -494,10 +495,9 @@ ENTRY(el2_setup) #endif /* Hyp configuration. 
*/ - mov x0, #HCR_RW // 64-bit EL1 + mov_q x0, HCR_HOST_NVHE_FLAGS cbz x2, set_hcr - orr x0, x0, #HCR_TGE // Enable Host Extensions - orr x0, x0, #HCR_E2H + mov_q x0, HCR_HOST_VHE_FLAGS set_hcr: msr hcr_el2, x0 isb @@ -523,8 +523,7 @@ set_hcr: /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 - cmp x0, #1 - b.ne 3f + cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 @@ -544,7 +543,7 @@ set_hcr: msr vpidr_el2, x0 msr vmpidr_el2, x1 -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 msr hstr_el2, xzr // Disable CP15 traps to EL2 #endif @@ -669,7 +668,7 @@ ENTRY(__boot_cpu_mode) * with MMU turned off. */ ENTRY(__early_cpu_boot_status) - .long 0 + .quad 0 .popsection @@ -705,6 +704,7 @@ secondary_startup: /* * Common entry point for secondary CPUs. */ + bl __cpu_secondary_check52bitva bl __cpu_setup // initialise processor bl __enable_mmu ldr x8, =__secondary_switched @@ -781,6 +781,31 @@ ENTRY(__enable_mmu) ret ENDPROC(__enable_mmu) +ENTRY(__cpu_secondary_check52bitva) +#ifdef CONFIG_ARM64_52BIT_VA + ldr_l x0, vabits_user + cmp x0, #52 + b.ne 2f + + mrs_s x0, SYS_ID_AA64MMFR2_EL1 + and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + cbnz x0, 2f + + adr_l x0, va52mismatch + mov w1, #1 + strb w1, [x0] + dmb sy + dc ivac, x0 // Invalidate potentially stale cache line + + update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1 +1: wfe + wfi + b 1b + +#endif +2: ret +ENDPROC(__cpu_secondary_check52bitva) + __no_granule_support: /* Indicate that this CPU can't boot and is stuck in the kernel */ update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2 @@ -843,6 +868,7 @@ __primary_switch: tlbi vmalle1 // Remove any stale TLB entries dsb nsh + isb msr sctlr_el1, x19 // re-enable the MMU isb diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 6b2686d54411fdc0a92e3d4cda2dd38f5b21d40b..9859e1178e6bed174835ccb90b355f9f43c0a5f8 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length, } memcpy((void *)dst, src_start, length); - flush_icache_range(dst, dst + length); + __flush_icache_range(dst, dst + length); pgdp = pgd_offset_raw(allocator(mask), dst_addr); if (pgd_none(READ_ONCE(*pgdp))) { @@ -299,8 +299,10 @@ int swsusp_arch_suspend(void) dcache_clean_range(__idmap_text_start, __idmap_text_end); /* Clean kvm setup code to PoC? */ - if (el2_reset_needed()) + if (el2_reset_needed()) { dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); + dcache_clean_range(__hyp_text_start, __hyp_text_end); + } /* make the crash dump kernel image protected again */ crash_post_resume(); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 8c9644376326fe96f05645d298c1b4fcd383d652..bc83afe54134e27b3682ac5a803ebb3971f12a06 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -168,7 +168,7 @@ enum hw_breakpoint_ops { HW_BREAKPOINT_RESTORE }; -static int is_compat_bp(struct perf_event *bp) +static int is_a32_compat_bp(struct perf_event *bp) { struct task_struct *tsk = bp->hw.target; @@ -179,7 +179,7 @@ static int is_compat_bp(struct perf_event *bp) * deprecated behaviour if we use unaligned watchpoints in * AArch64 state. 
*/ - return tsk && is_compat_thread(task_thread_info(tsk)); + return tsk && is_a32_compat_thread(task_thread_info(tsk)); } /** @@ -478,7 +478,7 @@ static int arch_build_bp_info(struct perf_event *bp, * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { - if (is_compat_bp(bp)) { + if (is_a32_compat_bp(bp)) { if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; @@ -536,7 +536,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, * AArch32 tasks expect some simple alignment fixups, so emulate * that here. */ - if (is_compat_bp(bp)) { + if (is_a32_compat_bp(bp)) { if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else @@ -547,13 +547,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Aligned */ break; case 1: - /* Allow single byte watchpoint. */ - if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) - break; case 2: /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; + case 3: + /* Allow single byte watchpoint. */ + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; default: return -EINVAL; } @@ -736,6 +737,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, return 0; } +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = is_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. + */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -745,7 +767,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, u64 val; struct perf_event *wp, **slots; struct debug_info *debug_info; - struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); @@ -783,25 +804,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, if (dist != 0) continue; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; + step = watchpoint_report(wp, addr, regs); } - if (min_dist > 0 && min_dist != -1) { - /* No exact match found. */ - wp = slots[closest_match]; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; - } + /* No exact match found? 
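If nothing matched the faulting address exactly, the handler falls back to the closest watchpoint it measured, slots[closest_match], so the access is still reported through watchpoint_report() and single-stepped when required.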
*/ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + rcu_read_unlock(); if (!step) diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index e1261fbaa374e02471214664df742cb26c4da1d7..17f325ba831e8299131415390a2b40494412709d 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -28,6 +28,8 @@ #include .text + .pushsection .hyp.text, "ax" + .align 11 ENTRY(__hyp_stub_vectors) diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index a820ed07fb80035405ace44f36a6a05418730a58..eff6a564ab8081c5726b4dce0258ff8d350dc9f1 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -73,17 +73,11 @@ #ifdef CONFIG_EFI -__efistub_stext_offset = stext - _text; - /* - * Prevent the symbol aliases below from being emitted into the kallsyms - * table, by forcing them to be absolute symbols (which are conveniently - * ignored by scripts/kallsyms) rather than section relative symbols. - * The distinction is only relevant for partial linking, and only for symbols - * that are defined within a section declaration (which is not the case for - * the definitions below) so the resulting values will be identical. + * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: + * https://github.com/ClangBuiltLinux/linux/issues/561 */ -#define KALLSYMS_HIDE(sym) ABSOLUTE(sym) +__efistub_stext_offset = ABSOLUTE(stext - _text); /* * The EFI stub has its own symbol namespace prefixed by __efistub_, to @@ -94,28 +88,28 @@ __efistub_stext_offset = stext - _text; * linked at. The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp); -__efistub_memchr = KALLSYMS_HIDE(__pi_memchr); -__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub_memset = KALLSYMS_HIDE(__pi_memset); -__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); -__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen); -__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); -__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); -__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr); -__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strnlen = __pi_strnlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub_strrchr = __pi_strrchr; +__efistub___flush_dcache_area = __pi___flush_dcache_area; #ifdef CONFIG_KASAN -__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy); -__efistub___memmove = KALLSYMS_HIDE(__pi_memmove); -__efistub___memset = KALLSYMS_HIDE(__pi_memset); +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; #endif -__efistub__text = KALLSYMS_HIDE(_text); -__efistub__end = KALLSYMS_HIDE(_end); -__efistub__edata = KALLSYMS_HIDE(_edata); -__efistub_screen_info = KALLSYMS_HIDE(screen_info); +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; #endif diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 2b3413549734522cae30cf1dcd36093245c9939b..cd37edbdedcb7395ed8aeeea968adff0825ac84f 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -734,6 +734,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, state); 
} +u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result, + enum aarch64_insn_register address, + enum aarch64_insn_register value, + enum aarch64_insn_size_type size) +{ + u32 insn = aarch64_insn_get_ldadd_value(); + + switch (size) { + case AARCH64_INSN_SIZE_32: + case AARCH64_INSN_SIZE_64: + break; + default: + pr_err("%s: unimplemented size encoding %d\n", __func__, size); + return AARCH64_BREAK_FAULT; + } + + insn = aarch64_insn_encode_ldst_size(size, insn); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, + result); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + address); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, + value); +} + +u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address, + enum aarch64_insn_register value, + enum aarch64_insn_size_type size) +{ + /* + * STADD is simply encoded as an alias for LDADD with XZR as + * the destination register. + */ + return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address, + value, size); +} + static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type, enum aarch64_insn_prfm_target target, enum aarch64_insn_prfm_policy policy, @@ -1450,16 +1490,10 @@ static u32 aarch64_encode_immediate(u64 imm, u32 insn) { unsigned int immr, imms, n, ones, ror, esz, tmp; - u64 mask = ~0UL; - - /* Can't encode full zeroes or full ones */ - if (!imm || !~imm) - return AARCH64_BREAK_FAULT; + u64 mask; switch (variant) { case AARCH64_INSN_VARIANT_32BIT: - if (upper_32_bits(imm)) - return AARCH64_BREAK_FAULT; esz = 32; break; case AARCH64_INSN_VARIANT_64BIT: @@ -1471,6 +1505,12 @@ static u32 aarch64_encode_immediate(u64 imm, return AARCH64_BREAK_FAULT; } + mask = GENMASK(esz - 1, 0); + + /* Can't encode full zeroes, full ones, or value wider than the mask */ + if (!imm || imm == mask || imm & ~mask) + return AARCH64_BREAK_FAULT; + /* * Inverse of Replicate(). Try to spot a repeating pattern * with a pow2 stride. diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 780a12f59a8f8c3426c3a4274e32ae9c3d829ab0..e8daa7aa77bcf063b9a8d60f2a9bbc599e14e151 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -27,12 +27,17 @@ #include #include #include +#include #include #include +#include #include unsigned long irq_err_count; +/* Only access this in an NMI enter/exit */ +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); + DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); int arch_show_interrupts(struct seq_file *p, int prec) @@ -72,4 +77,28 @@ void __init init_IRQ(void) irqchip_init(); if (!handle_arch_irq) panic("No interrupt controller found."); + + if (system_uses_irq_prio_masking()) { + /* + * Now that we have a stack for our IRQ handler, set + * the PMR/PSR pair to a consistent state. 
+ */ + WARN_ON(read_sysreg(daif) & PSR_A_BIT); + local_daif_restore(DAIF_PROCCTX_NOIRQ); + } +} + +/* + * Stubs to make nmi_enter/exit() code callable from ASM + */ +asmlinkage void notrace asm_nmi_enter(void) +{ + nmi_enter(); +} +NOKPROBE_SYMBOL(asm_nmi_enter); + +asmlinkage void notrace asm_nmi_exit(void) +{ + nmi_exit(); } +NOKPROBE_SYMBOL(asm_nmi_exit); diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index e0756416e567ec7e5bc4043b9ea82bf89b987868..b90754aebd12a66c9ae63829cf05d895506d9790 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -20,8 +20,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -49,5 +47,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, * NOP needs to be replaced by a branch. */ } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index f0e6ab8abe9c9657e3103dbc84cf08c0a4242a82..06941c1fe418e0b2df2a8916ea90835ac748e6f6 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init const u8 *get_cmdline(void *fdt) +static __init const u8 *kaslr_get_cmdline(void *fdt) { static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; @@ -87,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); /* * Try to map the FDT early. If this fails, we simply bail, @@ -109,7 +111,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. */ - cmdline = get_cmdline(fdt); + cmdline = kaslr_get_cmdline(fdt); str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) return 0; @@ -143,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys) if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { /* - * Randomize the module region over a 4 GB window covering the + * Randomize the module region over a 2 GB window covering the * kernel. This reduces the risk of modules leaking information * about the address of the kernel itself, but results in * branches between modules and the core kernel that are * resolved via PLTs. (Branches between modules will be * resolved normally.) 
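The window is 2 GB (SZ_2G) rather than the previous 4 GB so that every module stays within reach of a signed 32-bit place-relative reference to the kernel image, which lines up with the stricter R_AARCH64_PREL32 overflow checks this patch adds to reloc_data() in module.c.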
*/ - module_range = SZ_4G - (u64)(_end - _stext); - module_alloc_base = max((u64)_end + offset - SZ_4G, + module_range = SZ_2G - (u64)(_end - _stext); + module_alloc_base = max((u64)_end + offset - SZ_2G, (u64)MODULES_VADDR); } else { /* @@ -169,5 +171,8 @@ u64 __init kaslr_early_init(u64 dt_phys) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); + return offset; } diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index a20de58061a8e62b271a17a6da700ffd884db620..5ba179b3ebe4df351dbb54bf88ab6652c6a253bb 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -244,27 +245,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_brk_fn) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) { - if (!kgdb_single_step) + if (user_mode(regs) || !kgdb_single_step) return DBG_HOOK_ERROR; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); @@ -291,6 +298,9 @@ static void kgdb_call_nmi_hook(void *ignored) void kgdb_roundup_cpus(unsigned long flags) { + if (gic_prio_masking_enabled()) + gic_arch_enable_irqs(); + local_irq_enable(); smp_call_function(kgdb_call_nmi_hook, NULL, 0); local_irq_disable(); diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..235e6f8b6719722a39042f948f2813f9e70453c6 --- /dev/null +++ b/arch/arm64/kernel/livepatch.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * livepatch.c - arm64-specific Kernel Live Patching Core + * + * Copyright (C) 2014 Li Bin + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#include +#include +#endif + +#ifdef CONFIG_ARM64_MODULE_PLTS +static inline bool offset_in_range(unsigned long pc, unsigned long addr, + long range) +{ + long offset = addr - pc; + + return (offset >= -range && offset < range); +} +#endif + +#define LJMP_INSN_SIZE 4 + +struct klp_func_node { + struct list_head node; + struct list_head func_stack; + unsigned long old_addr; +#ifdef CONFIG_ARM64_MODULE_PLTS + u32 old_insns[LJMP_INSN_SIZE]; +#else + u32 old_insn; +#endif +}; + +static LIST_HEAD(klp_func_list); + +static struct klp_func_node *klp_find_func_node(unsigned long old_addr) +{ + struct klp_func_node *func_node; + + list_for_each_entry(func_node, &klp_func_list, node) { + if (func_node->old_addr == old_addr) + return func_node; + } + + return NULL; +} + +struct walk_stackframe_args { + struct klp_patch *patch; + int enable; + int ret; +}; + +static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, + unsigned long func_size, const char *func_name) +{ + if (pc >= func_addr && pc < func_addr + func_size) { + pr_err("func %s is in use!\n", func_name); + return -EBUSY; + } + return 0; +} + +static int klp_check_activeness_func(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + struct klp_patch *patch = args->patch; + struct klp_object *obj; + struct klp_func *func; + unsigned long func_addr, func_size; + const char *func_name; + struct klp_func_node *func_node; + + if (args->ret) + return args->ret; + + for (obj = patch->objs; obj->funcs; obj++) { + for (func = obj->funcs; func->old_name; func++) { + if (args->enable) { + if (func->force) + continue; + + /* + * When enable, checking the currently + * active functions. + */ + func_node = klp_find_func_node(func->old_addr); + if (!func_node || + list_empty(&func_node->func_stack)) { + func_addr = func->old_addr; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + } else { + /* + * When disable, check for the function + * itself which to be unpatched. + */ + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + } + func_name = func->old_name; + args->ret = klp_compare_address(frame->pc, func_addr, + func_size, func_name); + if (args->ret) + return args->ret; + } + } + + return args->ret; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + struct task_struct *g, *t; + struct stackframe frame; + int ret = 0; + + struct walk_stackframe_args args = { + .patch = patch, + .enable = enable, + .ret = 0 + }; + + for_each_process_thread(g, t) { + /* + * Handle the current carefully on each CPUs, we shouldn't + * use saved FP and PC when backtrace current. It's difficult + * to backtrack other CPU currents here. 
But fortunately, + * all CPUs will stay in this function, so the current's + * backtrace is so similar + */ + if (t == current) { + /* current on this CPU */ + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.pc = (unsigned long)klp_check_calltrace; + } else if (strncmp(t->comm, "migration/", 10) == 0) { + /* + * current on other CPU + * we call this in stop_machine, so the current + * of each CPUs is mirgation, just compare the + * task_comm here, because we can't get the + * cpu_curr(task_cpu(t))). This assumes that no + * other thread will pretend to be a stopper via + * task_comm.  + */ + continue; + } else { + frame.fp = thread_saved_fp(t); + frame.pc = thread_saved_pc(t); + } +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + frame.graph = t->curr_ret_stack; +#endif + walk_stackframe(t, &frame, klp_check_activeness_func, &args); + if (args.ret) { + ret = args.ret; + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL); + goto out; + } + } + +out: + return ret; +} + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + unsigned long pc, new_addr; + u32 insn; + u32 memory_flag = 0; +#ifdef CONFIG_ARM64_MODULE_PLTS + int i; + u32 insns[LJMP_INSN_SIZE]; +#endif + + func_node = klp_find_func_node(func->old_addr); + if (!func_node) { + func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC); + if (!func_node) + return -ENOMEM; + memory_flag = 1; + + INIT_LIST_HEAD(&func_node->func_stack); + func_node->old_addr = func->old_addr; + +#ifdef CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) { + aarch64_insn_read(((u32 *)func->old_addr) + i, + &func_node->old_insns[i]); + } +#else + aarch64_insn_read((void *)func->old_addr, &func_node->old_insn); +#endif + + list_add_rcu(&func_node->node, &klp_func_list); + } + + list_add_rcu(&func->stack_node, &func_node->func_stack); + + pc = func->old_addr; + new_addr = (unsigned long)func->new_func; + +#ifdef CONFIG_ARM64_MODULE_PLTS + if (offset_in_range(pc, new_addr, SZ_128M)) { + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + if (aarch64_insn_patch_text_nosync((void *)pc, insn)) + goto ERR_OUT; + } else { + insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; + insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; + insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; + insns[3] = 0xd61f0200; + for (i = 0; i < LJMP_INSN_SIZE; i++) { + if (aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i])) + goto ERR_OUT; + } + } +#else + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + if (aarch64_insn_patch_text_nosync((void *)pc, insn)) + goto ERR_OUT; +#endif + return 0; +ERR_OUT: + if (memory_flag) { + list_del_rcu(&func->stack_node); + list_del_rcu(&func_node->node); + kfree(func_node); + } + + return -EPERM; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long pc, new_addr; + u32 insn; +#ifdef CONFIG_ARM64_MODULE_PLTS + int i; + u32 insns[LJMP_INSN_SIZE]; +#endif + + func_node = klp_find_func_node(func->old_addr); + if (WARN_ON(!func_node)) + return; + + pc = func_node->old_addr; + if (list_is_singular(&func_node->func_stack)) { +#ifdef CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) + insns[i] = func_node->old_insns[i]; +#else + insn = func_node->old_insn; +#endif + list_del_rcu(&func->stack_node); + list_del_rcu(&func_node->node); + kfree(func_node); + +#ifdef 
CONFIG_ARM64_MODULE_PLTS + for (i = 0; i < LJMP_INSN_SIZE; i++) { + aarch64_insn_patch_text_nosync(((u32 *)pc) + i, + insns[i]); + } +#else + aarch64_insn_patch_text_nosync((void *)pc, insn); +#endif + } else { + list_del_rcu(&func->stack_node); + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + if (WARN_ON(!next_func)) + return; + + new_addr = (unsigned long)next_func->new_func; +#ifdef CONFIG_ARM64_MODULE_PLTS + if (offset_in_range(pc, new_addr, SZ_128M)) { + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + aarch64_insn_patch_text_nosync((void *)pc, insn); + } else { + insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; + insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; + insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; + insns[3] = 0xd61f0200; + for (i = 0; i < LJMP_INSN_SIZE; i++) + aarch64_insn_patch_text_nosync(((u32 *)pc) + i, + insns[i]); + } +#else + insn = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + + aarch64_insn_patch_text_nosync((void *)pc, insn); +#endif + } +} + +#ifdef CONFIG_ARM64_MODULE_PLTS +/* return 0 if the func can be patched */ +int arch_klp_func_can_patch(struct klp_func *func) +{ + unsigned long pc = func->old_addr; + unsigned long new_addr = (unsigned long)func->new_func; + unsigned long old_size = func->old_size; + + if ((long)old_size <= 0) + return -EINVAL; + + if (!offset_in_range(pc, new_addr, SZ_128M) && + (old_size < LJMP_INSN_SIZE * sizeof(u32))) { + pr_err("func %s size less than limit\n", func->old_name); + return -EPERM; + } + return 0; +} +#else +int arch_klp_func_can_patch(struct klp_func *func) +{ + return 0; +} +#endif /* #ifdef CONFIG_ARM64_MODULE_PLTS */ +#endif + + +/* Apply per-object alternatives. Based on arm64 module_finalize() */ +void arch_klp_init_object_loaded(struct klp_patch *patch, + struct klp_object *obj) +{ + int cnt; + struct klp_modinfo *info; + Elf_Shdr *s, *alt = NULL; + void *aseg; + const char *objname; + char sec_objname[MODULE_NAME_LEN]; + char secname[KSYM_NAME_LEN]; + + info = patch->mod->klp_info; + objname = obj->name ? obj->name : "vmlinux"; + + /* See livepatch core code for BUILD_BUG_ON() explanation */ + BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128); + + for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) { + /* Apply per-object .klp.arch sections */ + cnt = sscanf(info->secstrings + s->sh_name, + ".klp.arch.%55[^.].%127s", + sec_objname, secname); + + if (cnt != 2) + continue; + if (strcmp(sec_objname, objname)) + continue; + if (!strcmp(".altinstructions", secname)) + alt = s; + } + + if (alt) { + aseg = (void *) alt->sh_addr; + apply_alternatives_module(aseg, alt->sh_size); + } +} diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 922add8adb7498ff4609725782d09a56b46781d6..b41d6d482704e3e3002494fae61f82d3647f0873 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -192,6 +193,7 @@ void machine_kexec(struct kimage *kimage) * the offline CPUs. Therefore, we must use the __* variant here. */ __flush_icache_range((uintptr_t)reboot_code_buffer, + (uintptr_t)reboot_code_buffer + arm64_relocate_new_kernel_size); /* Flush the kimage list and its buffers. */ @@ -219,7 +221,7 @@ void machine_kexec(struct kimage *kimage) BUG(); /* Should never get here. 
*/ } -static void machine_kexec_mask_interrupts(void) +void machine_kexec_mask_interrupts(void) { unsigned int i; struct irq_desc *desc; @@ -260,6 +262,16 @@ void machine_crash_shutdown(struct pt_regs *regs) /* shutdown non-crashing cpus */ crash_smp_send_stop(); + /* + * when we panic in hardlockup detected by sdei_watchdog, the secure + * timer interrupt remains activate here because firmware clear eoi + * after dispatch is completed. This will cause arm_arch_timer + * interrupt failed to trigger in the second kernel. So we clear eoi + * of the secure timer before booting the second kernel. + */ + if (in_nmi()) + sdei_watchdog_clear_eoi(); + /* for crashing cpu */ crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index f0690c2ca3e0f2f27f761a1568e6b3cc637779a4..8d3500898ff076d09b85269bcf00cea76e144dcb 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -42,6 +42,48 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, return (u64)&plt[i]; } +#ifdef CONFIG_LIVEPATCH +void klp_get_core_plts(struct module *mod) +{ + if (is_livepatch_module(mod) && mod->arch.have_plts) + mod->arch.core_plts = (struct plt_entry *) + mod->arch.core.plt->sh_addr; +} + +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym) +{ + struct mod_plt_sec *pltsec = &mod->arch.core; + struct plt_entry *plt = (struct plt_entry *)mod->arch.core_plts; + int i = pltsec->plt_num_entries; + u64 val = sym->st_value + rela->r_addend; + + plt[i] = get_plt_entry(val); + + /* + * Check if the entry we just created is a duplicate. Given that the + * relocations are sorted, this will be the last entry we allocated. + * (if one exists). 
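For reference, the opaque constants used by arch_klp_patch_func() and arch_klp_unpatch_func() above, taken when the target lies outside the +/-128 MB range of a plain branch, appear to decode to a long jump through x16 (addr being the branch target):

    movn    x16, #(~addr & 0xffff)
    movk    x16, #((addr >> 16) & 0xffff), lsl #16
    movk    x16, #((addr >> 32) & 0xffff), lsl #32
    br      x16

The movn leaves bits [63:48] all-ones, so the sequence assumes a kernel virtual address whose top 16 bits are set (true for the usual 48-bit VA kernel mappings).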
+ */ + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) + return (u64)&plt[i - 1]; + + pltsec->plt_num_entries++; + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) + return 0; + + return (u64)&plt[i]; +} +#else +u64 livepatch_emit_plt_entry(struct module *mod, void *loc, + const Elf64_Rela *rela, Elf64_Sym *sym) +{ + WARN(1, "Live patching support is disabled, but catch SHF_RELA_LIVEPATCH relocation\n"); + + return 0; +} +#endif /* #ifdef CONFIG_LIVEPATCH */ + #ifdef CONFIG_ARM64_ERRATUM_843419 u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) { @@ -254,6 +296,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, sechdrs[i].sh_info, dstsec); } +#ifdef CONFIG_LIVEPATCH + if (mod->arch.core.plt) + mod->arch.have_plts = true; + mod->arch.core_plts = NULL; +#endif + mod->arch.core.plt->sh_type = SHT_NOBITS; mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index f0f27aeefb73623a0983c1f3eec2054d306021dc..81e1eb1eae53815c0d83ce11f7024f3cb62cc293 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -32,6 +33,7 @@ void *module_alloc(unsigned long size) { + u64 module_alloc_end = module_alloc_base + MODULES_VSIZE; gfp_t gfp_mask = GFP_KERNEL; void *p; @@ -39,9 +41,12 @@ void *module_alloc(unsigned long size) if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) gfp_mask |= __GFP_NOWARN; + if (IS_ENABLED(CONFIG_KASAN)) + /* don't exceed the static module region - see below */ + module_alloc_end = MODULES_END; + p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + MODULES_VSIZE, - gfp_mask, PAGE_KERNEL_EXEC, 0, + module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && @@ -56,7 +61,7 @@ void *module_alloc(unsigned long size) * can simply omit this fallback in that case. */ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, - module_alloc_base + SZ_4G, GFP_KERNEL, + module_alloc_base + SZ_2G, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); @@ -96,16 +101,50 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) { s64 sval = do_reloc(op, place, val); + /* + * The ELF psABI for AArch64 documents the 16-bit and 32-bit place + * relative and absolute relocations as having a range of [-2^15, 2^16) + * or [-2^31, 2^32), respectively. However, in order to be able to + * detect overflows reliably, we have to choose whether we interpret + * such quantities as signed or as unsigned, and stick with it. + * The way we organize our address space requires a signed + * interpretation of 32-bit relative references, so let's use that + * for all R_AARCH64_PRELxx relocations. This means our upper + * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX. 
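Concretely, the switch below enforces:

    len 16, RELOC_OP_ABS:  [0, U16_MAX]
    len 16, RELOC_OP_PREL: [S16_MIN, S16_MAX]
    len 32, RELOC_OP_ABS:  [0, U32_MAX]
    len 32, RELOC_OP_PREL: [S32_MIN, S32_MAX]

64-bit values are stored without a range check, since they cannot overflow the place.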
+ */ + switch (len) { case 16: *(s16 *)place = sval; - if (sval < S16_MIN || sval > U16_MAX) - return -ERANGE; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U16_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S16_MIN || sval > S16_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 16-bit data relocation (%d)\n", op); + return 0; + } break; case 32: *(s32 *)place = sval; - if (sval < S32_MIN || sval > U32_MAX) - return -ERANGE; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U32_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S32_MIN || sval > S32_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 32-bit data relocation (%d)\n", op); + return 0; + } break; case 64: *(s64 *)place = sval; @@ -413,7 +452,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && ovf == -ERANGE) { - val = module_emit_plt_entry(me, loc, &rel[i], sym); + if (!(sechdrs[relsec].sh_flags + & SHF_RELA_LIVEPATCH)) + val = module_emit_plt_entry(me, + loc, &rel[i], sym); + else + val = livepatch_emit_plt_entry(me, + loc, &rel[i], sym); if (!val) return -ENOEXEC; ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, @@ -440,22 +485,48 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return -ENOEXEC; } -int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) +static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) { const Elf_Shdr *s, *se; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { - if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) - apply_alternatives_module((void *)s->sh_addr, s->sh_size); -#ifdef CONFIG_ARM64_MODULE_PLTS - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && - !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name)) - me->arch.ftrace_trampoline = (void *)s->sh_addr; -#endif + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; } + return NULL; +} + +static int module_init_ftrace_plt(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ +#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE) + const Elf_Shdr *s; + struct plt_entry *plt; + + s = find_section(hdr, sechdrs, ".text.ftrace_trampoline"); + if (!s) + return -ENOEXEC; + + plt = (void *)s->sh_addr; + *plt = get_plt_entry(FTRACE_ADDR); + mod->arch.ftrace_trampoline = plt; +#endif return 0; } + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + s = find_section(hdr, sechdrs, ".altinstructions"); + if (s) + apply_alternatives_module((void *)s->sh_addr, s->sh_size); + + return module_init_ftrace_plt(hdr, sechdrs, me); +} diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index 22e36a21c1134576eb58a9209d75f2c6b2f09f85..09a0eef71d12bd00fd14829861d70cb7c70b6549 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -1,5 +1,5 @@ SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .init.plt (NOLOAD) : { BYTE(0) } - .text.ftrace_trampoline (NOLOAD) : { BYTE(0) } + .plt 0 (NOLOAD) : { BYTE(0) } + .init.plt 0 (NOLOAD) : { BYTE(0) } + .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } } diff --git a/arch/arm64/kernel/mpam/Makefile b/arch/arm64/kernel/mpam/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..23fe2d5095fbc38bc5c615ee25752ed8810f9429 --- /dev/null +++ b/arch/arm64/kernel/mpam/Makefile @@ -0,0 +1,3 
@@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MPAM) += mpam_resctrl.o mpam_mon.o \ + mpam_ctrlmon.o mpam_device.o mpam_setup.o diff --git a/arch/arm64/kernel/mpam/mpam_ctrlmon.c b/arch/arm64/kernel/mpam/mpam_ctrlmon.c new file mode 100644 index 0000000000000000000000000000000000000000..3780c0ec08197bb7749667b56b7113293bf7ee2d --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_ctrlmon.c @@ -0,0 +1,1065 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM + * - allocation and monitor management + * + * Copyright (C) 2016 Intel Corporation + * Copyright (C) 2018-2019 Huawei Technologies Co., Ltd + * + * Authors: + * Fenghua Yu + * Tony Luck + * Xie XiuQi + * + * Code was partially borrowed from arch/x86/kernel/cpu/intel_rdt*. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * More information about MPAM be found in the Arm Architecture Reference + * Manual. + * + * https://static.docs.arm.com/ddi0598/a/DDI0598_MPAM_supp_armv8a.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "mpam_resource.h" +#include "mpam_internal.h" + +/* schemata content list */ +LIST_HEAD(resctrl_all_schema); + +/* Init schemata content */ +static int add_schema(enum resctrl_conf_type t, struct resctrl_resource *r) +{ + int ret = 0; + char *suffix = ""; + struct resctrl_schema *s; + struct raw_resctrl_resource *rr; + struct resctrl_schema_ctrl *sc, *tmp; + enum resctrl_ctrl_type type; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->res = r; + s->conf_type = t; + + /* + * code and data is separated for resources LxCache but + * not for MB(Memory Bandwidth), it's necessary to set + * cdp_mc_both to let resctrl know operating the two closid/ + * monitor simultaneously when configuring/monitoring. + */ + if (is_resctrl_cdp_enabled()) + s->cdp_mc_both = !r->cdp_enable; + + switch (t) { + case CDP_CODE: + suffix = "CODE"; + break; + case CDP_DATA: + suffix = "DATA"; + break; + case CDP_BOTH: + suffix = ""; + break; + default: + kfree(s); + return -EINVAL; + } + + WARN_ON_ONCE(strlen(r->name) + strlen(suffix) + 1 > RESCTRL_NAME_LEN); + snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); + + INIT_LIST_HEAD(&s->list); + list_add_tail(&s->list, &resctrl_all_schema); + + /* + * Initialize extension ctrl type with MPAM capabilities, + * e.g. priority/hardlimit. 
+ */ + rr = r->res; + INIT_LIST_HEAD(&s->schema_ctrl_list); + for_each_extend_ctrl_type(type) { + struct resctrl_ctrl_feature *feature = + &rr->ctrl_features[type]; + + if (!rr->ctrl_features[type].enabled || + !rr->ctrl_features[type].max_wd) + continue; + + sc = kzalloc(sizeof(*sc), GFP_KERNEL); + if (!sc) { + ret = -ENOMEM; + goto err; + } + sc->ctrl_type = type; + + WARN_ON_ONCE(strlen(r->name) + strlen(suffix) + + strlen(feature->ctrl_suffix) + 1 > RESCTRL_NAME_LEN); + snprintf(sc->name, sizeof(sc->name), "%s%s%s", r->name, + suffix, feature->ctrl_suffix); + + list_add_tail(&sc->list, &s->schema_ctrl_list); + } + + return 0; + +err: + list_for_each_entry_safe(sc, tmp, &s->schema_ctrl_list, list) { + list_del(&sc->list); + kfree(sc); + } + list_del(&s->list); + kfree(s); + return ret; +} + +int schemata_list_init(void) +{ + int ret = 0; + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (!r || !r->alloc_capable) + continue; + + if (r->cdp_enable) { + ret = add_schema(CDP_CODE, r); + ret |= add_schema(CDP_DATA, r); + } else { + ret = add_schema(CDP_BOTH, r); + } + if (ret) + break; + } + + return ret; +} + +/* + * During resctrl_kill_sb(), the mba_sc state is reset before + * schemata_list_destroy() is called: unconditionally try to free the + * array. + */ +void schemata_list_destroy(void) +{ + struct resctrl_schema *s, *tmp; + struct resctrl_schema_ctrl *sc, *sc_tmp; + + list_for_each_entry_safe(s, tmp, &resctrl_all_schema, list) { + list_for_each_entry_safe(sc, sc_tmp, &s->schema_ctrl_list, list) { + list_del(&sc->list); + kfree(sc); + } + list_del(&s->list); + kfree(s); + } +} + +static void +resctrl_dom_ctrl_config(bool cdp_both_ctrl, struct resctrl_resource *r, + struct rdt_domain *dom, struct msr_param *para) +{ + struct raw_resctrl_resource *rr; + + rr = r->res; + rr->msr_update(r, dom, para); + + if (cdp_both_ctrl) { + resctrl_cdp_mpamid_map_val(para->closid->reqpartid, CDP_DATA, + para->closid->reqpartid); + rr->msr_update(r, dom, para); + } +} + +/** + * Resync resctrl group domain ctrls, use rdtgrp->resync to indicate + * whether the resync procedure will be called. When resync==1, all + * domain ctrls of this group be synchronized again. This happens + * when rmid of this group is changed, and all configurations need to + * be remapped again accordingly. + */ +static void resctrl_group_resync_domain_ctrls(struct rdtgroup *rdtgrp, + struct resctrl_resource *r, struct rdt_domain *dom) +{ + int i; + int staged_start, staged_end; + struct resctrl_staged_config *cfg; + struct sd_closid closid; + struct list_head *head; + struct rdtgroup *entry; + struct msr_param para; + bool cdp_both_ctrl; + + cfg = dom->staged_cfg; + para.closid = &closid; + + staged_start = (r->cdp_enable) ? CDP_CODE : CDP_BOTH; + staged_end = (r->cdp_enable) ? 
CDP_DATA : CDP_BOTH; + + for (i = staged_start; i <= staged_end; i++) { + cdp_both_ctrl = cfg[i].cdp_both_ctrl; + + resctrl_cdp_mpamid_map_val(rdtgrp->closid.intpartid, + cfg[i].conf_type, closid.intpartid); + resctrl_cdp_mpamid_map_val(rdtgrp->closid.reqpartid, + cfg[i].conf_type, closid.reqpartid); + resctrl_dom_ctrl_config(cdp_both_ctrl, r, dom, ¶); + + /* + * we should synchronize all child mon groups' + * configuration from this ctrl rdtgrp + */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + resctrl_cdp_mpamid_map_val(entry->closid.reqpartid, + cfg[i].conf_type, closid.reqpartid); + resctrl_dom_ctrl_config(cdp_both_ctrl, r, dom, ¶); + } + } +} + +static void resctrl_group_update_domain_ctrls(struct rdtgroup *rdtgrp, + struct resctrl_resource *r, struct rdt_domain *dom) +{ + int i; + struct resctrl_staged_config *cfg; + enum resctrl_ctrl_type type; + struct sd_closid closid; + struct list_head *head; + struct rdtgroup *entry; + struct msr_param para; + bool update_on, cdp_both_ctrl; + + cfg = dom->staged_cfg; + para.closid = &closid; + + for (i = 0; i < ARRAY_SIZE(dom->staged_cfg); i++) { + if (!cfg[i].have_new_ctrl) + continue; + update_on = false; + cdp_both_ctrl = cfg[i].cdp_both_ctrl; + + resctrl_cdp_mpamid_map_val(rdtgrp->closid.intpartid, + cfg[i].conf_type, closid.intpartid); + for_each_ctrl_type(type) { + /* if ctrl group's config has changed, refresh it first. */ + if (dom->ctrl_val[type][closid.intpartid] != cfg[i].new_ctrl[type] && + cfg[i].ctrl_updated[type] == true) { + /* + * duplicate ctrl group's configuration indexed + * by intpartid from domain ctrl_val array. + */ + resctrl_cdp_mpamid_map_val(rdtgrp->closid.reqpartid, + cfg[i].conf_type, closid.reqpartid); + + dom->ctrl_val[type][closid.intpartid] = + cfg[i].new_ctrl[type]; + dom->have_new_ctrl = true; + cfg[i].ctrl_updated[type] = false; + update_on = true; + } + } + if (update_on) + resctrl_dom_ctrl_config(cdp_both_ctrl, r, dom, ¶); + + /* + * we should synchronize all child mon groups' + * configuration from this ctrl rdtgrp + */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + resctrl_cdp_mpamid_map_val(entry->closid.reqpartid, + cfg[i].conf_type, closid.reqpartid); + resctrl_dom_ctrl_config(cdp_both_ctrl, r, dom, ¶); + cond_resched(); + } + } +} + +static int resctrl_group_update_domains(struct rdtgroup *rdtgrp, + struct resctrl_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + if (rdtgrp->resync) + resctrl_group_resync_domain_ctrls(rdtgrp, r, d); + else + resctrl_group_update_domain_ctrls(rdtgrp, r, d); + } + + return 0; +} + +/* + * For each domain in this resource we expect to find a series of: + * id=mask + * separated by ";". The "id" is in decimal, and must match one of + * the "id"s for this resource. 
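As an illustration (the control value here is arbitrary), a schemata write of "L3:0=7ff;1=7ff" reaches parse_line() as "0=7ff;1=7ff"; each ';'-separated token is split on '=' into a decimal domain id, matched against d->id, and a value string handed to the resource's parse_ctrlval() hook.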
+ */ +static int +parse_line(char *line, struct resctrl_resource *r, + enum resctrl_conf_type conf_type, + enum resctrl_ctrl_type ctrl_type, u32 closid) +{ + struct raw_resctrl_resource *rr = r->res; + char *dom = NULL; + char *id; + struct rdt_domain *d; + unsigned long dom_id; + hw_closid_t hw_closid; + + if (!rr->ctrl_features[ctrl_type].enabled) + return -EINVAL; + +next: + if (!line || line[0] == '\0') + return 0; + dom = strsep(&line, ";"); + id = strsep(&dom, "="); + if (!dom || kstrtoul(id, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); + return -EINVAL; + } + dom = strim(dom); + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + resctrl_cdp_mpamid_map(closid, conf_type, hw_closid); + if (rr->parse_ctrlval(dom, r, + &d->staged_cfg[conf_type], ctrl_type)) + return -EINVAL; + d->staged_cfg[conf_type].hw_closid = hw_closid; + d->staged_cfg[conf_type].conf_type = conf_type; + d->staged_cfg[conf_type].ctrl_type = ctrl_type; + goto next; + } + } + return -EINVAL; +} + +static int +resctrl_group_parse_schema_resource(char *resname, char *tok, u32 closid) +{ + struct resctrl_resource *r; + struct resctrl_schema *s; + enum resctrl_conf_type t; + struct resctrl_schema_ctrl *sc; + + list_for_each_entry(s, &resctrl_all_schema, list) { + r = s->res; + + if (!r) + continue; + + if (r->alloc_enabled) { + if (closid >= mpam_sysprops_num_partid()) + continue; + t = conf_name_to_conf_type(s->name); + if (!strcmp(resname, s->name)) + return parse_line(tok, r, t, + SCHEMA_COMM, closid); + + list_for_each_entry(sc, &s->schema_ctrl_list, list) { + if (!strcmp(resname, sc->name)) + return parse_line(tok, r, t, + sc->ctrl_type, + closid); + } + } + } + rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname); + + return -EINVAL; +} + +ssize_t resctrl_group_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + struct rdt_domain *dom; + struct resctrl_resource *r; + struct mpam_resctrl_res *res; + enum resctrl_conf_type conf_type; + struct resctrl_staged_config *cfg; + enum resctrl_ctrl_type t; + char *tok, *resname; + u32 closid; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (!rdtgrp) { + resctrl_group_kn_unlock(of->kn); + return -ENOENT; + } + + rdt_last_cmd_clear(); + + closid = rdtgrp->closid.intpartid; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + + if (!r->alloc_enabled) + continue; + + list_for_each_entry(dom, &r->domains, list) { + dom->have_new_ctrl = false; + for_each_conf_type(conf_type) { + cfg = &dom->staged_cfg[conf_type]; + for_each_ctrl_type(t) { + cfg->ctrl_updated[t] = false; + } + cfg->have_new_ctrl = false; + } + } + } + + while ((tok = strsep(&buf, "\n")) != NULL) { + resname = strim(strsep(&tok, ":")); + if (!tok) { + rdt_last_cmd_puts("Missing ':'\n"); + ret = -EINVAL; + goto out; + } + if (tok[0] == '\0') { + rdt_last_cmd_printf("Missing '%s' value\n", resname); + ret = -EINVAL; + goto out; + } + ret = resctrl_group_parse_schema_resource(resname, tok, closid); + if (ret) + goto out; + } + + ret = resctrl_update_groups_config(rdtgrp); +out: + resctrl_group_kn_unlock(of->kn); + return ret ?: nbytes; +} + +/** + * MPAM resources such as L2 may have too many domains for arm64, + * at this time we should rearrange this display for brevity and + * harmonious interaction. 
+ * + * Before rearrangement: L2:0=ff;1=ff;2=fc;3=ff;4=f;....;255=ff + * After rearrangement: L2:S;2=fc;S;4=f;S + * Those continuous fully sharable domains will be combined into + * a single "S" simply. + */ +static void show_doms(struct seq_file *s, struct resctrl_resource *r, + char *schema_name, enum resctrl_ctrl_type type, + struct sd_closid *closid) +{ + struct raw_resctrl_resource *rr = r->res; + struct rdt_domain *dom; + struct msr_param para; + bool sep = false; + bool rg = false; + bool prev_auto_fill = false; + u32 reg_val; + + if (!rr->ctrl_features[type].enabled) + return; + + para.closid = closid; + para.type = type; + + if (r->dom_num > RESCTRL_SHOW_DOM_MAX_NUM) + rg = true; + + seq_printf(s, "%*s:", max_name_width, schema_name); + list_for_each_entry(dom, &r->domains, list) { + reg_val = rr->msr_read(r, dom, ¶); + + if (reg_val == rr->ctrl_features[SCHEMA_COMM].default_ctrl && + rg && prev_auto_fill == true) + continue; + + if (sep) + seq_puts(s, ";"); + if (rg && reg_val == rr->ctrl_features[SCHEMA_COMM].default_ctrl) { + prev_auto_fill = true; + seq_puts(s, "S"); + } else { + seq_printf(s, rr->format_str, dom->id, + max_data_width, reg_val); + } + sep = true; + } + seq_puts(s, "\n"); +} + +int resctrl_group_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + struct resctrl_resource *r; + struct resctrl_schema *rs; + int ret = 0; + struct sd_closid closid; + struct resctrl_schema_ctrl *sc; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (rdtgrp) { + list_for_each_entry(rs, &resctrl_all_schema, list) { + r = rs->res; + if (!r) + continue; + if (r->alloc_enabled) { + resctrl_cdp_mpamid_map_val(rdtgrp->closid.intpartid, + rs->conf_type, closid.intpartid); + + resctrl_cdp_mpamid_map_val(rdtgrp->closid.reqpartid, + rs->conf_type, closid.reqpartid); + + show_doms(s, r, rs->name, SCHEMA_COMM, &closid); + list_for_each_entry(sc, &rs->schema_ctrl_list, list) { + show_doms(s, r, sc->name, sc->ctrl_type, &closid); + } + } + } + } else { + ret = -ENOENT; + } + resctrl_group_kn_unlock(of->kn); + return ret; +} + +static inline char *kernfs_node_name(struct kernfs_open_file *of) +{ + return (char *)(of ? of->kn->name : NULL); +} + +static inline void put_resource_name(char *res) +{ + kfree(res); +} + +/* + * pick resource name from mon data name + * eg. 
from mon_L3_01 we got L3 + * */ +static inline char *get_resource_name(char *name) +{ + char *s, *p, *res; + + if (!name) + return NULL; + + s = name + 4; /* skip "mon_" prefix */ + p = strrchr(name, '_'); + res = kmemdup_nul(s, p - s, GFP_KERNEL); + if (!res) + res = NULL; + + return res; +} + +static u64 resctrl_dom_mon_data(struct resctrl_resource *r, + struct rdt_domain *d, void *md_priv) +{ + u64 ret; + union mon_data_bits md; + struct raw_resctrl_resource *rr; + + md.priv = md_priv; + rr = r->res; + ret = rr->mon_read(d, md.priv); + if (md.u.cdp_both_mon) { + resctrl_cdp_mpamid_map_val(md.u.partid, CDP_DATA, md.u.partid); + ret += rr->mon_read(d, md.priv); + } + + return ret; +} + +int resctrl_group_mondata_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + struct rdtgroup *rdtgrp; + struct rdt_domain *d; + struct resctrl_resource *r; + union mon_data_bits md; + int ret = 0; + char *resname = get_resource_name(kernfs_node_name(of)); + u64 usage; + int pmg; + + if (!resname) + return -ENOMEM; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } + + md.priv = of->kn->priv; + + r = mpam_resctrl_get_resource(md.u.rid); + + /* show monitor data */ + d = mpam_find_domain(r, md.u.domid, NULL); + if (IS_ERR_OR_NULL(d)) { + pr_warn("Could't find domain id %d\n", md.u.domid); + ret = -ENOENT; + goto out; + } + + usage = resctrl_dom_mon_data(r, d, md.priv); + + /* + * if this rdtgroup is ctrlmon group, also collect it's + * mon groups' monitor data. + */ + if (rdtgrp->type == RDTCTRL_GROUP) { + struct list_head *head; + struct rdtgroup *entry; + hw_closid_t hw_closid; + enum resctrl_conf_type type = CDP_CODE; + + resctrl_cdp_mpamid_map(rdtgrp->closid.reqpartid, + CDP_CODE, hw_closid); + /* CDP_CODE share the same closid with CDP_BOTH */ + if (md.u.partid != hw_closid_val(hw_closid)) + type = CDP_DATA; + + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + resctrl_cdp_mpamid_map_val(entry->closid.reqpartid, + type, md.u.partid); + + ret = mpam_rmid_to_partid_pmg(entry->mon.rmid, + NULL, &pmg); + if (ret) + return ret; + + md.u.pmg = pmg; + resctrl_cdp_mpamid_map_val(get_rmid_mon(entry->mon.rmid, + r->rid), type, md.u.mon); + + usage += resctrl_dom_mon_data(r, d, md.priv); + + cond_resched(); + } + } + + seq_printf(m, "%llu\n", usage); + +out: + put_resource_name(resname); + resctrl_group_kn_unlock(of->kn); + return ret; +} + +static struct kernfs_ops kf_mondata_ops = { + .atomic_write_len = PAGE_SIZE, + .seq_show = resctrl_group_mondata_show, +}; + +/* set uid and gid of resctrl_group dirs and files to that of the creator */ +static int resctrl_group_kn_set_ugid(struct kernfs_node *kn) +{ + struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = current_fsuid(), + .ia_gid = current_fsgid(), }; + + if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && + gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) + return 0; + + return kernfs_setattr(kn, &iattr); +} + +static int resctrl_mkdir_mondata_dom(struct kernfs_node *parent_kn, + struct rdt_domain *d, struct resctrl_schema *s, + struct resctrl_group *prgrp) + +{ + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + union mon_data_bits md; + struct kernfs_node *kn; + char name[32]; + int ret = 0; + int pmg; + + r = s->res; + rr = r->res; + + md.u.rid = r->rid; + md.u.domid = d->id; + /* monitoring use reqpartid (reqpartid) */ + resctrl_cdp_mpamid_map_val(prgrp->closid.reqpartid, s->conf_type, + md.u.partid); + 
resctrl_cdp_mpamid_map_val(get_rmid_mon(prgrp->mon.rmid, r->rid), + s->conf_type, md.u.mon); + + ret = mpam_rmid_to_partid_pmg(prgrp->mon.rmid, NULL, &pmg); + if (ret) + return ret; + md.u.pmg = pmg; + + md.u.cdp_both_mon = s->cdp_mc_both; + + if (!parent_kn) { + pr_err("%s: error parent_kn null\n", __func__); + return -EINVAL; + } + + snprintf(name, sizeof(name), "mon_%s_%02d", s->name, d->id); + kn = kernfs_find_and_get(parent_kn, name); + if (!kn) { + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, + &kf_mondata_ops, md.priv, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = resctrl_group_kn_set_ugid(kn); + if (ret) { + pr_info("%s: create name %s, error ret %d\n", + __func__, name, ret); + kernfs_remove(kn); + return ret; + } + } + + kn->priv = md.priv; + + /* Could we remove the MATCH_* param ? */ + rr->mon_write(d, md.priv); + + return ret; +} + +static int resctrl_mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, + struct resctrl_schema *s, struct resctrl_group *prgrp) +{ + struct resctrl_resource *r; + struct rdt_domain *dom; + int ret; + + r = s->res; + list_for_each_entry(dom, &r->domains, list) { + ret = resctrl_mkdir_mondata_dom(parent_kn, dom, s, prgrp); + if (ret) + return ret; + } + + return 0; +} + +int resctrl_mkdir_mondata_all_subdir(struct kernfs_node *parent_kn, + struct resctrl_group *prgrp) +{ + struct resctrl_schema *s; + struct resctrl_resource *r; + int ret; + + /* + * Create the subdirectories for each domain. Note that all events + * in a domain like L3 are grouped into a resource whose domain is L3 + */ + list_for_each_entry(s, &resctrl_all_schema, list) { + r = s->res; + + if (r->mon_enabled) { + struct raw_resctrl_resource *rr; + + rr = r->res; + + ret = resctrl_mkdir_mondata_subdir_alldom(parent_kn, + s, prgrp); + if (ret) + break; + } + } + + return ret; +} + +static int resctrl_group_mkdir_info_resdir(struct resctrl_resource *r, + char *name,unsigned long fflags, struct kernfs_node *kn_info) +{ + struct kernfs_node *kn_subdir; + int ret; + + kn_subdir = kernfs_create_dir(kn_info, name, + kn_info->mode, r); + if (IS_ERR(kn_subdir)) + return PTR_ERR(kn_subdir); + + ret = resctrl_group_kn_set_ugid(kn_subdir); + if (ret) + return ret; + + ret = resctrl_group_add_files(kn_subdir, fflags); + if (!ret) + kernfs_activate(kn_subdir); + + return ret; +} + +int resctrl_group_create_info_dir(struct kernfs_node *parent_kn, + struct kernfs_node **kn_info) +{ + struct resctrl_schema *s; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + unsigned long fflags; + char name[32]; + int ret; + + /* create the directory */ + *kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); + if (IS_ERR(*kn_info)) + return PTR_ERR(*kn_info); + + ret = resctrl_group_add_files(*kn_info, RF_TOP_INFO); + if (ret) + goto out_destroy; + + list_for_each_entry(s, &resctrl_all_schema, list) { + r = s->res; + if (!r) + continue; + rr = r->res; + if (r->alloc_enabled) { + fflags = rr->fflags | RF_CTRL_INFO; + ret = resctrl_group_mkdir_info_resdir(r, s->name, + fflags, *kn_info); + if (ret) + goto out_destroy; + } + } + + list_for_each_entry(s, &resctrl_all_schema, list) { + r = s->res; + if (!r) + continue; + rr = r->res; + if (r->mon_enabled) { + fflags = rr->fflags | RF_MON_INFO; + snprintf(name, sizeof(name), "%s_MON", s->name); + ret = resctrl_group_mkdir_info_resdir(r, name, + fflags, *kn_info); + if (ret) + goto out_destroy; + } + } + + ret = resctrl_group_kn_set_ugid(*kn_info); + if (ret) + goto 
out_destroy; + + kernfs_activate(*kn_info); + + return 0; + +out_destroy: + kernfs_remove(*kn_info); + return ret; +} + + + +/* Initialize MBA resource with default values. */ +static void rdtgroup_init_mba(struct resctrl_schema *s, u32 closid) +{ + struct resctrl_staged_config *cfg; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + struct rdt_domain *d; + enum resctrl_ctrl_type t; + + r = s->res; + if (WARN_ON(!r)) + return; + rr = r->res; + + list_for_each_entry(d, &s->res->domains, list) { + cfg = &d->staged_cfg[CDP_BOTH]; + cfg->cdp_both_ctrl = s->cdp_mc_both; + cfg->new_ctrl[SCHEMA_COMM] = rr->ctrl_features[SCHEMA_COMM].default_ctrl; + cfg->ctrl_updated[SCHEMA_COMM] = true; + resctrl_cdp_mpamid_map(closid, CDP_BOTH, cfg->hw_closid); + cfg->have_new_ctrl = true; + /* Set extension ctrl default value, e.g. priority/hardlimit */ + for_each_extend_ctrl_type(t) { + cfg->new_ctrl[t] = rr->ctrl_features[t].default_ctrl; + cfg->ctrl_updated[t] = true; + } + } +} + +/* + * Initialize cache resources with default values. + * + * A new resctrl group is being created on an allocation capable (CAT) + * supporting system. Set this group up to start off with all usable + * allocations. + * + * If there are no more shareable bits available on any domain then + * the entire allocation will fail. + */ +static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) +{ + struct resctrl_staged_config *cfg; + enum resctrl_conf_type conf_type = s->conf_type; + enum resctrl_ctrl_type ctrl_type; + struct rdt_domain *d; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + u32 used_b = 0; + u32 unused_b = 0; + unsigned long tmp_cbm; + + r = s->res; + if (WARN_ON(!r)) + return -EINVAL; + rr = r->res; + + list_for_each_entry(d, &s->res->domains, list) { + cfg = &d->staged_cfg[conf_type]; + cfg->cdp_both_ctrl = s->cdp_mc_both; + cfg->have_new_ctrl = false; + cfg->new_ctrl[SCHEMA_COMM] = r->cache.shareable_bits; + used_b = r->cache.shareable_bits; + + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); + unused_b &= BIT_MASK(r->cache.cbm_len) - 1; + cfg->new_ctrl[SCHEMA_COMM] |= unused_b; + + /* Ensure cbm does not access out-of-bound */ + tmp_cbm = cfg->new_ctrl[SCHEMA_COMM]; + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < + r->cache.min_cbm_bits) { + rdt_last_cmd_printf("No space on %s:%d\n", + r->name, d->id); + return -ENOSPC; + } + + resctrl_cdp_mpamid_map(closid, conf_type, cfg->hw_closid); + cfg->ctrl_updated[SCHEMA_COMM] = true; + cfg->have_new_ctrl = true; + + /* + * Set extension ctrl default value, e.g. priority/hardlimit + * with MPAM capabilities. + */ + for_each_extend_ctrl_type(ctrl_type) { + cfg->new_ctrl[ctrl_type] = + rr->ctrl_features[ctrl_type].default_ctrl; + cfg->ctrl_updated[ctrl_type] = true; + } + } + + return 0; +} + +/* Initialize the resctrl group's allocations. 
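resctrl_group_init_alloc() below walks every schema: memory-bandwidth resources (RDT_RESOURCE_MC) are staged with rdtgroup_init_mba() defaults, cache resources with rdtgroup_init_cat() (shareable plus currently unused bits), and resctrl_group_update_domains() then writes the staged values out to each domain.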
*/ +int resctrl_group_init_alloc(struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + struct resctrl_resource *r; + int ret; + + list_for_each_entry(s, &resctrl_all_schema, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MC) { + rdtgroup_init_mba(s, rdtgrp->closid.intpartid); + } else { + ret = rdtgroup_init_cat(s, rdtgrp->closid.intpartid); + if (ret < 0) + return ret; + } + + ret = resctrl_group_update_domains(rdtgrp, r); + if (ret < 0) { + rdt_last_cmd_puts("Failed to initialize allocations\n"); + return ret; + } + } + + return 0; +} + +int resctrl_update_groups_config(struct rdtgroup *rdtgrp) +{ + int ret = 0; + struct resctrl_resource *r; + struct mpam_resctrl_res *res; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (r->alloc_enabled) { + ret = resctrl_group_update_domains(rdtgrp, r); + if (ret) + break; + } + } + + /* after resync all configurations, restore resync to 0 */ + rdtgrp->resync = 0; + + return ret; +} + +int __resctrl_group_show_options(struct seq_file *seq) +{ + struct resctrl_resource *res; + struct raw_resctrl_resource *r; + + res = mpam_resctrl_get_resource(RDT_RESOURCE_L3); + if (res && res->cdp_enable) + seq_puts(seq, ",cdpl3"); + + res = mpam_resctrl_get_resource(RDT_RESOURCE_L2); + if (res && res->cdp_enable) + seq_puts(seq, ",cdpl2"); + + r = mpam_get_raw_resctrl_resource(RDT_RESOURCE_L3); + if (r && r->ctrl_features[SCHEMA_PBM].enabled) + seq_puts(seq, ",caPbm"); + if (r && r->ctrl_features[SCHEMA_MAX].enabled) + seq_puts(seq, ",caMax"); + if (r && r->ctrl_features[SCHEMA_PRI].enabled) + seq_puts(seq, ",caPrio"); + + r = mpam_get_raw_resctrl_resource(RDT_RESOURCE_MC); + if (r && r->ctrl_features[SCHEMA_MAX].enabled) + seq_puts(seq, ",mbMax"); + if (r && r->ctrl_features[SCHEMA_MIN].enabled) + seq_puts(seq, ",mbMin"); + if (r && r->ctrl_features[SCHEMA_HDL].enabled) + seq_puts(seq, ",mbHdl"); + if (r && r->ctrl_features[SCHEMA_PRI].enabled) + seq_puts(seq, ",mbPrio"); + + return 0; +} diff --git a/arch/arm64/kernel/mpam/mpam_device.c b/arch/arm64/kernel/mpam/mpam_device.c new file mode 100644 index 0000000000000000000000000000000000000000..c516b93eeb5f2dfe9c5a3ad6fa651e5714a87b21 --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_device.c @@ -0,0 +1,1884 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM + * + * Copyright (C) 2020-2021 Huawei Technologies Co., Ltd + * + * Author: Wang Shaobo + * + * Code was partially borrowed from http://www.linux-arm.org/ + * git?p=linux-jm.git;a=shortlog;h=refs/heads/mpam/snapshot/may. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * More information about MPAM be found in the Arm Architecture Reference + * Manual. + * + * https://static.docs.arm.com/ddi0598/a/DDI0598_MPAM_supp_armv8a.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpam_resource.h" +#include "mpam_device.h" +#include "mpam_internal.h" + +/* + * During discovery this lock protects writers to class, components and devices. 
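+ * mpam_class_list_lock_held() lets readers assert that they hold it during
+ * that window.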
+ * Once all devices are successfully probed, the system_supports_mpam() static + * key is enabled, and these lists become read only. + */ +static DEFINE_MUTEX(mpam_devices_lock); + +/* Devices are MSCs */ +static LIST_HEAD(mpam_all_devices); + +/* Classes are the set of MSCs that make up components of the same type. */ +LIST_HEAD(mpam_classes); + +static DEFINE_MUTEX(mpam_cpuhp_lock); +static int mpam_cpuhp_state; + +static bool resctrl_registered; + +static inline int mpam_cpu_online(unsigned int cpu); +static inline int mpam_cpu_offline(unsigned int cpu); + +static struct mpam_sysprops_prop mpam_sysprops; + +/* + * mpam is enabled once all devices have been probed from CPU online callbacks, + * scheduled via this work_struct. + */ +static struct work_struct mpam_enable_work; + +/* + * This gets set if something terrible happens, it prevents future attempts + * to configure devices. + */ +static int mpam_broken; +static struct work_struct mpam_failed_work; + +void mpam_class_list_lock_held(void) +{ + lockdep_assert_held(&mpam_devices_lock); +} + +static inline u32 mpam_read_reg(struct mpam_device *dev, u16 reg) +{ + WARN_ON_ONCE(reg > SZ_MPAM_DEVICE); + assert_spin_locked(&dev->lock); + + /* + * If we touch a device that isn't accessible from this CPU we may get + * an external-abort. + */ + WARN_ON_ONCE(preemptible()); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &dev->fw_affinity)); + + return readl_relaxed(dev->mapped_hwpage + reg); +} + +static inline void mpam_write_reg(struct mpam_device *dev, u16 reg, u32 val) +{ + WARN_ON_ONCE(reg > SZ_MPAM_DEVICE); + assert_spin_locked(&dev->lock); + + /* + * If we touch a device that isn't accessible from this CPU we may get + * an external-abort. If we're lucky, we corrupt another mpam:component. + */ + WARN_ON_ONCE(preemptible()); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &dev->fw_affinity)); + + writel_relaxed(val, dev->mapped_hwpage + reg); +} + +static void +mpam_probe_update_sysprops(u16 max_partid, u16 max_pmg) +{ + lockdep_assert_held(&mpam_devices_lock); + + mpam_sysprops.max_partid = + (mpam_sysprops.max_partid < max_partid) ? + mpam_sysprops.max_partid : max_partid; + mpam_sysprops.max_pmg = + (mpam_sysprops.max_pmg < max_pmg) ? 
+ mpam_sysprops.max_pmg : max_pmg; +} + +static int mpam_device_probe(struct mpam_device *dev) +{ + u32 hwfeatures, part_sel; + u16 max_intpartid = 0; + u16 max_partid, max_pmg; + + if (mpam_read_reg(dev, MPAMF_AIDR) != MPAM_ARCHITECTURE_V1) { + pr_err_once("device at 0x%llx does not match MPAM architecture v1.0\n", + dev->hwpage_address); + return -EIO; + } + + hwfeatures = mpam_read_reg(dev, MPAMF_IDR); + max_partid = hwfeatures & MPAMF_IDR_PARTID_MAX_MASK; + max_pmg = (hwfeatures & MPAMF_IDR_PMG_MAX_MASK) >> MPAMF_IDR_PMG_MAX_SHIFT; + + dev->num_partid = max_partid + 1; + dev->num_pmg = max_pmg + 1; + + /* Partid Narrowing*/ + if (MPAMF_IDR_HAS_PARTID_NRW(hwfeatures)) { + u32 partid_nrw_features = mpam_read_reg(dev, MPAMF_PARTID_NRW_IDR); + + max_intpartid = partid_nrw_features & MPAMF_PARTID_NRW_IDR_MASK; + dev->num_intpartid = max_intpartid + 1; + mpam_set_feature(mpam_feat_part_nrw, &dev->features); + } + + mpam_probe_update_sysprops(max_partid, max_pmg); + + /* Cache Capacity Partitioning */ + if (MPAMF_IDR_HAS_CCAP_PART(hwfeatures)) { + u32 ccap_features = mpam_read_reg(dev, MPAMF_CCAP_IDR); + + pr_debug("probe: probed CCAP_PART\n"); + + dev->cmax_wd = ccap_features & MPAMF_CCAP_IDR_CMAX_WD; + if (dev->cmax_wd) + mpam_set_feature(mpam_feat_ccap_part, &dev->features); + } + + /* Cache Portion partitioning */ + if (MPAMF_IDR_HAS_CPOR_PART(hwfeatures)) { + u32 cpor_features = mpam_read_reg(dev, MPAMF_CPOR_IDR); + + pr_debug("probe: probed CPOR_PART\n"); + + dev->cpbm_wd = cpor_features & MPAMF_CPOR_IDR_CPBM_WD; + if (dev->cpbm_wd) + mpam_set_feature(mpam_feat_cpor_part, &dev->features); + } + + /* Memory bandwidth partitioning */ + if (MPAMF_IDR_HAS_MBW_PART(hwfeatures)) { + u32 mbw_features = mpam_read_reg(dev, MPAMF_MBW_IDR); + + pr_debug("probe: probed MBW_PART\n"); + + /* portion bitmap resolution */ + dev->mbw_pbm_bits = (mbw_features & MPAMF_MBW_IDR_BWPBM_WD) >> + MPAMF_MBW_IDR_BWPBM_WD_SHIFT; + if (dev->mbw_pbm_bits && (mbw_features & + MPAMF_MBW_IDR_HAS_PBM)) + mpam_set_feature(mpam_feat_mbw_part, &dev->features); + + dev->bwa_wd = (mbw_features & MPAMF_MBW_IDR_BWA_WD); + if (dev->bwa_wd && (mbw_features & MPAMF_MBW_IDR_HAS_MAX)) { + mpam_set_feature(mpam_feat_mbw_max, &dev->features); + /* we want to export MBW hardlimit support */ + mpam_set_feature(mpam_feat_part_hdl, &dev->features); + } + + if (dev->bwa_wd && (mbw_features & MPAMF_MBW_IDR_HAS_MIN)) + mpam_set_feature(mpam_feat_mbw_min, &dev->features); + + if (dev->bwa_wd && (mbw_features & MPAMF_MBW_IDR_HAS_PROP)) { + mpam_set_feature(mpam_feat_mbw_prop, &dev->features); + /* we want to export MBW hardlimit support */ + mpam_set_feature(mpam_feat_part_hdl, &dev->features); + } + } + + /* Priority partitioning */ + if (MPAMF_IDR_HAS_PRI_PART(hwfeatures)) { + u32 pri_features, hwdef_pri; + /* + * if narrow support, MPAMCFG_PART_SEL.INTERNAL must be 1 when + * reading/writing MPAMCFG register other than MPAMCFG_INTPARTID. 
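+ * The hardware default priorities read from MPAMCFG_PRI below therefore
+ * come from the internal PARTID currently selected.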
+ */ + if (mpam_has_feature(mpam_feat_part_nrw, dev->features)) { + part_sel = MPAMCFG_PART_SEL_INTERNAL; + mpam_write_reg(dev, MPAMCFG_PART_SEL, part_sel); + } + pri_features = mpam_read_reg(dev, MPAMF_PRI_IDR); + hwdef_pri = mpam_read_reg(dev, MPAMCFG_PRI); + + pr_debug("probe: probed PRI_PART\n"); + + dev->intpri_wd = (pri_features & MPAMF_PRI_IDR_INTPRI_WD) >> + MPAMF_PRI_IDR_INTPRI_WD_SHIFT; + if (dev->intpri_wd && (pri_features & MPAMF_PRI_IDR_HAS_INTPRI)) { + mpam_set_feature(mpam_feat_intpri_part, &dev->features); + dev->hwdef_intpri = MPAMCFG_INTPRI_GET(hwdef_pri); + if (pri_features & MPAMF_PRI_IDR_INTPRI_0_IS_LOW) + mpam_set_feature(mpam_feat_intpri_part_0_low, + &dev->features); + else + /* keep higher value higher priority */ + dev->hwdef_intpri = GENMASK(dev->intpri_wd - 1, 0) & + ~dev->hwdef_intpri; + + } + + dev->dspri_wd = (pri_features & MPAMF_PRI_IDR_DSPRI_WD) >> + MPAMF_PRI_IDR_DSPRI_WD_SHIFT; + if (dev->dspri_wd && (pri_features & MPAMF_PRI_IDR_HAS_DSPRI)) { + mpam_set_feature(mpam_feat_dspri_part, &dev->features); + dev->hwdef_dspri = MPAMCFG_DSPRI_GET(hwdef_pri); + if (pri_features & MPAMF_PRI_IDR_DSPRI_0_IS_LOW) + mpam_set_feature(mpam_feat_dspri_part_0_low, + &dev->features); + else + /* keep higher value higher priority */ + dev->hwdef_dspri = GENMASK(dev->dspri_wd - 1, 0) & + ~dev->hwdef_dspri; + } + } + + /* Performance Monitoring */ + if (MPAMF_IDR_HAS_MSMON(hwfeatures)) { + u32 msmon_features = mpam_read_reg(dev, MPAMF_MSMON_IDR); + + pr_debug("probe: probed MSMON\n"); + + if (msmon_features & MPAMF_MSMON_IDR_MSMON_CSU) { + u32 csumonidr; + + csumonidr = mpam_read_reg(dev, MPAMF_CSUMON_IDR); + dev->num_csu_mon = csumonidr & MPAMF_CSUMON_IDR_NUM_MON; + if (dev->num_csu_mon) + mpam_set_feature(mpam_feat_msmon_csu, + &dev->features); + } + if (msmon_features & MPAMF_MSMON_IDR_MSMON_MBWU) { + u32 mbwumonidr = mpam_read_reg(dev, MPAMF_MBWUMON_IDR); + + dev->num_mbwu_mon = mbwumonidr & + MPAMF_MBWUMON_IDR_NUM_MON; + if (dev->num_mbwu_mon) + mpam_set_feature(mpam_feat_msmon_mbwu, + &dev->features); + } + } + dev->probed = true; + + return 0; +} + +/* + * If device doesn't match class feature/configuration, do the right thing. + * For 'num' properties we can just take the minimum. + * For properties where the mismatched unused bits would make a difference, we + * nobble the class feature, as we can't configure all the devices. + * e.g. The L3 cache is composed of two devices with 13 and 17 portion + * bitmaps respectively. 
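+ * In that case cpor_part is cleared for the whole class, as no single
+ * bitmap width can be programmed on both devices.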
+ */ +static void __device_class_feature_mismatch(struct mpam_device *dev, + struct mpam_class *class) +{ + lockdep_assert_held(&mpam_devices_lock); /* we modify class */ + + if (class->cpbm_wd != dev->cpbm_wd) + mpam_clear_feature(mpam_feat_cpor_part, &class->features); + if (class->mbw_pbm_bits != dev->mbw_pbm_bits) + mpam_clear_feature(mpam_feat_mbw_part, &class->features); + + /* For num properties, take the minimum */ + if (class->num_partid != dev->num_partid) + class->num_partid = min(class->num_partid, dev->num_partid); + if (class->num_intpartid != dev->num_intpartid) + class->num_intpartid = min(class->num_intpartid, dev->num_intpartid); + if (class->num_pmg != dev->num_pmg) + class->num_pmg = min(class->num_pmg, dev->num_pmg); + if (class->num_csu_mon != dev->num_csu_mon) + class->num_csu_mon = min(class->num_csu_mon, dev->num_csu_mon); + if (class->num_mbwu_mon != dev->num_mbwu_mon) + class->num_mbwu_mon = min(class->num_mbwu_mon, + dev->num_mbwu_mon); + + /* bwa_wd is a count of bits, fewer bits means less precision */ + if (class->bwa_wd != dev->bwa_wd) + class->bwa_wd = min(class->bwa_wd, dev->bwa_wd); + + if (class->intpri_wd != dev->intpri_wd) + class->intpri_wd = min(class->intpri_wd, dev->intpri_wd); + if (class->dspri_wd != dev->dspri_wd) + class->dspri_wd = min(class->dspri_wd, dev->dspri_wd); + + /* {int,ds}pri may not have differing 0-low behaviour */ + if (mpam_has_feature(mpam_feat_intpri_part_0_low, class->features) != + mpam_has_feature(mpam_feat_intpri_part_0_low, dev->features)) + mpam_clear_feature(mpam_feat_intpri_part, &class->features); + if (mpam_has_feature(mpam_feat_dspri_part_0_low, class->features) != + mpam_has_feature(mpam_feat_dspri_part_0_low, dev->features)) + mpam_clear_feature(mpam_feat_dspri_part, &class->features); +} + +/* + * Squash common class=>component=>device->features down to the + * class->features + */ +static void mpam_enable_squash_features(void) +{ + unsigned long flags; + struct mpam_device *dev; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_devices_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + /* + * Copy the first component's first device's properties and + * features to the class. __device_class_feature_mismatch() + * will fix them as appropriate. + * It is not possible to have a component with no devices. 
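+ * The second loop below then intersects every device's features with the
+ * class, so the class only advertises what all of its devices support.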
+ */ + if (!list_empty(&class->components)) { + comp = list_first_entry_or_null(&class->components, + struct mpam_component, class_list); + if (WARN_ON(!comp)) + break; + + dev = list_first_entry_or_null(&comp->devices, + struct mpam_device, comp_list); + if (WARN_ON(!dev)) + break; + + spin_lock_irqsave(&dev->lock, flags); + class->features = dev->features; + class->cpbm_wd = dev->cpbm_wd; + class->mbw_pbm_bits = dev->mbw_pbm_bits; + class->bwa_wd = dev->bwa_wd; + class->intpri_wd = dev->intpri_wd; + class->dspri_wd = dev->dspri_wd; + class->num_partid = dev->num_partid; + class->num_intpartid = dev->num_intpartid; + class->num_pmg = dev->num_pmg; + class->num_csu_mon = dev->num_csu_mon; + class->num_mbwu_mon = dev->num_mbwu_mon; + class->hwdef_intpri = dev->hwdef_intpri; + class->hwdef_dspri = dev->hwdef_dspri; + spin_unlock_irqrestore(&dev->lock, flags); + } + + list_for_each_entry(comp, &class->components, class_list) { + list_for_each_entry(dev, &comp->devices, comp_list) { + spin_lock_irqsave(&dev->lock, flags); + __device_class_feature_mismatch(dev, class); + class->features &= dev->features; + spin_unlock_irqrestore(&dev->lock, flags); + } + } + } +} + +static int mpam_allocate_config(void) +{ + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_devices_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) { + comp->cfg = kcalloc(mpam_sysprops_num_partid(), sizeof(*comp->cfg), + GFP_KERNEL); + if (!comp->cfg) + return -ENOMEM; + } + } + + return 0; +} + +static const char *mpam_msc_err_str[_MPAM_NUM_ERRCODE] = { + [MPAM_ERRCODE_NONE] = "No Error", + [MPAM_ERRCODE_PARTID_SEL_RANGE] = "Out of range PARTID selected", + [MPAM_ERRCODE_REQ_PARTID_RANGE] = "Out of range PARTID requested", + [MPAM_ERRCODE_REQ_PMG_RANGE] = "Out of range PMG requested", + [MPAM_ERRCODE_MONITOR_RANGE] = "Out of range Monitor selected", + [MPAM_ERRCODE_MSMONCFG_ID_RANGE] = "Out of range Monitor:PARTID or PMG written", + + /* These two are about PARTID narrowing, which we don't support */ + [MPAM_ERRCODE_INTPARTID_RANGE] = "Out or range Internal-PARTID written", + [MPAM_ERRCODE_UNEXPECTED_INTERNAL] = "Internal-PARTID set but not expected", +}; + + +static irqreturn_t mpam_handle_error_irq(int irq, void *data) +{ + u32 device_esr; + u16 device_errcode; + struct mpam_device *dev = data; + + spin_lock(&dev->lock); + device_esr = mpam_read_reg(dev, MPAMF_ESR); + spin_unlock(&dev->lock); + + device_errcode = (device_esr & MPAMF_ESR_ERRCODE) >> MPAMF_ESR_ERRCODE_SHIFT; + if (device_errcode == MPAM_ERRCODE_NONE) + return IRQ_NONE; + + /* No-one expects MPAM errors! 
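+ * Log it and schedule mpam_failed_work so that no further configuration
+ * is attempted.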
*/ + if (device_errcode <= _MPAM_NUM_ERRCODE) + pr_err_ratelimited("unexpected error '%s' [esr:%x]\n", + mpam_msc_err_str[device_errcode], + device_esr); + else + pr_err_ratelimited("unexpected error %d [esr:%x]\n", + device_errcode, device_esr); + + if (!cmpxchg(&mpam_broken, -EINTR, 0)) + schedule_work(&mpam_failed_work); + + /* A write of 0 to MPAMF_ESR.ERRCODE clears level interrupts */ + spin_lock(&dev->lock); + mpam_write_reg(dev, MPAMF_ESR, 0); + spin_unlock(&dev->lock); + + return IRQ_HANDLED; +} +/* register and enable all device error interrupts */ +static void mpam_enable_irqs(void) +{ + struct mpam_device *dev; + int rc, irq, request_flags; + unsigned long irq_save_flags; + + list_for_each_entry(dev, &mpam_all_devices, glbl_list) { + spin_lock_irqsave(&dev->lock, irq_save_flags); + irq = dev->error_irq; + request_flags = dev->error_irq_flags; + spin_unlock_irqrestore(&dev->lock, irq_save_flags); + + if (request_flags & MPAM_IRQ_MODE_LEVEL) { + struct cpumask tmp; + bool inaccessible_cpus; + + request_flags = IRQF_TRIGGER_LOW | IRQF_SHARED; + + /* + * If the MSC is not accessible from any CPU the IRQ + * may be migrated to, we won't be able to clear it. + * ~dev->fw_affinity is all the CPUs that can't access + * the MSC. 'and' cpu_possible_mask tells us whether we + * care. + */ + spin_lock_irqsave(&dev->lock, irq_save_flags); + inaccessible_cpus = cpumask_andnot(&tmp, + cpu_possible_mask, + &dev->fw_affinity); + spin_unlock_irqrestore(&dev->lock, irq_save_flags); + + if (inaccessible_cpus) { + pr_err_once("NOT registering MPAM error level-irq that isn't globally reachable"); + continue; + } + } else { + request_flags = IRQF_TRIGGER_RISING | IRQF_SHARED; + } + + rc = request_irq(irq, mpam_handle_error_irq, request_flags, + "MPAM ERR IRQ", dev); + if (rc) { + pr_warn_ratelimited("Not support to register irq %u\n", irq); + continue; + } + + /* + * temporary: the interrupt will only be enabled when cpus + * subsequently come online after mpam_enable(). + */ + spin_lock_irqsave(&dev->lock, irq_save_flags); + dev->enable_error_irq = true; + spin_unlock_irqrestore(&dev->lock, irq_save_flags); + } +} + +static void mpam_disable_irqs(void) +{ + int irq; + bool do_unregister; + struct mpam_device *dev; + unsigned long irq_save_flags; + + list_for_each_entry(dev, &mpam_all_devices, glbl_list) { + spin_lock_irqsave(&dev->lock, irq_save_flags); + irq = dev->error_irq; + do_unregister = dev->enable_error_irq; + dev->enable_error_irq = false; + spin_unlock_irqrestore(&dev->lock, irq_save_flags); + + if (do_unregister) + free_irq(irq, dev); + } +} + +/* + * Enable mpam once all devices have been probed. + * Scheduled by mpam_discovery_complete() once all devices have been created. + * Also scheduled when new devices are probed when new CPUs come online. + */ +static void mpam_enable(struct work_struct *work) +{ + int err; + unsigned long flags; + struct mpam_device *dev; + bool all_devices_probed = true; + static atomic_t once; + + /* Have we probed all the devices? 
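+ * mpam_enable() is scheduled each time a new device is probed; only the
+ * first caller that finds every device probed goes on to do the setup.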
*/ + mutex_lock(&mpam_devices_lock); + list_for_each_entry(dev, &mpam_all_devices, glbl_list) { + spin_lock_irqsave(&dev->lock, flags); + if (!dev->probed) + all_devices_probed = false; + spin_unlock_irqrestore(&dev->lock, flags); + + if (!all_devices_probed) + break; + } + mutex_unlock(&mpam_devices_lock); + + if (!(all_devices_probed && !atomic_fetch_inc(&once))) + return; + + mutex_lock(&mpam_devices_lock); + mpam_enable_squash_features(); + err = mpam_allocate_config(); + if (err) { + mutex_unlock(&mpam_devices_lock); + return; + } + mutex_unlock(&mpam_devices_lock); + + mpam_enable_irqs(); + + /* + * mpam_enable() runs in parallel with cpuhp callbacks bringing other + * CPUs online, as we eagerly schedule the work. To give resctrl a + * clean start, we make all cpus look offline, set resctrl_registered, + * and then bring them back. + */ + mutex_lock(&mpam_cpuhp_lock); + if (!mpam_cpuhp_state) { + /* We raced with mpam_failed(). */ + mutex_unlock(&mpam_cpuhp_lock); + return; + } + cpuhp_remove_state(mpam_cpuhp_state); + + mutex_lock(&mpam_devices_lock); + err = mpam_resctrl_setup(); + if (!err) { + err = mpam_resctrl_init(); + if (!err) + resctrl_registered = true; + } + if (err) + pr_err("Failed to setup/init resctrl\n"); + mutex_unlock(&mpam_devices_lock); + + mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "mpam:online", mpam_cpu_online, + mpam_cpu_offline); + if (mpam_cpuhp_state <= 0) + pr_err("Failed to re-register 'dyn' cpuhp callbacks"); + mutex_unlock(&mpam_cpuhp_lock); +} + +static void mpam_failed(struct work_struct *work) +{ + /* + * Make it look like all CPUs are offline. This also resets the + * cpu default values and disables interrupts. + */ + mutex_lock(&mpam_cpuhp_lock); + if (mpam_cpuhp_state) { + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + + mpam_disable_irqs(); + } + mutex_unlock(&mpam_cpuhp_lock); +} + +static struct mpam_device * +mpam_device_alloc(struct mpam_component *comp) +{ + struct mpam_device *dev; + + lockdep_assert_held(&mpam_devices_lock); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&dev->lock); + INIT_LIST_HEAD(&dev->comp_list); + INIT_LIST_HEAD(&dev->glbl_list); + + dev->comp = comp; + list_add(&dev->comp_list, &comp->devices); + list_add(&dev->glbl_list, &mpam_all_devices); + + return dev; +} + +static void mpam_devices_destroy(struct mpam_component *comp) +{ + struct mpam_device *dev, *tmp; + + lockdep_assert_held(&mpam_devices_lock); + + list_for_each_entry_safe(dev, tmp, &comp->devices, comp_list) { + list_del(&dev->comp_list); + list_del(&dev->glbl_list); + kfree(dev); + } +} + +static struct mpam_component *mpam_component_alloc(int id) +{ + struct mpam_component *comp; + + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&comp->devices); + INIT_LIST_HEAD(&comp->class_list); + + comp->comp_id = id; + + return comp; +} + +struct mpam_component *mpam_component_get(struct mpam_class *class, int id, + bool alloc) +{ + struct mpam_component *comp; + + list_for_each_entry(comp, &class->components, class_list) { + if (comp->comp_id == id) + return comp; + } + + if (!alloc) + return ERR_PTR(-ENOENT); + + comp = mpam_component_alloc(id); + if (IS_ERR(comp)) + return comp; + + list_add(&comp->class_list, &class->components); + + return comp; +} + +static struct mpam_class *mpam_class_alloc(u8 level_idx, + enum mpam_class_types type) +{ + struct mpam_class *class; + + lockdep_assert_held(&mpam_devices_lock); + + 
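+	/* On error paths classes are torn down via mpam_discovery_failed() */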
class = kzalloc(sizeof(*class), GFP_KERNEL); + if (!class) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&class->components); + INIT_LIST_HEAD(&class->classes_list); + + mutex_init(&class->lock); + + class->level = level_idx; + class->type = type; + + list_add(&class->classes_list, &mpam_classes); + + return class; +} + +/* free all components and devices of this class */ +static void mpam_class_destroy(struct mpam_class *class) +{ + struct mpam_component *comp, *tmp; + + lockdep_assert_held(&mpam_devices_lock); + + list_for_each_entry_safe(comp, tmp, &class->components, class_list) { + mpam_devices_destroy(comp); + list_del(&comp->class_list); + kfree(comp->cfg); + kfree(comp); + } +} + +static struct mpam_class *mpam_class_get(u8 level_idx, + enum mpam_class_types type, + bool alloc) +{ + bool found = false; + struct mpam_class *class; + + lockdep_assert_held(&mpam_devices_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type == type && class->level == level_idx) { + found = true; + break; + } + } + + if (found) + return class; + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_class_alloc(level_idx, type); +} + +/* + * Create a a device with this @hwpage_address, of class type:level_idx. + * class/component structures may be allocated. + * Returns the new device, or an ERR_PTR(). + */ +struct mpam_device * +__mpam_device_create(u8 level_idx, enum mpam_class_types type, + int component_id, const struct cpumask *fw_affinity, + phys_addr_t hwpage_address) +{ + struct mpam_device *dev; + struct mpam_class *class; + struct mpam_component *comp; + + if (!fw_affinity) + fw_affinity = cpu_possible_mask; + + mutex_lock(&mpam_devices_lock); + do { + class = mpam_class_get(level_idx, type, true); + if (IS_ERR(class)) { + dev = (void *)class; + break; + } + + comp = mpam_component_get(class, component_id, true); + if (IS_ERR(comp)) { + dev = (void *)comp; + break; + } + + /* + * For caches we learn the affinity from the cache-id as CPUs + * come online. For everything else, we have to be told. + */ + if (type != MPAM_CLASS_CACHE) + cpumask_or(&comp->fw_affinity, &comp->fw_affinity, + fw_affinity); + + dev = mpam_device_alloc(comp); + if (IS_ERR(dev)) + break; + + dev->fw_affinity = *fw_affinity; + dev->hwpage_address = hwpage_address; + dev->mapped_hwpage = ioremap(hwpage_address, SZ_MPAM_DEVICE); + if (!dev->mapped_hwpage) + dev = ERR_PTR(-ENOMEM); + } while (0); + mutex_unlock(&mpam_devices_lock); + + return dev; +} + +void mpam_device_set_error_irq(struct mpam_device *dev, u32 irq, + u32 flags) +{ + unsigned long irq_save_flags; + + spin_lock_irqsave(&dev->lock, irq_save_flags); + dev->error_irq = irq; + dev->error_irq_flags = flags & MPAM_IRQ_FLAGS_MASK; + spin_unlock_irqrestore(&dev->lock, irq_save_flags); +} + +void mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq, + u32 flags) +{ + unsigned long irq_save_flags; + + spin_lock_irqsave(&dev->lock, irq_save_flags); + dev->overflow_irq = irq; + dev->overflow_irq_flags = flags & MPAM_IRQ_FLAGS_MASK; + spin_unlock_irqrestore(&dev->lock, irq_save_flags); +} + +static int mpam_cpus_have_feature(void) +{ + if (!cpus_have_const_cap(ARM64_HAS_MPAM)) + return 0; + return 1; +} + +/* + * get max partid from reading SYS_MPAMIDR_EL1. + */ +static inline u16 mpam_cpu_max_partid(void) +{ + u64 reg; + + reg = mpam_read_sysreg_s(SYS_MPAMIDR_EL1, "SYS_MPAMIDR_EL1"); + return reg & PARTID_MAX_MASK; +} + +/* + * get max pmg from reading SYS_MPAMIDR_EL1. 
+ */ +static inline u16 mpam_cpu_max_pmg(void) +{ + u64 reg; + + reg = mpam_read_sysreg_s(SYS_MPAMIDR_EL1, "SYS_MPAMIDR_EL1"); + return (reg & PMG_MAX_MASK) >> PMG_MAX_SHIFT; +} + +/* + * prepare for initializing devices. + */ +int mpam_discovery_start(void) +{ + if (!mpam_cpus_have_feature()) + return -EOPNOTSUPP; + + mpam_sysprops.max_partid = mpam_cpu_max_partid(); + mpam_sysprops.max_pmg = mpam_cpu_max_pmg(); + + INIT_WORK(&mpam_enable_work, mpam_enable); + INIT_WORK(&mpam_failed_work, mpam_failed); + + return 0; +} + +static void mpam_reset_device_bitmap(struct mpam_device *dev, u16 reg, u16 wd) +{ + u32 bm = ~0; + int i; + + lockdep_assert_held(&dev->lock); + + /* write all but the last full-32bit-word */ + for (i = 0; i < wd / 32; i++, reg += sizeof(bm)) + mpam_write_reg(dev, reg, bm); + + /* and the last partial 32bit word */ + bm = GENMASK(wd % 32, 0); + if (bm) + mpam_write_reg(dev, reg, bm); +} + +static void mpam_reset_device_config(struct mpam_component *comp, + struct mpam_device *dev, u32 partid) +{ + u16 intpri, dspri; + u32 pri_val = 0; + u32 mbw_max; + + lockdep_assert_held(&dev->lock); + + if (mpam_has_feature(mpam_feat_part_nrw, dev->features)) + partid = partid | MPAMCFG_PART_SEL_INTERNAL; + mpam_write_reg(dev, MPAMCFG_PART_SEL, partid); + wmb(); /* subsequent writes must be applied to our new partid */ + + if (mpam_has_feature(mpam_feat_cpor_part, dev->features)) + mpam_reset_device_bitmap(dev, MPAMCFG_CPBM, dev->cpbm_wd); + if (mpam_has_feature(mpam_feat_mbw_part, dev->features)) + mpam_reset_device_bitmap(dev, MPAMCFG_MBW_PBM, + dev->mbw_pbm_bits); + if (mpam_has_feature(mpam_feat_mbw_max, dev->features)) { + mbw_max = MBW_MAX_SET(MBW_MAX_BWA_FRACT(dev->bwa_wd), dev->bwa_wd); + mbw_max = MBW_MAX_SET_HDL(mbw_max); + mpam_write_reg(dev, MPAMCFG_MBW_MAX, mbw_max); + } + if (mpam_has_feature(mpam_feat_mbw_min, dev->features)) { + mpam_write_reg(dev, MPAMCFG_MBW_MIN, 0); + } + + if (mpam_has_feature(mpam_feat_intpri_part, dev->features) || + mpam_has_feature(mpam_feat_dspri_part, dev->features)) { + intpri = dev->hwdef_intpri; + dspri = dev->hwdef_dspri; + + if (mpam_has_feature(mpam_feat_intpri_part, dev->features)) { + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, dev->features)) + intpri = GENMASK(dev->intpri_wd - 1, 0) & ~intpri; + pri_val |= intpri; + } + + if (mpam_has_feature(mpam_feat_dspri_part, dev->features)) { + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, dev->features)) + dspri = GENMASK(dev->dspri_wd - 1, 0) & ~dspri; + pri_val |= (dspri << MPAMCFG_PRI_DSPRI_SHIFT); + } + + mpam_write_reg(dev, MPAMCFG_PRI, pri_val); + } + mb(); /* complete the configuration before the cpu can use this partid */ +} + +/* + * Called from cpuhp callbacks and with the cpus_read_lock() held from + * mpam_reset_devices(). 
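+ * Restores the hardware defaults for every partid (or every internal
+ * partid when narrowing is implemented) on this device.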
+ */ +static void mpam_reset_device(struct mpam_component *comp, + struct mpam_device *dev) +{ + u32 partid; + + lockdep_assert_held(&dev->lock); + + if (dev->enable_error_irq) + mpam_write_reg(dev, MPAMF_ECR, MPAMF_ECR_INTEN); + + if (!mpam_has_feature(mpam_feat_part_nrw, dev->features)) { + for (partid = 0; partid < dev->num_partid; partid++) + mpam_reset_device_config(comp, dev, partid); + } else { + for (partid = 0; partid < dev->num_intpartid; partid++) + mpam_reset_device_config(comp, dev, partid); + } +} + +static int __online_devices(struct mpam_component *comp, int cpu) +{ + int err = 0; + unsigned long flags; + struct mpam_device *dev; + bool new_device_probed = false; + + list_for_each_entry(dev, &comp->devices, comp_list) { + if (!cpumask_test_cpu(cpu, &dev->fw_affinity)) + continue; + + spin_lock_irqsave(&dev->lock, flags); + if (!dev->probed) { + err = mpam_device_probe(dev); + if (!err) + new_device_probed = true; + } + + if (!err && cpumask_empty(&dev->online_affinity)) + mpam_reset_device(comp, dev); + + cpumask_set_cpu(cpu, &dev->online_affinity); + spin_unlock_irqrestore(&dev->lock, flags); + + if (err) + return err; + } + + if (new_device_probed) + return 1; + + return 0; +} + +/* + * Firmware didn't give us an affinity, but a cache-id, if this cpu has that + * cache-id, update the fw_affinity for this component. + */ +static void +mpam_sync_cpu_cache_component_fw_affinity(struct mpam_class *class, int cpu) +{ + int cpu_cache_id; + struct cacheinfo *leaf; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_devices_lock); /* we modify mpam_sysprops */ + + if (class->type != MPAM_CLASS_CACHE) + return; + + cpu_cache_id = cpu_to_node(cpu); + comp = mpam_component_get(class, cpu_cache_id, false); + + /* This cpu does not have a component of this class */ + if (IS_ERR(comp)) + return; + + /* + * The resctrl rmid_threshold is based on cache size. Keep track of + * the biggest cache we've seen. 
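+ * The size recorded here is what mpam_sysprops_llc_size() reports.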
+ */ + leaf = get_cpu_cache_leaf(cpu, class->level); + if (leaf) + mpam_sysprops.mpam_llc_size = max(mpam_sysprops.mpam_llc_size, + leaf->size); + + cpumask_set_cpu(cpu, &comp->fw_affinity); + cpumask_set_cpu(cpu, &class->fw_affinity); +} + +static int mpam_cpu_online(unsigned int cpu) +{ + int err = 0; + struct mpam_class *class; + struct mpam_component *comp; + bool new_device_probed = false; + + mutex_lock(&mpam_devices_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + mpam_sync_cpu_cache_component_fw_affinity(class, cpu); + + list_for_each_entry(comp, &class->components, class_list) { + if (!cpumask_test_cpu(cpu, &comp->fw_affinity)) + continue; + + err = __online_devices(comp, cpu); + if (err > 0) + new_device_probed = true; + if (err < 0) + break; // mpam_broken + } + } + + if (new_device_probed && err >= 0) + schedule_work(&mpam_enable_work); + + mutex_unlock(&mpam_devices_lock); + if (err < 0) { + if (!cmpxchg(&mpam_broken, err, 0)) + schedule_work(&mpam_failed_work); + return err; + } + + if (resctrl_registered) + mpam_resctrl_cpu_online(cpu); + + return 0; +} + +static int mpam_cpu_offline(unsigned int cpu) +{ + unsigned long flags; + struct mpam_device *dev; + + mutex_lock(&mpam_devices_lock); + list_for_each_entry(dev, &mpam_all_devices, glbl_list) { + if (!cpumask_test_cpu(cpu, &dev->online_affinity)) + continue; + cpumask_clear_cpu(cpu, &dev->online_affinity); + + if (cpumask_empty(&dev->online_affinity)) { + spin_lock_irqsave(&dev->lock, flags); + mpam_write_reg(dev, MPAMF_ECR, 0); + spin_unlock_irqrestore(&dev->lock, flags); + } + } + + mutex_unlock(&mpam_devices_lock); + + if (resctrl_registered) + mpam_resctrl_cpu_offline(cpu); + + return 0; +} + +int mpam_discovery_complete(void) +{ + int ret = 0; + + mutex_lock(&mpam_cpuhp_lock); + mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "mpam:online", mpam_cpu_online, + mpam_cpu_offline); + if (mpam_cpuhp_state <= 0) { + pr_err("Failed to register 'dyn' cpuhp callbacks"); + ret = -EINVAL; + } + mutex_unlock(&mpam_cpuhp_lock); + + return ret; +} + +void mpam_discovery_failed(void) +{ + struct mpam_class *class, *tmp; + + mutex_lock(&mpam_devices_lock); + list_for_each_entry_safe(class, tmp, &mpam_classes, classes_list) { + mpam_class_destroy(class); + list_del(&class->classes_list); + kfree(class); + } + mutex_unlock(&mpam_devices_lock); +} + +u16 mpam_sysprops_num_partid(void) +{ + /* At least one partid for system width */ + return mpam_sysprops.max_partid + 1; +} + +u16 mpam_sysprops_num_pmg(void) +{ + /* At least one pmg for system width */ + return mpam_sysprops.max_pmg + 1; +} + +u32 mpam_sysprops_llc_size(void) +{ + return mpam_sysprops.mpam_llc_size; +} + +static u32 mpam_device_read_csu_mon(struct mpam_device *dev, + struct sync_args *args) +{ + u16 mon; + u32 clt, flt, cur_clt, cur_flt; + + mon = args->mon; + + mpam_write_reg(dev, MSMON_CFG_MON_SEL, mon); + wmb(); /* subsequent writes must be applied to this mon */ + + /* + * We don't bother with capture as we don't expose a way of measuring + * multiple partid:pmg with a single capture. + */ + clt = MSMON_CFG_CTL_MATCH_PARTID | MSMON_CFG_CSU_TYPE; + if (args->match_pmg) + clt |= MSMON_CFG_CTL_MATCH_PMG; + flt = args->closid.reqpartid | + (args->pmg << MSMON_CFG_CSU_FLT_PMG_SHIFT); + + /* + * We read the existing configuration to avoid re-writing the same + * values. 
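+ * Reprogramming the monitor also zeroes MSMON_CSU, which would throw away
+ * the count accumulated so far.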
+ */ + cur_flt = mpam_read_reg(dev, MSMON_CFG_CSU_FLT); + cur_clt = mpam_read_reg(dev, MSMON_CFG_CSU_CTL); + + if (cur_flt != flt || cur_clt != (clt | MSMON_CFG_CTL_EN)) { + mpam_write_reg(dev, MSMON_CFG_CSU_FLT, flt); + + /* + * Write the ctl with the enable bit cleared, reset the + * counter, then enable counter. + */ + mpam_write_reg(dev, MSMON_CFG_CSU_CTL, clt); + wmb(); + + mpam_write_reg(dev, MSMON_CSU, 0); + wmb(); + + clt |= MSMON_CFG_CTL_EN; + mpam_write_reg(dev, MSMON_CFG_CSU_CTL, clt); + wmb(); + } + + return mpam_read_reg(dev, MSMON_CSU); +} + +static u32 mpam_device_read_mbwu_mon(struct mpam_device *dev, + struct sync_args *args) +{ + u16 mon; + u32 clt, flt, cur_clt, cur_flt; + + mon = args->mon; + + mpam_write_reg(dev, MSMON_CFG_MON_SEL, mon); + wmb(); /* subsequent writes must be applied to this mon */ + + /* + * We don't bother with capture as we don't expose a way of measuring + * multiple partid:pmg with a single capture. + */ + clt = MSMON_CFG_CTL_MATCH_PARTID | MSMON_CFG_MBWU_TYPE; + if (args->match_pmg) + clt |= MSMON_CFG_CTL_MATCH_PMG; + flt = args->closid.reqpartid | + (args->pmg << MSMON_CFG_MBWU_FLT_PMG_SHIFT); + + /* + * We read the existing configuration to avoid re-writing the same + * values. + */ + cur_flt = mpam_read_reg(dev, MSMON_CFG_MBWU_FLT); + cur_clt = mpam_read_reg(dev, MSMON_CFG_MBWU_CTL); + + if (cur_flt != flt || cur_clt != (clt | MSMON_CFG_CTL_EN)) { + mpam_write_reg(dev, MSMON_CFG_MBWU_FLT, flt); + + /* + * Write the ctl with the enable bit cleared, reset the + * counter, then enable counter. + */ + mpam_write_reg(dev, MSMON_CFG_MBWU_CTL, clt); + wmb(); + + mpam_write_reg(dev, MSMON_MBWU, 0); + wmb(); + + clt |= MSMON_CFG_CTL_EN; + mpam_write_reg(dev, MSMON_CFG_MBWU_CTL, clt); + wmb(); + } + + return mpam_read_reg(dev, MSMON_MBWU); +} + +static int mpam_device_frob_mon(struct mpam_device *dev, + struct mpam_device_sync *ctx) +{ + struct sync_args *args = ctx->args; + u32 val; + + lockdep_assert_held(&dev->lock); + + if (mpam_broken) + return -EIO; + + if (!args) + return -EINVAL; + + if (args->eventid == QOS_L3_OCCUP_EVENT_ID && + mpam_has_feature(mpam_feat_msmon_csu, dev->features)) + val = mpam_device_read_csu_mon(dev, args); + else if (args->eventid == QOS_L3_MBM_LOCAL_EVENT_ID && + mpam_has_feature(mpam_feat_msmon_mbwu, dev->features)) + val = mpam_device_read_mbwu_mon(dev, args); + else + return -EOPNOTSUPP; + + if (val & MSMON___NRDY) + return -EBUSY; + + val = val & MSMON___VALUE; + atomic64_add(val, &ctx->mon_value); + return 0; +} + +static void mpam_device_narrow_map(struct mpam_device *dev, u32 partid, + u32 intpartid) +{ + int cur_intpartid; + + lockdep_assert_held(&dev->lock); + + mpam_write_reg(dev, MPAMCFG_PART_SEL, partid); + wmb(); /* subsequent writes must be applied to our new partid */ + + cur_intpartid = mpam_read_reg(dev, MPAMCFG_INTPARTID); + /* write association, this need set 16 bit to 1 */ + intpartid = intpartid | MPAMCFG_INTPARTID_INTERNAL; + /* reqpartid has already been associated to this intpartid */ + if (cur_intpartid == intpartid) + return; + + mpam_write_reg(dev, MPAMCFG_INTPARTID, intpartid); +} + +/* + * partid should be narrowed to intpartid if this feature implemented, + * before writing to register MPAMCFG_PART_SEL should we check this. 
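+ * On success *partid holds the value to program into MPAMCFG_PART_SEL,
+ * e.g. intpartid 2 becomes (2 | MPAMCFG_PART_SEL_INTERNAL) when narrowing
+ * is implemented.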
+ */ +static int try_to_narrow_device_intpartid(struct mpam_device *dev, + u32 *partid, u32 intpartid) +{ + if (!mpam_has_part_sel(dev->features)) + return -EINVAL; + + if (mpam_has_feature(mpam_feat_part_nrw, dev->features)) { + mpam_device_narrow_map(dev, *partid, intpartid); + /* narrowing intpartid success, then set 16 bit to 1*/ + *partid = intpartid | MPAMCFG_PART_SEL_INTERNAL; + } + + return 0; +} + +static int +mpam_device_config(struct mpam_device *dev, struct sd_closid *closid, + struct mpam_config *cfg) +{ + u16 cmax = GENMASK(dev->cmax_wd, 0); + u32 pri_val = 0; + u16 intpri, dspri, max_intpri, max_dspri; + u32 mbw_pbm, mbw_max, mbw_min; + /* + * if dev supports narrowing, narrowing first and then apply this slave's + * configuration. + */ + u32 intpartid = closid->intpartid; + u32 partid = closid->reqpartid; + + lockdep_assert_held(&dev->lock); + + if (try_to_narrow_device_intpartid(dev, &partid, intpartid)) + return -EINVAL; + + mpam_write_reg(dev, MPAMCFG_PART_SEL, partid); + wmb(); /* subsequent writes must be applied to our new partid */ + + if (mpam_has_feature(mpam_feat_ccap_part, dev->features)) + mpam_write_reg(dev, MPAMCFG_CMAX, cmax); + + if (mpam_has_feature(mpam_feat_cpor_part, dev->features)) { + if (cfg && mpam_has_feature(mpam_feat_cpor_part, cfg->valid)) { + /* + * cpor_part being valid implies the bitmap fits in a + * single write. + */ + mpam_write_reg(dev, MPAMCFG_CPBM, cfg->cpbm); + } + } + + if (mpam_has_feature(mpam_feat_mbw_part, dev->features)) { + mbw_pbm = cfg->mbw_pbm; + if (cfg && mpam_has_feature(mpam_feat_mbw_part, cfg->valid)) { + if (!mpam_has_feature(mpam_feat_part_hdl, cfg->valid) || + (mpam_has_feature(mpam_feat_part_hdl, cfg->valid) && cfg->hdl)) + mbw_pbm = MBW_PROP_SET_HDL(cfg->mbw_pbm); + mpam_write_reg(dev, MPAMCFG_MBW_PBM, mbw_pbm); + } + } + + if (mpam_has_feature(mpam_feat_mbw_max, dev->features)) { + if (cfg && mpam_has_feature(mpam_feat_mbw_max, cfg->valid)) { + mbw_max = MBW_MAX_SET(cfg->mbw_max, dev->bwa_wd); + if (!mpam_has_feature(mpam_feat_part_hdl, cfg->valid) || + (mpam_has_feature(mpam_feat_part_hdl, cfg->valid) && cfg->hdl)) + mbw_max = MBW_MAX_SET_HDL(mbw_max); + mpam_write_reg(dev, MPAMCFG_MBW_MAX, mbw_max); + } + } + + if (mpam_has_feature(mpam_feat_mbw_min, dev->features)) { + if (cfg && mpam_has_feature(mpam_feat_mbw_min, cfg->valid)) { + mbw_min = MBW_MAX_SET(cfg->mbw_min, dev->bwa_wd); + mpam_write_reg(dev, MPAMCFG_MBW_MIN, mbw_min); + } + } + + if (mpam_has_feature(mpam_feat_intpri_part, dev->features) || + mpam_has_feature(mpam_feat_dspri_part, dev->features)) { + if (mpam_has_feature(mpam_feat_intpri_part, cfg->valid) && + mpam_has_feature(mpam_feat_intpri_part, dev->features)) { + max_intpri = GENMASK(dev->intpri_wd - 1, 0); + /* + * Each priority portion only occupys a bit, not only that + * we leave lowest priority, which may be not suitable when + * owning large dspri_wd or intpri_wd. + * dspri and intpri are from same input, so if one + * exceeds it's max width, set it to max priority. + */ + intpri = (cfg->intpri > max_intpri) ? max_intpri : cfg->intpri; + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, + dev->features)) + intpri = GENMASK(dev->intpri_wd - 1, 0) & ~intpri; + pri_val |= intpri; + } + if (mpam_has_feature(mpam_feat_dspri_part, cfg->valid) && + mpam_has_feature(mpam_feat_dspri_part, dev->features)) { + max_dspri = GENMASK(dev->dspri_wd - 1, 0); + dspri = (cfg->dspri > max_dspri) ? 
max_dspri : cfg->dspri; + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, + dev->features)) + dspri = GENMASK(dev->dspri_wd - 1, 0) & ~dspri; + pri_val |= (dspri << MPAMCFG_PRI_DSPRI_SHIFT); + } + + mpam_write_reg(dev, MPAMCFG_PRI, pri_val); + } + + /* + * complete the configuration before the cpu can + * use this partid + */ + mb(); + + return 0; +} + +static void mpam_component_device_sync(void *__ctx) +{ + int err = 0; + u32 reqpartid; + unsigned long flags; + struct mpam_device *dev; + struct mpam_device_sync *ctx = (struct mpam_device_sync *)__ctx; + struct mpam_component *comp = ctx->comp; + struct sync_args *args = ctx->args; + + list_for_each_entry(dev, &comp->devices, comp_list) { + if (cpumask_intersects(&dev->online_affinity, + &ctx->updated_on)) + continue; + + /* This device needs updating, can I reach it? */ + if (!cpumask_test_cpu(smp_processor_id(), + &dev->online_affinity)) + continue; + + /* Apply new configuration to this device */ + err = 0; + spin_lock_irqsave(&dev->lock, flags); + if (args) { + /* + * at this time reqpartid shows where the + * configuration was stored. + */ + reqpartid = args->closid.reqpartid; + if (ctx->config_mon) + err = mpam_device_frob_mon(dev, ctx); + else + err = mpam_device_config(dev, &args->closid, + &comp->cfg[reqpartid]); + } else { + mpam_reset_device(comp, dev); + } + spin_unlock_irqrestore(&dev->lock, flags); + if (err) + cmpxchg(&ctx->error, 0, err); + } + + cpumask_set_cpu(smp_processor_id(), &ctx->updated_on); +} + +/** + * in some cases/platforms the MSC register access is only possible with + * the associated CPUs. And need to check if those CPUS are online before + * accessing it. So we use those CPUs dev->online_affinity to apply config. + */ +static int do_device_sync(struct mpam_component *comp, + struct mpam_device_sync *sync_ctx) +{ + int cpu; + struct mpam_device *dev; + + lockdep_assert_cpus_held(); + + cpu = get_cpu(); + if (cpumask_test_cpu(cpu, &comp->fw_affinity)) + mpam_component_device_sync(sync_ctx); + put_cpu(); + + /* + * Find the set of other CPUs we need to run on to update + * this component + */ + list_for_each_entry(dev, &comp->devices, comp_list) { + if (sync_ctx->error) + break; + + if (cpumask_intersects(&dev->online_affinity, + &sync_ctx->updated_on)) + continue; + + /* + * This device needs the config applying, and hasn't been + * reachable by any cpu so far. + */ + cpu = cpumask_any(&dev->online_affinity); + smp_call_function_single(cpu, mpam_component_device_sync, + sync_ctx, 1); + } + + return sync_ctx->error; +} + +static inline void +mpam_device_sync_config_prepare(struct mpam_component *comp, + struct mpam_device_sync *sync_ctx, struct sync_args *args) +{ + sync_ctx->comp = comp; + sync_ctx->args = args; + sync_ctx->config_mon = false; + sync_ctx->error = 0; + cpumask_clear(&sync_ctx->updated_on); +} + +int mpam_component_config(struct mpam_component *comp, struct sync_args *args) +{ + struct mpam_device_sync sync_ctx; + + mpam_device_sync_config_prepare(comp, &sync_ctx, args); + + return do_device_sync(comp, &sync_ctx); +} + +/* + * Reset every component, configuring every partid unrestricted. 
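+ * Passing a NULL sync_args down to mpam_component_device_sync() makes it
+ * call mpam_reset_device() rather than apply a stored configuration.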
+ */ +void mpam_reset_devices(void) +{ + struct mpam_class *class; + struct mpam_component *comp; + + mutex_lock(&mpam_devices_lock); + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) + mpam_component_config(comp, NULL); + } + mutex_unlock(&mpam_devices_lock); +} + +static inline void +mpam_device_sync_mon_prepare(struct mpam_component *comp, + struct mpam_device_sync *sync_ctx, struct sync_args *args) +{ + sync_ctx->comp = comp; + sync_ctx->args = args; + sync_ctx->error = 0; + sync_ctx->config_mon = true; + cpumask_clear(&sync_ctx->updated_on); + atomic64_set(&sync_ctx->mon_value, 0); +} + +int mpam_component_mon(struct mpam_component *comp, + struct sync_args *args, u64 *result) +{ + int ret; + struct mpam_device_sync sync_ctx; + + mpam_device_sync_mon_prepare(comp, &sync_ctx, args); + + ret = do_device_sync(comp, &sync_ctx); + if (!ret && result) + *result = atomic64_read(&sync_ctx.mon_value); + + return ret; +} + +static void mpam_component_read_mpamcfg(void *_ctx) +{ + unsigned long flags; + struct mpam_device *dev; + struct mpam_device_sync *ctx = (struct mpam_device_sync *)_ctx; + struct mpam_component *comp = ctx->comp; + struct sync_args *args = ctx->args; + u64 val = 0; + u32 partid, intpartid; + u32 dspri = 0; + u32 intpri = 0; + u64 range; + + if (!args) + return; + + + partid = args->closid.reqpartid; + intpartid = args->closid.intpartid; + + list_for_each_entry(dev, &comp->devices, comp_list) { + if (!cpumask_test_cpu(smp_processor_id(), + &dev->online_affinity)) + continue; + + spin_lock_irqsave(&dev->lock, flags); + if (try_to_narrow_device_intpartid(dev, &partid, intpartid)) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + mpam_write_reg(dev, MPAMCFG_PART_SEL, partid); + wmb(); + + switch (args->eventid) { + case QOS_CAT_CPBM_EVENT_ID: + if (!mpam_has_feature(mpam_feat_cpor_part, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_CPBM); + break; + case QOS_CAT_CMAX_EVENT_ID: + if (!mpam_has_feature(mpam_feat_ccap_part, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_CMAX); + break; + case QOS_MBA_MAX_EVENT_ID: + if (!mpam_has_feature(mpam_feat_mbw_max, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_MBW_MAX); + range = MBW_MAX_BWA_FRACT(dev->bwa_wd); + val = MBW_MAX_GET(val, dev->bwa_wd) * (MAX_MBA_BW - 1) / range; + break; + case QOS_MBA_MIN_EVENT_ID: + if (!mpam_has_feature(mpam_feat_mbw_min, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_MBW_MIN); + range = MBW_MAX_BWA_FRACT(dev->bwa_wd); + val = MBW_MAX_GET(val, dev->bwa_wd) * (MAX_MBA_BW - 1) / range; + break; + case QOS_MBA_PBM_EVENT_ID: + if (!mpam_has_feature(mpam_feat_mbw_part, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_MBW_PBM); + range = dev->mbw_pbm_bits; + val = val * MAX_MBA_BW / range; + break; + case QOS_MBA_HDL_EVENT_ID: + if (!mpam_has_feature(mpam_feat_mbw_max, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_MBW_MAX); + val = MBW_MAX_GET_HDL(val); + break; + case QOS_CAT_INTPRI_EVENT_ID: + case QOS_MBA_INTPRI_EVENT_ID: + if (!mpam_has_feature(mpam_feat_intpri_part, dev->features)) + break; + val = mpam_read_reg(dev, MPAMCFG_PRI); + intpri = MPAMCFG_INTPRI_GET(val); + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, dev->features)) + intpri = GENMASK(dev->intpri_wd - 1, 0) & ~intpri; + val = intpri; + break; + case QOS_CAT_DSPRI_EVENT_ID: + case QOS_MBA_DSPRI_EVENT_ID: + if (!mpam_has_feature(mpam_feat_dspri_part, dev->features)) + break; + val 
= mpam_read_reg(dev, MPAMCFG_PRI); + dspri = MPAMCFG_DSPRI_GET(val); + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, dev->features)) + dspri = GENMASK(dev->dspri_wd - 1, 0) & ~dspri; + val = dspri; + break; + default: + break; + } + + atomic64_add(val, &ctx->cfg_value); + spin_unlock_irqrestore(&dev->lock, flags); + + break; + } +} + +/* + * reading first device of the this component is enough + * for getting configuration. + */ +static void +mpam_component_get_config_local(struct mpam_component *comp, + struct sync_args *args, u32 *result) +{ + int cpu; + struct mpam_device *dev; + struct mpam_device_sync sync_ctx; + + sync_ctx.args = args; + sync_ctx.comp = comp; + atomic64_set(&sync_ctx.cfg_value, 0); + + dev = list_first_entry_or_null(&comp->devices, + struct mpam_device, comp_list); + if (WARN_ON(!dev)) + return; + + cpu = cpumask_any(&dev->online_affinity); + smp_call_function_single(cpu, mpam_component_read_mpamcfg, &sync_ctx, 1); + + if (result) + *result = atomic64_read(&sync_ctx.cfg_value); +} + +void mpam_component_get_config(struct mpam_component *comp, + struct sync_args *args, u32 *result) +{ + mpam_component_get_config_local(comp, args, result); +} + +#define ARM_MPAM_PDEV_NAME "arm-mpam" + +static const struct of_device_id arm_mpam_of_device_ids[] = { + {.compatible = "arm,mpam"}, + { } +}; + +static int of_mpam_parse_irq(struct device_node *node, + struct mpam_device *dev) +{ + u32 overflow_interrupt, overflow_flags; + u32 error_interrupt, error_interrupt_flags; + + of_property_read_u32(node, "overflow-interrupt", &overflow_interrupt); + of_property_read_u32(node, "overflow-flags", &overflow_flags); + of_property_read_u32(node, "error-interrupt", &error_interrupt); + of_property_read_u32(node, "error-interrupt-flags", + &error_interrupt_flags); + + return mpam_register_device_irq(dev, + overflow_interrupt, overflow_flags, + error_interrupt, error_interrupt_flags); +} + +static int of_mpam_parse_cache(struct platform_device *pdev, + struct device_node *node) +{ + struct mpam_device *dev; + int cache_level, cache_id; + u64 reg_value[2]; + + if (of_property_read_u32(node, "cache-level", &cache_level)) { + dev_err(&pdev->dev, "missing cache level property\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "cache-id", &cache_id)) { + dev_err(&pdev->dev, "missing cache id property\n"); + return -EINVAL; + } + + /* Base address */ + if (of_property_read_u64_array(node, "reg", reg_value, 2)) { + dev_err(&pdev->dev, "missing io resource property\n"); + return -EINVAL; + } + + dev = mpam_device_create_cache(cache_level, cache_id, NULL, + reg_value[0]); + if (IS_ERR(dev)) { + dev_err(&pdev->dev, "Failed to create cache node\n"); + return -EINVAL; + } + + return of_mpam_parse_irq(node, dev); +} + +static int of_mpam_parse_memory(struct platform_device *pdev, + struct device_node *node) +{ + struct mpam_device *dev; + int numa_id; + u64 reg_value[2]; + + if (of_property_read_u32(node, "numa-node-id", &numa_id)) { + dev_err(&pdev->dev, "missing numa node id property\n"); + return -EINVAL; + } + + /* Base address */ + if (of_property_read_u64_array(node, "reg", reg_value, 2)) { + dev_err(&pdev->dev, "missing io resource property\n"); + return -EINVAL; + } + + dev = mpam_device_create_memory(numa_id, reg_value[0]); + if (IS_ERR(dev)) { + dev_err(&pdev->dev, "Failed to create memory node\n"); + return -EINVAL; + } + + return of_mpam_parse_irq(node, dev); +} + +static int of_mpam_add_child(struct platform_device *pdev, + struct device_node *node) +{ + enum mpam_class_types 
type; + + if (of_property_read_u32(node, "type", &type)) { + dev_err(&pdev->dev, "missing type property\n"); + return -EINVAL; + } + + switch (type) { + case MPAM_CLASS_CACHE: + return of_mpam_parse_cache(pdev, node); + case MPAM_CLASS_MEMORY: + return of_mpam_parse_memory(pdev, node); + default: + pr_warn_once("Unknown node type %u.\n", type); + return -EINVAL; + /* fall through */ + case MPAM_CLASS_SMMU: + /* not yet supported */ + /* fall through */ + case MPAM_CLASS_UNKNOWN: + break; + } + + return 0; +} + +static int arm_mpam_device_probe(struct platform_device *pdev) +{ + int ret; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *child = NULL; + + if (!cpus_have_const_cap(ARM64_HAS_MPAM)) + return 0; + + if (!acpi_disabled || mpam_enabled != MPAM_ENABLE_OF) + return 0; + + if (!node || !of_match_node(arm_mpam_of_device_ids, pdev->dev.of_node)) + return -EINVAL; + + ret = mpam_discovery_start(); + if (ret) + return ret; + + for_each_available_child_of_node(node, child) { + ret = of_mpam_add_child(pdev, child); + if (ret) + break; + } + + if (ret) { + mpam_discovery_failed(); + } else { + ret = mpam_discovery_complete(); + if (!ret) + pr_info("Successfully init mpam by DT.\n"); + } + + return ret; +} + +static struct platform_driver arm_mpam_driver = { + .driver = { + .name = ARM_MPAM_PDEV_NAME, + .of_match_table = arm_mpam_of_device_ids, + }, + .probe = arm_mpam_device_probe, +}; + +static int __init arm_mpam_driver_init(void) +{ + if (acpi_disabled) + return platform_driver_register(&arm_mpam_driver); + else + return acpi_mpam_parse(); +} + +/* + * We want to run after cacheinfo_sysfs_init() has caused the cacheinfo + * structures to be populated. That runs as a device_initcall. + */ +device_initcall_sync(arm_mpam_driver_init); diff --git a/arch/arm64/kernel/mpam/mpam_device.h b/arch/arm64/kernel/mpam/mpam_device.h new file mode 100644 index 0000000000000000000000000000000000000000..f3ebd3f8b23d0b74ff6c21bf6f96ba4dc74280b1 --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_device.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_MPAM_DEVICE_H +#define _ASM_ARM64_MPAM_DEVICE_H + +#include +#include +#include +#include + +struct mpam_config; + +/* + * Size of the memory mapped registers: 4K of feature page + * then 2x 4K bitmap registers + */ +#define SZ_MPAM_DEVICE (3 * SZ_4K) + +/* + * An mpam_device corresponds to an MSC, an interface to a component's cache + * or bandwidth controls. It is associated with a set of CPUs, and a component. + * For resctrl the component is expected to be a well-known cache (e.g. L2). + * We may have multiple interfaces per component, each for a set of CPUs that + * share the same component. 
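+ * Registers must only be accessed from a CPU in fw_affinity;
+ * mpam_read_reg()/mpam_write_reg() warn if that is not the case.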
+ */ +struct mpam_device { + /* member of mpam_component:devices */ + struct list_head comp_list; + struct mpam_component *comp; + + /* member of mpam_all_devices */ + struct list_head glbl_list; + + /* The affinity learn't from firmware */ + struct cpumask fw_affinity; + /* of which these cpus are online */ + struct cpumask online_affinity; + + spinlock_t lock; + bool probed; + + phys_addr_t hwpage_address; + void __iomem *mapped_hwpage; + + u32 features; + + u16 cmax_wd; + u16 cpbm_wd; + u16 mbw_pbm_bits; + u16 bwa_wd; + u16 intpri_wd; + u16 dspri_wd; + u16 num_partid; + u16 num_intpartid; + u16 num_pmg; + u16 num_csu_mon; + u16 num_mbwu_mon; + + /* for reset device MPAMCFG_PRI */ + u16 hwdef_intpri; + u16 hwdef_dspri; + + bool enable_error_irq; + u32 error_irq; + u32 error_irq_flags; + u32 overflow_irq; + u32 overflow_irq_flags; +}; + +/* + * A set of devices that share the same component. e.g. the MSCs that + * make up the L2 cache. This may be 1:1. Exposed to user-space as a domain by + * resctrl when the component is a well-known cache. + */ +struct mpam_component { + u32 comp_id; + + /* mpam_devices in this domain */ + struct list_head devices; + + struct cpumask fw_affinity; + + struct mpam_config *cfg; + + /* member of mpam_class:components */ + struct list_head class_list; +}; + +/* + * All the components of the same type at a particular level, + * e.g. all the L2 cache components. Exposed to user-space as a resource + * by resctrl when the component is a well-known cache. We may have additional + * classes such as system-caches, or internal components that are not exposed. + */ +struct mpam_class { + /* + * resctrl expects to see an empty domain list if all 'those' CPUs are + * offline. As we can't discover the cpu affinity of 'unknown' MSCs, we + * need a second list. + * mpam_components in this class. 
+ */ + struct list_head components; + + struct cpumask fw_affinity; + + u8 level; + enum mpam_class_types type; + + /* Once enabled, the common features */ + u32 features; + + struct mutex lock; + + /* member of mpam_classes */ + struct list_head classes_list; + + u16 cmax_wd; + u16 cpbm_wd; + u16 mbw_pbm_bits; + u16 bwa_wd; + u16 intpri_wd; + u16 dspri_wd; + u16 num_partid; + u16 num_intpartid; + u16 num_pmg; + u16 num_csu_mon; + u16 num_mbwu_mon; + + /* for reset class MPAMCFG_PRI */ + u16 hwdef_intpri; + u16 hwdef_dspri; +}; + +/* System wide properties */ +struct mpam_sysprops_prop { + u32 mpam_llc_size; + u16 max_partid; + u16 max_pmg; +}; + +#endif /* _ASM_ARM64_MPAM_DEVICE_H */ diff --git a/arch/arm64/kernel/mpam/mpam_internal.h b/arch/arm64/kernel/mpam/mpam_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..7b84ea54975aa59a4c92eae8f6c935f0225799b6 --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_internal.h @@ -0,0 +1,345 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_MPAM_INTERNAL_H +#define _ASM_ARM64_MPAM_INTERNAL_H + +#include + +typedef u32 mpam_features_t; + +struct mpam_component; +struct rdt_domain; +struct mpam_class; +struct raw_resctrl_resource; +struct resctrl_resource; +/* MPAM register */ +#define SYS_MPAM0_EL1 sys_reg(3, 0, 10, 5, 1) +#define SYS_MPAM1_EL1 sys_reg(3, 0, 10, 5, 0) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) +#define SYS_MPAM3_EL3 sys_reg(3, 6, 10, 5, 0) +#define SYS_MPAM1_EL12 sys_reg(3, 5, 10, 5, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAMVPMn_EL2(n) sys_reg(3, 4, 10, 6, n) +#define SYS_MPAMIDR_EL1 sys_reg(3, 0, 10, 4, 4) + +#define MPAM_MASK(n) ((1UL << n) - 1) +/* plan to use GENMASK(n, 0) instead */ + +/* + * MPAMx_ELn: + * 15:0 PARTID_I + * 31:16 PARTID_D + * 39:32 PMG_I + * 47:40 PMG_D + * 48 TRAPMPAM1EL1 + * 49 TRAPMPAM0EL1 + * 61:49 Reserved + * 62 TRAPLOWER + * 63 MPAMEN + */ +#define PARTID_BITS (16) +#define PMG_BITS (8) +#define PARTID_MASK MPAM_MASK(PARTID_BITS) +#define PMG_MASK MPAM_MASK(PMG_BITS) + +#define PARTID_I_SHIFT (0) +#define PARTID_D_SHIFT (PARTID_I_SHIFT + PARTID_BITS) +#define PMG_I_SHIFT (PARTID_D_SHIFT + PARTID_BITS) +#define PMG_D_SHIFT (PMG_I_SHIFT + PMG_BITS) + +#define PARTID_I_MASK (PARTID_MASK << PARTID_I_SHIFT) +#define PARTID_D_MASK (PARTID_MASK << PARTID_D_SHIFT) +#define PARTID_I_CLR(r) ((r) & ~PARTID_I_MASK) +#define PARTID_D_CLR(r) ((r) & ~PARTID_D_MASK) +#define PARTID_CLR(r) (PARTID_I_CLR(r) & PARTID_D_CLR(r)) + +#define PARTID_I_SET(r, id) (PARTID_I_CLR(r) | ((id) << PARTID_I_SHIFT)) +#define PARTID_D_SET(r, id) (PARTID_D_CLR(r) | ((id) << PARTID_D_SHIFT)) +#define PARTID_SET(r, id) (PARTID_CLR(r) | ((id) << PARTID_I_SHIFT) | ((id) << PARTID_D_SHIFT)) + +#define PMG_I_MASK (PMG_MASK << PMG_I_SHIFT) +#define PMG_D_MASK (PMG_MASK << PMG_D_SHIFT) +#define PMG_I_CLR(r) ((r) & ~PMG_I_MASK) +#define PMG_D_CLR(r) ((r) & ~PMG_D_MASK) +#define PMG_CLR(r) (PMG_I_CLR(r) & PMG_D_CLR(r)) + +#define PMG_I_SET(r, id) (PMG_I_CLR(r) | ((id) << PMG_I_SHIFT)) +#define PMG_D_SET(r, id) (PMG_D_CLR(r) | ((id) << PMG_D_SHIFT)) +#define PMG_SET(r, id) (PMG_CLR(r) | ((id) << PMG_I_SHIFT) | ((id) << PMG_D_SHIFT)) + +#define TRAPMPAM1EL1_SHIFT (PMG_D_SHIFT + PMG_BITS) +#define TRAPMPAM0EL1_SHIFT (TRAPMPAM1EL1_SHIFT + 1) +#define TRAPLOWER_SHIFT (TRAPMPAM0EL1_SHIFT + 13) +#define MPAMEN_SHIFT (TRAPLOWER_SHIFT + 1) + +/* + * MPAMHCR_EL2: + * 0 EL0_VPMEN + * 1 EL1_VPMEN + * 7:2 Reserved + * 8 GSTAPP_PLK + * 30:9 
Reserved + * 31 TRAP_MPAMIDR_EL1 + * 63:32 Reserved + */ +#define EL0_VPMEN_SHIFT (0) +#define EL1_VPMEN_SHIFT (EL0_VPMEN_SHIFT + 1) +#define GSTAPP_PLK_SHIFT (8) +#define TRAP_MPAMIDR_EL1_SHIFT (31) + +/* + * MPAMIDR_EL1: + * 15:0 PARTID_MAX + * 16 Reserved + * 17 HAS_HCR + * 20:18 VPMR_MAX + * 31:21 Reserved + * 39:32 PMG_MAX + * 63:40 Reserved + */ +#define VPMR_MAX_BITS (3) +#define PARTID_MAX_SHIFT (0) +#define PARTID_MAX_MASK (MPAM_MASK(PARTID_BITS) << PARTID_MAX_SHIFT) +#define HAS_HCR_SHIFT (PARTID_MAX_SHIFT + PARTID_BITS + 1) +#define VPMR_MAX_SHIFT (HAS_HCR_SHIFT + 1) +#define PMG_MAX_SHIFT (VPMR_MAX_SHIFT + VPMR_MAX_BITS + 11) +#define PMG_MAX_MASK (MPAM_MASK(PMG_BITS) << PMG_MAX_SHIFT) +#define VPMR_MASK MPAM_MASK(VPMR_MAX_BITS) + +/* + * MPAMVPMV_EL2: + * 31:0 VPM_V + * 63:32 Reserved + */ +#define VPM_V_BITS 32 + +DECLARE_STATIC_KEY_FALSE(resctrl_enable_key); +DECLARE_STATIC_KEY_FALSE(resctrl_mon_enable_key); + +extern int max_name_width, max_data_width; + +#define RESCTRL_SHOW_DOM_MAX_NUM 8 + +#define mpam_read_sysreg_s(reg, name) read_sysreg_s(reg) +#define mpam_write_sysreg_s(v, r, n) write_sysreg_s(v, r) +#define mpam_readl(addr) readl(addr) +#define mpam_writel(v, addr) writel(v, addr) + +/* 64bit arm64 specified */ +union mon_data_bits { + void *priv; + struct { + u8 rid; + u8 domid; + u8 partid; + u8 pmg; + u8 mon; + u8 cdp_both_mon; + } u; +}; + +ssize_t resctrl_group_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); + +int resctrl_group_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v); + +struct rdt_domain *mpam_find_domain(struct resctrl_resource *r, int id, + struct list_head **pos); + +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; + +extern struct list_head mpam_classes; + +#define MAX_MBA_BW 100u +#define GRAN_MBA_BW 2u + +#define MPAM_ERRCODE_NONE 0 +#define MPAM_ERRCODE_PARTID_SEL_RANGE 1 +#define MPAM_ERRCODE_REQ_PARTID_RANGE 2 +#define MPAM_ERRCODE_MSMONCFG_ID_RANGE 3 +#define MPAM_ERRCODE_REQ_PMG_RANGE 4 +#define MPAM_ERRCODE_MONITOR_RANGE 5 +#define MPAM_ERRCODE_INTPARTID_RANGE 6 +#define MPAM_ERRCODE_UNEXPECTED_INTERNAL 7 +#define _MPAM_NUM_ERRCODE 8 + +struct mpam_resctrl_dom { + struct mpam_component *comp; + + struct rdt_domain resctrl_dom; +}; + +struct mpam_resctrl_res { + struct mpam_class *class; + + bool resctrl_mba_uses_mbw_part; + + struct resctrl_resource resctrl_res; +}; + +struct sync_args { + u8 domid; + u8 pmg; + struct sd_closid closid; + u32 mon; + bool match_pmg; + enum rdt_event_id eventid; +}; + +struct mpam_device_sync { + struct mpam_component *comp; + + struct sync_args *args; + + bool config_mon; + atomic64_t mon_value; + + struct cpumask updated_on; + + atomic64_t cfg_value; + int error; +}; + +#define for_each_resctrl_exports(r) \ + for (r = &mpam_resctrl_exports[0]; \ + r < &mpam_resctrl_exports[0] + \ + ARRAY_SIZE(mpam_resctrl_exports); r++) + +#define for_each_supported_resctrl_exports(r) \ + for_each_resctrl_exports(r) \ + if (r->class) + +/* + * MPAM component config Structure + */ +struct mpam_config { + + /* + * The biggest config we could pass around is 4K, but resctrl's max + * cbm is u32, so we only need the full-size config during reset. + * Just in case a cache with a >u32 bitmap is exported for another + * reason, we need to track which bits of the configuration are valid. + */ + mpam_features_t valid; + + u32 cpbm; + u32 cmax; + u32 mbw_pbm; + u16 mbw_max; + u16 mbw_min; + + /* + * dspri is downstream priority, intpri is internal priority. 
+ */ + u16 dspri; + u16 intpri; + + /* + * hardlimit or not + */ + bool hdl; +}; + +/* Bits for mpam_features_t */ +enum mpam_device_features { + mpam_feat_ccap_part = 0, + mpam_feat_cpor_part, + mpam_feat_mbw_part, + mpam_feat_mbw_min, + mpam_feat_mbw_max, + mpam_feat_mbw_prop, + mpam_feat_intpri_part, + mpam_feat_intpri_part_0_low, + mpam_feat_dspri_part, + mpam_feat_dspri_part_0_low, + mpam_feat_msmon, + mpam_feat_msmon_csu, + mpam_feat_msmon_csu_capture, + mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_capture, + mpam_feat_msmon_capt, + mpam_feat_part_nrw, + /* this feature always enabled */ + mpam_feat_part_hdl, + MPAM_FEATURE_LAST, +}; + +static inline bool mpam_has_feature(enum mpam_device_features feat, + mpam_features_t supported) +{ + return (1< + * Xie XiuQi + * + * Code was partially borrowed from arch/x86/kernel/cpu/intel_rdt*. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * More information about MPAM be found in the Arm Architecture Reference + * Manual. + * + * https://static.docs.arm.com/ddi0598/a/DDI0598_MPAM_supp_armv8a.pdf + */ + +#include +#include + +#include "mpam_internal.h" + +/* + * Global boolean for rdt_monitor which is true if any + * resource monitoring is enabled. + */ +bool rdt_mon_capable; + +struct rmid_entry { + u32 rmid; + u32 mon[RDT_NUM_RESOURCES]; + struct list_head mon_exclusive_q; + struct list_head mon_wait_q; +}; + +/** + * @rmid_mon_exclusive_all List of allocated RMIDs with + * exclusive available mon. + */ +static LIST_HEAD(rmid_mon_exclusive_all); + +/** + * @rmid_mon_wait_all List of allocated RMIDs with default + * 0 mon and wait for exclusive available mon. + */ +static LIST_HEAD(rmid_mon_wait_all); + +static u32 rmid_ptrs_len; + +/** + * @rmid_entry - The entry in the mon list. 
+ */ +static struct rmid_entry *rmid_ptrs; + +static int mon_free_map[RDT_NUM_RESOURCES]; + +static void mon_init(void) +{ + u16 mon_num; + u32 times, flag; + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + rr = r->res; + + hw_alloc_times_validate(times, flag); + /* for cdp*/ + mon_num = rounddown(rr->num_mon, times); + mon_free_map[r->rid] = BIT_MASK(mon_num) - 1; + + /* mon = 0 is reserved */ + mon_free_map[r->rid] &= ~(BIT_MASK(times) - 1); + } +} + +static u32 mon_alloc(enum resctrl_resource_level rid) +{ + u32 mon = 0; + u32 times, flag; + + hw_alloc_times_validate(times, flag); + + mon = ffs(mon_free_map[rid]); + if (mon == 0) + return -ENOSPC; + + mon--; + mon_free_map[rid] &= ~(GENMASK(mon + times - 1, mon)); + + return mon; +} + +static void mon_free(u32 mon, enum resctrl_resource_level rid) +{ + u32 times, flag; + + hw_alloc_times_validate(times, flag); + mon_free_map[rid] |= GENMASK(mon + times - 1, mon); +} + +static inline struct rmid_entry *__rmid_entry(u32 rmid) +{ + struct rmid_entry *entry; + + if (rmid >= rmid_ptrs_len) + return NULL; + + entry = &rmid_ptrs[rmid]; + WARN_ON(entry->rmid != rmid); + + return entry; +} + +static void mon_wait_q_init(void) +{ + INIT_LIST_HEAD(&rmid_mon_wait_all); +} + +static void mon_exclusive_q_init(void) +{ + INIT_LIST_HEAD(&rmid_mon_exclusive_all); +} + +static void put_mon_wait_q(struct rmid_entry *entry) +{ + list_add_tail(&entry->mon_wait_q, &rmid_mon_wait_all); +} + +static void put_mon_exclusive_q(struct rmid_entry *entry) +{ + list_add_tail(&entry->mon_exclusive_q, &rmid_mon_exclusive_all); +} + +static void mon_wait_q_del(struct rmid_entry *entry) +{ + list_del(&entry->mon_wait_q); +} + +static void mon_exclusive_q_del(struct rmid_entry *entry) +{ + list_del(&entry->mon_exclusive_q); +} + +static int is_mon_wait_q_exist(u32 rmid) +{ + struct rmid_entry *entry; + + list_for_each_entry(entry, &rmid_mon_wait_all, mon_wait_q) { + if (entry->rmid == rmid) + return 1; + } + + return 0; +} + +static int is_mon_exclusive_q_exist(u32 rmid) +{ + struct rmid_entry *entry; + + list_for_each_entry(entry, &rmid_mon_exclusive_all, mon_exclusive_q) { + if (entry->rmid == rmid) + return 1; + } + + return 0; +} + +static int is_rmid_mon_wait_q_exist(u32 rmid) +{ + struct rmid_entry *entry; + + list_for_each_entry(entry, &rmid_mon_wait_all, mon_wait_q) { + if (entry->rmid == rmid) + return 1; + } + + return 0; +} + +int rmid_mon_ptrs_init(u32 nr_rmids) +{ + struct rmid_entry *entry = NULL; + int i; + + if (rmid_ptrs) + kfree(rmid_ptrs); + + rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); + if (!rmid_ptrs) + return -ENOMEM; + + rmid_ptrs_len = nr_rmids; + + for (i = 0; i < nr_rmids; i++) { + entry = &rmid_ptrs[i]; + entry->rmid = i; + } + + mon_exclusive_q_init(); + mon_wait_q_init(); + + /* + * RMID 0 is special and is always allocated. It's used for all + * tasks monitoring. 
+ */ + entry = __rmid_entry(0); + if (!entry) { + kfree(rmid_ptrs); + rmid_ptrs = NULL; + return -EINVAL; + } + + put_mon_exclusive_q(entry); + + mon_init(); + + return 0; +} + +int assoc_rmid_with_mon(u32 rmid) +{ + int mon; + bool has_mon_wait = false; + struct rmid_entry *entry; + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + + if (is_mon_exclusive_q_exist(rmid) || + is_rmid_mon_wait_q_exist(rmid)) + return -EINVAL; + + entry = __rmid_entry(rmid); + if (!entry) + return -EINVAL; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (!r->mon_enabled) + continue; + + mon = mon_alloc(r->rid); + if (mon < 0) { + entry->mon[r->rid] = 0; + has_mon_wait = true; + } else { + entry->mon[r->rid] = mon; + } + } + + if (has_mon_wait) + put_mon_wait_q(entry); + else + put_mon_exclusive_q(entry); + + return 0; +} + +void deassoc_rmid_with_mon(u32 rmid) +{ + bool has_mon_wait; + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + struct rmid_entry *entry = __rmid_entry(rmid); + struct rmid_entry *wait, *tmp; + + if (!entry) + return; + + if (!is_mon_wait_q_exist(rmid) && + !is_mon_exclusive_q_exist(rmid)) + return; + + if (is_mon_wait_q_exist(rmid)) + mon_wait_q_del(entry); + else + mon_exclusive_q_del(entry); + + list_for_each_entry_safe(wait, tmp, &rmid_mon_wait_all, mon_wait_q) { + has_mon_wait = false; + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (!r->mon_enabled) + continue; + + if (!wait->mon[r->rid]) { + wait->mon[r->rid] = entry->mon[r->rid]; + entry->mon[r->rid] = 0; + } + + if (!wait->mon[r->rid]) + has_mon_wait = true; + } + if (!has_mon_wait) { + mon_wait_q_del(wait); + put_mon_exclusive_q(wait); + } + } + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (!r->mon_enabled) + continue; + + if (entry->mon[r->rid]) + mon_free(entry->mon[r->rid], r->rid); + } +} + +u32 get_rmid_mon(u32 rmid, enum resctrl_resource_level rid) +{ + struct rmid_entry *entry = __rmid_entry(rmid); + + if (!entry) + return 0; + + if (!is_mon_wait_q_exist(rmid) && !is_mon_exclusive_q_exist(rmid)) + return 0; + + return entry->mon[rid]; +} diff --git a/arch/arm64/kernel/mpam/mpam_resctrl.c b/arch/arm64/kernel/mpam/mpam_resctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..c5316b276d05cb554a33b051a0b1295601107acb --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_resctrl.c @@ -0,0 +1,2460 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM + * + * Copyright (C) 2016 Intel Corporation + * Copyright (C) 2018-2019 Huawei Technologies Co., Ltd + * + * Authors: + * Fenghua Yu + * Tony Luck + * Vikas Shivappa + * Xie XiuQi + * + * Code was partially borrowed from arch/x86/kernel/cpu/intel_rdt*. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * More information about MPAM be found in the Arm Architecture Reference + * Manual. 
+ * + * https://static.docs.arm.com/ddi0598/a/DDI0598_MPAM_supp_armv8a.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mpam_device.h" +#include "mpam_resource.h" +#include "mpam_internal.h" + +/* Mutex to protect rdtgroup access. */ +DEFINE_MUTEX(resctrl_group_mutex); + +/* + * The cached intel_pqr_state is strictly per CPU and can never be + * updated from a remote CPU. Functions which modify the state + * are called with interrupts disabled and no preemption, which + * is sufficient for the protection. + */ +DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); + +/* + * Used to store the max resource name width and max resource data width + * to display the schemata in a tabular format + */ +int max_name_width, max_data_width; + +/* + * Global boolean for rdt_alloc which is true if any + * resource allocation is enabled. + */ +bool rdt_alloc_capable; + +/* + * Indicate if had mount cdpl2/cdpl3 option. + */ +static bool resctrl_cdp_enabled; + +/* + * Hi1620 2P Base Address Map + * + * AFF2 | NODE | DIE | Base Address + * ------------------------------------ + * 01 | 0 | P0 TB | 0x000098xxxxxx + * 03 | 1 | P0 TA | 0x000090xxxxxx + * 05 | 2 | P1 TB | 0x200098xxxxxx + * 07 | 3 | P2 TA | 0x200090xxxxxx + * + * AFF2: MPIDR.AFF2 + */ + +int mpam_resctrl_set_default_cpu(unsigned int cpu) +{ + /* The cpu is set in default rdtgroup after online. */ + cpumask_set_cpu(cpu, &resctrl_group_default.cpu_mask); + + /* Update CPU mpam sysregs' default setting when cdp enabled */ + if (resctrl_cdp_enabled) + resctrl_cdp_update_cpus_state(&resctrl_group_default); + + return 0; +} + +void mpam_resctrl_clear_default_cpu(unsigned int cpu) +{ + struct resctrl_group *rdtgrp; + + list_for_each_entry(rdtgrp, &resctrl_all_groups, resctrl_group_list) { + /* The cpu is clear in associated rdtgroup after offline. 
*/ + cpumask_clear_cpu(cpu, &rdtgrp->cpu_mask); + } +} + +bool is_resctrl_cdp_enabled(void) +{ + return !!resctrl_cdp_enabled; +} + +static void +mpam_resctrl_update_component_cfg(struct resctrl_resource *r, + struct rdt_domain *d, struct sd_closid *closid); + +static void +common_wrmsr(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para); + +static u64 cache_rdmsr(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para); +static u64 mbw_rdmsr(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para); + +static u64 cache_rdmon(struct rdt_domain *d, void *md_priv); +static u64 mbw_rdmon(struct rdt_domain *d, void *md_priv); + +static int common_wrmon(struct rdt_domain *d, void *md_priv); + +static int parse_cache(char *buf, struct resctrl_resource *r, + struct resctrl_staged_config *cfg, enum resctrl_ctrl_type ctrl_type); +static int parse_bw(char *buf, struct resctrl_resource *r, + struct resctrl_staged_config *cfg, enum resctrl_ctrl_type ctrl_type); + +struct raw_resctrl_resource raw_resctrl_resources_all[] = { + [RDT_RESOURCE_L3] = { + .msr_update = common_wrmsr, + .msr_read = cache_rdmsr, + .parse_ctrlval = parse_cache, + .format_str = "%d=%0*x", + .mon_read = cache_rdmon, + .mon_write = common_wrmon, + .fflags = RFTYPE_RES_CACHE, + .ctrl_features = { + [SCHEMA_COMM] = { + .type = SCHEMA_COMM, + .flags = SCHEMA_COMM, + .name = "comm", + .base = 16, + .evt = QOS_CAT_CPBM_EVENT_ID, + .capable = 1, + .ctrl_suffix = "", + }, + [SCHEMA_PRI] = { + .type = SCHEMA_PRI, + .flags = SCHEMA_PRI, + .name = "caPrio", + .base = 10, + .evt = QOS_CAT_INTPRI_EVENT_ID, + .ctrl_suffix = "PRI", + }, + [SCHEMA_PBM] = { + .type = SCHEMA_PBM, + .flags = SCHEMA_COMM, + .name = "caPbm", + .base = 16, + .evt = QOS_CAT_CPBM_EVENT_ID, + .ctrl_suffix = "PBM", + }, + [SCHEMA_MAX] = { + .type = SCHEMA_MAX, + .flags = SCHEMA_COMM, + .name = "caMax", + .base = 10, + .evt = QOS_CAT_CMAX_EVENT_ID, + .ctrl_suffix = "MAX", + }, + }, + }, + [RDT_RESOURCE_L2] = { + .msr_update = common_wrmsr, + .msr_read = cache_rdmsr, + .parse_ctrlval = parse_cache, + .format_str = "%d=%0*x", + .mon_read = cache_rdmon, + .mon_write = common_wrmon, + .fflags = RFTYPE_RES_CACHE, + .ctrl_features = { + [SCHEMA_COMM] = { + .type = SCHEMA_COMM, + .flags = SCHEMA_COMM, + .name = "comm", + .base = 16, + .evt = QOS_CAT_CPBM_EVENT_ID, + .capable = 1, + .ctrl_suffix = "", + }, + [SCHEMA_PRI] = { + .type = SCHEMA_PRI, + .flags = SCHEMA_PRI, + .name = "caPrio", + .base = 10, + .evt = QOS_CAT_INTPRI_EVENT_ID, + .ctrl_suffix = "PRI", + }, + [SCHEMA_PBM] = { + .type = SCHEMA_PBM, + .flags = SCHEMA_COMM, + .name = "caPbm", + .base = 16, + .evt = QOS_CAT_CPBM_EVENT_ID, + .ctrl_suffix = "PBM", + }, + [SCHEMA_MAX] = { + .type = SCHEMA_MAX, + .flags = SCHEMA_COMM, + .name = "caMax", + .base = 10, + .evt = QOS_CAT_CMAX_EVENT_ID, + .ctrl_suffix = "MAX", + }, + }, + }, + [RDT_RESOURCE_MC] = { + .msr_update = common_wrmsr, + .msr_read = mbw_rdmsr, + .parse_ctrlval = parse_bw, + .format_str = "%d=%0*d", + .mon_read = mbw_rdmon, + .mon_write = common_wrmon, + .fflags = RFTYPE_RES_MB, + .ctrl_features = { + [SCHEMA_COMM] = { + .type = SCHEMA_COMM, + .flags = SCHEMA_COMM, + .name = "comm", + .base = 10, + .evt = QOS_MBA_MAX_EVENT_ID, + .capable = 1, + .ctrl_suffix = "", + }, + [SCHEMA_PRI] = { + .type = SCHEMA_PRI, + .flags = SCHEMA_PRI, + .name = "mbPrio", + .base = 10, + .evt = QOS_MBA_INTPRI_EVENT_ID, + .ctrl_suffix = "PRI", + }, + [SCHEMA_HDL] = { + .type = SCHEMA_HDL, + .flags = SCHEMA_HDL, + .name = 
"mbHdl", + .base = 10, + .evt = QOS_MBA_HDL_EVENT_ID, + .ctrl_suffix = "HDL", + }, + [SCHEMA_PBM] = { + .type = SCHEMA_PBM, + .flags = SCHEMA_COMM, + .name = "mbPbm", + .base = 16, + .evt = QOS_MBA_PBM_EVENT_ID, + .ctrl_suffix = "PBM", + }, + [SCHEMA_MAX] = { + .type = SCHEMA_MAX, + .flags = SCHEMA_COMM, + .name = "mbMax", + .base = 10, + .evt = QOS_MBA_MAX_EVENT_ID, + .ctrl_suffix = "MAX", + }, + [SCHEMA_MIN] = { + .type = SCHEMA_MIN, + .flags = SCHEMA_COMM, + .name = "mbMin", + .base = 10, + .evt = QOS_MBA_MIN_EVENT_ID, + .ctrl_suffix = "MIN", + }, + }, + }, +}; + +struct raw_resctrl_resource * +mpam_get_raw_resctrl_resource(enum resctrl_resource_level level) +{ + if (level >= RDT_NUM_RESOURCES) + return NULL; + + return &raw_resctrl_resources_all[level]; +} + +/* + * Read one cache schema row. Check that it is valid for the current + * resource type. + */ +static int +parse_cache(char *buf, struct resctrl_resource *r, + struct resctrl_staged_config *cfg, + enum resctrl_ctrl_type type) +{ + unsigned long data; + struct raw_resctrl_resource *rr = r->res; + + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("duplicate domain\n"); + return -EINVAL; + } + + if (kstrtoul(buf, rr->ctrl_features[type].base, &data)) + return -EINVAL; + + if (data >= rr->ctrl_features[type].max_wd) + return -EINVAL; + + if (type == SCHEMA_COMM && data == 0) { + rdt_last_cmd_puts("No allowed CPBM to be set to 0\n"); + return -EINVAL; + } + + cfg->new_ctrl[type] = data; + cfg->ctrl_updated[type] = true; + cfg->have_new_ctrl = true; + + return 0; +} + +static int +parse_bw(char *buf, struct resctrl_resource *r, + struct resctrl_staged_config *cfg, + enum resctrl_ctrl_type type) +{ + unsigned long data; + struct raw_resctrl_resource *rr = r->res; + + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("duplicate domain\n"); + return -EINVAL; + } + + switch (rr->ctrl_features[type].evt) { + case QOS_MBA_MAX_EVENT_ID: + case QOS_MBA_PBM_EVENT_ID: + if (kstrtoul(buf, rr->ctrl_features[type].base, &data)) + return -EINVAL; + data = (data < r->mbw.min_bw) ? r->mbw.min_bw : data; + data = roundup(data, r->mbw.bw_gran); + break; + case QOS_MBA_MIN_EVENT_ID: + if (kstrtoul(buf, rr->ctrl_features[type].base, &data)) + return -EINVAL; + /* for mbw min feature, 0 of setting is allowed */ + data = roundup(data, r->mbw.bw_gran); + break; + default: + if (kstrtoul(buf, rr->ctrl_features[type].base, &data)) + return -EINVAL; + break; + } + + if (data >= rr->ctrl_features[type].max_wd) + return -EINVAL; + + cfg->new_ctrl[type] = data; + cfg->ctrl_updated[type] = true; + cfg->have_new_ctrl = true; + + return 0; +} + +static void +common_wrmsr(struct resctrl_resource *r, struct rdt_domain *d, + struct msr_param *para) +{ + struct sync_args args; + struct mpam_resctrl_dom *dom; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + mpam_resctrl_update_component_cfg(r, d, para->closid); + + /* + * so far we have accomplished configuration replication, + * it is ready to apply this configuration. 
+	 */
+	args.closid = *para->closid;
+	mpam_component_config(dom->comp, &args);
+}
+
+static u64 cache_rdmsr(struct resctrl_resource *r, struct rdt_domain *d,
+			struct msr_param *para)
+{
+	u32 result;
+	struct sync_args args;
+	struct mpam_resctrl_dom *dom;
+	struct raw_resctrl_resource *rr = r->res;
+
+	args.closid = *para->closid;
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+
+	args.eventid = rr->ctrl_features[para->type].evt;
+	mpam_component_get_config(dom->comp, &args, &result);
+
+	return result;
+}
+
+static u64 mbw_rdmsr(struct resctrl_resource *r, struct rdt_domain *d,
+			struct msr_param *para)
+{
+	u32 result;
+	struct sync_args args;
+	struct mpam_resctrl_dom *dom;
+	struct raw_resctrl_resource *rr = r->res;
+
+	args.closid = *para->closid;
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+
+	args.eventid = rr->ctrl_features[para->type].evt;
+	mpam_component_get_config(dom->comp, &args, &result);
+
+	switch (rr->ctrl_features[para->type].evt) {
+	case QOS_MBA_MAX_EVENT_ID:
+	case QOS_MBA_MIN_EVENT_ID:
+	case QOS_MBA_PBM_EVENT_ID:
+		result = roundup(result, r->mbw.bw_gran);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * use pmg as monitor id
+ * just use match_partid only.
+ */
+static u64 cache_rdmon(struct rdt_domain *d, void *md_priv)
+{
+	int err;
+	u64 result;
+	union mon_data_bits md;
+	struct sync_args args;
+	struct mpam_resctrl_dom *dom;
+	unsigned long timeout;
+
+	md.priv = md_priv;
+
+	/* monitoring only needs reqpartid */
+	args.closid.reqpartid = md.u.partid;
+	args.mon = md.u.mon;
+	args.pmg = md.u.pmg;
+	args.match_pmg = true;
+	args.eventid = QOS_L3_OCCUP_EVENT_ID;
+
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+
+	/*
+	 * Check whether the returned value is valid; the read may be
+	 * affected by the NRDY bit.
+	 */
+	timeout = READ_ONCE(jiffies) + msecs_to_jiffies(1000);
+	do {
+		if (time_after(READ_ONCE(jiffies), timeout)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		err = mpam_component_mon(dom->comp, &args, &result);
+		/* Currently just report it */
+		WARN_ON(err && (err != -EBUSY));
+	} while (err == -EBUSY);
+
+	return result;
+}
+/*
+ * use pmg as monitor id
+ * just use match_partid only.
+ */
+static u64 mbw_rdmon(struct rdt_domain *d, void *md_priv)
+{
+	int err;
+	u64 result;
+	union mon_data_bits md;
+	struct sync_args args;
+	struct mpam_resctrl_dom *dom;
+	unsigned long timeout;
+
+	md.priv = md_priv;
+
+	/* monitoring only needs reqpartid */
+	args.closid.reqpartid = md.u.partid;
+	args.mon = md.u.mon;
+	args.pmg = md.u.pmg;
+	args.match_pmg = true;
+	args.eventid = QOS_L3_MBM_LOCAL_EVENT_ID;
+
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+
+	/*
+	 * Check whether the returned value is valid; the read may be
+	 * affected by the NRDY bit.
+	 */
+	timeout = READ_ONCE(jiffies) + msecs_to_jiffies(1000);
+	do {
+		if (time_after(READ_ONCE(jiffies), timeout)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		err = mpam_component_mon(dom->comp, &args, &result);
+		/* Currently just report it */
+		WARN_ON(err && (err != -EBUSY));
+	} while (err == -EBUSY);
+
+	return result;
+}
+
+static int
+common_wrmon(struct rdt_domain *d, void *md_priv)
+{
+	u64 result;
+	union mon_data_bits md;
+	struct sync_args args;
+	struct mpam_resctrl_dom *dom;
+
+	md.priv = md_priv;
+	/* monitoring only needs reqpartid */
+	args.closid.reqpartid = md.u.partid;
+	args.mon = md.u.mon;
+	args.pmg = md.u.pmg;
+
+	args.match_pmg = true;
+
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+
+	/*
+	 * The return value does not need checking here; we only want to
+	 * program the monitor configuration.
+	 */
+	mpam_component_mon(dom->comp, &args, &result);
+
+	return 0;
+}
+
+/*
+ * Note that resctrl_id_init() should be called after
+ * parse_resctrl_group_fs_options() so that resctrl_cdp_enabled() reflects
+ * the mount options.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set "current->closid" to assign a task to a resource
+ *   group.
+ * + Context switch code can avoid extra memory references deciding which
+ *   CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ *   systems.
+ * - Our choices on how to configure each resource become progressively more
+ *   limited as the number of resources grows.
+ */
+
+static int num_intpartid, num_reqpartid;
+static unsigned long *intpartid_free_map;
+
+static void mpam_resctrl_closid_collect(void)
+{
+	struct mpam_resctrl_res *res;
+	struct raw_resctrl_resource *rr;
+
+	/*
+	 * num_reqpartid is the maximum number of partids that the
+	 * whole system provides.
+	 */
+	num_reqpartid = mpam_sysprops_num_partid();
+	/*
+	 * intpartid is used as the closid. When the platform supports
+	 * intpartid narrowing, intpartid bounds the number of resctrl
+	 * groups that can be created, so it must not exceed the number
+	 * of reqpartids nor the maximum number of closids allowed by
+	 * the resctrl filesystem inherited from Intel RDT.
+	 */
+	num_intpartid = mpam_sysprops_num_partid();
+	num_intpartid = min(num_reqpartid, RESCTRL_MAX_CLOSID);
+
+	/*
+	 * Since intpartid is the closid given to resctrl, check whether
+	 * any resource supports intpartid narrowing.
+	 */
+	for_each_supported_resctrl_exports(res) {
+		rr = res->resctrl_res.res;
+		if (!rr->num_intpartid)
+			continue;
+		num_intpartid = min(num_intpartid, (int)rr->num_intpartid);
+	}
+}
+
+static u32 get_nr_closid(void)
+{
+	if (!intpartid_free_map)
+		return 0;
+
+	return num_intpartid;
+}
+
+int closid_bitmap_init(void)
+{
+	int pos;
+	u32 times, flag;
+	u32 bits_num;
+
+	mpam_resctrl_closid_collect();
+	bits_num = num_intpartid;
+	hw_alloc_times_validate(times, flag);
+	bits_num = rounddown(bits_num, times);
+	if (!bits_num)
+		return -EINVAL;
+
+	if (intpartid_free_map)
+		kfree(intpartid_free_map);
+
+	intpartid_free_map = bitmap_zalloc(bits_num, GFP_KERNEL);
+	if (!intpartid_free_map)
+		return -ENOMEM;
+
+	bitmap_set(intpartid_free_map, 0, bits_num);
+
+	/* CLOSID 0 is always reserved for the default group */
+	pos = find_first_bit(intpartid_free_map, bits_num);
+	bitmap_clear(intpartid_free_map, pos, times);
+
+	return 0;
+}
+
+/**
+ * struct rmid_transform - Matrix for transforming rmid to partid and pmg
+ * @rows:       Number of bits in each remap_body[:] bitmap
+ * @cols:       Number of bitmaps
+ * @nr_usage:   Number of rmids available
+ * @step_size:  Step size used when traversing the matrix once
+ * @step_cnt:   How many steps are allocated/freed at a time (e.g. 2 if cdp
+ *              is enabled)
+ * @remap_body: Array of per-column bitmaps recording occupation and pmg use
+ */
+struct rmid_transform {
+	u32 rows;
+	u32 cols;
+	u32 nr_usage;
+	int step_size;
+	int step_cnt;
+	unsigned long **remap_body;
+};
+static struct rmid_transform rmid_remap_matrix;
+DEFINE_STATIC_KEY_FALSE(rmid_remap_enable_key);
+
+static u32 get_nr_rmids(void)
+{
+	if (!static_branch_likely(&rmid_remap_enable_key))
+		return 0;
+
+	return rmid_remap_matrix.nr_usage;
+}
+
+/*
+ * An rmid remap matrix is used for transforming a (partid, pmg) pair into
+ * an rmid. The matrix is organized like this:
+ *
+ *                 [bitmap entry indexed by partid]
+ *
+ *                 [0] [1] [2] [3] [4] [5]
+ *     occ          1   0   0   1   1   1
+ *     bitmap[:0]   1   0   0   1   1   1
+ *     bitmap[:1]   1   1   1   1   1   1
+ *     bitmap[:2]   1   1   1   1   1   1
+ *                 [pos is pmg]
+ *
+ * Calculate rmid = partid + NR_partid * pmg
+ *
+ * occ indicates whether this bitmap is already in use by a partid, because
+ * a given partid must not be paired with a duplicated pmg for monitoring.
+ * This design saves a lot of space, and also reduces the complexity of
+ * allocating and freeing rmids from O(NR_partid) * O(NR_pmg) to
+ * O(NR_partid) + O(log(NR_pmg)) compared with using a list.
+ */
+static int set_rmid_remap_matrix(u32 rows, u32 cols)
+{
+	u32 times, flag;
+	int ret, col;
+
+	/*
+	 * cols stands for partids, so if cdp is enabled we must reserve
+	 * at least two partids at a time, one for LxCODE and one for
+	 * LxDATA.
+	 */
+	hw_alloc_times_validate(times, flag);
+	rmid_remap_matrix.cols = rounddown(cols, times);
+	rmid_remap_matrix.step_cnt = times;
+	if (times > rmid_remap_matrix.cols)
+		return -EINVAL;
+	/*
+	 * If only the pmg (Performance Monitor Group) selects the
+	 * monitor, step_size must be set to the maximum number of
+	 * columns, otherwise set it to 1, as Kunpeng 920 does.
+	 */
+	rmid_remap_matrix.step_size = 1;
+
+	/*
+	 * The first row of the rmid remap matrix is used to indicate
+	 * whether the remap bitmap is occupied by a col index.
+ */ + rmid_remap_matrix.rows = rows + 1; + + if (rows == 0 || cols == 0) + return -EINVAL; + + rmid_remap_matrix.nr_usage = rows * cols; + + /* free history pointer for matrix recreation */ + if (rmid_remap_matrix.remap_body) { + for (col = 0; col < cols; col++) { + if (!rmid_remap_matrix.remap_body[col]) + continue; + kfree(rmid_remap_matrix.remap_body[col]); + } + kfree(rmid_remap_matrix.remap_body); + } + + rmid_remap_matrix.remap_body = kcalloc(rmid_remap_matrix.cols, + sizeof(*rmid_remap_matrix.remap_body), GFP_KERNEL); + if (!rmid_remap_matrix.remap_body) + return -ENOMEM; + + for (col = 0; col < cols; col++) { + if (rmid_remap_matrix.remap_body[col]) + kfree(rmid_remap_matrix.remap_body[col]); + + rmid_remap_matrix.remap_body[col] = + bitmap_zalloc(rmid_remap_matrix.rows, + GFP_KERNEL); + if (!rmid_remap_matrix.remap_body[col]) { + ret = -ENOMEM; + goto clean; + } + + bitmap_set(rmid_remap_matrix.remap_body[col], + 0, rmid_remap_matrix.rows); + } + + /* make column entry of rmid matrix visible */ + static_branch_enable_cpuslocked(&rmid_remap_enable_key); + + return 0; +clean: + for (col = 0; col < cols; col++) { + if (!rmid_remap_matrix.remap_body[col]) + continue; + kfree(rmid_remap_matrix.remap_body[col]); + rmid_remap_matrix.remap_body[col] = NULL; + } + if (rmid_remap_matrix.remap_body) { + kfree(rmid_remap_matrix.remap_body); + rmid_remap_matrix.remap_body = NULL; + } + + /* if recreation failed, cannot use rmid remap matrix */ + static_branch_disable_cpuslocked(&rmid_remap_enable_key); + + return ret; +} + +static u32 probe_rmid_remap_matrix_cols(void) +{ + return (u32)num_reqpartid; +} + +static u32 probe_rmid_remap_matrix_rows(void) +{ + return (u32)mpam_sysprops_num_pmg(); +} + +static inline unsigned long **__rmid_remap_bmp(u32 col) +{ + if (!static_branch_likely(&rmid_remap_enable_key)) + return NULL; + + if (col >= rmid_remap_matrix.cols) + return NULL; + + return rmid_remap_matrix.remap_body + col; +} + +/* + * these macros defines how can we traverse rmid remap matrix, there are + * three scenarios: + * + * (1) step_size is default set to 1, if only PMG(NR_PMG=4) works, makes + * it equals to number of columns, step_cnt means how many times are + * allocated and released each time, at this time rmid remap matrix + * looks like: + * + * ^ + * | + * ------column------> + * + * RMID 0 1 2 3 (step_size=1) + * `---' + * `--> (step_cnt=2 if cdp enabled) + * + * RMID 0 1 2 3 (step_size=1) + * `-- + * `--> (step_cnt=1 if cdp disabled) + * + * (2) if PARTID(NR_PARTID=4) and PMG(NR_PMG=4) works together, at this + * time rmid remap matrix looks like: + * + * ------------row------------> + * | + * | RMID 0 1 2 3 (step_size=1) + * | `---' + * | `--> (step_cnt=2 if cdp enabled) + * | 4 5 6 7 + * | 8 9 10 11 + * v 12 13 14 15 + * + * (3) step_size not equal to 1, cross-line traversal, but this scenario + * did not happen yet. + */ + +#define __xy_initialize(x, y, from) \ + (x = from, y = 0) +#define __xy_overflow(x, y) \ + (y >= rmid_remap_matrix.cols) +#define __x_forward(x) \ + (x = (x + 1) % rmid_remap_matrix.cols) +#define __y_forward(x, y) \ + (y += ((x) ? 0 : 1)) + +#define __step_xy_initialize(step, x, y, from) \ + (x = from, step = 1, y = 0) +#define __step_align(from) \ + (!(from % (rmid_remap_matrix.step_size * \ + rmid_remap_matrix.step_cnt))) +#define __step_overflow(step) \ + (__xy_overflow(x, y) || \ + (step > rmid_remap_matrix.step_cnt)) +#define __step_x_forward(x) \ + __x_forward(x) +#define __step_forward(step, x) \ + (step += ((x % rmid_remap_matrix.step_size) ? 
0 : 1)) +#define __step_y_forward(x, y) \ + __y_forward(x, y) + +#define for_each_rmid_transform_point_step_from(p_entry, step, x, y, from) \ + for (__step_xy_initialize(step, x, y, from), \ + (p_entry) = __rmid_remap_bmp((from)); \ + __step_align(from) && !__step_overflow(step); \ + __step_x_forward(x), \ + __step_forward(step, x), \ + __step_y_forward(x, y), \ + (p_entry) = __rmid_remap_bmp(x)) \ + if (unlikely(((p_entry) == NULL) || \ + (*p_entry) == NULL)) \ + WARN_ON_ONCE(1); \ + else + +#define for_each_rmid_transform_point_from(p_entry, x, y, from) \ + for (__xy_initialize(x, y, from), \ + (p_entry) = __rmid_remap_bmp((from)); \ + !__xy_overflow(x, y); \ + __x_forward(x), \ + __y_forward(x, y), \ + (p_entry) = __rmid_remap_bmp(x)) \ + if (unlikely(((p_entry) == NULL) || \ + (*p_entry) == NULL)) \ + WARN_ON_ONCE(1); \ + else + +static void set_rmid_remap_bmp_occ(unsigned long *bmp) +{ + clear_bit(0, bmp); +} + +static void unset_rmid_remap_bmp_occ(unsigned long *bmp) +{ + set_bit(0, bmp); +} + +static int is_rmid_remap_bmp_bdr_set(unsigned long *bmp, int b) +{ + return (test_bit(b + 1, bmp) == 0) ? 1 : 0; +} + +static void rmid_remap_bmp_bdr_set(unsigned long *bmp, int b) +{ + set_bit(b + 1, bmp); +} + +static void rmid_remap_bmp_bdr_clear(unsigned long *bmp, int b) +{ + clear_bit(b + 1, bmp); +} + +static int is_rmid_remap_bmp_occ(unsigned long *bmp) +{ + return (find_first_bit(bmp, rmid_remap_matrix.rows) == 0) ? 0 : 1; +} + +static int is_rmid_remap_bmp_full(unsigned long *bmp) +{ + return ((is_rmid_remap_bmp_occ(bmp) && + bitmap_weight(bmp, rmid_remap_matrix.rows) == + (rmid_remap_matrix.rows-1)) || + bitmap_full(bmp, rmid_remap_matrix.rows)); +} + +static int rmid_remap_bmp_find_step_entry(int partid) +{ + int x, y; + unsigned long **bmp; + + if (rmid_remap_matrix.step_size == + rmid_remap_matrix.cols) + return 0; + + /* step entry should be non-occupied and aligned */ + bmp = __rmid_remap_bmp(partid); + if (bmp) + return (is_rmid_remap_bmp_occ(*bmp) || + !__step_align(partid)) ? -ENOSPC : partid; + + for_each_rmid_transform_point_from(bmp, x, y, 0) { + /* + * do not waste partid resource, start + * from step aligned position. + */ + if (__step_align(x) && !is_rmid_remap_bmp_occ(*bmp)) + return x; + } + + return -ENOSPC; +} + +static int rmid_remap_bmp_alloc_pmg(unsigned long *bmp) +{ + int pos; + + pos = find_first_bit(bmp, rmid_remap_matrix.rows); + if (pos == rmid_remap_matrix.rows) + return -ENOSPC; + + clear_bit(pos, bmp); + return pos - 1; +} + +static int rmid_remap_matrix_init(void) +{ + int x, y, step, ret; + u32 cols, rows; + unsigned long **bmp; + + cols = probe_rmid_remap_matrix_cols(); + rows = probe_rmid_remap_matrix_rows(); + + ret = set_rmid_remap_matrix(rows, cols); + if (ret) + goto out; + + /* + * if CDP disabled, drop partid = 0, pmg = 0 + * from bitmap for root resctrl group reserving + * default rmid, otherwise drop partid = 0 and + * partid = 1 for LxCACHE, LxDATA reservation. + */ + for_each_rmid_transform_point_step_from(bmp, step, x, y, 0) { + set_rmid_remap_bmp_occ(*bmp); + rmid_remap_bmp_alloc_pmg(*bmp); + } + + ret = rmid_mon_ptrs_init(rmid_remap_matrix.nr_usage); + if (ret) + goto out; + + return 0; +out: + return ret; +} + +int resctrl_id_init(void) +{ + int ret; + + ret = closid_bitmap_init(); + if (ret) + return ret; + + return rmid_remap_matrix_init(); +} + +static int is_rmid_valid(int rmid) +{ + return ((u32)rmid >= rmid_remap_matrix.nr_usage) ? 
0 : 1; +} + +static int to_rmid(int partid, int pmg) +{ + return (partid + (rmid_remap_matrix.cols * pmg)); +} + +static int rmid_to_partid_pmg(int rmid, int *partid, int *pmg) +{ + if (!is_rmid_valid(rmid)) + return -EINVAL; + + if (pmg) + *pmg = rmid / rmid_remap_matrix.cols; + if (partid) + *partid = rmid % rmid_remap_matrix.cols; + return 0; +} + +static int __rmid_alloc(int partid, int pmg) +{ + int x, y, step, ret, rmid; + bool checkpmg = false; + unsigned long **bmp; + + if (pmg >= 0) + checkpmg = true; + + /* traverse from first non-occupied and step-aligned entry */ + ret = rmid_remap_bmp_find_step_entry(partid); + if (ret < 0) + goto out; + partid = ret; + + for_each_rmid_transform_point_step_from(bmp, step, x, y, partid) { + set_rmid_remap_bmp_occ(*bmp); + + /* checking if the given pmg is available */ + if (checkpmg) { + /* + * it can only happened in step_size aligned + * position, so it does not exist pmgs cleared + * before. + */ + if (is_rmid_remap_bmp_bdr_set(*bmp, pmg + y)) { + ret = -EEXIST; + goto out; + } + rmid_remap_bmp_bdr_clear(*bmp, pmg + y); + continue; + } + + /* alloc available pmg */ + ret = rmid_remap_bmp_alloc_pmg(*bmp); + if (ret < 0) + goto out; + /* always return first pmg */ + if (pmg < 0) + pmg = ret; + } + + rmid = to_rmid(partid, pmg); + if (!is_rmid_valid(rmid)) { + ret = -ENOSPC; + goto out; + } + ret = assoc_rmid_with_mon(rmid); + if (ret) { + rmid_free(rmid); + goto out; + } + + return rmid; +out: + return ret; +} + +int rmid_alloc(int partid) +{ + return __rmid_alloc(partid, -1); +} + +void rmid_free(int rmid) +{ + int x, y, step, partid, pmg; + unsigned long **bmp; + + if (rmid_to_partid_pmg(rmid, &partid, &pmg)) + return; + + for_each_rmid_transform_point_step_from(bmp, step, x, y, partid) { + rmid_remap_bmp_bdr_set(*bmp, pmg + y); + if (is_rmid_remap_bmp_full(*bmp)) + unset_rmid_remap_bmp_occ(*bmp); + } + + deassoc_rmid_with_mon(rmid); +} + +int mpam_rmid_to_partid_pmg(int rmid, int *partid, int *pmg) +{ + return rmid_to_partid_pmg(rmid, partid, pmg); +} +EXPORT_SYMBOL(mpam_rmid_to_partid_pmg); + +/* + * If cdp enabled, allocate two closid once time, then return first + * allocated id. + */ +int closid_alloc(void) +{ + int pos; + u32 times, flag; + + hw_alloc_times_validate(times, flag); + + pos = find_first_bit(intpartid_free_map, num_intpartid); + if (pos == num_intpartid) + return -ENOSPC; + + bitmap_clear(intpartid_free_map, pos, times); + + return pos; +} + +void closid_free(int closid) +{ + u32 times, flag; + + hw_alloc_times_validate(times, flag); + bitmap_set(intpartid_free_map, closid, times); +} + +/* + * Choose a width for the resource name and resource data based on the + * resource that has widest name and cbm. 
+ */ +static void mpam_init_padding(void) +{ + int cl; + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + + cl = strlen(r->name); + if (cl > max_name_width) + max_name_width = cl; + + rr = r->res; + if (!rr) + continue; + cl = rr->data_width; + if (cl > max_data_width) + max_data_width = cl; + } +} + +void post_resctrl_mount(void) +{ + if (rdt_alloc_capable) + static_branch_enable_cpuslocked(&resctrl_alloc_enable_key); + if (rdt_mon_capable) + static_branch_enable_cpuslocked(&resctrl_mon_enable_key); + + if (rdt_alloc_capable || rdt_mon_capable) + static_branch_enable_cpuslocked(&resctrl_enable_key); +} + +void release_rdtgroupfs_options(void) +{ +} + +static void disable_cdp(void) +{ + struct mpam_resctrl_res *res; + struct resctrl_resource *r; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + r->cdp_enable = false; + } + + resctrl_cdp_enabled = false; +} + +static int try_to_enable_cdp(enum resctrl_resource_level level) +{ + struct resctrl_resource *r = mpam_resctrl_get_resource(level); + + if (!r || !r->cdp_capable) + return -EINVAL; + + r->cdp_enable = true; + + resctrl_cdp_enabled = true; + return 0; +} + +static int cdpl3_enable(void) +{ + return try_to_enable_cdp(RDT_RESOURCE_L3); +} + +static int cdpl2_enable(void) +{ + return try_to_enable_cdp(RDT_RESOURCE_L2); +} + +static void basic_ctrl_enable(void) +{ + struct mpam_resctrl_res *res; + struct raw_resctrl_resource *rr; + + for_each_supported_resctrl_exports(res) { + rr = res->resctrl_res.res; + /* At least SCHEMA_COMM is supported */ + rr->ctrl_features[SCHEMA_COMM].enabled = true; + } +} + +static int extend_ctrl_enable(char *tok) +{ + bool match = false; + struct resctrl_resource *r; + struct raw_resctrl_resource *rr; + struct mpam_resctrl_res *res; + struct resctrl_ctrl_feature *feature; + enum resctrl_ctrl_type type; + + for_each_supported_resctrl_exports(res) { + r = &res->resctrl_res; + if (!r->alloc_capable) + continue; + rr = r->res; + for_each_extend_ctrl_type(type) { + feature = &rr->ctrl_features[type]; + if (!feature->capable || !feature->name) + continue; + if (strcmp(feature->name, tok)) + continue; + + rr->ctrl_features[type].enabled = true; + /* + * If we chose to enable a feature also embraces + * SCHEMA_COMM, SCHEMA_COMM will not be selected. 
+			 */
+			if (feature->flags == SCHEMA_COMM)
+				rr->ctrl_features[SCHEMA_COMM].enabled = false;
+			match = true;
+		}
+	}
+
+	if (!match)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void extend_ctrl_disable(void)
+{
+	struct raw_resctrl_resource *rr;
+	struct mpam_resctrl_res *res;
+	struct resctrl_ctrl_feature *feature;
+	enum resctrl_ctrl_type type;
+
+	for_each_supported_resctrl_exports(res) {
+		rr = res->resctrl_res.res;
+		for_each_extend_ctrl_type(type) {
+			feature = &rr->ctrl_features[type];
+			feature->enabled = false;
+		}
+	}
+}
+
+int parse_rdtgroupfs_options(char *data)
+{
+	char *token;
+	char *o = data;
+	int ret = 0;
+
+	disable_cdp();
+	extend_ctrl_disable();
+	basic_ctrl_enable();
+
+	while ((token = strsep(&o, ",")) != NULL) {
+		if (!*token) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (!strcmp(token, "cdpl3")) {
+			ret = cdpl3_enable();
+			if (ret)
+				goto out;
+		} else if (!strcmp(token, "cdpl2")) {
+			ret = cdpl2_enable();
+			if (ret)
+				goto out;
+		} else {
+			ret = extend_ctrl_enable(token);
+			if (ret)
+				goto out;
+		}
+	}
+
+	return 0;
+
+out:
+	pr_err("Invalid mount option \"%s\"\n", token);
+
+	return ret;
+}
+
+/*
+ * This is safe against mpam_sched_in() called from __switch_to()
+ * because __switch_to() is executed with interrupts disabled. A local call
+ * from update_closid_rmid() is protected against __switch_to() because
+ * preemption is disabled.
+ */
+void update_cpu_closid_rmid(void *info)
+{
+	struct rdtgroup *r = info;
+
+	if (r) {
+		this_cpu_write(pqr_state.default_closid, resctrl_navie_closid(r->closid));
+		this_cpu_write(pqr_state.default_rmid, resctrl_navie_rmid(r->mon.rmid));
+	}
+
+	/*
+	 * We cannot unconditionally write the MSR because the current
+	 * executing task might have its own closid selected. Just reuse
+	 * the context switch code.
+	 */
+	mpam_sched_in();
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids/rmids must have been set up before calling this function.
+ */
+void
+update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
+{
+	int cpu = get_cpu();
+
+	if (cpumask_test_cpu(cpu, cpu_mask))
+		update_cpu_closid_rmid(r);
+	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
+	put_cpu();
+}
+
+struct task_move_callback {
+	struct callback_head work;
+	struct rdtgroup *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+	struct task_move_callback *callback;
+	struct rdtgroup *rdtgrp;
+
+	callback = container_of(head, struct task_move_callback, work);
+	rdtgrp = callback->rdtgrp;
+
+	/*
+	 * If resource group was deleted before this task work callback
+	 * was invoked, then assign the task to root group and free the
+	 * resource group.
+	 */
+	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+	    (rdtgrp->flags & RDT_DELETED)) {
+		current->closid = 0;
+		current->rmid = 0;
+		rdtgroup_remove(rdtgrp);
+	}
+
+	preempt_disable();
+	/* update PQR_ASSOC MSR to make resource group go into effect */
+	mpam_sched_in();
+	preempt_enable();
+
+	kfree(callback);
+}
+
+int __resctrl_group_move_task(struct task_struct *tsk,
+			      struct rdtgroup *rdtgrp)
+{
+	struct task_move_callback *callback;
+	int ret;
+
+	callback = kzalloc(sizeof(*callback), GFP_NOWAIT);
+	if (!callback)
+		return -ENOMEM;
+	callback->work.func = move_myself;
+	callback->rdtgrp = rdtgrp;
+
+	/*
+	 * Take a refcount, so rdtgrp cannot be freed before the
+	 * callback has been invoked.
+	 */
+	atomic_inc(&rdtgrp->waitcount);
+	ret = task_work_add(tsk, &callback->work, true);
+	if (ret) {
+		/*
+		 * Task is exiting.
Drop the refcount and free the callback. + * No need to check the refcount as the group cannot be + * deleted before the write function unlocks resctrl_group_mutex. + */ + atomic_dec(&rdtgrp->waitcount); + kfree(callback); + rdt_last_cmd_puts("task exited\n"); + } else { + /* + * For ctrl_mon groups move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. + */ + if (rdtgrp->type == RDTCTRL_GROUP) { + tsk->closid = resctrl_navie_closid(rdtgrp->closid); + tsk->rmid = resctrl_navie_rmid(rdtgrp->mon.rmid); + } else if (rdtgrp->type == RDTMON_GROUP) { + if (rdtgrp->mon.parent->closid.intpartid == tsk->closid) { + tsk->closid = resctrl_navie_closid(rdtgrp->closid); + tsk->rmid = resctrl_navie_rmid(rdtgrp->mon.rmid); + } else { + rdt_last_cmd_puts("Can't move task to different control group\n"); + ret = -EINVAL; + } + } + } + return ret; +} + +static int resctrl_group_seqfile_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + struct rftype *rft = of->kn->priv; + + if (rft->seq_show) + return rft->seq_show(of, m, arg); + return 0; +} + +static ssize_t resctrl_group_file_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct rftype *rft = of->kn->priv; + + if (rft->write) + return rft->write(of, buf, nbytes, off); + + return -EINVAL; +} + +struct kernfs_ops resctrl_group_kf_single_ops = { + .atomic_write_len = PAGE_SIZE, + .write = resctrl_group_file_write, + .seq_show = resctrl_group_seqfile_show, +}; + +static bool is_cpu_list(struct kernfs_open_file *of) +{ + struct rftype *rft = of->kn->priv; + + return rft->flags & RFTYPE_FLAGS_CPUS_LIST; +} + +static int resctrl_group_cpus_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + + if (rdtgrp) { + seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", + cpumask_pr_args(&rdtgrp->cpu_mask)); + } else { + ret = -ENOENT; + } + resctrl_group_kn_unlock(of->kn); + + return ret; +} + +static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +{ + struct rdtgroup *crgrp; + + cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); + /* update the child mon group masks as well*/ + list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) + cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); +} + +int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +{ + struct rdtgroup *r, *crgrp; + struct list_head *head; + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (cpumask_weight(tmpmask)) { + /* Can't drop from default group */ + if (rdtgrp == &resctrl_group_default) { + rdt_last_cmd_puts("Can't drop CPUs from default group\n"); + return -EINVAL; + } + + /* Give any dropped cpus to rdtgroup_default */ + cpumask_or(&resctrl_group_default.cpu_mask, + &resctrl_group_default.cpu_mask, tmpmask); + update_closid_rmid(tmpmask, &resctrl_group_default); + } + + /* + * If we added cpus, remove them from previous group and + * the prev group's child groups that owned them + * and update per-cpu closid/rmid. 
+ */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (cpumask_weight(tmpmask)) { + list_for_each_entry(r, &resctrl_all_groups, resctrl_group_list) { + if (r == rdtgrp) + continue; + cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); + if (cpumask_weight(tmpmask1)) + cpumask_rdtgrp_clear(r, tmpmask1); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + /* + * Clear child mon group masks since there is a new parent mask + * now and update the rmid for the cpus the child lost. + */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); + update_closid_rmid(tmpmask, rdtgrp); + cpumask_clear(&crgrp->cpu_mask); + } + + return 0; +} + +int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask) +{ + struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; + struct list_head *head; + + /* Check whether cpus belong to parent ctrl group */ + cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); + if (cpumask_weight(tmpmask)) { + rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n"); + return -EINVAL; + } + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (cpumask_weight(tmpmask)) { + /* Give any dropped cpus to parent rdtgroup */ + cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); + update_closid_rmid(tmpmask, prgrp); + } + + /* + * If we added cpus, remove them from previous group that owned them + * and update per-cpu rmid + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (cpumask_weight(tmpmask)) { + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + if (crgrp == rdtgrp) + continue; + cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, + tmpmask); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + return 0; +} + +static ssize_t resctrl_group_cpus_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + cpumask_var_t tmpmask, newmask, tmpmask1; + struct rdtgroup *rdtgrp; + int ret; + + if (!buf) + return -EINVAL; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + return -ENOMEM; + } + if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + return -ENOMEM; + } + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + rdt_last_cmd_clear(); + if (!rdtgrp) { + ret = -ENOENT; + rdt_last_cmd_puts("directory was removed\n"); + goto unlock; + } + + if (is_cpu_list(of)) + ret = cpulist_parse(buf, newmask); + else + ret = cpumask_parse(buf, newmask); + + if (ret) { + rdt_last_cmd_puts("bad cpu list/mask\n"); + goto unlock; + } + + /* check that user didn't specify any offline cpus */ + cpumask_andnot(tmpmask, newmask, cpu_online_mask); + if (cpumask_weight(tmpmask)) { + ret = -EINVAL; + rdt_last_cmd_puts("can only assign online cpus\n"); + goto unlock; + } + + if (rdtgrp->type == RDTCTRL_GROUP) + ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); + else if (rdtgrp->type == RDTMON_GROUP) + ret = cpus_mon_write(rdtgrp, newmask, tmpmask); + else + ret = -EINVAL; + +unlock: + resctrl_group_kn_unlock(of->kn); + free_cpumask_var(tmpmask); + 
free_cpumask_var(newmask); + free_cpumask_var(tmpmask1); + + return ret ?: nbytes; +} + + +static int resctrl_group_task_write_permission(struct task_struct *task, + struct kernfs_open_file *of) +{ + const struct cred *tcred = get_task_cred(task); + const struct cred *cred = current_cred(); + int ret = 0; + + /* + * Even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. + */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) { + rdt_last_cmd_printf("No permission to move task %d\n", task->pid); + ret = -EPERM; + } + + put_cred(tcred); + return ret; +} + +static int resctrl_group_move_task(pid_t pid, struct rdtgroup *rdtgrp, + struct kernfs_open_file *of) +{ + struct task_struct *tsk; + int ret; + + rcu_read_lock(); + if (pid) { + tsk = find_task_by_vpid(pid); + if (!tsk) { + rcu_read_unlock(); + rdt_last_cmd_printf("No task %d\n", pid); + return -ESRCH; + } + } else { + tsk = current; + } + + get_task_struct(tsk); + rcu_read_unlock(); + + ret = resctrl_group_task_write_permission(tsk, of); + if (!ret) + ret = __resctrl_group_move_task(tsk, rdtgrp); + + put_task_struct(tsk); + return ret; +} + +static struct seq_buf last_cmd_status; +static char last_cmd_status_buf[512]; + +void rdt_last_cmd_clear(void) +{ + lockdep_assert_held(&resctrl_group_mutex); + seq_buf_clear(&last_cmd_status); +} + +void rdt_last_cmd_puts(const char *s) +{ + lockdep_assert_held(&resctrl_group_mutex); + seq_buf_puts(&last_cmd_status, s); +} + +void rdt_last_cmd_printf(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + lockdep_assert_held(&resctrl_group_mutex); + seq_buf_vprintf(&last_cmd_status, fmt, ap); + va_end(ap); +} + +static int resctrl_last_cmd_status_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + int len; + + mutex_lock(&resctrl_group_mutex); + len = seq_buf_used(&last_cmd_status); + if (len) + seq_printf(seq, "%.*s", len, last_cmd_status_buf); + else + seq_puts(seq, ""); + mutex_unlock(&resctrl_group_mutex); + return 0; +} + +static int resctrl_num_closids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + u32 flag, times; + + hw_alloc_times_validate(times, flag); + + seq_printf(seq, "%u\n", get_nr_closid() / times); + return 0; +} + +static int resctrl_cbm_mask_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + struct raw_resctrl_resource *rr = r->res; + + seq_printf(seq, "%x\n", rr->ctrl_features[SCHEMA_COMM].default_ctrl); + return 0; +} + +static int resctrl_min_cbm_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%u\n", r->cache.min_cbm_bits); + return 0; +} + +static int resctrl_shareable_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%x\n", r->cache.shareable_bits); + return 0; +} + +static int resctrl_features_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + enum resctrl_ctrl_type type; + struct resctrl_resource *r = of->kn->parent->priv; + struct raw_resctrl_resource *rr = r->res; + + for_each_extend_ctrl_type(type) { + if (!rr->ctrl_features[type].enabled) + continue; + /* + * we define the range of ctrl features with integer, + * here give maximum upper bound to user space. 
+ */ + switch (rr->ctrl_features[type].base) { + case 10: + seq_printf(seq, "%s@%u\n", rr->ctrl_features[type].name, + rr->ctrl_features[type].max_wd - 1); + break; + case 16: + seq_printf(seq, "%s@%x\n", rr->ctrl_features[type].name, + rr->ctrl_features[type].max_wd - 1); + break; + default: + break; + } + } + return 0; +} + +static int resctrl_min_bandwidth_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%u\n", r->mbw.min_bw); + return 0; +} + +static int resctrl_bandwidth_gran_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%u\n", r->mbw.bw_gran); + return 0; +} + +static int resctrl_num_rmids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + u32 flag, times; + + hw_alloc_times_validate(times, flag); + seq_printf(seq, "%u\n", get_nr_rmids() / times); + return 0; +} + +static int resctrl_num_monitors_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_resource *r = of->kn->parent->priv; + struct raw_resctrl_resource *rr = r->res; + u32 flag, times; + + hw_alloc_times_validate(times, flag); + seq_printf(seq, "%u\n", rr->num_mon / times); + return 0; +} + + +static ssize_t resctrl_group_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + pid_t pid; + + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) + return -EINVAL; + rdtgrp = resctrl_group_kn_lock_live(of->kn); + rdt_last_cmd_clear(); + + if (rdtgrp) + ret = resctrl_group_move_task(pid, rdtgrp, of); + else + ret = -ENOENT; + + resctrl_group_kn_unlock(of->kn); + + return ret ?: nbytes; +} + +static void show_resctrl_tasks(struct rdtgroup *r, struct seq_file *s) +{ + struct task_struct *p, *t; + + rcu_read_lock(); + for_each_process_thread(p, t) { + if ((r->type == RDTMON_GROUP && + t->rmid == resctrl_navie_rmid(r->mon.rmid)) || + (r->type == RDTCTRL_GROUP && + t->closid == resctrl_navie_closid(r->closid))) + seq_printf(s, "%d\n", t->pid); + } + rcu_read_unlock(); +} + +static int resctrl_group_tasks_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (rdtgrp) + show_resctrl_tasks(rdtgrp, s); + else + ret = -ENOENT; + resctrl_group_kn_unlock(of->kn); + + return ret; +} + +static int resctrl_group_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + int ret = 0; + struct rdtgroup *rdtgrp; + u32 flag, times; + + hw_alloc_times_validate(times, flag); + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (rdtgrp) { + if (flag) + seq_printf(s, "%u-%u\n", rdtgrp->mon.rmid, + rdtgrp->mon.rmid + 1); + else + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + } else + ret = -ENOENT; + resctrl_group_kn_unlock(of->kn); + + return ret; +} + +static ssize_t resctrl_group_rmid_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + int partid; + int pmg; + int rmid; + int old_rmid; + int old_reqpartid; + struct task_struct *p, *t; + + if (kstrtoint(strstrip(buf), 0, &rmid) || rmid < 0) + return -EINVAL; + + rdtgrp = resctrl_group_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto unlock; + } + + rdt_last_cmd_clear(); + + if (rmid == 0 || rdtgrp->mon.rmid == 0) { + ret = -EINVAL; + rdt_last_cmd_puts("default rmid 0 is always 
kept\n"); + goto unlock; + } + + ret = rmid_to_partid_pmg(rmid, &partid, &pmg); + if (ret < 0) { + ret = -EINVAL; + rdt_last_cmd_puts("invalid rmid\n"); + goto unlock; + } + + if (rmid == rdtgrp->mon.rmid) + goto unlock; + + if (rdtgrp->type != RDTCTRL_GROUP || + !list_empty(&rdtgrp->mon.crdtgrp_list)) { + ret = -EOPNOTSUPP; + rdt_last_cmd_puts("unsupported operation\n"); + goto unlock; + } + + ret = __rmid_alloc(partid, pmg); + if (ret < 0) { + rdt_last_cmd_puts("set rmid failed\n"); + goto unlock; + } + + old_rmid = rdtgrp->mon.rmid; + old_reqpartid = rdtgrp->closid.reqpartid; + + /* + * we use intpartid as group control, use reqpartid for config + * synchronization and monitor, only update the reqpartid + */ + rdtgrp->closid.reqpartid = partid; + rdtgrp->mon.rmid = rmid; + + /* update rmid for mondata */ + ret = resctrl_mkdir_mondata_all_subdir(rdtgrp->mon.mon_data_kn, rdtgrp); + if (ret) { + rdt_last_cmd_puts("update rmid for mondata failed\n"); + goto rollback; + } + + /* resync groups configuration */ + rdtgrp->resync = 1; + ret = resctrl_update_groups_config(rdtgrp); + if (ret) { + rdt_last_cmd_puts("update groups config failed\n"); + goto rollback; + } + + read_lock(&tasklist_lock); + for_each_process_thread(p, t) { + if (t->closid == rdtgrp->closid.intpartid) { + ret = __resctrl_group_move_task(t, rdtgrp); + if (ret) { + read_unlock(&tasklist_lock); + goto rollback; + } + } + } + read_unlock(&tasklist_lock); + + update_closid_rmid(&rdtgrp->cpu_mask, rdtgrp); + rmid_free(old_rmid); + +unlock: + resctrl_group_kn_unlock(of->kn); + if (ret) + return ret; + + return nbytes; + +rollback: + rdtgrp->mon.rmid = old_rmid; + rdtgrp->closid.reqpartid = old_reqpartid; + + /* the old rmid is valid, so mkdir mondata here won't fail */ + resctrl_mkdir_mondata_all_subdir(rdtgrp->mon.mon_data_kn, rdtgrp); + + rdtgrp->resync = 1; + WARN_ON_ONCE(resctrl_update_groups_config(rdtgrp)); + + read_lock(&tasklist_lock); + for_each_process_thread(p, t) { + if (t->closid == rdtgrp->closid.intpartid) + WARN_ON_ONCE(__resctrl_group_move_task(t, rdtgrp)); + } + read_unlock(&tasklist_lock); + + rmid_free(rmid); + resctrl_group_kn_unlock(of->kn); + return ret; +} + +/* rdtgroup information files for one cache resource. 
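The rmid write path above follows a save/attempt/rollback pattern: record the old rmid and reqpartid, try to propagate the new values, and restore the old state if any step fails. A minimal, self-contained sketch of that pattern; the failure injection and field names are purely illustrative, not the kernel's API:

#include <stdio.h>
#include <stdbool.h>

struct group_state { int rmid; int reqpartid; };

/* Stand-in for the kernel's "propagate configuration" steps; may fail. */
static bool apply_config(const struct group_state *g)
{
	return g->rmid != 99;	/* pretend rmid 99 cannot be programmed */
}

static int change_rmid(struct group_state *g, int new_rmid, int new_partid)
{
	struct group_state old = *g;	/* save for rollback */

	g->rmid = new_rmid;
	g->reqpartid = new_partid;

	if (!apply_config(g)) {
		*g = old;		/* roll back to the previous state */
		apply_config(g);	/* the old state was valid, so this succeeds */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct group_state g = { .rmid = 4, .reqpartid = 2 };

	printf("update to 99: %d (rmid stays %d)\n", change_rmid(&g, 99, 3), g.rmid);
	printf("update to 8:  %d (rmid now  %d)\n", change_rmid(&g, 8, 3), g.rmid);
	return 0;
}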
*/ +static struct rftype res_specific_files[] = { + { + .name = "last_cmd_status", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_last_cmd_status_show, + .fflags = RF_TOP_INFO, + }, + { + .name = "num_closids", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_num_closids_show, + .fflags = RF_CTRL_INFO, + }, + { + .name = "cbm_mask", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_cbm_mask_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_cbm_bits", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_min_cbm_bits_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "shareable_bits", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_shareable_bits_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "features", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_features_show, + .fflags = RF_CTRL_INFO, + }, + { + .name = "min_bandwidth", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_min_bandwidth_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "bandwidth_gran", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_bandwidth_gran_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "num_rmids", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_num_rmids_show, + .fflags = RF_MON_INFO, + }, + { + .name = "num_monitors", + .mode = 0444, + .kf_ops = &resctrl_group_kf_single_ops, + .seq_show = resctrl_num_monitors_show, + .fflags = RF_MON_INFO, + }, + { + .name = "cpus", + .mode = 0644, + .kf_ops = &resctrl_group_kf_single_ops, + .write = resctrl_group_cpus_write, + .seq_show = resctrl_group_cpus_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "cpus_list", + .mode = 0644, + .kf_ops = &resctrl_group_kf_single_ops, + .write = resctrl_group_cpus_write, + .seq_show = resctrl_group_cpus_show, + .flags = RFTYPE_FLAGS_CPUS_LIST, + .fflags = RFTYPE_BASE, + }, + { + .name = "tasks", + .mode = 0644, + .kf_ops = &resctrl_group_kf_single_ops, + .write = resctrl_group_tasks_write, + .seq_show = resctrl_group_tasks_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "rmid", + .mode = 0644, + .kf_ops = &resctrl_group_kf_single_ops, + .write = resctrl_group_rmid_write, + .seq_show = resctrl_group_rmid_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "schemata", + .mode = 0644, + .kf_ops = &resctrl_group_kf_single_ops, + .write = resctrl_group_schemata_write, + .seq_show = resctrl_group_schemata_show, + .fflags = RF_CTRL_BASE, + } +}; + +struct rdt_domain *mpam_find_domain(struct resctrl_resource *r, int id, + struct list_head **pos) +{ + struct rdt_domain *d; + struct list_head *l; + + if (id < 0) + return ERR_PTR(id); + + list_for_each(l, &r->domains) { + d = list_entry(l, struct rdt_domain, list); + /* When id is found, return its domain. */ + if (id == d->id) + return d; + /* Stop searching when finding id's position in sorted list. 
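mpam_find_domain() above either returns the matching domain or, as the comment notes, records where a new domain would keep the list sorted. A user-space sketch of the same find-or-position idea over a sorted array; the helper name and data are invented:

#include <stdio.h>

/* Return the index of id in ids[], or -1 and set *pos to its sorted insertion point. */
static int find_or_position(const int *ids, int n, int id, int *pos)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ids[i] == id)
			return i;	/* found: report its position */
		if (ids[i] > id)
			break;		/* passed it: a new entry belongs here */
	}
	*pos = i;
	return -1;
}

int main(void)
{
	int ids[] = { 0, 2, 5, 9 };
	int pos = 0;

	printf("find 5 -> index %d\n", find_or_position(ids, 4, 5, &pos));
	printf("find 3 -> %d, insert at %d\n", find_or_position(ids, 4, 3, &pos), pos);
	return 0;
}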
*/ + if (id < d->id) + break; + } + + if (pos) + *pos = l; + + return NULL; +} + +enum mpam_enable_type __read_mostly mpam_enabled; +static int __init mpam_setup(char *str) +{ + if (!strcmp(str, "=acpi")) + mpam_enabled = MPAM_ENABLE_ACPI; + else if (!strcmp(str, "=of")) + mpam_enabled = MPAM_ENABLE_OF; + + return 1; +} +__setup("mpam", mpam_setup); + +int mpam_resctrl_init(void) +{ + mpam_init_padding(); + + register_resctrl_specific_files(res_specific_files, + ARRAY_SIZE(res_specific_files)); + + seq_buf_init(&last_cmd_status, last_cmd_status_buf, + sizeof(last_cmd_status_buf)); + + return resctrl_group_init(); +} + +/* + * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR + * + * Following considerations are made so that this has minimal impact + * on scheduler hot path: + * - This will stay as no-op unless we are running on an Intel SKU + * which supports resource control or monitoring and we enable by + * mounting the resctrl file system. + * - Caches the per cpu CLOSid/RMID values and does the MSR write only + * when a task with a different CLOSid/RMID is scheduled in. + * - We allocate RMIDs/CLOSids globally in order to keep this as + * simple as possible. + * Must be called with preemption disabled. + */ +void __mpam_sched_in(void) +{ + struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + u64 partid_d, partid_i; + u64 rmid = state->default_rmid; + u64 closid = state->default_closid; + u64 reqpartid = 0; + u64 pmg = 0; + + /* + * If this task has a closid/rmid assigned, use it. + * Else use the closid/rmid assigned to this cpu. + */ + if (static_branch_likely(&resctrl_alloc_enable_key)) { + if (current->closid) + closid = current->closid; + } + + if (static_branch_likely(&resctrl_mon_enable_key)) { + if (current->rmid) + rmid = current->rmid; + } + + if (closid != state->cur_closid || rmid != state->cur_rmid) { + u64 reg; + + resctrl_navie_rmid_partid_pmg(rmid, (int *)&reqpartid, (int *)&pmg); + + if (resctrl_cdp_enabled) { + resctrl_cdp_mpamid_map_val(reqpartid, CDP_DATA, partid_d); + resctrl_cdp_mpamid_map_val(reqpartid, CDP_CODE, partid_i); + + /* set in EL0 */ + reg = mpam_read_sysreg_s(SYS_MPAM0_EL1, "SYS_MPAM0_EL1"); + reg = PARTID_D_SET(reg, partid_d); + reg = PARTID_I_SET(reg, partid_i); + reg = PMG_SET(reg, pmg); + mpam_write_sysreg_s(reg, SYS_MPAM0_EL1, "SYS_MPAM0_EL1"); + + /* set in EL1 */ + reg = mpam_read_sysreg_s(SYS_MPAM1_EL1, "SYS_MPAM1_EL1"); + reg = PARTID_D_SET(reg, partid_d); + reg = PARTID_I_SET(reg, partid_i); + reg = PMG_SET(reg, pmg); + mpam_write_sysreg_s(reg, SYS_MPAM1_EL1, "SYS_MPAM1_EL1"); + } else { + /* set in EL0 */ + reg = mpam_read_sysreg_s(SYS_MPAM0_EL1, "SYS_MPAM0_EL1"); + reg = PARTID_SET(reg, reqpartid); + reg = PMG_SET(reg, pmg); + mpam_write_sysreg_s(reg, SYS_MPAM0_EL1, "SYS_MPAM0_EL1"); + + /* set in EL1 */ + reg = mpam_read_sysreg_s(SYS_MPAM1_EL1, "SYS_MPAM1_EL1"); + reg = PARTID_SET(reg, reqpartid); + reg = PMG_SET(reg, pmg); + mpam_write_sysreg_s(reg, SYS_MPAM1_EL1, "SYS_MPAM1_EL1"); + } + + state->cur_rmid = rmid; + state->cur_closid = closid; + } +} + +static void +mpam_update_from_resctrl_cfg(struct mpam_resctrl_res *res, + u32 resctrl_cfg, enum rdt_event_id evt, + struct mpam_config *mpam_cfg) +{ + u64 range; + + switch (evt) { + case QOS_MBA_PBM_EVENT_ID: + /* .. 
the number of bits we can set */ + range = res->class->mbw_pbm_bits; + mpam_cfg->mbw_pbm = + (resctrl_cfg * range) / MAX_MBA_BW; + mpam_set_feature(mpam_feat_mbw_part, &mpam_cfg->valid); + break; + case QOS_MBA_MAX_EVENT_ID: + range = MBW_MAX_BWA_FRACT(res->class->bwa_wd); + mpam_cfg->mbw_max = (resctrl_cfg * range) / (MAX_MBA_BW - 1); + mpam_cfg->mbw_max = + (mpam_cfg->mbw_max > range) ? range : mpam_cfg->mbw_max; + mpam_set_feature(mpam_feat_mbw_max, &mpam_cfg->valid); + break; + case QOS_MBA_MIN_EVENT_ID: + range = MBW_MAX_BWA_FRACT(res->class->bwa_wd); + mpam_cfg->mbw_min = (resctrl_cfg * range) / (MAX_MBA_BW - 1); + mpam_cfg->mbw_min = + (mpam_cfg->mbw_min > range) ? range : mpam_cfg->mbw_min; + mpam_set_feature(mpam_feat_mbw_min, &mpam_cfg->valid); + break; + case QOS_MBA_HDL_EVENT_ID: + mpam_cfg->hdl = resctrl_cfg; + mpam_set_feature(mpam_feat_part_hdl, &mpam_cfg->valid); + break; + case QOS_MBA_INTPRI_EVENT_ID: + mpam_cfg->intpri = resctrl_cfg; + mpam_set_feature(mpam_feat_intpri_part, &mpam_cfg->valid); + break; + case QOS_CAT_CPBM_EVENT_ID: + mpam_cfg->cpbm = resctrl_cfg; + mpam_set_feature(mpam_feat_cpor_part, &mpam_cfg->valid); + break; + case QOS_CAT_CMAX_EVENT_ID: + mpam_cfg->cmax = resctrl_cfg; + mpam_set_feature(mpam_feat_ccap_part, &mpam_cfg->valid); + break; + case QOS_CAT_INTPRI_EVENT_ID: + mpam_cfg->intpri = resctrl_cfg; + mpam_set_feature(mpam_feat_intpri_part, &mpam_cfg->valid); + break; + default: + break; + } +} + +/* + * copy all ctrl type at once looks more efficient, as it + * only needs refresh devices' state once time through + * mpam_component_config, this feature will be checked + * again when appling configuration. + */ +static void +mpam_resctrl_update_component_cfg(struct resctrl_resource *r, + struct rdt_domain *d, struct sd_closid *closid) +{ + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + struct mpam_config *slave_mpam_cfg; + struct raw_resctrl_resource *rr = r->res; + enum resctrl_ctrl_type type; + u32 intpartid = closid->intpartid; + u32 reqpartid = closid->reqpartid; + u32 resctrl_cfg; + + lockdep_assert_held(&resctrl_group_mutex); + + /* Out of range */ + if (intpartid >= mpam_sysprops_num_partid() || + reqpartid >= mpam_sysprops_num_partid()) + return; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + /* + * now reqpartid is used for duplicating master's configuration, + * mpam_cfg[intpartid] needn't duplicate this setting, + * it is because only reqpartid stands for each rdtgroup's + * mpam_cfg index id. 
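The conversions performed in mpam_update_from_resctrl_cfg() above scale a resctrl percentage onto whatever the hardware implements. A self-contained sketch of that arithmetic; MAX_MBA_BW of 100, a 4-bit portion bitmap and a 6-bit bwa_wd are assumed purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define MAX_MBA_BW	100u	/* resctrl expresses bandwidth as a percentage */

int main(void)
{
	unsigned int pct = 30;		/* value written via the schemata file */
	unsigned int mbw_pbm_bits = 4;	/* assumed portion-bitmap width */
	unsigned int bwa_wd = 6;	/* assumed bandwidth-allocation field width */

	/* QOS_MBA_PBM: scale the percentage onto the portion-bitmap width */
	unsigned int mbw_pbm = (pct * mbw_pbm_bits) / MAX_MBA_BW;

	/* QOS_MBA_MAX: scale onto the implemented fraction, then clamp to it */
	uint64_t range = (1ull << bwa_wd) - 1;
	uint64_t mbw_max = (pct * range) / (MAX_MBA_BW - 1);
	if (mbw_max > range)
		mbw_max = range;

	printf("%u%% -> mbw_pbm=%u of %u bits, mbw_max=%llu of %llu\n",
	       pct, mbw_pbm, mbw_pbm_bits,
	       (unsigned long long)mbw_max, (unsigned long long)range);
	return 0;
}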
+ */ + slave_mpam_cfg = &dom->comp->cfg[reqpartid]; + if (WARN_ON_ONCE(!slave_mpam_cfg)) + return; + slave_mpam_cfg->valid = 0; + + for_each_ctrl_type(type) { + if (!rr->ctrl_features[type].enabled) + continue; + + resctrl_cfg = d->ctrl_val[type][intpartid]; + mpam_update_from_resctrl_cfg(res, resctrl_cfg, + rr->ctrl_features[type].evt, slave_mpam_cfg); + } +} + +static void mpam_reset_cfg(struct mpam_resctrl_res *res, + struct mpam_resctrl_dom *dom, struct rdt_domain *d) + +{ + int i; + struct resctrl_resource *r = &res->resctrl_res; + struct raw_resctrl_resource *rr = r->res; + enum resctrl_ctrl_type type; + + for (i = 0; i != mpam_sysprops_num_partid(); i++) { + for_each_ctrl_type(type) { + mpam_update_from_resctrl_cfg(res, + rr->ctrl_features[type].default_ctrl, + rr->ctrl_features[type].evt, &dom->comp->cfg[i]); + d->ctrl_val[type][i] = rr->ctrl_features[type].default_ctrl; + } + } +} + +void resctrl_resource_reset(void) +{ + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + struct rdt_domain *d; + + for_each_supported_resctrl_exports(res) { + if (!res->resctrl_res.alloc_capable) + continue; + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, + resctrl_dom); + mpam_reset_cfg(res, dom, d); + } + } + + mpam_reset_devices(); + + /* + * reset CDP configuration used in recreating schema list nodes. + */ + resctrl_cdp_enabled = false; +} diff --git a/arch/arm64/kernel/mpam/mpam_resource.h b/arch/arm64/kernel/mpam/mpam_resource.h new file mode 100644 index 0000000000000000000000000000000000000000..a9e8334e879e80cbf0d70c27facb9122bb034a51 --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_resource.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_MPAM_RESOURCE_H +#define _ASM_ARM64_MPAM_RESOURCE_H + +#include + +#define MPAMF_IDR 0x0000 +#define MPAMF_SIDR 0x0008 +#define MPAMF_MSMON_IDR 0x0080 +#define MPAMF_IMPL_IDR 0x0028 +#define MPAMF_CPOR_IDR 0x0030 +#define MPAMF_CCAP_IDR 0x0038 +#define MPAMF_MBW_IDR 0x0040 +#define MPAMF_PRI_IDR 0x0048 +#define MPAMF_CSUMON_IDR 0x0088 +#define MPAMF_MBWUMON_IDR 0x0090 +#define MPAMF_PARTID_NRW_IDR 0x0050 +#define MPAMF_IIDR 0x0018 +#define MPAMF_AIDR 0x0020 +#define MPAMCFG_PART_SEL 0x0100 +#define MPAMCFG_CPBM 0x1000 +#define MPAMCFG_CMAX 0x0108 +#define MPAMCFG_MBW_MIN 0x0200 +#define MPAMCFG_MBW_MAX 0x0208 +#define MPAMCFG_MBW_WINWD 0x0220 +#define MPAMCFG_MBW_PBM 0x2000 +#define MPAMCFG_PRI 0x0400 +#define MPAMCFG_MBW_PROP 0x0500 +#define MPAMCFG_INTPARTID 0x0600 +#define MSMON_CFG_MON_SEL 0x0800 +#define MSMON_CFG_CSU_FLT 0x0810 +#define MSMON_CFG_CSU_CTL 0x0818 +#define MSMON_CFG_MBWU_FLT 0x0820 +#define MSMON_CFG_MBWU_CTL 0x0828 +#define MSMON_CSU 0x0840 +#define MSMON_CSU_CAPTURE 0x0848 +#define MSMON_MBWU 0x0860 +#define MSMON_MBWU_CAPTURE 0x0868 +#define MSMON_CAPT_EVNT 0x0808 +#define MPAMF_ESR 0x00F8 +#define MPAMF_ECR 0x00F0 + +#define HAS_CCAP_PART BIT(24) +#define HAS_CPOR_PART BIT(25) +#define HAS_MBW_PART BIT(26) +#define HAS_PRI_PART BIT(27) +#define HAS_IMPL_IDR BIT(29) +#define HAS_MSMON BIT(30) +#define HAS_PARTID_NRW BIT(31) + +/* MPAMF_IDR */ +#define MPAMF_IDR_PMG_MAX_MASK ((BIT(8) - 1) << 16) +#define MPAMF_IDR_PMG_MAX_SHIFT 16 +#define MPAMF_IDR_PARTID_MAX_MASK (BIT(16) - 1) +#define MPAMF_IDR_PMG_MAX_GET(v) ((v & MPAMF_IDR_PMG_MAX_MASK) >> 16) +#define MPAMF_IDR_PARTID_MAX_GET(v) (v & MPAMF_IDR_PARTID_MAX_MASK) + +#define MPAMF_IDR_HAS_CCAP_PART(v) ((v) & HAS_CCAP_PART) +#define MPAMF_IDR_HAS_CPOR_PART(v) ((v) & 
HAS_CPOR_PART) +#define MPAMF_IDR_HAS_MBW_PART(v) ((v) & HAS_MBW_PART) +#define MPAMF_IDR_HAS_MSMON(v) ((v) & HAS_MSMON) +#define MPAMF_IDR_PARTID_MASK GENMASK(15, 0) +#define MPAMF_IDR_PMG_MASK GENMASK(23, 16) +#define MPAMF_IDR_PMG_SHIFT 16 +#define MPAMF_IDR_HAS_PARTID_NRW(v) ((v) & HAS_PARTID_NRW) +#define NUM_MON_MASK (BIT(16) - 1) +#define MPAMF_IDR_NUM_MON(v) ((v) & NUM_MON_MASK) + +#define CPBM_WD_MASK 0xFFFF +#define CPBM_MASK 0x7FFF + +#define MBW_MAX_HARDLIM BIT(31) +#define MBW_PROP_HARDLIM BIT(31) +#define MBW_MAX_MASK GENMASK(15, 0) +#define MBW_MAX_BWA_FRACT(w) GENMASK(w - 1, 0) +#define MBW_MAX_SET(v, w) (v << (16 - w)) +/* MPAMCFG_MBW_PROP */ +#define MBW_PROP_SET_HDL(r) (r | MBW_PROP_HARDLIM) +/* MPAMCFG_MBW_MAX */ +#define MBW_MAX_SET_HDL(r) (r | MBW_MAX_HARDLIM) +#define MBW_MAX_GET_HDL(r) ((r & MBW_MAX_HARDLIM) >> 31) +#define MBW_MAX_GET(v, w) (((v) & MBW_MAX_MASK) >> (16 - w)) + +#define MSMON_MATCH_PMG BIT(17) +#define MSMON_MATCH_PARTID BIT(16) +#define MSMON_CFG_CTL_EN BIT(31) +#define MSMON_CFG_FLT_SET(r, p) ((r) << 16|(p)) +#define MBWU_SUBTYPE_DEFAULT (3 << 20) +#define MSMON_CFG_MBWU_CTL_SET(m) (BIT(31)|MBWU_SUBTYPE_DEFAULT|(m)) +#define MSMON_CFG_CSU_CTL_SET(m) (BIT(31)|(m)) +#define MSMON_CFG_CSU_TYPE 0x43 +#define MSMON_CFG_MBWU_TYPE 0x42 + +/* + * Set MPAMCFG_INTPARTID internal bit + */ +#define MPAMCFG_INTPARTID_INTERNAL BIT(16) +#define INTPARTID_INTPARTID_MASK (BIT(15) - 1) +#define MPAMCFG_INTPARTID_INTPARTID_GET(r) (r & INTPARTID_INTPARTID_MASK) +/* + * Set MPAMCFG_PART_SEL internal bit + */ +#define MPAMCFG_PART_SEL_INTERNAL BIT(16) + +/* MPAMF_ESR - MPAM Error Status Register */ +#define MPAMF_ESR_PARTID_OR_MON GENMASK(15, 0) +#define MPAMF_ESR_PMG GENMASK(23, 16) +#define MPAMF_ESR_ERRCODE GENMASK(27, 24) +#define MPAMF_ESR_ERRCODE_SHIFT 24 +#define MPAMF_ESR_OVRWR BIT(31) +#define MPAMF_ESR_ERRCODE_MASK ((BIT(4) - 1) << 24) + +/* MPAMF_ECR - MPAM Error Control Register */ +#define MPAMF_ECR_INTEN BIT(0) + +/* + * Size of the memory mapped registers: 4K of feature page then 2 x 4K + * bitmap registers + */ +#define SZ_MPAM_DEVICE (3 * SZ_4K) + +/* + * MSMON_CSU - Memory system performance monitor cache storage usage monitor + * register + * MSMON_CSU_CAPTURE - Memory system performance monitor cache storage usage + * capture register + * MSMON_MBWU - Memory system performance monitor memory bandwidth usage + * monitor register + * MSMON_MBWU_CAPTURE - Memory system performance monitor memory bandwidth usage + * capture register + */ +#define MSMON___VALUE GENMASK(30, 0) +#define MSMON___NRDY BIT(31) + +/* + * MSMON_CAPT_EVNT - Memory system performance monitoring capture event + * generation register + */ +#define MSMON_CAPT_EVNT_NOW BIT(0) +/* + * MPAMCFG_MBW_MAX SET - temp Hard code + */ +#define MPAMCFG_PRI_DSPRI_SHIFT 16 +#define MPAMCFG_INTPRI_GET(r) (r & GENMASK(15, 0)) +#define MPAMCFG_DSPRI_GET(r) ((r & GENMASK(16, 31)) >> 16) +/* Always same if both supported */ +#define MPAMCFG_PRI_GET(r) (MPAMCFG_DSPRI_GET(r) | MPAMCFG_INTPRI_GET(r)) + +/* MPAMF_PRI_IDR - MPAM features priority partitioning ID register */ +#define MPAMF_PRI_IDR_HAS_INTPRI BIT(0) +#define MPAMF_PRI_IDR_INTPRI_0_IS_LOW BIT(1) +#define MPAMF_PRI_IDR_INTPRI_WD_SHIFT 4 +#define MPAMF_PRI_IDR_INTPRI_WD GENMASK(9, 4) +#define MPAMF_PRI_IDR_HAS_DSPRI BIT(16) +#define MPAMF_PRI_IDR_DSPRI_0_IS_LOW BIT(17) +#define MPAMF_PRI_IDR_DSPRI_WD_SHIFT 20 +#define MPAMF_PRI_IDR_DSPRI_WD GENMASK(25, 20) + +/* MPAMF_CSUMON_IDR - MPAM cache storage usage monitor ID register */ +#define 
MPAMF_CSUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_CSUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ +#define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_CPOR_IDR - MPAM features cache portion partitioning ID register */ +#define MPAMF_CPOR_IDR_CPBM_WD GENMASK(15, 0) + +/* MPAMF_CCAP_IDR - MPAM features cache capacity partitioning ID register */ +#define MPAMF_CCAP_IDR_CMAX_WD GENMASK(5, 0) + +/* MPAMF_MBW_IDR - MPAM features memory bandwidth partitioning ID register */ +#define MPAMF_MBW_IDR_BWA_WD GENMASK(5, 0) +#define MPAMF_MBW_IDR_HAS_MIN BIT(10) +#define MPAMF_MBW_IDR_HAS_MAX BIT(11) +#define MPAMF_MBW_IDR_HAS_PBM BIT(12) + +#define MPAMF_MBW_IDR_HAS_PROP BIT(13) +#define MPAMF_MBW_IDR_WINDWR BIT(14) +#define MPAMF_MBW_IDR_BWPBM_WD GENMASK(28, 16) +#define MPAMF_MBW_IDR_BWPBM_WD_SHIFT 16 + +/* MPAMF_PARTID_NRW_IDR - MPAM features partid narrow ID register */ +#define MPAMF_PARTID_NRW_IDR_MASK (BIT(16) - 1) + +#define MSMON_CFG_CTL_TYPE GENMASK(7, 0) +#define MSMON_CFG_CTL_MATCH_PARTID BIT(16) +#define MSMON_CFG_CTL_MATCH_PMG BIT(17) +#define MSMON_CFG_CTL_SUBTYPE GENMASK(23, 20) +#define MSMON_CFG_CTL_SUBTYPE_SHIFT 20 +#define MSMON_CFG_CTL_OFLOW_FRZ BIT(24) +#define MSMON_CFG_CTL_OFLOW_INTR BIT(25) +#define MSMON_CFG_CTL_OFLOW_STATUS BIT(26) +#define MSMON_CFG_CTL_CAPT_RESET BIT(27) +#define MSMON_CFG_CTL_CAPT_EVNT GENMASK(30, 28) +#define MSMON_CFG_CTL_CAPT_EVNT_SHIFT 28 +#define MSMON_CFG_CTL_EN BIT(31) + +#define MPAMF_IDR_HAS_PRI_PART(v) (v & BIT(27)) + +/* MPAMF_MSMON_IDR - MPAM performance monitoring ID register */ +#define MPAMF_MSMON_IDR_MSMON_CSU BIT(16) +#define MPAMF_MSMON_IDR_MSMON_MBWU BIT(17) +#define MPAMF_MSMON_IDR_HAS_LOCAL_CAPT_EVNT BIT(31) + +/* + * MSMON_CFG_MBWU_FLT - Memory system performance monitor configure memory + * bandwidth usage monitor filter register + */ +#define MSMON_CFG_MBWU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_MBWU_FLT_PMG_SHIFT 16 +#define MSMON_CFG_MBWU_FLT_PMG GENMASK(23, 16) +#define MSMON_CFG_MBWU_TYPE 0x42 + +/* + * MSMON_CFG_CSU_FLT - Memory system performance monitor configure cache storage + * usage monitor filter register + */ +#define MSMON_CFG_CSU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_CSU_FLT_PMG GENMASK(23, 16) +#define MSMON_CFG_CSU_FLT_PMG_SHIFT 16 +#define MSMON_CFG_CSU_TYPE 0x43 + +/* hard code for mbw_max max-percentage's cresponding masks */ +#define MBA_MAX_WD 63u + +#endif /* _ASM_ARM64_MPAM_RESOURCE_H */ diff --git a/arch/arm64/kernel/mpam/mpam_setup.c b/arch/arm64/kernel/mpam/mpam_setup.c new file mode 100644 index 0000000000000000000000000000000000000000..a174ba62ba4e4389e60c1d9511449f0a1beb90b8 --- /dev/null +++ b/arch/arm64/kernel/mpam/mpam_setup.c @@ -0,0 +1,622 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM + * + * Copyright (C) 2020-2021 Huawei Technologies Co., Ltd + * + * Author: Wang Shaobo + * + * Code was partially borrowed from http://www.linux-arm.org/ + * git?p=linux-jm.git;a=shortlog;h=refs/heads/mpam/snapshot/may. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * More information about MPAM be found in the Arm Architecture Reference + * Manual. + * + * https://static.docs.arm.com/ddi0598/a/DDI0598_MPAM_supp_armv8a.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "mpam_device.h" +#include "mpam_internal.h" + +/* + * The classes we've picked to map to resctrl resources. + * Class pointer may be NULL. + */ +struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; +struct mpam_resctrl_res mpam_resctrl_events[RESCTRL_NUM_EVENT_IDS]; + +/* Like resctrl_get_domain_from_cpu(), but for offline CPUs */ +static struct mpam_resctrl_dom * +mpam_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cpumask_test_cpu(cpu, &dom->comp->fw_affinity)) + return dom; + } + + return NULL; +} + +static int mpam_resctrl_setup_domain(unsigned int cpu, + struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + struct mpam_class *class = res->class; + struct mpam_component *comp_iter, *comp; + u32 num_partid; + u32 **ctrlval_ptr; + enum resctrl_ctrl_type type, type_free; + struct list_head *tmp; + + num_partid = mpam_sysprops_num_partid(); + + comp = NULL; + list_for_each_entry(comp_iter, &class->components, class_list) { + if (cpumask_test_cpu(cpu, &comp_iter->fw_affinity)) { + comp = comp_iter; + break; + } + } + + /* cpu with unknown exported component? */ + if (WARN_ON_ONCE(!comp)) + return 0; + + dom = kzalloc_node(sizeof(*dom), GFP_KERNEL, cpu_to_node(cpu)); + if (!dom) + return -ENOMEM; + + dom->comp = comp; + INIT_LIST_HEAD(&dom->resctrl_dom.list); + dom->resctrl_dom.id = comp->comp_id; + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + + for_each_ctrl_type(type) { + ctrlval_ptr = &dom->resctrl_dom.ctrl_val[type]; + *ctrlval_ptr = kmalloc_array(num_partid, + sizeof(**ctrlval_ptr), GFP_KERNEL); + if (!*ctrlval_ptr) { + for_each_ctrl_type(type_free) { + if (type_free == type) + break; + ctrlval_ptr = &dom->resctrl_dom.ctrl_val[type_free]; + kfree(*ctrlval_ptr); + } + kfree(dom); + return -ENOMEM; + } + } + + tmp = &res->resctrl_res.domains; + /* insert domains in id ascending order */ + list_for_each_entry(d, &res->resctrl_res.domains, list) { + /* find the last domain with id greater than this domain */ + if (dom->resctrl_dom.id > d->id) + tmp = &d->list; + if (dom->resctrl_dom.id < d->id) + break; + } + list_add(&dom->resctrl_dom.list, tmp); + + res->resctrl_res.dom_num++; + + return 0; +} + +int mpam_resctrl_cpu_online(unsigned int cpu) +{ + int ret; + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + + for_each_supported_resctrl_exports(res) { + dom = mpam_get_domain_from_cpu(cpu, res); + if (dom) { + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + } else { + ret = mpam_resctrl_setup_domain(cpu, res); + if (ret) + return ret; + } + } + + return mpam_resctrl_set_default_cpu(cpu); +} + +static inline struct rdt_domain * +resctrl_get_domain_from_cpu(int cpu, struct resctrl_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + /* Find the domain that contains this CPU */ + if (cpumask_test_cpu(cpu, &d->cpu_mask)) + return d; + } + + return NULL; +} + +int mpam_resctrl_cpu_offline(unsigned int cpu) +{ + struct rdt_domain *d; + struct mpam_resctrl_res *res; + struct 
mpam_resctrl_dom *dom; + u32 **ctrlval_ptr; + enum resctrl_ctrl_type type; + + for_each_supported_resctrl_exports(res) { + d = resctrl_get_domain_from_cpu(cpu, &res->resctrl_res); + + /* cpu with unknown exported component? */ + if (WARN_ON_ONCE(!d)) + continue; + + cpumask_clear_cpu(cpu, &d->cpu_mask); + + if (!cpumask_empty(&d->cpu_mask)) + continue; + + list_del(&d->list); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + for_each_ctrl_type(type) { + ctrlval_ptr = &dom->resctrl_dom.ctrl_val[type]; + kfree(*ctrlval_ptr); + } + + kfree(dom); + } + + mpam_resctrl_clear_default_cpu(cpu); + + return 0; +} + + +/* Test whether we can export MPAM_CLASS_CACHE:{2,3}? */ +static void mpam_resctrl_pick_caches(void) +{ + struct mpam_class *class; + struct mpam_resctrl_res *res; + + mpam_class_list_lock_held(); + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type != MPAM_CLASS_CACHE) + continue; + + if (class->level != 2 && class->level != 3) + continue; + + if (!mpam_has_feature(mpam_feat_cpor_part, class->features) && + !mpam_has_feature(mpam_feat_msmon_csu, class->features)) + continue; + + if (!mpam_has_feature(mpam_feat_msmon_csu, class->features) && + mpam_sysprops_num_partid() <= 1) + continue; + + if (class->cpbm_wd > RESCTRL_MAX_CBM) + continue; + + if (class->level == 2) { + res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; + res->resctrl_res.name = "L2"; + } else { + res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + res->resctrl_res.name = "L3"; + } + res->class = class; + } +} + +/* Find what we can can export as MBA */ +static void mpam_resctrl_pick_mba(void) +{ + u8 resctrl_llc; + struct mpam_class *class; + struct mpam_class *candidate = NULL; + + mpam_class_list_lock_held(); + + /* At least two partitions ... */ + if (mpam_sysprops_num_partid() <= 1) + return; + + if (mpam_resctrl_exports[RDT_RESOURCE_L3].class) + resctrl_llc = 3; + else if (mpam_resctrl_exports[RDT_RESOURCE_L2].class) + resctrl_llc = 2; + else + resctrl_llc = 0; + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type == MPAM_CLASS_UNKNOWN) + continue; + + if (class->level < resctrl_llc) + continue; + + /* + * Once we support MBM counters, we should require the MBA + * class to be at the same point in the hierarchy. Practically, + * this means the MBA class must support MBWU. Until then + * having something is better than nothing, but this may cause + * the MBA resource to disappear over a kernel update on a + * system that could support both, but not at the same time. + */ + + /* + * There are two ways we can generate delays for MBA, either + * with the mbw portion bitmap, or the mbw max control. + */ + if (!mpam_has_feature(mpam_feat_mbw_part, class->features) && + !mpam_has_feature(mpam_feat_mbw_max, class->features)) { + continue; + } + + /* pick the class 'closest' to resctrl_llc */ + if (!candidate || (class->level < candidate->level)) + candidate = class; + } + + if (candidate) + mpam_resctrl_exports[RDT_RESOURCE_MC].class = candidate; +} + +static void mpam_resctrl_pick_event_l3_occup(void) +{ + /* + * as the name suggests, resctrl can only use this if your cache is + * called 'l3'. 
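The MBA selection above boils down to "pick the lowest class level at or above the last-level cache exported to resctrl that can actually throttle bandwidth". A small stand-alone sketch of that choice; the topology and feature flags are invented for the example:

#include <stdio.h>

struct class_desc { int level; int has_mbw_ctrl; };

/* Pick the candidate with the smallest level >= llc that can control bandwidth. */
static const struct class_desc *pick_mba(const struct class_desc *c, int n, int llc)
{
	const struct class_desc *best = NULL;

	for (int i = 0; i < n; i++) {
		if (c[i].level < llc || !c[i].has_mbw_ctrl)
			continue;
		if (!best || c[i].level < best->level)
			best = &c[i];
	}
	return best;
}

int main(void)
{
	/* Invented example topology */
	struct class_desc classes[] = {
		{ 2, 0 },	/* L2: no bandwidth control */
		{ 3, 1 },	/* L3: controllable */
		{ 4, 1 },	/* memory side: controllable but further away */
	};
	const struct class_desc *mba = pick_mba(classes, 3, 3);

	printf("picked level %d\n", mba ? mba->level : -1);
	return 0;
}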
+ */ + struct mpam_resctrl_res *res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + + if (!res->class) + return; + + if (!mpam_has_feature(mpam_feat_msmon_csu, res->class->features)) + return; + + mpam_resctrl_events[QOS_L3_OCCUP_EVENT_ID] = *res; + + rdt_mon_capable = true; + res->resctrl_res.mon_capable = true; + res->resctrl_res.mon_capable = true; +} + +static void mpam_resctrl_pick_event_mbm_total(void) +{ + u64 num_counters; + struct mpam_resctrl_res *res; + + /* We prefer to measure mbm_total on whatever we used as MBA... */ + res = &mpam_resctrl_exports[RDT_RESOURCE_MC]; + if (!res->class) { + /* ... but if there isn't one, the L3 cache works */ + res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + if (!res->class) + return; + } + + /* + * to measure bandwidth in a resctrl like way, we need to leave a + * counter running all the time. As these are PMU-like, it is really + * unlikely we have enough... To be useful, we'd need at least one per + * closid. + */ + num_counters = mpam_sysprops_num_partid(); + + if (mpam_has_feature(mpam_feat_msmon_mbwu, res->class->features)) { + if (res->class->num_mbwu_mon >= num_counters) { + /* + * We don't support this use of monitors, let the + * world know this platform could make use of them + * if we did! + */ + } + } +} + +static void mpam_resctrl_pick_event_mbm_local(void) +{ + struct mpam_resctrl_res *res; + + res = &mpam_resctrl_exports[RDT_RESOURCE_MC]; + if (!res->class) + return; + + if (mpam_has_feature(mpam_feat_msmon_mbwu, res->class->features)) { + res->resctrl_res.mon_capable = true; + rdt_mon_capable = true; + mpam_resctrl_events[QOS_L3_MBM_LOCAL_EVENT_ID] = *res; + } +} + +static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) +{ + struct mpam_class *class = res->class; + struct resctrl_resource *r = &res->resctrl_res; + struct raw_resctrl_resource *rr = NULL; + + if (class == mpam_resctrl_exports[RDT_RESOURCE_SMMU].class) { + return 0; + } else if (class == mpam_resctrl_exports[RDT_RESOURCE_MC].class) { + r->rid = RDT_RESOURCE_MC; + r->name = "MB"; + r->fflags = RFTYPE_RES_MC; + r->mbw.delay_linear = true; + rr = mpam_get_raw_resctrl_resource(RDT_RESOURCE_MC); + rr->num_mon = class->num_mbwu_mon; + r->res = rr; + + if (mpam_has_feature(mpam_feat_mbw_part, class->features)) { + /* + * The maximum throttling is the number of bits we can + * unset in the bitmap. We never clear all of them, + * so the minimum is one bit, as a percentage. + */ + r->mbw.min_bw = MAX_MBA_BW / class->mbw_pbm_bits; + rr->ctrl_features[SCHEMA_PBM].max_wd = MAX_MBA_BW + 1; + rr->ctrl_features[SCHEMA_PBM].capable = true; + } + + if (mpam_has_feature(mpam_feat_mbw_max, class->features)) { + /* + * The maximum throttling is the number of fractions we + * can represent with the implemented bits. We never + * set 0. The minimum is the LSB, as a percentage. 
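As a worked example of the granule derived just below: with an assumed bwa_wd of 6 bits, (1ULL << 6) - 1 is 63, so min_bw = 100 / 63 rounds down to 1, i.e. the finest throttling step resctrl can express here is roughly 1%. The "excessive number of bits" fallback further down only matters when the implemented width is so large that this integer division would otherwise yield 0.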
+ */ + r->mbw.min_bw = MAX_MBA_BW / + ((1ULL << class->bwa_wd) - 1); + /* the largest mbw_max is 100 */ + rr->ctrl_features[SCHEMA_MAX].default_ctrl = MAX_MBA_BW; + rr->ctrl_features[SCHEMA_MAX].max_wd = MAX_MBA_BW + 1; + rr->ctrl_features[SCHEMA_MAX].capable = true; + + /* default set max stride MAX as COMMON ctrl feature */ + rr->ctrl_features[SCHEMA_COMM].default_ctrl = + rr->ctrl_features[SCHEMA_MAX].default_ctrl; + rr->ctrl_features[SCHEMA_COMM].max_wd = + rr->ctrl_features[SCHEMA_MAX].max_wd; + rr->ctrl_features[SCHEMA_COMM].capable = + rr->ctrl_features[SCHEMA_MAX].capable; + } + + if (mpam_has_feature(mpam_feat_mbw_min, class->features)) { + rr->ctrl_features[SCHEMA_MIN].max_wd = MAX_MBA_BW + 1; + rr->ctrl_features[SCHEMA_MIN].capable = true; + } + + /* + * Export priority setting, which represents the max level of + * control we can export. this default priority from hardware, + * no clever here, no need to define additional default value. + */ + if (mpam_has_feature(mpam_feat_intpri_part, class->features)) { + rr->ctrl_features[SCHEMA_PRI].max_wd = 1 << class->intpri_wd; + rr->ctrl_features[SCHEMA_PRI].default_ctrl = class->hwdef_intpri; + rr->ctrl_features[SCHEMA_PRI].capable = true; + } + + + /* Just in case we have an excessive number of bits */ + if (!r->mbw.min_bw) + r->mbw.min_bw = 1; + + /* + * james said because its linear with no offset, the granule is the same + * as the smallest value. It is a little fuzzy here because a granularity + * of 1 would appear too fine to make percentage conversions. + */ + r->mbw.bw_gran = GRAN_MBA_BW; + + /* We will only pick a class that can monitor and control */ + r->alloc_capable = true; + r->alloc_enabled = true; + rdt_alloc_capable = true; + r->mon_capable = true; + r->mon_enabled = true; + /* Export memory bandwidth hardlimit, default active hardlimit */ + rr->ctrl_features[SCHEMA_HDL].default_ctrl = 1; + rr->ctrl_features[SCHEMA_HDL].max_wd = 2; + rr->ctrl_features[SCHEMA_HDL].capable = true; + } else if (class == mpam_resctrl_exports[RDT_RESOURCE_L3].class) { + r->rid = RDT_RESOURCE_L3; + rr = mpam_get_raw_resctrl_resource(RDT_RESOURCE_L3); + rr->num_mon = class->num_csu_mon; + r->res = rr; + r->fflags = RFTYPE_RES_CACHE; + r->name = "L3"; + + if (mpam_has_feature(mpam_feat_cpor_part, class->features)) { + r->cache.cbm_len = class->cpbm_wd; + rr->ctrl_features[SCHEMA_PBM].default_ctrl = GENMASK(class->cpbm_wd - 1, 0); + rr->ctrl_features[SCHEMA_PBM].max_wd = + rr->ctrl_features[SCHEMA_PBM].default_ctrl + 1; + rr->ctrl_features[SCHEMA_PBM].capable = true; + /* + * Which bits are shared with other ...things... + * Unknown devices use partid-0 which uses all the bitmap + * fields. Until we configured the SMMU and GIC not to do this + * 'all the bits' is the correct answer here. + */ + r->cache.shareable_bits = rr->ctrl_features[SCHEMA_PBM].default_ctrl; + r->cache.min_cbm_bits = 1; + + /* default set CPBM as COMMON ctrl feature */ + rr->ctrl_features[SCHEMA_COMM].default_ctrl = + rr->ctrl_features[SCHEMA_PBM].default_ctrl; + rr->ctrl_features[SCHEMA_COMM].max_wd = + rr->ctrl_features[SCHEMA_PBM].max_wd; + rr->ctrl_features[SCHEMA_COMM].capable = + rr->ctrl_features[SCHEMA_PBM].capable; + } + + if (mpam_has_feature(mpam_feat_intpri_part, class->features)) { + /* + * Export internal priority setting, which represents the + * max level of control we can export to resctrl. this default + * priority is from hardware, no clever here. 
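A quick way to see the cache-portion defaults set above for L3, i.e. GENMASK(cpbm_wd - 1, 0) and the max_wd derived from it; the 16-bit width is only an assumed example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int cpbm_wd = 16;			/* assumed portion-bitmap width */
	uint32_t default_ctrl = (1u << cpbm_wd) - 1;	/* GENMASK(cpbm_wd - 1, 0) == 0xffff */
	uint32_t max_wd = default_ctrl + 1;		/* number of representable values */

	printf("default_ctrl=%#x max_wd=%#x\n", default_ctrl, max_wd);
	return 0;
}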
+ */ + rr->ctrl_features[SCHEMA_PRI].max_wd = 1 << class->intpri_wd; + rr->ctrl_features[SCHEMA_PRI].default_ctrl = class->hwdef_intpri; + rr->ctrl_features[SCHEMA_PRI].capable = true; + } + + if (mpam_has_feature(mpam_feat_ccap_part, class->features)) { + rr->ctrl_features[SCHEMA_MAX].max_wd = mpam_sysprops_llc_size() + 1; + rr->ctrl_features[SCHEMA_MAX].capable = true; + } + + /* + * Only this resource is allocable can it be picked from + * mpam_resctrl_pick_caches(). So directly set following + * fields to true. + */ + r->alloc_capable = true; + r->alloc_enabled = true; + rdt_alloc_capable = true; + /* + * While this is a CPU-interface feature of MPAM, we only tell + * resctrl about it for caches, as that seems to be how x86 + * works, and thus what resctrl expects. + */ + r->cdp_capable = true; + r->mon_capable = true; + r->mon_enabled = true; + + } else if (class == mpam_resctrl_exports[RDT_RESOURCE_L2].class) { + r->rid = RDT_RESOURCE_L2; + rr = mpam_get_raw_resctrl_resource(RDT_RESOURCE_L2); + rr->num_mon = class->num_csu_mon; + r->res = rr; + r->fflags = RFTYPE_RES_CACHE; + r->name = "L2"; + + if (mpam_has_feature(mpam_feat_cpor_part, class->features)) { + r->cache.cbm_len = class->cpbm_wd; + rr->ctrl_features[SCHEMA_PBM].default_ctrl = + GENMASK(class->cpbm_wd - 1, 0); + rr->ctrl_features[SCHEMA_PBM].max_wd = + rr->ctrl_features[SCHEMA_PBM].default_ctrl + 1; + rr->ctrl_features[SCHEMA_PBM].capable = true; + /* + * Which bits are shared with other ...things... + * Unknown devices use partid-0 which uses all the bitmap + * fields. Until we configured the SMMU and GIC not to do this + * 'all the bits' is the correct answer here. + */ + r->cache.shareable_bits = rr->ctrl_features[SCHEMA_COMM].default_ctrl; + /* default set max stride MAX as COMMON ctrl feature */ + rr->ctrl_features[SCHEMA_COMM].default_ctrl = + rr->ctrl_features[SCHEMA_PBM].default_ctrl; + rr->ctrl_features[SCHEMA_COMM].max_wd = + rr->ctrl_features[SCHEMA_PBM].max_wd; + rr->ctrl_features[SCHEMA_COMM].capable = + rr->ctrl_features[SCHEMA_PBM].capable; + } + + if (mpam_has_feature(mpam_feat_ccap_part, class->features)) { + rr->ctrl_features[SCHEMA_MAX].max_wd = ~0; + rr->ctrl_features[SCHEMA_MAX].capable = true; + } + + if (mpam_has_feature(mpam_feat_intpri_part, class->features)) { + /* + * Export internal priority setting, which represents the + * max level of control we can export to resctrl. this default + * priority is from hardware, no clever here. + */ + rr->ctrl_features[SCHEMA_PRI].max_wd = 1 << class->intpri_wd; + rr->ctrl_features[SCHEMA_PRI].default_ctrl = class->hwdef_intpri; + rr->ctrl_features[SCHEMA_PRI].capable = true; + } + /* + * Only this resource is allocable can it be picked from + * mpam_resctrl_pick_caches(). So directly set following + * fields to true. + */ + r->alloc_capable = true; + r->alloc_enabled = true; + rdt_alloc_capable = true; + + /* + * While this is a CPU-interface feature of MPAM, we only tell + * resctrl about it for caches, as that seems to be how x86 + * works, and thus what resctrl expects. 
+ */ + r->cdp_capable = true; + r->mon_capable = false; + } + + if (rr && class) { + rr->num_partid = class->num_partid; + rr->num_intpartid = class->num_intpartid; + rr->num_pmg = class->num_pmg; + } + + return 0; +} + +/* Called with the mpam classes lock held */ +int mpam_resctrl_setup(void) +{ + int rc; + struct mpam_resctrl_res *res; + enum resctrl_resource_level level = 0; + + for_each_resctrl_exports(res) { + INIT_LIST_HEAD(&res->resctrl_res.domains); + res->resctrl_res.rid = level; + level++; + } + + mpam_resctrl_pick_caches(); + mpam_resctrl_pick_mba(); + + mpam_resctrl_pick_event_l3_occup(); + mpam_resctrl_pick_event_mbm_total(); + mpam_resctrl_pick_event_mbm_local(); + + for_each_supported_resctrl_exports(res) { + rc = mpam_resctrl_resource_init(res); + if (rc) + return rc; + } + + if (!rdt_alloc_capable && !rdt_mon_capable) + return -EOPNOTSUPP; + + return 0; +} + +struct resctrl_resource * +mpam_resctrl_get_resource(enum resctrl_resource_level level) +{ + if (level >= RDT_NUM_RESOURCES || + !mpam_resctrl_exports[level].class) + return NULL; + + return &mpam_resctrl_exports[level].resctrl_res; +} diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c new file mode 100644 index 0000000000000000000000000000000000000000..fd733eb02d425fb36b98db252a1df972761e9a49 --- /dev/null +++ b/arch/arm64/kernel/paravirt-spinlocks.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Author: Zengruan Ye + */ + +#include +#include + +__visible bool __native_vcpu_is_preempted(int cpu) +{ + return false; +} diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c index 53f371ed4568c8a08cf07fc27fafccc4dc44770e..c14cc6f63cb441eaa65b5e98fd6d7c4fff8a651a 100644 --- a/arch/arm64/kernel/paravirt.c +++ b/arch/arm64/kernel/paravirt.c @@ -13,13 +13,131 @@ * Author: Stefano Stabellini */ +#define pr_fmt(fmt) "arm-pv: " fmt + +#include +#include #include +#include #include +#include +#include #include #include +#include struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; struct pv_time_ops pv_time_ops; +struct paravirt_patch_template pv_ops = { + .sched.vcpu_is_preempted = __native_vcpu_is_preempted, +}; + EXPORT_SYMBOL_GPL(pv_time_ops); +EXPORT_SYMBOL_GPL(pv_ops); + +DEFINE_PER_CPU(struct pvsched_vcpu_state, pvsched_vcpu_region) __aligned(64); +EXPORT_PER_CPU_SYMBOL(pvsched_vcpu_region); + +static bool kvm_vcpu_is_preempted(int cpu) +{ + struct pvsched_vcpu_state *reg; + u32 preempted; + + reg = &per_cpu(pvsched_vcpu_region, cpu); + if (!reg) { + pr_warn_once("PV sched enabled but not configured for cpu %d\n", + cpu); + return false; + } + + preempted = le32_to_cpu(READ_ONCE(reg->preempted)); + + return !!preempted; +} + +static int pvsched_vcpu_state_dying_cpu(unsigned int cpu) +{ + struct pvsched_vcpu_state *reg; + struct arm_smccc_res res; + + reg = this_cpu_ptr(&pvsched_vcpu_region); + if (!reg) + return -EFAULT; + + arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE, &res); + memset(reg, 0, sizeof(*reg)); + + return 0; +} + +static int init_pvsched_vcpu_state(unsigned int cpu) +{ + struct pvsched_vcpu_state *reg; + struct arm_smccc_res res; + + reg = this_cpu_ptr(&pvsched_vcpu_region); + if (!reg) + return -EFAULT; + + /* Pass the memory address to host via hypercall */ + arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_IPA_INIT, + virt_to_phys(reg), &res); + + return 0; +} + +static int kvm_arm_init_pvsched(void) +{ + int ret; + + ret = 
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "hypervisor/arm/pvsched:starting", + init_pvsched_vcpu_state, + pvsched_vcpu_state_dying_cpu); + + if (ret < 0) { + pr_warn("PV sched init failed\n"); + return ret; + } + + return 0; +} + +static bool has_kvm_pvsched(void) +{ + struct arm_smccc_res res; + + /* To detect the presence of PV sched support we require SMCCC 1.1+ */ + if (psci_ops.smccc_version < SMCCC_VERSION_1_1) + return false; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_HV_PV_SCHED_FEATURES, &res); + + return (res.a0 == SMCCC_RET_SUCCESS); +} + +int __init pv_sched_init(void) +{ + int ret; + + if (is_hyp_mode_available()) + return 0; + + if (!has_kvm_pvsched()) { + pr_warn("PV sched is not available\n"); + return 0; + } + + ret = kvm_arm_init_pvsched(); + if (ret) + return ret; + + pv_ops.sched.vcpu_is_preempted = kvm_vcpu_is_preempted; + pr_info("using PV sched preempted\n"); + + return 0; +} +early_initcall(pv_sched_init); diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index 0e2ea1c785427849b68435eb1c188e8b7d65eb64..7a91e4736293c549c986206d736bbbd6240364cd 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -169,6 +169,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) struct acpi_pci_generic_root_info *ri; struct pci_bus *bus, *child; struct acpi_pci_root_ops *root_ops; + struct pci_host_bridge *host; ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node); if (!ri) @@ -194,8 +195,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) if (!bus) return NULL; - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); + /* If we must preserve the resource configuration, claim now */ + host = pci_find_host_bridge(bus); + if (host->preserve_config) + pci_bus_claim_resources(bus); + + /* + * Assign whatever was left unassigned. If we didn't claim above, + * this will reassign everything. + */ + pci_assign_unassigned_root_bus_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index bcafd7dcfe8b186c07c1f7138ca4dc62035aa61e..2dbe201f768732200657c1925f0963c93c4791a0 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -37,7 +37,7 @@ user_backtrace(struct frame_tail __user *tail, unsigned long err; /* Also check accessibility of one struct frame_tail beyond */ - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + if (!access_ok(tail, sizeof(buftail))) return NULL; pagefault_disable(); @@ -59,30 +59,30 @@ user_backtrace(struct frame_tail __user *tail, return buftail.fp; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 /* * The registers we're interested in are at the end of the variable * length saved register structure. The fp points at the end of this * structure so the address of this struct is: - * (struct compat_frame_tail *)(xxx->fp)-1 + * (struct a32_frame_tail *)(xxx->fp)-1 * * This code has been adapted from the ARM OProfile support. 
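A compact user-space model of the frame-tail walk this comment describes, using a fake chain rather than a real stack. The struct only loosely mirrors the AArch32 frame tail, and the sanity check is a simplified form of the "strictly towards higher addresses" rule applied below:

#include <stdio.h>
#include <stdint.h>

struct frame_tail {
	struct frame_tail *fp;	/* caller's frame tail */
	uint32_t sp;
	uint32_t lr;		/* return address to report */
};

static void walk(const struct frame_tail *tail, int max_depth)
{
	while (tail && max_depth--) {
		printf("lr=%#x\n", tail->lr);

		/* Frames must strictly progress towards higher addresses. */
		if (tail->fp && tail->fp <= tail)
			break;
		tail = tail->fp;
	}
}

int main(void)
{
	/* Fake three-deep call chain laid out in ascending order. */
	struct frame_tail frames[3] = {
		{ &frames[1], 0, 0x1000 },
		{ &frames[2], 0, 0x2000 },
		{ NULL,       0, 0x3000 },
	};

	walk(&frames[0], 16);
	return 0;
}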
*/ -struct compat_frame_tail { - compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */ +struct a32_frame_tail { + compat_uptr_t fp; /* a (struct a32_frame_tail *) in compat mode */ u32 sp; u32 lr; } __attribute__((packed)); -static struct compat_frame_tail __user * -compat_user_backtrace(struct compat_frame_tail __user *tail, +static struct a32_frame_tail __user * +compat_user_backtrace(struct a32_frame_tail __user *tail, struct perf_callchain_entry_ctx *entry) { - struct compat_frame_tail buftail; + struct a32_frame_tail buftail; unsigned long err; /* Also check accessibility of one struct frame_tail beyond */ - if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + if (!access_ok(tail, sizeof(buftail))) return NULL; pagefault_disable(); @@ -98,13 +98,13 @@ compat_user_backtrace(struct compat_frame_tail __user *tail, * Frame pointers should strictly progress back up the stack * (towards higher addresses). */ - if (tail + 1 >= (struct compat_frame_tail __user *) + if (tail + 1 >= (struct a32_frame_tail __user *) compat_ptr(buftail.fp)) return NULL; - return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1; + return (struct a32_frame_tail __user *)compat_ptr(buftail.fp) - 1; } -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) @@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, perf_callchain_store(entry, regs->pc); - if (!compat_user_mode(regs)) { + if (!a32_user_mode(regs)) { /* AARCH64 mode */ struct frame_tail __user *tail; @@ -126,11 +126,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, tail && !((unsigned long)tail & 0xf)) tail = user_backtrace(tail, entry); } else { -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 /* AARCH32 compat mode */ - struct compat_frame_tail __user *tail; + struct a32_frame_tail __user *tail; - tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; + tail = (struct a32_frame_tail __user *)regs->compat_fp - 1; while ((entry->nr < entry->max_stack) && tail && !((unsigned long)tail & 0x3)) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index e213f8e867f65fa63ae84cac48555aaffb1794af..7be7f27b8f458abe81fc1b177e38139bd62234e8 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -26,9 +26,11 @@ #include #include +#include #include #include #include +#include /* * ARMv8 PMUv3 Performance Events handling code. 
@@ -83,6 +85,47 @@ #define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E #define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F #define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x31 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x32 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x33 +#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x34 +#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x35 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x36 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x37 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x38 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x39 +#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x3A +#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x3B +#define ARMV8_PMUV3_PERFCTR_STALL 0x3C +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x3D +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x3E +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x3F + +/* Statistical profiling extension microarchitectural events */ +#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000 +#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001 +#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002 +#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003 + +/* AMUv1 architecture events */ +#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004 +#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005 + +/* long-latency read miss events */ +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B + +/* additional latency from alignment events */ +#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020 +#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021 +#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022 + +/* Armv8.5 Memory Tagging Extension events */ +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026 /* ARMv8 recommended implementation defined event types */ #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40 @@ -183,12 +226,10 @@ #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED -/* PMUv3 HW events mapping. */ - /* * ARMv8 Architectural defined events, not all of these may - * be supported on any given implementation. Undefined events will - * be disabled at run-time. + * be supported on any given implementation. Unsupported events will + * be disabled at run-time based on the PMCEID registers. 
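That run-time filtering is driven by two PMCEID bitmaps: one covering the common event IDs and one covering the extended range used by the SPE/AMU/MTE events added here. A rough user-space sketch of the visibility test applied further below; the 0x40/0x4000 constants and the bitmap contents are hard-coded here only for illustration:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_COMMON_EVENTS	0x40	/* events 0x00..0x3f, described by PMCEID0/1 */
#define EXT_EVENT_BASE		0x4000	/* extended events, described separately */

static bool event_supported(uint64_t id, const uint64_t *pmceid, const uint64_t *pmceid_ext)
{
	if (id < MAX_COMMON_EVENTS)
		return pmceid[id / 64] & (1ull << (id % 64));

	if (id >= EXT_EVENT_BASE && id - EXT_EVENT_BASE < MAX_COMMON_EVENTS) {
		uint64_t ext = id - EXT_EVENT_BASE;

		return pmceid_ext[ext / 64] & (1ull << (ext % 64));
	}
	return false;
}

int main(void)
{
	uint64_t pmceid[1]     = { 1ull << 0x11 };	/* pretend only event 0x11 exists */
	uint64_t pmceid_ext[1] = { 1ull << 0x06 };	/* pretend only 0x4006 exists */

	printf("0x11   supported: %d\n", event_supported(0x11, pmceid, pmceid_ext));
	printf("0x4006 supported: %d\n", event_supported(0x4006, pmceid, pmceid_ext));
	printf("0x4020 supported: %d\n", event_supported(0x4020, pmceid, pmceid_ext));
	return 0;
}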
*/ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, @@ -313,7 +354,7 @@ armv8pmu_events_sysfs_show(struct device *dev, pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - return sprintf(page, "event=0x%03llx\n", pmu_attr->id); + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); } #define ARMV8_EVENT_ATTR_RESOLVE(m) #m @@ -370,6 +411,38 @@ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL); ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL); ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB); ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB); +ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS); +ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE); +ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS); +ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK); +ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK); +ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD); +ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD); +ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD); +ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD); +ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED); +ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC); +ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL); +ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND); +ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND); +ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT); +ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP); +ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED); +ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE); +ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION); +ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES); +ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM); +ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS); +ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD); +ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS); +ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD); +ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT); +ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT); +ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT); +ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED); +ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD); +ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR); + static struct attribute *armv8_pmuv3_event_attrs[] = { &armv8_event_attr_sw_incr.attr.attr, @@ -420,7 +493,38 @@ static struct attribute *armv8_pmuv3_event_attrs[] = { &armv8_event_attr_l2i_tlb_refill.attr.attr, &armv8_event_attr_l2d_tlb.attr.attr, &armv8_event_attr_l2i_tlb.attr.attr, - NULL, + &armv8_event_attr_remote_access.attr.attr, + &armv8_event_attr_ll_cache.attr.attr, + &armv8_event_attr_ll_cache_miss.attr.attr, + &armv8_event_attr_dtlb_walk.attr.attr, + &armv8_event_attr_itlb_walk.attr.attr, + &armv8_event_attr_ll_cache_rd.attr.attr, + &armv8_event_attr_ll_cache_miss_rd.attr.attr, + &armv8_event_attr_remote_access_rd.attr.attr, + &armv8_event_attr_l1d_cache_lmiss_rd.attr.attr, + 
&armv8_event_attr_op_retired.attr.attr, + &armv8_event_attr_op_spec.attr.attr, + &armv8_event_attr_stall.attr.attr, + &armv8_event_attr_stall_slot_backend.attr.attr, + &armv8_event_attr_stall_slot_frontend.attr.attr, + &armv8_event_attr_stall_slot.attr.attr, + &armv8_event_attr_sample_pop.attr.attr, + &armv8_event_attr_sample_feed.attr.attr, + &armv8_event_attr_sample_filtrate.attr.attr, + &armv8_event_attr_sample_collision.attr.attr, + &armv8_event_attr_cnt_cycles.attr.attr, + &armv8_event_attr_stall_backend_mem.attr.attr, + &armv8_event_attr_l1i_cache_lmiss.attr.attr, + &armv8_event_attr_l2d_cache_lmiss_rd.attr.attr, + &armv8_event_attr_l2i_cache_lmiss.attr.attr, + &armv8_event_attr_l3d_cache_lmiss_rd.attr.attr, + &armv8_event_attr_ldst_align_lat.attr.attr, + &armv8_event_attr_ld_align_lat.attr.attr, + &armv8_event_attr_st_align_lat.attr.attr, + &armv8_event_attr_mem_access_checked.attr.attr, + &armv8_event_attr_mem_access_checked_rd.attr.attr, + &armv8_event_attr_mem_access_checked_wr.attr.attr, + NULL, }; static umode_t @@ -434,9 +538,18 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj, pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); - if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) + if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) return attr->mode; + if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) { + u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; + + if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(id, cpu_pmu->pmceid_ext_bitmap)) + return attr->mode; + } + return 0; } @@ -498,6 +611,78 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) +/* + * This code is really good + */ + +#define PMEVN_CASE(__n, case_macro) \ + case __n: case_macro(__n); break; + +#define PMEVN_SWITCH(__x, case_macro) \ + do { \ + switch (__x) { \ + PMEVN_CASE(0, case_macro); \ + PMEVN_CASE(1, case_macro); \ + PMEVN_CASE(2, case_macro); \ + PMEVN_CASE(3, case_macro); \ + PMEVN_CASE(4, case_macro); \ + PMEVN_CASE(5, case_macro); \ + PMEVN_CASE(6, case_macro); \ + PMEVN_CASE(7, case_macro); \ + PMEVN_CASE(8, case_macro); \ + PMEVN_CASE(9, case_macro); \ + PMEVN_CASE(10, case_macro); \ + PMEVN_CASE(11, case_macro); \ + PMEVN_CASE(12, case_macro); \ + PMEVN_CASE(13, case_macro); \ + PMEVN_CASE(14, case_macro); \ + PMEVN_CASE(15, case_macro); \ + PMEVN_CASE(16, case_macro); \ + PMEVN_CASE(17, case_macro); \ + PMEVN_CASE(18, case_macro); \ + PMEVN_CASE(19, case_macro); \ + PMEVN_CASE(20, case_macro); \ + PMEVN_CASE(21, case_macro); \ + PMEVN_CASE(22, case_macro); \ + PMEVN_CASE(23, case_macro); \ + PMEVN_CASE(24, case_macro); \ + PMEVN_CASE(25, case_macro); \ + PMEVN_CASE(26, case_macro); \ + PMEVN_CASE(27, case_macro); \ + PMEVN_CASE(28, case_macro); \ + PMEVN_CASE(29, case_macro); \ + PMEVN_CASE(30, case_macro); \ + default: WARN(1, "Invalid PMEV* index %#x", __x); \ + } \ + } while (0) + +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(pmevcntr##n##_el0); +static unsigned long read_pmevcntrn(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); + return 0; +} +#undef RETURN_READ_PMEVCNTRN + +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, pmevcntr##n##_el0); +static void write_pmevcntrn(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); +} +#undef WRITE_PMEVCNTRN + +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, pmevtyper##n##_el0); +static void write_pmevtypern(int n, 
unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); +} +#undef WRITE_PMEVTYPERN + +#undef GENERATE_PMEVN_SWITCH + static inline u32 armv8pmu_pmcr_read(void) { return read_sysreg(pmcr_el0); @@ -535,6 +720,11 @@ static inline void armv8pmu_select_counter(int idx) static inline u32 armv8pmu_read_evcntr(int idx) { + u32 counter = ARMV8_IDX_TO_COUNTER(idx); + + if (pmu_nmi_enable) + return read_pmevcntrn(counter); + armv8pmu_select_counter(idx); return read_sysreg(pmxevcntr_el0); } @@ -570,8 +760,14 @@ static inline u64 armv8pmu_read_counter(struct perf_event *event) static inline void armv8pmu_write_evcntr(int idx, u32 value) { - armv8pmu_select_counter(idx); - write_sysreg(value, pmxevcntr_el0); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); + + if (pmu_nmi_enable) { + write_pmevcntrn(counter, value); + } else { + armv8pmu_select_counter(idx); + write_sysreg(value, pmxevcntr_el0); + } } static inline void armv8pmu_write_hw_counter(struct perf_event *event, @@ -612,12 +808,19 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u64 value) static inline void armv8pmu_write_evtype(int idx, u32 val) { - armv8pmu_select_counter(idx); - val &= ARMV8_PMU_EVTYPE_MASK; - write_sysreg(val, pmxevtyper_el0); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); + + if (pmu_nmi_enable) { + val &= ARMV8_PMU_EVTYPE_MASK; + write_pmevtypern(counter, val); + } else { + armv8pmu_select_counter(idx); + val &= ARMV8_PMU_EVTYPE_MASK; + write_sysreg(val, pmxevtyper_el0); + } } -static inline void armv8pmu_write_event_type(struct perf_event *event) +static inline void armv8pmu_write_hw_type(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -638,6 +841,26 @@ static inline void armv8pmu_write_event_type(struct perf_event *event) } } +static inline void armv8pmu_write_event_type(struct perf_event *event) +{ + if (!pmu_nmi_enable) { + armv8pmu_write_hw_type(event); + } else { + struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!armv8pmu_counter_valid(cpu_pmu, idx)) + pr_err("CPU%u writing wrong event %d\n", + smp_processor_id(), idx); + else if (idx == ARMV8_IDX_CYCLE_COUNTER) + write_sysreg(hwc->config_base & ARMV8_PMU_EVTYPE_MASK, + pmccfiltr_el0); + else + armv8pmu_write_hw_type(event); + } +} + static inline int armv8pmu_enable_counter(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); @@ -647,12 +870,21 @@ static inline int armv8pmu_enable_counter(int idx) static inline void armv8pmu_enable_event_counter(struct perf_event *event) { + struct perf_event_attr *attr = &event->attr; int idx = event->hw.idx; + u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); - armv8pmu_enable_counter(idx); if (armv8pmu_event_is_chained(event)) - armv8pmu_enable_counter(idx - 1); - isb(); + counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + + kvm_set_pmu_events(counter_bits, attr); + + /* We rely on the hypervisor switch code to enable guest counters */ + if (!kvm_pmu_counter_deferred(attr)) { + armv8pmu_enable_counter(idx); + if (armv8pmu_event_is_chained(event)) + armv8pmu_enable_counter(idx - 1); + } } static inline int armv8pmu_disable_counter(int idx) @@ -665,11 +897,21 @@ static inline int armv8pmu_disable_counter(int idx) static inline void armv8pmu_disable_event_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + struct perf_event_attr *attr = &event->attr; int idx = hwc->idx; + u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); if (armv8pmu_event_is_chained(event)) - 
armv8pmu_disable_counter(idx - 1); - armv8pmu_disable_counter(idx); + counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); + + kvm_clr_pmu_events(counter_bits); + + /* We rely on the hypervisor switch code to disable guest counters */ + if (!kvm_pmu_counter_deferred(attr)) { + if (armv8pmu_event_is_chained(event)) + armv8pmu_disable_counter(idx - 1); + armv8pmu_disable_counter(idx); + } } static inline int armv8pmu_enable_intens(int idx) @@ -717,7 +959,7 @@ static inline u32 armv8pmu_getreset_flags(void) static void armv8pmu_enable_event(struct perf_event *event) { - unsigned long flags; + unsigned long flags = 0; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); @@ -725,7 +967,8 @@ static void armv8pmu_enable_event(struct perf_event *event) * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -747,19 +990,21 @@ static void armv8pmu_enable_event(struct perf_event *event) */ armv8pmu_enable_event_counter(event); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv8pmu_disable_event(struct perf_event *event) { - unsigned long flags; + unsigned long flags = 0; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* * Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -771,29 +1016,44 @@ static void armv8pmu_disable_event(struct perf_event *event) */ armv8pmu_disable_event_irq(event); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + if (!pmu_nmi_enable) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv8pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + if (pmu_nmi_enable) { + preempt_disable(); + /* Enable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); + preempt_enable(); + } else { + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Enable all counters */ - armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Enable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + } } static void armv8pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + if (pmu_nmi_enable) { + preempt_disable(); + /* Disable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); + preempt_enable(); + } else { + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Disable all counters */ - armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Disable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); + 
raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + } } static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) @@ -858,7 +1118,8 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ - irq_work_run(); + if (!pmu_nmi_enable || !in_nmi()) + irq_work_run(); return IRQ_HANDLED; } @@ -946,14 +1207,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * with other architectures (x86 and Power). */ if (is_kernel_in_hyp_mode()) { - if (!attr->exclude_kernel) + if (!attr->exclude_kernel && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2; - } else { - if (attr->exclude_kernel) + if (attr->exclude_guest) config_base |= ARMV8_PMU_EXCLUDE_EL1; - if (!attr->exclude_hv) + if (attr->exclude_host) + config_base |= ARMV8_PMU_EXCLUDE_EL0; + } else { + if (!attr->exclude_hv && !attr->exclude_host) config_base |= ARMV8_PMU_INCLUDE_EL2; } + + /* + * Filter out !VHE kernels and guest kernels + */ + if (attr->exclude_kernel) + config_base |= ARMV8_PMU_EXCLUDE_EL1; + if (attr->exclude_user) config_base |= ARMV8_PMU_EXCLUDE_EL0; @@ -983,6 +1253,9 @@ static void armv8pmu_reset(void *info) armv8pmu_disable_intens(idx); } + /* Clear the counters we flip at guest entry/exit */ + kvm_clr_pmu_events(U32_MAX); + /* * Initialize & Reset PMNC. Request overflow interrupt for * 64 bit cycle counter but cheat in armv8pmu_write_counter(). @@ -1061,6 +1334,7 @@ static void __armv8pmu_probe_pmu(void *info) struct armv8pmu_probe_info *probe = info; struct arm_pmu *cpu_pmu = probe->pmu; u64 dfr0; + u64 pmceid_raw[2]; u32 pmceid[2]; int pmuver; @@ -1079,11 +1353,17 @@ static void __armv8pmu_probe_pmu(void *info) /* Add the CPU cycles counter */ cpu_pmu->num_events += 1; - pmceid[0] = read_sysreg(pmceid0_el0); - pmceid[1] = read_sysreg(pmceid1_el0); + pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0); + pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0); bitmap_from_arr32(cpu_pmu->pmceid_bitmap, pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); + + pmceid[0] = pmceid_raw[0] >> 32; + pmceid[1] = pmceid_raw[1] >> 32; + + bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, + pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); } static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) @@ -1274,6 +1554,7 @@ static struct platform_driver armv8_pmu_driver = { .driver = { .name = ARMV8_PMU_PDEV_NAME, .of_match_table = armv8_pmu_of_device_ids, + .suppress_bind_attrs = true, }, .probe = armv8_pmu_device_probe, }; @@ -1287,11 +1568,22 @@ static int __init armv8_pmu_driver_init(void) } device_initcall(armv8_pmu_driver_init) +static u64 cyc_to_ns(u64 cyc, u16 time_shift, u32 time_mult) +{ + u64 quot, rem; + + quot = cyc >> time_shift; + rem = cyc & (((u64)1 << time_shift) - 1); + return quot * time_mult + + ((rem * time_mult) >> time_shift); +} + void arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) { u32 freq; u32 shift; + u64 offset; /* * Internal timekeeping for enabled/running/stopped times @@ -1314,4 +1606,16 @@ void arch_perf_update_userpage(struct perf_event *event, } userpg->time_shift = (u16)shift; userpg->time_offset = -now; + + offset = local_clock() - cyc_to_ns(arch_timer_read_counter(), + userpg->time_shift, userpg->time_mult); + + /* + * cap_user_time_zero doesn't make sense when we're using a different + * time base for the records. 
+ */ + if (!event->attr.use_clockid) { + userpg->cap_user_time_zero = 1; + userpg->time_zero = offset; + } } diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 0bbac612146eabecdc1c78a4aa12f602bf8f9ccb..f64bcee5b5a62634b979fbf98abb4845df44b623 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -19,7 +19,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) * - PC has been set in the pt_regs struct in kernel_entry, * - Handle SP and LR here. */ - if (compat_user_mode(regs)) { + if (a32_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) @@ -47,7 +47,7 @@ int perf_reg_validate(u64 mask) u64 perf_reg_abi(struct task_struct *task) { - if (is_compat_thread(task_thread_info(task))) + if (is_a32_compat_thread(task_thread_info(task))) return PERF_SAMPLE_REGS_ABI_32; else return PERF_SAMPLE_REGS_ABI_64; diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index e78c3ef04d95de696dfc87ce03ebdc90c258b4d1..fe9d20791d5bac752cd83d18cc7bf9d852f7ad5b 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -23,11 +23,14 @@ #include #include #include +#include #include +#include #include #include #include #include +#include #include #include #include @@ -42,10 +45,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + void *addrs[1]; + u32 insns[1]; + + addrs[0] = addr; + insns[0] = opcode; + + return aarch64_insn_patch_text(addrs, insns, 1); +} + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { /* prepare insn slot */ - p->ainsn.api.insn[0] = cpu_to_le32(p->opcode); + patch_text(p->ainsn.api.insn, p->opcode); flush_icache_range((uintptr_t) (p->ainsn.api.insn), (uintptr_t) (p->ainsn.api.insn) + @@ -118,15 +132,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +void *alloc_insn_page(void) { - void *addrs[1]; - u32 insns[1]; + void *page; - addrs[0] = (void *)addr; - insns[0] = (u32)opcode; + page = vmalloc_exec(PAGE_SIZE); + if (page) + set_memory_ro((unsigned long)page, 1); - return aarch64_insn_patch_text(addrs, insns, 1); + return page; } /* arm kprobe: install breakpoint in text */ @@ -166,33 +180,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p) __this_cpu_write(current_kprobe, p); } -/* - * When PSTATE.D is set (masked), then software step exceptions can not be - * generated. - * SPSR's D bit shows the value of PSTATE.D immediately before the - * exception was taken. PSTATE.D is set while entering into any exception - * mode, however software clears it for any normal (none-debug-exception) - * mode in the exception entry. Therefore, when we are entering into kprobe - * breakpoint handler from any normal mode then SPSR.D bit is already - * cleared, however it is set when we are entering from any debug exception - * mode. - * Since we always need to generate single step exception after a kprobe - * breakpoint exception therefore we need to clear it unconditionally, when - * we become sure that the current breakpoint exception is for kprobe. 
- */ -static void __kprobes -spsr_set_debug_flag(struct pt_regs *regs, int mask) -{ - unsigned long spsr = regs->pstate; - - if (mask) - spsr |= PSR_D_BIT; - else - spsr &= ~PSR_D_BIT; - - regs->pstate = spsr; -} - /* * Interrupts need to be disabled before single-step mode is set, and not * reenabled until after single-step mode ends. @@ -204,17 +191,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask) static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - kcb->saved_irqflag = regs->pstate; + kcb->saved_irqflag = regs->pstate & DAIF_MASK; regs->pstate |= PSR_I_BIT; + /* Unmask PSTATE.D for enabling software step exceptions. */ + regs->pstate &= ~PSR_D_BIT; } static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - if (kcb->saved_irqflag & PSR_I_BIT) - regs->pstate |= PSR_I_BIT; - else - regs->pstate &= ~PSR_I_BIT; + regs->pstate &= ~DAIF_MASK; + regs->pstate |= kcb->saved_irqflag; } static void __kprobes @@ -251,8 +238,6 @@ static void __kprobes setup_singlestep(struct kprobe *p, set_ss_context(kcb, slot); /* mark pending ss */ - spsr_set_debug_flag(regs, 0); - /* IRQs and single stepping do not mix well. */ kprobes_save_local_irqflag(kcb, regs); kernel_enable_single_step(regs); @@ -437,6 +422,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int retval; + if (user_mode(regs)) + return DBG_HOOK_ERROR; + /* return error if this is not our step */ retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); @@ -453,6 +441,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) int __kprobes kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kprobe_handler(regs); return DBG_HOOK_HANDLED; } @@ -465,13 +456,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__entry_text_end) || (addr >= (unsigned long)__idmap_text_start && addr < (unsigned long)__idmap_text_end) || + (addr >= (unsigned long)__hyp_text_start && + addr < (unsigned long)__hyp_text_end) || !!search_exception_tables(addr)) return true; if (!is_kernel_in_hyp_mode()) { - if ((addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end) || - (addr >= (unsigned long)__hyp_idmap_text_start && + if ((addr >= (unsigned long)__hyp_idmap_text_start && addr < (unsigned long)__hyp_idmap_text_end)) return true; } diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 636ca0119c0efa7cb1254568d04edfdddf791db9..b9aabab718db94724141a497803ce4effb427f76 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -45,7 +45,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) return -EINVAL; - insn = *(probe_opcode_t *)(&auprobe->insn[0]); + insn = le32_to_cpu(auprobe->insn); switch (arm_probe_decode_insn(insn, &auprobe->api)) { case INSN_REJECTED: @@ -111,7 +111,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) if (!auprobe->simulate) return false; - insn = *(probe_opcode_t *)(&auprobe->insn[0]); + insn = le32_to_cpu(auprobe->insn); addr = instruction_pointer(regs); if (auprobe->api.handler) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 7f1628effe6d7b866e60712b338fedb6e885d6ed..e5be78915632cec26c0edab336eb1966c349f53a 100644 --- 
a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -51,17 +51,18 @@ #include #include -#include +#include #include #include #include #include #include #include +#include #ifdef CONFIG_STACKPROTECTOR #include -unsigned long __stack_chk_guard __read_mostly; +unsigned long __stack_chk_guard __ro_after_init; EXPORT_SYMBOL(__stack_chk_guard); #endif @@ -73,6 +74,50 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +static void __cpu_do_idle(void) +{ + dsb(sy); + wfi(); +} + +static void __cpu_do_idle_irqprio(void) +{ + unsigned long pmr; + unsigned long daif_bits; + + daif_bits = read_sysreg(daif); + write_sysreg(daif_bits | PSR_I_BIT, daif); + + /* + * Unmask PMR before going idle to make sure interrupts can + * be raised. + */ + pmr = gic_read_pmr(); + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + __cpu_do_idle(); + + gic_write_pmr(pmr); + write_sysreg(daif_bits, daif); +} + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void cpu_do_idle(void) +{ + if (system_uses_irq_prio_masking()) + __cpu_do_idle_irqprio(); + else + __cpu_do_idle(); +} + /* * This is our default idle handler. */ @@ -88,6 +133,10 @@ void arch_cpu_idle(void) trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } +#if defined(CONFIG_HALTPOLL_CPUIDLE_MODULE) +EXPORT_SYMBOL(arch_cpu_idle); +#endif + #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) { @@ -106,6 +155,10 @@ void arch_cpu_idle_dead(void) */ void machine_shutdown(void) { +#ifdef CONFIG_ARM64_CPU_PARK + if (kexec_smp_send_park() == 0) + return; +#endif disable_nonboot_cpus(); } @@ -174,7 +227,7 @@ static void print_pstate(struct pt_regs *regs) { u64 pstate = regs->pstate; - if (compat_user_mode(regs)) { + if (a32_user_mode(regs)) { printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", pstate, pstate & PSR_AA32_N_BIT ? 'N' : 'n', @@ -208,7 +261,7 @@ void __show_regs(struct pt_regs *regs) int i, top_reg; u64 lr, sp; - if (compat_user_mode(regs)) { + if (a32_user_mode(regs)) { lr = regs->compat_lr; sp = regs->compat_sp; top_reg = 12; @@ -231,6 +284,9 @@ void __show_regs(struct pt_regs *regs) printk("sp : %016llx\n", sp); + if (system_uses_irq_prio_masking()) + printk("pmr_save: %08llx\n", regs->pmr_save); + i = top_reg; while (i >= 0) { @@ -256,7 +312,7 @@ static void tls_thread_flush(void) { write_sysreg(0, tpidr_el0); - if (is_compat_task()) { + if (is_a32_compat_task()) { current->thread.uw.tp_value = 0; /* @@ -285,22 +341,27 @@ void arch_release_task_struct(struct task_struct *tsk) fpsimd_release_task(tsk); } -/* - * src and dst may temporarily have aliased sve_state after task_struct - * is copied. We cannot fix this properly here, because src may have - * live SVE state and dst's thread_info may not exist yet, so tweaking - * either src's or dst's TIF_SVE is not safe. - * - * The unaliasing is done in copy_thread() instead. This works because - * dst is not schedulable or traceable until both of these functions - * have been called. 
- */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { if (current->mm) fpsimd_preserve_current_state(); *dst = *src; + /* We rely on the above assignment to initialize dst's thread_flags: */ + BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK)); + + /* + * Detach src's sve_state (if any) from dst so that it does not + * get erroneously used or freed prematurely. dst's sve_state + * will be allocated on demand later on if dst uses SVE. + * For consistency, also clear TIF_SVE here: this could be done + * later in copy_process(), but to avoid tripping up future + * maintainers it is best not to leave TIF_SVE and sve_state in + * an inconsistent state, even temporarily. + */ + dst->thread.sve_state = NULL; + clear_tsk_thread_flag(dst, TIF_SVE); + return 0; } @@ -313,13 +374,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - /* - * Unalias p->thread.sve_state (if any) from the parent task - * and disable discard SVE state for p: - */ - clear_tsk_thread_flag(p, TIF_SVE); - p->thread.sve_state = NULL; - /* * In case p was allocated the same task_struct pointer as some * other recently-exited task, make sure p is disassociated from @@ -340,7 +394,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, *task_user_tls(p) = read_sysreg(tpidr_el0); if (stack_start) { - if (is_compat_thread(task_thread_info(p))) + if (is_a32_compat_thread(task_thread_info(p))) childregs->compat_sp = stack_start; else childregs->sp = stack_start; @@ -358,6 +412,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (IS_ENABLED(CONFIG_ARM64_UAO) && cpus_have_const_cap(ARM64_HAS_UAO)) childregs->pstate |= PSR_UAO_BIT; + if (system_uses_irq_prio_masking()) + childregs->pmr_save = GIC_PRIO_IRQON; + + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) + set_ssbs_bit(childregs); + p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } @@ -378,7 +438,7 @@ static void tls_thread_switch(struct task_struct *next) { tls_preserve_current_state(); - if (is_compat_thread(task_thread_info(next))) + if (is_a32_compat_thread(task_thread_info(next))) write_sysreg(next->thread.uw.tp_value, tpidrro_el0); else if (!arm64_kernel_unmapped_at_el0()) write_sysreg(0, tpidrro_el0); @@ -397,6 +457,39 @@ void uao_thread_switch(struct task_struct *next) } } +/* + * Force SSBS state on context-switch, since it may be lost after migrating + * from a CPU which treats the bit as RES0 in a heterogeneous system. + */ +static void ssbs_thread_switch(struct task_struct *next) +{ + struct pt_regs *regs = task_pt_regs(next); + + /* + * Nothing to do for kernel threads, but 'regs' may be junk + * (e.g. idle task) so check the flags and bail early. + */ + if (unlikely(next->flags & PF_KTHREAD)) + return; + + /* + * If all CPUs implement the SSBS extension, then we just need to + * context-switch the PSTATE field. + */ + if (cpu_have_feature(cpu_feature(SSBS))) + return; + + /* If the mitigation is enabled, then we leave SSBS clear. */ + if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || + test_tsk_thread_flag(next, TIF_SSBD)) + return; + + if (a32_user_mode(regs)) + set_compat_ssbs_bit(regs); + else if (user_mode(regs)) + set_ssbs_bit(regs); +} + /* * We store our current task in sp_el0, which is clobbered by userspace. Keep a * shadow copy so that we can restore this upon entry from userspace. 
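The ssbs_thread_switch() hunk above relies on set_ssbs_bit() and set_compat_ssbs_bit(), which are defined outside the context shown here. As a rough sketch of the assumed behaviour, they only flip the SSBS bit in the saved PSTATE, so the new value is installed on the next exception return to userspace (the exact bit macros below are assumptions, not taken from this patch):

/*
 * Illustrative sketch, not part of the patch: helpers assumed to set the
 * SSBS bit in the regs that will be restored on return to userspace.
 */
static inline void set_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_SSBS_BIT;		/* assumed AArch64 SSBS bit (12) */
}

static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_AA32_SSBS_BIT;	/* assumed AArch32 SSBS bit (23) */
}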
@@ -425,6 +518,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); uao_thread_switch(next); + ssbs_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -437,6 +531,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, /* the actual thread switch */ last = cpu_switch_to(prev, next); + mpam_sched_in(); + return last; } diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index e8edbf13302aad06875703c5b680dd3513c4bb91..3ebb2a56e5f7bb812b9ec07660c66331cefeae50 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -69,7 +69,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu) static void cpu_psci_cpu_die(unsigned int cpu) { - int ret; /* * There are no known implementations of PSCI actually using the * power state field, pass a sensible default for now. @@ -77,14 +76,13 @@ static void cpu_psci_cpu_die(unsigned int cpu) u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT; - ret = psci_ops.cpu_off(state); - - pr_crit("unable to power off CPU%u (%d)\n", cpu, ret); + psci_ops.cpu_off(state); } static int cpu_psci_cpu_kill(unsigned int cpu) { - int err, i; + int err; + unsigned long start, end; if (!psci_ops.affinity_info) return 0; @@ -94,16 +92,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu) * while it is dying. So, try again a few times. */ - for (i = 0; i < 10; i++) { + start = jiffies; + end = start + msecs_to_jiffies(100); + do { err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { - pr_info("CPU%d killed.\n", cpu); + pr_info("CPU%d killed (polled %d ms)\n", cpu, + jiffies_to_msecs(jiffies - start)); return 0; } - msleep(10); - pr_info("Retrying again to check for CPU kill\n"); - } + usleep_range(100, 1000); + } while (time_before(jiffies, end)); pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", cpu, err); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6219486fa25fa4490d4397f6aee15e18fc208559..ea5851e96804293c55cdfb206cf65f52c31f4681 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -41,7 +41,6 @@ #include #include -#include #include #include #include @@ -190,8 +189,8 @@ static void ptrace_hbptriggered(struct perf_event *bp, info.si_code = TRAP_HWBKPT; info.si_addr = (void __user *)(bkpt->trigger); -#ifdef CONFIG_COMPAT - if (is_compat_task()) { +#ifdef CONFIG_AARCH32_EL0 + if (is_a32_compat_task()) { int si_errno = 0; int i; @@ -627,6 +626,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, return 0; } +static int fpr_active(struct task_struct *target, const struct user_regset *regset) +{ + if (!system_supports_fpsimd()) + return -ENODEV; + return regset->n; +} + /* * TODO: update fp accessors for lazy context switching (sync/flush hwstate) */ @@ -649,6 +655,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + if (!system_supports_fpsimd()) + return -EINVAL; + if (target == current) fpsimd_preserve_current_state(); @@ -688,6 +697,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, { int ret; + if (!system_supports_fpsimd()) + return -EINVAL; + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); if (ret) return ret; @@ -990,6 +1002,7 @@ static const struct user_regset 
aarch64_regsets[] = { */ .size = sizeof(u32), .align = sizeof(u32), + .active = fpr_active, .get = fpr_get, .set = fpr_set }, @@ -1047,6 +1060,10 @@ static const struct user_regset_view user_aarch64_view = { }; #ifdef CONFIG_COMPAT +#include +#endif + +#ifdef CONFIG_AARCH32_EL0 enum compat_regset { REGSET_COMPAT_GPR, REGSET_COMPAT_VFP, @@ -1176,6 +1193,9 @@ static int compat_vfp_get(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; if (target == current) @@ -1209,6 +1229,9 @@ static int compat_vfp_set(struct task_struct *target, compat_ulong_t fpscr; int ret, vregs_end_pos; + if (!system_supports_fpsimd()) + return -EINVAL; + uregs = &target->thread.uw.fpsimd_state; vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); @@ -1266,6 +1289,7 @@ static const struct user_regset aarch32_regsets[] = { .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), .size = sizeof(compat_ulong_t), .align = sizeof(compat_ulong_t), + .active = fpr_active, .get = compat_vfp_get, .set = compat_vfp_set }, @@ -1505,7 +1529,7 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ -long compat_arch_ptrace(struct task_struct *child, compat_long_t request, +static long compat_a32_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; @@ -1582,20 +1606,35 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, return ret; } -#endif /* CONFIG_COMPAT */ + +#else +#define compat_a32_ptrace(child, request, caddr, cdata) (0) +#endif /* CONFIG_AARCH32_EL0 */ + +#ifdef CONFIG_COMPAT +long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + if (is_a32_compat_task()) + return compat_a32_ptrace(child, request, caddr, cdata); + + /* ILP32 */ + return compat_ptrace_request(child, request, caddr, cdata); +} +#endif const struct user_regset_view *task_user_regset_view(struct task_struct *task) { -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 /* * Core dumping of 32-bit tasks or compat ptrace requests must use the * user_aarch32_view compatible with arm32. Native ptrace requests on * 32-bit children use an extended user_aarch32_ptrace_view to allow * access to the TLS register. */ - if (is_compat_task()) + if (is_a32_compat_task()) return &user_aarch32_view; - else if (is_compat_thread(task_thread_info(task))) + else if (is_a32_compat_thread(task_thread_info(task))) return &user_aarch32_ptrace_view; #endif return &user_aarch64_view; @@ -1622,7 +1661,7 @@ static void tracehook_report_syscall(struct pt_regs *regs, * A scratch register (ip(r12) on AArch32, x7 on AArch64) is * used to denote syscall entry/exit: */ - regno = (is_compat_task() ? 12 : 7); + regno = (is_a32_compat_task() ? 12 : 7); saved_reg = regs->regs[regno]; regs->regs[regno] = dir; @@ -1666,19 +1705,20 @@ void syscall_trace_exit(struct pt_regs *regs) } /* - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a - * We also take into account DIT (bit 24), which is not yet documented, and - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be - * allocated an EL0 meaning in future. + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. 
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { @@ -1739,7 +1779,7 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) regs->pstate &= ~DBG_SPSR_SS; - if (is_compat_thread(task_thread_info(task))) + if (is_a32_compat_thread(task_thread_info(task))) return valid_compat_regs(regs); else return valid_native_regs(regs); diff --git a/arch/arm64/kernel/ras.c b/arch/arm64/kernel/ras.c new file mode 100644 index 0000000000000000000000000000000000000000..b57041f9e6e7b45f58453cdf60a4e4b0600b1901 --- /dev/null +++ b/arch/arm64/kernel/ras.c @@ -0,0 +1,181 @@ +/* + * ARM64 SEA error recoery support + * + * Copyright 2017 Huawei Technologies Co., Ltd. + * Author: Xie XiuQi + * Author: Wang Xiongfeng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +/* + * Need to save faulting physical address associated with a process + * in the sea ghes handler some place where we can grab it back + * later in sea_notify_process() + */ +#define SEA_INFO_MAX 16 + +struct sea_info { + atomic_t inuse; + struct task_struct *t; + __u64 paddr; +} sea_info[SEA_INFO_MAX]; + +static bool sea_save_info(__u64 addr) +{ + struct sea_info *si; + + for (si = sea_info; si < &sea_info[SEA_INFO_MAX]; si++) { + if (atomic_cmpxchg(&si->inuse, 0, 1) == 0) { + si->t = current; + si->paddr = addr; + return true; + } + } + + pr_err("Too many concurrent recoverable errors\n"); + return false; +} + +static struct sea_info *sea_find_info(void) +{ + struct sea_info *si; + + for (si = sea_info; si < &sea_info[SEA_INFO_MAX]; si++) + if (atomic_read(&si->inuse) && si->t == current) + return si; + return NULL; +} + +static void sea_clear_info(struct sea_info *si) +{ + atomic_set(&si->inuse, 0); +} + +/* + * Called in process context that interrupted by SEA and marked with + * TIF_SEA_NOTIFY, just before returning to erroneous userland. + * This code is allowed to sleep. + * Attempt possible recovery such as calling the high level VM handler to + * process any corrupted pages, and kill/signal current process if required. + * Action required errors are handled here. 
+ */ +void sea_notify_process(void) +{ + unsigned long pfn; + int fail = 0, flags = MF_ACTION_REQUIRED; + struct sea_info *si = sea_find_info(); + + if (!si) + panic("Lost physical address for consumed uncorrectable error"); + +#ifdef CONFIG_UCE_KERNEL_RECOVERY + if (test_thread_flag(TIF_UCE_KERNEL_RECOVERY)) { + flags |= MF_UCE_KERNEL_RECOVERY; + clear_thread_flag(TIF_UCE_KERNEL_RECOVERY); + } +#endif + + clear_thread_flag(TIF_SEA_NOTIFY); + do { + pfn = si->paddr >> PAGE_SHIFT; + + + pr_err("Uncorrected hardware memory error in user-access at %pK\n", + (void *)si->paddr); + /* + * We must call memory_failure() here even if the current process is + * doomed. We still need to mark the page as poisoned and alert any + * other users of the page. + */ + if (memory_failure(pfn, flags) < 0) + fail++; + + sea_clear_info(si); + + si = sea_find_info(); + } while (si); + + if (fail) { + pr_err("Memory error not recovered\n"); + force_sig(SIGBUS, current); + } +} + +void ghes_arm_process_error(struct ghes *ghes, + struct cper_sec_proc_arm *err, int sec_sev) +{ + int i; + bool info_saved = false; + struct cper_arm_err_info *err_info; + + log_arm_hw_error(err, sec_sev); + + if ((ghes->generic->notify.type != ACPI_HEST_NOTIFY_SEA) || + (ghes->estatus->error_severity != CPER_SEV_RECOVERABLE)) + return; + + err_info = (struct cper_arm_err_info *)(err + 1); + for (i = 0; i < err->err_info_num; i++, err_info++) { + if ((err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR) && + (err_info->type == CPER_ARM_CACHE_ERROR)) + info_saved |= sea_save_info(err_info->physical_fault_addr); + } + + if (info_saved) + set_thread_flag(TIF_SEA_NOTIFY); +} + +int ghes_mem_err_callback(struct notifier_block *nb, unsigned long val, void *data) +{ + bool info_saved = false; + struct ghes_mem_err *ghes_mem = (struct ghes_mem_err *)data; + struct cper_sec_mem_err *mem_err = ghes_mem->mem_err; + + if ((ghes_mem->notify_type != ACPI_HEST_NOTIFY_SEA) || + (ghes_mem->severity != CPER_SEV_RECOVERABLE)) + return 0; + + if (mem_err->validation_bits & CPER_MEM_VALID_PA) + info_saved = sea_save_info(mem_err->physical_addr); + + if (info_saved) + set_thread_flag(TIF_SEA_NOTIFY); + + return 0; +} + +static struct notifier_block ghes_mem_err_nb = { + .notifier_call = ghes_mem_err_callback, +}; + +static int arm64_err_recov_init(void) +{ + atomic_notifier_chain_register(&ghes_mem_err_chain, &ghes_mem_err_nb); + return 0; +} + +late_initcall(arm64_err_recov_init); diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 933adbc0f654d84e2e7331a93455ee74c747a091..0311fe52c8ffb5be933f0546dd95cfe702cec6db 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d) return 0; } } +NOKPROBE_SYMBOL(save_return_addr); void *return_address(unsigned int level) { @@ -55,3 +57,4 @@ void *return_address(unsigned int level) return NULL; } EXPORT_SYMBOL_GPL(return_address); +NOKPROBE_SYMBOL(return_address); diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 5ba4465e44f09028c89fb190b7d65927635a9d10..ea94cf8f9dc6d15f58a7c8e298eba6d8bfdecede 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE; + if (!low) 
+ return false; + if (sp < low || sp >= high) return false; @@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE; + if (!low) + return false; + if (sp < low || sp >= high) return false; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b3354ff94e7984641dd5a0c076d63f67db5f62e9..c2ccaf6fa9ed100c7d3862d6402a85d823effe06 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -42,6 +42,9 @@ #include #include #include +#ifdef CONFIG_PIN_MEMORY +#include +#endif #include #include @@ -63,10 +66,24 @@ #include #include #include +#include static int num_standard_resources; static struct resource *standard_resources; +#ifdef CONFIG_ARM64_BOOTPARAM_HOTPLUG_CPU0 +static int arm64_cpu0_hotpluggable = 1; +#else +static int arm64_cpu0_hotpluggable; +static int __init arm64_enable_cpu0_hotplug(char *str) +{ + arm64_cpu0_hotpluggable = 1; + return 1; +} + +__setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug); +#endif + phys_addr_t __fdt_pointer __initdata; /* @@ -208,8 +225,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) static void __init request_standard_resources(void) { struct memblock_region *region; - struct resource *res; - unsigned long i = 0; + struct resource *res, *res_resources; + unsigned long i = 0, j, res_count; kernel_code.start = __pa_symbol(_text); kernel_code.end = __pa_symbol(__init_begin - 1); @@ -217,8 +234,21 @@ static void __init request_standard_resources(void) kernel_data.end = __pa_symbol(_end - 1); num_standard_resources = memblock.memory.cnt; - standard_resources = alloc_bootmem_low(num_standard_resources * - sizeof(*standard_resources)); + standard_resources = alloc_bootmem(num_standard_resources * + sizeof(*standard_resources)); + res_resources = alloc_bootmem(res_mem_count * + sizeof(struct resource)); + + for (res_count = 0; res_count < res_mem_count; res_count++) { + if (!res_mem[res_count].size) + continue; + + res_resources[res_count].name = "memmap reserved"; + res_resources[res_count].flags = IORESOURCE_MEM; + res_resources[res_count].start = res_mem[res_count].base; + res_resources[res_count].end = res_resources[res_count].start + + res_mem[res_count].size - 1; + } for_each_memblock(memory, region) { res = &standard_resources[i++]; @@ -242,10 +272,31 @@ static void __init request_standard_resources(void) request_resource(res, &kernel_data); #ifdef CONFIG_KEXEC_CORE /* Userspace will find "Crash kernel" region in /proc/iomem. 
*/ + if (crashk_low_res.end && crashk_low_res.start >= res->start && + crashk_low_res.end <= res->end) + request_resource(res, &crashk_low_res); if (crashk_res.end && crashk_res.start >= res->start && crashk_res.end <= res->end) request_resource(res, &crashk_res); #endif +#ifdef CONFIG_PIN_MEMORY + if (pin_memory_resource.end && pin_memory_resource.start >= res->start && + pin_memory_resource.end <= res->end) + request_resource(res, &pin_memory_resource); +#endif + +#ifdef CONFIG_QUICK_KEXEC + if (quick_kexec_res.end && + quick_kexec_res.start >= res->start && + quick_kexec_res.end <= res->end) + request_resource(res, &quick_kexec_res); +#endif + + for (j = 0; j < res_mem_count; j++) { + if (res_resources[j].start >= res->start && + res_resources[j].end <= res->end) + request_resource(res, &res_resources[j]); + } } } @@ -293,6 +344,11 @@ void __init setup_arch(char **cmdline_p) setup_machine_fdt(__fdt_pointer); + /* + * Initialise the static keys early as they may be enabled by the + * cpufeature code and early parameters. + */ + jump_label_init(); parse_early_param(); /* @@ -370,9 +426,12 @@ static int __init topology_init(void) for_each_online_node(i) register_one_node(i); - for_each_possible_cpu(i) { + for_each_present_cpu(i) { struct cpu *cpu = &per_cpu(cpu_data.cpu, i); - cpu->hotpluggable = 1; + if (i == 0) + cpu->hotpluggable = arm64_cpu0_hotpluggable; + else + cpu->hotpluggable = 1; register_cpu(cpu, i); } @@ -408,3 +467,24 @@ static int __init register_kernel_offset_dumper(void) return 0; } __initcall(register_kernel_offset_dumper); + +#ifdef CONFIG_HOTPLUG_CPU + +int arch_register_cpu(int num) +{ + struct cpu *cpu = &per_cpu(cpu_data.cpu, num); + + cpu->hotpluggable = 1; + return register_cpu(cpu, num); +} +EXPORT_SYMBOL(arch_register_cpu); + +void arch_unregister_cpu(int num) +{ + struct cpu *cpu = &per_cpu(cpu_data.cpu, num); + + unregister_cpu(cpu); +} +EXPORT_SYMBOL(arch_unregister_cpu); + +#endif diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 5dcc942906db3afe34cc9972a3eaaa6b72b0848c..75009eba3ae2ae20b6e7c5dad4cde7c25ef6686e 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -43,6 +43,11 @@ #include #include #include +#include +#include + +#define get_sigset(s, m) __copy_from_user(s, m, sizeof(*s)) +#define put_sigset(s, m) __copy_to_user(m, s, sizeof(*s)) /* * Do a signal return; undo the signal stack. These are aligned to 128-bit. 
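The get_sigset()/put_sigset() macros added above are plain __copy_from_user()/__copy_to_user() wrappers used by the native AArch64 signal path. A hypothetical caller (the function name below is illustrative only, not taken from the patch) would restore the blocked mask like this:

/* Hypothetical usage sketch, not part of the patch. */
static int sketch_restore_sigmask(struct rt_sigframe __user *sf)
{
	sigset_t set;

	/* expands to __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)) */
	if (get_sigset(&set, &sf->uc.uc_sigmask))
		return -EFAULT;

	set_current_blocked(&set);
	return 0;
}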
@@ -51,57 +56,12 @@ struct rt_sigframe { struct siginfo info; struct ucontext uc; }; +struct rt_sigframe_user_layout; -struct frame_record { - u64 fp; - u64 lr; -}; - -struct rt_sigframe_user_layout { - struct rt_sigframe __user *sigframe; - struct frame_record __user *next_frame; - - unsigned long size; /* size of allocated sigframe data */ - unsigned long limit; /* largest allowed size */ - - unsigned long fpsimd_offset; - unsigned long esr_offset; - unsigned long sve_offset; - unsigned long extra_offset; - unsigned long end_offset; -}; - -#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16) -#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) -#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) - -static void init_user_layout(struct rt_sigframe_user_layout *user) -{ - const size_t reserved_size = - sizeof(user->sigframe->uc.uc_mcontext.__reserved); - - memset(user, 0, sizeof(*user)); - user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved); - - user->limit = user->size + reserved_size; - - user->limit -= TERMINATOR_SIZE; - user->limit -= EXTRA_CONTEXT_SIZE; - /* Reserve space for extension and terminator ^ */ -} - -static size_t sigframe_size(struct rt_sigframe_user_layout const *user) -{ - return round_up(max(user->size, sizeof(struct rt_sigframe)), 16); -} +static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, + struct rt_sigframe_user_layout *user, int usig); -/* - * Sanity limit on the approximate maximum size of signal frame we'll - * try to generate. Stack alignment padding and the frame record are - * not taken into account. This limit is not a guarantee and is - * NOT ABI. - */ -#define SIGFRAME_MAXSZ SZ_64K +#include static int __sigframe_alloc(struct rt_sigframe_user_layout *user, unsigned long *offset, size_t size, bool extend) @@ -146,14 +106,14 @@ static int __sigframe_alloc(struct rt_sigframe_user_layout *user, * signal frame. The offset from the signal frame base address to the * allocated block is assigned to *offset. */ -static int sigframe_alloc(struct rt_sigframe_user_layout *user, +int sigframe_alloc(struct rt_sigframe_user_layout *user, unsigned long *offset, size_t size) { return __sigframe_alloc(user, offset, size, true); } /* Allocate the null terminator record and prevent further allocations */ -static int sigframe_alloc_end(struct rt_sigframe_user_layout *user) +int sigframe_alloc_end(struct rt_sigframe_user_layout *user) { int ret; @@ -170,7 +130,7 @@ static int sigframe_alloc_end(struct rt_sigframe_user_layout *user) return 0; } -static void __user *apply_user_offset( +void __user *apply_user_offset( struct rt_sigframe_user_layout const *user, unsigned long offset) { char __user *base = (char __user *)user->sigframe; @@ -178,7 +138,7 @@ static void __user *apply_user_offset( return base + offset; } -static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) +int preserve_fpsimd_context(struct fpsimd_context __user *ctx) { struct user_fpsimd_state const *fpsimd = ¤t->thread.uw.fpsimd_state; @@ -196,7 +156,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) return err ? -EFAULT : 0; } -static int restore_fpsimd_context(struct fpsimd_context __user *ctx) +int restore_fpsimd_context(struct fpsimd_context __user *ctx) { struct user_fpsimd_state fpsimd; __u32 magic, size; @@ -225,15 +185,9 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) return err ? 
-EFAULT : 0; } - -struct user_ctxs { - struct fpsimd_context __user *fpsimd; - struct sve_context __user *sve; -}; - #ifdef CONFIG_ARM64_SVE -static int preserve_sve_context(struct sve_context __user *ctx) +int preserve_sve_context(struct sve_context __user *ctx) { int err = 0; u16 reserved[ARRAY_SIZE(ctx->__reserved)]; @@ -265,7 +219,7 @@ static int preserve_sve_context(struct sve_context __user *ctx) return err ? -EFAULT : 0; } -static int restore_sve_fpsimd_context(struct user_ctxs *user) +int restore_sve_fpsimd_context(struct user_ctxs *user) { int err; unsigned int vq; @@ -328,25 +282,18 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) return err ? -EFAULT : 0; } -#else /* ! CONFIG_ARM64_SVE */ - -/* Turn any non-optimised out attempts to use these into a link error: */ -extern int preserve_sve_context(void __user *ctx); -extern int restore_sve_fpsimd_context(struct user_ctxs *user); - #endif /* ! CONFIG_ARM64_SVE */ - -static int parse_user_sigframe(struct user_ctxs *user, - struct rt_sigframe __user *sf) +int __parse_user_sigcontext(struct user_ctxs *user, + struct sigcontext __user const *sc, + void __user const *sigframe_base) { - struct sigcontext __user *const sc = &sf->uc.uc_mcontext; struct _aarch64_ctx __user *head; char __user *base = (char __user *)&sc->__reserved; size_t offset = 0; size_t limit = sizeof(sc->__reserved); bool have_extra_context = false; - char const __user *const sfp = (char const __user *)sf; + char const __user *const sfp = (char const __user *)sigframe_base; user->fpsimd = NULL; user->sve = NULL; @@ -470,7 +417,7 @@ static int parse_user_sigframe(struct user_ctxs *user, offset = 0; limit = extra_size; - if (!access_ok(VERIFY_READ, base, limit)) + if (!access_ok(base, limit)) goto invalid; continue; @@ -495,81 +442,11 @@ static int parse_user_sigframe(struct user_ctxs *user, return -EINVAL; } -static int restore_sigframe(struct pt_regs *regs, - struct rt_sigframe __user *sf) -{ - sigset_t set; - int i, err; - struct user_ctxs user; - - err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); - if (err == 0) - set_current_blocked(&set); - - for (i = 0; i < 31; i++) - __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], - err); - __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); - __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); - __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); - - /* - * Avoid sys_rt_sigreturn() restarting. - */ - forget_syscall(regs); - - err |= !valid_user_regs(®s->user_regs, current); - if (err == 0) - err = parse_user_sigframe(&user, sf); - - if (err == 0) { - if (!user.fpsimd) - return -EINVAL; - - if (user.sve) { - if (!system_supports_sve()) - return -EINVAL; - - err = restore_sve_fpsimd_context(&user); - } else { - err = restore_fpsimd_context(user.fpsimd); - } - } - - return err; -} - SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); - struct rt_sigframe __user *frame; - - /* Always make any pending restarted system calls return -EINTR */ - current->restart_block.fn = do_no_restart_syscall; - - /* - * Since we stacked the signal on a 128-bit boundary, then 'sp' should - * be word aligned here. 
- */ - if (regs->sp & 15) - goto badframe; - - frame = (struct rt_sigframe __user *)regs->sp; - - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) - goto badframe; - if (restore_sigframe(regs, frame)) - goto badframe; - - if (restore_altstack(&frame->uc.uc_stack)) - goto badframe; - - return regs->regs[0]; - -badframe: - arm64_notify_segfault(regs->sp); - return 0; + return __sys_rt_sigreturn(regs); } /* @@ -579,8 +456,7 @@ SYSCALL_DEFINE0(rt_sigreturn) * this task; otherwise, generates a layout for the current state * of the task. */ -static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, - bool add_all) +int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all) { int err; @@ -618,122 +494,49 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, return sigframe_alloc_end(user); } -static int setup_sigframe(struct rt_sigframe_user_layout *user, - struct pt_regs *regs, sigset_t *set) +int setup_extra_context(char __user *sfp, unsigned long sf_size, + char __user *extrap) { - int i, err = 0; - struct rt_sigframe __user *sf = user->sigframe; - - /* set up the stack frame for unwinding */ - __put_user_error(regs->regs[29], &user->next_frame->fp, err); - __put_user_error(regs->regs[30], &user->next_frame->lr, err); - - for (i = 0; i < 31; i++) - __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], - err); - __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); - __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); - __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); - - __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); - - err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); - - if (err == 0) { - struct fpsimd_context __user *fpsimd_ctx = - apply_user_offset(user, user->fpsimd_offset); - err |= preserve_fpsimd_context(fpsimd_ctx); - } - - /* fault information, if valid */ - if (err == 0 && user->esr_offset) { - struct esr_context __user *esr_ctx = - apply_user_offset(user, user->esr_offset); - - __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err); - __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err); - __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); - } - - /* Scalable Vector Extension state, if present */ - if (system_supports_sve() && err == 0 && user->sve_offset) { - struct sve_context __user *sve_ctx = - apply_user_offset(user, user->sve_offset); - err |= preserve_sve_context(sve_ctx); - } - - if (err == 0 && user->extra_offset) { - char __user *sfp = (char __user *)user->sigframe; - char __user *userp = - apply_user_offset(user, user->extra_offset); - - struct extra_context __user *extra; - struct _aarch64_ctx __user *end; - u64 extra_datap; - u32 extra_size; - - extra = (struct extra_context __user *)userp; - userp += EXTRA_CONTEXT_SIZE; + int err = 0; + struct extra_context __user *extra; + struct _aarch64_ctx __user *end; + u64 extra_datap; + u32 extra_size; - end = (struct _aarch64_ctx __user *)userp; - userp += TERMINATOR_SIZE; + extra = (struct extra_context __user *)extrap; + extrap += EXTRA_CONTEXT_SIZE; - /* - * extra_datap is just written to the signal frame. - * The value gets cast back to a void __user * - * during sigreturn. 
- */ - extra_datap = (__force u64)userp; - extra_size = sfp + round_up(user->size, 16) - userp; + end = (struct _aarch64_ctx __user *)extrap; + extrap += TERMINATOR_SIZE; - __put_user_error(EXTRA_MAGIC, &extra->head.magic, err); - __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err); - __put_user_error(extra_datap, &extra->datap, err); - __put_user_error(extra_size, &extra->size, err); - - /* Add the terminator */ - __put_user_error(0, &end->magic, err); - __put_user_error(0, &end->size, err); - } + /* + * extra_datap is just written to the signal frame. + * The value gets cast back to a void __user * + * during sigreturn. + */ + extra_datap = (__force u64)extrap; + extra_size = sfp + round_up(sf_size, 16) - extrap; - /* set the "end" magic */ - if (err == 0) { - struct _aarch64_ctx __user *end = - apply_user_offset(user, user->end_offset); + __put_user_error(EXTRA_MAGIC, &extra->head.magic, err); + __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err); + __put_user_error(extra_datap, &extra->datap, err); + __put_user_error(extra_size, &extra->size, err); - __put_user_error(0, &end->magic, err); - __put_user_error(0, &end->size, err); - } + /* Add the terminator */ + __put_user_error(0, &end->magic, err); + __put_user_error(0, &end->size, err); return err; } -static int get_sigframe(struct rt_sigframe_user_layout *user, - struct ksignal *ksig, struct pt_regs *regs) +void __setup_return(struct pt_regs *regs, struct k_sigaction *ka, + struct rt_sigframe_user_layout *user, int usig) { - unsigned long sp, sp_top; - int err; - - init_user_layout(user); - err = setup_sigframe_layout(user, false); - if (err) - return err; - - sp = sp_top = sigsp(regs->sp, ksig); - - sp = round_down(sp - sizeof(struct frame_record), 16); - user->next_frame = (struct frame_record __user *)sp; - - sp = round_down(sp, 16) - sigframe_size(user); - user->sigframe = (struct rt_sigframe __user *)sp; - - /* - * Check that we can actually write to the signal frame. 
- */ - if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp)) - return -EFAULT; + regs->regs[0] = usig; + regs->sp = (unsigned long)user->sigframe; + regs->regs[29] = (unsigned long)&user->next_frame->fp; + regs->pc = (unsigned long)ka->sa.sa_handler; - return 0; } static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, @@ -741,10 +544,7 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, { __sigrestore_t sigtramp; - regs->regs[0] = usig; - regs->sp = (unsigned long)user->sigframe; - regs->regs[29] = (unsigned long)&user->next_frame->fp; - regs->pc = (unsigned long)ka->sa.sa_handler; + __setup_return(regs, ka, user, usig); if (ka->sa.sa_flags & SA_RESTORER) sigtramp = ka->sa.sa_restorer; @@ -757,38 +557,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe_user_layout user; - struct rt_sigframe __user *frame; - int err = 0; - - fpsimd_signal_preserve_current_state(); - - if (get_sigframe(&user, ksig, regs)) - return 1; - - frame = user.sigframe; - - __put_user_error(0, &frame->uc.uc_flags, err); - __put_user_error(NULL, &frame->uc.uc_link, err); - - err |= __save_altstack(&frame->uc.uc_stack, regs->sp); - err |= setup_sigframe(&user, regs, set); - if (err == 0) { - setup_return(regs, &ksig->ka, &user, usig); - if (ksig->ka.sa.sa_flags & SA_SIGINFO) { - err |= copy_siginfo_to_user(&frame->info, &ksig->info); - regs->regs[1] = (unsigned long)&frame->info; - regs->regs[2] = (unsigned long)&frame->uc; - } - } - - return err; + return __setup_rt_frame(usig, ksig, set, regs); } static void setup_restart_syscall(struct pt_regs *regs) { - if (is_compat_task()) - compat_setup_restart_syscall(regs); + if (is_a32_compat_task()) + a32_setup_restart_syscall(regs); else regs->regs[8] = __NR_restart_syscall; } @@ -808,11 +583,13 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) /* * Set up the stack frame */ - if (is_compat_task()) { + if (is_a32_compat_task()) { if (ksig->ka.sa.sa_flags & SA_SIGINFO) - ret = compat_setup_rt_frame(usig, ksig, oldset, regs); + ret = a32_setup_rt_frame(usig, ksig, oldset, regs); else - ret = compat_setup_frame(usig, ksig, oldset, regs); + ret = a32_setup_frame(usig, ksig, oldset, regs); + } else if (is_ilp32_compat_task()) { + ret = ilp32_setup_rt_frame(usig, ksig, oldset, regs); } else { ret = setup_rt_frame(usig, ksig, oldset, regs); } @@ -853,7 +630,7 @@ static void do_signal(struct pt_regs *regs) */ if (syscall) { continue_addr = regs->pc; - restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); + restart_addr = continue_addr - (a32_thumb_mode(regs) ? 
2 : 4); retval = regs->regs[0]; /* @@ -922,6 +699,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, */ trace_hardirqs_off(); +#ifdef CONFIG_ARM64_ERR_RECOV + /* notify userspace of pending SEAs */ + if (unlikely(thread_flags & _TIF_SEA_NOTIFY)) + sea_notify_process(); +#endif /* CONFIG_ARM64_ERR_RECOV */ + do { /* Check valid user FS if needed */ addr_limit_user_check(); diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 24b09003f8214ce0df5a222a112e6cf10d2161e9..06c370cc2618fd3467425435bb6466417667d8dc 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -27,10 +27,11 @@ #include #include #include +#include #include #include -struct compat_sigcontext { +struct a32_sigcontext { /* We always set these two fields to 0 */ compat_ulong_t trap_no; compat_ulong_t error_code; @@ -56,17 +57,17 @@ struct compat_sigcontext { compat_ulong_t fault_address; }; -struct compat_ucontext { +struct a32_ucontext { compat_ulong_t uc_flags; compat_uptr_t uc_link; compat_stack_t uc_stack; - struct compat_sigcontext uc_mcontext; + struct a32_sigcontext uc_mcontext; compat_sigset_t uc_sigmask; int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); }; -struct compat_vfp_sigframe { +struct a32_vfp_sigframe { compat_ulong_t magic; compat_ulong_t size; struct compat_user_vfp { @@ -81,56 +82,34 @@ struct compat_vfp_sigframe { } __attribute__((__aligned__(8))); #define VFP_MAGIC 0x56465001 -#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) +#define VFP_STORAGE_SIZE sizeof(struct a32_vfp_sigframe) #define FSR_WRITE_SHIFT (11) -struct compat_aux_sigframe { - struct compat_vfp_sigframe vfp; +struct a32_aux_sigframe { + struct a32_vfp_sigframe vfp; /* Something that isn't a valid magic number for any coprocessor. */ unsigned long end_magic; } __attribute__((__aligned__(8))); -struct compat_sigframe { - struct compat_ucontext uc; +struct a32_sigframe { + struct a32_ucontext uc; compat_ulong_t retcode[2]; }; -struct compat_rt_sigframe { +struct a32_rt_sigframe { struct compat_siginfo info; - struct compat_sigframe sig; + struct a32_sigframe sig; }; #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) -{ - compat_sigset_t cset; - - cset.sig[0] = set->sig[0] & 0xffffffffull; - cset.sig[1] = set->sig[0] >> 32; - - return copy_to_user(uset, &cset, sizeof(*uset)); -} - -static inline int get_sigset_t(sigset_t *set, - const compat_sigset_t __user *uset) -{ - compat_sigset_t s32; - - if (copy_from_user(&s32, uset, sizeof(*uset))) - return -EFAULT; - - set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); - return 0; -} - /* * VFP save/restore code. * * We have to be careful with endianness, since the fpsimd context-switch - * code operates on 128-bit (Q) register values whereas the compat ABI + * code operates on 128-bit (Q) register values whereas the a32 ABI * uses an array of 64-bit (D) registers. Consequently, we need to swap * the two halves of each Q register when running on a big-endian CPU. */ @@ -147,7 +126,7 @@ union __fpsimd_vreg { }; }; -static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) +static int a32_preserve_vfp_context(struct a32_vfp_sigframe __user *frame) { struct user_fpsimd_state const *fpsimd = ¤t->thread.uw.fpsimd_state; @@ -197,7 +176,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) return err ? 
-EFAULT : 0; } -static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) +static int a32_restore_vfp_context(struct a32_vfp_sigframe __user *frame) { struct user_fpsimd_state fpsimd; compat_ulong_t magic = VFP_MAGIC; @@ -237,12 +216,12 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) return err ? -EFAULT : 0; } -static int compat_restore_sigframe(struct pt_regs *regs, - struct compat_sigframe __user *sf) +static int a32_restore_sigframe(struct pt_regs *regs, + struct a32_sigframe __user *sf) { int err; sigset_t set; - struct compat_aux_sigframe __user *aux; + struct a32_aux_sigframe __user *aux; unsigned long psr; err = get_sigset_t(&set, &sf->uc.uc_sigmask); @@ -278,9 +257,9 @@ static int compat_restore_sigframe(struct pt_regs *regs, err |= !valid_user_regs(®s->user_regs, current); - aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; + aux = (struct a32_aux_sigframe __user *) sf->uc.uc_regspace; if (err == 0) - err |= compat_restore_vfp_context(&aux->vfp); + err |= a32_restore_vfp_context(&aux->vfp); return err; } @@ -288,7 +267,7 @@ static int compat_restore_sigframe(struct pt_regs *regs, COMPAT_SYSCALL_DEFINE0(sigreturn) { struct pt_regs *regs = current_pt_regs(); - struct compat_sigframe __user *frame; + struct a32_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -301,12 +280,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) if (regs->compat_sp & 7) goto badframe; - frame = (struct compat_sigframe __user *)regs->compat_sp; + frame = (struct a32_sigframe __user *)regs->compat_sp; - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) goto badframe; - if (compat_restore_sigframe(regs, frame)) + if (a32_restore_sigframe(regs, frame)) goto badframe; return regs->regs[0]; @@ -319,7 +298,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); - struct compat_rt_sigframe __user *frame; + struct a32_rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -332,12 +311,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) if (regs->compat_sp & 7) goto badframe; - frame = (struct compat_rt_sigframe __user *)regs->compat_sp; + frame = (struct a32_rt_sigframe __user *)regs->compat_sp; - if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) goto badframe; - if (compat_restore_sigframe(regs, &frame->sig)) + if (a32_restore_sigframe(regs, &frame->sig)) goto badframe; if (compat_restore_altstack(&frame->sig.uc.uc_stack)) @@ -350,7 +329,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) return 0; } -static void __user *compat_get_sigframe(struct ksignal *ksig, +static void __user *a32_get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize) { @@ -365,13 +344,13 @@ static void __user *compat_get_sigframe(struct ksignal *ksig, /* * Check that we can actually write to the signal frame. 
*/ - if (!access_ok(VERIFY_WRITE, frame, framesize)) + if (!access_ok(frame, framesize)) frame = NULL; return frame; } -static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, +static void a32_setup_return(struct pt_regs *regs, struct k_sigaction *ka, compat_ulong_t __user *rc, void __user *frame, int usig) { @@ -415,10 +394,10 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, regs->pstate = spsr; } -static int compat_setup_sigframe(struct compat_sigframe __user *sf, +static int a32_setup_sigframe(struct a32_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { - struct compat_aux_sigframe __user *aux; + struct a32_aux_sigframe __user *aux; unsigned long psr = pstate_to_compat_psr(regs->pstate); int err = 0; @@ -441,7 +420,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); - /* set the compat FSR WnR */ + /* set the aarch32 FSR WnR */ __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) << FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err); __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); @@ -449,25 +428,25 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, err |= put_sigset_t(&sf->uc.uc_sigmask, set); - aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; + aux = (struct a32_aux_sigframe __user *) sf->uc.uc_regspace; if (err == 0) - err |= compat_preserve_vfp_context(&aux->vfp); + err |= a32_preserve_vfp_context(&aux->vfp); __put_user_error(0, &aux->end_magic, err); return err; } /* - * 32-bit signal handling routines called from signal.c + * aarch32-bit signal handling routines called from signal.c */ -int compat_setup_rt_frame(int usig, struct ksignal *ksig, +int a32_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct compat_rt_sigframe __user *frame; + struct a32_rt_sigframe __user *frame; int err = 0; - frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); + frame = a32_get_sigframe(ksig, regs, sizeof(*frame)); if (!frame) return 1; @@ -479,10 +458,10 @@ int compat_setup_rt_frame(int usig, struct ksignal *ksig, err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp); - err |= compat_setup_sigframe(&frame->sig, regs, set); + err |= a32_setup_sigframe(&frame->sig, regs, set); if (err == 0) { - compat_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); + a32_setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; } @@ -490,27 +469,27 @@ int compat_setup_rt_frame(int usig, struct ksignal *ksig, return err; } -int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, +int a32_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct compat_sigframe __user *frame; + struct a32_sigframe __user *frame; int err = 0; - frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); + frame = a32_get_sigframe(ksig, regs, sizeof(*frame)); if (!frame) return 1; __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); - err |= compat_setup_sigframe(frame, regs, set); + err |= a32_setup_sigframe(frame, regs, set); if (err == 0) - compat_setup_return(regs, &ksig->ka, frame->retcode, frame, usig); + a32_setup_return(regs, &ksig->ka, frame->retcode, frame, usig); return err; 
} -void compat_setup_restart_syscall(struct pt_regs *regs) +void a32_setup_restart_syscall(struct pt_regs *regs) { regs->regs[7] = __NR_compat_restart_syscall; } diff --git a/arch/arm64/kernel/signal32_common.c b/arch/arm64/kernel/signal32_common.c new file mode 100644 index 0000000000000000000000000000000000000000..21995fc4e0b44ffe70ec203c5da56441871aac5d --- /dev/null +++ b/arch/arm64/kernel/signal32_common.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Based on arch/arm/kernel/signal.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * Modified by Will Deacon + */ + +#include +#include +#include + +#include +#include + +int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) +{ + compat_sigset_t cset; + + cset.sig[0] = set->sig[0] & 0xffffffffull; + cset.sig[1] = set->sig[0] >> 32; + + return copy_to_user(uset, &cset, sizeof(*uset)); +} + +int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset) +{ + compat_sigset_t s32; + + if (copy_from_user(&s32, uset, sizeof(*uset))) + return -EFAULT; + + set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); + return 0; +} diff --git a/arch/arm64/kernel/signal_ilp32.c b/arch/arm64/kernel/signal_ilp32.c new file mode 100644 index 0000000000000000000000000000000000000000..6e84c8669a60803992f785db1103322047363059 --- /dev/null +++ b/arch/arm64/kernel/signal_ilp32.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2018 Cavium Networks. + * Yury Norov + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#define get_sigset(s, m) get_sigset_t(s, m) +#define put_sigset(s, m) put_sigset_t(m, s) + +#define restore_altstack(stack) compat_restore_altstack(stack) +#define __save_altstack(stack, sp) __compat_save_altstack(stack, sp) +#define copy_siginfo_to_user(frame_info, ksig_info) \ + copy_siginfo_to_user32(frame_info, ksig_info) + +#define setup_return(regs, ka, user_layout, usig) \ +{ \ + __setup_return(regs, ka, user_layout, usig); \ + regs->regs[30] = \ + (unsigned long)VDSO_SYMBOL(current->mm->context.vdso, \ + sigtramp_ilp32); \ +} + +struct ilp32_ucontext { + u32 uc_flags; + u32 uc_link; + compat_stack_t uc_stack; + compat_sigset_t uc_sigmask; + /* glibc uses a 1024-bit sigset_t */ + __u8 __unused[1024 / 8 - sizeof(compat_sigset_t)]; + /* last for future expansion */ + struct sigcontext uc_mcontext; +}; + +struct rt_sigframe { + struct compat_siginfo info; + struct ilp32_ucontext uc; +}; + +#include + +COMPAT_SYSCALL_DEFINE0(ilp32_rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + + return __sys_rt_sigreturn(regs); +} + +int ilp32_setup_rt_frame(int usig, struct ksignal *ksig, + sigset_t *set, struct pt_regs *regs) +{ + return __setup_rt_frame(usig, ksig, set, regs); +} diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 25fcd22a4bb2430480e3acb2447a670d65474c8f..6b8bc313a87b37a223e7f8fbe5cabc3767364b96 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -35,12 +35,16 @@ #include #include #include +#include +#include #include #include #include #include #include #include +#include +#include #include #include @@ -82,7 +86,8 @@ enum ipi_msg_type { IPI_CPU_CRASH_STOP, IPI_TIMER, IPI_IRQ_WORK, - IPI_WAKEUP + IPI_WAKEUP, + IPI_CPU_BACKTRACE }; #ifdef CONFIG_HOTPLUG_CPU @@ -94,6 +99,167 @@ static inline int op_cpu_kill(unsigned int cpu) } #endif +#ifdef CONFIG_ARM64_CPU_PARK +struct 
cpu_park_section { + unsigned long exit; /* exit address of park loop */ + unsigned long magic; /* magic representing park state */ + char text[0]; /* text section of park */ +}; + +static int mmap_cpu_park_mem(void) +{ + if (!park_info.start) + return -ENOMEM; + + if (park_info.start_v) + return 0; + + park_info.start_v = (unsigned long)__ioremap(park_info.start, + park_info.len, + PAGE_KERNEL_EXEC); + if (!park_info.start_v) { + pr_warn("failed to map park memory."); + return -ENOMEM; + } + + return 0; +} + +static inline unsigned long cpu_park_section_v(unsigned int cpu) +{ + return park_info.start_v + PARK_SECTION_SIZE * (cpu - 1); +} + +static inline unsigned long cpu_park_section_p(unsigned int cpu) +{ + return park_info.start + PARK_SECTION_SIZE * (cpu - 1); +} + +/* + * Write secondary_entry to the exit slot of the park section. + * The parked secondary cpu will then jump straight into the + * kernel via secondary_entry. + */ +static int write_park_exit(unsigned int cpu) +{ + struct cpu_park_section *park_section; + unsigned long *park_exit; + unsigned long *park_text; + + if (mmap_cpu_park_mem() != 0) + return -EPERM; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + park_exit = &park_section->exit; + park_text = (unsigned long *)park_section->text; + pr_debug("park_text 0x%lx : 0x%lx, do_cpu_park text 0x%lx : 0x%lx", + (unsigned long)park_text, *park_text, + (unsigned long)do_cpu_park, + *(unsigned long *)do_cpu_park); + + /* + * Test the first 8 bytes to determine + * whether we need to write the cpu park exit. + */ + if (*park_text == *(unsigned long *)do_cpu_park) { + writeq_relaxed(__pa_symbol(secondary_entry), park_exit); + __flush_dcache_area((__force void *)park_exit, + sizeof(unsigned long)); + flush_icache_range((unsigned long)park_exit, + (unsigned long)(park_exit + 1)); + sev(); + dsb(sy); + isb(); + + pr_debug("Write cpu %u secondary entry 0x%lx to 0x%lx.", + cpu, *park_exit, (unsigned long)park_exit); + pr_info("Boot cpu %u from PARK state.", cpu); + return 0; + } + + return -EPERM; +} + +/* Install cpu park sections for the specific cpu.
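 + * + * Each cpu gets one PARK_SECTION_SIZE region: an exit slot, a magic + * word and a copy of the do_cpu_park text (see struct cpu_park_section + * above). cpu_wait_park() polls the magic word for PARK_MAGIC to + * confirm a cpu has entered the park loop, and write_park_exit() later + * releases it by writing secondary_entry into the exit slot.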
*/ +static int install_cpu_park(unsigned int cpu) +{ + struct cpu_park_section *park_section; + unsigned long *park_exit; + unsigned long *park_magic; + unsigned long park_text_len; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + pr_debug("Install cpu park on cpu %u park exit 0x%lx park text 0x%lx", + cpu, (unsigned long)park_section, + (unsigned long)(park_section->text)); + + park_exit = &park_section->exit; + park_magic = &park_section->magic; + park_text_len = PARK_SECTION_SIZE - sizeof(struct cpu_park_section); + + *park_exit = 0UL; + *park_magic = 0UL; + memcpy((void *)park_section->text, do_cpu_park, park_text_len); + __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); + + return 0; +} + +static int uninstall_cpu_park(unsigned int cpu) +{ + unsigned long park_section; + + if (mmap_cpu_park_mem() != 0) + return -EPERM; + + park_section = cpu_park_section_v(cpu); + memset((void *)park_section, 0, PARK_SECTION_SIZE); + __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); + + return 0; +} + +static int cpu_wait_park(unsigned int cpu) +{ + long timeout; + struct cpu_park_section *park_section; + + volatile unsigned long *park_magic; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + park_magic = &park_section->magic; + + timeout = USEC_PER_SEC; + while (*park_magic != PARK_MAGIC && timeout--) + udelay(1); + + if (timeout > 0) + pr_debug("cpu %u park done.", cpu); + else + pr_err("cpu %u park failed.", cpu); + + return *park_magic == PARK_MAGIC; +} + +static void cpu_park(unsigned int cpu) +{ + unsigned long park_section_p; + unsigned long park_exit_phy; + unsigned long do_park; + typeof(enter_cpu_park) *park; + + park_section_p = cpu_park_section_p(cpu); + park_exit_phy = park_section_p; + pr_debug("Go to park cpu %u exit address 0x%lx", cpu, park_exit_phy); + + do_park = park_section_p + sizeof(struct cpu_park_section); + park = (void *)__pa_symbol(enter_cpu_park); + + cpu_install_idmap(); + park(do_park, park_exit_phy); + unreachable(); +} +#endif /* * Boot a secondary CPU, and assign it the specified idle task. @@ -101,6 +267,10 @@ static inline int op_cpu_kill(unsigned int cpu) */ static int boot_secondary(unsigned int cpu, struct task_struct *idle) { +#ifdef CONFIG_ARM64_CPU_PARK + if (write_park_exit(cpu) == 0) + return 0; +#endif if (cpu_ops[cpu]->cpu_boot) return cpu_ops[cpu]->cpu_boot(cpu); @@ -108,6 +278,7 @@ static int boot_secondary(unsigned int cpu, struct task_struct *idle) } static DECLARE_COMPLETION(cpu_running); +bool va52mismatch __ro_after_init; int __cpu_up(unsigned int cpu, struct task_struct *idle) { @@ -133,16 +304,25 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * time out. 
*/ wait_for_completion_timeout(&cpu_running, - msecs_to_jiffies(1000)); + msecs_to_jiffies(5000)); if (!cpu_online(cpu)) { pr_crit("CPU%u: failed to come online\n", cpu); + + if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch) + pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); + ret = -EIO; } } else { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); + return ret; } +#ifdef CONFIG_ARM64_CPU_PARK + uninstall_cpu_park(cpu); +#endif + secondary_data.task = NULL; secondary_data.stack = NULL; status = READ_ONCE(secondary_data.status); @@ -175,6 +355,20 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) return ret; } +static void init_gic_priority_masking(void) +{ + u32 cpuflags; + + if (WARN_ON(!gic_enable_sre())) + return; + + cpuflags = read_sysreg(daif); + + WARN_ON(!(cpuflags & PSR_I_BIT)); + + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); +} + /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. @@ -194,6 +388,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ mmgrab(mm); current->active_mm = mm; +#ifdef CONFIG_ARM64_TLBI_IPI + cpumask_set_cpu(cpu, mm_cpumask(mm)); +#endif /* * TTBR0 is only used for the identity mapping at this stage. Make it @@ -201,6 +398,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ cpu_uninstall_idmap(); + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); + preempt_disable(); trace_hardirqs_off(); @@ -226,6 +426,9 @@ asmlinkage notrace void secondary_start_kernel(void) store_cpu_topology(cpu); numa_add_cpu(cpu); +#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK + mpidr_siblings_add_cpu(cpu); +#endif /* * OK, now it's safe to let the boot CPU continue. Wait for @@ -281,6 +484,9 @@ int __cpu_disable(void) remove_cpu_topology(cpu); numa_remove_cpu(cpu); +#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK + mpidr_siblings_remove_cpu(cpu); +#endif /* * Take this CPU offline. Once we clear this, we can't return, @@ -293,6 +499,13 @@ int __cpu_disable(void) */ irq_migrate_all_off_this_cpu(); +#ifdef CONFIG_ARM64_TLBI_IPI + /* + * Remove this CPU from the vm mask set of all processes. + */ + clear_tasks_mm_cpumask(cpu); +#endif + return 0; } @@ -408,12 +621,18 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + cpuinfo_store_boot_cpu(); + /* - * Initialise the static keys early as they may be enabled by the - * cpufeature code. + * We now know enough about the boot CPU to apply the + * alternatives that cannot wait until interrupt handling + * and/or scheduling is enabled. */ - jump_label_init(); - cpuinfo_store_boot_cpu(); + apply_boot_alternatives(); + + /* Conditionally switch to GIC PMR for interrupt masking */ + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) @@ -480,6 +699,34 @@ static bool bootcpu_valid __initdata; static unsigned int cpu_count = 1; #ifdef CONFIG_ACPI + +#ifdef CONFIG_ARCH_PHYTIUM +/* + * On a Phytium S2500 multi-socket server, for example 2-socket (2P), there are + * socket0 and socket1 on the server: + * If the storage device (like a SAS controller and the disks to save vmcore + * into) is installed on socket1 and the second kernel brings up 2 CPUs, both on + * socket0 with nr_cpus=2, then vmcore will fail to be saved to disk, as + * interrupts like SPI and LPI (except SGI) cannot be delivered across cpu + * sockets on this server platform.
+ * To avoid this issue, Bypass other non-cpu0 to ensure that each cpu0 on each + * socket can boot up and handle interrupt when booting the second kernel. + */ +static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid) +{ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500) + return false; + + /* + * Bypass other non-cpu0 to ensure second kernel can bring up each cpu0 + * on each socket + */ + if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff)) + return true; + return false; +} +#endif + static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) @@ -498,16 +745,14 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) { u64 hwid = processor->arm_mpidr; - if (!(processor->flags & ACPI_MADT_ENABLED)) { - pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); - return; - } - if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) { pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); return; } + if (!(processor->flags & ACPI_MADT_ENABLED)) + pr_debug("disabled CPU entry with 0x%llx MPIDR\n", hwid); + if (is_mpidr_duplicate(cpu_count, hwid)) { pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); return; @@ -528,6 +773,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) if (cpu_count >= NR_CPUS) return; +#ifdef CONFIG_ARCH_PHYTIUM + if (is_phytium_kdump_cpu_need_bypass(hwid)) + return; +#endif + /* map the logical cpu id to cpu MPIDR */ cpu_logical_map(cpu_count) = hwid; @@ -548,7 +798,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) } static int __init -acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, +acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *processor; @@ -557,7 +807,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, if (BAD_MADT_GICC_ENTRY(processor, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); acpi_map_gic_cpu_interface(processor); @@ -701,6 +951,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) store_cpu_topology(this_cpu); numa_store_cpu_info(this_cpu); numa_add_cpu(this_cpu); +#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK + mpidr_siblings_add_cpu(this_cpu); +#endif /* * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set @@ -727,8 +980,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus) err = cpu_ops[cpu]->cpu_prepare(cpu); if (err) continue; - +#ifdef CONFIG_ACPI + if (!acpi_disabled) { + if ((cpu_madt_gicc[cpu].flags & ACPI_MADT_ENABLED)) + set_cpu_present(cpu, true); + } else { + set_cpu_present(cpu, true); + } +#else set_cpu_present(cpu, true); +#endif + numa_store_cpu_info(cpu); } } @@ -749,6 +1011,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { S(IPI_TIMER, "Timer broadcast interrupts"), S(IPI_IRQ_WORK, "IRQ work interrupts"), S(IPI_WAKEUP, "CPU wake-up interrupts"), + S(IPI_CPU_BACKTRACE, "backtrace interrupts"), }; static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) @@ -817,6 +1080,20 @@ static void ipi_cpu_stop(unsigned int cpu) local_daif_mask(); sdei_mask_local_cpu(); +#ifdef CONFIG_ARM64_CPU_PARK + /* + * Go to cpu park state. + * Otherwise go to cpu die. 
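 + * + * When a kexec is in progress and the park area has been mapped, mask + * the interrupts and jump into the per-cpu park section installed by + * kexec_smp_send_park(); the cpu stays parked there until a later boot + * writes secondary_entry into its exit slot (see write_park_exit()). + * The cpu_die() call below is only a fallback in case parking falls + * through.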
+ */ + if (kexec_in_progress && park_info.start_v) { + machine_kexec_mask_interrupts(); + cpu_park(cpu); + + if (cpu_ops[cpu]->cpu_die) + cpu_ops[cpu]->cpu_die(cpu); + } +#endif + while (1) cpu_relax(); } @@ -852,6 +1129,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) { unsigned int cpu = smp_processor_id(); struct pt_regs *old_regs = set_irq_regs(regs); + bool irqs_enabled = interrupts_enabled(regs); if ((unsigned)ipinr < NR_IPI) { trace_ipi_entry_rcuidle(ipi_types[ipinr]); @@ -877,7 +1155,11 @@ void handle_IPI(int ipinr, struct pt_regs *regs) case IPI_CPU_CRASH_STOP: if (IS_ENABLED(CONFIG_KEXEC_CORE)) { - irq_enter(); + if (gic_supports_pseudo_nmis()) { + if (irqs_enabled) + nmi_enter(); + } else + irq_enter(); ipi_cpu_crash_stop(cpu, regs); unreachable(); @@ -908,6 +1190,24 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; #endif + case IPI_CPU_BACKTRACE: + if (gic_supports_pseudo_nmis()) { + if (irqs_enabled) + nmi_enter(); + } else { + printk_nmi_enter(); + irq_enter(); + } + nmi_cpu_backtrace(regs); + if (gic_supports_pseudo_nmis()) { + if (irqs_enabled) + nmi_exit(); + } else { + irq_exit(); + printk_nmi_exit(); + } + break; + default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; @@ -930,11 +1230,22 @@ void tick_broadcast(const struct cpumask *mask) } #endif +/* + * The number of CPUs online, not counting this CPU (which may not be + * fully online and so not counted in num_online_cpus()). + */ +static inline unsigned int num_other_online_cpus(void) +{ + unsigned int this_cpu_online = cpu_online(smp_processor_id()); + + return num_online_cpus() - this_cpu_online; +} + void smp_send_stop(void) { unsigned long timeout; - if (num_online_cpus() > 1) { + if (num_other_online_cpus()) { cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); @@ -947,16 +1258,55 @@ void smp_send_stop(void) /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && timeout--) + while (num_other_online_cpus() && timeout--) udelay(1); - if (num_online_cpus() > 1) + if (num_other_online_cpus()) pr_warning("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(cpu_online_mask)); sdei_mask_local_cpu(); } +#ifdef CONFIG_ARM64_CPU_PARK +int kexec_smp_send_park(void) +{ + unsigned long cpu; + + if (WARN_ON(!kexec_in_progress)) { + pr_crit("%s called not in kexec progress.", __func__); + return -EPERM; + } + + if (mmap_cpu_park_mem() != 0) { + pr_info("no cpuparkmem, goto normal way."); + return -EPERM; + } + + local_irq_disable(); + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + for_each_cpu(cpu, &mask) + install_cpu_park(cpu); + smp_cross_call(&mask, IPI_CPU_STOP); + + /* Wait for other CPUs to park */ + for_each_cpu(cpu, &mask) + cpu_wait_park(cpu); + pr_info("smp park other cpus done\n"); + } + + sdei_mask_local_cpu(); + + return 0; +} +#endif + #ifdef CONFIG_KEXEC_CORE void crash_smp_send_stop(void) { @@ -973,7 +1323,11 @@ void crash_smp_send_stop(void) cpus_stopped = 1; - if (num_online_cpus() == 1) { + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. 
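 + * (num_other_online_cpus() subtracts this cpu only if it is still marked + * online, so the check below is correct whether or not this cpu has + * already been cleared from the online mask.)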
+ */ + if (num_other_online_cpus() == 0) { sdei_mask_local_cpu(); return; } @@ -981,7 +1335,7 @@ void crash_smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); pr_crit("SMP: stopping secondary CPUs\n"); smp_cross_call(&mask, IPI_CPU_CRASH_STOP); @@ -1029,3 +1383,152 @@ bool cpus_are_stuck_in_kernel(void) return !!cpus_stuck_in_kernel || smp_spin_tables; } + +static void __ipi_set_nmi_prio(void __iomem *base, u8 prio, int ipinr) +{ + /* + * Using writeb here may cause a hardware error on D05; + * avoid the problem by using writel. + */ + + u32 offset = (ipinr / 4) * 4; + u32 shift = (ipinr % 4) * 8; + u32 prios = readl_relaxed(base + GICR_IPRIORITYR0 + offset); + + /* clear old priority */ + prios &= ~(0xff << shift); + /* set new priority */ + prios |= (prio << shift); + + writel_relaxed(prios, base + GICR_IPRIORITYR0 + offset); +} + +void ipi_set_nmi_prio(void __iomem *base, u8 prio) +{ + __ipi_set_nmi_prio(base, prio, IPI_CPU_BACKTRACE); + __ipi_set_nmi_prio(base, prio, IPI_CPU_CRASH_STOP); +} + +static void raise_nmi(cpumask_t *mask) +{ + /* + * Generate the backtrace directly if we are running in a + * calling context that is not preemptible by the backtrace IPI. + */ + if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled()) + nmi_cpu_backtrace(NULL); + + smp_cross_call(mask, IPI_CPU_BACKTRACE); } + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi); +} + +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF +s64 hardlockup_cpu_freq; +static DEFINE_PER_CPU(u64, cpu_freq_probed); + +static int __init hardlockup_cpu_freq_setup(char *str) +{ + if (!strcasecmp(str, "auto")) + hardlockup_cpu_freq = -1; + else + hardlockup_cpu_freq = simple_strtoll(str, NULL, 0); + + return 1; +} +__setup("hardlockup_cpu_freq=", hardlockup_cpu_freq_setup); + +static u64 arch_pmu_get_cycles(void) +{ + return read_sysreg(PMCCNTR_EL0); +} + +static u64 arch_probe_cpu_freq(void) +{ + volatile int i; + u32 loop = 50000000; + u64 cycles_a, cycles_b; + u64 timer_a, timer_b; + u32 timer_hz = arch_timer_get_cntfrq(); + struct perf_event *evt; + struct perf_event_attr timer_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 0, + .sample_period = 0xffffffffUL, + }; + + /* make sure the cycle counter is enabled */ + evt = perf_event_create_kernel_counter(&timer_attr, smp_processor_id(), + NULL, NULL, NULL); + if (IS_ERR(evt)) + return 0; + + do { + timer_b = timer_a; + + /* avoid an endless loop here */ + if (loop) + loop >>= 1; + else + break; + + timer_a = arch_timer_read_counter(); + cycles_a = arch_pmu_get_cycles(); + + for (i = 0; i < loop; i++) + ; + + timer_b = arch_timer_read_counter(); + cycles_b = arch_pmu_get_cycles(); + } while (cycles_b <= cycles_a); + + perf_event_release_kernel(evt); + if (unlikely(timer_b == timer_a)) + return 0; + + return timer_hz * (cycles_b - cycles_a) / (timer_b - timer_a); +} + +static u64 arch_get_cpu_freq(void) +{ + u64 cpu_freq; + unsigned int cpu = smp_processor_id(); + + cpu_freq = per_cpu(cpu_freq_probed, cpu); + + if (!cpu_freq) { + cpu_freq = arch_probe_cpu_freq(); + pr_info("NMI watchdog: CPU%u freq probed as %llu HZ.\n", + smp_processor_id(), cpu_freq); + if (!cpu_freq) + cpu_freq = -1; + per_cpu(cpu_freq_probed, cpu) =
cpu_freq; + } + + if (-1 == cpu_freq) + cpu_freq = 0; + + return cpu_freq; +} + +u64 hw_nmi_get_sample_period(int watchdog_thresh) +{ + u64 cpu_freq; + + if (!pmu_nmi_enable) + return 0; + + if (hardlockup_cpu_freq < 0) { + cpu_freq = arch_get_cpu_freq(); + return cpu_freq * watchdog_thresh; + } + + return hardlockup_cpu_freq * 1000 * watchdog_thresh; +} +#endif diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c index 3432e5ef9f41882c06462b7f3ec4ff91f02fd931..52cfc6148355f8d129fd307010a1aabecf515631 100644 --- a/arch/arm64/kernel/ssbd.c +++ b/arch/arm64/kernel/ssbd.c @@ -3,17 +3,34 @@ * Copyright (C) 2018 ARM Ltd, All Rights Reserved. */ +#include #include +#include #include +#include #include #include +static void ssbd_ssbs_enable(struct task_struct *task) +{ + u64 val = is_compat_thread(task_thread_info(task)) ? + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + task_pt_regs(task)->pstate |= val; +} + +static void ssbd_ssbs_disable(struct task_struct *task) +{ + u64 val = is_compat_thread(task_thread_info(task)) ? + PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + task_pt_regs(task)->pstate &= ~val; +} + /* * prctl interface for SSBD - * FIXME: Drop the below ifdefery once merged in 4.18. */ -#ifdef PR_SPEC_STORE_BYPASS static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) { int state = arm64_get_ssbd_state(); @@ -46,12 +63,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) return -EPERM; task_clear_spec_ssb_disable(task); clear_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_enable(task); break; case PR_SPEC_DISABLE: if (state == ARM64_SSBD_FORCE_DISABLE) return -EPERM; task_set_spec_ssb_disable(task); set_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_disable(task); break; case PR_SPEC_FORCE_DISABLE: if (state == ARM64_SSBD_FORCE_DISABLE) @@ -59,6 +78,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); set_tsk_thread_flag(task, TIF_SSBD); + ssbd_ssbs_disable(task); break; default: return -ERANGE; @@ -107,4 +127,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) return -ENODEV; } } -#endif /* PR_SPEC_STORE_BYPASS */ diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 4989f7ea1e59925ff9eda5ac68c7ac3cedb3d4fe..bb482ec044b61d43fe5071605b6a039ba8985c28 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) return 0; } +NOKPROBE_SYMBOL(unwind_frame); void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data) @@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, break; } } +NOKPROBE_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 70c283368b6469f34156aeeac04a7eb4bc8fda92..2444312400c802055c4a0266c5c164e6a6892729 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -127,6 +127,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return ret; } +EXPORT_SYMBOL_GPL(cpu_suspend); +EXPORT_SYMBOL_GPL(cpu_resume); static int __init cpu_suspend_init(void) { diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index 
b44065fb16160c62d3fa113d0a86f1dd1e4564dc..fe20c461582a1ecdc9d60224e5f8fae37a3aaf33 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c @@ -31,7 +31,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, - unsigned long, fd, off_t, off) + unsigned long, fd, unsigned long, off) { if (offset_in_page(off) != 0) return -EINVAL; @@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality) return ksys_personality(personality); } +asmlinkage long sys_ni_syscall(void); + +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused) +{ + return sys_ni_syscall(); +} + /* * Wrappers to pass the pt_regs argument. */ -#define sys_personality sys_arm64_personality - -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall +#define __arm64_sys_personality __arm64_sys_arm64_personality #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, const syscall_fn_t sys_call_table[__NR_syscalls] = { - [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, + [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 0f8bcb7de70086edecc3da67b70b8a126f28ded7..8e99938a1c2b45b3618f2e5c0416b508640b7cd2 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -31,119 +31,14 @@ asmlinkage long compat_sys_sigreturn(void); asmlinkage long compat_sys_rt_sigreturn(void); -COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname, - compat_size_t, sz, struct compat_statfs64 __user *, buf) -{ - /* - * 32-bit ARM applies an OABI compatibility fixup to statfs64 and - * fstatfs64 regardless of whether OABI is in use, and therefore - * arbitrary binaries may rely upon it, so we must do the same. - * For more details, see commit: - * - * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and - * fstatfs64") - */ - if (sz == 88) - sz = 84; - - return kcompat_sys_statfs64(pathname, sz, buf); -} - -COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz, - struct compat_statfs64 __user *, buf) -{ - /* see aarch32_statfs64 */ - if (sz == 88) - sz = 84; - - return kcompat_sys_fstatfs64(fd, sz, buf); -} - -/* - * Note: off_4k is always in units of 4K. If we can't do the - * requested offset because it is not page-aligned, we return -EINVAL. 
- */ -COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, - unsigned long, fd, unsigned long, off_4k) -{ - if (off_4k & (~PAGE_MASK >> 12)) - return -EINVAL; - - off_4k >>= (PAGE_SHIFT - 12); - - return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k); -} - -#ifdef CONFIG_CPU_BIG_ENDIAN -#define arg_u32p(name) u32, name##_hi, u32, name##_lo -#else -#define arg_u32p(name) u32, name##_lo, u32, name##_hi -#endif - -#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo) - -COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf, - size_t, count, u32, __pad, arg_u32p(pos)) -{ - return ksys_pread64(fd, buf, count, arg_u64(pos)); -} - -COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd, - const char __user *, buf, size_t, count, u32, __pad, - arg_u32p(pos)) -{ - return ksys_pwrite64(fd, buf, count, arg_u64(pos)); -} - -COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname, - u32, __pad, arg_u32p(length)) -{ - return ksys_truncate(pathname, arg_u64(length)); -} - -COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad, - arg_u32p(length)) -{ - return ksys_ftruncate(fd, arg_u64(length)); -} - -COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad, - arg_u32p(offset), size_t, count) -{ - return ksys_readahead(fd, arg_u64(offset), count); -} - -COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice, - arg_u32p(offset), arg_u32p(len)) -{ - return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice); -} - -COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags, - arg_u32p(offset), arg_u32p(nbytes)) -{ - return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes), - flags); -} - -COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, - arg_u32p(offset), arg_u32p(len)) -{ - return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); -} - -asmlinkage long sys_ni_syscall(const struct pt_regs *); -#define __arm64_sys_ni_syscall sys_ni_syscall - #undef __SYSCALL #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); #include #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym, -const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = { - [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, +const syscall_fn_t a32_sys_call_table[__NR_compat_syscalls] = { + [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall, #include }; diff --git a/arch/arm64/kernel/sys32_common.c b/arch/arm64/kernel/sys32_common.c new file mode 100644 index 0000000000000000000000000000000000000000..453cc62741132b07ffa03167390b19f573d9c575 --- /dev/null +++ b/arch/arm64/kernel/sys32_common.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include + +COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname, + compat_size_t, sz, struct compat_statfs64 __user *, buf) +{ + /* + * 32-bit ARM applies an OABI compatibility fixup to statfs64 and + * fstatfs64 regardless of whether OABI is in use, and therefore + * arbitrary binaries may rely upon it, so we must do the same. 
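 + * (Concretely: a caller-supplied size of 88 bytes is treated as 84 + * before handing off to kcompat_sys_statfs64().)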
+ * For more details, see commit: + * + * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and + * fstatfs64") + */ + if (sz == 88) + sz = 84; + + return kcompat_sys_statfs64(pathname, sz, buf); +} + +COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz, + struct compat_statfs64 __user *, buf) +{ + /* see aarch32_statfs64 */ + if (sz == 88) + sz = 84; + + return kcompat_sys_fstatfs64(fd, sz, buf); +} + +/* + * Note: off_4k is always in units of 4K. If we can't do the + * requested offset because it is not page-aligned, we return -EINVAL. + */ +COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off_4k) +{ + if (off_4k & (~PAGE_MASK >> 12)) + return -EINVAL; + + off_4k >>= (PAGE_SHIFT - 12); + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k); +} + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define arg_u32p(name) u32, name##_hi, u32, name##_lo +#else +#define arg_u32p(name) u32, name##_lo, u32, name##_hi +#endif + +#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo) + +COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf, + size_t, count, u32, __pad, arg_u32p(pos)) +{ + return ksys_pread64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd, + const char __user *, buf, size_t, count, u32, __pad, + arg_u32p(pos)) +{ + return ksys_pwrite64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname, + u32, __pad, arg_u32p(length)) +{ + return ksys_truncate(pathname, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad, + arg_u32p(length)) +{ + return ksys_ftruncate(fd, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad, + arg_u32p(offset), size_t, count) +{ + return ksys_readahead(fd, arg_u64(offset), count); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags, + arg_u32p(offset), arg_u32p(nbytes)) +{ + return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes), + flags); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); +} diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index a6109825eeb97133b8cbccc9a7fe98d2f11f14e4..505a2d75cbd9f294bc1c4b62f8d6cdc00e321b9e 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -31,7 +31,7 @@ #include static long -__do_compat_cache_op(unsigned long start, unsigned long end) +__do_a32_cache_op(unsigned long start, unsigned long end) { long ret; @@ -53,25 +53,24 @@ __do_compat_cache_op(unsigned long start, unsigned long end) } static inline long -do_compat_cache_op(unsigned long start, unsigned long end, int flags) +do_a32_cache_op(unsigned long start, unsigned long end, int flags) { if (end < start || flags) return -EINVAL; - if (!access_ok(VERIFY_READ, (const void __user *)start, end - start)) + if (!access_ok((const void __user *)start, end - start)) return -EFAULT; - return __do_compat_cache_op(start, end); + return __do_a32_cache_op(start, end); } /* * Handle all unrecognised system calls. 
*/ -long compat_arm_syscall(struct pt_regs *regs) +long a32_arm_syscall(struct pt_regs *regs, int scno) { siginfo_t info; - unsigned int no = regs->regs[7]; - switch (no) { + switch (scno) { /* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. There is no alignment requirement on either address; @@ -87,7 +86,7 @@ long compat_arm_syscall(struct pt_regs *regs) * the specified region). */ case __ARM_NR_compat_cacheflush: - return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); + return do_a32_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); case __ARM_NR_compat_set_tls: current->thread.uw.tp_value = regs->regs[0]; @@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs) default: /* - * Calls 9f00xx..9f07ff are defined to return -ENOSYS + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS * if not implemented, rather than raising SIGILL. This * way the calling program can gracefully determine whether * a feature is supported. */ - if ((no & 0xffff) <= 0x7ff) + if (scno < __ARM_NR_COMPAT_END) return -ENOSYS; break; } @@ -117,8 +116,8 @@ long compat_arm_syscall(struct pt_regs *regs) info.si_errno = 0; info.si_code = ILL_ILLTRP; info.si_addr = (void __user *)instruction_pointer(regs) - - (compat_thumb_mode(regs) ? 2 : 4); + (a32_thumb_mode(regs) ? 2 : 4); - arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no); + arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno); return 0; } diff --git a/arch/arm64/kernel/sys_ilp32.c b/arch/arm64/kernel/sys_ilp32.c new file mode 100644 index 0000000000000000000000000000000000000000..ce82c297da3b7c7566e411ab9f44cc2f7fd2248c --- /dev/null +++ b/arch/arm64/kernel/sys_ilp32.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * AArch64- ILP32 specific system calls implementation + * Copyright (C) 2018 Marvell. + */ + +#define __SYSCALL_COMPAT + +#include +#include +#include + +#include + +/* + * AARCH32 requires 4-page alignment for shared memory, + * but AARCH64 - only 1 page. This is the only difference + * between compat and native sys_shmat(). So ILP32 just pick + * AARCH64 version. + */ +#define __arm64_compat_sys_shmat __arm64_sys_shmat + +/* + * ILP32 needs special handling for some ptrace requests. + */ +#define __arm64_sys_ptrace __arm64_compat_sys_ptrace + +/* + * Using AARCH32 interface for syscalls that take 64-bit + * parameters in registers. + */ +#define __arm64_compat_sys_fadvise64_64 __arm64_compat_sys_aarch32_fadvise64_64 +#define __arm64_compat_sys_fallocate __arm64_compat_sys_aarch32_fallocate +#define __arm64_compat_sys_ftruncate64 __arm64_compat_sys_aarch32_ftruncate64 +#define __arm64_compat_sys_pread64 __arm64_compat_sys_aarch32_pread64 +#define __arm64_compat_sys_pwrite64 __arm64_compat_sys_aarch32_pwrite64 +#define __arm64_compat_sys_readahead __arm64_compat_sys_aarch32_readahead +#define __arm64_compat_sys_sync_file_range2 __arm64_compat_sys_aarch32_sync_file_range2 +#define __arm64_compat_sys_truncate64 __arm64_compat_sys_aarch32_truncate64 +#define __arm64_sys_mmap2 __arm64_compat_sys_aarch32_mmap2 + +/* + * Using AARCH32 interface for syscalls that take the size of + * struct statfs as an argument, as it's calculated differently + * in kernel and user spaces. + */ +#define __arm64_compat_sys_fstatfs64 __arm64_compat_sys_aarch32_fstatfs64 +#define __arm64_compat_sys_statfs64 __arm64_compat_sys_aarch32_statfs64 + +/* + * Using custom wrapper for rt_sigreturn() to handle custom + * struct rt_sigframe. 
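 + * ILP32's rt_sigframe (see signal_ilp32.c) pairs a compat_siginfo and + * a compat sigset with the native 64-bit struct sigcontext, so neither + * the AArch32 nor the native rt_sigreturn layout matches.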
+ */ +#define __arm64_compat_sys_rt_sigreturn __arm64_compat_sys_ilp32_rt_sigreturn + +/* + * Wrappers to pass the pt_regs argument. + */ +#define sys_personality sys_arm64_personality + +asmlinkage long sys_ni_syscall(const struct pt_regs *); +#define __arm64_sys_ni_syscall sys_ni_syscall + +#undef __SYSCALL +#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); +#include + +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, + +const syscall_fn_t ilp32_sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, +#include +}; diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 032d223128815bbfdf0b9f5e4b85a22ce2ab7e2f..e36ad39d8d14951ac2d0f4838d0a6e2b902cd84e 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -8,21 +8,21 @@ #include #include +#include #include #include #include #include -long compat_arm_syscall(struct pt_regs *regs); - +long a32_arm_syscall(struct pt_regs *regs, int scno); long sys_ni_syscall(void); -asmlinkage long do_ni_syscall(struct pt_regs *regs) +static long do_ni_syscall(struct pt_regs *regs, int scno) { -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 long ret; - if (is_compat_task()) { - ret = compat_arm_syscall(regs); + if (is_a32_compat_task()) { + ret = a32_arm_syscall(regs, scno); if (ret != -ENOSYS) return ret; } @@ -47,7 +47,14 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; ret = __invoke_syscall(regs, syscall_fn); } else { - ret = do_ni_syscall(regs); + if (scno == 425) + ret = __arm64_sys_io_uring_setup(regs); + else if (likely(scno == 426)) + ret = __arm64_sys_io_uring_enter(regs); + else if (scno == 427) + ret = __arm64_sys_io_uring_register(regs); + else + ret = do_ni_syscall(regs, scno); } regs->regs[0] = ret; @@ -61,6 +68,35 @@ static inline bool has_syscall_work(unsigned long flags) int syscall_trace_enter(struct pt_regs *regs); void syscall_trace_exit(struct pt_regs *regs); +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static void cortex_a76_erratum_1463225_svc_handler(void) +{ + u32 reg, val; + + if (!unlikely(test_thread_flag(TIF_SINGLESTEP))) + return; + + if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225))) + return; + + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1); + reg = read_sysreg(mdscr_el1); + val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE; + write_sysreg(val, mdscr_el1); + asm volatile("msr daifclr, #8"); + isb(); + + /* We will have taken a single-step exception by this point */ + + write_sysreg(reg, mdscr_el1); + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); +} +#else +static void cortex_a76_erratum_1463225_svc_handler(void) { } +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, const syscall_fn_t syscall_table[]) { @@ -69,8 +105,9 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, regs->orig_x0 = regs->regs[0]; regs->syscallno = scno; + cortex_a76_erratum_1463225_svc_handler(); + user_exit_irqoff(); local_daif_restore(DAIF_PROCCTX); - user_exit(); if (has_syscall_work(flags)) { /* set default errno for user-issued syscall(-1) */ @@ -124,16 +161,39 @@ static inline void sve_user_discard(void) sve_user_disable(); } +#ifdef CONFIG_ARM64_ILP32 +static inline void delouse_pt_regs(struct pt_regs *regs) +{ + regs->regs[0] &= UINT_MAX; + regs->regs[1] &= 
UINT_MAX; + regs->regs[2] &= UINT_MAX; + regs->regs[3] &= UINT_MAX; + regs->regs[4] &= UINT_MAX; + regs->regs[5] &= UINT_MAX; + regs->regs[6] &= UINT_MAX; + regs->regs[7] &= UINT_MAX; +} +#endif + asmlinkage void el0_svc_handler(struct pt_regs *regs) { + const syscall_fn_t *t = sys_call_table; + +#ifdef CONFIG_ARM64_ILP32 + if (is_ilp32_compat_task()) { + t = ilp32_sys_call_table; + delouse_pt_regs(regs); + } +#endif + sve_user_discard(); - el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); + el0_svc_common(regs, regs->regs[8], __NR_syscalls, t); } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 asmlinkage void el0_svc_compat_handler(struct pt_regs *regs) { el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, - compat_sys_call_table); + a32_sys_call_table); } #endif diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index f258636273c9588dca80420341a3627cfdd61d2c..902d0f0b4f7babaf2f5bc0fdba119c853758b4e7 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -64,6 +64,32 @@ unsigned long profile_pc(struct pt_regs *regs) } EXPORT_SYMBOL(profile_pc); +static void dummy_clock_access(struct timespec64 *ts) +{ + ts->tv_sec = 0; + ts->tv_nsec = 0; +} + +static clock_access_fn __read_persistent_clock = dummy_clock_access; + +void read_persistent_clock64(struct timespec64 *ts) +{ + __read_persistent_clock(ts); +} + +int register_persistent_clock(clock_access_fn read_persistent) +{ + /* Only allow the clockaccess functions to be registered once */ + if (__read_persistent_clock == dummy_clock_access) { + if (read_persistent) + __read_persistent_clock = read_persistent; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL(register_persistent_clock); + void __init time_init(void) { u32 arch_timer_rate; diff --git a/arch/arm64/kernel/tlbflush.c b/arch/arm64/kernel/tlbflush.c new file mode 100644 index 0000000000000000000000000000000000000000..9a51941b18b3b343ee1a7f355a2d055a80a9bc9b --- /dev/null +++ b/arch/arm64/kernel/tlbflush.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 FUJITSU LIMITED + +#include +#include +#include + +struct tlb_args { + struct vm_area_struct *ta_vma; + unsigned long ta_start; + unsigned long ta_end; + unsigned long ta_stride; + bool ta_last_level; +}; + + +unsigned int disable_tlbflush_is; + +#define FLAG_TLBFLUSH_RANGE 0x0001 +#define FLAG_TLBFLUSH_PAGE 0x0002 +#define FLAG_TLBFLUSH_SWITCH 0x0004 +#define FLAG_TLBFLUSH_MM 0x0008 + +#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG) \ +bool test_tlbi_ipi_##flag(void) \ +{ \ + return !!(disable_tlbflush_is & FLAG_TLBFLUSH_##FLAG); \ +} + +#define TEST_TLBFLUSH_FLAG(flag, FLAG) \ +static __always_inline TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG) + +TEST_TLBFLUSH_FLAG(mm, MM) +TEST_TLBFLUSH_FLAG(page, PAGE) +TEST_TLBFLUSH_FLAG(range, RANGE) +TEST_TLBFLUSH_FLAG_EXTERN(switch, SWITCH) + +#ifdef CONFIG_ARM64_TLBI_IPI +static int __init disable_tlbflush_is_setup(char *str) +{ + unsigned int flags = 0; + + while (isalpha(*str)) { + if (!strncmp(str, "range,", 6)) { + str += 6; + flags |= FLAG_TLBFLUSH_RANGE; + continue; + } + + if (!strncmp(str, "page,", 5)) { + str += 5; + flags |= FLAG_TLBFLUSH_PAGE; + continue; + } + + if (!strncmp(str, "switch,", 7)) { + str += 7; + flags |= FLAG_TLBFLUSH_SWITCH; + continue; + } + + if (!strcmp(str, "mm")) { + str += 2; + flags |= FLAG_TLBFLUSH_MM; + break; + } + + pr_warn("disable_tlbflush_is: Error, unknown flag\n"); + return 0; + } + + disable_tlbflush_is = flags; + pr_info("DISABLE_TLBFLUSH_IS : [%s] [%s] [%s] [%s]\n", 
+ test_tlbi_ipi_page() ? "PAGE" : "NA", + test_tlbi_ipi_range() ? "RANGE" : "NA", + test_tlbi_ipi_switch() ? "SWITCH" : "NA", + test_tlbi_ipi_mm() ? "MM" : "NA"); + + return 0; +} +early_param("disable_tlbflush_is", disable_tlbflush_is_setup); +#endif + +static inline void __flush_tlb_mm(struct mm_struct *mm) +{ + unsigned long asid = __TLBI_VADDR(0, ASID(mm)); + + dsb(ishst); + __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); + dsb(ish); +} + +static inline void ipi_flush_tlb_mm(void *arg) +{ + struct mm_struct *mm = arg; + + local_flush_tlb_mm(mm); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + if (unlikely(test_tlbi_ipi_mm())) + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, + (void *)mm, true); + else + __flush_tlb_mm(mm); +} + +static inline void __flush_tlb_page_nosync(unsigned long addr) +{ + dsb(ishst); + __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); +} + +static inline void __local_flush_tlb_page_nosync(unsigned long addr) +{ + dsb(nshst); + __tlbi(vale1, addr); + __tlbi_user(vale1, addr); + dsb(nsh); +} + +static inline void ipi_flush_tlb_page_nosync(void *arg) +{ + unsigned long addr = *(unsigned long *)arg; + + __local_flush_tlb_page_nosync(addr); +} + +void flush_tlb_page_nosync(struct vm_area_struct *vma, unsigned long uaddr) +{ + unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); + + if (unlikely(test_tlbi_ipi_page())) + on_each_cpu_mask(mm_cpumask(vma->vm_mm), + ipi_flush_tlb_page_nosync, &addr, true); + else + __flush_tlb_page_nosync(addr); +} + +static inline void ___flush_tlb_range(unsigned long start, unsigned long end, + unsigned long stride, bool last_level) +{ + unsigned long addr; + + dsb(ishst); + for (addr = start; addr < end; addr += stride) { + if (last_level) { + __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); + } else { + __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); + } + } + dsb(ish); +} + +static inline void __local_flush_tlb_range(unsigned long addr, bool last_level) +{ + dsb(nshst); + if (last_level) { + __tlbi(vale1, addr); + __tlbi_user(vale1, addr); + } else { + __tlbi(vae1, addr); + __tlbi_user(vae1, addr); + } + dsb(nsh); +} + +static inline void ipi_flush_tlb_range(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + unsigned long addr; + + for (addr = ta->ta_start; addr < ta->ta_end; addr += ta->ta_stride) + __local_flush_tlb_range(addr, ta->ta_last_level); +} + +void __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long stride, bool last_level) +{ + unsigned long asid = ASID(vma->vm_mm); + + start = round_down(start, stride); + end = round_up(end, stride); + + if ((end - start) >= (MAX_TLBI_OPS * stride)) { + flush_tlb_mm(vma->vm_mm); + return; + } + + /* Convert the stride into units of 4k */ + stride >>= 12; + + start = __TLBI_VADDR(start, asid); + end = __TLBI_VADDR(end, asid); + + + if (unlikely(test_tlbi_ipi_range())) { + struct tlb_args ta = { + .ta_start = start, + .ta_end = end, + .ta_stride = stride, + .ta_last_level = last_level, + }; + + on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, + &ta, true); + } else + ___flush_tlb_range(start, end, stride, last_level); +} diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 0825c4a856e33da0d794f403fd2a6464543467c3..bf937d334b812fba28cec9c19181d128817ee419 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -272,20 +272,29 @@ void store_cpu_topology(unsigned int cpuid) if (mpidr & MPIDR_UP_BITMASK) return; - /* Create cpu topology 
mapping based on MPIDR. */ - if (mpidr & MPIDR_MT_BITMASK) { - /* Multiprocessor system : Multi-threads per core */ - cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; - } else { - /* Multiprocessor system : Single-thread per core */ - cpuid_topo->thread_id = -1; - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; + /* + * This would be the place to create cpu topology based on MPIDR. + * + * However, it cannot be trusted to depict the actual topology; some + * pieces of the architecture enforce an artificial cap on Aff0 values + * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an + * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up + * having absolutely no relationship to the actual underlying system + * topology, and cannot be reasonably used as core / package ID. + * + * If the MT bit is set, Aff0 *could* be used to define a thread ID, but + * we still wouldn't be able to obtain a sane core ID. This means we + * need to entirely ignore MPIDR for any topology deduction. + */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); + + /* Some PHYTIUM FT2000PLUS platform firmware has no PPTT table */ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_FT_2000PLUS + && cpu_to_node(cpuid) == NUMA_NO_NODE) { + cpuid_topo->thread_id = 0; + cpuid_topo->package_id = 0; } pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", @@ -339,18 +348,81 @@ void remove_cpu_topology(unsigned int cpu) clear_cpu_topology(cpu); } +#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK +#define MAX_MPIDR_SIBLINGS 100 +static struct cpumask mpidr_siblings_cpumask_map[MAX_MPIDR_SIBLINGS]; + +static void +__update_mpidr_siblings_masks(unsigned int cpu, int sibling, bool remove) +{ + if (WARN_ON_ONCE(sibling < 0 || sibling >= MAX_MPIDR_SIBLINGS)) + return; + + if (remove) + cpumask_clear_cpu(cpu, &mpidr_siblings_cpumask_map[sibling]); + else + cpumask_set_cpu(cpu, &mpidr_siblings_cpumask_map[sibling]); +} + +void update_mpidr_siblings_masks(unsigned int cpu, bool remove) +{ + int sibling, affinity; + u32 midr_impl = MIDR_IMPLEMENTOR(read_cpuid_id()); + u64 mpidr = read_cpuid_mpidr(); + bool mt = mpidr & MPIDR_MT_BITMASK; + + switch (midr_impl) { + case ARM_CPU_IMP_HISI: + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + affinity = MPIDR_AFFINITY_LEVEL(mpidr, 2); + sibling = ((affinity >> 3) - 1) / 2; + __update_mpidr_siblings_masks(cpu, sibling, remove); + } + break; + default: + break; + } +} + +void arch_get_preferred_sibling_cpumask(unsigned int sibling, + cpumask_var_t dstp) +{ + if (!dstp) + return; + + if (sibling >= MAX_MPIDR_SIBLINGS) { + cpumask_clear(dstp); + return; + } + + cpumask_copy(dstp, &mpidr_siblings_cpumask_map[sibling]); +} +EXPORT_SYMBOL(arch_get_preferred_sibling_cpumask); +#endif + #ifdef CONFIG_ACPI +static bool __init acpi_cpu_is_threaded(int cpu) +{ + int is_threaded = acpi_pptt_cpu_is_thread(cpu); + + /* + * if the PPTT doesn't have thread information, assume a homogeneous + * machine and return the current CPU's thread state. 
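 + * (acpi_pptt_cpu_is_thread() signals missing information with a + * negative return value; the fallback below is the MT bit of this + * CPU's MPIDR.)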
+ */ + if (is_threaded < 0) + is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; + + return !!is_threaded; +} + /* * Propagate the topology information of the processor_topology_node tree to the * cpu_topology array. */ static int __init parse_acpi_topology(void) { - bool is_threaded; int cpu, topology_id; - is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; - for_each_possible_cpu(cpu) { int i, cache_id; @@ -358,7 +430,7 @@ static int __init parse_acpi_topology(void) if (topology_id < 0) return topology_id; - if (is_threaded) { + if (acpi_cpu_is_threaded(cpu)) { cpu_topology[cpu].thread_id = topology_id; topology_id = find_acpi_cpu_topology(cpu, 1); cpu_topology[cpu].core_id = topology_id; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 039e9ff379cc45dc620006b081cb6812aa7ea76e..cfe9b486281639e9828c9fdc47b6a137d26af6b1 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -101,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { struct stackframe frame; - int skip; + int skip = 0; pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); + if (regs) { + if (user_mode(regs)) + return; + skip = 1; + } + if (!tsk) tsk = current; @@ -125,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.graph = tsk->curr_ret_stack; #endif - skip = !!regs; printk("Call trace:\n"); do { /* skip until specified stack frame */ @@ -175,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs) return ret; print_modules(); - __show_regs(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); + show_regs(regs); - if (!user_mode(regs)) { - dump_backtrace(regs, tsk); + if (!user_mode(regs)) dump_instr(KERN_EMERG, regs); - } return ret; } @@ -310,10 +314,12 @@ static int call_undef_hook(struct pt_regs *regs) int (*fn)(struct pt_regs *regs, u32 instr) = NULL; void __user *pc = (void __user *)instruction_pointer(regs); - if (!user_mode(regs)) - return 1; - - if (compat_thumb_mode(regs)) { + if (!user_mode(regs)) { + __le32 instr_le; + if (probe_kernel_address((__force __le32 *)pc, instr_le)) + goto exit; + instr = le32_to_cpu(instr_le); + } else if (a32_thumb_mode(regs)) { /* 16-bit Thumb instruction */ __le16 instr_le; if (get_user(instr_le, (__le16 __user *)pc)) @@ -352,6 +358,9 @@ void force_signal_inject(int signal, int code, unsigned long address) const char *desc; struct pt_regs *regs = current_pt_regs(); + if (WARN_ON(!user_mode(regs))) + return; + clear_siginfo(&info); switch (signal) { @@ -406,14 +415,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (call_undef_hook(regs) == 0) return; + BUG_ON(!user_mode(regs)); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); } -void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) -{ - sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); -} - #define __user_cache_maint(insn, address, res) \ if (address >= user_addr_max()) { \ res = -EFAULT; \ @@ -605,7 +610,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) handler[reason], smp_processor_id(), esr, esr_get_class_string(esr)); - die("Oops - bad mode", regs, 0); local_daif_mask(); panic("bad mode"); } @@ -713,13 +717,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) asmlinkage void 
do_serror(struct pt_regs *regs, unsigned int esr) { - nmi_enter(); + const bool was_in_nmi = in_nmi(); + + if (!was_in_nmi) + nmi_enter(); /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) arm64_serror_panic(regs, esr); - nmi_exit(); + if (!was_in_nmi) + nmi_exit(); } void __pte_error(const char *file, int line, unsigned long val) diff --git a/arch/arm64/kernel/vdso-ilp32/.gitignore b/arch/arm64/kernel/vdso-ilp32/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..61806c3fd68b06bb602a8ee2ad4554f4d39c8998 --- /dev/null +++ b/arch/arm64/kernel/vdso-ilp32/.gitignore @@ -0,0 +1,2 @@ +vdso-ilp32.lds +vdso-ilp32-offsets.h diff --git a/arch/arm64/kernel/vdso-ilp32/Makefile b/arch/arm64/kernel/vdso-ilp32/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c41eb5cf4e00beefb53ac585e4103275d418aa24 --- /dev/null +++ b/arch/arm64/kernel/vdso-ilp32/Makefile @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0+ + +# +# Building a vDSO image for AArch64. +# +# Author: Will Deacon +# Heavily based on the vDSO Makefiles for other archs. +# + +obj-ilp32-vdso := gettimeofday-ilp32.o note-ilp32.o sigreturn-ilp32.o + +# Build rules +targets := $(obj-ilp32-vdso) vdso-ilp32.so vdso-ilp32.so.dbg +obj-ilp32-vdso := $(addprefix $(obj)/, $(obj-ilp32-vdso)) + +ccflags-y := -shared -fno-common -fno-builtin -fno-stack-protector +ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -nostdlib -Wl,-soname=linux-ilp32-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + +# Force -O2 to avoid libgcc dependencies +CFLAGS_REMOVE_gettimeofday-ilp32.o = -pg -Os +CFLAGS_gettimeofday-ilp32.o = -O2 -mcmodel=tiny -mabi=ilp32 + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n +KASAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n + +# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared +# down to collect2, resulting in silent corruption of the vDSO image. +ccflags-y += -Wl,-shared + +obj-y += vdso-ilp32.o +extra-y += vdso-ilp32.lds +CPPFLAGS_vdso-ilp32.lds += -P -C -U$(ARCH) -mabi=ilp32 + +# Force dependency (incbin is bad) +$(obj)/vdso-ilp32.o : $(obj)/vdso-ilp32.so + +# Link rule for the .so file, .lds has to be first +$(obj)/vdso-ilp32.so.dbg: $(src)/vdso-ilp32.lds $(obj-ilp32-vdso) + $(call if_changed,vdso-ilp32ld) + +# Strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# Generate VDSO offsets using helper script +gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ +define cmd_vdsosym + $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ +endef + +include/generated/vdso-ilp32-offsets.h: $(obj)/vdso-ilp32.so.dbg FORCE + $(call if_changed,vdsosym) + +# Assembly rules for the .S files +#$(obj-ilp32-vdso): %.o: $(src)/../vdso/$(subst -ilp32,,%.S) +# $(call if_changed_dep,vdso-ilp32as) + +$(obj)/gettimeofday-ilp32.o: $(src)/../vdso/gettimeofday.c + $(call if_changed_dep,vdso-ilp32cc) + +$(obj)/note-ilp32.o: $(src)/../vdso/note.S + $(call if_changed_dep,vdso-ilp32as) + +# This one should be fine because ILP32 uses the same generic +# __NR_rt_sigreturn syscall number. 
+$(obj)/sigreturn-ilp32.o: $(src)/../vdso/sigreturn.S + $(call if_changed_dep,vdso-ilp32as) + +# Actual build commands +quiet_cmd_vdso-ilp32ld = VDSOILP32L $@ + cmd_vdso-ilp32ld = $(CC) $(c_flags) -mabi=ilp32 -Wl,-n -Wl,-T $^ -o $@ +quiet_cmd_vdso-ilp32as = VDSOILP32C $@ + cmd_vdso-ilp32cc= $(CC) $(c_flags) -mabi=ilp32 -c -o $@ $< +quiet_cmd_vdso-ilp32as = VDSOILP32A $@ + cmd_vdso-ilp32as = $(CC) $(a_flags) -mabi=ilp32 -c -o $@ $< + +# Install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso-ilp32.so: $(obj)/vdso-ilp32.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso-ilp32.so diff --git a/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.S b/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.S new file mode 100644 index 0000000000000000000000000000000000000000..dee65ab796626f664f1e47810df3eb9ea23c4e85 --- /dev/null +++ b/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +/* + * Copyright (C) 2012 ARM Limited + * Author: Will Deacon + */ + +#include +#include +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_ilp32_start, vdso_ilp32_end + .balign PAGE_SIZE +vdso_ilp32_start: + .incbin "arch/arm64/kernel/vdso-ilp32/vdso-ilp32.so" + .balign PAGE_SIZE +vdso_ilp32_end: + + .previous diff --git a/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.lds.S b/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..9f14666feef721c5d41acd342cb801f3ec5f2deb --- /dev/null +++ b/arch/arm64/kernel/vdso-ilp32/vdso-ilp32.lds.S @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +/* + * GNU linker script for the VDSO library. + * + * Copyright (C) 2012 ARM Limited + * Author: Will Deacon + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include + +SECTIONS +{ + PROVIDE(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + + .text : { *(.text*) } :text =0xd503201f + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_4.12 { + global: + __kernel_rt_sigreturn; + __kernel_gettimeofday; + __kernel_clock_gettime; + __kernel_clock_getres; + local: *; + }; +} + +/* + * Make the sigreturn code visible to the kernel. 
+ */ +VDSO_sigtramp_ilp32 = __kernel_rt_sigreturn; diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 2d419006ad4330c5a76cb0cf9372fdb0c76cf279..65252f0dd24c19af994b49fa1089dd4eafd32c4e 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -37,8 +37,13 @@ #include #include -extern char vdso_start[], vdso_end[]; -static unsigned long vdso_pages __ro_after_init; +extern char vdso_lp64_start[], vdso_lp64_end[]; +static unsigned long vdso_lp64_pages __ro_after_init; + +#ifdef CONFIG_ARM64_ILP32 +extern char vdso_ilp32_start[], vdso_ilp32_end[]; +static unsigned long vdso_ilp32_pages __ro_after_init; +#endif /* * The vDSO data page. @@ -49,7 +54,7 @@ static union { } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_AARCH32_EL0 /* * Create and map the vectors page for AArch32 tasks. */ @@ -108,13 +113,13 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) return PTR_ERR_OR_ZERO(ret); } -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_AARCH32_EL0 */ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { unsigned long new_size = new_vma->vm_end - new_vma->vm_start; - unsigned long vdso_size = vdso_end - vdso_start; + unsigned long vdso_size = vdso_lp64_end - vdso_lp64_start; if (vdso_size != new_size) return -EINVAL; @@ -124,7 +129,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, return 0; } -static struct vm_special_mapping vdso_spec[2] __ro_after_init = { +static struct vm_special_mapping vdso_lp64_spec[2] __ro_after_init = { { .name = "[vvar]", }, @@ -134,9 +139,23 @@ static struct vm_special_mapping vdso_spec[2] __ro_after_init = { }, }; -static int __init vdso_init(void) +#ifdef CONFIG_ARM64_ILP32 +static struct vm_special_mapping vdso_ilp32_spec[2] __ro_after_init = { + { + .name = "[vvar]", + }, + { + .name = "[vdso]", + }, +}; +#endif + +static int __init vdso_init(char *vdso_start, char *vdso_end, + unsigned long *vdso_pagesp, + struct vm_special_mapping *vdso_spec) { int i; + unsigned long vdso_pages; struct page **vdso_pagelist; unsigned long pfn; @@ -146,8 +165,7 @@ static int __init vdso_init(void) } vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; - pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", - vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data); + *vdso_pagesp = vdso_pages; /* Allocate the vDSO pagelist, plus a page for the data. 
*/ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), @@ -170,7 +188,22 @@ static int __init vdso_init(void) return 0; } -arch_initcall(vdso_init); + +static int __init vdso_lp64_init(void) +{ + return vdso_init(vdso_lp64_start, vdso_lp64_end, + &vdso_lp64_pages, vdso_lp64_spec); +} +arch_initcall(vdso_lp64_init); + +#ifdef CONFIG_ARM64_ILP32 +static int __init vdso_ilp32_init(void) +{ + return vdso_init(vdso_ilp32_start, vdso_ilp32_end, + &vdso_ilp32_pages, vdso_ilp32_spec); +} +arch_initcall(vdso_ilp32_init); +#endif int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) @@ -178,8 +211,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_text_len, vdso_mapping_len; void *ret; + unsigned long pages = vdso_lp64_pages; + struct vm_special_mapping *vdso_spec = vdso_lp64_spec; + +#ifdef CONFIG_ARM64_ILP32 + if (is_ilp32_compat_task()) { + pages = vdso_ilp32_pages; + vdso_spec = vdso_ilp32_spec; + } +#endif - vdso_text_len = vdso_pages << PAGE_SHIFT; + vdso_text_len = pages << PAGE_SHIFT; /* Be sure to map the data page */ vdso_mapping_len = vdso_text_len + PAGE_SIZE; @@ -226,12 +268,16 @@ void update_vsyscall(struct timekeeper *tk) smp_wmb(); vdso_data->use_syscall = use_syscall; + vdso_data->vdso_fix = tk->tkr_mono.clock->archdata.vdso_fix; vdso_data->xtime_coarse_sec = tk->xtime_sec; vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; + /* Read without the seqlock held by clock_getres() */ + WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution); + if (!use_syscall) { /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index b215c712d89704e3247951fde01bcd4224b0c190..9952cec089f4a54baa0618f32663723499259ab2 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,12 +12,20 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ccflags-y := -shared -fno-common -fno-builtin +ccflags-y := -shared -fno-common -fno-builtin -fno-stack-protector +ccflags-y += -DDISABLE_BRANCH_PROFILING ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +# Force -O2 to avoid libgcc dependencies +CFLAGS_REMOVE_gettimeofday.o = -pg -Os +CFLAGS_gettimeofday.o = -O2 -mcmodel=tiny + # Disable gcov profiling for VDSO code GCOV_PROFILE := n +KASAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared # down to collect2, resulting in silent corruption of the vDSO image. 
@@ -49,15 +57,9 @@ endef include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE $(call if_changed,vdsosym) -# Assembly rules for the .S files -$(obj-vdso): %.o: %.S FORCE - $(call if_changed_dep,vdsoas) - # Actual build commands quiet_cmd_vdsold = VDSOL $@ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ -quiet_cmd_vdsoas = VDSOA $@ - cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< # Install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S deleted file mode 100644 index c39872a7b03c3e152315781c753f3e6b186524ed..0000000000000000000000000000000000000000 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Userspace implementations of gettimeofday() and friends. - * - * Copyright (C) 2012 ARM Limited - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * Author: Will Deacon - */ - -#include -#include -#include - -#define NSEC_PER_SEC_LO16 0xca00 -#define NSEC_PER_SEC_HI16 0x3b9a - -vdso_data .req x6 -seqcnt .req w7 -w_tmp .req w8 -x_tmp .req x8 - -/* - * Conventions for macro arguments: - * - An argument is write-only if its name starts with "res". - * - All other arguments are read-only, unless otherwise specified. - */ - - .macro seqcnt_acquire -9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT] - tbnz seqcnt, #0, 9999b - dmb ishld - .endm - - .macro seqcnt_check fail - dmb ishld - ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT] - cmp w_tmp, seqcnt - b.ne \fail - .endm - - .macro syscall_check fail - ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL] - cbnz w_tmp, \fail - .endm - - .macro get_nsec_per_sec res - mov \res, #NSEC_PER_SEC_LO16 - movk \res, #NSEC_PER_SEC_HI16, lsl #16 - .endm - - /* - * Returns the clock delta, in nanoseconds left-shifted by the clock - * shift. - */ - .macro get_clock_shifted_nsec res, cycle_last, mult - /* Read the virtual counter. */ - isb - mrs x_tmp, cntvct_el0 - /* Calculate cycle delta and convert to ns. */ - sub \res, x_tmp, \cycle_last - /* We can only guarantee 56 bits of precision. */ - movn x_tmp, #0xff00, lsl #48 - and \res, x_tmp, \res - mul \res, \res, \mult - .endm - - /* - * Returns in res_{sec,nsec} the REALTIME timespec, based on the - * "wall time" (xtime) and the clock_mono delta. - */ - .macro get_ts_realtime res_sec, res_nsec, \ - clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec - add \res_nsec, \clock_nsec, \xtime_nsec - udiv x_tmp, \res_nsec, \nsec_to_sec - add \res_sec, \xtime_sec, x_tmp - msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec - .endm - - /* - * Returns in res_{sec,nsec} the timespec based on the clock_raw delta, - * used for CLOCK_MONOTONIC_RAW. - */ - .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec - udiv \res_sec, \clock_nsec, \nsec_to_sec - msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec - .endm - - /* sec and nsec are modified in place. */ - .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec - /* Add timespec. 
*/ - add \sec, \sec, \ts_sec - add \nsec, \nsec, \ts_nsec - - /* Normalise the new timespec. */ - cmp \nsec, \nsec_to_sec - b.lt 9999f - sub \nsec, \nsec, \nsec_to_sec - add \sec, \sec, #1 -9999: - cmp \nsec, #0 - b.ge 9998f - add \nsec, \nsec, \nsec_to_sec - sub \sec, \sec, #1 -9998: - .endm - - .macro clock_gettime_return, shift=0 - .if \shift == 1 - lsr x11, x11, x12 - .endif - stp x10, x11, [x1, #TSPEC_TV_SEC] - mov x0, xzr - ret - .endm - - .macro jump_slot jumptable, index, label - .if (. - \jumptable) != 4 * (\index) - .error "Jump slot index mismatch" - .endif - b \label - .endm - - .text - -/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */ -ENTRY(__kernel_gettimeofday) - .cfi_startproc - adr vdso_data, _vdso_data - /* If tv is NULL, skip to the timezone code. */ - cbz x0, 2f - - /* Compute the time of day. */ -1: seqcnt_acquire - syscall_check fail=4f - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - seqcnt_check fail=1b - - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - - /* Convert ns to us. */ - mov x13, #1000 - lsl x13, x13, x12 - udiv x11, x11, x13 - stp x10, x11, [x0, #TVAL_TV_SEC] -2: - /* If tz is NULL, return 0. */ - cbz x1, 3f - ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] - stp w4, w5, [x1, #TZ_MINWEST] -3: - mov x0, xzr - ret -4: - /* Syscall fallback. */ - mov x8, #__NR_gettimeofday - svc #0 - ret - .cfi_endproc -ENDPROC(__kernel_gettimeofday) - -#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE - -/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */ -ENTRY(__kernel_clock_gettime) - .cfi_startproc - cmp w0, #JUMPSLOT_MAX - b.hi syscall - adr vdso_data, _vdso_data - adr x_tmp, jumptable - add x_tmp, x_tmp, w0, uxtw #2 - br x_tmp - - ALIGN -jumptable: - jump_slot jumptable, CLOCK_REALTIME, realtime - jump_slot jumptable, CLOCK_MONOTONIC, monotonic - b syscall - b syscall - jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw - jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse - jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse - - .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1) - .error "Wrong jumptable size" - .endif - - ALIGN -realtime: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - seqcnt_check fail=realtime - - /* All computations are done with left-shifted nsecs. */ - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -monotonic: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] - seqcnt_check fail=monotonic - - /* All computations are done with left-shifted nsecs. 
*/ - lsl x4, x4, x12 - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - - add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -monotonic_raw: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_raw_mult, w12 = cs_shift */ - ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] - ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] - seqcnt_check fail=monotonic_raw - - /* All computations are done with left-shifted nsecs. */ - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - get_ts_clock_raw res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, nsec_to_sec=x9 - - add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -realtime_coarse: - seqcnt_acquire - ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] - seqcnt_check fail=realtime_coarse - clock_gettime_return - - ALIGN -monotonic_coarse: - seqcnt_acquire - ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] - ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] - seqcnt_check fail=monotonic_coarse - - /* Computations are done in (non-shifted) nsecs. */ - get_nsec_per_sec res=x9 - add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 - clock_gettime_return - - ALIGN -syscall: /* Syscall fallback. */ - mov x8, #__NR_clock_gettime - svc #0 - ret - .cfi_endproc -ENDPROC(__kernel_clock_gettime) - -/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ -ENTRY(__kernel_clock_getres) - .cfi_startproc - cmp w0, #CLOCK_REALTIME - ccmp w0, #CLOCK_MONOTONIC, #0x4, ne - ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne - b.ne 1f - - ldr x2, 5f - b 2f -1: - cmp w0, #CLOCK_REALTIME_COARSE - ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne - b.ne 4f - ldr x2, 6f -2: - cbz x1, 3f - stp xzr, x2, [x1] - -3: /* res == NULL. */ - mov w0, wzr - ret - -4: /* Syscall fallback. */ - mov x8, #__NR_clock_getres - svc #0 - ret -5: - .quad CLOCK_REALTIME_RES -6: - .quad CLOCK_COARSE_RES - .cfi_endproc -ENDPROC(__kernel_clock_getres) diff --git a/arch/arm64/kernel/vdso/gettimeofday.c b/arch/arm64/kernel/vdso/gettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..3140ffb5e89de87dd6e9df2ebdf57ed0e38f1760 --- /dev/null +++ b/arch/arm64/kernel/vdso/gettimeofday.c @@ -0,0 +1,386 @@ +/* + * Userspace implementations of gettimeofday() and friends. + * + * Copyright (C) 2017 Cavium, Inc. + * Copyright (C) 2012 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * Author: Will Deacon + * Rewriten into C by: Andrew Pinski + */ + +#include +#include +#include +#include +#include +#include + +#ifdef __ILP32__ +#undef BITS_PER_LONG +#define BITS_PER_LONG 32 +#endif + +#include + +extern struct vdso_data _vdso_data; + +static notrace int gettimeofday_fallback(struct timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("x1") = _tz; + register struct timeval *tv asm("x0") = _tv; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_gettimeofday; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (tv), "r" (tz), "r" (nr) + : "memory"); + + return ret; +} + +static notrace long clock_gettime_fallback(clockid_t _clkid, + struct timespec *_ts) +{ + register struct timespec *ts asm("x1") = _ts; + register clockid_t clkid asm("x0") = _clkid; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_clock_gettime; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static notrace int clock_getres_fallback(clockid_t _clkid, + struct timespec *_ts) +{ + register struct timespec *ts asm("x1") = _ts; + register clockid_t clkid asm("x0") = _clkid; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_clock_getres; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static notrace u32 vdso_read_begin(struct vdso_data *vd) +{ + u32 seq; + + do { + seq = READ_ONCE(vd->tb_seq_count); + + if ((seq & 1) == 0) + break; + + asm volatile ("" : : : "memory"); + } while (true); + + smp_rmb(); /* Pairs with second smp_wmb in update_vsyscall */ + return seq; +} + +static notrace u32 vdso_read_retry(struct vdso_data *vd, u32 start) +{ + u32 seq; + + smp_rmb(); /* Pairs with first smp_wmb in update_vsyscall */ + seq = READ_ONCE(vd->tb_seq_count); + return seq != start; +} + + +/* + * Returns the clock delta, in nanoseconds left-shifted by the clock + * shift. + */ +static notrace u64 get_clock_shifted_nsec(u64 cycle_last, u64 mult) +{ + u64 res; + + /* Read the virtual counter. */ + isb(); + asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory"); + if (_vdso_data.vdso_fix) { + u64 new; + int retries = 50; + + asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory"); + while (unlikely((new - res) >> 5) && retries) { + asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory"); + asm volatile("mrs %0, cntvct_el0" : "=r" (new) :: "memory"); + retries--; + } + } + + res = res - cycle_last; + /* We can only guarantee 56 bits of precision. */ + res &= ~(0xff00ull<<48); + + return res * mult; +} + +/* + * Fake address dependency from the value computed from the counter + * register to subsequent data page accesses so that the sequence + * locking also orders the read of the counter. + */ +static notrace struct vdso_data *arch_counter_vdso_data_ordering(struct vdso_data *vd, u64 res) +{ + struct vdso_data *vd_res = vd; + u64 tmp; + + asm volatile( + " and %0, %1, xzr\n" \ + " add %2, %2, %0\n" \ + : "=r" (tmp) \ + : "r"(res), "r"(vd_res)); + + return vd_res; +} + +/* Code size doesn't matter (vdso is 4k/16k/64k anyway) and this is faster. 
*/ + +static __always_inline notrace int do_realtime(struct vdso_data *vd, + struct timespec *ts) +{ + u32 seq, cs_mono_mult, cs_shift; + u64 ns, sec, cycle_last; + + do { + seq = vdso_read_begin(vd); + + if (vd->use_syscall) + return -1; + + cycle_last = vd->cs_cycle_last; + + cs_mono_mult = vd->cs_mono_mult; + cs_shift = vd->cs_shift; + + sec = vd->xtime_clock_sec; + ns = vd->xtime_clock_nsec; + + ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult); + vd = arch_counter_vdso_data_ordering(vd, ns); + } while (unlikely(vdso_read_retry(vd, seq))); + + ns >>= cs_shift; + ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + +static notrace int do_monotonic(struct vdso_data *vd, + struct timespec *ts) +{ + u32 seq, cs_mono_mult, cs_shift; + u64 ns, cycle_last, sec; + + do { + seq = vdso_read_begin(vd); + + if (vd->use_syscall) + return 1; + + cycle_last = vd->cs_cycle_last; + + cs_mono_mult = vd->cs_mono_mult; + cs_shift = vd->cs_shift; + + sec = vd->xtime_clock_sec; + ns = vd->xtime_clock_nsec; + + sec += vd->wtm_clock_sec; + ns += vd->wtm_clock_nsec << cs_shift; + + ns += get_clock_shifted_nsec(cycle_last, cs_mono_mult); + vd = arch_counter_vdso_data_ordering(vd, ns); + } while (unlikely(vdso_read_retry(vd, seq))); + + ns >>= cs_shift; + + ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + +static notrace int do_monotonic_raw(struct vdso_data *vd, + struct timespec *ts) +{ + u32 seq, cs_raw_mult, cs_shift; + u64 ns, sec, cycle_last; + + do { + seq = vdso_read_begin(vd); + + if (vd->use_syscall) + return -1; + + cycle_last = vd->cs_cycle_last; + + cs_raw_mult = vd->cs_raw_mult; + cs_shift = vd->cs_shift; + + sec = vd->raw_time_sec; + ns = vd->raw_time_nsec; + + ns += get_clock_shifted_nsec(cycle_last, cs_raw_mult); + vd = arch_counter_vdso_data_ordering(vd, ns); + } while (unlikely(vdso_read_retry(vd, seq))); + + ns >>= cs_shift; + ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + + +static notrace void do_realtime_coarse(struct vdso_data *vd, + struct timespec *ts) +{ + u32 seq; + u64 ns, sec; + + do { + seq = vdso_read_begin(vd); + + sec = vd->xtime_coarse_sec; + ns = vd->xtime_coarse_nsec; + + } while (unlikely(vdso_read_retry(vd, seq))); + + ts->tv_sec = sec; + ts->tv_nsec = ns; +} + +static notrace void do_monotonic_coarse(struct vdso_data *vd, + struct timespec *ts) +{ + u32 seq; + u64 ns, sec, wtm_sec, wtm_ns; + + do { + + seq = vdso_read_begin(vd); + + sec = vd->xtime_coarse_sec; + ns = vd->xtime_coarse_nsec; + + wtm_sec = vd->wtm_clock_sec; + wtm_ns = vd->wtm_clock_nsec; + + } while (unlikely(vdso_read_retry(vd, seq))); + + sec += wtm_sec; + ns += wtm_ns; + ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; +} + +notrace int __kernel_clock_gettime(clockid_t clock, struct timespec *ts) +{ + struct vdso_data *vd = &_vdso_data; + + switch (clock) { + case CLOCK_REALTIME: + if (do_realtime(vd, ts)) + goto fallback; + break; + case CLOCK_MONOTONIC: + if (do_monotonic(vd, ts)) + goto fallback; + break; + case CLOCK_MONOTONIC_RAW: + if (do_monotonic_raw(vd, ts)) + goto fallback; + break; + case CLOCK_REALTIME_COARSE: + do_realtime_coarse(vd, ts); + break; + case CLOCK_MONOTONIC_COARSE: + do_monotonic_coarse(vd, ts); + break; + default: + goto fallback; + } + + return 0; +fallback: + return clock_gettime_fallback(clock, ts); +} + + + +notrace int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz) +{ + 
struct vdso_data *vd = &_vdso_data; + + if (likely(tv != NULL)) { + struct timespec ts; + + if (do_realtime(vd, &ts)) + return gettimeofday_fallback(tv, tz); + + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; + } + + if (unlikely(tz != NULL)) { + tz->tz_minuteswest = vd->tz_minuteswest; + tz->tz_dsttime = vd->tz_dsttime; + } + + return 0; +} + + +int __kernel_clock_getres(clockid_t clock_id, struct timespec *res) +{ + struct vdso_data *vd = &_vdso_data; + u64 ns; + + if (clock_id == CLOCK_REALTIME || + clock_id == CLOCK_MONOTONIC || + clock_id == CLOCK_MONOTONIC_RAW) + ns = vd->hrtimer_res; + else if (clock_id == CLOCK_REALTIME_COARSE || + clock_id == CLOCK_MONOTONIC_COARSE) + ns = LOW_RES_NSEC; + else + return clock_getres_fallback(clock_id, res); + + if (res) { + res->tv_sec = 0; + res->tv_nsec = ns; + } + + return 0; +} diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S index 82379a70ef03ff4296cb8d7fc04226a3d68e1392..a40ae24854308771ff32c4a5ae58fdc44103ce78 100644 --- a/arch/arm64/kernel/vdso/vdso.S +++ b/arch/arm64/kernel/vdso/vdso.S @@ -21,12 +21,12 @@ #include #include - .globl vdso_start, vdso_end + .globl vdso_lp64_start, vdso_lp64_end .section .rodata .balign PAGE_SIZE -vdso_start: +vdso_lp64_start: .incbin "arch/arm64/kernel/vdso/vdso.so" .balign PAGE_SIZE -vdso_end: +vdso_lp64_end: .previous diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index beca249bc2f394ca3592a76d868b60ff275ddb5b..b3e6c4d5b75c8f8bc008edc3cc146d6fdc43f83b 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -39,6 +39,13 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. + */ + /DISCARD/ : { + *(.note.GNU-stack .note.gnu.property) + } .note : { *(.note.*) } :text :note . = ALIGN(16); @@ -59,7 +66,6 @@ SECTIONS PROVIDE(end = .); /DISCARD/ : { - *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 605d1b60469c2488f28a097965a4636d46bb9d52..2b965dd67881cca7ba78775ff9c85df997727e58 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -99,7 +99,8 @@ SECTIONS *(.discard) *(.discard.*) *(.interp .dynamic) - *(.dynsym .dynstr .hash) + *(.dynsym .dynstr .hash .gnu.hash) + *(.eh_frame) } . = KIMAGE_VADDR + TEXT_OFFSET; @@ -153,9 +154,6 @@ SECTIONS *(.altinstructions) __alt_instructions_end = .; } - .altinstr_replacement : { - *(.altinstr_replacement) - } . = ALIGN(PAGE_SIZE); __inittext_end = .; @@ -176,12 +174,12 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) - .rela : ALIGN(8) { + .rela.dyn : ALIGN(8) { *(.rela .rela*) } - __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR); - __rela_size = SIZEOF(.rela); + __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); + __rela_size = SIZEOF(.rela.dyn); . 
= ALIGN(SEGMENT_ALIGN); __initdata_end = .; @@ -253,7 +251,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, "Entry trampoline text too big") #endif /* diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c new file mode 100644 index 0000000000000000000000000000000000000000..5884abdaeb9de0be7e76c763afc872422958e261 --- /dev/null +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Detect hard lockups on a system + * + * Note: Most of this code is borrowed heavily from the perf hardlockup + * detector, so thanks to Don for the initial implementation. + */ + +#define pr_fmt(fmt) "SDEI NMI watchdog: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +/* We use the secure physical timer as SDEI NMI watchdog timer */ +#define SDEI_NMI_WATCHDOG_HWIRQ 29 + +static int sdei_watchdog_event_num; +static bool disable_sdei_nmi_watchdog; +static bool sdei_watchdog_registered; +static DEFINE_PER_CPU(ktime_t, last_check_time); + +int watchdog_nmi_enable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return -EINVAL; + +#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP + refresh_hld_last_timestamp(); +#endif + + __this_cpu_write(last_check_time, ktime_get_mono_fast_ns()); + sdei_api_set_secure_timer_period(watchdog_thresh); + + ret = sdei_api_event_enable(sdei_watchdog_event_num); + if (ret) { + pr_err("Enable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); + return ret; + } + + return 0; +} + +void watchdog_nmi_disable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + ret = sdei_api_event_disable(sdei_watchdog_event_num); + if (ret) + pr_err("Disable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); +} + +static int sdei_watchdog_callback(u32 event, + struct pt_regs *regs, void *arg) +{ + ktime_t delta, now = ktime_get_mono_fast_ns(); + + delta = now - __this_cpu_read(last_check_time); + __this_cpu_write(last_check_time, now); + + /* + * Set delta to 4/5 of the actual watchdog threshold period so the + * hrtimer is guaranteed to fire at least once within the real + * watchdog threshold. 
+ */ + if (delta < watchdog_thresh * (u64)NSEC_PER_SEC * 4 / 5) { + pr_err(FW_BUG "SDEI Watchdog event triggered too soon, " + "time to last check:%lld ns\n", delta); + return 0; + } + + watchdog_hardlockup_check(regs); + + return 0; +} +NOKPROBE_SYMBOL(sdei_watchdog_callback); + +static void sdei_nmi_watchdog_bind(void *data) +{ + int ret; + + ret = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (ret < 0) + pr_err("SDEI bind failed on cpu%d, return %d\n", + smp_processor_id(), ret); +} + +static int __init disable_sdei_nmi_watchdog_setup(char *str) +{ + disable_sdei_nmi_watchdog = true; + return 1; +} +__setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); + +void sdei_watchdog_clear_eoi(void) +{ + if (sdei_watchdog_registered) + sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); +} + +int __init watchdog_nmi_probe(void) +{ + int ret; + + if (disable_sdei_nmi_watchdog) + return -EINVAL; + + if (!is_hyp_mode_available()) { + pr_err("Disable SDEI NMI Watchdog in VM\n"); + return -EINVAL; + } + + sdei_watchdog_event_num = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (sdei_watchdog_event_num < 0) { + pr_err("Bind interrupt failed. Firmware may not support SDEI !\n"); + return sdei_watchdog_event_num; + } + + /* + * After we introduced 'sdei_api_set_secure_timer_period', we disselect + * 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP'. So we need to make sure that + * firmware can set the period of the secure timer and the timer + * interrupt doesn't trigger too soon. + */ + if (sdei_api_set_secure_timer_period(watchdog_thresh)) { + pr_err("Firmware doesn't support setting the secure timer period, please update your BIOS !\n"); + return -EINVAL; + } + + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); + + ret = sdei_event_register(sdei_watchdog_event_num, + sdei_watchdog_callback, NULL); + if (ret) { + pr_err("SDEI Watchdog register callback failed\n"); + return ret; + } + + sdei_watchdog_registered = true; + pr_info("SDEI Watchdog registered successfully\n"); + + return 0; +} diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 0f2a135ba15bbe5bd66d148325d3ed227b1fe072..258f9fd91ba588525c7c7994e07e2f7bb0c86feb 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -15,11 +15,13 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvsched.o kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o @@ -34,6 +36,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-debug.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hisi_cpu_model.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o kvm-$(CONFIG_KVM_ARM_PMU) 
+= $(KVM)/arm/pmu.o diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 00d422336a45225ea8f9b16c6a99ebf042ebf68c..4e722d73a3c34f0473bef535192049b53e04ab2e 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -112,7 +112,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) { bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY); - unsigned long mdscr; + unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2; trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug); @@ -208,6 +208,10 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; + /* Write mdcr_el2 changes since vcpu_load on VHE systems */ + if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2) + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index a6c9fbaeaefcdd71d0ea70c8eeb89c55692f8b66..1358e67f4f78fa8c668f1c9cda81412fd30f274b 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -36,6 +36,7 @@ #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } +#define DFX_STAT(x) { #x, offsetof(struct kvm_vcpu_stat, x), DFX_STAT_U64 } struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT(hvc_exit_stat), @@ -44,6 +45,39 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT(mmio_exit_user), VCPU_STAT(mmio_exit_kernel), VCPU_STAT(exits), + { "vcpu_stat", 0, KVM_STAT_DFX }, + { NULL } +}; + +/* debugfs entries of Detail For vcpu stat EXtension */ +struct dfx_kvm_stats_debugfs_item dfx_debugfs_entries[] = { + DFX_STAT(pid), + DFX_STAT(hvc_exit_stat), + DFX_STAT(wfe_exit_stat), + DFX_STAT(wfi_exit_stat), + DFX_STAT(mmio_exit_user), + DFX_STAT(mmio_exit_kernel), + DFX_STAT(exits), + DFX_STAT(fp_asimd_exit_stat), + DFX_STAT(irq_exit_stat), + DFX_STAT(sys64_exit_stat), + DFX_STAT(mabt_exit_stat), + DFX_STAT(fail_entry_exit_stat), + DFX_STAT(internal_error_exit_stat), + DFX_STAT(unknown_ec_exit_stat), + DFX_STAT(cp15_32_exit_stat), + DFX_STAT(cp15_64_exit_stat), + DFX_STAT(cp14_mr_exit_stat), + DFX_STAT(cp14_ls_exit_stat), + DFX_STAT(cp14_64_exit_stat), + DFX_STAT(smc_exit_stat), + DFX_STAT(sve_exit_stat), + DFX_STAT(debug_exit_stat), + DFX_STAT(steal), + DFX_STAT(st_max), + DFX_STAT(utime), + DFX_STAT(stime), + DFX_STAT(gtime), { NULL } }; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e5e741bfffe19140d3551334a6b425bf4c2b7672..d86dfd2d61d71e8c93bd42cffe06cf4ee14d76a6 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -22,8 +22,6 @@ #include #include -#include - #include #include #include @@ -33,6 +31,8 @@ #include #include +#include + #define CREATE_TRACE_POINTS #include "trace.h" @@ -73,6 +73,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) */ vcpu_set_reg(vcpu, 0, ~0UL); kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + vcpu->stat.smc_exit_stat++; return 1; } @@ -135,6 +136,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run) run->exit_reason = KVM_EXIT_DEBUG; run->debug.arch.hsr = hsr; + vcpu->stat.debug_exit_stat++; switch (ESR_ELx_EC(hsr)) { case ESR_ELx_EC_WATCHPT_LOW: @@ -163,6 +165,7 @@ static int 
kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) hsr, esr_get_class_string(hsr)); kvm_inject_undefined(vcpu); + vcpu->stat.unknown_ec_exit_stat++; return 1; } @@ -170,6 +173,7 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) { /* Until SVE is supported for guests: */ kvm_inject_undefined(vcpu); + vcpu->stat.sve_exit_stat++; return 1; } @@ -226,7 +230,10 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run) exit_handle_fn exit_handler; exit_handler = kvm_get_exit_handler(vcpu); + trace_kvm_trap_enter(vcpu->vcpu_id, + kvm_vcpu_trap_get_class(vcpu)); handled = exit_handler(vcpu, run); + trace_kvm_trap_exit(vcpu->vcpu_id); } /* @@ -267,6 +274,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: + vcpu->stat.irq_exit_stat++; return 1; case ARM_EXCEPTION_EL1_SERROR: /* We may still need to return for single-step */ @@ -283,11 +291,21 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, * is pre-empted by kvm_reboot()'s shutdown call. */ run->exit_reason = KVM_EXIT_FAIL_ENTRY; + vcpu->stat.fail_entry_exit_stat++; return 0; + case ARM_EXCEPTION_IL: + /* + * We attempted an illegal exception return. Guest state must + * have been corrupted somehow. Give up. + */ + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + vcpu->stat.fail_entry_exit_stat++; + return -EINVAL; default: kvm_pr_unimpl("Unsupported exception type: %d", exception_index); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->stat.internal_error_exit_stat++; return 0; } } diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 2fabc2dc1966f3b0ccce43cd7782a98975332546..ea710f674cb6b4f41e55e9c261ee3be6c980b7c8 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,6 +10,7 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o @@ -19,7 +20,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o -obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o # KVM code is run at a different exception code with a different map, so # compiler instrumentation that inserts callbacks or checks into the code may diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 24b4fbafe3e4ac9f9c30aaa2da04c16a798bf9ff..454012f16c00d81bddee7f2e1443ed689a90f179 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -111,6 +111,10 @@ el1_hvc_guest: /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ ARM_SMCCC_ARCH_WORKAROUND_2) + cbz w1, wa_epilogue + + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_3) cbnz w1, el1_trap #ifdef CONFIG_ARM64_SSBD @@ -162,6 +166,20 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + /* Check for illegal exception return, otherwise panic */ + mrs x0, spsr_el2 + + /* if this was something else, then panic! 
*/ + tst x0, #PSR_IL_BIT + b.eq __hyp_panic + + /* Let's attempt a recovery from the illegal exception return */ + get_vcpu_ptr x1, x0 + mov x0, #ARM_EXCEPTION_IL + b __guest_exit + + el2_error: ldp x0, x1, [sp], #16 @@ -240,7 +258,7 @@ ENTRY(__kvm_hyp_vector) invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t - invalid_vect el2h_sync_invalid // Synchronous EL2h + valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h @@ -315,4 +333,64 @@ ENTRY(__smccc_workaround_1_smc_start) ldp x0, x1, [sp, #(8 * 2)] add sp, sp, #(8 * 4) ENTRY(__smccc_workaround_1_smc_end) + +ENTRY(__smccc_workaround_3_smc_start) + esb + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 + smc #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +ENTRY(__smccc_workaround_3_smc_end) + +ENTRY(__spectre_bhb_loop_k8_start) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #8 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k8_end) + +ENTRY(__spectre_bhb_loop_k24_start) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #24 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k24_end) + +ENTRY(__spectre_bhb_loop_k32_start) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #32 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +ENTRY(__spectre_bhb_loop_k32_end) + +ENTRY(__spectre_bhb_clearbhb_start) + esb + clearbhb + isb +ENTRY(__spectre_bhb_clearbhb_end) #endif diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c deleted file mode 100644 index 603e1ee83e8924708be61c9d92416d126d9887f2..0000000000000000000000000000000000000000 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include - -u32 __hyp_text __init_stage2_translation(void) -{ - u64 val = VTCR_EL2_FLAGS; - u64 parange; - u64 tmp; - - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS - * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while - * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2... - */ - parange = read_sysreg(id_aa64mmfr0_el1) & 7; - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; - val |= parange << 16; - - /* Compute the actual PARange... */ - switch (parange) { - case 0: - parange = 32; - break; - case 1: - parange = 36; - break; - case 2: - parange = 40; - break; - case 3: - parange = 42; - break; - case 4: - parange = 44; - break; - case 5: - default: - parange = 48; - break; - } - - /* - * ... 
and clamp it to 40 bits, unless we have some braindead - * HW that implements less than that. In all cases, we'll - * return that value for the rest of the kernel to decide what - * to do. - */ - val |= 64 - (parange > 40 ? 40 : parange); - - /* - * Check the availability of Hardware Access Flag / Dirty Bit - * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf; - if (tmp) - val |= VTCR_EL2_HA; - - /* - * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS - * bit in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf; - val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ? - VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; - - write_sysreg(val, vtcr_el2); - - return parange; -} diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca46153d79154bae1b0833231245129752484362..6473c7c96b14d11e324fc27ccb8d03ea51c7a2c6 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -16,13 +16,16 @@ */ #include +#include #include #include #include #include +#include #include +#include #include #include #include @@ -32,11 +35,20 @@ #include #include #include +#include /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { - if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) + /* + * When the system doesn't support FP/SIMD, we cannot rely on + * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an + * abort on the very first access to FP and thus we should never + * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always + * trap the accesses. + */ + if (!system_supports_fpsimd() || + vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_FP_HOST); @@ -107,6 +119,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) write_sysreg(kvm_get_hyp_vector(), vbar_el1); } +NOKPROBE_SYMBOL(activate_traps_vhe); static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) { @@ -141,11 +154,15 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) static void deactivate_traps_vhe(void) { - extern char vectors[]; /* kernel exception vectors */ + const char *host_vectors = vectors; write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); - write_sysreg(vectors, vbar_el1); + + if (!arm64_kernel_unmapped_at_el0()) + host_vectors = __this_cpu_read(this_cpu_vector); + write_sysreg(host_vectors, vbar_el1); } +NOKPROBE_SYMBOL(deactivate_traps_vhe); static void __hyp_text __deactivate_traps_nvhe(void) { @@ -157,7 +174,7 @@ static void __hyp_text __deactivate_traps_nvhe(void) mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT; write_sysreg(mdcr_el2, mdcr_el2); - write_sysreg(HCR_RW, hcr_el2); + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } @@ -198,7 +215,7 @@ void deactivate_traps_vhe_put(void) static void __hyp_text __activate_vm(struct kvm *kvm) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); } static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) @@ -263,7 +280,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) return false; /* Translation failed, back to guest */ /* Convert PAR to HPFAR format */ - *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4; + *hpfar = PAR_TO_HPFAR(tmp); return true; } @@ -402,8 +419,10 @@ static bool __hyp_text 
fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) * undefined instruction exception to the guest. */ if (system_supports_fpsimd() && - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) { + vcpu->stat.fp_asimd_exit_stat++; return __hyp_switch_fpsimd(vcpu); + } if (!__populate_fault_info(vcpu)) return true; @@ -486,6 +505,44 @@ static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) #endif } +/** + * Disable host events, enable guest events + */ +static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenclr_el0); + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenset_el0); + + return (pmu->events_host || pmu->events_guest); +} + +/** + * Disable guest events, enable host events + */ +static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_host_data *host; + struct kvm_pmu_events *pmu; + + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + pmu = &host->pmu_events; + + if (pmu->events_guest) + write_sysreg(pmu->events_guest, pmcntenclr_el0); + + if (pmu->events_host) + write_sysreg(pmu->events_host, pmcntenset_el0); +} + /* Switch to the guest for VHE systems running in EL2 */ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { @@ -529,20 +586,35 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } +NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; + bool pmu_switch_needed; u64 exit_code; + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. 
+ */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + pmr_sync(); + } + vcpu = kern_hyp_va(vcpu); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; + pmu_switch_needed = __pmu_switch_to_guest(host_ctxt); + __sysreg_save_state_nvhe(host_ctxt); __activate_traps(vcpu); @@ -589,6 +661,13 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + if (pmu_switch_needed) + __pmu_switch_to_host(host_ctxt); + + /* Returning to host will clear PSR.I, remask PMR if needed */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQOFF); + return exit_code; } @@ -636,6 +715,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, read_sysreg_el2(esr), read_sysreg_el2(far), read_sysreg(hpfar_el2), par, vcpu); } +NOKPROBE_SYMBOL(__hyp_call_panic_vhe); void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 9ce223944983b803e3b4161d57a5ff6e17e8ec5b..c52a8451637c483f949b40f931e3a0ba3a99351e 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -52,7 +53,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { - ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); @@ -98,12 +98,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); __sysreg_save_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { @@ -152,8 +154,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) { + u64 pstate = ctxt->gp_regs.regs.pstate; + u64 mode = pstate & PSR_AA32_MODE_MASK; + + /* + * Safety check to ensure we're setting the CPU up to enter the guest + * in a less privileged mode. + * + * If we are attempting a return to EL2 or higher in AArch64 state, + * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that + * we'll take an illegal exception state exception immediately after + * the ERET to the guest. Attempts to return to AArch32 Hyp will + * result in an illegal exception return because EL2's execution state + * is determined by SCR_EL3.RW. 
+ */ + if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t) + pstate = PSR_MODE_EL2h | PSR_IL_BIT; + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); + write_sysreg_el2(pstate, spsr); if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); @@ -171,12 +190,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); __sysreg_restore_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) { @@ -288,3 +309,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) vcpu->arch.sysregs_loaded_on_cpu = false; } + +void __hyp_text __kvm_enable_ssbs(void) +{ + u64 tmp; + + asm volatile( + "mrs %0, sctlr_el2\n" + "orr %0, %0, %1\n" + "msr sctlr_el2, %0" + : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS)); +} diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 131c7772703c290c3cd27c1a539090928dfc8e73..c35e9b99b0c5024275696e46ebf475d13ba424d7 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -15,14 +15,19 @@ * along with this program. If not, see . */ +#include + #include #include #include -static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, + unsigned long *flags) { u64 val; + local_irq_save(*flags); + /* * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and * most TLB operations target EL2/EL0. In order to affect the @@ -30,16 +35,17 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so * let's flip TGE before executing the TLB operation. */ - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); val = read_sysreg(hcr_el2); val &= ~HCR_TGE; write_sysreg(val, hcr_el2); isb(); } -static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, + unsigned long *flags) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); isb(); } @@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest, __tlb_switch_to_guest_vhe, ARM64_HAS_VIRT_HOST_EXTN); -static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, + unsigned long flags) { /* * We're done with the TLB operation, let's restore the host's @@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) */ write_sysreg(0, vttbr_el2); write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); + isb(); + local_irq_restore(flags); } -static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm) +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, + unsigned long flags) { write_sysreg(0, vttbr_el2); } @@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host, void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { + unsigned long flags; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); /* * We could do so much better if we had the VA as well. 
@@ -117,36 +129,40 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) if (!has_vhe() && icache_is_vpipt()) __flush_icache_all(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) { + unsigned long flags; + dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); __tlbi(vmalls12e1is); dsb(ish); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu) { struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); + unsigned long flags; /* Switch to requested VMID */ - __tlb_switch_to_guest()(kvm); + __tlb_switch_to_guest()(kvm, &flags); __tlbi(vmalle1); + asm volatile("ic iallu"); dsb(nsh); isb(); - __tlb_switch_to_host()(kvm); + __tlb_switch_to_host()(kvm, flags); } void __hyp_text __kvm_flush_vm_context(void) diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..e71d00bb5271bac281d8068ce10b5d8f36a5cce3 --- /dev/null +++ b/arch/arm64/kvm/pmu.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Arm Limited + * Author: Andrew Murray + */ +#include +#include +#include + +/* + * Given the perf event attributes and system type, determine + * if we are going to need to switch counters at guest entry/exit. + */ +static bool kvm_pmu_switch_needed(struct perf_event_attr *attr) +{ + /** + * With VHE the guest kernel runs at EL1 and the host at EL2, + * where user (EL0) is excluded then we have no reason to switch + * counters. + */ + if (has_vhe() && attr->exclude_user) + return false; + + /* Only switch if attributes are different */ + return (attr->exclude_host != attr->exclude_guest); +} + +/* + * Add events to track that we may want to switch at guest entry/exit + * time. 
+ */ +void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + if (!kvm_pmu_switch_needed(attr)) + return; + + if (!attr->exclude_host) + ctx->pmu_events.events_host |= set; + if (!attr->exclude_guest) + ctx->pmu_events.events_guest |= set; +} + +/* + * Stop tracking events + */ +void kvm_clr_pmu_events(u32 clr) +{ + struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); + + ctx->pmu_events.events_host &= ~clr; + ctx->pmu_events.events_guest &= ~clr; +} + +#define PMEVTYPER_READ_CASE(idx) \ + case idx: \ + return read_sysreg(pmevtyper##idx##_el0) + +#define PMEVTYPER_WRITE_CASE(idx) \ + case idx: \ + write_sysreg(val, pmevtyper##idx##_el0); \ + break + +#define PMEVTYPER_CASES(readwrite) \ + PMEVTYPER_##readwrite##_CASE(0); \ + PMEVTYPER_##readwrite##_CASE(1); \ + PMEVTYPER_##readwrite##_CASE(2); \ + PMEVTYPER_##readwrite##_CASE(3); \ + PMEVTYPER_##readwrite##_CASE(4); \ + PMEVTYPER_##readwrite##_CASE(5); \ + PMEVTYPER_##readwrite##_CASE(6); \ + PMEVTYPER_##readwrite##_CASE(7); \ + PMEVTYPER_##readwrite##_CASE(8); \ + PMEVTYPER_##readwrite##_CASE(9); \ + PMEVTYPER_##readwrite##_CASE(10); \ + PMEVTYPER_##readwrite##_CASE(11); \ + PMEVTYPER_##readwrite##_CASE(12); \ + PMEVTYPER_##readwrite##_CASE(13); \ + PMEVTYPER_##readwrite##_CASE(14); \ + PMEVTYPER_##readwrite##_CASE(15); \ + PMEVTYPER_##readwrite##_CASE(16); \ + PMEVTYPER_##readwrite##_CASE(17); \ + PMEVTYPER_##readwrite##_CASE(18); \ + PMEVTYPER_##readwrite##_CASE(19); \ + PMEVTYPER_##readwrite##_CASE(20); \ + PMEVTYPER_##readwrite##_CASE(21); \ + PMEVTYPER_##readwrite##_CASE(22); \ + PMEVTYPER_##readwrite##_CASE(23); \ + PMEVTYPER_##readwrite##_CASE(24); \ + PMEVTYPER_##readwrite##_CASE(25); \ + PMEVTYPER_##readwrite##_CASE(26); \ + PMEVTYPER_##readwrite##_CASE(27); \ + PMEVTYPER_##readwrite##_CASE(28); \ + PMEVTYPER_##readwrite##_CASE(29); \ + PMEVTYPER_##readwrite##_CASE(30) + +/* + * Read a value direct from PMEVTYPER where idx is 0-30 + * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). + */ +static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) +{ + switch (idx) { + PMEVTYPER_CASES(READ); + case ARMV8_PMU_CYCLE_IDX: + return read_sysreg(pmccfiltr_el0); + default: + WARN_ON(1); + } + + return 0; +} + +/* + * Write a value direct to PMEVTYPER where idx is 0-30 + * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). 
+ */ +static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) +{ + switch (idx) { + PMEVTYPER_CASES(WRITE); + case ARMV8_PMU_CYCLE_IDX: + write_sysreg(val, pmccfiltr_el0); + break; + default: + WARN_ON(1); + } +} + +/* + * Modify ARMv8 PMU events to include EL0 counting + */ +static void kvm_vcpu_pmu_enable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + typer = kvm_vcpu_pmu_read_evtype_direct(counter); + typer &= ~ARMV8_PMU_EXCLUDE_EL0; + kvm_vcpu_pmu_write_evtype_direct(counter, typer); + } +} + +/* + * Modify ARMv8 PMU events to exclude EL0 counting + */ +static void kvm_vcpu_pmu_disable_el0(unsigned long events) +{ + u64 typer; + u32 counter; + + for_each_set_bit(counter, &events, 32) { + typer = kvm_vcpu_pmu_read_evtype_direct(counter); + typer |= ARMV8_PMU_EXCLUDE_EL0; + kvm_vcpu_pmu_write_evtype_direct(counter, typer); + } +} + +/* + * On VHE ensure that only guest events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_guest); + kvm_vcpu_pmu_disable_el0(events_host); +} + +/* + * On VHE ensure that only host events have EL0 counting enabled + */ +void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_host_data *host; + u32 events_guest, events_host; + + if (!has_vhe()) + return; + + host_ctxt = vcpu->arch.host_cpu_context; + host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + events_guest = host->pmu_events.events_guest; + events_host = host->pmu_events.events_host; + + kvm_vcpu_pmu_enable_el0(events_host); + kvm_vcpu_pmu_disable_el0(events_guest); +} diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c index 7a5173ea227648b4ec534ee60d557c8e5894cf73..4c2e96ef306ed9e81cabdb6dd6eb49303d668128 100644 --- a/arch/arm64/kvm/regmap.c +++ b/arch/arm64/kvm/regmap.c @@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v) switch (spsr_idx) { case KVM_SPSR_SVC: write_sysreg_el1(v, spsr); + break; case KVM_SPSR_ABT: write_sysreg(v, spsr_abt); + break; case KVM_SPSR_UND: write_sysreg(v, spsr_und); + break; case KVM_SPSR_IRQ: write_sysreg(v, spsr_irq); + break; case KVM_SPSR_FIQ: write_sysreg(v, spsr_fiq); + break; } } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e37c78bbe1ca9cc750c354412ef64092af5fc5dc..dfc4b067fb5005d477fe85f7e55eea226ae7c76a 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -26,13 +26,18 @@ #include +#include #include #include #include #include #include +#include #include +/* Maximum phys_shift supported for any VM on this host */ +static u32 kvm_ipa_limit; + /* * ARMv8 Reset Values */ @@ -85,6 +90,9 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VCPU_EVENTS: r = 1; break; + case KVM_CAP_ARM_VM_IPA_SIZE: + r = kvm_ipa_limit; + break; default: r = 0; } @@ -99,16 +107,36 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset * values. 
+ * + * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT + * ioctl or as part of handling a request issued by another VCPU in the PSCI + * handling code. In the first case, the VCPU will not be loaded, and in the + * second case the VCPU will be loaded. Because this function operates purely + * on the memory-backed values of system registers, we want to do a full put if + * we were loaded (handling a request) and load the values back at the end of + * the function. Otherwise we leave the state alone. In both cases, we + * disable preemption around the vcpu reset as we would otherwise race with + * preempt notifiers which also call put/load. */ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; + int ret = -EINVAL; + bool loaded; + + /* Reset PMU outside of the non-preemptible section */ + kvm_pmu_vcpu_reset(vcpu); + + preempt_disable(); + loaded = (vcpu->cpu != -1); + if (loaded) + kvm_arch_vcpu_put(vcpu); switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { if (!cpu_has_32bit_el1()) - return -EINVAL; + goto out; cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; @@ -123,13 +151,134 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); - /* Reset PMU */ - kvm_pmu_vcpu_reset(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. + */ + if (vcpu->arch.reset_state.reset) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } /* Default workaround setup is enabled (if supported) */ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; /* Reset timer */ - return kvm_timer_vcpu_reset(vcpu); + ret = kvm_timer_vcpu_reset(vcpu); +out: + if (loaded) + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + return ret; +} + +void kvm_set_ipa_limit(void) +{ + unsigned int ipa_max, pa_max, va_max, parange; + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7; + pa_max = id_aa64mmfr0_parange_to_phys_shift(parange); + + /* Clamp the IPA limit to the PA size supported by the kernel */ + ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max; + /* + * Since our stage2 table is dependent on the stage1 page table code, + * we must always honor the following condition: + * + * Number of levels in Stage1 >= Number of levels in Stage2. + * + * So clamp the ipa limit further down to limit the number of levels. + * Since we can concatenate up to 16 tables at entry level, we could + * go up to 4 bits above the maximum VA addressable with the current + * number of levels. + */ + va_max = PGDIR_SHIFT + PAGE_SHIFT - 3; + va_max += 4; + + if (va_max < ipa_max) + ipa_max = va_max; + + /* + * If the final limit is lower than the real physical address + * limit of the CPUs, report the reason. + */ + if (ipa_max < pa_max) + pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", + (va_max < pa_max) ?
"Virtual" : "Physical"); + + WARN(ipa_max < KVM_PHYS_SHIFT, + "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); + kvm_ipa_limit = ipa_max; + kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); +} + +/* + * Configure the VTCR_EL2 for this VM. The VTCR value is common + * across all the physical CPUs on the system. We use system wide + * sanitised values to fill in different fields, except for Hardware + * Management of Access Flags. HA Flag is set unconditionally on + * all CPUs, as it is safe to run with or without the feature and + * the bit is RES0 on CPUs that don't support it. + */ +int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +{ + u64 vtcr = VTCR_EL2_FLAGS; + u32 parange, phys_shift; + u8 lvls; + + if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) + return -EINVAL; + + phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); + if (phys_shift) { + if (phys_shift > kvm_ipa_limit || + phys_shift < 32) + return -EINVAL; + } else { + phys_shift = KVM_PHYS_SHIFT; + } + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; + if (parange > ID_AA64MMFR0_PARANGE_MAX) + parange = ID_AA64MMFR0_PARANGE_MAX; + vtcr |= parange << VTCR_EL2_PS_SHIFT; + + vtcr |= VTCR_EL2_T0SZ(phys_shift); + /* + * Use a minimum 2 level page table to prevent splitting + * host PMD huge pages at stage2. + */ + lvls = stage2_pgtable_levels(phys_shift); + if (lvls < 2) + lvls = 2; + vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); + + /* + * Enable the Hardware Access Flag management, unconditionally + * on all CPUs. The features is RES0 on CPUs without the support + * and must be ignored by the CPUs. + */ + vtcr |= VTCR_EL2_HA; + + /* Set the vmid bits */ + vtcr |= (kvm_get_vmid_bits() == 16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; + kvm->arch.vtcr = vtcr; + return 0; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 22fbbdbece3caf19fcc070c736f30956c4419c8c..cc824a208684c5c2c355346b87a2259c70e78947 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -76,7 +76,7 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu, return false; } -u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg) +u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg) { if (!vcpu->arch.sysregs_loaded_on_cpu) goto immediate_read; @@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) */ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); - __vcpu_sys_reg(vcpu, PMCR_EL0) = val; + __vcpu_sys_reg(vcpu, r->reg) = val; } static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) @@ -678,6 +678,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val |= p->regval & ARMV8_PMU_PMCR_MASK; __vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* PMCR.P & PMCR.C are RAZ */ val = __vcpu_sys_reg(vcpu, PMCR_EL0) @@ -833,6 +834,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, if (p->is_write) { kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; + kvm_vcpu_pmu_restore_guest(vcpu); } else { p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; } @@ -858,6 +860,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* accessing PMCNTENSET_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; kvm_pmu_enable_counter(vcpu, val); + kvm_vcpu_pmu_restore_guest(vcpu); } else { /* accessing 
PMCNTENCLR_EL0 */ __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; @@ -968,13 +971,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ - trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ + trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \ { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ - trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ + trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \ { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ - trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ + trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \ { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ - trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } + trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr } /* Macro to expand the PMEVCNTRn_EL0 register */ #define PMU_PMEVCNTR_EL0(n) \ @@ -1028,12 +1031,46 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu, return true; } +static struct id_reg_info *kvm_id_reg(struct kvm_vcpu *vcpu, u64 id) +{ + int i; + + for (i = 0; i < vcpu->arch.idregs.num; ++i) { + if (vcpu->arch.idregs.regs[i].sys_id == id) + return &vcpu->arch.idregs.regs[i]; + } + return NULL; +} + +static u64 kvm_get_id_reg(struct kvm_vcpu *vcpu, u64 id) +{ + struct id_reg_info *ri = kvm_id_reg(vcpu, id); + + if (!ri) { + WARN_ON(1); + return 0; + } + return ri->sys_val; +} + +static void kvm_set_id_reg(struct kvm_vcpu *vcpu, u64 id, u64 value) +{ + struct id_reg_info *ri = kvm_id_reg(vcpu, id); + + if (!ri) { + WARN_ON(1); + return; + } + ri->sys_val = value; +} + /* Read a sanitised cpufeature ID register by sys_reg_desc */ -static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) +static u64 read_id_reg(struct kvm_vcpu *vcpu, + struct sys_reg_desc const *r, bool raz) { u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); - u64 val = raz ? 0 : read_sanitised_ftr_reg(id); + u64 val = raz ? 0 : kvm_get_id_reg(vcpu, id); if (id == SYS_ID_AA64PFR0_EL1) { if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) @@ -1045,6 +1082,16 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) kvm_debug("LORegions unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); + } else if (id == SYS_ID_AA64DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_AA64DFR0_PMUVER_SHIFT, + ID_AA64DFR0_PMUVER_8_1); + } else if (id == SYS_ID_DFR0_EL1) { + /* Limit guests to PMUv3 for ARMv8.1 */ + val = cpuid_feature_cap_perfmon_field(val, + ID_DFR0_PERFMON_SHIFT, + ID_DFR0_PERFMON_8_1); } return val; @@ -1060,7 +1107,7 @@ static bool __access_id_reg(struct kvm_vcpu *vcpu, if (p->is_write) return write_to_read_only(vcpu, p, r); - p->regval = read_id_reg(r, raz); + p->regval = read_id_reg(vcpu, r, raz); return true; } @@ -1084,21 +1131,19 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg); /* * cpufeature ID register user accessors - * - * For now, these registers are immutable for userspace, so no values - * are stored, and for set_id_reg() we don't allow the effective value - * to be changed. 
*/ -static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, +static int __get_id_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, void __user *uaddr, bool raz) { const u64 id = sys_reg_to_index(rd); - const u64 val = read_id_reg(rd, raz); + const u64 val = read_id_reg(vcpu, rd, raz); return reg_to_user(uaddr, &val, id); } -static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, +static int __set_id_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, void __user *uaddr, bool raz) { const u64 id = sys_reg_to_index(rd); @@ -1109,9 +1154,14 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, if (err) return err; - /* This is what we mean by invariant: you can't change it. */ - if (val != read_id_reg(rd, raz)) - return -EINVAL; + if (raz) { + if (val != read_id_reg(vcpu, rd, raz)) + return -EINVAL; + } else { + u32 reg_id = sys_reg((u32)rd->Op0, (u32)rd->Op1, (u32)rd->CRn, + (u32)rd->CRm, (u32)rd->Op2); + kvm_set_id_reg(vcpu, reg_id, val); + } return 0; } @@ -1119,25 +1169,25 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __get_id_reg(rd, uaddr, false); + return __get_id_reg(vcpu, rd, uaddr, false); } static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __set_id_reg(rd, uaddr, false); + return __set_id_reg(vcpu, rd, uaddr, false); } static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __get_id_reg(rd, uaddr, true); + return __get_id_reg(vcpu, rd, uaddr, true); } static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - return __set_id_reg(rd, uaddr, true); + return __set_id_reg(vcpu, rd, uaddr, true); } /* sys_reg_desc initialiser for known cpufeature ID registers */ @@ -1285,7 +1335,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* CRm=6 */ ID_SANITISED(ID_AA64ISAR0_EL1), ID_SANITISED(ID_AA64ISAR1_EL1), - ID_UNALLOCATED(6,2), + ID_SANITISED(ID_AA64ISAR2_EL1), ID_UNALLOCATED(6,3), ID_UNALLOCATED(6,4), ID_UNALLOCATED(6,5), @@ -1359,7 +1409,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, - { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, + { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 }, { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, @@ -1456,7 +1506,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, - { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 }, + { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 }, }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, @@ -1850,6 +1900,8 @@ static void perform_access(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *r) { + trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); + /* * Not having an accessor means that we have configured a trap * that we don't know how to handle. 
This certainly qualifies @@ -1912,8 +1964,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, WARN_ON(1); } - kvm_err("Unsupported guest CP%d access at: %08lx\n", - cp, *vcpu_pc(vcpu)); + kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n", + cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } @@ -2063,8 +2115,8 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, if (likely(r)) { perform_access(vcpu, params, r); } else { - kvm_err("Unsupported guest sys_reg access at: %lx\n", - *vcpu_pc(vcpu)); + kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } @@ -2072,13 +2124,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, } static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *table, size_t num) + const struct sys_reg_desc *table, size_t num, + unsigned long *bmap) { unsigned long i; for (i = 0; i < num; i++) - if (table[i].reset) + if (table[i].reset) { + int reg = table[i].reg; + table[i].reset(vcpu, &table[i]); + if (reg > 0 && reg < NR_SYS_REGS) + set_bit(reg, bmap); + } } /** @@ -2094,6 +2152,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; trace_kvm_handle_sys_reg(esr); + vcpu->stat.sys64_exit_stat++; params.is_aarch32 = false; params.is_32bit = false; @@ -2168,8 +2227,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; + if (!index_to_params(id, ¶ms)) + return NULL; + table = get_target_table(vcpu->arch.target, true, &num); - r = find_reg_by_id(id, ¶ms, table, num); + r = find_reg(¶ms, table, num); if (!r) r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); @@ -2576,17 +2638,17 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) { size_t num; const struct sys_reg_desc *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); + DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, }; /* Generic chip reset first (so target could override). */ - reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); + reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap); table = get_target_table(vcpu->arch.target, true, &num); - reset_sys_reg_descs(vcpu, table, num); + reset_sys_reg_descs(vcpu, table, num, bmap); - for (num = 1; num < NR_SYS_REGS; num++) - if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) - panic("Didn't reset __vcpu_sys_reg(%zi)", num); + for (num = 1; num < NR_SYS_REGS; num++) { + if (WARN(!test_bit(num, bmap), + "Didn't reset __vcpu_sys_reg(%zi)\n", num)) + break; + } } diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index cd710f8b63e0f9a33c8a39819428ae07fcf22eeb..3b1bc7f01d0bd284314308898f7aa1448f5e0436 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -35,6 +35,9 @@ struct sys_reg_params { }; struct sys_reg_desc { + /* Sysreg string for debug */ + const char *name; + /* MRS/MSR instruction which accesses it. 
*/ u8 Op0; u8 Op1; @@ -130,6 +133,7 @@ const struct sys_reg_desc *find_reg_by_id(u64 id, #define Op2(_x) .Op2 = _x #define SYS_DESC(reg) \ + .name = #reg, \ Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)), \ CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \ Op2(sys_reg_Op2(reg)) diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h index 3b82fb1ddd097783ef66d6ac0f0ab334b3ab6bb2..6014c73d267896092e0fd73ce857a41bcc747a17 100644 --- a/arch/arm64/kvm/trace.h +++ b/arch/arm64/kvm/trace.h @@ -3,10 +3,46 @@ #define _TRACE_ARM64_KVM_H #include +#include "sys_regs.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm +TRACE_EVENT(kvm_trap_enter, + TP_PROTO(unsigned int vcpu_id, unsigned int esr_ec), + TP_ARGS(vcpu_id, esr_ec), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, esr_ec) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->esr_ec = esr_ec; + ), + + TP_printk("VCPU %u: HSR_EC=0x%04x (%s)", + __entry->vcpu_id, + __entry->esr_ec, + __print_symbolic(__entry->esr_ec, kvm_arm_exception_class)) +); + +TRACE_EVENT(kvm_trap_exit, + TP_PROTO(unsigned int vcpu_id), + TP_ARGS(vcpu_id), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + ), + + TP_printk("VCPU %u", __entry->vcpu_id) +); + TRACE_EVENT(kvm_wfx_arm64, TP_PROTO(unsigned long vcpu_pc, bool is_wfe), TP_ARGS(vcpu_pc, is_wfe), @@ -152,6 +188,40 @@ TRACE_EVENT(kvm_handle_sys_reg, TP_printk("HSR 0x%08lx", __entry->hsr) ); +TRACE_EVENT(kvm_sys_access, + TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg), + TP_ARGS(vcpu_pc, params, reg), + + TP_STRUCT__entry( + __field(unsigned long, vcpu_pc) + __field(bool, is_write) + __field(const char *, name) + __field(u8, Op0) + __field(u8, Op1) + __field(u8, CRn) + __field(u8, CRm) + __field(u8, Op2) + ), + + TP_fast_assign( + __entry->vcpu_pc = vcpu_pc; + __entry->is_write = params->is_write; + __entry->name = reg->name; + __entry->Op0 = reg->Op0; + __entry->Op0 = reg->Op0; + __entry->Op1 = reg->Op1; + __entry->CRn = reg->CRn; + __entry->CRm = reg->CRm; + __entry->Op2 = reg->Op2; + ), + + TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s", + __entry->vcpu_pc, __entry->name ?: "UNKN", + __entry->Op0, __entry->Op1, __entry->CRn, + __entry->CRm, __entry->Op2, + __entry->is_write ? "write" : "read") +); + TRACE_EVENT(kvm_set_guest_debug, TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug), TP_ARGS(vcpu, guest_debug), diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 68755fd70dcf4c4164cb1453fbe2695a1bc8ff33..e7d29f205d470b1eb179fe8ef2dbd8f941b0cd29 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -3,7 +3,15 @@ lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ clear_page.o memchr.o memcpy.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ - strchr.o strrchr.o tishift.o + strchr.o strrchr.o tishift.o csum.o memcpy_mc.o + +lib-$(CONFIG_UCE_KERNEL_RECOVERY) += get_user.o + +ifeq ($(CONFIG_KERNEL_MODE_NEON), y) +obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o +CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only +CFLAGS_xor-neon.o += -ffreestanding +endif # Tell the compiler to treat all general purpose registers (with the # exception of the IP registers, which are already handled by the caller @@ -12,7 +20,7 @@ lib-y := clear_user.o delay.o copy_from_user.o \ # when supported by the CPU. Result and argument registers are handled # correctly, based on the function prototype. 
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o -CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ +CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \ -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \ -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ @@ -25,3 +33,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +obj-$(CONFIG_CRC32) += crc32.o diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 21ba0b29621b2fab087aa8adaeb15e0d589966ac..410768a8d416600ce0019f9f31b561493dce4791 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -34,6 +34,9 @@ ENTRY(__arch_clear_user) mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f +#ifdef CONFIG_ARCH_HISI + .align 5 +#endif 1: uao_user_alternative 9f, str, sttr, xzr, x0, 8 subs x1, x1, #8 @@ -57,5 +60,6 @@ ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 9: mov x0, x2 // return the original size + uaccess_disable_not_uao x2, x3 ret .previous diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 20305d485046754c7c59c48ef28c77dfc54f2ca0..d1afb61df158b1259870038b28aef696bd36b22b 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -39,7 +39,7 @@ .endm .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + uao_user_alternative 9997f, ldrh, ldtrh, \ptr, \regB, \val .endm .macro strh1 ptr, regB, val @@ -47,7 +47,7 @@ .endm .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + uao_user_alternative 9997f, ldr, ldtr, \ptr, \regB, \val .endm .macro str1 ptr, regB, val @@ -55,7 +55,7 @@ .endm .macro ldp1 ptr, regB, regC, val - uao_ldp 9998f, \ptr, \regB, \regC, \val + uao_ldp 9997f, \ptr, \regB, \regC, \val .endm .macro stp1 ptr, regB, regC, val @@ -63,17 +63,31 @@ .endm end .req x5 +srcin .req x15 ENTRY(__arch_copy_from_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 + mov srcin, x1 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret + + .global copy_from_user_sea_fallback +copy_from_user_sea_fallback: + uaccess_disable_not_uao x3, x4 + mov x0, #-1 + ret ENDPROC(__arch_copy_from_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) + strb tmp1w, [dst], #1 9998: sub x0, end, dst // bytes not copied + uaccess_disable_not_uao x3, x4 ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 54b75deb1d1606c2d5c317b35ee2f5bbc78144ba..b20d3a0b32374c0050afe3393557464bbdfc8ca5 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -40,34 +40,36 @@ .endm .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + uao_user_alternative 9997f, ldrh, ldtrh, \ptr, \regB, \val .endm .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + uao_user_alternative 9997f, strh, sttrh, \ptr, \regB, \val .endm .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + uao_user_alternative 9997f, ldr, ldtr, \ptr, \regB, \val .endm .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + uao_user_alternative 9997f, str, sttr, \ptr, \regB, \val .endm .macro ldp1 ptr, regB, regC, 
val - uao_ldp 9998f, \ptr, \regB, \regC, \val + uao_ldp 9997f, \ptr, \regB, \regC, \val .endm .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + uao_stp 9997f, \ptr, \regB, \regC, \val .endm end .req x5 +srcin .req x15 ENTRY(__arch_copy_in_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 + mov srcin, x1 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 @@ -76,6 +78,13 @@ ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied + uaccess_disable_not_uao x3, x4 ret .previous diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index 076c43715e64afe7fcc12eabd753d355ca94558f..649cf4eb96bf474f043b9e24f27ab981b974becd 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -87,3 +87,81 @@ alternative_else_nop_endif ret ENDPROC(copy_page) + +#ifdef CONFIG_UCE_KERNEL_RECOVERY +#The difference between copy_page_cow and copy_page: +# 1) copy_page_cow adds the recovery path of sea fault(copy_page_cow_sea_fallback). +# 2) copy_page_cow with return value: 0 - copy success 1 - copy fail. +/* + * COW copy a page from src to dest (both are page aligned) + * + * Parameters: + * x0 - dest + * x1 - src + */ +ENTRY(copy_page_cow) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. + prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + + ldp x2, x3, [x1] + ldp x4, x5, [x1, #16] + ldp x6, x7, [x1, #32] + ldp x8, x9, [x1, #48] + ldp x10, x11, [x1, #64] + ldp x12, x13, [x1, #80] + ldp x14, x15, [x1, #96] + ldp x16, x17, [x1, #112] + + mov x18, #(PAGE_SIZE - 128) + add x1, x1, #128 +1: + subs x18, x18, #128 + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + + stnp x2, x3, [x0] + ldp x2, x3, [x1] + stnp x4, x5, [x0, #16] + ldp x4, x5, [x1, #16] + stnp x6, x7, [x0, #32] + ldp x6, x7, [x1, #32] + stnp x8, x9, [x0, #48] + ldp x8, x9, [x1, #48] + stnp x10, x11, [x0, #64] + ldp x10, x11, [x1, #64] + stnp x12, x13, [x0, #80] + ldp x12, x13, [x1, #80] + stnp x14, x15, [x0, #96] + ldp x14, x15, [x1, #96] + stnp x16, x17, [x0, #112] + ldp x16, x17, [x1, #112] + + add x0, x0, #128 + add x1, x1, #128 + + b.gt 1b + + stnp x2, x3, [x0] + stnp x4, x5, [x0, #16] + stnp x6, x7, [x0, #32] + stnp x8, x9, [x0, #48] + stnp x10, x11, [x0, #64] + stnp x12, x13, [x0, #80] + stnp x14, x15, [x0, #96] + stnp x16, x17, [x0, #112] + + mov x0, #0 + ret + + .global copy_page_cow_sea_fallback +copy_page_cow_sea_fallback: + mov x0, #1 + ret +ENDPROC(copy_page_cow) +#endif diff --git a/arch/arm64/lib/copy_template_generic_read.S b/arch/arm64/lib/copy_template_generic_read.S new file mode 100644 index 0000000000000000000000000000000000000000..2871935272600e05763dd3035454ca47af7e3545 --- /dev/null +++ b/arch/arm64/lib/copy_template_generic_read.S @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * and re-licensed under GPLv2 for the Linux kernel. 
The original code can + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ +dstin .req x0 +src .req x1 +count .req x2 +tmp1 .req x3 +tmp1w .req w3 +tmp2 .req x4 +tmp2w .req w4 +dst .req x6 + +A_l .req x7 +A_h .req x8 +B_l .req x9 +B_h .req x10 +C_l .req x11 +C_h .req x12 +D_l .req x13 +D_h .req x14 + + mov dst, dstin + cmp count, #16 + /* When memory length is less than 16, the accesses are not aligned. */ + b.lo .Ltiny15_gr + + neg tmp2, src + ands tmp2, tmp2, #15/* Bytes to reach alignment. */ + b.eq .LSrcAligned_gr + sub count, count, tmp2 + /* + * Copy the leading memory data from src to dst in an increasing + * address order. In this way, the risk of overwriting the source + * memory data is eliminated when the distance between src and + * dst is less than 16. The memory accesses here are aligned. + */ + tbz tmp2, #0, 1f + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 +1: + tbz tmp2, #1, 2f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +2: + tbz tmp2, #2, 3f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +3: + tbz tmp2, #3, .LSrcAligned_gr + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 + +.LSrcAligned_gr: + cmp count, #64 + b.ge .Lcpy_over64_gr + /* + * Deal with small copies quickly by dropping straight into the + * exit block. + */ +.Ltail63_gr: + /* + * Copy up to 48 bytes of data. At this point we only need the + * bottom 6 bits of count to be accurate. + */ + ands tmp1, count, #0x30 + b.eq .Ltiny15_gr + cmp tmp1w, #0x20 + b.eq 1f + b.lt 2f + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +1: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +2: + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 +.Ltiny15_gr: + /* + * Prefer to break one ldp/stp into several load/store to access + * memory in an increasing address order, rather than to load/store 16 + * bytes from (src-16) to (dst-16) and to backward the src to aligned + * address, which is the way used in the original cortex memcpy. If keeping + * the original memcpy process here, memmove needs to satisfy the + * precondition that src address is at least 16 bytes bigger than dst + * address, otherwise some source data will be overwritten when memmove + * calls memcpy directly. To make memmove simpler and decouple the + * memcpy's dependency on memmove, the original process was withdrawn. + */ + tbz count, #3, 1f + ldr1 tmp1, src, #8 + str1 tmp1, dst, #8 +1: + tbz count, #2, 2f + ldr1 tmp1w, src, #4 + str1 tmp1w, dst, #4 +2: + tbz count, #1, 3f + ldrh1 tmp1w, src, #2 + strh1 tmp1w, dst, #2 +3: + tbz count, #0, .Lexitfunc_gr + ldrb1 tmp1w, src, #1 + strb1 tmp1w, dst, #1 + + b .Lexitfunc_gr + +.Lcpy_over64_gr: + subs count, count, #128 + b.ge .Lcpy_body_large_gr + /* + * Less than 128 bytes to copy, so handle 64 here and then jump + * to the tail.
+ */ + ldp1 A_l, A_h, src, #16 + stp1 A_l, A_h, dst, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + ldp1 D_l, D_h, src, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63_gr + b .Lexitfunc_gr + + /* + * Critical loop. Start at a new cache line boundary. Assuming + * 64 bytes per line this ensures the entire loop is in one line. + */ + .p2align L1_CACHE_SHIFT +.Lcpy_body_large_gr: + /* pre-get 64 bytes data. */ + ldp1 A_l, A_h, src, #16 + ldp1 B_l, B_h, src, #16 + ldp1 C_l, C_h, src, #16 + ldp1 D_l, D_h, src, #16 +1: + /* + * interlace the load of next 64 bytes data block with store of the last + * loaded 64 bytes data. + */ + stp1 A_l, A_h, dst, #16 + ldp1 A_l, A_h, src, #16 + stp1 B_l, B_h, dst, #16 + ldp1 B_l, B_h, src, #16 + stp1 C_l, C_h, dst, #16 + ldp1 C_l, C_h, src, #16 + stp1 D_l, D_h, dst, #16 + ldp1 D_l, D_h, src, #16 + subs count, count, #64 + b.ge 1b + stp1 A_l, A_h, dst, #16 + stp1 B_l, B_h, dst, #16 + stp1 C_l, C_h, dst, #16 + stp1 D_l, D_h, dst, #16 + + tst count, #0x3f + b.ne .Ltail63_gr +.Lexitfunc_gr: diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index fda6172d6b88819bcec2a69439fa202c57119f40..6214e49721414ec605e178c40ce21dae3c7e5934 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -42,7 +42,7 @@ .endm .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + uao_user_alternative 9997f, strh, sttrh, \ptr, \regB, \val .endm .macro ldr1 ptr, regB, val @@ -50,7 +50,7 @@ .endm .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + uao_user_alternative 9997f, str, sttr, \ptr, \regB, \val .endm .macro ldp1 ptr, regB, regC, val @@ -58,21 +58,50 @@ .endm .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + uao_stp 9997f, \ptr, \regB, \regC, \val .endm end .req x5 +srcin .req x15 ENTRY(__arch_copy_to_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 + mov srcin, x1 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) +#ifdef CONFIG_UCE_KERNEL_RECOVERY +ENTRY(__arch_copy_to_user_generic_read) + uaccess_enable_not_uao x3, x4, x5 + add end, x0, x2 + mov srcin, x1 +#include "copy_template_generic_read.S" + uaccess_disable_not_uao x3, x4 + + mov x0, #0 + ret + + .global copy_generic_read_sea_fallback +copy_generic_read_sea_fallback: + uaccess_disable_not_uao x3, x4 + + mov x0, #-1 + ret +ENDPROC(__arch_copy_to_user_generic_read) +#endif + .section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder + ldrb tmp1w, [srcin] +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied + uaccess_disable_not_uao x3, x4 ret .previous diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S new file mode 100644 index 0000000000000000000000000000000000000000..f132f2a7522e34af6f59874b77e139407ca6812c --- /dev/null +++ b/arch/arm64/lib/crc32.S @@ -0,0 +1,104 @@ +/* + * Accelerated CRC32(C) using AArch64 CRC instructions + * + * Copyright (C) 2016 - 2018 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + + .cpu generic+crc + + .macro __crc32, c + cmp x2, #16 + b.lt 8f // less than 16 bytes + + and x7, x2, #0x1f + and x2, x2, #~0x1f + cbz x7, 32f // multiple of 32 bytes + + and x8, x7, #0xf + ldp x3, x4, [x1] + add x8, x8, x1 + add x1, x1, x7 + ldp x5, x6, [x8] +CPU_BE( rev x3, x3 ) +CPU_BE( rev x4, x4 ) +CPU_BE( rev x5, x5 ) +CPU_BE( rev x6, x6 ) + + tst x7, #8 + crc32\c\()x w8, w0, x3 + csel x3, x3, x4, eq + csel w0, w0, w8, eq + tst x7, #4 + lsr x4, x3, #32 + crc32\c\()w w8, w0, w3 + csel x3, x3, x4, eq + csel w0, w0, w8, eq + tst x7, #2 + lsr w4, w3, #16 + crc32\c\()h w8, w0, w3 + csel w3, w3, w4, eq + csel w0, w0, w8, eq + tst x7, #1 + crc32\c\()b w8, w0, w3 + csel w0, w0, w8, eq + tst x7, #16 + crc32\c\()x w8, w0, x5 + crc32\c\()x w8, w8, x6 + csel w0, w0, w8, eq + cbz x2, 0f + +32: ldp x3, x4, [x1], #32 + sub x2, x2, #32 + ldp x5, x6, [x1, #-16] +CPU_BE( rev x3, x3 ) +CPU_BE( rev x4, x4 ) +CPU_BE( rev x5, x5 ) +CPU_BE( rev x6, x6 ) + crc32\c\()x w0, w0, x3 + crc32\c\()x w0, w0, x4 + crc32\c\()x w0, w0, x5 + crc32\c\()x w0, w0, x6 + cbnz x2, 32b +0: ret + +8: tbz x2, #3, 4f + ldr x3, [x1], #8 +CPU_BE( rev x3, x3 ) + crc32\c\()x w0, w0, x3 +4: tbz x2, #2, 2f + ldr w3, [x1], #4 +CPU_BE( rev w3, w3 ) + crc32\c\()w w0, w0, w3 +2: tbz x2, #1, 1f + ldrh w3, [x1], #2 +CPU_BE( rev16 w3, w3 ) + crc32\c\()h w0, w0, w3 +1: tbz x2, #0, 0f + ldrb w3, [x1] + crc32\c\()b w0, w0, w3 +0: ret + .endm + + .align 5 +ENTRY(crc32_le) +alternative_if_not ARM64_HAS_CRC32 + b crc32_le_base +alternative_else_nop_endif + __crc32 +ENDPROC(crc32_le) + + .align 5 +ENTRY(__crc32c_le) +alternative_if_not ARM64_HAS_CRC32 + b __crc32c_le_base +alternative_else_nop_endif + __crc32 c +ENDPROC(__crc32c_le) diff --git a/arch/arm64/lib/csum.S b/arch/arm64/lib/csum.S new file mode 100644 index 0000000000000000000000000000000000000000..8c93c39f7c76512f470ccbfb0686fbea5794fea3 --- /dev/null +++ b/arch/arm64/lib/csum.S @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Linaro, Ltd. 
+ */ + +#include +#include + +ENTRY(do_csum) + // len is zero or negative + cmp w1, wzr + b.le out + + adds x2, xzr, xzr // clear x2 and C flag + + // 64 bytes at a time + lsr x3, x1, #6 + and x1, x1, #63 + cbz x3, 1f + + // Eight 64-bit adds per iteration +0: ldp x4, x5, [x0], #64 + ldp x6, x7, [x0, #-48] + ldp x8, x9, [x0, #-32] + ldp x10, x11, [x0, #-16] + adcs x2, x2, x4 + sub x3, x3, #1 + adcs x2, x2, x5 + adcs x2, x2, x6 + adcs x2, x2, x7 + adcs x2, x2, x8 + adcs x2, x2, x9 + adcs x2, x2, x10 + adcs x2, x2, x11 + cbnz x3, 0b + adc x2, x2, xzr + + cbz x1, 7f + bic x3, x1, #1 + add x12, x0, x1 + add x0, x0, x3 + neg x3, x3 + add x3, x3, #64 + lsl x3, x3, #3 + + // Handle remaining 63 bytes or less using an overlapping 64-byte load + // and a branchless code path to complete the calculation + ldp x4, x5, [x0, #-64] + ldp x6, x7, [x0, #-48] + ldp x8, x9, [x0, #-32] + ldp x10, x11, [x0, #-16] + ldrb w12, [x12, #-1] + + .irp reg, x4, x5, x6, x7, x8, x9, x10, x11 + cmp x3, #64 + csel \reg, \reg, xzr, lt + ccmp x3, xzr, #0, lt + csel x13, x3, xzr, gt + sub x3, x3, #64 +CPU_LE( lsr \reg, \reg, x13 ) +CPU_BE( lsl \reg, \reg, x13 ) + .endr + + adds x2, x2, x4 + adcs x2, x2, x5 + adcs x2, x2, x6 + adcs x2, x2, x7 + adcs x2, x2, x8 + adcs x2, x2, x9 + adcs x2, x2, x10 + adcs x2, x2, x11 + adc x2, x2, xzr + +CPU_LE( adds x12, x2, x12 ) +CPU_BE( adds x12, x2, x12, lsl #8 ) + adc x12, x12, xzr + tst x1, #1 + csel x2, x2, x12, eq + +7: lsr x1, x2, #32 + adds w2, w2, w1 + adc w2, w2, wzr + + lsr w1, w2, #16 + uxth w2, w2 + add w2, w2, w1 + + lsr w1, w2, #16 // handle the carry by hand + add w2, w2, w1 + + uxth w0, w2 + ret + + // Handle 63 bytes or less +1: tbz x1, #5, 2f + ldp x4, x5, [x0], #32 + ldp x6, x7, [x0, #-16] + adds x2, x2, x4 + adcs x2, x2, x5 + adcs x2, x2, x6 + adcs x2, x2, x7 + adc x2, x2, xzr + +2: tbz x1, #4, 3f + ldp x4, x5, [x0], #16 + adds x2, x2, x4 + adcs x2, x2, x5 + adc x2, x2, xzr + +3: tbz x1, #3, 4f + ldr x4, [x0], #8 + adds x2, x2, x4 + adc x2, x2, xzr + +4: tbz x1, #2, 5f + ldr w4, [x0], #4 + adds x2, x2, x4 + adc x2, x2, xzr + +5: tbz x1, #1, 6f + ldrh w4, [x0], #2 + adds x2, x2, x4 + adc x2, x2, xzr + +6: tbz x1, #0, 7b + ldrb w4, [x0] +CPU_LE( adds x2, x2, x4 ) +CPU_BE( adds x2, x2, x4, lsl #8 ) + adc x2, x2, xzr + b 7b + +out: + mov w0, #0 + ret +ENDPROC(do_csum) diff --git a/arch/arm64/lib/get_user.c b/arch/arm64/lib/get_user.c new file mode 100644 index 0000000000000000000000000000000000000000..818ccf91c2105af4ba5034d1b77363d163e737ce --- /dev/null +++ b/arch/arm64/lib/get_user.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +/* get user space to a kernel buffer */ +noinline void get_user_func(long *p, const long __user *addr, + int size, int *err) +{ + asm volatile(".global get_user_sea_fallback\n" + "get_user_sea_fallback:\n"); + + if (unlikely(current->flags & PF_UCE_KERNEL_RECOVERY)) { + current->flags &= ~PF_UCE_KERNEL_RECOVERY; + *err = -EFAULT; + return; + } + + __get_user_uce_check(*p, addr, size, *err); +} +EXPORT_SYMBOL(get_user_func); diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 4444c1d25f4bb7217f540715e8cde1b27d28913d..0f164a4baf52a16dfc0fd9232b6b797a06e2a314 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -30,7 +30,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -ENTRY(memchr) +WEAK(memchr) and w1, w1, #0xff 1: subs x2, x2, #1 b.mi 2f diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index 
2a4e239bd17a03441881ebc85beb4a8100824098..fb295f52e9f8771aed4e2bc52db70dd4a55b51c6 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -58,7 +58,7 @@ pos .req x11 limit_wd .req x12 mask .req x13 -ENTRY(memcmp) +WEAK(memcmp) cbz limit, .Lret0 eor tmp1, src1, src2 tst tmp1, #7 diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index 67613937711f10d209b3be73ce6a322f674581b5..dfedd4ab1a766fdf7ed47028cca1913e150b4b1e 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -68,9 +68,8 @@ stp \ptr, \regB, [\regC], \val .endm - .weak memcpy ENTRY(__memcpy) -ENTRY(memcpy) +WEAK(memcpy) #include "copy_template.S" ret ENDPIPROC(memcpy) diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/lib/memcpy_mc.S similarity index 32% rename from arch/arm64/include/asm/stage2_pgtable-nopmd.h rename to arch/arm64/lib/memcpy_mc.S index 2656a0fd05a6c09c3f9a7ffe889648d8d92a9a96..e88f07b506c476ebcb423c84988b180b22deabe3 100644 --- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h +++ b/arch/arm64/lib/memcpy_mc.S @@ -1,5 +1,13 @@ /* - * Copyright (C) 2016 - ARM Ltd + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * and re-licensed under GPLv2 for the Linux kernel. The original code can + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -14,29 +22,58 @@ * along with this program. If not, see . */ -#ifndef __ARM64_S2_PGTABLE_NOPMD_H_ -#define __ARM64_S2_PGTABLE_NOPMD_H_ +#include +#include +#include + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - dest + */ + .macro ldrb1 ptr, regB, val + ldrb \ptr, [\regB], \val + .endm + + .macro strb1 ptr, regB, val + strb \ptr, [\regB], \val + .endm -#include + .macro ldrh1 ptr, regB, val + ldrh \ptr, [\regB], \val + .endm -#define __S2_PGTABLE_PMD_FOLDED + .macro strh1 ptr, regB, val + strh \ptr, [\regB], \val + .endm -#define S2_PMD_SHIFT S2_PUD_SHIFT -#define S2_PTRS_PER_PMD 1 -#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) -#define S2_PMD_MASK (~(S2_PMD_SIZE-1)) + .macro ldr1 ptr, regB, val + ldr \ptr, [\regB], \val + .endm -#define stage2_pud_none(pud) (0) -#define stage2_pud_present(pud) (1) -#define stage2_pud_clear(pud) do { } while (0) -#define stage2_pud_populate(pud, pmd) do { } while (0) -#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud)) + .macro str1 ptr, regB, val + str \ptr, [\regB], \val + .endm -#define stage2_pmd_free(pmd) do { } while (0) + .macro ldp1 ptr, regB, regC, val + ldp \ptr, \regB, [\regC], \val + .endm -#define stage2_pmd_addr_end(addr, end) (end) + .macro stp1 ptr, regB, regC, val + stp \ptr, \regB, [\regC], \val + .endm -#define stage2_pud_huge(pud) (0) -#define stage2_pmd_table_empty(pmdp) (0) +ENTRY(__memcpy_mc) +WEAK(memcpy_mc) +#include "copy_template.S" -#endif + .global memcpy_mc_sea_fallback +memcpy_mc_sea_fallback: + ret +ENDPIPROC(memcpy_mc) +ENDPROC(__memcpy_mc) diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S index a5a4459013b1a59d5a54b2a70fd1ad8e0f256fd9..e3de8f05c21a81b54fafbaedbcf6cf7d86d7ce36 100644 --- a/arch/arm64/lib/memmove.S +++ b/arch/arm64/lib/memmove.S @@ -57,9 +57,8 @@ C_h .req x12 D_l .req x13 D_h .req x14 - .weak memmove 
ENTRY(__memmove) -ENTRY(memmove) +WEAK(memmove) cmp dstin, src b.lo __memcpy add tmp1, src, count diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S index f2670a9f218c919ff68a1ffcfdbce468e693ae60..316263c47c00683546946901106aeed2a80f9c85 100644 --- a/arch/arm64/lib/memset.S +++ b/arch/arm64/lib/memset.S @@ -54,9 +54,8 @@ dst .req x8 tmp3w .req w9 tmp3 .req x9 - .weak memset ENTRY(__memset) -ENTRY(memset) +WEAK(memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S index dae0cf5591f99d92e424831894f361bf4ffb50fd..7c83091d1bcdd2c67c03d2af8e69264858985c09 100644 --- a/arch/arm64/lib/strchr.S +++ b/arch/arm64/lib/strchr.S @@ -29,7 +29,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -ENTRY(strchr) +WEAK(strchr) and w1, w1, #0xff 1: ldrb w2, [x0], #1 cmp w2, w1 diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 471fe61760ef661213007542df37c7c8fdcb1a18..7d5d15398bfbc6260e61d369f5246e17be5422fa 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -60,7 +60,7 @@ tmp3 .req x9 zeroones .req x10 pos .req x11 -ENTRY(strcmp) +WEAK(strcmp) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 55ccc8e24c08440399034d41bf8aa04699e09812..8e0b14205dcb419e5752209294922ddff072bd52 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -56,7 +56,7 @@ pos .req x12 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -ENTRY(strlen) +WEAK(strlen) mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index e267044761c6f2c1b4cadcba729e8d0dbe79f766..66bd145935d9ed0abaababc2d8f12beecc1a2875 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -64,7 +64,7 @@ limit_wd .req x13 mask .req x14 endloop .req x15 -ENTRY(strncmp) +WEAK(strncmp) cbz limit, .Lret0 eor tmp1, src1, src2 mov zeroones, #REP8_01 diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index eae38da6e0bb3911a5cad1fb2f6da46b4bb0090a..355be04441fe60f949978d6b80778a3812672f3c 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -59,7 +59,7 @@ limit_wd .req x14 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -ENTRY(strnlen) +WEAK(strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S index f8e2784d5752124fc15cca8b6d005f6937655f21..ea84924d599019c1dd4f6d196c47f60328dd4118 100644 --- a/arch/arm64/lib/strrchr.S +++ b/arch/arm64/lib/strrchr.S @@ -29,7 +29,7 @@ * Returns: * x0 - address of last occurrence of 'c' or 0 */ -ENTRY(strrchr) +WEAK(strrchr) mov x3, #0 and w1, w1, #0xff 1: ldrb w2, [x0], #1 diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c new file mode 100644 index 0000000000000000000000000000000000000000..131c60c27dff646aad80816d1dd26f5e8d11cf0c --- /dev/null +++ b/arch/arm64/lib/xor-neon.c @@ -0,0 +1,184 @@ +/* + * arch/arm64/lib/xor-neon.c + * + * Authors: Jackie Liu + * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1, + unsigned long *p2) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, unsigned long *p4) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 */ + v0 = veorq_u64(v0, vld1q_u64(dp4 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp4 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp4 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp4 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + dp4 += 8; + } while (--lines > 0); +} + +void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1, + unsigned long *p2, unsigned long *p3, + unsigned long *p4, unsigned long *p5) +{ + uint64_t *dp1 = (uint64_t *)p1; + uint64_t *dp2 = (uint64_t *)p2; + uint64_t *dp3 = (uint64_t *)p3; + uint64_t *dp4 = (uint64_t *)p4; + uint64_t *dp5 = (uint64_t *)p5; + + register uint64x2_t v0, v1, v2, v3; + long lines = bytes / (sizeof(uint64x2_t) * 4); + + do { + /* p1 ^= p2 */ + v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0)); + v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2)); + v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4)); + v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6)); + + /* p1 ^= p3 */ + v0 = veorq_u64(v0, vld1q_u64(dp3 + 0)); + 
v1 = veorq_u64(v1, vld1q_u64(dp3 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp3 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp3 + 6)); + + /* p1 ^= p4 */ + v0 = veorq_u64(v0, vld1q_u64(dp4 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp4 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp4 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp4 + 6)); + + /* p1 ^= p5 */ + v0 = veorq_u64(v0, vld1q_u64(dp5 + 0)); + v1 = veorq_u64(v1, vld1q_u64(dp5 + 2)); + v2 = veorq_u64(v2, vld1q_u64(dp5 + 4)); + v3 = veorq_u64(v3, vld1q_u64(dp5 + 6)); + + /* store */ + vst1q_u64(dp1 + 0, v0); + vst1q_u64(dp1 + 2, v1); + vst1q_u64(dp1 + 4, v2); + vst1q_u64(dp1 + 6, v3); + + dp1 += 8; + dp2 += 8; + dp3 += 8; + dp4 += 8; + dp5 += 8; + } while (--lines > 0); +} + +struct xor_block_template const xor_block_inner_neon = { + .name = "__inner_neon__", + .do_2 = xor_arm64_neon_2, + .do_3 = xor_arm64_neon_3, + .do_4 = xor_arm64_neon_4, + .do_5 = xor_arm64_neon_5, +}; +EXPORT_SYMBOL(xor_block_inner_neon); + +MODULE_AUTHOR("Jackie Liu "); +MODULE_DESCRIPTION("ARMv8 XOR Extensions"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 0c22ede52f9062241d7db938d709a16087bd142c..a194fd0e837fb913abf10e35807cea6c672a2af5 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area) * - size - size in question */ ENTRY(__clean_dcache_area_pop) + alternative_if_not ARM64_HAS_DCPOP + b __clean_dcache_area_poc + alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_pop) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index c127f94da8e2854bc3a3156f4dbe31126618c559..e7cc878a0b05f3b6d2b2a5f345bf036121109afa 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -37,6 +37,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; +static unsigned long max_pinned_asids; +static unsigned long nr_pinned_asids; +static unsigned long *pinned_asid_map; + #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) @@ -88,13 +92,16 @@ void verify_cpu_asid_bits(void) } } +#define asid_gen_match(asid) \ + (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits)) + static void flush_context(unsigned int cpu) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. */ - bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); @@ -151,6 +158,10 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) if (asid != 0) { u64 newasid = generation | (asid & ~ASID_MASK); + /* That ASID is pinned for us, we're good to go. */ + if (mm->context.refcount) + return newasid; + /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. @@ -158,6 +169,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) if (check_update_reserved_asid(asid, newasid)) return newasid; + /* + * If it is pinned, we can keep using it. Note that reserved + * takes priority, because even if it is also pinned, we need to + * update the generation into the reserved_asids. + */ + if (mm->context.pinned) + return newasid; + /* * We had a valid ASID in a previous life, so try to re-use * it if possible. 
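The pinning and rollover checks in this file all rely on the same packing of mm->context.id: the rollover generation sits above asid_bits and the hardware ASID below it, which is what the asid_gen_match() helper added above tests. A minimal sketch of that layout, with asid_bits fixed at 16 purely for illustration (the real width is probed at boot):

    /* Illustrative only: the generation/ASID packing behind asid_gen_match(). */
    #define EXAMPLE_ASID_BITS 16

    static inline unsigned long example_hw_asid(unsigned long ctx_id)
    {
            return ctx_id & ((1UL << EXAMPLE_ASID_BITS) - 1);
    }

    static inline unsigned long example_generation(unsigned long ctx_id)
    {
            return ctx_id >> EXAMPLE_ASID_BITS;
    }

    static inline int example_gen_match(unsigned long ctx_id, unsigned long gen)
    {
            /* True when ctx_id was handed out in the current generation. */
            return !((ctx_id ^ gen) >> EXAMPLE_ASID_BITS);
    }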
@@ -188,6 +207,9 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; +#ifdef CONFIG_ARM64_TLBI_IPI + cpumask_clear(mm_cpumask(mm)); +#endif return idx2asid(asid) | generation; } @@ -213,8 +235,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) * because atomic RmWs are totally ordered for a given location. */ old_active_asid = atomic64_read(&per_cpu(active_asids, cpu)); - if (old_active_asid && - !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) && + if (old_active_asid && asid_gen_match(asid) && atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu), old_active_asid, asid)) goto switch_mm_fastpath; @@ -222,7 +243,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. */ asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) { + if (!asid_gen_match(asid)) { asid = new_context(mm, cpu); atomic64_set(&mm->context.id, asid); } @@ -236,6 +257,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) switch_mm_fastpath: arm64_apply_bp_hardening(); +#ifdef CONFIG_ARM64_TLBI_IPI + cpumask_set_cpu(cpu, mm_cpumask(mm)); +#endif /* * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when @@ -245,6 +269,65 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) cpu_switch_mm(mm->pgd, mm); } +unsigned long mm_context_get(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + asid = atomic64_read(&mm->context.id); + + if (mm->context.pinned) { + mm->context.pinned++; + asid &= ~ASID_MASK; + goto out_unlock; + } + + if (nr_pinned_asids >= max_pinned_asids) { + asid = 0; + goto out_unlock; + } + + if (!asid_gen_match(asid)) { + /* + * We went through one or more rollover since that ASID was + * used. Ensure that it is still valid, or generate a new one. + * The cpu argument isn't used by new_context. + */ + asid = new_context(mm, 0); + atomic64_set(&mm->context.id, asid); + } + + asid &= ~ASID_MASK; + + nr_pinned_asids++; + __set_bit(asid2idx(asid), pinned_asid_map); + mm->context.pinned++; + +out_unlock: + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + + return asid; +} +EXPORT_SYMBOL_GPL(mm_context_get); + +void mm_context_put(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid = atomic64_read(&mm->context.id) & ~ASID_MASK; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + if (--mm->context.pinned == 0) { + __clear_bit(asid2idx(asid), pinned_asid_map); + nr_pinned_asids--; + } + + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); +} +EXPORT_SYMBOL_GPL(mm_context_put); + /* Errata workaround post TTBRx_EL1 update. */ asmlinkage void post_ttbr_update_workaround(void) { @@ -269,6 +352,19 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); + pinned_asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) + * sizeof(*pinned_asid_map), GFP_KERNEL); + if (!pinned_asid_map) + panic("Failed to allocate pinned bitmap\n"); + + /* + * We assume that an ASID is always available after a rollover. This + * means that even if all CPUs have a reserved ASID, there still is at + * least one slot available in the asid map. 
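+	 *
+	 * For example, on a system with 64 possible CPUs the limit computed
+	 * below works out to NUM_USER_ASIDS - 66; once that many ASIDs are
+	 * pinned, mm_context_get() returns 0 and refuses further requests.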
+ */ + max_pinned_asids = NUM_USER_ASIDS - num_possible_cpus() - 2; + nr_pinned_asids = 0; + pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 22e4cb4d6f538baa43f7071ad1729dec01216d23..506d166d1dec7ea992b124037b90508a0f17b7c4 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -30,6 +30,20 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) } EXPORT_SYMBOL_GPL(__cpu_copy_user_page); +#ifdef CONFIG_UCE_KERNEL_RECOVERY +int __cpu_copy_user_page_cow(void *kto, const void *kfrom, unsigned long vaddr) +{ + int ret; + + struct page *page = virt_to_page(kto); + ret = copy_page_cow(kto, kfrom); + flush_dcache_page(page); + + return ret; +} +EXPORT_SYMBOL_GPL(__cpu_copy_user_page_cow); +#endif + void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) { clear_page(kaddr); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 072c51fb07d73031578267eea0d5f877685f2676..f4d5765940a3da22a8e0e5d82fede356983f49fb 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -587,9 +587,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, prot, __builtin_return_address(0)); if (addr) { - memset(addr, 0, size); if (!coherent) __dma_flush_area(page_to_virt(page), iosize); + memset(addr, 0, size); } else { iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); dma_release_from_contiguous(dev, page, @@ -664,6 +664,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; + if (!is_vmalloc_addr(cpu_addr)) { + unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); + return __swiotlb_mmap_pfn(vma, pfn, size); + } + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { /* * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, @@ -687,6 +692,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct vm_struct *area = find_vm_area(cpu_addr); + if (!is_vmalloc_addr(cpu_addr)) { + struct page *page = virt_to_page(cpu_addr); + return __swiotlb_get_sgtable_page(sgt, page, size); + } + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { /* * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, @@ -712,7 +722,7 @@ static void __iommu_sync_single_for_cpu(struct device *dev, if (is_device_dma_coherent(dev)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr); __dma_unmap_area(phys_to_virt(phys), size, dir); } @@ -725,7 +735,7 @@ static void __iommu_sync_single_for_device(struct device *dev, if (is_device_dma_coherent(dev)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr); __dma_map_area(phys_to_virt(phys), size, dir); } @@ -738,9 +748,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); - if (!iommu_dma_mapping_error(dev, dev_addr) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - __iommu_sync_single_for_device(dev, dev_addr, size, dir); + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + !iommu_dma_mapping_error(dev, dev_addr)) + __dma_map_area(page_address(page) + offset, size, dir); return dev_addr; } diff 
--git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 50b30ff30de4b45b35f97252d2c0a6b8aa9dd749..96bfdade877ae066a2dd9574b83c414e329d8e26 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -18,6 +18,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -37,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -255,6 +257,38 @@ static inline bool is_el1_permission_fault(unsigned int esr, return false; } +static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + unsigned long flags; + u64 par, dfsc; + + if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) + return false; + + local_irq_save(flags); + asm volatile("at s1e1r, %0" :: "r" (addr)); + isb(); + par = read_sysreg(par_el1); + local_irq_restore(flags); + + /* + * If we now have a valid translation, treat the translation fault as + * spurious. + */ + if (!(par & SYS_PAR_EL1_F)) + return true; + + /* + * If we got a different type of fault from the AT instruction, + * treat the translation fault as spurious. + */ + dfsc = FIELD_GET(SYS_PAR_EL1_FST, par); + return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; +} + static void die_kernel_fault(const char *msg, unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -283,6 +317,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; + if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), + "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) + return; + if (is_el1_permission_fault(esr, regs, addr)) { if (esr & ESR_ELx_WNR) msg = "write to read-only memory"; @@ -376,8 +414,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re } } -#define VM_FAULT_BADMAP 0x010000 -#define VM_FAULT_BADACCESS 0x020000 +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, @@ -428,7 +466,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct mm_struct *mm; struct siginfo si; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (notify_page_fault(regs, esr)) @@ -509,12 +547,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, return 0; } - /* - * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of - * starvation. - */ if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { - mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; mm_flags |= FAULT_FLAG_TRIED; goto retry; } @@ -623,6 +656,152 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; /* "fault" */ } +#ifdef CONFIG_UCE_KERNEL_RECOVERY +int kernel_access_sea_recovery; + +#define UCE_KER_REC_NUM ARRAY_SIZE(reco_info) +/* + * One entry corresponds to one scene, and the scene switch is controlled by the + * corresponding bit of kernel_access_sea_recovery + * (the first entry corresponds to bit0, the second entry corresponds to bit1...), + * and the switch is visible to the user, so the order of eatch entry here cannot + * be easily change. 
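+ * (Each bit enables the reco_info[] entry with the same index below:
+ * bit0 copy_page_cow, bit1 pagecache reads, bit2 copy_from_user,
+ * bit3 get_user, bit4 memcpy_mc. For example, booting with
+ * uce_kernel_recovery=9 turns on the copy-on-write and get_user scenes.)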
Now the maximum entry is limited by the type of variable + * kernel_access_sea_recovery. + */ +static struct uce_kernel_recovery_info reco_info[] = { + {copy_page_cow_sea_fallback, "copy_page_cow", (unsigned long)copy_page_cow, 0, 0}, + {copy_generic_read_sea_fallback, "__arch_copy_to_user_generic_read", (unsigned long)__arch_copy_to_user_generic_read, 0, 0}, + {copy_from_user_sea_fallback, "__arch_copy_from_user", (unsigned long)__arch_copy_from_user, 0, 0}, + {get_user_sea_fallback, "get_user_sea_fallback", (unsigned long)get_user_sea_fallback, 0, KR_SET_TASK_STATE}, + {memcpy_mc_sea_fallback, "memcpy_mc_sea_fallback", (unsigned long)memcpy_mc, 0, 0}, +}; + +static int __init kernel_access_sea_recovery_init(void) +{ + unsigned long addr, size, offset; + unsigned int i; + + for (i = 0; i < UCE_KER_REC_NUM; i++) { + addr = reco_info[i].addr; + if (!kallsyms_lookup_size_offset(addr, &size, &offset)) { + pr_info("UCE: symbol %s lookup addr fail.\n", + reco_info[i].name); + size = 0; + } + + reco_info[i].size = size; + } + + return 1; +} +fs_initcall(kernel_access_sea_recovery_init); + +static int __init enable_kernel_access_sea_recovery(char *str) +{ + int max = (1 << UCE_KER_REC_NUM) - 1; + int val; + + if (kstrtoint(str, 0, &val)) + return -EINVAL; + + if (val < 0 || val > max) { + pr_info("UCE: invalid uce_kernel_recovery value %d", val); + return -EINVAL; + } + + kernel_access_sea_recovery = val; + + return 1; +} +__setup("uce_kernel_recovery=", enable_kernel_access_sea_recovery); + +int is_cow_kernel_recovery_enable(void) +{ + return kernel_access_sea_recovery & 0x1; +} + +int is_pagecache_reading_kernel_recovery_enable(void) +{ + return kernel_access_sea_recovery & 0x2; +} + +inline int is_get_user_kernel_recovery_enable(void) +{ + return kernel_access_sea_recovery & 0x8; +} +EXPORT_SYMBOL(is_get_user_kernel_recovery_enable); +/* + * what is kernel recovery? + * If the process's private data is accessed in the kernel mode to trigger + * special sea fault, it can controlled by killing the process and isolating + * the failure pages instead of die. + */ +static int is_in_kernel_recovery(unsigned int esr, struct pt_regs *regs) +{ + /* + * target insn: ldp-pre, ldp-post, ldp-offset, + * ldr-64bit-pre/pose, ldr-32bit-pre/post, ldrb-pre/post, ldrh-pre/post + */ + u32 target_insn[] = {0xa8c, 0xa9c, 0xa94, 0xf84, 0x784, 0x384, 0xb84}; + void *pc = (void *)instruction_pointer(regs); + struct uce_kernel_recovery_info *info; + bool insn_match = false; + u32 insn; + int i; + + pr_emerg("UCE: %s-%d, kernel recovery: 0x%x, esr: 0x%08x -- %s, %pS\n", + current->comm, current->pid, kernel_access_sea_recovery, esr, + esr_get_class_string(esr), pc); + + if (aarch64_insn_read((void *)pc, &insn)) { + pr_emerg("UCE: insn read fail.\n"); + return -EFAULT; + } + + /* + * We process special ESR: + * EC : 0b100101 Data Abort taken without a change in Exception level. + * DFSC : 0b010000 Synchronous External abort, not on translation table + * walk or hardware update of translation table. 
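+	 * (EC lives in ESR_ELx[31:26] and DFSC in ESR_ELx[5:0]; that is how
+	 * the example value below encodes the two fields described above.)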
+ * eg: 0x96000610 + */ + if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + (esr & ESR_ELx_FSC) != ESR_ELx_FSC_EXTABT) { + pr_emerg("UCE: esr not match.\n"); + return -EINVAL; + } + + insn = (insn >> 20) & 0xffc; + for (i = 0; i < ARRAY_SIZE(target_insn); i++) { + if (insn == target_insn[i]) { + insn_match = true; + break; + } + } + + if (!insn_match) { + pr_emerg("UCE: insn 0x%x is not match.\n", insn); + return -EINVAL; + } + + for (i = 0; i < UCE_KER_REC_NUM; i++) { + if (!((kernel_access_sea_recovery >> i) & 0x1)) + continue; + + info = &reco_info[i]; + if (info->fn && regs->pc >= info->addr && + regs->pc < (info->addr + info->size)) { + pr_emerg("UCE: total match %s success.\n", info->name); + return i; + } + } + + pr_emerg("UCE: symbol is not match or switch if off, kernel recovery %d.\n", + kernel_access_sea_recovery); + return -EINVAL; +} +#endif + static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) { struct siginfo info; @@ -645,15 +824,60 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) nmi_exit(); } - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - if (esr & ESR_ELx_FnV) - info.si_addr = NULL; - else - info.si_addr = (void __user *)addr; - arm64_notify_die(inf->name, regs, &info, esr); + if (user_mode(regs)) { + if (test_thread_flag(TIF_SEA_NOTIFY)) + return 0; + + clear_siginfo(&info); + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + if (esr & ESR_ELx_FnV) + info.si_addr = NULL; + else + info.si_addr = (void __user *)addr; + + arm64_notify_die(inf->name, regs, &info, esr); + } else { +#ifdef CONFIG_UCE_KERNEL_RECOVERY + int idx; + + if (!current->mm || !kernel_access_sea_recovery) { + pr_emerg("UCE: kernel recovery %d, %s-%d is %s-thread.\n", + kernel_access_sea_recovery, + current->comm, current->pid, + (current->mm) ? 
"user" : "kernel"); + die("Uncorrected hardware memory error in kernel-access\n", + regs, esr); + } + + idx = is_in_kernel_recovery(esr, regs); + if (idx >= 0 && idx < UCE_KER_REC_NUM) { + set_thread_flag(TIF_UCE_KERNEL_RECOVERY); + clear_siginfo(&info); + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + info.si_addr = NULL; + + current->thread.fault_address = regs->pc; + current->thread.fault_code = esr; + if (reco_info[idx].flags & KR_SET_TASK_STATE) + current->flags |= PF_UCE_KERNEL_RECOVERY; + regs->pc = (unsigned long)reco_info[idx].fn; + arm64_force_sig_info(&info, + "Uncorrected hardware memory use with kernel recovery in kernel-access\n", + current); + } else { + die("Uncorrected hardware memory error (not match idx or sence switch is off) in kernel-access\n", + regs, esr); + } + +#else + die("Uncorrected hardware memory error in kernel-access\n", + regs, esr); +#endif + } return 0; } @@ -771,7 +995,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, if (addr > TASK_SIZE) arm64_apply_bp_hardening(); - local_irq_enable(); + local_daif_restore(DAIF_PROCCTX); do_mem_abort(addr, esr, regs); } @@ -785,7 +1009,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, if (user_mode(regs)) { if (instruction_pointer(regs) > TASK_SIZE) arm64_apply_bp_hardening(); - local_irq_enable(); + local_daif_restore(DAIF_PROCCTX); } clear_siginfo(&info); @@ -827,13 +1051,47 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } -asmlinkage int __exception do_debug_exception(unsigned long addr, - unsigned int esr, - struct pt_regs *regs) +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static int __exception +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + if (user_mode(regs)) + return 0; + + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) + return 0; + + /* + * We've taken a dummy step exception from the kernel to ensure + * that interrupts are re-enabled on the syscall path. Return back + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions + * masked so that we can safely restore the mdscr and get on with + * handling the syscall. + */ + regs->pstate |= PSR_D_BIT; + return 1; +} +#else +static int __exception +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + return 0; +} +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ + +asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, + unsigned int esr, + struct pt_regs *regs) { const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr); + unsigned long pc = instruction_pointer(regs); int rv; + if (cortex_a76_erratum_1463225_debug_handler(regs)) + return 0; + /* * Tell lockdep we disabled irqs in entry.S. Do nothing if they were * already disabled to preserve the last enabled/disabled addresses. 
@@ -841,10 +1099,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); - if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) + if (user_mode(regs) && pc > TASK_SIZE) arm64_apply_bp_hardening(); - if (!inf->fn(addr, esr, regs)) { + if (!inf->fn(addr_if_watchpoint, esr, regs)) { rv = 1; } else { struct siginfo info; @@ -853,7 +1111,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, info.si_signo = inf->sig; info.si_errno = 0; info.si_code = inf->code; - info.si_addr = (void __user *)addr; + info.si_addr = (void __user *)pc; arm64_notify_die(inf->name, regs, &info, esr); rv = 0; } @@ -864,17 +1122,3 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, return rv; } NOKPROBE_SYMBOL(do_debug_exception); - -#ifdef CONFIG_ARM64_PAN -void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) -{ - /* - * We modify PSTATE. This won't work from irq context as the PSTATE - * is discarded once we return from the exception. - */ - WARN_ON_ONCE(in_interrupt()); - - sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); - asm(SET_PSTATE_PAN(1)); -} -#endif /* CONFIG_ARM64_PAN */ diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 30695a8681074402057f1a357870760d321b950b..5c9073bace83a656e0c2b6cb64d3a5bc62b1462a 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len) __clean_dcache_area_pou(kaddr, len); __flush_icache_all(); } else { - flush_icache_range(addr, addr + len); + /* + * Don't issue kick_all_cpus_sync() after I-cache invalidation + * for user mappings. + */ + __flush_icache_range(addr, addr + len); } } diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index f58ea503ad014fda52fbab06e6edc743551a4b6c..1d765676131666a30db9b5f1b27b7d43f93dcd8e 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -218,6 +218,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, ptep = (pte_t *)pudp; } else if (sz == (PAGE_SIZE * CONT_PTES)) { pmdp = pmd_alloc(mm, pudp, addr); + if (!pmdp) + return NULL; WARN_ON(addr & (sz - 1)); /* diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 787e27964ab9de8658398bba239be5743e2416c7..e3fd8d36c00639aa39dc09514fb85fe5e320b4b5 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -40,6 +40,11 @@ #include #include #include +#include +#include +#ifdef CONFIG_PIN_MEMORY +#include +#endif #include #include @@ -62,6 +67,9 @@ s64 memstart_addr __ro_after_init = -1; phys_addr_t arm64_dma_phys_limit __ro_after_init; +struct res_mem res_mem[MAX_RES_REGIONS]; +int res_mem_count; + #ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) { @@ -80,7 +88,61 @@ static int __init early_initrd(char *p) early_param("initrd", early_initrd); #endif +/* The main usage of linux,usable-memory-range is for crash dump kernel. + * Originally, the number of usable-memory regions is one. Now crash dump + * kernel support at most two crash kernel regions, low_region and high + * region. 
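+ * The two ranges typically correspond to a crashkernel=size,high
+ * reservation plus the low region set up by reserve_crashkernel_low(),
+ * both of which are handled by reserve_crashkernel() below.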
+ */ +#define MAX_USABLE_RANGES 2 + +#ifdef CONFIG_PIN_MEMORY +struct resource pin_memory_resource = { + .name = "Pin memory", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM, + .desc = IOMMU_RESV_RESERVED +}; + +static void __init reserve_pin_memory_res(void) +{ + unsigned long long mem_start, mem_len; + int ret; + + ret = parse_pin_memory(boot_command_line, memblock_phys_mem_size(), + &mem_len, &mem_start); + if (ret || !mem_len) + return; + + mem_len = PAGE_ALIGN(mem_len); + + if (!memblock_is_region_memory(mem_start, mem_len)) { + pr_warn("cannot reserve for pin memory: region is not memory!\n"); + return; + } + + if (memblock_is_region_reserved(mem_start, mem_len)) { + pr_warn("cannot reserve for pin memory: region overlaps reserved memory!\n"); + return; + } + + if (!IS_ALIGNED(mem_start, SZ_2M)) { + pr_warn("cannot reserve for pin memory: base address is not 2MB aligned\n"); + return; + } + + memblock_reserve(mem_start, mem_len); + pin_memory_resource.start = mem_start; + pin_memory_resource.end = mem_start + mem_len - 1; +} +#else +static void __init reserve_pin_memory_res(void) +{ +} +#endif /* CONFIG_PIN_MEMORY */ + #ifdef CONFIG_KEXEC_CORE + /* * reserve_crashkernel() - reserves memory for crash kernel * @@ -91,20 +153,30 @@ early_param("initrd", early_initrd); static void __init reserve_crashkernel(void) { unsigned long long crash_base, crash_size; + bool high = false; int ret; ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); /* no crashkernel= or invalid value specified */ - if (ret || !crash_size) - return; + if (ret || !crash_size) { + /* crashkernel=X,high */ + ret = parse_crashkernel_high(boot_command_line, + memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret || !crash_size) + return; + high = true; + } crash_size = PAGE_ALIGN(crash_size); if (crash_base == 0) { /* Current arm64 boot protocol requires 2MB alignment */ - crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT, - crash_size, SZ_2M); + crash_base = memblock_find_in_range(0, + high ? 
memblock_end_of_DRAM() + : ARCH_LOW_ADDRESS_LIMIT, + crash_size, CRASH_ALIGN); if (crash_base == 0) { pr_warn("cannot allocate crashkernel (size:0x%llx)\n", crash_size); @@ -122,13 +194,18 @@ static void __init reserve_crashkernel(void) return; } - if (!IS_ALIGNED(crash_base, SZ_2M)) { + if (!IS_ALIGNED(crash_base, CRASH_ALIGN)) { pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n"); return; } } memblock_reserve(crash_base, crash_size); + if (crash_base >= SZ_4G && reserve_crashkernel_low()) { + memblock_free(crash_base, crash_size); + return; + } + pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", crash_base, crash_base + crash_size, crash_size >> 20); @@ -166,6 +243,45 @@ static void __init kexec_reserve_crashkres_pages(void) } #endif /* CONFIG_KEXEC_CORE */ +#ifdef CONFIG_QUICK_KEXEC +static int __init parse_quick_kexec(char *p) +{ + if (!p) + return 0; + + quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL)); + + return 0; +} +early_param("quickkexec", parse_quick_kexec); + +static void __init reserve_quick_kexec(void) +{ + unsigned long long mem_start, mem_len; + + mem_len = quick_kexec_res.end; + if (mem_len == 0) + return; + + /* Current arm64 boot protocol requires 2MB alignment */ + mem_start = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT, + mem_len, CRASH_ALIGN); + if (mem_start == 0) { + pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n", + mem_len); + quick_kexec_res.end = 0; + return; + } + + memblock_reserve(mem_start, mem_len); + pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n", + mem_start, mem_start + mem_len, mem_len >> 20); + + quick_kexec_res.start = mem_start; + quick_kexec_res.end = mem_start + mem_len - 1; +} +#endif + #ifdef CONFIG_CRASH_DUMP static int __init early_init_dt_scan_elfcorehdr(unsigned long node, const char *uname, int depth, void *data) @@ -233,8 +349,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; - if (IS_ENABLED(CONFIG_ZONE_DMA32)) - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys()); +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys()); +#endif max_zone_pfns[ZONE_NORMAL] = max; free_area_init_nodes(max_zone_pfns); @@ -291,6 +408,14 @@ int pfn_valid(unsigned long pfn) if ((addr >> PAGE_SHIFT) != pfn) return 0; + +#ifdef CONFIG_SPARSEMEM + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + + if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn)))) + return 0; +#endif return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); @@ -334,9 +459,9 @@ early_param("mem", early_mem); static int __init early_init_dt_scan_usablemem(unsigned long node, const char *uname, int depth, void *data) { - struct memblock_region *usablemem = data; - const __be32 *reg; - int len; + struct memblock_type *usablemem = data; + const __be32 *reg, *endp; + int len, nr = 0; if (depth != 1 || strcmp(uname, "chosen") != 0) return 0; @@ -345,24 +470,170 @@ static int __init early_init_dt_scan_usablemem(unsigned long node, if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells))) return 1; - usablemem->base = dt_mem_next_cell(dt_root_addr_cells, ®); - usablemem->size = dt_mem_next_cell(dt_root_size_cells, ®); + endp = reg + (len / sizeof(__be32)); + while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { + unsigned long base = dt_mem_next_cell(dt_root_addr_cells, ®); + unsigned long size = dt_mem_next_cell(dt_root_size_cells, ®); + + if 
(memblock_add_range(usablemem, base, size, NUMA_NO_NODE, + MEMBLOCK_NONE)) + return 0; + if (++nr >= MAX_USABLE_RANGES) + break; + } return 1; } static void __init fdt_enforce_memory_region(void) { - struct memblock_region reg = { - .size = 0, + struct memblock_region usable_regions[MAX_USABLE_RANGES]; + struct memblock_type usablemem = { + .max = MAX_USABLE_RANGES, + .regions = usable_regions, }; - of_scan_flat_dt(early_init_dt_scan_usablemem, ®); + of_scan_flat_dt(early_init_dt_scan_usablemem, &usablemem); - if (reg.size) - memblock_cap_memory_range(reg.base, reg.size); + if (usablemem.cnt) + memblock_cap_memory_ranges(usablemem.regions, usablemem.cnt); } +static void __init reserve_memmap_mem(void) +{ + u64 base, size; + int i; + + for (i = 0; i < res_mem_count; i++) { + base = res_mem[i].base; + size = res_mem[i].size; + + if (!memblock_is_region_memory(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx is not a memory region - ignore\n", + base, base + size); + res_mem[i].size = 0; + continue; + } + + if (memblock_is_region_reserved(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx overlaps in-use memory region - ignore\n", + base, base + size); + res_mem[i].size = 0; + continue; + } + + if (memblock_reserve(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx failed\n", + base, base + size); + res_mem[i].size = 0; + continue; + } + + pr_info("memmap reserved: 0x%08llx - 0x%08llx (%lld MB)", + base, base + size, size >> 20); + } +} + +static int __init parse_memmap_one(char *p) +{ + char *oldp; + phys_addr_t start_at, mem_size; + + if (!p) + return -EINVAL; + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (!mem_size) + return -EINVAL; + + mem_size = PAGE_ALIGN(mem_size); + + if (*p == '$') { + start_at = memparse(p+1, &p); + if (res_mem_count >= MAX_RES_REGIONS) { + pr_err("Too many memmap specified, exceed %d\n", MAX_RES_REGIONS); + return -EINVAL; + } + res_mem[res_mem_count].base = start_at; + res_mem[res_mem_count].size = mem_size; + res_mem_count++; + } else + pr_info("Unrecognized memmap option, please check the parameter.\n"); + + return *p == '\0' ? 
0 : -EINVAL; +} + +static int __init parse_memmap_opt(char *str) +{ + while (str) { + char *k = strchr(str, ','); + + if (k) + *k++ = 0; + + parse_memmap_one(str); + str = k; + } + + return 0; +} +early_param("memmap", parse_memmap_opt); + +#ifdef CONFIG_ARM64_CPU_PARK +struct cpu_park_info park_info = { + .start = 0, + .len = PARK_SECTION_SIZE * NR_CPUS, + .start_v = 0, +}; + +static int __init parse_park_mem(char *p) +{ + if (!p) + return 0; + + park_info.start = PAGE_ALIGN(memparse(p, NULL)); + if (park_info.start == 0) + pr_info("cpu park mem params[%s]", p); + + return 0; +} +early_param("cpuparkmem", parse_park_mem); + +static int __init reserve_park_mem(void) +{ + if (park_info.start == 0 || park_info.len == 0) + return 0; + + park_info.start = PAGE_ALIGN(park_info.start); + park_info.len = PAGE_ALIGN(park_info.len); + + if (!memblock_is_region_memory(park_info.start, park_info.len)) { + pr_warn("cannot reserve park mem: region is not memory!"); + goto out; + } + + if (memblock_is_region_reserved(park_info.start, park_info.len)) { + pr_warn("cannot reserve park mem: region overlaps reserved memory!"); + goto out; + } + + memblock_remove(park_info.start, park_info.len); + pr_info("cpu park mem reserved: 0x%016lx - 0x%016lx (%ld MB)", + park_info.start, park_info.start + park_info.len, + park_info.len >> 20); + + return 0; +out: + park_info.start = 0; + park_info.len = 0; + return -EINVAL; +} +#endif + void __init arm64_memblock_init(void) { const s64 linear_region_size = -(s64)PAGE_OFFSET; @@ -441,16 +712,19 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - u64 range = linear_region_size - - (memblock_end_of_DRAM() - memblock_start_of_DRAM()); + u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + int parange = cpuid_feature_extract_unsigned_field( + mmfr0, ID_AA64MMFR0_PARANGE_SHIFT); + s64 range = linear_region_size - + BIT(id_aa64mmfr0_parange_to_phys_shift(parange)); /* * If the size of the linear region exceeds, by a sufficient - * margin, the size of the region that the available physical - * memory spans, randomize the linear region as well. + * margin, the size of the region that the physical memory can + * span, randomize the linear region as well. */ - if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { - range = range / ARM64_MEMSTART_ALIGN + 1; + if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) { + range /= ARM64_MEMSTART_ALIGN; memstart_addr -= ARM64_MEMSTART_ALIGN * ((range * memstart_offset_seed) >> 16); } @@ -479,15 +753,38 @@ void __init arm64_memblock_init(void) else arm64_dma_phys_limit = PHYS_MASK + 1; + if (efi_enabled(EFI_MEMMAP)) { + efi_fake_memmap(); + efi_find_mirror(); + } + + reserve_memmap_mem(); + reserve_pin_memory_res(); + + /* + * Reserve park memory before crashkernel and quick kexec. + * Because park memory must be specified by address, but + * crashkernel and quickkexec may be specified by memory length, + * then find one sutiable memory region to reserve. + * + * So reserve park memory firstly is better, but it may cause + * crashkernel or quickkexec reserving failed. 
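+	 *
+	 * For example (addresses purely illustrative), booting with
+	 *   memmap=64M$0x2080000000 cpuparkmem=0x2100000000 crashkernel=512M
+	 * reserves the fixed-address memmap and cpu park regions first and
+	 * then lets the length-only crashkernel request search for a
+	 * suitable range.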
+ */ +#ifdef CONFIG_ARM64_CPU_PARK + reserve_park_mem(); +#endif + reserve_crashkernel(); +#ifdef CONFIG_QUICK_KEXEC + reserve_quick_kexec(); +#endif + reserve_elfcorehdr(); high_memory = __va(memblock_end_of_DRAM() - 1) + 1; dma_contiguous_reserve(arm64_dma_phys_limit); - - memblock_allow_resize(); } void __init bootmem_init(void) @@ -603,6 +900,12 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ free_all_bootmem(); +#ifdef CONFIG_PIN_MEMORY + /* pre alloc the pages for pin memory */ + init_reserve_page_map((unsigned long)pin_memory_resource.start, + (unsigned long)(pin_memory_resource.end - pin_memory_resource.start)); +#endif + kexec_reserve_crashkres_pages(); mem_init_print_info(NULL); @@ -667,6 +970,72 @@ static int __init keepinitrd_setup(char *__unused) __setup("keepinitrd", keepinitrd_setup); #endif +#ifdef CONFIG_ASCEND_FEATURES + +#include +#ifdef CONFIG_CORELOCKUP_DETECTOR +#include +#endif + +void ascend_enable_all_features(void) +{ +#ifdef CONFIG_GPIO_DWAPB + extern bool enable_ascend_gpio_dwapb; + + enable_ascend_gpio_dwapb = true; +#endif + + if (IS_ENABLED(CONFIG_ASCEND_DVPP_MMAP)) + enable_mmap_dvpp = 1; + + if (IS_ENABLED(CONFIG_ASCEND_IOPF_HIPRI)) + enable_iopf_hipri = 1; + + if (IS_ENABLED(CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES)) + enable_charge_mighp = 1; + + if (IS_ENABLED(CONFIG_SUSPEND)) + mem_sleep_current = PM_SUSPEND_ON; + + if (IS_ENABLED(CONFIG_PMU_WATCHDOG)) + pmu_nmi_enable = true; + + if (IS_ENABLED(CONFIG_MEMCG_KMEM)) { + extern bool cgroup_memory_nokmem; + cgroup_memory_nokmem = false; + } + +#ifdef CONFIG_ARM64_PSEUDO_NMI + enable_pseudo_nmi = true; +#endif + +#ifdef CONFIG_CORELOCKUP_DETECTOR + enable_corelockup_detector = true; +#endif +} + +static int __init ascend_enable_setup(char *__unused) +{ + ascend_enable_all_features(); + + return 1; +} + +early_param("ascend_enable_all", ascend_enable_setup); + +static int __init ascend_mini_enable_setup(char *s) +{ +#ifdef CONFIG_GPIO_DWAPB + extern bool enable_ascend_mini_gpio_dwapb; + + enable_ascend_mini_gpio_dwapb = true; +#endif + return 1; +} +__setup("ascend_mini_enable", ascend_mini_enable_setup); +#endif + + /* * Dump out memory limit information on panic. 
*/ diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 12145874c02b8c8a3f82b4dac954e73521085dd7..af7fdd95f378f833517100a311b745e12ec5efbb 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -40,7 +40,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) { void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_ACCESSIBLE, node); + MEMBLOCK_ALLOC_KASAN, node); return __pa(p); } diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 842c8a5fcd53c0f5573bdf79c072c671c441ae54..87f29df8126ba231a996de98e3d39d3af5040b24 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -54,7 +55,7 @@ unsigned long arch_mmap_rnd(void) unsigned long rnd; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_32BIT)) + if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif @@ -65,7 +66,11 @@ unsigned long arch_mmap_rnd(void) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; - unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap; + unsigned long pad = stack_guard_gap; + + /* Account for stack randomization if necessary */ + if (current->flags & PF_RANDOMIZE) + pad += (STACK_RND_MASK << PAGE_SHIFT); /* Values close to RLIM_INFINITY can overflow. */ if (gap + pad > gap) @@ -76,7 +81,10 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(STACK_TOP - gap - rnd); + if (sp_is_enabled()) + return ALIGN_DOWN(MMAP_SHARE_POOL_START - rnd, PAGE_SIZE); + else + return PAGE_ALIGN(STACK_TOP - gap - rnd); } /* diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8080c9f489c3e43af385066514f3f60cca629141..fa53bee52f87ca8e6a5f48006ebf53b7d754fd70 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -541,6 +541,8 @@ early_param("rodata", parse_rodata); #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static int __init map_entry_trampoline(void) { + int i; + pgprot_t prot = rodata_enabled ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); @@ -549,11 +551,15 @@ static int __init map_entry_trampoline(void) /* Map only the text into the trampoline page table */ memset(tramp_pg_dir, 0, PGD_SIZE); - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, - prot, pgd_pgtable_alloc, 0); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, pgd_pgtable_alloc, + 0); /* Map both the text and data into the kernel page table */ - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern char __entry_tramp_data_start[]; @@ -657,6 +663,8 @@ void __init paging_init(void) memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE, __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir) - PAGE_SIZE); + + memblock_allow_resize(); } /* @@ -919,17 +927,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) return dt_virt; } -int __init arch_ioremap_pud_supported(void) -{ - /* only 4k granule supports level 1 block mappings */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES); -} - -int __init arch_ioremap_pmd_supported(void) -{ - return 1; -} - int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | @@ -1028,3 +1025,35 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) pmd_free(NULL, table); return 1; } + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, + bool want_memblock) +{ + int flags = 0; + + if (debug_pagealloc_enabled()) + flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), + size, PAGE_KERNEL, pgd_pgtable_alloc, flags); + + return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, + altmap, want_memblock); +} +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + /* + * FIXME: Cleanup page tables (also in arch_add_memory() in case + * adding fails). Until then, this function should only be used + * during memory hotplug (adding memory), not for memory + * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be + * unlocked yet. + */ + __remove_pages(start_pfn, nr_pages, altmap); +} +#endif diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 146c04ceaa514bace2f7aaa49936ca5ca8865741..60bdaaf95b901ca159503cea4403f68354bf0d1c 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -37,6 +37,179 @@ static int numa_distance_cnt; static u8 *numa_distance; bool numa_off; +#ifdef CONFIG_COHERENT_DEVICE +nodemask_t __cacheline_aligned cdmmask; + +inline int arch_check_node_cdm(int nid) +{ + return node_isset(nid, cdmmask); +} + +#ifdef CONFIG_ASCEND_CLEAN_CDM + +#define MAX_PARTATION_COUNT 8 +#define MAX_CDM_PER_PATRATION 8 + +/* + * Here we provide a way to precisely specify the target node to which we want + * to move the kernel structrue for a cdm node, instead of guessing the hardware + * topologies. Even the node isn't a cdm node, the movement could be reasonable. + * Suppose a node is designed to be used only by some certern processes and + * devices, the kernel structure of that node could be overwritten by a broken + * process. 
+ * + * A possible configure in bootargs: + * cdm_move_map=0,2,3,6;1,4,5,7 + * That means to move the kernel structure for node 2,3,6 to node 0 and kernel + * structure for node 4,5,7 to node 1. + */ +static bool cdm_to_ddr_hardcode = true; +static int cdm_to_ddr_map[MAX_PARTATION_COUNT][MAX_CDM_PER_PATRATION + 1]; + +static int __init cdm_to_ddr_parse_param(char *str) +{ + int i, j; + char *p, *n; + + cdm_to_ddr_hardcode = false; + for (i = 0; i < MAX_PARTATION_COUNT; i++) + for (j = 0; j < MAX_CDM_PER_PATRATION + 1; j++) + cdm_to_ddr_map[i][j] = -1; + + for (p = n = str, i = 0; strsep(&p, ";"); i++, n = p) { + char *s = n; + + for (j = 0; strsep(&n, ","); j++, s = n) { + int err; + unsigned long long nid; + + if (j >= MAX_CDM_PER_PATRATION + 1) { + pr_warn("the cdm nodes in this partation is more than supported\n"); + break; + } + + err = kstrtoull(s, 0, &nid); + if (err) { + pr_err("bootargs for cdm_move_map invalid, %d\n", + err); + return err; + } + + cdm_to_ddr_map[i][j] = (int)nid; + if (j > 0) + pr_info("node %d moved to node %d\n", + cdm_to_ddr_map[i][j], + cdm_to_ddr_map[i][0]); + } + } + + return 0; +} +early_param("cdm_move_map", cdm_to_ddr_parse_param); + +static int __init cdm_node_to_ddr_node_mapped(int nid) +{ + int i, j; + + for (i = 0; i < MAX_PARTATION_COUNT; i++) { + if (cdm_to_ddr_map[i][0] == -1) + break; + for (j = 1; j < MAX_CDM_PER_PATRATION + 1; j++) { + if (cdm_to_ddr_map[i][j] == -1) + break; + else if (cdm_to_ddr_map[i][j] == nid) + return cdm_to_ddr_map[i][0]; + } + } + + return nid; +} + +/** + * cdm_node_to_ddr_node - Convert the cdm node to the ddr node of the + * same partion. + * @nid: input node ID + * + * Here is a typical memory topology in usage. + * There are some DDR and HBM in each partion and DDRs present at first, then + * come all the HBMs of the first partion, then HBMs of the second partion, etc. + * + * ------------------------- + * | P0 | P1 | + * ----------- | ----------- + * |node0 DDR| | |node1 DDR| + * |---------- | ----------| + * |node2 HBM| | |node4 HBM| + * |---------- | ----------| + * |node3 HBM| | |node5 HBM| + * |---------- | ----------| + * | ... | | | ... | + * |---------- | ----------| + * + * Return: + * This function returns a ddr node which is of the same partion with the input + * node if the input node is a HBM node. + * The input nid is returned if it is a DDR node or if the memory topology of + * the system doesn't apply to the above model. + */ +int __init cdm_node_to_ddr_node(int nid) +{ + nodemask_t ddr_mask; + int nr_ddr, cdm_per_part, fake_nid; + int nr_cdm = nodes_weight(cdmmask); + /* + * Specify the count of hbm nodes whoes management structrue would be + * moved. Here number 2 is a magic and we should make it configable + * for extending + */ + int hbm_per_part = 2; + + if (!cdm_to_ddr_hardcode) + return cdm_node_to_ddr_node_mapped(nid); + + if (!nr_cdm || nodes_empty(numa_nodes_parsed)) + return nid; + + if (!node_isset(nid, cdmmask)) + return nid; + + nodes_xor(ddr_mask, cdmmask, numa_nodes_parsed); + nr_ddr = nodes_weight(ddr_mask); + cdm_per_part = nr_cdm / nr_ddr; + + if (cdm_per_part == 0 || nid < nr_ddr || + nid >= (hbm_per_part + 1) * nr_ddr) + /* our assumption has borken, just return the original nid. */ + return nid; + + fake_nid = (nid - nr_ddr) / hbm_per_part; + fake_nid = !node_isset(fake_nid, cdmmask) ? 
fake_nid : nid; + + pr_info("nid: %d, fake_nid: %d\n", nid, fake_nid); + + return fake_nid; +} +#endif + +static int __init cdm_nodes_setup(char *s) +{ + int nid; + unsigned long tmpmask; + int err; + + err = kstrtoul(s, 0, &tmpmask); + if (err) + return err; + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + if ((tmpmask >> nid) & 1) + node_set(nid, cdmmask); + } + return 0; +} +early_param("cdm-nodes", cdm_nodes_setup); +#endif + static __init int numa_parse_early_param(char *opt) { if (!opt) @@ -58,7 +231,11 @@ EXPORT_SYMBOL(node_to_cpumask_map); */ const struct cpumask *cpumask_of_node(int node) { - if (WARN_ON(node >= nr_node_ids)) + + if (node == NUMA_NO_NODE) + return cpu_all_mask; + + if (WARN_ON(node < 0 || node >= nr_node_ids)) return cpu_none_mask; if (WARN_ON(node_to_cpumask_map[node] == NULL)) @@ -233,11 +410,12 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) u64 nd_pa; void *nd; int tnid; + int fake_nid = cdm_node_to_ddr_node(nid); if (start_pfn >= end_pfn) pr_info("Initmem setup node %d []\n", nid); - nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, fake_nid); nd = __va(nd_pa); /* report and initialize */ @@ -391,7 +569,6 @@ static int __init numa_init(int (*init_func)(void)) nodes_clear(numa_nodes_parsed); nodes_clear(node_possible_map); nodes_clear(node_online_map); - numa_free_distance(); ret = numa_alloc_distance(); if (ret < 0) @@ -399,20 +576,24 @@ static int __init numa_init(int (*init_func)(void)) ret = init_func(); if (ret < 0) - return ret; + goto out_free_distance; if (nodes_empty(numa_nodes_parsed)) { pr_info("No NUMA configuration found\n"); - return -EINVAL; + ret = -EINVAL; + goto out_free_distance; } ret = numa_register_nodes(); if (ret < 0) - return ret; + goto out_free_distance; setup_node_to_cpumask_map(); return 0; +out_free_distance: + numa_free_distance(); + return ret; } /** @@ -432,7 +613,7 @@ static int __init dummy_numa_init(void) if (numa_off) pr_info("NUMA disabled\n"); /* Forced off on command line. */ pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", - 0LLU, PFN_PHYS(max_pfn) - 1); + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); for_each_memblock(memory, mblk) { ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); @@ -464,3 +645,13 @@ void __init arm64_numa_init(void) numa_init(dummy_numa_init); } + +/* + * We hope that we will be hotplugging memory on nodes we already know about, + * such that acpi_get_node() succeeds and we never fall back to this... + */ +int memory_add_physaddr_to_nid(u64 addr) +{ + pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr); + return 0; +} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index a56359373d8b3592e6cde6891d9b44206bd96137..3e398d078706a9fddb0817519673498bfdde9624 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -87,8 +87,16 @@ static int change_memory_common(unsigned long addr, int numpages, area = find_vm_area((void *)addr); if (!area || end > (unsigned long)area->addr + area->size || - !(area->flags & VM_ALLOC)) + !(area->flags & VM_ALLOC)) { + /* + * When pagealloc debug is enabled, the linear address is + * mapped with NO_BLOCK_MAPPINGS and NO_CONT_MAPPINGS flags. 
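+		 * Since those linear addresses are then mapped at page
+		 * granularity, it is safe to fall through to
+		 * __change_memory_common() for them instead of returning
+		 * -EINVAL.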
+ */ + if (numpages && debug_pagealloc_enabled()) + return __change_memory_common(start, size, + set_mask, clear_mask); return -EINVAL; + } if (!numpages) return 0; diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 03646e6a2ef4f240412d1eb62a1cbc27d04705b0..9ff213bb584e4d3343c86f889052a6da0bfaad20 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -49,17 +49,6 @@ #define MAIR(attr, mt) ((attr) << ((mt) * 8)) -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - */ -ENTRY(cpu_do_idle) - dsb sy // WFI may enter a low-power mode - wfi - ret -ENDPROC(cpu_do_idle) - #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context @@ -70,24 +59,25 @@ ENTRY(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 - mrs x5, cpacr_el1 - mrs x6, tcr_el1 - mrs x7, vbar_el1 - mrs x8, mdscr_el1 - mrs x9, oslsr_el1 - mrs x10, sctlr_el1 + mrs x5, osdlr_el1 + mrs x6, cpacr_el1 + mrs x7, tcr_el1 + mrs x8, vbar_el1 + mrs x9, mdscr_el1 + mrs x10, oslsr_el1 + mrs x11, sctlr_el1 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x11, tpidr_el1 + mrs x12, tpidr_el1 alternative_else - mrs x11, tpidr_el2 + mrs x12, tpidr_el2 alternative_endif - mrs x12, sp_el0 + mrs x13, sp_el0 stp x2, x3, [x0] - stp x4, xzr, [x0, #16] - stp x5, x6, [x0, #32] - stp x7, x8, [x0, #48] - stp x9, x10, [x0, #64] - stp x11, x12, [x0, #80] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + stp x12, x13, [x0, #80] ret ENDPROC(cpu_do_suspend) @@ -110,8 +100,8 @@ ENTRY(cpu_do_resume) msr cpacr_el1, x6 /* Don't change t0sz here, mask those bits when restoring */ - mrs x5, tcr_el1 - bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + mrs x7, tcr_el1 + bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH msr tcr_el1, x8 msr vbar_el1, x9 @@ -135,6 +125,7 @@ alternative_endif /* * Restore oslsr_el1 by writing oslar_el1 */ + msr osdlr_el1, x5 ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 @@ -292,6 +283,15 @@ skip_pgd: msr sctlr_el1, x18 isb + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. + */ + ic iallu + dsb nsh + isb + /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index 783de51a6c4e5b33725d4904be6b09011dd48605..76606e87233f38c8b59e64d2c0cc1f5cffc3dd7a 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -100,11 +100,9 @@ #define A64_STXR(sf, Rt, Rn, Rs) \ A64_LSX(sf, Rt, Rn, Rs, STORE_EX) -/* Prefetch */ -#define A64_PRFM(Rn, type, target, policy) \ - aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \ - AARCH64_INSN_PRFM_TARGET_##target, \ - AARCH64_INSN_PRFM_POLICY_##policy) +/* LSE atomics */ +#define A64_STADD(sf, Rn, Rs) \ + aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf)) /* Add/subtract (immediate) */ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a6fdaea07c6339cf2754d582765747ee5d8b2ff5..04d82cf2d4617e427ecbc3de247bfbde8fdd6bb9 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx) * >0 - successfully JITed a 16-byte eBPF instruction. * <0 - failed to JIT. 
*/ -static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, + bool extra_pass) { const u8 code = insn->code; const u8 dst = bpf2a64[insn->dst_reg]; @@ -364,7 +365,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; const bool isdw = BPF_SIZE(code) == BPF_DW; - u8 jmp_cond; + u8 jmp_cond, reg; s32 jmp_offset; #define check_imm(bits, imm) do { \ @@ -625,12 +626,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_JMP | BPF_CALL: { const u8 r0 = bpf2a64[BPF_REG_0]; - const u64 func = (u64)__bpf_call_base + imm; + bool func_addr_fixed; + u64 func_addr; + int ret; - if (ctx->prog->is_func) - emit_addr_mov_i64(tmp, func, ctx); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + if (func_addr_fixed) + /* We can use optimized emission here. */ + emit_a64_mov_i64(tmp, func_addr, ctx); else - emit_a64_mov_i64(tmp, func, ctx); + emit_addr_mov_i64(tmp, func_addr, ctx); emit(A64_BLR(tmp), ctx); emit(A64_MOV(1, r0, A64_R(0)), ctx); break; @@ -685,6 +693,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + /* + * Nothing required here. + * + * In case of arm64, we rely on the firmware mitigation of + * Speculative Store Bypass as controlled via the ssbd kernel + * parameter. Whenever the mitigation is enabled, it works + * for all of the kernel code with no need to provide any + * additional instructions. + */ + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: @@ -730,19 +751,28 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; } break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_W: /* STX XADD: lock *(u64 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_DW: - emit_a64_mov_i(1, tmp, off, ctx); - emit(A64_ADD(1, tmp, tmp, dst), ctx); - emit(A64_PRFM(tmp, PST, L1, STRM), ctx); - emit(A64_LDXR(isdw, tmp2, tmp), ctx); - emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); - emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx); - jmp_offset = -3; - check_imm19(jmp_offset); - emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); + if (!off) { + reg = dst; + } else { + emit_a64_mov_i(1, tmp, off, ctx); + emit(A64_ADD(1, tmp, tmp, dst), ctx); + reg = tmp; + } + if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) { + emit(A64_STADD(isdw, reg, src), ctx); + } else { + emit(A64_LDXR(isdw, tmp2, reg), ctx); + emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); + emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx); + jmp_offset = -3; + check_imm19(jmp_offset); + emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); + } break; default: @@ -753,7 +783,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) return 0; } -static int build_body(struct jit_ctx *ctx) +static int build_body(struct jit_ctx *ctx, bool extra_pass) { const struct bpf_prog *prog = ctx->prog; int i; @@ -762,7 +792,7 @@ static int build_body(struct jit_ctx *ctx) const struct bpf_insn *insn = &prog->insnsi[i]; int ret; - ret = build_insn(insn, ctx); + ret = build_insn(insn, ctx, extra_pass); if (ret > 0) { i++; if (ctx->image == NULL) @@ -858,7 +888,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* 1. Initial fake pass to compute ctx->idx. 
*/ /* Fake pass to fill in ctx->offset. */ - if (build_body(&ctx)) { + if (build_body(&ctx, extra_pass)) { prog = orig_prog; goto out_off; } @@ -888,7 +918,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) build_prologue(&ctx, was_classic); - if (build_body(&ctx)) { + if (build_body(&ctx, extra_pass)) { bpf_jit_binary_free(header); prog = orig_prog; goto out_off; diff --git a/arch/c6x/include/asm/vmalloc.h b/arch/c6x/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..26c6c6696bbd95a6a248edb9457d22a5885c2b2b --- /dev/null +++ b/arch/c6x/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_C6X_VMALLOC_H +#define _ASM_C6X_VMALLOC_H + +#endif /* _ASM_C6X_VMALLOC_H */ diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h index 0d2daf7f9809c37e68c2bdb7c49cb270be1f298f..df40dc128ba924bf51fd961d27f4a9b1c92ce592 100644 --- a/arch/c6x/include/uapi/asm/unistd.h +++ b/arch/c6x/include/uapi/asm/unistd.h @@ -16,6 +16,7 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_CLONE /* Use the standard ABI for syscalls. */ diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index 3c4bb5a5c3820a1d3ad7b7a1ffef55f6206ff838..33b9f69c38f7ba8f95aa5e07e259c3d343bd0c97 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -80,7 +80,7 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs) frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; @@ -149,7 +149,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= __put_user(&frame->info, &frame->pinfo); diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..43dca6336b4c67f4fdfcc93eb731d9cdc873efbb --- /dev/null +++ b/arch/csky/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_CSKY_VMALLOC_H +#define _ASM_CSKY_VMALLOC_H + +#endif /* _ASM_CSKY_VMALLOC_H */ diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 0b334b671e90c95f817a8638a89655f7f717c4d8..4b0b4d4c947fa77392363be60c4d5c2e4963c584 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config H8300 def_bool y + select ARCH_32BIT_OFF_T select GENERIC_ATOMIC64 select HAVE_UID16 select VIRT_TO_BUS diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile index 58634e6bae92893aedbf4172940f9e56ee97ec27..55f251810129e426dcf70de2c3eb93bba030f27a 100644 --- a/arch/h8300/Makefile +++ b/arch/h8300/Makefile @@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y) CHECKFLAGS += -msize-long ifeq ($(CROSS_COMPILE),) -CROSS_COMPILE := h8300-unknown-linux- +CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-) endif core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ diff --git a/arch/h8300/include/asm/vmalloc.h b/arch/h8300/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..08a55c1dfa23c581f6cdd5b3aae656f2a7066b4c --- /dev/null +++ b/arch/h8300/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_H8300_VMALLOC_H +#define _ASM_H8300_VMALLOC_H + +#endif /* _ASM_H8300_VMALLOC_H */ diff --git 
a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h index 7dd20ef7625adeeba03fba80cde6aaa70d91e3c2..2f98394b77d4fc7cdf7f246a42f1466b11aeb027 100644 --- a/arch/h8300/include/uapi/asm/unistd.h +++ b/arch/h8300/include/uapi/asm/unistd.h @@ -1,5 +1,6 @@ #define __ARCH_NOMMU #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #include diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 1e8070d08770a0cfaa0a4c05c48148ece129de0b..e0f2b708e5d9dbb6dd92cc7ac59c1f0a24411c96 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -110,7 +110,7 @@ asmlinkage int sys_rt_sigreturn(void) sigset_t set; int er0; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; @@ -165,7 +165,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (ksig->ka.sa.sa_flags & SA_SIGINFO) diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 89a4b22f34d9bdf3de94236bbb0fd5b1447fb7af..1c7a4582c3ce51ec1dc209303f9ffb69a5f23fac 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon" config HEXAGON def_bool y + select ARCH_32BIT_OFF_T select ARCH_NO_PREEMPT select HAVE_OPROFILE # Other pending projects/to-do items. diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index c889f5993ecd35f1646e06fa1fab846860655ac6..cb635216a732c98c2d05e965762696543d7ec3e4 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -77,7 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, int prev; int ret; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 458b69886b3452fc66d3a33957285645db1c70e0..a30e58d5f3516cce39fd35caec420603e508a45b 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -29,9 +29,6 @@ /* * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. 
* @addr: User space pointer to start of block to check * @size: Size of block to check * diff --git a/arch/hexagon/include/asm/vmalloc.h b/arch/hexagon/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..7b04609e525c4b97418ae2727f970972a05aa628 --- /dev/null +++ b/arch/hexagon/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_HEXAGON_VMALLOC_H +#define _ASM_HEXAGON_VMALLOC_H + +#endif /* _ASM_HEXAGON_VMALLOC_H */ diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h index ea181e79162e7a14051e565f40caa2d77d4657d0..c9eb56ed574484f5adc4918ccc04f57f18d1668f 100644 --- a/arch/hexagon/include/uapi/asm/unistd.h +++ b/arch/hexagon/include/uapi/asm/unistd.h @@ -29,6 +29,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index 78aa7304a5c9f4ac6ddf97343e2a37f29df67a8d..31e2cf95f189c303cc4039753fb26fdd4db7505a 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -115,7 +115,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe))) + if (!access_ok(frame, sizeof(struct rt_sigframe))) return -EFAULT; if (copy_siginfo_to_user(&frame->info, &ksig->info)) @@ -244,7 +244,7 @@ asmlinkage int sys_rt_sigreturn(void) current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *)pt_psp(regs); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked))) goto badframe; diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c index c599eb126c9e7be9a3324744297f562ad743e97a..6f9c4697552cc302ab09a564175c4ffdead5830d 100644 --- a/arch/hexagon/mm/uaccess.c +++ b/arch/hexagon/mm/uaccess.c @@ -51,7 +51,7 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count) unsigned long clear_user_hexagon(void __user *dest, unsigned long count) { - if (!access_ok(VERIFY_WRITE, dest, count)) + if (!access_ok(dest, count)) return count; else return __clear_user_hexagon(dest, count); diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index eb263e61daf40d5e238e63ef1432d2460d4ec5aa..38190cfb105e15a1c38558c225b23c4d36d54e59 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -115,7 +115,6 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; goto retry; } diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index db2dd85918c2923ce2877e76eed8d7d693885b2a..2e106d46219650bd8093c1100279aff3a73c7e2a 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -86,7 +86,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; { diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index 516355a774bfe89b2dc8ce6413aa0f3a8e1e71c0..5d032d97c254e5e53878f67518c7a3c9a2e727c6 100644 --- a/arch/ia64/include/asm/tlb.h +++ 
b/arch/ia64/include/asm/tlb.h @@ -268,6 +268,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre tlb->end_addr = address + PAGE_SIZE; } +static inline void +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, + unsigned long size) +{ + if (tlb->start_addr > address) + tlb->start_addr = address; + if (tlb->end_addr < address + size) + tlb->end_addr = address + size; +} + #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm) #define tlb_start_vma(tlb, vma) do { } while (0) diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index a74524f2d625f2f6669fafc9f9eead31b78a15ec..306d469e43da6d45e5b7bb2b5aac58f387fd9955 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -67,7 +67,7 @@ static inline int __access_ok(const void __user *p, unsigned long size) return likely(addr <= seg) && (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); } -#define access_ok(type, addr, size) __access_ok((addr), (size)) +#define access_ok(addr, size) __access_ok((addr), (size)) /* * These are the main single-value transfer routines. They automatically diff --git a/arch/ia64/include/asm/vmalloc.h b/arch/ia64/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..a2b51141ad2850c65c4bbd6606aebaa96d8e0d17 --- /dev/null +++ b/arch/ia64/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_IA64_VMALLOC_H +#define _ASM_IA64_VMALLOC_H + +#endif /* _ASM_IA64_VMALLOC_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 1dacbf5e9e09a5a71f9e21bd5fd88eed8199e823..faf5d81ecbdbbb30b7842cdb37dd0062dad024bf 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -177,7 +177,7 @@ struct acpi_table_madt *acpi_madt __initdata; static u8 has_8259; static int __init -acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, +acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic; @@ -195,7 +195,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, } static int __init -acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_sapic *lsapic; @@ -216,7 +216,7 @@ acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end) } static int __init -acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lacpi_nmi; @@ -230,7 +230,7 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e } static int __init -acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_io_sapic *iosapic; @@ -245,7 +245,7 @@ acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end static unsigned int __initdata acpi_madt_rev; static int __init -acpi_parse_plat_int_src(struct acpi_subtable_header * header, +acpi_parse_plat_int_src(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_source *plintsrc; @@ -329,7 +329,7 @@ unsigned int get_cpei_target_cpu(void) } static int __init -acpi_parse_int_src_ovr(struct acpi_subtable_header * header, 
+acpi_parse_int_src_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_override *p; @@ -350,7 +350,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, } static int __init -acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src; @@ -537,7 +537,8 @@ void __init acpi_numa_fixup(void) if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); - return; + node_distance(0, 0) = LOCAL_DISTANCE; + goto out; } /* @@ -580,7 +581,7 @@ void __init acpi_numa_fixup(void) for (j = 0; j < MAX_NUMNODES; j++) node_distance(i, j) = i == j ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return; + goto out; } memset(numa_slit, -1, sizeof(numa_slit)); @@ -605,6 +606,8 @@ void __init acpi_numa_fixup(void) printk("\n"); } #endif +out: + node_possible_map = node_online_map; } #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 326448f9df16068392e49af096afd1c8b2713131..1a42ba885188a5c8e865bb28108c685ed1a37515 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo void module_arch_cleanup (struct module *mod) { - if (mod->arch.init_unw_table) + if (mod->arch.init_unw_table) { unw_remove_unwind_table(mod->arch.init_unw_table); - if (mod->arch.core_unw_table) + mod->arch.init_unw_table = NULL; + } + if (mod->arch.core_unw_table) { unw_remove_unwind_table(mod->arch.core_unw_table); + mod->arch.core_unw_table = NULL; + } } void *dereference_module_function_descriptor(struct module *mod, void *ptr) diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 427cd565fd61d38429c9ed66cb9b2534a21ee7c3..6d50ede0ed691ca1899540722e65edb3cf896510 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -836,7 +836,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) char nat = 0; int i; - if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs))) + if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) return -EIO; pt = task_pt_regs(child); @@ -981,7 +981,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) memset(&fpval, 0, sizeof(fpval)); - if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs))) + if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) return -EIO; pt = task_pt_regs(child); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index d1234a5ba4c55e79a1b43366b4093925ad085698..b08f6c2e24555945b12f0adc18a58bcd6b299142 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -133,7 +133,7 @@ ia64_rt_sigreturn (struct sigscratch *scr) */ retval = (long) &ia64_strace_leave_kernel; - if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) + if (!access_ok(sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) @@ -302,7 +302,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) { return force_sigsegv_info(ksig->sig, frame); err = __put_user(ksig->sig, &frame->arg0); diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 
1928d5719e417d5b5899a7c2b48c73a3da3294bd..32c16a68815dbfd1925f522a5bbca573f925aabb 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -631,12 +631,11 @@ void __init paging_init(void) zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } -#ifdef CONFIG_MEMORY_HOTPLUG -pg_data_t *arch_alloc_nodedata(int nid) +pg_data_t * __init arch_alloc_nodedata(int nid) { unsigned long size = compute_pernodesize(nid); - return kzalloc(size, GFP_KERNEL); + return _va(memblock_alloc(size, SMP_CACHE_BYTES)); } void arch_free_nodedata(pg_data_t *pgdat) @@ -649,7 +648,6 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) pgdat_list[update_node] = update_pgdat; scatter_node_data(); } -#endif #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index a9d55ad8d67be8e1bb4ef0c0010cda5742637a43..49bb7f9885e8e42c8cb447956093fb3716131091 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -189,7 +189,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 3b85c3ecac38d2ae0860bc0d5aedd79f1cc96a39..561e2573bd34c53fef4ace83ebb3eb82063d262b 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); return 0; } @@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, - NULL); + memmap_init_zone(size, nid, zone, start_pfn, + MEMINIT_EARLY, NULL); } else { struct page *start; struct memmap_init_callback_data args; @@ -661,21 +661,12 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, return ret; } -#ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - int ret; - - zone = page_zone(pfn_to_page(start_pfn)); - ret = __remove_pages(zone, start_pfn, nr_pages, altmap); - if (ret) - pr_warn("%s: Problem encountered in __remove_pages() as" - " ret=%d\n", __func__, ret); - return ret; + __remove_pages(start_pfn, nr_pages, altmap); } #endif -#endif diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index aa19b7ac8222a2fb604b90fc9edd3d3b86ef483c..476c7b4be3787ed0e1c07c1a6965f637ec28b227 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr) return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? 
-1 : 0); } +EXPORT_SYMBOL(paddr_to_nid); #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) /* diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 070553791e9774e035f7add07c782c2d3b89f0d1..bfcf1fa1497db1e9bc0efae12511467eec397ac4 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -2,6 +2,7 @@ config M68K bool default y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_COHERENT_DMA_MMAP if !MMU diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 997c9f20ea0f937667031760193efe02fec00faf..4474e51ee53e16cb4b3efaaff10557c8cd24fe90 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200) cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200) KBUILD_AFLAGS += $(cpuflags-y) -KBUILD_CFLAGS += $(cpuflags-y) -pipe +KBUILD_CFLAGS += $(cpuflags-y) + +KBUILD_CFLAGS += -pipe -ffreestanding + ifdef CONFIG_MMU # without -fno-strength-reduce the 53c7xx.c driver fails ;-( KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2 diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 1d5483f6e457b0a0fe62d3722e1a0e3377063823..85904b73e261c53af6fe66bcc3b5b54012e02d42 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -657,7 +656,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 52a0af127951f5f0ed11e9022822fe6992d1140c..9b3818bbb68b681e08f534085e0e5a333cd5a1a3 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -614,7 +613,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index b3103e51268a31759ae841a28559720a6f1aadac..769677809945d493ea0f27ea37a015ecc074df92 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -635,7 +634,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index fb7d651a4cabe203b02e455f55d0dff0e19b1e55..7dd264ddf2eabb5d4e29cd42fc07c9855c74da78 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m 
CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 6b37f5537c3905c5e96b8d65a7653acc490abc65..515f7439c755353c512202c5fb81e09827b25e6f 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -616,7 +615,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index c717bf8794492114c79d8dd5b3448567c663a690..8e1038ceb407a54c3299a7bffb8235a48bea7ed5 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -638,7 +637,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 226c994ce794f6acff23ed1f5f20cffa0cc1ef8c..62c8aaa15cc76e4d9dfd61ba061b9ab651ae8ee5 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -720,7 +719,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index b383327fd77a9411922848696e9f299a0ba8909d..733973f912974b571655454178c7fb4ceb5a93cb 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 9783d3deb9e9d8525ba864ca9eeeae02ca6a46cb..fee30cc9ac16235208955c1d746f88ec23f516e7 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m 
CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index a35d10ee10cb709b9e16b27f97d0f6144b9692b1..eebf9c9088e74b5e623dbed25894ccdd466928cb 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -629,7 +628,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 573bf922d44823b767db2208204d07907a9cce5b..dabc54318c09ba082e791df7786527bd2dfeaa50 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -607,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index efb27a7fcc559d5bb778998330d90006a46f9b0b..0d9a5c2a311a30a3dd294fa6b671bba4542bf444 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -608,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index 9000b249d225e97bbc1cd4af21c50f327493e624..407a617fa3a2bca1d9dbe85675a7bfefddda0f95 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -22,7 +22,6 @@ #include #include -#include #include extern u_long atari_mch_cookie; @@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present; */ -#define atari_readb raw_inb -#define atari_writeb raw_outb - -#define atari_inb_p raw_inb -#define atari_outb_p raw_outb - - - #include #include diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index 782b78f8a04890b315685b0ea78cab16a2d219af..e056feabbaf0b38ff08442edccb8e92ba524fef6 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -29,7 +29,11 @@ #include #ifdef CONFIG_ATARI -#include +#define atari_readb raw_inb +#define atari_writeb raw_outb + +#define atari_inb_p raw_inb +#define atari_outb_p raw_outb #endif diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 08cee11180e6998d2069b1591e78ad57288f9134..e441517785fda77020b6297826ddc33038fbf7b1 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -4,6 +4,7 @@ #include #include +#include #include diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 6181e4134483c26aa1a34d55e4b316ddad98f5f5..fe3ddd73a0ccb9e4fec24425164cc8c6c7f477bc 100644 --- 
a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -55,12 +55,12 @@ */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index c4cb889660aa0c3582d59f0f40428fec718d92a3..7e85de984df197aebb6c56debfe56dbf1f3fc7fe 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -10,7 +10,7 @@ #include /* We let the MMU do all checking */ -static inline int access_ok(int type, const void __user *addr, +static inline int access_ok(const void __user *addr, unsigned long size) { return 1; diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index 892efb56beef81b184a8cf6e21f9cd998d613eb4..0134008bf539b8fc8f0c1a46b20052eca638b7be 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -10,7 +10,7 @@ #include -#define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) +#define access_ok(addr,size) _access_ok((unsigned long)(addr),(size)) /* * It is not enough to just have access_ok check for a real RAM address. diff --git a/arch/m68k/include/asm/vmalloc.h b/arch/m68k/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..bc1dca6cf134f66a3a5d46258f1fb5fa27167622 --- /dev/null +++ b/arch/m68k/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_M68K_VMALLOC_H +#define _ASM_M68K_VMALLOC_H + +#endif /* _ASM_M68K_VMALLOC_H */ diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 97cd3ea5f10b837949b124024951912d6973e8f6..f834a403e4d570169e3f238c940aa42f89892fb2 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -422,7 +422,9 @@ resume: movec %a0,%dfc /* restore status register */ - movew %a1@(TASK_THREAD+THREAD_SR),%sr + movew %a1@(TASK_THREAD+THREAD_SR),%d0 + oriw #0x0700,%d0 + movew %d0,%sr rts diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 5d3596c180f9f75efab10fa416199e554fba3794..de44899c0e6172957a8d88d7ac6df9aee5c439cb 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -165,8 +165,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) be32_to_cpu(m->addr); m68k_memory[m68k_num_memory].size = be32_to_cpu(m->size); - memblock_add(m68k_memory[m68k_num_memory].addr, - m68k_memory[m68k_num_memory].size); m68k_num_memory++; } else pr_warn("%s: too many memory chunks\n", diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 72850b85ecf859a94bdc97b0043df7ccf42b5848..e2a9421c57975034e55695dd273cb42228723773 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -787,7 +787,7 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); sigset_t set; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.sc_mask) || (_NSIG_WORDS > 1 && @@ -812,7 +812,7 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); sigset_t set; - if (!access_ok(VERIFY_READ, 
frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c index b29c3b241e1bb590eba500918e39e4ce9bc59796..10708287706451e4d7801a1ad334e7523b3cc259 100644 --- a/arch/m68k/kernel/uboot.c +++ b/arch/m68k/kernel/uboot.c @@ -102,5 +102,5 @@ __init void process_uboot_commandline(char *commandp, int size) } parse_uboot_commandline(commandp, len); - commandp[size - 1] = 0; + commandp[len - 1] = 0; } diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 9b6163c05a754f8d86e6508eba4fd77483f71892..ec7728cc8e38892592cbc2109515ba0621ef957c 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -162,9 +162,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 4e17ecb5928aae85155c9f6a5b0b0704a24b1100..2eb2b31fb16a6ccad8b8493035dd516f5a2541bf 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -228,6 +228,7 @@ void __init paging_init(void) min_addr = m68k_memory[0].addr; max_addr = min_addr + m68k_memory[0].size; + memblock_add(m68k_memory[0].addr, m68k_memory[0].size); for (i = 1; i < m68k_num_memory;) { if (m68k_memory[i].addr < min_addr) { printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n", @@ -238,6 +239,7 @@ void __init paging_init(void) (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue; } + memblock_add(m68k_memory[i].addr, m68k_memory[i].size); addr = m68k_memory[i].addr + m68k_memory[i].size; if (addr > max_addr) max_addr = addr; diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index ace5c5bf18361f52ca438f5a2b8da073abd05403..8dc00891256d21c3223bc9d7b1d0b5adf46cde3c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,5 +1,6 @@ config MICROBLAZE def_bool y + select ARCH_32BIT_OFF_T select ARCH_NO_SWAP select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU @@ -7,7 +8,7 @@ config MICROBLAZE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_WANT_IPC_PARSE_VERSION - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 4f3ab5707265204f309b31695cb3a60a4f7a8d1c..548bac6c60f8ccd18216e58f99197034c313b41a 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -83,19 +83,21 @@ archclean: linux.bin linux.bin.gz linux.bin.ub: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' simpleImage.%: vmlinux - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + $(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip) + @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')' define archhelp echo '* linux.bin - Create raw binary' echo ' linux.bin.gz - Create compressed raw binary' echo ' linux.bin.ub - Create U-Boot wrapped raw binary' - echo ' simpleImage.
<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' - echo ' - stripped elf with fdt blob' - echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob' - echo ' *_defconfig - Select default config from arch/microblaze/configs' - echo '' + echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in' + echo ' simpleImage.<dt> : raw image' + echo ' simpleImage.<dt>.ub : raw image with U-Boot header' + echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)' + echo ' simpleImage.<dt>.strip : stripped ELF' echo ' Targets with <dt> embed a device tree blob inside the image' echo ' These targets support board with firmware that does not' echo ' support passing a device tree directly. Replace <dt>
with the' diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 600e5a198bd2aa509ecf577081f3fa4ce2d82657..cff570a719461ffd34ffec2e93d0ddaea0776de6 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -3,38 +3,33 @@ # arch/microblaze/boot/Makefile # -targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.% +targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.* OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary $(obj)/linux.bin: vmlinux FORCE $(call if_changed,objcopy) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' $(obj)/linux.bin.ub: $(obj)/linux.bin FORCE $(call if_changed,uimage) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE $(call if_changed,gzip) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' - -quiet_cmd_cp = CP $< $@$2 - cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) quiet_cmd_strip = STRIP $< $@$2 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ -K _fdt_start $< -o $@$2 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) -UIMAGE_IN = $@ -UIMAGE_OUT = $@.ub -$(obj)/simpleImage.%: vmlinux FORCE - $(call if_changed,cp,.unstrip) +$(obj)/simpleImage.$(DTB): vmlinux FORCE $(call if_changed,objcopy) + +$(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE $(call if_changed,uimage) - $(call if_changed,strip,.strip) - @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' -clean-files += simpleImage.*.unstrip linux.bin.ub +$(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE + $(call if_changed,shipped) + +$(obj)/simpleImage.$(DTB).strip: vmlinux FORCE + $(call if_changed,strip) diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index 2572077b04eaa48db06861d63a4322e4922dbd7f..8c90357e59831230afd31893ee7afe792fc1f832 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -71,7 +71,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0, cmp; u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ("1: lwx %1, %3, r0; \ diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 7b650ab14fa089f22cf2953c135fdcff08da7e9a..2ca598534cc7b40751cbde3b38951eb7d1c58618 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -63,7 +63,7 @@ extern int mem_init_done; #include -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 81f16aadbf9ea8fd61df1c91df7bfd64ccf843c3..dbfea093a7c7db596bd2e108a0a47220b7698457 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -60,26 +60,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size) #define __range_ok(addr, size) \ ___range_ok((unsigned long)(addr), (unsigned long)(size)) -#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) +#define access_ok(addr, size) (__range_ok((addr), (size)) == 0) #else -static inline int access_ok(int type, const void __user *addr, - unsigned long size) +static inline int access_ok(const void __user *addr, unsigned long size) { if (!size) goto ok; if ((get_fs().seg < ((unsigned long)addr)) || (get_fs().seg < ((unsigned long)addr + size - 1))) { - pr_devel("ACCESS fail: %s at 0x%08x (size 
0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + pr_devel("ACCESS fail at 0x%08x (size 0x%x), seg 0x%08x\n", + (__force u32)addr, (u32)size, (u32)get_fs().seg); return 0; } ok: - pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", - type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, + pr_devel("ACCESS OK at 0x%08x (size 0x%x), seg 0x%08x\n", + (__force u32)addr, (u32)size, (u32)get_fs().seg); return 1; } @@ -120,7 +119,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); - if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + if (unlikely(!access_ok(to, n))) return n; return __clear_user(to, n); @@ -174,7 +173,7 @@ extern long __user_bad(void); const typeof(*(ptr)) __user *__gu_addr = (ptr); \ int __gu_err = 0; \ \ - if (access_ok(VERIFY_READ, __gu_addr, size)) { \ + if (access_ok(__gu_addr, size)) { \ switch (size) { \ case 1: \ __get_user_asm("lbu", __gu_addr, __gu_val, \ @@ -286,7 +285,7 @@ extern long __user_bad(void); typeof(*(ptr)) __user *__pu_addr = (ptr); \ int __pu_err = 0; \ \ - if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \ + if (access_ok(__pu_addr, size)) { \ switch (size) { \ case 1: \ __put_user_asm("sb", __pu_addr, __pu_val, \ @@ -358,7 +357,7 @@ extern int __strncpy_user(char *to, const char __user *from, int len); static inline long strncpy_from_user(char *dst, const char __user *src, long count) { - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return -EFAULT; return __strncpy_user(dst, src, count); } @@ -372,7 +371,7 @@ extern int __strnlen_user(const char __user *sstr, int len); static inline long strnlen_user(const char __user *src, long n) { - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return 0; return __strnlen_user(src, n); } diff --git a/arch/microblaze/include/asm/vmalloc.h b/arch/microblaze/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..04013a42b0fec87c7c28a75b06b3858818bf2afe --- /dev/null +++ b/arch/microblaze/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_MICROBLAZE_VMALLOC_H +#define _ASM_MICROBLAZE_VMALLOC_H + +#endif /* _ASM_MICROBLAZE_VMALLOC_H */ diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index d57563c58a26be43672098d9895d9107c945e8d8..224eea40e1ee805fa15d56f33e16b248fbaec30a 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c @@ -22,8 +22,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - int faulted, err; - struct ftrace_graph_ent trace; + int faulted; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) return; } - err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); - if (err == -EBUSY) { + if (function_graph_enter(old, self_addr, 0, NULL)) *parent = old; - return; - } - - trace.func = self_addr; - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; - *parent = old; - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 97001524ca2d8e3062a51e4a536ad4d1d18a702f..0685696349bb4415a4c4f1c6d9d3249c54be501f 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -91,7 +91,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) /* Always make any pending 
restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) @@ -166,7 +166,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (ksig->ka.sa.sa_flags & SA_SIGINFO) diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 202ad6a494f595cf9185b12a9aadd887c9bdab96..ed7e518d94652de7d0077adea59d062ed941f770 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -236,7 +236,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 35511999156af4a2d3559b3ac9d6c0038630451e..d2fefde97d086c7409e760451e5d88ccdd9c4faf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2,6 +2,7 @@ config MIPS bool default y + select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK @@ -13,7 +14,7 @@ config MIPS select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select CPU_PM if CPU_IDLE select DMA_DIRECT_OPS @@ -794,6 +795,7 @@ config SIBYTE_SWARM select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_LITTLESUR bool "Sibyte BCM91250C2-LittleSur" @@ -805,6 +807,7 @@ config SIBYTE_LITTLESUR select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN + select ZONE_DMA32 if 64BIT config SIBYTE_SENTOSA bool "Sibyte BCM91250E-Sentosa" @@ -814,6 +817,7 @@ config SIBYTE_SENTOSA select SYS_HAS_CPU_SB1 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_BIGSUR bool "Sibyte BCM91480B-BigSur" @@ -826,6 +830,7 @@ config SIBYTE_BIGSUR select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT + select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SNI_RM bool "SNI RM200/300/400" @@ -3149,6 +3154,7 @@ config MIPS32_O32 config MIPS32_N32 bool "Kernel support for n32 binaries" depends on 64BIT + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select COMPAT select MIPS32_COMPAT select SYSVIPC_COMPAT if SYSVIPC diff --git a/arch/mips/Makefile b/arch/mips/Makefile index d74b3742fa5d8d38d0b3995d988119b8c8b15cf5..ad0a92f95af108ce86c0f852723f04e6c7e52632 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -16,6 +16,7 @@ archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs KBUILD_DEFCONFIG := 32r2el_defconfig +KBUILD_DTBS := dtbs # # Select the object file format to substitute into the linker script. 
@@ -385,7 +386,7 @@ quiet_cmd_64 = OBJCOPY $@ vmlinux.64: vmlinux $(call cmd,64) -all: $(all-y) +all: $(all-y) $(KBUILD_DTBS) # boot $(boot-y): $(vmlinux-32) FORCE diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608eec038e1bbd49599bc783270aa09a..fe3773539effe61e638ec7e277783ac24b022563 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -173,6 +173,31 @@ void __init plat_mem_setup(void) pm_power_off = bcm47xx_machine_halt; } +#ifdef CONFIG_BCM47XX_BCMA +static struct device * __init bcm47xx_setup_device(void) +{ + struct device *dev; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + err = dev_set_name(dev, "bcm47xx_soc"); + if (err) { + pr_err("Failed to set SoC device name: %d\n", err); + kfree(dev); + return NULL; + } + + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (err) + pr_err("Failed to set SoC DMA mask: %d\n", err); + + return dev; +} +#endif + /* * This finishes bus initialization doing things that were not possible without * kmalloc. Make sure to call it late enough (after mm_init). @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { int err; + bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); + if (!bcm47xx_bus.bcma.dev) + panic("Failed to setup SoC device\n"); + err = bcma_host_soc_init(&bcm47xx_bus.bcma); if (err) panic("Failed to initialize BCMA bus (err %d)", err); @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: + if (device_register(bcm47xx_bus.bcma.dev)) + pr_err("Failed to register SoC device\n"); bcma_bus_register(&bcm47xx_bus.bcma.bus); break; #endif diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c index 1a8a07e7a5633d0c92056989ab1dfddbc834615e..46eddbec8d9fdec090ee273e1e5ba3cd573ba612 100644 --- a/arch/mips/bcm47xx/workarounds.c +++ b/arch/mips/bcm47xx/workarounds.c @@ -5,9 +5,8 @@ #include #include -static void __init bcm47xx_workarounds_netgear_wnr3500l(void) +static void __init bcm47xx_workarounds_enable_usb_power(int usb_power) { - const int usb_power = 12; int err; err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power"); @@ -23,7 +22,10 @@ void __init bcm47xx_workarounds(void) switch (board) { case BCM47XX_BOARD_NETGEAR_WNR3500L: - bcm47xx_workarounds_netgear_wnr3500l(); + bcm47xx_workarounds_enable_usb_power(12); + break; + case BCM47XX_BOARD_NETGEAR_WNDR3400_V3: + bcm47xx_workarounds_enable_usb_power(21); break; default: /* No workaround(s) needed */ diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 07b4c65a88a43708467626b410baf79936bac203..8e73d65f348064792e5a1515ddcd7809b12da2c8 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { static int shared_device_registered; +static u64 enet_dmamask = DMA_BIT_MASK(32); + static struct resource enet0_res[] = { { .start = -1, /* filled at runtime */ @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { .resource = enet0_res, .dev = { .platform_data = &enet0_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { .resource = enet1_res, .dev = { .platform_data = &enet1_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -157,6 +163,8 @@ static struct 
platform_device bcm63xx_enetsw_device = { .resource = enetsw_res, .dev = { .platform_data = &enetsw_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index 7019e2967009e98e6f6191c1e86969d4663a957f..bbbf8057565b236e69561439f8326106204ff82b 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -84,7 +84,7 @@ void __init prom_init(void) * Here we will start up CPU1 in the background and ask it to * reconfigure itself then go back to sleep. */ - memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20); + memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20); __sync(); set_c0_cause(C_SW0); cpumask_set_cpu(1, &bmips_booted_mask); diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index a2af38cf28a701b7cd3f3d2f1c8f5da3d391ba82..64574e74cb236a3124c4f2d0b9c279ce036309aa 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -120,7 +120,7 @@ #define BCM6368_RESET_DSL 0 #define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK #define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK -#define BCM6368_RESET_ENETSW 0 +#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK #define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK #define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK #define BCM6368_RESET_PCIE 0 diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 3c453a1f1ff10c218e809ec1f5efe2bb70b1ef7d..172801ed35b89994f6a52e492a7c486cb517d821 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE $(call if_changed,objcopy) +HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE) + # Calculate the load address of the compressed kernel image hostprogs-y := calc_vmlinuz_load_addr diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 37fe58c19a90f96feede3390b5f4374aeebbf7ac..d14f75ec827323702d61d6ef6eb2871ecfda3513 100644 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -13,6 +13,7 @@ #include #include #include +#include int main(int argc, char *argv[]) { @@ -45,11 +46,11 @@ int main(int argc, char *argv[]) vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size; /* - * Align with 16 bytes: "greater than that used for any standard data - * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition). + * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE, + * which may be as large as 64KB depending on the kernel configuration. 
*/ - vmlinuz_load_addr += (16 - vmlinux_size % 16); + vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K); printf("0x%llx\n", vmlinuz_load_addr); diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 65af3f6ba81c3a6515e4e9a199916c9eff131d87..84328afa3a55c2ab83b23cf344aceda4a07d55f6 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -141,6 +141,12 @@ #size-cells = <2>; #interrupt-cells = <1>; + eg20t_phub@2,0,0 { + compatible = "pci8086,8801"; + reg = <0x00020000 0 0 0 0>; + intel,eg20t-prefetch = <0>; + }; + eg20t_mac@2,0,1 { compatible = "pci8086,8802"; reg = <0x00020100 0 0 0 0>; diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6de6386d11b07cdbe5a10b60ef976f..4f7b1fa31cf53f04517e51a72a9e522487454550 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -76,7 +76,7 @@ status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&pins_uart2>; + pinctrl-0 = <&pins_uart3>; }; &uart4 { @@ -196,9 +196,9 @@ bias-disable; }; - pins_uart2: uart2 { - function = "uart2"; - groups = "uart2-data", "uart2-hwflow"; + pins_uart3: uart3 { + function = "uart3"; + groups = "uart3-data", "uart3-hwflow"; bias-disable; }; diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi index 2bae201aa365106ac262865b0ba407fe36905599..1c7bf11f8450b42f0c2bb032b901fc5124060fa5 100644 --- a/arch/mips/boot/dts/qca/ar9331.dtsi +++ b/arch/mips/boot/dts/qca/ar9331.dtsi @@ -99,7 +99,7 @@ miscintc: interrupt-controller@18060010 { compatible = "qca,ar7240-misc-intc"; - reg = <0x18060010 0x4>; + reg = <0x18060010 0x8>; interrupt-parent = <&cpuintc>; interrupts = <6>; diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c index 8241fc6aa17d8668c21373febf27960e8b495e57..3839feba68f2043c99ad4ed46a24698e7ec5780f 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c +++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c @@ -266,7 +266,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id) } else { union cvmx_pko_mem_debug8 debug8; debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); - return debug8.cn58xx.doorbell; + return debug8.cn50xx.doorbell; } case CVMX_CMD_QUEUE_ZIP: case CVMX_CMD_QUEUE_DFA: diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 75108ec669ebc881c6949962ef61f6368c4a814a..3ddbb98dff848f81ac7f61fd7a7bfbbc1b272f65 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port, void (*cvmx_override_ipd_port_setup) (int ipd_port); /* Port count per interface */ -static int interface_port_count[5]; +static int interface_port_count[9]; /** * Return the number of interfaces the chip has. Each interface @@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface) case 3: return CVMX_HELPER_INTERFACE_MODE_LOOP; case 4: - return CVMX_HELPER_INTERFACE_MODE_RGMII; + /* TODO: Implement support for AGL (RGMII). 
*/ + return CVMX_HELPER_INTERFACE_MODE_DISABLED; default: return CVMX_HELPER_INTERFACE_MODE_DISABLED; } diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 807cadaf554e2e0e3d5665c95c8d2cb19ce1d705..5ba181e87d2c1ff2dab04190f700c7db9bcbf8af 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -501,7 +501,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr) if (phy_addr >= 256 && alt_phy > 0) { const struct fdt_property *phy_prop; struct fdt_property *alt_prop; - u32 phy_handle_name; + fdt32_t phy_handle_name; /* Use the alt phy node instead.*/ phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL); diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig index 951c4231bdb85a414d9ca14b1ee15fe953452cd0..4c47b3fd958b6cfa86097b3262ec6f3d8237cba6 100644 --- a/arch/mips/configs/ath79_defconfig +++ b/arch/mips/configs/ath79_defconfig @@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AR933X=y CONFIG_SERIAL_AR933X_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 490b12af103c1285043ecfec912b1839c5586f06..c52d0efacd1466f0320a025d3519ffb8ba212a09 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y CONFIG_OCTEON_ETHERNET=y +CONFIG_OCTEON_USB=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_RAS=y CONFIG_EXT4_FS=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 55438fc9991ec53715b392f074dbb3d864dc197c..e5976b2972fb08d9966bb235f17ed17c3ca60486 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -73,7 +73,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -104,7 +103,6 @@ CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m -CONFIG_DECNET=m CONFIG_LLC2=m CONFIG_IPX=m CONFIG_ATALK=m diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 9ad1c94376c8d46a84793bc1048025887a359926..b6a9a3e2b29a21d9c7dd7aa8933acf981dcaa952 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -120,7 +120,6 @@ CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -142,7 +141,6 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE=m -CONFIG_DECNET=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index c3d0d0a6e04483247baeec72c6704966a4ee559c..87c0b7a349290914fa55698f9b0970894fb699c5 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -108,7 +108,6 @@ CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -139,7 +138,6 @@ 
CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m -CONFIG_DECNET=m CONFIG_LLC2=m CONFIG_IPX=m CONFIG_ATALK=m @@ -623,7 +621,6 @@ CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_ADUTUX=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_CYPRESS_CY7C63=m diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index e8e1dd8e0e99cd525dff3e5b2f9bd8de413461a4..8a13ae190245cd0827f3bd194d8927de1c9acef6 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig @@ -217,7 +217,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -252,7 +251,6 @@ CONFIG_ATM_BR2684=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y -CONFIG_DECNET=m CONFIG_LLC2=m CONFIG_IPX=m CONFIG_ATALK=m diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index c4477a4d40c119b5ad053cdca84de40a9996fc32..be1927e157a6e87c7e375e790379f87349cff22b 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -198,7 +198,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -233,7 +232,6 @@ CONFIG_ATM_BR2684=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y -CONFIG_DECNET=m CONFIG_LLC2=m CONFIG_IPX=m CONFIG_ATALK=m diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 5f71aa598b06df7d00038a5062a95e7e5dadfe42..fd25d461534dea9e349bead7c09e0e869d31fc5e 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -129,7 +129,6 @@ CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m -CONFIG_DECNET_NF_GRABULATOR=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m @@ -151,7 +150,6 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_ULOG=m CONFIG_BRIDGE=m -CONFIG_DECNET=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m @@ -335,7 +333,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_CYTHERM=m diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c index 8772617b64cefec0835523fe107a2febdaa778ab..80112f2298b68cce9ba80e994167eaec0c435098 100644 --- a/arch/mips/fw/sni/sniprom.c +++ b/arch/mips/fw/sni/sniprom.c @@ -43,7 +43,7 @@ /* O32 stack has to be 8-byte aligned. 
*/ static u64 o32_stk[4096]; -#define O32_STK &o32_stk[sizeof(o32_stk)] +#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)]) #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \ __asm__(#fun " = call_o32") diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index d4ea7a5b60cf469772ed1651fd58d1f7010402e4..9e805317847d8864e300538cab4bbe1da40a3415 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ { \ long result; \ \ - if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + if (kernel_uses_llsc) { \ long temp; \ \ __asm__ __volatile__( \ diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index bf6a8afd7ad2783dff63cb55954a2e34d8ca6e79..581a6a3c66e405c6ea23cdfefc45690a2fe43209 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void) #endif } -extern char bmips_reset_nmi_vec; -extern char bmips_reset_nmi_vec_end; -extern char bmips_smp_movevec; -extern char bmips_smp_int_vec; -extern char bmips_smp_int_vec_end; +extern char bmips_reset_nmi_vec[]; +extern char bmips_reset_nmi_vec_end[]; +extern char bmips_smp_movevec[]; +extern char bmips_smp_int_vec[]; +extern char bmips_smp_int_vec_end[]; extern int bmips_smp_enabled; extern int bmips_cpu_offset; diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index e8161e4dfde7039a80cb3875bba7b9415520c67b..dcebaaf8c862497342631d469163b39fea9c41b9 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -63,7 +63,7 @@ static inline __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(VERIFY_READ, src, len)) + if (access_ok(src, len)) return csum_partial_copy_from_user(src, dst, len, sum, err_ptr); if (len) @@ -81,7 +81,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr) { might_fault(); - if (access_ok(VERIFY_WRITE, dst, len)) { + if (access_ok(dst, len)) { if (uaccess_kernel()) return __csum_partial_copy_kernel(src, (__force void *)dst, diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 89e9fb7976fe61e3671100f7d2dae00b3d59bb5f..520ca166cbed572f8c75d0a333740616aa2d3adc 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -73,8 +73,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void) extern unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size); -static inline unsigned long __xchg(volatile void *ptr, unsigned long x, - int size) +static __always_inline +unsigned long __xchg(volatile void *ptr, unsigned long x, int size) { switch (size) { case 1: @@ -146,8 +146,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size); -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, - unsigned long new, unsigned int size) +static __always_inline +unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, unsigned int size) { switch (size) { case 1: diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 0edba3e757471b74d441fa520c3ab44084c5f630..4e2ee743088fd4deedadfa4f780595b6ce4d0729 100644 --- 
a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -387,6 +387,22 @@ #define cpu_has_dsp3 __ase(MIPS_ASE_DSP3) #endif +#ifndef cpu_has_loongson_mmi +#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI) +#endif + +#ifndef cpu_has_loongson_cam +#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM) +#endif + +#ifndef cpu_has_loongson_ext +#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT) +#endif + +#ifndef cpu_has_loongson_ext2 +#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2) +#endif + #ifndef cpu_has_mipsmt #define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT) #endif diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a41059d47d31c52d377aa39694a0da3ba3e07af5..ed7ffe4e63a32edbe4c793936beed17626e7d305 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -50,7 +50,7 @@ struct guest_info { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned long asid_cache; + u64 asid_cache; #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE unsigned long asid_mask; #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index dacbdb84516a09134896edd704d129b4f5f516f7..2b4b14a56575ca005bd315f63d21198b3b14340e 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -436,5 +436,9 @@ enum cpu_type_enum { #define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */ #define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/ #define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */ +#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */ +#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */ +#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */ +#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */ #endif /* _ASM_CPU_H */ diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index a9e61ea54ca96ba69fa49f009d4fc98ff9cb1bdf..a767aacacc91d8a0fc4cff6b2dd109406e0df8a4 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -129,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; if (cpu_has_llsc && R10000_LLSC_WAR) { diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e77672539e8ed8f6744c03d49eaeb20c76d80b78..e4456e450f946d5c9c55b52d78aeee60d3a2a0e2 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -21,15 +21,15 @@ #endif #ifdef CONFIG_CPU_MICROMIPS -#define NOP_INSN "nop32" +#define B_INSN "b32" #else -#define NOP_INSN "nop" +#define B_INSN "b" #endif static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm_volatile_goto("1:\t" NOP_INSN "\n\t" - "nop\n\t" + asm_volatile_goto("1:\t" B_INSN " 2f\n\t" + "2:\tnop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h index 493a3cc7c39ad5a412d6d460061b740260dfff9c..cfdbe66575f4d82012d6350bfad2a1c624639e71 100644 --- a/arch/mips/include/asm/kexec.h +++ b/arch/mips/include/asm/kexec.h @@ -12,11 +12,11 @@ #include /* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address 
we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) /* Maximum address we can use for the control code buffer */ -#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) /* Reserve 3*4096 bytes for board-specific info */ #define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2c1c53d12179302140d3576dddd11a732a5b13d9..f567ace7a9e91f0a73623fa2f7ff821ae9354a54 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h index c2917b39966bf18797b6e4885d5f713b52e2b11d..bba2c883795163a43056c61946285eddd80cf282 100644 --- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h +++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h @@ -27,8 +27,8 @@ #define AR933X_UART_CS_PARITY_S 0 #define AR933X_UART_CS_PARITY_M 0x3 #define AR933X_UART_CS_PARITY_NONE 0 -#define AR933X_UART_CS_PARITY_ODD 1 -#define AR933X_UART_CS_PARITY_EVEN 2 +#define AR933X_UART_CS_PARITY_ODD 2 +#define AR933X_UART_CS_PARITY_EVEN 3 #define AR933X_UART_CS_IF_MODE_S 2 #define AR933X_UART_CS_IF_MODE_M 0x3 #define AR933X_UART_CS_IF_MODE_NONE 0 diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h index e9cc62cfac99d8520ccf9f992c32608199ef17e6..ff50aeb1a933fc2a4003aedf9672f25e0ab0a0af 100644 --- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h +++ b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h @@ -4,8 +4,6 @@ struct jz4740_mmc_platform_data { int gpio_power; - int gpio_card_detect; - int gpio_read_only; unsigned card_detect_active_low:1; unsigned read_only_active_low:1; unsigned power_active_low:1; diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h index 3644b68c0cccdd2a8f8f34ee0490b90133ab2128..be9f727a932803d50b4a379f8a8f83372c3b2197 100644 --- a/arch/mips/include/asm/mach-loongson64/irq.h +++ b/arch/mips/include/asm/mach-loongson64/irq.h @@ -10,7 +10,7 @@ #define MIPS_CPU_IRQ_BASE 56 #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */ -#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */ +#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */ #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */ #define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index c9f7e231e66bbe78a86412516bee7301b5cf8027..59c8b11c090ee22114a259be0a7629d3a67e918a 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -21,6 +21,7 @@ #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL #define 
pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT) #define LEVELS_PER_SLICE 128 diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h index 558059a8f2189fc720e15519b8e32ba7e8d6362e..0277b56157af8a4c04b428f56a2d2391a95e285d 100644 --- a/arch/mips/include/asm/mips-gic.h +++ b/arch/mips/include/asm/mips-gic.h @@ -314,6 +314,36 @@ static inline bool mips_gic_present(void) return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base; } +/** + * mips_gic_vx_map_reg() - Return GIC_Vx__MAP register offset + * @intr: A GIC local interrupt + * + * Determine the index of the GIC_VL__MAP or GIC_VO__MAP register + * within the block of GIC map registers. This is almost the same as the order + * of interrupts in the pending & mask registers, as used by enum + * mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the + * interrupts after it... + * + * Return: The map register index corresponding to @intr. + * + * The return value is suitable for use with the (read|write)_gic_v[lo]_map + * accessor functions. + */ +static inline unsigned int +mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr) +{ + /* WD, Compare & Timer are 1:1 */ + if (intr <= GIC_LOCAL_INT_TIMER) + return intr; + + /* FDC moves to after Timer... */ + if (intr == GIC_LOCAL_INT_FDC) + return GIC_LOCAL_INT_TIMER + 1; + + /* As a result everything else is offset by 1 */ + return intr + 1; +} + /** * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq * diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 01df9ad62fb83d3b8e50006b342a6f182f6fc862..1bb9448777c5c59ec9a4332bdfb63feb70f1b3ee 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -688,6 +688,9 @@ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +/* Ingenic Config7 bits */ +#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4) + /* Config7 Bits specific to MIPS Technologies. */ /* Performance counters implemented Per TC */ @@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) +__BUILD_SET_C0(config7) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 0740be7d5d4ac02a67206bf165825c497565910f..24d6b42345fb8ab800f2337447c897ecebfe4e2e 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,7 +7,7 @@ #include typedef struct { - unsigned long asid[NR_CPUS]; + u64 asid[NR_CPUS]; void *vdso; atomic_t fp_mode_switching; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 94414561de0e7b9c46d6d6af17c42de6e8519ab6..a589585be21be2111bc6e002f165884e3a42dfad 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -76,14 +76,14 @@ extern unsigned long pgd_current[]; * All unused by hardware upper bits will be considered * as a software asid extension. 
*/ -static unsigned long asid_version_mask(unsigned int cpu) +static inline u64 asid_version_mask(unsigned int cpu) { unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); - return ~(asid_mask | (asid_mask - 1)); + return ~(u64)(asid_mask | (asid_mask - 1)); } -static unsigned long asid_first_version(unsigned int cpu) +static inline u64 asid_first_version(unsigned int cpu) { return ~asid_version_mask(cpu) + 1; } @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { - unsigned long asid = asid_cache(cpu); + u64 asid = asid_cache(cpu); if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { if (cpu_has_vtag_icache) flush_icache_all(); local_flush_tlb_all(); /* start new asid cycle */ - if (!asid) /* fix version if needed */ - asid = asid_first_version(cpu); } cpu_context(cpu, mm) = asid_cache(cpu) = asid; diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index f085fba41da501b729da0d419cca6d91a6fceb4a..b826b8473e956ad2657f31ee25aeafb7c14037ec 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -7,7 +7,18 @@ #define _ASM_MMZONE_H_ #include -#include + +#ifdef CONFIG_NEED_MULTIPLE_NODES +# include +#endif + +#ifndef pa_to_nid +#define pa_to_nid(addr) 0 +#endif + +#ifndef nid_to_addrbase +#define nid_to_addrbase(nid) 0 +#endif #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h index 5f47f76ed510a53dd760bedf911d3d1414d8a4de..20eb9c46a75ab18dbb2fec8b0a67d7dfb1af7300 100644 --- a/arch/mips/include/asm/octeon/cvmx-pko.h +++ b/arch/mips/include/asm/octeon/cvmx-pko.h @@ -611,7 +611,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num); cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64); debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); - status->doorbell = debug8.cn58xx.doorbell; + status->doorbell = debug8.cn50xx.doorbell; } } diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 0036ea0c717354b4b79689c896885a1a040e0097..813dfe5f45a5988efe94636d717ef5db00c744ed 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -18,10 +18,12 @@ #include #define __ARCH_USE_5LEVEL_HACK -#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) +#if CONFIG_PGTABLE_LEVELS == 2 #include -#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48)) +#elif CONFIG_PGTABLE_LEVELS == 3 #include +#else +#include #endif /* @@ -216,6 +218,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) return pgd_val(pgd); } +#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd)) +#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT)) + static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) { return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); @@ -265,6 +270,11 @@ static inline int pmd_bad(pmd_t pmd) static inline int pmd_present(pmd_t pmd) { +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT + if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) + return pmd_val(pmd) & _PAGE_PRESENT; +#endif + return pmd_val(pmd) != (unsigned long) invalid_pte_table; } diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 49d6046ca1d0c1661403111519478666964f5a16..c373eb605040246ad67e332f72bc6de8aee0dfa8 100644 --- 
a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -81,7 +81,7 @@ extern unsigned int vced_count, vcei_count; #endif -#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M) +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M) extern unsigned long mips_stack_top(void); #define STACK_TOP mips_stack_top() diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 7f12d7e27c94ef5e78a49478359cad68bc4a1eb4..e5190126080ee6a9600a4697e7c61aa46a6021a7 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -20,6 +20,7 @@ #include #include #include +#include #include /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); @@ -747,4 +748,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , ) +/* Currently, this is very specific to Loongson-3 */ +#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \ +static inline void blast_##pfx##cache##lsize##_node(long node) \ +{ \ + unsigned long start = CAC_BASE | nid_to_addrbase(node); \ + unsigned long end = start + current_cpu_data.desc.waysize; \ + unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ + unsigned long ws_end = current_cpu_data.desc.ways << \ + current_cpu_data.desc.waybit; \ + unsigned long ws, addr; \ + \ + for (ws = 0; ws < ws_end; ws += ws_inc) \ + for (addr = start; addr < end; addr += lsize * 32) \ + cache##lsize##_unroll32(addr|ws, indexop); \ +} + +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) + #endif /* _ASM_R4KCACHE_H */ diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 0170602a1e4e3f920b0b3834e0df26203d81c5a5..6cf8ffb5367ec3fb725aac26c701d0ae5d81923c 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, #ifdef CONFIG_64BIT case 4: case 5: case 6: case 7: #ifdef CONFIG_MIPS32_O32 - if (test_thread_flag(TIF_32BIT_REGS)) + if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return get_user(*arg, (int *)usp + n); else #endif diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h index ce2d72e34274c0f2d2e217277aa6706ab5f4cb93..bc29eeacc55adb17ac7b95759d62222636619c9e 100644 --- a/arch/mips/include/asm/termios.h +++ b/arch/mips/include/asm/termios.h @@ -32,7 +32,7 @@ static inline int user_termio_to_kernel_termios(struct ktermios *termios, unsigned short iflag, oflag, cflag, lflag; unsigned int err; - if (!access_ok(VERIFY_READ, termio, sizeof(struct termio))) + if (!access_ok(termio, sizeof(struct termio))) return -EFAULT; err = __get_user(iflag, &termio->c_iflag); @@ -61,7 +61,7 @@ static inline int kernel_termios_to_user_termio(struct termio __user *termio, { int err; - if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio))) + if (!access_ok(termio, sizeof(struct termio))) return -EFAULT; err = __put_user(termios->c_iflag, &termio->c_iflag); diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 
4993db40482c80fa17cacec1ef54f4e356b6d26d..ee26f9a4575dfc2b09568855015200043ca0ffbe 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -49,8 +49,26 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -/* How to get the thread information struct from C. */ +/* + * A pointer to the struct thread_info for the currently executing thread is + * held in register $28/$gp. + * + * We declare __current_thread_info as a global register variable rather than a + * local register variable within current_thread_info() because clang doesn't + * support explicit local register variables. + * + * When building the VDSO we take care not to declare the global register + * variable because this causes GCC to not preserve the value of $28/$gp in + * functions that change its value (which is common in the PIC VDSO when + * accessing the GOT). Since the VDSO shouldn't be accessing + * __current_thread_info anyway we declare it extern in order to cause a link + * failure if it's referenced. + */ +#ifdef __VDSO__ +extern struct thread_info *__current_thread_info; +#else register struct thread_info *__current_thread_info __asm__("$28"); +#endif static inline struct thread_info *current_thread_info(void) { diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 06629011a4342f490bf4bcabd9e4fe06ed0952ac..d43c1dc6ef157a59c4b96d456aacc9d69ce077ed 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -109,9 +109,6 @@ static inline bool eva_kernel_access(void) /* * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. 
* @addr: User space pointer to start of block to check * @size: Size of block to check * @@ -134,7 +131,7 @@ static inline int __access_ok(const void __user *p, unsigned long size) return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; } -#define access_ok(type, addr, size) \ +#define access_ok(addr, size) \ likely(__access_ok((addr), (size))) /* @@ -304,7 +301,7 @@ do { \ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ might_fault(); \ - if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \ + if (likely(access_ok( __gu_ptr, size))) { \ if (eva_kernel_access()) \ __get_kernel_common((x), size, __gu_ptr); \ else \ @@ -446,7 +443,7 @@ do { \ int __pu_err = -EFAULT; \ \ might_fault(); \ - if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ + if (likely(access_ok( __pu_addr, size))) { \ if (eva_kernel_access()) \ __put_kernel_common(__pu_addr, size); \ else \ @@ -691,8 +688,7 @@ __clear_user(void __user *addr, __kernel_size_t size) ({ \ void __user * __cl_addr = (addr); \ unsigned long __cl_size = (n); \ - if (__cl_size && access_ok(VERIFY_WRITE, \ - __cl_addr, __cl_size)) \ + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ __cl_size = __clear_user(__cl_addr, __cl_size); \ __cl_size; \ }) diff --git a/arch/mips/include/asm/vmalloc.h b/arch/mips/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..25dc09b25eaf9876937b46640cfdcf0a43678f20 --- /dev/null +++ b/arch/mips/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_MIPS_VMALLOC_H +#define _ASM_MIPS_VMALLOC_H + +#endif /* _ASM_MIPS_VMALLOC_H */ diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h index a2aba4b059e63ff535ac3f46d35efa67ef258bf0..1ade1daa49210713c53bdd20af2564eeb628e3cd 100644 --- a/arch/mips/include/uapi/asm/hwcap.h +++ b/arch/mips/include/uapi/asm/hwcap.h @@ -6,5 +6,16 @@ #define HWCAP_MIPS_R6 (1 << 0) #define HWCAP_MIPS_MSA (1 << 1) #define HWCAP_MIPS_CRC32 (1 << 2) +#define HWCAP_MIPS_MIPS16 (1 << 3) +#define HWCAP_MIPS_MDMX (1 << 4) +#define HWCAP_MIPS_MIPS3D (1 << 5) +#define HWCAP_MIPS_SMARTMIPS (1 << 6) +#define HWCAP_MIPS_DSP (1 << 7) +#define HWCAP_MIPS_DSP2 (1 << 8) +#define HWCAP_MIPS_DSP3 (1 << 9) +#define HWCAP_MIPS_MIPS16E2 (1 << 10) +#define HWCAP_LOONGSON_MMI (1 << 11) +#define HWCAP_LOONGSON_EXT (1 << 12) +#define HWCAP_LOONGSON_EXT2 (1 << 13) #endif /* _UAPI_ASM_HWCAP_H */ diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index c05dcf5ab414e98eddd1839bb9afcd8d079f3ca8..273ef58f4d435e34baaae732269840be63503d93 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -369,8 +369,8 @@ enum mm_32a_minor_op { mm_ext_op = 0x02c, mm_pool32axf_op = 0x03c, mm_srl32_op = 0x040, + mm_srlv32_op = 0x050, mm_sra_op = 0x080, - mm_srlv32_op = 0x090, mm_rotr_op = 0x0c0, mm_lwxs_op = 0x118, mm_addu32_op = 0x150, diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index 3035ca499cd8a5e6e54e10876de3abea6aecd522..1a6ffb5515ed16b8136ba108620fd3fad2573397 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -51,6 +51,9 @@ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ 
+#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ /* * Flags for msync diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h index 26143e3b7c26d26291552fb81754b5df93281c5d..69c3de90c536e424cc915632c4733437fef86ed0 100644 --- a/arch/mips/include/uapi/asm/sgidefs.h +++ b/arch/mips/include/uapi/asm/sgidefs.h @@ -11,14 +11,6 @@ #ifndef __ASM_SGIDEFS_H #define __ASM_SGIDEFS_H -/* - * Using a Linux compiler for building Linux seems logic but not to - * everybody. - */ -#ifndef __linux__ -#error Use a Linux compiler or give up. -#endif - /* * Definitions for the ISA levels * diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index d31bc2f0120882afa493f2dd19efa4fa79ae946f..fb2b6d0b77c365f19bfb347a08b831909f0a1277 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -74,14 +74,15 @@ static int __init vdma_init(void) get_order(VDMA_PGTBL_SIZE)); BUG_ON(!pgtbl); dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); - pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); + pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); /* * Clear the R4030 translation table */ vdma_pgtbl_init(); - r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); + r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, + CPHYSADDR((unsigned long)pgtbl)); r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index af0c8ace0141667337e39b615220da512ebc8586..05c60fa4fa06b1c22a25546b2fd3c9a263631193 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -43,7 +43,6 @@ #include "clock.h" /* GPIOs */ -#define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0) #define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2) #define QI_LB60_GPIO_KEYOUT(x) (JZ_GPIO_PORTC(10) + (x)) @@ -386,12 +385,18 @@ static struct platform_device qi_lb60_gpio_keys = { }; static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = { - .gpio_card_detect = QI_LB60_GPIO_SD_CD, - .gpio_read_only = -1, .gpio_power = QI_LB60_GPIO_SD_VCC_EN_N, .power_active_low = 1, }; +static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = { + .dev_id = "jz4740-mmc.0", + .table = { + GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH), + { }, + }, +}; + /* beeper */ static struct pwm_lookup qi_lb60_pwm_lookup[] = { PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0, @@ -466,27 +471,27 @@ static unsigned long pin_cfg_bias_disable[] = { static struct pinctrl_map pin_map[] __initdata = { /* NAND pin configuration */ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand", - "10010000.jz4740-pinctrl", "nand", "nand-cs1"), + "10010000.pin-controller", "nand-cs1", "nand"), /* fbdev pin configuration */ PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT, - "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"), + "10010000.pin-controller", "lcd-8bit", "lcd"), PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP, - "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"), + "10010000.pin-controller", "lcd-no-pins", "lcd"), /* MMC pin configuration */ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0", - "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"), + "10010000.pin-controller", "mmc-1bit", "mmc"), PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0", - "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"), + "10010000.pin-controller", "mmc-4bit", "mmc"), PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0", - "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable), + "10010000.pin-controller", "PD0", pin_cfg_bias_disable), 
PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0", - "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable), + "10010000.pin-controller", "PD2", pin_cfg_bias_disable), /* PWM pin configuration */ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm", - "10010000.jz4740-pinctrl", "pwm4", "pwm4"), + "10010000.pin-controller", "pwm4", "pwm4"), }; @@ -500,6 +505,7 @@ static int __init qi_lb60_init_platform_devices(void) gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); gpiod_add_lookup_table(&qi_lb60_nand_gpio_table); gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table); + gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table); spi_register_board_info(qi_lb60_spi_board_info, ARRAY_SIZE(qi_lb60_spi_board_info)); diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index 97d5239ca47baef7602b7500f46dc3d4cc8681f1..428ef218920398c6162b82e5688ab61dfe311580 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu) if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); + this_cpu_ci->cpu_map_populated = true; + return 0; } diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index 0b9535bc2c53d0c450a0c02beb24691d356614e2..6b2a4a902a981c7365cb05abc94744cfc6648741 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { - u32 mask, old32, new32, load32; + u32 mask, old32, new32, load32, load; volatile u32 *ptr32; unsigned int shift; - u8 load; /* Check that ptr is naturally aligned */ WARN_ON((unsigned long)ptr & (size - 1)); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d535fc706a8b38a07c8e4c7de12ea111fcd95a60..581defb369c36049aaebc3f4055f09efceba8aa9 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R1); + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: @@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); set_isa(c, MIPS_CPU_ISA_M64R1); + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT); break; } @@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) decode_configs(c); c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c->writecombine = _CACHE_UNCACHED_ACCELERATED; + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); break; default: panic("Unknown Loongson Processor ID!"); @@ -1879,6 +1885,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_JZRISC; c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "Ingenic JZRISC"; + /* + * The XBurst core by default attempts to avoid branch target + * buffer lookups by detecting & special casing loops. This + * feature will cause BogoMIPS and lpj calculate in error. + * Set cp0 config7 bit 4 to disable this feature. 
+ */ + set_c0_config7(MIPS_CONF7_BTB_LOOP_EN); break; default: panic("Unknown Ingenic Processor ID!"); @@ -2092,6 +2105,39 @@ void cpu_probe(void) elf_hwcap |= HWCAP_MIPS_MSA; } + if (cpu_has_mips16) + elf_hwcap |= HWCAP_MIPS_MIPS16; + + if (cpu_has_mdmx) + elf_hwcap |= HWCAP_MIPS_MDMX; + + if (cpu_has_mips3d) + elf_hwcap |= HWCAP_MIPS_MIPS3D; + + if (cpu_has_smartmips) + elf_hwcap |= HWCAP_MIPS_SMARTMIPS; + + if (cpu_has_dsp) + elf_hwcap |= HWCAP_MIPS_DSP; + + if (cpu_has_dsp2) + elf_hwcap |= HWCAP_MIPS_DSP2; + + if (cpu_has_dsp3) + elf_hwcap |= HWCAP_MIPS_DSP3; + + if (cpu_has_mips16e2) + elf_hwcap |= HWCAP_MIPS_MIPS16E2; + + if (cpu_has_loongson_mmi) + elf_hwcap |= HWCAP_LOONGSON_MMI; + + if (cpu_has_loongson_ext) + elf_hwcap |= HWCAP_LOONGSON_EXT; + + if (cpu_has_loongson_ext2) + elf_hwcap |= HWCAP_LOONGSON_EXT2; + if (cpu_has_vz) cpu_probe_vz(c); diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index d455363d51c3d0e6e441ac65c2b4303fffe87117..4c07a43a3242a1ebe96004de447060b0c0888fbd 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs) if (!cpu_online(cpu)) return; + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + local_irq_disable(); if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 7f3dfdbc3657e6705b6a797c6e1dfa565fa3bff9..b122cbb4aad184c5dddd56d4990c7924ed9ad563 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { unsigned long old_parent_ra; - struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; int faulted, insns; @@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp, - NULL) == -EBUSY) { - *parent_ra_addr = old_parent_ra; - return; - } - /* * Get the recorded ip of the current mcount calling site in the * __mcount_loc section, which will be used to filter the function @@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, */ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; - trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); + self_ra -= (MCOUNT_INSN_SIZE * insns); - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; + if (function_graph_enter(old_parent_ra, self_ra, fp, NULL)) *parent_ra_addr = old_parent_ra; - } return; out: ftrace_graph_stop(); diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 5f209f111e59e3ad9c6e31bee8352623cacc3e68..df7ddd246eaac730333bc24cc361f1893be100aa 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -32,7 +32,8 @@ void __init setup_pit_timer(void) static int __init init_pit_clocksource(void) { - if (num_possible_cpus() > 1) /* PIT does not scale! */ + if (num_possible_cpus() > 1 || /* PIT does not scale! 
*/ + !clockevent_state_periodic(&i8253_clockevent)) return 0; return clocksource_i8253_init(); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index ba150c755fccebe8ed3e60fc3af89fa8cd943bfa..85b6c60f285d2f3392fb094a0584562833fc7ffd 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void) void __init init_IRQ(void) { int i; + unsigned int order = get_order(IRQ_STACK_SIZE); for (i = 0; i < NR_IRQS; i++) irq_set_noprobe(i); @@ -62,8 +63,7 @@ void __init init_IRQ(void) arch_init_irq(); for_each_possible_cpu(i) { - int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; - void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); + void *s = (void *)__get_free_pages(GFP_KERNEL, order); irq_stack[i] = s; pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 32e3168316cd98f013a792a9f8827e67b4e4db9d..ab943927f97ab20271d3fb255c36e4dc14f5fcc7 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -16,8 +16,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - /* * Define parameters for the standard MIPS and the microMIPS jump * instruction encoding respectively: @@ -70,5 +68,3 @@ void arch_jump_label_transform(struct jump_entry *e, mutex_unlock(&text_mutex); } - -#endif /* HAVE_JUMP_LABEL */ diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index eb6c0d582626b114fcb8d30f9fb28ee3472e8cc9..2c1e30ca7ee4e838bbfc38afbb56c1a8cd8b66a9 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -33,6 +33,7 @@ #include #include #include +#include static struct hard_trap_info { unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ @@ -214,7 +215,7 @@ static void kgdb_call_nmi_hook(void *ignored) old_fs = get_fs(); set_fs(get_ds()); - kgdb_nmicallback(raw_smp_processor_id(), NULL); + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); set_fs(old_fs); } diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 8b574bcd39ba8868364c7d22e355e96cd4e46d70..4b3726e4fe3ac68b7236dd67abf0db79298d6b74 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -118,6 +118,9 @@ machine_kexec(struct kimage *image) *ptr = (unsigned long) phys_to_virt(*ptr); } + /* Mark offline BEFORE disabling local irq. */ + set_cpu_online(smp_processor_id(), false); + /* * we do not want to be bothered. 
*/ diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a90be3861dd75664fdead350655a3..7f3f136572decc82d666d0fca0515f38d15dd2b3 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -457,5 +457,5 @@ void mips_cm_error_report(void) } /* reprime cause register */ - write_gcr_error_cause(0); + write_gcr_error_cause(cm_error); } diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index eb18b186e858c6bec7113785beca3a8a905fdb0e..25112d332f28b20b54b03c5db178487facc727b2 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -1212,7 +1212,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) case lwl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1285,7 +1285,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) case lwr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1359,7 +1359,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) case swl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1429,7 +1429,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) case swr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1504,7 +1504,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1623,7 +1623,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1742,7 +1742,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1860,7 +1860,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; 
@@ -1977,7 +1977,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2033,7 +2033,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { + if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2096,7 +2096,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2157,7 +2157,7 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { + if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 413863508f6fab1c718a19751fa030b278a506a3..d67fb64e908c4b1157c1672f7fb77899122e0c52 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -64,17 +64,11 @@ struct mips_perf_event { #define CNTR_EVEN 0x55555555 #define CNTR_ODD 0xaaaaaaaa #define CNTR_ALL 0xffffffff -#ifdef CONFIG_MIPS_MT_SMP enum { T = 0, V = 1, P = 2, } range; -#else - #define T - #define V - #define P -#endif }; static struct mips_perf_event raw_event; @@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) { struct perf_event *event = container_of(evt, struct perf_event, hw); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); -#ifdef CONFIG_MIPS_MT_SMP unsigned int range = evt->event_base >> 24; -#endif /* CONFIG_MIPS_MT_SMP */ WARN_ON(idx < 0 || idx >= mipspmu.num_counters); @@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) /* Make sure interrupt enabled. */ MIPS_PERFCTRL_IE; -#ifdef CONFIG_CPU_BMIPS5000 - { + if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) { /* enable the counter for the calling thread */ cpuc->saved_ctrl[idx] |= (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; - } -#else -#ifdef CONFIG_MIPS_MT_SMP - if (range > V) { + } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) { /* The counter is processor wide. Set it up to count all TCs. */ pr_debug("Enabling perf counter for all TCs\n"); cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; - } else -#endif /* CONFIG_MIPS_MT_SMP */ - { + } else { unsigned int cpu, ctrl; /* @@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) cpuc->saved_ctrl[idx] |= ctrl; pr_debug("Enabling perf counter for CPU%d\n", cpu); } -#endif /* CONFIG_CPU_BMIPS5000 */ /* * We do not actually let the counter run. Leave it until start(). 
*/ diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index b2de408a259e493b65a4b28abe0a746081f0aa78..f8d36710cd581f8b628056d09c406f8195c43619 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_eva) seq_printf(m, "%s", " eva"); if (cpu_has_htw) seq_printf(m, "%s", " htw"); if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); + if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi"); + if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam"); + if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext"); + if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2"); seq_printf(m, "\n"); if (cpu_has_mmips) { diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d4f7fd4550e10d7ea0dfd8ddcfe916f08df4a03e..85522c137f19fbc6d74c0c6efea1b856721b14fd 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) static int get_frame_info(struct mips_frame_info *info) { bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); - union mips_instruction insn, *ip, *ip_end; + union mips_instruction insn, *ip; const unsigned int max_insns = 128; unsigned int last_insn_size = 0; unsigned int i; @@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) if (!ip) goto err; - ip_end = (void *)ip + info->func_size; - - for (i = 0; i < max_insns && ip < ip_end; i++) { + for (i = 0; i < max_insns; i++) { ip = (void *)ip + last_insn_size; + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { insn.word = ip->halfword[0] << 16; last_insn_size = 2; diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 89950b7bf536b7fe30e04fc531677c6b3b8666ea..bdaf3536241a2d936d0ea55e37f3f4cd944b6467 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -41,7 +41,19 @@ char *mips_get_machine_name(void) #ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - return add_memory_region(base, size, BOOT_MEM_RAM); + if (base >= PHYS_ADDR_MAX) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return; + } + + /* Truncate the passed memory region instead of type casting */ + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { + pr_warn("Truncate memory region %llx @ %llx to size %llx\n", + size, base, PHYS_ADDR_MAX - base); + size = PHYS_ADDR_MAX - base; + } + + add_memory_region(base, size, BOOT_MEM_RAM); } int __init early_init_dt_reserve_memory_arch(phys_addr_t base, diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index e5ba56c01ee0a88b090507e6d9bb6cb08f7b13ba..48db5bcefd51155587af7770acb1f75fe5c26450 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -105,7 +105,7 @@ int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data) struct pt_regs *regs; int i; - if (!access_ok(VERIFY_WRITE, data, 38 * 8)) + if (!access_ok(data, 38 * 8)) return -EIO; regs = task_pt_regs(child); @@ -132,7 +132,7 @@ int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data) struct pt_regs *regs; int i; - if (!access_ok(VERIFY_READ, data, 38 * 8)) + if (!access_ok(data, 38 * 8)) return -EIO; regs = task_pt_regs(child); @@ -155,7 +155,7 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) { int i; - if (!access_ok(VERIFY_WRITE, data, 33 * 8)) + if (!access_ok(data, 33 * 8)) return -EIO; if (tsk_used_math(child)) { @@ 
-181,7 +181,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) u32 value; int i; - if (!access_ok(VERIFY_READ, data, 33 * 8)) + if (!access_ok(data, 33 * 8)) return -EIO; init_fp_ctx(child); @@ -208,7 +208,7 @@ int ptrace_get_watch_regs(struct task_struct *child, if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; - if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) + if (!access_ok(addr, sizeof(struct pt_watch_regs))) return -EIO; #ifdef CONFIG_32BIT @@ -250,7 +250,7 @@ int ptrace_set_watch_regs(struct task_struct *child, if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; - if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) + if (!access_ok(addr, sizeof(struct pt_watch_regs))) return -EIO; /* Check the values. */ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 73913f072e3916f36c23bda86870f83002a725c0..579608342ac69fb86ed841abb420fabb98549468 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -125,7 +125,7 @@ trace_a_syscall: subu t1, v0, __NR_O32_Linux move a1, v0 bnez t1, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ + ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ .set pop 1: jal syscall_trace_enter diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 109ed163a6a6aeaa25015c737052ced450924f00..ec77419bd46d87558db0bb04e6f164f37210c8d8 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -561,7 +561,7 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act, if (act) { old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act))) + if (!access_ok(act, sizeof(*act))) return -EFAULT; err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler); err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); @@ -575,7 +575,7 @@ SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) + if (!access_ok(oact, sizeof(*oact))) return -EFAULT; err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); @@ -601,7 +601,7 @@ asmlinkage void sys_sigreturn(void) regs = current_pt_regs(); frame = (struct sigframe __user *)regs->regs[29]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) goto badframe; @@ -638,7 +638,7 @@ asmlinkage void sys_rt_sigreturn(void) regs = current_pt_regs(); frame = (struct rt_sigframe __user *)regs->regs[29]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) goto badframe; @@ -676,7 +676,7 @@ static int setup_frame(void *sig_return, struct ksignal *ksig, int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) return -EFAULT; err |= setup_sigcontext(regs, &frame->sf_sc); @@ -715,7 +715,7 @@ static int setup_rt_frame(void *sig_return, struct ksignal *ksig, int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) return -EFAULT; /* Create siginfo. */ diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index b5d9e1784aff35ef0724a96e2784d11eb9e15057..59b8965433c2fdb2f8337de49680ca6a50b09dde 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -46,7 +46,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, old_sigset_t mask; s32 handler; - if (!access_ok(VERIFY_READ, act, sizeof(*act))) + if (!access_ok(act, sizeof(*act))) return -EFAULT; err |= __get_user(handler, &act->sa_handler); new_ka.sa.sa_handler = (void __user *)(s64)handler; @@ -61,7 +61,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) + if (!access_ok(oact, sizeof(*oact))) return -EFAULT; err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); err |= __put_user((u32)(u64)old_ka.sa.sa_handler, diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 8f65aaf9206d1ba88ab0f68e580eb176aacd173e..c498b027823e695a31f39bdee86d7056ccc56403 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -73,7 +73,7 @@ asmlinkage void sysn32_rt_sigreturn(void) regs = current_pt_regs(); frame = (struct rt_sigframe_n32 __user *)regs->regs[29]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) goto badframe; @@ -110,7 +110,7 @@ static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig, int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) return -EFAULT; /* Create siginfo. 
*/ diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index b6e3ddef48a06f9a543a10edc3c96e5ce1116b2b..df259618e834bb390629b7df18e6a65225c9d109 100644 --- a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -118,7 +118,7 @@ static int setup_frame_32(void *sig_return, struct ksignal *ksig, int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) return -EFAULT; err |= setup_sigcontext32(regs, &frame->sf_sc); @@ -160,7 +160,7 @@ asmlinkage void sys32_rt_sigreturn(void) regs = current_pt_regs(); frame = (struct rt_sigframe32 __user *)regs->regs[29]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) goto badframe; @@ -197,7 +197,7 @@ static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) + if (!access_ok(frame, sizeof (*frame))) return -EFAULT; /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ @@ -262,7 +262,7 @@ asmlinkage void sys32_sigreturn(void) regs = current_pt_regs(); frame = (struct sigframe32 __user *)regs->regs[29]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) goto badframe; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 159e83add4bb3e6b43f105b521761eb9cb80491b..5ec546b5eed1c0c6a46b6bfbc5dadf0188667a42 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -457,10 +457,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end) static inline void bmips_nmi_handler_setup(void) { - bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, - &bmips_reset_nmi_vec_end); - bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, - &bmips_smp_int_vec_end); + bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec, + bmips_reset_nmi_vec_end); + bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec, + bmips_smp_int_vec_end); } struct reset_vec_info { diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d84b9066b4654e4e9a3eee2fc7136260fd25b702..7206a6977be9b1e7736695c6d1616dff45808415 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -372,6 +372,9 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + set_cpu_sibling_map(cpu); + set_cpu_core_map(cpu); + cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); @@ -383,9 +386,6 @@ asmlinkage void start_secondary(void) /* The CPU is running and counters synchronised, now mark it online */ set_cpu_online(cpu, true); - set_cpu_sibling_map(cpu); - set_cpu_core_map(cpu); - calculate_cpu_foreign_map(); /* diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 69c17b549fd3cc598946bbfc67f424b4751161b1..aad8b6c124502952140ea204db1628f1ff8b2cb0 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -101,7 +101,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (unlikely(addr & 3)) return -EINVAL; - if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4))) + if (unlikely(!access_ok((const void __user *)addr, 4))) return -EINVAL; if (cpu_has_llsc && R10000_LLSC_WAR) { diff 
--git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 2d0b912f9e3e40a987a5a481c7066bf46dcf4acb..fa80787b9adc911f6d40ffbca078a12d236b2ac0 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -943,7 +943,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (insn.dsp_format.func == lx_op) { switch (insn.dsp_format.op) { case lwx_op: - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) @@ -952,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.dsp_format.rd] = value; break; case lhx_op: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) @@ -975,7 +975,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(USER_DS); switch (insn.spec3_format.func) { case lhe_op: - if (!access_ok(VERIFY_READ, addr, 2)) { + if (!access_ok(addr, 2)) { set_fs(seg); goto sigbus; } @@ -988,7 +988,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.spec3_format.rt] = value; break; case lwe_op: - if (!access_ok(VERIFY_READ, addr, 4)) { + if (!access_ok(addr, 4)) { set_fs(seg); goto sigbus; } @@ -1001,7 +1001,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.spec3_format.rt] = value; break; case lhue_op: - if (!access_ok(VERIFY_READ, addr, 2)) { + if (!access_ok(addr, 2)) { set_fs(seg); goto sigbus; } @@ -1014,7 +1014,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.spec3_format.rt] = value; break; case she_op: - if (!access_ok(VERIFY_WRITE, addr, 2)) { + if (!access_ok(addr, 2)) { set_fs(seg); goto sigbus; } @@ -1027,7 +1027,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, } break; case swe_op: - if (!access_ok(VERIFY_WRITE, addr, 4)) { + if (!access_ok(addr, 4)) { set_fs(seg); goto sigbus; } @@ -1048,7 +1048,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, #endif break; case lh_op: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { @@ -1067,7 +1067,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lw_op: - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { @@ -1086,7 +1086,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhu_op: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { @@ -1113,7 +1113,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1136,7 +1136,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(VERIFY_READ, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigill; case sh_op: - if (!access_ok(VERIFY_WRITE, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; compute_return_epc(regs); @@ -1171,7 +1171,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case sw_op: - if (!access_ok(VERIFY_WRITE, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; compute_return_epc(regs); @@ -1199,7 +1199,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_WRITE, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; compute_return_epc(regs); @@ -1250,7 +1250,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, switch (insn.msa_mi10_format.func) { case msa_ld_op: - if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) + if (!access_ok(addr, sizeof(*fpr))) goto sigbus; do { @@ -1286,7 +1286,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case msa_st_op: - if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr))) + if (!access_ok(addr, sizeof(*fpr))) goto sigbus; /* @@ -1458,7 +1458,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(VERIFY_READ, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; LoadW(addr, value, res); @@ -1477,7 +1477,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(VERIFY_WRITE, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -1497,7 +1497,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(VERIFY_READ, addr, 16)) + if (!access_ok(addr, 16)) goto sigbus; LoadDW(addr, value, res); @@ -1520,7 +1520,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(VERIFY_WRITE, addr, 16)) + if (!access_ok(addr, 16)) goto sigbus; value = regs->regs[reg]; @@ -1543,11 +1543,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok - (VERIFY_READ, addr, 4 * (rvar + 1))) + if (!access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + if (!access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -1580,11 +1579,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok - (VERIFY_WRITE, addr, 4 * (rvar + 1))) + if (!access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + if (!access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -1618,11 +1616,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok - (VERIFY_READ, addr, 8 * (rvar + 1))) + if (!access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(VERIFY_READ, addr, 8 * rvar)) + if (!access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -1660,11 +1657,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok - (VERIFY_WRITE, addr, 8 * (rvar + 1))) + if (!access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(VERIFY_WRITE, addr, 8 
* rvar)) + if (!access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -1779,7 +1775,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, case mm_lwm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + if (!access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1799,7 +1795,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, case mm_swm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + if (!access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1853,7 +1849,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, } loadHW: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1863,7 +1859,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, goto success; loadHWU: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1873,7 +1869,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, goto success; loadW: - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1891,7 +1887,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1913,7 +1909,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_READ, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1927,7 +1923,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, goto sigill; storeHW: - if (!access_ok(VERIFY_WRITE, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; value = regs->regs[reg]; @@ -1937,7 +1933,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, goto success; storeW: - if (!access_ok(VERIFY_WRITE, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; value = regs->regs[reg]; @@ -1955,7 +1951,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_WRITE, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -2113,7 +2109,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; case MIPS16e_lh_op: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -2124,7 +2120,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) break; case MIPS16e_lhu_op: - if (!access_ok(VERIFY_READ, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -2137,7 +2133,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) case MIPS16e_lw_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -2156,7 +2152,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(VERIFY_READ, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -2180,7 +2176,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_READ, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -2195,7 +2191,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigill; case MIPS16e_sh_op: - if (!access_ok(VERIFY_WRITE, addr, 2)) + if (!access_ok(addr, 2)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -2208,7 +2204,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) case MIPS16e_sw_op: case MIPS16e_swsp_op: case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ - if (!access_ok(VERIFY_WRITE, addr, 4)) + if (!access_ok(addr, 4)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -2228,7 +2224,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(VERIFY_WRITE, addr, 8)) + if (!access_ok(addr, 8)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index 4aaff3b3175c4c6323052d531a795a22fbcad618..6dbe4eab0a0e832a0df3cc80aebf2d52ea4ab720 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) */ aup->resume_epc = regs->cp0_epc + 4; if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { - unsigned long epc; - - epc = regs->cp0_epc; __compute_return_epc_for_insn(regs, (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 48a9c6b90e079110e52603947901be76018323a1..9df3ebdc7b0f7164730b4211fa2c62c7aaeec312 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map delay slot emulation page */ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ|VM_WRITE|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, 0, NULL); if (IS_ERR_VALUE(base)) { ret = base; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 971a504001c27cfd2408b634ace6d2501d47f6f8..36f2e860ba3eade90e3c1f535c18713d257ebaff 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -140,6 +140,13 @@ SECTIONS PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) #endif +#ifdef CONFIG_MIPS_ELF_APPENDED_DTB + .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { + *(.appended_dtb) + KEEP(*(.appended_dtb)) + } +#endif + #ifdef CONFIG_RELOCATABLE . = ALIGN(4); @@ -164,11 +171,6 @@ SECTIONS __appended_dtb = .; /* leave space for appended DTB */ . 
+= 0x100000; -#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB) - .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { - *(.appended_dtb) - KEEP(*(.appended_dtb)) - } #endif /* * Align to 64K in attempt to eliminate holes before the diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index f7ea8e21656b168fbd16a57a46851c0ab78b0129..e3f7606bdbb49d28c72b1eb58af52b8046d08253 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1004,14 +1004,37 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - bool is_dirty = false; + bool flush = false; int r; mutex_lock(&kvm->slots_lock); - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); + r = kvm_get_dirty_log_protect(kvm, log, &flush); - if (is_dirty) { + if (flush) { + slots = kvm_memslots(kvm); + memslot = id_to_memslot(slots, log->slot); + + /* Let implementation handle TLB/GVA invalidation */ + kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); + } + + mutex_unlock(&kvm->slots_lock); + return r; +} + +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + bool flush = false; + int r; + + mutex_lock(&kvm->slots_lock); + + r = kvm_clear_dirty_log_protect(kvm, log, &flush); + + if (flush) { slots = kvm_memslots(kvm); memslot = id_to_memslot(slots, log->slot); @@ -1099,6 +1122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; case KVM_CAP_MIPS_FPU: /* We don't handle systems with inconsistent cpu_has_fpu */ r = !!raw_cpu_has_fpu; diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f0bc3312ed1103bea83c69338e1cf4cc010619cf..37caeadb2964c956256040ff201743c140de310c 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -156,8 +156,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type) if (edge) irq_set_handler(d->hwirq, handle_edge_irq); - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | - (val << (i * 4)), LTQ_EIU_EXIN_C); + ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) & + (~(7 << (i * 4)))) | (val << (i * 4)), + LTQ_EIU_EXIN_C); } } @@ -224,9 +225,11 @@ static struct irq_chip ltq_eiu_type = { .irq_set_type = ltq_eiu_settype, }; -static void ltq_hw_irqdispatch(int module) +static void ltq_hw_irq_handler(struct irq_desc *desc) { + int module = irq_desc_get_irq(desc) - 2; u32 irq; + int hwirq; irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); if (irq == 0) @@ -237,7 +240,8 @@ static void ltq_hw_irqdispatch(int module) * other bits might be bogus */ irq = __fls(irq); - do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); + hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); + generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) @@ -245,49 +249,6 @@ static void ltq_hw_irqdispatch(int module) LTQ_EBU_PCC_ISTAT); } -#define DEFINE_HWx_IRQDISPATCH(x) \ - static void ltq_hw ## x ## _irqdispatch(void) \ - { \ - ltq_hw_irqdispatch(x); \ - } -DEFINE_HWx_IRQDISPATCH(0) -DEFINE_HWx_IRQDISPATCH(1) -DEFINE_HWx_IRQDISPATCH(2) -DEFINE_HWx_IRQDISPATCH(3) -DEFINE_HWx_IRQDISPATCH(4) - -#if MIPS_CPU_TIMER_IRQ == 7 -static void ltq_hw5_irqdispatch(void) -{ - do_IRQ(MIPS_CPU_TIMER_IRQ); -} -#else -DEFINE_HWx_IRQDISPATCH(5) -#endif - -static void 
ltq_hw_irq_handler(struct irq_desc *desc) -{ - ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - int irq; - - if (!pending) { - spurious_interrupt(); - return; - } - - pending >>= CAUSEB_IP; - while (pending) { - irq = fls(pending) - 1; - do_IRQ(MIPS_CPU_IRQ_BASE + irq); - pending &= ~BIT(irq); - } -} - static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &ltq_irq_type; @@ -343,28 +304,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) for (i = 0; i < MAX_IM; i++) irq_set_chained_handler(i + 2, ltq_hw_irq_handler); - if (cpu_has_vint) { - pr_info("Setting up vectored interrupts\n"); - set_vi_handler(2, ltq_hw0_irqdispatch); - set_vi_handler(3, ltq_hw1_irqdispatch); - set_vi_handler(4, ltq_hw2_irqdispatch); - set_vi_handler(5, ltq_hw3_irqdispatch); - set_vi_handler(6, ltq_hw4_irqdispatch); - set_vi_handler(7, ltq_hw5_irqdispatch); - } - ltq_domain = irq_domain_add_linear(node, (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, &irq_domain_ops, 0); -#ifndef CONFIG_MIPS_MT_SMP - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | - IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#else - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#endif - /* tell oprofile which irq to use */ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88665febfcad2964690beff5000a0d..12abf14aed4a37569ff19044a74bf1b854205d97 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -43,6 +43,10 @@ else $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) endif +# Some -march= flags enable MMI instructions, and GCC complains about that +# support being enabled alongside -msoft-float. Thus explicitly disable MMI. +cflags-y += $(call cc-option,-mno-loongson-mmi) + # # Loongson Machines' Support # diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306b272bc0402a8f8351a2afec5fc48..b26892ce871c87cdf1f8c4e39d5ca81b11bfff31 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c @@ -59,7 +59,12 @@ static void loongson_poweroff(void) { #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE mach_prepare_shutdown(); - unreachable(); + + /* + * It needs a wait loop here, but mips/kernel/reset.c already calls + * a generic delay loop, machine_hang(), so simply return.
+ */ + return; #else void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c index ffefc1cb26121e7b653b64b661b267cac4b4e1cd..98c3a7feb10f8b2391c968661e5e76c8d6c2770e 100644 --- a/arch/mips/loongson64/common/serial.c +++ b/arch/mips/loongson64/common/serial.c @@ -110,7 +110,7 @@ static int __init serial_init(void) } module_init(serial_init); -static void __init serial_exit(void) +static void __exit serial_exit(void) { platform_device_unregister(&uart8250_device); } diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c index 9e33e45aa17c5d6881d6bc8cd5ca3c90d42098d0..b213cecb8e3ac4e76573e334c42cbde7c88636f4 100644 --- a/arch/mips/loongson64/lemote-2f/irq.c +++ b/arch/mips/loongson64/lemote-2f/irq.c @@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = { static struct irqaction cascade_irqaction = { .handler = no_action, .name = "cascade", - .flags = IRQF_NO_THREAD, + .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND, }; void __init mach_init_irq(void) diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c index cbeb20f9fc95ca25d7396b2e779dcbab4ccbcd3c..5605061f5f981457cc9e4bec79c312d3142ded52 100644 --- a/arch/mips/loongson64/loongson-3/irq.c +++ b/arch/mips/loongson64/loongson-3/irq.c @@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending) } } -static struct irqaction cascade_irqaction = { - .handler = no_action, - .flags = IRQF_NO_SUSPEND, - .name = "cascade", -}; - -static inline void mask_loongson_irq(struct irq_data *d) -{ - clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); - irq_disable_hazard(); - - /* Workaround: UART IRQ may deliver to any core */ - if (d->irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenclr_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENCLR); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenclr_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenset_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENSET); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenset_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<irq - MIPS_CPU_IRQ_BASE)); - irq_enable_hazard(); -} +static inline void mask_loongson_irq(struct irq_data *d) { } +static inline void unmask_loongson_irq(struct irq_data *d) { } /* For MIPS IRQs which shared by all cores */ static struct irq_chip loongson_irq_chip = { @@ -183,12 +140,11 @@ void __init mach_init_irq(void) chip->irq_set_affinity = plat_set_irq_affinity; irq_set_chip_and_handler(LOONGSON_UART_IRQ, - &loongson_irq_chip, handle_level_irq); - - /* setup HT1 irq */ - setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction); + &loongson_irq_chip, handle_percpu_irq); + irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ, + &loongson_irq_chip, handle_percpu_irq); - set_c0_status(STATUSF_IP2 | STATUSF_IP6); + set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/mips/math-emu/cp1emu.c 
b/arch/mips/math-emu/cp1emu.c index 62deb025970bdd5391f1288a554fa24d14070bf5..26c93c21feeb90b6dfe3c6445605a593266a75c3 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1063,7 +1063,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, MIPSInst_SIMM(ir)); MIPS_FPU_EMU_INC_STATS(loads); - if (!access_ok(VERIFY_READ, dva, sizeof(u64))) { + if (!access_ok(dva, sizeof(u64))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = dva; return SIGBUS; @@ -1081,7 +1081,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, MIPSInst_SIMM(ir)); MIPS_FPU_EMU_INC_STATS(stores); DIFROMREG(dval, MIPSInst_RT(ir)); - if (!access_ok(VERIFY_WRITE, dva, sizeof(u64))) { + if (!access_ok(dva, sizeof(u64))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = dva; return SIGBUS; @@ -1097,7 +1097,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] + MIPSInst_SIMM(ir)); MIPS_FPU_EMU_INC_STATS(loads); - if (!access_ok(VERIFY_READ, wva, sizeof(u32))) { + if (!access_ok(wva, sizeof(u32))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = wva; return SIGBUS; @@ -1115,7 +1115,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, MIPSInst_SIMM(ir)); MIPS_FPU_EMU_INC_STATS(stores); SIFROMREG(wval, MIPSInst_RT(ir)); - if (!access_ok(VERIFY_WRITE, wva, sizeof(u32))) { + if (!access_ok(wva, sizeof(u32))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = wva; return SIGBUS; @@ -1493,7 +1493,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, xcp->regs[MIPSInst_FT(ir)]); MIPS_FPU_EMU_INC_STATS(loads); - if (!access_ok(VERIFY_READ, va, sizeof(u32))) { + if (!access_ok(va, sizeof(u32))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = va; return SIGBUS; @@ -1513,7 +1513,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, MIPS_FPU_EMU_INC_STATS(stores); SIFROMREG(val, MIPSInst_FS(ir)); - if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) { + if (!access_ok(va, sizeof(u32))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = va; return SIGBUS; @@ -1590,7 +1590,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, xcp->regs[MIPSInst_FT(ir)]); MIPS_FPU_EMU_INC_STATS(loads); - if (!access_ok(VERIFY_READ, va, sizeof(u64))) { + if (!access_ok(va, sizeof(u64))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = va; return SIGBUS; @@ -1609,7 +1609,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, MIPS_FPU_EMU_INC_STATS(stores); DIFROMREG(val, MIPSInst_FS(ir)); - if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) { + if (!access_ok(va, sizeof(u64))) { MIPS_FPU_EMU_INC_STATS(errors); *fault_addr = va; return SIGBUS; diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 5450f4d1c920e22d5119c6ec389fb8ddc22cc176..e2d46cb93ca987f16dc0d19f3d47d04e3235e332 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, { int isa16 = get_isa16_mode(regs->cp0_epc); mips_instruction break_math; - struct emuframe __user *fr; - int err, fr_idx; + unsigned long fr_uaddr; + struct emuframe fr; + int fr_idx, ret; /* NOP is easy */ if (ir == 0) @@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, fr_idx = alloc_emuframe(); if (fr_idx == BD_EMUFRAME_NONE) return SIGBUS; - fr = &dsemul_page()[fr_idx]; /* Retrieve the appropriately encoded break instruction */ break_math = 
BREAK_MATH(isa16); /* Write the instructions to the frame */ if (isa16) { - err = __put_user(ir >> 16, - (u16 __user *)(&fr->emul)); - err |= __put_user(ir & 0xffff, - (u16 __user *)((long)(&fr->emul) + 2)); - err |= __put_user(break_math >> 16, - (u16 __user *)(&fr->badinst)); - err |= __put_user(break_math & 0xffff, - (u16 __user *)((long)(&fr->badinst) + 2)); + union mips_instruction _emul = { + .halfword = { ir >> 16, ir } + }; + union mips_instruction _badinst = { + .halfword = { break_math >> 16, break_math } + }; + + fr.emul = _emul.word; + fr.badinst = _badinst.word; } else { - err = __put_user(ir, &fr->emul); - err |= __put_user(break_math, &fr->badinst); + fr.emul = ir; + fr.badinst = break_math; } - if (unlikely(err)) { + /* Write the frame to user memory */ + fr_uaddr = (unsigned long)&dsemul_page()[fr_idx]; + ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), + FOLL_FORCE | FOLL_WRITE); + if (unlikely(ret != sizeof(fr))) { MIPS_FPU_EMU_INC_STATS(errors); free_emuframe(fr_idx, current->mm); return SIGBUS; @@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, atomic_set(¤t->thread.bd_emu_frame, fr_idx); /* Change user register context to execute the frame */ - regs->cp0_epc = (unsigned long)&fr->emul | isa16; - - /* Ensure the icache observes our newly written frame */ - flush_cache_sigtramp((unsigned long)&fr->emul); + regs->cp0_epc = fr_uaddr | isa16; return 0; } diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 3466fcdae0ca294b2d2f43c1d4105693576aca3d..01848cdf207412f466630f11560dfaea45ca53b0 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, pmd_t *pmdp; pte_t *ptep; - pr_debug("cpage[%08lx,%08lx]\n", + pr_debug("cpage[%08llx,%08lx]\n", cpu_context(smp_processor_id(), mm), addr); /* No ASID => no such page in the cache. 
*/ diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index a9ef057c79fe4a23e0f27d2b54ba6b97e1a41df0..05a539d3a5970f1d21e49ddb7142eba18b86fe61 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void) r4k_blast_scache = blast_scache128; } +static void (*r4k_blast_scache_node)(long node); + +static void r4k_blast_scache_node_setup(void) +{ + unsigned long sc_lsize = cpu_scache_line_size(); + + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache_node = (void *)cache_noop; + else if (sc_lsize == 16) + r4k_blast_scache_node = blast_scache16_node; + else if (sc_lsize == 32) + r4k_blast_scache_node = blast_scache32_node; + else if (sc_lsize == 64) + r4k_blast_scache_node = blast_scache64_node; + else if (sc_lsize == 128) + r4k_blast_scache_node = blast_scache128_node; +} + static inline void local_r4k___flush_cache_all(void * args) { switch (current_cpu_type()) { case CPU_LOONGSON2: - case CPU_LOONGSON3: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_scache(); break; + case CPU_LOONGSON3: + /* Use get_ebase_cpunum() for both NUMA=y/n */ + r4k_blast_scache_node(get_ebase_cpunum() >> 2); + break; + case CPU_BMIPS5000: r4k_blast_scache(); __sync(); @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { blast_scache_range(addr, addr + size); + } preempt_enable(); __sync(); return; @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) preempt_disable(); if (cpu_has_inclusive_pcaches) { - if (size >= scache_size) - r4k_blast_scache(); - else { + if (size >= scache_size) { + if (current_cpu_type() != CPU_LOONGSON3) + r4k_blast_scache(); + else + r4k_blast_scache_node(pa_to_nid(addr)); + } else { /* * There is no clearly documented alignment requirement * for the cache instruction on MIPS processors and @@ -1918,6 +1947,7 @@ void r4k_cache_init(void) r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); + r4k_blast_scache_node_setup(); #ifdef CONFIG_EVA r4k_blast_dcache_user_page_setup(); r4k_blast_icache_user_page_setup(); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 70a523151ff39dfa41b330f03c1dd1330838b4f5..55099fbff4e6d783ce184d8a6bb7242c27e1f164 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -76,7 +76,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes, { if (bytes == 0) return 0; - if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes)) + if (!access_ok((void __user *) addr, bytes)) return -EFAULT; __flush_icache_user_range(addr, addr + bytes); diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 73d8a0f0b810c669d7d541704ab417a386e2cb74..be6aa573cbf0f95ae249e23141f2a9ffc20439b9 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -178,7 +178,6 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, tsk->min_flt++; } if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 
5a4875cac1ec979da60be7ed3f80379eab5d6b65..0d14e0d8eacf058f49e25be97c1347847af6a137 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -195,8 +195,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, - (void __user *)start, len))) + if (unlikely(!access_ok((void __user *)start, len))) return 0; /* diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 2f616ebeb7e0cff264a7d399a8c341c694e8c003..233033f99d8fc62e2dc1d5dfc0c583f6ea4b1809 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); /* gap between mmap and stack */ -#define MIN_GAP (128*1024*1024UL) -#define MAX_GAP ((TASK_SIZE)/6*5) +#define MIN_GAP (128*1024*1024UL) +#define MAX_GAP ((TASK_SIZE)/6*5) +#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) static int mmap_is_legacy(struct rlimit *rlim_stack) { @@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; + unsigned long pad = stack_guard_gap; + + /* Account for stack randomization if necessary */ + if (current->flags & PF_RANDOMIZE) + pad += (STACK_RND_MASK << PAGE_SHIFT); + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; @@ -203,6 +213,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) int __virt_addr_valid(const volatile void *kaddr) { + unsigned long vaddr = (unsigned long)kaddr; + + if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE)) + return 0; + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); } EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 06771429164317c3e8bfe5da8e02e33445fb0b14..3944c49eee0c4c2ebb77e62a686e05efd3e37399 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -391,6 +391,7 @@ static struct work_registers build_get_work_registers(u32 **p) static void build_restore_work_registers(u32 **p) { if (scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); return; } @@ -629,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, return; } - if (cpu_has_rixi && _PAGE_NO_EXEC) { + if (cpu_has_rixi && !!_PAGE_NO_EXEC) { if (fill_includes_sw_bits) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { @@ -653,6 +654,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, int restore_scratch) { if (restore_scratch) { + /* + * Ensure the MFC0 below observes the value written to the + * KScratch register by the prior MTC0. + */ + if (scratch_reg >= 0) + uasm_i_ehb(p); + /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); @@ -919,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, } if (mode != not_refill && check_for_high_segbits) { uasm_l_large_segbits_fault(l, *p); + + if (mode == refill_scratch && scratch_reg >= 0) + uasm_i_ehb(p); + /* * We get here if we are an xsseg address, or if we are * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 
@@ -1255,6 +1267,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ if (c0_scratch_reg >= 0) { + uasm_i_ehb(p); UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); build_tlb_write_entry(p, l, r, tlb_random); uasm_l_leave(l, *p); @@ -1600,15 +1613,17 @@ static void build_setup_pgd(void) uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); uasm_l_tlbl_goaround1(&l, p); UASM_i_SLL(&p, a0, a0, 11); - uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, C0_CONTEXT); + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); } else { /* PGD in c0_KScratch */ - uasm_i_jr(&p, 31); if (cpu_has_ldpte) UASM_i_MTC0(&p, a0, C0_PWBASE); else UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); } #else #ifdef CONFIG_SMP @@ -1622,13 +1637,16 @@ static void build_setup_pgd(void) UASM_i_LA_mostly(&p, a2, pgdc); UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); #endif /* SMP */ - uasm_i_jr(&p, 31); /* if pgd_reg is allocated, save PGD also to scratch register */ - if (pgd_reg != -1) + if (pgd_reg != -1) { UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); - else + uasm_i_jr(&p, 31); + uasm_i_ehb(&p); + } else { + uasm_i_jr(&p, 31); uasm_i_nop(&p); + } #endif if (p >= (u32 *)tlbmiss_handler_setup_pgd_end) panic("tlbmiss_handler_setup_pgd space exceeded"); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 4d8cb9bb8365d07660d33c8049c045502fd4e1c6..43e6597c720c2d40362d11988f93a6de98cde2b0 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx) ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \ func##_positive) +static bool is_bad_offset(int b_off) +{ + return b_off > 0x1ffff || b_off < -0x20000; +} + static int build_body(struct jit_ctx *ctx) { const struct bpf_prog *prog = ctx->skf; @@ -728,7 +733,10 @@ static int build_body(struct jit_ctx *ctx) /* Load return register on DS for failures */ emit_reg_move(r_ret, r_zero, ctx); /* Return with error */ - emit_b(b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_b(b_off, ctx); emit_nop(ctx); break; case BPF_LD | BPF_W | BPF_IND: @@ -775,8 +783,10 @@ static int build_body(struct jit_ctx *ctx) emit_jalr(MIPS_R_RA, r_s0, ctx); emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ /* Check the error value */ - emit_bcond(MIPS_COND_NE, r_ret, 0, - b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx); emit_reg_move(r_ret, r_zero, ctx); /* We are good */ /* X <- P[1:K] & 0xf */ @@ -855,8 +865,10 @@ static int build_body(struct jit_ctx *ctx) /* A /= X */ ctx->flags |= SEEN_X | SEEN_A; /* Check if r_X is zero */ - emit_bcond(MIPS_COND_EQ, r_X, r_zero, - b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); emit_load_imm(r_ret, 0, ctx); /* delay slot */ emit_div(r_A, r_X, ctx); break; @@ -864,8 +876,10 @@ static int build_body(struct jit_ctx *ctx) /* A %= X */ ctx->flags |= SEEN_X | SEEN_A; /* Check if r_X is zero */ - emit_bcond(MIPS_COND_EQ, r_X, r_zero, - b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); emit_load_imm(r_ret, 0, ctx); /* delay slot */ emit_mod(r_A, r_X, ctx); break; @@ -926,7 +940,10 @@ static int build_body(struct jit_ctx *ctx) 
break; case BPF_JMP | BPF_JA: /* pc += K */ - emit_b(b_imm(i + k + 1, ctx), ctx); + b_off = b_imm(i + k + 1, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_b(b_off, ctx); emit_nop(ctx); break; case BPF_JMP | BPF_JEQ | BPF_K: @@ -1056,12 +1073,16 @@ static int build_body(struct jit_ctx *ctx) break; case BPF_RET | BPF_A: ctx->flags |= SEEN_A; - if (i != prog->len - 1) + if (i != prog->len - 1) { /* * If this is not the last instruction * then jump to the epilogue */ - emit_b(b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_b(b_off, ctx); + } emit_reg_move(r_ret, r_A, ctx); /* delay slot */ break; case BPF_RET | BPF_K: @@ -1075,7 +1096,10 @@ static int build_body(struct jit_ctx *ctx) * If this is not the last instruction * then jump to the epilogue */ - emit_b(b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_b(b_off, ctx); emit_nop(ctx); } break; @@ -1133,8 +1157,10 @@ static int build_body(struct jit_ctx *ctx) /* Load *dev pointer */ emit_load_ptr(r_s0, r_skb, off, ctx); /* error (0) in the delay slot */ - emit_bcond(MIPS_COND_EQ, r_s0, r_zero, - b_imm(prog->len, ctx), ctx); + b_off = b_imm(prog->len, ctx); + if (is_bad_offset(b_off)) + return -E2BIG; + emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx); emit_reg_move(r_ret, r_zero, ctx); if (code == (BPF_ANC | SKF_AD_IFINDEX)) { BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); @@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp) /* Generate the actual JIT code */ build_prologue(&ctx); - build_body(&ctx); + if (build_body(&ctx)) { + module_memfree(ctx.target); + goto out; + } build_epilogue(&ctx); /* Update the icache */ diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index aeb7b1b0f2024e1de01c25c14566d7f3e71c03e4..947a7172c814ede9fac4a86a66926d5ad1257de8 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; int store_offset = stack_adjust - 8; + enum reg_val_type td; int r0 = MIPS_R_V0; - if (dest_reg == MIPS_R_RA && - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + if (dest_reg == MIPS_R_RA) { /* Don't let zero extended value escape. */ - emit_instr(ctx, sll, r0, r0, 0); + td = get_reg_val_type(ctx, prog->len, BPF_REG_0); + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + emit_instr(ctx, sll, r0, r0, 0); + } if (ctx->flags & EBPF_SAVE_RA) { emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); @@ -583,6 +586,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) { int off, b_off; + int tcc_reg; ctx->flags |= EBPF_SEEN_TC; /* @@ -595,14 +599,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) b_off = b_imm(this_idx + 1, ctx); emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); /* - * if (--TCC < 0) + * if (TCC-- < 0) * goto out; */ /* Delay slot */ - emit_instr(ctx, daddiu, MIPS_R_T5, - (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); + tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? 
MIPS_R_V1 : MIPS_R_S4; + emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1); b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bltz, MIPS_R_T5, b_off); + emit_instr(ctx, bltz, tcc_reg, b_off); /* * prog = array->ptrs[index]; * if (prog == NULL) @@ -1278,6 +1282,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, } break; + case BPF_ST | BPF_NOSPEC: /* speculation barrier */ + break; + case BPF_ST | BPF_B | BPF_MEM: case BPF_ST | BPF_H | BPF_MEM: case BPF_ST | BPF_W | BPF_MEM: @@ -1815,7 +1822,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* Update the icache */ flush_icache_range((unsigned long)ctx.target, - (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); + (unsigned long)&ctx.target[ctx.idx]); if (bpf_jit_enable > 1) /* Dump JIT code */ diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 806fb798091f36f7a31d5efdab2e604c812e6ddb..07d98ba7f49e3a12a7c71ab5406d2e974e203e71 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -19,7 +19,7 @@ struct stackframe { static inline int get_mem(unsigned long addr, unsigned long *result) { unsigned long *address = (unsigned long *) addr; - if (!access_ok(VERIFY_READ, address, sizeof(unsigned long))) + if (!access_ok(address, sizeof(unsigned long))) return -1; if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) return -3; diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 2a5bb849b10efa742a82f8855708b4afcc8a9c4e..288b58b00dc84537fd1d49f43da0ecd3db02aefd 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) int irq; struct irq_chip *msi; - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { + return 0; + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index f1e92bf743c27df2812c338be0220dbb27b1f83f..3c3b1e6abb53562fd0508d7ac25ddb0b09891525 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose) if (pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_claim_resources(bus); } else { + struct pci_bus *child; + pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); } diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5ac4913aa254b2157bd66d86b91bcd..fc29b85cfa926d1b70e69901b2e2c3abc7e46fcc 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return 0; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + /* Point pcibios_map_irq() to the PCI version of it */ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) else octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; - if (!octeon_is_pci_host()) { - pr_notice("Not in host mode, PCI Controller not initialized\n"); - return 0; - } - /* PCI I/O and PCI MEM values */ set_io_port_base(OCTEON_PCI_IOSPACE_BASE); ioport_resource.start = 0; diff 
--git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c old mode 100644 new mode 100755 index d919a0d813a17a3639f8c6ade767e7dccc822146..38de2a9c3cf1a0bfefb48f8349eebddf39142736 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, { union cvmx_pcie_address pcie_addr; union cvmx_pciercx_cfg006 pciercx_cfg006; + union cvmx_pciercx_cfg032 pciercx_cfg032; pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) return 0; + pciercx_cfg032.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1)) + return 0; + pcie_addr.u64 = 0; pcie_addr.config.upper = 2; pcie_addr.config.io = 1; diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform index d80cd612df1f7aa14b7567076031d37a653a20d6..c3592b374ad23592d318851d32f64222b8561de4 100644 --- a/arch/mips/pistachio/Platform +++ b/arch/mips/pistachio/Platform @@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \ -I$(srctree)/arch/mips/include/asm/mach-pistachio load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000 zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000 +all-$(CONFIG_MACH_PISTACHIO) := uImage.gz diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 1f9cb0e3c79a695e38211cd7bcaac85688e298ee..613d61763433338a7b6ba0cd6ef6eb2a129168d7 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -38,6 +38,7 @@ choice config SOC_MT7620 bool "MT7620/8" + select CPU_MIPSR2_IRQ_VI select HW_HAS_PCI config SOC_MT7621 diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 41b71c4352c25216095d9c197e391796be10be85..c1ce6f43642bc8ee93b189bbaa86dec9cf1d9cb3 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = { }; static struct rt2880_pmx_func nd_sd_grp[] = { FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), - FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) + FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) }; static struct rt2880_pmx_group mt7620a_pinmux_data[] = { diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index b3d6bf23a6620c5837f3ac9fcc2fb3d6357cfba3..3ef3fb65813697b6ac7bf4ef1239399180fbdffb 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,4 +1,5 @@ obj-y := cfe.o +obj-$(CONFIG_SWIOTLB) += dma.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c new file mode 100644 index 0000000000000000000000000000000000000000..eb47a94f3583edfa47fb17a3090ad89065c5fa7f --- /dev/null +++ b/arch/mips/sibyte/common/dma.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DMA support for Broadcom SiByte platforms. + * + * Copyright (c) 2018 Maciej W. 
Rozycki + */ + +#include +#include + +void __init plat_swiotlb_setup(void) +{ + swiotlb_init(1); +} diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c index 99c720be72d261abed3ff920f28716cdebafab51..9ff26b0cd3b660d8f31efdf0771e54f30da7fe80 100644 --- a/arch/mips/sibyte/common/sb_tbprof.c +++ b/arch/mips/sibyte/common/sb_tbprof.c @@ -458,7 +458,7 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf, char *dest = buf; long cur_off = *offp; - if (!access_ok(VERIFY_WRITE, buf, size)) + if (!access_ok(buf, size)) return -EFAULT; mutex_lock(&sbp.lock); diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index f6d9182ef82a9cd2aa528d2de40f6e4634116119..70a1ab66d252c15f4c305d5901dee16b669f9320 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -960,12 +960,11 @@ void __init txx9_sramc_init(struct resource *r) goto exit_put; err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); if (err) { - device_unregister(&dev->dev); iounmap(dev->base); - kfree(dev); + device_unregister(&dev->dev); } return; exit_put: + iounmap(dev->base); put_device(&dev->dev); - return; } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 34605ca214984c7257507fa2fb5925eb48bd9f92..c99fa1c1bd9ccec73423197edc3a5c201ca0aa3e 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -8,6 +8,8 @@ ccflags-vdso := \ $(filter -E%,$(KBUILD_CFLAGS)) \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ + $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \ -D__VDSO__ ifeq ($(cc-name),clang) @@ -128,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) @@ -168,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 7068f341133d7eb038bb94a9953a3b9946d9bf51..0cd422345d49be17bb24165a0f84393c8951a083 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -31,6 +31,7 @@ config NDS32 select HAVE_DEBUG_KMEMLEAK select HAVE_MEMBLOCK select HAVE_REGS_AND_STACK_ACCESS_API + select ARCH_32BIT_OFF_T select IRQ_DOMAIN select LOCKDEP_SUPPORT select MODULES_USE_ELF_RELA diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h index 8e84fc385b946c391b932777d49bb82d05e25598..19b2841219adfeeab42d37edcfb5c8ccd87d68d1 100644 --- a/arch/nds32/include/asm/bitfield.h +++ b/arch/nds32/include/asm/bitfield.h @@ -692,8 +692,8 @@ #define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */ #define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */ #define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */ -#define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */ -#define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */ +#define PFM_CTL_offSEL1 16 /* The event selection for PFMC1 */ +#define PFM_CTL_offSEL2 22 /* The event selection for PFMC2 */ /* bit 28:31 reserved */ #define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 ) diff --git 
a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h index cb6cb91cfdf81622dc170286d83803e2d4e7ad73..baf178bf1d0b2aa39ec940bbf833de228adb2882 100644 --- a/arch/nds32/include/asm/futex.h +++ b/arch/nds32/include/asm/futex.h @@ -40,7 +40,7 @@ futex_atomic_cmpxchg_inatomic(u32 * uval, u32 __user * uaddr, int ret = 0; u32 val, tmp, flags; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; smp_mb(); diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index d3e19a55cf530046795f7c2836fbc13dc3b823fb..9f52db930c004ecc5c6de013721e06d7b4bf52a3 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -4,7 +4,7 @@ #ifndef _ASMNDS32_PGTABLE_H #define _ASMNDS32_PGTABLE_H -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #include #include diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 362a32d9bd16871e1db6c45d544eb2c4450bfeb5..53dcb49b0b12f5b8a85dd1858f71d3b4a0f23939 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -13,9 +13,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" /* @@ -53,7 +50,7 @@ static inline void set_fs(mm_segment_t fs) #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) -#define access_ok(type, addr, size) \ +#define access_ok(addr, size) \ __range_ok((unsigned long)addr, (unsigned long)size) /* * Single-value transfer routines. They automatically use the right @@ -94,7 +91,7 @@ static inline void set_fs(mm_segment_t fs) ({ \ const __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + if (access_ok(__p, sizeof(*__p))) { \ __get_user_err((x), __p, (err)); \ } else { \ (x) = 0; (err) = -EFAULT; \ @@ -189,7 +186,7 @@ do { \ ({ \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + if (access_ok(__p, sizeof(*__p))) { \ __put_user_err((x), __p, (err)); \ } else { \ (err) = -EFAULT; \ @@ -279,7 +276,7 @@ extern unsigned long __arch_copy_to_user(void __user * to, const void *from, #define INLINE_COPY_TO_USER static inline unsigned long clear_user(void __user * to, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(to, n)) n = __arch_clear_user(to, n); return n; } diff --git a/arch/nds32/include/asm/vmalloc.h b/arch/nds32/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..caeed389841944e0eba9eb189ada7cb4ea27a882 --- /dev/null +++ b/arch/nds32/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_NDS32_VMALLOC_H +#define _ASM_NDS32_VMALLOC_H + +#endif /* _ASM_NDS32_VMALLOC_H */ diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h index 6e95901cabe3c9131325fd084a02c05526eec42a..539dd4eaa5c08b9f566e0bc52291bc62e3e699c9 100644 --- a/arch/nds32/include/uapi/asm/unistd.h +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -2,6 +2,7 @@ // Copyright (C) 2005-2017 Andes Technology Corporation #define __ARCH_WANT_SYNC_FILE_RANGE2 +#define __ARCH_WANT_SET_GET_RLIMIT /* Use the standard ABI for syscalls */ #include diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index a0a9679ad5dee8a9d08810556cc204dedd127c08..8a41372551ff3cbca4bcf0a94e1bea400dcb4c01 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -211,29 +211,15 @@ void 
prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long)&return_to_handler; - struct ftrace_graph_ent trace; unsigned long old; - int err; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, NULL); - - if (err == -EBUSY) - return; - - *parent = return_hooker; + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + *parent = return_hooker; } noinline void ftrace_graph_caller(void) diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index 63a1a5ef5219f47bcd9797e543298056294da22f..87683583f2064ad011a6befc19cc6bbdf50ba52c 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -71,8 +71,9 @@ static const char *hwcap_str[] = { "div", "mac", "l2c", - "dx_regs", + "fpu_dp", "v2", + "dx_regs", NULL, }; diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 5d01f6e33cb8e6469810fdc358b4960cabb1e42e..4fccf9f2d436fbb38f41682effcc7fc49ef76e1a 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -94,7 +94,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) frame = (struct rt_sigframe __user *)regs->sp; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (restore_sigframe(regs, frame)) @@ -215,7 +215,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t * set, struct pt_regs *regs) get_sigframe(ksig, regs, sizeof(*frame)); int err = 0; - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; __put_user_error(0, &frame->uc.uc_flags, err); diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile index 6b685585222385a8a5232668143e027ea41ff368..7c5c15ad854aa534389f377aecd55a21f0f65079 100644 --- a/arch/nds32/mm/Makefile +++ b/arch/nds32/mm/Makefile @@ -4,4 +4,8 @@ obj-y := extable.o tlb.o \ obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_HIGHMEM) += highmem.o -CFLAGS_proc-n13.o += -fomit-frame-pointer + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE) +endif +CFLAGS_proc.o += -fomit-frame-pointer diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index e1aed9dc692dd3bac752720a82880de865e7cb2d..c8b9061a2ee3d58f713dfa6194e0e9d82bd71ce0 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -289,13 +289,13 @@ static inline int do_16(unsigned long inst, struct pt_regs *regs) unaligned_addr += shift; if (load) { - if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) + if (!access_ok((void *)unaligned_addr, len)) return -EACCES; get_data(unaligned_addr, &target_val, len); *idx_to_addr(regs, target_idx) = target_val; } else { - if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) + if (!access_ok((void *)unaligned_addr, len)) return -EACCES; target_val = *idx_to_addr(regs, target_idx); set_data((void *)unaligned_addr, target_val, len); @@ -479,7 +479,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs) if (load) { - if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len)) + if (!access_ok((void *)unaligned_addr, len)) return -EACCES; get_data(unaligned_addr, &target_val, len); @@ -491,7 +491,7 @@ static inline int do_32(unsigned long inst, struct 
pt_regs *regs) *idx_to_addr(regs, RT(inst)) = target_val; } else { - if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len)) + if (!access_ok((void *)unaligned_addr, len)) return -EACCES; target_val = *idx_to_addr(regs, RT(inst)); diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index b740534b152c1dbf340d11db9dd1650e4228a11c..0202c970bb00b7e03b01bee5f3f8912da0112eed 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -237,7 +237,6 @@ void do_page_fault(unsigned long entry, unsigned long addr, else tsk->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index f4ad1138e6b9031e2438cf429a683e7df85fbe86..4ccd84aa5c902eca8ff25aacd905caf78348c2aa 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config NIOS2 def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index dfa3c7cb30b47cf3b8acd981af49d658d3adb3a8..e0ea10806491f47f8e9e7b21b8edd938881b1b5b 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -37,7 +37,7 @@ (((signed long)(((long)get_fs().seg) & \ ((long)(addr) | (((long)(addr)) + (len)) | (len)))) == 0) -#define access_ok(type, addr, len) \ +#define access_ok(addr, len) \ likely(__access_ok((unsigned long)(addr), (unsigned long)(len))) # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" @@ -70,7 +70,7 @@ static inline unsigned long __must_check __clear_user(void __user *to, static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { - if (!access_ok(VERIFY_WRITE, to, n)) + if (!access_ok(to, n)) return n; return __clear_user(to, n); } @@ -142,7 +142,7 @@ do { \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ unsigned long __gu_val = 0; \ - if (access_ok(VERIFY_READ, __gu_ptr, sizeof(*__gu_ptr))) \ + if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \ __get_user_common(__gu_val, sizeof(*__gu_ptr), \ __gu_ptr, __gu_err); \ (x) = (__force __typeof__(x))__gu_val; \ @@ -168,7 +168,7 @@ do { \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ __typeof__(*(ptr)) __pu_val = (__typeof(*ptr))(x); \ - if (access_ok(VERIFY_WRITE, __pu_ptr, sizeof(*__pu_ptr))) { \ + if (access_ok(__pu_ptr, sizeof(*__pu_ptr))) { \ switch (sizeof(*__pu_ptr)) { \ case 1: \ __put_user_asm(__pu_val, "stb", __pu_ptr, __pu_err); \ diff --git a/arch/nios2/include/asm/vmalloc.h b/arch/nios2/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..ec7a9260090bcc2ff2709a94fcce170b2fa6345a --- /dev/null +++ b/arch/nios2/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_NIOS2_VMALLOC_H +#define _ASM_NIOS2_VMALLOC_H + +#endif /* _ASM_NIOS2_VMALLOC_H */ diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h index b6bdae04bc84c118f50d7f656f5f39cf62318c32..aa4e90e0088551d63cbcfb723fb551846c8e0c07 100644 --- a/arch/nios2/include/uapi/asm/unistd.h +++ b/arch/nios2/include/uapi/asm/unistd.h @@ -19,6 +19,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT /* Use the standard ABI for syscalls */ #include diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 
20662b0f6c9e30cd52279ce3274bcbc463fa5236..4a81876b6086e57fb066e74817d976c74cdb1a23 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -106,7 +106,7 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw) sigset_t set; int rval; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 24fd84cf6006cf2a7ac285152918db0668af69da..3cb796e465223171cc6244ff3b2c67d3d0741149 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -158,9 +158,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e0081e7348271d212b56ea5e28680d4e07ce59cb..078b06c61f0a957a3945353e625960000cffc15e 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -6,6 +6,7 @@ config OPENRISC def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE select DMA_NONCOHERENT_OPS select OF diff --git a/arch/openrisc/include/asm/futex.h b/arch/openrisc/include/asm/futex.h index 618da4a1bffb9a54aa48daf8942324ea59d7e96f..fe894e6331aedb7c517eba144318494ab3ac623a 100644 --- a/arch/openrisc/include/asm/futex.h +++ b/arch/openrisc/include/asm/futex.h @@ -72,7 +72,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( \ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index bbf5c79cce7a30832422c1a290e895074fab1042..a44682c8adc34efeeb794a9340d4962fdf50d622 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -58,8 +58,12 @@ /* Ensure that addr is below task's addr_limit */ #define __addr_ok(addr) ((unsigned long) addr < get_fs()) -#define access_ok(type, addr, size) \ - __range_ok((unsigned long)addr, (unsigned long)size) +#define access_ok(addr, size) \ +({ \ + unsigned long __ao_addr = (unsigned long)(addr); \ + unsigned long __ao_size = (unsigned long)(size); \ + __range_ok(__ao_addr, __ao_size); \ +}) /* * These are the main single-value transfer routines. 
They automatically @@ -102,7 +106,7 @@ extern long __put_user_bad(void); ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + if (access_ok(__pu_addr, size)) \ __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ }) @@ -175,7 +179,7 @@ struct __large_struct { ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) * __gu_addr = (ptr); \ - if (access_ok(VERIFY_READ, __gu_addr, size)) \ + if (access_ok(__gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -254,7 +258,7 @@ extern unsigned long __clear_user(void *addr, unsigned long size); static inline __must_check unsigned long clear_user(void *addr, unsigned long size) { - if (likely(access_ok(VERIFY_WRITE, addr, size))) + if (likely(access_ok(addr, size))) size = __clear_user(addr, size); return size; } diff --git a/arch/openrisc/include/asm/vmalloc.h b/arch/openrisc/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..75435eceec32a9ee552ec8dfdfdb02dad7561573 --- /dev/null +++ b/arch/openrisc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_OPENRISC_VMALLOC_H +#define _ASM_OPENRISC_VMALLOC_H + +#endif /* _ASM_OPENRISC_VMALLOC_H */ diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 11c5a58ab3336a548889f091ca707a67c2e93ab0..017d57d62b045e2c9e24f8f8fbb443a61825709b 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -20,6 +20,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 0c826ad6e994cce359474229acf08ff0d0330b78..ee6159d2ed22ec4954fe7def5eea7f2e6099bcc6 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -240,7 +240,7 @@ handler: ;\ * occured. in fact they never do. if you need them use * values saved on stack (for SPR_EPC, SPR_ESR) or content * of r4 (for SPR_EEAR). for details look at EXCEPTION_HANDLE() - * in 'arch/or32/kernel/head.S' + * in 'arch/openrisc/kernel/head.S' */ /* =====================================================[ exceptions] === */ diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index 9fc6b60140f007bea1442f60727a22aee24776c9..31ed257ff06188eec745477154aa74d17910ef37 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -1728,7 +1728,7 @@ _string_nl: /* * .data section should be page aligned - * (look into arch/or32/kernel/vmlinux.lds) + * (look into arch/openrisc/kernel/vmlinux.lds.S) */ .section .data,"aw" .align 8192 diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 265f10fb393071a7264d350a8e88d3d498354cd0..5ac9d3b1d6158ba166c382ee70264613e763598d 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -50,7 +50,7 @@ static int restore_sigcontext(struct pt_regs *regs, /* * Restore the regs from &sc->regs. 
- * (sc is already checked for VERIFY_READ since the sigframe was + * (sc is already checked since the sigframe was * checked in sys_sigreturn previously) */ err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); @@ -83,7 +83,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) if (((long)frame) & 3) goto badframe; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; @@ -161,7 +161,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; /* Create siginfo. */ diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index dc4dbafc1d83254066687dce319aaf3b16c2d752..ae72d50e91a8f010338c62fdcbad8726b651b301 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -185,7 +185,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, else tsk->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8e6d83f79e72bcd6a64dc4956f4f882b7352bd7a..8a5172768f415fc67577b69340387ae2373c2f4b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PARISC def_bool y + select ARCH_32BIT_OFF_T if !64BIT select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_IDE select HAVE_OPROFILE @@ -18,7 +19,7 @@ config PARISC select HAVE_MEMBLOCK select NO_BOOTMEM select BUG - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select HAVE_PERF_EVENTS select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 5ce030266e7d03bbfd7da5885471b1a874eefcd7..253d7ca71472474af109df131cf95d9206027229 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS KBUILD_CFLAGS_KERNEL += -mlong-calls endif +# Without this, "ld -r" results in .text sections that are too big (> 0x40000) +# for branches to reach stubs. And multiple .text sections trigger a warning +# when creating the sysfs module information section. 
+ifndef CONFIG_64BIT +KBUILD_CFLAGS_MODULE += -ffunction-sections +endif + # select which processor to optimise for cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100 cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200 diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S index 5aba20fa48aafeac0ce7475bdf228b803d7f2f5c..e8b798fd0cf038d5b596d5d988a3c9e83e073a09 100644 --- a/arch/parisc/boot/compressed/head.S +++ b/arch/parisc/boot/compressed/head.S @@ -22,7 +22,7 @@ __HEAD ENTRY(startup) - .level LEVEL + .level PA_ASM_LEVEL #define PSW_W_SM 0x200 #define PSW_W_BIT 36 @@ -63,7 +63,7 @@ $bss_loop: load32 BOOTADDR(decompress_kernel),%r3 #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL ssm PSW_W_SM, %r0 /* set W-bit */ depdi 0, 31, 32, %r3 #endif @@ -72,7 +72,7 @@ $bss_loop: startup_continue: #ifdef CONFIG_64BIT - .level LEVEL + .level PA_ASM_LEVEL rsm PSW_W_SM, %r0 /* clear W-bit */ #endif diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S index 4ebd4e65524cd3cf6347a2948dede9163a4807b9..41ebe97fad1097f85a0c5d602db8bb74f474b946 100644 --- a/arch/parisc/boot/compressed/vmlinux.lds.S +++ b/arch/parisc/boot/compressed/vmlinux.lds.S @@ -42,8 +42,8 @@ SECTIONS #endif _startcode_end = .; - /* bootloader code and data starts behind area of extracted kernel */ - . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); + /* bootloader code and data starts at least behind area of extracted kernel */ + . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START)); /* align on next page boundary */ . = ALIGN(4096); diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index e9c6385ef0d16235dd24ff256a9ea9181745c355..6f30fa5bdaedfb64c09be83ba81dbe32aed8a917 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -61,14 +61,14 @@ #define LDCW ldcw,co #define BL b,l # ifdef CONFIG_64BIT -# define LEVEL 2.0w +# define PA_ASM_LEVEL 2.0w # else -# define LEVEL 2.0 +# define PA_ASM_LEVEL 2.0 # endif #else #define LDCW ldcw #define BL bl -#define LEVEL 1.1 +#define PA_ASM_LEVEL 1.1 #endif #ifdef __ASSEMBLY__ diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index f627c37dad9c92a7beeb277ead459726afcf5dad..ab5c215cf46c3d81a5ef840fe46ffe740174c7ec 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size) ** if (((unsigned long)p & 0xf) == 0) ** return __ldcw(p); */ -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __typeof__(*(ptr)) _x_ = (x); \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \ + __ret; \ +}) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index cf7ba058f619a94d25d3d26ca7dfc3da27c91290..d2c3e410685188d24d54263f08c3c7c35c154053 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (uaccess_kernel() && !uaddr) return -EFAULT; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; /* HPPA has no cmpxchg in hardware and therefore the 
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index fa6b7c78f18a9c82620ef0b67d25b1a97adf4027..ff0860b2b21ab34e9c2c69197fbffe0de939c36c 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -117,7 +117,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #if CONFIG_PGTABLE_LEVELS == 3 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) #else -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 #define BITS_PER_PMD 0 #endif #define PTRS_PER_PMD (1UL << BITS_PER_PMD) diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 2a27b275ab092cc60b3d003250aaaf647aa9c916..9ff033d261ab381c9e356fea458d768170f9effc 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *); static inline unsigned long regs_return_value(struct pt_regs *regs) { - return regs->gr[20]; + return regs->gr[28]; } static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { - regs->iaoq[0] = val; + regs->iaoq[0] = val; + regs->iaoq[1] = val + 4; } /* Query offset/name of register from its name/offset */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ea70e36ce6af52e5fe4b244ee61ce0769d7b6e93..30ac2865ea730cdf650efd7ca993cfd6daa79d2e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -27,7 +27,7 @@ * that put_user is the same as __put_user, etc. */ -#define access_ok(type, uaddr, size) \ +#define access_ok(uaddr, size) \ ( (uaddr) == (uaddr) ) #define put_user __put_user diff --git a/arch/parisc/include/asm/vmalloc.h b/arch/parisc/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..1088ae4e7af9fec24c547abadb3372d8de235fee --- /dev/null +++ b/arch/parisc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_PARISC_VMALLOC_H +#define _ASM_PARISC_VMALLOC_H + +#endif /* _ASM_PARISC_VMALLOC_H */ diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 870fbf8c708811bca3bbbbffea8b21afa55f17b6..8d88c457fe100d2b58fff8b9cb80ef1369277558 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -27,6 +27,9 @@ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ +#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 5eb979d04b905420e28f63dd526e6ca13aaa9842..a1a5e4c59e6b2a7034b1610a173e3cb4a992450a 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -789,7 +789,7 @@ EXPORT_SYMBOL(device_to_hwpath); static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); -static void walk_lower_bus(struct parisc_device *dev) +static void __init walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 
242c5ab6561130e6dd673f9d401485317c59b658..d2f92273fe37634e8da8736a263ca7e1473bffd4 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -186,7 +186,7 @@ bv,n 0(%r3) nop .word 0 /* checksum (will be patched) */ - .word PA(os_hpmc) /* address of handler */ + .word 0 /* address of handler */ .word 0 /* length of handler */ .endm diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 6fa8535d3cceb55de7ecf1051fcb2c812dcb1f51..e46a4157a8948862697755439496a16e5acb29f4 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - struct ftrace_graph_ent trace; extern int parisc_return_to_handler; if (unlikely(ftrace_graph_is_dead())) @@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent, old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return; - - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - 0, NULL) == -EBUSY) - return; - - /* activate parisc_return_to_handler() as return point */ - *parent = (unsigned long) &parisc_return_to_handler; + if (!function_graph_enter(old, self_addr, 0, NULL)) + /* activate parisc_return_to_handler() as return point */ + *parent = (unsigned long) &parisc_return_to_handler; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index fbb4e43fda05332de57bdc135ec3711d4bd5ee77..f56cbab64ac107c5afa77d2ad107a1b7d722578e 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -22,7 +22,7 @@ #include #include - .level LEVEL + .level PA_ASM_LEVEL __INITDATA ENTRY(boot_args) @@ -258,7 +258,7 @@ stext_pdc_ret: ldo R%PA(fault_vector_11)(%r10),%r10 $is_pa20: - .level LEVEL /* restore 1.1 || 2.0w */ + .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */ #endif /*!CONFIG_64BIT*/ load32 PA(fault_vector_20),%r10 diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index 781c3b9a3e46afcfa47dde5ae852f5a8f76630b5..fde6541155645734b14374994b122f9c285bf484 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -85,7 +85,7 @@ END(hpmc_pim_data) .import intr_save, code .align 16 -ENTRY_CFI(os_hpmc) +ENTRY(os_hpmc) .os_hpmc: /* @@ -302,7 +302,6 @@ os_hpmc_6: b . 
nop .align 16 /* make function length multiple of 16 bytes */ -ENDPROC_CFI(os_hpmc) .os_hpmc_end: diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index eb39e7e380d7e27b24f6bae39ae0e6c3583511e3..97c206734e24f372175c8592703033084d81d0ae 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) */ int running_on_qemu __read_mostly; +EXPORT_SYMBOL(running_on_qemu); void __cpuidle arch_cpu_idle_dead(void) { @@ -210,12 +211,6 @@ void __cpuidle arch_cpu_idle(void) static int __init parisc_idle_init(void) { - const char *marker; - - /* check QEMU/SeaBIOS marker in PAGE0 */ - marker = (char *) &PAGE0->pad0; - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); - if (!running_on_qemu) cpu_idle_poll_ctrl(1); diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529bbcbb00262bba3f4e9534439766df..de2998cb189e8814d4cc55121cd7e9fb03476807 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -167,6 +167,9 @@ long arch_ptrace(struct task_struct *child, long request, if ((addr & (sizeof(unsigned long)-1)) || addr >= sizeof(struct pt_regs)) break; + if (addr == PT_IAOQ0 || addr == PT_IAOQ1) { + data |= 3; /* ensure userspace privilege */ + } if ((addr >= PT_GR1 && addr <= PT_GR31) || addr == PT_IAOQ0 || addr == PT_IAOQ1 || (addr >= PT_FR0 && addr <= PT_FR31 + 4) || @@ -228,16 +231,18 @@ long arch_ptrace(struct task_struct *child, long request, static compat_ulong_t translate_usr_offset(compat_ulong_t offset) { - if (offset < 0) - return sizeof(struct pt_regs); - else if (offset <= 32*4) /* gr[0..31] */ - return offset * 2 + 4; - else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */ - return offset + 32*4; - else if (offset < sizeof(struct pt_regs)/2 + 32*4) - return offset * 2 + 4 - 32*8; + compat_ulong_t pos; + + if (offset < 32*4) /* gr[0..31] */ + pos = offset * 2 + 4; + else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */ + pos = (offset - 32*4) + PT_FR0; + else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */ + pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4; else - return sizeof(struct pt_regs); + pos = sizeof(struct pt_regs); + + return pos; } long compat_arch_ptrace(struct task_struct *child, compat_long_t request, @@ -281,9 +286,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr = translate_usr_offset(addr); if (addr >= sizeof(struct pt_regs)) break; + if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) { + data |= 3; /* ensure userspace privilege */ + } if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { /* Special case, fp regs are 64 bits anyway */ - *(__u64 *) ((char *) task_regs(child) + addr) = data; + *(__u32 *) ((char *) task_regs(child) + addr) = data; ret = 0; } else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || @@ -308,15 +316,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + int rc = tracehook_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. 
*/ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. + */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. */ @@ -340,7 +362,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call @@ -483,7 +504,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val) return; case RI(iaoq[0]): case RI(iaoq[1]): - regs->iaoq[num - RI(iaoq[0])] = val; + /* set 2 lowest bits to ensure userspace privilege: */ + regs->iaoq[num - RI(iaoq[0])] = val | 3; return; case RI(sar): regs->sar = val; return; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 4e87c35c22b7215722aebbf6a980e2706a15c44a..79c8b994e7d1198fe7e68577ad07519eae9c4622 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -399,6 +399,9 @@ void __init start_parisc(void) int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; + /* check QEMU/SeaBIOS marker in PAGE0 */ + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); + cpunum = smp_processor_id(); init_cpu_topology(); diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index f453997a7b8f219e2657cfd146660658d87c3d2a..61a647a55c69551ee9e046abb8cac83e88b9cda2 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -48,7 +48,7 @@ registers). */ #define KILL_INSN break 0,0 - .level LEVEL + .level PA_ASM_LEVEL .text diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 68f10f87073dab74f414ebf5bdef2dd681707fb4..f509ce685f0eca0b6364d500d8be49a4e76212ec 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -697,6 +697,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) } up_read(&current->mm->mmap_sem); } + /* CPU could not fetch instruction, so clear stale IIR value. */ + regs->iir = 0xbaadf00d; /* Fall Through */ case 27: /* Data memory protection ID trap */ @@ -802,7 +804,8 @@ void __init initialize_ivt(const void *iva) * the Length/4 words starting at Address is zero. */ - /* Compute Checksum for HPMC handler */ + /* Setup IVA and compute checksum for HPMC handler */ + ivap[6] = (u32)__pa(os_hpmc); length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h index 933423fa5144aca0b32a8110d88ff20abc98b90e..b0db61188a612873c84ba25b27de0c3f28168e69 100644 --- a/arch/parisc/math-emu/cnv_float.h +++ b/arch/parisc/math-emu/cnv_float.h @@ -60,19 +60,19 @@ ((exponent < (SGL_P - 1)) ? 
\ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE) -#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH) +#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0) #define Sgl_roundnearest_from_int(int_value,sgl_value) \ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \ - if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \ + if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \ Sall(sgl_value)++ #define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \ - ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB)) + (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB)) #define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \ - if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \ + if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++ #define Dint_isinexact_to_dbl(dint_value) \ diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index c8e8b7c0555837144d3427daaf395b6317d06aa6..c10bb2bdba777e34611994d8f7a8c9e3c44b2ce4 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -327,14 +327,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; - /* * No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ - + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 74842d28a7a16e9deeda1ff3a38446605805b04a..aae9b0d71c1e1ac5fe30419f7fbab29505c044ec 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr, pte = pte_mkhuge(pte); } - if (address >= end_paddr) { - if (force) - break; - else - pte_val(pte) = 0; - } + if (address >= end_paddr) + break; set_pte(pg_table, pte); diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 92a9b5f12f98adfe4e87e6a18ff381e528108946..f29f682352f017fd2c55dd0ba34df9ac82a8c0c5 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -3,7 +3,7 @@ * arch/parisc/mm/ioremap.c * * (C) Copyright 1995 1996 Linus Torvalds - * (C) Copyright 2001-2006 Helge Deller + * (C) Copyright 2001-2019 Helge Deller * (C) Copyright 2005 Kyle McMartin */ @@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l addr = (void __iomem *) area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, pgprot)) { - vfree(addr); + vunmap(addr); return NULL; } @@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l } EXPORT_SYMBOL(__ioremap); -void iounmap(const volatile void __iomem *addr) +void iounmap(const volatile void __iomem *io_addr) { - if (addr > high_memory) - return vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); + unsigned long addr = (unsigned long)io_addr & PAGE_MASK; + + if (is_vmalloc_addr((void *)addr)) + vunmap((void *)addr); } EXPORT_SYMBOL(iounmap); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a80669209155383343ba8adbb90b3e8427e2afdb..5329ef16decdcef0b0baad8736d3bbaa8d9fa37b 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -128,6 +128,7 @@ config PPC # # Please keep this list sorted alphabetically. 
# + select ARCH_32BIT_OFF_T if PPC32 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE @@ -154,7 +155,7 @@ config PPC select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN select DYNAMIC_FTRACE if FUNCTION_TRACER @@ -202,7 +203,7 @@ config PPC select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES select HAVE_LD_DEAD_CODE_DATA_ELIMINATION - select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC @@ -1008,6 +1009,19 @@ config FSL_RIO source "drivers/rapidio/Kconfig" +config PPC_RTAS_FILTER + bool "Enable filtering of RTAS syscalls" + default y + depends on PPC_RTAS + help + The RTAS syscall API has security issues that could be used to + compromise system integrity. This option enforces restrictions on the + RTAS calls and arguments passed by userspace programs to mitigate + these issues. + + Say Y unless you know what you are doing and the filter is causing + problems for you. + endmenu config NONSTATIC_KERNEL diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 11a1acba164a1629f10a3080946c80f64feb0eca..e43321f46a3bef0d4cf6744a4bbcf9867a81787f 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -90,11 +90,13 @@ MULTIPLEWORD := -mmultiple endif ifdef CONFIG_PPC64 +ifndef CONFIG_CC_IS_CLANG cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 endif +endif ifneq ($(cc-name),clang) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align @@ -134,6 +136,7 @@ endif endif CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no) +ifndef CONFIG_CC_IS_CLANG ifdef CONFIG_CPU_LITTLE_ENDIAN CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) @@ -142,10 +145,18 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) endif +endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) -CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) +# Clang unconditionally reserves r2 on ppc32 and does not support the flag +# https://bugs.llvm.org/show_bug.cgi?id=39555 +CFLAGS-$(CONFIG_PPC32) := $(call cc-option, -ffixed-r2) + +# Clang doesn't support -mmultiple / -mno-multiple +# https://bugs.llvm.org/show_bug.cgi?id=39556 +CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD)) + CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifdef CONFIG_PPC_BOOK3S_64 @@ -160,8 +171,17 @@ else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif +ifdef CONFIG_FUNCTION_TRACER +CC_FLAGS_FTRACE := -pg ifdef CONFIG_MPROFILE_KERNEL - CC_FLAGS_FTRACE := -pg -mprofile-kernel +CC_FLAGS_FTRACE += -mprofile-kernel +endif +# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828 +ifneq 
($(cc-name),clang) +CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog) +endif endif CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) @@ -229,16 +249,15 @@ ifdef CONFIG_6xx KBUILD_CFLAGS += -mcpu=powerpc endif -# Work around a gcc code-gen bug with -fno-omit-frame-pointer. -ifdef CONFIG_FUNCTION_TRACER -KBUILD_CFLAGS += -mno-sched-epilog -endif - cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_E500) += -Wa,-me500 -cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 + +# When using '-many -mpower4' gas will first try and find a matching power4 +# mnemonic and failing that it will allow any valid mnemonic that GAS knows +# about. GCC will pass -many to GAS when assembling, clang does not. +cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) KBUILD_AFLAGS += $(cpu-as-y) @@ -404,36 +423,9 @@ archprepare: checkbin # to stdout and these checks are run even on install targets. TOUT := .tmp_gas_check -# Check gcc and binutils versions: -# - gcc-3.4 and binutils-2.14 are a fatal combination -# - Require gcc 4.0 or above on 64-bit -# - gcc-4.2.0 has issues compiling modules on 64-bit +# Check toolchain versions: +# - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" = "0304" ; then \ - if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ - echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ - echo 'correctly with gcc-3.4 and your version of binutils.'; \ - echo '*** Please upgrade your binutils or downgrade your gcc'; \ - false; \ - fi ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" -lt "0400" \ - && test "x${CONFIG_PPC64}" = "xy" ; then \ - echo -n "Sorry, GCC v4.0 or above is required to build " ; \ - echo "the 64-bit powerpc kernel." ; \ - false ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-fullversion)" = "040200" \ - && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ - echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ - echo 'kernel with modules enabled.' 
; \ - echo -n '*** Please use a different GCC version or ' ; \ - echo 'disable kernel modules' ; \ - false ; \ - fi @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \ && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \ echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 0fb96c26136f6aa506ce212278b15b22637ae154..7d5ddf53750ce7f0f6974f240604cbb9322d5188 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -32,8 +32,8 @@ else endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ - -fno-strict-aliasing -Os -msoft-float -pipe \ - -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ + -fno-strict-aliasing -Os -msoft-float -mno-altivec -mno-vsx \ + -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ -D$(compress-y) ifdef CONFIG_PPC64_BOOT_WRAPPER @@ -55,6 +55,11 @@ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc BOOTARFLAGS := -cr$(KBUILD_ARFLAGS) +ifdef CONFIG_CC_IS_CLANG +BOOTCFLAGS += $(CLANG_FLAGS) +BOOTAFLAGS += $(CLANG_FLAGS) +endif + ifdef CONFIG_DEBUG_INFO BOOTCFLAGS += -g endif diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c index 9d9f6f334d3cc022f8c1ee39c30fe802863b38b0..3da3e2b1b51bcb8ac55a52b11ace265a775a22ce 100644 --- a/arch/powerpc/boot/addnote.c +++ b/arch/powerpc/boot/addnote.c @@ -223,7 +223,11 @@ main(int ac, char **av) PUT_16(E_PHNUM, np + 2); /* write back */ - lseek(fd, (long) 0, SEEK_SET); + i = lseek(fd, (long) 0, SEEK_SET); + if (i < 0) { + perror("lseek"); + exit(1); + } i = write(fd, buf, n); if (i < 0) { perror("write"); diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index dcf2f15e679715b2e4000727a576e1a9f6cb1bb1..9b9d17437373bfd7ce821c87e4a98c36e7ef371a 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -15,7 +15,7 @@ RELA = 7 RELACOUNT = 0x6ffffff9 - .text + .data /* A procedure descriptor used when booting this as a COFF file. * When making COFF, this comes first in the link and we're * linked at 0x500000. @@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9 .globl _zimage_start_opd _zimage_start_opd: .long 0x500000, 0, 0, 0 + .text + b _zimage_start #ifdef __powerpc64__ .balign 8 @@ -47,8 +49,10 @@ p_end: .long _end p_pstack: .long _platform_stack_top #endif - .weak _zimage_start .globl _zimage_start + /* Clang appears to require the .weak directive to be after the symbol + * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */ + .weak _zimage_start _zimage_start: .globl _zimage_start_lib _zimage_start_lib: diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts index 538e42b1120d861b28385bdf6468ba4c0a1e0b2c..b5861fa3836c112e55d9da0712d2841bc276caad 100644 --- a/arch/powerpc/boot/dts/bamboo.dts +++ b/arch/powerpc/boot/dts/bamboo.dts @@ -268,8 +268,10 @@ /* Outbound ranges, one memory and one IO, * later cannot be changed. 
Chip supports a second * IO range but we don't use it for now + * The chip also supports a larger memory range but + * it's not naturally aligned, so our code will break */ - ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000 + ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000 0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index 2a0c8b1bf147959bd23b53d844acc19a242dcdbc..9757d4f6331e78d55acb07b8c847c0af44852548 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -5,6 +5,10 @@ #include #include +#define INT_MAX ((int)(~0U>>1)) +#define UINT32_MAX ((u32)~0U) +#define INT32_MAX ((s32)(UINT32_MAX >> 1)) + #include "of.h" typedef unsigned long uintptr_t; diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c index 0272570d02de15ba70363809c987ca7efd97dd99..dfb199ef5b949d066ecedb2ce3a950711b425f25 100644 --- a/arch/powerpc/boot/opal.c +++ b/arch/powerpc/boot/opal.c @@ -13,8 +13,6 @@ #include #include "../include/asm/opal-api.h" -#ifdef CONFIG_PPC64_BOOT_WRAPPER - /* Global OPAL struct used by opal-call.S */ struct opal { u64 base; @@ -101,9 +99,3 @@ int opal_console_init(void *devp, struct serial_console_data *scdp) return 0; } -#else -int opal_console_init(void *devp, struct serial_console_data *scdp) -{ - return -1; -} -#endif /* __powerpc64__ */ diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h index e22e5b3770ddcc32b4ee37fecc63893b96522535..ebfadd39e1924a357bdb818a5a60d3353c1dfa6c 100644 --- a/arch/powerpc/boot/xz_config.h +++ b/arch/powerpc/boot/xz_config.h @@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p) #ifdef __LITTLE_ENDIAN__ #define get_le32(p) (*((uint32_t *) (p))) +#define cpu_to_be32(x) swab32(x) +static inline u32 be32_to_cpup(const u32 *p) +{ + return swab32p((u32 *)p); +} #else #define get_le32(p) swab32p(p) +#define cpu_to_be32(x) (x) +static inline u32 be32_to_cpup(const u32 *p) +{ + return *p; +} #endif +static inline uint32_t get_unaligned_be32(const void *p) +{ + return be32_to_cpup(p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + *((u32 *)p) = cpu_to_be32(val); +} + #define memeq(a, b, size) (memcmp(a, b, size) == 0) #define memzero(buf, size) memset(buf, 0, size) diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 6daa56f8895cb22bcd37011328c68932abe50074..5f743db4c9f8c89289bd65143fcd8f2f14bf6be1 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -111,7 +111,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7ee736f207740ea6ced36d16b7bf51bde47cbaca..80559d24881cf2805ddb4d9175f465d19a531b47 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -253,8 +253,6 @@ CONFIG_ATM_LANE=m CONFIG_ATM_BR2684=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m -CONFIG_DECNET=m -CONFIG_DECNET_ROUTER=y CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m @@ -662,8 +660,6 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_NVRAM=y CONFIG_DTLK=m CONFIG_R3964=m -CONFIG_CARDMAN_4000=m -CONFIG_CARDMAN_4040=m 
CONFIG_IPWIRELESS=m CONFIG_I2C_CHARDEV=m CONFIG_I2C_HYDRA=m @@ -780,7 +776,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 6bd5e726133578deadf0f743f2f0559dcda9f13d..ffeaed63675b1114dc2decfcf3d24517cdcf6852 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -195,6 +195,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_HUGETLBFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y # CONFIG_MISC_FILESYSTEMS is not set diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 1f4691ce412618d42d51e5fa5206d23c9c8242cf..d0609c116e4fd2acf65bb49445b1c16b37a22790 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); /* OPAL tracing */ -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL extern struct static_key opal_tracepoint_key; #endif @@ -146,8 +146,11 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); /* Patch sites */ extern s32 patch__call_flush_count_cache; extern s32 patch__flush_count_cache_return; +extern s32 patch__flush_link_stack_return; +extern s32 patch__call_kvm_flush_link_stack; extern s32 patch__memset_nocache, patch__memcpy_nocache; extern long flush_count_cache; +extern long kvm_flush_link_stack; #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 50888388a3590966d521c71ef75633af4f636c97..02544939ef0ba513486c0463cb496b03e9a07e0d 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate) #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE static inline bool gigantic_page_supported(void) { + /* + * We used gigantic page reservation with hypervisor assist in some case. + * We cannot use runtime allocation of gigantic pages in those platforms + * This is hash translation mode LPARs. 
+ */ + if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()) + return false; + return true; } #endif diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h new file mode 100644 index 0000000000000000000000000000000000000000..aa54ac2e5659e799bf61059c36b297afe2f862a2 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H +#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H + +DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); + +/* Prototype for function defined in exceptions-64s.S */ +void do_uaccess_flush(void); + +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size) +{ +} + +static inline void prevent_user_access(void __user *to, const void __user *from, + unsigned long size) +{ + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); +} + +#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 391ed2c3b6972d5372b60f60dd95085c068eb954..f9019b579903a350cf7931bad2a019b2a345629e 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -83,6 +83,9 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), pgtable_gfp_flags(mm, GFP_KERNEL)); + if (unlikely(!pgd)) + return pgd; + /* * Don't scan the PGD for pointers, it contains references to PUDs but * those references are not full pointers and so can't be recognised by diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2a2486526d1fc2a6c2ae84ada6601396758360ba..855dbae6d351d40f7cc013e79793ff915adfa908 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1234,21 +1234,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl, - struct vm_area_struct *vma) -{ - if (radix_enabled()) - return false; - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} - - +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma); +/* + * Hash translation mode use the deposited table to store hash pte + * slot information. + */ #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit static inline bool arch_needs_pgtable_deposit(void) { diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 7d1a3d1543fc0fc2d699394ac9d0cbf4cd76f9bb..4954df7e8e1c70578f751904a139e012b3faa816 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -204,8 +204,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr, * from ptesync, it should probably go into update_mmu_cache, rather * than set_pte_at (which is used to set ptes unrelated to faults). * - * Spurious faults to vmalloc region are not tolerated, so there is - * a ptesync in flush_cache_vmap. 
+ * Spurious faults from the kernel memory are not tolerated, so there + * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows + * the pte update sequence from ISA Book III 6.10 Translation Table + * Update Synchronization Requirements. */ } diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index d5a8d7bf07594b0e0e127db45bf03d6129c1bfbc..b189f7aee222e3277e73c98e0a16ce704f8e8797 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -32,9 +32,12 @@ * not expect this type of fault. flush_cache_vmap is not exactly the right * place to put this, but it seems to work well enough. */ -#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0) +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ + asm volatile("ptesync" ::: "memory"); +} #else -#define flush_cache_vmap(start, end) do { } while (0) +static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } #endif #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 29f49a35d6eecee903ee04ccf3732fadc2971070..59b35b93eadec88a1dd377a0e863a4b0aef2d292 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -44,6 +44,7 @@ extern int machine_check_e500(struct pt_regs *regs); extern int machine_check_e200(struct pt_regs *regs); extern int machine_check_47x(struct pt_regs *regs); int machine_check_8xx(struct pt_regs *regs); +int machine_check_83xx(struct pt_regs *regs); extern void cpu_down_flush_e500v2(void); extern void cpu_down_flush_e500mc(void); @@ -212,8 +213,9 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000) #define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000) #define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000) -#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000) +#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000) #define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000) +#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000) #ifndef __ASSEMBLY__ @@ -460,7 +462,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ - CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR) + CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR) #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1) #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \ diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index ce242b9ea8c67d1546db351ade84d6b994f35b06..7c1d8e74b25d4f18ca31fdfa1dd773ab47a0e542 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -99,4 +99,9 @@ void __init walk_drmem_lmbs_early(unsigned long node, void (*func)(struct drmem_lmb *, const __be32 **)); #endif +static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) +{ + lmb->aa_index = 0xffffffff; +} + #endif /* _ASM_POWERPC_LMB_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index a86feddddad0cdceb012e2249c229fa7f6e31ff2..35fb5b11955a0b2cceb185b77ef78c7d3fe579e2 100644 --- 
a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -90,11 +90,18 @@ nop; \ nop +#define ENTRY_FLUSH_SLOT \ + ENTRY_FLUSH_FIXUP_SECTION; \ + nop; \ + nop; \ + nop; + /* * r10 must be free to use, r13 must be paca */ #define INTERRUPT_TO_KERNEL \ - STF_ENTRY_BARRIER_SLOT + STF_ENTRY_BARRIER_SLOT; \ + ENTRY_FLUSH_SLOT /* * Macros for annotating the expected destination of (h)rfid diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 1e7a33592e297aae7f6e5001c20d3b005eb6d81c..15bc07a31c467c04031dbb8acc2c380be1fb0567 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -200,7 +200,7 @@ struct fad_crash_memory_ranges { unsigned long long size; }; -extern int is_fadump_boot_memory_area(u64 addr, ulong size); +extern int is_fadump_memory_area(u64 addr, ulong size); extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data); extern int fadump_reserve_mem(void); diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 33b6f9c892c819c62fa6bbbd503d8f1e66cafb21..5bf3f0779b9366c70b024b26f2a2e7af1486722d 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -205,6 +205,22 @@ label##3: \ FTR_ENTRY_OFFSET 955b-956b; \ .popsection; +#define UACCESS_FLUSH_FIXUP_SECTION \ +959: \ + .pushsection __uaccess_flush_fixup,"a"; \ + .align 2; \ +960: \ + FTR_ENTRY_OFFSET 959b-960b; \ + .popsection; + +#define ENTRY_FLUSH_FIXUP_SECTION \ +957: \ + .pushsection __entry_flush_fixup,"a"; \ + .align 2; \ +958: \ + FTR_ENTRY_OFFSET 957b-958b; \ + .popsection; + #define RFI_FLUSH_FIXUP_SECTION \ 951: \ .pushsection __rfi_flush_fixup,"a"; \ @@ -221,15 +237,30 @@ label##3: \ FTR_ENTRY_OFFSET 953b-954b; \ .popsection; +#define START_BTB_FLUSH_SECTION \ +955: \ + +#define END_BTB_FLUSH_SECTION \ +956: \ + .pushsection __btb_flush_fixup,"a"; \ + .align 2; \ +957: \ + FTR_ENTRY_OFFSET 955b-957b; \ + FTR_ENTRY_OFFSET 956b-957b; \ + .popsection; #ifndef __ASSEMBLY__ #include extern long stf_barrier_fallback; +extern long entry_flush_fallback; extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; +extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup; +extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; +extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; void apply_feature_fixups(void); void setup_feature_keys(void); diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 94542776a62d630f6c9037e28264fc371d839245..eea28ca679dbbb244c795a423533e9d1edb5f99a 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, { int oldval = 0, ret; + allow_write_to_user(uaddr, sizeof(*uaddr)); pagefault_disable(); switch (op) { @@ -59,9 +60,9 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, pagefault_enable(); - if (!ret) - *oval = oldval; + *oval = oldval; + prevent_write_to_user(uaddr, sizeof(*uaddr)); return ret; } @@ -72,9 +73,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, int ret = 0; u32 prev; - if 
(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; + allow_write_to_user(uaddr, sizeof(*uaddr)); __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ @@ -95,6 +97,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "cc", "memory"); *uval = prev; + prevent_write_to_user(uaddr, sizeof(*uaddr)); return ret; } diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a0b17f9f1ea4e5c40e1b2d7a2c6e1aa913759ea5..8347f57e1c6a30dfd259785572b1f70d472a6520 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -383,7 +383,7 @@ long plpar_hcall_norets(unsigned long opcode, ...); * Used for all but the craziest of phyp interfaces (see plpar_hcall9) */ #define PLPAR_HCALL_BUFSIZE 4 -long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...); /** * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats @@ -397,7 +397,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); * plpar_hcall, but plpar_hcall_raw works in real mode and does not * calculate hypervisor call statistics. */ -long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...); /** * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments @@ -408,8 +408,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); * PLPAR_HCALL9_BUFSIZE to size the return argument buffer. */ #define PLPAR_HCALL9_BUFSIZE 9 -long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); -long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...); +long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...); struct hvcall_mpp_data { unsigned long entitled_mem; diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index e0331e7545685c5f2f9310ea38eb876d3491746a..b855f56489acc3cd01c771c92d4d721a0e207d7b 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -285,19 +285,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, * their hooks, a bitfield is reserved for use by the platform near the * top of MMIO addresses (not PIO, those have to cope the hard way). * - * This bit field is 12 bits and is at the top of the IO virtual - * addresses PCI_IO_INDIRECT_TOKEN_MASK. + * The highest address in the kernel virtual space are: * - * The kernel virtual space is thus: + * d0003fffffffffff # with Hash MMU + * c00fffffffffffff # with Radix MMU * - * 0xD000000000000000 : vmalloc - * 0xD000080000000000 : PCI PHB IO space - * 0xD000080080000000 : ioremap - * 0xD0000fffffffffff : end of ioremap region - * - * Since the top 4 bits are reserved as the region ID, we use thus - * the next 12 bits and keep 4 bits available for the future if the - * virtual address space is ever to be extended. + * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits + * that can be used for the field. 
* * The direct IO mapping operations will then mask off those bits * before doing the actual access, though that only happen when @@ -309,8 +303,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, */ #ifdef CONFIG_PPC_INDIRECT_MMIO -#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul -#define PCI_IO_IND_TOKEN_SHIFT 48 +#define PCI_IO_IND_TOKEN_SHIFT 52 +#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) #define PCI_FIX_ADDR(addr) \ ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) #define PCI_GET_ADDR_TOKEN(addr) \ diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h new file mode 100644 index 0000000000000000000000000000000000000000..f0f8e36ad71f51708b65c5aeceedb7757fddfecf --- /dev/null +++ b/arch/powerpc/include/asm/kup.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_KUP_H_ +#define _ASM_POWERPC_KUP_H_ + +#ifndef __ASSEMBLY__ + +#include + +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#else +static inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size) { } +static inline void prevent_user_access(void __user *to, const void __user *from, + unsigned long size) { } +#endif /* CONFIG_PPC_BOOK3S_64 */ + +static inline void allow_read_from_user(const void __user *from, unsigned long size) +{ + allow_user_access(NULL, from, size); +} + +static inline void allow_write_to_user(void __user *to, unsigned long size) +{ + allow_user_access(to, NULL, size); +} + +static inline void prevent_read_from_user(const void __user *from, unsigned long size) +{ + prevent_user_access(NULL, from, size); +} + +static inline void prevent_write_to_user(void __user *to, unsigned long size) +{ + prevent_user_access(to, NULL, size); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_KUP_H_ */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 83a9aa3cf689172648d234da87ec7d4e95add69d..dd18d8174504f5550538a6536aff04f7f6cd38ea 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index dc435a5af7d6cfd04ddb81e81d0476f8b214bdb2..14fa07c73f44dfc9aaceaf805e89e5db34982aa7 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -482,7 +482,7 @@ static inline u64 sanitize_msr(u64 msr) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; @@ -499,7 +499,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.lr_tm = vcpu->arch.regs.link; vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; diff --git 
a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index d513e3ed1c659c711d4a68e5db5924f720c66833..f0cef625f17ce0afcb1d08510d27a43fce77f6fc 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 906bcbdfd2a1be56771d30d8e19845e102e36c94..2f95e38f05491a6dcb60a1adda6ff850ce39c1b9 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -299,6 +299,7 @@ struct kvm_arch { #ifdef CONFIG_PPC_BOOK3S_64 struct list_head spapr_tce_tables; struct list_head rtas_tokens; + struct mutex rtas_token_lock; DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1); #endif #ifdef CONFIG_KVM_MPIC @@ -537,8 +538,6 @@ struct kvm_vcpu_arch { ulong tar; #endif - u32 cr; - #ifdef CONFIG_PPC_BOOK3S ulong hflags; ulong guest_owned_ext; @@ -822,7 +821,7 @@ struct kvm_vcpu_arch { static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_exit(void) {} diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e991821dd7fa1ccd422c4f2859892c3f4370b645..a061c3d48c482af6035e679926df6db0886817c8 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -458,9 +458,100 @@ static inline u32 kvmppc_get_xics_latch(void) return xirr; } -static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) +/* + * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to + * a CPU thread that's running/napping inside of a guest is by default regarded + * as a request to wake the CPU (if needed) and continue execution within the + * guest, potentially to process new state like externally-generated + * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI). + * + * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called + * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the + * target CPU's PACA. To avoid unnecessary exits to the host, this flag should + * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on + * the receiving side prior to processing the IPI work. + * + * NOTE: + * + * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi(). 
+ * This is to guard against sequences such as the following: + * + * CPU + * X: smp_muxed_ipi_set_message(): + * X: smp_mb() + * X: message[RESCHEDULE] = 1 + * X: doorbell_global_ipi(42): + * X: kvmppc_set_host_ipi(42) + * X: ppc_msgsnd_sync()/smp_mb() + * X: ppc_msgsnd() -> 42 + * 42: doorbell_exception(): // from CPU X + * 42: ppc_msgsync() + * 105: smp_muxed_ipi_set_message(): + * 105: smb_mb() + * // STORE DEFERRED DUE TO RE-ORDERING + * --105: message[CALL_FUNCTION] = 1 + * | 105: doorbell_global_ipi(42): + * | 105: kvmppc_set_host_ipi(42) + * | 42: kvmppc_clear_host_ipi(42) + * | 42: smp_ipi_demux_relaxed() + * | 42: // returns to executing guest + * | // RE-ORDERED STORE COMPLETES + * ->105: message[CALL_FUNCTION] = 1 + * 105: ppc_msgsnd_sync()/smp_mb() + * 105: ppc_msgsnd() -> 42 + * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored + * 105: // hangs waiting on 42 to process messages/call_single_queue + * + * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is + * to guard against sequences such as the following (as well as to create + * a read-side pairing with the barrier in kvmppc_set_host_ipi()): + * + * CPU + * X: smp_muxed_ipi_set_message(): + * X: smp_mb() + * X: message[RESCHEDULE] = 1 + * X: doorbell_global_ipi(42): + * X: kvmppc_set_host_ipi(42) + * X: ppc_msgsnd_sync()/smp_mb() + * X: ppc_msgsnd() -> 42 + * 42: doorbell_exception(): // from CPU X + * 42: ppc_msgsync() + * // STORE DEFERRED DUE TO RE-ORDERING + * -- 42: kvmppc_clear_host_ipi(42) + * | 42: smp_ipi_demux_relaxed() + * | 105: smp_muxed_ipi_set_message(): + * | 105: smb_mb() + * | 105: message[CALL_FUNCTION] = 1 + * | 105: doorbell_global_ipi(42): + * | 105: kvmppc_set_host_ipi(42) + * | // RE-ORDERED STORE COMPLETES + * -> 42: kvmppc_clear_host_ipi(42) + * 42: // returns to executing guest + * 105: ppc_msgsnd_sync()/smp_mb() + * 105: ppc_msgsnd() -> 42 + * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored + * 105: // hangs waiting on 42 to process messages/call_single_queue + */ +static inline void kvmppc_set_host_ipi(int cpu) { - paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi; + /* + * order stores of IPI messages vs. setting of host_ipi flag + * + * pairs with the barrier in kvmppc_clear_host_ipi() + */ + smp_mb(); + paca_ptrs[cpu]->kvm_hstate.host_ipi = 1; +} + +static inline void kvmppc_clear_host_ipi(int cpu) +{ + paca_ptrs[cpu]->kvm_hstate.host_ipi = 0; + /* + * order clearing of host_ipi flag vs. processing of IPI messages + * + * pairs with the barrier in kvmppc_set_host_ipi() + */ + smp_mb(); } static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) @@ -489,7 +580,10 @@ static inline u32 kvmppc_get_xics_latch(void) return 0; } -static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) +static inline void kvmppc_set_host_ipi(int cpu) +{} + +static inline void kvmppc_clear_host_ipi(int cpu) {} static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 4f547752ae79595086c9ad55a44281ea68b52505..193f53116c7ae66158f41b4adc1b6538bb913202 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -34,20 +34,12 @@ * respectively NA for All or X for Supervisor and no access for User. 
* Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MI_APG_INIT 0xf4f4f4f4 -#else #define MI_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MI_RPN is written, bits in @@ -115,20 +107,12 @@ * Supervisor and no access for user and NA for ALL. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MD_APG_INIT 0xf4f4f4f4 -#else #define MD_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. 
When MD_RPN is written, bits in @@ -180,12 +164,6 @@ */ #define SPRN_M_TW 799 -/* APGs */ -#define M_APG0 0x00000000 -#define M_APG1 0x00000020 -#define M_APG2 0x00000040 -#define M_APG3 0x00000060 - #ifdef CONFIG_PPC_MM_SLICES #include #define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b694d6af115080765cad72b749bea690dbb6fb79..ae953958c0f33cf9d671b14af6fd11d682a8392f 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -217,12 +217,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline int arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) -{ - return 0; -} - #ifndef CONFIG_PPC_BOOK3S_64 static inline void arch_exit_mmap(struct mm_struct *mm) { @@ -247,6 +241,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, #ifdef CONFIG_PPC_MEM_KEYS bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign); +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm); #else /* CONFIG_PPC_MEM_KEYS */ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign) @@ -259,6 +254,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, #define thread_pkey_regs_save(thread) #define thread_pkey_regs_restore(new_thread, old_thread) #define thread_pkey_regs_init(thread) +#define arch_dup_pkeys(oldmm, mm) static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) { @@ -267,5 +263,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) #endif /* CONFIG_PPC_MEM_KEYS */ +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + arch_dup_pkeys(oldmm, mm); + return 0; +} + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index fad8ddd697ac464403ab8db3c009a5ffbbb54787..0abf2e7fd22262e843075f55ea8f14d1eb2625a2 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys; #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */ /* Get the version of primary MPIC */ +#ifdef CONFIG_MPIC extern u32 fsl_mpic_primary_get_version(void); +#else +static inline u32 fsl_mpic_primary_get_version(void) +{ + return 0; +} +#endif /* Allocate the controller structure and setup the linux irq descs * for the range if interrupts passed in. 
No HW initialization is diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index ff3866473afe411f63c3d4e968ff6b574694402b..d8d886dee54e3c92356d23f370dbc2a3dcc66b79 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -275,7 +275,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp, int64_t opal_xive_set_vp_info(uint64_t vp, uint64_t flags, uint64_t report_cl_pair); -int64_t opal_xive_allocate_irq(uint32_t chip_id); +int64_t opal_xive_allocate_irq_raw(uint32_t chip_id); int64_t opal_xive_free_irq(uint32_t girq); int64_t opal_xive_sync(uint32_t type, uint32_t id); int64_t opal_xive_dump(uint32_t type, uint32_t id); diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h index 2f3ff7a278815a131f9ebec73bffcdaedf0abab3..d85fcfea32ca6498aa41bb24164c494cdcd508fe 100644 --- a/arch/powerpc/include/asm/powernv.h +++ b/arch/powerpc/include/asm/powernv.h @@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea, unsigned long *flags, unsigned long *status, int count); +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val); + void pnv_tm_init(void); #else static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { } diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 665af14850e4249c01153c200a85eff609f1a09d..d9d5391b2af6f9d0cb1271e239bec72d44bf9afe 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -300,6 +300,7 @@ /* Misc instructions for BPF compiler */ #define PPC_INST_LBZ 0x88000000 #define PPC_INST_LD 0xe8000000 +#define PPC_INST_LDX 0x7c00002a #define PPC_INST_LHZ 0xa0000000 #define PPC_INST_LWZ 0x80000000 #define PPC_INST_LHBRX 0x7c00062c @@ -307,6 +308,7 @@ #define PPC_INST_STB 0x98000000 #define PPC_INST_STH 0xb0000000 #define PPC_INST_STD 0xf8000000 +#define PPC_INST_STDX 0x7c00012a #define PPC_INST_STDU 0xf8000001 #define PPC_INST_STW 0x90000000 #define PPC_INST_STWU 0x94000000 @@ -334,6 +336,7 @@ #define PPC_INST_MULLI 0x1c000000 #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 +#define PPC_INST_DIVDU 0x7c000392 #define PPC_INST_RLWINM 0x54000000 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index b5d023680801b982cb5af2d82a21e943a7af57b9..5c901bf4c505f423b76db7b7f4f669412cce8476 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -821,4 +821,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) stringify_in_c(.long (_target) - . ;) \ stringify_in_c(.previous) +#ifdef CONFIG_PPC_FSL_BOOK3E +#define BTB_FLUSH(reg) \ + lis reg,BUCSR_INIT@h; \ + ori reg,reg,BUCSR_INIT@l; \ + mtspr SPRN_BUCSR,reg; \ + isync; +#else +#define BTB_FLUSH(reg) +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e5b314ed054e027d69782c9fd7261eaaefe85eb8..af99716615122c259e94f6d4e0a5e0fa7d430e3d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -118,11 +118,16 @@ #define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ -#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? 
*/ #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ +#else +#define MSR_TM_ACTIVE(x) 0 +#endif + #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF @@ -763,6 +768,8 @@ #define SRR1_PROGTRAP 0x00020000 /* Trap */ #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ +#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */ + #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ #define HSRR1_DENORM 0x00100000 /* Denorm exception */ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index eb2a33d5df26b559cf73d7f072fd4655ebb86852..e382bd6ede84548ac6f745b5df618b7b77d53ad8 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -41,7 +41,7 @@ #if defined(CONFIG_PPC_BOOK3E_64) #define MSR_64BIT MSR_CM -#define MSR_ (MSR_ME | MSR_CE) +#define MSR_ (MSR_ME | MSR_RI | MSR_CE) #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 759597bf0fd867bd6d4c151acb8acce7f7f3ff6b..3b45a64e491e54af5e14d92ed113324d123a6521 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -81,12 +81,22 @@ static inline bool security_ftr_enabled(unsigned long feature) // Software required to flush count cache on context switch #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull +// Software required to flush link stack on context switch +#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull + +// The L1-D cache should be flushed when entering the kernel +#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull + +// The L1-D cache should be flushed after user accesses from the kernel +#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull // Features enabled by default #define SEC_FTR_DEFAULT \ (SEC_FTR_L1D_FLUSH_HV | \ SEC_FTR_L1D_FLUSH_PR | \ SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_L1D_FLUSH_ENTRY | \ + SEC_FTR_L1D_FLUSH_UACCESS | \ SEC_FTR_FAVOUR_SECURITY) #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 1fffbba8d6a5e64a5fefdb06a6ecab29f4ec66e5..6f2f4497e13b37f8d5ca81fa6c631040e7f19dd4 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -52,12 +52,16 @@ enum l1d_flush_type { }; void setup_rfi_flush(enum l1d_flush_type, bool enable); +void setup_entry_flush(bool enable); +void setup_uaccess_flush(bool enable); void do_rfi_flush_fixups(enum l1d_flush_type types); #ifdef CONFIG_PPC_BARRIER_NOSPEC void setup_barrier_nospec(void); #else static inline void setup_barrier_nospec(void) { }; #endif +void do_uaccess_flush_fixups(enum l1d_flush_type types); +void do_entry_flush_fixups(enum l1d_flush_type types); void do_barrier_nospec_fixups(bool enable); extern bool barrier_nospec_enabled; @@ -67,6 +71,13 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; #endif +#ifdef CONFIG_PPC_FSL_BOOK3E +void setup_spectre_v2(void); 
+#else +static inline void setup_spectre_v2(void) {}; +#endif +void do_btb_flush_fixups(void); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index d89beaba26ff95d2ab0ed48cdaf1ba7fc8f3bd73..8b957aabb826d3b55674cab70e3fc096869f8ec0 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -213,30 +213,18 @@ * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow * (i.e. carry out) is not stored anywhere, and is lost. */ -#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p (bh) && (bh) == 0) \ - __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ - else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ - __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ + __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ else \ - __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "%r" ((USItype)(ah)), \ - "r" ((USItype)(bh)), \ - "%r" ((USItype)(al)), \ - "rI" ((USItype)(bl))); \ + __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \ + : "=r" (sh), "=&r" (sl) \ + : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \ } while (0) /* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to @@ -248,44 +236,24 @@ * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, * and is lost. 
*/ -#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p (ah) && (ah) == 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ - else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \ + __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ else if (__builtin_constant_p (bh) && (bh) == 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ - else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ - __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \ + __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ else \ - __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ - : "r" ((USItype)(ah)), \ - "r" ((USItype)(bh)), \ - "rI" ((USItype)(al)), \ - "r" ((USItype)(bl))); \ + __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \ + : "=r" (sh), "=&r" (sl) \ + : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \ } while (0) /* asm fragments for mul and div */ @@ -294,13 +262,10 @@ * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype * word product in HIGH_PROD and LOW_PROD. */ -#define umul_ppmm(ph, pl, m0, m1) \ +#define umul_ppmm(ph, pl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ - __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype)(ph)) \ - : "%r" (__m0), \ - "r" (__m1)); \ + __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ (pl) = __m0 * __m1; \ } while (0) @@ -312,9 +277,10 @@ * significant bit of DENOMINATOR must be 1, then the pre-processor symbol * UDIV_NEEDS_NORMALIZATION is defined to 1. */ -#define udiv_qrnnd(q, r, n1, n0, d) \ +#define udiv_qrnnd(q, r, n1, n0, d) \ do { \ - UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ + UWtype __d1, __d0, __q1, __q0; \ + UWtype __r1, __r0, __m; \ __d1 = __ll_highpart (d); \ __d0 = __ll_lowpart (d); \ \ @@ -325,7 +291,7 @@ if (__r1 < __m) \ { \ __q1--, __r1 += (d); \ - if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */ \ + if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */\ if (__r1 < __m) \ __q1--, __r1 += (d); \ } \ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 685c72310f5d0e5506d5973d6250449376b7ef46..3198dde87d4db2a1f503546f2138b5e2b9554e06 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -19,6 +19,7 @@ * * (the type definitions are in asm/spinlock_types.h) */ +#include #include #ifdef CONFIG_PPC64 #include @@ -53,10 +54,12 @@ #endif #ifdef CONFIG_PPC_PSERIES +DECLARE_STATIC_KEY_FALSE(shared_processor); + #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + if (!static_branch_unlikely(&shared_processor)) return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index a4a718dbfec6e0e4dbcfbf80b020f54689258290..f85e2b01c3df2b686cba72440b4b55415d8dbfc3 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {} #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) + +int dlpar_cpu_readd(int cpu); #endif #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index bac225bb7f64174a066de394e878e94bd9e5b9f7..f04f5d43496a2ce522c3582cb0995f50622fe243 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -6,6 +6,7 @@ #include #include #include +#include /* * The fs value determines whether argument validity checking should be @@ -62,8 +63,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size, #endif -#define access_ok(type, addr, size) \ - (__chk_user_ptr(addr), \ +#define access_ok(addr, size) \ + (__chk_user_ptr(addr), \ __access_ok((__force unsigned long)(addr), (size), get_fs())) /* @@ -91,9 +92,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size, __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) #define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true) #define __put_user(x, ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true) + +#define __get_user_allowed(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false) +#define __put_user_allowed(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false) #define __get_user_inatomic(x, ptr) \ __get_user_nosleep((x), (ptr), sizeof(*(ptr))) @@ -138,7 +144,7 @@ extern long __put_user_bad(void); : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) #endif /* __powerpc64__ */ -#define __put_user_size(x, ptr, size, retval) \ +#define __put_user_size_allowed(x, ptr, size, retval) \ do { \ retval = 0; \ switch (size) { \ @@ -150,14 +156,28 @@ do { \ } \ } while (0) -#define __put_user_nocheck(x, ptr, size) \ +#define __put_user_size(x, ptr, size, retval) \ +do { \ + allow_write_to_user(ptr, size); \ + __put_user_size_allowed(x, ptr, size, retval); \ + prevent_write_to_user(ptr, size); \ +} while (0) + +#define __put_user_nocheck(x, ptr, size, do_allow) \ ({ \ long __pu_err; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(ptr)) 
__pu_val = (x); \ + __typeof__(size) __pu_size = (size); \ + \ if (!is_kernel_addr((unsigned long)__pu_addr)) \ might_fault(); \ - __chk_user_ptr(ptr); \ - __put_user_size((x), __pu_addr, (size), __pu_err); \ + __chk_user_ptr(__pu_addr); \ + if (do_allow) \ + __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + else \ + __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \ + \ __pu_err; \ }) @@ -165,9 +185,13 @@ do { \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + __typeof__(size) __pu_size = (size); \ + \ might_fault(); \ - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ - __put_user_size((x), __pu_addr, (size), __pu_err); \ + if (access_ok(__pu_addr, __pu_size)) \ + __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + \ __pu_err; \ }) @@ -175,8 +199,12 @@ do { \ ({ \ long __pu_err; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __chk_user_ptr(ptr); \ - __put_user_size((x), __pu_addr, (size), __pu_err); \ + __typeof__(*(ptr)) __pu_val = (x); \ + __typeof__(size) __pu_size = (size); \ + \ + __chk_user_ptr(__pu_addr); \ + __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + \ __pu_err; \ }) @@ -234,7 +262,7 @@ extern long __get_user_bad(void); : "b" (addr), "i" (-EFAULT), "0" (err)) #endif /* __powerpc64__ */ -#define __get_user_size(x, ptr, size, retval) \ +#define __get_user_size_allowed(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ @@ -249,6 +277,13 @@ do { \ } \ } while (0) +#define __get_user_size(x, ptr, size, retval) \ +do { \ + allow_read_from_user(ptr, size); \ + __get_user_size_allowed(x, ptr, size, retval); \ + prevent_read_from_user(ptr, size); \ +} while (0) + /* * This is a type: either unsigned long, if the argument fits into * that type, or otherwise unsigned long long. 
@@ -256,17 +291,23 @@ do { \ #define __long_type(x) \ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) -#define __get_user_nocheck(x, ptr, size) \ +#define __get_user_nocheck(x, ptr, size, do_allow) \ ({ \ long __gu_err; \ __long_type(*(ptr)) __gu_val; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - __chk_user_ptr(ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(size) __gu_size = (size); \ + \ + __chk_user_ptr(__gu_addr); \ if (!is_kernel_addr((unsigned long)__gu_addr)) \ might_fault(); \ barrier_nospec(); \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + if (do_allow) \ + __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + else \ + __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ (x) = (__typeof__(*(ptr)))__gu_val; \ + \ __gu_err; \ }) @@ -274,13 +315,16 @@ do { \ ({ \ long __gu_err = -EFAULT; \ __long_type(*(ptr)) __gu_val = 0; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(size) __gu_size = (size); \ + \ might_fault(); \ - if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ + if (access_ok(__gu_addr, __gu_size)) { \ barrier_nospec(); \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ } \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ + \ __gu_err; \ }) @@ -288,11 +332,14 @@ do { \ ({ \ long __gu_err; \ __long_type(*(ptr)) __gu_val; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - __chk_user_ptr(ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(size) __gu_size = (size); \ + \ + __chk_user_ptr(__gu_addr); \ barrier_nospec(); \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ + \ __gu_err; \ }) @@ -306,15 +353,22 @@ extern unsigned long __copy_tofrom_user(void __user *to, static inline unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { - return __copy_tofrom_user(to, from, n); + unsigned long ret; + + barrier_nospec(); + allow_user_access(to, from, n); + ret = __copy_tofrom_user(to, from, n); + prevent_user_access(to, from, n); + return ret; } #endif /* __powerpc64__ */ static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long ret; if (__builtin_constant_p(n) && (n <= 8)) { - unsigned long ret = 1; + ret = 1; switch (n) { case 1: @@ -339,27 +393,30 @@ static inline unsigned long raw_copy_from_user(void *to, } barrier_nospec(); - return __copy_tofrom_user((__force void __user *)to, from, n); + allow_read_from_user(from, n); + ret = __copy_tofrom_user((__force void __user *)to, from, n); + prevent_read_from_user(from, n); + return ret; } -static inline unsigned long raw_copy_to_user(void __user *to, - const void *from, unsigned long n) +static inline unsigned long +raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n) && (n <= 8)) { unsigned long ret = 1; switch (n) { case 1: - __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret); + __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret); break; case 2: - __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret); + __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret); break; case 4: - __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret); + __put_user_size_allowed(*(u32 *)from, (u32 
__user *)to, 4, ret); break; case 8: - __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret); + __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret); break; } if (ret == 0) @@ -369,14 +426,34 @@ static inline unsigned long raw_copy_to_user(void __user *to, return __copy_tofrom_user(to, (__force const void __user *)from, n); } -extern unsigned long __clear_user(void __user *addr, unsigned long size); +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned long ret; + + allow_write_to_user(to, n); + ret = raw_copy_to_user_allowed(to, from, n); + prevent_write_to_user(to, n); + return ret; +} + +unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) { + unsigned long ret = size; might_fault(); - if (likely(access_ok(VERIFY_WRITE, addr, size))) - return __clear_user(addr, size); - return size; + if (likely(access_ok(addr, size))) { + allow_write_to_user(addr, size); + ret = __arch_clear_user(addr, size); + prevent_write_to_user(addr, size); + } + return ret; +} + +static inline unsigned long __clear_user(void __user *addr, unsigned long size) +{ + return clear_user(addr, size); } extern long strncpy_from_user(char *dst, const char __user *src, long count); @@ -387,4 +464,13 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src, extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len); +#define user_access_begin(ptr, len) access_ok(ptr, len) +#define user_access_end() prevent_user_access(NULL, NULL, ~0ul) + +#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) +#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) +#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e) +#define unsafe_copy_to_user(d, s, l, e) \ + unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e) + #endif /* _ARCH_POWERPC_UACCESS_H */ diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index 1afe90ade595e161016af3ca712b65f9990407f4..4333b9a473dc47aa02ea9b80d7b474c7f112354f 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -82,10 +82,11 @@ struct vdso_data { __u32 icache_block_size; /* L1 i-cache block size */ __u32 dcache_log_block_size; /* L1 d-cache log block size */ __u32 icache_log_block_size; /* L1 i-cache log block size */ - __s32 wtom_clock_sec; /* Wall to monotonic clock */ - __s32 wtom_clock_nsec; - struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ - __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ + __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ + __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */ + __s64 wtom_clock_sec; /* Wall to monotonic clock sec */ + struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ }; @@ -107,6 +108,7 @@ struct vdso_data { __s32 wtom_clock_nsec; struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ + __u32 hrtimer_res; /* hrtimer resolution */ __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ __u32 dcache_block_size; /* L1 d-cache block size */ __u32 icache_block_size; /* L1 i-cache block size */ 
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..105abb73f075cf0204f147c60b9659658a60a805 --- /dev/null +++ b/arch/powerpc/include/asm/vmalloc.h @@ -0,0 +1,12 @@ +#ifndef _ASM_POWERPC_VMALLOC_H +#define _ASM_POWERPC_VMALLOC_H + +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + +#endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index 65065ce3281496f82668a52cbc42a6faf781caad..802cc3fe1358cee39f1b126bbcbc37d2e96b9a04 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -29,6 +29,9 @@ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ +#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ /* Override any generic PKEY permission defines */ #define PKEY_DISABLE_EXECUTE 0x4 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 3b66f2c19c84e0996567d59fdc1f8cb46ab273d5..d450280e5c29c1b5e1260d55fd6a98ac1be55fda 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,6 +5,9 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' +# Avoid clang warnings around longjmp/setjmp declarations +CFLAGS_crash.o += -ffreestanding + subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifdef CONFIG_PPC64 @@ -22,10 +25,10 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE) endif obj-y := cputable.o ptrace.o syscalls.o \ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 11550a3d1ac2e8c5cf092de7f4a849d4a3439d32..0d1b6370bae00bb1ce44442c19b8480e2a0b1d6e 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -131,8 +131,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, /* Verify the address of the operand */ if (unlikely(user_mode(regs) && - !access_ok((flags & ST ? 
VERIFY_WRITE : VERIFY_READ), - addr, nb))) + !access_ok(addr, nb))) return -EFAULT; /* userland only */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 89cf15566c4e80ba4e81d500e1d86f204e1241b2..50400f213bbf2a57ea60ba3330cf3beee5fd9da4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -379,6 +379,7 @@ int main(void) OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec); OFFSET(STAMP_XTIME, vdso_data, stamp_xtime); OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction); + OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res); OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size); OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size); OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size); @@ -409,7 +410,6 @@ int main(void) DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); @@ -438,7 +438,7 @@ int main(void) #ifdef CONFIG_PPC_BOOK3S OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); @@ -695,7 +695,7 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2da01340c84c3f3ade55cecb0486b33730856d04..1eab54bc6ee9385c8de90ac0b9cde6b7d6da1302 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1141,6 +1141,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_generic, .platform = "ppc603", }, +#ifdef CONFIG_PPC_83xx { /* e300c1 (a 603e core, plus some) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00830000, @@ -1151,7 +1152,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .platform = "ppc603", }, { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ @@ -1165,7 +1166,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .platform = "ppc603", }, { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ @@ -1179,7 +1180,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, @@ -1196,12 +1197,13 @@ static struct cpu_spec __initdata cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, + .machine_check = machine_check_83xx, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, +#endif { /* default match, we assume split I/D cache & TB (non-601)... 
*/ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index b6fe883b1016dd395a21c0ec4c64867c2d208b57..5828144555af5d1802456647d783f04387cf5245 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -36,7 +36,7 @@ void doorbell_global_ipi(int cpu) { u32 tag = get_hard_smp_processor_id(cpu); - kvmppc_set_host_ipi(cpu, 1); + kvmppc_set_host_ipi(cpu); /* Order previous accesses vs. msgsnd, which is treated as a store */ ppc_msgsnd_sync(); ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); @@ -51,7 +51,7 @@ void doorbell_core_ipi(int cpu) { u32 tag = cpu_thread_in_core(cpu); - kvmppc_set_host_ipi(cpu, 1); + kvmppc_set_host_ipi(cpu); /* Order previous accesses vs. msgsnd, which is treated as a store */ ppc_msgsnd_sync(); ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); @@ -86,7 +86,7 @@ void doorbell_exception(struct pt_regs *regs) may_hard_irq_enable(); - kvmppc_set_host_ipi(smp_processor_id(), 0); + kvmppc_clear_host_ipi(smp_processor_id()); __this_cpu_inc(irq_stat.doorbell_irqs); smp_ipi_demux_relaxed(); /* already performed the barrier */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index f432054234a472eae7077afda818b6dcb51ccf00..c6f41907f0d716fca6298715f7bf845c1f5411ac 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -694,9 +694,37 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) return true; } +/* + * Handle POWER9 broadcast tlbie invalidation issue using + * cpu feature flag. + */ +static __init void update_tlbie_feature_flag(unsigned long pvr) +{ + if (PVR_VER(pvr) == PVR_POWER9) { + /* + * Set the tlbie feature flag for anything below + * Nimbus DD 2.3 and Cumulus DD 1.3 + */ + if ((pvr & 0xe000) == 0) { + /* Nimbus */ + if ((pvr & 0xfff) < 0x203) + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG; + } else if ((pvr & 0xc000) == 0) { + /* Cumulus */ + if ((pvr & 0xfff) < 0x103) + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG; + } else { + WARN_ONCE(1, "Unknown PVR"); + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG; + } + + cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG; + } +} + static __init void cpufeatures_cpu_quirks(void) { - int version = mfspr(SPRN_PVR); + unsigned long version = mfspr(SPRN_PVR); /* * Not all quirks can be derived from the cpufeatures device tree. 
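update_tlbie_feature_flag() above keys the workaround off the POWER9 PVR: the code treats (pvr & 0xe000) == 0 as a Nimbus part and (pvr & 0xc000) == 0 as a Cumulus part, with the low 12 bits carrying the DD revision. Here is a standalone sketch of the same revision check with illustrative PVR values; the helper name and the example constants are mine, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the revision checks in update_tlbie_feature_flag(); assumes the
 * caller has already established PVR_VER(pvr) == PVR_POWER9. */
static bool p9_needs_tlbie_stq_workaround(unsigned long pvr)
{
	if ((pvr & 0xe000) == 0)		/* Nimbus */
		return (pvr & 0xfff) < 0x203;	/* below DD 2.3 */
	if ((pvr & 0xc000) == 0)		/* Cumulus */
		return (pvr & 0xfff) < 0x103;	/* below DD 1.3 */
	return true;				/* unknown part: apply it anyway */
}

int main(void)
{
	/* 0x...1202 reads as Nimbus DD 2.2 under this convention -> 1 */
	printf("%d\n", p9_needs_tlbie_stq_workaround(0x004e1202));
	/* 0x...1203 reads as Nimbus DD 2.3 -> 0 */
	printf("%d\n", p9_needs_tlbie_stq_workaround(0x004e1203));
	return 0;
}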
@@ -715,10 +743,10 @@ static __init void cpufeatures_cpu_quirks(void) if ((version & 0xffff0000) == 0x004e0000) { cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR); - cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG; cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR; } + update_tlbie_feature_flag(version); /* * PKEY was not in the initial base or feature node * specification, but it should become optional in the next diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 6ebba3e48b012b50b3150b3462bd224166c1b456..fe3c6f3bd3b6226727fffc12b0ec0e3bef1fb4b4 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) int n = 0, l = 0; char buffer[128]; + if (!pdn) { + pr_warn("EEH: Note: No error log for absent device.\n"); + return 0; + } + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); @@ -355,10 +360,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) ptep = find_init_mm_pte(token, &hugepage_shift); if (!ptep) return token; - WARN_ON(hugepage_shift); - pa = pte_pfn(*ptep) << PAGE_SHIFT; - return pa | (token & (PAGE_SIZE-1)); + pa = pte_pfn(*ptep); + + /* On radix we can do hugepage mappings for io, so handle that */ + if (hugepage_shift) { + pa <<= hugepage_shift; + pa |= token & ((1ul << hugepage_shift) - 1); + } else { + pa <<= PAGE_SHIFT; + pa |= token & (PAGE_SIZE - 1); + } + + return pa; } /* diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 67619b4b3f96c721993ce8b94b0f56cca6b4f418..af1f3d5f9a0f71c08a1e38d296615acf502560d7 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -281,6 +281,10 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, struct pci_driver *driver; enum pci_ers_result new_result; + if (!edev->pdev) { + eeh_edev_info(edev, "no device"); + return; + } device_lock(&edev->pdev->dev); if (eeh_edev_actionable(edev)) { driver = eeh_pcid_get(edev->pdev); @@ -811,6 +815,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe) pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n", pe->freeze_count, eeh_max_freezes); + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp) + edev->mode &= ~EEH_DEV_NO_HANDLER; + /* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs * to accomplish the reset. 
Each child gets a report of the @@ -1004,7 +1012,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe) */ void eeh_handle_special_event(void) { - struct eeh_pe *pe, *phb_pe; + struct eeh_pe *pe, *phb_pe, *tmp_pe; + struct eeh_dev *edev, *tmp_edev; struct pci_bus *bus; struct pci_controller *hose; unsigned long flags; @@ -1075,6 +1084,10 @@ void eeh_handle_special_event(void) (phb_pe->state & EEH_PE_RECOVERING)) continue; + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) + edev->mode &= ~EEH_DEV_NO_HANDLER; + /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); eeh_set_channel_state(pe, pci_channel_io_perm_failure); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 1b238ecc553e2fc4d591daa3e6b995da981c2c27..210d239a9395077f63ec42148b3ce44a35fd6780 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -379,7 +379,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) while (parent) { if (!(parent->type & EEH_PE_INVALID)) break; - parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); + parent->type &= ~EEH_PE_INVALID; parent = parent->parent; } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e58c3f467db51dc518f9520df36ba5ef28f414d3..26b3f853cbf6f8bd6713d5fb6ef9977ab2f7041b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -745,6 +745,9 @@ fast_exception_return: mtcr r10 lwz r10,_LINK(r11) mtlr r10 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r11) REST_GPR(10, r11) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 @@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtcrf 0xFF,r10 mtlr r11 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r1) /* * Once we put values in SRR0 and SRR1, we are in a state * where exceptions are not recoverable, since taking an @@ -1021,6 +1027,9 @@ exc_exit_restart_end: mtlr r11 lwz r10,_CCR(r1) mtcrf 0xff,r10 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r1) REST_2GPRS(9, r1) .globl exc_exit_restart exc_exit_restart: diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2206912ea4f09115debe3b6845b05581f8fa7414..58b50967b3e5903e8903bc68d8dbb5255a620823 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -80,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) std r0,GPR0(r1) std r10,GPR1(r1) beq 2f /* if from kernel mode */ +#ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION +#endif ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) 2: std r2,GPR2(r1) std r3,GPR3(r1) @@ -528,6 +533,7 @@ flush_count_cache: /* Save LR into r9 */ mflr r9 + // Flush the link stack .rept 64 bl .+4 .endr @@ -537,6 +543,11 @@ flush_count_cache: .balign 32 /* Restore LR */ 1: mtlr r9 + + // If we're just flushing the link stack, return here +3: nop + patch_site 3b patch__flush_link_stack_return + li r9,0x7fff mtctr r9 @@ -989,6 +1000,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r2,_NIP(r1) mtspr SPRN_SRR0,r2 + /* + * Leaving a stale exception_marker on the stack can confuse + * the reliable stack unwinder later on. Clear it. 
+ */ + li r2,0 + std r2,STACK_FRAME_OVERHEAD-16(r1) + ld r0,GPR0(r1) ld r2,GPR2(r1) ld r3,GPR3(r1) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 6d6e144a28ce0971d3be09d112d8b0404d57cb0f..447defdd450373f72e43f6c11891e85574b2094f 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -296,7 +296,8 @@ ret_from_mc_except: andi. r10,r11,MSR_PR; /* save stack pointer */ \ beq 1f; /* branch around if supervisor */ \ ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ -1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ +1: type##_BTB_FLUSH \ + cmpdi cr1,r1,0; /* check if SP makes sense */ \ bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ @@ -328,6 +329,30 @@ ret_from_mc_except: #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 +#ifdef CONFIG_PPC_FSL_BOOK3E +#define GEN_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + beq 1f; \ + BTB_FLUSH(r10) \ + 1: \ + END_BTB_FLUSH_SECTION + +#define CRIT_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r10) \ + END_BTB_FLUSH_SECTION + +#define DBG_BTB_FLUSH CRIT_BTB_FLUSH +#define MC_BTB_FLUSH CRIT_BTB_FLUSH +#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH +#else +#define GEN_BTB_FLUSH +#define CRIT_BTB_FLUSH +#define DBG_BTB_FLUSH +#define MC_BTB_FLUSH +#define GDBELL_BTB_FLUSH +#endif + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 2d8fc8c9da7a1f210816bd9734c3d8453d8fc04e..344e2758b22dfdf27babb4f0de01839db5812ea3 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -520,6 +520,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early) RFI_TO_USER_OR_KERNEL 9: /* Deliver the machine check to host kernel in V mode. 
*/ +BEGIN_FTR_SECTION + ld r10,ORIG_GPR3(r1) + mtspr SPRN_CFAR,r10 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) MACHINE_CHECK_HANDLER_WINDUP b machine_check_pSeries @@ -536,7 +540,7 @@ EXC_COMMON_BEGIN(unrecover_mce) b 1b -EXC_REAL(data_access, 0x300, 0x80) +EXC_REAL_OOL(data_access, 0x300, 0x80) EXC_VIRT(data_access, 0x4300, 0x80, 0x300) TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) @@ -568,13 +572,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) + b tramp_data_access_slb +EXC_REAL_END(data_access_slb, 0x380, 0x80) + +TRAMP_REAL_BEGIN(tramp_data_access_slb) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) mr r12,r3 /* save r3 */ mfspr r3,SPRN_DAR mfspr r11,SPRN_SRR1 crset 4*cr6+eq BRANCH_TO_COMMON(r10, slb_miss_common) -EXC_REAL_END(data_access_slb, 0x380, 0x80) EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) SET_SCRATCH0(r13) @@ -589,7 +596,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x80) TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) -EXC_REAL(instruction_access, 0x400, 0x80) +EXC_REAL_OOL(instruction_access, 0x400, 0x80) EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400) TRAMP_KVM(PACA_EXGEN, 0x400) @@ -612,13 +619,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXSLB) + b tramp_instruction_access_slb +EXC_REAL_END(instruction_access_slb, 0x480, 0x80) + +TRAMP_REAL_BEGIN(tramp_instruction_access_slb) EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) mr r12,r3 /* save r3 */ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ mfspr r11,SPRN_SRR1 crclr 4*cr6+eq BRANCH_TO_COMMON(r10, slb_miss_common) -EXC_REAL_END(instruction_access_slb, 0x480, 0x80) EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) SET_SCRATCH0(r13) @@ -879,13 +889,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) -EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) +EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) TRAMP_KVM(PACA_EXGEN, 0x900) EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) -EXC_REAL_HV(hdecrementer, 0x980, 0x80) -EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980) +EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80) +EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980) TRAMP_KVM_HV(PACA_EXGEN, 0x980) EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) @@ -1119,7 +1129,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early) EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN) EXCEPTION_PROLOG_COMMON_3(0xe60) addi r3,r1,STACK_FRAME_OVERHEAD - BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */ + BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */ cmpdi cr0,r3,0 /* Windup the stack. */ @@ -1519,15 +1529,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) .endr blr -TRAMP_REAL_BEGIN(rfi_flush_fallback) - SET_SCRATCH0(r13); - GET_PACA(r13); - std r1,PACA_EXRFI+EX_R12(r13) - ld r1,PACAKSAVE(r13) - std r9,PACA_EXRFI+EX_R9(r13) - std r10,PACA_EXRFI+EX_R10(r13) - std r11,PACA_EXRFI+EX_R11(r13) - mfctr r9 +/* Clobbers r10, r11, ctr */ +.macro L1D_DISPLACEMENT_FLUSH ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ld r11,PACA_L1D_FLUSH_SIZE(r13) srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ @@ -1538,7 +1541,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) sync /* - * The load adresses are at staggered offsets within cachelines, + * The load addresses are at staggered offsets within cachelines, * which suits some pipelines better (on others it should not * hurt). 
*/ @@ -1553,7 +1556,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) ld r11,(0x80 + 8)*7(r10) addi r10,r10,0x80*8 bdnz 1b +.endm +TRAMP_REAL_BEGIN(entry_flush_fallback) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 + L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + blr + +TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r1,PACA_EXRFI+EX_R12(r13) + ld r1,PACAKSAVE(r13) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 + L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) @@ -1571,32 +1597,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) mfctr r9 - ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) - ld r11,PACA_L1D_FLUSH_SIZE(r13) - srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ - mtctr r11 - DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ - - /* order ld/st prior to dcbt stop all streams with flushing */ - sync - - /* - * The load adresses are at staggered offsets within cachelines, - * which suits some pipelines better (on others it should not - * hurt). - */ -1: - ld r11,(0x80 + 8)*0(r10) - ld r11,(0x80 + 8)*1(r10) - ld r11,(0x80 + 8)*2(r10) - ld r11,(0x80 + 8)*3(r10) - ld r11,(0x80 + 8)*4(r10) - ld r11,(0x80 + 8)*5(r10) - ld r11,(0x80 + 8)*6(r10) - ld r11,(0x80 + 8)*7(r10) - addi r10,r10,0x80*8 - bdnz 1b - + L1D_DISPLACEMENT_FLUSH mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) @@ -1605,6 +1606,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) GET_SCRATCH0(r13); hrfid +USE_TEXT_SECTION() + +_GLOBAL(do_uaccess_flush) + UACCESS_FLUSH_FIXUP_SECTION + nop + nop + nop + blr + L1D_DISPLACEMENT_FLUSH + blr +_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) +EXPORT_SYMBOL(do_uaccess_flush) + /* * Real mode exceptions actually use this too, but alternate * instruction code patches (which end up in the common .text area) @@ -1745,7 +1759,7 @@ handle_page_fault: addi r3,r1,STACK_FRAME_OVERHEAD bl do_page_fault cmpdi r3,0 - beq+ 12f + beq+ ret_from_except_lite bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD @@ -1760,7 +1774,12 @@ handle_dabr_fault: ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_break -12: b ret_from_except_lite + /* + * do_break() may have changed the NV GPRS while handling a breakpoint. + * If so, we need to restore them with their updated values. Don't use + * ret_from_except_lite here. + */ + b ret_from_except #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index a711d22339ea76d75cc09a1a9e215764147eb983..c02c95287a5f82e4c94edd6b2d932ff5846e4ef1 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -118,13 +118,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, /* * If fadump is registered, check if the memory provided - * falls within boot memory area. + * falls within boot memory area and reserved memory area. 
*/ -int is_fadump_boot_memory_area(u64 addr, ulong size) +int is_fadump_memory_area(u64 addr, ulong size) { + u64 d_start = fw_dump.reserve_dump_area_start; + u64 d_end = d_start + fw_dump.reserve_dump_area_size; + if (!fw_dump.dump_registered) return 0; + if (((addr + size) > d_start) && (addr <= d_end)) + return 1; + return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size; } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4898e9491a1cd3ab5b90b2e50c75758b7ebb9d00..3fb564f3e8874eeddfb5b31c6e4b2a489f2fbbde 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -906,6 +906,7 @@ p_toc: .8byte __toc_start + 0x8000 - 0b /* * This is where the main kernel code starts. */ +__REF start_here_multiplatform: /* set up the TOC */ bl relative_toc @@ -970,7 +971,9 @@ start_here_multiplatform: /* Restore parameters passed from prom_init/kexec */ mr r3,r31 - bl early_setup /* also sets r13 and SPRG_PACA */ + LOAD_REG_ADDR(r12, DOTSYM(early_setup)) + mtctr r12 + bctrl /* also sets r13 and SPRG_PACA */ LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) @@ -979,6 +982,7 @@ start_here_multiplatform: RFI b . /* prevent speculative execution */ + .previous /* This is where all platforms converge execution */ start_here_common: diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 6582f824d6206dfefddcdd81ed2509f7ebe4b1f2..9fd2ff28b8ff26b4ee3bc36aa18f038ae3d58c21 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -353,13 +353,14 @@ _ENTRY(ITLBMiss_cmp) #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtcr r12 #endif - -#ifdef CONFIG_SWAP - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif /* Load the MI_TWC with the attributes for this "segment." */ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif li r11, RPN_PATTERN | 0x200 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. @@ -470,14 +471,22 @@ _ENTRY(DTLBMiss_jmp) * above. */ rlwimi r11, r10, 0, _PAGE_GUARDED -#ifdef CONFIG_SWAP - /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0 - * on that bit will represent a Non Access group - */ - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif mtspr SPRN_MD_TWC, r11 + /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. + * We also need to know if the insn is a load/store, so: + * Clear _PAGE_PRESENT and load that which will + * trap into DTLB Error with store bit set accordinly. + */ + /* PRESENT=0x1, ACCESSED=0x20 + * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); + * r10 = (r10 & ~PRESENT) | r11; + */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. 
All other Linux PTE bits control the behavior @@ -637,8 +646,8 @@ InstructionBreakpoint: */ DTLBMissIMMR: mtcr r12 - /* Set 512k byte guarded page and mark it valid and accessed */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2 + /* Set 512k byte guarded page and mark it valid */ + li r10, MD_PS512K | MD_GUARDED | MD_SVALID mtspr SPRN_MD_TWC, r10 mfspr r10, SPRN_IMMR /* Get current IMMR */ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ @@ -656,8 +665,8 @@ _ENTRY(dtlb_miss_exit_2) DTLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid and accessed */ - li r11, MD_PS8MEG | MD_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MD_PS8MEG | MD_SVALID mtspr SPRN_MD_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ @@ -675,8 +684,8 @@ _ENTRY(dtlb_miss_exit_3) #ifndef CONFIG_PIN_TLB_TEXT ITLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid,accessed */ - li r11, MI_PS8MEG | MI_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MI_PS8MEG | MI_SVALID mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ @@ -910,11 +919,12 @@ start_here: /* set up the PTE pointers for the Abatron bdiGDB. */ - tovirt(r6,r6) lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l stw r6, 0(r5) /* Now turn on the MMU for real! */ @@ -960,7 +970,7 @@ initial_mmu: ori r8, r8, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r8 li r8, MI_PS8MEG /* Set 8M byte page */ - ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */ + ori r8, r8, MI_SVALID /* Make it valid */ mtspr SPRN_MI_TWC, r8 li r8, MI_BOOTINIT /* Create RPN for address 0 */ mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ @@ -987,7 +997,7 @@ initial_mmu: ori r8, r8, MD_EVALID /* Mark it valid */ mtspr SPRN_MD_EPN, r8 li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */ - ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */ + ori r8, r8, MD_SVALID /* Make it valid */ mtspr SPRN_MD_TWC, r8 mr r8, r9 /* Create paddr for TLB */ ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index d0862a100d29b147e0242dd9d02195d2007fb501..306e26c073a043b91d86f42c25075a2771760bf0 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -32,6 +32,16 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) +#ifdef CONFIG_PPC_FSL_BOOK3E +#define BOOKE_CLEAR_BTB(reg) \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(reg) \ +END_BTB_FLUSH_SECTION +#else +#define BOOKE_CLEAR_BTB(reg) +#endif + + #define NORMAL_EXCEPTION_PROLOG(intno) \ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ mfspr r10, SPRN_SPRG_THREAD; \ @@ -43,6 +53,7 @@ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ mr r11, r1; \ beq 1f; \ + BOOKE_CLEAR_BTB(r11) \ /* if from user, start at top of this thread's kernel stack */ \ lwz r11, THREAD_INFO-THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ @@ -128,6 +139,7 @@ stw r9,_CCR(r8); /* save CR on stack */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ + BOOKE_CLEAR_BTB(r10) \ andi. 
r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index e2750b856c8fc5468ca5065adb8be0ce88c5b18c..2386ce2a9c6e4604ecc5a8f245858d3f0c547b6b 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -453,6 +453,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -547,6 +554,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 7f5ac2e8581be7f56e552db87f5e0e165d39cddb..4a860d3b922962295dbec0764a734e61b1f63c03 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -170,6 +170,12 @@ core_idle_lock_held: bne- core_idle_lock_held blr +/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/UAMOR */ +#define PNV_POWERSAVE_AMR _TRAP +#define PNV_POWERSAVE_IAMR _DAR +#define PNV_POWERSAVE_UAMOR _DSISR +#define PNV_POWERSAVE_AMOR RESULT + /* * Pass requested state in r3: * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8 @@ -200,6 +206,20 @@ pnv_powersave_common: /* Continue saving state */ SAVE_GPR(2, r1) SAVE_NVGPRS(r1) + +BEGIN_FTR_SECTION + mfspr r4, SPRN_AMR + mfspr r5, SPRN_IAMR + mfspr r6, SPRN_UAMOR + std r4, PNV_POWERSAVE_AMR(r1) + std r5, PNV_POWERSAVE_IAMR(r1) + std r6, PNV_POWERSAVE_UAMOR(r1) +BEGIN_FTR_SECTION_NESTED(42) + mfspr r7, SPRN_AMOR + std r7, PNV_POWERSAVE_AMOR(r1) +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mfcr r5 std r5,_CCR(r1) std r1,PACAR1(r13) @@ -924,6 +944,25 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) REST_NVGPRS(r1) REST_GPR(2, r1) + +BEGIN_FTR_SECTION + /* These regs were saved in pnv_powersave_common() */ + ld r4, PNV_POWERSAVE_AMR(r1) + ld r5, PNV_POWERSAVE_IAMR(r1) + ld r6, PNV_POWERSAVE_UAMOR(r1) + mtspr SPRN_AMR, r4 + mtspr SPRN_IAMR, r5 + mtspr SPRN_UAMOR, r6 +BEGIN_FTR_SECTION_NESTED(42) + ld r7, PNV_POWERSAVE_AMOR(r1) + mtspr SPRN_AMOR, r7 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) + /* + * We don't need an isync here after restoring IAMR because the upcoming + * mtmsrd is execution synchronizing. 
+ */ +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + ld r4,PACAKMSR(r13) ld r5,_LINK(r1) ld r6,_CCR(r1) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 19b4c628f3beced4211da41d48c528085254fee9..f0dc680e659af7029868b88cc36a002f3dd73126 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -785,9 +785,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, vaddr = page_address(page) + offset; uaddr = (unsigned long)vaddr; - npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); if (tbl) { + npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); align = 0; if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 916ddc4aac443985da55fc38997d75a146401182..d37704ebccdbc9946a50945bfb4e4ac21e651fe3 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -634,8 +634,6 @@ void __do_irq(struct pt_regs *regs) trace_irq_entry(regs); - check_stack_overflow(); - /* * Query the platform PIC for the interrupt & ack it. * @@ -667,6 +665,8 @@ void do_IRQ(struct pt_regs *regs) irqtp = hardirq_ctx[raw_smp_processor_id()]; sirqtp = softirq_ctx[raw_smp_processor_id()]; + check_stack_overflow(); + /* Already there ? */ if (unlikely(curtp == irqtp || curtp == sirqtp)) { __do_irq(regs); diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index 6472472093d084a01d2aaac7f2fa53f6a332f4da..0080c5fbd225de4752904754cb7d5834d377e5ce 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,7 +11,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -22,4 +21,3 @@ void arch_jump_label_transform(struct jump_entry *entry, else patch_instruction(addr, PPC_INST_NOP); } -#endif diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 683b5b3805bd17493d97c261afc19279ac76b69f..cd381e2291dfeb38a569fed214778838cef42a2e 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void) static __init void kvm_free_tmp(void) { + /* + * Inform kmemleak about the hole in the .bss section since the + * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. + */ + kmemleak_free_part(&kvm_tmp[kvm_tmp_index], + ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); free_reserved_area(&kvm_tmp[kvm_tmp_index], &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); } diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 33b34a58fc62f50b9d7cb8ad1e82c6eab46bb39f..5b9dce17f0c926c9f4c9c838a06658920086bcdb 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void) /* Now find out if one of these is out firmware console */ path = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (path == NULL) + path = of_get_property(of_chosen, "stdout-path", NULL); if (path != NULL) { stdout = of_find_node_by_path(path); if (stdout) @@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void) /* We are getting a weird phandle from OF ... */ /* ... 
So use the full path instead */ name = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (name == NULL) + name = of_get_property(of_chosen, "stdout-path", NULL); if (name == NULL) { - DBG(" no linux,stdout-path !\n"); + DBG(" no stdout-path !\n"); return -ENODEV; } prom_stdout = of_find_node_by_path(name); diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index efdd16a79075f699ecf866f4dfc22abd794699fa..93e06778b136b14db884e864f26f9befdee93016 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_ue_event_queue); static void machine_check_process_queued_event(struct irq_work *work); +static void machine_check_ue_irq_work(struct irq_work *work); void machine_check_ue_event(struct machine_check_event *evt); static void machine_process_ue_event(struct work_struct *work); @@ -52,6 +53,10 @@ static struct irq_work mce_event_process_work = { .func = machine_check_process_queued_event, }; +static struct irq_work mce_ue_event_irq_work = { + .func = machine_check_ue_irq_work, +}; + DECLARE_WORK(mce_ue_event_work, machine_process_ue_event); static void mce_set_error_info(struct machine_check_event *mce, @@ -208,6 +213,10 @@ void release_mce_event(void) get_mce_event(NULL, true); } +static void machine_check_ue_irq_work(struct irq_work *work) +{ + schedule_work(&mce_ue_event_work); +} /* * Queue up the MCE event which then can be handled later. @@ -225,7 +234,7 @@ void machine_check_ue_event(struct machine_check_event *evt) memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt)); /* Queue work to process this event later. */ - schedule_work(&mce_ue_event_work); + irq_work_queue(&mce_ue_event_irq_work); } /* diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 3497c8329c1d70bc7d010e97a3aeb66f524b9e1d..37a110b8e7e1724571ee7e3d22036b5b9a51832b 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -39,6 +39,7 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) { pte_t *ptep; + unsigned int shift; unsigned long flags; struct mm_struct *mm; @@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) mm = &init_mm; local_irq_save(flags); - if (mm == current->mm) - ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL); - else - ptep = find_init_mm_pte(addr, NULL); + ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift); local_irq_restore(flags); + if (!ptep || pte_special(*ptep)) return ULONG_MAX; + + if (shift > PAGE_SHIFT) { + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; + + return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask))); + } + return pte_pfn(*ptep); } @@ -89,6 +95,13 @@ static void flush_and_reload_slb(void) static void flush_erat(void) { +#ifdef CONFIG_PPC_BOOK3S_64 + if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { + flush_and_reload_slb(); + return; + } +#endif + /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */ asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } @@ -332,7 +345,7 @@ static const struct mce_derror_table mce_p9_derror_table[] = { MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, }, { 0, false, 0, 0, 0, 0 } }; -static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr, +static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, uint64_t *phys_addr) { /* @@ -523,7 +536,8 @@ static int mce_handle_derror(struct pt_regs *regs, * kernel/exception-64s.h */ if (get_paca()->in_mce < 
MAX_MCE_DEPTH) - mce_find_instr_ea_and_pfn(regs, addr, phys_addr); + mce_find_instr_ea_and_phys(regs, addr, + phys_addr); } found = 1; } diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 262ba948178107a8180a7aff69603e226a248cec..facc02964ab3155b9c400f825dd38f9018de4014 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -87,7 +87,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of cache block size */ - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 1: dcbst 0,r6 @@ -103,7 +103,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) subf r8,r6,r4 /* compute length */ add r8,r8,r5 lwz r9,ICACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of Icache block size */ - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 2: icbi 0,r6 @@ -135,7 +135,7 @@ _GLOBAL_TOC(flush_dcache_range) subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */ - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ mtctr r8 0: dcbst 0,r6 @@ -153,7 +153,7 @@ _GLOBAL(flush_inval_dcache_range) subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */ - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ beqlr /* nothing to do? */ sync isync diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 77371c9ef3d8f9fd46894e47a119e215ca86ae3a..2d861a36662ed310e5efbff4f1ae133047ffcb57 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -74,6 +74,14 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr + sect->sh_size); #endif /* CONFIG_PPC64 */ +#ifdef PPC64_ELF_ABI_v1 + sect = find_section(hdr, sechdrs, ".opd"); + if (sect != NULL) { + me->arch.start_opd = sect->sh_addr; + me->arch.end_opd = sect->sh_addr + sect->sh_size; + } +#endif /* PPC64_ELF_ABI_v1 */ + #ifdef CONFIG_PPC_BARRIER_NOSPEC sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index b8d61e019d06189f74d56114c23aa3637d1579b7..8661eea78503f5351c590417a61bf81b159b7be8 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -360,11 +360,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); - else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) { - me->arch.start_opd = sechdrs[i].sh_addr; - me->arch.end_opd = sechdrs[i].sh_addr + - sechdrs[i].sh_size; - } /* We don't handle .init for the moment: rename to _init */ while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init"))) @@ -685,7 +680,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_PPC64_REL32: /* 32 bits relative (used by relative exception tables) */ - *(u32 *)location = value - (unsigned long)location; + /* Convert value to relative */ + value -= (unsigned long)location; + if (value + 0x80000000 > 0xffffffff) { + pr_err("%s: REL32 %li out of range!\n", + me->name, (long int)value); + return -ENOEXEC; + } + *(u32 
*)location = value; break; case R_PPC64_TOCSAVE: diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c index dab616a33b8dbe283aa46c05b5492af69190650f..f2197654be070721abd7bd263b68ae2ddd8094eb 100644 --- a/arch/powerpc/kernel/msi.c +++ b/arch/powerpc/kernel/msi.c @@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev) { struct pci_controller *phb = pci_bus_to_host(dev->bus); - phb->controller_ops.teardown_msi_irqs(dev); + /* + * We can be called even when arch_setup_msi_irqs() returns -ENOSYS, + * so check the pointer again. + */ + if (phb->controller_ops.teardown_msi_irqs) + phb->controller_ops.teardown_msi_irqs(dev); } diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 22e9d281324daf63e92d16b1f32db3ec73aa23ad..e7d4ce6964ae99c675b18c1539997aae81f513fd 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -563,8 +563,6 @@ static int nvram_pstore_init(void) nvram_pstore_info.buf = oops_data; nvram_pstore_info.bufsize = oops_data_sz; - spin_lock_init(&nvram_pstore_info.buf_lock); - rc = pstore_register(&nvram_pstore_info); if (rc && (rc != -EPERM)) /* Print error only when pstore.backend == nvram */ diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index dff28f90351245d58f6b77130fb26fcb73351c5d..ac956dfdbbde69b01cd3334d343a379f37c7cee2 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -144,7 +144,8 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose) * with incomplete address decoding but I'd rather not deal with * those outside of the reserved 64K legacy region. */ - area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END); + area = __get_vm_area_caller(size_page, 0, PHB_IO_BASE, PHB_IO_END, + __builtin_return_address(0)); if (area == NULL) return -ENOMEM; hose->io_base_alloc = area->addr; diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 98f04725def75f09dc924f7861d795c986ca98f0..c101b321dece8480f3474d709e7d10fcc17bc1e6 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -45,6 +45,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge) if (addr0 & 0x02000000) { flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + flags |= IORESOURCE_MEM_64; flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; if (addr0 & 0x40000000) flags |= IORESOURCE_PREFETCH diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bb6ac471a784e70918d25a450e31ecba3f352881..02b69a68139cc2d85eafc27d5e17ad089c5fdd67 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -102,27 +102,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk) } } -static inline bool msr_tm_active(unsigned long msr) -{ - return MSR_TM_ACTIVE(msr); -} - -static bool tm_active_with_fp(struct task_struct *tsk) -{ - return msr_tm_active(tsk->thread.regs->msr) && - (tsk->thread.ckpt_regs.msr & MSR_FP); -} - -static bool tm_active_with_altivec(struct task_struct *tsk) -{ - return msr_tm_active(tsk->thread.regs->msr) && - (tsk->thread.ckpt_regs.msr & MSR_VEC); -} #else -static inline bool msr_tm_active(unsigned long msr) { return false; } static inline void check_if_tm_restore_required(struct task_struct *tsk) { } -static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; } -static inline bool tm_active_with_altivec(struct 
task_struct *tsk) { return false; } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ bool strict_msr_control; @@ -180,7 +161,7 @@ static void __giveup_fpu(struct task_struct *tsk) save_fpu(tsk); msr = tsk->thread.regs->msr; - msr &= ~MSR_FP; + msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); #ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; @@ -247,7 +228,8 @@ void enable_kernel_fp(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. */ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_fpu(current); } @@ -256,7 +238,7 @@ EXPORT_SYMBOL(enable_kernel_fp); static int restore_fp(struct task_struct *tsk) { - if (tsk->thread.load_fp || tm_active_with_fp(tsk)) { + if (tsk->thread.load_fp) { load_fp_state(¤t->thread.fp_state); current->thread.load_fp++; return 1; @@ -311,7 +293,8 @@ void enable_kernel_altivec(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. */ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_altivec(current); } @@ -337,8 +320,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); static int restore_altivec(struct task_struct *tsk) { - if (cpu_has_feature(CPU_FTR_ALTIVEC) && - (tsk->thread.load_vec || tm_active_with_altivec(tsk))) { + if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) { load_vr_state(&tsk->thread.vr_state); tsk->thread.used_vr = 1; tsk->thread.load_vec++; @@ -397,7 +379,8 @@ void enable_kernel_vsx(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. */ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_vsx(current); } @@ -499,13 +482,14 @@ void giveup_all(struct task_struct *tsk) if (!tsk->thread.regs) return; + check_if_tm_restore_required(tsk); + usermsr = tsk->thread.regs->msr; if ((usermsr & msr_all_available) == 0) return; msr_check_and_set(msr_all_available); - check_if_tm_restore_required(tsk); WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); @@ -530,7 +514,7 @@ void restore_math(struct pt_regs *regs) { unsigned long msr; - if (!msr_tm_active(regs->msr) && + if (!MSR_TM_ACTIVE(regs->msr) && !current->thread.load_fp && !loadvec(current->thread)) return; @@ -591,12 +575,11 @@ void flush_all_to_thread(struct task_struct *tsk) if (tsk->thread.regs) { preempt_disable(); BUG_ON(tsk != current); - save_all(tsk); - #ifdef CONFIG_SPE if (tsk->thread.regs->msr & MSR_SPE) tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); #endif + save_all(tsk); preempt_enable(); } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index c4d7078e5295fa6eec81f1913783191a162aac06..8e88f78e57dbae9c5d11b6eb033cc72f7ac56c34 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -129,7 +129,7 @@ static void __init move_device_tree(void) p = __va(memblock_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); initial_boot_params = p; - DBG("Moved device tree to 0x%p\n", p); + DBG("Moved device tree to 0x%px\n", p); } DBG("<- move_device_tree\n"); @@ -689,7 +689,7 @@ void __init early_init_devtree(void *params) { phys_addr_t limit; - DBG(" -> early_init_devtree(%p)\n", params); + DBG(" -> early_init_devtree(%px)\n", params); /* Too early to BUG_ON(), do it by 
hand */ if (!early_init_dt_verify(params)) @@ -749,7 +749,7 @@ void __init early_init_devtree(void *params) memblock_allow_resize(); memblock_dump_all(); - DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); + DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size()); /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 9667666eb18e56d4afcc7f76b76b9779b1ac6536..d245f0af412a7994614a78ea0630be4ff268aece 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -561,6 +561,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, /* * Copy out only the low-order word of vrsave. */ + int start, end; union { elf_vrreg_t reg; u32 word; @@ -569,8 +570,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, vrsave.word = target->thread.vrsave; + start = 33 * sizeof(vector128); + end = start + sizeof(vrsave); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, - 33 * sizeof(vector128), -1); + start, end); } return ret; @@ -608,6 +611,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, /* * We use only the first word of vrsave. */ + int start, end; union { elf_vrreg_t reg; u32 word; @@ -616,8 +620,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, vrsave.word = target->thread.vrsave; + start = 33 * sizeof(vector128); + end = start + sizeof(vrsave); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, - 33 * sizeof(vector128), -1); + start, end); if (!ret) target->thread.vrsave = vrsave.word; } @@ -2974,6 +2980,9 @@ long arch_ptrace(struct task_struct *child, long request, void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); + switch (request) { /* read the word at location addr in the USER area. 
*/ case PTRACE_PEEKUSR: { @@ -3000,10 +3009,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.TS_FPR(fpidx), - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + else + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); + } else tmp = child->thread.fp_state.fpscr; } ret = put_user(tmp, datalp); @@ -3033,10 +3045,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + else + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); + } else child->thread.fp_state.fpscr = data; ret = 0; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8afd146bc9c70dc6480e2fff20d6239d327e33d3..7e0722b62cae90ab0674ca3dbfbd5ecb67389147 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, return 0; for_each_cpu(cpu, cpus) { + struct device *dev = get_cpu_device(cpu); + switch (state) { case DOWN: - cpuret = cpu_down(cpu); + cpuret = device_offline(dev); break; case UP: - cpuret = cpu_up(cpu); + cpuret = device_online(dev); break; } - if (cpuret) { + if (cpuret < 0) { pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", __func__, ((state == UP) ? "up" : "down"), @@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle) data.token = rtas_token("ibm,suspend-me"); data.complete = &done; + lock_device_hotplug(); + /* All present CPUs must be online */ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); cpuret = rtas_online_cpus_mask(offline_mask); @@ -981,6 +985,7 @@ int rtas_ibm_suspend_me(u64 handle) goto out; } + cpu_hotplug_disable(); stop_topology_update(); /* Call function on all CPUs. One of us will make the @@ -995,6 +1000,7 @@ int rtas_ibm_suspend_me(u64 handle) printk(KERN_ERR "Error doing global join\n"); start_topology_update(); + cpu_hotplug_enable(); /* Take down CPUs not online prior to suspend */ cpuret = rtas_offline_cpus_mask(offline_mask); @@ -1003,6 +1009,7 @@ int rtas_ibm_suspend_me(u64 handle) __func__); out: + unlock_device_hotplug(); free_cpumask_var(offline_mask); return atomic_read(&data.error); } @@ -1050,6 +1057,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, return NULL; } +#ifdef CONFIG_PPC_RTAS_FILTER + +/* + * The sys_rtas syscall, as originally designed, allows root to pass + * arbitrary physical addresses to RTAS calls. A number of RTAS calls + * can be abused to write to arbitrary memory and do other things that + * are potentially harmful to system integrity, and thus should only + * be used inside the kernel and not exposed to userspace. + * + * All known legitimate users of the sys_rtas syscall will only ever + * pass addresses that fall within the RMO buffer, and use a known + * subset of RTAS calls. + * + * Accordingly, we filter RTAS requests to check that the call is + * permitted, and that provided pointers fall within the RMO buffer. 
+ * The rtas_filters list contains an entry for each permitted call, + * with the indexes of the parameters which are expected to contain + * addresses and sizes of buffers allocated inside the RMO buffer. + */ +struct rtas_filter { + const char *name; + int token; + /* Indexes into the args buffer, -1 if not used */ + int buf_idx1; + int size_idx1; + int buf_idx2; + int size_idx2; + + int fixed_size; +}; + +static struct rtas_filter rtas_filters[] __ro_after_init = { + { "ibm,activate-firmware", -1, -1, -1, -1, -1 }, + { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */ + { "display-character", -1, -1, -1, -1, -1 }, + { "ibm,display-message", -1, 0, -1, -1, -1 }, + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,open-errinct", -1, -1, -1, -1, -1 }, + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, + { "ibm,get-indices", -1, 2, 3, -1, -1 }, + { "get-power-level", -1, -1, -1, -1, -1 }, + { "get-sensor-state", -1, -1, -1, -1, -1 }, + { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }, + { "get-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,get-vpd", -1, 0, -1, 1, 2 }, + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, + { "ibm,set-eeh-option", -1, -1, -1, -1, -1 }, + { "set-indicator", -1, -1, -1, -1, -1 }, + { "set-power-level", -1, -1, -1, -1, -1 }, + { "set-time-for-power-on", -1, -1, -1, -1, -1 }, + { "ibm,set-system-parameter", -1, 1, -1, -1, -1 }, + { "set-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,suspend-me", -1, -1, -1, -1, -1 }, + { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 }, + { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 }, + { "ibm,physical-attestation", -1, 0, 1, -1, -1 }, +}; + +static bool in_rmo_buf(u32 base, u32 end) +{ + return base >= rtas_rmo_buf && + base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && + base <= end && + end >= rtas_rmo_buf && + end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); +} + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + struct rtas_filter *f = &rtas_filters[i]; + u32 base, size, end; + + if (token != f->token) + continue; + + if (f->buf_idx1 != -1) { + base = be32_to_cpu(args->args[f->buf_idx1]); + if (f->size_idx1 != -1) + size = be32_to_cpu(args->args[f->size_idx1]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + + end = base + size - 1; + if (!in_rmo_buf(base, end)) + goto err; + } + + if (f->buf_idx2 != -1) { + base = be32_to_cpu(args->args[f->buf_idx2]); + if (f->size_idx2 != -1) + size = be32_to_cpu(args->args[f->size_idx2]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + end = base + size - 1; + + /* + * Special case for ibm,configure-connector where the + * address can be 0 + */ + if (!strcmp(f->name, "ibm,configure-connector") && + base == 0) + return false; + + if (!in_rmo_buf(base, end)) + goto err; + } + + return false; + } + +err: + pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); + pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", + token, nargs, current->comm); + return true; +} + +#else + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + return false; +} + +#endif /* CONFIG_PPC_RTAS_FILTER */ + /* We 
assume to be passed big endian arguments */ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { @@ -1087,6 +1235,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) args.rets = &args.args[nargs]; memset(args.rets, 0, nret * sizeof(rtas_arg_t)); + if (block_rtas_call(token, nargs, &args)) + return -EINVAL; + /* Need to handle ibm,suspend_me call specially */ if (token == ibm_suspend_me_token) { @@ -1148,6 +1299,9 @@ void __init rtas_initialize(void) unsigned long rtas_region = RTAS_INSTANTIATE_MAX; u32 base, size, entry; int no_base, no_size, no_entry; +#ifdef CONFIG_PPC_RTAS_FILTER + int i; +#endif /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. @@ -1183,6 +1337,12 @@ void __init rtas_initialize(void) #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); #endif + +#ifdef CONFIG_PPC_RTAS_FILTER + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + rtas_filters[i].token = rtas_token(rtas_filters[i].name); + } +#endif } int __init early_init_dt_scan_rtas(unsigned long node, diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 10fabae2574d5910b3cbf5ada83d912879d71351..8246f437bbc69b7bf2fdcda32e18fc6ebfe85a06 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -523,7 +523,7 @@ static ssize_t validate_flash_write(struct file *file, const char __user *buf, args_buf->status = VALIDATE_INCOMPLETE; } - if (!access_ok(VERIFY_READ, buf, count)) { + if (!access_ok(buf, count)) { rc = -EFAULT; goto done; } diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 44d66c33d59d5a2d9e38c0138cb0749927eebd6f..219e488e8c4ebfdf58d8876634426229a3e397e4 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -274,27 +274,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) } #ifdef CONFIG_PPC_PSERIES -static s32 prrn_update_scope; - -static void prrn_work_fn(struct work_struct *work) +static void handle_prrn_event(s32 scope) { /* * For PRRN, we must pass the negative of the scope value in * the RTAS event. */ - pseries_devicetree_update(-prrn_update_scope); + pseries_devicetree_update(-scope); numa_update_cpu_topology(false); } -static DECLARE_WORK(prrn_work, prrn_work_fn); - -static void prrn_schedule_update(u32 scope) -{ - flush_work(&prrn_work); - prrn_update_scope = scope; - schedule_work(&prrn_work); -} - static void handle_rtas_event(const struct rtas_error_log *log) { if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled()) @@ -303,7 +292,7 @@ static void handle_rtas_event(const struct rtas_error_log *log) /* For PRRN Events the extended log length is used to denote * the scope for calling rtas update-nodes. */ - prrn_schedule_update(rtas_error_extended_log_length(log)); + handle_prrn_event(rtas_error_extended_log_length(log)); } #else @@ -342,7 +331,7 @@ static ssize_t rtas_log_read(struct file * file, char __user * buf, count = rtas_error_log_buffer_max; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; tmp = kmalloc(count, GFP_KERNEL); diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index f6f469fc4073e5aada3236e7f1889df05f0e7819..6a3dde9587ccba3d36b7fac2370bcb94e07002c8 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -4,6 +4,7 @@ // // Copyright 2018, Michael Ellerman, IBM Corporation. 
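Stepping back to the sys_rtas filtering added in rtas.c above: block_rtas_call() only lets a filtered call through when every buffer argument lies wholly inside the RMO region reserved for the syscall. The standalone rendering below mirrors the in_rmo_buf() predicate, with the RMO window passed in explicitly so it can be exercised outside the kernel; the worked addresses and sizes are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as in_rmo_buf(), but taking the RMO window as parameters
 * instead of reading the globals rtas_rmo_buf / RTAS_RMOBUF_MAX. */
static bool buf_in_rmo(uint32_t base, uint32_t end,
		       uint32_t rmo_base, uint32_t rmo_size)
{
	return base >= rmo_base && base < (rmo_base + rmo_size) &&
	       base <= end &&
	       end >= rmo_base && end < (rmo_base + rmo_size);
}

int main(void)
{
	uint32_t rmo_base = 0x2000000, rmo_size = 64 * 1024;	/* illustrative */

	/* e.g. ibm,get-system-parameter passes its buffer in args[1] and the
	 * length in args[2]; the filter checks base .. base + size - 1. */
	uint32_t base = 0x2000100, size = 256, end = base + size - 1;

	printf("inside RMO:  %d\n", buf_in_rmo(base, end, rmo_base, rmo_size));
	printf("outside RMO: %d\n", buf_in_rmo(0x100000, 0x1000ff, rmo_base, rmo_size));
	return 0;
}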
+#include #include #include #include @@ -22,10 +23,15 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_SW = 0x2, COUNT_CACHE_FLUSH_HW = 0x4, }; -static enum count_cache_flush_type count_cache_flush_type; +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; +static bool link_stack_flush_enabled; bool barrier_nospec_enabled; static bool no_nospec; +static bool btb_flush_enabled; +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +static bool no_spectrev2; +#endif static void enable_barrier_nospec(bool enable) { @@ -52,7 +58,7 @@ void setup_barrier_nospec(void) enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); - if (!no_nospec) + if (!no_nospec && !cpu_mitigations_off()) enable_barrier_nospec(enable); } @@ -101,6 +107,26 @@ static __init int barrier_nospec_debugfs_init(void) device_initcall(barrier_nospec_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +static int __init handle_nospectre_v2(char *p) +{ + no_spectrev2 = true; + + return 0; +} +early_param("nospectre_v2", handle_nospectre_v2); +#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ + +#ifdef CONFIG_PPC_FSL_BOOK3E +void setup_spectre_v2(void) +{ + if (no_spectrev2 || cpu_mitigations_off()) + do_btb_flush_fixups(); + else + btb_flush_enabled = true; +} +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { @@ -108,32 +134,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); - if (rfi_flush || thread_priv) { + if (rfi_flush) { struct seq_buf s; seq_buf_init(&s, buf, PAGE_SIZE - 1); - seq_buf_printf(&s, "Mitigation: "); - - if (rfi_flush) - seq_buf_printf(&s, "RFI Flush"); - - if (rfi_flush && thread_priv) - seq_buf_printf(&s, ", "); - + seq_buf_printf(&s, "Mitigation: RFI Flush"); if (thread_priv) - seq_buf_printf(&s, "L1D private per thread"); + seq_buf_printf(&s, ", L1D private per thread"); seq_buf_printf(&s, "\n"); return s.len; } + if (thread_priv) + return sprintf(buf, "Vulnerable: L1D private per thread\n"); + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) return sprintf(buf, "Not affected\n"); return sprintf(buf, "Vulnerable\n"); } + +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_meltdown(dev, attr, buf); +} #endif ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) @@ -168,31 +195,35 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); - if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { - bool comma = false; + if (bcs || ccd) { seq_buf_printf(&s, "Mitigation: "); - if (bcs) { + if (bcs) seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); - comma = true; - } - if (ccd) { - if (comma) - seq_buf_printf(&s, ", "); + if (bcs && ccd) + seq_buf_printf(&s, ", "); + + if (ccd) seq_buf_printf(&s, "Indirect branch cache disabled"); - comma = true; - } - if (comma) - seq_buf_printf(&s, ", "); + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); - seq_buf_printf(&s, "Software count cache flush"); + } else if 
(count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { + seq_buf_printf(&s, "Mitigation: Software count cache flush"); if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) - seq_buf_printf(&s, "(hardware accelerated)"); - } else + seq_buf_printf(&s, " (hardware accelerated)"); + + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); + + } else if (btb_flush_enabled) { + seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); + } else { seq_buf_printf(&s, "Vulnerable"); + } seq_buf_printf(&s, "\n"); @@ -282,7 +313,7 @@ void setup_stf_barrier(void) stf_enabled_flush_types = type; - if (!no_stf_barrier) + if (!no_stf_barrier && !cpu_mitigations_off()) stf_barrier_enable(enable); } @@ -348,18 +379,49 @@ static __init int stf_barrier_debugfs_init(void) device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +static void no_count_cache_flush(void) +{ + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + pr_info("count-cache-flush: software flush disabled.\n"); +} + static void toggle_count_cache_flush(bool enable) { - if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && + !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) + enable = false; + + if (!enable) { patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; - pr_info("count-cache-flush: software flush disabled.\n"); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); +#endif + pr_info("link-stack-flush: software flush disabled.\n"); + link_stack_flush_enabled = false; + no_count_cache_flush(); return; } + // This enables the branch from _switch to flush_count_cache patch_branch_site(&patch__call_flush_count_cache, (u64)&flush_count_cache, BRANCH_SET_LINK); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + // This enables the branch from guest_exit_cont to kvm_flush_link_stack + patch_branch_site(&patch__call_kvm_flush_link_stack, + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); +#endif + + pr_info("link-stack-flush: software flush enabled.\n"); + link_stack_flush_enabled = true; + + // If we just need to flush the link stack, patch an early return + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); + no_count_cache_flush(); + return; + } + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { count_cache_flush_type = COUNT_CACHE_FLUSH_SW; pr_info("count-cache-flush: full software flush sequence enabled.\n"); @@ -373,7 +435,26 @@ static void toggle_count_cache_flush(bool enable) void setup_count_cache_flush(void) { - toggle_count_cache_flush(true); + bool enable = true; + + if (no_spectrev2 || cpu_mitigations_off()) { + if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || + security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) + pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); + + enable = false; + } + + /* + * There's no firmware feature flag/hypervisor bit to tell us we need to + * flush the link stack on context switch. So we set it here if we see + * either of the Spectre v2 mitigations that aim to protect userspace. 
+ */ + if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || + security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); + + toggle_count_cache_flush(enable); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 93fa0c99681e6ed81060470527c9244195cc179c..508244bcf19c219ab967e0a5c7e3c867fa7588e5 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -973,6 +973,7 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); setup_barrier_nospec(); + setup_spectre_v2(); paging_init(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6a501b25dd85f9290c86c30fcdda409be1f2002d..97da86b318c87fb56a351477557753fd69e02e6c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -243,13 +243,19 @@ static void cpu_ready_for_interrupts(void) } /* - * Fixup HFSCR:TM based on CPU features. The bit is set by our - * early asm init because at that point we haven't updated our - * CPU features from firmware and device-tree. Here we have, - * so let's do it. + * Set HFSCR:TM based on CPU features: + * In the special case of TM no suspend (P9N DD2.1), Linux is + * told TM is off via the dt-ftrs but told to (partially) use + * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM] + * will be off from dt-ftrs but we need to turn it on for the + * no suspend case. */ - if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) - mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM); + else + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + } /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; @@ -855,7 +861,13 @@ early_initcall(disable_hardlockup_detector); static enum l1d_flush_type enabled_flush_types; static void *l1d_flush_fallback_area; static bool no_rfi_flush; +static bool no_entry_flush; +static bool no_uaccess_flush; bool rfi_flush; +bool entry_flush; +bool uaccess_flush; +DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); +EXPORT_SYMBOL(uaccess_flush_key); static int __init handle_no_rfi_flush(char *p) { @@ -865,6 +877,22 @@ static int __init handle_no_rfi_flush(char *p) } early_param("no_rfi_flush", handle_no_rfi_flush); +static int __init handle_no_entry_flush(char *p) +{ + pr_info("entry-flush: disabled on command line."); + no_entry_flush = true; + return 0; +} +early_param("no_entry_flush", handle_no_entry_flush); + +static int __init handle_no_uaccess_flush(char *p) +{ + pr_info("uaccess-flush: disabled on command line."); + no_uaccess_flush = true; + return 0; +} +early_param("no_uaccess_flush", handle_no_uaccess_flush); + /* * The RFI flush is not KPTI, but because users will see doco that says to use * nopti we hijack that option here to also disable the RFI flush. 
@@ -896,6 +924,32 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } +void entry_flush_enable(bool enable) +{ + if (enable) { + do_entry_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else { + do_entry_flush_fixups(L1D_FLUSH_NONE); + } + + entry_flush = enable; +} + +void uaccess_flush_enable(bool enable) +{ + if (enable) { + do_uaccess_flush_fixups(enabled_flush_types); + static_branch_enable(&uaccess_flush_key); + on_each_cpu(do_nothing, NULL, 1); + } else { + static_branch_disable(&uaccess_flush_key); + do_uaccess_flush_fixups(L1D_FLUSH_NONE); + } + + uaccess_flush = enable; +} + static void __ref init_fallback_flush(void) { u64 l1d_size, limit; @@ -949,10 +1003,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable) enabled_flush_types = types; - if (!no_rfi_flush) + if (!cpu_mitigations_off() && !no_rfi_flush) rfi_flush_enable(enable); } +void setup_entry_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_entry_flush) + entry_flush_enable(enable); +} + +void setup_uaccess_flush(bool enable) +{ + if (cpu_mitigations_off()) + return; + + if (!no_uaccess_flush) + uaccess_flush_enable(enable); +} + #ifdef CONFIG_DEBUG_FS static int rfi_flush_set(void *data, u64 val) { @@ -980,9 +1052,63 @@ static int rfi_flush_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); +static int entry_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != entry_flush) + entry_flush_enable(enable); + + return 0; +} + +static int entry_flush_get(void *data, u64 *val) +{ + *val = entry_flush ? 1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); + +static int uaccess_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != uaccess_flush) + uaccess_flush_enable(enable); + + return 0; +} + +static int uaccess_flush_get(void *data, u64 *val) +{ + *val = uaccess_flush ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); + static __init int rfi_flush_debugfs_init(void) { debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); + debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); return 0; } device_initcall(rfi_flush_debugfs_init); diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index b3e8db376ecde459bb8b5a1cd00b10c9606df289..e6c30cee6abf1748e52fe5a4ae9a0fbc01c3274a 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -44,7 +44,7 @@ void __user *get_sigframe(struct ksignal *ksig, unsigned long sp, newsp = (oldsp - frame_size) & ~0xFUL; /* Check access */ - if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp)) + if (!access_ok((void __user *)newsp, oldsp - newsp)) return NULL; return (void __user *)newsp; diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index e6474a45cef50623be68bc1fbf0b83635275dceb..34fb8018157988f6a6683af6a33d85af3766c0e5 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs, /* If TM bits are set to the reserved value, it's an invalid context */ if (MSR_TM_RESV(msr_hi)) return 1; - /* Pull in the MSR TM bits from the user context */ + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. + * + * Pull in the MSR TM bits from the user context + */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. 
After recheckpointing, the @@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, } #endif + preempt_enable(); + return 0; } #endif @@ -999,7 +1017,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int #else if (__get_user(mcp, &ucp->uc_regs)) return -EFAULT; - if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) + if (!access_ok(mcp, sizeof(*mcp))) return -EFAULT; #endif set_current_blocked(&set); @@ -1102,7 +1120,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, */ mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); - if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) + if (!access_ok(old_ctx, ctx_size) || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) @@ -1110,7 +1128,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, } if (new_ctx == NULL) return 0; - if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || + if (!access_ok(new_ctx, ctx_size) || fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) return -EFAULT; @@ -1151,7 +1169,7 @@ SYSCALL_DEFINE0(rt_sigreturn) rt_sf = (struct rt_sigframe __user *) (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); - if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) + if (!access_ok(rt_sf, sizeof(*rt_sf))) goto bad; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1184,6 +1202,9 @@ SYSCALL_DEFINE0(rt_sigreturn) goto bad; if (MSR_TM_ACTIVE(msr_hi<<32)) { + /* Trying to start TM on non TM system */ + if (!cpu_has_feature(CPU_FTR_TM)) + goto bad; /* We only recheckpoint on return if we're * transaction. */ @@ -1289,7 +1310,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, current->thread.debug.dbcr0 = new_dbcr0; #endif - if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || + if (!access_ok(ctx, sizeof(*ctx)) || fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx))) return -EFAULT; @@ -1474,7 +1495,7 @@ SYSCALL_DEFINE0(sigreturn) { sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); addr = sr; - if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) + if (!access_ok(sr, sizeof(*sr)) || restore_user_regs(regs, sr, 1)) goto badframe; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 83d51bf586c7e1ec3697a424a33a1559579147b8..f2ec2a843ee445e6e05e2dd2ad0eb765ccc856a7 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -383,7 +383,7 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig, err |= __get_user(v_regs, &sc->v_regs); if (err) return err; - if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) + if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && (msr & MSR_VEC) != 0) { @@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, if (MSR_TM_RESV(msr)) return -EINVAL; - /* pull in MSR TS bits from user context */ - regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); - - /* - * Ensure that TM is enabled in regs->msr before we leave the signal - * handler. It could be the case that (a) user disabled the TM bit - * through the manipulation of the MSR bits in uc_mcontext or (b) the - * TM bit was disabled because a sufficient number of context switches - * happened whilst in the signal handler and load_tm overflowed, - * disabling the TM bit. 
In either case we can end up with an illegal - * TM state leading to a TM Bad Thing when we return to userspace. - */ - regs->msr |= MSR_TM; - /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); @@ -516,10 +502,9 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, err |= __get_user(tm_v_regs, &tm_sc->v_regs); if (err) return err; - if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) + if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128))) return -EFAULT; - if (tm_v_regs && !access_ok(VERIFY_READ, - tm_v_regs, 34 * sizeof(vector128))) + if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { @@ -572,6 +557,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, tm_enable(); /* Make sure the transaction is marked as failed */ tsk->thread.tm_texasr |= TEXASR_FS; + + /* + * Disabling preemption, since it is unsafe to be preempted + * with MSR[TS] set without recheckpointing. + */ + preempt_disable(); + + /* pull in MSR TS bits from user context */ + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); + + /* + * Ensure that TM is enabled in regs->msr before we leave the signal + * handler. It could be the case that (a) user disabled the TM bit + * through the manipulation of the MSR bits in uc_mcontext or (b) the + * TM bit was disabled because a sufficient number of context switches + * happened whilst in the signal handler and load_tm overflowed, + * disabling the TM bit. In either case we can end up with an illegal + * TM state leading to a TM Bad Thing when we return to userspace. + * + * CAUTION: + * After regs->MSR[TS] being updated, make sure that get_user(), + * put_user() or similar functions are *not* called. These + * functions can generate page faults which will cause the process + * to be de-scheduled with MSR[TS] set but without calling + * tm_recheckpoint(). This can cause a bug. + */ + regs->msr |= MSR_TM; + /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&tsk->thread); @@ -585,6 +598,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, regs->msr |= MSR_VEC; } + preempt_enable(); + return err; } #endif @@ -654,7 +669,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, ctx_has_vsx_region = 1; if (old_ctx != NULL) { - if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) + if (!access_ok(old_ctx, ctx_size) || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0, ctx_has_vsx_region) || __copy_to_user(&old_ctx->uc_sigmask, @@ -663,7 +678,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, } if (new_ctx == NULL) return 0; - if (!access_ok(VERIFY_READ, new_ctx, ctx_size) + if (!access_ok(new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; @@ -708,7 +723,7 @@ SYSCALL_DEFINE0(rt_sigreturn) /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) + if (!access_ok(uc, sizeof(*uc))) goto badframe; if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) @@ -734,17 +749,35 @@ SYSCALL_DEFINE0(rt_sigreturn) if (MSR_TM_ACTIVE(msr)) { /* We recheckpoint on return. 
*/ struct ucontext __user *uc_transact; + + /* Trying to start TM on non TM system */ + if (!cpu_has_feature(CPU_FTR_TM)) + goto badframe; + if (__get_user(uc_transact, &uc->uc_link)) goto badframe; if (restore_tm_sigcontexts(current, &uc->uc_mcontext, &uc_transact->uc_mcontext)) goto badframe; - } - else - /* Fall through, for non-TM restore */ + } else #endif - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) - goto badframe; + { + /* + * Fall through, for non-TM restore + * + * Unset MSR[TS] on the thread regs since MSR from user + * context does not have MSR active, and recheckpoint was + * not called since restore_tm_sigcontexts() was not called + * also. + * + * If not unsetting it, the code can RFID to userspace with + * MSR[TS] set, but without CPU in the proper state, + * causing a TM bad thing. + */ + current->thread.regs->msr &= ~MSR_TS_MASK; + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) + goto badframe; + } if (restore_altstack(&uc->uc_stack)) goto badframe; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 61c1fadbc644441bcd90b3fb05363891ecdf7082..6dc43205382ba27a82e20b7a60a77c0cc2ead085 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -338,13 +338,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) * NMI IPIs may not be recoverable, so should not be used as ongoing part of * a running system. They can be used for crash, debug, halt/reboot, etc. * - * NMI IPIs are globally single threaded. No more than one in progress at - * any time. - * * The IPI call waits with interrupts disabled until all targets enter the - * NMI handler, then the call returns. + * NMI handler, then returns. Subsequent IPIs can be issued before targets + * have returned from their handlers, so there is no guarantee about + * concurrency or re-entrancy. * - * No new NMI can be initiated until targets exit the handler. + * A new NMI can be issued before all targets exit the handler. * * The IPI call may time out without all targets entering the NMI handler. * In that case, there is some logic to recover (and ignore subsequent @@ -355,7 +354,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0); static struct cpumask nmi_ipi_pending_mask; -static int nmi_ipi_busy_count = 0; +static bool nmi_ipi_busy = false; static void (*nmi_ipi_function)(struct pt_regs *) = NULL; static void nmi_ipi_lock_start(unsigned long *flags) @@ -394,7 +393,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags) */ int smp_handle_nmi_ipi(struct pt_regs *regs) { - void (*fn)(struct pt_regs *); + void (*fn)(struct pt_regs *) = NULL; unsigned long flags; int me = raw_smp_processor_id(); int ret = 0; @@ -405,29 +404,17 @@ int smp_handle_nmi_ipi(struct pt_regs *regs) * because the caller may have timed out. 
*/ nmi_ipi_lock_start(&flags); - if (!nmi_ipi_busy_count) - goto out; - if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask)) - goto out; - - fn = nmi_ipi_function; - if (!fn) - goto out; - - cpumask_clear_cpu(me, &nmi_ipi_pending_mask); - nmi_ipi_busy_count++; - nmi_ipi_unlock(); - - ret = 1; - - fn(regs); - - nmi_ipi_lock(); - if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */ - nmi_ipi_busy_count--; -out: + if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) { + cpumask_clear_cpu(me, &nmi_ipi_pending_mask); + fn = READ_ONCE(nmi_ipi_function); + WARN_ON_ONCE(!fn); + ret = 1; + } nmi_ipi_unlock_end(&flags); + if (fn) + fn(regs); + return ret; } @@ -453,7 +440,7 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe) * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS. * - fn is the target callback function. * - delay_us > 0 is the delay before giving up waiting for targets to - * complete executing the handler, == 0 specifies indefinite delay. + * begin executing the handler, == 0 specifies indefinite delay. */ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe) { @@ -467,31 +454,33 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool if (unlikely(!smp_ops)) return 0; - /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */ nmi_ipi_lock_start(&flags); - while (nmi_ipi_busy_count) { + while (nmi_ipi_busy) { nmi_ipi_unlock_end(&flags); - spin_until_cond(nmi_ipi_busy_count == 0); + spin_until_cond(!nmi_ipi_busy); nmi_ipi_lock_start(&flags); } - + nmi_ipi_busy = true; nmi_ipi_function = fn; + WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask)); + if (cpu < 0) { /* ALL_OTHERS */ cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask); cpumask_clear_cpu(me, &nmi_ipi_pending_mask); } else { - /* cpumask starts clear */ cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); } - nmi_ipi_busy_count++; + nmi_ipi_unlock(); + /* Interrupts remain hard disabled */ + do_smp_send_nmi_ipi(cpu, safe); nmi_ipi_lock(); - /* nmi_ipi_busy_count is held here, so unlock/lock is okay */ + /* nmi_ipi_busy is set here, so unlock/lock is okay */ while (!cpumask_empty(&nmi_ipi_pending_mask)) { nmi_ipi_unlock(); udelay(1); @@ -503,29 +492,15 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool } } - while (nmi_ipi_busy_count > 1) { - nmi_ipi_unlock(); - udelay(1); - nmi_ipi_lock(); - if (delay_us) { - delay_us--; - if (!delay_us) - break; - } - } - if (!cpumask_empty(&nmi_ipi_pending_mask)) { /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */ ret = 0; cpumask_clear(&nmi_ipi_pending_mask); } - if (nmi_ipi_busy_count > 1) { - /* Timeout waiting for CPUs to execute fn */ - ret = 0; - nmi_ipi_busy_count = 1; - } - nmi_ipi_busy_count--; + nmi_ipi_function = NULL; + nmi_ipi_busy = false; + nmi_ipi_unlock_end(&flags); return ret; @@ -593,17 +568,8 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) static void nmi_stop_this_cpu(struct pt_regs *regs) { /* - * This is a special case because it never returns, so the NMI IPI - * handling would never mark it as done, which makes any later - * smp_send_nmi_ipi() call spin forever. Mark it done now. - * * IRQs are already hard disabled by the smp_handle_nmi_ipi. 
*/ - nmi_ipi_lock(); - if (nmi_ipi_busy_count > 1) - nmi_ipi_busy_count--; - nmi_ipi_unlock(); - spin_begin(); while (1) spin_cpu_relax(); diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index 7a919e9a3400bb9a41cc98b42875b1285e9f5f02..cbdf86228eaaa7c0d64082979d265e332de42bbc 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -25,11 +25,19 @@ #define SL_IBAT2 0x48 #define SL_DBAT3 0x50 #define SL_IBAT3 0x58 -#define SL_TB 0x60 -#define SL_R2 0x68 -#define SL_CR 0x6c -#define SL_LR 0x70 -#define SL_R12 0x74 /* r12 to r31 */ +#define SL_DBAT4 0x60 +#define SL_IBAT4 0x68 +#define SL_DBAT5 0x70 +#define SL_IBAT5 0x78 +#define SL_DBAT6 0x80 +#define SL_IBAT6 0x88 +#define SL_DBAT7 0x90 +#define SL_IBAT7 0x98 +#define SL_TB 0xa0 +#define SL_R2 0xa8 +#define SL_CR 0xac +#define SL_LR 0xb0 +#define SL_R12 0xb4 /* r12 to r31 */ #define SL_SIZE (SL_R12 + 80) .section .data @@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend) mfibatl r4,3 stw r4,SL_IBAT3+4(r11) +BEGIN_MMU_FTR_SECTION + mfspr r4,SPRN_DBAT4U + stw r4,SL_DBAT4(r11) + mfspr r4,SPRN_DBAT4L + stw r4,SL_DBAT4+4(r11) + mfspr r4,SPRN_DBAT5U + stw r4,SL_DBAT5(r11) + mfspr r4,SPRN_DBAT5L + stw r4,SL_DBAT5+4(r11) + mfspr r4,SPRN_DBAT6U + stw r4,SL_DBAT6(r11) + mfspr r4,SPRN_DBAT6L + stw r4,SL_DBAT6+4(r11) + mfspr r4,SPRN_DBAT7U + stw r4,SL_DBAT7(r11) + mfspr r4,SPRN_DBAT7L + stw r4,SL_DBAT7+4(r11) + mfspr r4,SPRN_IBAT4U + stw r4,SL_IBAT4(r11) + mfspr r4,SPRN_IBAT4L + stw r4,SL_IBAT4+4(r11) + mfspr r4,SPRN_IBAT5U + stw r4,SL_IBAT5(r11) + mfspr r4,SPRN_IBAT5L + stw r4,SL_IBAT5+4(r11) + mfspr r4,SPRN_IBAT6U + stw r4,SL_IBAT6(r11) + mfspr r4,SPRN_IBAT6L + stw r4,SL_IBAT6+4(r11) + mfspr r4,SPRN_IBAT7U + stw r4,SL_IBAT7(r11) + mfspr r4,SPRN_IBAT7L + stw r4,SL_IBAT7+4(r11) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + #if 0 /* Backup various CPU config stuffs */ bl __save_cpu_setup @@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mtibatu 3,r4 lwz r4,SL_IBAT3+4(r11) mtibatl 3,r4 -#endif - BEGIN_MMU_FTR_SECTION - li r4,0 + lwz r4,SL_DBAT4(r11) mtspr SPRN_DBAT4U,r4 + lwz r4,SL_DBAT4+4(r11) mtspr SPRN_DBAT4L,r4 + lwz r4,SL_DBAT5(r11) mtspr SPRN_DBAT5U,r4 + lwz r4,SL_DBAT5+4(r11) mtspr SPRN_DBAT5L,r4 + lwz r4,SL_DBAT6(r11) mtspr SPRN_DBAT6U,r4 + lwz r4,SL_DBAT6+4(r11) mtspr SPRN_DBAT6L,r4 + lwz r4,SL_DBAT7(r11) mtspr SPRN_DBAT7U,r4 + lwz r4,SL_DBAT7+4(r11) mtspr SPRN_DBAT7L,r4 + lwz r4,SL_IBAT4(r11) mtspr SPRN_IBAT4U,r4 + lwz r4,SL_IBAT4+4(r11) mtspr SPRN_IBAT4L,r4 + lwz r4,SL_IBAT5(r11) mtspr SPRN_IBAT5U,r4 + lwz r4,SL_IBAT5+4(r11) mtspr SPRN_IBAT5L,r4 + lwz r4,SL_IBAT6(r11) mtspr SPRN_IBAT6U,r4 + lwz r4,SL_IBAT6+4(r11) mtspr SPRN_IBAT6L,r4 + lwz r4,SL_IBAT7(r11) mtspr SPRN_IBAT7U,r4 + lwz r4,SL_IBAT7+4(r11) mtspr SPRN_IBAT7L,r4 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) +#endif /* Flush all TLBs */ lis r4,0x1000 diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 466216506eb2f4bfa7b6b94ed89140914b1ea682..e6982ab2181663037b210b24f697d8a1bb52e269 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -89,7 +89,7 @@ ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s if ( (unsigned long)n >= 4096 ) { unsigned long __user *buffer = (unsigned long __user *)n; - if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long)) + if (!access_ok(buffer, 5*sizeof(unsigned long)) || __get_user(n, buffer) || __get_user(inp, ((fd_set __user * __user *)(buffer+1))) || __get_user(outp, ((fd_set __user 
* __user *)(buffer+2))) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 70f145e0248776d6692439d60e4fe6f10097abc0..5449e76cf2dfd5d37bc213fdfc0e192a42ff15ec 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -235,7 +235,7 @@ static u64 scan_dispatch_log(u64 stop_tb) * Accumulate stolen time by scanning the dispatch trace log. * Called on entry from user mode. */ -void accumulate_stolen_time(void) +void notrace accumulate_stolen_time(void) { u64 sst, ust; unsigned long save_irq_soft_mask = irq_soft_mask_return(); @@ -929,6 +929,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec; vdso_data->stamp_xtime = xt; vdso_data->stamp_sec_fraction = frac_sec; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++(vdso_data->tb_update_count); } @@ -984,10 +985,14 @@ static void register_decrementer_clockevent(int cpu) *dec = decrementer_clockevent; dec->cpumask = cpumask_of(cpu); + clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max); + printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", dec->name, dec->mult, dec->shift, cpu); - clockevents_register_device(dec); + /* Set values for KVM, see kvm_emulate_dec() */ + decrementer_clockevent.mult = dec->mult; + decrementer_clockevent.shift = dec->shift; } static void enable_large_decrementer(void) @@ -1035,18 +1040,7 @@ static void __init set_decrementer_max(void) static void __init init_decrementer_clockevent(void) { - int cpu = smp_processor_id(); - - clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4); - - decrementer_clockevent.max_delta_ns = - clockevent_delta2ns(decrementer_max, &decrementer_clockevent); - decrementer_clockevent.max_delta_ticks = decrementer_max; - decrementer_clockevent.min_delta_ns = - clockevent_delta2ns(2, &decrementer_clockevent); - decrementer_clockevent.min_delta_ticks = 2; - - register_decrementer_clockevent(cpu); + register_decrementer_clockevent(smp_processor_id()); } void secondary_cpu_time_init(void) diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index d22d8bafb6434cab546d17cf2c6aa5f04a79eea3..d868ba42032f2810a053675f1366f51e3b00c652 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -7,7 +7,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifdef CONFIG_FUNCTION_TRACER # do not trace tracer code -CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) endif obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4bfbb54dee517de53fd4e9e09c4d0ae1a2cdfdae..19ef4f5866b6a305f1c96c5f74d854cd960734dd 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -697,7 +697,6 @@ int ftrace_disable_ftrace_graph_caller(void) */ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { - struct ftrace_graph_ent trace; unsigned long return_hooker; if (unlikely(ftrace_graph_is_dead())) @@ -708,18 +707,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) return_hooker = ppc_function_entry(return_to_handler); - trace.func = ip; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - goto out; - - if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, - NULL) == -EBUSY) - goto out; - - parent = 
return_hooker; + if (!function_graph_enter(parent, ip, 0, NULL)) + parent = return_hooker; out: return parent; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c85adb8582713075426ab05b537c6910d19d467a..ee91b5e22951099c90b5a4d0f8b223edf8d82f5b 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs) if (debugger(regs)) goto out; + kmsg_dump(KMSG_DUMP_OOPS); /* * A system reset is a request to dump, so we always send * it through the crashdump code (if fadump or kdump are @@ -767,12 +768,17 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; + if (!nested) + nmi_exit(); + die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) nmi_panic(regs, "Unrecoverable Machine check"); + return; + bail: if (!nested) nmi_exit(); @@ -836,7 +842,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs) addr = (__force const void __user *)ea; /* Check it */ - if (!access_ok(VERIFY_READ, addr, 16)) { + if (!access_ok(addr, 16)) { pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" " instr=%08x addr=%016lx\n", smp_processor_id(), current->comm, current->pid, @@ -1540,8 +1546,8 @@ void alignment_exception(struct pt_regs *regs) void StackOverflow(struct pt_regs *regs) { - printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", - current, regs->gpr[1]); + pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", + current->comm, task_pid_nr(current), regs->gpr[1]); debugger(regs); show_regs(regs); panic("kernel stack overflow"); diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 3745113fcc652d8ca3e66692aaab7d87f7ea9338..2a7eb5452aba79fc4d2b2c709642b0e71cfc6130 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 769c2624e0a6b4dc162d12781d29198a84d51786..49eecd28aef1e49c90a8a9237ffebdc91963eac9 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) * can be used, r7 contains NSEC_PER_SEC. */ - lwz r5,WTOM_CLOCK_SEC(r9) + lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9) lwz r6,WTOM_CLOCK_NSEC(r9) /* We now have our offset in r5,r6. 
We create a fake dependency @@ -139,6 +139,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc @@ -159,12 +160,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl __get_datapage@local /* get data page */ + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpli cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l stw r3,TSPC32_TV_SEC(r4) stw r5,TSPC32_TV_NSEC(r4) blr diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S index 69c5af2b3c96cfd46a85a2c250a9727e585fcabf..228a4a2383d69e52d5dd4a2d3f779004ce5d6ef1 100644 --- a/arch/powerpc/kernel/vdso64/cacheflush.S +++ b/arch/powerpc/kernel/vdso64/cacheflush.S @@ -39,7 +39,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10) - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ crclr cr0*4+so beqlr /* nothing to do? */ mtctr r8 @@ -56,7 +56,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) subf r8,r6,r4 /* compute length */ add r8,r8,r5 lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10) - srw. r8,r8,r9 /* compute line count */ + srd. r8,r8,r9 /* compute line count */ crclr cr0*4+so beqlr /* nothing to do? */ mtctr r8 diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index abf17feffe4048af18382a075b01553f7e9be00e..bf966869151169b6b101675b4822609375058019 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index c002adcc694c66966c67fceba4db34a1bf685059..020e90d1079ddc2c9c0b1f40e76e37e5f3abdbb2 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) * At this point, r4,r5 contain our sec/nsec values. */ - lwa r6,WTOM_CLOCK_SEC(r3) + ld r6,WTOM_CLOCK_SEC(r3) lwa r9,WTOM_CLOCK_NSEC(r3) /* We now have our result in r6,r9. We create a fake dependency @@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) bne cr6,75f /* CLOCK_MONOTONIC_COARSE */ - lwa r6,WTOM_CLOCK_SEC(r3) + ld r6,WTOM_CLOCK_SEC(r3) lwa r9,WTOM_CLOCK_NSEC(r3) /* check if counter has updated */ @@ -169,6 +169,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc @@ -189,12 +190,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) cror cr0*4+eq,cr0*4+eq,cr1*4+eq bne cr0,99f + mflr r12 + .cfi_register lr,r12 + bl V_LOCAL_FUNC(__get_datapage) + lwz r5, CLOCK_HRTIMER_RES(r3) + mtlr r12 li r3,0 cmpldi cr0,r4,0 crclr cr0*4+so beqlr - lis r5,CLOCK_REALTIME_RES@h - ori r5,r5,CLOCK_REALTIME_RES@l std r3,TSPC64_TV_SEC(r4) std r5,TSPC64_TV_NSEC(r4) blr diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 07ae018e550e1110e2743c6e63f8e065d59db42a..1d815d0adaf3e02716028337bfd4fc73421b2dfd 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -140,6 +140,20 @@ SECTIONS __stop___stf_entry_barrier_fixup = .; } + . 
= ALIGN(8); + __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { + __start___uaccess_flush_fixup = .; + *(__uaccess_flush_fixup) + __stop___uaccess_flush_fixup = .; + } + + . = ALIGN(8); + __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { + __start___entry_flush_fixup = .; + *(__entry_flush_fixup) + __stop___entry_flush_fixup = .; + } + . = ALIGN(8); __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { __start___stf_exit_barrier_fixup = .; @@ -164,6 +178,14 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ +#ifdef CONFIG_PPC_FSL_BOOK3E + . = ALIGN(8); + __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { + __start__btb_flush_fixup = .; + *(__btb_flush_fixup) + __stop__btb_flush_fixup = .; + } +#endif EXCEPTION_TABLE(0) NOTES :kernel :notes @@ -296,6 +318,10 @@ SECTIONS #ifdef CONFIG_PPC32 .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA +#ifdef CONFIG_UBSAN + *(.data..Lubsan_data*) + *(.data..Lubsan_type*) +#endif *(.data.rel*) *(SDATA_MAIN) *(.sdata2) diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 3c6ab22a0c4e3bf4d3e71fe044c33c4f14eb4b87..af3c15a1d41eb159670be90218c12b9966afa7f1 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */ -static DEFINE_PER_CPU(struct timer_list, wd_timer); +static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer); static DEFINE_PER_CPU(u64, wd_timer_tb); /* SMP checker bits */ @@ -293,21 +293,21 @@ void soft_nmi_interrupt(struct pt_regs *regs) nmi_exit(); } -static void wd_timer_reset(unsigned int cpu, struct timer_list *t) -{ - t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms); - if (wd_timer_period_ms > 1000) - t->expires = __round_jiffies_up(t->expires, cpu); - add_timer_on(t, cpu); -} - -static void wd_timer_fn(struct timer_list *t) +static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { int cpu = smp_processor_id(); + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + return HRTIMER_NORESTART; + + if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) + return HRTIMER_NORESTART; + watchdog_timer_interrupt(cpu); - wd_timer_reset(cpu, t); + hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms)); + + return HRTIMER_RESTART; } void arch_touch_nmi_watchdog(void) @@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void) } EXPORT_SYMBOL(arch_touch_nmi_watchdog); -static void start_watchdog_timer_on(unsigned int cpu) -{ - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); - - per_cpu(wd_timer_tb, cpu) = get_tb(); - - timer_setup(t, wd_timer_fn, TIMER_PINNED); - wd_timer_reset(cpu, t); -} - -static void stop_watchdog_timer_on(unsigned int cpu) -{ - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); - - del_timer_sync(t); -} - -static int start_wd_on_cpu(unsigned int cpu) +static void start_watchdog(void *arg) { + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer); + int cpu = smp_processor_id(); unsigned long flags; if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { WARN_ON(1); - return 0; + return; } if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) - return 0; + return; if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) - return 0; + return; wd_smp_lock(&flags); cpumask_set_cpu(cpu, &wd_cpus_enabled); @@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu) } wd_smp_unlock(&flags); - start_watchdog_timer_on(cpu); + 
*this_cpu_ptr(&wd_timer_tb) = get_tb(); - return 0; + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; + hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms), + HRTIMER_MODE_REL_PINNED); } -static int stop_wd_on_cpu(unsigned int cpu) +static int start_watchdog_on_cpu(unsigned int cpu) { + return smp_call_function_single(cpu, start_watchdog, NULL, true); +} + +static void stop_watchdog(void *arg) +{ + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer); + int cpu = smp_processor_id(); unsigned long flags; if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) - return 0; /* Can happen in CPU unplug case */ + return; /* Can happen in CPU unplug case */ - stop_watchdog_timer_on(cpu); + hrtimer_cancel(hrtimer); wd_smp_lock(&flags); cpumask_clear_cpu(cpu, &wd_cpus_enabled); wd_smp_unlock(&flags); wd_smp_clear_cpu_pending(cpu, get_tb()); +} - return 0; +static int stop_watchdog_on_cpu(unsigned int cpu) +{ + return smp_call_function_single(cpu, stop_watchdog, NULL, true); } static void watchdog_calc_timeouts(void) @@ -402,7 +400,7 @@ void watchdog_nmi_stop(void) int cpu; for_each_cpu(cpu, &wd_cpus_enabled) - stop_wd_on_cpu(cpu); + stop_watchdog_on_cpu(cpu); } void watchdog_nmi_start(void) @@ -411,7 +409,7 @@ void watchdog_nmi_start(void) watchdog_calc_timeouts(); for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) - start_wd_on_cpu(cpu); + start_watchdog_on_cpu(cpu); } /* @@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void) err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", - start_wd_on_cpu, stop_wd_on_cpu); + start_watchdog_on_cpu, + stop_watchdog_on_cpu); if (err < 0) { pr_warn("could not be initialized"); return err; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89e917d4b2066074054fedb2d5cc18..cc05f346e04219d0c737948c463d890f55bf6deb 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) { if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { ulong pc = kvmppc_get_pc(vcpu); + ulong lr = kvmppc_get_lr(vcpu); if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; } } @@ -840,6 +843,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) #ifdef CONFIG_PPC64 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); INIT_LIST_HEAD(&kvm->arch.rtas_tokens); + mutex_init(&kvm->arch.rtas_token_lock); #endif return kvm->arch.kvm_ops->init_vm(kvm); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 68e14afecac85b1d1fd0eff661f6a3aabe0217a6..18799f3d217dba222ff5595e38954bbb947a12c4 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -744,12 +744,15 @@ void kvmppc_rmap_reset(struct kvm *kvm) srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { + /* Mutual exclusion with kvm_unmap_hva_range etc. */ + spin_lock(&kvm->mmu_lock); /* * This assumes it is acceptable to lose reference and * change bits across a reset. 
*/ memset(memslot->arch.rmap, 0, memslot->npages * sizeof(*memslot->arch.rmap)); + spin_unlock(&kvm->mmu_lock); } srcu_read_unlock(&kvm->srcu, srcu_idx); } @@ -1741,7 +1744,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf, int first_pass; unsigned long hpte[2]; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; if (kvm_is_radix(kvm)) return 0; @@ -1841,7 +1844,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, int mmu_ready; int pshift; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; if (kvm_is_radix(kvm)) return -EINVAL; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 9a3f2646ecc7e87bd24c03c02198eb9f45763725..0c6478f3414bbfcc135c74f84d4d21aeeb42e9fe 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -58,33 +58,34 @@ static unsigned long kvmppc_stt_pages(unsigned long tce_pages) static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc) { - long ret = 0; + long locked_vm, ret = 0; if (!current || !current->mm) return ret; /* process exited */ down_write(¤t->mm->mmap_sem); + locked_vm = atomic_long_read(¤t->mm->locked_vm); if (inc) { unsigned long locked, lock_limit; - locked = current->mm->locked_vm + stt_pages; + locked = locked_vm + stt_pages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - current->mm->locked_vm += stt_pages; + atomic_long_add(stt_pages, ¤t->mm->locked_vm); } else { - if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm)) - stt_pages = current->mm->locked_vm; + if (WARN_ON_ONCE(stt_pages > locked_vm)) + stt_pages = locked_vm; - current->mm->locked_vm -= stt_pages; + atomic_long_sub(stt_pages, ¤t->mm->locked_vm); } pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid, inc ? '+' : '-', stt_pages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, + atomic_long_read(¤t->mm->locked_vm) << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), ret ? 
" - exceeded" : ""); @@ -401,7 +402,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, long ret; if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir))) - return H_HARDWARE; + return H_TOO_HARD; if (dir == DMA_NONE) return H_SUCCESS; @@ -449,15 +450,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, return H_TOO_HARD; if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (mm_iommu_mapped_inc(mem)) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg(tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); - return H_HARDWARE; + return H_TOO_HARD; } if (dir != DMA_NONE) @@ -602,8 +603,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (kvmppc_gpa_to_ua(vcpu->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) - return H_PARAMETER; + &ua, NULL)) { + ret = H_PARAMETER; + goto unlock_exit; + } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6821ead4b4ebc128a9a772a712feefb213b1bad0..d258ed4ef77c338a7e7052e259e134071f269cef 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { @@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, rmap = (void *) vmalloc_to_phys(rmap); if (WARN_ON_ONCE_RM(!rmap)) - return H_HARDWARE; + return H_TOO_HARD; /* * Synchronize with the MMU notifier callbacks in @@ -528,8 +528,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, ua = 0; if (kvmppc_gpa_to_ua(vcpu->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) - return H_PARAMETER; + &ua, NULL)) { + ret = H_PARAMETER; + goto unlock_exit; + } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 36b11c5a0dbb968444da85b230cbc7020f69f583..2654df220d05487cfd27394b0bb8360e2ab11c20 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; vcpu->arch.tar_tm = vcpu->arch.tar; vcpu->arch.lr_tm = vcpu->arch.regs.link; - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.vrsave_tm = vcpu->arch.vrsave; } @@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; vcpu->arch.tar = vcpu->arch.tar_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.vrsave = vcpu->arch.vrsave_tm; } @@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) uint64_t texasr; /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << 
CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) tm_abort(ra_val); /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { preempt_disable(); - vcpu->arch.cr = (CR0_TBEGIN_FAILURE | - (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); + vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE | + (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT))); vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e63194acf20174576642b5b2454059d..3ae3e8d141e3e19f593fbc5acfe0392c3f646d07 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", - vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); @@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id) { - struct kvm_vcpu *ret; - - mutex_lock(&kvm->lock); - ret = kvm_get_vcpu_by_id(kvm, id); - mutex_unlock(&kvm->lock); - return ret; + return kvm_get_vcpu_by_id(kvm, id); } static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) @@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, struct kvmppc_vcore *vc = vcpu->arch.vcore; u64 mask; - mutex_lock(&kvm->lock); spin_lock(&vc->lock); /* * If ILE (interrupt little-endian) has changed, update the @@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, mask &= 0xFFFFFFFF; vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); - mutex_unlock(&kvm->lock); } static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, @@ -1414,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.pspb); break; case KVM_REG_PPC_DPDES: - *val = get_reg_val(id, vcpu->arch.vcore->dpdes); + /* + * On POWER9, where we are emulating msgsndp etc., + * we return 1 bit for each vcpu, which can come from + * either vcore->dpdes or doorbell_request. + * On POWER8, doorbell_request is 0. 
+ */ + *val = get_reg_val(id, vcpu->arch.vcore->dpdes | + vcpu->arch.doorbell_request); break; case KVM_REG_PPC_VTB: *val = get_reg_val(id, vcpu->arch.vcore->vtb); @@ -2557,7 +2557,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads) if (!spin_trylock(&pvc->lock)) continue; prepare_threads(pvc); - if (!pvc->n_runnable) { + if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { list_del_init(&pvc->preempt_list); if (pvc->runner == NULL) { pvc->vcore_state = VCORE_INACTIVE; @@ -2578,15 +2578,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads) spin_unlock(&lp->lock); } -static bool recheck_signals(struct core_info *cip) +static bool recheck_signals_and_mmu(struct core_info *cip) { int sub, i; struct kvm_vcpu *vcpu; + struct kvmppc_vcore *vc; - for (sub = 0; sub < cip->n_subcores; ++sub) - for_each_runnable_thread(i, vcpu, cip->vc[sub]) + for (sub = 0; sub < cip->n_subcores; ++sub) { + vc = cip->vc[sub]; + if (!vc->kvm->arch.mmu_ready) + return true; + for_each_runnable_thread(i, vcpu, vc) if (signal_pending(vcpu->arch.run_task)) return true; + } return false; } @@ -2807,7 +2812,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) local_irq_disable(); hard_irq_disable(); if (lazy_irq_pending() || need_resched() || - recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) { + recheck_signals_and_mmu(&core_info)) { local_irq_enable(); vc->vcore_state = VCORE_INACTIVE; /* Unlock all except the primary vcore */ @@ -3820,12 +3825,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { + kvmppc_rmap_reset(kvm); + kvm->arch.process_table = 0; + /* Mutual exclusion with kvm_unmap_hva_range etc. */ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 0; + spin_unlock(&kvm->mmu_lock); kvmppc_free_radix(kvm); kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); - kvmppc_rmap_reset(kvm); - kvm->arch.radix = 0; - kvm->arch.process_table = 0; return 0; } @@ -3838,10 +3846,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) if (err) return err; + kvmppc_rmap_reset(kvm); + /* Mutual exclusion with kvm_unmap_hva_range etc. 
*/ + spin_lock(&kvm->mmu_lock); + kvm->arch.radix = 1; + spin_unlock(&kvm->mmu_lock); kvmppc_free_hpt(&kvm->arch.hpt); kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); - kvm->arch.radix = 1; return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index a67cf1cdeda400cd659ba0858ab9040d10ced4c8..02ab86be9dedd4adf79a66eafe3261fce1c6ecc2 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -434,6 +434,37 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r) (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); } +static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid) +{ + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + /* Radix flush for a hash guest */ + + unsigned long rb,rs,prs,r,ric; + + rb = PPC_BIT(52); /* IS = 2 */ + rs = 0; /* lpid = 0 */ + prs = 0; /* partition scoped */ + r = 1; /* radix format */ + ric = 0; /* RIC_FLSUH_TLB */ + + /* + * Need the extra ptesync to make sure we don't + * re-order the tlbie + */ + asm volatile("ptesync": : :"memory"); + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) + : : "r"(rb), "i"(r), "i"(prs), + "i"(ric), "r"(rs) : "memory"); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { + asm volatile("ptesync": : :"memory"); + asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : + "r" (rb_value), "r" (lpid)); + } +} + static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, long npages, int global, bool need_sync) { @@ -452,16 +483,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, "r" (rbvalues[i]), "r" (kvm->arch.lpid)); } - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { - /* - * Need the extra ptesync to make sure we don't - * re-order the tlbie - */ - asm volatile("ptesync": : :"memory"); - asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : - "r" (rbvalues[0]), "r" (kvm->arch.lpid)); - } - + fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } else { if (need_sync) diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 758d1d23215e94b2feb25cf9a53fd778a4785f44..aaafb9f080d59864684bbbbb5e1968b5dabbcf83 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -61,7 +61,7 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) hcpu = hcore << threads_shift; kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu; smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION); - kvmppc_set_host_ipi(hcpu, 1); + kvmppc_set_host_ipi(hcpu); smp_mb(); kvmhv_rm_send_ipi(hcpu); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 1d14046124a01afffda02a70d73e7666b7b4b6bc..7fe3077a1ef642465c9b2e6724087948790c9780 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -56,6 +57,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_DAWR (SFS-56) #define STACK_SLOT_DAWRX (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) +#define STACK_SLOT_AMR (SFS-80) +#define STACK_SLOT_UAMOR (SFS-88) /* * Call kvmppc_hv_entry in real mode. 
@@ -760,11 +763,9 @@ BEGIN_FTR_SECTION mfspr r5, SPRN_TIDR mfspr r6, SPRN_PSSCR mfspr r7, SPRN_PID - mfspr r8, SPRN_IAMR std r5, STACK_SLOT_TID(r1) std r6, STACK_SLOT_PSSCR(r1) std r7, STACK_SLOT_PID(r1) - std r8, STACK_SLOT_IAMR(r1) mfspr r5, SPRN_HFSCR std r5, STACK_SLOT_HFSCR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) @@ -772,11 +773,18 @@ BEGIN_FTR_SECTION mfspr r5, SPRN_CIABR mfspr r6, SPRN_DAWR mfspr r7, SPRN_DAWRX + mfspr r8, SPRN_IAMR std r5, STACK_SLOT_CIABR(r1) std r6, STACK_SLOT_DAWR(r1) std r7, STACK_SLOT_DAWRX(r1) + std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mfspr r5, SPRN_AMR + std r5, STACK_SLOT_AMR(r1) + mfspr r6, SPRN_UAMOR + std r6, STACK_SLOT_UAMOR(r1) + BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -1202,7 +1210,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) - lwz r6, VCPU_CR(r4) + ld r6, VCPU_CR(r4) mtlr r5 mtcr r6 @@ -1313,7 +1321,7 @@ kvmppc_interrupt_hv: std r3, VCPU_GPR(R12)(r9) /* CR is in the high half of r12 */ srdi r4, r12, 32 - stw r4, VCPU_CR(r9) + std r4, VCPU_CR(r9) BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) @@ -1552,6 +1560,10 @@ mc_cont: 1: #endif /* CONFIG_KVM_XICS */ + /* Possibly flush the link stack here. */ +1: nop + patch_site 1b patch__call_kvm_flush_link_stack + /* For hash guest, read the guest SLB and save it away */ ld r5, VCPU_KVM(r9) lbz r0, KVM_RADIX(r5) @@ -1713,22 +1725,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_PSPB, r0 mtspr SPRN_WORT, r0 BEGIN_FTR_SECTION - mtspr SPRN_IAMR, r0 mtspr SPRN_TCSCR, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ li r0, 1 sldi r0, r0, 31 mtspr SPRN_MMCRS, r0 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) -8: - /* Save and reset AMR and UAMOR before turning on the MMU */ + /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ + ld r8, STACK_SLOT_IAMR(r1) + mtspr SPRN_IAMR, r8 + +8: /* Power7 jumps back in here */ mfspr r5,SPRN_AMR mfspr r6,SPRN_UAMOR std r5,VCPU_AMR(r9) std r6,VCPU_UAMOR(r9) - li r6,0 - mtspr SPRN_AMR,r6 + ld r5,STACK_SLOT_AMR(r1) + ld r6,STACK_SLOT_UAMOR(r1) + mtspr SPRN_AMR, r5 mtspr SPRN_UAMOR, r6 /* Switch DSCR back to host value */ @@ -1897,11 +1912,9 @@ BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) ld r7, STACK_SLOT_PID(r1) - ld r8, STACK_SLOT_IAMR(r1) mtspr SPRN_TIDR, r5 mtspr SPRN_PSSCR, r6 mtspr SPRN_PID, r7 - mtspr SPRN_IAMR, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) #ifdef CONFIG_PPC_RADIX_MMU @@ -2099,6 +2112,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) mtlr r0 blr +.balign 32 +.global kvm_flush_link_stack +kvm_flush_link_stack: + /* Save LR into r0 */ + mflr r0 + + /* Flush the link stack. On Power8 it's up to 32 entries in size. */ + .rept 32 + bl .+4 + .endr + + /* And on Power9 it's up to 64. */ +BEGIN_FTR_SECTION + .rept 32 + bl .+4 + .endr +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + + /* Restore LR */ + mtlr r0 + blr + + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Softpatch interrupt for transactional memory emulation cases @@ -2895,29 +2931,39 @@ kvm_cede_prodded: kvm_cede_exit: ld r9, HSTATE_KVM_VCPU(r13) #ifdef CONFIG_KVM_XICS - /* Abort if we still have a pending escalation */ + /* are we using XIVE with single escalation? 
*/ + ld r10, VCPU_XIVE_ESC_VADDR(r9) + cmpdi r10, 0 + beq 3f + li r6, XIVE_ESB_SET_PQ_00 + /* + * If we still have a pending escalation, abort the cede, + * and we must set PQ to 10 rather than 00 so that we don't + * potentially end up with two entries for the escalation + * interrupt in the XIVE interrupt queue. In that case + * we also don't want to set xive_esc_on to 1 here in + * case we race with xive_esc_irq(). + */ lbz r5, VCPU_XIVE_ESC_ON(r9) cmpwi r5, 0 - beq 1f + beq 4f li r0, 0 stb r0, VCPU_CEDED(r9) -1: /* Enable XIVE escalation */ - li r5, XIVE_ESB_SET_PQ_00 + li r6, XIVE_ESB_SET_PQ_10 + b 5f +4: li r0, 1 + stb r0, VCPU_XIVE_ESC_ON(r9) + /* make sure store to xive_esc_on is seen before xive_esc_irq runs */ + sync +5: /* Enable XIVE escalation */ mfmsr r0 andi. r0, r0, MSR_DR /* in real mode? */ beq 1f - ld r10, VCPU_XIVE_ESC_VADDR(r9) - cmpdi r10, 0 - beq 3f - ldx r0, r10, r5 + ldx r0, r10, r6 b 2f 1: ld r10, VCPU_XIVE_ESC_RADDR(r9) - cmpdi r10, 0 - beq 3f - ldcix r0, r10, r5 + ldcix r0, r10, r6 2: sync - li r0, 1 - stb r0, VCPU_XIVE_ESC_ON(r9) #endif /* CONFIG_KVM_XICS */ 3: b guest_exit_cont diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 008285058f9b554616cb52fdc53a85843826ae23..31cd0f327c8a2d5af48401001be86a1747b8ae51 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -130,8 +130,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) return RESUME_GUEST; } /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); /* L=1 => tresume, L=0 => tsuspend */ if (instr & (1 << 21)) { if (MSR_TM_SUSPENDED(msr)) @@ -174,8 +174,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_from_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; @@ -204,8 +204,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_to_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | - (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | + (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29); vcpu->arch.shregs.msr = msr | MSR_TS_S; return RESUME_GUEST; } diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index b2c7c6fca4f96e5a315371e39ec9d25dc220da8d..3cf5863bc06e8513d5cb7d359f401846ba07aab5 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) if (instr & (1 << 21)) vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; /* Set CR0 to 0b0010 */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000; + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | + 0x20000000; return 1; } @@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ vcpu->arch.regs.nip = vcpu->arch.tfhar; copy_from_checkpoint(vcpu); - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; + vcpu->arch.regs.ccr = 
(vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 614ebb4261f76593bb07f52f2fd0a2db7307d4fe..de9702219dee9442e23a59eeced647d99b476465 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; - svcpu->cr = vcpu->arch.cr; + svcpu->cr = vcpu->arch.regs.ccr; svcpu->xer = vcpu->arch.regs.xer; svcpu->ctr = vcpu->arch.regs.ctr; svcpu->lr = vcpu->arch.regs.link; @@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; - vcpu->arch.cr = svcpu->cr; + vcpu->arch.regs.ccr = svcpu->cr; vcpu->arch.regs.xer = svcpu->xer; vcpu->arch.regs.ctr = svcpu->ctr; vcpu->arch.regs.link = svcpu->lr; diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 2d3b2b1cc272b0989858bfb4e945567ddef96369..a56c56aa829c14b9821bcc652697a4462190d8de 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { if (rtas_name_matches(d->handler->name, name)) { @@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token) bool found; int i; - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.rtas_token_lock); list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { if (d->token == token) @@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.rtas_token_lock); if (args.token) rc = rtas_token_define(kvm, args.name, args.token); else rc = rtas_token_undefine(kvm, args.name); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.rtas_token_lock); return rc; } @@ -243,9 +243,20 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) * value so we can restore it on the way out. */ orig_rets = args.rets; + if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { + /* + * Don't overflow our args array: ensure there is room for + * at least rets[0] (even if the call specifies 0 nret). + * + * Each handler must then check for the correct nargs and nret + * values, but they may always return failure in rets[0]. + */ + rc = -EINVAL; + goto fail; + } args.rets = &args.args[be32_to_cpu(args.nargs)]; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.rtas_token_lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { @@ -256,7 +267,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) } } - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); if (rc == 0) { args.rets = orig_rets; @@ -270,9 +281,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) fail: /* * We only get here if the guest has called RTAS with a bogus - * args pointer. That means we can't get to the args, and so we - * can't fail the RTAS call. So fail right out to userspace, - * which should kill the guest. + * args pointer or nargs/nret values that would overflow the + * array. 
That means we can't get to the args, and so we can't + * fail the RTAS call. So fail right out to userspace, which + * should kill the guest. + * + * SLOF should actually pass the hcall return value from the + * rtas handler call in r3, so enter_rtas could be modified to + * return a failure indication in r3 and we could return such + * errors to the guest rather than failing to host userspace. + * However old guests that don't test for failure could then + * continue silently after errors, so for now we won't do this. */ return rc; } @@ -282,8 +301,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm) { struct rtas_token_definition *d, *tmp; - lockdep_assert_held(&kvm->lock); - list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { list_del(&d->list); kfree(d); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 30c2eb7669549a1076b1ce9776c55a8babfc7b21..031f07f048afde7c8761f2eaa8eab4e2f357e278 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1037,20 +1037,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) /* Mask the VP IPI */ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); - /* Disable the VP */ - xive_native_disable_vp(xc->vp_id); - - /* Free the queues & associated interrupts */ + /* Free escalations */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { - struct xive_q *q = &xc->queues[i]; - - /* Free the escalation irq */ if (xc->esc_virq[i]) { free_irq(xc->esc_virq[i], vcpu); irq_dispose_mapping(xc->esc_virq[i]); kfree(xc->esc_virq_names[i]); } - /* Free the queue */ + } + + /* Disable the VP */ + xive_native_disable_vp(xc->vp_id); + + /* Free the queues */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + xive_native_disable_queue(xc->vp_id, q, i); if (q->qpage) { free_pages((unsigned long)q->qpage, @@ -1723,7 +1725,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) { xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); xive_native_configure_irq(hw_num, 0, MASKED, 0); - xive_cleanup_irq_data(xd); } static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) @@ -1737,9 +1738,10 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) continue; kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); + xive_cleanup_irq_data(&state->ipi_data); xive_native_free_irq(state->ipi_number); - /* Pass-through, cleanup too */ + /* Pass-through, cleanup too but keep IRQ hw data */ if (state->pt_number) kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 81bd8a07aa51f0c393cab9081e77bd316b1f6536..4e5081e584098b38f64a8774b7e64b6352e1f674 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -75,6 +75,10 @@ PPC_LL r1, VCPU_HOST_STACK(r4) PPC_LL r2, HOST_R2(r1) +START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_PID lwz r8, VCPU_HOST_PID(r4) PPC_LL r11, VCPU_SHARED(r4) @@ -182,7 +186,7 @@ */ PPC_LL r4, PACACURRENT(r13) PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) - stw r10, VCPU_CR(r4) + PPC_STL r10, VCPU_CR(r4) PPC_STL r11, VCPU_GPR(R4)(r4) PPC_STL r5, VCPU_GPR(R5)(r4) PPC_STL r6, VCPU_GPR(R6)(r4) @@ -292,7 +296,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, THREAD_NORMSAVE(0)(r10) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r13, VCPU_CR(r11) + PPC_STL r13, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R10)(r11) PPC_LL r3, 
THREAD_NORMSAVE(2)(r10) @@ -319,7 +323,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, GPR9(r8) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r9, VCPU_CR(r11) + PPC_STL r9, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R8)(r11) PPC_LL r3, GPR10(r8) @@ -643,7 +647,7 @@ lightweight_exit: PPC_LL r3, VCPU_LR(r4) PPC_LL r5, VCPU_XER(r4) PPC_LL r6, VCPU_CTR(r4) - lwz r7, VCPU_CR(r4) + PPC_LL r7, VCPU_CR(r4) PPC_LL r8, VCPU_PC(r4) PPC_LD(r9, VCPU_SHARED_MSR, r11) PPC_LL r0, VCPU_GPR(R0)(r4) diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 3f8189eb56ed038f2be56f478f8c6e6925c11f27..fde1de08b4d77e7321bfa0dbf82fc3c6c6565859 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va vcpu->arch.pwrmgtcr0 = spr_val; break; + case SPRN_BUCSR: + /* + * If we are here, it means that we have already flushed the + * branch predictor, so just return to guest. + */ + break; + /* extra exceptions */ #ifdef CONFIG_SPE_POSSIBLE case SPRN_IVOR32: diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 75dce1ef3bc83473e23689ded4c7bfdf72637a7e..f91b1309a0a861688c9c794f691eacaf2e8351c2 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; - vcpu->arch.regs.ccr = vcpu->arch.cr; if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index eba5756d5b417db04c1254987c7fa2485a5553bd..b9df096bfae9656a8f112d3a139038cbd5583489 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) return !!(v->arch.pending_exceptions) || kvm_request_pending(v); } +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_runnable(vcpu); +} + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return false; @@ -518,7 +523,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PPC_UNSET_IRQ: case KVM_CAP_PPC_IRQ_LEVEL: case KVM_CAP_ENABLE_CAP: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_ONE_REG: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: @@ -543,8 +547,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: case KVM_CAP_SPAPR_TCE_64: - /* fallthrough */ + r = 1; + break; case KVM_CAP_SPAPR_TCE_VFIO: + r = !!cpu_has_feature(CPU_FTR_HVMODE); + break; case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: @@ -629,6 +636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_PPC_GET_SMMU_INFO: r = 1; @@ -1984,9 +1994,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, { struct kvm_enable_cap cap; r = -EFAULT; - vcpu_load(vcpu); if (copy_from_user(&cap, argp, sizeof(cap))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); vcpu_put(vcpu); break; @@ -2010,9 +2020,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_DIRTY_TLB: { struct kvm_dirty_tlb dirty; r = -EFAULT; - vcpu_load(vcpu); if 
(copy_from_user(&dirty, argp, sizeof(dirty))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); vcpu_put(vcpu); break; @@ -2079,8 +2089,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, - struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap) { int r; @@ -2260,15 +2270,6 @@ long kvm_arch_vm_ioctl(struct file *filp, break; } - case KVM_ENABLE_CAP: - { - struct kvm_enable_cap cap; - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - goto out; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_CREATE_SPAPR_TCE_64: { struct kvm_create_spapr_tce_64 create_tce_64; diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index 491b0f715d6bc2c345850645f2dbcd4700f6f182..ea1d7c80831900c4403443b8d836cd462998bf22 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -6,8 +6,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace /* * Tracepoint for guest mode entry. @@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests, #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace + #include diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h index ac640e81fdc5f43709858ad8b3dd5ec2eee58a8f..3837842986aa46ee4ac80f4759d1051d9221c87c 100644 --- a/arch/powerpc/kvm/trace_booke.h +++ b/arch/powerpc/kvm/trace_booke.h @@ -6,8 +6,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_booke -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace_booke #define kvm_trace_symbol_exit \ {0, "CRITICAL"}, \ @@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio, #endif /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_booke + #include diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index bcfe8a987f6a977e65f2e9c7a02962a7099c2a66..8a1e3b0047f190e53a64dfe57c9c88f9ac11d617 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -9,8 +9,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_hv -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace_hv #define kvm_trace_symbol_hcall \ {H_REMOVE, "H_REMOVE"}, \ @@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, #endif /* _TRACE_KVM_HV_H */ /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_hv + #include diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h index 2f9a8829552b946ee8a308a2e069ab6a5c9bb1ba..46a46d328fbf2237dd203d3c33d54dbe0db129b1 100644 --- a/arch/powerpc/kvm/trace_pr.h +++ b/arch/powerpc/kvm/trace_pr.h @@ -8,8 +8,6 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM kvm_pr -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE trace_pr TRACE_EVENT(kvm_book3s_reenter, TP_PROTO(int r, struct kvm_vcpu *vcpu), @@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit, #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE trace_pr + #include diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c index a0cb63fb76a1ada4b6ab727533c62e0cc681e33c..bb9307ce2440d2e45edd6352c8fa72fed6d6dce9 100644 --- a/arch/powerpc/lib/checksum_wrappers.c +++ b/arch/powerpc/lib/checksum_wrappers.c @@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, unsigned int csum; might_sleep(); + allow_read_from_user(src, len); *err_ptr = 0; @@ -37,7 +38,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, goto out; } - if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) { + if (unlikely((len < 0) || !access_ok(src, len))) { *err_ptr = -EFAULT; csum = (__force unsigned int)sum; goto out; @@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, } out: + prevent_read_from_user(src, len); return (__force __wsum)csum; } EXPORT_SYMBOL(csum_and_copy_from_user); @@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, unsigned int csum; might_sleep(); + allow_write_to_user(dst, len); *err_ptr = 0; @@ -78,7 +81,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, goto out; } - if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) { + if (unlikely((len < 0) || !access_ok(dst, len))) { *err_ptr = -EFAULT; csum = -1; /* invalid checksum */ goto out; @@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, } out: + prevent_write_to_user(dst, len); return (__force __wsum)csum; } EXPORT_SYMBOL(csum_and_copy_to_user); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index e613b02bb2f0c3817fef705602e0b95d4cdeba23..585d4d846dba062fa346511e3666303d181cfd43 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -232,6 +232,124 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) do_stf_exit_barrier_fixups(types); } +void do_uaccess_flush_fixups(enum l1d_flush_type types) +{ + unsigned int instrs[4], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___uaccess_flush_fixup); + end = PTRRELOC(&__stop___uaccess_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + instrs[3] = 0x4e800020; /* blr */ + + i = 0; + if (types == L1D_FLUSH_FALLBACK) { + instrs[3] = 0x60000000; /* nop */ + /* fallthrough to fallback flush */ + } + + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + patch_instruction((dest + 1), instrs[1]); + patch_instruction((dest + 2), instrs[2]); + patch_instruction((dest + 3), instrs[3]); + } + + printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" + : "unknown"); +} + +static int __do_entry_flush_fixups(void *data) +{ + enum l1d_flush_type types = *(enum l1d_flush_type *)data; + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___entry_flush_fixup); + end = PTRRELOC(&__stop___entry_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + i = 0; + if (types == L1D_FLUSH_FALLBACK) { + instrs[i++] = 0x7d4802a6; /* mflr r10 */ + instrs[i++] = 0x60000000; /* branch patched below */ + instrs[i++] = 0x7d4803a6; /* mtlr r10 */ + } + + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + if (types == L1D_FLUSH_FALLBACK) + patch_branch((dest + 1), (unsigned long)&entry_flush_fallback, + BRANCH_SET_LINK); + else + patch_instruction((dest + 1), instrs[1]); + + patch_instruction((dest + 2), instrs[2]); + } + + printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? "mttrig type" + : "unknown"); + + return 0; +} + +void do_entry_flush_fixups(enum l1d_flush_type types) +{ + /* + * The call to the fallback flush can not be safely patched in/out while + * other CPUs are executing it. So call __do_entry_flush_fixups() on one + * CPU while all other CPUs spin in the stop machine core with interrupts + * hard disabled. + */ + stop_machine(__do_entry_flush_fixups, &types, NULL); +} + void do_rfi_flush_fixups(enum l1d_flush_type types) { unsigned int instrs[3], *dest; @@ -347,6 +465,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } + +static void patch_btb_flush_section(long *curr) +{ + unsigned int *start, *end; + + start = (void *)curr + *curr; + end = (void *)curr + *(curr + 1); + for (; start < end; start++) { + pr_devel("patching dest %lx\n", (unsigned long)start); + patch_instruction(start, PPC_INST_NOP); + } +} + +void do_btb_flush_fixups(void) +{ + long *start, *end; + + start = PTRRELOC(&__start__btb_flush_fixup); + end = PTRRELOC(&__stop__btb_flush_fixup); + + for (; start < end; start += 2) + patch_btb_flush_section(start); +} #endif /* CONFIG_PPC_FSL_BOOK3E */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S index 844d8e774492e65929168bfff4d0655fa50dda74..b7f6f6e0b6e801c6cf0fbb1d11d5c0d53014fb4d 100644 --- a/arch/powerpc/lib/memcmp_64.S +++ b/arch/powerpc/lib/memcmp_64.S @@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp) beq .Lzero .Lcmp_rest_lt8bytes: - /* Here we have only less than 8 bytes to compare with. at least s1 - * Address is aligned with 8 bytes. - * The next double words are load and shift right with appropriate - * bits. + /* + * Here we have less than 8 bytes to compare. At least s1 is aligned to + * 8 bytes, but s2 may not be. 
We must make sure s2 + 7 doesn't cross a + * page boundary, otherwise we might read past the end of the buffer and + * trigger a page fault. We use 4K as the conservative minimum page + * size. If we detect that case we go to the byte-by-byte loop. + * + * Otherwise the next double word is loaded from s1 and s2, and shifted + * right to compare the appropriate bits. */ + clrldi r6,r4,(64-12) // r6 = r4 & 0xfff + cmpdi r6,0xff8 + bgt .Lshort + subfic r6,r5,8 slwi r6,r6,3 LD rA,0,r3 diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index d81568f783e5c7fe400719a84d6de082d4c21855..6c85319282744f7b6dcf87c1de5d987e89dc8be4 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -473,6 +473,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea, } u; nb = GETSIZE(op->type); + if (nb > sizeof(u)) + return -EINVAL; if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; @@ -523,6 +525,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea, } u; nb = GETSIZE(op->type); + if (nb > sizeof(u)) + return -EINVAL; if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; @@ -567,6 +571,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea, u8 b[sizeof(__vector128)]; } u = {}; + if (size > sizeof(u)) + return -EINVAL; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ @@ -594,6 +601,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea, u8 b[sizeof(__vector128)]; } u; + if (size > sizeof(u)) + return -EINVAL; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S index f69a6aab7bfbb5cb65fd082eaedb939d46c12149..1ddb26394e8ac5a1739b6f5aaf12512e5c3ed9f6 100644 --- a/arch/powerpc/lib/string_32.S +++ b/arch/powerpc/lib/string_32.S @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) -_GLOBAL(__clear_user) +_GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination @@ -87,4 +87,4 @@ _GLOBAL(__clear_user) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 56aac4c2202570c27da243049398b30c07f7cde5..ea3798f4f25f2f450521da30164e512a24952616 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -29,7 +29,7 @@ PPC64_CACHES: .section ".text" /** - * __clear_user: - Zero a block of memory in user space, with less checking. + * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. 
* @@ -70,7 +70,7 @@ err3; stb r0,0(r3) mr r3,r4 blr -_GLOBAL_TOC(__clear_user) +_GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 @@ -193,4 +193,4 @@ err1; dcbz 0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index cf77d755246db6e4a57a0d41a0dd0ebc7adfb4fa..5d53684c2ebd7a1c33aea1d81e1134b72c61baf6 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -79,7 +79,7 @@ void __init MMU_init_hw(void) for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { mtspr(SPRN_MD_CTR, ctr | (i << 8)); mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2); + mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); addr += LARGE_PAGE_SIZE_8M; mem -= LARGE_PAGE_SIZE_8M; diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index 876e2a3c79f201eb1a86187814efdf35d911e00b..8464c2c01c0ca2341c987957ad6158d1f470e65f 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -418,12 +419,13 @@ static void walk_pagetables(struct pg_state *st) unsigned int i; unsigned long addr; + addr = st->start_address; + /* * Traverse the linux pagetable structure and dump pages that are in * the hash pagetable. */ - for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { - addr = KERN_VIRT_START + i * PGDIR_SIZE; + for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { if (!pgd_none(*pgd) && !pgd_huge(*pgd)) /* pgd exists */ walk_pud(st, pgd, addr); @@ -472,9 +474,14 @@ static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { .seq = m, - .start_address = KERN_VIRT_START, .marker = address_markers, }; + + if (radix_enabled()) + st.start_address = PAGE_OFFSET; + else + st.start_address = KERN_VIRT_START; + /* Traverse kernel page tables */ walk_pagetables(&st); note_page(&st, 0, 0, 0); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d51cf5f4e45ef5ff3fd3e788a91315774f582612..41f1de87edb60953636c0a3a94331e223bfc7b44 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -221,7 +221,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, static bool bad_kernel_fault(bool is_exec, unsigned long error_code, unsigned long address) { - if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) { + if (is_exec) { printk_ratelimited(KERN_CRIT "kernel tried to execute" " exec-protected page (%lx) -" "exploit attempt? (uid: %d)\n", @@ -267,7 +267,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && - access_ok(VERIFY_READ, nip, sizeof(*nip))) { + access_ok(nip, sizeof(*nip))) { unsigned int inst; int res; @@ -562,13 +562,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, * case. */ if (unlikely(fault & VM_FAULT_RETRY)) { - /* We retry only once */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - /* - * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. 
- */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; if (!fatal_signal_pending(current)) goto retry; @@ -631,21 +625,22 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) switch (TRAP(regs)) { case 0x300: case 0x380: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "data at address 0x%08lx\n", regs->dar); + pr_alert("BUG: %s at 0x%08lx\n", + regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" : + "Unable to handle kernel data access", regs->dar); break; case 0x400: case 0x480: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "instruction fetch\n"); + pr_alert("BUG: Unable to handle kernel instruction fetch%s", + regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n"); break; case 0x600: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "unaligned access at address 0x%08lx\n", regs->dar); + pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n", + regs->dar); break; default: - printk(KERN_ALERT "Unable to handle kernel paging request for " - "unknown fault\n"); + pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n", + regs->dar); break; } printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 729f02df8290c4a9730d9b6af21abc0530c48072..42a48c5f7b7f5910490a0ff4ad7aed41b89e9ffd 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -115,6 +115,8 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) tlbiel_hash_set_isa300(0, is, 0, 2, 1); asm volatile("ptesync": : :"memory"); + + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } void hash__tlbiel_all(unsigned int action) @@ -140,8 +142,6 @@ void hash__tlbiel_all(unsigned int action) tlbiel_all_isa206(POWER7_TLB_SETS, is); else WARN(1, "%s called on pre-POWER7 CPU\n", __func__); - - asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } static inline unsigned long ___tlbie(unsigned long vpn, int psize, @@ -201,9 +201,32 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize, return va; } -static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize) +static inline void fixup_tlbie_vpn(unsigned long vpn, int psize, + int apsize, int ssize) { - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + /* Radix flush for a hash guest */ + + unsigned long rb,rs,prs,r,ric; + + rb = PPC_BIT(52); /* IS = 2 */ + rs = 0; /* lpid = 0 */ + prs = 0; /* partition scoped */ + r = 1; /* radix format */ + ric = 0; /* RIC_FLSUH_TLB */ + + /* + * Need the extra ptesync to make sure we don't + * re-order the tlbie + */ + asm volatile("ptesync": : :"memory"); + asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) + : : "r"(rb), "i"(r), "i"(prs), + "i"(ric), "r"(rs) : "memory"); + } + + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { /* Need the extra ptesync to ensure we don't reorder tlbie*/ asm volatile("ptesync": : :"memory"); ___tlbie(vpn, psize, apsize, ssize); @@ -287,7 +310,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize, asm volatile("ptesync": : :"memory"); } else { __tlbie(vpn, psize, apsize, ssize); - fixup_tlbie(vpn, psize, apsize, ssize); + fixup_tlbie_vpn(vpn, psize, apsize, ssize); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } if (lock_tlbie && !use_local) @@ -860,7 +883,7 @@ static void native_flush_hash_range(unsigned long number, int local) /* * Just do one 
more with the last used values. */ - fixup_tlbie(vpn, psize, psize, ssize); + fixup_tlbie_vpn(vpn, psize, psize, ssize); asm volatile("eieio; tlbsync; ptesync":::"memory"); if (lock_tlbie) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index f23a89d8e4ce6c8fecf0816d23b88d621c68428b..8894c8f300eac9ea552b5c558d5899f7adada0f2 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -295,10 +296,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, HPTE_V_BOLTED, psize, psize, ssize); - + if (ret == -1) { + /* Try to remove a non bolted entry */ + ret = mmu_hash_ops.hpte_remove(hpteg); + if (ret != -1) + ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot, + HPTE_V_BOLTED, psize, psize, + ssize); + } if (ret < 0) break; + cond_resched(); #ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) @@ -1859,11 +1868,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, * * For guests on platforms before POWER9, we clamp the it limit to 1G * to avoid some funky things such as RTAS bugs etc... + * + * On POWER9 we limit to 1TB in case the host erroneously told us that + * the RMA was >1TB. Effective address bits 0:23 are treated as zero + * (meaning the access is aliased to zero i.e. addr = addr % 1TB) + * for virtual real mode addressing and so it doesn't make sense to + * have an area larger than 1TB as it can't be addressed. */ if (!early_cpu_has_feature(CPU_FTR_HVMODE)) { ppc64_rma_size = first_memblock_size; if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000); + else + ppc64_rma_size = min_t(u64, ppc64_rma_size, + 1UL << SID_SHIFT_1T); /* Finally limit subsequent allocations */ memblock_set_current_limit(ppc64_rma_size); @@ -1882,10 +1900,16 @@ static int hpt_order_get(void *data, u64 *val) static int hpt_order_set(void *data, u64 val) { + int ret; + if (!mmu_hash_ops.resize_hpt) return -ENODEV; - return mmu_hash_ops.resize_hpt(val); + cpus_read_lock(); + ret = mmu_hash_ops.resize_hpt(val); + cpus_read_unlock(); + + return ret; } DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n"); diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 2486bee0f93e33e4a5a119fa90cc1924b056a768..97c7a39ebc009c1aa4e9296db096ac0a581a478f 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (high_limit - len >= addr && + if (high_limit - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, */ info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; - info.low_limit = PAGE_SIZE; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 
e87f9ef9115b41d6a7df4647f5c869f769fc03c0..163b0ef7d156e157a989f3f152e1736d1bea58a5 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); kmem_cache_free(cachep, new); + } else { + kmemleak_ignore(new); } spin_unlock(ptl); return 0; @@ -147,6 +150,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); + if (!pu) + return NULL; if (pshift == PUD_SHIFT) return (pte_t *)pu; else if (pshift > PMD_SHIFT) { @@ -155,6 +160,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); + if (!pm) + return NULL; if (pshift == PMD_SHIFT) /* 16MB hugepage */ return (pte_t *)pm; @@ -171,12 +178,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz } else { pdshift = PUD_SHIFT; pu = pud_alloc(mm, pg, addr); + if (!pu) + return NULL; if (pshift >= PUD_SHIFT) { ptl = pud_lockptr(mm, pu); hpdp = (hugepd_t *)pu; } else { pdshift = PMD_SHIFT; pm = pmd_alloc(mm, pu, addr); + if (!pm) + return NULL; ptl = pmd_lockptr(mm, pm); hpdp = (hugepd_t *)pm; } @@ -230,17 +241,22 @@ int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) m->hstate = hstate; return 1; } + +bool __init hugetlb_node_alloc_supported(void) +{ + return false; +} #endif -int __init alloc_bootmem_huge_page(struct hstate *h) +int __init alloc_bootmem_huge_page(struct hstate *h, int nid) { #ifdef CONFIG_PPC_BOOK3S_64 if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()) return pseries_alloc_bootmem_huge_page(h); #endif - return __alloc_bootmem_huge_page(h); + return __alloc_bootmem_huge_page(h, nid); } #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c index 2b656e67f2eaaa3914cd74d1cd57e36a5060486b..927703af49be23c16fa96024d1b1c2a3a294f6d7 100644 --- a/arch/powerpc/mm/init-common.c +++ b/arch/powerpc/mm/init-common.c @@ -65,7 +65,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) * as to leave enough 0 bits in the address to contain it. 
*/ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, HUGEPD_SHIFT_MASK + 1); - struct kmem_cache *new; + struct kmem_cache *new = NULL; /* It would be nice if this was a BUILD_BUG_ON(), but at the * moment, gcc doesn't seem to recognize is_power_of_2 as a @@ -78,7 +78,8 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) align = max_t(unsigned long, align, minalign); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); - new = kmem_cache_create(name, table_size, align, 0, ctor); + if (name) + new = kmem_cache_create(name, table_size, align, 0, ctor); if (!new) panic("Could not allocate pgtable cache for order %d", shift); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 7a9886f98b0c12e8df43fe0e8a897a7d4cbeac9d..a5091c03474753111f77df8de2910152ee38abb8 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node); for (; start < end; start += page_size) { - void *p; + void *p = NULL; int rc; if (vmemmap_populated(start, page_size)) continue; + /* + * Allocate from the altmap first if we have one. This may + * fail due to alignment issues when using 16MB hugepages, so + * fall back to system memory if the altmap allocation fail. + */ if (altmap) p = altmap_alloc_block_buf(page_size, altmap); - else + if (!p) p = vmemmap_alloc_block_buf(page_size, node); if (!p) return -ENOMEM; @@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, { unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; unsigned long page_order = get_order(page_size); + unsigned long alt_start = ~0, alt_end = ~0; + unsigned long base_pfn; start = _ALIGN_DOWN(start, page_size); + if (altmap) { + alt_start = altmap->base_pfn; + alt_end = altmap->base_pfn + altmap->reserve + + altmap->free + altmap->alloc + altmap->align; + } pr_debug("vmemmap_free %lx...%lx\n", start, end); @@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, page = pfn_to_page(addr >> PAGE_SHIFT); section_base = pfn_to_page(vmemmap_section_start(start)); nr_pages = 1 << page_order; + base_pfn = PHYS_PFN(addr); - if (altmap) { + if (base_pfn >= alt_start && base_pfn < alt_end) { vmem_altmap_free(altmap, nr_pages); } else if (PageReserved(page)) { /* allocated from bootmem */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 04ccb274a6205bba58357d5897105ada90f81c0f..84a012e42a7ed51533e8f08f61ba31ade575ce9c 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -118,8 +118,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) return -ENODEV; } -int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, - bool want_memblock) +int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, + bool want_memblock) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; @@ -139,30 +139,20 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap * return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); } -#ifdef CONFIG_MEMORY_HOTREMOVE -int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +void __ref arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - 
struct page *page; int ret; - /* - * If we have an altmap then we need to skip over any reserved PFNs - * when querying the zone. - */ - page = pfn_to_page(start_pfn); - if (altmap) - page += vmem_altmap_offset(altmap); - - ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); - if (ret) - return ret; + __remove_pages(start_pfn, nr_pages, altmap); /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); flush_inval_dcache_range(start, start + size); ret = remove_section_mapping(start, start + size); + WARN_ON_ONCE(ret); /* Ensure all vmalloc mappings are flushed in case they also * hit that section of memory @@ -170,11 +160,8 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap vm_unmap_aliases(); resize_hpt_for_hotplug(memblock_phys_mem_size()); - - return ret; } #endif -#endif /* CONFIG_MEMORY_HOTPLUG */ /* * walk_memory_resource() needs to make sure there is no holes in a given @@ -344,6 +331,14 @@ void __init mem_init(void) BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB + /* + * Some platforms (e.g. 85xx) limit DMA-able memory way below + * 4G. We force memblock to bottom-up mode to ensure that the + * memory allocated in swiotlb_init() is DMA-able. + * As it's the last memblock allocation, no need to reset it + * back to to-down. + */ + memblock_set_bottom_up(true); swiotlb_init(0); #endif diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index dbd8f762140b6942b32f41a8466cb3bfc7aee840..68984d85ad6b8f184e6c63b606887d700d672a55 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -53,14 +53,48 @@ int hash__alloc_context_id(void) } EXPORT_SYMBOL_GPL(hash__alloc_context_id); +static int realloc_context_ids(mm_context_t *ctx) +{ + int i, id; + + /* + * id 0 (aka. ctx->id) is special, we always allocate a new one, even if + * there wasn't one allocated previously (which happens in the exec + * case where ctx is newly allocated). + * + * We have to be a bit careful here. We must keep the existing ids in + * the array, so that we can test if they're non-zero to decide if we + * need to allocate a new one. However in case of error we must free the + * ids we've allocated but *not* any of the existing ones (or risk a + * UAF). That's why we decrement i at the start of the error handling + * loop, to skip the id that we just tested but couldn't reallocate. 
+ */ + for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) { + if (i == 0 || ctx->extended_id[i]) { + id = hash__alloc_context_id(); + if (id < 0) + goto error; + + ctx->extended_id[i] = id; + } + } + + /* The caller expects us to return id */ + return ctx->id; + +error: + for (i--; i >= 0; i--) { + if (ctx->extended_id[i]) + ida_free(&mmu_context_ida, ctx->extended_id[i]); + } + + return id; +} + static int hash__init_new_context(struct mm_struct *mm) { int index; - index = hash__alloc_context_id(); - if (index < 0) - return index; - /* * The old code would re-promote on fork, we don't do that when using * slices as it could cause problem promoting slices that have been @@ -78,6 +112,10 @@ static int hash__init_new_context(struct mm_struct *mm) if (mm->context.id == 0) slice_init_new_context_exec(mm); + index = realloc_context_ids(&mm->context); + if (index < 0) + return index; + subpage_prot_init_new_context(mm); pkey_mm_init(mm); diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 56c2234cc6ae70b8c7c27c64897bb3560ad86219..a8f66975bf535489d4b1191e7c16a8cd293319cb 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -41,31 +41,31 @@ struct mm_iommu_table_group_mem_t { static long mm_iommu_adjust_locked_vm(struct mm_struct *mm, unsigned long npages, bool incr) { - long ret = 0, locked, lock_limit; + long ret = 0, locked, lock_limit, locked_vm; if (!npages) return 0; down_write(&mm->mmap_sem); - + locked_vm = atomic_long_read(&mm->locked_vm); if (incr) { - locked = mm->locked_vm + npages; + locked = locked_vm + npages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - mm->locked_vm += npages; + atomic_long_add(npages, &mm->locked_vm); } else { - if (WARN_ON_ONCE(npages > mm->locked_vm)) - npages = mm->locked_vm; - mm->locked_vm -= npages; + if (WARN_ON_ONCE(npages > locked_vm)) + npages = locked_vm; + atomic_long_sub(npages, &mm->locked_vm); } pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n", current ? current->pid : 0, incr ? '+' : '-', npages << PAGE_SHIFT, - mm->locked_vm << PAGE_SHIFT, + atomic_long_read(&mm->locked_vm) << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK)); up_write(&mm->mmap_sem); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 055b211b7126694e1a97643542a8013853ce1c17..f473c05e964977bb27638cbaa450d2f39f230a59 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1179,7 +1179,7 @@ static long vphn_get_associativity(unsigned long cpu, switch (rc) { case H_FUNCTION: - printk(KERN_INFO + printk_once(KERN_INFO "VPHN is not supported. 
Disabling polling...\n"); stop_topology_update(); break; @@ -1461,13 +1461,6 @@ static void reset_topology_timer(void) #ifdef CONFIG_SMP -static void stage_topology_update(int core_id) -{ - cpumask_or(&cpu_associativity_changes_mask, - &cpu_associativity_changes_mask, cpu_sibling_mask(core_id)); - reset_topology_timer(); -} - static int dt_update_callback(struct notifier_block *nb, unsigned long action, void *data) { @@ -1480,7 +1473,7 @@ static int dt_update_callback(struct notifier_block *nb, !of_prop_cmp(update->prop->name, "ibm,associativity")) { u32 core_id; of_property_read_u32(update->dn, "reg", &core_id); - stage_topology_update(core_id); + rc = dlpar_cpu_readd(core_id); rc = NOTIFY_OK; } break; @@ -1502,6 +1495,9 @@ int start_topology_update(void) { int rc = 0; + if (!topology_updates_enabled) + return 0; + if (firmware_has_feature(FW_FEATURE_PRRN)) { if (!prrn_enabled) { prrn_enabled = 1; @@ -1531,6 +1527,9 @@ int stop_topology_update(void) { int rc = 0; + if (!topology_updates_enabled) + return 0; + if (prrn_enabled) { prrn_enabled = 0; #ifdef CONFIG_SMP @@ -1586,11 +1585,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf, kbuf[read_len] = '\0'; - if (!strncmp(kbuf, "on", 2)) + if (!strncmp(kbuf, "on", 2)) { + topology_updates_enabled = true; start_topology_update(); - else if (!strncmp(kbuf, "off", 3)) + } else if (!strncmp(kbuf, "off", 3)) { stop_topology_update(); - else + topology_updates_enabled = false; + } else return -EINVAL; return count; @@ -1605,9 +1606,7 @@ static const struct file_operations topology_ops = { static int topology_update_init(void) { - /* Do not poll for changes if disabled at boot */ - if (topology_updates_enabled) - start_topology_update(); + start_topology_update(); if (vphn_enabled) topology_schedule_update(); diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 01d7c0f7c4f0c2a12d15cd09bd66c88609dd235e..297db665d953c4f981076020b60705cb503bf888 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -477,3 +477,25 @@ void arch_report_meminfo(struct seq_file *m) atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); } #endif /* CONFIG_PROC_FS */ + +/* + * For hash translation mode, we use the deposited table to store hash slot + * information and they are stored at PTRS_PER_PMD offset from related pmd + * location. Hence a pmd move requires deposit and withdraw. + * + * For radix translation with split pmd ptl, we store the deposited table in the + * pmd page. Hence if we have different pmd page we need to withdraw during pmd + * move. + * + * With hash we use deposited table always irrespective of anon or not. + * With radix we use deposited table only for anonymous mapping. 
+ */ +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma) +{ + if (radix_enabled()) + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); + + return true; +} diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index c879979faa73b766cc1336bfacf0d3b6c65db5ea..4001faf6edaebc19001717aa09d9bc8ecb621ffd 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -115,7 +115,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -169,7 +169,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa, set_the_pte: set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags)); - smp_wmb(); + asm volatile("ptesync": : :"memory"); return 0; } @@ -294,15 +294,15 @@ static int __meminit create_physical_mapping(unsigned long start, } if (split_text_mapping && (mapping_size == PUD_SIZE) && - (addr <= __pa_symbol(__init_begin)) && - (addr + mapping_size) >= __pa_symbol(_stext)) { + (addr < __pa_symbol(__init_begin)) && + (addr + mapping_size) > __pa_symbol(__init_begin)) { max_mapping_size = PMD_SIZE; goto retry; } if (split_text_mapping && (mapping_size == PMD_SIZE) && - (addr <= __pa_symbol(__init_begin)) && - (addr + mapping_size) >= __pa_symbol(_stext)) { + (addr < __pa_symbol(__init_begin)) && + (addr + mapping_size) > __pa_symbol(__init_begin)) { mapping_size = PAGE_SIZE; psize = mmu_virtual_psize; } @@ -521,14 +521,6 @@ void __init radix__early_init_devtree(void) mmu_psize_defs[MMU_PAGE_64K].shift = 16; mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; found: -#ifdef CONFIG_SPARSEMEM_VMEMMAP - if (mmu_psize_defs[MMU_PAGE_2M].shift) { - /* - * map vmemmap using 2M if available - */ - mmu_vmemmap_psize = MMU_PAGE_2M; - } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ return; } @@ -567,7 +559,13 @@ void __init radix__early_init_mmu(void) #ifdef CONFIG_SPARSEMEM_VMEMMAP /* vmemmap mapping */ - mmu_vmemmap_psize = mmu_virtual_psize; + if (mmu_psize_defs[MMU_PAGE_2M].shift) { + /* + * map vmemmap using 2M if available + */ + mmu_vmemmap_psize = MMU_PAGE_2M; + } else + mmu_vmemmap_psize = mmu_virtual_psize; #endif /* * initialize page table size diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index b271b283c785e3a07589ea81c6b8e40e7def5a69..25a8dd9cd71dbbae59a8768e86febe5ea22e93df 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -414,3 +414,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, return pkey_access_permitted(vma_pkey(vma), write, execute); } + +void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm) +{ + if (static_branch_likely(&pkey_disabled)) + return; + + /* Duplicate the oldmm pkey state in mm: */ + mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm); + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; +} diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index bea6c544e38f94c1ede88190013775b1b8413ebb..06783270a12428fdb73b42bfd5f693be2903c367 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -52,7 +52,7 @@ struct batrange { /* stores address ranges mapped by BATs */ phys_addr_t v_block_mapped(unsigned long va) { int b; - for (b = 0; b < 4; ++b) + for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (va >= bat_addrs[b].start && va < bat_addrs[b].limit) return bat_addrs[b].phys + (va - 
bat_addrs[b].start); return 0; @@ -64,7 +64,7 @@ phys_addr_t v_block_mapped(unsigned long va) unsigned long p_block_mapped(phys_addr_t pa) { int b; - for (b = 0; b < 4; ++b) + for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b) if (pa >= bat_addrs[b].phys && pa < (bat_addrs[b].limit-bat_addrs[b].start) +bat_addrs[b].phys) diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 9f574e59d178618af5ba5056f13241f0776f9852..2f162c6e52d4f1632e53e2cee263b3b007e8b838 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -355,7 +355,7 @@ void slb_initialize(void) #endif } - get_paca()->stab_rr = SLB_NUM_BOLTED; + get_paca()->stab_rr = SLB_NUM_BOLTED - 1; lflags = SLB_VSID_KERNEL | linear_llp; vflags = SLB_VSID_KERNEL | vmalloc_llp; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 205fe557ca109dda9a16e3fd9a8fe0f75909bc25..53e9b58e83c2c4aebd10e61b8e6c8589c0274406 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +62,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif +static inline bool slice_addr_is_low(unsigned long addr) +{ + u64 tmp = (u64)addr; + + return tmp < SLICE_LOW_TOP; +} + static void slice_range_to_mask(unsigned long start, unsigned long len, struct slice_mask *ret) { @@ -70,7 +78,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); @@ -78,7 +86,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, - (1u << GET_LOW_SLICE_INDEX(start)); } - if ((start + len) > SLICE_LOW_TOP) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -133,7 +141,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (high_limit <= SLICE_LOW_TOP) + if (slice_addr_is_low(high_limit - 1)) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) @@ -182,7 +190,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, unsigned long end = start + len - 1; u64 low_slices = 0; - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); @@ -192,7 +200,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, if ((low_slices & available->low_slices) != low_slices) return false; - if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -303,7 +311,7 @@ static bool slice_scan_available(unsigned long addr, int end, unsigned long *boundary_addr) { unsigned long slice; - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { slice = GET_LOW_SLICE_INDEX(addr); *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; return !!(available->low_slices & (1u << slice)); @@ -369,6 +377,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, int pshift = max_t(int, 
mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long addr, found, prev; struct vm_unmapped_area_info info; + unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr); info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; @@ -385,7 +394,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, if (high_limit > DEFAULT_MAP_WINDOW) addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW; - while (addr > PAGE_SIZE) { + while (addr > min_addr) { info.high_limit = addr; if (!slice_scan_available(addr - 1, available, 0, &addr)) continue; @@ -397,8 +406,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, * Check if we need to reduce the range, or if we can * extend it to cover the previous available slice. */ - if (addr < PAGE_SIZE) - addr = PAGE_SIZE; + if (addr < min_addr) + addr = min_addr; else if (slice_scan_available(addr - 1, available, 0, &prev)) { addr = prev; goto prev_slice; @@ -520,7 +529,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, addr = _ALIGN_UP(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ - if (addr > high_limit - len || + if (addr > high_limit - len || addr < mmap_min_addr || !slice_area_is_free(mm, addr, len)) addr = 0; } @@ -706,7 +715,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) VM_BUG_ON(radix_enabled()); - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { psizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); } else { diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index 3327551c8b47ceb693e40b4da4f7a068a7d63fcc..5e4178790deef77d7edebd8601113c4c96829e9c 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -214,7 +214,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, return 0; } - if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32))) + if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) return -EFAULT; down_write(&mm->mmap_sem); diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index fef3e1eb3a1998158287884cc08bbe0736cbbc30..1749f15fc0705714996d07d3b102f88e4258f7c3 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -215,22 +215,83 @@ static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid, trace_tlbie(lpid, 0, rb, rs, ric, prs, r); } -static inline void fixup_tlbie(void) + +static inline void fixup_tlbie_va(unsigned long va, unsigned long pid, + unsigned long ap) +{ + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_va(va, 0, ap, RIC_FLUSH_TLB); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); + } +} + +static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid, + unsigned long ap) { - unsigned long pid = 0; + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_pid(0, RIC_FLUSH_TLB); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_va(va, pid, ap, RIC_FLUSH_TLB); + } +} + +static inline void fixup_tlbie_pid(unsigned long pid) +{ + /* + * We can use any address for the invalidation, pick one which is + * probably unused as an optimisation. 
+ */ unsigned long va = ((1UL << 52) - 1); - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_pid(0, RIC_FLUSH_TLB); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } } + +static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid, + unsigned long ap) +{ + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB); + } +} + static inline void fixup_tlbie_lpid(unsigned long lpid) { + /* + * We can use any address for the invalidation, pick one which is + * probably unused as an optimisation. + */ unsigned long va = ((1UL << 52) - 1); - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { + if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { + asm volatile("ptesync": : :"memory"); + __tlbie_lpid(0, RIC_FLUSH_TLB); + } + + if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } @@ -277,6 +338,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) switch (ric) { case RIC_FLUSH_TLB: __tlbie_pid(pid, RIC_FLUSH_TLB); + fixup_tlbie_pid(pid); break; case RIC_FLUSH_PWC: __tlbie_pid(pid, RIC_FLUSH_PWC); @@ -284,8 +346,8 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) case RIC_FLUSH_ALL: default: __tlbie_pid(pid, RIC_FLUSH_ALL); + fixup_tlbie_pid(pid); } - fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } @@ -329,6 +391,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) switch (ric) { case RIC_FLUSH_TLB: __tlbie_lpid(lpid, RIC_FLUSH_TLB); + fixup_tlbie_lpid(lpid); break; case RIC_FLUSH_PWC: __tlbie_lpid(lpid, RIC_FLUSH_PWC); @@ -336,8 +399,8 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric) case RIC_FLUSH_ALL: default: __tlbie_lpid(lpid, RIC_FLUSH_ALL); + fixup_tlbie_lpid(lpid); } - fixup_tlbie_lpid(lpid); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } @@ -366,6 +429,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric) __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB); asm volatile("ptesync": : :"memory"); + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } @@ -410,6 +474,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end, for (addr = start; addr < end; addr += page_size) __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); + + fixup_tlbie_va_range(addr - page_size, pid, ap); } static inline void _tlbie_va(unsigned long va, unsigned long pid, @@ -419,7 +485,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid, asm volatile("ptesync": : :"memory"); __tlbie_va(va, pid, ap, ric); - fixup_tlbie(); + fixup_tlbie_va(va, pid, ap); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } @@ -430,7 +496,7 @@ static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid, asm volatile("ptesync": : :"memory"); __tlbie_lpid_va(va, lpid, ap, ric); - fixup_tlbie_lpid(lpid); + fixup_tlbie_lpid_va(va, lpid, ap); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } @@ -442,7 +508,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end, if (also_pwc) __tlbie_pid(pid, RIC_FLUSH_PWC); 
__tlbie_va_range(start, end, pid, page_size, psize); - fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } @@ -773,7 +838,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, if (gflush) __tlbie_va_range(gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G); - fixup_tlbie(); + asm volatile("eieio; tlbsync; ptesync": : :"memory"); } } @@ -1007,7 +1072,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) goto local; } _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); - goto local; } else { local: _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 7fd20c52a8ec8e2c5762bf5569cc9dd4def83724..9ed90064f54292ea5774841b4d13b9276dc171ac 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -70,6 +70,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) std r15,EX_TLB_R15(r12) std r10,EX_TLB_CR(r12) #ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) #endif TLB_MISS_PROLOG_STATS diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 15fe5f0c8665b0774e2d9e76f487d9a6d9b5978c..ae5d568e267f681d43367b19e9b21f7307debfe3 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -503,6 +503,9 @@ static void setup_page_sizes(void) for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; + if (!def->shift) + continue; + if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 47fc6660845d3735545efc718ae013ee3c1609a8..e5c1d30ee968b49ab17bca8724e2d6b92066b339 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -51,6 +51,8 @@ #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ + ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ ___PPC_RA(base) | ((i) & 0xfffc)) #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ @@ -65,7 +67,9 @@ #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) + ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ + ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ @@ -85,17 +89,6 @@ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) - -#ifdef CONFIG_PPC64 -#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) -#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) -#else -#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) -#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) -#endif - #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) #define 
PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ @@ -123,7 +116,7 @@ ___PPC_RA(a) | IMM_L(i)) #define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \ +#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ ___PPC_RS(a) | ___PPC_RB(b)) diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 6f4daacad296240c1892e30af7456391541f2659..ade04547703fa4ba0035a80444f4b9a4624f3b36 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) #endif +#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) +#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) +#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) + #define SEEN_DATAREF 0x10000 /* might call external helpers */ #define SEEN_XREG 0x20000 /* X reg is used */ #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<idx; @@ -226,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; */ - PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); + PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); PPC_BCC(COND_GT, out); @@ -239,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 /* prog = array->ptrs[index]; */ PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); /* * if (prog == NULL) @@ -249,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 PPC_BCC(COND_EQ, out); /* goto *(prog->bpf_func + prologue_size); */ - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); #ifdef PPC64_ELF_ABI_v1 /* skip past the function descriptor */ PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, { const struct bpf_insn *insn = fp->insnsi; int flen = fp->len; - int i; + int i, ret; /* Start of epilogue code - will only be valid 2nd pass onwards */ u32 exit_addr = addrs[flen]; @@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 src_reg = b2p[insn[i].src_reg]; s16 off = insn[i].off; s32 imm = insn[i].imm; + bool func_addr_fixed; + u64 func_addr; u64 imm64; - u8 *func; u32 true_cond; u32 tmp_idx; @@ -372,12 +399,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg); + PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg); PPC_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]); PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, src_reg); + PPC_DIVDU(dst_reg, dst_reg, src_reg); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | 
BPF_K: /* (u32) dst /= (u32) imm */ @@ -405,7 +432,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVD(b2p[TMP_REG_2], dst_reg, + PPC_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]); PPC_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], @@ -413,7 +440,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); } else - PPC_DIVD(dst_reg, dst_reg, + PPC_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]); break; } @@ -565,17 +592,21 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MR(dst_reg, b2p[TMP_REG_1]); break; case 64: - /* - * Way easier and faster(?) to store the value - * into stack and then use ldbrx - * - * ctx->seen will be reliable in pass2, but - * the instructions generated will remain the - * same across all passes - */ - PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx)); - PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); - PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); + /* Store the value to stack and then use byte-reverse loads */ + PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); + } else { + EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32)); + EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4)); + EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2])); + } break; } break; @@ -596,6 +627,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, } break; + /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + /* * BPF_ST(X) */ @@ -629,7 +666,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LI32(b2p[TMP_REG_1], imm); src_reg = b2p[TMP_REG_1]; } - PPC_STD(src_reg, dst_reg, off); + PPC_BPF_STL(src_reg, dst_reg, off); break; /* @@ -676,7 +713,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_DW: - PPC_LD(dst_reg, src_reg, off); + PPC_BPF_LL(dst_reg, src_reg, off); break; /* @@ -711,23 +748,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_CALL: ctx->seen |= SEEN_FUNC; - /* bpf function call */ - if (insn[i].src_reg == BPF_PSEUDO_CALL) - if (!extra_pass) - func = NULL; - else if (fp->aux->func && off < fp->aux->func_cnt) - /* use the subprog id from the off - * field to lookup the callee address - */ - func = (u8 *) fp->aux->func[off]->bpf_func; - else - return -EINVAL; - /* kernel helper call */ - else - func = (u8 *) __bpf_call_base + imm; - - bpf_jit_emit_func_call(image, ctx, (u64)func); + ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + if (func_addr_fixed) + bpf_jit_emit_func_call_hlp(image, ctx, func_addr); + else + bpf_jit_emit_func_call_rel(image, ctx, func_addr); /* move return value from r3 to BPF_REG_0 */ PPC_MR(b2p[BPF_REG_0], 3); break; @@ -949,6 +978,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto out_addrs; } + /* + * If we have seen a tail call, we need a second pass. 
+ * This is because bpf_jit_emit_common_epilogue() is called + * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen. + */ + if (cgctx.seen & SEEN_TAILCALL) { + cgctx.idx = 0; + if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) { + fp = org_fp; + goto out_addrs; + } + } + /* * Pretend to build prologue, given the features we've seen. This will * update ctgtx.idx as it pretends to output instructions, then we can diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index ad054dd0d6666332b68476d51e2f4ec49637d800..b196e3400d060285627ebe6c98ba2d3c41249cd0 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -31,7 +31,7 @@ static unsigned int user_getsp32(unsigned int sp, int is_first) unsigned int stack_frame[2]; void __user *p = compat_ptr(sp); - if (!access_ok(VERIFY_READ, p, sizeof(stack_frame))) + if (!access_ok(p, sizeof(stack_frame))) return 0; /* @@ -57,7 +57,7 @@ static unsigned long user_getsp64(unsigned long sp, int is_first) { unsigned long stack_frame[3]; - if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame))) + if (!access_ok((void __user *)sp, sizeof(stack_frame))) return 0; if (__copy_from_user_inatomic(stack_frame, (void __user *)sp, diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 81f8a0c838ae3899ae832f5bdd8e3527c2c55352..4004dbdab9c7ba30d0c79d819baa8ef195b3baf6 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1827,6 +1827,7 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; + u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1932,13 +1933,14 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); - if (cpuhw->bhrb_filter == -1) { + if (bhrb_filter == -1) { put_cpu_var(cpu_hw_events); return -EOPNOTSUPP; } + cpuhw->bhrb_filter = bhrb_filter; } put_cpu_var(cpu_hw_events); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 1fafc32b12a0f6bd0a5cbd054d7d2d279fc6a74f..65ee4fe863b2694d968272287663bc7df1af3550 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -261,6 +261,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) attr_group->attrs = attrs; do { ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); + if (!ev_val_str) + continue; dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); if (!dev_str) continue; @@ -268,6 +270,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) attrs[j++] = dev_str; if (pmu->events[i].scale) { ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); + if (!ev_scale_str) + continue; dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); if (!dev_str) continue; @@ -277,6 +281,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) if (pmu->events[i].unit) { ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); + if (!ev_unit_str) + continue; dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); if (!dev_str) continue; @@ -496,6 +502,11 @@ static int nest_imc_event_init(struct perf_event *event) * Get the base memory addresss for this cpu. 
*/ chip_id = cpu_to_chip_id(event->cpu); + + /* Return, if chip_id is not valid */ + if (chip_id < 0) + return -ENODEV; + pcni = pmu->mem_info; do { if (pcni->id == chip_id) { @@ -503,7 +514,7 @@ static int nest_imc_event_init(struct perf_event *event) break; } pcni++; - } while (pcni); + } while (pcni->vbase != 0); if (!flag) return -ENODEV; diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 177de814286fca3006aad90bee1199f899c3387d..053b8e9aa9e756124d3e25ed1cc5c4595a68b2ad 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -148,6 +148,14 @@ static bool is_thresh_cmp_valid(u64 event) return true; } +static unsigned int dc_ic_rld_quad_l1_sel(u64 event) +{ + unsigned int cache; + + cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK; + return cache; +} + static inline u64 isa207_find_source(u64 idx, u32 sub_idx) { u64 ret = PERF_MEM_NA; @@ -226,8 +234,13 @@ void isa207_get_mem_weight(u64 *weight) u64 mmcra = mfspr(SPRN_MMCRA); u64 exp = MMCRA_THR_CTR_EXP(mmcra); u64 mantissa = MMCRA_THR_CTR_MANT(mmcra); + u64 sier = mfspr(SPRN_SIER); + u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - *weight = mantissa << (2 * exp); + if (val == 0 || val == 7) + *weight = 0; + else + *weight = mantissa << (2 * exp); } int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) @@ -283,10 +296,10 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) * have a cache selector of zero. The bank selector (bit 3) is * irrelevant, as long as the rest of the value is 0. */ - if (cache & 0x7) + if (!cpu_has_feature(CPU_FTR_ARCH_300) && (cache & 0x7)) return -1; - } else if (event & EVENT_IS_L1) { + } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) { mask |= CNST_L1_QUAL_MASK; value |= CNST_L1_QUAL_VAL(cache); } @@ -389,11 +402,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev, /* In continuous sampling mode, update SDAR on TLB miss */ mmcra_sdar_mode(event[i], &mmcra); - if (event[i] & EVENT_IS_L1) { - cache = event[i] >> EVENT_CACHE_SEL_SHIFT; - mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; - cache >>= 1; - mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + cache = dc_ic_rld_quad_l1_sel(event[i]); + mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT; + } else { + if (event[i] & EVENT_IS_L1) { + cache = dc_ic_rld_quad_l1_sel(event[i]); + mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT; + } } if (is_event_marked(event[i])) { diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 0028f4b9490dba671b0e87e72fdf6fb0781f90a0..e5a621699a6d84cb1c24e145d0f9c49c34f3cf4a 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -163,8 +163,8 @@ #define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) #define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) #define MMCR1_FAB_SHIFT 36 -#define MMCR1_DC_QUAL_SHIFT 47 -#define MMCR1_IC_QUAL_SHIFT 46 +#define MMCR1_DC_IC_QUAL_MASK 0x3 +#define MMCR1_DC_IC_QUAL_SHIFT 46 /* MMCR1 Combine bits macro for power9 */ #define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2)) diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index d12a2db2635353c38fd27510512dff89da80ea28..d10feef93b6bc1daaf0c7f6d0fefb796b58d01e3 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -29,6 +29,7 @@ enum { #define POWER8_MMCRA_IFM1 0x0000000040000000UL #define POWER8_MMCRA_IFM2 
0x0000000080000000UL #define POWER8_MMCRA_IFM3 0x00000000C0000000UL +#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL /* * Raw event encoding for PowerISA v2.07 (Power8): @@ -243,6 +244,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type) static void power8_config_bhrb(u64 pmu_bhrb_filter) { + pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK; + /* Enable BHRB filter in PMU */ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index e012b1030a5b186ae797cc8516934096b0b45ddb..c07b1615ee39d3131dcbea6edd9fd5d407a4961d 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -100,6 +100,7 @@ enum { #define POWER9_MMCRA_IFM1 0x0000000040000000UL #define POWER9_MMCRA_IFM2 0x0000000080000000UL #define POWER9_MMCRA_IFM3 0x00000000C0000000UL +#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL /* Nasty Power9 specific hack */ #define PVR_POWER9_CUMULUS 0x00002000 @@ -308,6 +309,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type) static void power9_config_bhrb(u64 pmu_bhrb_filter) { + pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK; + /* Enable BHRB filter in PMU */ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c index 8b4dd0da08395556950c50029af05bcfad41085c..9e27cfe2702686fe1dc718e05341dd025c3bc3e6 100644 --- a/arch/powerpc/platforms/4xx/uic.c +++ b/arch/powerpc/platforms/4xx/uic.c @@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) mtdcr(uic->dcrbase + UIC_PR, pr); mtdcr(uic->dcrbase + UIC_TR, tr); + mtdcr(uic->dcrbase + UIC_SR, ~mask); raw_spin_unlock_irqrestore(&uic->lock, flags); diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index d75c9816a5c92ad4211d71568e90848ab7fc998a..2b6589fe812dde8746f847e129211802408f4408 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -150,3 +151,19 @@ void __init mpc83xx_setup_arch(void) mpc83xx_setup_pci(); } + +int machine_check_83xx(struct pt_regs *regs) +{ + u32 mask = 1 << (31 - IPIC_MCP_WDT); + + if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask)) + return machine_check_generic(regs); + ipic_clear_mcp_status(mask); + + if (debugger_fault_handler(regs)) + return 1; + + die("Watchdog NMI Reset", regs, 0); + + return 1; +} diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 3d1ecd2117769906c1d69eab8c39776012cff1b3..8137f77abad577503397a56f051ae9f97a71ab5e 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -26,13 +26,13 @@ #define SS_MSR 0x74 #define SS_SDR1 0x78 #define SS_LR 0x7c -#define SS_SPRG 0x80 /* 4 SPRGs */ -#define SS_DBAT 0x90 /* 8 DBATs */ -#define SS_IBAT 0xd0 /* 8 IBATs */ -#define SS_TB 0x110 -#define SS_CR 0x118 -#define SS_GPREG 0x11c /* r12-r31 */ -#define STATE_SAVE_SIZE 0x16c +#define SS_SPRG 0x80 /* 8 SPRGs */ +#define SS_DBAT 0xa0 /* 8 DBATs */ +#define SS_IBAT 0xe0 /* 8 IBATs */ +#define SS_TB 0x120 +#define SS_CR 0x128 +#define SS_GPREG 0x12c /* r12-r31 */ +#define STATE_SAVE_SIZE 0x17c .section .data .align 5 @@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep) stw r7, SS_SPRG+12(r3) stw r8, SS_SDR1(r3) + mfspr r4, SPRN_SPRG4 + mfspr r5, SPRN_SPRG5 + mfspr r6, SPRN_SPRG6 + mfspr r7, SPRN_SPRG7 + + stw r4, 
SS_SPRG+16(r3) + stw r5, SS_SPRG+20(r3) + stw r6, SS_SPRG+24(r3) + stw r7, SS_SPRG+28(r3) + mfspr r4, SPRN_DBAT0U mfspr r5, SPRN_DBAT0L mfspr r6, SPRN_DBAT1U @@ -493,6 +503,16 @@ mpc83xx_deep_resume: mtspr SPRN_IBAT7U, r6 mtspr SPRN_IBAT7L, r7 + lwz r4, SS_SPRG+16(r3) + lwz r5, SS_SPRG+20(r3) + lwz r6, SS_SPRG+24(r3) + lwz r7, SS_SPRG+28(r3) + + mtspr SPRN_SPRG4, r4 + mtspr SPRN_SPRG5, r5 + mtspr SPRN_SPRG6, r6 + mtspr SPRN_SPRG7, r7 + lwz r4, SS_SPRG+0(r3) lwz r5, SS_SPRG+4(r3) lwz r6, SS_SPRG+8(r3) diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 6c6a7c72cae459ea2e53dc5f791fef8ead5c85bd..ad0216c41d2c5607693d77fe6a3c194718b12a91 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -330,7 +330,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK config PPC_RADIX_MMU bool "Radix MMU Support" - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 && HUGETLB_PAGE select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA default y help diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 43e7b93f27c71c609d75b96089aa4e34d958a899..ae8123edddc670ed2b413129ee8a97aade121200 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -609,7 +609,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, if (len < 4) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; udata = (void __user *)buf; @@ -717,7 +717,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf, if (len < 4) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; udata = (void __user *)buf; @@ -856,7 +856,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, return -EINVAL; udata = (void __user *)buf; - if (!access_ok(VERIFY_READ, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; if (__get_user(wbox_data, udata)) @@ -1994,7 +1994,7 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, int ret; struct spu_context *ctx = file->private_data; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); @@ -2034,7 +2034,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, struct spu_context *ctx = file->private_data; int ret; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); @@ -2077,7 +2077,7 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, struct spu_context *ctx = file->private_data; int ret; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); @@ -2129,7 +2129,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, struct spu_context *ctx = file->private_data; int ret; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; ret = spu_acquire_saved(ctx); @@ -2160,7 +2160,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, if (len < ret) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buf, len)) + if (!access_ok(buf, len)) return -EFAULT; info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 
403523c061bad3229f4f5a7e6c5ab64c617bb97c..343bffd20fcaf2ceb378a06dd597fd4e4367b268 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top) /* MEM2 64MB@0x10000000 */ delta = wii_hole_start + wii_hole_size; size = top - delta; + + if (__map_without_bats) + return delta; + for (bl = 128<<10; bl < max_size; bl <<= 1) { if (bl * 2 > size) break; diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index f2839eed0f89715bd4bb3ff2a55f7602587fa761..561a67d65e4d46b1d069839f66a0e6b3da8b9578 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -3,7 +3,7 @@ CFLAGS_bootx_init.o += -fPIC ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE) endif obj-y += pic.o setup.o time.o feature.o pci.o \ diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index f89808b9713d0671d51d7a38ac0e8ae320b4d575..b0660ef691779407ec1296d80586093683aa8dc2 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -38,10 +38,18 @@ #define SL_IBAT2 0x48 #define SL_DBAT3 0x50 #define SL_IBAT3 0x58 -#define SL_TB 0x60 -#define SL_R2 0x68 -#define SL_CR 0x6c -#define SL_R12 0x70 /* r12 to r31 */ +#define SL_DBAT4 0x60 +#define SL_IBAT4 0x68 +#define SL_DBAT5 0x70 +#define SL_IBAT5 0x78 +#define SL_DBAT6 0x80 +#define SL_IBAT6 0x88 +#define SL_DBAT7 0x90 +#define SL_IBAT7 0x98 +#define SL_TB 0xa0 +#define SL_R2 0xa8 +#define SL_CR 0xac +#define SL_R12 0xb0 /* r12 to r31 */ #define SL_SIZE (SL_R12 + 80) .section .text @@ -126,6 +134,41 @@ _GLOBAL(low_sleep_handler) mfibatl r4,3 stw r4,SL_IBAT3+4(r1) +BEGIN_MMU_FTR_SECTION + mfspr r4,SPRN_DBAT4U + stw r4,SL_DBAT4(r1) + mfspr r4,SPRN_DBAT4L + stw r4,SL_DBAT4+4(r1) + mfspr r4,SPRN_DBAT5U + stw r4,SL_DBAT5(r1) + mfspr r4,SPRN_DBAT5L + stw r4,SL_DBAT5+4(r1) + mfspr r4,SPRN_DBAT6U + stw r4,SL_DBAT6(r1) + mfspr r4,SPRN_DBAT6L + stw r4,SL_DBAT6+4(r1) + mfspr r4,SPRN_DBAT7U + stw r4,SL_DBAT7(r1) + mfspr r4,SPRN_DBAT7L + stw r4,SL_DBAT7+4(r1) + mfspr r4,SPRN_IBAT4U + stw r4,SL_IBAT4(r1) + mfspr r4,SPRN_IBAT4L + stw r4,SL_IBAT4+4(r1) + mfspr r4,SPRN_IBAT5U + stw r4,SL_IBAT5(r1) + mfspr r4,SPRN_IBAT5L + stw r4,SL_IBAT5+4(r1) + mfspr r4,SPRN_IBAT6U + stw r4,SL_IBAT6(r1) + mfspr r4,SPRN_IBAT6L + stw r4,SL_IBAT6+4(r1) + mfspr r4,SPRN_IBAT7U + stw r4,SL_IBAT7(r1) + mfspr r4,SPRN_IBAT7L + stw r4,SL_IBAT7+4(r1) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + /* Backup various CPU config stuffs */ bl __save_cpu_setup @@ -326,22 +369,37 @@ grackle_wake_up: mtibatl 3,r4 BEGIN_MMU_FTR_SECTION - li r4,0 + lwz r4,SL_DBAT4(r1) mtspr SPRN_DBAT4U,r4 + lwz r4,SL_DBAT4+4(r1) mtspr SPRN_DBAT4L,r4 + lwz r4,SL_DBAT5(r1) mtspr SPRN_DBAT5U,r4 + lwz r4,SL_DBAT5+4(r1) mtspr SPRN_DBAT5L,r4 + lwz r4,SL_DBAT6(r1) mtspr SPRN_DBAT6U,r4 + lwz r4,SL_DBAT6+4(r1) mtspr SPRN_DBAT6L,r4 + lwz r4,SL_DBAT7(r1) mtspr SPRN_DBAT7U,r4 + lwz r4,SL_DBAT7+4(r1) mtspr SPRN_DBAT7L,r4 + lwz r4,SL_IBAT4(r1) mtspr SPRN_IBAT4U,r4 + lwz r4,SL_IBAT4+4(r1) mtspr SPRN_IBAT4L,r4 + lwz r4,SL_IBAT5(r1) mtspr SPRN_IBAT5U,r4 + lwz r4,SL_IBAT5+4(r1) mtspr SPRN_IBAT5L,r4 + lwz r4,SL_IBAT6(r1) mtspr SPRN_IBAT6U,r4 + lwz r4,SL_IBAT6+4(r1) mtspr SPRN_IBAT6L,r4 + lwz r4,SL_IBAT7(r1) mtspr SPRN_IBAT7U,r4 + lwz r4,SL_IBAT7+4(r1) mtspr 
SPRN_IBAT7L,r4 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 3c1beae29f2d838ddc3871a0f78608b5068fb125..9dd5b8909178bdc4f736bf975c94c454d36d8ee5 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -578,8 +578,8 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) static int pnv_eeh_get_phb_state(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; s64 rc; int result = 0; @@ -617,8 +617,8 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe) static int pnv_eeh_get_pe_state(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; s64 rc; int result; diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 35f699ebb662189fd574ca23f43c089cf7b92a17..e52f9b06dd9c31fb2c68bf65e43214ee28037624 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_HOTPLUG_CPU -static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) + +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) { u64 pir = get_hard_smp_processor_id(cpu); @@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu) { unsigned long srr1; u32 idle_states = pnv_get_supported_cpuidle_states(); - u64 lpcr_val; - - /* - * We don't want to take decrementer interrupts while we are - * offline, so clear LPCR:PECE1. We keep PECE2 (and - * LPCR_PECE_HVEE on P9) enabled as to let IPIs in. - * - * If the CPU gets woken up by a special wakeup, ensure that - * the SLW engine sets LPCR with decrementer bit cleared, else - * the CPU will come back to the kernel due to a spurious - * wakeup. - */ - lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); __ppc64_runlatch_off(); @@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu) __ppc64_runlatch_on(); - /* - * Re-enable decrementer interrupts in LPCR. - * - * Further, we want stop states to be woken up by decrementer - * for non-hotplug cases. So program the LPCR via stop api as - * well. 
- */ - lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); - return srr1; } #endif diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 51dc398ae3f7a2431b39f6705a4cfc495a378aeb..84d038ed3882a7794987ba3f222aab7351895d8f 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -70,6 +70,7 @@ static int change_memblock_state(struct memory_block *mem, void *arg) return 0; } +/* called with device_hotplug_lock held */ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) { u64 end_pfn = start_pfn + nr_pages - 1; @@ -90,17 +91,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - lock_device_hotplug(); - remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); - unlock_device_hotplug(); return true; } static u64 memtrace_alloc_node(u32 nid, u64 size) { - u64 start_pfn, end_pfn, nr_pages; + u64 start_pfn, end_pfn, nr_pages, pfn; u64 base_pfn; + u64 bytes = memory_block_size_bytes(); if (!node_spanned_pages(nid)) return 0; @@ -112,10 +111,24 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) /* Trace memory needs to be aligned to the size */ end_pfn = round_down(end_pfn - nr_pages, nr_pages); + lock_device_hotplug(); for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { - if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { + /* + * Remove memory in memory block size chunks so that + * iomem resources are always split to the same size and + * we never try to remove memory that spans two iomem + * resources. + */ + end_pfn = base_pfn + nr_pages; + for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) { + __remove_memory(nid, pfn << PAGE_SHIFT, bytes); + } + unlock_device_hotplug(); return base_pfn << PAGE_SHIFT; + } } + unlock_device_hotplug(); return 0; } @@ -231,9 +244,11 @@ static int memtrace_online(void) * we need to online the memory ourselves. */ if (!memhp_auto_online) { + lock_device_hotplug(); walk_memory_range(PFN_DOWN(ent->start), PFN_UP(ent->start + ent->size - 1), NULL, online_mem_block); + unlock_device_hotplug(); } /* diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 8006c54a91e3fb6c08fa22e87631b9297e5dca5e..fd8166ffbffa74c12d8aec8561631bedfba5af69 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -56,9 +56,22 @@ static struct dentry *atsd_threshold_dentry; static struct pci_dev *get_pci_dev(struct device_node *dn) { struct pci_dn *pdn = PCI_DN(dn); + struct pci_dev *pdev; - return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus), + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus), pdn->busno, pdn->devfn); + + /* + * pci_get_domain_bus_and_slot() increased the reference count of + * the PCI device, but callers don't need that actually as the PE + * already holds a reference to the device. Since callers aren't + * aware of the reference count change, call pci_dev_put() now to + * avoid leaks. + */ + if (pdev) + pci_dev_put(pdev); + + return pdev; } /* Given a NPU device get the associated PCI device. 
*/ diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 58a07948c76e783bdcd9eb14deb527bdb08508d6..649fb268f446141bd255f4414f1f692be187fba1 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node, struct imc_pmu *pmu_ptr) { static u64 loc, *imc_mode_addr, *imc_cmd_addr; - int chip = 0, nid; char mode[16], cmd[16]; u32 cb_offset; + struct imc_mem_info *ptr = pmu_ptr->mem_info; imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root); @@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node, if (of_property_read_u32(node, "cb_offset", &cb_offset)) cb_offset = IMC_CNTL_BLK_OFFSET; - for_each_node(nid) { - loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset; + while (ptr->vbase != NULL) { + loc = (u64)(ptr->vbase) + cb_offset; imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET); - sprintf(mode, "imc_mode_%d", nid); + sprintf(mode, "imc_mode_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent, imc_mode_addr)) goto err; imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET); - sprintf(cmd, "imc_cmd_%d", nid); + sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent, imc_cmd_addr)) goto err; - chip++; + ptr++; } return; @@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node, nr_chips)) goto error; - pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info), + pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info), GFP_KERNEL); if (!pmu_ptr->mem_info) goto error; @@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain) struct imc_pmu *pmu_ptr; u32 offset; + /* Return for unknown domain */ + if (domain < 0) + return -EINVAL; + /* memory for pmu */ pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); if (!pmu_ptr) diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index bc97770a67dbf05747ca49b0029025eb0d9d3d1e..e71f2111c8c0b6e99892376be6cc039f408b0b98 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -282,6 +282,8 @@ int __init opal_event_init(void) else name = kasprintf(GFP_KERNEL, "opal"); + if (!name) + continue; /* Install interrupt handler */ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, name, NULL); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index 6c7ad1d8b32edad20154322a246385aea42ee244..2623996a193ab292cab4c9e37bf0dcf3824cc414 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -192,7 +192,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, u32 data, pos, len, todo; int rc; - if (!access_ok(VERIFY_WRITE, ubuf, count)) + if (!access_ok(ubuf, count)) return -EFAULT; todo = count; @@ -283,7 +283,7 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf, u32 data, pos, len, todo; int rc; - if (!access_ok(VERIFY_READ, ubuf, count)) + if (!access_ok(ubuf, count)) return -EFAULT; todo = count; diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index acd3206dfae3477452f11c4a96dfc1638cafae00..06628c71cef6996119b90a59fb68d129072a61f5 100644 --- 
a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, } static struct bin_attribute opal_msglog_attr = { - .attr = {.name = "msglog", .mode = 0444}, + .attr = {.name = "msglog", .mode = 0400}, .read = opal_msglog_read }; diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c index 1ab7d26c0a2cd972674d299339928b0475b2ff91..f16a43540e307575b874dd4caa60e7a980ea51ec 100644 --- a/arch/powerpc/platforms/powernv/opal-tracepoints.c +++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c @@ -4,7 +4,7 @@ #include #include -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key opal_tracepoint_key = STATIC_KEY_INIT; int opal_tracepoint_regfunc(void) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 251528231a9e943b1318fc4f117163717f3d160c..74215ebda142da25794fff581fd6bfaffb5a7cb0 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -20,7 +20,7 @@ .section ".text" #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define OPAL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key) #else @@ -303,7 +303,7 @@ OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO); OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE); OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK); OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK); -OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ); +OPAL_CALL(opal_xive_allocate_irq_raw, OPAL_XIVE_ALLOCATE_IRQ); OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ); OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO); OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 38fe4087484a61e7ead4fed3ea89db130b596b8b..edf9032e2e5ccacac0de15e9d902a139c21941e9 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj, bin_attr->size); } -static BIN_ATTR_RO(symbol_map, 0); +static struct bin_attribute symbol_map_attr = { + .attr = {.name = "symbol_map", .mode = 0400}, + .read = symbol_map_read +}; static void opal_export_symmap(void) { @@ -697,10 +700,10 @@ static void opal_export_symmap(void) return; /* Setup attributes */ - bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0])); - bin_attr_symbol_map.size = be64_to_cpu(syms[1]); + symbol_map_attr.private = __va(be64_to_cpu(syms[0])); + symbol_map_attr.size = be64_to_cpu(syms[1]); - rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map); + rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr); if (rc) pr_warn("Error %d creating OPAL symbols file\n", rc); } diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index fe9691040f54c26561949c469738f277c90069e6..15a567128c0f1d99423e8bfef3881fd1330bee3c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift) struct page *tce_mem = NULL; __be64 *addr; - tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT); + tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN, + 
shift - PAGE_SHIFT); if (!tce_mem) { pr_err("Failed to allocate a TCE memory, level shift=%d\n", shift); @@ -48,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift) return addr; } +static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, + unsigned long size, unsigned int levels); + static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) { __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base; @@ -57,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) while (level) { int n = (idx & mask) >> (level * shift); - unsigned long tce; + unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n])); - if (tmp[n] == 0) { + if (!tce) { __be64 *tmp2; if (!alloc) @@ -70,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) if (!tmp2) return NULL; - tmp[n] = cpu_to_be64(__pa(tmp2) | - TCE_PCI_READ | TCE_PCI_WRITE); + tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE; + oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0, + cpu_to_be64(tce))); + if (oldtce) { + pnv_pci_ioda2_table_do_free_pages(tmp2, + ilog2(tbl->it_level_size) + 3, 1); + tce = oldtce; + } } - tce = be64_to_cpu(tmp[n]); tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE)); idx &= ~mask; @@ -161,6 +170,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages) if (ptce) *ptce = cpu_to_be64(0); + else + /* Skip the rest of the level */ + i |= tbl->it_level_size - 1; } } @@ -260,7 +272,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, unsigned int table_shift = max_t(unsigned int, entries_shift + 3, PAGE_SHIFT); const unsigned long tce_table_size = 1UL << table_shift; - unsigned int tmplevels = levels; if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS)) return -EINVAL; @@ -268,9 +279,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, if (!is_power_of_2(window_size)) return -EINVAL; - if (alloc_userspace_copy && (window_size > (1ULL << 32))) - tmplevels = 1; - /* Adjust direct table size from window_size and levels */ entries_shift = (entries_shift + levels - 1) / levels; level_shift = entries_shift + 3; @@ -281,7 +289,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - tmplevels, tce_table_size, &offset, &total_allocated); + 1, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -292,18 +300,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, * we did not allocate as much as we wanted, * release partially allocated table. 
*/ - if (tmplevels == levels && offset < tce_table_size) + if (levels == 1 && offset < tce_table_size) goto free_tces_exit; /* Allocate userspace view of the TCE table */ if (alloc_userspace_copy) { offset = 0; uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, tce_table_size, &offset, + 1, tce_table_size, &offset, &total_allocated_uas); if (!uas) goto free_tces_exit; - if (tmplevels == levels && (offset < tce_table_size || + if (levels == 1 && (offset < tce_table_size || total_allocated_uas != total_allocated)) goto free_uas_exit; } @@ -313,13 +321,12 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = total_allocated; tbl->it_userspace = uas; tbl->it_nid = nid; pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n", window_size, tce_table_size, bus_offset, tbl->it_base, - tbl->it_userspace, tmplevels, levels); + tbl->it_userspace, 1, levels); return 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index cde710297a4e434c58d32f07da8ee61d0a24d4a7..ee63749a2d47eaee50cb45bb0da47e9b8ccaab4e 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -605,8 +605,8 @@ static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) { struct pnv_ioda_pe *slave, *pe; - u8 fstate, state; - __be16 pcierr; + u8 fstate = 0, state; + __be16 pcierr = 0; s64 rc; /* Sanity check on PE number */ @@ -2603,8 +2603,13 @@ static long pnv_pci_ioda2_create_table_userspace( int num, __u32 page_shift, __u64 window_size, __u32 levels, struct iommu_table **ptbl) { - return pnv_pci_ioda2_create_table(table_group, + long ret = pnv_pci_ioda2_create_table(table_group, num, page_shift, window_size, levels, true, ptbl); + + if (!ret) + (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size( + page_shift, window_size, levels); + return ret; } static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 13aef2323bbca5638889afa54d8f2fcfe7b98636..db230a35609bfef59f97fdee53aed43cce843f9c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -602,8 +602,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) static void pnv_pci_config_check_eeh(struct pci_dn *pdn) { struct pnv_phb *phb = pdn->phb->private_data; - u8 fstate; - __be16 pcierr; + u8 fstate = 0; + __be16 pcierr = 0; unsigned int pe_no; s64 rc; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 8b37b28e383181bfd7fb8a0b24e44983d17efc81..e302aa092d4f1e09f8076873ea4ede348919ad4f 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe); extern int pnv_npu2_init(struct pnv_phb *phb); /* pci-ioda-tce.c */ -#define POWERNV_IOMMU_DEFAULT_LEVELS 1 +#define POWERNV_IOMMU_DEFAULT_LEVELS 2 #define POWERNV_IOMMU_MAX_LEVELS 5 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index adddde023622754d00f2c33168e18bfd16d95a80..5068dd7f6e74b0b88670f757a6c157001f7b9acc 100644 --- 
a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -125,12 +125,29 @@ static void pnv_setup_rfi_flush(void) type = L1D_FLUSH_ORI; } + /* + * If we are non-Power9 bare metal, we don't need to flush on kernel + * entry or after user access: they fix a P9 specific vulnerability. + */ + if (!pvr_version_is(PVR_POWER9)) { + security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); + security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); + } + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); setup_rfi_flush(type, enable); setup_count_cache_flush(); + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); + setup_entry_flush(enable); + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); + setup_uaccess_flush(enable); } static void __init pnv_setup_arch(void) diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 0d354e19ef926e7cc9b6ca6b1170403b4c952fdd..8d49ba370c5046e4f5da725e5aa81aafa9033daa 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "powernv.h" @@ -149,22 +150,27 @@ static int pnv_smp_cpu_disable(void) return 0; } +static void pnv_flush_interrupts(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (xive_enabled()) + xive_flush_interrupt(); + else + icp_opal_flush_interrupt(); + } else { + icp_native_flush_interrupt(); + } +} + static void pnv_smp_cpu_kill_self(void) { + unsigned long srr1, unexpected_mask, wmask; unsigned int cpu; - unsigned long srr1, wmask; + u64 lpcr_val; /* Standard hot unplug procedure */ - /* - * This hard disables local interurpts, ensuring we have no lazy - * irqs pending. - */ - WARN_ON(irqs_disabled()); - hard_irq_disable(); - WARN_ON(lazy_irq_pending()); idle_task_exit(); - current->active_mm = NULL; /* for sanity */ cpu = smp_processor_id(); DBG("CPU%d offline\n", cpu); generic_set_cpu_dead(cpu); @@ -174,6 +180,40 @@ static void pnv_smp_cpu_kill_self(void) if (cpu_has_feature(CPU_FTR_ARCH_207S)) wmask = SRR1_WAKEMASK_P8; + /* + * This turns the irq soft-disabled state we're called with, into a + * hard-disabled state with pending irq_happened interrupts cleared. + * + * PACA_IRQ_DEC - Decrementer should be ignored. + * PACA_IRQ_HMI - Can be ignored, processing is done in real mode. + * PACA_IRQ_DBELL, EE, PMI - Unexpected. + */ + hard_irq_disable(); + if (generic_check_cpu_restart(cpu)) + goto out; + + unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS); + if (local_paca->irq_happened & unexpected_mask) { + if (local_paca->irq_happened & PACA_IRQ_EE) + pnv_flush_interrupts(); + DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n", + cpu, local_paca->irq_happened); + } + local_paca->irq_happened = PACA_IRQ_HARD_DIS; + + /* + * We don't want to take decrementer interrupts while we are + * offline, so clear LPCR:PECE1. We keep PECE2 (and + * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in. + * + * If the CPU gets woken up by a special wakeup, ensure that + * the SLW engine sets LPCR with decrementer bit cleared, else + * the CPU will come back to the kernel due to a spurious + * wakeup. 
+ */ + lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); + while (!generic_check_cpu_restart(cpu)) { /* * Clear IPI flag, since we don't handle IPIs while @@ -182,10 +222,11 @@ static void pnv_smp_cpu_kill_self(void) * for coming online, which are handled via * generic_check_cpu_restart() calls. */ - kvmppc_set_host_ipi(cpu, 0); + kvmppc_clear_host_ipi(cpu); srr1 = pnv_cpu_offline(cpu); + WARN_ON_ONCE(!irqs_disabled()); WARN_ON(lazy_irq_pending()); /* @@ -201,13 +242,7 @@ static void pnv_smp_cpu_kill_self(void) */ if (((srr1 & wmask) == SRR1_WAKEEE) || ((srr1 & wmask) == SRR1_WAKEHVI)) { - if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (xive_enabled()) - xive_flush_interrupt(); - else - icp_opal_flush_interrupt(); - } else - icp_native_flush_interrupt(); + pnv_flush_interrupts(); } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); @@ -246,6 +281,16 @@ static void pnv_smp_cpu_kill_self(void) } + /* + * Re-enable decrementer interrupts in LPCR. + * + * Further, we want stop states to be woken up by decrementer + * for non-hotplug cases. So program the LPCR via stop api as + * well. + */ + lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); +out: DBG("CPU%d coming online...\n", cpu); } diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index cdbfc5cfd6f38ee85620288c14d2288808572e1e..f5387ad822798125402d76a530a6d7cc2c1733f5 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -664,7 +664,7 @@ static int update_flash_db(void) db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff); count = os_area_flash_write(db, sizeof(struct os_area_db), pos); - if (count < sizeof(struct os_area_db)) { + if (count < 0 || count < sizeof(struct os_area_db)) { pr_debug("%s: os_area_flash_write failed %zd\n", __func__, count); error = count < 0 ? 
count : -EIO; diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 25427a48feae3a2dcdee2182cd3bb6c5765066d9..502ebcc6c3cbe4adfd08c6aced7dd83d6c0d98dd 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -425,6 +425,10 @@ static struct bus_type cmm_subsys = { .dev_name = "cmm", }; +static void cmm_release_device(struct device *dev) +{ +} + /** * cmm_sysfs_register - Register with sysfs * @@ -440,6 +444,7 @@ static int cmm_sysfs_register(struct device *dev) dev->id = 0; dev->bus = &cmm_subsys; + dev->release = cmm_release_device; if ((rc = device_register(dev))) goto subsys_unregister; diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index a0b20c03f078cc41b9ad126fbf2603a4ffb68463..c5ffcadab7302f65b9737e81a710a5bcf7b3f787 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -63,6 +63,10 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) name = (char *)ccwa + be32_to_cpu(ccwa->name_offset); prop->name = kstrdup(name, GFP_KERNEL); + if (!prop->name) { + dlpar_free_cc_property(prop); + return NULL; + } prop->length = be32_to_cpu(ccwa->prop_length); value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset); @@ -272,6 +276,8 @@ int dlpar_detach_node(struct device_node *dn) if (rc) return rc; + of_node_put(dn); + return 0; } diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 18014cdeb590aa40eb61ff78ae8e273cb2f3233a..ef6595153642e3af4b5f52b300cbb3dd7d6b6d1f 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -149,7 +149,7 @@ static int dtl_start(struct dtl *dtl) /* Register our dtl buffer with the hypervisor. 
The HV expects the * buffer size to be passed in the second word of the buffer */ - ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; + ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES); hwcpu = get_hard_smp_processor_id(dtl->cpu); addr = __pa(dtl->buf); @@ -184,7 +184,7 @@ static void dtl_stop(struct dtl *dtl) static u64 dtl_current_index(struct dtl *dtl) { - return lppaca_of(dtl->cpu).dtl_idx; + return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 6ef77caf7bcf47a7af035d31891707f9dd40a3b2..1d3f9313c02ffffdf2f3795c8062f583a12a9c40 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add) return rc; } +int dlpar_cpu_readd(int cpu) +{ + struct device_node *dn; + struct device *dev; + u32 drc_index; + int rc; + + dev = get_cpu_device(cpu); + dn = dev->of_node; + + rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); + + rc = dlpar_cpu_remove_by_index(drc_index); + if (!rc) + rc = dlpar_cpu_add(drc_index); + + return rc; +} + int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) { u32 count, drc_index; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c1578f54c62610df5d09f93e170259131044b7ce..63191f53f89cad03b5ce0a98dce8fb5f513f2590 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -163,7 +163,7 @@ static u32 find_aa_index(struct device_node *dr_node, return aa_index; } -static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb) +static int update_lmb_associativity_index(struct drmem_lmb *lmb) { struct device_node *parent, *lmb_node, *dr_node; struct property *ala_prop; @@ -202,44 +202,16 @@ static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb) aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc); + of_node_put(dr_node); dlpar_free_cc_nodes(lmb_node); - return aa_index; -} - -static int dlpar_add_device_tree_lmb(struct drmem_lmb *lmb) -{ - int rc, aa_index; - - lmb->flags |= DRCONF_MEM_ASSIGNED; - aa_index = lookup_lmb_associativity_index(lmb); if (aa_index < 0) { - pr_err("Couldn't find associativity index for drc index %x\n", - lmb->drc_index); - return aa_index; + pr_err("Could not find LMB associativity\n"); + return -1; } lmb->aa_index = aa_index; - - rtas_hp_event = true; - rc = drmem_update_dt(); - rtas_hp_event = false; - - return rc; -} - -static int dlpar_remove_device_tree_lmb(struct drmem_lmb *lmb) -{ - int rc; - - lmb->flags &= ~DRCONF_MEM_ASSIGNED; - lmb->aa_index = 0xffffffff; - - rtas_hp_event = true; - rc = drmem_update_dt(); - rtas_hp_event = false; - - return rc; + return 0; } static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb) @@ -334,7 +306,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz nid = memory_add_physaddr_to_nid(base); for (i = 0; i < sections_per_block; i++) { - remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE); + __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE); base += MIN_MEMORY_BLOCK_SIZE; } @@ -389,8 +361,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb) phys_addr = lmb->base_addr; #ifdef CONFIG_FA_DUMP - /* Don't hot-remove memory that falls in fadump boot memory area */ - if (is_fadump_boot_memory_area(phys_addr, block_sz)) + /* + * Don't hot-remove memory that falls 
in fadump boot memory area + * and memory that is reserved for capturing old kernel memory. + */ + if (is_fadump_memory_area(phys_addr, block_sz)) return false; #endif @@ -423,12 +398,14 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb) block_sz = pseries_memory_block_size(); nid = memory_add_physaddr_to_nid(lmb->base_addr); - remove_memory(nid, lmb->base_addr, block_sz); + __remove_memory(nid, lmb->base_addr, block_sz); /* Update memory regions for memory remove */ memblock_remove(lmb->base_addr, block_sz); - dlpar_remove_device_tree_lmb(lmb); + invalidate_lmb_associativity_index(lmb); + lmb->flags &= ~DRCONF_MEM_ASSIGNED; + return 0; } @@ -513,7 +490,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index) int lmb_found; int rc; - pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index); + pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index); lmb_found = 0; for_each_drmem_lmb(lmb) { @@ -527,14 +504,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index) } } - if (!lmb_found) + if (!lmb_found) { + pr_debug("Failed to look up LMB for drc index %x\n", drc_index); rc = -EINVAL; - - if (rc) - pr_info("Failed to hot-remove memory at %llx\n", - lmb->base_addr); - else - pr_info("Memory at %llx was hot-removed\n", lmb->base_addr); + } else if (rc) { + pr_debug("Failed to hot-remove memory at %llx\n", + lmb->base_addr); + } else { + pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr); + } return rc; } @@ -688,10 +666,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) if (lmb->flags & DRCONF_MEM_ASSIGNED) return -EINVAL; - rc = dlpar_add_device_tree_lmb(lmb); + rc = update_lmb_associativity_index(lmb); if (rc) { - pr_err("Couldn't update device tree for drc index %x\n", - lmb->drc_index); dlpar_release_drc(lmb->drc_index); return rc; } @@ -702,16 +678,16 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) nid = memory_add_physaddr_to_nid(lmb->base_addr); /* Add the memory */ - rc = add_memory(nid, lmb->base_addr, block_sz); + rc = __add_memory(nid, lmb->base_addr, block_sz); if (rc) { - dlpar_remove_device_tree_lmb(lmb); + invalidate_lmb_associativity_index(lmb); return rc; } rc = dlpar_online_lmb(lmb); if (rc) { - remove_memory(nid, lmb->base_addr, block_sz); - dlpar_remove_device_tree_lmb(lmb); + __remove_memory(nid, lmb->base_addr, block_sz); + invalidate_lmb_associativity_index(lmb); } else { lmb->flags |= DRCONF_MEM_ASSIGNED; } @@ -789,8 +765,8 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add) if (!drmem_lmb_reserved(lmb)) continue; - pr_info("Memory at %llx (drc index %x) was hot-added\n", - lmb->base_addr, lmb->drc_index); + pr_debug("Memory at %llx (drc index %x) was hot-added\n", + lmb->base_addr, lmb->drc_index); drmem_remove_lmb_reservation(lmb); } rc = 0; @@ -958,6 +934,12 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) break; } + if (!rc) { + rtas_hp_event = true; + rc = drmem_update_dt(); + rtas_hp_event = false; + } + unlock_device_hotplug(); return rc; } @@ -1009,6 +991,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr) if (!memblock_size) return -EINVAL; + if (!pr->old_prop) + return 0; + p = (__be32 *) pr->old_prop->value; if (!p) return -EINVAL; diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index d91412c591effa968b7235f43bcf9081df07b4c6..50dc9426d0be94528f3078886595f1d257b0e282 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -19,7 +19,7 @@ #ifdef CONFIG_TRACEPOINTS -#ifndef HAVE_JUMP_LABEL 
+#ifndef CONFIG_JUMP_LABEL .section ".toc","aw" .globl hcall_tracepoint_refcount @@ -79,7 +79,7 @@ hcall_tracepoint_refcount: mr r5,BUFREG; \ __HCALL_INST_POSTCALL -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL #define HCALL_BRANCH(LABEL) \ ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) #else diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c index 74da18de853af670f911acc800e50d076bff4f8c..73ec15cd27080f256cd86ac57f04f48550686603 100644 --- a/arch/powerpc/platforms/pseries/hvconsole.c +++ b/arch/powerpc/platforms/pseries/hvconsole.c @@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars); * @vtermno: The vtermno or unit_address of the adapter from which the data * originated. * @buf: The character buffer that contains the character data to send to - * firmware. + * firmware. Must be at least 16 bytes, even if count is less than 16. * @count: Send this number of characters. */ int hvc_put_chars(uint32_t vtermno, const char *buf, int count) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index d3992ced0782bcaae22191e88929a950105e7489..49e3a88b6a0c1474656c3c7020ea5b3d7e8c4691 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "pseries.h" @@ -647,7 +648,10 @@ static int pseries_lpar_resize_hpt_commit(void *data) return 0; } -/* Must be called in user context */ +/* + * Must be called in process context. The caller must hold the + * cpus_lock. + */ static int pseries_lpar_resize_hpt(unsigned long shift) { struct hpt_resize_state state = { @@ -699,7 +703,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift) t1 = ktime_get(); - rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL); + rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit, + &state, NULL); t2 = ktime_get(); @@ -828,7 +833,7 @@ EXPORT_SYMBOL(arch_free_page); #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_TRACEPOINTS -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; int hcall_tracepoint_regfunc(void) @@ -1028,3 +1033,56 @@ static int __init reserve_vrma_context_id(void) return 0; } machine_device_initcall(pseries, reserve_vrma_context_id); + +#ifdef CONFIG_DEBUG_FS +/* debugfs file interface for vpa data */ +static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, + loff_t *pos) +{ + int cpu = (long)filp->private_data; + struct lppaca *lppaca = &lppaca_of(cpu); + + return simple_read_from_buffer(buf, len, pos, lppaca, + sizeof(struct lppaca)); +} + +static const struct file_operations vpa_fops = { + .open = simple_open, + .read = vpa_file_read, + .llseek = default_llseek, +}; + +static int __init vpa_debugfs_init(void) +{ + char name[16]; + long i; + static struct dentry *vpa_dir; + + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + + vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); + if (!vpa_dir) { + pr_warn("%s: can't create vpa root dir\n", __func__); + return -ENOMEM; + } + + /* set up the per-cpu vpa file*/ + for_each_possible_cpu(i) { + struct dentry *d; + + sprintf(name, "cpu-%ld", i); + + d = debugfs_create_file(name, 0400, vpa_dir, (void *)i, + &vpa_fops); + if (!d) { + pr_warn("%s: can't create per-cpu vpa file\n", + __func__); + return -ENOMEM; + } + } + + return 0; +} +machine_arch_initcall(pseries, vpa_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/powerpc/platforms/pseries/mobility.c 
b/arch/powerpc/platforms/pseries/mobility.c index f0e30dc949888a578ecfd6939c65b1eecc210604..e4ea713833832273bbc3e1b0a5fc9b13c214bb6b 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -9,8 +9,10 @@ * 2 as published by the Free Software Foundation. */ +#include #include #include +#include #include #include #include @@ -208,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope) prop_data += vd; } + + cond_resched(); } + + cond_resched(); } while (rtas_rc == 1); of_node_put(dn); @@ -317,8 +323,12 @@ int pseries_devicetree_update(s32 scope) add_dt_node(phandle, drc_index); break; } + + cond_resched(); } } + + cond_resched(); } while (rc == 1); kfree(rtas_buf); @@ -344,11 +354,19 @@ void post_mobility_fixup(void) if (rc) printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc); + /* + * We don't want CPUs to go online/offline while the device + * tree is being updated. + */ + cpus_read_lock(); + rc = pseries_devicetree_update(MIGRATION_SCOPE); if (rc) printk(KERN_ERR "Post-mobility device tree update " "failed: %d\n", rc); + cpus_read_unlock(); + /* Possibly switch to a new RFI flush type */ pseries_setup_rfi_flush(); diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index 6ed22127391b6d0a7789bb363476452bf0991a65..921f12182f3e01a850372fd51b0a88a5bede296c 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu) ret = drc.drc_index_start + (thread_index * drc.sequential_inc); } else { - const __be32 *indexes; - - indexes = of_get_property(dn, "ibm,drc-indexes", NULL); - if (indexes == NULL) - goto err_of_node_put; + u32 nr_drc_indexes, thread_drc_index; /* - * The first element indexes[0] is the number of drc_indexes - * returned in the list. Hence thread_index+1 will get the - * drc_index corresponding to core number thread_index. + * The first element of ibm,drc-indexes array is the + * number of drc_indexes returned in the list. Hence + * thread_index+1 will get the drc_index corresponding + * to core number thread_index. 
*/ - ret = indexes[thread_index + 1]; + rc = of_property_read_u32_index(dn, "ibm,drc-indexes", + 0, &nr_drc_indexes); + if (rc) + goto err_of_node_put; + + WARN_ON_ONCE(thread_index > nr_drc_indexes); + rc = of_property_read_u32_index(dn, "ibm,drc-indexes", + thread_index + 1, + &thread_drc_index); + if (rc) + goto err_of_node_put; + + ret = thread_drc_index; } rc = 0; diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c index 054ce7a16fc336b42b1c05243be5a45a68b91d5a..24b157e1e89020de3c881187bbc3b96ea643b9dd 100644 --- a/arch/powerpc/platforms/pseries/scanlog.c +++ b/arch/powerpc/platforms/pseries/scanlog.c @@ -63,7 +63,7 @@ static ssize_t scanlog_read(struct file *file, char __user *buf, return -EINVAL; } - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; for (;;) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ba1791fd3234dbbfd5fac997ea1433119d44f92d..35ec6ed65bd1b87e8faa909fdfa7aac0d6000736 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -75,6 +75,9 @@ #include "pseries.h" #include "../../../../drivers/pci/pci.h" +DEFINE_STATIC_KEY_FALSE(shared_processor); +EXPORT_SYMBOL_GPL(shared_processor); + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -306,8 +309,8 @@ static inline int alloc_dispatch_logs(void) static int alloc_dispatch_log_kmem_cache(void) { - dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, - DISPATCH_LOG_BYTES, 0, NULL); + dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, NULL); if (!dtl_cache) { pr_warn("Failed to create dispatch trace log buffer cache\n"); pr_warn("Stolen time statistics will be unreliable\n"); @@ -325,6 +328,9 @@ static void pseries_lpar_idle(void) * low power mode by ceding processor to hypervisor */ + if (!prep_irq_for_idle()) + return; + /* Indicate to hypervisor that we are idle. 
*/ get_lppaca()->idle = 1; @@ -559,6 +565,14 @@ void pseries_setup_rfi_flush(void) setup_rfi_flush(types, enable); setup_count_cache_flush(); + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); + setup_entry_flush(enable); + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); + setup_uaccess_flush(enable); } #ifdef CONFIG_PCI_IOV @@ -758,6 +772,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); + + if (lppaca_shared_proc(get_lppaca())) + static_branch_enable(&shared_processor); + ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; #ifdef CONFIG_PCI_IOV diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 37bfbc54aacb438e4fe111968977bfd320014c2e..340de58a15bd69c6ca9ece2a8010fcf14095c3e8 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -145,7 +145,7 @@ static unsigned int icp_native_get_irq(void) static void icp_native_cause_ipi(int cpu) { - kvmppc_set_host_ipi(cpu, 1); + kvmppc_set_host_ipi(cpu); icp_native_set_qirr(cpu, IPI_PRIORITY); } @@ -184,7 +184,7 @@ void icp_native_flush_interrupt(void) if (vec == XICS_IPI) { /* Clear pending IPI */ int cpu = smp_processor_id(); - kvmppc_set_host_ipi(cpu, 0); + kvmppc_clear_host_ipi(cpu); icp_native_set_qirr(cpu, 0xff); } else { pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n", @@ -205,7 +205,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) { int cpu = smp_processor_id(); - kvmppc_set_host_ipi(cpu, 0); + kvmppc_clear_host_ipi(cpu); icp_native_set_qirr(cpu, 0xff); return smp_ipi_demux(); diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index c71d2ea42627ad6f4f95854d3a41ab0e38036a21..e3e52cf035a92a3e18d729a7c8f83d4c63ae264c 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -130,7 +130,7 @@ static void icp_opal_cause_ipi(int cpu) { int hw_cpu = get_hard_smp_processor_id(cpu); - kvmppc_set_host_ipi(cpu, 1); + kvmppc_set_host_ipi(cpu); opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); } @@ -138,7 +138,7 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) { int cpu = smp_processor_id(); - kvmppc_set_host_ipi(cpu, 0); + kvmppc_clear_host_ipi(cpu); opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); return smp_ipi_demux(); @@ -161,7 +161,7 @@ void icp_opal_flush_interrupt(void) if (vec == XICS_IPI) { /* Clear pending IPI */ int cpu = smp_processor_id(); - kvmppc_set_host_ipi(cpu, 0); + kvmppc_clear_host_ipi(cpu); opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); } else { pr_err("XICS: hw interrupt 0x%x to offline cpu, " diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 959a2a62f23329775beba45473e944395005192a..3c939b9de488ec21c60ee9d32ef2eb6a16a5ccd8 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -483,7 +483,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask, * Now go through the entire mask until we find a valid * target. 
*/ - for (;;) { + do { /* * We re-check online as the fallback case passes us * an untested affinity mask @@ -491,12 +491,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask, if (cpu_online(cpu) && xive_try_pick_target(cpu)) return cpu; cpu = cpumask_next(cpu, mask); - if (cpu == first) - break; /* Wrap around */ if (cpu >= nr_cpu_ids) cpu = cpumask_first(mask); - } + } while (cpu != first); + return -1; } @@ -969,6 +968,15 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) xd->target = XIVE_INVALID_TARGET; irq_set_handler_data(virq, xd); + /* + * Turn OFF by default the interrupt being mapped. A side + * effect of this check is the mapping the ESB page of the + * interrupt in the Linux address space. This prevents page + * fault issues in the crash handler which masks all + * interrupts. + */ + xive_esb_read(xd, XIVE_ESB_SET_PQ_01); + return 0; } @@ -1010,12 +1018,13 @@ static void xive_ipi_eoi(struct irq_data *d) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); - DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", - d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); - /* Handle possible race with unplug and drop stale IPIs */ if (!xc) return; + + DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", + d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); + xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data); xive_do_queue_eoi(xc); } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 5b20a678d755b6f3e2bdb29a0bec9132521e70d4..6d5b2802245285a700924258dea315b7c3629a44 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -235,6 +235,17 @@ static bool xive_native_match(struct device_node *node) return of_device_is_compatible(node, "ibm,opal-xive-vc"); } +static s64 opal_xive_allocate_irq(u32 chip_id) +{ + s64 irq = opal_xive_allocate_irq_raw(chip_id); + + /* + * Old versions of skiboot can incorrectly return 0xffffffff to + * indicate no space, fix it up here. + */ + return irq == 0xffffffff ? OPAL_RESOURCE : irq; +} + #ifdef CONFIG_SMP static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc) { diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 575db3b06a6b84acdcb7625dc3b1e2eaa688e882..e3ebf64693929f64da6248495a0297e980bcc1c6 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -359,20 +359,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->esb_shift = esb_shift; data->trig_page = trig_page; + data->hw_irq = hw_irq; + /* * No chip-id for the sPAPR backend. This has an impact how we * pick a target. See xive_pick_irq_target(). */ data->src_chip = XIVE_INVALID_CHIP_ID; + /* + * When the H_INT_ESB flag is set, the H_INT_ESB hcall should + * be used for interrupt management. Skip the remapping of the + * ESB pages which are not available. 
+ */ + if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB) + return 0; + data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift); if (!data->eoi_mmio) { pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq); return -ENOMEM; } - data->hw_irq = hw_irq; - /* Full function page supports trigger */ if (flags & XIVE_SRC_TRIGGER) { data->trig_mmio = data->eoi_mmio; diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh index ec2d5c835170a6753a588d4ba6a16d2c51412d3c..d6c16e7faa3871b989a162f7d52b55b1727a5bbe 100755 --- a/arch/powerpc/tools/relocs_check.sh +++ b/arch/powerpc/tools/relocs_check.sh @@ -23,7 +23,7 @@ objdump="$1" vmlinux="$2" bad_relocs=$( -"$objdump" -R "$vmlinux" | +$objdump -R "$vmlinux" | # Only look at relocation lines. grep -E '\:' | awk '{print $1}' ) BRANCHES=$( -"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \ +$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \ --stop-address=${end_intr} | grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" | grep -v '\<__start_initialization_multiplatform>' | diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 1bc3abb237cda0a4125cc7738af4ddfcd26a9d39..365e711bebabbb4b8209fffadcaf6ce7fb63d4af 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,14 +1,23 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror +# Avoid clang warnings around longjmp/setjmp declarations +subdir-ccflags-y := -ffreestanding + +subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror GCOV_PROFILE := n UBSAN_SANITIZE := n # Disable ftrace for the entire directory ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))) +KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) + +ifdef CONFIG_CC_IS_CLANG +# clang stores addresses on the stack causing the frame size to blow +# out. See https://github.com/ClangBuiltLinux/linux/issues/252 +KBUILD_CFLAGS += -Wframe-larger-than=4096 +endif ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index 9deea5ee13f652cd6c8a8b7f1f7d58a9229f31ae..27f1e64150360e1cb030d2a3840e835a91da6219 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3), + | PPC_OPCODE_VSX | PPC_OPCODE_VSX3); /* Get the major opcode of the insn. */ opcode = NULL; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4264aedc7775a70b4df3af92378fb7c23b3712e8..f0fa22e7d36c78b0011e4a7932266a9609f825bb 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -75,6 +75,9 @@ static int xmon_gate; #define xmon_owner 0 #endif /* CONFIG_SMP */ +#ifdef CONFIG_PPC_PSERIES +static int set_indicator_token = RTAS_UNKNOWN_SERVICE; +#endif static unsigned long in_xmon __read_mostly = 0; static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT); @@ -358,7 +361,6 @@ static inline void disable_surveillance(void) #ifdef CONFIG_PPC_PSERIES /* Since this can't be a module, args should end up below 4GB. 
*/ static struct rtas_args args; - int token; /* * At this point we have got all the cpus we can into @@ -367,11 +369,11 @@ static inline void disable_surveillance(void) * If we did try to take rtas.lock there would be a * real possibility of deadlock. */ - token = rtas_token("set-indicator"); - if (token == RTAS_UNKNOWN_SERVICE) + if (set_indicator_token == RTAS_UNKNOWN_SERVICE) return; - rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); + rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL, + SURVEILLANCE_TOKEN, 0, 0); #endif /* CONFIG_PPC_PSERIES */ } @@ -464,8 +466,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) local_irq_save(flags); hard_irq_disable(); - tracing_enabled = tracing_is_on(); - tracing_off(); + if (!fromipi) { + tracing_enabled = tracing_is_on(); + tracing_off(); + } bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL) { @@ -2493,13 +2497,16 @@ static void dump_pacas(void) static void dump_one_xive(int cpu) { unsigned int hwid = get_hard_smp_processor_id(cpu); + bool hv = cpu_has_feature(CPU_FTR_HVMODE); - opal_xive_dump(XIVE_DUMP_TM_HYP, hwid); - opal_xive_dump(XIVE_DUMP_TM_POOL, hwid); - opal_xive_dump(XIVE_DUMP_TM_OS, hwid); - opal_xive_dump(XIVE_DUMP_TM_USER, hwid); - opal_xive_dump(XIVE_DUMP_VP, hwid); - opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid); + if (hv) { + opal_xive_dump(XIVE_DUMP_TM_HYP, hwid); + opal_xive_dump(XIVE_DUMP_TM_POOL, hwid); + opal_xive_dump(XIVE_DUMP_TM_OS, hwid); + opal_xive_dump(XIVE_DUMP_TM_USER, hwid); + opal_xive_dump(XIVE_DUMP_VP, hwid); + opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid); + } if (setjmp(bus_error_jmp) != 0) { catch_memory_errors = 0; @@ -3486,7 +3493,7 @@ void dump_segments(void) printf("sr0-15 ="); for (i = 0; i < 16; ++i) - printf(" %x", mfsrin(i)); + printf(" %x", mfsrin(i << 28)); printf("\n"); } #endif @@ -3672,6 +3679,14 @@ static void xmon_init(int enable) __debugger_iabr_match = xmon_iabr_match; __debugger_break_match = xmon_break_match; __debugger_fault_handler = xmon_fault_handler; + +#ifdef CONFIG_PPC_PSERIES + /* + * Get the token here to avoid trying to get a lock + * during the crash, causing a deadlock. 
+ */ + set_indicator_token = rtas_token("set-indicator"); +#endif } else { __debugger = NULL; __debugger_ipi = NULL; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a344980287a519eec038d553c8ae4d0f82add8e3..1efa59f493cca4ef56d390f6b9657b921a826fb3 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -11,6 +11,7 @@ config 32BIT config RISCV def_bool y + select ARCH_32BIT_OFF_T if !64BIT # even on 32-bit, physical (and DMA) addresses are > 32-bits select PHYS_ADDR_T_64BIT select OF diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 61ec42405ec9630082beb941cb58a963127c13ad..110be14e612261677ff2451648829d60c09006b8 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,4 +82,8 @@ core-y += arch/riscv/kernel/ arch/riscv/mm/ libs-y += arch/riscv/lib/ +PHONY += vdso_install +vdso_install: + $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ + all: vmlinux diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index 349df33808c4231d155d2e6018e19cc4cb19cb4d..cd2af4b013e3826e3b43f44565a9b6c1c6ae7b70 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h @@ -8,6 +8,7 @@ #define MODULE_ARCH_VERMAGIC "riscv" +struct module; u64 module_emit_got_entry(struct module *mod, u64 val); u64 module_emit_plt_entry(struct module *mod, u64 val); diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221e480088ab1a5681c9240ef3a1e23..470755cb75584ee6e6b8c607070b2e89669ce816 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -35,6 +35,12 @@ #define _PAGE_SPECIAL _PAGE_SOFT #define _PAGE_TABLE _PAGE_PRESENT +/* + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to + * distinguish them from swapped out pages + */ +#define _PAGE_PROT_NONE _PAGE_READ + #define _PAGE_PFN_SHIFT 10 /* Set of bits to preserve across pte_modify() */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b6fd8a54614d12f0815866d19d948..a8179a8c1491c28a6ad3eeedda34a122bc00a2c3 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -44,7 +44,7 @@ /* Page protection bits */ #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) -#define PAGE_NONE __pgprot(0) +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) @@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; static inline int pmd_present(pmd_t pmd) { - return (pmd_val(pmd) & _PAGE_PRESENT); + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pmd_none(pmd_t pmd) @@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) static inline int pte_present(pte_t pte) { - return (pte_val(pte) & _PAGE_PRESENT); + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pte_none(pte_t pte) @@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, * * Format of swap PTE: * bit 0: _PAGE_PRESENT (zero) - * bit 1: reserved for future use (zero) + * bit 1: _PAGE_PROT_NONE (zero) * bits 2 to 6: swap type * bits 7 to XLEN-1: swap offset */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 3fe4af8147d2d6bc692f9ad32f2015e77c01a4bd..c23578a37b44317d1705c7ee1ec37cedc00a1e10 
100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -22,7 +22,7 @@ * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX STACK_TOP diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index dd6b05bff75b6fd41b3fd12239ace6edcf022bad..d911a8c2314d209871c8cb29ac9f1a81d4236f53 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from); static inline void __fstate_clean(struct pt_regs *regs) { - regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN; } static inline void fstate_save(struct task_struct *task, diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 8d25f8904c008314beaf790b190ec41fd987e8be..1dcde0fda435695d30d713fdf2d00c6990acd3e8 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -78,10 +78,11 @@ static inline void syscall_get_arguments(struct task_struct *task, if (i == 0) { args[0] = regs->orig_a0; args++; - i++; n--; + } else { + i--; } - memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); + memcpy(args, ®s->a1 + i, n * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, @@ -93,10 +94,11 @@ static inline void syscall_set_arguments(struct task_struct *task, if (i == 0) { regs->orig_a0 = args[0]; args++; - i++; n--; - } - memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); + } else { + i--; + } + memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); } #endif /* _ASM_RISCV_SYSCALL_H */ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 473cfc84e412f3827703caaadffa34a8983c978d..aa82df30e38a63cb02989d702360b7b84be14a99 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -54,14 +54,8 @@ static inline void set_fs(mm_segment_t fs) #define user_addr_max() (get_fs()) -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /** * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. * @addr: User space pointer to start of block to check * @size: Size of block to check * @@ -76,7 +70,7 @@ static inline void set_fs(mm_segment_t fs) * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ -#define access_ok(type, addr, size) ({ \ +#define access_ok(addr, size) ({ \ __chk_user_ptr(addr); \ likely(__access_ok((unsigned long __force)(addr), (size))); \ }) @@ -258,7 +252,7 @@ do { \ ({ \ const __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ + access_ok(__p, sizeof(*__p)) ? \ __get_user((x), __p) : \ ((x) = 0, -EFAULT); \ }) @@ -307,7 +301,7 @@ do { \ " .balign 4\n" \ "4:\n" \ " li %0, %6\n" \ - " jump 2b, %1\n" \ + " jump 3b, %1\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .balign " RISCV_SZPTR "\n" \ @@ -386,7 +380,7 @@ do { \ ({ \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? 
\ + access_ok(__p, sizeof(*__p)) ? \ __put_user((x), __p) : \ -EFAULT; \ }) @@ -400,13 +394,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_from_user(to, from, n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_to_user(to, from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -421,7 +415,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); - return access_ok(VERIFY_WRITE, to, n) ? + return access_ok(to, n) ? __clear_user(to, n) : n; } diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index 0caea01d5ccabf58de5e7199c77ae38d650f13c0..1b164c6cbba1d8c6e1ad2f00ee7bd322cc359150 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h @@ -17,5 +17,6 @@ */ #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SET_GET_RLIMIT #include #include diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..ff9abc00d1394f58933cb4f90d9b773c795b0f0d --- /dev/null +++ b/arch/riscv/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_RISCV_VMALLOC_H +#define _ASM_RISCV_VMALLOC_H + +#endif /* _ASM_RISCV_VMALLOC_H */ diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index fa2c08e3c05e6ee74ea0258b62cdf6580f98a385..a03821b2656aa80d8879ae614ce67ae47b40ad04 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -171,9 +171,13 @@ ENTRY(handle_exception) move a1, s4 /* scause */ tail do_IRQ 1: - /* Exceptions run with interrupts enabled */ + /* Exceptions run with interrupts enabled or disabled + depending on the state of sstatus.SR_SPIE */ + andi t0, s1, SR_SPIE + beqz t0, 1f csrs sstatus, SR_SIE +1: /* Handle syscalls */ li t0, EXC_SYSCALL beq s4, t0, handle_syscall diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 1157b6b52d259fa79edf0d44f589022174aff18c..6d39f64e4dce4eefbe060dce93e1c47f563c113c 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -132,8 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, { unsigned long return_hooker = (unsigned long)&return_to_handler; unsigned long old; - struct ftrace_graph_ent trace; - int err; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; @@ -144,17 +142,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - if (!ftrace_graph_entry(&trace)) - return; - - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, parent); - if (err == -EBUSY) - return; - *parent = return_hooker; + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) + *parent = return_hooker; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 3303ed2cd4193f82c51730a992d6c875b361ff80..7dd308129b40f1862ab04dc1e12c790bf7c111fe 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { if (v != (u32)v) { pr_err("%s: value %016llx out of range 
for 32-bit field\n", - me->name, v); + me->name, (long long)v); return -EINVAL; } *location = v; @@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, if (offset != (s32)offset) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, if (IS_ENABLED(CMODEL_MEDLOW)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, } else { pr_err( "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } @@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, } else { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } } @@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, if (offset != fill_v) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, v, location); + me->name, (long long)v, location); return -EINVAL; } diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 9f82a7e34c648a370ec42f2e0bad711058e9baf2..9db7d0076375a9381bb5fd5a70a0029c51fdbeee 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs) #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_exit(regs, regs->regs[0]); + trace_sys_exit(regs, regs_return_value(regs)); #endif } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index b2d26d9d8489c8e8b6bba01adee0c573fd6564f8..9713d4e8c22bbe15ca33cb81c1a1b7f78d19adcb 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -186,7 +186,7 @@ static void __init setup_bootmem(void) BUG_ON(mem_size == 0); set_max_mapnr(PFN_DOWN(mem_size)); - max_low_pfn = memblock_end_of_DRAM(); + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 718d0c984ef094dc5b3c02f8e0291910159ebc3e..587fa414a7fb9bca066db6b2c114f4ebdabd981a 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -91,7 +91,7 @@ SYSCALL_DEFINE0(rt_sigreturn) frame = (struct rt_sigframe __user *)regs->sp; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) @@ -166,7 +166,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, long err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c index dce8ae24c6d33b4ca3ebd0c19c3650f1ce890bde..ee6853c1e341c5455c9440cdb1cc585351b5541f 100644 --- a/arch/riscv/lib/delay.c +++ b/arch/riscv/lib/delay.c @@ -88,7 +88,7 @@ EXPORT_SYMBOL(__delay); void udelay(unsigned 
long usecs) { - unsigned long ucycles = usecs * lpj_fine * UDELAY_MULT; + u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT; if (unlikely(usecs > MAX_UDELAY_US)) { __delay((u64)usecs * riscv_timebase / 1000000ULL); diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 88401d5125bcc0b354833eb5205b7b995f033752..de529e7a8aee160b29745a441d91161bbf87bf76 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -29,6 +29,7 @@ #include #include +#include /* * This routine handles page faults. It determines the address and the @@ -154,11 +155,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs) 1, regs, addr); } if (fault & VM_FAULT_RETRY) { - /* - * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. - */ - flags &= ~(FAULT_FLAG_ALLOW_RETRY); flags |= FAULT_FLAG_TRIED; /* @@ -281,6 +277,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs) pte_k = pte_offset_kernel(pmd_k, addr); if (!pte_present(*pte_k)) goto no_context; + + /* + * The kernel assumes that TLBs don't cache invalid + * entries, but in RISC-V, SFENCE.VMA specifies an + * ordering constraint, not a cache flush; it is + * necessary even after writing invalid entries. + * Relying on flush_tlb_fix_spurious_fault would + * suffice, but the extra traps reduce + * performance. So, eagerly SFENCE.VMA. + */ + local_flush_tlb_page(addr); + return; } } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 58a522f9bcc319ae5d40a8ae15da5d9021921ebd..200a4b315e15a0f631eb24b1384e5bec0810401e 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -29,7 +29,8 @@ static void __init zone_sizes_init(void) unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, + (unsigned long) PFN_PHYS(max_low_pfn))); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c index 70ef2724cdf61e5b2001f0ec6243b7f5e9c6bfaa..bd2f2db557cc54f182794284bb0d9d317cb2b36a 100644 --- a/arch/riscv/mm/ioremap.c +++ b/arch/riscv/mm/ioremap.c @@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size, /* Page-align mappings */ offset = addr & (~PAGE_MASK); - addr &= PAGE_MASK; + addr -= offset; size = PAGE_ALIGN(size + offset); area = get_vm_area_caller(size, VM_IOREMAP, caller); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9a9c7a6fe925915f561dd48c454e1e02d5ee7a51..18fb5ece51de88034fef5d19bff7ec4c50b8bea9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -107,7 +107,7 @@ config S390 select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANT_IPC_PARSE_VERSION - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 select DYNAMIC_FTRACE if FUNCTION_TRACER select GENERIC_CLOCKEVENTS @@ -151,7 +151,7 @@ config S390 select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_KVM - select HAVE_LIVEPATCH + select HAVE_LIVEPATCH_FTRACE select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_MEMBLOCK diff --git a/arch/s390/Makefile b/arch/s390/Makefile index ee65185bbc807284b8729b5c9d52c129fafba30e..e6c2e8925fefa3dc5e45322438d8a38cb21dbc7a 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -24,6 +24,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables 
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding) +KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) UTS_MACHINE := s390x diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 9e6668ee93de83122fbfc1c3ade9ab9b0519d87c..f6a9b0c2035530fef55f9cf579d63a396c4598a9 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -6,6 +6,7 @@ KCOV_INSTRUMENT := n GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 04609478d18b99303909a01d7b313df8c799873b..9b3d821e5b46ecd56d66e80ef2006ad879695afb 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -8,6 +8,7 @@ KCOV_INSTRUMENT := n GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 @@ -20,7 +21,7 @@ KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) OBJECTS := $(addprefix $(obj)/,$(obj-y)) LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T -$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) +$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE $(call if_changed,ld) # extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin @@ -51,17 +52,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma suffix-$(CONFIG_KERNEL_LZO) := .lzo suffix-$(CONFIG_KERNEL_XZ) := .xz -$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE $(call if_changed,gzip) -$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE $(call if_changed,bzip2) -$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE $(call if_changed,lz4) -$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzma) -$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzo) -$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) +$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE $(call if_changed,xzkern) LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 941d8cc6c9f5990ffe5aa39aa16bd79ff70ed9fc..259d1698ac50a468021e17a6a2fbe93526f520f2 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index eb6f75f242089b6f67115bbbd74c7e8a4f2fbeac..37fd60c20e22dec8cd8452baaf89135debccf735 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m 
CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c54cb26eb7f5049b047a3cd140c9069926a615b2..7e16cb5b5cc7af8401d3c9baa922360489ac39b2 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -27,14 +27,14 @@ #include #include #include -#include +#include #include #include #include #include static u8 *ctrblk; -static DEFINE_SPINLOCK(ctrblk_lock); +static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions, kma_functions; @@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_encrypt(desc, dst, src, nbytes); @@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_decrypt(desc, dst, src, nbytes); @@ -698,7 +704,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, unsigned int n, nbytes; int ret, locked; - locked = spin_trylock(&ctrblk_lock); + locked = mutex_trylock(&ctrblk_lock); ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { @@ -716,13 +722,14 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { - cpacf_kmctr(sctx->fc | modifier, sctx->key, - buf, walk->src.virt.addr, + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk->src.virt.addr, nbytes); + cpacf_kmctr(sctx->fc | modifier, sctx->key, buf, buf, AES_BLOCK_SIZE, walk->iv); memcpy(walk->dst.virt.addr, buf, nbytes); crypto_inc(walk->iv, AES_BLOCK_SIZE); @@ -826,19 +833,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize) return 0; } -static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, - unsigned int len) +static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, + unsigned int len) { memset(gw, 0, sizeof(*gw)); gw->walk_bytes_remain = len; scatterwalk_start(&gw->walk, sg); } -static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) +static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw) +{ + struct scatterlist *nextsg; + + gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); + while (!gw->walk_bytes) { + nextsg = sg_next(gw->walk.sg); + if (!nextsg) + return 0; + scatterwalk_start(&gw->walk, nextsg); + gw->walk_bytes = scatterwalk_clamp(&gw->walk, + gw->walk_bytes_remain); + } + gw->walk_ptr = scatterwalk_map(&gw->walk); + return gw->walk_bytes; +} + +static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, + unsigned int nbytes) +{ + gw->walk_bytes_remain -= nbytes; + scatterwalk_unmap(&gw->walk); + scatterwalk_advance(&gw->walk, nbytes); + scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); + gw->walk_ptr = NULL; +} + +static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) { int n; - /* minbytesneeded <= AES_BLOCK_SIZE */ if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) { gw->ptr = gw->buf; gw->nbytes = 
gw->buf_bytes; @@ -851,13 +884,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) goto out; } - gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); - if (!gw->walk_bytes) { - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; } - gw->walk_ptr = scatterwalk_map(&gw->walk); if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { gw->ptr = gw->walk_ptr; @@ -869,51 +900,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); gw->buf_bytes += n; - gw->walk_bytes_remain -= n; - scatterwalk_unmap(&gw->walk); - scatterwalk_advance(&gw->walk, n); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - + _gcm_sg_unmap_and_advance(gw, n); if (gw->buf_bytes >= minbytesneeded) { gw->ptr = gw->buf; gw->nbytes = gw->buf_bytes; goto out; } - - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); - if (!gw->walk_bytes) { - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; } - gw->walk_ptr = scatterwalk_map(&gw->walk); } out: return gw->nbytes; } -static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) { - int n; + if (gw->walk_bytes_remain == 0) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; + } + if (!_gcm_sg_clamp_and_map(gw)) { + gw->ptr = NULL; + gw->nbytes = 0; + goto out; + } + + if (gw->walk_bytes >= minbytesneeded) { + gw->ptr = gw->walk_ptr; + gw->nbytes = gw->walk_bytes; + goto out; + } + + scatterwalk_unmap(&gw->walk); + gw->walk_ptr = NULL; + + gw->ptr = gw->buf; + gw->nbytes = sizeof(gw->buf); + +out: + return gw->nbytes; +} + +static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +{ if (gw->ptr == NULL) - return; + return 0; if (gw->ptr == gw->buf) { - n = gw->buf_bytes - bytesdone; + int n = gw->buf_bytes - bytesdone; if (n > 0) { memmove(gw->buf, gw->buf + bytesdone, n); - gw->buf_bytes -= n; + gw->buf_bytes = n; } else gw->buf_bytes = 0; - } else { - gw->walk_bytes_remain -= bytesdone; - scatterwalk_unmap(&gw->walk); - scatterwalk_advance(&gw->walk, bytesdone); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - } + } else + _gcm_sg_unmap_and_advance(gw, bytesdone); + + return bytesdone; +} + +static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) +{ + int i, n; + + if (gw->ptr == NULL) + return 0; + + if (gw->ptr == gw->buf) { + for (i = 0; i < bytesdone; i += n) { + if (!_gcm_sg_clamp_and_map(gw)) + return i; + n = min(gw->walk_bytes, bytesdone - i); + memcpy(gw->walk_ptr, gw->buf + i, n); + _gcm_sg_unmap_and_advance(gw, n); + } + } else + _gcm_sg_unmap_and_advance(gw, bytesdone); + + return bytesdone; } static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) @@ -926,7 +996,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) unsigned int pclen = req->cryptlen; int ret = 0; - unsigned int len, in_bytes, out_bytes, + unsigned int n, len, in_bytes, out_bytes, min_bytes, bytes, aad_bytes, pc_bytes; struct gcm_sg_walk gw_in, gw_out; u8 tag[GHASH_DIGEST_SIZE]; @@ -963,14 +1033,14 
@@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) *(u32 *)(param.j0 + ivsize) = 1; memcpy(param.k, ctx->key, ctx->key_len); - gcm_sg_walk_start(&gw_in, req->src, len); - gcm_sg_walk_start(&gw_out, req->dst, len); + gcm_walk_start(&gw_in, req->src, len); + gcm_walk_start(&gw_out, req->dst, len); do { min_bytes = min_t(unsigned int, aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE); - in_bytes = gcm_sg_walk_go(&gw_in, min_bytes); - out_bytes = gcm_sg_walk_go(&gw_out, min_bytes); + in_bytes = gcm_in_walk_go(&gw_in, min_bytes); + out_bytes = gcm_out_walk_go(&gw_out, min_bytes); bytes = min(in_bytes, out_bytes); if (aadlen + pclen <= bytes) { @@ -997,8 +1067,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) gw_in.ptr + aad_bytes, pc_bytes, gw_in.ptr, aad_bytes); - gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes); - gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes); + n = aad_bytes + pc_bytes; + if (gcm_in_walk_done(&gw_in, n) != n) + return -ENOMEM; + if (gcm_out_walk_done(&gw_out, n) != n) + return -ENOMEM; aadlen -= aad_bytes; pclen -= pc_bytes; } while (aadlen + pclen > 0); diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 5346b5a80bb6c1bfd805b421e7fcf05e86157a12..65bda1178963c0617f9628b157a8e85fd954b67b 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,7 +22,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; -static DEFINE_SPINLOCK(ctrblk_lock); +static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; @@ -387,7 +388,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, unsigned int n, nbytes; int ret, locked; - locked = spin_trylock(&ctrblk_lock); + locked = mutex_trylock(&ctrblk_lock); ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { @@ -404,7 +405,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (locked) - spin_unlock(&ctrblk_lock); + mutex_unlock(&ctrblk_lock); /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr, diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index ab9a0ebecc199b52507246b47db7b79dd0420058..7a8bba99867c19a8cfbac1700d5bb9b9c98dff97 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -483,10 +483,12 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, * final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk->src.virt.addr, nbytes); while (1) { if (cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey, buf, - walk->src.virt.addr, AES_BLOCK_SIZE, + buf, AES_BLOCK_SIZE, walk->iv) == AES_BLOCK_SIZE) break; if (__ctr_paes_set_key(ctx) != 0) diff --git a/arch/s390/defconfig b/arch/s390/defconfig index f40600eb17628cbeaa44857a479544b60c1a2068..5134c71a4937b00886bff9e5efdcb94049d918b0 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 
c681329fdeec6ba42c95cb321b4ea8ae23b0df06..e4d17d9ea93d86692ca25985e3c0fe1fc0df983a 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root) static int hypfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *root_inode; - struct dentry *root_dentry; + struct dentry *root_dentry, *update_file; int rc = 0; struct hypfs_sb_info *sbi; @@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) rc = hypfs_diag_create_files(root_dentry); if (rc) return rc; - sbi->update_file = hypfs_create_update_file(root_dentry); - if (IS_ERR(sbi->update_file)) - return PTR_ERR(sbi->update_file); + update_file = hypfs_create_update_file(root_dentry); + if (IS_ERR(update_file)) + return PTR_ERR(update_file); + sbi->update_file = update_file; hypfs_update_update(sb); pr_info("Hypervisor filesystem mounted\n"); return 0; diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 8c00fd509c45555fbde5ef9febe6b732c3c4f46b..1a6a7092d94209d4ee330003cfd3d2ccf713b916 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -221,16 +221,22 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) { register unsigned long reg0 asm ("0") = qid | (3UL << 24); - register struct ap_qirq_ctrl reg1_in asm ("1") = qirqctrl; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_qirq_ctrl qirqctrl; + struct ap_queue_status status; + } reg1 asm ("1"); register void *reg2 asm ("2") = ind; + reg1.qirqctrl = qirqctrl; + asm volatile( ".long 0xb2af0000" /* PQAP(AQIC) */ - : "=d" (reg1_out) - : "d" (reg0), "d" (reg1_in), "d" (reg2) + : "+d" (reg1) + : "d" (reg0), "d" (reg2) : "cc"); - return reg1_out; + + return reg1.status; } /* @@ -264,17 +270,21 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, { register unsigned long reg0 asm ("0") = qid | (5UL << 24) | ((ifbit & 0x01) << 22); - register unsigned long reg1_in asm ("1") = apinfo->val; - register struct ap_queue_status reg1_out asm ("1"); + register union { + unsigned long value; + struct ap_queue_status status; + } reg1 asm ("1"); register unsigned long reg2 asm ("2"); + reg1.value = apinfo->val; + asm volatile( ".long 0xb2af0000" /* PQAP(QACT) */ - : "+d" (reg1_in), "=d" (reg1_out), "=d" (reg2) + : "+d" (reg1), "=d" (reg2) : "d" (reg0) : "cc"); apinfo->val = reg2; - return reg1_out; + return reg1.status; } /** diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 7d22a474a040ddd3d0e76c84075db6ab17bb2263..f74639a05f0ffc33f638c264af58c48933e36139 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -252,11 +252,14 @@ do { \ /* * Cache aliasing on the latest machines calls for a mapping granularity - * of 512KB. For 64-bit processes use a 512KB alignment and a randomization - * of up to 1GB. For 31-bit processes the virtual address space is limited, - * use no alignment and limit the randomization to 8MB. + * of 512KB for the anonymous mapping base. For 64-bit processes use a + * 512KB alignment and a randomization of up to 1GB. For 31-bit processes + * the virtual address space is limited, use no alignment and limit the + * randomization to 8MB. + * For the additional randomization of the program break use 32MB for + * 64-bit and 8MB for 31-bit. */ -#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) +#define BRK_RND_MASK (is_compat_task() ? 
0x7ffUL : 0x1fffUL) #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) #define STACK_RND_MASK MMAP_RND_MASK diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index 99c8ce30b3cd1a4f70540ffa0d03cbb99e35df4d..7ffbc5d7ccf380fde85b0c13df4c019d50a87a88 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr) return __test_facility(nr, &S390_lowcore.stfle_fac_list); } +static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) +{ + register unsigned long reg0 asm("0") = size - 1; + + asm volatile( + ".insn s,0xb2b00000,0(%1)" /* stfle */ + : "+d" (reg0) + : "a" (stfle_fac_list) + : "memory", "cc"); + return reg0; +} + /** * stfle - Store facility list extended * @stfle_fac_list: array where facility list can be stored @@ -76,13 +88,8 @@ static inline void stfle(u64 *stfle_fac_list, int size) memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); if (S390_lowcore.stfl_fac_list & 0x01000000) { /* More facility bits available with stfle */ - register unsigned long reg0 asm("0") = size - 1; - - asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ - : "+d" (reg0) - : "a" (stfle_fac_list) - : "memory", "cc"); - nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ + nr = __stfle_asm(stfle_fac_list, size); + nr = min_t(unsigned long, (nr + 1) * 8, size * 8); } memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); preempt_enable(); diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 40f651292aa78997232c74ae5f91afc0bbea035e..9c7dc970e96653fd739f3daa1435a670a409b197 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,6 +10,12 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 +#if __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. 
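The stfle() rework above also clamps the reported facility-list length: the instruction can report more doublewords than the caller's buffer holds, and without the min_t() the byte count fed to the trailing memset() can exceed the buffer and make the tail-clearing length wrap. A user-space sketch of the same clamping; query_facilities() and get_facilities() are made-up stand-ins, not the kernel interfaces::

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	/* hypothetical stand-in: pretends the machine has 4 doublewords of
	 * facility bits, no matter how small the caller's buffer is */
	static unsigned long query_facilities(uint64_t *list, int size)
	{
		int reported = 4;
		int copied = MIN(reported, size);

		memset(list, 0xff, copied * 8);
		return reported - 1;	/* stfle-style: returns doublewords - 1 */
	}

	static void get_facilities(uint64_t *list, int size)
	{
		unsigned long nr;

		nr = query_facilities(list, size);
		/* clamp: never let the byte count exceed the buffer size */
		nr = MIN((nr + 1) * 8, (unsigned long)size * 8);
		/* zero the tail; without the clamp this length could wrap */
		memset((char *)list + nr, 0, size * 8 - nr);
	}

	int main(void)
	{
		uint64_t buf[2];	/* smaller than what the machine reports */

		get_facilities(buf, 2);
		printf("%016llx %016llx\n",
		       (unsigned long long)buf[0], (unsigned long long)buf[1]);
		return 0;
	}

With the clamp removed, nr would be 32 for this 16-byte buffer and size * 8 - nr would wrap to a huge memset() length.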
@@ -19,9 +25,9 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: @@ -33,9 +39,9 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("0: brcl 15, %l[label]\n" ".pushsection __jump_table, \"aw\"\n" ".balign 8\n" - ".quad 0b, %l[label], %0\n" + ".quad 0b, %l[label], %0+%1\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 29c940bf8506a78ca726befcde6888e570582b22..dad110e9f41b3e2c72ad8bd0a2e94223697c48f0 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} -static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index a8418e1379eb7ee08c92acd034eae000cb19c695..bcfb6371086f2319f6901d2cc52a1d8c44fd0a1a 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -32,6 +32,8 @@ typedef struct { unsigned int uses_cmm:1; /* The gmaps associated with this context are allowed to use huge pages. 
*/ unsigned int allow_gmap_hpage_1m:1; + /* The mmu context is for compat task */ + unsigned int compat_mm:1; } mm_context_t; #define INIT_MM_CONTEXT(name) \ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 0717ee76885d634cfc10dd0ce790004639737dd2..8d04e6f3f79649d460376f09217c9e8fe211a850 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -25,6 +25,7 @@ static inline int init_new_context(struct task_struct *tsk, atomic_set(&mm->context.flush_count, 0); mm->context.gmap_asce = 0; mm->context.flush_mm = 0; + mm->context.compat_mm = test_thread_flag(TIF_31BIT); #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste || test_thread_flag(TIF_PGSTE) || @@ -45,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.asce_limit = STACK_TOP_MAX; mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; - /* pgd_alloc() did not account this pud */ - mm_inc_nr_puds(mm); break; case -PAGE_SIZE: /* forked 5-level task, set new asce with new_mm->pgd */ @@ -62,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk, /* forked 2-level compat task, set new asce with new mm->pgd */ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; - /* pgd_alloc() did not account this pmd */ - mm_inc_nr_pmds(mm); - mm_inc_nr_puds(mm); } crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; @@ -94,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, { int cpu = smp_processor_id(); - if (prev == next) - return; S390_lowcore.user_asce = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); /* Clear previous user-ASCE from CR1 and CR7 */ @@ -107,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, __ctl_load(S390_lowcore.vdso_asce, 7, 7); clear_cpu_flag(CIF_ASCE_SECONDARY); } - cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); + if (prev != next) + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); } #define finish_arch_post_lock_switch finish_arch_post_lock_switch diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 41e3908b397f8f2faa5bab59266fec25c635a6a7..0d753291c43c0f2427a2234b2fde9074b290ae3c 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define ARCH_ZONE_DMA_BITS 31 + #include #include diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f0f9bcf94c03749b0f0030d9de5765cff1597d37..67838df3f3f92f4d37c51f94b8bdcd94a1e9b481 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry) static inline unsigned long pgd_entry_type(struct mm_struct *mm) { - if (mm->context.asce_limit <= _REGION3_SIZE) + if (mm_pmd_folded(mm)) return _SEGMENT_ENTRY_EMPTY; - if (mm->context.asce_limit <= _REGION2_SIZE) + if (mm_pud_folded(mm)) return _REGION3_ENTRY_EMPTY; - if (mm->context.asce_limit <= _REGION1_SIZE) + if (mm_p4d_folded(mm)) return _REGION2_ENTRY_EMPTY; return _REGION1_ENTRY_EMPTY; } @@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, 
_REGION2_ENTRY_EMPTY); return (p4d_t *) table; } -#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d) + +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + crst_table_free(mm, (unsigned long *) p4d); +} static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { @@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) crst_table_init(table, _REGION3_ENTRY_EMPTY); return (pud_t *) table; } -#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud) + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + if (!mm_pud_folded(mm)) + crst_table_free(mm, (unsigned long *) pud); +} static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { @@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { + if (mm_pmd_folded(mm)) + return; pgtable_pmd_page_dtor(virt_to_page(pmd)); crst_table_free(mm, (unsigned long *) pmd); } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0e7cb0dc9c33b7f5a8187df912aefb8b15c100fc..0a326da1562fc80598ccc2f71ad33e40f5532f9c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -485,6 +485,24 @@ static inline int is_module_addr(void *addr) _REGION_ENTRY_PROTECT | \ _REGION_ENTRY_NOEXEC) +static inline bool mm_p4d_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION1_SIZE; +} +#define mm_p4d_folded(mm) mm_p4d_folded(mm) + +static inline bool mm_pud_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION2_SIZE; +} +#define mm_pud_folded(mm) mm_pud_folded(mm) + +static inline bool mm_pmd_folded(struct mm_struct *mm) +{ + return mm->context.asce_limit <= _REGION3_SIZE; +} +#define mm_pmd_folded(mm) mm_pmd_folded(mm) + static inline int mm_has_pgste(struct mm_struct *mm) { #ifdef CONFIG_PGSTE @@ -1132,8 +1150,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - if (!MACHINE_HAS_NX) - pte_val(entry) &= ~_PAGE_NOEXEC; if (pte_present(entry)) pte_val(entry) &= ~_PAGE_UNUSED; if (mm_has_pgste(mm)) @@ -1150,6 +1166,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); + if (!MACHINE_HAS_NX) + pte_val(__pte) &= ~_PAGE_NOEXEC; return pte_mkyoung(__pte); } diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 64539c221672b8606001733c4f46feb9cce39491..2dc9eb4e1acca26f09f5aad4d2cb45a5e19833a2 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -10,8 +10,9 @@ #ifndef _ASM_S390_TIMEX_H #define _ASM_S390_TIMEX_H -#include +#include #include +#include /* The value of the TOD clock for 1.1.1970. */ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL @@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8); /** * get_clock_monotonic - returns current time in clock rate units * - * The caller must ensure that preemption is disabled. * The clock and tod_clock_base get changed via stop_machine. - * Therefore preemption must be disabled when calling this - * function, otherwise the returned value is not guaranteed to - * be monotonic. + * Therefore preemption must be disabled, otherwise the returned + * value is not guaranteed to be monotonic. 
*/ static inline unsigned long long get_tod_clock_monotonic(void) { - return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + unsigned long long tod; + + preempt_disable_notrace(); + tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; + preempt_enable_notrace(); + return tod; } /** diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 457b7ba0fbb66de24fd82219e18a51ad2663221f..1df28a8e2f19e257d61a6ff43fdaa72779c82722 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, return tlb_remove_page(tlb, page); } +static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, + unsigned long address, unsigned long size) +{ + /* + * the range might exceed the original range that was provided to + * tlb_gather_mmu(), so we need to update it despite the fact it is + * usually not updated. + */ + if (tlb->start > address) + tlb->start = address; + if (tlb->end < address + size) + tlb->end = address + size; +} + /* * pte_free_tlb frees a pte table and clears the CRSTE for the * page table from the tlb. @@ -136,7 +150,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION3_SIZE) + if (mm_pmd_folded(tlb->mm)) return; pgtable_pmd_page_dtor(virt_to_page(pmd)); tlb_remove_table(tlb, pmd); @@ -152,7 +166,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION1_SIZE) + if (mm_p4d_folded(tlb->mm)) return; tlb_remove_table(tlb, p4d); } @@ -167,7 +181,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long address) { - if (tlb->mm->context.asce_limit <= _REGION2_SIZE) + if (mm_pud_folded(tlb->mm)) return; tlb_remove_table(tlb, pud); } @@ -177,6 +191,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0) #define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0) #define tlb_migrate_finish(mm) do { } while (0) +#define tlb_flush_pmd_range(tlb, addr, sz) do { } while (0) + #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ tlb_remove_tlb_entry(tlb, ptep, address) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index ad6b91013a0525d82002090788038e0c9773b34a..29b1657658f2efa3842f8487884c42e18d860588 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -48,7 +48,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size) __range_ok((unsigned long)(addr), (size)); \ }) -#define access_ok(type, addr, size) __access_ok(addr, size) +#define access_ok(addr, size) __access_ok(addr, size) unsigned long __must_check raw_copy_from_user(void *to, const void __user *from, unsigned long n); @@ -56,8 +56,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long n); +#ifndef CONFIG_KASAN #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER +#endif #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES @@ -82,7 +84,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); __rc; \ }) 
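The new tlb_flush_pmd_range() above only widens the flush window kept in the mmu_gather: when a freed page table covers more than the range originally handed to tlb_gather_mmu(), start and end are pushed outwards so the eventual flush includes it. The same update rule in a self-contained sketch (struct flush_range and range_extend() are invented for the illustration)::

	#include <stdio.h>

	struct flush_range {
		unsigned long start;
		unsigned long end;
	};

	/* widen the tracked range so it also covers [address, address + size) */
	static void range_extend(struct flush_range *r,
				 unsigned long address, unsigned long size)
	{
		if (r->start > address)
			r->start = address;
		if (r->end < address + size)
			r->end = address + size;
	}

	int main(void)
	{
		/* range originally set up for a single 4 KiB page */
		struct flush_range r = { .start = 0x201000, .end = 0x202000 };

		/* a freed PMD table actually covers a 2 MiB aligned region */
		range_extend(&r, 0x200000, 2UL << 20);

		printf("flush %#lx - %#lx\n", r.start, r.end);	/* 0x200000 - 0x400000 */
		return 0;
	}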
-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) +static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { unsigned long spec = 0x010000UL; int rc; @@ -112,7 +114,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) return rc; } -static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) +static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { unsigned long spec = 0x01UL; int rc; diff --git a/arch/s390/include/asm/vmalloc.h b/arch/s390/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..3ba3a6bdca254a7e29bddd5b3b3e54e1895b26bc --- /dev/null +++ b/arch/s390/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_S390_VMALLOC_H +#define _ASM_S390_VMALLOC_H + +#endif /* _ASM_S390_VMALLOC_H */ diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index 2bb1f3bb98ac5cc6f3c7bb0ac74dedda5e202058..48c784f2101a8179547b8d4b3205fc2340049d63 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -147,8 +147,8 @@ struct ica_xcRB { * @cprb_len: CPRB header length [0x0020] * @cprb_ver_id: CPRB version id. [0x04] * @pad_000: Alignment pad bytes - * @flags: Admin cmd [0x80] or functional cmd [0x00] - * @func_id: Function id / subtype [0x5434] + * @flags: Admin bit [0x80], Special bit [0x20] + * @func_id: Function id / subtype [0x5434] "T4" * @source_id: Source id [originator id] * @target_id: Target id [usage/ctrl domain id] * @ret_code: Return code diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index dbfd1730e631acfb71d8688ca4383ff98385a106..762fc45376ffdba56ebcbfefcecada3941468fdd 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -23,6 +23,8 @@ KCOV_INSTRUMENT_early_nobss.o := n UBSAN_SANITIZE_early.o := n UBSAN_SANITIZE_early_nobss.o := n +KASAN_SANITIZE_early_nobss.o := n + # # Passing null pointers is ok for smp code, since we access the lowcore here. 
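__put_user_fn() and __get_user_fn() above become __always_inline rather than plain inline, presumably so the size-based dispatch still folds away at compile time even in instrumented builds where the compiler might otherwise leave the helper out of line. A rough user-space illustration of why forced inlining matters for such a size-dispatched helper; copy_fixed() is a made-up function, not the kernel's uaccess code::

	#include <stdio.h>
	#include <string.h>

	/* when "size" is a compile-time constant and the helper is forced
	 * inline, the switch below collapses to a single fixed-size copy */
	static inline __attribute__((always_inline))
	int copy_fixed(void *dst, const void *src, unsigned long size)
	{
		switch (size) {
		case 1: memcpy(dst, src, 1); return 0;
		case 2: memcpy(dst, src, 2); return 0;
		case 4: memcpy(dst, src, 4); return 0;
		case 8: memcpy(dst, src, 8); return 0;
		default: return -1;	/* unsupported size */
		}
	}

	int main(void)
	{
		unsigned int x = 0x12345678, y = 0;

		if (copy_fixed(&y, &x, sizeof(y)) == 0)
			printf("copied: %#x\n", y);
		return 0;
	}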
# @@ -44,7 +46,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o -obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o +obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o obj-y += nospec-branch.o @@ -68,6 +70,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index b2c68fbf26346a3e9d6e626333a34c7c374bef31..41925f2206940e6ca6fcce881186b75598bedc8e 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -462,10 +462,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) ptr += sprintf(ptr, "%%c%i", value); else if (operand->flags & OPERAND_VR) ptr += sprintf(ptr, "%%v%i", value); - else if (operand->flags & OPERAND_PCREL) - ptr += sprintf(ptr, "%lx", (signed int) value - + addr); - else if (operand->flags & OPERAND_SIGNED) + else if (operand->flags & OPERAND_PCREL) { + void *pcrel = (void *)((int)value + addr); + + ptr += sprintf(ptr, "%px", pcrel); + } else if (operand->flags & OPERAND_SIGNED) ptr += sprintf(ptr, "%i", value); else ptr += sprintf(ptr, "%u", value); @@ -537,7 +538,7 @@ void show_code(struct pt_regs *regs) else *ptr++ = ' '; addr = regs->psw.addr + start - 32; - ptr += sprintf(ptr, "%016lx: ", addr); + ptr += sprintf(ptr, "%px: ", (void *)addr); if (start + opsize >= end) break; for (i = 0; i < opsize; i++) @@ -565,7 +566,7 @@ void print_fn_code(unsigned char *code, unsigned long len) opsize = insn_length(*code); if (opsize > len) break; - ptr += sprintf(ptr, "%p: ", code); + ptr += sprintf(ptr, "%px: ", code); for (i = 0; i < opsize; i++) ptr += sprintf(ptr, "%02x", code[i]); *ptr++ = '\t'; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 5b28b434f8a153d27ca8a7124d156ec05897e71e..e7e6608b996c6347cd18a91dd1fd8aae733e35bc 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -64,10 +64,10 @@ static noinline __init void detect_machine_type(void) if (stsi(vmms, 3, 2, 2) || !vmms->count) return; - /* Running under KVM? 
If not we assume z/VM */ + /* Detect known hypervisors */ if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; - else + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 84be7f02d0c2157029cb2868231b67c7603bbf42..39b13d71a8fe6dc2979e8a8320ae62b675b8ee9b 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init); */ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) { - struct ftrace_graph_ent trace; - if (unlikely(ftrace_graph_is_dead())) goto out; if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; ip -= MCOUNT_INSN_SIZE; - trace.func = ip; - trace.depth = current->curr_ret_stack + 1; - /* Only trace if the calling function expects to. */ - if (!ftrace_graph_entry(&trace)) - goto out; - if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, - NULL) == -EBUSY) - goto out; - parent = (unsigned long) return_to_handler; + if (!function_graph_enter(parent, ip, 0, NULL)) + parent = (unsigned long) return_to_handler; out: return parent; } diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index b9d8fe45737aa2e529cb9eec244349e121803858..8f8456816d83e890a3f3150ccec184f21810a509 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); static ssize_t show_idle_time(struct device *dev, struct device_attribute *attr, char *buf) { + unsigned long long now, idle_time, idle_enter, idle_exit, in_idle; struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); - unsigned long long now, idle_time, idle_enter, idle_exit; unsigned int seq; do { - now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_time = READ_ONCE(idle->idle_time); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); - idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; + in_idle = 0; + now = get_tod_clock(); + if (idle_enter) { + if (idle_exit) { + in_idle = idle_exit - idle_enter; + } else if (now > idle_enter) { + in_idle = now - idle_enter; + } + } + idle_time += in_idle; return sprintf(buf, "%llu\n", idle_time >> 12); } DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); @@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); u64 arch_cpu_idle_time(int cpu) { struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); - unsigned long long now, idle_enter, idle_exit; + unsigned long long now, idle_enter, idle_exit, in_idle; unsigned int seq; do { - now = get_tod_clock(); seq = read_seqcount_begin(&idle->seqcount); idle_enter = READ_ONCE(idle->clock_idle_enter); idle_exit = READ_ONCE(idle->clock_idle_exit); } while (read_seqcount_retry(&idle->seqcount, seq)); - - return cputime_to_nsecs(idle_enter ? 
((idle_exit ?: now) - idle_enter) : 0); + in_idle = 0; + now = get_tod_clock(); + if (idle_enter) { + if (idle_exit) { + in_idle = idle_exit - idle_enter; + } else if (now > idle_enter) { + in_idle = now - idle_enter; + } + } + return cputime_to_nsecs(in_idle); } void arch_cpu_idle_enter(void) diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 43f8430fb67d50aa44d3a4636d43b83bda17951e..68f415e334a5943d9f5ccc98cd174550770e190f 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -10,8 +10,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - struct insn { u16 opcode; s32 offset; @@ -102,5 +100,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry, { __jump_label_transform(entry, type, 1); } - -#endif diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c index 5a286b012043bc2c4437ebb1509241dad0282546..602e7cc26d1181e8869bd9f4e51f240b286066c0 100644 --- a/arch/s390/kernel/kexec_elf.c +++ b/arch/s390/kernel/kexec_elf.c @@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image, struct kexec_buf buf; const Elf_Ehdr *ehdr; const Elf_Phdr *phdr; + Elf_Addr entry; int i, ret; ehdr = (Elf_Ehdr *)kernel; buf.image = image; + if (image->type == KEXEC_TYPE_CRASH) + entry = STARTUP_KDUMP_OFFSET; + else + entry = ehdr->e_entry; phdr = (void *)ehdr + ehdr->e_phoff; for (i = 0; i < ehdr->e_phnum; i++, phdr++) { @@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image, buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); buf.memsz = phdr->p_memsz; - if (phdr->p_paddr == 0) { + if (entry - phdr->p_paddr < phdr->p_memsz) { data->kernel_buf = buf.buffer; data->memsz += STARTUP_NORMAL_OFFSET; diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index bdddaae9655984dfbf59ccee386c7bb3608cb183..649135cbedd5c4407f16d89f4020d5022f40395e 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include static int __init nobp_setup_early(char *str) @@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early); void __init nospec_auto_detect(void) { - if (test_facility(156)) { + if (test_facility(156) || cpu_mitigations_off()) { /* * The machine supports etokens. * Disable expolines and disable nobp. 
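The kexec_elf.c change above selects the program header that contains the entry point (STARTUP_KDUMP_OFFSET for crash kernels, e_entry otherwise) instead of assuming the kernel segment is the one loaded at physical address 0. The containment test "entry - phdr->p_paddr < phdr->p_memsz" needs only one unsigned compare: if the entry lies below the segment base, the subtraction wraps to a huge value and the test fails. A standalone sketch of that check; segment_contains() is an invented name::

	#include <stdio.h>
	#include <stdbool.h>

	/* one unsigned compare covers both bounds: entry >= base and
	 * entry < base + size */
	static bool segment_contains(unsigned long base, unsigned long size,
				     unsigned long entry)
	{
		return entry - base < size;
	}

	int main(void)
	{
		/* made-up segment: loaded at 1 MiB, 16 MiB long */
		unsigned long base = 0x100000, size = 16UL << 20;

		printf("%d\n", segment_contains(base, size, 0x110000));	/* 1 */
		printf("%d\n", segment_contains(base, size, 0x0f0000));	/* 0, below base */
		printf("%d\n", segment_contains(base, size, base + size));	/* 0, one past end */
		return 0;
	}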
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index cc085e2d2ce9907690fbe0912dd301ab44e8171d..d5523adeddbf4dc0d9b92962bb39328d474cba22 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event) break; case PERF_TYPE_HARDWARE: + if (is_sampling_event(event)) /* No sampling support */ + return -ENOENT; ev = attr->config; /* Count user space (problem-state) only */ if (!attr->exclude_user && attr->exclude_kernel) { @@ -373,7 +375,7 @@ static int __hw_perf_event_init(struct perf_event *event) return -ENOENT; if (ev > PERF_CPUM_CF_MAX_CTR) - return -EINVAL; + return -ENOENT; /* Obtain the counter set to which the specified counter belongs */ set = get_counter_set(ev); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 5c53e977be62710ad9ed2001e739ca863e2ca02e..5bfb1ce129f4b81994bc78e5a2188365d8cf7409 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb, gfp_t gfp_flags) { int i, rc; - unsigned long *new, *tail; + unsigned long *new, *tail, *tail_prev = NULL; if (!sfb->sdbt || !sfb->tail) return -EINVAL; @@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, sfb->num_sdbt++; /* Link current page to tail of chain */ *tail = (unsigned long)(void *) new + 1; + tail_prev = tail; tail = new; } @@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb, * issue, a new realloc call (if required) might succeed. */ rc = alloc_sample_data_block(tail, gfp_flags); - if (rc) + if (rc) { + /* Undo last SDBT. An SDBT with no SDB at its first + * entry but with an SDBT entry instead can not be + * handled by the interrupt handler code. + * Avoid this situation. + */ + if (tail_prev) { + sfb->num_sdbt--; + free_page((unsigned long) new); + tail = tail_prev; + } break; + } sfb->num_sdb++; tail++; + tail_prev = new = NULL; /* Allocated at least one SBD */ } /* Link sampling buffer to its origin */ @@ -1248,18 +1261,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) */ if (flush_all && done) break; - - /* If an event overflow happened, discard samples by - * processing any remaining sample-data-blocks. - */ - if (event_overflow) - flush_all = 1; } /* Account sample overflows in the event hardware structure */ if (sampl_overflow) OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + sampl_overflow, 1 + num_sdb); + + /* Perf_event_overflow() and perf_event_account_interrupt() limit + * the interrupt rate to an upper limit. Roughly 1000 samples per + * task tick. + * Hitting this limit results in a large number + * of throttled REF_REPORT_THROTTLE entries and the samples + * are dropped. + * Slightly increase the interval to avoid hitting this limit. 
+ */ + if (event_overflow) { + SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); + debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", + __func__, + DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); + } + if (sampl_overflow || event_overflow) debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " "overflow stats: sample=%llu event=%llu\n", @@ -1600,7 +1623,7 @@ static void aux_sdb_init(unsigned long sdb) /* * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling - * @cpu: On which to allocate, -1 means current + * @event: Event the buffer is setup for, event->cpu == -1 means current * @pages: Array of pointers to buffer pages passed from perf core * @nr_pages: Total pages * @snapshot: Flag for snapshot mode @@ -1612,8 +1635,8 @@ static void aux_sdb_init(unsigned long sdb) * * Return the private AUX buffer structure if success or NULL if fails. */ -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *aux_buffer_setup(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct sf_buffer *sfb; struct aux_buffer *aux; @@ -2045,14 +2068,17 @@ static int __init init_cpum_sampling_pmu(void) } sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); - if (!sfdbg) + if (!sfdbg) { pr_err("Registering for s390dbf failed\n"); + return -ENOMEM; + } debug_register_view(sfdbg, &debug_sprintf_view); err = register_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert); if (err) { pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + debug_unregister(sfdbg); goto out; } @@ -2061,6 +2087,7 @@ static int __init init_cpum_sampling_pmu(void) pr_cpumsf_err(RS_INIT_FAILURE_PERF); unregister_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert); + debug_unregister(sfdbg); goto out; } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 6e758bb6cd29b70821bec49fd69b6d8d76111d2e..99ef537e548a372e920a825386431621f0a703c0 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p)) return 0; + + if (!try_get_task_stack(p)) + return 0; + low = task_stack_page(p); high = (struct stack_frame *) task_pt_regs(p); sf = (struct stack_frame *) p->thread.ksp; - if (sf <= low || sf > high) - return 0; + if (sf <= low || sf > high) { + return_address = 0; + goto out; + } for (count = 0; count < 16; count++) { sf = (struct stack_frame *) sf->back_chain; - if (sf <= low || sf > high) - return 0; + if (sf <= low || sf > high) { + return_address = 0; + goto out; + } return_address = sf->gprs[8]; if (!in_sched_functions(return_address)) - return return_address; + goto out; } - return 0; +out: + put_task_stack(p); + return return_address; } unsigned long arch_align_stack(unsigned long sp) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index cd3df5514552cc262dee1d1b99260d0ee089668b..05e93dd5f67e65456fc5fdcd402c852b44e6152d 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -389,6 +389,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* * floating point control reg. is in the thread structure */ + save_fpu_regs(); if ((unsigned int) data != 0 || test_fp_ctl(data >> (BITS_PER_LONG - 32))) return -EINVAL; @@ -747,6 +748,7 @@ static int __poke_user_compat(struct task_struct *child, /* * floating point control reg. 
is in the thread structure */ + save_fpu_regs(); if (test_fp_ctl(tmp)) return -EINVAL; child->thread.fpu.fpc = data; @@ -977,9 +979,7 @@ static int s390_fpregs_set(struct task_struct *target, int rc = 0; freg_t fprs[__NUM_FPRS]; - if (target == current) - save_fpu_regs(); - + save_fpu_regs(); if (MACHINE_HAS_VX) convert_vx_to_fp(fprs, target->thread.fpu.vxrs); else diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c637c12f9e37ccef3c0ab9a35bbe312259f75414..5f85e0dfa66d1d1661bb250baad4861f42fbd371 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -303,7 +303,7 @@ early_param("vmalloc", parse_vmalloc); void *restart_stack __section(.data); -static void __init setup_lowcore(void) +static void __init setup_lowcore_dat_off(void) { struct lowcore *lc; @@ -314,19 +314,16 @@ static void __init setup_lowcore(void) lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc)); lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = (unsigned long) restart_int_handler; - lc->external_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->external_new_psw.addr = (unsigned long) ext_int_handler; lc->svc_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = (unsigned long) system_call; - lc->program_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->program_new_psw.addr = (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->clock_comparator = clock_comparator_max; lc->kernel_stack = ((unsigned long) &init_thread_union) @@ -388,6 +385,16 @@ static void __init setup_lowcore(void) lowcore_ptr[0] = lc; } +static void __init setup_lowcore_dat_on(void) +{ + __ctl_clear_bit(0, 28); + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; + __ctl_set_bit(0, 28); +} + static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, @@ -882,6 +889,8 @@ void __init setup_arch(char **cmdline_p) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); + else + pr_info("Linux is running as a guest in 64-bit mode\n"); /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ @@ -944,7 +953,7 @@ void __init setup_arch(char **cmdline_p) #endif setup_resources(); - setup_lowcore(); + setup_lowcore_dat_off(); smp_fill_possible_mask(); cpu_detect_mhz_feature(); cpu_init(); @@ -957,6 +966,12 @@ void __init setup_arch(char **cmdline_p) */ paging_init(); + /* + * After paging_init created the kernel page table, the new PSWs + * in lowcore can now run with DAT enabled. 
+ */ + setup_lowcore_dat_on(); + /* Setup default console */ conmode_default(); set_preferred_console(); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2f8f7d7dd9a8387b2152999e5331f478bebabbbb..ecd24711f3aa9caa137544b0434f23e29797c144 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -261,9 +261,12 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) lc->spinlock_index = 0; lc->percpu_offset = __per_cpu_offset[cpu]; lc->kernel_asce = S390_lowcore.kernel_asce; + lc->user_asce = S390_lowcore.kernel_asce; lc->machine_flags = S390_lowcore.machine_flags; lc->user_timer = lc->system_timer = lc->steal_timer = 0; __ctl_store(lc->cregs_save_area, 0, 15); + lc->cregs_save_area[1] = lc->kernel_asce; + lc->cregs_save_area[7] = lc->vdso_asce; save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, sizeof(lc->stfle_fac_list)); @@ -371,9 +374,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) */ void smp_call_ipl_cpu(void (*func)(void *), void *data) { + struct lowcore *lc = pcpu_devices->lowcore; + + if (pcpu_devices[0].address == stap()) + lc = &S390_lowcore; + pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->lowcore->panic_stack - - PANIC_FRAME_OFFSET + PAGE_SIZE); + lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ -708,39 +715,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early) static int smp_add_present_cpu(int cpu); -static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add) +static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, + bool configured, bool early) { struct pcpu *pcpu; - cpumask_t avail; - int cpu, nr, i, j; + int cpu, nr, i; u16 address; nr = 0; - cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); - cpu = cpumask_first(&avail); - for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { - if (sclp.has_core_type && info->core[i].type != boot_core_type) + if (sclp.has_core_type && core->type != boot_core_type) + return nr; + cpu = cpumask_first(avail); + address = core->core_id << smp_cpu_mt_shift; + for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { + if (pcpu_find_address(cpu_present_mask, address + i)) continue; - address = info->core[i].core_id << smp_cpu_mt_shift; - for (j = 0; j <= smp_cpu_mtid; j++) { - if (pcpu_find_address(cpu_present_mask, address + j)) - continue; - pcpu = pcpu_devices + cpu; - pcpu->address = address + j; - pcpu->state = - (cpu >= info->configured*(smp_cpu_mtid + 1)) ? 
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - set_cpu_present(cpu, true); - if (sysfs_add && smp_add_present_cpu(cpu) != 0) - set_cpu_present(cpu, false); - else - nr++; - cpu = cpumask_next(cpu, &avail); - if (cpu >= nr_cpu_ids) + pcpu = pcpu_devices + cpu; + pcpu->address = address + i; + if (configured) + pcpu->state = CPU_STATE_CONFIGURED; + else + pcpu->state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (!early && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpumask_clear_cpu(cpu, avail); + cpu = cpumask_next(cpu, avail); + } + return nr; +} + +static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) +{ + struct sclp_core_entry *core; + cpumask_t avail; + bool configured; + u16 core_id; + int nr, i; + + nr = 0; + cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); + /* + * Add IPL core first (which got logical CPU number 0) to make sure + * that all SMT threads get subsequent logical CPU numbers. + */ + if (early) { + core_id = pcpu_devices[0].address >> smp_cpu_mt_shift; + for (i = 0; i < info->configured; i++) { + core = &info->core[i]; + if (core->core_id == core_id) { + nr += smp_add_core(core, &avail, true, early); break; + } } } + for (i = 0; i < info->combined; i++) { + configured = i < info->configured; + nr += smp_add_core(&info->core[i], &avail, configured, early); + } return nr; } @@ -786,7 +821,7 @@ void __init smp_detect_cpus(void) /* Add CPUs present at boot */ get_online_cpus(); - __smp_rescan_cpus(info, 0); + __smp_rescan_cpus(info, true); put_online_cpus(); memblock_free_early((unsigned long)info, sizeof(*info)); } @@ -806,6 +841,8 @@ static void smp_start_secondary(void *cpuvoid) restore_access_regs(S390_lowcore.access_regs_save_area); __ctl_load(S390_lowcore.cregs_save_area, 0, 15); __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); + set_cpu_flag(CIF_ASCE_PRIMARY); + set_cpu_flag(CIF_ASCE_SECONDARY); cpu_init(); preempt_disable(); init_cpu_timer(); @@ -1136,7 +1173,7 @@ int __ref smp_rescan_cpus(void) smp_get_core_info(info, 0); get_online_cpus(); mutex_lock(&smp_cpu_state_mutex); - nr = __smp_rescan_cpus(info, 1); + nr = __smp_rescan_cpus(info, false); mutex_unlock(&smp_cpu_state_mutex); put_online_cpus(); kfree(info); @@ -1152,7 +1189,11 @@ static ssize_t __ref rescan_store(struct device *dev, { int rc; + rc = lock_device_hotplug_sysfs(); + if (rc) + return rc; rc = smp_rescan_cpus(); + unlock_device_hotplug(); return rc ? 
rc : count; } static DEVICE_ATTR_WO(rescan); diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 0859cde36f7520616e62df133c2f88df69a47e27..888cc2f166db726d8e5c967c4082e781cc6137f4 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns) static void fill_stsi_mac(struct sthyi_sctns *sctns, struct sysinfo_1_1_1 *sysinfo) { + sclp_ocf_cpc_name_copy(sctns->mac.infmname); + if (*(u64 *)sctns->mac.infmname != 0) + sctns->mac.infmval1 |= MAC_NAME_VLD; + if (stsi(sysinfo, 1, 1, 1)) return; - sclp_ocf_cpc_name_copy(sctns->mac.infmname); - memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype)); memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu)); memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman)); memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq)); - sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD; + sctns->mac.infmval1 |= MAC_ID_VLD; } static void fill_stsi_par(struct sthyi_sctns *sctns, diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index e8184a15578a332eae08bcb60e21fbb3ec0dd780..7b96888974db11ece70015bf3caf3450c41174bf 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -311,7 +311,8 @@ int arch_update_cpu_topology(void) on_each_cpu(__arch_update_dedicated_flag, NULL, 0); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); - kobject_uevent(&dev->kobj, KOBJ_CHANGE); + if (dev) + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } return rc; } diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 3031cc6dd0ab48de8ebf3797a2bc748995d67c49..7ab7d256d1eb7ed9622e9d04c43d47f9d0bf13b5 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -56,7 +56,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (is_compat_task()) { + if (vma->vm_mm->context.compat_mm) { vdso_pagelist = vdso32_pagelist; vdso_pages = vdso32_pages; } @@ -77,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (is_compat_task()) + if (vma->vm_mm->context.compat_mm) vdso_pages = vdso32_pages; #endif @@ -224,7 +224,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (is_compat_task()) + mm->context.compat_mm = is_compat_task(); + if (mm->context.compat_mm) vdso_pages = vdso32_pages; #endif /* diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index c5c856f320bca47e9b64c0c35726881fb2394d1d..e76309fbbcb3b6e23af21350f98f2b555502b978 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -28,15 +28,16 @@ obj-y += vdso32_wrapper.o extra-y += vdso32.lds CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) -# Disable gcov profiling and ubsan for VDSO code +# Disable gcov profiling, ubsan and kasan for VDSO code GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n # Force dependency (incbin is bad) $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) +$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE $(call if_changed,vdso32ld) # strip rule for the .so file @@ -45,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files 
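The 31-bit vdso assembly above guards its clock data with an update counter ("pending update ? loop"): readers spin while the counter marks an update in progress and retry if it changed across the read. A single-threaded user-space sketch of that seqcount-style protocol; it deliberately omits the memory barriers and atomics real multi-threaded code would need::

	#include <stdio.h>

	/* toy clock data protected by a sequence counter */
	static unsigned long seq;
	static unsigned long long clock_base;

	static void writer_update(unsigned long long new_base)
	{
		seq++;			/* odd: update in progress */
		clock_base = new_base;
		seq++;			/* even again: update complete */
	}

	static unsigned long long reader_snapshot(void)
	{
		unsigned long start;
		unsigned long long val;

		do {
			do {
				start = seq;
			} while (start & 1);	/* pending update? loop */
			val = clock_base;
		} while (seq != start);		/* changed underneath, retry */
		return val;
	}

	int main(void)
	{
		/* the TOD epoch constant seen in timex.h above */
		writer_update(0x7d91048bca000000ULL);
		printf("%llx\n", reader_snapshot());
		return 0;
	}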
-$(obj-vdso32): %.o: %.S +$(obj-vdso32): %.o: %.S FORCE $(call if_changed_dep,vdso32as) # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index a9418bf975db5a32db1c88b07d59f71454ce550b..ada5c11a16e5adb20cfcd7f9908296eb1ac6cbb9 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -10,6 +10,7 @@ #include #include #include +#include .text .align 4 @@ -18,8 +19,8 @@ __kernel_clock_gettime: CFI_STARTPROC ahi %r15,-16 - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD basr %r5,0 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ chi %r2,__CLOCK_REALTIME_COARSE @@ -72,13 +73,13 @@ __kernel_clock_gettime: st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 ahi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CLOCK_MONOTONIC_COARSE */ - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 9b @@ -158,17 +159,17 @@ __kernel_clock_gettime: st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 ahi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* Fallback to system call */ - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 19: lhi %r1,__NR_clock_gettime svc 0 ahi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 CFI_ENDPROC diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index 3c0db0fa6ad90304929e7263ea2ca07bbe077eca..b23063fbc892cd91b1e08fabc52a52b26f968e98 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -10,6 +10,7 @@ #include #include #include +#include .text .align 4 @@ -19,7 +20,7 @@ __kernel_gettimeofday: CFI_STARTPROC ahi %r15,-16 CFI_ADJUST_CFA_OFFSET 16 - CFI_VAL_OFFSET 15, -160 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD basr %r5,0 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ 1: ltr %r3,%r3 /* check if tz is NULL */ diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 15b1ceafc4c18fd2cf7fc52920e6c259212b1708..f849ac61c5da02ee8b764bc3c01fc44c16137e04 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -28,15 +28,16 @@ obj-y += vdso64_wrapper.o extra-y += vdso64.lds CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) -# Disable gcov profiling and ubsan for VDSO code +# Disable gcov profiling, ubsan and kasan for VDSO code GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n # Force dependency (incbin is bad) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE $(call if_changed,vdso64ld) # strip rule for the .so file @@ -45,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) # assembly rules for the .S files -$(obj-vdso64): %.o: %.S 
+$(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index fac3ab5ec83a9c3a73f9201b5e094309dda3a1a9..9d2ee79b90f250afeedaeb22e6646ef55bfce056 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -10,6 +10,7 @@ #include #include #include +#include .text .align 4 @@ -18,8 +19,8 @@ __kernel_clock_gettime: CFI_STARTPROC aghi %r15,-16 - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME_COARSE je 4f @@ -56,13 +57,13 @@ __kernel_clock_gettime: stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 aghi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CLOCK_MONOTONIC_COARSE */ - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 3b @@ -115,13 +116,13 @@ __kernel_clock_gettime: stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 aghi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CPUCLOCK_VIRT for this thread */ - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 9: lghi %r4,0 icm %r0,15,__VDSO_ECTG_OK(%r5) jz 12f @@ -142,17 +143,17 @@ __kernel_clock_gettime: stg %r4,8(%r3) lghi %r2,0 aghi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* Fallback to system call */ - CFI_DEF_CFA_OFFSET 176 - CFI_VAL_OFFSET 15, -160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 12: lghi %r1,__NR_clock_gettime svc 0 aghi %r15,16 - CFI_DEF_CFA_OFFSET 160 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 CFI_ENDPROC diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index 6e1f0b421695ac5c4b4bee16adee3690bf89b705..aebe10dc7c99a13498edd6ffddf99d822cc37d23 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -10,6 +10,7 @@ #include #include #include +#include .text .align 4 @@ -19,7 +20,7 @@ __kernel_gettimeofday: CFI_STARTPROC aghi %r15,-16 CFI_ADJUST_CFA_OFFSET 16 - CFI_VAL_OFFSET 15, -160 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD larl %r5,_vdso_data 0: ltgr %r3,%r3 /* check if tz is NULL */ je 1f diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index b43f8d33a3697de32e7c9f7dae4cbf2e7cf3bc46..18ede6e806b917ce8a302bf864219de236282b4a 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -31,10 +31,9 @@ PHDRS { SECTIONS { . = 0x100000; - _stext = .; /* Start of text section */ .text : { - /* Text and read-only data */ - _text = .; + _stext = .; /* Start of text section */ + _text = .; /* Text and read-only data */ HEAD_TEXT TEXT_TEXT SCHED_TEXT @@ -46,11 +45,10 @@ SECTIONS *(.text.*_indirect_*) *(.fixup) *(.gnu.warning) + . 
= ALIGN(PAGE_SIZE); + _etext = .; /* End of text section */ } :text = 0x0700 - . = ALIGN(PAGE_SIZE); - _etext = .; /* End of text section */ - NOTES :text :note .dummy : { *(.dummy) } :data diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index fcb55b02990ef96e20148472828de2e324c6a56f..c567a20ecb7819445a73563a290d092a0f0c14b4 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1144,7 +1144,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) * The vcpu gave up the cpu voluntarily, mark it as a good * yield-candidate. */ - vcpu->preempted = true; + vcpu->ready = true; swake_up_one(&vcpu->wq); vcpu->stat.halt_wakeup++; } @@ -1879,6 +1879,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, case KVM_S390_MCHK: irq->u.mchk.mcic = s390int->parm64; break; + case KVM_S390_INT_PFAULT_INIT: + irq->u.ext.ext_params = s390int->parm; + irq->u.ext.ext_params2 = s390int->parm64; + break; + case KVM_S390_RESTART: + case KVM_S390_INT_CLOCK_COMP: + case KVM_S390_INT_CPU_TIMER: + break; + default: + return -EINVAL; } return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ac5da6b0b862a3d725297890b7feb6cae6d06788..50aee0b03e901f5a349ab66df28a1ccf107305f8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -416,19 +416,30 @@ static void kvm_s390_cpu_feat_init(void) int kvm_arch_init(void *opaque) { + int rc; + kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); if (!kvm_s390_dbf) return -ENOMEM; if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { - debug_unregister(kvm_s390_dbf); - return -ENOMEM; + rc = -ENOMEM; + goto out_debug_unreg; } kvm_s390_cpu_feat_init(); /* Register floating interrupt controller interface. */ - return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); + rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); + if (rc) { + pr_err("Failed to register FLIC rc=%d\n", rc); + goto out_debug_unreg; + } + return 0; + +out_debug_unreg: + debug_unregister(kvm_s390_dbf); + return rc; } void kvm_arch_exit(void) @@ -463,7 +474,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_CSS_SUPPORT: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_S390_IRQCHIP: case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: @@ -489,6 +499,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_NR_VCPUS: case KVM_CAP_MAX_VCPUS: + case KVM_CAP_MAX_VCPU_ID: r = KVM_S390_BSCA_CPU_SLOTS; if (!kvm_s390_use_sca_entries()) r = KVM_MAX_VCPUS; @@ -606,7 +617,7 @@ static void icpt_operexc_on_all_vcpus(struct kvm *kvm) } } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r; @@ -927,6 +938,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) /* mark all the pages in active slots as dirty */ for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { ms = slots->memslots + slotnr; + if (!ms->dirty_bitmap) + return -EINVAL; /* * The second half of the bitmap is only used on x86, * and would be wasted otherwise, so we put it to good @@ -1898,14 +1911,6 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_s390_inject_vm(kvm, &s390int); break; } - case KVM_ENABLE_CAP: { - struct kvm_enable_cap cap; - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - break; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } case KVM_CREATE_IRQCHIP: { struct kvm_irq_routing_entry 
routing; @@ -2107,13 +2112,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); if (!kvm->arch.sca) goto out_err; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); sca_offset += 16; if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE) sca_offset = 0; kvm->arch.sca = (struct bsca_block *) ((char *) kvm->arch.sca + sca_offset); - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); sprintf(debug_name, "kvm-%u", current->pid); @@ -2976,10 +2981,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) vcpu_load(vcpu); - if (test_fp_ctl(fpu->fpc)) { - ret = -EINVAL; - goto out; - } vcpu->run->s.regs.fpc = fpu->fpc; if (MACHINE_HAS_VX) convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, @@ -2987,7 +2988,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) else memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); -out: vcpu_put(vcpu); return ret; } @@ -3887,7 +3887,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION | KVM_S390_MEMOP_F_CHECK_ONLY; - if (mop->flags & ~supported_flags) + if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) return -EINVAL; if (mop->size > MEM_OP_MAX_SIZE) @@ -3955,7 +3955,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp, } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; - struct kvm_s390_irq s390irq; + struct kvm_s390_irq s390irq = {}; if (copy_from_user(&s390int, argp, sizeof(s390int))) return -EFAULT; @@ -4155,21 +4155,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - int rc; - - /* If the basics of the memslot do not change, we do not want - * to update the gmap. Every update causes several unnecessary - * segment translation exceptions. This is usually handled just - * fine by the normal fault handler + gmap, but it will also - * cause faults on the prefix page of running guest CPUs. 
- */ - if (old->userspace_addr == mem->userspace_addr && - old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && - old->npages * PAGE_SIZE == mem->memory_size) - return; + int rc = 0; - rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, - mem->guest_phys_addr, mem->memory_size); + switch (change) { + case KVM_MR_DELETE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + break; + case KVM_MR_MOVE: + rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, + old->npages * PAGE_SIZE); + if (rc) + break; + /* FALLTHROUGH */ + case KVM_MR_CREATE: + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, + mem->guest_phys_addr, mem->memory_size); + break; + case KVM_MR_FLAGS_ONLY: + break; + default: + WARN(1, "Unknown KVM MR CHANGE: %d\n", change); + } if (rc) pr_warn("failed to commit memory region\n"); return; diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a2b28cd1e3fedb2bdcc6dbb1cff530ba0e3371a7..93de43a32cfb91d06862ac1375104851d6af0e2e 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -933,7 +933,6 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu, gmap = gmap_shadow(vcpu->arch.gmap, asce, edat); if (IS_ERR(gmap)) return PTR_ERR(gmap); - gmap->private = vcpu->kvm; WRITE_ONCE(vsie_page->gmap, gmap); return 0; } diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 57ab40188d4bddab071505f1d5a204a82dca3ce5..5418d10dc2a819b030d01c985a5e8129d5b1e3ce 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_KPROBES) += probes.o lib-$(CONFIG_UPROBES) += probes.o +# Instrumenting memory accesses to __user data (in different address space) +# produce false positives +KASAN_SANITIZE_uaccess.o := n + chkbss := mem.o include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index c4f8039a35e8dda0bc20999b7db089e3ab09b613..0267405ab7c69fec93f73df3ed35f381b03c7f38 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void) { mm_segment_t old_fs; unsigned long asce, cr; + unsigned long flags; old_fs = current->thread.mm_segment; if (old_fs & 1) return old_fs; + /* protect against a concurrent page table upgrade */ + local_irq_save(flags); current->thread.mm_segment |= 1; asce = S390_lowcore.kernel_asce; if (likely(old_fs == USER_DS)) { @@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void) __ctl_load(asce, 7, 7); set_cpu_flag(CIF_ASCE_SECONDARY); } + local_irq_restore(flags); return old_fs; } EXPORT_SYMBOL(enable_sacf_uaccess); diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 510a18299196f3b797be51fd22b2bbfbe5cd003f..a51c892f14f3ee5bd393ed74003c96fd80268979 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, } if (write) { - len = *lenp; - if (copy_from_user(buf, buffer, - len > sizeof(buf) ? 
sizeof(buf) : len)) + len = min(*lenp, sizeof(buf)); + if (copy_from_user(buf, buffer, len)) return -EFAULT; - buf[sizeof(buf) - 1] = '\0'; + buf[len - 1] = '\0'; cmm_skip_blanks(buf, &p); nr = simple_strtoul(p, &p, 0); cmm_skip_blanks(p, &p); seconds = simple_strtoul(p, &p, 0); cmm_set_timeout(nr, seconds); + *ppos += *lenp; } else { len = sprintf(buf, "%ld %ld\n", cmm_timeout_pages, cmm_timeout_seconds); @@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; + *lenp = len; + *ppos += len; } - *lenp = len; - *ppos += len; return 0; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 72af23bacbb586ee87dce086fac2e28dd85bc7ec..c28a970cb1b9eb9f831105a3b7e68320a9e3f422 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -107,7 +107,6 @@ void bust_spinlocks(int yes) /* * Find out which address space caused the exception. - * Access register mode is impossible, ignore space == 3. */ static inline enum fault_type get_fault_type(struct pt_regs *regs) { @@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs) } return VDSO_FAULT; } + if (trans_exc_code == 1) { + /* access register mode, not used in the kernel */ + return USER_FAULT; + } /* home space exception -> access via kernel ASCE */ return KERNEL_FAULT; } @@ -534,10 +537,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) fault = VM_FAULT_PFAULT; goto out_up; } - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~(FAULT_FLAG_ALLOW_RETRY | - FAULT_FLAG_RETRY_NOWAIT); + flags &= ~FAULT_FLAG_RETRY_NOWAIT; flags |= FAULT_FLAG_TRIED; down_read(&mm->mmap_sem); goto retry; diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 911c7ded35f15a2706b293ac2533b02b4906c4aa..df2de0b9822b81fef4d81844d01048ae566bc75d 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -1652,6 +1652,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, return ERR_PTR(-ENOMEM); new->mm = parent->mm; new->parent = gmap_get(parent); + new->private = parent->private; new->orig_asce = asce; new->edat_level = edat_level; new->initialized = false; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 2809d11c7a283b825dc28b1ce9bde1cc4af28b09..9b5b866d8adf1025b2f0fff05fdd938ce4af727f 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -39,7 +39,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = compound_head(page); - if (!page_cache_get_speculative(head)) + if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0) + || !page_cache_get_speculative(head))) return 0; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); @@ -77,7 +78,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, refs++; } while (addr += PAGE_SIZE, addr != end); - if (!page_cache_add_speculative(head, refs)) { + if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0) + || !page_cache_add_speculative(head, refs))) { *nr -= refs; return 0; } @@ -151,7 +153,8 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, refs++; } while (addr += PAGE_SIZE, addr != end); - if (!page_cache_add_speculative(head, refs)) { + if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0) + || !page_cache_add_speculative(head, refs))) { *nr -= refs; return 0; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 
3fa3e532361227ad134f32b46c6c0db58d9d1240..e3ea6583c20d55b48fbcdf6d7b93e1387456cb66 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -229,6 +229,9 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, unsigned long size_pages = PFN_DOWN(size); int rc; + if (WARN_ON_ONCE(restrictions->altmap)) + return -EINVAL; + rc = vmem_add_mapping(start, size); if (rc) return rc; @@ -239,15 +242,13 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, return rc; } -#ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) { - /* - * There is no hardware or firmware interface which could trigger a - * hot memory remove on s390. So there is nothing that needs to be - * implemented. - */ - return -EBUSY; + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + __remove_pages(start_pfn, nr_pages, altmap); + vmem_remove_mapping(start, size); } -#endif #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 76d89ee8b428837fc6c32f962d0104787caa29a3..f3bc9c9305da6231cedd7b0a73afce19c83836ba 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -72,8 +72,20 @@ static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; - if (current->active_mm == mm) - set_user_asce(mm); + /* we must change all active ASCEs to avoid the creation of new TLBs */ + if (current->active_mm == mm) { + S390_lowcore.user_asce = mm->context.asce; + if (current->thread.mm_segment == USER_DS) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + /* Mark user-ASCE present in CR1 */ + clear_cpu_flag(CIF_ASCE_PRIMARY); + } + if (current->thread.mm_segment == USER_DS_SACF) { + __ctl_load(S390_lowcore.user_asce, 7, 7); + /* enable_sacf_uaccess does all or nothing */ + WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY)); + } + } __tlb_flush_local(); } @@ -101,6 +113,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) mm->context.asce_limit = _REGION1_SIZE; mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; + mm_inc_nr_puds(mm); } else { crst_table_init(table, _REGION1_ENTRY_EMPTY); pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f2cc7da473e4ed2afb858c868a6e3238d9c1dce5..ae894ac83fd6185071b502c144a9832979713495 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, return old; } +#ifdef CONFIG_PGSTE static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) pmd = pmd_alloc(mm, pud, addr); return pmd; } +#endif pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t new) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d7052cbe984f81c02d203a6b34e7af6d658bd4f9..fe7c505b4caa7b083fa0398d6605c478c2d75df4 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -561,10 +561,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9080000, dst_reg, src_reg); break; case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */ - if (!imm) - break; - /* alfi %dst,imm */ - EMIT6_IMM(0xc20b0000, dst_reg, imm); + if 
(imm != 0) { + /* alfi %dst,imm */ + EMIT6_IMM(0xc20b0000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */ @@ -586,17 +586,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9090000, dst_reg, src_reg); break; case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */ - if (!imm) - break; - /* alfi %dst,-imm */ - EMIT6_IMM(0xc20b0000, dst_reg, -imm); + if (imm != 0) { + /* alfi %dst,-imm */ + EMIT6_IMM(0xc20b0000, dst_reg, -imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ if (!imm) break; - /* agfi %dst,-imm */ - EMIT6_IMM(0xc2080000, dst_reg, -imm); + if (imm == -0x80000000) { + /* algfi %dst,0x80000000 */ + EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); + } else { + /* agfi %dst,-imm */ + EMIT6_IMM(0xc2080000, dst_reg, -imm); + } break; /* * BPF_MUL @@ -611,10 +616,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb90c0000, dst_reg, src_reg); break; case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */ - if (imm == 1) - break; - /* msfi %r5,imm */ - EMIT6_IMM(0xc2010000, dst_reg, imm); + if (imm != 1) { + /* msfi %r5,imm */ + EMIT6_IMM(0xc2010000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */ @@ -665,6 +670,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i if (BPF_OP(insn->code) == BPF_MOD) /* lhgi %dst,0 */ EMIT4_IMM(0xa7090000, dst_reg, 0); + else + EMIT_ZERO(dst_reg); break; } /* lhi %w0,0 */ @@ -757,10 +764,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9820000, dst_reg, src_reg); break; case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */ - if (!imm) - break; - /* xilf %dst,imm */ - EMIT6_IMM(0xc0070000, dst_reg, imm); + if (imm != 0) { + /* xilf %dst,imm */ + EMIT6_IMM(0xc0070000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ @@ -781,10 +788,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */ - if (imm == 0) - break; - /* sll %dst,imm(%r0) */ - EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); + if (imm != 0) { + /* sll %dst,imm(%r0) */ + EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */ @@ -806,10 +813,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */ - if (imm == 0) - break; - /* srl %dst,imm(%r0) */ - EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); + if (imm != 0) { + /* srl %dst,imm(%r0) */ + EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */ @@ -841,7 +848,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i break; case BPF_ALU64 | BPF_NEG: /* dst = -dst */ /* lcgr %dst,%dst */ - EMIT4(0xb9130000, dst_reg, dst_reg); + EMIT4(0xb9030000, dst_reg, dst_reg); break; /* * BPF_FROM_BE/LE @@ -883,6 +890,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i break; } break; + /* + * BPF_NOSPEC (speculation barrier) + */ 
+ case BPF_ST | BPF_NOSPEC: + break; /* * BPF_ST(X) */ @@ -1015,8 +1027,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* llgf %w1,map.max_entries(%b2) */ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); - /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ - EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, + /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0, 0xa); /* @@ -1042,8 +1054,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i * goto out; */ - /* sllg %r1,%b3,3: %r1 = index * 8 */ - EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); + /* llgfr %r1,%b3: %r1 = (u32) index */ + EMIT4(0xb9160000, REG_1, BPF_REG_3); + /* sllg %r1,%r1,3: %r1 *= 8 */ + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); /* lg %r1,prog(%b2,%r1) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 5bd374491f9461467790e1e0522f838827f094df..6c151b42e65db2ec8c5b81243190eac2416e4f04 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -54,6 +54,7 @@ int __node_distance(int a, int b) { return mode->distance ? mode->distance(a, b) : 0; } +EXPORT_SYMBOL(__node_distance); int numa_debug_enabled; diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 19b2d2a9b43db346a4ac3e308582c7a322c9d01c..eeb7450db18c06b8760cd5e1a577234ebc2c95f7 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state) struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED}; int rc; - rrb = clp_alloc_block(GFP_KERNEL); + rrb = clp_alloc_block(GFP_ATOMIC); if (!rrb) return -ENOMEM; diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index ce6a3f75065bf9719eb06a6c3c03f9b8b33f1082..fdccb7689bb9b8f3f962edc5570d9b322512e3c8 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -13,8 +13,10 @@ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE $(call if_changed_rule,as_o_S) -$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE - $(call if_changed_rule,cc_o_c) +KCOV_INSTRUMENT := n +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib LDFLAGS_purgatory.ro += -z nodefaultlib diff --git a/arch/s390/purgatory/string.c b/arch/s390/purgatory/string.c new file mode 100644 index 0000000000000000000000000000000000000000..c98c22a72db717834085c4ef00f66475144c780e --- /dev/null +++ b/arch/s390/purgatory/string.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define __HAVE_ARCH_MEMCMP /* arch function */ +#include "../lib/string.c" diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1fb7b6d72bafcdae6c10cdc5b4174ae6c45a1db9..79f9e9d2b01f68dc22fae7010c7e61b12975ca34 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -61,6 +61,7 @@ config SUPERH config SUPERH32 def_bool "$(ARCH)" = "sh" + select ARCH_32BIT_OFF_T select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_IOREMAP_PROT if MMU && !X2TLB diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 6394b4f0a69be95d4a44b1c347ceb4f09dfdb166..f42feab25dcf1deddf7deeb1e54daf2d50eb9f14 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -8,27 +8,19 @@ config SH_ALPHA_BOARD 
bool config SH_DEVICE_TREE - bool "Board Described by Device Tree" + bool select OF select OF_EARLY_FLATTREE select TIMER_OF select COMMON_CLK select GENERIC_CALIBRATE_DELAY - help - Select Board Described by Device Tree to build a kernel that - does not hard-code any board-specific knowledge but instead uses - a device tree blob provided by the boot-loader. You must enable - drivers for any hardware you want to use separately. At this - time, only boards based on the open-hardware J-Core processors - have sufficient driver coverage to use this option; do not - select it if you are using original SuperH hardware. config SH_JCORE_SOC bool "J-Core SoC" - depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2) + select SH_DEVICE_TREE select CLKSRC_JCORE_PIT select JCORE_AIC - default y if CPU_J2 + depends on CPU_J2 help Select this option to include drivers core components of the J-Core SoC, including interrupt controllers and timers. diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index e59c577ed871591a62e5b25789469549a2641b2d..c70bc7809ddaecce66030c43ee914c24eee894e7 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 26789ad2819360fc2e480e51185a4c3161cbe972..cb99df514a1ca3f584711051c48c6cf97cdc07c1 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -175,10 +175,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = { struct sh_clk_ops; -void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { } -void __init plat_irq_setup(void) +void __init __weak plat_irq_setup(void) { } diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c index 762bc561991058c82cc2e19c90d87c1fa49fb70c..a82596dd9060b0433398d1de102f495d13a13326 100644 --- a/arch/sh/drivers/push-switch.c +++ b/arch/sh/drivers/push-switch.c @@ -104,8 +104,8 @@ static int switch_drv_remove(struct platform_device *pdev) device_remove_file(&pdev->dev, &dev_attr_switch); platform_set_drvdata(pdev, NULL); - flush_work(&psw->work); del_timer_sync(&psw->debounce); + flush_work(&psw->work); free_irq(irq, pdev); kfree(psw); diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h index 9c84386d35cba988f95bc6d2c80b558cbc99ad9a..8a7f90866cdadb85f2751e5ba00a38fdf734caec 100644 --- a/arch/sh/include/asm/checksum_32.h +++ b/arch/sh/include/asm/checksum_32.h @@ -200,7 +200,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, int len, __wsum sum, int *err_ptr) { - if (access_ok(VERIFY_WRITE, dst, len)) + if (access_ok(dst, len)) return csum_partial_copy_generic((__force const void *)src, dst, len, sum, NULL, err_ptr); diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 6d192f4908a729c5d9d57b58dc060442a362ed49..3190ec89df81c3b41597fc7fbec032d2b5de7ba8 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -22,7 +22,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 
98cb8c802b1a8cccafb1cd52d4717a149490792c..0ae60d6800004f332b25f6e447077b2ee8227184 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -371,7 +371,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } #define ioremap_nocache ioremap #define ioremap_uc ioremap -#define iounmap __iounmap + +static inline void iounmap(void __iomem *addr) +{ + __iounmap(addr); +} /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 77abe192fb43d90cd6d56bfe878b54126188ac34..adcb0bfe238e3a202e64c5e1d10926928d9e96c3 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, return tlb_remove_page(tlb, page); } +static inline void +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, + unsigned long size) +{ + if (tlb->start > address) + tlb->start = address; + if (tlb->end < address + size) + tlb->end = address + size; +} + #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, unsigned int page_size) diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 32eb56e00c11f9558b346a809d66643463203d4e..5fe751ad75821f44b38996186b86879481088ac4 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -16,9 +16,12 @@ * sum := addr + size; carry? --> flag = true; * if (sum >= addr_limit) flag = true; */ -#define __access_ok(addr, size) \ - (__addr_ok((addr) + (size))) -#define access_ok(type, addr, size) \ +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + __ao_end >= __ao_a && __addr_ok(__ao_end); }) + +#define access_ok(addr, size) \ (__chk_user_ptr(addr), \ __access_ok((unsigned long __force)(addr), (size))) @@ -66,7 +69,7 @@ struct __large_struct { unsigned long buf[100]; }; long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ - if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ + if (likely(access_ok(__gu_addr, (size)))) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -87,7 +90,7 @@ struct __large_struct { unsigned long buf[100]; }; long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(ptr)) __pu_val = x; \ - if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \ + if (likely(access_ok(__pu_addr, size))) \ __put_user_size(__pu_val, __pu_addr, (size), \ __pu_err); \ __pu_err; \ @@ -132,8 +135,7 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); void __user * __cl_addr = (addr); \ unsigned long __cl_size = (n); \ \ - if (__cl_size && access_ok(VERIFY_WRITE, \ - ((unsigned long)(__cl_addr)), __cl_size)) \ + if (__cl_size && access_ok(__cl_addr, __cl_size)) \ __cl_size = __clear_user(__cl_addr, __cl_size); \ \ __cl_size; \ diff --git a/arch/sh/include/asm/vmalloc.h b/arch/sh/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..716b774726465838ad892c9da522cb1586bff880 --- /dev/null +++ b/arch/sh/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SH_VMALLOC_H +#define _ASM_SH_VMALLOC_H + +#endif /* _ASM_SH_VMALLOC_H */ diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h index 
96f0246ad2f2b756cf8db42b3533bc9a62d7537e..82b63208135aec06252b41ae7e0795277cf56265 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7734.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h @@ -134,7 +134,7 @@ enum { GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C, GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A, GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B, - GPIO_FN_RD_WR, GPIO_FN_TCLK0, + GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4, GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B, GPIO_FN_ET0_ETXD3_A, GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B, diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h index 96b8cb1f754a9556b428c1de4c6481f653bde334..029bbadaf7ab50ac6ecbac8964b750604db4481c 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7786.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h @@ -135,7 +135,7 @@ enum { static inline u32 sh7786_mm_sel(void) { - return __raw_readl(0xFC400020) & 0x7; + return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7; } #endif /* __CPU_SH7786_H__ */ diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c index 85961b4f9c6956cc1e9e6d1ca08f5e6443c1441c..136b4aefd73bbb4662218019a86c67a5ba0d1afc 100644 --- a/arch/sh/kernel/cpu/proc.c +++ b/arch/sh/kernel/cpu/proc.c @@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + return *pos < nr_cpu_ids ? cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 4ca78ed71ad2c8ff33707fb8a472d55b553de36e..fb97dab3bad7dc07a11948c0cb11c9de96b4fe8b 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -106,7 +106,8 @@ static int __sq_remap(struct sq_mapping *map, pgprot_t prot) #if defined(CONFIG_MMU) struct vm_struct *vma; - vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX); + vma = __get_vm_area_caller(map->size, VM_ALLOC, map->sq_addr, + SQ_ADDRMAX, __builtin_return_address(0)); if (!vma) return -ENOMEM; diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 96dd9f7da2506d061fa499c5992a8e7eb4a426bf..1b04270e5460e8d77decb68a5c15f329227706ea 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void) void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; - int faulted, err; - struct ftrace_graph_ent trace; + int faulted; unsigned long return_hooker = (unsigned long)&return_to_handler; if (unlikely(ftrace_graph_is_dead())) @@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) return; } - err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); - if (err == -EBUSY) { + if (function_graph_enter(old, self_addr, 0, NULL)) __raw_writel(old, parent); - return; - } - - trace.func = self_addr; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; - __raw_writel(old, parent); - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index d9ff3b42da7cb11a3e6d62ec39dcfc2c35ce40d2..2569ffc061f9c69e7896ef47eb0bc53ed9947768 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -160,6 +160,7 @@ int arch_bp_generic_fields(int 
sh_len, int sh_type, switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; + break; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index c46c0020ff55edf0fdfda120ed90cd3fa7b65d17..2a2121ba8ebe2eee55e479abbdc29878701e86b7 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -160,7 +160,7 @@ asmlinkage int sys_sigreturn(void) /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) @@ -190,7 +190,7 @@ asmlinkage int sys_rt_sigreturn(void) /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) @@ -272,7 +272,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); @@ -338,7 +338,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 7b77f18124349c5fabc7be8ca335e0693dfd2995..fe1f9af3c8387f3f0378690d78691847edb4f5d6 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -262,7 +262,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3, /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) @@ -296,7 +296,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) @@ -382,7 +382,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); @@ -468,7 +468,7 @@ static int setup_rt_frame(struct ksignal *kig, sigset_t *set, frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= __put_user(&frame->info, &frame->pinfo); diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 014fb08cf133a44d26fe0e78c6310055763d4418..d267d699fab82c10143487de3b7e1cd624f3721a 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ 
-43,7 +43,7 @@ static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_ /* SHmedia */ aligned_pc = pc & ~3; if (from_user_mode) { - if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) { + if (!access_ok(aligned_pc, sizeof(insn_size_t))) { get_user_error = -EFAULT; } else { get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc); @@ -183,7 +183,7 @@ static int misaligned_load(struct pt_regs *regs, if (user_mode(regs)) { __u64 buffer; - if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<> PAGE_SHIFT; - struct zone *zone; - int ret; - - zone = page_zone(pfn_to_page(start_pfn)); - ret = __remove_pages(zone, start_pfn, nr_pages, altmap); - if (unlikely(ret)) - pr_warn("%s: Failed, __remove_pages() == %d\n", __func__, - ret); - return ret; + __remove_pages(start_pfn, nr_pages, altmap); } -#endif #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c index c7695f99c8c3cb2e6fc2972607cca9f555d2e6d8..8279a7e91043cb079b8121c3a884c8ee0c8e98fb 100644 --- a/arch/sh/oprofile/backtrace.c +++ b/arch/sh/oprofile/backtrace.c @@ -51,7 +51,7 @@ user_backtrace(unsigned long *stackaddr, struct pt_regs *regs) unsigned long buf_stack; /* Also check accessibility of address */ - if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long))) + if (!access_ok(stackaddr, sizeof(unsigned long))) return NULL; if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long))) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e6f2a38d2e61ece051d30350ad332b7b2acab229..453988f1a1f174b5c75214edb111ea7ff8f0d9fd 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -50,6 +50,7 @@ config SPARC config SPARC32 def_bool !64BIT + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select DMA_NONCOHERENT_OPS select GENERIC_ATOMIC64 diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index d1e53d7aed39f1fe30b2c041e2aeba6d9cad8eff..5fc98d80b03bccd0a3be9aea757836bd0465535e 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -87,7 +87,7 @@ static inline __wsum csum_partial_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err) { - if (!access_ok (VERIFY_WRITE, dst, len)) { + if (!access_ok(dst, len)) { *err = -EFAULT; return sum; } else { diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h index f71ef3729888f2b14d764d2b39e3ad9741fe230e..316faa0130bab987e0818a33f3654fe9a50be0b0 100644 --- a/arch/sparc/include/asm/cmpxchg_64.h +++ b/arch/sparc/include/asm/cmpxchg_64.h @@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long return val; } -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ +({ __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) void __xchg_called_with_bad_pointer(void); diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index b162c23ae8c2305eea1ce1f8b6bb2a16aadcca24..7a836d21ff0c8cd4002febc01eb0b9e6efcf18e0 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -409,6 +409,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) } #define ioremap_nocache(X,Y) ioremap((X),(Y)) +#define ioremap_uc(X,Y) ioremap((X),(Y)) #define ioremap_wc(X,Y) ioremap((X),(Y)) #define ioremap_wt(X,Y) ioremap((X),(Y)) 
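The sh and sparc hunks above and below repeatedly drop the first argument of access_ok(): after this series the helper takes only the user pointer and the length, since the VERIFY_READ/VERIFY_WRITE type argument was never acted on by these architectures and the direction is enforced by the copy routine itself. A minimal sketch of a converted caller follows; example_read_user() is a hypothetical function used only for illustration and is not part of this patch.

#include <linux/uaccess.h>

/*
 * Hypothetical caller, for illustration only: shows the two-argument
 * access_ok() used throughout this series.  The range check only verifies
 * that [uptr, uptr + len) lies below the address limit and does not wrap;
 * read vs. write permission is handled by the copy routine.
 */
static int example_read_user(void *dst, const void __user *uptr, size_t len)
{
	if (!access_ok(uptr, len))		/* was: access_ok(VERIFY_READ, uptr, len) */
		return -EFAULT;
	if (copy_from_user(dst, uptr, len))	/* faulting pages still yield -EFAULT here */
		return -EFAULT;
	return 0;
}

On sh the reworked __access_ok() additionally computes addr + size - !!size, so a zero-length request and an end address that would wrap past the limit are both rejected before __addr_ok() is consulted.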
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index 05df5f0430535f307495662610f2c06dda92d0b5..3c5a1c620f0f7759aa1af38c889078ddb77da2b2 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -21,6 +21,7 @@ */ #define HAS_DMA +#ifdef CONFIG_PARPORT_PC_FIFO static DEFINE_SPINLOCK(dma_spin_lock); #define claim_dma_lock() \ @@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock); #define release_dma_lock(__flags) \ spin_unlock_irqrestore(&dma_spin_lock, __flags); +#endif static struct sparc_ebus_info { struct ebus_dma_info info; diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index 4ff29b1406a9b7c44d438d20c73118fc77630416..b1d4e2e3210fbe6d948cdb7c91f42ab656e57191 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \ } while(0) void synchronize_user_stack(void); -void fault_in_user_windows(void); +struct pt_regs; +void fault_in_user_windows(struct pt_regs *); #endif /* __SPARC64_SWITCH_TO_64_H */ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index de71c65b99f022b586c306df754ea36f3a9d7471..5153798051fb2f0a3a529ec62e8f8fe9b7b7ff2e 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -39,8 +39,7 @@ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (uaccess_kernel()) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) -#define access_ok(type, addr, size) \ - ({ (void)(type); __access_ok((unsigned long)(addr), size); }) +#define access_ok(addr, size) __access_ok((unsigned long)(addr), size) /* * The exception table consists of pairs of addresses: the first is the diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index cbb308cee394f51b0d5e161d34dfe1cfb2cc316b..87ae9ffb1521b199ea57b477c4fc8fb789bf3dc3 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -68,7 +68,7 @@ static inline int __access_ok(const void __user * addr, unsigned long size) return 1; } -static inline int access_ok(int type, const void __user * addr, unsigned long size) +static inline int access_ok(const void __user * addr, unsigned long size) { return 1; } diff --git a/arch/sparc/include/asm/vmalloc.h b/arch/sparc/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..04b8ab9518b88d224a1e749a9ba1a145d9cd9949 --- /dev/null +++ b/arch/sparc/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SPARC_VMALLOC_H +#define _ASM_SPARC_VMALLOC_H + +#endif /* _ASM_SPARC_VMALLOC_H */ diff --git a/arch/sparc/include/uapi/asm/mman.h b/arch/sparc/include/uapi/asm/mman.h index f6f99ec65bb3e418c35f31d9976dc17c419cb04c..953528235c2e52f4c1ca0159bea29eb7681beb8c 100644 --- a/arch/sparc/include/uapi/asm/mman.h +++ b/arch/sparc/include/uapi/asm/mman.h @@ -26,6 +26,9 @@ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x40000 /* create a huge page mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ +#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ #endif /* _UAPI__SPARC_MMAN_H__ */ diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 
cf8640841b7a2ce81a5731989faed519656e64e0..97c0e19263d1f66ffb1faa87c1fd8d0c455de286 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := perf_event.o obj-$(CONFIG_SPARC64) += $(pc--y) obj-$(CONFIG_UPROBES) += uprobes.o -obj-$(CONFIG_SPARC64) += jump_label.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 915dda4ae41205ca51d5aa0d8ac4ee4c777ba3e8..684b84ce397f711c8ebf7857d9f4c826d4eb09ca 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; - struct ftrace_graph_ent trace; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return parent + 8UL; - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) - return parent + 8UL; - - if (ftrace_push_return_trace(parent, self_addr, &trace.depth, - frame_pointer, NULL) == -EBUSY) + if (function_graph_enter(parent, self_addr, frame_pointer, NULL)) return parent + 8UL; return return_hooker; diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 7f8eac51df337ecb494e625d240ac3b0b7e90011..a4cfaeecaf5ea8c88849078747d08dc0c07db334 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -9,8 +9,6 @@ #include -#ifdef HAVE_JUMP_LABEL - void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -47,5 +45,3 @@ void arch_jump_label_transform(struct jump_entry *entry, flushi(insn); mutex_unlock(&text_mutex); } - -#endif diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 39a2503fa3e18e8816dde7078c43dc5c5d509b5e..51028abe5e9038a127a39f5473049704120b66c6 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -357,6 +357,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, node_info->vdev_port.id = *idp; node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); + if (!node_info->vdev_port.name) + return -1; node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; return 0; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 67b3e6b3ce5d7cf8b417d361c5bbaadce92cc1e0..1ad5911f62b416e63cb3dd80fe4bb897f0df1c93 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event, s64 period = hwc->sample_period; int ret = 0; + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ + if (unlikely(period != hwc->last_period)) + left = period - (hwc->last_period - left); + if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6c086086ca8fafd6a55bcb1a09b24f56f5b42ef8..59eaf6227af1d69462c577a7104c25f72521533b 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current); } -void fault_in_user_windows(void) +static const char uwfault32[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 
%08lx\n"; +static const char uwfault64[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n"; + +void fault_in_user_windows(struct pt_regs *regs) { struct thread_info *t = current_thread_info(); unsigned long window; @@ -534,9 +540,9 @@ void fault_in_user_windows(void) do { struct reg_window *rwin = &t->reg_window[window]; int winsize = sizeof(struct reg_window); - unsigned long sp; + unsigned long sp, orig_sp; - sp = t->rwbuf_stkptrs[window]; + orig_sp = sp = t->rwbuf_stkptrs[window]; if (test_thread_64bit_stack(sp)) sp += STACK_BIAS; @@ -547,8 +553,16 @@ void fault_in_user_windows(void) stack_unaligned(sp); if (unlikely(copy_to_user((char __user *)sp, - rwin, winsize))) + rwin, winsize))) { + if (show_unhandled_signals) + printk_ratelimited(is_compat_task() ? + uwfault32 : uwfault64, + current->comm, current->pid, + sp, orig_sp, + regs->tpc, + regs->u_regs[UREG_I7]); goto barf; + } } while (window--); } set_thread_wsaved(0); @@ -556,8 +570,7 @@ void fault_in_user_windows(void) barf: set_thread_wsaved(window + 1); - user_exit(); - do_exit(SIGILL); + force_sig(SIGSEGV, current); } asmlinkage long sparc_do_fork(unsigned long clone_flags, diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 4073e2b87dd0e39045eebb8cc67328ba720fcadb..29aa34f11720cea3e098372995f751298b09e641 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -39,6 +39,7 @@ __handle_preemption: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate __handle_user_windows: + add %sp, PTREGS_OFF, %o0 call fault_in_user_windows 661: wrpr %g0, RTRAP_PSTATE, %pstate /* If userspace is using ADI, it could potentially pass diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 44d379db3f6484bf57a7f0b2ddbddabbee412f23..4c5b3fcbed94c376a2a44bb0c4fdaab139b3a169 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } @@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 48366e5eb5b266fbc48ddce4d02a6809120f92b3..e9de1803a22e004adbb3d6b541ad552f71626e7d 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) get_sigframe(ksig, regs, sf_size); if (invalid_frame_pointer (sf)) { - do_exit(SIGILL); /* won't return, actually */ + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git 
a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 1e9fae56a8530417a1a72667464dd35fa09c19f7..f25c6daa9f5254c45f3e4446cba33af4a4c995a8 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c @@ -65,7 +65,7 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) set_used_math(); clear_tsk_thread_flag(current, TIF_USEDFPU); - if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) + if (!access_ok(fpu, sizeof(*fpu))) return -EFAULT; err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index bb68c805b891855e18af6397ce534f74d5550a4d..ff9389a1c9f3f68c5acaa32123d65c5fedbf9846 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -47,9 +47,9 @@ sys_call_table32: .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 -/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit +/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 +/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 64ac8c0c14290e53543d58b77b8d9753729bfbf8..83db94c0b43189e4c938d30878368ee2cbf41c08 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -278,7 +278,6 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, enum direction dir) { unsigned int reg; - int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE; int size = ((insn >> 19) & 3) == 3 ? 
8 : 4; if ((regs->pc | regs->npc) & 3) @@ -290,18 +289,18 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, reg = (insn >> 25) & 0x1f; if (reg >= 16) { - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) + if (!access_ok(WINREG_ADDR(reg - 16), size)) return -EFAULT; } reg = (insn >> 14) & 0x1f; if (reg >= 16) { - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) + if (!access_ok(WINREG_ADDR(reg - 16), size)) return -EFAULT; } if (!(insn & 0x2000)) { reg = (insn & 0x1f); if (reg >= 16) { - if (!access_ok(check, WINREG_ADDR(reg - 16), size)) + if (!access_ok(WINREG_ADDR(reg - 16), size)) return -EFAULT; } } diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index b0440b0edd97b4af739c6aa53d49402bc8de2656..6e8e6ea00836e039dbf7288c991b76fcadd859bd 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -261,7 +261,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, 1, regs, address); } if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 8f8a604c130023fb238408cedbb3f5a2a30c0a26..73399ef2e25558817b7e59133b7d556382779de7 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -459,7 +459,6 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) 1, regs, address); } if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f396048a0d6808f7faddc7addbd6c461e3c835ad..39822f611c01503211413a28d8c3492de60e6e26 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1383,6 +1383,7 @@ int __node_distance(int from, int to) } return numa_latency[from][to]; } +EXPORT_SYMBOL(__node_distance); static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index d245f89d139528f049f2c8d596729974fb7e2020..d220b6848746c5ce46095fd2fb4aae4669ccb9ea 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */ sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f - add %g2, 1, %g2 + sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP @@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f - add %g2, 1, %g2 + sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! 
Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 222785af550b46736676808b6e00d8d8cef9a286..1bb1e64d4377db3cb82f781dbc3fb465b77b69c8 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1261,6 +1261,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit(opcode | RS1(src) | rs2 | RD(dst), ctx); break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: @@ -1270,6 +1273,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const u8 tmp2 = bpf2sparc[TMP_REG_2]; u32 opcode = 0, rs2; + if (insn->dst_reg == BPF_REG_FP) + ctx->saw_frame_pointer = true; + ctx->tmp_2_used = true; emit_loadimm(imm, tmp2, ctx); @@ -1308,6 +1314,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const u8 tmp = bpf2sparc[TMP_REG_1]; u32 opcode = 0, rs2; + if (insn->dst_reg == BPF_REG_FP) + ctx->saw_frame_pointer = true; + switch (BPF_SIZE(code)) { case BPF_W: opcode = ST32; @@ -1340,6 +1349,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const u8 tmp2 = bpf2sparc[TMP_REG_2]; const u8 tmp3 = bpf2sparc[TMP_REG_3]; + if (insn->dst_reg == BPF_REG_FP) + ctx->saw_frame_pointer = true; + ctx->tmp_1_used = true; ctx->tmp_2_used = true; ctx->tmp_3_used = true; @@ -1360,6 +1372,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const u8 tmp2 = bpf2sparc[TMP_REG_2]; const u8 tmp3 = bpf2sparc[TMP_REG_3]; + if (insn->dst_reg == BPF_REG_FP) + ctx->saw_frame_pointer = true; + ctx->tmp_1_used = true; ctx->tmp_2_used = true; ctx->tmp_3_used = true; @@ -1425,12 +1440,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) struct bpf_prog *tmp, *orig_prog = prog; struct sparc64_jit_data *jit_data; struct bpf_binary_header *header; + u32 prev_image_size, image_size; bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; - u32 image_size; u8 *image_ptr; - int pass; + int pass, i; if (!prog->jit_requested) return orig_prog; @@ -1461,61 +1476,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) header = jit_data->header; extra_pass = true; image_size = sizeof(u32) * ctx.idx; + prev_image_size = image_size; + pass = 1; goto skip_init_ctx; } memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; - ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); + ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL); if (ctx.offset == NULL) { prog = orig_prog; goto out_off; } - /* Fake pass to detect features used, and get an accurate assessment - * of what the final image size will be. + /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook + * the offset array so that we converge faster. */ - if (build_body(&ctx)) { - prog = orig_prog; - goto out_off; - } - build_prologue(&ctx); - build_epilogue(&ctx); - - /* Now we know the actual image size. 
*/ - image_size = sizeof(u32) * ctx.idx; - header = bpf_jit_binary_alloc(image_size, &image_ptr, - sizeof(u32), jit_fill_hole); - if (header == NULL) { - prog = orig_prog; - goto out_off; - } + for (i = 0; i < prog->len; i++) + ctx.offset[i] = i * (12 * 4); - ctx.image = (u32 *)image_ptr; -skip_init_ctx: - for (pass = 1; pass < 3; pass++) { + prev_image_size = ~0U; + for (pass = 1; pass < 40; pass++) { ctx.idx = 0; build_prologue(&ctx); - if (build_body(&ctx)) { - bpf_jit_binary_free(header); prog = orig_prog; goto out_off; } - build_epilogue(&ctx); if (bpf_jit_enable > 1) - pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass, - image_size - (ctx.idx * 4), + pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass, + ctx.idx * 4, ctx.tmp_1_used ? '1' : ' ', ctx.tmp_2_used ? '2' : ' ', ctx.tmp_3_used ? '3' : ' ', ctx.saw_frame_pointer ? 'F' : ' ', ctx.saw_call ? 'C' : ' ', ctx.saw_tail_call ? 'T' : ' '); + + if (ctx.idx * 4 == prev_image_size) + break; + prev_image_size = ctx.idx * 4; + cond_resched(); + } + + /* Now we know the actual image size. */ + image_size = sizeof(u32) * ctx.idx; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_off; + } + + ctx.image = (u32 *)image_ptr; +skip_init_ctx: + ctx.idx = 0; + + build_prologue(&ctx); + + if (build_body(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + build_epilogue(&ctx); + + if (ctx.idx * 4 != prev_image_size) { + pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n", + prev_image_size, ctx.idx * 4); + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; } if (bpf_jit_enable > 1) diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug index 2014597605ea9cd24ff881370ce063ddb57992e0..85726eeec34512cde5fa0dd66a4485a9b64af311 100644 --- a/arch/um/Kconfig.debug +++ b/arch/um/Kconfig.debug @@ -16,6 +16,7 @@ config GPROF config GCOV bool "Enable gcov support" depends on DEBUG_INFO + depends on !KCOV help This option allows developers to retrieve coverage data from a UML session. 
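The bpf_int_jit_compile() rework in arch/sparc/net/bpf_jit_comp_64.c above replaces the old "fake sizing pass, then two emit passes" scheme with a fixed-point iteration: code generation is re-run (bounded at 40 passes) while the per-instruction offset table is refined, the binary image is only allocated once two consecutive passes produce the same size, and the final emit into that image must match the converged size exactly. The stand-alone C sketch below illustrates only that convergence idea; the toy emit_pass(), NINSN and the short-versus-long branch rule are invented for the example and are not taken from the kernel source.

/* Toy model of a converge-until-stable JIT sizing loop (illustration only).
 * The "branch" at instruction 2 targets the last instruction and needs one
 * word when the previously computed distance is small, three words when it
 * is large, so each pass's size depends on the offsets of the pass before.
 */
#include <stdio.h>
#include <stdlib.h>

#define NINSN 10

static int offset[NINSN];	/* per-instruction offsets, refined every pass */

static int emit_pass(void)	/* returns emitted size in words */
{
	int i, idx = 0;

	for (i = 0; i < NINSN; i++) {
		offset[i] = idx;
		if (i == 2)	/* branch to the last instruction */
			idx += (abs(offset[NINSN - 1] - idx) <= 5) ? 1 : 3;
		else
			idx += 1;
	}
	return idx;
}

int main(void)
{
	int pass, size, prev_size = -1;

	for (pass = 1; pass < 40; pass++) {	/* bounded, as in the sparc JIT */
		size = emit_pass();
		printf("pass %d: %d words\n", pass, size);
		if (size == prev_size)
			break;		/* fixed point: safe to allocate and emit */
		prev_size = size;
	}
	return 0;
}

Pre-cooking the offset array in the real patch (12 * 4 bytes per BPF instruction, sized for the longest sequence the JIT emits) simply starts this iteration from a pessimistic upper bound so that it converges in fewer passes.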
diff --git a/arch/um/Makefile-skas b/arch/um/Makefile-skas index ac35de5316a6a547d4c8293b61387ddbae546476..9d7886ff27629247a377c3341e34420019bdc6e0 100644 --- a/arch/um/Makefile-skas +++ b/arch/um/Makefile-skas @@ -4,7 +4,12 @@ # GPROF_OPT += -pg + +ifeq ($(CONFIG_PGO_KERNEL),y) +GCOV_OPT += -fprofile-generate -ftest-coverage +else GCOV_OPT += -fprofile-arcs -ftest-coverage +endif CFLAGS-$(CONFIG_GCOV) += $(GCOV_OPT) CFLAGS-$(CONFIG_GPROF) += $(GPROF_OPT) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 8d80b27502e6ae4feb235d01801a67be5ca8cce4..71e26488dfde2798aa1babb1f55aee440be86161 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) if (err == 0) { spin_unlock(&line->lock); return IRQ_NONE; - } else if (err < 0) { + } else if ((err < 0) && (err != -EAGAIN)) { line->head = line->buffer; line->tail = line->buffer; } @@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) if (err) return err; if (output) - err = um_request_irq(driver->write_irq, fd, IRQ_NONE, + err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, line_write_interrupt, IRQF_SHARED, driver->write_irq_name, data); return err; @@ -683,24 +683,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port, goto cleanup; } - *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list), - .fd = fd, + *winch = ((struct winch) { .fd = fd, .tty_fd = tty_fd, .pid = pid, .port = port, .stack = stack }); + spin_lock(&winch_handler_lock); + list_add(&winch->list, &winch_handlers); + spin_unlock(&winch_handler_lock); + if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, IRQF_SHARED, "winch", winch) < 0) { printk(KERN_ERR "register_winch_irq - failed to register " "IRQ\n"); + spin_lock(&winch_handler_lock); + list_del(&winch->list); + spin_unlock(&winch_handler_lock); goto out_free; } - spin_lock(&winch_handler_lock); - list_add(&winch->list, &winch_handlers); - spin_unlock(&winch_handler_lock); - return; out_free: diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 3ef1b48e064a87dd33465ff083c604c4657b8bb2..19779e38e077e6820a3c63cd98762176e27ec735 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -349,7 +349,7 @@ static struct platform_driver uml_net_driver = { static void net_device_release(struct device *dev) { - struct uml_net *device = dev_get_drvdata(dev); + struct uml_net *device = container_of(dev, struct uml_net, pdev.dev); struct net_device *netdev = device->dev; struct uml_net_private *lp = netdev_priv(netdev); diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c index 9a8e1b64c22e3dcf4a51845b4a9343adfbc1e144..5f56d11b886fc2926ac412b0f3b292a1a88b2ec3 100644 --- a/arch/um/drivers/port_user.c +++ b/arch/um/drivers/port_user.c @@ -168,7 +168,7 @@ int port_connection(int fd, int *socket, int *pid_out) { int new, err; char *argv[] = { "/usr/sbin/in.telnetd", "-L", - "/usr/lib/uml/port-helper", NULL }; + OS_LIB_PATH "/uml/port-helper", NULL }; struct port_pre_exec_data data; new = accept(fd, NULL, 0); diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 83c470364dfb34dce51320322e9b119c10157f3e..709aa3e0af3faee0eab639cd2eee87a2ed5ed0db 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -854,7 +854,7 @@ static int ubd_open_dev(struct ubd *ubd_dev) static void ubd_device_release(struct device *dev) { - struct ubd *ubd_dev = 
dev_get_drvdata(dev); + struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev); blk_cleanup_queue(ubd_dev->queue); *ubd_dev = ((struct ubd) DEFAULT_UBD); diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index 4d6a78e31089f6c12f94bddbf9a53f85bf8d0401..00c4c2735a5f7069f092de80bd73865e73510657 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "vector_user.h" #define ID_GRE 0 diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index fca34b2177e28a055663055d01c4fb7d78420285..129fb1d1f1c5b346b06a2f5da43ce8eae858e940 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -53,7 +53,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) * when the new ->mm is used for the first time. */ __switch_mm(&new->context.id); - down_write(&new->mmap_sem); + down_write_nested(&new->mmap_sem, 1); uml_setup_stubs(new); up_write(&new->mmap_sem); } diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 7485398d07370034e361ea1fe4173eeb9bd51d1c..9c04562310b36630bb3ec008c99fdf05e3597c85 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte) static inline pte_t pte_wrprotect(pte_t pte) { - pte_clear_bits(pte, _PAGE_RW); + if (likely(pte_get_bits(pte, _PAGE_RW))) + pte_clear_bits(pte, _PAGE_RW); + else + return pte; return(pte_mknewprot(pte)); } static inline pte_t pte_mkread(pte_t pte) { + if (unlikely(pte_get_bits(pte, _PAGE_USER))) + return pte; pte_set_bits(pte, _PAGE_USER); return(pte_mknewprot(pte)); } @@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte) { + if (unlikely(pte_get_bits(pte, _PAGE_RW))) + return pte; pte_set_bits(pte, _PAGE_RW); return(pte_mknewprot(pte)); } diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index dce6db147f24563eb14310aaabf76cada9a878bb..02e61f6abfcab3ae39ab65a6c2a4fd2e3f8265be 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, return tlb_remove_page(tlb, page); } +static inline void +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, + unsigned long size) +{ + tlb->need_flush = 1; + + if (tlb->start > address) + tlb->start = address; + if (tlb->end < address + size) + tlb->end = address + size; +} + /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. * diff --git a/arch/um/include/asm/vmalloc.h b/arch/um/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..9a7b9ed9373370ed8e421004f39fb65e26feec1e --- /dev/null +++ b/arch/um/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_UM_VMALLOC_H +#define _ASM_UM_VMALLOC_H + +#endif /* _ASM_UM_VMALLOC_H */ diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 1a1d88a4d94035b8cd0685ea5d6ea34ae23cd7f5..5f47422401e1e98d634d12882bd098a9c77a6207 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -66,7 +66,7 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef PTRACE_GETREGS case PTRACE_GETREGS: { /* Get all gp regs from the child. 
*/ - if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) { + if (!access_ok(p, MAX_REG_OFFSET)) { ret = -EIO; break; } @@ -81,7 +81,7 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef PTRACE_SETREGS case PTRACE_SETREGS: { /* Set all gp regs in the child. */ unsigned long tmp = 0; - if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) { + if (!access_ok(p, MAX_REG_OFFSET)) { ret = -EIO; break; } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 052de4c8acb2ec42c04d4fe30b267aa607e8a332..0c572a48158e8de36a18f41ce5b61e3e0ee286b5 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt) static struct clock_event_device timer_clockevent = { .name = "posix-timer", .rating = 250, - .cpumask = cpu_all_mask, + .cpumask = cpu_possible_mask, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_state_shutdown = itimer_shutdown, diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index cced829460427180091b494e7eb2c9315dd66d73..eacf4fe7ae4d7b406571b577bc8891760dd8cc78 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -96,7 +96,6 @@ int handle_page_fault(unsigned long address, unsigned long ip, else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; goto retry; diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index c94c3bd70ccd797d03a53f7f9b7b5773ff274506..df4a985716eba7047cb0873fcea7fb61a57d7a80 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf) fatal_sigsegv(); } longjmp(*switch_buf, 1); + + /* unreachable */ + printk(UM_KERN_ERR "impossible long jump!"); + fatal_sigsegv(); + return 0; } void initial_thread_cb_skas(void (*proc)(void *), void *arg) diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules index a4dfa7d7636e9a6ba637d22b16dc64e5fb3acaca..60d01d68a84e9036f7e2f04ffdbd47230a402ace 100644 --- a/arch/um/scripts/Makefile.rules +++ b/arch/um/scripts/Makefile.rules @@ -22,6 +22,12 @@ $(USER_OBJS) $(UNPROFILE_OBJS): \ CHECKFLAGS := $(patsubst $(NOSTDINC_FLAGS),,$(CHECKFLAGS)) # The stubs can't try to call mcount or update basic block data +ifeq ($(CONFIG_PGO_KERNEL),y) +define unprofile + $(patsubst -pg,,$(patsubst -fprofile-generate -ftest-coverage,,$(1))) +endef +else define unprofile $(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1))) endef +endif diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 60eae744d8fd0112d39dc0bbbe13a48e30790726..981bf646bf8a1016de601e53c308335a285a2f09 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config UNICORE32 def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/unicore32/include/asm/vmalloc.h b/arch/unicore32/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..054435818a1462b59a6e0b8c59df52545670f731 --- /dev/null +++ b/arch/unicore32/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_UNICORE32_VMALLOC_H +#define _ASM_UNICORE32_VMALLOC_H + +#endif /* _ASM_UNICORE32_VMALLOC_H */ diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 65856eaab163e79ef04603b087af8217306d8c18..0314f78943755b5cc4058eb5cf662cd2a4492c83 100644 
--- a/arch/unicore32/include/uapi/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -12,6 +12,7 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT /* Use the standard ABI for syscalls. */ #include diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c index 4ae51cf15adea6ef7ddc6741b431793b545903d3..63be04809d401df14681d4671ef38897f3abb1cd 100644 --- a/arch/unicore32/kernel/signal.c +++ b/arch/unicore32/kernel/signal.c @@ -117,7 +117,7 @@ asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs) frame = (struct rt_sigframe __user *)regs->UCreg_sp; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) @@ -205,7 +205,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, /* * Check that we can actually write to the signal frame. */ - if (!access_ok(VERIFY_WRITE, frame, framesize)) + if (!access_ok(frame, framesize)) frame = NULL; return frame; diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 8f12a5b50a42bffaa14efa03c04c8f6f67f8b1b1..14144b9e774a21dc5c1a7f2c216f332e2049419b 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -268,9 +268,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) else tsk->min_flt++; if (fault & VM_FAULT_RETRY) { - /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk - * of starvation. */ - flags &= ~FAULT_FLAG_ALLOW_RETRY; + flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1a0be022f91d8d6d89bc154642e3bd29619e483c..7c28f4b18d6ba930b6f327db626fde4a3d385123 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -14,7 +14,6 @@ config X86_32 select ARCH_WANT_IPC_PARSE_VERSION select CLKSRC_I8253 select CLONE_BACKWARDS - select HAVE_AOUT select HAVE_GENERIC_DMA_COHERENT select MODULES_USE_ELF_REL select OLD_SIGACTION @@ -47,6 +46,7 @@ config X86 select ACPI_LEGACY_TABLES_LOOKUP if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ANON_INODES + select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI @@ -84,7 +84,7 @@ config X86 select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANTS_THP_SWAP if X86_64 - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLKEVT_I8253 select CLOCKSOURCE_VALIDATE_LAST_CYCLE select CLOCKSOURCE_WATCHDOG @@ -112,6 +112,7 @@ config X86 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL + select HARDIRQS_SW_RESEND select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI select HAVE_ACPI_APEI_NMI if ACPI @@ -166,7 +167,8 @@ config X86 select HAVE_FUNCTION_ERROR_INJECTION select HAVE_KRETPROBES select HAVE_KVM - select HAVE_LIVEPATCH if X86_64 + select HAVE_LIVEPATCH_FTRACE if X86_64 + select HAVE_LIVEPATCH_WO_FTRACE if X86_64 select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MIXED_BREAKPOINTS_REGS @@ -441,10 +443,6 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. - Without compiler support, at least indirect branches in assembler - code are eliminated. Since this includes the syscall entry path, - it is not entirely pointless. 
- config INTEL_RDT bool "Intel Resource Director Technology support" default n @@ -788,6 +786,7 @@ config KVM_GUEST bool "KVM Guest support (including kvmclock)" depends on PARAVIRT select PARAVIRT_CLOCK + select ARCH_CPUIDLE_HALTPOLL default y ---help--- This option enables various optimizations for running under the KVM @@ -796,6 +795,15 @@ config KVM_GUEST underlying device model, the host provides the guest with timing infrastructure such as time of day, and system time +config ARCH_CPUIDLE_HALTPOLL + def_bool n + prompt "Disable host haltpoll when loading haltpoll driver" + help + If virtualized under KVM, disable host haltpoll. When loading + haltpoll driver, It allows the guest vcpus to poll for a + specified amount of time before halting, The execution of + haltpoll on the host is redundant. + config KVM_DEBUG_FS bool "Enable debug information for KVM Guests in debugfs" depends on KVM_GUEST && DEBUG_FS @@ -1005,13 +1013,7 @@ config NR_CPUS to the kernel image. config SCHED_SMT - bool "SMT (Hyperthreading) scheduler support" - depends on SMP - ---help--- - SMT scheduler support improves the CPU scheduler's decision making - when dealing with Intel Pentium 4 chips with HyperThreading at a - cost of slightly increased overhead in some places. If unsure say - N here. + def_bool y if SMP config SCHED_MC def_bool y @@ -1286,15 +1288,15 @@ config X86_REBOOTFIXUPS config MICROCODE bool "CPU microcode loading support" default y - depends on CPU_SUP_AMD || CPU_SUP_INTEL + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON select FW_LOADER ---help--- - If you say Y here, you will be able to update the microcode on - Intel and AMD processors. The Intel support is for the IA32 family, + If you say Y here, you will be able to update the microcode on Intel, + AMD and Hygon processors. The Intel support is for the IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The - AMD support is for families 0x10 and later. You will obviously need - the actual microcode binary data itself which is not shipped with - the Linux kernel. + AMD support is for families 0x10 and later. The Hygon support is for + families 0x18 and later. You will obviously need the actual microcode + binary data itself which is not shipped with the Linux kernel. The preferred method to load microcode from a detached initrd is described in Documentation/x86/microcode.txt. For that you need to enable @@ -1326,6 +1328,15 @@ config MICROCODE_AMD If you select this option, microcode patch loading support for AMD processors will be enabled. +config MICROCODE_HYGON + bool "Hygon microcode loading support" + depends on CPU_SUP_HYGON && MICROCODE + default MICROCODE + select MICROCODE_AMD + help + If you select this option, microcode patch loading support for Hygon + processors will be enabled. + config MICROCODE_OLD_INTERFACE def_bool y depends on MICROCODE @@ -1498,6 +1509,7 @@ config AMD_MEM_ENCRYPT bool "AMD Secure Memory Encryption (SME) support" depends on X86_64 && CPU_SUP_AMD select DYNAMIC_PHYSICAL_MASK + select ARCH_USE_MEMREMAP_PROT ---help--- Say yes to enable support for the encryption of system memory. This requires an AMD processor that supports Secure Memory @@ -1517,10 +1529,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT If set to N, then the encryption of system memory can be activated with the mem_encrypt=on command line option. 
-config ARCH_USE_MEMREMAP_PROT - def_bool y - depends on AMD_MEM_ENCRYPT - # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -1542,6 +1550,25 @@ config NUMA Otherwise, you should say N. +config NUMA_AWARE_SPINLOCKS + bool "Numa-aware spinlocks" + depends on NUMA + depends on QUEUED_SPINLOCKS + # For now, we depend on PARAVIRT_SPINLOCKS to make the patching work. + # This is awkward, but hopefully would be resolved once static_call() + # is available. + depends on PARAVIRT_SPINLOCKS + default y + help + Introduce NUMA (Non Uniform Memory Access) awareness into + the slow path of spinlocks. + + In this variant of qspinlock, the kernel will try to keep the lock + on the same node, thus reducing the number of remote cache misses, + while trading some of the short term fairness for better performance. + + Say N if you want absolute first come first serve fairness. + config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" @@ -1853,16 +1880,15 @@ config X86_SMAP If unsure, say Y. -config X86_INTEL_UMIP +config X86_UMIP def_bool y - depends on CPU_SUP_INTEL - prompt "Intel User Mode Instruction Prevention" if EXPERT + prompt "User Mode Instruction Prevention" if EXPERT ---help--- - The User Mode Instruction Prevention (UMIP) is a security - feature in newer Intel processors. If enabled, a general - protection fault is issued if the SGDT, SLDT, SIDT, SMSW - or STR instructions are executed in user mode. These instructions - unnecessarily expose information about the hardware state. + User Mode Instruction Prevention (UMIP) is a security feature in + some x86 processors. If enabled, a general protection fault is + issued if the SGDT, SLDT, SIDT, SMSW or STR instructions are + executed in user mode. These instructions unnecessarily expose + information about the hardware state. The vast majority of applications do not use these instructions. For the very few that do, software emulation is provided in @@ -1913,11 +1939,57 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS If unsure, say y. +choice + prompt "TSX enable mode" + depends on CPU_SUP_INTEL + default X86_INTEL_TSX_MODE_OFF + help + Intel's TSX (Transactional Synchronization Extensions) feature + allows to optimize locking protocols through lock elision which + can lead to a noticeable performance boost. + + On the other hand it has been shown that TSX can be exploited + to form side channel attacks (e.g. TAA) and chances are there + will be more of those attacks discovered in the future. + + Therefore TSX is not enabled by default (aka tsx=off). An admin + might override this decision by tsx=on the command line parameter. + Even with TSX enabled, the kernel will attempt to enable the best + possible TAA mitigation setting depending on the microcode available + for the particular machine. + + This option allows to set the default tsx mode between tsx=on, =off + and =auto. See Documentation/admin-guide/kernel-parameters.txt for more + details. + + Say off if not sure, auto if TSX is in use but it should be used on safe + platforms or on if TSX is in use and the security aspect of tsx is not + relevant. + +config X86_INTEL_TSX_MODE_OFF + bool "off" + help + TSX is disabled if possible - equals to tsx=off command line parameter. + +config X86_INTEL_TSX_MODE_ON + bool "on" + help + TSX is always enabled on TSX capable HW - equals the tsx=on command + line parameter. 
+ +config X86_INTEL_TSX_MODE_AUTO + bool "auto" + help + TSX is enabled on TSX capable HW that is believed to be safe against + side channel attacks- equals the tsx=auto command line parameter. +endchoice + config EFI bool "EFI runtime service support" depends on ACPI select UCS2_STRING select EFI_RUNTIME_WRAPPERS + select ARCH_USE_MEMREMAP_PROT ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). @@ -2209,14 +2281,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING If unsure, leave at the default value. config HOTPLUG_CPU - bool "Support for hot-pluggable CPUs" + def_bool y depends on SMP - ---help--- - Say Y here to allow turning CPUs off and on. CPUs can be - controlled through /sys/devices/system/cpu. - ( Note: power management support will enable this option - automatically on SMP systems. ) - Say N if you want to disable CPU hotplug. config BOOTPARAM_HOTPLUG_CPU0 bool "Set default setting of cpu0_hotpluggable" @@ -2410,6 +2476,25 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y depends on X86_64 || X86_PAE +config GDS_FORCE_MITIGATION + bool "Force GDS Mitigation" + depends on CPU_SUP_INTEL + default n + help + Gather Data Sampling (GDS) is a hardware vulnerability which allows + unprivileged speculative access to data which was previously stored in + vector registers. + + This option is equivalent to setting gather_data_sampling=force on the + command line. The microcode mitigation is used if present, otherwise + AVX is disabled as a mitigation. On affected systems that are missing + the microcode any userspace code that unconditionally uses AVX will + break with this option set. + + Setting this option on systems not vulnerable to GDS has no effect. + + If in doubt, say N. + config ARCH_ENABLE_HUGEPAGE_MIGRATION def_bool y depends on X86_64 && HUGETLB_PAGE && MIGRATION @@ -2742,8 +2827,7 @@ config OLPC config OLPC_XO1_PM bool "OLPC XO-1 Power Management" - depends on OLPC && MFD_CS5535 && PM_SLEEP - select MFD_CORE + depends on OLPC && MFD_CS5535=y && PM_SLEEP ---help--- Add support for poweroff and suspend of the OLPC XO-1 laptop. @@ -2878,6 +2962,7 @@ config IA32_EMULATION config IA32_AOUT tristate "IA32 a.out support" depends on IA32_EMULATION + depends on BROKEN ---help--- Support old a.out binaries in the 32bit emulation. diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 638411f22267aa34bd6ddda10eeccded1a1f6b48..d1a51794c587d3b0b2998ad9913f31bd2db8c9ff 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -426,6 +426,20 @@ config CPU_SUP_AMD If unsure, say N. +config CPU_SUP_HYGON + default y + bool "Support Hygon processors" if PROCESSOR_SELECT + select CPU_SUP_AMD + ---help--- + This enables detection, tunings and quirks for Hygon processors + + You need this enabled if you want your kernel to run on an + Hygon CPU. Disabling this option on other types of CPUs + makes the kernel a tiny bit smaller. Disabling it on an Hygon + CPU might render the kernel unbootable. + + If unsure, say N. + config CPU_SUP_CENTAUR default y bool "Support Centaur processors" if PROCESSOR_SELECT @@ -466,3 +480,16 @@ config CPU_SUP_UMC_32 CPU might render the kernel unbootable. If unsure, say N. + +config CPU_SUP_ZHAOXIN + default y + bool "Support Zhaoxin processors" if PROCESSOR_SELECT + help + This enables detection, tunings and quirks for Zhaoxin processors + + You need this enabled if you want your kernel to run on a + Zhaoxin CPU. 
Disabling this option on other types of CPUs + makes the kernel a tiny bit smaller. Disabling it on a Zhaoxin + CPU might render the kernel unbootable. + + If unsure, say N. diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8f6e7eb8ae9fc2342b79cb0fc65de922dee11f2e..4833dd7e2cc0311f6ed36cab657d7f7959787e48 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) export REALMODE_CFLAGS @@ -47,7 +48,7 @@ export REALMODE_CFLAGS export BITS ifdef CONFIG_X86_NEED_RELOCS - LDFLAGS_vmlinux := --emit-relocs + LDFLAGS_vmlinux := --emit-relocs --discard-none endif # @@ -223,9 +224,16 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # Avoid indirect branches in kernel to deal with Spectre ifdef CONFIG_RETPOLINE -ifneq ($(RETPOLINE_CFLAGS),) - KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE -endif + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) + # Additionally, avoid generating expensive indirect jumps which + # are subject to retpolines for small number of switch cases. + # clang turns off jump table generation by default when under + # retpoline builds, however, gcc does not for x86. This has + # only been fixed starting from gcc stable version 8.4.0 and + # onwards, but not for older ones. See gcc bug #86952. + ifndef CONFIG_CC_IS_CLANG + KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables) + endif endif archscripts: scripts_basic @@ -298,10 +306,17 @@ vdso_install: archprepare: checkbin checkbin: -ifndef CC_HAVE_ASM_GOTO +ifndef CONFIG_CC_HAS_ASM_GOTO @echo Compiler lacks asm-goto support. @exit 1 endif +ifdef CONFIG_RETPOLINE +ifeq ($(RETPOLINE_CFLAGS),) + @echo "You are building kernel with non-retpoline compiler." >&2 + @echo "Please update your compiler." 
>&2 + @false +endif +endif archclean: $(Q)rm -rf $(objtree)/arch/i386 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 9b5adae9cc40cf59a5c8244bb3eb361a4772b626..e2839b5c246c21e45ee2f783e92c2abb04bb93ea 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE AFLAGS_header.o += -I$(objtree)/$(obj) $(obj)/header.o: $(obj)/zoffset.h -LDFLAGS_setup.elf := -T +LDFLAGS_setup.elf := -m elf_i386 -T $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 1458b1700fc7e4c580ea963920d199fd7a4fb625..544ac4fafd112a8b32802e0c4c5a4392b71bd67c 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1,3 +1,4 @@ + /* ----------------------------------------------------------------------- * * Copyright 2011 Intel Corporation; author Matt Fleming @@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, return status; } +static efi_status_t allocate_e820(struct boot_params *params, + struct setup_data **e820ext, + u32 *e820ext_size) +{ + unsigned long map_size, desc_size, buff_size; + struct efi_boot_memmap boot_map; + efi_memory_desc_t *map; + efi_status_t status; + __u32 nr_desc; + + boot_map.map = &map; + boot_map.map_size = &map_size; + boot_map.desc_size = &desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; + + status = efi_get_memory_map(sys_table, &boot_map); + if (status != EFI_SUCCESS) + return status; + + nr_desc = buff_size / desc_size; + + if (nr_desc > ARRAY_SIZE(params->e820_table)) { + u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table); + + status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size); + if (status != EFI_SUCCESS) + return status; + } + + return EFI_SUCCESS; +} + struct exit_boot_struct { struct boot_params *boot_params; struct efi_info *efi; - struct setup_data *e820ext; - __u32 e820ext_size; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, struct efi_boot_memmap *map, void *priv) { - static bool first = true; const char *signature; __u32 nr_desc; efi_status_t status; struct exit_boot_struct *p = priv; - if (first) { - nr_desc = *map->buff_size / *map->desc_size; - if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) { - u32 nr_e820ext = nr_desc - - ARRAY_SIZE(p->boot_params->e820_table); - - status = alloc_e820ext(nr_e820ext, &p->e820ext, - &p->e820ext_size); - if (status != EFI_SUCCESS) - return status; - } - first = false; - } - signature = efi_is_64bit() ? 
EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE; memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); @@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) { unsigned long map_sz, key, desc_size, buff_size; efi_memory_desc_t *mem_map; - struct setup_data *e820ext; - __u32 e820ext_size; + struct setup_data *e820ext = NULL; + __u32 e820ext_size = 0; efi_status_t status; __u32 desc_version; struct efi_boot_memmap map; @@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) map.buff_size = &buff_size; priv.boot_params = boot_params; priv.efi = &boot_params->efi_info; - priv.e820ext = NULL; - priv.e820ext_size = 0; + + status = allocate_e820(boot_params, &e820ext, &e820ext_size); + if (status != EFI_SUCCESS) + return status; /* Might as well exit boot services now */ status = efi_exit_boot_services(sys_table, handle, &map, &priv, @@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) if (status != EFI_SUCCESS) return status; - e820ext = priv.e820ext; - e820ext_size = priv.e820ext_size; - /* Historic? */ boot_params->alt_mem_k = 32 * 1024; @@ -738,6 +755,7 @@ efi_main(struct efi_config *c, struct boot_params *boot_params) struct desc_struct *desc; void *handle; efi_system_table_t *_table; + unsigned long cmdline_paddr; efi_early = c; @@ -755,6 +773,15 @@ efi_main(struct efi_config *c, struct boot_params *boot_params) else setup_boot_services32(efi_early); + /* + * make_boot_params() may have been called before efi_main(), in which + * case this is the second time we parse the cmdline. This is ok, + * parsing the cmdline multiple times does not have side-effects. + */ + cmdline_paddr = ((u64)hdr->cmd_line_ptr | + ((u64)boot_params->ext_cmd_line_ptr << 32)); + efi_parse_options((char *)cmdline_paddr); + /* * If the boot loader gave us a value for secure_boot then we use that, * otherwise we ask the BIOS. diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 64037895b0859fdb25b8b104a2e5ed5d028baf45..f62e347862ccc61ba417d80dabee304ef28b6ec7 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src) leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax movl %eax, %cr3 3: + /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ + pushl %ecx + pushl %edx + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_LME, %eax + wrmsr + popl %edx + popl %ecx + /* Enable PAE and LA57 (if required) paging modes */ movl $X86_CR4_PAE, %eax cmpl $0, %edx diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c index 748456c365f4691af041753c63d6991f9bc8b4b8..9557c5a15b91e29a6465e502599960aad87e3e60 100644 --- a/arch/x86/boot/compressed/kaslr_64.c +++ b/arch/x86/boot/compressed/kaslr_64.c @@ -29,9 +29,6 @@ #define __PAGE_OFFSET __PAGE_OFFSET_BASE #include "../../mm/ident_map.c" -/* Used by pgtable.h asm code to force instruction serialization. */ -unsigned long __force_order; - /* Used to track our page table allocation area. */ struct alloc_pgt_data { unsigned char *pgt_buf; diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 8dd1d5ccae58023fb7cb1e3c0cf83255b6b1988b..0387d7a96c842b335ba836dfe4a44c1af5120f19 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -17,6 +17,7 @@ #include "pgtable.h" #include "../string.h" #include "../voffset.h" +#include /* * WARNING!! 
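The trampoline_32bit_src hunk in arch/x86/boot/compressed/head_64.S above adds a read-modify-write of the EFER MSR so that EFER.LME is set again before paging is re-enabled, as a precaution in case the hypervisor has pulled it out from under the guest. In C terms the operation amounts to the small sketch below, written with the kernel's MSR helpers purely as an illustration of the semantics (set_efer_lme() is an invented name); the real change has to remain hand-written 32-bit assembly (rdmsr, btsl, wrmsr, with %ecx and %edx saved around it) because it runs from the identity-mapped trampoline before any C environment exists.

/* Illustration only: what the added trampoline assembly does, expressed
 * with the kernel's MSR accessors. Not a drop-in replacement for the asm.
 */
#include <linux/types.h>
#include <asm/msr.h>

static void set_efer_lme(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);		/* rdmsr of IA32_EFER                  */
	efer |= EFER_LME;		/* btsl $_EFER_LME: request long mode  */
	wrmsrl(MSR_EFER, efer);		/* wrmsr back before paging is enabled */
}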
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index a423bdb426862dc25c7df6ab8309b3d0d6108a38..47fd18db6b3bf54fa40e20f4856df7d07e8f15e0 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -22,7 +22,6 @@ #include #include #include -#include #define BOOT_BOOT_H #include "../ctype.h" diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h index 91f75638f6e68ef5df8da5f428fe54341bef3813..6ff7e81b5628456b92779ede7a9c941fe7df6a85 100644 --- a/arch/x86/boot/compressed/pgtable.h +++ b/arch/x86/boot/compressed/pgtable.h @@ -6,7 +6,7 @@ #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE -#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 +#define TRAMPOLINE_32BIT_CODE_SIZE 0x70 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 9e215737149103dcb627ebc9138a9b7126b24f99..76e1edf5bf12a0ffec3401fef160a2c5fe26e37f 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,5 +1,7 @@ +#include #include #include +#include #include "pgtable.h" #include "../string.h" @@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option); static unsigned long find_trampoline_placement(void) { - unsigned long bios_start, ebda_start; + unsigned long bios_start = 0, ebda_start = 0; unsigned long trampoline_start; struct boot_e820_entry *entry; + char *signature; int i; /* @@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void) * This code is based on reserve_bios_regions(). */ - ebda_start = *(unsigned short *)0x40e << 4; - bios_start = *(unsigned short *)0x413 << 10; + /* + * EFI systems may not provide legacy ROM. The memory may not be mapped + * at all. + * + * Only look for values in the legacy ROM for non-EFI system. + */ + signature = (char *)&boot_params->efi_info.efi_loader_signature; + if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) && + strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) { + ebda_start = *(unsigned short *)0x40e << 4; + bios_start = *(unsigned short *)0x413 << 10; + } if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX) bios_start = BIOS_START_MAX; @@ -60,6 +73,8 @@ static unsigned long find_trampoline_placement(void) /* Find the first usable memory region under bios_start. */ for (i = boot_params->e820_entries - 1; i >= 0; i--) { + unsigned long new = bios_start; + entry = &boot_params->e820_table[i]; /* Skip all entries above bios_start. */ @@ -72,15 +87,20 @@ static unsigned long find_trampoline_placement(void) /* Adjust bios_start to the end of the entry if needed. */ if (bios_start > entry->addr + entry->size) - bios_start = entry->addr + entry->size; + new = entry->addr + entry->size; /* Keep bios_start page-aligned. */ - bios_start = round_down(bios_start, PAGE_SIZE); + new = round_down(new, PAGE_SIZE); /* Skip the entry if it's too small. */ - if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) + if (new - TRAMPOLINE_32BIT_SIZE < entry->addr) continue; + /* Protect against underflow. 
*/ + if (new - TRAMPOLINE_32BIT_SIZE > bios_start) + break; + + bios_start = new; break; } diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index c4428a176973311950429e0ba00c4ce13e9f6fe6..2622c0742c92d0e4a6b515d37c2a2f6b1a461758 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len) return diff; } +/* + * Clang may lower `memcmp == 0` to `bcmp == 0`. + */ +int bcmp(const void *s1, const void *s2, size_t len) +{ + return memcmp(s1, s2, len); +} + int strcmp(const char *str1, const char *str2) { const unsigned char *s1 = (const unsigned char *)str1; diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index d4e6cd4577e5dd526e76b1ed7b2b1d951b06a46d..bf0e824003584c3d941b64b587a531678e902126 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -391,6 +391,13 @@ int main(int argc, char ** argv) die("Unable to mmap '%s': %m", argv[2]); /* Number of 16-byte paragraphs, including space for a 4-byte CRC */ sys_size = (sz + 15 + 4) / 16; +#ifdef CONFIG_EFI_STUB + /* + * COFF requires minimum 32-byte alignment of sections, and + * adding a signature is problematic without that alignment. + */ + sys_size = (sys_size + 1) & ~1; +#endif /* Patch the setup code with the appropriate size parameters */ buf[0x1f1] = setup_sectors-1; diff --git a/arch/x86/configs/hulk_defconfig b/arch/x86/configs/hulk_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..bdd662877601c4a11b26dac5670234dbf288841e --- /dev/null +++ b/arch/x86/configs/hulk_defconfig @@ -0,0 +1,7569 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 4.19.21 Kernel Configuration +# + +# +# Compiler: gcc (GCC) 4.8.5 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=40805 +CONFIG_CLANG_VERSION=0 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y 
+CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_QOS_SCHED_DYNAMIC_AFFINITY=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_FILES=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# 
CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +CONFIG_INTEL_RDT=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_QUEUED_LOCK_STAT is not set +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m 
+CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_I8K=m +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +# CONFIG_X86_INTEL_MPX is not set +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +CONFIG_KEXEC_VERIFY_SIG=y +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x200000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +CONFIG_HOTPLUG_CPU=y +CONFIG_BOOTPARAM_HOTPLUG_CPU0=y +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +# CONFIG_LIVEPATCH_FTRACE is not set +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_PM_TRACE_RTC is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y 
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_DPTF_POWER=m +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_PMIC_OPREGION=y +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +CONFIG_SFI=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) 
+# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +# CONFIG_XEN_PCIDEV_FRONTEND is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +CONFIG_VMD=y + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +CONFIG_APPLE_PROPERTIES=y +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_MMU_AUDIT=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y 
+CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y 
+CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CFQ_GROUP_IOSCHED is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_MEMCG_QOS=y +# CONFIG_CMA is not set +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZSWAP=y 
+CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_ZONE_DEVICE=y +CONFIG_ZONE_DEVICE=y +CONFIG_ARCH_HAS_HMM=y +CONFIG_MIGRATE_VMA_HELPER=y +CONFIG_DEV_PAGEMAP_OPS=y +CONFIG_HMM=y +CONFIG_HMM_MIRROR=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_DEVICE_PUBLIC=y +CONFIG_FRAME_VECTOR=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_TCP_COMP=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y 
+CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +# CONFIG_NFT_CONNLIMIT is not set +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m 
+CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +# CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m 
+CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# 
CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +CONFIG_CAN_J1939=m + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set 
+CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=m +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +# CONFIG_LWTUNNEL_BPF is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=m +CONFIG_MAY_USE_DEVLINK=m +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# 
CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_PARIDE is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_WRITEBACK is not set +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_VIRTIO_BLK=m +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is 
not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +CONFIG_SGI_IOC4=m +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_SGI_XP=m +CONFIG_HP_ILO=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +# CONFIG_INTEL_MEI_TXE is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set 
+# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +CONFIG_RAMAXEL_SPRAID=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +CONFIG_VMWARE_PVSCSI=m +# CONFIG_XEN_SCSI_FRONTEND is not set +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=m +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not 
set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +# CONFIG_DM_WRITECACHE is not set +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_AMD_XGBE_HAVE_ECC=y +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +# 
CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_CX_ECAT is not set +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +# CONFIG_BE2NET_BE2 is not set +# CONFIG_BE2NET_BE3 is not set +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_HINIC=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_EN_IPSEC=y +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SWITCHIB=m +CONFIG_MLXSW_SWITCHX2=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +CONFIG_NETXEN_NIC=m 
+CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_ATP is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_ASIX_PHY=m +CONFIG_AT803X_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y 
+CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +# CONFIG_ATH9K_STATION_STATISTICS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_ATH9K_COMMON_SPECTRAL is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +CONFIG_ATH10K_DEBUGFS=y +# CONFIG_ATH10K_SPECTRAL is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +# CONFIG_IWLWIFI_BCAST_FILTERING is not set + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEBUGFS=y +# CONFIG_IWLWIFI_DEVICE_TRACING is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x0U is not set +CONFIG_MT76x2E=m +# CONFIG_MT76x2U is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# 
CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PEARL_PCIE is not set +CONFIG_MAC80211_HWSIM=m +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +# CONFIG_LANMEDIA is not set +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_SBNI is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_THUNDERBOLT_NET=m +CONFIG_HYPERV_NET=m +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_I4L=m +CONFIG_ISDN_PPP=y +CONFIG_ISDN_PPP_VJ=y +CONFIG_ISDN_MPP=y +CONFIG_IPPP_FILTER=y +# CONFIG_ISDN_PPP_BSDCOMP is not set +CONFIG_ISDN_AUDIO=y +CONFIG_ISDN_TTY_FAX=y + +# +# ISDN feature submodules +# +CONFIG_ISDN_DIVERSION=m + +# +# ISDN4Linux hardware drivers +# + +# +# Passive cards +# +CONFIG_ISDN_DRV_HISAX=m + +# +# D-channel protocol features +# +CONFIG_HISAX_EURO=y +CONFIG_DE_AOC=y +CONFIG_HISAX_NO_SENDCOMPLETE=y +CONFIG_HISAX_NO_LLC=y +CONFIG_HISAX_NO_KEYPAD=y +CONFIG_HISAX_1TR6=y +CONFIG_HISAX_NI1=y +CONFIG_HISAX_MAX_CARDS=8 + +# +# HiSax supported cards +# +CONFIG_HISAX_16_3=y +CONFIG_HISAX_TELESPCI=y +CONFIG_HISAX_S0BOX=y +CONFIG_HISAX_FRITZPCI=y +CONFIG_HISAX_AVM_A1_PCMCIA=y +CONFIG_HISAX_ELSA=y +CONFIG_HISAX_DIEHLDIVA=y +CONFIG_HISAX_SEDLBAUER=y +CONFIG_HISAX_NETJET=y +CONFIG_HISAX_NETJET_U=y +CONFIG_HISAX_NICCY=y +CONFIG_HISAX_BKM_A4T=y +CONFIG_HISAX_SCT_QUADRO=y +CONFIG_HISAX_GAZEL=y +CONFIG_HISAX_HFC_PCI=y +CONFIG_HISAX_W6692=y +CONFIG_HISAX_HFC_SX=y +CONFIG_HISAX_ENTERNOW_PCI=y +# CONFIG_HISAX_DEBUG is not set + +# +# HiSax PCMCIA card service modules +# + +# +# HiSax sub driver modules +# +CONFIG_HISAX_ST5481=m +# CONFIG_HISAX_HFCUSB is not set +CONFIG_HISAX_HFC4S8S=m +CONFIG_HISAX_FRITZ_PCIPNP=m +CONFIG_ISDN_CAPI=m +# CONFIG_CAPI_TRACE is not set +CONFIG_ISDN_CAPI_CAPI20=m +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_ISDN_CAPI_CAPIDRV=m +CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE=y + +# +# CAPI hardware drivers +# +CONFIG_CAPI_AVM=y +CONFIG_ISDN_DRV_AVMB1_B1PCI=m +CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y +CONFIG_ISDN_DRV_AVMB1_T1PCI=m +CONFIG_ISDN_DRV_AVMB1_C4=m +# CONFIG_CAPI_EICON is not set +CONFIG_ISDN_DRV_GIGASET=m +CONFIG_GIGASET_CAPI=y +CONFIG_GIGASET_BASE=m +CONFIG_GIGASET_M105=m 
+CONFIG_GIGASET_M101=m +# CONFIG_GIGASET_DEBUG is not set +CONFIG_HYSDN=m +CONFIG_HYSDN_CAPI=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m +CONFIG_ISDN_HDLC=m +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# 
CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PCSPKR=m +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_GP2A=m +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F54 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set 
+CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_NVRAM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# CONFIG_RANDOM_TRUST_CPU is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y 
+CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_MUX_MLXCPLD=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PTP_1588_CLOCK_KVM=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +CONFIG_PINCTRL_AMD=m +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +CONFIG_PINCTRL_INTEL=m +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_ICELAKE=m +CONFIG_PINCTRL_LEWISBURG=m 
+CONFIG_PINCTRL_SUNRISEPOINT=m +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +CONFIG_GPIO_ICH=m +# CONFIG_GPIO_LYNXPOINT is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +# 
CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m 
+CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_EMULATION is not set +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +# CONFIG_INTEL_PCH_THERMAL is not set +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is 
not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +# CONFIG_INTEL_SOC_PMIC is not set +# CONFIG_INTEL_SOC_PMIC_CHTWC is not set +# CONFIG_INTEL_SOC_PMIC_CHTDC_TI is not set +CONFIG_MFD_INTEL_LPSS=y +CONFIG_MFD_INTEL_LPSS_ACPI=y +CONFIG_MFD_INTEL_LPSS_PCI=y +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS68470 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +CONFIG_CEC_CORE=m +CONFIG_RC_CORE=m +CONFIG_RC_MAP=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_MCE_KBD_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_IR_IMON_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_RC_ATI_REMOTE=m +CONFIG_IR_ENE=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_FINTEK=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_STREAMZAP=m +CONFIG_IR_WINBOND_CIR=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_TTUSBIR=m +# CONFIG_RC_LOOPBACK is not set +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_SIR=m +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y 
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_MEDIA_CEC_RC is not set +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_VIDEOBUF_GEN=m +CONFIG_VIDEOBUF_DMA_SG=m +CONFIG_VIDEOBUF_VMALLOC=m +CONFIG_DVB_CORE=m +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_TTPCI_EEPROM=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +# CONFIG_VIDEO_CPIA2 is not set +CONFIG_USB_ZR364XX=m +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set + +# +# Analog TV USB devices +# +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_USBVISION=m +# CONFIG_VIDEO_STK1160_COMMON is not set +# CONFIG_VIDEO_GO7007 is not set + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +CONFIG_VIDEO_TM6000=m +CONFIG_VIDEO_TM6000_ALSA=m +CONFIG_VIDEO_TM6000_DVB=m + +# +# Digital TV USB devices +# +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_DTT200U=m 
+CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m +# CONFIG_DVB_USB_DVBSKY is not set +# CONFIG_DVB_USB_ZD1301 is not set +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +# CONFIG_DVB_AS102 is not set + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# USB HDMI CEC adapters +# +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_MEYE is not set +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set + +# +# Media capture/analog TV support +# +CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set +# CONFIG_VIDEO_IVTV_ALSA is not set +CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set +# CONFIG_VIDEO_DT3155 is not set + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_AV7110_IR=y +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_PATCH=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +# CONFIG_DVB_SMIPCIE is not set +# CONFIG_DVB_NETUP_UNIDVB is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +CONFIG_CEC_PLATFORM_DRIVERS=y + +# +# Supported MMC/SDIO adapters +# +CONFIG_SMS_SDIO_DRV=m +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_SI470X is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_USB_MA901 is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is 
not set + +# +# Texas Instruments WL128x FM driver (ST based) +# + +# +# Supported FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m + +# +# RDS decoders +# +CONFIG_VIDEO_SAA6588=m + +# +# Video decoders +# +CONFIG_VIDEO_SAA711X=m + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m + +# +# Video encoders +# +CONFIG_VIDEO_SAA7127=m + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m + +# +# Audio/Video compression chips +# +CONFIG_VIDEO_SAA6752HS=m + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# +CONFIG_VIDEO_M52790=m + +# +# Sensors used on soc_camera driver +# + +# +# Media SPI Adapters +# +# CONFIG_CXD2880_SPI_DRV is not set +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_M88DS3103=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m 
+CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_TDA10071=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_L64781=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_EC100=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_SI2168=m +CONFIG_DVB_GP8PSK_FE=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT3306A=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_DRX39XYJ=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_A8293=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_AF9033=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m + +# +# Tools to develop new frontends +# +CONFIG_DVB_DUMMY_FE=m + +# +# Graphics support +# +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_SIS=y +CONFIG_AGP_VIA=y +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_DCN1_0=y +# CONFIG_DEBUG_KERNEL_DC is not set + +# +# AMD Library routines +# +CONFIG_CHASH=m +# CONFIG_CHASH_STATS is not set +# CONFIG_CHASH_SELFTEST is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +# 
CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT=y +CONFIG_DRM_I915_GVT_KVMGT=m +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +CONFIG_DRM_GMA500=m +CONFIG_DRM_GMA600=y +CONFIG_DRM_GMA3600=y +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +CONFIG_HSA_AMD=m +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_XEN is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_HYPERV=m +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_PM8941_WLED is not 
set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_PCM_ELD=y +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_PCSP=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is not set +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALS4000 is not set +CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +# CONFIG_SND_ES1938 is not set +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +# CONFIG_SND_FM801 is not set +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m 
+CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set +CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +# CONFIG_SND_SONICVIBES is not set +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_EXT_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_COMPRESS=y +CONFIG_SND_SOC_TOPOLOGY=y +CONFIG_SND_SOC_ACPI=m +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_SOC_IMG is not set +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +CONFIG_SND_SST_IPC=m +CONFIG_SND_SST_IPC_ACPI=m +CONFIG_SND_SOC_INTEL_SST_ACPI=m +CONFIG_SND_SOC_INTEL_SST=m +CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m +CONFIG_SND_SOC_INTEL_HASWELL=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m +# CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m +CONFIG_SND_SOC_INTEL_MACH=y +# CONFIG_SND_SOC_INTEL_HASWELL_MACH is not set +# CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH is not set +# CONFIG_SND_SOC_INTEL_BROADWELL_MACH is not set +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m 
+CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# CONFIG_SND_SOC_XTFPGA_I2S is not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +CONFIG_SND_SOC_DA7213=m +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_DMIC=m +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +CONFIG_SND_SOC_ES8316=m +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +CONFIG_SND_SOC_HDAC_HDMI=m +# CONFIG_SND_SOC_INNO_RK3036 is not set +CONFIG_SND_SOC_MAX98090=m +CONFIG_SND_SOC_MAX98357A=m +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +CONFIG_SND_SOC_MAX98927=m +# CONFIG_SND_SOC_MAX98373 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RL6347A=m +CONFIG_SND_SOC_RT286=m +CONFIG_SND_SOC_RT298=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +CONFIG_SND_SOC_RT5640=m +CONFIG_SND_SOC_RT5645=m +CONFIG_SND_SOC_RT5651=m +CONFIG_SND_SOC_RT5663=m +CONFIG_SND_SOC_RT5670=m +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# 
CONFIG_SND_SOC_SSM2602_I2C is not set +CONFIG_SND_SOC_SSM4567=m +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +CONFIG_SND_SOC_TS3A227E=m +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +CONFIG_SND_SOC_NAU8824=m +CONFIG_SND_SOC_NAU8825=m +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SIMPLE_CARD is not set +CONFIG_SND_X86=y +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_SND_SYNTH_EMUX=m +# CONFIG_SND_XEN_FRONTEND is not set +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set 
+CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# 
CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +# CONFIG_USB_RIO500 is not set +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +# CONFIG_TYPEC_TCPM is not set +CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +# CONFIG_TYPEC_TPS6598X is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_PI3USB30532 is not set + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m 
+CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +CONFIG_LEDS_CLEVO_MAIL=m +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m 
+CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y 
+CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IOATDMA=m +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_DCA=m +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +# CONFIG_VFIO_PCI_IGD is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +# CONFIG_VFIO_SPIMDEV is not set +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +CONFIG_HYPERV_TSCPAGE=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +# CONFIG_XEN_SELFBALLOONING is not set +# CONFIG_XEN_BALLOON_MEMORY_HOTPLUG is not set +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_TMEM=m +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACER_WMI=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACERHDF=m +# CONFIG_ALIENWARE_WMI is not set +CONFIG_ASUS_LAPTOP=m +CONFIG_DELL_SMBIOS=m +CONFIG_DELL_SMBIOS_WMI=y +# CONFIG_DELL_SMBIOS_SMM is not set +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_DESCRIPTOR=m +CONFIG_DELL_WMI_AIO=m +CONFIG_DELL_WMI_LED=m +CONFIG_DELL_SMO8800=m +CONFIG_DELL_RBTN=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +CONFIG_AMILO_RFKILL=m +# CONFIG_GPD_POCKET_FAN is not set +CONFIG_HP_ACCEL=m +CONFIG_HP_WIRELESS=m +CONFIG_HP_WMI=m +CONFIG_MSI_LAPTOP=m +CONFIG_PANASONIC_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_SURFACE3_WMI is not set +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_SENSORS_HDAPS=m +# CONFIG_INTEL_MENLOW is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +CONFIG_EEEPC_WMI=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +CONFIG_INTEL_WMI_THUNDERBOLT=m +CONFIG_MSI_WMI=m +# CONFIG_PEAQ_WMI is not set +CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set 
+CONFIG_ACPI_CMPC=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +CONFIG_INTEL_IPS=m +CONFIG_INTEL_PMC_CORE=m +# CONFIG_IBM_RTL is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_MXM_WMI=m +CONFIG_INTEL_OAKTRAIL=m +CONFIG_SAMSUNG_Q10=m +CONFIG_APPLE_GMUX=m +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_PVPANIC=y +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_I2C_MULTI_INSTANTIATE is not set +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_IOVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set 
+# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VIPERBOARD_ADC is not set + +# +# Analog Front Ends +# + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is 
not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +CONFIG_NTB=m +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y 
+CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL=m +# CONFIG_IDLE_INJECT is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +CONFIG_RAS_CEC=y +CONFIG_THUNDERBOLT=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_FPGA is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_MANDATORY_FILE_LOCKING is not set +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m 
+CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y 
+CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_IMA_TRUSTED_KEYRING=y +# CONFIG_IMA_BLACKLIST_KEYRING is not set +# CONFIG_IMA_LOAD_X509 is not set +CONFIG_EVM=y 
+CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +# CONFIG_EVM_LOAD_X509 is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_MCRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_SHA1_MB=m +CONFIG_CRYPTO_SHA256_MB=m +CONFIG_CRYPTO_SHA512_MB=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m +# CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m 
+CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CHACHA20_X86_64=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_HISILICON is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y 
+CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_KASAN=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +# CONFIG_TEST_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 
+CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not 
set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN_ALIGNMENT is not set +# CONFIG_TEST_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +CONFIG_ETMEM_SCAN=m +CONFIG_ETMEM_SWAP=m diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 0eb9f92f37179516637d1722bd12522e9060d231..7aab9c4f86a9f6730227fe6d0b709b2cd478c7a0 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -217,7 +217,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..7993f0f3e7a42a4da7fd5a5af5258f793f928e52 --- /dev/null +++ b/arch/x86/configs/openeuler_defconfig @@ -0,0 +1,7592 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 4.19.90 Kernel Configuration +# +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +# CONFIG_KTASK is not set + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +# CONFIG_MEMCG_MEMFS_INFO is not set +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_QOS_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_QOS_SCHED_DYNAMIC_AFFINITY=y +# CONFIG_QOS_SCHED_SMART_GRID is not set +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y 
+CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_CGROUP_FILES=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_STEAL=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not 
set +CONFIG_RETPOLINE=y +# CONFIG_INTEL_RDT is not set +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_QUEUED_LOCK_STAT is not set +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_I8K=m +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_HYGON=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +# CONFIG_NUMA_AWARE_SPINLOCKS is not set +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_UMIP=y +# CONFIG_X86_INTEL_MPX is not set +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# 
CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +CONFIG_KEXEC_VERIFY_SIG=y +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x200000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +CONFIG_HOTPLUG_CPU=y +CONFIG_BOOTPARAM_HOTPLUG_CPU0=y +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH_FTRACE=y +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +# CONFIG_LIVEPATCH_FTRACE is not set +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +# CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_PM_TRACE_RTC is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_DPTF_POWER=m +CONFIG_ACPI_WATCHDOG=y 
+CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +CONFIG_PMIC_OPREGION=y +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +CONFIG_SFI=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +# CONFIG_XEN_PCIDEV_FRONTEND is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +CONFIG_VMD=y + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_HISILICON_PCIE_CAE is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DELL_RBU=m +CONFIG_DCDBAS=m +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m 
+CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +CONFIG_APPLE_PROPERTIES=y +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_MMU_AUDIT=y +CONFIG_VHOST_NET=m +# CONFIG_VHOST_SCSI is not set +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +# CONFIG_QUICK_KEXEC is not set +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=m +CONFIG_OPROFILE_EVENT_MULTIPLEX=y +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y 
+CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_WBT_SQ is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_BIO_DISPATCH_ASYNC is not set +# CONFIG_BLK_IO_HIERARCHY_STATS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y 
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MM_OWNER=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_SHRINK_PAGECACHE=y +CONFIG_USERSWAP=y +CONFIG_MEMCG_QOS=y +# CONFIG_CMA is not set +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_ZSMALLOC_STAT=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_ZONE_DEVICE=y +CONFIG_ZONE_DEVICE=y +CONFIG_ARCH_HAS_HMM=y +CONFIG_MIGRATE_VMA_HELPER=y +CONFIG_DEV_PAGEMAP_OPS=y +CONFIG_HMM=y +CONFIG_HMM_MIRROR=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_DEVICE_PUBLIC=y +CONFIG_FRAME_VECTOR=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# CONFIG_CLEAR_FREELIST_PAGE is not set +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY 
is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_TCP_COMP=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_SET=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_SOCKET 
is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +# CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m 
+CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_MH is not set +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_MASQUERADE_IPV4=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_MASQ_IPV4=m +CONFIG_NFT_REDIR_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_MASQ_IPV6=m +CONFIG_NFT_REDIR_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_MASQUERADE_IPV6=y +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m 
+CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set 
+CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +# CONFIG_CAN_PHYTIUM is not set +CONFIG_CAN_SJA1000=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y 
+CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +# CONFIG_NET_9P_XEN is not set +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_DEVLINK=y +CONFIG_MAY_USE_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# 
CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +# CONFIG_PARPORT_AX88796 is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_PARIDE is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_VIRTIO_BLK=m +# CONFIG_VIRTIO_BLK_SCSI is not set +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +CONFIG_SGI_IOC4=m +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_SGI_XP=m +CONFIG_HP_ILO=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +# CONFIG_INTEL_MEI_TXE is not set +CONFIG_VMWARE_VMCI=m + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is 
not set +# CONFIG_ECHO is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +# CONFIG_UACCE is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_MQ_DEFAULT=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_3SNIC_SSSRAID=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_HISI_RAID=m +# CONFIG_SCSI_UFSHCD is not set +CONFIG_RAMAXEL_SPRAID=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +CONFIG_VMWARE_PVSCSI=m +# CONFIG_XEN_SCSI_FRONTEND is not set +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=m +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF 
controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +CONFIG_SATA_ZHAOXIN=m + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_MQ_DEFAULT is not set +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# 
+CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_GTP is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_NET_VENDOR_3SNIC=y +CONFIG_SSSNIC=m +CONFIG_SSSNIC_HW=m +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +CONFIG_AMD_XGBE_HAVE_ECC=y +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_CX_ECAT is not set +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +# CONFIG_BE2NET_BE2 is not set +# CONFIG_BE2NET_BE3 is not set +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_BMA=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +# CONFIG_IXGB is not set +CONFIG_IXGBE=m 
+CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_NET_VENDOR_NETSWIFT=y +CONFIG_TXGBE=m +CONFIG_NGBE=m +# CONFIG_NGBE_HWMON is not set +# CONFIG_NGBE_PROCFS is not set +# CONFIG_NGBE_NO_LLI is not set +# CONFIG_NGBE_DEBUG_FS is not set +# CONFIG_NGBE_POLL_LINK_STATUS is not set +# CONFIG_NGBE_SYSFS is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_EN_IPSEC=y +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SWITCHIB=m +CONFIG_MLXSW_SWITCHX2=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_ATP is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y + +# +# MII 
PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BROADCOM_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_FIXED_PHY=y +CONFIG_ICPLUS_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_LXT_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +# CONFIG_ATH9K_STATION_STATISTICS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_ATH9K_COMMON_SPECTRAL is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +CONFIG_ATH10K_DEBUGFS=y +# CONFIG_ATH10K_SPECTRAL is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set 
+CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +# CONFIG_IWLWIFI_BCAST_FILTERING is not set + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEBUGFS=y +# CONFIG_IWLWIFI_DEVICE_TRACING is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x2_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PEARL_PCIE is not set +CONFIG_MAC80211_HWSIM=m +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +# CONFIG_LANMEDIA is not set +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_DSCC4 is not set +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +# CONFIG_SBNI is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not 
set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_THUNDERBOLT_NET=m +CONFIG_HYPERV_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_I4L=m +CONFIG_ISDN_PPP=y +CONFIG_ISDN_PPP_VJ=y +CONFIG_ISDN_MPP=y +CONFIG_IPPP_FILTER=y +# CONFIG_ISDN_PPP_BSDCOMP is not set +CONFIG_ISDN_AUDIO=y +CONFIG_ISDN_TTY_FAX=y + +# +# ISDN feature submodules +# +CONFIG_ISDN_DIVERSION=m + +# +# ISDN4Linux hardware drivers +# + +# +# Passive cards +# +CONFIG_ISDN_DRV_HISAX=m + +# +# D-channel protocol features +# +CONFIG_HISAX_EURO=y +CONFIG_DE_AOC=y +CONFIG_HISAX_NO_SENDCOMPLETE=y +CONFIG_HISAX_NO_LLC=y +CONFIG_HISAX_NO_KEYPAD=y +CONFIG_HISAX_1TR6=y +CONFIG_HISAX_NI1=y +CONFIG_HISAX_MAX_CARDS=8 + +# +# HiSax supported cards +# +CONFIG_HISAX_16_3=y +CONFIG_HISAX_TELESPCI=y +CONFIG_HISAX_S0BOX=y +CONFIG_HISAX_FRITZPCI=y +CONFIG_HISAX_AVM_A1_PCMCIA=y +CONFIG_HISAX_ELSA=y +CONFIG_HISAX_DIEHLDIVA=y +CONFIG_HISAX_SEDLBAUER=y +CONFIG_HISAX_NETJET=y +CONFIG_HISAX_NETJET_U=y +CONFIG_HISAX_NICCY=y +CONFIG_HISAX_BKM_A4T=y +CONFIG_HISAX_SCT_QUADRO=y +CONFIG_HISAX_GAZEL=y +CONFIG_HISAX_HFC_PCI=y +CONFIG_HISAX_W6692=y +CONFIG_HISAX_HFC_SX=y +CONFIG_HISAX_ENTERNOW_PCI=y +# CONFIG_HISAX_DEBUG is not set + +# +# HiSax PCMCIA card service modules +# + +# +# HiSax sub driver modules +# +CONFIG_HISAX_ST5481=m +# CONFIG_HISAX_HFCUSB is not set +CONFIG_HISAX_HFC4S8S=m +CONFIG_HISAX_FRITZ_PCIPNP=m +CONFIG_ISDN_CAPI=m +# CONFIG_CAPI_TRACE is not set +CONFIG_ISDN_CAPI_CAPI20=m +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_ISDN_CAPI_CAPIDRV=m +CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE=y + +# +# CAPI hardware drivers +# +CONFIG_CAPI_AVM=y +CONFIG_ISDN_DRV_AVMB1_B1PCI=m +CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y +CONFIG_ISDN_DRV_AVMB1_T1PCI=m +CONFIG_ISDN_DRV_AVMB1_C4=m +# CONFIG_CAPI_EICON is not set +CONFIG_ISDN_DRV_GIGASET=m +CONFIG_GIGASET_CAPI=y +CONFIG_GIGASET_BASE=m +CONFIG_GIGASET_M105=m +CONFIG_GIGASET_M101=m +# CONFIG_GIGASET_DEBUG is not set +CONFIG_HYSDN=m +CONFIG_HYSDN_CAPI=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m +CONFIG_ISDN_HDLC=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# 
CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# 
CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PCSPKR=m +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_GP2A=m +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F54 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +CONFIG_CYCLADES=m +# CONFIG_CYZ_INTR is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_GT=m +CONFIG_NOZOMI=m +# CONFIG_ISI is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# 
CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_ZHAOXIN=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +CONFIG_TELCLOCK=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# CONFIG_RANDOM_TRUST_CPU is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_MUX_MLXCPLD=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_ZHAOXIN=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_PHYTIUM_PLATFORM is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m 
+CONFIG_I2C_PARPORT=m +CONFIG_I2C_PARPORT_LIGHT=m +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PHYTIUM_PLAT is not set +# CONFIG_SPI_PHYTIUM_PCI is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PTP_1588_CLOCK_KVM=m +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +CONFIG_PINCTRL_AMD=m +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +CONFIG_PINCTRL_INTEL=m +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_ICELAKE=m +CONFIG_PINCTRL_LEWISBURG=m +CONFIG_PINCTRL_SUNRISEPOINT=m +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +CONFIG_GPIO_ICH=m +# CONFIG_GPIO_LYNXPOINT is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_PHYTIUM_PCI is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# 
CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +# CONFIG_SENSORS_ASPEED is not set +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m 
+CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_IR35221 is not set +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_EMULATION is not set +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y 
+CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +# CONFIG_INTEL_SOC_PMIC_CHTDC_TI is not set +CONFIG_MFD_INTEL_LPSS=y +CONFIG_MFD_INTEL_LPSS_ACPI=y +CONFIG_MFD_INTEL_LPSS_PCI=y +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# 
CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +CONFIG_CEC_CORE=y +CONFIG_RC_CORE=m +CONFIG_RC_MAP=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_MCE_KBD_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_IR_IMON_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_RC_ATI_REMOTE=m +CONFIG_IR_ENE=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_FINTEK=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_STREAMZAP=m +CONFIG_IR_WINBOND_CIR=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_TTUSBIR=m +# CONFIG_RC_LOOPBACK is not set +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_SIR=m +CONFIG_MEDIA_SUPPORT=m + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_V4L2=m +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_DVB_CORE=m +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_TTPCI_EEPROM=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m 
+CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +# CONFIG_VIDEO_CPIA2 is not set +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set + +# +# Analog TV USB devices +# +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_USBVISION=m +# CONFIG_VIDEO_STK1160_COMMON is not set +# CONFIG_VIDEO_GO7007 is not set + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set + +# +# Digital TV USB devices +# +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m +# CONFIG_DVB_USB_DVBSKY is not set +# CONFIG_DVB_USB_ZD1301 is not set +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +# CONFIG_DVB_AS102 is not set + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# USB HDMI CEC adapters +# +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_MEYE is not set +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set + +# +# Media capture/analog TV support +# +CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set +# CONFIG_VIDEO_IVTV_ALSA is not set +CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_DT3155 is not set + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m 
+CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +# CONFIG_DVB_SMIPCIE is not set +# CONFIG_DVB_NETUP_UNIDVB is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +CONFIG_CEC_PLATFORM_DRIVERS=y + +# +# Supported MMC/SDIO adapters +# +CONFIG_SMS_SDIO_DRV=m +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_SI470X is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_USB_MA901 is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set + +# +# Texas Instruments WL128x FM driver (ST based) +# + +# +# Supported FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m + +# +# RDS decoders +# +CONFIG_VIDEO_SAA6588=m + +# +# Video decoders +# +CONFIG_VIDEO_SAA711X=m + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m + +# +# Video encoders +# +CONFIG_VIDEO_SAA7127=m + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m + +# +# Audio/Video compression chips +# +CONFIG_VIDEO_SAA6752HS=m + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# +CONFIG_VIDEO_M52790=m + +# +# Sensors used on soc_camera driver +# + +# +# Media SPI Adapters +# +# CONFIG_CXD2880_SPI_DRV is not set +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m 
+CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_M88DS3103=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_TDA10071=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_L64781=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_EC100=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_SI2168=m +CONFIG_DVB_GP8PSK_FE=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT3306A=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_DRX39XYJ=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_A8293=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_AF9033=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m + +# +# Tools to develop new frontends +# +CONFIG_DVB_DUMMY_FE=m + +# +# Graphics support +# +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_VGA_SWITCHEROO=y 
+CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_VM=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_DCN1_0=y +# CONFIG_DEBUG_KERNEL_DC is not set + +# +# AMD Library routines +# +CONFIG_CHASH=m +# CONFIG_CHASH_STATS is not set +# CONFIG_CHASH_SELFTEST is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +# CONFIG_DRM_I915_ALPHA_SUPPORT is not set +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT=y +CONFIG_DRM_I915_GVT_KVMGT=m +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +CONFIG_DRM_GMA500=m +CONFIG_DRM_GMA600=y +CONFIG_DRM_GMA3600=y +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +CONFIG_HSA_AMD=m +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_XEN is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_BACKLIGHT=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 
is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_HYPERV=m +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_PCM_ELD=y +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_PCSP=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is not set +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +# CONFIG_SND_ALS300 is not set +# CONFIG_SND_ALS4000 is not set +CONFIG_SND_ALI5451=m 
+CONFIG_SND_ASIHPI=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set +# CONFIG_SND_AZT3328 is not set +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +# CONFIG_SND_ES1938 is not set +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +# CONFIG_SND_FM801 is not set +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set +CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +# CONFIG_SND_SONICVIBES is not set +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_INTEL=m +# CONFIG_SND_HDA_PHYTIUM is not set +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_EXT_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_COMPRESS=y +CONFIG_SND_SOC_TOPOLOGY=y +CONFIG_SND_SOC_ACPI=m +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set 
+# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_SOC_IMG is not set +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +CONFIG_SND_SST_IPC=m +CONFIG_SND_SST_IPC_ACPI=m +CONFIG_SND_SOC_INTEL_SST_ACPI=m +CONFIG_SND_SOC_INTEL_SST=m +CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m +CONFIG_SND_SOC_INTEL_HASWELL=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m +# CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI is not set +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m +CONFIG_SND_SOC_INTEL_MACH=y +# CONFIG_SND_SOC_INTEL_HASWELL_MACH is not set +# CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH is not set +# CONFIG_SND_SOC_INTEL_BROADWELL_MACH is not set +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# CONFIG_SND_SOC_XTFPGA_I2S is not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +CONFIG_SND_SOC_DA7213=m +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_DMIC=m +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +CONFIG_SND_SOC_ES8316=m +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +CONFIG_SND_SOC_HDAC_HDMI=m +# CONFIG_SND_SOC_INNO_RK3036 is not set +CONFIG_SND_SOC_MAX98090=m +CONFIG_SND_SOC_MAX98357A=m +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +CONFIG_SND_SOC_MAX98927=m +# CONFIG_SND_SOC_MAX98373 is 
not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RL6347A=m +CONFIG_SND_SOC_RT286=m +CONFIG_SND_SOC_RT298=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +CONFIG_SND_SOC_RT5640=m +CONFIG_SND_SOC_RT5645=m +CONFIG_SND_SOC_RT5651=m +CONFIG_SND_SOC_RT5663=m +CONFIG_SND_SOC_RT5670=m +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +CONFIG_SND_SOC_SSM4567=m +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +CONFIG_SND_SOC_TS3A227E=m +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +CONFIG_SND_SOC_NAU8824=m +CONFIG_SND_SOC_NAU8825=m +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SIMPLE_CARD is not set +CONFIG_SND_X86=y +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_SND_SYNTH_EMUX=m +CONFIG_SND_XEN_FRONTEND=m +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m 
+CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# I2C HID support +# +CONFIG_I2C_HID=m + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_WUSB=m +CONFIG_USB_WUSB_CBAF=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not 
set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_U132_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_WHCI_HCD is not set +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SISUSBVGA_CON=y +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# 
CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +CONFIG_TYPEC_TCPCI=y +CONFIG_TYPEC_RT1711H=y +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=y +CONFIG_UCSI_ACPI=y +CONFIG_TYPEC_TPS6598X=m + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +CONFIG_TYPEC_MUX_PI3USB30532=m + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=y +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_WHCI=m +CONFIG_UWB_I1480U=m +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +# CONFIG_LEDS_LP8501 is not set +CONFIG_LEDS_CLEVO_MAIL=m +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_GPIO=m 
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RX6110 is not set +# 
CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IOATDMA=m +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_DCA=m +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +# CONFIG_VFIO_PCI_IGD is not set +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +CONFIG_HYPERV_TSCPAGE=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +# CONFIG_XEN_SELFBALLOONING is not set +# CONFIG_XEN_BALLOON_MEMORY_HOTPLUG is not set +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_TMEM=m +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACER_WMI=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACERHDF=m +# CONFIG_ALIENWARE_WMI is not set +CONFIG_ASUS_LAPTOP=m +CONFIG_DELL_SMBIOS=m +CONFIG_DELL_SMBIOS_WMI=y +# CONFIG_DELL_SMBIOS_SMM is not set +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_DESCRIPTOR=m +CONFIG_DELL_WMI_AIO=m +CONFIG_DELL_WMI_LED=m +CONFIG_DELL_SMO8800=m +CONFIG_DELL_RBTN=m +CONFIG_FUJITSU_LAPTOP=m 
+CONFIG_FUJITSU_TABLET=m +CONFIG_AMILO_RFKILL=m +# CONFIG_GPD_POCKET_FAN is not set +CONFIG_HP_ACCEL=m +CONFIG_HP_WIRELESS=m +CONFIG_HP_WMI=m +CONFIG_MSI_LAPTOP=m +CONFIG_PANASONIC_LAPTOP=m +CONFIG_COMPAL_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_SURFACE3_WMI is not set +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +CONFIG_SENSORS_HDAPS=m +# CONFIG_INTEL_MENLOW is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +CONFIG_EEEPC_WMI=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +CONFIG_INTEL_WMI_THUNDERBOLT=m +CONFIG_MSI_WMI=m +# CONFIG_PEAQ_WMI is not set +CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set +CONFIG_ACPI_CMPC=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +CONFIG_INTEL_IPS=m +CONFIG_INTEL_PMC_CORE=m +# CONFIG_IBM_RTL is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_MXM_WMI=m +CONFIG_INTEL_OAKTRAIL=m +CONFIG_SAMSUNG_Q10=m +CONFIG_APPLE_GMUX=m +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_PVPANIC=y +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_I2C_MULTI_INSTANTIATE is not set +# CONFIG_INTEL_ATOMISP2_PM is not set + +# +# Intel Speed Select Technology interface support +# +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# + +# +# Generic PASID table support +# +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_IOVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y +# CONFIG_SMMU_BYPASS_DEV is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# 
CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VIPERBOARD_ADC is not set + +# +# Analog Front Ends +# + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set + +# +# IIO dummy driver +# + +# +# Frequency 
Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set + +# +# Magnetometer sensors +# +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set + +# +# Digital potentiometers +# +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# 
CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S1200 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +CONFIG_NTB=m +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +# CONFIG_IDLE_INJECT is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +CONFIG_THUNDERBOLT=y + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# CONFIG_FPGA is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +# CONFIG_EXT4_PARALLEL_DIO_READ is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set 
+CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_MANDATORY_FILE_LOCKING is not set +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_DYNAMIC_HUGETLB is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# 
CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_IO_WQ=y + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_INFINIBAND=y 
+CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_IMA_TRUSTED_KEYRING=y +# CONFIG_IMA_BLACKLIST_KEYRING is not set +# CONFIG_IMA_LOAD_X509 is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +# CONFIG_EVM_LOAD_X509 is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m 
+CONFIG_CRYPTO_XTS=m +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_SHA3=m +# CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CHACHA20_X86_64=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +CONFIG_CRYPTO_ZSTD=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_ZHAOXIN_SM3=m +CONFIG_CRYPTO_DEV_ZHAOXIN_SM4=m +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_CRYPTO_DEV_ZHAOXIN=m +CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m +CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CHELSIO_IPSEC_INLINE=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +# CONFIG_CRYPTO_DEV_VIRTIO is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y 
+CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_ETMEM_SCAN=m +CONFIG_ETMEM_SWAP=m +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# 
CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_PGO_KERNEL is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +# CONFIG_KCOV is not set +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +CONFIG_TRACING_MAP=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_TRACING_EVENTS_GPIO is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID 
is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/storage_ci_defconfig b/arch/x86/configs/storage_ci_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..51df58940ac6e15bb69eacdbab4a3f94427f5fe3 --- /dev/null +++ b/arch/x86/configs/storage_ci_defconfig @@ -0,0 +1,3336 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 4.19.44 Kernel Configuration +# + +# +# Compiler: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.10) 5.4.0 20160609 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=50400 +CONFIG_CLANG_VERSION=0 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=25 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y 
+CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_BPF is not set +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_FILES is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +# CONFIG_RETPOLINE is not set +# CONFIG_INTEL_RDT is not set +CONFIG_X86_EXTENDED_PLATFORM=y 
+CONFIG_X86_NUMACHIP=y +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# CONFIG_PARAVIRT_SPINLOCKS is not set +# CONFIG_XEN is not set +CONFIG_KVM_GUEST=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +# CONFIG_X86_MCELOG_LEGACY is not set +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +# CONFIG_PERF_EVENTS_AMD_POWER is not set +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +CONFIG_X86_INTEL_MPX=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +CONFIG_KEXEC_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +# 
CONFIG_RANDOMIZE_MEMORY is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH_FTRACE=y + +# +# Enable Livepatch +# +# CONFIG_LIVEPATCH is not set +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +CONFIG_PM_TEST_SUSPEND=y +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +# CONFIG_ACPI_SBS is not set +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_DPTF_POWER is not set +# CONFIG_ACPI_EXTLOG is not set +CONFIG_PMIC_OPREGION=y +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +CONFIG_SFI=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set 
+# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_VMD is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +# CONFIG_DELL_RBU is not set +# CONFIG_DCDBAS is not set +# CONFIG_DMIID is not set +# CONFIG_DMI_SYSFS is not set +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +# CONFIG_VIRTUALIZATION is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y 
+CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +CONFIG_GCOV_KERNEL=y +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_GCOV_PROFILE_ALL=y +CONFIG_GCOV_FORMAT_4_7=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +CONFIG_AIX_PARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# 
CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=m +CONFIG_IOSCHED_BFQ=m +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +# CONFIG_SHRINK_PAGECACHE is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +# CONFIG_XFRM_USER is not set +# CONFIG_NET_KEY is not set +# CONFIG_SMC is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=m +# 
CONFIG_IP_MROUTE is not set +CONFIG_SYN_COOKIES=y +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +CONFIG_TCP_CONG_CUBIC=y +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +# CONFIG_NETFILTER is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +# CONFIG_BRIDGE is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_FAILOVER=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +CONFIG_DEBUG_DEVRES=y +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE 
is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +# CONFIG_DMA_CMA is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# Partition parsers +# + +# +# User Modules And Translation Layers +# +# CONFIG_MTD_BLOCK is not set +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_AMD76XROM is not set +# CONFIG_MTD_ICHXROM is not set +# CONFIG_MTD_ESB2ROM is not set +# CONFIG_MTD_CK804XROM is not set +# CONFIG_MTD_SCB2_FLASH is not set +# CONFIG_MTD_NETtel is not set +# CONFIG_MTD_L440GX is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_BCH=y +CONFIG_MTD_NAND_ECC_BCH=y +# CONFIG_MTD_NAND_DENALI_PCI is not set +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_DOCG4 is not set +# CONFIG_MTD_NAND_CAFE is not set +CONFIG_MTD_NAND_NANDSIM=m +# CONFIG_MTD_NAND_PLATFORM is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_FASTMAP=y +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_ZRAM is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=2 
+CONFIG_BLK_DEV_RAM_SIZE=1048576 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_VIRTIO_BLK_SCSI is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_NVME_MULTIPATH is not set +CONFIG_NVME_FABRICS=m +# CONFIG_NVME_RDMA is not set +# CONFIG_NVME_FC is not set +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_LOOP=m +# CONFIG_NVME_TARGET_RDMA is not set +# CONFIG_NVME_TARGET_FC is not set + +# +# Misc devices +# +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_SGI_XP is not set +# CONFIG_HP_ILO is not set +# CONFIG_SGI_GRU is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_VMWARE_VMCI is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=y +CONFIG_SCSI_CXGB4_ISCSI=y +CONFIG_SCSI_BNX2_ISCSI=y +CONFIG_BE2ISCSI=y +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_VMWARE_PVSCSI is not set +# 
CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +# CONFIG_SCSI_DH_HP_SW is not set +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +# CONFIG_MD_LINEAR is not set +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +CONFIG_MD_RAID10=y +CONFIG_MD_RAID456=y +# CONFIG_MD_MULTIPATH is not set +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_MQ_DEFAULT=y +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=y +# 
CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=y +CONFIG_DM_PERSISTENT_DATA=y +# CONFIG_DM_UNSTRIPED is not set +# CONFIG_DM_CRYPT is not set +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=y +CONFIG_DM_CACHE=y +CONFIG_DM_CACHE_SMQ=y +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +CONFIG_DM_MIRROR=y +# CONFIG_DM_LOG_USERSPACE is not set +# CONFIG_DM_RAID is not set +CONFIG_DM_ZERO=y +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=y +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=y +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +CONFIG_DM_LOG_WRITES=y +# CONFIG_DM_INTEGRITY is not set +# CONFIG_DM_ZONED is not set +CONFIG_TARGET_CORE=y +CONFIG_TCM_IBLOCK=m +# CONFIG_TCM_FILEIO is not set +# CONFIG_TCM_PSCSI is not set +# CONFIG_TCM_USER2 is not set +# CONFIG_LOOPBACK_TARGET is not set +# CONFIG_ISCSI_TARGET is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_TUN is not set +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +CONFIG_MDIO=y +CONFIG_NET_VENDOR_3COM=y +# CONFIG_VORTEX is not set +# CONFIG_TYPHOON is not set +CONFIG_NET_VENDOR_ADAPTEC=y +# CONFIG_ADAPTEC_STARFIRE is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ACENIC is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +# CONFIG_AMD_XGBE is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_ALX is not set +CONFIG_NET_VENDOR_AURORA=y +# CONFIG_AURORA_NB8800 is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=y +CONFIG_CNIC=y +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=y +CONFIG_CHELSIO_T4=y +# CONFIG_CHELSIO_T4VF is not set +CONFIG_CHELSIO_LIB=y +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +# 
CONFIG_DL2K is not set +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_HP=y +# CONFIG_HP100 is not set +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +# CONFIG_E1000E is not set +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +# CONFIG_IXGBEVF is not set +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +# CONFIG_MVMDIO is not set +# CONFIG_SKGE is not set +# CONFIG_SKY2 is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +# CONFIG_MLX5_CORE is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +CONFIG_NET_VENDOR_MICREL=y +# CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_KSZ884X_PCI is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +CONFIG_NET_VENDOR_NATSEMI=y +# CONFIG_NATSEMI is not set +# CONFIG_NS83820 is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +CONFIG_NET_VENDOR_8390=y +# CONFIG_NE2K_PCI is not set +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_QLOGIC=y +# CONFIG_QLA3XXX is not set +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +# CONFIG_NETXEN_NIC is not set +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCOM_EMAC is not set +# CONFIG_RMNET is not set +CONFIG_NET_VENDOR_RDC=y +# CONFIG_R6040 is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +CONFIG_NET_VENDOR_SILAN=y +# CONFIG_SC92031 is not set +CONFIG_NET_VENDOR_SIS=y +# CONFIG_SIS900 is not set +# CONFIG_SIS190 is not set +CONFIG_NET_VENDOR_SMSC=y +# CONFIG_EPIC100 is not set +# CONFIG_SMSC911X is not set +# CONFIG_SMSC9420 is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_STMICRO=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NET_VENDOR_SUN=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set +# CONFIG_NIU is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +CONFIG_NET_VENDOR_TEHUTI=y +# CONFIG_TEHUTI is not set +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_ALE is not set +# CONFIG_TLAN is not set +CONFIG_NET_VENDOR_VIA=y +# CONFIG_VIA_RHINE is not set +# CONFIG_VIA_VELOCITY is not set +CONFIG_NET_VENDOR_WIZNET=y +# CONFIG_WIZNET_W5100 is not set +# CONFIG_WIZNET_W5300 is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +# CONFIG_MDIO_DEVICE is not set +# CONFIG_PHYLIB is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Host-side USB support is needed for USB Network Adapter support +# +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# 
CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +# CONFIG_SYNCLINK is not set +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +# CONFIG_HW_RANDOM_INTEL is not set +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set +# CONFIG_HW_RANDOM_VIRTIO is not set +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +# CONFIG_HPET_MMAP is not set +# CONFIG_HANGCHECK_TIMER is not set +# CONFIG_UV_MMTIMER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_TELCLOCK is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# CONFIG_RANDOM_TRUST_CPU is not set + +# +# I2C support +# +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set 
+# CONFIG_PPS is not set + +# +# PTP clock support +# +# CONFIG_PTP_1588_CLOCK is not set + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_PINCTRL=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_BAYTRAIL is not set +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_HWMON is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_INTEL_POWERCLAMP is not set +# CONFIG_X86_PKG_TEMP_THERMAL is not set +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# CONFIG_INTEL_PCH_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_SIS=y +CONFIG_AGP_VIA=y +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_VGA_SWITCHEROO is not set +# CONFIG_DRM is not set +# CONFIG_DRM_DP_CEC is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# 
CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set + +# +# HID support +# +# CONFIG_HID is not set + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=y +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_CXGB3 is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_MLX4_INFINIBAND is not set +# CONFIG_INFINIBAND_NES is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_IPOIB=m +# CONFIG_INFINIBAND_IPOIB_CM is not set +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +# CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_OPA_VNIC is not set +# CONFIG_INFINIBAND_RDMAVT is not set +CONFIG_RDMA_RXE=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +# CONFIG_EDAC_DECODE_MCE is not set +# CONFIG_EDAC_GHES is not set +# CONFIG_EDAC_E752X is not set +# CONFIG_EDAC_I82975X is not set +# CONFIG_EDAC_I3000 is not set +# CONFIG_EDAC_I3200 is not set +# CONFIG_EDAC_IE31200 is not set +# CONFIG_EDAC_X38 is not set +# CONFIG_EDAC_I5400 is not set +# CONFIG_EDAC_I7CORE is not set +# CONFIG_EDAC_I5000 is not set +# CONFIG_EDAC_I5100 is not set +# CONFIG_EDAC_I7300 is not set +# CONFIG_EDAC_SBRIDGE is not set +# CONFIG_EDAC_SKX is not set +# CONFIG_EDAC_PND2 is not set 
+CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# + +# +# SPI RTC drivers +# + +# +# SPI and I2C RTC drivers +# + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_INTEL_IOATDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=y +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ACERHDF is not set +# CONFIG_DELL_SMBIOS is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_INTEL_IPS is not set +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_INTEL_RST is not set +CONFIG_INTEL_SMARTCONNECT=y +# CONFIG_PVPANIC is not set +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_TURBO_MAX_3 is not set +# CONFIG_INTEL_ATOMISP2_PM is not set +CONFIG_PMC_ATOM=y +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_PSTORE is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y 
+ +# +# Common Clock Framework +# +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +# CONFIG_ANDROID is not set +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +CONFIG_JBD2_DEBUG=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# 
CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_UBIFS_FS_XATTR=y +# CONFIG_UBIFS_FS_ENCRYPTION is not set +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y +CONFIG_SQUASHFS_EMBEDDED=y +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=y +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFSD=y +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y 
+CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=y +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +CONFIG_CIFS_STATS2=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +CONFIG_CIFS_DEBUG2=y +CONFIG_CIFS_DEBUG_DUMP_KEYS=y +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_PAGE_TABLE_ISOLATION is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ASYNC_PQ=y +CONFIG_ASYNC_RAID6_RECOV=y +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y 
+CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y +CONFIG_CRYPTO_ENGINE=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +# CONFIG_CRYPTO_ECHAINIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32C_INTEL is not set +CONFIG_CRYPTO_CRC32=m +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=y +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA1_SSSE3 is not set +# CONFIG_CRYPTO_SHA256_SSSE3 is not set +# CONFIG_CRYPTO_SHA512_SSSE3 is not set +# CONFIG_CRYPTO_SHA1_MB is not set +# CONFIG_CRYPTO_SHA256_MB is not set +# CONFIG_CRYPTO_SHA512_MB is not set +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_X86_64=y +CONFIG_CRYPTO_AES_NI_INTEL=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_BLOWFISH_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAST6 is not set +# 
CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set +# CONFIG_CRYPTO_TWOFISH_X86_64 is not set +# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set +# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_PADLOCK is not set +CONFIG_CRYPTO_DEV_CCP=y +# CONFIG_CRYPTO_DEV_CCP_DD is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y 
+CONFIG_BCH=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_VIRT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_UNUSED_SYMBOLS=y +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_DEBUG_RODATA_TEST=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_VMACACHE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +# CONFIG_FAILSLAB is not set +# CONFIG_FAIL_PAGE_ALLOC is not set +CONFIG_FAIL_MAKE_REQUEST=y +# CONFIG_FAIL_IO_TIMEOUT is not set +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_MMIOTRACE=y +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_GCOV_PROFILE_FTRACE is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set 
+CONFIG_ATOMIC64_SELFTEST=y +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +# CONFIG_KGDB_KDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +CONFIG_X86_PTDUMP_CORE=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +CONFIG_DEBUG_WX=y +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/syzkaller_defconfig b/arch/x86/configs/syzkaller_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..09a383dcb90250817df60a9d84aed899c8c2e954 --- /dev/null +++ b/arch/x86/configs/syzkaller_defconfig @@ -0,0 +1,4131 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 4.19.32 Kernel Configuration +# + +# +# Compiler: gcc (Ubuntu 7.3.0-27ubuntu1~18.04) 7.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70300 +CONFIG_CLANG_VERSION=0 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_RCU_NOCB_CPU=y +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y 
+CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +# CONFIG_CGROUP_FILES is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_STEAL=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +# 
CONFIG_INTEL_RDT is not set +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_QUEUED_LOCK_STAT is not set +CONFIG_XEN=y +CONFIG_XEN_PV=y +CONFIG_XEN_PV_SMP=y +CONFIG_XEN_DOM0=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_512GB=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +# CONFIG_KVM_DEBUG_FS is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +CONFIG_NO_BOOTMEM=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +CONFIG_GART_IOMMU=y +# CONFIG_CALGARY_IOMMU is not set +CONFIG_MAXSMP=y +CONFIG_NR_CPUS_RANGE_BEGIN=8192 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=8192 +CONFIG_NR_CPUS=8192 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +# CONFIG_X86_MCELOG_LEGACY is not set +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +# CONFIG_X86_MCE_INJECT is not set +CONFIG_X86_THERMAL_VECTOR=y + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +# CONFIG_PERF_EVENTS_AMD_POWER is not set +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_INTEL_UMIP=y +CONFIG_X86_INTEL_MPX=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y 
+CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +CONFIG_KEXEC_VERIFY_SIG=y +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_HOTPLUG_CPU=y +CONFIG_BOOTPARAM_HOTPLUG_CPU0=y +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH_FTRACE=y + +# +# Enable Livepatch +# +CONFIG_LIVEPATCH=y +# CONFIG_LIVEPATCH_FTRACE is not set +CONFIG_LIVEPATCH_WO_FTRACE=y +CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y +# CONFIG_LIVEPATCH_STACK is not set +CONFIG_LIVEPATCH_RESTRICT_KPROBE=y +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_PROCFS_POWER is not set +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_VIDEO is not set +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_NUMA=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +# CONFIG_ACPI_SBS is not set +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +# CONFIG_ACPI_NFIT is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_DPTF_POWER is not set +# CONFIG_ACPI_EXTLOG is not set +# CONFIG_PMIC_OPREGION is not set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +CONFIG_SFI=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_INTEL_IDLE=y + +# +# Bus options (PCI etc.) +# +CONFIG_PCI=y +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_PCI_DOMAINS=y +CONFIG_MMCONF_FAM10H=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +# CONFIG_XEN_PCIDEV_FRONTEND is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +# CONFIG_VMD is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +# CONFIG_YENTA is not set +# CONFIG_RAPIDIO is not set +# CONFIG_X86_SYSFB is not set + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_IA32_AOUT is not set +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +# CONFIG_DELL_RBU is not set +# CONFIG_DCDBAS is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +CONFIG_HAVE_KVM=y +CONFIG_VIRTUALIZATION=y +# CONFIG_KVM is not set +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_VSOCK is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set 
+CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +# CONFIG_REFCOUNT_FULL is not set +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set 
+CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +# CONFIG_SHRINK_PAGECACHE is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_ZSWAP=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not 
set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_NET=y +CONFIG_NET_INGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +# CONFIG_INET6_AH is not set +# CONFIG_INET6_ESP is not set +# CONFIG_INET6_IPCOMP is not set +# CONFIG_IPV6_MIP6 is not set +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=y +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +# CONFIG_NETFILTER_NETLINK_QUEUE is not set +# CONFIG_NETFILTER_NETLINK_LOG is not set +# CONFIG_NETFILTER_NETLINK_OSF is not set +# CONFIG_NF_CONNTRACK is not set +# CONFIG_NF_LOG_NETDEV is not set +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +# CONFIG_NETFILTER_XT_MARK is not set + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set +# 
CONFIG_NETFILTER_XT_TARGET_HMARK is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LED is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +# CONFIG_NETFILTER_XT_TARGET_MARK is not set +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ECN is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_HL is not set +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set +# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_MAC is not set +# CONFIG_NETFILTER_XT_MATCH_MARK is not set +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +# CONFIG_NETFILTER_XT_MATCH_POLICY is not set +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set +# CONFIG_NETFILTER_XT_MATCH_STRING is not set +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +# CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +# CONFIG_NF_SOCKET_IPV4 is not set +# CONFIG_NF_TPROXY_IPV4 is not set +# CONFIG_NF_DUP_IPV4 is not set +# CONFIG_NF_LOG_ARP is not set +# CONFIG_NF_LOG_IPV4 is not set +# CONFIG_NF_REJECT_IPV4 is not set +# CONFIG_IP_NF_IPTABLES is not set +# CONFIG_IP_NF_ARPTABLES is not set + +# +# IPv6: Netfilter Configuration +# +# CONFIG_NF_SOCKET_IPV6 is not set +# CONFIG_NF_TPROXY_IPV6 is not set +# CONFIG_NF_DUP_IPV6 is not set +# CONFIG_NF_REJECT_IPV6 is not set +# CONFIG_NF_LOG_IPV6 is not set +# CONFIG_IP6_NF_IPTABLES is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_MRP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# 
CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +# CONFIG_NET_SCH_HTB is not set +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_SKBPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +# CONFIG_NET_CLS_U32 is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +CONFIG_NET_CLS_CGROUP=y +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +# CONFIG_NET_EMATCH_U32 is not set +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not set +# CONFIG_NET_ACT_SAMPLE is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +# CONFIG_DNS_RESOLVER is not set +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=y +# CONFIG_NETLINK_DIAG is not set +CONFIG_MPLS=y +# CONFIG_NET_MPLS_GSO is not set +# CONFIG_MPLS_ROUTING is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NET_DROP_MONITOR=y +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set 
+# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +# CONFIG_FAILOVER is not set +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_SPI=y +# CONFIG_DMA_CMA is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_ZRAM is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=65536 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=y +# CONFIG_XEN_BLKDEV_BACKEND is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_SGI_XP is not set +# CONFIG_HP_ILO is not set +# CONFIG_SGI_GRU is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_VMWARE_VMCI is not set + +# +# Intel MIC & related support +# + +# +# Intel MIC Bus Driver +# +# CONFIG_INTEL_MIC_BUS is not set + +# +# SCIF Bus Driver +# +# CONFIG_SCIF_BUS is not set + +# +# VOP Bus Driver +# +# CONFIG_VOP_BUS is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set 
+CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +# CONFIG_BLK_DEV_SD is not set +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_VMWARE_PVSCSI is not set +# CONFIG_XEN_SCSI_FRONTEND is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# CONFIG_SCSI_OSD_INITIATOR is not set +# CONFIG_ATA is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_RAID0 is not set +# CONFIG_MD_RAID1 is not set +# CONFIG_MD_RAID10 is not set +# CONFIG_MD_RAID456 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_MD_FAULTY is not set +# CONFIG_BCACHE is not set +# CONFIG_BLK_DEV_DM is not set +# CONFIG_TARGET_CORE is not set +CONFIG_FUSION=y +# CONFIG_FUSION_SPI is not set +# CONFIG_FUSION_SAS is not set +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +CONFIG_BONDING=y +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +CONFIG_IPVLAN=y +# CONFIG_IPVTAP is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_MACSEC is not set +# 
CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +# CONFIG_NLMON is not set +# CONFIG_NET_VRF is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_ALX is not set +CONFIG_NET_VENDOR_AURORA=y +# CONFIG_AURORA_NB8800 is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +# CONFIG_CHELSIO_T4 is not set +# CONFIG_CHELSIO_T4VF is not set +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +# CONFIG_TULIP is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_ULI526X is not set +# CONFIG_PCMCIA_XIRCOM is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +CONFIG_NET_VENDOR_EZCHIP=y +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_E1000E_HWTS=y +# CONFIG_IGB is not set +CONFIG_HINIC=m +# CONFIG_IGBVF is not set +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +# CONFIG_IXGBEVF is not set +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +# CONFIG_MVMDIO is not set +# CONFIG_SKGE is not set +# CONFIG_SKY2 is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +# CONFIG_MLX5_CORE is not set +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set 
+CONFIG_NET_VENDOR_QLOGIC=y +# CONFIG_QLA3XXX is not set +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +# CONFIG_NETXEN_NIC is not set +# CONFIG_QED is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCOM_EMAC is not set +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SMSC=y +# CONFIG_EPIC100 is not set +# CONFIG_SMSC911X is not set +# CONFIG_SMSC9420 is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_ALE is not set +# CONFIG_TLAN is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_ASIX_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +CONFIG_WLAN=y +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_WLAN_VENDOR_REALTEK=y 
+CONFIG_WLAN_VENDOR_RSI=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_WLAN_VENDOR_QUANTENNA=y + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +CONFIG_WAN=y +# CONFIG_HDLC is not set +# CONFIG_DLCI is not set +# CONFIG_SBNI is not set +CONFIG_XEN_NETDEV_FRONTEND=y +# CONFIG_XEN_NETDEV_BACKEND is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +# CONFIG_NET_FAILOVER is not set +CONFIG_ISDN=y +# CONFIG_ISDN_I4L is not set +# CONFIG_ISDN_CAPI is not set +# CONFIG_ISDN_DRV_GIGASET is not set +# CONFIG_HYSDN is not set +# CONFIG_MISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +# CONFIG_TABLET_USB_ACECAD is not set +# CONFIG_TABLET_USB_AIPTEK is not set +# CONFIG_TABLET_USB_GTCO is not set +# CONFIG_TABLET_USB_HANWANG is not set +# CONFIG_TABLET_USB_KBTAB is not set +# CONFIG_TABLET_USB_PEGASUS is not set +# CONFIG_TABLET_SERIAL_WACOM4 is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not 
set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_PCSPKR is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_ATLAS_BTNS is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_UINPUT is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_ROCKETPORT is not set +# CONFIG_CYCLADES is not set +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +# CONFIG_SYNCLINK is not set +# CONFIG_SYNCLINKMP is not set +# CONFIG_SYNCLINK_GT is not set +# CONFIG_NOZOMI is not set +# CONFIG_ISI is not set +# CONFIG_N_HDLC is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +# CONFIG_SERIAL_8250_MOXA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y 
+CONFIG_HW_RANDOM_VIA=y +CONFIG_NVRAM=y +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +# CONFIG_HANGCHECK_TIMER is not set +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_NSC is not set +# CONFIG_TCG_ATMEL is not set +# CONFIG_TCG_INFINEON is not set +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_TELCLOCK is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# CONFIG_RANDOM_TRUST_CPU is not set + +# +# I2C support +# +# CONFIG_I2C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +CONFIG_PTP_1588_CLOCK_KVM=y +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_BROXTON is not set +# CONFIG_PINCTRL_CANNONLAKE is not set +# CONFIG_PINCTRL_CEDARFORK is not set +# CONFIG_PINCTRL_DENVERTON is not set +# CONFIG_PINCTRL_GEMINILAKE is not set +# CONFIG_PINCTRL_ICELAKE is not set +# CONFIG_PINCTRL_LEWISBURG is not set +# CONFIG_PINCTRL_SUNRISEPOINT is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_AMDPT is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_ICH is not set +# CONFIG_GPIO_LYNXPOINT is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VX855 is not set + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set + +# +# MFD GPIO expanders +# + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 
is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LTC3651 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_INTEL_POWERCLAMP is not set +CONFIG_X86_PKG_TEMP_THERMAL=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# CONFIG_INTEL_PCH_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +# CONFIG_WATCHDOG_SYSFS is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is 
not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ALIM1535_WDT is not set +# CONFIG_ALIM7101_WDT is not set +# CONFIG_EBC_C384_WDT is not set +# CONFIG_F71808E_WDT is not set +# CONFIG_SP5100_TCO is not set +# CONFIG_SBC_FITPC2_WATCHDOG is not set +# CONFIG_EUROTECH_WDT is not set +# CONFIG_IB700_WDT is not set +# CONFIG_IBMASR is not set +# CONFIG_WAFER_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_IE6XX_WDT is not set +# CONFIG_ITCO_WDT is not set +# CONFIG_IT8712F_WDT is not set +# CONFIG_IT87_WDT is not set +# CONFIG_HP_WATCHDOG is not set +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +# CONFIG_NV_TCO is not set +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +# CONFIG_SMSC_SCH311X_WDT is not set +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_VIA_WDT is not set +# CONFIG_W83627HF_WDT is not set +# CONFIG_W83877F_WDT is not set +# CONFIG_W83977F_WDT is not set +# CONFIG_MACHZ_WDT is not set +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +# CONFIG_XEN_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_AGP_SIS=y +CONFIG_AGP_VIA=y +CONFIG_INTEL_GTT=y +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_VGA_SWITCHEROO=y +# CONFIG_DRM is not set +# CONFIG_DRM_DP_CEC is not set + +# +# ACP (Audio CoProcessor) Configuration +# + +# +# AMD Library routines +# +# CONFIG_DRM_XEN is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set 
+CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_S6E63M0 is not set +# CONFIG_LCD_LD9040 is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_APPLE is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_GPIO is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_VGACON_SOFT_SCROLLBACK=y +CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 +# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +# CONFIG_HID_BETOP_FF is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_CMEDIA is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELAN is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GOOGLE_HAMMER is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not 
set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +CONFIG_HID_ITE=y +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_DJ is not set +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_REDRAGON=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +CONFIG_HID_NTRIG=y +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +CONFIG_USB_MON=y +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +# CONFIG_USB_STORAGE is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not 
set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_F8153X is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MXUPORT is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +# CONFIG_USB_SERIAL_PL2303 is not set +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_XSENS_MT is not set +# CONFIG_USB_SERIAL_WISHBONE is not set +# CONFIG_USB_SERIAL_SSU100 is not set +# CONFIG_USB_SERIAL_QT2 is not set +# CONFIG_USB_SERIAL_UPD78F0730 is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# 
CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_CLEVO_MAIL is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_INTEL_SS4200 is not set +# CONFIG_LEDS_LT3593 is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_MLXCPLD is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=y +# CONFIG_EDAC_GHES is not set +# CONFIG_EDAC_AMD64 is not set +# CONFIG_EDAC_E752X is not set +# CONFIG_EDAC_I82975X is not set +# CONFIG_EDAC_I3000 is not set +# CONFIG_EDAC_I3200 is not set +# CONFIG_EDAC_IE31200 is not set +# CONFIG_EDAC_X38 is not set +# CONFIG_EDAC_I5400 is not set +# CONFIG_EDAC_I7CORE is not set +# CONFIG_EDAC_I5000 is not set +# CONFIG_EDAC_I5100 is not set +# CONFIG_EDAC_I7300 is not set +# CONFIG_EDAC_SBRIDGE is not set +# CONFIG_EDAC_SKX is not set +# CONFIG_EDAC_PND2 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not 
set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_INTEL_IOATDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +# CONFIG_DW_DMAC is not set +CONFIG_DW_DMAC_PCI=y +CONFIG_HSU_DMA=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +CONFIG_AUXDISPLAY=y +# CONFIG_HD44780 is not set +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_UIO is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_MENU=y +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +# CONFIG_XEN_SELFBALLOONING is not set +# CONFIG_XEN_BALLOON_MEMORY_HOTPLUG is not set +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=y +CONFIG_XEN_BACKEND=y +CONFIG_XENFS=y +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_TMEM=m +CONFIG_XEN_PCIDEV_BACKEND=m +# CONFIG_XEN_PVCALLS_FRONTEND is not set +# CONFIG_XEN_PVCALLS_BACKEND is not set +CONFIG_XEN_PRIVCMD=y +CONFIG_XEN_ACPI_PROCESSOR=m +# CONFIG_XEN_MCE_LOG is not set +CONFIG_XEN_HAVE_PVMMU=y +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +CONFIG_XEN_SYMS=y +CONFIG_XEN_HAVE_VPMU=y +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5208 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_FB_XGI is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# CONFIG_LTE_GDM724X is not set +# CONFIG_DGNC is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_FB_TFT is not set +# CONFIG_MOST is not set +# CONFIG_GREYBUS is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_EROFS_FS is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ACERHDF is not set +# CONFIG_ASUS_LAPTOP is not set +# CONFIG_DELL_SMBIOS is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_FUJITSU_LAPTOP is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_ACCEL is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_PANASONIC_LAPTOP is not set +# CONFIG_THINKPAD_ACPI is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_EEEPC_LAPTOP is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_INTEL_INT0002_VGPIO is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_INTEL_IPS is not set +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_LAPTOP 
is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_APPLE_GMUX is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_PVPANIC=y +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_TURBO_MAX_3 is not set +CONFIG_PMC_ATOM=y +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_IOVA=y +CONFIG_AMD_IOMMU=y +# CONFIG_AMD_IOMMU_V2 is not set +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_SVM is not set +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_IRQ_REMAP=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# + +# +# NXP/Freescale QorIQ SoC drivers +# + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND is not set +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_LPSS_PCI is not set +# CONFIG_PWM_LPSS_PLATFORM is not set + +# +# IRQ chip support +# +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +CONFIG_POWERCAP=y +# CONFIG_INTEL_RAPL is not set +# CONFIG_IDLE_INJECT is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_RAS=y +# CONFIG_RAS_CEC is not set +# CONFIG_THUNDERBOLT is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y 
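For illustration only, and not part of this configuration fragment: kernel sources normally consume symbols such as the CONFIG_FS_DAX=y set just above through the IS_ENABLED() macro from <linux/kconfig.h>. A minimal sketch follows; the helper example_dax_available() is hypothetical and exists only to show the pattern.

#include <linux/kconfig.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only: with CONFIG_FS_DAX=y as in
 * the fragment above, IS_ENABLED(CONFIG_FS_DAX) evaluates to 1 at compile
 * time (for =m symbols it is also non-zero), so the branch is resolved
 * statically and dead code is dropped by the compiler. */
static bool example_dax_available(void)
{
	return IS_ENABLED(CONFIG_FS_DAX);
}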
+CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +# CONFIG_FUSE_FS is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# 
CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +CONFIG_IMA_DEFAULT_HASH="sha1" +# CONFIG_IMA_WRITE_POLICY is not set +# CONFIG_IMA_READ_POLICY is not set +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +CONFIG_IMA_TRUSTED_KEYRING=y +# CONFIG_IMA_BLACKLIST_KEYRING is not set +# CONFIG_IMA_LOAD_X509 is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +# CONFIG_EVM_LOAD_X509 is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is 
not set +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_MCRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +CONFIG_CRYPTO_GCM=y +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128L is not set +# CONFIG_CRYPTO_AEGIS256 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set +# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set +# CONFIG_CRYPTO_MORUS640 is not set +# CONFIG_CRYPTO_MORUS640_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280 is not set +# CONFIG_CRYPTO_MORUS1280_SSE2 is not set +# CONFIG_CRYPTO_MORUS1280_AVX2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32C_INTEL is not set +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=y +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +# CONFIG_CRYPTO_SHA512_SSSE3 is not set +# CONFIG_CRYPTO_SHA1_MB is not set +# CONFIG_CRYPTO_SHA256_MB is not set +# CONFIG_CRYPTO_SHA512_MB is not set +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_X86_64=y +# CONFIG_CRYPTO_AES_NI_INTEL is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_BLOWFISH_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# 
CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set +# CONFIG_CRYPTO_TWOFISH_X86_64 is not set +# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set +# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_PADLOCK is not set +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_HISILICON is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +# CONFIG_CRC8 is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DIRECT_OPS=y +CONFIG_SWIOTLB=y +CONFIG_SGL_ALLOC=y 
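The crypto section above enables CONFIG_CRYPTO_USER_API_HASH, which exposes the kernel's hash implementations to userspace through the AF_ALG socket family. As a minimal userspace sketch (error handling omitted; this program is not part of the patch), computing a SHA-256 digest — CONFIG_CRYPTO_SHA256=y in the same fragment — looks roughly like this:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",	/* provided by CONFIG_CRYPTO_SHA256=y */
	};
	unsigned char digest[32];
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);	/* transform socket */
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);			/* operation socket */

	write(op, "hello", 5);				/* data to be hashed */
	read(op, digest, sizeof(digest));		/* SHA-256 of "hello" */

	for (int i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(op);
	close(tfm);
	return 0;
}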
+CONFIG_IOMMU_HELPER=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +# CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_DEBUG_RODATA_TEST=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +CONFIG_SLUB_STATS=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_VMACACHE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_KASAN=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +# CONFIG_TEST_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +CONFIG_KCOV=y +CONFIG_KCOV_INSTRUMENT_ALL=y +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=140 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_TRACE_IRQFLAGS=y +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=100 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS 
is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN_ALIGNMENT is not set +# CONFIG_TEST_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index e32fc1f274d854d48e1175a27210b87ccfaf509c..7484a27861d4cec6ed011e637169070905eb0036 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -213,7 +213,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a450ad573dcb40aa86786e3e383c372d8e18120b..9edfa5469f9f7538f0eba2ab538b37bca86b90c2 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -60,9 +60,6 @@ endif ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o - obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/ - obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/ - obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/ obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o endif diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 
2a356b948720e10f93d49765a071c3c6dd6edb36..3ea71b8718135fc5cee116e2ccd1896189882052 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad( } static void crypto_aegis128_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS128_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_aesni_process_crypt(&state, req, ops); + crypto_aegis128_aesni_process_crypt(&state, &walk, ops); crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index dbe8bb980da15c46cb3c815d0ab529cd2522a346..1b1b39c66c5e2c907dbbaa84e6e58cc77fd34899 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad( } static void crypto_aegis128l_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128L_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) { + ops->crypt_blocks(state, round_down(walk->nbytes, + AEGIS128L_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = 
crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128l_aesni_process_crypt(&state, req, ops); + crypto_aegis128l_aesni_process_crypt(&state, &walk, ops); crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index 8bebda2de92fe3f4453cb453596faeb4dfbfd6bb..6227ca3220a05a0d40c3558c3349fefdeb1ccafb 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad( } static void crypto_aegis256_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS256_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS256_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS256_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis256_aesni_init(&state, ctx->key, req->iv); crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis256_aesni_process_crypt(&state, req, ops); + crypto_aegis256_aesni_process_crypt(&state, &walk, ops); crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index acbe7e8336d8556c272f915c7543d02132a852d3..917f25e4d0a80411f5b24c082daca3b0d27e63cc 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -817,7 +817,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length && (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length < PAGE_SIZE)) { + req->src->offset + req->src->length <= PAGE_SIZE)) { scatterwalk_start(&assoc_sg_walk, req->src); assoc = scatterwalk_map(&assoc_sg_walk); } else { @@ -830,11 +830,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); - 
scatterwalk_start(&src_sg_walk, src_sg); - if (req->src != req->dst) { - dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen); - scatterwalk_start(&dst_sg_walk, dst_sg); + if (left) { + src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); + scatterwalk_start(&src_sg_walk, src_sg); + if (req->src != req->dst) { + dst_sg = scatterwalk_ffwd(dst_start, req->dst, + req->assoclen); + scatterwalk_start(&dst_sg_walk, dst_sg); + } } kernel_fpu_begin(); diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index 5773e11610723f74fc4f99d44a53d6ef8402d050..48e20da286dfbece3a1a55df908d1592f8574d19 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -242,8 +242,15 @@ MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id); static int __init crc32c_intel_mod_init(void) { + struct cpuinfo_x86 *c = &boot_cpu_data; + if (!x86_match_cpu(crc32c_cpu_id)) return -ENODEV; + + if ((c->x86_vendor == X86_VENDOR_ZHAOXIN || c->x86_vendor == X86_VENDOR_CENTAUR) && + (c->x86 <= 7 && c->x86_model <= 59)) { + return -ENODEV; + } #ifdef CONFIG_X86_64 if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { alg.update = crc32c_pcl_intel_update; diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index cd4df93225014b6fc64e3020bf07c0600647fd7c..7bbfe7d35da7dd0cc8b637517bd99be7729b7041 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out) return 0; } -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { if (irq_fpu_usable()) { kernel_fpu_begin(); - *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); + *(__u16 *)out = crc_t10dif_pcl(crc, data, len); kernel_fpu_end(); } else - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + *(__u16 *)out = crc_t10dif_generic(crc, data, len); return 0; } @@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - return __chksum_finup(&ctx->crc, data, len, out); + return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, length, out); + return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c index 0dccdda1eb3a1fd5cd6bc143d3287137e5e359ea..7e600f8bcdad83a3db04eb75579856e79cdd6d40 100644 --- a/arch/x86/crypto/morus1280_glue.c +++ b/arch/x86/crypto/morus1280_glue.c @@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad( static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, struct morus1280_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS1280_BLOCK_SIZE - 1; - - if (chunksize > 0) - 
ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS1280_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, + MORUS1280_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); struct morus1280_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus1280_glue_process_crypt(&state, ops, req); + crypto_morus1280_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c index 7b58fe4d9bd1a2f403e7f6fa29168d76fdc07015..cb3a817320160cb206523bdbf3206da2f998a8a1 100644 --- a/arch/x86/crypto/morus640_glue.c +++ b/arch/x86/crypto/morus640_glue.c @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad( static void crypto_morus640_glue_process_crypt(struct morus640_state *state, struct morus640_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS640_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS640_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, MORUS640_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus640_ctx *ctx = crypto_aead_ctx(tfm); struct morus640_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus640_glue_process_crypt(&state, ops, req); + crypto_morus640_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index 3b6e70d085da89775317c8e2a560625ab4799e01..8457cdd47f751167a2321ebf063eb18bdb4ef8aa 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2) vpaddq t2,t1,t1 vmovq t1x,d4 + # Now do a partial reduction mod 
(2^130)-5, carrying h0 -> h1 -> h2 -> + # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small + # amount. Careful: we must not assume the carry bits 'd0 >> 26', + # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit + # integers. It's true in a single-block implementation, but not here. + # d1 += d0 >> 26 mov d0,%rax shr $26,%rax @@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2) # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax - lea (%eax,%eax,4),%eax - add %eax,%ebx + lea (%rax,%rax,4),%rax + add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 - mov %ebx,%eax - shr $26,%eax + mov %rbx,%rax + shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index c88c670cb5fc6d4b6331ba18882fae34038400b4..5851c7418fb73241cd0f346a2d26f008865c5715 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2) # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax - lea (%eax,%eax,4),%eax - add %eax,%ebx + lea (%rax,%rax,4),%rax + add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 - mov %ebx,%eax - shr $26,%eax + mov %rbx,%rax + shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx @@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2) paddq t2,t1 movq t1,d4 + # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> + # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small + # amount. Careful: we must not assume the carry bits 'd0 >> 26', + # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit + # integers. It's true in a single-block implementation, but not here. + # d1 += d0 >> 26 mov d0,%rax shr $26,%rax @@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2) # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax - lea (%eax,%eax,4),%eax - add %eax,%ebx + lea (%rax,%rax,4),%rax + add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 - mov %ebx,%eax - shr $26,%eax + mov %rbx,%rax + shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile deleted file mode 100644 index 815ded3ba90e57685dd7176c9f23c73a6bc0cabe..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -OBJECT_FILES_NON_STANDARD := y - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o - sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \ - sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o -endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c deleted file mode 100644 index b93805664c1dd0ad290e731bc145dfbcb33f5a1c..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb.c +++ /dev/null @@ -1,1011 +0,0 @@ -/* - * Multi buffer SHA1 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. 
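As an editorial aside, not part of the patch: the partial-reduction comment added to the poly1305 assembly above can be written out in C to show why the carries must now be kept in 64-bit registers. The helper name and the d[]/h[] layout below are illustrative only; the point is that in the multi-block code the unreduced limb sums d0..d4 can be large enough that carries such as (d4 >> 26) * 5 no longer fit in 32 bits, which is what the switch from %eax/%ebx to %rax/%rbx addresses.

#include <stdint.h>

/* Illustrative sketch of the carry chain h0 -> h1 -> h2 -> h3 -> h4 -> h0 -> h1.
 * d[0..4] are the unreduced 26-bit-limb accumulators (64-bit), h[0..4] the
 * partially reduced limbs, each below 2^26 except h[1], which may carry a
 * small excess, exactly as the patch comment states. */
static void poly1305_partial_reduce(uint64_t d[5], uint32_t h[5])
{
	uint64_t c;

	d[1] += d[0] >> 26;  h[0] = d[0] & 0x3ffffff;
	d[2] += d[1] >> 26;  h[1] = d[1] & 0x3ffffff;
	d[3] += d[2] >> 26;  h[2] = d[2] & 0x3ffffff;
	d[4] += d[3] >> 26;  h[3] = d[3] & 0x3ffffff;

	/* Carry out of d4, scaled by 5; in the multi-block case this value
	 * may exceed 32 bits, so it must be computed in 64-bit arithmetic. */
	c = (d[4] >> 26) * 5;
	h[4] = d[4] & 0x3ffffff;

	c += h[0];			/* h0 += (d4 >> 26) * 5 */
	h[1] += (uint32_t)(c >> 26);	/* h1 += h0 >> 26 */
	h[0]  = (uint32_t)(c & 0x3ffffff);
}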
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "sha1_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha1_mb_alg_state; - -struct sha1_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); -static asmlinkage struct job_sha1* (*sha1_job_mgr_submit) - (struct sha1_mb_mgr *state, struct job_sha1 *job); -static asmlinkage struct job_sha1* (*sha1_job_mgr_flush) - (struct sha1_mb_mgr *state); -static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job) - (struct sha1_mb_mgr *state); - -static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA1_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA1_BLOCK_SIZE - 1) & - (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA1_PADLENGTHFIELD_SIZE; - -#if SHA1_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA1_LOG2_BLOCK_SIZE; -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, - struct sha1_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. - */ - copy_len = len & (SHA1_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA1_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA1_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr, - &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. 
- */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha1_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha1_hash_ctx *) - sha1_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha1_hash_ctx - *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. - * If it is not ready, resubmit the job to finish processing. - * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. - * Otherwise, all jobs currently being managed by the hash_ctx_mgr - * still need processing. - */ - struct sha1_hash_ctx *ctx; - - ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); - return sha1_ctx_mgr_resubmit(mgr, ctx); -} - -static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr) -{ - sha1_job_mgr_init(&mgr->mgr); -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr, - struct sha1_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - return ctx; - } - - /* - * If we made it here, there were no errors during this call to - * submit - */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently - * being processed. - */ - ctx->status = (flags & HASH_LAST) ? - (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. - */ - if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) { - /* - * Compute how many bytes to copy from user buffer into - * extra block - */ - uint32_t copy_len = SHA1_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* - * The extra block should never contain more than 1 block - * here - */ - assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); - - /* - * If the extra block buffer contains exactly 1 block, it can - * be hashed. 
- */ - if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha1_hash_ctx *) - sha1_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - return sha1_ctx_mgr_resubmit(mgr, ctx); -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) -{ - struct sha1_hash_ctx *ctx; - - while (1) { - ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - return NULL; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha1_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha1_ctx_mgr_resubmit returned a job, it is ready to be - * returned. Otherwise, all jobs currently being managed by the - * sha1_ctx_mgr still need processing. Loop. - */ - if (ctx) - return ctx; - } -} - -static int sha1_mb_init(struct ahash_request *areq) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA1_H0; - sctx->job.result_digest[1] = SHA1_H1; - sctx->job.result_digest[2] = SHA1_H2; - sctx->job.result_digest[3] = SHA1_H3; - sctx->job.result_digest[4] = SHA1_H4; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be32 *dst = (__be32 *) rctx->out; - - for (i = 0; i < 5; ++i) - dst[i] = cpu_to_be32(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha1_hash_ctx *sha_ctx; - - /* more work ? 
*/ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha1_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha1_ctx_mgr_flush(cstate->mgr); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha1_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - - /* remove from work list */ - spin_lock(&cstate->work_lock); - list_del(&rctx->waiter); - spin_unlock(&cstate->work_lock); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock(&cstate->work_lock); - list_del(&req_ctx->waiter); - spin_unlock(&cstate->work_lock); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); - } - - return 0; -} - -static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock(&cstate->work_lock); - list_add_tail(&rctx->waiter, &cstate->work_list); - spin_unlock(&cstate->work_lock); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha1_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - sha1_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - kernel_fpu_end(); - - /* 
check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - sha1_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct sha1_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha1_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, - HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_export(struct ahash_request *areq, void *out) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha1_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", - 
CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha1_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha1_hash_ctx)); - - return 0; -} - -static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha1_mb_areq_alg = { - .init = sha1_mb_init, - .update = sha1_mb_update, - .final = sha1_mb_final, - .finup = sha1_mb_finup, - .export = sha1_mb_export, - .import = sha1_mb_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_hash_ctx), - .base = { - .cra_name = "__sha1-mb", - .cra_driver_name = "__intel_sha1-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha1_mb_areq_alg.halg.base.cra_list), - .cra_init = sha1_mb_areq_init_tfm, - .cra_exit = sha1_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha1_hash_ctx), - } - } -}; - -static int sha1_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha1_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha1_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha1_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha1_mb_async_digest(struct ahash_request *req) -{ - 
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha1_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha1_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha1_mb_async_alg = { - .init = sha1_mb_async_init, - .update = sha1_mb_async_update, - .final = sha1_mb_async_final, - .finup = sha1_mb_async_finup, - .digest = sha1_mb_async_digest, - .export = sha1_mb_async_export, - .import = sha1_mb_async_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_hash_ctx), - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. 
- */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list), - .cra_init = sha1_mb_async_init_tfm, - .cra_exit = sha1_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha1_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha1_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if (time_before(cur_time, rctx->tag.expire)) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha1_hash_ctx *) - sha1_ctx_mgr_flush(cstate->mgr); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha1_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate); - - sha1_job_mgr_init = sha1_mb_mgr_init_avx2; - sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2; - sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2; - sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2; - - if (!sha1_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha1_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha1_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha1_mb_alg_state.flusher = &sha1_mb_flusher; - - err = crypto_register_ahash(&sha1_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha1_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha1_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha1_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha1_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - crypto_unregister_ahash(&sha1_mb_async_alg); - crypto_unregister_ahash(&sha1_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha1_mb_alg_state.alg_cstate); -} - -module_init(sha1_mb_mod_init); -module_exit(sha1_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, 
multi buffer accelerated"); - -MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h deleted file mode 100644 index 9454bd16f9f816ba94d3a392ba5421df980f792e..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Header file for multi buffer SHA context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha1_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, - -#ifdef HASH_CTX_DEBUG - HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4, -#endif -}; - - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - - -/* Hash Constants and Typedefs */ -#define SHA1_DIGEST_LENGTH 5 -#define SHA1_LOG2_BLOCK_SIZE 6 - -#define SHA1_PADLENGTHFIELD_SIZE 8 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha1_ctx_mgr { - struct sha1_mb_mgr mgr; -}; - -/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */ - -struct sha1_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha1 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h deleted file mode 100644 index 08ad1a9acfd727327c68519c17294d18394a3732..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Header file for multi buffer SHA1 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - - -#include - -#define NUM_SHA1_DIGEST_WORDS 5 - -enum job_sts { STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha1 { - u8 *buffer; - u32 len; - u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -/* SHA1 out-of-order scheduler */ - -/* typedef uint32_t sha1_digest_array[5][8]; */ - -struct sha1_args_x8 { - uint32_t digest[5][8]; - uint8_t *data_ptr[8]; -}; - -struct sha1_lane_data { - struct job_sha1 *job_in_lane; -}; - -struct sha1_mb_mgr { - struct sha1_args_x8 args; - - uint32_t lens[8]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha1_lane_data ldata[8]; -}; - - -#define SHA1_MB_MGR_NUM_LANES_AVX2 8 - -void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state); -struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state, - struct job_sha1 *job); -struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state); -struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S deleted file mode 100644 index 86688c6e7a25bb7841470f0439ffd583dd3c550b..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Header file for multi buffer SHA1 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * Contact Information: - * James Guilford - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count is in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. Its arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. -# - -######################################################################### - -#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_ -#define _SHA1_MB_MGR_DATASTRUCT_ASM_ - -## START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -## FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -## END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -######################################################################## - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - %%tmp) -.if (tmp > 0) - .lcomm tmp -.endif -.endstruc -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - - - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - - -#endif - -######################################################################## -#### Define constants -######################################################################## - -######################################################################## -#### Define SHA1 Out Of Order Data Structures -######################################################################## - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - -_LANE_DATA_size = _FIELD_OFFSET -_LANE_DATA_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # SHA1_ARGS_X8 -### name size align -FIELD _digest, 4*5*8, 16 # transposed digest -FIELD _data_ptr, 8*8, 8 # array of pointers to data -END_FIELDS - -_SHA1_ARGS_X4_size = _FIELD_OFFSET -_SHA1_ARGS_X4_align = _STRUCT_ALIGN -_SHA1_ARGS_X8_size = _FIELD_OFFSET -_SHA1_ARGS_X8_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align -FIELD _lens, 4*8, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align -END_FIELDS - -_MB_MGR_size = _FIELD_OFFSET 
-_MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - - -######################################################################## -#### Define constants -######################################################################## - -#define STS_UNKNOWN 0 -#define STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -######################################################################## -#### Define JOB_SHA1 structure -######################################################################## - -START_FIELDS # JOB_SHA1 - -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 4, 4 # length in bytes -FIELD _result_digest, 5*4, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - -_JOB_SHA1_size = _FIELD_OFFSET -_JOB_SHA1_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S deleted file mode 100644 index 7cfba738f104f52d27aa4f94a867f3e99e6c5f3f..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Flush routine for SHA1 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include -#include -#include "sha1_mb_mgr_datastruct.S" - - -.extern sha1_x8_avx2 - -# LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -# idx must be a register not clobbered by sha1_x8_avx2 -#define idx %r8 -#define DWORD_idx %r8d - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx -#define tmp2_w %ebx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 %arg1 - -#define extra_blocks %arg2 -#define p %arg2 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha1_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+3, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne four(%rip), idx - offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne five(%rip), idx - offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne six(%rip), idx - offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne seven(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 8 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 4*I) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - vmovdqu _lens+0*16(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqu %xmm0, _lens+0*16(state) - vmovdqu %xmm1, _lens+1*16(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha1_x8_avx2 - # state and idx are intact - - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state , idx, 4) , %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, 
%xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), tmp2_w - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - mov tmp2_w, offset(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha1_mb_mgr_flush_avx2) - - -################################################################# - -.align 16 -ENTRY(sha1_mb_mgr_get_comp_job_avx2) - push %rbx - - ## if bit 32+3 is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $(32+3), unused_lanes - jc .return_null - - # Find min length - vmovdqu _lens(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - test $~0xF, idx - jnz .return_null - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), tmp2_w - - vmovdqu %xmm0, _result_digest(job_rax) - movl tmp2_w, _result_digest+1*16(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha1_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: -.octa 0x000000000000000000000000FFFFFFF0 - -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 -one: -.quad 1 -two: -.quad 2 -three: -.quad 3 -four: -.quad 4 -five: -.quad 5 -six: -.quad 6 -seven: -.quad 7 diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c deleted file mode 100644 index d2add0d35f43bb8d5c1a2fb53205ca392675a09b..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Initialization code for multi buffer SHA1 algorithm for AVX2 - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "sha1_mb_mgr.h" - -void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state) -{ - unsigned int j; - state->unused_lanes = 0xF76543210ULL; - for (j = 0; j < 8; j++) { - state->lens[j] = 0xFFFFFFFF; - state->ldata[j].job_in_lane = NULL; - } -} diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S deleted file mode 100644 index 7a93b1c0d69ab7c7b2160ab9a67de5b4918b5a91..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA1 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include "sha1_mb_mgr_datastruct.S" - - -.extern sha1_x8_avx - -# LINUX register definitions -arg1 = %rdi -arg2 = %rsi -size_offset = %rcx -tmp2 = %rcx -extra_blocks = %rdx - -# Common definitions -#define state arg1 -#define job %rsi -#define len2 arg2 -#define p2 arg2 - -# idx must be a register not clobbered by sha1_x8_avx2 -idx = %r8 -DWORD_idx = %r8d -last_len = %r8 - -p = %r11 -start_offset = %r11 - -unused_lanes = %rbx -BYTE_unused_lanes = %bl - -job_rax = %rax -len = %rax -DWORD_len = %eax - -lane = %r12 -tmp3 = %r12 - -tmp = %r9 -DWORD_tmp = %r9d - -lane_data = %r10 - -# JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha1_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - mov unused_lanes, lane - and $0xF, lane - shr $4, unused_lanes - imul $_LANE_DATA_size, lane, lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - shl $4, len - or lane, len - - movl DWORD_len, _lens(state , lane, 4) - - # Load digest words from result_digest - vmovdqu _result_digest(job), %xmm0 - mov _result_digest+1*16(job), DWORD_tmp - vmovd %xmm0, _args_digest(state, lane, 4) - vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) - vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) - vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) - movl DWORD_tmp, _args_digest+4*32(state , lane, 4) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xF, unused_lanes - jne return_null - -start_loop: - # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqa %xmm0, _lens + 0*16(state) - vmovdqa %xmm1, _lens + 1*16(state) - - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha1_x8_avx2 - - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul
$_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), DWORD_tmp - - vmovdqu %xmm0, _result_digest(job_rax) - movl DWORD_tmp, _result_digest+1*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return - -ENDPROC(sha1_mb_mgr_submit_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: - .octa 0x000000000000000000000000FFFFFFF0 diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S deleted file mode 100644 index 20f77aa633dee6e20ce8d37e121db1ff44f08143..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Multi-buffer SHA1 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford - * Tim Chen - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include "sha1_mb_mgr_datastruct.S" - -## code to compute oct SHA1 using SSE-256 -## outer calling routine takes care of save and restore of XMM registers - -## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15# ymm0-15 -## -## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 -## Linux preserves: rdi rbp r8 -## -## clobbers ymm0-15 - - -# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 -# "transpose" data in {r0...r7} using temps {t0...t1} -# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# r4 = {e7 e6 e5 e4 e3 e2 e1 e0} -# r5 = {f7 f6 f5 f4 f3 f2 f1 f0} -# r6 = {g7 g6 g5 g4 g3 g2 g1 g0} -# r7 = {h7 h6 h5 h4 h3 h2 h1 h0} -# -# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {h0 g0 f0 e0 d0 c0 b0 a0} -# r1 = {h1 g1 f1 e1 d1 c1 b1 a1} -# r2 = {h2 g2 f2 e2 d2 c2 b2 a2} -# r3 = {h3 g3 f3 e3 d3 c3 b3 a3} -# r4 = {h4 g4 f4 e4 d4 c4 b4 a4} -# r5 = {h5 g5 f5 e5 d5 c5 b5 a5} -# r6 = {h6 g6 f6 e6 d6 c6 b6 a6} -# r7 = {h7 g7 f7 e7 d7 c7 b7 a7} -# - -.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 - # process top half (r0..r3) {a...d} - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1} - vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} - vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} - vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} - - # use r2 in place of t0 - # process bottom half (r4..r7) {e...h} - vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} - vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} - vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} - vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} - vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} - vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} - vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} - vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} - - vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 - vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 - vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 - vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 - vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 - vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 - vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 - vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 - -.endm -## -## Magic functions defined in FIPS 180-1 -## -# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D))) -.macro MAGIC_F0 regF regB regC regD regT - vpxor \regD, \regC, \regF - vpand \regB, \regF, \regF - vpxor \regD, \regF, \regF -.endm - -# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D) -.macro MAGIC_F1 regF regB regC regD regT - vpxor 
\regC, \regD, \regF - vpxor \regB, \regF, \regF -.endm - -# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D)) -.macro MAGIC_F2 regF regB regC regD regT - vpor \regC, \regB, \regF - vpand \regC, \regB, \regT - vpand \regD, \regF, \regF - vpor \regT, \regF, \regF -.endm - -# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D) -.macro MAGIC_F3 regF regB regC regD regT - MAGIC_F1 \regF,\regB,\regC,\regD,\regT -.endm - -# PROLD reg, imm, tmp -.macro PROLD reg imm tmp - vpsrld $(32-\imm), \reg, \tmp - vpslld $\imm, \reg, \reg - vpor \tmp, \reg, \reg -.endm - -.macro PROLD_nd reg imm tmp src - vpsrld $(32-\imm), \src, \tmp - vpslld $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC - vpaddd \immCNT, \regE, \regE - vpaddd \memW*32(%rsp), \regE, \regE - PROLD_nd \regT, 5, \regF, \regA - vpaddd \regT, \regE, \regE - \MAGIC \regF, \regB, \regC, \regD, \regT - PROLD \regB, 30, \regT - vpaddd \regF, \regE, \regE -.endm - -.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC - vpaddd \immCNT, \regE, \regE - offset = ((\memW - 14) & 15) * 32 - vmovdqu offset(%rsp), W14 - vpxor W14, W16, W16 - offset = ((\memW - 8) & 15) * 32 - vpxor offset(%rsp), W16, W16 - offset = ((\memW - 3) & 15) * 32 - vpxor offset(%rsp), W16, W16 - vpsrld $(32-1), W16, \regF - vpslld $1, W16, W16 - vpor W16, \regF, \regF - - ROTATE_W - - offset = ((\memW - 0) & 15) * 32 - vmovdqu \regF, offset(%rsp) - vpaddd \regF, \regE, \regE - PROLD_nd \regT, 5, \regF, \regA - vpaddd \regT, \regE, \regE - \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D) - PROLD \regB,30, \regT - vpaddd \regF, \regE, \regE -.endm - -######################################################################## -######################################################################## -######################################################################## - -## FRAMESZ plus pushes must be an odd multiple of 8 -YMM_SAVE = (15-15)*32 -FRAMESZ = 32*16 + YMM_SAVE -_YMM = FRAMESZ - YMM_SAVE - -#define VMOVPS vmovups - -IDX = %rax -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 -inp4 = %r13 -inp5 = %r14 -inp6 = %r15 -inp7 = %rcx -arg1 = %rdi -arg2 = %rsi -RSP_SAVE = %rdx - -# ymm0 A -# ymm1 B -# ymm2 C -# ymm3 D -# ymm4 E -# ymm5 F AA -# ymm6 T0 BB -# ymm7 T1 CC -# ymm8 T2 DD -# ymm9 T3 EE -# ymm10 T4 TMP -# ymm11 T5 FUN -# ymm12 T6 K -# ymm13 T7 W14 -# ymm14 T8 W15 -# ymm15 T9 W16 - - -A = %ymm0 -B = %ymm1 -C = %ymm2 -D = %ymm3 -E = %ymm4 -F = %ymm5 -T0 = %ymm6 -T1 = %ymm7 -T2 = %ymm8 -T3 = %ymm9 -T4 = %ymm10 -T5 = %ymm11 -T6 = %ymm12 -T7 = %ymm13 -T8 = %ymm14 -T9 = %ymm15 - -AA = %ymm5 -BB = %ymm6 -CC = %ymm7 -DD = %ymm8 -EE = %ymm9 -TMP = %ymm10 -FUN = %ymm11 -K = %ymm12 -W14 = %ymm13 -W15 = %ymm14 -W16 = %ymm15 - -.macro ROTATE_ARGS - TMP_ = E - E = D - D = C - C = B - B = A - A = TMP_ -.endm - -.macro ROTATE_W -TMP_ = W16 -W16 = W15 -W15 = W14 -W14 = TMP_ -.endm - -# 8 streams x 5 32bit words per digest x 4 bytes per word -#define DIGEST_SIZE (8*5*4) - -.align 32 - -# void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size) -# arg 1 : pointer to array[4] of pointer to input data -# arg 2 : size (in blocks) ;; assumed to be >= 1 -# -ENTRY(sha1_x8_avx2) - - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - #save rsp - mov %rsp, RSP_SAVE - sub $FRAMESZ, %rsp - - #align rsp to 32 Bytes - and $~0x1F, %rsp - - ## Initialize digests - vmovdqu 0*32(arg1), A - vmovdqu 1*32(arg1), 
B - vmovdqu 2*32(arg1), C - vmovdqu 3*32(arg1), D - vmovdqu 4*32(arg1), E - - ## transpose input onto stack - mov _data_ptr+0*8(arg1),inp0 - mov _data_ptr+1*8(arg1),inp1 - mov _data_ptr+2*8(arg1),inp2 - mov _data_ptr+3*8(arg1),inp3 - mov _data_ptr+4*8(arg1),inp4 - mov _data_ptr+5*8(arg1),inp5 - mov _data_ptr+6*8(arg1),inp6 - mov _data_ptr+7*8(arg1),inp7 - - xor IDX, IDX -lloop: - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F - I=0 -.rep 2 - VMOVPS (inp0, IDX), T0 - VMOVPS (inp1, IDX), T1 - VMOVPS (inp2, IDX), T2 - VMOVPS (inp3, IDX), T3 - VMOVPS (inp4, IDX), T4 - VMOVPS (inp5, IDX), T5 - VMOVPS (inp6, IDX), T6 - VMOVPS (inp7, IDX), T7 - - TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9 - vpshufb F, T0, T0 - vmovdqu T0, (I*8)*32(%rsp) - vpshufb F, T1, T1 - vmovdqu T1, (I*8+1)*32(%rsp) - vpshufb F, T2, T2 - vmovdqu T2, (I*8+2)*32(%rsp) - vpshufb F, T3, T3 - vmovdqu T3, (I*8+3)*32(%rsp) - vpshufb F, T4, T4 - vmovdqu T4, (I*8+4)*32(%rsp) - vpshufb F, T5, T5 - vmovdqu T5, (I*8+5)*32(%rsp) - vpshufb F, T6, T6 - vmovdqu T6, (I*8+6)*32(%rsp) - vpshufb F, T7, T7 - vmovdqu T7, (I*8+7)*32(%rsp) - add $32, IDX - I = (I+1) -.endr - # save old digests - vmovdqu A,AA - vmovdqu B,BB - vmovdqu C,CC - vmovdqu D,DD - vmovdqu E,EE - -## -## perform 0-79 steps -## - vmovdqu K00_19(%rip), K -## do rounds 0...15 - I = 0 -.rep 16 - SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 16...19 - vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16 - vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15 -.rep 4 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 20...39 - vmovdqu K20_39(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 40...59 - vmovdqu K40_59(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 60...79 - vmovdqu K60_79(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3 - ROTATE_ARGS - I = (I+1) -.endr - - vpaddd AA,A,A - vpaddd BB,B,B - vpaddd CC,C,C - vpaddd DD,D,D - vpaddd EE,E,E - - sub $1, arg2 - jne lloop - - # write out digests - vmovdqu A, 0*32(arg1) - vmovdqu B, 1*32(arg1) - vmovdqu C, 2*32(arg1) - vmovdqu D, 3*32(arg1) - vmovdqu E, 4*32(arg1) - - # update input pointers - add IDX, inp0 - add IDX, inp1 - add IDX, inp2 - add IDX, inp3 - add IDX, inp4 - add IDX, inp5 - add IDX, inp6 - add IDX, inp7 - mov inp0, _data_ptr (arg1) - mov inp1, _data_ptr + 1*8(arg1) - mov inp2, _data_ptr + 2*8(arg1) - mov inp3, _data_ptr + 3*8(arg1) - mov inp4, _data_ptr + 4*8(arg1) - mov inp5, _data_ptr + 5*8(arg1) - mov inp6, _data_ptr + 6*8(arg1) - mov inp7, _data_ptr + 7*8(arg1) - - ################ - ## Postamble - - mov RSP_SAVE, %rsp - - # restore callee-saved clobbered registers - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - ret -ENDPROC(sha1_x8_avx2) - - -.section .rodata.cst32.K00_19, "aM", @progbits, 32 -.align 32 -K00_19: -.octa 0x5A8279995A8279995A8279995A827999 -.octa 0x5A8279995A8279995A8279995A827999 - -.section .rodata.cst32.K20_39, "aM", @progbits, 32 -.align 32 -K20_39: -.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 -.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 - -.section .rodata.cst32.K40_59, "aM", @progbits, 32 -.align 32 -K40_59: -.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC -.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC - -.section .rodata.cst32.K60_79, "aM", @progbits, 32 -.align 32 -K60_79: -.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 -.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 - 
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: -.octa 0x0c0d0e0f08090a0b0405060700010203 -.octa 0x0c0d0e0f08090a0b0405060700010203 diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile deleted file mode 100644 index 53ad6e7db747948077986ab721a234c8877f09f2..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -OBJECT_FILES_NON_STANDARD := y - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o - sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \ - sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o -endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c deleted file mode 100644 index 97c5fc43e115dac127e6f0681f881708691c413b..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb.c +++ /dev/null @@ -1,1013 +0,0 @@ -/* - * Multi buffer SHA256 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "sha256_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha256_mb_alg_state; - -struct sha256_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state); -static asmlinkage struct job_sha256* (*sha256_job_mgr_submit) - (struct sha256_mb_mgr *state, struct job_sha256 *job); -static asmlinkage struct job_sha256* (*sha256_job_mgr_flush) - (struct sha256_mb_mgr *state); -static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job) - (struct sha256_mb_mgr *state); - -inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA256_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA256_BLOCK_SIZE - 1) & - (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA256_PADLENGTHFIELD_SIZE; - -#if SHA256_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA256_LOG2_BLOCK_SIZE; -} - -static struct sha256_hash_ctx - *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr, - struct sha256_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. - */ - copy_len = len & (SHA256_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA256_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA256_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. 
- */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha256_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha256_hash_ctx - *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. If it is not ready, resubmit the job to finish processing. - * If sha256_ctx_mgr_resubmit returned a job, it is ready to be - * returned. Otherwise, all jobs currently being managed by the - * hash_ctx_mgr still need processing. - */ - struct sha256_hash_ctx *ctx; - - ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr); - return sha256_ctx_mgr_resubmit(mgr, ctx); -} - -static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr) -{ - sha256_job_mgr_init(&mgr->mgr); -} - -static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr, - struct sha256_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - return ctx; - } - - /* If we made it here, there was no error during this call to submit */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently - * being processed. - */ - ctx->status = (flags & HASH_LAST) ? - (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. - */ - if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) { - /* - * Compute how many bytes to copy from user buffer into - * extra block - */ - uint32_t copy_len = SHA256_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy( - &ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* The extra block should never contain more than 1 block */ - assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE); - - /* - * If the extra block buffer contains exactly 1 block, - * it can be hashed. 
- */ - if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - return sha256_ctx_mgr_resubmit(mgr, ctx); -} - -static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr) -{ - struct sha256_hash_ctx *ctx; - - while (1) { - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - return NULL; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha256_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha256_ctx_mgr_resubmit returned a job, it is ready to - * be returned. Otherwise, all jobs currently being managed by - * the sha256_ctx_mgr still need processing. Loop. - */ - if (ctx) - return ctx; - } -} - -static int sha256_mb_init(struct ahash_request *areq) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA256_H0; - sctx->job.result_digest[1] = SHA256_H1; - sctx->job.result_digest[2] = SHA256_H2; - sctx->job.result_digest[3] = SHA256_H3; - sctx->job.result_digest[4] = SHA256_H4; - sctx->job.result_digest[5] = SHA256_H5; - sctx->job.result_digest[6] = SHA256_H6; - sctx->job.result_digest[7] = SHA256_H7; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be32 *dst = (__be32 *) rctx->out; - - for (i = 0; i < 8; ++i) - dst[i] = cpu_to_be32(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha256_hash_ctx *sha_ctx; - - /* more work ? 
*/ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha256_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha256_ctx_mgr_flush(cstate->mgr); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha256_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - - /* remove from work list */ - spin_lock(&cstate->work_lock); - list_del(&rctx->waiter); - spin_unlock(&cstate->work_lock); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock(&cstate->work_lock); - list_del(&req_ctx->waiter); - spin_unlock(&cstate->work_lock); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); - } - - return 0; -} - -static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock(&cstate->work_lock); - list_add_tail(&rctx->waiter, &cstate->work_list); - spin_unlock(&cstate->work_lock); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha256_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - sha256_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - 
kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - sha256_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct sha256_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha256_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, - HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_export(struct ahash_request *areq, void *out) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha256_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - 
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha256_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha256_hash_ctx)); - - return 0; -} - -static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha256_mb_areq_alg = { - .init = sha256_mb_init, - .update = sha256_mb_update, - .final = sha256_mb_final, - .finup = sha256_mb_finup, - .export = sha256_mb_export, - .import = sha256_mb_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_hash_ctx), - .base = { - .cra_name = "__sha256-mb", - .cra_driver_name = "__intel_sha256-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha256_mb_areq_alg.halg.base.cra_list), - .cra_init = sha256_mb_areq_init_tfm, - .cra_exit = sha256_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha256_hash_ctx), - } - } -}; - -static int sha256_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha256_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha256_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha256_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, 
&mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha256_mb_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha256_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha256_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha256_mb_async_alg = { - .init = sha256_mb_async_init, - .update = sha256_mb_async_update, - .final = sha256_mb_async_final, - .finup = sha256_mb_async_finup, - .export = sha256_mb_async_export, - .import = sha256_mb_async_import, - .digest = sha256_mb_async_digest, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_hash_ctx), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. 
- */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha256_mb_async_alg.halg.base.cra_list), - .cra_init = sha256_mb_async_init_tfm, - .cra_exit = sha256_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha256_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha256_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if (time_before(cur_time, rctx->tag.expire)) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha256_hash_ctx *) - sha256_ctx_mgr_flush(cstate->mgr); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha256_mb error: nothing got" - " flushed for non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha256_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha256_mb_alg_state.alg_cstate = alloc_percpu - (struct mcryptd_alg_cstate); - - sha256_job_mgr_init = sha256_mb_mgr_init_avx2; - sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2; - sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2; - sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2; - - if (!sha256_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha256_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha256_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha256_mb_alg_state.flusher = &sha256_mb_flusher; - - err = crypto_register_ahash(&sha256_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha256_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha256_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha256_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha256_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - crypto_unregister_ahash(&sha256_mb_async_alg); - crypto_unregister_ahash(&sha256_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha256_mb_alg_state.alg_cstate); -} - -module_init(sha256_mb_mod_init); 
-module_exit(sha256_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated"); - -MODULE_ALIAS_CRYPTO("sha256"); diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h deleted file mode 100644 index 7c432543dc7fa74345753debcb8ea35212eedd2c..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Header file for multi buffer SHA256 context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha256_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, - -#ifdef HASH_CTX_DEBUG - HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4, -#endif -}; - - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - - -/* Hash Constants and Typedefs */ -#define SHA256_DIGEST_LENGTH 8 -#define SHA256_LOG2_BLOCK_SIZE 6 - -#define SHA256_PADLENGTHFIELD_SIZE 8 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha256_ctx_mgr { - struct sha256_mb_mgr mgr; -}; - -/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */ - -struct sha256_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha256 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h deleted file mode 100644 index b01ae408c56d7568c246e8d4736f04b926f8b629..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - -#include - -#define NUM_SHA256_DIGEST_WORDS 8 - -enum job_sts { STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha256 { - u8 *buffer; - u32 len; - u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -/* SHA256 out-of-order scheduler */ - -/* typedef uint32_t sha8_digest_array[8][8]; */ - -struct sha256_args_x8 { - uint32_t digest[8][8]; - uint8_t *data_ptr[8]; -}; - -struct sha256_lane_data { - struct job_sha256 *job_in_lane; -}; - -struct sha256_mb_mgr { - struct sha256_args_x8 args; - - uint32_t lens[8]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha256_lane_data ldata[8]; -}; - - -#define SHA256_MB_MGR_NUM_LANES_AVX2 8 - -void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state); -struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state, - struct job_sha256 *job); -struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state); -struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S deleted file mode 100644 index 5c377bac21d0cfa962e61f48052f27647cd5ab31..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count in in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. It's arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size. 
-# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. -# - -######################################################################### - -#ifndef _DATASTRUCT_ASM_ -#define _DATASTRUCT_ASM_ - -#define SZ8 8*SHA256_DIGEST_WORD_SIZE -#define ROUNDS 64*SZ8 -#define PTR_SZ 8 -#define SHA256_DIGEST_WORD_SIZE 4 -#define MAX_SHA256_LANES 8 -#define SHA256_DIGEST_WORDS 8 -#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE) -#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS) -#define SHA256_BLK_SZ 64 - -# START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -# FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -# END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -######################################################################## - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - %%tmp) -.if (tmp > 0) - .lcomm tmp -.endif -.endstruc -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - -#endif - - -######################################################################## -#### Define SHA256 Out Of Order Data Structures -######################################################################## - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - - _LANE_DATA_size = _FIELD_OFFSET - _LANE_DATA_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # SHA256_ARGS_X4 -### name size align -FIELD _digest, 4*8*8, 4 # transposed digest -FIELD _data_ptr, 8*8, 8 # array of pointers to data -END_FIELDS - - _SHA256_ARGS_X4_size = _FIELD_OFFSET - _SHA256_ARGS_X4_align = _STRUCT_ALIGN - _SHA256_ARGS_X8_size = _FIELD_OFFSET - _SHA256_ARGS_X8_align = _STRUCT_ALIGN - -####################################################################### - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA256_ARGS_X4_size, 
_SHA256_ARGS_X4_align -FIELD _lens, 4*8, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align -END_FIELDS - - _MB_MGR_size = _FIELD_OFFSET - _MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - -####################################################################### - -START_FIELDS #STACK_FRAME -### name size align -FIELD _data, 16*SZ8, 1 # transposed digest -FIELD _digest, 8*SZ8, 1 # array of pointers to data -FIELD _ytmp, 4*SZ8, 1 -FIELD _rsp, 8, 1 -END_FIELDS - - _STACK_FRAME_size = _FIELD_OFFSET - _STACK_FRAME_align = _STRUCT_ALIGN - -####################################################################### - -######################################################################## -#### Define constants -######################################################################## - -#define STS_UNKNOWN 0 -#define STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -######################################################################## -#### Define JOB_SHA256 structure -######################################################################## - -START_FIELDS # JOB_SHA256 - -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 8, 8 # length in bytes -FIELD _result_digest, 8*4, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - - _JOB_SHA256_size = _FIELD_OFFSET - _JOB_SHA256_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S deleted file mode 100644 index d2364c55bbdeb6513730e646183cf7a925eac1e2..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Flush routine for SHA256 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include -#include "sha256_mb_mgr_datastruct.S" - -.extern sha256_x8_avx2 - -#LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# Common register definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -# idx must be a register not clobberred by sha1_mult -#define idx %r8 -#define DWORD_idx %r8d - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx -#define tmp2_w %ebx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 %arg1 - -#define extra_blocks %arg2 -#define p %arg2 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha256_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+3, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne four(%rip), idx - offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne five(%rip), idx - offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne six(%rip), idx - offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne seven(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 8 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 4*I) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - vmovdqu _lens+0*16(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand 
clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqu %xmm0, _lens+0*16(state) - vmovdqu %xmm1, _lens+1*16(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha256_x8_avx2 - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - - mov unused_lanes, _unused_lanes(state) - movl $0xFFFFFFFF, _lens(state,idx,4) - - vmovd _args_digest(state , idx, 4) , %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - vmovdqu %xmm1, offset(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha256_mb_mgr_flush_avx2) - -############################################################################## - -.align 16 -ENTRY(sha256_mb_mgr_get_comp_job_avx2) - push %rbx - - ## if bit 32+3 is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $(32+3), unused_lanes - jc .return_null - - # Find min length - vmovdqu _lens(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - test $~0xF, idx - jnz .return_null - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - vmovdqu %xmm1, offset(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha256_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: -.octa 0x000000000000000000000000FFFFFFF0 - -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 -one: -.quad 1 -two: -.quad 2 -three: -.quad 3 -four: -.quad 4 -five: -.quad 5 -six: -.quad 6 -seven: -.quad 7 diff --git 
a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c deleted file mode 100644 index b0c498371e67161b23729b154870c92d5f9121d7..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Initialization code for multi buffer SHA256 algorithm for AVX2 - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "sha256_mb_mgr.h" - -void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state) -{ - unsigned int j; - - state->unused_lanes = 0xF76543210ULL; - for (j = 0; j < 8; j++) { - state->lens[j] = 0xFFFFFFFF; - state->ldata[j].job_in_lane = NULL; - } -} diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S deleted file mode 100644 index b36ae745408467698ed852e7fc9709f8494a7317..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA256 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
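The init, submit and flush routines in this series share one free-lane bookkeeping scheme: unused_lanes is a stack of 4-bit lane indices, initialised to 0xF76543210. Submit pops the low nibble to claim a lane, job completion shifts the word left by four and ORs the finished lane back in, a value of exactly 0xF means all eight lanes are busy, and the sentinel 0xF nibble only reaches bits 32-35 (the "bt $32+3" test) when every lane is idle again. A small C model of that bookkeeping (function names are illustrative, not from the patch):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /*
   * Free-lane stack as used by the deleted manager code: the low nibble is the
   * next free lane, 0xF is a sentinel that sits at bits 32..35 only when all
   * eight lanes are free.
   */
  static uint64_t unused_lanes = 0xF76543210ULL;

  static unsigned int pop_lane(void)              /* submit path */
  {
          unsigned int lane = unused_lanes & 0xF;

          unused_lanes >>= 4;
          return lane;
  }

  static void push_lane(unsigned int lane)        /* job-completion path */
  {
          unused_lanes = (unused_lanes << 4) | lane;
  }

  static int all_lanes_free(void)                 /* the "bt $32+3" test in flush */
  {
          return (unused_lanes >> 35) & 1;
  }

  int main(void)
  {
          unsigned int i, lanes[8];

          assert(all_lanes_free());
          for (i = 0; i < 8; i++)
                  lanes[i] = pop_lane();          /* claims lanes 0..7 in order */
          assert(unused_lanes == 0xF);            /* submit's "all lanes busy" test */
          for (i = 0; i < 8; i++)
                  push_lane(lanes[7 - i]);
          assert(all_lanes_free());
          printf("final unused_lanes = %#llx\n",
                 (unsigned long long)unused_lanes);
          return 0;
  }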
- */ - -#include -#include -#include "sha256_mb_mgr_datastruct.S" - -.extern sha256_x8_avx2 - -# LINUX register definitions -arg1 = %rdi -arg2 = %rsi -size_offset = %rcx -tmp2 = %rcx -extra_blocks = %rdx - -# Common definitions -#define state arg1 -#define job %rsi -#define len2 arg2 -#define p2 arg2 - -# idx must be a register not clobberred by sha1_x8_avx2 -idx = %r8 -DWORD_idx = %r8d -last_len = %r8 - -p = %r11 -start_offset = %r11 - -unused_lanes = %rbx -BYTE_unused_lanes = %bl - -job_rax = %rax -len = %rax -DWORD_len = %eax - -lane = %r12 -tmp3 = %r12 - -tmp = %r9 -DWORD_tmp = %r9d - -lane_data = %r10 - -# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha256_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - mov unused_lanes, lane - and $0xF, lane - shr $4, unused_lanes - imul $_LANE_DATA_size, lane, lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - shl $4, len - or lane, len - - movl DWORD_len, _lens(state , lane, 4) - - # Load digest words from result_digest - vmovdqu _result_digest(job), %xmm0 - vmovdqu _result_digest+1*16(job), %xmm1 - vmovd %xmm0, _args_digest(state, lane, 4) - vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) - vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) - vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) - vmovd %xmm1, _args_digest+4*32(state , lane, 4) - - vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4) - vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4) - vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xF, unused_lanes - jne return_null - -start_loop: - # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqa %xmm0, _lens + 0*16(state) - vmovdqa %xmm1, _lens + 1*16(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha256_x8_avx2 - - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state,idx,4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - - vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1 - vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1 - - 
vmovdqu %xmm0, _result_digest(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return - -ENDPROC(sha256_mb_mgr_submit_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: - .octa 0x000000000000000000000000FFFFFFF0 diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S deleted file mode 100644 index 1687c80c59952d373a061d906f08a4e465fd0038..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S +++ /dev/null @@ -1,598 +0,0 @@ -/* - * Multi-buffer SHA256 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
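Both manager routines above schedule work the same way: each lane's state is packed into a 32-bit _lens entry as (remaining_blocks << 4) | lane_index, with 0xFFFFFFFF marking an idle lane. The vpminud/vpalignr sequence is a tree reduction that finds the smallest entry; its low nibble names the lane that will finish first, its upper bits give the number of blocks every lane can hash in one call to sha256_x8_avx2, and that common amount is then subtracted from all entries. A scalar C sketch of the same selection (illustrative only):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Scalar equivalent of the vpminud reduction over the eight _lens entries.
   * Each entry is (remaining_blocks << 4) | lane, 0xFFFFFFFF when idle.
   */
  static void find_min_lane(uint32_t lens[8], unsigned int *lane, uint32_t *blocks)
  {
          uint32_t min = lens[0];
          int i;

          for (i = 1; i < 8; i++)
                  if (lens[i] < min)
                          min = lens[i];

          *lane = min & 0xF;      /* which lane finishes first          */
          *blocks = min >> 4;     /* how many blocks all lanes can hash */

          /*
           * The asm then subtracts (min & ~0xF) from every entry so the
           * per-lane block counts stay consistent after the batch run.
           */
          for (i = 0; i < 8; i++)
                  lens[i] -= min & ~0xFu;
  }

  int main(void)
  {
          uint32_t lens[8] = {
                  (10u << 4) | 0, (3u << 4) | 1, (7u << 4) | 2, (5u << 4) | 3,
                  (9u << 4) | 4,  (4u << 4) | 5, (6u << 4) | 6, (8u << 4) | 7,
          };
          unsigned int lane;
          uint32_t blocks;

          find_min_lane(lens, &lane, &blocks);
          printf("lane %u completes after %u blocks\n", lane, blocks); /* lane 1, 3 blocks */
          return 0;
  }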
- */ - -#include -#include "sha256_mb_mgr_datastruct.S" - -## code to compute oct SHA256 using SSE-256 -## outer calling routine takes care of save and restore of XMM registers -## Logic designed/laid out by JDG - -## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15 -## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 -## Linux preserves: rdi rbp r8 -## -## clobbers %ymm0-15 - -arg1 = %rdi -arg2 = %rsi -reg3 = %rcx -reg4 = %rdx - -# Common definitions -STATE = arg1 -INP_SIZE = arg2 - -IDX = %rax -ROUND = %rbx -TBL = reg3 - -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 -inp4 = %r13 -inp5 = %r14 -inp6 = %r15 -inp7 = reg4 - -a = %ymm0 -b = %ymm1 -c = %ymm2 -d = %ymm3 -e = %ymm4 -f = %ymm5 -g = %ymm6 -h = %ymm7 - -T1 = %ymm8 - -a0 = %ymm12 -a1 = %ymm13 -a2 = %ymm14 -TMP = %ymm15 -TMP0 = %ymm6 -TMP1 = %ymm7 - -TT0 = %ymm8 -TT1 = %ymm9 -TT2 = %ymm10 -TT3 = %ymm11 -TT4 = %ymm12 -TT5 = %ymm13 -TT6 = %ymm14 -TT7 = %ymm15 - -# Define stack usage - -# Assume stack aligned to 32 bytes before call -# Therefore FRAMESZ mod 32 must be 32-8 = 24 - -#define FRAMESZ 0x388 - -#define VMOVPS vmovups - -# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 -# "transpose" data in {r0...r7} using temps {t0...t1} -# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# r4 = {e7 e6 e5 e4 e3 e2 e1 e0} -# r5 = {f7 f6 f5 f4 f3 f2 f1 f0} -# r6 = {g7 g6 g5 g4 g3 g2 g1 g0} -# r7 = {h7 h6 h5 h4 h3 h2 h1 h0} -# -# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {h0 g0 f0 e0 d0 c0 b0 a0} -# r1 = {h1 g1 f1 e1 d1 c1 b1 a1} -# r2 = {h2 g2 f2 e2 d2 c2 b2 a2} -# r3 = {h3 g3 f3 e3 d3 c3 b3 a3} -# r4 = {h4 g4 f4 e4 d4 c4 b4 a4} -# r5 = {h5 g5 f5 e5 d5 c5 b5 a5} -# r6 = {h6 g6 f6 e6 d6 c6 b6 a6} -# r7 = {h7 g7 f7 e7 d7 c7 b7 a7} -# - -.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 - # process top half (r0..r3) {a...d} - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1} - vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} - vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} - vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} - - # use r2 in place of t0 - # process bottom half (r4..r7) {e...h} - vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} - vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} - vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} - vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} - vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} - vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} - vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} - vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} - - vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 - vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 - vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 - vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 - vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 - vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 - vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 - vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 - -.endm - -.macro ROTATE_ARGS -TMP_ = h -h = g -g = f -f = e -e = d -d = c -c = b -b = a -a = TMP_ -.endm - -.macro _PRORD reg 
imm tmp - vpslld $(32-\imm),\reg,\tmp - vpsrld $\imm,\reg, \reg - vpor \tmp,\reg, \reg -.endm - -# PRORD_nd reg, imm, tmp, src -.macro _PRORD_nd reg imm tmp src - vpslld $(32-\imm), \src, \tmp - vpsrld $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -# PRORD dst/src, amt -.macro PRORD reg imm - _PRORD \reg,\imm,TMP -.endm - -# PRORD_nd dst, src, amt -.macro PRORD_nd reg tmp imm - _PRORD_nd \reg, \imm, TMP, \tmp -.endm - -# arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_00_15 _T1 i - PRORD_nd a0,e,5 # sig1: a0 = (e >> 5) - - vpxor g, f, a2 # ch: a2 = f^g - vpand e,a2, a2 # ch: a2 = (f^g)&e - vpxor g, a2, a2 # a2 = ch - - PRORD_nd a1,e,25 # sig1: a1 = (e >> 25) - - vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp) - vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K - vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5) - PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11) - vpaddd a2, h, h # h = h + ch - PRORD_nd a2,a,11 # sig0: a2 = (a >> 11) - vpaddd \_T1,h, h # h = h + ch + W + K - vpxor a1, a0, a0 # a0 = sigma1 - PRORD_nd a1,a,22 # sig0: a1 = (a >> 22) - vpxor c, a, \_T1 # maj: T1 = a^c - add $SZ8, ROUND # ROUND++ - vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b - vpaddd a0, h, h - vpaddd h, d, d - vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11) - PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13) - vpxor a1, a2, a2 # a2 = sig0 - vpand c, a, a1 # maj: a1 = a&c - vpor \_T1, a1, a1 # a1 = maj - vpaddd a1, h, h # h = h + ch + W + K + maj - vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0 - ROTATE_ARGS -.endm - -# arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_16_XX _T1 i - vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1 - vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1 - vmovdqu \_T1, a0 - PRORD \_T1,11 - vmovdqu a1, a2 - PRORD a1,2 - vpxor a0, \_T1, \_T1 - PRORD \_T1, 7 - vpxor a2, a1, a1 - PRORD a1, 17 - vpsrld $3, a0, a0 - vpxor a0, \_T1, \_T1 - vpsrld $10, a2, a2 - vpxor a2, a1, a1 - vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1 - vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1 - vpaddd a1, \_T1, \_T1 - - ROUND_00_15 \_T1,\i -.endm - -# SHA256_ARGS: -# UINT128 digest[8]; // transposed digests -# UINT8 *data_ptr[4]; - -# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes); -# arg 1 : STATE : pointer to array of pointers to input data -# arg 2 : INP_SIZE : size of input in blocks - # general registers preserved in outer calling routine - # outer calling routine saves all the XMM registers - # save rsp, allocate 32-byte aligned for local variables -ENTRY(sha256_x8_avx2) - - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, IDX - sub $FRAMESZ, %rsp - and $~0x1F, %rsp - mov IDX, _rsp(%rsp) - - # Load the pre-transposed incoming digest. 
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a - vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b - vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c - vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d - vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e - vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f - vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g - vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h - - lea K256_8(%rip),TBL - - # load the address of each of the 4 message lanes - # getting ready to transpose input onto stack - mov _args_data_ptr+0*PTR_SZ(STATE),inp0 - mov _args_data_ptr+1*PTR_SZ(STATE),inp1 - mov _args_data_ptr+2*PTR_SZ(STATE),inp2 - mov _args_data_ptr+3*PTR_SZ(STATE),inp3 - mov _args_data_ptr+4*PTR_SZ(STATE),inp4 - mov _args_data_ptr+5*PTR_SZ(STATE),inp5 - mov _args_data_ptr+6*PTR_SZ(STATE),inp6 - mov _args_data_ptr+7*PTR_SZ(STATE),inp7 - - xor IDX, IDX -lloop: - xor ROUND, ROUND - - # save old digest - vmovdqu a, _digest(%rsp) - vmovdqu b, _digest+1*SZ8(%rsp) - vmovdqu c, _digest+2*SZ8(%rsp) - vmovdqu d, _digest+3*SZ8(%rsp) - vmovdqu e, _digest+4*SZ8(%rsp) - vmovdqu f, _digest+5*SZ8(%rsp) - vmovdqu g, _digest+6*SZ8(%rsp) - vmovdqu h, _digest+7*SZ8(%rsp) - i = 0 -.rep 2 - VMOVPS i*32(inp0, IDX), TT0 - VMOVPS i*32(inp1, IDX), TT1 - VMOVPS i*32(inp2, IDX), TT2 - VMOVPS i*32(inp3, IDX), TT3 - VMOVPS i*32(inp4, IDX), TT4 - VMOVPS i*32(inp5, IDX), TT5 - VMOVPS i*32(inp6, IDX), TT6 - VMOVPS i*32(inp7, IDX), TT7 - vmovdqu g, _ytmp(%rsp) - vmovdqu h, _ytmp+1*SZ8(%rsp) - TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1 - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1 - vmovdqu _ytmp(%rsp), g - vpshufb TMP1, TT0, TT0 - vpshufb TMP1, TT1, TT1 - vpshufb TMP1, TT2, TT2 - vpshufb TMP1, TT3, TT3 - vpshufb TMP1, TT4, TT4 - vpshufb TMP1, TT5, TT5 - vpshufb TMP1, TT6, TT6 - vpshufb TMP1, TT7, TT7 - vmovdqu _ytmp+1*SZ8(%rsp), h - vmovdqu TT4, _ytmp(%rsp) - vmovdqu TT5, _ytmp+1*SZ8(%rsp) - vmovdqu TT6, _ytmp+2*SZ8(%rsp) - vmovdqu TT7, _ytmp+3*SZ8(%rsp) - ROUND_00_15 TT0,(i*8+0) - vmovdqu _ytmp(%rsp), TT0 - ROUND_00_15 TT1,(i*8+1) - vmovdqu _ytmp+1*SZ8(%rsp), TT1 - ROUND_00_15 TT2,(i*8+2) - vmovdqu _ytmp+2*SZ8(%rsp), TT2 - ROUND_00_15 TT3,(i*8+3) - vmovdqu _ytmp+3*SZ8(%rsp), TT3 - ROUND_00_15 TT0,(i*8+4) - ROUND_00_15 TT1,(i*8+5) - ROUND_00_15 TT2,(i*8+6) - ROUND_00_15 TT3,(i*8+7) - i = (i+1) -.endr - add $64, IDX - i = (i*8) - - jmp Lrounds_16_xx -.align 16 -Lrounds_16_xx: -.rep 16 - ROUND_16_XX T1, i - i = (i+1) -.endr - - cmp $ROUNDS,ROUND - jb Lrounds_16_xx - - # add old digest - vpaddd _digest+0*SZ8(%rsp), a, a - vpaddd _digest+1*SZ8(%rsp), b, b - vpaddd _digest+2*SZ8(%rsp), c, c - vpaddd _digest+3*SZ8(%rsp), d, d - vpaddd _digest+4*SZ8(%rsp), e, e - vpaddd _digest+5*SZ8(%rsp), f, f - vpaddd _digest+6*SZ8(%rsp), g, g - vpaddd _digest+7*SZ8(%rsp), h, h - - sub $1, INP_SIZE # unit is blocks - jne lloop - - # write back to memory (state object) the transposed digest - vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE) - - # update input pointers - add IDX, inp0 - mov inp0, _args_data_ptr+0*8(STATE) - add IDX, inp1 - mov inp1, _args_data_ptr+1*8(STATE) - add IDX, inp2 - mov inp2, _args_data_ptr+2*8(STATE) - add IDX, inp3 - mov inp3, _args_data_ptr+3*8(STATE) - add IDX, inp4 - mov inp4, _args_data_ptr+4*8(STATE) - add IDX, inp5 - 
mov inp5, _args_data_ptr+5*8(STATE) - add IDX, inp6 - mov inp6, _args_data_ptr+6*8(STATE) - add IDX, inp7 - mov inp7, _args_data_ptr+7*8(STATE) - - # Postamble - mov _rsp(%rsp), %rsp - - # restore callee-saved clobbered registers - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - ret -ENDPROC(sha256_x8_avx2) - -.section .rodata.K256_8, "a", @progbits -.align 64 -K256_8: - .octa 0x428a2f98428a2f98428a2f98428a2f98 - .octa 0x428a2f98428a2f98428a2f98428a2f98 - .octa 0x71374491713744917137449171374491 - .octa 0x71374491713744917137449171374491 - .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf - .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf - .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 - .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 - .octa 0x3956c25b3956c25b3956c25b3956c25b - .octa 0x3956c25b3956c25b3956c25b3956c25b - .octa 0x59f111f159f111f159f111f159f111f1 - .octa 0x59f111f159f111f159f111f159f111f1 - .octa 0x923f82a4923f82a4923f82a4923f82a4 - .octa 0x923f82a4923f82a4923f82a4923f82a4 - .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 - .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 - .octa 0xd807aa98d807aa98d807aa98d807aa98 - .octa 0xd807aa98d807aa98d807aa98d807aa98 - .octa 0x12835b0112835b0112835b0112835b01 - .octa 0x12835b0112835b0112835b0112835b01 - .octa 0x243185be243185be243185be243185be - .octa 0x243185be243185be243185be243185be - .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 - .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 - .octa 0x72be5d7472be5d7472be5d7472be5d74 - .octa 0x72be5d7472be5d7472be5d7472be5d74 - .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe - .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe - .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 - .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 - .octa 0xc19bf174c19bf174c19bf174c19bf174 - .octa 0xc19bf174c19bf174c19bf174c19bf174 - .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 - .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 - .octa 0xefbe4786efbe4786efbe4786efbe4786 - .octa 0xefbe4786efbe4786efbe4786efbe4786 - .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 - .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 - .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc - .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc - .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f - .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f - .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa - .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa - .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc - .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc - .octa 0x76f988da76f988da76f988da76f988da - .octa 0x76f988da76f988da76f988da76f988da - .octa 0x983e5152983e5152983e5152983e5152 - .octa 0x983e5152983e5152983e5152983e5152 - .octa 0xa831c66da831c66da831c66da831c66d - .octa 0xa831c66da831c66da831c66da831c66d - .octa 0xb00327c8b00327c8b00327c8b00327c8 - .octa 0xb00327c8b00327c8b00327c8b00327c8 - .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 - .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 - .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 - .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 - .octa 0xd5a79147d5a79147d5a79147d5a79147 - .octa 0xd5a79147d5a79147d5a79147d5a79147 - .octa 0x06ca635106ca635106ca635106ca6351 - .octa 0x06ca635106ca635106ca635106ca6351 - .octa 0x14292967142929671429296714292967 - .octa 0x14292967142929671429296714292967 - .octa 0x27b70a8527b70a8527b70a8527b70a85 - .octa 0x27b70a8527b70a8527b70a8527b70a85 - .octa 0x2e1b21382e1b21382e1b21382e1b2138 - .octa 0x2e1b21382e1b21382e1b21382e1b2138 - .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc - .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc - .octa 0x53380d1353380d1353380d1353380d13 - .octa 0x53380d1353380d1353380d1353380d13 - .octa 0x650a7354650a7354650a7354650a7354 - .octa 0x650a7354650a7354650a7354650a7354 - 
.octa 0x766a0abb766a0abb766a0abb766a0abb - .octa 0x766a0abb766a0abb766a0abb766a0abb - .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e - .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e - .octa 0x92722c8592722c8592722c8592722c85 - .octa 0x92722c8592722c8592722c8592722c85 - .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 - .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 - .octa 0xa81a664ba81a664ba81a664ba81a664b - .octa 0xa81a664ba81a664ba81a664ba81a664b - .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 - .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 - .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 - .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 - .octa 0xd192e819d192e819d192e819d192e819 - .octa 0xd192e819d192e819d192e819d192e819 - .octa 0xd6990624d6990624d6990624d6990624 - .octa 0xd6990624d6990624d6990624d6990624 - .octa 0xf40e3585f40e3585f40e3585f40e3585 - .octa 0xf40e3585f40e3585f40e3585f40e3585 - .octa 0x106aa070106aa070106aa070106aa070 - .octa 0x106aa070106aa070106aa070106aa070 - .octa 0x19a4c11619a4c11619a4c11619a4c116 - .octa 0x19a4c11619a4c11619a4c11619a4c116 - .octa 0x1e376c081e376c081e376c081e376c08 - .octa 0x1e376c081e376c081e376c081e376c08 - .octa 0x2748774c2748774c2748774c2748774c - .octa 0x2748774c2748774c2748774c2748774c - .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 - .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 - .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 - .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 - .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a - .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a - .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f - .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f - .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 - .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 - .octa 0x748f82ee748f82ee748f82ee748f82ee - .octa 0x748f82ee748f82ee748f82ee748f82ee - .octa 0x78a5636f78a5636f78a5636f78a5636f - .octa 0x78a5636f78a5636f78a5636f78a5636f - .octa 0x84c8781484c8781484c8781484c87814 - .octa 0x84c8781484c8781484c8781484c87814 - .octa 0x8cc702088cc702088cc702088cc70208 - .octa 0x8cc702088cc702088cc702088cc70208 - .octa 0x90befffa90befffa90befffa90befffa - .octa 0x90befffa90befffa90befffa90befffa - .octa 0xa4506ceba4506ceba4506ceba4506ceb - .octa 0xa4506ceba4506ceba4506ceba4506ceb - .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 - .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 - .octa 0xc67178f2c67178f2c67178f2c67178f2 - .octa 0xc67178f2c67178f2c67178f2c67178f2 - -.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: -.octa 0x0c0d0e0f08090a0b0405060700010203 -.octa 0x0c0d0e0f08090a0b0405060700010203 - -.section .rodata.cst256.K256, "aM", @progbits, 256 -.align 64 -.global K256 -K256: - .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile deleted file mode 100644 index 
90f1ef69152ebe36064aa92d926a4f2b147d56b9..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o - sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \ - sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o -endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c deleted file mode 100644 index 26b85678012d0f03a361c102e8444b916ff5f41b..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb.c +++ /dev/null @@ -1,1047 +0,0 @@ -/* - * Multi buffer SHA512 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "sha512_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha512_mb_alg_state; - -struct sha512_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state); -static asmlinkage struct job_sha512* (*sha512_job_mgr_submit) - (struct sha512_mb_mgr *state, - struct job_sha512 *job); -static asmlinkage struct job_sha512* (*sha512_job_mgr_flush) - (struct sha512_mb_mgr *state); -static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job) - (struct sha512_mb_mgr *state); - -inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA512_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA512_BLOCK_SIZE - 1) & - (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA512_PADLENGTHFIELD_SIZE; - -#if SHA512_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA512_LOG2_BLOCK_SIZE; -} - -static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit - (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. - */ - copy_len = len & (SHA512_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA512_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA512_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, - &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. 
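The sha512_pad() helper above appends the usual SHA-512 padding (a 0x80 byte, zero fill, and a 16-byte length field holding the big-endian bit count) to whatever tail of the message is left in the extra-block buffer, and returns how many 128-byte blocks that tail now occupies: one when the leftover is at most 111 bytes, two when it is 112-127 bytes and the 0x80 byte plus the length field no longer fit. A small C check of that block count (constant names shortened for the sketch):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BLOCK    128    /* SHA512_BLOCK_SIZE          */
  #define LENFIELD 16     /* SHA512_PADLENGTHFIELD_SIZE */

  /*
   * Number of extra blocks the deleted sha512_pad() reports for a message of
   * total_len bytes: the leftover bytes plus 0x80, zero fill and the 16-byte
   * length field, rounded up to whole 128-byte blocks.
   */
  static uint32_t extra_blocks(uint64_t total_len)
  {
          uint32_t i = total_len & (BLOCK - 1);

          i += ((BLOCK - 1) & (0 - (total_len + LENFIELD + 1))) + 1 + LENFIELD;
          return i / BLOCK;
  }

  int main(void)
  {
          /* leftover <= 111 bytes: padding still fits in one extra block */
          assert(extra_blocks(200) == 1);         /* 200 % 128 == 72  */
          /* leftover >= 112 bytes: padding spills into a second block  */
          assert(extra_blocks(240) == 2);         /* 240 % 128 == 112 */
          printf("ok\n");
          return 0;
  }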
- */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha512_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - if (ctx) - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha512_hash_ctx - *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. - * If it is not ready, resubmit the job to finish processing. - * If sha512_ctx_mgr_resubmit returned a job, it is ready to be - * returned. - * Otherwise, all jobs currently being managed by the hash_ctx_mgr - * still need processing. - */ - struct sha512_ctx_mgr *mgr; - struct sha512_hash_ctx *ctx; - unsigned long flags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, flags); - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_get_comp_job(&mgr->mgr); - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); - spin_unlock_irqrestore(&cstate->work_lock, flags); - return ctx; -} - -static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr) -{ - sha512_job_mgr_init(&mgr->mgr); -} - -static struct sha512_hash_ctx - *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate, - struct sha512_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - struct sha512_ctx_mgr *mgr; - unsigned long irqflags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, irqflags); - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - goto unlock; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - goto unlock; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - goto unlock; - } - - /* - * If we made it here, there were no errors during this call to - * submit - */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently being - * processed. - */ - ctx->status = (flags & HASH_LAST) ? - (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. 
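The comment above describes the staging step inside sha512_ctx_mgr_submit(): incoming bytes are appended to partial_block_buffer until it holds a complete 128-byte block, at which point that block is submitted as a single-block job and the buffer is emptied; anything shorter simply stays buffered until the next update or the final padding. A minimal model of the staging step, assuming only the copy/fill behaviour matters (struct and function names are not from the patch):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define BLOCK 128       /* SHA512_BLOCK_SIZE */

  /*
   * Minimal model of the partial-block buffering in sha512_ctx_mgr_submit():
   * keep appending user bytes to a 128-byte staging buffer and report when a
   * complete block is ready to be hashed.
   */
  struct staging {
          uint8_t buf[BLOCK];
          uint32_t fill;          /* partial_block_buffer_length */
  };

  /* Returns the number of bytes consumed; *block_ready is set when the
   * staging buffer now holds exactly one full block. */
  static uint32_t stage_bytes(struct staging *s, const uint8_t *data,
                              uint32_t len, int *block_ready)
  {
          uint32_t copy = BLOCK - s->fill;

          if (copy > len)
                  copy = len;
          memcpy(s->buf + s->fill, data, copy);
          s->fill += copy;
          *block_ready = (s->fill == BLOCK);
          return copy;
  }

  int main(void)
  {
          struct staging s = { .fill = 0 };
          uint8_t data[200] = { 0 };
          int ready;
          uint32_t used;

          used = stage_bytes(&s, data, 100, &ready);   /* 100 buffered, no block yet  */
          printf("consumed %u, ready=%d\n", used, ready);
          used = stage_bytes(&s, data, 100, &ready);   /* 28 more complete the block  */
          printf("consumed %u, ready=%d\n", used, ready);
          return 0;
  }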
- */ - if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) { - /* Compute how many bytes to copy from user buffer into extra - * block - */ - uint32_t copy_len = SHA512_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy - (&ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* The extra block should never contain more than 1 block - * here - */ - assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE); - - /* If the extra block buffer contains exactly 1 block, it can - * be hashed. - */ - if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); -unlock: - spin_unlock_irqrestore(&cstate->work_lock, irqflags); - return ctx; -} - -static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate) -{ - struct sha512_ctx_mgr *mgr; - struct sha512_hash_ctx *ctx; - unsigned long flags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, flags); - while (1) { - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - break; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha512_ctx_mgr_resubmit returned a job, it is ready to - * be returned. Otherwise, all jobs currently being managed by - * the sha512_ctx_mgr still need processing. Loop. - */ - if (ctx) - break; - } - spin_unlock_irqrestore(&cstate->work_lock, flags); - return ctx; -} - -static int sha512_mb_init(struct ahash_request *areq) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA512_H0; - sctx->job.result_digest[1] = SHA512_H1; - sctx->job.result_digest[2] = SHA512_H2; - sctx->job.result_digest[3] = SHA512_H3; - sctx->job.result_digest[4] = SHA512_H4; - sctx->job.result_digest[5] = SHA512_H5; - sctx->job.result_digest[6] = SHA512_H6; - sctx->job.result_digest[7] = SHA512_H7; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be64 *dst = (__be64 *) rctx->out; - - for (i = 0; i < 8; ++i) - dst[i] = cpu_to_be64(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha512_hash_ctx *sha_ctx; - - /* more work ? 
*/ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha512_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha512_ctx_mgr_flush(cstate); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha512_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - unsigned long flags; - - /* remove from work list */ - spin_lock_irqsave(&cstate->work_lock, flags); - list_del(&rctx->waiter); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock_irqsave(&cstate->work_lock, flags); - list_del(&req_ctx->waiter); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate); - } - - return 0; -} - -static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - unsigned long flags; - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock_irqsave(&cstate->work_lock, flags); - list_add_tail(&rctx->waiter, &cstate->work_list); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha512_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - sha512_mb_add_list(rctx, 
cstate); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - sha512_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct sha512_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha512_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_export(struct ahash_request *areq, void *out) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha512_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - 
struct mcryptd_ahash *mcryptd_tfm; - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha512_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha512_hash_ctx)); - - return 0; -} - -static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha512_mb_areq_alg = { - .init = sha512_mb_init, - .update = sha512_mb_update, - .final = sha512_mb_final, - .finup = sha512_mb_finup, - .export = sha512_mb_export, - .import = sha512_mb_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct sha512_hash_ctx), - .base = { - .cra_name = "__sha512-mb", - .cra_driver_name = "__intel_sha512-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha512_mb_areq_alg.halg.base.cra_list), - .cra_init = sha512_mb_areq_init_tfm, - .cra_exit = sha512_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha512_hash_ctx), - } - } -}; - -static int sha512_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha512_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha512_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha512_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash 
*mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha512_mb_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha512_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha512_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha512_mb_async_alg = { - .init = sha512_mb_async_init, - .update = sha512_mb_async_update, - .final = sha512_mb_async_final, - .finup = sha512_mb_async_finup, - .digest = sha512_mb_async_digest, - .export = sha512_mb_async_export, - .import = sha512_mb_async_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct sha512_hash_ctx), - .base = { - .cra_name = "sha512", - .cra_driver_name = "sha512_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. 
- */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha512_mb_async_alg.halg.base.cra_list), - .cra_init = sha512_mb_async_init_tfm, - .cra_exit = sha512_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha512_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha512_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if time_before(cur_time, rctx->tag.expire) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha512_hash_ctx *) - sha512_ctx_mgr_flush(cstate); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha512_mb error: nothing got flushed for" - " non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha512_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha512_mb_alg_state.alg_cstate = - alloc_percpu(struct mcryptd_alg_cstate); - - sha512_job_mgr_init = sha512_mb_mgr_init_avx2; - sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2; - sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2; - sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2; - - if (!sha512_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha512_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha512_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha512_mb_alg_state.flusher = &sha512_mb_flusher; - - err = crypto_register_ahash(&sha512_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha512_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha512_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha512_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha512_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - crypto_unregister_ahash(&sha512_mb_async_alg); - crypto_unregister_ahash(&sha512_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha512_mb_alg_state.alg_cstate); -} - -module_init(sha512_mb_mod_init); 
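The priority comment above notes that users whose workloads benefit from multi-buffer hashing can request the implementation explicitly by driver name. As an illustration only (this sketch is not part of the patch, is hypothetical user-space code, and trims error handling), a caller could bind to the "sha512_mb" cra_driver_name through the AF_ALG hash interface instead of the generic "sha512" cra_name, which would resolve to whichever driver has the highest priority::

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			/* cra_driver_name of the multi-buffer driver;
			 * "sha512" would pick the highest-priority one. */
			.salg_name   = "sha512_mb",
		};
		unsigned char digest[64];	/* SHA512_DIGEST_SIZE */
		const char msg[] = "abc";
		int tfmfd, opfd;
		ssize_t n, i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return 1;	/* driver not available */

		opfd = accept(tfmfd, NULL, 0);
		if (opfd < 0)
			return 1;

		send(opfd, msg, strlen(msg), 0);	/* feed the message */
		n = read(opfd, digest, sizeof(digest));	/* read the digest */

		for (i = 0; i < n; i++)
			printf("%02x", digest[i]);
		printf("\n");

		close(opfd);
		close(tfmfd);
		return 0;
	}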
-module_exit(sha512_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated"); - -MODULE_ALIAS("sha512"); diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h deleted file mode 100644 index e5c465bd821ed8a0228d6ca973a3a921a819058a..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Header file for multi buffer SHA512 context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha512_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, -}; - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - -/* Hash Constants and Typedefs */ -#define SHA512_DIGEST_LENGTH 8 -#define SHA512_LOG2_BLOCK_SIZE 7 - -#define SHA512_PADLENGTHFIELD_SIZE 16 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha512_ctx_mgr { - struct sha512_mb_mgr mgr; -}; - -/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */ - -struct sha512_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha512 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h deleted file mode 100644 index 178f17eef3825af0b98a3bf59c55c3dc16e32616..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Header file for multi buffer SHA512 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - -#include - -#define NUM_SHA512_DIGEST_WORDS 8 - -enum job_sts {STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha512 { - u8 *buffer; - u64 len; - u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -struct sha512_args_x4 { - uint64_t digest[8][4]; - uint8_t *data_ptr[4]; -}; - -struct sha512_lane_data { - struct job_sha512 *job_in_lane; -}; - -struct sha512_mb_mgr { - struct sha512_args_x4 args; - - uint64_t lens[4]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha512_lane_data ldata[4]; -}; - -#define SHA512_MB_MGR_NUM_LANES_AVX2 4 - -void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state); -struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state, - struct job_sha512 *job); -struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state); -struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S deleted file mode 100644 index cf2636d4c9ba97aab011461dc065d2da8aed86a9..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count in in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. It's arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size. -# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. 
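As a rough illustration of the offset arithmetic this comment describes (a hypothetical standalone C sketch, not kernel code), each FIELD step aligns the running offset up to the field's alignment, records that as the field offset, then advances by the field size; END_FIELDS pads the total size to the largest alignment seen. Applied to the JOB_AES usage example from the comment it reproduces the expected layout::

	#include <stdio.h>

	static unsigned long field_offset;	/* mirrors _FIELD_OFFSET */
	static unsigned long struct_align;	/* mirrors _STRUCT_ALIGN  */

	/* FIELD: align the running offset, record it, advance by size. */
	static unsigned long field(const char *name, unsigned long size,
				   unsigned long align)
	{
		unsigned long off;

		field_offset = (field_offset + align - 1) & ~(align - 1);
		off = field_offset;
		field_offset += size;
		if (align > struct_align)
			struct_align = align;
		printf("%-12s offset %2lu size %2lu align %lu\n",
		       name, off, size, align);
		return off;
	}

	int main(void)
	{
		/* JOB_AES layout from the usage example in the comment. */
		field("_plaintext",  8, 8);
		field("_ciphertext", 8, 8);
		field("_IV",        16, 8);
		field("_keys",       8, 8);
		field("_len",        4, 4);
		field("_status",     4, 4);
		field("_user_data",  8, 8);

		/* END_FIELDS: pad the size to the overall alignment. */
		field_offset = (field_offset + struct_align - 1) &
			       ~(struct_align - 1);
		printf("_JOB_AES_size = %lu, _JOB_AES_align = %lu\n",
		       field_offset, struct_align);	/* prints 56 and 8 */
		return 0;
	}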
-# - -######################################################################### - -#ifndef _DATASTRUCT_ASM_ -#define _DATASTRUCT_ASM_ - -#define PTR_SZ 8 -#define SHA512_DIGEST_WORD_SIZE 8 -#define SHA512_MB_MGR_NUM_LANES_AVX2 4 -#define NUM_SHA512_DIGEST_WORDS 8 -#define SZ4 4*SHA512_DIGEST_WORD_SIZE -#define ROUNDS 80*SZ4 -#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8) - -# START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -# FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -# END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - ##tmp) -.if (tmp > 0) - .lcomm tmp -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - -#endif - -################################################################### -### Define SHA512 Out Of Order Data Structures -################################################################### - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - - _LANE_DATA_size = _FIELD_OFFSET - _LANE_DATA_align = _STRUCT_ALIGN - -#################################################################### - -START_FIELDS # SHA512_ARGS_X4 -### name size align -FIELD _digest, 8*8*4, 4 # transposed digest -FIELD _data_ptr, 8*4, 8 # array of pointers to data -END_FIELDS - - _SHA512_ARGS_X4_size = _FIELD_OFFSET - _SHA512_ARGS_X4_align = _STRUCT_ALIGN - -##################################################################### - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align -FIELD _lens, 8*4, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align -END_FIELDS - - _MB_MGR_size = _FIELD_OFFSET - _MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - -####################################################################### - -####################################################################### -#### Define constants -####################################################################### - -#define STS_UNKNOWN 0 -#define 
STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -####################################################################### -#### Define JOB_SHA512 structure -####################################################################### - -START_FIELDS # JOB_SHA512 -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 8, 8 # length in bytes -FIELD _result_digest, 8*8, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - - _JOB_SHA512_size = _FIELD_OFFSET - _JOB_SHA512_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S deleted file mode 100644 index 7c629caebc0523a244b32f0ad2088204d180f4f9..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Flush routine for SHA512 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include "sha512_mb_mgr_datastruct.S" - -.extern sha512_x4_avx2 - -# LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# idx needs to be other than arg1, arg2, rbx, r12 -#define idx %rdx - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 arg1 - -#define extra_blocks arg2 -#define p arg2 - -#define tmp4 %r8 -#define lens0 %r8 - -#define lens1 %r9 -#define lens2 %r10 -#define lens3 %r11 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha512_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+7, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 4 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 8*I +4) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - mov _lens + 0*8(state),lens0 - mov lens0,idx - mov _lens + 1*8(state),lens1 - cmp idx,lens1 - cmovb lens1,idx - mov _lens + 2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens + 3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - mov idx,len2 - and $0xF,idx - and $~0xFF,len2 - jz len_is_0 - - sub len2, lens0 - sub len2, lens1 - sub len2, lens2 - sub len2, lens3 - shr $32,len2 - mov lens0, _lens + 0*8(state) - mov lens1, _lens + 1*8(state) - mov lens2, _lens + 2*8(state) - mov lens3, _lens + 3*8(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha512_x4_avx2 - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens+4(state, idx, 8) - - vmovq _args_digest+0*32(state, idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state, idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state, idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state, idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - vmovdqu %xmm2, _result_digest+2*16(job_rax) - vmovdqu %xmm3, 
_result_digest+3*16(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha512_mb_mgr_flush_avx2) -.align 16 - -ENTRY(sha512_mb_mgr_get_comp_job_avx2) - push %rbx - - mov _unused_lanes(state), unused_lanes - bt $(32+7), unused_lanes - jc .return_null - - # Find min length - mov _lens(state),lens0 - mov lens0,idx - mov _lens+1*8(state),lens1 - cmp idx,lens1 - cmovb lens1,idx - mov _lens+2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens+3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - test $~0xF,idx - jnz .return_null - and $0xF,idx - - #process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens+4(state, idx, 8) - - vmovq _args_digest(state, idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state, idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state, idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state, idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest+0*16(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - vmovdqu %xmm2, _result_digest+2*16(job_rax) - vmovdqu %xmm3, _result_digest+3*16(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha512_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst8.one, "aM", @progbits, 8 -.align 8 -one: -.quad 1 - -.section .rodata.cst8.two, "aM", @progbits, 8 -.align 8 -two: -.quad 2 - -.section .rodata.cst8.three, "aM", @progbits, 8 -.align 8 -three: -.quad 3 diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S deleted file mode 100644 index 4ba709ba78e5a0ffcdbf018d1a6da0e851731b11..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA512 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include "sha512_mb_mgr_datastruct.S" - -.extern sha512_x4_avx2 - -#define arg1 %rdi -#define arg2 %rsi - -#define idx %rdx -#define last_len %rdx - -#define size_offset %rcx -#define tmp2 %rcx - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 -#define p2 arg2 - -#define p %r11 -#define start_offset %r11 - -#define unused_lanes %rbx - -#define job_rax %rax -#define len %rax - -#define lane %r12 -#define tmp3 %r12 -#define lens3 %r12 - -#define extra_blocks %r8 -#define lens0 %r8 - -#define tmp %r9 -#define lens1 %r9 - -#define lane_data %r10 -#define lens2 %r10 - -#define DWORD_len %eax - -# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha512_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - movzb %bl,lane - shr $8, unused_lanes - imul $_LANE_DATA_size, lane,lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - movl DWORD_len,_lens+4(state , lane, 8) - - # Load digest words from result_digest - vmovdqu _result_digest+0*16(job), %xmm0 - vmovdqu _result_digest+1*16(job), %xmm1 - vmovdqu _result_digest+2*16(job), %xmm2 - vmovdqu _result_digest+3*16(job), %xmm3 - - vmovq %xmm0, _args_digest(state, lane, 8) - vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8) - vmovq %xmm1, _args_digest+2*32(state , lane, 8) - vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8) - vmovq %xmm2, _args_digest+4*32(state , lane, 8) - vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8) - vmovq %xmm3, _args_digest+6*32(state , lane, 8) - vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xFF, unused_lanes - jne return_null - -start_loop: - - # Find min length - mov _lens+0*8(state),lens0 - mov lens0,idx - mov _lens+1*8(state),lens1 - cmp idx,lens1 - cmovb lens1, idx - mov _lens+2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens+3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - mov idx,len2 - and $0xF,idx - and $~0xFF,len2 - jz len_is_0 - - sub len2,lens0 - sub len2,lens1 - sub len2,lens2 - sub len2,lens3 - 
shr $32,len2 - mov lens0, _lens + 0*8(state) - mov lens1, _lens + 1*8(state) - mov lens2, _lens + 2*8(state) - mov lens3, _lens + 3*8(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha512_x4_avx2 - # state and idx are intact - -len_is_0: - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF,_lens+4(state,idx,8) - vmovq _args_digest+0*32(state , idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state , idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state , idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state , idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest + 0*16(job_rax) - vmovdqu %xmm1, _result_digest + 1*16(job_rax) - vmovdqu %xmm2, _result_digest + 2*16(job_rax) - vmovdqu %xmm3, _result_digest + 3*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha512_mb_mgr_submit_avx2) - -/* UNUSED? -.section .rodata.cst16, "aM", @progbits, 16 -.align 16 -H0: .int 0x6a09e667 -H1: .int 0xbb67ae85 -H2: .int 0x3c6ef372 -H3: .int 0xa54ff53a -H4: .int 0x510e527f -H5: .int 0x9b05688c -H6: .int 0x1f83d9ab -H7: .int 0x5be0cd19 -*/ diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S deleted file mode 100644 index e22e907643a6df0537f62ae3c2b19b2389f1ae2c..0000000000000000000000000000000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Multi-buffer SHA512 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# code to compute quad SHA512 using AVX2 -# use YMMs to tackle the larger digest size -# outer calling routine takes care of save and restore of XMM registers -# Logic designed/laid out by JDG - -# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15 -# Stack must be aligned to 32 bytes before call -# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12 -# Linux preserves: rcx rdx rdi rbp r13 r14 r15 -# clobbers ymm0-15 - -#include -#include "sha512_mb_mgr_datastruct.S" - -arg1 = %rdi -arg2 = %rsi - -# Common definitions -STATE = arg1 -INP_SIZE = arg2 - -IDX = %rax -ROUND = %rbx -TBL = %r8 - -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 - -a = %ymm0 -b = %ymm1 -c = %ymm2 -d = %ymm3 -e = %ymm4 -f = %ymm5 -g = %ymm6 -h = %ymm7 - -a0 = %ymm8 -a1 = %ymm9 -a2 = %ymm10 - -TT0 = %ymm14 -TT1 = %ymm13 -TT2 = %ymm12 -TT3 = %ymm11 -TT4 = %ymm10 -TT5 = %ymm9 - -T1 = %ymm14 -TMP = %ymm15 - -# Define stack usage -STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24 - -#define VMOVPD vmovupd -_digest = SZ4*16 - -# transpose r0, r1, r2, r3, t0, t1 -# "transpose" data in {r0..r3} using temps {t0..t3} -# Input looks like: {r0 r1 r2 r3} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# -# output looks like: {t0 r1 r0 r3} -# t0 = {d1 d0 c1 c0 b1 b0 a1 a0} -# r1 = {d3 d2 c3 c2 b3 b2 a3 a2} -# r0 = {d5 d4 c5 c4 b5 b4 a5 a4} -# r3 = {d7 d6 c7 c6 b7 b6 a7 a6} - -.macro TRANSPOSE r0 r1 r2 r3 t0 t1 - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - - vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6 - vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2 - vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5 - vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1 -.endm - -.macro ROTATE_ARGS -TMP_ = h -h = g -g = f -f = e -e = d -d = c -c = b -b = a -a = TMP_ -.endm - -# PRORQ reg, imm, tmp -# packed-rotate-right-double -# does a rotate by doing two shifts and an or -.macro _PRORQ reg imm tmp - vpsllq $(64-\imm),\reg,\tmp - vpsrlq $\imm,\reg, \reg - vpor \tmp,\reg, \reg -.endm - -# non-destructive -# PRORQ_nd reg, imm, tmp, src -.macro _PRORQ_nd reg imm tmp src - vpsllq $(64-\imm), \src, \tmp - vpsrlq $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -# PRORQ dst/src, amt -.macro PRORQ reg imm - _PRORQ \reg, \imm, TMP -.endm - -# PRORQ_nd dst, src, amt -.macro PRORQ_nd reg tmp imm - _PRORQ_nd \reg, \imm, TMP, \tmp -.endm - -#; arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_00_15 _T1 i - PRORQ_nd a0, e, (18-14) # sig1: a0 = (e 
>> 4) - - vpxor g, f, a2 # ch: a2 = f^g - vpand e,a2, a2 # ch: a2 = (f^g)&e - vpxor g, a2, a2 # a2 = ch - - PRORQ_nd a1,e,41 # sig1: a1 = (e >> 25) - - offset = SZ4*(\i & 0xf) - vmovdqu \_T1,offset(%rsp) - vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K - vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5) - PRORQ a0, 14 # sig1: a0 = (e >> 6) ^ (e >> 11) - vpaddq a2, h, h # h = h + ch - PRORQ_nd a2,a,6 # sig0: a2 = (a >> 11) - vpaddq \_T1,h, h # h = h + ch + W + K - vpxor a1, a0, a0 # a0 = sigma1 - vmovdqu a,\_T1 - PRORQ_nd a1,a,39 # sig0: a1 = (a >> 22) - vpxor c, \_T1, \_T1 # maj: T1 = a^c - add $SZ4, ROUND # ROUND++ - vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b - vpaddq a0, h, h - vpaddq h, d, d - vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11) - PRORQ a2,28 # sig0: a2 = (a >> 2) ^ (a >> 13) - vpxor a1, a2, a2 # a2 = sig0 - vpand c, a, a1 # maj: a1 = a&c - vpor \_T1, a1, a1 # a1 = maj - vpaddq a1, h, h # h = h + ch + W + K + maj - vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0 - ROTATE_ARGS -.endm - - -#; arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_16_XX _T1 i - vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1 - vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1 - vmovdqu \_T1, a0 - PRORQ \_T1,7 - vmovdqu a1, a2 - PRORQ a1,42 - vpxor a0, \_T1, \_T1 - PRORQ \_T1, 1 - vpxor a2, a1, a1 - PRORQ a1, 19 - vpsrlq $7, a0, a0 - vpxor a0, \_T1, \_T1 - vpsrlq $6, a2, a2 - vpxor a2, a1, a1 - vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1 - vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1 - vpaddq a1, \_T1, \_T1 - - ROUND_00_15 \_T1,\i -.endm - - -# void sha512_x4_avx2(void *STATE, const int INP_SIZE) -# arg 1 : STATE : pointer to input data -# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1) -ENTRY(sha512_x4_avx2) - # general registers preserved in outer calling routine - # outer calling routine saves all the XMM registers - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - sub $STACK_SPACE1, %rsp - - # Load the pre-transposed incoming digest. 
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a - vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b - vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c - vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d - vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e - vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f - vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g - vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h - - lea K512_4(%rip),TBL - - # load the address of each of the 4 message lanes - # getting ready to transpose input onto stack - mov _data_ptr+0*PTR_SZ(STATE),inp0 - mov _data_ptr+1*PTR_SZ(STATE),inp1 - mov _data_ptr+2*PTR_SZ(STATE),inp2 - mov _data_ptr+3*PTR_SZ(STATE),inp3 - - xor IDX, IDX -lloop: - xor ROUND, ROUND - - # save old digest - vmovdqu a, _digest(%rsp) - vmovdqu b, _digest+1*SZ4(%rsp) - vmovdqu c, _digest+2*SZ4(%rsp) - vmovdqu d, _digest+3*SZ4(%rsp) - vmovdqu e, _digest+4*SZ4(%rsp) - vmovdqu f, _digest+5*SZ4(%rsp) - vmovdqu g, _digest+6*SZ4(%rsp) - vmovdqu h, _digest+7*SZ4(%rsp) - i = 0 -.rep 4 - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP - VMOVPD i*32(inp0, IDX), TT2 - VMOVPD i*32(inp1, IDX), TT1 - VMOVPD i*32(inp2, IDX), TT4 - VMOVPD i*32(inp3, IDX), TT3 - TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5 - vpshufb TMP, TT0, TT0 - vpshufb TMP, TT1, TT1 - vpshufb TMP, TT2, TT2 - vpshufb TMP, TT3, TT3 - ROUND_00_15 TT0,(i*4+0) - ROUND_00_15 TT1,(i*4+1) - ROUND_00_15 TT2,(i*4+2) - ROUND_00_15 TT3,(i*4+3) - i = (i+1) -.endr - add $128, IDX - - i = (i*4) - - jmp Lrounds_16_xx -.align 16 -Lrounds_16_xx: -.rep 16 - ROUND_16_XX T1, i - i = (i+1) -.endr - cmp $0xa00,ROUND - jb Lrounds_16_xx - - # add old digest - vpaddq _digest(%rsp), a, a - vpaddq _digest+1*SZ4(%rsp), b, b - vpaddq _digest+2*SZ4(%rsp), c, c - vpaddq _digest+3*SZ4(%rsp), d, d - vpaddq _digest+4*SZ4(%rsp), e, e - vpaddq _digest+5*SZ4(%rsp), f, f - vpaddq _digest+6*SZ4(%rsp), g, g - vpaddq _digest+7*SZ4(%rsp), h, h - - sub $1, INP_SIZE # unit is blocks - jne lloop - - # write back to memory (state object) the transposed digest - vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE) - - # update input data pointers - add IDX, inp0 - mov inp0, _data_ptr+0*PTR_SZ(STATE) - add IDX, inp1 - mov inp1, _data_ptr+1*PTR_SZ(STATE) - add IDX, inp2 - mov inp2, _data_ptr+2*PTR_SZ(STATE) - add IDX, inp3 - mov inp3, _data_ptr+3*PTR_SZ(STATE) - - #;;;;;;;;;;;;;;; - #; Postamble - add $STACK_SPACE1, %rsp - # restore callee-saved clobbered registers - - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - # outer calling routine restores XMM and other GP registers - ret -ENDPROC(sha512_x4_avx2) - -.section .rodata.K512_4, "a", @progbits -.align 64 -K512_4: - .octa 0x428a2f98d728ae22428a2f98d728ae22,\ - 0x428a2f98d728ae22428a2f98d728ae22 - .octa 0x7137449123ef65cd7137449123ef65cd,\ - 0x7137449123ef65cd7137449123ef65cd - .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\ - 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f - .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\ - 0xe9b5dba58189dbbce9b5dba58189dbbc - .octa 0x3956c25bf348b5383956c25bf348b538,\ - 0x3956c25bf348b5383956c25bf348b538 - .octa 0x59f111f1b605d01959f111f1b605d019,\ - 0x59f111f1b605d01959f111f1b605d019 - .octa 0x923f82a4af194f9b923f82a4af194f9b,\ - 0x923f82a4af194f9b923f82a4af194f9b - .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\ - 0xab1c5ed5da6d8118ab1c5ed5da6d8118 - .octa 
0xd807aa98a3030242d807aa98a3030242,\ - 0xd807aa98a3030242d807aa98a3030242 - .octa 0x12835b0145706fbe12835b0145706fbe,\ - 0x12835b0145706fbe12835b0145706fbe - .octa 0x243185be4ee4b28c243185be4ee4b28c,\ - 0x243185be4ee4b28c243185be4ee4b28c - .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\ - 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2 - .octa 0x72be5d74f27b896f72be5d74f27b896f,\ - 0x72be5d74f27b896f72be5d74f27b896f - .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\ - 0x80deb1fe3b1696b180deb1fe3b1696b1 - .octa 0x9bdc06a725c712359bdc06a725c71235,\ - 0x9bdc06a725c712359bdc06a725c71235 - .octa 0xc19bf174cf692694c19bf174cf692694,\ - 0xc19bf174cf692694c19bf174cf692694 - .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\ - 0xe49b69c19ef14ad2e49b69c19ef14ad2 - .octa 0xefbe4786384f25e3efbe4786384f25e3,\ - 0xefbe4786384f25e3efbe4786384f25e3 - .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\ - 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5 - .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\ - 0x240ca1cc77ac9c65240ca1cc77ac9c65 - .octa 0x2de92c6f592b02752de92c6f592b0275,\ - 0x2de92c6f592b02752de92c6f592b0275 - .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\ - 0x4a7484aa6ea6e4834a7484aa6ea6e483 - .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\ - 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4 - .octa 0x76f988da831153b576f988da831153b5,\ - 0x76f988da831153b576f988da831153b5 - .octa 0x983e5152ee66dfab983e5152ee66dfab,\ - 0x983e5152ee66dfab983e5152ee66dfab - .octa 0xa831c66d2db43210a831c66d2db43210,\ - 0xa831c66d2db43210a831c66d2db43210 - .octa 0xb00327c898fb213fb00327c898fb213f,\ - 0xb00327c898fb213fb00327c898fb213f - .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\ - 0xbf597fc7beef0ee4bf597fc7beef0ee4 - .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\ - 0xc6e00bf33da88fc2c6e00bf33da88fc2 - .octa 0xd5a79147930aa725d5a79147930aa725,\ - 0xd5a79147930aa725d5a79147930aa725 - .octa 0x06ca6351e003826f06ca6351e003826f,\ - 0x06ca6351e003826f06ca6351e003826f - .octa 0x142929670a0e6e70142929670a0e6e70,\ - 0x142929670a0e6e70142929670a0e6e70 - .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\ - 0x27b70a8546d22ffc27b70a8546d22ffc - .octa 0x2e1b21385c26c9262e1b21385c26c926,\ - 0x2e1b21385c26c9262e1b21385c26c926 - .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\ - 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed - .octa 0x53380d139d95b3df53380d139d95b3df,\ - 0x53380d139d95b3df53380d139d95b3df - .octa 0x650a73548baf63de650a73548baf63de,\ - 0x650a73548baf63de650a73548baf63de - .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\ - 0x766a0abb3c77b2a8766a0abb3c77b2a8 - .octa 0x81c2c92e47edaee681c2c92e47edaee6,\ - 0x81c2c92e47edaee681c2c92e47edaee6 - .octa 0x92722c851482353b92722c851482353b,\ - 0x92722c851482353b92722c851482353b - .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\ - 0xa2bfe8a14cf10364a2bfe8a14cf10364 - .octa 0xa81a664bbc423001a81a664bbc423001,\ - 0xa81a664bbc423001a81a664bbc423001 - .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\ - 0xc24b8b70d0f89791c24b8b70d0f89791 - .octa 0xc76c51a30654be30c76c51a30654be30,\ - 0xc76c51a30654be30c76c51a30654be30 - .octa 0xd192e819d6ef5218d192e819d6ef5218,\ - 0xd192e819d6ef5218d192e819d6ef5218 - .octa 0xd69906245565a910d69906245565a910,\ - 0xd69906245565a910d69906245565a910 - .octa 0xf40e35855771202af40e35855771202a,\ - 0xf40e35855771202af40e35855771202a - .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\ - 0x106aa07032bbd1b8106aa07032bbd1b8 - .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\ - 0x19a4c116b8d2d0c819a4c116b8d2d0c8 - .octa 0x1e376c085141ab531e376c085141ab53,\ - 0x1e376c085141ab531e376c085141ab53 - .octa 0x2748774cdf8eeb992748774cdf8eeb99,\ - 0x2748774cdf8eeb992748774cdf8eeb99 - .octa 
0x34b0bcb5e19b48a834b0bcb5e19b48a8,\ - 0x34b0bcb5e19b48a834b0bcb5e19b48a8 - .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\ - 0x391c0cb3c5c95a63391c0cb3c5c95a63 - .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\ - 0x4ed8aa4ae3418acb4ed8aa4ae3418acb - .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\ - 0x5b9cca4f7763e3735b9cca4f7763e373 - .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\ - 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3 - .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\ - 0x748f82ee5defb2fc748f82ee5defb2fc - .octa 0x78a5636f43172f6078a5636f43172f60,\ - 0x78a5636f43172f6078a5636f43172f60 - .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\ - 0x84c87814a1f0ab7284c87814a1f0ab72 - .octa 0x8cc702081a6439ec8cc702081a6439ec,\ - 0x8cc702081a6439ec8cc702081a6439ec - .octa 0x90befffa23631e2890befffa23631e28,\ - 0x90befffa23631e2890befffa23631e28 - .octa 0xa4506cebde82bde9a4506cebde82bde9,\ - 0xa4506cebde82bde9a4506cebde82bde9 - .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\ - 0xbef9a3f7b2c67915bef9a3f7b2c67915 - .octa 0xc67178f2e372532bc67178f2e372532b,\ - 0xc67178f2e372532bc67178f2e372532b - .octa 0xca273eceea26619cca273eceea26619c,\ - 0xca273eceea26619cca273eceea26619c - .octa 0xd186b8c721c0c207d186b8c721c0c207,\ - 0xd186b8c721c0c207d186b8c721c0c207 - .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\ - 0xeada7dd6cde0eb1eeada7dd6cde0eb1e - .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\ - 0xf57d4f7fee6ed178f57d4f7fee6ed178 - .octa 0x06f067aa72176fba06f067aa72176fba,\ - 0x06f067aa72176fba06f067aa72176fba - .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\ - 0x0a637dc5a2c898a60a637dc5a2c898a6 - .octa 0x113f9804bef90dae113f9804bef90dae,\ - 0x113f9804bef90dae113f9804bef90dae - .octa 0x1b710b35131c471b1b710b35131c471b,\ - 0x1b710b35131c471b1b710b35131c471b - .octa 0x28db77f523047d8428db77f523047d84,\ - 0x28db77f523047d8428db77f523047d84 - .octa 0x32caab7b40c7249332caab7b40c72493,\ - 0x32caab7b40c7249332caab7b40c72493 - .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\ - 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc - .octa 0x431d67c49c100d4c431d67c49c100d4c,\ - 0x431d67c49c100d4c431d67c49c100d4c - .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\ - 0x4cc5d4becb3e42b64cc5d4becb3e42b6 - .octa 0x597f299cfc657e2a597f299cfc657e2a,\ - 0x597f299cfc657e2a597f299cfc657e2a - .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\ - 0x5fcb6fab3ad6faec5fcb6fab3ad6faec - .octa 0x6c44198c4a4758176c44198c4a475817,\ - 0x6c44198c4a4758176c44198c4a475817 - -.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 - .octa 0x18191a1b1c1d1e1f1011121314151617 diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 06fc70cf5433df90a924471fb023a6366bdd826a..e71890149ce0e73e61c1c340a1d71396f7c13271 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -3,8 +3,6 @@ # Makefile for the x86 low level entry code # -OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y - CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 352e70cd33e80b99186e892b4080dfe481ad5dc4..b1cf149d62ed81811e9ee5326e20ce9aa5d12702 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -6,6 +6,8 @@ #include #include #include +#include +#include /* @@ -144,27 +146,19 @@ For 32-bit we have the following conventions - kernel is built with .endif .endm -.macro POP_REGS pop_rdi=1 skip_r11rcx=0 +.macro POP_REGS pop_rdi=1 popq %r15 popq 
%r14 popq %r13 popq %r12 popq %rbp popq %rbx - .if \skip_r11rcx - popq %rsi - .else popq %r11 - .endif popq %r10 popq %r9 popq %r8 popq %rax - .if \skip_r11rcx - popq %rsi - .else popq %rcx - .endif popq %rdx popq %rsi .if \pop_rdi @@ -329,6 +323,79 @@ For 32-bit we have the following conventions - kernel is built with #endif +/* + * IBRS kernel mitigation for Spectre_v2. + * + * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers + * the regs it uses (AX, CX, DX). Must be called before the first RET + * instruction (NOTE! UNTRAIN_RET includes a RET instruction) + * + * The optional argument is used to save/restore the current value, + * which is used on the paranoid paths. + * + * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set. + */ +.macro IBRS_ENTER save_reg + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS + movl $MSR_IA32_SPEC_CTRL, %ecx + +.ifnb \save_reg + rdmsr + shl $32, %rdx + or %rdx, %rax + mov %rax, \save_reg + test $SPEC_CTRL_IBRS, %eax + jz .Ldo_wrmsr_\@ + lfence + jmp .Lend_\@ +.Ldo_wrmsr_\@: +.endif + + movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx + movl %edx, %eax + shr $32, %rdx + wrmsr +.Lend_\@: +.endm + +/* + * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX) + * regs. Must be called after the last RET. + */ +.macro IBRS_EXIT save_reg + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS + movl $MSR_IA32_SPEC_CTRL, %ecx + +.ifnb \save_reg + mov \save_reg, %rdx +.else + movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx + andl $(~SPEC_CTRL_IBRS), %edx +.endif + + movl %edx, %eax + shr $32, %rdx + wrmsr +.Lend_\@: +.endm + +/* + * Mitigate Spectre v1 for conditional swapgs code paths. + * + * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to + * prevent a speculative swapgs when coming from kernel space. + * + * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path, + * to prevent the swapgs from getting speculatively skipped when coming from + * user space. 
+ */ +.macro FENCE_SWAPGS_USER_ENTRY + ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER +.endm +.macro FENCE_SWAPGS_KERNEL_ENTRY + ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL +.endm + #endif /* CONFIG_X86_64 */ /* @@ -337,7 +404,7 @@ For 32-bit we have the following conventions - kernel is built with */ .macro CALL_enter_from_user_mode #ifdef CONFIG_CONTEXT_TRACKING -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0 #endif call enter_from_user_mode diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 3b2490b81918128a61f6df1807788436d4f8ceb7..d3944912cc8122a4fd110f4a9488a1911b544118 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -31,6 +31,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -212,6 +213,9 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) #endif user_enter_irqoff(); + + mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define SYSCALL_EXIT_WORK_FLAGS \ @@ -288,6 +292,13 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs) if (likely(nr < NR_syscalls)) { nr = array_index_nospec(nr, NR_syscalls); regs->ax = sys_call_table[nr](regs); + } else { + if (nr == 425) + regs->ax = __x64_sys_io_uring_setup(regs); + else if (likely(nr == 426)) + regs->ax = __x64_sys_io_uring_enter(regs); + else if (nr == 427) + regs->ax = __x64_sys_io_uring_register(regs); } syscall_return_slowpath(regs); diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index fbbf1ba57ec67cd37a86706037e93a91ef572b8c..a009e317f8b2f813f290194e6ffacab124746b05 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -648,6 +648,7 @@ ENTRY(__switch_to_asm) pushl %ebx pushl %edi pushl %esi + pushfl /* switch stack */ movl %esp, TASK_threadsp(%eax) @@ -658,7 +659,6 @@ ENTRY(__switch_to_asm) movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif -#ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated @@ -667,9 +667,9 @@ ENTRY(__switch_to_asm) * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW -#endif /* restore callee-saved registers */ + popfl popl %esi popl %edi popl %ebx @@ -1096,6 +1096,30 @@ ENTRY(irq_entries_start) .endr END(irq_entries_start) +#ifdef CONFIG_X86_LOCAL_APIC + .align 8 +ENTRY(spurious_entries_start) + vector=FIRST_SYSTEM_VECTOR + .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) + pushl $(~vector+0x80) /* Note: always in signed byte range */ + vector=vector+1 + jmp common_spurious + .align 8 + .endr +END(spurious_entries_start) + +common_spurious: + ASM_CLAC + addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ + SAVE_ALL switch_stacks=1 + ENCODE_FRAME_POINTER + TRACE_IRQS_OFF + movl %esp, %eax + call smp_spurious_interrupt + jmp ret_from_intr +ENDPROC(common_spurious) +#endif + /* * the CPU automatically disables interrupts when executing an IRQ vector, * so IRQ-flags tracing has to follow that: diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f95dcb209fdffce611edeb0fa79dc2cd83bd9418..994e3ea4c8b0afb8d79420d46223944c1d5f96ec 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -235,6 +235,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) /* IRQs are off. 
*/ movq %rax, %rdi movq %rsp, %rsi + + /* clobbers %rax, make sure it is after saving the syscall nr */ + IBRS_ENTER + call do_syscall_64 /* returns with IRQs disabled */ TRACE_IRQS_IRETQ /* we're about to change IF */ @@ -311,9 +315,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) * perf profiles. Nothing jumps here. */ syscall_return_via_sysret: - /* rcx and r11 are already restored (see code above) */ - UNWIND_HINT_EMPTY - POP_REGS pop_rdi=0 skip_r11rcx=1 + IBRS_EXIT + POP_REGS pop_rdi=0 /* * Now all regs are restored except RSP and RDI. @@ -321,6 +324,7 @@ syscall_return_via_sysret: */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ @@ -352,6 +356,7 @@ ENTRY(__switch_to_asm) pushq %r13 pushq %r14 pushq %r15 + pushfq /* switch stack */ movq %rsp, TASK_threadsp(%rdi) @@ -362,7 +367,6 @@ ENTRY(__switch_to_asm) movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif -#ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated @@ -371,9 +375,9 @@ ENTRY(__switch_to_asm) * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW -#endif /* restore callee-saved registers */ + popfq popq %r15 popq %r14 popq %r13 @@ -436,6 +440,18 @@ ENTRY(irq_entries_start) .endr END(irq_entries_start) + .align 8 +ENTRY(spurious_entries_start) + vector=FIRST_SYSTEM_VECTOR + .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) + UNWIND_HINT_IRET_REGS + pushq $(~vector+0x80) /* Note: always in signed byte range */ + jmp common_spurious + .align 8 + vector=vector+1 + .endr +END(spurious_entries_start) + .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY pushq %rax @@ -561,14 +577,14 @@ END(irq_entries_start) * +----------------------------------------------------+ */ ENTRY(interrupt_entry) - UNWIND_HINT_FUNC + UNWIND_HINT_IRET_REGS offset=16 ASM_CLAC cld testb $3, CS-ORIG_RAX+8(%rsp) jz 1f SWAPGS - + FENCE_SWAPGS_USER_ENTRY /* * Switch to the thread stack. The IRET frame and orig_ax are * on the stack, as well as the return address. RDI..R12 are @@ -593,13 +609,15 @@ ENTRY(interrupt_entry) pushq 5*8(%rdi) /* regs->eflags */ pushq 4*8(%rdi) /* regs->cs */ pushq 3*8(%rdi) /* regs->ip */ + UNWIND_HINT_IRET_REGS pushq 2*8(%rdi) /* regs->orig_ax */ pushq 8(%rdi) /* return address */ - UNWIND_HINT_FUNC movq (%rdi), %rdi + jmp 2f 1: - + FENCE_SWAPGS_KERNEL_ENTRY +2: PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 @@ -627,14 +645,25 @@ ENTRY(interrupt_entry) ret END(interrupt_entry) +_ASM_NOKPROBE(interrupt_entry) /* Interrupt entry/exit. */ - /* - * The interrupt stubs push (~vector+0x80) onto the stack and - * then jump to common_interrupt. - */ +/* + * The interrupt stubs push (~vector+0x80) onto the stack and + * then jump to common_spurious/interrupt. + */ +common_spurious: + addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ + call interrupt_entry + UNWIND_HINT_REGS indirect=1 + call smp_spurious_interrupt /* rdi points to pt_regs */ + jmp ret_from_intr +END(common_spurious) +_ASM_NOKPROBE(common_spurious) + +/* common_interrupt is a hotpath. Align it */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ @@ -658,6 +687,7 @@ GLOBAL(retint_user) TRACE_IRQS_IRETQ GLOBAL(swapgs_restore_regs_and_return_to_usermode) + IBRS_EXIT #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. 
*/ testb $3, CS(%rsp) @@ -673,6 +703,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY /* Copy the IRET frame to the trampoline stack. */ pushq 6*8(%rdi) /* SS */ @@ -826,6 +857,7 @@ native_irq_return_ldt: jmp native_irq_return_iret #endif END(common_interrupt) +_ASM_NOKPROBE(common_interrupt) /* * APIC interrupts. @@ -840,6 +872,7 @@ ENTRY(\sym) call \do_sym /* rdi points to pt_regs */ jmp ret_from_intr END(\sym) +_ASM_NOKPROBE(\sym) .endm /* Make sure APIC interrupt handlers end up in the irqentry section: */ @@ -900,7 +933,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt */ #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) -.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 +.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0 ENTRY(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 @@ -920,6 +953,20 @@ ENTRY(\sym) jnz .Lfrom_usermode_switch_stack_\@ .endif + .if \create_gap == 1 + /* + * If coming from kernel space, create a 6-word gap to allow the + * int3 handler to emulate a call instruction. + */ + testb $3, CS-ORIG_RAX(%rsp) + jnz .Lfrom_usermode_no_gap_\@ + .rept 6 + pushq 5*8(%rsp) + .endr + UNWIND_HINT_IRET_REGS offset=8 +.Lfrom_usermode_no_gap_\@: + .endif + .if \paranoid call paranoid_entry .else @@ -984,6 +1031,7 @@ ENTRY(\sym) jmp error_exit .endif +_ASM_NOKPROBE(\sym) END(\sym) .endm @@ -1148,13 +1196,12 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 +idtentry int3 do_int3 has_error_code=0 create_gap=1 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN idtentry xennmi do_nmi has_error_code=0 idtentry xendebug do_debug has_error_code=0 -idtentry xenint3 do_int3 has_error_code=0 #endif idtentry general_protection do_general_protection has_error_code=1 @@ -1199,7 +1246,20 @@ ENTRY(paranoid_entry) */ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 - ret + /* + * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an + * unconditional CR3 write, even in the PTI case. So do an lfence + * to prevent GS speculation, regardless of whether PTI is enabled. + */ + FENCE_SWAPGS_KERNEL_ENTRY + + /* + * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like + * CR3 above, keep the old value in a callee saved register. + */ + IBRS_ENTER save_reg=%r15 + + RET END(paranoid_entry) /* @@ -1227,12 +1287,20 @@ ENTRY(paranoid_exit) jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG + + /* + * Must restore IBRS state before both CR3 and %GS since we need access + * to the per-CPU x86_spec_ctrl_shadow variable. + */ + IBRS_EXIT save_reg=%r15 + /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 .Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel END(paranoid_exit) + /* * Save all registers in pt_regs, and switch GS if needed. */ @@ -1249,8 +1317,10 @@ ENTRY(error_entry) * from user mode due to an IRET fault. */ SWAPGS + FENCE_SWAPGS_USER_ENTRY /* We have user CR3. Change to kernel CR3. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + IBRS_ENTER .Lerror_entry_from_usermode_after_swapgs: /* Put us onto the real thread stack. 
*/ @@ -1270,6 +1340,8 @@ ENTRY(error_entry) CALL_enter_from_user_mode ret +.Lerror_entry_done_lfence: + FENCE_SWAPGS_KERNEL_ENTRY .Lerror_entry_done: TRACE_IRQS_OFF ret @@ -1288,7 +1360,7 @@ ENTRY(error_entry) cmpq %rax, RIP+8(%rsp) je .Lbstep_iret cmpq $.Lgs_change, RIP+8(%rsp) - jne .Lerror_entry_done + jne .Lerror_entry_done_lfence /* * hack: .Lgs_change can fail with user gsbase. If this happens, fix up @@ -1296,6 +1368,7 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS + FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done @@ -1310,7 +1383,9 @@ ENTRY(error_entry) * gsbase and CR3. Switch to kernel gsbase and CR3: */ SWAPGS + FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + IBRS_ENTER /* * Pretend that the exception came from user mode: set up pt_regs @@ -1401,6 +1476,7 @@ ENTRY(nmi) swapgs cld + FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp @@ -1415,6 +1491,8 @@ ENTRY(nmi) PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER + IBRS_ENTER + /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're @@ -1638,6 +1716,9 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi + /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */ + IBRS_EXIT save_reg=%r15 + /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 @@ -1675,11 +1756,17 @@ nmi_restore: iretq END(nmi) +#ifndef CONFIG_IA32_EMULATION +/* + * This handles SYSCALL from 32-bit code. There is no way to program + * MSRs to fully disable 32-bit SYSCALL. + */ ENTRY(ignore_sysret) UNWIND_HINT_EMPTY mov $-ENOSYS, %eax sysret END(ignore_sysret) +#endif ENTRY(rewind_stack_do_exit) UNWIND_HINT_FUNC diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 7d0df78db727296d1c4451e3a930033669f47aa3..a46e70aa603d983ed46cf8914a0d669aa4f6dfa4 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -4,7 +4,6 @@ * * Copyright 2000-2002 Andi Kleen, SuSE Labs. */ -#include "calling.h" #include #include #include @@ -17,6 +16,8 @@ #include #include +#include "calling.h" + .section .entry.text, "ax" /* @@ -47,11 +48,14 @@ * 0(%ebp) arg6 */ ENTRY(entry_SYSENTER_compat) + UNWIND_HINT_EMPTY /* Interrupts are off on entry. */ SWAPGS /* We are about to clobber %rsp anyway, clobbering here is OK */ - SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + pushq %rax + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + popq %rax movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp @@ -104,8 +108,13 @@ ENTRY(entry_SYSENTER_compat) xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ xorl %r15d, %r15d /* nospec r15 */ + + UNWIND_HINT_REGS + cld + IBRS_ENTER + /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. To save a few cycles, we can check whether @@ -147,7 +156,7 @@ ENTRY(entry_SYSENTER_compat) popfq jmp .Lsysenter_flags_fixed GLOBAL(__end_entry_SYSENTER_compat) -ENDPROC(entry_SYSENTER_compat) +END(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. @@ -197,6 +206,7 @@ ENDPROC(entry_SYSENTER_compat) * 0(%esp) arg6 */ ENTRY(entry_SYSCALL_compat) + UNWIND_HINT_EMPTY /* Interrupts are off on entry. 
*/ swapgs @@ -247,12 +257,16 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq $0 /* pt_regs->r15 = 0 */ xorl %r15d, %r15d /* nospec r15 */ + UNWIND_HINT_REGS + /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. */ TRACE_IRQS_OFF + IBRS_ENTER + movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ @@ -262,6 +276,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) /* Opportunistic SYSRET */ sysret32_from_system_call: TRACE_IRQS_ON /* User mode traces as IRQs on. */ + + IBRS_EXIT + movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ @@ -335,6 +352,7 @@ END(entry_SYSCALL_compat) * ebp arg6 */ ENTRY(entry_INT80_compat) + UNWIND_HINT_EMPTY /* * Interrupts are off on entry. */ @@ -356,7 +374,8 @@ ENTRY(entry_INT80_compat) /* Need to switch before accessing the thread stack. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi - movq %rsp, %rdi + /* In the Xen PV case we already run on the thread stack. */ + ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp pushq 6*8(%rdi) /* regs->ss */ @@ -365,8 +384,9 @@ ENTRY(entry_INT80_compat) pushq 3*8(%rdi) /* regs->cs */ pushq 2*8(%rdi) /* regs->ip */ pushq 1*8(%rdi) /* regs->orig_ax */ - pushq (%rdi) /* pt_regs->di */ +.Lint80_keep_stack: + pushq %rsi /* pt_regs->si */ xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ @@ -394,6 +414,9 @@ ENTRY(entry_INT80_compat) xorl %r14d, %r14d /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ xorl %r15d, %r15d /* nospec r15 */ + + UNWIND_HINT_REGS + cld /* @@ -401,6 +424,7 @@ ENTRY(entry_INT80_compat) * gate turned them off. */ TRACE_IRQS_OFF + IBRS_ENTER movq %rsp, %rdi call do_int80_syscall_32 diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 141d415a8c8098e9bd9747c94ee84e4de843c9f8..5bfe2243a08f882c4ab622cd87799ac1a28ff3c2 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) CPPFLAGS_vdso.lds += -P -C VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \ - -z max-page-size=4096 -z common-page-size=4096 + -z max-page-size=4096 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) @@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \ - -z max-page-size=4096 -z common-page-size=4096 + -z max-page-size=4096 # x32-rebranded versions vobjx32s-y := $(vobjs-y:.o=-x32.o) @@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \ - $(call ld-option, --build-id) -Bsymbolic + $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \ + -Bsymbolic GCOV_PROFILE := n # diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index e48ca3afa0912cc8bb03bd6dba84b0999abe1982..8a88e738f87db7d49265814cfc99e0d262677fa5 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); extern time_t __vdso_time(time_t *t); #ifdef CONFIG_PARAVIRT_CLOCK -extern u8 pvclock_page +extern u8 pvclock_page[PAGE_SIZE] __attribute__((visibility("hidden"))); #endif #ifdef CONFIG_HYPERV_TSCPAGE -extern u8 hvclock_page 
+extern u8 hvclock_page[PAGE_SIZE] __attribute__((visibility("hidden"))); #endif @@ -191,13 +191,24 @@ notrace static inline u64 vgetsns(int *mode) if (gtod->vclock_mode == VCLOCK_TSC) cycles = vread_tsc(); + + /* + * For any memory-mapped vclock type, we need to make sure that gcc + * doesn't cleverly hoist a load before the mode check. Otherwise we + * might end up touching the memory-mapped page even if the vclock in + * question isn't enabled, which will segfault. Hence the barriers. + */ #ifdef CONFIG_PARAVIRT_CLOCK - else if (gtod->vclock_mode == VCLOCK_PVCLOCK) + else if (gtod->vclock_mode == VCLOCK_PVCLOCK) { + barrier(); cycles = vread_pvclock(mode); + } #endif #ifdef CONFIG_HYPERV_TSCPAGE - else if (gtod->vclock_mode == VCLOCK_HVCLOCK) + else if (gtod->vclock_mode == VCLOCK_HVCLOCK) { + barrier(); cycles = vread_hvclock(mode); + } #endif else return 0; diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 5b8b556dbb12aa91664aac85fad9e0d5af0259cd..a1c31bb23170c8fa01890c367f38b9c8cca42a80 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -329,7 +329,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) static __init int vdso_setup(char *s) { vdso64_enabled = simple_strtoul(s, NULL, 0); - return 0; + return 1; } __setup("vdso=", vdso_setup); #endif @@ -343,7 +343,7 @@ static void vgetcpu_cpu_init(void *arg) #ifdef CONFIG_NUMA node = cpu_to_node(cpu); #endif - if (static_cpu_has(X86_FEATURE_RDTSCP)) + if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) write_rdtscp_aux((node << 12) | cpu); /* diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 82ed001e8909d69db9ab6827151534a921e2434d..2347f0b5cc5fce40e72539e70e78529c4a82ffbc 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -99,7 +99,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size) * sig_on_uaccess_err, this could go away. */ - if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) { + if (!access_ok((void __user *)ptr, size)) { siginfo_t info; struct thread_struct *thread = ¤t->thread; diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile index b8ccdb5c92442c6e5a05277e5a2839f956d198c8..ad4a7c789637e1a0d7b2abda494488435a7793db 100644 --- a/arch/x86/events/Makefile +++ b/arch/x86/events/Makefile @@ -2,3 +2,5 @@ obj-y += core.o obj-y += amd/ obj-$(CONFIG_X86_LOCAL_APIC) += msr.o obj-$(CONFIG_CPU_SUP_INTEL) += intel/ +obj-$(CONFIG_CPU_SUP_CENTAUR) += zhaoxin/ +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin/ diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index c84584bb940280b56f3b7d6d5365803ec4364505..a58b71bc197bca1a9e2cfa1df9dbf60c4eaa87e0 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -3,10 +3,20 @@ #include #include #include +#include +#include #include +#include #include "../perf_event.h" +static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp); +static unsigned long perf_nmi_window; + +/* AMD Event 0xFFF: Merge. 
Used with Large Increment per Cycle events */ +#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL) +#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE) + static __initconst const u64 amd_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -112,23 +122,144 @@ static __initconst const u64 amd_hw_cache_event_ids }, }; +static __initconst const u64 amd_hw_cache_event_ids_f17h + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */ + [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */ + [C(RESULT_MISS)] = 0, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */ + [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */ + [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */ + [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */ + [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +}; + /* - * AMD Performance Monitor K7 and later. 
+ * AMD Performance Monitor K7 and later, up to and including Family 16h: */ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, - [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, + [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ +}; + +/* + * AMD Performance Monitor Family 17h and later: + */ +static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187, }; static u64 amd_pmu_event_map(int hw_event) { + if (boot_cpu_data.x86 >= 0x17) + return amd_f17h_perfmon_event_map[hw_event]; + return amd_perfmon_event_map[hw_event]; } @@ -173,6 +304,25 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel) return offset; } +/* + * AMD64 events are detected based on their event codes. + */ +static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) +{ + return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); +} + +static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc) +{ + if (!(x86_pmu.flags & PMU_FL_PAIR)) + return false; + + switch (amd_get_event_code(hwc)) { + case 0x003: return true; /* Retired SSE/AVX FLOPs */ + default: return false; + } +} + static int amd_core_hw_config(struct perf_event *event) { if (event->attr.exclude_host && event->attr.exclude_guest) @@ -188,15 +338,10 @@ static int amd_core_hw_config(struct perf_event *event) else if (event->attr.exclude_guest) event->hw.config |= AMD64_EVENTSEL_HOSTONLY; - return 0; -} + if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw)) + event->hw.flags |= PERF_X86_EVENT_PAIR; -/* - * AMD64 events are detected based on their event codes. - */ -static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) -{ - return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); + return 0; } static inline int amd_is_nb_event(struct hw_perf_event *hwc) @@ -429,6 +574,131 @@ static void amd_pmu_cpu_dead(int cpu) } } +/* + * When a PMC counter overflows, an NMI is used to process the event and + * reset the counter. NMI latency can result in the counter being updated + * before the NMI can run, which can result in what appear to be spurious + * NMIs. This function is intended to wait for the NMI to run and reset + * the counter to avoid possible unhandled NMI messages. + */ +#define OVERFLOW_WAIT_COUNT 50 + +static void amd_pmu_wait_on_overflow(int idx) +{ + unsigned int i; + u64 counter; + + /* + * Wait for the counter to be reset if it has overflowed. 
This loop + * should exit very, very quickly, but just in case, don't wait + * forever... + */ + for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { + rdmsrl(x86_pmu_event_addr(idx), counter); + if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) + break; + + /* Might be in IRQ context, so can't sleep */ + udelay(1); + } +} + +static void amd_pmu_disable_all(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx; + + x86_pmu_disable_all(); + + /* + * This shouldn't be called from NMI context, but add a safeguard here + * to return, since if we're in NMI context we can't wait for an NMI + * to reset an overflowed counter value. + */ + if (in_nmi()) + return; + + /* + * Check each counter for overflow and wait for it to be reset by the + * NMI if it has overflowed. This relies on the fact that all active + * counters are always enabled when this function is caled and + * ARCH_PERFMON_EVENTSEL_INT is always set. + */ + for (idx = 0; idx < x86_pmu.num_counters; idx++) { + if (!test_bit(idx, cpuc->active_mask)) + continue; + + amd_pmu_wait_on_overflow(idx); + } +} + +static void amd_pmu_disable_event(struct perf_event *event) +{ + x86_pmu_disable_event(event); + + /* + * This can be called from NMI context (via x86_pmu_stop). The counter + * may have overflowed, but either way, we'll never see it get reset + * by the NMI if we're already in the NMI. And the NMI latency support + * below will take care of any pending NMI that might have been + * generated by the overflow. + */ + if (in_nmi()) + return; + + amd_pmu_wait_on_overflow(event->hw.idx); +} + +/* + * Because of NMI latency, if multiple PMC counters are active or other sources + * of NMIs are received, the perf NMI handler can handle one or more overflowed + * PMC counters outside of the NMI associated with the PMC overflow. If the NMI + * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel + * back-to-back NMI support won't be active. This PMC handler needs to take into + * account that this can occur, otherwise this could result in unknown NMI + * messages being issued. Examples of this is PMC overflow while in the NMI + * handler when multiple PMCs are active or PMC overflow while handling some + * other source of an NMI. + * + * Attempt to mitigate this by creating an NMI window in which un-handled NMIs + * received during this window will be claimed. This prevents extending the + * window past when it is possible that latent NMIs should be received. The + * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has + * handled a counter. When an un-handled NMI is received, it will be claimed + * only if arriving within that window. + */ +static int amd_pmu_handle_irq(struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int active, handled; + + /* + * Obtain the active count before calling x86_pmu_handle_irq() since + * it is possible that x86_pmu_handle_irq() may make a counter + * inactive (through x86_pmu_stop). + */ + active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX); + + /* Process any counter overflows */ + handled = x86_pmu_handle_irq(regs); + + /* + * If a counter was handled, record a timestamp such that un-handled + * NMIs will be claimed if arriving within that window. 
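amd_pmu_wait_on_overflow() above keys off the counter's top implemented bit: counters are armed with -period, which leaves that bit set, the bit drops once the count wraps, and it is set again only after the NMI handler reloads the counter. A minimal sketch of that test, assuming the usual 48-bit counter width:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* true once the NMI handler has rewritten the counter with -period again */
static bool counter_reprogrammed(uint64_t counter, unsigned int cntval_bits)
{
        return counter & (1ULL << (cntval_bits - 1));
}

int main(void)
{
        /* just-overflowed values are small, reloaded ones have bit 47 set */
        assert(!counter_reprogrammed(0x0000000000000007ULL, 48));
        assert(counter_reprogrammed(0x0000ffffffff0000ULL, 48));
        return 0;
}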
+ */ + if (handled) { + this_cpu_write(perf_nmi_tstamp, + jiffies + perf_nmi_window); + + return handled; + } + + if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp))) + return NMI_DONE; + + return NMI_HANDLED; +} + static struct event_constraint * amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -611,6 +881,29 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, } } +static struct event_constraint pair_constraint; + +static struct event_constraint * +amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (amd_is_pair_event_code(hwc)) + return &pair_constraint; + + return &unconstrained; +} + +static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (is_counter_pair(hwc)) + --cpuc->n_pair; +} + static ssize_t amd_event_sysfs_show(char *page, u64 config) { u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | @@ -621,11 +914,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config) static __initconst const struct x86_pmu amd_pmu = { .name = "AMD", - .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, + .handle_irq = amd_pmu_handle_irq, + .disable_all = amd_pmu_disable_all, .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, - .disable = x86_pmu_disable_event, + .disable = amd_pmu_disable_event, .hw_config = amd_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_K7_EVNTSEL0, @@ -654,25 +947,14 @@ static __initconst const struct x86_pmu amd_pmu = { static int __init amd_core_pmu_init(void) { + u64 even_ctr_mask = 0ULL; + int i; + if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) return 0; - switch (boot_cpu_data.x86) { - case 0x15: - pr_cont("Fam15h "); - x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; - break; - case 0x17: - pr_cont("Fam17h "); - /* - * In family 17h, there are no event constraints in the PMC hardware. - * We fallback to using default amd_get_event_constraints. - */ - break; - default: - pr_err("core perfctr but no constraints; unknown hardware!\n"); - return -ENODEV; - } + /* Avoid calulating the value each time in the NMI handler */ + perf_nmi_window = msecs_to_jiffies(100); /* * If core performance counter extensions exists, we must use @@ -688,6 +970,32 @@ static int __init amd_core_pmu_init(void) */ x86_pmu.amd_nb_constraints = 0; + if (boot_cpu_data.x86 == 0x15) { + pr_cont("Fam15h "); + x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; + } + if (boot_cpu_data.x86 >= 0x17) { + pr_cont("Fam17h+ "); + /* + * Family 17h and compatibles have constraints for Large + * Increment per Cycle events: they may only be assigned an + * even numbered counter that has a consecutive adjacent odd + * numbered counter following it. 
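Pairing is decided from the event code that amd_get_event_code() above reassembles from bits 7:0 and 35:32 of the event control value. A small worked check that the Merge event (0xFFF) and the Retired SSE/AVX FLOPs event select (0x003) decode as described:

#include <assert.h>
#include <stdint.h>

/* bits 7:0 and 35:32 of PERF_CTL hold the 12-bit event select */
static unsigned int event_code(uint64_t config)
{
        return ((config >> 24) & 0x0f00) | (config & 0x00ff);
}

int main(void)
{
        uint64_t merge = (0xFULL << 32) | 0xFFULL;      /* AMD_MERGE_EVENT */
        uint64_t flops = 0x003;                         /* Retired SSE/AVX FLOPs */

        assert(event_code(merge) == 0xFFF);
        assert(event_code(flops) == 0x003);             /* gets a paired counter on Fam17h+ */
        return 0;
}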
+ */ + for (i = 0; i < x86_pmu.num_counters - 1; i += 2) + even_ctr_mask |= 1 << i; + + pair_constraint = (struct event_constraint) + __EVENT_CONSTRAINT(0, even_ctr_mask, 0, + x86_pmu.num_counters / 2, 0, + PERF_X86_EVENT_PAIR); + + x86_pmu.get_event_constraints = amd_get_event_constraints_f17h; + x86_pmu.put_event_constraints = amd_put_event_constraints_f17h; + x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE; + x86_pmu.flags |= PMU_FL_PAIR; + } + pr_cont("core perfctr, "); return 0; } @@ -714,9 +1022,10 @@ __init int amd_pmu_init(void) x86_pmu.amd_nb_constraints = 0; } - /* Events are common for all AMDs */ - memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); + if (boot_cpu_data.x86 >= 0x17) + memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids)); + else + memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); return 0; } @@ -728,7 +1037,7 @@ void amd_pmu_enable_virt(void) cpuc->perf_ctr_virt_mask = 0; /* Reload all events */ - x86_pmu_disable_all(); + amd_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); @@ -746,7 +1055,7 @@ void amd_pmu_disable_virt(void) cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; /* Reload all events */ - x86_pmu_disable_all(); + amd_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index d50bb4dc065036181f7fc3b05182c7c8dce6b8ec..25123359c8aa9cb6c891f609cce2db1009877f80 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -89,6 +89,7 @@ struct perf_ibs { u64 max_period; unsigned long offset_mask[1]; int offset_max; + unsigned int fetch_count_reset_broken : 1; struct cpu_perf_ibs __percpu *pcpu; struct attribute **format_attrs; @@ -346,11 +347,18 @@ static u64 get_ibs_op_count(u64 config) { u64 count = 0; - if (config & IBS_OP_VAL) - count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */ - - if (ibs_caps & IBS_CAPS_RDWROPCNT) - count += (config & IBS_OP_CUR_CNT) >> 32; + /* + * If the internal 27-bit counter rolled over, the count is MaxCnt + * and the lower 7 bits of CurCnt are randomized. + * Otherwise CurCnt has the full 27-bit current counter value. 
+ */ + if (config & IBS_OP_VAL) { + count = (config & IBS_OP_MAX_CNT) << 4; + if (ibs_caps & IBS_CAPS_OPCNTEXT) + count += config & IBS_OP_MAX_CNT_EXT_MASK; + } else if (ibs_caps & IBS_CAPS_RDWROPCNT) { + count = (config & IBS_OP_CUR_CNT) >> 32; + } return count; } @@ -375,7 +383,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 config) { - wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask); + u64 tmp = hwc->config | config; + + if (perf_ibs->fetch_count_reset_broken) + wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); + + wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); } /* @@ -389,7 +402,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 config) { config &= ~perf_ibs->cnt_mask; - wrmsrl(hwc->config_base, config); + if (boot_cpu_data.x86 == 0x10) + wrmsrl(hwc->config_base, config); config &= ~perf_ibs->enable_mask; wrmsrl(hwc->config_base, config); } @@ -405,7 +419,7 @@ static void perf_ibs_start(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); - u64 period; + u64 period, config = 0; if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -414,13 +428,19 @@ static void perf_ibs_start(struct perf_event *event, int flags) hwc->state = 0; perf_ibs_set_period(perf_ibs, hwc, &period); + if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) { + config |= period & IBS_OP_MAX_CNT_EXT_MASK; + period &= ~IBS_OP_MAX_CNT_EXT_MASK; + } + config |= period >> 4; + /* * Set STARTED before enabling the hardware, such that a subsequent NMI * must observe it. */ set_bit(IBS_STARTED, pcpu->state); clear_bit(IBS_STOPPING, pcpu->state); - perf_ibs_enable_event(perf_ibs, hwc, period >> 4); + perf_ibs_enable_event(perf_ibs, hwc, config); perf_event_update_userpage(event); } @@ -564,7 +584,8 @@ static struct perf_ibs perf_ibs_op = { }, .msr = MSR_AMD64_IBSOPCTL, .config_mask = IBS_OP_CONFIG_MASK, - .cnt_mask = IBS_OP_MAX_CNT, + .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT | + IBS_OP_CUR_CNT_RAND, .enable_mask = IBS_OP_ENABLE, .valid_mask = IBS_OP_VAL, .max_period = IBS_OP_MAX_CNT << 4, @@ -586,7 +607,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) struct perf_ibs_data ibs_data; int offset, size, check_rip, offset_max, throttle = 0; unsigned int msr; - u64 *buf, *config, period; + u64 *buf, *config, period, new_config = 0; if (!test_bit(IBS_STARTED, pcpu->state)) { fail: @@ -625,7 +646,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) if (event->attr.sample_type & PERF_SAMPLE_RAW) offset_max = perf_ibs->offset_max; else if (check_rip) - offset_max = 2; + offset_max = 3; else offset_max = 1; do { @@ -635,18 +656,24 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) perf_ibs->offset_max, offset + 1); } while (offset < offset_max); + /* + * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately + * depending on their availability. + * Can't add to offset_max as they are staggered + */ if (event->attr.sample_type & PERF_SAMPLE_RAW) { - /* - * Read IbsBrTarget and IbsOpData4 separately - * depending on their availability. 
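perf_ibs_start() above splits the requested period between the classic MaxCnt field, which carries the period divided by 16, and, when IBS_CAPS_OPCNTEXT is available, seven high bits that are written unshifted. A self-contained sketch of that split and the matching read-back, assuming the usual layout (MaxCnt in config bits 15:0, the extension mask covering bits 26:20):

#include <assert.h>
#include <stdint.h>

#define IBS_OP_MAX_CNT          0x000000000000FFFFULL   /* period[19:4], in units of 16 */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL << 20)         /* period[26:20], unshifted */

static uint64_t encode_period(uint64_t period)
{
        uint64_t config = period & IBS_OP_MAX_CNT_EXT_MASK;

        period &= ~IBS_OP_MAX_CNT_EXT_MASK;
        return config | (period >> 4);
}

static uint64_t decode_period(uint64_t config)
{
        return ((config & IBS_OP_MAX_CNT) << 4) + (config & IBS_OP_MAX_CNT_EXT_MASK);
}

int main(void)
{
        uint64_t period = 0x5ABCDE0;    /* a multiple of 16 that needs the extended bits */

        assert(decode_period(encode_period(period)) == period);
        return 0;
}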
- * Can't add to offset_max as they are staggered - */ - if (ibs_caps & IBS_CAPS_BRNTRGT) { - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); - size++; + if (perf_ibs == &perf_ibs_op) { + if (ibs_caps & IBS_CAPS_BRNTRGT) { + rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); + size++; + } + if (ibs_caps & IBS_CAPS_OPDATA4) { + rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + size++; + } } - if (ibs_caps & IBS_CAPS_OPDATA4) { - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { + rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); size++; } } @@ -672,10 +699,21 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) throttle = perf_event_overflow(event, &data, ®s); out: - if (throttle) + if (throttle) { perf_ibs_stop(event, 0); - else - perf_ibs_enable_event(perf_ibs, hwc, period >> 4); + } else { + if (perf_ibs == &perf_ibs_op) { + if (ibs_caps & IBS_CAPS_OPCNTEXT) { + new_config = period & IBS_OP_MAX_CNT_EXT_MASK; + period &= ~IBS_OP_MAX_CNT_EXT_MASK; + } + if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL)) + new_config |= *config & IBS_OP_CUR_CNT_RAND; + } + new_config |= period >> 4; + + perf_ibs_enable_event(perf_ibs, hwc, new_config); + } perf_event_update_userpage(event); @@ -735,12 +773,26 @@ static __init void perf_event_ibs_init(void) { struct attribute **attr = ibs_op_format_attrs; + /* + * Some chips fail to reset the fetch count when it is written; instead + * they need a 0-1 transition of IbsFetchEn. + */ + if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18) + perf_ibs_fetch.fetch_count_reset_broken = 1; + perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); if (ibs_caps & IBS_CAPS_OPCNT) { perf_ibs_op.config_mask |= IBS_OP_CNT_CTL; *attr++ = &format_attr_cnt_ctl.attr; } + + if (ibs_caps & IBS_CAPS_OPCNTEXT) { + perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; + } + perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 3210fee27e7f9ac55a844d9709dc31ff14907e9c..55f37973f9f87ceb8d530a4504f799264893087c 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -84,12 +84,12 @@ static struct attribute_group amd_iommu_events_group = { }; struct amd_iommu_event_desc { - struct kobj_attribute attr; + struct device_attribute attr; const char *event; }; -static ssize_t _iommu_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t _iommu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct amd_iommu_event_desc *event = container_of(attr, struct amd_iommu_event_desc, attr); diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 8671de126eac09e0a63358d72305ce0a5e9f4f31..e436978ad324e76343e677b0397d40ac0211b148 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -183,6 +183,43 @@ static void amd_uncore_del(struct perf_event *event, int flags) hwc->idx = -1; } +/* + * Return a full thread and slice mask unless user + * has provided them + */ +static u64 l3_thread_slice_mask(u64 config) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? 
: AMD64_L3_THREAD_MASK); + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model >= 0x6 && + boot_cpu_data.x86_model <= 0xf) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + + /* + * If the user doesn't specify a threadmask, they're not trying to + * count core 0, so we enable all cores & threads. + * We'll also assume that they want to count slice 0 if they specify + * a threadmask and leave sliceid and enallslices unpopulated. + */ + if (!(config & AMD64_L3_F19H_THREAD_MASK)) + return AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_EN_ALL_CORES; + + return config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK | + AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_COREID_MASK); +} + static int amd_uncore_event_init(struct perf_event *event) { struct amd_uncore *uncore; @@ -210,16 +247,16 @@ static int amd_uncore_event_init(struct perf_event *event) hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->idx = -1; - /* - * SliceMask and ThreadMask need to be set for certain L3 events in - * Family 17h. For other events, the two fields do not affect the count. - */ - if (l3_mask) - hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK); - if (event->cpu < 0) return -EINVAL; + /* + * SliceMask and ThreadMask need to be set for certain L3 events. + * For other events, the two fields do not affect the count. + */ + if (l3_mask && is_llc_event(event)) + hwc->config |= l3_thread_slice_mask(event->attr.config); + uncore = event_to_amd_uncore(event); if (!uncore) return -ENODEV; @@ -260,47 +297,74 @@ static struct attribute_group amd_uncore_attr_group = { .attrs = amd_uncore_attrs, }; -/* - * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based - * on family - */ -#define AMD_FORMAT_ATTR(_dev, _name, _format) \ -static ssize_t \ -_dev##_show##_name(struct device *dev, \ - struct device_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ - return sprintf(page, _format "\n"); \ -} \ -static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev); - -/* Used for each uncore counter type */ -#define AMD_ATTRIBUTE(_name) \ -static struct attribute *amd_uncore_format_attr_##_name[] = { \ - &format_attr_event_##_name.attr, \ - &format_attr_umask.attr, \ - NULL, \ -}; \ -static struct attribute_group amd_uncore_format_group_##_name = { \ - .name = "format", \ - .attrs = amd_uncore_format_attr_##_name, \ -}; \ -static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \ - &amd_uncore_attr_group, \ - &amd_uncore_format_group_##_name, \ - NULL, \ +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); +DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */ +DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */ +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 
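l3_thread_slice_mask() above leans on the GNU "x ?: y" extension: whichever ThreadMask/SliceMask bits the caller supplied are kept, and the full sub-field is substituted when none were given. A compact sketch of that idiom, using an illustrative 8-bit thread mask at config bits 63:56:

#include <assert.h>
#include <stdint.h>

/* GNU extension: "a ?: b" evaluates to a when a is non-zero, otherwise b */
static uint64_t mask_or_default(uint64_t config, uint64_t mask)
{
        return (config & mask) ?: mask;
}

int main(void)
{
        uint64_t threadmask = 0xffULL << 56;    /* illustrative ThreadMask field */

        /* field left empty: count on every thread */
        assert(mask_or_default(0, threadmask) == threadmask);
        /* caller picked thread 0 only: keep that choice */
        assert(mask_or_default(1ULL << 56, threadmask) == 1ULL << 56);
        return 0;
}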
+DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask8, threadmask, "config:56-63"); /* F17h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */ + +static struct attribute *amd_uncore_df_format_attr[] = { + &format_attr_event12.attr, /* event14 if F17h+ */ + &format_attr_umask.attr, + NULL, }; -AMD_FORMAT_ATTR(event, , "config:0-7,32-35"); -AMD_FORMAT_ATTR(umask, , "config:8-15"); -AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60"); -AMD_FORMAT_ATTR(event, _l3, "config:0-7"); -AMD_ATTRIBUTE(df); -AMD_ATTRIBUTE(l3); +static struct attribute *amd_uncore_l3_format_attr[] = { + &format_attr_event12.attr, /* event8 if F17h+ */ + &format_attr_umask.attr, + NULL, /* slicemask if F17h, coreid if F19h */ + NULL, /* threadmask8 if F17h, enallslices if F19h */ + NULL, /* enallcores if F19h */ + NULL, /* sliceid if F19h */ + NULL, /* threadmask2 if F19h */ + NULL, +}; + +static struct attribute_group amd_uncore_df_format_group = { + .name = "format", + .attrs = amd_uncore_df_format_attr, +}; + +static struct attribute_group amd_uncore_l3_format_group = { + .name = "format", + .attrs = amd_uncore_l3_format_attr, +}; + +static const struct attribute_group *amd_uncore_df_attr_groups[] = { + &amd_uncore_attr_group, + &amd_uncore_df_format_group, + NULL, +}; + +static const struct attribute_group *amd_uncore_l3_attr_groups[] = { + &amd_uncore_attr_group, + &amd_uncore_l3_format_group, + NULL, +}; static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_df_attr_groups, + .name = "amd_nb", .event_init = amd_uncore_event_init, .add = amd_uncore_add, .del = amd_uncore_del, @@ -311,6 +375,8 @@ static struct pmu amd_nb_pmu = { static struct pmu amd_llc_pmu = { .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_l3_attr_groups, + .name = "amd_l2", .event_init = amd_uncore_event_init, .add = amd_uncore_add, .del = amd_uncore_del, @@ -513,41 +579,36 @@ static int amd_uncore_cpu_dead(unsigned int cpu) static int __init amd_uncore_init(void) { + struct attribute **df_attr = amd_uncore_df_format_attr; + struct attribute **l3_attr = amd_uncore_l3_format_attr; int ret = -ENODEV; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return -ENODEV; if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) return -ENODEV; - if (boot_cpu_data.x86 == 0x17) { + num_counters_nb = NUM_COUNTERS_NB; + num_counters_llc = NUM_COUNTERS_L2; + if (boot_cpu_data.x86 >= 0x17) { /* - * For F17h, the Northbridge counters are repurposed as Data - * Fabric counters. Also, L3 counters are supported too. The PMUs - * are exported based on family as either L2 or L3 and NB or DF. + * For F17h and above, the Northbridge counters are + * repurposed as Data Fabric counters. Also, L3 + * counters are supported too. The PMUs are exported + * based on family as either L2 or L3 and NB or DF. 
*/ - num_counters_nb = NUM_COUNTERS_NB; num_counters_llc = NUM_COUNTERS_L3; amd_nb_pmu.name = "amd_df"; amd_llc_pmu.name = "amd_l3"; - format_attr_event_df.show = &event_show_df; - format_attr_event_l3.show = &event_show_l3; l3_mask = true; - } else { - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L2; - amd_nb_pmu.name = "amd_nb"; - amd_llc_pmu.name = "amd_l2"; - format_attr_event_df = format_attr_event; - format_attr_event_l3 = format_attr_event; - l3_mask = false; } - amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; - amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3; - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { + if (boot_cpu_data.x86 >= 0x17) + *df_attr = &format_attr_event14.attr; + amd_uncore_nb = alloc_percpu(struct amd_uncore *); if (!amd_uncore_nb) { ret = -ENOMEM; @@ -557,11 +618,41 @@ static int __init amd_uncore_init(void) if (ret) goto fail_nb; - pr_info("AMD NB counters detected\n"); + pr_info("%s NB counters detected\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + "HYGON" : "AMD"); ret = 0; } if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) { + if (boot_cpu_data.x86 >= 0x19) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask.attr; + *l3_attr++ = &format_attr_coreid.attr; + *l3_attr++ = &format_attr_enallslices.attr; + *l3_attr++ = &format_attr_enallcores.attr; + *l3_attr++ = &format_attr_sliceid.attr; + *l3_attr++ = &format_attr_threadmask2.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask.attr; + *l3_attr++ = &format_attr_slicemask.attr; + *l3_attr++ = &format_attr_threadmask8.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask.attr; + if (boot_cpu_data.x86_model >= 6 && + boot_cpu_data.x86_model <= 0xf) { + *l3_attr++ = &format_attr_slicemask4.attr; + *l3_attr++ = &format_attr_threadmask32.attr; + } else { + *l3_attr++ = &format_attr_slicemask.attr; + *l3_attr++ = &format_attr_threadmask8.attr; + } + } + amd_uncore_llc = alloc_percpu(struct amd_uncore *); if (!amd_uncore_llc) { ret = -ENOMEM; @@ -571,7 +662,9 @@ static int __init amd_uncore_init(void) if (ret) goto fail_llc; - pr_info("AMD LLC counters detected\n"); + pr_info("%s LLC counters detected\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + "HYGON" : "AMD"); ret = 0; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index dfb2f7c0d0192bcd16569d03badd498f355accf7..fec9f1d62c8f42349a122b5c4660a7e97566a314 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -70,12 +70,14 @@ u64 x86_perf_event_update(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int shift = 64 - x86_pmu.cntval_bits; u64 prev_raw_count, new_raw_count; - int idx = hwc->idx; u64 delta; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event) + return x86_pmu.update_topdown_event(event); + /* * Careful: an NMI might modify the previous event value. * @@ -375,7 +377,7 @@ int x86_add_exclusive(unsigned int what) * LBR and BTS are still mutually exclusive. 
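x86_perf_event_update(), amended above to bail out for events without an event_base and to hand top-down metric events to the dedicated callback, still forms its delta from counters that are only cntval_bits wide (typically 48). A small sketch, under that assumption, of the shift-up/shift-down arithmetic that keeps the delta correct across a counter wrap:

#include <assert.h>
#include <stdint.h>

/* delta between two raw reads of a cntval_bits-wide counter, wrap-safe */
static int64_t counter_delta(uint64_t prev, uint64_t now, int cntval_bits)
{
        int shift = 64 - cntval_bits;

        return ((int64_t)(now << shift) - (int64_t)(prev << shift)) >> shift;
}

int main(void)
{
        /* a 48-bit counter wrapping from near its top back to a small value */
        assert(counter_delta(0xFFFFFFFFFFF0ULL, 0x10ULL, 48) == 0x20);
        return 0;
}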
*/ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) - return 0; + goto out; if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { mutex_lock(&pmc_reserve_mutex); @@ -387,6 +389,7 @@ int x86_add_exclusive(unsigned int what) mutex_unlock(&pmc_reserve_mutex); } +out: atomic_inc(&active_events); return 0; @@ -397,11 +400,15 @@ int x86_add_exclusive(unsigned int what) void x86_del_exclusive(unsigned int what) { + atomic_dec(&active_events); + + /* + * See the comment in x86_add_exclusive(). + */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) return; atomic_dec(&x86_pmu.lbr_exclusive[what]); - atomic_dec(&active_events); } int x86_setup_perfctr(struct perf_event *event) @@ -438,26 +445,6 @@ int x86_setup_perfctr(struct perf_event *event) if (config == -1LL) return -EINVAL; - /* - * Branch tracing: - */ - if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && - !attr->freq && hwc->sample_period == 1) { - /* BTS is not supported by this architecture. */ - if (!x86_pmu.bts_active) - return -EOPNOTSUPP; - - /* BTS is currently only allowed for user-mode. */ - if (!attr->exclude_kernel) - return -EOPNOTSUPP; - - /* disallow bts if conflicting events are present */ - if (x86_add_exclusive(x86_lbr_exclusive_lbr)) - return -EBUSY; - - event->destroy = hw_perf_lbr_event_destroy; - } - hwc->config |= config; return 0; @@ -580,6 +567,21 @@ int x86_pmu_hw_config(struct perf_event *event) return -EINVAL; } + /* sample_regs_user never support XMM registers */ + if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) + return -EINVAL; + /* + * Besides the general purpose registers, XMM registers may + * be collected in PEBS on some platforms, e.g. Icelake + */ + if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { + if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) + return -EINVAL; + + if (!event->attr.precise_ip) + return -EINVAL; + } + return x86_setup_perfctr(event); } @@ -617,6 +619,7 @@ void x86_pmu_disable_all(void) int idx; for (idx = 0; idx < x86_pmu.num_counters; idx++) { + struct hw_perf_event *hwc = &cpuc->events[idx]->hw; u64 val; if (!test_bit(idx, cpuc->active_mask)) @@ -626,6 +629,8 @@ void x86_pmu_disable_all(void) continue; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(x86_pmu_config_addr(idx), val); + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(idx + 1), 0); } } @@ -681,6 +686,11 @@ static inline int is_x86_event(struct perf_event *event) return event->pmu == &pmu; } +struct pmu *x86_get_pmu(void) +{ + return &pmu; +} + /* * Event scheduler state: * @@ -694,7 +704,7 @@ struct sched_state { int counter; /* counter index */ int unassigned; /* number of events to be assigned left */ int nr_gp; /* number of GP counters used */ - unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + u64 used; }; /* Total max is X86_PMC_IDX_MAX, but we are O(n!) 
limited */ @@ -751,8 +761,12 @@ static bool perf_sched_restore_state(struct perf_sched *sched) sched->saved_states--; sched->state = sched->saved[sched->saved_states]; - /* continue with next counter: */ - clear_bit(sched->state.counter++, sched->state.used); + /* this assignment didn't work out */ + /* XXX broken vs EVENT_PAIR */ + sched->state.used &= ~BIT_ULL(sched->state.counter); + + /* try the next one */ + sched->state.counter++; return true; } @@ -777,20 +791,32 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { idx = INTEL_PMC_IDX_FIXED; for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { - if (!__test_and_set_bit(idx, sched->state.used)) - goto done; + u64 mask = BIT_ULL(idx); + + if (sched->state.used & mask) + continue; + + sched->state.used |= mask; + goto done; } } /* Grab the first unused counter starting with idx */ idx = sched->state.counter; for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { - if (!__test_and_set_bit(idx, sched->state.used)) { - if (sched->state.nr_gp++ >= sched->max_gp) - return false; + u64 mask = BIT_ULL(idx); - goto done; - } + if (c->flags & PERF_X86_EVENT_PAIR) + mask |= mask << 1; + + if (sched->state.used & mask) + continue; + + if (sched->state.nr_gp++ >= sched->max_gp) + return false; + + sched->state.used |= mask; + goto done; } return false; @@ -867,12 +893,10 @@ EXPORT_SYMBOL_GPL(perf_assign_events); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { struct event_constraint *c; - unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct perf_event *e; int i, wmin, wmax, unsched = 0; struct hw_perf_event *hwc; - - bitmap_zero(used_mask, X86_PMC_IDX_MAX); + u64 used_mask = 0; if (x86_pmu.start_scheduling) x86_pmu.start_scheduling(cpuc); @@ -890,6 +914,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) * fastpath, try to reuse previous register */ for (i = 0; i < n; i++) { + u64 mask; + hwc = &cpuc->event_list[i]->hw; c = cpuc->event_constraint[i]; @@ -901,11 +927,16 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (!test_bit(hwc->idx, c->idxmsk)) break; + mask = BIT_ULL(hwc->idx); + if (is_counter_pair(hwc)) + mask |= mask << 1; + /* not already used */ - if (test_bit(hwc->idx, used_mask)) + if (used_mask & mask) break; - __set_bit(hwc->idx, used_mask); + used_mask |= mask; + if (assign) assign[i] = hwc->idx; } @@ -928,6 +959,15 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) READ_ONCE(cpuc->excl_cntrs->exclusive_present)) gpmax /= 2; + /* + * Reduce the amount of available counters to allow fitting + * the extra Merge events needed by large increment events. + */ + if (x86_pmu.flags & PMU_FL_PAIR) { + gpmax = x86_pmu.num_counters - cpuc->n_pair; + WARN_ON(gpmax <= 0); + } + unsched = perf_assign_events(cpuc->event_constraint, n, wmin, wmax, gpmax, assign); } @@ -973,6 +1013,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) return unsched ? 
-EINVAL : 0; } +static int add_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) { + if (cpuc->n_metric == INTEL_TD_METRIC_NUM) + return -EINVAL; + cpuc->n_metric++; + cpuc->n_txn_metric++; + } + + return 0; +} + +static void del_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) + cpuc->n_metric--; +} + +static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, + int max_count, int n) +{ + + if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) + return -EINVAL; + + if (n >= max_count + cpuc->n_metric) + return -EINVAL; + + cpuc->event_list[n] = event; + if (is_counter_pair(&event->hw)) + cpuc->n_pair++; + + return 0; +} + /* * dogrp: true if must collect siblings events (group) * returns total number of events and error code @@ -988,23 +1065,21 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, n = cpuc->n_events; if (is_x86_event(leader)) { - if (n >= max_count) + if (collect_event(cpuc, leader, max_count, n)) return -EINVAL; - cpuc->event_list[n] = leader; n++; } + if (!dogrp) return n; for_each_sibling_event(event, leader) { - if (!is_x86_event(event) || - event->state <= PERF_EVENT_STATE_OFF) + if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; - if (n >= max_count) + if (collect_event(cpuc, event, max_count, n)) return -EINVAL; - cpuc->event_list[n] = event; n++; } return n; @@ -1014,22 +1089,36 @@ static inline void x86_assign_hw_event(struct perf_event *event, struct cpu_hw_events *cpuc, int i) { struct hw_perf_event *hwc = &event->hw; + int idx; - hwc->idx = cpuc->assign[i]; + idx = hwc->idx = cpuc->assign[i]; hwc->last_cpu = smp_processor_id(); hwc->last_tag = ++cpuc->tags[i]; - if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) { + switch (hwc->idx) { + case INTEL_PMC_IDX_FIXED_BTS: + case INTEL_PMC_IDX_FIXED_VLBR: hwc->config_base = 0; hwc->event_base = 0; - } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) { + break; + + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: + /* All the metric events are mapped onto the fixed counter 3. */ + idx = INTEL_PMC_IDX_FIXED_SLOTS; + /* fall through */ + case INTEL_PMC_IDX_FIXED ... 
INTEL_PMC_IDX_FIXED_BTS-1: hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED); - hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30; - } else { + hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + + (idx - INTEL_PMC_IDX_FIXED); + hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | + INTEL_PMC_FIXED_RDPMC_BASE; + break; + + default: hwc->config_base = x86_pmu_config_addr(hwc->idx); hwc->event_base = x86_pmu_event_addr(hwc->idx); hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); + break; } } @@ -1129,9 +1218,13 @@ int x86_perf_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0, idx = hwc->idx; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && + x86_pmu.set_topdown_event_period) + return x86_pmu.set_topdown_event_period(event); + /* * If we are way outside a reasonable range then just skip forward: */ @@ -1170,6 +1263,13 @@ int x86_perf_event_set_period(struct perf_event *event) wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); + /* + * Sign extend the Merge event counter's upper 16 bits since + * we currently declare a 48-bit counter width + */ + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff); + /* * Due to erratum on certan cpu we need * a second write to be sure the register @@ -1348,8 +1448,9 @@ void x86_pmu_stop(struct perf_event *event, int flags) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { + if (test_bit(hwc->idx, cpuc->active_mask)) { x86_pmu.disable(event); + __clear_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = NULL; WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; @@ -1412,6 +1513,8 @@ static void x86_pmu_del(struct perf_event *event, int flags) cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; } --cpuc->n_events; + if (x86_pmu.intel_cap.perf_metrics) + del_nr_metric_event(cpuc, event); perf_event_update_userpage(event); @@ -1446,16 +1549,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs) apic_write(APIC_LVTPC, APIC_DM_NMI); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - if (!test_bit(idx, cpuc->active_mask)) { - /* - * Though we deactivated the counter some cpus - * might still deliver spurious interrupts still - * in flight. Catch them: - */ - if (__test_and_clear_bit(idx, cpuc->running)) - handled++; + if (!test_bit(idx, cpuc->active_mask)) continue; - } event = cpuc->events[idx]; @@ -1589,68 +1684,6 @@ static struct attribute_group x86_pmu_format_group = { .attrs = NULL, }; -/* - * Remove all undefined events (x86_pmu.event_map(id) == 0) - * out of events_attr attributes. - */ -static void __init filter_events(struct attribute **attrs) -{ - struct device_attribute *d; - struct perf_pmu_events_attr *pmu_attr; - int offset = 0; - int i, j; - - for (i = 0; attrs[i]; i++) { - d = (struct device_attribute *)attrs[i]; - pmu_attr = container_of(d, struct perf_pmu_events_attr, attr); - /* str trumps id */ - if (pmu_attr->event_str) - continue; - if (x86_pmu.event_map(i + offset)) - continue; - - for (j = i; attrs[j]; j++) - attrs[j] = attrs[j + 1]; - - /* Check the shifted attr. */ - i--; - - /* - * event_map() is index based, the attrs array is organized - * by increasing event index. 
If we shift the events, then - * we need to compensate for the event_map(), otherwise - * we are looking up the wrong event in the map - */ - offset++; - } -} - -/* Merge two pointer arrays */ -__init struct attribute **merge_attr(struct attribute **a, struct attribute **b) -{ - struct attribute **new; - int j, i; - - for (j = 0; a[j]; j++) - ; - for (i = 0; b[i]; i++) - j++; - j++; - - new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL); - if (!new) - return NULL; - - j = 0; - for (i = 0; a[i]; i++) - new[j++] = a[i]; - for (i = 0; b[i]; i++) - new[j++] = b[i]; - new[j] = NULL; - - return new; -} - ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { struct perf_pmu_events_attr *pmu_attr = \ @@ -1715,9 +1748,24 @@ static struct attribute *events_attr[] = { NULL, }; +/* + * Remove all undefined events (x86_pmu.event_map(id) == 0) + * out of events_attr attributes. + */ +static umode_t +is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); + /* str trumps id */ + return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; +} + static struct attribute_group x86_pmu_events_group = { .name = "events", .attrs = events_attr, + .is_visible = is_visible, }; ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) @@ -1762,6 +1810,22 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) static struct attribute_group x86_pmu_attr_group; static struct attribute_group x86_pmu_caps_group; +static int x86_pmu_attr_update_notify(struct notifier_block *nb, + unsigned long ret, void *group) +{ + struct pmu *tmp = group; + + if (tmp != &pmu) + return NOTIFY_DONE; + + *(int *)ret = sysfs_update_groups(&tmp->dev->kobj, x86_pmu.attr_update); + return NOTIFY_STOP; +} + +static struct notifier_block x86_pmu_attr_update_notifier = { + .notifier_call = &x86_pmu_attr_update_notify, +}; + static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; @@ -1776,6 +1840,14 @@ static int __init init_hw_perf_events(void) case X86_VENDOR_AMD: err = amd_pmu_init(); break; + case X86_VENDOR_HYGON: + err = amd_pmu_init(); + x86_pmu.name = "HYGON"; + break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + err = zhaoxin_pmu_init(); + break; default: err = -ENOTSUPP; } @@ -1809,37 +1881,10 @@ static int __init init_hw_perf_events(void) x86_pmu_format_group.attrs = x86_pmu.format_attrs; - if (x86_pmu.caps_attrs) { - struct attribute **tmp; - - tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs); - if (!WARN_ON(!tmp)) - x86_pmu_caps_group.attrs = tmp; - } - - if (x86_pmu.event_attrs) - x86_pmu_events_group.attrs = x86_pmu.event_attrs; - if (!x86_pmu.events_sysfs_show) x86_pmu_events_group.attrs = &empty_attrs; - else - filter_events(x86_pmu_events_group.attrs); - if (x86_pmu.cpu_events) { - struct attribute **tmp; - - tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events); - if (!WARN_ON(!tmp)) - x86_pmu_events_group.attrs = tmp; - } - - if (x86_pmu.attrs) { - struct attribute **tmp; - - tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs); - if (!WARN_ON(!tmp)) - x86_pmu_attr_group.attrs = tmp; - } + pmu_attr_update_register_notifier(&x86_pmu_attr_update_notifier); pr_info("... version: %d\n", x86_pmu.version); pr_info("... 
bit width: %d\n", x86_pmu.cntval_bits); @@ -1913,6 +1958,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) perf_pmu_disable(pmu); __this_cpu_write(cpu_hw_events.n_txn, 0); + __this_cpu_write(cpu_hw_events.n_txn_metric, 0); } /* @@ -1938,6 +1984,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) */ __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); + __this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric)); perf_pmu_enable(pmu); } @@ -1990,7 +2037,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) */ static void free_fake_cpuc(struct cpu_hw_events *cpuc) { - kfree(cpuc->shared_regs); + intel_cpuc_finish(cpuc); kfree(cpuc); } @@ -2002,14 +2049,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); if (!cpuc) return ERR_PTR(-ENOMEM); - - /* only needed, if we have extra_regs */ - if (x86_pmu.extra_regs) { - cpuc->shared_regs = allocate_shared_regs(cpu); - if (!cpuc->shared_regs) - goto error; - } cpuc->is_fake = 1; + + if (intel_cpuc_prepare(cpuc, cpu)) + goto error; + return cpuc; error: free_fake_cpuc(cpuc); @@ -2121,6 +2165,7 @@ static int x86_pmu_event_init(struct perf_event *event) if (err) { if (event->destroy) event->destroy(event); + event->destroy = NULL; } if (READ_ONCE(x86_pmu.attr_rdpmc) && @@ -2168,17 +2213,15 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m static int x86_pmu_event_idx(struct perf_event *event) { - int idx = event->hw.idx; + struct hw_perf_event *hwc = &event->hw; - if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return 0; - if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { - idx -= INTEL_PMC_IDX_FIXED; - idx |= 1 << 30; - } - - return idx + 1; + if (is_metric_idx(hwc->idx)) + return INTEL_PMC_FIXED_RDPMC_METRICS + 1; + else + return hwc->event_base_rdpmc + 1; } static ssize_t get_attr_rdpmc(struct device *cdev, @@ -2192,6 +2235,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { + static DEFINE_MUTEX(rdpmc_mutex); unsigned long val; ssize_t ret; @@ -2205,6 +2249,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev, if (x86_pmu.attr_rdpmc_broken) return -ENOTSUPP; + mutex_lock(&rdpmc_mutex); + if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) { /* * Changing into or out of always available, aka @@ -2220,6 +2266,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev, x86_pmu.attr_rdpmc = val; + mutex_unlock(&rdpmc_mutex); + return count; } @@ -2273,6 +2321,19 @@ void perf_check_microcode(void) x86_pmu.check_microcode(); } +static int x86_pmu_check_period(struct perf_event *event, u64 value) +{ + if (x86_pmu.check_period && x86_pmu.check_period(event, value)) + return -EINVAL; + + if (value && x86_pmu.limit_period) { + if (x86_pmu.limit_period(event, value) > value) + return -EINVAL; + } + + return 0; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -2297,6 +2358,7 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, .sched_task = x86_pmu_sched_task, .task_ctx_size = sizeof(struct x86_perf_task_context), + .check_period = x86_pmu_check_period, }; void arch_perf_update_userpage(struct perf_event *event, @@ -2339,6 +2401,15 @@ void arch_perf_update_userpage(struct perf_event *event, cyc2ns_read_end(); } +/* + * Determine whether the regs 
were taken from an irq/exception handler rather + * than from perf_arch_fetch_caller_regs(). + */ +static bool perf_hw_regs(struct pt_regs *regs) +{ + return regs->flags & X86_EFLAGS_FIXED; +} + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { @@ -2353,8 +2424,12 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re if (perf_callchain_store(entry, regs->ip)) return; - for (unwind_start(&state, current, regs, NULL); !unwind_done(&state); - unwind_next_frame(&state)) { + if (perf_hw_regs(regs)) + unwind_start(&state, current, regs, NULL); + else + unwind_start(&state, current, NULL, (void *)regs->sp); + + for (; !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || perf_callchain_store(entry, addr)) return; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 24ffa1e88cf948ecbe9839f6d956c69a12ecd4f0..510f9461407e79109ba41d58d8b2567e1f4b14a0 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -71,16 +71,26 @@ struct bts_buffer { static struct pmu bts_pmu; +static int buf_nr_pages(struct page *page) +{ + if (!PagePrivate(page)) + return 1; + + return 1 << page_private(page); +} + static size_t buf_size(struct page *page) { - return 1 << (PAGE_SHIFT + page_private(page)); + return buf_nr_pages(page) * PAGE_SIZE; } static void * -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) +bts_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool overwrite) { struct bts_buffer *buf; struct page *page; + int cpu = event->cpu; int node = (cpu == -1) ? cpu : cpu_to_node(cpu); unsigned long offset; size_t size = nr_pages << PAGE_SHIFT; @@ -89,9 +99,7 @@ bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) /* count all the high order buffers */ for (pg = 0, nbuf = 0; pg < nr_pages;) { page = virt_to_page(pages[pg]); - if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) - return NULL; - pg += 1 << page_private(page); + pg += buf_nr_pages(page); nbuf++; } @@ -115,7 +123,7 @@ bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) unsigned int __nr_pages; page = virt_to_page(pages[pg]); - __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; + __nr_pages = buf_nr_pages(page); buf->buf[nbuf].page = page; buf->buf[nbuf].offset = offset; buf->buf[nbuf].displacement = (pad ? 
BTS_RECORD_SIZE - pad : 0); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 035c37481f572a253b08773df44069ab1590de91..c4ca55ac07bfb0646cf61f0892b0260e5ea957b2 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -238,11 +238,44 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +static struct event_constraint intel_icl_event_constraints[] = { + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), + INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), + INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ + INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ + INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ + INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), + EVENT_CONSTRAINT_END +}; + +static struct extra_reg intel_icl_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), + EVENT_EXTRA_END +}; + EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); -static struct attribute *nhm_events_attrs[] = { +static struct attribute *nhm_mem_events_attrs[] = { EVENT_PTR(mem_ld_nhm), NULL, }; @@ -277,9 +310,13 @@ EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles, EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale, "4", "2"); +EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4"); +EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80"); +EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81"); +EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82"); +EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83"); + static struct attribute *snb_events_attrs[] = { - EVENT_PTR(mem_ld_snb), - EVENT_PTR(mem_st_snb), EVENT_PTR(td_slots_issued), EVENT_PTR(td_slots_retired), EVENT_PTR(td_fetch_bubbles), @@ -290,6 +327,12 @@ static struct attribute *snb_events_attrs[] = { NULL, }; +static struct attribute *snb_mem_events_attrs[] = { + EVENT_PTR(mem_ld_snb), + EVENT_PTR(mem_st_snb), + NULL, +}; + static struct event_constraint intel_hsw_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -1822,6 +1865,45 @@ static __initconst const u64 glp_hw_cache_extra_regs }, }; +#define TNT_LOCAL_DRAM 
BIT_ULL(26) +#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD +#define TNT_DEMAND_WRITE GLM_DEMAND_RFO +#define TNT_LLC_ACCESS GLM_ANY_RESPONSE +#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ + SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) +#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) + +static __initconst const u64 tnt_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TNT_DEMAND_READ| + TNT_LLC_ACCESS, + [C(RESULT_MISS)] = TNT_DEMAND_READ| + TNT_LLC_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| + TNT_LLC_ACCESS, + [C(RESULT_MISS)] = TNT_DEMAND_WRITE| + TNT_LLC_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x0, + [C(RESULT_MISS)] = 0x0, + }, + }, +}; + +static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1), + EVENT_EXTRA_END +}; + #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ #define KNL_MCDRAM_LOCAL BIT_ULL(21) @@ -1995,6 +2077,39 @@ static void intel_pmu_nhm_enable_all(int added) intel_pmu_enable_all(added); } +static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) +{ + u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; + + if (cpuc->tfa_shadow != val) { + cpuc->tfa_shadow = val; + wrmsrl(MSR_TSX_FORCE_ABORT, val); + } +} + +static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) +{ + /* + * We're going to use PMC3, make sure TFA is set before we touch it. + */ + if (cntr == 3 && !cpuc->is_fake) + intel_set_tfa(cpuc, true); +} + +static void intel_tfa_pmu_enable_all(int added) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * If we find PMC3 is no longer used when we enable the PMU, we can + * clear TFA. 
+ */ + if (!test_bit(3, cpuc->active_mask)) + intel_set_tfa(cpuc, false); + + intel_pmu_enable_all(added); +} + static inline u64 intel_pmu_get_status(void) { u64 status; @@ -2009,47 +2124,92 @@ static inline void intel_pmu_ack_status(u64 ack) wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } -static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) +static inline bool event_is_checkpointed(struct perf_event *event) +{ + return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; +} + +static inline void intel_set_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + if (event->attr.exclude_host) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + if (event->attr.exclude_guest) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); + if (event_is_checkpointed(event)) + __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); +} + +static inline void intel_clear_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); +} + +static void intel_pmu_disable_fixed(struct perf_event *event) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + struct hw_perf_event *hwc = &event->hw; u64 ctrl_val, mask; + int idx = hwc->idx; - mask = 0xfULL << (idx * 4); + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * When there are other active TopDown events, + * don't disable the fixed counter 3. + */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + + intel_clear_masks(event, idx); + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; wrmsrl(hwc->config_base, ctrl_val); } -static inline bool event_is_checkpointed(struct perf_event *event) -{ - return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; -} - static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int idx = hwc->idx; - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: + intel_clear_masks(event, idx); + x86_pmu_disable_event(event); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: + intel_pmu_disable_fixed(event); + break; + case INTEL_PMC_IDX_FIXED_BTS: intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); return; + case INTEL_PMC_IDX_FIXED_VLBR: + intel_clear_masks(event, idx); + break; + default: + intel_clear_masks(event, idx); + pr_warn("Failed to disable the event with invalid index %d\n", + idx); + return; } - cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); - cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); - cpuc->intel_cp_status &= ~(1ull << hwc->idx); - + /* + * Needs to be called after x86_pmu_disable_event, + * so we don't trigger the event without PEBS bit set. 
+ */ if (unlikely(event->attr.precise_ip)) intel_pmu_pebs_disable(event); - - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_disable_fixed(hwc); - return; - } - - x86_pmu_disable_event(event); } static void intel_pmu_del_event(struct perf_event *event) @@ -2060,10 +2220,189 @@ static void intel_pmu_del_event(struct perf_event *event) intel_pmu_pebs_del(event); } +static int icl_set_topdown_event_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + + /* + * The values in PERF_METRICS MSR are derived from fixed counter 3. + * Software should start both registers, PERF_METRICS and fixed + * counter 3, from zero. + * Clear PERF_METRICS and Fixed counter 3 in initialization. + * After that, both MSRs will be cleared for each read. + * Don't need to clear them again. + */ + if (left == x86_pmu.max_period) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + hwc->saved_slots = 0; + hwc->saved_metric = 0; + } + + if ((hwc->saved_slots) && is_slots_event(event)) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); + wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); + } + + perf_event_update_userpage(event); + + return 0; +} + +static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) +{ + u32 val; + + /* + * The metric is reported as an 8bit integer fraction + * summing up to 0xff. + * slots-in-metric = (Metric / 0xff) * slots + */ + val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; + return mul_u64_u32_div(slots, val, 0xff); +} + +static u64 icl_get_topdown_value(struct perf_event *event, + u64 slots, u64 metrics) +{ + int idx = event->hw.idx; + u64 delta; + + if (is_metric_idx(idx)) + delta = icl_get_metrics_event_value(metrics, slots, idx); + else + delta = slots; + + return delta; +} + +static void __icl_update_topdown_event(struct perf_event *event, + u64 slots, u64 metrics, + u64 last_slots, u64 last_metrics) +{ + u64 delta, last = 0; + + delta = icl_get_topdown_value(event, slots, metrics); + if (last_slots) + last = icl_get_topdown_value(event, last_slots, last_metrics); + + /* + * The 8bit integer fraction of the metric may not be accurate, + * especially when the change is very small. + * For example, if only a few bad_spec events happen, the fraction + * may be reduced from 1 to 0. If so, the bad_spec event value + * will be 0 which is definitely less than the last value. + * Avoid updating event->count in this case. + */ + if (delta > last) { + delta -= last; + local64_add(delta, &event->count); + } +} + +static void update_saved_topdown_regs(struct perf_event *event, + u64 slots, u64 metrics) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + int idx; + + event->hw.saved_slots = slots; + event->hw.saved_metric = metrics; + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + other->hw.saved_slots = slots; + other->hw.saved_metric = metrics; + } +} + +/* + * Update all active Topdown events. + * + * The PERF_METRICS and Fixed counter 3 are read separately. The values may be + * modified by an NMI. PMU has to be disabled before calling this function.
+ */ +static u64 icl_update_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + u64 slots, metrics; + bool reset = true; + int idx; + + /* read Fixed counter 3 */ + rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); + if (!slots) + return 0; + + /* read PERF_METRICS */ + rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + __icl_update_topdown_event(other, slots, metrics, + event ? event->hw.saved_slots : 0, + event ? event->hw.saved_metric : 0); + } + + /* + * Check and update this event, which may have been cleared + * in active_mask e.g. x86_pmu_stop() + */ + if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { + __icl_update_topdown_event(event, slots, metrics, + event->hw.saved_slots, + event->hw.saved_metric); + + /* + * In x86_pmu_stop(), the event is cleared in active_mask first, + * then drain the delta, which indicates context switch for + * counting. + * Save metric and slots for context switch. + * Don't need to reset the PERF_METRICS and Fixed counter 3. + * Because the values will be restored in next schedule in. + */ + update_saved_topdown_regs(event, slots, metrics); + reset = false; + } + + if (reset) { + /* The fixed counter 3 has to be written before the PERF_METRICS. */ + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + if (event) + update_saved_topdown_regs(event, 0, 0); + } + + return slots; +} + +static void intel_pmu_read_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* Only need to call update_topdown_event() once for group read. */ + if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && + !is_slots_event(event)) + return; + + perf_pmu_disable(event->pmu); + x86_pmu.update_topdown_event(event); + perf_pmu_enable(event->pmu); +} + static void intel_pmu_read_event(struct perf_event *event) { if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_auto_reload_read(event); + else if (is_topdown_count(event) && x86_pmu.update_topdown_event) + intel_pmu_read_topdown_event(event); else x86_perf_event_update(event); } @@ -2071,8 +2410,22 @@ static void intel_pmu_read_event(struct perf_event *event) static void intel_pmu_enable_fixed(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; + + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * When there are other active TopDown events, + * don't enable the fixed counter 3 again. 
+ */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + + intel_set_masks(event, idx); /* * Enable IRQ generation (0x8), if not PEBS, @@ -2092,9 +2445,15 @@ static void intel_pmu_enable_fixed(struct perf_event *event) if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) bits |= 0x4; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { + bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4); + mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4); + } + rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; ctrl_val |= bits; @@ -2104,33 +2463,32 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { - if (!__this_cpu_read(cpu_hw_events.enabled)) - return; - - intel_pmu_enable_bts(hwc->config); - return; - } - - if (event->attr.exclude_host) - cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); - if (event->attr.exclude_guest) - cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); - - if (unlikely(event_is_checkpointed(event))) - cpuc->intel_cp_status |= (1ull << hwc->idx); + int idx = hwc->idx; if (unlikely(event->attr.precise_ip)) intel_pmu_pebs_enable(event); - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: + intel_set_masks(event, idx); + __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); - return; + break; + case INTEL_PMC_IDX_FIXED_BTS: + if (!__this_cpu_read(cpu_hw_events.enabled)) + return; + intel_pmu_enable_bts(hwc->config); + break; + case INTEL_PMC_IDX_FIXED_VLBR: + intel_set_masks(event, idx); + break; + default: + pr_warn("Failed to enable the event with invalid index %d\n", + idx); } - - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); } static void intel_pmu_add_event(struct perf_event *event) @@ -2200,59 +2558,15 @@ static void intel_pmu_reset(void) local_irq_restore(flags); } -/* - * This handler is triggered by the local APIC, so the APIC IRQ handling - * rules apply: - */ -static int intel_pmu_handle_irq(struct pt_regs *regs) +static int handle_pmi_common(struct pt_regs *regs, u64 status) { struct perf_sample_data data; - struct cpu_hw_events *cpuc; - int bit, loops; - u64 status; - int handled; - int pmu_enabled; - - cpuc = this_cpu_ptr(&cpu_hw_events); - - /* - * Save the PMU state. - * It needs to be restored when leaving the handler. - */ - pmu_enabled = cpuc->enabled; - /* - * No known reason to not always do late ACK, - * but just in case do it opt-in. 
- */ - if (!x86_pmu.late_ack) - apic_write(APIC_LVTPC, APIC_DM_NMI); - intel_bts_disable_local(); - cpuc->enabled = 0; - __intel_pmu_disable_all(); - handled = intel_pmu_drain_bts_buffer(); - handled += intel_bts_interrupt(); - status = intel_pmu_get_status(); - if (!status) - goto done; - - loops = 0; -again: - intel_pmu_lbr_read(); - intel_pmu_ack_status(status); - if (++loops > 100) { - static bool warned = false; - if (!warned) { - WARN(1, "perfevents: irq loop stuck!\n"); - perf_event_print_debug(); - warned = true; - } - intel_pmu_reset(); - goto done; - } + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int bit; + int handled = 0; inc_irq_stat(apic_perf_irqs); - /* * Ignore a range of extra bits in status that do not indicate * overflow by themselves. @@ -2261,7 +2575,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) GLOBAL_STATUS_ASIF | GLOBAL_STATUS_LBRS_FROZEN); if (!status) - goto done; + return 0; /* * In case multiple PEBS events are sampled at the same time, * it is possible to have GLOBAL_STATUS bit 62 set indicating @@ -2290,7 +2604,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) /* * PEBS overflow sets bit 62 in the global status register */ - if (__test_and_clear_bit(62, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { handled++; x86_pmu.drain_pebs(regs); status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; @@ -2299,13 +2613,22 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) /* * Intel PT */ - if (__test_and_clear_bit(55, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { handled++; intel_pt_interrupt(); } /* - * Checkpointed counters can lead to 'spurious' PMIs because the + * Intel Perf metrics + */ + if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { + handled++; + if (x86_pmu.update_topdown_event) + x86_pmu.update_topdown_event(NULL); + } + + /* + * Checkpointed counters can lead to 'spurious' PMIs because the * rollback caused by the PMI will have cleared the overflow status * bit. Therefore always force probe these counters. */ @@ -2331,6 +2654,61 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) x86_pmu_stop(event, 0); } + return handled; +} + +/* + * This handler is triggered by the local APIC, so the APIC IRQ handling + * rules apply: + */ +static int intel_pmu_handle_irq(struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + int loops; + u64 status; + int handled; + int pmu_enabled; + + cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * Save the PMU state. + * It needs to be restored when leaving the handler. + */ + pmu_enabled = cpuc->enabled; + /* + * No known reason to not always do late ACK, + * but just in case do it opt-in.
+ */ + if (!x86_pmu.late_ack) + apic_write(APIC_LVTPC, APIC_DM_NMI); + intel_bts_disable_local(); + cpuc->enabled = 0; + __intel_pmu_disable_all(); + handled = intel_pmu_drain_bts_buffer(); + handled += intel_bts_interrupt(); + status = intel_pmu_get_status(); + if (!status) + goto done; + + loops = 0; +again: + intel_pmu_lbr_read(); + intel_pmu_ack_status(status); + if (++loops > 100) { + static bool warned; + + if (!warned) { + WARN(1, "perfevents: irq loop stuck!\n"); + perf_event_print_debug(); + warned = true; + } + intel_pmu_reset(); + goto done; + } + + handled += handle_pmi_common(regs, status); + /* * Repeat if there is more work to be done: */ @@ -2358,17 +2736,22 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) static struct event_constraint * intel_bts_constraints(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - unsigned int hw_event, bts_event; + if (unlikely(intel_pmu_has_bts(event))) + return &bts_constraint; - if (event->attr.freq) - return NULL; + return NULL; +} - hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; - bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); +/* + * Note: matches a fake event, like Fixed2. + */ +static struct event_constraint * +intel_vlbr_constraints(struct perf_event *event) +{ + struct event_constraint *c = &vlbr_constraint; - if (unlikely(hw_event == bts_event && hwc->sample_period == 1)) - return &bts_constraint; + if (unlikely(constraint_match(c, event->hw.config))) + return c; return NULL; } @@ -2547,7 +2930,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { - if ((event->hw.config & c->cmask) == c->code) { + if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; } @@ -2563,6 +2946,10 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct event_constraint *c; + c = intel_vlbr_constraints(event); + if (c) + return c; + c = intel_bts_constraints(event); if (c) return c; @@ -2661,6 +3048,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc) raw_spin_unlock(&excl_cntrs->lock); } +static struct event_constraint * +dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) +{ + WARN_ON_ONCE(!cpuc->constraint_list); + + if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { + struct event_constraint *cx; + + /* + * grab pre-allocated constraint entry + */ + cx = &cpuc->constraint_list[idx]; + + /* + * initialize dynamic constraint + * with static constraint + */ + *cx = *c; + + /* + * mark constraint as dynamic + */ + cx->flags |= PERF_X86_EVENT_DYNAMIC; + c = cx; + } + + return c; +} + static struct event_constraint * intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, int idx, struct event_constraint *c) @@ -2691,27 +3107,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, * only needed when constraint has not yet * been cloned (marked dynamic) */ - if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { - struct event_constraint *cx; - - /* - * grab pre-allocated constraint entry - */ - cx = &cpuc->constraint_list[idx]; - - /* - * initialize dynamic constraint - * with static constraint - */ - *cx = *c; - - /* - * mark constraint as dynamic, so we - * can free it later on - */ - cx->flags |= PERF_X86_EVENT_DYNAMIC; - c = cx; - } + c = dyn_constraint(cpuc, c, idx); /* * From here on, the constraint is dynamic.
@@ -2981,20 +3377,61 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) flags &= ~PERF_SAMPLE_TIME; if (!event->attr.exclude_kernel) flags &= ~PERF_SAMPLE_REGS_USER; - if (event->attr.sample_regs_user & ~PEBS_REGS) + if (event->attr.sample_regs_user & ~PEBS_GP_REGS) flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); return flags; } +static int intel_pmu_bts_config(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + if (unlikely(intel_pmu_has_bts(event))) { + /* BTS is not supported by this architecture. */ + if (!x86_pmu.bts_active) + return -EOPNOTSUPP; + + /* BTS is currently only allowed for user-mode. */ + if (!attr->exclude_kernel) + return -EOPNOTSUPP; + + /* BTS is not allowed for precise events. */ + if (attr->precise_ip) + return -EOPNOTSUPP; + + /* disallow bts if conflicting events are present */ + if (x86_add_exclusive(x86_lbr_exclusive_lbr)) + return -EBUSY; + + event->destroy = hw_perf_lbr_event_destroy; + } + + return 0; +} + +static int core_pmu_hw_config(struct perf_event *event) +{ + int ret = x86_pmu_hw_config(event); + + if (ret) + return ret; + + return intel_pmu_bts_config(event); +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); + if (ret) + return ret; + + ret = intel_pmu_bts_config(event); if (ret) return ret; if (event->attr.precise_ip) { - if (!event->attr.freq) { + if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if (!(event->attr.sample_type & ~intel_pmu_large_pebs_flags(event))) @@ -3015,7 +3452,7 @@ static int intel_pmu_hw_config(struct perf_event *event) /* * BTS is set up earlier in this path, so don't account twice */ - if (!intel_pmu_has_bts(event)) { + if (!unlikely(intel_pmu_has_bts(event))) { /* disallow lbr if conflicting events are present */ if (x86_add_exclusive(x86_lbr_exclusive_lbr)) return -EBUSY; @@ -3027,6 +3464,56 @@ static int intel_pmu_hw_config(struct perf_event *event) if (event->attr.type != PERF_TYPE_RAW) return 0; + /* + * Config Topdown slots and metric events + * + * The slots event on Fixed Counter 3 can support sampling, + * which will be handled normally in x86_perf_event_update(). + * + * Metric events don't support sampling and require being paired + * with a slots event as group leader. When the slots event + * is used in a metrics group, it too cannot support sampling. + */ + if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) { + if (event->attr.config1 || event->attr.config2) + return -EINVAL; + + /* + * The TopDown metrics events and slots event don't + * support any filters. + */ + if (event->attr.config & X86_ALL_EVENT_FLAGS) + return -EINVAL; + + if (is_metric_event(event)) { + struct perf_event *leader = event->group_leader; + + /* The metric events don't support sampling. */ + if (is_sampling_event(event)) + return -EINVAL; + + /* The metric events require a slots group leader. */ + if (!is_slots_event(leader)) + return -EINVAL; + + /* + * The leader/SLOTS must not be a sampling event for + * metric use; hardware requires it starts at 0 when used + * in conjunction with MSR_PERF_METRICS. + */ + if (is_sampling_event(leader)) + return -EINVAL; + + event->event_caps |= PERF_EV_CAP_SIBLING; + /* + * Only once we have a METRICs sibling do we + * need TopDown magic. 
+ */ + leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; + event->hw.flags |= PERF_X86_EVENT_TOPDOWN; + } + } + if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) return 0; @@ -3164,6 +3651,12 @@ static struct event_constraint counter0_constraint = static struct event_constraint counter2_constraint = EVENT_CONSTRAINT(0, 0x4, 0); +static struct event_constraint fixed0_constraint = + FIXED_EVENT_CONSTRAINT(0x00c0, 0); + +static struct event_constraint fixed0_counter0_constraint = + INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); + static struct event_constraint * hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -3182,6 +3675,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, return c; } +static struct event_constraint * +icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + /* + * Fixed counter 0 has less skid. + * Force instruction:ppp in Fixed counter 0 + */ + if ((event->attr.precise_ip == 3) && + constraint_match(&fixed0_constraint, event->hw.config)) + return &fixed0_constraint; + + return hsw_get_event_constraints(cpuc, idx, event); +} + static struct event_constraint * glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -3197,6 +3705,49 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, return c; } +static struct event_constraint * +tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct event_constraint *c; + + /* + * :ppp means to do reduced skid PEBS, + * which is available on PMC0 and fixed counter 0. + */ + if (event->attr.precise_ip == 3) { + /* Force instruction:ppp on PMC0 and Fixed counter 0 */ + if (constraint_match(&fixed0_constraint, event->hw.config)) + return &fixed0_counter0_constraint; + + return &counter0_constraint; + } + + c = intel_get_event_constraints(cpuc, idx, event); + + return c; +} + +static bool allow_tsx_force_abort = true; + +static struct event_constraint * +tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); + + /* + * Without TFA we must not use PMC3. 
+ */ + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) { + c = dyn_constraint(cpuc, c, idx); + c->idxmsk64 &= ~(1ULL << 3); + c->weight--; + } + + return c; +} + /* * Broadwell: * @@ -3223,6 +3774,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left) return left; } +static u64 nhm_limit_period(struct perf_event *event, u64 left) +{ + return max(left, 32ULL); +} + PMU_FORMAT_ATTR(event, "config:0-7" ); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); @@ -3250,7 +3806,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } -struct intel_shared_regs *allocate_shared_regs(int cpu) +static struct intel_shared_regs *allocate_shared_regs(int cpu) { struct intel_shared_regs *regs; int i; @@ -3282,9 +3838,10 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) return c; } -static int intel_pmu_cpu_prepare(int cpu) + +int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + cpuc->pebs_record_size = x86_pmu.pebs_record_size; if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { cpuc->shared_regs = allocate_shared_regs(cpu); @@ -3292,13 +3849,15 @@ static int intel_pmu_cpu_prepare(int cpu) goto err; } - if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); - cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); if (!cpuc->constraint_list) goto err_shared_regs; + } + if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { cpuc->excl_cntrs = allocate_excl_cntrs(cpu); if (!cpuc->excl_cntrs) goto err_constraint_list; @@ -3320,6 +3879,11 @@ static int intel_pmu_cpu_prepare(int cpu) return -ENOMEM; } +static int intel_pmu_cpu_prepare(int cpu) +{ + return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); +} + static void flip_smm_bit(void *data) { unsigned long set = *(unsigned long *)data; @@ -3347,9 +3911,26 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; + if (x86_pmu.flags & PMU_FL_TFA) { + WARN_ON_ONCE(cpuc->tfa_shadow); + cpuc->tfa_shadow = ~0ULL; + intel_set_tfa(cpuc, false); + } + if (x86_pmu.version > 1) flip_smm_bit(&x86_pmu.attr_freeze_on_smi); + /* Disable perf metrics if any added CPU doesn't support it. 
*/ + if (x86_pmu.intel_cap.perf_metrics) { + union perf_capabilities perf_cap; + + rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); + if (!perf_cap.perf_metrics) { + x86_pmu.intel_cap.perf_metrics = 0; + x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + } + } + if (!cpuc->shared_regs) return; @@ -3391,9 +3972,8 @@ static void intel_pmu_cpu_starting(int cpu) } } -static void free_excl_cntrs(int cpu) +static void free_excl_cntrs(struct cpu_hw_events *cpuc) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); struct intel_excl_cntrs *c; c = cpuc->excl_cntrs; @@ -3401,14 +3981,19 @@ static void free_excl_cntrs(int cpu) if (c->core_id == -1 || --c->refcnt == 0) kfree(c); cpuc->excl_cntrs = NULL; - kfree(cpuc->constraint_list); - cpuc->constraint_list = NULL; } + + kfree(cpuc->constraint_list); + cpuc->constraint_list = NULL; } static void intel_pmu_cpu_dying(int cpu) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + fini_debug_store_on_cpu(cpu); +} + +void intel_cpuc_finish(struct cpu_hw_events *cpuc) +{ struct intel_shared_regs *pc; pc = cpuc->shared_regs; @@ -3418,9 +4003,12 @@ static void intel_pmu_cpu_dying(int cpu) cpuc->shared_regs = NULL; } - free_excl_cntrs(cpu); + free_excl_cntrs(cpuc); +} - fini_debug_store_on_cpu(cpu); +static void intel_pmu_cpu_dead(int cpu) +{ + intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); } static void intel_pmu_sched_task(struct perf_event_context *ctx, @@ -3430,6 +4018,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, intel_pmu_lbr_sched_task(ctx, sched_in); } +static int intel_pmu_check_period(struct perf_event *event, u64 value) +{ + return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; +} + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); @@ -3478,7 +4071,7 @@ static __initconst const struct x86_pmu core_pmu = { .enable_all = core_pmu_enable_all, .enable = core_pmu_enable_event, .disable = x86_pmu_disable_event, - .hw_config = x86_pmu_hw_config, + .hw_config = core_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, .perfctr = MSR_ARCH_PERFMON_PERFCTR0, @@ -3509,9 +4102,10 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, -}; + .cpu_dead = intel_pmu_cpu_dead, -static struct attribute *intel_pmu_attrs[]; + .check_period = intel_pmu_check_period, +}; static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", @@ -3544,13 +4138,15 @@ static __initconst const struct x86_pmu intel_pmu = { .format_attrs = intel_arch3_formats_attr, .events_sysfs_show = intel_event_sysfs_show, - .attrs = intel_pmu_attrs, - .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + .guest_get_msrs = intel_guest_get_msrs, .sched_task = intel_pmu_sched_task, + + .check_period = intel_pmu_check_period, }; static __init void intel_clovertown_quirk(void) @@ -3764,8 +4360,6 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); static struct attribute *hsw_events_attrs[] = { - EVENT_PTR(mem_ld_hsw), - EVENT_PTR(mem_st_hsw), EVENT_PTR(td_slots_issued), EVENT_PTR(td_slots_retired), EVENT_PTR(td_fetch_bubbles), @@ -3776,6 +4370,12 @@ static struct attribute *hsw_events_attrs[] = { NULL }; +static struct attribute *hsw_mem_events_attrs[] = { + 
EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + NULL, +}; + static struct attribute *hsw_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_commit), @@ -3792,12 +4392,43 @@ static struct attribute *hsw_tsx_events_attrs[] = { NULL }; -static __init struct attribute **get_hsw_events_attrs(void) -{ - return boot_cpu_has(X86_FEATURE_RTM) ? - merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) : - hsw_events_attrs; -} +EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); +EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); +EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); +EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); + +static struct attribute *icl_events_attrs[] = { + EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + NULL, +}; + +static struct attribute *icl_td_events_attrs[] = { + EVENT_PTR(slots), + EVENT_PTR(td_retiring), + EVENT_PTR(td_bad_spec), + EVENT_PTR(td_fe_bound), + EVENT_PTR(td_be_bound), + NULL, +}; + +static struct attribute *icl_tsx_events_attrs[] = { + EVENT_PTR(tx_start), + EVENT_PTR(tx_abort), + EVENT_PTR(tx_commit), + EVENT_PTR(tx_capacity_read), + EVENT_PTR(tx_capacity_write), + EVENT_PTR(tx_conflict), + EVENT_PTR(el_start), + EVENT_PTR(el_abort), + EVENT_PTR(el_commit), + EVENT_PTR(el_capacity_read), + EVENT_PTR(el_capacity_write), + EVENT_PTR(el_conflict), + EVENT_PTR(cycles_t), + EVENT_PTR(cycles_ct), + NULL, +}; static ssize_t freeze_on_smi_show(struct device *cdev, struct device_attribute *attr, @@ -3870,21 +4501,105 @@ static struct attribute *intel_pmu_caps_attrs[] = { NULL }; +static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); + static struct attribute *intel_pmu_attrs[] = { &dev_attr_freeze_on_smi.attr, + NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */ + NULL, +}; + +static umode_t +tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0; +} + +static umode_t +pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return x86_pmu.pebs ? attr->mode : 0; +} + +static umode_t +lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return x86_pmu.lbr_nr ? attr->mode : 0; +} + +static umode_t +exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return x86_pmu.version >= 2 ? 
attr->mode : 0; +} + +static struct attribute_group group_events_td = { + .name = "events", +}; + +static struct attribute_group group_events_mem = { + .name = "events", + .is_visible = pebs_is_visible, +}; + +static struct attribute_group group_events_tsx = { + .name = "events", + .is_visible = tsx_is_visible, +}; + +static struct attribute_group group_caps_gen = { + .name = "caps", + .attrs = intel_pmu_caps_attrs, +}; + +static struct attribute_group group_caps_lbr = { + .name = "caps", + .attrs = lbr_attrs, + .is_visible = lbr_is_visible, +}; + +static struct attribute_group group_format_extra = { + .name = "format", + .is_visible = exra_is_visible, +}; + +static struct attribute_group group_format_extra_skl = { + .name = "format", + .is_visible = exra_is_visible, +}; + +static struct attribute_group group_default = { + .attrs = intel_pmu_attrs, +}; + +static const struct attribute_group *attr_update[] = { + &group_events_td, + &group_events_mem, + &group_events_tsx, + &group_caps_gen, + &group_caps_lbr, + &group_format_extra, + &group_format_extra_skl, + &group_default, NULL, }; +static struct attribute *empty_attrs; + __init int intel_pmu_init(void) { - struct attribute **extra_attr = NULL; - struct attribute **to_free = NULL; + struct attribute **extra_skl_attr = &empty_attrs; + struct attribute **extra_attr = &empty_attrs; + struct attribute **td_attr = &empty_attrs; + struct attribute **mem_attr = &empty_attrs; + struct attribute **tsx_attr = &empty_attrs; union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; struct event_constraint *c; unsigned int unused; struct extra_reg *er; + bool pmem = false; int version, i; char *name; @@ -3985,8 +4700,9 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + x86_pmu.limit_period = nhm_limit_period; - x86_pmu.cpu_events = nhm_events_attrs; + mem_attr = nhm_mem_events_attrs; /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = @@ -4004,11 +4720,11 @@ __init int intel_pmu_init(void) name = "nehalem"; break; - case INTEL_FAM6_ATOM_PINEVIEW: - case INTEL_FAM6_ATOM_LINCROFT: - case INTEL_FAM6_ATOM_PENWELL: - case INTEL_FAM6_ATOM_CLOVERVIEW: - case INTEL_FAM6_ATOM_CEDARVIEW: + case INTEL_FAM6_ATOM_BONNELL: + case INTEL_FAM6_ATOM_BONNELL_MID: + case INTEL_FAM6_ATOM_SALTWELL: + case INTEL_FAM6_ATOM_SALTWELL_MID: + case INTEL_FAM6_ATOM_SALTWELL_TABLET: memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -4021,9 +4737,11 @@ __init int intel_pmu_init(void) name = "bonnell"; break; - case INTEL_FAM6_ATOM_SILVERMONT1: - case INTEL_FAM6_ATOM_SILVERMONT2: + case INTEL_FAM6_ATOM_SILVERMONT: + case INTEL_FAM6_ATOM_SILVERMONT_X: + case INTEL_FAM6_ATOM_SILVERMONT_MID: case INTEL_FAM6_ATOM_AIRMONT: + case INTEL_FAM6_ATOM_AIRMONT_MID: memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, @@ -4035,14 +4753,14 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; x86_pmu.extra_regs = intel_slm_extra_regs; x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.cpu_events = slm_events_attrs; + td_attr = slm_events_attrs; extra_attr = slm_format_attr; pr_cont("Silvermont events, "); name = "silvermont"; break; case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_DENVERTON: + case INTEL_FAM6_ATOM_GOLDMONT_X: 
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, @@ -4062,13 +4780,13 @@ __init int intel_pmu_init(void) x86_pmu.pebs_prec_dist = true; x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.cpu_events = glm_events_attrs; + td_attr = glm_events_attrs; extra_attr = slm_format_attr; pr_cont("Goldmont events, "); name = "goldmont"; break; - case INTEL_FAM6_ATOM_GEMINI_LAKE: + case INTEL_FAM6_ATOM_GOLDMONT_PLUS: memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, @@ -4088,7 +4806,7 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.get_event_constraints = glp_get_event_constraints; - x86_pmu.cpu_events = glm_events_attrs; + td_attr = glm_events_attrs; /* Goldmont Plus has 4-wide pipeline */ event_attr_td_total_slots_scale_glm.event_str = "4"; extra_attr = slm_format_attr; @@ -4096,6 +4814,32 @@ __init int intel_pmu_init(void) name = "goldmont_plus"; break; + case INTEL_FAM6_ATOM_TREMONT_X: + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); + hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = intel_slm_event_constraints; + x86_pmu.extra_regs = intel_tnt_extra_regs; + /* + * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS + * for precise cycles. + */ + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.get_event_constraints = tnt_get_event_constraints; + extra_attr = slm_format_attr; + pr_cont("Tremont events, "); + name = "Tremont"; + break; + case INTEL_FAM6_WESTMERE: case INTEL_FAM6_WESTMERE_EP: case INTEL_FAM6_WESTMERE_EX: @@ -4112,7 +4856,7 @@ __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_westmere_extra_regs; x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.cpu_events = nhm_events_attrs; + mem_attr = nhm_mem_events_attrs; /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = @@ -4151,7 +4895,8 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.cpu_events = snb_events_attrs; + td_attr = snb_events_attrs; + mem_attr = snb_mem_events_attrs; /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = @@ -4191,7 +4936,8 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.cpu_events = snb_events_attrs; + td_attr = snb_events_attrs; + mem_attr = snb_mem_events_attrs; /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = @@ -4226,10 +4972,12 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.lbr_double_abort = true; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
hsw_format_attr : nhm_format_attr; + td_attr = hsw_events_attrs; + mem_attr = hsw_mem_events_attrs; + tsx_attr = hsw_tsx_events_attrs; pr_cont("Haswell events, "); name = "haswell"; break; @@ -4265,10 +5013,12 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.limit_period = bdw_limit_period; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; + td_attr = hsw_events_attrs; + mem_attr = hsw_mem_events_attrs; + tsx_attr = hsw_tsx_events_attrs; pr_cont("Broadwell events, "); name = "broadwell"; break; @@ -4293,9 +5043,10 @@ __init int intel_pmu_init(void) name = "knights-landing"; break; + case INTEL_FAM6_SKYLAKE_X: + pmem = true; case INTEL_FAM6_SKYLAKE_MOBILE: case INTEL_FAM6_SKYLAKE_DESKTOP: - case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: x86_pmu.late_ack = true; @@ -4322,15 +5073,59 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = hsw_get_event_constraints; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; - extra_attr = merge_attr(extra_attr, skl_format_attr); - to_free = extra_attr; - x86_pmu.cpu_events = get_hsw_events_attrs(); - intel_pmu_pebs_data_source_skl( - boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); + extra_skl_attr = skl_format_attr; + td_attr = hsw_events_attrs; + mem_attr = hsw_mem_events_attrs; + tsx_attr = hsw_tsx_events_attrs; + intel_pmu_pebs_data_source_skl(pmem); + + if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { + x86_pmu.flags |= PMU_FL_TFA; + x86_pmu.get_event_constraints = tfa_get_event_constraints; + x86_pmu.enable_all = intel_tfa_pmu_enable_all; + x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; + intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr; + } + pr_cont("Skylake events, "); name = "skylake"; break; + case INTEL_FAM6_ICELAKE_X: + case INTEL_FAM6_ICELAKE_XEON_D: + pmem = true; + case INTEL_FAM6_ICELAKE_MOBILE: + case INTEL_FAM6_ICELAKE_DESKTOP: + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = intel_icl_event_constraints; + x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; + x86_pmu.extra_regs = intel_icl_extra_regs; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + + x86_pmu.hw_config = hsw_hw_config; + x86_pmu.get_event_constraints = icl_get_event_constraints; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
+ hsw_format_attr : nhm_format_attr; + extra_skl_attr = skl_format_attr; + mem_attr = icl_events_attrs; + td_attr = icl_td_events_attrs; + tsx_attr = icl_tsx_events_attrs; + x86_pmu.lbr_pt_coexist = true; + intel_pmu_pebs_data_source_skl(pmem); + x86_pmu.update_topdown_event = icl_update_topdown_event; + x86_pmu.set_topdown_event_period = icl_set_topdown_event_period; + pr_cont("Icelake events, "); + name = "icelake"; + break; + default: switch (x86_pmu.version) { case 1: @@ -4351,11 +5146,13 @@ __init int intel_pmu_init(void) snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); - if (version >= 2 && extra_attr) { - x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, - extra_attr); - WARN_ON(!x86_pmu.format_attrs); - } + group_events_td.attrs = td_attr; + group_events_mem.attrs = mem_attr; + group_events_tsx.attrs = tsx_attr; + group_format_extra.attrs = extra_attr; + group_format_extra_skl.attrs = extra_skl_attr; + + x86_pmu.attr_update = attr_update; if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", @@ -4379,6 +5176,15 @@ __init int intel_pmu_init(void) * counter, so do not extend mask to generic counters */ for_each_event_constraint(c, x86_pmu.event_constraints) { + /* + * Don't extend the topdown slots and metrics + * events to the generic counters. + */ + if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { + c->weight = hweight64(c->idxmsk64); + continue; + } + if (c->cmask == FIXED_EVENT_FLAGS && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) { c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; @@ -4403,12 +5209,8 @@ __init int intel_pmu_init(void) x86_pmu.lbr_nr = 0; } - x86_pmu.caps_attrs = intel_pmu_caps_attrs; - - if (x86_pmu.lbr_nr) { - x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs); + if (x86_pmu.lbr_nr) pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); - } /* * Access extra MSR may cause #GP under certain circumstances. @@ -4431,7 +5233,9 @@ __init int intel_pmu_init(void) pr_cont("full-width counters, "); } - kfree(to_free); + if (x86_pmu.intel_cap.perf_metrics) + x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + return 0; } @@ -4468,7 +5272,7 @@ static __init int fixup_ht_bug(void) hardlockup_detector_perf_restart(); for_each_online_cpu(c) - free_excl_cntrs(c); + free_excl_cntrs(&per_cpu(cpu_hw_events, c)); cpus_read_unlock(); pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9f8084f18d58e440931f593d42cc825bbc00cfb8..4a650eb3d94a3a51db98f8a83be2c184bdaa206b 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -76,15 +76,15 @@ * Scope: Package (physical package) * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. * perf code: 0x04 - * Available model: HSW ULT,CNL + * Available model: HSW ULT,KBL,CNL * Scope: Package (physical package) * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. * perf code: 0x05 - * Available model: HSW ULT,CNL + * Available model: HSW ULT,KBL,CNL * Scope: Package (physical package) * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. 
* perf code: 0x06 - * Available model: HSW ULT,GLM,CNL + * Available model: HSW ULT,KBL,GLM,CNL * Scope: Package (physical package) * */ @@ -559,8 +559,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates), @@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates), X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates), @@ -581,9 +581,11 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates), - X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b7b01d762d32a3a6a30f4e2bd640aeae23dd7a3f..b0ba46b828324fb61c1d57006895d223100d400c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), EVENT_CONSTRAINT_END }; @@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { struct event_constraint intel_slm_pebs_event_constraints[] = { /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END @@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). 
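Switching to the UEVENT form makes the constraint match the umask as well as the event code, so it only applies to this exact pseudo encoding.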
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = { INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), EVENT_CONSTRAINT_END }; @@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = { INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ @@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf), /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). 
*/ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */ @@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = { struct event_constraint intel_skl_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2), /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f), + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f), INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ @@ -849,6 +849,26 @@ struct event_constraint intel_skl_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_icl_pebs_event_constraints[] = { + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ + + INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ + + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ + + INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ + + /* + * Everything else is handled by PMU_FL_PEBS_ALL, because we + * need the full constraints from the main table. 
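+ * (With PMU_FL_PEBS_ALL any counter can take a basic PEBS sample; the + * entries above only pin events to specific counters or add the + * load/store data-source flags.)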
+ */ + + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; @@ -858,7 +878,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) if (x86_pmu.pebs_constraints) { for_each_event_constraint(c, x86_pmu.pebs_constraints) { - if ((event->hw.config & c->cmask) == c->code) { + if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; } @@ -906,17 +926,87 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) if (cpuc->n_pebs == cpuc->n_large_pebs) { threshold = ds->pebs_absolute_maximum - - reserved * x86_pmu.pebs_record_size; + reserved * cpuc->pebs_record_size; } else { - threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size; + threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; } ds->pebs_interrupt_threshold = threshold; } +static void adaptive_pebs_record_size_update(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 pebs_data_cfg = cpuc->pebs_data_cfg; + int sz = sizeof(struct pebs_basic); + + if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) + sz += sizeof(struct pebs_meminfo); + if (pebs_data_cfg & PEBS_DATACFG_GP) + sz += sizeof(struct pebs_gprs); + if (pebs_data_cfg & PEBS_DATACFG_XMMS) + sz += sizeof(struct pebs_xmm); + if (pebs_data_cfg & PEBS_DATACFG_LBRS) + sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry); + + cpuc->pebs_record_size = sz; +} + +#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \ + PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \ + PERF_SAMPLE_TRANSACTION) + +static u64 pebs_update_adaptive_cfg(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 sample_type = attr->sample_type; + u64 pebs_data_cfg = 0; + bool gprs, tsx_weight; + + if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) && + attr->precise_ip > 1) + return pebs_data_cfg; + + if (sample_type & PERF_PEBS_MEMINFO_TYPE) + pebs_data_cfg |= PEBS_DATACFG_MEMINFO; + + /* + * We need GPRs when: + * + user requested them + * + precise_ip < 2 for the non event IP + * + For RTM TSX weight we need GPRs for the abort code. + */ + gprs = (sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PEBS_GP_REGS); + + tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) && + ((attr->config & INTEL_ARCH_EVENT_MASK) == + x86_pmu.rtm_abort_event); + + if (gprs || (attr->precise_ip < 2) || tsx_weight) + pebs_data_cfg |= PEBS_DATACFG_GP; + + if ((sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) + pebs_data_cfg |= PEBS_DATACFG_XMMS; + + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { + /* + * For now always log all LBRs. Could configure this + * later. + */ + pebs_data_cfg |= PEBS_DATACFG_LBRS | + ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); + } + + return pebs_data_cfg; +} + static void -pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu) +pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, + struct perf_event *event, bool add) { + struct pmu *pmu = event->ctx->pmu; /* * Make sure we get updated with the first PEBS * event. It will trigger also during removal, but @@ -933,6 +1023,29 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu) update = true; } + /* + * The PEBS record doesn't shrink on pmu::del(). Doing so would require + * iterating all remaining PEBS events to reconstruct the config. 
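+ * The configuration therefore only grows while events are attached; it + * is reset when the first PEBS event of a new batch is added + * (cpuc->n_pebs == 1).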
+ */ + if (x86_pmu.intel_cap.pebs_baseline && add) { + u64 pebs_data_cfg; + + /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */ + if (cpuc->n_pebs == 1) { + cpuc->pebs_data_cfg = 0; + cpuc->pebs_record_size = sizeof(struct pebs_basic); + } + + pebs_data_cfg = pebs_update_adaptive_cfg(event); + + /* Update pebs_record_size if new event requires more data. */ + if (pebs_data_cfg & ~cpuc->pebs_data_cfg) { + cpuc->pebs_data_cfg |= pebs_data_cfg; + adaptive_pebs_record_size_update(); + update = true; + } + } + if (update) pebs_update_threshold(cpuc); } @@ -947,7 +1060,7 @@ void intel_pmu_pebs_add(struct perf_event *event) if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs++; - pebs_update_state(needed_cb, cpuc, event->ctx->pmu); + pebs_update_state(needed_cb, cpuc, event, true); } void intel_pmu_pebs_enable(struct perf_event *event) @@ -960,11 +1073,19 @@ void intel_pmu_pebs_enable(struct perf_event *event) cpuc->pebs_enabled |= 1ULL << hwc->idx; - if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) + if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled |= 1ULL << 63; + if (x86_pmu.intel_cap.pebs_baseline) { + hwc->config |= ICL_EVENTSEL_ADAPTIVE; + if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) { + wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg); + cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg; + } + } + /* * Use auto-reload if possible to save a MSR write in the PMI. * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD. @@ -991,7 +1112,7 @@ void intel_pmu_pebs_del(struct perf_event *event) if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs--; - pebs_update_state(needed_cb, cpuc, event->ctx->pmu); + pebs_update_state(needed_cb, cpuc, event, false); } void intel_pmu_pebs_disable(struct perf_event *event) @@ -1004,7 +1125,8 @@ void intel_pmu_pebs_disable(struct perf_event *event) cpuc->pebs_enabled &= ~(1ULL << hwc->idx); - if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) + if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && + (x86_pmu.version < 5)) cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); @@ -1125,34 +1247,57 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) return 0; } -static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs) +static inline u64 intel_get_tsx_weight(u64 tsx_tuning) { - if (pebs->tsx_tuning) { - union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning }; + if (tsx_tuning) { + union hsw_tsx_tuning tsx = { .value = tsx_tuning }; return tsx.cycles_last_block; } return 0; } -static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs) +static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax) { - u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; + u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; /* For RTM XABORTs also log the abort code from AX */ - if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1)) - txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; + if ((txn & PERF_TXN_TRANSACTION) && (ax & 1)) + txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; return txn; } -static void setup_pebs_sample_data(struct perf_event *event, - struct pt_regs *iregs, void *__pebs, - struct perf_sample_data *data, - struct pt_regs *regs) +static inline u64 get_pebs_status(void *n) { + if (x86_pmu.intel_cap.pebs_format < 4) + return ((struct pebs_record_nhm 
*)n)->status; + return ((struct pebs_basic *)n)->applicable_counters; +} + #define PERF_X86_EVENT_PEBS_HSW_PREC \ (PERF_X86_EVENT_PEBS_ST_HSW | \ PERF_X86_EVENT_PEBS_LD_HSW | \ PERF_X86_EVENT_PEBS_NA_HSW) + +static u64 get_data_src(struct perf_event *event, u64 aux) +{ + u64 val = PERF_MEM_NA; + int fl = event->hw.flags; + bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); + + if (fl & PERF_X86_EVENT_PEBS_LDLAT) + val = load_latency_data(aux); + else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) + val = precise_datala_hsw(event, aux); + else if (fst) + val = precise_store_data(aux); + return val; +} + +static void setup_pebs_fixed_sample_data(struct perf_event *event, + struct pt_regs *iregs, void *__pebs, + struct perf_sample_data *data, + struct pt_regs *regs) +{ /* * We cast to the biggest pebs_record but are careful not to * unconditionally access the 'extra' entries. @@ -1160,17 +1305,13 @@ static void setup_pebs_sample_data(struct perf_event *event, struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct pebs_record_skl *pebs = __pebs; u64 sample_type; - int fll, fst, dsrc; - int fl = event->hw.flags; + int fll; if (pebs == NULL) return; sample_type = event->attr.sample_type; - dsrc = sample_type & PERF_SAMPLE_DATA_SRC; - - fll = fl & PERF_X86_EVENT_PEBS_LDLAT; - fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); + fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; perf_sample_data_init(data, 0, event->hw.last_period); @@ -1185,16 +1326,8 @@ static void setup_pebs_sample_data(struct perf_event *event, /* * data.data_src encodes the data source */ - if (dsrc) { - u64 val = PERF_MEM_NA; - if (fll) - val = load_latency_data(pebs->dse); - else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) - val = precise_datala_hsw(event, pebs->dse); - else if (fst) - val = precise_store_data(pebs->dse); - data->data_src.val = val; - } + if (sample_type & PERF_SAMPLE_DATA_SRC) + data->data_src.val = get_data_src(event, pebs->dse); /* * We must however always use iregs for the unwinder to stay sane; the @@ -1281,10 +1414,11 @@ static void setup_pebs_sample_data(struct perf_event *event, if (x86_pmu.intel_cap.pebs_format >= 2) { /* Only set the TSX weight when no memory weight. */ if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) - data->weight = intel_hsw_weight(pebs); + data->weight = intel_get_tsx_weight(pebs->tsx_tuning); if (sample_type & PERF_SAMPLE_TRANSACTION) - data->txn = intel_hsw_transaction(pebs); + data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, + pebs->ax); } /* @@ -1301,6 +1435,140 @@ static void setup_pebs_sample_data(struct perf_event *event, data->br_stack = &cpuc->lbr_stack; } +static void adaptive_pebs_save_regs(struct pt_regs *regs, + struct pebs_gprs *gprs) +{ + regs->ax = gprs->ax; + regs->bx = gprs->bx; + regs->cx = gprs->cx; + regs->dx = gprs->dx; + regs->si = gprs->si; + regs->di = gprs->di; + regs->bp = gprs->bp; + regs->sp = gprs->sp; +#ifndef CONFIG_X86_32 + regs->r8 = gprs->r8; + regs->r9 = gprs->r9; + regs->r10 = gprs->r10; + regs->r11 = gprs->r11; + regs->r12 = gprs->r12; + regs->r13 = gprs->r13; + regs->r14 = gprs->r14; + regs->r15 = gprs->r15; +#endif +} + +/* + * With adaptive PEBS the layout depends on what fields are configured. 
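+ * Each record starts with a fixed pebs_basic block; optional meminfo, + * GPR, XMM and LBR groups follow, in that order, when the corresponding + * PEBS_DATACFG_* bits were requested.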
+ */ + +static void setup_pebs_adaptive_sample_data(struct perf_event *event, + struct pt_regs *iregs, void *__pebs, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct pebs_basic *basic = __pebs; + void *next_record = basic + 1; + u64 sample_type; + u64 format_size; + struct pebs_meminfo *meminfo = NULL; + struct pebs_gprs *gprs = NULL; + struct x86_perf_regs *perf_regs; + + if (basic == NULL) + return; + + perf_regs = container_of(regs, struct x86_perf_regs, regs); + perf_regs->xmm_regs = NULL; + + sample_type = event->attr.sample_type; + format_size = basic->format_size; + perf_sample_data_init(data, 0, event->hw.last_period); + data->period = event->hw.last_period; + + if (event->attr.use_clockid == 0) + data->time = native_sched_clock_from_tsc(basic->tsc); + + /* + * We must however always use iregs for the unwinder to stay sane; the + * record BP,SP,IP can point into thin air when the record is from a + * previous PMI context or an (I)RET happened between the record and + * PMI. + */ + if (sample_type & PERF_SAMPLE_CALLCHAIN) + data->callchain = perf_callchain(event, iregs); + + *regs = *iregs; + /* The ip in basic is EventingIP */ + set_linear_ip(regs, basic->ip); + regs->flags = PERF_EFLAGS_EXACT; + + /* + * The record for MEMINFO is in front of GP + * But PERF_SAMPLE_TRANSACTION needs gprs->ax. + * Save the pointer here but process later. + */ + if (format_size & PEBS_DATACFG_MEMINFO) { + meminfo = next_record; + next_record = meminfo + 1; + } + + if (format_size & PEBS_DATACFG_GP) { + gprs = next_record; + next_record = gprs + 1; + + if (event->attr.precise_ip < 2) { + set_linear_ip(regs, gprs->ip); + regs->flags &= ~PERF_EFLAGS_EXACT; + } + + if (sample_type & PERF_SAMPLE_REGS_INTR) + adaptive_pebs_save_regs(regs, gprs); + } + + if (format_size & PEBS_DATACFG_MEMINFO) { + if (sample_type & PERF_SAMPLE_WEIGHT) + data->weight = meminfo->latency ?: + intel_get_tsx_weight(meminfo->tsx_tuning); + + if (sample_type & PERF_SAMPLE_DATA_SRC) + data->data_src.val = get_data_src(event, meminfo->aux); + + if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) + data->addr = meminfo->address; + + if (sample_type & PERF_SAMPLE_TRANSACTION) + data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, + gprs ? 
gprs->ax : 0); + } + + if (format_size & PEBS_DATACFG_XMMS) { + struct pebs_xmm *xmm = next_record; + + next_record = xmm + 1; + perf_regs->xmm_regs = xmm->xmm; + } + + if (format_size & PEBS_DATACFG_LBRS) { + struct pebs_lbr *lbr = next_record; + int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT) + & 0xff) + 1; + next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry); + + if (has_branch_stack(event)) { + intel_pmu_store_pebs_lbrs(lbr); + data->br_stack = &cpuc->lbr_stack; + } + } + + WARN_ONCE(next_record != __pebs + (format_size >> 48), + "PEBS record size %llu, expected %llu, config %llx\n", + format_size >> 48, + (u64)(next_record - __pebs), + basic->format_size); +} + static inline void * get_next_pebs_record_by_bit(void *base, void *top, int bit) { @@ -1318,19 +1586,19 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) if (base == NULL) return NULL; - for (at = base; at < top; at += x86_pmu.pebs_record_size) { - struct pebs_record_nhm *p = at; + for (at = base; at < top; at += cpuc->pebs_record_size) { + unsigned long status = get_pebs_status(at); - if (test_bit(bit, (unsigned long *)&p->status)) { + if (test_bit(bit, (unsigned long *)&status)) { /* PEBS v3 has accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) return at; - if (p->status == (1 << bit)) + if (status == (1 << bit)) return at; /* clear non-PEBS bit and re-check */ - pebs_status = p->status & cpuc->pebs_enabled; + pebs_status = status & cpuc->pebs_enabled; pebs_status &= PEBS_COUNTER_MASK; if (pebs_status == (1 << bit)) return at; @@ -1410,11 +1678,18 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *base, void *top, - int bit, int count) + int bit, int count, + void (*setup_sample)(struct perf_event *, + struct pt_regs *, + void *, + struct perf_sample_data *, + struct pt_regs *)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; struct perf_sample_data data; - struct pt_regs regs; + struct x86_perf_regs perf_regs; + struct pt_regs *regs = &perf_regs.regs; void *at = get_next_pebs_record_by_bit(base, top, bit); if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { @@ -1429,20 +1704,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event, return; while (count > 1) { - setup_pebs_sample_data(event, iregs, at, &data, ®s); - perf_event_output(event, &data, ®s); - at += x86_pmu.pebs_record_size; + setup_sample(event, iregs, at, &data, regs); + perf_event_output(event, &data, regs); + at += cpuc->pebs_record_size; at = get_next_pebs_record_by_bit(at, top, bit); count--; } - setup_pebs_sample_data(event, iregs, at, &data, ®s); + setup_sample(event, iregs, at, &data, regs); /* * All but the last records are processed. * The last one is left to be able to call the overflow handler. */ - if (perf_event_overflow(event, &data, ®s)) { + if (perf_event_overflow(event, &data, regs)) { x86_pmu_stop(event, 0); return; } @@ -1483,7 +1758,27 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) return; } - __intel_pmu_pebs_event(event, iregs, at, top, 0, n); + __intel_pmu_pebs_event(event, iregs, at, top, 0, n, + setup_pebs_fixed_sample_data); +} + +static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) +{ + struct perf_event *event; + int bit; + + /* + * The drain_pebs() could be called twice in a short period + * for auto-reload event in pmu::read(). 
No + * overflows have happened in between. + * We still need to call intel_pmu_save_and_restart_reload() to + * update the event->count for this case. + */ + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); + } } static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) @@ -1513,19 +1808,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) } if (unlikely(base >= top)) { - /* - * The drain_pebs() could be called twice in a short period - * for auto-reload event in pmu::read(). There are no - * overflows have happened in between. - * It needs to call intel_pmu_save_and_restart_reload() to - * update the event->count for this case. - */ - for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, - size) { - event = cpuc->events[bit]; - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_save_and_restart_reload(event, 0); - } + intel_pmu_pebs_event_update_no_drain(cpuc, size); return; } @@ -1538,8 +1821,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) /* PEBS v3 has more accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { - for_each_set_bit(bit, (unsigned long *)&pebs_status, - size) + for_each_set_bit(bit, (unsigned long *)&pebs_status, size) counts[bit]++; continue; @@ -1555,7 +1837,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) */ if (!pebs_status && cpuc->pebs_enabled && !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) - pebs_status = cpuc->pebs_enabled; + pebs_status = p->status = cpuc->pebs_enabled; bit = find_first_bit((unsigned long *)&pebs_status, x86_pmu.max_pebs_events); @@ -1578,8 +1860,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * If collision happened, the record will be dropped. 
*/ if (p->status != (1ULL << bit)) { - for_each_set_bit(i, (unsigned long *)&pebs_status, - x86_pmu.max_pebs_events) + for_each_set_bit(i, (unsigned long *)&pebs_status, size) error[i]++; continue; } @@ -1587,7 +1868,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) counts[bit]++; } - for (bit = 0; bit < size; bit++) { + for_each_set_bit(bit, (unsigned long *)&mask, size) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; @@ -1608,11 +1889,66 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) if (counts[bit]) { __intel_pmu_pebs_event(event, iregs, base, - top, bit, counts[bit]); + top, bit, counts[bit], + setup_pebs_fixed_sample_data); } } } +static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs) +{ + short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct debug_store *ds = cpuc->ds; + struct perf_event *event; + void *base, *at, *top; + int bit, size; + u64 mask; + + if (!x86_pmu.pebs_active) + return; + + base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; + top = (struct pebs_basic *)(unsigned long)ds->pebs_index; + + ds->pebs_index = ds->pebs_buffer_base; + + mask = ((1ULL << x86_pmu.max_pebs_events) - 1) | + (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); + size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + + if (unlikely(base >= top)) { + intel_pmu_pebs_event_update_no_drain(cpuc, size); + return; + } + + for (at = base; at < top; at += cpuc->pebs_record_size) { + u64 pebs_status; + + pebs_status = get_pebs_status(at) & cpuc->pebs_enabled; + pebs_status &= mask; + + for_each_set_bit(bit, (unsigned long *)&pebs_status, size) + counts[bit]++; + } + + for_each_set_bit(bit, (unsigned long *)&mask, size) { + if (counts[bit] == 0) + continue; + + event = cpuc->events[bit]; + if (WARN_ON_ONCE(!event)) + continue; + + if (WARN_ON_ONCE(!event->attr.precise_ip)) + continue; + + __intel_pmu_pebs_event(event, iregs, base, + top, bit, counts[bit], + setup_pebs_adaptive_sample_data); + } +} + /* * BTS, PEBS probe and setup */ @@ -1630,8 +1966,12 @@ void __init intel_ds_init(void) x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? 
'+' : '-'; + char *pebs_qual = ""; int format = x86_pmu.intel_cap.pebs_format; + if (format < 4) + x86_pmu.intel_cap.pebs_baseline = 0; + switch (format) { case 0: pr_cont("PEBS fmt0%c, ", pebs_type); @@ -1667,6 +2007,29 @@ void __init intel_ds_init(void) x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; break; + case 4: + x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl; + x86_pmu.pebs_record_size = sizeof(struct pebs_basic); + if (x86_pmu.intel_cap.pebs_baseline) { + x86_pmu.large_pebs_flags |= + PERF_SAMPLE_BRANCH_STACK | + PERF_SAMPLE_TIME; + x86_pmu.flags |= PMU_FL_PEBS_ALL; + pebs_qual = "-baseline"; + x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; + } else { + /* Only basic record supported */ + x86_pmu.large_pebs_flags &= + ~(PERF_SAMPLE_ADDR | + PERF_SAMPLE_TIME | + PERF_SAMPLE_DATA_SRC | + PERF_SAMPLE_TRANSACTION | + PERF_SAMPLE_REGS_USER | + PERF_SAMPLE_REGS_INTR); + } + pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual); + break; + default: pr_cont("no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c88ed39582a10095b41b364bf6532b0f5298c6e2..1fb1ede03c40fba157befc1b3ff0015b1c0de2af 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "../perf_event.h" @@ -383,6 +384,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) wrmsrl(x86_pmu.lbr_tos, tos); task_ctx->lbr_stack_state = LBR_NONE; + + if (cpuc->lbr_select) + wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); } static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) @@ -415,6 +419,9 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) cpuc->last_task_ctx = task_ctx; cpuc->last_log_id = ++task_ctx->log_id; + + if (cpuc->lbr_select) + rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); } void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) @@ -462,6 +469,9 @@ void intel_pmu_lbr_add(struct perf_event *event) if (!x86_pmu.lbr_nr) return; + if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) + cpuc->lbr_select = 1; + cpuc->br_sel = event->hw.branch_reg.reg; if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) { @@ -488,6 +498,8 @@ void intel_pmu_lbr_add(struct perf_event *event) * be 'new'. Conversely, a new event can get installed through the * context switch path for the first time. 
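+ * + * Adaptive-PEBS users of the LBR are counted separately in + * lbr_pebs_users below, so that intel_pmu_lbr_read() can skip the MSR + * reads when every LBR user gets its records from the PEBS buffer.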
*/ + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) + cpuc->lbr_pebs_users++; perf_sched_cb_inc(event->ctx->pmu); if (!cpuc->lbr_users++ && !event->total_time_running) intel_pmu_lbr_reset(); @@ -507,16 +519,30 @@ void intel_pmu_lbr_del(struct perf_event *event) task_ctx->lbr_callstack_users--; } + if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT) + cpuc->lbr_select = 0; + + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) + cpuc->lbr_pebs_users--; cpuc->lbr_users--; WARN_ON_ONCE(cpuc->lbr_users < 0); + WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->ctx->pmu); } +static inline bool vlbr_exclude_host(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + return test_bit(INTEL_PMC_IDX_FIXED_VLBR, + (unsigned long *)&cpuc->intel_ctrl_guest_mask); +} + void intel_pmu_lbr_enable_all(bool pmi) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (cpuc->lbr_users) + if (cpuc->lbr_users && !vlbr_exclude_host()) __intel_pmu_lbr_enable(pmi); } @@ -524,7 +550,7 @@ void intel_pmu_lbr_disable_all(void) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (cpuc->lbr_users) + if (cpuc->lbr_users && !vlbr_exclude_host()) __intel_pmu_lbr_disable(); } @@ -658,7 +684,14 @@ void intel_pmu_lbr_read(void) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (!cpuc->lbr_users) + /* + * Don't read when all LBRs users are using adaptive PEBS. + * + * This could be smarter and actually check the event, + * but this simple approach seems to work for now. + */ + if (!cpuc->lbr_users || vlbr_exclude_host() || + cpuc->lbr_users == cpuc->lbr_pebs_users) return; if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) @@ -862,9 +895,9 @@ static int branch_type(unsigned long from, unsigned long to, int abort) * The LBR logs any address in the IP, even if the IP just * faulted. This means userspace can control the from address. * Ensure we don't blindy read any address by validating it is - * a known text address. + * a known text address and not a vsyscall address. 
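+ * (The vsyscall page is filtered out with in_gate_area_no_mm().)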
*/ - if (kernel_text_address(from)) { + if (kernel_text_address(from) && !in_gate_area_no_mm(from)) { addr = (void *)from; /* * Assume we can get the maximum possible size @@ -1079,6 +1112,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) } } +void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int i; + + cpuc->lbr_stack.nr = x86_pmu.lbr_nr; + for (i = 0; i < x86_pmu.lbr_nr; i++) { + u64 info = lbr->lbr[i].info; + struct perf_branch_entry *e = &cpuc->lbr_entries[i]; + + e->from = lbr->lbr[i].from; + e->to = lbr->lbr[i].to; + e->mispred = !!(info & LBR_INFO_MISPRED); + e->predicted = !(info & LBR_INFO_MISPRED); + e->in_tx = !!(info & LBR_INFO_IN_TX); + e->abort = !!(info & LBR_INFO_ABORT); + e->cycles = info & LBR_INFO_CYCLES; + e->reserved = 0; + } + intel_pmu_lbr_filter(cpuc); +} + /* * Map interface branch filters onto LBR filters */ @@ -1277,3 +1332,27 @@ void intel_pmu_lbr_init_knl(void) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP) x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS; } + +/** + * x86_perf_get_lbr - get the LBR records information + * + * @lbr: the caller's memory to store the LBR records information + * + * Returns: 0 indicates the LBR info has been successfully obtained + */ +int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) +{ + int lbr_fmt = x86_pmu.intel_cap.lbr_format; + + lbr->nr = x86_pmu.lbr_nr; + lbr->from = x86_pmu.lbr_from; + lbr->to = x86_pmu.lbr_to; + lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? MSR_LBR_INFO_0 : 0; + + return 0; +} +EXPORT_SYMBOL_GPL(x86_perf_get_lbr); + +struct event_constraint vlbr_constraint = + __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR), + FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT); diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 8d016ce5b80dcc0ab1102c0e864bb706297d6de4..774fb0f0bf6df8b90b7a3dd48157a13f13725428 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -69,7 +69,7 @@ static struct pt_cap_desc { PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)), PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)), PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)), - PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3), + PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7), PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000), PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff), PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000), @@ -1104,10 +1104,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, * Return: Our private PT buffer structure. 
*/ static void * -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot) +pt_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct pt_buffer *buf; - int node, ret; + int node, ret, cpu = event->cpu; if (!nr_pages) return NULL; @@ -1212,7 +1213,8 @@ static int pt_event_addr_filters_validate(struct list_head *filters) static void pt_event_addr_filters_sync(struct perf_event *event) { struct perf_addr_filters_head *head = perf_event_addr_filters(event); - unsigned long msr_a, msr_b, *offs = event->addr_filters_offs; + unsigned long msr_a, msr_b; + struct perf_addr_filter_range *fr = event->addr_filter_ranges; struct pt_filters *filters = event->hw.addr_filters; struct perf_addr_filter *filter; int range = 0; @@ -1221,12 +1223,12 @@ static void pt_event_addr_filters_sync(struct perf_event *event) return; list_for_each_entry(filter, &head->list, entry) { - if (filter->path.dentry && !offs[range]) { + if (filter->path.dentry && !fr[range].start) { msr_a = msr_b = 0; } else { /* apply the offset */ - msr_a = filter->offset + offs[range]; - msr_b = filter->size + msr_a - 1; + msr_a = fr[range].start; + msr_b = msr_a + fr[range].size - 1; } filters->filter[range].msr_a = msr_a; @@ -1513,8 +1515,7 @@ static __init int pt_init(void) } if (!pt_cap_get(PT_CAP_topa_multiple_entries)) - pt_pmu.pmu.capabilities = - PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF; + pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG; pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE; pt_pmu.pmu.attr_groups = pt_attr_groups; diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 32f3e9423e99ea40b23ee9196003fc0de2546829..2413169ce362710569e3f2f12f83005a3287496d 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -777,9 +777,11 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init), - X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init), + + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init), {}, }; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 27a461414b306851d70ca2438d054a43d8aa9fed..b5523a5e4d9057cfbc22d472123336757d5c572e 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -7,6 +7,7 @@ static struct intel_uncore_type *empty_uncore[] = { NULL, }; struct intel_uncore_type **uncore_msr_uncores = empty_uncore; struct intel_uncore_type **uncore_pci_uncores = empty_uncore; +struct intel_uncore_type **uncore_mmio_uncores = empty_uncore; static bool pcidrv_registered; struct pci_driver *uncore_pci_driver; @@ -27,7 +28,7 @@ struct event_constraint uncore_constraint_empty = MODULE_LICENSE("GPL"); -static int uncore_pcibus_to_physid(struct pci_bus *bus) +int uncore_pcibus_to_physid(struct pci_bus *bus) { struct pci2phy_map *map; int phys_id = -1; @@ -118,6 +119,21 @@ u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *eve return count; } +void uncore_mmio_exit_box(struct intel_uncore_box *box) +{ + if (box->io_addr) + iounmap(box->io_addr); +} + +u64 uncore_mmio_read_counter(struct intel_uncore_box *box, + struct perf_event *event) +{ + if 
(!box->io_addr) + return 0; + + return readq(box->io_addr + event->hw.event_base); +} + /* * generic get constraint function for shared match/mask registers. */ @@ -485,10 +501,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags) local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); uncore_enable_event(box, event); - if (box->n_active == 1) { - uncore_enable_box(box); + if (box->n_active == 1) uncore_pmu_start_hrtimer(box); - } } void uncore_pmu_event_stop(struct perf_event *event, int flags) @@ -512,10 +526,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags) WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; - if (box->n_active == 0) { - uncore_disable_box(box); + if (box->n_active == 0) uncore_pmu_cancel_hrtimer(box); - } } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { @@ -740,6 +752,7 @@ static int uncore_pmu_event_init(struct perf_event *event) /* fixed counters have event field hardcoded to zero */ hwc->config = 0ULL; } else if (is_freerunning_event(event)) { + hwc->config = event->attr.config; if (!check_valid_freerunning_event(box, event)) return -EINVAL; event->hw.idx = UNCORE_PMC_IDX_FREERUNNING; @@ -768,6 +781,40 @@ static int uncore_pmu_event_init(struct perf_event *event) return ret; } +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct intel_uncore_pmu *uncore_pmu; + struct intel_uncore_box *box; + + uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct intel_uncore_pmu *uncore_pmu; + struct intel_uncore_box *box; + + uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + static ssize_t uncore_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { @@ -793,6 +840,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu) pmu->pmu = (struct pmu) { .attr_groups = pmu->type->attr_groups, .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, .event_init = uncore_pmu_event_init, .add = uncore_pmu_event_add, .del = uncore_pmu_event_del, @@ -1146,12 +1195,27 @@ static void uncore_change_context(struct intel_uncore_type **uncores, uncore_change_type_ctx(*uncores, old_cpu, new_cpu); } -static int uncore_event_cpu_offline(unsigned int cpu) +static void uncore_box_unref(struct intel_uncore_type **types, int id) { - struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_type *type; struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; - int i, pkg, target; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + int die, target; /* Check if exiting cpu is used for collecting uncore events */ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) @@ -1166,20 +1230,14 @@ static int uncore_event_cpu_offline(unsigned int cpu) target = -1; 
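+ /* + * Hand the uncore boxes over to the new target CPU; a target of -1 + * leaves the package without a collecting CPU until one comes online. + */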
uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); uncore_change_context(uncore_pci_uncores, cpu, target); unref: /* Clear the references */ - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (box && atomic_dec_return(&box->refcnt) == 0) - uncore_box_exit(box); - } - } + die = topology_logical_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, die); + uncore_box_unref(uncore_mmio_uncores, die); return 0; } @@ -1222,15 +1280,15 @@ static int allocate_boxes(struct intel_uncore_type **types, return -ENOMEM; } -static int uncore_event_cpu_online(unsigned int cpu) +static int uncore_box_ref(struct intel_uncore_type **types, + int id, unsigned int cpu) { - struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_type *type; struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; - int i, ret, pkg, target; + int i, ret; - pkg = topology_logical_package_id(cpu); - ret = allocate_boxes(types, pkg, cpu); + ret = allocate_boxes(types, id, cpu); if (ret) return ret; @@ -1238,11 +1296,23 @@ static int uncore_event_cpu_online(unsigned int cpu) type = *types; pmu = type->pmus; for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; + box = pmu->boxes[id]; if (box && atomic_inc_return(&box->refcnt) == 1) uncore_box_init(box); } } + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int die, target, msr_ret, mmio_ret; + + die = topology_logical_die_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; /* * Check if there is an online cpu in the package @@ -1254,7 +1324,10 @@ static int uncore_event_cpu_online(unsigned int cpu) cpumask_set_cpu(cpu, &uncore_cpu_mask); - uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); uncore_change_context(uncore_pci_uncores, -1, cpu); return 0; } @@ -1302,12 +1375,35 @@ static int __init uncore_cpu_init(void) return ret; } +static int __init uncore_mmio_init(void) +{ + struct intel_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + + #define X86_UNCORE_MODEL_MATCH(model, init) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init } struct intel_uncore_init_fun { void (*cpu_init)(void); int (*pci_init)(void); + void (*mmio_init)(void); }; static const struct intel_uncore_init_fun nhm_uncore_init __initconst = { @@ -1373,6 +1469,23 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = { .pci_init = skx_uncore_pci_init, }; +static const struct intel_uncore_init_fun icl_uncore_init __initconst = { + .cpu_init = icl_uncore_cpu_init, + .pci_init = skl_uncore_pci_init, +}; + +static const struct intel_uncore_init_fun icx_uncore_init __initconst = { + .cpu_init = icx_uncore_cpu_init, + .pci_init = icx_uncore_pci_init, + .mmio_init = icx_uncore_mmio_init, +}; + +static const struct intel_uncore_init_fun snr_uncore_init __initconst = { + .cpu_init = 
snr_uncore_cpu_init, + .pci_init = snr_uncore_pci_init, + .mmio_init = snr_uncore_mmio_init, +}; + static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init), @@ -1399,6 +1512,10 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_XEON_D, icx_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_X, icx_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init), {}, }; @@ -1408,7 +1525,7 @@ static int __init intel_uncore_init(void) { const struct x86_cpu_id *id; struct intel_uncore_init_fun *uncore_init; - int pret = 0, cret = 0, ret; + int pret = 0, cret = 0, mret = 0, ret; id = x86_match_cpu(intel_uncore_match); if (!id) @@ -1431,7 +1548,12 @@ static int __init intel_uncore_init(void) cret = uncore_cpu_init(); } - if (cret && pret) + if (uncore_init->mmio_init) { + uncore_init->mmio_init(); + mret = uncore_mmio_init(); + } + + if (cret && pret && mret) return -ENODEV; /* Install hotplug callbacks to setup the targets for each package */ @@ -1445,6 +1567,7 @@ static int __init intel_uncore_init(void) err: uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); uncore_pci_exit(); return ret; } @@ -1454,6 +1577,7 @@ static void __exit intel_uncore_exit(void) { cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); uncore_pci_exit(); } module_exit(intel_uncore_exit); diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index e17ab885b1e928d17a671eb96f12cf21905bdb93..f57651c4176e1dfa05efcedaaa2ae00da7ff665f 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -2,6 +2,7 @@ #include #include #include +#include #include #include "../perf_event.h" @@ -56,7 +57,10 @@ struct intel_uncore_type { unsigned fixed_ctr; unsigned fixed_ctl; unsigned box_ctl; - unsigned msr_offset; + union { + unsigned msr_offset; + unsigned mmio_offset; + }; unsigned num_shared_regs:8; unsigned single_fixed:1; unsigned pair_ctr_ctl:1; @@ -125,7 +129,7 @@ struct intel_uncore_box { struct hrtimer hrtimer; struct list_head list; struct list_head active_list; - void *io_addr; + void __iomem *io_addr; struct intel_uncore_extra_reg shared_regs[0]; }; @@ -143,6 +147,7 @@ struct freerunning_counters { unsigned int box_offset; unsigned int num_counters; unsigned int bits; + unsigned *box_offsets; }; struct pci2phy_map { @@ -152,6 +157,7 @@ struct pci2phy_map { }; struct pci2phy_map *__find_pci2phy_map(int segment); +int uncore_pcibus_to_physid(struct pci_bus *bus); ssize_t uncore_event_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); @@ -183,6 +189,13 @@ static inline bool uncore_pmc_freerunning(int idx) return idx == UNCORE_PMC_IDX_FREERUNNING; } +static inline +unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box) +{ + return box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) { return box->pmu->type->box_ctl; @@ -285,13 +298,15 @@ static inline unsigned int 
uncore_freerunning_counter(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); - unsigned int idx = uncore_freerunning_idx(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); + unsigned int idx = uncore_freerunning_idx(event->hw.config); struct intel_uncore_pmu *pmu = box->pmu; return pmu->type->freerunning[type].counter_base + pmu->type->freerunning[type].counter_offset * idx + - pmu->type->freerunning[type].box_offset * pmu->pmu_idx; + (pmu->type->freerunning[type].box_offsets ? + pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : + pmu->type->freerunning[type].box_offset * pmu->pmu_idx); } static inline @@ -313,7 +328,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) static inline unsigned uncore_fixed_ctl(struct intel_uncore_box *box) { - if (box->pci_dev) + if (box->pci_dev || box->io_addr) return uncore_pci_fixed_ctl(box); else return uncore_msr_fixed_ctl(box); @@ -322,7 +337,7 @@ unsigned uncore_fixed_ctl(struct intel_uncore_box *box) static inline unsigned uncore_fixed_ctr(struct intel_uncore_box *box) { - if (box->pci_dev) + if (box->pci_dev || box->io_addr) return uncore_pci_fixed_ctr(box); else return uncore_msr_fixed_ctr(box); @@ -331,7 +346,7 @@ unsigned uncore_fixed_ctr(struct intel_uncore_box *box) static inline unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) { - if (box->pci_dev) + if (box->pci_dev || box->io_addr) return uncore_pci_event_ctl(box, idx); else return uncore_msr_event_ctl(box, idx); @@ -340,7 +355,7 @@ unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) static inline unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx) { - if (box->pci_dev) + if (box->pci_dev || box->io_addr) return uncore_pci_perf_ctr(box, idx); else return uncore_msr_perf_ctr(box, idx); @@ -360,7 +375,7 @@ static inline unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); return box->pmu->type->freerunning[type].bits; } @@ -368,7 +383,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, static inline int uncore_num_freerunning(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); return box->pmu->type->freerunning[type].num_counters; } @@ -382,8 +397,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); - unsigned int idx = uncore_freerunning_idx(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); + unsigned int idx = uncore_freerunning_idx(event->hw.config); return (type < uncore_num_freerunning_types(box, event)) && (idx < uncore_num_freerunning(box, event)); @@ -402,16 +417,14 @@ static inline bool is_freerunning_event(struct perf_event *event) (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START); } -static inline void uncore_disable_box(struct intel_uncore_box *box) +/* Check and reject invalid config */ +static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box, + struct perf_event *event) { - if (box->pmu->type->ops->disable_box) - 
box->pmu->type->ops->disable_box(box); -} + if (is_freerunning_event(event)) + return 0; -static inline void uncore_enable_box(struct intel_uncore_box *box) -{ - if (box->pmu->type->ops->enable_box) - box->pmu->type->ops->enable_box(box); + return -EINVAL; } static inline void uncore_disable_event(struct intel_uncore_box *box, @@ -465,6 +478,9 @@ static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *ev struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu); u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); +void uncore_mmio_exit_box(struct intel_uncore_box *box); +u64 uncore_mmio_read_counter(struct intel_uncore_box *box, + struct perf_event *event); void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); void uncore_pmu_event_start(struct perf_event *event, int flags); @@ -480,6 +496,7 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); extern struct intel_uncore_type **uncore_msr_uncores; extern struct intel_uncore_type **uncore_pci_uncores; +extern struct intel_uncore_type **uncore_mmio_uncores; extern struct pci_driver *uncore_pci_driver; extern raw_spinlock_t pci2phy_map_lock; extern struct list_head pci2phy_map_head; @@ -495,6 +512,7 @@ int skl_uncore_pci_init(void); void snb_uncore_cpu_init(void); void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); +void icl_uncore_cpu_init(void); int snb_pci2phy_map_init(int devid); /* uncore_snbep.c */ @@ -510,6 +528,12 @@ int knl_uncore_pci_init(void); void knl_uncore_cpu_init(void); int skx_uncore_pci_init(void); void skx_uncore_cpu_init(void); +int snr_uncore_pci_init(void); +void snr_uncore_cpu_init(void); +void snr_uncore_mmio_init(void); +int icx_uncore_pci_init(void); +void icx_uncore_cpu_init(void); +void icx_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 8527c3e1038b78d868743274c35368ab318649ca..7eea98642b37a4ed52e40108cc34f1e204f45ad7 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -15,6 +15,27 @@ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f +#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c +#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 +#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 +#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f +#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f +#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc +#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 +#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 +#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 +#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f +#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f +#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 +#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 +#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 +#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 +#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 +#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 +#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca +#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 +#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02 +#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12 /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -74,6 +95,12 @@ #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) +/* 
ICL Cbo register */ +#define ICL_UNC_CBO_CONFIG 0x396 +#define ICL_UNC_NUM_CBO_MASK 0xf +#define ICL_UNC_CBO_0_PER_CTR0 0x702 +#define ICL_UNC_CBO_MSR_OFFSET 0x8 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); @@ -257,6 +284,70 @@ void skl_uncore_cpu_init(void) snb_uncore_arb.ops = &skl_uncore_msr_ops; } +static struct intel_uncore_type icl_uncore_cbox = { + .name = "cbox", + .num_counters = 2, + .perf_ctr_bits = 44, + .perf_ctr = ICL_UNC_CBO_0_PER_CTR0, + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = ICL_UNC_CBO_MSR_OFFSET, + .ops = &skl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + +static struct uncore_event_desc icl_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"), + { /* end: all zeroes */ }, +}; + +static struct attribute *icl_uncore_clock_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group icl_uncore_clock_format_group = { + .name = "format", + .attrs = icl_uncore_clock_formats_attr, +}; + +static struct intel_uncore_type icl_uncore_clockbox = { + .name = "clock", + .num_counters = 1, + .num_boxes = 1, + .fixed_ctr_bits = 48, + .fixed_ctr = SNB_UNC_FIXED_CTR, + .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, + .single_fixed = 1, + .event_mask = SNB_UNC_CTL_EV_SEL_MASK, + .format_group = &icl_uncore_clock_format_group, + .ops = &skl_uncore_msr_ops, + .event_descs = icl_uncore_events, +}; + +static struct intel_uncore_type *icl_msr_uncores[] = { + &icl_uncore_cbox, + &snb_uncore_arb, + &icl_uncore_clockbox, + NULL, +}; + +static int icl_get_cbox_num(void) +{ + u64 num_boxes; + + rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); + + return num_boxes & ICL_UNC_NUM_CBO_MASK; +} + +void icl_uncore_cpu_init(void) +{ + uncore_msr_uncores = icl_msr_uncores; + icl_uncore_cbox.num_boxes = icl_get_cbox_num(); + snb_uncore_arb.ops = &skl_uncore_msr_ops; +} + enum { SNB_PCI_UNCORE_IMC, }; @@ -325,11 +416,6 @@ static void snb_uncore_imc_init_box(struct intel_uncore_box *box) box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL; } -static void snb_uncore_imc_exit_box(struct intel_uncore_box *box) -{ - iounmap(box->io_addr); -} - static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) {} @@ -342,13 +428,6 @@ static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct per static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event) {} -static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - return (u64)*(unsigned int *)(box->io_addr + hwc->event_base); -} - /* * Keep the custom event_init() function compatible with old event * encoding for free running counters. 
@@ -425,9 +504,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event) /* must be done before validate_group */ event->hw.event_base = base; - event->hw.config = cfg; event->hw.idx = idx; + /* Convert to standard encoding format for freerunning counters */ + event->hw.config = ((cfg - 1) << 8) | 0x10ff; + /* no group validation needed, we have free running counters */ return 0; @@ -478,13 +559,13 @@ static struct pmu snb_uncore_imc_pmu = { static struct intel_uncore_ops snb_uncore_imc_ops = { .init_box = snb_uncore_imc_init_box, - .exit_box = snb_uncore_imc_exit_box, + .exit_box = uncore_mmio_exit_box, .enable_box = snb_uncore_imc_enable_box, .disable_box = snb_uncore_imc_disable_box, .disable_event = snb_uncore_imc_disable_event, .enable_event = snb_uncore_imc_enable_event, .hw_config = snb_uncore_imc_hw_config, - .read_counter = snb_uncore_imc_read_counter, + .read_counter = uncore_mmio_read_counter, }; static struct intel_uncore_type snb_uncore_imc = { @@ -569,7 +650,94 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, 
+ { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; +static const struct pci_device_id icl_uncore_pci_ids[] = { + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* end: all zeroes */ }, }; @@ -598,6 +766,11 @@ static struct pci_driver skl_uncore_pci_driver = { .id_table = skl_uncore_pci_ids, }; +static struct pci_driver icl_uncore_pci_driver = { + .name = "icl_uncore", + .id_table = icl_uncore_pci_ids, +}; + struct imc_uncore_pci_dev { __u32 pci_id; struct pci_driver *driver; @@ -618,6 +791,27 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ + IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ + IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ + IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ + IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */ + IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */ + IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */ + IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */ + IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */ + IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */ + IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */ + IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */ + IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */ + IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */ + IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */ + IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */ + IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */ + IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */ + IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ + IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ + IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */ + IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */ { /* end marker */ } }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe859c61c53c499e9aabcbe11f1f07b..8326dd5b5068ac42087c37991038a35fa5f11cd9 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -324,12 +324,114 @@ #define SKX_M2M_PCI_PMON_CTR0 0x200 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258 
+/* SNR Ubox */ +#define SNR_U_MSR_PMON_CTR0 0x1f98 +#define SNR_U_MSR_PMON_CTL0 0x1f91 +#define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93 +#define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94 + +/* SNR CHA */ +#define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff +#define SNR_CHA_MSR_PMON_CTL0 0x1c01 +#define SNR_CHA_MSR_PMON_CTR0 0x1c08 +#define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00 +#define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05 + + +/* SNR IIO */ +#define SNR_IIO_MSR_PMON_CTL0 0x1e08 +#define SNR_IIO_MSR_PMON_CTR0 0x1e01 +#define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00 +#define SNR_IIO_MSR_OFFSET 0x10 +#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff + +/* SNR IRP */ +#define SNR_IRP0_MSR_PMON_CTL0 0x1ea8 +#define SNR_IRP0_MSR_PMON_CTR0 0x1ea1 +#define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0 +#define SNR_IRP_MSR_OFFSET 0x10 + +/* SNR M2PCIE */ +#define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58 +#define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51 +#define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50 +#define SNR_M2PCIE_MSR_OFFSET 0x10 + +/* SNR PCU */ +#define SNR_PCU_MSR_PMON_CTL0 0x1ef1 +#define SNR_PCU_MSR_PMON_CTR0 0x1ef8 +#define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0 +#define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc + +/* SNR M2M */ +#define SNR_M2M_PCI_PMON_CTL0 0x468 +#define SNR_M2M_PCI_PMON_CTR0 0x440 +#define SNR_M2M_PCI_PMON_BOX_CTL 0x438 +#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff + +/* SNR PCIE3 */ +#define SNR_PCIE3_PCI_PMON_CTL0 0x508 +#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8 +#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4 + +/* SNR IMC */ +#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 +#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 +#define SNR_IMC_MMIO_PMON_CTL0 0x40 +#define SNR_IMC_MMIO_PMON_CTR0 0x8 +#define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800 +#define SNR_IMC_MMIO_OFFSET 0x4000 +#define SNR_IMC_MMIO_SIZE 0x4000 +#define SNR_IMC_MMIO_BASE_OFFSET 0xd0 +#define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF +#define SNR_IMC_MMIO_MEM0_OFFSET 0xd8 +#define SNR_IMC_MMIO_MEM0_MASK 0x7FF + +/* ICX CHA */ +#define ICX_C34_MSR_PMON_CTR0 0xb68 +#define ICX_C34_MSR_PMON_CTL0 0xb61 +#define ICX_C34_MSR_PMON_BOX_CTL 0xb60 +#define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65 + +/* ICX IIO */ +#define ICX_IIO_MSR_PMON_CTL0 0xa58 +#define ICX_IIO_MSR_PMON_CTR0 0xa51 +#define ICX_IIO_MSR_PMON_BOX_CTL 0xa50 + +/* ICX IRP */ +#define ICX_IRP0_MSR_PMON_CTL0 0xa4d +#define ICX_IRP0_MSR_PMON_CTR0 0xa4b +#define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a + +/* ICX M2PCIE */ +#define ICX_M2PCIE_MSR_PMON_CTL0 0xa46 +#define ICX_M2PCIE_MSR_PMON_CTR0 0xa41 +#define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40 + +/* ICX UPI */ +#define ICX_UPI_PCI_PMON_CTL0 0x350 +#define ICX_UPI_PCI_PMON_CTR0 0x320 +#define ICX_UPI_PCI_PMON_BOX_CTL 0x318 +#define ICX_UPI_CTL_UMASK_EXT 0xffffff + +/* ICX M3UPI*/ +#define ICX_M3UPI_PCI_PMON_CTL0 0xd8 +#define ICX_M3UPI_PCI_PMON_CTR0 0xa8 +#define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0 + +/* ICX IMC */ +#define ICX_NUMBER_IMC_CHN 2 +#define ICX_IMC_MEM_STRIDE 0x4 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55"); DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); 
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); @@ -343,11 +445,14 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31"); DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43"); +DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47"); DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46"); +DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50"); DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0"); DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5"); DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8"); +DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9"); DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5"); DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); @@ -1222,6 +1327,8 @@ static struct pci_driver snbep_uncore_pci_driver = { .id_table = snbep_uncore_pci_ids, }; +#define NODE_ID_MASK 0x7 + /* * build pci bus to socket mapping */ @@ -1243,7 +1350,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); if (err) break; - nodeid = config; + nodeid = config & NODE_ID_MASK; /* get the Node ID mapping */ err = pci_read_config_dword(ubox_dev, idmap_loc, &config); if (err) @@ -3423,6 +3530,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; struct extra_reg *er; int idx = 0; + /* Any of the CHA events may be filtered by Thread/Core-ID.*/ + if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN) + idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID; for (er = skx_uncore_cha_extra_regs; er->msr; er++) { if (er->event != (event->hw.config & er->config_mask)) @@ -3490,6 +3600,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = { UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), + UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), EVENT_CONSTRAINT_END }; @@ -3583,6 +3694,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { .read_counter = uncore_msr_read_counter, + .hw_config = uncore_freerunning_hw_config, }; static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { @@ -3965,3 +4077,1017 @@ int skx_uncore_pci_init(void) } /* end of SKX uncore support */ + +/* SNR uncore support */ + +static struct intel_uncore_type snr_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = SNR_U_MSR_PMON_CTR0, + .event_ctl = SNR_U_MSR_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +static struct attribute *snr_uncore_cha_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask_ext2.attr, + &format_attr_edge.attr, + &format_attr_tid_en.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_filter_tid5.attr, + NULL, +}; +static const struct attribute_group snr_uncore_chabox_format_group = { + .name = "format", + .attrs = 
snr_uncore_cha_formats_attr, +}; + +static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + + reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 + + box->pmu->type->msr_offset * box->pmu->pmu_idx; + reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; + reg1->idx = 0; + + return 0; +} + +static void snr_cha_enable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + + if (reg1->idx != EXTRA_REG_NONE) + wrmsrl(reg1->reg, reg1->config); + + wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); +} + +static struct intel_uncore_ops snr_uncore_chabox_ops = { + .init_box = ivbep_uncore_msr_init_box, + .disable_box = snbep_uncore_msr_disable_box, + .enable_box = snbep_uncore_msr_enable_box, + .disable_event = snbep_uncore_msr_disable_event, + .enable_event = snr_cha_enable_event, + .read_counter = uncore_msr_read_counter, + .hw_config = snr_cha_hw_config, +}; + +static struct intel_uncore_type snr_uncore_chabox = { + .name = "cha", + .num_counters = 4, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = SNR_CHA_MSR_PMON_CTL0, + .perf_ctr = SNR_CHA_MSR_PMON_CTR0, + .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_CBO_MSR_OFFSET, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, + .ops = &snr_uncore_chabox_ops, + .format_group = &snr_uncore_chabox_format_group, +}; + +static struct attribute *snr_uncore_iio_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh9.attr, + &format_attr_ch_mask2.attr, + &format_attr_fc_mask2.attr, + NULL, +}; + +static const struct attribute_group snr_uncore_iio_format_group = { + .name = "format", + .attrs = snr_uncore_iio_formats_attr, +}; + +static struct intel_uncore_type snr_uncore_iio = { + .name = "iio", + .num_counters = 4, + .num_boxes = 5, + .perf_ctr_bits = 48, + .event_ctl = SNR_IIO_MSR_PMON_CTL0, + .perf_ctr = SNR_IIO_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, + .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, + .msr_offset = SNR_IIO_MSR_OFFSET, + .ops = &ivbep_uncore_msr_ops, + .format_group = &snr_uncore_iio_format_group, +}; + +static struct intel_uncore_type snr_uncore_irp = { + .name = "irp", + .num_counters = 2, + .num_boxes = 5, + .perf_ctr_bits = 48, + .event_ctl = SNR_IRP0_MSR_PMON_CTL0, + .perf_ctr = SNR_IRP0_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL, + .msr_offset = SNR_IRP_MSR_OFFSET, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +static struct intel_uncore_type snr_uncore_m2pcie = { + .name = "m2pcie", + .num_counters = 4, + .num_boxes = 5, + .perf_ctr_bits = 48, + .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0, + .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0, + .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL, + .msr_offset = SNR_M2PCIE_MSR_OFFSET, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; + + if (ev_sel >= 0xb && ev_sel <= 0xe) { + reg1->reg 
= SNR_PCU_MSR_PMON_BOX_FILTER; + reg1->idx = ev_sel - 0xb; + reg1->config = event->attr.config1 & (0xff << reg1->idx); + } + return 0; +} + +static struct intel_uncore_ops snr_uncore_pcu_ops = { + IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .hw_config = snr_pcu_hw_config, + .get_constraint = snbep_pcu_get_constraint, + .put_constraint = snbep_pcu_put_constraint, +}; + +static struct intel_uncore_type snr_uncore_pcu = { + .name = "pcu", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNR_PCU_MSR_PMON_CTR0, + .event_ctl = SNR_PCU_MSR_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL, + .num_shared_regs = 1, + .ops = &snr_uncore_pcu_ops, + .format_group = &skx_uncore_pcu_format_group, +}; + +enum perf_uncore_snr_iio_freerunning_type_id { + SNR_IIO_MSR_IOCLK, + SNR_IIO_MSR_BW_IN, + + SNR_IIO_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters snr_iio_freerunning[] = { + [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 }, + [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 }, +}; + +static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { + /* Free-Running IIO CLOCKS Counter */ + INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), + /* Free-Running IIO BANDWIDTH IN Counters */ + INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type snr_uncore_iio_free_running = { + .name = "iio_free_running", + .num_counters = 9, + .num_boxes = 5, + .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX, + .freerunning = snr_iio_freerunning, + .ops = &skx_uncore_iio_freerunning_ops, + .event_descs = snr_uncore_iio_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *snr_msr_uncores[] = { + &snr_uncore_ubox, + &snr_uncore_chabox, + &snr_uncore_iio, + &snr_uncore_irp, + &snr_uncore_m2pcie, + &snr_uncore_pcu, + &snr_uncore_iio_free_running, + NULL, +}; + +void snr_uncore_cpu_init(void) +{ + uncore_msr_uncores = snr_msr_uncores; +} + +static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box) +{ + struct pci_dev 
*pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); + pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT); +} + +static struct intel_uncore_ops snr_m2m_uncore_pci_ops = { + .init_box = snr_m2m_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = snbep_uncore_pci_disable_event, + .enable_event = snbep_uncore_pci_enable_event, + .read_counter = snbep_uncore_pci_read_counter, +}; + +static struct attribute *snr_m2m_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask_ext3.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static const struct attribute_group snr_m2m_uncore_format_group = { + .name = "format", + .attrs = snr_m2m_uncore_formats_attr, +}; + +static struct intel_uncore_type snr_uncore_m2m = { + .name = "m2m", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNR_M2M_PCI_PMON_CTR0, + .event_ctl = SNR_M2M_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, + .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, + .ops = &snr_m2m_uncore_pci_ops, + .format_group = &snr_m2m_uncore_format_group, +}; + +static struct intel_uncore_type snr_uncore_pcie3 = { + .name = "pcie3", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, + .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, + .ops = &ivbep_uncore_pci_ops, + .format_group = &ivbep_uncore_format_group, +}; + +enum { + SNR_PCI_UNCORE_M2M, + SNR_PCI_UNCORE_PCIE3, +}; + +static struct intel_uncore_type *snr_pci_uncores[] = { + [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, + [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, + NULL, +}; + +static const struct pci_device_id snr_uncore_pci_ids[] = { + { /* M2M */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), + }, + { /* PCIe3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver snr_uncore_pci_driver = { + .name = "snr_uncore", + .id_table = snr_uncore_pci_ids, +}; + +int snr_uncore_pci_init(void) +{ + /* SNR UBOX DID */ + int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID, + SKX_GIDNIDMAP, true); + + if (ret) + return ret; + + uncore_pci_uncores = snr_pci_uncores; + uncore_pci_driver = &snr_uncore_pci_driver; + return 0; +} + +static struct pci_dev *snr_uncore_get_mc_dev(int id) +{ + struct pci_dev *mc_dev = NULL; + int phys_id, pkg; + + while (1) { + mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev); + if (!mc_dev) + break; + phys_id = uncore_pcibus_to_physid(mc_dev->bus); + if (phys_id < 0) + continue; + pkg = topology_phys_to_logical_pkg(phys_id); + if (pkg < 0) + continue; + else if (pkg == id) + break; + } + return mc_dev; +} + +static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box, + unsigned int box_ctl, int mem_offset) +{ + struct pci_dev *pdev = snr_uncore_get_mc_dev(box->pkgid); + resource_size_t addr; + u32 pci_dword; + + if (!pdev) + return; + + pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword); + addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23; + + pci_read_config_dword(pdev, mem_offset, &pci_dword); + addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) 
<< 12; + + addr += box_ctl; + + box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE); + if (!box->io_addr) + return; + + writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr); +} + +static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) +{ + __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), + SNR_IMC_MMIO_MEM0_OFFSET); +} + +static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box) +{ + u32 config; + + if (!box->io_addr) + return; + + config = readl(box->io_addr); + config |= SNBEP_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr); +} + +static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box) +{ + u32 config; + + if (!box->io_addr) + return; + + config = readl(box->io_addr); + config &= ~SNBEP_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr); +} + +static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config | SNBEP_PMON_CTL_EN, + box->io_addr + hwc->config_base); +} + +static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static struct intel_uncore_ops snr_uncore_mmio_ops = { + .init_box = snr_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = snr_uncore_mmio_disable_box, + .enable_box = snr_uncore_mmio_enable_box, + .disable_event = snr_uncore_mmio_disable_event, + .enable_event = snr_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct uncore_event_desc snr_uncore_imc_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), + INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type snr_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, + .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, + .event_descs = snr_uncore_imc_events, + .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, + .event_ctl = SNR_IMC_MMIO_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, + .mmio_offset = SNR_IMC_MMIO_OFFSET, + .ops = &snr_uncore_mmio_ops, + .format_group = &skx_uncore_format_group, +}; + +enum perf_uncore_snr_imc_freerunning_type_id { + SNR_IMC_DCLK, + SNR_IMC_DDR, + + SNR_IMC_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters snr_imc_freerunning[] = { + [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, + [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 }, +}; + +static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), + + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static struct 
intel_uncore_ops snr_uncore_imc_freerunning_ops = { + .init_box = snr_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .read_counter = uncore_mmio_read_counter, + .hw_config = uncore_freerunning_hw_config, +}; + +static struct intel_uncore_type snr_uncore_imc_free_running = { + .name = "imc_free_running", + .num_counters = 3, + .num_boxes = 1, + .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX, + .freerunning = snr_imc_freerunning, + .ops = &snr_uncore_imc_freerunning_ops, + .event_descs = snr_uncore_imc_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *snr_mmio_uncores[] = { + &snr_uncore_imc, + &snr_uncore_imc_free_running, + NULL, +}; + +void snr_uncore_mmio_init(void) +{ + uncore_mmio_uncores = snr_mmio_uncores; +} + +/* end of SNR uncore support */ + +/* ICX uncore support */ + +static unsigned icx_cha_msr_offsets[] = { + 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, + 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, + 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, + 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe, + 0x1c, 0x2a, 0x38, 0x46, +}; + +static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN); + + if (tie_en) { + reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 + + icx_cha_msr_offsets[box->pmu->pmu_idx]; + reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; + reg1->idx = 0; + } + + return 0; +} + +static struct intel_uncore_ops icx_uncore_chabox_ops = { + .init_box = ivbep_uncore_msr_init_box, + .disable_box = snbep_uncore_msr_disable_box, + .enable_box = snbep_uncore_msr_enable_box, + .disable_event = snbep_uncore_msr_disable_event, + .enable_event = snr_cha_enable_event, + .read_counter = uncore_msr_read_counter, + .hw_config = icx_cha_hw_config, +}; + +static struct intel_uncore_type icx_uncore_chabox = { + .name = "cha", + .num_counters = 4, + .perf_ctr_bits = 48, + .event_ctl = ICX_C34_MSR_PMON_CTL0, + .perf_ctr = ICX_C34_MSR_PMON_CTR0, + .box_ctl = ICX_C34_MSR_PMON_BOX_CTL, + .msr_offsets = icx_cha_msr_offsets, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, + .constraints = skx_uncore_chabox_constraints, + .ops = &icx_uncore_chabox_ops, + .format_group = &snr_uncore_chabox_format_group, +}; + +static unsigned icx_msr_offsets[] = { + 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, +}; + +static struct event_constraint icx_uncore_iio_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x02, 0x3), + UNCORE_EVENT_CONSTRAINT(0x03, 0x3), + UNCORE_EVENT_CONSTRAINT(0x83, 0x3), + UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), + UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_iio = { + .name = "iio", + .num_counters = 4, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_IIO_MSR_PMON_CTL0, + .perf_ctr = ICX_IIO_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, + .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .constraints = icx_uncore_iio_constraints, + .ops = &skx_uncore_iio_ops, + .format_group = &snr_uncore_iio_format_group, +}; + +static struct intel_uncore_type icx_uncore_irp = { + .name = "irp", + .num_counters = 2, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_IRP0_MSR_PMON_CTL0, + 
.perf_ctr = ICX_IRP0_MSR_PMON_CTR0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +static struct event_constraint icx_uncore_m2pcie_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x14, 0x3), + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_m2pcie = { + .name = "m2pcie", + .num_counters = 4, + .num_boxes = 6, + .perf_ctr_bits = 48, + .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0, + .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0, + .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL, + .msr_offsets = icx_msr_offsets, + .constraints = icx_uncore_m2pcie_constraints, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_format_group, +}; + +enum perf_uncore_icx_iio_freerunning_type_id { + ICX_IIO_MSR_IOCLK, + ICX_IIO_MSR_BW_IN, + + ICX_IIO_FREERUNNING_TYPE_MAX, +}; + +static unsigned icx_iio_clk_freerunning_box_offsets[] = { + 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, +}; + +static unsigned icx_iio_bw_freerunning_box_offsets[] = { + 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0, +}; + +static struct freerunning_counters icx_iio_freerunning[] = { + [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets }, + [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets }, +}; + +static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = { + /* Free-Running IIO CLOCKS Counter */ + INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), + /* Free-Running IIO BANDWIDTH IN Counters */ + INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type icx_uncore_iio_free_running = { + .name = "iio_free_running", + .num_counters = 9, + .num_boxes = 6, + .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX, + .freerunning = icx_iio_freerunning, + .ops = &skx_uncore_iio_freerunning_ops, + .event_descs = icx_uncore_iio_freerunning_events, + .format_group = 
&skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *icx_msr_uncores[] = { + &skx_uncore_ubox, + &icx_uncore_chabox, + &icx_uncore_iio, + &icx_uncore_irp, + &icx_uncore_m2pcie, + &skx_uncore_pcu, + &icx_uncore_iio_free_running, + NULL, +}; + +/* + * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High) + * registers which located at Device 30, Function 3 + */ +#define ICX_CAPID6 0x9c +#define ICX_CAPID7 0xa0 + +static u64 icx_count_chabox(void) +{ + struct pci_dev *dev = NULL; + u64 caps = 0; + + dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev); + if (!dev) + goto out; + + pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps); + pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1); +out: + pci_dev_put(dev); + return hweight64(caps); +} + +void icx_uncore_cpu_init(void) +{ + u64 num_boxes = icx_count_chabox(); + + if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets))) + return; + icx_uncore_chabox.num_boxes = num_boxes; + uncore_msr_uncores = icx_msr_uncores; +} + +static struct intel_uncore_type icx_uncore_m2m = { + .name = "m2m", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .perf_ctr = SNR_M2M_PCI_PMON_CTR0, + .event_ctl = SNR_M2M_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, + .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, + .ops = &snr_m2m_uncore_pci_ops, + .format_group = &snr_m2m_uncore_format_group, +}; + +static struct attribute *icx_upi_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask_ext4.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static const struct attribute_group icx_upi_uncore_format_group = { + .name = "format", + .attrs = icx_upi_uncore_formats_attr, +}; + +static struct intel_uncore_type icx_uncore_upi = { + .name = "upi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = ICX_UPI_PCI_PMON_CTR0, + .event_ctl = ICX_UPI_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .event_mask_ext = ICX_UPI_CTL_UMASK_EXT, + .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, + .ops = &skx_upi_uncore_pci_ops, + .format_group = &icx_upi_uncore_format_group, +}; + +static struct event_constraint icx_uncore_m3upi_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x1c, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), + UNCORE_EVENT_CONSTRAINT(0x1f, 0x1), + UNCORE_EVENT_CONSTRAINT(0x40, 0x7), + UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), + UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), + UNCORE_EVENT_CONSTRAINT(0x50, 0x7), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type icx_uncore_m3upi = { + .name = "m3upi", + .num_counters = 4, + .num_boxes = 3, + .perf_ctr_bits = 48, + .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, + .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, + .constraints = icx_uncore_m3upi_constraints, + .ops = &ivbep_uncore_pci_ops, + .format_group = &skx_uncore_format_group, +}; + +enum { + ICX_PCI_UNCORE_M2M, + ICX_PCI_UNCORE_UPI, + ICX_PCI_UNCORE_M3UPI, +}; + +static struct intel_uncore_type *icx_pci_uncores[] = { + [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m, + [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi, + [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi, + NULL, +}; + +static const struct pci_device_id icx_uncore_pci_ids[] = { + { /* M2M 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0), + }, + { /* M2M 1 
*/ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1), + }, + { /* M2M 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2), + }, + { /* M2M 3 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3), + }, + { /* UPI Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0), + }, + { /* UPI Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1), + }, + { /* UPI Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2), + }, + { /* M3UPI Link 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0), + }, + { /* M3UPI Link 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1), + }, + { /* M3UPI Link 2 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver icx_uncore_pci_driver = { + .name = "icx_uncore", + .id_table = icx_uncore_pci_ids, +}; + +int icx_uncore_pci_init(void) +{ + /* ICX UBOX DID */ + int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID, + SKX_GIDNIDMAP, true); + + if (ret) + return ret; + + uncore_pci_uncores = icx_pci_uncores; + uncore_pci_driver = &icx_uncore_pci_driver; + return 0; +} + +static void icx_uncore_imc_init_box(struct intel_uncore_box *box) +{ + unsigned int box_ctl = box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN); + int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE + + SNR_IMC_MMIO_MEM0_OFFSET; + + __snr_uncore_mmio_init_box(box, box_ctl, mem_offset); +} + +static struct intel_uncore_ops icx_uncore_mmio_ops = { + .init_box = icx_uncore_imc_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = snr_uncore_mmio_disable_box, + .enable_box = snr_uncore_mmio_enable_box, + .disable_event = snr_uncore_mmio_disable_event, + .enable_event = snr_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct intel_uncore_type icx_uncore_imc = { + .name = "imc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, + .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, + .event_descs = hswep_uncore_imc_events, + .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, + .event_ctl = SNR_IMC_MMIO_PMON_CTL0, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, + .mmio_offset = SNR_IMC_MMIO_OFFSET, + .ops = &icx_uncore_mmio_ops, + .format_group = &skx_uncore_format_group, +}; + +enum perf_uncore_icx_imc_freerunning_type_id { + ICX_IMC_DCLK, + ICX_IMC_DDR, + ICX_IMC_DDRT, + + ICX_IMC_FREERUNNING_TYPE_MAX, +}; + +static struct freerunning_counters icx_imc_freerunning[] = { + [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, + [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 }, + [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 }, +}; + +static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = { + INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), + + INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), + INTEL_UNCORE_EVENT_DESC(read.scale, 
"6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), + INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"), + { /* end: all zeroes */ }, +}; + +static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) +{ + int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + + SNR_IMC_MMIO_MEM0_OFFSET; + + __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset); +} + +static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = { + .init_box = icx_uncore_imc_freerunning_init_box, + .exit_box = uncore_mmio_exit_box, + .read_counter = uncore_mmio_read_counter, + .hw_config = uncore_freerunning_hw_config, +}; + +static struct intel_uncore_type icx_uncore_imc_free_running = { + .name = "imc_free_running", + .num_counters = 5, + .num_boxes = 4, + .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX, + .freerunning = icx_imc_freerunning, + .ops = &icx_uncore_imc_freerunning_ops, + .event_descs = icx_uncore_imc_freerunning_events, + .format_group = &skx_uncore_iio_freerunning_format_group, +}; + +static struct intel_uncore_type *icx_mmio_uncores[] = { + &icx_uncore_imc, + &icx_uncore_imc_free_running, + NULL, +}; + +void icx_uncore_mmio_init(void) +{ + uncore_mmio_uncores = icx_mmio_uncores; +} + +/* end of ICX uncore support */ diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index b4771a6ddbc1b6686549ee306d3299d68a8b2e3e..ace6c1e752fb1c978a2d1d50927d53b65355f589 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -69,14 +69,14 @@ static bool test_intel(int idx) case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: - case INTEL_FAM6_ATOM_SILVERMONT1: - case INTEL_FAM6_ATOM_SILVERMONT2: + case INTEL_FAM6_ATOM_SILVERMONT: + case INTEL_FAM6_ATOM_SILVERMONT_X: case INTEL_FAM6_ATOM_AIRMONT: case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_DENVERTON: + case INTEL_FAM6_ATOM_GOLDMONT_X: - case INTEL_FAM6_ATOM_GEMINI_LAKE: + case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: @@ -89,6 +89,7 @@ static bool test_intel(int idx) case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_ICELAKE_MOBILE: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; break; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 156286335351a43b6692ab07747e97e1fb97b3b2..db719665e1472648f9bc7d6a3f72edc95336894c 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -49,12 +49,19 @@ struct event_constraint { unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; u64 idxmsk64; }; - u64 code; - u64 cmask; - int weight; - int overlap; - int flags; + u64 code; + u64 cmask; + int weight; + int overlap; + int flags; + unsigned int size; }; + +static inline bool constraint_match(struct event_constraint *c, u64 ecode) +{ + return ((ecode & c->cmask) - c->code) <= (u64)c->size; +} + /* * struct hw_perf_event.flags flags */ @@ -70,7 +77,34 @@ struct event_constraint { #define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ #define 
PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */ #define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */ +#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */ +#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */ + +#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */ +static inline bool is_topdown_count(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_TOPDOWN; +} + +static inline bool is_metric_event(struct perf_event *event) +{ + u64 config = event->attr.config; + + return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) && + ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) && + ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX); +} + +static inline bool is_slots_event(struct perf_event *event) +{ + return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS; +} + +static inline bool is_topdown_event(struct perf_event *event) +{ + return is_metric_event(event) || is_slots_event(event); +} struct amd_nb { int nb_id; /* NorthBridge id */ @@ -96,25 +130,25 @@ struct amd_nb { PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \ PERF_SAMPLE_PERIOD) -#define PEBS_REGS \ - (PERF_REG_X86_AX | \ - PERF_REG_X86_BX | \ - PERF_REG_X86_CX | \ - PERF_REG_X86_DX | \ - PERF_REG_X86_DI | \ - PERF_REG_X86_SI | \ - PERF_REG_X86_SP | \ - PERF_REG_X86_BP | \ - PERF_REG_X86_IP | \ - PERF_REG_X86_FLAGS | \ - PERF_REG_X86_R8 | \ - PERF_REG_X86_R9 | \ - PERF_REG_X86_R10 | \ - PERF_REG_X86_R11 | \ - PERF_REG_X86_R12 | \ - PERF_REG_X86_R13 | \ - PERF_REG_X86_R14 | \ - PERF_REG_X86_R15) +#define PEBS_GP_REGS \ + ((1ULL << PERF_REG_X86_AX) | \ + (1ULL << PERF_REG_X86_BX) | \ + (1ULL << PERF_REG_X86_CX) | \ + (1ULL << PERF_REG_X86_DX) | \ + (1ULL << PERF_REG_X86_DI) | \ + (1ULL << PERF_REG_X86_SI) | \ + (1ULL << PERF_REG_X86_SP) | \ + (1ULL << PERF_REG_X86_BP) | \ + (1ULL << PERF_REG_X86_IP) | \ + (1ULL << PERF_REG_X86_FLAGS) | \ + (1ULL << PERF_REG_X86_R8) | \ + (1ULL << PERF_REG_X86_R9) | \ + (1ULL << PERF_REG_X86_R10) | \ + (1ULL << PERF_REG_X86_R11) | \ + (1ULL << PERF_REG_X86_R12) | \ + (1ULL << PERF_REG_X86_R13) | \ + (1ULL << PERF_REG_X86_R14) | \ + (1ULL << PERF_REG_X86_R15)) /* * Per register state. 
@@ -186,6 +220,7 @@ struct cpu_hw_events { they've never been enabled yet */ int n_txn; /* the # last events in the below arrays; added in the current transaction */ + int n_txn_metric; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; @@ -207,16 +242,23 @@ struct cpu_hw_events { int n_pebs; int n_large_pebs; + /* Current super set of events hardware configuration */ + u64 pebs_data_cfg; + u64 active_pebs_data_cfg; + int pebs_record_size; + /* * Intel LBR bits */ int lbr_users; + int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; struct er_account *lbr_sel; u64 br_sel; struct x86_perf_task_context *last_task_ctx; int last_log_id; + int lbr_select; /* * Intel host/guest exclude bits @@ -242,28 +284,51 @@ struct cpu_hw_events { struct intel_excl_cntrs *excl_cntrs; int excl_thread_id; /* 0 or 1 */ + /* + * SKL TSX_FORCE_ABORT shadow + */ + u64 tfa_shadow; + + /* + * Perf Metrics + */ + /* number of accepted metrics events */ + int n_metric; + /* * AMD specific bits */ struct amd_nb *amd_nb; /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ u64 perf_ctr_virt_mask; + int n_pair; /* Large increment events */ void *kfree_on_online[X86_PERF_KFREE_MAX]; }; -#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\ +#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ { .idxmsk64 = (n) }, \ .code = (c), \ + .size = (e) - (c), \ .cmask = (m), \ .weight = (w), \ .overlap = (o), \ .flags = f, \ } +#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \ + __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f) + #define EVENT_CONSTRAINT(c, n, m) \ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) +/* + * The constraint_match() function only works for 'simple' event codes + * and not for extended (AMD64_EVENTSEL_EVENT) events codes. + */ +#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \ + __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0) + #define INTEL_EXCLEVT_CONSTRAINT(c, n) \ __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\ 0, PERF_X86_EVENT_EXCL) @@ -298,6 +363,12 @@ struct cpu_hw_events { #define INTEL_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) +/* + * Constraint on a range of Event codes + */ +#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \ + EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT) + /* * Constraint on the Event code + UMask + fixed-mask * @@ -315,6 +386,19 @@ struct cpu_hw_events { #define FIXED_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) +/* + * The special metric counters do not actually exist. They are calculated from + * the combination of the FxCtr3 + MSR_PERF_METRICS. + * + * The special metric counters are mapped to a dummy offset for the scheduler. + * The sharing between multiple users of the same metric without multiplexing + * is not allowed, even though the hardware supports that in principle. 
+ */ + +#define METRIC_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \ + INTEL_ARCH_EVENT_MASK) + /* * Constraint on the Event code + UMask */ @@ -345,6 +429,9 @@ struct cpu_hw_events { #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) +#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \ + EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) + /* Check only flags, but allow all event/umask */ #define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) @@ -361,6 +448,11 @@ struct cpu_hw_events { ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) +#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \ + __EVENT_CONSTRAINT_RANGE(code, end, n, \ + ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) + #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \ __EVENT_CONSTRAINT(code, n, \ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ @@ -468,6 +560,8 @@ union perf_capabilities { * values > 32bit. */ u64 full_width_write:1; + u64 pebs_baseline:1; + u64 perf_metrics:1; }; u64 capabilities; }; @@ -560,23 +654,23 @@ struct x86_pmu { struct event_constraint *event_constraints; struct x86_pmu_quirk *quirks; int perfctr_second_write; - bool late_ack; u64 (*limit_period)(struct perf_event *event, u64 l); + /* PMI handler bits */ + unsigned int late_ack :1, + enabled_ack :1, + counter_freezing :1; /* * sysfs attrs */ int attr_rdpmc_broken; int attr_rdpmc; struct attribute **format_attrs; - struct attribute **event_attrs; - struct attribute **caps_attrs; ssize_t (*events_sysfs_show)(char *page, u64 config); - struct attribute **cpu_events; + const struct attribute_group **attr_update; unsigned long attr_freeze_on_smi; - struct attribute **attrs; /* * CPU Hotplug hooks @@ -599,26 +693,27 @@ struct x86_pmu { /* * Intel DebugStore bits */ - unsigned int bts :1, - bts_active :1, - pebs :1, - pebs_active :1, - pebs_broken :1, - pebs_prec_dist :1, - pebs_no_tlb :1; + unsigned int bts :1, + bts_active :1, + pebs :1, + pebs_active :1, + pebs_broken :1, + pebs_prec_dist :1, + pebs_no_tlb :1; int pebs_record_size; int pebs_buffer_size; + int max_pebs_events; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); - int max_pebs_events; unsigned long large_pebs_flags; + u64 rtm_abort_event; /* * Intel LBR */ - unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ - int lbr_nr; /* hardware stack size */ + unsigned int lbr_tos, lbr_from, lbr_to, + lbr_nr; /* LBR base regs and size */ u64 lbr_sel_mask; /* LBR_SELECT valid bits */ const int *lbr_sel_map; /* lbr_select mappings */ bool lbr_double_abort; /* duplicated lbr aborts */ @@ -629,10 +724,17 @@ struct x86_pmu { */ atomic_t lbr_exclusive[x86_lbr_exclusive_max]; + /* + * Intel perf metrics + */ + u64 (*update_topdown_event)(struct perf_event *event); + int (*set_topdown_event_period)(struct perf_event *event); + /* * AMD bits */ unsigned int amd_nb_constraints : 1; + u64 perf_ctr_pair_en; /* * Extra registers for events @@ -644,12 +746,18 @@ struct x86_pmu { * Intel host/guest support (KVM) */ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
+ */ + int (*check_period) (struct perf_event *event, u64 period); }; struct x86_perf_task_context { u64 lbr_from[MAX_LBR_ENTRIES]; u64 lbr_to[MAX_LBR_ENTRIES]; u64 lbr_info[MAX_LBR_ENTRIES]; + u64 lbr_sel; int tos; int valid_lbrs; int lbr_callstack_users; @@ -674,6 +782,8 @@ do { \ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ +#define PMU_FL_TFA 0x20 /* deal with TSX force abort */ +#define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -700,6 +810,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \ .event_str_ht = ht, \ } +struct pmu *x86_get_pmu(void); extern struct x86_pmu x86_pmu __read_mostly; static inline bool x86_pmu_has_lbr_callstack(void) @@ -768,6 +879,11 @@ int x86_pmu_hw_config(struct perf_event *event); void x86_pmu_disable_all(void); +static inline bool is_counter_pair(struct hw_perf_event *hwc) +{ + return hwc->flags & PERF_X86_EVENT_PAIR; +} + static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, u64 enable_mask) { @@ -775,6 +891,14 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, if (hwc->extra_reg.reg) wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); + + /* + * Add enabled Merge event on next counter + * if large increment event being enabled on this counter + */ + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); + wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); } @@ -788,9 +912,13 @@ void x86_pmu_stop(struct perf_event *event, int flags); static inline void x86_pmu_disable_event(struct perf_event *event) { + u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config); + wrmsrl(hwc->config_base, hwc->config & ~disable_mask); + + if (is_counter_pair(hwc)) + wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); } void x86_pmu_enable_event(struct perf_event *event); @@ -833,8 +961,6 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); ssize_t intel_event_sysfs_show(char *page, u64 config); -struct attribute **merge_attr(struct attribute **a, struct attribute **b); - ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr, @@ -855,13 +981,25 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) +{ + struct hw_perf_event *hwc = &event->hw; + unsigned int hw_event, bts_event; + + if (event->attr.freq) + return false; + + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; + bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); + + return hw_event == bts_event && period == 1; +} + static inline bool intel_pmu_has_bts(struct perf_event *event) { - if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && - !event->attr.freq && event->hw.sample_period == 1) - return true; + struct hw_perf_event *hwc = &event->hw; - return false; + return intel_pmu_has_bts_period(event, hwc->sample_period); } int intel_pmu_save_and_restart(struct perf_event *event); @@ -870,7 +1008,8 @@ struct event_constraint * 
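The is_counter_pair() handling above implements "large increment per cycle" events, which occupy two adjacent counters: enabling one also writes x86_pmu.perf_ctr_pair_en into the next counter's control register, and disabling it clears that register again. A simplified, self-contained sketch of the pairing (the msr[] array, the merge-enable value and the flag value are placeholders, not the real register layout)::

    #include <stdint.h>
    #include <stdio.h>

    #define PAIR_FLAG  0x1000         /* mirrors PERF_X86_EVENT_PAIR */
    #define MERGE_EN   0x100000ULL    /* placeholder merge-enable value */
    #define EN_BIT     (1ULL << 22)   /* PERFEVTSEL enable bit */

    static uint64_t msr[8];           /* stand-in for the PERFEVTSEL bank */

    static void enable_event(int idx, uint64_t config, unsigned int flags)
    {
            if (flags & PAIR_FLAG)
                    msr[idx + 1] = MERGE_EN;   /* arm the merge counter first */
            msr[idx] = config | EN_BIT;
    }

    static void disable_event(int idx, uint64_t config, unsigned int flags)
    {
            msr[idx] = config;                 /* enable bit cleared */
            if (flags & PAIR_FLAG)
                    msr[idx + 1] = 0;          /* release the merge counter */
    }

    int main(void)
    {
            enable_event(2, 0x76, PAIR_FLAG);
            printf("%#llx %#llx\n", (unsigned long long)msr[2],
                   (unsigned long long)msr[3]);
            disable_event(2, 0x76, PAIR_FLAG);
            printf("%#llx %#llx\n", (unsigned long long)msr[2],
                   (unsigned long long)msr[3]);
            return 0;
    }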
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event); -struct intel_shared_regs *allocate_shared_regs(int cpu); +extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); +extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); int intel_pmu_init(void); @@ -883,6 +1022,7 @@ void release_ds_buffers(void); void reserve_ds_buffers(void); extern struct event_constraint bts_constraint; +extern struct event_constraint vlbr_constraint; void intel_pmu_enable_bts(u64 config); @@ -914,6 +1054,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[]; extern struct event_constraint intel_skl_pebs_event_constraints[]; +extern struct event_constraint intel_icl_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_add(struct perf_event *event); @@ -932,6 +1074,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr); + void intel_ds_init(void); void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); @@ -1006,9 +1150,13 @@ static inline int intel_pmu_init(void) return 0; } -static inline struct intel_shared_regs *allocate_shared_regs(int cpu) +static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) +{ + return 0; +} + +static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc) { - return NULL; } static inline int is_ht_workaround_enabled(void) @@ -1016,3 +1164,12 @@ static inline int is_ht_workaround_enabled(void) return 0; } #endif /* CONFIG_CPU_SUP_INTEL */ + +#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_ZHAOXIN)) +int zhaoxin_pmu_init(void); +#else +static inline int zhaoxin_pmu_init(void) +{ + return 0; +} +#endif /*CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN*/ diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..642c1174d662650192e31c213ffb48e29a19951c --- /dev/null +++ b/arch/x86/events/zhaoxin/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += core.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c new file mode 100644 index 0000000000000000000000000000000000000000..e4e7a3373d0fb94f9f7b6e70af916ad806b96437 --- /dev/null +++ b/arch/x86/events/zhaoxin/core.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Zhaoxin PMU; like Intel Architectural PerfMon-v2 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../perf_event.h" + +/* + * Zhaoxin PerfMon, used on zxc and later. 
+ */ +static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { + + [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, + [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, + [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, +}; + +static struct event_constraint zxc_event_constraints[] __read_mostly = { + + FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ + EVENT_CONSTRAINT_END +}; + +static struct event_constraint zxd_event_constraints[] __read_mostly = { + + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ + FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ + FIXED_EVENT_CONSTRAINT(0x0083, 2), /* unhalted bus clock cycles */ + EVENT_CONSTRAINT_END +}; + +static __initconst const u64 zxd_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0042, + [C(RESULT_MISS)] = 0x0538, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x0043, + [C(RESULT_MISS)] = 0x0562, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0300, + [C(RESULT_MISS)] = 0x0301, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x030a, + [C(RESULT_MISS)] = 0x030b, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0042, + [C(RESULT_MISS)] = 0x052c, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x0043, + [C(RESULT_MISS)] = 0x0530, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x0564, + [C(RESULT_MISS)] = 0x0565, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x00c0, + [C(RESULT_MISS)] = 0x0534, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0700, + [C(RESULT_MISS)] = 0x0709, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +}; + +static __initconst const u64 zxe_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0568, + [C(RESULT_MISS)] = 0x054b, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x0669, + [C(RESULT_MISS)] = 0x0562, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0300, + [C(RESULT_MISS)] = 0x0301, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x030a, + [C(RESULT_MISS)] = 0x030b, + }, +}, +[C(LL)] = { + 
[C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0, + [C(RESULT_MISS)] = 0x0, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x0, + [C(RESULT_MISS)] = 0x0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x0, + [C(RESULT_MISS)] = 0x0, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0568, + [C(RESULT_MISS)] = 0x052c, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x0669, + [C(RESULT_MISS)] = 0x0530, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x0564, + [C(RESULT_MISS)] = 0x0565, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x00c0, + [C(RESULT_MISS)] = 0x0534, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0028, + [C(RESULT_MISS)] = 0x0029, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +}; + +static void zhaoxin_pmu_disable_all(void) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); +} + +static void zhaoxin_pmu_enable_all(int added) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); +} + +static inline u64 zhaoxin_pmu_get_status(void) +{ + u64 status; + + rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + + return status; +} + +static inline void zhaoxin_pmu_ack_status(u64 ack) +{ + wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); +} + +static inline void zxc_pmu_ack_status(u64 ack) +{ + /* + * ZXC needs global control enabled in order to clear status bits. 
+ */ + zhaoxin_pmu_enable_all(0); + zhaoxin_pmu_ack_status(ack); + zhaoxin_pmu_disable_all(); +} + +static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) +{ + int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + u64 ctrl_val, mask; + + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + wrmsrl(hwc->config_base, ctrl_val); +} + +static void zhaoxin_pmu_disable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + zhaoxin_pmu_disable_fixed(hwc); + return; + } + + x86_pmu_disable_event(event); +} + +static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) +{ + int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + u64 ctrl_val, bits, mask; + + /* + * Enable IRQ generation (0x8), + * and enable ring-3 counting (0x2) and ring-0 counting (0x1) + * if requested: + */ + bits = 0x8ULL; + if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) + bits |= 0x2; + if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) + bits |= 0x1; + + bits <<= (idx * 4); + mask = 0xfULL << (idx * 4); + + rdmsrl(hwc->config_base, ctrl_val); + ctrl_val &= ~mask; + ctrl_val |= bits; + wrmsrl(hwc->config_base, ctrl_val); +} + +static void zhaoxin_pmu_enable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { + zhaoxin_pmu_enable_fixed(hwc); + return; + } + + __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); +} + +/* + * This handler is triggered by the local APIC, so the APIC IRQ handling + * rules apply: + */ +static int zhaoxin_pmu_handle_irq(struct pt_regs *regs) +{ + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + int handled = 0; + u64 status; + int bit; + + cpuc = this_cpu_ptr(&cpu_hw_events); + apic_write(APIC_LVTPC, APIC_DM_NMI); + zhaoxin_pmu_disable_all(); + status = zhaoxin_pmu_get_status(); + if (!status) + goto done; + +again: + if (x86_pmu.enabled_ack) + zxc_pmu_ack_status(status); + else + zhaoxin_pmu_ack_status(status); + + inc_irq_stat(apic_perf_irqs); + + /* + * CondChgd bit 63 doesn't mean any overflow status. Ignore + * and clear the bit. 
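zhaoxin_pmu_enable_fixed() and zhaoxin_pmu_disable_fixed() above follow the architectural layout of the fixed-counter control MSR: each fixed counter owns a 4-bit field, with bit 0 enabling ring-0 counting, bit 1 ring-3 counting and bit 3 PMI generation. A self-contained sketch of the read-modify-write on that field for a given counter index::

    #include <stdint.h>
    #include <stdio.h>

    /* Return the new FIXED_CTR_CTRL value for fixed counter 'idx': clear its
     * 4-bit field, then set PMI (0x8) plus the requested ring-3 (0x2) and
     * ring-0 (0x1) bits, as zhaoxin_pmu_enable_fixed() does. */
    static uint64_t enable_fixed(uint64_t ctrl_val, int idx, int usr, int os)
    {
            uint64_t bits = 0x8, mask = 0xfULL << (idx * 4);

            if (usr)
                    bits |= 0x2;
            if (os)
                    bits |= 0x1;
            bits <<= idx * 4;

            return (ctrl_val & ~mask) | bits;
    }

    int main(void)
    {
            /* Fixed counter 1, user + kernel, PMI on: field 1 becomes 0xb. */
            printf("%#llx\n", (unsigned long long)enable_fixed(0, 1, 1, 1));
            return 0;
    }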
+ */ + if (__test_and_clear_bit(63, (unsigned long *)&status)) { + if (!status) + goto done; + } + + for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { + struct perf_event *event = cpuc->events[bit]; + + handled++; + + if (!test_bit(bit, cpuc->active_mask)) + continue; + + x86_perf_event_update(event); + perf_sample_data_init(&data, 0, event->hw.last_period); + + if (!x86_perf_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + x86_pmu_stop(event, 0); + } + + /* + * Repeat if there is more work to be done: + */ + status = zhaoxin_pmu_get_status(); + if (status) + goto again; + +done: + zhaoxin_pmu_enable_all(0); + return handled; +} + +static u64 zhaoxin_pmu_event_map(int hw_event) +{ + return zx_pmon_event_map[hw_event]; +} + +static struct event_constraint * +zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct event_constraint *c; + + if (x86_pmu.event_constraints) { + for_each_event_constraint(c, x86_pmu.event_constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &unconstrained; +} + +PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(umask, "config:8-15"); +PMU_FORMAT_ATTR(edge, "config:18"); +PMU_FORMAT_ATTR(inv, "config:23"); +PMU_FORMAT_ATTR(cmask, "config:24-31"); + +static struct attribute *zx_arch_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask.attr, + NULL, +}; + +static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config) +{ + u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); + + return x86_event_sysfs_show(page, config, event); +} + +static const struct x86_pmu zhaoxin_pmu __initconst = { + .name = "", + .handle_irq = zhaoxin_pmu_handle_irq, + .disable_all = zhaoxin_pmu_disable_all, + .enable_all = zhaoxin_pmu_enable_all, + .enable = zhaoxin_pmu_enable_event, + .disable = zhaoxin_pmu_disable_event, + .hw_config = x86_pmu_hw_config, + .schedule_events = x86_schedule_events, + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, + .perfctr = MSR_ARCH_PERFMON_PERFCTR0, + .event_map = zhaoxin_pmu_event_map, + .max_events = ARRAY_SIZE(zx_pmon_event_map), + .apic = 1, + /* + * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. 
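The PMU_FORMAT_ATTR() definitions above publish the raw config layout to user space: the event select sits in config bits 0-7 and the unit mask in bits 8-15, with edge/inv/cmask above those. A small self-contained sketch that decodes zx_pmon_event_map[]-style values earlier in this file according to that layout::

    #include <stdint.h>
    #include <stdio.h>

    /* Split a raw config into the fields named by the format attributes:
     * event = config:0-7, umask = config:8-15. */
    static void decode(uint64_t config)
    {
            printf("config %#06llx -> event %#04llx, umask %#04llx\n",
                   (unsigned long long)config,
                   (unsigned long long)(config & 0xff),
                   (unsigned long long)((config >> 8) & 0xff));
    }

    int main(void)
    {
            decode(0x0515);  /* cache references: event 0x15, umask 0x05 */
            decode(0x0082);  /* cpu cycles: event 0x82, umask 0x00 */
            return 0;
    }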
+ */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = zhaoxin_get_event_constraints, + + .format_attrs = zx_arch_formats_attr, + .events_sysfs_show = zhaoxin_event_sysfs_show, +}; + +static const struct { int id; char *name; } zx_arch_events_map[] __initconst = { + { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, + { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, + { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, + { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, + { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, + { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, + { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, +}; + +static __init void zhaoxin_arch_events_quirk(void) +{ + int bit; + + /* disable event that reported as not presend by cpuid */ + for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) { + zx_pmon_event_map[zx_arch_events_map[bit].id] = 0; + pr_warn("CPUID marked event: \'%s\' unavailable\n", + zx_arch_events_map[bit].name); + } +} + +__init int zhaoxin_pmu_init(void) +{ + union cpuid10_edx edx; + union cpuid10_eax eax; + union cpuid10_ebx ebx; + struct event_constraint *c; + unsigned int unused; + int version; + + pr_info("Welcome to pmu!\n"); + + /* + * Check whether the Architectural PerfMon supports + * hw_event or not. + */ + cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); + + if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT - 1) + return -ENODEV; + + version = eax.split.version_id; + if (version != 2) + return -ENODEV; + + x86_pmu = zhaoxin_pmu; + pr_info("Version check pass!\n"); + + x86_pmu.version = version; + x86_pmu.num_counters = eax.split.num_counters; + x86_pmu.cntval_bits = eax.split.bit_width; + x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; + x86_pmu.events_maskl = ebx.full; + x86_pmu.events_mask_len = eax.split.mask_length; + + x86_pmu.num_counters_fixed = edx.split.num_counters_fixed; + x86_add_quirk(zhaoxin_arch_events_quirk); + + switch (boot_cpu_data.x86) { + case 0x06: + if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) { + + x86_pmu.max_period = x86_pmu.cntval_mask >> 1; + + /* Clearing status works only if the global control is enable on zxc. 
*/ + x86_pmu.enabled_ack = 1; + + x86_pmu.event_constraints = zxc_event_constraints; + zx_pmon_event_map[PERF_COUNT_HW_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; + + pr_cont("ZXC events, "); + break; + } + return -ENODEV; + + case 0x07: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x01, .umask = 0x01, .inv = 0x01, .cmask = 0x01); + + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = + X86_CONFIG(.event = 0x0f, .umask = 0x04, .inv = 0, .cmask = 0); + + switch (boot_cpu_data.x86_model) { + case 0x1b: + memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = zxd_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; + + pr_cont("ZXD events, "); + break; + case 0x3b: + memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = zxd_event_constraints; + + pr_cont("ZXE events, "); + break; + case 0x5b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, .cmask = 0x01); + + memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = zxd_event_constraints; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; + + pr_cont("CNX events, "); + break; + default: + return -ENODEV; + } + break; + + default: + return -ENODEV; + } + + x86_pmu.intel_ctrl = (1 << (x86_pmu.num_counters)) - 1; + x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED; + + if (x86_pmu.event_constraints) { + for_each_event_constraint(c, x86_pmu.event_constraints) { + c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; + c->weight += x86_pmu.num_counters; + } + } + + return 0; +} + diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 20c876c7c5bf08ac6ea5aad0f956daae557339f2..fda61a4dc22982530af30f0e63cb4f5774b5a7c4 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -17,6 +17,7 @@ * */ +#include #include #include #include @@ -95,15 +96,20 @@ void __percpu **hyperv_pcpu_input_arg; EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); u32 hv_max_vp_index; +EXPORT_SYMBOL_GPL(hv_max_vp_index); static int hv_cpu_init(unsigned int cpu) { u64 msr_vp_index; struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; void **input_arg; + struct page *pg; input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - *input_arg = page_address(alloc_page(GFP_KERNEL)); + pg = alloc_page(GFP_KERNEL); + if (unlikely(!pg)) + return -ENOMEM; + *input_arg = page_address(pg); hv_get_vp_index(msr_vp_index); @@ -195,6 +201,9 @@ void set_hv_tscchange_cb(void (*cb)(void)) return; } + if (!hv_vp_index) + return; + hv_reenlightenment_cb = cb; /* Make sure callback is registered before we write to MSRs */ @@ -253,6 +262,22 @@ static int hv_cpu_die(unsigned int cpu) return 0; } +static int __init hv_pci_init(void) +{ + int gen2vm = efi_enabled(EFI_BOOT); + + /* + * For Generation-2 VM, we exit from pci_arch_init() by returning 0. 
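At the end of zhaoxin_pmu_init() above, x86_pmu.intel_ctrl is assembled as one enable bit per general-purpose counter in the low bits plus one bit per fixed counter starting at INTEL_PMC_IDX_FIXED (bit 32); zhaoxin_pmu_enable_all() then writes exactly this mask to MSR_CORE_PERF_GLOBAL_CTRL. A worked, self-contained sketch of that computation::

    #include <stdint.h>
    #include <stdio.h>

    #define IDX_FIXED 32   /* mirrors INTEL_PMC_IDX_FIXED */

    /* GP counters occupy bits [0, num_gp), fixed counters occupy
     * bits [IDX_FIXED, IDX_FIXED + num_fixed). */
    static uint64_t global_ctrl_mask(int num_gp, int num_fixed)
    {
            uint64_t mask = (1ULL << num_gp) - 1;

            mask |= ((1ULL << num_fixed) - 1) << IDX_FIXED;
            return mask;
    }

    int main(void)
    {
            /* 4 GP + 3 fixed counters -> 0x70000000f */
            printf("%#llx\n", (unsigned long long)global_ctrl_mask(4, 3));
            return 0;
    }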
+ * The purpose is to suppress the harmless warning: + * "PCI: Fatal: No config space access function found" + */ + if (gen2vm) + return 0; + + /* For Generation-1 VM, we'll proceed in pci_arch_init(). */ + return 1; +} + /* * This function is to be invoked early in the boot sequence after the * hypervisor has been detected. @@ -329,6 +354,8 @@ void __init hyperv_init(void) hv_apic_init(); + x86_init.pci.arch_init = hv_pci_init; + /* * Register Hyper-V specific clocksource. */ @@ -387,6 +414,13 @@ void hyperv_cleanup(void) /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); + /* + * Reset hypercall page reference before reset the page, + * let hypercall operations fail safely rather than + * panic the kernel for using invalid hypercall page + */ + hv_hypercall_pg = NULL; + /* Reset the hypercall page */ hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index ef5f29f913d7b064f1a086ac0674bebb4c8a845a..2f34d52753526bf30a6457c93c94f09fe334c7f0 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset, * Lower 12 bits encode the number of additional * pages to flush (in addition to the 'cur' page). */ - if (diff >= HV_TLB_FLUSH_UNIT) + if (diff >= HV_TLB_FLUSH_UNIT) { gva_list[gva_n] |= ~PAGE_MASK; - else if (diff) + cur += HV_TLB_FLUSH_UNIT; + } else if (diff) { gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; + cur = end; + } - cur += HV_TLB_FLUSH_UNIT; gva_n++; } while (cur < end); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 8e02b30cf08e16a2ca5b3d0b6aa97bda051a2c85..8d78ea00d75fed20b39d530cc3442101e8e2a1cf 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -51,7 +51,7 @@ static unsigned long get_dr(int n) /* * fill in the user structure for a core dump.. 
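The fill_gva_list() fix in arch/x86/hyperv/mmu.c above depends on the entry encoding described in its comment: each 64-bit entry is a page-aligned GVA whose low 12 bits give the number of additional pages covered, so one entry describes at most a fixed-size chunk of the range, and 'cur' must only advance by a full chunk when a full chunk was actually consumed. A simplified, self-contained sketch of that encoding (FLUSH_UNIT stands in for HV_TLB_FLUSH_UNIT and its value here is an assumption)::

    #include <stdint.h>
    #include <stdio.h>

    #define PG_SHIFT   12
    #define PG_SIZE    (1ULL << PG_SHIFT)
    #define PG_MASK    (~(PG_SIZE - 1))
    #define FLUSH_UNIT (4096 * PG_SIZE)   /* byte span of 4096 pages */

    /* Encode [cur, end) as entries whose low 12 bits hold the number of
     * additional pages covered by that entry. */
    static int fill_gva_list(uint64_t gva_list[], uint64_t cur, uint64_t end)
    {
            int n = 0;

            do {
                    uint64_t diff = end - cur;

                    gva_list[n] = cur & PG_MASK;
                    if (diff >= FLUSH_UNIT) {
                            gva_list[n] |= ~PG_MASK;   /* 4095 extra pages */
                            cur += FLUSH_UNIT;
                    } else if (diff) {
                            gva_list[n] |= (diff - 1) >> PG_SHIFT;
                            cur = end;
                    }
                    n++;
            } while (cur < end);

            return n;
    }

    int main(void)
    {
            uint64_t list[4];
            /* Three pages starting at 0x10000: one entry, low bits = 2. */
            int n = fill_gva_list(list, 0x10000, 0x10000 + 3 * PG_SIZE);

            printf("%d entries, low bits of entry 0 = %llu\n", n,
                   (unsigned long long)(list[0] & ~PG_MASK));
            return 0;
    }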
*/ -static void dump_thread32(struct pt_regs *regs, struct user32 *dump) +static void fill_dump(struct pt_regs *regs, struct user32 *dump) { u32 fs, gs; memset(dump, 0, sizeof(*dump)); @@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; + + fill_dump(cprm->regs, &dump); + strncpy(dump.u_comm, current->comm, sizeof(current->comm)); dump.u_ar0 = offsetof(struct user32, regs); dump.signal = cprm->siginfo->si_signo; - dump_thread32(cprm->regs, &dump); /* * If the size of the dump file exceeds the rlimit, then see @@ -176,10 +178,10 @@ static int aout_core_dump(struct coredump_params *cprm) /* make sure we actually have a data and stack area to dump */ set_fs(USER_DS); - if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_DATA(dump), + if (!access_ok((void *) (unsigned long)START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) dump.u_dsize = 0; - if (!access_ok(VERIFY_READ, (void *) (unsigned long)START_STACK(dump), + if (!access_ok((void *) (unsigned long)START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) dump.u_ssize = 0; @@ -296,6 +298,7 @@ static int load_aout_binary(struct linux_binprm *bprm) set_personality_ia32(false); setup_new_exec(bprm); + install_exec_creds(bprm); regs->cs = __USER32_CS; regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = @@ -312,8 +315,6 @@ static int load_aout_binary(struct linux_binprm *bprm) if (retval < 0) return retval; - install_exec_creds(bprm); - if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 86b1341cba9ac5c6b32a3dd941091d59f8ebb56c..4d5fcd47ab75a4e2815f2ed381b9356b3c18e7d1 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -61,9 +61,8 @@ } while (0) #define RELOAD_SEG(seg) { \ - unsigned int pre = GET_SEG(seg); \ + unsigned int pre = (seg) | 3; \ unsigned int cur = get_user_seg(seg); \ - pre |= 3; \ if (pre != cur) \ set_user_seg(seg, pre); \ } @@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_32 __user *sc) { unsigned int tmpflags, err = 0; + u16 gs, fs, es, ds; void __user *buf; u32 tmp; @@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, current->restart_block.fn = do_no_restart_syscall; get_user_try { - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ - RELOAD_SEG(gs); - RELOAD_SEG(fs); - RELOAD_SEG(ds); - RELOAD_SEG(es); + gs = GET_SEG(gs); + fs = GET_SEG(fs); + ds = GET_SEG(ds); + es = GET_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); COPY(ax); @@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, buf = compat_ptr(tmp); } get_user_catch(err); + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. 
+ */ + RELOAD_SEG(gs); + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); + err |= fpu__restore_sig(buf, 1); force_iret(); @@ -119,7 +124,7 @@ asmlinkage long sys32_sigreturn(void) struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); sigset_t set; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_COMPAT_NSIG_WORDS > 1 @@ -147,7 +152,7 @@ asmlinkage long sys32_rt_sigreturn(void) frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; @@ -269,7 +274,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (__put_user(sig, &frame->sig)) @@ -349,7 +354,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; put_user_try { diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 11ef7b7c9cc897940ec445498f338a31a3844d28..a4321203625797007091094be941587840558f59 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -75,7 +75,7 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) typeof(ubuf->st_gid) gid = 0; SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || + if (!access_ok(ubuf, sizeof(struct stat64)) || __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) || __put_user(stat->ino, &ubuf->__st_ino) || __put_user(stat->ino, &ubuf->st_ino) || diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index a303d7b7d763fb352d7d9bb3e5de07034943ee41..036f2554ec8c843bace80b55d59fd549cb776011 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -107,7 +107,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_pdc_bits(u32 *buf) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index fddb6d26239f59f5c679617f310f290a1161a9cb..b6e5db1069e9bb3954ef03d47d8a7e2756b999ba 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -84,6 +84,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -103,6 +107,9 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) static inline bool amd_gart_present(void) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return false; + /* GART present only on Fam15h, upto model 0fh */ if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10)) @@ -118,6 +125,10 @@ static 
inline bool amd_gart_present(void) #define node_to_amd_nb(x) NULL #define amd_gart_present(x) false +#define hygon_f18h_m4h false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) NULL + #endif diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 130e81e10fc7cfae6dd6e193965f65648a0d1a79..75bf0faad30d14581619c5215ceeb26ff3180468 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -48,11 +48,11 @@ static inline void generic_apic_probe(void) #ifdef CONFIG_X86_LOCAL_APIC -extern unsigned int apic_verbosity; +extern int apic_verbosity; extern int local_apic_timer_c2_ok; extern int disable_apic; -extern unsigned int lapic_timer_frequency; +extern unsigned int lapic_timer_period; extern enum apic_intr_mode_id apic_intr_mode; enum apic_intr_mode_id { @@ -138,6 +138,7 @@ extern void disable_local_APIC(void); extern void lapic_shutdown(void); extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); +extern void apic_intr_mode_select(void); extern void apic_intr_mode_init(void); extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); @@ -172,8 +173,10 @@ static inline int apic_is_clustered_box(void) extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); extern void lapic_assign_system_vectors(void); extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace); +extern void lapic_update_legacy_vectors(void); extern void lapic_online(void); extern void lapic_offline(void); +extern bool apic_needs_pit(void); #else /* !CONFIG_X86_LOCAL_APIC */ static inline void lapic_shutdown(void) { } @@ -184,9 +187,11 @@ static inline void disable_local_APIC(void) { } # define setup_secondary_APIC_clock x86_init_noop static inline void lapic_update_tsc_freq(void) { } static inline void init_bsp_APIC(void) { } +static inline void apic_intr_mode_select(void) { } static inline void apic_intr_mode_init(void) { } static inline void lapic_assign_system_vectors(void) { } static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { } +static inline bool apic_needs_pit(void) { return true; } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC @@ -448,6 +453,14 @@ static inline void ack_APIC_irq(void) apic_eoi(); } + +static inline bool lapic_vector_set_in_irr(unsigned int vector) +{ + u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); + + return !!(irr & (1U << (vector % 32))); +} + static inline unsigned default_get_apic_id(unsigned long x) { unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 990770f9e76b5a52af6f85692883a8507af00af6..bcd69fc6cf342231ed84227de259940d5640d436 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -130,6 +130,9 @@ # define _ASM_EXTABLE(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) +# define _ASM_EXTABLE_UA(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) @@ -144,31 +147,6 @@ _ASM_ALIGN ; \ _ASM_PTR (entry); \ .popsection - -.macro ALIGN_DESTINATION - /* check for bad alignment of destination */ - movl %edi,%ecx - andl $7,%ecx - jz 102f /* already aligned */ - subl $8,%ecx - negl %ecx - subl %ecx,%edx -100: movb (%rsi),%al -101: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 100b -102: - .section .fixup,"ax" -103: addl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail - .previous - - 
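lapic_vector_set_in_irr() in the apic.h hunk above relies on the IRR layout: 256 interrupt bits spread over eight 32-bit registers spaced 0x10 apart, so a vector maps to the register at APIC_IRR + (vector / 32) * 0x10 and the bit vector % 32 within it. A small self-contained sketch of that index math (the 0x200 base is the conventional APIC_IRR offset and is an assumption here)::

    #include <stdio.h>

    #define APIC_IRR_BASE 0x200   /* assumed APIC_IRR register offset */

    static unsigned int irr_reg_offset(unsigned int vector)
    {
            return APIC_IRR_BASE + (vector / 32) * 0x10;
    }

    static unsigned int irr_bit(unsigned int vector)
    {
            return vector % 32;
    }

    int main(void)
    {
            unsigned int vec = 0xec;   /* example vector */

            printf("vector %#x -> register %#x, bit %u\n",
                   vec, irr_reg_offset(vec), irr_bit(vec));
            return 0;
    }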
_ASM_EXTABLE(100b,103b) - _ASM_EXTABLE(101b,103b) - .endm - #else # define _EXPAND_EXTABLE_HANDLE(x) #x # define _ASM_EXTABLE_HANDLE(from, to, handler) \ @@ -182,6 +160,9 @@ # define _ASM_EXTABLE(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) +# define _ASM_EXTABLE_UA(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index ce84388e540c918a01e31599bc78881a5b9d2954..115127c7ad28ab5e8ba6d0732d72032cbef114c8 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -54,7 +54,7 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "addl %1,%0" : "+m" (v->counter) - : "ir" (i)); + : "ir" (i) : "memory"); } /** @@ -68,7 +68,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "subl %1,%0" : "+m" (v->counter) - : "ir" (i)); + : "ir" (i) : "memory"); } /** @@ -82,7 +82,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v) */ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); + return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i); } #define arch_atomic_sub_and_test arch_atomic_sub_and_test @@ -95,7 +95,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) static __always_inline void arch_atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" - : "+m" (v->counter)); + : "+m" (v->counter) :: "memory"); } #define arch_atomic_inc arch_atomic_inc @@ -108,7 +108,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v) static __always_inline void arch_atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" - : "+m" (v->counter)); + : "+m" (v->counter) :: "memory"); } #define arch_atomic_dec arch_atomic_dec @@ -122,7 +122,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v) */ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); + return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e); } #define arch_atomic_dec_and_test arch_atomic_dec_and_test @@ -136,7 +136,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) */ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); + return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e); } #define arch_atomic_inc_and_test arch_atomic_inc_and_test @@ -151,7 +151,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) */ static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); + return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i); } #define arch_atomic_add_negative arch_atomic_add_negative diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 5f851d92eecd9ee8eaad86c6b002937633e9144f..5e86c0d68ac169d4c3856f9212e258389093a9d3 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -45,7 +45,7 @@ static __always_inline void arch_atomic64_add(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) - : "er" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter) : "memory"); } /** @@ -59,7 +59,7 @@ static inline void 
arch_atomic64_sub(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "subq %1,%0" : "=m" (v->counter) - : "er" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter) : "memory"); } /** @@ -73,7 +73,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v) */ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); + return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); } #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test @@ -87,7 +87,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) - : "m" (v->counter)); + : "m" (v->counter) : "memory"); } #define arch_atomic64_inc arch_atomic64_inc @@ -101,7 +101,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" : "=m" (v->counter) - : "m" (v->counter)); + : "m" (v->counter) : "memory"); } #define arch_atomic64_dec arch_atomic64_dec @@ -115,7 +115,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) */ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); + return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); } #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test @@ -129,7 +129,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) */ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); + return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); } #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test @@ -144,7 +144,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) */ static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); + return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); } #define arch_atomic64_add_negative arch_atomic64_add_negative diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 14de0432d288414bd1437e44b8cb13facc6f12e9..84f848c2541a6e5febb218fc64d209270c80e9bc 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -80,8 +80,8 @@ do { \ }) /* Atomic operations are already serializing on x86 */ -#define __smp_mb__before_atomic() barrier() -#define __smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() do { } while (0) +#define __smp_mb__after_atomic() do { } while (0) #include diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 9f645ba57dbb263822600aae5d82138316c8f6e3..cb8a7386933fa82c3fc93edd7288315bf5f1a7a0 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -36,22 +36,17 @@ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) -/* Technically wrong, but this avoids compilation errors on some gcc - versions. */ -#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) -#else -#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) -#endif +#define RLONG_ADDR(x) "m" (*(volatile long *) (x)) +#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x)) -#define ADDR BITOP_ADDR(addr) +#define ADDR RLONG_ADDR(addr) /* * We do the locked ops that don't return the old value as * a mask operation on a byte. 
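The atomic.h and atomic64_64.h hunks above add a "memory" clobber to the non-value-returning atomics, so each one now also acts as a compiler barrier; that is consistent with the barrier.h hunk turning __smp_mb__before_atomic()/__smp_mb__after_atomic() into empty statements. A self-contained sketch of the same constraint pattern outside the kernel (x86 GCC/Clang inline asm)::

    #include <stdio.h>

    /* The "memory" clobber tells the compiler the asm may read or write
     * memory, so cached values are reloaded and pending stores are not
     * moved across it, i.e. it doubles as a compiler barrier. */
    static inline void atomic_add_sketch(int i, volatile int *v)
    {
            asm volatile("lock addl %1,%0"
                         : "+m" (*v)
                         : "ir" (i)
                         : "memory");
    }

    int main(void)
    {
            int counter = 0;

            atomic_add_sketch(5, &counter);
            printf("%d\n", counter);   /* 5 */
            return 0;
    }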
*/ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) +#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) /** @@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr) : "memory"); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" - : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr) */ static __always_inline void __set_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); + asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } /** @@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)~CONST_MASK(nr))); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" - : BITOP_ADDR(addr) - : "Ir" (nr)); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) @@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile bool negative; asm volatile(LOCK_PREFIX "andb %2,%1" CC_SET(s) - : CC_OUT(s) (negative), ADDR + : CC_OUT(s) (negative), WBYTE_ADDR(addr) : "ir" ((char) ~(1 << nr)) : "memory"); return negative; } @@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile * __clear_bit() is non-atomic and implies release semantics before the memory * operation. It can be used for an unlock if no other CPUs can concurrently * modify other bits in the word. - * - * No memory barrier is required here, because x86 cannot reorder stores past - * older loads. Same principle as spin_unlock. 
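For a compile-time-constant bit number, the set_bit()/clear_bit()/change_bit() paths above go through CONST_MASK_ADDR()/CONST_MASK() and operate on a single byte: the byte at offset nr / 8 and the bit nr % 8 within it. A self-contained sketch of that addressing::

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-wise equivalent of the constant-nr set_bit() path. */
    static void set_bit_bytewise(unsigned long nr, uint8_t *base)
    {
            base[nr >> 3] |= (uint8_t)(1u << (nr & 7));
    }

    int main(void)
    {
            uint8_t bitmap[8] = { 0 };

            set_bit_bytewise(35, bitmap);
            printf("byte 4 = %#x\n", bitmap[4]);   /* bit 35 -> byte 4, bit 3 */
            return 0;
    }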
*/ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { - barrier(); __clear_bit(nr, addr); } @@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * */ static __always_inline void __change_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } /** @@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)CONST_MASK(nr))); } else { asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" - : BITOP_ADDR(addr) - : "Ir" (nr)); + : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } @@ -217,8 +206,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) */ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), - *addr, "Ir", nr, "%0", c); + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); } /** @@ -249,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr)); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -264,8 +252,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * */ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), - *addr, "Ir", nr, "%0", c); + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); } /** @@ -290,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long asm volatile(__ASM_SIZE(btr) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr)); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -302,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon asm volatile(__ASM_SIZE(btc) " %2,%1" CC_SET(c) - : CC_OUT(c) (oldbit), ADDR - : "Ir" (nr) : "memory"); + : CC_OUT(c) (oldbit) + : ADDR, "Ir" (nr) : "memory"); return oldbit; } @@ -318,8 +305,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon */ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), - *addr, "Ir", nr, "%0", c); + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); } static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) @@ -335,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) - : "m" (*(unsigned long *)addr), "Ir" (nr)); + : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); return oldbit; } diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index a07ffd23e4dd67d3e182bd803eb868eaef1bcdf5..8fa49cf1211d36ec9547363d15cb2ee3c73d1909 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -18,6 +18,20 @@ * Note: efi_info is commonly left uninitialized, but that field has a * private magic, so it is better to leave it unchanged. 
*/ + +#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); }) + +#define BOOT_PARAM_PRESERVE(struct_member) \ + { \ + .start = offsetof(struct boot_params, struct_member), \ + .len = sizeof_mbr(struct boot_params, struct_member), \ + } + +struct boot_params_to_save { + unsigned int start; + unsigned int len; +}; + static void sanitize_boot_params(struct boot_params *boot_params) { /* @@ -36,19 +50,41 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ - memset(&boot_params->ext_ramdisk_image, 0, - (char *)&boot_params->efi_info - - (char *)&boot_params->ext_ramdisk_image); - memset(&boot_params->kbd_status, 0, - (char *)&boot_params->hdr - - (char *)&boot_params->kbd_status); - memset(&boot_params->_pad7[0], 0, - (char *)&boot_params->edd_mbr_sig_buffer[0] - - (char *)&boot_params->_pad7[0]); - memset(&boot_params->_pad8[0], 0, - (char *)&boot_params->eddbuf[0] - - (char *)&boot_params->_pad8[0]); - memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); + static struct boot_params scratch; + char *bp_base = (char *)boot_params; + char *save_base = (char *)&scratch; + int i; + + const struct boot_params_to_save to_save[] = { + BOOT_PARAM_PRESERVE(screen_info), + BOOT_PARAM_PRESERVE(apm_bios_info), + BOOT_PARAM_PRESERVE(tboot_addr), + BOOT_PARAM_PRESERVE(ist_info), + BOOT_PARAM_PRESERVE(hd0_info), + BOOT_PARAM_PRESERVE(hd1_info), + BOOT_PARAM_PRESERVE(sys_desc_table), + BOOT_PARAM_PRESERVE(olpc_ofw_header), + BOOT_PARAM_PRESERVE(efi_info), + BOOT_PARAM_PRESERVE(alt_mem_k), + BOOT_PARAM_PRESERVE(scratch), + BOOT_PARAM_PRESERVE(e820_entries), + BOOT_PARAM_PRESERVE(eddbuf_entries), + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), + BOOT_PARAM_PRESERVE(secure_boot), + BOOT_PARAM_PRESERVE(hdr), + BOOT_PARAM_PRESERVE(e820_table), + BOOT_PARAM_PRESERVE(eddbuf), + }; + + memset(&scratch, 0, sizeof(scratch)); + + for (i = 0; i < ARRAY_SIZE(to_save); i++) { + memcpy(save_base + to_save[i].start, + bp_base + to_save[i].start, to_save[i].len); + } + + memcpy(boot_params, save_base, sizeof(*boot_params)); } } diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h index e958e28f7ab5c8e865fe613ba55912d5acf8d1dc..86b2e0dcc4bfe0930cf0200e0f4c25d3aaf7a966 100644 --- a/arch/x86/include/asm/cacheinfo.h +++ b/arch/x86/include/asm/cacheinfo.h @@ -2,6 +2,7 @@ #ifndef _ASM_X86_CACHEINFO_H #define _ASM_X86_CACHEINFO_H -void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu); +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu); #endif /* _ASM_X86_CACHEINFO_H */ diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 7a659c74cd037039896c8c3313a5967a86393020..f57b94e02c5770f9c33f281b8ff647adc7280e12 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -182,7 +182,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, __wsum ret; might_sleep(); - if (access_ok(VERIFY_WRITE, dst, len)) { + if (access_ok(dst, len)) { stac(); ret = csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index baeba05671268cc7d835ecf6decbfc9fc5fc93ea..cdf39decf73403f7df7a31a02f437190725f7d39 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ 
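The sanitize_boot_params() rewrite above replaces hand-maintained memset() ranges with a declarative preserve list: each BOOT_PARAM_PRESERVE() entry records a member's offset and length, the listed members are copied into a zeroed scratch struct, and the scratch is copied back, so every field not on the list ends up cleared. A generic, self-contained sketch of that pattern (the struct and field names are illustrative, not the real boot_params layout)::

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct params {                    /* stand-in for struct boot_params */
            int  keep_a;
            int  junk;
            char keep_b[4];
    };

    struct to_save { size_t start, len; };

    #define PRESERVE(m) \
            { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

    static void sanitize(struct params *p)
    {
            static const struct to_save list[] = {
                    PRESERVE(keep_a),
                    PRESERVE(keep_b),
            };
            struct params scratch;
            size_t i;

            memset(&scratch, 0, sizeof(scratch));
            for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
                    memcpy((char *)&scratch + list[i].start,
                           (char *)p + list[i].start, list[i].len);
            memcpy(p, &scratch, sizeof(*p));   /* unlisted fields are now zero */
    }

    int main(void)
    {
            struct params p = { .keep_a = 7, .junk = 42, .keep_b = "abc" };

            sanitize(&p);
            printf("%d %d %s\n", p.keep_a, p.junk, p.keep_b);   /* 7 0 abc */
            return 0;
    }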
b/arch/x86/include/asm/cpu_device_id.h @@ -1,14 +1,195 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CPU_DEVICE_ID -#define _CPU_DEVICE_ID 1 +#ifndef _ASM_X86_CPU_DEVICE_ID +#define _ASM_X86_CPU_DEVICE_ID /* * Declare drivers belonging to specific x86 CPUs * Similar in spirit to pci_device_id and related PCI functions + * + * The wildcard initializers are in mod_devicetable.h because + * file2alias needs them. Sigh. */ - #include +/* Get the INTEL_FAM* model defines */ +#include +/* And the X86_VENDOR_* ones */ +#include + +/* Centaur FAM6 models */ +#define X86_CENTAUR_FAM6_C7_A 0xa +#define X86_CENTAUR_FAM6_C7_D 0xd +#define X86_CENTAUR_FAM6_NANO 0xf + +#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins) +/** + * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching + * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@_vendor + * @_family: The family number or X86_FAMILY_ANY + * @_model: The model number, model constant or X86_MODEL_ANY + * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY + * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY + * @_data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * Use only if you need all selectors. Otherwise use one of the shorter + * macros of the X86_MATCH_* family. If there is no matching shorthand + * macro, consider to add one. If you really need to wrap one of the macros + * into another macro at the usage site for good reasons, then please + * start this local macro with X86_MATCH to allow easy grepping. + */ +#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ + _steppings, _feature, _data) { \ + .vendor = X86_VENDOR_##_vendor, \ + .family = _family, \ + .model = _model, \ + .steppings = _steppings, \ + .feature = _feature, \ + .driver_data = (unsigned long) _data \ +} + +/** + * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching + * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@_vendor + * @_family: The family number or X86_FAMILY_ANY + * @_model: The model number, model constant or X86_MODEL_ANY + * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY + * @_data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * The steppings arguments of X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE() is + * set to wildcards. + */ +#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \ + X86_STEPPING_ANY, feature, data) + +/** + * X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature + * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@vendor + * @family: The family number or X86_FAMILY_ANY + * @feature: A X86_FEATURE bit + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are + * set to wildcards. 
+ */ +#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \ + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \ + X86_MODEL_ANY, feature, data) + +/** + * X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature + * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@vendor + * @feature: A X86_FEATURE bit + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are + * set to wildcards. + */ +#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \ + X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data) + +/** + * X86_MATCH_FEATURE - Macro for matching a CPU feature + * @feature: A X86_FEATURE bit + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are + * set to wildcards. + */ +#define X86_MATCH_FEATURE(feature, data) \ + X86_MATCH_VENDOR_FEATURE(ANY, feature, data) + +/* Transitional to keep the existing code working */ +#define X86_FEATURE_MATCH(feature) X86_MATCH_FEATURE(feature, NULL) + +/** + * X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model + * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@vendor + * @family: The family number or X86_FAMILY_ANY + * @model: The model number, model constant or X86_MODEL_ANY + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are + * set to wildcards. + */ +#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \ + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \ + X86_FEATURE_ANY, data) + +/** + * X86_MATCH_VENDOR_FAM - Match vendor and family + * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@vendor + * @family: The family number or X86_FAMILY_ANY + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are + * set of wildcards. + */ +#define X86_MATCH_VENDOR_FAM(vendor, family, data) \ + X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data) + +/** + * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model + * @model: The model name without the INTEL_FAM6_ prefix or ANY + * The model name is expanded to INTEL_FAM6_@model internally + * @data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * The vendor is set to INTEL, the family to 6 and all other missing + * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards. + * + * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information. + */ +#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \ + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data) + +/* + * Match specific microcode revisions. + * + * vendor/family/model/stepping must be all set. 
+ * + * Only checks against the boot CPU. When mixed-stepping configs are + * valid for a CPU model, add a quirk for every valid stepping and + * do the fine-tuning in the quirk handler. + */ + +struct x86_cpu_desc { + u8 x86_family; + u8 x86_vendor; + u8 x86_model; + u8 x86_stepping; + u32 x86_microcode_rev; +}; + +#define INTEL_CPU_DESC(model, stepping, revision) { \ + .x86_family = 6, \ + .x86_vendor = X86_VENDOR_INTEL, \ + .x86_model = (model), \ + .x86_stepping = (stepping), \ + .x86_microcode_rev = (revision), \ +} extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); +extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); -#endif +#endif /* _ASM_X86_CPU_DEVICE_ID */ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index aced6c9290d6f96cdaf4eaadab3dd3835d80b94a..4ce54074eea57921aa12b04f8ff052abaed72c6a 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -22,8 +22,8 @@ enum cpuid_leafs CPUID_LNX_3, CPUID_7_0_EBX, CPUID_D_1_EAX, - CPUID_F_0_EDX, - CPUID_F_1_EDX, + CPUID_LNX_4, + CPUID_7_1_EAX, CPUID_8000_0008_EBX, CPUID_6_EAX, CPUID_8000_000A_EDX, @@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO) +#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO) /* * Workaround for the sake of BPF compilation which utilizes kernel diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 89a048c2faec7f8a818d1a461ccd7fa67eca0fd9..999badbcd2babaf00034cc8ec9b829709fa7163c 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -145,8 +145,12 @@ #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_SM2 (5*32+0) /* sm2 present*/ +#define X86_FEATURE_SM2_EN (5*32+1) /* sm2 enabled */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_CCS (5*32+4) /* "sm3 sm4" present */ +#define X86_FEATURE_CCS_EN (5*32+5) /* "sm3_en sm4_en" enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ @@ -155,6 +159,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */ +#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM (5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and 
SHA 512 present */ +#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 instructions are present */ +#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2instructions are enabled */ +#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK (5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ @@ -202,8 +223,8 @@ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ -#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */ +#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ @@ -218,7 +239,7 @@ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ -#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */ #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */ @@ -239,12 +260,14 @@ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */ #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */ #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ @@ -269,13 +292,30 @@ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ -#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ +/* + * Extended auxiliary flags: Linux defined - for features scattered in various + * CPUID levels like 0xf, etc. 
+ * + * Reuse free bits when adding new feature flags! + */ +#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */ +#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */ +#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ +#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ +#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ +/* FREE! (11*32+ 6) */ +/* FREE! (11*32+ 7) */ +/* FREE! (11*32+ 8) */ +/* FREE! (11*32+ 9) */ +/* FREE! (11*32+10) */ +#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */ +#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ +#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ +/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ +#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ @@ -284,9 +324,11 @@ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ +#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -331,6 +373,8 @@ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ +#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ +#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ @@ -340,6 +384,9 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ +#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ +#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ @@ -377,5 +424,17 @@ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ +#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ +#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ +#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ +#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ +#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ +#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ +#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */ +#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Pre dictions */ +#define X86_BUG_GDS X86_BUG(29) /* CPU is affected by Gather Data Sampling */ +#define X86_BUG_SMT_RSB X86_BUG(30) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +#define X86_BUG_DIV0 X86_BUG(31) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/cpuidle_haltpoll.h b/arch/x86/include/asm/cpuidle_haltpoll.h new file mode 100644 index 0000000000000000000000000000000000000000..c8b39c6716ff1798ab3e1391722e9b4f99aab2aa --- /dev/null +++ b/arch/x86/include/asm/cpuidle_haltpoll.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ARCH_HALTPOLL_H +#define _ARCH_HALTPOLL_H + +void arch_haltpoll_enable(unsigned int cpu); +void arch_haltpoll_disable(unsigned int cpu); + +#endif diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index a7adb2bfbf0b819b6c4c6c8d8ff97e2d28519a17..6b8ad6fa3979a12af9c84c4516caeba977a1d8b8 100644 --- 
a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_CRASH_H #define _ASM_X86_CRASH_H +struct kimage; + int crash_load_segments(struct kimage *image); int crash_copy_backup_region(struct kimage *image); int crash_setup_memmap_entries(struct kimage *image, diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 33833d1909afda4b31f266e2c25ce6226abb9e19..9d9da34874251245af1e28eb29d29dcb7a5a6267 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -16,7 +16,7 @@ # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) #endif -#ifdef CONFIG_X86_INTEL_UMIP +#ifdef CONFIG_X86_UMIP # define DISABLE_UMIP 0 #else # define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31)) diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index cec5fae23eb330d00c4e16f6d5ba22d0af29c414..00aa9de5abbb19ce4184523ba4b3d3a1d3467022 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -82,8 +82,7 @@ struct efi_scratch { #define arch_efi_call_virt_setup() \ ({ \ efi_sync_low_kernel_mappings(); \ - preempt_disable(); \ - __kernel_fpu_begin(); \ + kernel_fpu_begin(); \ firmware_restrict_branch_speculation_start(); \ \ if (!efi_enabled(EFI_OLD_MEMMAP)) \ @@ -99,8 +98,7 @@ struct efi_scratch { efi_switch_mm(efi_scratch.prev_mm); \ \ firmware_restrict_branch_speculation_end(); \ - __kernel_fpu_end(); \ - preempt_enable(); \ + kernel_fpu_end(); \ }) extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, @@ -125,7 +123,6 @@ extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable); extern int __init efi_memblock_x86_reserve_range(void); extern pgd_t * __init efi_call_phys_prolog(void); extern void __init efi_call_phys_epilog(pgd_t *save_pgd); -extern void __init efi_print_memmap(void); extern void __init efi_memory_uc(u64 addr, unsigned long size); extern void __init efi_map_region(efi_memory_desc_t *md); extern void __init efi_map_region_fixed(efi_memory_desc_t *md); diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 6390bd8c141b44cd988dd21bc4b5b3efbbf1688b..5e12b2319d7a52119c3cd9860a63af730d5b6ce9 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -159,7 +159,7 @@ extern pte_t *kmap_pte; extern pte_t *pkmap_page_table; void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); -void native_set_fixmap(enum fixed_addresses idx, +void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags); #ifndef CONFIG_PARAVIRT diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index a9caac9d4a729d8503828cf68813936c9a74bab5..b56d504af6545b95726f623886de5fb8ba2b006b 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -12,17 +12,12 @@ #define _ASM_X86_FPU_API_H /* - * Careful: __kernel_fpu_begin/end() must be called with preempt disabled - * and they don't touch the preempt state on their own. - * If you enable preemption after __kernel_fpu_begin(), preempt notifier - * should call the __kernel_fpu_end() to prevent the kernel/user FPU - * state from getting corrupted. KVM for example uses this model. - * - * All other cases use kernel_fpu_begin/end() which disable preemption - * during kernel FPU usage. + * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It + * disables preemption so be careful if you intend to use it for long periods + * of time. 
+ * If you intend to use the FPU in softirq you need to check first with + * irq_fpu_usable() if it is possible. */ -extern void __kernel_fpu_begin(void); -extern void __kernel_fpu_end(void); extern void kernel_fpu_begin(void); extern void kernel_fpu_end(void); extern bool irq_fpu_usable(void); diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 69dcdf195b6112b691616e2512f8a4ecca4796a1..3379a97f16642ac9080376de2cc20e9e9bf07115 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); #define user_insn(insn, output, input...) \ ({ \ int err; \ + \ + might_fault(); \ + \ asm volatile(ASM_STAC "\n" \ "1:" #insn "\n\t" \ "2: " ASM_CLAC "\n" \ @@ -211,6 +214,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) } } +static inline void fxsave(struct fxregs_state *fx) +{ + if (IS_ENABLED(CONFIG_X86_32)) + asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx)); + else + asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx)); +} + /* These macros all use (%edi)/(%rdi) as the single memory argument. */ #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27" #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37" @@ -275,28 +286,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ : "memory") -/* - * This function is called only during boot time when x86 caps are not set - * up and alternative can not be used yet. - */ -static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) -{ - u64 mask = -1; - u32 lmask = mask; - u32 hmask = mask >> 32; - int err; - - WARN_ON(system_state != SYSTEM_BOOTING); - - if (static_cpu_has(X86_FEATURE_XSAVES)) - XSTATE_OP(XSAVES, xstate, lmask, hmask, err); - else - XSTATE_OP(XSAVE, xstate, lmask, hmask, err); - - /* We should never fault when copying to a kernel buffer: */ - WARN_ON_FPU(err); -} - /* * This function is called only during boot time when x86 caps are not set * up and alternative can not be used yet. 
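The reworked <asm/fpu/api.h> comment above describes the calling convention for kernel-mode FPU use: kernel_fpu_begin()/kernel_fpu_end() now handle preemption themselves, and irq_fpu_usable() is the gate for interrupt/softirq callers. A minimal, hypothetical sketch of a caller following that convention (example_simd_helper is invented purely for illustration)::

    #include <asm/fpu/api.h>

    static void example_simd_helper(void)
    {
            /* From IRQ/softirq context the FPU may already be in use below us. */
            if (!irq_fpu_usable())
                    return;

            kernel_fpu_begin();     /* disables preemption, task FPU state is preserved around this section */
            /* ... SSE/AVX work on kernel buffers goes here ... */
            kernel_fpu_end();       /* restores state and re-enables preemption */
    }
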
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index de4d68852d3afcfcddd036c5018801ec5aaacacf..13c83fe97988b6ab9af6cdcca17668b346a33abe 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -20,7 +20,7 @@ "3:\tmov\t%3, %1\n" \ "\tjmp\t2b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ : "i" (-EFAULT), "0" (oparg), "1" (0)) @@ -36,8 +36,8 @@ "4:\tmov\t%5, %1\n" \ "\tjmp\t3b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 4b) \ - _ASM_EXTABLE(2b, 4b) \ + _ASM_EXTABLE_UA(1b, 4b) \ + _ASM_EXTABLE_UA(2b, 4b) \ : "=&a" (oldval), "=&r" (ret), \ "+m" (*uaddr), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 32e666e1231e77f128252b2f9354f708081cafdb..cbd97e22d2f3197053f785dd45f59e08448e7d6a 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -150,8 +150,11 @@ extern char irq_entries_start[]; #define trace_irq_entries_start irq_entries_start #endif +extern char spurious_entries_start[]; + #define VECTOR_UNUSED NULL -#define VECTOR_RETRIGGERED ((void *)~0UL) +#define VECTOR_SHUTDOWN ((void *)~0UL) +#define VECTOR_RETRIGGERED ((void *)~1UL) typedef struct irq_desc* vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index c2c01f84df75f1f9b35a3c898686a82973026d88..3e0e18d376d2c41a9f6a643eed45d1c00963025c 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -208,6 +208,21 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +/** + * for_each_insn_prefix() -- Iterate prefixes in the instruction + * @insn: Pointer to struct insn. + * @idx: Index storage. + * @prefix: Prefix byte. + * + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix + * and the index is stored in @idx (note that this @idx is just for a cursor, + * do not change it.) + * Since prefixes.nbytes can be bigger than 4 if some prefixes + * are repeated, it cannot be used for looping over the prefixes. + */ +#define for_each_insn_prefix(insn, idx, prefix) \ + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) + #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 7ed08a7c3398dc087a580abe82dd31761a8478f0..ccf07426a84dfa2315512fc35f56bd9ac8655a38 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -6,16 +6,16 @@ * "Big Core" Processors (Branded as Core, Xeon, etc...) * * The "_X" parts are generally the EP and EX Xeons, or the - * "Extreme" ones, like Broadwell-E. - * - * Things ending in "2" are usually because we have no better - * name for them. There's no processor called "SILVERMONT2". + * "Extreme" ones, like Broadwell-E, or Atom microserver. * * While adding a new CPUID for a new microarchitecture, add a new * group to keep logically sorted out in chronological order. Within * that group keep the CPUID for the variants sorted by model number. 
*/ +/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */ +#define INTEL_FAM6_ANY X86_MODEL_ANY + #define INTEL_FAM6_CORE_YONAH 0x0E #define INTEL_FAM6_CORE2_MEROM 0x0F @@ -55,27 +55,63 @@ #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 +#define INTEL_FAM6_ICELAKE_X 0x6A +#define INTEL_FAM6_ICELAKE_XEON_D 0x6C +#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D +#define INTEL_FAM6_ICELAKE_MOBILE 0x7E +#define INTEL_FAM6_ICELAKE_NNPI 0x9D + +#define INTEL_FAM6_TIGERLAKE_L 0x8C +#define INTEL_FAM6_TIGERLAKE 0x8D + +#define INTEL_FAM6_COMETLAKE 0xA5 +#define INTEL_FAM6_COMETLAKE_L 0xA6 + +#define INTEL_FAM6_ROCKETLAKE 0xA7 + +/* Hybrid Core/Atom Processors */ + +#define INTEL_FAM6_LAKEFIELD 0x8A +#define INTEL_FAM6_ALDERLAKE 0x97 +#define INTEL_FAM6_ALDERLAKE_L 0x9A +#define INTEL_FAM6_ALDERLAKE_N 0xBE + +#define INTEL_FAM6_RAPTORLAKE 0xB7 +#define INTEL_FAM6_RAPTORLAKE_P 0xBA +#define INTEL_FAM6_RAPTORLAKE_S 0xBF + /* "Small Core" Processors (Atom) */ -#define INTEL_FAM6_ATOM_PINEVIEW 0x1C -#define INTEL_FAM6_ATOM_LINCROFT 0x26 -#define INTEL_FAM6_ATOM_PENWELL 0x27 -#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 -#define INTEL_FAM6_ATOM_CEDARVIEW 0x36 -#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ -#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ -#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ -#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ -#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ -#define INTEL_FAM6_ATOM_GOLDMONT 0x5C -#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ -#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A +#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ +#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ + +#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */ +#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */ +#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ + +#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ +#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */ +#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ + +#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ +#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ + +#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ +#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ +#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ + +#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ +#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ +#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ /* Xeon Phi */ #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ #define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */ +/* Family 5 */ +#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */ + /* Useful macros */ #define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \ { \ diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index ae26df1c27896d20d25ba18555d14625b905077a..8380c3ddd4b2ee29ec5a9ca7a117b0f1501bc6f0 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -8,7 +8,7 @@ /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS 8 -#define MAX_FIXED_PEBS_EVENTS 3 +#define MAX_FIXED_PEBS_EVENTS 4 /* * A debug store configuration. 
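The new INTEL_FAM6_* model numbers above are the values that the X86_MATCH_INTEL_FAM6_MODEL() helper from <asm/cpu_device_id.h> expands against, with INTEL_FAM6_ANY providing the wildcard entry. A hypothetical driver-side match table might look like the sketch below; the table name and driver_data values are invented for illustration::

    #include <linux/errno.h>
    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    static const struct x86_cpu_id example_cpu_ids[] = {
            X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,    (void *)1),
            X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, (void *)2),
            {}      /* terminating empty entry */
    };

    static int __init example_init(void)
    {
            const struct x86_cpu_id *id = x86_match_cpu(example_cpu_ids);

            if (!id)
                    return -ENODEV;
            /* id->driver_data carries the value cast in by the match macro */
            return 0;
    }
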
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 15450a675031d3562b4a9da3d3b15816c11003fb..c99c66b41e53261e14c9bb3ab081e181d326d689 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -6,6 +6,8 @@ #ifndef __ASSEMBLY__ +#include + /* Provide __cpuidle; we can't safely include */ #define __cpuidle __attribute__((__section__(".cpuidle.text"))) @@ -54,11 +56,13 @@ static inline void native_irq_enable(void) static inline __cpuidle void native_safe_halt(void) { + mds_idle_clear_cpu_buffers(); asm volatile("sti; hlt": : :"memory"); } static inline __cpuidle void native_halt(void) { + mds_idle_clear_cpu_buffers(); asm volatile("hlt": : :"memory"); } diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 8c0de4282659b52079dff43b68affcbed665109f..7010e1c594c435889dc3d623e1e082dbd5fb4bc4 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -2,19 +2,6 @@ #ifndef _ASM_X86_JUMP_LABEL_H #define _ASM_X86_JUMP_LABEL_H -#ifndef HAVE_JUMP_LABEL -/* - * For better or for worse, if jump labels (the gcc extension) are missing, - * then the entire static branch patching infrastructure is compiled out. - * If that happens, the code in here will malfunction. Raise a compiler - * error instead. - * - * In theory, jump labels and the static branch patching infrastructure - * could be decoupled to fix this. - */ -#error asm/jump_label.h included on a non-jump-label kernel -#endif - #define JUMP_LABEL_NOP_SIZE 5 #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index f327236f0fa7108454296a2af00ccc8e99fe571c..b3a2b4309a31e4ab982b0a9e841c401e8fccb1c7 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -18,6 +18,9 @@ # define KEXEC_CONTROL_CODE_MAX_SIZE 2048 +/* 16M alignment for crash kernel regions */ +#define CRASH_ALIGN (16 << 20) + #ifndef __ASSEMBLY__ #include @@ -67,7 +70,7 @@ struct kimage; /* Memory to backup during crash kdump */ #define KEXEC_BACKUP_SRC_START (0UL) -#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */ +#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */ /* * CPU does not save ss and sp on stack if execution is already diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 0f82cd91cd3c4d630993d573d7c6e9970d476414..93c4bf598fb06c7e53865141dd3e7faa514194ff 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -364,6 +364,10 @@ struct x86_emulate_ctxt { #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + #define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09b2e3e2cf1bec5f7b745590b187521c8576eacc..3eae4fd19060e96350bad529ef3239e7efe64c3e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -49,6 +49,9 @@ #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS +#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ + KVM_DIRTY_LOG_INITIALLY_SET) + /* x86-specific vcpu->requests bit members */ #define KVM_REQ_MIGRATE_TIMER 
KVM_ARCH_REQ(0) #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1) @@ -117,7 +120,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) } #define KVM_PERMILLE_MMU_PAGES 20 -#define KVM_MIN_ALLOC_MMU_PAGES 64 +#define KVM_MIN_ALLOC_MMU_PAGES 64UL #define KVM_MMU_HASH_SHIFT 12 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) #define KVM_MIN_FREE_MMU_PAGES 5 @@ -177,6 +180,7 @@ enum { #define DR6_BD (1 << 13) #define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) #define DR6_FIXED_1 0xfffe0ff0 #define DR6_INIT 0xffff0ff0 @@ -210,13 +214,6 @@ enum { PFERR_WRITE_MASK | \ PFERR_PRESENT_MASK) -/* - * The mask used to denote special SPTEs, which can be either MMIO SPTEs or - * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting - * with the SVE bit in EPT PTEs. - */ -#define SPTE_SPECIAL_MASK (1ULL << 62) - /* apic attention bits */ #define KVM_APIC_CHECK_VAPIC 0 /* @@ -280,6 +277,7 @@ struct kvm_rmap_head { struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; + struct list_head lpage_disallowed_link; /* * The following two entries are used to key the shadow page in the @@ -292,6 +290,7 @@ struct kvm_mmu_page { /* hold the gfn of each spte inside spt */ gfn_t *gfns; bool unsync; + bool lpage_disallowed; /* Can't be replaced by an equiv large page */ int root_count; /* Currently serving as active root */ unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ @@ -314,6 +313,7 @@ struct kvm_mmu_page { }; struct kvm_pio_request { + unsigned long linear_rip; unsigned long count; int in; int port; @@ -523,9 +523,11 @@ struct kvm_vcpu_arch { u64 ia32_misc_enable_msr; u64 smbase; u64 smi_count; + bool at_instruction_boundary; bool tpr_access_reporting; u64 ia32_xss; u64 microcode_version; + u64 arch_capabilities; /* * Paging state of the vcpu @@ -663,7 +665,7 @@ struct kvm_vcpu_arch { /* Cache MMIO info */ u64 mmio_gva; - unsigned access; + unsigned mmio_access; gfn_t mmio_gfn; u64 mmio_gen; @@ -702,6 +704,8 @@ struct kvm_vcpu_arch { struct gfn_to_hva_cache data; } pv_eoi; + u64 msr_kvm_poll_control; + /* * Indicate whether the access faults on its page table in guest * which is set when fix page fault and used to detect unhandeable @@ -781,6 +785,9 @@ struct kvm_hv { u64 hv_reenlightenment_control; u64 hv_tsc_emulation_control; u64 hv_tsc_emulation_status; + + /* How many vCPUs have VP index != vCPU index */ + atomic_t num_mismatched_vp_indexes; }; enum kvm_irqchip_mode { @@ -790,9 +797,9 @@ enum kvm_irqchip_mode { }; struct kvm_arch { - unsigned int n_used_mmu_pages; - unsigned int n_requested_mmu_pages; - unsigned int n_max_mmu_pages; + unsigned long n_used_mmu_pages; + unsigned long n_requested_mmu_pages; + unsigned long n_max_mmu_pages; unsigned int indirect_shadow_pages; unsigned long mmu_valid_gen; struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; @@ -801,6 +808,7 @@ struct kvm_arch { */ struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; + struct list_head lpage_disallowed_mmu_pages; struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; @@ -871,6 +879,8 @@ struct kvm_arch { bool x2apic_broadcast_quirk_disabled; bool guest_can_read_msr_platform_info; + + struct task_struct *nx_lpage_recovery_thread; }; struct kvm_vm_stat { @@ -884,10 +894,12 @@ struct kvm_vm_stat { ulong mmu_unsync; ulong remote_tlb_flush; ulong lpages; + ulong nx_lpage_splits; ulong max_mmu_page_hash_collisions; }; 
struct kvm_vcpu_stat { + u64 pid; u64 pf_fixed; u64 pf_guest; u64 tlb_flush; @@ -915,6 +927,20 @@ struct kvm_vcpu_stat { u64 irq_injections; u64 nmi_injections; u64 req_event; + u64 cr_exits; + u64 msr_rd_exits; + u64 msr_wr_exits; + u64 apic_wr_exits; + u64 ept_vio_exits; + u64 ept_mis_exits; + u64 pause_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; + u64 preemption_reported; + u64 preemption_other; }; struct x86_instruction_info; @@ -1045,7 +1071,8 @@ struct kvm_x86_ops { bool (*has_wbinvd_exit)(void); u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); - void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); + /* Returns actual tsc_offset set in active VMCS */ + u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); @@ -1056,6 +1083,7 @@ struct kvm_x86_ops { bool (*mpx_supported)(void); bool (*xsaves_supported)(void); bool (*umip_emulated)(void); + bool (*pku_supported)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); @@ -1109,6 +1137,7 @@ struct kvm_x86_ops { int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); + bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc); void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); @@ -1179,7 +1208,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot); + struct kvm_memory_slot *memslot, + int start_level); void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot); void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, @@ -1192,9 +1222,9 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); void kvm_mmu_zap_all(struct kvm *kvm); -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots); -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); -void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); +unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm); +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); bool pdptrs_changed(struct kvm_vcpu *vcpu); @@ -1423,25 +1453,29 @@ enum { #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) +asmlinkage void __noreturn kvm_spurious_fault(void); + /* * Hardware virtualization extension instructions may fault if a * reboot turns off virtualization while processes are running. - * Trap the fault and ignore the instruction if that happens. + * Usually after catching the fault we just panic; during reboot + * instead the instruction is ignored. 
*/ -asmlinkage void kvm_spurious_fault(void); - -#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \ - "666: " insn "\n\t" \ - "668: \n\t" \ - ".pushsection .fixup, \"ax\" \n" \ - "667: \n\t" \ - cleanup_insn "\n\t" \ - "cmpb $0, kvm_rebooting \n\t" \ - "jne 668b \n\t" \ - __ASM_SIZE(push) " $666b \n\t" \ - "call kvm_spurious_fault \n\t" \ - ".popsection \n\t" \ - _ASM_EXTABLE(666b, 667b) +#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \ + "666: \n\t" \ + insn "\n\t" \ + "jmp 668f \n\t" \ + "667: \n\t" \ + "call kvm_spurious_fault \n\t" \ + "668: \n\t" \ + ".pushsection .fixup, \"ax\" \n\t" \ + "700: \n\t" \ + cleanup_insn "\n\t" \ + "cmpb $0, kvm_rebooting\n\t" \ + "je 667b \n\t" \ + "jmp 668b \n\t" \ + ".popsection \n\t" \ + _ASM_EXTABLE(666b, 700b) #define __kvm_handle_fault_on_reboot(insn) \ ____kvm_handle_fault_on_reboot(insn, "") diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 172f9749dbb24c405b6cb05a224663b634f4456a..5986bd4aacd621482e1acc874fe63ea1c67d9cf0 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node { struct kvm_page_track_notifier_node *node); }; -void kvm_page_track_init(struct kvm *kvm); +int kvm_page_track_init(struct kvm *kvm); void kvm_page_track_cleanup(struct kvm *kvm); void kvm_page_track_free_memslot(struct kvm_memory_slot *free, diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 4c723632c036e4c53f320e5caf39dbf472a41486..7caaff8c79adacee1dd4bd776f5bf04e034670dc 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -6,8 +6,6 @@ #include #include -extern void kvmclock_init(void); - #ifdef CONFIG_KVM_GUEST bool kvm_check_and_clear_guest_paused(void); #else @@ -85,6 +83,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, } #ifdef CONFIG_KVM_GUEST +void kvmclock_init(void); +void kvmclock_disable(void); bool kvm_para_available(void); unsigned int kvm_arch_para_features(void); unsigned int kvm_arch_para_hints(void); diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index ed80003ce3e22255af9f4e8f5153149e09376ead..8f8533dd36fc15151581d24491b406011f179ae8 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -24,17 +24,48 @@ #include #include +#ifdef CONFIG_LIVEPATCH static inline int klp_check_compiler_support(void) { -#ifndef CC_USING_FENTRY +#if defined(CONFIG_LIVEPATCH_FTRACE) && !defined(CC_USING_FENTRY) return 1; #endif return 0; } +#ifdef CONFIG_LIVEPATCH_FTRACE static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; } +static inline unsigned long klp_arch_stub_ip(unsigned long addr) +{ + return addr; +} +#else /* CONFIG_LIVEPATCH_WO_FTRACE */ +#define klp_smp_isb() + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + BUG(); +} + +static inline unsigned long klp_arch_stub_ip(unsigned long addr) +{ + BUG(); + return 0; +} + +struct klp_patch; +struct klp_func; +int arch_klp_patch_func(struct klp_func *func); +void arch_klp_unpatch_func(struct klp_func *func); +int klp_check_calltrace(struct klp_patch *patch, int enable); +#endif + +#else +#error Live patching support is disabled; check CONFIG_LIVEPATCH +#endif + #endif /* _ASM_X86_LIVEPATCH_H */ diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 
c91083c598457140925d1f0928a6c8fa4811004a..349a47acaa4a3524e25f056152cd701bd2c26224 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -53,7 +53,7 @@ static inline void local_sub(long i, local_t *l) */ static inline bool local_sub_and_test(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e); + return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i); } /** @@ -66,7 +66,7 @@ static inline bool local_sub_and_test(long i, local_t *l) */ static inline bool local_dec_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e); + return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e); } /** @@ -79,7 +79,7 @@ static inline bool local_dec_and_test(local_t *l) */ static inline bool local_inc_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e); + return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e); } /** @@ -93,7 +93,7 @@ static inline bool local_inc_and_test(local_t *l) */ static inline bool local_add_negative(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s); + return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i); } /** diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 3a17107594c88a83f013ecc1123c10d76c6d30c2..86cf068338c6c54b4f60aa7671c9b4c797da3395 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -10,41 +10,44 @@ /* MCG_CAP register defines */ #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ -#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ -#define MCG_EXT_P (1ULL<<9) /* Extended registers available */ -#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ +#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */ +#define MCG_EXT_P BIT_ULL(9) /* Extended registers available */ +#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */ #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ #define MCG_EXT_CNT_SHIFT 16 #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) -#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ -#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */ -#define MCG_LMCE_P (1ULL<<27) /* Local machine check supported */ +#define MCG_SER_P BIT_ULL(24) /* MCA recovery/new status bits */ +#define MCG_ELOG_P BIT_ULL(26) /* Extended error log supported */ +#define MCG_LMCE_P BIT_ULL(27) /* Local machine check supported */ /* MCG_STATUS register defines */ -#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ -#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ -#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ -#define MCG_STATUS_LMCES (1ULL<<3) /* LMCE signaled */ +#define MCG_STATUS_RIPV BIT_ULL(0) /* restart ip valid */ +#define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */ +#define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */ +#define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */ /* MCG_EXT_CTL register defines */ -#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */ +#define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */ /* MCi_STATUS register defines */ -#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ -#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ -#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ -#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ -#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ -#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. 
valid */ -#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ -#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ -#define MCI_STATUS_AR (1ULL<<55) /* Action required */ +#define MCI_STATUS_VAL BIT_ULL(63) /* valid error */ +#define MCI_STATUS_OVER BIT_ULL(62) /* previous errors lost */ +#define MCI_STATUS_UC BIT_ULL(61) /* uncorrected error */ +#define MCI_STATUS_EN BIT_ULL(60) /* error enabled */ +#define MCI_STATUS_MISCV BIT_ULL(59) /* misc error reg. valid */ +#define MCI_STATUS_ADDRV BIT_ULL(58) /* addr reg. valid */ +#define MCI_STATUS_PCC BIT_ULL(57) /* processor context corrupt */ +#define MCI_STATUS_S BIT_ULL(56) /* Signaled machine check */ +#define MCI_STATUS_AR BIT_ULL(55) /* Action required */ +#define MCI_STATUS_CEC_SHIFT 38 /* Corrected Error Count */ +#define MCI_STATUS_CEC_MASK GENMASK_ULL(52,38) +#define MCI_STATUS_CEC(c) (((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT) /* AMD-specific bits */ -#define MCI_STATUS_TCC (1ULL<<55) /* Task context corrupt */ -#define MCI_STATUS_SYNDV (1ULL<<53) /* synd reg. valid */ -#define MCI_STATUS_DEFERRED (1ULL<<44) /* uncorrected error, deferred exception */ -#define MCI_STATUS_POISON (1ULL<<43) /* access poisonous data */ +#define MCI_STATUS_TCC BIT_ULL(55) /* Task context corrupt */ +#define MCI_STATUS_SYNDV BIT_ULL(53) /* synd reg. valid */ +#define MCI_STATUS_DEFERRED BIT_ULL(44) /* uncorrected error, deferred exception */ +#define MCI_STATUS_POISON BIT_ULL(43) /* access poisonous data */ /* * McaX field if set indicates a given bank supports MCA extensions: @@ -84,7 +87,7 @@ #define MCI_MISC_ADDR_GENERIC 7 /* generic */ /* CTL2 register defines */ -#define MCI_CTL2_CMCI_EN (1ULL << 30) +#define MCI_CTL2_CMCI_EN BIT_ULL(30) #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL #define MCJ_CTX_MASK 3 @@ -183,12 +186,8 @@ void mce_setup(struct mce *m); void mce_log(struct mce *m); DECLARE_PER_CPU(struct device *, mce_device); -/* - * Maximum banks number. - * This is the limit of the current register layout on - * Intel CPUs. - */ -#define MAX_NR_BANKS 32 +/* Maximum number of MCA banks per CPU. */ +#define MAX_NR_BANKS 64 #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); @@ -214,8 +213,12 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; }; #endif +static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); } + int mce_available(struct cpuinfo_x86 *c); bool mce_is_memory_error(struct mce *m); +bool mce_is_correctable(struct mce *m); +int mce_usable_address(struct mce *m); DECLARE_PER_CPU(unsigned, mce_exception_count); DECLARE_PER_CPU(unsigned, mce_poll_count); @@ -292,6 +295,7 @@ extern void apei_mce_report_mem_error(int corrected, /* These may be used by multiple smca_hwid_mcatypes */ enum smca_bank_types { SMCA_LS = 0, /* Load Store */ + SMCA_LS_V2, /* Load Store */ SMCA_IF, /* Instruction Fetch */ SMCA_L2_CACHE, /* L2 Cache */ SMCA_DE, /* Decoder Unit */ @@ -300,11 +304,17 @@ enum smca_bank_types { SMCA_FP, /* Floating Point */ SMCA_L3_CACHE, /* L3 Cache */ SMCA_CS, /* Coherent Slave */ + SMCA_CS_V2, /* Coherent Slave */ SMCA_PIE, /* Power, Interrupts, etc. 
*/ SMCA_UMC, /* Unified Memory Controller */ SMCA_PB, /* Parameter Block */ SMCA_PSP, /* Platform Security Processor */ + SMCA_PSP_V2, /* Platform Security Processor */ SMCA_SMU, /* System Management Unit */ + SMCA_SMU_V2, /* System Management Unit */ + SMCA_MP5, /* Microprocessor 5 Unit */ + SMCA_NBIO, /* Northbridge IO Unit */ + SMCA_PCIE, /* PCI Express Unit */ N_SMCA_BANK_TYPES }; diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 2b7cc5397f80df70f6197cc4257e2f5aedbfcce4..bf4eb9057cda2b20f5bdd6cbd7bb5b29695c3d58 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -5,6 +5,7 @@ #include #include #include +#include struct ucode_patch { struct list_head plist; @@ -78,6 +79,12 @@ static inline struct microcode_ops * __init init_amd_microcode(void) static inline void __exit exit_amd_microcode(void) {} #endif +#ifdef CONFIG_MICROCODE_HYGON +extern const struct microcode_ops * __init init_hygon_microcode(void); +#else +#define init_hygon_microcode() NULL +#endif + #define MAX_UCODE_COUNT 128 #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) @@ -87,6 +94,9 @@ static inline void __exit exit_amd_microcode(void) {} #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c)))) @@ -113,6 +123,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -133,11 +146,13 @@ extern void load_ucode_ap(void); void reload_early_microcode(void); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); extern bool initrd_gone; +void microcode_bsp_resume(void); #else static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } static inline void load_ucode_ap(void) { } static inline void reload_early_microcode(void) { } +static inline void microcode_bsp_resume(void) { } static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; } #endif diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 209492849566cefc56305cd8e976fbf4a25d7527..b56a1c0d5f60a02b6618a5abf2e6041e3e1d1c38 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -41,18 +41,20 @@ struct microcode_amd { unsigned int mpb[0]; }; -#define PATCH_MAX_SIZE PAGE_SIZE +#define PATCH_MAX_SIZE (3 * PAGE_SIZE) #ifdef CONFIG_MICROCODE_AMD extern void __init load_ucode_amd_bsp(unsigned int family); extern void load_ucode_amd_ap(unsigned int family); extern int __init save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(void); +extern void amd_check_microcode(void); #else static inline void __init load_ucode_amd_bsp(unsigned int family) {} static inline void load_ucode_amd_ap(unsigned int family) {} static inline int __init save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } -void reload_ucode_amd(void) {} +static inline void reload_ucode_amd(void) {} +static inline void amd_check_microcode(void) {} #endif #endif /* _ASM_X86_MICROCODE_AMD_H */ diff 
--git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index eeeb9289c764db96099caea715682411d79396c8..2252b63d38b503fdcfce409b7c26396d8269fbfe 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); +/* + * Init a new mm. Used on mm copies, like at fork() + * and on mm's that are brand-new, like at execve(). + */ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { @@ -228,8 +232,22 @@ do { \ } while (0) #endif +static inline void arch_dup_pkeys(struct mm_struct *oldmm, + struct mm_struct *mm) +{ +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS + if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) + return; + + /* Duplicate the oldmm pkey state in mm: */ + mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; +#endif +} + static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { + arch_dup_pkeys(oldmm, mm); paravirt_arch_dup_mmap(oldmm, mm); return ldt_dup_context(oldmm, mm); } diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4731f0cf97c5c0058ec41cff870cdc5fef0eec67..3948ba5010a2a0a93846d6ab661cbed127e333ec 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_MSR_INDEX_H #define _ASM_X86_MSR_INDEX_H +#include + /* * CPU model specific register (MSR) numbers. * @@ -40,13 +42,20 @@ /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ -#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ -#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ +#define SPEC_CTRL_IBRS 1 /* Indirect Branch Restricted Speculation */ +#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */ +#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ -#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ +#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ +#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ +#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) + +/* A mask for bits which the kernel toggles when controlling mitigations */ +#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \ + | SPEC_CTRL_RRSBA_DIS_S) #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ -#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -68,24 +77,97 @@ #define MSR_MTRRcap 0x000000fe #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a -#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ -#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ -#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ -#define ARCH_CAP_SSB_NO (1 << 4) /* - * Not susceptible to Speculative Store Bypass - * attack, so no Speculative Store Bypass - * control required. 
- */ +#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ +#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ +#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */ +#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ +#define ARCH_CAP_SSB_NO BIT(4) /* + * Not susceptible to Speculative Store Bypass + * attack, so no Speculative Store Bypass + * control required. + */ +#define ARCH_CAP_MDS_NO BIT(5) /* + * Not susceptible to + * Microarchitectural Data + * Sampling (MDS) vulnerabilities. + */ +#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /* + * The processor is not susceptible to a + * machine check error due to modifying the + * code page size along with either the + * physical address or cache type + * without TLB invalidation. + */ +#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */ +#define ARCH_CAP_TAA_NO BIT(8) /* + * Not susceptible to + * TSX Async Abort (TAA) vulnerabilities. + */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ +#define ARCH_CAP_RRSBA BIT(19) /* + * Indicates RET may use predictors + * other than the RSB. With eIBRS + * enabled predictions in kernel mode + * are restricted to targets in + * kernel. + */ +#define ARCH_CAP_PBRSB_NO BIT(24) /* + * Not susceptible to Post-Barrier + * Return Stack Buffer Predictions. + */ +#define ARCH_CAP_GDS_CTRL BIT(25) /* + * CPU is vulnerable to Gather + * Data Sampling (GDS) and + * has controls for mitigation. + */ +#define ARCH_CAP_GDS_NO BIT(26) /* + * CPU is not vulnerable to Gather + * Data Sampling (GDS). + */ #define MSR_IA32_FLUSH_CMD 0x0000010b -#define L1D_FLUSH (1 << 0) /* - * Writeback and invalidate the - * L1 data cache. - */ +#define L1D_FLUSH BIT(0) /* + * Writeback and invalidate the + * L1 data cache. 
+ */ #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e +#define MSR_IA32_TSX_CTRL 0x00000122 +#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ +#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ + +/* SRBDS support */ +#define MSR_IA32_MCU_OPT_CTRL 0x00000123 +#define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ +#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ +#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 @@ -115,6 +197,7 @@ #define LBR_INFO_CYCLES 0xffff #define MSR_IA32_PEBS_ENABLE 0x000003f1 +#define MSR_PEBS_DATA_CFG 0x000003f2 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 @@ -326,11 +409,13 @@ #define MSR_AMD64_PATCH_LEVEL 0x0000008b #define MSR_AMD64_TSC_RATIO 0xc0000104 #define MSR_AMD64_NB_CFG 0xc001001f +#define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 +#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 @@ -348,6 +433,7 @@ #define MSR_AMD64_IBSOP_REG_MASK ((1UL< #include +#include #define MWAIT_SUBSTATE_MASK 0xf #define MWAIT_CSTATE_MASK 0xf @@ -20,7 +21,7 @@ #define MWAIT_ECX_INTERRUPT_BREAK 0x1 #define MWAITX_ECX_TIMER_ENABLE BIT(1) #define MWAITX_MAX_LOOPS ((u32)-1) -#define MWAITX_DISABLE_CSTATES 0xf +#define MWAITX_DISABLE_CSTATES 0xf0 static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) @@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx, static inline void __mwait(unsigned long eax, unsigned long ecx) { + mds_idle_clear_cpu_buffers(); + /* "mwait %eax, %ecx;" */ asm volatile(".byte 0x0f, 0x01, 0xc9;" :: "a" (eax), "c" (ecx)); @@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) static inline void __mwaitx(unsigned long eax, unsigned long ebx, unsigned long ecx) { + /* No MDS buffer clear as this is AMD/HYGON only */ + /* "mwaitx %eax, %ebx, %ecx;" */ asm volatile(".byte 0x0f, 0x01, 0xfb;" :: "a" (eax), "b" (ebx), "c" (ecx)); @@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx, static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { + mds_idle_clear_cpu_buffers(); + trace_hardirqs_on(); /* "mwait %eax, %ecx;" */ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index fd2a8c1b88bc157106c4f9346b8e7bec29fe6ba3..538dae0f9192bb6d20b564e4fa3efa858837cd88 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -3,10 +3,15 @@ #ifndef _ASM_X86_NOSPEC_BRANCH_H_ #define _ASM_X86_NOSPEC_BRANCH_H_ +#include +#include + #include #include #include #include +#include +#include /* * Fill the CPU return stack buffer. @@ -33,6 +38,7 @@ * the optimal version — two calls, each with their own speculation * trap should their return address end up getting used, in a loop. 
*/ +#ifdef CONFIG_X86_64 #define __FILL_RETURN_BUFFER(reg, nr, sp) \ mov $(nr/2), reg; \ 771: \ @@ -48,9 +54,31 @@ lfence; \ jmp 775b; \ 774: \ + add $(BITS_PER_LONG/8) * 2, sp; \ dec reg; \ jnz 771b; \ + /* barrier for jnz misprediction */ \ + lfence; +#else +/* + * i386 doesn't unconditionally have LFENCE, as such it can't + * do a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr, sp) \ + .rept nr; \ + call 772f; \ + int3; \ +772:; \ + .endr; \ add $(BITS_PER_LONG/8) * nr, sp; +#endif + +#define ISSUE_UNBALANCED_RET_GUARD(sp) \ + call 992f; \ + int3; \ +992: \ + add $(BITS_PER_LONG/8), sp; \ + lfence; #ifdef __ASSEMBLY__ @@ -117,7 +145,7 @@ ANNOTATE_NOSPEC_ALTERNATIVE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ - __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_LFENCE #else jmp *\reg #endif @@ -128,7 +156,7 @@ ANNOTATE_NOSPEC_ALTERNATIVE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ - __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_LFENCE #else call *\reg #endif @@ -139,13 +167,9 @@ * monstrosity above, manually. */ .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req -#ifdef CONFIG_RETPOLINE - ANNOTATE_NOSPEC_ALTERNATIVE - ALTERNATIVE "jmp .Lskip_rsb_\@", \ - __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ - \ftr + ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr + __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP) .Lskip_rsb_\@: -#endif .endm #else /* __ASSEMBLY__ */ @@ -162,29 +186,35 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" -#if defined(CONFIG_X86_64) && defined(RETPOLINE) +#ifdef CONFIG_RETPOLINE +#ifdef CONFIG_X86_64 /* - * Since the inline asm uses the %V modifier which is only in newer GCC, - * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. + * Inline asm uses the %V modifier which is only in newer GCC + * which is ensured when CONFIG_RETPOLINE is defined. */ # define CALL_NOSPEC \ ANNOTATE_NOSPEC_ALTERNATIVE \ - ALTERNATIVE( \ + ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ "call __x86_indirect_thunk_%V[thunk_target]\n", \ - X86_FEATURE_RETPOLINE) + X86_FEATURE_RETPOLINE, \ + "lfence;\n" \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + X86_FEATURE_RETPOLINE_LFENCE) # define THUNK_TARGET(addr) [thunk_target] "r" (addr) -#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) +#else /* CONFIG_X86_32 */ /* * For i386 we use the original ret-equivalent retpoline, because * otherwise we'll run out of registers. We don't care about CET * here, anyway. 
*/ # define CALL_NOSPEC \ - ALTERNATIVE( \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ " jmp 904f;\n" \ @@ -194,14 +224,19 @@ " lfence;\n" \ " jmp 902b;\n" \ " .align 16\n" \ - "903: addl $4, %%esp;\n" \ + "903: lea 4(%%esp), %%esp;\n" \ " pushl %[thunk_target];\n" \ " ret;\n" \ " .align 16\n" \ "904: call 901b;\n", \ - X86_FEATURE_RETPOLINE) + X86_FEATURE_RETPOLINE, \ + "lfence;\n" \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + X86_FEATURE_RETPOLINE_LFENCE) # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#endif #else /* No retpoline for C / inline asm */ # define CALL_NOSPEC "call *%[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) @@ -210,11 +245,21 @@ /* The Spectre V2 mitigation variants */ enum spectre_v2_mitigation { SPECTRE_V2_NONE, - SPECTRE_V2_RETPOLINE_MINIMAL, - SPECTRE_V2_RETPOLINE_MINIMAL_AMD, - SPECTRE_V2_RETPOLINE_GENERIC, - SPECTRE_V2_RETPOLINE_AMD, - SPECTRE_V2_IBRS_ENHANCED, + SPECTRE_V2_RETPOLINE, + SPECTRE_V2_LFENCE, + SPECTRE_V2_EIBRS, + SPECTRE_V2_EIBRS_RETPOLINE, + SPECTRE_V2_EIBRS_LFENCE, + SPECTRE_V2_IBRS, +}; + +/* The indirect branch speculation control variants */ +enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE, + SPECTRE_V2_USER_STRICT, + SPECTRE_V2_USER_STRICT_PREFERRED, + SPECTRE_V2_USER_PRCTL, + SPECTRE_V2_USER_SECCOMP, }; /* The Speculative Store Bypass disable variants */ @@ -234,15 +279,17 @@ extern char __indirect_thunk_end[]; * retpoline and IBRS mitigations for Spectre v2 need this; only on future * CPUs with IBRS_ALL *might* it be avoided. */ -static inline void vmexit_fill_RSB(void) +static __always_inline void vmexit_fill_RSB(void) { #ifdef CONFIG_RETPOLINE unsigned long loops; asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE - ALTERNATIVE("jmp 910f", - __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), - X86_FEATURE_RETPOLINE) + ALTERNATIVE_2("jmp 910f", "", X86_FEATURE_RSB_VMEXIT, + "jmp 911f", X86_FEATURE_RSB_VMEXIT_LITE) + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)) + "911:" + __stringify(ISSUE_UNBALANCED_RET_GUARD(%1)) "910:" : "=r" (loops), ASM_CALL_CONSTRAINT : : "memory" ); @@ -269,6 +316,9 @@ static inline void indirect_branch_prediction_barrier(void) /* The Intel SPEC CTRL MSR base value cache */ extern u64 x86_spec_ctrl_base; +DECLARE_PER_CPU(u64, x86_spec_ctrl_current); +extern void write_spec_ctrl_current(u64 val, bool force); +extern u64 spec_ctrl_current(void); /* * With retpoline, we must use IBRS to restrict branch prediction @@ -278,22 +328,76 @@ extern u64 x86_spec_ctrl_base; */ #define firmware_restrict_branch_speculation_start() \ do { \ - u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ - \ preempt_disable(); \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, \ + spec_ctrl_current() | SPEC_CTRL_IBRS, \ X86_FEATURE_USE_IBRS_FW); \ } while (0) #define firmware_restrict_branch_speculation_end() \ do { \ - u64 val = x86_spec_ctrl_base; \ - \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, \ + spec_ctrl_current(), \ X86_FEATURE_USE_IBRS_FW); \ preempt_enable(); \ } while (0) +DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); +DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +DECLARE_STATIC_KEY_FALSE(mds_user_clear); +DECLARE_STATIC_KEY_FALSE(mds_idle_clear); + +DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + +#include + +/** + * mds_clear_cpu_buffers - 
Mitigation for MDS and TAA vulnerability + * + * This uses the otherwise unused and obsolete VERW instruction in + * combination with microcode which triggers a CPU buffer flush when the + * instruction is executed. + */ +static inline void mds_clear_cpu_buffers(void) +{ + static const u16 ds = __KERNEL_DS; + + /* + * Has to be the memory-operand variant because only that + * guarantees the CPU buffer flush functionality according to + * documentation. The register-operand variant does not. + * Works with any segment selector, but a valid writable + * data segment is the fastest variant. + * + * "cc" clobber is required because VERW modifies ZF. + */ + asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); +} + +/** + * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability + * + * Clear CPU buffers if the corresponding static key is enabled + */ +static inline void mds_user_clear_cpu_buffers(void) +{ + if (static_branch_likely(&mds_user_clear)) + mds_clear_cpu_buffers(); +} + +/** + * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability + * + * Clear CPU buffers if the corresponding static key is enabled + */ +static inline void mds_idle_clear_cpu_buffers(void) +{ + if (static_branch_likely(&mds_idle_clear)) + mds_clear_cpu_buffers(); +} + #endif /* __ASSEMBLY__ */ /* diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 6afac386a434ed8bf43f8fef086a19863b56c8de..0b6352aabbd3df3c84e0c55fbaa231ede922c0c7 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -7,7 +7,11 @@ #endif #ifdef CONFIG_KASAN +#ifdef CONFIG_KASAN_EXTRA +#define KASAN_STACK_ORDER 2 +#else #define KASAN_STACK_ORDER 1 +#endif #else #define KASAN_STACK_ORDER 0 #endif @@ -33,12 +37,14 @@ /* * Set __PAGE_OFFSET to the most negative possible address + - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's - * what Xen requires. + * PGDIR_SIZE*17 (pgd slot 273). + * + * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for + * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary, + * but it's what Xen requires. 
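+ *
+ * Worked out, for illustration:
+ *
+ *   5-level: 0xff00000000000000 + 17 * (1UL << 48) = 0xff11000000000000
+ *   4-level: 0xffff800000000000 + 17 * (1UL << 39) = 0xffff888000000000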
*/ -#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) -#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) +#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) +#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index e375d4266b53e35bbe2383d7633d478f29871c4f..a04677038872c89f0cf8c0f00d4703630d8a4080 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -768,6 +768,7 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu) PV_RESTORE_ALL_CALLER_REGS \ FRAME_END \ "ret;" \ + ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \ ".popsection") /* Get a reference to a callee-save function */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 78241b736f2a04aa4ccac261727ecd8798042cda..73986a6b340a8852e22a0dc58bc214618c2a5bb5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -7,7 +7,7 @@ */ #define INTEL_PMC_MAX_GENERIC 32 -#define INTEL_PMC_MAX_FIXED 3 +#define INTEL_PMC_MAX_FIXED 4 #define INTEL_PMC_IDX_FIXED 32 #define X86_PMC_IDX_MAX 64 @@ -32,6 +32,8 @@ #define HSW_IN_TX (1ULL << 32) #define HSW_IN_TX_CHECKPOINTED (1ULL << 33) +#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34) +#define ICL_FIXED_0_ADAPTIVE (1ULL << 32) #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) @@ -46,13 +48,32 @@ #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ - ((0xFULL) << AMD64_L3_SLICE_SHIFT) + (0xFULL << AMD64_L3_SLICE_SHIFT) +#define AMD64_L3_SLICEID_MASK \ + (0x7ULL << AMD64_L3_SLICE_SHIFT) #define AMD64_L3_THREAD_SHIFT 56 #define AMD64_L3_THREAD_MASK \ - ((0xFFULL) << AMD64_L3_THREAD_SHIFT) + (0xFFULL << AMD64_L3_THREAD_SHIFT) +#define AMD64_L3_F19H_THREAD_MASK \ + (0x3ULL << AMD64_L3_THREAD_SHIFT) + +#define AMD64_L3_EN_ALL_CORES BIT_ULL(47) +#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46) + +#define AMD64_L3_COREID_SHIFT 42 +#define AMD64_L3_COREID_MASK \ + (0x7ULL << AMD64_L3_COREID_SHIFT) #define X86_RAW_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_EVENT | \ @@ -87,6 +108,12 @@ #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 #define ARCH_PERFMON_EVENTS_COUNT 7 +#define PEBS_DATACFG_MEMINFO BIT_ULL(0) +#define PEBS_DATACFG_GP BIT_ULL(1) +#define PEBS_DATACFG_XMMS BIT_ULL(2) +#define PEBS_DATACFG_LBRS BIT_ULL(3) +#define PEBS_DATACFG_LBR_SHIFT 24 + /* * Intel "Architectural Performance Monitoring" CPUID * detection/enumeration details: @@ -137,13 +164,29 @@ struct x86_pmu_capability { * Fixed-purpose performance events: */ +/* RDPMC offset for Fixed PMCs */ +#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30) +#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29) + /* - * All 3 fixed-mode PMCs are configured via this single MSR: + * All the fixed-mode PMCs are configured via this single MSR: */ #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d /* - * The counts are available in three separate MSRs: + * There is no event-code assigned to the fixed-mode PMCs. 
+ * + * For a fixed-mode PMC, which has an equivalent event on a general-purpose + * PMC, the event-code of the equivalent event is used for the fixed-mode PMC, + * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core. + * + * For a fixed-mode PMC, which doesn't have an equivalent event, a + * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS. + * The pseudo event-code for a fixed-mode PMC must be 0x00. + * The pseudo umask-code is 0xX. The X equals the index of the fixed + * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300. + * + * The counts are available in separate MSRs: */ /* Instr_Retired.Any: */ @@ -154,27 +197,137 @@ struct x86_pmu_capability { #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a #define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1) -/* CPU_CLK_Unhalted.Ref: */ +/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */ #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b #define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2) #define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES) +/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */ +#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c +#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3) +#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS) + /* * We model BTS tracing as another fixed-mode PMC. * - * We choose a value in the middle of the fixed event range, since lower + * We choose the value 47 for the fixed index of BTS, since lower * values are used by actual fixed events and higher values are used * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. */ -#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) +#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15) -#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) -#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62) -#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) -#define GLOBAL_STATUS_ASIF BIT_ULL(60) -#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) -#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58) -#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55) +/* + * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for + * each TopDown metric event. + * + * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS). + */ +#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16) +#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0) +#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1) +#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2) +#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3) +#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_BE_BOUND +#define INTEL_PMC_MSK_TOPDOWN ((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \ + INTEL_PMC_MSK_FIXED_SLOTS) + +/* + * There is no event-code assigned to the TopDown events. + * + * For the slots event, use the pseudo code of the fixed counter 3. + * + * For the metric events, the pseudo event-code is 0x00. + * The pseudo umask-code starts from the middle of the pseudo event + * space, 0x80. 
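+ *
+ * E.g. the "retiring" metric is umask 0x80 / event 0x00, i.e. 0x8000, and
+ * "bad speculation" is umask 0x81, i.e. 0x8100 (see the defines below).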
+ */ +#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */ +/* Level 1 metrics */ +#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */ +#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */ +#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */ +#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */ +#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_BE_BOUND +#define INTEL_TD_METRIC_NUM 4 + +static inline bool is_metric_idx(int idx) +{ + return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM; +} + +static inline bool is_topdown_idx(int idx) +{ + return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS; +} + +#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \ + (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN) + +#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) +#define GLOBAL_STATUS_BUFFER_OVF_BIT 62 +#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT) +#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) +#define GLOBAL_STATUS_ASIF BIT_ULL(60) +#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) +#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58 +#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT) +#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55 +#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT) +#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48 + +#define GLOBAL_CTRL_EN_PERF_METRICS 48 +/* + * We model guest LBR event tracing as another fixed-mode PMC like BTS. + * + * We choose bit 58 because it's used to indicate LBR stack frozen state + * for architectural perfmon v4, also we unconditionally mask that bit in + * the handle_pmi_common(), so it'll never be set in the overflow handling. + * + * With this fake counter assigned, the guest LBR event user (such as KVM), + * can program the LBR registers on its own, and we don't actually do anything + * with then in the host context. + */ +#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT) + +/* + * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b, + * since it would claim bit 58 which is effectively Fixed26. 
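+ *
+ * Worked out: bit 58 - INTEL_PMC_IDX_FIXED (32) = fixed counter 26, and the
+ * pseudo umask of fixed counter N is N + 1, so 26 + 1 = 27 = 0x1b.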
+ */ +#define INTEL_FIXED_VLBR_EVENT 0x1b00 + +/* + * Adaptive PEBS v4 + */ + +struct pebs_basic { + u64 format_size; + u64 ip; + u64 applicable_counters; + u64 tsc; +}; + +struct pebs_meminfo { + u64 address; + u64 aux; + u64 latency; + u64 tsx_tuning; +}; + +struct pebs_gprs { + u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di; + u64 r8, r9, r10, r11, r12, r13, r14, r15; +}; + +struct pebs_xmm { + u64 xmm[16*2]; /* two entries for each register */ +}; + +struct pebs_lbr_entry { + u64 from, to, info; +}; + +struct pebs_lbr { + struct pebs_lbr_entry lbr[0]; /* Variable length */ +}; /* * IBS cpuid feature detection @@ -209,21 +362,26 @@ struct x86_pmu_capability { #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) #define IBSCTL_LVT_OFFSET_MASK 0x0F -/* ibs fetch bits/masks */ +/* IBS fetch bits/masks */ #define IBS_FETCH_RAND_EN (1ULL<<57) #define IBS_FETCH_VAL (1ULL<<49) #define IBS_FETCH_ENABLE (1ULL<<48) #define IBS_FETCH_CNT 0xFFFF0000ULL #define IBS_FETCH_MAX_CNT 0x0000FFFFULL -/* ibs op bits/masks */ -/* lower 4 bits of the current count are ignored: */ -#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) +/* + * IBS op bits/masks + * The lower 7 bits of the current count are random bits + * preloaded by hardware and ignored in software + */ +#define IBS_OP_CUR_CNT (0xFFF80ULL<<32) +#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) #define IBS_OP_MAX_CNT 0x0000FFFFULL #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ +#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */ #define IBS_RIP_INVALID (1ULL<<38) #ifdef CONFIG_X86_LOCAL_APIC @@ -248,6 +406,11 @@ extern void perf_events_lapic_init(void); #define PERF_EFLAGS_VM (1UL << 5) struct pt_regs; +struct x86_perf_regs { + struct pt_regs regs; + u64 *xmm_regs; +}; + extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) @@ -260,14 +423,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); */ #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ip = (__ip); \ - (regs)->bp = caller_frame_pointer(); \ + (regs)->sp = (unsigned long)__builtin_frame_address(0); \ (regs)->cs = __KERNEL_CS; \ regs->flags = 0; \ - asm volatile( \ - _ASM_MOV "%%"_ASM_SP ", %0\n" \ - : "=m" ((regs)->sp) \ - :: "memory" \ - ); \ } struct perf_guest_switch_msr { @@ -275,6 +433,13 @@ struct perf_guest_switch_msr { u64 host, guest; }; +struct x86_pmu_lbr { + unsigned int nr; + unsigned int from; + unsigned int to; + unsigned int info; +}; + extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); extern void perf_check_microcode(void); @@ -294,6 +459,15 @@ static inline void perf_events_lapic_init(void) { } static inline void perf_check_microcode(void) { } #endif +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) +extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr); +#else +static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr) +{ + return -1; +} +#endif + #ifdef CONFIG_CPU_SUP_INTEL extern void intel_pt_handle_vmx(int on); #endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 690c0307afed0932974e5965ccf7bed98daa10f9..1d79485ae9721dc2c8976c5fb7082adfd268fb37 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -128,7 +128,7 @@ static inline int 
pte_dirty(pte_t pte) static inline u32 read_pkru(void) { if (boot_cpu_has(X86_FEATURE_OSPKE)) - return __read_pkru(); + return rdpkru(); return 0; } @@ -237,6 +237,7 @@ static inline int pmd_large(pmd_t pte) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */ static inline int pmd_trans_huge(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; @@ -1352,8 +1353,8 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) #endif #endif -#define PKRU_AD_BIT 0x1 -#define PKRU_WD_BIT 0x2 +#define PKRU_AD_BIT 0x1u +#define PKRU_WD_BIT 0x2u #define PKRU_BITS_PER_PKEY 2 static inline bool __pkru_allows_read(u32 pkru, u16 pkey) diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index b3ec519e39827e58eaeb8a567303e37a6bc2e919..4fe9e7fc74d37d8a5af686cebf7d4a21d9c38b95 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -37,7 +37,7 @@ void sync_initial_page_table(void); /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are - * done without a 'access_ok(VERIFY_WRITE,..)' + * done without a 'access_ok( ..)' */ #undef TEST_ACCESS_OK diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 04edd2d58211a78e3261993bd8d0e088e3b4c4ef..88bca456da994c5b2a76f7046bf12f4f2615361d 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -111,9 +111,12 @@ extern unsigned int ptrs_per_p4d; */ #define MAXMEM (1UL << MAX_PHYSMEM_BITS) -#define LDT_PGD_ENTRY_L4 -3UL -#define LDT_PGD_ENTRY_L5 -112UL -#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) +#define GUARD_HOLE_PGD_ENTRY -256UL +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT) +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT) +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE) + +#define LDT_PGD_ENTRY -240UL #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 7f2dbd91fc743a516a0ad8e094680b13832f61ef..90cb2f36c042fec25dc765b4241d68ccb48a3eba 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -88,7 +88,7 @@ static __always_inline void __preempt_count_sub(int val) */ static __always_inline bool __preempt_count_dec_and_test(void) { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); + return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); } /* diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d53c54b842daca1c847d8076aadd89564b23cf00..b5b92fd355270cde82ea5fd42d5bfddaadde8217 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -117,7 +117,7 @@ struct cpuinfo_x86 { int x86_power; unsigned long loops_per_jiffy; /* cpuid returned max cores value: */ - u16 x86_max_cores; + u16 x86_max_cores; u16 apicid; u16 initial_apicid; u16 x86_clflush_size; @@ -135,6 +135,10 @@ struct cpuinfo_x86 { /* Address space bits used by the cache internally */ u8 x86_cache_bits; unsigned initialized : 1; +#ifndef __GENKSYMS__ + u16 cpu_die_id; + u16 logical_die_id; +#endif } __randomize_layout; struct cpuid_regs { @@ -155,7 +159,9 @@ enum cpuid_regs_idx { #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 
-#define X86_VENDOR_NUM 9 +#define X86_VENDOR_HYGON 9 +#define X86_VENDOR_ZHAOXIN 10 +#define X86_VENDOR_NUM 11 #define X86_VENDOR_UNKNOWN 0xff @@ -757,6 +763,7 @@ extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); extern void load_percpu_segment(int); extern void cpu_init(void); +extern void cr4_init(void); static inline unsigned long get_debugctlmsr(void) { @@ -951,9 +958,11 @@ static inline int mpx_disable_management(void) #ifdef CONFIG_CPU_SUP_AMD extern u16 amd_get_nb_id(int cpu); extern u32 amd_get_nodes_per_socket(void); +extern void amd_clear_divider(void); #else static inline u16 amd_get_nb_id(int cpu) { return 0; } static inline u32 amd_get_nodes_per_socket(void) { return 0; } +static inline void amd_clear_divider(void) { } #endif static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) @@ -997,4 +1006,19 @@ enum l1tf_mitigations { extern enum l1tf_mitigations l1tf_mitigation; +enum mds_mitigations { + MDS_MITIGATION_OFF, + MDS_MITIGATION_FULL, + MDS_MITIGATION_VMWERV, +}; + +enum taa_mitigations { + TAA_MITIGATION_OFF, + TAA_MITIGATION_UCODE_NEEDED, + TAA_MITIGATION_VERW, + TAA_MITIGATION_TSX_DISABLED, +}; + +extern bool gds_ucode_mitigated(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 6de1fd3d009744277c0be1593daaa380e1f0fedb..bb85b51085442390e1511d2dda7b159620ce7faa 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -159,6 +159,19 @@ static inline bool user_64bit_mode(struct pt_regs *regs) #endif } +/* + * Determine whether the register set came from any context that is running in + * 64-bit mode. + */ +static inline bool any_64bit_mode(struct pt_regs *regs) +{ +#ifdef CONFIG_X86_64 + return !user_mode(regs) || user_64bit_mode(regs); +#else + return false; +#endif +} + #ifdef CONFIG_X86_64 #define current_user_stack_pointer() current_pt_regs()->sp #define compat_user_stack_pointer() current_pt_regs()->sp @@ -236,24 +249,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs, (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); } +/** + * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns the address of the @n th entry of the + * kernel stack which is specified by @regs. If the @n th entry is NOT in + * the kernel stack, this returns NULL. + */ +static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return addr; + else + return NULL; +} + +/* To avoid include hell, we can't include uaccess.h */ +extern long probe_kernel_read(void *dst, const void *src, size_t size); + /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * is specified by @regs. If the @n th entry is NOT in the kernel stack * this returns 0. 
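+ *
+ * A sketch of typical use (e.g. kprobe events fetching a "$stackN"
+ * argument):
+ *
+ *	unsigned long val = regs_get_kernel_stack_nth(regs, n);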
*/ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { - unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); - addr += n; - if (regs_within_kernel_stack(regs, (unsigned long)addr)) - return *addr; - else - return 0; + unsigned long *addr; + unsigned long val; + long ret; + + addr = regs_get_kernel_stack_nth_addr(regs, n); + if (addr) { + ret = probe_kernel_read(&val, addr, sizeof(val)); + if (!ret) + return val; + } + return 0; } #define arch_has_single_step() (1) diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 3e70bed8a978e7ef3541d80ef860185c8a0f12de..4d425673d831ac0659ae2ce7f97150854fbe1ec7 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -6,14 +6,40 @@ #include #include #include +#include #define _Q_PENDING_LOOPS (1 << 9) +#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire + +static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock) +{ + return GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, + c, "I", _Q_PENDING_OFFSET); +} + +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) +{ + u32 val = 0; + + if (__queued_RMW_btsl(lock)) + val |= _Q_PENDING_VAL; + + val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; + + return val; +} + +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#endif + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock); +extern bool nopvspin; #define queued_spin_unlock queued_spin_unlock /** diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index 19b90521954c906248c19234bfd722116a0d0fec..232f856e0db067e285bbec67da28f0ee4996e083 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -17,7 +17,7 @@ */ #define _REFCOUNT_EXCEPTION \ ".pushsection .text..refcount\n" \ - "111:\tlea %[counter], %%" _ASM_CX "\n" \ + "111:\tlea %[var], %%" _ASM_CX "\n" \ "112:\t" ASM_UD2 "\n" \ ASM_UNREACHABLE \ ".popsection\n" \ @@ -43,7 +43,7 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r) { asm volatile(LOCK_PREFIX "addl %1,%0\n\t" REFCOUNT_CHECK_LT_ZERO - : [counter] "+m" (r->refs.counter) + : [var] "+m" (r->refs.counter) : "ir" (i) : "cc", "cx"); } @@ -52,7 +52,7 @@ static __always_inline void refcount_inc(refcount_t *r) { asm volatile(LOCK_PREFIX "incl %0\n\t" REFCOUNT_CHECK_LT_ZERO - : [counter] "+m" (r->refs.counter) + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } @@ -60,21 +60,37 @@ static __always_inline void refcount_dec(refcount_t *r) { asm volatile(LOCK_PREFIX "decl %0\n\t" REFCOUNT_CHECK_LE_ZERO - : [counter] "+m" (r->refs.counter) + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { - GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "er", i, "%0", e, "cx"); + bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", + REFCOUNT_CHECK_LT_ZERO, + r->refs.counter, e, "er", i, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) 
{ - GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "%0", e, "cx"); + bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", + REFCOUNT_CHECK_LT_ZERO, + r->refs.counter, e, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check @@ -92,7 +108,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) /* Did we try to increment from/to an undesirable state? */ if (unlikely(c < 0 || c == INT_MAX || result < c)) { asm volatile(REFCOUNT_ERROR - : : [counter] "m" (r->refs.counter) + : : [var] "m" (r->refs.counter) : "cc", "cx"); break; } diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index 4914a3e7c8035538a167c0dc23a2a33afd4a1ca2..8a9eba1915169b99a8b9b679110ca961a7b96fb2 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -2,56 +2,69 @@ #ifndef _ASM_X86_RMWcc #define _ASM_X86_RMWcc +/* This counts to 12. Any more, it will return 13th argument. */ +#define __RMWcc_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n +#define RMWcc_ARGS(X...) __RMWcc_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __RMWcc_CONCAT(a, b) a ## b +#define RMWcc_CONCAT(a, b) __RMWcc_CONCAT(a, b) + #define __CLOBBERS_MEM(clb...) "memory", ## clb -#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) +#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) /* Use asm goto */ -#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ -do { \ +#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ +({ \ + bool c = false; \ asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ - : : [counter] "m" (var), ## __VA_ARGS__ \ + : : [var] "m" (_var), ## __VA_ARGS__ \ : clobbers : cc_label); \ - return 0; \ -cc_label: \ - return 1; \ -} while (0) - -#define __BINARY_RMWcc_ARG " %1, " + if (0) { \ +cc_label: c = true; \ + } \ + c; \ +}) - -#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ /* Use flags output or a set instruction */ -#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ -do { \ +#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ +({ \ bool c; \ asm volatile (fullop CC_SET(cc) \ - : [counter] "+m" (var), CC_OUT(cc) (c) \ + : [var] "+m" (_var), CC_OUT(cc) (c) \ : __VA_ARGS__ : clobbers); \ - return c; \ -} while (0) - -#define __BINARY_RMWcc_ARG " %2, " + c; \ +}) -#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ +#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) -#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ - __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM(clobbers)) +#define GEN_UNARY_RMWcc_3(op, var, cc) \ + GEN_UNARY_RMWcc_4(op, var, cc, "%[var]") -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ - __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ - __CLOBBERS_MEM(), vcon (val)) +#define GEN_UNARY_RMWcc(X...) 
RMWcc_CONCAT(GEN_UNARY_RMWcc_, RMWcc_ARGS(X))(X) + +#define GEN_BINARY_RMWcc_6(op, var, cc, vcon, _val, arg0) \ + __GEN_RMWcc(op " %[val], " arg0, var, cc, \ + __CLOBBERS_MEM(), [val] vcon (_val)) + +#define GEN_BINARY_RMWcc_5(op, var, cc, vcon, val) \ + GEN_BINARY_RMWcc_6(op, var, cc, vcon, val, "%[var]") + +#define GEN_BINARY_RMWcc(X...) RMWcc_CONCAT(GEN_BINARY_RMWcc_, RMWcc_ARGS(X))(X) + +#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, cc, clobbers...) \ + __GEN_RMWcc(op " %[var]\n\t" suffix, var, cc, \ + __CLOBBERS_MEM(clobbers)) -#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ - clobbers...) \ - __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM(clobbers), vcon (val)) +#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, cc, vcon, _val, clobbers...)\ + __GEN_RMWcc(op " %[val], %[var]\n\t" suffix, var, cc, \ + __CLOBBERS_MEM(clobbers), [val] vcon (_val)) #endif /* _ASM_X86_RMWcc */ diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h index ae7c2c5cd7f0e2e9f2becb438a1366461f5725c6..5393babc05989ebc0cbcbbb21251f2c241e3df04 100644 --- a/arch/x86/include/asm/spec-ctrl.h +++ b/arch/x86/include/asm/spec-ctrl.h @@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); } +static inline u64 stibp_tif_to_spec_ctrl(u64 tifn) +{ + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); + return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); +} + static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) { BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); } +static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl) +{ + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); + return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); +} + static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) { return (tifn & _TIF_SSBD) ? 
x86_amd_ls_cfg_ssbd_mask : 0ULL; @@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void); static inline void speculative_store_bypass_ht_init(void) { } #endif -extern void speculative_store_bypass_update(unsigned long tif); - -static inline void speculative_store_bypass_update_current(void) -{ - speculative_store_bypass_update(current_thread_info()->flags); -} +extern void speculation_ctrl_update(unsigned long tif); +extern void speculation_ctrl_update_current(void); #endif diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 317fc59b512c56f7b30fe4686b4d138fef791ac9..af5f0d9a99511edeba28d5ece62d68753f3715ff 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -6,6 +6,8 @@ #ifdef __KERNEL__ #include +#include +#include /* * Volatile isn't enough to prevent the compiler from reordering the @@ -16,6 +18,8 @@ */ extern unsigned long __force_order; +void native_write_cr0(unsigned long val); + static inline unsigned long native_read_cr0(void) { unsigned long val; @@ -23,11 +27,6 @@ static inline unsigned long native_read_cr0(void) return val; } -static inline void native_write_cr0(unsigned long val) -{ - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); -} - static inline unsigned long native_read_cr2(void) { unsigned long val; @@ -72,10 +71,7 @@ static inline unsigned long native_read_cr4(void) return val; } -static inline void native_write_cr4(unsigned long val) -{ - asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); -} +void native_write_cr4(unsigned long val); #ifdef CONFIG_X86_64 static inline unsigned long native_read_cr8(void) @@ -92,7 +88,7 @@ static inline void native_write_cr8(unsigned long val) #endif #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS -static inline u32 __read_pkru(void) +static inline u32 rdpkru(void) { u32 ecx = 0; u32 edx, pkru; @@ -107,7 +103,7 @@ static inline u32 __read_pkru(void) return pkru; } -static inline void __write_pkru(u32 pkru) +static inline void wrpkru(u32 pkru) { u32 ecx = 0, edx = 0; @@ -118,8 +114,21 @@ static inline void __write_pkru(u32 pkru) asm volatile(".byte 0x0f,0x01,0xef\n\t" : : "a" (pkru), "c"(ecx), "d"(edx)); } + +static inline void __write_pkru(u32 pkru) +{ + /* + * WRPKRU is relatively expensive compared to RDPKRU. + * Avoid WRPKRU when it would not change the value. + */ + if (pkru == rdpkru()) + return; + + wrpkru(pkru); +} + #else -static inline u32 __read_pkru(void) +static inline u32 rdpkru(void) { return 0; } diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 8ec97a62c245175e87d96c0376eb962f51f8e91d..9c556ea2eaa728c959d159d6ed7671a47732e6df 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -55,8 +55,13 @@ /* * Initialize the stackprotector canary value. * - * NOTE: this must only be called from functions that never return, + * NOTE: this must only be called from functions that never return * and it must always be inlined. + * + * In addition, it should be called from a compilation unit for which + * stack protector is disabled. Alternatively, the caller should not end + * with a function call which gets tail-call optimized as that would + * lead to checking a modified canary value. 
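+ *
+ * Illustrative sketch of the tail-call hazard (hypothetical caller):
+ *
+ *	boot_init_stack_canary();
+ *	secondary_startup_fn();
+ *
+ * If that last call gets tail-call optimized, the caller's epilogue runs
+ * its canary check against the value just updated above and fails.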
*/ static __always_inline void boot_init_stack_canary(void) { diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index f335aad404a479e98e4a5d38dc41ee5e5aa419ec..beef7ad9e43a4a7391aad3db4231de806f4799d6 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -98,19 +98,6 @@ struct stack_frame_ia32 { u32 return_address; }; -static inline unsigned long caller_frame_pointer(void) -{ - struct stack_frame *frame; - - frame = __builtin_frame_address(0); - -#ifdef CONFIG_FRAME_POINTER - frame = frame->next_frame; -#endif - - return (unsigned long)frame; -} - void show_opcodes(struct pt_regs *regs, const char *loglvl); void show_ip(struct pt_regs *regs, const char *loglvl); #endif /* _ASM_X86_STACKTRACE_H */ diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 55d392c6bd29b3aae8034061ddc7c094fe0cf6fe..2fd165f1cffacca194c607730dde976a8c6d7978 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ -#if (__GNUC__ >= 4) #define memcpy(t, f, n) __builtin_memcpy(t, f, n) -#else -#define memcpy(t, f, n) \ - (__builtin_constant_p((n)) \ - ? __constant_memcpy((t), (f), (n)) \ - : __memcpy((t), (f), (n))) -#endif #endif #endif /* !CONFIG_FORTIFY_SOURCE */ @@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, { int d0, d1; -#if __GNUC__ == 4 && __GNUC_MINOR__ == 0 - /* Workaround for broken gcc 4.0 */ - register unsigned long eax asm("%eax") = pattern; -#else unsigned long eax = pattern; -#endif switch (count % 4) { case 0: @@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, size_t); #ifndef CONFIG_FORTIFY_SOURCE -#if (__GNUC__ >= 4) #define memset(s, c, count) __builtin_memset(s, c, count) -#else -#define memset(s, c, count) \ - (__builtin_constant_p(c) \ - ? 
__constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ - (count)) \ - : __memset((s), (c), (count))) -#endif #endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET16 diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index d33f92b9fa228d91a5c5356b4afa5e6fbdf98832..052a7a4ac02586157eea5cf4946b19fea12fabe5 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -32,21 +32,6 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); -#ifndef CONFIG_FORTIFY_SOURCE -#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 -#define memcpy(dst, src, len) \ -({ \ - size_t __len = (len); \ - void *__ret; \ - if (__builtin_constant_p(len) && __len >= 64) \ - __ret = __memcpy((dst), (src), __len); \ - else \ - __ret = __builtin_memcpy((dst), (src), __len); \ - __ret; \ -}) -#endif -#endif /* !CONFIG_FORTIFY_SOURCE */ - #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); void *__memset(void *s, int c, size_t n); diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index 8be6afb584715dc8d5a50d1bbd989bf053366294..32662cbaa27e8fa9a91151ba817b803d124cb3fb 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -21,7 +21,6 @@ struct saved_context { #endif unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; - bool misc_enable_saved; struct saved_msrs saved_msrs; struct desc_ptr gdt_desc; struct desc_ptr idt; @@ -30,6 +29,7 @@ struct saved_context { unsigned long tr; unsigned long safety; unsigned long return_address; + bool misc_enable_saved; } __attribute__((packed)); #endif /* _ASM_X86_SUSPEND_32_H */ diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index a7af9f53c0cb773d05fd84ebe94525a1971e3ebb..b2861400c6a2df9d15efaad23d2e7a3c3802e594 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h @@ -14,9 +14,13 @@ * Image of the saved processor state, used by the low level ACPI suspend to * RAM code and by the low level hibernation code. * - * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that - * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, - * still work as required. + * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S + * and make sure that __save/__restore_processor_state(), defined in + * arch/x86/power/cpu.c, still work as required. + * + * Because the structure is packed, make sure to avoid unaligned members. For + * optimisation purposes but also because tools like kmemleak only search for + * pointers that are aligned. 
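+ *
+ * E.g. keeping the lone bool (misc_enable_saved) at the very end, as done
+ * below, keeps all of the preceding unsigned long / u64 members aligned.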
*/ struct saved_context { struct pt_regs regs; @@ -36,7 +40,6 @@ struct saved_context { unsigned long cr0, cr2, cr3, cr4, cr8; u64 misc_enable; - bool misc_enable_saved; struct saved_msrs saved_msrs; unsigned long efer; u16 gdt_pad; /* Unused */ @@ -48,6 +51,7 @@ struct saved_context { unsigned long tr; unsigned long safety; unsigned long return_address; + bool misc_enable_saved; } __attribute__((packed)); #define loaddebug(thread,register) \ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 93b462e480671da6867ece31ec53c14c90093684..b6dedf6c835c9eb23eb8460ca98d1d2424edb3b4 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -118,6 +118,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_IGN_TPR_SHIFT 20 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) +#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) + #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 36bd243843d6dc9b281a7986d71aaf2cf0041be8..157149d4129c099b227cd27a7d7f7f6755ee6cd1 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev, __visible struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); -struct tss_struct; -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss); /* This runs runs on the previous thread's stack. */ static inline void prepare_switch_to(struct task_struct *next) @@ -43,6 +40,7 @@ asmlinkage void ret_from_fork(void); * order of the fields must match the code in __switch_to_asm(). */ struct inactive_task_frame { + unsigned long flags; #ifdef CONFIG_X86_64 unsigned long r15; unsigned long r14; diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h index e046a405743d84f13bd5b552d19c9214eae3cc3d..46e125b2d08afa1d40d89862041d5ad0c15707da 100644 --- a/arch/x86/include/asm/syscall_wrapper.h +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -48,12 +48,13 @@ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias * named __ia32_sys_*() */ -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __x64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ - SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ - asmlinkage long __x64_sys_##sname(void) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL(name) \ cond_syscall(__x64_sys_##name); \ @@ -181,11 +182,11 @@ * macros to work correctly. 
*/ #ifndef SYSCALL_DEFINE0 -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - asmlinkage long __x64_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ - asmlinkage long __x64_sys_##sname(void) +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused) #endif #ifndef COND_SYSCALL @@ -205,5 +206,8 @@ struct pt_regs; asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs); asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs); asmlinkage long __x64_sys_time(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_setup(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_enter(const struct pt_regs *regs); +asmlinkage long __x64_sys_io_uring_register(const struct pt_regs *regs); #endif /* _ASM_X86_SYSCALL_WRAPPER_H */ diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index e85ff65c43c3efc85702841ab77bffe145778707..aba8c9c7500b45704463adcffe73776b643ae75e 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -35,8 +35,39 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); * inconsistent instruction while you patch. */ extern void *text_poke(void *addr, const void *opcode, size_t len); +extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); extern int poke_int3_handler(struct pt_regs *regs); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); extern int after_bootmem; +#ifndef CONFIG_UML_X86 +static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) +{ + regs->ip = ip; +} + +#define INT3_INSN_SIZE 1 +#define CALL_INSN_SIZE 5 + +#ifdef CONFIG_X86_64 +static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) +{ + /* + * The int3 handler in entry_64.S adds a gap between the + * stack where the break point happened, and the saving of + * pt_regs. We can extend the original stack because of + * this gap. See the idtentry macro's create_gap option. 
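+ *
+ * A sketch of the intended use, e.g. from an int3 handler that must
+ * emulate the "call" instruction it temporarily replaced:
+ *
+ *	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ *	int3_emulate_jmp(regs, target);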
+ */ + regs->sp -= sizeof(unsigned long); + *(unsigned long *)regs->sp = val; +} + +static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) +{ + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); + int3_emulate_jmp(regs, func); +} +#endif /* CONFIG_X86_64 */ +#endif /* !CONFIG_UML_X86 */ + #endif /* _ASM_X86_TEXT_PATCHING_H */ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2ff2a30a264f4c5f02a01b3b87e4148e8992dc5a..82b73b75d67ca23fd605578ec2da8336eccd08c8 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -79,10 +79,12 @@ struct thread_info { #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ -#define TIF_SSBD 5 /* Reduced data speculation */ +#define TIF_SSBD 5 /* Speculative store bypass disable */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ +#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */ +#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ #define TIF_PATCH_PENDING 13 /* pending live patching update */ @@ -110,6 +112,8 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SPEC_IB (1 << TIF_SPEC_IB) +#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) @@ -145,8 +149,18 @@ struct thread_info { _TIF_FSCHECK) /* flags to check in __switch_to() */ -#define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) +#define _TIF_WORK_CTXSW_BASE \ + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \ + _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE) + +/* + * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated. 
+ */ +#ifdef CONFIG_SMP +# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB) +#else +# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE) +#endif #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h index cef818b16045f6b1d8e511133081b72173d52d8b..8ac563abb567b3f4254beb5b35214cd83e30da3e 100644 --- a/arch/x86/include/asm/time.h +++ b/arch/x86/include/asm/time.h @@ -7,6 +7,7 @@ extern void hpet_time_init(void); extern void time_init(void); +extern bool pit_timer_init(void); extern struct clock_event_device *global_clock_event; diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 58ce5288878e85db5c475d8891c0fff2817a20e4..79ec7add5f98fbaa5390bac9571f2f61652fa8c1 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -185,10 +185,14 @@ struct tlb_state { #define LOADED_MM_SWITCHING ((struct mm_struct *)1) + /* Last user mm for optimizing IBPB */ + union { + struct mm_struct *last_user_mm; + unsigned long last_user_mm_ibpb; + }; + u16 loaded_mm_asid; u16 next_asid; - /* last user mm's ctx id */ - u64 last_ctx_id; /* * We can be in one of several states: @@ -469,6 +473,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr) */ static inline void __flush_tlb_all(void) { + /* + * This is to catch users with enabled preemption and the PGE feature + * and don't trigger the warning in __native_flush_tlb(). + */ + VM_WARN_ON_ONCE(preemptible()); + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); } else { diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 453cf38a1c33d5b2452dff29be06ce52b5b30ffb..b821c657506b824e520570813c2f0d890429dfc6 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -106,8 +106,12 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); #define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id) #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) +#define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id) +#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) +extern unsigned int __max_die_per_package; + #ifdef CONFIG_SMP #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) @@ -115,6 +119,11 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); extern unsigned int __max_logical_packages; #define topology_max_packages() (__max_logical_packages) +static inline int topology_max_die_per_package(void) +{ + return __max_die_per_package; +} + extern int __max_smt_threads; static inline int topology_max_smt_threads(void) @@ -123,14 +132,21 @@ static inline int topology_max_smt_threads(void) } int topology_update_package_map(unsigned int apicid, unsigned int cpu); +int topology_update_die_map(unsigned int dieid, unsigned int cpu); int topology_phys_to_logical_pkg(unsigned int pkg); +int topology_phys_to_logical_die(unsigned int die, unsigned int cpu); bool topology_is_primary_thread(unsigned int cpu); bool topology_smt_supported(void); #else #define topology_max_packages() (1) static inline int topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; } +static inline int +topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; } static inline int 
topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } +static inline int topology_phys_to_logical_die(unsigned int die, + unsigned int cpu) { return 0; } +static inline int topology_max_die_per_package(void) { return 1; } static inline int topology_max_smt_threads(void) { return 1; } static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } static inline bool topology_smt_supported(void) { return false; } diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 3de69330e6c50e1747f6a19b62f6fd31b47e9316..b771bb3d159bc8ce27f7897a01f3af4d8be2a78a 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void); asmlinkage void xen_divide_error(void); asmlinkage void xen_xennmi(void); asmlinkage void xen_xendebug(void); -asmlinkage void xen_xenint3(void); +asmlinkage void xen_int3(void); asmlinkage void xen_overflow(void); asmlinkage void xen_bounds(void); asmlinkage void xen_invalid_op(void); @@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 -asmlinkage void smp_thermal_interrupt(void); -asmlinkage void smp_threshold_interrupt(void); -asmlinkage void smp_deferred_error_interrupt(void); +asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); +asmlinkage void smp_threshold_interrupt(struct pt_regs *regs); +asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs); #endif extern void ist_enter(struct pt_regs *regs); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index aae77eb8491c0df9307831269d81dd47b72cad5b..40c2dd62fc1b483cb006504e1b4f6b295c2a0a13 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -77,9 +77,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un /** * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. * @addr: User space pointer to start of block to check * @size: Size of block to check * @@ -95,7 +92,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ -#define access_ok(type, addr, size) \ +#define access_ok(addr, size) \ ({ \ WARN_ON_IN_IRQ(); \ likely(!__range_not_ok(addr, size, user_addr_max())); \ @@ -198,8 +195,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) "4: movl %3,%0\n" \ " jmp 3b\n" \ ".previous\n" \ - _ASM_EXTABLE(1b, 4b) \ - _ASM_EXTABLE(2b, 4b) \ + _ASM_EXTABLE_UA(1b, 4b) \ + _ASM_EXTABLE_UA(2b, 4b) \ : "=r" (err) \ : "A" (x), "r" (addr), "i" (errret), "0" (err)) @@ -293,8 +290,7 @@ do { \ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ break; \ case 8: \ - __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ - errret); \ + __put_user_asm_u64(x, ptr, retval, errret); \ break; \ default: \ __put_user_bad(); \ @@ -386,7 +382,7 @@ do { \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "=r" (err), ltype(x) \ : "m" (__m(addr)), "i" (errret), "0" (err)) @@ -440,8 +436,11 @@ do { \ #define __put_user_nocheck(x, ptr, size) \ ({ \ int __pu_err; \ + __typeof__(*(ptr)) __pu_val = (x); \ + __typeof__(ptr) __pu_ptr = (ptr); \ + __typeof__(size) __pu_size = (size); \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ + __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_err, -EFAULT);\ __uaccess_end(); \ __builtin_expect(__pu_err, 0); \ }) @@ -450,8 +449,10 @@ do { \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ + __typeof__(ptr) __gu_ptr = (ptr); \ + __typeof__(size) __gu_size = (size); \ __uaccess_begin_nospec(); \ - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __builtin_expect(__gu_err, 0); \ @@ -474,7 +475,7 @@ struct __large_struct { unsigned long buf[100]; }; "3: mov %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "=r"(err) \ : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) @@ -602,7 +603,7 @@ extern void __cmpxchg_wrong_size(void) "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ : "i" (-EFAULT), "q" (__new), "1" (__old) \ : "memory" \ @@ -618,7 +619,7 @@ extern void __cmpxchg_wrong_size(void) "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ @@ -634,7 +635,7 @@ extern void __cmpxchg_wrong_size(void) "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ @@ -653,7 +654,7 @@ extern void __cmpxchg_wrong_size(void) "3:\tmov %3, %0\n" \ "\tjmp 2b\n" \ "\t.previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_EXTABLE_UA(1b, 3b) \ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ : "i" (-EFAULT), "r" (__new), "1" (__old) \ : "memory" \ @@ -670,7 +671,7 @@ extern void __cmpxchg_wrong_size(void) #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ ({ \ - access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ + access_ok((ptr), sizeof(*(ptr))) ? 
\ __user_atomic_cmpxchg_inatomic((uval), (ptr), \ (old), (new), sizeof(*(ptr))) : \ -EFAULT; \ @@ -708,7 +709,14 @@ extern struct movsl_mask { * checking before using them, but you have to surround them with the * user_access_begin/end() pair. */ -#define user_access_begin() __uaccess_begin() +static __must_check inline bool user_access_begin(const void __user *ptr, size_t len) +{ + if (unlikely(!access_ok(ptr,len))) + return 0; + __uaccess_begin_nospec(); + return 1; +} +#define user_access_begin(a,b) user_access_begin(a,b) #define user_access_end() __uaccess_end() #define unsafe_put_user(x, ptr, err_label) \ @@ -728,5 +736,28 @@ do { \ if (unlikely(__gu_err)) goto err_label; \ } while (0) +/* + * We want the unsafe accessors to always be inlined and use + * the error labels - thus the macro games. + */ +#define unsafe_copy_loop(dst, src, len, type, label) \ + while (len >= sizeof(type)) { \ + unsafe_put_user(*(type *)src,(type __user *)dst,label); \ + dst += sizeof(type); \ + src += sizeof(type); \ + len -= sizeof(type); \ + } + +#define unsafe_copy_to_user(_dst,_src,_len,label) \ +do { \ + char __user *__ucu_dst = (_dst); \ + const char *__ucu_src = (_src); \ + size_t __ucu_len = (_len); \ + unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ + unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ + unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ + unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ +} while (0) + #endif /* _ASM_X86_UACCESS_H */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index a9d637bc301d7dd0086b5126a5ebac8f042c62c9..5cd1caa8bc6537c8795218581118c60552128ad8 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) return __copy_user_flushcache(dst, src, size); } -unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len); - unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len); diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h index db43f2a0d92c34710a123082d1f30571ae485831..aeed98c3c9e1d643c66bdc11e2eae649db4c85b1 100644 --- a/arch/x86/include/asm/umip.h +++ b/arch/x86/include/asm/umip.h @@ -4,9 +4,9 @@ #include #include -#ifdef CONFIG_X86_INTEL_UMIP +#ifdef CONFIG_X86_UMIP bool fixup_umip_exception(struct pt_regs *regs); #else static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; } -#endif /* CONFIG_X86_INTEL_UMIP */ +#endif /* CONFIG_X86_UMIP */ #endif /* _ASM_X86_UMIP_H */ diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 1f86e1b0a5cdc1afeb4667e8c770ec81456e9fb4..70fc159ebe6959fead369c46d8a143812a1bc058 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -19,10 +19,16 @@ struct unwind_state { #if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; unsigned long sp, bp, ip; - struct pt_regs *regs; + struct pt_regs *regs, *prev_regs; #elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; + /* + * If non-NULL: The current frame is incomplete and doesn't contain a + * valid BP. When looking for the next frame, use this instead of the + * non-existent saved BP. 
+ */ + unsigned long *next_bp; struct pt_regs *regs; #else unsigned long *sp; diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc61863667fad8ff2baa0a7242485600df..3f697a9e3f59b37e4fa37cf0f672114a2d84312b 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -48,7 +48,8 @@ enum { BIOS_STATUS_SUCCESS = 0, BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, BIOS_STATUS_EINVAL = -EINVAL, - BIOS_STATUS_UNAVAIL = -EBUSY + BIOS_STATUS_UNAVAIL = -EBUSY, + BIOS_STATUS_ABORT = -EINTR, }; /* Address map parameters */ @@ -167,4 +168,9 @@ extern long system_serial_number; extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ +/* + * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details + */ +extern struct semaphore __efi_uv_runtime_lock; + #endif /* _ASM_X86_UV_BIOS_H */ diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 0116b2ee9e64f34ebf612a0380410af39b4dd2e5..e05e0d3092445736ffd6a25ecbfddaa06168e7c6 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -83,9 +83,10 @@ static inline void cpu_emergency_vmxoff(void) */ static inline int cpu_has_svm(const char **msg) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) { if (msg) - *msg = "not amd"; + *msg = "not amd or hygon"; return 0; } diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..17864fb62e4b346760c0806be7a64aecf5d234ab --- /dev/null +++ b/arch/x86/include/asm/vmalloc.h @@ -0,0 +1,12 @@ +#ifndef _ASM_X86_VMALLOC_H +#define _ASM_X86_VMALLOC_H + +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + +#endif /* _ASM_X86_VMALLOC_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index b85a7c54c6a13b51f27b26f13a2dd52f148547a7..227b18b6d6309485dfaff9bc7cce0dc2421cac36 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -51,12 +51,14 @@ struct x86_init_resources { * are set up. 
* @intr_init: interrupt init code * @trap_init: platform specific trap setup + * @intr_mode_select: interrupt delivery mode selection * @intr_mode_init: interrupt delivery mode setup */ struct x86_init_irqs { void (*pre_vector_init)(void); void (*intr_init)(void); void (*trap_init)(void); + void (*intr_mode_select)(void); void (*intr_mode_init)(void); }; diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index ef05bea7010de473c4a73f5d99a1f74af2327ebc..6b5c710846f514e9a4d38f60555090511d890204 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -206,6 +206,9 @@ xen_single_call(unsigned int call, __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) + return -EINVAL; + asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM : [thunk_target] "a" (&hypercall_page[call]) diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index fd23d5778ea17f04e4c1b178fbcb2ffb3718dec0..f1645578d9d086e3a0f9476c59069921b0da1018 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -378,6 +378,7 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) +#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 19980ec1a316e8dc80a513948d5a5dff7b48fbda..21d5f0240595f51b3514e833b674cfc538161e4c 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -29,6 +29,7 @@ #define KVM_FEATURE_PV_TLB_FLUSH 9 #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 #define KVM_FEATURE_PV_SEND_IPI 11 +#define KVM_FEATURE_POLL_CONTROL 12 #define KVM_HINTS_REALTIME 0 @@ -47,6 +48,7 @@ #define MSR_KVM_ASYNC_PF_EN 0x4b564d02 #define MSR_KVM_STEAL_TIME 0x4b564d03 #define MSR_KVM_PV_EOI_EN 0x4b564d04 +#define MSR_KVM_POLL_CONTROL 0x4b564d05 struct kvm_steal_time { __u64 steal; diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h index f3329cabce5c6d9e7c605a0fb46f764e2d643141..7c9d2bb3833bd2cece61f5542c64ced1952d9091 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -27,8 +27,32 @@ enum perf_event_x86_regs { PERF_REG_X86_R13, PERF_REG_X86_R14, PERF_REG_X86_R15, - + /* These are the limits for the GPRs. 
*/ PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, + + /* These all need two bits set because they are 128bit */ + PERF_REG_X86_XMM0 = 32, + PERF_REG_X86_XMM1 = 34, + PERF_REG_X86_XMM2 = 36, + PERF_REG_X86_XMM3 = 38, + PERF_REG_X86_XMM4 = 40, + PERF_REG_X86_XMM5 = 42, + PERF_REG_X86_XMM6 = 44, + PERF_REG_X86_XMM7 = 46, + PERF_REG_X86_XMM8 = 48, + PERF_REG_X86_XMM9 = 50, + PERF_REG_X86_XMM10 = 52, + PERF_REG_X86_XMM11 = 54, + PERF_REG_X86_XMM12 = 56, + PERF_REG_X86_XMM13 = 58, + PERF_REG_X86_XMM14 = 60, + PERF_REG_X86_XMM15 = 62, + + /* These include both GPRs and XMMX registers */ + PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, }; + +#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) + #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 8824d01c0c352d6dbd2c12e228bd0de9ca335166..66835d9a6f72bf1ccab8e1a93def8cef8d8d1c1c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o dumpstack.o nmi.o obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o -obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-y += setup.o x86_init.o i8259.o irqinit.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o obj-$(CONFIG_X86_64) += sys_x86_64.o @@ -133,7 +134,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o -obj-$(CONFIG_X86_INTEL_UMIP) += umip.o +obj-$(CONFIG_X86_UMIP) += umip.o obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3b20607d581b5340fcf5e0345b7e70f9e83d95ad..6fc7e69b42559feb2f90dd75a58386bed3a23abf 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -196,7 +196,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled) } static int __init -acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) +acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; #ifdef CONFIG_X86_X2APIC @@ -209,7 +209,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); #ifdef CONFIG_X86_X2APIC apic_id = processor->local_apic_id; @@ -241,7 +241,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) } static int __init -acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic *processor = NULL; @@ -250,7 +250,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); /* Ignore invalid ID */ if (processor->id == 0xff) @@ -271,7 +271,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end) } static int __init -acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) +acpi_parse_sapic(union acpi_subtable_headers *header, 
const unsigned long end) { struct acpi_madt_local_sapic *processor = NULL; @@ -280,7 +280,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ processor->processor_id, /* ACPI ID */ @@ -290,7 +290,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end) } static int __init -acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, +acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; @@ -300,7 +300,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); acpi_lapic_addr = lapic_addr_ovr->address; @@ -308,7 +308,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, } static int __init -acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, +acpi_parse_x2apic_nmi(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; @@ -318,7 +318,7 @@ acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, if (BAD_MADT_ENTRY(x2apic_nmi, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); if (x2apic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); @@ -327,7 +327,7 @@ acpi_parse_x2apic_nmi(struct acpi_subtable_header *header, } static int __init -acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; @@ -336,7 +336,7 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e if (BAD_MADT_ENTRY(lapic_nmi, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); if (lapic_nmi->lint != 1) printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); @@ -448,7 +448,7 @@ static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, } static int __init -acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_io_apic *ioapic = NULL; struct ioapic_domain_cfg cfg = { @@ -461,7 +461,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) if (BAD_MADT_ENTRY(ioapic, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); /* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */ if (ioapic->global_irq_base < nr_legacy_irqs()) @@ -507,7 +507,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, } static int __init -acpi_parse_int_src_ovr(struct acpi_subtable_header * header, +acpi_parse_int_src_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_override *intsrc = NULL; @@ -517,7 +517,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, if (BAD_MADT_ENTRY(intsrc, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); if 
(intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, @@ -549,7 +549,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, } static int __init -acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) +acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src = NULL; @@ -558,7 +558,7 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; - acpi_table_print_madt_entry(header); + acpi_table_print_madt_entry(&header->common); /* TBD: Support nimsrc entries? */ @@ -1565,10 +1565,18 @@ void __init acpi_boot_table_init(void) /* * Initialize the ACPI boot-time table parser. */ - if (acpi_table_init()) { + if (acpi_locate_initial_tables()) disable_acpi(); - return; - } + else + acpi_reserve_initial_tables(); +} + +int __init early_acpi_boot_init(void) +{ + if (acpi_disabled) + return 1; + + acpi_table_init_complete(); acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); @@ -1581,18 +1589,9 @@ void __init acpi_boot_table_init(void) } else { printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); disable_acpi(); - return; + return 1; } } -} - -int __init early_acpi_boot_init(void) -{ - /* - * If acpi_disabled, bail out - */ - if (acpi_disabled) - return 1; /* * Process the Multiple APIC Description Table (MADT), if present diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 158ad1483c4352b2f93c7cbb931dfdb8dd40c3bf..af63131244901c8b5d3f48f5df06834ed4c5edb5 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -51,6 +51,41 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, if (c->x86_vendor == X86_VENDOR_INTEL && (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) flags->bm_control = 0; + + if (c->x86_vendor == X86_VENDOR_CENTAUR) { + if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f && + c->x86_stepping >= 0x0e)) { + /* + * For all recent Centaur CPUs, the ucode will make sure that each + * core can keep cache coherence with each other while entering C3 + * type state. So, set bm_check to 1 to indicate that the kernel + * doesn't need to execute a cache flush operation (WBINVD) when + * entering C3 type state. + */ + flags->bm_check = 1; + /* + * For all recent Centaur platforms, ARB_DISABLE is a nop. + * Set bm_control to zero to indicate that ARB_DISABLE is + * not required while entering C3 type state. + */ + flags->bm_control = 0; + } + } + + if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { + /* + * All Zhaoxin CPUs that support C3 share cache. + * And caches should not be flushed by software while + * entering C3 type state. + */ + flags->bm_check = 1; + /* + * On all recent Zhaoxin platforms, ARB_DISABLE is a nop. + * So, set bm_control to zero to indicate that ARB_DISABLE + * is not required while entering C3 type state. 
+ */ + flags->bm_control = 0; + } } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); @@ -168,7 +203,10 @@ static int __init ffh_cstate_init(void) struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL && - c->x86_vendor != X86_VENDOR_AMD) + c->x86_vendor != X86_VENDOR_AMD && + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index b9d5e7c9ef43e66c0b8d19fc0346be1779fa78d4..0761bfa4157966373b91edbc2cc356936a980107 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -222,6 +222,10 @@ void __init arch_init_ideal_nops(void) } break; + case X86_VENDOR_HYGON: + ideal_nops = p6_nops; + return; + case X86_VENDOR_AMD: if (boot_cpu_data.x86 > 0xf) { ideal_nops = p6_nops; @@ -366,6 +370,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("alt table %px, -> %px", start, end); + + /* + * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using + * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. + * During the process, KASAN becomes confused seeing partial LA57 + * conversion and triggers a false-positive out-of-bound report. + * + * Disable KASAN until the patching is complete. + */ + kasan_disable_current(); + /* * The scan order should be from start to end. A later scanned * alternative code can overwrite previously scanned alternative code. @@ -426,6 +441,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, text_poke_early(instr, insnbuf, insnbuf_sz); } + + kasan_enable_current(); } #ifdef CONFIG_SMP @@ -608,6 +625,33 @@ extern struct paravirt_patch_site __start_parainstructions[], __stop_parainstructions[]; #endif /* CONFIG_PARAVIRT */ +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) +/* + * Constant (boot-param configurable) flag selecting the NUMA-aware variant + * of spinlock. Possible values: -1 (off) / 0 (auto, default) / 1 (on). + */ +static int numa_spinlock_flag; + +static int __init numa_spinlock_setup(char *str) +{ + if (!strcmp(str, "auto")) { + numa_spinlock_flag = 0; + return 1; + } else if (!strcmp(str, "on")) { + numa_spinlock_flag = 1; + return 1; + } else if (!strcmp(str, "off")) { + numa_spinlock_flag = -1; + return 1; + } + + return 0; +} + +__setup("numa_spinlock=", numa_spinlock_setup); + +#endif + void __init alternative_instructions(void) { /* The patching is not fully atomic, so try to avoid local interruptions @@ -643,6 +687,20 @@ void __init alternative_instructions(void) (unsigned long)__smp_locks_end); #endif +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) + /* + * By default, switch to the NUMA-friendly slow path for + * spinlocks when we have multiple NUMA nodes in native environment. + */ + if ((numa_spinlock_flag == 1) || + (numa_spinlock_flag == 0 && nr_node_ids > 1 && + pv_lock_ops.queued_spin_lock_slowpath == + native_queued_spin_lock_slowpath)) { + pv_lock_ops.queued_spin_lock_slowpath = + __cna_queued_spin_lock_slowpath; + } +#endif + apply_paravirt(__parainstructions, __parainstructions_end); restart_nmi(); @@ -662,30 +720,33 @@ void __init alternative_instructions(void) * handlers seeing an inconsistent instruction while you patch. 
*/ void *__init_or_module text_poke_early(void *addr, const void *opcode, - size_t len) + size_t len) { unsigned long flags; - local_irq_save(flags); - memcpy(addr, opcode, len); - local_irq_restore(flags); - sync_core(); - /* Could also do a CLFLUSH here to speed up CPU recovery; but - that causes hangs on some VIA CPUs. */ + + if (boot_cpu_has(X86_FEATURE_NX) && + is_module_text_address((unsigned long)addr)) { + /* + * Modules text is marked initially as non-executable, so the + * code cannot be running and speculative code-fetches are + * prevented. Just change the code. + */ + memcpy(addr, opcode, len); + } else { + local_irq_save(flags); + memcpy(addr, opcode, len); + sync_core(); + local_irq_restore(flags); + + /* + * Could also do a CLFLUSH here to speed up CPU recovery; but + * that causes hangs on some VIA CPUs. + */ + } return addr; } -/** - * text_poke - Update instructions on a live kernel - * @addr: address to modify - * @opcode: source of the copy - * @len: length to copy - * - * Only atomic text poke/set should be allowed when not doing early patching. - * It means the size must be writable atomically and the address must be aligned - * in a way that permits an atomic write. It also makes sure we fit on a single - * page. - */ -void *text_poke(void *addr, const void *opcode, size_t len) +static void *__text_poke(void *addr, const void *opcode, size_t len) { unsigned long flags; char *vaddr; @@ -698,8 +759,6 @@ void *text_poke(void *addr, const void *opcode, size_t len) */ BUG_ON(!after_bootmem); - lockdep_assert_held(&text_mutex); - if (!core_kernel_text((unsigned long)addr)) { pages[0] = vmalloc_to_page(addr); pages[1] = vmalloc_to_page(addr + PAGE_SIZE); @@ -728,6 +787,43 @@ void *text_poke(void *addr, const void *opcode, size_t len) return addr; } +/** + * text_poke - Update instructions on a live kernel + * @addr: address to modify + * @opcode: source of the copy + * @len: length to copy + * + * Only atomic text poke/set should be allowed when not doing early patching. + * It means the size must be writable atomically and the address must be aligned + * in a way that permits an atomic write. It also makes sure we fit on a single + * page. + */ +void *text_poke(void *addr, const void *opcode, size_t len) +{ + lockdep_assert_held(&text_mutex); + + return __text_poke(addr, opcode, len); +} + +/** + * text_poke_kgdb - Update instructions on a live kernel by kgdb + * @addr: address to modify + * @opcode: source of the copy + * @len: length to copy + * + * Only atomic text poke/set should be allowed when not doing early patching. + * It means the size must be writable atomically and the address must be aligned + * in a way that permits an atomic write. It also makes sure we fit on a single + * page. + * + * Context: should only be used by kgdb, which ensures no other core is running, + * despite the fact it does not hold the text_mutex. 
+ */ +void *text_poke_kgdb(void *addr, const void *opcode, size_t len) +{ + return __text_poke(addr, opcode, len); +} + static void do_sync_core(void *info) { sync_core(); diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index b481b95bd8f6b9e439c5d72e42af21b18d250af4..59f08eb3e5e9bb3a1c6760f072cf888a6d2e4b64 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -11,26 +11,40 @@ #include #include #include +#include #include #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 -#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 -#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 +#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 + +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M10H_ROOT 0x14c0 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 /* Protect the PCI config register pairs used for SMN and DF indirect access. */ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, {} }; + #define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704 const struct pci_device_id amd_nb_misc_ids[] = { @@ -44,7 +58,10 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, {} }; EXPORT_SYMBOL_GPL(amd_nb_misc_ids); @@ -57,10 +74,37 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; +static const struct pci_device_id hygon_root_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_ROOT) }, + {} +}; + +static const struct pci_device_id hygon_nb_misc_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, 
PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, + {} +}; + +static const struct pci_device_id hygon_nb_link_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4) }, + {} +}; + const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { { 0x00, 0x18, 0x20 }, { 0xff, 0x00, 0x20 }, @@ -134,7 +178,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) int amd_smn_read(u16 node, u32 address, u32 *value) { - return __amd_smn_rw(node, address, value, false); + int err = __amd_smn_rw(node, address, value, false); + + if (PCI_POSSIBLE_ERROR(*value)) { + err = -ENODEV; + *value = 0; + } + + return err; } EXPORT_SYMBOL_GPL(amd_smn_read); @@ -192,17 +243,255 @@ int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) } EXPORT_SYMBOL_GPL(amd_df_indirect_read); -int amd_cache_northbridges(void) +bool hygon_f18h_m4h(void) { - u16 i = 0; + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) +{ + struct pci_dev *df_func = NULL; + u32 device; + int err; + + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + case 0x7: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + case 0x7: + device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { + return -ENODEV; + } + + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); + return -ENODEV; + } + + err = pci_read_config_dword(df_func, offset, value); + if (err) + pr_warn("Error reading DF F%d register.\n", func); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + if (boot_cpu_data.x86_model >= 0x6 && + boot_cpu_data.x86_model <= 0x7) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df_register(misc, 1, 0x200, &value); + + return ret ? 
0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. */ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. 
*/ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} + +int amd_cache_northbridges(void) +{ + const struct pci_device_id *misc_ids = amd_nb_misc_ids; + const struct pci_device_id *link_ids = amd_nb_link_ids; + const struct pci_device_id *root_ids = amd_root_ids; struct pci_dev *root, *misc, *link; + struct amd_northbridge *nb; + u16 i = 0; if (amd_northbridges.num) return 0; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + root_ids = hygon_root_ids; + misc_ids = hygon_nb_misc_ids; + link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); + } + misc = NULL; - while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL) + while ((misc = next_northbridge(misc, misc_ids)) != NULL) i++; if (!i) @@ -218,11 +507,11 @@ int amd_cache_northbridges(void) link = misc = root = NULL; for (i = 0; i != amd_northbridges.num; i++) { node_to_amd_nb(i)->root = root = - next_northbridge(root, amd_root_ids); + next_northbridge(root, root_ids); node_to_amd_nb(i)->misc = misc = - next_northbridge(misc, amd_nb_misc_ids); + next_northbridge(misc, misc_ids); node_to_amd_nb(i)->link = link = - next_northbridge(link, amd_nb_link_ids); + next_northbridge(link, link_ids); } if (amd_gart_present()) @@ -261,11 +550,19 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges); */ bool __init early_is_amd_nb(u32 device) { + const struct pci_device_id *misc_ids = amd_nb_misc_ids; const struct pci_device_id *id; u32 vendor = device & 0xffff; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + misc_ids = hygon_nb_misc_ids; + device >>= 16; - for (id = amd_nb_misc_ids; id->vendor; id++) + for (id = misc_ids; id->vendor; id++) if (vendor == id->vendor && device == id->device) return true; return false; @@ -277,7 +574,8 @@ struct resource *amd_get_mmconfig_range(struct resource *res) u64 base, msr; unsigned int segn_busn_bits; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return NULL; /* assume all cpus from fam10h have mmconfig */ diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 2c4d5ece74565f10330b4121af72f33622f820bc..93426c5fc70f48b4d91cd9be5059ed1c14f255b8 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) "AGP: " fmt #include +#include #include #include #include @@ -57,7 +58,7 @@ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; -#ifdef CONFIG_PROC_VMCORE +#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE) /* * If the first kernel maps the aperture over e820 RAM, the kdump kernel will * use the same range because it will remain configured in the northbridge. 
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1; */ static unsigned long aperture_pfn_start, aperture_page_count; -static int gart_oldmem_pfn_is_ram(unsigned long pfn) +static int gart_mem_pfn_is_ram(unsigned long pfn) { return likely((pfn < aperture_pfn_start) || (pfn >= aperture_pfn_start + aperture_page_count)); } -static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +static void __init exclude_from_core(u64 aper_base, u32 aper_order) { aperture_pfn_start = aper_base >> PAGE_SHIFT; aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; - WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); +#ifdef CONFIG_PROC_VMCORE + WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram)); +#endif +#ifdef CONFIG_PROC_KCORE + WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram)); +#endif } #else -static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +static void exclude_from_core(u64 aper_base, u32 aper_order) { } #endif @@ -469,7 +475,7 @@ int __init gart_iommu_hole_init(void) * may have allocated the range over its e820 RAM * and fixed up the northbridge */ - exclude_from_vmcore(last_aper_base, last_aper_order); + exclude_from_core(last_aper_base, last_aper_order); return 1; } @@ -515,7 +521,7 @@ int __init gart_iommu_hole_init(void) * overlap with the first kernel's memory. We can't access the * range through vmcore even though it should be part of the dump. */ - exclude_from_vmcore(aper_alloc, aper_order); + exclude_from_core(aper_alloc, aper_order); /* Fix up the north bridges */ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 84132eddb5a858ff97b4c83a7ac6708eb49f4ab1..f26589b1d61d0ce92c4aa8bd1e44f9ebcaf0c870 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -165,7 +165,7 @@ static __init int setup_apicpmtimer(char *s) { apic_calibrate_pmtmr = 1; notsc_setup(NULL); - return 0; + return 1; } __setup("apicpmtimer", setup_apicpmtimer); #endif @@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); /* * Debug level, exported for io_apic.c */ -unsigned int apic_verbosity; +int apic_verbosity; int pic_mode; @@ -193,7 +193,7 @@ static struct resource lapic_resource = { .flags = IORESOURCE_MEM | IORESOURCE_BUSY, }; -unsigned int lapic_timer_frequency = 0; +unsigned int lapic_timer_period = 0; static void apic_pm_activate(void); @@ -224,6 +224,11 @@ static int modern_apic(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 >= 0xf) return 1; + + /* Hygon systems use modern APIC */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 1; + return lapic_get_version() >= 0x14; } @@ -483,7 +488,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt) v = apic_read(APIC_LVTT); v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, v); - apic_write(APIC_TMICT, 0); + + /* + * Setting APIC_LVT_MASKED (above) should be enough to tell + * the hardware that this timer will never fire. But AMD + * erratum 411 and some Intel CPU behavior circa 2024 say + * otherwise. 
Time for belt and suspenders programming: mask + * the timer _and_ zero the counter registers: + */ + if (v & APIC_LVT_TIMER_TSCDEADLINE) + wrmsrl(MSR_IA32_TSC_DEADLINE, 0); + else + apic_write(APIC_TMICT, 0); + return 0; } @@ -494,7 +511,7 @@ lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot) if (evt->features & CLOCK_EVT_FEAT_DUMMY) return 0; - __setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1); + __setup_APIC_LVTT(lapic_timer_period, oneshot, 1); return 0; } @@ -715,7 +732,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; /* - * Temporary interrupt handler. + * Temporary interrupt handler and polled calibration function. */ static void __init lapic_cal_handler(struct clock_event_device *dev) { @@ -796,33 +813,88 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) return 0; } +static int __init lapic_init_clockevent(void) +{ + if (!lapic_timer_period) + return -1; + + /* Calculate the scaled math multiplication factor */ + lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR, + TICK_NSEC, lapic_clockevent.shift); + lapic_clockevent.max_delta_ns = + clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent); + lapic_clockevent.max_delta_ticks = 0x7FFFFFFF; + lapic_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &lapic_clockevent); + lapic_clockevent.min_delta_ticks = 0xF; + + return 0; +} + +bool __init apic_needs_pit(void) +{ + /* + * If the frequencies are not known, PIT is required for both TSC + * and apic timer calibration. + */ + if (!tsc_khz || !cpu_khz) + return true; + + /* Is there an APIC at all or is it disabled? */ + if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic) + return true; + + /* + * If interrupt delivery mode is legacy PIC or virtual wire without + * configuration, the local APIC timer wont be set up. Make sure + * that the PIT is initialized. + */ + if (apic_intr_mode == APIC_PIC || + apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG) + return true; + + /* Virt guests may lack ARAT, but still have DEADLINE */ + if (!boot_cpu_has(X86_FEATURE_ARAT)) + return true; + + /* Deadline timer is based on TSC so no further PIT action required */ + if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + return false; + + /* APIC timer disabled? */ + if (disable_apic_timer) + return true; + /* + * The APIC timer frequency is known already, no PIT calibration + * required. If unknown, let the PIT be initialized. + */ + return lapic_timer_period == 0; +} + static int __init calibrate_APIC_clock(void) { struct clock_event_device *levt = this_cpu_ptr(&lapic_events); - void (*real_handler)(struct clock_event_device *dev); + u64 tsc_perj = 0, tsc_start = 0; + unsigned long jif_start; unsigned long deltaj; long delta, deltatsc; int pm_referenced = 0; - /** - * check if lapic timer has already been calibrated by platform - * specific routine, such as tsc calibration code. if so, we just fill + if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + return 0; + + /* + * Check if lapic timer has already been calibrated by platform + * specific routine, such as tsc calibration code. If so just fill * in the clockevent structure and return. 
*/ - - if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) { - return 0; - } else if (lapic_timer_frequency) { + if (!lapic_init_clockevent()) { apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n", - lapic_timer_frequency); - lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR, - TICK_NSEC, lapic_clockevent.shift); - lapic_clockevent.max_delta_ns = - clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); - lapic_clockevent.max_delta_ticks = 0x7FFFFF; - lapic_clockevent.min_delta_ns = - clockevent_delta2ns(0xF, &lapic_clockevent); - lapic_clockevent.min_delta_ticks = 0xF; + lapic_timer_period); + /* + * Direct calibration methods must have an always running + * local APIC timer, no need for broadcast timer. + */ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; return 0; } @@ -830,28 +902,64 @@ static int __init calibrate_APIC_clock(void) apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" "calibrating APIC timer ...\n"); + /* + * There are platforms w/o global clockevent devices. Instead of + * making the calibration conditional on that, use a polling based + * approach everywhere. + */ local_irq_disable(); - /* Replace the global interrupt handler */ - real_handler = global_clock_event->event_handler; - global_clock_event->event_handler = lapic_cal_handler; - /* * Setup the APIC counter to maximum. There is no way the lapic * can underflow in the 100ms detection time frame */ __setup_APIC_LVTT(0xffffffff, 0, 0); - /* Let the interrupts run */ + /* + * Methods to terminate the calibration loop: + * 1) Global clockevent if available (jiffies) + * 2) TSC if available and frequency is known + */ + jif_start = READ_ONCE(jiffies); + + if (tsc_khz) { + tsc_start = rdtsc(); + tsc_perj = div_u64((u64)tsc_khz * 1000, HZ); + } + + /* + * Enable interrupts so the tick can fire, if a global + * clockevent device is available + */ local_irq_enable(); - while (lapic_cal_loops <= LAPIC_CAL_LOOPS) - cpu_relax(); + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) { + /* Wait for a tick to elapse */ + while (1) { + if (tsc_khz) { + u64 tsc_now = rdtsc(); + if ((tsc_now - tsc_start) >= tsc_perj) { + tsc_start += tsc_perj; + break; + } + } else { + unsigned long jif_now = READ_ONCE(jiffies); - local_irq_disable(); + if (time_after(jif_now, jif_start)) { + jif_start = jif_now; + break; + } + } + cpu_relax(); + } - /* Restore the real event handler */ - global_clock_event->event_handler = real_handler; + /* Invoke the calibration routine */ + local_irq_disable(); + lapic_cal_handler(NULL); + local_irq_enable(); + } + + local_irq_disable(); /* Build delta t1-t2 as apic timer counts down */ delta = lapic_cal_t1 - lapic_cal_t2; @@ -863,22 +971,13 @@ static int __init calibrate_APIC_clock(void) pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1, &delta, &deltatsc); - /* Calculate the scaled math multiplication factor */ - lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, - lapic_clockevent.shift); - lapic_clockevent.max_delta_ns = - clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent); - lapic_clockevent.max_delta_ticks = 0x7FFFFFFF; - lapic_clockevent.min_delta_ns = - clockevent_delta2ns(0xF, &lapic_clockevent); - lapic_clockevent.min_delta_ticks = 0xF; - - lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; + lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS; + lapic_init_clockevent(); apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta); apic_printk(APIC_VERBOSE, "..... 
mult: %u\n", lapic_clockevent.mult); apic_printk(APIC_VERBOSE, "..... calibration result: %u\n", - lapic_timer_frequency); + lapic_timer_period); if (boot_cpu_has(X86_FEATURE_TSC)) { apic_printk(APIC_VERBOSE, "..... CPU clock speed is " @@ -889,13 +988,13 @@ static int __init calibrate_APIC_clock(void) apic_printk(APIC_VERBOSE, "..... host bus clock speed is " "%u.%04u MHz.\n", - lapic_timer_frequency / (1000000 / HZ), - lapic_timer_frequency % (1000000 / HZ)); + lapic_timer_period / (1000000 / HZ), + lapic_timer_period % (1000000 / HZ)); /* * Do a sanity check on the APIC calibration result */ - if (lapic_timer_frequency < (1000000 / HZ)) { + if (lapic_timer_period < (1000000 / HZ)) { local_irq_enable(); pr_warning("APIC frequency too slow, disabling apic timer\n"); return -1; @@ -904,10 +1003,11 @@ static int __init calibrate_APIC_clock(void) levt->features &= ~CLOCK_EVT_FEAT_DUMMY; /* - * PM timer calibration failed or not turned on - * so lets try APIC timer based calibration + * PM timer calibration failed or not turned on so lets try APIC + * timer based calibration, if a global clockevent device is + * available. */ - if (!pm_referenced) { + if (!pm_referenced && global_clock_event) { apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); /* @@ -1228,7 +1328,7 @@ void __init sync_Arb_IDs(void) enum apic_intr_mode_id apic_intr_mode; -static int __init apic_intr_mode_select(void) +static int __init __apic_intr_mode_select(void) { /* Check kernel option */ if (disable_apic) { @@ -1290,6 +1390,12 @@ static int __init apic_intr_mode_select(void) return APIC_SYMMETRIC_IO; } +/* Select the interrupt delivery mode for the BSP */ +void __init apic_intr_mode_select(void) +{ + apic_intr_mode = __apic_intr_mode_select(); +} + /* * An initial setup of the virtual wire mode. */ @@ -1344,8 +1450,6 @@ void __init apic_intr_mode_init(void) { bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT); - apic_intr_mode = apic_intr_mode_select(); - switch (apic_intr_mode) { case APIC_PIC: pr_info("APIC: Keep in PIC mode(8259)\n"); @@ -1412,53 +1516,72 @@ static void lapic_setup_esr(void) oldvalue, value); } -static void apic_pending_intr_clear(void) +#define APIC_IR_REGS APIC_ISR_NR +#define APIC_IR_BITS (APIC_IR_REGS * 32) +#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG) + +union apic_ir { + unsigned long map[APIC_IR_MAPSIZE]; + u32 regs[APIC_IR_REGS]; +}; + +static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) { - long long max_loops = cpu_khz ? cpu_khz : 1000000; - unsigned long long tsc = 0, ntsc; - unsigned int queued; - unsigned long value; - int i, j, acked = 0; + int i, bit; + + /* Read the IRRs */ + for (i = 0; i < APIC_IR_REGS; i++) + irr->regs[i] = apic_read(APIC_IRR + i * 0x10); + + /* Read the ISRs */ + for (i = 0; i < APIC_IR_REGS; i++) + isr->regs[i] = apic_read(APIC_ISR + i * 0x10); - if (boot_cpu_has(X86_FEATURE_TSC)) - tsc = rdtsc(); /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. - * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + * If the ISR map is not empty. 
ACK the APIC and run another round + * to verify whether a pending IRR has been unblocked and turned + * into a ISR. */ - do { - queued = 0; - for (i = APIC_ISR_NR - 1; i >= 0; i--) - queued |= apic_read(APIC_IRR + i*0x10); - - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for_each_set_bit(j, &value, 32) { - ack_APIC_irq(); - acked++; - } - } - if (acked > 256) { - pr_err("LAPIC pending interrupts after %d EOI\n", acked); - break; - } - if (queued) { - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { - ntsc = rdtsc(); - max_loops = (cpu_khz << 10) - (ntsc - tsc); - } else { - max_loops--; - } - } - } while (queued && max_loops > 0); - WARN_ON(max_loops <= 0); + if (!bitmap_empty(isr->map, APIC_IR_BITS)) { + /* + * There can be multiple ISR bits set when a high priority + * interrupt preempted a lower priority one. Issue an ACK + * per set bit. + */ + for_each_set_bit(bit, isr->map, APIC_IR_BITS) + ack_APIC_irq(); + return true; + } + + return !bitmap_empty(irr->map, APIC_IR_BITS); +} + +/* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now the CPU has serviced that pending interrupt and it + * might not have done the ack_APIC_irq() because it thought, interrupt + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence + * a vector might get locked. It was noticed for timer irq (vector + * 0x31). Issue an extra EOI to clear ISR. + * + * If there are pending IRR bits they turn into ISR bits after a higher + * priority ISR bit has been acked. + */ +static void apic_pending_intr_clear(void) +{ + union apic_ir irr, isr; + unsigned int i; + + /* 512 loops are way oversized and give the APIC a chance to obey. */ + for (i = 0; i < 512; i++) { + if (!apic_check_and_ack(&irr, &isr)) + return; + } + /* Dump the IRR/ISR content if that failed */ + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); } /** @@ -1471,9 +1594,6 @@ static void setup_local_APIC(void) { int cpu = smp_processor_id(); unsigned int value; -#ifdef CONFIG_X86_32 - int logical_apicid, ldr_apicid; -#endif if (disable_apic) { @@ -1481,6 +1601,14 @@ static void setup_local_APIC(void) return; } + /* + * If this comes from kexec/kcrash the APIC might be enabled in + * SPIV. Soft disable it before doing further initialization. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && apic->disable_esr) { @@ -1506,16 +1634,21 @@ static void setup_local_APIC(void) apic->init_apic_ldr(); #ifdef CONFIG_X86_32 - /* - * APIC LDR is initialized. If logical_apicid mapping was - * initialized during get_smp_config(), make sure it matches the - * actual value. - */ - logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); - ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); - WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid); - /* always use the value from LDR */ - early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid; + if (apic->dest_logical) { + int logical_apicid, ldr_apicid; + + /* + * APIC LDR is initialized. If logical_apicid mapping was + * initialized during get_smp_config(), make sure it matches + * the actual value. 
+ */ + logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); + if (logical_apicid != BAD_APICID) + WARN_ON(logical_apicid != ldr_apicid); + /* Always use the value from LDR. */ + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid; + } #endif /* @@ -1526,6 +1659,7 @@ static void setup_local_APIC(void) value &= ~APIC_TPRI_MASK; apic_write(APIC_TASKPRI, value); + /* Clear eventually stale ISR/IRR bits */ apic_pending_intr_clear(); /* @@ -1912,6 +2046,8 @@ static int __init detect_init_APIC(void) (boot_cpu_data.x86 >= 15)) break; goto no_apic; + case X86_VENDOR_HYGON: + break; case X86_VENDOR_INTEL: if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 || (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC))) @@ -2026,21 +2162,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs) entering_irq(); trace_spurious_apic_entry(vector); + inc_irq_stat(irq_spurious_count); + /* - * Check if this really is a spurious interrupt and ACK it - * if it is a vectored one. Just in case... - * Spurious interrupts should not be ACKed. + * If this is a spurious interrupt then do not acknowledge + */ + if (vector == SPURIOUS_APIC_VECTOR) { + /* See SDM vol 3 */ + pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n", + smp_processor_id()); + goto out; + } + + /* + * If it is a vectored one, verify it's set in the ISR. If set, + * acknowledge it. */ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)); - if (v & (1 << (vector & 0x1f))) + if (v & (1 << (vector & 0x1f))) { + pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n", + vector, smp_processor_id()); ack_APIC_irq(); - - inc_irq_stat(irq_spurious_count); - - /* see sw-dev-man vol 3, chapter 7.4.13.5 */ - pr_info("spurious APIC interrupt through vector %02x on CPU#%d, " - "should never happen.\n", vector, smp_processor_id()); - + } else { + pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n", + vector, smp_processor_id()); + } +out: trace_spurious_apic_exit(vector); exiting_irq(); } @@ -2416,6 +2563,7 @@ void __init apic_bsp_setup(bool upmode) end_local_APIC_setup(); irq_remap_enable_fault_handling(); setup_IO_APIC(); + lapic_update_legacy_vectors(); } #ifdef CONFIG_UP_LATE_INIT @@ -2491,6 +2639,13 @@ static int lapic_suspend(void) #endif local_irq_save(flags); + + /* + * Mask IOAPIC before disabling the local APIC to prevent stale IRR + * entries on some implementations. + */ + mask_ioapic_entries(); + disable_local_APIC(); irq_remapping_disable(); diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index afee386ff711e95132dd5c01e79317ac22bd9a56..caedd8d60d3610b8b9bc11f5669de16521b3ac2a 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu) return early_per_cpu(x86_cpu_to_apicid, cpu); } -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = per_cpu(x86_bios_cpu_apicid, cpu); - val |= SET_APIC_LOGICAL_ID(id); - - return val; -} - /* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... 
+ * bigsmp enables physical destination mode + * and doesn't use LDR and DFR */ static void bigsmp_init_apic_ldr(void) { - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); } static void bigsmp_setup_apic_routing(void) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ff0d14cd9e827ef78b1ee4c8fdcebf08c56dfe71..a63366344fcfa7102fc375a01724e9281f30a361 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -58,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -1724,9 +1725,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) static inline bool ioapic_irqd_mask(struct irq_data *data) { - /* If we are moving the irq we need to mask it */ + /* If we are moving the IRQ we need to mask it */ if (unlikely(irqd_is_setaffinity_pending(data))) { - mask_ioapic_irq(data); + if (!irqd_irq_masked(data)) + mask_ioapic_irq(data); return true; } return false; @@ -1763,7 +1765,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) */ if (!io_apic_level_ack_pending(data->chip_data)) irq_move_masked_irq(data); - unmask_ioapic_irq(data); + /* If the IRQ is masked in the core, leave it: */ + if (!irqd_irq_masked(data)) + unmask_ioapic_irq(data); } } #else @@ -1891,6 +1895,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data, return ret; } +/* + * Interrupt shutdown masks the ioapic pin, but the interrupt might already + * be in flight, but not yet serviced by the target CPU. That means + * __synchronize_hardirq() would return and claim that everything is calmed + * down. So free_irq() would proceed and deactivate the interrupt and free + * resources. + * + * Once the target CPU comes around to service it, it will find a cleared + * vector and complain. While the spurious interrupt is harmless, the full + * release of resources might prevent the interrupt from being acknowledged + * which keeps the hardware in a weird state. + * + * Verify that the corresponding Remote-IRR bits are clear. + */ +static int ioapic_irq_get_chip_state(struct irq_data *irqd, + enum irqchip_irq_state which, + bool *state) +{ + struct mp_chip_data *mcd = irqd->chip_data; + struct IO_APIC_route_entry rentry; + struct irq_pin_list *p; + + if (which != IRQCHIP_STATE_ACTIVE) + return -EINVAL; + + *state = false; + raw_spin_lock(&ioapic_lock); + for_each_irq_pin(p, mcd->irq_2_pin) { + rentry = __ioapic_read_entry(p->apic, p->pin); + /* + * The remote IRR is only valid in level trigger mode. Its + * meaning is undefined for edge triggered interrupts and + * irrelevant because the IO-APIC treats them as fire and + * forget.
+ */ + if (rentry.irr && rentry.trigger) { + *state = true; + break; + } + } + raw_spin_unlock(&ioapic_lock); + return 0; +} + static struct irq_chip ioapic_chip __read_mostly = { .name = "IO-APIC", .irq_startup = startup_ioapic_irq, @@ -1900,7 +1948,9 @@ static struct irq_chip ioapic_chip __read_mostly = { .irq_eoi = ioapic_ack_level, .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, - .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_get_irqchip_state = ioapic_irq_get_chip_state, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static struct irq_chip ioapic_ir_chip __read_mostly = { @@ -1912,7 +1962,9 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { .irq_eoi = ioapic_ir_ack_level, .irq_set_affinity = ioapic_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, - .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_get_irqchip_state = ioapic_irq_get_chip_state, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static inline void init_IO_APIC_traps(void) @@ -2081,6 +2133,9 @@ static inline void __init check_timer(void) unsigned long flags; int no_pin1 = 0; + if (!global_clock_event) + return; + local_irq_save(flags); /* @@ -2201,6 +2256,7 @@ static inline void __init check_timer(void) legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); + legacy_pic->unmask(0); unlock_ExtINT_logic(); @@ -2386,7 +2442,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use * gsi_top if ioapic_dynirq_base hasn't been initialized yet. */ - return ioapic_initialized ? ioapic_dynirq_base : gsi_top; + if (!ioapic_initialized) + return gsi_top; + /* + * For DT enabled machines ioapic_dynirq_base is irrelevant and not + * updated. So simply return @from if ioapic_dynirq_base == 0. + */ + return ioapic_dynirq_base ? 
: from; } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 72a94401f9e03487f37dea32f762afe524db0578..d72a03dba1aec8a474f80194a2f3cea941bd0478 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -26,10 +26,8 @@ static struct irq_domain *msi_default_domain; -static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg) { - struct irq_cfg *cfg = irqd_cfg(data); - msg->address_hi = MSI_ADDR_BASE_HI; if (x2apic_enabled()) @@ -50,6 +48,129 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) MSI_DATA_VECTOR(cfg->vector); } +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + __irq_msi_compose_msg(irqd_cfg(data), msg); +} + +static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __irq_msi_compose_msg(cfg, msg); + irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg); +} + +static int +msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) +{ + struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd); + struct irq_data *parent = irqd->parent_data; + unsigned int cpu; + int ret; + + /* Save the current configuration */ + cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd)); + old_cfg = *cfg; + + /* Allocate a new target vector */ + ret = parent->chip->irq_set_affinity(parent, mask, force); + if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) + return ret; + + /* + * For non-maskable and non-remapped MSI interrupts the migration + * to a different destination CPU and a different vector has to be + * done careful to handle the possible stray interrupt which can be + * caused by the non-atomic update of the address/data pair. + * + * Direct update is possible when: + * - The MSI is maskable (remapped MSI does not use this code path)). + * The quirk bit is not set in this case. + * - The new vector is the same as the old vector + * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) + * - The interrupt is not yet started up + * - The new destination CPU is the same as the old destination CPU + */ + if (!irqd_msi_nomask_quirk(irqd) || + cfg->vector == old_cfg.vector || + old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || + !irqd_is_started(irqd) || + cfg->dest_apicid == old_cfg.dest_apicid) { + irq_msi_update_msg(irqd, cfg); + return ret; + } + + /* + * Paranoia: Validate that the interrupt target is the local + * CPU. + */ + if (WARN_ON_ONCE(cpu != smp_processor_id())) { + irq_msi_update_msg(irqd, cfg); + return ret; + } + + /* + * Redirect the interrupt to the new vector on the current CPU + * first. This might cause a spurious interrupt on this vector if + * the device raises an interrupt right between this update and the + * update to the final destination CPU. + * + * If the vector is in use then the installed device handler will + * denote it as spurious which is no harm as this is a rare event + * and interrupt handlers have to cope with spurious interrupts + * anyway. If the vector is unused, then it is marked so it won't + * trigger the 'No irq handler for vector' warning in do_IRQ(). + * + * This requires to hold vector lock to prevent concurrent updates to + * the affected vector. + */ + lock_vector_lock(); + + /* + * Mark the new target vector on the local CPU if it is currently + * unused. 
Reuse the VECTOR_RETRIGGERED state which is also used in + * the CPU hotplug path for a similar purpose. This cannot be + * undone here as the current CPU has interrupts disabled and + * cannot handle the interrupt before the whole set_affinity() + * section is done. In the CPU unplug case, the current CPU is + * about to vanish and will not handle any interrupts anymore. The + * vector is cleaned up when the CPU comes online again. + */ + if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector]))) + this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED); + + /* Redirect it to the new vector on the local CPU temporarily */ + old_cfg.vector = cfg->vector; + irq_msi_update_msg(irqd, &old_cfg); + + /* Now transition it to the target CPU */ + irq_msi_update_msg(irqd, cfg); + + /* + * All interrupts after this point are now targeted at the new + * vector/CPU. + * + * Drop vector lock before testing whether the temporary assignment + * to the local CPU was hit by an interrupt raised in the device, + * because the retrigger function acquires vector lock again. + */ + unlock_vector_lock(); + + /* + * Check whether the transition raced with a device interrupt and + * is pending in the local APICs IRR. It is safe to do this outside + * of vector lock as the irq_desc::lock of this interrupt is still + * held and interrupts are disabled: The check is not accessing the + * underlying vector store. It's just checking the local APIC's + * IRR. + */ + if (lapic_vector_set_in_irr(cfg->vector)) + irq_data_get_irq_chip(irqd)->irq_retrigger(irqd); + + return ret; +} + /* * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, * which implement the MSI or MSI-X Capability Structure. @@ -61,7 +182,9 @@ static struct irq_chip pci_msi_controller = { .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = irq_msi_compose_msg, - .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = msi_set_affinity, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) @@ -149,6 +272,8 @@ void __init arch_init_msi_domain(struct irq_domain *parent) } if (!msi_default_domain) pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); + else + msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; } #ifdef CONFIG_IRQ_REMAP @@ -159,7 +284,8 @@ static struct irq_chip pci_msi_ir_controller = { .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static struct msi_domain_info pci_msi_ir_domain_info = { @@ -201,7 +327,8 @@ static struct irq_chip dmar_msi_controller = { .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = irq_msi_compose_msg, .irq_write_msi_msg = dmar_msi_write_msg, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_AFFINITY_PRE_STARTUP, }; static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info, @@ -298,7 +425,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = { .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_compose_msi_msg = irq_msi_compose_msg, .irq_write_msi_msg = hpet_msi_write_msg, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info, diff --git a/arch/x86/kernel/apic/probe_32.c 
b/arch/x86/kernel/apic/probe_32.c index 02e8acb134f856faa0030bf1c007dc3ac1bd9ca3..47ff2976c2927432047abbbe6e055975f6cc0ad7 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -185,6 +185,7 @@ void __init default_setup_apic_routing(void) break; } /* If P4 and above fall through */ + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: def_to_bigsmp = 1; } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 7654febd510277bedf28f8cef6a5d599bcfe1f5c..92a4354d6b15ca6f6389648d41f8ffacfe6668e9 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -274,20 +274,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd) const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd); int node = irq_data_get_node(irqd); - if (node == NUMA_NO_NODE) - goto all; - /* Try the intersection of @affmsk and node mask */ - cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk); - if (!assign_vector_locked(irqd, vector_searchmask)) - return 0; - /* Try the node mask */ - if (!assign_vector_locked(irqd, cpumask_of_node(node))) - return 0; -all: + if (node != NUMA_NO_NODE) { + /* Try the intersection of @affmsk and node mask */ + cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk); + if (!assign_vector_locked(irqd, vector_searchmask)) + return 0; + } + /* Try the full affinity mask */ cpumask_and(vector_searchmask, affmsk, cpu_online_mask); if (!assign_vector_locked(irqd, vector_searchmask)) return 0; + + if (node != NUMA_NO_NODE) { + /* Try the node mask */ + if (!assign_vector_locked(irqd, cpumask_of_node(node))) + return 0; + } + /* Try the full online mask */ return assign_vector_locked(irqd, cpu_online_mask); } @@ -313,14 +317,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) struct apic_chip_data *apicd = apic_chip_data(irqd); int vector, cpu; - cpumask_and(vector_searchmask, vector_searchmask, affmsk); - cpu = cpumask_first(vector_searchmask); - if (cpu >= nr_cpu_ids) - return -EINVAL; + cpumask_and(vector_searchmask, dest, affmsk); + /* set_affinity might call here for nothing */ if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) return 0; - vector = irq_matrix_alloc_managed(vector_matrix, cpu); + vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, + &cpu); trace_vector_alloc_managed(irqd->irq, vector, vector); if (vector < 0) return vector; @@ -343,7 +346,7 @@ static void clear_irq_vector(struct irq_data *irqd) trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, apicd->prev_cpu); - per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED; + per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); apicd->vector = 0; @@ -352,7 +355,7 @@ static void clear_irq_vector(struct irq_data *irqd) if (!vector) return; - per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED; + per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); apicd->prev_vector = 0; apicd->move_in_progress = 0; @@ -401,6 +404,17 @@ static int activate_reserved(struct irq_data *irqd) if (!irqd_can_reserve(irqd)) apicd->can_reserve = false; } + + /* + * Check to ensure that the effective affinity mask is a subset of + * the user supplied affinity mask, and warn the user if it is not + */ + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd), + irq_data_get_affinity_mask(irqd))) { + pr_warn("irq %u: Affinity 
broken due to vector space exhaustion.\n", + irqd->irq); + } + return ret; } @@ -438,12 +452,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, trace_vector_activate(irqd->irq, apicd->is_managed, apicd->can_reserve, reserve); - /* Nothing to do for fixed assigned vectors */ - if (!apicd->can_reserve && !apicd->is_managed) - return 0; - raw_spin_lock_irqsave(&vector_lock, flags); - if (reserve || irqd_is_managed_and_shutdown(irqd)) + if (!apicd->can_reserve && !apicd->is_managed) + assign_irq_vector_any_locked(irqd); + else if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); @@ -548,6 +560,16 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); + /* + * Prevent that any of these interrupts is invoked in + * non interrupt context via e.g. generic_handle_irq() + * as that can corrupt the affinity move state. + */ + irqd_set_handle_enforce_irqctx(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is @@ -666,6 +688,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace) irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace); } +void __init lapic_update_legacy_vectors(void) +{ + unsigned int i; + + if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0) + return; + + /* + * If the IO/APIC is disabled via config, kernel command line or + * lack of enumeration then all legacy interrupts are routed + * through the PIC. Make sure that they are marked as legacy + * vectors. PIC_CASCADE_IRQ has already been marked in + * lapic_assign_system_vectors(). + */ + for (i = 0; i < nr_legacy_irqs(); i++) { + if (i != PIC_CASCADE_IR) + lapic_assign_legacy_vector(i, true); + } +} + void __init lapic_assign_system_vectors(void) { unsigned int i, vector = 0; @@ -761,20 +803,10 @@ void lapic_offline(void) static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { - struct apic_chip_data *apicd = apic_chip_data(irqd); int err; - /* - * Core code can call here for inactive interrupts. For inactive - * interrupts which use managed or reservation mode there is no - * point in going through the vector assignment right now as the - * activation will assign a vector which fits the destination - * cpumask. Let the core code store the destination mask and be - * done with it. 
- */ - if (!irqd_is_activated(irqd) && - (apicd->is_managed || apicd->can_reserve)) - return IRQ_SET_MASK_OK; + if (WARN_ON_ONCE(!irqd_is_activated(irqd))) + return -EIO; raw_spin_lock(&vector_lock); cpumask_and(vector_searchmask, dest, cpu_online_mask); @@ -890,7 +922,8 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd) hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu)); apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR); } else { - apicd->prev_vector = 0; + pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu); + free_moved_vector(apicd); } raw_spin_unlock(&vector_lock); } @@ -926,6 +959,7 @@ void irq_complete_move(struct irq_cfg *cfg) */ void irq_force_complete_move(struct irq_desc *desc) { + unsigned int cpu = smp_processor_id(); struct apic_chip_data *apicd; struct irq_data *irqd; unsigned int vector; @@ -950,10 +984,11 @@ void irq_force_complete_move(struct irq_desc *desc) goto unlock; /* - * If prev_vector is empty, no action required. + * If prev_vector is empty or the descriptor is neither currently + * nor previously on the outgoing CPU no action required. */ vector = apicd->prev_vector; - if (!vector) + if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu)) goto unlock; /* diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 7685444a106bb29a3994a5d85066e60b2b4c0d09..145517934171e17b9e6295850361859577a043bd 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu) { struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu); - cpumask_clear_cpu(dead_cpu, &cmsk->mask); + if (cmsk) + cpumask_clear_cpu(dead_cpu, &cmsk->mask); free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); return 0; } diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index b5cf9e7b3830c67128ae710911dd3ac6158f149f..78790c4bfdeb53ded8146fe876556327c2c3c32b 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -94,7 +94,10 @@ static void init_x2apic_ldr(void) static int x2apic_phys_probe(void) { - if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) + if (!x2apic_mode) + return 0; + + if (x2apic_phys || x2apic_fadt_phys()) return 1; return apic == &apic_x2apic_phys; diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 33399426793e0c3cf6abce2a62c9bd98f15f72dc..cc8258a5378b07a21fa83b7b87167b49bd52f4fa 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -31,6 +31,11 @@ static __init int set_corruption_check(char *arg) ssize_t ret; unsigned long val; + if (!arg) { + pr_err("memory_corruption_check config string not provided\n"); + return -EINVAL; + } + ret = kstrtoul(arg, 10, &val); if (ret) return ret; @@ -45,6 +50,11 @@ static __init int set_corruption_check_period(char *arg) ssize_t ret; unsigned long val; + if (!arg) { + pr_err("memory_corruption_check_period config string not provided\n"); + return -EINVAL; + } + ret = kstrtoul(arg, 10, &val); if (ret) return ret; @@ -59,6 +69,11 @@ static __init int set_corruption_check_size(char *arg) char *end; unsigned size; + if (!arg) { + pr_err("memory_corruption_check_size config string not provided\n"); + return -EINVAL; + } + size = memparse(arg, &end); if (*end == '\0') diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 347137e80bf5ace65a2688a78897f6f704e96800..69bba2b1ef08f38221033cd7e695538c9bbac88e 100644 --- a/arch/x86/kernel/cpu/Makefile 
+++ b/arch/x86/kernel/cpu/Makefile @@ -28,18 +28,20 @@ obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o -obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o +obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o tsx.o obj-$(CONFIG_CPU_SUP_AMD) += amd.o +obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin.o obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o CFLAGS_intel_rdt_pseudo_lock.o = -I$(src) -obj-$(CONFIG_X86_MCE) += mcheck/ +obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index eeea634bee0a73291a6f879706ef2e280f8e0d4f..0b389735cb71ddfc49c74698c69a97aca36a8b93 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -25,6 +25,9 @@ static const int amd_erratum_383[]; static const int amd_erratum_400[]; +static const int amd_zenbleed[]; +static const int amd_div0[]; +static const int amd_erratum_1485[]; static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); /* @@ -328,7 +331,6 @@ static void amd_get_topology_early(struct cpuinfo_x86 *c) */ static void amd_get_topology(struct cpuinfo_x86 *c) { - u8 node_id; int cpu = smp_processor_id(); /* get information required for multi-node processors */ @@ -338,7 +340,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); - node_id = ecx & 0xff; + c->cpu_die_id = ecx & 0xff; if (c->x86 == 0x15) c->cu_id = ebx & 0xff; @@ -358,15 +360,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c) if (!err) c->x86_coreid_bits = get_count_order(c->x86_max_cores); - cacheinfo_amd_init_llc_id(c, cpu, node_id); + cacheinfo_amd_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - node_id = value & 7; + c->cpu_die_id = value & 7; - per_cpu(cpu_llc_id, cpu) = node_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else return; @@ -391,7 +393,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) /* Convert the initial APIC ID into the socket ID */ c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ - per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; } u16 amd_get_nb_id(int cpu) @@ -539,12 +541,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) u32 ecx; ecx = cpuid_ecx(0x8000001e); - nodes_per_socket = ((ecx >> 8) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - nodes_per_socket = ((value >> 3) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && @@ -799,6 +801,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c) msr_set_bit(MSR_AMD64_DE_CFG, 31); } +static bool rdrand_force; + +static int __init rdrand_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "force")) + rdrand_force = true; + else + return -EINVAL; + + return 0; +} +early_param("rdrand", rdrand_cmdline); + +static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c) 
+{ + /* + * Saving of the MSR used to hide the RDRAND support during + * suspend/resume is done by arch/x86/power/cpu.c, which is + * dependent on CONFIG_PM_SLEEP. + */ + if (!IS_ENABLED(CONFIG_PM_SLEEP)) + return; + + /* + * The nordrand option can clear X86_FEATURE_RDRAND, so check for + * RDRAND support using the CPUID function directly. + */ + if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force) + return; + + msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62); + + /* + * Verify that the CPUID change has occurred in case the kernel is + * running virtualized and the hypervisor doesn't support the MSR. + */ + if (cpuid_ecx(1) & BIT(30)) { + pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n"); + return; + } + + clear_cpu_cap(c, X86_FEATURE_RDRAND); + pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n"); +} + +static void init_amd_jg(struct cpuinfo_x86 *c) +{ + /* + * Some BIOS implementations do not restore proper RDRAND support + * across suspend and resume. Check on whether to hide the RDRAND + * instruction support via CPUID. + */ + clear_rdrand_cpuid_bit(c); +} + static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; @@ -813,17 +873,75 @@ static void init_amd_bd(struct cpuinfo_x86 *c) wrmsrl_safe(MSR_F15H_IC_CFG, value); } } + + /* + * Some BIOS implementations do not restore proper RDRAND support + * across suspend and resume. Check on whether to hide the RDRAND + * instruction support via CPUID. + */ + clear_rdrand_cpuid_bit(c); } static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); - /* - * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects - * all up to and including B1. - */ - if (c->x86_model <= 1 && c->x86_stepping <= 1) - set_cpu_cap(c, X86_FEATURE_CPB); + + /* Fix up CPUID bits, but only if not virtualised. */ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { + + /* Erratum 1076: CPB feature bit not being set in CPUID. */ + if (!cpu_has(c, X86_FEATURE_CPB)) + set_cpu_cap(c, X86_FEATURE_CPB); + + /* + * Zen3 (Fam19 model < 0x10) parts are not susceptible to + * Branch Type Confusion, but predate the allocation of the + * BTC_NO bit. + */ + if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) + set_cpu_cap(c, X86_FEATURE_BTC_NO); + } +} + +static bool cpu_has_zenbleed_microcode(void) +{ + u32 good_rev = 0; + + switch (boot_cpu_data.x86_model) { + case 0x30 ... 0x3f: good_rev = 0x0830107b; break; + case 0x60 ... 0x67: good_rev = 0x0860010c; break; + case 0x68 ... 0x6f: good_rev = 0x08608107; break; + case 0x70 ... 0x7f: good_rev = 0x08701033; break; + case 0xa0 ... 
0xaf: good_rev = 0x08a00009; break; + + default: + return false; + break; + } + + if (boot_cpu_data.microcode < good_rev) + return false; + + return true; +} + +static void zenbleed_check(struct cpuinfo_x86 *c) +{ + if (!cpu_has_amd_erratum(c, amd_zenbleed)) + return; + + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return; + + if (!cpu_has(c, X86_FEATURE_AVX)) + return; + + if (!cpu_has_zenbleed_microcode()) { + pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); + msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); + } else { + msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); + } } static void init_amd(struct cpuinfo_x86 *c) @@ -854,7 +972,9 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x10: init_amd_gh(c); break; case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; - case 0x17: init_amd_zn(c); break; + case 0x16: init_amd_jg(c); break; + case 0x17: /* fallthrough */; + case 0x19: init_amd_zn(c); break; } /* @@ -916,6 +1036,19 @@ static void init_amd(struct cpuinfo_x86 *c) /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ if (!cpu_has(c, X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); + + check_null_seg_clears_base(c); + + zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } + + if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && + cpu_has_amd_erratum(c, amd_erratum_1485)) + msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); } #ifdef CONFIG_X86_32 @@ -1043,6 +1176,19 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +static const int amd_zenbleed[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), + AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), + AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); + +static const int amd_erratum_1485[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), + AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); + +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { @@ -1093,3 +1239,29 @@ void set_dr_addr_mask(unsigned long mask, int dr) break; } } + +static void zenbleed_check_cpu(void *unused) +{ + struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); + + zenbleed_check(c); +} + +void amd_check_microcode(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return; + + on_each_cpu(zenbleed_check_cpu, NULL, 1); +} + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. 
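/*
 * Illustrative user-space sketch (hypothetical helper, not the kernel's
 * code): a table-driven version of the cpu_has_zenbleed_microcode()
 * check above.  The model ranges and "good" microcode revisions are the
 * ones listed in that switch statement; everything else here is made up
 * for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct ucode_gate {
	unsigned char model_lo, model_hi;
	unsigned int  good_rev;
};

static const struct ucode_gate zen2_gates[] = {
	{ 0x30, 0x3f, 0x0830107b },
	{ 0x60, 0x67, 0x0860010c },
	{ 0x68, 0x6f, 0x08608107 },
	{ 0x70, 0x7f, 0x08701033 },
	{ 0xa0, 0xaf, 0x08a00009 },
};

static bool ucode_has_fix(unsigned char model, unsigned int rev)
{
	unsigned int i;

	for (i = 0; i < sizeof(zen2_gates) / sizeof(zen2_gates[0]); i++) {
		if (model >= zen2_gates[i].model_lo && model <= zen2_gates[i].model_hi)
			return rev >= zen2_gates[i].good_rev;
	}
	return false;	/* unknown model: assume the fix is not present */
}

int main(void)
{
	/* one revision below and one at the threshold for a model 0x31 part */
	printf("0x0830107a -> %d\n", ucode_has_fix(0x31, 0x0830107a));
	printf("0x0830107b -> %d\n", ucode_has_fix(0x31, 0x0830107b));
	return 0;
}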
+ */ +void amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 40bdaea97fe7cac25f2b04bc239fdc520d68f09e..5cf0f6c5b5deb5abfcc4a3b9e0ea4d90d6d47769 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -30,23 +31,58 @@ #include #include #include +#include +#include "cpu.h" + +static void __init spectre_v1_select_mitigation(void); static void __init spectre_v2_select_mitigation(void); +static void __init retbleed_select_mitigation(void); +static void __init spectre_v2_user_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); - -/* - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any - * writes to SPEC_CTRL contain whatever reserved bits have been set. - */ -u64 __ro_after_init x86_spec_ctrl_base; +static void __init mds_select_mitigation(void); +static void __init md_clear_update_mitigation(void); +static void __init md_clear_select_mitigation(void); +static void __init taa_select_mitigation(void); +static void __init mmio_select_mitigation(void); +static void __init srbds_select_mitigation(void); +static void __init gds_select_mitigation(void); + +/* The base value of the SPEC_CTRL MSR without task-specific bits set */ +u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); +/* The current value of the SPEC_CTRL MSR with task-specific bits set */ +DEFINE_PER_CPU(u64, x86_spec_ctrl_current); +EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); + +static DEFINE_MUTEX(spec_ctrl_mutex); + /* - * The vendor and possibly platform specific bits which can be modified in - * x86_spec_ctrl_base. + * Keep track of the SPEC_CTRL MSR value for the current task, which may differ + * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). */ -static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; +void write_spec_ctrl_current(u64 val, bool force) +{ + if (this_cpu_read(x86_spec_ctrl_current) == val) + return; + + this_cpu_write(x86_spec_ctrl_current, val); + + /* + * When KERNEL_IBRS this MSR is written on return-to-user, unless + * forced the update can be delayed until that time. + */ + if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) + wrmsrl(MSR_IA32_SPEC_CTRL, val); +} + +u64 spec_ctrl_current(void) +{ + return this_cpu_read(x86_spec_ctrl_current); +} +EXPORT_SYMBOL_GPL(spec_ctrl_current); /* * AMD specific MSR info for Speculative Store Bypass control. 
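/*
 * Illustrative user-space sketch (hypothetical names): the write-avoidance
 * pattern used by write_spec_ctrl_current() above: keep a cached copy of
 * the last value handed to an expensive sink (a stand-in for the MSR write
 * here) and skip the write when nothing changed.  This is a simplified
 * variant; it does not model the KERNEL_IBRS deferral that the kernel
 * function also handles.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cached_ctrl;		/* stand-in for the per-CPU cache */

static void expensive_write(uint64_t val)
{
	printf("write 0x%llx\n", (unsigned long long)val);
}

static void update_ctrl(uint64_t val)
{
	if (cached_ctrl == val)
		return;			/* unchanged, skip the expensive write */
	cached_ctrl = val;
	expensive_write(val);
}

int main(void)
{
	update_ctrl(0x1);	/* writes */
	update_ctrl(0x1);	/* skipped */
	update_ctrl(0x3);	/* writes */
	return 0;
}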
@@ -55,6 +91,24 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; +/* Control conditional STIBP in switch_to() */ +DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); +/* Control conditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +/* Control unconditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +/* Control MDS CPU buffer clear before returning to user space */ +DEFINE_STATIC_KEY_FALSE(mds_user_clear); +EXPORT_SYMBOL_GPL(mds_user_clear); +/* Control MDS CPU buffer clear before idling (halt, mwait) */ +DEFINE_STATIC_KEY_FALSE(mds_idle_clear); +EXPORT_SYMBOL_GPL(mds_idle_clear); + +/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ +DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); +EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -63,7 +117,7 @@ void __init check_bugs(void) * identify_boot_cpu() initialized SMT support information, let the * core code know. */ - cpu_smt_check_topology_early(); + cpu_smt_check_topology(); if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); @@ -75,23 +129,39 @@ void __init check_bugs(void) * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD * init code as it is not enumerated and depends on the family. */ - if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - /* Allow STIBP in MSR_SPEC_CTRL if supported */ - if (boot_cpu_has(X86_FEATURE_STIBP)) - x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; + /* + * Previously running kernel (kexec), may have some controls + * turned ON. Clear them and let the mitigations setup below + * rediscover them based on configuration. + */ + x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; + } - /* Select the proper spectre mitigation before patching alternatives */ + /* Select the proper CPU mitigations before patching alternatives: */ + spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); - /* - * Select proper mitigation for any exposure to the Speculative Store - * Bypass vulnerability. + * retbleed_select_mitigation() relies on the state set by + * spectre_v2_select_mitigation(); specifically it wants to know about + * spectre_v2=ibrs. */ + retbleed_select_mitigation(); + /* + * spectre_v2_user_select_mitigation() relies on the state set by + * retbleed_select_mitigation(); specifically the STIBP selection is + * forced for UNRET. 
+ */ + spectre_v2_user_select_mitigation(); ssb_select_mitigation(); - l1tf_select_mitigation(); + md_clear_select_mitigation(); + srbds_select_mitigation(); + gds_select_mitigation(); + + arch_smt_update(); #ifdef CONFIG_X86_32 /* @@ -125,52 +195,17 @@ void __init check_bugs(void) #endif } -/* The kernel command line selection */ -enum spectre_v2_mitigation_cmd { - SPECTRE_V2_CMD_NONE, - SPECTRE_V2_CMD_AUTO, - SPECTRE_V2_CMD_FORCE, - SPECTRE_V2_CMD_RETPOLINE, - SPECTRE_V2_CMD_RETPOLINE_GENERIC, - SPECTRE_V2_CMD_RETPOLINE_AMD, -}; - -static const char *spectre_v2_strings[] = { - [SPECTRE_V2_NONE] = "Vulnerable", - [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", - [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", - [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", - [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", - [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", -}; - -#undef pr_fmt -#define pr_fmt(fmt) "Spectre V2 : " fmt - -static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = - SPECTRE_V2_NONE; - +/* + * NOTE: For VMX, this function is not called in the vmexit path. + * It uses vmx_spec_ctrl_restore_host() instead. + */ void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) { - u64 msrval, guestval, hostval = x86_spec_ctrl_base; + u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); struct thread_info *ti = current_thread_info(); - /* Is MSR_SPEC_CTRL implemented ? */ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { - /* - * Restrict guest_spec_ctrl to supported values. Clear the - * modifiable bits in the host base value and or the - * modifiable bits from the guest value. - */ - guestval = hostval & ~x86_spec_ctrl_mask; - guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; - - /* SSBD controlled in MSR_SPEC_CTRL */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) - hostval |= ssbd_tif_to_spec_ctrl(ti->flags); - if (hostval != guestval) { msrval = setguest ? guestval : hostval; wrmsrl(MSR_IA32_SPEC_CTRL, msrval); @@ -204,7 +239,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) tif = setguest ? 
ssbd_spec_ctrl_to_tif(guestval) : ssbd_spec_ctrl_to_tif(hostval); - speculative_store_bypass_update(tif); + speculation_ctrl_update(tif); } } EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); @@ -219,7 +254,730 @@ static void x86_amd_ssb_disable(void) wrmsrl(MSR_AMD64_LS_CFG, msrval); } -#ifdef RETPOLINE +#undef pr_fmt +#define pr_fmt(fmt) "MDS: " fmt + +/* Default mitigation for MDS-affected CPUs */ +static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; +static bool mds_nosmt __ro_after_init = false; + +static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", + [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", + [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", +}; + +static void __init mds_select_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { + mds_mitigation = MDS_MITIGATION_OFF; + return; + } + + if (mds_mitigation == MDS_MITIGATION_FULL) { + if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) + mds_mitigation = MDS_MITIGATION_VMWERV; + + static_branch_enable(&mds_user_clear); + + if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && + (mds_nosmt || cpu_mitigations_auto_nosmt())) + cpu_smt_disable(false); + } +} + +static int __init mds_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + mds_mitigation = MDS_MITIGATION_OFF; + else if (!strcmp(str, "full")) + mds_mitigation = MDS_MITIGATION_FULL; + else if (!strcmp(str, "full,nosmt")) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_nosmt = true; + } + + return 0; +} +early_param("mds", mds_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "TAA: " fmt + +/* Default mitigation for TAA-affected CPUs */ +static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; +static bool taa_nosmt __ro_after_init; + +static const char * const taa_strings[] = { + [TAA_MITIGATION_OFF] = "Vulnerable", + [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", + [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", +}; + +static void __init taa_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_OFF; + return; + } + + /* TSX previously disabled by tsx=off */ + if (!boot_cpu_has(X86_FEATURE_RTM)) { + taa_mitigation = TAA_MITIGATION_TSX_DISABLED; + return; + } + + if (cpu_mitigations_off()) { + taa_mitigation = TAA_MITIGATION_OFF; + return; + } + + /* + * TAA mitigation via VERW is turned off if both + * tsx_async_abort=off and mds=off are specified. + */ + if (taa_mitigation == TAA_MITIGATION_OFF && + mds_mitigation == MDS_MITIGATION_OFF) + return; + + if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) + taa_mitigation = TAA_MITIGATION_VERW; + else + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + + /* + * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. + * A microcode update fixes this behavior to clear CPU buffers. It also + * adds support for MSR_IA32_TSX_CTRL which is enumerated by the + * ARCH_CAP_TSX_CTRL_MSR bit. + * + * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode + * update is required. + */ + ia32_cap = x86_read_arch_cap_msr(); + if ( (ia32_cap & ARCH_CAP_MDS_NO) && + !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + + /* + * TSX is enabled, select alternate mitigation for TAA which is + * the same as MDS. 
Enable MDS static branch to clear CPU buffers. + * + * For guests that can't determine whether the correct microcode is + * present on host, enable the mitigation for UCODE_NEEDED as well. + */ + static_branch_enable(&mds_user_clear); + + if (taa_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init tsx_async_abort_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_TAA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + taa_mitigation = TAA_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + taa_mitigation = TAA_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_nosmt = true; + } + + return 0; +} +early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "MMIO Stale Data: " fmt + +enum mmio_mitigations { + MMIO_MITIGATION_OFF, + MMIO_MITIGATION_UCODE_NEEDED, + MMIO_MITIGATION_VERW, +}; + +/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ +static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static bool mmio_nosmt __ro_after_init = false; + +static const char * const mmio_strings[] = { + [MMIO_MITIGATION_OFF] = "Vulnerable", + [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", +}; + +static void __init mmio_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || + cpu_mitigations_off()) { + mmio_mitigation = MMIO_MITIGATION_OFF; + return; + } + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable CPU buffer clear mitigation for host and VMM, if also affected + * by MDS or TAA. Otherwise, enable mitigation for VMM only. + */ + if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && + boot_cpu_has(X86_FEATURE_RTM))) + static_branch_enable(&mds_user_clear); + else + static_branch_enable(&mmio_stale_data_clear); + + /* + * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can + * be propagated to uncore buffers, clearing the Fill buffers on idle + * is required irrespective of SMT state. + */ + if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) + static_branch_enable(&mds_idle_clear); + + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. 
+ */ + if ((ia32_cap & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(ia32_cap & ARCH_CAP_MDS_NO))) + mmio_mitigation = MMIO_MITIGATION_VERW; + else + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + + if (mmio_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init mmio_stale_data_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + mmio_mitigation = MMIO_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_nosmt = true; + } + + return 0; +} +early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "" fmt + +static void __init md_clear_update_mitigation(void) +{ + if (cpu_mitigations_off()) + return; + + if (!static_key_enabled(&mds_user_clear)) + goto out; + + /* + * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data + * mitigation, if necessary. + */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } + if (taa_mitigation == TAA_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_select_mitigation(); + } + if (mmio_mitigation == MMIO_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_select_mitigation(); + } +out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); +} + +static void __init md_clear_select_mitigation(void) +{ + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + + /* + * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update + * and print their mitigation after MDS, TAA and MMIO Stale Data + * mitigation selection is done. 
+ */ + md_clear_update_mitigation(); +} + +#undef pr_fmt +#define pr_fmt(fmt) "SRBDS: " fmt + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF, + SRBDS_MITIGATION_UCODE_NEEDED, + SRBDS_MITIGATION_FULL, + SRBDS_MITIGATION_TSX_OFF, + SRBDS_MITIGATION_HYPERVISOR, +}; + +static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; + +static const char * const srbds_strings[] = { + [SRBDS_MITIGATION_OFF] = "Vulnerable", + [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", + [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", + [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +static bool srbds_off; + +void update_srbds_msr(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + + if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + switch (srbds_mitigation) { + case SRBDS_MITIGATION_OFF: + case SRBDS_MITIGATION_TSX_OFF: + mcu_ctrl |= RNGDS_MITG_DIS; + break; + case SRBDS_MITIGATION_FULL: + mcu_ctrl &= ~RNGDS_MITG_DIS; + break; + default: + break; + } + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); +} + +static void __init srbds_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + /* + * Check to see if this is one of the MDS_NO systems supporting TSX that + * are only exposed to SRBDS when TSX is enabled or when CPU is affected + * by Processor MMIO Stale Data vulnerability. + */ + ia32_cap = x86_read_arch_cap_msr(); + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && + !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; + else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; + else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) + srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; + else if (cpu_mitigations_off() || srbds_off) + srbds_mitigation = SRBDS_MITIGATION_OFF; + + update_srbds_msr(); + pr_info("%s\n", srbds_strings[srbds_mitigation]); +} + +static int __init srbds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return 0; + + srbds_off = !strcmp(str, "off"); + return 0; +} +early_param("srbds", srbds_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "GDS: " fmt + +enum gds_mitigations { + GDS_MITIGATION_OFF, + GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FORCE, + GDS_MITIGATION_FULL, + GDS_MITIGATION_FULL_LOCKED, + GDS_MITIGATION_HYPERVISOR, +}; + +#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; +#else +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; +#endif + +static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", + [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", + [GDS_MITIGATION_FULL] = "Mitigation: Microcode", + [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +bool gds_ucode_mitigated(void) +{ + return (gds_mitigation == GDS_MITIGATION_FULL || + gds_mitigation == GDS_MITIGATION_FULL_LOCKED); +} +EXPORT_SYMBOL_GPL(gds_ucode_mitigated); + +void update_gds_msr(void) +{ + u64 mcu_ctrl_after; + u64 mcu_ctrl; + + 
switch (gds_mitigation) { + case GDS_MITIGATION_OFF: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl |= GDS_MITG_DIS; + break; + case GDS_MITIGATION_FULL_LOCKED: + /* + * The LOCKED state comes from the boot CPU. APs might not have + * the same state. Make sure the mitigation is enabled on all + * CPUs. + */ + case GDS_MITIGATION_FULL: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl &= ~GDS_MITG_DIS; + break; + case GDS_MITIGATION_FORCE: + case GDS_MITIGATION_UCODE_NEEDED: + case GDS_MITIGATION_HYPERVISOR: + return; + }; + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + /* + * Check to make sure that the WRMSR value was not ignored. Writes to + * GDS_MITG_DIS will be ignored if this processor is locked but the boot + * processor was not. + */ + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); + WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); +} + +static void __init gds_select_mitigation(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + gds_mitigation = GDS_MITIGATION_HYPERVISOR; + goto out; + } + + if (cpu_mitigations_off()) + gds_mitigation = GDS_MITIGATION_OFF; + /* Will verify below that mitigation _can_ be disabled */ + + /* No microcode */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { + if (gds_mitigation == GDS_MITIGATION_FORCE) { + /* + * This only needs to be done on the boot CPU so do it + * here rather than in update_gds_msr() + */ + setup_clear_cpu_cap(X86_FEATURE_AVX); + pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); + } else { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + } + goto out; + } + + /* Microcode has mitigation, use it */ + if (gds_mitigation == GDS_MITIGATION_FORCE) + gds_mitigation = GDS_MITIGATION_FULL; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + if (mcu_ctrl & GDS_MITG_LOCKED) { + if (gds_mitigation == GDS_MITIGATION_OFF) + pr_warn("Mitigation locked. Disable failed.\n"); + + /* + * The mitigation is selected from the boot CPU. All other CPUs + * _should_ have the same state. If the boot CPU isn't locked + * but others are then update_gds_msr() will WARN() of the state + * mismatch. If the boot CPU is locked update_gds_msr() will + * ensure the other CPUs have the mitigation enabled. + */ + gds_mitigation = GDS_MITIGATION_FULL_LOCKED; + } + + update_gds_msr(); +out: + pr_info("%s\n", gds_strings[gds_mitigation]); +} + +static int __init gds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return 0; + + if (!strcmp(str, "off")) + gds_mitigation = GDS_MITIGATION_OFF; + else if (!strcmp(str, "force")) + gds_mitigation = GDS_MITIGATION_FORCE; + + return 0; +} +early_param("gather_data_sampling", gds_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V1 : " fmt + +enum spectre_v1_mitigation { + SPECTRE_V1_MITIGATION_NONE, + SPECTRE_V1_MITIGATION_AUTO, +}; + +static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = + SPECTRE_V1_MITIGATION_AUTO; + +static const char * const spectre_v1_strings[] = { + [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", + [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", +}; + +/* + * Does SMAP provide full mitigation against speculative kernel access to + * userspace? 
+ */ +static bool smap_works_speculatively(void) +{ + if (!boot_cpu_has(X86_FEATURE_SMAP)) + return false; + + /* + * On CPUs which are vulnerable to Meltdown, SMAP does not + * prevent speculative access to user data in the L1 cache. + * Consider SMAP to be non-functional as a mitigation on these + * CPUs. + */ + if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) + return false; + + return true; +} + +static void __init spectre_v1_select_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { + spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; + return; + } + + if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { + /* + * With Spectre v1, a user can speculatively control either + * path of a conditional swapgs with a user-controlled GS + * value. The mitigation is to add lfences to both code paths. + * + * If FSGSBASE is enabled, the user can put a kernel address in + * GS, in which case SMAP provides no protection. + * + * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the + * FSGSBASE enablement patches have been merged. ] + * + * If FSGSBASE is disabled, the user can only put a user space + * address in GS. That makes an attack harder, but still + * possible if there's no SMAP protection. + */ + if (!smap_works_speculatively()) { + /* + * Mitigation can be provided from SWAPGS itself or + * PTI as the CR3 write in the Meltdown mitigation + * is serializing. + * + * If neither is there, mitigate with an LFENCE to + * stop speculation through swapgs. + */ + if (boot_cpu_has_bug(X86_BUG_SWAPGS) && + !boot_cpu_has(X86_FEATURE_PTI)) + setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); + + /* + * Enable lfences in the kernel entry (non-swapgs) + * paths, to prevent user entry from speculatively + * skipping swapgs. + */ + setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); + } + } + + pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); +} + +static int __init nospectre_v1_cmdline(char *str) +{ + spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; + return 0; +} +early_param("nospectre_v1", nospectre_v1_cmdline); + +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +#undef pr_fmt +#define pr_fmt(fmt) "RETBleed: " fmt + +enum retbleed_mitigation { + RETBLEED_MITIGATION_NONE, + RETBLEED_MITIGATION_IBRS, + RETBLEED_MITIGATION_EIBRS, +}; + +enum retbleed_mitigation_cmd { + RETBLEED_CMD_OFF, + RETBLEED_CMD_AUTO, +}; + +const char * const retbleed_strings[] = { + [RETBLEED_MITIGATION_NONE] = "Vulnerable", + [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", + [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", +}; + +static enum retbleed_mitigation retbleed_mitigation __ro_after_init = + RETBLEED_MITIGATION_NONE; +static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = + RETBLEED_CMD_AUTO; + +static int __init retbleed_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + retbleed_cmd = RETBLEED_CMD_OFF; + else if (!strcmp(str, "auto")) + retbleed_cmd = RETBLEED_CMD_AUTO; + else + pr_err("Unknown retbleed option (%s). 
Defaulting to 'auto'\n", str); + + return 0; +} +early_param("retbleed", retbleed_parse_cmdline); + +#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" +#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n" +#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" + +static void __init retbleed_select_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) + return; + + switch (retbleed_cmd) { + case RETBLEED_CMD_OFF: + return; + + case RETBLEED_CMD_AUTO: + default: + /* + * The Intel mitigation (IBRS) was already selected in + * spectre_v2_select_mitigation(). + */ + + break; + } + + switch (retbleed_mitigation) { + default: + break; + } + + /* + * Let IBRS trump all on Intel without affecting the effects of the + * retbleed= cmdline option. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + switch (spectre_v2_enabled) { + case SPECTRE_V2_IBRS: + retbleed_mitigation = RETBLEED_MITIGATION_IBRS; + break; + case SPECTRE_V2_EIBRS: + case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_EIBRS_LFENCE: + retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; + break; + default: + pr_err(RETBLEED_INTEL_MSG); + } + } + + pr_info("%s\n", retbleed_strings[retbleed_mitigation]); +} + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + +static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = + SPECTRE_V2_USER_NONE; +static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = + SPECTRE_V2_USER_NONE; + +#ifdef CONFIG_RETPOLINE static bool spectre_v2_bad_module; bool retpoline_module_ok(bool has_retpoline) @@ -240,89 +998,413 @@ static inline const char *spectre_v2_module_string(void) static inline const char *spectre_v2_module_string(void) { return ""; } #endif -static void __init spec2_print_if_insecure(const char *reason) +#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" +#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" +#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" +#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" + +#ifdef CONFIG_BPF_SYSCALL +void unpriv_ebpf_notify(int new_state) { - if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + if (new_state) + return; + + /* Unprivileged eBPF is enabled */ + + switch (spectre_v2_enabled) { + case SPECTRE_V2_EIBRS: + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + break; + case SPECTRE_V2_EIBRS_LFENCE: + if (sched_smt_active()) + pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); + break; + default: + break; + } } +#endif -static void __init spec2_print_if_secure(const char *reason) +static inline bool match_option(const char *arg, int arglen, const char *opt) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s selected on command line.\n", reason); + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); } -static inline bool retp_compiler(void) +/* The kernel command line selection for spectre v2 */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + 
SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_LFENCE, + SPECTRE_V2_CMD_EIBRS, + SPECTRE_V2_CMD_EIBRS_RETPOLINE, + SPECTRE_V2_CMD_EIBRS_LFENCE, + SPECTRE_V2_CMD_IBRS, +}; + +enum spectre_v2_user_cmd { + SPECTRE_V2_USER_CMD_NONE, + SPECTRE_V2_USER_CMD_AUTO, + SPECTRE_V2_USER_CMD_FORCE, + SPECTRE_V2_USER_CMD_PRCTL, + SPECTRE_V2_USER_CMD_PRCTL_IBPB, + SPECTRE_V2_USER_CMD_SECCOMP, + SPECTRE_V2_USER_CMD_SECCOMP_IBPB, +}; + +static const char * const spectre_v2_user_strings[] = { + [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", + [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", + [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", + [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", + [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", +}; + +static const struct { + const char *option; + enum spectre_v2_user_cmd cmd; + bool secure; +} v2_user_options[] __initconst = { + { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, + { "off", SPECTRE_V2_USER_CMD_NONE, false }, + { "on", SPECTRE_V2_USER_CMD_FORCE, true }, + { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, + { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, + { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, + { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, +}; + +static void __init spec_v2_user_print_cond(const char *reason, bool secure) { - return __is_defined(RETPOLINE); + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("spectre_v2_user=%s forced on command line.\n", reason); } -static inline bool match_option(const char *arg, int arglen, const char *opt) +static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; + +static enum spectre_v2_user_cmd __init +spectre_v2_parse_user_cmdline(void) { - int len = strlen(opt); + char arg[20]; + int ret, i; - return len == arglen && !strncmp(arg, opt, len); + switch (spectre_v2_cmd) { + case SPECTRE_V2_CMD_NONE: + return SPECTRE_V2_USER_CMD_NONE; + case SPECTRE_V2_CMD_FORCE: + return SPECTRE_V2_USER_CMD_FORCE; + default: + break; + } + + ret = cmdline_find_option(boot_command_line, "spectre_v2_user", + arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_USER_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { + if (match_option(arg, ret, v2_user_options[i].option)) { + spec_v2_user_print_cond(v2_user_options[i].option, + v2_user_options[i].secure); + return v2_user_options[i].cmd; + } + } + + pr_err("Unknown user space protection option (%s). 
Switching to AUTO select\n", arg); + return SPECTRE_V2_USER_CMD_AUTO; +} + +static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) +{ + return mode == SPECTRE_V2_EIBRS || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_EIBRS_LFENCE; +} + +static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) +{ + return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; +} + +static void __init +spectre_v2_user_select_mitigation(void) +{ + enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; + bool smt_possible = IS_ENABLED(CONFIG_SMP); + enum spectre_v2_user_cmd cmd; + + if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) + return; + + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || + cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + smt_possible = false; + + cmd = spectre_v2_parse_user_cmdline(); + switch (cmd) { + case SPECTRE_V2_USER_CMD_NONE: + goto set_mode; + case SPECTRE_V2_USER_CMD_FORCE: + mode = SPECTRE_V2_USER_STRICT; + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + mode = SPECTRE_V2_USER_PRCTL; + break; + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + if (IS_ENABLED(CONFIG_SECCOMP)) + mode = SPECTRE_V2_USER_SECCOMP; + else + mode = SPECTRE_V2_USER_PRCTL; + break; + } + + /* Initialize Indirect Branch Prediction Barrier */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + + switch (cmd) { + case SPECTRE_V2_USER_CMD_FORCE: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + static_branch_enable(&switch_mm_always_ibpb); + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_AUTO: + case SPECTRE_V2_USER_CMD_SECCOMP: + static_branch_enable(&switch_mm_cond_ibpb); + break; + default: + break; + } + + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? + "always-on" : "conditional"); + + spectre_v2_user_ibpb = mode; + } + + /* + * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP + * is not required. + * + * Enhanced IBRS also protects against cross-thread branch target + * injection in user-mode as the IBRS bit remains always set which + * implicitly enables cross-thread protections. However, in legacy IBRS + * mode, the IBRS bit is set only on kernel entry and cleared on return + * to userspace. This disables the implicit cross-thread protection, + * so allow for STIBP to be selected in that case. + */ + if (!boot_cpu_has(X86_FEATURE_STIBP) || + !smt_possible || + spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + return; + + /* + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. 
+ */ + if (mode != SPECTRE_V2_USER_STRICT && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + + spectre_v2_user_stibp = mode; + +set_mode: + pr_info("%s\n", spectre_v2_user_strings[mode]); } +static const char * const spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", + [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", + [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", + [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", + [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", + [SPECTRE_V2_IBRS] = "Mitigation: IBRS", +}; + static const struct { const char *option; enum spectre_v2_mitigation_cmd cmd; bool secure; -} mitigation_options[] = { - { "off", SPECTRE_V2_CMD_NONE, false }, - { "on", SPECTRE_V2_CMD_FORCE, true }, - { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, - { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, - { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, - { "auto", SPECTRE_V2_CMD_AUTO, false }, +} mitigation_options[] __initconst = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, + { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, + { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, + { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, + { "ibrs", SPECTRE_V2_CMD_IBRS, false }, }; -static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) +static void __init spec_v2_print_cond(const char *reason, bool secure) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) + pr_info("%s selected on command line.\n", reason); +} + +static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) +{ + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + char arg[20]; + int ret, i; + + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || + cpu_mitigations_off()) + return SPECTRE_V2_CMD_NONE; + + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_CMD_AUTO; + } + + if ((cmd == SPECTRE_V2_CMD_RETPOLINE || + cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && + !IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("%s selected but not compiled in. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if ((cmd == SPECTRE_V2_CMD_EIBRS || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && + !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { + pr_err("%s selected but CPU doesn't have eIBRS. 
Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && + !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { + pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { + pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { + pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { + pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + spec_v2_print_cond(mitigation_options[i].option, + mitigation_options[i].secure); + return cmd; +} + +static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) +{ + if (!IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("Kernel not compiled with retpoline; no mitigation available!"); + return SPECTRE_V2_NONE; + } + + return SPECTRE_V2_RETPOLINE; +} + +/* Disable in-kernel use of non-RSB RET predictors */ +static void __init spec_ctrl_disable_kernel_rrsba(void) { - char arg[20]; - int ret, i; - enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + u64 ia32_cap; - if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) - return SPECTRE_V2_CMD_NONE; - else { - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); - if (ret < 0) - return SPECTRE_V2_CMD_AUTO; + if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) + return; - for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { - if (!match_option(arg, ret, mitigation_options[i].option)) - continue; - cmd = mitigation_options[i].cmd; - break; - } + ia32_cap = x86_read_arch_cap_msr(); - if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_CMD_AUTO; - } + if (ia32_cap & ARCH_CAP_RRSBA) { + x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; + write_spec_ctrl_current(x86_spec_ctrl_base, true); } +} - if ((cmd == SPECTRE_V2_CMD_RETPOLINE || - cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || - cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && - !IS_ENABLED(CONFIG_RETPOLINE)) { - pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); - return SPECTRE_V2_CMD_AUTO; - } +static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) +{ + /* + * Similar to context switches, there are two types of RSB attacks + * after VM exit: + * + * 1) RSB underflow + * + * 2) Poisoned RSB entry + * + * When retpoline is enabled, both are mitigated by filling/clearing + * the RSB. + * + * When IBRS is enabled, while #1 would be mitigated by the IBRS branch + * prediction isolation protections, RSB still needs to be cleared + * because of #2. Note that SMEP provides no protection here, unlike + * user-space-poisoned RSB entries. + * + * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB + * bug is present then a LITE version of RSB protection is required, + * just a single call needs to retire before a RET is executed. 
+ */ + switch (mode) { + case SPECTRE_V2_NONE: + return; - if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && - boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { - pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); - return SPECTRE_V2_CMD_AUTO; - } + case SPECTRE_V2_EIBRS_LFENCE: + case SPECTRE_V2_EIBRS: + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB) && + (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) { + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); + pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); + } + return; - if (mitigation_options[i].secure) - spec2_print_if_secure(mitigation_options[i].option); - else - spec2_print_if_insecure(mitigation_options[i].option); + case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_RETPOLINE: + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_IBRS: + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); + pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); + return; + } - return cmd; + pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); + dump_stack(); } static void __init spectre_v2_select_mitigation(void) @@ -345,85 +1427,274 @@ static void __init spectre_v2_select_mitigation(void) case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { - mode = SPECTRE_V2_IBRS_ENHANCED; - /* Force it so VMEXIT will restore correctly */ - x86_spec_ctrl_base |= SPEC_CTRL_IBRS; - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - goto specv2_set_mode; + mode = SPECTRE_V2_EIBRS; + break; } - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_auto; + + if (boot_cpu_has_bug(X86_BUG_RETBLEED) && + retbleed_cmd != RETBLEED_CMD_OFF && + boot_cpu_has(X86_FEATURE_IBRS) && + boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + mode = SPECTRE_V2_IBRS; + break; + } + + mode = spectre_v2_select_retpoline(); break; - case SPECTRE_V2_CMD_RETPOLINE_AMD: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_amd; + + case SPECTRE_V2_CMD_RETPOLINE_LFENCE: + pr_err(SPECTRE_V2_LFENCE_MSG); + mode = SPECTRE_V2_LFENCE; break; + case SPECTRE_V2_CMD_RETPOLINE_GENERIC: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_generic; + mode = SPECTRE_V2_RETPOLINE; break; + case SPECTRE_V2_CMD_RETPOLINE: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_auto; + mode = spectre_v2_select_retpoline(); + break; + + case SPECTRE_V2_CMD_IBRS: + mode = SPECTRE_V2_IBRS; + break; + + case SPECTRE_V2_CMD_EIBRS: + mode = SPECTRE_V2_EIBRS; + break; + + case SPECTRE_V2_CMD_EIBRS_LFENCE: + mode = SPECTRE_V2_EIBRS_LFENCE; + break; + + case SPECTRE_V2_CMD_EIBRS_RETPOLINE: + mode = SPECTRE_V2_EIBRS_RETPOLINE; break; } - pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); - return; -retpoline_auto: - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { - retpoline_amd: - if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { - pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); - goto retpoline_generic; - } - mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : - SPECTRE_V2_RETPOLINE_MINIMAL_AMD; - setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); - setup_force_cpu_cap(X86_FEATURE_RETPOLINE); - } else { - retpoline_generic: - mode = retp_compiler() ? 
SPECTRE_V2_RETPOLINE_GENERIC : - SPECTRE_V2_RETPOLINE_MINIMAL; + if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + + if (spectre_v2_in_ibrs_mode(mode)) { + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + write_spec_ctrl_current(x86_spec_ctrl_base, true); + } + + switch (mode) { + case SPECTRE_V2_NONE: + case SPECTRE_V2_EIBRS: + break; + + case SPECTRE_V2_IBRS: + setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); + if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) + pr_warn(SPECTRE_V2_IBRS_PERF_MSG); + break; + + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_EIBRS_LFENCE: + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); + /* fallthrough */ + + case SPECTRE_V2_RETPOLINE: + case SPECTRE_V2_EIBRS_RETPOLINE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + break; } -specv2_set_mode: + /* + * Disable alternate RSB predictions in kernel when indirect CALLs and + * JMPs get protection against BHI and Intramode-BTI, but RET + * prediction from a non-RSB predictor is still a risk. + */ + if (mode == SPECTRE_V2_EIBRS_LFENCE || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_RETPOLINE) + spec_ctrl_disable_kernel_rrsba(); + spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); /* - * If spectre v2 protection has been enabled, unconditionally fill - * RSB during a context switch; this protects against two independent - * issues: + * If Spectre v2 protection has been enabled, fill the RSB during a + * context switch. In general there are two types of RSB attacks + * across context switches, for which the CALLs/RETs may be unbalanced. + * + * 1) RSB underflow + * + * Some Intel parts have "bottomless RSB". When the RSB is empty, + * speculated return targets may come from the branch predictor, + * which could have a user-poisoned BTB or BHB entry. + * + * AMD has it even worse: *all* returns are speculated from the BTB, + * regardless of the state of the RSB. + * + * When IBRS or eIBRS is enabled, the "user -> kernel" attack + * scenario is mitigated by the IBRS branch prediction isolation + * properties, so the RSB buffer filling wouldn't be necessary to + * protect against this type of attack. + * + * The "user -> user" attack scenario is mitigated by RSB filling. * - * - RSB underflow (and switch to BTB) on Skylake+ - * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs + * 2) Poisoned RSB entry + * + * If the 'next' in-kernel return stack is shorter than 'prev', + * 'next' could be tricked into speculating with a user-poisoned RSB + * entry. + * + * The "user -> kernel" attack scenario is mitigated by SMEP and + * eIBRS. + * + * The "user -> user" scenario, also known as SpectreBHB, requires + * RSB clearing. + * + * So to mitigate all cases, unconditionally fill RSB on context + * switches. + * + * FIXME: Is this pointless for retbleed-affected AMD? */ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); - /* Initialize Indirect Branch Prediction Barrier if supported */ - if (boot_cpu_has(X86_FEATURE_IBPB)) { - setup_force_cpu_cap(X86_FEATURE_USE_IBPB); - pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); - } + spectre_v2_determine_rsb_fill_type_at_vmexit(mode); /* - * Retpoline means the kernel is safe because it has no indirect - * branches. Enhanced IBRS protects firmware too, so, enable restricted - * speculation around firmware calls only when Enhanced IBRS isn't - * supported.
+ * Retpoline protects the kernel, but doesn't protect firmware. IBRS + * and Enhanced IBRS protect firmware too, so enable IBRS around + * firmware calls only when IBRS / Enhanced IBRS aren't otherwise + * enabled. * * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because * the user might select retpoline on the kernel command line and if * the CPU supports Enhanced IBRS, kernel might un-intentionally not * enable IBRS around firmware calls. */ - if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) { + if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ + spectre_v2_cmd = cmd; +} + +static void update_stibp_msr(void * __unused) +{ + u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); + write_spec_ctrl_current(val, true); +} + +/* Update x86_spec_ctrl_base in case SMT state changed. */ +static void update_stibp_strict(void) +{ + u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; + + if (sched_smt_active()) + mask |= SPEC_CTRL_STIBP; + + if (mask == x86_spec_ctrl_base) + return; + + pr_info("Update user space SMT mitigation: STIBP %s\n", + mask & SPEC_CTRL_STIBP ? "always-on" : "off"); + x86_spec_ctrl_base = mask; + on_each_cpu(update_stibp_msr, NULL, 1); +} + +/* Update the static key controlling the evaluation of TIF_SPEC_IB */ +static void update_indir_branch_cond(void) +{ + if (sched_smt_active()) + static_branch_enable(&switch_to_cond_stibp); + else + static_branch_disable(&switch_to_cond_stibp); +} + +#undef pr_fmt +#define pr_fmt(fmt) fmt + +/* Update the static key controlling the MDS CPU buffer clear in idle */ +static void update_mds_branch_idle(void) +{ + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable the idle clearing if SMT is active on CPUs which are + * affected only by MSBDS and not any other MDS variant. + * + * The other variants cannot be mitigated when SMT is enabled, so + * clearing the buffers on idle just to prevent the Store Buffer + * repartitioning leak would be a window dressing exercise. + */ + if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) + return; + + if (sched_smt_active()) { + static_branch_enable(&mds_idle_clear); + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (ia32_cap & ARCH_CAP_FBSDP_NO)) { + static_branch_disable(&mds_idle_clear); + } +} + +#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" +#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" + +void arch_smt_update(void) +{ + mutex_lock(&spec_ctrl_mutex); + + if (sched_smt_active() && unprivileged_ebpf_enabled() && + spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) + pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); + + switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: + case SPECTRE_V2_USER_STRICT_PREFERRED: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + update_indir_branch_cond(); + break; + } + + switch (mds_mitigation) { + case MDS_MITIGATION_FULL: + case MDS_MITIGATION_VMWERV: + if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) + pr_warn_once(MDS_MSG_SMT); + update_mds_branch_idle(); + break; + case MDS_MITIGATION_OFF: + break; + } + + switch (taa_mitigation) { + case TAA_MITIGATION_VERW: + case TAA_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(TAA_MSG_SMT); + break; + case TAA_MITIGATION_TSX_DISABLED: + case TAA_MITIGATION_OFF: + break; + } + + switch (mmio_mitigation) { + case MMIO_MITIGATION_VERW: + case MMIO_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(MMIO_MSG_SMT); + break; + case MMIO_MITIGATION_OFF: + break; + } + + mutex_unlock(&spec_ctrl_mutex); } #undef pr_fmt @@ -440,7 +1711,7 @@ enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_SECCOMP, }; -static const char *ssb_strings[] = { +static const char * const ssb_strings[] = { [SPEC_STORE_BYPASS_NONE] = "Vulnerable", [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", @@ -450,7 +1721,7 @@ static const char *ssb_strings[] = { static const struct { const char *option; enum ssb_mitigation_cmd cmd; -} ssb_mitigation_options[] = { +} ssb_mitigation_options[] __initconst = { { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ @@ -464,7 +1735,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) char arg[20]; int ret, i; - if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || + cpu_mitigations_off()) { return SPEC_STORE_BYPASS_CMD_NONE; } else { ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", @@ -542,8 +1814,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; - x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + write_spec_ctrl_current(x86_spec_ctrl_base, true); } } @@ -561,10 +1832,25 @@ static void ssb_select_mitigation(void) #undef pr_fmt #define pr_fmt(fmt) "Speculation prctl: " fmt -static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +static void task_update_spec_tif(struct task_struct *tsk) { - bool update; + /* Force the update of the real TIF bits */ + set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); + + /* + * Immediately update the speculation control MSRs for the current + * task, but for a non-current task delay setting the CPU + * mitigation until it is scheduled next. + * + * This can only happen for SECCOMP mitigation. For PRCTL it's + * always the current task. 
+ */ + if (tsk == current) + speculation_ctrl_update_current(); +} +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ssb_mode != SPEC_STORE_BYPASS_SECCOMP) return -ENXIO; @@ -575,28 +1861,67 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); - update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); - update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + task_update_spec_tif(task); break; default: return -ERANGE; } + return 0; +} - /* - * If being set on non-current task, delay setting the CPU - * mitigation until it is next scheduled. - */ - if (task == current && update) - speculative_store_bypass_update_current(); +static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + switch (ctrl) { + case PR_SPEC_ENABLE: + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return 0; + /* + * Indirect branch speculation is always disabled in strict + * mode. It also cannot be enabled if it was force-disabled + * by a previous prctl call. + */ + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || + task_spec_ib_force_disable(task)) + return -EPERM; + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); + break; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + /* + * Indirect branch speculation is always allowed when + * mitigation is force disabled.
+ */ + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return -EPERM; + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return 0; + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) + task_set_spec_ib_force_disable(task); + task_update_spec_tif(task); + if (task == current) + indirect_branch_prediction_barrier(); + break; + default: + return -ERANGE; + } return 0; } @@ -606,6 +1931,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_set(task, ctrl); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_set(task, ctrl); default: return -ENODEV; } @@ -616,6 +1943,9 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -638,11 +1968,38 @@ static int ssb_prctl_get(struct task_struct *task) } } +static int ib_prctl_get(struct task_struct *task) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return PR_SPEC_ENABLE; + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return PR_SPEC_DISABLE; + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + } else + return PR_SPEC_NOT_AFFECTED; +} + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); + case PR_SPEC_INDIRECT_BRANCH: + return ib_prctl_get(task); default: return -ENODEV; } @@ -651,12 +2008,15 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) void x86_spec_ctrl_setup_ap(void) { if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + write_spec_ctrl_current(x86_spec_ctrl_base, true); if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) x86_amd_ssb_disable(); } +bool itlb_multihit_kvm_mitigation; +EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); + #undef pr_fmt #define pr_fmt(fmt) "L1TF: " fmt @@ -714,6 +2074,11 @@ static void __init l1tf_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; + if (cpu_mitigations_off()) + l1tf_mitigation = L1TF_MITIGATION_OFF; + else if (cpu_mitigations_auto_nosmt()) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; + override_cache_bits(&boot_cpu_data); switch (l1tf_mitigation) { @@ -736,12 +2101,13 @@ static void __init l1tf_select_mitigation(void) #endif half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; - if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + if (l1tf_mitigation != L1TF_MITIGATION_OFF && + e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); pr_info("However, doing so will make a part of your RAM unusable.\n"); - pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); + pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); return; } @@ -774,13 +2140,14 @@ static int __init l1tf_cmdline(char *str) early_param("l1tf", l1tf_cmdline); #undef pr_fmt +#define pr_fmt(fmt) fmt #ifdef CONFIG_SYSFS #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" #if IS_ENABLED(CONFIG_KVM_INTEL) -static const char *l1tf_vmx_states[] = { +static const char * const l1tf_vmx_states[] = { [VMENTER_L1D_FLUSH_AUTO] = "auto", [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", @@ -796,21 +2163,163 @@ static ssize_t l1tf_show_state(char *buf) if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && - cpu_smt_control == CPU_SMT_ENABLED)) + sched_smt_active())) { return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation]); + } return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation], - cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); + sched_smt_active() ? "vulnerable" : "disabled"); +} + +static ssize_t itlb_multihit_show_state(char *buf) +{ + if (itlb_multihit_kvm_mitigation) + return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); + else + return sprintf(buf, "KVM: Vulnerable\n"); } #else static ssize_t l1tf_show_state(char *buf) { return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); } + +static ssize_t itlb_multihit_show_state(char *buf) +{ + return sprintf(buf, "Processor vulnerable\n"); +} #endif +static ssize_t mds_show_state(char *buf) +{ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sprintf(buf, "%s; SMT Host state unknown\n", + mds_strings[mds_mitigation]); + } + + if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : + sched_smt_active() ? "mitigated" : "disabled")); + } + + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); +} + +static ssize_t tsx_async_abort_show_state(char *buf) +{ + if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || + (taa_mitigation == TAA_MITIGATION_OFF)) + return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sprintf(buf, "%s; SMT Host state unknown\n", + taa_strings[taa_mitigation]); + } + + return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); +} + +static ssize_t mmio_stale_data_show_state(char *buf) +{ + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mmio_strings[mmio_mitigation]); + } + + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); +} + +static char *stibp_state(void) +{ + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + return ""; + + switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; + case SPECTRE_V2_USER_STRICT_PREFERRED: + return ", STIBP: always-on"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) + return ", STIBP: conditional"; + } + return ""; +} + +static char *ibpb_state(void) +{ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + if (static_key_enabled(&switch_mm_always_ibpb)) + return ", IBPB: always-on"; + if (static_key_enabled(&switch_mm_cond_ibpb)) + return ", IBPB: conditional"; + return ", IBPB: disabled"; + } + return ""; +} + +static char *pbrsb_eibrs_state(void) +{ + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { + if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || + boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) + return ", PBRSB-eIBRS: SW sequence"; + else + return ", PBRSB-eIBRS: Vulnerable"; + } else { + return ", PBRSB-eIBRS: Not affected"; + } +} + +static ssize_t spectre_v2_show_state(char *buf) +{ + if (spectre_v2_enabled == SPECTRE_V2_LFENCE) + return sprintf(buf, "Vulnerable: LFENCE\n"); + + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); + + if (sched_smt_active() && unprivileged_ebpf_enabled() && + spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) + return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); + + return sprintf(buf, "%s%s%s%s%s%s%s\n", + spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", + pbrsb_eibrs_state(), + spectre_v2_module_string()); +} + +static ssize_t srbds_show_state(char *buf) +{ + return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); +} + +static ssize_t retbleed_show_state(char *buf) +{ + return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); +} + +static ssize_t gds_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -828,13 +2337,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr break; case X86_BUG_SPECTRE_V1: - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? 
", IBRS_FW" : "", - spectre_v2_module_string()); + return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); @@ -843,6 +2349,28 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) return l1tf_show_state(buf); break; + + case X86_BUG_MDS: + return mds_show_state(buf); + + case X86_BUG_TAA: + return tsx_async_abort_show_state(buf); + + case X86_BUG_ITLB_MULTIHIT: + return itlb_multihit_show_state(buf); + + case X86_BUG_SRBDS: + return srbds_show_state(buf); + + case X86_BUG_MMIO_STALE_DATA: + return mmio_stale_data_show_state(buf); + + case X86_BUG_RETBLEED: + return retbleed_show_state(buf); + + case X86_BUG_GDS: + return gds_show_state(buf); + default: break; } @@ -874,4 +2402,39 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b { return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); } + +ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MDS); +} + +ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_TAA); +} + +ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); +} + +ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); +} + +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); +} + +ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); +} + +ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_GDS); +} #endif diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 0c5fcbd998cf11badefad906a2122400a3512d58..171557704038be615c2b60538a6d1a19c6ecd623 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -602,6 +602,10 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) else amd_cpuid4(index, &eax, &ebx, &ecx); amd_init_l3_cache(this_leaf, index); + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + cpuid_count(0x8000001d, index, &eax.full, + &ebx.full, &ecx.full, &edx); + amd_init_l3_cache(this_leaf, index); } else { cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); } @@ -625,7 +629,8 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) union _cpuid4_leaf_eax cache_eax; int i = -1; - if (c->x86_vendor == X86_VENDOR_AMD) + if (c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) op = 0x8000001d; else op = 4; @@ -639,7 +644,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c) return i; } -void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) +void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) { /* * We may have multiple LLCs if L3 caches exist, so check if we @@ -650,9 +655,8 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) if (c->x86 < 0x17) { /* LLC is at the node level. 
*/ - per_cpu(cpu_llc_id, cpu) = node_id; - } else if (c->x86 == 0x17 && - c->x86_model >= 0 && c->x86_model <= 0x1F) { + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; + } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { /* * LLC is at the core complex level. * Core complex ID is ApicId[3] for these processors. @@ -678,6 +682,42 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) } } +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) +{ + /* + * We may have multiple LLCs if L3 caches exist, so check if we + * have an L3 cache by looking at the L3 cache CPUID leaf. + */ + if (!cpuid_edx(0x80000006)) + return; + + if (c->x86_model < 0x5 || + (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache); + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } +} + void init_amd_cacheinfo(struct cpuinfo_x86 *c) { @@ -691,6 +731,11 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c) } } +void init_hygon_cacheinfo(struct cpuinfo_x86 *c) +{ + num_cache_leaves = find_num_cache_leaves(c); +} + void init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ @@ -913,7 +958,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index, int index_msb, i; struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86_vendor == X86_VENDOR_AMD) { + if (c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) { if (__cache_amd_cpumap_setup(cpu, index, base)) return; } diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 14433ff5b8285f124805c8e47dec0d9b3cc19f0b..608b8dfa119f187636b1fd0bda42edcf0d639839 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -72,7 +72,8 @@ static void init_c3(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_REP_GOOD); } - cpu_detect_cache_sizes(c); + if (c->x86 >= 7) + set_cpu_cap(c, X86_FEATURE_REP_GOOD); } enum { @@ -98,18 +99,15 @@ enum { static void early_init_centaur(struct cpuinfo_x86 *c) { - switch (c->x86) { #ifdef CONFIG_X86_32 - case 5: - /* Emulate MTRRs using Centaur's MCR. */ + /* Emulate MTRRs using Centaur's MCR. */ + if (c->x86 == 5) set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); - break; #endif - case 6: - if (c->x86_model >= 0xf) - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); - break; - } + if ((c->x86 == 6 && c->x86_model >= 0xf) || + (c->x86 >= 7)) + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif @@ -117,6 +115,21 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + + if (c->cpuid_level >= 0x00000001) { + u32 eax, ebx, ecx, edx; + + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); + /* + * If HTT (EDX[28]) is set EBX[16:23] contain the number of + * apicids which are reserved per package. Store the resulting + * shift value for the package management code. 
+ */ + if (edx & (1U << 28)) + c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); + } + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c) @@ -160,11 +173,14 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); @@ -178,9 +194,8 @@ static void init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } - switch (c->x86) { #ifdef CONFIG_X86_32 - case 5: + if (c->x86 == 5) { switch (c->x86_model) { case 4: name = "C6"; @@ -240,12 +255,10 @@ static void init_centaur(struct cpuinfo_x86 *c) c->x86_cache_size = (cc>>24)+(dd>>24); } sprintf(c->x86_model_id, "WinChip %s", name); - break; + } #endif - case 6: + if (c->x86 == 6 || c->x86 >= 7) init_c3(c); - break; - } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 44c4ef3d989b59b7bd98ebf617f8e5ee1d2a9548..453ddaf45108f9e81724bcd812695149e914a2c9 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -353,7 +353,7 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c) cr4_set_bits(X86_CR4_UMIP); - pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n"); + pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n"); return; @@ -365,6 +365,77 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c) cr4_clear_bits(X86_CR4_UMIP); } +static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); +static unsigned long cr4_pinned_bits __ro_after_init; + +void native_write_cr0(unsigned long val) +{ + unsigned long bits_missing = 0; + +set_register: + asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order)); + + if (static_branch_likely(&cr_pinning)) { + if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { + bits_missing = X86_CR0_WP; + val |= bits_missing; + goto set_register; + } + /* Warn after we've set the missing bits. */ + WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n"); + } +} +EXPORT_SYMBOL(native_write_cr0); + +void native_write_cr4(unsigned long val) +{ + unsigned long bits_missing = 0; + +set_register: + asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); + + if (static_branch_likely(&cr_pinning)) { + if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { + bits_missing = ~val & cr4_pinned_bits; + val |= bits_missing; + goto set_register; + } + /* Warn after we've set the missing bits. */ + WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", + bits_missing); + } +} +EXPORT_SYMBOL(native_write_cr4); + +void cr4_init(void) +{ + unsigned long cr4 = __read_cr4(); + + if (boot_cpu_has(X86_FEATURE_PCID)) + cr4 |= X86_CR4_PCIDE; + if (static_branch_likely(&cr_pinning)) + cr4 |= cr4_pinned_bits; + + __write_cr4(cr4); + + /* Initialize cr4 shadow for this CPU. */ + this_cpu_write(cpu_tlbstate.cr4, cr4); +} + +/* + * Once CPU feature detection is finished (and boot params have been + * parsed), record any of the sensitive CR bits that are set, and + * enable CR pinning. 
+ */ +static void __init setup_cr_pinning(void) +{ + unsigned long mask; + + mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; + static_key_enable(&cr_pinning.key); +} + /* * Protection Keys are not available in 32-bit mode. */ @@ -808,6 +879,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c) } } +static void init_cqm(struct cpuinfo_x86 *c) +{ + if (!cpu_has(c, X86_FEATURE_CQM_LLC)) { + c->x86_cache_max_rmid = -1; + c->x86_cache_occ_scale = -1; + return; + } + + /* will be overridden if occupancy monitoring exists */ + c->x86_cache_max_rmid = cpuid_ebx(0xf); + + if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) || + cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) || + cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) { + u32 eax, ebx, ecx, edx; + + /* QoS sub-leaf, EAX=0Fh, ECX=1 */ + cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx); + + c->x86_cache_max_rmid = ecx; + c->x86_cache_occ_scale = ebx; + } +} + void get_cpu_cap(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; @@ -830,6 +925,12 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_7_ECX] = ecx; c->x86_capability[CPUID_7_EDX] = edx; + + /* Check valid sub-leaf index before accessing it */ + if (eax >= 1) { + cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); + c->x86_capability[CPUID_7_1_EAX] = eax; + } } /* Extended state features: level 0x0000000d */ @@ -839,33 +940,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_D_1_EAX] = eax; } - /* Additional Intel-defined flags: level 0x0000000F */ - if (c->cpuid_level >= 0x0000000F) { - - /* QoS sub-leaf, EAX=0Fh, ECX=0 */ - cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx); - c->x86_capability[CPUID_F_0_EDX] = edx; - - if (cpu_has(c, X86_FEATURE_CQM_LLC)) { - /* will be overridden if occupancy monitoring exists */ - c->x86_cache_max_rmid = ebx; - - /* QoS sub-leaf, EAX=0Fh, ECX=1 */ - cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx); - c->x86_capability[CPUID_F_1_EDX] = edx; - - if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) || - ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) || - (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) { - c->x86_cache_max_rmid = ecx; - c->x86_cache_occ_scale = ebx; - } - } else { - c->x86_cache_max_rmid = -1; - c->x86_cache_occ_scale = -1; - } - } - /* AMD-defined flags: level 0x80000001 */ eax = cpuid_eax(0x80000000); c->extended_cpuid_level = eax; @@ -896,6 +970,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) init_scattered_cpuid_features(c); init_speculation_control(c); + init_cqm(c); /* * Clear/Set all flags overridden by options, after probe. 
@@ -948,77 +1023,269 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #endif } -static const __initconst struct x86_cpu_id cpu_no_speculation[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, - { X86_VENDOR_CENTAUR, 5 }, - { X86_VENDOR_INTEL, 5 }, - { X86_VENDOR_NSC, 5 }, - { X86_VENDOR_ANY, 4 }, - {} -}; +#define NO_SPECULATION BIT(0) +#define NO_MELTDOWN BIT(1) +#define NO_SSB BIT(2) +#define NO_L1TF BIT(3) +#define NO_MDS BIT(4) +#define MSBDS_ONLY BIT(5) +#define NO_SWAPGS BIT(6) +#define NO_ITLB_MULTIHIT BIT(7) +#define NO_SPECTRE_V2 BIT(8) +#define NO_EIBRS_PBRSB BIT(9) -static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { - { X86_VENDOR_AMD }, - {} -}; +#define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } -/* Only list CPUs which speculate but are non susceptible to SSB */ -static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, - { X86_VENDOR_AMD, 0x12, }, - { X86_VENDOR_AMD, 0x11, }, - { X86_VENDOR_AMD, 0x10, }, - { X86_VENDOR_AMD, 0xf, }, +#define VULNWL_INTEL(model, whitelist) \ + VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) + +#define VULNWL_AMD(family, whitelist) \ + VULNWL(AMD, family, X86_MODEL_ANY, whitelist) + +#define VULNWL_HYGON(family, whitelist) \ + VULNWL(HYGON, family, X86_MODEL_ANY, whitelist) + +static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION), + VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), + VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), + VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), + + /* Intel Family 6 */ + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), + + VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + + VULNWL_INTEL(CORE_YONAH, NO_SSB), + + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + 
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), + + /* + * Technically, swapgs isn't serializing on AMD (despite it previously + * being documented as such in the APM). But according to AMD, %gs is + * updated non-speculatively, and the issuing of %gs-relative memory + * operands will be blocked until the %gs update completes, which is + * good enough for our purposes. + */ + + VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), + VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), + VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), + + /* AMD Family 0xf - 0x12 */ + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + + /* Zhaoxin Family 7 */ + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), {} }; -static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { - /* in addition to cpu_no_speculation */ - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +#define VULNBL(vendor, family, model, blacklist) \ + X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) + +#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ + INTEL_FAM6_##model, steppings, \ + X86_FEATURE_ANY, issues) + +#define VULNBL_AMD(family, blacklist) \ + VULNBL(AMD, family, X86_MODEL_ANY, blacklist) + +#define VULNBL_HYGON(family, blacklist) \ + VULNBL(HYGON, family, X86_MODEL_ANY, blacklist) + +#define SRBDS BIT(0) +/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ +#define MMIO BIT(1) +/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ +#define MMIO_SBDS BIT(2) +/* CPU is affected by RETbleed, speculating where you would not expect it */ +#define RETBLEED BIT(3) +/* CPU is affected by GDS */ +#define GDS BIT(4) +/* CPU is affected by SMT (cross-thread) return predictions */ +#define SMT_RSB BIT(5) + +static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS), + 
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(CANNONLAKE_MOBILE,X86_STEPPING_ANY, RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), + + VULNBL_AMD(0x15, RETBLEED), + VULNBL_AMD(0x16, RETBLEED), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB), + VULNBL_HYGON(0x18, SMT_RSB), {} }; -static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) +static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) +{ + const struct x86_cpu_id *m = x86_match_cpu(table); + + return m && !!(m->driver_data & which); +} + +u64 x86_read_arch_cap_msr(void) { u64 ia32_cap = 0; - if (x86_match_cpu(cpu_no_speculation)) + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + + return ia32_cap; +} + +static bool arch_cap_mmio_immune(u64 ia32_cap) +{ + return (ia32_cap & ARCH_CAP_FBSDP_NO && + ia32_cap & ARCH_CAP_PSDP_NO && + ia32_cap & ARCH_CAP_SBDR_SSDP_NO); +} + +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) +{ + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ + if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && + !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); + + if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (!x86_match_cpu(cpu_no_spec_store_bypass) && - !(ia32_cap & ARCH_CAP_SSB_NO) && + if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && + !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); if (ia32_cap & ARCH_CAP_IBRS_ALL) setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); - if (x86_match_cpu(cpu_no_meltdown)) + if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && + !(ia32_cap & ARCH_CAP_MDS_NO)) { + setup_force_cpu_bug(X86_BUG_MDS); + if (cpu_matches(cpu_vuln_whitelist, 
MSBDS_ONLY)) + setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); + } + + if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) + setup_force_cpu_bug(X86_BUG_SWAPGS); + + /* + * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when: + * - TSX is supported or + * - TSX_CTRL is present + * + * TSX_CTRL check is needed for cases when TSX could be disabled before + * the kernel boot e.g. kexec. + * TSX_CTRL check alone is not sufficient for cases when the microcode + * update is not present or running as guest that don't get TSX_CTRL. + */ + if (!(ia32_cap & ARCH_CAP_TAA_NO) && + (cpu_has(c, X86_FEATURE_RTM) || + (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) + setup_force_cpu_bug(X86_BUG_TAA); + + /* + * SRBDS affects CPUs which support RDRAND or RDSEED and are listed + * in the vulnerability blacklist. + * + * Some of the implications and mitigation of Shared Buffers Data + * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as + * SRBDS. + */ + if ((cpu_has(c, X86_FEATURE_RDRAND) || + cpu_has(c, X86_FEATURE_RDSEED)) && + cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + + /* + * Processor MMIO Stale Data bug enumeration + * + * Affected CPU list is generally enough to enumerate the vulnerability, + * but for virtualization case check for ARCH_CAP MSR bits also, VMM may + * not want the guest to enumerate the bug. + */ + if (cpu_matches(cpu_vuln_blacklist, MMIO) && + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + + if (!cpu_has(c, X86_FEATURE_BTC_NO)) { + if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) + setup_force_cpu_bug(X86_BUG_RETBLEED); + } + + if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) && + !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && + !(ia32_cap & ARCH_CAP_PBRSB_NO)) + setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); + + /* + * Check if CPU is vulnerable to GDS. If running in a virtual machine on + * an affected processor, the VMM may have disabled the use of GATHER by + * disabling AVX2. The only way to do this in HW is to clear XCR0[2], + * which means that AVX will be disabled. + */ + if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && + boot_cpu_has(X86_FEATURE_AVX)) + setup_force_cpu_bug(X86_BUG_GDS); + + if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) + setup_force_cpu_bug(X86_BUG_SMT_RSB); + + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; /* Rogue Data Cache Load? No! 
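The checks added to cpu_set_bug_bits() boil down to boolean conditions over the whitelist/blacklist match and IA32_ARCH_CAPABILITIES bits. Below is a stand-alone sketch that restates the TAA and MMIO Stale Data decisions as pure functions so they can be exercised in isolation; the MSR bit layout is deliberately abstracted into booleans rather than the real encoding.

#include <stdbool.h>
#include <stdio.h>
#include <assert.h>

/* Mirrors the TAA condition: vulnerable when TAA_NO is clear and the CPU
 * either supports RTM or exposes the TSX_CTRL MSR (TSX may have been
 * disabled before boot, e.g. across kexec). */
static bool bug_taa(bool taa_no, bool has_rtm, bool has_tsx_ctrl)
{
	return !taa_no && (has_rtm || has_tsx_ctrl);
}

/* Mirrors arch_cap_mmio_immune(): a CPU on the MMIO blacklist is only
 * treated as immune if FBSDP_NO, PSDP_NO and SBDR_SSDP_NO are all set. */
static bool bug_mmio_stale_data(bool in_blacklist, bool fbsdp_no,
				bool psdp_no, bool sbdr_ssdp_no)
{
	return in_blacklist && !(fbsdp_no && psdp_no && sbdr_ssdp_no);
}

int main(void)
{
	/* TSX disabled by firmware but TSX_CTRL present: still flagged. */
	assert(bug_taa(false, false, true));
	/* TAA_NO set (e.g. by the VMM): never flagged. */
	assert(!bug_taa(true, true, true));
	/* Blacklisted part with all three *_NO bits: treated as immune. */
	assert(!bug_mmio_stale_data(true, true, true, true));
	puts("TAA/MMIO decision checks passed");
	return 0;
}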
*/ @@ -1027,7 +1294,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - if (x86_match_cpu(cpu_no_l1tf)) + if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) return; setup_force_cpu_bug(X86_BUG_L1TF); @@ -1076,6 +1343,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) memset(&c->x86_capability, 0, sizeof c->x86_capability); c->extended_cpuid_level = 0; + if (!have_cpuid_p()) + identify_cpu_without_cpuid(c); + /* cyrix could have cpuid enabled via c_identify()*/ if (have_cpuid_p()) { cpu_detect(c); @@ -1093,7 +1363,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_bsp_init) this_cpu->c_bsp_init(c); } else { - identify_cpu_without_cpuid(c); setup_clear_cpu_cap(X86_FEATURE_CPUID); } @@ -1162,9 +1431,8 @@ void __init early_cpu_init(void) early_identify_cpu(&boot_cpu_data); } -static void detect_null_seg_behavior(struct cpuinfo_x86 *c) +static bool detect_null_seg_behavior(void) { -#ifdef CONFIG_X86_64 /* * Empirically, writing zero to a segment selector on AMD does * not clear the base, whereas writing zero to a segment @@ -1185,10 +1453,43 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c) wrmsrl(MSR_FS_BASE, 1); loadsegment(fs, 0); rdmsrl(MSR_FS_BASE, tmp); - if (tmp != 0) - set_cpu_bug(c, X86_BUG_NULL_SEG); wrmsrl(MSR_FS_BASE, old_base); -#endif + return tmp == 0; +} + +void check_null_seg_clears_base(struct cpuinfo_x86 *c) +{ + /* BUG_NULL_SEG is only relevant with 64bit userspace */ + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + /* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */ + if (c->extended_cpuid_level >= 0x80000021 && + cpuid_eax(0x80000021) & BIT(6)) + return; + + /* + * CPUID bit above wasn't set. If this kernel is still running + * as a HV guest, then the HV has decided not to advertize + * that CPUID bit for whatever reason. For example, one + * member of the migration pool might be vulnerable. Which + * means, the bug is present: set the BUG flag and return. + */ + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) { + set_cpu_bug(c, X86_BUG_NULL_SEG); + return; + } + + /* + * Zen2 CPUs also have this behaviour, but no CPUID bit. + * 0x18 is the respective family for Hygon. + */ + if ((c->x86 == 0x17 || c->x86 == 0x18) && + detect_null_seg_behavior()) + return; + + /* All the remaining ones are affected */ + set_cpu_bug(c, X86_BUG_NULL_SEG); } static void generic_identify(struct cpuinfo_x86 *c) @@ -1224,8 +1525,6 @@ static void generic_identify(struct cpuinfo_x86 *c) get_model_name(c); /* Default name */ - detect_null_seg_behavior(c); - /* * ESPFIX is a strange bug. All real CPUs have it. 
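check_null_seg_clears_base() consults CPUID leaf 0x80000021 EAX bit 6 (the "null selector clears base" bit advertised from Zen3 onward) before falling back to the runtime probe. The CPUID part can be sketched from user space as below, again assuming GCC/Clang <cpuid.h>; the MSR/loadsegment probe itself is kernel-only.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	/* Max extended leaf comes from CPUID 0x80000000 EAX. */
	unsigned int max_ext = __get_cpuid_max(0x80000000, NULL);

	if (max_ext < 0x80000021) {
		puts("leaf 0x80000021 not available");
		return 0;
	}

	__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx);

	/* Bit 6: writing a null segment selector also clears the base. */
	if (eax & (1u << 6))
		puts("CPU advertises null-selector-clears-base");
	else
		puts("bit not advertised; kernel would probe or set X86_BUG_NULL_SEG");
	return 0;
}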
Paravirt * systems that run Linux at CPL > 0 may or may not have the @@ -1282,6 +1581,7 @@ static void validate_apic_and_package_id(struct cpuinfo_x86 *c) cpu, apicid, c->initial_apicid); } BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); + BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); #else c->logical_proc_id = 0; #endif @@ -1447,6 +1747,8 @@ void __init identify_boot_cpu(void) enable_sep_cpu(); #endif cpu_detect_tlb(&boot_cpu_data); + tsx_init(); + setup_cr_pinning(); } void identify_secondary_cpu(struct cpuinfo_x86 *c) @@ -1459,6 +1761,9 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) mtrr_ap_init(); validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); + update_srbds_msr(); + if (boot_cpu_has_bug(X86_BUG_GDS)) + update_gds_msr(); } static __init int setup_noclflush(char *arg) @@ -1689,12 +1994,6 @@ void cpu_init(void) wait_for_master_cpu(cpu); - /* - * Initialize the CR4 shadow before doing anything that could - * try to read it. - */ - cr4_init_shadow(); - if (cpu) load_ucode_ap(); @@ -1794,12 +2093,6 @@ void cpu_init(void) wait_for_master_cpu(cpu); - /* - * Initialize the CR4 shadow before doing anything that could - * try to read it. - */ - cr4_init_shadow(); - show_ucode_info_early(); pr_info("Initializing CPU#%d\n", cpu); @@ -1875,6 +2168,8 @@ void microcode_check(void) perf_check_microcode(); + amd_check_microcode(); + /* Reload CPUID max function as it might've changed. */ info.cpuid_level = cpuid_eax(0); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 7b229afa0a37a6a5bd6c910eaaa2e515fb562e82..937e1d5a4ebcf817aed3769f3eb5eb4afd2807bc 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -45,6 +45,22 @@ struct _tlb_table { extern const struct cpu_dev *const __x86_cpu_dev_start[], *const __x86_cpu_dev_end[]; +#ifdef CONFIG_CPU_SUP_INTEL +enum tsx_ctrl_states { + TSX_CTRL_ENABLE, + TSX_CTRL_DISABLE, + TSX_CTRL_NOT_SUPPORTED, +}; + +extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state; + +extern void __init tsx_init(void); +extern void tsx_enable(void); +extern void tsx_disable(void); +#else +static inline void tsx_init(void) { } +#endif /* CONFIG_CPU_SUP_INTEL */ + extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); @@ -54,15 +70,21 @@ extern u32 get_scattered_cpuid_leaf(unsigned int level, enum cpuid_regs_idx reg); extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); +extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c); extern void detect_num_cpu_cores(struct cpuinfo_x86 *c); extern int detect_extended_topology_early(struct cpuinfo_x86 *c); extern int detect_extended_topology(struct cpuinfo_x86 *c); extern int detect_ht_early(struct cpuinfo_x86 *c); extern void detect_ht(struct cpuinfo_x86 *c); +extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); extern void x86_spec_ctrl_setup_ap(void); +extern void update_srbds_msr(void); +extern void update_gds_msr(void); + +extern u64 x86_read_arch_cap_msr(void); #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 2c0bd38a44ab125c6a6f9627eea0cc69c3d882f5..a86dfa5badcddc20da5e8ae2cb16ec7453671e0b 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -39,7 +39,10 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_F16C, 
X86_FEATURE_XMM2, }, { X86_FEATURE_AES, X86_FEATURE_XMM2 }, { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_GFNI, X86_FEATURE_XMM2 }, { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_VAES, X86_FEATURE_AVX }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX }, { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, @@ -51,14 +54,15 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, - { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, {} }; diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 8949b7ae6d92536c1bbff659d7463588d2bdfb06..1d9b8aaea06c8c9c7d14b0c30d51ded3bac83d7d 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -124,7 +124,7 @@ static void set_cx86_reorder(void) setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ /* Load/Store Serialize to mem access disable (=reorder it) */ - setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); + setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); /* set load/store serialize from 1GB to 4GB */ ccr3 |= 0xe0; setCx86(CX86_CCR3, ccr3); @@ -135,11 +135,11 @@ static void set_cx86_memwb(void) pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); /* CCR2 bit 2: unlock NW bit */ - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); + setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); /* set 'Not Write-through' */ write_cr0(read_cr0() | X86_CR0_NW); /* CCR2 bit 2: lock NW bit and set WT1 */ - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); } /* @@ -153,14 +153,14 @@ static void geode_configure(void) local_irq_save(flags); /* Suspend on halt power saving and enable #SUSP pin */ - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ /* FPU fast, DTE cache, Mem bypass */ - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ set_cx86_memwb(); @@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) /* GXm supports extended cpuid levels 'ala' AMD */ if (c->cpuid_level == 2) { /* Enable cxMMX extensions (GX1 Datasheet 54) */ - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); + setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); /* * GXm : 0x30 ... 0x5f GXm datasheet 51 @@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) if (dir1 > 7) { dir0_msn++; /* M II */ /* Enable MMX extensions (App note 108) */ - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); + setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); } else { /* A 6x86MX - it has the bug. 
*/ set_cpu_bug(c, X86_BUG_COMA); @@ -437,7 +437,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c) /* enable MAPEN */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable cpuid */ - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* disable MAPEN */ setCx86(CX86_CCR3, ccr3); local_irq_restore(flags); diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..2ae7d08aa418048115ca603d7f90371a9f02a49b --- /dev/null +++ b/arch/x86/kernel/cpu/hygon.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Hygon Processor Support for Linux + * + * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd. + * + * Author: Pu Wen + */ +#include + +#include +#include +#include +#include +#include +#ifdef CONFIG_X86_64 +# include +#endif + +#include "cpu.h" + +#define APICID_SOCKET_ID_BIT 6 + +/* + * nodes_per_socket: Stores the number of nodes per socket. + * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8] + */ +static u32 nodes_per_socket = 1; + +#ifdef CONFIG_NUMA +/* + * To workaround broken NUMA config. Read the comment in + * srat_detect_node(). + */ +static int nearby_node(int apicid) +{ + int i, node; + + for (i = apicid - 1; i >= 0; i--) { + node = __apicid_to_node[i]; + if (node != NUMA_NO_NODE && node_online(node)) + return node; + } + for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { + node = __apicid_to_node[i]; + if (node != NUMA_NO_NODE && node_online(node)) + return node; + } + return first_node(node_online_map); /* Shouldn't happen */ +} +#endif + +static void hygon_get_topology_early(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_TOPOEXT)) + smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; +} + +/* + * Fixup core topology information for + * (1) Hygon multi-node processors + * Assumption: Number of cores in each internal node is the same. + * (2) Hygon processors supporting compute units + */ +static void hygon_get_topology(struct cpuinfo_x86 *c) +{ + int cpu = smp_processor_id(); + + /* get information required for multi-node processors */ + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { + int err; + u32 eax, ebx, ecx, edx; + + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + + c->cpu_die_id = ecx & 0xff; + + c->cpu_core_id = ebx & 0xff; + + if (smp_num_siblings > 1) + c->x86_max_cores /= smp_num_siblings; + + if (c->x86_model < 0x4 && + !boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + /* + * Socket ID is ApicId[6] for the processors with + * model < 0x4 when running on host. + */ + c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; + } else { + /* + * In case leaf 0xB is available, use it to derive + * topology information. + */ + err = detect_extended_topology(c); + if (!err) { + c->x86_coreid_bits = + get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + } + } + + cacheinfo_hygon_init_llc_id(c, cpu); + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { + u64 value; + + rdmsrl(MSR_FAM10H_NODE_ID, value); + c->cpu_die_id = value & 7; + + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; + } else + return; + + if (nodes_per_socket > 1) + set_cpu_cap(c, X86_FEATURE_AMD_DCM); +} + +/* + * On Hygon setup the lower bits of the APIC id distinguish the cores. + * Assumes number of cores is a power of two. 
+ */ +static void hygon_detect_cmp(struct cpuinfo_x86 *c) +{ + unsigned int bits; + int cpu = smp_processor_id(); + + bits = c->x86_coreid_bits; + /* Low order bits define the core id (index of core in socket) */ + c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); + /* Convert the initial APIC ID into the socket ID */ + c->phys_proc_id = c->initial_apicid >> bits; + /* use socket ID also for last level cache */ + per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; +} + +static void srat_detect_node(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_NUMA + int cpu = smp_processor_id(); + int node; + unsigned int apicid = c->apicid; + + node = numa_cpu_node(cpu); + if (node == NUMA_NO_NODE) + node = per_cpu(cpu_llc_id, cpu); + + /* + * On multi-fabric platform (e.g. Numascale NumaChip) a + * platform-specific handler needs to be called to fixup some + * IDs of the CPU. + */ + if (x86_cpuinit.fixup_cpu_id) + x86_cpuinit.fixup_cpu_id(c, node); + + if (!node_online(node)) { + /* + * Two possibilities here: + * + * - The CPU is missing memory and no node was created. In + * that case try picking one from a nearby CPU. + * + * - The APIC IDs differ from the HyperTransport node IDs. + * Assume they are all increased by a constant offset, but + * in the same order as the HT nodeids. If that doesn't + * result in a usable node fall back to the path for the + * previous case. + * + * This workaround operates directly on the mapping between + * APIC ID and NUMA node, assuming certain relationship + * between APIC ID, HT node ID and NUMA topology. As going + * through CPU mapping may alter the outcome, directly + * access __apicid_to_node[]. + */ + int ht_nodeid = c->initial_apicid; + + if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE) + node = __apicid_to_node[ht_nodeid]; + /* Pick a nearby node */ + if (!node_online(node)) + node = nearby_node(apicid); + } + numa_set_node(cpu, node); +#endif +} + +static void early_init_hygon_mc(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + unsigned int bits, ecx; + + /* Multi core CPU? */ + if (c->extended_cpuid_level < 0x80000008) + return; + + ecx = cpuid_ecx(0x80000008); + + c->x86_max_cores = (ecx & 0xff) + 1; + + /* CPU telling us the core id bits shift? */ + bits = (ecx >> 12) & 0xF; + + /* Otherwise recompute */ + if (bits == 0) { + while ((1 << bits) < c->x86_max_cores) + bits++; + } + + c->x86_coreid_bits = bits; +#endif +} + +static void bsp_init_hygon(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_X86_64 + unsigned long long tseg; + + /* + * Split up direct mapping around the TSEG SMM area. + * Don't do it for gbpages because there seems very little + * benefit in doing so. 
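The topology fields derived by early_init_hygon_mc() and hygon_detect_cmp() are plain bit slices of the initial APIC ID: the low x86_coreid_bits select the core within a socket, the remaining high bits select the socket. A self-contained sketch of that decoding, with purely hypothetical example values:

#include <stdio.h>

struct topo {
	unsigned int core_id;	/* index of the core within the socket */
	unsigned int socket_id;	/* physical package id */
};

/* Mirrors hygon_detect_cmp(): the low coreid_bits are the core id,
 * the remaining high bits of the APIC id are the socket id. */
static struct topo decode_apicid(unsigned int apicid, unsigned int coreid_bits)
{
	struct topo t;

	t.core_id = apicid & ((1u << coreid_bits) - 1);
	t.socket_id = apicid >> coreid_bits;
	return t;
}

int main(void)
{
	/* Hypothetical example: 32 cores per socket -> 5 core-id bits,
	 * APIC id 0x47 decodes to socket 2, core 7. */
	struct topo t = decode_apicid(0x47, 5);

	printf("socket %u core %u\n", t.socket_id, t.core_id);
	return 0;
}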
+ */ + if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { + unsigned long pfn = tseg >> PAGE_SHIFT; + + pr_debug("tseg: %010llx\n", tseg); + if (pfn_range_is_mapped(pfn, pfn + 1)) + set_memory_4k((unsigned long)__va(tseg), 1); + } +#endif + + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { + u64 val; + + rdmsrl(MSR_K7_HWCR, val); + if (!(val & BIT(24))) + pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); + } + + if (cpu_has(c, X86_FEATURE_MWAITX)) + use_mwaitx_delay(); + + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { + u32 ecx; + + ecx = cpuid_ecx(0x8000001e); + nodes_per_socket = ((ecx >> 8) & 7) + 1; + } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { + u64 value; + + rdmsrl(MSR_FAM10H_NODE_ID, value); + nodes_per_socket = ((value >> 3) & 7) + 1; + } + + if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && + !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) { + /* + * Try to cache the base value so further operations can + * avoid RMW. If that faults, do not enable SSBD. + */ + if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { + setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); + setup_force_cpu_cap(X86_FEATURE_SSBD); + x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; + } + } +} + +static void early_init_hygon(struct cpuinfo_x86 *c) +{ + u32 dummy; + + early_init_hygon_mc(c); + + set_cpu_cap(c, X86_FEATURE_K8); + + rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); + + /* + * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate + * with P/T states and does not stop in deep C-states + */ + if (c->x86_power & (1 << 8)) { + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + } + + /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ + if (c->x86_power & BIT(12)) + set_cpu_cap(c, X86_FEATURE_ACC_POWER); + +#ifdef CONFIG_X86_64 + set_cpu_cap(c, X86_FEATURE_SYSCALL32); +#endif + +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) + /* + * ApicID can always be treated as an 8-bit value for Hygon APIC So, we + * can safely set X86_FEATURE_EXTD_APICID unconditionally. + */ + if (boot_cpu_has(X86_FEATURE_APIC)) + set_cpu_cap(c, X86_FEATURE_EXTD_APICID); +#endif + + /* + * This is only needed to tell the kernel whether to use VMCALL + * and VMMCALL. VMMCALL is never executed except under virt, so + * we can set it unconditionally. + */ + set_cpu_cap(c, X86_FEATURE_VMMCALL); + + hygon_get_topology_early(c); +} + +static void init_hygon(struct cpuinfo_x86 *c) +{ + early_init_hygon(c); + + /* + * Bit 31 in normal CPUID used for nonstandard 3DNow ID; + * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway + */ + clear_cpu_cap(c, 0*32+31); + + set_cpu_cap(c, X86_FEATURE_REP_GOOD); + + /* get apicid instead of initial apic id from cpuid */ + c->apicid = hard_smp_processor_id(); + + set_cpu_cap(c, X86_FEATURE_ZEN); + set_cpu_cap(c, X86_FEATURE_CPB); + + cpu_detect_cache_sizes(c); + + hygon_detect_cmp(c); + hygon_get_topology(c); + srat_detect_node(c); + + init_hygon_cacheinfo(c); + + if (cpu_has(c, X86_FEATURE_XMM2)) { + unsigned long long val; + int ret; + + /* + * A serializing LFENCE has less overhead than MFENCE, so + * use it for execution serialization. On families which + * don't have that MSR, LFENCE is already serializing. + * msr_set_bit() uses the safe accessors, too, even if the MSR + * is not present. + */ + msr_set_bit(MSR_F10H_DECFG, + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + + /* + * Verify that the MSR write was successful (could be running + * under a hypervisor) and only then assume that LFENCE is + * serializing. 
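early_init_hygon() keys off c->x86_power, which is EDX of CPUID leaf 0x80000007: bit 8 is the invariant-TSC indication and bit 12 the accumulated power mechanism. A user-space sketch of the same probe (assuming <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000007) {
		puts("leaf 0x80000007 not available");
		return 0;
	}

	__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

	/* Bit 8: TSC runs at a constant rate and does not stop in deep C-states. */
	printf("invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	/* Bit 12: accumulated power reporting mechanism. */
	printf("accumulated power mechanism: %s\n",
	       (edx & (1u << 12)) ? "yes" : "no");
	return 0;
}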
+ */ + ret = rdmsrl_safe(MSR_F10H_DECFG, &val); + if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); + } else { + /* MFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + } + } + + /* + * Hygon processors have APIC timer running in deep C states. + */ + set_cpu_cap(c, X86_FEATURE_ARAT); + + /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */ + if (!cpu_has(c, X86_FEATURE_XENPV)) + set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); +} + +static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) +{ + u32 ebx, eax, ecx, edx; + u16 mask = 0xfff; + + if (c->extended_cpuid_level < 0x80000006) + return; + + cpuid(0x80000006, &eax, &ebx, &ecx, &edx); + + tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask; + tlb_lli_4k[ENTRIES] = ebx & mask; + + /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ + if (!((eax >> 16) & mask)) + tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; + else + tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; + + /* a 4M entry uses two 2M entries */ + tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; + + /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ + if (!(eax & mask)) { + cpuid(0x80000005, &eax, &ebx, &ecx, &edx); + tlb_lli_2m[ENTRIES] = eax & 0xff; + } else + tlb_lli_2m[ENTRIES] = eax & mask; + + tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; +} + +static const struct cpu_dev hygon_cpu_dev = { + .c_vendor = "Hygon", + .c_ident = { "HygonGenuine" }, + .c_early_init = early_init_hygon, + .c_detect_tlb = cpu_detect_tlb_hygon, + .c_bsp_init = bsp_init_hygon, + .c_init = init_hygon, + .c_x86_vendor = X86_VENDOR_HYGON, +}; + +cpu_dev_register(hygon_cpu_dev); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fc3c07fe7df58a22c01c8c1180d0b394bde8b59a..76692590eff862ae7504d200fdd0f95612eaec08 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -71,7 +71,7 @@ static bool ring3mwait_disabled __read_mostly; static int __init ring3mwait_disable(char *__unused) { ring3mwait_disabled = true; - return 0; + return 1; } __setup("ring3mwait=disable", ring3mwait_disable); @@ -766,6 +766,11 @@ static void init_intel(struct cpuinfo_x86 *c) init_intel_energy_perf(c); init_intel_misc_features(c); + + if (tsx_ctrl_state == TSX_CTRL_ENABLE) + tsx_enable(); + if (tsx_ctrl_state == TSX_CTRL_DISABLE) + tsx_disable(); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index abb71ac704433cea9f2eca68b8f3ed60566c22da..f955847a9527f2465a98640924194b102ea4b631 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -421,7 +421,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, struct list_head *l; if (id < 0) - return ERR_PTR(id); + return ERR_PTR(-ENODEV); list_for_each(l, &r->domains) { d = list_entry(l, struct rdt_domain, list); @@ -561,6 +561,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) } if (r->mon_capable && domain_setup_mon_state(r, d)) { + kfree(d->ctrl_val); + kfree(d->mbps_val); kfree(d); return; } @@ -610,6 +612,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cancel_delayed_work(&d->cqm_limbo); } + /* + * rdt_domain "d" is going to be freed below, so clear + * its pointer from pseudo_lock_region struct. 
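cpu_detect_tlb_hygon() above decodes CPUID leaf 0x80000006: EBX carries the L2 TLB entry counts for 4K pages (data in bits 27:16, instruction in bits 11:0) and EAX uses the same layout for 2M/4M pages. A minimal user-space decoder following that field layout (illustrative only; it omits the L1 fallback the kernel applies when the L2 TLB is disabled):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	const unsigned int mask = 0xfff;

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000006) {
		puts("leaf 0x80000006 not available");
		return 0;
	}

	__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	/* EBX[27:16] = L2 dTLB 4K entries, EBX[11:0] = L2 iTLB 4K entries. */
	printf("L2 dTLB 4K: %u entries, L2 iTLB 4K: %u entries\n",
	       (ebx >> 16) & mask, ebx & mask);
	/* EAX has the same layout for 2M/4M pages; a 4M entry uses two 2M entries. */
	printf("L2 dTLB 2M/4M: %u entries, L2 iTLB 2M/4M: %u entries\n",
	       (eax >> 16) & mask, eax & mask);
	return 0;
}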
+ */ + if (d->plr) + d->plr->d = NULL; + kfree(d->ctrl_val); kfree(d->mbps_val); kfree(d->rmid_busy_llc); diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index 0f53049719cd670f3b31a444863f5af378645b7d..2052e1e6a11c903f22e78205ae234a05b78cc665 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; + cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return ret ?: nbytes; } @@ -404,8 +408,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of, for_each_alloc_enabled_rdt_resource(r) seq_printf(s, "%s:uninitialized\n", r->name); } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name, - rdtgrp->plr->d->id, rdtgrp->plr->cbm); + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%s:%d=%x\n", + rdtgrp->plr->r->name, + rdtgrp->plr->d->id, + rdtgrp->plr->cbm); + } } else { closid = rdtgrp->closid; for_each_alloc_enabled_rdt_resource(r) { @@ -447,6 +459,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) int ret = 0; rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } md.priv = of->kn->priv; resid = md.u.rid; @@ -455,7 +471,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) r = &rdt_resources_all[resid]; d = rdt_find_domain(r, domid, NULL); - if (!d) { + if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out; } diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c index b0f3aed76b750763d2c92b7c8a911bb678cbe103..3d4ec80a6bb96eb90b3ab49315fd696028c5c931 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c @@ -371,6 +371,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct list_head *head; struct rdtgroup *entry; + if (!is_mbm_local_enabled()) + return; + r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; closid = rgrp->closid; rmid = rgrp->mon.rmid; diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c index f8c260d522ca045f33a675e47a225c908854be5d..a999a58ca33180ea3374b6ee570cd2426aea67f9 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c @@ -91,7 +91,7 @@ static u64 get_prefetch_disable_bits(void) */ return 0xF; case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GEMINI_LAKE: + case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* * SDM defines bits of MSR_MISC_FEATURE_CONTROL register * as: @@ -995,7 +995,7 @@ static int measure_cycles_perf_fn(void *_plr) switch (boot_cpu_data.x86_model) { case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GEMINI_LAKE: + case INTEL_FAM6_ATOM_GOLDMONT_PLUS: l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1; l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1; break; @@ -1116,6 +1116,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) goto out; } + if (!plr->d) { + ret = 
-ENODEV; + goto out; + } + plr->thread_done = 0; cpu = cpumask_first(&plr->d->cpu_mask); if (!cpu_online(cpu)) { @@ -1429,6 +1434,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) plr = rdtgrp->plr; + if (!plr->d) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + /* * Task is required to run with affinity to the cpus associated * with the pseudo-locked region. If this is not the case the task diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b140c68bc14ba81b35406d835190428772c91836..686f685c8c7e6e3153824277a26f9aed65550b94 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { struct rdtgroup *rdtgrp; + struct cpumask *mask; int ret = 0; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", - cpumask_pr_args(&rdtgrp->plr->d->cpu_mask)); - else + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + mask = &rdtgrp->plr->d->cpu_mask; + seq_printf(s, is_cpu_list(of) ? + "%*pbl\n" : "%*pb\n", + cpumask_pr_args(mask)); + } + } else { seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask)); + } } else { ret = -ENOENT; } @@ -733,7 +743,7 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of, if (len) seq_printf(seq, "%.*s", len, last_cmd_status_buf); else - seq_puts(seq, "ok\n"); + seq_puts(seq, ""); mutex_unlock(&rdtgroup_mutex); return 0; } @@ -792,8 +802,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, struct seq_file *seq, void *v) { struct rdt_resource *r = of->kn->parent->priv; - u32 sw_shareable = 0, hw_shareable = 0; - u32 exclusive = 0, pseudo_locked = 0; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; struct rdt_domain *dom; int i, hwb, swb, excl, psl; enum rdtgrp_mode mode; @@ -838,10 +852,10 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, } for (i = r->cache.cbm_len - 1; i >= 0; i--) { pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, (unsigned long *)&hw_shareable); - swb = test_bit(i, (unsigned long *)&sw_shareable); - excl = test_bit(i, (unsigned long *)&exclusive); - psl = test_bit(i, (unsigned long *)&pseudo_locked); + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); if (hwb && swb) seq_putc(seq, 'X'); else if (hwb && !swb) @@ -961,7 +975,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of, } /** - * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other + * rdt_cdp_peer_get - Retrieve CDP peer if it exists + * @r: RDT resource to which RDT domain @d belongs + * @d: Cache instance for which a CDP peer is requested + * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer) + * Used to return the result. + * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer) + * Used to return the result. 
+ * + * RDT resources are managed independently and by extension the RDT domains + * (RDT resource instances) are managed independently also. The Code and + * Data Prioritization (CDP) RDT resources, while managed independently, + * could refer to the same underlying hardware. For example, + * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache. + * + * When provided with an RDT resource @r and an instance of that RDT + * resource @d rdt_cdp_peer_get() will return if there is a peer RDT + * resource and the exact instance that shares the same hardware. + * + * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists. + * If a CDP peer was found, @r_cdp will point to the peer RDT resource + * and @d_cdp will point to the peer RDT domain. + */ +static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, + struct rdt_resource **r_cdp, + struct rdt_domain **d_cdp) +{ + struct rdt_resource *_r_cdp = NULL; + struct rdt_domain *_d_cdp = NULL; + int ret = 0; + + switch (r->rid) { + case RDT_RESOURCE_L3DATA: + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE]; + break; + case RDT_RESOURCE_L3CODE: + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA]; + break; + case RDT_RESOURCE_L2DATA: + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE]; + break; + case RDT_RESOURCE_L2CODE: + _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA]; + break; + default: + ret = -ENOENT; + goto out; + } + + /* + * When a new CPU comes online and CDP is enabled then the new + * RDT domains (if any) associated with both CDP RDT resources + * are added in the same CPU online routine while the + * rdtgroup_mutex is held. It should thus not happen for one + * RDT domain to exist and be associated with its RDT CDP + * resource but there is no RDT domain associated with the + * peer RDT CDP resource. Hence the WARN. + */ + _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); + if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { + _r_cdp = NULL; + ret = -EINVAL; + } + +out: + *r_cdp = _r_cdp; + *d_cdp = _d_cdp; + + return ret; +} + +/** + * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other * @r: Resource to which domain instance @d belongs. * @d: The domain instance for which @closid is being tested. * @cbm: Capacity bitmask being tested. @@ -980,8 +1065,8 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of, * * Return: false if CBM does not overlap, true if it does. */ -bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive) +static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive) { enum rdtgrp_mode mode; unsigned long ctrl_b; @@ -1016,6 +1101,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, return false; } +/** + * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware + * @r: Resource to which domain instance @d belongs. + * @d: The domain instance for which @closid is being tested. + * @cbm: Capacity bitmask being tested. + * @closid: Intended closid for @cbm. + * @exclusive: Only check if overlaps with exclusive resource groups + * + * Resources that can be allocated using a CBM can use the CBM to control + * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test + * for overlap. 
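Stripped of the mode and exclusivity rules, the overlap test in __rdtgroup_cbm_overlaps() is a bitmask intersection: a candidate CBM conflicts with an existing allocation when the two masks share any bit within cbm_len. A stand-alone sketch of that core check, using a plain unsigned long instead of the kernel bitmap helpers:

#include <stdbool.h>
#include <stdio.h>
#include <assert.h>

/* Core of the overlap test: two capacity bitmasks conflict when they
 * share at least one bit within the cbm_len low bits. */
static bool cbm_overlaps(unsigned long a, unsigned long b, unsigned int cbm_len)
{
	unsigned long width_mask = (cbm_len >= 8 * sizeof(unsigned long)) ?
				   ~0UL : (1UL << cbm_len) - 1;

	return (a & b & width_mask) != 0;
}

int main(void)
{
	/* 11-bit CBM: 0x7c0 and 0x03f are disjoint, 0x7c0 and 0x0e0 overlap. */
	assert(!cbm_overlaps(0x7c0, 0x03f, 11));
	assert(cbm_overlaps(0x7c0, 0x0e0, 11));
	puts("CBM overlap checks passed");
	return 0;
}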
Overlap test is not limited to the specific resource for + * which the CBM is intended though - when dealing with CDP resources that + * share the underlying hardware the overlap check should be performed on + * the CDP resource sharing the hardware also. + * + * Refer to description of __rdtgroup_cbm_overlaps() for the details of the + * overlap test. + * + * Return: true if CBM overlap detected, false if there is no overlap + */ +bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive) +{ + struct rdt_resource *r_cdp; + struct rdt_domain *d_cdp; + + if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive)) + return true; + + if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0) + return false; + + return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive); +} + /** * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive * @@ -1176,6 +1296,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, struct rdt_resource *r; struct rdt_domain *d; unsigned int size; + int ret = 0; bool sep; u32 ctrl; @@ -1186,11 +1307,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, } if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name); - size = rdtgroup_cbm_to_size(rdtgrp->plr->r, - rdtgrp->plr->d, - rdtgrp->plr->cbm); - seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%*s:", max_name_width, + rdtgrp->plr->r->name); + size = rdtgroup_cbm_to_size(rdtgrp->plr->r, + rdtgrp->plr->d, + rdtgrp->plr->cbm); + seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + } goto out; } @@ -1220,7 +1348,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, out: rdtgroup_kn_unlock(of->kn); - return 0; + return ret; } /* rdtgroup information files for one cache resource. */ @@ -2320,26 +2448,19 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, */ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r) { - /* - * Convert the u32 _val to an unsigned long required by all the bit - * operations within this function. No more than 32 bits of this - * converted value can be accessed because all bit operations are - * additionally provided with cbm_len that is initialized during - * hardware enumeration using five bits from the EAX register and - * thus never can exceed 32 bits. 
- */ - unsigned long *val = (unsigned long *)_val; + unsigned long val = *_val; unsigned int cbm_len = r->cache.cbm_len; unsigned long first_bit, zero_bit; - if (*val == 0) + if (val == 0) return; - first_bit = find_first_bit(val, cbm_len); - zero_bit = find_next_zero_bit(val, cbm_len, first_bit); + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(val, zero_bit, cbm_len - zero_bit); + bitmap_clear(&val, zero_bit, cbm_len - zero_bit); + *_val = (u32)val; } /** @@ -2379,7 +2500,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - break; + continue; used_b |= *ctrl; if (mode == RDT_MODE_SHAREABLE) d->new_ctrl |= *ctrl; @@ -2805,6 +2926,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) { if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) seq_puts(seq, ",cdp"); + + if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) + seq_puts(seq, ",mba_MBps"); + return 0; } diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index 3fed38812eea340ff1ca2adb4b47534de1e73a59..ad6776081e60daedd4ea8c5efc41e033440ef14a 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -16,12 +16,17 @@ * respective wildcard entries. * * A typical table entry would be to match a specific CPU - * { X86_VENDOR_INTEL, 6, 0x12 } - * or to match a specific CPU feature - * { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) } + * + * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL, + * X86_FEATURE_ANY, NULL); * * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY, - * %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor) + * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor) + * + * asm/cpu_device_id.h contains a set of useful macros which are shortcuts + * for various common selections. The above can be shortened to: + * + * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL); * * Arrays used to match for this should also be declared using * MODULE_DEVICE_TABLE(x86cpu, ...) 
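cbm_ensure_valid() above keeps only the contiguous run of set bits that starts at the lowest set bit and clears everything from the first following zero upward. The same effect can be sketched with plain bit arithmetic (an illustration; the kernel version uses find_first_bit()/find_next_zero_bit()/bitmap_clear() as shown in the diff):

#include <stdio.h>
#include <assert.h>
#include <stdint.h>

/* Keep only the contiguous run of set bits that starts at the lowest
 * set bit; clear any set bits above the first zero after that run. */
static uint32_t cbm_make_contiguous(uint32_t val, unsigned int cbm_len)
{
	uint32_t width_mask = (cbm_len >= 32) ? ~0u : (1u << cbm_len) - 1;
	unsigned int bit;
	uint32_t out = 0;

	val &= width_mask;
	if (!val)
		return 0;

	/* Walk up from the first set bit until the first zero. */
	for (bit = 0; bit < cbm_len && !(val & (1u << bit)); bit++)
		;
	for (; bit < cbm_len && (val & (1u << bit)); bit++)
		out |= 1u << bit;

	return out;
}

int main(void)
{
	/* 0b1011100 has a stray bit above the gap; only 0b0011100 is kept. */
	assert(cbm_make_contiguous(0x5c, 11) == 0x1c);
	assert(cbm_make_contiguous(0, 11) == 0);
	puts("contiguous-CBM checks passed");
	return 0;
}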
@@ -34,13 +39,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) const struct x86_cpu_id *m; struct cpuinfo_x86 *c = &boot_cpu_data; - for (m = match; m->vendor | m->family | m->model | m->feature; m++) { + for (m = match; + m->vendor | m->family | m->model | m->steppings | m->feature; + m++) { if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) continue; if (m->family != X86_FAMILY_ANY && c->x86 != m->family) continue; if (m->model != X86_MODEL_ANY && c->x86_model != m->model) continue; + if (m->steppings != X86_STEPPING_ANY && + !(BIT(c->x86_stepping) & m->steppings)) + continue; if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) continue; return m; @@ -48,3 +58,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) return NULL; } EXPORT_SYMBOL(x86_match_cpu); + +static const struct x86_cpu_desc * +x86_match_cpu_with_stepping(const struct x86_cpu_desc *match) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + const struct x86_cpu_desc *m; + + for (m = match; m->x86_family | m->x86_model; m++) { + if (c->x86_vendor != m->x86_vendor) + continue; + if (c->x86 != m->x86_family) + continue; + if (c->x86_model != m->x86_model) + continue; + if (c->x86_stepping != m->x86_stepping) + continue; + return m; + } + return NULL; +} + +bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table) +{ + const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table); + + if (!res || res->x86_microcode_rev > boot_cpu_data.microcode) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev); diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mce/Makefile similarity index 61% rename from arch/x86/kernel/cpu/mcheck/Makefile rename to arch/x86/kernel/cpu/mce/Makefile index bcc7c54c7041f95b2f0cf55b8d52fa07511b9365..9f020c9941545ed033d41acd94092d6d0a85b6b9 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mce/Makefile @@ -1,14 +1,16 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y = mce.o mce-severity.o mce-genpool.o +obj-y = core.o severity.o genpool.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o -obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o -obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o +obj-$(CONFIG_X86_MCE_INTEL) += intel.o +obj-$(CONFIG_X86_MCE_AMD) += amd.o obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o + +mce-inject-y := inject.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o -obj-$(CONFIG_ACPI_APEI) += mce-apei.o +obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mce/amd.c similarity index 89% rename from arch/x86/kernel/cpu/mcheck/mce_amd.c rename to arch/x86/kernel/cpu/mce/amd.c index dd33c357548f11c0ac21c367d0edc20b34671218..6ae54119205a1514760f7528f93b6faff96b918a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -23,12 +23,13 @@ #include #include +#include #include #include #include #include -#include "mce-internal.h" +#include "internal.h" #define NR_BLOCKS 5 #define THRESHOLD_MAX 0xFFF @@ -56,7 +57,7 @@ /* Threshold LVT offset is at MSR0xC0000410[15:12] */ #define SMCA_THR_LVT_OFF 0xF000 -static bool thresholding_en; +static bool thresholding_irq_en; static const char * const th_names[] = { "load_store", @@ -79,6 +80,7 @@ struct smca_bank_name { static struct smca_bank_name smca_names[] = { [SMCA_LS] = { "load_store", "Load Store Unit" }, + [SMCA_LS_V2] = { "load_store", 
"Load Store Unit" }, [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, @@ -87,11 +89,17 @@ static struct smca_bank_name smca_names[] = { [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, [SMCA_CS] = { "coherent_slave", "Coherent Slave" }, + [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" }, [SMCA_PIE] = { "pie", "Power, Interrupts, etc." }, [SMCA_UMC] = { "umc", "Unified Memory Controller" }, [SMCA_PB] = { "param_block", "Parameter Block" }, [SMCA_PSP] = { "psp", "Platform Security Processor" }, + [SMCA_PSP_V2] = { "psp", "Platform Security Processor" }, [SMCA_SMU] = { "smu", "System Management Unit" }, + [SMCA_SMU_V2] = { "smu", "System Management Unit" }, + [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" }, + [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" }, + [SMCA_PCIE] = { "pcie", "PCI Express Unit" }, }; static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = @@ -99,7 +107,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } }; -const char *smca_get_name(enum smca_bank_types t) +static const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; @@ -137,30 +145,43 @@ static struct smca_hwid smca_hwid_mcatypes[] = { { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, /* ZN Core (HWID=0xB0) MCA types */ - { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, + { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF }, + { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF }, { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF }, /* HWID 0xB0 MCATYPE 0x4 is Reserved */ - { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0x7FF }, + { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0xFFF }, { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F }, { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF }, /* Data Fabric MCA types */ { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF }, - { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0xF }, + { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0x1F }, + { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2), 0x3FFF }, /* Unified Memory Controller MCA type */ - { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0x3F }, + { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0xFF }, /* Parameter Block MCA type */ { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 }, /* Platform Security Processor MCA type */ { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 }, + { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1), 0x3FFFF }, /* System Management Unit MCA type */ { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 }, + { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1), 0x7FF }, + + /* Microprocessor 5 Unit MCA type */ + { SMCA_MP5, HWID_MCATYPE(0x01, 0x2), 0x3FF }, + + /* Northbridge IO Unit MCA type */ + { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0), 0x1F }, + + /* PCI Express Unit MCA type */ + { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0), 0x1F }, }; struct smca_bank smca_banks[MAX_NR_BANKS]; @@ -178,7 +199,7 @@ EXPORT_SYMBOL_GPL(smca_banks); static char buf_mcatype[MAX_MCATYPE_NAME_LEN]; static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); -static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */ +static DEFINE_PER_CPU(u64, bank_map); /* see which banks are on */ static void amd_threshold_interrupt(void); static void amd_deferred_error_interrupt(void); @@ -227,10 +248,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu) } /* Return early if this bank was 
already initialized. */ - if (smca_banks[bank].hwid) + if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0) return; - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { + if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { pr_warn("Failed to read MCA_IPID for bank %d\n", bank); return; } @@ -507,7 +528,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, int new; if (!block) - per_cpu(bank_map, cpu) |= (1 << bank); + per_cpu(bank_map, cpu) |= BIT_ULL(bank); memset(&b, 0, sizeof(b)); b.cpu = cpu; @@ -534,9 +555,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, set_offset: offset = setup_APIC_mce_threshold(offset, new); - - if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) - mce_threshold_vector = amd_threshold_interrupt; + if (offset == new) + thresholding_irq_en = true; done: mce_threshold_block_init(&b, offset); @@ -545,6 +565,40 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, return offset; } +/* + * Turn off MC4_MISC thresholding banks on all family 0x15 models since + * they're not supported there. + */ +void disable_err_thresholding(struct cpuinfo_x86 *c) +{ + int i; + u64 hwcr; + bool need_toggle; + u32 msrs[] = { + 0x00000413, /* MC4_MISC0 */ + 0xc0000408, /* MC4_MISC1 */ + }; + + if (c->x86 != 0x15) + return; + + rdmsrl(MSR_K7_HWCR, hwcr); + + /* McStatusWrEn has to be set */ + need_toggle = !(hwcr & BIT(18)); + + if (need_toggle) + wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); + + /* Clear CntP bit safely */ + for (i = 0; i < ARRAY_SIZE(msrs); i++) + msr_clear_bit(msrs[i], 62); + + /* restore old settings */ + if (need_toggle) + wrmsrl(MSR_K7_HWCR, hwcr); +} + /* cpu init entry point, called from mce.c with preempt off */ void mce_amd_feature_init(struct cpuinfo_x86 *c) { @@ -552,6 +606,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) unsigned int bank, block, cpu = smp_processor_id(); int offset = -1; + disable_err_thresholding(c); + for (bank = 0; bank < mca_cfg.banks; ++bank) { if (mce_flags.smca) smca_configure(bank, cpu); @@ -595,8 +651,11 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) u8 cs_mask, cs_id = 0; bool hash_enabled = false; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp)) + /* Read DramOffset, check if base 1 is used. 
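The bank_map widening in this hunk (a per-CPU u64 together with BIT_ULL(bank) instead of the int-typed 1 << bank) matters once a CPU reports bank numbers of 32 and above: shifting a 32-bit int that far is undefined behaviour and would corrupt the map. A tiny stand-alone demonstration:

#include <stdio.h>
#include <stdint.h>

/* BIT_ULL() equivalent: force the shift to be done in 64-bit arithmetic. */
#define BIT_ULL_EXAMPLE(nr)	(1ULL << (nr))

int main(void)
{
	uint64_t map = 0;
	unsigned int bank;

	/* Mark hypothetical banks 3, 31 and 40 as present. With the old
	 * "1 << bank" (int arithmetic) bank 40 would shift past the type
	 * width, which is undefined behaviour in C. */
	for (bank = 0; bank < 64; bank++) {
		if (bank == 3 || bank == 31 || bank == 40)
			map |= BIT_ULL_EXAMPLE(bank);
	}

	printf("bank_map = %#llx\n", (unsigned long long)map);
	printf("bank 40 set: %s\n", (map & BIT_ULL_EXAMPLE(40)) ? "yes" : "no");
	return 0;
}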
*/ + if (hygon_f18h_m4h() && + amd_df_indirect_read(nid, 0, 0x214, umc, &tmp)) + goto out_err; + else if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ @@ -620,6 +679,9 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (tmp >> 2) & 0x3; lgcy_mmio_hole_en = tmp & BIT(1); intlv_num_chan = (tmp >> 4) & 0xF; intlv_addr_sel = (tmp >> 8) & 0x7; @@ -636,7 +698,8 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp)) goto out_err; - intlv_num_sockets = (tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (tmp >> 8) & 0x1; intlv_num_dies = (tmp >> 10) & 0x3; dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -654,6 +717,9 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) hash_enabled = true; break; default: + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 && + intlv_num_chan == 2) + break; pr_err("%s: Invalid number of interleaved channels %d.\n", __func__, intlv_num_chan); goto out_err; @@ -672,8 +738,9 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. */ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -692,7 +759,10 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp)) goto out_err; - cs_fabric_id = (tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (tmp >> 8) & 0x7FF; + else + cs_fabric_id = (tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -712,8 +782,13 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (tmp >> 24) & 0xF; - die_id_mask = (tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (tmp >> 12) & 0xF; + die_id_mask = tmp & 0x7FF; + } else { + die_id_shift = (tmp >> 24) & 0xF; + die_id_mask = (tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -721,7 +796,10 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* If interleaved over more than 1 socket. 
*/ if (intlv_num_sockets) { socket_id_shift = (tmp >> 28) & 0xF; - socket_id_mask = (tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (tmp >> 16) & 0x7FF; + else + socket_id_mask = (tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } @@ -825,7 +903,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) mce_log(&m); } -asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) { entering_irq(); trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); @@ -937,7 +1015,7 @@ static void amd_threshold_interrupt(void) unsigned int bank, cpu = smp_processor_id(); for (bank = 0; bank < mca_cfg.banks; ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; first_block = per_cpu(threshold_banks, cpu)[bank]->blocks; @@ -1116,8 +1194,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return buf_mcatype; } -static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, - unsigned int block, u32 address) +static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb, + unsigned int bank, unsigned int block, + u32 address) { struct threshold_block *b = NULL; u32 low, high; @@ -1161,16 +1240,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, INIT_LIST_HEAD(&b->miscj); - if (per_cpu(threshold_banks, cpu)[bank]->blocks) { - list_add(&b->miscj, - &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); - } else { - per_cpu(threshold_banks, cpu)[bank]->blocks = b; - } + if (tb->blocks) + list_add(&b->miscj, &tb->blocks->miscj); + else + tb->blocks = b; - err = kobject_init_and_add(&b->kobj, &threshold_ktype, - per_cpu(threshold_banks, cpu)[bank]->kobj, - get_name(bank, b)); + err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b)); if (err) goto out_free; recurse: @@ -1178,7 +1253,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, if (!address) return 0; - err = allocate_threshold_blocks(cpu, bank, block, address); + err = allocate_threshold_blocks(cpu, tb, bank, block, address); if (err) goto out_free; @@ -1263,8 +1338,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out_free; } - per_cpu(threshold_banks, cpu)[bank] = b; - if (is_shared_bank(bank)) { refcount_set(&b->cpus, 1); @@ -1275,9 +1348,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) } } - err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank)); - if (!err) - goto out; + err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); + if (err) + goto out_free; + + per_cpu(threshold_banks, cpu)[bank] = b; + + return 0; out_free: kfree(b); @@ -1357,11 +1434,8 @@ int mce_threshold_remove_device(unsigned int cpu) { unsigned int bank; - if (!thresholding_en) - return 0; - for (bank = 0; bank < mca_cfg.banks; ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; threshold_remove_bank(cpu, bank); } @@ -1377,9 +1451,6 @@ int mce_threshold_create_device(unsigned int cpu) struct threshold_bank **bp; int err = 0; - if (!thresholding_en) - return 0; - bp = per_cpu(threshold_banks, cpu); if (bp) return 0; @@ -1392,7 +1463,7 @@ int mce_threshold_create_device(unsigned int cpu) per_cpu(threshold_banks, cpu) = bp; for (bank = 0; bank < mca_cfg.banks; 
++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; err = threshold_create_bank(cpu, bank); if (err) @@ -1408,9 +1479,6 @@ static __init int threshold_init_device(void) { unsigned lcpu = 0; - if (mce_threshold_vector == amd_threshold_interrupt) - thresholding_en = true; - /* to hit CPUs online before the notifier is up */ for_each_online_cpu(lcpu) { int err = mce_threshold_create_device(lcpu); @@ -1419,6 +1487,9 @@ static __init int threshold_init_device(void) return err; } + if (thresholding_irq_en) + mce_threshold_vector = amd_threshold_interrupt; + return 0; } /* diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mce/apei.c similarity index 99% rename from arch/x86/kernel/cpu/mcheck/mce-apei.c rename to arch/x86/kernel/cpu/mce/apei.c index 2eee853796891460c5cb0dd20dc594697bdd6d1f..1d9b3ce662a0b8a6d8347a76ae88257c1b82531d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -36,7 +36,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mce/core.c similarity index 93% rename from arch/x86/kernel/cpu/mcheck/mce.c rename to arch/x86/kernel/cpu/mce/core.c index 953b3ce92dccf0f684ce90e3a27015c99e692470..2b0fecdc137db87a956a42e8a0f2bbf505adaada 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -52,7 +52,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" static DEFINE_MUTEX(mce_log_mutex); @@ -270,7 +270,7 @@ static void print_mce(struct mce *m) { __print_mce(m); - if (m->cpuvendor != X86_VENDOR_AMD) + if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON) pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); } @@ -462,36 +462,21 @@ static void mce_irq_work_cb(struct irq_work *entry) mce_schedule_work(); } -static void mce_report_event(struct pt_regs *regs) -{ - if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { - mce_notify_irq(); - /* - * Triggering the work queue here is just an insurance - * policy in case the syscall exit notify handler - * doesn't run soon enough or ends up running on the - * wrong CPU (can happen when audit sleeps) - */ - mce_schedule_work(); - return; - } - - irq_work_queue(&mce_irq_work); -} - /* * Check if the address reported by the CPU is in a format we can parse. * It would be possible to add code for most other cases, but all would * be somewhat complicated (e.g. segment offset would require an instruction * parser). So only support physical addresses up to page granuality for now. 
*/ -static int mce_usable_address(struct mce *m) +int mce_usable_address(struct mce *m) { if (!(m->status & MCI_STATUS_ADDRV)) return 0; - /* Checks after this one are Intel-specific: */ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + /* Checks after this one are Intel/Zhaoxin-specific: */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) return 1; if (!(m->status & MCI_STATUS_MISCV)) @@ -505,13 +490,18 @@ static int mce_usable_address(struct mce *m) return 1; } +EXPORT_SYMBOL_GPL(mce_usable_address); bool mce_is_memory_error(struct mce *m) { - if (m->cpuvendor == X86_VENDOR_AMD) { + switch (m->cpuvendor) { + case X86_VENDOR_AMD: + case X86_VENDOR_HYGON: return amd_mce_is_memory_error(m); - } else if (m->cpuvendor == X86_VENDOR_INTEL) { + case X86_VENDOR_INTEL: + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes * @@ -528,22 +518,27 @@ bool mce_is_memory_error(struct mce *m) return (m->status & 0xef80) == BIT(7) || (m->status & 0xef00) == BIT(8) || (m->status & 0xeffc) == 0xc; - } - return false; + default: + return false; + } } EXPORT_SYMBOL_GPL(mce_is_memory_error); -static bool mce_is_correctable(struct mce *m) +bool mce_is_correctable(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) return false; + if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) + return false; + if (m->status & MCI_STATUS_UC) return false; return true; } +EXPORT_SYMBOL_GPL(mce_is_correctable); static bool cec_add_mce(struct mce *m) { @@ -709,19 +704,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) barrier(); m.status = mce_rdmsrl(msr_ops.status(i)); + + /* If this entry is not valid, ignore it */ if (!(m.status & MCI_STATUS_VAL)) continue; /* - * Uncorrected or signalled events are handled by the exception - * handler when it is enabled, so don't process those here. - * - * TBD do the same check for MCI_STATUS_EN here? + * If we are logging everything (at CPU online) or this + * is a corrected error, then we must log it. */ - if (!(flags & MCP_UC) && - (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) - continue; + if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC)) + goto log_it; + /* + * Newer Intel systems that support software error + * recovery need to make additional checks. Other + * CPUs should skip over uncorrected errors, but log + * everything else. + */ + if (!mca_cfg.ser) { + if (m.status & MCI_STATUS_UC) + continue; + goto log_it; + } + + /* Log "not enabled" (speculative) errors */ + if (!(m.status & MCI_STATUS_EN)) + goto log_it; + + /* + * Log UCNA (SDM: 15.6.3 "UCR Error Classification") + * UC == 1 && PCC == 0 && S == 0 + */ + if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S)) + goto log_it; + + /* + * Skip anything else. Presumption is that our read of this + * bank is racing with a machine check. Leave the log alone + * for do_machine_check() to deal with it. 
+ */ + continue; + +log_it: error_seen = true; mce_read_aux(&m, i); @@ -768,7 +793,7 @@ EXPORT_SYMBOL_GPL(machine_check_poll); static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { - char *tmp; + char *tmp = *msg; int i; for (i = 0; i < mca_cfg.banks; i++) { @@ -780,6 +805,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); + m->bank = i; if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { mce_read_aux(m, i); *msg = tmp; @@ -1092,6 +1118,13 @@ static bool __mc_check_crashing_cpu(int cpu) u64 mcgstatus; mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); + + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) { + if (mcgstatus & MCG_STATUS_LMCES) + return false; + } + if (mcgstatus & MCG_STATUS_RIPV) { mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); return true; @@ -1185,8 +1218,8 @@ void do_machine_check(struct pt_regs *regs, long error_code) DECLARE_BITMAP(toclear, MAX_NR_BANKS); struct mca_config *cfg = &mca_cfg; int cpu = smp_processor_id(); - char *msg = "Unknown"; struct mce m, *final; + char *msg = NULL; int worst = 0; /* @@ -1241,9 +1274,11 @@ void do_machine_check(struct pt_regs *regs, long error_code) /* * Check if this MCE is signaled to only this logical processor, - * on Intel only. + * on Intel, Zhaoxin only. */ - if (m.cpuvendor == X86_VENDOR_INTEL) + if (m.cpuvendor == X86_VENDOR_INTEL || + m.cpuvendor == X86_VENDOR_ZHAOXIN || + m.cpuvendor == X86_VENDOR_CENTAUR) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* @@ -1297,7 +1332,8 @@ void do_machine_check(struct pt_regs *regs, long error_code) mce_panic("Fatal machine check on current CPU", &m, msg); if (worst > 0) - mce_report_event(regs); + irq_work_queue(&mce_irq_work); + mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); sync_core(); @@ -1447,13 +1483,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq); static int __mcheck_cpu_mce_banks_init(void) { int i; - u8 num_banks = mca_cfg.banks; - mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL); + mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL); if (!mce_banks) return -ENOMEM; - for (i = 0; i < num_banks; i++) { + for (i = 0; i < MAX_NR_BANKS; i++) { struct mce_bank *b = &mce_banks[i]; b->ctl = -1ULL; @@ -1467,28 +1502,19 @@ static int __mcheck_cpu_mce_banks_init(void) */ static int __mcheck_cpu_cap_init(void) { - unsigned b; u64 cap; + u8 b; rdmsrl(MSR_IA32_MCG_CAP, cap); b = cap & MCG_BANKCNT_MASK; - if (!mca_cfg.banks) - pr_info("CPU supports %d MCE banks\n", b); - - if (b > MAX_NR_BANKS) { - pr_warn("Using only %u machine check banks out of %u\n", - MAX_NR_BANKS, b); + if (WARN_ON_ONCE(b > MAX_NR_BANKS)) b = MAX_NR_BANKS; - } - /* Don't support asymmetric configurations today */ - WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); - mca_cfg.banks = b; + mca_cfg.banks = max(mca_cfg.banks, b); if (!mce_banks) { int err = __mcheck_cpu_mce_banks_init(); - if (err) return err; } @@ -1608,36 +1634,6 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) if (c->x86 == 0x15 && c->x86_model <= 0xf) mce_flags.overflow_recov = 1; - /* - * Turn off MC4_MISC thresholding banks on those models since - * they're not supported there. 
- */ - if (c->x86 == 0x15 && - (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { - int i; - u64 hwcr; - bool need_toggle; - u32 msrs[] = { - 0x00000413, /* MC4_MISC0 */ - 0xc0000408, /* MC4_MISC1 */ - }; - - rdmsrl(MSR_K7_HWCR, hwcr); - - /* McStatusWrEn has to be set */ - need_toggle = !(hwcr & BIT(18)); - - if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); - - /* Clear CntP bit safely */ - for (i = 0; i < ARRAY_SIZE(msrs); i++) - msr_clear_bit(msrs[i], 62); - - /* restore old settings */ - if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr); - } } if (c->x86_vendor == X86_VENDOR_INTEL) { @@ -1671,6 +1667,19 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) if (c->x86 == 6 && c->x86_model == 45) quirk_no_way_out = quirk_sandybridge_ifu; } + + if (c->x86_vendor == X86_VENDOR_ZHAOXIN || + c->x86_vendor == X86_VENDOR_CENTAUR) { + /* + * All newer Zhaoxin CPUs support MCE broadcasting. Enable + * synchronization with a one second timeout. + */ + if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { + if (cfg->monarch_timeout < 0) + cfg->monarch_timeout = USEC_PER_SEC; + } + } + if (cfg->monarch_timeout < 0) cfg->monarch_timeout = 0; if (cfg->bootlog != 0) @@ -1705,7 +1714,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) */ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) { - if (c->x86_vendor == X86_VENDOR_AMD) { + if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); @@ -1719,19 +1728,32 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) +static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. - */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; + /* + * These CPUs have MCA bank 8 which reports only one error type called + * SVAD (System View Address Decoder). The reporting of that error is + * controlled by IA32_MC8.CTL.0. + * + * If enabled, prefetching on these CPUs will cause SVAD MCE when + * virtual machines start and result in a system panic. Always disable + * bank 8 SVAD error by default. 
+ */ + if ((c->x86 == 7 && c->x86_model == 0x1b) || + (c->x86_model == 0x19 || c->x86_model == 0x1f)) { + if (cfg->banks > 8) + mce_banks[8].ctl = 0; } + + intel_init_cmci(); + intel_init_lmce(); + mce_adjust_timer = cmci_intel_adjust_timer; +} + +static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) +{ + intel_clear_lmce(); } static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) @@ -1746,8 +1768,14 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) mce_amd_feature_init(c); break; } + + case X86_VENDOR_HYGON: + mce_hygon_feature_init(c); + break; + case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); + case X86_VENDOR_ZHAOXIN: + mce_zhaoxin_feature_init(c); break; default: @@ -1761,6 +1789,12 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) case X86_VENDOR_INTEL: mce_intel_feature_clear(c); break; + + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + mce_zhaoxin_feature_clear(c); + break; + default: break; } @@ -1971,13 +2005,17 @@ static void mce_disable_error_reporting(void) static void vendor_disable_error_reporting(void) { /* - * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide. + * Don't clear on Intel, AMD, Hygon or Zhaoxin CPUs. Some of these + * MSRs are socket-wide. * Disabling them for just a single offlined CPU is bad, since it will * inhibit reporting for all shared resources on the socket like the * last level cache (LLC), the integrated memory controller (iMC), etc. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || - boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) return; mce_disable_error_reporting(); @@ -2470,6 +2508,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key); static int __init mcheck_late_init(void) { + pr_info("Using %d MCE banks\n", mca_cfg.banks); + if (mca_cfg.recovery) static_branch_inc(&mcsafe_key); diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c similarity index 99% rename from arch/x86/kernel/cpu/mcheck/dev-mcelog.c rename to arch/x86/kernel/cpu/mce/dev-mcelog.c index 97685a0c317513330385cd2943ffc3ebe1e9a829..8d2e4e3411b1f946ae6a946a6dbd6bfe5bf8fc59 100644 --- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -15,7 +15,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" static BLOCKING_NOTIFIER_HEAD(mce_injector_chain); diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mce/genpool.c similarity index 99% rename from arch/x86/kernel/cpu/mcheck/mce-genpool.c rename to arch/x86/kernel/cpu/mce/genpool.c index 217cd4449bc9db3aedfd6367529bc9ca99fb8443..3395549c51d3f74502c18060ac5160ad934a1b48 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c +++ b/arch/x86/kernel/cpu/mce/genpool.c @@ -10,7 +10,7 @@ #include #include #include -#include "mce-internal.h" +#include "internal.h" /* * printk() is not safe in MCE context. 
This is a lock-less memory allocator diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mce/inject.c similarity index 97% rename from arch/x86/kernel/cpu/mcheck/mce-inject.c rename to arch/x86/kernel/cpu/mce/inject.c index c805a06e14c388bc9e46cd66bf11a00b1c6f742d..f21f3f26c555fff460600e8ad4fc67fff2745619 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -38,7 +38,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" /* * Collect all the MCi_XXX settings @@ -46,8 +46,6 @@ static struct mce i_mce; static struct dentry *dfs_inj; -static u8 n_banks; - #define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 @@ -108,6 +106,9 @@ static void setup_inj_struct(struct mce *m) memset(m, 0, sizeof(struct mce)); m->cpuvendor = boot_cpu_data.x86_vendor; + m->time = ktime_get_real_seconds(); + m->cpuid = cpuid_eax(1); + m->microcode = boot_cpu_data.microcode; } /* Update fake mce registers on current CPU. */ @@ -517,7 +518,7 @@ static void do_inject(void) */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); + i_mce.status &= ~MCI_STATUS_UC; } /* @@ -567,15 +568,24 @@ static void do_inject(void) static int inj_bank_set(void *data, u64 val) { struct mce *m = (struct mce *)data; + u8 n_banks; + u64 cap; + + /* Get bank count on target CPU so we can handle non-uniform values. */ + rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); + n_banks = cap & MCG_BANKCNT_MASK; if (val >= n_banks) { - pr_err("Non-existent MCE bank: %llu\n", val); + pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu); return -EINVAL; } m->bank = val; do_inject(); + /* Reset injection struct */ + setup_inj_struct(&i_mce); + return 0; } @@ -659,10 +669,6 @@ static struct dfs_node { static int __init debugfs_init(void) { unsigned int i; - u64 cap; - - rdmsrl(MSR_IA32_MCG_CAP, cap); - n_banks = cap & MCG_BANKCNT_MASK; dfs_inj = debugfs_create_dir("mce-inject", NULL); if (!dfs_inj) diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mce/intel.c similarity index 97% rename from arch/x86/kernel/cpu/mcheck/mce_intel.c rename to arch/x86/kernel/cpu/mce/intel.c index d05be307d081a67c12eef433b5c832fcfcffa1a7..03b090331134778c54ace806bbe7159460a10693 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -18,7 +18,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" /* * Support for Intel Correct Machine Check Interrupts. This allows @@ -85,8 +85,11 @@ static int cmci_supported(int *banks) * initialization is vendor keyed and this * makes sure none of the backdoors are entered otherwise. 
*/ - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) return 0; + if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) return 0; rdmsrl(MSR_IA32_MCG_CAP, cap); @@ -423,7 +426,7 @@ void cmci_disable_bank(int bank) raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } -static void intel_init_cmci(void) +void intel_init_cmci(void) { int banks; @@ -442,7 +445,7 @@ static void intel_init_cmci(void) cmci_recheck(); } -static void intel_init_lmce(void) +void intel_init_lmce(void) { u64 val; @@ -455,7 +458,7 @@ static void intel_init_lmce(void) wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); } -static void intel_clear_lmce(void) +void intel_clear_lmce(void) { u64 val; diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mce/internal.h similarity index 95% rename from arch/x86/kernel/cpu/mcheck/mce-internal.h rename to arch/x86/kernel/cpu/mce/internal.h index ceb67cd5918ff4b0b5dbce93fa1f3a36670084bc..22e8aa8c8fe72501698022ad0b422f48a5a1cf84 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -52,11 +52,17 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval); bool mce_intel_cmci_poll(void); void mce_intel_hcpu_update(unsigned long cpu); void cmci_disable_bank(int bank); +void intel_init_cmci(void); +void intel_init_lmce(void); +void intel_clear_lmce(void); #else # define cmci_intel_adjust_timer mce_adjust_timer_default static inline bool mce_intel_cmci_poll(void) { return false; } static inline void mce_intel_hcpu_update(unsigned long cpu) { } static inline void cmci_disable_bank(int bank) { } +static inline void intel_init_cmci(void) { } +static inline void intel_init_lmce(void) { } +static inline void intel_clear_lmce(void) { } #endif void mce_timer_kick(unsigned long interval); diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mce/p5.c similarity index 100% rename from arch/x86/kernel/cpu/mcheck/p5.c rename to arch/x86/kernel/cpu/mce/p5.c diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mce/severity.c similarity index 97% rename from arch/x86/kernel/cpu/mcheck/mce-severity.c rename to arch/x86/kernel/cpu/mce/severity.c index f34d89c01edc5c761e0df331da1331f8a0f98f3a..65201e180fe0ee019b7571fc3921a0745a6bdbe1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -16,7 +16,7 @@ #include #include -#include "mce-internal.h" +#include "internal.h" /* * Grade an mce by severity. 
In general the most severe ones are processed @@ -165,6 +165,11 @@ static struct severity { SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), KERNEL ), + MCESEV( + PANIC, "Instruction fetch error in kernel", + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), + KERNEL + ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", @@ -336,7 +341,8 @@ int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) = void __init mcheck_vendor_init_severity(void) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) mce_severity = mce_severity_amd; } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c similarity index 99% rename from arch/x86/kernel/cpu/mcheck/therm_throt.c rename to arch/x86/kernel/cpu/mce/therm_throt.c index 2da67b70ba989821db25e3190a5c68964f9e1c26..ec6a07b04fdbbee7e9807aeeca7aaf95e07c3777 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -184,7 +185,7 @@ static void therm_throt_process(bool new_event, int event, int level) /* if we just entered the thermal event */ if (new_event) { if (event == THERMAL_THROTTLING_EVENT) - pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", + pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, level == CORE_LEVEL ? "Core" : "Package", state->count); @@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void) static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; -asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r) +asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs) { entering_irq(); trace_thermal_apic_entry(THERMAL_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c similarity index 86% rename from arch/x86/kernel/cpu/mcheck/threshold.c rename to arch/x86/kernel/cpu/mce/threshold.c index 2b584b319eff37532df8c8729fc520725bee5c29..c21e0a1efd0fb7d9a09fc93a2396a5b2f92bd3ff 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mce/threshold.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -18,7 +19,7 @@ static void default_threshold_interrupt(void) void (*mce_threshold_vector)(void) = default_threshold_interrupt; -asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs) { entering_irq(); trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c similarity index 100% rename from arch/x86/kernel/cpu/mcheck/winchip.c rename to arch/x86/kernel/cpu/mce/winchip.c diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 07b5fc00b188047cdd2830f164626e19de9da8f9..0d548a4264aa7f65b794e9b2af8c81fd988b8cca 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -239,11 +239,14 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { #ifdef CONFIG_X86_64 - char fw_name[36] = 
"amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); return get_builtin_firmware(cp, fw_name); #else @@ -260,11 +263,18 @@ static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret) if (IS_ENABLED(CONFIG_X86_32)) { uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = (const char *)__pa_nodebug( + "kernel/x86/microcode/HygonGenuine.bin"); + else + path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { uci = ucode_cpu_info; - path = ucode_path; + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; use_pa = false; } @@ -327,8 +337,14 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; + const char *path; + + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; - cp = find_microcode_in_initrd(ucode_path, false); + cp = find_microcode_in_initrd(path, false); if (!(cp.data && cp.size)) return -EINVAL; @@ -466,27 +482,22 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, { u32 max_size; + if (family >= 0x15) + return min_t(u32, patch_size, size); + #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 -#define F15H_MPB_MAX_SIZE 4096 -#define F16H_MPB_MAX_SIZE 3458 -#define F17H_MPB_MAX_SIZE 3200 switch (family) { + case 0x10 ... 
0x12: + max_size = F1XH_MPB_MAX_SIZE; + break; case 0x14: max_size = F14H_MPB_MAX_SIZE; break; - case 0x15: - max_size = F15H_MPB_MAX_SIZE; - break; - case 0x16: - max_size = F16H_MPB_MAX_SIZE; - break; - case 0x17: - max_size = F17H_MPB_MAX_SIZE; - break; default: - max_size = F1XH_MPB_MAX_SIZE; + WARN(1, "%s: WTF family: 0x%x\n", __func__, family); + return 0; break; } @@ -707,7 +718,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) if (!p) { return ret; } else { - if (boot_cpu_data.microcode == p->patch_id) + if (boot_cpu_data.microcode >= p->patch_id) return ret; ret = UCODE_NEW; @@ -742,7 +753,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) static enum ucode_state request_microcode_amd(int cpu, struct device *device, bool refresh_fw) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); bool bsp = c->cpu_index == boot_cpu_data.cpu_index; enum ucode_state ret = UCODE_NFOUND; @@ -752,8 +763,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, if (!refresh_fw || !bsp) return UCODE_OK; - if (c->x86 >= 0x15) - snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), + "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); @@ -812,6 +827,22 @@ struct microcode_ops * __init init_amd_microcode(void) return &microcode_amd_ops; } +#ifdef CONFIG_MICROCODE_HYGON +const struct microcode_ops * __init init_hygon_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (c->x86_vendor != X86_VENDOR_HYGON) + return NULL; + + if (ucode_new_rev) + pr_info_once("microcode updated early to new patch_level=0x%08x\n", + ucode_new_rev); + + return &microcode_amd_ops; +} +#endif + void __exit exit_amd_microcode(void) { cleanup(); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b9bc8a1a584e39590e7beecdafb47773015794f7..2aac491ae6b7e868db19235df632c648c6c9b136 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -46,7 +46,11 @@ #define DRIVER_VERSION "2.2" +#ifdef CONFIG_MICROCODE_HYGON +static const struct microcode_ops *microcode_ops; +#else static struct microcode_ops *microcode_ops; +#endif static bool dis_ucode_ldr = true; bool initrd_gone; @@ -138,7 +142,8 @@ static bool __init check_loader_disabled_bsp(void) if (native_cpuid_ecx(1) & BIT(31)) return *res; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) return *res; } @@ -190,6 +195,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -230,6 +239,9 @@ void load_ucode_ap(void) if (x86_family(cpuid_1_eax) >= 0x10) load_ucode_amd_ap(cpuid_1_eax); break; + case X86_VENDOR_HYGON: + load_ucode_amd_ap(cpuid_1_eax); + break; default: break; } @@ -249,6 +261,9 @@ static int __init save_microcode_in_initrd(void) if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; + case X86_VENDOR_HYGON: + ret = 
save_microcode_in_initrd_amd(cpuid_eax(1)); + break; default: break; } @@ -342,6 +357,9 @@ void reload_early_microcode(void) if (family >= 0x10) reload_ucode_amd(); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(); + break; default: break; } @@ -418,8 +436,9 @@ static int do_microcode_update(const void __user *buf, size_t size) if (ustate == UCODE_ERROR) { error = -1; break; - } else if (ustate == UCODE_OK) + } else if (ustate == UCODE_NEW) { apply_microcode_on_target(cpu); + } } return error; @@ -626,16 +645,16 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); - if (tmp_ret != UCODE_NEW) - return size; - get_online_cpus(); ret = check_online_cpus(); if (ret) goto put; + tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); + if (tmp_ret != UCODE_NEW) + goto put; + mutex_lock(&microcode_mutex); ret = microcode_reload_late(); mutex_unlock(&microcode_mutex); @@ -772,9 +791,9 @@ static struct subsys_interface mc_cpu_interface = { }; /** - * mc_bp_resume - Update boot CPU microcode during resume. + * microcode_bsp_resume - Update boot CPU microcode during resume. */ -static void mc_bp_resume(void) +void microcode_bsp_resume(void) { int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; @@ -786,16 +805,19 @@ static void mc_bp_resume(void) } static struct syscore_ops mc_syscore_ops = { - .resume = mc_bp_resume, + .resume = microcode_bsp_resume, }; -static int mc_cpu_online(unsigned int cpu) +static int mc_cpu_starting(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); microcode_update_cpu(cpu); pr_debug("CPU%d added\n", cpu); + return 0; +} + +static int mc_cpu_online(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); @@ -836,6 +858,8 @@ int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); @@ -872,6 +896,8 @@ int __init microcode_init(void) goto out_ucode_group; register_syscore_ops(&mc_syscore_ops); + cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", + mc_cpu_starting, NULL); cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index d0dfb892c72fe7e3648fba753e0d7351d19006f9..aed45b8895d5b5f5c293ae4f6a47499e580f5825 100644 --- a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -4,6 +4,8 @@ # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h # +set -e + IN=$1 OUT=$2 diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index ad12733f605854b4eae755102d5b68060fdf4bb8..b28874ef70141b1e9458cbb501fff7dcbae59c73 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -242,6 +243,7 @@ static void __init ms_hyperv_init_platform(void) ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { x86_platform.calibrate_tsc = hv_get_tsc_khz; x86_platform.calibrate_cpu = hv_get_tsc_khz; + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); } if 
(ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) { @@ -259,9 +261,9 @@ static void __init ms_hyperv_init_platform(void) rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); - lapic_timer_frequency = hv_lapic_frequency; + lapic_timer_period = hv_lapic_frequency; pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n", - lapic_timer_frequency); + lapic_timer_period); } register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST, @@ -285,6 +287,16 @@ static void __init ms_hyperv_init_platform(void) if (efi_enabled(EFI_BOOT)) x86_platform.get_nmi_reason = hv_get_nmi_reason; + /* + * Hyper-V VMs have a PIT emulation quirk such that zeroing the + * counter register during PIT shutdown restarts the PIT. So it + * continues to interrupt @18.2 HZ. Setting i8253_clear_counter + * to false tells pit_shutdown() not to zero the counter so that + * the PIT really is shutdown. Generation 2 VMs don't have a PIT, + * and setting this value has no effect. + */ + i8253_clear_counter_on_shutdown = false; + #if IS_ENABLED(CONFIG_HYPERV) /* * Setup the hook to get control post apic initialization. diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 765afd5990398c9ef687d1cfe8b3b97b6fc46335..3668c5df90c6997737ddaf5de028e51d943080aa 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -831,7 +831,8 @@ int __init amd_special_default_mtrr(void) { u32 l, h; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; if (boot_cpu_data.x86 < 0xf) return 0; diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 40eee6cc412484470daba013f2a197439163707a..254683b503a9f957d2910708556df89840df4493 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) struct mtrr_gentry gentry; void __user *arg = (void __user *) __arg; + memset(&gentry, 0, sizeof(gentry)); + switch (cmd) { case MTRRIOC_ADD_ENTRY: case MTRRIOC_SET_ENTRY: diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 9a19c800fe40095f1c706e4a50d5378339d429b8..93ed6d3bf30021e9f2e88a948d93fb9289f760d7 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -127,7 +127,7 @@ static void __init set_num_var_ranges(void) if (use_intel()) rdmsr(MSR_MTRRcap, config, dummy); - else if (is_cpu(AMD)) + else if (is_cpu(AMD) || is_cpu(HYGON)) config = 2; else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) config = 8; @@ -819,7 +819,7 @@ void mtrr_save_state(void) { int first_cpu; - if (!mtrr_enabled()) + if (!mtrr_enabled() || !mtrr_state.have_fixed) return; first_cpu = cpumask_first(cpu_online_mask); diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index d389083330c50f8efb57a436a6af6773365ae00e..a548d9104604f19d787e13679d11d62f0c13e16f 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -46,6 +46,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) { /* returns the bit offset of the performance counter register */ switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: if (msr >= MSR_F15H_PERF_CTR) return (msr - MSR_F15H_PERF_CTR) >> 1; @@ -62,6 +63,10 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) case 
15: return msr - MSR_P4_BPU_PERFCTR0; } + break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + return msr - MSR_ARCH_PERFMON_PERFCTR0; } return 0; } @@ -74,6 +79,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) { /* returns the bit offset of the event selection register */ switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: if (msr >= MSR_F15H_PERF_CTL) return (msr - MSR_F15H_PERF_CTL) >> 1; @@ -90,6 +96,10 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) case 15: return msr - MSR_P4_BSU_ESCR0; } + break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + return msr - MSR_ARCH_PERFMON_EVENTSEL0; } return 0; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 772c219b688989926eac0e082d9dbdba1b4109f1..90bd155d7e7a29e51675c6d531e6afbb2ec3b1d1 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -21,6 +21,11 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, + { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, + { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, + { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 }, + { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 }, + { X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 71ca064e379488f072468f0d3bc57a37beec7855..5c15e1329fcbd237387f32fcc161934d86d623ac 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -13,44 +13,77 @@ /* leaf 0xb SMT level */ #define SMT_LEVEL 0 -/* leaf 0xb sub-leaf types */ +/* extended topology sub-leaf types */ #define INVALID_TYPE 0 #define SMT_TYPE 1 #define CORE_TYPE 2 +#define DIE_TYPE 5 #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) -int detect_extended_topology_early(struct cpuinfo_x86 *c) -{ +unsigned int __max_die_per_package __read_mostly = 1; +EXPORT_SYMBOL(__max_die_per_package); + #ifdef CONFIG_SMP +/* + * Check if given CPUID extended topology "leaf" is implemented + */ +static int check_extended_topology_leaf(int leaf) +{ unsigned int eax, ebx, ecx, edx; - if (c->cpuid_level < 0xb) + cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); + + if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) return -1; - cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); + return 0; +} +/* + * Return best CPUID Extended Topology Leaf supported + */ +static int detect_extended_topology_leaf(struct cpuinfo_x86 *c) +{ + if (c->cpuid_level >= 0x1f) { + if (check_extended_topology_leaf(0x1f) == 0) + return 0x1f; + } - /* - * check if the cpuid leaf 0xb is actually implemented. 
- */ - if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) + if (c->cpuid_level >= 0xb) { + if (check_extended_topology_leaf(0xb) == 0) + return 0xb; + } + + return -1; +} +#endif + +int detect_extended_topology_early(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + unsigned int eax, ebx, ecx, edx; + int leaf; + + leaf = detect_extended_topology_leaf(c); + if (leaf < 0) return -1; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); + cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); /* * initial apic id, which also represents 32-bit extended x2apic id. */ c->initial_apicid = edx; - smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); + smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); #endif return 0; } /* - * Check for extended topology enumeration cpuid leaf 0xb and if it + * Check for extended topology enumeration cpuid leaf, and if it * exists, use it for populating initial_apicid and cpu topology * detection. */ @@ -58,22 +91,30 @@ int detect_extended_topology(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx, sub_index; - unsigned int ht_mask_width, core_plus_mask_width; + unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; + unsigned int die_select_mask, die_level_siblings; + bool die_level_present = false; + int leaf; - if (detect_extended_topology_early(c) < 0) + leaf = detect_extended_topology_leaf(c); + if (leaf < 0) return -1; /* * Populate HT related information from sub-leaf level 0. */ - cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); - core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); + cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); + c->initial_apicid = edx; + core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); + smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); + die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; do { - cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx); + cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); /* * Check for the Core type in the implemented sub leaves. @@ -81,23 +122,39 @@ int detect_extended_topology(struct cpuinfo_x86 *c) if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - break; + die_level_siblings = core_level_siblings; + die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + } + if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) { + die_level_present = true; + die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); + die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } sub_index++; } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; + die_select_mask = (~(-1 << die_plus_mask_width)) >> + core_plus_mask_width; + + c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, + ht_mask_width) & core_select_mask; + + if (die_level_present) { + c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, + core_plus_mask_width) & die_select_mask; + } - c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) - & core_select_mask; - c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width); + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, + die_plus_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. 
*/ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); + __max_die_per_package = (die_level_siblings / core_level_siblings); #endif return 0; } diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c new file mode 100644 index 0000000000000000000000000000000000000000..3e20d322bc98b636cde2747394d26fcec8783a0c --- /dev/null +++ b/arch/x86/kernel/cpu/tsx.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Transactional Synchronization Extensions (TSX) control. + * + * Copyright (C) 2019 Intel Corporation + * + * Author: + * Pawan Gupta + */ + +#include + +#include + +#include "cpu.h" + +enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED; + +void tsx_disable(void) +{ + u64 tsx; + + rdmsrl(MSR_IA32_TSX_CTRL, tsx); + + /* Force all transactions to immediately abort */ + tsx |= TSX_CTRL_RTM_DISABLE; + + /* + * Ensure TSX support is not enumerated in CPUID. + * This is visible to userspace and will ensure they + * do not waste resources trying TSX transactions that + * will always abort. + */ + tsx |= TSX_CTRL_CPUID_CLEAR; + + wrmsrl(MSR_IA32_TSX_CTRL, tsx); +} + +void tsx_enable(void) +{ + u64 tsx; + + rdmsrl(MSR_IA32_TSX_CTRL, tsx); + + /* Enable the RTM feature in the cpu */ + tsx &= ~TSX_CTRL_RTM_DISABLE; + + /* + * Ensure TSX support is enumerated in CPUID. + * This is visible to userspace and will ensure they + * can enumerate and use the TSX feature. + */ + tsx &= ~TSX_CTRL_CPUID_CLEAR; + + wrmsrl(MSR_IA32_TSX_CTRL, tsx); +} + +static bool __init tsx_ctrl_is_supported(void) +{ + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* + * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this + * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. + * + * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a + * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES + * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get + * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, + * tsx= cmdline requests will do nothing on CPUs without + * MSR_IA32_TSX_CTRL support. + */ + return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); +} + +static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) +{ + if (boot_cpu_has_bug(X86_BUG_TAA)) + return TSX_CTRL_DISABLE; + + return TSX_CTRL_ENABLE; +} + +void __init tsx_init(void) +{ + char arg[5] = {}; + int ret; + + if (!tsx_ctrl_is_supported()) + return; + + ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg)); + if (ret >= 0) { + if (!strcmp(arg, "on")) { + tsx_ctrl_state = TSX_CTRL_ENABLE; + } else if (!strcmp(arg, "off")) { + tsx_ctrl_state = TSX_CTRL_DISABLE; + } else if (!strcmp(arg, "auto")) { + tsx_ctrl_state = x86_get_tsx_auto_mode(); + } else { + tsx_ctrl_state = TSX_CTRL_DISABLE; + pr_err("tsx: invalid option, defaulting to off\n"); + } + } else { + /* tsx= not provided */ + if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO)) + tsx_ctrl_state = x86_get_tsx_auto_mode(); + else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF)) + tsx_ctrl_state = TSX_CTRL_DISABLE; + else + tsx_ctrl_state = TSX_CTRL_ENABLE; + } + + if (tsx_ctrl_state == TSX_CTRL_DISABLE) { + tsx_disable(); + + /* + * tsx_disable() will change the state of the + * RTM CPUID bit. Clear it here since it is now + * expected to be not set. + */ + setup_clear_cpu_cap(X86_FEATURE_RTM); + } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) { + + /* + * HW defaults TSX to be enabled at bootup. 
+ * We may still need the TSX enable support + * during init for special cases like + * kexec after TSX is disabled. + */ + tsx_enable(); + + /* + * tsx_enable() will change the state of the + * RTM CPUID bit. Force it here since it is now + * expected to be set. + */ + setup_force_cpu_cap(X86_FEATURE_RTM); + } +} diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 8e005329648b6b49d9afe671062368fe25db38e9..9780568dd3afca71ef330f76a15a5419ff269dce 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s) } early_param("no-vmw-sched-clock", setup_vmw_sched_clock); -static unsigned long long vmware_sched_clock(void) +static unsigned long long notrace vmware_sched_clock(void) { unsigned long long ns; @@ -157,7 +157,7 @@ static void __init vmware_platform_setup(void) #ifdef CONFIG_X86_LOCAL_APIC /* Skip lapic calibration since we know the bus frequency. */ - lapic_timer_frequency = ecx / HZ; + lapic_timer_period = ecx / HZ; pr_info("Host bus clock speed read from hypervisor : %u Hz\n", ecx); #endif diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..e4ed34361a1ffeb954ec92ba22ab6aa464dcb366 --- /dev/null +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +#include "cpu.h" + +#define MSR_ZHAOXIN_FCR57 0x00001257 + +#define ACE_PRESENT (1 << 6) +#define ACE_ENABLED (1 << 7) +#define ACE_FCR (1 << 7) /* MSR_ZHAOXIN_FCR */ + +#define RNG_PRESENT (1 << 2) +#define RNG_ENABLED (1 << 3) +#define RNG_ENABLE (1 << 8) /* MSR_ZHAOXIN_RNG */ + +#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 +#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 +#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 +#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 +#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 +#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 + +static void init_zhaoxin_cap(struct cpuinfo_x86 *c) +{ + u32 lo, hi; + + /* Test for Extended Feature Flags presence */ + if (cpuid_eax(0xC0000000) >= 0xC0000001) { + u32 tmp = cpuid_edx(0xC0000001); + + /* Enable ACE unit, if present and disabled */ + if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { + rdmsr(MSR_ZHAOXIN_FCR57, lo, hi); + /* Enable ACE unit */ + lo |= ACE_FCR; + wrmsr(MSR_ZHAOXIN_FCR57, lo, hi); + pr_info("CPU: Enabled ACE h/w crypto\n"); + } + + /* Enable RNG unit, if present and disabled */ + if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { + rdmsr(MSR_ZHAOXIN_FCR57, lo, hi); + /* Enable RNG unit */ + lo |= RNG_ENABLE; + wrmsr(MSR_ZHAOXIN_FCR57, lo, hi); + pr_info("CPU: Enabled h/w RNG\n"); + } + + /* + * Store Extended Feature Flags as word 5 of the CPU + * capability bit array + */ + c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); + } + + if (c->x86 >= 0x6) + set_cpu_cap(c, X86_FEATURE_REP_GOOD); +} + +static void early_init_zhaoxin(struct cpuinfo_x86 *c) +{ + if (c->x86 >= 0x6) + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); +#ifdef CONFIG_X86_64 + set_cpu_cap(c, X86_FEATURE_SYSENTER32); +#endif + if (c->x86_power & (1 << 8)) { + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + } + + if (c->cpuid_level >= 0x00000001) { + u32 eax, ebx, ecx, edx; + + cpuid(0x00000001, &eax, &ebx, &ecx, &edx); + /* + * If HTT (EDX[28]) is set EBX[16:23] contain the number of + * apicids which are reserved 
per package. Store the resulting + * shift value for the package management code. + */ + if (edx & (1U << 28)) + c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); + } + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); +} + +static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c) +{ + u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; + + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); + msr_ctl = vmx_msr_high | vmx_msr_low; + + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) + set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) + set_cpu_cap(c, X86_FEATURE_VNMI); + if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + vmx_msr_low, vmx_msr_high); + msr_ctl2 = vmx_msr_high | vmx_msr_low; + if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && + (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) + set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); + if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) + set_cpu_cap(c, X86_FEATURE_EPT); + if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) + set_cpu_cap(c, X86_FEATURE_VPID); + } +} + +static void init_zhaoxin(struct cpuinfo_x86 *c) +{ + early_init_zhaoxin(c); + detect_extended_topology(c); + init_intel_cacheinfo(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); +#ifdef CONFIG_X86_32 + detect_ht(c); +#endif + } + + if (c->cpuid_level > 9) { + unsigned int eax = cpuid_eax(10); + + /* + * Check for version and the number of counters + * Version(eax[7:0]) can't be 0; + * Counters(eax[15:8]) should be greater than 1; + */ + if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1)) + set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); + } + + if (c->x86 >= 0x6) + init_zhaoxin_cap(c); +#ifdef CONFIG_X86_64 + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); +#endif + + if (cpu_has(c, X86_FEATURE_VMX)) + zhaoxin_detect_vmx_virtcap(c); +} + +#ifdef CONFIG_X86_32 +static unsigned int +zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size) +{ + return size; +} +#endif + +static const struct cpu_dev zhaoxin_cpu_dev = { + .c_vendor = "zhaoxin", + .c_ident = { " Shanghai " }, + .c_early_init = early_init_zhaoxin, + .c_init = init_zhaoxin, +#ifdef CONFIG_X86_32 + .legacy_cache_size = zhaoxin_size_cache, +#endif + .c_x86_vendor = X86_VENDOR_ZHAOXIN, +}; + +cpu_dev_register(zhaoxin_cpu_dev); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index f631a3f155875bfad8b8e709364a1e5c38706e24..91b3483e5085f3317703e77903522554e351d548 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -356,7 +356,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) struct crash_memmap_data cmd; struct crash_mem *cmem; - cmem = vzalloc(sizeof(struct crash_mem)); + cmem = vzalloc(struct_size(cmem, ranges, 1)); if (!cmem) return -ENOMEM; diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index cd53f3030e4004e89d7d027cee217fb8e0ea08a7..d305440ebe9c23aa231fbe603fec0fb8f158bfb5 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -41,7 +41,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info) * This is a software stack, so 'end' can be a valid stack pointer. * It just means the stack is empty. 
*/ - if (stack <= begin || stack > end) + if (stack < begin || stack > end) return false; info->type = STACK_TYPE_IRQ; @@ -66,7 +66,7 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info) * This is a software stack, so 'end' can be a valid stack pointer. * It just means the stack is empty. */ - if (stack <= begin || stack > end) + if (stack < begin || stack > end) return false; info->type = STACK_TYPE_SOFTIRQ; diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 5cdb9e84da57db3d4a494fdd41805219303bd7b6..16a780db77dc4276f3fc05cfb1cf267958071483 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -65,7 +65,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info) begin = end - (exception_stack_sizes[k] / sizeof(long)); regs = (struct pt_regs *)end - 1; - if (stack <= begin || stack >= end) + if (stack < begin || stack >= end) continue; info->type = STACK_TYPE_EXCEPTION + k; @@ -88,7 +88,7 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info) * This is a software stack, so 'end' can be a valid stack pointer. * It just means the stack is empty. */ - if (stack <= begin || stack > end) + if (stack < begin || stack >= end) return false; info->type = STACK_TYPE_IRQ; @@ -137,7 +137,8 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, */ if (visit_mask) { if (*visit_mask & (1UL << info->type)) { - printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); + if (task == current) + printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); goto unknown; } *visit_mask |= 1UL << info->type; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index c88c23c658c1e99faad3daa236448bc4208901d7..d1f25c83144752272401afe8c8aec313d40298ae 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1248,7 +1248,6 @@ void __init e820__memblock_setup(void) { int i; u64 end; - u64 addr = 0; /* * The bootstrap memblock region count maximum is 128 entries @@ -1265,21 +1264,13 @@ void __init e820__memblock_setup(void) struct e820_entry *entry = &e820_table->entries[i]; end = entry->addr + entry->size; - if (addr < entry->addr) - memblock_reserve(addr, entry->addr - addr); - addr = end; if (end != (resource_size_t)end) continue; - /* - * all !E820_TYPE_RAM ranges (including gap ranges) are put - * into memblock.reserved to make sure that struct pages in - * such regions are not left uninitialized after bootup. - */ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - memblock_reserve(entry->addr, entry->size); - else - memblock_add(entry->addr, entry->size); + continue; + + memblock_add(entry->addr, entry->size); } /* Throw away partial pages: */ diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7cfaf191505fc9feaae5414f7270b..374a52fa529694f7399ad59e0b4ec2c1d598c636 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset) * early_pci_serial_init() * * This function is invoked when the early_printk param starts with "pciserial" - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the - * location of a PCI device that must be a UART device. + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe + * the location of a PCI device that must be a UART device. 
"force" is optional + * and overrides the use of an UART device with a wrong PCI class code. */ static __init void early_pci_serial_init(char *s) { @@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s) u32 classcode, bar0; u16 cmdreg; char *e; + int force = 0; - - /* - * First, part the param to get the BDF values - */ if (*s == ',') ++s; if (*s == 0) return; + /* Force the use of an UART device with wrong class code */ + if (!strncmp(s, "force,", 6)) { + force = 1; + s += 6; + } + + /* + * Part the param to get the BDF values + */ bus = (u8)simple_strtoul(s, &e, 16); s = e; if (*s != ':') @@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s) s++; /* - * Second, find the device from the BDF + * Find the device from the BDF */ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); @@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s) */ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ - return; + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { + if (!force) + return; + } /* * Determine if it is IO or memory mapped @@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s) } /* - * Lastly, initialize the hardware + * Initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 2ea85b32421a02d5f3f8e6643757610576841e3c..2e5003fef51a9392ecca9d413f7f6558095ea9c7 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -93,7 +93,7 @@ bool irq_fpu_usable(void) } EXPORT_SYMBOL(irq_fpu_usable); -void __kernel_fpu_begin(void) +static void __kernel_fpu_begin(void) { struct fpu *fpu = ¤t->thread.fpu; @@ -111,9 +111,8 @@ void __kernel_fpu_begin(void) __cpu_invalidate_fpregs_state(); } } -EXPORT_SYMBOL(__kernel_fpu_begin); -void __kernel_fpu_end(void) +static void __kernel_fpu_end(void) { struct fpu *fpu = ¤t->thread.fpu; @@ -122,7 +121,6 @@ void __kernel_fpu_end(void) kernel_fpu_enable(); } -EXPORT_SYMBOL(__kernel_fpu_end); void kernel_fpu_begin(void) { diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index bc02f5144b958bd8182f284e2cfa6dd5a65384c8..621d249ded0b967bfc90d3af4da564b8e41f0cb6 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -128,7 +128,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, /* * A whole standard-format XSAVE buffer is needed: */ - if ((pos != 0) || (count < fpu_user_xstate_size)) + if (pos != 0 || count != fpu_user_xstate_size) return -EFAULT; xsave = &fpu->state.xsave; diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 61a949d84dfa52aff8572bfd88a3e6dc43556222..3d9f4b7b4121c49572747ffe5791e29c3b9b7730 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -164,7 +164,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); - if (!access_ok(VERIFY_WRITE, buf, size)) + if (!access_ok(buf, size)) return -EACCES; if (!static_cpu_has(X86_FEATURE_FPU)) @@ -272,6 +272,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) int state_size = fpu_kernel_xstate_size; u64 xfeatures = 0; int fx_only = 0; + int ret = 0; ia32_fxstate &= 
(IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); @@ -281,15 +282,21 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) return 0; } - if (!access_ok(VERIFY_READ, buf, size)) - return -EACCES; + if (!access_ok(buf, size)) { + ret = -EACCES; + goto out_err; + } fpu__initialize(fpu); - if (!static_cpu_has(X86_FEATURE_FPU)) - return fpregs_soft_set(current, NULL, - 0, sizeof(struct user_i387_ia32_struct), - NULL, buf) != 0; + if (!static_cpu_has(X86_FEATURE_FPU)) { + ret = fpregs_soft_set(current, NULL, + 0, sizeof(struct user_i387_ia32_struct), + NULL, buf) != 0; + if (ret) + goto out_err; + return 0; + } if (use_xsave()) { struct _fpx_sw_bytes fx_sw_user; @@ -344,11 +351,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } + local_bh_disable(); fpu->initialized = 1; - preempt_disable(); fpu__restore(fpu); - preempt_enable(); + local_bh_enable(); + /* Failure is already handled */ return err; } else { /* @@ -356,13 +364,14 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) * state to the registers directly (with exceptions handled). */ user_fpu_begin(); - if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) { - fpu__clear(fpu); - return -1; - } + if (!copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) + return 0; + ret = -1; } - return 0; +out_err: + fpu__clear(fpu); + return ret; } static inline int xstate_sigframe_size(void) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 87a57b7642d3673420b272fec0f442b9baf76414..78372d5a35ecb6fb7d5903294b586283d6a48c98 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -404,6 +404,24 @@ static void __init print_xstate_offset_size(void) } } +/* + * All supported features have either init state all zeros or are + * handled in setup_init_fpu() individually. This is an explicit + * feature list and does not use XFEATURE_MASK*SUPPORTED to catch + * newly added supported features at build time and make people + * actually look at the init state for the new feature. + */ +#define XFEATURES_INIT_FPSTATE_HANDLED \ + (XFEATURE_MASK_FP | \ + XFEATURE_MASK_SSE | \ + XFEATURE_MASK_YMM | \ + XFEATURE_MASK_OPMASK | \ + XFEATURE_MASK_ZMM_Hi256 | \ + XFEATURE_MASK_Hi16_ZMM | \ + XFEATURE_MASK_PKRU | \ + XFEATURE_MASK_BNDREGS | \ + XFEATURE_MASK_BNDCSR) + /* * setup the xstate image representing the init state */ @@ -411,6 +429,8 @@ static void __init setup_init_fpu_buf(void) { static int on_boot_cpu __initdata = 1; + BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED); + WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; @@ -429,10 +449,22 @@ static void __init setup_init_fpu_buf(void) copy_kernel_to_xregs_booting(&init_fpstate.xsave); /* - * Dump the init state again. This is to identify the init state - * of any feature which is not represented by all zero's. + * All components are now in init state. Read the state back so + * that init_fpstate contains all non-zero init state. This only + * works with XSAVE, but not with XSAVEOPT and XSAVES because + * those use the init optimization which skips writing data for + * components in init state. + * + * XSAVE could be used, but that would require to reshuffle the + * data when XSAVES is available because XSAVES uses xstate + * compaction. But doing so is a pointless exercise because most + * components have an all zeros init state except for the legacy + * ones (FP and SSE). 
Those can be saved with FXSAVE into the + * legacy area. Adding new features requires to ensure that init + * state is all zeroes or if not to add the necessary handling + * here. */ - copy_xregs_to_kernel_booting(&init_fpstate.xsave); + fxsave(&init_fpstate.fxsave); } static int xfeature_uncompacted_offset(int xfeature_nr) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e39accb3f8e53ab7eb5372e0726f7..5790671857e55822c15286dc1e257315056abcfb 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -29,11 +30,13 @@ #include #include #include +#include #ifdef CONFIG_DYNAMIC_FTRACE int ftrace_arch_code_modify_prepare(void) { + mutex_lock(&text_mutex); set_kernel_text_rw(); set_all_modules_text_rw(); return 0; @@ -43,13 +46,14 @@ int ftrace_arch_code_modify_post_process(void) { set_all_modules_text_ro(); set_kernel_text_ro(); + mutex_unlock(&text_mutex); return 0; } union ftrace_code_union { char code[MCOUNT_INSN_SIZE]; struct { - unsigned char e8; + unsigned char op; int offset; } __attribute__((packed)); }; @@ -59,20 +63,23 @@ static int ftrace_calc_offset(long ip, long addr) return (int)(addr - ip); } -static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +static unsigned char * +ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr) { static union ftrace_code_union calc; - calc.e8 = 0xe8; + calc.op = op; calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - /* - * No locking needed, this must be called via kstop_machine - * which in essence is like running on a uniprocessor machine. - */ return calc.code; } +static unsigned char * +ftrace_call_replace(unsigned long ip, unsigned long addr) +{ + return ftrace_text_replace(0xe8, ip, addr); +} + static inline int within(unsigned long addr, unsigned long start, unsigned long end) { @@ -228,6 +235,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } static unsigned long ftrace_update_func; +static unsigned long ftrace_update_func_call; static int update_ftrace_func(unsigned long ip, void *new) { @@ -256,6 +264,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned char *new; int ret; + ftrace_update_func_call = (unsigned long)func; + new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -291,13 +301,28 @@ int ftrace_int3_handler(struct pt_regs *regs) if (WARN_ON_ONCE(!regs)) return 0; - ip = regs->ip - 1; - if (!ftrace_location(ip) && !is_ftrace_caller(ip)) - return 0; + ip = regs->ip - INT3_INSN_SIZE; - regs->ip += MCOUNT_INSN_SIZE - 1; +#ifdef CONFIG_X86_64 + if (ftrace_location(ip)) { + int3_emulate_call(regs, (unsigned long)ftrace_regs_caller); + return 1; + } else if (is_ftrace_caller(ip)) { + if (!ftrace_update_func_call) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } + int3_emulate_call(regs, ftrace_update_func_call); + return 1; + } +#else + if (ftrace_location(ip) || is_ftrace_caller(ip)) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } +#endif - return 1; + return 0; } static int ftrace_write(unsigned long ip, const char *val, int size) @@ -664,22 +689,6 @@ int __init ftrace_dyn_arch_init(void) return 0; } -#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER) -static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) -{ - static union ftrace_code_union calc; - - /* Jmp not a call (ignore the .e8) */ - calc.e8 = 0xe9; - calc.offset 
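The reworked ftrace_int3_handler() above no longer just steps over the patched bytes: on x86_64 it emulates the call that is being installed, so a CPU that happens to hit the transient int3 still enters the tracer, while on 32-bit it keeps the old behaviour of jumping past the call site. A rough, simplified illustration of what such emulation amounts to; the real int3_emulate_call()/int3_emulate_jmp() helpers live in asm/text-patching.h and also account for the int3 and call instruction lengths::

	/* Illustration only; assumes the <asm/ptrace.h> struct pt_regs layout. */
	static void emulate_jmp_sketch(struct pt_regs *regs, unsigned long target)
	{
		regs->ip = target;			/* resume at the branch target */
	}

	static void emulate_call_sketch(struct pt_regs *regs,
					unsigned long func, unsigned long ret)
	{
		regs->sp -= sizeof(unsigned long);	/* push ... */
		*(unsigned long *)regs->sp = ret;	/* ... the return address */
		regs->ip = func;			/* then enter the callee */
	}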
= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - - /* - * ftrace external locks synchronize the access to the static variable. - */ - return calc.code; -} -#endif - /* Currently only x86_64 supports dynamic trampolines */ #ifdef CONFIG_X86_64 @@ -733,18 +742,21 @@ union ftrace_op_code_union { } __attribute__((packed)); }; +#define RET_SIZE 1 + static unsigned long create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) { - unsigned const char *jmp; unsigned long start_offset; unsigned long end_offset; unsigned long op_offset; unsigned long offset; + unsigned long npages; unsigned long size; - unsigned long ip; + unsigned long retq; unsigned long *ptr; void *trampoline; + void *ip; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; union ftrace_op_code_union op_ptr; @@ -764,27 +776,28 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* * Allocate enough size to store the ftrace_caller code, - * the jmp to ftrace_epilogue, as well as the address of - * the ftrace_ops this trampoline is used for. + * the iret , as well as the address of the ftrace_ops this + * trampoline is used for. */ - trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *)); + trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *)); if (!trampoline) return 0; - *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *); + *tramp_size = size + RET_SIZE + sizeof(void *); + npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE); /* Copy ftrace_caller onto the trampoline memory */ ret = probe_kernel_read(trampoline, (void *)start_offset, size); - if (WARN_ON(ret < 0)) { - tramp_free(trampoline, *tramp_size); - return 0; - } + if (WARN_ON(ret < 0)) + goto fail; - ip = (unsigned long)trampoline + size; + ip = trampoline + size; - /* The trampoline ends with a jmp to ftrace_epilogue */ - jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue); - memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE); + /* The trampoline ends with ret(q) */ + retq = (unsigned long)ftrace_stub; + ret = probe_kernel_read(ip, (void *)retq, RET_SIZE); + if (WARN_ON(ret < 0)) + goto fail; /* * The address of the ftrace_ops that is used for this trampoline @@ -794,17 +807,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) * the global function_trace_op variable. */ - ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE); + ptr = (unsigned long *)(trampoline + size + RET_SIZE); *ptr = (unsigned long)ops; op_offset -= start_offset; memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE); /* Are we pointing to the reference? */ - if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { - tramp_free(trampoline, *tramp_size); - return 0; - } + if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) + goto fail; /* Load the contents of ptr into the callback parameter */ offset = (unsigned long)ptr; @@ -818,7 +829,16 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* ALLOC_TRAMP flags lets us know we created it */ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; + /* + * Module allocation needs to be completed by making the page + * executable. The page is still writable, which is a security hazard, + * but anyhow ftrace breaks W^X completely. 
+ */ + set_memory_x((unsigned long)trampoline, npages); return (unsigned long)trampoline; +fail: + tramp_free(trampoline, *tramp_size); + return 0; } static unsigned long calc_trampoline_call_offset(bool save_regs) @@ -868,6 +888,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) func = ftrace_ops_get_func(ops); + ftrace_update_func_call = (unsigned long)func; + /* Do a safe modify in case the trampoline is executing */ new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -888,8 +910,8 @@ static void *addr_from_call(void *ptr) return NULL; /* Make sure this is a call */ - if (WARN_ON_ONCE(calc.e8 != 0xe8)) { - pr_warn("Expected e8, got %x\n", calc.e8); + if (WARN_ON_ONCE(calc.op != 0xe8)) { + pr_warn("Expected e8, got %x\n", calc.op); return NULL; } @@ -960,10 +982,16 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) +{ + return ftrace_text_replace(0xe9, ip, addr); +} + static int ftrace_mod_jmp(unsigned long ip, void *func) { unsigned char *new; + ftrace_update_func_call = 0UL; new = ftrace_jmp_replace(ip, (unsigned long)func); return update_ftrace_func(ip, new); @@ -994,7 +1022,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, { unsigned long old; int faulted; - struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -1046,19 +1073,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, return; } - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { - *parent = old; - return; - } - - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, parent) == -EBUSY) { + if (function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = old; - return; - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 91b2cff4b79a393f2cb4520be1157538a1abdcc7..75f2b36b41a6956814a82be59c2665798d23fbb4 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -171,9 +171,6 @@ GLOBAL(ftrace_call) restore_mcount_regs /* - * The copied trampoline must call ftrace_epilogue as it - * still may need to call the function graph tracer. - * * The code up to this label is copied into trampolines so * think twice before adding any new code or changing the * layout here. @@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif -/* This is weak to keep gas from relaxing the jumps */ +/* + * This is weak to keep gas from relaxing the jumps. + * It is also used to copy the retq for trampolines. 
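create_trampoline() above now fills the buffer while the module_alloc() allocation is still writable but not yet executable (together with the module_alloc() change later in this series that drops PAGE_KERNEL_EXEC), and only calls set_memory_x() once the contents are final. The pattern in isolation, as a hedged sketch under kernel context (<linux/moduleloader.h>, <asm/set_memory.h>) rather than the full trampoline logic::

	/* Sketch: copy code into module space, then allow execution. */
	static unsigned long make_exec_copy(const void *code, size_t size)
	{
		unsigned long npages = DIV_ROUND_UP(size, PAGE_SIZE);
		void *buf = module_alloc(size);		/* writable, not executable */

		if (!buf)
			return 0;

		memcpy(buf, code, size);		/* populate while still RW */
		set_memory_x((unsigned long)buf, npages); /* flip to executable */

		return (unsigned long)buf;
	}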
+ */ WEAK(ftrace_stub) retq ENDPROC(ftrace_caller) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index ddee1f0870c4b091cae110f35d92a2d87a05d8b6..90c2613af36b6726bf380f2473a2b1e4eacc58b9 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr, pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); if (la57) { - p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); + p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], + physaddr); i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; - i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D; - p4d[i + 0] = (pgdval_t)pud + pgtable_flags; - p4d[i + 1] = (pgdval_t)pud + pgtable_flags; + i = physaddr >> P4D_SHIFT; + p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; + p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; } else { i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; pgd[i + 0] = (pgdval_t)pud + pgtable_flags; pgd[i + 1] = (pgdval_t)pud + pgtable_flags; } - i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD; - pud[i + 0] = (pudval_t)pmd + pgtable_flags; - pud[i + 1] = (pudval_t)pmd + pgtable_flags; + i = physaddr >> PUD_SHIFT; + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; /* Filter out unsupported __PAGE_KERNEL_* bits: */ @@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr, pmd_entry += physaddr; for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) { - int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD; - pmd[idx] = pmd_entry + i * PMD_SIZE; + int idx = i + (physaddr >> PMD_SHIFT); + + pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; } /* @@ -220,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr, * we might write invalid pmds, when the kernel is relocated * cleanup_highmap() fixes this up along with the mappings * beyond _end. + * + * Only the region occupied by the kernel image has so far + * been checked against the table of usable memory regions + * provided by the firmware, so invalidate pages outside that + * region. A page table entry that maps to a reserved area of + * memory would allow processor speculation into that area, + * and on some hardware (particularly the UV platform) even + * speculative access to some reserved areas is caught as an + * error, causing the BIOS to halt the system. 
*/ pmd = fixup_pointer(level2_kernel_pgt, physaddr); - for (i = 0; i < PTRS_PER_PMD; i++) { + + /* invalidate pages before the kernel image */ + for (i = 0; i < pmd_index((unsigned long)_text); i++) + pmd[i] &= ~_PAGE_PRESENT; + + /* fixup pages that are part of the kernel image */ + for (; i <= pmd_index((unsigned long)_end); i++) if (pmd[i] & _PAGE_PRESENT) pmd[i] += load_delta; - } + + /* invalidate pages after the kernel image */ + for (; i < PTRS_PER_PMD; i++) + pmd[i] &= ~_PAGE_PRESENT; /* * Fixup phys_base - remove the memory encryption mask to obtain @@ -363,6 +383,8 @@ static void __init clear_bss(void) { memset(__bss_start, 0, (unsigned long) __bss_stop - (unsigned long) __bss_start); + memset(__brk_base, 0, + (unsigned long) __brk_limit - (unsigned long) __brk_base); } static unsigned long get_cmd_line_ptr(void) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index b0acb22e5a465096b37f7a67a689852685228087..1e3f1f140ffb0e233b82873603c17f0fa13f36b0 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -909,6 +909,8 @@ int __init hpet_enable(void) return 0; hpet_set_mapping(); + if (!hpet_virt_address) + return 0; /* * Read the period and check for a sane value: diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 34a5c171514870af79195679ad9bb90741a57992..2882fe1d2a785ce818509e51143da830c461384c 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, #endif default: WARN_ON_ONCE(1); + return -EINVAL; } /* diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 0d307a657abbb253d4679471bd1ac202279ff447..2b7999a1a50a83b07084e93903786e15479684b8 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -18,10 +19,32 @@ */ struct clock_event_device *global_clock_event; -void __init setup_pit_timer(void) +/* + * Modern chipsets can disable the PIT clock which makes it unusable. It + * would be possible to enable the clock but the registers are chipset + * specific and not discoverable. Avoid the whack a mole game. + * + * These platforms have discoverable TSC/CPU frequencies but this also + * requires to know the local APIC timer frequency as it normally is + * calibrated against the PIT interrupt. 
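The __startup_64() loops above index level2_kernel_pgt by pmd_index() and clear _PAGE_PRESENT for every slot outside [_text, _end], so the early identity mapping cannot cover memory the firmware never granted to the kernel image. pmd_index() is simply the PMD slot number of a virtual address; a conceptual equivalent for x86_64 (2 MiB PMD entries, 512 slots per table)::

	/*
	 * Conceptual stand-in for pmd_index(); the kernel derives these
	 * constants from PMD_SHIFT and PTRS_PER_PMD rather than hard-coding.
	 */
	static inline unsigned long pmd_index_sketch(unsigned long address)
	{
		return (address >> 21) & (512 - 1);	/* 2 MiB granules, 9 index bits */
	}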
+ */ +static bool __init use_pit(void) +{ + if (!IS_ENABLED(CONFIG_X86_TSC) || !boot_cpu_has(X86_FEATURE_TSC)) + return true; + + /* This also returns true when APIC is disabled */ + return apic_needs_pit(); +} + +bool __init pit_timer_init(void) { + if (!use_pit()) + return false; + clockevent_i8253_init(true); global_clock_event = &i8253_clockevent; + return true; } #ifndef CONFIG_X86_64 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 519649ddf1001d423c5b0a7e02a42f7ca92e2a06..89c7975b88a7cb9877c33d0726fad889105d7527 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq) disable_irq_nosync(irq); io_apic_irqs &= ~(1<ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; - estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; + estack_bottom = (u64)oist->ist[DEBUG_STACK]; + estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN; if (regs->sp >= estack_top && regs->sp <= estack_bottom) return; diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a0693b71cfc1cc4bb99af19ed7b9d07bfca80c33..f2c215e1f64c5b39f9939583adf226ae4e9438c0 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -72,8 +72,10 @@ void __init init_ISA_irqs(void) legacy_pic->init(0); - for (i = 0; i < nr_legacy_irqs(); i++) + for (i = 0; i < nr_legacy_irqs(); i++) { irq_set_chip_and_handler(i, chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } } void __init init_IRQ(void) diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 108c48d0d40e70e24010eb43024429cefa1677e9..3f133b4daf0aee7bfdccea7b93932ecd9ca55d8a 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -44,7 +44,7 @@ static void jailhouse_get_wallclock(struct timespec64 *now) static void __init jailhouse_timer_init(void) { - lapic_timer_frequency = setup_data.apic_khz * (1000 / HZ); + lapic_timer_period = setup_data.apic_khz * (1000 / HZ); } static unsigned long jailhouse_get_tsc(void) diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index eeea935e9bb53ff9f6cfb0b75778e71c4889af84..4c3d9a3d45b2f9bbb6d7da707fb2e599f7990a55 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -16,8 +16,6 @@ #include #include -#ifdef HAVE_JUMP_LABEL - union jump_code_union { char code[JUMP_LABEL_NOP_SIZE]; struct { @@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, if (jlstate == JL_STATE_UPDATE) __jump_label_transform(entry, type, text_poke_early, 1); } - -#endif diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 278cd07228dd886778cabf708dd2b1237a318d72..70caa60b4c617884e7503d7aac1e5cb74bec3df0 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, struct efi_info *current_ei = &boot_params.efi_info; struct efi_info *ei = ¶ms->efi_info; + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + if (!current_ei->efi_memmap_size) return 0; @@ -179,6 +182,7 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, if (efi_enabled(EFI_OLD_MEMMAP)) return 0; + params->secure_boot = boot_params.secure_boot; ei->efi_loader_signature = current_ei->efi_loader_signature; ei->efi_systab = current_ei->efi_systab; ei->efi_systab_hi = current_ei->efi_systab_hi; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 
8e36f249646e25d20bc2bcc04b7a0ccc292e498c..2636ca8394bd30e3bf0061b134f620e4d5892e45 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -763,13 +763,13 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) if (!err) return err; /* - * It is safe to call text_poke() because normal kernel execution + * It is safe to call text_poke_kgdb() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) return -EBUSY; - text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, - BREAK_INSTR_SIZE); + text_poke_kgdb((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, + BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; @@ -788,12 +788,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) if (bpt->type != BP_POKE_BREAKPOINT) goto knl_write; /* - * It is safe to call text_poke() because normal kernel execution + * It is safe to call text_poke_kgdb() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) goto knl_write; - text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); + text_poke_kgdb((void *)bpt->bpt_addr, bpt->saved_instr, + BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) goto knl_write; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index b0d1e81c96bbe297c2b0d573673a40bfb9cf3a99..7a40251a2061a8e95e80a615c129c9c19b896391 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -170,6 +170,8 @@ NOKPROBE_SYMBOL(skip_prefixes); int can_boost(struct insn *insn, void *addr) { kprobe_opcode_t opcode; + insn_byte_t prefix; + int i; if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ @@ -182,9 +184,14 @@ int can_boost(struct insn *insn, void *addr) if (insn->opcode.nbytes != 1) return 0; - /* Can't boost Address-size override prefix */ - if (unlikely(inat_is_address_size_prefix(insn->attr))) - return 0; + for_each_insn_prefix(insn, i, prefix) { + insn_attr_t attr; + + attr = inat_get_opcode_attribute(prefix); + /* Can't boost Address-size override prefix and CS override prefix */ + if (prefix == 0x2e || inat_is_address_size_prefix(attr)) + return 0; + } opcode = insn->opcode.bytes[0]; @@ -209,8 +216,8 @@ int can_boost(struct insn *insn, void *addr) /* clear and set flags are boostable */ return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); default: - /* CS override prefix and call are not boostable */ - return (opcode != 0x2e && opcode != 0x9a); + /* call is not boostable */ + return opcode != 0x9a; } } @@ -431,8 +438,20 @@ void *alloc_insn_page(void) void *page; page = module_alloc(PAGE_SIZE); - if (page) - set_memory_ro((unsigned long)page & PAGE_MASK, 1); + if (!page) + return NULL; + + /* + * First make the page read-only, and only then make it executable to + * prevent it from being W+X in between. + */ + set_memory_ro((unsigned long)page, 1); + + /* + * TODO: Once additional kernel code protection mechanisms are set, ensure + * that the page was not maliciously altered and it is still zeroed. 
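can_boost() above now iterates every decoded prefix byte with for_each_insn_prefix() instead of inspecting a single attribute, since an instruction may carry several prefixes and the one that forbids boosting need not be the last. Restated as a predicate, under the same kernel insn-decoder assumptions::

	/* True if any prefix forbids boosting: CS override or address-size override. */
	static bool has_unboostable_prefix(struct insn *insn)
	{
		insn_byte_t prefix;
		int i;

		for_each_insn_prefix(insn, i, prefix) {
			if (prefix == 0x2e)	/* CS segment override */
				return true;
			if (inat_is_address_size_prefix(inat_get_opcode_attribute(prefix)))
				return true;	/* 0x67 address-size override */
		}

		return false;
	}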
+ */ + set_memory_x((unsigned long)page, 1); return page; } @@ -440,8 +459,12 @@ void *alloc_insn_page(void) /* Recover page to RW mode before releasing it */ void free_insn_page(void *page) { - set_memory_nx((unsigned long)page & PAGE_MASK, 1); - set_memory_rw((unsigned long)page & PAGE_MASK, 1); + /* + * First make the page non-executable, and only then make it writable to + * prevent it from being W+X in between. + */ + set_memory_nx((unsigned long)page, 1); + set_memory_rw((unsigned long)page, 1); module_memfree(page); } @@ -569,6 +592,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) unsigned long *sara = stack_addr(regs); ri->ret_addr = (kprobe_opcode_t *) *sara; + ri->fp = sara; /* Replace the return addr with trampoline addr */ *sara = (unsigned long) &kretprobe_trampoline; @@ -751,7 +775,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used void *trampoline_handler(struct pt_regs *regs) +static __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; @@ -759,15 +783,28 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; kprobe_opcode_t *correct_ret_addr = NULL; + void *frame_pointer; + bool skipped = false; + + /* + * Set a dummy kprobe for avoiding kretprobe recursion. + * Since kretprobe never run in kprobe handler, kprobe must not + * be running at this point. + */ + kprobe_busy_begin(); INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* fixup registers */ #ifdef CONFIG_X86_64 regs->cs = __KERNEL_CS; + /* On x86-64, we use pt_regs->sp for return address holder. */ + frame_pointer = ®s->sp; #else regs->cs = __KERNEL_CS | get_kernel_rpl(); regs->gs = 0; + /* On x86-32, we use pt_regs->flags for return address holder. */ + frame_pointer = ®s->flags; #endif regs->ip = trampoline_address; regs->orig_ax = ~0UL; @@ -789,8 +826,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) if (ri->task != current) /* another task is sharing our hash bucket */ continue; + /* + * Return probes must be pushed on this hash list correct + * order (same as return order) so that it can be poped + * correctly. However, if we find it is pushed it incorrect + * order, this means we find a function which should not be + * probed, because the wrong order entry is pushed on the + * path of processing other kretprobe itself. + */ + if (ri->fp != frame_pointer) { + if (!skipped) + pr_warn("kretprobe is stacked incorrectly. 
Trying to fixup.\n"); + skipped = true; + continue; + } orig_ret_address = (unsigned long)ri->ret_addr; + if (skipped) + pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", + ri->rp->kp.addr); if (orig_ret_address != trampoline_address) /* @@ -808,14 +862,15 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) if (ri->task != current) /* another task is sharing our hash bucket */ continue; + if (ri->fp != frame_pointer) + continue; orig_ret_address = (unsigned long)ri->ret_addr; if (ri->rp && ri->rp->handler) { __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); + __this_cpu_write(current_kprobe, &kprobe_busy); } recycle_rp_inst(ri, &empty_rp); @@ -831,6 +886,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); + kprobe_busy_end(); + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); @@ -938,7 +995,15 @@ int kprobe_debug_handler(struct pt_regs *regs) struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (!cur) + if (!cur || !cur->ainsn.insn) + return 0; + + /* kprobe will change the status from KPROBE_HIT_ACTIVE to + * KPROBE_HIT_SS or KPROBE_HIT_SSDONE before single-step execution, so + * if the current status is KPROBE_HIT_ACTIVE, its not a debug + * exception triggered by kprobe. + */ + if (kcb->kprobe_status == KPROBE_HIT_ACTIVE) return 0; resume_execution(cur, regs, kcb); @@ -1080,6 +1145,12 @@ bool arch_within_kprobe_blacklist(unsigned long addr) is_in_entry_trampoline_section; } +int __init arch_populate_kprobe_blacklist(void) +{ + return kprobe_add_area_blacklist((unsigned long)__entry_text_start, + (unsigned long)__entry_text_end); +} + int __init arch_init_kprobes(void) { return 0; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index eaf02f2e73005731e28dc22bd5d91d10fd26d2aa..36b5a493e5b4511ed6fc908a6b2904df143e9cee 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -56,8 +56,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) /* This function only handles jump-optimized kprobe */ if (kp && kprobe_optimized(kp)) { op = container_of(kp, struct optimized_kprobe, kp); - /* If op->list is not empty, op is under optimizing */ - if (list_empty(&op->list)) + /* If op is optimized or under unoptimizing */ + if (list_empty(&op->list) || optprobe_queued_unopt(op)) goto found; } } @@ -141,6 +141,11 @@ asm ( void optprobe_template_func(void); STACK_FRAME_NON_STANDARD(optprobe_template_func); +NOKPROBE_SYMBOL(optprobe_template_func); +NOKPROBE_SYMBOL(optprobe_template_entry); +NOKPROBE_SYMBOL(optprobe_template_val); +NOKPROBE_SYMBOL(optprobe_template_call); +NOKPROBE_SYMBOL(optprobe_template_end); #define TMPL_MOVE_IDX \ ((long)optprobe_template_val - (long)optprobe_template_entry) @@ -179,7 +184,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } - preempt_enable_no_resched(); + preempt_enable(); } NOKPROBE_SYMBOL(optimized_callback); @@ -189,7 +194,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) int len = 0, ret; while (len < RELATIVEJUMP_SIZE) { - ret = __copy_instruction(dest + len, src + len, real, &insn); + ret = __copy_instruction(dest + len, src + len, real 
+ len, &insn); if (!ret || !can_boost(&insn, src + len)) return -EINVAL; len += ret; @@ -323,7 +328,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) for (i = 1; i < op->optinsn.size; i++) { p = get_kprobe(op->kp.addr + i); - if (p && !kprobe_disabled(p)) + if (p && !kprobe_disarmed(p)) return -EEXIST; } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d9b71924c23c9b939986b2d2ab153ba15b7077c3..d7c649bfae9b243916a35abc67f591e4d4a728b8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -444,6 +445,26 @@ static void __init sev_map_percpu_data(void) } } +static void kvm_guest_cpu_offline(void) +{ + kvm_disable_steal_time(); + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) + wrmsrl(MSR_KVM_PV_EOI_EN, 0); + kvm_pv_disable_apf(); + apf_task_wake_all(); + kvmclock_disable(); +} + +static int kvm_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + kvm_guest_cpu_init(); + local_irq_restore(flags); + return 0; +} + #ifdef CONFIG_SMP #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) @@ -457,6 +478,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) #else u64 ipi_bitmap = 0; #endif + long ret; if (cpumask_empty(mask)) return; @@ -482,8 +504,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); min = max = apic_id; ipi_bitmap = 0; } @@ -491,8 +514,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } if (ipi_bitmap) { - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); } local_irq_restore(flags); @@ -557,31 +581,34 @@ static void __init kvm_smp_prepare_boot_cpu(void) kvm_spinlock_init(); } -static void kvm_guest_cpu_offline(void) +static int kvm_cpu_down_prepare(unsigned int cpu) { - kvm_disable_steal_time(); - if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) - wrmsrl(MSR_KVM_PV_EOI_EN, 0); - kvm_pv_disable_apf(); - apf_task_wake_all(); -} + unsigned long flags; -static int kvm_cpu_online(unsigned int cpu) -{ - local_irq_disable(); - kvm_guest_cpu_init(); - local_irq_enable(); + local_irq_save(flags); + kvm_guest_cpu_offline(); + local_irq_restore(flags); return 0; } -static int kvm_cpu_down_prepare(unsigned int cpu) +#endif + +static int kvm_suspend(void) { - local_irq_disable(); kvm_guest_cpu_offline(); - local_irq_enable(); + return 0; + } + +static void kvm_resume(void) +{ + kvm_cpu_online(raw_smp_processor_id()); } -#endif + +static struct syscore_ops kvm_syscore_ops = { + .suspend = kvm_suspend, + .resume = kvm_resume, +}; static void __init kvm_apf_trap_init(void) { @@ -656,6 +683,8 @@ static void __init kvm_guest_init(void) kvm_guest_cpu_init(); #endif + register_syscore_ops(&kvm_syscore_ops); + /* * Hard lockup detection is enabled by default. 
Disable it, as guests * can get false positives too easily, for example if the host is @@ -700,6 +729,7 @@ unsigned int kvm_arch_para_hints(void) { return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES); } +EXPORT_SYMBOL_GPL(kvm_arch_para_hints); static uint32_t __init kvm_detect(void) { @@ -827,6 +857,7 @@ asm( "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);" "setne %al;" "ret;" +".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;" ".popsection"); #endif @@ -838,16 +869,36 @@ void __init kvm_spinlock_init(void) { if (!kvm_para_available()) return; - /* Does host kernel support KVM_FEATURE_PV_UNHALT? */ - if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) + /* + * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an + * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is + * preferred over native qspinlock when vCPU is preempted. + */ + if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) { + pr_info("PV spinlocks disabled, no host support\n"); return; + } - if (kvm_para_has_hint(KVM_HINTS_REALTIME)) - return; + /* + * Disable PV spinlocks and use native qspinlock when dedicated pCPUs + * are available. + */ + if (kvm_para_has_hint(KVM_HINTS_REALTIME)) { + pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n"); + goto out; + } - /* Don't use the pvqspinlock code if there is only 1 vCPU. */ - if (num_possible_cpus() == 1) - return; + if (num_possible_cpus() == 1) { + pr_info("PV spinlocks disabled, single CPU\n"); + goto out; + } + + if (nopvspin) { + pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n"); + goto out; + } + + pr_info("PV spinlocks enabled\n"); __pv_init_lock_hash(); pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; @@ -859,6 +910,49 @@ void __init kvm_spinlock_init(void) pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted); } + /* + * When PV spinlock is enabled which is preferred over + * virt_spin_lock(), virt_spin_lock_key's value is meaningless. + * Just disable it anyway. 
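The out: label shown just below disables virt_spin_lock_key with static_branch_disable(): once PV spinlocks are set up, or ruled out in favour of native qspinlocks, the virt_spin_lock() fallback is never the right path again. Static keys of this kind are patched branches; a minimal usage sketch with an invented key name, not one from this patch::

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_TRUE(example_fastpath_key);

	static void example_path(void)
	{
		if (static_branch_likely(&example_fastpath_key)) {
			/* default behaviour, compiled as the straight-line path */
		} else {
			/* taken only after static_branch_disable() patches the site */
		}
	}

	static void example_disable(void)
	{
		static_branch_disable(&example_fastpath_key);
	}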
+ */ +out: + static_branch_disable(&virt_spin_lock_key); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ + +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL + +static void kvm_disable_host_haltpoll(void *i) +{ + wrmsrl(MSR_KVM_POLL_CONTROL, 0); +} + +static void kvm_enable_host_haltpoll(void *i) +{ + wrmsrl(MSR_KVM_POLL_CONTROL, 1); +} + +void arch_haltpoll_enable(unsigned int cpu) +{ + if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) { + pr_err_once("kvm: host does not support poll control\n"); + pr_err_once("kvm: host upgrade recommended\n"); + return; + } + + /* Enable guest halt poll disables host halt poll */ + smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1); +} +EXPORT_SYMBOL_GPL(arch_haltpoll_enable); + +void arch_haltpoll_disable(unsigned int cpu) +{ + if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) + return; + + /* Enable guest halt poll disables host halt poll */ + smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1); +} +EXPORT_SYMBOL_GPL(arch_haltpoll_disable); +#endif diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 013fe3d21dbb3f4d5f834f74e0ca3d791a6f0b06..0c7a77889f637b5dabb6d2385e3fedb99ad03e0a 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -38,8 +38,8 @@ static int kvmclock __initdata = 1; static int kvmclock_vsyscall __initdata = 1; -static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME; -static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK; +static int msr_kvm_system_time __ro_after_init; +static int msr_kvm_wall_clock __ro_after_init; static u64 kvm_sched_clock_offset __ro_after_init; static int __init parse_no_kvmclock(char *arg) @@ -117,12 +117,8 @@ static u64 kvm_sched_clock_read(void) static inline void kvm_sched_clock_init(bool stable) { - if (!stable) { - pv_time_ops.sched_clock = kvm_clock_read; + if (!stable) clear_sched_clock_stable(); - return; - } - kvm_sched_clock_offset = kvm_clock_read(); pv_time_ops.sched_clock = kvm_sched_clock_read; @@ -231,11 +227,10 @@ static void kvm_crash_shutdown(struct pt_regs *regs) } #endif -static void kvm_shutdown(void) +void kvmclock_disable(void) { - native_write_msr(msr_kvm_system_time, 0, 0); - kvm_disable_steal_time(); - native_machine_shutdown(); + if (msr_kvm_system_time) + native_write_msr(msr_kvm_system_time, 0, 0); } static void __init kvmclock_init_mem(void) @@ -332,7 +327,10 @@ void __init kvmclock_init(void) if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) { msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW; msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW; - } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { + } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { + msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; + msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; + } else { return; } @@ -363,7 +361,6 @@ void __init kvmclock_init(void) #endif x86_platform.save_sched_clock_state = kvm_save_sched_clock_state; x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state; - machine_ops.shutdown = kvm_shutdown; #ifdef CONFIG_KEXEC_CORE machine_ops.crash_shutdown = kvm_crash_shutdown; #endif diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 733e6ace0fa4e97dcb77e031d37c89ec82866e0f..f702b5b380b291083dfa0b86fce8aa8fead0a681 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -70,7 +70,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) if (num_entries > LDT_ENTRIES) return NULL; - new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + new_ldt = 
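arch_haltpoll_enable() above only touches MSR_KVM_POLL_CONTROL after kvm_para_has_feature() confirms the host advertises KVM_FEATURE_POLL_CONTROL, because writing a paravirtual MSR that an older host does not implement would fault. The same guard, condensed into a sketch that assumes kernel context and the feature and MSR names used in the diff::

	/* Sketch: only poke the PV MSR when the host advertises the feature. */
	static void poll_control_set(bool host_should_poll)
	{
		if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
			return;			/* older host: leave the MSR alone */

		wrmsrl(MSR_KVM_POLL_CONTROL, host_should_poll ? 1 : 0);
	}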
kmalloc(sizeof(struct ldt_struct), GFP_KERNEL_ACCOUNT); if (!new_ldt) return NULL; @@ -84,9 +84,10 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) * than PAGE_SIZE. */ if (alloc_size > PAGE_SIZE) - new_ldt->entries = vzalloc(alloc_size); + new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO, + PAGE_KERNEL); else - new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL); + new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); if (!new_ldt->entries) { kfree(new_ldt); @@ -199,14 +200,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm) /* * If PTI is enabled, this maps the LDT into the kernelmode and * usermode tables for the given mm. - * - * There is no corresponding unmap function. Even if the LDT is freed, we - * leave the PTEs around until the slot is reused or the mm is destroyed. - * This is harmless: the LDT is always in ordinary memory, and no one will - * access the freed slot. - * - * If we wanted to unmap freed LDTs, we'd also need to do a flush to make - * it useful, and the flush would slow down modify_ldt(). */ static int map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) @@ -214,8 +207,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) unsigned long va; bool is_vmalloc; spinlock_t *ptl; - pgd_t *pgd; - int i; + int i, nr_pages; if (!static_cpu_has(X86_FEATURE_PTI)) return 0; @@ -229,16 +221,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Check if the current mappings are sane */ sanity_check_ldt_mapping(mm); - /* - * Did we already have the top level entry allocated? We can't - * use pgd_none() for this because it doens't do anything on - * 4-level page table kernels. - */ - pgd = pgd_offset(mm, LDT_BASE_ADDR); - is_vmalloc = is_vmalloc_addr(ldt->entries); - for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { unsigned long offset = i << PAGE_SHIFT; const void *src = (char *)ldt->entries + offset; unsigned long pfn; @@ -272,13 +259,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Propagate LDT mapping to the user page-table */ map_ldt_struct_to_user(mm); - va = (unsigned long)ldt_slot_va(slot); - flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); - ldt->slot = slot; return 0; } +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ + unsigned long va; + int i, nr_pages; + + if (!ldt) + return; + + /* LDT map/unmap is only required for PTI */ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { + unsigned long offset = i << PAGE_SHIFT; + spinlock_t *ptl; + pte_t *ptep; + + va = (unsigned long)ldt_slot_va(ldt->slot) + offset; + ptep = get_locked_pte(mm, va, &ptl); + pte_clear(mm, va, ptep); + pte_unmap_unlock(ptep, ptl); + } + + va = (unsigned long)ldt_slot_va(ldt->slot); + flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0); +} + #else /* !CONFIG_PAGE_TABLE_ISOLATION */ static int @@ -286,6 +299,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) { return 0; } + +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ +} #endif /* CONFIG_PAGE_TABLE_ISOLATION */ static void free_ldt_pgtables(struct mm_struct *mm) @@ -524,6 +541,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int 
oldmode) } install_ldt(mm, new_ldt); + unmap_ldt_struct(mm, old_ldt); free_ldt_struct(old_ldt); error = 0; diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index e9d252d873aa0d68484353d0621452348ffd98ed..785bba03b77fa5f6ea94472b6134303848791493 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -15,10 +15,14 @@ * along with this program; if not, see . */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include #include +#include +#include /* Apply per-object alternatives. Based on x86 module_finalize() */ void arch_klp_init_object_loaded(struct klp_patch *patch, @@ -63,3 +67,305 @@ void arch_klp_init_object_loaded(struct klp_patch *patch, apply_paravirt(pseg, pseg + para->sh_size); } } + + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#include +#include +#include + +#define JMP_E9_INSN_SIZE 5 +union klp_code_union { + char code[JMP_E9_INSN_SIZE]; + struct { + unsigned char e9; + int offset; + } __packed; +}; + +struct klp_func_node { + struct list_head node; + struct list_head func_stack; + unsigned long old_addr; + unsigned char old_code[JMP_E9_INSN_SIZE]; +}; + +static LIST_HEAD(klp_func_list); + +static struct klp_func_node *klp_find_func_node(unsigned long old_addr) +{ + struct klp_func_node *func_node; + + list_for_each_entry(func_node, &klp_func_list, node) { + if (func_node->old_addr == old_addr) + return func_node; + } + + return NULL; +} + +static inline int klp_compare_address(unsigned long stack_addr, + unsigned long func_addr, unsigned long func_size, + const char *func_name) +{ + if (stack_addr >= func_addr && stack_addr < func_addr + func_size) { + pr_err("func %s is in use!\n", func_name); + return -EBUSY; + } + return 0; +} + +static struct klp_func_node *klp_find_func_node(unsigned long old_addr); + +static int klp_check_stack_func(struct klp_func *func, + struct stack_trace *trace, int enable) +{ + unsigned long func_addr, func_size, address; + const char *func_name; + struct klp_func_node *func_node; + int i; + + for (i = 0; i < trace->nr_entries; i++) { + address = trace->entries[i]; + + if (enable) { + if (func->force) + continue; + /* + * When enable, checking the currently active + * functions. + */ + func_node = klp_find_func_node(func->old_addr); + if (!func_node || + list_empty(&func_node->func_stack)) { + func_addr = func->old_addr; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + } else { + /* + * When disable, check for the function itself + * which to be unpatched. + */ + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + } + func_name = func->old_name; + + if (klp_compare_address(address, func_addr, + func_size, func_name)) + return -EAGAIN; + } + + return 0; +} + +static void klp_print_stack_trace(struct stack_trace *trace) +{ + int i; + + pr_err("Call Trace:\n"); + for (i = 0; i < trace->nr_entries; i++) { + pr_err("[<%pK>] %pS\n", + (void *)trace->entries[i], + (void *)trace->entries[i]); + } + +} + +#ifdef MAX_STACK_ENTRIES +#undef MAX_STACK_ENTRIES +#endif +#define MAX_STACK_ENTRIES 100 + +/* + * Determine whether it's safe to transition the task to the target patch state + * by looking for any to-be-patched or to-be-unpatched functions on its stack. 
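klp_check_stack_func() above walks every saved return address on a task's stack and refuses the transition if one of them lies inside the function body that is about to be patched or unpatched. The core test is a plain half-open interval check::

	#include <stdbool.h>

	/* Is a saved return address inside [func_addr, func_addr + func_size)? */
	static bool addr_within_func(unsigned long addr,
				     unsigned long func_addr, unsigned long func_size)
	{
		return addr >= func_addr && addr < func_addr + func_size;
	}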
+ */ +static int klp_check_stack(struct task_struct *task, + struct klp_patch *patch, int enable) + +{ + static unsigned long entries[MAX_STACK_ENTRIES]; + struct stack_trace trace; + struct klp_object *obj; + struct klp_func *func; + int ret; + + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = entries; + ret = save_stack_trace_tsk_reliable(task, &trace); + WARN_ON_ONCE(ret == -ENOSYS); + if (ret) { + pr_debug("%s: %s:%d has an unreliable stack\n", + __func__, task->comm, task->pid); + } + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = klp_check_stack_func(func, &trace, enable); + if (ret) { + pr_info("%s: %s:%d is sleeping on function %s\n", + __func__, task->comm, task->pid, + func->old_name); + + klp_print_stack_trace(&trace); + + return ret; + + } + } + } + + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + struct task_struct *g, *t; + int ret = 0; + + for_each_process_thread(g, t) { + ret = klp_check_stack(t, patch, enable); + if (ret) + goto out; + } + +out: + return ret; +} + +int arch_klp_init_func(struct klp_object *obj, struct klp_func *func) +{ + return 0; +} + +void arch_klp_free_func(struct klp_object *obj, struct klp_func *limit) +{ + +} + +static int klp_calc_offset(long pc, long addr) +{ + return (int)(addr - pc); +} + +static unsigned char *klp_jmp_code(unsigned long ip, unsigned long addr) +{ + static union klp_code_union calc; + + calc.e9 = 0xe9; + calc.offset = klp_calc_offset(ip + JMP_E9_INSN_SIZE, addr); + + return calc.code; +} + +static unsigned char *klp_old_code(unsigned char *code) +{ + static union klp_code_union old_code; + + memcpy(old_code.code, code, JMP_E9_INSN_SIZE); + return old_code.code; +} + +void arch_klp_code_modify_prepare(void) +{ + set_kernel_text_rw(); + set_all_modules_text_rw(); +} + +void arch_klp_code_modify_post_process(void) +{ + set_all_modules_text_ro(); + set_kernel_text_ro(); +} + +static inline int within(unsigned long addr, unsigned long start, + unsigned long end) +{ + return addr >= start && addr < end; +} + +static unsigned long text_ip_addr(unsigned long ip) +{ + if (within(ip, (unsigned long)_text, (unsigned long)_etext)) + ip = (unsigned long)__va(__pa_symbol(ip)); + + return ip; +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + unsigned long ip, new_addr; + const unsigned char *new; + + func_node = klp_find_func_node(func->old_addr); + ip = func->old_addr; + if (!func_node) { + func_node = kzalloc(sizeof(*func_node), GFP_ATOMIC); + if (!func_node) + return -ENOMEM; + + INIT_LIST_HEAD(&func_node->func_stack); + func_node->old_addr = func->old_addr; + probe_kernel_read(func_node->old_code, + (void *)ip, JMP_E9_INSN_SIZE); + list_add_rcu(&func_node->node, &klp_func_list); + } + + list_add_rcu(&func->stack_node, &func_node->func_stack); + + new_addr = (unsigned long)func->new_func; + new = klp_jmp_code(ip, new_addr); + + ip = text_ip_addr(ip); + if (probe_kernel_write((void *)ip, new, JMP_E9_INSN_SIZE)) + return -EPERM; + + sync_core(); + + return 0; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long ip, new_addr; + const unsigned char *new; + + func_node = klp_find_func_node(func->old_addr); + ip = func_node->old_addr; + if (list_is_singular(&func_node->func_stack)) { + list_del_rcu(&func->stack_node); + list_del_rcu(&func_node->node); + new = klp_old_code(func_node->old_code); + 
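klp_jmp_code() above, like the ftrace helpers earlier in this series, emits the 5-byte near jump: opcode 0xe9 followed by a 32-bit displacement measured from the end of the instruction. A standalone illustration of the encoding, assuming, as the kernel does, that source and target lie within a signed 32-bit displacement of each other::

	#include <stdint.h>
	#include <string.h>

	#define JMP_E9_INSN_SIZE 5

	/* Build "jmp rel32" as it would be placed at address ip, targeting addr. */
	static void encode_jmp(uint8_t buf[JMP_E9_INSN_SIZE],
			       unsigned long ip, unsigned long addr)
	{
		int32_t rel = (int32_t)(addr - (ip + JMP_E9_INSN_SIZE));

		buf[0] = 0xe9;
		memcpy(&buf[1], &rel, sizeof(rel));	/* little-endian on x86 */
	}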
kfree(func_node); + } else { + list_del_rcu(&func->stack_node); + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + + new_addr = (unsigned long)next_func->new_func; + new = klp_jmp_code(ip, new_addr); + } + + ip = text_ip_addr(ip); + probe_kernel_write((void *)ip, new, JMP_E9_INSN_SIZE); + sync_core(); +} +#endif diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 4c8acdfdc5a746a23bb012c7860ff0f9b3ed7d9f..15b9ace2349ea15036fd38d51c3fab2686aedaec 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -29,6 +30,55 @@ #include #include +#ifdef CONFIG_ACPI +/* + * Used while adding mapping for ACPI tables. + * Can be reused when other iomem regions need be mapped + */ +struct init_pgtable_data { + struct x86_mapping_info *info; + pgd_t *level4p; +}; + +static int mem_region_callback(struct resource *res, void *arg) +{ + struct init_pgtable_data *data = arg; + unsigned long mstart, mend; + + mstart = res->start; + mend = mstart + resource_size(res) - 1; + + return kernel_ident_mapping_init(data->info, data->level4p, mstart, mend); +} + +static int +map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) +{ + struct init_pgtable_data data; + unsigned long flags; + int ret; + + data.info = info; + data.level4p = level4p; + flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + ret = walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, + &data, mem_region_callback); + if (ret && ret != -EINVAL) + return ret; + + /* ACPI tables could be located in ACPI Non-volatile Storage region */ + ret = walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, + &data, mem_region_callback); + if (ret && ret != -EINVAL) + return ret; + + return 0; +} +#else +static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; } +#endif + #ifdef CONFIG_KEXEC_FILE const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_bzImage64_ops, @@ -36,6 +86,31 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { }; #endif +static int +map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) +{ +#ifdef CONFIG_EFI + unsigned long mstart, mend; + + if (!efi_enabled(EFI_BOOT)) + return 0; + + mstart = (boot_params.efi_info.efi_systab | + ((u64)boot_params.efi_info.efi_systab_hi<<32)); + + if (efi_enabled(EFI_64BIT)) + mend = mstart + sizeof(efi_system_table_64_t); + else + mend = mstart + sizeof(efi_system_table_32_t); + + if (!mstart) + return 0; + + return kernel_ident_mapping_init(info, level4p, mstart, mend); +#endif + return 0; +} + static void free_transition_pgtable(struct kimage *image) { free_page((unsigned long)image->arch.p4d); @@ -159,6 +234,18 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable) return result; } + /* + * Prepare EFI systab and ACPI tables for kexec kernel since they are + * not covered by pfn_mapped. 
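map_acpi_tables() above identity-maps the firmware tables by handing a small callback to walk_iomem_res_desc(), once for IORES_DESC_ACPI_TABLES and once for IORES_DESC_ACPI_NV_STORAGE. The walker invokes the callback for each matching resource and stops on the first non-zero return; a trimmed sketch of that contract with an invented callback name::

	/* Hypothetical callback: count matching regions, never abort the walk. */
	static int count_acpi_region(struct resource *res, void *arg)
	{
		unsigned int *nr = arg;

		(*nr)++;
		return 0;	/* non-zero here would stop walk_iomem_res_desc() */
	}

	/*
	 * Example call, mirroring the flags used above:
	 *
	 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES,
	 *			    IORESOURCE_MEM | IORESOURCE_BUSY,
	 *			    0, -1, &nr, count_acpi_region);
	 */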
+ */ + result = map_efi_systab(&info, level4p); + if (result) + return result; + + result = map_acpi_tables(&info, level4p); + if (result) + return result; + return init_transition_pgtable(image, level4p); } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index f58336af095c9d3050e85c5dea79a164b275e420..6645f123419c62f65125851b414e8a6ed9e7cf40 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -87,7 +87,7 @@ void *module_alloc(unsigned long size) p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR + get_module_load_offset(), MODULES_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (p && (kasan_module_alloc(p, size) < 0)) { vfree(p); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index f1c5eb99d445407a9fc134e76a8010d17a61d780..5b4c3279909474aa8443e828dadfb580633361e5 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -547,17 +547,15 @@ void __init default_get_smp_config(unsigned int early) * local APIC has default address */ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; - return; + goto out; } pr_info("Default MP configuration #%d\n", mpf->feature1); construct_default_ISA_mptable(mpf->feature1); } else if (mpf->physptr) { - if (check_physptr(mpf, early)) { - early_memunmap(mpf, sizeof(*mpf)); - return; - } + if (check_physptr(mpf, early)) + goto out; } else BUG(); @@ -566,7 +564,7 @@ void __init default_get_smp_config(unsigned int early) /* * Only use the first configuration found. */ - +out: early_memunmap(mpf, sizeof(*mpf)); } @@ -599,8 +597,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) mpf_base = base; mpf_found = true; - pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", - base, base + sizeof(*mpf) - 1, mpf); + pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n", + base, base + sizeof(*mpf) - 1); memblock_reserve(base, sizeof(*mpf)); if (mpf->physptr) diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 18bc9b51ac9b99ffaf51e85daf490b0ba108bcc9..086cf1d1d71d820e13baca26091d6104abea9eb9 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -34,6 +34,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -533,6 +534,9 @@ do_nmi(struct pt_regs *regs, long error_code) write_cr2(this_cpu_read(nmi_cr2)); if (this_cpu_dec_return(nmi_state)) goto nmi_restart; + + if (user_mode(regs)) + mds_user_clear_cpu_buffers(); } NOKPROBE_SYMBOL(do_nmi); diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index c06c4c16c6b69c0d251505fa4c03a658c5f938a6..bb7e1132290b0073b0ff3b0ac68d69d48016d2a8 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -59,18 +59,37 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = { u64 perf_reg_value(struct pt_regs *regs, int idx) { + struct x86_perf_regs *perf_regs; + + if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { + perf_regs = container_of(regs, struct x86_perf_regs, regs); + if (!perf_regs->xmm_regs) + return 0; + return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; + } + if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset))) return 0; return regs_get_register(regs, pt_regs_offset[idx]); } -#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL)) +#define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \ + ~((1ULL << PERF_REG_X86_MAX) - 1)) #ifdef CONFIG_X86_32 +#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ + (1ULL << PERF_REG_X86_R9) 
| \ + (1ULL << PERF_REG_X86_R10) | \ + (1ULL << PERF_REG_X86_R11) | \ + (1ULL << PERF_REG_X86_R12) | \ + (1ULL << PERF_REG_X86_R13) | \ + (1ULL << PERF_REG_X86_R14) | \ + (1ULL << PERF_REG_X86_R15)) + int perf_reg_validate(u64 mask) { - if (!mask || mask & REG_RESERVED) + if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; @@ -96,10 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user, int perf_reg_validate(u64 mask) { - if (!mask || mask & REG_RESERVED) - return -EINVAL; - - if (mask & REG_NOSUPPORT) + if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c index 6b07faaa157980915e42dc71bdc392a7c5a704e0..23154d24b1173df04b1731aa99779fd9f3931f83 100644 --- a/arch/x86/kernel/pmem.c +++ b/arch/x86/kernel/pmem.c @@ -27,6 +27,11 @@ static __init int register_e820_pmem(void) * simply here to trigger the module to load on demand. */ pdev = platform_device_alloc("e820_pmem", -1); - return platform_device_add(pdev); + + rc = platform_device_add(pdev); + if (rc) + platform_device_put(pdev); + + return rc; } device_initcall(register_e820_pmem); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c old mode 100644 new mode 100755 index c93fcfdf1673418a352c8eb1085504d8c4ee4ffd..9dad42b8fe3c55edc09dbc393d61d11b8d185b71 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -39,6 +39,9 @@ #include #include #include +#include + +#include "process.h" /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, @@ -252,11 +255,12 @@ void arch_setup_new_exec(void) enable_cpuid(); } -static inline void switch_to_bitmap(struct tss_struct *tss, - struct thread_struct *prev, +static inline void switch_to_bitmap(struct thread_struct *prev, struct thread_struct *next, unsigned long tifp, unsigned long tifn) { + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + if (tifn & _TIF_IO_BITMAP) { /* * Copy the relevant range of the IO bitmap. @@ -395,32 +399,81 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); } -static __always_inline void intel_set_ssb_state(unsigned long tifn) +/* + * Update the MSRs managing speculation control, during context switch. + * + * tifp: Previous task's thread flags + * tifn: Next task's thread flags + */ +static __always_inline void __speculation_ctrl_update(unsigned long tifp, + unsigned long tifn) +{ + unsigned long tif_diff = tifp ^ tifn; + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + + lockdep_assert_irqs_disabled(); + + /* Handle change of TIF_SSBD depending on the mitigation method. */ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + if (tif_diff & _TIF_SSBD) + amd_set_ssb_virt_state(tifn); + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + if (tif_diff & _TIF_SSBD) + amd_set_core_ssb_state(tifn); + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + updmsr |= !!(tif_diff & _TIF_SSBD); + msr |= ssbd_tif_to_spec_ctrl(tifn); + } + + /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. 
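[Editor's note] __speculation_ctrl_update() above uses tifp ^ tifn to find which speculation-related thread flags actually changed, and only then pays for a SPEC_CTRL MSR write; the value written is always composed from the next task's flags. A user-space sketch of that decision (the TIF_* bit positions below are placeholders, not the kernel's; the SPEC_CTRL bit layout is the architectural one):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder thread-flag bits (the real TIF_* values differ). */
#define TIF_SSBD     (1UL << 4)
#define TIF_SPEC_IB  (1UL << 5)

/* Architectural IA32_SPEC_CTRL bits. */
#define SPEC_CTRL_STIBP (1ULL << 1)
#define SPEC_CTRL_SSBD  (1ULL << 2)

/* Stand-in for wrmsrl(MSR_IA32_SPEC_CTRL, msr). */
static void write_spec_ctrl(uint64_t msr)
{
    printf("write SPEC_CTRL = %#llx\n", (unsigned long long)msr);
}

static void speculation_ctrl_update(unsigned long tifp, unsigned long tifn,
                                    bool cond_stibp)
{
    unsigned long tif_diff = tifp ^ tifn;
    uint64_t msr = 0;                 /* x86_spec_ctrl_base in the kernel */
    bool updmsr = false;

    /* SSBD via SPEC_CTRL: value from tifn, write only if the bit flipped. */
    if (tifn & TIF_SSBD)
        msr |= SPEC_CTRL_SSBD;
    updmsr |= !!(tif_diff & TIF_SSBD);

    /* STIBP only matters when conditional STIBP is enabled. */
    if (cond_stibp) {
        if (tifn & TIF_SPEC_IB)
            msr |= SPEC_CTRL_STIBP;
        updmsr |= !!(tif_diff & TIF_SPEC_IB);
    }

    if (updmsr)
        write_spec_ctrl(msr);
}

int main(void)
{
    speculation_ctrl_update(TIF_SSBD, 0, true);        /* SSBD cleared: write */
    speculation_ctrl_update(TIF_SSBD, TIF_SSBD, true); /* unchanged: no write */
    return 0;
}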
*/ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); + msr |= stibp_tif_to_spec_ctrl(tifn); + } + + if (updmsr) + write_spec_ctrl_current(msr, false); +} + +static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) { - u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); + if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) { + if (task_spec_ssb_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SSBD); + else + clear_tsk_thread_flag(tsk, TIF_SSBD); - wrmsrl(MSR_IA32_SPEC_CTRL, msr); + if (task_spec_ib_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SPEC_IB); + else + clear_tsk_thread_flag(tsk, TIF_SPEC_IB); + } + /* Return the updated threadinfo flags*/ + return task_thread_info(tsk)->flags; } -static __always_inline void __speculative_store_bypass_update(unsigned long tifn) +void speculation_ctrl_update(unsigned long tif) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) - amd_set_ssb_virt_state(tifn); - else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) - amd_set_core_ssb_state(tifn); - else - intel_set_ssb_state(tifn); + unsigned long flags; + + /* Forced update. Make sure all relevant TIF flags are different */ + local_irq_save(flags); + __speculation_ctrl_update(~tif, tif); + local_irq_restore(flags); } -void speculative_store_bypass_update(unsigned long tif) +/* Called from seccomp/prctl update */ +void speculation_ctrl_update_current(void) { preempt_disable(); - __speculative_store_bypass_update(tif); + speculation_ctrl_update(speculation_ctrl_update_tif(current)); preempt_enable(); } -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss) +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev, *next; unsigned long tifp, tifn; @@ -430,7 +483,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, tifn = READ_ONCE(task_thread_info(next_p)->flags); tifp = READ_ONCE(task_thread_info(prev_p)->flags); - switch_to_bitmap(tss, prev, next, tifp, tifn); + switch_to_bitmap(prev, next, tifp, tifn); propagate_user_return_notify(prev_p, next_p); @@ -451,8 +504,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOCPUID) set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); - if ((tifp ^ tifn) & _TIF_SSBD) - __speculative_store_bypass_update(tifn); + if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) { + __speculation_ctrl_update(tifp, tifn); + } else { + speculation_ctrl_update_tif(prev_p); + tifn = speculation_ctrl_update_tif(next_p); + + /* Enforce MSR update to ensure consistent state */ + __speculation_ctrl_update(~tifn, tifn); + } } /* @@ -498,8 +558,9 @@ void __cpuidle default_idle(void) safe_halt(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } -#ifdef CONFIG_APM_MODULE +#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE) EXPORT_SYMBOL(default_idle); +EXPORT_SYMBOL(arch_cpu_idle); #endif #ifdef CONFIG_XEN @@ -727,8 +788,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) */ unsigned long get_wchan(struct task_struct *p) { - unsigned long start, bottom, top, sp, fp, ip, ret = 0; - int count = 0; + struct unwind_state state; + unsigned long addr = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; @@ -736,49 +797,19 @@ unsigned long get_wchan(struct task_struct *p) if (!try_get_task_stack(p)) return 0; - start = (unsigned long)task_stack_page(p); - if (!start) 
- goto out; - - /* - * Layout of the stack page: - * - * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) - * PADDING - * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING - * stack - * ----------- bottom = start - * - * The tasks stack pointer points at the location where the - * framepointer is stored. The data on the stack is: - * ... IP FP ... IP FP - * - * We need to read FP and IP, so we need to adjust the upper - * bound by another unsigned long. - */ - top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; - top -= 2 * sizeof(unsigned long); - bottom = start; - - sp = READ_ONCE(p->thread.sp); - if (sp < bottom || sp > top) - goto out; - - fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp); - do { - if (fp < bottom || fp > top) - goto out; - ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long))); - if (!in_sched_functions(ip)) { - ret = ip; - goto out; - } - fp = READ_ONCE_NOCHECK(*(unsigned long *)fp); - } while (count++ < 16 && p->state != TASK_RUNNING); + for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state); + unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr) + break; + if (in_sched_functions(addr)) + continue; + break; + } -out: put_task_stack(p); - return ret; + + return addr; } long do_arch_prctl_common(struct task_struct *task, int option, diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h new file mode 100644 index 0000000000000000000000000000000000000000..320ab978fb1f3149b314beb8003c5b224490e7ca --- /dev/null +++ b/arch/x86/kernel/process.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Code shared between 32 and 64 bit + +#include + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p); + +/* + * This needs to be inline to optimize for the common case where no extra + * work needs to be done. + */ +static inline void switch_to_extra(struct task_struct *prev, + struct task_struct *next) +{ + unsigned long next_tif = task_thread_info(next)->flags; + unsigned long prev_tif = task_thread_info(prev)->flags; + + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional + * STIBP is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. + */ + if (!static_branch_likely(&switch_to_cond_stibp)) { + prev_tif &= ~_TIF_SPEC_IB; + next_tif &= ~_TIF_SPEC_IB; + } + } + + /* + * __switch_to_xtra() handles debug registers, i/o bitmaps, + * speculation mitigations etc. + */ + if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT || + prev_tif & _TIF_WORK_CTXSW_PREV)) + __switch_to_xtra(prev, next); +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5046a3c9dec2feaa6761e38c9947e90ad4030efb..020efe0f9614f8198218e8dbba8fdb4d501a1ce6 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -59,6 +59,8 @@ #include #include +#include "process.h" + void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; @@ -128,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct task_struct *tsk; int err; + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. 
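[Editor's note] The get_wchan() hunk above drops the hand-rolled frame-pointer walk in favour of the generic unwinder. For reference, the technique the removed code relied on is following the saved-FP/return-address pairs up the stack. A user-space sketch of that walk (assumes x86-64, GCC/Clang, and frame pointers kept; build with -O0 -fno-omit-frame-pointer):

#include <stdint.h>
#include <stdio.h>

/*
 * With frame pointers enabled, each x86-64 frame starts with the saved
 * caller FP followed by the return address: [saved_fp][return_ip].
 */
static void walk_frames(uintptr_t *top)
{
    uintptr_t *fp = (uintptr_t *)__builtin_frame_address(0);
    uintptr_t *next;
    int depth = 0;

    /* Only trust frame pointers between this frame and main()'s frame. */
    while (fp && fp <= top && depth++ < 16) {
        printf("frame %2d: return address = %#lx\n",
               depth, (unsigned long)fp[1]);

        next = (uintptr_t *)fp[0];
        if (next <= fp)           /* the stack grows down, frames go up */
            break;
        fp = next;
    }
}

static void level2(uintptr_t *top) { walk_frames(top); }
static void level1(uintptr_t *top) { level2(top); }

int main(void)
{
    level1((uintptr_t *)__builtin_frame_address(0));
    return 0;
}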
+ */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; @@ -232,7 +241,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -264,12 +272,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); - /* - * Now maybe handle debug registers and/or IO bitmaps - */ - if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); /* * Leave lazy mode, flushing any hypercalls made here. diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ea5ea850348da94cafb85010c4ba123d83fbba57..59f71d0f2b2340299f91e51c1d95e87eac6b1a63 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -59,6 +59,8 @@ #include #endif +#include "process.h" + __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); /* Prints also some state that isn't saved in the pt_regs */ @@ -298,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; + + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. + */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; @@ -422,7 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); @@ -489,12 +498,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* Reload sp0. 
*/ update_task_stack(next_p); - /* - * Now maybe reload the debug registers and handle I/O bitmaps - */ - if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT || - task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); #ifdef CONFIG_XEN_PV /* diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index e2ee403865ebee6e265cfa3f35f5f0fad78436fe..8d4d5064531060e85161cc39e9fe6c83b63a25a6 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include #include "tls.h" @@ -342,6 +344,49 @@ static int set_segment_reg(struct task_struct *task, return 0; } +static unsigned long task_seg_base(struct task_struct *task, + unsigned short selector) +{ + unsigned short idx = selector >> 3; + unsigned long base; + + if (likely((selector & SEGMENT_TI_MASK) == 0)) { + if (unlikely(idx >= GDT_ENTRIES)) + return 0; + + /* + * There are no user segments in the GDT with nonzero bases + * other than the TLS segments. + */ + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) + return 0; + + idx -= GDT_ENTRY_TLS_MIN; + base = get_desc_base(&task->thread.tls_array[idx]); + } else { +#ifdef CONFIG_MODIFY_LDT_SYSCALL + struct ldt_struct *ldt; + + /* + * If performance here mattered, we could protect the LDT + * with RCU. This is a slow path, though, so we can just + * take the mutex. + */ + mutex_lock(&task->mm->context.lock); + ldt = task->mm->context.ldt; + if (unlikely(idx >= ldt->nr_entries)) + base = 0; + else + base = get_desc_base(ldt->entries + idx); + mutex_unlock(&task->mm->context.lock); +#else + base = 0; +#endif + } + + return base; +} + #endif /* CONFIG_X86_32 */ static unsigned long get_flags(struct task_struct *task) @@ -435,18 +480,16 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset) #ifdef CONFIG_X86_64 case offsetof(struct user_regs_struct, fs_base): { - /* - * XXX: This will not behave as expected if called on - * current or if fsindex != 0. - */ - return task->thread.fsbase; + if (task->thread.fsindex == 0) + return task->thread.fsbase; + else + return task_seg_base(task, task->thread.fsindex); } case offsetof(struct user_regs_struct, gs_base): { - /* - * XXX: This will not behave as expected if called on - * current or if fsindex != 0. - */ - return task->thread.gsbase; + if (task->thread.gsindex == 0) + return task->thread.gsbase; + else + return task_seg_base(task, task->thread.gsindex); } #endif } @@ -653,7 +696,8 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) unsigned long val = 0; if (n < HBP_NUM) { - struct perf_event *bp = thread->ptrace_bps[n]; + int index = array_index_nospec(n, HBP_NUM); + struct perf_event *bp = thread->ptrace_bps[index]; if (bp) val = bp->hw.info.address; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 725624b6c0c05cdc0c94175214a7ce796df47eee..8fd3cedd9accdd1c17757e5a381b2ab1eac1c032 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) return 0; } +/* + * Some machines don't handle the default ACPI reboot method and + * require the EFI reboot method: + */ +static int __init set_efi_reboot(const struct dmi_system_id *d) +{ + if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) { + reboot_type = BOOT_EFI; + pr_info("%s series board detected. 
Selecting EFI-method for reboot.\n", d->ident); + } + return 0; +} + void __noreturn machine_real_restart(unsigned int type) { local_irq_disable(); @@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), }, }, + { /* Handle reboot issue on Acer TravelMate X514-51T */ + .callback = set_efi_reboot, + .ident = "Acer TravelMate X514-51T", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"), + }, + }, /* Apple */ { /* Handle problems with rebooting on Apple MacBook5 */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4866badb235a64be119989e642764d0becac65f..c63f2e41e74c6623929a20cd52a19a27d207cb81 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -447,9 +447,6 @@ static void __init memblock_x86_reserve_range_setup_data(void) #ifdef CONFIG_KEXEC_CORE -/* 16M alignment for crash kernel regions */ -#define CRASH_ALIGN (16 << 20) - /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. @@ -463,59 +460,6 @@ static void __init memblock_x86_reserve_range_setup_data(void) # define CRASH_ADDR_HIGH_MAX MAXMEM #endif -static int __init reserve_crashkernel_low(void) -{ -#ifdef CONFIG_X86_64 - unsigned long long base, low_base = 0, low_size = 0; - unsigned long total_low_mem; - int ret; - - total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT)); - - /* crashkernel=Y,low */ - ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); - if (ret) { - /* - * two parts from lib/swiotlb.c: - * -swiotlb size: user-specified with swiotlb= or default. - * - * -swiotlb overflow buffer: now hardcoded to 32k. We round it - * to 8M for other buffers that may need to stay low too. Also - * make sure we allocate enough extra low memory so that we - * don't run out of DMA buffers for 32-bit devices. - */ - low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20); - } else { - /* passed with crashkernel=0,low ? 
*/ - if (!low_size) - return 0; - } - - low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN); - if (!low_base) { - pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n", - (unsigned long)(low_size >> 20)); - return -ENOMEM; - } - - ret = memblock_reserve(low_base, low_size); - if (ret) { - pr_err("%s: Error reserving crashkernel low memblock.\n", __func__); - return ret; - } - - pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", - (unsigned long)(low_size >> 20), - (unsigned long)(low_base >> 20), - (unsigned long)(total_low_mem >> 20)); - - crashk_low_res.start = low_base; - crashk_low_res.end = low_base + low_size - 1; - insert_resource(&iomem_resource, &crashk_low_res); -#endif - return 0; -} - static void __init reserve_crashkernel(void) { unsigned long long crash_size, crash_base, total_mem; @@ -573,9 +517,13 @@ static void __init reserve_crashkernel(void) return; } - if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) { - memblock_free(crash_base, crash_size); - return; + if (crash_base >= (1ULL << 32)) { + if (reserve_crashkernel_low()) { + memblock_free(crash_base, crash_size); + return; + } + + insert_resource(&iomem_resource, &crashk_low_res); } pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", @@ -1176,6 +1124,8 @@ void __init setup_arch(char **cmdline_p) reserve_initrd(); acpi_table_upgrade(); + /* Look for ACPI tables and reserve memory occupied by them. */ + acpi_boot_table_init(); vsmp_init(); @@ -1183,11 +1133,6 @@ void __init setup_arch(char **cmdline_p) early_platform_quirks(); - /* - * Parse the ACPI tables for possible boot-time SMP configuration. - */ - acpi_boot_table_init(); - early_acpi_boot_init(); initmem_init(); @@ -1251,7 +1196,7 @@ void __init setup_arch(char **cmdline_p) x86_init.hyper.guest_late_init(); e820__reserve_resources(); - e820__register_nosave_regions(max_low_pfn); + e820__register_nosave_regions(max_pfn); x86_init.resources.reserve_resources(); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 92a3b312a53c465bbde5f006b5707b62671a49ae..c8aa58a2bab97e7d061f70f621e599608f5b66ac 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs, COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); -#ifdef CONFIG_X86_64 - /* - * Fix up SS if needed for the benefit of old DOSEMU and - * CRIU. - */ - if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && - user_64bit_mode(regs))) - force_valid_ss(regs); -#endif - get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ @@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs, buf = (void __user *)buf_val; } get_user_catch(err); +#ifdef CONFIG_X86_64 + /* + * Fix up SS if needed for the benefit of old DOSEMU and + * CRIU. 
+ */ + if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs))) + force_valid_ss(regs); +#endif + err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32)); force_iret(); @@ -322,7 +321,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set, frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (__put_user(sig, &frame->sig)) @@ -385,7 +384,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; put_user_try { @@ -461,11 +460,12 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, { struct rt_sigframe __user *frame; void __user *fp = NULL; + unsigned long uc_flags; int err = 0; frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (ksig->ka.sa.sa_flags & SA_SIGINFO) { @@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. */ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); save_altstack_ex(&frame->uc.uc_stack, regs->sp); @@ -541,13 +543,14 @@ static int x32_setup_rt_frame(struct ksignal *ksig, { #ifdef CONFIG_X86_X32_ABI struct rt_sigframe_x32 __user *frame; + unsigned long uc_flags; void __user *restorer; int err = 0; void __user *fpstate = NULL; frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return -EFAULT; if (ksig->ka.sa.sa_flags & SA_SIGINFO) { @@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. 
*/ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); @@ -610,7 +615,7 @@ SYSCALL_DEFINE0(sigreturn) frame = (struct sigframe __user *)(regs->sp - 8); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, @@ -642,7 +647,7 @@ SYSCALL_DEFINE0(rt_sigreturn) unsigned long uc_flags; frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; @@ -871,7 +876,7 @@ asmlinkage long sys32_x32_rt_sigreturn(void) frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 04adc8d60aed82178caf3a099d66b497a6c11bcf..b2b87b91f3361dc4ec2806685d8bc7b21052ba6e 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void) irq_exit(); } +static int register_stop_handler(void) +{ + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, + NMI_FLAG_FIRST, "smp_stop"); +} + static void native_stop_other_cpus(int wait) { unsigned long flags; @@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait) apic->send_IPI_allbutself(REBOOT_VECTOR); /* - * Don't wait longer than a second if the caller - * didn't ask us to wait. + * Don't wait longer than a second for IPI completion. The + * wait request is not checked here because that would + * prevent an NMI shutdown attempt in case that not all + * CPUs reach shutdown state. */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && (wait || timeout--)) + while (num_online_cpus() > 1 && timeout--) udelay(1); } - - /* if the REBOOT_VECTOR didn't work, try with the NMI */ - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, - NMI_FLAG_FIRST, "smp_stop")) - /* Note: we ignore failures here */ - /* Hope the REBOOT_IRQ is good enough */ - goto finish; - - /* sync above data before sending IRQ */ - wmb(); - pr_emerg("Shutting down cpus with NMI\n"); + /* if the REBOOT_VECTOR didn't work, try with the NMI */ + if (num_online_cpus() > 1) { + /* + * If NMI IPI is enabled, try to register the stop handler + * and send the IPI. In any case try to wait for the other + * CPUs to stop. + */ + if (!smp_no_nmi_ipi && !register_stop_handler()) { + /* Sync above data before sending IRQ */ + wmb(); - apic->send_IPI_allbutself(NMI_VECTOR); + pr_emerg("Shutting down cpus with NMI\n"); + apic->send_IPI_allbutself(NMI_VECTOR); + } /* - * Don't wait longer than a 10 ms if the caller - * didn't ask us to wait. + * Don't wait longer than 10 ms if the caller didn't + * reqeust it. If wait is true, the machine hangs here if + * one or more CPUs do not reach shutdown state. 
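[Editor's note] The native_stop_other_cpus() rework above bounds the first wait so that a CPU ignoring the REBOOT vector can no longer prevent the NMI fallback; only the second wait honours the caller's wait request. A stand-alone sketch of that two-stage pattern (the IPI and CPU-accounting helpers below are stubs for illustration, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real IPI and online-CPU primitives. */
static int online_cpus = 4;
static void send_reboot_ipi(void) { puts("REBOOT IPI"); online_cpus = 2; }
static void send_nmi_ipi(void)    { puts("NMI IPI");    online_cpus = 1; }
static void udelay(int us)        { (void)us; }

static void stop_other_cpus(bool wait, bool no_nmi_ipi)
{
    long timeout;

    send_reboot_ipi();

    /*
     * Stage 1: never wait more than ~1s here, even if the caller asked to
     * wait, so a stuck CPU cannot block the NMI fallback below.
     */
    timeout = 1000 * 1000;
    while (online_cpus > 1 && timeout--)
        udelay(1);

    /* Stage 2: NMI fallback; this wait is unbounded only if wait is set. */
    if (online_cpus > 1) {
        if (!no_nmi_ipi)
            send_nmi_ipi();

        timeout = 10 * 1000;
        while (online_cpus > 1 && (wait || timeout--))
            udelay(1);
    }
}

int main(void)
{
    stop_other_cpus(false, false);
    return online_cpus == 1 ? 0 : 1;
}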
*/ timeout = USEC_PER_MSEC * 10; while (num_online_cpus() > 1 && (wait || timeout--)) udelay(1); } -finish: local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f02ecaf97904bd2a1822cb242539ae3eeeefee20..e9dd01f7d6026d7f60a66a8ec5099b112ea6a8b7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -100,6 +100,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); unsigned int __max_logical_packages __read_mostly; EXPORT_SYMBOL(__max_logical_packages); static unsigned int logical_packages __read_mostly; +static unsigned int logical_die __read_mostly; /* Maximum number of SMT threads on any online core */ int __read_mostly __max_smt_threads = 1; @@ -216,17 +217,11 @@ static void notrace start_secondary(void *unused) * before cpu_init(), SMP booting is too fragile that we want to * limit the things done here to the most necessary things. */ - if (boot_cpu_has(X86_FEATURE_PCID)) - __write_cr4(__read_cr4() | X86_CR4_PCIDE); + cr4_init(); #ifdef CONFIG_X86_32 /* switch away from the initial page table */ load_cr3(swapper_pg_dir); - /* - * Initialize the CR4 shadow before doing anything that could - * try to read it. - */ - cr4_init_shadow(); __flush_tlb_all(); #endif load_current_idt(); @@ -269,6 +264,14 @@ static void notrace start_secondary(void *unused) wmb(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + + /* + * Prevent tail call to cpu_startup_entry() because the stack protector + * guard has been changed a couple of function calls up, in + * boot_init_stack_canary() and must not be checked before tail calling + * another function. + */ + prevent_tail_call_optimization(); } /** @@ -306,6 +309,26 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg) return -1; } EXPORT_SYMBOL(topology_phys_to_logical_pkg); +/** + * topology_phys_to_logical_die - Map a physical die id to logical + * + * Returns logical die id or -1 if not found + */ +int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu) +{ + int cpu; + int proc_id = cpu_data(cur_cpu).phys_proc_id; + + for_each_possible_cpu(cpu) { + struct cpuinfo_x86 *c = &cpu_data(cpu); + + if (c->initialized && c->cpu_die_id == die_id && + c->phys_proc_id == proc_id) + return c->logical_die_id; + } + return -1; +} +EXPORT_SYMBOL(topology_phys_to_logical_die); /** * topology_update_package_map - Update the physical to logical package map @@ -330,6 +353,29 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu) cpu_data(cpu).logical_proc_id = new; return 0; } +/** + * topology_update_die_map - Update the physical to logical die map + * @die: The die id as retrieved via CPUID + * @cpu: The cpu for which this is updated + */ +int topology_update_die_map(unsigned int die, unsigned int cpu) +{ + int new; + + /* Already available somewhere? 
*/ + new = topology_phys_to_logical_die(die, cpu); + if (new >= 0) + goto found; + + new = logical_die++; + if (new != die) { + pr_info("CPU %u Converting physical %u to logical die %u\n", + cpu, die, new); + } +found: + cpu_data(cpu).logical_die_id = new; + return 0; +} void __init smp_store_boot_cpu_info(void) { @@ -339,6 +385,7 @@ void __init smp_store_boot_cpu_info(void) *c = boot_cpu_data; c->cpu_index = id; topology_update_package_map(c->phys_proc_id, id); + topology_update_die_map(c->cpu_die_id, id); c->initialized = true; } @@ -393,6 +440,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) int cpu1 = c->cpu_index, cpu2 = o->cpu_index; if (c->phys_proc_id == o->phys_proc_id && + c->cpu_die_id == o->cpu_die_id && per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) { if (c->cpu_core_id == o->cpu_core_id) return topology_sane(c, o, "smt"); @@ -404,6 +452,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) } } else if (c->phys_proc_id == o->phys_proc_id && + c->cpu_die_id == o->cpu_die_id && c->cpu_core_id == o->cpu_core_id) { return topology_sane(c, o, "smt"); } @@ -676,6 +725,7 @@ static void __init smp_quirk_init_udelay(void) /* if modern processor, use no delay */ if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || + ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) || ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { init_udelay = 0; return; @@ -1346,7 +1396,7 @@ void __init calculate_max_logical_packages(void) * extrapolate the boot cpu's data to all packages. */ ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); - __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); + __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); pr_info("Max logical packages: %u\n", __max_logical_packages); } @@ -1592,7 +1642,8 @@ static inline void mwait_play_dead(void) void *mwait_ptr; int i; - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return; if (!this_cpu_has(X86_FEATURE_MWAIT)) return; diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 7627455047c2d58e2db96cc6af1186debc2bdff1..949320d1e51ef29476e299987c87f9dd6d0bf812 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -177,7 +177,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) { int ret; - if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) + if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE)) return 0; ret = 1; diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 623965e86b65eda431b8e5fdbc2204c47bcb8b33..897da526e40e66027a34b9657c73aea3bdf78c51 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = { {}, }; +/* + * Some devices have a portrait LCD but advertise a landscape resolution (and + * pitch). We simply swap width and height for these devices so that we can + * correctly deal with some of them coming with multiple resolutions. + */ +static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { + { + /* + * Lenovo MIIX310-10ICR, only some batches have the troublesome + * 800x1280 portrait screen. Luckily the portrait version has + * its own BIOS version, so we match on that. 
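[Editor's note] topology_update_die_map() above gives each physical die a small consecutive logical id: look for an existing mapping first, otherwise hand out the next free number. A reduced sketch of that "search, else allocate next" pattern (the kernel version also matches on phys_proc_id and logs the conversion; the structure below is invented for illustration):

#include <stdio.h>

#define NR_CPUS 8

/* Hypothetical per-CPU topology data, for illustration only. */
struct cpu_topo {
    int initialized;
    int phys_die_id;
    int logical_die_id;
};

static struct cpu_topo cpu_data[NR_CPUS];
static int logical_die;      /* next unused logical id */

static int phys_to_logical_die(int die_id)
{
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_data[cpu].initialized && cpu_data[cpu].phys_die_id == die_id)
            return cpu_data[cpu].logical_die_id;
    return -1;
}

static void update_die_map(int die_id, int cpu)
{
    int new = phys_to_logical_die(die_id);   /* already mapped somewhere? */

    if (new < 0)
        new = logical_die++;                 /* hand out the next id */

    cpu_data[cpu].phys_die_id = die_id;
    cpu_data[cpu].logical_die_id = new;
    cpu_data[cpu].initialized = 1;
}

int main(void)
{
    /* Two sockets with physical die ids 3 and 7 become logical 0 and 1. */
    update_die_map(3, 0);
    update_die_map(3, 1);
    update_die_map(7, 2);

    printf("cpu2 logical die = %d\n", cpu_data[2].logical_die_id);
    return 0;
}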
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"), + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"), + }, + }, + { + /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo MIIX 320-10ICR"), + }, + }, + { + /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "Lenovo ideapad D330-10IGM"), + }, + }, + {}, +}; + __init void sysfb_apply_efi_quirks(void) { if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) dmi_check_system(efifb_dmi_system_table); + + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && + dmi_check_system(efifb_dmi_swap_width_height)) { + u16 temp = screen_info.lfb_width; + + screen_info.lfb_width = screen_info.lfb_height; + screen_info.lfb_height = temp; + screen_info.lfb_linelength = 4 * screen_info.lfb_width; + } } diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 85195d447a922785857db0caacc73d6bc9b9490c..f3215346e47fd8d822191d9ff879896e1f5e7a4f 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -94,11 +94,11 @@ __init int create_simplefb(const struct screen_info *si, if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) size <<= 16; length = mode->height * mode->stride; - length = PAGE_ALIGN(length); if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); return -EINVAL; } + length = PAGE_ALIGN(length); /* setup IORESOURCE_MEM as framebuffer memory */ memset(&res, 0, sizeof(res)); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index fddaefc51fb6d7ead6630ec8bfedae9bcb94d582..f84422ac3274e93bdcc660d93b626578653e9fe7 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -30,26 +30,7 @@ __visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JI unsigned long profile_pc(struct pt_regs *regs) { - unsigned long pc = instruction_pointer(regs); - - if (!user_mode(regs) && in_lock_functions(pc)) { -#ifdef CONFIG_FRAME_POINTER - return *(unsigned long *)(regs->bp + sizeof(long)); -#else - unsigned long *sp = - (unsigned long *)kernel_stack_pointer(regs); - /* - * Return address is either directly at stack pointer - * or above a saved flags. Eflags has bits 22-31 zero, - * kernel addresses don't. - */ - if (sp[0] >> 22) - return sp[0]; - if (sp[1] >> 22) - return sp[1]; -#endif - } - return pc; + return instruction_pointer(regs); } EXPORT_SYMBOL(profile_pc); @@ -81,17 +62,28 @@ static void __init setup_default_timer_irq(void) /* Default timer init function */ void __init hpet_time_init(void) { - if (!hpet_enable()) - setup_pit_timer(); + if (!hpet_enable()) { + if (!pit_timer_init()) + return; + } + setup_default_timer_irq(); } static __init void x86_late_time_init(void) { + /* + * Before PIT/HPET init, select the interrupt mode. This is required + * to make the decision whether PIT should be initialized correct. + */ + x86_init.irqs.intr_mode_select(); + + /* Setup the legacy timers */ x86_init.timers.timer_init(); + /* - * After PIT/HPET timers init, select and setup - * the final interrupt mode for delivering IRQs. + * After PIT/HPET timers init, set up the final interrupt mode for + * delivering IRQs. 
*/ x86_init.irqs.intr_mode_init(); tsc_init(); diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index a5b802a1221272402b344d75ccf94bcff4b69b38..71d3fef1edc92e7e10180a84ea36b5c81443e58c 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -220,6 +221,7 @@ int do_get_thread_area(struct task_struct *p, int idx, struct user_desc __user *u_info) { struct user_desc info; + int index; if (idx == -1 && get_user(idx, &u_info->entry_number)) return -EFAULT; @@ -227,8 +229,11 @@ int do_get_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; - fill_user_desc(&info, idx, - &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]); + index = idx - GDT_ENTRY_TLS_MIN; + index = array_index_nospec(index, + GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1); + + fill_user_desc(&info, idx, &p->thread.tls_array[index]); if (copy_to_user(u_info, &info, sizeof(info))) return -EFAULT; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index e6db475164edec4f33e6f056cde5cbdfbe51a556..343c21fa3fad2eaa3d8fed4f69635c6d27815e5b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -585,22 +585,34 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) * ftrace must be first, everything else may cause a recursive crash. * See note by declaration of modifying_ftrace_code in ftrace.c */ - if (unlikely(atomic_read(&modifying_ftrace_code)) && - ftrace_int3_handler(regs)) - return; + if (unlikely(atomic_read(&modifying_ftrace_code))) { + int ret; + + lockdep_off(); + ret = ftrace_int3_handler(regs); + lockdep_on(); + if (ret) + return; + } #endif if (poke_int3_handler(regs)) return; /* - * Use ist_enter despite the fact that we don't use an IST stack. - * We can be called from a kprobe in non-CONTEXT_KERNEL kernel - * mode or even during context tracking state changes. + * Unlike any other non-IST entry, we can be called from a kprobe in + * non-CONTEXT_KERNEL kernel mode or even during context tracking + * state changes. Make sure that we wake up RCU even if we're coming + * from kernel code. * - * This means that we can't schedule. That's okay. + * This means that we can't schedule even if we came from a + * preemptible kernel context. That's okay. 
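[Editor's note] Several hunks above (ptrace_get_debugreg(), do_get_thread_area()) clamp a user-supplied index with array_index_nospec() before indexing a kernel array, so a mispredicted bounds check cannot be used to read out of bounds. A user-space sketch of the branch-free mask behind it; this mirrors the generic C fallback (x86 also has an asm variant), and the helper names here are mine:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/*
 * Returns ~0UL when index < size and 0 otherwise, computed without a
 * branch so the result is valid even on a mispredicted path.
 */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
    return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
    return index & index_mask_nospec(index, size);
}

int main(void)
{
    int table[4] = { 10, 20, 30, 40 };
    unsigned long idx = 7;                    /* out of bounds */

    if (idx < 4) {
        /*
         * Architecturally unreachable, but a speculative access here
         * would be forced to slot 0 by the mask.
         */
        idx = index_nospec(idx, 4);
        printf("%d\n", table[idx]);
    }

    idx = index_nospec(2, 4);                 /* in bounds: left unchanged */
    printf("%d\n", table[idx]);
    return 0;
}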
*/ - ist_enter(regs); + if (!user_mode(regs)) { + rcu_nmi_enter(); + preempt_disable(); + } RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -621,7 +633,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) cond_local_irq_disable(regs); exit: - ist_exit(regs); + if (!user_mode(regs)) { + preempt_enable_no_resched(); + rcu_nmi_exit(); + } } NOKPROBE_SYMBOL(do_int3); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 6d5dc5dabfd7ec71532c9a48e00c6a70da950a35..a7f447c30089aa67711a4243090b042b635b8828 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -636,7 +636,7 @@ unsigned long native_calibrate_tsc(void) case INTEL_FAM6_KABYLAKE_DESKTOP: crystal_khz = 24000; /* 24.0 MHz */ break; - case INTEL_FAM6_ATOM_DENVERTON: + case INTEL_FAM6_ATOM_GOLDMONT_X: crystal_khz = 25000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: @@ -1095,8 +1095,7 @@ static struct clocksource clocksource_tsc_early = { .rating = 299, .read = read_tsc, .mask = CLOCKSOURCE_MASK(64), - .flags = CLOCK_SOURCE_IS_CONTINUOUS | - CLOCK_SOURCE_MUST_VERIFY, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, .archdata = { .vclock_mode = VCLOCK_TSC }, .resume = tsc_resume, .mark_unstable = tsc_cs_mark_unstable, @@ -1353,6 +1352,20 @@ static int __init init_tsc_clocksource(void) if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; + /* + * Disable the clocksource watchdog when the system has: + * - TSC running at constant frequency + * - TSC which does not stop in C-States + * - the TSC_ADJUST register which allows to detect even minimal + * modifications + * - not more than four sockets. + */ + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && + boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && + boot_cpu_has(X86_FEATURE_TSC_ADJUST) && + topology_max_packages() <= 4) + clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; + /* * When TSC frequency is known (retrieved via MSR or CPUID), we skip * the refined calibration and directly register it as a clocksource. diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 27ef714d886c121caa786a85b6211e92a30d6915..067858fe4db8fcf5063dcf0952cfb90add3e59de 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -59,19 +59,19 @@ static const struct freq_desc freq_desc_ann = { }; static const struct x86_cpu_id tsc_msr_cpu_ids[] = { - INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw), - INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv), - INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt), + INTEL_CPU_FAM6(ATOM_SALTWELL_MID, freq_desc_pnw), + INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET, freq_desc_clv), + INTEL_CPU_FAM6(ATOM_SILVERMONT, freq_desc_byt), + INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, freq_desc_tng), INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht), - INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng), - INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann), + INTEL_CPU_FAM6(ATOM_AIRMONT_MID, freq_desc_ann), {} }; /* * MSR-based CPU/TSC frequency discovery for certain CPUs. * - * Set global "lapic_timer_frequency" to bus_clock_cycles/jiffy + * Set global "lapic_timer_period" to bus_clock_cycles/jiffy * Return processor base frequency in KHz, or 0 on failure. 
*/ unsigned long cpu_khz_from_msr(void) @@ -104,7 +104,7 @@ unsigned long cpu_khz_from_msr(void) res = freq * ratio; #ifdef CONFIG_X86_LOCAL_APIC - lapic_timer_frequency = (freq * 1000) / HZ; + lapic_timer_period = (freq * 1000) / HZ; #endif /* diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index ec534f978867db90e662e7ee1d82e366796d1340..e0cc4170325cf77708e2faf825fa060b2a64e795 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -190,6 +190,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package. Skip sync diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 3dc26f95d46e8a1ea439dba8ae144bae0dc9444d..5a04f3472e8c9d3ea9a4ba3f8cf5db63e09d3d0f 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_state *state) } /* Get the next frame pointer: */ - if (state->regs) + if (state->next_bp) { + next_bp = state->next_bp; + state->next_bp = NULL; + } else if (state->regs) { next_bp = (unsigned long *)state->regs->bp; - else + } else { next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp); + } /* Move to the next frame if it's safe: */ if (!update_stack_state(state, next_bp)) @@ -362,6 +366,9 @@ bool unwind_next_frame(struct unwind_state *state) if (IS_ENABLED(CONFIG_X86_32)) goto the_end; + if (state->task != current) + goto the_end; + if (state->regs) { printk_deferred_once(KERN_WARNING "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", @@ -398,6 +405,21 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, bp = get_frame_pointer(task, regs); + /* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer. + * That means that SP points into the middle of an incomplete frame: + * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we + * would have written a frame pointer if we hadn't crashed. + * Pretend that the frame is complete and that BP points to it, but save + * the real BP so that we can use it when looking for the next frame. + */ + if (regs && regs->ip == 0 && + (unsigned long *)kernel_stack_pointer(regs) >= first_frame) { + state->next_bp = bp; + bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1; + } + /* Initialize stack info and make sure the frame data is accessible: */ get_stack_info(bp, state->task, &state->stack_info, &state->stack_mask); @@ -410,7 +432,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->bp < first_frame)) + (state->next_bp == NULL && state->bp < first_frame))) unwind_next_frame(state); } EXPORT_SYMBOL_GPL(__unwind_start); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 26038eacf74a7130d659f98e5e1348667d4d635e..d609a7fa08c6535a3f756a3115ff409731a3e04b 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -7,7 +7,13 @@ #include #define orc_warn(fmt, ...) 
\ - printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +#define orc_warn_current(args...) \ +({ \ + if (state->task == current && !state->error) \ + orc_warn(args); \ +}) extern int __start_orc_unwind_ip[]; extern int __stop_orc_unwind_ip[]; @@ -73,9 +79,9 @@ static struct orc_entry *orc_module_find(unsigned long ip) } #endif -#ifdef CONFIG_DYNAMIC_FTRACE static struct orc_entry *orc_find(unsigned long ip); +#ifdef CONFIG_DYNAMIC_FTRACE /* * Ftrace dynamic trampolines do not have orc entries of their own. * But they are copies of the ftrace entries that are static and @@ -89,22 +95,27 @@ static struct orc_entry *orc_find(unsigned long ip); static struct orc_entry *orc_ftrace_find(unsigned long ip) { struct ftrace_ops *ops; - unsigned long caller; + unsigned long tramp_addr, offset; ops = ftrace_ops_trampoline(ip); if (!ops) return NULL; + /* Set tramp_addr to the start of the code copied by the trampoline */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) - caller = (unsigned long)ftrace_regs_call; + tramp_addr = (unsigned long)ftrace_regs_caller; else - caller = (unsigned long)ftrace_call; + tramp_addr = (unsigned long)ftrace_caller; + + /* Now place tramp_addr to the location within the trampoline ip is at */ + offset = ip - ops->trampoline; + tramp_addr += offset; /* Prevent unlikely recursion */ - if (ip == caller) + if (ip == tramp_addr) return NULL; - return orc_find(caller); + return orc_find(tramp_addr); } #else static struct orc_entry *orc_ftrace_find(unsigned long ip) @@ -113,6 +124,48 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip) } #endif +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. + */ +static struct orc_entry null_orc_entry = { + .sp_offset = sizeof(long), + .sp_reg = ORC_REG_SP, + .bp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + +#ifdef CONFIG_PARAVIRT +static bool check_paravirt(struct unwind_state *state, struct orc_entry *orc) +{ + u8 *ip = (u8 *)state->ip; + + /* + * In paravirt_patch_64.c, patched paravirt opcode should be: + * pushfq; popq %rax // 0x9c 0x58 + * pushq %rdi; popfq // 0x57 0x9d + * + * Error unwinding only happens when: + * 1. In irq or preempt context. + * 2. Current insn is popq, and it doesn't change orc. + * 3. Last insn doesn't change orc, checking it first to + * promise ip - 1 is valid. + * 4. Last byte fits pushf. 
+ */ + if (state->regs && orc->type == ORC_TYPE_CALL && + (ip[0] == 0x58 || ip[0] == 0x9d) && + orc == orc_find((unsigned long)(ip + 1)) && + orc == orc_find((unsigned long)(ip - 1)) && + (ip[-1] == 0x9c || ip[-1] == 0x57)) + return true; + + return false; +} +#endif + static struct orc_entry *orc_find(unsigned long ip) { static struct orc_entry *orc; @@ -120,6 +173,9 @@ static struct orc_entry *orc_find(unsigned long ip) if (!orc_init) return NULL; + if (ip == 0) + return &null_orc_entry; + /* For non-init vmlinux addresses, use the fast lookup table: */ if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { unsigned int idx, start, stop; @@ -245,9 +301,11 @@ void __init unwind_init(void) return; } - /* Sort the .orc_unwind and .orc_unwind_ip tables: */ - sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp, - orc_sort_swap); + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ /* Initialize the fast lookup table: */ lookup_num_blocks = orc_lookup_end - orc_lookup; @@ -304,11 +362,11 @@ static bool stack_access_ok(struct unwind_state *state, unsigned long _addr, struct stack_info *info = &state->stack_info; void *addr = (void *)_addr; - if (!on_stack(info, addr, len) && - (get_stack_info(addr, state->task, info, &state->stack_mask))) - return false; + if (on_stack(info, addr, len)) + return true; - return true; + return !get_stack_info(addr, state->task, info, &state->stack_mask) && + on_stack(info, addr, len); } static bool deref_stack_reg(struct unwind_state *state, unsigned long addr, @@ -350,12 +408,44 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr return true; } +/* + * If state->regs is non-NULL, and points to a full pt_regs, just get the reg + * value from state->regs. + * + * Otherwise, if state->regs just points to IRET regs, and the previous frame + * had full regs, it's safe to get the value from the previous regs. This can + * happen when early/late IRQ entry code gets interrupted by an NMI. + */ +static bool get_reg(struct unwind_state *state, unsigned int reg_off, + unsigned long *val) +{ + unsigned int reg = reg_off/8; + + if (!state->regs) + return false; + + if (state->full_regs) { + *val = ((unsigned long *)state->regs)[reg]; + return true; + } + + if (state->prev_regs) { + *val = ((unsigned long *)state->prev_regs)[reg]; + return true; + } + + return false; +} + bool unwind_next_frame(struct unwind_state *state) { - unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; + unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; bool indirect = false; +#ifdef CONFIG_PARAVIRT + struct orc_entry para_orc; +#endif if (unwind_done(state)) return false; @@ -377,6 +467,18 @@ bool unwind_next_frame(struct unwind_state *state) if (!orc) goto err; +#ifdef CONFIG_PARAVIRT + /* + * When hitting paravirt POP insn, the orc entry should add + * one slot for PUSH insn. 
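[Editor's note] null_orc_entry above exists because a call through a NULL function pointer leaves IP==0 with the return address at the very top of the stack; an entry with sp_reg = SP and sp_offset = sizeof(long) is enough to step into the caller. A toy illustration of applying that synthetic rule (the structures and names here are simplified stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct unwind_frame {
    uintptr_t ip;
    uintptr_t sp;
};

/*
 * Synthetic "IP == 0" rule: the previous SP is one slot above the current
 * one, and that slot holds the return address pushed by the NULL call.
 */
static void unwind_past_null_call(struct unwind_frame *f)
{
    uintptr_t *sp = (uintptr_t *)f->sp;

    f->ip = sp[0];                      /* return address of the caller */
    f->sp = (uintptr_t)(sp + 1);
}

int main(void)
{
    /* Pretend this is the stack slot written by "call *%rax" with rax==0. */
    uintptr_t fake_stack[1] = { 0x401234 };
    struct unwind_frame f = { .ip = 0, .sp = (uintptr_t)fake_stack };

    unwind_past_null_call(&f);
    printf("recovered caller ip = %#lx\n", (unsigned long)f.ip);
    return 0;
}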
+ */ + if (!state->error && check_paravirt(state, orc)) { + para_orc = *orc; + para_orc.sp_offset += sizeof(long); + orc = ¶_orc; + } +#endif + /* End-of-stack check for kernel threads: */ if (orc->sp_reg == ORC_REG_UNDEFINED) { if (!orc->end) @@ -406,43 +508,39 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_REG_R10: - if (!state->regs || !state->full_regs) { - orc_warn("missing regs for base reg R10 at ip %pB\n", - (void *)state->ip); + if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { + orc_warn_current("missing R10 value at %pB\n", + (void *)state->ip); goto err; } - sp = state->regs->r10; break; case ORC_REG_R13: - if (!state->regs || !state->full_regs) { - orc_warn("missing regs for base reg R13 at ip %pB\n", - (void *)state->ip); + if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { + orc_warn_current("missing R13 value at %pB\n", + (void *)state->ip); goto err; } - sp = state->regs->r13; break; case ORC_REG_DI: - if (!state->regs || !state->full_regs) { - orc_warn("missing regs for base reg DI at ip %pB\n", - (void *)state->ip); + if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { + orc_warn_current("missing RDI value at %pB\n", + (void *)state->ip); goto err; } - sp = state->regs->di; break; case ORC_REG_DX: - if (!state->regs || !state->full_regs) { - orc_warn("missing regs for base reg DX at ip %pB\n", - (void *)state->ip); + if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { + orc_warn_current("missing DX value at %pB\n", + (void *)state->ip); goto err; } - sp = state->regs->dx; break; default: - orc_warn("unknown SP base reg %d for ip %pB\n", + orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->ip); goto err; } @@ -465,35 +563,39 @@ bool unwind_next_frame(struct unwind_state *state) state->sp = sp; state->regs = NULL; + state->prev_regs = NULL; state->signal = false; break; case ORC_TYPE_REGS: if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { - orc_warn("can't dereference registers at %p for ip %pB\n", - (void *)sp, (void *)orig_ip); + orc_warn_current("can't access registers at %pB\n", + (void *)orig_ip); goto err; } state->regs = (struct pt_regs *)sp; + state->prev_regs = NULL; state->full_regs = true; state->signal = true; break; case ORC_TYPE_REGS_IRET: if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { - orc_warn("can't dereference iret registers at %p for ip %pB\n", - (void *)sp, (void *)orig_ip); + orc_warn_current("can't access iret registers at %pB\n", + (void *)orig_ip); goto err; } + if (state->full_regs) + state->prev_regs = state->regs; state->regs = (void *)sp - IRET_FRAME_OFFSET; state->full_regs = false; state->signal = true; break; default: - orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", + orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)orig_ip); break; } @@ -501,8 +603,8 @@ bool unwind_next_frame(struct unwind_state *state) /* Find BP: */ switch (orc->bp_reg) { case ORC_REG_UNDEFINED: - if (state->regs && state->full_regs) - state->bp = state->regs->bp; + if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) + state->bp = tmp; break; case ORC_REG_PREV_SP: @@ -525,8 +627,8 @@ bool unwind_next_frame(struct unwind_state *state) if (state->stack_info.type == prev_type && on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && state->sp <= prev_sp) { - orc_warn("stack going in the wrong direction? ip=%pB\n", - (void *)orig_ip); + orc_warn_current("stack going in the wrong direction? 
at %pB\n", + (void *)orig_ip); goto err; } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index deb576b23b7cf49817533d00555d0dc976c42486..ae9e806a11deff8e015a75d65c6cc9ef8bb9185c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -268,12 +268,13 @@ static volatile u32 good_2byte_insns[256 / 32] = { static bool is_prefix_bad(struct insn *insn) { + insn_byte_t p; int i; - for (i = 0; i < insn->prefixes.nbytes; i++) { + for_each_insn_prefix(insn, i, p) { insn_attr_t attr; - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + attr = inat_get_opcode_attribute(p); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_ES): case INAT_MAKE_PREFIX(INAT_PFX_CS): @@ -521,9 +522,12 @@ struct uprobe_xol_ops { void (*abort)(struct arch_uprobe *, struct pt_regs *); }; -static inline int sizeof_long(void) +static inline int sizeof_long(struct pt_regs *regs) { - return in_ia32_syscall() ? 4 : 8; + /* + * Check registers for mode as in_xxx_syscall() does not apply here. + */ + return user_64bit_mode(regs) ? 8 : 4; } static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) @@ -534,9 +538,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) static int emulate_push_stack(struct pt_regs *regs, unsigned long val) { - unsigned long new_sp = regs->sp - sizeof_long(); + unsigned long new_sp = regs->sp - sizeof_long(regs); - if (copy_to_user((void __user *)new_sp, &val, sizeof_long())) + if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs))) return -EFAULT; regs->sp = new_sp; @@ -569,7 +573,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { - regs->sp += sizeof_long(); /* Pop incorrect return address */ + regs->sp += sizeof_long(regs); /* Pop incorrect return address */ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) return -ERESTART; } @@ -688,7 +692,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) * "call" insn was executed out-of-line. Just restore ->sp and restart. * We could also restore ->ip and try to call branch_emulate_op() again. */ - regs->sp += sizeof_long(); + regs->sp += sizeof_long(regs); return -ERESTART; } @@ -725,6 +729,7 @@ static const struct uprobe_xol_ops push_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); + insn_byte_t p; int i; switch (opc1) { @@ -755,8 +760,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. * No one uses these insns, reject any branch insns with such prefix. 
*/ - for (i = 0; i < insn->prefixes.nbytes; i++) { - if (insn->prefixes.bytes[i] == 0x66) + for_each_insn_prefix(insn, i, p) { + if (p == 0x66) return -ENOTSUPP; } @@ -1068,7 +1073,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) { - int rasize = sizeof_long(), nleft; + int rasize = sizeof_long(regs), nleft; unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) @@ -1086,7 +1091,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n", current->pid, regs->sp, regs->ip); - force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); + force_sig(SIGSEGV, current); } return -1; diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 1c03e4aa6474eaec356dc3bffddace14cb838cd2..9cf7fff0574b655a952bee70028e8c15e47188f8 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -114,7 +114,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); user = vm86->user_vm86; - if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ? + if (!access_ok(user, vm86->vm86plus.is_vm86pus ? sizeof(struct vm86plus_struct) : sizeof(struct vm86_struct))) { pr_alert("could not access userspace vm86 info\n"); @@ -278,7 +278,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) if (vm86->saved_sp0) return -EPERM; - if (!access_ok(VERIFY_READ, user_vm86, plus ? + if (!access_ok(user_vm86, plus ? sizeof(struct vm86_struct) : sizeof(struct vm86plus_struct))) return -EFAULT; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 5dd3317d761f4065b0fc8f7cdcac4e08be600f0e..85e6d5620188e57544d008afd40f76e04960bd20 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -372,7 +372,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) - *(.bss) + *(BSS_MAIN) BSS_DECRYPTED . = ALIGN(PAGE_SIZE); __bss_stop = .; @@ -411,7 +411,7 @@ SECTIONS * Per-cpu symbols which need to be offset from __per_cpu_load * for the boot processor. 
*/ -#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load +#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 2792b5573818fff859673e439a8e3be1c23b6054..915a30549bc6b38379edebdadad1d13cf5a6fd0a 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -59,6 +59,7 @@ struct x86_init_ops x86_init __initdata = { .pre_vector_init = init_ISA_irqs, .intr_init = native_init_IRQ, .trap_init = x86_init_noop, + .intr_mode_select = apic_intr_mode_select, .intr_mode_init = apic_intr_mode_init }, diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 1bbec387d289cb785e4acbd28389e5e071fdfdbb..80abc68b3e90cc49f227ccd97ae852fa0013b533 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -42,6 +42,7 @@ config KVM select PERF_EVENTS select HAVE_KVM_MSI select HAVE_KVM_CPU_RELAX_INTERCEPT + select HAVE_KVM_NO_POLL select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_VFIO select SRCU diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7bcfa61375c097fa71e0d6b168f8493c8a9f4ad9..0d15273bb4a036621119cb43d9183421e4349f82 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -282,19 +282,36 @@ static void cpuid_mask(u32 *word, int wordnum) *word &= boot_cpu_data.x86_capability[wordnum]; } -static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, +static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { entry->function = function; entry->index = index; + entry->flags = 0; + cpuid_count(entry->function, entry->index, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); - entry->flags = 0; + + switch (function) { + case 2: + entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; + break; + case 4: + case 7: + case 0xb: + case 0xd: + entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + break; + } } -static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, - u32 func, u32 index, int *nent, int maxnent) +static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, + u32 func, int *nent, int maxnent) { + entry->function = func; + entry->index = 0; + entry->flags = 0; + switch (func) { case 0: entry->eax = 7; @@ -306,21 +323,100 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, break; case 7: entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; - if (index == 0) - entry->ecx = F(RDPID); + entry->eax = 0; + entry->ecx = F(RDPID); ++*nent; default: break; } - entry->function = func; - entry->index = index; - return 0; } -static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, - u32 index, int *nent, int maxnent) +static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) +{ + unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; + unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; + unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; + unsigned f_la57; + unsigned f_pku = kvm_x86_ops->pku_supported() ? 
F(PKU) : 0; + + /* cpuid 7.0.ebx */ + const u32 kvm_cpuid_7_0_ebx_x86_features = + F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | + F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | + F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | + F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | + F(SHA_NI) | F(AVX512BW) | F(AVX512VL); + + /* cpuid 7.0.ecx*/ + const u32 kvm_cpuid_7_0_ecx_x86_features = + F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) | + F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | + F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | + F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B); + + /* cpuid 7.0.edx*/ + const u32 kvm_cpuid_7_0_edx_x86_features = + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | + F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | + F(MD_CLEAR); + + /* cpuid 7.1.eax */ + const u32 kvm_cpuid_7_1_eax_x86_features = + F(AVX512_BF16); + + switch (index) { + case 0: + entry->eax = min(entry->eax, 1u); + entry->ebx &= kvm_cpuid_7_0_ebx_x86_features; + cpuid_mask(&entry->ebx, CPUID_7_0_EBX); + // TSC_ADJUST is emulated + entry->ebx |= F(TSC_ADJUST); + entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + f_la57 = entry->ecx & F(LA57); + cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* Set LA57 based on hardware capability. */ + entry->ecx |= f_la57; + entry->ecx |= f_umip; + entry->ecx |= f_pku; + /* PKU is not yet implemented for shadow paging. */ + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) + entry->ecx &= ~F(PKU); + + entry->edx &= kvm_cpuid_7_0_edx_x86_features; + cpuid_mask(&entry->edx, CPUID_7_EDX); + if (boot_cpu_has(X86_FEATURE_IBPB) && + boot_cpu_has(X86_FEATURE_IBRS)) + entry->edx |= F(SPEC_CTRL); + if (boot_cpu_has(X86_FEATURE_STIBP)) + entry->edx |= F(INTEL_STIBP); + if (boot_cpu_has(X86_FEATURE_SSBD)) + entry->edx |= F(SPEC_CTRL_SSBD); + /* + * We emulate ARCH_CAPABILITIES in software even + * if the host doesn't support it. + */ + entry->edx |= F(ARCH_CAPABILITIES); + break; + case 1: + entry->eax &= kvm_cpuid_7_1_eax_x86_features; + entry->ebx = 0; + entry->ecx = 0; + entry->edx = 0; + break; + default: + WARN_ON_ONCE(1); + entry->eax = 0; + entry->ebx = 0; + entry->ecx = 0; + entry->edx = 0; + break; + } +} + +static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, + int *nent, int maxnent) { int r; unsigned f_nx = is_efer_nx() ? F(NX) : 0; @@ -333,10 +429,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_lm = 0; #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; - unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; - unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; - unsigned f_umip = kvm_x86_ops->umip_emulated() ? 
F(UMIP) : 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -381,7 +474,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | - F(AMD_SSB_NO); + F(AMD_SSB_NO) | F(AMD_STIBP); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = @@ -389,39 +482,19 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | F(PMM) | F(PMM_EN); - /* cpuid 7.0.ebx */ - const u32 kvm_cpuid_7_0_ebx_x86_features = - F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | - F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | - F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | - F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | - F(SHA_NI) | F(AVX512BW) | F(AVX512VL); - /* cpuid 0xD.1.eax */ const u32 kvm_cpuid_D_1_eax_x86_features = F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves; - /* cpuid 7.0.ecx*/ - const u32 kvm_cpuid_7_0_ecx_x86_features = - F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | - F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | - F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | - F(CLDEMOTE); - - /* cpuid 7.0.edx*/ - const u32 kvm_cpuid_7_0_edx_x86_features = - F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | - F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); - /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); r = -E2BIG; - if (*nent >= maxnent) + if (WARN_ON(*nent >= maxnent)) goto out; - do_cpuid_1_ent(entry, function, index); + do_host_cpuid(entry, function, 0); ++*nent; switch (function) { @@ -444,14 +517,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, case 2: { int t, times = entry->eax & 0xff; - entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; for (t = 1; t < times; ++t) { if (*nent >= maxnent) goto out; - do_cpuid_1_ent(&entry[t], function, 0); - entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; + do_host_cpuid(&entry[t], function, 0); ++*nent; } break; @@ -460,7 +531,6 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, case 4: { int i, cache_type; - entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until cache_type is zero */ for (i = 1; ; ++i) { if (*nent >= maxnent) @@ -469,9 +539,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, cache_type = entry[i - 1].eax & 0x1f; if (!cache_type) break; - do_cpuid_1_ent(&entry[i], function, i); - entry[i].flags |= - KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + do_host_cpuid(&entry[i], function, i); ++*nent; } break; @@ -482,33 +550,21 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx = 0; entry->edx = 0; break; + /* function 7 has additional index. */ case 7: { - entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; - /* Mask ebx against host capability word 9 */ - if (index == 0) { - entry->ebx &= kvm_cpuid_7_0_ebx_x86_features; - cpuid_mask(&entry->ebx, CPUID_7_0_EBX); - // TSC_ADJUST is emulated - entry->ebx |= F(TSC_ADJUST); - entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; - cpuid_mask(&entry->ecx, CPUID_7_ECX); - entry->ecx |= f_umip; - /* PKU is not yet implemented for shadow paging. 
*/ - if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) - entry->ecx &= ~F(PKU); - entry->edx &= kvm_cpuid_7_0_edx_x86_features; - cpuid_mask(&entry->edx, CPUID_7_EDX); - /* - * We emulate ARCH_CAPABILITIES in software even - * if the host doesn't support it. - */ - entry->edx |= F(ARCH_CAPABILITIES); - } else { - entry->ebx = 0; - entry->ecx = 0; - entry->edx = 0; + int i; + + for (i = 0; ; ) { + do_cpuid_7_mask(&entry[i], i); + if (i == entry->eax) + break; + if (*nent >= maxnent) + goto out; + + ++i; + do_host_cpuid(&entry[i], function, i); + ++*nent; } - entry->eax = 0; break; } case 9: @@ -546,7 +602,6 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, case 0xb: { int i, level_type; - entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until level_type is zero */ for (i = 1; ; ++i) { if (*nent >= maxnent) @@ -555,9 +610,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, level_type = entry[i - 1].ecx & 0xff00; if (!level_type) break; - do_cpuid_1_ent(&entry[i], function, i); - entry[i].flags |= - KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + do_host_cpuid(&entry[i], function, i); ++*nent; } break; @@ -570,7 +623,6 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ebx = xstate_required_size(supported, false); entry->ecx = entry->ebx; entry->edx &= supported >> 32; - entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; if (!supported) break; @@ -579,7 +631,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (*nent >= maxnent) goto out; - do_cpuid_1_ent(&entry[i], function, idx); + do_host_cpuid(&entry[i], function, idx); if (idx == 1) { entry[i].eax &= kvm_cpuid_D_1_eax_x86_features; cpuid_mask(&entry[i].eax, CPUID_D_1_EAX); @@ -596,8 +648,6 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } entry[i].ecx = 0; entry[i].edx = 0; - entry[i].flags |= - KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; ++i; } @@ -622,7 +672,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_PV_UNHALT) | (1 << KVM_FEATURE_PV_TLB_FLUSH) | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | - (1 << KVM_FEATURE_PV_SEND_IPI); + (1 << KVM_FEATURE_PV_SEND_IPI) | + (1 << KVM_FEATURE_POLL_CONTROL); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); @@ -713,21 +764,22 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, return r; } -static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func, - u32 idx, int *nent, int maxnent, unsigned int type) +static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func, + int *nent, int maxnent, unsigned int type) { + if (*nent >= maxnent) + return -E2BIG; + if (type == KVM_GET_EMULATED_CPUID) - return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent); + return __do_cpuid_func_emulated(entry, func, nent, maxnent); - return __do_cpuid_ent(entry, func, idx, nent, maxnent); + return __do_cpuid_func(entry, func, nent, maxnent); } #undef F struct kvm_cpuid_param { u32 func; - u32 idx; - bool has_leaf_count; bool (*qualifier)(const struct kvm_cpuid_param *param); }; @@ -771,11 +823,10 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, int limit, nent = 0, r = -E2BIG, i; u32 func; static const struct kvm_cpuid_param param[] = { - { .func = 0, .has_leaf_count = true }, - { .func = 0x80000000, .has_leaf_count = true }, - { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true }, + { .func = 0 }, + { .func 
= 0x80000000 }, + { .func = 0xC0000000, .qualifier = is_centaur_cpu }, { .func = KVM_CPUID_SIGNATURE }, - { .func = KVM_CPUID_FEATURES }, }; if (cpuid->nent < 1) @@ -799,19 +850,16 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, if (ent->qualifier && !ent->qualifier(ent)) continue; - r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx, - &nent, cpuid->nent, type); + r = do_cpuid_func(&cpuid_entries[nent], ent->func, + &nent, cpuid->nent, type); if (r) goto out_free; - if (!ent->has_leaf_count) - continue; - limit = cpuid_entries[nent - 1].eax; for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func) - r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx, - &nent, cpuid->nent, type); + r = do_cpuid_func(&cpuid_entries[nent], func, + &nent, cpuid->nent, type); if (r) goto out_free; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 9a327d5b6d1f5bf420c7f15c22cefcdddc921f73..d78a61408243f20e8369bc5acaa3562d7c34d0aa 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, - [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, - [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX}, [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX}, [CPUID_6_EAX] = { 6, 0, CPUID_EAX}, [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 106482da6388e29cec054aa72dc5872708d9cd4a..366d8d96b139cf0b8b825e9f90d2ca42aa6d14b4 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -456,7 +456,7 @@ FOP_END; /* * XXX: inoutclob user must know where the argument is being expanded. - * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault. + * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault. */ #define asm_safe(insn, inoutclob...) \ ({ \ @@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt) static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) { +#ifdef CONFIG_X86_64 u32 eax, ebx, ecx, edx; eax = 0x80000001; ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); return edx & bit(X86_FEATURE_LM); +#else + return false; +#endif } #define GET_SMSTATE(type, smbase, offset) \ @@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) return X86EMUL_CONTINUE; } +#ifdef CONFIG_X86_64 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) { struct desc_struct desc; @@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); return X86EMUL_CONTINUE; } +#endif static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, u64 cr0, u64 cr3, u64 cr4) @@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); } +#ifdef CONFIG_X86_64 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; @@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) return X86EMUL_CONTINUE; } +#endif static int em_rsm(struct x86_emulate_ctxt *ctxt) { @@ -2575,15 +2583,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU * supports long mode. 
*/ - cr4 = ctxt->ops->get_cr(ctxt, 4); if (emulator_has_longmode(ctxt)) { struct desc_struct cs_desc; /* Zero CR4.PCIDE before CR0.PG. */ - if (cr4 & X86_CR4_PCIDE) { + cr4 = ctxt->ops->get_cr(ctxt, 4); + if (cr4 & X86_CR4_PCIDE) ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); - cr4 &= ~X86_CR4_PCIDE; - } /* A 32-bit code segment is required to clear EFER.LMA. */ memset(&cs_desc, 0, sizeof(cs_desc)); @@ -2597,13 +2603,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) if (cr0 & X86_CR0_PE) ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); - /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */ - if (cr4 & X86_CR4_PAE) - ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); + if (emulator_has_longmode(ctxt)) { + /* Clear CR4.PAE before clearing EFER.LME. */ + cr4 = ctxt->ops->get_cr(ctxt, 4); + if (cr4 & X86_CR4_PAE) + ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); - /* And finally go back to 32-bit mode. */ - efer = 0; - ctxt->ops->set_msr(ctxt, MSR_EFER, efer); + /* And finally go back to 32-bit mode. */ + efer = 0; + ctxt->ops->set_msr(ctxt, MSR_EFER, efer); + } smbase = ctxt->ops->get_smbase(ctxt); @@ -2615,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) if (ctxt->ops->pre_leave_smm(ctxt, smbase)) return X86EMUL_UNHANDLEABLE; +#ifdef CONFIG_X86_64 if (emulator_has_longmode(ctxt)) ret = rsm_load_state_64(ctxt, smbase + 0x8000); else +#endif ret = rsm_load_state_32(ctxt, smbase + 0x8000); if (ret != X86EMUL_CONTINUE) { @@ -2711,7 +2722,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; - /* default: (not Intel, not AMD), apply Intel's stricter rules... */ + /* Hygon ("HygonGenuine") */ + if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx && + ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx && + edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx) + return true; + + /* + * default: (not Intel, not AMD, not Hygon), apply Intel's + * stricter rules... + */ return false; } @@ -3983,6 +4003,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_clflushopt(struct x86_emulate_ctxt *ctxt) +{ + /* emulating clflushopt regardless of cpuid */ + return X86EMUL_CONTINUE; +} + static int em_movsxd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = (s32) ctxt->src.val; @@ -4496,7 +4522,7 @@ static const struct opcode group11[] = { }; static const struct gprefix pfx_0f_ae_7 = { - I(SrcMem | ByteOp, em_clflush), N, N, N, + I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N, }; static const struct group_dual group15 = { { @@ -5357,6 +5383,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->memopp->addr.mem.ea + ctxt->_eip); done: + if (rc == X86EMUL_PROPAGATE_FAULT) + ctxt->have_exception = true; return (rc != X86EMUL_CONTINUE) ? 
EMULATION_FAILED : EMULATION_OK; } diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 01d209ab5481b0edddb020bbc4b701f4c27d3055..426f612f412f852c5e7f04f55c13bb80223d4c32 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) struct kvm_vcpu *vcpu = NULL; int i; - if (vpidx < KVM_MAX_VCPUS) - vcpu = kvm_get_vcpu(kvm, vpidx); + if (vpidx >= KVM_MAX_VCPUS) + return NULL; + + vcpu = kvm_get_vcpu(kvm, vpidx); if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -235,7 +237,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, struct kvm_vcpu *vcpu = synic_to_vcpu(synic); int ret; - if (!synic->active && !host) + if (!synic->active && (!host || data)) return 1; trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); @@ -281,6 +283,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, case HV_X64_MSR_EOM: { int i; + if (!synic->active) + break; + for (i = 0; i < ARRAY_SIZE(synic->sint); i++) kvm_hv_notify_acked_sint(vcpu, i); break; @@ -542,6 +547,12 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, bool host) { + struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + + if (!synic->active && (!host || config)) + return 1; + trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, stimer->index, config, host); @@ -556,6 +567,12 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, bool host) { + struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); + struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); + + if (!synic->active && (!host || count)) + return 1; + trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, stimer->index, count, host); @@ -689,6 +706,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) stimer_cleanup(&hv_vcpu->stimer[i]); } +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) + return false; + return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; +} +EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); + +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page) +{ + if (!kvm_hv_assist_page_enabled(vcpu)) + return false; + return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, + assist_page, sizeof(*assist_page)); +} +EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); + static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) { struct hv_message *msg = &stimer->msg; @@ -1040,21 +1075,41 @@ static u64 current_task_runtime_100ns(void) static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { - case HV_X64_MSR_VP_INDEX: - if (!host) + case HV_X64_MSR_VP_INDEX: { + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + int vcpu_idx = kvm_vcpu_get_idx(vcpu); + u32 new_vp_index = (u32)data; + + if (!host || new_vp_index >= KVM_MAX_VCPUS) return 1; - hv->vp_index = (u32)data; + + if (new_vp_index == hv_vcpu->vp_index) + return 0; + + /* + * The VP index is initialized to vcpu_index by + * kvm_hv_vcpu_postcreate so they initially match. 
Now the + * VP index is changing, adjust num_mismatched_vp_indexes if + * it now matches or no longer matches vcpu_idx. + */ + if (hv_vcpu->vp_index == vcpu_idx) + atomic_inc(&hv->num_mismatched_vp_indexes); + else if (new_vp_index == vcpu_idx) + atomic_dec(&hv->num_mismatched_vp_indexes); + + hv_vcpu->vp_index = new_vp_index; break; + } case HV_X64_MSR_VP_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { - hv->hv_vapic = data; - if (kvm_lapic_enable_pv_eoi(vcpu, 0)) + hv_vcpu->hv_vapic = data; + if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0)) return 1; break; } @@ -1064,10 +1119,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; - hv->hv_vapic = data; + hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, - gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) + gfn_to_gpa(gfn) | KVM_MSR_ENABLED, + sizeof(struct hv_vp_assist_page))) return 1; break; } @@ -1080,7 +1136,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_VP_RUNTIME: if (!host) return 1; - hv->runtime_offset = data - current_task_runtime_100ns(); + hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: @@ -1172,11 +1228,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { u64 data = 0; - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { case HV_X64_MSR_VP_INDEX: - data = hv->vp_index; + data = hv_vcpu->vp_index; break; case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); @@ -1185,10 +1241,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_VP_ASSIST_PAGE: - data = hv->hv_vapic; + data = hv_vcpu->hv_vapic; break; case HV_X64_MSR_VP_RUNTIME: - data = current_task_runtime_100ns() + hv->runtime_offset; + data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: @@ -1291,7 +1347,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, flush.address_space, flush.flags); sparse_banks[0] = flush.processor_mask; - all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; + + /* + * Work around possible WS2012 bug: it sends hypercalls + * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, + * while also expecting us to flush something and crashing if + * we don't. Let's treat processor_mask == 0 same as + * HV_FLUSH_ALL_PROCESSORS. 
+ */ + all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || + flush.processor_mask == 0; } else { if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex, sizeof(flush_ex)))) diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index d6aa969e20f19f518e420be2ff9d8b7f91282994..9c21c34798993331e12d74c33116450b69dc85c3 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -24,6 +24,8 @@ #ifndef __ARCH_X86_KVM_HYPERV_H__ #define __ARCH_X86_KVM_HYPERV_H__ +#include + static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu) { return &vcpu->arch.hyperv; @@ -62,6 +64,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page); + static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, int timer_index) { diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index faa264822cee3c658280d122cc3495fc93587cf5..007bc654f928a17e731963f2ba1c6cae00f653d5 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu) __kvm_migrate_apic_timer(vcpu); __kvm_migrate_pit_timer(vcpu); } + +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) +{ + bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE; + + return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm); +} diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index d5005cc265217c4fa4d5de7c2ecc2a29fb599c19..fd210cdd49839dea0f33367376e13a089177c3af 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm) return mode != KVM_IRQCHIP_NONE; } +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 9619dcc2b32593422381445f34339bf11f7e6e95..f8f56a93358ba0d53b3fa509f6129911c518810e 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -2,6 +2,8 @@ #ifndef ASM_KVM_CACHE_REGS_H #define ASM_KVM_CACHE_REGS_H +#include + #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index fbb0e6df121b2f3d8c3a4d89c43e8cac3ca694e8..262e49301cae61caf76b7e875769f20301a71c83 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -55,7 +55,7 @@ #define PRIo64 "o" /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ -#define apic_debug(fmt, arg...) +#define apic_debug(fmt, arg...) 
do {} while (0) /* 14 is the version for Xeon and Pentium 8.4.8*/ #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) @@ -133,6 +133,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, if (offset <= max_apic_id) { u8 cluster_size = min(max_apic_id - offset + 1, 16U); + offset = array_index_nospec(offset, map->max_apic_id + 1); *cluster = &map->phys_map[offset]; *mask = dest_id & (0xffff >> (16 - cluster_size)); } else { @@ -208,6 +209,9 @@ static void recalculate_apic_map(struct kvm *kvm) if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) new->phys_map[xapic_id] = apic; + if (!kvm_apic_sw_enabled(apic)) + continue; + ldr = kvm_lapic_get_reg(apic, APIC_LDR); if (apic_x2apic_mode(apic)) { @@ -251,6 +255,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) recalculate_apic_map(apic->vcpu->kvm); } else static_key_slow_inc(&apic_sw_disabled.key); + + recalculate_apic_map(apic->vcpu->kvm); } } @@ -571,6 +577,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, rcu_read_lock(); map = rcu_dereference(kvm->arch.apic_map); + if (unlikely(!map)) { + count = -EOPNOTSUPP; + goto out; + } + if (min > map->max_apic_id) goto out; /* Bits above cluster_size are masked in the caller. */ @@ -891,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, if (irq->dest_id > map->max_apic_id) { *bitmap = 0; } else { - *dst = &map->phys_map[irq->dest_id]; + u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1); + *dst = &map->phys_map[dest_id]; *bitmap = 1; } return true; @@ -1442,7 +1454,7 @@ static void apic_timer_expired(struct kvm_lapic *apic) if (swait_active(q)) swake_up_one(q); - if (apic_lvtt_tscdeadline(apic)) + if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use) ktimer->expired_tscdeadline = ktimer->tscdeadline; } @@ -2268,7 +2280,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) struct kvm_lapic *apic = vcpu->arch.apic; u32 ppr; - if (!apic_enabled(apic)) + if (!kvm_apic_hw_enabled(apic)) return -1; __apic_update_ppr(apic, &ppr); @@ -2621,17 +2633,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) return 0; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) { u64 addr = data & ~KVM_MSR_ENABLED; + struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; + unsigned long new_len; + if (!IS_ALIGNED(addr, 4)) return 1; vcpu->arch.pv_eoi.msr_val = data; if (!pv_eoi_enabled(vcpu)) return 0; - return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, sizeof(u8)); + + if (addr == ghc->gpa && len <= ghc->len) + new_len = ghc->len; + else + new_len = len; + + return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len); } void kvm_apic_accept_events(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index ed0ed39abd36970601e39ed04edde1850c849ece..ff6ef9c3d760c7d6db6d5ee86d1a0bb21c1c63b5 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); void kvm_lapic_init(void); void kvm_lapic_exit(void); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 
51b953ad9d4efe0e10a032228cc1418ff22167fe..873e3b6948ffc1cb952e28e4f8b3d435c092a93d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -49,6 +50,30 @@ #include #include "trace.h" +extern bool itlb_multihit_kvm_mitigation; + +static int __read_mostly nx_huge_pages = -1; +static uint __read_mostly nx_huge_pages_recovery_ratio = 60; + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp); + +static struct kernel_param_ops nx_huge_pages_ops = { + .set = set_nx_huge_pages, + .get = param_get_bool, +}; + +static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = { + .set = set_nx_huge_pages_recovery_ratio, + .get = param_get_uint, +}; + +module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); +__MODULE_PARM_TYPE(nx_huge_pages, "bool"); +module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops, + &nx_huge_pages_recovery_ratio, 0644); +__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); + /* * When setting this variable to true it enables Two-Dimensional-Paging * where the hardware walks 2 page tables: @@ -85,7 +110,16 @@ module_param(dbg, bool, 0644); #define PTE_PREFETCH_NUM 8 #define PT_FIRST_AVAIL_BITS_SHIFT 10 -#define PT64_SECOND_AVAIL_BITS_SHIFT 52 +#define PT64_SECOND_AVAIL_BITS_SHIFT 54 + +/* + * The mask used to denote special SPTEs, which can be either MMIO SPTEs or + * Access Tracking SPTEs. + */ +#define SPTE_SPECIAL_MASK (3ULL << 52) +#define SPTE_AD_ENABLED_MASK (0ULL << 52) +#define SPTE_AD_DISABLED_MASK (1ULL << 52) +#define SPTE_MMIO_MASK (3ULL << 52) #define PT64_LEVEL_BITS 9 @@ -140,9 +174,6 @@ module_param(dbg, bool, 0644); #include -#define CREATE_TRACE_POINTS -#include "mmutrace.h" - #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) #define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) @@ -217,16 +248,16 @@ static u64 __read_mostly shadow_accessed_mask; static u64 __read_mostly shadow_dirty_mask; static u64 __read_mostly shadow_mmio_mask; static u64 __read_mostly shadow_mmio_value; +static u64 __read_mostly shadow_mmio_access_mask; static u64 __read_mostly shadow_present_mask; static u64 __read_mostly shadow_me_mask; /* - * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value. - * Non-present SPTEs with shadow_acc_track_value set are in place for access - * tracking. + * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK; + * shadow_acc_track_mask is the set of bits to be cleared in non-accessed + * pages. */ static u64 __read_mostly shadow_acc_track_mask; -static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK; /* * The mask/shift to use for saving the original R/X bits when marking the PTE @@ -259,19 +290,38 @@ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; */ static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; +/* + * The number of non-reserved physical address bits irrespective of features + * that repurpose legal bits, e.g. MKTME. 
+ */ +static u8 __read_mostly shadow_phys_bits; static void mmu_spte_set(u64 *sptep, u64 spte); +static bool is_executable_pte(u64 spte); static union kvm_mmu_page_role kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu); -void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value) +#define CREATE_TRACE_POINTS +#include "mmutrace.h" + + +void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) { + BUG_ON((u64)(unsigned)access_mask != access_mask); BUG_ON((mmio_mask & mmio_value) != mmio_value); - shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK; + WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); + WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); + shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; + shadow_mmio_access_mask = access_mask; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); +static bool is_mmio_spte(u64 spte) +{ + return (spte & shadow_mmio_mask) == shadow_mmio_value; +} + static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) { return sp->role.ad_disabled; @@ -279,19 +329,24 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) static inline bool spte_ad_enabled(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); - return !(spte & shadow_acc_track_value); + MMU_WARN_ON(is_mmio_spte(spte)); + return (spte & SPTE_SPECIAL_MASK) == SPTE_AD_ENABLED_MASK; +} + +static bool is_nx_huge_page_enabled(void) +{ + return READ_ONCE(nx_huge_pages); } static inline u64 spte_shadow_accessed_mask(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); + MMU_WARN_ON(is_mmio_spte(spte)); return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; } static inline u64 spte_shadow_dirty_mask(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); + MMU_WARN_ON(is_mmio_spte(spte)); return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; } @@ -301,57 +356,71 @@ static inline bool is_access_track_spte(u64 spte) } /* - * the low bit of the generation number is always presumed to be zero. - * This disables mmio caching during memslot updates. The concept is - * similar to a seqcount but instead of retrying the access we just punt - * and ignore the cache. + * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of + * the memslots generation and is derived as follows: + * + * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11 + * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62 * - * spte bits 3-11 are used as bits 1-9 of the generation number, - * the bits 52-61 are used as bits 10-19 of the generation number. + * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in + * the MMIO generation number, as doing so would require stealing a bit from + * the "real" generation number and thus effectively halve the maximum number + * of MMIO generations that can be handled before encountering a wrap (which + * requires a full MMU zap). The flag is instead explicitly queried when + * checking for MMIO spte cache hits. 
*/ -#define MMIO_SPTE_GEN_LOW_SHIFT 2 -#define MMIO_SPTE_GEN_HIGH_SHIFT 52 -#define MMIO_GEN_SHIFT 20 -#define MMIO_GEN_LOW_SHIFT 10 -#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2) -#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1) +#define MMIO_SPTE_GEN_LOW_START 3 +#define MMIO_SPTE_GEN_LOW_END 11 + +#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT +#define MMIO_SPTE_GEN_HIGH_END 62 + +#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ + MMIO_SPTE_GEN_LOW_START) +#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ + MMIO_SPTE_GEN_HIGH_START) + +#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) +#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) + +/* remember to adjust the comment above as well if you change these */ +static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9); + +#define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) +#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) -static u64 generation_mmio_spte_mask(unsigned int gen) +#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) + +static u64 generation_mmio_spte_mask(u64 gen) { u64 mask; - WARN_ON(gen & ~MMIO_GEN_MASK); + WARN_ON(gen & ~MMIO_SPTE_GEN_MASK); + BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK); - mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT; - mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT; + mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK; + mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK; return mask; } -static unsigned int get_mmio_spte_generation(u64 spte) +static u64 get_mmio_spte_generation(u64 spte) { - unsigned int gen; - - spte &= ~shadow_mmio_mask; + u64 gen; - gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; - gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; + gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT; + gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT; return gen; } -static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK; -} - static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, unsigned access) { - unsigned int gen = kvm_current_mmio_generation(vcpu); + u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK; u64 mask = generation_mmio_spte_mask(gen); u64 gpa = gfn << PAGE_SHIFT; - access &= ACC_WRITE_MASK | ACC_USER_MASK; + access &= shadow_mmio_access_mask; mask |= shadow_mmio_value | access; mask |= gpa | shadow_nonpresent_or_rsvd_mask; mask |= (gpa & shadow_nonpresent_or_rsvd_mask) @@ -361,11 +430,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, mmu_spte_set(sptep, mask); } -static bool is_mmio_spte(u64 spte) -{ - return (spte & shadow_mmio_mask) == shadow_mmio_value; -} - static gfn_t get_mmio_spte_gfn(u64 spte) { u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; @@ -378,8 +442,7 @@ static gfn_t get_mmio_spte_gfn(u64 spte) static unsigned get_mmio_spte_access(u64 spte) { - u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask; - return (spte & ~mask) & ~PAGE_MASK; + return spte & shadow_mmio_access_mask; } static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, @@ -395,9 +458,13 @@ static bool set_mmio_spte(struct 
kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) { - unsigned int kvm_gen, spte_gen; + u64 kvm_gen, spte_gen, gen; - kvm_gen = kvm_current_mmio_generation(vcpu); + gen = kvm_vcpu_memslots(vcpu)->generation; + if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) + return false; + + kvm_gen = gen & MMIO_SPTE_GEN_MASK; spte_gen = get_mmio_spte_generation(spte); trace_check_mmio_spte(spte, kvm_gen, spte_gen); @@ -417,7 +484,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, { BUG_ON(!dirty_mask != !accessed_mask); BUG_ON(!accessed_mask && !acc_track_mask); - BUG_ON(acc_track_mask & shadow_acc_track_value); + BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK); shadow_user_mask = user_mask; shadow_accessed_mask = accessed_mask; @@ -430,6 +497,21 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); +static u8 kvm_get_shadow_phys_bits(void) +{ + /* + * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected + * in CPU detection code, but MKTME treats those reduced bits as + * 'keyID' thus they are not reserved bits. Therefore for MKTME + * we should still return physical address bits reported by CPUID. + */ + if (!boot_cpu_has(X86_FEATURE_TME) || + WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) + return boot_cpu_data.x86_phys_bits; + + return cpuid_eax(0x80000008) & 0xff; +} + static void kvm_mmu_reset_all_pte_masks(void) { u8 low_phys_bits; @@ -443,20 +525,29 @@ static void kvm_mmu_reset_all_pte_masks(void) shadow_present_mask = 0; shadow_acc_track_mask = 0; + shadow_phys_bits = kvm_get_shadow_phys_bits(); + /* * If the CPU has 46 or less physical address bits, then set an * appropriate mask to guard against L1TF attacks. Otherwise, it is * assumed that the CPU is not vulnerable to L1TF. + * + * Some Intel CPUs address the L1 cache using more PA bits than are + * reported by CPUID. Use the PA width of the L1 cache when possible + * to achieve more effective mitigation, e.g. if system RAM overlaps + * the most significant bits of legal physical address space. 
*/ + shadow_nonpresent_or_rsvd_mask = 0; low_phys_bits = boot_cpu_data.x86_phys_bits; - if (boot_cpu_data.x86_phys_bits < - 52 - shadow_nonpresent_or_rsvd_mask_len) { + if (boot_cpu_has_bug(X86_BUG_L1TF) && + !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= + 52 - shadow_nonpresent_or_rsvd_mask_len)) { + low_phys_bits = boot_cpu_data.x86_cache_bits + - shadow_nonpresent_or_rsvd_mask_len; shadow_nonpresent_or_rsvd_mask = - rsvd_bits(boot_cpu_data.x86_phys_bits - - shadow_nonpresent_or_rsvd_mask_len, - boot_cpu_data.x86_phys_bits - 1); - low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; + rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); } + shadow_nonpresent_or_rsvd_lower_gfn_mask = GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); } @@ -1027,10 +1118,16 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) { - if (sp->role.direct) - BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); - else + if (!sp->role.direct) { sp->gfns[index] = gfn; + return; + } + + if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) + pr_err_ratelimited("gfn mismatch under direct page %llx " + "(expected %llx, got %llx)\n", + sp->gfn, + kvm_mmu_page_get_gfn(sp, index), gfn); } /* @@ -1089,6 +1186,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_gfn_disallow_lpage(slot, gfn); } +static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + if (sp->lpage_disallowed) + return; + + ++kvm->stat.nx_lpage_splits; + list_add_tail(&sp->lpage_disallowed_link, + &kvm->arch.lpage_disallowed_mmu_pages); + sp->lpage_disallowed = true; +} + static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; @@ -1106,6 +1214,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_gfn_allow_lpage(slot, gfn); } +static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + --kvm->stat.nx_lpage_splits; + sp->lpage_disallowed = false; + list_del(&sp->lpage_disallowed_link); +} + static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, struct kvm_memory_slot *slot) { @@ -1954,7 +2069,7 @@ static int is_empty_shadow_page(u64 *spt) * aggregate version in order to make the slab shrinker * faster */ -static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) +static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) { kvm->arch.n_used_mmu_pages += nr; percpu_counter_add(&kvm_total_used_mmu_pages, nr); @@ -2525,7 +2640,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, shadow_user_mask | shadow_x_mask | shadow_me_mask; if (sp_ad_disabled(sp)) - spte |= shadow_acc_track_value; + spte |= SPTE_AD_DISABLED_MASK; else spte |= shadow_accessed_mask; @@ -2658,6 +2773,9 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, kvm_reload_remote_mmus(kvm); } + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); + sp->role.invalid = 1; return ret; } @@ -2704,7 +2822,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, * Changing the number of mmu pages allocated to the vm * Note: if goal_nr_mmu_pages is too small, you will get dead lock */ -void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) +void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) { LIST_HEAD(invalid_list); @@ -2854,7 +2972,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, sp = 
page_header(__pa(sptep)); if (sp_ad_disabled(sp)) - spte |= shadow_acc_track_value; + spte |= SPTE_AD_DISABLED_MASK; /* * For the EPT case, shadow_present_mask is 0 if hardware @@ -2866,6 +2984,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (!speculative) spte |= spte_shadow_accessed_mask(spte); + if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) && + is_nx_huge_page_enabled()) { + pte_access &= ~ACC_EXEC_MASK; + } + if (pte_access & ACC_EXEC_MASK) spte |= shadow_x_mask; else @@ -2986,10 +3109,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, ret = RET_PF_EMULATE; pgprintk("%s: setting spte %llx\n", __func__, *sptep); - pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", - is_large_pte(*sptep)? "2MB" : "4kB", - *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn, - *sptep, sptep); + trace_kvm_mmu_set_spte(level, gfn, sptep); if (!was_rmapped && is_large_pte(*sptep)) ++vcpu->kvm->stat.lpages; @@ -3001,8 +3121,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, } } - kvm_release_pfn_clean(pfn); - return ret; } @@ -3037,9 +3155,11 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, if (ret <= 0) return -1; - for (i = 0; i < ret; i++, gfn++, start++) + for (i = 0; i < ret; i++, gfn++, start++) { mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, page_to_pfn(pages[i]), true, true); + put_page(pages[i]); + } return 0; } @@ -3087,40 +3207,71 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) __direct_pte_prefetch(vcpu, sp, sptep); } -static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, - int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault) +static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, + gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) { - struct kvm_shadow_walk_iterator iterator; + int level = *levelp; + u64 spte = *it.sptep; + + if (it.level == level && level > PT_PAGE_TABLE_LEVEL && + is_nx_huge_page_enabled() && + is_shadow_present_pte(spte) && + !is_large_pte(spte)) { + /* + * A small SPTE exists for this pfn, but FNAME(fetch) + * and __direct_map would like to create a large PTE + * instead: just force them to go down another level, + * patching back for them into pfn the next 9 bits of + * the address. + */ + u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); + *pfnp |= gfn & page_mask; + (*levelp)--; + } +} + +static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write, + int map_writable, int level, kvm_pfn_t pfn, + bool prefault, bool lpage_disallowed) +{ + struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; - int emulate = 0; - gfn_t pseudo_gfn; + int ret; + gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t base_gfn = gfn; if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return 0; + return RET_PF_RETRY; - for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { - if (iterator.level == level) { - emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, - write, level, gfn, pfn, prefault, - map_writable); - direct_pte_prefetch(vcpu, iterator.sptep); - ++vcpu->stat.pf_fixed; - break; - } + trace_kvm_mmu_spte_requested(gpa, level, pfn); + for_each_shadow_entry(vcpu, gpa, it) { + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. 
+ */ + disallowed_hugepage_adjust(it, gfn, &pfn, &level); - drop_large_spte(vcpu, iterator.sptep); - if (!is_shadow_present_pte(*iterator.sptep)) { - u64 base_addr = iterator.addr; + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == level) + break; - base_addr &= PT64_LVL_ADDR_MASK(iterator.level); - pseudo_gfn = base_addr >> PAGE_SHIFT; - sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, - iterator.level - 1, 1, ACC_ALL); + drop_large_spte(vcpu, it.sptep); + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, + it.level - 1, true, ACC_ALL); - link_shadow_page(vcpu, iterator.sptep, sp); + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); } } - return emulate; + + ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, + write, level, base_gfn, pfn, prefault, + map_writable); + direct_pte_prefetch(vcpu, it.sptep); + ++vcpu->stat.pf_fixed; + return ret; } static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) @@ -3156,11 +3307,10 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, - gfn_t *gfnp, kvm_pfn_t *pfnp, + gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) { kvm_pfn_t pfn = *pfnp; - gfn_t gfn = *gfnp; int level = *levelp; /* @@ -3170,7 +3320,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, * here. */ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && - level == PT_PAGE_TABLE_LEVEL && + !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompoundMap(pfn_to_page(pfn)) && !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; @@ -3187,8 +3337,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, mask = KVM_PAGES_PER_HPAGE(level) - 1; VM_BUG_ON((gfn & mask) != (pfn & mask)); if (pfn & mask) { - gfn &= ~mask; - *gfnp = gfn; kvm_release_pfn_clean(pfn); pfn &= ~mask; kvm_get_pfn(pfn); @@ -3207,7 +3355,8 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, } if (unlikely(is_noslot_pfn(pfn))) - vcpu_cache_mmio_info(vcpu, gva, gfn, access); + vcpu_cache_mmio_info(vcpu, gva, gfn, + access & shadow_mmio_access_mask); return false; } @@ -3415,11 +3564,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, { int r; int level; - bool force_pt_level = false; + bool force_pt_level; kvm_pfn_t pfn; unsigned long mmu_seq; bool map_writable, write = error_code & PFERR_WRITE_MASK; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + force_pt_level = lpage_disallowed; level = mapping_level(vcpu, gfn, &force_pt_level); if (likely(!force_pt_level)) { /* @@ -3445,22 +3597,20 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) return r; + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; if (make_mmu_pages_available(vcpu) < 0) goto out_unlock; if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); - r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; - + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, v, write, map_writable, level, pfn, + prefault, false); out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 
RET_PF_RETRY; + return r; } static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, @@ -3938,11 +4088,22 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, error_code, gfn, prefault); } +static u32 alloc_apf_token(struct kvm_vcpu *vcpu) +{ + /* make sure the token value is not 0 */ + u32 id = vcpu->arch.apf.id; + + if (id << 12 == 0) + vcpu->arch.apf.id = 1; + + return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; +} + static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) { struct kvm_arch_async_pf arch; - arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.token = alloc_apf_token(vcpu); arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu.direct_map; arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); @@ -4050,6 +4211,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, unsigned long mmu_seq; int write = error_code & PFERR_WRITE_MASK; bool map_writable; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); @@ -4060,8 +4223,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, if (r) return r; - force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn, - PT_DIRECTORY_LEVEL); + force_pt_level = + lpage_disallowed || + !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL); level = mapping_level(vcpu, gfn, &force_pt_level); if (likely(!force_pt_level)) { if (level > PT_DIRECTORY_LEVEL && @@ -4082,22 +4246,20 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) return r; + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; if (make_mmu_pages_available(vcpu) < 0) goto out_unlock; if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); - r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; - + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, + prefault, lpage_disallowed); out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return RET_PF_RETRY; + return r; } static void nonpaging_init_context(struct kvm_vcpu *vcpu, @@ -4433,7 +4595,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) */ shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, context->shadow_root_level, uses_nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), true); @@ -4470,13 +4632,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, context->shadow_root_level, false, boot_cpu_has(X86_FEATURE_GBPAGES), true, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, false); if (!shadow_me_mask) @@ -4497,7 +4659,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - boot_cpu_data.x86_phys_bits, execonly); + shadow_phys_bits, execonly); } #define BYTE_MASK(access) \ @@ -4532,11 +4694,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, */ /* Faults from writes to 
non-writable pages */ - u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0; + u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; /* Faults from user mode accesses to supervisor pages */ - u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0; + u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; /* Faults from fetches of non-executable pages*/ - u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0; + u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; /* Faults from kernel mode fetches of user pages */ u8 smepf = 0; /* Faults from kernel mode accesses of user pages */ @@ -5013,9 +5175,9 @@ static bool need_remote_flush(u64 old, u64 new) } static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, - const u8 *new, int *bytes) + int *bytes) { - u64 gentry; + u64 gentry = 0; int r; /* @@ -5027,22 +5189,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ *gpa &= ~(gpa_t)7; *bytes = 8; - r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8); - if (r) - gentry = 0; - new = (const u8 *)&gentry; } - switch (*bytes) { - case 4: - gentry = *(const u32 *)new; - break; - case 8: - gentry = *(const u64 *)new; - break; - default: - gentry = 0; - break; + if (*bytes == 4 || *bytes == 8) { + r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); + if (r) + gentry = 0; } return gentry; @@ -5146,8 +5298,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); - /* * No need to care whether allocation memory is successful * or not since pte prefetch is skiped if it does not have @@ -5156,6 +5306,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, mmu_topup_memory_caches(vcpu); spin_lock(&vcpu->kvm->mmu_lock); + + gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); + ++vcpu->kvm->stat.mmu_pte_write; kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); @@ -5347,14 +5500,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) uint i; if (pcid == kvm_get_active_pcid(vcpu)) { - mmu->invlpg(vcpu, gva, mmu->root_hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->root_hpa); tlb_flush = true; } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); tlb_flush = true; } } @@ -5395,7 +5550,16 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) struct page *page; int i; - if (tdp_enabled) + /* + * When using PAE paging, the four PDPTEs are treated as 'root' pages, + * while the PDP table is a per-vCPU construct that's allocated at MMU + * creation. When emulating 32-bit mode, cr3 is only 32 bits even on + * x86_64. Therefore we need to allocate the PDP table in the first + * 4GB of memory, which happens to fit the DMA32 zone. Except for + * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can + * skip allocating the PDP table. 
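+ * (PT32E_ROOT_LEVEL is the three-level PAE root, so the check below
+ * only skips the allocation when TDP is using a deeper root.)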
+ */ + if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) return 0; /* @@ -5566,13 +5730,14 @@ static bool slot_rmap_write_protect(struct kvm *kvm, } void kvm_mmu_slot_remove_write_access(struct kvm *kvm, - struct kvm_memory_slot *memslot) + struct kvm_memory_slot *memslot, + int start_level) { bool flush; spin_lock(&kvm->mmu_lock); - flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect, - false); + flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, + start_level, PT_MAX_HUGEPAGE_LEVEL, false); spin_unlock(&kvm->mmu_lock); /* @@ -5618,9 +5783,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, * the guest, and the guest page table is using 4K page size * mapping if the indirect sp has level = 1. */ - if (sp->role.direct && - !kvm_is_reserved_pfn(pfn) && - PageTransCompoundMap(pfn_to_page(pfn))) { + if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && + !kvm_is_zone_device_pfn(pfn) && + PageTransCompoundMap(pfn_to_page(pfn))) { drop_spte(kvm, sptep); need_tlb_flush = 1; goto restart; @@ -5783,13 +5948,26 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); } -void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots) +void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) { + WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); + + gen &= MMIO_SPTE_GEN_MASK; + + /* + * Generation numbers are incremented in multiples of the number of + * address spaces in order to provide unique generations across all + * address spaces. Strip what is effectively the address space + * modifier prior to checking for a wrap of the MMIO generation so + * that a wrap in any address space is detected. + */ + gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); + /* - * The very rare case: if the generation-number is round, + * The very rare case: if the MMIO generation number has wrapped, * zap all shadow pages. */ - if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) { + if (unlikely(gen == 0)) { kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); kvm_mmu_invalidate_zap_all_pages(kvm); } @@ -5802,7 +5980,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) int nr_to_scan = sc->nr_to_scan; unsigned long freed = 0; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { int idx; @@ -5852,7 +6030,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) break; } - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); return freed; } @@ -5874,12 +6052,83 @@ static void mmu_destroy_caches(void) kmem_cache_destroy(mmu_page_header_cache); } +static bool get_nx_auto_mode(void) +{ + /* Return true when CPU has the bug, and mitigations are ON */ + return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); +} + +static void __set_nx_huge_pages(bool val) +{ + nx_huge_pages = itlb_multihit_kvm_mitigation = val; +} + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) +{ + bool old_val = nx_huge_pages; + bool new_val; + + /* In "auto" mode deploy workaround only if CPU has the bug. 
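+ * Accepted values are "off", "force", "auto", or anything
+ * strtobool() understands.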
*/ + if (sysfs_streq(val, "off")) + new_val = 0; + else if (sysfs_streq(val, "force")) + new_val = 1; + else if (sysfs_streq(val, "auto")) + new_val = get_nx_auto_mode(); + else if (strtobool(val, &new_val) < 0) + return -EINVAL; + + __set_nx_huge_pages(new_val); + + if (new_val != old_val) { + struct kvm *kvm; + int idx; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) { + idx = srcu_read_lock(&kvm->srcu); + kvm_mmu_invalidate_zap_all_pages(kvm); + srcu_read_unlock(&kvm->srcu, idx); + + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + } + mutex_unlock(&kvm_lock); + } + + return 0; +} + +static void kvm_set_mmio_spte_mask(void) +{ + u64 mask; + + /* + * Set a reserved PA bit in MMIO SPTEs to generate page faults with + * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT + * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports + * 52-bit physical addresses then there are no reserved PA bits in the + * PTEs and so the reserved PA approach must be disabled. + */ + if (shadow_phys_bits < 52) + mask = BIT_ULL(51) | PT_PRESENT_MASK; + else + mask = 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); +} + int kvm_mmu_module_init(void) { int ret = -ENOMEM; + if (nx_huge_pages == -1) + __set_nx_huge_pages(get_nx_auto_mode()); + kvm_mmu_reset_all_pte_masks(); + kvm_set_mmio_spte_mask(); + pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), 0, SLAB_ACCOUNT, NULL); @@ -5909,10 +6158,10 @@ int kvm_mmu_module_init(void) /* * Caculate mmu pages needed for kvm. */ -unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) +unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm) { - unsigned int nr_mmu_pages; - unsigned int nr_pages = 0; + unsigned long nr_mmu_pages; + unsigned long nr_pages = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i; @@ -5925,8 +6174,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) } nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; - nr_mmu_pages = max(nr_mmu_pages, - (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); + nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); return nr_mmu_pages; } @@ -5945,3 +6193,116 @@ void kvm_mmu_module_exit(void) unregister_shrinker(&mmu_shrinker); mmu_audit_disable(); } + +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) +{ + unsigned int old_val; + int err; + + old_val = nx_huge_pages_recovery_ratio; + err = param_set_uint(val, kp); + if (err) + return err; + + if (READ_ONCE(nx_huge_pages) && + !old_val && nx_huge_pages_recovery_ratio) { + struct kvm *kvm; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + + mutex_unlock(&kvm_lock); + } + + return err; +} + +static void kvm_recover_nx_lpages(struct kvm *kvm) +{ + int rcu_idx; + struct kvm_mmu_page *sp; + unsigned int ratio; + LIST_HEAD(invalid_list); + ulong to_zap; + + rcu_idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; + while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { + /* + * We use a separate list instead of just using active_mmu_pages + * because the number of lpage_disallowed pages is expected to + * be relatively small compared to the total. 
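+ * Zapping the shadow page that replaced a disallowed huge page lets
+ * a later fault install the huge mapping again.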
+ */ + sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, + struct kvm_mmu_page, + lpage_disallowed_link); + WARN_ON_ONCE(!sp->lpage_disallowed); + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + WARN_ON_ONCE(sp->lpage_disallowed); + + if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) { + kvm_mmu_commit_zap_page(kvm, &invalid_list); + if (to_zap) + cond_resched_lock(&kvm->mmu_lock); + } + } + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +} + +static long get_nx_lpage_recovery_timeout(u64 start_time) +{ + return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) + ? start_time + 60 * HZ - get_jiffies_64() + : MAX_SCHEDULE_TIMEOUT; +} + +static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) +{ + u64 start_time; + long remaining_time; + + while (true) { + start_time = get_jiffies_64(); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop() && remaining_time > 0) { + schedule_timeout(remaining_time); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + set_current_state(TASK_INTERRUPTIBLE); + } + + set_current_state(TASK_RUNNING); + + if (kthread_should_stop()) + return 0; + + kvm_recover_nx_lpages(kvm); + } +} + +int kvm_mmu_post_init_vm(struct kvm *kvm) +{ + int err; + + err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, + "kvm-nx-lpage-recovery", + &kvm->arch.nx_lpage_recovery_thread); + if (!err) + kthread_unpark(kvm->arch.nx_lpage_recovery_thread); + + return err; +} + +void kvm_mmu_pre_destroy_vm(struct kvm *kvm) +{ + if (kvm->arch.nx_lpage_recovery_thread) + kthread_stop(kvm->arch.nx_lpage_recovery_thread); +} diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 1fab69c0b2f32851b86ce62deb3e162848f985a8..a8b75ef4499e46662ef6459070b5f2e52d88c81b 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -56,7 +56,7 @@ static inline u64 rsvd_bits(int s, int e) return ((1ULL << (e - s + 1)) - 1) << s; } -void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value); +void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask); void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); @@ -69,7 +69,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len); -static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) +static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) { if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) return kvm->arch.n_max_mmu_pages - @@ -216,4 +216,8 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); + +int kvm_mmu_post_init_vm(struct kvm *kvm); +void kvm_mmu_pre_destroy_vm(struct kvm *kvm); + #endif diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index c73bf4e4988cb5c84065dd324b50b93cbe12c2e1..918b0d5bf2724c66953addcc6b0bfd5b776c6c45 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -325,6 +325,65 @@ TRACE_EVENT( __entry->kvm_gen == __entry->spte_gen ) ); + +TRACE_EVENT( + kvm_mmu_set_spte, + TP_PROTO(int level, gfn_t gfn, u64 *sptep), + TP_ARGS(level, gfn, sptep), + + TP_STRUCT__entry( + __field(u64, gfn) + __field(u64, spte) + __field(u64, sptep) + __field(u8, level) + /* 
These depend on page entry type, so compute them now. */ + __field(bool, r) + __field(bool, x) + __field(u8, u) + ), + + TP_fast_assign( + __entry->gfn = gfn; + __entry->spte = *sptep; + __entry->sptep = virt_to_phys(sptep); + __entry->level = level; + __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK); + __entry->x = is_executable_pte(__entry->spte); + __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1; + ), + + TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx", + __entry->gfn, __entry->spte, + __entry->r ? "r" : "-", + __entry->spte & PT_WRITABLE_MASK ? "w" : "-", + __entry->x ? "x" : "-", + __entry->u == -1 ? "" : (__entry->u ? "u" : "-"), + __entry->level, __entry->sptep + ) +); + +TRACE_EVENT( + kvm_mmu_spte_requested, + TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn), + TP_ARGS(addr, level, pfn), + + TP_STRUCT__entry( + __field(u64, gfn) + __field(u64, pfn) + __field(u8, level) + ), + + TP_fast_assign( + __entry->gfn = addr >> PAGE_SHIFT; + __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); + __entry->level = level; + ), + + TP_printk("gfn %llx pfn %llx level %d", + __entry->gfn, __entry->pfn, __entry->level + ) +); + #endif /* _TRACE_KVMMMU_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index e9ea2d45ae66baa65a2e3818e7120189bcd61e57..9f72cc427158e637b1852e9843c5eddfcf78c43a 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr) return false; } -static bool valid_pat_type(unsigned t) -{ - return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ -} - static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ @@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) return false; if (msr == MSR_IA32_CR_PAT) { - for (i = 0; i < 8; i++) - if (!valid_pat_type((data >> (i * 8)) & 0xff)) - return false; - return true; + return kvm_pat_valid(data); } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 3052a59a30655bcadccb53ec0cd1c14dab2b7591..1f6b0d9b0c85d81cf860f0e76b0b5315141c8f34 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -169,13 +169,13 @@ void kvm_page_track_cleanup(struct kvm *kvm) cleanup_srcu_struct(&head->track_srcu); } -void kvm_page_track_init(struct kvm *kvm) +int kvm_page_track_init(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; head = &kvm->arch.track_notifier_head; - init_srcu_struct(&head->track_srcu); INIT_HLIST_HEAD(&head->track_notifier_list); + return init_srcu_struct(&head->track_srcu); } /* diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 14ffd973df54e6d1c122b14d6f15f33cea931ed6..31014a746aede201230899a6c738ce2c54999791 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -93,8 +93,8 @@ struct guest_walker { gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; bool pte_writable[PT_MAX_FULL_LEVELS]; - unsigned pt_access; - unsigned pte_access; + unsigned int pt_access[PT_MAX_FULL_LEVELS]; + unsigned int pte_access; gfn_t gfn; struct x86_exception fault; }; @@ -388,13 +388,15 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, } walker->ptes[walker->level - 1] = pte; + + /* Convert to ACC_*_MASK flags for struct guest_walker. 
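+ * The permissions are now recorded per level so that FNAME(fetch)
+ * can use the correct access bits for each shadow page it links.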
*/ + walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask); } while (!is_last_gpte(mmu, walker->level, pte)); pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0; /* Convert to ACC_*_MASK flags for struct guest_walker. */ - walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask); walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); if (unlikely(errcode)) @@ -432,7 +434,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, } pgprintk("%s: pte %llx pte_access %x pt_access %x\n", - __func__, (u64)pte, walker->pte_access, walker->pt_access); + __func__, (u64)pte, walker->pte_access, + walker->pt_access[walker->level - 1]); return 1; error: @@ -522,6 +525,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true); + kvm_release_pfn_clean(pfn); return true; } @@ -595,12 +599,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct guest_walker *gw, int write_fault, int hlevel, - kvm_pfn_t pfn, bool map_writable, bool prefault) + kvm_pfn_t pfn, bool map_writable, bool prefault, + bool lpage_disallowed) { struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; - unsigned direct_access, access = gw->pt_access; + unsigned int direct_access, access; int top_level, ret; + gfn_t gfn, base_gfn; direct_access = gw->pte_access; @@ -630,6 +636,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, sp = NULL; if (!is_shadow_present_pte(*it.sptep)) { table_gfn = gw->table_gfn[it.level - 2]; + access = gw->pt_access[it.level - 2]; sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, false, access); } @@ -645,35 +652,48 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, link_shadow_page(vcpu, it.sptep, sp); } - for (; - shadow_walk_okay(&it) && it.level > hlevel; - shadow_walk_next(&it)) { - gfn_t direct_gfn; + /* + * FNAME(page_fault) might have clobbered the bottom bits of + * gw->gfn, restore them from the virtual address. + */ + gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT); + base_gfn = gfn; + + trace_kvm_mmu_spte_requested(addr, gw->level, pfn); + for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { clear_sp_write_flooding_count(it.sptep); - validate_direct_spte(vcpu, it.sptep, direct_access); - drop_large_spte(vcpu, it.sptep); + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. 
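+ * As in __direct_map(), disallowed_hugepage_adjust() lowers hlevel
+ * when a small SPTE already exists at the target level.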
+ */ + disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel); - if (is_shadow_present_pte(*it.sptep)) - continue; + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == hlevel) + break; + + validate_direct_spte(vcpu, it.sptep, direct_access); - direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + drop_large_spte(vcpu, it.sptep); - sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, - true, direct_access); - link_shadow_page(vcpu, it.sptep, sp); + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, addr, + it.level - 1, true, direct_access); + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); + } } - clear_sp_write_flooding_count(it.sptep); ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, gw->gfn, pfn, prefault, map_writable); + it.level, base_gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); - + ++vcpu->stat.pf_fixed; return ret; out_gpte_changed: - kvm_release_pfn_clean(pfn); return RET_PF_RETRY; } @@ -740,9 +760,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, int r; kvm_pfn_t pfn; int level = PT_PAGE_TABLE_LEVEL; - bool force_pt_level = false; unsigned long mmu_seq; bool map_writable, is_self_change_mapping; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + bool force_pt_level = lpage_disallowed; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); @@ -821,6 +843,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, walker.pte_access &= ~ACC_EXEC_MASK; } + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; @@ -829,19 +852,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (make_mmu_pages_available(vcpu) < 0) goto out_unlock; if (!force_pt_level) - transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); + transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level); r = FNAME(fetch)(vcpu, addr, &walker, write_fault, - level, pfn, map_writable, prefault); - ++vcpu->stat.pf_fixed; + level, pfn, map_writable, prefault, lpage_disallowed); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return RET_PF_RETRY; + return r; } static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 58ead7db71a312764b56d9f242e84820239eeb93..4fe0bfcf030481d40f23bfe77a81ff80149b3519 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -113,7 +113,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, .config = config, }; - attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); + attr.sample_period = get_sample_period(pmc, pmc->counter); if (in_tx) attr.config |= HSW_IN_TX; @@ -131,8 +131,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, intr ? kvm_perf_overflow_intr : kvm_perf_overflow, pmc); if (IS_ERR(event)) { - printk_once("kvm_pmu: event creation failed %ld\n", - PTR_ERR(event)); + pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", + PTR_ERR(event), pmc->idx); return; } @@ -282,20 +282,16 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) { bool fast_mode = idx & (1u << 31); struct kvm_pmc *pmc; - u64 ctr_val; + u64 mask = fast_mode ? 
~0u : ~0ull; if (is_vmware_backdoor_pmc(idx)) return kvm_pmu_rdpmc_vmware(vcpu, idx, data); - pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx); + pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask); if (!pmc) return 1; - ctr_val = pmc_read_counter(pmc); - if (fast_mode) - ctr_val = (u32)ctr_val; - - *data = ctr_val; + *data = pmc_read_counter(pmc) & mask; return 0; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index ba8898e1a8542c31caaabeccc3e5b1c99f1efadd..22710c16f548aa4c5ec6daf0bd70190970de931f 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -25,7 +25,8 @@ struct kvm_pmu_ops { unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); - struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx); + struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx, + u64 *mask); int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx); bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); @@ -102,6 +103,24 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) return NULL; } +static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value) +{ + u64 sample_period = (-counter_value) & pmc_bitmask(pmc); + + if (!sample_period) + sample_period = pmc_bitmask(pmc) + 1; + return sample_period; +} + +static inline void pmc_update_sample_period(struct kvm_pmc *pmc) +{ + if (!pmc->perf_event) + return; + + perf_event_period(pmc->perf_event, + get_sample_period(pmc, pmc->counter)); +} + void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel); void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx); void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx); diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index 1495a735b38e757ea3d01e6716888d55cf8a84b7..437bbaea20e67e5125821ee1b6de34afff72ccab 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -186,7 +186,7 @@ static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } /* idx is the ECX register of RDPMC instruction */ -static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx) +static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); struct kvm_pmc *counters; @@ -242,6 +242,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); if (pmc) { pmc->counter += data - pmc_read_counter(pmc); + pmc_update_sample_period(pmc); return 0; } /* MSR_EVNTSELn */ diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 5ab4a364348e3c10987c33203be4ff6fa97e1e73..a4298a8c476536b35a587a782ad8a3b3d1ea6b09 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -126,7 +126,7 @@ static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) } static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, - unsigned idx) + unsigned idx, u64 *mask) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); bool fixed = idx & (1u << 30); @@ -138,6 +138,7 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, if (fixed && idx >= pmu->nr_arch_fixed_counters) return NULL; counters = fixed ? pmu->fixed_counters : pmu->gp_counters; + *mask &= pmu->counter_bitmask[fixed ? 
KVM_PMC_FIXED : KVM_PMC_GP]; return &counters[idx]; } @@ -183,9 +184,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) *data = pmu->global_ovf_ctrl; return 0; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { - *data = pmc_read_counter(pmc); + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_GP]; + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + u64 val = pmc_read_counter(pmc); + *data = val & pmu->counter_bitmask[KVM_PMC_FIXED]; return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { *data = pmc->eventsel; @@ -235,11 +240,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } break; default: - if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || - (pmc = get_fixed_pmc(pmu, msr))) { + if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { if (!msr_info->host_initiated) data = (s64)(s32)data; pmc->counter += data - pmc_read_counter(pmc); + pmc_update_sample_period(pmc); + return 0; + } else if ((pmc = get_fixed_pmc(pmu, msr))) { + pmc->counter += data - pmc_read_counter(pmc); + pmc_update_sample_period(pmc); return 0; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 61ccfb13899ed702d8ab7dc88bdb5489b34bcfeb..74b4c98c6607ed935d813f6cc0d6a003c16a44b4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include "trace.h" @@ -389,6 +390,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); +static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); enum { VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, @@ -513,6 +515,9 @@ static void recalc_intercepts(struct vcpu_svm *svm) c->intercept_dr = h->intercept_dr | g->intercept_dr; c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; c->intercept = h->intercept | g->intercept; + + c->intercept |= (1ULL << INTERCEPT_VMLOAD); + c->intercept |= (1ULL << INTERCEPT_VMSAVE); } static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) @@ -736,8 +741,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu) static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) { vcpu->arch.efer = efer; - if (!npt_enabled && !(efer & EFER_LMA)) - efer &= ~EFER_LME; + + if (!npt_enabled) { + /* Shadow paging assumes NX to be available. 
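+ * Force NX in the EFER value loaded into the VMCB so the NX bits
+ * set in the shadow page tables take effect; vcpu->arch.efer still
+ * holds the guest's own value.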
*/ + efer |= EFER_NX; + + if (!(efer & EFER_LMA)) + efer &= ~EFER_LME; + } to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); @@ -992,33 +1003,32 @@ static void svm_cpu_uninit(int cpu) static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; - int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; - r = -ENOMEM; sd->save_area = alloc_page(GFP_KERNEL); if (!sd->save_area) - goto err_1; + goto free_cpu_data; if (svm_sev_enabled()) { - r = -ENOMEM; sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); if (!sd->sev_vmcbs) - goto err_1; + goto free_save_area; } per_cpu(svm_data, cpu) = sd; return 0; -err_1: +free_save_area: + __free_page(sd->save_area); +free_cpu_data: kfree(sd); - return r; + return -ENOMEM; } @@ -1292,6 +1302,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu) control->pause_filter_count, old); } +/* + * The default MMIO mask is a single bit (excluding the present bit), + * which could conflict with the memory encryption bit. Check for + * memory encryption support and override the default MMIO mask if + * memory encryption is enabled. + */ +static __init void svm_adjust_mmio_mask(void) +{ + unsigned int enc_bit, mask_bit; + u64 msr, mask; + + /* If there is no memory encryption support, use existing mask */ + if (cpuid_eax(0x80000000) < 0x8000001f) + return; + + /* If memory encryption is not enabled, use existing mask */ + rdmsrl(MSR_K8_SYSCFG, msr); + if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) + return; + + enc_bit = cpuid_ebx(0x8000001f) & 0x3f; + mask_bit = boot_cpu_data.x86_phys_bits; + + /* Increment the mask bit if it is the same as the encryption bit */ + if (enc_bit == mask_bit) + mask_bit++; + + /* + * If the mask bit location is below 52, then some bits above the + * physical addressing limit will always be reserved, so use the + * rsvd_bits() function to generate the mask. This mask, along with + * the present bit, will be used to generate a page fault with + * PFER.RSV = 1. + * + * If the mask bit location is 52 (or above), then clear the mask. + */ + mask = (mask_bit < 52) ? 
rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); +} + static __init int svm_hardware_setup(void) { int cpu; @@ -1346,6 +1397,8 @@ static __init int svm_hardware_setup(void) } } + svm_adjust_mmio_mask(); + for_each_possible_cpu(cpu) { r = svm_cpu_init(cpu); if (r) @@ -1388,12 +1441,7 @@ static __init int svm_hardware_setup(void) } } - if (vgif) { - if (!boot_cpu_has(X86_FEATURE_VGIF)) - vgif = false; - else - pr_info("Virtual GIF supported\n"); - } + vgif = false; /* Disabled for CVE-2021-3653 */ return 0; @@ -1444,7 +1492,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) return vcpu->arch.tsc_offset; } -static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; @@ -1462,6 +1510,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) svm->vmcb->control.tsc_offset = offset + g_tsc_offset; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); + return svm->vmcb->control.tsc_offset; } static void avic_init_vmcb(struct vcpu_svm *svm) @@ -1566,9 +1615,10 @@ static void init_vmcb(struct vcpu_svm *svm) init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); + svm_set_cr4(&svm->vcpu, 0); svm_set_efer(&svm->vcpu, 0); save->dr6 = 0xffff0ff0; - kvm_set_rflags(&svm->vcpu, 2); + kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED); save->rip = 0x0000fff0; svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; @@ -1662,20 +1712,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, static int avic_init_access_page(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - int ret; + int ret = 0; + mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) - return 0; + goto out; - ret = x86_set_memory_region(kvm, - APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, - APIC_DEFAULT_PHYS_BASE, - PAGE_SIZE); + ret = __x86_set_memory_region(kvm, + APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, + APIC_DEFAULT_PHYS_BASE, + PAGE_SIZE); if (ret) - return ret; + goto out; kvm->arch.apic_access_page_done = true; - return 0; +out: + mutex_unlock(&kvm->slots_lock); + return ret; } static int avic_init_backing_page(struct kvm_vcpu *vcpu) @@ -1891,6 +1944,7 @@ static void sev_vm_destroy(struct kvm *kvm) list_for_each_safe(pos, q, head) { __unregister_enc_region_locked(kvm, list_entry(pos, struct enc_region, list)); + cond_resched(); } } @@ -2018,7 +2072,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!kvm_vcpu_apicv_active(vcpu)) return; - if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT)) + /* + * Since the host physical APIC id is 8 bits, + * we can support host APIC ID upto 255. + */ + if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK)) return; entry = READ_ONCE(*(svm->avic_physical_id_cache)); @@ -2187,21 +2245,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(err); } +static void svm_clear_current_vmcb(struct vmcb *vmcb) +{ + int i; + + for_each_online_cpu(i) + cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); +} + static void svm_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + /* + * The vmcb page can be recycled, causing a false negative in + * svm_vcpu_load(). So, ensure that no logical CPU has this + * vmcb page recorded as its current vmcb. 
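+ * A false negative would let svm_vcpu_load() skip the indirect
+ * branch prediction barrier it issues when the current vmcb changes.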
+ */ + svm_clear_current_vmcb(svm->vmcb); + __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->nested.hsave)); __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); - /* - * The vmcb page can be recycled, causing a false negative in - * svm_vcpu_load(). So do a full IBPB now. - */ - indirect_branch_prediction_barrier(); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -2665,6 +2733,7 @@ static int npf_interception(struct vcpu_svm *svm) static int db_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; + struct kvm_vcpu *vcpu = &svm->vcpu; if (!(svm->vcpu.guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && @@ -2675,6 +2744,8 @@ static int db_interception(struct vcpu_svm *svm) if (svm->nmi_singlestep) { disable_nmi_singlestep(svm); + /* Make sure we check for pending NMIs upon entry */ + kvm_make_request(KVM_REQ_EVENT, vcpu); } if (svm->vcpu.guest_debug & @@ -2873,8 +2944,12 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) u64 pdpte; int ret; + /* + * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores + * nCR3[4:0] when loading PDPTEs from memory. + */ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte, - offset_in_page(cr3) + index * 8, 8); + (cr3 & GENMASK(11, 5)) + index * 8, 8); if (ret) return 0; return pdpte; @@ -3385,6 +3460,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_load(&svm->vcpu); + /* + * Drop what we picked up for L2 via svm_complete_interrupts() so it + * doesn't end up in L1. + */ + svm->vcpu.arch.nmi_injected = false; + kvm_clear_exception_queue(&svm->vcpu); + kvm_clear_interrupt_queue(&svm->vcpu); + return 0; } @@ -3493,7 +3576,13 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, svm->nested.intercept = nested_vmcb->control.intercept; svm_flush_tlb(&svm->vcpu, true); - svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; + + svm->vmcb->control.int_ctl &= + V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK; + + svm->vmcb->control.int_ctl |= nested_vmcb->control.int_ctl & + (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK); + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; else @@ -5117,6 +5206,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) kvm_vcpu_wake_up(vcpu); } +static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return false; +} + static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) { unsigned long flags; @@ -5446,6 +5540,7 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { + amd_clear_divider(); } static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) @@ -5588,6 +5683,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) svm->vmcb->save.cr2 = vcpu->arch.cr2; clgi(); + kvm_load_guest_xcr0(vcpu); /* * If this vCPU has touched SPEC_CTRL, restore the guest's value if @@ -5735,6 +5831,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(&svm->vcpu); + kvm_put_guest_xcr0(vcpu); stgi(); /* Any pending NMI will happen here */ @@ -5823,6 +5920,13 @@ static bool svm_cpu_has_accelerated_tpr(void) static bool 
svm_has_emulated_msr(int index) { + switch (index) { + case MSR_IA32_MCG_EXT_CTL: + return false; + default: + break; + } + return true; } @@ -5916,6 +6020,11 @@ static bool svm_has_wbinvd_exit(void) return true; } +static bool svm_pku_supported(void) +{ + return false; +} + #define PRE_EX(exit) { .exit_code = (exit), \ .stage = X86_ICPT_PRE_EXCEPT, } #define POST_EX(exit) { .exit_code = (exit), \ @@ -6102,6 +6211,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, static void svm_handle_external_intr(struct kvm_vcpu *vcpu) { + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) + vcpu->arch.at_instruction_boundary = true; + local_irq_enable(); /* * We must have an instruction with interrupts enabled, so @@ -6235,6 +6347,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) int asid, ret; ret = -EBUSY; + if (unlikely(sev->active)) + return ret; + asid = sev_asid_new(); if (asid < 0) return ret; @@ -6377,11 +6492,11 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } -static int get_num_contig_pages(int idx, struct page **inpages, - unsigned long npages) +static unsigned long get_num_contig_pages(unsigned long idx, + struct page **inpages, unsigned long npages) { unsigned long paddr, next_paddr; - int i = idx + 1, pages = 1; + unsigned long i = idx + 1, pages = 1; /* find the number of contiguous pages starting from idx */ paddr = __sme_page_pa(inpages[idx]); @@ -6400,12 +6515,12 @@ static int get_num_contig_pages(int idx, struct page **inpages, static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) { - unsigned long vaddr, vaddr_end, next_vaddr, npages, size; + unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_sev_launch_update_data params; struct sev_data_launch_update_data *data; struct page **inpages; - int i, ret, pages; + int ret; if (!sev_guest(kvm)) return -ENOTTY; @@ -6754,7 +6869,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) struct page **src_p, **dst_p; struct kvm_sev_dbg debug; unsigned long n; - int ret, size; + unsigned int size; + int ret; if (!sev_guest(kvm)) return -ENOTTY; @@ -6762,6 +6878,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) return -EFAULT; + if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) + return -EINVAL; + if (!debug.dst_uaddr) + return -EINVAL; + vaddr = debug.src_uaddr; size = debug.len; vaddr_end = vaddr + size; @@ -6812,8 +6933,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) dst_vaddr, len, &argp->error); - sev_unpin_memory(kvm, src_p, 1); - sev_unpin_memory(kvm, dst_p, 1); + sev_unpin_memory(kvm, src_p, n); + sev_unpin_memory(kvm, dst_p, n); if (ret) goto err; @@ -7139,13 +7260,14 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mpx_supported = svm_mpx_supported, .xsaves_supported = svm_xsaves_supported, .umip_emulated = svm_umip_emulated, + .pku_supported = svm_pku_supported, .set_supported_cpuid = svm_set_supported_cpuid, .has_wbinvd_exit = svm_has_wbinvd_exit, .read_l1_tsc_offset = svm_read_l1_tsc_offset, - .write_tsc_offset = svm_write_tsc_offset, + .write_l1_tsc_offset = svm_write_l1_tsc_offset, .set_tdp_cr3 = set_tdp_cr3, @@ -7158,6 +7280,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .pmu_ops = &amd_pmu_ops, .deliver_posted_interrupt = 
svm_deliver_avic_intr, + .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, .update_pi_irte = svm_update_pi_irte, .setup_mce = svm_setup_mce, diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0f997683404fad647b55abd7ad375a6c499dc75c..b3f219b7c840863588bd8dbae2193e212e41f443 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi, ); TRACE_EVENT(kvm_apic_accept_irq, - TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec), + TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), TP_ARGS(apicid, dm, tm, vec), TP_STRUCT__entry( __field( __u32, apicid ) __field( __u16, dm ) - __field( __u8, tm ) + __field( __u16, tm ) __field( __u8, vec ) ), diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e665aa7167cf9729aac82a075c358236d9f03aec..535fce186d586d8c2a360e2d26e71e0e12a93ecc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include +#include #include #include #include @@ -170,6 +172,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); * refer SDM volume 3b section 21.6.13 & 22.1.3. */ static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; +module_param(ple_gap, uint, 0444); static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, uint, 0444); @@ -210,6 +213,9 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; +/* Control for disabling CPU Fill buffer clear */ +static bool __read_mostly vmx_fb_clear_ctrl_available; + static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; @@ -961,13 +967,13 @@ struct vcpu_vmx { struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; + bool guest_msrs_dirty; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif - u64 arch_capabilities; u64 spec_ctrl; u32 vm_entry_controls_shadow; @@ -1043,6 +1049,8 @@ struct vcpu_vmx { u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; u64 ept_pointer; + u64 msr_ia32_mcu_opt_ctrl; + bool disable_fb_clear; }; enum segment_cache_field { @@ -1283,7 +1291,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type); static DEFINE_PER_CPU(struct vmcs *, vmxarea); @@ -1595,6 +1603,11 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} static inline void evmcs_touch_msr_bitmap(void) {} #endif /* IS_ENABLED(CONFIG_HYPERV) */ +void kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ + vcpu_stat->st_max = 0; +} + static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | @@ -1931,6 +1944,11 @@ static bool vmx_umip_emulated(void) SECONDARY_EXEC_DESC; } +static inline bool vmx_pku_supported(void) +{ + return boot_cpu_has(X86_FEATURE_PKU); +} + static inline bool report_flexpriority(void) { return flexpriority_enabled; @@ -2077,7 +2095,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) return -1; } -static inline void __invvpid(int ext, u16 vpid, gva_t gva) +static inline void 
__invvpid(unsigned long ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; @@ -2092,7 +2110,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) BUG_ON(error); } -static inline void __invept(int ext, u64 eptp, gpa_t gpa) +static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; @@ -2105,6 +2123,60 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa) BUG_ON(error); } +static void vmx_setup_fb_clear_ctrl(void) +{ + u64 msr; + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + !boot_cpu_has_bug(X86_BUG_MDS) && + !boot_cpu_has_bug(X86_BUG_TAA)) { + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_FB_CLEAR_CTRL) + vmx_fb_clear_ctrl_available = true; + } +} + +static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) +{ + u64 msr; + + if (!vmx->disable_fb_clear) + return; + + msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL); + msr |= FB_CLEAR_DIS; + native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + /* Cache the MSR value to avoid reading it later */ + vmx->msr_ia32_mcu_opt_ctrl = msr; +} + +static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) +{ + if (!vmx->disable_fb_clear) + return; + + vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; + native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); +} + +static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) +{ + vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; + + /* + * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS + * at VMEntry. Skip the MSR read/write when a guest has no use case to + * execute VERW. + */ + if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || + ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) + vmx->disable_fb_clear = false; +} + static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; @@ -2754,7 +2826,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, if (!entry_only) j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; @@ -2782,17 +2855,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) u64 guest_efer = vmx->vcpu.arch.efer; u64 ignore_bits = 0; - if (!enable_ept) { - /* - * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing - * host CPUID is more efficient than testing guest CPUID - * or CR4. Host SMEP is anyway a requirement for guest SMEP. - */ - if (boot_cpu_has(X86_FEATURE_SMEP)) - guest_efer |= EFER_NX; - else if (!(guest_efer & EFER_NX)) - ignore_bits |= EFER_NX; - } + /* Shadow paging assumes NX to be available. */ + if (!enable_ept) + guest_efer |= EFER_NX; /* * LMA and LME handled by hardware; SCE meaningless outside long mode. @@ -2873,6 +2938,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->req_immediate_exit = false; + /* + * Note that guest MSRs to be saved/restored can also be changed + * when guest state is loaded. This happens when guest transitions + * to/from long-mode by setting MSR_EFER.LMA. 
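+ * setup_msrs() flags that case via guest_msrs_dirty so the shared
+ * MSRs are (re)loaded below even when host state is already loaded.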
+ */ + if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { + vmx->guest_msrs_dirty = false; + for (i = 0; i < vmx->save_nmsrs; ++i) + kvm_set_shared_msr(vmx->guest_msrs[i].index, + vmx->guest_msrs[i].data, + vmx->guest_msrs[i].mask); + + } + if (vmx->loaded_cpu_state) return; @@ -2933,11 +3012,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmcs_writel(HOST_GS_BASE, gs_base); host_state->gs_base = gs_base; } - - for (i = 0; i < vmx->save_nmsrs; ++i) - kvm_set_shared_msr(vmx->guest_msrs[i].index, - vmx->guest_msrs[i].data, - vmx->guest_msrs[i].mask); } static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) @@ -3294,10 +3368,13 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit } } else { if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) + if (nr == DB_VECTOR) { *exit_qual = vcpu->arch.dr6; - else + *exit_qual &= ~(DR6_FIXED_1 | DR6_BT); + *exit_qual ^= DR6_RTM; + } else { *exit_qual = 0; + } return 1; } } @@ -3397,9 +3474,6 @@ static void setup_msrs(struct vcpu_vmx *vmx) index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); - index = __find_msr_index(vmx, MSR_TSC_AUX); - if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) - move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only * if efer.sce is enabled. @@ -3412,8 +3486,12 @@ static void setup_msrs(struct vcpu_vmx *vmx) index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); + index = __find_msr_index(vmx, MSR_TSC_AUX); + if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) + move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; + vmx->guest_msrs_dirty = true; if (cpu_has_vmx_msr_bitmap()) vmx_update_msr_bitmap(&vmx->vcpu); @@ -3430,11 +3508,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) return vcpu->arch.tsc_offset; } -/* - * writes 'offset' into guest's timestamp counter offset register - */ -static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) +static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { + u64 active_offset = offset; if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According @@ -3442,17 +3518,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ - struct vmcs12 *vmcs12; - /* recalculate vmcs02.TSC_OFFSET: */ - vmcs12 = get_vmcs12(vcpu); - vmcs_write64(TSC_OFFSET, offset + - (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? - vmcs12->tsc_offset : 0)); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING)) + active_offset += vmcs12->tsc_offset; } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); - vmcs_write64(TSC_OFFSET, offset); } + + vmcs_write64(TSC_OFFSET, active_offset); + return active_offset; } /* @@ -3588,9 +3663,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv) * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. 
*/ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); + if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high &= SECONDARY_EXEC_DESC | @@ -4088,12 +4165,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = to_vmx(vcpu)->spec_ctrl; break; - case MSR_IA32_ARCH_CAPABILITIES: - if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) - return 1; - msr_info->data = to_vmx(vcpu)->arch_capabilities; - break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; @@ -4126,7 +4197,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, &msr_info->data); case MSR_IA32_XSS: - if (!vmx_xsaves_supported()) + if (!vmx_xsaves_supported() || + (!msr_info->host_initiated && + !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))) return 1; msr_info->data = vcpu->arch.ia32_xss; break; @@ -4255,15 +4329,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_W); break; - case MSR_IA32_ARCH_CAPABILITIES: - if (!msr_info->host_initiated) - return 1; - vmx->arch_capabilities = data; - break; case MSR_IA32_CR_PAT: + if (!kvm_pat_valid(data)) + return 1; + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; @@ -4297,7 +4367,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_XSS: - if (!vmx_xsaves_supported()) + if (!vmx_xsaves_supported() || + (!msr_info->host_initiated && + !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)))) return 1; /* * The only supported bit as of Skylake is bit 8, but @@ -4335,9 +4408,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } break; } - ret = kvm_set_msr_common(vcpu, msr_info); + ret = kvm_set_msr_common(vcpu, msr_info); } + /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ + if (msr_index == MSR_IA32_ARCH_CAPABILITIES) + vmx_update_fb_clear_dis(vcpu, vmx); + return ret; } @@ -4827,6 +4904,9 @@ static void init_vmcs_shadow_fields(void) { int i, j; + memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); + memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); + for (i = j = 0; i < max_shadow_read_only_fields; i++) { u16 field = shadow_read_only_fields[i]; if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && @@ -5170,7 +5250,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu) (unsigned long *)&vcpu->arch.regs_dirty)) return; - if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { + if (is_pae_paging(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); @@ -5182,7 +5262,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; - if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { + if (is_pae_paging(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 
mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); @@ -5719,6 +5799,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) (ss.selector & SEGMENT_RPL_MASK)); } +static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, + unsigned int port, int size); +static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + unsigned long exit_qualification; + unsigned short port; + int size; + + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + port = exit_qualification >> 16; + size = (exit_qualification & 7) + 1; + + return nested_vmx_check_io_bitmaps(vcpu, port, size); +} + /* * Check if guest state is valid. Returns true if valid, false if * not. @@ -5923,7 +6023,7 @@ static void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } -static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); @@ -5961,7 +6061,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit } } -static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); @@ -5999,7 +6099,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm } } -static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, +static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type, bool value) { if (value) @@ -6557,7 +6657,7 @@ static void ept_set_mmio_spte_mask(void) * of an EPT paging-structure entry is 110b (write/execute). 
*/ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, - VMX_EPT_MISCONFIG_WX_VALUE); + VMX_EPT_MISCONFIG_WX_VALUE, 0); } #define VMX_XSS_EXIT_BITMAP 0 @@ -6650,8 +6750,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } - vmx->arch_capabilities = kvm_get_arch_capabilities(); - vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ @@ -6769,6 +6867,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); + + vmx_update_fb_clear_dis(vcpu, vmx); } /* @@ -7254,6 +7354,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; + vcpu->stat.cr_exits++; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); @@ -7426,6 +7527,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu) msr_info.index = ecx; msr_info.host_initiated = false; + vcpu->stat.msr_rd_exits++; if (vmx_get_msr(vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); @@ -7450,6 +7552,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu) msr.data = data; msr.index = ecx; msr.host_initiated = false; + vcpu->stat.msr_wr_exits++; if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); @@ -7575,6 +7678,7 @@ static int handle_apic_write(struct kvm_vcpu *vcpu) u32 offset = exit_qualification & 0xfff; /* APIC-write VM exit is trap-like and thus no need to adjust IP */ + vcpu->stat.apic_wr_exits++; kvm_apic_write_nodecode(vcpu, offset); return 1; } @@ -7651,6 +7755,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) u64 error_code; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + vcpu->stat.ept_vio_exits++; /* * EPT violation happened while executing iret from NMI, @@ -7697,6 +7802,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) * nGPA here instead of the required GPA. 
*/ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); + vcpu->stat.ept_mis_exits++; if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); @@ -7866,19 +7972,8 @@ static __init int hardware_setup(void) for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); - for (i = 0; i < VMX_BITMAP_NR; i++) { - vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx_bitmap[i]) - goto out; - } - - memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); - memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - - if (setup_vmcs_config(&vmcs_config) < 0) { - r = -EIO; - goto out; - } + if (setup_vmcs_config(&vmcs_config) < 0) + return -EIO; if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); @@ -7989,31 +8084,45 @@ static __init int hardware_setup(void) kvm_x86_ops->cancel_hv_timer = NULL; } - if (!cpu_has_vmx_shadow_vmcs()) + if (!cpu_has_vmx_shadow_vmcs() || !nested) enable_shadow_vmcs = 0; - if (enable_shadow_vmcs) + if (enable_shadow_vmcs) { + for (i = 0; i < VMX_BITMAP_NR; i++) { + vmx_bitmap[i] = (unsigned long *) + __get_free_page(GFP_KERNEL); + if (!vmx_bitmap[i]) + goto out; + } + init_vmcs_shadow_fields(); + } kvm_set_posted_intr_wakeup_handler(wakeup_handler); nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv); kvm_mce_cap_supported |= MCG_LMCE_P; - return alloc_kvm_area(); + r = alloc_kvm_area(); + if (r) + goto out; + return 0; out: - for (i = 0; i < VMX_BITMAP_NR; i++) - free_page((unsigned long)vmx_bitmap[i]); - - return r; + if (enable_shadow_vmcs) { + for (i = 0; i < VMX_BITMAP_NR; i++) + free_page((unsigned long)vmx_bitmap[i]); + } + return r; } static __exit void hardware_unsetup(void) { int i; - for (i = 0; i < VMX_BITMAP_NR; i++) - free_page((unsigned long)vmx_bitmap[i]); + if (enable_shadow_vmcs) { + for (i = 0; i < VMX_BITMAP_NR; i++) + free_page((unsigned long)vmx_bitmap[i]); + } free_kvm_area(); } @@ -8024,6 +8133,7 @@ static __exit void hardware_unsetup(void) */ static int handle_pause(struct kvm_vcpu *vcpu) { + vcpu->stat.pause_exits++; if (!kvm_pause_in_guest(vcpu->kvm)) grow_ple_window(vcpu); @@ -8165,25 +8275,50 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ off = exit_qualification; /* holds the displacement */ + if (addr_size == 1) + off = (gva_t)sign_extend64(off, 31); + else if (addr_size == 0) + off = (gva_t)sign_extend64(off, 15); if (base_is_valid) off += kvm_register_read(vcpu, base_reg); if (index_is_valid) off += kvm_register_read(vcpu, index_reg)< s.limit); + if (!(s.base == 0 && s.limit == 0xffffffff && + ((s.type & 8) || !(s.type & 4)))) + exn = exn || (off + sizeof(u64) > s.limit); } if (exn) { kvm_queue_exception_e(vcpu, @@ -8275,11 +8416,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (r < 0) goto out_vmcs02; - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); + vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_shadow_vmcs12) goto out_cached_shadow_vmcs12; @@ -8421,6 +8562,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) { vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, -1ull); + vmx->nested.sync_shadow_vmcs = false; } static inline 
void nested_release_vmcs12(struct vcpu_vmx *vmx) @@ -8432,7 +8574,6 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); - vmx->nested.sync_shadow_vmcs = false; vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; @@ -8454,6 +8595,9 @@ static void free_nested(struct vcpu_vmx *vmx) if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; + kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, &vmx->vcpu); + + hrtimer_cancel(&vmx->nested.preemption_timer); vmx->nested.vmxon = false; vmx->nested.smm.vmxon = false; free_vpid(vmx->nested.vpid02); @@ -8631,6 +8775,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) u64 field_value; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; + if (WARN_ON(!shadow_vmcs)) + return; + preempt_disable(); vmcs_load(shadow_vmcs); @@ -8669,6 +8816,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; + if (WARN_ON(!shadow_vmcs)) + return; + vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { @@ -8705,6 +8855,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; struct vmcs12 *vmcs12; + struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; @@ -8746,8 +8897,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu) vmx_instruction_info, true, &gva)) return 1; /* _system ok, nested_vmx_check_permission has verified cpl=0 */ - kvm_write_guest_virt_system(vcpu, gva, &field_value, - (is_long_mode(vcpu) ? 8 : 4), NULL); + if (kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), + &e)) + kvm_inject_page_fault(vcpu, &e); } nested_vmx_succeed(vcpu); @@ -9417,23 +9570,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); -static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) +/* + * Return true if an IO instruction with the specified port and size should cause + * a VM-exit into L1. 
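+ * The check follows the two I/O bitmaps referenced by vmcs12: bitmap A
+ * covers ports 0x0000-0x7fff and bitmap B covers 0x8000-0xffff, and the
+ * access exits if the bit for any byte in the accessed port range is set.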
+ */ +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, + int size) { - unsigned long exit_qualification; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); gpa_t bitmap, last_bitmap; - unsigned int port; - int size; u8 b; - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - port = exit_qualification >> 16; - size = (exit_qualification & 7) + 1; - last_bitmap = (gpa_t)-1; b = -1; @@ -10366,6 +10513,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) return ((rvi & 0xf0) > (vppr & 0xf0)); } +static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) +{ + return pi_test_on(vcpu_to_pi_desc(vcpu)); +} + static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!kvm_vcpu_apicv_active(vcpu)) @@ -10387,28 +10539,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { - u32 exit_intr_info = 0; - u16 basic_exit_reason = (u16)vmx->exit_reason; - - if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY - || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) + if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI) return; - if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - vmx->exit_intr_info = exit_intr_info; + vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); /* if exit due to PF check for async PF */ - if (is_page_fault(exit_intr_info)) + if (is_page_fault(vmx->exit_intr_info)) vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); /* Handle machine checks before interrupts are enabled */ - if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || - is_machine_check(exit_intr_info)) + if (is_machine_check(vmx->exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ - if (is_nmi(exit_intr_info)) { + if (is_nmi(vmx->exit_intr_info)) { kvm_before_interrupt(&vmx->vcpu); asm("int $2"); kvm_after_interrupt(&vmx->vcpu); @@ -10453,6 +10598,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) [cs]"i"(__KERNEL_CS) ); } + vcpu->arch.at_instruction_boundary = true; } STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); @@ -10656,10 +10802,35 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->hv_timer_armed = false; } +u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx) +{ + u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current); + + if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) + return 0; + + guestval = __rdmsr(MSR_IA32_SPEC_CTRL); + + /* + * + * For legacy IBRS, the IBRS bit always needs to be written after + * transitioning from a less privileged predictor mode, regardless of + * whether the guest/host values differ. + */ + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) || + guestval != hostval) + native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval); + + barrier_nospec(); + + return guestval; +} + static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long cr3, cr4, evmcs_rsp; + u64 spec_ctrl; /* Record the guest's net vcpu time for enforced NMI injections. 
*/ if (unlikely(!enable_vnmi && @@ -10706,6 +10877,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); + kvm_load_guest_xcr0(vcpu); + if (static_cpu_has(X86_FEATURE_PKU) && kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && vcpu->arch.pkru != vmx->host_pkru) @@ -10728,8 +10901,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) evmcs_rsp = static_branch_unlikely(&enable_evmcs) ? (unsigned long)¤t_evmcs->host_rsp : 0; + /* L1D Flush includes CPU buffer clear to mitigate MDS */ if (static_branch_unlikely(&vmx_l1d_should_flush)) vmx_l1d_flush(vcpu); + else if (static_branch_unlikely(&mds_user_clear)) + mds_clear_cpu_buffers(); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); asm( /* Store host registers */ @@ -10755,7 +10936,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%" _ASM_AX", %%cr2 \n\t" "3: \n\t" /* Check if vmlaunch of vmresume is needed */ - "cmpl $0, %c[launched](%0) \n\t" + "cmpb $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags. */ "mov %c[rax](%0), %%" _ASM_AX " \n\t" "mov %c[rbx](%0), %%" _ASM_BX " \n\t" @@ -10854,6 +11035,26 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before + * the first unbalanced RET after vmexit! + * + * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB + * entries and (in some cases) RSB underflow. + * + * eIBRS has its own protection against poisoned RSB, so it doesn't + * need the RSB filling sequence. But it does need to be enabled, and a + * single call to retire, before the first unbalanced RET. + * + * So no RETs before vmx_spec_ctrl_restore_host() below. + */ + vmexit_fill_RSB(); + + /* Save this for below */ + spec_ctrl = vmx_spec_ctrl_restore_host(vmx); + + vmx_enable_fb_clear(vmx); + /* * We do not use IBRS in the kernel. If this vCPU has used the * SPEC_CTRL MSR it may have left it on; save the value and @@ -10870,12 +11071,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * save it. */ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) - vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - - x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); - - /* Eliminate branch target predictions from guest mode */ - vmexit_fill_RSB(); + vmx->spec_ctrl = spec_ctrl; /* All fields are clean at this point */ if (static_branch_unlikely(&enable_evmcs)) @@ -10913,15 +11109,20 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) */ if (static_cpu_has(X86_FEATURE_PKU) && kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { - vcpu->arch.pkru = __read_pkru(); + vcpu->arch.pkru = rdpkru(); if (vcpu->arch.pkru != vmx->host_pkru) __write_pkru(vmx->host_pkru); } + kvm_put_guest_xcr0(vcpu); + vmx->nested.nested_run_pending = 0; vmx->idt_vectoring_info = 0; vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); + if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) + kvm_machine_check(); + if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) return; @@ -11090,8 +11291,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(err); } -#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. 
See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" -#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" +#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" +#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" static int vmx_vm_init(struct kvm *kvm) { @@ -11113,7 +11314,7 @@ static int vmx_vm_init(struct kvm *kvm) * Warn upon starting the first VM in a potentially * insecure environment. */ - if (cpu_smt_control == CPU_SMT_ENABLED) + if (sched_smt_active()) pr_warn_once(L1TF_MSG_SMT); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) pr_warn_once(L1TF_MSG_L1D); @@ -11459,6 +11660,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; + vmx->nested.pi_desc = NULL; + vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull); } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); if (is_error_page(page)) @@ -11543,6 +11746,17 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, return 0; } +static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) { + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + + msr_bitmap[word] = ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; + } +} + /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. @@ -11584,39 +11798,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, return false; msr_bitmap_l1 = (unsigned long *)kmap(page); - if (nested_cpu_has_apic_reg_virt(vmcs12)) { - /* - * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it - * just lets the processor take the value from the virtual-APIC page; - * take those 256 bits directly from the L1 bitmap. - */ - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = msr_bitmap_l1[word]; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } else { - for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { - unsigned word = msr / BITS_PER_LONG; - msr_bitmap_l0[word] = ~0; - msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; - } - } - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_TASKPRI), - MSR_TYPE_W); + /* + * To keep the control flow simple, pay eight 8-byte writes (sixteen + * 4-byte writes on 32-bit systems) up front to enable intercepts for + * the x2APIC MSR range and selectively disable them below. + */ + enable_x2apic_msr_intercepts(msr_bitmap_l0); + + if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { + if (nested_cpu_has_apic_reg_virt(vmcs12)) { + /* + * L0 need not intercept reads for MSRs between 0x800 + * and 0x8ff, it just lets the processor take the value + * from the virtual-APIC page; take those 256 bits + * directly from the L1 bitmap. 
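+ * Write intercepts stay set at this point; the TPR intercept (and, with
+ * virtual-interrupt delivery, the EOI and SELF_IPI write intercepts) is
+ * relaxed selectively below.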
+ */ + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + + msr_bitmap_l0[word] = msr_bitmap_l1[word]; + } + } - if (nested_cpu_has_vid(vmcs12)) { - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_EOI), - MSR_TYPE_W); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, - X2APIC_MSR(APIC_SELF_IPI), - MSR_TYPE_W); + X2APIC_MSR(APIC_TASKPRI), + MSR_TYPE_R | MSR_TYPE_W); + + if (nested_cpu_has_vid(vmcs12)) { + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_EOI), + MSR_TYPE_W); + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_SELF_IPI), + MSR_TYPE_W); + } } if (spec_ctrl) @@ -11716,7 +11935,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, !nested_exit_intr_ack_set(vcpu) || (vmcs12->posted_intr_nv & 0xff00) || (vmcs12->posted_intr_desc_addr & 0x3f) || - (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ @@ -11940,8 +12159,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne * If PAE paging and EPT are both on, CR3 is not used by the CPU and * must not be dereferenced. */ - if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && - !nested_ept) { + if (is_pae_paging(vcpu) && !nested_ept) { if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { *entry_failure_code = ENTRY_FAIL_PDPTE; return 1; @@ -12493,7 +12711,7 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) /* VM-entry exception error code */ if (has_error_code && - vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) + vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; /* VM-entry interruption-info field: reserved bits */ @@ -12545,7 +12763,7 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 *exit_qual) { - bool ia32e; + bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); *exit_qual = ENTRY_FAIL_DEFAULT; @@ -12558,6 +12776,13 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 1; } + if ((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG) + return 1; + + if ((ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || + (ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) + return 1; + /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: @@ -12569,7 +12794,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, */ if (to_vmx(vcpu)->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { - ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && @@ -12797,11 +13021,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) nested_cache_shadow_vmcs12(vcpu, vmcs12); /* - * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken - * by event injection, halt vcpu. + * If we're entering a halted L2 vcpu and the L2 vcpu won't be + * awakened by event injection or by an NMI-window VM-exit or + * by an interrupt-window VM-exit, halt the vcpu. 
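+ * With either window-exiting control armed (interrupt-window exiting only
+ * matters while RFLAGS.IF is set), a VM-exit to L1 is already pending, so
+ * the vCPU must not be put to sleep.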
*/ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && - !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) { + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && + !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) && + !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) && + (vmcs12->guest_rflags & X86_EFLAGS_IF))) { vmx->nested.nested_run_pending = 0; return kvm_vcpu_halt(vcpu); } @@ -13122,24 +13350,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_clear_interrupt_queue(vcpu); } -static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - u32 entry_failure_code; - - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. - */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; -} - /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified @@ -13153,6 +13363,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; + u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; @@ -13179,7 +13390,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); vmx_set_cr4(vcpu, vmcs12->host_cr4); - load_vmcs12_mmu_host_state(vcpu, vmcs12); + nested_ept_uninit_mmu_context(vcpu); + + /* + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed. + */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; /* * If vmcs01 don't use VPID, CPU flushes TLB on every @@ -13275,6 +13496,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } +static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) +{ + struct shared_msr_entry *efer_msr; + unsigned int i; + + if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) + return vmcs_read64(GUEST_IA32_EFER); + + if (cpu_has_load_ia32_efer) + return host_efer; + + for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { + if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) + return vmx->msr_autoload.guest.val[i].value; + } + + efer_msr = find_msr_entry(vmx, MSR_EFER); + if (efer_msr) + return efer_msr->data; + + return host_efer; +} + +static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmx_msr_entry g, h; + struct msr_data msr; + gpa_t gpa; + u32 i, j; + + vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { + /* + * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set + * as vmcs01.GUEST_DR7 contains a userspace defined value + * and vcpu->arch.dr7 is not squirreled away before the + * nested VMENTER (not worth adding a variable in nested_vmx). 
+ */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + kvm_set_dr(vcpu, 7, DR7_FIXED_1); + else + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); + } + + /* + * Note that calling vmx_set_{efer,cr0,cr4} is important as they + * handle a variety of side effects to KVM's software model. + */ + vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); + + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); + + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); + + nested_ept_uninit_mmu_context(vcpu); + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + + /* + * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs + * from vmcs01 (if necessary). The PDPTRs are not loaded on + * VMFail, like everything else we just need to ensure our + * software model is up-to-date. + */ + ept_save_pdptrs(vcpu); + + kvm_mmu_reset_context(vcpu); + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); + + /* + * This nasty bit of open coding is a compromise between blindly + * loading L1's MSRs using the exit load lists (incorrect emulation + * of VMFail), leaving the nested VM's MSRs in the software model + * (incorrect behavior) and snapshotting the modified MSRs (too + * expensive since the lists are unbound by hardware). For each + * MSR that was (prematurely) loaded from the nested VMEntry load + * list, reload it from the exit load list if it exists and differs + * from the guest value. The intent is to stuff host state as + * silently as possible, not to fully process the exit load list. + */ + msr.host_initiated = false; + for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { + gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); + if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { + pr_debug_ratelimited( + "%s read MSR index failed (%u, 0x%08llx)\n", + __func__, i, gpa); + goto vmabort; + } + + for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { + gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); + if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { + pr_debug_ratelimited( + "%s read MSR failed (%u, 0x%08llx)\n", + __func__, j, gpa); + goto vmabort; + } + if (h.index != g.index) + continue; + if (h.value == g.value) + break; + + if (nested_vmx_load_msr_check(vcpu, &h)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, j, h.index, h.reserved); + goto vmabort; + } + + msr.index = h.index; + msr.data = h.value; + if (kvm_set_msr(vcpu, &msr)) { + pr_debug_ratelimited( + "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", + __func__, j, h.index, h.value); + goto vmabort; + } + } + } + + return; + +vmabort: + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); +} + /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if @@ -13419,7 +13774,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - load_vmcs12_mmu_host_state(vcpu, vmcs12); + /* + * Restore L1's host state to KVM's software model. We're here + * because a consistency check was caught by hardware, which + * means some amount of guest state has been propagated to KVM's + * model and needs to be unwound to the host's state. 
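+ * nested_vmx_restore_host_state() below reloads vmcs01's CR0/CR3/CR4 and
+ * EFER into KVM's model and replays the VM-exit MSR-load list to undo any
+ * MSRs that were already loaded for L2.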
+ */ + nested_vmx_restore_host_state(vcpu); /* * The emulated instruction was already skipped in @@ -13461,6 +13822,39 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } +static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, + struct x86_instruction_info *info) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned short port; + bool intercept; + int size; + + if (info->intercept == x86_intercept_in || + info->intercept == x86_intercept_ins) { + port = info->src_val; + size = info->dst_bytes; + } else { + port = info->dst_val; + size = info->src_bytes; + } + + /* + * If the 'use IO bitmaps' VM-execution control is 0, IO instruction + * VM-exits depend on the 'unconditional IO exiting' VM-execution + * control. + * + * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. + */ + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + intercept = nested_cpu_has(vmcs12, + CPU_BASED_UNCOND_IO_EXITING); + else + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); + + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; +} + static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) @@ -13468,19 +13862,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + switch (info->intercept) { /* * RDPID causes #UD if disabled through secondary execution controls. * Because it is marked as EmulateOnUD, we need to intercept it here. */ - if (info->intercept == x86_intercept_rdtscp && - !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { - ctxt->exception.vector = UD_VECTOR; - ctxt->exception.error_code_valid = false; - return X86EMUL_PROPAGATE_FAULT; - } + case x86_intercept_rdtscp: + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { + ctxt->exception.vector = UD_VECTOR; + ctxt->exception.error_code_valid = false; + return X86EMUL_PROPAGATE_FAULT; + } + break; + + case x86_intercept_in: + case x86_intercept_ins: + case x86_intercept_out: + case x86_intercept_outs: + return vmx_check_intercept_io(vcpu, info); /* TODO: check more intercepts... */ - return X86EMUL_CONTINUE; + default: + break; + } + + return X86EMUL_UNHANDLEABLE; } #ifdef CONFIG_X86_64 @@ -13557,7 +13963,8 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) static void vmx_slot_enable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { - kvm_mmu_slot_leaf_clear_dirty(kvm, slot); + if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) + kvm_mmu_slot_leaf_clear_dirty(kvm, slot); kvm_mmu_slot_largepage_remove_write_access(kvm, slot); } @@ -13967,13 +14374,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) copy_shadow_to_vmcs12(vmx); - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) + /* + * Copy over the full allocated size of vmcs12 rather than just the size + * of the struct. 
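+ * VMCS12_SIZE is the fixed 4KiB backing allocation, so the saved image
+ * keeps a stable layout even if struct vmcs12 gains fields later.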
+ */ + if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) return -EFAULT; if (nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) { if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, - get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) + get_shadow_vmcs12(vcpu), VMCS12_SIZE)) return -EFAULT; } @@ -14010,13 +14421,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) return -EINVAL; - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) - return -EINVAL; - - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) return -EINVAL; @@ -14046,6 +14450,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; + /* Empty 'VMXON' state is permitted */ + if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) + return 0; + + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { @@ -14072,7 +14484,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) { struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); - if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) + if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12)) return -EINVAL; if (copy_from_user(shadow_vmcs12, @@ -14179,6 +14591,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, + .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt, .set_tss_addr = vmx_set_tss_addr, .set_identity_map_addr = vmx_set_identity_map_addr, @@ -14199,7 +14612,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .read_l1_tsc_offset = vmx_read_l1_tsc_offset, - .write_tsc_offset = vmx_write_tsc_offset, + .write_l1_tsc_offset = vmx_write_l1_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, @@ -14208,6 +14621,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .mpx_supported = vmx_mpx_supported, .xsaves_supported = vmx_xsaves_supported, .umip_emulated = vmx_umip_emulated, + .pku_supported = vmx_pku_supported, .check_nested_events = vmx_check_nested_events, .request_immediate_exit = vmx_request_immediate_exit, @@ -14342,6 +14756,8 @@ static int __init vmx_init(void) } } + vmx_setup_fb_clear_ctrl(); + #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca717737347e670d25ede51b1c84118fd43a3d9c..6afffdcc656e17237296890b29bbde696d974be0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -92,8 +92,10 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif -#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ +#define VCPU_STAT(x, ...) 
offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ +#define DFX_STAT(n, x, ...) \ + {n, offsetof(struct kvm_vcpu_stat, x), DFX_STAT_U64, ## __VA_ARGS__} #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) @@ -150,6 +152,10 @@ EXPORT_SYMBOL_GPL(enable_vmware_backdoor); static bool __read_mostly force_emulation_prefix = false; module_param(force_emulation_prefix, bool, S_IRUGO); +/* Enable/disable SMT_RSB bug mitigation */ +bool __read_mostly mitigate_smt_rsb; +module_param(mitigate_smt_rsb, bool, 0444); + #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { @@ -184,6 +190,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) }, { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) }, + { "preemption_reported", VCPU_STAT(preemption_reported) }, + { "preemption_other", VCPU_STAT(preemption_other) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, @@ -205,9 +213,54 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, - { "largepages", VM_STAT(lpages) }, + { "largepages", VM_STAT(lpages, .mode = 0444) }, + { "nx_largepages_splitted", VM_STAT(nx_lpage_splits, .mode = 0444) }, { "max_mmu_page_hash_collisions", VM_STAT(max_mmu_page_hash_collisions) }, + { "vcpu_stat", 0, KVM_STAT_DFX }, + { NULL } +}; + +/* debugfs entries of Detail For vcpu stat EXtension */ +struct dfx_kvm_stats_debugfs_item dfx_debugfs_entries[] = { + DFX_STAT("pid", pid), + DFX_STAT("pf_fixed", pf_fixed), + DFX_STAT("pf_guest", pf_guest), + DFX_STAT("tlb_flush", tlb_flush), + DFX_STAT("invlpg", invlpg), + DFX_STAT("exits", exits), + DFX_STAT("io_exits", io_exits), + DFX_STAT("mmio_exits", mmio_exits), + DFX_STAT("signal_exits", signal_exits), + DFX_STAT("irq_window", irq_window_exits), + DFX_STAT("nmi_window", nmi_window_exits), + DFX_STAT("halt_exits", halt_exits), + DFX_STAT("halt_successful_poll", halt_successful_poll), + DFX_STAT("halt_attempted_poll", halt_attempted_poll), + DFX_STAT("preemption_reported", preemption_reported), + DFX_STAT("preemption_other", preemption_other), + DFX_STAT("halt_wakeup", halt_wakeup), + DFX_STAT("request_irq", request_irq_exits), + DFX_STAT("irq_exits", irq_exits), + DFX_STAT("host_state_reload", host_state_reload), + DFX_STAT("fpu_reload", fpu_reload), + DFX_STAT("insn_emulation", insn_emulation), + DFX_STAT("insn_emulation_fail", insn_emulation_fail), + DFX_STAT("hypercalls", hypercalls), + DFX_STAT("irq_injections", irq_injections), + DFX_STAT("nmi_injections", nmi_injections), + DFX_STAT("cr_exits", cr_exits), + DFX_STAT("msr_rd_exits", msr_rd_exits), + DFX_STAT("msr_wr_exits", msr_wr_exits), + DFX_STAT("apic_wr_exits", apic_wr_exits), + DFX_STAT("ept_vio_exits", ept_vio_exits), + DFX_STAT("ept_mis_exits", ept_mis_exits), + DFX_STAT("pause_exits", pause_exits), + DFX_STAT("steal", steal), + DFX_STAT("st_max", st_max), + DFX_STAT("utime", utime), + DFX_STAT("stime", stime), + DFX_STAT("gtime", gtime), { NULL } }; @@ -289,13 +342,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); int err; - if (((value ^ smsr->values[slot].curr) & mask) == 0) + value = (value & mask) | (smsr->values[slot].host & 
~mask); + if (value == smsr->values[slot].curr) return 0; - smsr->values[slot].curr = value; err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); if (err) return 1; + smsr->values[slot].curr = value; if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); @@ -581,8 +635,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, data, offset, len, access); } +static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) +{ + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | + rsvd_bits(1, 2); +} + /* - * Load the pae pdptrs. Return true is they are all valid. + * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { @@ -601,8 +661,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && - (pdpte[i] & - vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { + (pdpte[i] & pdptr_rsvd_bits(vcpu))) { ret = 0; goto out; } @@ -628,7 +687,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) gfn_t gfn; int r; - if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu)) + if (!is_pae_paging(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, @@ -713,7 +772,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) } EXPORT_SYMBOL_GPL(kvm_lmsw); -static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) { if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { @@ -723,8 +782,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) vcpu->guest_xcr0_loaded = 1; } } +EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); -static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) { if (vcpu->guest_xcr0_loaded) { if (vcpu->arch.xcr0 != host_xcr0) @@ -732,6 +792,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) vcpu->guest_xcr0_loaded = 0; } } +EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0); static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { @@ -784,34 +845,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) } EXPORT_SYMBOL_GPL(kvm_set_xcr); -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - unsigned long old_cr4 = kvm_read_cr4(vcpu); - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; - if (cr4 & CR4_RESERVED_BITS) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP)) + return -EINVAL; + + return 0; +} + +int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +{ + unsigned long old_cr4 = kvm_read_cr4(vcpu); + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | + X86_CR4_SMEP | 
X86_CR4_SMAP | X86_CR4_PKE; + + if (kvm_valid_cr4(vcpu, cr4)) return 1; if (is_long_mode(vcpu)) { @@ -869,8 +938,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (is_long_mode(vcpu) && (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))) return 1; - else if (is_pae(vcpu) && is_paging(vcpu) && - !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) + else if (is_pae_paging(vcpu) && + !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush); @@ -1074,6 +1143,7 @@ static u32 emulated_msrs[] = { MSR_PLATFORM_INFO, MSR_MISC_FEATURES_ENABLES, MSR_AMD64_VIRT_SPEC_CTRL, + MSR_KVM_POLL_CONTROL, }; static unsigned num_emulated_msrs; @@ -1115,6 +1185,14 @@ u64 kvm_get_arch_capabilities(void) rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data); + /* + * If nx_huge_pages is enabled, KVM's shadow paging will ensure that + * the nested hypervisor runs with NX huge pages. If it is not, + * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other + * L1 guests, so it need not worry about its own (L2) guests. + */ + data |= ARCH_CAP_PSCHANGE_MC_NO; + /* * If we're doing cache flushes (either "always" or "cond") * we will do one whenever the guest does a vmlaunch/vmresume. @@ -1127,8 +1205,47 @@ u64 kvm_get_arch_capabilities(void) if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + data |= ARCH_CAP_RDCL_NO; + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + data |= ARCH_CAP_SSB_NO; + if (!boot_cpu_has_bug(X86_BUG_MDS)) + data |= ARCH_CAP_MDS_NO; + + /* + * On TAA affected systems, export MDS_NO=0 when: + * - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1. + * - Updated microcode is present. This is detected by + * the presence of ARCH_CAP_TSX_CTRL_MSR and ensures + * that VERW clears CPU buffers. + * + * When MDS_NO=0 is exported, guests deploy clear CPU buffer + * mitigation and don't complain: + * + * "Vulnerable: Clear CPU buffers attempted, no microcode" + * + * If TSX is disabled on the system, guests are also mitigated against + * TAA and clear CPU buffer mitigation is not required for guests. + */ + if (!boot_cpu_has(X86_FEATURE_RTM)) + data &= ~ARCH_CAP_TAA_NO; + else if (!boot_cpu_has_bug(X86_BUG_TAA)) + data |= ARCH_CAP_TAA_NO; + else if (data & ARCH_CAP_TSX_CTRL_MSR) + data &= ~ARCH_CAP_MDS_NO; + + /* KVM does not emulate MSR_IA32_TSX_CTRL. 
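+ * and so must not advertise the MSR's presence to the guest.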
*/ + data &= ~ARCH_CAP_TSX_CTRL_MSR; + + /* Guests don't need to know "Fill buffer clear control" exists */ + data &= ~ARCH_CAP_FB_CLEAR_CTRL; + + if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) + data |= ARCH_CAP_GDS_NO; + return data; } + EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); static int kvm_get_msr_feature(struct kvm_msr_entry *msr) @@ -1162,31 +1279,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) return 0; } -bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) +static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { - if (efer & efer_reserved_bits) - return false; - if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) - return false; + return false; if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) - return false; + return false; return true; + +} +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) +{ + if (efer & efer_reserved_bits) + return false; + + return __kvm_valid_efer(vcpu, efer); } EXPORT_SYMBOL_GPL(kvm_valid_efer); -static int set_efer(struct kvm_vcpu *vcpu, u64 efer) +static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { u64 old_efer = vcpu->arch.efer; + u64 efer = msr_info->data; - if (!kvm_valid_efer(vcpu, efer)) + if (efer & efer_reserved_bits) return 1; - if (is_paging(vcpu) - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) - return 1; + if (!msr_info->host_initiated) { + if (!__kvm_valid_efer(vcpu, efer)) + return 1; + + if (is_paging(vcpu) && + (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) + return 1; + } efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; @@ -1436,7 +1564,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) vcpu->arch.tsc_always_catchup = 1; return 0; } else { - WARN(1, "user requested TSC rate below hardware speed\n"); + pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); return -1; } } @@ -1446,8 +1574,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) user_tsc_khz, tsc_khz); if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) { - WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", - user_tsc_khz); + pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", + user_tsc_khz); return -1; } @@ -1582,8 +1710,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { - kvm_x86_ops->write_tsc_offset(vcpu, offset); - vcpu->arch.tsc_offset = offset; + vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset); } static inline bool kvm_check_tsc_unstable(void) @@ -1711,7 +1838,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) { - kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); + u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); + kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); } static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) @@ -2289,8 +2417,31 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa); } +static u64 accumulate_stat_steal_time(u64 *last_steal) +{ + u64 delta; + + if (*last_steal == 0) + delta = 0; + else + delta = current->sched_info.run_delay - *last_steal; + + *last_steal = current->sched_info.run_delay; + return delta; +} + +static void update_stat_steal_time(struct kvm_vcpu *vcpu) +{ + u64 delta; + + delta = 
accumulate_stat_steal_time(&vcpu->stat.steal); + vcpu->stat.st_max = max(vcpu->stat.st_max, delta); +} + static void record_steal_time(struct kvm_vcpu *vcpu) { + update_stat_steal_time(vcpu); + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -2343,14 +2494,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: break; case MSR_IA32_UCODE_REV: if (msr_info->host_initiated) vcpu->arch.microcode_version = data; break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vcpu->arch.arch_capabilities = data; + break; case MSR_EFER: - return set_efer(vcpu, data); + return set_efer(vcpu, msr_info); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ @@ -2477,8 +2634,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_PV_EOI_EN: - if (kvm_lapic_enable_pv_eoi(vcpu, data)) + if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) + return 1; + break; + + case MSR_KVM_POLL_CONTROL: + /* only enable bit supported */ + if (data & (-1ULL << 1)) return 1; + + vcpu->arch.msr_kvm_poll_control = data; break; case MSR_IA32_MCG_CTL: @@ -2638,6 +2803,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_BU_CFG2: case MSR_IA32_PERF_CTL: case MSR_AMD64_DC_CFG: + case MSR_F15H_EX_CFG: msr_info->data = 0; break; case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: @@ -2652,6 +2818,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UCODE_REV: msr_info->data = vcpu->arch.microcode_version; break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = vcpu->arch.arch_capabilities; + break; case MSR_IA32_TSC: msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; break; @@ -2724,6 +2896,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_KVM_PV_EOI_EN: msr_info->data = vcpu->arch.pv_eoi.msr_val; break; + case MSR_KVM_POLL_CONTROL: + msr_info->data = vcpu->arch.msr_kvm_poll_control; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: @@ -2923,7 +3098,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_TIME: case KVM_CAP_IOAPIC_POLARITY_IGNORED: case KVM_CAP_TSC_DEADLINE_TIMER: - case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_DISABLE_QUIRKS: case KVM_CAP_SET_BOOT_CPU_ID: case KVM_CAP_SPLIT_IRQCHIP: @@ -2939,9 +3113,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_CLOCK_TSC_STABLE; break; case KVM_CAP_X86_DISABLE_EXITS: - r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE; - if(kvm_can_mwait_in_guest()) - r |= KVM_X86_DISABLE_EXITS_MWAIT; + r = KVM_X86_DISABLE_EXITS_PAUSE; + + if (!mitigate_smt_rsb) { + r |= KVM_X86_DISABLE_EXITS_HLT; + + if (kvm_can_mwait_in_guest()) + r |= KVM_X86_DISABLE_EXITS_MWAIT; + } break; case KVM_CAP_X86_SMM: /* SMBASE is usually relocated above 1M on modern chipsets, @@ -2963,6 +3142,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_ID; + break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; @@ -3147,9 +3329,26 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static 
void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { + /* + * The vCPU can be marked preempted if and only if the VM-Exit was on + * an instruction boundary and will not trigger guest emulation of any + * kind (see vcpu_run). Vendor specific code controls (conservatively) + * when this is true, for example allowing the vCPU to be marked + * preempted if and only if the VM-Exit was due to a host interrupt. + */ + if (!vcpu->arch.at_instruction_boundary) { + vcpu->stat.preemption_other++; + return; + } + + vcpu->stat.preemption_reported++; + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; + if (vcpu->arch.st.steal.preempted) + return; + vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED; kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime, @@ -3480,12 +3679,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, { unsigned long val; + memset(dbgregs, 0, sizeof(*dbgregs)); memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); kvm_get_dr(vcpu, 6, &val); dbgregs->dr6 = val; dbgregs->dr7 = vcpu->arch.dr7; - dbgregs->flags = 0; - memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, @@ -4033,6 +4231,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_NESTED_STATE: { struct kvm_nested_state __user *user_kvm_nested_state = argp; struct kvm_nested_state kvm_state; + int idx; r = -EINVAL; if (!kvm_x86_ops->set_nested_state) @@ -4054,7 +4253,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) break; + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } default: @@ -4089,7 +4290,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, - u32 kvm_nr_mmu_pages) + unsigned long kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; @@ -4103,7 +4304,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, return 0; } -static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) +static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } @@ -4261,7 +4462,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { - bool is_dirty = false; + bool flush = false; int r; mutex_lock(&kvm->slots_lock); @@ -4272,14 +4473,41 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) if (kvm_x86_ops->flush_log_dirty) kvm_x86_ops->flush_log_dirty(kvm); - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); + r = kvm_get_dirty_log_protect(kvm, log, &flush); /* * All the TLBs can be flushed out of mmu lock, see the comments in * kvm_mmu_slot_remove_write_access(). */ lockdep_assert_held(&kvm->slots_lock); - if (is_dirty) + if (flush) + kvm_flush_remote_tlbs(kvm); + + mutex_unlock(&kvm->slots_lock); + return r; +} + +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) +{ + bool flush = false; + int r; + + mutex_lock(&kvm->slots_lock); + + /* + * Flush potentially hardware-cached dirty pages to dirty_bitmap. + */ + if (kvm_x86_ops->flush_log_dirty) + kvm_x86_ops->flush_log_dirty(kvm); + + r = kvm_clear_dirty_log_protect(kvm, log, &flush); + + /* + * All the TLBs can be flushed out of mmu lock, see the comments in + * kvm_mmu_slot_remove_write_access(). 
+ */ + lockdep_assert_held(&kvm->slots_lock); + if (flush) kvm_flush_remote_tlbs(kvm); mutex_unlock(&kvm->slots_lock); @@ -4298,8 +4526,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, return 0; } -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, - struct kvm_enable_cap *cap) +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap) { int r; @@ -4350,13 +4578,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) break; - if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && - kvm_can_mwait_in_guest()) - kvm->arch.mwait_in_guest = true; - if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) - kvm->arch.hlt_in_guest = true; if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) kvm->arch.pause_in_guest = true; + +#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \ + "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests." + + if (!mitigate_smt_rsb) { + if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() && + (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE)) + pr_warn_once(SMT_RSB_MSG); + + if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && + kvm_can_mwait_in_guest()) + kvm->arch.mwait_in_guest = true; + if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) + kvm->arch.hlt_in_guest = true; + } + r = 0; break; case KVM_CAP_MSR_PLATFORM_INFO: @@ -4628,15 +4867,6 @@ long kvm_arch_vm_ioctl(struct file *filp, r = 0; break; } - case KVM_ENABLE_CAP: { - struct kvm_enable_cap cap; - - r = -EFAULT; - if (copy_from_user(&cap, argp, sizeof(cap))) - goto out; - r = kvm_vm_ioctl_enable_cap(kvm, &cap); - break; - } case KVM_MEMORY_ENCRYPT_OP: { r = -ENOTTY; if (kvm_x86_ops->mem_enc_op) @@ -4902,6 +5132,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + /* + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED + * is returned, but our callers are not ready for that and they blindly + * call kvm_inject_page_fault. Ensure that they at least do not leak + * uninitialized kernel stack memory into cr2 and error code. + */ + memset(exception, 0, sizeof(*exception)); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } @@ -5038,7 +5275,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, */ if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu, vcpu->arch.walk_mmu, - vcpu->arch.access, 0, access)) { + vcpu->arch.mmio_access, 0, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); @@ -6201,8 +6438,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; - if (ctxt->have_exception && inject_emulated_exception(vcpu)) + if (ctxt->have_exception) { + /* + * #UD should result in just EMULATION_FAILED, and trap-like + * exception should not be encountered during decode. 
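+ * Either case would point to a bug in the emulator's decode stage,
+ * hence the WARN below.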
+ */ + WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || + exception_type(ctxt->exception.vector) == EXCPT_TRAP); + inject_emulated_exception(vcpu); return EMULATE_DONE; + } if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu, emulation_type); @@ -6274,13 +6519,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE && - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) - kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || - exception_type(ctxt->exception.vector) == EXCPT_TRAP) + exception_type(ctxt->exception.vector) == EXCPT_TRAP) { + kvm_rip_write(vcpu, ctxt->eip); + if (r == EMULATE_DONE && ctxt->tf) + kvm_vcpu_do_singlestep(vcpu, &r); __kvm_set_rflags(vcpu, ctxt->eflags); + } /* * For STI, interrupts are shadowed; so KVM_REQ_EVENT will @@ -6309,15 +6554,45 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); +static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pio.count = 0; + return 1; +} + +static int complete_fast_pio_out(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pio.count = 0; + + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) + return 1; + + return kvm_skip_emulated_instruction(vcpu); +} + static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); - /* do not return to emulator after return from userspace */ - vcpu->arch.pio.count = 0; - return ret; + if (ret) + return ret; + + /* + * Workaround userspace that relies on old KVM behavior of %rip being + * incremented prior to exiting to userspace to handle "OUT 0x7e". + */ + if (port == 0x7e && + kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { + vcpu->arch.complete_userspace_io = + complete_fast_pio_out_port_0x7e; + kvm_skip_emulated_instruction(vcpu); + } else { + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); + vcpu->arch.complete_userspace_io = complete_fast_pio_out; + } + return 0; } static int complete_fast_pio_in(struct kvm_vcpu *vcpu) @@ -6327,6 +6602,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) /* We should only ever be called with arch.pio.count equal to 1 */ BUG_ON(vcpu->arch.pio.count != 1); + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { + vcpu->arch.pio.count = 0; + return 1; + } + /* For size less than 4 we merge, else we zero extend */ val = (vcpu->arch.pio.size < 4) ? 
kvm_register_read(vcpu, VCPU_REGS_RAX) : 0; @@ -6339,7 +6619,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu) vcpu->arch.pio.port, &val, 1); kvm_register_write(vcpu, VCPU_REGS_RAX, val); - return 1; + return kvm_skip_emulated_instruction(vcpu); } static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, @@ -6358,6 +6638,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, return ret; } + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); vcpu->arch.complete_userspace_io = complete_fast_pio_in; return 0; @@ -6365,16 +6646,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) { - int ret = kvm_skip_emulated_instruction(vcpu); + int ret; - /* - * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered - * KVM_EXIT_DEBUG here. - */ if (in) - return kvm_fast_pio_in(vcpu, size, port) && ret; + ret = kvm_fast_pio_in(vcpu, size, port); else - return kvm_fast_pio_out(vcpu, size, port) && ret; + ret = kvm_fast_pio_out(vcpu, size, port); + return ret && kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_fast_pio); @@ -6405,7 +6683,7 @@ static void kvm_hyperv_tsc_notifier(void) struct kvm_vcpu *vcpu; int cpu; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_make_mclock_inprogress_request(kvm); @@ -6431,7 +6709,7 @@ static void kvm_hyperv_tsc_notifier(void) spin_unlock(&ka->pvclock_gtod_sync_lock); } - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); } #endif @@ -6489,17 +6767,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); - if (vcpu->cpu != smp_processor_id()) + if (vcpu->cpu != raw_smp_processor_id()) send_ipi = 1; } } - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* @@ -6588,35 +6866,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { .get_guest_ip = kvm_get_guest_ip, }; -static void kvm_set_mmio_spte_mask(void) -{ - u64 mask; - int maxphyaddr = boot_cpu_data.x86_phys_bits; - - /* - * Set the reserved bits and the present bit of an paging-structure - * entry to generate page fault with PFER.RSV = 1. - */ - - /* - * Mask the uppermost physical address bit, which would be reserved as - * long as the supported physical address width is less than 52. - */ - mask = 1ull << 51; - - /* Set the present bit. */ - mask |= 1ull; - - /* - * If reserved bit is not supported, clear the present bit to disable - * mmio page fault. 
- */ - if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52) - mask &= ~1ull; - - kvm_mmu_set_mmio_spte_mask(mask, mask); -} - #ifdef CONFIG_X86_64 static void pvclock_gtod_update_fn(struct work_struct *work) { @@ -6625,12 +6874,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work) struct kvm_vcpu *vcpu; int i; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); atomic_set(&kvm_guest_has_master_clock, 0); - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); @@ -6694,7 +6943,7 @@ int kvm_arch_init(void *opaque) if (r) goto out_free_percpu; - kvm_set_mmio_spte_mask(); + mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible(); kvm_x86_ops = ops; @@ -6788,6 +7037,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, clock_pairing.nsec = ts.tv_nsec; clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); clock_pairing.flags = 0; + memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); ret = 0; if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, @@ -6865,10 +7115,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) case KVM_HC_CLOCK_PAIRING: ret = kvm_pv_clock_pairing(vcpu, a0, a1); break; +#endif case KVM_HC_SEND_IPI: ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); break; -#endif default: ret = -KVM_ENOSYS; break; @@ -7157,9 +7407,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); } +#ifdef CONFIG_X86_64 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) { -#ifdef CONFIG_X86_64 struct desc_ptr dt; struct kvm_segment seg; unsigned long val; @@ -7209,10 +7459,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) for (i = 0; i < 6; i++) enter_smm_save_seg_64(vcpu, buf, i); -#else - WARN_ON_ONCE(1); -#endif } +#endif static void enter_smm(struct kvm_vcpu *vcpu) { @@ -7223,9 +7471,11 @@ static void enter_smm(struct kvm_vcpu *vcpu) trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); memset(buf, 0, 512); +#ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) enter_smm_save_state_64(vcpu, buf); else +#endif enter_smm_save_state_32(vcpu, buf); /* @@ -7283,8 +7533,10 @@ static void enter_smm(struct kvm_vcpu *vcpu) kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); +#ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) kvm_x86_ops->set_efer(vcpu, 0); +#endif kvm_update_cpuid(vcpu); kvm_mmu_reset_context(vcpu); @@ -7303,7 +7555,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm) static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { - if (!kvm_apic_hw_enabled(vcpu->arch.apic)) + if (!kvm_apic_present(vcpu)) return; bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); @@ -7313,7 +7565,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) else { if (vcpu->arch.apicv_active) kvm_x86_ops->sync_pir_to_irr(vcpu); - kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); + if (ioapic_in_kernel(vcpu->kvm)) + kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } if (is_guest_mode(vcpu)) @@ -7580,8 +7833,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto cancel_injection; } - kvm_load_guest_xcr0(vcpu); - if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_x86_ops->request_immediate_exit(vcpu); @@ -7634,8 +7885,6 @@ static int 
vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); - kvm_put_guest_xcr0(vcpu); - kvm_before_interrupt(vcpu); kvm_x86_ops->handle_external_intr(vcpu); kvm_after_interrupt(vcpu); @@ -7665,6 +7914,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.gpa_available = false; r = kvm_x86_ops->handle_exit(vcpu); + vcpu->stat.utime = current->utime; + vcpu->stat.stime = current->stime; + vcpu->stat.gtime = current->gtime; + return r; cancel_injection: @@ -7726,6 +7979,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.l1tf_flush_l1d = true; for (;;) { + /* + * If another guest vCPU requests a PV TLB flush in the middle + * of instruction emulation, the rest of the emulation could + * use a stale page translation. Assume that any code after + * this point can start executing an instruction. + */ + vcpu->arch.at_instruction_boundary = false; if (kvm_vcpu_running(vcpu)) { r = vcpu_enter_guest(vcpu); } else { @@ -8148,10 +8408,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch); static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - (sregs->cr4 & X86_CR4_OSXSAVE)) - return -EINVAL; - if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { /* * When EFER.LME and CR0.PG are set, the processor is in @@ -8170,7 +8426,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) return -EINVAL; } - return 0; + return kvm_valid_cr4(vcpu, sregs->cr4); } static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) @@ -8219,7 +8475,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); - if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) { + if (is_pae_paging(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } @@ -8475,6 +8731,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { + vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); @@ -8498,6 +8755,10 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); + + /* poll control enabled by default */ + vcpu->arch.msr_kvm_poll_control = 1; + mutex_unlock(&vcpu->mutex); if (!kvmclock_periodic_sync) @@ -8849,12 +9110,19 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { + int ret; + if (type) return -EINVAL; + ret = kvm_page_track_init(kvm); + if (ret) + return ret; + INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); + INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); @@ -8877,7 +9145,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); kvm_hv_init_vm(kvm); - kvm_page_track_init(kvm); kvm_mmu_init_vm(kvm); if (kvm_x86_ops->vm_init) @@ -8886,6 +9153,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; } +int kvm_arch_post_init_vm(struct kvm *kvm) +{ + return kvm_mmu_post_init_vm(kvm); +} + static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { vcpu_load(vcpu); @@ -8987,6 +9259,11 @@ int x86_set_memory_region(struct kvm *kvm, int 
id, gpa_t gpa, u32 size) } EXPORT_SYMBOL_GPL(x86_set_memory_region); +void kvm_arch_pre_destroy_vm(struct kvm *kvm) +{ + kvm_mmu_pre_destroy_vm(kvm); +} + void kvm_arch_destroy_vm(struct kvm *kvm) { if (current->mm == kvm->mm) { @@ -9098,13 +9375,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, return -ENOMEM; } -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) { /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. */ - kvm_mmu_invalidate_mmio_sptes(kvm, slots); + kvm_mmu_invalidate_mmio_sptes(kvm, gen); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -9120,7 +9397,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, { /* Still write protect RO slot */ if (new->flags & KVM_MEM_READONLY) { - kvm_mmu_slot_remove_write_access(kvm, new); + kvm_mmu_slot_remove_write_access(kvm, new, PT_PAGE_TABLE_LEVEL); return; } @@ -9155,10 +9432,23 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * See the comments in fast_page_fault(). */ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { - if (kvm_x86_ops->slot_enable_log_dirty) + if (kvm_x86_ops->slot_enable_log_dirty) { kvm_x86_ops->slot_enable_log_dirty(kvm, new); - else - kvm_mmu_slot_remove_write_access(kvm, new); + } else { + int level = + kvm_dirty_log_manual_protect_and_init_set(kvm) ? + PT_DIRECTORY_LEVEL : PT_PAGE_TABLE_LEVEL; + + /* + * If we're with initial-all-set, we don't need + * to write protect any small page because + * they're reported as dirty already. However + * we still need to write-protect huge pages + * so that the page split can happen lazily on + * the first write to the huge page. + */ + kvm_mmu_slot_remove_write_access(kvm, new, level); + } } else { if (kvm_x86_ops->slot_disable_log_dirty) kvm_x86_ops->slot_disable_log_dirty(kvm, new); @@ -9267,6 +9557,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) +{ + if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) + return true; + + if (kvm_test_request(KVM_REQ_NMI, vcpu) || + kvm_test_request(KVM_REQ_SMI, vcpu) || + kvm_test_request(KVM_REQ_EVENT, vcpu)) + return true; + + if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu)) + return true; + + return false; +} + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return vcpu->arch.preempted_in_kernel; @@ -9499,7 +9805,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); bool kvm_arch_has_assigned_device(struct kvm *kvm) { - return atomic_read(&kvm->arch.assigned_device_count); + return arch_atomic_read(&kvm->arch.assigned_device_count); } EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); @@ -9575,6 +9881,12 @@ bool kvm_vector_hashing_enabled(void) } EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled); +bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.msr_kvm_poll_control & 1) == 0; +} +EXPORT_SYMBOL_GPL(kvm_arch_no_poll); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 67b9568613f34abdcacc208b4f23ef1414a512eb..587c39f323f1b730766a843e9bd707255a088a60 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -139,6 +139,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu) return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG)); } +static inline bool 
is_pae_paging(struct kvm_vcpu *vcpu) +{ + return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu); +} + static inline u32 bit(int bitno) { return 1 << (bitno & 31); @@ -181,14 +186,19 @@ static inline bool emul_is_noncanonical_address(u64 la, static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access) { + u64 gen = kvm_memslots(vcpu->kvm)->generation; + + if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)) + return; + /* * If this is a shadow nested page table, the "GVA" is * actually a nGPA. */ vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; - vcpu->arch.access = access; + vcpu->arch.mmio_access = access; vcpu->arch.mmio_gfn = gfn; - vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; + vcpu->arch.mmio_gen = gen; } static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu) @@ -340,4 +350,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu) __this_cpu_write(current_vcpu, NULL); } + +static inline bool kvm_pat_valid(u64 data) +{ + if (data & 0xF8F8F8F8F8F8F8F8ull) + return false; + /* 0, 1, 4, 5, 6, 7 are valid values. */ + return (data | ((data & 0x0202020202020202ull) << 1)) == data; +} + +void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); +void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); + #endif diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 25a972c61b0ae9816a817eb9681f4cd374e9e32a..3c19d60316a88c05717c2ed4d679e0b7a1b339c9 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -6,6 +6,18 @@ # Produces uninteresting flaky coverage. KCOV_INSTRUMENT_delay.o := n +# Early boot use of cmdline; don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KCOV_INSTRUMENT_cmdline.o := n +KASAN_SANITIZE_cmdline.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_cmdline.o = -pg +endif + +CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector) +endif + inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt quiet_cmd_inat_tables = GEN $@ diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 46e71a74e6129b861c233ccf86f452b485e62d59..ad8e0906d1ea2f7d7d81c46d933184f8d24c92be 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -273,11 +273,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, #define SRC(y...) \ 9999: y; \ - _ASM_EXTABLE(9999b, 6001f) + _ASM_EXTABLE_UA(9999b, 6001f) #define DST(y...) \ 9999: y; \ - _ASM_EXTABLE(9999b, 6002f) + _ASM_EXTABLE_UA(9999b, 6002f) #ifndef CONFIG_X86_USE_PPRO_CHECKSUM diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 020f75cc8cf6a8508ecf2b891c3a79559fade666..944fcc662f78a35a75a54b2fd43366c17b64a50b 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -16,6 +16,30 @@ #include #include +.macro ALIGN_DESTINATION + /* check for bad alignment of destination */ + movl %edi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +100: movb (%rsi),%al +101: movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 100b +102: + .section .fixup,"ax" +103: addl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + _ASM_EXTABLE_UA(100b, 103b) + _ASM_EXTABLE_UA(101b, 103b) + .endm + /* * copy_user_generic_unrolled - memory copy with exception handling. 
* This version is for CPUs like P4 that don't have efficient micro @@ -92,26 +116,26 @@ ENTRY(copy_user_generic_unrolled) 60: jmp copy_user_handle_tail /* ecx is zerorest also */ .previous - _ASM_EXTABLE(1b,30b) - _ASM_EXTABLE(2b,30b) - _ASM_EXTABLE(3b,30b) - _ASM_EXTABLE(4b,30b) - _ASM_EXTABLE(5b,30b) - _ASM_EXTABLE(6b,30b) - _ASM_EXTABLE(7b,30b) - _ASM_EXTABLE(8b,30b) - _ASM_EXTABLE(9b,30b) - _ASM_EXTABLE(10b,30b) - _ASM_EXTABLE(11b,30b) - _ASM_EXTABLE(12b,30b) - _ASM_EXTABLE(13b,30b) - _ASM_EXTABLE(14b,30b) - _ASM_EXTABLE(15b,30b) - _ASM_EXTABLE(16b,30b) - _ASM_EXTABLE(18b,40b) - _ASM_EXTABLE(19b,40b) - _ASM_EXTABLE(21b,50b) - _ASM_EXTABLE(22b,50b) + _ASM_EXTABLE_UA(1b,30b) + _ASM_EXTABLE_UA(2b,30b) + _ASM_EXTABLE_UA(3b,30b) + _ASM_EXTABLE_UA(4b,30b) + _ASM_EXTABLE_UA(5b,30b) + _ASM_EXTABLE_UA(6b,30b) + _ASM_EXTABLE_UA(7b,30b) + _ASM_EXTABLE_UA(8b,30b) + _ASM_EXTABLE_UA(9b,30b) + _ASM_EXTABLE_UA(10b,30b) + _ASM_EXTABLE_UA(11b,30b) + _ASM_EXTABLE_UA(12b,30b) + _ASM_EXTABLE_UA(13b,30b) + _ASM_EXTABLE_UA(14b,30b) + _ASM_EXTABLE_UA(15b,30b) + _ASM_EXTABLE_UA(16b,30b) + _ASM_EXTABLE_UA(18b,40b) + _ASM_EXTABLE_UA(19b,40b) + _ASM_EXTABLE_UA(21b,50b) + _ASM_EXTABLE_UA(22b,50b) ENDPROC(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) @@ -156,8 +180,8 @@ ENTRY(copy_user_generic_string) jmp copy_user_handle_tail .previous - _ASM_EXTABLE(1b,11b) - _ASM_EXTABLE(3b,12b) + _ASM_EXTABLE_UA(1b,11b) + _ASM_EXTABLE_UA(3b,12b) ENDPROC(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) @@ -189,10 +213,34 @@ ENTRY(copy_user_enhanced_fast_string) jmp copy_user_handle_tail .previous - _ASM_EXTABLE(1b,12b) + _ASM_EXTABLE_UA(1b,12b) ENDPROC(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) +/* + * Try to copy last bytes and clear the rest if needed. + * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +ALIGN; +copy_user_handle_tail: + movl %edx,%ecx +1: rep movsb +2: mov %ecx,%eax + ASM_CLAC + ret + + _ASM_EXTABLE_UA(1b, 2b) +ENDPROC(copy_user_handle_tail) + /* * copy_user_nocache - Uncached memory copy with exception handling * This will force destination out of cache for more performance. 
@@ -319,27 +367,27 @@ ENTRY(__copy_user_nocache) jmp copy_user_handle_tail .previous - _ASM_EXTABLE(1b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(2b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(3b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(4b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(5b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(6b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(7b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(8b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(9b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(10b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(11b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(12b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(13b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(14b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(15b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(16b,.L_fixup_4x8b_copy) - _ASM_EXTABLE(20b,.L_fixup_8b_copy) - _ASM_EXTABLE(21b,.L_fixup_8b_copy) - _ASM_EXTABLE(30b,.L_fixup_4b_copy) - _ASM_EXTABLE(31b,.L_fixup_4b_copy) - _ASM_EXTABLE(40b,.L_fixup_1b_copy) - _ASM_EXTABLE(41b,.L_fixup_1b_copy) + _ASM_EXTABLE_UA(1b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(2b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(3b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(4b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(5b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(6b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(7b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(8b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(9b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(10b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(11b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(12b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(13b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(14b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(15b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(16b,.L_fixup_4x8b_copy) + _ASM_EXTABLE_UA(20b,.L_fixup_8b_copy) + _ASM_EXTABLE_UA(21b,.L_fixup_8b_copy) + _ASM_EXTABLE_UA(30b,.L_fixup_4b_copy) + _ASM_EXTABLE_UA(31b,.L_fixup_4b_copy) + _ASM_EXTABLE_UA(40b,.L_fixup_1b_copy) + _ASM_EXTABLE_UA(41b,.L_fixup_1b_copy) ENDPROC(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index 2dd1fe13a37b36aacfeca12733178f62a89ba309..19f707992db22b3d2fdc5f8c9be85f63bf7aeb93 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -1,5 +1,6 @@ #include #include +#include unsigned int x86_family(unsigned int sig) { diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 45a53dfe1859b1955fa91bad9bf1543903f1c1a8..a4a379e79259d1dfb0230372c792b144b4441b0b 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -31,14 +31,18 @@ .macro source 10: - _ASM_EXTABLE(10b, .Lbad_source) + _ASM_EXTABLE_UA(10b, .Lbad_source) .endm .macro dest 20: - _ASM_EXTABLE(20b, .Lbad_dest) + _ASM_EXTABLE_UA(20b, .Lbad_dest) .endm + /* + * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a + * potentially unmapped kernel address. 
+ */ .macro ignore L=.Lignore 30: _ASM_EXTABLE(30b, \L) diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 8bd53589ecfb93337c920d254e9307617bc21a32..a6a2b7dccbfff163ff650fce42f422c5bf20cc7e 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -27,7 +27,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, might_sleep(); *errp = 0; - if (!likely(access_ok(VERIFY_READ, src, len))) + if (!likely(access_ok(src, len))) goto out_err; /* @@ -89,7 +89,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst, might_sleep(); - if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { + if (unlikely(!access_ok(dst, len))) { *errp = -EFAULT; return 0; } diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index f5b7f1b3b6d75ce6024f7ab7459586b1f124902d..68ca883abfdb00cd3e8307467b24dbc56086f7ee 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -43,8 +43,8 @@ static void delay_loop(unsigned long loops) " jnz 2b \n" "3: dec %0 \n" - : /* we don't need output */ - :"a" (loops) + : "+a" (loops) + : ); } @@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops) __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0); /* - * AMD, like Intel, supports the EAX hint and EAX=0xf - * means, do not enter any deep C-state and we use it + * AMD, like Intel's MWAIT version, supports the EAX hint and + * EAX=0xf0 means, do not enter any deep C-state and we use it * here in delay() to minimize wakeup latency. */ __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 49b167f732151e2eda4d450ee8cb078ae64a2c24..74fdff968ea3b14ce05db05f65fe59dc6ae89154 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -132,12 +132,12 @@ bad_get_user_8: END(bad_get_user_8) #endif - _ASM_EXTABLE(1b,bad_get_user) - _ASM_EXTABLE(2b,bad_get_user) - _ASM_EXTABLE(3b,bad_get_user) + _ASM_EXTABLE_UA(1b, bad_get_user) + _ASM_EXTABLE_UA(2b, bad_get_user) + _ASM_EXTABLE_UA(3b, bad_get_user) #ifdef CONFIG_X86_64 - _ASM_EXTABLE(4b,bad_get_user) + _ASM_EXTABLE_UA(4b, bad_get_user) #else - _ASM_EXTABLE(4b,bad_get_user_8) - _ASM_EXTABLE(5b,bad_get_user_8) + _ASM_EXTABLE_UA(4b, bad_get_user_8) + _ASM_EXTABLE_UA(5b, bad_get_user_8) #endif diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 9119d8e41f1ff59e2c8584a36f0f03d000bb1bbe..ec1670db5e4d984588ff6156f0c59c723776896b 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -155,7 +155,7 @@ static bool check_seg_overrides(struct insn *insn, int regoff) */ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; /* * Resolve the default segment register as described in Section 3.7.4 @@ -264,7 +264,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) * which may be invalid at this point. */ if (regoff == offsetof(struct pt_regs, ip)) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) return INAT_SEG_REG_IGNORE; else return INAT_SEG_REG_CS; @@ -287,7 +287,7 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff) * In long mode, segment override prefixes are ignored, except for * overrides for FS and GS. 
*/ - if (user_64bit_mode(regs)) { + if (any_64bit_mode(regs)) { if (idx != INAT_SEG_REG_FS && idx != INAT_SEG_REG_GS) idx = INAT_SEG_REG_IGNORE; @@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, } /** - * get_desc() - Obtain pointer to a segment descriptor + * get_desc() - Obtain contents of a segment descriptor + * @out: Segment descriptor contents on success * @sel: Segment selector * * Given a segment selector, obtain a pointer to the segment descriptor. @@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs, * * Returns: * - * Pointer to segment descriptor on success. + * True on success, false on failure. * * NULL on error. */ -static struct desc_struct *get_desc(unsigned short sel) +static bool get_desc(struct desc_struct *out, unsigned short sel) { struct desc_ptr gdt_desc = {0, 0}; unsigned long desc_base; #ifdef CONFIG_MODIFY_LDT_SYSCALL if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) { - struct desc_struct *desc = NULL; + bool success = false; struct ldt_struct *ldt; /* Bits [15:3] contain the index of the desired entry. */ @@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel) mutex_lock(¤t->active_mm->context.lock); ldt = current->active_mm->context.ldt; - if (ldt && sel < ldt->nr_entries) - desc = &ldt->entries[sel]; + if (ldt && sel < ldt->nr_entries) { + *out = ldt->entries[sel]; + success = true; + } mutex_unlock(¤t->active_mm->context.lock); - return desc; + return success; } #endif native_store_gdt(&gdt_desc); @@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel) desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK); if (desc_base > gdt_desc.size) - return NULL; + return false; - return (struct desc_struct *)(gdt_desc.address + desc_base); + *out = *(struct desc_struct *)(gdt_desc.address + desc_base); + return true; } /** @@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel) */ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) { - struct desc_struct *desc; + struct desc_struct desc; short sel; sel = get_segment_selector(regs, seg_reg_idx); @@ -640,23 +644,27 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) */ return (unsigned long)(sel << 4); - if (user_64bit_mode(regs)) { + if (any_64bit_mode(regs)) { /* * Only FS or GS will have a base address, the rest of * the segments' bases are forced to 0. */ unsigned long base; - if (seg_reg_idx == INAT_SEG_REG_FS) + if (seg_reg_idx == INAT_SEG_REG_FS) { rdmsrl(MSR_FS_BASE, base); - else if (seg_reg_idx == INAT_SEG_REG_GS) + } else if (seg_reg_idx == INAT_SEG_REG_GS) { /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. 
*/ - rdmsrl(MSR_KERNEL_GS_BASE, base); - else + if (user_mode(regs)) + rdmsrl(MSR_KERNEL_GS_BASE, base); + else + rdmsrl(MSR_GS_BASE, base); + } else { base = 0; + } return base; } @@ -664,11 +672,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) if (!sel) return -1L; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return -1L; - return get_desc_base(desc); + return get_desc_base(&desc); } /** @@ -690,7 +697,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) */ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) { - struct desc_struct *desc; + struct desc_struct desc; unsigned long limit; short sel; @@ -698,14 +705,13 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) if (sel < 0) return 0; - if (user_64bit_mode(regs) || v8086_mode(regs)) + if (any_64bit_mode(regs) || v8086_mode(regs)) return -1L; if (!sel) return 0; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return 0; /* @@ -714,8 +720,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) * not tested when checking the segment limits. In practice, * this means that the segment ends in (limit << 12) + 0xfff. */ - limit = get_desc_limit(desc); - if (desc->g) + limit = get_desc_limit(&desc); + if (desc.g) limit = (limit << 12) + 0xfff; return limit; @@ -739,7 +745,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx) */ int insn_get_code_seg_params(struct pt_regs *regs) { - struct desc_struct *desc; + struct desc_struct desc; short sel; if (v8086_mode(regs)) @@ -750,8 +756,7 @@ int insn_get_code_seg_params(struct pt_regs *regs) if (sel < 0) return sel; - desc = get_desc(sel); - if (!desc) + if (!get_desc(&desc, sel)) return -EINVAL; /* @@ -759,10 +764,10 @@ int insn_get_code_seg_params(struct pt_regs *regs) * determines whether a segment contains data or code. If this is a data * segment, return error. */ - if (!(desc->type & BIT(3))) + if (!(desc.type & BIT(3))) return -EINVAL; - switch ((desc->l << 1) | desc->d) { + switch ((desc.l << 1) | desc.d) { case 0: /* * Legacy mode. CS.L=0, CS.D=0. Address and operand size are * both 16-bit. @@ -945,7 +950,7 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, * following instruction. */ if (*regoff == -EDOM) { - if (user_64bit_mode(regs)) + if (any_64bit_mode(regs)) tmp = regs->ip + insn->length; else tmp = 0; @@ -1247,7 +1252,7 @@ static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) * After computed, the effective address is treated as an unsigned * quantity. 
*/ - if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) + if (!any_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit)) goto out; /* diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index 79778ab200e494c16cac7441cdb5da9507940715..a536651164584c96a19b1455f743834291a355c6 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c @@ -36,8 +36,8 @@ static inline u16 i8254(void) u16 status, timer; do { - outb(I8254_PORT_CONTROL, - I8254_CMD_READBACK | I8254_SELECT_COUNTER0); + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, + I8254_PORT_CONTROL); status = inb(I8254_PORT_COUNTER0); timer = inb(I8254_PORT_COUNTER0); timer |= inb(I8254_PORT_COUNTER0) << 8; diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 3b24dc05251c7ce908cc2be48befb971b5b8f564..9d05572370edc40f234f2813f5fc1c82020ad94f 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe) /* Copy successful. Return zero */ .L_done_memcpy_trap: xorl %eax, %eax +.L_done: ret ENDPROC(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) @@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe) addl %edx, %ecx .E_trailing_bytes: mov %ecx, %eax - ret + jmp .L_done /* * For write fault handling, given the destination is unaligned, diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 96dce5fe2a35f49c92c6e44a3273567931227ceb..d2e5c9c396018e7e255ad0cdbfc5d5a1552d6e8c 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -94,10 +94,10 @@ bad_put_user: EXIT END(bad_put_user) - _ASM_EXTABLE(1b,bad_put_user) - _ASM_EXTABLE(2b,bad_put_user) - _ASM_EXTABLE(3b,bad_put_user) - _ASM_EXTABLE(4b,bad_put_user) + _ASM_EXTABLE_UA(1b, bad_put_user) + _ASM_EXTABLE_UA(2b, bad_put_user) + _ASM_EXTABLE_UA(3b, bad_put_user) + _ASM_EXTABLE_UA(4b, bad_put_user) #ifdef CONFIG_X86_32 - _ASM_EXTABLE(5b,bad_put_user) + _ASM_EXTABLE_UA(5b, bad_put_user) #endif diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 7add8ba06887ef158bb819618c78ee53581f4835..bfd94e7812fcb8642817784ab7be3032fa95ef37 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -47,8 +47,8 @@ do { \ "3: lea 0(%2,%0,4),%0\n" \ " jmp 2b\n" \ ".previous\n" \ - _ASM_EXTABLE(0b,3b) \ - _ASM_EXTABLE(1b,2b) \ + _ASM_EXTABLE_UA(0b, 3b) \ + _ASM_EXTABLE_UA(1b, 2b) \ : "=&c"(size), "=&D" (__d0) \ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ } while (0) @@ -67,7 +67,7 @@ unsigned long clear_user(void __user *to, unsigned long n) { might_fault(); - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(to, n)) __do_clear_user(to, n); return n; } @@ -153,44 +153,44 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" ".previous\n" - _ASM_EXTABLE(1b,100b) - _ASM_EXTABLE(2b,100b) - _ASM_EXTABLE(3b,100b) - _ASM_EXTABLE(4b,100b) - _ASM_EXTABLE(5b,100b) - _ASM_EXTABLE(6b,100b) - _ASM_EXTABLE(7b,100b) - _ASM_EXTABLE(8b,100b) - _ASM_EXTABLE(9b,100b) - _ASM_EXTABLE(10b,100b) - _ASM_EXTABLE(11b,100b) - _ASM_EXTABLE(12b,100b) - _ASM_EXTABLE(13b,100b) - _ASM_EXTABLE(14b,100b) - _ASM_EXTABLE(15b,100b) - _ASM_EXTABLE(16b,100b) - _ASM_EXTABLE(17b,100b) - _ASM_EXTABLE(18b,100b) - _ASM_EXTABLE(19b,100b) - _ASM_EXTABLE(20b,100b) - _ASM_EXTABLE(21b,100b) - _ASM_EXTABLE(22b,100b) - _ASM_EXTABLE(23b,100b) - _ASM_EXTABLE(24b,100b) - _ASM_EXTABLE(25b,100b) - _ASM_EXTABLE(26b,100b) - _ASM_EXTABLE(27b,100b) - _ASM_EXTABLE(28b,100b) - _ASM_EXTABLE(29b,100b) - _ASM_EXTABLE(30b,100b) - 
_ASM_EXTABLE(31b,100b) - _ASM_EXTABLE(32b,100b) - _ASM_EXTABLE(33b,100b) - _ASM_EXTABLE(34b,100b) - _ASM_EXTABLE(35b,100b) - _ASM_EXTABLE(36b,100b) - _ASM_EXTABLE(37b,100b) - _ASM_EXTABLE(99b,101b) + _ASM_EXTABLE_UA(1b, 100b) + _ASM_EXTABLE_UA(2b, 100b) + _ASM_EXTABLE_UA(3b, 100b) + _ASM_EXTABLE_UA(4b, 100b) + _ASM_EXTABLE_UA(5b, 100b) + _ASM_EXTABLE_UA(6b, 100b) + _ASM_EXTABLE_UA(7b, 100b) + _ASM_EXTABLE_UA(8b, 100b) + _ASM_EXTABLE_UA(9b, 100b) + _ASM_EXTABLE_UA(10b, 100b) + _ASM_EXTABLE_UA(11b, 100b) + _ASM_EXTABLE_UA(12b, 100b) + _ASM_EXTABLE_UA(13b, 100b) + _ASM_EXTABLE_UA(14b, 100b) + _ASM_EXTABLE_UA(15b, 100b) + _ASM_EXTABLE_UA(16b, 100b) + _ASM_EXTABLE_UA(17b, 100b) + _ASM_EXTABLE_UA(18b, 100b) + _ASM_EXTABLE_UA(19b, 100b) + _ASM_EXTABLE_UA(20b, 100b) + _ASM_EXTABLE_UA(21b, 100b) + _ASM_EXTABLE_UA(22b, 100b) + _ASM_EXTABLE_UA(23b, 100b) + _ASM_EXTABLE_UA(24b, 100b) + _ASM_EXTABLE_UA(25b, 100b) + _ASM_EXTABLE_UA(26b, 100b) + _ASM_EXTABLE_UA(27b, 100b) + _ASM_EXTABLE_UA(28b, 100b) + _ASM_EXTABLE_UA(29b, 100b) + _ASM_EXTABLE_UA(30b, 100b) + _ASM_EXTABLE_UA(31b, 100b) + _ASM_EXTABLE_UA(32b, 100b) + _ASM_EXTABLE_UA(33b, 100b) + _ASM_EXTABLE_UA(34b, 100b) + _ASM_EXTABLE_UA(35b, 100b) + _ASM_EXTABLE_UA(36b, 100b) + _ASM_EXTABLE_UA(37b, 100b) + _ASM_EXTABLE_UA(99b, 101b) : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); @@ -259,26 +259,26 @@ static unsigned long __copy_user_intel_nocache(void *to, "9: lea 0(%%eax,%0,4),%0\n" "16: jmp 8b\n" ".previous\n" - _ASM_EXTABLE(0b,16b) - _ASM_EXTABLE(1b,16b) - _ASM_EXTABLE(2b,16b) - _ASM_EXTABLE(21b,16b) - _ASM_EXTABLE(3b,16b) - _ASM_EXTABLE(31b,16b) - _ASM_EXTABLE(4b,16b) - _ASM_EXTABLE(41b,16b) - _ASM_EXTABLE(10b,16b) - _ASM_EXTABLE(51b,16b) - _ASM_EXTABLE(11b,16b) - _ASM_EXTABLE(61b,16b) - _ASM_EXTABLE(12b,16b) - _ASM_EXTABLE(71b,16b) - _ASM_EXTABLE(13b,16b) - _ASM_EXTABLE(81b,16b) - _ASM_EXTABLE(14b,16b) - _ASM_EXTABLE(91b,16b) - _ASM_EXTABLE(6b,9b) - _ASM_EXTABLE(7b,16b) + _ASM_EXTABLE_UA(0b, 16b) + _ASM_EXTABLE_UA(1b, 16b) + _ASM_EXTABLE_UA(2b, 16b) + _ASM_EXTABLE_UA(21b, 16b) + _ASM_EXTABLE_UA(3b, 16b) + _ASM_EXTABLE_UA(31b, 16b) + _ASM_EXTABLE_UA(4b, 16b) + _ASM_EXTABLE_UA(41b, 16b) + _ASM_EXTABLE_UA(10b, 16b) + _ASM_EXTABLE_UA(51b, 16b) + _ASM_EXTABLE_UA(11b, 16b) + _ASM_EXTABLE_UA(61b, 16b) + _ASM_EXTABLE_UA(12b, 16b) + _ASM_EXTABLE_UA(71b, 16b) + _ASM_EXTABLE_UA(13b, 16b) + _ASM_EXTABLE_UA(81b, 16b) + _ASM_EXTABLE_UA(14b, 16b) + _ASM_EXTABLE_UA(91b, 16b) + _ASM_EXTABLE_UA(6b, 9b) + _ASM_EXTABLE_UA(7b, 16b) : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); @@ -321,9 +321,9 @@ do { \ "3: lea 0(%3,%0,4),%0\n" \ " jmp 2b\n" \ ".previous\n" \ - _ASM_EXTABLE(4b,5b) \ - _ASM_EXTABLE(0b,3b) \ - _ASM_EXTABLE(1b,2b) \ + _ASM_EXTABLE_UA(4b, 5b) \ + _ASM_EXTABLE_UA(0b, 3b) \ + _ASM_EXTABLE_UA(1b, 2b) \ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ : "3"(size), "0"(size), "1"(to), "2"(from) \ : "memory"); \ diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 9c5606d88f618f9ef5b8594027d2513f257a1d9f..f80386bfa3a98935120866f136e7e09dbd0bd89d 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -37,8 +37,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size) "3: lea 0(%[size1],%[size8],8),%[size8]\n" " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(0b,3b) - _ASM_EXTABLE(1b,2b) + _ASM_EXTABLE_UA(0b, 3b) + _ASM_EXTABLE_UA(1b, 2b) : [size8] "=&c"(size), [dst] "=&D" (__d0) : [size1] "r"(size & 7), 
"[size8]" (size / 8), "[dst]"(addr)); clac(); @@ -48,32 +48,12 @@ EXPORT_SYMBOL(__clear_user); unsigned long clear_user(void __user *to, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) + if (access_ok(to, n)) return __clear_user(to, n); return n; } EXPORT_SYMBOL(clear_user); -/* - * Try to copy last bytes and clear the rest if needed. - * Since protection fault in copy_from/to_user is not a normal situation, - * it is not necessary to optimize tail handling. - */ -__visible unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len) -{ - for (; len; --len, to++) { - char c; - - if (__get_user_nocheck(c, from++, sizeof(char))) - break; - if (__put_user_nocheck(c, to, sizeof(char))) - break; - } - clac(); - return len; -} - /* * Similar to copy_user_handle_tail, probe for the write fault point, * but reuse __memcpy_mcsafe in case a new read error is encountered. @@ -160,7 +140,7 @@ void memcpy_flushcache(void *_dst, const void *_src, size_t size) /* cache copy and flush to align dest */ if (!IS_ALIGNED(dest, 8)) { - unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest); + size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest); memcpy((void *) dest, (void *) source, len); clean_cache_range((void *) dest, len); diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index e0b85930dd773e87417e2b4957b8af61221b04c0..0a0e9112f2842e5b40e4e2284b2c061c4d98370a 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -333,7 +333,7 @@ AVXcode: 1 06: CLTS 07: SYSRET (o64) 08: INVD -09: WBINVD +09: WBINVD | WBNOINVD (F3) 0a: 0b: UD2 (1B) 0c: @@ -364,7 +364,7 @@ AVXcode: 1 # a ModR/M byte. 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv -1c: +1c: Grp20 (1A),(1C) 1d: 1e: 1f: NOP Ev @@ -792,6 +792,8 @@ f3: Grp17 (1A) f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) +f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3) +f9: MOVDIRI My,Gy EndTable Table: 3-byte opcode 2 (0x0f 0x3a) @@ -943,9 +945,9 @@ GrpTable: Grp6 EndTable GrpTable: Grp7 -0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) -1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) -2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B) 3: LIDT Ms 4: SMSW Mw/Rv 5: rdpkru (110),(11B) | wrpkru (111),(11B) @@ -1020,7 +1022,7 @@ GrpTable: Grp15 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 4: XSAVE | ptwrite Ey (F3),(11B) 5: XRSTOR | lfence (11B) -6: XSAVEOPT | clwb (66) | mfence (11B) +6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B) 7: clflush | clflushopt (66) | sfence (11B) EndTable @@ -1051,6 +1053,10 @@ GrpTable: Grp19 6: vscatterpf1qps/d Wx (66),(ev) EndTable +GrpTable: Grp20 +0: 
cldemote Mb +EndTable + # AMD's Prefetch Group GrpTable: GrpP 0: PREFETCH diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h index a5a41ec5807211d8cb634f2ef009a9ad867da4a4..0c122226ca56f5b2e98549868939ea2457ef5adf 100644 --- a/arch/x86/math-emu/fpu_emu.h +++ b/arch/x86/math-emu/fpu_emu.h @@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y) #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \ ((y) + EXTENDED_Ebias) & 0x7fff; } #define exponent16(x) (*(short *)&((x)->exp)) -#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (y); } +#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (u16)(y); } #define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); } #define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; } diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index c8b1b31ed7c44b9bcfc0b570d5643e80655147ab..f98a0c956764687e9afc8a982edc5e4f40d61aee 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h @@ -104,7 +104,7 @@ static inline bool seg_writable(struct desc_struct *d) #define instruction_address (*(struct address *)&I387->soft.fip) #define operand_address (*(struct address *)&I387->soft.foo) -#define FPU_access_ok(x,y,z) if ( !access_ok(x,y,z) ) \ +#define FPU_access_ok(y,z) if ( !access_ok(y,z) ) \ math_abort(FPU_info,SIGSEGV) #define FPU_abort math_abort(FPU_info, SIGSEGV) @@ -119,7 +119,7 @@ static inline bool seg_writable(struct desc_struct *d) /* A simpler test than access_ok() can probably be done for FPU_code_access_ok() because the only possible error is to step past the upper boundary of a legal code area. */ -#define FPU_code_access_ok(z) FPU_access_ok(VERIFY_READ,(void __user *)FPU_EIP,z) +#define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z) #endif #define FPU_get_user(x,y) get_user((x),(y)) diff --git a/arch/x86/math-emu/load_store.c b/arch/x86/math-emu/load_store.c index f821a9cd7753c8940901911472423e6f280b2995..f15263e158e8e669d82398bd40b3f2f8415cd88d 100644 --- a/arch/x86/math-emu/load_store.c +++ b/arch/x86/math-emu/load_store.c @@ -251,7 +251,7 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes, break; case 024: /* fldcw */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, data_address, 2); + FPU_access_ok(data_address, 2); FPU_get_user(control_word, (unsigned short __user *)data_address); RE_ENTRANT_CHECK_ON; @@ -291,7 +291,7 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes, break; case 034: /* fstcw m16int */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, data_address, 2); + FPU_access_ok(data_address, 2); FPU_put_user(control_word, (unsigned short __user *)data_address); RE_ENTRANT_CHECK_ON; @@ -305,7 +305,7 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes, break; case 036: /* fstsw m2byte */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, data_address, 2); + FPU_access_ok(data_address, 2); FPU_put_user(status_word(), (unsigned short __user *)data_address); RE_ENTRANT_CHECK_ON; diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c index 8dc9095bab224daab73338b7551d26b71319b9a2..742619e94bdf281a0dc19a36b1fe2b5a63a19a7b 100644 --- a/arch/x86/math-emu/reg_constant.c +++ b/arch/x86/math-emu/reg_constant.c @@ -18,7 +18,7 @@ #include "control_w.h" #define MAKE_REG(s, e, l, h) { l, h, \ - ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) } + (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) } FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000); #if 0 
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c index d40ff45497b9bb554aa14d49fe5d7f15da660b97..f3779743d15e695fae569b969825857c38610a4a 100644 --- a/arch/x86/math-emu/reg_ld_str.c +++ b/arch/x86/math-emu/reg_ld_str.c @@ -84,7 +84,7 @@ int FPU_load_extended(long double __user *s, int stnr) FPU_REG *sti_ptr = &st(stnr); RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, s, 10); + FPU_access_ok(s, 10); __copy_from_user(sti_ptr, s, 10); RE_ENTRANT_CHECK_ON; @@ -98,7 +98,7 @@ int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data) unsigned m64, l64; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, dfloat, 8); + FPU_access_ok(dfloat, 8); FPU_get_user(m64, 1 + (unsigned long __user *)dfloat); FPU_get_user(l64, (unsigned long __user *)dfloat); RE_ENTRANT_CHECK_ON; @@ -159,7 +159,7 @@ int FPU_load_single(float __user *single, FPU_REG *loaded_data) int exp, tag, negative; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, single, 4); + FPU_access_ok(single, 4); FPU_get_user(m32, (unsigned long __user *)single); RE_ENTRANT_CHECK_ON; @@ -214,7 +214,7 @@ int FPU_load_int64(long long __user *_s) FPU_REG *st0_ptr = &st(0); RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, _s, 8); + FPU_access_ok(_s, 8); if (copy_from_user(&s, _s, 8)) FPU_abort; RE_ENTRANT_CHECK_ON; @@ -243,7 +243,7 @@ int FPU_load_int32(long __user *_s, FPU_REG *loaded_data) int negative; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, _s, 4); + FPU_access_ok(_s, 4); FPU_get_user(s, _s); RE_ENTRANT_CHECK_ON; @@ -271,7 +271,7 @@ int FPU_load_int16(short __user *_s, FPU_REG *loaded_data) int s, negative; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, _s, 2); + FPU_access_ok(_s, 2); /* Cast as short to get the sign extended. */ FPU_get_user(s, _s); RE_ENTRANT_CHECK_ON; @@ -304,7 +304,7 @@ int FPU_load_bcd(u_char __user *s) int sign; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, s, 10); + FPU_access_ok(s, 10); RE_ENTRANT_CHECK_ON; for (pos = 8; pos >= 0; pos--) { l *= 10; @@ -345,7 +345,7 @@ int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag, if (st0_tag != TAG_Empty) { RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 10); + FPU_access_ok(d, 10); FPU_put_user(st0_ptr->sigl, (unsigned long __user *)d); FPU_put_user(st0_ptr->sigh, @@ -364,7 +364,7 @@ int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag, /* The masked response */ /* Put out the QNaN indefinite */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 10); + FPU_access_ok(d, 10); FPU_put_user(0, (unsigned long __user *)d); FPU_put_user(0xc0000000, 1 + (unsigned long __user *)d); FPU_put_user(0xffff, 4 + (short __user *)d); @@ -539,7 +539,7 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) /* The masked response */ /* Put out the QNaN indefinite */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, dfloat, 8); + FPU_access_ok(dfloat, 8); FPU_put_user(0, (unsigned long __user *)dfloat); FPU_put_user(0xfff80000, 1 + (unsigned long __user *)dfloat); @@ -552,7 +552,7 @@ int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat) l[1] |= 0x80000000; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, dfloat, 8); + FPU_access_ok(dfloat, 8); FPU_put_user(l[0], (unsigned long __user *)dfloat); FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat); RE_ENTRANT_CHECK_ON; @@ -724,7 +724,7 @@ int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single) /* The masked response */ /* Put out the QNaN indefinite */ RE_ENTRANT_CHECK_OFF; - 
FPU_access_ok(VERIFY_WRITE, single, 4); + FPU_access_ok(single, 4); FPU_put_user(0xffc00000, (unsigned long __user *)single); RE_ENTRANT_CHECK_ON; @@ -742,7 +742,7 @@ int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single) templ |= 0x80000000; RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, single, 4); + FPU_access_ok(single, 4); FPU_put_user(templ, (unsigned long __user *)single); RE_ENTRANT_CHECK_ON; @@ -791,7 +791,7 @@ int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d) } RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 8); + FPU_access_ok(d, 8); if (copy_to_user(d, &tll, 8)) FPU_abort; RE_ENTRANT_CHECK_ON; @@ -838,7 +838,7 @@ int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d) } RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 4); + FPU_access_ok(d, 4); FPU_put_user(t.sigl, (unsigned long __user *)d); RE_ENTRANT_CHECK_ON; @@ -884,7 +884,7 @@ int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d) } RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 2); + FPU_access_ok(d, 2); FPU_put_user((short)t.sigl, d); RE_ENTRANT_CHECK_ON; @@ -925,7 +925,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d) if (control_word & CW_Invalid) { /* Produce the QNaN "indefinite" */ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 10); + FPU_access_ok(d, 10); for (i = 0; i < 7; i++) FPU_put_user(0, d + i); /* These bytes "undefined" */ FPU_put_user(0xc0, d + 7); /* This byte "undefined" */ @@ -941,7 +941,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d) } RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 10); + FPU_access_ok(d, 10); RE_ENTRANT_CHECK_ON; for (i = 0; i < 9; i++) { b = FPU_div_small(&ll, 10); @@ -1034,7 +1034,7 @@ u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s) ((addr_modes.default_mode == PM16) ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) { RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, s, 0x0e); + FPU_access_ok(s, 0x0e); FPU_get_user(control_word, (unsigned short __user *)s); FPU_get_user(partial_status, (unsigned short __user *)(s + 2)); FPU_get_user(tag_word, (unsigned short __user *)(s + 4)); @@ -1056,7 +1056,7 @@ u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s) } } else { RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, s, 0x1c); + FPU_access_ok(s, 0x1c); FPU_get_user(control_word, (unsigned short __user *)s); FPU_get_user(partial_status, (unsigned short __user *)(s + 4)); FPU_get_user(tag_word, (unsigned short __user *)(s + 8)); @@ -1125,7 +1125,7 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address) /* Copy all registers in stack order. 
*/ RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_READ, s, 80); + FPU_access_ok(s, 80); __copy_from_user(register_base + offset, s, other); if (offset) __copy_from_user(register_base, s + other, offset); @@ -1146,7 +1146,7 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d) ((addr_modes.default_mode == PM16) ^ (addr_modes.override.operand_size == OP_SIZE_PREFIX))) { RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 14); + FPU_access_ok(d, 14); #ifdef PECULIAR_486 FPU_put_user(control_word & ~0xe080, (unsigned long __user *)d); #else @@ -1174,7 +1174,7 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d) d += 0x0e; } else { RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 7 * 4); + FPU_access_ok(d, 7 * 4); #ifdef PECULIAR_486 control_word &= ~0xe080; /* An 80486 sets nearly all of the reserved bits to 1. */ @@ -1204,7 +1204,7 @@ void fsave(fpu_addr_modes addr_modes, u_char __user *data_address) d = fstenv(addr_modes, data_address); RE_ENTRANT_CHECK_OFF; - FPU_access_ok(VERIFY_WRITE, d, 80); + FPU_access_ok(d, 80); /* Copy all registers in stack order. */ if (__copy_to_user(d, register_base + offset, other)) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 4b101dd6e52f3f714bc16b15d25191f3486f1548..14623619df2e8852a1d29ff42235ff65f52f5c9a 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -13,7 +13,7 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a12afff146d10d274792f396f7e0eeab3c9962f2..abcb8d00b01486f431fc9fa591f6e6260c8e1153 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -19,7 +19,9 @@ #include #include #include +#include +#include #include /* @@ -53,10 +55,10 @@ struct addr_marker { enum address_markers_idx { USER_SPACE_NR = 0, KERNEL_SPACE_NR, - LOW_KERNEL_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) +#ifdef CONFIG_MODIFY_LDT_SYSCALL LDT_NR, #endif + LOW_KERNEL_NR, VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN @@ -64,9 +66,6 @@ enum address_markers_idx { KASAN_SHADOW_END_NR, #endif CPU_ENTRY_AREA_NR, -#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) - LDT_NR, -#endif #ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, #endif @@ -241,6 +240,29 @@ static unsigned long normalize_addr(unsigned long u) return (signed long)(u << shift) >> shift; } +static void note_wx(struct pg_state *st) +{ + unsigned long npages; + + npages = (st->current_address - st->start_address) / PAGE_SIZE; + +#ifdef CONFIG_PCI_BIOS + /* + * If PCI BIOS is enabled, the PCI BIOS area is forced to WX. + * Inform about it, but avoid the warning. 
+ */ + if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN && + st->current_address <= PAGE_OFFSET + BIOS_END) { + pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages); + return; + } +#endif + /* Account the WX pages */ + st->wx_pages += npages; + WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n", + (void *)st->start_address); +} + /* * This function gets called on a break in a continuous series * of PTE entries; the next one is different so we need to @@ -276,14 +298,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, unsigned long delta; int width = sizeof(unsigned long) * 2; - if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) { - WARN_ONCE(1, - "x86/mm: Found insecure W+X mapping at address %p/%pS\n", - (void *)st->start_address, - (void *)st->start_address); - st->wx_pages += (st->current_address - - st->start_address) / PAGE_SIZE; - } + if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) + note_wx(st); /* * Now print the actual finished series @@ -493,11 +509,11 @@ static inline bool is_hypervisor_range(int idx) { #ifdef CONFIG_X86_64 /* - * ffff800000000000 - ffff87ffffffffff is reserved for - * the hypervisor. + * A hole in the beginning of kernel address space reserved + * for a hypervisor. */ - return (idx >= pgd_index(__PAGE_OFFSET) - 16) && - (idx < pgd_index(__PAGE_OFFSET)); + return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) && + (idx < pgd_index(GUARD_HOLE_END_ADDR)); #else return false; #endif diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 45f5d6cf65aed0bf30f0885648df743c449c61a7..dc72b2d17ac6476a7211ade248201fdad98426cf 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -108,6 +108,14 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_fprestore); +bool ex_handler_uaccess(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr) +{ + regs->ip = ex_fixup_addr(fixup); + return true; +} +EXPORT_SYMBOL(ex_handler_uaccess); + __visible bool ex_handler_ext(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr) { diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 47bebfe6efa70a316424934683f4302de33876a2..52c8cbbd5a65b953652c0773fced29b4ca1602df 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -261,18 +261,19 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - return NULL; - if (!pmd_present(*pmd)) + if (pmd_present(*pmd) != pmd_present(*pmd_k)) set_pmd(pmd, *pmd_k); + + if (!pmd_present(*pmd_k)) + return NULL; else - BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k)); return pmd_k; } -void vmalloc_sync_all(void) +static void vmalloc_sync(void) { unsigned long address; @@ -280,29 +281,35 @@ void vmalloc_sync_all(void) return; for (address = VMALLOC_START & PMD_MASK; - address >= TASK_SIZE_MAX && address < FIXADDR_TOP; + address >= TASK_SIZE_MAX && address < VMALLOC_END; address += PMD_SIZE) { struct page *page; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { spinlock_t *pgt_lock; - pmd_t *ret; /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); - ret = vmalloc_sync_one(page_address(page), address); + vmalloc_sync_one(page_address(page), address); spin_unlock(pgt_lock); - - if (!ret) - break; } spin_unlock(&pgd_lock); } } 
+void vmalloc_sync_mappings(void) +{ + vmalloc_sync(); +} + +void vmalloc_sync_unmappings(void) +{ + vmalloc_sync(); +} + /* * 32-bit: * @@ -405,11 +412,23 @@ static void dump_pagetable(unsigned long address) #else /* CONFIG_X86_64: */ -void vmalloc_sync_all(void) +void vmalloc_sync_mappings(void) { + /* + * 64-bit mappings might allocate new p4d/pud pages + * that need to be propagated to all tasks' PGDs. + */ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); } +void vmalloc_sync_unmappings(void) +{ + /* + * Unmappings never allocate or free p4d/pud pages. + * No work is required here. + */ +} + /* * 64-bit: * @@ -427,8 +446,6 @@ static noinline int vmalloc_fault(unsigned long address) if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; - WARN_ON_ONCE(in_nmi()); - /* * Copy kernel mappings over when needed. This can also * happen within a race in page table update. In the later @@ -1401,9 +1418,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * that we made any progress. Handle this case first. */ if (unlikely(fault & VM_FAULT_RETRY)) { - /* Retry at most once */ if (flags & FAULT_FLAG_ALLOW_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; if (!fatal_signal_pending(tsk)) goto retry; diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index faca978ebf9d8b46b6437f908b9ae506d3489da2..d6f4ad93b1235adddcfeea2a4ad149411df2c2fa 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -6,8 +6,10 @@ #include /* for max_low_pfn */ #include #include +#include #include +#include #include #include #include @@ -208,6 +210,24 @@ static void __init probe_page_size_mask(void) } } +#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ + .family = 6, \ + .model = _model, \ + } +/* + * INVLPG may not properly flush Global entries + * on these CPUs when PCIDs are enabled. + */ +static const struct x86_cpu_id invlpg_miss_ids[] = { + INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), + {} +}; + static void setup_pcid(void) { if (!IS_ENABLED(CONFIG_X86_64)) @@ -216,6 +236,12 @@ static void setup_pcid(void) if (!boot_cpu_has(X86_FEATURE_PCID)) return; + if (x86_match_cpu(invlpg_miss_ids)) { + pr_info("Incomplete global flushes, disabling PCID"); + setup_clear_cpu_cap(X86_FEATURE_PCID); + return; + } + if (boot_cpu_has(X86_FEATURE_PGE)) { /* * This can't be cr4_set_bits_and_update_boot() -- the @@ -767,6 +793,11 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) if (debug_pagealloc_enabled()) { pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", begin, end - 1); + /* + * Inform kmemleak about the hole in the memory since the + * corresponding pages will be unmapped. 
+ */ + kmemleak_free_part((void *)begin, end - begin); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); } else { /* @@ -932,7 +963,7 @@ unsigned long max_swapfile_size(void) pages = generic_max_swapfile_size(); - if (boot_cpu_has_bug(X86_BUG_L1TF)) { + if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) { /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ unsigned long long l1tf_limit = l1tf_pfn_limit(); /* diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 979e0a02cbe1a12d67fad130592a6e68c98925c5..79b95910fd9ffcf73a2bb33ccd3090dcb3112271 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -860,18 +860,15 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); } -#ifdef CONFIG_MEMORY_HOTREMOVE -int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - return __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif -#endif int kernel_set_to_readonly __read_mostly; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index dd519f3721692180b3aac7f5e8eeda52a1d68226..4b25a1ad18ffd4a63b5ee13660f963c2a8bcb1be 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -585,7 +585,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, paddr_end, page_size_mask, prot); - __flush_tlb_all(); continue; } /* @@ -628,7 +627,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, pud_populate(&init_mm, pud, pmd); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); @@ -669,7 +667,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, paddr_last = phys_pud_init(pud, paddr, paddr_end, page_size_mask); - __flush_tlb_all(); continue; } @@ -681,7 +678,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, p4d_populate(&init_mm, p4d, pud); spin_unlock(&init_mm.page_table_lock); } - __flush_tlb_all(); return paddr_last; } @@ -734,8 +730,6 @@ kernel_physical_mapping_init(unsigned long paddr_start, if (pgd_changed) sync_global_pgds(vaddr_start, vaddr_end - 1); - __flush_tlb_all(); - return paddr_last; } @@ -1138,7 +1132,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, remove_pagetable(start, end, false, altmap); } -#ifdef CONFIG_MEMORY_HOTREMOVE static void __meminit kernel_physical_mapping_remove(unsigned long start, unsigned long end) { @@ -1148,25 +1141,15 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end) remove_pagetable(start, end, true, NULL); } -int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +void __ref arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn); - struct zone *zone; - int ret; - /* With altmap the first mapped page is offset from @start */ - if (altmap) - page += vmem_altmap_offset(altmap); - zone = page_zone(page); - ret = __remove_pages(zone, start_pfn, nr_pages, altmap); - WARN_ON_ONCE(ret); + __remove_pages(start_pfn, nr_pages, 
altmap); kernel_physical_mapping_remove(start, start + size); - - return ret; } -#endif #endif /* CONFIG_MEMORY_HOTPLUG */ static struct kcore_list kcore_vsyscall; @@ -1306,18 +1289,18 @@ int kern_addr_valid(unsigned long addr) return 0; p4d = p4d_offset(pgd, addr); - if (p4d_none(*p4d)) + if (!p4d_present(*p4d)) return 0; pud = pud_offset(p4d, addr); - if (pud_none(*pud)) + if (!pud_present(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + if (!pmd_present(*pmd)) return 0; if (pmd_large(*pmd)) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index c63a545ec199547776ae8dce2d4258955c68d5f4..6d86b14363ce9422420a2c1f9d2f0696f748e692 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -171,9 +171,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; - phys_addr &= PHYSICAL_PAGE_MASK; + phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; + /* + * Mask out any bits not part of the actual physical + * address, like memory encryption bits. + */ + phys_addr &= PHYSICAL_PAGE_MASK; + retval = reserve_memtype(phys_addr, (u64)phys_addr + size, pcm, &new_pcm); if (retval) { @@ -431,19 +437,26 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int __init arch_ioremap_pud_supported(void) +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot) +{ + return false; +} + +bool arch_vmap_pud_supported(pgprot_t prot) { #ifdef CONFIG_X86_64 return boot_cpu_has(X86_FEATURE_GBPAGES); #else - return 0; + return false; #endif } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { return boot_cpu_has(X86_FEATURE_PSE); } +#endif /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem @@ -697,7 +710,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) return arch_memremap_can_ram_remap(phys_addr, size, 0); } -#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT +#ifdef CONFIG_AMD_MEM_ENCRYPT /* Remap memory with encryption */ void __init *early_memremap_encrypted(resource_size_t phys_addr, unsigned long size) @@ -739,7 +752,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP); } -#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */ +#endif /* CONFIG_AMD_MEM_ENCRYPT */ static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e3e77527f8dff8e44bd560d4f62a11ee8cf90c26..4bfd14d5da8e54a94eeb6a04f51de9837ae5c5b1 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -198,7 +198,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) if (!pgtable_l5_enabled()) return (p4d_t *)pgd; - p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d = pgd_val(*pgd) & PTE_PFN_MASK; p4d += __START_KERNEL_map - phys_base; return (p4d_t *)p4d + p4d_index(addr); } diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 61db77b0eda9cecf9b71d38fde99d0febb5eb4ea..bfe769209eaef7e97370bb72414aa4aab9e9bcd1 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -51,7 +51,7 @@ static __initdata struct kaslr_memory_region { } kaslr_regions[] = { { &page_offset_base, 0 }, { &vmalloc_base, 0 }, - { &vmemmap_base, 1 }, + { &vmemmap_base, 0 }, }; /* Get size in bytes used by the memory region */ @@ -77,6 +77,7 @@ void __init 
kernel_randomize_memory(void) unsigned long rand, memory_tb; struct rnd_state rand_state; unsigned long remain_entropy; + unsigned long vmemmap_size; vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; vaddr = vaddr_start; @@ -93,7 +94,7 @@ void __init kernel_randomize_memory(void) if (!kaslr_memory_enabled()) return; - kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT); + kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT); kaslr_regions[1].size_tb = VMALLOC_SIZE_TB; /* @@ -108,6 +109,14 @@ void __init kernel_randomize_memory(void) if (memory_tb < kaslr_regions[0].size_tb) kaslr_regions[0].size_tb = memory_tb; + /* + * Calculate the vmemmap region size in TBs, aligned to a TB + * boundary. + */ + vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) * + sizeof(struct page); + kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT); + /* Calculate entropy available between regions */ remain_entropy = vaddr_end - vaddr_start; for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c new file mode 100644 index 0000000000000000000000000000000000000000..f5b85bdc0535cf7350a274d9bea447ce838ef473 --- /dev/null +++ b/arch/x86/mm/maccess.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#ifdef CONFIG_X86_64 +static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); +} + +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + /* + * Range covering the highest possible canonical userspace address + * as well as non-canonical address range. For the canonical range + * we also need to include the userspace guard page. + */ + return vaddr < TASK_SIZE_MAX + PAGE_SIZE || + canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr; +} +#else +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + return vaddr < TASK_SIZE_MAX; +} +#endif + +long probe_kernel_read_strict(void *dst, const void *src, size_t size) +{ + if (unlikely(invalid_probe_range((unsigned long)src))) + return -EFAULT; + + return __probe_kernel_read(dst, src, size); +} + +long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count) +{ + if (unlikely(invalid_probe_range((unsigned long)unsafe_addr))) + return -EFAULT; + + return __strncpy_from_unsafe(dst, unsafe_addr, count); +} diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 7ae36868aed256e44749bdd7f3692dc423574f7c..0fadc69d2621881270d72e96e1db084c04eb43c5 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -29,6 +29,15 @@ #undef CONFIG_PARAVIRT #undef CONFIG_PARAVIRT_SPINLOCKS +/* + * This code runs before CPU feature bits are set. By default, the + * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if + * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5 + * is provided to handle this situation and, instead, use a variable that + * has been set by the early boot code. 
+ */ +#define USE_EARLY_PGTABLE_L5 + #include #include #include @@ -157,8 +166,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) pmd = pmd_offset(pud, ppd->vaddr); if (pmd_none(*pmd)) { pte = ppd->pgtable_area; - memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); - ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; + memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE); + ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE; set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); } @@ -554,7 +563,8 @@ void __init sme_enable(struct boot_params *bp) cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32)); - cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); + if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0) + return; if (!strncmp(buffer, cmdline_on, sizeof(buffer))) sme_me_mask = me_mask; diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 1e95d57760cf79becf81c012df564e42ae7122e2..b69f7d428443531ddfb48d9afe0eefaf7d4c22fe 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len) /* Can we access it for direct reading/writing? Must be RAM: */ int valid_phys_addr_range(phys_addr_t addr, size_t count) { - return addr + count <= __pa(high_memory); + return addr + count - 1 <= __pa(high_memory - 1); } /* Can we access it through mmap? Must be a valid physical address: */ diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index e500949bae24534ce7fea11d73bddb2810affa99..60af9300b2bfd5dca1565c7ab2f3a38e9760975b 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -507,7 +507,7 @@ static int get_bt_addr(struct mm_struct *mm, unsigned long bd_entry; unsigned long bt_addr; - if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr))) + if (!access_ok((bd_entry_ptr), sizeof(*bd_entry_ptr))) return -EFAULT; while (1) { diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index fa150855647cc9c85ef2e2b126bf30bcaba06994..2284f279a1afcee4e25cbdb404e032a8db6c8d61 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -722,21 +722,6 @@ void __init x86_numa_init(void) numa_init(dummy_numa_init); } -static void __init init_memory_less_node(int nid) -{ - unsigned long zones_size[MAX_NR_ZONES] = {0}; - unsigned long zholes_size[MAX_NR_ZONES] = {0}; - - /* Allocate and initialize node data. Memory-less node is now online.*/ - alloc_node_data(nid); - free_area_init_node(nid, zones_size, 0, zholes_size); - - /* - * All zonelists will be built later in start_kernel() after per cpu - * areas are initialized. - */ -} - /* * Setup early cpu_to_node. * @@ -764,8 +749,17 @@ void __init init_cpu_to_node(void) if (node == NUMA_NO_NODE) continue; + /* + * Exclude this node from + * bringup_nonboot_cpus + * cpu_up + * __try_online_node + * register_one_node + * because node_subsys is not initialized yet. 
+ * TODO remove dependency on node_online + */ if (!node_online(node)) - init_memory_less_node(node); + node_set_online(node); numa_set_node(cpu, node); } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index b54d52a2d00a83a9fa4be8647fe3acf92b81c4c7..d71d72cf6c66685eb9f8eee9f7bb29c4a277baa2 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -400,9 +400,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); ret = -1; for_each_node_mask(i, physnode_mask) { + /* + * The reason we pass in blk[0] is due to + * numa_remove_memblk_from() called by + * emu_setup_memblk() will delete entry 0 + * and then move everything else up in the pi.blk + * array. Therefore we should always be looking + * at blk[0]. + */ ret = split_nodes_size_interleave_uniform(&ei, &pi, - pi.blk[i].start, pi.blk[i].end, 0, - n, &pi.blk[i], nid); + pi.blk[0].start, pi.blk[0].end, 0, + n, &pi.blk[0], nid); if (ret < 0) break; if (ret < n) { diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 51a5a69ecac9f24ab794ea6e86ea6d7fb39367a8..e2d4b25c7aa44360595d2f6eefa028a5dfbeb482 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -2086,9 +2086,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) /* * We should perform an IPI and flush all tlbs, - * but that can deadlock->flush only current cpu: + * but that can deadlock->flush only current cpu. + * Preemption needs to be disabled around __flush_tlb_all() due to + * CR3 reload in __native_flush_tlb(). */ + preempt_disable(); __flush_tlb_all(); + preempt_enable(); arch_flush_lazy_mmu_mode(); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 3d0c83ef6aab98cde354b86f999eecfca3a38263..3467dbf085aec69e4c00ee0aaf21afd28f3fca07 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -33,6 +33,7 @@ #include "pat_internal.h" #include "mm_internal.h" +#include "../../mm/internal.h" /* is_cow_mapping() */ #undef pr_fmt #define pr_fmt(fmt) "" fmt @@ -74,7 +75,7 @@ int pat_debug_enable; static int __init pat_debug_setup(char *str) { pat_debug_enable = 1; - return 0; + return 1; } __setup("debugpat", pat_debug_setup); @@ -519,8 +520,13 @@ static u64 sanitize_phys(u64 address) * for a "decoy" virtual address (bit 63 clear) passed to * set_memory_X(). __pa() on a "decoy" address results in a * physical address with bit 63 set. + * + * Decoy addresses are not present for 32-bit builds, see + * set_mce_nospec(). */ - return address & __PHYSICAL_MASK; + if (IS_ENABLED(CONFIG_X86_64)) + return address & __PHYSICAL_MASK; + return address; } /* @@ -546,7 +552,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, start = sanitize_phys(start); end = sanitize_phys(end); - BUG_ON(start >= end); /* end is exclusive */ + if (start >= end) { + WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, + start, end - 1, cattr_name(req_type)); + return -EINVAL; + } if (!pat_enabled()) { /* This is identical to page table setting without PAT */ @@ -945,6 +955,38 @@ static void free_pfn_range(u64 paddr, unsigned long size) free_memtype(paddr, paddr + size); } +static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, + pgprot_t *pgprot) +{ + unsigned long prot; + + VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); + + /* + * We need the starting PFN and cachemode used for track_pfn_remap() + * that covered the whole VMA. 
For most mappings, we can obtain that + * information from the page tables. For COW mappings, we might now + * suddenly have anon folios mapped and follow_phys() will fail. + * + * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to + * detect the PFN. If we need the cachemode as well, we're out of luck + * for now and have to fail fork(). + */ + if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) { + if (pgprot) + *pgprot = __pgprot(prot); + return 0; + } + if (is_cow_mapping(vma->vm_flags)) { + if (pgprot) + return -EINVAL; + *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + return 0; + } + WARN_ON_ONCE(1); + return -EINVAL; +} + /* * track_pfn_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). @@ -955,20 +997,13 @@ static void free_pfn_range(u64 paddr, unsigned long size) int track_pfn_copy(struct vm_area_struct *vma) { resource_size_t paddr; - unsigned long prot; unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; if (vma->vm_flags & VM_PAT) { - /* - * reserve the whole chunk covered by vma. We need the - * starting address and protection from pte. - */ - if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { - WARN_ON_ONCE(1); + if (get_pat_info(vma, &paddr, &pgprot)) return -EINVAL; - } - pgprot = __pgprot(prot); + /* reserve the whole chunk covered by vma. */ return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } @@ -1043,7 +1078,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { resource_size_t paddr; - unsigned long prot; if (vma && !(vma->vm_flags & VM_PAT)) return; @@ -1051,11 +1085,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, /* free the chunk starting from pfn or the whole chunk */ paddr = (resource_size_t)pfn << PAGE_SHIFT; if (!paddr && !size) { - if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { - WARN_ON_ONCE(1); + if (get_pat_info(vma, &paddr, NULL)) return; - } - size = vma->vm_end - vma->vm_start; } free_pfn_range(paddr, size); @@ -1122,12 +1153,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { + kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) @@ -1136,7 +1169,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), print_entry->start, print_entry->end); - kfree(print_entry); return 0; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 59274e2c1ac44c0fb2fb4c004e3e64484b305335..5b97717b389bba511752e7ff185585a69a0d0a63 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -660,8 +660,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) fixmaps_set++; } -void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, - pgprot_t flags) +void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags) { /* Sanitize 'prot' against any unsupported bits: */ pgprot_val(flags) &= __default_kernel_pte_mask; @@ -794,6 +794,14 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } +/* + * Until we support 512GB pages, skip them in the vmap area. + */ +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; +} + #ifdef CONFIG_X86_64 /** * pud_free_pmd_page - Clear pud entry and free pmd page. 
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 6e98e0a7c92315c2a819ee396bf78ae3104688c4..552233885f8f7be5dec6ad83233ddc9760b61de8 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -146,13 +146,6 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) | void copy_init_pkru_to_fpregs(void) { u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value); - /* - * Any write to PKRU takes it out of the XSAVE 'init - * state' which increases context switch cost. Avoid - * writing 0 when PKRU was already 0. - */ - if (!init_pkru_value_snapshot && !read_pkru()) - return; /* * Override the PKRU state that came from 'init_fpstate' * with the baseline from the process. diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index c1fc1ae6b42947a9f6dece93efce6e4f843b7def..21105ae44ca18571f8446044638fca4127fab4da 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void) } } - if (cmdline_find_option_bool(boot_command_line, "nopti")) { + if (cmdline_find_option_bool(boot_command_line, "nopti") || + cpu_mitigations_off()) { pti_mode = PTI_FORCE_OFF; pti_print_if_insecure("disabled on command line."); return; @@ -336,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end, pud = pud_offset(p4d, addr); if (pud_none(*pud)) { - addr += PUD_SIZE; + WARN_ON_ONCE(addr & ~PUD_MASK); + addr = round_up(addr + 1, PUD_SIZE); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr += PMD_SIZE; + WARN_ON_ONCE(addr & ~PMD_MASK); + addr = round_up(addr + 1, PMD_SIZE); continue; } @@ -379,14 +383,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end, */ *target_pmd = *pmd; - addr += PMD_SIZE; + addr = round_up(addr + 1, PMD_SIZE); } else if (level == PTI_CLONE_PTE) { /* Walk the page-table down to the pte level */ pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) { - addr += PAGE_SIZE; + addr = round_up(addr + 1, PAGE_SIZE); continue; } @@ -406,7 +410,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end, /* Clone the PTE */ *target_pte = *pte; - addr += PAGE_SIZE; + addr = round_up(addr + 1, PAGE_SIZE); } else { BUG(); @@ -641,6 +645,8 @@ void __init pti_init(void) */ void pti_finalize(void) { + if (!boot_cpu_has(X86_FEATURE_PTI)) + return; /* * We need to clone everything (again) that maps parts of the * kernel image. diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index e96b99eb800ccd5f170f7539efaaf99720d192b4..a6836ab0fcc7348dce7b4bdf1db07761c42351e3 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -29,6 +29,12 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is + * stored in cpu_tlb_state.last_user_mm_ibpb. + */ +#define LAST_USER_MM_IBPB 0x1UL + /* * We get here when we do something requiring a TLB invalidation * but could not go invalidate all of the contexts. 
We do the @@ -180,6 +186,89 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) } } +static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) +{ + unsigned long next_tif = task_thread_info(next)->flags; + unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB; + + return (unsigned long)next->mm | ibpb; +} + +static void cond_ibpb(struct task_struct *next) +{ + if (!next || !next->mm) + return; + + /* + * Both, the conditional and the always IBPB mode use the mm + * pointer to avoid the IBPB when switching between tasks of the + * same process. Using the mm pointer instead of mm->context.ctx_id + * opens a hypothetical hole vs. mm_struct reuse, which is more or + * less impossible to control by an attacker. Aside of that it + * would only affect the first schedule so the theoretically + * exposed data is not really interesting. + */ + if (static_branch_likely(&switch_mm_cond_ibpb)) { + unsigned long prev_mm, next_mm; + + /* + * This is a bit more complex than the always mode because + * it has to handle two cases: + * + * 1) Switch from a user space task (potential attacker) + * which has TIF_SPEC_IB set to a user space task + * (potential victim) which has TIF_SPEC_IB not set. + * + * 2) Switch from a user space task (potential attacker) + * which has TIF_SPEC_IB not set to a user space task + * (potential victim) which has TIF_SPEC_IB set. + * + * This could be done by unconditionally issuing IBPB when + * a task which has TIF_SPEC_IB set is either scheduled in + * or out. Though that results in two flushes when: + * + * - the same user space task is scheduled out and later + * scheduled in again and only a kernel thread ran in + * between. + * + * - a user space task belonging to the same process is + * scheduled in after a kernel thread ran in between + * + * - a user space task belonging to the same process is + * scheduled in immediately. + * + * Optimize this with reasonably small overhead for the + * above cases. Mangle the TIF_SPEC_IB bit into the mm + * pointer of the incoming task which is stored in + * cpu_tlbstate.last_user_mm_ibpb for comparison. + */ + next_mm = mm_mangle_tif_spec_ib(next); + prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb); + + /* + * Issue IBPB only if the mm's are different and one or + * both have the IBPB bit set. + */ + if (next_mm != prev_mm && + (next_mm | prev_mm) & LAST_USER_MM_IBPB) + indirect_branch_prediction_barrier(); + + this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm); + } + + if (static_branch_unlikely(&switch_mm_always_ibpb)) { + /* + * Only flush when switching to a user space task with a + * different context than the user space task which ran + * last on this CPU. + */ + if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) { + indirect_branch_prediction_barrier(); + this_cpu_write(cpu_tlbstate.last_user_mm, next->mm); + } + } +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -254,27 +343,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, } else { u16 new_asid; bool need_flush; - u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); /* * Avoid user/user BTB poisoning by flushing the branch * predictor when switching between processes. This stops * one process from doing Spectre-v2 attacks on another. - * - * As an optimization, flush indirect branches only when - * switching into processes that disable dumping. 
This - * protects high value processes like gpg, without having - * too high performance overhead. IBPB is *expensive*! - * - * This will not flush branches when switching into kernel - * threads. It will also not flush if we switch to idle - * thread and back to the same process. It will flush if we - * switch to a different non-dumpable process. */ - if (tsk && tsk->mm && - tsk->mm->context.ctx_id != last_ctx_id && - get_dumpable(tsk->mm) != SUID_DUMP_USER) - indirect_branch_prediction_barrier(); + cond_ibpb(tsk); if (IS_ENABLED(CONFIG_VMAP_STACK)) { /* @@ -331,14 +406,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); } - /* - * Record last user mm's context id, so we can avoid - * flushing branch buffer with IBPB if we switch back - * to the same user. - */ - if (next != &init_mm) - this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); - /* Make sure we write CR3 before loaded_mm. */ barrier(); @@ -419,7 +486,7 @@ void initialize_tlbstate_and_flush(void) write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. */ - this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); + this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB); this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); this_cpu_write(cpu_tlbstate.next_asid, 1); this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); @@ -627,7 +694,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, { int cpu; - struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = { + struct flush_tlb_info info = { .mm = mm, }; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 2580cd2e98b1700f2bc048377d431a56dd69eccb..658869ac2e55e257bda098e79384c2d370a53d4a 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -190,9 +190,7 @@ struct jit_context { #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 -#define AUX_STACK_SPACE 40 /* Space for RBX, R13, R14, R15, tailcnt */ - -#define PROLOGUE_SIZE 37 +#define PROLOGUE_SIZE 20 /* * Emit x86-64 prologue code for BPF program and check its size. @@ -203,44 +201,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) u8 *prog = *pprog; int cnt = 0; - /* push rbp */ - EMIT1(0x55); - - /* mov rbp,rsp */ - EMIT3(0x48, 0x89, 0xE5); - - /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */ - EMIT3_off32(0x48, 0x81, 0xEC, - round_up(stack_depth, 8) + AUX_STACK_SPACE); - - /* sub rbp, AUX_STACK_SPACE */ - EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE); - - /* mov qword ptr [rbp+0],rbx */ - EMIT4(0x48, 0x89, 0x5D, 0); - /* mov qword ptr [rbp+8],r13 */ - EMIT4(0x4C, 0x89, 0x6D, 8); - /* mov qword ptr [rbp+16],r14 */ - EMIT4(0x4C, 0x89, 0x75, 16); - /* mov qword ptr [rbp+24],r15 */ - EMIT4(0x4C, 0x89, 0x7D, 24); - + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ + /* sub rsp, rounded_stack_depth */ + EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); + EMIT1(0x53); /* push rbx */ + EMIT2(0x41, 0x55); /* push r13 */ + EMIT2(0x41, 0x56); /* push r14 */ + EMIT2(0x41, 0x57); /* push r15 */ if (!ebpf_from_cbpf) { - /* - * Clear the tail call counter (tail_call_cnt): for eBPF tail - * calls we need to reset the counter to 0. It's done in two - * instructions, resetting RAX register to 0, and moving it - * to the counter location. 
- */ - - /* xor eax, eax */ - EMIT2(0x31, 0xc0); - /* mov qword ptr [rbp+32], rax */ - EMIT4(0x48, 0x89, 0x45, 32); - + /* zero init tail_call_cnt */ + EMIT2(0x6a, 0x00); BUILD_BUG_ON(cnt != PROLOGUE_SIZE); } - *pprog = prog; } @@ -285,13 +258,13 @@ static void emit_bpf_tail_call(u8 **pprog) * if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ + EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */ + EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */ /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ @@ -745,6 +718,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: if (is_ereg(dst_reg)) @@ -1006,19 +986,14 @@ xadd: if (is_imm8(insn->off)) seen_exit = true; /* Update cleanup_addr */ ctx->cleanup_addr = proglen; - /* mov rbx, qword ptr [rbp+0] */ - EMIT4(0x48, 0x8B, 0x5D, 0); - /* mov r13, qword ptr [rbp+8] */ - EMIT4(0x4C, 0x8B, 0x6D, 8); - /* mov r14, qword ptr [rbp+16] */ - EMIT4(0x4C, 0x8B, 0x75, 16); - /* mov r15, qword ptr [rbp+24] */ - EMIT4(0x4C, 0x8B, 0x7D, 24); - - /* add rbp, AUX_STACK_SPACE */ - EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE); - EMIT1(0xC9); /* leave */ - EMIT1(0xC3); /* ret */ + if (!bpf_prog_was_classic(bpf_prog)) + EMIT1(0x5B); /* get rid of tail_call_cnt */ + EMIT2(0x41, 0x5F); /* pop r15 */ + EMIT2(0x41, 0x5E); /* pop r14 */ + EMIT2(0x41, 0x5D); /* pop r13 */ + EMIT1(0x5B); /* pop rbx */ + EMIT1(0xC9); /* leave */ + EMIT1(0xC3); /* ret */ break; default: @@ -1039,7 +1014,16 @@ xadd: if (is_imm8(insn->off)) } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. + */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 8f6cc71e08482b05ddb53efce7ee3b818b1061f2..1eb3d5209d1c7e3c082883bfcba0f6c9afe7402c 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -117,6 +117,8 @@ static bool is_simm32(s64 value) #define IA32_JLE 0x7E #define IA32_JG 0x7F +#define COND_JMP_OPCODE_INVALID (0xFF) + /* * Map eBPF registers to IA32 32bit registers or stack scratch space. 
* @@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog) STACK_VAR(dst_hi)); } - /* xor ecx,ecx */ - EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX)); - /* sub dreg_lo,ecx */ - EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX)); - /* mov dreg_lo,ecx */ - EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX)); - - /* xor ecx,ecx */ - EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX)); - /* sbb dreg_hi,ecx */ - EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX)); - /* mov dreg_hi,ecx */ - EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX)); + /* neg dreg_lo */ + EMIT2(0xF7, add_1reg(0xD8, dreg_lo)); + /* adc dreg_hi,0x0 */ + EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00); + /* neg dreg_hi */ + EMIT2(0xF7, add_1reg(0xD8, dreg_hi)); if (dstk) { /* mov dword ptr [ebp+off],dreg_lo */ @@ -729,9 +724,6 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[], { u8 *prog = *pprog; int cnt = 0; - static int jmp_label1 = -1; - static int jmp_label2 = -1; - static int jmp_label3 = -1; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; @@ -750,79 +742,23 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[], /* mov ecx,src_lo */ EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); - /* cmp ecx,32 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); - /* Jumps when >= 32 */ - if (is_imm8(jmp_label(jmp_label1, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); - - /* < 32 */ - /* shl dreg_hi,cl */ - EMIT2(0xD3, add_1reg(0xE0, dreg_hi)); - /* mov ebx,dreg_lo */ - EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX)); + /* shld dreg_hi,dreg_lo,cl */ + EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo)); /* shl dreg_lo,cl */ EMIT2(0xD3, add_1reg(0xE0, dreg_lo)); - /* IA32_ECX = -IA32_ECX + 32 */ - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shr ebx,cl */ - EMIT2(0xD3, add_1reg(0xE8, IA32_EBX)); - /* or dreg_hi,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX)); - - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); - - /* >= 32 */ - if (jmp_label1 == -1) - jmp_label1 = cnt; + /* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */ - /* cmp ecx,64 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); - /* Jumps when >= 64 */ - if (is_imm8(jmp_label(jmp_label2, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* skip the next two instructions (4 bytes) when < 32 */ + EMIT2(IA32_JB, 4); - /* >= 32 && < 64 */ - /* sub ecx,32 */ - EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); - /* shl dreg_lo,cl */ - EMIT2(0xD3, add_1reg(0xE0, dreg_lo)); /* mov dreg_hi,dreg_lo */ EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo)); - /* xor dreg_lo,dreg_lo */ EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); - - /* >= 64 */ - if (jmp_label2 == -1) - jmp_label2 = cnt; - /* xor dreg_lo,dreg_lo */ - EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); - - if (jmp_label3 == -1) - jmp_label3 = cnt; - if (dstk) { /* mov dword ptr [ebp+off],dreg_lo */ EMIT3(0x89, add_2reg(0x40, IA32_EBP, 
dreg_lo), @@ -841,9 +777,6 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[], { u8 *prog = *pprog; int cnt = 0; - static int jmp_label1 = -1; - static int jmp_label2 = -1; - static int jmp_label3 = -1; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; @@ -862,79 +795,23 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[], /* mov ecx,src_lo */ EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); - /* cmp ecx,32 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); - /* Jumps when >= 32 */ - if (is_imm8(jmp_label(jmp_label1, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); - - /* < 32 */ - /* lshr dreg_lo,cl */ - EMIT2(0xD3, add_1reg(0xE8, dreg_lo)); - /* mov ebx,dreg_hi */ - EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); - /* ashr dreg_hi,cl */ + /* shrd dreg_lo,dreg_hi,cl */ + EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi)); + /* sar dreg_hi,cl */ EMIT2(0xD3, add_1reg(0xF8, dreg_hi)); - /* IA32_ECX = -IA32_ECX + 32 */ - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shl ebx,cl */ - EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); - /* or dreg_lo,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); - - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + /* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */ - /* >= 32 */ - if (jmp_label1 == -1) - jmp_label1 = cnt; - - /* cmp ecx,64 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); - /* Jumps when >= 64 */ - if (is_imm8(jmp_label(jmp_label2, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* skip the next two instructions (5 bytes) when < 32 */ + EMIT2(IA32_JB, 5); - /* >= 32 && < 64 */ - /* sub ecx,32 */ - EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); - /* ashr dreg_hi,cl */ - EMIT2(0xD3, add_1reg(0xF8, dreg_hi)); /* mov dreg_lo,dreg_hi */ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); - - /* ashr dreg_hi,imm8 */ + /* sar dreg_hi,31 */ EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31); - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); - - /* >= 64 */ - if (jmp_label2 == -1) - jmp_label2 = cnt; - /* ashr dreg_hi,imm8 */ - EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31); - /* mov dreg_lo,dreg_hi */ - EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); - - if (jmp_label3 == -1) - jmp_label3 = cnt; - if (dstk) { /* mov dword ptr [ebp+off],dreg_lo */ EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), @@ -953,9 +830,6 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, { u8 *prog = *pprog; int cnt = 0; - static int jmp_label1 = -1; - static int jmp_label2 = -1; - static int jmp_label3 = -1; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; @@ -974,77 +848,23 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, /* mov ecx,src_lo */ EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX)); - /* cmp ecx,32 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); - /* Jumps when >= 32 */ - if (is_imm8(jmp_label(jmp_label1, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label1, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6)); - - /* < 32 */ - /* lshr dreg_lo,cl */ - EMIT2(0xD3, add_1reg(0xE8, dreg_lo)); - /* mov ebx,dreg_hi */ - EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* shrd dreg_lo,dreg_hi,cl */ + EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi)); /* shr dreg_hi,cl */ EMIT2(0xD3, add_1reg(0xE8, dreg_hi)); - /* IA32_ECX = -IA32_ECX + 32 */ - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shl ebx,cl */ - EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); - /* or dreg_lo,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); - - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); + /* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */ - /* >= 32 */ - if (jmp_label1 == -1) - jmp_label1 = cnt; - /* cmp ecx,64 */ - EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64); - /* Jumps when >= 64 */ - if (is_imm8(jmp_label(jmp_label2, 2))) - EMIT2(IA32_JAE, jmp_label(jmp_label2, 2)); - else - EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6)); + /* cmp ecx,32 */ + EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32); + /* skip the next two instructions (4 bytes) when < 32 */ + EMIT2(IA32_JB, 4); - /* >= 32 && < 64 */ - /* sub ecx,32 */ - EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32); - /* shr dreg_hi,cl */ - EMIT2(0xD3, add_1reg(0xE8, dreg_hi)); /* mov dreg_lo,dreg_hi */ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi)); /* xor dreg_hi,dreg_hi */ EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); - /* goto out; */ - if (is_imm8(jmp_label(jmp_label3, 2))) - EMIT2(0xEB, jmp_label(jmp_label3, 2)); - else - EMIT1_off32(0xE9, jmp_label(jmp_label3, 5)); - - /* >= 64 */ - if (jmp_label2 == -1) - jmp_label2 = cnt; - /* xor dreg_lo,dreg_lo */ - EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo)); - /* xor dreg_hi,dreg_hi */ - EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi)); - - if (jmp_label3 == -1) - jmp_label3 = cnt; - if (dstk) { /* mov dword ptr [ebp+off],dreg_lo */ EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo), @@ -1074,27 +894,10 @@ static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val, } /* Do LSH operation */ if (val < 32) { - /* shl dreg_hi,imm8 */ - EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val); - /* mov ebx,dreg_lo */ - EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX)); + /* shld dreg_hi,dreg_lo,imm8 */ + EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val); /* shl dreg_lo,imm8 */ EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val); - - /* IA32_ECX = 32 - val */ - /* mov ecx,val */ - EMIT2(0xB1, val); - /* movzx ecx,ecx */ - EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX)); - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shr ebx,cl */ - EMIT2(0xD3, add_1reg(0xE8, IA32_EBX)); - /* or dreg_hi,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX)); } else if (val >= 32 && val < 64) { u32 value = val - 32; @@ -1140,27 +943,10 @@ static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val, /* Do RSH operation */ if (val < 32) { - /* shr dreg_lo,imm8 */ - 
EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val); - /* mov ebx,dreg_hi */ - EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* shrd dreg_lo,dreg_hi,imm8 */ + EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val); /* shr dreg_hi,imm8 */ EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val); - - /* IA32_ECX = 32 - val */ - /* mov ecx,val */ - EMIT2(0xB1, val); - /* movzx ecx,ecx */ - EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX)); - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shl ebx,cl */ - EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); - /* or dreg_lo,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); } else if (val >= 32 && val < 64) { u32 value = val - 32; @@ -1205,27 +991,10 @@ static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val, } /* Do RSH operation */ if (val < 32) { - /* shr dreg_lo,imm8 */ - EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val); - /* mov ebx,dreg_hi */ - EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX)); + /* shrd dreg_lo,dreg_hi,imm8 */ + EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val); /* ashr dreg_hi,imm8 */ EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val); - - /* IA32_ECX = 32 - val */ - /* mov ecx,val */ - EMIT2(0xB1, val); - /* movzx ecx,ecx */ - EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX)); - /* neg ecx */ - EMIT2(0xF7, add_1reg(0xD8, IA32_ECX)); - /* add ecx,32 */ - EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32); - - /* shl ebx,cl */ - EMIT2(0xD3, add_1reg(0xE0, IA32_EBX)); - /* or dreg_lo,ebx */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX)); } else if (val >= 32 && val < 64) { u32 value = val - 32; @@ -1613,6 +1382,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog) *pprog = prog; } +static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo) +{ + u8 jmp_cond; + + /* Convert BPF opcode to x86 */ + switch (op) { + case BPF_JEQ: + jmp_cond = IA32_JE; + break; + case BPF_JSET: + case BPF_JNE: + jmp_cond = IA32_JNE; + break; + case BPF_JGT: + /* GT is unsigned '>', JA in x86 */ + jmp_cond = IA32_JA; + break; + case BPF_JLT: + /* LT is unsigned '<', JB in x86 */ + jmp_cond = IA32_JB; + break; + case BPF_JGE: + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = IA32_JAE; + break; + case BPF_JLE: + /* LE is unsigned '<=', JBE in x86 */ + jmp_cond = IA32_JBE; + break; + case BPF_JSGT: + if (!is_cmp_lo) + /* Signed '>', GT in x86 */ + jmp_cond = IA32_JG; + else + /* GT is unsigned '>', JA in x86 */ + jmp_cond = IA32_JA; + break; + case BPF_JSLT: + if (!is_cmp_lo) + /* Signed '<', LT in x86 */ + jmp_cond = IA32_JL; + else + /* LT is unsigned '<', JB in x86 */ + jmp_cond = IA32_JB; + break; + case BPF_JSGE: + if (!is_cmp_lo) + /* Signed '>=', GE in x86 */ + jmp_cond = IA32_JGE; + else + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = IA32_JAE; + break; + case BPF_JSLE: + if (!is_cmp_lo) + /* Signed '<=', LE in x86 */ + jmp_cond = IA32_JLE; + else + /* LE is unsigned '<=', JBE in x86 */ + jmp_cond = IA32_JBE; + break; + default: /* to silence GCC warning */ + jmp_cond = COND_JMP_OPCODE_INVALID; + break; + } + + return jmp_cond; +} + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { @@ -1845,6 +1683,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, i++; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_H: case 
BPF_ST | BPF_MEM | BPF_B: @@ -2068,11 +1912,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: - case BPF_JMP | BPF_JLE | BPF_X: - case BPF_JMP | BPF_JSGT | BPF_X: - case BPF_JMP | BPF_JSLE | BPF_X: - case BPF_JMP | BPF_JSLT | BPF_X: - case BPF_JMP | BPF_JSGE | BPF_X: { + case BPF_JMP | BPF_JLE | BPF_X: { u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2099,6 +1939,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); goto emit_cond_jmp; } + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: { + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = sstk ? IA32_ECX : src_lo; + u8 sreg_hi = sstk ? IA32_EBX : src_hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); + } + + if (sstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), + STACK_VAR(src_lo)); + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); + } + + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 10); + /* cmp dreg_lo,sreg_lo */ + EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); + goto emit_cond_jmp_signed; + } case BPF_JMP | BPF_JSET | BPF_X: { u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; @@ -2159,11 +2033,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: - case BPF_JMP | BPF_JLE | BPF_K: - case BPF_JMP | BPF_JSGT | BPF_K: - case BPF_JMP | BPF_JSLE | BPF_K: - case BPF_JMP | BPF_JSLT | BPF_K: - case BPF_JMP | BPF_JSGE | BPF_K: { + case BPF_JMP | BPF_JLE | BPF_K: { u32 hi; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; @@ -2189,50 +2059,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); -emit_cond_jmp: /* Convert BPF opcode to x86 */ - switch (BPF_OP(code)) { - case BPF_JEQ: - jmp_cond = IA32_JE; - break; - case BPF_JSET: - case BPF_JNE: - jmp_cond = IA32_JNE; - break; - case BPF_JGT: - /* GT is unsigned '>', JA in x86 */ - jmp_cond = IA32_JA; - break; - case BPF_JLT: - /* LT is unsigned '<', JB in x86 */ - jmp_cond = IA32_JB; - break; - case BPF_JGE: - /* GE is unsigned '>=', JAE in x86 */ - jmp_cond = IA32_JAE; - break; - case BPF_JLE: - /* LE is unsigned '<=', JBE in x86 */ - jmp_cond = IA32_JBE; - break; - case BPF_JSGT: - /* Signed '>', GT in x86 */ - jmp_cond = IA32_JG; - break; - case BPF_JSLT: - /* Signed '<', LT in x86 */ - jmp_cond = IA32_JL; - break; - case BPF_JSGE: - /* Signed '>=', GE in x86 */ - jmp_cond = IA32_JGE; - break; - case BPF_JSLE: - /* Signed '<=', LE in x86 */ - jmp_cond = IA32_JLE; - break; - default: /* to silence GCC warning */ +emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false); + if (jmp_cond == COND_JMP_OPCODE_INVALID) return -EFAULT; - } jmp_offset = addrs[i + insn->off] - addrs[i]; if (is_imm8(jmp_offset)) { EMIT2(jmp_cond, jmp_offset); @@ -2242,7 +2071,66 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, pr_err("cond_jmp gen bug %llx\n", jmp_offset); return -EFAULT; } + break; + } + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: { + u8 dreg_lo = dstk ? IA32_EAX : dst_lo; + u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 sreg_lo = IA32_ECX; + u8 sreg_hi = IA32_EBX; + u32 hi; + + if (dstk) { + EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), + STACK_VAR(dst_lo)); + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); + } + + /* mov ecx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 10); + /* cmp dreg_lo,sreg_lo */ + EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); + + /* + * For simplicity of branch offset computation, + * let's use fixed jump coding here. 
+ */ +emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */ + jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true); + if (jmp_cond == COND_JMP_OPCODE_INVALID) + return -EFAULT; + jmp_offset = addrs[i + insn->off] - addrs[i] + 8; + if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + EMIT2(0xEB, 6); + /* Check the condition for high 32-bit comparison */ + jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false); + if (jmp_cond == COND_JMP_OPCODE_INVALID) + return -EFAULT; + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } break; } case BPF_JMP | BPF_JA: @@ -2305,7 +2193,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } if (image) { - if (unlikely(proglen + ilen > oldproglen)) { + /* + * When populating the image, assert that: + * + * i) We do not write beyond the allocated space, and + * ii) addrs[i] did not change from the prior run, in order + * to validate assumptions made for computing branch + * displacements. + */ + if (unlikely(proglen + ilen > oldproglen || + proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 649bdde63e328b223ff8371057e9052371ddb7e3..bfa50e65ef6c380557d576681a8cf6cede4db8db 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -93,7 +93,8 @@ static int __init early_root_info_init(void) vendor = id & 0xffff; device = (id>>16) & 0xffff; - if (vendor != PCI_VENDOR_ID_AMD) + if (vendor != PCI_VENDOR_ID_AMD && + vendor != PCI_VENDOR_ID_HYGON) continue; if (hb_probes[i].device == device) { @@ -390,7 +391,8 @@ static int __init pci_io_ecs_init(void) static int __init amd_postcore_init(void) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; early_root_info_init(); diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index 526536c81ddc41d395fd971d909a3b687e46d989..ca1e8e6dccc8afc235402e2ef7efd0a86a936a07 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func) word1 = read_pci_config_16(bus, slot, func, 0xc0); word2 = read_pci_config_16(bus, slot, func, 0xc2); if (word1 != word2) { - res.start = (word1 << 16) | 0x0000; - res.end = (word2 << 16) | 0xffff; + res.start = ((resource_size_t) word1 << 16) | 0x0000; + res.end = ((resource_size_t) word2 << 16) | 0xffff; res.flags = IORESOURCE_MEM; update_res(info, res.start, res.end, res.flags, 0); } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 13f4485ca388cb199080dce86583545f0719e676..e723559c386a14212b48987fc034d2c2fda2dece 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -588,6 +588,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme); +/* + * Device [1022:7914] + * When in D0, PME# doesn't get asserted when plugging USB 2.0 device. 
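The (resource_size_t) casts added to cnb20le_res() above exist because a u16 shifted left by 16 is first promoted to a signed 32-bit int; if bit 15 was set, the intermediate value is negative and gets sign-extended when stored into a 64-bit resource field. A small, self-contained illustration of that hazard (plain userspace C, not the kernel code):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t word = 0x8000;		/* any value with bit 15 set */
		/* int arithmetic: formally undefined for signed int, in practice
		 * a negative value that is then sign-extended to 64 bits */
		uint64_t bad  = word << 16;
		uint64_t good = (uint64_t)word << 16;

		printf("bad  = 0x%016" PRIx64 "\n", bad);	/* 0xffffffff80000000 */
		printf("good = 0x%016" PRIx64 "\n", good);	/* 0x0000000080000000 */
		return 0;
	}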
+ */ +static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev) +{ + dev_info(&dev->dev, "PME# does not work under D0, disabling it\n"); + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme); + /* * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] * @@ -629,17 +640,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); static void quirk_no_aersid(struct pci_dev *pdev) { /* VMD Domain */ - if (is_vmd(pdev->bus)) + if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus)) pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid); + +static void quirk_intel_th_dnv(struct pci_dev *dev) +{ + struct resource *r = &dev->resource[4]; + + /* + * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which + * appears to be 4 MB in reality. + */ + if (r->end == r->start + 0x7ff) { + r->start = 0; + r->end = 0x3fffff; + r->flags |= IORESOURCE_UNSET; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv); #ifdef CONFIG_PHYS_ADDR_T_64BIT diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 52e55108404ebfd9f003bde6dd08f4be98531a9f..d3a73f9335e11c3f8f285ab18ce1ae4f04b0c27f 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = { void __init pcibios_irq_init(void) { + struct irq_routing_table *rtable = NULL; + DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) @@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void) pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS - if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) + if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { pirq_table = pcibios_get_irq_routing_table(); + rtable = pirq_table; + } #endif if (pirq_table) { pirq_peer_trick(); @@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void) * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ - if (io_apic_assign_pci_irqs) + if (io_apic_assign_pci_irqs) { + kfree(rtable); pirq_table = NULL; + } } x86_init.pci.fixup_irqs(); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 9112d1cb397bb56faa637216f1e39815c50edd9d..910ffe04fb99f51a7296425a86acdff952d57075 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -440,6 +440,11 @@ void __init xen_msi_init(void) x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + /* + * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely + * controlled by the hypervisor. 
+ */ + pci_msi_ignore_mask = 1; } #endif diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c index 034813d4ab1e1a2ff3c1d2c1969a61cfd198a3e6..41dae0f0d898edb1bb19c6fbc71140849c34b4e6 100644 --- a/arch/x86/platform/atom/punit_atom_debug.c +++ b/arch/x86/platform/atom/punit_atom_debug.c @@ -143,8 +143,8 @@ static void punit_dbgfs_unregister(void) (kernel_ulong_t)&drv_data } static const struct x86_cpu_id intel_punit_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt), - ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng), + ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng), ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht), {} }; diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 5fdacb322ceb490515a9ac56af08bc604ab3d842..c3e6be110b7d06a5bfbd332a68144fd2d04bb32d 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c @@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) num--; } - if (efi_x >= si->lfb_width) { + if (efi_x + font->width > si->lfb_width) { efi_x = 0; efi_y += font->height; } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 9061babfbc83d73b59a1cddb7b954353e3f53c2d..d8f295926485990f8cdce5f55966ffdba77e6677 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -86,6 +86,8 @@ static efi_status_t __init phys_efi_set_virtual_address_map( pgd_t *save_pgd; save_pgd = efi_call_phys_prolog(); + if (!save_pgd) + return EFI_ABORTED; /* Disable interrupts around EFI calls: */ local_irq_save(flags); @@ -99,26 +101,6 @@ static efi_status_t __init phys_efi_set_virtual_address_map( return status; } -void __init efi_find_mirror(void) -{ - efi_memory_desc_t *md; - u64 mirror_size = 0, total_size = 0; - - for_each_efi_memory_desc(md) { - unsigned long long start = md->phys_addr; - unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; - - total_size += size; - if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { - memblock_mark_mirror(start, size); - mirror_size += size; - } - } - if (mirror_size) - pr_info("Memory: %lldM/%lldM mirrored memory\n", - mirror_size>>20, total_size>>20); -} - /* * Tell the kernel about the EFI memory map. 
This might include * more than the max 128 entries that can fit in the e820 legacy @@ -276,22 +258,6 @@ static void __init efi_clean_memmap(void) } } -void __init efi_print_memmap(void) -{ - efi_memory_desc_t *md; - int i = 0; - - for_each_efi_memory_desc(md) { - char buf[64]; - - pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n", - i++, efi_md_typeattr_format(buf, sizeof(buf), md), - md->phys_addr, - md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1, - (md->num_pages >> (20 - EFI_PAGE_SHIFT))); - } -} - static int __init efi_systab_init(void *phys) { if (efi_enabled(EFI_64BIT)) { @@ -480,7 +446,6 @@ void __init efi_init(void) efi_char16_t *c16; char vendor[100] = "unknown"; int i = 0; - void *tmp; #ifdef CONFIG_X86_32 if (boot_params.efi_info.efi_systab_hi || @@ -505,14 +470,16 @@ void __init efi_init(void) /* * Show what we know for posterity */ - c16 = tmp = early_memremap(efi.systab->fw_vendor, 2); + c16 = early_memremap_ro(efi.systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); if (c16) { - for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) - vendor[i] = *c16++; + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i) + vendor[i] = c16[i]; vendor[i] = '\0'; - } else + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } else { pr_err("Could not map the firmware vendor!\n"); - early_memunmap(tmp, 2); + } pr_info("EFI v%u.%.02u by %s\n", efi.systab->hdr.revision >> 16, @@ -893,9 +860,6 @@ static void __init kexec_enter_virtual_mode(void) if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) runtime_code_page_mkexec(); - - /* clean DUMMY object */ - efi_delete_dummy_variable(); #endif } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ee5d08f25ce45f21aa81550ce317760a2c745900..dfc809b31c7c6956f1fdab4c81dde58f6ef4af17 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -84,13 +84,15 @@ pgd_t * __init efi_call_phys_prolog(void) if (!efi_enabled(EFI_OLD_MEMMAP)) { efi_switch_mm(&efi_mm); - return NULL; + return efi_mm.pgd; } early_code_mapping_set_exec(1); n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); + if (!save_pgd) + return NULL; /* * Build 1:1 identity mapping for efi=old_map usage. Note that @@ -138,10 +140,11 @@ pgd_t * __init efi_call_phys_prolog(void) pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; } -out: __flush_tlb_all(); - return save_pgd; +out: + efi_call_phys_epilog(save_pgd); + return NULL; } void __init efi_call_phys_epilog(pgd_t *save_pgd) diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 844d31cb8a0c7eae1dcb37ed48fa373564e83f22..006eb09e95879e0b1f8c8276ae75226dd097c139 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -259,10 +259,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - /* No need to reserve regions that will never be freed. 
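The fw_vendor handling above now maps enough bytes for the whole (truncated) vendor string rather than a single UCS-2 character, and indexes the mapping instead of advancing the pointer, so the original address is still available for the unmap. A rough userspace analogue of that bounded UCS-2-to-char copy (the string contents and names are invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* invented vendor string: UCS-2 (16-bit) characters, NUL terminated */
		uint16_t fw_vendor[] = { 'D', 'e', 'm', 'o', ' ', 'F', 'W', 0 };
		char vendor[100] = "unknown";
		size_t i;

		/* bounded copy, dropping the high byte of each UCS-2 character */
		for (i = 0; i < sizeof(vendor) - 1 && fw_vendor[i]; i++)
			vendor[i] = fw_vendor[i];
		vendor[i] = '\0';

		printf("EFI firmware vendor: %s\n", vendor);
		return 0;
	}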
*/ - if (md.attribute & EFI_MEMORY_RUNTIME) - return; - size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); @@ -282,7 +278,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - new = early_memremap(new_phys, new_size); + new = early_memremap_prot(new_phys, new_size, + pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL))); if (!new) { pr_err("Failed to map new boot services memmap\n"); return; @@ -292,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) early_memunmap(new, new_size); efi_memmap_install(new_phys, num_entries); + e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); + e820__update_table(e820_table); } /* diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c index 5a0483e7bf662cb27044b7684567b644dfe49b81..31dce781364cf2dc3ad2771eec57ca0fae97e354 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c @@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_data __initdata = { { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } static const struct x86_cpu_id bt_sfi_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data), {} }; diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c index a2b4efddd61a58de2ec0fcf4fdc47f0a4dd45ebb..8e7ddd7e313a40c4d06624df085341fc3fcf0cee 100644 --- a/arch/x86/platform/olpc/olpc-xo1-rtc.c +++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c @@ -16,6 +16,7 @@ #include #include +#include static void rtc_wake_on(struct device *dev) { @@ -75,6 +76,8 @@ static int __init xo1_rtc_init(void) if (r) return r; + x86_platform.legacy.rtc = 0; + device_init_wakeup(&xo1_rtc_device.dev, 1); return 0; } diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c58295e4245b09be21a658c3ddae86a2..eb33432f2f241db7475e32e651ae2bc9ca526361 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -29,7 +29,8 @@ struct uv_systab *uv_systab; -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + u64 a4, u64 a5) { struct uv_systab *tab = uv_systab; s64 ret; @@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) return ret; } + +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +{ + s64 ret; + + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + up(&__efi_uv_runtime_lock); + + return ret; +} EXPORT_SYMBOL_GPL(uv_bios_call); s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, @@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, unsigned long bios_flags; s64 ret; + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + local_irq_save(bios_flags); - ret = uv_bios_call(which, a1, a2, a3, a4, a5); + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); local_irq_restore(bios_flags); + up(&__efi_uv_runtime_lock); + return ret; } diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index a4130b84d1ff5650f020802f7ebe7e990db97e71..1297e185b8c8d7a5358aac9aeac4aecc1419433a 100644 --- a/arch/x86/platform/uv/tlb_uv.c 
+++ b/arch/x86/platform/uv/tlb_uv.c @@ -2010,8 +2010,7 @@ static void make_per_cpu_thp(struct bau_control *smaster) int cpu; size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus(); - smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode); - memset(smaster->thp, 0, hpsz); + smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode); for_each_present_cpu(cpu) { smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode; smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; @@ -2134,17 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs, */ static int __init init_per_cpu(int nuvhubs, int base_part_pnode) { - unsigned char *uvhub_mask; - void *vp; struct uvhub_desc *uvhub_descs; + unsigned char *uvhub_mask = NULL; if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub()) timeout_us = calculate_destination_timeout(); - vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); - uvhub_descs = (struct uvhub_desc *)vp; - memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); + uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); + if (!uvhub_descs) + goto fail; + uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); + if (!uvhub_mask) + goto fail; if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) goto fail; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index a7d966964c6f20577c927cf5e618bc86b3331977..7948249482637f05cf0c1dd43779e2d0aa5103ae 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -24,7 +25,8 @@ #include #include #include -#include +#include +#include #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -40,7 +42,8 @@ static void msr_save_context(struct saved_context *ctxt) struct saved_msr *end = msr + ctxt->saved_msrs.num; while (msr < end) { - msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); + if (msr->valid) + rdmsrl(msr->info.msr_no, msr->info.reg.q); msr++; } } @@ -266,6 +269,13 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); perf_restore_debug_store(); + + microcode_bsp_resume(); + + /* + * This needs to happen after the microcode has been updated upon resume + * because some of the MSRs are "emulated" in microcode. + */ msr_restore_context(ctxt); } @@ -299,7 +309,17 @@ int hibernate_resume_nonboot_cpu_disable(void) * address in its instruction pointer may not be possible to resolve * any more at that point (the page tables used by it previously may * have been overwritten by hibernate image data). + * + * First, make sure that we wake up all the potentially disabled SMT + * threads which have been initially brought up and then put into + * mwait/cpuidle sleep. + * Those will be put to proper (not interfering with hibernation + * resume) sleep afterwards, and the resumed kernel will decide itself + * what to do with them. 
*/ + ret = cpuhp_smt_enable(); + if (ret) + return ret; smp_ops.play_dead = resume_play_dead; ret = disable_nonboot_cpus(); smp_ops.play_dead = play_dead; @@ -388,15 +408,14 @@ static int __init bsp_pm_check_init(void) core_initcall(bsp_pm_check_init); -static int msr_init_context(const u32 *msr_id, const int total_num) +static int msr_build_context(const u32 *msr_id, const int num) { - int i = 0; + struct saved_msrs *saved_msrs = &saved_context.saved_msrs; struct saved_msr *msr_array; + int total_num; + int i, j; - if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { - pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n"); - return -EINVAL; - } + total_num = saved_msrs->num + num; msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); if (!msr_array) { @@ -404,19 +423,32 @@ static int msr_init_context(const u32 *msr_id, const int total_num) return -ENOMEM; } - for (i = 0; i < total_num; i++) { - msr_array[i].info.msr_no = msr_id[i]; - msr_array[i].valid = false; + if (saved_msrs->array) { + /* + * Multiple callbacks can invoke this function, so copy any + * MSR save requests from previous invocations. + */ + memcpy(msr_array, saved_msrs->array, + sizeof(struct saved_msr) * saved_msrs->num); + + kfree(saved_msrs->array); + } + + for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { + u64 dummy; + + msr_array[i].info.msr_no = msr_id[j]; + msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); msr_array[i].info.reg.q = 0; } - saved_context.saved_msrs.num = total_num; - saved_context.saved_msrs.array = msr_array; + saved_msrs->num = total_num; + saved_msrs->array = msr_array; return 0; } /* - * The following section is a quirk framework for problematic BIOSen: + * The following sections are a quirk framework for problematic BIOSen: * Sometimes MSRs are modified by the BIOSen after suspended to * RAM, this might cause unexpected behavior after wakeup. 
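msr_build_context() above grows the saved-MSR array incrementally: allocate room for old plus new entries, copy the previously requested MSRs across, free the old array, then append the new IDs (probing each once to set .valid). A userspace sketch of the same grow-and-append pattern, with malloc standing in for kmalloc_array and all names invented:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct saved_entry {
		unsigned int id;
		int valid;
	};

	static struct saved_entry *saved;
	static size_t saved_num;

	/* Callers may invoke this several times; each call must preserve the
	 * entries requested by earlier calls, like msr_build_context(). */
	static int context_append(const unsigned int *ids, size_t num)
	{
		size_t total = saved_num + num;
		struct saved_entry *arr = malloc(total * sizeof(*arr));

		if (!arr)
			return -1;

		if (saved) {
			memcpy(arr, saved, saved_num * sizeof(*arr));
			free(saved);
		}

		for (size_t i = saved_num, j = 0; i < total; i++, j++) {
			arr[i].id = ids[j];
			arr[i].valid = 1;	/* stand-in for the rdmsrl_safe() probe */
		}

		saved = arr;
		saved_num = total;
		return 0;
	}

	int main(void)
	{
		unsigned int first[] = { 0x10, 0x1a0 };
		unsigned int second[] = { 0x48 };

		context_append(first, 2);
		context_append(second, 1);

		for (size_t i = 0; i < saved_num; i++)
			printf("slot %zu: msr %#x\n", i, saved[i].id);

		free(saved);
		return 0;
	}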
* Thus we save/restore these specified MSRs across suspend/resume @@ -431,7 +463,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d) u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); - return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); + return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); } static const struct dmi_system_id msr_save_dmi_table[] = { @@ -446,9 +478,72 @@ static const struct dmi_system_id msr_save_dmi_table[] = { {} }; +static int msr_save_cpuid_features(const struct x86_cpu_id *c) +{ + u32 cpuid_msr_id[] = { + MSR_AMD64_CPUID_FN_1, + }; + + pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n", + c->family); + + return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id)); +} + +static const struct x86_cpu_id msr_save_cpu_table[] = { + { + .vendor = X86_VENDOR_AMD, + .family = 0x15, + .model = X86_MODEL_ANY, + .feature = X86_FEATURE_ANY, + .driver_data = (kernel_ulong_t)msr_save_cpuid_features, + }, + { + .vendor = X86_VENDOR_AMD, + .family = 0x16, + .model = X86_MODEL_ANY, + .feature = X86_FEATURE_ANY, + .driver_data = (kernel_ulong_t)msr_save_cpuid_features, + }, + {} +}; + +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); +static int pm_cpu_check(const struct x86_cpu_id *c) +{ + const struct x86_cpu_id *m; + int ret = 0; + + m = x86_match_cpu(msr_save_cpu_table); + if (m) { + pm_cpu_match_t fn; + + fn = (pm_cpu_match_t)m->driver_data; + ret = fn(m); + } + + return ret; +} + +static void pm_save_spec_msr(void) +{ + u32 spec_msr_id[] = { + MSR_IA32_SPEC_CTRL, + MSR_IA32_TSX_CTRL, + MSR_TSX_FORCE_ABORT, + MSR_IA32_MCU_OPT_CTRL, + MSR_AMD64_LS_CFG, + }; + + msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id)); +} + static int pm_check_save_msr(void) { dmi_check_system(msr_save_dmi_table); + pm_cpu_check(msr_save_cpu_table); + pm_save_spec_msr(); + return 0; } diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index f8e3b668d20b9b2cde3ff31ed6131efbf7273b4a..6c3ec193a24657470f00769fe02cb53103ebe57f 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -265,9 +266,9 @@ static int get_e820_md5(struct e820_table *table, void *buf) return ret; } -static void hibernation_e820_save(void *buf) +static int hibernation_e820_save(void *buf) { - get_e820_md5(e820_table_firmware, buf); + return get_e820_md5(e820_table_firmware, buf); } static bool hibernation_e820_mismatch(void *buf) @@ -287,8 +288,9 @@ static bool hibernation_e820_mismatch(void *buf) return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false; } #else -static void hibernation_e820_save(void *buf) +static int hibernation_e820_save(void *buf) { + return 0; } static bool hibernation_e820_mismatch(void *buf) @@ -333,9 +335,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) rdr->magic = RESTORE_MAGIC; - hibernation_e820_save(rdr->e820_digest); - - return 0; + return hibernation_e820_save(rdr->e820_digest); } /** @@ -363,3 +363,35 @@ int arch_hibernation_header_restore(void *addr) return 0; } + +int arch_resume_nosmt(void) +{ + int ret = 0; + /* + * We reached this while coming out of hibernation. This means + * that SMT siblings are sleeping in hlt, as mwait is not safe + * against control transition during resume (see comment in + * hibernate_resume_nonboot_cpu_disable()). 
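pm_cpu_check() above treats the driver_data slot of the matched x86_cpu_id entry as a function pointer and calls it, so the table both selects the CPUs and names the handler to run for them. A compact userspace analogue of that table-with-callback idiom (everything here is invented for illustration, not the kernel API):

	#include <stdint.h>
	#include <stdio.h>

	struct cpu_id {
		unsigned int vendor;
		unsigned int family;
		uintptr_t driver_data;	/* actually carries a handler */
	};

	typedef int (*match_fn)(const struct cpu_id *);

	static int save_cpuid_msrs(const struct cpu_id *c)
	{
		printf("family %#x matched, scheduling MSR save\n", c->family);
		return 0;
	}

	static const struct cpu_id table[] = {
		{ .vendor = 2, .family = 0x15, .driver_data = (uintptr_t)save_cpuid_msrs },
		{ .vendor = 2, .family = 0x16, .driver_data = (uintptr_t)save_cpuid_msrs },
		{ }			/* terminator */
	};

	static const struct cpu_id *match(unsigned int vendor, unsigned int family)
	{
		for (const struct cpu_id *m = table; m->vendor; m++)
			if (m->vendor == vendor && m->family == family)
				return m;
		return NULL;
	}

	int main(void)
	{
		const struct cpu_id *m = match(2, 0x16);

		if (m) {
			match_fn fn = (match_fn)m->driver_data;
			return fn(m);
		}
		return 0;
	}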
+ * + * If the resumed kernel has SMT disabled, we have to take all the + * SMT siblings out of hlt, and offline them again so that they + * end up in mwait proper. + * + * Called with hotplug disabled. + */ + cpu_hotplug_enable(); + if (cpu_smt_control == CPU_SMT_DISABLED || + cpu_smt_control == CPU_SMT_FORCE_DISABLED) { + enum cpuhp_smt_control old = cpu_smt_control; + + ret = cpuhp_smt_enable(); + if (ret) + goto out; + ret = cpuhp_smt_disable(old); + if (ret) + goto out; + } +out: + cpu_hotplug_disable(); + return ret; +} diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 3cf302b2633222ff45323b2528f3499233b650c5..b81b5172cf994761f0aea779e686ce742868c685 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) +$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE + $(call if_changed_rule,cc_o_c) + $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE $(call if_changed_rule,cc_o_c) @@ -15,13 +18,40 @@ targets += purgatory.ro KASAN_SANITIZE := n KCOV_INSTRUMENT := n +# These are adjustments to the compiler flags used for objects that +# make up the standalone purgatory.ro + +PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel +PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not -# sure how to relocate those. Like kexec-tools, use custom flags. +# sure how to relocate those. +ifdef CONFIG_FUNCTION_TRACER +PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE) +endif + +ifdef CONFIG_STACKPROTECTOR +PURGATORY_CFLAGS_REMOVE += -fstack-protector +endif + +ifdef CONFIG_STACKPROTECTOR_STRONG +PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong +endif + +ifdef CONFIG_RETPOLINE +PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS) +endif + +CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_purgatory.o += $(PURGATORY_CFLAGS) + +CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_sha256.o += $(PURGATORY_CFLAGS) -KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large -KBUILD_CFLAGS += -m$(BITS) -KBUILD_CFLAGS += $(call cc-option,-fno-PIE) +CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) +CFLAGS_string.o += $(PURGATORY_CFLAGS) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 025c34ac0d848f642a4b4b1c2c9577c52aa72614..7971f7a8af59f1aa84bec3e9ea2129c8b0c629ee 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -70,3 +70,9 @@ void purgatory(void) } copy_backup_region(); } + +/* + * Defined in order to reuse memcpy() and memset() from + * arch/x86/boot/compressed/string.c + */ +void warn(const char *msg) {} diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c deleted file mode 100644 index 795ca4f2cb3c912e37f214802cde76fc4fea7985..0000000000000000000000000000000000000000 --- a/arch/x86/purgatory/string.c +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Simple string functions. - * - * Copyright (C) 2014 Red Hat Inc. 
- * - * Author: - * Vivek Goyal - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. - */ - -#include - -#include "../boot/string.c" - -void *memcpy(void *dst, const void *src, size_t len) -{ - return __builtin_memcpy(dst, src, len); -} - -void *memset(void *dst, int c, size_t len) -{ - return __builtin_memset(dst, c, len); -} diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index d10105825d57a7faee5f3221d04f0e77b9807e45..47d097946872772264ddf1fe55c5d5de014e3b43 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -20,8 +20,6 @@ void __init set_real_mode_mem(phys_addr_t mem, size_t size) void *base = __va(mem); real_mode_header = (struct real_mode_header *) base; - printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", - base, (unsigned long long)mem, size); } void __init reserve_real_mode(void) diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 4463fa72db945b4c43396d347c475230fcb6431d..96cb20de08af8a61836af68bb933f517915f3d1d 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE targets += realmode.lds $(obj)/realmode.lds: $(obj)/pasyms.h -LDFLAGS_realmode.elf := --emit-relocs -T +LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj) targets += realmode.elf diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index b02a36b2c14fb20522dc16d3f4093cbe36e2e740..a42015b305f406ee2b48a40df81daeee628f1832 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -69,7 +69,7 @@ BEGIN { lprefix1_expr = "\\((66|!F3)\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" + lprefix3_expr = "\\((F2|!F3|66&F2)\\)" lprefix_expr = "\\((66|F2|F3)\\)" max_lprefix = 4 @@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) return add_flags(imm, mod) } -/^[0-9a-f]+\:/ { +/^[0-9a-f]+:/ { if (NR == 1) next # get index diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 3a6c8ebc8032eb5c95e70af2e5e4f44000e29350..3fdf415a1da1f34be91611756c1037d12764d164 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -579,6 +579,14 @@ static void print_absolute_relocs(void) if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) { continue; } + /* + * Do not perform relocations in .notes section; any + * values there are meant for pre-boot consumption (e.g. + * startup_xen). + */ + if (sec_applies->shdr.sh_type == SHT_NOTE) { + continue; + } sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { @@ -664,6 +672,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) { continue; } + + /* + * Do not perform relocations in .notes sections; any + * values there are meant for pre-boot consumption (e.g. + * startup_xen). 
+ */ + if (sec_applies->shdr.sh_type == SHT_NOTE) + continue; + sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index f518b4744ff893282c45ec93746a40ffa7e9eda8..a9e80e44178c7818cdf03ae07fb66128c028f003 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -16,7 +16,7 @@ config 64BIT config X86_32 def_bool !64BIT - select HAVE_AOUT + select ARCH_32BIT_OFF_T select ARCH_WANT_IPC_PARSE_VERSION select MODULES_USE_ELF_REL select CLONE_BACKWARDS diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h index 83a75f8a12330237a73b20a9d5c96c9a77d4b450..b9ac7c9eb72c553fdd13ac4e3a8a5282f3d2f2b8 100644 --- a/arch/x86/um/asm/checksum_32.h +++ b/arch/x86/um/asm/checksum_32.h @@ -43,7 +43,7 @@ static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(VERIFY_WRITE, dst, len)) { + if (access_ok(dst, len)) { if (copy_to_user(dst, src, len)) { *err_ptr = -EFAULT; return (__force __wsum)-1; diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h index b94a108de1dc894e77c9e49310e4d0a12d177ce8..ae00d22bce02c61fbd5db9e9705fc0e8be386aab 100644 --- a/arch/x86/um/shared/sysdep/ptrace_32.h +++ b/arch/x86/um/shared/sysdep/ptrace_32.h @@ -10,20 +10,10 @@ static inline void update_debugregs(int seq) {} -/* syscall emulation path in ptrace */ - -#ifndef PTRACE_SYSEMU -#define PTRACE_SYSEMU 31 -#endif - void set_using_sysemu(int value); int get_using_sysemu(void); extern int sysemu_supported; -#ifndef PTRACE_SYSEMU_SINGLESTEP -#define PTRACE_SYSEMU_SINGLESTEP 32 -#endif - #define UPT_SYSCALL_ARG1(r) UPT_BX(r) #define UPT_SYSCALL_ARG2(r) UPT_CX(r) #define UPT_SYSCALL_ARG3(r) UPT_DX(r) diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index 727ed442e0a52f0b60b57567abf5bbc59c8e6e5f..8b4a71efe7eef1c71e778bf17ff1929c6d68b0a8 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -367,7 +367,7 @@ int setup_signal_stack_sc(unsigned long stack_top, struct ksignal *ksig, /* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */ stack_top = ((stack_top + 4) & -16UL) - 4; frame = (struct sigframe __user *) stack_top - 1; - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return 1; restorer = frame->retcode; @@ -412,7 +412,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, stack_top &= -8UL; frame = (struct rt_sigframe __user *) stack_top - 1; - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) return 1; restorer = frame->retcode; @@ -497,7 +497,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig, /* Subtract 128 for a red zone and 8 for proper alignment */ frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto out; if (ksig->ka.sa.sa_flags & SA_SIGINFO) { diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 822ccdba93adac4cef571cfd0308c871c3bc28f7..4437b208ec6d2ee7e59990b667853189c256a593 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -50,8 +50,13 @@ $(vobjs): KBUILD_CFLAGS += $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 
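The checksum_32.h and signal.c hunks above are part of the tree-wide removal of access_ok()'s type argument. Out-of-tree code that must build against both old and new kernels usually hides the difference behind a small wrapper; one common shape for such a shim is sketched below (the wrapper name is invented, and the 5.0 cut-off reflects when the argument was dropped upstream):

	/*
	 * Compatibility wrapper for the access_ok() signature change: older
	 * kernels take (type, addr, size), newer ones take (addr, size).
	 */
	#include <linux/version.h>
	#include <linux/uaccess.h>

	#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
	#define demo_access_ok(addr, size)	access_ok(VERIFY_WRITE, (addr), (size))
	#else
	#define demo_access_ok(addr, size)	access_ok((addr), (size))
	#endif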
# +ifeq ($(CONFIG_PGO_KERNEL),y) +CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-generate -ftest-coverage +CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-generate -ftest-coverage +else CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage +endif # # The DSO images are built using a special linker script. diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c index 1804b27f9632a12b4ed9877c8e97bce35177a2be..66bcdeeee639a0a81b3a2ab4f24a59d9327e2593 100644 --- a/arch/x86/xen/efi.c +++ b/arch/x86/xen/efi.c @@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void) efi.get_variable = xen_efi_get_variable; efi.get_next_variable = xen_efi_get_next_variable; efi.set_variable = xen_efi_set_variable; + efi.set_variable_nonblocking = xen_efi_set_variable; efi.query_variable_info = xen_efi_query_variable_info; + efi.query_variable_info_nonblocking = xen_efi_query_variable_info; efi.update_capsule = xen_efi_update_capsule; efi.query_capsule_caps = xen_efi_query_capsule_caps; efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 2eeddd81465330f43e269d4e1d2449f82772bfd1..2483ff345bbcde95aa7d6aa0f09e712e242b2a52 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -7,7 +7,6 @@ #include #include -#include #include #include @@ -267,19 +266,41 @@ void xen_reboot(int reason) BUG(); } +static int reboot_reason = SHUTDOWN_reboot; +static bool xen_legacy_crash; void xen_emergency_restart(void) { - xen_reboot(SHUTDOWN_reboot); + xen_reboot(reboot_reason); } static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { - if (!kexec_crash_loaded()) - xen_reboot(SHUTDOWN_crash); + if (!kexec_crash_loaded()) { + if (xen_legacy_crash) + xen_reboot(SHUTDOWN_crash); + + reboot_reason = SHUTDOWN_crash; + + /* + * If panic_timeout==0 then we are supposed to wait forever. + * However, to preserve original dom0 behavior we have to drop + * into hypervisor. 
(domU behavior is controlled by its + * config file) + */ + if (panic_timeout == 0) + panic_timeout = -1; + } return NOTIFY_DONE; } +static int __init parse_xen_legacy_crash(char *arg) +{ + xen_legacy_crash = true; + return 0; +} +early_param("xen_legacy_crash", parse_xen_legacy_crash); + static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event, .priority = INT_MIN @@ -343,80 +364,3 @@ void xen_arch_unregister_cpu(int num) } EXPORT_SYMBOL(xen_arch_unregister_cpu); #endif - -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG -void __init arch_xen_balloon_init(struct resource *hostmem_resource) -{ - struct xen_memory_map memmap; - int rc; - unsigned int i, last_guest_ram; - phys_addr_t max_addr = PFN_PHYS(max_pfn); - struct e820_table *xen_e820_table; - const struct e820_entry *entry; - struct resource *res; - - if (!xen_initial_domain()) - return; - - xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); - if (!xen_e820_table) - return; - - memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); - set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); - rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); - if (rc) { - pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); - goto out; - } - - last_guest_ram = 0; - for (i = 0; i < memmap.nr_entries; i++) { - if (xen_e820_table->entries[i].addr >= max_addr) - break; - if (xen_e820_table->entries[i].type == E820_TYPE_RAM) - last_guest_ram = i; - } - - entry = &xen_e820_table->entries[last_guest_ram]; - if (max_addr >= entry->addr + entry->size) - goto out; /* No unallocated host RAM. */ - - hostmem_resource->start = max_addr; - hostmem_resource->end = entry->addr + entry->size; - - /* - * Mark non-RAM regions between the end of dom0 RAM and end of host RAM - * as unavailable. The rest of that region can be used for hotplug-based - * ballooning. - */ - for (; i < memmap.nr_entries; i++) { - entry = &xen_e820_table->entries[i]; - - if (entry->type == E820_TYPE_RAM) - continue; - - if (entry->addr >= hostmem_resource->end) - break; - - res = kzalloc(sizeof(*res), GFP_KERNEL); - if (!res) - goto out; - - res->name = "Unavailable host RAM"; - res->start = entry->addr; - res->end = (entry->addr + entry->size < hostmem_resource->end) ? 
- entry->addr + entry->size : hostmem_resource->end; - rc = insert_resource(hostmem_resource, res); - if (rc) { - pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", - __func__, res->start, res->end, rc); - kfree(res); - goto out; - } - } - - out: - kfree(xen_e820_table); -} -#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 52a7c3faee0ccf88f840110093111e22bc7b30ad..3efafe2c7763f202b535163604bc719d348e0990 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -597,12 +597,12 @@ struct trap_array_entry { static struct trap_array_entry trap_array[] = { { debug, xen_xendebug, true }, - { int3, xen_xenint3, true }, { double_fault, xen_double_fault, true }, #ifdef CONFIG_X86_MCE { machine_check, xen_machine_check, true }, #endif { nmi, xen_xennmi, true }, + { int3, xen_int3, false }, { overflow, xen_overflow, false }, #ifdef CONFIG_IA32_EMULATION { entry_INT80_compat, xen_entry_INT80_compat, false }, @@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) val = native_read_msr_safe(msr, err); switch (msr) { case MSR_IA32_APICBASE: -#ifdef CONFIG_X86_X2APIC - if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) -#endif - val &= ~X2APIC_ENABLE; + val &= ~X2APIC_ENABLE; break; } return val; @@ -1222,6 +1219,7 @@ asmlinkage __visible void __init xen_start_kernel(void) x86_platform.get_nmi_reason = xen_get_nmi_reason; x86_init.resources.memory_setup = xen_memory_setup; + x86_init.irqs.intr_mode_select = x86_init_noop; x86_init.irqs.intr_mode_init = x86_init_noop; x86_init.oem.arch_setup = xen_arch_setup; x86_init.oem.banner = xen_banner; diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index c85d1a88f47693232369411588cfc19084086b25..dab07827d25e88ec79d2348003b84aa7b2f5b6c9 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -75,7 +75,7 @@ static void __init init_pvh_bootparams(void) * Version 2.12 supports Xen entry point but we will use default x86/PC * environment (i.e. hardware_subarch 0). */ - pvh_bootparams.hdr.version = 0x212; + pvh_bootparams.hdr.version = (2 << 8) | 12; pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */ x86_init.acpi.get_root_pointer = pvh_get_root_pointer; @@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void) } xen_pvh = 1; + xen_domain_type = XEN_HVM_DOMAIN; xen_start_flags = pvh_start_info.flags; msr = cpuid_ebx(xen_cpuid_base() + 2); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index dd461c0167ef026e466fd99f3815c4c71c65c16f..73aa0b89a74a40350c8d70dd06f93321206915fe 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -640,19 +640,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, unsigned long limit) { int i, nr, flush = 0; - unsigned hole_low, hole_high; + unsigned hole_low = 0, hole_high = 0; /* The limit is the last byte to be touched */ limit--; BUG_ON(limit >= FIXADDR_TOP); +#ifdef CONFIG_X86_64 /* * 64-bit has a great big hole in the middle of the address - * space, which contains the Xen mappings. On 32-bit these - * will end up making a zero-sized hole and so is a no-op. + * space, which contains the Xen mappings. 
*/ - hole_low = pgd_index(USER_LIMIT); - hole_high = pgd_index(PAGE_OFFSET); + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR); + hole_high = pgd_index(GUARD_HOLE_END_ADDR); +#endif nr = pgd_index(limit) + 1; for (i = 0; i < nr; i++) { @@ -1897,7 +1898,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) init_top_pgt[0] = __pgd(0); /* Pre-constructed entries are in pfn, so convert to mfn */ - /* L4[272] -> level3_ident_pgt */ + /* L4[273] -> level3_ident_pgt */ /* L4[511] -> level3_kernel_pgt */ convert_pfn_mfn(init_top_pgt); @@ -1917,8 +1918,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) addr[0] = (unsigned long)pgd; addr[1] = (unsigned long)l3; addr[2] = (unsigned long)l2; - /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: - * Both L4[272][0] and L4[511][510] have entries that point to the same + /* Graft it onto L4[273][0]. Note that we creating an aliasing problem: + * Both L4[273][0] and L4[511][510] have entries that point to the same * L2 (PMD) tables. Meaning that if you modify it in __va space * it will be also modified in the __ka space! (But if you just * modify the PMD table to point to other PTE's or none, then you @@ -2105,10 +2106,10 @@ void __init xen_relocate_p2m(void) pt = early_memremap(pt_phys, PAGE_SIZE); clear_page(pt); for (idx_pte = 0; - idx_pte < min(n_pte, PTRS_PER_PTE); - idx_pte++) { - set_pte(pt + idx_pte, - pfn_pte(p2m_pfn, PAGE_KERNEL)); + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + pt[idx_pte] = pfn_pte(p2m_pfn, + PAGE_KERNEL); p2m_pfn++; } n_pte -= PTRS_PER_PTE; @@ -2116,8 +2117,7 @@ void __init xen_relocate_p2m(void) make_lowmem_page_readonly(__va(pt_phys)); pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, PFN_DOWN(pt_phys)); - set_pmd(pmd + idx_pt, - __pmd(_PAGE_TABLE | pt_phys)); + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys); pt_phys += PAGE_SIZE; } n_pt -= PTRS_PER_PMD; @@ -2125,7 +2125,7 @@ void __init xen_relocate_p2m(void) make_lowmem_page_readonly(__va(pmd_phys)); pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, PFN_DOWN(pmd_phys)); - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys); pmd_phys += PAGE_SIZE; } n_pmd -= PTRS_PER_PUD; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 159a897151d64bab4bdc2222154af8e53e776438..82577eec6d0a79ce6d30f55fb7a3bddd1a86a638 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -706,9 +706,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, for (i = 0; i < count; i++) { unsigned long mfn, pfn; + struct gnttab_unmap_grant_ref unmap[2]; + int rc; /* Do not add to override if the map failed. */ - if (map_ops[i].status) + if (map_ops[i].status != GNTST_okay || + (kmap_ops && kmap_ops[i].status != GNTST_okay)) continue; if (map_ops[i].flags & GNTMAP_contains_pte) { @@ -722,10 +725,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { - ret = -ENOMEM; - goto out; + if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. 
+ */ + map_ops[i].status = GNTST_general_error; + unmap[0].host_addr = map_ops[i].host_addr, + unmap[0].handle = map_ops[i].handle; + map_ops[i].handle = ~0; + if (map_ops[i].flags & GNTMAP_device_map) + unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap[0].dev_bus_addr = 0; + + if (kmap_ops) { + kmap_ops[i].status = GNTST_general_error; + unmap[1].host_addr = kmap_ops[i].host_addr, + unmap[1].handle = kmap_ops[i].handle; + kmap_ops[i].handle = ~0; + if (kmap_ops[i].flags & GNTMAP_device_map) + unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; + else + unmap[1].dev_bus_addr = 0; } + + /* + * Pre-populate both status fields, to be recognizable in + * the log message below. + */ + unmap[0].status = 1; + unmap[1].status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + unmap, 1 + !!kmap_ops); + if (rc || unmap[0].status != GNTST_okay || + unmap[1].status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", + rc, unmap[0].status, unmap[1].status); } out: @@ -746,17 +785,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { + if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + else ret = -EINVAL; - goto out; - } - - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - kunmap_ops, count); -out: + kunmap_ops, count) ?: ret; + return ret; } EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c index 33a783c77d969ecb5c76b841e13aa2ac19ad0da5..184b369223979335325ec70b1a67ac451b906b64 100644 --- a/arch/x86/xen/platform-pci-unplug.c +++ b/arch/x86/xen/platform-pci-unplug.c @@ -146,6 +146,10 @@ void xen_unplug_emulated_devices(void) { int r; + /* PVH guests don't have emulated devices. 
*/ + if (xen_pvh_domain()) + return; + /* user explicitly requested no unplug */ if (xen_emul_unplug & XEN_UNPLUG_NEVER) return; diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 95997e6c06960073c75b713cb2be76ec74c0886c..5f72b6419d4ff6ba08065fe9ee2d4b6d03027caa 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -90,6 +90,12 @@ static void xen_pmu_arch_init(void) k7_counters_mirrored = 0; break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + amd_num_counters = F10H_NUM_COUNTERS; + amd_counters_base = MSR_K7_PERFCTR0; + amd_ctrls_base = MSR_K7_EVNTSEL0; + amd_msr_step = 1; + k7_counters_mirrored = 0; } else { uint32_t eax, ebx, ecx, edx; @@ -285,7 +291,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { if (is_amd_pmu_msr(msr)) { if (!xen_amd_pmu_emulate(msr, val, 1)) *val = native_read_msr_safe(msr, err); @@ -308,7 +314,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err) { uint64_t val = ((uint64_t)high << 32) | low; - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { if (is_amd_pmu_msr(msr)) { if (!xen_amd_pmu_emulate(msr, &val, 0)) *err = native_write_msr_safe(msr, low, high); @@ -379,7 +385,7 @@ static unsigned long long xen_intel_read_pmc(int counter) unsigned long long xen_read_pmc(int counter) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return xen_amd_read_pmc(counter); else return xen_intel_read_pmc(counter); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1163e33121fb39af673743b0eda9de5007629c63..075ed47993bbf5c1bf9b760511268613c035d8c1 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -808,6 +808,7 @@ char * __init xen_memory_setup(void) addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; while (i < xen_e820_table.nr_entries) { + bool discard = false; chunk_size = size; type = xen_e820_table.entries[i].type; @@ -823,10 +824,11 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(pfn_s, n_pfns); xen_max_p2m_pfn = pfn_s + n_pfns; } else - type = E820_TYPE_UNUSABLE; + discard = true; } - xen_align_and_add_e820_region(addr, chunk_size, type); + if (!discard) + xen_align_and_add_e820_region(addr, chunk_size, type); addr += chunk_size; size -= chunk_size; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 7a43b2ae19f1228b247e57fe453d21ab15e116f7..6a87d0a6a4aff268e1d0149068cf8af79b20ce69 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu) char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + if (!resched_name) + goto fail_mem; rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, xen_reschedule_interrupt, @@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu) per_cpu(xen_resched_irq, cpu).name = resched_name; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + if (!callfunc_name) + goto fail_mem; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, xen_call_function_interrupt, @@ -89,6 +93,8 @@ int xen_smp_intr_init(unsigned int cpu) per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + if (!debug_name) + goto fail_mem; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, 
xen_debug_interrupt, IRQF_PERCPU | IRQF_NOBALANCING, debug_name, NULL); @@ -98,6 +104,8 @@ int xen_smp_intr_init(unsigned int cpu) per_cpu(xen_debug_irq, cpu).name = debug_name; callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + if (!callfunc_name) + goto fail_mem; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, xen_call_function_single_interrupt, @@ -111,6 +119,8 @@ int xen_smp_intr_init(unsigned int cpu) return 0; + fail_mem: + rc = -ENOMEM; fail: xen_smp_intr_free(cpu); return rc; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index e3b18ad49889afc5ae35d2e2796aecd108a93819..f35ad29367bbff56a8a436a9966a5f81caf0d454 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -57,6 +57,7 @@ static void cpu_bringup(void) { int cpu; + cr4_init(); cpu_init(); touch_softlockup_watchdog(); preempt_disable(); @@ -89,6 +90,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void) { cpu_bringup(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + prevent_tail_call_optimization(); } void xen_smp_intr_free_pv(unsigned int cpu) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 973f10e0521195bf28c6965061f95b227518bc39..717b4847b473fc55c6cb2d303ed93145fcbd26bd 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -21,6 +22,7 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; static DEFINE_PER_CPU(char *, irq_name); +static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest); static bool xen_pvspin = true; static void xen_qlock_kick(int cpu) @@ -40,33 +42,24 @@ static void xen_qlock_kick(int cpu) static void xen_qlock_wait(u8 *byte, u8 val) { int irq = __this_cpu_read(lock_kicker_irq); + atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest); /* If kicker interrupts not initialized yet, just spin */ - if (irq == -1) + if (irq == -1 || in_nmi()) return; - /* clear pending */ - xen_clear_irq_pending(irq); - barrier(); - - /* - * We check the byte value after clearing pending IRQ to make sure - * that we won't miss a wakeup event because of the clearing. - * - * The sync_clear_bit() call in xen_clear_irq_pending() is atomic. - * So it is effectively a memory barrier for x86. - */ - if (READ_ONCE(*byte) != val) - return; + /* Detect reentry. */ + atomic_inc(nest_cnt); - /* - * If an interrupt happens here, it will leave the wakeup irq - * pending, which will cause xen_poll_irq() to return - * immediately. - */ + /* If irq pending already and no nested call clear it. 
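The xen_qlock_wait() rework above uses a per-CPU nesting counter so that only the outermost invocation touches the pending-IRQ state; a nested call (for example from NMI context) must not clear an event the outer waiter is about to poll for. A stripped-down userspace sketch of that detect-reentry pattern, using a thread-local counter in place of the per-CPU atomic (all names invented):

	#include <stdio.h>

	static _Thread_local int nest_cnt;
	static int event_pending = 1;

	static void wait_for_event(void)
	{
		nest_cnt++;

		if (nest_cnt == 1 && event_pending) {
			/* Outermost caller: safe to consume the pending event. */
			event_pending = 0;
			printf("outer: consumed pending event\n");
		} else {
			/* Nested caller (or nothing pending): poll instead and
			 * leave the shared state for the outer level. */
			printf("nested or idle: polling, leaving state alone\n");
		}

		nest_cnt--;
	}

	int main(void)
	{
		wait_for_event();	/* outermost call consumes the event */
		event_pending = 1;

		nest_cnt++;		/* simulate being interrupted mid-wait... */
		wait_for_event();	/* ...and re-entering: must not consume */
		nest_cnt--;

		return 0;
	}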
*/ + if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) { + xen_clear_irq_pending(irq); + } else if (READ_ONCE(*byte) == val) { + /* Block until irq becomes pending (or a spurious wakeup) */ + xen_poll_irq(irq); + } - /* Block until irq becomes pending (or perhaps a spurious wakeup) */ - xen_poll_irq(irq); + atomic_dec(nest_cnt); } static irqreturn_t dummy_handler(int irq, void *dev_id) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c84f1e039d849210b1e04517f62b9cce3ab2bddc..01dcccf9185ff4069531db95487e95620e8ce09e 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -361,8 +361,6 @@ void xen_timer_resume(void) { int cpu; - pvclock_resume(); - if (xen_clockevent != &xen_vcpuop_clockevent) return; @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { }; static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; +static u64 xen_clock_value_saved; void xen_save_time_memory_area(void) { struct vcpu_register_time_memory_area t; int ret; + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; + if (!xen_clock) return; @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) int ret; if (!xen_clock) - return; + goto out; t.addr.v = &xen_clock->pvti; @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) if (ret != 0) pr_notice("Cannot restore secondary vcpu_time_info (err %d)", ret); + +out: + /* Need pvclock_resume() before using xen_clocksource_read(). */ + pvclock_resume(); + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; } static void xen_setup_vsyscall_time_info(void) diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 417b339e5c8e1aadedd20231c9be82ac93dbe728..a93d8a7cef26c2ed2bbabbf2a135ee35ecd5113e 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -24,13 +25,13 @@ ENTRY(xen_\name) pop %r11 jmp \name END(xen_\name) +_ASM_NOKPROBE(xen_\name) .endm xen_pv_trap divide_error xen_pv_trap debug xen_pv_trap xendebug xen_pv_trap int3 -xen_pv_trap xenint3 xen_pv_trap xennmi xen_pv_trap overflow xen_pv_trap bounds diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S index ca2d3b2bf2af711f57a4ef22111e99fc46d6471f..58722a052f9c1a7db23425f3aaba0b59da6c22a6 100644 --- a/arch/x86/xen/xen-pvh.S +++ b/arch/x86/xen/xen-pvh.S @@ -181,7 +181,7 @@ canary: .fill 48, 1, 0 early_stack: - .fill 256, 1, 0 + .fill BOOT_STACK_SIZE, 1, 0 early_stack_end: ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index b9ad83a0ee5dbf1604acc3c4edc90698e7eea1a6..94b862405ce9448717ddb6b9742757384eef2846 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -4,13 +4,14 @@ config ZONE_DMA config XTENSA def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SG_CHAIN select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION - select BUILDTIME_EXTABLE_SORT + select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select COMMON_CLK select DMA_NONCOHERENT_OPS diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index dc9e0ba7122cad1e982ac33eb5c9d60d9a1db48a..294846117fc2c5527e297ccd50eb55c31c3f3228 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -33,7 +33,7 @@ uImage: $(obj)/uImage boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) -OBJCOPYFLAGS = 
--strip-all -R .comment -R .note.gnu.build-id -O binary +OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi index 1090528825ec6aee23f921338f62fd30f02a8a2e..e46ae07bab0599165ac2da2afe023e93698973f1 100644 --- a/arch/xtensa/boot/dts/xtfpga.dtsi +++ b/arch/xtensa/boot/dts/xtfpga.dtsi @@ -103,7 +103,7 @@ }; }; - spi0: spi-master@0d0a0000 { + spi0: spi@0d0a0000 { compatible = "cdns,xtfpga-spi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index 11fed6c06a7c668cc870589dab6bf2e7f241c1e1..b5938160fb3dde7d845c7612267fb189bd92806a 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ b/arch/xtensa/configs/smp_lx200_defconfig @@ -33,6 +33,7 @@ CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set # CONFIG_PCI is not set +CONFIG_VECTORS_OFFSET=0x00002000 CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index 3ae74d7e074b5970c92b4e2e5158ea631809eccf..f302ef57973a45e8a9dccf82a8329158f48172a3 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -243,7 +243,7 @@ static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(VERIFY_WRITE, dst, len)) + if (access_ok(dst, len)) return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr); if (len) diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index 5bfbc1c401d4cfb82fe5596f0a24689cabecdc98..3c4ea346968322b5306a460002ba741892eaec27 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, { int ret = 0; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; #if !XCHAL_HAVE_S32C1I diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index e4ccb88b799631102185e21bda4b8a5193a5c5f4..677bc76c1d7078e0ed78c84f1e5eac14363bd7e9 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -23,7 +23,11 @@ # error Linux requires the Xtensa Windowed Registers Option. #endif -#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH +/* Xtensa ABI requires stack alignment to be at least 16 */ + +#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) + +#define ARCH_SLAB_MINALIGN STACK_ALIGN /* * User space process size: 1 GB. diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index f1158b4c629cf83a0568f9022a71ccb87e684f20..9266b17a85df46ddf072d7ad425806522a8d3bd2 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -42,7 +42,7 @@ #define __user_ok(addr, size) \ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) -#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) +#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size)) #define user_addr_max() (uaccess_kernel() ? 
~0UL : TASK_SIZE) @@ -86,7 +86,7 @@ extern long __put_user_bad(void); ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + if (access_ok(__pu_addr, size)) \ __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ }) @@ -184,7 +184,7 @@ __asm__ __volatile__( \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ - if (access_ok(VERIFY_READ, __gu_addr, size)) \ + if (access_ok(__gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -271,7 +271,7 @@ __xtensa_clear_user(void *addr, unsigned long size) static inline unsigned long clear_user(void *addr, unsigned long size) { - if (access_ok(VERIFY_WRITE, addr, size)) + if (access_ok(addr, size)) return __xtensa_clear_user(addr, size); return size ? -EFAULT : 0; } @@ -286,7 +286,7 @@ extern long __strncpy_user(char *, const char *, long); static inline long strncpy_from_user(char *dst, const char *src, long count) { - if (access_ok(VERIFY_READ, src, 1)) + if (access_ok(src, 1)) return __strncpy_user(dst, src, count); return -EFAULT; } diff --git a/arch/xtensa/include/asm/vmalloc.h b/arch/xtensa/include/asm/vmalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..0eb94b70be55dff2ce2ac2bca913fd71c1e585bb --- /dev/null +++ b/arch/xtensa/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_XTENSA_VMALLOC_H +#define _ASM_XTENSA_VMALLOC_H + +#endif /* _ASM_XTENSA_VMALLOC_H */ diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 58f29a9d895d83e98c12020bf501b9f3d063eaec..97239cce3ffcb3b41b0f6cc675ad85653f81b214 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -58,6 +58,8 @@ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_PA32BIT 0x400000 /* physical address is within 4G */ +#define MAP_CHECKNODE 0x800000 /* hugetlb numa node check */ #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED # define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be * uninitialized */ @@ -65,6 +67,8 @@ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ #endif +#define MAP_ALIGN 0x2000000 /* create an aligned mapping */ + /* * Flags for msync */ diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 67904f55f1884f52893b3a99b1be785a48dc69da..120dd746a14751f3de4317a35921b2037d8c7cdd 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -94,14 +94,14 @@ int main(void) DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); #if XTENSA_HAVE_COPROCESSORS - DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); - DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); + 
DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); + DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); + DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2)); + DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3)); + DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4)); + DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5)); + DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6)); + DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7)); #endif DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 2f76118ecf6230ff01fe0e43221269da7b208f46..5bd38ea2da384379909bd25e29d039ed66aeb68e 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -88,9 +88,12 @@ _SetupMMU: initialize_mmu #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY rsr a2, excsave1 - movi a3, 0x08000000 + movi a3, XCHAL_KSEG_PADDR + bltu a2, a3, 1f + sub a2, a2, a3 + movi a3, XCHAL_KSEG_SIZE bgeu a2, a3, 1f - movi a3, 0xd0000000 + movi a3, XCHAL_KSEG_CACHED_VADDR add a2, a2, a3 wsr a2, excsave1 1: @@ -277,12 +280,13 @@ should_never_return: movi a2, cpu_start_ccount 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b movi a3, 0 s32i a3, a2, 0 - memw 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b wsr a3, ccount @@ -318,11 +322,13 @@ ENTRY(cpu_restart) rsr a0, prid neg a2, a0 movi a3, cpu_start_id + memw s32i a2, a3, 0 #if XCHAL_DCACHE_IS_WRITEBACK dhwbi a3, 0 #endif 1: + memw l32i a2, a3, 0 dhi a3, 0 bne a2, a0, 1b diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 483dcfb6e681d7d483ef8ebfb948d91b7ee8f1fd..5a0e0bd68b769e1d4c3c1fb2c36b92bb5a8eb3d9 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti) void coprocessor_flush_all(struct thread_info *ti) { - unsigned long cpenable; + unsigned long cpenable, old_cpenable; int i; preempt_disable(); + RSR_CPENABLE(old_cpenable); cpenable = ti->cpenable; + WSR_CPENABLE(cpenable); for (i = 0; i < XCHAL_CP_MAX; i++) { if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) coprocessor_flush(ti, i); cpenable >>= 1; } + WSR_CPENABLE(old_cpenable); preempt_enable(); } @@ -317,8 +320,8 @@ unsigned long get_wchan(struct task_struct *p) /* Stack layout: sp-4: ra, sp-3: sp' */ - pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp); - sp = *(unsigned long *)sp - 3; + pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp); + sp = SPILL_SLOT(sp, 1); } while (count++ < 16); return 0; } diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index c0845cb1cbb9944ed7deaee9bffa3685171574c3..d9541be0605ad52793e5d59a11b2a8dda7f975a7 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs) } +#if XTENSA_HAVE_COPROCESSORS +#define CP_OFFSETS(cp) \ + { \ + .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \ + .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \ + .sz = sizeof(xtregs_ ## cp ## _t), \ + } + +static const struct { + size_t elf_xtregs_offset; + size_t ti_offset; + size_t sz; +} cp_offsets[] = { + CP_OFFSETS(cp0), + CP_OFFSETS(cp1), + CP_OFFSETS(cp2), + CP_OFFSETS(cp3), + CP_OFFSETS(cp4), + CP_OFFSETS(cp5), + CP_OFFSETS(cp6), + CP_OFFSETS(cp7), +}; +#endif + 
static int ptrace_getxregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); struct thread_info *ti = task_thread_info(child); elf_xtregs_t __user *xtregs = uregs; int ret = 0; + int i __maybe_unused; if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) return -EIO; @@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs) #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessor registers to memory. */ coprocessor_flush_all(ti); - ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, - sizeof(xtregs_coprocessor_t)); + + for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) + ret |= __copy_to_user((char __user *)xtregs + + cp_offsets[i].elf_xtregs_offset, + (const char *)ti + + cp_offsets[i].ti_offset, + cp_offsets[i].sz); #endif ret |= __copy_to_user(&xtregs->opt, ®s->xtregs_opt, sizeof(xtregs->opt)); @@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) struct pt_regs *regs = task_pt_regs(child); elf_xtregs_t *xtregs = uregs; int ret = 0; + int i __maybe_unused; if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) return -EFAULT; @@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) coprocessor_flush_all(ti); coprocessor_release_all(ti); - ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, - sizeof(xtregs_coprocessor_t)); + for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) + ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset, + (const char __user *)xtregs + + cp_offsets[i].elf_xtregs_offset, + cp_offsets[i].sz); #endif ret |= __copy_from_user(®s->xtregs_opt, &xtregs->opt, sizeof(xtregs->opt)); diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 351283b60df6174af90cfd789f991f58da68f131..15580e4fc766a24191fe93dadf47590c424a1818 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start; extern char _SecondaryResetVector_text_end; #endif -static inline int mem_reserve(unsigned long start, unsigned long end) +static inline int __init_memblock mem_reserve(unsigned long start, + unsigned long end) { return memblock_reserve(start, end - start); } @@ -514,6 +515,7 @@ void cpu_reset(void) "add %2, %2, %7\n\t" "addi %0, %0, -1\n\t" "bnez %0, 1b\n\t" + "isync\n\t" /* Jump to identity mapping */ "jx %3\n" "2:\n\t" diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index f88e7a0b232cbd09241a0393ca5db193b661d8c9..20c1779f57c7635a7e8d97456be1a99c07e0f49e 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -251,7 +251,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, frame = (struct rt_sigframe __user *) regs->areg[1]; - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) @@ -348,7 +348,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, if (regs->depc > 64) panic ("Double exception sys_sigreturn\n"); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { + if (!access_ok(frame, sizeof(*frame))) { return -EFAULT; } diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 932d64689bacbbbf3dc2ae981b3d3d1938172bce..be1f280c322cd17307e0377736abdb3d955e3ebc 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned i; - for (i = 0; i < 
max_cpus; ++i) + for_each_possible_cpu(i) set_cpu_present(i, true); } @@ -96,6 +96,11 @@ void __init smp_init_cpus(void) pr_info("%s: Core Count = %d\n", __func__, ncpus); pr_info("%s: Core Id = %d\n", __func__, core_id); + if (ncpus > NR_CPUS) { + ncpus = NR_CPUS; + pr_info("%s: limiting core count by %d\n", __func__, ncpus); + } + for (i = 0; i < ncpus; ++i) set_cpu_possible(i, true); } @@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) int i; #ifdef CONFIG_HOTPLUG_CPU - cpu_start_id = cpu; - system_flush_invalidate_dcache_range( - (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); + WRITE_ONCE(cpu_start_id, cpu); + /* Pairs with the third memw in the cpu_restart */ + mb(); + system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, + sizeof(cpu_start_id)); #endif smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); @@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) ccount = get_ccount(); while (!ccount); - cpu_start_ccount = ccount; + WRITE_ONCE(cpu_start_ccount, ccount); - while (time_before(jiffies, timeout)) { + do { + /* + * Pairs with the first two memws in the + * .Lboot_secondary. + */ mb(); - if (!cpu_start_ccount) - break; - } + ccount = READ_ONCE(cpu_start_ccount); + } while (ccount && time_before(jiffies, timeout)); - if (cpu_start_ccount) { + if (ccount) { smp_call_function_single(0, mx_cpu_stop, - (void *)cpu, 1); - cpu_start_ccount = 0; + (void *)cpu, 1); + WRITE_ONCE(cpu_start_ccount, 0); return -EIO; } } @@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", __func__, cpu, idle, start_info.stack); + init_completion(&cpu_running); ret = boot_secondary(cpu, idle); if (ret == 0) { wait_for_completion_timeout(&cpu_running, @@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu) unsigned long timeout = jiffies + msecs_to_jiffies(1000); while (time_before(jiffies, timeout)) { system_invalidate_dcache_range((unsigned long)&cpu_start_id, - sizeof(cpu_start_id)); - if (cpu_start_id == -cpu) { + sizeof(cpu_start_id)); + /* Pairs with the second memw in the cpu_restart */ + mb(); + if (READ_ONCE(cpu_start_id) == -cpu) { platform_cpu_kill(cpu); return; } diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 0df4080fa20f2276563eb64f8694959548a3f557..b9f82510c65019506ffb98f3f23ac494f7285efa 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c @@ -91,7 +91,7 @@ void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, pc = MAKE_PC_FROM_RA(a0, pc); /* Check if the region is OK to access. */ - if (!access_ok(VERIFY_READ, &SPILL_SLOT(a1, 0), 8)) + if (!access_ok(&SPILL_SLOT(a1, 0), 8)) return; /* Copy a1, a0 from user space stack frame. */ if (__get_user(a0, &SPILL_SLOT(a1, 0)) || @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data) return 1; } +/* + * level == 0 is for the return address from the caller of this function, + * not from this function itself. 
+ */ unsigned long return_address(unsigned level) { struct return_addr_data r = { - .skip = level + 1, + .skip = level, }; walk_stackframe(stack_pointer(NULL), return_address_cb, &r); return r.addr; diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index fd524a54d2ab5e8f82e5485c6105f84d1a748b02..378186b5eb401aba9246756b5d235b60a4d697d5 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt) container_of(evt, struct ccount_timer, evt); if (timer->irq_enabled) { - disable_irq(evt->irq); + disable_irq_nosync(evt->irq); timer->irq_enabled = 0; } return 0; diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index a1c3edb8ad56c0d93e3c9a7d994dc0ffd9ad7351..fa926995d2a37c64ed6075fd9e7598d6c9656fab 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -131,6 +131,7 @@ SECTIONS .fixup : { *(.fixup) } EXCEPTION_TABLE(16) + NOTES /* Data section */ _sdata = .; diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 04f19de4670033142537a6271b749c30836ea47e..4092555828b13a25305f250863ad2e4d42016727 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range); // FIXME EXPORT_SYMBOL(screen_info); #endif -EXPORT_SYMBOL(outsb); -EXPORT_SYMBOL(outsw); -EXPORT_SYMBOL(outsl); -EXPORT_SYMBOL(insb); -EXPORT_SYMBOL(insw); -EXPORT_SYMBOL(insl); - extern long common_exception_return; EXPORT_SYMBOL(common_exception_return); diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 2ab0e0dcd1668bce6fb9ea6f9035d99d13458ed3..deb2cd8ac7b4d0c041f0430dd7850fab7e27c33f 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -128,7 +128,6 @@ void do_page_fault(struct pt_regs *regs) else current->min_flt++; if (fault & VM_FAULT_RETRY) { - flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 59153d0aa8908dd415c975c87b9140650afa2686..b43f03620843055b4333dbcf117f4759ce8ff034 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -216,6 +216,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) unsigned tlbidx = w | (e << PAGE_SHIFT); unsigned r0 = dtlb ? read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); + unsigned r1 = dtlb ? + read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx); unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); unsigned pte = get_pte_for_vaddr(vpn); unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; @@ -231,8 +233,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) } if (tlb_asid == mm_asid) { - unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : - read_itlb_translation(tlbidx); if ((pte ^ r1) & PAGE_MASK) { pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", dtlb ? 
'D' : 'I', w, e, r0, r1, pte); diff --git a/block/Kconfig b/block/Kconfig index 1f2469a0123ceb1f36103c0db9eb71d140556193..8804f21df1519c6bd5ad9678e99cfe98f26b36a7 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -26,6 +26,12 @@ menuconfig BLOCK if BLOCK +config BLK_BIO_ALLOC_TIME + bool + +config BLK_BIO_ALLOC_TASK + bool + config LBDAF bool "Support for large (2TB+) block devices and files" depends on !64BIT @@ -200,6 +206,21 @@ config BLK_SED_OPAL Enabling this option enables users to setup/unlock/lock Locking ranges for SED devices using the Opal protocol. +config BLK_BIO_DISPATCH_ASYNC + bool "Dispatch bios asynchronously on specific cpus" + default n + depends on BLOCK=y + help + If there are multiple NUMA nodes, memory access across nodes is rather + slow compared to access to the local node. And if some drivers use + internal spinlocks, I/O performance will suffer when bios are issued + concurrently from different nodes. This feature dispatches bios + asynchronously to specific CPUs to avoid cross-node memory access in + the driver. Note that this feature requires special care in the driver + to work. If unsure, say N here. + +source "block/blk-io-hierarchy/Kconfig" + menu "Partition Types" source "block/partitions/Kconfig" diff --git a/block/Makefile b/block/Makefile index 572b33f32c07cf7056fb1121abba753a9ba8a0ac..bb711b0c307a6c45c84a8d6685b7b9dc790e73da 100644 --- a/block/Makefile +++ b/block/Makefile @@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o +obj-$(CONFIG_BLK_IO_HIERARCHY_STATS) += blk-io-hierarchy/ diff --git a/block/badblocks.c b/block/badblocks.c index 91f7bcf979d374537f6dcaf30e2ee0c54947fed4..3256f642785a31fed0980ac8ee3c352c4c942c84 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -173,7 +173,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, { u64 *p; int lo, hi; - int rv = 0; + int rv = 0, changed = 0; unsigned long flags; if (bb->shift < 0) @@ -238,6 +238,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, s = a + BB_MAX_LEN; } sectors = e - s; + changed = 1; } } if (sectors && hi < bb->count) { @@ -268,24 +269,24 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, sectors = e - s; lo = hi; hi++; + changed = 1; } } if (sectors == 0 && hi < bb->count) { /* we might be able to combine lo and hi */ /* Note: 's' is at the end of 'lo' */ - sector_t a = BB_OFFSET(p[hi]); - int lolen = BB_LEN(p[lo]); - int hilen = BB_LEN(p[hi]); - int newlen = lolen + hilen - (s - a); + sector_t a = BB_OFFSET(p[lo]); + int newlen = max((u64)s, BB_OFFSET(p[hi]) + BB_LEN(p[hi])) - a; - if (s >= a && newlen < BB_MAX_LEN) { + if (s >= BB_OFFSET(p[hi]) && newlen < BB_MAX_LEN) { /* yes, we can combine them */ int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); - p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); + p[lo] = BB_MAKE(a, newlen, ack); memmove(p + hi, p + hi + 1, (bb->count - hi - 1) * 8); bb->count--; + changed = 1; } } while (sectors) { @@ -308,14 +309,18 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, p[hi] = BB_MAKE(s, this_sectors, acknowledged); sectors -= this_sectors; s += this_sectors; + hi++; + changed = 1; } } - bb->changed = 1; - if (!acknowledged) - bb->unacked_exist = 1; - else - badblocks_update_acked(bb); + if (changed) { + bb->changed = changed; + if (!acknowledged) + bb->unacked_exist = 1; + else + badblocks_update_acked(bb); + }
write_sequnlock_irqrestore(&bb->lock, flags); return rv; diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 9fe5952d117d553f12f32055fde8683c554b06a8..25a407e5142dbf8ba8fd7848a197cadad020fb1a 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -22,6 +22,7 @@ #include #include +#include "blk.h" #include "bfq-iosched.h" #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) @@ -60,7 +61,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) if (!bfqg_stats_waiting(stats)) return; - now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_group_wait_time) blkg_stat_add(&stats->group_wait_time, now - stats->start_group_wait_time); @@ -77,7 +78,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, return; if (bfqg == curr_bfqg) return; - stats->start_group_wait_time = ktime_get_ns(); + stats->start_group_wait_time = blk_time_get_ns(); bfqg_stats_mark_waiting(stats); } @@ -89,7 +90,7 @@ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) if (!bfqg_stats_empty(stats)) return; - now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_empty_time) blkg_stat_add(&stats->empty_time, now - stats->start_empty_time); @@ -116,7 +117,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) if (bfqg_stats_empty(stats)) return; - stats->start_empty_time = ktime_get_ns(); + stats->start_empty_time = blk_time_get_ns(); bfqg_stats_mark_empty(stats); } @@ -125,7 +126,7 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg) struct bfqg_stats *stats = &bfqg->stats; if (bfqg_stats_idling(stats)) { - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns(); if (now > stats->start_idle_time) blkg_stat_add(&stats->idle_time, @@ -138,7 +139,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats; - stats->start_idle_time = ktime_get_ns(); + stats->start_idle_time = blk_time_get_ns(); bfqg_stats_mark_idling(stats); } @@ -175,7 +176,7 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, u64 io_start_time_ns, unsigned int op) { struct bfqg_stats *stats = &bfqg->stats; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns(); if (now > io_start_time_ns) blkg_rwstat_add(&stats->service_time, op, @@ -254,14 +255,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq) static void bfqg_get(struct bfq_group *bfqg) { - bfqg->ref++; + refcount_inc(&bfqg->ref); } static void bfqg_put(struct bfq_group *bfqg) { - bfqg->ref--; - - if (bfqg->ref == 0) + if (refcount_dec_and_test(&bfqg->ref)) kfree(bfqg); } @@ -448,7 +447,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) } /* see comments in bfq_bic_update_cgroup for why refcounting */ - bfqg_get(bfqg); + refcount_set(&bfqg->ref, 1); return &bfqg->pd; } @@ -525,12 +524,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, */ entity = &bfqg->entity; for_each_entity(entity) { - bfqg = container_of(entity, struct bfq_group, entity); - if (bfqg != bfqd->root_group) { - parent = bfqg_parent(bfqg); + struct bfq_group *curr_bfqg = container_of(entity, + struct bfq_group, entity); + if (curr_bfqg != bfqd->root_group) { + parent = bfqg_parent(curr_bfqg); if (!parent) parent = bfqd->root_group; - bfq_group_set_parent(bfqg, parent); + bfq_group_set_parent(curr_bfqg, parent); } } @@ -555,6 +555,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_group *bfqg) { struct bfq_entity *entity = &bfqq->entity; + struct bfq_group 
*old_parent = bfqq_group(bfqq); + + if (bfqq == &bfqd->oom_bfqq) + return; + /* + * Get extra reference to prevent bfqq from being freed in + * next possible expire or deactivate. + */ + bfqq->ref++; /* If bfqq is empty, then bfq_bfqq_expire also invokes * bfq_del_bfqq_busy, thereby removing bfqq and its entity @@ -570,20 +579,47 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st) bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); - bfqg_and_blkg_put(bfqq_group(bfqq)); entity->parent = bfqg->my_entity; entity->sched_data = &bfqg->sched_data; /* pin down bfqg and its associated blkg */ bfqg_and_blkg_get(bfqg); - if (bfq_bfqq_busy(bfqq)) { - bfq_pos_tree_add_move(bfqd, bfqq); + /* + * Don't leave the bfqq->pos_root to old bfqg, since the ref to old + * bfqg will be released and the bfqg might be freed. + */ + bfq_pos_tree_add_move(bfqd, bfqq); + bfqg_and_blkg_put(old_parent); + + if (bfq_bfqq_busy(bfqq)) bfq_activate_bfqq(bfqd, bfqq); - } if (!bfqd->in_service_queue && !bfqd->rq_in_driver) bfq_schedule_dispatch(bfqd); + /* release extra ref taken above, bfqq may happen to be freed now */ + bfq_put_queue(bfqq); +} + +static +void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + /* + * To prevent bfqq's service guarantees from being violated, + * bfqq may be left busy, i.e., queued for service, even if + * empty (see comments in __bfq_bfqq_expire() for + * details). But, if no process will send requests to bfqq any + * longer, then there is no point in keeping bfqq queued for + * service. In addition, keeping bfqq queued for service, but + * with no process ref any longer, may have caused bfqq to be + * freed when dequeued from service. But this is assumed to + * never happen. + */ + if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) && + bfqq != bfqd->in_service_queue) + bfq_del_bfqq_busy(bfqd, bfqq, false); + + bfq_put_queue(bfqq); } /** @@ -619,10 +655,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, if (entity->sched_data != &bfqg->sched_data) { bic_set_bfqq(bic, NULL, 0); - bfq_log_bfqq(bfqd, async_bfqq, - "bic_change_group: %p %d", - async_bfqq, async_bfqq->ref); - bfq_put_queue(async_bfqq); + bfq_release_process_ref(bfqd, async_bfqq); } } @@ -723,39 +756,53 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st) /** * bfq_reparent_leaf_entity - move leaf entity to the root_group. * @bfqd: the device data structure with the root group. - * @entity: the entity to move. + * @entity: the entity to move, if entity is a leaf; or the parent entity + * of an active leaf entity to move, if entity is not a leaf. 
*/ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, - struct bfq_entity *entity) + struct bfq_entity *entity, + int ioprio_class) { - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_queue *bfqq; + struct bfq_entity *child_entity = entity; + + while (child_entity->my_sched_data) { /* leaf not reached yet */ + struct bfq_sched_data *child_sd = child_entity->my_sched_data; + struct bfq_service_tree *child_st = child_sd->service_tree + + ioprio_class; + struct rb_root *child_active = &child_st->active; + + child_entity = bfq_entity_of(rb_first(child_active)); + if (!child_entity) + child_entity = child_sd->in_service_entity; + } + + bfqq = bfq_entity_to_bfqq(child_entity); bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); } /** - * bfq_reparent_active_entities - move to the root group all active - * entities. + * bfq_reparent_active_queues - move to the root group all active queues. * @bfqd: the device data structure with the root group. * @bfqg: the group to move from. - * @st: the service tree with the entities. + * @st: the service tree to start the search from. */ -static void bfq_reparent_active_entities(struct bfq_data *bfqd, - struct bfq_group *bfqg, - struct bfq_service_tree *st) +static void bfq_reparent_active_queues(struct bfq_data *bfqd, + struct bfq_group *bfqg, + struct bfq_service_tree *st, + int ioprio_class) { struct rb_root *active = &st->active; - struct bfq_entity *entity = NULL; - - if (!RB_EMPTY_ROOT(&st->active)) - entity = bfq_entity_of(rb_first(active)); + struct bfq_entity *entity; - for (; entity ; entity = bfq_entity_of(rb_first(active))) - bfq_reparent_leaf_entity(bfqd, entity); + while ((entity = bfq_entity_of(rb_first(active)))) + bfq_reparent_leaf_entity(bfqd, entity, ioprio_class); if (bfqg->sched_data.in_service_entity) bfq_reparent_leaf_entity(bfqd, - bfqg->sched_data.in_service_entity); + bfqg->sched_data.in_service_entity, + ioprio_class); } /** @@ -787,13 +834,6 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { st = bfqg->sched_data.service_tree + i; - /* - * The idle tree may still contain bfq_queues belonging - * to exited task because they never migrated to a different - * cgroup from the one being destroyed now. - */ - bfq_flush_idle_tree(st); - /* * It may happen that some queues are still active * (busy) upon group destruction (if the corresponding @@ -806,13 +846,27 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) * There is no need to put the sync queues, as the * scheduler has taken no reference. */ - bfq_reparent_active_entities(bfqd, bfqg, st); + bfq_reparent_active_queues(bfqd, bfqg, st, i); + + /* + * The idle tree may still contain bfq_queues + * belonging to exited task because they never + * migrated to a different cgroup from the one being + * destroyed now. In addition, even + * bfq_reparent_active_queues() may happen to add some + * entities to the idle tree. It happens if, in some + * of the calls to bfq_bfqq_move() performed by + * bfq_reparent_active_queues(), the queue to move is + * empty and gets expired. 
+ */ + bfq_flush_idle_tree(st); } __bfq_deactivate_entity(entity, false); put_async_queues: bfq_put_async_queues(bfqd, bfqg); + pd->plid = BLKCG_MAX_POLS; spin_unlock_irqrestore(&bfqd->lock, flags); /* diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 653100fb719eb80e1bb11e9ef7761f14f960623f..c6c6ee0956e3daa657b5d4db82cc102f479b4c81 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -132,6 +132,7 @@ #include #include #include +#include #include "blk.h" #include "blk-mq.h" @@ -139,6 +140,7 @@ #include "blk-mq-sched.h" #include "bfq-iosched.h" #include "blk-wbt.h" +#include "blk-io-hierarchy/stats.h" #define BFQ_BFQQ_FNS(name) \ void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ @@ -624,12 +626,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) } /* - * Tell whether there are active queues or groups with differentiated weights. + * Tell whether there are active queues with different weights or + * active groups. */ -static bool bfq_differentiated_weights(struct bfq_data *bfqd) +static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd) { /* - * For weights to differ, at least one of the trees must contain + * For queue weights to differ, queue_weights_tree must contain * at least two nodes. */ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && @@ -637,9 +640,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) bfqd->queue_weights_tree.rb_node->rb_right) #ifdef CONFIG_BFQ_GROUP_IOSCHED ) || - (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && - (bfqd->group_weights_tree.rb_node->rb_left || - bfqd->group_weights_tree.rb_node->rb_right) + (bfqd->num_groups_with_pending_reqs > 0 #endif ); } @@ -657,26 +658,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) * 3) all active groups at the same level in the groups tree have the same * number of children. * - * Unfortunately, keeping the necessary state for evaluating exactly the - * above symmetry conditions would be quite complex and time-consuming. - * Therefore this function evaluates, instead, the following stronger - * sub-conditions, for which it is much easier to maintain the needed - * state: + * Unfortunately, keeping the necessary state for evaluating exactly + * the last two symmetry sub-conditions above would be quite complex + * and time consuming. Therefore this function evaluates, instead, + * only the following stronger two sub-conditions, for which it is + * much easier to maintain the needed state: * 1) all active queues have the same weight, - * 2) all active groups have the same weight, - * 3) all active groups have at most one active child each. - * In particular, the last two conditions are always true if hierarchical - * support and the cgroups interface are not enabled, thus no state needs - * to be maintained in this case. + * 2) there are no active groups. + * In particular, the last condition is always true if hierarchical + * support or the cgroups interface are not enabled, thus no state + * needs to be maintained in this case. */ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) { - return !bfq_differentiated_weights(bfqd); + return !bfq_varied_queue_weights_or_active_groups(bfqd); } /* * If the weight-counter tree passed as input contains no counter for - * the weight of the input entity, then add that counter; otherwise just + * the weight of the input queue, then add that counter; otherwise just * increment the existing counter. 
* * Note that weight-counter trees contain few nodes in mostly symmetric @@ -687,25 +687,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) * In most scenarios, the rate at which nodes are created/destroyed * should be low too. */ -void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, +void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct rb_root *root) { + struct bfq_entity *entity = &bfqq->entity; struct rb_node **new = &(root->rb_node), *parent = NULL; /* - * Do not insert if the entity is already associated with a + * Do not insert if the queue is already associated with a * counter, which happens if: - * 1) the entity is associated with a queue, - * 2) a request arrival has caused the queue to become both + * 1) a request arrival has caused the queue to become both * non-weight-raised, and hence change its weight, and * backlogged; in this respect, each of the two events * causes an invocation of this function, - * 3) this is the invocation of this function caused by the + * 2) this is the invocation of this function caused by the * second event. This second invocation is actually useless, * and we handle this fact by exiting immediately. More * efficient or clearer solutions might possibly be adopted. */ - if (entity->weight_counter) + if (bfqq->weight_counter) return; while (*new) { @@ -715,7 +715,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, parent = *new; if (entity->weight == __counter->weight) { - entity->weight_counter = __counter; + bfqq->weight_counter = __counter; goto inc_counter; } if (entity->weight < __counter->weight) @@ -724,68 +724,68 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, new = &((*new)->rb_right); } - entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), - GFP_ATOMIC); + bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), + GFP_ATOMIC); /* * In the unlucky event of an allocation failure, we just - * exit. This will cause the weight of entity to not be - * considered in bfq_differentiated_weights, which, in its - * turn, causes the scenario to be deemed wrongly symmetric in - * case entity's weight would have been the only weight making - * the scenario asymmetric. On the bright side, no unbalance - * will however occur when entity becomes inactive again (the - * invocation of this function is triggered by an activation - * of entity). In fact, bfq_weights_tree_remove does nothing - * if !entity->weight_counter. + * exit. This will cause the weight of queue to not be + * considered in bfq_varied_queue_weights_or_active_groups, + * which, in its turn, causes the scenario to be deemed + * wrongly symmetric in case bfqq's weight would have been + * the only weight making the scenario asymmetric. On the + * bright side, no unbalance will however occur when bfqq + * becomes inactive again (the invocation of this function + * is triggered by an activation of queue). In fact, + * bfq_weights_tree_remove does nothing if + * !bfqq->weight_counter. 
*/ - if (unlikely(!entity->weight_counter)) + if (unlikely(!bfqq->weight_counter)) return; - entity->weight_counter->weight = entity->weight; - rb_link_node(&entity->weight_counter->weights_node, parent, new); - rb_insert_color(&entity->weight_counter->weights_node, root); + bfqq->weight_counter->weight = entity->weight; + rb_link_node(&bfqq->weight_counter->weights_node, parent, new); + rb_insert_color(&bfqq->weight_counter->weights_node, root); inc_counter: - entity->weight_counter->num_active++; + bfqq->weight_counter->num_active++; + bfqq->ref++; } /* - * Decrement the weight counter associated with the entity, and, if the + * Decrement the weight counter associated with the queue, and, if the * counter reaches 0, remove the counter from the tree. * See the comments to the function bfq_weights_tree_add() for considerations * about overhead. */ void __bfq_weights_tree_remove(struct bfq_data *bfqd, - struct bfq_entity *entity, + struct bfq_queue *bfqq, struct rb_root *root) { - if (!entity->weight_counter) + if (!bfqq->weight_counter) return; - entity->weight_counter->num_active--; - if (entity->weight_counter->num_active > 0) + bfqq->weight_counter->num_active--; + if (bfqq->weight_counter->num_active > 0) goto reset_entity_pointer; - rb_erase(&entity->weight_counter->weights_node, root); - kfree(entity->weight_counter); + rb_erase(&bfqq->weight_counter->weights_node, root); + kfree(bfqq->weight_counter); reset_entity_pointer: - entity->weight_counter = NULL; + bfqq->weight_counter = NULL; + bfq_put_queue(bfqq); } /* - * Invoke __bfq_weights_tree_remove on bfqq and all its inactive - * parent entities. + * Invoke __bfq_weights_tree_remove on bfqq and decrement the number + * of active groups for each queue's inactive parent entity. */ void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_entity *entity = bfqq->entity.parent; - __bfq_weights_tree_remove(bfqd, &bfqq->entity, - &bfqd->queue_weights_tree); - for_each_entity(entity) { struct bfq_sched_data *sd = entity->my_sched_data; @@ -797,18 +797,37 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, * next_in_service for details on why * in_service_entity must be checked too). * - * As a consequence, the weight of entity is - * not to be removed. In addition, if entity - * is active, then its parent entities are - * active as well, and thus their weights are - * not to be removed either. In the end, this - * loop must stop here. + * As a consequence, its parent entities are + * active as well, and thus this loop must + * stop here. */ break; } - __bfq_weights_tree_remove(bfqd, entity, - &bfqd->group_weights_tree); + + /* + * The decrement of num_groups_with_pending_reqs is + * not performed immediately upon the deactivation of + * entity, but it is delayed to when it also happens + * that the first leaf descendant bfqq of entity gets + * all its pending requests completed. The following + * instructions perform this delayed decrement, if + * needed. See the comments on + * num_groups_with_pending_reqs for details. + */ + if (entity->in_groups_with_pending_reqs) { + entity->in_groups_with_pending_reqs = false; + bfqd->num_groups_with_pending_reqs--; + } } + + /* + * Next function is invoked last, because it causes bfqq to be + * freed if the following holds: bfqq is not in service and + * has no dispatched request. DO NOT use bfqq after the next + * function invocation. 
+ */ + __bfq_weights_tree_remove(bfqd, bfqq, + &bfqd->queue_weights_tree); } /* @@ -826,7 +845,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq, rq = rq_entry_fifo(bfqq->fifo.next); - if (rq == last || ktime_get_ns() < rq->fifo_time) + if (rq == last || blk_time_get_ns() < rq->fifo_time) return NULL; bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); @@ -967,6 +986,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, else bfq_clear_bfqq_IO_bound(bfqq); + bfqq->entity.new_weight = bic->saved_weight; bfqq->ttime = bic->saved_ttime; bfqq->wr_coeff = bic->saved_wr_coeff; bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; @@ -1002,7 +1022,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, static int bfqq_process_refs(struct bfq_queue *bfqq) { - return bfqq->ref - bfqq->allocated - bfqq->entity.on_st; + return bfqq->ref - bfqq->allocated - bfqq->entity.on_st - + (bfqq->weight_counter != NULL); } /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */ @@ -1546,7 +1567,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, * bfq_bfqq_update_budg_for_activation for * details on the usage of the next variable. */ - arrived_in_time = ktime_get_ns() <= + arrived_in_time = blk_time_get_ns() <= bfqq->ttime.last_end_request + bfqd->bfq_slice_idle * 3; @@ -1666,7 +1687,11 @@ static void bfq_add_request(struct request *rq) bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); bfqq->queued[rq_is_sync(rq)]++; - bfqd->queued++; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it + * may be read without holding the lock in bfq_has_work(). + */ + WRITE_ONCE(bfqd->queued, bfqd->queued + 1); elv_rb_add(&bfqq->sort_list, rq); @@ -1784,7 +1809,11 @@ static void bfq_remove_request(struct request_queue *q, if (rq->queuelist.prev != &rq->queuelist) list_del_init(&rq->queuelist); bfqq->queued[sync]--; - bfqd->queued--; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it + * may be read without holding the lock in bfq_has_work(). 
+ */ + WRITE_ONCE(bfqd->queued, bfqd->queued - 1); elv_rb_del(&bfqq->sort_list, rq); elv_rqhash_del(q, rq); @@ -1853,9 +1882,11 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) ret = blk_mq_sched_try_merge(q, bio, &free); - if (free) - blk_mq_free_request(free); spin_unlock_irq(&bfqd->lock); + if (free) { + rq_hierarchy_end_io_acct(free, STAGE_BFQ); + blk_mq_free_request(free); + } return ret; } @@ -1886,9 +1917,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, blk_rq_pos(container_of(rb_prev(&req->rb_node), struct request, rb_node))) { struct bfq_queue *bfqq = bfq_init_rq(req); - struct bfq_data *bfqd = bfqq->bfqd; + struct bfq_data *bfqd; struct request *prev, *next_rq; + if (!bfqq) + return; + + bfqd = bfqq->bfqd; + /* Reposition request in its sort_list */ elv_rb_del(&bfqq->sort_list, req); elv_rb_add(&bfqq->sort_list, req); @@ -1930,6 +1966,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, struct bfq_queue *bfqq = bfq_init_rq(rq), *next_bfqq = bfq_init_rq(next); + if (!bfqq) + goto remove; + /* * If next and rq belong to the same bfq_queue and next is older * than rq, then reposition rq in the fifo (by substituting next @@ -1951,6 +1990,14 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, bfqq->next_rq = rq; bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); +remove: + /* Merged request may be in the IO scheduler. Remove it. */ + if (!RB_EMPTY_NODE(&next->rb_node)) { + bfq_remove_request(next->q, next); + if (next_bfqq) + bfqg_stats_update_io_remove(bfqq_group(next_bfqq), + next->cmd_flags); + } } /* Must be called with bfqq != NULL */ @@ -2128,6 +2175,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * are likely to increase the throughput. */ bfqq->new_bfqq = new_bfqq; + /* + * The above assignment schedules the following redirections: + * each time some I/O for bfqq arrives, the process that + * generated that I/O is disassociated from bfqq and + * associated with new_bfqq. Here we increase new_bfqq->ref + * in advance, adding the number of processes that are + * expected to be associated with new_bfqq as they happen to + * issue I/O. + */ new_bfqq->ref += process_refs; return new_bfqq; } @@ -2187,6 +2243,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_queue *in_service_bfqq, *new_bfqq; + /* if a merge has already been set up, then proceed with that first */ + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + /* * Prevent bfqq from being merged if it has been created too * long ago.
The idea is that true cooperating processes, and @@ -2201,9 +2261,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_too_late_for_merging(bfqq)) return NULL; - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) return NULL; @@ -2215,7 +2272,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (in_service_bfqq && in_service_bfqq != bfqq && likely(in_service_bfqq != &bfqd->oom_bfqq) && - bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && + bfq_rq_close_to_sector(io_struct, request, + bfqd->in_serv_last_pos) && bfqq->entity.parent == in_service_bfqq->entity.parent && bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); @@ -2249,6 +2307,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) if (!bic) return; + bic->saved_weight = bfqq->entity.orig_weight; bic->saved_ttime = bfqq->ttime; bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); @@ -2412,7 +2471,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd, else timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; - bfqd->last_budget_start = ktime_get(); + bfqd->last_budget_start = blk_time_get(); bfqq->budget_timeout = jiffies + bfqd->bfq_timeout * timeout_coeff; @@ -2469,6 +2528,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, } bfqd->in_service_queue = bfqq; + bfqd->in_serv_last_pos = 0; } /* @@ -2508,8 +2568,10 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && bfq_symmetric_scenario(bfqd)) sl = min_t(u64, sl, BFQ_MIN_TT); + else if (bfqq->wr_coeff > 1) + sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); - bfqd->last_idling_start = ktime_get(); + bfqd->last_idling_start = blk_time_get(); hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), HRTIMER_MODE_REL); bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); @@ -2546,7 +2608,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) { if (rq != NULL) { /* new rq dispatch now, reset accordingly */ - bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); + bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns(); bfqd->peak_rate_samples = 1; bfqd->sequential_samples = 0; bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = @@ -2703,7 +2765,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) */ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) { - u64 now_ns = ktime_get_ns(); + u64 now_ns = blk_time_get_ns(); if (bfqd->peak_rate_samples == 0) { /* first dispatch */ bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", @@ -2755,6 +2817,8 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) bfq_update_rate_reset(bfqd, rq); update_last_values: bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + if (RQ_BFQQ(rq) == bfqd->in_service_queue) + bfqd->in_serv_last_pos = bfqd->last_position; bfqd->last_dispatch = now_ns; } @@ -2783,7 +2847,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) bfq_remove_request(q, rq); } -static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) { /* * If this bfqq is shared between multiple processes, check @@ -2816,9 +2880,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct 
bfq_queue *bfqq) /* * All in-service entities must have been properly deactivated * or requeued before executing the next function, which - * resets all in-service entites as no more in service. + * resets all in-service entities as no more in service. This + * may cause bfqq to be freed. If this happens, the next + * function returns true. */ - __bfq_bfqd_reset_in_service(bfqd); + return __bfq_bfqd_reset_in_service(bfqd); } /** @@ -3036,7 +3102,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (compensate) delta_ktime = bfqd->last_idling_start; else - delta_ktime = ktime_get(); + delta_ktime = blk_time_get(); delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); delta_usecs = ktime_to_us(delta_ktime); @@ -3182,6 +3248,13 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); } +static bool bfq_bfqq_injectable(struct bfq_queue *bfqq) +{ + return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && + blk_queue_nonrot(bfqq->bfqd->queue) && + bfqq->bfqd->hw_tag; +} + /** * bfq_bfqq_expire - expire a queue. * @bfqd: device owning the queue. @@ -3216,7 +3289,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, bool slow; unsigned long delta = 0; struct bfq_entity *entity = &bfqq->entity; - int ref; /* * Check whether the process is slow (see bfq_bfqq_is_slow). @@ -3285,12 +3357,12 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, * reason. */ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); - ref = bfqq->ref; - __bfq_bfqq_expire(bfqd, bfqq); - - if (ref == 1) /* bfqq is gone, no more actions on it */ + if (__bfq_bfqq_expire(bfqd, bfqq)) + /* bfqq is gone, no more actions on it */ return; + bfqq->injected_service = 0; + /* mark bfqq as waiting a request only if a bic still points to it */ if (!bfq_bfqq_busy(bfqq) && reason != BFQQE_BUDGET_TIMEOUT && @@ -3497,9 +3569,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * symmetric scenario where: * (i) each of these processes must get the same throughput as * the others; - * (ii) all these processes have the same I/O pattern - (either sequential or random). - * In fact, in such a scenario, the drive will tend to treat + * (ii) the I/O of each process has the same properties, in + * terms of locality (sequential or random), direction + * (reads or writes), request sizes, greediness + * (from I/O-bound to sporadic), and so on. + * In fact, in such a scenario, the drive tends to treat * the requests of each of these processes in about the same * way as the requests of the others, and thus to provide * each of these processes with about the same throughput @@ -3508,18 +3582,67 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * certainly needed to guarantee that bfqq receives its * assigned fraction of the device throughput (see [1] for * details). + * The problem is that idling may significantly reduce + * throughput with certain combinations of types of I/O and + * devices. An important example is sync random I/O, on flash + * storage with command queueing. So, unless bfqq falls in the + * above cases where idling also boosts throughput, it would + * be important to check conditions (i) and (ii) accurately, + * so as to avoid idling when not strictly needed for service + * guarantees. + * + * Unfortunately, it is extremely difficult to thoroughly + * check condition (ii). And, in case there are active groups, + * it becomes very difficult to check condition (i) too. 
In + * fact, if there are active groups, then, for condition (i) + * to become false, it is enough that an active group contains + * more active processes or sub-groups than some other active + * group. More precisely, for condition (i) to hold because of + * such a group, it is not even necessary that the group is + * (still) active: it is sufficient that, even if the group + * has become inactive, some of its descendant processes still + * have some request already dispatched but still waiting for + * completion. In fact, requests have still to be guaranteed + * their share of the throughput even after being + * dispatched. In this respect, it is easy to show that, if a + * group frequently becomes inactive while still having + * in-flight requests, and if, when this happens, the group is + * not considered in the calculation of whether the scenario + * is asymmetric, then the group may fail to be guaranteed its + * fair share of the throughput (basically because idling may + * not be performed for the descendant processes of the group, + * but it had to be). We address this issue with the + * following bi-modal behavior, implemented in the function + * bfq_symmetric_scenario(). + * + * If there are groups with requests waiting for completion + * (as commented above, some of these groups may even be + * already inactive), then the scenario is tagged as + * asymmetric, conservatively, without checking any of the + * conditions (i) and (ii). So the device is idled for bfqq. + * This behavior matches also the fact that groups are created + * exactly if controlling I/O is a primary concern (to + * preserve bandwidth and latency guarantees). + * + * On the opposite end, if there are no groups with requests + * waiting for completion, then only condition (i) is actually + * controlled, i.e., provided that condition (i) holds, idling + * is not performed, regardless of whether condition (ii) + * holds. In other words, only if condition (i) does not hold, + * then idling is allowed, and the device tends to be + * prevented from queueing many requests, possibly of several + * processes. Since there are no groups with requests waiting + * for completion, then, to control condition (i) it is enough + * to check just whether all the queues with requests waiting + * for completion also have the same weight. * - * We address this issue by controlling, actually, only the - * symmetry sub-condition (i), i.e., provided that - * sub-condition (i) holds, idling is not performed, - * regardless of whether sub-condition (ii) holds. In other - * words, only if sub-condition (i) holds, then idling is - * allowed, and the device tends to be prevented from queueing - * many requests, possibly of several processes. The reason - * for not controlling also sub-condition (ii) is that we - * exploit preemption to preserve guarantees in case of - * symmetric scenarios, even if (ii) does not hold, as - * explained in the next two paragraphs. + * Not checking condition (ii) evidently exposes bfqq to the + * risk of getting less throughput than its fair share. + * However, for queues with the same weight, a further + * mechanism, preemption, mitigates or even eliminates this + * problem. And it does so without consequences on overall + * throughput. This mechanism and its benefits are explained + * in the next three paragraphs. 
* * Even if a queue, say Q, is expired when it remains idle, Q * can still preempt the new in-service queue if the next @@ -3533,11 +3656,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * idling allows the internal queues of the device to contain * many requests, and thus to reorder requests, we can rather * safely assume that the internal scheduler still preserves a - * minimum of mid-term fairness. The motivation for using - * preemption instead of idling is that, by not idling, - * service guarantees are preserved without minimally - * sacrificing throughput. In other words, both a high - * throughput and its desired distribution are obtained. + * minimum of mid-term fairness. * * More precisely, this preemption-based, idleless approach * provides fairness in terms of IOPS, and not sectors per @@ -3556,22 +3675,28 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * 1024/8 times as high as the service received by the other * queue. * - * On the other hand, device idling is performed, and thus - * pure sector-domain guarantees are provided, for the - * following queues, which are likely to need stronger - * throughput guarantees: weight-raised queues, and queues - * with a higher weight than other queues. When such queues - * are active, sub-condition (i) is false, which triggers - * device idling. + * The motivation for using preemption instead of idling (for + * queues with the same weight) is that, by not idling, + * service guarantees are preserved (completely or at least in + * part) without minimally sacrificing throughput. And, if + * there is no active group, then the primary expectation for + * this device is probably a high throughput. * - * According to the above considerations, the next variable is - * true (only) if sub-condition (i) holds. To compute the - * value of this variable, we not only use the return value of - * the function bfq_symmetric_scenario(), but also check - * whether bfqq is being weight-raised, because - * bfq_symmetric_scenario() does not take into account also - * weight-raised queues (see comments on - * bfq_weights_tree_add()). + * We are now left only with explaining the additional + * compound condition that is checked below for deciding + * whether the scenario is asymmetric. To explain this + * compound condition, we need to add that the function + * bfq_symmetric_scenario checks the weights of only + * non-weight-raised queues, for efficiency reasons (see + * comments on bfq_weights_tree_add()). Then the fact that + * bfqq is weight-raised is checked explicitly here. More + * precisely, the compound condition below takes into account + * also the fact that, even if bfqq is being weight-raised, + * the scenario is still symmetric if all queues with requests + * waiting for completion happen to be + * weight-raised. Actually, we should be even more precise + * here, and differentiate between interactive weight raising + * and soft real-time weight raising. * * As a side note, it is worth considering that the above * device-idling countermeasures may however fail in the @@ -3583,7 +3708,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * to let requests be served in the desired order until all * the requests already queued in the device have been served. 
*/ - asymmetric_scenario = bfqq->wr_coeff > 1 || + asymmetric_scenario = (bfqq->wr_coeff > 1 && + bfqd->wr_busy_queues < bfqd->busy_queues) || !bfq_symmetric_scenario(bfqd); /* @@ -3629,6 +3755,30 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); } +static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq; + + /* + * A linear search; but, with a high probability, very few + * steps are needed to find a candidate queue, i.e., a queue + * with enough budget left for its next request. In fact: + * - BFQ dynamically updates the budget of every queue so as + * to accommodate the expected backlog of the queue; + * - if a queue gets all its requests dispatched as injected + * service, then the queue is removed from the active list + * (and re-added only if it gets new requests, but with + * enough budget for its new backlog). + */ + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) + if (!RB_EMPTY_ROOT(&bfqq->sort_list) && + bfq_serv_to_charge(bfqq->next_rq, bfqq) <= + bfq_bfqq_budget_left(bfqq)) + return bfqq; + + return NULL; +} + /* * Select a queue for service. If we have a current queue in service, * check whether to continue servicing it, or retrieve and set a new one. @@ -3710,10 +3860,19 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) * No requests pending. However, if the in-service queue is idling * for a new request, or has requests waiting for a completion and * may idle after their completion, then keep it anyway. + * + * Yet, to boost throughput, inject service from other queues if + * possible. */ if (bfq_bfqq_wait_request(bfqq) || (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { - bfqq = NULL; + if (bfq_bfqq_injectable(bfqq) && + bfqq->injected_service * bfqq->inject_coeff < + bfqq->entity.service * 10) + bfqq = bfq_choose_bfqq_for_injection(bfqd); + else + bfqq = NULL; + goto keep_queue; } @@ -3803,6 +3962,14 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, bfq_dispatch_remove(bfqd->queue, rq); + if (bfqq != bfqd->in_service_queue) { + if (likely(bfqd->in_service_queue)) + bfqd->in_service_queue->injected_service += + bfq_serv_to_charge(rq, bfqq); + + goto return_rq; + } + /* * If weight raising has to terminate for bfqq, then next * function causes an immediate update of bfqq's weight, @@ -3821,13 +3988,12 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, * belongs to CLASS_IDLE and other queues are waiting for * service. 
*/ - if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) - goto expire; + if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq))) + goto return_rq; - return rq; - -expire: bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); + +return_rq: return rq; } @@ -3836,11 +4002,11 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; /* - * Avoiding lock: a race on bfqd->busy_queues should cause at + * Avoiding lock: a race on bfqd->queued should cause at * most a call to dispatch for nothing */ return !list_empty_careful(&bfqd->dispatch) || - bfqd->busy_queues > 0; + READ_ONCE(bfqd->queued); } static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) @@ -3987,7 +4153,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; struct request *rq; struct bfq_queue *in_serv_queue; - bool waiting_rq, idle_timer_disabled; + bool waiting_rq, idle_timer_disabled = false; spin_lock_irq(&bfqd->lock); @@ -3995,15 +4161,18 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); rq = __bfq_dispatch_request(hctx); - - idle_timer_disabled = - waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); + if (in_serv_queue == bfqd->in_service_queue) { + idle_timer_disabled = + waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); + } spin_unlock_irq(&bfqd->lock); + bfq_update_dispatch_stats(hctx->queue, rq, + idle_timer_disabled ? in_serv_queue : NULL, + idle_timer_disabled); - bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, - idle_timer_disabled); - + if (rq) + rq_hierarchy_end_io_acct(rq, STAGE_BFQ); return rq; } @@ -4111,6 +4280,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); + bfqq->bic = NULL; bfq_exit_bfqq(bfqd, bfqq); bic_set_bfqq(bic, NULL, is_sync); spin_unlock_irqrestore(&bfqd->lock, flags); @@ -4135,6 +4305,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) struct task_struct *tsk = current; int ioprio_class; struct bfq_data *bfqd = bfqq->bfqd; + char dname[BDI_DEV_NAME_LEN]; if (!bfqd) return; @@ -4142,8 +4313,9 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); switch (ioprio_class) { default: - dev_err(bfqq->bfqd->queue->backing_dev_info->dev, - "bfq: bad prio class %d\n", ioprio_class); + bdi_get_dev_name(bfqq->bfqd->queue->backing_dev_info, + dname, BDI_DEV_NAME_LEN); + pr_err("%s bfq: bad prio class %d\n", dname, ioprio_class); /* fall through */ case IOPRIO_CLASS_NONE: /* @@ -4169,7 +4341,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) if (bfqq->new_ioprio >= IOPRIO_BE_NR) { pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", bfqq->new_ioprio); - bfqq->new_ioprio = IOPRIO_BE_NR; + bfqq->new_ioprio = IOPRIO_BE_NR - 1; } bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); @@ -4232,11 +4404,18 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_mark_bfqq_has_short_ttime(bfqq); bfq_mark_bfqq_sync(bfqq); bfq_mark_bfqq_just_created(bfqq); + /* + * Aggressively inject a lot of service: up to 90%. + * This coefficient remains constant during bfqq life, + * but this behavior might be changed, after enough + * testing and tuning. 
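As a worked example of how this coefficient is used (a sketch only; the helper name below is hypothetical, while the fields and the threshold mirror the check performed in bfq_select_queue() above):

static bool bfq_may_inject(struct bfq_queue *bfqq)
{
	/* More injection is allowed while this inequality holds. */
	return bfqq->injected_service * bfqq->inject_coeff <
	       bfqq->entity.service * 10;
}

With inject_coeff = 1, injection stops only once injected_service reaches ten times the service received by bfqq itself, i.e., injected service may amount to roughly 10/11, about 90%, of the total service dispatched while bfqq is in service, which matches the table in bfq-iosched.h below.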
+ */ + bfqq->inject_coeff = 1; } else bfq_clear_bfqq_sync(bfqq); /* set end request to minus infinity from now */ - bfqq->ttime.last_end_request = ktime_get_ns() + 1; + bfqq->ttime.last_end_request = blk_time_get_ns() + 1; bfq_mark_bfqq_IO_bound(bfqq); @@ -4354,7 +4533,7 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_ttime *ttime = &bfqq->ttime; - u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; + u64 elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request; elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); @@ -4523,7 +4702,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bfq_add_request(rq); idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); - rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; + rq->fifo_time = blk_time_get_ns() + + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &bfqq->fifo); bfq_rq_enqueued(bfqd, bfqq, rq); @@ -4571,10 +4751,13 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, struct bfq_queue *bfqq; bool idle_timer_disabled = false; unsigned int cmd_flags; + LIST_HEAD(free); spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { + if (blk_mq_sched_try_insert_merge(q, rq, &free)) { spin_unlock_irq(&bfqd->lock); + rq_list_hierarchy_end_io_acct(&free, STAGE_BFQ); + blk_mq_free_requests(&free); return; } @@ -4584,12 +4767,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, spin_lock_irq(&bfqd->lock); bfqq = bfq_init_rq(rq); - if (at_head || blk_rq_is_passthrough(rq)) { + if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { if (at_head) list_add(&rq->queuelist, &bfqd->dispatch); else list_add_tail(&rq->queuelist, &bfqd->dispatch); - } else { /* bfqq is assumed to be non null here */ + } else { idle_timer_disabled = __bfq_insert_request(bfqd, rq); /* * Update bfqq, because, if a queue merge has occurred @@ -4621,6 +4804,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *list, bool at_head) { + rq_list_hierarchy_start_io_acct(list, STAGE_BFQ); while (!list_empty(list)) { struct request *rq; @@ -4677,7 +4861,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) bfq_weights_tree_remove(bfqd, bfqq); } - now_ns = ktime_get_ns(); + now_ns = blk_time_get_ns(); bfqq->ttime.last_end_request = now_ns; @@ -4786,18 +4970,7 @@ static void bfq_finish_requeue_request(struct request *rq) { struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd; - - /* - * Requeue and finish hooks are invoked in blk-mq without - * checking whether the involved request is actually still - * referenced in the scheduler. To handle this fact, the - * following two checks make this function exit in case of - * spurious invocations, for which there is nothing to do. - * - * First, check whether rq has nothing to do with an elevator. 
- */ - if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) - return; + unsigned long flags; /* * rq either is not associated with any icq, or is an already @@ -4815,36 +4988,12 @@ static void bfq_finish_requeue_request(struct request *rq) rq->io_start_time_ns, rq->cmd_flags); + spin_lock_irqsave(&bfqd->lock, flags); if (likely(rq->rq_flags & RQF_STARTED)) { - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); - - bfq_completed_request(bfqq, bfqd); - bfq_finish_requeue_request_body(bfqq); - - spin_unlock_irqrestore(&bfqd->lock, flags); - } else { - /* - * Request rq may be still/already in the scheduler, - * in which case we need to remove it (this should - * never happen in case of requeue). And we cannot - * defer such a check and removal, to avoid - * inconsistencies in the time interval from the end - * of this function to the start of the deferred work. - * This situation seems to occur only in process - * context, as a consequence of a merge. In the - * current version of the code, this implies that the - * lock is held. - */ - - if (!RB_EMPTY_NODE(&rq->rb_node)) { - bfq_remove_request(rq->q, rq); - bfqg_stats_update_io_remove(bfqq_group(bfqq), - rq->cmd_flags); - } - bfq_finish_requeue_request_body(bfqq); } + bfq_finish_requeue_request_body(bfqq); + spin_unlock_irqrestore(&bfqd->lock, flags); /* * Reset private fields. In case of a requeue, this allows @@ -5061,7 +5210,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * addition, if the queue has also just been split, we have to * resume its state. */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { + if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && + bfqq_process_refs(bfqq) == 1) { bfqq->bic = bic; if (split) { /* @@ -5080,20 +5230,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) return bfqq; } -static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) +static void +bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) { - struct bfq_data *bfqd = bfqq->bfqd; enum bfqq_expiration reason; unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfq_clear_bfqq_wait_request(bfqq); + /* + * bfqq may be racing with expiration, so first check whether bfqq is + * still in service before doing anything to it. If the racing bfqq is + * not in service, it has already been expired through + * __bfq_bfqq_expire(), and its wait_request flag has been cleared in + * __bfq_bfqd_reset_in_service(). + */ if (bfqq != bfqd->in_service_queue) { spin_unlock_irqrestore(&bfqd->lock, flags); return; } + bfq_clear_bfqq_wait_request(bfqq); + if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired @@ -5138,7 +5296,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) * early.
*/ if (bfqq) - bfq_idle_slice_timer_body(bfqq); + bfq_idle_slice_timer_body(bfqd, bfqq); return HRTIMER_NORESTART; } @@ -5223,7 +5381,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd, return min_shallow; } -static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) +static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) { struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; struct blk_mq_tags *tags = hctx->sched_tags; @@ -5231,6 +5389,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); +} + +static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) +{ + bfq_depth_updated(hctx); return 0; } @@ -5238,7 +5401,9 @@ static void bfq_exit_queue(struct elevator_queue *e) { struct bfq_data *bfqd = e->elevator_data; struct bfq_queue *bfqq, *n; + struct request_queue *q = bfqd->queue; + blk_mq_unregister_hierarchy(q, STAGE_BFQ); hrtimer_cancel(&bfqd->idle_slice_timer); spin_lock_irq(&bfqd->lock); @@ -5261,6 +5426,9 @@ static void bfq_exit_queue(struct elevator_queue *e) #endif kfree(bfqd); + + /* Re-enable throttling in case elevator disabled it */ + wbt_enable_default(q); } static void bfq_init_root_group(struct bfq_group *root_group, @@ -5330,7 +5498,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfqd->idle_slice_timer.function = bfq_idle_slice_timer; bfqd->queue_weights_tree = RB_ROOT; - bfqd->group_weights_tree = RB_ROOT; + bfqd->num_groups_with_pending_reqs = 0; INIT_LIST_HEAD(&bfqd->active_list); INIT_LIST_HEAD(&bfqd->idle_list); @@ -5402,6 +5570,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); wbt_disable_default(q); + blk_mq_register_hierarchy(q, STAGE_BFQ); return 0; out_free: @@ -5653,6 +5822,7 @@ static struct elevator_type iosched_bfq_mq = { .requests_merged = bfq_requests_merged, .request_merged = bfq_request_merged, .has_work = bfq_has_work, + .depth_updated = bfq_depth_updated, .init_hctx = bfq_init_hctx, .init_sched = bfq_init_queue, .exit_sched = bfq_exit_queue, diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index a8a2e5aca4d48f328dbb1c14bff88f1aaa485a2c..9bbd8ea906dd6de07069f0d911c3dcb2e6b1cf6b 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -108,15 +108,14 @@ struct bfq_sched_data { }; /** - * struct bfq_weight_counter - counter of the number of all active entities + * struct bfq_weight_counter - counter of the number of all active queues * with a given weight. 
*/ struct bfq_weight_counter { - unsigned int weight; /* weight of the entities this counter refers to */ - unsigned int num_active; /* nr of active entities with this weight */ + unsigned int weight; /* weight of the queues this counter refers to */ + unsigned int num_active; /* nr of active queues with this weight */ /* - * Weights tree member (see bfq_data's @queue_weights_tree and - * @group_weights_tree) + * Weights tree member (see bfq_data's @queue_weights_tree) */ struct rb_node weights_node; }; @@ -151,8 +150,6 @@ struct bfq_weight_counter { struct bfq_entity { /* service_tree member */ struct rb_node rb_node; - /* pointer to the weight counter associated with this entity */ - struct bfq_weight_counter *weight_counter; /* * Flag, true if the entity is on a tree (either the active or @@ -199,6 +196,9 @@ struct bfq_entity { /* flag, set to request a weight, ioprio or ioprio_class change */ int prio_changed; + + /* flag, set if the entity is counted in groups_with_pending_reqs */ + bool in_groups_with_pending_reqs; }; struct bfq_group; @@ -266,6 +266,9 @@ struct bfq_queue { /* entity representing this queue in the scheduler */ struct bfq_entity entity; + /* pointer to the weight counter associated with this entity */ + struct bfq_weight_counter *weight_counter; + /* maximum budget allowed from the feedback mechanism */ int max_budget; /* budget expiration (in jiffies) */ @@ -351,6 +354,32 @@ struct bfq_queue { unsigned long split_time; /* time of last split */ unsigned long first_IO_time; /* time of first I/O for this queue */ + + /* max service rate measured so far */ + u32 max_service_rate; + /* + * Ratio between the service received by bfqq while it is in + * service, and the cumulative service (of requests of other + * queues) that may be injected while bfqq is empty but still + * in service. To increase precision, the coefficient is + * measured in tenths of unit. Here are some example of (1) + * ratios, (2) resulting percentages of service injected + * w.r.t. to the total service dispatched while bfqq is in + * service, and (3) corresponding values of the coefficient: + * 1 (50%) -> 10 + * 2 (33%) -> 20 + * 10 (9%) -> 100 + * 9.9 (9%) -> 99 + * 1.5 (40%) -> 15 + * 0.5 (66%) -> 5 + * 0.1 (90%) -> 1 + * + * So, if the coefficient is lower than 10, then + * injected service is more than bfqq service. + */ + unsigned int inject_coeff; + /* amount of service injected in current service slot */ + unsigned int injected_service; }; /** @@ -389,6 +418,15 @@ struct bfq_io_cq { */ bool was_in_burst_list; + /* + * Save the weight when a merge occurs, to be able + * to restore it in case of split. If the weight is not + * correctly resumed when the queue is recycled, + * then the weight of the recycled queue could differ + * from the weight of the original queue. + */ + unsigned int saved_weight; + /* * Similar to previous fields: save wr information. */ @@ -422,15 +460,54 @@ struct bfq_data { * bfq_weights_tree_[add|remove] for further details). */ struct rb_root queue_weights_tree; + /* - * rbtree of non-queue @bfq_entity weight counters, sorted by - * weight. Used to keep track of whether all @bfq_groups have - * the same weight. The tree contains one counter for each - * distinct weight associated to some active @bfq_group (see - * the comments to the functions bfq_weights_tree_[add|remove] - * for further details). + * Number of groups with at least one descendant process that + * has at least one request waiting for completion. 
Note that + * this accounts for also requests already dispatched, but not + * yet completed. Therefore this number of groups may differ + * (be larger) than the number of active groups, as a group is + * considered active only if its corresponding entity has + * descendant queues with at least one request queued. This + * number is used to decide whether a scenario is symmetric. + * For a detailed explanation see comments on the computation + * of the variable asymmetric_scenario in the function + * bfq_better_to_idle(). + * + * However, it is hard to compute this number exactly, for + * groups with multiple descendant processes. Consider a group + * that is inactive, i.e., that has no descendant process with + * pending I/O inside BFQ queues. Then suppose that + * num_groups_with_pending_reqs is still accounting for this + * group, because the group has descendant processes with some + * I/O request still in flight. num_groups_with_pending_reqs + * should be decremented when the in-flight request of the + * last descendant process is finally completed (assuming that + * nothing else has changed for the group in the meantime, in + * terms of composition of the group and active/inactive state of child + * groups and processes). To accomplish this, an additional + * pending-request counter must be added to entities, and must + * be updated correctly. To avoid this additional field and operations, + * we resort to the following tradeoff between simplicity and + * accuracy: for an inactive group that is still counted in + * num_groups_with_pending_reqs, we decrement + * num_groups_with_pending_reqs when the first descendant + * process of the group remains with no request waiting for + * completion. + * + * Even this simpler decrement strategy requires a little + * carefulness: to avoid multiple decrements, we flag a group, + * more precisely an entity representing a group, as still + * counted in num_groups_with_pending_reqs when it becomes + * inactive. Then, when the first descendant queue of the + * entity remains with no request waiting for completion, + * num_groups_with_pending_reqs is decremented, and this flag + * is reset. After this flag is reset for the entity, + * num_groups_with_pending_reqs won't be decremented any + * longer in case a new descendant queue of the entity remains + * with no request waiting for completion. 
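A minimal sketch of the decrement strategy just described, roughly what bfq_weights_tree_remove() does under this scheme; the upward walk over parent entities and the use of my_sched_data to test whether an ancestor is still active are assumptions based on this comment, not code shown in these hunks.

void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = bfqq->entity.parent;

	for (; entity; entity = entity->parent) {
		struct bfq_sched_data *sd = entity->my_sched_data;

		/* Stop at the first ancestor group that is still active. */
		if (sd->next_in_service || sd->in_service_entity)
			break;

		/* The flag guarantees at most one decrement per group. */
		if (entity->in_groups_with_pending_reqs) {
			entity->in_groups_with_pending_reqs = false;
			bfqd->num_groups_with_pending_reqs--;
		}
	}

	__bfq_weights_tree_remove(bfqd, bfqq, &bfqd->queue_weights_tree);
}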
*/ - struct rb_root group_weights_tree; + unsigned int num_groups_with_pending_reqs; /* * Number of bfq_queues containing requests (including the @@ -469,6 +546,9 @@ struct bfq_data { /* on-disk position of the last served request */ sector_t last_position; + /* position of the last served request for the in-service queue */ + sector_t in_serv_last_pos; + /* time of last request completion (ns) */ u64 last_completion; @@ -782,7 +862,7 @@ struct bfq_group { char blkg_path[128]; /* reference counter (see comments in bfq_bic_update_cgroup) */ - int ref; + refcount_t ref; struct bfq_entity entity; struct bfq_sched_data sched_data; @@ -825,10 +905,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync); void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync); struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic); void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); -void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity, +void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct rb_root *root); void __bfq_weights_tree_remove(struct bfq_data *bfqd, - struct bfq_entity *entity, + struct bfq_queue *bfqq, struct rb_root *root); void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_queue *bfqq); @@ -922,7 +1002,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree); bool next_queue_may_preempt(struct bfq_data *bfqd); struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd); -void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd); +bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd); void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool ins_into_idle_tree, bool expiration); void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index ae52bff43ce4ff1697fdfdbc0c9c10fb4148c69e..d5d7fbb2ebc193611313d0ae37e32be04cfe3474 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -788,25 +788,23 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, new_weight = entity->orig_weight * (bfqq ? bfqq->wr_coeff : 1); /* - * If the weight of the entity changes, remove the entity - * from its old weight counter (if there is a counter - * associated with the entity), and add it to the counter - * associated with its new weight. + * If the weight of the entity changes, and the entity is a + * queue, remove the entity from its old weight counter (if + * there is a counter associated with the entity). */ - if (prev_weight != new_weight) { - root = bfqq ? &bfqd->queue_weights_tree : - &bfqd->group_weights_tree; - __bfq_weights_tree_remove(bfqd, entity, root); + if (prev_weight != new_weight && bfqq) { + root = &bfqd->queue_weights_tree; + __bfq_weights_tree_remove(bfqd, bfqq, root); } entity->weight = new_weight; /* - * Add the entity to its weights tree only if it is - * not associated with a weight-raised queue. + * Add the entity, if it is not a weight-raised queue, + * to the counter associated with its new weight. */ - if (prev_weight != new_weight && - (bfqq ? bfqq->wr_coeff == 1 : 1)) + if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) { /* If we get here, root has been initialized. 
*/ - bfq_weights_tree_add(bfqd, entity, root); + bfq_weights_tree_add(bfqd, bfqq, root); + } new_st->wsum += entity->weight; @@ -1008,13 +1006,16 @@ static void __bfq_activate_entity(struct bfq_entity *entity, entity->on_st = true; } -#ifdef BFQ_GROUP_IOSCHED_ENABLED +#ifdef CONFIG_BFQ_GROUP_IOSCHED if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ struct bfq_group *bfqg = container_of(entity, struct bfq_group, entity); + struct bfq_data *bfqd = bfqg->bfqd; - bfq_weights_tree_add(bfqg->bfqd, entity, - &bfqd->group_weights_tree); + if (!entity->in_groups_with_pending_reqs) { + entity->in_groups_with_pending_reqs = true; + bfqd->num_groups_with_pending_reqs++; + } } #endif @@ -1181,10 +1182,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) st = bfq_entity_service_tree(entity); is_in_service = entity == sd->in_service_entity; - if (is_in_service) { - bfq_calc_finish(entity, entity->service); + bfq_calc_finish(entity, entity->service); + + if (is_in_service) sd->in_service_entity = NULL; - } + else + /* + * Non in-service entity: nobody will take care of + * resetting its service counter on expiration. Do it + * now. + */ + entity->service = 0; if (entity->tree == &st->active) bfq_active_extract(st, entity); @@ -1592,7 +1600,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) return bfqq; } -void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) +/* returns true if the in-service queue gets freed */ +bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) { struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; @@ -1616,8 +1625,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) * service tree either, then release the service reference to * the queue it represents (taken with bfq_get_entity). */ - if (!in_serv_entity->on_st) + if (!in_serv_entity->on_st) { + /* + * If no process is referencing in_serv_bfqq any + * longer, then the service reference may be the only + * reference to the queue. If this is the case, then + * bfqq gets freed here. 
+ */ + int ref = in_serv_bfqq->ref; bfq_put_queue(in_serv_bfqq); + if (ref == 1) + return true; + } + + return false; } void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -1660,15 +1681,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqd->busy_queues--; - if (!bfqq->dispatched) - bfq_weights_tree_remove(bfqd, bfqq); - if (bfqq->wr_coeff > 1) bfqd->wr_busy_queues--; bfqg_stats_update_dequeue(bfqq_group(bfqq)); bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); + + if (!bfqq->dispatched) + bfq_weights_tree_remove(bfqd, bfqq); } /* @@ -1676,6 +1697,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, */ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) { +#ifdef CONFIG_BFQ_GROUP_IOSCHED + /* If parent group is offlined, move the bfqq to root group */ + if (bfqq->entity.parent) { + struct bfq_group *bfqg = bfq_bfqq_to_bfqg(bfqq); + + if (bfqg->pd.plid >= BLKCG_MAX_POLS) + bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); + } +#endif bfq_log_bfqq(bfqd, bfqq, "add to busy"); bfq_activate_bfqq(bfqd, bfqq); @@ -1685,7 +1715,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) if (!bfqq->dispatched) if (bfqq->wr_coeff == 1) - bfq_weights_tree_add(bfqd, &bfqq->entity, + bfq_weights_tree_add(bfqd, bfqq, &bfqd->queue_weights_tree); if (bfqq->wr_coeff > 1) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 67b5fb861a5100c5294e572668881a187e478a6b..2e22a3f7466a86b75c542dd55e6b23d0b4e73c58 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -38,6 +38,18 @@ void blk_flush_integrity(void) flush_workqueue(kintegrityd_wq); } +void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip) +{ + if (bs && mempool_initialized(&bs->bio_integrity_pool)) { + if (bip->bip_vec) + bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + mempool_free(bip, &bs->bio_integrity_pool); + } else { + kfree(bip); + } +} + /** * bio_integrity_alloc - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to @@ -90,7 +102,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, return bip; err: - mempool_free(bip, &bs->bio_integrity_pool); + __bio_integrity_free(bs, bip); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(bio_integrity_alloc); @@ -111,14 +123,7 @@ static void bio_integrity_free(struct bio *bio) kfree(page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset); - if (bs && mempool_initialized(&bs->bio_integrity_pool)) { - bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab); - - mempool_free(bip, &bs->bio_integrity_pool); - } else { - kfree(bip); - } - + __bio_integrity_free(bs, bip); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; } @@ -291,8 +296,11 @@ bool bio_integrity_prep(struct bio *bio) ret = bio_integrity_add_page(bio, virt_to_page(buf), bytes, offset); - if (ret == 0) - return false; + if (ret == 0) { + printk(KERN_ERR "could not attach integrity payload\n"); + status = BLK_STS_RESOURCE; + goto err_end_io; + } if (ret < bytes) break; @@ -391,7 +399,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); - bip->bip_iter.bi_sector += bytes_done >> 9; + bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9); bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); } EXPORT_SYMBOL(bio_integrity_advance); diff --git 
a/block/bio.c b/block/bio.c index 0093bed81c0e85882066499dcd92c5e94bd8a35d..ff18f6839063016bb5c2d0419c0400b4235afe64 100644 --- a/block/bio.c +++ b/block/bio.c @@ -33,6 +33,7 @@ #include #include "blk.h" #include "blk-rq-qos.h" +#include "blk-io-hierarchy/stats.h" /* * Test patch to inline a certain number of bi_io_vec's inside the bio @@ -245,6 +246,14 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, void bio_uninit(struct bio *bio) { bio_disassociate_task(bio); +#ifdef CONFIG_BLK_BIO_ALLOC_TASK + if (bio->pid) { + put_pid(bio->pid); + bio->pid = NULL; + } +#endif + bio_hierarchy_end(bio); + bio_free_hierarchy_data(bio); } EXPORT_SYMBOL(bio_uninit); @@ -285,6 +294,14 @@ void bio_init(struct bio *bio, struct bio_vec *table, bio->bi_io_vec = table; bio->bi_max_vecs = max_vecs; + +#ifdef CONFIG_BLK_BIO_ALLOC_TIME + bio->bi_alloc_time_ns = blk_time_get_ns(); +#endif + +#ifdef CONFIG_BLK_BIO_ALLOC_TASK + bio->pid = get_pid(task_pid(current)); +#endif } EXPORT_SYMBOL(bio_init); @@ -314,7 +331,7 @@ static struct bio *__bio_chain_endio(struct bio *bio) { struct bio *parent = bio->bi_private; - if (!parent->bi_status) + if (bio->bi_status && !parent->bi_status) parent->bi_status = bio->bi_status; bio_put(bio); return parent; @@ -547,6 +564,56 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) } EXPORT_SYMBOL(zero_fill_bio_iter); +/** + * bio_truncate - truncate the bio to small size of @new_size + * @bio: the bio to be truncated + * @new_size: new size for truncating the bio + * + * Description: + * Truncate the bio to new size of @new_size. If bio_op(bio) is + * REQ_OP_READ, zero the truncated part. This function should only + * be used for handling corner cases, such as bio eod. + */ +void bio_truncate(struct bio *bio, unsigned new_size) +{ + struct bio_vec bv; + struct bvec_iter iter; + unsigned int done = 0; + bool truncated = false; + + if (new_size >= bio->bi_iter.bi_size) + return; + + if (bio_op(bio) != REQ_OP_READ) + goto exit; + + bio_for_each_segment(bv, bio, iter) { + if (done + bv.bv_len > new_size) { + unsigned offset; + + if (!truncated) + offset = new_size - done; + else + offset = 0; + zero_user(bv.bv_page, bv.bv_offset + offset, + bv.bv_len - offset); + truncated = true; + } + done += bv.bv_len; + } + + exit: + /* + * Don't touch bvec table here and make it really immutable, since + * fs bio user has to retrieve all pages via bio_for_each_segment_all + * in its .end_bio() callback. + * + * It is enough to truncate bio by updating .bi_size since we can make + * correct bvec with the updated .bi_size for drivers. 
+ */ + bio->bi_iter.bi_size = new_size; +} + /** * bio_put - release a reference to a bio * @bio: bio to release reference to @@ -605,6 +672,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) if (bio_flagged(bio_src, BIO_THROTTLED)) bio_set_flag(bio, BIO_THROTTLED); bio->bi_opf = bio_src->bi_opf; + bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; @@ -1239,8 +1307,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q, } } - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) { + if (!map_data) + __free_page(page); break; + } len -= bytes; offset = 0; @@ -1261,6 +1332,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, if (ret) goto cleanup; } else { + if (bmd->is_our_pages) + zero_fill_bio(bio); iov_iter_advance(iter, bio->bi_iter.bi_size); } @@ -1522,7 +1595,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, if (bytes > len) bytes = len; - page = alloc_page(q->bounce_gfp | gfp_mask); + page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask); if (!page) goto cleanup; @@ -1662,36 +1735,97 @@ void bio_check_pages_dirty(struct bio *bio) } EXPORT_SYMBOL_GPL(bio_check_pages_dirty); -void generic_start_io_acct(struct request_queue *q, int op, - unsigned long sectors, struct hd_struct *part) +void update_io_ticks(int cpu, struct hd_struct *part, unsigned long now, bool end) +{ + unsigned long stamp; +again: + stamp = READ_ONCE(part->stamp); + if (unlikely(time_after(now, stamp))) { + if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) + __part_stat_add(cpu, part, io_ticks, end ? now - stamp : 1); + } + if (part->partno) { + part = &part_to_disk(part)->part0; + goto again; + } +} + +static void __generic_start_io_acct(struct request_queue *q, int op, + unsigned long sectors, + struct hd_struct *part, bool precise) { const int sgrp = op_stat_group(op); int cpu = part_stat_lock(); - part_round_stats(q, cpu, part); - part_stat_inc(cpu, part, ios[sgrp]); - part_stat_add(cpu, part, sectors[sgrp], sectors); + if (precise_iostat) + part_round_stats(q, cpu, part); + else + update_io_ticks(cpu, part, jiffies, false); + if (!precise) { + part_stat_inc(cpu, part, ios[sgrp]); + part_stat_add(cpu, part, sectors[sgrp], sectors); + } part_inc_in_flight(q, part, op_is_write(op)); part_stat_unlock(); } + +void generic_start_io_acct(struct request_queue *q, int op, + unsigned long sectors, struct hd_struct *part) +{ + __generic_start_io_acct(q, op, sectors, part, false); +} EXPORT_SYMBOL(generic_start_io_acct); -void generic_end_io_acct(struct request_queue *q, int req_op, - struct hd_struct *part, unsigned long start_time) +void generic_start_precise_io_acct(struct request_queue *q, int op, + struct hd_struct *part) { - unsigned long duration = jiffies - start_time; + __generic_start_io_acct(q, op, 0, part, true); +} +EXPORT_SYMBOL(generic_start_precise_io_acct); + +static void __generic_end_io_acct(struct request_queue *q, int req_op, + struct hd_struct *part, + unsigned long start_time, + unsigned long sectors, bool precise) +{ + unsigned long now = jiffies; + unsigned long duration = now - start_time; const int sgrp = op_stat_group(req_op); int cpu = part_stat_lock(); + if (precise_iostat) { + part_round_stats(q, cpu, part); + } else { + update_io_ticks(cpu, part, now, true); + part_stat_add(cpu, part, time_in_queue, duration); + } + if (precise) { + part_stat_inc(cpu, part, 
ios[sgrp]); + part_stat_add(cpu, part, sectors[sgrp], sectors); + } part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration)); - part_round_stats(q, cpu, part); part_dec_in_flight(q, part, op_is_write(req_op)); part_stat_unlock(); } + +void generic_end_io_acct(struct request_queue *q, int req_op, + struct hd_struct *part, unsigned long start_time) +{ + __generic_end_io_acct(q, req_op, part, start_time, 0, false); +} EXPORT_SYMBOL(generic_end_io_acct); +void generic_end_precise_io_acct(struct request_queue *q, int req_op, + struct hd_struct *part, + unsigned long start_time, + unsigned long sectors) +{ + __generic_end_io_acct(q, req_op, part, start_time, sectors, true); +} +EXPORT_SYMBOL(generic_end_precise_io_acct); + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE void bio_flush_dcache_pages(struct bio *bi) { @@ -1745,7 +1879,7 @@ void bio_endio(struct bio *bio) if (!bio_integrity_endio(bio)) return; - if (bio->bi_disk) + if (bio->bi_disk && bio_flagged(bio, BIO_TRACKED)) rq_qos_done_bio(bio->bi_disk->queue, bio); /* diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c630e02836a80d7d406778208c659aebda8fcf06..c0187bf00f714c63671d0459d7d4125f54ec4b26 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -364,16 +364,31 @@ static void blkg_destroy(struct blkcg_gq *blkg) */ static void blkg_destroy_all(struct request_queue *q) { +#define BLKG_DESTROY_BATCH 4096 struct blkcg_gq *blkg, *n; + int count; lockdep_assert_held(q->queue_lock); +again: + count = BLKG_DESTROY_BATCH; list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { struct blkcg *blkcg = blkg->blkcg; spin_lock(&blkcg->lock); blkg_destroy(blkg); spin_unlock(&blkcg->lock); + /* + * If the list is too long, the loop can take a long time, + * so release the lock for a while after a batch of blkgs + * has been destroyed. + */ + if (!--count) { + spin_unlock_irq(q->queue_lock); + cond_resched(); + spin_lock_irq(q->queue_lock); + goto again; + } } q->root_blkg = NULL; @@ -473,8 +488,11 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css, const char *blkg_dev_name(struct blkcg_gq *blkg) { /* some drivers (floppy) instantiate a queue w/o disk registered */ - if (blkg->q->backing_dev_info->dev) - return dev_name(blkg->q->backing_dev_info->dev); + struct rcu_device *rcu_dev; + + rcu_dev = rcu_dereference(blkg->q->backing_dev_info->rcu_dev); + if (rcu_dev) + return dev_name(&rcu_dev->dev); return NULL; } EXPORT_SYMBOL_GPL(blkg_dev_name); @@ -839,6 +857,14 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, q = disk->queue; + /* + * blkcg_deactivate_policy() requires the queue to be frozen, so we grab + * q_usage_counter to prevent races with blkcg_deactivate_policy().
+ */ + ret = blk_queue_enter(q, 0); + if (ret) + goto fail; + rcu_read_lock(); spin_lock_irq(q->queue_lock); @@ -873,7 +899,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, new_blkg = blkg_alloc(pos, q, GFP_KERNEL); if (unlikely(!new_blkg)) { ret = -ENOMEM; - goto fail; + goto fail_exit_queue; + } + + if (radix_tree_preload(GFP_KERNEL)) { + blkg_free(new_blkg); + ret = -ENOMEM; + goto fail_exit_queue; } rcu_read_lock(); @@ -882,7 +914,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, blkg = blkg_lookup_check(pos, pol, q); if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); - goto fail_unlock; + blkg_free(new_blkg); + goto fail_preloaded; } if (blkg) { @@ -891,22 +924,29 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, blkg = blkg_create(pos, q, new_blkg); if (unlikely(IS_ERR(blkg))) { ret = PTR_ERR(blkg); - goto fail_unlock; + goto fail_preloaded; } } + radix_tree_preload_end(); + if (pos == blkcg) goto success; } success: + blk_queue_exit(q); ctx->disk = disk; ctx->blkg = blkg; ctx->body = body; return 0; +fail_preloaded: + radix_tree_preload_end(); fail_unlock: spin_unlock_irq(q->queue_lock); rcu_read_unlock(); +fail_exit_queue: + blk_queue_exit(q); fail: put_disk_and_module(disk); /* @@ -955,9 +995,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) int i; bool has_stats = false; + spin_lock_irq(blkg->q->queue_lock); + + if (!blkg->online) + goto skip; + dname = blkg_dev_name(blkg); if (!dname) - continue; + goto skip; /* * Hooray string manipulation, count is the size written NOT @@ -967,8 +1012,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) */ off += scnprintf(buf+off, size-off, "%s ", dname); - spin_lock_irq(blkg->q->queue_lock); - rwstat = blkg_rwstat_recursive_sum(blkg, NULL, offsetof(struct blkcg_gq, stat_bytes)); rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]); @@ -981,8 +1024,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]); dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]); - spin_unlock_irq(blkg->q->queue_lock); - if (rbytes || wbytes || rios || wios) { has_stats = true; off += scnprintf(buf+off, size-off, @@ -1016,9 +1057,15 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) } next: if (has_stats) { - off += scnprintf(buf+off, size-off, "\n"); - seq_commit(sf, off); + if (off < size - 1) { + off += scnprintf(buf+off, size-off, "\n"); + seq_commit(sf, off); + } else { + seq_commit(sf, -1); + } } + skip: + spin_unlock_irq(blkg->q->queue_lock); } rcu_read_unlock(); @@ -1278,6 +1325,14 @@ void blkcg_drain_queue(struct request_queue *q) if (!q->root_blkg) return; + /* + * @q could be exiting and q->td has not been initialized. + * If so, don't need drain any throttled bios. 
+ */ +#ifdef CONFIG_BLK_DEV_THROTTLING + if (!q->td) + return; +#endif blk_throtl_drain(q); } @@ -1473,12 +1528,16 @@ void blkcg_deactivate_policy(struct request_queue *q, __clear_bit(pol->plid, q->blkcg_pols); list_for_each_entry(blkg, &q->blkg_list, q_node) { + struct blkcg *blkcg = blkg->blkcg; + + spin_lock(&blkcg->lock); if (blkg->pd[pol->plid]) { if (pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } + spin_unlock(&blkcg->lock); } spin_unlock_irq(q->queue_lock); @@ -1670,7 +1729,7 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) */ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) { - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); u64 exp; u64 delay_nsec = 0; int tok; diff --git a/block/blk-core.c b/block/blk-core.c index cff0a60ee20066c2fc2d7c4fb2da0bc9ea7c50da..835496adf694e86c2d8eb8177edaa07f6e31f726 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -43,10 +43,9 @@ #include "blk-mq.h" #include "blk-mq-sched.h" #include "blk-rq-qos.h" +#include "blk-io-hierarchy/stats.h" -#ifdef CONFIG_DEBUG_FS struct dentry *blk_debugfs_root; -#endif EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); @@ -56,6 +55,20 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); DEFINE_IDA(blk_queue_ida); +bool precise_iostat = true; +static int __init precise_iostat_setup(char *str) +{ + bool precise; + + if (!strtobool(str, &precise)) { + precise_iostat = precise; + pr_info("precise iostat %d\n", precise_iostat); + } + + return 1; +} +__setup("precise_iostat=", precise_iostat_setup); + /* * For the allocated request tables */ @@ -71,6 +84,250 @@ struct kmem_cache *blk_requestq_cachep; */ static struct workqueue_struct *kblockd_workqueue; +#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC +#include + +#define BIO_DISPATCH_MAX_LOOP 16 +/* the minimum of cpus that dispatch async can be enabled */ +#define MIN_DISPATCH_ASYNC_CPUS 16 + +/* prevent false sharing */ +#define BIO_ASYNC_LIST_SHIFT 2 +#define BIO_ASYNC_LOCK_SHIFT 4 +#define bio_async_list(ctl, i) (&(ctl)->list[(i) << BIO_ASYNC_LIST_SHIFT]) +#define bio_async_lock(ctl, i) (&(ctl)->lock[(i) << BIO_ASYNC_LOCK_SHIFT]) + +struct bio_dispatch_async_ctl { + /* + * Vector size is nr_cpu_ids, list stores bio dispatched from other cpu, + * such bio will be dispatched asynchronously to the cpu this structure + * is serviced. 
+ */ + struct bio_list *list; + /* list is protected by lock */ + spinlock_t *lock; + /* kthread to dispatch bio asynchronously */ + struct task_struct *thread; + /* thread will wait here if there are no bios in list */ + wait_queue_head_t wait; +}; + +static struct bio_dispatch_async_ctl __percpu **bio_dispatch_async_ctl; + +static int blk_alloc_queue_dispatch_async(struct request_queue *q) +{ + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q); + int cpu; + + q_wrapper->last_dispatch_cpu = alloc_percpu(int); + if (!q_wrapper->last_dispatch_cpu) + return -ENOMEM; + + cpumask_setall(&q_wrapper->dispatch_async_cpus); + for_each_possible_cpu(cpu) { + *per_cpu_ptr(q_wrapper->last_dispatch_cpu, cpu) = cpu; + } + + return 0; +} + +void blk_free_queue_dispatch_async(struct request_queue *q) +{ + free_percpu(queue_to_wrapper(q)->last_dispatch_cpu); +} + +static int collect_bio(struct bio_dispatch_async_ctl *ctl, + struct bio_list *list) +{ + int count = 0; + int cpu; + struct bio *bio; + + for_each_possible_cpu(cpu) { + spin_lock_irq(bio_async_lock(ctl, cpu)); + while ((bio = bio_list_pop(bio_async_list(ctl, cpu)))) { + bio_list_add(list, bio); + count++; + } + spin_unlock_irq(bio_async_lock(ctl, cpu)); + } + + return count; +} + +static int bio_dispatch_work(void *data) +{ + int loop_count = 0; + int cpu = smp_processor_id(); + struct bio_dispatch_async_ctl *ctl = + *per_cpu_ptr(bio_dispatch_async_ctl, cpu); + + for (;; loop_count++) { + struct bio_list bio_list_on_stack; + struct blk_plug plug; + struct bio *bio; + int count; + + bio_list_init(&bio_list_on_stack); + count = collect_bio(ctl, &bio_list_on_stack); + + if (!count) { + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&ctl->wait, &wait, + TASK_INTERRUPTIBLE); + count = collect_bio(ctl, &bio_list_on_stack); + if (count) + break; + schedule(); + loop_count = 0; + } + finish_wait(&ctl->wait, &wait); + + } + + blk_start_plug(&plug); + while ((bio = bio_list_pop(&bio_list_on_stack))) { + struct request_queue *q = bio->bi_disk->queue; + + q->make_request_fn(q, bio); + } + blk_finish_plug(&plug); + + /* prevent soft lockup */ + if (loop_count >= BIO_DISPATCH_MAX_LOOP) { + loop_count = 0; + cond_resched(); + } + } + + return 0; +} + +static int get_dispatch_cpu(struct request_queue *q, int cpu) +{ + int *last_dispatch_cpu = + per_cpu_ptr(queue_to_wrapper(q)->last_dispatch_cpu, cpu); + struct cpumask *dispatch_async_cpus = + &queue_to_wrapper(q)->dispatch_async_cpus; + + cpu = cpumask_next(*last_dispatch_cpu, dispatch_async_cpus); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(dispatch_async_cpus); + + *last_dispatch_cpu = cpu; + + return cpu; +} + +static void blk_queue_make_request_async(struct bio *bio) +{ + struct request_queue *q = bio->bi_disk->queue; + int cpu = smp_processor_id(); + int dispatch_cpu = get_dispatch_cpu(q, cpu); + struct bio_dispatch_async_ctl *ctl = + *per_cpu_ptr(bio_dispatch_async_ctl, dispatch_cpu); + + spin_lock_irq(bio_async_lock(ctl, cpu)); + bio_list_add(bio_async_list(ctl, cpu), bio); + spin_unlock_irq(bio_async_lock(ctl, cpu)); + + if (wq_has_sleeper(&ctl->wait)) + wake_up(&ctl->wait); +} + +static blk_qc_t blk_queue_do_make_request(struct bio *bio) +{ + struct request_queue *q = bio->bi_disk->queue; + int cpu = smp_processor_id(); + + /* + * Don't dispatch bio asynchronously in following cases: + * + * 1) QUEUE_FLAG_DISPATCH_ASYNC is not set; + * 2) current cpu is the target cpu; + * 3) bio is flagged no wait; + * 4) TODO: return value of submit_bio() will be used in io polling. 
+ */ + if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags) || + cpumask_test_cpu(cpu, &queue_to_wrapper(q)->dispatch_async_cpus) || + bio->bi_opf & REQ_NOWAIT) + return q->make_request_fn(q, bio); + + /* return value is not concerned */ + blk_queue_make_request_async(bio); + return BLK_QC_T_NONE; +} + +static void init_blk_queue_async_dispatch(void) +{ + int cpu; + + bio_dispatch_async_ctl = alloc_percpu(struct bio_dispatch_async_ctl *); + if (!bio_dispatch_async_ctl) + panic("Failed to alloc bio_dispatch_async_ctl\n"); + + for_each_possible_cpu(cpu) { + int i; + struct bio_dispatch_async_ctl *ctl = + kmalloc(sizeof(struct bio_dispatch_async_ctl), + GFP_KERNEL | __GFP_NOFAIL); + + *per_cpu_ptr(bio_dispatch_async_ctl, cpu) = ctl; + + ctl->thread = + kthread_create_on_cpu(bio_dispatch_work, NULL, cpu, + "bio_dispatch_work_%u"); + if (IS_ERR_OR_NULL(ctl->thread)) + panic("Failed to create bio dispatch thread\n"); + + ctl->list = kmalloc_array(nr_cpu_ids, + sizeof(struct bio_list) << BIO_ASYNC_LIST_SHIFT, + GFP_KERNEL | __GFP_NOFAIL); + ctl->lock = kmalloc_array(nr_cpu_ids, + sizeof(spinlock_t) << BIO_ASYNC_LOCK_SHIFT, + GFP_KERNEL | __GFP_NOFAIL); + for (i = 0; i < nr_cpu_ids; ++i) { + bio_list_init(bio_async_list(ctl, i)); + spin_lock_init(bio_async_lock(ctl, i)); + } + + wake_up_process(ctl->thread); + init_waitqueue_head(&ctl->wait); + } +} + +void queue_init_dispatch_async_cpus(struct request_queue *q, int node) +{ + struct cpumask *dispatch_async_cpus = + &queue_to_wrapper(q)->dispatch_async_cpus; + + arch_get_preferred_sibling_cpumask(node, dispatch_async_cpus); + if (cpumask_weight(dispatch_async_cpus) >= MIN_DISPATCH_ASYNC_CPUS) + blk_queue_flag_set(QUEUE_FLAG_DISPATCH_ASYNC, q); + else + cpumask_setall(dispatch_async_cpus); +} +EXPORT_SYMBOL_GPL(queue_init_dispatch_async_cpus); +#else +static int blk_alloc_queue_dispatch_async(struct request_queue *q) +{ + return 0; +} + +static blk_qc_t blk_queue_do_make_request(struct bio *bio) +{ + struct request_queue *q = bio->bi_disk->queue; + + return q->make_request_fn(q, bio); +} + +static void init_blk_queue_async_dispatch(void) +{ +} +#endif + /** * blk_queue_flag_set - atomically set a queue flag * @flag: flag to be set @@ -183,7 +440,7 @@ void blk_queue_congestion_threshold(struct request_queue *q) q->nr_congestion_off = nr; } -void blk_rq_init(struct request_queue *q, struct request *rq) +void __blk_rq_init(struct request_queue *q, struct request *rq) { memset(rq, 0, sizeof(*rq)); @@ -196,9 +453,15 @@ void blk_rq_init(struct request_queue *q, struct request *rq) RB_CLEAR_NODE(&rq->rb_node); rq->tag = -1; rq->internal_tag = -1; - rq->start_time_ns = ktime_get_ns(); + rq->start_time_ns = blk_time_get_ns(); rq->part = NULL; } + +void blk_rq_init(struct request_queue *q, struct request *rq) +{ + __blk_rq_init(q, rq); + refcount_set(&rq->ref, 1); +} EXPORT_SYMBOL(blk_rq_init); static const struct { @@ -273,8 +536,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio, bio_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ - if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) + if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) { + req_bio_hierarchy_end(rq, bio); bio_endio(bio); + } } void blk_dump_rq_flags(struct request *rq, char *msg) @@ -407,38 +672,31 @@ void blk_sync_queue(struct request_queue *q) del_timer_sync(&q->timeout); cancel_work_sync(&q->timeout_work); - if (q->mq_ops) { - struct blk_mq_hw_ctx *hctx; - int i; - - 
cancel_delayed_work_sync(&q->requeue_work); - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->run_work); - } else { + if (!q->mq_ops) cancel_delayed_work_sync(&q->delay_work); - } } EXPORT_SYMBOL(blk_sync_queue); /** - * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY + * blk_set_pm_only - increment pm_only counter * @q: request queue pointer - * - * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not - * set and 1 if the flag was already set. */ -int blk_set_preempt_only(struct request_queue *q) +void blk_set_pm_only(struct request_queue *q) { - return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q); + atomic_inc(&q->pm_only); } -EXPORT_SYMBOL_GPL(blk_set_preempt_only); +EXPORT_SYMBOL_GPL(blk_set_pm_only); -void blk_clear_preempt_only(struct request_queue *q) +void blk_clear_pm_only(struct request_queue *q) { - blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q); - wake_up_all(&q->mq_freeze_wq); + int pm_only; + + pm_only = atomic_dec_return(&q->pm_only); + WARN_ON_ONCE(pm_only < 0); + if (pm_only == 0) + wake_up_all(&q->mq_freeze_wq); } -EXPORT_SYMBOL_GPL(blk_clear_preempt_only); +EXPORT_SYMBOL_GPL(blk_clear_pm_only); /** * __blk_run_queue_uncond - run a queue whether or not it has been stopped @@ -744,6 +1002,15 @@ void blk_exit_queue(struct request_queue *q) bdi_put(q->backing_dev_info); } +static void blk_mq_unregister_default_hierarchy(struct request_queue *q) +{ + blk_mq_unregister_hierarchy(q, STAGE_GETTAG); + blk_mq_unregister_hierarchy(q, STAGE_PLUG); + blk_mq_unregister_hierarchy(q, STAGE_HCTX); + blk_mq_unregister_hierarchy(q, STAGE_REQUEUE); + blk_mq_unregister_hierarchy(q, STAGE_RQ_DRIVER); +} + /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown @@ -783,6 +1050,19 @@ void blk_cleanup_queue(struct request_queue *q) * prevent that q->request_fn() gets invoked after draining finished. */ blk_freeze_queue(q); + + /* + * All throttled io will never be issued after blk_throtl_exit(), which + * will lead to hung. Thus we need to issue them before calling + * blk_throtl_exit() from blk_exit_queue(). + */ + if (q->mq_ops) { + spin_lock_irq(lock); + blkcg_drain_queue(q); + spin_unlock_irq(lock); + } + rq_qos_exit(q); + spin_lock_irq(lock); queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); @@ -793,12 +1073,14 @@ void blk_cleanup_queue(struct request_queue *q) * dispatch may still be in-progress since we dispatch requests * from more than one contexts. * - * No need to quiesce queue if it isn't initialized yet since - * blk_freeze_queue() should be enough for cases of passthrough - * request. + * We rely on driver to deal with the race in case that queue + * initialization isn't done. If driver cannot deal the race, + * we try to call quiesce in kernel for these drivers that have + * set QUEUE_FLAG_FORECE_QUIESCE flag. 
*/ - if (q->mq_ops && blk_queue_init_done(q)) - blk_mq_quiesce_queue(q); + if (q->mq_ops && (blk_queue_init_done(q) || + test_bit(QUEUE_FLAG_FORECE_QUIESCE, &q->queue_flags))) + blk_mq_quiesce_queue_internal(q); /* for synchronous bio-based driver finish in-flight integrity i/o */ blk_flush_integrity(); @@ -815,8 +1097,11 @@ void blk_cleanup_queue(struct request_queue *q) blk_exit_queue(q); - if (q->mq_ops) - blk_mq_free_queue(q); + if (q->mq_ops) { + blk_mq_unregister_default_hierarchy(q); + blk_mq_cancel_work_sync(q); + blk_mq_exit_queue(q); + } percpu_ref_exit(&q->q_usage_counter); spin_lock_irq(lock); @@ -889,8 +1174,11 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q, if (!rl->rq_pool) return -ENOMEM; - if (rl != &q->root_rl) - WARN_ON_ONCE(!blk_get_queue(q)); + if (rl != &q->root_rl && !blk_get_queue(q)) { + mempool_destroy(rl->rq_pool); + rl->rq_pool = NULL; + return -ENODEV; + } return 0; } @@ -917,7 +1205,7 @@ EXPORT_SYMBOL(blk_alloc_queue); */ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) { - const bool preempt = flags & BLK_MQ_REQ_PREEMPT; + const bool pm = flags & BLK_MQ_REQ_PREEMPT; while (true) { bool success = false; @@ -925,11 +1213,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) rcu_read_lock(); if (percpu_ref_tryget_live(&q->q_usage_counter)) { /* - * The code that sets the PREEMPT_ONLY flag is - * responsible for ensuring that that flag is globally - * visible before the queue is unfrozen. + * The code that increments the pm_only counter is + * responsible for ensuring that that counter is + * globally visible before the queue is unfrozen. */ - if (preempt || !blk_queue_preempt_only(q)) { + if (pm || !blk_queue_pm_only(q)) { success = true; } else { percpu_ref_put(&q->q_usage_counter); @@ -953,8 +1241,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) smp_rmb(); wait_event(q->mq_freeze_wq, - (atomic_read(&q->mq_freeze_depth) == 0 && - (preempt || !blk_queue_preempt_only(q))) || + (!queue_to_wrapper(q)->mq_freeze_depth && + (pm || !blk_queue_pm_only(q))) || blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; @@ -981,6 +1269,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } +static void blk_timeout_work_empty(struct work_struct *work) +{ +} + /** * blk_alloc_queue_node - allocate a request queue * @gfp_mask: memory allocation flags @@ -998,21 +1290,26 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, spinlock_t *lock) { struct request_queue *q; + struct request_queue_wrapper *q_wrapper; int ret; - q = kmem_cache_alloc_node(blk_requestq_cachep, - gfp_mask | __GFP_ZERO, node_id); - if (!q) + q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep, + gfp_mask | __GFP_ZERO, node_id); + if (!q_wrapper) return NULL; + q = &q_wrapper->q; INIT_LIST_HEAD(&q->queue_head); q->last_merge = NULL; q->end_sector = 0; q->boundary_rq = NULL; + if (blk_alloc_queue_dispatch_async(q)) + goto fail_q; + q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); if (q->id < 0) - goto fail_q; + goto fail_dispatch_async; ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); if (ret) @@ -1028,6 +1325,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, q->backing_dev_info->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; + q->backing_dev_info->io_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; 
q->backing_dev_info->name = "block"; q->node = node_id; @@ -1035,7 +1334,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); - INIT_WORK(&q->timeout_work, NULL); + INIT_WORK(&q->timeout_work, blk_timeout_work_empty); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); #ifdef CONFIG_BLK_CGROUP @@ -1045,14 +1344,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, kobject_init(&q->kobj, &blk_queue_ktype); -#ifdef CONFIG_BLK_DEV_IO_TRACE - mutex_init(&q->blk_trace_mutex); -#endif + mutex_init(&q->debugfs_mutex); mutex_init(&q->sysfs_lock); + mutex_init(&q_wrapper->sysfs_dir_lock); spin_lock_init(&q->__queue_lock); - if (!q->mq_ops) - q->queue_lock = lock ? : &q->__queue_lock; + q->queue_lock = lock ? : &q->__queue_lock; /* * A queue starts its life with bypass turned on to avoid @@ -1064,6 +1361,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); + mutex_init(&q_wrapper->mq_freeze_lock); /* * Init percpu_ref in atomic mode so that it's faster to shutdown. @@ -1089,8 +1387,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, bioset_exit(&q->bio_split); fail_id: ida_simple_remove(&blk_queue_ida, q->id); +fail_dispatch_async: + blk_free_queue_dispatch_async(q); fail_q: - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, q_wrapper); return NULL; } EXPORT_SYMBOL(blk_alloc_queue_node); @@ -1160,7 +1460,7 @@ int blk_init_allocated_queue(struct request_queue *q) { WARN_ON_ONCE(q->mq_ops); - q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size); + q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL); if (!q->fq) return -ENOMEM; @@ -1354,6 +1654,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op, const bool is_sync = op_is_sync(op); int may_queue; req_flags_t rq_flags = RQF_ALLOCED; + char dname[BDI_DEV_NAME_LEN]; lockdep_assert_held(q->queue_lock); @@ -1475,8 +1776,9 @@ static struct request *__get_request(struct request_list *rl, unsigned int op, * shouldn't stall IO. Treat this request as !elvpriv. This will * disturb iosched and blkcg but weird is bettern than dead. 
*/ + bdi_get_dev_name(q->backing_dev_info, dname, BDI_DEV_NAME_LEN); printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", - __func__, dev_name(q->backing_dev_info->dev)); + __func__, dname); rq->rq_flags &= ~RQF_ELVPRIV; rq->elv.icq = NULL; @@ -1674,8 +1976,10 @@ static void part_round_stats_single(struct request_queue *q, int cpu, unsigned int inflight) { if (inflight) { - __part_stat_add(cpu, part, time_in_queue, - inflight * (now - part->stamp)); + if (precise_iostat) { + __part_stat_add(cpu, part, time_in_queue, + inflight * (now - part->stamp)); + } __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; @@ -1811,7 +2115,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = bio; req->biotail = bio; req->__data_len += bio->bi_iter.bi_size; - req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); + blk_rq_update_bi_alloc_time(req, bio, NULL); blk_account_io_start(req, false); return true; @@ -1835,7 +2139,7 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; - req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); + blk_rq_update_bi_alloc_time(req, bio, NULL); blk_account_io_start(req, false); return true; @@ -1855,8 +2159,8 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = bio; req->biotail = bio; req->__data_len += bio->bi_iter.bi_size; - req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); req->nr_phys_segments = segments + 1; + blk_rq_update_bi_alloc_time(req, bio, NULL); blk_account_io_start(req, false); return true; @@ -1969,18 +2273,11 @@ unsigned int blk_plug_queued_count(struct request_queue *q) void blk_init_request_from_bio(struct request *req, struct bio *bio) { - struct io_context *ioc = rq_ioc(bio); - if (bio->bi_opf & REQ_RAHEAD) req->cmd_flags |= REQ_FAILFAST_MASK; req->__sector = bio->bi_iter.bi_sector; - if (ioprio_valid(bio_prio(bio))) - req->ioprio = bio_prio(bio); - else if (ioc) - req->ioprio = ioc->ioprio; - else - req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + req->ioprio = bio_prio(bio); req->write_hint = bio->bi_write_hint; blk_rq_bio_prep(req->q, req, bio); } @@ -2328,6 +2625,12 @@ generic_make_request_checks(struct bio *bio) */ create_io_context(GFP_ATOMIC, q->node); + /* + * On the one hand REQ_PREFLUSH | REQ_FUA can be cleared above, on the + * other hand it doesn't make sense to count invalid bio. Split bio will + * be accounted separately. 
+ */ + bio_hierarchy_start(bio); if (!blkcg_bio_issue_check(q, bio)) return false; @@ -2443,10 +2746,8 @@ blk_qc_t generic_make_request(struct bio *bio) flags = 0; if (bio->bi_opf & REQ_NOWAIT) flags = BLK_MQ_REQ_NOWAIT; - if (blk_queue_enter(q, flags) < 0) { + if (blk_queue_enter(q, flags) < 0) enter_succeeded = false; - q = NULL; - } } if (enter_succeeded) { @@ -2455,7 +2756,7 @@ blk_qc_t generic_make_request(struct bio *bio) /* Create a fresh bio_list for all subordinate requests */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - ret = q->make_request_fn(q, bio); + ret = blk_queue_do_make_request(bio); /* sort new bios into those for a lower level * and those for the same level @@ -2477,6 +2778,7 @@ blk_qc_t generic_make_request(struct bio *bio) bio_wouldblock_error(bio); else bio_io_error(bio); + q = NULL; } bio = bio_list_pop(&bio_list_on_stack[0]); } while (bio); @@ -2595,12 +2897,27 @@ EXPORT_SYMBOL_GPL(blk_poll); * limits when retrying requests on other queues. Those requests need * to be checked against the new queue limits again during dispatch. */ -static int blk_cloned_rq_check_limits(struct request_queue *q, +static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q, struct request *rq) { - if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { + unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); + + if (blk_rq_sectors(rq) > max_sectors) { + /* + * SCSI device does not have a good way to return if + * Write Same/Zero is actually supported. If a device rejects + * a non-read/write command (discard, write same,etc.) the + * low-level device driver will set the relevant queue limit to + * 0 to prevent blk-lib from issuing more of the offending + * operations. Commands queued prior to the queue limit being + * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O + * errors being propagated to upper layers. + */ + if (max_sectors == 0) + return BLK_STS_NOTSUPP; + printk(KERN_ERR "%s: over max size limit.\n", __func__); - return -EIO; + return BLK_STS_IOERR; } /* @@ -2612,24 +2929,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, blk_recalc_rq_segments(rq); if (rq->nr_phys_segments > queue_max_segments(q)) { printk(KERN_ERR "%s: over max segments limit.\n", __func__); - return -EIO; + return BLK_STS_IOERR; } - return 0; + return BLK_STS_OK; } /** * blk_insert_cloned_request - Helper for stacking drivers to submit a request * @q: the queue to submit the request * @rq: the request being queued + * @precise: true if io account with start and done will be balanced */ -blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) +blk_status_t __blk_insert_cloned_request(struct request_queue *q, + struct request *rq, bool precise) { unsigned long flags; int where = ELEVATOR_INSERT_BACK; - if (blk_cloned_rq_check_limits(q, rq)) - return BLK_STS_IOERR; + blk_status_t ret; + + ret = blk_cloned_rq_check_limits(q, rq); + if (ret != BLK_STS_OK) + return ret; if (rq->rq_disk && should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) @@ -2643,7 +2965,16 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * * bypass a potential scheduler on the bottom device for * insert. 
*/ - return blk_mq_request_issue_directly(rq); + ret = blk_mq_request_issue_directly(rq); + if (ret && precise) { + u64 now = 0; + + if (blk_mq_need_time_stamp(rq)) + now = blk_time_get_ns(); + + blk_account_io_done(rq, now); + } + return ret; } spin_lock_irqsave(q->queue_lock, flags); @@ -2668,6 +2999,13 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * return BLK_STS_OK; } +EXPORT_SYMBOL_GPL(__blk_insert_cloned_request); + +blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq) +{ + return __blk_insert_cloned_request(q, rq, false); +} EXPORT_SYMBOL_GPL(blk_insert_cloned_request); /** @@ -2740,9 +3078,15 @@ void blk_account_io_done(struct request *req, u64 now) cpu = part_stat_lock(); part = req->part; + if (!precise_iostat) { + update_io_ticks(cpu, part, jiffies, true); + part_stat_add(cpu, part, time_in_queue, + nsecs_to_jiffies64(now - req->start_time_ns)); + } else { + part_round_stats(req->q, cpu, part); + } part_stat_inc(cpu, part, ios[sgrp]); part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns); - part_round_stats(req->q, cpu, part); part_dec_in_flight(req->q, part, rq_data_dir(req)); hd_struct_put(part); @@ -2790,19 +3134,10 @@ void blk_account_io_start(struct request *rq, bool new_io) part_stat_inc(cpu, part, merges[rw]); } else { part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); - if (!hd_struct_try_get(part)) { - /* - * The partition is already being removed, - * the request will be accounted on the disk only - * - * We take a reference on disk->part0 although that - * partition will never be deleted, so we can treat - * it as any other partition. - */ - part = &rq->rq_disk->part0; - hd_struct_get(part); - } - part_round_stats(rq->q, cpu, part); + if (!precise_iostat) + update_io_ticks(cpu, part, jiffies, false); + else + part_round_stats(rq->q, cpu, part); part_inc_in_flight(rq->q, part, rw); rq->part = part; } @@ -2987,7 +3322,7 @@ void blk_start_request(struct request *req) blk_dequeue_request(req); if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { - req->io_start_time_ns = ktime_get_ns(); + req->io_start_time_ns = blk_time_get_ns(); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW req->throtl_size = blk_rq_sectors(req); #endif @@ -3192,7 +3527,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); void blk_finish_request(struct request *req, blk_status_t error) { struct request_queue *q = req->q; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns(); lockdep_assert_held(req->q->queue_lock); WARN_ON_ONCE(q->mq_ops); @@ -3410,6 +3745,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; + blk_rq_update_bi_alloc_time(rq, bio, NULL); if (bio->bi_disk) rq->rq_disk = bio->bi_disk; @@ -3606,6 +3942,7 @@ void blk_start_plug(struct blk_plug *plug) * Store ordering should not be needed here, since a potential * preempt will imply a full memory barrier */ + tsk->_resvd->cur_ktime = 0; tsk->plug = plug; } EXPORT_SYMBOL(blk_start_plug); @@ -3743,6 +4080,9 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (q) queue_unplugged(q, depth, from_schedule); + + current->_resvd->cur_ktime = 0; + current->flags &= ~PF_BLOCK_TS; } void blk_finish_plug(struct blk_plug *plug) @@ -3958,11 +4298,12 @@ int __init blk_dev_init(void) sizeof(struct request), 0, SLAB_PANIC, NULL); blk_requestq_cachep = kmem_cache_create("request_queue", - sizeof(struct request_queue), 0, SLAB_PANIC, NULL); + sizeof(struct 
request_queue_wrapper), 0, SLAB_PANIC, + NULL); + + init_blk_queue_async_dispatch(); -#ifdef CONFIG_DEBUG_FS blk_debugfs_root = debugfs_create_dir("block", NULL); -#endif return 0; } diff --git a/block/blk-flush.c b/block/blk-flush.c index ce41f666de3e1d068e78698349c1629d7e118bc8..e788e5513c9e0d8729fd724c57bf972214fac701 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -75,6 +75,7 @@ #include "blk-mq.h" #include "blk-mq-tag.h" #include "blk-mq-sched.h" +#include "blk-io-hierarchy/stats.h" /* PREFLUSH/FUA sequences */ enum { @@ -187,6 +188,7 @@ static bool blk_flush_complete_seq(struct request *rq, if (list_empty(pending)) fq->flush_pending_since = jiffies; list_move_tail(&rq->flush.list, pending); + rq_hierarchy_start_io_acct(rq, STAGE_HCTX); break; case REQ_FSEQ_DATA: @@ -232,6 +234,26 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) /* release the tag's ownership to the req cloned from */ spin_lock_irqsave(&fq->mq_flush_lock, flags); + + if (!refcount_dec_and_test(&flush_rq->ref)) { + fq->rq_status = error; + spin_unlock_irqrestore(&fq->mq_flush_lock, flags); + return; + } + + /* + * Flush request has to be marked as IDLE when it is really ended + * because its .end_io() is called from timeout code path too for + * avoiding use-after-free. + */ + WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE); + blk_mq_put_alloc_task(flush_rq); + blk_rq_hierarchy_stats_complete(flush_rq); + if (fq->rq_status != BLK_STS_OK) { + error = fq->rq_status; + fq->rq_status = BLK_STS_OK; + } + hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu); if (!q->elevator) { blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); @@ -256,6 +278,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); + rq_hierarchy_end_io_acct(rq, STAGE_HCTX); queued |= blk_flush_complete_seq(rq, fq, seq, error); } @@ -279,6 +302,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } +bool is_flush_rq(struct request *rq) +{ + return rq->end_io == flush_end_io; +} + /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -324,7 +352,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, */ fq->flush_pending_idx ^= 1; - blk_rq_init(q, flush_rq); + __blk_rq_init(q, flush_rq); /* * In case of none scheduler, borrow tag from the first request @@ -355,6 +383,20 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; + blk_rq_hierarchy_stats_init(flush_rq); + blk_rq_init_bi_alloc_time(flush_rq, first_rq); + if (q->mq_ops) + blk_mq_get_alloc_task(flush_rq, first_rq->bio); + + /* + * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one + * implied in refcount_inc_not_zero() called from + * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref + * and READ flush_rq->end_io + */ + smp_wmb(); + refcount_set(&flush_rq->ref, 1); + return blk_flush_queue_rq(flush_rq, false); } @@ -416,6 +458,8 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) blk_mq_put_driver_tag_hctx(hctx, rq); } + blk_rq_hierarchy_set_flush_done(rq); + /* * After populating an empty queue, kick it to avoid stall. Read * the comment in flush_end_io(). 
@@ -424,7 +468,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); - blk_mq_run_hw_queue(hctx, true); + blk_mq_sched_restart(hctx); } /** @@ -485,7 +529,7 @@ void blk_insert_flush(struct request *rq) if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) - blk_mq_request_bypass_insert(rq, false); + blk_mq_request_bypass_insert(rq, false, false); else list_add_tail(&rq->queuelist, &q->queue_head); return; @@ -566,12 +610,13 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, EXPORT_SYMBOL(blkdev_issue_flush); struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, - int node, int cmd_size) + int node, int cmd_size, gfp_t flags) { struct blk_flush_queue *fq; - int rq_sz = sizeof(struct request); + struct request_wrapper *wrapper; + int rq_sz = sizeof(struct request) + sizeof(struct request_wrapper); - fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node); + fq = kzalloc_node(sizeof(*fq), flags, node); if (!fq) goto fail; @@ -579,10 +624,11 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, spin_lock_init(&fq->mq_flush_lock); rq_sz = round_up(rq_sz + cmd_size, cache_line_size()); - fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node); - if (!fq->flush_rq) + wrapper = kzalloc_node(rq_sz, flags, node); + if (!wrapper) goto fail_rq; + fq->flush_rq = (struct request *)(wrapper + 1); INIT_LIST_HEAD(&fq->flush_queue[0]); INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); @@ -601,6 +647,6 @@ void blk_free_flush_queue(struct blk_flush_queue *fq) if (!fq) return; - kfree(fq->flush_rq); + kfree(request_to_wrapper(fq->flush_rq)); kfree(fq); } diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 6121611e1316420372ce510300321532e9c76ef0..0b01fa972f1fc5a757e5b2cd2d8754b28850ee74 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -431,8 +431,15 @@ EXPORT_SYMBOL(blk_integrity_register); */ void blk_integrity_unregister(struct gendisk *disk) { + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return; + + /* ensure all bios are off the integrity workqueue */ + blk_flush_integrity(); disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; - memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); + memset(bi, 0, sizeof(*bi)); } EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-io-hierarchy/Kconfig b/block/blk-io-hierarchy/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..01019f6aa4252cb9fe3c6c85e5a8d49c96f21443 --- /dev/null +++ b/block/blk-io-hierarchy/Kconfig @@ -0,0 +1,145 @@ +# SPDX-License-Identifier: GPL-2.0-only + +menuconfig BLK_IO_HIERARCHY_STATS + bool "Enable hierarchy io stats" + default n + depends on BLK_DEBUG_FS=y + help + Enabling this lets the block layer to record additional information + in different io stages. Such information can be helpful to debug + performance and problems like io hang. + + If unsure, say N. + +if BLK_IO_HIERARCHY_STATS + +config HIERARCHY_BIO + bool "Support to record stats for bio lifetime" + default n + select BLK_BIO_ALLOC_TIME + help + Enabling this lets blk hierarchy stats to record additional information + for bio. Such information can be helpful to debug performance and + problems like io hang. + + If unsure, say N. 
+
+config HIERARCHY_IO_DUMP
+	bool "Support to dump io that is throttled"
+	default n
+	select BLK_BIO_ALLOC_TIME
+	select BLK_BIO_ALLOC_TASK
+	depends on BLK_DEV_IO_TRACE
+	help
+	  Enabling this will create new debugfs entries that show the user
+	  detailed information about IOs that have been submitted but are not
+	  done yet, and the user can filter the result by IO stage or IO
+	  latency.
+
+	  If unsure, say N.
+
+config HIERARCHY_THROTTLE
+	bool "Enable hierarchy stats layer blk-throttle"
+	default n
+	depends on BLK_DEV_THROTTLING=y
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for blk-throttle. Such information can be helpful to debug
+	  performance issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_WBT
+	bool "Enable hierarchy stats layer blk-wbt"
+	default n
+	depends on BLK_WBT
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for blk-wbt. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_GETTAG
+	bool "Enable hierarchy stats layer gettag"
+	default n
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for gettag. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_PLUG
+	bool "Enable hierarchy stats layer plug"
+	default n
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for plug. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_DEADLINE
+	bool "Enable hierarchy stats layer mq-deadline"
+	default n
+	depends on MQ_IOSCHED_DEADLINE
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for mq-deadline. Such information can be helpful to debug
+	  performance issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_BFQ
+	bool "Enable hierarchy stats layer bfq"
+	default n
+	depends on IOSCHED_BFQ
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for bfq. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_KYBER
+	bool "Enable hierarchy stats layer kyber"
+	default n
+	depends on MQ_IOSCHED_KYBER
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for kyber. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_HCTX
+	bool "Enable hierarchy stats layer hctx"
+	default n
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for hctx. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_REQUEUE
+	bool "Enable hierarchy stats layer requeue"
+	default n
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for requeue. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N.
+
+config HIERARCHY_RQ_DRIVER
+	bool "Enable hierarchy stats layer rq_driver"
+	default n
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for rq_driver. Such information can be helpful to debug performance
+	  issues and problems like io hang.
+
+	  If unsure, say N. 
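
For reference, a hypothetical config fragment that enables the core option plus a few of the per-stage options above might look like the following. This is illustrative only and assumes the listed dependencies (BLK_DEBUG_FS, BLK_DEV_IO_TRACE, BLK_DEV_THROTTLING) are available in the target tree; BLK_BIO_ALLOC_TIME and BLK_BIO_ALLOC_TASK are selected automatically and do not need to be set by hand::

	CONFIG_DEBUG_FS=y
	CONFIG_BLK_DEBUG_FS=y
	CONFIG_BLK_DEV_IO_TRACE=y
	CONFIG_BLK_DEV_THROTTLING=y
	CONFIG_BLK_IO_HIERARCHY_STATS=y
	CONFIG_HIERARCHY_BIO=y
	CONFIG_HIERARCHY_IO_DUMP=y
	CONFIG_HIERARCHY_THROTTLE=y
	CONFIG_HIERARCHY_HCTX=y
	CONFIG_HIERARCHY_RQ_DRIVER=y
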
+ +endif diff --git a/block/blk-io-hierarchy/Makefile b/block/blk-io-hierarchy/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9b989d379e5807a5827c62cf62269ec168084df8 --- /dev/null +++ b/block/blk-io-hierarchy/Makefile @@ -0,0 +1,8 @@ +# +# Make file for blk_io_hierarchy_stats +# + +obj-$(CONFIG_BLK_IO_HIERARCHY_STATS) += blk_io_hierarchy_stats.o + +blk_io_hierarchy_stats-y := stats.o debugfs.o +obj-$(CONFIG_HIERARCHY_IO_DUMP) += iodump.o diff --git a/block/blk-io-hierarchy/debugfs.c b/block/blk-io-hierarchy/debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..ba2f4af49d1d49aff1c1b3c8e2c224cb951631a1 --- /dev/null +++ b/block/blk-io-hierarchy/debugfs.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "../blk-mq-debugfs.h" +#include "stats.h" +#include "iodump.h" + +static const char *stage_name[NR_STAGE_GROUPS] = { +#ifdef CONFIG_HIERARCHY_THROTTLE + [STAGE_THROTTLE] = "throtl", +#endif +#ifdef CONFIG_HIERARCHY_WBT + [STAGE_WBT] = "wbt", +#endif +#ifdef CONFIG_HIERARCHY_GETTAG + [STAGE_GETTAG] = "gettag", +#endif +#ifdef CONFIG_HIERARCHY_PLUG + [STAGE_PLUG] = "plug", +#endif +#ifdef CONFIG_HIERARCHY_DEADLINE + [STAGE_DEADLINE] = "deadline", +#endif +#ifdef CONFIG_HIERARCHY_BFQ + [STAGE_BFQ] = "bfq", +#endif +#ifdef CONFIG_HIERARCHY_KYBER + [STAGE_KYBER] = "kyber", +#endif +#ifdef CONFIG_HIERARCHY_HCTX + [STAGE_HCTX] = "hctx", +#endif +#ifdef CONFIG_HIERARCHY_REQUEUE + [STAGE_REQUEUE] = "requeue", +#endif +#ifdef CONFIG_HIERARCHY_RQ_DRIVER + [STAGE_RQ_DRIVER] = "rq_driver", +#endif +#ifdef CONFIG_HIERARCHY_BIO + [STAGE_BIO] = "bio", +#endif +}; + +const char *hierarchy_stage_name(enum stage_group stage) +{ + return stage_name[stage]; +} + +static int __hierarchy_stats_show(struct hierarchy_stats_data *hstats_data, + struct seq_file *m, enum stage_group stage) +{ + u64 dispatched[NR_NEW_STAT_GROUPS] = {0}; + u64 completed[NR_NEW_STAT_GROUPS] = {0}; + u64 latency[NR_NEW_STAT_GROUPS] = {0}; + int cpu; + int i; + + for_each_possible_cpu(cpu) { + struct hierarchy_stats *stat = + per_cpu_ptr(hstats_data->hstats, cpu); + + for (i = 0; i < NR_NEW_STAT_GROUPS; ++i) { + dispatched[i] += stat->dispatched[i]; + completed[i] += stat->completed[i]; + latency[i] += stage_is_rq(stage) ? 
+ stat->jiffies[i] : stat->nsecs[i]; + } + } + + if (stage_is_rq(stage)) + for (i = 0; i < NR_NEW_STAT_GROUPS; ++i) + latency[i] = + jiffies_to_msecs(latency[i]) * NSEC_PER_MSEC; + + seq_printf(m, "%llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu", + dispatched[STAT_READ], completed[STAT_READ], + latency[STAT_READ], dispatched[STAT_WRITE], + completed[STAT_WRITE], latency[STAT_WRITE], + dispatched[STAT_DISCARD], completed[STAT_DISCARD], + latency[STAT_DISCARD], dispatched[STAT_FLUSH], + completed[STAT_FLUSH], latency[STAT_FLUSH]); + + hierarchy_show_slow_io(hstats_data, m); + seq_putc(m, '\n'); + return 0; +} + +static void *hierarchy_stats_start(struct seq_file *m, loff_t *pos) +{ + enum stage_group stage = *pos; + + if (stage < 0 || stage >= NR_STAGE_GROUPS) + return NULL; + + return pos; +} + +static void *hierarchy_stats_next(struct seq_file *m, void *v, loff_t *pos) +{ + enum stage_group stage = ++(*pos); + + if (stage >= 0 && stage < NR_STAGE_GROUPS) + return pos; + + return NULL; +} + +static void hierarchy_stats_stop(struct seq_file *m, void *v) +{ +} + +static int hierarchy_stats_show(struct seq_file *m, void *v) +{ + enum stage_group stage = (*(loff_t *)v); + struct blk_io_hierarchy_stats *stats = m->private; + struct hierarchy_stats_data *hstats_data = + get_hstats_data(stats, stage); + + if (!hstats_data) + return 0; + + seq_printf(m, "%s ", hierarchy_stage_name(stage)); + __hierarchy_stats_show(hstats_data, m, stage); + put_hstats_data(stats, hstats_data); + return 0; +} + +static const struct seq_operations hierarchy_stats_ops = { + .start = hierarchy_stats_start, + .next = hierarchy_stats_next, + .stop = hierarchy_stats_stop, + .show = hierarchy_stats_show, +}; + +static int hierarchy_stats_show_single(void *v, struct seq_file *m) +{ + struct hierarchy_stage *hstage = v; + + return __hierarchy_stats_show(hstage->hstats_data, m, hstage->stage); +} + +static const struct blk_mq_debugfs_attr hierarchy_debugfs_attrs[] = { + {"stats", 0400, hierarchy_stats_show_single}, + {}, +}; + +static const struct blk_mq_debugfs_attr hierarchy_stats_attr[] = { + {"stats", 0400, .seq_ops = &hierarchy_stats_ops}, + {}, +}; + +static void hierarchy_register_stage(struct blk_io_hierarchy_stats *stats, + enum stage_group stage) +{ + struct hierarchy_stage *hstage = stats->hstage[stage]; + struct dentry *dir; + + if (!stage_name[stage] || hstage->debugfs_dir) + return; + + dir = debugfs_create_dir(stage_name[stage], stats->debugfs_dir); + if (IS_ERR(dir)) + return; + + hstage->debugfs_dir = dir; + debugfs_create_files(dir, hstage, hierarchy_debugfs_attrs); + io_hierarchy_register_iodump(hstage); +} + +static void hierarchy_unregister_stage(struct blk_io_hierarchy_stats *stats, + enum stage_group stage) +{ + struct hierarchy_stage *hstage = stats->hstage[stage]; + + if (!stage_name[stage] || !hstage->debugfs_dir) + return; + + debugfs_remove_recursive(hstage->debugfs_dir); + hstage->debugfs_dir = NULL; +} + +void blk_mq_debugfs_register_hierarchy(struct request_queue *q, + enum stage_group stage) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + + lockdep_assert_held(&q->debugfs_mutex); + + if (!blk_mq_hierarchy_registered(q, stage) || + !blk_mq_debugfs_enabled(q)) + return; + + hierarchy_register_stage(stats, stage); +} + +void blk_mq_debugfs_unregister_hierarchy(struct request_queue *q, + enum stage_group stage) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + + lockdep_assert_held(&q->debugfs_mutex); + + if 
(!blk_mq_hierarchy_registered(q, stage) || + !blk_mq_debugfs_enabled(q)) + return; + + hierarchy_unregister_stage(stats, stage); +} + +void blk_mq_debugfs_create_default_hierarchy_attr(struct request_queue *q) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + + lockdep_assert_held(&q->debugfs_mutex); + + if (!blk_mq_debugfs_enabled(q)) + return; + + debugfs_create_files(stats->debugfs_dir, stats, hierarchy_stats_attr); +} diff --git a/block/blk-io-hierarchy/iodump.c b/block/blk-io-hierarchy/iodump.c new file mode 100644 index 0000000000000000000000000000000000000000..18bd813665f8d53bc2eecd7c6c5bf769fa24f68b --- /dev/null +++ b/block/blk-io-hierarchy/iodump.c @@ -0,0 +1,756 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include "iodump.h" +#include "../blk.h" +#include "../blk-mq-debugfs.h" + +#define RWB_LEN 6 +#define PATH_LEN 64 +#define ms_to_ns(time) (time * NSEC_PER_MSEC) +#define DEFAULT_THRESHOLD 1000 + +static DEFINE_MUTEX(dump_mutex); + +struct bio_dump_data { + u64 stat_time; + struct list_head head; + spinlock_t lock; +}; + +struct rq_dump_data { + struct request_queue *q; + enum stage_group stage; + unsigned int tag; + unsigned int total_tags; + bool has_elevator; + bool enter_queue; +}; + +#ifdef CONFIG_HIERARCHY_BIO +struct pos_data { + enum stage_group stage; + unsigned int count; +}; + +struct bio_stage_dump_data { + union { + loff_t pos; + struct pos_data pdata; + }; + struct rq_dump_data rq_ddata; + u64 stat_time; +}; +#endif + +int blk_io_hierarchy_iodump_init(struct request_queue *q, + struct hierarchy_stage *hstage) +{ + hstage->threshold = DEFAULT_THRESHOLD; + + if (stage_is_bio(hstage->stage)) { + struct bio_dump_data *bio_ddata = + kmalloc(sizeof(*bio_ddata), GFP_KERNEL); + + if (!bio_ddata) + return -ENOMEM; + + INIT_LIST_HEAD(&bio_ddata->head); + spin_lock_init(&bio_ddata->lock); + hstage->dump_data = bio_ddata; + return 0; + } + + if (stage_is_rq(hstage->stage)) { + struct rq_dump_data *rq_ddata = + kzalloc(sizeof(*rq_ddata), GFP_KERNEL); + + if (!rq_ddata) + return -ENOMEM; + + rq_ddata->q = q; + rq_ddata->stage = hstage->stage; + hstage->dump_data = rq_ddata; + return 0; + } + +#ifdef CONFIG_HIERARCHY_BIO + BUILD_BUG_ON(sizeof(struct pos_data) != sizeof(loff_t)); + + if (hstage->stage == STAGE_BIO) { + struct bio_stage_dump_data *bstage_ddata = + kzalloc(sizeof(*bstage_ddata), GFP_KERNEL); + + if (!bstage_ddata) + return -ENOMEM; + + bstage_ddata->rq_ddata.q = q; + bstage_ddata->rq_ddata.stage = hstage->stage; + hstage->dump_data = bstage_ddata; + return 0; + } +#endif + + return -EINVAL; +} + +void blk_io_hierarchy_iodump_exit(struct request_queue *q, + enum stage_group stage) +{ + struct hierarchy_stage *hstage = + queue_to_wrapper(q)->io_hierarchy_stats->hstage[stage]; + + if (stage_is_bio(hstage->stage)) { + struct bio_dump_data *bio_ddata = hstage->dump_data; + + WARN(!list_empty(&bio_ddata->head), + "blk-io-hierarchy: disk %s stage %s 
unregistered whih throttled IO.\n", + kobject_name(q->kobj.parent), hierarchy_stage_name(stage)); + } + + kfree(hstage->dump_data); + hstage->dump_data = NULL; +} + +void hierarchy_add_bio(struct hierarchy_stage *hstage, struct bio *bio) +{ + unsigned long flags; + struct bio_hierarchy_data *data = bio->hdata; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + spin_lock_irqsave(&bio_ddata->lock, flags); + list_add_tail(&data->hierarchy_list, &bio_ddata->head); + spin_unlock_irqrestore(&bio_ddata->lock, flags); +} + +void hierarchy_remove_bio(struct hierarchy_stage *hstage, struct bio *bio) +{ + unsigned long flags; + struct bio_hierarchy_data *data = bio->hdata; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + spin_lock_irqsave(&bio_ddata->lock, flags); + list_del_init(&data->hierarchy_list); + spin_unlock_irqrestore(&bio_ddata->lock, flags); +} + +void bio_hierarchy_data_init(struct bio *bio, struct bio_hierarchy_data *hdata) +{ + hdata->bio = bio; + INIT_LIST_HEAD(&hdata->hierarchy_list); +} + +static void *bio_hierarchy_list_start(struct seq_file *m, loff_t *pos) + __acquires(&bio_ddata->lock) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + spin_lock_irq(&bio_ddata->lock); + bio_ddata->stat_time = blk_time_get_ns(); + + return seq_list_start(&bio_ddata->head, *pos); +} + +static void *bio_hierarchy_list_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + return seq_list_next(v, &bio_ddata->head, pos); +} + +static void bio_hierarchy_list_stop(struct seq_file *m, void *v) + __releases(&hstage->lock) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + spin_unlock_irq(&bio_ddata->lock); +} + +static void __hierarchy_show_bio(struct seq_file *m, + struct bio_hierarchy_data *data, + enum stage_group stage, u64 duration) +{ + char rwbs[RWB_LEN]; + char path[PATH_LEN] = {0}; + struct bio *bio = data->bio; + struct task_struct *task = get_pid_task(bio->pid, PIDTYPE_PID); + + blk_fill_rwbs(rwbs, bio->bi_opf, bio->bi_iter.bi_size); +#ifdef CONFIG_BLK_CGROUP + cgroup_path(bio->bi_css->cgroup, path, PATH_LEN); +#endif + + seq_printf(m, "%s-%d %s stage %s bio %s %lu + %u cgroup %s started %llu ns ago\n", + task ? task->comm : "null", task ? task->pid : 0, + bio->bi_disk->disk_name, hierarchy_stage_name(stage), + rwbs, bio->bi_iter.bi_sector, bio_sectors(bio), path, + duration); + + if (task) + put_task_struct(task); +} + +static u64 get_duration(u64 a, u64 b) +{ + return a > b ? 
a - b : 0; +} + +static void hierarchy_show_bio(struct seq_file *m, + struct bio_hierarchy_data *data) +{ + u64 duration; + struct hierarchy_stage *hstage = m->private; + struct bio_dump_data *bio_ddata = hstage->dump_data; + + duration = get_duration(bio_ddata->stat_time, data->time); + if (hstage->threshold > ns_to_ms(duration)) + return; + + __hierarchy_show_bio(m, data, hstage->stage, duration); +} + +static int bio_hierarchy_list_show(struct seq_file *m, void *v) +{ + struct bio_hierarchy_data *data = + list_entry(v, struct bio_hierarchy_data, hierarchy_list); + + hierarchy_show_bio(m, data); + return 0; +} + +static const struct seq_operations hierarchy_bio_dump_ops = { + .start = bio_hierarchy_list_start, + .next = bio_hierarchy_list_next, + .stop = bio_hierarchy_list_stop, + .show = bio_hierarchy_list_show, +}; + +static int threshold_show(void *data, struct seq_file *m) +{ + struct hierarchy_stage *hstage = data; + + seq_printf(m, "%lu\n", hstage->threshold); + return 0; +} + +/* + * max size needed by different bases to express U64 + * HEX: "0xFFFFFFFFFFFFFFFF" --> 18 + * DEC: "18446744073709551615" --> 20 + * OCT: "01777777777777777777777" --> 23 + * pick the max one to define NUMBER_BUF_LEN + */ +#define MAX_BUF_LEN 24 +static ssize_t threshold_store(void *data, const char __user *buf, size_t count, + loff_t *ppos) +{ + int err; + unsigned long val; + char b[MAX_BUF_LEN + 1]; + struct hierarchy_stage *hstage = data; + + if (count > MAX_BUF_LEN) + return -EINVAL; + + if (copy_from_user(b, buf, count)) + return -EFAULT; + + b[count] = 0; + err = kstrtoul(b, 0, &val); + if (!err) + hstage->threshold = val; + + return err ? err : count; +} + +static void rq_hierarchy_init_dump_data(struct rq_dump_data *rq_ddata) +{ + struct request_queue *q = rq_ddata->q; + + rq_ddata->has_elevator = !!q->elevator; + + if (rq_ddata->has_elevator) + rq_ddata->total_tags = q->nr_hw_queues * q->nr_requests; + else + rq_ddata->total_tags = q->nr_hw_queues * + q->tag_set->queue_depth; +} + +static bool __rq_hierarchy_start(struct rq_dump_data *rq_ddata, + unsigned int tag) +{ + /* + * Grab .q_usage_counter so request pool won't go away, then no + * request use-after-free is possible during iteration. If queue is + * frozen, there won't be any inflight requests. 
+ */ + if (!percpu_ref_tryget(&rq_ddata->q->q_usage_counter)) { + rq_ddata->enter_queue = false; + return false; + } + + rq_ddata->enter_queue = true; + rq_hierarchy_init_dump_data(rq_ddata); + rq_ddata->tag = tag; + + return tag < rq_ddata->total_tags + rq_ddata->q->nr_hw_queues; +} + +static bool __rq_hierarchy_next(struct rq_dump_data *rq_ddata) +{ + rq_ddata->tag++; + + return rq_ddata->tag < rq_ddata->total_tags + rq_ddata->q->nr_hw_queues; +} + +static void __rq_hierarchy_stop(struct rq_dump_data *rq_ddata) +{ + if (rq_ddata->enter_queue) { + percpu_ref_put(&rq_ddata->q->q_usage_counter); + rq_ddata->enter_queue = false; + } +} + +static void *rq_hierarchy_start(struct seq_file *m, loff_t *pos) + __acquires(&dump_mutex) +{ + struct hierarchy_stage *hstage = m->private; + struct rq_dump_data *rq_ddata = hstage->dump_data; + + mutex_lock(&dump_mutex); + + if (__rq_hierarchy_start(rq_ddata, *pos)) + return rq_ddata; + + return NULL; +} + +static void *rq_hierarchy_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct rq_dump_data *rq_ddata = v; + + if (__rq_hierarchy_next(rq_ddata)) { + *pos = rq_ddata->tag; + return rq_ddata; + } + + (*pos)++; + return NULL; +} + +static void rq_hierarchy_stop(struct seq_file *m, void *v) + __releases(&dump_mutex) +{ + struct hierarchy_stage *hstage = m->private; + struct rq_dump_data *rq_ddata = hstage->dump_data; + + __rq_hierarchy_stop(rq_ddata); + mutex_unlock(&dump_mutex); +} + +static struct request *hierarchy_find_and_get_rq(struct rq_dump_data *rq_ddata) +{ + struct request *rq; + struct request_wrapper *rq_wrapper; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q = rq_ddata->q; + unsigned int nr_tag = rq_ddata->tag; + unsigned int hctx_id; + + if (nr_tag >= rq_ddata->total_tags) { + hctx_id = nr_tag - rq_ddata->total_tags; + if (hctx_id >= q->nr_hw_queues) + return NULL; + + hctx = q->queue_hw_ctx[hctx_id]; + rq = hctx->fq->flush_rq; + } else if (rq_ddata->has_elevator) { + hctx_id = nr_tag / q->nr_requests; + if (hctx_id >= q->nr_hw_queues) + return NULL; + + hctx = q->queue_hw_ctx[hctx_id]; + rq = hctx->sched_tags->static_rqs[nr_tag % q->nr_requests]; + } else { + hctx_id = nr_tag / q->tag_set->queue_depth; + if (hctx_id >= q->nr_hw_queues) + return NULL; + + hctx = q->queue_hw_ctx[hctx_id]; + if (!hctx->tags) + return NULL; + + rq = hctx->tags->static_rqs[nr_tag % q->tag_set->queue_depth]; + } + + rq_wrapper = request_to_wrapper(rq); + /* + * fast path to avoid refcount cas operations for the request that + * is from other shared request_queue or other stages. + */ + if (rq->q != q || (rq_ddata->stage != STAGE_BIO && + READ_ONCE(rq_wrapper->stage) != rq_ddata->stage)) + return NULL; + + if (!refcount_inc_not_zero(&rq->ref)) + return NULL; + + /* Check again after request is pinned, in case request is resued. */ + if (rq->q != q) { + blk_mq_put_rq_ref(rq); + return NULL; + } + + if (rq_ddata->stage == STAGE_BIO) + return rq; + + /* + * Barrier is paired with the smp_store_release() in + * rq_hierarchy_start_io_acct(), so that if stage is read, uninitialized + * hierarchy_time won't be read. 
+ */ + if (smp_load_acquire(&rq_wrapper->stage) != rq_ddata->stage) { + blk_mq_put_rq_ref(rq); + return NULL; + } + + return rq; +} + +static void hierarchy_show_rq(struct seq_file *m, struct request *rq, + u64 duration) +{ + struct request_wrapper *rq_wrapper = request_to_wrapper(rq); + struct task_struct *task = get_pid_task(rq_wrapper->pid, PIDTYPE_PID); + const char *name = hierarchy_stage_name(rq_wrapper->stage); + + seq_printf(m, "%s-%d %s stage %s ", task ? task->comm : "null", + task ? task->pid : 0, + rq->rq_disk ? rq->rq_disk->disk_name : "?", + name ? name : "?"); + debugfs_rq_show(m, rq); + seq_printf(m, " started %llu ns ago}\n", duration); + + if (task) + put_task_struct(task); +} + +static int rq_hierarchy_show(struct seq_file *m, void *v) +{ + u64 duration; + unsigned long htime; + struct hierarchy_stage *hstage = m->private; + struct request_wrapper *rq_wrapper; + struct request *rq = hierarchy_find_and_get_rq(v); + + if (!rq) + return 0; + + rq_wrapper = request_to_wrapper(rq); + htime = READ_ONCE(rq_wrapper->hierarchy_time); + htime = time_after(jiffies, htime) ? jiffies - htime : 0; + duration = jiffies_to_msecs(htime); + if (hstage->threshold <= duration) + hierarchy_show_rq(m, rq, ms_to_ns(duration)); + + blk_mq_put_rq_ref(rq); + return 0; +} + +static const struct seq_operations hierarchy_rq_dump_ops = { + .start = rq_hierarchy_start, + .next = rq_hierarchy_next, + .stop = rq_hierarchy_stop, + .show = rq_hierarchy_show, +}; + +static const struct blk_mq_debugfs_attr hierarchy_threshold_attr[] = { + { + "threshold", + 0600, + threshold_show, + threshold_store, + }, + {}, +}; + +static const struct blk_mq_debugfs_attr hierarchy_bio_dump_attr[] = { + { + "io_dump", + 0400, + .seq_ops = &hierarchy_bio_dump_ops, + }, + {}, +}; + +static const struct blk_mq_debugfs_attr hierarchy_rq_dump_attr[] = { + { + "io_dump", + 0400, + .seq_ops = &hierarchy_rq_dump_ops, + }, + {}, +}; + +#ifdef CONFIG_HIERARCHY_BIO +static struct bio_dump_data *get_bio_stage_ddata(struct request_queue *q, + enum stage_group stage) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + struct hierarchy_stage *hstage = READ_ONCE(stats->hstage[stage]); + + if (!hstage) + return NULL; + + return hstage->dump_data; +} + +static void bio_stage_start_next_stage(struct bio_stage_dump_data *bstage_ddata, + loff_t *pos) +{ + struct pos_data *pdata = &bstage_ddata->pdata; + + pdata->stage++; + if (!stage_is_bio(pdata->stage)) + pdata->stage = STAGE_BIO; + pdata->count = 0; + + *pos = bstage_ddata->pos; +} + +static void bio_stage_start_next_io(struct bio_stage_dump_data *bstage_ddata, + loff_t *pos) +{ + struct pos_data *pdata = &bstage_ddata->pdata; + + if (stage_is_bio(pdata->stage)) + pdata->count++; + else + pdata->count = bstage_ddata->rq_ddata.tag; + + *pos = bstage_ddata->pos; +} + +static void __bio_stage_hierarchy_stop(struct bio_stage_dump_data *bstage_ddata) +{ + struct pos_data *pdata = &bstage_ddata->pdata; + struct rq_dump_data *rq_ddata = &bstage_ddata->rq_ddata; + + if (stage_is_bio(pdata->stage)) { + struct bio_dump_data *bio_ddata = + get_bio_stage_ddata(rq_ddata->q, pdata->stage); + + spin_unlock_irq(&bio_ddata->lock); + } + + if (rq_ddata->enter_queue) { + percpu_ref_put(&rq_ddata->q->q_usage_counter); + rq_ddata->enter_queue = false; + } +} + +void *__bio_stage_hierarchy_start(struct bio_stage_dump_data *bstage_ddata, + loff_t *pos) +{ + struct pos_data *pdata = &bstage_ddata->pdata; + struct rq_dump_data *rq_ddata = &bstage_ddata->rq_ddata; + +retry: + if 
(stage_is_bio(pdata->stage)) { + struct list_head *list; + struct bio_dump_data *bio_ddata = + get_bio_stage_ddata(rq_ddata->q, pdata->stage); + + if (!bio_ddata) { + bio_stage_start_next_stage(bstage_ddata, pos); + goto retry; + } + + spin_lock_irq(&bio_ddata->lock); + list = seq_list_start(&bio_ddata->head, pdata->count); + if (list) + return list; + + spin_unlock_irq(&bio_ddata->lock); + bio_stage_start_next_stage(bstage_ddata, pos); + goto retry; + } + + if (pdata->stage == STAGE_BIO && + __rq_hierarchy_start(rq_ddata, pdata->count)) + return bstage_ddata; + + return NULL; +} + +static void *bio_stage_hierarchy_start(struct seq_file *m, loff_t *pos) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_stage_dump_data *bstage_ddata = hstage->dump_data; + + mutex_lock(&dump_mutex); + bstage_ddata->pos = *pos; + bstage_ddata->stat_time = blk_time_get_ns(); + + return __bio_stage_hierarchy_start(bstage_ddata, pos); +} + +static void *bio_stage_hierarchy_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_stage_dump_data *bstage_ddata = hstage->dump_data; + struct rq_dump_data *rq_ddata = &bstage_ddata->rq_ddata; + struct pos_data *pdata = &bstage_ddata->pdata; + + if (stage_is_bio(pdata->stage)) { + struct bio_dump_data *bio_ddata = + get_bio_stage_ddata(rq_ddata->q, pdata->stage); + struct list_head *list = ((struct list_head *)v)->next; + + if (list != &bio_ddata->head) { + bio_stage_start_next_io(bstage_ddata, pos); + return list; + } + + spin_unlock_irq(&bio_ddata->lock); + + bio_stage_start_next_stage(bstage_ddata, pos); + return __bio_stage_hierarchy_start(bstage_ddata, pos); + } + + if (pdata->stage == STAGE_BIO && + __rq_hierarchy_next(rq_ddata)) { + bio_stage_start_next_io(bstage_ddata, pos); + return bstage_ddata; + } + + (*pos)++; + return NULL; +} + +static void bio_stage_hierarchy_stop(struct seq_file *m, void *v) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_stage_dump_data *bstage_ddata = hstage->dump_data; + + __bio_stage_hierarchy_stop(bstage_ddata); + mutex_unlock(&dump_mutex); +} + +static int bio_stage_hierarchy_show(struct seq_file *m, void *v) +{ + struct hierarchy_stage *hstage = m->private; + struct bio_stage_dump_data *bstage_ddata = hstage->dump_data; + struct rq_dump_data *rq_ddata = &bstage_ddata->rq_ddata; + struct pos_data *pdata = &bstage_ddata->pdata; + u64 duration; + + if (stage_is_bio(pdata->stage)) { + struct bio_hierarchy_data *data = list_entry( + v, struct bio_hierarchy_data, hierarchy_list); + + duration = get_duration(bstage_ddata->stat_time, + data->bio->bi_alloc_time_ns); + if (hstage->threshold <= ns_to_ms(duration)) + __hierarchy_show_bio(m, data, pdata->stage, duration); + } else if (pdata->stage == STAGE_BIO) { + struct request *rq = hierarchy_find_and_get_rq(rq_ddata); + + if (rq) { + duration = get_duration(bstage_ddata->stat_time, + request_to_wrapper(rq)->bi_alloc_time_ns); + if (hstage->threshold <= ns_to_ms(duration)) + hierarchy_show_rq(m, rq, duration); + blk_mq_put_rq_ref(rq); + } + } + + return 0; +} + +static const struct seq_operations bio_stage_hierarchy_ops = { + .start = bio_stage_hierarchy_start, + .next = bio_stage_hierarchy_next, + .stop = bio_stage_hierarchy_stop, + .show = bio_stage_hierarchy_show, +}; + +static const struct blk_mq_debugfs_attr bio_stage_dump_attr[] = { + { + "io_dump", + 0400, + .seq_ops = &bio_stage_hierarchy_ops, + }, + {}, +}; + +#else /* CONFIG_HIERARCHY_BIO */ +static const struct blk_mq_debugfs_attr 
bio_stage_dump_attr[] = { + {}, +}; + +#endif + +void io_hierarchy_register_iodump(struct hierarchy_stage *hstage) +{ + const struct blk_mq_debugfs_attr *attr; + + if (stage_is_bio(hstage->stage)) + attr = hierarchy_bio_dump_attr; + else if (stage_is_rq(hstage->stage)) + attr = hierarchy_rq_dump_attr; + else if (hstage->stage == STAGE_BIO) + attr = bio_stage_dump_attr; + else + attr = NULL; + + debugfs_create_files(hstage->debugfs_dir, hstage, + hierarchy_threshold_attr); + if (attr) + debugfs_create_files(hstage->debugfs_dir, hstage, attr); +} + +void hierarchy_account_slow_io(struct hierarchy_stage *hstage, + enum stat_group op, unsigned long duration) +{ + if (hstage->threshold <= duration) + this_cpu_inc(hstage->hstats_data->hstats->slow[op]); +} + +void hierarchy_show_slow_io(struct hierarchy_stats_data *hstats_data, + struct seq_file *m) +{ + u64 slow[NR_NEW_STAT_GROUPS] = {0}; + int cpu; + int i; + + for_each_possible_cpu(cpu) { + struct hierarchy_stats *stat = + per_cpu_ptr(hstats_data->hstats, cpu); + + for (i = 0; i < NR_NEW_STAT_GROUPS; ++i) + slow[i] += stat->slow[i]; + } + + seq_printf(m, " %llu %llu %llu %llu", slow[STAT_READ], slow[STAT_WRITE], + slow[STAT_DISCARD], slow[STAT_FLUSH]); +} diff --git a/block/blk-io-hierarchy/iodump.h b/block/blk-io-hierarchy/iodump.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ef0d8669f621bf2c97728ed5c4d7fa676d7e66 --- /dev/null +++ b/block/blk-io-hierarchy/iodump.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef BLK_IO_HIERARCHY_IODUMP_H +#define BLK_IO_HIERARCHY_IODUMP_H + +#ifdef CONFIG_HIERARCHY_IO_DUMP + +#include "stats.h" + +#define ns_to_ms(time) div_u64(time, NSEC_PER_MSEC) + +int blk_io_hierarchy_iodump_init(struct request_queue *q, + struct hierarchy_stage *hstage); +void blk_io_hierarchy_iodump_exit(struct request_queue *q, + enum stage_group stage); +void hierarchy_add_bio(struct hierarchy_stage *hstage, struct bio *bio); +void hierarchy_remove_bio(struct hierarchy_stage *hstage, struct bio *bio); +void bio_hierarchy_data_init(struct bio *bio, struct bio_hierarchy_data *hdata); +void io_hierarchy_register_iodump(struct hierarchy_stage *hstage); + +void hierarchy_account_slow_io(struct hierarchy_stage *hstage, + enum stat_group op, unsigned long duration); +void hierarchy_show_slow_io(struct hierarchy_stats_data *hstats_data, + struct seq_file *m); + +static inline void +hierarchy_account_slow_io_ns(struct hierarchy_stage *hstage, + enum stat_group op, u64 duration) +{ + hierarchy_account_slow_io(hstage, op, ns_to_ms(duration)); +} + +static inline void +hierarchy_account_slow_io_jiffies(struct hierarchy_stage *hstage, + enum stat_group op, unsigned long duration) +{ + hierarchy_account_slow_io(hstage, op, jiffies_to_msecs(duration)); +} + +#else +static inline int +blk_io_hierarchy_iodump_init(struct request_queue *q, + struct hierarchy_stage *hstage) +{ + return 0; +} + +static inline void +blk_io_hierarchy_iodump_exit(struct request_queue *q, enum stage_group stage) +{ +} + +static inline void +hierarchy_add_bio(struct hierarchy_stage *hstage, struct bio *bio) +{ +} + +static inline void +hierarchy_remove_bio(struct hierarchy_stage *hstage, struct bio *bio) +{ +} + +static inline void +bio_hierarchy_data_init(struct bio *bio, struct bio_hierarchy_data *hdata) +{ +} + +static inline void +io_hierarchy_register_iodump(struct hierarchy_stage *hstage) +{ +} + +static inline void +hierarchy_account_slow_io(struct hierarchy_stage *hstage, + enum stat_group op, unsigned long duration) +{ +} + +static inline void +hierarchy_account_slow_io_ns(struct hierarchy_stage *hstage, + enum stat_group op, u64 duration) +{ +} + +static inline void +hierarchy_account_slow_io_jiffies(struct hierarchy_stage *hstage, + enum stat_group op, unsigned long duration) +{ +} + +#endif +#endif diff --git a/block/blk-io-hierarchy/stats.c b/block/blk-io-hierarchy/stats.c new file mode 100644 index 0000000000000000000000000000000000000000..9b6b735fd5bfdac0bdea10d3d1f9837fdb9331a7 --- /dev/null +++ b/block/blk-io-hierarchy/stats.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#include "stats.h" +#include "iodump.h" +#include "../blk.h" +#include "../blk-mq-debugfs.h" + +#define io_hierarchy_add(statsp, field, group, nr) \ + this_cpu_add((statsp)->hstats->field[group], nr) +#define io_hierarchy_inc(statsp, field, group) \ + io_hierarchy_add(statsp, field, group, 1) + +#define PRE_ALLOC_BIO_CNT 8 + +static mempool_t *hdata_pool; + +void blk_mq_debugfs_register_hierarchy_stats(struct request_queue *q) +{ + struct blk_io_hierarchy_stats *stats; + enum stage_group stage; + + lockdep_assert_held(&q->debugfs_mutex); + + stats = queue_to_wrapper(q)->io_hierarchy_stats; + if (!stats || !blk_mq_debugfs_enabled(q)) + return; + + stats->debugfs_dir = debugfs_create_dir("blk_io_hierarchy", + q->debugfs_dir); + blk_mq_debugfs_create_default_hierarchy_attr(q); + + for (stage = 0; stage < NR_STAGE_GROUPS; ++stage) + blk_mq_debugfs_register_hierarchy(q, stage); +} + +static void bio_alloc_hierarchy_data(struct bio *bio) +{ + if (!bio->hdata) { + struct bio_hierarchy_data *hdata = + mempool_alloc(hdata_pool, GFP_NOIO); + + bio_hierarchy_data_init(bio, hdata); + bio->hdata = hdata; + } +} + +void bio_free_hierarchy_data(struct bio *bio) +{ + if (!bio->hdata) + return; + + mempool_free(bio->hdata, hdata_pool); + bio->hdata = NULL; +} + +int blk_io_hierarchy_stats_alloc(struct request_queue *q) +{ + struct blk_io_hierarchy_stats *stats; + + if (!q->mq_ops) + return 0; + + stats = kzalloc(sizeof(struct blk_io_hierarchy_stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; + + spin_lock_init(&stats->hstage_lock); + stats->q = q; + queue_to_wrapper(q)->io_hierarchy_stats = stats; + + return 0; +} + +void blk_io_hierarchy_stats_free(struct request_queue *q) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + + if (!stats) + return; + + queue_to_wrapper(q)->io_hierarchy_stats = NULL; + kfree(stats); +} + +bool blk_mq_hierarchy_registered(struct request_queue *q, + enum stage_group stage) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + + if (!stats) + return false; + + return stats->hstage[stage] != NULL; +} +EXPORT_SYMBOL_GPL(blk_mq_hierarchy_registered); + +static struct hierarchy_stats_data *alloc_hstats_data(void) +{ + struct hierarchy_stats_data *hstats_data; + + hstats_data = kmalloc(sizeof(*hstats_data), GFP_KERNEL); + if (!hstats_data) + return NULL; + + hstats_data->hstats = alloc_percpu(struct hierarchy_stats); + if (!hstats_data->hstats) { + kfree(hstats_data); + return NULL; + } + + hstats_data->ref = 1; + return hstats_data; +} + +struct hierarchy_stats_data *get_hstats_data( + struct blk_io_hierarchy_stats *stats, + enum stage_group stage) +{ + struct hierarchy_stage *hstage; + struct hierarchy_stats_data *hstats_data = NULL; + + spin_lock(&stats->hstage_lock); + hstage = stats->hstage[stage]; + if (hstage) { + hstats_data = hstage->hstats_data; + if (hstats_data) + hstats_data->ref++; + } + spin_unlock(&stats->hstage_lock); + + return hstats_data; +} + +static void __put_hstats_data(struct blk_io_hierarchy_stats *stats, + struct hierarchy_stats_data *hstats_data) +{ + if (--hstats_data->ref == 0) { + free_percpu(hstats_data->hstats); + kfree(hstats_data); + } +} + +void put_hstats_data(struct blk_io_hierarchy_stats *stats, + struct hierarchy_stats_data *hstats_data) +{ + spin_lock(&stats->hstage_lock); + __put_hstats_data(stats, hstats_data); + spin_unlock(&stats->hstage_lock); +} + +void blk_mq_register_hierarchy(struct request_queue *q, enum stage_group stage) +{ + 
struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + struct hierarchy_stage *hstage; + + if (!stats || !hierarchy_stage_name(stage)) + return; + + if (blk_mq_hierarchy_registered(q, stage)) { + pr_warn("blk-io-hierarchy: disk %s is registering stage %s again.", + kobject_name(q->kobj.parent), + hierarchy_stage_name(stage)); + return; + } + + /* + * Alloc memory before freeze queue, prevent deadlock if new IO is + * issued by memory reclaim. + */ + hstage = kmalloc(sizeof(*hstage), GFP_KERNEL); + if (!hstage) + return; + + hstage->hstats_data = alloc_hstats_data(); + if (!hstage->hstats_data) { + kfree(hstage); + return; + } + + hstage->stage = stage; + hstage->unbalanced_warned = false; + hstage->debugfs_dir = NULL; + if (blk_io_hierarchy_iodump_init(q, hstage) < 0) { + put_hstats_data(stats, hstage->hstats_data); + kfree(hstage); + return; + } + + blk_mq_freeze_queue(q); + + mutex_lock(&q->debugfs_mutex); + WRITE_ONCE(stats->hstage[stage], hstage); + blk_mq_debugfs_register_hierarchy(q, stage); + mutex_unlock(&q->debugfs_mutex); + + blk_mq_unfreeze_queue(q); +} +EXPORT_SYMBOL_GPL(blk_mq_register_hierarchy); + +void blk_mq_unregister_hierarchy(struct request_queue *q, + enum stage_group stage) +{ + struct blk_io_hierarchy_stats *stats = + queue_to_wrapper(q)->io_hierarchy_stats; + struct hierarchy_stage *hstage; + + if (!blk_mq_hierarchy_registered(q, stage)) + return; + + mutex_lock(&q->debugfs_mutex); + + blk_mq_debugfs_unregister_hierarchy(q, stage); + blk_io_hierarchy_iodump_exit(q, stage); + + spin_lock(&stats->hstage_lock); + hstage = stats->hstage[stage]; + stats->hstage[stage] = NULL; + __put_hstats_data(stats, hstage->hstats_data); + spin_unlock(&stats->hstage_lock); + + kfree(hstage); + + mutex_unlock(&q->debugfs_mutex); +} +EXPORT_SYMBOL_GPL(blk_mq_unregister_hierarchy); + +static enum stat_group bio_hierarchy_op(struct bio *bio) +{ + if (op_is_discard(bio->bi_opf)) + return STAT_DISCARD; + + if (op_is_flush(bio->bi_opf) && + !(bio_sectors(bio) || (bio->bi_opf & REQ_HAS_DATA))) + return STAT_FLUSH; + + if (op_is_write(bio->bi_opf)) + return STAT_WRITE; + + return STAT_READ; +} + + +void bio_hierarchy_start_io_acct(struct bio *bio, enum stage_group stage) +{ + struct request_queue *q = bio->bi_disk->queue; + struct hierarchy_stage *hstage; + + if (!blk_mq_hierarchy_registered(q, stage)) + return; + + hstage = queue_to_wrapper(q)->io_hierarchy_stats->hstage[stage]; + bio_alloc_hierarchy_data(bio); + io_hierarchy_inc(hstage->hstats_data, dispatched, + bio_hierarchy_op(bio)); + bio->hdata->time = blk_time_get_ns(); + hierarchy_add_bio(hstage, bio); +} + +void __bio_hierarchy_end_io_acct(struct bio *bio, enum stage_group stage, + u64 time) +{ + struct request_queue *q = bio->bi_disk->queue; + struct hierarchy_stage *hstage; + u64 duration; + enum stat_group op; + + if (!blk_mq_hierarchy_registered(q, stage)) + return; + + op = bio_hierarchy_op(bio); + duration = time - bio->hdata->time; + hstage = queue_to_wrapper(q)->io_hierarchy_stats->hstage[stage]; + + hierarchy_remove_bio(hstage, bio); + io_hierarchy_inc(hstage->hstats_data, completed, op); + io_hierarchy_add(hstage->hstats_data, nsecs, op, duration); + hierarchy_account_slow_io_ns(hstage, op, duration); +} + +static enum stat_group rq_hierarchy_op(struct request *rq) +{ + if (op_is_discard(rq->cmd_flags)) + return STAT_DISCARD; + + if (is_flush_rq(rq)) + return STAT_FLUSH; + + if (op_is_write(rq->cmd_flags)) + return STAT_WRITE; + + return STAT_READ; +} + +static void 
rq_hierarchy_warn_unbalanced(struct request *rq, + struct hierarchy_stage *hstage, + enum stage_group old_stage, + enum stage_group new_stage) +{ + if (hstage->unbalanced_warned) + return; + + pr_warn("blk-io-hierarchy: disk %s stage %d(%s) -> %d(%s) unbalanced accounting.", + kobject_name(rq->q->kobj.parent), + old_stage, hierarchy_stage_name(old_stage), + new_stage, hierarchy_stage_name(new_stage)); + hstage->unbalanced_warned = true; +} + +void blk_rq_hierarchy_stats_complete(struct request *rq) +{ + struct hierarchy_stage *hstage; + enum stage_group stage; + + stage = request_to_wrapper(rq)->stage; + if (stage == NR_RQ_STAGE_GROUPS) + return; + + if (!blk_mq_hierarchy_registered(rq->q, stage)) + return; + + hstage = queue_to_wrapper(rq->q)->io_hierarchy_stats->hstage[stage]; + rq_hierarchy_warn_unbalanced(rq, hstage, stage, NR_RQ_STAGE_GROUPS); + __rq_hierarchy_end_io_acct(rq, hstage); +} + +void __rq_hierarchy_start_io_acct(struct request *rq, + struct hierarchy_stage *hstage) +{ + struct request_wrapper *rq_wrapper = request_to_wrapper(rq); + + blk_rq_hierarchy_stats_complete(rq); + io_hierarchy_inc(hstage->hstats_data, dispatched, rq_hierarchy_op(rq)); + WRITE_ONCE(rq_wrapper->hierarchy_time, jiffies); + + /* + * Paired with barrier in hierarchy_show_rq_fn(), make sure + * hierarchy_time is set before stage. + */ + smp_store_release(&rq_wrapper->stage, hstage->stage); +} +EXPORT_SYMBOL_GPL(__rq_hierarchy_start_io_acct); + +void __rq_hierarchy_end_io_acct(struct request *rq, + struct hierarchy_stage *hstage) +{ + enum stat_group op; + unsigned long duration; + struct request_wrapper *rq_wrapper; + + rq_wrapper = request_to_wrapper(rq); + if (rq_wrapper->stage != hstage->stage) { + rq_hierarchy_warn_unbalanced(rq, hstage, rq_wrapper->stage, + hstage->stage); + return; + } + + op = rq_hierarchy_op(rq); + duration = jiffies - rq_wrapper->hierarchy_time; + + io_hierarchy_inc(hstage->hstats_data, completed, op); + io_hierarchy_add(hstage->hstats_data, jiffies, op, duration); + hierarchy_account_slow_io_jiffies(hstage, op, duration); + WRITE_ONCE(rq_wrapper->stage, NR_RQ_STAGE_GROUPS); +} +EXPORT_SYMBOL_GPL(__rq_hierarchy_end_io_acct); + +#ifdef CONFIG_HIERARCHY_BIO +void bio_hierarchy_start(struct bio *bio) +{ + struct request_queue_wrapper *q_wrapper; + struct gendisk *disk = bio->bi_disk; + struct hierarchy_stage *hstage; + + if (bio_flagged(bio, BIO_HIERARCHY_ACCT)) + return; + + if (!blk_mq_hierarchy_registered(disk->queue, STAGE_BIO)) + return; + + bio_set_flag(bio, BIO_HIERARCHY_ACCT); + if (bio_has_data(bio)) + bio->bi_opf |= REQ_HAS_DATA; + q_wrapper = queue_to_wrapper(disk->queue); + hstage = q_wrapper->io_hierarchy_stats->hstage[STAGE_BIO]; + io_hierarchy_inc(hstage->hstats_data, dispatched, + bio_hierarchy_op(bio)); +} + +void __bio_hierarchy_end(struct bio *bio, u64 now) +{ + struct request_queue_wrapper *q_wrapper; + struct gendisk *disk = bio->bi_disk; + struct hierarchy_stage *hstage; + enum stat_group op; + u64 duration; + + op = bio_hierarchy_op(bio); + duration = now - bio->bi_alloc_time_ns; + q_wrapper = queue_to_wrapper(disk->queue); + hstage = q_wrapper->io_hierarchy_stats->hstage[STAGE_BIO]; + + io_hierarchy_inc(hstage->hstats_data, completed, op); + io_hierarchy_add(hstage->hstats_data, nsecs, op, duration); + hierarchy_account_slow_io_ns(hstage, op, duration); + + bio_clear_flag(bio, BIO_HIERARCHY_ACCT); + bio->bi_opf &= ~REQ_HAS_DATA; +} +#endif + +static int __init hierarchy_stats_init(void) +{ + hdata_pool = mempool_create_kmalloc_pool(PRE_ALLOC_BIO_CNT, + 
sizeof(struct bio_hierarchy_data)); + if (!hdata_pool) + panic("Failed to create hdata_pool\n"); + + return 0; +} +module_init(hierarchy_stats_init); diff --git a/block/blk-io-hierarchy/stats.h b/block/blk-io-hierarchy/stats.h new file mode 100644 index 0000000000000000000000000000000000000000..d3c6a26dfacbd344022f5f062aaf64562a845f75 --- /dev/null +++ b/block/blk-io-hierarchy/stats.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef BLK_IO_HIERARCHY_STATS_H +#define BLK_IO_HIERARCHY_STATS_H + +#ifdef CONFIG_BLK_IO_HIERARCHY_STATS + +#include +#include +#include "../blk.h" + +struct bio_hierarchy_data { + u64 time; +#ifdef CONFIG_HIERARCHY_IO_DUMP + struct bio *bio; + struct list_head hierarchy_list; +#endif +}; + +struct hierarchy_stats { + union { + /* for bio based stages. */ + u64 nsecs[NR_NEW_STAT_GROUPS]; + /* for request based stages. */ + unsigned long jiffies[NR_NEW_STAT_GROUPS]; + }; + unsigned long dispatched[NR_NEW_STAT_GROUPS]; + unsigned long completed[NR_NEW_STAT_GROUPS]; +#ifdef CONFIG_HIERARCHY_IO_DUMP + unsigned long slow[NR_NEW_STAT_GROUPS]; +#endif +}; + +struct hierarchy_stats_data { + int ref; + struct hierarchy_stats __percpu *hstats; +}; + +struct hierarchy_stage { + enum stage_group stage; + bool unbalanced_warned; + struct dentry *debugfs_dir; + struct hierarchy_stats_data *hstats_data; +#ifdef CONFIG_HIERARCHY_IO_DUMP + unsigned long threshold; + void *dump_data; +#endif +}; + +struct blk_io_hierarchy_stats { + struct request_queue *q; + struct dentry *debugfs_dir; + spinlock_t hstage_lock; + struct hierarchy_stage *hstage[NR_STAGE_GROUPS]; +}; + +static inline bool stage_is_bio(enum stage_group stage) +{ + return stage >= 0 && stage < NR_BIO_STAGE_GROUPS; +} + +static inline bool stage_is_rq(enum stage_group stage) +{ + return stage >= NR_BIO_STAGE_GROUPS && stage < NR_RQ_STAGE_GROUPS; +} + +const char *hierarchy_stage_name(enum stage_group stage); +int blk_io_hierarchy_stats_alloc(struct request_queue *q); +void blk_io_hierarchy_stats_free(struct request_queue *q); + +/* APIs for stage registration */ +bool blk_mq_hierarchy_registered(struct request_queue *q, + enum stage_group stage); +void blk_mq_register_hierarchy(struct request_queue *q, enum stage_group stage); +void blk_mq_unregister_hierarchy(struct request_queue *q, + enum stage_group stage); + +/* APIs for disk level debugfs */ +void blk_mq_debugfs_register_hierarchy_stats(struct request_queue *q); +void blk_mq_debugfs_create_default_hierarchy_attr(struct request_queue *q); + +/* APIs for stage level debugfs */ +void blk_mq_debugfs_register_hierarchy(struct request_queue *q, + enum stage_group stage); +void blk_mq_debugfs_unregister_hierarchy(struct request_queue *q, + enum stage_group stage); +struct hierarchy_stats_data *get_hstats_data( + struct blk_io_hierarchy_stats *stats, + enum stage_group stage); +void put_hstats_data(struct blk_io_hierarchy_stats *stats, + struct hierarchy_stats_data *hstats_data); + +/* APIs for bio based stage io 
accounting */ +void bio_hierarchy_start_io_acct(struct bio *bio, enum stage_group stage); +void __bio_hierarchy_end_io_acct(struct bio *bio, enum stage_group stage, + u64 time); +void bio_free_hierarchy_data(struct bio *bio); + +static inline void bio_hierarchy_end_io_acct(struct bio *bio, + enum stage_group stage) +{ + __bio_hierarchy_end_io_acct(bio, stage, blk_time_get_ns()); +} + +static inline void bio_list_hierarchy_end_io_acct(struct bio_list *list, + enum stage_group stage) +{ + u64 time = blk_time_get_ns(); + struct bio *bio; + + bio_list_for_each(bio, list) + __bio_hierarchy_end_io_acct(bio, stage, time); +} + +/* APIs for request based stage io accounting */ +void blk_rq_hierarchy_stats_complete(struct request *rq); +void __rq_hierarchy_start_io_acct(struct request *rq, + struct hierarchy_stage *hstage); +void __rq_hierarchy_end_io_acct(struct request *rq, + struct hierarchy_stage *hstage); + +static inline void rq_hierarchy_start_io_acct(struct request *rq, + enum stage_group stage) +{ + if (!blk_mq_hierarchy_registered(rq->q, stage)) + return; + + __rq_hierarchy_start_io_acct(rq, + queue_to_wrapper(rq->q)->io_hierarchy_stats->hstage[stage]); +} + +static inline void rq_hierarchy_end_io_acct(struct request *rq, + enum stage_group stage) +{ + if (!blk_mq_hierarchy_registered(rq->q, stage)) + return; + + __rq_hierarchy_end_io_acct(rq, + queue_to_wrapper(rq->q)->io_hierarchy_stats->hstage[stage]); +} + +static inline void rq_list_hierarchy_start_io_acct(struct list_head *head, + enum stage_group stage) +{ + struct request *rq; + struct hierarchy_stage *hstage; + + if (list_empty(head)) + return; + + rq = list_first_entry(head, struct request, queuelist); + if (!blk_mq_hierarchy_registered(rq->q, stage)) + return; + + hstage = queue_to_wrapper(rq->q)->io_hierarchy_stats->hstage[stage]; + list_for_each_entry(rq, head, queuelist) + __rq_hierarchy_start_io_acct(rq, hstage); +} + +static inline void rq_list_hierarchy_end_io_acct(struct list_head *head, + enum stage_group stage) +{ + struct request *rq; + struct hierarchy_stage *hstage; + + if (list_empty(head)) + return; + + rq = list_first_entry(head, struct request, queuelist); + if (!blk_mq_hierarchy_registered(rq->q, stage)) + return; + + hstage = queue_to_wrapper(rq->q)->io_hierarchy_stats->hstage[stage]; + list_for_each_entry(rq, head, queuelist) + __rq_hierarchy_end_io_acct(rq, hstage); +} + +static inline void blk_rq_hierarchy_stats_init(struct request *rq) +{ + request_to_wrapper(rq)->stage = NR_RQ_STAGE_GROUPS; + request_to_wrapper(rq)->flush_done = false; +} + +static inline void blk_rq_hierarchy_set_flush_done(struct request *rq) +{ + request_to_wrapper(rq)->flush_done = true; +} + +static inline bool blk_rq_hierarchy_is_flush_done(struct request *rq) +{ + return request_to_wrapper(rq)->flush_done; +} + +#ifdef CONFIG_HIERARCHY_BIO +void bio_hierarchy_start(struct bio *bio); +void __bio_hierarchy_end(struct bio *bio, u64 now); + +static inline void bio_hierarchy_end(struct bio *bio) +{ + if (!bio_flagged(bio, BIO_HIERARCHY_ACCT)) + return; + + if (!blk_mq_hierarchy_registered(bio->bi_disk->queue, STAGE_BIO)) + return; + + __bio_hierarchy_end(bio, blk_time_get_ns()); +} + +static inline void req_bio_hierarchy_end(struct request *rq, struct bio *bio) +{ + u64 now; + + if (!bio_flagged(bio, BIO_HIERARCHY_ACCT)) + return; + + if (!blk_mq_hierarchy_registered(bio->bi_disk->queue, STAGE_BIO)) + return; + + now = request_to_wrapper(rq)->io_end_time_ns; + if (!now) { + now = blk_time_get_ns(); + 
request_to_wrapper(rq)->io_end_time_ns = now; + } + + __bio_hierarchy_end(bio, now); +} +#endif + +#else /* CONFIG_BLK_IO_HIERARCHY_STATS */ + +static inline int +blk_io_hierarchy_stats_alloc(struct request_queue *q) +{ + return 0; +} + +static inline void +blk_io_hierarchy_stats_free(struct request_queue *q) +{ +} + +static inline bool +blk_mq_hierarchy_registered(struct request_queue *q, enum stage_group stage) +{ + return false; +} + +static inline void +blk_mq_register_hierarchy(struct request_queue *q, enum stage_group stage) +{ +} + +static inline void +blk_mq_unregister_hierarchy(struct request_queue *q, enum stage_group stage) +{ +} + +static inline void +blk_mq_debugfs_register_hierarchy_stats(struct request_queue *q) +{ +} + +static inline void +blk_mq_debugfs_register_hierarchy(struct request_queue *q, + enum stage_group stage) +{ +} + +static inline void +blk_mq_debugfs_unregister_hierarchy(struct request_queue *q, + enum stage_group stage) +{ +} + +static inline void +bio_hierarchy_start_io_acct(struct bio *bio, enum stage_group stage) +{ +} + +static inline void +bio_hierarchy_end_io_acct(struct bio *bio, enum stage_group stage) +{ +} + +static inline void +bio_list_hierarchy_end_io_acct(struct bio_list *list, enum stage_group stage) +{ +} + +static inline void +bio_free_hierarchy_data(struct bio *bio) +{ +} + +static inline void +blk_rq_hierarchy_set_flush_done(struct request *rq) +{ +} + +static inline bool +blk_rq_hierarchy_is_flush_done(struct request *rq) +{ + return false; +} + +static inline void +blk_rq_hierarchy_stats_complete(struct request *rq) +{ +} + +static inline void +rq_hierarchy_start_io_acct(struct request *rq, enum stage_group stage) +{ +} + +static inline void +rq_hierarchy_end_io_acct(struct request *rq, enum stage_group stage) +{ +} + +static inline void +rq_list_hierarchy_start_io_acct(struct list_head *head, enum stage_group stage) +{ +} + +static inline void +rq_list_hierarchy_end_io_acct(struct list_head *head, enum stage_group stage) +{ +} + +static inline void +blk_rq_hierarchy_stats_init(struct request *rq) +{ +} + +#endif /* CONFIG_BLK_IO_HIERARCHY_STATS */ + +#if !defined(CONFIG_BLK_IO_HIERARCHY_STATS) || !defined(CONFIG_HIERARCHY_BIO) +static inline void +bio_hierarchy_start(struct bio *bio) +{ +} + +static inline void +bio_hierarchy_end(struct bio *bio) +{ +} + +static inline void +req_bio_hierarchy_end(struct request *rq, struct bio *bio) +{ +} +#endif + +#endif /* BLK_IO_HIERARCHY_STATS_H */ diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 01580f88fcb39fd68f04253fb216d859dc2577e5..281b7a93e340ae75321d540b70560d5ae9c5c545 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -87,6 +87,7 @@ static void ioc_destroy_icq(struct io_cq *icq) * making it impossible to determine icq_cache. Record it in @icq. 
*/ icq->__rcu_icq_cache = et->icq_cache; + icq->flags |= ICQ_DESTROYED; call_rcu(&icq->__rcu_head, icq_free_icq_rcu); } @@ -230,15 +231,21 @@ static void __ioc_clear_queue(struct list_head *icq_list) { unsigned long flags; + rcu_read_lock(); while (!list_empty(icq_list)) { struct io_cq *icq = list_entry(icq_list->next, struct io_cq, q_node); struct io_context *ioc = icq->ioc; spin_lock_irqsave(&ioc->lock, flags); + if (icq->flags & ICQ_DESTROYED) { + spin_unlock_irqrestore(&ioc->lock, flags); + continue; + } ioc_destroy_icq(icq); spin_unlock_irqrestore(&ioc->lock, flags); } + rcu_read_unlock(); } /** @@ -254,13 +261,8 @@ void ioc_clear_queue(struct request_queue *q) spin_lock_irq(q->queue_lock); list_splice_init(&q->icq_list, &icq_list); - if (q->mq_ops) { - spin_unlock_irq(q->queue_lock); - __ioc_clear_queue(&icq_list); - } else { - __ioc_clear_queue(&icq_list); - spin_unlock_irq(q->queue_lock); - } + __ioc_clear_queue(&icq_list); + spin_unlock_irq(q->queue_lock); } int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 19923f8a029ddf199e01a7571914e8217f70b308..6f81794eb6e6d6cb0790ba7e13b1f201e13d7284 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -72,8 +72,10 @@ #include #include #include +#include #include "blk-rq-qos.h" #include "blk-stat.h" +#include "blk.h" #define DEFAULT_SCALE_COOKIE 1000000U @@ -555,12 +557,13 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) struct rq_wait *rqw; struct iolatency_grp *iolat; u64 window_start; - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); bool issue_as_root = bio_issue_as_root_blkg(bio); bool enabled = false; + int inflight = 0; blkg = bio->bi_blkg; - if (!blkg) + if (!blkg || !bio_flagged(bio, BIO_TRACKED)) return; iolat = blkg_to_lat(bio->bi_blkg); @@ -568,6 +571,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) return; enabled = blk_iolatency_enabled(iolat->blkiolat); + if (!enabled) + return; + while (blkg && blkg->parent) { iolat = blkg_to_lat(blkg); if (!iolat) { @@ -576,41 +582,24 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) } rqw = &iolat->rq_wait; - atomic_dec(&rqw->inflight); - if (!enabled || iolat->min_lat_nsec == 0) - goto next; - iolatency_record_time(iolat, &bio->bi_issue, now, - issue_as_root); - window_start = atomic64_read(&iolat->window_start); - if (now > window_start && - (now - window_start) >= iolat->cur_win_nsec) { - if (atomic64_cmpxchg(&iolat->window_start, - window_start, now) == window_start) - iolatency_check_latencies(iolat, now); + inflight = atomic_dec_return(&rqw->inflight); + WARN_ON_ONCE(inflight < 0); + /* + * If bi_status is BLK_STS_AGAIN, the bio wasn't actually + * submitted, so do not account for it. 
+ */ + if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) { + iolatency_record_time(iolat, &bio->bi_issue, now, + issue_as_root); + window_start = atomic64_read(&iolat->window_start); + if (now > window_start && + (now - window_start) >= iolat->cur_win_nsec) { + if (atomic64_cmpxchg(&iolat->window_start, + window_start, now) == window_start) + iolatency_check_latencies(iolat, now); + } } -next: - wake_up(&rqw->wait); - blkg = blkg->parent; - } -} - -static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio) -{ - struct blkcg_gq *blkg; - - blkg = bio->bi_blkg; - while (blkg && blkg->parent) { - struct rq_wait *rqw; - struct iolatency_grp *iolat; - - iolat = blkg_to_lat(blkg); - if (!iolat) - goto next; - - rqw = &iolat->rq_wait; - atomic_dec(&rqw->inflight); wake_up(&rqw->wait); -next: blkg = blkg->parent; } } @@ -626,7 +615,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos) static struct rq_qos_ops blkcg_iolatency_ops = { .throttle = blkcg_iolatency_throttle, - .cleanup = blkcg_iolatency_cleanup, .done_bio = blkcg_iolatency_done_bio, .exit = blkcg_iolatency_exit, }; @@ -636,7 +624,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer); struct blkcg_gq *blkg; struct cgroup_subsys_state *pos_css; - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); rcu_read_lock(); blkg_for_each_descendant_pre(blkg, pos_css, @@ -721,10 +709,13 @@ int blk_iolatency_init(struct request_queue *q) return 0; } -static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) +/* + * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise + * return 0. + */ +static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) { struct iolatency_grp *iolat = blkg_to_lat(blkg); - struct blk_iolatency *blkiolat = iolat->blkiolat; u64 oldval = iolat->min_lat_nsec; iolat->min_lat_nsec = val; @@ -733,9 +724,12 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) BLKIOLATENCY_MAX_WIN_SIZE); if (!oldval && val) - atomic_inc(&blkiolat->enabled); - if (oldval && !val) - atomic_dec(&blkiolat->enabled); + return 1; + if (oldval && !val) { + blkcg_clear_delay(blkg); + return -1; + } + return 0; } static void iolatency_clear_scaling(struct blkcg_gq *blkg) @@ -768,6 +762,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, u64 lat_val = 0; u64 oldval; int ret; + int enable = 0; ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); if (ret) @@ -803,7 +798,16 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, blkg = ctx.blkg; oldval = iolat->min_lat_nsec; - iolatency_set_min_lat_nsec(blkg, lat_val); + enable = iolatency_set_min_lat_nsec(blkg, lat_val); + if (enable) { + if (!blk_get_queue(blkg->q)) { + ret = -ENODEV; + goto out; + } + + blkg_get(blkg); + } + if (oldval != iolat->min_lat_nsec) { iolatency_clear_scaling(blkg); } @@ -811,6 +815,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, ret = 0; out: blkg_conf_finish(&ctx); + if (ret == 0 && enable) { + struct iolatency_grp *tmp = blkg_to_lat(blkg); + struct blk_iolatency *blkiolat = tmp->blkiolat; + + blk_mq_freeze_queue(blkg->q); + + if (enable == 1) + atomic_inc(&blkiolat->enabled); + else if (enable == -1) + atomic_dec(&blkiolat->enabled); + else + WARN_ON_ONCE(1); + + blk_mq_unfreeze_queue(blkg->q); + + blkg_put(blkg); + blk_put_queue(blkg->q); + } return ret ?: nbytes; } @@ -873,7 +895,7 @@ static void 
iolatency_pd_init(struct blkg_policy_data *pd) struct blkcg_gq *blkg = lat_to_blkg(iolat); struct rq_qos *rqos = blkcg_rq_qos(blkg->q); struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); int cpu; for_each_possible_cpu(cpu) { @@ -910,8 +932,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) { struct iolatency_grp *iolat = pd_to_lat(pd); struct blkcg_gq *blkg = lat_to_blkg(iolat); + struct blk_iolatency *blkiolat = iolat->blkiolat; + int ret; - iolatency_set_min_lat_nsec(blkg, 0); + ret = iolatency_set_min_lat_nsec(blkg, 0); + if (ret == 1) + atomic_inc(&blkiolat->enabled); + if (ret == -1) + atomic_dec(&blkiolat->enabled); iolatency_clear_scaling(blkg); } diff --git a/block/blk-lib.c b/block/blk-lib.c index bbd44666f2b516c758a0334a2e7b45ce3a291c84..71a551a497ea1c609f880d332ccca242c8c32376 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -48,20 +48,27 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, op = REQ_OP_DISCARD; } + /* In case the discard granularity isn't set by buggy device driver */ + if (WARN_ON_ONCE(!q->limits.discard_granularity)) { + char dev_name[BDEVNAME_SIZE]; + + bdevname(bdev, dev_name); + pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name); + return -EOPNOTSUPP; + } + bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; if ((sector | nr_sects) & bs_mask) return -EINVAL; - while (nr_sects) { - unsigned int req_sects = nr_sects; - sector_t end_sect; + if (!nr_sects) + return -EINVAL; - if (!req_sects) - goto fail; - if (req_sects > UINT_MAX >> 9) - req_sects = UINT_MAX >> 9; + while (nr_sects) { + sector_t req_sects = min_t(sector_t, nr_sects, + bio_allowed_max_sectors(q)); - end_sect = sector + req_sects; + WARN_ON_ONCE((req_sects << 9) > UINT_MAX); bio = next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; @@ -69,8 +76,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, bio_set_op_attrs(bio, op, 0); bio->bi_iter.bi_size = req_sects << 9; + sector += req_sects; nr_sects -= req_sects; - sector = end_sect; /* * We can loop for a long time in here, if someone does @@ -83,14 +90,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, *biop = bio; return 0; - -fail: - if (bio) { - submit_bio_wait(bio); - bio_put(bio); - } - *biop = NULL; - return -EOPNOTSUPP; } EXPORT_SYMBOL(__blkdev_issue_discard); @@ -162,7 +161,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, return -EOPNOTSUPP; /* Ensure that max_write_same_sectors doesn't overflow bi_size */ - max_write_same_sectors = UINT_MAX >> 9; + max_write_same_sectors = bio_allowed_max_sectors(q); while (nr_sects) { bio = next_bio(bio, 1, gfp_mask); diff --git a/block/blk-map.c b/block/blk-map.c index db9373bd31aca0e9393dd77ec9ef5f404d88923e..9d8627acc2f59c51f5b752a0a93c0e0b6973a555 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -145,7 +145,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, return 0; unmap_rq: - __blk_rq_unmap_user(bio); + blk_rq_unmap_user(bio); fail: rq->bio = NULL; return ret; diff --git a/block/blk-merge.c b/block/blk-merge.c index aaec38cc37b86489cdfed9ff8f4202f72516ede0..9f9d803e064b05b81abdc8052e2253dd65e973a5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -27,7 +27,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q, /* Zero-sector (unknown) and one-sector granularities are the same. 
*/ granularity = max(q->limits.discard_granularity >> 9, 1U); - max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + max_discard_sectors = min(q->limits.max_discard_sectors, + bio_allowed_max_sectors(q)); max_discard_sectors -= max_discard_sectors % granularity; if (unlikely(!max_discard_sectors)) { @@ -234,7 +235,8 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, { struct bio_vec bv, bvprv = { NULL }; int cluster, prev = 0; - unsigned int seg_size, nr_phys_segs; + unsigned int nr_phys_segs = 0; + unsigned int seg_size; struct bio *fbio, *bbio; struct bvec_iter iter; @@ -244,6 +246,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, switch (bio_op(bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: + if (queue_max_discard_segments(q) > 1) { + for_each_bio(bio) + nr_phys_segs++; + return nr_phys_segs; + } + return 1; case REQ_OP_WRITE_ZEROES: return 0; case REQ_OP_WRITE_SAME: @@ -253,7 +261,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, fbio = bio; cluster = blk_queue_cluster(q); seg_size = 0; - nr_phys_segs = 0; for_each_bio(bio) { bio_for_each_segment(bv, bio, iter) { /* @@ -308,13 +315,7 @@ void blk_recalc_rq_segments(struct request *rq) void blk_recount_segments(struct request_queue *q, struct bio *bio) { - unsigned short seg_cnt; - - /* estimate segment number by bi_vcnt for non-cloned bio */ - if (bio_flagged(bio, BIO_CLONED)) - seg_cnt = bio_segments(bio); - else - seg_cnt = bio->bi_vcnt; + unsigned short seg_cnt = bio_segments(bio); if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && (seg_cnt < queue_max_segments(q))) @@ -482,13 +483,20 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +static inline unsigned int blk_rq_get_max_segments(struct request *rq) +{ + if (req_op(rq) == REQ_OP_DISCARD) + return queue_max_discard_segments(rq->q); + return queue_max_segments(rq->q); +} + static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) { int nr_phys_segs = bio_phys_segments(q, bio); - if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) + if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req)) goto no_merge; if (blk_integrity_merge_bio(q, req, bio) == false) @@ -611,7 +619,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, total_phys_segments--; } - if (total_phys_segments > queue_max_segments(q)) + if (total_phys_segments > blk_rq_get_max_segments(req)) return 0; if (blk_integrity_merge_rq(q, req, next) == false) @@ -661,13 +669,39 @@ static void blk_account_io_merge(struct request *req) cpu = part_stat_lock(); part = req->part; - part_round_stats(req->q, cpu, part); + if (precise_iostat) + part_round_stats(req->q, cpu, part); part_dec_in_flight(req->q, part, rq_data_dir(req)); hd_struct_put(part); part_stat_unlock(); } } +/* + * Two cases of handling DISCARD merge: + * If max_discard_segments > 1, the driver takes every bio + * as a range and send them to controller together. The ranges + * needn't to be contiguous. + * Otherwise, the bios/requests will be handled as same as + * others which should be contiguous. 
+ */ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + +enum elv_merge blk_try_req_merge(struct request *req, struct request *next) +{ + if (blk_discard_mergable(req)) + return ELEVATOR_DISCARD_MERGE; + else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) + return ELEVATOR_BACK_MERGE; + + return ELEVATOR_NO_MERGE; +} /* * For non-mq, this has to be called with the request spinlock acquired. @@ -685,12 +719,6 @@ static struct request *attempt_merge(struct request_queue *q, if (req_op(req) != req_op(next)) return NULL; - /* - * not contiguous - */ - if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) - return NULL; - if (rq_data_dir(req) != rq_data_dir(next) || req->rq_disk != next->rq_disk || req_no_special_merge(next)) @@ -707,6 +735,9 @@ static struct request *attempt_merge(struct request_queue *q, if (req->write_hint != next->write_hint) return NULL; + if (req->ioprio != next->ioprio) + return NULL; + /* * If we are allowed to merge, then append bio list * from next to rq and release next. merge_requests_fn @@ -714,11 +745,19 @@ static struct request *attempt_merge(struct request_queue *q, * counts here. Handle DISCARDs separately, as they * have separate settings. */ - if (req_op(req) == REQ_OP_DISCARD) { + + switch (blk_try_req_merge(req, next)) { + case ELEVATOR_DISCARD_MERGE: if (!req_attempt_discard_merge(q, req, next)) return NULL; - } else if (!ll_merge_requests_fn(q, req, next)) + break; + case ELEVATOR_BACK_MERGE: + if (!ll_merge_requests_fn(q, req, next)) + return NULL; + break; + default: return NULL; + } /* * If failfast settings disagree or any of the two is already @@ -745,8 +784,9 @@ static struct request *attempt_merge(struct request_queue *q, req->biotail = next->biotail; req->__data_len += blk_rq_bytes(next); + blk_rq_update_bi_alloc_time(req, NULL, next); - if (req_op(req) != REQ_OP_DISCARD) + if (!blk_discard_mergable(req)) elv_merge_requests(q, req, next); /* @@ -754,7 +794,6 @@ static struct request *attempt_merge(struct request_queue *q, */ blk_account_io_merge(next); - req->ioprio = ioprio_best(req->ioprio, next->ioprio); if (blk_rq_cpu_valid(next)) req->cpu = next->cpu; @@ -786,23 +825,21 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq) return NULL; } -int blk_attempt_req_merge(struct request_queue *q, struct request *rq, +/* + * Try to merge 'next' into 'rq'. Return true if the merge happened, false + * otherwise. The caller is responsible for freeing 'next' if the merge + * happened. 
+ */ +bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next) { struct elevator_queue *e = q->elevator; - struct request *free; if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn) if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next)) return 0; - free = attempt_merge(q, rq, next); - if (free) { - __blk_put_request(q, free); - return 1; - } - - return 0; + return attempt_merge(q, rq, next); } bool blk_rq_merge_ok(struct request *rq, struct bio *bio) @@ -837,13 +874,15 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (rq->write_hint != bio->bi_write_hint) return false; + if (rq->ioprio != bio_prio(bio)) + return false; + return true; } enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) { - if (req_op(rq) == REQ_OP_DISCARD && - queue_max_discard_segments(rq->q) > 1) + if (blk_discard_mergable(rq)) return ELEVATOR_DISCARD_MERGE; else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) return ELEVATOR_BACK_MERGE; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index cb1e6cf7ac48f4e187e915896376cdfec79c9d2a..8f99b216ca0cd9a86eb2747a0bbe1900361c2096 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -23,6 +23,7 @@ #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" +#include "blk-io-hierarchy/stats.h" static void print_stat(struct seq_file *m, struct blk_rq_stat *stat) { @@ -102,6 +103,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags, return 0; } +static int queue_pm_only_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + + seq_printf(m, "%d\n", atomic_read(&q->pm_only)); + return 0; +} + #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(QUEUED), @@ -132,7 +141,6 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(SCSI_PASSTHROUGH), QUEUE_FLAG_NAME(QUIESCED), - QUEUE_FLAG_NAME(PREEMPT_ONLY), }; #undef QUEUE_FLAG_NAME @@ -209,6 +217,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf, static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, + { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, @@ -347,9 +356,13 @@ static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) return blk_mq_rq_state_name_array[rq_state]; } -int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) +/* + * This helper will dump general information for @rq into @m, started with '{' + * and doesn't end with '}', caller must include a closing curly brace '}' at + * the end after adding the custom string. 
+ */ +void debugfs_rq_show(struct seq_file *m, struct request *rq) { - const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; const unsigned int op = rq->cmd_flags & REQ_OP_MASK; seq_printf(m, "%p {.op=", rq); @@ -366,6 +379,13 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, rq->internal_tag); +} + +int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) +{ + const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; + + debugfs_rq_show(m, rq); if (mq_ops->show_rq) mq_ops->show_rq(m, rq); seq_puts(m, "}\n"); @@ -803,35 +823,25 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { {}, }; -static bool debugfs_create_files(struct dentry *parent, void *data, - const struct blk_mq_debugfs_attr *attr) +void debugfs_create_files(struct dentry *parent, void *data, + const struct blk_mq_debugfs_attr *attr) { + if (IS_ERR_OR_NULL(parent)) + return; + d_inode(parent)->i_private = data; - for (; attr->name; attr++) { - if (!debugfs_create_file(attr->name, attr->mode, parent, - (void *)attr, &blk_mq_debugfs_fops)) - return false; - } - return true; + for (; attr->name; attr++) + debugfs_create_file(attr->name, attr->mode, parent, + (void *)attr, &blk_mq_debugfs_fops); } -int blk_mq_debugfs_register(struct request_queue *q) +void blk_mq_debugfs_register(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; - if (!blk_debugfs_root) - return -ENOENT; - - q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), - blk_debugfs_root); - if (!q->debugfs_dir) - return -ENOMEM; - - if (!debugfs_create_files(q->debugfs_dir, q, - blk_mq_debugfs_queue_attrs)) - goto err; + debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); /* * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir @@ -843,93 +853,68 @@ int blk_mq_debugfs_register(struct request_queue *q) /* Similarly, blk_mq_init_hctx() couldn't do this previously. 
*/ queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) - goto err; - if (q->elevator && !hctx->sched_debugfs_dir && - blk_mq_debugfs_register_sched_hctx(q, hctx)) - goto err; + if (!hctx->debugfs_dir) + blk_mq_debugfs_register_hctx(q, hctx); + if (q->elevator && !hctx->sched_debugfs_dir) + blk_mq_debugfs_register_sched_hctx(q, hctx); } - return 0; - -err: - blk_mq_debugfs_unregister(q); - return -ENOMEM; + blk_mq_debugfs_register_hierarchy_stats(q); } -void blk_mq_debugfs_unregister(struct request_queue *q) -{ - debugfs_remove_recursive(q->debugfs_dir); - q->sched_debugfs_dir = NULL; - q->debugfs_dir = NULL; -} - -static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx) +static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) { struct dentry *ctx_dir; char name[20]; snprintf(name, sizeof(name), "cpu%u", ctx->cpu); ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir); - if (!ctx_dir) - return -ENOMEM; - - if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs)) - return -ENOMEM; - return 0; + debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs); } -int blk_mq_debugfs_register_hctx(struct request_queue *q, - struct blk_mq_hw_ctx *hctx) +void blk_mq_debugfs_register_hctx(struct request_queue *q, + struct blk_mq_hw_ctx *hctx) { struct blk_mq_ctx *ctx; char name[20]; int i; + lockdep_assert_held(&q->debugfs_mutex); + if (!q->debugfs_dir) - return -ENOENT; + return; snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); - if (!hctx->debugfs_dir) - return -ENOMEM; - - if (!debugfs_create_files(hctx->debugfs_dir, hctx, - blk_mq_debugfs_hctx_attrs)) - goto err; - hctx_for_each_ctx(hctx, ctx, i) { - if (blk_mq_debugfs_register_ctx(hctx, ctx)) - goto err; - } - - return 0; + debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs); -err: - blk_mq_debugfs_unregister_hctx(hctx); - return -ENOMEM; + hctx_for_each_ctx(hctx, ctx, i) + blk_mq_debugfs_register_ctx(hctx, ctx); } void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) { + lockdep_assert_held(&hctx->queue->debugfs_mutex); + + if (!hctx->queue->debugfs_dir) + return; debugfs_remove_recursive(hctx->debugfs_dir); hctx->sched_debugfs_dir = NULL; hctx->debugfs_dir = NULL; } -int blk_mq_debugfs_register_hctxs(struct request_queue *q) +void blk_mq_debugfs_register_hctxs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - if (blk_mq_debugfs_register_hctx(q, hctx)) - return -ENOMEM; - } - - return 0; + mutex_lock(&q->debugfs_mutex); + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_debugfs_register_hctx(q, hctx); + mutex_unlock(&q->debugfs_mutex); } void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) @@ -937,66 +922,71 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + mutex_lock(&q->debugfs_mutex); queue_for_each_hw_ctx(q, hctx, i) blk_mq_debugfs_unregister_hctx(hctx); + mutex_unlock(&q->debugfs_mutex); } -int blk_mq_debugfs_register_sched(struct request_queue *q) +void blk_mq_debugfs_register_sched(struct request_queue *q) { struct elevator_type *e = q->elevator->type; + lockdep_assert_held(&q->debugfs_mutex); + + /* + * If the parent directory has not been created yet, return, we will be + * called again later on and the directory/files will be created then. 
+ */ if (!q->debugfs_dir) - return -ENOENT; + return; if (!e->queue_debugfs_attrs) - return 0; + return; q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir); - if (!q->sched_debugfs_dir) - return -ENOMEM; - if (!debugfs_create_files(q->sched_debugfs_dir, q, - e->queue_debugfs_attrs)) - goto err; - - return 0; - -err: - blk_mq_debugfs_unregister_sched(q); - return -ENOMEM; + debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs); } void blk_mq_debugfs_unregister_sched(struct request_queue *q) { + lockdep_assert_held(&q->debugfs_mutex); + debugfs_remove_recursive(q->sched_debugfs_dir); q->sched_debugfs_dir = NULL; } -int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, - struct blk_mq_hw_ctx *hctx) +void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, + struct blk_mq_hw_ctx *hctx) { struct elevator_type *e = q->elevator->type; + lockdep_assert_held(&q->debugfs_mutex); + + /* + * If the parent debugfs directory has not been created yet, return; + * We will be called again later on with appropriate parent debugfs + * directory from blk_register_queue() + */ if (!hctx->debugfs_dir) - return -ENOENT; + return; if (!e->hctx_debugfs_attrs) - return 0; + return; hctx->sched_debugfs_dir = debugfs_create_dir("sched", hctx->debugfs_dir); - if (!hctx->sched_debugfs_dir) - return -ENOMEM; - - if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx, - e->hctx_debugfs_attrs)) - return -ENOMEM; - - return 0; + debugfs_create_files(hctx->sched_debugfs_dir, hctx, + e->hctx_debugfs_attrs); } void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) { + lockdep_assert_held(&hctx->queue->debugfs_mutex); + + if (!hctx->queue->debugfs_dir) + return; debugfs_remove_recursive(hctx->sched_debugfs_dir); hctx->sched_debugfs_dir = NULL; } diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h index a9160be12be05a527c0d968a6534e4a244f989e1..4ef149cf5cfbc78eedb1c899fe30d86c8cfa9077 100644 --- a/block/blk-mq-debugfs.h +++ b/block/blk-mq-debugfs.h @@ -15,64 +15,64 @@ struct blk_mq_debugfs_attr { const struct seq_operations *seq_ops; }; +void debugfs_rq_show(struct seq_file *m, struct request *rq); int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq); int blk_mq_debugfs_rq_show(struct seq_file *m, void *v); -int blk_mq_debugfs_register(struct request_queue *q); -void blk_mq_debugfs_unregister(struct request_queue *q); -int blk_mq_debugfs_register_hctx(struct request_queue *q, +void blk_mq_debugfs_register(struct request_queue *q); +void blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx); void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx); -int blk_mq_debugfs_register_hctxs(struct request_queue *q); +void blk_mq_debugfs_register_hctxs(struct request_queue *q); void blk_mq_debugfs_unregister_hctxs(struct request_queue *q); -int blk_mq_debugfs_register_sched(struct request_queue *q); +void blk_mq_debugfs_register_sched(struct request_queue *q); void blk_mq_debugfs_unregister_sched(struct request_queue *q); -int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, +void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx); void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx); -#else -static inline int blk_mq_debugfs_register(struct request_queue *q) + +void debugfs_create_files(struct dentry *parent, void *data, + const struct blk_mq_debugfs_attr *attr); + +static inline bool blk_mq_debugfs_enabled(struct request_queue *q) { - return 0; + 
return !IS_ERR_OR_NULL(q->debugfs_dir); } -static inline void blk_mq_debugfs_unregister(struct request_queue *q) +#else +static inline void blk_mq_debugfs_register(struct request_queue *q) { } -static inline int blk_mq_debugfs_register_hctx(struct request_queue *q, - struct blk_mq_hw_ctx *hctx) +static inline void blk_mq_debugfs_register_hctx(struct request_queue *q, + struct blk_mq_hw_ctx *hctx) { - return 0; } static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) { } -static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q) +static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q) { - return 0; } static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) { } -static inline int blk_mq_debugfs_register_sched(struct request_queue *q) +static inline void blk_mq_debugfs_register_sched(struct request_queue *q) { - return 0; } static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q) { } -static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, - struct blk_mq_hw_ctx *hctx) +static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, + struct blk_mq_hw_ctx *hctx) { - return 0; } static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 29bfe8017a2d8e6cbadeab6b9d1d63d293f656a5..443d92e8982a5ae0be5e60fbc038a83fccfe9872 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -15,6 +15,7 @@ #include "blk-mq-sched.h" #include "blk-mq-tag.h" #include "blk-wbt.h" +#include "blk-io-hierarchy/stats.h" void blk_mq_sched_free_hctx_data(struct request_queue *q, void (*exit)(struct blk_mq_hw_ctx *)) @@ -54,13 +55,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio) * Mark a hardware queue as needing a restart. For shared queues, maintain * a count of how many hardware queues are marked for restart. */ -static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) return; set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } +EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) { @@ -68,19 +70,35 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) + * in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, + * meantime new request added to hctx->dispatch is missed to check in + * blk_mq_run_hw_queue(). + */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } +#define BLK_MQ_BUDGET_DELAY 3 /* ms units */ + /* * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. + * + * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to + * be run again. This is necessary to avoid starving flushes. 
*/ -static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) +static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; LIST_HEAD(rq_list); + int ret = 0; + unsigned long end = jiffies + HZ; do { struct request *rq; @@ -89,12 +107,25 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) !e->type->ops.mq.has_work(hctx)) break; + if (!list_empty_careful(&hctx->dispatch)) { + ret = -EAGAIN; + break; + } + if (!blk_mq_get_dispatch_budget(hctx)) break; rq = e->type->ops.mq.dispatch_request(hctx); if (!rq) { blk_mq_put_dispatch_budget(hctx); + /* + * We're releasing without dispatching. Holding the + * budget could have blocked any "hctx"s with the + * same queue and if we didn't dispatch then there's + * no guarantee anyone will kick the queue. Kick it + * ourselves. + */ + blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); break; } @@ -104,7 +135,16 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) * in blk_mq_dispatch_rq_list(). */ list_add(&rq->queuelist, &rq_list); - } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); + + if (!blk_mq_dispatch_rq_list(q, &rq_list, true)) + break; + if (need_resched() || time_is_before_jiffies(end)) { + blk_mq_delay_run_hw_queue(hctx, 0); + break; + } + } while (1); + + return ret; } static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, @@ -122,16 +162,25 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. + * + * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to + * to be run again. This is necessary to avoid starving flushes. */ -static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) +static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; LIST_HEAD(rq_list); struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); + int ret = 0; do { struct request *rq; + if (!list_empty_careful(&hctx->dispatch)) { + ret = -EAGAIN; + break; + } + if (!sbitmap_any_bit_set(&hctx->ctx_map)) break; @@ -141,6 +190,14 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) rq = blk_mq_dequeue_from_ctx(hctx, ctx); if (!rq) { blk_mq_put_dispatch_budget(hctx); + /* + * We're releasing without dispatching. Holding the + * budget could have blocked any "hctx"s with the + * same queue and if we didn't dispatch then there's + * no guarantee anyone will kick the queue. Kick it + * ourselves. + */ + blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); break; } @@ -157,21 +214,17 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); WRITE_ONCE(hctx->dispatch_from, ctx); + return ret; } -void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) +int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; + int ret = 0; LIST_HEAD(rq_list); - /* RCU or SRCU read lock is needed before checking quiesced flag */ - if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) - return; - - hctx->run++; - /* * If we have previous entries on our dispatch list, grab them first for * more fair dispatch. 
@@ -198,21 +251,45 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); + rq_list_hierarchy_end_io_acct(&rq_list, STAGE_HCTX); if (blk_mq_dispatch_rq_list(q, &rq_list, false)) { if (has_sched_dispatch) - blk_mq_do_dispatch_sched(hctx); + ret = blk_mq_do_dispatch_sched(hctx); else - blk_mq_do_dispatch_ctx(hctx); + ret = blk_mq_do_dispatch_ctx(hctx); } } else if (has_sched_dispatch) { - blk_mq_do_dispatch_sched(hctx); + ret = blk_mq_do_dispatch_sched(hctx); } else if (hctx->dispatch_busy) { /* dequeue request one by one from sw queue if queue is busy */ - blk_mq_do_dispatch_ctx(hctx); + ret = blk_mq_do_dispatch_ctx(hctx); } else { blk_mq_flush_busy_ctxs(hctx, &rq_list); blk_mq_dispatch_rq_list(q, &rq_list, false); } + + return ret; +} + +void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + + /* RCU or SRCU read lock is needed before checking quiesced flag */ + if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) || + blk_queue_quiesced_internal(q))) + return; + + hctx->run++; + + /* + * A return of -EAGAIN is an indication that hctx->dispatch is not + * empty and we must run again in order to avoid starving flushes. + */ + if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) { + if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) + blk_mq_run_hw_queue(hctx, true); + } } bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, @@ -314,10 +391,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); bool ret = false; - if (e && e->type->ops.mq.bio_merge) { - blk_mq_put_ctx(ctx); + if (e && e->type->ops.mq.bio_merge) return e->type->ops.mq.bio_merge(hctx, bio); - } if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && !list_empty_careful(&ctx->rq_list)) { @@ -327,13 +402,13 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) spin_unlock(&ctx->lock); } - blk_mq_put_ctx(ctx); return ret; } -bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq) +bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, + struct list_head *free) { - return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq); + return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free); } EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge); @@ -347,13 +422,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, bool has_sched, struct request *rq) { - /* dispatch flush rq directly */ - if (rq->rq_flags & RQF_FLUSH_SEQ) { - spin_lock(&hctx->lock); - list_add(&rq->queuelist, &hctx->dispatch); - spin_unlock(&hctx->lock); + /* + * dispatch flush and passthrough rq directly + * + * passthrough request has to be added to hctx->dispatch directly. + * For some reason, device may be in one situation which can't + * handle FS request, so STS_RESOURCE is always returned and the + * FS request will be added to hctx->dispatch. However passthrough + * request may be required at that time for fixing the problem. If + * passthrough request is added to scheduler queue, there isn't any + * chance to dispatch it given we prioritize requests in hctx->dispatch. 
+ */ + if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) return true; - } if (has_sched) rq->rq_flags |= RQF_SORTED; @@ -377,8 +458,32 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head, WARN_ON(e && (rq->tag != -1)); - if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) + if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { + /* + * Firstly normal IO request is inserted to scheduler queue or + * sw queue, meantime we add flush request to dispatch queue( + * hctx->dispatch) directly and there is at most one in-flight + * flush request for each hw queue, so it doesn't matter to add + * flush request to tail or front of the dispatch queue. + * + * Secondly in case of NCQ, flush request belongs to non-NCQ + * command, and queueing it will fail when there is any + * in-flight normal IO request(NCQ command). When adding flush + * rq to the front of hctx->dispatch, it is easier to introduce + * extra time to flush rq's latency because of S_SCHED_RESTART + * compared with adding to the tail of dispatch queue, then + * chance of flush merge is increased, and less flush requests + * will be issued to controller. It is observed that ~10% time + * is saved in blktests block/004 on disk attached to AHCI/NCQ + * drive when adding flush rq to the front of hctx->dispatch. + * + * Simply queue flush rq to the front of hctx->dispatch so that + * intensive flush workloads can benefit in case of NCQ HW. + */ + at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; + blk_mq_request_bypass_insert(rq, at_head, false); goto run; + } if (e && e->type->ops.mq.insert_requests) { LIST_HEAD(list); @@ -403,6 +508,13 @@ void blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); struct elevator_queue *e = hctx->queue->elevator; + /* + * blk_mq_sched_insert_requests() is called from flush plug + * context only, and hold one usage counter to prevent queue + * from being released. 
+ */ + percpu_ref_get(&q->q_usage_counter); + if (e && e->type->ops.mq.insert_requests) e->type->ops.mq.insert_requests(hctx, list, false); else { @@ -414,12 +526,14 @@ void blk_mq_sched_insert_requests(struct request_queue *q, if (!hctx->dispatch_busy && !e && !run_queue_async) { blk_mq_try_issue_list_directly(hctx, list); if (list_empty(list)) - return; + goto out; } blk_mq_insert_requests(hctx, ctx, list); } blk_mq_run_hw_queue(hctx, run_queue_async); + out: + percpu_ref_put(&q->q_usage_counter); } static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, @@ -446,8 +560,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, return -ENOMEM; ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); - if (ret) - blk_mq_sched_free_tags(set, hctx, hctx_idx); + if (ret) { + blk_mq_free_rq_map(hctx->sched_tags); + hctx->sched_tags = NULL; + } return ret; } @@ -493,7 +609,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) if (ret) goto err; + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_sched(q); + mutex_unlock(&q->debugfs_mutex); queue_for_each_hw_ctx(q, hctx, i) { if (e->ops.mq.init_hctx) { @@ -505,7 +623,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return ret; } } + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_sched_hctx(q, hctx); + mutex_unlock(&q->debugfs_mutex); } return 0; @@ -522,13 +642,20 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) unsigned int i; queue_for_each_hw_ctx(q, hctx, i) { + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_unregister_sched_hctx(hctx); + mutex_unlock(&q->debugfs_mutex); + if (e->type->ops.mq.exit_hctx && hctx->sched_data) { e->type->ops.mq.exit_hctx(hctx, i); hctx->sched_data = NULL; } } + + mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_unregister_sched(q); + mutex_unlock(&q->debugfs_mutex); + if (e->type->ops.mq.exit_sched) e->type->ops.mq.exit_sched(e); blk_mq_sched_tags_teardown(q); diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 4e028ee4243014ff8eed44627f8e0cc5068217ca..c81a26e7b6209b722965203a135ffddf6316b058 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -14,7 +14,9 @@ void blk_mq_sched_request_inserted(struct request *rq); bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); -bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); +bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, + struct list_head *free); +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, @@ -71,7 +73,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq) struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; - if (e && e->type->ops.mq.requeue_request) + if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.mq.requeue_request) e->type->ops.mq.requeue_request(rq); } diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index aafb44224c896ca24782032da5bb6193dfb9c269..cace619d742148c1675b451c0fb0828aace13d21 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -10,17 +10,35 @@ #include #include +#include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" static void blk_mq_sysfs_release(struct kobject *kobj) { + struct blk_mq_ctxs *ctxs = container_of(kobj, 
struct blk_mq_ctxs, kobj); + + free_percpu(ctxs->queue_ctx); + kfree(ctxs); +} + +static void blk_mq_ctx_sysfs_release(struct kobject *kobj) +{ + struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj); + + /* ctx->ctxs won't be released until all ctx are freed */ + kobject_put(&ctx->ctxs->kobj); } static void blk_mq_hw_sysfs_release(struct kobject *kobj) { struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); + + if (hctx->flags & BLK_MQ_F_BLOCKING) + cleanup_srcu_struct(hctx->srcu); + blk_free_flush_queue(hctx->fq); + sbitmap_free(&hctx->ctx_map); free_cpumask_var(hctx->cpumask); kfree(hctx->ctxs); kfree(hctx); @@ -145,20 +163,25 @@ static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { + const size_t size = PAGE_SIZE - 1; unsigned int i, first = 1; - ssize_t ret = 0; + int ret = 0, pos = 0; for_each_cpu(i, hctx->cpumask) { if (first) - ret += sprintf(ret + page, "%u", i); + ret = snprintf(pos + page, size - pos, "%u", i); else - ret += sprintf(ret + page, ", %u", i); + ret = snprintf(pos + page, size - pos, ", %u", i); + + if (ret >= size - pos) + break; first = 0; + pos += ret; } - ret += sprintf(ret + page, "\n"); - return ret; + ret = snprintf(pos + page, size + 1 - pos, "\n"); + return pos + ret; } static struct attribute *default_ctx_attrs[] = { @@ -203,7 +226,7 @@ static struct kobj_type blk_mq_ktype = { static struct kobj_type blk_mq_ctx_ktype = { .sysfs_ops = &blk_mq_sysfs_ops, .default_attrs = default_ctx_attrs, - .release = blk_mq_sysfs_release, + .release = blk_mq_ctx_sysfs_release, }; static struct kobj_type blk_mq_hw_ktype = { @@ -230,21 +253,28 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; - int i, ret; + int i, j, ret; if (!hctx->nr_ctx) return 0; - ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); + ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); if (ret) return ret; hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) - break; + goto out; } + return 0; +out: + hctx_for_each_ctx(hctx, ctx, j) { + if (j < i) + kobject_del(&ctx->kobj); + } + kobject_del(&hctx->kobj); return ret; } @@ -253,13 +283,13 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - lockdep_assert_held(&q->sysfs_lock); + lockdep_assert_held(&queue_to_wrapper(q)->sysfs_dir_lock); queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); - kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); - kobject_del(&q->mq_kobj); + kobject_uevent(q->mq_kobj, KOBJ_REMOVE); + kobject_del(q->mq_kobj); kobject_put(&dev->kobj); q->mq_sysfs_init_done = false; @@ -279,7 +309,7 @@ void blk_mq_sysfs_deinit(struct request_queue *q) ctx = per_cpu_ptr(q->queue_ctx, cpu); kobject_put(&ctx->kobj); } - kobject_put(&q->mq_kobj); + kobject_put(q->mq_kobj); } void blk_mq_sysfs_init(struct request_queue *q) @@ -287,10 +317,12 @@ void blk_mq_sysfs_init(struct request_queue *q) struct blk_mq_ctx *ctx; int cpu; - kobject_init(&q->mq_kobj, &blk_mq_ktype); + kobject_init(q->mq_kobj, &blk_mq_ktype); for_each_possible_cpu(cpu) { ctx = per_cpu_ptr(q->queue_ctx, cpu); + + kobject_get(q->mq_kobj); kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); } } @@ -301,13 +333,13 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q) int ret, i; WARN_ON_ONCE(!q->kobj.parent); - 
lockdep_assert_held(&q->sysfs_lock); + lockdep_assert_held(&queue_to_wrapper(q)->sysfs_dir_lock); - ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); + ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) goto out; - kobject_uevent(&q->mq_kobj, KOBJ_ADD); + kobject_uevent(q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); @@ -324,8 +356,8 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q) while (--i >= 0) blk_mq_unregister_hctx(q->queue_hw_ctx[i]); - kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); - kobject_del(&q->mq_kobj); + kobject_uevent(q->mq_kobj, KOBJ_REMOVE); + kobject_del(q->mq_kobj); kobject_put(&dev->kobj); return ret; } @@ -334,9 +366,9 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q) { int ret; - mutex_lock(&q->sysfs_lock); + mutex_lock(&queue_to_wrapper(q)->sysfs_dir_lock); ret = __blk_mq_register_dev(dev, q); - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); return ret; } @@ -347,7 +379,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - mutex_lock(&q->sysfs_lock); + mutex_lock(&queue_to_wrapper(q)->sysfs_dir_lock); if (!q->mq_sysfs_init_done) goto unlock; @@ -355,7 +387,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q) blk_mq_unregister_hctx(hctx); unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); } int blk_mq_sysfs_register(struct request_queue *q) @@ -363,7 +395,7 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; - mutex_lock(&q->sysfs_lock); + mutex_lock(&queue_to_wrapper(q)->sysfs_dir_lock); if (!q->mq_sysfs_init_done) goto unlock; @@ -374,7 +406,7 @@ int blk_mq_sysfs_register(struct request_queue *q) } unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); return ret; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 41317c50a44628e9ef4930e9f17ad6d8297c9190..f7b21d7f136e77d2f6620c4a04dc469b4b61fbf3 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -9,9 +9,11 @@ #include #include +#include #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" +#include "blk-io-hierarchy/stats.h" bool blk_mq_has_free_tags(struct blk_mq_tags *tags) { @@ -112,7 +114,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) struct sbq_wait_state *ws; DEFINE_WAIT(wait); unsigned int tag_offset; - bool drop_ctx; int tag; if (data->flags & BLK_MQ_REQ_RESERVED) { @@ -134,8 +135,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (data->flags & BLK_MQ_REQ_NOWAIT) return BLK_MQ_TAG_FAIL; + if (data->bio) + bio_hierarchy_start_io_acct(data->bio, STAGE_GETTAG); ws = bt_wait_ptr(bt, data->hctx); - drop_ctx = data->ctx == NULL; do { struct sbitmap_queue *bt_prev; @@ -161,9 +163,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (tag != -1) break; - if (data->ctx) - blk_mq_put_ctx(data->ctx); - bt_prev = bt; io_schedule(); @@ -188,10 +187,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) ws = bt_wait_ptr(bt, data->hctx); } while (1); - if (drop_ctx && data->ctx) - blk_mq_put_ctx(data->ctx); - finish_wait(&ws->wait, &wait); + if (data->bio) + bio_hierarchy_end_io_acct(data->bio, STAGE_GETTAG); found_tag: return tag + tag_offset; @@ -218,6 +216,20 @@ struct bt_iter_data { bool reserved; }; +static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags, + unsigned int 
bitnr) +{ + struct request *rq; + unsigned long flags; + + blk_mq_tags_lock_irqsave(tags, flags); + rq = tags->rqs[bitnr]; + if (!rq || !refcount_inc_not_zero(&rq->ref)) + rq = NULL; + blk_mq_tags_unlock_irqrestore(tags, flags); + return rq; +} + static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) { struct bt_iter_data *iter_data = data; @@ -234,8 +246,13 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) * We can hit rq == NULL here, because the tagging functions * test and set the bit before assining ->rqs[]. */ - if (rq && rq->q == hctx->queue) + rq = blk_mq_find_and_get_req(tags, bitnr); + if (!rq) + return true; + + if (rq->q == hctx->queue) iter_data->fn(hctx, rq, iter_data->data, reserved); + blk_mq_put_rq_ref(rq); return true; } @@ -273,9 +290,13 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) * We can hit rq == NULL here, because the tagging functions * test and set the bit before assining ->rqs[]. */ - rq = tags->rqs[bitnr]; - if (rq && blk_mq_request_started(rq)) + rq = blk_mq_find_and_get_req(tags, bitnr); + if (!rq) + return true; + + if (blk_mq_request_started(rq)) iter_data->fn(rq, iter_data->data, reserved); + blk_mq_put_rq_ref(rq); return true; } @@ -314,6 +335,36 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, } EXPORT_SYMBOL(blk_mq_tagset_busy_iter); +static void blk_mq_tagset_count_completed_rqs(struct request *rq, + void *data, bool reserved) +{ + unsigned *count = data; + + if (blk_mq_request_completed(rq)) + (*count)++; +} + +/** + * blk_mq_tagset_wait_completed_request - wait until all completed req's + * complete funtion is run + * @tagset: Tag set to drain completed request + * + * Note: This function has to be run after all IO queues are shutdown + */ +void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset) +{ + while (true) { + unsigned count = 0; + + blk_mq_tagset_busy_iter(tagset, + blk_mq_tagset_count_completed_rqs, &count); + if (!count) + break; + msleep(5); + } +} +EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request); + void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { @@ -368,7 +419,7 @@ static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, free_bitmap_tags: sbitmap_queue_free(&tags->bitmap_tags); free_tags: - kfree(tags); + kfree(blk_mq_tags_to_wrapper(tags)); return NULL; } @@ -377,18 +428,21 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, int node, int alloc_policy) { struct blk_mq_tags *tags; + struct blk_mq_tags_wrapper *tags_wrapper; if (total_tags > BLK_MQ_TAG_MAX) { pr_err("blk-mq: tag depth too large\n"); return NULL; } - tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node); - if (!tags) + tags_wrapper = kzalloc_node(sizeof(*tags_wrapper), GFP_KERNEL, node); + if (!tags_wrapper) return NULL; + tags = &tags_wrapper->tags; tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; + spin_lock_init(&tags_wrapper->lock); return blk_mq_init_bitmap_tags(tags, node, alloc_policy); } @@ -397,7 +451,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) { sbitmap_queue_free(&tags->bitmap_tags); sbitmap_queue_free(&tags->breserved_tags); - kfree(tags); + kfree(blk_mq_tags_to_wrapper(tags)); } int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 61deab0b5a5a565c1214ad305cac52f0dde7fb3d..bc7a4c58c76f76067ef8ae539c9c8d6d937dd9fa 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -21,6 
+21,22 @@ struct blk_mq_tags { struct list_head page_list; }; +struct blk_mq_tags_wrapper { + struct blk_mq_tags tags; + + /* + * used to clear request reference in rqs[] before freeing one + * request pool + */ + spinlock_t lock; +}; + +#define blk_mq_tags_to_wrapper(t) \ + container_of(t, struct blk_mq_tags_wrapper, tags) +#define blk_mq_tags_lock_irqsave(tags, flags) \ + spin_lock_irqsave(&blk_mq_tags_to_wrapper(tags)->lock, flags) +#define blk_mq_tags_unlock_irqrestore(tags, flags) \ + spin_unlock_irqrestore(&blk_mq_tags_to_wrapper(tags)->lock, flags) extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy); extern void blk_mq_free_tags(struct blk_mq_tags *tags); diff --git a/block/blk-mq.c b/block/blk-mq.c index e3c39ea8e17b04b0787e53959cd4f68cb1a43f3d..0392b014ac89d7776e781c42383405b80d56ffd0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -36,6 +36,7 @@ #include "blk-stat.h" #include "blk-mq-sched.h" #include "blk-rq-qos.h" +#include "blk-io-hierarchy/stats.h" static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); static void blk_mq_poll_stats_start(struct request_queue *q); @@ -136,13 +137,16 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, void blk_freeze_queue_start(struct request_queue *q) { - int freeze_depth; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q); - freeze_depth = atomic_inc_return(&q->mq_freeze_depth); - if (freeze_depth == 1) { + mutex_lock(&q_wrapper->mq_freeze_lock); + if (++q_wrapper->mq_freeze_depth == 1) { percpu_ref_kill(&q->q_usage_counter); + mutex_unlock(&q_wrapper->mq_freeze_lock); if (q->mq_ops) blk_mq_run_hw_queues(q, false); + } else { + mutex_unlock(&q_wrapper->mq_freeze_lock); } } EXPORT_SYMBOL_GPL(blk_freeze_queue_start); @@ -193,27 +197,55 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); void blk_mq_unfreeze_queue(struct request_queue *q) { - int freeze_depth; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q); + + mutex_lock(&q_wrapper->mq_freeze_lock); + q_wrapper->mq_freeze_depth--; - freeze_depth = atomic_dec_return(&q->mq_freeze_depth); - WARN_ON_ONCE(freeze_depth < 0); - if (!freeze_depth) { + WARN_ON_ONCE(q_wrapper->mq_freeze_depth < 0); + if (!q_wrapper->mq_freeze_depth) { percpu_ref_reinit(&q->q_usage_counter); wake_up_all(&q->mq_freeze_wq); } + + mutex_unlock(&q_wrapper->mq_freeze_lock); } EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); +static void __blk_mq_quiesce_queue_nowait(struct request_queue *q, + unsigned int flag) +{ + blk_queue_flag_set(flag, q); +} + /* * FIXME: replace the scsi_internal_device_*block_nowait() calls in the * mpt3sas driver such that this function can be removed. */ void blk_mq_quiesce_queue_nowait(struct request_queue *q) { - blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); + __blk_mq_quiesce_queue_nowait(q, QUEUE_FLAG_QUIESCED); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); +static void __blk_mq_quiesce_queue(struct request_queue *q, unsigned int flag) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + bool rcu = false; + + __blk_mq_quiesce_queue_nowait(q, flag); + + queue_for_each_hw_ctx(q, hctx, i) { + if (hctx->flags & BLK_MQ_F_BLOCKING) + synchronize_srcu(hctx->srcu); + else + rcu = true; + } + if (rcu) + synchronize_rcu(); +} + /** * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished * @q: request queue. @@ -224,12 +256,24 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); * blk_mq_unquiesce_queue(). 
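
The struct blk_mq_tags_wrapper added in blk-mq-tag.h above embeds the unchanged struct blk_mq_tags as its first member and recovers the wrapper with container_of(), so the new lock can be attached without touching the exported structure layout; a similar wrapper (request_queue_wrapper, reached via queue_to_wrapper()) appears throughout the patch. The standalone sketch below shows only the embedding pattern, with invented names.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* "Public" structure whose layout must stay stable. */
struct tags {
        unsigned int nr_tags;
};

/* Private wrapper: extra state lives next to, not inside, struct tags. */
struct tags_wrapper {
        struct tags tags;       /* placed first (offset 0); container_of() works for any member */
        int lock_state;
};

#define tags_to_wrapper(t) container_of(t, struct tags_wrapper, tags)

int main(void)
{
        struct tags_wrapper w = { .tags = { .nr_tags = 32 }, .lock_state = 1 };
        struct tags *t = &w.tags;       /* only the public pointer is passed around */

        /* Any code holding 't' can reach the private fields again. */
        printf("%u %d\n", t->nr_tags, tags_to_wrapper(t)->lock_state);
        return 0;
}
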
*/ void blk_mq_quiesce_queue(struct request_queue *q) +{ + __blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED); +} +EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); + +void blk_mq_quiesce_queue_internal(struct request_queue *q) +{ + __blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL); +} + +static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q, + unsigned int flag) { struct blk_mq_hw_ctx *hctx; unsigned int i; bool rcu = false; - blk_mq_quiesce_queue_nowait(q); + __blk_mq_quiesce_queue_nowait(q, flag); queue_for_each_hw_ctx(q, hctx, i) { if (hctx->flags & BLK_MQ_F_BLOCKING) @@ -237,10 +281,22 @@ void blk_mq_quiesce_queue(struct request_queue *q) else rcu = true; } - if (rcu) - synchronize_rcu(); + return rcu; +} + +bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q) +{ + return __blk_mq_quiesce_queue_without_rcu(q, QUEUE_FLAG_QUIESCED); +} +EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_without_rcu); + +static void __blk_mq_unquiesce_queue(struct request_queue *q, unsigned int flag) +{ + blk_queue_flag_clear(flag, q); + + /* dispatch requests which are inserted during quiescing */ + blk_mq_run_hw_queues(q, true); } -EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); /* * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() @@ -251,13 +307,15 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); */ void blk_mq_unquiesce_queue(struct request_queue *q) { - blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); - - /* dispatch requests which are inserted during quiescing */ - blk_mq_run_hw_queues(q, true); + __blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); +void blk_mq_unquiesce_queue_internal(struct request_queue *q) +{ + __blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL); +} + void blk_mq_wake_waiters(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; @@ -309,8 +367,13 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, RB_CLEAR_NODE(&rq->rb_node); rq->rq_disk = NULL; rq->part = NULL; - rq->start_time_ns = ktime_get_ns(); + rq->start_time_ns = blk_time_get_ns(); + blk_rq_init_bi_alloc_time(rq, NULL); + blk_mq_get_alloc_task(rq, data->bio); + blk_rq_hierarchy_stats_init(rq); + rq->io_start_time_ns = 0; + request_to_wrapper(rq)->io_end_time_ns = 0; rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; @@ -343,13 +406,13 @@ static struct request *blk_mq_get_request(struct request_queue *q, struct elevator_queue *e = q->elevator; struct request *rq; unsigned int tag; - bool put_ctx_on_error = false; + bool clear_ctx_on_error = false; blk_queue_enter_live(q); data->q = q; if (likely(!data->ctx)) { data->ctx = blk_mq_get_ctx(q); - put_ctx_on_error = true; + clear_ctx_on_error = true; } if (likely(!data->hctx)) data->hctx = blk_mq_map_queue(q, data->ctx->cpu); @@ -373,10 +436,8 @@ static struct request *blk_mq_get_request(struct request_queue *q, tag = blk_mq_get_tag(data); if (tag == BLK_MQ_TAG_FAIL) { - if (put_ctx_on_error) { - blk_mq_put_ctx(data->ctx); + if (clear_ctx_on_error) data->ctx = NULL; - } blk_queue_exit(q); return NULL; } @@ -413,8 +474,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, if (!rq) return ERR_PTR(-EWOULDBLOCK); - blk_mq_put_ctx(alloc_data.ctx); - rq->__data_len = 0; rq->__sector = (sector_t) -1; rq->bio = rq->biotail = NULL; @@ -475,6 +534,8 @@ static void __blk_mq_free_request(struct request *rq) struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); const int sched_tag = rq->internal_tag; + 
blk_rq_hierarchy_stats_complete(rq); + blk_mq_put_alloc_task(rq); if (rq->tag != -1) blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); if (sched_tag != -1) @@ -519,13 +580,22 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request); inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { - u64 now = ktime_get_ns(); + u64 now = request_to_wrapper(rq)->io_end_time_ns; + + if (!now) + now = blk_time_get_ns(); if (rq->rq_flags & RQF_STATS) { blk_mq_poll_stats_start(rq->q); blk_stat_add(rq, now); } + /* + * Avoid accounting flush request with data twice and request that is + * not started. + */ + if (blk_mq_request_started(rq) && !blk_rq_hierarchy_is_flush_done(rq)) + rq_hierarchy_end_io_acct(rq, STAGE_RQ_DRIVER); blk_account_io_done(rq, now); if (rq->end_io) { @@ -554,7 +624,17 @@ static void __blk_mq_complete_request_remote(void *data) rq->q->softirq_done_fn(rq); } -static void __blk_mq_complete_request(struct request *rq) +/** + * blk_mq_force_complete_rq() - Force complete the request, bypassing any error + * injection that could drop the completion. + * @rq: Request to be force completed + * + * Drivers should use blk_mq_complete_request() to complete requests in their + * normal IO path. For timeout error recovery, drivers may call this forced + * completion routine after they've reclaimed timed out requests to bypass + * potentially subsequent fake timeouts. + */ +void blk_mq_force_complete_rq(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; bool shared = false; @@ -565,6 +645,20 @@ static void __blk_mq_complete_request(struct request *rq) if (rq->internal_tag != -1) blk_mq_sched_completed_request(rq); + /* + * Most of single queue controllers, there is only one irq vector + * for handling IO completion, and the only irq's affinity is set + * as all possible CPUs. On most of ARCHs, this affinity means the + * irq is handled on one specific CPU. + * + * So complete IO reqeust in softirq context in case of single queue + * for not degrading IO performance by irqsoff latency. 
+ */ + if (rq->q->nr_hw_queues == 1) { + __blk_complete_request(rq); + return; + } + if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { rq->q->softirq_done_fn(rq); return; @@ -584,6 +678,7 @@ static void __blk_mq_complete_request(struct request *rq) } put_cpu(); } +EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq); static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) __releases(hctx->srcu) @@ -617,7 +712,7 @@ void blk_mq_complete_request(struct request *rq) { if (unlikely(blk_should_fake_timeout(rq->q))) return; - __blk_mq_complete_request(rq); + blk_mq_force_complete_rq(rq); } EXPORT_SYMBOL(blk_mq_complete_request); @@ -627,6 +722,12 @@ int blk_mq_request_started(struct request *rq) } EXPORT_SYMBOL_GPL(blk_mq_request_started); +int blk_mq_request_completed(struct request *rq) +{ + return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE; +} +EXPORT_SYMBOL_GPL(blk_mq_request_completed); + void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; @@ -634,9 +735,10 @@ void blk_mq_start_request(struct request *rq) blk_mq_sched_started_request(rq); trace_block_rq_issue(q, rq); + rq_hierarchy_start_io_acct(rq, STAGE_RQ_DRIVER); if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { - rq->io_start_time_ns = ktime_get_ns(); + rq->io_start_time_ns = blk_time_get_ns(); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW rq->throtl_size = blk_rq_sectors(rq); #endif @@ -672,6 +774,7 @@ static void __blk_mq_requeue_request(struct request *rq) if (blk_mq_request_started(rq)) { WRITE_ONCE(rq->state, MQ_RQ_IDLE); rq->rq_flags &= ~RQF_TIMED_OUT; + rq_hierarchy_end_io_acct(rq, STAGE_RQ_DRIVER); if (q->dma_drain_size && blk_rq_bytes(rq)) rq->nr_phys_segments--; } @@ -699,14 +802,23 @@ static void blk_mq_requeue_work(struct work_struct *work) spin_lock_irq(&q->requeue_lock); list_splice_init(&q->requeue_list, &rq_list); spin_unlock_irq(&q->requeue_lock); + rq_list_hierarchy_end_io_acct(&rq_list, STAGE_REQUEUE); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { - if (!(rq->rq_flags & RQF_SOFTBARRIER)) + if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) continue; rq->rq_flags &= ~RQF_SOFTBARRIER; list_del_init(&rq->queuelist); - blk_mq_sched_insert_request(rq, true, false, false); + /* + * If RQF_DONTPREP, rq has contained some driver specific + * data, so insert it to hctx dispatch list to avoid any + * merge. + */ + if (rq->rq_flags & RQF_DONTPREP) + blk_mq_request_bypass_insert(rq, false, false); + else + blk_mq_sched_insert_request(rq, true, false, false); } while (!list_empty(&rq_list)) { @@ -730,6 +842,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, */ BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); + rq_hierarchy_start_io_acct(rq, STAGE_REQUEUE); spin_lock_irqsave(&q->requeue_lock, flags); if (at_head) { rq->rq_flags |= RQF_SOFTBARRIER; @@ -804,40 +917,28 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) return false; } +void blk_mq_put_rq_ref(struct request *rq) +{ + if (is_flush_rq(rq)) + rq->end_io(rq, 0); + else if (refcount_dec_and_test(&rq->ref)) + __blk_mq_free_request(rq); +} + static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { unsigned long *next = priv; /* - * Just do a quick check if it is expired before locking the request in - * so we're not unnecessarilly synchronizing across CPUs. - */ - if (!blk_mq_req_expired(rq, next)) - return; - - /* - * We have reason to believe the request may be expired. 
Take a - * reference on the request to lock this request lifetime into its - * currently allocated context to prevent it from being reallocated in - * the event the completion by-passes this timeout handler. - * - * If the reference was already released, then the driver beat the - * timeout handler to posting a natural completion. - */ - if (!refcount_inc_not_zero(&rq->ref)) - return; - - /* - * The request is now locked and cannot be reallocated underneath the - * timeout handler's processing. Re-verify this exact request is truly - * expired; if it is not expired, then the request was completed and - * reallocated as a new request. + * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot + * be reallocated underneath the timeout handler's processing, then + * the expire check is reliable. If the request is not expired, then + * it was completed and reallocated as a new request after returning + * from blk_mq_check_expired(). */ if (blk_mq_req_expired(rq, next)) blk_mq_rq_timed_out(rq, reserved); - if (refcount_dec_and_test(&rq->ref)) - __blk_mq_free_request(rq); } static void blk_mq_timeout_work(struct work_struct *work) @@ -1053,6 +1154,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, wait->flags &= ~WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq, wait); + /* + * Add one explicit barrier since blk_mq_get_driver_tag() may + * not imply barrier in case of failure. + * + * Order adding us to wait queue and allocating driver tag. + * + * The pair is the one implied in sbitmap_queue_wake_up() which + * orders clearing sbitmap tag bits and waitqueue_active() in + * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless + * + * Otherwise, re-order of adding wait queue and getting driver tag + * may cause __sbitmap_queue_wake_up() to wake up nothing because + * the waitqueue_active() may not observe us in wait queue. + */ + smp_mb(); + /* * It's possible that a tag was freed in the window between the * allocation failure and adding the hardware queue to the wait @@ -1107,6 +1224,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ +static void blk_mq_handle_dev_resource(struct request *rq, + struct list_head *list) +{ + struct request *next = + list_first_entry_or_null(list, struct request, queuelist); + + /* + * If an I/O scheduler has been configured and we got a driver tag for + * the next request already, free it. + */ + if (next) + blk_mq_put_driver_tag(next); + + list_add(&rq->queuelist, list); + __blk_mq_requeue_request(rq); +} + /* * Returns true if we did some work AND can potentially do more. 
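
The smp_mb() added in blk_mq_mark_tag_wait() above enforces the classic lost-wakeup protocol: the waiter must be visible on the wait queue before it re-checks for a free tag, pairing with the waker that frees the tag before testing waitqueue_active(). The sketch below models only that ordering with C11 seq_cst fences; it is not a model of sbitmap, and all names are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tag_available = false;
static atomic_bool have_waiter = false;

/* Waiter: publish ourselves, then re-check the resource. The fence orders
 * "add to wait queue" before the re-check, like the added smp_mb(). */
static bool waiter_prepare(void)
{
        atomic_store(&have_waiter, true);               /* "__add_wait_queue()" */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load(&tag_available);             /* last-chance re-check */
}

/* Waker: make the resource visible, then look for waiters. With both sides
 * ordered this way, at least one of them observes the other, so a wakeup
 * cannot be lost. */
static bool waker_release(void)
{
        atomic_store(&tag_available, true);             /* "clear the sbitmap bit" */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load(&have_waiter);               /* "waitqueue_active()" */
}

int main(void)
{
        printf("waiter sees tag: %d, waker sees waiter: %d\n",
               waiter_prepare(), waker_release());
        return 0;
}
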
*/ @@ -1118,6 +1252,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, bool no_tag = false; int errors, queued; blk_status_t ret = BLK_STS_OK; + bool no_budget_avail = false; if (list_empty(list)) return false; @@ -1134,8 +1269,11 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, rq = list_first_entry(list, struct request, queuelist); hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); - if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) + if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) { + blk_mq_put_driver_tag(rq); + no_budget_avail = true; break; + } if (!blk_mq_get_driver_tag(rq)) { /* @@ -1174,17 +1312,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, ret = q->mq_ops->queue_rq(hctx, &bd); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - /* - * If an I/O scheduler has been configured and we got a - * driver tag for the next request already, free it - * again. - */ - if (!list_empty(list)) { - nxt = list_first_entry(list, struct request, queuelist); - blk_mq_put_driver_tag(nxt); - } - list_add(&rq->queuelist, list); - __blk_mq_requeue_request(rq); + blk_mq_handle_dev_resource(rq, list); break; } @@ -1206,10 +1334,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, if (!list_empty(list)) { bool needs_restart; + rq_list_hierarchy_start_io_acct(list, STAGE_HCTX); spin_lock(&hctx->lock); - list_splice_init(list, &hctx->dispatch); + list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch and checking + * SCHED_RESTART flag. The pair of this smp_mb() is the one + * in blk_mq_sched_restart(). Avoid restart code path to + * miss the new added requests to hctx->dispatch, meantime + * SCHED_RESTART is observed here. + */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another @@ -1232,13 +1370,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, * * If driver returns BLK_STS_RESOURCE and SCHED_RESTART * bit is set, run queue after a delay to avoid IO stalls - * that could otherwise occur if the queue is idle. + * that could otherwise occur if the queue is idle. We'll do + * similar if we couldn't get budget and SCHED_RESTART is set. */ needs_restart = blk_mq_sched_needs_restart(hctx); if (!needs_restart || (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) blk_mq_run_hw_queue(hctx, true); - else if (needs_restart && (ret == BLK_STS_RESOURCE)) + else if (needs_restart && (ret == BLK_STS_RESOURCE || + no_budget_avail)) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); blk_mq_update_dispatch_busy(hctx, true); @@ -1370,8 +1510,16 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, put_cpu(); } + /* + * No need to queue work if there is no io, and this can avoid race + * with blk_cleanup_queue(). 
+ */ + if (!percpu_ref_tryget(&hctx->queue->q_usage_counter)) + return; + kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); + percpu_ref_put(&hctx->queue->q_usage_counter); } void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) @@ -1395,6 +1543,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) */ hctx_lock(hctx, &srcu_idx); need_run = !blk_queue_quiesced(hctx->queue) && + !blk_queue_quiesced_internal(hctx->queue) && blk_mq_hctx_has_pending(hctx); hctx_unlock(hctx, srcu_idx); @@ -1421,6 +1570,34 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async) } EXPORT_SYMBOL(blk_mq_run_hw_queues); +/** + * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. + * @q: Pointer to the request queue to run. + * @msecs: Microseconds of delay to wait before running the queues. + */ +void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if (blk_mq_hctx_stopped(hctx)) + continue; + + /* + * If there is already a run_work pending, leave the + * pending delay untouched. Otherwise, a hctx can stall + * if another hctx is re-delaying the other's work + * before the work executes. + */ + if (delayed_work_pending(&hctx->run_work)) + continue; + + blk_mq_delay_run_hw_queue(hctx, msecs); + } +} +EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); + /** * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped * @q: request queue. @@ -1561,13 +1738,18 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. */ -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue) { struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu); + rq_hierarchy_start_io_acct(rq, STAGE_HCTX); spin_lock(&hctx->lock); - list_add_tail(&rq->queuelist, &hctx->dispatch); + if (at_head) + list_add(&rq->queuelist, &hctx->dispatch); + else + list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); if (run_queue) @@ -1629,6 +1811,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) if (rq->mq_ctx != this_ctx) { if (this_ctx) { trace_block_unplug(this_q, depth, !from_schedule); + rq_list_hierarchy_end_io_acct(&ctx_list, + STAGE_PLUG); blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule); @@ -1649,6 +1833,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (this_ctx) { trace_block_unplug(this_q, depth, !from_schedule); + rq_list_hierarchy_end_io_acct(&ctx_list, STAGE_PLUG); blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule); } @@ -1725,7 +1910,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, * and avoid driver to try to dispatch again. 
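
The percpu_ref_tryget() guard added to __blk_mq_delay_run_hw_queue() above arms the delayed run_work only while a reference on q_usage_counter can still be taken, so a queue already draining in blk_cleanup_queue() is not handed new work. A rough userspace model of that "tryget before scheduling async work" guard, using a plain atomic counter instead of a percpu refcount; names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int usage = 1;    /* 1 = queue alive; drops to 0 on cleanup */

/* Take a reference only if the object is not already being torn down. */
static bool usage_tryget(void)
{
        int v = atomic_load(&usage);

        while (v > 0) {
                if (atomic_compare_exchange_weak(&usage, &v, v + 1))
                        return true;
        }
        return false;
}

static void usage_put(void)
{
        atomic_fetch_sub(&usage, 1);
}

static void queue_delayed_run(void)
{
        if (!usage_tryget()) {
                puts("queue draining: run_work not armed");
                return;
        }
        puts("run_work armed");         /* kblockd_mod_delayed_work_on() in the kernel */
        usage_put();
}

int main(void)
{
        queue_delayed_run();            /* armed */
        atomic_fetch_sub(&usage, 1);    /* cleanup drops the base reference */
        queue_delayed_run();            /* skipped */
        return 0;
}
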
*/ - if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { + if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) || + blk_queue_quiesced_internal(q)) { run_queue = false; bypass_insert = false; goto insert; @@ -1747,7 +1933,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_sched_insert_request(rq, false, run_queue, false); + blk_mq_request_bypass_insert(rq, false, run_queue); return BLK_STS_OK; } @@ -1763,7 +1949,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) - blk_mq_sched_insert_request(rq, false, true, false); + blk_mq_request_bypass_insert(rq, false, true); else if (ret != BLK_STS_OK) blk_mq_end_request(rq, ret); @@ -1798,7 +1984,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - list_add(&rq->queuelist, list); + blk_mq_request_bypass_insert(rq, false, + list_empty(list)); break; } blk_mq_end_request(rq, ret); @@ -1810,17 +1997,28 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) { const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); - struct blk_mq_alloc_data data = { .flags = 0 }; + struct blk_mq_alloc_data data = { + .flags = 0, + .bio = bio + }; struct request *rq; unsigned int request_count = 0; struct blk_plug *plug; struct request *same_queue_rq = NULL; blk_qc_t cookie; + if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) { + bio_io_error(bio); + return BLK_QC_T_NONE; + } + blk_queue_bounce(q, &bio); blk_queue_split(q, &bio); + /* account for split bio. 
*/ + bio_hierarchy_start(bio); + if (!bio_integrity_prep(bio)) return BLK_QC_T_NONE; @@ -1833,8 +2031,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq_qos_throttle(q, bio, NULL); - trace_block_getrq(q, bio, bio->bi_opf); - rq = blk_mq_get_request(q, bio, bio->bi_opf, &data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); @@ -1843,13 +2039,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } + trace_block_getrq(q, bio, bio->bi_opf); + rq_qos_track(q, rq, bio); cookie = request_to_qc_t(data.hctx, rq); plug = current->plug; if (unlikely(is_flush_fua)) { - blk_mq_put_ctx(data.ctx); blk_mq_bio_to_request(rq, bio); /* bypass scheduler for flush rq */ @@ -1858,7 +2055,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) } else if (plug && q->nr_hw_queues == 1) { struct request *last = NULL; - blk_mq_put_ctx(data.ctx); blk_mq_bio_to_request(rq, bio); /* @@ -1881,6 +2077,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) trace_block_plug(q); } + rq_hierarchy_start_io_acct(rq, STAGE_PLUG); list_add_tail(&rq->queuelist, &plug->mq_list); } else if (plug && !blk_queue_nomerges(q)) { blk_mq_bio_to_request(rq, bio); @@ -1896,23 +2093,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) same_queue_rq = NULL; if (same_queue_rq) list_del_init(&same_queue_rq->queuelist); + rq_hierarchy_start_io_acct(rq, STAGE_PLUG); list_add_tail(&rq->queuelist, &plug->mq_list); - blk_mq_put_ctx(data.ctx); - if (same_queue_rq) { data.hctx = blk_mq_map_queue(q, same_queue_rq->mq_ctx->cpu); + rq_hierarchy_end_io_acct(same_queue_rq, STAGE_PLUG); blk_mq_try_issue_directly(data.hctx, same_queue_rq, &cookie); } } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && !data.hctx->dispatch_busy)) { - blk_mq_put_ctx(data.ctx); blk_mq_bio_to_request(rq, bio); blk_mq_try_issue_directly(data.hctx, rq, &cookie); } else { - blk_mq_put_ctx(data.ctx); blk_mq_bio_to_request(rq, bio); blk_mq_sched_insert_request(rq, false, true, true); } @@ -1920,6 +2115,45 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) return cookie; } +static size_t order_to_size(unsigned int order) +{ + return (size_t)PAGE_SIZE << order; +} + +/* called before freeing request pool in @tags */ +static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, unsigned int hctx_idx) +{ + struct blk_mq_tags *drv_tags = set->tags[hctx_idx]; + struct page *page; + unsigned long flags; + + list_for_each_entry(page, &tags->page_list, lru) { + unsigned long start = (unsigned long)page_address(page); + unsigned long end = start + order_to_size(page->private); + int i; + + for (i = 0; i < set->queue_depth; i++) { + struct request *rq = drv_tags->rqs[i]; + unsigned long rq_addr = (unsigned long)rq; + + if (rq_addr >= start && rq_addr < end) { + WARN_ON_ONCE(refcount_read(&rq->ref) != 0); + cmpxchg(&drv_tags->rqs[i], rq, NULL); + } + } + } + + /* + * Wait until all pending iteration is done. + * + * Request reference is cleared and it is guaranteed to be observed + * after the ->lock is released. 
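
blk_mq_clear_rq_mapping() above cmpxchg()s any drv_tags->rqs[] slot that still points into the request pool being freed back to NULL, and then, in the lock/unlock pair that follows, takes and immediately drops the tags wrapper lock. Because bt_iter()/bt_tags_iter() now look up rqs[] under that same lock, the empty critical section waits out any iteration that may have fetched a stale pointer. The standalone sketch below models that "clear, then drain readers through the lock" idiom with invented types; it is not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct pool { int payload; };

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pool *_Atomic slots[4];

/* Reader: look up a slot under the lock, as the tag iterators do. */
static int read_slot(int i)
{
        int val = -1;

        pthread_mutex_lock(&slot_lock);
        struct pool *p = atomic_load(&slots[i]);
        if (p)
                val = p->payload;
        pthread_mutex_unlock(&slot_lock);
        return val;
}

/* Writer: clear every slot still pointing at 'victim', then drain readers. */
static void clear_mapping(struct pool *victim)
{
        for (int i = 0; i < 4; i++) {
                struct pool *expected = victim;
                /* Only clear slots that still reference the pool being freed. */
                atomic_compare_exchange_strong(&slots[i], &expected, NULL);
        }
        /* Empty critical section: returns only after any in-flight reader,
         * which may have loaded the old pointer, has dropped the lock. */
        pthread_mutex_lock(&slot_lock);
        pthread_mutex_unlock(&slot_lock);
        /* Only now is it safe to free 'victim'. */
}

int main(void)
{
        static struct pool rq = { .payload = 42 };

        atomic_store(&slots[1], &rq);
        printf("before: %d\n", read_slot(1));
        clear_mapping(&rq);
        printf("after:  %d\n", read_slot(1));
        return 0;
}
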
+ */ + blk_mq_tags_lock_irqsave(drv_tags, flags); + blk_mq_tags_unlock_irqrestore(drv_tags, flags); +} + void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { @@ -1938,6 +2172,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, } } + blk_mq_clear_rq_mapping(set, tags, hctx_idx); + while (!list_empty(&tags->page_list)) { page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); @@ -1997,11 +2233,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, return tags; } -static size_t order_to_size(unsigned int order) -{ - return (size_t)PAGE_SIZE << order; -} - static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node) { @@ -2034,7 +2265,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ - rq_size = round_up(sizeof(struct request) + set->cmd_size, + rq_size = round_up(sizeof(struct request) + + sizeof(struct request_wrapper) + set->cmd_size, cache_line_size()); left = rq_size * depth; @@ -2075,7 +2307,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, to_do = min(entries_per_page, depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { - struct request *rq = p; + struct request *rq = p + sizeof(struct request_wrapper); tags->static_rqs[i] = rq; if (blk_mq_init_request(set, rq, hctx_idx, node)) { @@ -2118,6 +2350,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) if (list_empty(&tmp)) return 0; + rq_list_hierarchy_start_io_acct(&tmp, STAGE_HCTX); spin_lock(&hctx->lock); list_splice_tail_init(&tmp, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -2132,28 +2365,58 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) &hctx->cpuhp_dead); } +/* + * Before freeing hw queue, clearing the flush request reference in + * tags->rqs[] for avoiding potential UAF. + */ +static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, + unsigned int queue_depth, struct request *flush_rq) +{ + int i; + unsigned long flags; + + /* The hw queue may not be mapped yet */ + if (!tags) + return; + + WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0); + + for (i = 0; i < queue_depth; i++) + cmpxchg(&tags->rqs[i], flush_rq, NULL); + + /* + * Wait until all pending iteration is done. + * + * Request reference is cleared and it is guaranteed to be observed + * after the ->lock is released. 
+ */ + blk_mq_tags_lock_irqsave(tags, flags); + blk_mq_tags_unlock_irqrestore(tags, flags); +} + /* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - blk_mq_debugfs_unregister_hctx(hctx); + struct request *flush_rq = hctx->fq->flush_rq; if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); + blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], + set->queue_depth, flush_rq); if (set->ops->exit_request) - set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); + set->ops->exit_request(set, flush_rq, hctx_idx); if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); - if (hctx->flags & BLK_MQ_F_BLOCKING) - cleanup_srcu_struct(hctx->srcu); - blk_mq_remove_cpuhp(hctx); - blk_free_flush_queue(hctx->fq); - sbitmap_free(&hctx->ctx_map); + + spin_lock(&q->unused_hctx_lock); + list_add(&hctx->hctx_list, &q->unused_hctx_list); + spin_unlock(&q->unused_hctx_lock); } static void blk_mq_exit_hw_queues(struct request_queue *q, @@ -2165,19 +2428,72 @@ static void blk_mq_exit_hw_queues(struct request_queue *q, queue_for_each_hw_ctx(q, hctx, i) { if (i == nr_queue) break; + mutex_lock(&q->debugfs_mutex); + blk_mq_debugfs_unregister_hctx(hctx); + mutex_unlock(&q->debugfs_mutex); blk_mq_exit_hctx(q, set, hctx, i); } } +static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) +{ + int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); + + BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), + __alignof__(struct blk_mq_hw_ctx)) != + sizeof(struct blk_mq_hw_ctx)); + + if (tag_set->flags & BLK_MQ_F_BLOCKING) + hw_ctx_size += sizeof(struct srcu_struct); + + return hw_ctx_size; +} + static int blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) { - int node; + hctx->queue_num = hctx_idx; + + cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); + + hctx->tags = set->tags[hctx_idx]; + + if (set->ops->init_hctx && + set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) + goto unregister_cpu_notifier; + + if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, + hctx->numa_node)) + goto exit_hctx; + return 0; + + exit_hctx: + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, hctx_idx); + unregister_cpu_notifier: + blk_mq_remove_cpuhp(hctx); + return -1; +} + +static struct blk_mq_hw_ctx * +blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, + int node) +{ + struct blk_mq_hw_ctx *hctx; + gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; + + hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); + if (!hctx) + goto fail_alloc_hctx; - node = hctx->numa_node; + if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) + goto free_hctx; + + atomic_set(&hctx->nr_active, 0); if (node == NUMA_NO_NODE) - node = hctx->numa_node = set->numa_node; + node = set->numa_node; + hctx->numa_node = node; INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); spin_lock_init(&hctx->lock); @@ -2185,59 +2501,51 @@ static int blk_mq_init_hctx(struct request_queue *q, hctx->queue = q; hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; - cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); - - hctx->tags = set->tags[hctx_idx]; + INIT_LIST_HEAD(&hctx->hctx_list); /* * Allocate space for all possible cpus to avoid allocation at * runtime */ hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), - GFP_KERNEL, node); + gfp, node); if (!hctx->ctxs) - goto 
unregister_cpu_notifier; + goto free_cpumask; - if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL, - node)) + if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), + gfp, node)) goto free_ctxs; - hctx->nr_ctx = 0; spin_lock_init(&hctx->dispatch_wait_lock); init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); INIT_LIST_HEAD(&hctx->dispatch_wait.entry); - if (set->ops->init_hctx && - set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) - goto free_bitmap; - - hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size, + gfp); if (!hctx->fq) - goto exit_hctx; - - if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node)) - goto free_fq; - - if (hctx->flags & BLK_MQ_F_BLOCKING) - init_srcu_struct(hctx->srcu); + goto free_bitmap; - blk_mq_debugfs_register_hctx(q, hctx); + if (hctx->flags & BLK_MQ_F_BLOCKING) { + if (init_srcu_struct(hctx->srcu) != 0) + goto free_flush_queue; + } + blk_mq_hctx_kobj_init(hctx); - return 0; + return hctx; - free_fq: - kfree(hctx->fq); - exit_hctx: - if (set->ops->exit_hctx) - set->ops->exit_hctx(hctx, hctx_idx); + free_flush_queue: + blk_free_flush_queue(hctx->fq); free_bitmap: sbitmap_free(&hctx->ctx_map); free_ctxs: kfree(hctx->ctxs); - unregister_cpu_notifier: - blk_mq_remove_cpuhp(hctx); - return -1; + free_cpumask: + free_cpumask_var(hctx->cpumask); + free_hctx: + kfree(hctx); + fail_alloc_hctx: + return NULL; } static void blk_mq_init_cpu_queues(struct request_queue *q, @@ -2286,7 +2594,7 @@ static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx) static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, unsigned int hctx_idx) { - if (set->tags[hctx_idx]) { + if (set->tags && set->tags[hctx_idx]) { blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); blk_mq_free_rq_map(set->tags[hctx_idx]); set->tags[hctx_idx] = NULL; @@ -2300,11 +2608,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; - /* - * Avoid others reading imcomplete hctx->cpumask through sysfs - */ - mutex_lock(&q->sysfs_lock); - queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -2338,8 +2641,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->ctxs[hctx->nr_ctx++] = ctx; } - mutex_unlock(&q->sysfs_lock); - queue_for_each_hw_ctx(q, hctx, i) { /* * If no software queues are mapped to this hardware queue, @@ -2385,10 +2686,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared) int i; queue_for_each_hw_ctx(q, hctx, i) { - if (shared) + if (shared) { hctx->flags |= BLK_MQ_F_TAG_SHARED; - else + } else { + blk_mq_tag_idle(hctx); hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } } } @@ -2411,7 +2714,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); - list_del_rcu(&q->tag_set_list); + list_del(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_SHARED; @@ -2440,11 +2743,39 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, } if (set->flags & BLK_MQ_F_TAG_SHARED) queue_set_hctx_shared(q, true); - list_add_tail_rcu(&q->tag_set_list, &set->tag_list); + list_add_tail(&q->tag_set_list, &set->tag_list); mutex_unlock(&set->tag_list_lock); } +/* All allocations will be freed in release handler of q->mq_kobj */ +static int 
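
blk_mq_alloc_hctx() and blk_mq_init_hctx() above unwind failures through a ladder of goto labels so that only the resources already set up are released, in reverse order of allocation. A compact sketch of that error-unwinding style with made-up resources; the structure and names are illustrative only.

#include <stdlib.h>

struct ctx { int *map; char *name; };

/* Allocate in order; on failure, release only what already succeeded,
 * mirroring the free_xxx / fail_xxx label ladder in blk_mq_alloc_hctx(). */
static struct ctx *ctx_alloc(int entries)
{
        struct ctx *c = calloc(1, sizeof(*c));
        if (!c)
                goto fail;

        c->map = calloc(entries, sizeof(*c->map));
        if (!c->map)
                goto free_ctx;

        c->name = malloc(16);
        if (!c->name)
                goto free_map;

        return c;

free_map:
        free(c->map);
free_ctx:
        free(c);
fail:
        return NULL;
}

int main(void)
{
        struct ctx *c = ctx_alloc(16);

        if (c) {
                free(c->name);
                free(c->map);
                free(c);
        }
        return 0;
}
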
blk_mq_alloc_ctxs(struct request_queue *q) +{ + struct blk_mq_ctxs *ctxs; + int cpu; + + ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); + if (!ctxs) + return -ENOMEM; + + ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); + if (!ctxs->queue_ctx) + goto fail; + + for_each_possible_cpu(cpu) { + struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); + ctx->ctxs = ctxs; + } + + q->mq_kobj = &ctxs->kobj; + q->queue_ctx = ctxs->queue_ctx; + + return 0; + fail: + kfree(ctxs); + return -ENOMEM; +} + /* * It is the actual release handler for mq, but we do it from * request queue's release handler for avoiding use-after-free @@ -2453,13 +2784,18 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, */ void blk_mq_release(struct request_queue *q) { - struct blk_mq_hw_ctx *hctx; - unsigned int i; + struct blk_mq_hw_ctx *hctx, *next; + int i; - /* hctx kobj stays in hctx */ - queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx) - continue; + blk_mq_unregister_hierarchy(q, STAGE_BIO); + blk_io_hierarchy_stats_free(q); + + queue_for_each_hw_ctx(q, hctx, i) + WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); + + /* all hctx are in .unused_hctx_list now */ + list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { + list_del_init(&hctx->hctx_list); kobject_put(&hctx->kobj); } @@ -2472,8 +2808,6 @@ void blk_mq_release(struct request_queue *q) * both share lifetime with request queue. */ blk_mq_sysfs_deinit(q); - - free_percpu(q->queue_ctx); } struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) @@ -2492,93 +2826,118 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) } EXPORT_SYMBOL(blk_mq_init_queue); -static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) +static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( + struct blk_mq_tag_set *set, struct request_queue *q, + int hctx_idx, int node) { - int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); + struct blk_mq_hw_ctx *hctx = NULL, *tmp; - BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), - __alignof__(struct blk_mq_hw_ctx)) != - sizeof(struct blk_mq_hw_ctx)); + /* reuse dead hctx first */ + spin_lock(&q->unused_hctx_lock); + list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { + if (tmp->numa_node == node) { + hctx = tmp; + break; + } + } + if (hctx) + list_del_init(&hctx->hctx_list); + spin_unlock(&q->unused_hctx_lock); - if (tag_set->flags & BLK_MQ_F_BLOCKING) - hw_ctx_size += sizeof(struct srcu_struct); + if (!hctx) + hctx = blk_mq_alloc_hctx(q, set, node); + if (!hctx) + goto fail; - return hw_ctx_size; + if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) + goto free_hctx; + + return hctx; + +free_hctx: + kobject_put(&hctx->kobj); +fail: + return NULL; } static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct request_queue *q) { - int i, j; + int i, j, end; struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; - blk_mq_sysfs_unregister(q); - /* protect against switching io scheduler */ mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { int node; - - if (hctxs[i]) - continue; + struct blk_mq_hw_ctx *hctx; node = blk_mq_hw_queue_to_node(q->mq_map, i); - hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set), - GFP_KERNEL, node); - if (!hctxs[i]) - break; - - if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, - node)) { - kfree(hctxs[i]); - hctxs[i] = NULL; - break; - } - - atomic_set(&hctxs[i]->nr_active, 0); - hctxs[i]->numa_node = node; - hctxs[i]->queue_num = i; + /* + * If the hw queue has been mapped to another numa node, + * we need to realloc 
the hctx. If allocation fails, fallback + * to use the previous one. + */ + if (hctxs[i] && (hctxs[i]->numa_node == node)) + continue; - if (blk_mq_init_hctx(q, set, hctxs[i], i)) { - free_cpumask_var(hctxs[i]->cpumask); - kfree(hctxs[i]); - hctxs[i] = NULL; - break; + hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); + if (hctx) { + if (hctxs[i]) + blk_mq_exit_hctx(q, set, hctxs[i], i); + hctxs[i] = hctx; + } else { + if (hctxs[i]) + pr_warn("Allocate new hctx on node %d fails,\ + fallback to previous one on node %d\n", + node, hctxs[i]->numa_node); + else + break; } - blk_mq_hctx_kobj_init(hctxs[i]); } - for (j = i; j < q->nr_hw_queues; j++) { + /* + * Increasing nr_hw_queues fails. Free the newly allocated + * hctxs and keep the previous q->nr_hw_queues. + */ + if (i != set->nr_hw_queues) { + j = q->nr_hw_queues; + end = i; + } else { + j = i; + end = q->nr_hw_queues; + q->nr_hw_queues = set->nr_hw_queues; + } + + for (; j < end; j++) { struct blk_mq_hw_ctx *hctx = hctxs[j]; if (hctx) { - if (hctx->tags) - blk_mq_free_map_and_requests(set, j); blk_mq_exit_hctx(q, set, hctx, j); - kobject_put(&hctx->kobj); hctxs[j] = NULL; - } } - q->nr_hw_queues = i; mutex_unlock(&q->sysfs_lock); - blk_mq_sysfs_register(q); } struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { + int ret = -ENOMEM; + /* mark the queue as mq asap */ q->mq_ops = set->ops; + if (blk_io_hierarchy_stats_alloc(q)) + goto err_exit; + q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, blk_mq_poll_stats_bkt, BLK_MQ_POLL_STATS_BKTS, q); if (!q->poll_cb) - goto err_exit; + goto err_hierarchy_exit; - q->queue_ctx = alloc_percpu(struct blk_mq_ctx); - if (!q->queue_ctx) - goto err_exit; + if (blk_mq_alloc_ctxs(q)) + goto err_hierarchy_exit; /* init q->mq_kobj and sw queues' kobjects */ blk_mq_sysfs_init(q); @@ -2586,8 +2945,10 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)), GFP_KERNEL, set->numa_node); if (!q->queue_hw_ctx) - goto err_percpu; + goto err_sys_init; + INIT_LIST_HEAD(&q->unused_hctx_list); + spin_lock_init(&q->unused_hctx_lock); q->mq_map = set->mq_map; blk_mq_realloc_hw_ctxs(set, q); @@ -2632,50 +2993,37 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, blk_mq_map_swqueue(q); if (!(set->flags & BLK_MQ_F_NO_SCHED)) { - int ret; - ret = elevator_init_mq(q); if (ret) - return ERR_PTR(ret); + goto err_tag_set; } return q; +err_tag_set: + blk_mq_del_queue_tag_set(q); err_hctxs: kfree(q->queue_hw_ctx); -err_percpu: - free_percpu(q->queue_ctx); + q->nr_hw_queues = 0; +err_sys_init: + blk_mq_sysfs_deinit(q); +err_hierarchy_exit: + blk_io_hierarchy_stats_free(q); err_exit: q->mq_ops = NULL; return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(blk_mq_init_allocated_queue); -void blk_mq_free_queue(struct request_queue *q) +/* tags can _not_ be used after returning from blk_mq_exit_queue */ +void blk_mq_exit_queue(struct request_queue *q) { - struct blk_mq_tag_set *set = q->tag_set; + struct blk_mq_tag_set *set = q->tag_set; - blk_mq_del_queue_tag_set(q); + /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. 
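
blk_mq_alloc_and_init_hctx() above first searches q->unused_hctx_list for a retired hardware context on the requested NUMA node and allocates a fresh one only when nothing suitable is parked there; blk_mq_exit_hctx() feeds that list instead of freeing the hctx so a later remap can reuse it. A toy version of the reuse-then-allocate lookup follows; the list layout and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct hctx {
        int numa_node;
        struct hctx *next;      /* stand-in for the kernel's hctx_list */
};

static struct hctx *unused_list;

static struct hctx *alloc_or_reuse_hctx(int node)
{
        struct hctx **pp, *h;

        /* Prefer a previously retired context from the same NUMA node. */
        for (pp = &unused_list; (h = *pp) != NULL; pp = &h->next) {
                if (h->numa_node == node) {
                        *pp = h->next;  /* unlink from the free list */
                        return h;
                }
        }

        /* Nothing suitable parked: allocate a new one. */
        h = calloc(1, sizeof(*h));
        if (h)
                h->numa_node = node;
        return h;
}

static void retire_hctx(struct hctx *h)
{
        h->next = unused_list;  /* park for possible reuse, do not free */
        unused_list = h;
}

int main(void)
{
        struct hctx *a = alloc_or_reuse_hctx(0);

        retire_hctx(a);
        printf("reused: %d\n", a == alloc_or_reuse_hctx(0));   /* 1 */
        return 0;
}
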
*/ blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); -} - -/* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) -{ - WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); - - blk_mq_debugfs_unregister_hctxs(q); - blk_mq_sysfs_unregister(q); - - /* - * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe - * we should change hctx numa_node according to the new topology (this - * involves freeing and re-allocating memory, worth doing?) - */ - blk_mq_map_swqueue(q); - - blk_mq_sysfs_register(q); - blk_mq_debugfs_register_hctxs(q); + /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ + blk_mq_del_queue_tag_set(q); } static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) @@ -2732,7 +3080,7 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) { - if (set->ops->map_queues) { + if (set->ops->map_queues && !is_kdump_kernel()) { /* * transport .map_queues is usually done in the following * way: @@ -2859,7 +3207,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) return -EINVAL; blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); + blk_mq_quiesce_queue_internal(q); ret = 0; queue_for_each_hw_ctx(q, hctx, i) { @@ -2878,12 +3226,14 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } if (ret) break; + if (q->elevator && q->elevator->type->ops.mq.depth_updated) + q->elevator->type->ops.mq.depth_updated(hctx); } if (!ret) q->nr_requests = nr; - blk_mq_unquiesce_queue(q); + blk_mq_unquiesce_queue_internal(q); blk_mq_unfreeze_queue(q); return ret; @@ -2964,6 +3314,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, { struct request_queue *q; LIST_HEAD(head); + int prev_nr_hw_queues; lockdep_assert_held(&set->tag_list_lock); @@ -2987,11 +3338,35 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (!blk_mq_elv_switch_none(&head, q)) goto switch_back; + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_debugfs_unregister_hctxs(q); + blk_mq_sysfs_unregister(q); + } + + prev_nr_hw_queues = set->nr_hw_queues; set->nr_hw_queues = nr_hw_queues; blk_mq_update_queue_map(set); +fallback: list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); - blk_mq_queue_reinit(q); + if (q->nr_hw_queues != set->nr_hw_queues) { + int i = prev_nr_hw_queues; + + pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", + nr_hw_queues, prev_nr_hw_queues); + for (; i < set->nr_hw_queues; i++) + blk_mq_free_map_and_requests(set, i); + + set->nr_hw_queues = prev_nr_hw_queues; + blk_mq_map_queues(set); + goto fallback; + } + blk_mq_map_swqueue(q); + } + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_sysfs_register(q); + blk_mq_debugfs_register_hctxs(q); } switch_back: @@ -3205,6 +3580,19 @@ static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie) return __blk_mq_poll(hctx, rq); } +void blk_mq_cancel_work_sync(struct request_queue *q) +{ + if (q->mq_ops) { + struct blk_mq_hw_ctx *hctx; + int i; + + cancel_delayed_work_sync(&q->requeue_work); + + queue_for_each_hw_ctx(q, hctx, i) + cancel_delayed_work_sync(&hctx->run_work); + } +} + static int __init blk_mq_init(void) { cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, diff --git a/block/blk-mq.h b/block/blk-mq.h index 9497b47e2526c62006f00101cc7acffd35e7e299..80ad4bc91fa283a634172004a3219872a8b2921f 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h 
@@ -7,6 +7,11 @@ struct blk_mq_tag_set; +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + /** * struct blk_mq_ctx - State for a software queue facing the submitting CPUs */ @@ -27,11 +32,32 @@ struct blk_mq_ctx { unsigned long ____cacheline_aligned_in_smp rq_completed[2]; struct request_queue *queue; + struct blk_mq_ctxs *ctxs; struct kobject kobj; } ____cacheline_aligned_in_smp; +struct request_wrapper { + u64 io_end_time_ns; +#ifdef CONFIG_BLK_BIO_ALLOC_TIME + u64 bi_alloc_time_ns; +#endif +#ifdef CONFIG_BLK_BIO_ALLOC_TASK + struct pid *pid; +#endif +#ifdef CONFIG_BLK_IO_HIERARCHY_STATS + bool flush_done; + enum stage_group stage; + unsigned long hierarchy_time; +#endif +} ____cacheline_aligned_in_smp; + +static inline struct request_wrapper *request_to_wrapper(void *rq) +{ + return rq - sizeof(struct request_wrapper); +} + void blk_mq_freeze_queue(struct request_queue *q); -void blk_mq_free_queue(struct request_queue *q); +void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool); @@ -39,6 +65,9 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); bool blk_mq_get_driver_tag(struct request *rq); struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start); +void blk_mq_put_rq_ref(struct request *rq); +void blk_mq_quiesce_queue_internal(struct request_queue *q); +void blk_mq_unquiesce_queue_internal(struct request_queue *q); /* * Internal helpers for allocating/freeing the request map @@ -58,7 +87,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, */ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head); -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue); void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); @@ -88,6 +118,8 @@ extern int blk_mq_sysfs_register(struct request_queue *q); extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); +void blk_mq_cancel_work_sync(struct request_queue *q); + void blk_mq_release(struct request_queue *q); /** @@ -113,12 +145,7 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, */ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { - return __blk_mq_get_ctx(q, get_cpu()); -} - -static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) -{ - put_cpu(); + return __blk_mq_get_ctx(q, raw_smp_processor_id()); } struct blk_mq_alloc_data { @@ -130,6 +157,10 @@ struct blk_mq_alloc_data { /* input & output parameter */ struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; + +#ifndef __GENKSYMS__ + struct bio *bio; +#endif }; static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) @@ -212,4 +243,39 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set) set->mq_map[cpu] = 0; } +static inline void blk_mq_free_requests(struct list_head *list) +{ + while (!list_empty(list)) { + struct request *rq = list_entry_rq(list->next); + + list_del_init(&rq->queuelist); + __blk_put_request(rq->q, rq); + } +} + +#ifdef CONFIG_BLK_BIO_ALLOC_TASK +static inline void blk_mq_get_alloc_task(struct request 
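
request_to_wrapper() above recovers the per-request private fields by stepping back sizeof(struct request_wrapper) from the request pointer; this works because blk_mq_alloc_rqs() earlier in the patch carves each element out as wrapper-then-request within one rq_size slot and hands out only the struct request part. A compact userspace model of that front-of-object placement; the structure names and fields are invented.

#include <stdio.h>
#include <stdlib.h>

struct req { int tag; };

/* Private per-request data that the public struct req must not grow to hold. */
struct req_wrapper {
        unsigned long io_end_time;
};

static struct req *chunk_to_req(void *chunk)
{
        /* The wrapper sits immediately in front of the request. */
        return (struct req *)((char *)chunk + sizeof(struct req_wrapper));
}

static struct req_wrapper *req_to_wrapper(struct req *rq)
{
        return (struct req_wrapper *)((char *)rq - sizeof(struct req_wrapper));
}

int main(void)
{
        /* One allocation holds both parts, wrapper first. */
        void *chunk = calloc(1, sizeof(struct req_wrapper) + sizeof(struct req));
        struct req *rq = chunk_to_req(chunk);

        rq->tag = 7;
        req_to_wrapper(rq)->io_end_time = 123;
        printf("%d %lu\n", rq->tag, req_to_wrapper(rq)->io_end_time);
        free(chunk);
        return 0;
}
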
*rq, struct bio *bio) +{ + request_to_wrapper(rq)->pid = bio ? get_pid(bio->pid) : + get_pid(task_pid(current)); +} + +static inline void blk_mq_put_alloc_task(struct request *rq) +{ + struct request_wrapper *rq_wrapper = request_to_wrapper(rq); + + if (rq_wrapper->pid) { + put_pid(rq_wrapper->pid); + rq_wrapper->pid = NULL; + } +} +#else +static inline void blk_mq_get_alloc_task(struct request *rq, struct bio *bio) +{ +} +static inline void blk_mq_put_alloc_task(struct request *rq) +{ +} +#endif + #endif diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 0005dfd568dd5baaf3804106b25ef6a37b2a79ee..d1eaa118ce3098f903c54cea75fcb6e2e0eb62d5 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -72,6 +72,12 @@ void rq_qos_throttle(struct request_queue *q, struct bio *bio, { struct rq_qos *rqos; + /* + * BIO_TRACKED lets controllers know that a bio went through the + * normal rq_qos path. + */ + bio_set_flag(bio, BIO_TRACKED); + for(rqos = q->rq_qos; rqos; rqos = rqos->next) { if (rqos->ops->throttle) rqos->ops->throttle(rqos, bio, lock); @@ -148,24 +154,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd) return ret; } -void rq_depth_scale_up(struct rq_depth *rqd) +/* Returns true on success and false if scaling up wasn't possible */ +bool rq_depth_scale_up(struct rq_depth *rqd) { /* * Hit max in previous round, stop here */ if (rqd->scaled_max) - return; + return false; rqd->scale_step--; rqd->scaled_max = rq_depth_calc_max_depth(rqd); + return true; } /* * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we - * had a latency violation. + * had a latency violation. Returns true on success and returns false if + * scaling down wasn't possible. */ -void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle) +bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle) { /* * Stop scaling down when we've hit the limit. This also prevents @@ -173,7 +182,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle) * keep up. */ if (rqd->max_depth == 1) - return; + return false; if (rqd->scale_step < 0 && hard_throttle) rqd->scale_step = 0; @@ -182,6 +191,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle) rqd->scaled_max = false; rq_depth_calc_max_depth(rqd); + return true; } void rq_qos_exit(struct request_queue *q) diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 32b02efbfa66dda638a02d07aa00c63df48a5dab..d5daed923c725870bdf122a8c1882b859c036fd0 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -6,6 +6,9 @@ #include #include #include +#ifndef __GENKSYMS__ +#include +#endif enum rq_qos_id { RQ_QOS_WBT, @@ -74,28 +77,48 @@ static inline void rq_wait_init(struct rq_wait *rq_wait) static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) { + /* + * No IO can be in-flight when adding rqos, so freeze queue, which + * is fine since we only support rq_qos for blk-mq queue. + * + * Reuse ->queue_lock for protecting against other concurrent + * rq_qos adding/deleting + */ + blk_mq_freeze_queue(q); + + spin_lock_irq(q->queue_lock); rqos->next = q->rq_qos; q->rq_qos = rqos; + spin_unlock_irq(q->queue_lock); + + blk_mq_unfreeze_queue(q); } static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos) { - struct rq_qos *cur, *prev = NULL; - for (cur = q->rq_qos; cur; cur = cur->next) { - if (cur == rqos) { - if (prev) - prev->next = rqos->next; - else - q->rq_qos = cur; + struct rq_qos **cur; + + /* + * See comment in rq_qos_add() about freezing queue & using + * ->queue_lock. 
+ */ + blk_mq_freeze_queue(q); + + spin_lock_irq(q->queue_lock); + for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) { + if (*cur == rqos) { + *cur = rqos->next; break; } - prev = cur; } + spin_unlock_irq(q->queue_lock); + + blk_mq_unfreeze_queue(q); } bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit); -void rq_depth_scale_up(struct rq_depth *rqd); -void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle); +bool rq_depth_scale_up(struct rq_depth *rqd); +bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle); bool rq_depth_calc_max_depth(struct rq_depth *rqd); void rq_qos_cleanup(struct request_queue *, struct bio *); diff --git a/block/blk-settings.c b/block/blk-settings.c index ffd459969689df0821bf377d1c433f039d502de1..f5d0b57f9992b0b4cbee277e6d6ef5f672572e1f 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -513,6 +513,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt) } EXPORT_SYMBOL(blk_queue_io_opt); +static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs) +{ + sectors = round_down(sectors, lbs >> SECTOR_SHIFT); + if (sectors < PAGE_SIZE >> SECTOR_SHIFT) + sectors = PAGE_SIZE >> SECTOR_SHIFT; + return sectors; +} + /** * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers * @t: the stacking driver (top) @@ -553,8 +561,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); - t->max_write_same_sectors = min(t->max_write_same_sectors, - b->max_write_same_sectors); t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, b->max_write_zeroes_sectors); t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); @@ -593,6 +599,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, } } + /* If logical block size is difference, forbid write same */ + if (t->logical_block_size != b->logical_block_size && + t->max_write_same_sectors != UINT_MAX) + t->max_write_same_sectors = 0; + else + t->max_write_same_sectors = min(t->max_write_same_sectors, + b->max_write_same_sectors); + t->logical_block_size = max(t->logical_block_size, b->logical_block_size); @@ -639,6 +653,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } + t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); + t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); + t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); + /* Discard alignment and granularity */ if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); @@ -717,6 +735,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } + + t->backing_dev_info->io_pages = + t->limits.max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 15c1f5e12eb89460bc42eb7f5807eaa03254e51d..e47a2f751884ddb7821fededef643693db752393 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -97,8 +97,8 @@ static int blk_softirq_cpu_dead(unsigned int cpu) void __blk_complete_request(struct request *req) { - int ccpu, cpu; struct request_queue *q = req->q; + int cpu, ccpu = q->mq_ops ? 
req->mq_ctx->cpu : req->cpu; unsigned long flags; bool shared = false; @@ -110,8 +110,7 @@ void __blk_complete_request(struct request *req) /* * Select completion CPU */ - if (req->cpu != -1) { - ccpu = req->cpu; + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) { if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) shared = cpus_share_cache(cpu, ccpu); } else diff --git a/block/blk-stat.c b/block/blk-stat.c index 7587b1c3caaf5299613686e005ac211346234928..507ac714423bdf0e8c88931515cf513216e068c2 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -27,7 +27,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat) /* src is a per-cpu stat, mean isn't initialized */ void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) { - if (!src->nr_samples) + if (dst->nr_samples + src->nr_samples <= dst->nr_samples) return; dst->min = min(dst->min, src->min); diff --git a/block/blk-stat.h b/block/blk-stat.h index f4a1568e81a415789e63f045a3d99fe932962ce5..17b47a86eefb331c5d2266243183c2c71b10e2bc 100644 --- a/block/blk-stat.h +++ b/block/blk-stat.h @@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb, mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs)); } +static inline void blk_stat_deactivate(struct blk_stat_callback *cb) +{ + del_timer_sync(&cb->timer); +} + /** * blk_stat_activate_msecs() - Gather block statistics during a time window in * milliseconds. diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3772671cf2bc5ad6322786b9531e9eb8bb6c7edd..b23d82fbe73670f5c5847fc167652f273374b9e7 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -11,11 +11,14 @@ #include #include #include +#include +#include #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-wbt.h" +#include "blk-io-hierarchy/stats.h" struct queue_sysfs_entry { struct attribute attr; @@ -460,7 +463,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, */ if (q->mq_ops) { blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); + blk_mq_quiesce_queue_internal(q); } else blk_queue_bypass_start(q); @@ -468,7 +471,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, wbt_update_limits(q); if (q->mq_ops) { - blk_mq_unquiesce_queue(q); + blk_mq_unquiesce_queue_internal(q); blk_mq_unfreeze_queue(q); } else blk_queue_bypass_end(q); @@ -695,6 +698,44 @@ static struct queue_sysfs_entry queue_wb_lat_entry = { .store = queue_wb_lat_store, }; +#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC +static ssize_t queue_dispatch_async_cpus_show(struct request_queue *q, + char *page) +{ + int cpu; + ssize_t ret = 0; + + if (!test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags)) + return -EOPNOTSUPP; + + for_each_cpu(cpu, &queue_to_wrapper(q)->dispatch_async_cpus) { + ret += sprintf(page + ret, "%d ", cpu); + } + + ret += sprintf(page + ret, "\n"); + return ret; +} + +static struct queue_sysfs_entry queue_dispatch_async_cpus_entry = { + .attr = {.name = "dispatch_async_cpus", .mode = 0444 }, + .show = queue_dispatch_async_cpus_show, +}; + +static ssize_t queue_show_dispatch_async(struct request_queue *q, + char *page) +{ + if (test_bit(QUEUE_FLAG_DISPATCH_ASYNC, &q->queue_flags)) + return sprintf(page, "1\n"); + else + return sprintf(page, "0\n"); +} + +static struct queue_sysfs_entry queue_dispatch_async_entry = { + .attr = {.name = "dispatch_async", .mode = 0444 }, + .show = queue_show_dispatch_async, +}; +#endif + #ifdef CONFIG_BLK_DEV_THROTTLING_LOW static struct queue_sysfs_entry throtl_sample_time_entry 
= { .attr = {.name = "throttle_sample_time", .mode = 0644 }, @@ -737,6 +778,10 @@ static struct attribute *default_attrs[] = { &queue_dax_entry.attr, &queue_wb_lat_entry.attr, &queue_poll_delay_entry.attr, +#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC + &queue_dispatch_async_cpus_entry.attr, + &queue_dispatch_async_entry.attr, +#endif #ifdef CONFIG_BLK_DEV_THROTTLING_LOW &throtl_sample_time_entry.attr, #endif @@ -791,7 +836,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) { struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q)); } /** @@ -818,6 +863,7 @@ static void __blk_release_queue(struct work_struct *work) if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); blk_stat_free_callback(q->poll_cb); + blk_free_queue_dispatch_async(q); if (!blk_queue_dead(q)) { /* @@ -849,10 +895,9 @@ static void __blk_release_queue(struct work_struct *work) blk_mq_release(q); } + mutex_lock(&q->debugfs_mutex); blk_trace_shutdown(q); - - if (q->mq_ops) - blk_mq_debugfs_unregister(q); + mutex_unlock(&q->debugfs_mutex); bioset_exit(&q->bio_split); @@ -880,6 +925,19 @@ struct kobj_type blk_queue_ktype = { .release = blk_release_queue, }; +static void blk_mq_register_default_hierarchy(struct request_queue *q) +{ + if (!q->mq_ops) + return; + + blk_mq_register_hierarchy(q, STAGE_GETTAG); + blk_mq_register_hierarchy(q, STAGE_PLUG); + blk_mq_register_hierarchy(q, STAGE_HCTX); + blk_mq_register_hierarchy(q, STAGE_REQUEUE); + blk_mq_register_hierarchy(q, STAGE_RQ_DRIVER); + blk_mq_register_hierarchy(q, STAGE_BIO); +} + /** * blk_register_queue - register a block layer queue with sysfs * @disk: Disk of which the request queue should be registered with sysfs. @@ -889,6 +947,7 @@ int blk_register_queue(struct gendisk *disk) int ret; struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; + bool has_elevator = false; if (WARN_ON(!q)) return -ENXIO; @@ -896,29 +955,12 @@ int blk_register_queue(struct gendisk *disk) WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags), "%s is registering an already registered queue\n", kobject_name(&dev->kobj)); - queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q); - - /* - * SCSI probing may synchronously create and destroy a lot of - * request_queues for non-existent devices. Shutting down a fully - * functional queue takes measureable wallclock time as RCU grace - * periods are involved. To avoid excessive latency in these - * cases, a request_queue starts out in a degraded mode which is - * faster to shut down and is made fully functional here as - * request_queues for non-existent devices never get registered. - */ - if (!blk_queue_init_done(q)) { - queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); - percpu_ref_switch_to_percpu(&q->q_usage_counter); - blk_queue_bypass_end(q); - } ret = blk_trace_init_sysfs(dev); if (ret) return ret; - /* Prevent changes through sysfs until registration is completed. 
*/ - mutex_lock(&q->sysfs_lock); + mutex_lock(&queue_to_wrapper(q)->sysfs_dir_lock); ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); if (ret < 0) { @@ -926,31 +968,62 @@ int blk_register_queue(struct gendisk *disk) goto unlock; } - if (q->mq_ops) { + if (q->mq_ops) __blk_mq_register_dev(dev, q); - blk_mq_debugfs_register(q); - } - - kobject_uevent(&q->kobj, KOBJ_ADD); - - wbt_enable_default(q); + mutex_lock(&q->sysfs_lock); - blk_throtl_register_queue(q); + mutex_lock(&q->debugfs_mutex); + q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), + blk_debugfs_root); + if (q->mq_ops) + blk_mq_debugfs_register(q); + mutex_unlock(&q->debugfs_mutex); if (q->request_fn || (q->mq_ops && q->elevator)) { - ret = elv_register_queue(q); + ret = elv_register_queue(q, false); if (ret) { mutex_unlock(&q->sysfs_lock); - kobject_uevent(&q->kobj, KOBJ_REMOVE); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); kobject_del(&q->kobj); blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } + has_elevator = true; } + + blk_mq_register_default_hierarchy(q); + + blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); + wbt_enable_default(q); + blk_throtl_register_queue(q); + blk_queue_flag_set(QUEUE_FLAG_REGISTER_DONE, q); + + /* Now everything is ready and send out KOBJ_ADD uevent */ + kobject_uevent(&q->kobj, KOBJ_ADD); + if (has_elevator) + kobject_uevent(&q->elevator->kobj, KOBJ_ADD); + mutex_unlock(&q->sysfs_lock); + ret = 0; unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); + + /* + * SCSI probing may synchronously create and destroy a lot of + * request_queues for non-existent devices. Shutting down a fully + * functional queue takes measureable wallclock time as RCU grace + * periods are involved. To avoid excessive latency in these + * cases, a request_queue starts out in a degraded mode which is + * faster to shut down and is made fully functional here as + * request_queues for non-existent devices never get registered. + */ + if (!blk_queue_init_done(q)) { + queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); + percpu_ref_switch_to_percpu(&q->q_usage_counter); + blk_queue_bypass_end(q); + } + return ret; } EXPORT_SYMBOL_GPL(blk_register_queue); @@ -973,6 +1046,7 @@ void blk_unregister_queue(struct gendisk *disk) if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) return; + blk_queue_flag_clear(QUEUE_FLAG_REGISTER_DONE, q); /* * Since sysfs_remove_dir() prevents adding new directory entries * before removal of existing entries starts, protect against @@ -981,25 +1055,34 @@ void blk_unregister_queue(struct gendisk *disk) mutex_lock(&q->sysfs_lock); blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); + mutex_unlock(&q->sysfs_lock); + mutex_lock(&queue_to_wrapper(q)->sysfs_dir_lock); /* * Remove the sysfs attributes before unregistering the queue data * structures that can be modified through sysfs. 
*/ if (q->mq_ops) blk_mq_unregister_dev(disk_to_dev(disk), q); - mutex_unlock(&q->sysfs_lock); - kobject_uevent(&q->kobj, KOBJ_REMOVE); - kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); - rq_qos_exit(q); - mutex_lock(&q->sysfs_lock); if (q->request_fn || (q->mq_ops && q->elevator)) elv_unregister_queue(q); mutex_unlock(&q->sysfs_lock); + mutex_unlock(&queue_to_wrapper(q)->sysfs_dir_lock); + + mutex_lock(&q->debugfs_mutex); + blk_trace_shutdown(q); + debugfs_remove_recursive(q->debugfs_dir); + q->debugfs_dir = NULL; + q->sched_debugfs_dir = NULL; + mutex_unlock(&q->debugfs_mutex); + + /* Now that we've deleted all child objects, we can delete the queue. */ + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); kobject_put(&disk_to_dev(disk)->kobj); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 01d0620a4e4a5e829c9de5c2dec0ac5e0b2b3ab3..a1867a2f4f181c164a163b75049f402fef6ae0ad 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -11,7 +11,10 @@ #include #include #include +#include +#include #include "blk.h" +#include "blk-io-hierarchy/stats.h" /* Max dispatch from a group in 1 round */ static int throtl_grp_quantum = 8; @@ -41,6 +44,19 @@ static struct blkcg_policy blkcg_policy_throtl; /* A workqueue to queue throttle related work */ static struct workqueue_struct *kthrotld_workqueue; +/* True if global limit is enabled in cgroup v1 */ +static bool global_limit; + +static int __init setup_global_limit(char *str) +{ + if (!strcmp(str, "1") || !strcmp(str, "Y") || !strcmp(str, "y")) + global_limit = true; + + return 1; +} + +__setup("blkcg_global_limit=", setup_global_limit); + /* * To implement hierarchical throttling, throtl_grps form a tree and bios * are dispatched upwards level by level until they reach the top and get @@ -535,7 +551,8 @@ static void throtl_pd_init(struct blkg_policy_data *pd) * regardless of the position of the group in the hierarchy. */ sq->parent_sq = &td->service_queue; - if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) + if ((cgroup_subsys_on_dfl(io_cgrp_subsys) || global_limit) && + blkg->parent) sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; tg->td = td; } @@ -568,6 +585,7 @@ static void throtl_pd_online(struct blkg_policy_data *pd) tg_update_has_rules(tg); } +#ifdef CONFIG_BLK_DEV_THROTTLING_LOW static void blk_throtl_update_limit_valid(struct throtl_data *td) { struct cgroup_subsys_state *pos_css; @@ -588,6 +606,11 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td) td->limit_valid[LIMIT_LOW] = low_valid; } +#else +static inline void blk_throtl_update_limit_valid(struct throtl_data *td) +{ +} +#endif static void throtl_upgrade_state(struct throtl_data *td); static void throtl_pd_offline(struct blkg_policy_data *pd) @@ -892,13 +915,10 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; u64 tmp; - jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; - - /* Slice has just started. 
Consider one slice interval */ - if (!jiffy_elapsed) - jiffy_elapsed_rnd = tg->td->throtl_slice; + jiffy_elapsed = jiffies - tg->slice_start[rw]; - jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); + /* Round up to the next throttle slice, wait time must be nonzero */ + jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); /* * jiffy_elapsed_rnd should not be a big value as minimum iops can be @@ -1331,6 +1351,8 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work) bio_list_add(&bio_list_on_stack, bio); spin_unlock_irq(q->queue_lock); + bio_list_hierarchy_end_io_acct(&bio_list_on_stack, STAGE_THROTTLE); + if (!bio_list_empty(&bio_list_on_stack)) { blk_start_plug(&plug); while((bio = bio_list_pop(&bio_list_on_stack))) @@ -1375,7 +1397,57 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v) return 0; } -static void tg_conf_updated(struct throtl_grp *tg, bool global) +static u64 throtl_update_bytes_disp(u64 dispatched, u64 new_limit, + u64 old_limit) +{ + if (new_limit == old_limit) + return dispatched; + + if (!dispatched) + return 0; + + /* + * In the case that multiply will overflow, just return 0. It will only + * let bios to be dispatched earlier. + */ + if (div64_u64(U64_MAX, dispatched) < new_limit) + return 0; + + dispatched *= new_limit; + return div64_u64(dispatched, old_limit); +} + +static u32 throtl_update_io_disp(u32 dispatched, u32 new_limit, u32 old_limit) +{ + if (new_limit == old_limit) + return dispatched; + + if (!dispatched) + return 0; + /* + * In the case that multiply will overflow, just return 0. It will only + * let bios to be dispatched earlier. + */ + if (UINT_MAX / dispatched < new_limit) + return 0; + + dispatched *= new_limit; + return dispatched / old_limit; +} + +static void throtl_update_slice(struct throtl_grp *tg, u64 *old_limits) +{ + tg->bytes_disp[READ] = throtl_update_bytes_disp(tg->bytes_disp[READ], + tg_bps_limit(tg, READ), old_limits[0]); + tg->bytes_disp[WRITE] = throtl_update_bytes_disp(tg->bytes_disp[WRITE], + tg_bps_limit(tg, WRITE), old_limits[1]); + tg->io_disp[READ] = throtl_update_io_disp(tg->io_disp[READ], + tg_iops_limit(tg, READ), (u32)old_limits[2]); + tg->io_disp[WRITE] = throtl_update_io_disp(tg->io_disp[WRITE], + tg_iops_limit(tg, WRITE), (u32)old_limits[3]); +} + +static void tg_conf_updated(struct throtl_grp *tg, u64 *old_limits, bool global) { struct throtl_service_queue *sq = &tg->service_queue; struct cgroup_subsys_state *pos_css; @@ -1414,16 +1486,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) parent_tg->latency_target); } - /* - * We're already holding queue_lock and know @tg is valid. Let's - * apply the new config directly. - * - * Restart the slices for both READ and WRITES. It might happen - * that a group's limit are dropped suddenly and we don't want to - * account recently dispatched IO with new low rate. - */ - throtl_start_new_slice(tg, 0); - throtl_start_new_slice(tg, 1); + throtl_update_slice(tg, old_limits); if (tg->flags & THROTL_TG_PENDING) { tg_update_disptime(tg); @@ -1431,6 +1494,39 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) } } +static inline int throtl_check_init_done(struct request_queue *q) +{ + if (test_bit(QUEUE_FLAG_REGISTER_DONE, &q->queue_flags)) + return 0; + + return blk_queue_dying(q) ? -ENODEV : -EBUSY; +} + +/* + * If throtl_check_init_done() return -EBUSY, we should retry after a short + * msleep(), since that throttle init will be completed in blk_register_queue() + * soon. 
+ */ +static inline int throtl_restart_syscall_when_busy(int errno) +{ + int ret = errno; + + if (ret == -EBUSY) { + msleep(10); + ret = restart_syscall(); + } + + return ret; +} + +static void tg_get_limits(struct throtl_grp *tg, u64 *limits) +{ + limits[0] = tg_bps_limit(tg, READ); + limits[1] = tg_bps_limit(tg, WRITE); + limits[2] = tg_iops_limit(tg, READ); + limits[3] = tg_iops_limit(tg, WRITE); +} + static ssize_t tg_set_conf(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, bool is_u64) { @@ -1439,11 +1535,16 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, struct throtl_grp *tg; int ret; u64 v; + u64 old_limits[4]; ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); if (ret) return ret; + ret = throtl_check_init_done(ctx.disk->queue); + if (ret) + goto out_finish; + ret = -EINVAL; if (sscanf(ctx.body, "%llu", &v) != 1) goto out_finish; @@ -1451,16 +1552,19 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, v = U64_MAX; tg = blkg_to_tg(ctx.blkg); + tg_get_limits(tg, old_limits); if (is_u64) *(u64 *)((void *)tg + of_cft(of)->private) = v; else *(unsigned int *)((void *)tg + of_cft(of)->private) = v; - tg_conf_updated(tg, false); + tg_conf_updated(tg, old_limits, false); ret = 0; out_finish: blkg_conf_finish(&ctx); + ret = throtl_restart_syscall_when_busy(ret); + return ret ?: nbytes; } @@ -1601,6 +1705,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, struct blkg_conf_ctx ctx; struct throtl_grp *tg; u64 v[4]; + u64 old_limits[4]; unsigned long idle_time; unsigned long latency_time; int ret; @@ -1610,12 +1715,16 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, if (ret) return ret; - tg = blkg_to_tg(ctx.blkg); + ret = throtl_check_init_done(ctx.disk->queue); + if (ret) + goto out_finish; + tg = blkg_to_tg(ctx.blkg); v[0] = tg->bps_conf[READ][index]; v[1] = tg->bps_conf[WRITE][index]; v[2] = tg->iops_conf[READ][index]; v[3] = tg->iops_conf[WRITE][index]; + tg_get_limits(tg, old_limits); idle_time = tg->idletime_threshold_conf; latency_time = tg->latency_target_conf; @@ -1702,11 +1811,13 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, tg->td->limit_index = LIMIT_LOW; } else tg->td->limit_index = LIMIT_MAX; - tg_conf_updated(tg, index == LIMIT_LOW && + tg_conf_updated(tg, old_limits, index == LIMIT_LOW && tg->td->limit_valid[LIMIT_LOW]); ret = 0; out_finish: blkg_conf_finish(&ctx); + ret = throtl_restart_syscall_when_busy(ret); + return ret ?: nbytes; } @@ -1802,7 +1913,7 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg) time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); ret = tg->latency_target == DFL_LATENCY_TARGET || tg->idletime_threshold == DFL_IDLE_THRESHOLD || - (ktime_get_ns() >> 10) - tg->last_finish_time > time || + (blk_time_get_ns() >> 10) - tg->last_finish_time > time || tg->avg_idletime > tg->idletime_threshold || (tg->latency_target && tg->bio_cnt && tg->bad_bio_cnt * 5 < tg->bio_cnt); @@ -2032,7 +2143,7 @@ static void throtl_downgrade_check(struct throtl_grp *tg) static void blk_throtl_update_idletime(struct throtl_grp *tg) { - unsigned long now = ktime_get_ns() >> 10; + unsigned long now = blk_time_get_ns() >> 10; unsigned long last_finish_time = tg->last_finish_time; if (now <= last_finish_time || last_finish_time == 0 || @@ -2144,20 +2255,23 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, struct throtl_service_queue *sq; bool rw = bio_data_dir(bio); bool throttled = false; + bool locked = true; struct throtl_data *td = tg->td; 
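As an illustrative aside (not part of the patch itself): the limit-update path added above — throtl_update_bytes_disp()/throtl_update_io_disp(), applied through throtl_update_slice() — rescales the bytes/ios already charged to the current slice by new_limit/old_limit, so the consumed fraction of the slice budget is preserved across a configuration change instead of restarting the slice; for example, 10 MB already dispatched under a 100 MB/s bps limit is re-accounted as 5 MB when the limit drops to 50 MB/s. A minimal self-contained sketch of that arithmetic, using a hypothetical helper name:

	/* Proportionally rescale work already charged to the current slice. */
	static u64 rescale_disp(u64 dispatched, u64 new_limit, u64 old_limit)
	{
		if (!dispatched || new_limit == old_limit)
			return dispatched;
		/* Multiplication would overflow: charge nothing, bios only go out earlier. */
		if (div64_u64(U64_MAX, dispatched) < new_limit)
			return 0;
		return div64_u64(dispatched * new_limit, old_limit);
	}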
WARN_ON_ONCE(!rcu_read_lock_held()); /* see throtl_charge_bio() */ - if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw]) + if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw]) { + locked = false; goto out; + } spin_lock_irq(q->queue_lock); throtl_update_latency_buckets(td); if (unlikely(blk_queue_bypass(q))) - goto out_unlock; + goto out; blk_throtl_assoc_bio(tg, bio); blk_throtl_update_idletime(tg); @@ -2209,7 +2323,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, sq = sq->parent_sq; tg = sq_to_tg(sq); if (!tg) - goto out_unlock; + goto out; } /* out-of-limit, queue to @tg */ @@ -2222,6 +2336,20 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, tg->last_low_overflow_time[rw] = jiffies; + /* + * This is slow path now, bio_hierarchy_start_io_acct() might spend + * some time to allocate memory. However, it's safe because 'tg' is + * pinned by this bio, and io charge should still be accurate because + * slice is already started from tg_may_dispatch(). + */ + spin_unlock_irq(q->queue_lock); + rcu_read_unlock(); + + bio_hierarchy_start_io_acct(bio, STAGE_THROTTLE); + + rcu_read_lock(); + spin_lock_irq(q->queue_lock); + td->nr_queued[rw]++; throtl_add_bio_tg(bio, qn, tg); throttled = true; @@ -2237,8 +2365,6 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); } -out_unlock: - spin_unlock_irq(q->queue_lock); out: bio_set_flag(bio, BIO_THROTTLED); @@ -2246,6 +2372,8 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, if (throttled || !td->track_bio_latency) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif + if (locked) + spin_unlock_irq(q->queue_lock); return throttled; } @@ -2292,7 +2420,7 @@ void blk_throtl_bio_endio(struct bio *bio) return; tg = blkg_to_tg(blkg); - finish_time_ns = ktime_get_ns(); + finish_time_ns = blk_time_get_ns(); tg->last_finish_time = finish_time_ns >> 10; start_time = bio_issue_time(&bio->bi_issue) >> 10; @@ -2365,8 +2493,11 @@ void blk_throtl_drain(struct request_queue *q) struct blkcg_gq *blkg; struct cgroup_subsys_state *pos_css; struct bio *bio; + struct bio_list bio_list_on_stack; int rw; + bio_list_init(&bio_list_on_stack); + queue_lockdep_assert_held(q); rcu_read_lock(); @@ -2383,12 +2514,18 @@ void blk_throtl_drain(struct request_queue *q) tg_drain_bios(&td->service_queue); rcu_read_unlock(); - spin_unlock_irq(q->queue_lock); /* all bios now should be in td->service_queue, issue them */ for (rw = READ; rw <= WRITE; rw++) while ((bio = throtl_pop_queued(&td->service_queue.queued[rw], NULL))) + bio_list_add(&bio_list_on_stack, bio); + spin_unlock_irq(q->queue_lock); + + bio_list_hierarchy_end_io_acct(&bio_list_on_stack, STAGE_THROTTLE); + + if (!bio_list_empty(&bio_list_on_stack)) + while ((bio = bio_list_pop(&bio_list_on_stack))) generic_make_request(bio); spin_lock_irq(q->queue_lock); @@ -2440,8 +2577,11 @@ int blk_throtl_init(struct request_queue *q) void blk_throtl_exit(struct request_queue *q) { BUG_ON(!q->td); + del_timer_sync(&q->td->service_queue.pending_timer); throtl_shutdown_wq(q); blkcg_deactivate_policy(q, &blkcg_policy_throtl); + blk_mq_unregister_hierarchy(q, STAGE_THROTTLE); + free_percpu(q->td->latency_buckets[READ]); free_percpu(q->td->latency_buckets[WRITE]); kfree(q->td); @@ -2474,6 +2614,8 @@ void blk_throtl_register_queue(struct request_queue *q) td->track_bio_latency = !queue_is_rq_based(q); if (!td->track_bio_latency) blk_stat_enable_accounting(q); + + 
blk_mq_register_hierarchy(q, STAGE_THROTTLE); } #ifdef CONFIG_BLK_DEV_THROTTLING_LOW diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 8ac93fcbaa2eaaf680cebac8f3d8da8f9a25805d..cf098d2a7262c094345d386f7da8cda87900de0b 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -23,9 +23,16 @@ #include #include #include +#ifndef __GENKSYMS__ +#include +#include "blk.h" +#endif #include "blk-wbt.h" #include "blk-rq-qos.h" +#ifndef __GENKSYMS__ +#include "blk-io-hierarchy/stats.h" +#endif #define CREATE_TRACE_POINTS #include @@ -76,7 +83,8 @@ enum { static inline bool rwb_enabled(struct rq_wb *rwb) { - return rwb && rwb->wb_normal != 0; + return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT && + rwb->wb_normal != 0; } static void wb_timestamp(struct rq_wb *rwb, unsigned long *var) @@ -219,7 +227,7 @@ static u64 rwb_sync_issue_lat(struct rq_wb *rwb) if (!issue || !rwb->sync_cookie) return 0; - now = ktime_to_ns(ktime_get()); + now = blk_time_get_ns(); return now - issue; } @@ -307,7 +315,8 @@ static void calc_wb_limits(struct rq_wb *rwb) static void scale_up(struct rq_wb *rwb) { - rq_depth_scale_up(&rwb->rq_depth); + if (!rq_depth_scale_up(&rwb->rq_depth)) + return; calc_wb_limits(rwb); rwb->unknown_cnt = 0; rwb_wake_all(rwb); @@ -316,7 +325,8 @@ static void scale_up(struct rq_wb *rwb) static void scale_down(struct rq_wb *rwb, bool hard_throttle) { - rq_depth_scale_down(&rwb->rq_depth, hard_throttle); + if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle)) + return; calc_wb_limits(rwb); rwb->unknown_cnt = 0; rwb_trace_step(rwb, "scale down"); @@ -436,8 +446,13 @@ void wbt_set_min_lat(struct request_queue *q, u64 val) struct rq_qos *rqos = wbt_rq_qos(q); if (!rqos) return; + RQWB(rqos)->min_lat_nsec = val; - RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL; + if (val) + RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL; + else + RQWB(rqos)->enable_state = WBT_STATE_OFF_MANUAL; + __wbt_update_limits(RQWB(rqos)); } @@ -511,6 +526,7 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, return -1; data->got_token = true; + smp_wmb(); list_del_init(&curr->entry); wake_up_process(data->task); return 1; @@ -521,11 +537,12 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, * the timer to kick off queuing again. */ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, - unsigned long rw, spinlock_t *lock) + struct bio *bio, spinlock_t *lock) __releases(lock) __acquires(lock) { struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); + unsigned long rw = bio->bi_opf; struct wbt_wait_data data = { .wq = { .func = wbt_wake_function, @@ -542,8 +559,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) return; - prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); + bio_hierarchy_start_io_acct(bio, STAGE_WBT); + has_sleeper = !__prepare_to_wait_exclusive(&rqw->wait, &data.wq, + TASK_UNINTERRUPTIBLE); do { + /* The memory barrier in set_task_state saves us here. */ if (data.got_token) break; @@ -556,6 +576,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, * which means we now have two. Put our local token * and wake anyone else potentially waiting for one. 
*/ + smp_rmb(); if (data.got_token) wbt_rqw_done(rwb, rqw, wb_acct); break; @@ -568,10 +589,12 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, } else io_schedule(); - has_sleeper = false; + has_sleeper = true; + set_current_state(TASK_UNINTERRUPTIBLE); } while (1); finish_wait(&rqw->wait, &data.wq); + bio_hierarchy_end_io_acct(bio, STAGE_WBT); } static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) @@ -636,7 +659,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) return; } - __wbt_wait(rwb, flags, bio->bi_opf, lock); + __wbt_wait(rwb, flags, bio, lock); if (!blk_stat_is_active(rwb->cb)) rwb_arm_timer(rwb); @@ -700,17 +723,26 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on) */ void wbt_enable_default(struct request_queue *q) { - struct rq_qos *rqos = wbt_rq_qos(q); + struct rq_qos *rqos; + bool disable_flag = q->elevator && + !strcmp(q->elevator->type->elevator_name, "cfq"); + /* Throttling already enabled? */ - if (rqos) + rqos = wbt_rq_qos(q); + if (rqos) { + if (!disable_flag && + RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT) + RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT; return; + } /* Queue not registered? Maybe shutting down... */ if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) return; - if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) || - (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ))) + if (((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) || + (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ))) && + !disable_flag) wbt_init(q); } EXPORT_SYMBOL_GPL(wbt_enable_default); @@ -745,6 +777,7 @@ static void wbt_exit(struct rq_qos *rqos) struct rq_wb *rwb = RQWB(rqos); struct request_queue *q = rqos->q; + blk_mq_unregister_hierarchy(q, STAGE_WBT); blk_stat_remove_callback(q, rwb->cb); blk_stat_free_callback(rwb->cb); kfree(rwb); @@ -760,8 +793,10 @@ void wbt_disable_default(struct request_queue *q) if (!rqos) return; rwb = RQWB(rqos); - if (rwb->enable_state == WBT_STATE_ON_DEFAULT) - rwb->wb_normal = 0; + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) { + blk_stat_deactivate(rwb->cb); + rwb->enable_state = WBT_STATE_OFF_DEFAULT; + } } EXPORT_SYMBOL_GPL(wbt_disable_default); @@ -810,10 +845,18 @@ int wbt_init(struct request_queue *q) rq_qos_add(q, &rwb->rqos); blk_stat_add_callback(q, rwb->cb); - rwb->min_lat_nsec = wbt_default_latency_nsec(q); + /* + * Ensure that the queue is idled by freezing the queue + * while enabling wbt, there is no inflight rq running. + */ + blk_mq_freeze_queue(q); + rwb->min_lat_nsec = wbt_default_latency_nsec(q); wbt_set_queue_depth(q, blk_queue_depth(q)); + + blk_mq_unfreeze_queue(q); wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); + blk_mq_register_hierarchy(q, STAGE_WBT); return 0; } diff --git a/block/blk-wbt.h b/block/blk-wbt.h index f47218d5b3b2081c81f92c40d3f4fc247f3f9242..58b36e019f9ebb445b04774aa05b36b97974eca5 100644 --- a/block/blk-wbt.h +++ b/block/blk-wbt.h @@ -28,12 +28,15 @@ enum { }; /* - * Enable states. Either off, or on by default (done at init time), - * or on through manual setup in sysfs. + * If current state is WBT_STATE_ON/OFF_DEFAULT, it can be covered to any other + * state, if current state is WBT_STATE_ON/OFF_MANUAL, it can only be covered + * to WBT_STATE_OFF/ON_MANUAL. 
*/ enum { - WBT_STATE_ON_DEFAULT = 1, - WBT_STATE_ON_MANUAL = 2, + WBT_STATE_ON_DEFAULT = 1, /* on by default */ + WBT_STATE_ON_MANUAL = 2, /* on manually by sysfs */ + WBT_STATE_OFF_DEFAULT = 3, /* off by default */ + WBT_STATE_OFF_MANUAL = 4, /* off manually by sysfs */ }; struct rq_wb { diff --git a/block/blk.h b/block/blk.h index 9db4e389582c8da7848d458a9d4f12d64a1aecae..30146e2099e3ef319ca626e28127cd0d1af80a69 100644 --- a/block/blk.h +++ b/block/blk.h @@ -15,14 +15,13 @@ /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) -#ifdef CONFIG_DEBUG_FS extern struct dentry *blk_debugfs_root; -#endif struct blk_flush_queue { unsigned int flush_queue_delayed:1; unsigned int flush_pending_idx:1; unsigned int flush_running_idx:1; + blk_status_t rq_status; unsigned long flush_pending_since; struct list_head flush_queue[2]; struct list_head flush_data_in_flight; @@ -36,6 +35,32 @@ struct blk_flush_queue { spinlock_t mq_flush_lock; }; +/* + * The wrapper of request_queue to fix kabi while adding members. + */ +struct request_queue_wrapper { + struct request_queue q; + /* + * Protect concurrent access to q_usage_counter by + * percpu_ref_kill() and percpu_ref_reinit(). + */ + struct mutex mq_freeze_lock; + int mq_freeze_depth; + +#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC + /* used when QUEUE_FLAG_DISPATCH_ASYNC is set */ + struct cpumask dispatch_async_cpus; + int __percpu *last_dispatch_cpu; +#endif + struct mutex sysfs_dir_lock; +#ifdef CONFIG_BLK_IO_HIERARCHY_STATS + struct blk_io_hierarchy_stats *io_hierarchy_stats; +#endif +}; + +#define queue_to_wrapper(__q) \ + container_of((__q), struct request_queue_wrapper, q) + extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; @@ -123,8 +148,68 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +#ifdef CONFIG_BLK_BIO_ALLOC_TIME +static inline u64 blk_time_get_ns(void); +static inline void blk_rq_init_bi_alloc_time(struct request *rq, + struct request *first_rq) +{ + if (!rq->q->mq_ops) + return; + + request_to_wrapper(rq)->bi_alloc_time_ns = + first_rq ? 
request_to_wrapper(first_rq)->bi_alloc_time_ns : + blk_time_get_ns(); +} + +/* + * Used in the following cases to update request bi_alloc_time_ns: + * + * 1) Allocate a new @rq for @bio; + * 2) @bio is merged to @rq, in this case @merged_rq should be NULL; + * 3) @merged_rq is merged to @rq, in this case @bio should be NULL; + */ +static inline void blk_rq_update_bi_alloc_time(struct request *rq, + struct bio *bio, + struct request *merged_rq) +{ + struct request_wrapper *rq_wrapper; + struct request_wrapper *merged_rq_wrapper; + + if (!rq->q->mq_ops) + return; + + rq_wrapper = request_to_wrapper(rq); + if (bio) { + if (rq_wrapper->bi_alloc_time_ns > bio->bi_alloc_time_ns) + rq_wrapper->bi_alloc_time_ns = bio->bi_alloc_time_ns; + return; + } + + if (WARN_ON_ONCE(!merged_rq)) + return; + + merged_rq_wrapper = request_to_wrapper(merged_rq); + if (rq_wrapper->bi_alloc_time_ns > merged_rq_wrapper->bi_alloc_time_ns) + rq_wrapper->bi_alloc_time_ns = + merged_rq_wrapper->bi_alloc_time_ns; +} +#else /* CONFIG_BLK_BIO_ALLOC_TIME */ +static inline void blk_rq_init_bi_alloc_time(struct request *rq, + struct request *first_rq) +{ +} + +static inline void blk_rq_update_bi_alloc_time(struct request *rq, + struct bio *bio, + struct request *merged_rq) +{ +} +#endif /* CONFIG_BLK_BIO_ALLOC_TIME */ + +bool is_flush_rq(struct request *req); + struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, - int node, int cmd_size); + int node, int cmd_size, gfp_t flags); void blk_free_flush_queue(struct blk_flush_queue *q); int blk_init_rl(struct request_list *rl, struct request_queue *q, @@ -188,6 +273,10 @@ unsigned int blk_plug_queued_count(struct request_queue *q); void blk_account_io_start(struct request *req, bool new_io); void blk_account_io_completion(struct request *req, unsigned int bytes); void blk_account_io_done(struct request *req, u64 now); +int bd_prepare_to_claim(struct block_device *bdev, + struct block_device *whole, void *holder); +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder); /* * EH timer and IO completion will both attempt to 'grab' the request, make @@ -237,8 +326,9 @@ int elevator_init_mq(struct request_queue *q); int elevator_switch_mq(struct request_queue *q, struct elevator_type *new_e); void elevator_exit(struct request_queue *, struct elevator_queue *); -int elv_register_queue(struct request_queue *q); +int elv_register_queue(struct request_queue *q, bool uevent); void elv_unregister_queue(struct request_queue *q); +void __blk_rq_init(struct request_queue *q, struct request *rq); struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); @@ -260,8 +350,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio); struct request *attempt_back_merge(struct request_queue *q, struct request *rq); struct request *attempt_front_merge(struct request_queue *q, struct request *rq); -int blk_attempt_req_merge(struct request_queue *q, struct request *rq, - struct request *next); +bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, + struct request *next); void blk_recalc_rq_segments(struct request *rq); void blk_rq_set_mixed_merge(struct request *rq); bool blk_rq_merge_ok(struct request *rq, struct bio *bio); @@ -328,6 +418,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq) return rq->__deadline & ~0x1UL; } +/* + * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size + * is defined as 'unsigned int'; meanwhile it has to be aligned with the logical
+ * block size which is the minimum accepted unit by hardware. + */ +static inline unsigned int bio_allowed_max_sectors(struct request_queue *q) +{ + return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9; +} + /* * Internal io_context interface */ @@ -375,6 +475,15 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) return current->io_context; } +/* + * Only need start/end time stamping if we have stats enabled, or using + * an IO scheduler. + */ +static inline bool blk_mq_need_time_stamp(struct request *rq) +{ + return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator; +} + /* * Internal throttling interface */ @@ -421,4 +530,36 @@ extern int blk_iolatency_init(struct request_queue *q); static inline int blk_iolatency_init(struct request_queue *q) { return 0; } #endif +#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC +extern void blk_free_queue_dispatch_async(struct request_queue *q); +#else +static inline void blk_free_queue_dispatch_async(struct request_queue *q) +{ +} +#endif + +static inline u64 blk_time_get_ns(void) +{ + struct task_struct *tsk = current; + struct blk_plug *plug = tsk->plug; + + if (!plug || !in_task()) + return ktime_get_ns(); + + /* + * 0 could very well be a valid time, but rather than flag "this is + * a valid timestamp" separately, just accept that we'll do an extra + * ktime_get_ns() if we just happen to get 0 as the current time. + */ + if (!tsk->_resvd->cur_ktime) { + tsk->_resvd->cur_ktime = ktime_get_ns(); + tsk->flags |= PF_BLOCK_TS; + } + return tsk->_resvd->cur_ktime; +} + +static inline ktime_t blk_time_get(void) +{ + return ns_to_ktime(blk_time_get_ns()); +} #endif /* BLK_INTERNAL_H */ diff --git a/block/blk_extra_api.h b/block/blk_extra_api.h new file mode 100644 index 0000000000000000000000000000000000000000..704d2a61bf12f61d87ed4ffe265b5ccd6c1f1d9b --- /dev/null +++ b/block/blk_extra_api.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef BLK_EXTRA_API_H +#define BLK_EXTRA_API_H + +/* + * Include blk.h will cause kabi broken in some contexts because it will expose + * definitions for some data structure. This file is used for the apis that + * can't be placed in blk.h. 
+ */ + +#include + +int disk_scan_partitions(struct gendisk *disk, fmode_t mode); + +#endif /* BLK_EXTRA_API_H */ diff --git a/block/bounce.c b/block/bounce.c index bc63b3a2d18cad59b0061c4fc1d4ff7b042ac5f4..abb50e7e5fab197d351dc68cd88e3704663a3396 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -31,6 +31,24 @@ static struct bio_set bounce_bio_set, bounce_bio_split; static mempool_t page_pool, isa_page_pool; +static void init_bounce_bioset(void) +{ + static bool bounce_bs_setup; + int ret; + + if (bounce_bs_setup) + return; + + ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); + BUG_ON(ret); + if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE)) + BUG_ON(1); + + ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); + BUG_ON(ret); + bounce_bs_setup = true; +} + #if defined(CONFIG_HIGHMEM) static __init int init_emergency_pool(void) { @@ -44,14 +62,7 @@ static __init int init_emergency_pool(void) BUG_ON(ret); pr_info("pool size: %d pages\n", POOL_SIZE); - ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); - BUG_ON(ret); - if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE)) - BUG_ON(1); - - ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); - BUG_ON(ret); - + init_bounce_bioset(); return 0; } @@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) return mempool_alloc_pages(gfp_mask | GFP_DMA, data); } +static DEFINE_MUTEX(isa_mutex); + /* * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA * as the max address, so check if the pool has already been created. @@ -94,14 +107,20 @@ int init_emergency_isa_pool(void) { int ret; - if (mempool_initialized(&isa_page_pool)) + mutex_lock(&isa_mutex); + + if (mempool_initialized(&isa_page_pool)) { + mutex_unlock(&isa_mutex); return 0; + } ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa, mempool_free_pages, (void *) 0); BUG_ON(ret); pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE); + init_bounce_bioset(); + mutex_unlock(&isa_mutex); return 0; } @@ -229,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, return NULL; bio->bi_disk = bio_src->bi_disk; bio->bi_opf = bio_src->bi_opf; + bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 2eb87444b157271fe4c3e9d57f4cffc303f0c0a5..130854ad8cdb7830cd626dabb42f3b638e0d1dfe 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3778,6 +3778,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) if (cfqq) { cfq_log_cfqq(cfqd, cfqq, "changed cgroup"); cic_set_cfqq(cic, NULL, true); + cfq_put_cooperator(cfqq); cfq_put_queue(cfqq); } @@ -4562,6 +4563,7 @@ static void cfq_exit_queue(struct elevator_queue *e) kfree(cfqd->root_group); #endif kfree(cfqd); + wbt_enable_default(q); } static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 6ca015f92766e9052d444f9e81ba3c307846166c..6490b2759bcb4e23ae6b1aac777e6725d08532a8 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -354,6 +355,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) * but we call blkdev_ioctl, which gets the lock for us */ case BLKRRPART: + case 
BLKREPORTZONE: + case BLKRESETZONE: return blkdev_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg)); case BLKBSZSET_32: @@ -401,6 +404,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKTRACETEARDOWN: /* compatible */ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); return ret; + case IOC_PR_REGISTER: + case IOC_PR_RESERVE: + case IOC_PR_RELEASE: + case IOC_PR_PREEMPT: + case IOC_PR_PREEMPT_ABORT: + case IOC_PR_CLEAR: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); default: if (disk->fops->compat_ioctl) ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/block/elevator.c b/block/elevator.c index fae58b2f906fc5e0352c3f3194780abe13369784..91c8769882d567b8a77a324859b13668e7e07c92 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -469,9 +469,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req, * we can append 'rq' to an existing request, so we can throw 'rq' away * afterwards. * - * Returns true if we merged, false otherwise + * Returns true if we merged, false otherwise. 'free' will contain all + * requests that need to be freed. */ -bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) +bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq, + struct list_head *free) { struct request *__rq; bool ret; @@ -482,8 +484,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) /* * First try one-hit cache. */ - if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) + if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) { + list_add(&rq->queuelist, free); return true; + } if (blk_queue_noxmerges(q)) return false; @@ -497,6 +501,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) break; + list_add(&rq->queuelist, free); /* The merged request could be merged with others, try again */ ret = true; rq = __rq; @@ -618,6 +623,7 @@ void elv_drain_elevator(struct request_queue *q) void __elv_add_request(struct request_queue *q, struct request *rq, int where) { + LIST_HEAD(free); trace_block_rq_insert(q, rq); blk_pm_add_request(q, rq); @@ -665,8 +671,10 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) * queue already, we are done - rq has now been freed, * so no need to do anything further. 
*/ - if (elv_attempt_insert_merge(q, rq)) + if (elv_attempt_insert_merge(q, rq, &free)) { + blk_mq_free_requests(&free); break; + } /* fall through */ case ELEVATOR_INSERT_SORT: BUG_ON(blk_rq_is_passthrough(rq)); @@ -833,7 +841,7 @@ static struct kobj_type elv_ktype = { .release = elevator_release, }; -int elv_register_queue(struct request_queue *q) +int elv_register_queue(struct request_queue *q, bool uevent) { struct elevator_queue *e = q->elevator; int error; @@ -850,7 +858,9 @@ int elv_register_queue(struct request_queue *q) attr++; } } - kobject_uevent(&e->kobj, KOBJ_ADD); + if (uevent) + kobject_uevent(&e->kobj, KOBJ_ADD); + e->registered = 1; if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn) e->type->ops.sq.elevator_registered_fn(q); @@ -867,9 +877,8 @@ void elv_unregister_queue(struct request_queue *q) kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); + e->registered = 0; - /* Re-enable throttling in case elevator disabled it */ - wbt_enable_default(q); } } @@ -942,6 +951,7 @@ int elevator_switch_mq(struct request_queue *q, if (q->elevator) { if (q->elevator->registered) elv_unregister_queue(q); + ioc_clear_queue(q); elevator_exit(q, q->elevator); } @@ -951,7 +961,7 @@ int elevator_switch_mq(struct request_queue *q, goto out; if (new_e) { - ret = elv_register_queue(q); + ret = elv_register_queue(q, true); if (ret) { elevator_exit(q, q->elevator); goto out; @@ -980,23 +990,19 @@ int elevator_init_mq(struct request_queue *q) if (q->nr_hw_queues != 1) return 0; - /* - * q->sysfs_lock must be held to provide mutual exclusion between - * elevator_switch() and here. - */ - mutex_lock(&q->sysfs_lock); + WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)); + if (unlikely(q->elevator)) - goto out_unlock; + goto out; e = elevator_get(q, "mq-deadline", false); if (!e) - goto out_unlock; + goto out; err = blk_mq_init_sched(q, e); if (err) elevator_put(e); -out_unlock: - mutex_unlock(&q->sysfs_lock); +out: return err; } @@ -1017,11 +1023,11 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (q->mq_ops) { blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); + blk_mq_quiesce_queue_internal(q); err = elevator_switch_mq(q, new_e); - blk_mq_unquiesce_queue(q); + blk_mq_unquiesce_queue_internal(q); blk_mq_unfreeze_queue(q); return err; @@ -1051,7 +1057,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (err) goto fail_init; - err = elv_register_queue(q); + err = elv_register_queue(q, true); if (err) goto fail_register; @@ -1071,7 +1077,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) /* switch failed, restore and re-register old elevator */ if (old) { q->elevator = old; - elv_register_queue(q); + elv_register_queue(q, true); blk_queue_bypass_end(q); } diff --git a/block/genhd.c b/block/genhd.c index be5bab20b2abf278fd7d7370c1a082de0928b1ed..d75d52a53e10aee099e86620d14a7515d3f1ecb6 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -24,6 +24,7 @@ #include "blk.h" +extern bool precise_iostat; static DEFINE_MUTEX(block_class_lock); struct kobject *block_depr; @@ -47,7 +48,7 @@ static void disk_release_events(struct gendisk *disk); void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw) { - if (q->mq_ops) + if (!precise_iostat && q->mq_ops) return; atomic_inc(&part->in_flight[rw]); @@ -57,7 +58,7 @@ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw) void part_dec_in_flight(struct request_queue *q, struct 
hd_struct *part, int rw) { - if (q->mq_ops) + if (!precise_iostat && q->mq_ops) return; atomic_dec(&part->in_flight[rw]); @@ -68,7 +69,7 @@ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw) void part_in_flight(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]) { - if (q->mq_ops) { + if (!precise_iostat && q->mq_ops) { blk_mq_in_flight(q, part, inflight); return; } @@ -85,7 +86,7 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part, void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]) { - if (q->mq_ops) { + if (!precise_iostat && q->mq_ops) { blk_mq_in_flight_rw(q, part, inflight); return; } @@ -208,14 +209,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; + get_device(part_to_dev(part)); + piter->part = part; if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && - piter->idx == 0)) + piter->idx == 0)) { + put_device(part_to_dev(part)); + piter->part = NULL; continue; + } - get_device(part_to_dev(part)); - piter->part = part; piter->idx += inc; break; } @@ -272,17 +276,21 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) ptbl = rcu_dereference(disk->part_tbl); part = rcu_dereference(ptbl->last_lookup); - if (part && sector_in_part(part, sector)) + if (part && sector_in_part(part, sector) && hd_struct_try_get(part)) return part; for (i = 1; i < ptbl->len; i++) { part = rcu_dereference(ptbl->part[i]); if (part && sector_in_part(part, sector)) { + if (!hd_struct_try_get(part)) + goto exit; rcu_assign_pointer(ptbl->last_lookup, part); return part; } } + exit: + hd_struct_get(&disk->part0); return &disk->part0; } EXPORT_SYMBOL_GPL(disk_map_sector_rcu); @@ -518,6 +526,18 @@ void blk_free_devt(dev_t devt) } } +/** + * We invalidate devt by assigning NULL pointer for devt in idr. 
+ */ +void blk_invalidate_devt(dev_t devt) +{ + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { + spin_lock_bh(&ext_devt_lock); + idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt))); + spin_unlock_bh(&ext_devt_lock); + } +} + static char *bdevt_str(dev_t devt, char *buf) { if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) { @@ -551,6 +571,14 @@ void blk_unregister_region(dev_t devt, unsigned long range) EXPORT_SYMBOL(blk_unregister_region); +void blk_delete_region(dev_t devt, unsigned long range, + struct kobject *(*probe)(dev_t, int *, void *)) +{ + kobj_delete(bdev_map, devt, range, probe); +} + +EXPORT_SYMBOL(blk_delete_region); + static struct kobject *exact_match(dev_t devt, int *partno, void *data) { struct gendisk *p = data; @@ -570,9 +598,6 @@ static int exact_lock(dev_t devt, void *data) static void register_disk(struct device *parent, struct gendisk *disk) { struct device *ddev = disk_to_dev(disk); - struct block_device *bdev; - struct disk_part_iter piter; - struct hd_struct *part; int err; ddev->parent = parent; @@ -603,30 +628,74 @@ static void register_disk(struct device *parent, struct gendisk *disk) disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); - if (disk->flags & GENHD_FL_HIDDEN) { - dev_set_uevent_suppress(ddev, 0); + if (disk->flags & GENHD_FL_HIDDEN) return; + + if (disk->queue->backing_dev_info->dev) { + err = sysfs_create_link(&ddev->kobj, + &disk->queue->backing_dev_info->dev->kobj, + "bdi"); + WARN_ON(err); } +} - /* No minors to use for partitions */ - if (!disk_part_scan_enabled(disk)) - goto exit; +int disk_scan_partitions(struct gendisk *disk, fmode_t mode) +{ + struct block_device *bdev; + int ret; - /* No such device (e.g., media were just removed) */ - if (!get_capacity(disk)) - goto exit; + if (!disk_part_scan_enabled(disk)) + return -EINVAL; bdev = bdget_disk(disk, 0); if (!bdev) - goto exit; + return -ENOMEM; + + /* + * If the device is opened exclusively by the current thread already, it's + * safe to scan partitions; otherwise, use bd_prepare_to_claim() to + * synchronize with other exclusive openers and other partition + * scanners. + */ + if (!(mode & FMODE_EXCL)) { + ret = bd_prepare_to_claim(bdev, bdev, disk_scan_partitions); + if (ret) { + bdput(bdev); + return ret; + } + + /* Ping the bdev until bd_abort_claiming() */ + bdgrab(bdev); + } bdev->bd_invalidated = 1; - err = blkdev_get(bdev, FMODE_READ, NULL); - if (err < 0) - goto exit; - blkdev_put(bdev, FMODE_READ); + ret = blkdev_get(bdev, mode & ~FMODE_EXCL, NULL); + if (!ret) + blkdev_put(bdev, mode & ~FMODE_EXCL); + + /* + * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set, + * and this will cause re-assembly of a partitioned raid device to + * create partitions for the underlying disk.
+ */ + bdev->bd_invalidated = 0; + if (!(mode & FMODE_EXCL)) { + bd_abort_claiming(bdev, bdev, disk_scan_partitions); + bdput(bdev); + } + + return ret; +} + +static void disk_init_partition(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + struct disk_part_iter piter; + struct hd_struct *part; + + if (get_capacity(disk)) + disk_scan_partitions(disk, FMODE_READ); -exit: /* announce disk after possible partitions are created */ dev_set_uevent_suppress(ddev, 0); kobject_uevent(&ddev->kobj, KOBJ_ADD); @@ -636,11 +705,6 @@ static void register_disk(struct device *parent, struct gendisk *disk) while ((part = disk_part_iter_next(&piter))) kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); disk_part_iter_exit(&piter); - - err = sysfs_create_link(&ddev->kobj, - &disk->queue->backing_dev_info->dev->kobj, - "bdi"); - WARN_ON(err); } /** @@ -657,6 +721,7 @@ static void register_disk(struct device *parent, struct gendisk *disk) static void __device_add_disk(struct device *parent, struct gendisk *disk, bool register_queue) { + struct block_device *bdev = NULL; dev_t devt; int retval; @@ -668,8 +733,6 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk, WARN_ON(!disk->minors && !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN))); - disk->flags |= GENHD_FL_UP; - retval = blk_alloc_devt(&disk->part0, &devt); if (retval) { WARN_ON(1); @@ -710,6 +773,23 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk, disk_add_events(disk); blk_integrity_add(disk); + + /* Make sure the first partition scan will proceed */ + if (get_capacity(disk) && disk_part_scan_enabled(disk)) { + bdev = bdget_disk(disk, 0); + if (bdev) + bdev->bd_invalidated = 1; + } + + /* + * Set the flag last, so that the block device can't be opened + * before its registration is done. + */ + disk->flags |= GENHD_FL_UP; + disk_init_partition(disk); + + if (bdev) + bdput(bdev); } void device_add_disk(struct device *parent, struct gendisk *disk) @@ -769,6 +849,13 @@ void del_gendisk(struct gendisk *disk) if (!(disk->flags & GENHD_FL_HIDDEN)) blk_unregister_region(disk_devt(disk), disk->minors); + /* + * Remove gendisk pointer from idr so that it cannot be looked up + * while RCU period before freeing gendisk is running to prevent + * use-after-free issues. Note that the device number stays + * "in-use" until we really free the gendisk.
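+ * blk_free_devt() finally releases the number once the last reference to the gendisk is dropped.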
+ */ + blk_invalidate_devt(disk_devt(disk)); kobject_put(disk->part0.holder_dir); kobject_put(disk->slave_dir); @@ -1279,6 +1366,7 @@ static void disk_release(struct device *dev) hd_free_part(&disk->part0); if (disk->queue) blk_put_queue(disk->queue); + kfree(disk->user_ro_bitmap); kfree(disk); } struct class block_class = { @@ -1329,9 +1417,12 @@ static int diskstats_show(struct seq_file *seqf, void *v) disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); while ((hd = disk_part_iter_next(&piter))) { - cpu = part_stat_lock(); - part_round_stats(gp->queue, cpu, hd); - part_stat_unlock(); + if (precise_iostat) { + cpu = part_stat_lock(); + part_round_stats(gp->queue, cpu, hd); + part_stat_unlock(); + } + part_in_flight(gp->queue, hd, inflight); seq_printf(seqf, "%4d %7d %s " "%lu %lu %lu %u " @@ -1457,6 +1548,14 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) return NULL; } + disk->user_ro_bitmap = kzalloc_node( + BITS_TO_LONGS(DISK_MAX_PARTS) * sizeof(long), + GFP_KERNEL, node_id); + if (!disk->user_ro_bitmap) { + hd_free_part(&disk->part0); + kfree(disk); + return NULL; + } disk->minors = minors; rand_initialize_disk(disk); disk_to_dev(disk)->class = &block_class; @@ -1526,19 +1625,33 @@ void set_device_ro(struct block_device *bdev, int flag) EXPORT_SYMBOL(set_device_ro); +int get_user_ro(struct gendisk *disk, unsigned int partno) +{ + /* Is the user read-only bit set for the whole disk device? */ + if (test_bit(0, disk->user_ro_bitmap)) + return 1; + + /* Is the user read-only bit set for this particular partition? */ + if (test_bit(partno, disk->user_ro_bitmap)) + return 1; + + return 0; +} + void set_disk_ro(struct gendisk *disk, int flag) { struct disk_part_iter piter; struct hd_struct *part; - if (disk->part0.policy != flag) { + if (disk->part0.policy != flag) set_disk_ro_uevent(disk, flag); - disk->part0.policy = flag; - } - disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0); while ((part = disk_part_iter_next(&piter))) - part->policy = flag; + if (get_user_ro(disk, part->partno)) + part->policy = 1; + else + part->policy = flag; disk_part_iter_exit(&piter); } diff --git a/block/ioctl.c b/block/ioctl.c index 3884d810efd27fc73bb07659b91296ea46265252..1fdf496d7717b709b7a96fa4db991bf0bff87482 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -10,16 +10,13 @@ #include #include +#include "blk_extra_api.h" + static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) { - struct block_device *bdevp; - struct gendisk *disk; - struct hd_struct *part, *lpart; struct blkpg_ioctl_arg a; struct blkpg_partition p; - struct disk_part_iter piter; - long long start, length; - int partno; + sector_t start, length; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -27,131 +24,45 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user return -EFAULT; if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) return -EFAULT; - disk = bdev->bd_disk; if (bdev != bdev->bd_contains) return -EINVAL; - partno = p.pno; - if (partno <= 0) + + if (p.pno <= 0) return -EINVAL; - switch (a.op) { - case BLKPG_ADD_PARTITION: - start = p.start >> 9; - length = p.length >> 9; - /* check for fit in a hd_struct */ - if (sizeof(sector_t) == sizeof(long) && - sizeof(long long) > sizeof(long)) { - long pstart = start, plength = length; - if (pstart != start || plength != length - || pstart < 0 || plength < 0 || partno > 65535) - return -EINVAL; - } - /* check if partition is 
aligned to blocksize */ - if (p.start & (bdev_logical_block_size(bdev) - 1)) - return -EINVAL; - - mutex_lock(&bdev->bd_mutex); - - /* overlap? */ - disk_part_iter_init(&piter, disk, - DISK_PITER_INCL_EMPTY); - while ((part = disk_part_iter_next(&piter))) { - if (!(start + length <= part->start_sect || - start >= part->start_sect + part->nr_sects)) { - disk_part_iter_exit(&piter); - mutex_unlock(&bdev->bd_mutex); - return -EBUSY; - } - } - disk_part_iter_exit(&piter); - - /* all seems OK */ - part = add_partition(disk, partno, start, length, - ADDPART_FLAG_NONE, NULL); - mutex_unlock(&bdev->bd_mutex); - return PTR_ERR_OR_ZERO(part); - case BLKPG_DEL_PARTITION: - part = disk_get_part(disk, partno); - if (!part) - return -ENXIO; - - bdevp = bdget(part_devt(part)); - disk_put_part(part); - if (!bdevp) - return -ENOMEM; - - mutex_lock(&bdevp->bd_mutex); - if (bdevp->bd_openers) { - mutex_unlock(&bdevp->bd_mutex); - bdput(bdevp); - return -EBUSY; - } - /* all seems OK */ - fsync_bdev(bdevp); - invalidate_bdev(bdevp); - - mutex_lock_nested(&bdev->bd_mutex, 1); - delete_partition(disk, partno); - mutex_unlock(&bdev->bd_mutex); - mutex_unlock(&bdevp->bd_mutex); - bdput(bdevp); - - return 0; - case BLKPG_RESIZE_PARTITION: - start = p.start >> 9; - /* new length of partition in bytes */ - length = p.length >> 9; - /* check for fit in a hd_struct */ - if (sizeof(sector_t) == sizeof(long) && - sizeof(long long) > sizeof(long)) { - long pstart = start, plength = length; - if (pstart != start || plength != length - || pstart < 0 || plength < 0) - return -EINVAL; - } - part = disk_get_part(disk, partno); - if (!part) - return -ENXIO; - bdevp = bdget(part_devt(part)); - if (!bdevp) { - disk_put_part(part); - return -ENOMEM; - } - mutex_lock(&bdevp->bd_mutex); - mutex_lock_nested(&bdev->bd_mutex, 1); - if (start != part->start_sect) { - mutex_unlock(&bdevp->bd_mutex); - mutex_unlock(&bdev->bd_mutex); - bdput(bdevp); - disk_put_part(part); - return -EINVAL; - } - /* overlap? 
*/ - disk_part_iter_init(&piter, disk, - DISK_PITER_INCL_EMPTY); - while ((lpart = disk_part_iter_next(&piter))) { - if (lpart->partno != partno && - !(start + length <= lpart->start_sect || - start >= lpart->start_sect + lpart->nr_sects) - ) { - disk_part_iter_exit(&piter); - mutex_unlock(&bdevp->bd_mutex); - mutex_unlock(&bdev->bd_mutex); - bdput(bdevp); - disk_put_part(part); - return -EBUSY; - } - } - disk_part_iter_exit(&piter); - part_nr_sects_write(part, (sector_t)length); - i_size_write(bdevp->bd_inode, p.length); - mutex_unlock(&bdevp->bd_mutex); - mutex_unlock(&bdev->bd_mutex); - bdput(bdevp); - disk_put_part(part); - return 0; - default: + + if (a.op == BLKPG_DEL_PARTITION) + return bdev_del_partition(bdev, p.pno); + + if (p.start < 0 || p.length <= 0 || p.start + p.length < 0) + return -EINVAL; + /* Check that the partition is aligned to the block size */ + if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev))) + return -EINVAL; + + start = p.start >> SECTOR_SHIFT; + length = p.length >> SECTOR_SHIFT; + + /* length may be equal to 0 after right shift */ + if (!length || start + length > get_capacity(bdev->bd_disk)) + return -EINVAL; + + /* check for fit in a hd_struct */ + if (sizeof(sector_t) < sizeof(long long)) { + long pstart = start, plength = length; + + if (pstart != start || plength != length || pstart < 0 || + plength < 0 || p.pno > 65535) return -EINVAL; } + + switch (a.op) { + case BLKPG_ADD_PARTITION: + return bdev_add_partition(bdev, p.pno, start, length); + case BLKPG_RESIZE_PARTITION: + return bdev_resize_partition(bdev, p.pno, start, length); + default: + return -EINVAL; + } } /* @@ -162,15 +73,25 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user int __blkdev_reread_part(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; + int err; if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if (bdev->bd_part_count) + return -EBUSY; lockdep_assert_held(&bdev->bd_mutex); - return rescan_partitions(disk, bdev); + down_read(&disk->lookup_sem); + if (disk->flags & GENHD_FL_UP) + err = rescan_partitions(disk, bdev); + else + err = -ENXIO; + up_read(&disk->lookup_sem); + + return err; } EXPORT_SYMBOL(__blkdev_reread_part); @@ -201,10 +122,9 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, unsigned long arg, unsigned long flags) { uint64_t range[2]; - uint64_t start, len; + uint64_t start, len, end; struct request_queue *q = bdev_get_queue(bdev); - struct address_space *mapping = bdev->bd_inode->i_mapping; - + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -223,9 +143,14 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, if (len & 511) return -EINVAL; - if (start + len > i_size_read(bdev->bd_inode)) + if (check_add_overflow(start, len, &end) || + end > i_size_read(bdev->bd_inode)) return -EINVAL; - truncate_inode_pages_range(mapping, start, start + len - 1); + + err = truncate_bdev_range(bdev, mode, start, start + len - 1); + if (err) + return err; + return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, flags); } @@ -234,8 +159,8 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, unsigned long arg) { uint64_t range[2]; - struct address_space *mapping; uint64_t start, end, len; + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -257,8 +182,9 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, return -EINVAL; /* Invalidate the page 
cache, including dirty pages */ - mapping = bdev->bd_inode->i_mapping; - truncate_inode_pages_range(mapping, start, end); + err = truncate_bdev_range(bdev, mode, start, end); + if (err) + return err; return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); @@ -451,6 +377,10 @@ static int blkdev_roset(struct block_device *bdev, fmode_t mode, return ret; if (get_user(n, (int __user *)arg)) return -EFAULT; + if (n) + set_bit(bdev->bd_partno, bdev->bd_disk->user_ro_bitmap); + else + clear_bit(bdev->bd_partno, bdev->bd_disk->user_ro_bitmap); set_device_ro(bdev, n); return 0; } @@ -572,7 +502,13 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKPG: return blkpg_ioctl(bdev, argp); case BLKRRPART: - return blkdev_reread_part(bdev); + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (bdev != bdev->bd_contains) + return -EINVAL; + if (bdev->bd_part_count) + return -EBUSY; + return disk_scan_partitions(bdev->bd_disk, mode); case BLKGETSIZE: size = i_size_read(bdev->bd_inode); if ((size >> 9) > ~0UL) diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index a1660bafc912452a37730ac0ed2bf02f6c8a99bc..fe0cb5ab76af6636af7b3feed634c2110d30609f 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -30,6 +30,7 @@ #include "blk-mq-sched.h" #include "blk-mq-tag.h" #include "blk-stat.h" +#include "blk-io-hierarchy/stats.h" /* Scheduling domains. */ enum { @@ -288,15 +289,6 @@ static void kyber_stat_timer_fn(struct blk_stat_callback *cb) blk_stat_activate_msecs(kqd->cb, 100); } -static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd) -{ - /* - * All of the hardware queues have the same depth, so we can just grab - * the shift of the first one. - */ - return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift; -} - static int kyber_bucket_fn(const struct request *rq) { return kyber_sched_domain(rq->cmd_flags); @@ -306,7 +298,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q) { struct kyber_queue_data *kqd; unsigned int max_tokens; - unsigned int shift; int ret = -ENOMEM; int i; @@ -341,11 +332,9 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q) sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]); } - shift = kyber_sched_tags_shift(kqd); - kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U; - kqd->read_lat_nsec = 2000000ULL; kqd->write_lat_nsec = 10000000ULL; + kqd->async_depth = 0; return kqd; @@ -377,6 +366,7 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e) blk_stat_add_callback(q, kqd->cb); + blk_mq_register_hierarchy(q, STAGE_KYBER); return 0; } @@ -386,6 +376,7 @@ static void kyber_exit_sched(struct elevator_queue *e) struct request_queue *q = kqd->q; int i; + blk_mq_unregister_hierarchy(q, STAGE_KYBER); blk_stat_remove_callback(q, kqd->cb); for (i = 0; i < KYBER_NUM_DOMAINS; i++) @@ -403,9 +394,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq) INIT_LIST_HEAD(&kcq->rq_list[i]); } -static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) +static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) { struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; + struct blk_mq_tags *tags = hctx->sched_tags; + unsigned int shift = tags->bitmap_tags.sb.shift; + + kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U; + + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth); +} + +static int kyber_init_hctx(struct 
blk_mq_hw_ctx *hctx, unsigned int hctx_idx) +{ struct kyber_hctx_data *khd; int i; @@ -446,8 +447,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) khd->batching = 0; hctx->sched_data = khd; - sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, - kqd->async_depth); + kyber_depth_updated(hctx); return 0; @@ -506,10 +506,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) } } -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) +static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx_q, struct bio *bio) { + struct request_queue *q = hctx_q->queue; + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); struct kyber_hctx_data *khd = hctx->sched_data; - struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue); struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw]; unsigned int sched_domain = kyber_sched_domain(bio->bi_opf); struct list_head *rq_list = &kcq->rq_list[sched_domain]; @@ -518,7 +520,6 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) spin_lock(&kcq->lock); merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio); spin_unlock(&kcq->lock); - blk_mq_put_ctx(ctx); return merged; } @@ -534,6 +535,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, struct kyber_hctx_data *khd = hctx->sched_data; struct request *rq, *next; + rq_list_hierarchy_start_io_acct(rq_list, STAGE_KYBER); list_for_each_entry_safe(rq, next, rq_list, queuelist) { unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags); struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw]; @@ -585,7 +587,7 @@ static void kyber_completed_request(struct request *rq) if (blk_stat_is_active(kqd->cb)) return; - now = ktime_get_ns(); + now = blk_time_get_ns(); if (now < rq->io_start_time_ns) return; @@ -773,6 +775,9 @@ static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx) rq = NULL; out: spin_unlock(&khd->lock); + + if (rq) + rq_hierarchy_end_io_acct(rq, STAGE_KYBER); return rq; } @@ -966,6 +971,7 @@ static struct elevator_type kyber_sched = { .completed_request = kyber_completed_request, .dispatch_request = kyber_dispatch_request, .has_work = kyber_has_work, + .depth_updated = kyber_depth_updated, }, .uses_mq = true, #ifdef CONFIG_BLK_DEBUG_FS diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 099a9e05854c35de207d6bd5cf24b4ff83ffb5a9..aa51abb3eaa4e7c6bf3615d120f19ca299319d74 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -22,6 +22,7 @@ #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" #include "blk-mq-sched.h" +#include "blk-io-hierarchy/stats.h" /* * See Documentation/block/deadline-iosched.txt @@ -61,6 +62,8 @@ struct deadline_data { spinlock_t lock; spinlock_t zone_lock; struct list_head dispatch; + + struct request_queue *q; }; static inline struct rb_root * @@ -373,7 +376,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) /* * One confusing aspect here is that we get called for a specific - * hardware queue, but we return a request that may not be for a + * hardware queue, but we may return a request that is for a * different hardware queue. This is because mq-deadline has shared * state for all hardware queues, in terms of sorting, FIFOs, etc. 
*/ @@ -386,6 +389,8 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) rq = __dd_dispatch_request(dd); spin_unlock(&dd->lock); + if (rq) + rq_hierarchy_end_io_acct(rq, STAGE_DEADLINE); return rq; } @@ -396,6 +401,7 @@ static void dd_exit_queue(struct elevator_queue *e) BUG_ON(!list_empty(&dd->fifo_list[READ])); BUG_ON(!list_empty(&dd->fifo_list[WRITE])); + blk_mq_unregister_hierarchy(dd->q, STAGE_DEADLINE); kfree(dd); } @@ -427,11 +433,13 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e) dd->writes_starved = writes_starved; dd->front_merges = 1; dd->fifo_batch = fifo_batch; + dd->q = q; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); INIT_LIST_HEAD(&dd->dispatch); q->elevator = eq; + blk_mq_register_hierarchy(q, STAGE_DEADLINE); return 0; } @@ -469,8 +477,10 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) ret = blk_mq_sched_try_merge(q, bio, &free); spin_unlock(&dd->lock); - if (free) + if (free) { + rq_hierarchy_end_io_acct(free, STAGE_DEADLINE); blk_mq_free_request(free); + } return ret; } @@ -484,6 +494,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; const int data_dir = rq_data_dir(rq); + LIST_HEAD(free); /* * This may be a requeue of a write request that has locked its @@ -491,8 +502,11 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, */ blk_req_zone_write_unlock(rq); - if (blk_mq_sched_try_insert_merge(q, rq)) + if (blk_mq_sched_try_insert_merge(q, rq, &free)) { + rq_list_hierarchy_end_io_acct(&free, STAGE_DEADLINE); + blk_mq_free_requests(&free); return; + } blk_mq_sched_request_inserted(rq); @@ -524,6 +538,8 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; + rq_list_hierarchy_start_io_acct(list, STAGE_DEADLINE); + spin_lock(&dd->lock); while (!list_empty(list)) { struct request *rq; @@ -549,6 +565,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio) * spinlock so that the zone is never unlocked while deadline_fifo_request() * or deadline_next_request() are executing. This function is called for * all requests, whether or not these requests complete successfully. + * + * For a zoned block device, __dd_dispatch_request() may have stopped + * dispatching requests if all the queued requests are write requests directed + * at zones that are already locked due to on-going write requests. To ensure + * write request dispatch progress in this case, mark the queue as needing a + * restart to ensure that the queue is run again after completion of the + * request and zones being unlocked. 
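+ * (The restart is requested via blk_mq_sched_mark_restart_hctx() below.)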
*/ static void dd_finish_request(struct request *rq) { @@ -560,6 +583,12 @@ static void dd_finish_request(struct request *rq) spin_lock_irqsave(&dd->zone_lock, flags); blk_req_zone_write_unlock(rq); + if (!list_empty(&dd->fifo_list[WRITE])) { + struct blk_mq_hw_ctx *hctx; + + hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu); + blk_mq_sched_mark_restart_hctx(hctx); + } spin_unlock_irqrestore(&dd->zone_lock, flags); } } diff --git a/block/partition-generic.c b/block/partition-generic.c index d3d14e81fb12dc3cc518852d5c7ca33054e42756..b1277629856c11a82f2fd93715608dd4f19ddacc 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "partitions/check.h" @@ -98,7 +99,7 @@ static ssize_t part_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%d\n", p->policy ? 1 : 0); + return sprintf(buf, "%u\n", p->policy ? 1 : 0); } static ssize_t part_alignment_offset_show(struct device *dev, @@ -123,9 +124,12 @@ ssize_t part_stat_show(struct device *dev, unsigned int inflight[2]; int cpu; - cpu = part_stat_lock(); - part_round_stats(q, cpu, p); - part_stat_unlock(); + if (precise_iostat) { + cpu = part_stat_lock(); + part_round_stats(q, cpu, p); + part_stat_unlock(); + } + part_in_flight(q, p, inflight); return sprintf(buf, "%8lu %8lu %8llu %8u " @@ -249,9 +253,10 @@ struct device_type part_type = { .uevent = part_uevent, }; -static void delete_partition_rcu_cb(struct rcu_head *head) +static void delete_partition_work_fn(struct work_struct *work) { - struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); + struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct, + rcu_work); part->start_sect = 0; part->nr_sects = 0; @@ -262,7 +267,14 @@ static void delete_partition_rcu_cb(struct rcu_head *head) void __delete_partition(struct percpu_ref *ref) { struct hd_struct *part = container_of(ref, struct hd_struct, ref); - call_rcu(&part->rcu_head, delete_partition_rcu_cb); + struct disk_part_tbl *ptbl = + rcu_dereference_protected(part->disk->part_tbl, 1); + + rcu_assign_pointer(ptbl->last_lookup, NULL); + put_device(disk_to_dev(part->disk)); + + INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn); + queue_rcu_work(system_wq, &part->rcu_work); } /* @@ -282,11 +294,19 @@ void delete_partition(struct gendisk *disk, int partno) if (!part) return; + get_device(disk_to_dev(disk)); rcu_assign_pointer(ptbl->part[partno], NULL); - rcu_assign_pointer(ptbl->last_lookup, NULL); + kobject_put(part->holder_dir); device_del(part_to_dev(part)); + /* + * Remove gendisk pointer from idr so that it cannot be looked up + * while RCU period before freeing gendisk is running to prevent + * use-after-free issues. Note that the device number stays + * "in-use" until we really free the gendisk. + */ + blk_invalidate_devt(part_devt(part)); hd_struct_kill(part); } @@ -301,7 +321,7 @@ static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL); * Must be called either with bd_mutex held, before a disk can be opened or * after all disk users are gone. 
*/ -struct hd_struct *add_partition(struct gendisk *disk, int partno, +static struct hd_struct *add_partition(struct gendisk *disk, int partno, sector_t start, sector_t len, int flags, struct partition_meta_info *info) { @@ -340,7 +360,11 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = partno; - p->policy = get_disk_ro(disk); + if (get_user_ro(disk, partno)) + p->policy = 1; + else + p->policy = get_disk_ro(disk); + p->disk = disk; if (info) { struct partition_meta_info *pinfo = alloc_part_info(disk); @@ -418,6 +442,128 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, return ERR_PTR(err); } +static bool partition_overlaps(struct gendisk *disk, sector_t start, + sector_t length, int skip_partno) +{ + struct disk_part_iter piter; + struct hd_struct *part; + bool overlap = false; + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) { + if (part->partno == skip_partno || + start >= part->start_sect + part->nr_sects || + start + length <= part->start_sect) + continue; + overlap = true; + break; + } + + disk_part_iter_exit(&piter); + return overlap; +} + +int bdev_add_partition(struct block_device *bdev, int partno, + sector_t start, sector_t length) +{ + struct hd_struct *part; + + mutex_lock(&bdev->bd_mutex); + if (partition_overlaps(bdev->bd_disk, start, length, -1)) { + mutex_unlock(&bdev->bd_mutex); + return -EBUSY; + } + + down_read(&bdev->bd_disk->lookup_sem); + if (!(bdev->bd_disk->flags & GENHD_FL_UP)) { + up_read(&bdev->bd_disk->lookup_sem); + mutex_unlock(&bdev->bd_mutex); + return -ENXIO; + } + part = add_partition(bdev->bd_disk, partno, start, length, + ADDPART_FLAG_NONE, NULL); + up_read(&bdev->bd_disk->lookup_sem); + mutex_unlock(&bdev->bd_mutex); + return PTR_ERR_OR_ZERO(part); +} + +int bdev_del_partition(struct block_device *bdev, int partno) +{ + struct block_device *bdevp; + struct hd_struct *part; + int ret = 0; + + part = disk_get_part(bdev->bd_disk, partno); + if (!part) + return -ENXIO; + + bdevp = bdget(part_devt(part)); + disk_put_part(part); + if (!bdevp) + return -ENOMEM; + + mutex_lock(&bdevp->bd_mutex); + + ret = -EBUSY; + if (bdevp->bd_openers) + goto out_unlock; + + fsync_bdev(bdevp); + invalidate_bdev(bdevp); + + mutex_lock_nested(&bdev->bd_mutex, 1); + down_read(&bdev->bd_disk->lookup_sem); + delete_partition(bdev->bd_disk, partno); + up_read(&bdev->bd_disk->lookup_sem); + mutex_unlock(&bdev->bd_mutex); + + ret = 0; +out_unlock: + mutex_unlock(&bdevp->bd_mutex); + bdput(bdevp); + return ret; +} + +int bdev_resize_partition(struct block_device *bdev, int partno, + sector_t start, sector_t length) +{ + struct block_device *bdevp; + struct hd_struct *part; + int ret = 0; + + part = disk_get_part(bdev->bd_disk, partno); + if (!part) + return -ENXIO; + + ret = -ENOMEM; + bdevp = bdget(part_devt(part)); + if (!bdevp) + goto out_put_part; + + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + + ret = -EINVAL; + if (start != part->start_sect) + goto out_unlock; + + ret = -EBUSY; + if (partition_overlaps(bdev->bd_disk, start, length, partno)) + goto out_unlock; + + part_nr_sects_write(part, (sector_t)length); + i_size_write(bdevp->bd_inode, length << SECTOR_SHIFT); + + ret = 0; +out_unlock: + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); +out_put_part: + disk_put_part(part); + return ret; +} + static bool 
disk_unlock_native_capacity(struct gendisk *disk) { const struct block_device_operations *bdops = disk->fops; @@ -440,7 +586,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev) struct hd_struct *part; int res; - if (bdev->bd_part_count || bdev->bd_super) + if (bdev->bd_part_count) return -EBUSY; res = invalidate_partition(disk, 0); if (res) diff --git a/block/partitions/efi.c b/block/partitions/efi.c index 39f70d968754e558ccb175dc3c56822d6da3fcab..b9beaa0a9b369674d412fdc29da4f2891ccc2c85 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -670,6 +670,31 @@ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt, return 0; } +/** + * utf16_le_to_7bit(): Naively converts a UTF-16LE string to 7-bit ASCII characters + * @in: input UTF-16LE string + * @size: size of the input string + * @out: output string ptr, should be capable to store @size+1 characters + * + * Description: Converts @size UTF16-LE symbols from @in string to 7-bit + * ASCII characters and stores them to @out. Adds trailing zero to @out array. + */ +static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out) +{ + unsigned int i = 0; + + out[size] = 0; + + while (i < size) { + u8 c = le16_to_cpu(in[i]) & 0xff; + + if (c && !isprint(c)) + c = '!'; + out[i] = c; + i++; + } +} + /** * efi_partition(struct parsed_partitions *state) * @state: disk parsed partitions @@ -706,7 +731,6 @@ int efi_partition(struct parsed_partitions *state) for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { struct partition_meta_info *info; - unsigned label_count = 0; unsigned label_max; u64 start = le64_to_cpu(ptes[i].starting_lba); u64 size = le64_to_cpu(ptes[i].ending_lba) - @@ -727,14 +751,7 @@ int efi_partition(struct parsed_partitions *state) /* Naively convert UTF16-LE to 7 bits. */ label_max = min(ARRAY_SIZE(info->volname) - 1, ARRAY_SIZE(ptes[i].partition_name)); - info->volname[label_max] = 0; - while (label_count < label_max) { - u8 c = ptes[i].partition_name[label_count] & 0xff; - if (c && !isprint(c)) - c = '!'; - info->volname[label_count] = c; - label_count++; - } + utf16_le_to_7bit(ptes[i].partition_name, label_max, info->volname); state->parts[i + 1].has_info = true; } kfree(ptes); diff --git a/block/partitions/efi.h b/block/partitions/efi.h index abd0b19288a66935ead285a071e44c689fe770cd..42db2513ecfa4b1a62b8e9da53bc725518ac6a37 100644 --- a/block/partitions/efi.h +++ b/block/partitions/efi.h @@ -102,7 +102,7 @@ typedef struct _gpt_entry { __le64 starting_lba; __le64 ending_lba; gpt_entry_attributes attributes; - efi_char16_t partition_name[72 / sizeof (efi_char16_t)]; + __le16 partition_name[72/sizeof(__le16)]; } __packed gpt_entry; typedef struct _gpt_mbr_record { diff --git a/block/sed-opal.c b/block/sed-opal.c index e0de4dd448b3c7238e8656b572de72206302bf87..1196408972937cf306139faa2ed188fb187e777d 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev, static int opal_enable_disable_shadow_mbr(struct opal_dev *dev, struct opal_mbr_data *opal_mbr) { + u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ? 
+ OPAL_TRUE : OPAL_FALSE; + const struct opal_step mbr_steps[] = { { opal_discovery0, }, { start_admin1LSP_opal_session, &opal_mbr->key }, - { set_mbr_done, &opal_mbr->enable_disable }, + { set_mbr_done, &enable_disable }, { end_opal_session, }, { start_admin1LSP_opal_session, &opal_mbr->key }, - { set_mbr_enable_disable, &opal_mbr->enable_disable }, + { set_mbr_enable_disable, &enable_disable }, { end_opal_session, }, { NULL, } }; @@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev, static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key) { - u8 mbr_done_tf = 1; + u8 mbr_done_tf = OPAL_TRUE; const struct opal_step mbrdone_step [] = { { opal_discovery0, }, { start_admin1LSP_opal_session, key }, diff --git a/certs/blacklist.c b/certs/blacklist.c index 3a507b9e2568a27e9d2e4359fc4fbdc3218dda4b..e9f3f81c51f96a17cb546ec559e319d0183138ce 100644 --- a/certs/blacklist.c +++ b/certs/blacklist.c @@ -157,7 +157,7 @@ static int __init blacklist_init(void) KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH, KEY_ALLOC_NOT_IN_QUOTA | - KEY_FLAG_KEEP, + KEY_ALLOC_SET_KEEP, NULL, NULL); if (IS_ERR(blacklist_keyring)) panic("Can't allocate system blacklist keyring\n"); diff --git a/crypto/Kconfig b/crypto/Kconfig index f3e40ac56d9390ad283b050ce4e3901697e5512f..0ec4767a8856b2931f80dea87c178b4cf6a0858f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -213,20 +213,6 @@ config CRYPTO_CRYPTD converts an arbitrary synchronous software crypto algorithm into an asynchronous algorithm that executes in a kernel thread. -config CRYPTO_MCRYPTD - tristate "Software async multi-buffer crypto daemon" - select CRYPTO_BLKCIPHER - select CRYPTO_HASH - select CRYPTO_MANAGER - select CRYPTO_WORKQUEUE - help - This is a generic software asynchronous crypto daemon that - provides the kernel thread to assist multi-buffer crypto - algorithms for submitting jobs and flushing jobs in multi-buffer - crypto algorithms. Multi-buffer crypto algorithms are executed - in the context of this kernel thread and drivers can post - their crypto request asynchronously to be processed by this daemon. - config CRYPTO_AUTHENC tristate "Authenc support" select CRYPTO_AEAD @@ -848,54 +834,6 @@ config CRYPTO_SHA1_PPC_SPE SHA-1 secure hash standard (DFIPS 180-4) implemented using powerpc SPE SIMD instruction set. -config CRYPTO_SHA1_MB - tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)" - depends on X86 && 64BIT - select CRYPTO_SHA1 - select CRYPTO_HASH - select CRYPTO_MCRYPTD - help - SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using multi-buffer technique. This algorithm computes on - multiple data lanes concurrently with SIMD instructions for - better throughput. It should not be enabled by default but - used when there is significant amount of work to keep the keep - the data lanes filled to get performance benefit. If the data - lanes remain unfilled, a flush operation will be initiated to - process the crypto jobs, adding a slight latency. - -config CRYPTO_SHA256_MB - tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)" - depends on X86 && 64BIT - select CRYPTO_SHA256 - select CRYPTO_HASH - select CRYPTO_MCRYPTD - help - SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using multi-buffer technique. This algorithm computes on - multiple data lanes concurrently with SIMD instructions for - better throughput. 
It should not be enabled by default but - used when there is significant amount of work to keep the keep - the data lanes filled to get performance benefit. If the data - lanes remain unfilled, a flush operation will be initiated to - process the crypto jobs, adding a slight latency. - -config CRYPTO_SHA512_MB - tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)" - depends on X86 && 64BIT - select CRYPTO_SHA512 - select CRYPTO_HASH - select CRYPTO_MCRYPTD - help - SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented - using multi-buffer technique. This algorithm computes on - multiple data lanes concurrently with SIMD instructions for - better throughput. It should not be enabled by default but - used when there is significant amount of work to keep the keep - the data lanes filled to get performance benefit. If the data - lanes remain unfilled, a flush operation will be initiated to - process the crypto jobs, adding a slight latency. - config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH @@ -1056,7 +994,8 @@ config CRYPTO_AES_TI 8 for decryption), this implementation only uses just two S-boxes of 256 bytes each, and attempts to eliminate data dependent latencies by prefetching the entire table into the cache at the start of each - block. + block. Interrupts are also disabled to avoid races where cachelines + are evicted when the CPU is interrupted to do something else. config CRYPTO_AES_586 tristate "AES cipher algorithms (i586)" @@ -1590,20 +1529,6 @@ config CRYPTO_SM4 If unsure, say N. -config CRYPTO_SPECK - tristate "Speck cipher algorithm" - select CRYPTO_ALGAPI - help - Speck is a lightweight block cipher that is tuned for optimal - performance in software (rather than hardware). - - Speck may not be as secure as AES, and should only be used on systems - where AES is not fast enough. - - See also: - - If unsure, say N. 
- config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 6d1d40eeb9642c7a773852c0559c633e871de8c3..d719843f8b6e4c65f0501c1375c1ae783a4f2d21 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -93,7 +93,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o -obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o @@ -115,7 +114,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o -obj-$(CONFIG_CRYPTO_SPECK) += speck.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o diff --git a/crypto/aead.c b/crypto/aead.c index 60b3bbe973e752a879f39ac5ca5af712560ad888..f8b2cd0567f12e94bd1c9d35d4f941bc6a9a0fdf 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -45,8 +45,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kzfree(buffer); return ret; } @@ -61,8 +60,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm, else err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY); return err; + } crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; diff --git a/crypto/aegis.h b/crypto/aegis.h index f1c6900ddb8011b79fcfe3f37eb7a748f9be3cc8..405e025fc9067dbc958282a53566cd76a30ea02b 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -21,7 +21,7 @@ union aegis_block { __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)]; - u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)]; + __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)]; u8 bytes[AEGIS_BLOCK_SIZE]; }; @@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst, const union aegis_block *src, const union aegis_block *key) { - u32 *d = dst->words32; const u8 *s = src->bytes; - const u32 *k = key->words32; const u32 *t0 = crypto_ft_tab[0]; const u32 *t1 = crypto_ft_tab[1]; const u32 *t2 = crypto_ft_tab[2]; const u32 *t3 = crypto_ft_tab[3]; u32 d0, d1, d2, d3; - d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0]; - d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1]; - d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2]; - d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3]; + d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]]; + d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]]; + d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]]; + d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]]; - d[0] = d0; - d[1] = d1; - d[2] = d2; - d[3] = d3; + dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0]; + dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1]; + dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2]; + dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3]; } #endif /* _CRYPTO_AEGIS_H */ diff --git a/crypto/aegis128.c b/crypto/aegis128.c index c22f4414856d9939cbd23774bd7091c84d8fa497..789716f92e4c54b797cb6b2e18530beda7632e1c 100644 --- a/crypto/aegis128.c +++ b/crypto/aegis128.c @@ -290,19 
+290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state, const struct aegis128_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; - ops->crypt_chunk(state, dst, src, chunksize); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - skcipher_walk_done(&walk, 0); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c index b6fb21ebdc3e8a6e7d72bd9f3602d5d95c227d18..73811448cb6b4abf2ebe1b32644c19b96e0903e1 100644 --- a/crypto/aegis128l.c +++ b/crypto/aegis128l.c @@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state, const struct aegis128l_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; - ops->crypt_chunk(state, dst, src, chunksize); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - skcipher_walk_done(&walk, 0); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis256.c b/crypto/aegis256.c index 11f0f8ec9c7c27d77a8338a0451da8d387c15360..8a71e9c061934f791f6bda8265301e93d5c0d320 100644 --- a/crypto/aegis256.c +++ b/crypto/aegis256.c @@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state, const struct aegis256_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; - ops->crypt_chunk(state, dst, src, chunksize); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - skcipher_walk_done(&walk, 0); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index 03023b2290e8ec6e790679a30ddf4763e0476923..1ff9785b30f5568e121c374770d2eaee58f4e3d9 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c @@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) const u32 *rkp = ctx->key_enc + 4; int rounds = 6 + ctx->key_length / 4; u32 st0[4], st1[4]; + unsigned long flags; int round; st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in); @@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8); st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12); + /* + * Temporarily disable interrupts to avoid races where cachelines are + * evicted when the CPU is interrupted to do something else. 
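+ * An eviction here would reintroduce the data-dependent cache timing this implementation tries to avoid.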
+ */ + local_irq_save(flags); + st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160]; st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192]; @@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4); put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8); put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12); + + local_irq_restore(flags); } static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) @@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) const u32 *rkp = ctx->key_dec + 4; int rounds = 6 + ctx->key_length / 4; u32 st0[4], st1[4]; + unsigned long flags; int round; st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in); @@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8); st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12); + /* + * Temporarily disable interrupts to avoid races where cachelines are + * evicted when the CPU is interrupted to do something else. + */ + local_irq_save(flags); + st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160]; st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192]; @@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4); put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8); put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12); + + local_irq_restore(flags); } static struct crypto_alg aes_alg = { diff --git a/crypto/af_alg.c b/crypto/af_alg.c index b053179e0bc532e019b249eb0b96ba76fc3bb103..fdccf8c9c4ffdce91dbea783e1738b50cd20e0a7 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -122,8 +123,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) int af_alg_release(struct socket *sock) { - if (sock->sk) + if (sock->sk) { sock_put(sock->sk); + sock->sk = NULL; + } return 0; } EXPORT_SYMBOL_GPL(af_alg_release); @@ -646,6 +649,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -747,9 +751,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -767,7 +772,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -858,10 +865,17 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } + ctx->init = true; if (init) { ctx->enc = enc; @@ -1028,9 +1042,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage); void af_alg_free_resources(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; + struct af_alg_ctx *ctx; af_alg_free_areq_sgls(areq); sock_kfree_s(sk, areq, areq->areqlen); + + ctx = alg_sk(sk)->private; + ctx->inflight = false; } EXPORT_SYMBOL_GPL(af_alg_free_resources); @@ -1056,7 +1074,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) af_alg_free_resources(areq); sock_put(sk); - iocb->ki_complete(iocb, err ? err : resultlen, 0); + iocb->ki_complete(iocb, err ? err : (int)resultlen, 0); } EXPORT_SYMBOL_GPL(af_alg_async_cb); @@ -1071,7 +1089,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock, struct af_alg_ctx *ctx = ask->private; __poll_t mask; - sock_poll_wait(file, wait); + sock_poll_wait(file, sock, wait); mask = 0; if (!ctx->more || ctx->used) @@ -1094,11 +1112,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) { - struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); + struct af_alg_ctx *ctx = alg_sk(sk)->private; + struct af_alg_async_req *areq; + /* Only one AIO request can be in flight. 
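The flag is cleared again in af_alg_free_resources() when the request completes.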
*/ + if (ctx->inflight) + return ERR_PTR(-EBUSY); + + areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); if (unlikely(!areq)) return ERR_PTR(-ENOMEM); + ctx->inflight = true; + areq->areqlen = areqlen; areq->sk = sk; areq->last_rsgl = NULL; diff --git a/crypto/ahash.c b/crypto/ahash.c index a64c143165b1fb3ce048b0de8f59ef5bacea077d..158e716f21a195cfbe83518d5e559d911b1ffb94 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { unsigned int alignmask = walk->alignmask; - unsigned int nbytes = walk->entrylen; walk->data -= walk->offset; - if (nbytes && walk->offset & alignmask && !err) { - walk->offset = ALIGN(walk->offset, alignmask + 1); - nbytes = min(nbytes, - ((unsigned int)(PAGE_SIZE)) - walk->offset); - walk->entrylen -= nbytes; + if (walk->entrylen && (walk->offset & alignmask) && !err) { + unsigned int nbytes; + walk->offset = ALIGN(walk->offset, alignmask + 1); + nbytes = min(walk->entrylen, + (unsigned int)(PAGE_SIZE - walk->offset)); if (nbytes) { + walk->entrylen -= nbytes; walk->data += walk->offset; return nbytes; } @@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (err) return err; - if (nbytes) { + if (walk->entrylen) { walk->offset = 0; walk->pg++; return hash_walk_next(walk); @@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, return ret; } +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return -ENOSYS; +} + +static void ahash_set_needkey(struct crypto_ahash *tfm) +{ + const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + + if (tfm->setkey != ahash_nosetkey && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, else err = tfm->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + ahash_set_needkey(tfm); return err; + } crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - return -ENOSYS; -} - static inline unsigned int ahash_align_buffer_size(unsigned len, unsigned long mask) { @@ -467,8 +478,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); + ahash_set_needkey(hash); } return 0; diff --git a/crypto/algapi.c b/crypto/algapi.c index c0755cf4f53f8b5eb518cd96d77a70fc3f837367..9bf991a4246b9a564f8bae70fc2232078a43f306 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "internal.h" @@ -87,13 +88,37 @@ static void crypto_free_instance(struct crypto_instance *inst) inst->alg.cra_type->free(inst); } -static void crypto_destroy_instance(struct crypto_alg *alg) +static void crypto_destroy_instance_workfn(struct work_struct *w) { - struct crypto_instance *inst = (void *)alg; + struct crypto_instance_freework *work = container_of(w, + struct crypto_instance_freework, free_work); + struct crypto_instance *inst = work->instance; struct crypto_template *tmpl = inst->tmpl; crypto_free_instance(inst); 
crypto_tmpl_put(tmpl); + + kfree(work); +} + +static void crypto_destroy_instance(struct crypto_alg *alg) +{ + struct crypto_instance_freework *work; + struct crypto_instance *inst = container_of(alg, + struct crypto_instance, + alg); + struct crypto_template *tmpl = inst->tmpl; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + crypto_free_instance(inst); + crypto_tmpl_put(tmpl); + return; + } + work->instance = inst; + + INIT_WORK(&work->free_work, crypto_destroy_instance_workfn); + schedule_work(&work->free_work); } static struct list_head *crypto_more_spawns(struct crypto_alg *alg, @@ -485,6 +510,24 @@ int crypto_register_template(struct crypto_template *tmpl) } EXPORT_SYMBOL_GPL(crypto_register_template); +int crypto_register_templates(struct crypto_template *tmpls, int count) +{ + int i, err; + + for (i = 0; i < count; i++) { + err = crypto_register_template(&tmpls[i]); + if (err) + goto out; + } + return 0; + +out: + for (--i; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_templates); + void crypto_unregister_template(struct crypto_template *tmpl) { struct crypto_instance *inst; @@ -514,6 +557,15 @@ void crypto_unregister_template(struct crypto_template *tmpl) } EXPORT_SYMBOL_GPL(crypto_unregister_template); +void crypto_unregister_templates(struct crypto_template *tmpls, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_templates); + static struct crypto_template *__crypto_lookup_template(const char *name) { struct crypto_template *q, *tmpl = NULL; diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index c40a8c7ee8aedcb0f6adb3afb1e0bb60a233d68c..43a861a79209c3bb8a737c140bb5ffd79aa1887b 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -110,8 +110,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } @@ -565,12 +565,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; - ctx->aead_assoclen = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index cfdaab2b7d766d517e239687bf2232e09a749991..c5813e14e90538f81628139a5a8f6f9f75f4cc4d 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -60,13 +60,13 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; struct crypto_skcipher *tfm = pask->private; - unsigned int bs = crypto_skcipher_blocksize(tfm); + unsigned int bs = crypto_skcipher_chunksize(tfm); struct af_alg_async_req *areq; int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } @@ -131,7 +131,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) + if (err == -EINPROGRESS) return 
-EIOCBQUEUED; sock_put(sk); @@ -344,6 +344,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); @@ -351,16 +352,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/api.c b/crypto/api.c index 7aca9f86c5f397f0d0ef021c6bbfcaae09fd2b32..318e56cdfccf5e082ba858e0dc2fe0ce54961ab8 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -572,7 +572,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; - if (unlikely(!mem)) + if (IS_ERR_OR_NULL(mem)) return; alg = tfm->__crt_alg; diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index f3702e533ff41044694625aad813abc58b8af4dd..d8a73d94bb309a524e47cca954a66a55f385b1f4 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB select CRYPTO_HASH_INFO select CRYPTO_AKCIPHER + select CRYPTO_HASH help This option provides support for asymmetric public key type handling. If signature generation and/or verification are to be used, @@ -34,6 +35,7 @@ config X509_CERTIFICATE_PARSER config PKCS7_MESSAGE_PARSER tristate "PKCS#7 message parser" depends on X509_CERTIFICATE_PARSER + select CRYPTO_HASH select ASN1 select OID_REGISTRY help @@ -56,6 +58,7 @@ config SIGNED_PE_FILE_VERIFICATION bool "Support for PE file signature verification" depends on PKCS7_MESSAGE_PARSER=y depends on SYSTEM_DATA_VERIFICATION + select CRYPTO_HASH select ASN1 select OID_REGISTRY help diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index d178650fd524cfe8bc390c883505f4cbc31d60f2..411977947adbe9ccdad50ee23fade7a48cb3f14e 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -139,11 +139,15 @@ static int pefile_strip_sig_wrapper(const void *pebuf, pr_debug("sig wrapper = { %x, %x, %x }\n", wrapper.length, wrapper.revision, wrapper.cert_type); - /* Both pesign and sbsign round up the length of certificate table - * (in optional header data directories) to 8 byte alignment. + /* sbsign rounds up the length of certificate table (in optional + * header data directories) to 8 byte alignment. However, the PE + * specification states that while entries are 8-byte aligned, this is + * not included in their length, and as a result, pesign has not + * rounded up since 0.110. 
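+ * Hence only reject a wrapper that claims to be larger than the certificate table itself.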
*/ - if (round_up(wrapper.length, 8) != ctx->sig_len) { - pr_debug("Signature wrapper len wrong\n"); + if (wrapper.length > ctx->sig_len) { + pr_debug("Signature wrapper bigger than sig len (%x > %x)\n", + ctx->sig_len, wrapper.length); return -ELIBBAD; } if (wrapper.revision != WIN_CERT_REVISION_2_0) { diff --git a/crypto/authenc.c b/crypto/authenc.c index 4fa8d40d947b7594f8a7b089a8a86e8ea1b5e81d..02d4d8517449427ce195a4e389ab3ce9e5463cb3 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) return -EINVAL; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); param = RTA_DATA(rta); keys->enckeylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + key += rta->rta_len; + keylen -= rta->rta_len; if (keylen < keys->enckeylen) return -EINVAL; @@ -260,7 +268,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, ctx->enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), + skcipher_request_set_callback(skreq, flags, req->base.complete, req->base.data); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen - authsize, req->iv); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 50b804747e20a3a9a68e158bab1e900949b75b27..4eff4be6bd1272269ab639702349af14ae55309e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, struct aead_request *req = areq->data; err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); - aead_request_complete(req, err); + authenc_esn_request_complete(req, err); } static int crypto_authenc_esn_decrypt(struct aead_request *req) diff --git a/crypto/cbc.c b/crypto/cbc.c index b761b1f9c6ca161c8eb3a9340ab50b69374671bc..dd5f332fd5668985c9e904b35ad56d47b34ed383 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); if (err) @@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/ccm.c b/crypto/ccm.c index 0a083342ec8cf3b17c4da15c515c8c3d7a519a0f..8104c564dd318ceea01a763adbece9f2fc297407 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -455,7 +455,6 @@ static void crypto_ccm_free(struct aead_instance *inst) static int crypto_ccm_create_common(struct crypto_template *tmpl, struct rtattr **tb, - const char 
*full_name, const char *ctr_name, const char *mac_name) { @@ -483,7 +482,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac = __crypto_hash_alg_common(mac_alg); err = -EINVAL; - if (mac->digestsize != 16) + if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 || + mac->digestsize != 16) goto out_put_mac; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); @@ -506,23 +506,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ctr = crypto_spawn_skcipher_alg(&ictx->ctr); - /* Not a stream cipher? */ + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; - if (ctr->base.cra_blocksize != 1) + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || + crypto_skcipher_alg_ivsize(ctr) != 16 || + ctr->base.cra_blocksize != 1) goto err_drop_ctr; - /* We want the real thing! */ - if (crypto_skcipher_alg_ivsize(ctr) != 16) + /* ctr and cbcmac must use the same underlying block cipher. */ + if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0) goto err_drop_ctr; err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ctr; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->base.cra_driver_name, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; @@ -564,7 +568,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; char mac_name[CRYPTO_MAX_ALG_NAME]; - char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -578,12 +581,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= - CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, - mac_name); + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name); } static struct crypto_template crypto_ccm_tmpl = { @@ -596,23 +594,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *ctr_name; - const char *cipher_name; - char full_name[CRYPTO_MAX_ALG_NAME]; + const char *mac_name; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) return PTR_ERR(ctr_name); - cipher_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); - - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", - ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; + mac_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(mac_name)) + return PTR_ERR(mac_name); - return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, - cipher_name); + return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name); } static struct crypto_template crypto_ccm_base_tmpl = { diff --git a/crypto/cfb.c b/crypto/cfb.c index a0d68c09e1b9c53dd9eb4fb9bd08238d24b70d44..4abfe32ff8451cbd721285b104cb398f98ec727b 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk, do { crypto_cfb_encrypt_one(tfm, iv, dst); 
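			/*
			 * At this point dst holds E_k(iv).  XOR-ing the plaintext into it
			 * below produces the ciphertext block, and in CFB that same
			 * ciphertext block is the chaining value for the next iteration.
			 * That is why this hunk can replace the per-block
			 * memcpy(iv, dst, bsize) with a pointer re-assignment and copy the
			 * final block back into walk->iv just once, after the loop.
			 */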
crypto_xor(dst, src, bsize); - memcpy(iv, dst, bsize); + iv = dst; src += bsize; dst += bsize; } while ((nbytes -= bsize) >= bsize); + memcpy(walk->iv, iv, bsize); + return nbytes; } @@ -144,7 +146,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk, do { crypto_cfb_encrypt_one(tfm, iv, dst); - crypto_xor(dst, iv, bsize); + crypto_xor(dst, src, bsize); iv = src; src += bsize; @@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, const unsigned int bsize = crypto_cfb_bsize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmp[MAX_CIPHER_BLOCKSIZE]; do { @@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); if (err) @@ -299,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->cra_alignmask; + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. + */ + inst->alg.chunksize = alg->cra_blocksize; + inst->alg.ivsize = alg->cra_blocksize; inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; @@ -317,12 +322,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index e451c3cb6a56e863e8da71170b3e676de1b0cb16..3ae96587caf9a4e18216c8a0f1308eb0812eb912 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -18,20 +18,21 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, unsigned int bytes) { - u32 stream[CHACHA20_BLOCK_WORDS]; + /* aligned to potentially speed up crypto_xor() */ + u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long)); if (dst != src) memcpy(dst, src, bytes); while (bytes >= CHACHA20_BLOCK_SIZE) { chacha20_block(state, stream); - crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE); + crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); bytes -= CHACHA20_BLOCK_SIZE; dst += CHACHA20_BLOCK_SIZE; } if (bytes) { chacha20_block(state, stream); - crypto_xor(dst, (const u8 *)stream, bytes); + crypto_xor(dst, stream, bytes); } } diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 600afa99941fe07470b74f3c142b59716656df92..af8afe5c06ea9a827df39c65b0bda11176943043 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -67,6 +67,8 @@ struct chachapoly_req_ctx { unsigned int cryptlen; /* Actual AD, excluding IV */ unsigned int assoclen; + /* request flags, with MAY_SLEEP cleared if needed */ + u32 flags; union { struct poly_req poly; struct chacha_req chacha; @@ -76,8 +78,12 @@ struct chachapoly_req_ctx { static inline void 
async_done_continue(struct aead_request *req, int err, int (*cont)(struct aead_request *)) { - if (!err) + if (!err) { + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + + rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; err = cont(req); + } if (err != -EINPROGRESS && err != -EBUSY) aead_request_complete(req, err); @@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_request *req) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); } - skcipher_request_set_callback(&creq->req, aead_request_flags(req), + skcipher_request_set_callback(&creq->req, rctx->flags, chacha_decrypt_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, src, dst, @@ -188,7 +194,7 @@ static int poly_tail(struct aead_request *req) memcpy(&preq->tail.cryptlen, &len, sizeof(len)); sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail)); - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_tail_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, @@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_request *req) sg_init_table(preq->src, 1); sg_set_buf(preq->src, &preq->pad, padlen); - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_cipherpad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); @@ -250,7 +256,7 @@ static int poly_cipher(struct aead_request *req) sg_init_table(rctx->src, 2); crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_cipher_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); @@ -280,7 +286,7 @@ static int poly_adpad(struct aead_request *req) sg_init_table(preq->src, 1); sg_set_buf(preq->src, preq->pad, padlen); - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_adpad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); @@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *req) struct poly_req *preq = &rctx->u.poly; int err; - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_ad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); @@ -331,7 +337,7 @@ static int poly_setkey(struct aead_request *req) sg_init_table(preq->src, 1); sg_set_buf(preq->src, rctx->key, sizeof(rctx->key)); - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_setkey_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); @@ -355,7 +361,7 @@ static int poly_init(struct aead_request *req) struct poly_req *preq = &rctx->u.poly; int err; - ahash_request_set_callback(&preq->req, aead_request_flags(req), + ahash_request_set_callback(&preq->req, rctx->flags, poly_init_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); @@ -393,7 +399,7 @@ static int poly_genkey(struct aead_request *req) chacha_iv(creq->iv, req, 0); - skcipher_request_set_callback(&creq->req, 
aead_request_flags(req), + skcipher_request_set_callback(&creq->req, rctx->flags, poly_genkey_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, creq->src, creq->src, @@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_request *req) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); } - skcipher_request_set_callback(&creq->req, aead_request_flags(req), + skcipher_request_set_callback(&creq->req, rctx->flags, chacha_encrypt_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, src, dst, @@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aead_request *req) struct chachapoly_req_ctx *rctx = aead_request_ctx(req); rctx->cryptlen = req->cryptlen; + rctx->flags = aead_request_flags(req); /* encrypt call chain: * - chacha_encrypt/done() @@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aead_request *req) struct chachapoly_req_ctx *rctx = aead_request_ctx(req); rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; + rctx->flags = aead_request_flags(req); /* decrypt call chain: * - poly_genkey/done() @@ -647,8 +655,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha_name, - poly_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,%s)", name, chacha->base.cra_name, + poly->cra_name) >= CRYPTO_MAX_ALG_NAME) goto out_drop_chacha; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_driver_name, diff --git a/crypto/cipher.c b/crypto/cipher.c index 57836c30a49a6ecdae55e9266b6eeb3de52387a0..3d3fa8d0d533af1962cfd94b99846cfe1726673d 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -38,8 +38,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cia->cia_setkey(tfm, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kzfree(buffer); return ret; } diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index 8e94e29dc6fc89e71f9d2e724814c74e3277683b..d08048ae555279da9fe5ef01659d1328854b7646 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out) return 0; } -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { - *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); + *(__u16 *)out = crc_t10dif_generic(crc, data, len); return 0; } @@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - return __chksum_finup(&ctx->crc, data, len, out); + return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, length, out); + return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { diff --git a/crypto/cryptd.c b/crypto/cryptd.c index addca7bae33f40e9111fc855a5851537373a03f9..3feebeec1d05037e342bb3d7bffc43e152bbf651 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -42,6 +42,10 @@ struct cryptd_cpu_queue { }; struct cryptd_queue { + /* + * Protected by disabling BH to 
allow enqueueing from softinterrupt and + * dequeuing from kworker (cryptd_queue_worker()). + */ struct cryptd_cpu_queue __percpu *cpu_queue; }; @@ -137,28 +141,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue) static int cryptd_enqueue_request(struct cryptd_queue *queue, struct crypto_async_request *request) { - int cpu, err; + int err; struct cryptd_cpu_queue *cpu_queue; atomic_t *refcnt; - cpu = get_cpu(); + local_bh_disable(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); if (err == -ENOSPC) - goto out_put_cpu; + goto out; - queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); if (!atomic_read(refcnt)) - goto out_put_cpu; + goto out; atomic_inc(refcnt); -out_put_cpu: - put_cpu(); +out: + local_bh_enable(); return err; } @@ -174,15 +178,10 @@ static void cryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct cryptd_cpu_queue, work); /* * Only handle one request at a time to avoid hogging crypto workqueue. - * preempt_disable/enable is used to prevent being preempted by - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent - * cryptd_enqueue_request() being accessed from software interrupts. */ local_bh_disable(); - preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); local_bh_enable(); if (!req) @@ -586,6 +585,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); crypto_drop_skcipher(&ctx->spawn); + kfree(inst); } static int cryptd_create_skcipher(struct crypto_template *tmpl, diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 0e89b5457cab0b2d6f2ba684b23440b9b1544c4a..f847c181a39c8f6cadad7a403b0a9a8b75e24585 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -55,6 +55,9 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) list_for_each_entry(q, &crypto_alg_list, cra_list) { int match = 0; + if (crypto_is_larval(q)) + continue; + if ((q->cra_flags ^ p->cru_type) & p->cru_mask) continue; @@ -83,7 +86,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; - strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); + strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; @@ -102,7 +105,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; - strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); + strncpy(rcomp.type, "compression", sizeof(rcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; @@ -116,7 +119,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_acomp racomp; - strlcpy(racomp.type, "acomp", sizeof(racomp.type)); + strncpy(racomp.type, "acomp", sizeof(racomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(struct crypto_report_acomp), &racomp)) @@ -131,7 +134,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_akcipher rakcipher; - strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); + strncpy(rakcipher.type, "akcipher", 
sizeof(rakcipher.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(struct crypto_report_akcipher), &rakcipher)) @@ -146,7 +149,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_kpp rkpp; - strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); + strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(struct crypto_report_kpp), &rkpp)) @@ -160,10 +163,10 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { - strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); - strlcpy(ualg->cru_driver_name, alg->cra_driver_name, + strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); + strncpy(ualg->cru_driver_name, alg->cra_driver_name, sizeof(ualg->cru_driver_name)); - strlcpy(ualg->cru_module_name, module_name(alg->cra_module), + strncpy(ualg->cru_module_name, module_name(alg->cra_module), sizeof(ualg->cru_module_name)); ualg->cru_type = 0; @@ -176,7 +179,7 @@ static int crypto_report_one(struct crypto_alg *alg, if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; - strlcpy(rl.type, "larval", sizeof(rl.type)); + strncpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; @@ -285,38 +288,43 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, drop_alg: crypto_mod_put(alg); - if (err) + if (err) { + kfree_skb(skb); return err; + } return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) { - struct crypto_alg *alg; + const size_t start_pos = cb->args[0]; + size_t pos = 0; struct crypto_dump_info info; - int err; - - if (cb->args[0]) - goto out; - - cb->args[0] = 1; + struct crypto_alg *alg; + int res; info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; + down_read(&crypto_alg_sem); list_for_each_entry(alg, &crypto_alg_list, cra_list) { - err = crypto_report_alg(alg, &info); - if (err) - goto out_err; + if (pos >= start_pos) { + res = crypto_report_alg(alg, &info); + if (res == -EMSGSIZE) + break; + if (res) + goto out; + } + pos++; } - + cb->args[0] = pos; + res = skb->len; out: - return skb->len; -out_err: - return err; + up_read(&crypto_alg_sem); + return res; } static int crypto_dump_report_done(struct netlink_callback *cb) @@ -500,7 +508,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) && (nlh->nlmsg_flags & NLM_F_DUMP))) { struct crypto_alg *alg; - u16 dump_alloc = 0; + unsigned long dump_alloc = 0; if (link->dump == NULL) return -EINVAL; @@ -508,16 +516,16 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, down_read(&crypto_alg_sem); list_for_each_entry(alg, &crypto_alg_list, cra_list) dump_alloc += CRYPTO_REPORT_MAXSIZE; + up_read(&crypto_alg_sem); { struct netlink_dump_control c = { .dump = link->dump, .done = link->done, - .min_dump_alloc = dump_alloc, + .min_dump_alloc = min(dump_alloc, 65535UL), }; err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); } - up_read(&crypto_alg_sem); return err; } diff --git a/crypto/drbg.c b/crypto/drbg.c index bc52d956261102a5b6ae59e50ad56d21745abbcf..ca6765eeb99046347a1f60d78457de122927f651 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c 
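The crypto_dump_report() rewrite above follows the usual resumable netlink dump pattern: remember in cb->args[0] how many list entries have already been emitted, skip that many on the next invocation, and stop filling the skb as soon as one entry no longer fits. A minimal sketch of that pattern, with item_list, list_sem and emit_one() as illustrative stand-ins rather than real kernel symbols:

	static int dump_resume(struct sk_buff *skb, struct netlink_callback *cb)
	{
		const size_t start_pos = cb->args[0];
		size_t pos = 0;
		struct item *it;
		int res = 0;

		down_read(&list_sem);
		list_for_each_entry(it, &item_list, node) {
			if (pos >= start_pos) {
				res = emit_one(skb, it);	/* fill one netlink record */
				if (res == -EMSGSIZE)
					break;			/* resume here on the next call */
				if (res)
					goto out;
			}
			pos++;
		}
		cb->args[0] = pos;
		res = skb->len;
	out:
		up_read(&list_sem);
		return res;
	}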
@@ -1425,6 +1425,14 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) } drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + if (IS_ERR(drbg->jent)) { + const int err = PTR_ERR(drbg->jent); + + drbg->jent = NULL; + if (fips_enabled) + return err; + pr_info("DRBG: Continuing without Jitter RNG\n"); + } /* * Require frequent reseeds until the seed source is fully @@ -1486,14 +1494,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (ret) goto free_everything; - if (IS_ERR(drbg->jent)) { - ret = PTR_ERR(drbg->jent); - drbg->jent = NULL; - if (fips_enabled || ret != -ENOENT) - goto free_everything; - pr_info("DRBG: Continuing without Jitter RNG\n"); - } - reseed = false; } diff --git a/crypto/ecc.c b/crypto/ecc.c index 8facafd678026ac9bfaf0cfe5c9767a77e875a1a..ad739255951f01da1bf42ce68904a070084f97e2 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, static void ecc_point_mult(struct ecc_point *result, const struct ecc_point *point, const u64 *scalar, - u64 *initial_z, u64 *curve_prime, + u64 *initial_z, const struct ecc_curve *curve, unsigned int ndigits) { /* R0 and R1 */ u64 rx[2][ECC_MAX_DIGITS]; u64 ry[2][ECC_MAX_DIGITS]; u64 z[ECC_MAX_DIGITS]; + u64 sk[2][ECC_MAX_DIGITS]; + u64 *curve_prime = curve->p; int i, nb; - int num_bits = vli_num_bits(scalar, ndigits); + int num_bits; + int carry; + + carry = vli_add(sk[0], scalar, curve->n, ndigits); + vli_add(sk[1], sk[0], curve->n, ndigits); + scalar = sk[!carry]; + num_bits = sizeof(u64) * ndigits * 8 + 1; vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); @@ -898,36 +906,50 @@ static void ecc_point_mult(struct ecc_point *result, static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits) { + const __be64 *src = (__force __be64 *)in; int i; for (i = 0; i < ndigits; i++) - out[i] = __swab64(in[ndigits - 1 - i]); + out[i] = be64_to_cpu(src[ndigits - 1 - i]); } -int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, - const u64 *private_key, unsigned int private_key_len) +static int __ecc_is_key_valid(const struct ecc_curve *curve, + const u64 *private_key, unsigned int ndigits) { - int nbytes; - const struct ecc_curve *curve = ecc_get_curve(curve_id); + u64 one[ECC_MAX_DIGITS] = { 1, }; + u64 res[ECC_MAX_DIGITS]; if (!private_key) return -EINVAL; - nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; - - if (private_key_len != nbytes) + if (curve->g.ndigits != ndigits) return -EINVAL; - if (vli_is_zero(private_key, ndigits)) + /* Make sure the private key is in the range [2, n-3]. */ + if (vli_cmp(one, private_key, ndigits) != -1) return -EINVAL; - - /* Make sure the private key is in the range [1, n-1]. */ - if (vli_cmp(curve->n, private_key, ndigits) != 1) + vli_sub(res, curve->n, one, ndigits); + vli_sub(res, res, one, ndigits); + if (vli_cmp(res, private_key, ndigits) != 1) return -EINVAL; return 0; } +int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, + const u64 *private_key, unsigned int private_key_len) +{ + int nbytes; + const struct ecc_curve *curve = ecc_get_curve(curve_id); + + nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + + if (private_key_len != nbytes) + return -EINVAL; + + return __ecc_is_key_valid(curve, private_key, ndigits); +} + /* * ECC private keys are generated using the method of extra random bits, * equivalent to that described in FIPS 186-4, Appendix B.4.1. 
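The __ecc_is_key_valid() helper introduced above tightens the private-key check to the range [2, n - 3]. In ordinary integer terms the two vli_cmp() tests reduce to the following, shown here only to illustrate the comparison logic (the real code operates on multi-digit u64 arrays):

	/* accept d only if 2 <= d <= n - 3 */
	static bool ecc_key_in_range(unsigned long long d, unsigned long long n)
	{
		if (d <= 1)		/* vli_cmp(one, key) != -1 */
			return false;
		if (d >= n - 2)		/* vli_cmp(n - 2, key) != 1 */
			return false;
		return true;
	}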
@@ -971,11 +993,8 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) if (err) return err; - if (vli_is_zero(priv, ndigits)) - return -EINVAL; - - /* Make sure the private key is in the range [1, n-1]. */ - if (vli_cmp(curve->n, priv, ndigits) != 1) + /* Make sure the private key is in the valid range. */ + if (__ecc_is_key_valid(curve, priv, ndigits)) return -EINVAL; ecc_swap_digits(priv, privkey, ndigits); @@ -1004,7 +1023,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, goto out; } - ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits); + ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); if (ecc_point_is_zero(pk)) { ret = -EAGAIN; goto err_free_point; @@ -1090,7 +1109,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, goto err_alloc_product; } - ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); + ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); ecc_swap_digits(product->x, secret, ndigits); diff --git a/crypto/ecdh.c b/crypto/ecdh.c index bf6300175b9cd2416c81d7aab4c13b986e8bba60..a6e1a5d43fa7a2bc54b7527b3003791a6f7f51df 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -57,12 +57,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); - if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, - (const u64 *)params.key, params.key_size) < 0) - return -EINVAL; - memcpy(ctx->private_key, params.key, params.key_size); + if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, + ctx->private_key, params.key_size) < 0) { + memzero_explicit(ctx->private_key, params.key_size); + return -EINVAL; + } return 0; } diff --git a/crypto/gcm.c b/crypto/gcm.c index 0ad879e1f9b2124bffa97e47869dc762ae37d58d..9b0ea3ded1a426341181df122e238dd68f0fdf3f 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst) static int crypto_gcm_create_common(struct crypto_template *tmpl, struct rtattr **tb, - const char *full_name, const char *ctr_name, const char *ghash_name) { @@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, goto err_free_inst; err = -EINVAL; - if (ghash->digestsize != 16) + if (strcmp(ghash->base.cra_name, "ghash") != 0 || + ghash->digestsize != 16) goto err_drop_ghash; crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); @@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr = crypto_spawn_skcipher_alg(&ctx->ctr); - /* We only support 16-byte blocks. */ + /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; - if (crypto_skcipher_alg_ivsize(ctr) != 16) + if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || + crypto_skcipher_alg_ivsize(ctr) != 16 || + ctr->base.cra_blocksize != 1) goto out_put_ctr; - /* Not a stream cipher? 
*/ - if (ctr->base.cra_blocksize != 1) + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; - err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->base.cra_driver_name, ghash_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; - memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + @@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; - char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= - CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_gcm_create_common(tmpl, tb, full_name, - ctr_name, "ghash"); + return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash"); } static struct crypto_template crypto_gcm_tmpl = { @@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, { const char *ctr_name; const char *ghash_name; - char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) @@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, if (IS_ERR(ghash_name)) return PTR_ERR(ghash_name); - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", - ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - return crypto_gcm_create_common(tmpl, tb, full_name, - ctr_name, ghash_name); + return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name); } static struct crypto_template crypto_gcm_base_tmpl = { diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index d9f192b953b22b06ea4adc6b921a1d59a1dde1f2..591b52d3bdca31f4546c4f6b7dd1e1cd6727ed95 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + be128 k; if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); @@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm, if (ctx->gf128) gf128mul_free_4k(ctx->gf128); - ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); + + BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE); + memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */ + ctx->gf128 = gf128mul_init_4k_lle(&k); + memzero_explicit(&k, GHASH_BLOCK_SIZE); + if (!ctx->gf128) return -ENOMEM; diff --git a/crypto/lrw.c b/crypto/lrw.c index 393a782679c7833e1556934a54c1a5a813f7c393..5504d1325a56ab58d583724be58baef24d72b0bb 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -143,7 +143,12 @@ static inline int get_index128(be128 *block) return x + ffz(val); } - return x; + /* + * If we get here, then x == 128 and we are incrementing the counter + * from all ones to all zeros. This means we must return index 127, i.e. + * the one corresponding to key2*{ 1,...,1 }. 
+ */ + return 127; } static int post_crypt(struct skcipher_request *req) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c deleted file mode 100644 index f14152147ce80185e1a2f0cc7adbd72ced29f365..0000000000000000000000000000000000000000 --- a/crypto/mcryptd.c +++ /dev/null @@ -1,675 +0,0 @@ -/* - * Software multibuffer async crypto daemon. - * - * Copyright (c) 2014 Tim Chen - * - * Adapted from crypto daemon. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MCRYPTD_MAX_CPU_QLEN 100 -#define MCRYPTD_BATCH 9 - -static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, - unsigned int tail); - -struct mcryptd_flush_list { - struct list_head list; - struct mutex lock; -}; - -static struct mcryptd_flush_list __percpu *mcryptd_flist; - -struct hashd_instance_ctx { - struct crypto_ahash_spawn spawn; - struct mcryptd_queue *queue; -}; - -static void mcryptd_queue_worker(struct work_struct *work); - -void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay) -{ - struct mcryptd_flush_list *flist; - - if (!cstate->flusher_engaged) { - /* put the flusher on the flush list */ - flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); - mutex_lock(&flist->lock); - list_add_tail(&cstate->flush_list, &flist->list); - cstate->flusher_engaged = true; - cstate->next_flush = jiffies + delay; - queue_delayed_work_on(smp_processor_id(), kcrypto_wq, - &cstate->flush, delay); - mutex_unlock(&flist->lock); - } -} -EXPORT_SYMBOL(mcryptd_arm_flusher); - -static int mcryptd_init_queue(struct mcryptd_queue *queue, - unsigned int max_cpu_qlen) -{ - int cpu; - struct mcryptd_cpu_queue *cpu_queue; - - queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); - pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); - if (!queue->cpu_queue) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); - pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); - crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); - INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); - spin_lock_init(&cpu_queue->q_lock); - } - return 0; -} - -static void mcryptd_fini_queue(struct mcryptd_queue *queue) -{ - int cpu; - struct mcryptd_cpu_queue *cpu_queue; - - for_each_possible_cpu(cpu) { - cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); - BUG_ON(cpu_queue->queue.qlen); - } - free_percpu(queue->cpu_queue); -} - -static int mcryptd_enqueue_request(struct mcryptd_queue *queue, - struct crypto_async_request *request, - struct mcryptd_hash_request_ctx *rctx) -{ - int cpu, err; - struct mcryptd_cpu_queue *cpu_queue; - - cpu_queue = raw_cpu_ptr(queue->cpu_queue); - spin_lock(&cpu_queue->q_lock); - cpu = smp_processor_id(); - rctx->tag.cpu = smp_processor_id(); - - err = crypto_enqueue_request(&cpu_queue->queue, request); - pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", - cpu, cpu_queue, request); - spin_unlock(&cpu_queue->q_lock); - queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); - - return err; -} - -/* - * Try to opportunisticlly flush the partially completed jobs if - * crypto daemon is the only task running. 
- */ -static void mcryptd_opportunistic_flush(void) -{ - struct mcryptd_flush_list *flist; - struct mcryptd_alg_cstate *cstate; - - flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); - while (single_task_running()) { - mutex_lock(&flist->lock); - cstate = list_first_entry_or_null(&flist->list, - struct mcryptd_alg_cstate, flush_list); - if (!cstate || !cstate->flusher_engaged) { - mutex_unlock(&flist->lock); - return; - } - list_del(&cstate->flush_list); - cstate->flusher_engaged = false; - mutex_unlock(&flist->lock); - cstate->alg_state->flusher(cstate); - } -} - -/* - * Called in workqueue context, do one real cryption work (via - * req->complete) and reschedule itself if there are more work to - * do. - */ -static void mcryptd_queue_worker(struct work_struct *work) -{ - struct mcryptd_cpu_queue *cpu_queue; - struct crypto_async_request *req, *backlog; - int i; - - /* - * Need to loop through more than once for multi-buffer to - * be effective. - */ - - cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); - i = 0; - while (i < MCRYPTD_BATCH || single_task_running()) { - - spin_lock_bh(&cpu_queue->q_lock); - backlog = crypto_get_backlog(&cpu_queue->queue); - req = crypto_dequeue_request(&cpu_queue->queue); - spin_unlock_bh(&cpu_queue->q_lock); - - if (!req) { - mcryptd_opportunistic_flush(); - return; - } - - if (backlog) - backlog->complete(backlog, -EINPROGRESS); - req->complete(req, 0); - if (!cpu_queue->queue.qlen) - return; - ++i; - } - if (cpu_queue->queue.qlen) - queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); -} - -void mcryptd_flusher(struct work_struct *__work) -{ - struct mcryptd_alg_cstate *alg_cpu_state; - struct mcryptd_alg_state *alg_state; - struct mcryptd_flush_list *flist; - int cpu; - - cpu = smp_processor_id(); - alg_cpu_state = container_of(to_delayed_work(__work), - struct mcryptd_alg_cstate, flush); - alg_state = alg_cpu_state->alg_state; - if (alg_cpu_state->cpu != cpu) - pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n", - cpu, alg_cpu_state->cpu); - - if (alg_cpu_state->flusher_engaged) { - flist = per_cpu_ptr(mcryptd_flist, cpu); - mutex_lock(&flist->lock); - list_del(&alg_cpu_state->flush_list); - alg_cpu_state->flusher_engaged = false; - mutex_unlock(&flist->lock); - alg_state->flusher(alg_cpu_state); - } -} -EXPORT_SYMBOL_GPL(mcryptd_flusher); - -static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst); - - return ictx->queue; -} - -static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, - unsigned int tail) -{ - char *p; - struct crypto_instance *inst; - int err; - - p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - inst = (void *)(p + head); - - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_free_inst; - - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - - inst->alg.cra_priority = alg->cra_priority + 50; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - -out: - return p; - -out_free_inst: - kfree(p); - p = ERR_PTR(err); - goto out; -} - -static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return 
false; - - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; - - if (*type & *mask & CRYPTO_ALG_INTERNAL) - return true; - else - return false; -} - -static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); - struct crypto_ahash_spawn *spawn = &ictx->spawn; - struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_ahash *hash; - - hash = crypto_spawn_ahash(spawn); - if (IS_ERR(hash)) - return PTR_ERR(hash); - - ctx->child = hash; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct mcryptd_hash_request_ctx) + - crypto_ahash_reqsize(hash)); - return 0; -} - -static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto_free_ahash(ctx->child); -} - -static int mcryptd_hash_setkey(struct crypto_ahash *parent, - const u8 *key, unsigned int keylen) -{ - struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); - struct crypto_ahash *child = ctx->child; - int err; - - crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(child, key, keylen); - crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int mcryptd_hash_enqueue(struct ahash_request *req, - crypto_completion_t complete) -{ - int ret; - - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct mcryptd_queue *queue = - mcryptd_get_queue(crypto_ahash_tfm(tfm)); - - rctx->complete = req->base.complete; - req->base.complete = complete; - - ret = mcryptd_enqueue_request(queue, &req->base, rctx); - - return ret; -} - -static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) -{ - struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_ahash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct ahash_request *desc = &rctx->areq; - - if (unlikely(err == -EINPROGRESS)) - goto out; - - ahash_request_set_tfm(desc, child); - ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req_async); - - rctx->out = req->result; - err = crypto_ahash_init(desc); - -out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); -} - -static int mcryptd_hash_init_enqueue(struct ahash_request *req) -{ - return mcryptd_hash_enqueue(req, mcryptd_hash_init); -} - -static void mcryptd_hash_update(struct crypto_async_request *req_async, int err) -{ - struct ahash_request *req = ahash_request_cast(req_async); - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - - if (unlikely(err == -EINPROGRESS)) - goto out; - - rctx->out = req->result; - err = crypto_ahash_update(&rctx->areq); - if (err) { - req->base.complete = rctx->complete; - goto out; - } - - return; -out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); -} - -static int mcryptd_hash_update_enqueue(struct ahash_request *req) -{ - return mcryptd_hash_enqueue(req, mcryptd_hash_update); -} - -static void mcryptd_hash_final(struct crypto_async_request *req_async, int err) -{ - struct ahash_request *req = ahash_request_cast(req_async); - struct mcryptd_hash_request_ctx *rctx = 
ahash_request_ctx(req); - - if (unlikely(err == -EINPROGRESS)) - goto out; - - rctx->out = req->result; - err = crypto_ahash_final(&rctx->areq); - if (err) { - req->base.complete = rctx->complete; - goto out; - } - - return; -out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); -} - -static int mcryptd_hash_final_enqueue(struct ahash_request *req) -{ - return mcryptd_hash_enqueue(req, mcryptd_hash_final); -} - -static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err) -{ - struct ahash_request *req = ahash_request_cast(req_async); - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - - if (unlikely(err == -EINPROGRESS)) - goto out; - rctx->out = req->result; - err = crypto_ahash_finup(&rctx->areq); - - if (err) { - req->base.complete = rctx->complete; - goto out; - } - - return; -out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); -} - -static int mcryptd_hash_finup_enqueue(struct ahash_request *req) -{ - return mcryptd_hash_enqueue(req, mcryptd_hash_finup); -} - -static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) -{ - struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_ahash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - struct ahash_request *desc = &rctx->areq; - - if (unlikely(err == -EINPROGRESS)) - goto out; - - ahash_request_set_tfm(desc, child); - ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req_async); - - rctx->out = req->result; - err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc); - -out: - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); -} - -static int mcryptd_hash_digest_enqueue(struct ahash_request *req) -{ - return mcryptd_hash_enqueue(req, mcryptd_hash_digest); -} - -static int mcryptd_hash_export(struct ahash_request *req, void *out) -{ - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - - return crypto_ahash_export(&rctx->areq, out); -} - -static int mcryptd_hash_import(struct ahash_request *req, const void *in) -{ - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - - return crypto_ahash_import(&rctx->areq, in); -} - -static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, - struct mcryptd_queue *queue) -{ - struct hashd_instance_ctx *ctx; - struct ahash_instance *inst; - struct hash_alg_common *halg; - struct crypto_alg *alg; - u32 type = 0; - u32 mask = 0; - int err; - - if (!mcryptd_check_internal(tb, &type, &mask)) - return -EINVAL; - - halg = ahash_attr_alg(tb[1], type, mask); - if (IS_ERR(halg)) - return PTR_ERR(halg); - - alg = &halg->base; - pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); - inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), - sizeof(*ctx)); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; - - ctx = ahash_instance_ctx(inst); - ctx->queue = queue; - - err = crypto_init_ahash_spawn(&ctx->spawn, halg, - ahash_crypto_instance(inst)); - if (err) - goto out_free_inst; - - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->cra_flags & (CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_OPTIONAL_KEY)); - - inst->alg.halg.digestsize = halg->digestsize; - inst->alg.halg.statesize = halg->statesize; - inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); - - inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; - inst->alg.halg.base.cra_exit = 
mcryptd_hash_exit_tfm; - - inst->alg.init = mcryptd_hash_init_enqueue; - inst->alg.update = mcryptd_hash_update_enqueue; - inst->alg.final = mcryptd_hash_final_enqueue; - inst->alg.finup = mcryptd_hash_finup_enqueue; - inst->alg.export = mcryptd_hash_export; - inst->alg.import = mcryptd_hash_import; - if (crypto_hash_alg_has_setkey(halg)) - inst->alg.setkey = mcryptd_hash_setkey; - inst->alg.digest = mcryptd_hash_digest_enqueue; - - err = ahash_register_instance(tmpl, inst); - if (err) { - crypto_drop_ahash(&ctx->spawn); -out_free_inst: - kfree(inst); - } - -out_put_alg: - crypto_mod_put(alg); - return err; -} - -static struct mcryptd_queue mqueue; - -static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_DIGEST: - return mcryptd_create_hash(tmpl, tb, &mqueue); - break; - } - - return -EINVAL; -} - -static void mcryptd_free(struct crypto_instance *inst) -{ - struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst); - struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); - - switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_AHASH: - crypto_drop_ahash(&hctx->spawn); - kfree(ahash_instance(inst)); - return; - default: - crypto_drop_spawn(&ctx->spawn); - kfree(inst); - } -} - -static struct crypto_template mcryptd_tmpl = { - .name = "mcryptd", - .create = mcryptd_create, - .free = mcryptd_free, - .module = THIS_MODULE, -}; - -struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, - u32 type, u32 mask) -{ - char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_ahash *tfm; - - if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME, - "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) - return ERR_PTR(-EINVAL); - tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask); - if (IS_ERR(tfm)) - return ERR_CAST(tfm); - if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { - crypto_free_ahash(tfm); - return ERR_PTR(-EINVAL); - } - - return __mcryptd_ahash_cast(tfm); -} -EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); - -struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) -{ - struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - - return ctx->child; -} -EXPORT_SYMBOL_GPL(mcryptd_ahash_child); - -struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req) -{ - struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - return &rctx->areq; -} -EXPORT_SYMBOL_GPL(mcryptd_ahash_desc); - -void mcryptd_free_ahash(struct mcryptd_ahash *tfm) -{ - crypto_free_ahash(&tfm->base); -} -EXPORT_SYMBOL_GPL(mcryptd_free_ahash); - -static int __init mcryptd_init(void) -{ - int err, cpu; - struct mcryptd_flush_list *flist; - - mcryptd_flist = alloc_percpu(struct mcryptd_flush_list); - for_each_possible_cpu(cpu) { - flist = per_cpu_ptr(mcryptd_flist, cpu); - INIT_LIST_HEAD(&flist->list); - mutex_init(&flist->lock); - } - - err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN); - if (err) { - free_percpu(mcryptd_flist); - return err; - } - - err = crypto_register_template(&mcryptd_tmpl); - if (err) { - mcryptd_fini_queue(&mqueue); - free_percpu(mcryptd_flist); - } - - return err; -} - -static void __exit mcryptd_exit(void) -{ - mcryptd_fini_queue(&mqueue); - crypto_unregister_template(&mcryptd_tmpl); - free_percpu(mcryptd_flist); -} - -subsys_initcall(mcryptd_init); -module_exit(mcryptd_exit); - -MODULE_LICENSE("GPL"); 
-MODULE_DESCRIPTION("Software async multibuffer crypto daemon"); -MODULE_ALIAS_CRYPTO("mcryptd"); diff --git a/crypto/morus1280.c b/crypto/morus1280.c index d057cf5ac4a8b07897e7e0e143e65015a4cbc71f..b83576b4eb553d80019413a3f6885e705d68f2f1 100644 --- a/crypto/morus1280.c +++ b/crypto/morus1280.c @@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state, const struct morus1280_ops *ops) { struct skcipher_walk walk; - u8 *dst; - const u8 *src; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; + unsigned int nbytes = walk.nbytes; - ops->crypt_chunk(state, dst, src, walk.nbytes); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - skcipher_walk_done(&walk, 0); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } @@ -385,14 +386,11 @@ static void crypto_morus1280_final(struct morus1280_state *state, struct morus1280_block *tag_xor, u64 assoclen, u64 cryptlen) { - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - struct morus1280_block tmp; unsigned int i; - tmp.words[0] = cpu_to_le64(assocbits); - tmp.words[1] = cpu_to_le64(cryptbits); + tmp.words[0] = assoclen * 8; + tmp.words[1] = cryptlen * 8; tmp.words[2] = 0; tmp.words[3] = 0; diff --git a/crypto/morus640.c b/crypto/morus640.c index 1ca76e54281bf42e8ba31bf92fbb66aff20df650..b6a477444f6de742355e3fedb8cc99f923bfc786 100644 --- a/crypto/morus640.c +++ b/crypto/morus640.c @@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state, const struct morus640_ops *ops) { struct skcipher_walk walk; - u8 *dst; - const u8 *src; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; + unsigned int nbytes = walk.nbytes; - ops->crypt_chunk(state, dst, src, walk.nbytes); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - skcipher_walk_done(&walk, 0); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } @@ -384,21 +385,13 @@ static void crypto_morus640_final(struct morus640_state *state, struct morus640_block *tag_xor, u64 assoclen, u64 cryptlen) { - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - u32 assocbits_lo = (u32)assocbits; - u32 assocbits_hi = (u32)(assocbits >> 32); - u32 cryptbits_lo = (u32)cryptbits; - u32 cryptbits_hi = (u32)(cryptbits >> 32); - struct morus640_block tmp; unsigned int i; - tmp.words[0] = cpu_to_le32(assocbits_lo); - tmp.words[1] = cpu_to_le32(assocbits_hi); - tmp.words[2] = cpu_to_le32(cryptbits_lo); - tmp.words[3] = cpu_to_le32(cryptbits_hi); + tmp.words[0] = lower_32_bits(assoclen * 8); + tmp.words[1] = upper_32_bits(assoclen * 8); + tmp.words[2] = lower_32_bits(cryptlen * 8); + tmp.words[3] = upper_32_bits(cryptlen * 8); for (i = 0; i < MORUS_BLOCK_WORDS; i++) state->s[4].words[i] ^= state->s[0].words[i]; diff --git a/crypto/pcbc.c b/crypto/pcbc.c index ef802f6e964218f06d00b035fc66d960cd4ce700..1b182dfedc948688e7f79f10245ef88ea85eb508 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_xor(iv, src, bsize); @@ -72,7 +72,7 @@ static int 
crypto_pcbc_encrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { @@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_cipher_decrypt_one(tfm, dst, src); @@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { @@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -244,9 +238,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = skcipher_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); if (err) - goto err_free_inst; + goto err_put_alg; err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); if (err) @@ -275,12 +268,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) err = skcipher_register_instance(tmpl, inst); if (err) goto err_drop_spawn; + crypto_mod_put(alg); out: return err; err_drop_spawn: crypto_drop_spawn(spawn); +err_put_alg: + crypto_mod_put(alg); err_free_inst: kfree(inst); goto out; diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index f8ec3d4ba4a80f8eefed739d9e8a852865a7ac02..4542b48b5afd56918b8a33f451ce3e59451ee74e 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -70,35 +70,6 @@ struct pcrypt_aead_ctx { unsigned int cb_cpu; }; -static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, - struct padata_pcrypt *pcrypt) -{ - unsigned int cpu_index, cpu, i; - struct pcrypt_cpumask *cpumask; - - cpu = *cb_cpu; - - rcu_read_lock_bh(); - cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); - if (cpumask_test_cpu(cpu, cpumask->mask)) - goto out; - - if (!cpumask_weight(cpumask->mask)) - goto out; - - cpu_index = cpu % cpumask_weight(cpumask->mask); - - cpu = cpumask_first(cpumask->mask); - for (i = 0; i < cpu_index; i++) - cpu = cpumask_next(cpu, cpumask->mask); - - *cb_cpu = cpu; - -out: - rcu_read_unlock_bh(); - return padata_do_parallel(pcrypt->pinst, padata, cpu); -} - static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -139,12 +110,14 @@ static void pcrypt_aead_enc(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); + int ret; - padata->info = crypto_aead_encrypt(req); + ret = crypto_aead_encrypt(req); - if (padata->info == -EINPROGRESS) + if (ret == -EINPROGRESS) return; + padata->info = ret; 
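	/*
	 * padata->info now only ever carries a final status: if the transform
	 * returned -EINPROGRESS the request is still running asynchronously
	 * and its completion callback is expected to serialize it later, so
	 * nothing is recorded here.
	 */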
padata_do_serial(padata); } @@ -170,9 +143,13 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); + err = padata_do_parallel(pencrypt.pinst, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_encrypt(creq); + } return err; } @@ -181,12 +158,14 @@ static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); + int ret; - padata->info = crypto_aead_decrypt(req); + ret = crypto_aead_decrypt(req); - if (padata->info == -EINPROGRESS) + if (ret == -EINPROGRESS) return; + padata->info = ret; padata_do_serial(padata); } @@ -212,9 +191,13 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); + err = padata_do_parallel(pdecrypt.pinst, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_decrypt(creq); + } return err; } @@ -394,7 +377,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) int ret; pinst->kobj.kset = pcrypt_kset; - ret = kobject_add(&pinst->kobj, NULL, name); + ret = kobject_add(&pinst->kobj, NULL, "%s", name); if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); @@ -505,11 +488,12 @@ static int __init pcrypt_init(void) static void __exit pcrypt_exit(void) { + crypto_unregister_template(&pcrypt_tmpl); + pcrypt_fini_padata(&pencrypt); pcrypt_fini_padata(&pdecrypt); kset_unregister(pcrypt_kset); - crypto_unregister_template(&pcrypt_tmpl); } module_init(pcrypt_init); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 9893dbfc1af4524fdea9a0f2d6aa1b225534cde7..e2fe43f4b4c641f160db6684b144b7e3df271662 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -216,16 +216,14 @@ static void pkcs1pad_encrypt_sign_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_encrypt_sign_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, - pkcs1pad_encrypt_sign_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_encrypt(struct akcipher_request *req) @@ -261,15 +259,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req) pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); - req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); - if (!req_ctx->out_buf) { - kfree(req_ctx->in_buf); - return -ENOMEM; - } - - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, - ctx->key_size, NULL); - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_encrypt_sign_complete_cb, req); @@ -343,15 +332,14 @@ static void pkcs1pad_decrypt_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; + + err = 
pkcs1pad_decrypt_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_decrypt(struct akcipher_request *req) @@ -484,6 +472,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) goto done; pos++; + if (digest_info->size > dst_len - pos) + goto done; if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) goto done; @@ -509,15 +499,14 @@ static void pkcs1pad_verify_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_verify_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); +out: + akcipher_request_complete(req, err); } /* @@ -535,7 +524,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); int err; - if (!ctx->key_size || req->src_len < ctx->key_size) + if (!ctx->key_size || req->src_len != ctx->key_size) return -EINVAL; req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index 8c77bc78a09f2693a1b8acf4e8748d10ced93da7..df8fc0f543741e657b6fcfc012b8ef9257a3f9ad 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - salsa20_init(state, ctx, walk.iv); + salsa20_init(state, ctx, req->iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; diff --git a/crypto/scompress.c b/crypto/scompress.c index 968bbcf65c9411679d74f737a2550bddb5eb0e51..5f3fb6cbf531c95ac5a7bf4eb58eaee07d4f679a 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -149,6 +149,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) const int cpu = get_cpu(); u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); + unsigned int dlen; int ret; if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { @@ -164,6 +165,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) req->dlen = SCOMP_SCRATCH_SIZE; + dlen = req->dlen; + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); if (dir) ret = crypto_scomp_compress(scomp, scratch_src, req->slen, @@ -174,8 +177,13 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) if (!ret) { if (!req->dst) { req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); - if (!req->dst) + if (!req->dst) { + ret = -ENOMEM; goto out; + } + } else if (req->dlen > dlen) { + ret = -ENOSPC; + goto out; } scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, 1); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 39dbf2f7e5f5cd0d123665bdee0897b9ede49a58..ca68608ab14e123db8a937945de318a8c1eab804 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -30,7 +30,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) struct aead_request *subreq = aead_request_ctx(req); struct crypto_aead 
*geniv; - if (err == -EINPROGRESS) + if (err == -EINPROGRESS || err == -EBUSY) return; if (err) diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 7c3382facc82e8bb706a48029d90875cafb6a156..600bd288881ddd69c6bb5d5487eb73eaea5c1789 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -229,7 +229,13 @@ x4 ^= x2; \ }) -static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k) +/* + * both gcc and clang have misoptimized this function in the past, + * producing horrible object code from spilling temporary variables + * on the stack. Forcing this part out of line avoids that. + */ +static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, + u32 r3, u32 r4, u32 *k) { k += 100; S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); diff --git a/crypto/shash.c b/crypto/shash.c index 5d732c6bb4b2158f59e7e16e0f96508b8ae90f91..a04145e5306a5491ff02bd1a535c54e18aaf3a4e 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, return err; } +static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) +{ + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, else err = shash->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + shash_set_needkey(tfm, shash); return err; + } crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -368,7 +377,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->final = shash_async_final; crt->finup = shash_async_finup; crt->digest = shash_async_digest; - crt->setkey = shash_async_setkey; + if (crypto_shash_alg_has_setkey(alg)) + crt->setkey = shash_async_setkey; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); @@ -390,9 +400,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) hash->descsize = alg->descsize; - if (crypto_shash_alg_has_setkey(alg) && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); + shash_set_needkey(hash, alg); return 0; } diff --git a/crypto/simd.c b/crypto/simd.c index ea7240be3001ba245c12d3214c11a7c7e6a8a1fd..78e8d037ae2b342d94ff837d6c9de82b6a4a1090 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) ctx->cryptd_tfm = cryptd_tfm; - reqsize = sizeof(struct skcipher_request); - reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); + reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm)); + reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base)); + reqsize += sizeof(struct skcipher_request); crypto_skcipher_set_reqsize(tfm, reqsize); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 0bd8c6caa4987ebdab4a6e17f92a7b903b91cd21..a8750b4ebf264cd4a52724199e389fcb06b55d9f 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) +static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; @@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk 
*walk, unsigned int bsize) addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); + return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { - unsigned int n; /* bytes processed */ - bool more; + unsigned int n = walk->nbytes; + unsigned int nbytes = 0; - if (unlikely(err < 0)) + if (!n) goto finish; - n = walk->nbytes - err; - walk->total -= n; - more = (walk->total != 0); + if (likely(err >= 0)) { + n -= err; + nbytes = walk->total - n; + } if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | SKCIPHER_WALK_SLOW | @@ -131,30 +133,37 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err) memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (WARN_ON(err)) { - /* unexpected case; didn't process all bytes */ + if (err > 0) { + /* + * Didn't process all bytes. Either the algorithm is + * broken, or this was the last step and it turned out + * the message wasn't evenly divisible into blocks but + * the algorithm requires it. + */ err = -EINVAL; - goto finish; - } - skcipher_done_slow(walk, n); - goto already_advanced; + nbytes = 0; + } else + n = skcipher_done_slow(walk, n); } + if (err > 0) + err = 0; + + walk->total = nbytes; + walk->nbytes = 0; + scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); -already_advanced: - scatterwalk_done(&walk->in, 0, more); - scatterwalk_done(&walk->out, 1, more); + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); - if (more) { + if (nbytes) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } - err = 0; -finish: - walk->nbytes = 0; +finish: /* Short-circuit for the common/fast path. 
*/ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; @@ -584,6 +593,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) return crypto_alg_extsize(alg); } +static void skcipher_set_needkey(struct crypto_skcipher *tfm) +{ + if (tfm->keysize) + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -597,8 +612,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -676,8 +693,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -697,8 +713,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -775,8 +793,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -819,8 +836,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -852,8 +871,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad8482cce58bf2a660aa37a4c652e6..c0cf87ae7ef6d0716e4e15f797603efaee96bb2b 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) for (i = 0; i <= 63; i++) { - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); ss2 = ss1 ^ rol32(a, 12); diff --git a/crypto/speck.c b/crypto/speck.c deleted file mode 100644 index 58aa9f7f91f791e58ccc7e2e1ed54de5134ccce2..0000000000000000000000000000000000000000 --- a/crypto/speck.c +++ /dev/null @@ -1,307 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Speck: a lightweight block cipher - * - * Copyright (c) 2018 Google, Inc - * - * Speck has 10 variants, including 5 block sizes. For now we only implement - * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and - * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits - * and a key size of K bits. 
The Speck128 variants are believed to be the most - * secure variants, and they use the same block size and key sizes as AES. The - * Speck64 variants are less secure, but on 32-bit processors are usually - * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less - * secure and/or not as well suited for implementation on either 32-bit or - * 64-bit processors, so are omitted. - * - * Reference: "The Simon and Speck Families of Lightweight Block Ciphers" - * https://eprint.iacr.org/2013/404.pdf - * - * In a correspondence, the Speck designers have also clarified that the words - * should be interpreted in little-endian format, and the words should be - * ordered such that the first word of each block is 'y' rather than 'x', and - * the first key word (rather than the last) becomes the first round key. - */ - -#include -#include -#include -#include -#include -#include - -/* Speck128 */ - -static __always_inline void speck128_round(u64 *x, u64 *y, u64 k) -{ - *x = ror64(*x, 8); - *x += *y; - *x ^= k; - *y = rol64(*y, 3); - *y ^= *x; -} - -static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k) -{ - *y ^= *x; - *y = ror64(*y, 3); - *x ^= k; - *x -= *y; - *x = rol64(*x, 8); -} - -void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u64 y = get_unaligned_le64(in); - u64 x = get_unaligned_le64(in + 8); - int i; - - for (i = 0; i < ctx->nrounds; i++) - speck128_round(&x, &y, ctx->round_keys[i]); - - put_unaligned_le64(y, out); - put_unaligned_le64(x, out + 8); -} -EXPORT_SYMBOL_GPL(crypto_speck128_encrypt); - -static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in); -} - -void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u64 y = get_unaligned_le64(in); - u64 x = get_unaligned_le64(in + 8); - int i; - - for (i = ctx->nrounds - 1; i >= 0; i--) - speck128_unround(&x, &y, ctx->round_keys[i]); - - put_unaligned_le64(y, out); - put_unaligned_le64(x, out + 8); -} -EXPORT_SYMBOL_GPL(crypto_speck128_decrypt); - -static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in); -} - -int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, - unsigned int keylen) -{ - u64 l[3]; - u64 k; - int i; - - switch (keylen) { - case SPECK128_128_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - ctx->nrounds = SPECK128_128_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[0], &k, i); - } - break; - case SPECK128_192_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - l[1] = get_unaligned_le64(key + 16); - ctx->nrounds = SPECK128_192_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[i % 2], &k, i); - } - break; - case SPECK128_256_KEY_SIZE: - k = get_unaligned_le64(key); - l[0] = get_unaligned_le64(key + 8); - l[1] = get_unaligned_le64(key + 16); - l[2] = get_unaligned_le64(key + 24); - ctx->nrounds = SPECK128_256_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck128_round(&l[i % 3], &k, i); - } - break; - default: - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_speck128_setkey); - -static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen); -} - -/* 
Speck64 */ - -static __always_inline void speck64_round(u32 *x, u32 *y, u32 k) -{ - *x = ror32(*x, 8); - *x += *y; - *x ^= k; - *y = rol32(*y, 3); - *y ^= *x; -} - -static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k) -{ - *y ^= *x; - *y = ror32(*y, 3); - *x ^= k; - *x -= *y; - *x = rol32(*x, 8); -} - -void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u32 y = get_unaligned_le32(in); - u32 x = get_unaligned_le32(in + 4); - int i; - - for (i = 0; i < ctx->nrounds; i++) - speck64_round(&x, &y, ctx->round_keys[i]); - - put_unaligned_le32(y, out); - put_unaligned_le32(x, out + 4); -} -EXPORT_SYMBOL_GPL(crypto_speck64_encrypt); - -static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in); -} - -void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in) -{ - u32 y = get_unaligned_le32(in); - u32 x = get_unaligned_le32(in + 4); - int i; - - for (i = ctx->nrounds - 1; i >= 0; i--) - speck64_unround(&x, &y, ctx->round_keys[i]); - - put_unaligned_le32(y, out); - put_unaligned_le32(x, out + 4); -} -EXPORT_SYMBOL_GPL(crypto_speck64_decrypt); - -static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) -{ - crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in); -} - -int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, - unsigned int keylen) -{ - u32 l[3]; - u32 k; - int i; - - switch (keylen) { - case SPECK64_96_KEY_SIZE: - k = get_unaligned_le32(key); - l[0] = get_unaligned_le32(key + 4); - l[1] = get_unaligned_le32(key + 8); - ctx->nrounds = SPECK64_96_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck64_round(&l[i % 2], &k, i); - } - break; - case SPECK64_128_KEY_SIZE: - k = get_unaligned_le32(key); - l[0] = get_unaligned_le32(key + 4); - l[1] = get_unaligned_le32(key + 8); - l[2] = get_unaligned_le32(key + 12); - ctx->nrounds = SPECK64_128_NROUNDS; - for (i = 0; i < ctx->nrounds; i++) { - ctx->round_keys[i] = k; - speck64_round(&l[i % 3], &k, i); - } - break; - default: - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_speck64_setkey); - -static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) -{ - return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen); -} - -/* Algorithm definitions */ - -static struct crypto_alg speck_algs[] = { - { - .cra_name = "speck128", - .cra_driver_name = "speck128-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SPECK128_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct speck128_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = SPECK128_128_KEY_SIZE, - .cia_max_keysize = SPECK128_256_KEY_SIZE, - .cia_setkey = speck128_setkey, - .cia_encrypt = speck128_encrypt, - .cia_decrypt = speck128_decrypt - } - } - }, { - .cra_name = "speck64", - .cra_driver_name = "speck64-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SPECK64_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct speck64_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = SPECK64_96_KEY_SIZE, - .cia_max_keysize = SPECK64_128_KEY_SIZE, - .cia_setkey = speck64_setkey, - .cia_encrypt = speck64_encrypt, - .cia_decrypt = speck64_decrypt - } - } - } -}; - -static int __init speck_module_init(void) -{ - return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit 
speck_module_exit(void) -{ - crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_module_init); -module_exit(speck_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (generic)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("speck128"); -MODULE_ALIAS_CRYPTO("speck128-generic"); -MODULE_ALIAS_CRYPTO("speck64"); -MODULE_ALIAS_CRYPTO("speck64-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index bdde95e8d3693184e51f6526e251bf803cf85748..366f4510acbe49f9fa0c76887b4266cd99e35ffc 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -202,8 +202,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -472,8 +472,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc, return ret; } - printk("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } @@ -763,8 +763,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1103,6 +1103,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs, break; } + if (speed[i].klen) + crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); + pr_info("test%3u " "(%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); @@ -1197,8 +1200,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, goto out; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount * num_mb, secs, (long)bcount * blen * num_mb); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); out: kfree(rc); @@ -1282,15 +1285,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, goto out_free_tfm; } - - for (i = 0; i < num_mb; ++i) - if (testmgr_alloc_buf(data[i].xbuf)) { - while (i--) - testmgr_free_buf(data[i].xbuf); - goto out_free_tfm; - } - - for (i = 0; i < num_mb; ++i) { data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { @@ -1435,8 +1429,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc, return ret; } - pr_cont("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount, secs, (u64)bcount * blen); return 0; } @@ -1733,6 +1727,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("xts(aes)"); ret += tcrypt_test("ctr(aes)"); ret += tcrypt_test("rfc3686(ctr(aes))"); + ret += tcrypt_test("cfb(aes)"); break; case 11: @@ -2059,6 +2054,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_16_24_32); test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32); + test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, + 
speed_template_16_24_32); break; case 201: diff --git a/crypto/testmgr.c b/crypto/testmgr.c index a1d42245082aa78a6977ba8fa6b042b4ffcf05ed..13cb2ea99d6a50d5185bd1b68224d0c65d4adae5 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm, int ilen; unsigned int dlen = COMP_BUF_SIZE; - memset(output, 0, sizeof(COMP_BUF_SIZE)); - memset(decomp_output, 0, sizeof(COMP_BUF_SIZE)); + memset(output, 0, COMP_BUF_SIZE); + memset(decomp_output, 0, COMP_BUF_SIZE); ilen = ctemplate[i].inlen; ret = crypto_comp_compress(tfm, ctemplate[i].input, @@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm, int ilen; unsigned int dlen = COMP_BUF_SIZE; - memset(decomp_output, 0, sizeof(COMP_BUF_SIZE)); + memset(decomp_output, 0, COMP_BUF_SIZE); ilen = dtemplate[i].inlen; ret = crypto_comp_decompress(tfm, dtemplate[i].input, @@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, err = alg_test_hash(desc, driver, type, mask); if (err) - goto out; + return err; tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) { + /* + * This crc32c implementation is only available through + * ahash API, not the shash API, so the remaining part + * of the test is not applicable to it. + */ + return 0; + } printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); - err = PTR_ERR(tfm); - goto out; + return PTR_ERR(tfm); } do { @@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, crypto_free_shash(tfm); -out: return err; } @@ -2684,6 +2690,13 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(aes_ccm_dec_tv_template) } } + }, { + .alg = "cfb(aes)", + .test = alg_test_skcipher, + .fips_allowed = 1, + .suite = { + .cipher = __VECS(aes_cfb_tv_template) + }, }, { .alg = "chacha20", .test = alg_test_skcipher, @@ -3037,18 +3050,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(sm4_tv_template) } - }, { - .alg = "ecb(speck128)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck128_tv_template) - } - }, { - .alg = "ecb(speck64)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck64_tv_template) - } }, { .alg = "ecb(tea)", .test = alg_test_skcipher, @@ -3576,18 +3577,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(serpent_xts_tv_template) } - }, { - .alg = "xts(speck128)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck128_xts_tv_template) - } - }, { - .alg = "xts(speck64)", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(speck64_xts_tv_template) - } }, { .alg = "xts(twofish)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 173111c70746e31438f50362913e8bb112779060..74e1454cae1e7cb7e0592c18d0981f6aeab10c5c 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -5592,7 +5592,49 @@ static const struct hash_testvec poly1305_tv_template[] = { .psize = 80, .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", - }, + }, { /* Regression test for overflow in AVX2 implementation */ + .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + 
"\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff", + .psize = 300, + .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" + "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", + } }; /* @@ -10198,918 +10240,180 @@ static const struct cipher_testvec sm4_tv_template[] = { } }; -/* - * Speck test vectors taken from the original paper: - * "The Simon and Speck Families of Lightweight Block Ciphers" - * https://eprint.iacr.org/2013/404.pdf - * - * Note that the paper does not make byte and word order clear. But it was - * confirmed with the authors that the intended orders are little endian byte - * order and (y, x) word order. Equivalently, the printed test vectors, when - * looking at only the bytes (ignoring the whitespace that divides them into - * words), are backwards: the left-most byte is actually the one with the - * highest memory address, while the right-most byte is actually the one with - * the lowest memory address. 
- */ - -static const struct cipher_testvec speck128_tv_template[] = { - { /* Speck128/128 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", +/* Cast6 test vectors from RFC 2612 */ +static const struct cipher_testvec cast6_tv_template[] = { + { + .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" + "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d", .klen = 16, - .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74" - "\x20\x65\x71\x75\x69\x76\x61\x6c", - .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" - "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .ptext = zeroed_string, + .ctext = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20" + "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b", .len = 16, - }, { /* Speck128/192 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17", + }, { + .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" + "\xbe\xd0\xac\x83\x94\x0a\xc2\x98" + "\xba\xc7\x7a\x77\x17\x94\x28\x63", .klen = 24, - .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43" - "\x68\x69\x65\x66\x20\x48\x61\x72", - .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" - "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .ptext = zeroed_string, + .ctext = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb" + "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8", .len = 16, - }, { /* Speck128/256 */ - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + }, { + .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" + "\xbe\xd0\xac\x83\x94\x0a\xc2\x98" + "\x8d\x7c\x47\xce\x26\x49\x08\x46" + "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04", .klen = 32, - .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" - "\x49\x6e\x20\x74\x68\x6f\x73\x65", - .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" - "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .ptext = zeroed_string, + .ctext = "\x4f\x6a\x20\x38\x28\x68\x97\xb9" + "\xc9\x87\x01\x36\x55\x33\x17\xfa", .len = 16, + }, { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", + .klen = 32, + .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" + "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" + "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" + "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" + "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" + "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" + "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" + "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" + "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" + "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" + "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" + "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" + "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" + "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" + "\x29\xC0\x57\xEE\x62\xF9\x90\x04" + "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" + "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" + "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" + "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" + "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" + "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" + "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" + "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" + "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" + "\x57\xEE\x85\x1C\x90\x27\xBE\x32" + "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" + "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" + "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" + "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" + "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" + "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" + "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" + "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" + "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" + "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" + "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" + "\x69\x00\x74\x0B\xA2\x16\xAD\x44" + 
"\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" + "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" + "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" + "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" + "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" + "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" + "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" + "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" + "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" + "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" + "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" + "\x58\xEF\x86\x1D\x91\x28\xBF\x33" + "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" + "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" + "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" + "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" + "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" + "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" + "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" + "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" + "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" + "\x86\x1D\xB4\x28\xBF\x56\xED\x61" + "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" + "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" + "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", + .ctext = "\xC3\x70\x22\x32\xF5\x80\xCB\x54" + "\xFC\x30\xE0\xF6\xEB\x39\x57\xA6" + "\xB6\xB9\xC5\xA4\x91\x55\x14\x97" + "\xC1\x20\xFF\x6C\x5C\xF0\x67\xEA" + "\x2F\xED\xD8\xC9\xFB\x38\x3F\xFE" + "\x93\xBE\xDC\x00\xD3\x7F\xAD\x4C" + "\x5A\x08\x92\xD1\x47\x0C\xFA\x6C" + "\xD0\x6A\x99\x10\x72\xF8\x47\x62" + "\x81\x42\xF8\xD8\xF5\xBB\x94\x08" + "\xAA\x97\xA2\x8B\x69\xB3\xD2\x7E" + "\xBC\xB5\x00\x0C\xE5\x44\x4B\x58" + "\xE8\x63\xDC\xB3\xC4\xE5\x23\x12" + "\x5A\x72\x85\x47\x8B\xEC\x9F\x26" + "\x84\xB6\xED\x10\x33\x63\x9B\x5F" + "\x4D\x53\xEE\x94\x45\x8B\x60\x58" + "\x86\x20\xF9\x1E\x82\x08\x3E\x58" + "\x60\x1B\x34\x19\x02\xBE\x4E\x09" + "\xBB\x7C\x15\xCC\x60\x27\x55\x7A" + "\x12\xB8\xD8\x08\x89\x3C\xA6\xF3" + "\xF1\xDD\xA7\x07\xA3\x12\x85\x28" + "\xE9\x57\xAC\x80\x0C\x5C\x0F\x3A" + "\x5D\xC2\x91\xC7\x90\xE4\x8C\x43" + "\x92\xE4\x7C\x26\x69\x4D\x83\x68" + "\x14\x96\x42\x47\xBD\xA9\xE4\x8A" + "\x33\x19\xEB\x54\x8E\x0D\x4B\x6E" + "\x91\x51\xB5\x36\x08\xDE\x1C\x06" + "\x03\xBD\xDE\x81\x26\xF7\x99\xC2" + "\xBA\xF7\x6D\x87\x0D\xE4\xA6\xCF" + "\xC1\xF5\x27\x05\xB8\x02\x57\x72" + "\xE6\x42\x13\x0B\xC6\x47\x05\x74" + "\x24\x15\xF7\x0D\xC2\x23\x9D\xB9" + "\x3C\x77\x18\x93\xBA\xB4\xFC\x8C" + "\x98\x82\x67\x67\xB4\xD7\xD3\x43" + "\x23\x08\x02\xB7\x9B\x99\x05\xFB" + "\xD3\xB5\x00\x0A\xA9\x9D\x66\xD6" + "\x2E\x49\x58\xD0\xA8\x57\x29\x7F" + "\x0A\x0E\x7D\xFC\x92\x83\xCC\x67" + "\xA2\xB1\x70\x3A\x8F\x87\x4A\x8D" + "\x17\xE2\x58\x2B\x88\x0D\x68\x62" + "\xBF\x35\xD1\x6F\xC0\xF0\x18\x62" + "\xB2\xC7\x2D\x58\xC7\x16\xDE\x08" + "\xEB\x84\x1D\x25\xA7\x38\x94\x06" + "\x93\x9D\xF8\xFE\x88\x71\xE7\x84" + "\x2C\xA0\x38\xA3\x1D\x48\xCF\x29" + "\x0B\xBC\xD8\x50\x99\x1A\x26\xFB" + "\x8E\x75\x3D\x73\xEB\x6A\xED\x29" + "\xE0\x8E\xED\xFC\xFE\x6F\xF6\xBA" + "\x41\xE2\x10\x4C\x01\x8B\x69\x2B" + "\x25\x3F\x4D\x70\x7B\x92\xD6\x3B" + "\xAC\xF9\x77\x18\xD9\x6A\x30\xA6" + "\x2E\xFA\x30\xFF\xC8\xD5\x1D\x06" + "\x59\x28\x1D\x86\x43\x04\x5D\x3B" + "\x99\x4C\x04\x5A\x21\x17\x8B\x76" + "\x8F\x72\xCB\xA1\x9C\x29\x4C\xC3" + "\x65\xA2\x58\x2A\xC5\x66\x24\xBF" + "\xBA\xE6\x0C\xDD\x34\x24\x74\xC8" + "\x84\x0A\x66\x2C\xBE\x8F\x32\xA9" + "\xE7\xE4\xA1\xD7\xDA\xAB\x23\x1E" + "\xEB\xEE\x6C\x94\x6F\x9C\x2E\xD1" + "\x49\x2C\xF3\xD4\x90\xCC\x93\x4C" + "\x84\x52\x6D\x68\xDE\xC6\x64\xB2" + "\x11\x74\x93\x57\xB4\x7E\xC6\x00", + .len = 496, + .also_non_np = 1, + .np = 3, + .tap = { 496 - 20, 4, 16 }, }, }; -/* - * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the - * ciphertext recomputed with Speck128 as the cipher - */ -static const struct cipher_testvec speck128_xts_tv_template[] = { - { - .key = 
"\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" - "\x3b\x99\x4a\x64\x74\x77\xac\xed" - "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" - "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", - .len = 32, - }, { - .key = "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x22\x22\x22\x22\x22\x22\x22\x22" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 32, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" - "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" - "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" - "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", - .len = 32, - }, { - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" - "\x22\x22\x22\x22\x22\x22\x22\x22" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 32, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55" - "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" - "\xda\x63\xb2\xf1\x82\xb0\x89\x59" - "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", - .len = 32, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x31\x41\x59\x26\x53\x58\x97\x93" - "\x23\x84\x62\x64\x33\x83\x27\x95", - .klen = 32, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - 
"\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" - "\x53\xd0\xed\x2d\x30\xc1\x20\xef" - "\x70\x67\x5e\xff\x09\x70\xbb\xc1" - "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" - "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" - "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" - "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" - "\x19\xc5\x58\x84\x63\xb9\x12\x68" - "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" - "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" - "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" - "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" - "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" - "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" - "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" - "\xa7\x49\xa0\x0e\x09\x33\x85\x50" - "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" - "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" - "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" - "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" - "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" - "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" - "\xa5\x20\xac\x24\x1c\x73\x59\x73" - "\x58\x61\x3a\x87\x58\xb3\x20\x56" - "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" - "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" - "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" - "\x09\x35\x71\x50\x65\xac\x92\xe3" - "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" - "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" - "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" - "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" - "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" - "\x2a\x26\xcc\x49\x14\x6d\x55\x01" - "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" - "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" - "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" - "\x24\xa9\x60\xa4\x97\x85\x86\x2a" - "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" - "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" - "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" - "\x87\x30\xac\xd5\xea\x73\x49\x10" - "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" - "\x66\x02\x35\x3d\x60\x06\x36\x4f" - "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" - "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" - "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" - "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" - "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" - "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" - "\xbd\x48\x50\xcd\x75\x70\xc4\x62" - "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" - "\x51\x66\x02\x69\x04\x97\x36\xd4" - "\x75\xae\x0b\xa3\x42\xf8\xca\x79" - "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" - "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" - "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" - "\x03\xe7\x05\x39\xf5\x05\x26\xee" - "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" - "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" - "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" - "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" - "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" - "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", - .len = 512, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x62\x49\x77\x57\x24\x70\x93\x69" - "\x99\x59\x57\x49\x66\x96\x76\x27" - "\x31\x41\x59\x26\x53\x58\x97\x93" - "\x23\x84\x62\x64\x33\x83\x27\x95" - "\x02\x88\x41\x97\x16\x93\x99\x37" - "\x51\x05\x82\x09\x74\x94\x45\x92", - 
.klen = 64, - .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" - "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" - "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" - "\x92\x99\xde\xd3\x76\xed\xcd\x63" - "\x64\x3a\x22\x57\xc1\x43\x49\xd4" - "\x79\x36\x31\x19\x62\xae\x10\x7e" - "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" - "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" - "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" - "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" - "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" - "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" - "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" - "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" - "\x85\x3c\x4f\x26\x64\x85\xbc\x68" - "\xb0\xe0\x86\x5e\x26\x41\xce\x11" - "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" - "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" - "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" - "\x14\x4d\xf0\x74\x37\xfd\x07\x25" - "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" - "\x1b\x83\x4d\x15\x83\xac\x57\xa0" - "\xac\xa5\xd0\x38\xef\x19\x56\x53" - "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" - "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" - "\xed\x22\x34\x1c\x5d\xed\x17\x06" - "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" - "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" - "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" - 
"\x35\x91\x3d\x1b\xe4\x97\x9f\x92" - "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" - "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" - "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" - "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" - "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" - "\x19\xf5\x94\xf9\xd2\x00\x33\x37" - "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" - "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" - "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" - "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" - "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" - "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" - "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" - "\xad\xb7\x73\xf8\x78\x12\xc8\x59" - "\x17\x80\x4c\x57\x39\xf1\x6d\x80" - "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" - "\xec\xce\xb7\xc8\x02\x8a\xed\x53" - "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" - "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" - "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" - "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" - "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" - "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" - "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" - "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" - "\xcc\x1f\x48\x49\x65\x47\x75\xe9" - "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" - "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" - "\xa1\x51\x89\x3b\xeb\x96\x42\xac" - "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" - "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" - "\x66\x8d\x13\xca\xe0\x59\x2a\x00" - "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" - "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", - .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, - } -}; - -static const struct cipher_testvec speck64_tv_template[] = { - { /* Speck64/96 */ - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" - "\x10\x11\x12\x13", - .klen = 12, - .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74", - .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f", - .len = 8, - }, { /* Speck64/128 */ - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" - "\x10\x11\x12\x13\x18\x19\x1a\x1b", - .klen = 16, - .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b", - .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", - .len = 8, - }, -}; - -/* - * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the - * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted - */ -static const struct cipher_testvec speck64_xts_tv_template[] = { - { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 24, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" - "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" - "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" - "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", - .len = 32, - }, { - .key = "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x11\x11\x11\x11\x11\x11\x11\x11" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 24, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59" - "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" - "\xb3\x12\x69\x7e\x36\xeb\x52\xff" - "\x62\xdd\xba\x90\xb3\xe1\xee\x99", - .len = 32, - }, { - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" - "\x22\x22\x22\x22\x22\x22\x22\x22", - .klen = 24, - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - "\x44\x44\x44\x44\x44\x44\x44\x44" - 
"\x44\x44\x44\x44\x44\x44\x44\x44", - .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" - "\x27\x36\xc0\xbf\x5d\xea\x36\x37" - "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" - "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", - .len = 32, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x31\x41\x59\x26\x53\x58\x97\x93", - .klen = 24, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" - "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" - "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" - "\x11\xc7\x39\x96\xd0\x95\xf4\x56" - "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" - "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" - "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" - "\xd5\xd2\x13\x86\x94\x34\xe9\x62" - "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" - "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" - "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" - "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" - "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" - "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" - "\x29\xe5\xbe\x54\x30\xcb\x46\x95" - "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" - "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" - "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" - "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" - "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" 
- "\x82\xc0\x37\x27\xfc\x91\xa7\x05" - "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" - "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" - "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" - "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" - "\x07\xff\xf3\x72\x74\x48\xb5\x40" - "\x50\xb5\xdd\x90\x43\x31\x18\x15" - "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" - "\x29\x93\x90\x8b\xda\x07\xf0\x35" - "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" - "\x94\x12\xbb\x33\x27\x1d\x3f\x23" - "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" - "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" - "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" - "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" - "\xcf\x37\xef\xc9\x11\x01\x5c\x45" - "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" - "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" - "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" - "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" - "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" - "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" - "\x10\xff\xaf\xef\xca\xdd\x4f\x80" - "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" - "\x33\xca\x00\x8b\x8b\x3f\xea\xec" - "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" - "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" - "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" - "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" - "\xc7\x1b\x53\x17\x02\xea\xd1\xad" - "\x13\x51\x73\xc0\xa0\xb2\x05\x32" - "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" - "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" - "\x59\xda\xee\x1a\x22\x18\xda\x0d" - "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" - "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" - "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" - "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" - "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" - "\x7d\x69\x8d\x00\x62\x77\x0d\x14" - "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" - "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" - "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" - "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", - .len = 512, - }, { - .key = "\x27\x18\x28\x18\x28\x45\x90\x45" - "\x23\x53\x60\x28\x74\x71\x35\x26" - "\x62\x49\x77\x57\x24\x70\x93\x69" - "\x99\x59\x57\x49\x66\x96\x76\x27", - .klen = 32, - .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - "\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" - "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x21\x22\x23\x24\x25\x26\x27" - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" - "\x30\x31\x32\x33\x34\x35\x36\x37" - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" - "\x40\x41\x42\x43\x44\x45\x46\x47" - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" - "\x50\x51\x52\x53\x54\x55\x56\x57" - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" - 
"\x60\x61\x62\x63\x64\x65\x66\x67" - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" - "\x70\x71\x72\x73\x74\x75\x76\x77" - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" - "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" - "\xe8\xe9\xea\xeb\xec\xed\xee\xef" - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", - .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" - "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" - "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" - "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" - "\x78\x16\xea\x80\xdb\x33\x75\x94" - "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" - "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" - "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" - "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" - "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" - "\xf5\xec\x32\x74\xa3\xb8\x03\x88" - "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" - "\x84\x5e\x46\xed\x20\x89\xb6\x44" - "\x8d\xd0\xed\x54\x47\x16\xbe\x95" - "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" - "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" - "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" - "\x8f\x1e\x10\x07\x57\x25\x98\x7b" - "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" - "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" - "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" - "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" - "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" - "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" - "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" - "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" - "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" - "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" - "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" - "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" - "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" - "\x7b\x7a\x4a\xad\x35\x65\x27\xca" - "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" - "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" - "\xfc\x42\xc8\x31\x59\xbe\x16\x60" - "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" - "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" - "\x52\x7f\x29\x51\x94\x20\x67\x3c" - "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" - "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" - "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" - "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" - "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" - "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" - "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" - "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" - "\x65\x53\x0f\x41\x11\xbd\x98\x99" - "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" - "\x84\x98\xf9\x34\xed\x33\x2a\x1f" - "\x82\xed\xc1\x73\x98\xd3\x02\xdc" - "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" - "\x63\x51\x34\x9d\x96\x12\xae\xce" - "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" - "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" - "\x54\x27\x74\xbb\x10\x86\x57\x46" - "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" - "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" - "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" - "\x7b\x4f\x38\x55\x36\x71\x64\xc1" - "\xfc\x5c\x75\x52\x33\x02\x18\xf8" - "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" - "\x9b\x63\x76\x32\x2f\x19\x72\x10" - "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" - "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", - .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, - } -}; - -/* Cast6 test vectors from RFC 2612 */ -static const struct cipher_testvec cast6_tv_template[] = { - { - .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" - "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d", - .klen = 16, - .ptext = zeroed_string, - .ctext = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20" - "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b", - .len = 16, - }, { - .key = 
"\x23\x42\xbb\x9e\xfa\x38\x54\x2c" - "\xbe\xd0\xac\x83\x94\x0a\xc2\x98" - "\xba\xc7\x7a\x77\x17\x94\x28\x63", - .klen = 24, - .ptext = zeroed_string, - .ctext = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb" - "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8", - .len = 16, - }, { - .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c" - "\xbe\xd0\xac\x83\x94\x0a\xc2\x98" - "\x8d\x7c\x47\xce\x26\x49\x08\x46" - "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04", - .klen = 32, - .ptext = zeroed_string, - .ctext = "\x4f\x6a\x20\x38\x28\x68\x97\xb9" - "\xc9\x87\x01\x36\x55\x33\x17\xfa", - .len = 16, - }, { /* Generated from TF test vectors */ - .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" - "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" - "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" - "\x78\xBE\x9B\x78\x55\x32\x0F\x55", - .klen = 32, - .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" - "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", - .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" - "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" - "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" - "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87" - "\x1E\x92\x29\xC0\x34\xCB\x62\xF9" - "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48" - "\xDF\x76\x0D\x81\x18\xAF\x23\xBA" - "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C" - "\xC3\x37\xCE\x65\xFC\x70\x07\x9E" - "\x12\xA9\x40\xD7\x4B\xE2\x79\x10" - "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F" - "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1" - "\x68\xFF\x73\x0A\xA1\x15\xAC\x43" - "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5" - "\x29\xC0\x57\xEE\x62\xF9\x90\x04" - "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76" - "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8" - "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A" - "\xF1\x65\xFC\x93\x07\x9E\x35\xCC" - "\x40\xD7\x6E\x05\x79\x10\xA7\x1B" - "\xB2\x49\xE0\x54\xEB\x82\x19\x8D" - "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF" - "\x96\x0A\xA1\x38\xCF\x43\xDA\x71" - "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3" - "\x57\xEE\x85\x1C\x90\x27\xBE\x32" - "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4" - "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16" - "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88" - "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA" - "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49" - "\xE0\x77\x0E\x82\x19\xB0\x24\xBB" - "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D" - "\xC4\x38\xCF\x66\xFD\x71\x08\x9F" - "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11" - "\x85\x1C\xB3\x27\xBE\x55\xEC\x60" - "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2" - "\x69\x00\x74\x0B\xA2\x16\xAD\x44" - "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6" - "\x2A\xC1\x58\xEF\x63\xFA\x91\x05" - "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77" - "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9" - "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B" - "\xF2\x66\xFD\x94\x08\x9F\x36\xCD" - "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C" - "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E" - "\x25\xBC\x30\xC7\x5E\xF5\x69\x00" - "\x97\x0B\xA2\x39\xD0\x44\xDB\x72" - "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4" - "\x58\xEF\x86\x1D\x91\x28\xBF\x33" - "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5" - "\x3C\xD3\x47\xDE\x75\x0C\x80\x17" - "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89" - "\x20\x94\x2B\xC2\x36\xCD\x64\xFB" - "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A" - "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC" - "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E" - "\xC5\x39\xD0\x67\xFE\x72\x09\xA0" - "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12" - "\x86\x1D\xB4\x28\xBF\x56\xED\x61" - "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3" - "\x6A\x01\x75\x0C\xA3\x17\xAE\x45" - "\xDC\x50\xE7\x7E\x15\x89\x20\xB7", - .ctext = "\xC3\x70\x22\x32\xF5\x80\xCB\x54" - "\xFC\x30\xE0\xF6\xEB\x39\x57\xA6" - "\xB6\xB9\xC5\xA4\x91\x55\x14\x97" - "\xC1\x20\xFF\x6C\x5C\xF0\x67\xEA" - "\x2F\xED\xD8\xC9\xFB\x38\x3F\xFE" - "\x93\xBE\xDC\x00\xD3\x7F\xAD\x4C" - "\x5A\x08\x92\xD1\x47\x0C\xFA\x6C" - "\xD0\x6A\x99\x10\x72\xF8\x47\x62" - "\x81\x42\xF8\xD8\xF5\xBB\x94\x08" - "\xAA\x97\xA2\x8B\x69\xB3\xD2\x7E" - 
"\xBC\xB5\x00\x0C\xE5\x44\x4B\x58" - "\xE8\x63\xDC\xB3\xC4\xE5\x23\x12" - "\x5A\x72\x85\x47\x8B\xEC\x9F\x26" - "\x84\xB6\xED\x10\x33\x63\x9B\x5F" - "\x4D\x53\xEE\x94\x45\x8B\x60\x58" - "\x86\x20\xF9\x1E\x82\x08\x3E\x58" - "\x60\x1B\x34\x19\x02\xBE\x4E\x09" - "\xBB\x7C\x15\xCC\x60\x27\x55\x7A" - "\x12\xB8\xD8\x08\x89\x3C\xA6\xF3" - "\xF1\xDD\xA7\x07\xA3\x12\x85\x28" - "\xE9\x57\xAC\x80\x0C\x5C\x0F\x3A" - "\x5D\xC2\x91\xC7\x90\xE4\x8C\x43" - "\x92\xE4\x7C\x26\x69\x4D\x83\x68" - "\x14\x96\x42\x47\xBD\xA9\xE4\x8A" - "\x33\x19\xEB\x54\x8E\x0D\x4B\x6E" - "\x91\x51\xB5\x36\x08\xDE\x1C\x06" - "\x03\xBD\xDE\x81\x26\xF7\x99\xC2" - "\xBA\xF7\x6D\x87\x0D\xE4\xA6\xCF" - "\xC1\xF5\x27\x05\xB8\x02\x57\x72" - "\xE6\x42\x13\x0B\xC6\x47\x05\x74" - "\x24\x15\xF7\x0D\xC2\x23\x9D\xB9" - "\x3C\x77\x18\x93\xBA\xB4\xFC\x8C" - "\x98\x82\x67\x67\xB4\xD7\xD3\x43" - "\x23\x08\x02\xB7\x9B\x99\x05\xFB" - "\xD3\xB5\x00\x0A\xA9\x9D\x66\xD6" - "\x2E\x49\x58\xD0\xA8\x57\x29\x7F" - "\x0A\x0E\x7D\xFC\x92\x83\xCC\x67" - "\xA2\xB1\x70\x3A\x8F\x87\x4A\x8D" - "\x17\xE2\x58\x2B\x88\x0D\x68\x62" - "\xBF\x35\xD1\x6F\xC0\xF0\x18\x62" - "\xB2\xC7\x2D\x58\xC7\x16\xDE\x08" - "\xEB\x84\x1D\x25\xA7\x38\x94\x06" - "\x93\x9D\xF8\xFE\x88\x71\xE7\x84" - "\x2C\xA0\x38\xA3\x1D\x48\xCF\x29" - "\x0B\xBC\xD8\x50\x99\x1A\x26\xFB" - "\x8E\x75\x3D\x73\xEB\x6A\xED\x29" - "\xE0\x8E\xED\xFC\xFE\x6F\xF6\xBA" - "\x41\xE2\x10\x4C\x01\x8B\x69\x2B" - "\x25\x3F\x4D\x70\x7B\x92\xD6\x3B" - "\xAC\xF9\x77\x18\xD9\x6A\x30\xA6" - "\x2E\xFA\x30\xFF\xC8\xD5\x1D\x06" - "\x59\x28\x1D\x86\x43\x04\x5D\x3B" - "\x99\x4C\x04\x5A\x21\x17\x8B\x76" - "\x8F\x72\xCB\xA1\x9C\x29\x4C\xC3" - "\x65\xA2\x58\x2A\xC5\x66\x24\xBF" - "\xBA\xE6\x0C\xDD\x34\x24\x74\xC8" - "\x84\x0A\x66\x2C\xBE\x8F\x32\xA9" - "\xE7\xE4\xA1\xD7\xDA\xAB\x23\x1E" - "\xEB\xEE\x6C\x94\x6F\x9C\x2E\xD1" - "\x49\x2C\xF3\xD4\x90\xCC\x93\x4C" - "\x84\x52\x6D\x68\xDE\xC6\x64\xB2" - "\x11\x74\x93\x57\xB4\x7E\xC6\x00", - .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, - }, -}; - -static const struct cipher_testvec cast6_cbc_tv_template[] = { - { /* Generated from TF test vectors */ - .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" - "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" - "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" - "\x78\xBE\x9B\x78\x55\x32\x0F\x55", +static const struct cipher_testvec cast6_cbc_tv_template[] = { + { /* Generated from TF test vectors */ + .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" + "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" + "\x27\x04\xE1\x27\x04\xE1\xBE\x9B" + "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", @@ -12081,6 +11385,107 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { }, }; +static const struct cipher_testvec aes_cfb_tv_template[] = { + { /* From NIST SP800-38A */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" + 
"\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", + .len = 64, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" + "\x34\xc2\x59\x09\xc9\x9a\x41\x74" + "\x67\xce\x7f\x7f\x81\x17\x36\x21" + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" + "\x42\xae\x8f\xba\x58\x4b\x09\xff", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" + "\x7e\xcd\x84\x86\x98\x5d\x38\x60" + "\x39\xff\xed\x14\x3b\x28\xb1\xc8" + "\x32\x11\x3c\x63\x31\xe5\x40\x7b" + "\xdf\x10\x13\x24\x15\xe5\x4b\x92" + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" + "\x20\x31\x62\x3d\x55\xb1\xe4\x71", + .len = 64, + .also_non_np = 1, + .np = 2, + .tap = { 31, 33 }, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, + }, +}; + static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 022d3dd76c3b22c27c16137d1c686dbb0abce97a..f8e1d9f9938f5f847f880a90ffa3f7a95023afcc 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -25,8 +25,9 @@ #include #include #include -#include #include +#include +#include #define TGR192_DIGEST_SIZE 24 #define TGR160_DIGEST_SIZE 20 @@ -468,10 +469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) u64 a, b, c, aa, bb, cc; u64 x[8]; int i; - const __le64 *ptr = (const __le64 *)data; for (i = 0; i < 8; i++) - x[i] = le64_to_cpu(ptr[i]); + x[i] = get_unaligned_le64(data + i * sizeof(__le64)); /* save */ a = aa = tctx->a; diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index dd1eea90f67f1c9c998fd00941b4cb6c9b2e7fef..fc99ce413a76593bb2d4cac7a79300e3ff555dd4 100644 --- 
a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -244,6 +244,7 @@ config ACPI_CPU_FREQ_PSS config ACPI_PROCESSOR_CSTATE def_bool y + depends on ACPI_PROCESSOR depends on IA64 || X86 config ACPI_PROCESSOR_IDLE @@ -338,7 +339,7 @@ config ACPI_CUSTOM_DSDT_FILE See Documentation/acpi/dsdt-override.txt Enter the full path name to the file which includes the AmlCode - declaration. + or dsdt_aml_code declaration. If unsure, don't enter a file name. @@ -470,6 +471,7 @@ config ACPI_REDUCED_HARDWARE_ONLY If you are unsure what to do, do not enable this option. source "drivers/acpi/nfit/Kconfig" +source "drivers/acpi/hmat/Kconfig" source "drivers/acpi/apei/Kconfig" source "drivers/acpi/dptf/Kconfig" @@ -498,6 +500,9 @@ config ACPI_EXTLOG driver adds support for that functionality with corresponding tracepoint which carries that information to userspace. +config ACPI_ADXL + bool + menuconfig PMIC_OPREGION bool "PMIC (Power Management Integrated Circuit) operation region support" help diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 6d59aa109a91a349d521d87b9e465ad2d0e17d63..696ea10f7788f3b5d191d7bb89060ac2c47e29d4 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -61,6 +61,9 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o +# Address translation +acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o + # These are (potentially) separate modules # IPMI may be used by other drivers, so it has to initialise before them @@ -76,6 +79,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI) += container.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-$(CONFIG_ACPI_NFIT) += nfit/ +obj-$(CONFIG_ACPI_HMAT) += hmat/ obj-$(CONFIG_ACPI) += acpi_memhotplug.o obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o obj-$(CONFIG_ACPI_BATTERY) += battery.o diff --git a/drivers/acpi/acpi_adxl.c b/drivers/acpi/acpi_adxl.c new file mode 100644 index 0000000000000000000000000000000000000000..13c8f7b50c463abc8b30a47299193f93eb2a3df5 --- /dev/null +++ b/drivers/acpi/acpi_adxl.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Address translation interface via ACPI DSM. + * Copyright (C) 2018 Intel Corporation + * + * Specification for this interface is available at: + * + * https://cdrdv2.intel.com/v1/dl/getContent/603354 + */ + +#include +#include + +#define ADXL_REVISION 0x1 +#define ADXL_IDX_GET_ADDR_PARAMS 0x1 +#define ADXL_IDX_FORWARD_TRANSLATE 0x2 +#define ACPI_ADXL_PATH "\\_SB.ADXL" + +/* + * The specification doesn't provide a limit on how many + * components are in a memory address. But since we allocate + * memory based on the number the BIOS tells us, we should + * defend against insane values. 
+ */ +#define ADXL_MAX_COMPONENTS 500 + +#undef pr_fmt +#define pr_fmt(fmt) "ADXL: " fmt + +static acpi_handle handle; +static union acpi_object *params; +static const guid_t adxl_guid = + GUID_INIT(0xAA3C050A, 0x7EA4, 0x4C1F, + 0xAF, 0xDA, 0x12, 0x67, 0xDF, 0xD3, 0xD4, 0x8D); + +static int adxl_count; +static char **adxl_component_names; + +static union acpi_object *adxl_dsm(int cmd, union acpi_object argv[]) +{ + union acpi_object *obj, *o; + + obj = acpi_evaluate_dsm_typed(handle, &adxl_guid, ADXL_REVISION, + cmd, argv, ACPI_TYPE_PACKAGE); + if (!obj) { + pr_info("DSM call failed for cmd=%d\n", cmd); + return NULL; + } + + if (obj->package.count != 2) { + pr_info("Bad pkg count %d\n", obj->package.count); + goto err; + } + + o = obj->package.elements; + if (o->type != ACPI_TYPE_INTEGER) { + pr_info("Bad 1st element type %d\n", o->type); + goto err; + } + if (o->integer.value) { + pr_info("Bad ret val %llu\n", o->integer.value); + goto err; + } + + o = obj->package.elements + 1; + if (o->type != ACPI_TYPE_PACKAGE) { + pr_info("Bad 2nd element type %d\n", o->type); + goto err; + } + return obj; + +err: + ACPI_FREE(obj); + return NULL; +} + +/** + * adxl_get_component_names - get list of memory component names + * Returns NULL terminated list of string names + * + * Give the caller a pointer to the list of memory component names + * e.g. { "SystemAddress", "ProcessorSocketId", "ChannelId", ... NULL } + * Caller should count how many strings in order to allocate a buffer + * for the return from adxl_decode(). + */ +const char * const *adxl_get_component_names(void) +{ + return (const char * const *)adxl_component_names; +} +EXPORT_SYMBOL_GPL(adxl_get_component_names); + +/** + * adxl_decode - ask BIOS to decode a system address to memory address + * @addr: the address to decode + * @component_values: pointer to array of values for each component + * Returns 0 on success, negative error code otherwise + * + * The index of each value returned in the array matches the index of + * each component name returned by adxl_get_component_names(). + * Components that are not defined for this address translation (e.g. + * mirror channel number for a non-mirrored address) are set to ~0ull. 
+ */ +int adxl_decode(u64 addr, u64 component_values[]) +{ + union acpi_object argv4[2], *results, *r; + int i, cnt; + + if (!adxl_component_names) + return -EOPNOTSUPP; + + argv4[0].type = ACPI_TYPE_PACKAGE; + argv4[0].package.count = 1; + argv4[0].package.elements = &argv4[1]; + argv4[1].integer.type = ACPI_TYPE_INTEGER; + argv4[1].integer.value = addr; + + results = adxl_dsm(ADXL_IDX_FORWARD_TRANSLATE, argv4); + if (!results) + return -EINVAL; + + r = results->package.elements + 1; + cnt = r->package.count; + if (cnt != adxl_count) { + ACPI_FREE(results); + return -EINVAL; + } + r = r->package.elements; + + for (i = 0; i < cnt; i++) + component_values[i] = r[i].integer.value; + + ACPI_FREE(results); + + return 0; +} +EXPORT_SYMBOL_GPL(adxl_decode); + +static int __init adxl_init(void) +{ + char *path = ACPI_ADXL_PATH; + union acpi_object *p; + acpi_status status; + int i; + + status = acpi_get_handle(NULL, path, &handle); + if (ACPI_FAILURE(status)) { + pr_debug("No ACPI handle for path %s\n", path); + return -ENODEV; + } + + if (!acpi_has_method(handle, "_DSM")) { + pr_info("No DSM method\n"); + return -ENODEV; + } + + if (!acpi_check_dsm(handle, &adxl_guid, ADXL_REVISION, + ADXL_IDX_GET_ADDR_PARAMS | + ADXL_IDX_FORWARD_TRANSLATE)) { + pr_info("DSM method does not support forward translate\n"); + return -ENODEV; + } + + params = adxl_dsm(ADXL_IDX_GET_ADDR_PARAMS, NULL); + if (!params) { + pr_info("Failed to get component names\n"); + return -ENODEV; + } + + p = params->package.elements + 1; + adxl_count = p->package.count; + if (adxl_count > ADXL_MAX_COMPONENTS) { + pr_info("Insane number of address component names %d\n", adxl_count); + ACPI_FREE(params); + return -ENODEV; + } + p = p->package.elements; + + /* + * Allocate one extra for NULL termination. 
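
Taken together, adxl_get_component_names() and adxl_decode() let a caller size a value buffer from the NULL-terminated name list and then look up each decoded component by the same index. A minimal sketch of a hypothetical consumer, assuming kernel context and a linux/adxl.h header declaring both exports; the function and variable names below are illustrative only:

#include <linux/adxl.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int example_translate(u64 sys_addr)
{
	const char * const *names = adxl_get_component_names();
	u64 *values;
	int i, n = 0, ret;

	if (!names)		/* ADXL not available on this platform */
		return -EOPNOTSUPP;

	while (names[n])	/* count components to size the buffer */
		n++;

	values = kcalloc(n, sizeof(*values), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	ret = adxl_decode(sys_addr, values);
	if (!ret)
		for (i = 0; i < n; i++)	/* undefined components come back as ~0ull */
			pr_info("%s: 0x%llx\n", names[i],
				(unsigned long long)values[i]);

	kfree(values);
	return ret;
}
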
+ */ + adxl_component_names = kcalloc(adxl_count + 1, sizeof(char *), GFP_KERNEL); + if (!adxl_component_names) { + ACPI_FREE(params); + return -ENOMEM; + } + + for (i = 0; i < adxl_count; i++) + adxl_component_names[i] = p[i].string.pointer; + + return 0; +} +subsys_initcall(adxl_init); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 2664452fa112630d254106099bc8e3026a2ecb7b..36446c489b96101eec7f0dd5cd98f3ee9c1b13a5 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -105,6 +105,8 @@ static int st_misc_setup(struct apd_private_data *pdata) resource_size(rentry->res)); break; } + if (!clk_data->base) + return -ENOMEM; acpi_dev_free_resource_list(&resource_list); @@ -162,10 +164,31 @@ static const struct apd_device_desc hip08_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 250000000, }; + +static const struct apd_device_desc phytium_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 200000000, +}; + +static const struct apd_device_desc phytium_pe220x_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 50000000, +}; + +static const struct apd_device_desc hip08_lite_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 125000000, +}; + static const struct apd_device_desc thunderx2_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 125000000, }; + +static const struct apd_device_desc hip08_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 250000000, +}; #endif #else @@ -226,6 +249,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, { "AMD0040", APD_ADDR(st_misc_desc)}, + { "HYGO0010", APD_ADDR(wt_i2c_desc) }, #endif #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, @@ -234,6 +258,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, + { "HISI0173", APD_ADDR(hip08_spi_desc) }, + { "PHYT0003", APD_ADDR(phytium_i2c_desc) }, + { "PHYT0038", APD_ADDR(phytium_pe220x_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index b588503890941c552749458520a8909056742573..c0325556a897be8f1eef00b0a92e04613a5f78cb 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -269,7 +269,12 @@ static int __init acpi_configfs_init(void) acpi_table_group = configfs_register_default_group(root, "table", &acpi_tables_type); - return PTR_ERR_OR_ZERO(acpi_table_group); + if (IS_ERR(acpi_table_group)) { + configfs_unregister_subsystem(&acpi_configfs); + return PTR_ERR(acpi_table_group); + } + + return 0; } module_init(acpi_configfs_init); diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index f21c99ec46ee0935605f09d28e4e2161be1abdc4..a2dcd62ea32ffb54818a3f16482608f12ee92cc0 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -614,7 +614,7 @@ static ssize_t acpi_aml_read(struct file *file, char __user *buf, if (!count) return 0; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; while (count > 0) { @@ -684,7 +684,7 @@ static ssize_t acpi_aml_write(struct file *file, const char __user *buf, if (!count) return 0; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; while (count > 0) { diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 
cf4fc0161164158e3f83e65e173a6389ab0eaec2..c39c56904c520278d6fea1787503e4a4105236e9 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -106,7 +106,7 @@ static void lpit_update_residency(struct lpit_residency_info *info, struct acpi_lpit_native *lpit_native) { info->frequency = lpit_native->counter_frequency ? - lpit_native->counter_frequency : tsc_khz * 1000; + lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U); if (!info->frequency) info->frequency = 1; @@ -117,11 +117,17 @@ static void lpit_update_residency(struct lpit_residency_info *info, if (!info->iomem_addr) return; + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) + return; + /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_system_residency_us.attr, "cpuidle"); } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) + return; + /* Silently fail, if cpuidle attribute group is not present */ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj, &dev_attr_low_power_idle_cpu_residency_us.attr, diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index bf64cfa30febf173640729db80f9a567ce50b4b0..b9d407135f94c6488cb303133da06500091e65cd 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,7 @@ struct lpss_device_desc { size_t prv_size_override; struct property_entry *properties; void (*setup)(struct lpss_private_data *pdata); + bool resume_from_noirq; }; static const struct lpss_device_desc lpss_dma_desc = { @@ -99,6 +101,9 @@ struct lpss_private_data { u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; }; +/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */ +static u32 pmc_atom_d3_mask = 0xfe000ffe; + /* LPSS run time quirks */ static unsigned int lpss_quirks; @@ -175,6 +180,21 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) static void byt_i2c_setup(struct lpss_private_data *pdata) { + const char *uid_str = acpi_device_uid(pdata->adev); + acpi_handle handle = pdata->adev->handle; + unsigned long long shared_host = 0; + acpi_status status; + long uid = 0; + + /* Expected to always be true, but better safe then sorry */ + if (uid_str) + uid = simple_strtol(uid_str, NULL, 10); + + /* Detect I2C bus shared with PUNIT and ignore its d3 status */ + status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); + if (ACPI_SUCCESS(status) && shared_host && uid) + pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1)); + lpss_deassert_reset(pdata); if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) @@ -274,12 +294,14 @@ static const struct lpss_device_desc byt_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = byt_i2c_setup, + .resume_from_noirq = true, }; static const struct lpss_device_desc bsw_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, .prv_offset = 0x800, .setup = byt_i2c_setup, + .resume_from_noirq = true, }; static const struct lpss_device_desc bsw_spi_dev_desc = { @@ -292,7 +314,7 @@ static const struct lpss_device_desc bsw_spi_dev_desc = { #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */ + ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry 
Trail */ {} }; @@ -327,9 +349,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "INT33FC", }, /* Braswell LPSS devices */ + { "80862286", LPSS_ADDR(lpss_dma_desc) }, { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) }, { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, + { "808622C0", LPSS_ADDR(lpss_dma_desc) }, { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) }, /* Broadwell LPSS devices */ @@ -377,6 +401,9 @@ static int register_device_clock(struct acpi_device *adev, if (!lpss_clk_dev) lpt_register_clock_device(); + if (IS_ERR(lpss_clk_dev)) + return PTR_ERR(lpss_clk_dev); + clk_data = platform_get_drvdata(lpss_clk_dev); if (!clk_data) return -ENODEV; @@ -492,12 +519,18 @@ static int match_hid_uid(struct device *dev, void *data) static struct device *acpi_lpss_find_device(const char *hid, const char *uid) { + struct device *dev; + struct hid_uid data = { .hid = hid, .uid = uid, }; - return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid); + dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid); + if (dev) + return dev; + + return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid); } static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) @@ -635,12 +668,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, * have _PS0 and _PS3 without _PSC (and no power resources), so * acpi_bus_init_power() will assume that the BIOS has put them into D0. */ - ret = acpi_device_fix_up_power(adev); - if (ret) { - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; - } + acpi_device_fix_up_power(adev); adev->driver_data = pdata; pdev = acpi_create_platform_device(adev, dev_desc->properties); @@ -892,7 +920,7 @@ static void lpss_iosf_enter_d3_state(void) * Here we read the values related to LPSS power island, i.e. LPSS * devices, excluding both LPSS DMA controllers, along with SCC domain. */ - u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe; + u32 func_dis, d3_sts_0, pmc_status; int ret; ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis); @@ -910,7 +938,7 @@ static void lpss_iosf_enter_d3_state(void) * Shutdown both LPSS DMA controllers if and only if all other devices * are already in D3hot. */ - pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask; + pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask; if (pmc_status) goto exit; @@ -1004,7 +1032,7 @@ static int acpi_lpss_resume(struct device *dev) } #ifdef CONFIG_PM_SLEEP -static int acpi_lpss_suspend_late(struct device *dev) +static int acpi_lpss_do_suspend_late(struct device *dev) { int ret; @@ -1015,12 +1043,62 @@ static int acpi_lpss_suspend_late(struct device *dev) return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); } -static int acpi_lpss_resume_early(struct device *dev) +static int acpi_lpss_suspend_late(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_suspend_late(dev); +} + +static int acpi_lpss_suspend_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + if (pdata->dev_desc->resume_from_noirq) { + ret = acpi_lpss_do_suspend_late(dev); + if (ret) + return ret; + } + + return acpi_subsys_suspend_noirq(dev); +} + +static int acpi_lpss_do_resume_early(struct device *dev) { int ret = acpi_lpss_resume(dev); return ret ? 
ret : pm_generic_resume_early(dev); } + +static int acpi_lpss_resume_early(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + + if (pdata->dev_desc->resume_from_noirq) + return 0; + + return acpi_lpss_do_resume_early(dev); +} + +static int acpi_lpss_resume_noirq(struct device *dev) +{ + struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + int ret; + + ret = acpi_subsys_resume_noirq(dev); + if (ret) + return ret; + + if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq) + ret = acpi_lpss_do_resume_early(dev); + + return ret; +} + #endif /* CONFIG_PM_SLEEP */ static int acpi_lpss_runtime_suspend(struct device *dev) @@ -1050,8 +1128,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = { .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_lpss_suspend_late, - .suspend_noirq = acpi_subsys_suspend_noirq, - .resume_noirq = acpi_subsys_resume_noirq, + .suspend_noirq = acpi_lpss_suspend_noirq, + .resume_noirq = acpi_lpss_resume_noirq, .resume_early = acpi_lpss_resume_early, .freeze = acpi_subsys_freeze, .freeze_late = acpi_subsys_freeze_late, @@ -1059,8 +1137,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = { .thaw_noirq = acpi_subsys_thaw_noirq, .poweroff = acpi_subsys_suspend, .poweroff_late = acpi_lpss_suspend_late, - .poweroff_noirq = acpi_subsys_suspend_noirq, - .restore_noirq = acpi_subsys_resume_noirq, + .poweroff_noirq = acpi_lpss_suspend_noirq, + .restore_noirq = acpi_lpss_resume_noirq, .restore_early = acpi_lpss_resume_early, #endif .runtime_suspend = acpi_lpss_runtime_suspend, diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 6b0d3ef7309cb710a63b38848ad35b770fed3c1a..8fe0960ea572495d927ead4053c783e23a94631b 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) if (node < 0) node = memory_add_physaddr_to_nid(info->start_addr); - result = add_memory(node, info->start_addr, info->length); + result = __add_memory(node, info->start_addr, info->length); /* * If the memory block has been used by the kernel, add_memory() @@ -282,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device) nid = memory_add_physaddr_to_nid(info->start_addr); acpi_unbind_memory_blocks(info); - remove_memory(nid, info->start_addr, info->length); + __remove_memory(nid, info->start_addr, info->length); list_del(&info->list); kfree(info); } diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 552c1f725b6cf5ab8d4a86ba556c37a053d91b1d..6a730ae83777d5255a724209b8c1c4cde03ad93a 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -70,8 +70,11 @@ static void power_saving_mwait_init(void) #if defined(CONFIG_X86) switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. 
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index eaa60c94205a82f685190a5c0790d6ca91df69cb..1f32caa87686e2369b0f4841447d5ab5093b993d 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = { {"PNP0200", 0}, /* AT DMA Controller */ {"ACPI0009", 0}, /* IOxAPIC */ {"ACPI000A", 0}, /* IOAPIC */ + {"SMB0001", 0}, /* ACPI SMBUS virtual device */ {"", 0}, }; diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 67d97c0090a27ad6a9ae9daecf1a163178481a95..5d72baf60ac83365de7b36a47531d408023f5e7f 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -320,6 +320,9 @@ static bool matching_id(const char *idstr, const char *list_id) { int i; + if (strlen(idstr) != strlen(list_id)) + return false; + if (memcmp(idstr, list_id, 3)) return false; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 449d86d39965e6090a518eab4f9c18aa9f2f6852..fa5dc338a65a287bb130e2c285c068eb67868a52 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device) } if (acpi_duplicate_processor_id(pr->acpi_id)) { - dev_err(&device->dev, - "Failed to get unique processor _UID (0x%x)\n", - pr->acpi_id); + if (pr->acpi_id == 0xff) + dev_info_once(&device->dev, + "Entry not well-defined, consider updating BIOS\n"); + else + dev_err(&device->dev, + "Failed to get unique processor _UID (0x%x)\n", + pr->acpi_id); return -ENODEV; } @@ -387,7 +391,7 @@ static int acpi_processor_add(struct acpi_device *device, result = acpi_processor_get_info(device); if (result) /* Processor is not physically present or unavailable */ - return 0; + goto err_clear_driver_data; BUG_ON(pr->id >= nr_cpu_ids); @@ -402,7 +406,7 @@ static int acpi_processor_add(struct acpi_device *device, "BIOS reported wrong ACPI id %d for the processor\n", pr->id); /* Give up, but do not abort the namespace scan. 
*/ - goto err; + goto err_clear_driver_data; } /* * processor_device_array is not cleared on errors to allow buggy BIOS @@ -414,12 +418,12 @@ static int acpi_processor_add(struct acpi_device *device, dev = get_cpu_device(pr->id); if (!dev) { result = -ENODEV; - goto err; + goto err_clear_per_cpu; } result = acpi_bind_one(dev, device); if (result) - goto err; + goto err_clear_per_cpu; pr->dev = dev; @@ -430,10 +434,11 @@ static int acpi_processor_add(struct acpi_device *device, dev_err(dev, "Processor driver could not be attached\n"); acpi_unbind_one(dev); - err: - free_cpumask_var(pr->throttling.shared_cpu_map); - device->driver_data = NULL; + err_clear_per_cpu: per_cpu(processors, pr->id) = NULL; + err_clear_driver_data: + device->driver_data = NULL; + free_cpumask_var(pr->throttling.shared_cpu_map); err_free_pr: kfree(pr); return result; @@ -643,7 +648,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, status = acpi_get_type(handle, &acpi_type); if (ACPI_FAILURE(status)) - return false; + return status; switch (acpi_type) { case ACPI_TYPE_PROCESSOR: @@ -663,11 +668,12 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, } processor_validated_ids_update(uid); - return true; + return AE_OK; err: + /* Exit on error, but don't abort the namespace walk */ acpi_handle_info(handle, "Invalid processor object\n"); - return false; + return AE_OK; } @@ -703,3 +709,185 @@ void __init acpi_processor_init(void) acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); acpi_scan_add_handler(&processor_container_handler); } + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +/** + * acpi_processor_claim_cst_control - Request _CST control from the platform. + */ +bool acpi_processor_claim_cst_control(void) +{ + static bool cst_control_claimed; + acpi_status status; + + if (!acpi_gbl_FADT.cst_control || cst_control_claimed) + return true; + + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, + acpi_gbl_FADT.cst_control, 8); + if (ACPI_FAILURE(status)) { + pr_warn("ACPI: Failed to claim processor _CST control\n"); + return false; + } + + cst_control_claimed = true; + return true; +} +EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control); + +/** + * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. + * @handle: ACPI handle of the processor object containing the _CST. + * @cpu: The numeric ID of the target CPU. + * @info: Object write the C-states information into. + * + * Extract the C-state information for the given CPU from the output of the _CST + * control method under the corresponding ACPI processor object (or processor + * device object) and populate @info with it. + * + * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke + * acpi_processor_ffh_cstate_probe() to verify them and update the + * cpu_cstate_entry data for @cpu. + */ +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *cst; + acpi_status status; + u64 count; + int last_index = 0; + int i, ret = 0; + + status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "No _CST\n"); + return -ENODEV; + } + + cst = buffer.pointer; + + /* There must be at least 2 elements. 
*/ + if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { + acpi_handle_warn(handle, "Invalid _CST output\n"); + ret = -EFAULT; + goto end; + } + + count = cst->package.elements[0].integer.value; + + /* Validate the number of C-states. */ + if (count < 1 || count != cst->package.count - 1) { + acpi_handle_warn(handle, "Inconsistent _CST data\n"); + ret = -EFAULT; + goto end; + } + + for (i = 1; i <= count; i++) { + union acpi_object *element; + union acpi_object *obj; + struct acpi_power_register *reg; + struct acpi_processor_cx cx; + + /* + * If there is not enough space for all C-states, skip the + * excess ones and log a warning. + */ + if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { + acpi_handle_warn(handle, + "No room for more idle states (limit: %d)\n", + ACPI_PROCESSOR_MAX_POWER - 1); + break; + } + + memset(&cx, 0, sizeof(cx)); + + element = &cst->package.elements[i]; + if (element->type != ACPI_TYPE_PACKAGE) + continue; + + if (element->package.count != 4) + continue; + + obj = &element->package.elements[0]; + + if (obj->type != ACPI_TYPE_BUFFER) + continue; + + reg = (struct acpi_power_register *)obj->buffer.pointer; + + obj = &element->package.elements[1]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.type = obj->integer.value; + /* + * There are known cases in which the _CST output does not + * contain C1, so if the type of the first state found is not + * C1, leave an empty slot for C1 to be filled in later. + */ + if (i == 1 && cx.type != ACPI_STATE_C1) + last_index = 1; + + cx.address = reg->address; + cx.index = last_index + 1; + + if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { + /* + * In the majority of cases _CST describes C1 as + * a FIXED_HARDWARE C-state, but if the command + * line forbids using MWAIT, use CSTATE_HALT for + * C1 regardless. + */ + if (cx.type == ACPI_STATE_C1 && + boot_option_idle_override == IDLE_NOMWAIT) { + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + cx.entry_method = ACPI_CSTATE_FFH; + } + } else if (cx.type == ACPI_STATE_C1) { + /* + * In the special case of C1, FIXED_HARDWARE can + * be handled by executing the HLT instruction. 
+ */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + continue; + } + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + cx.entry_method = ACPI_CSTATE_SYSTEMIO; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", + cx.address); + } else { + continue; + } + + if (cx.type == ACPI_STATE_C1) + cx.valid = 1; + + obj = &element->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.latency = obj->integer.value; + + obj = &element->package.elements[3]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + memcpy(&info->states[++last_index], &cx, sizeof(cx)); + } + + acpi_handle_info(handle, "Found %d idle states\n", last_index); + + info->count = last_index; + + end: + kfree(buffer.pointer); + + return ret; +} +EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst); +#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index f0b52266b3ac6c45ea83a7f446513a179d60531b..51b39e8c2f8af4d4d2f357c11ed7b218fdfb9f91 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -73,6 +73,12 @@ module_param(report_key_events, int, 0644); MODULE_PARM_DESC(report_key_events, "0: none, 1: output changes, 2: brightness changes, 3: all"); +static int hw_changes_brightness = -1; +module_param(hw_changes_brightness, int, 0644); +MODULE_PARM_DESC(hw_changes_brightness, + "Set this to 1 on buggy hw which changes the brightness itself when " + "a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness"); + /* * Whether the struct acpi_video_device_attrib::device_id_scheme bit should be * assumed even if not actually set. @@ -418,6 +424,14 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } +static int video_hw_changes_brightness( + const struct dmi_system_id *d) +{ + if (hw_changes_brightness == -1) + hw_changes_brightness = 1; + return 0; +} + static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -542,6 +556,21 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), }, }, + /* + * Some machines change the brightness themselves when a brightness + * hotkey gets pressed, despite us telling them not to. In this case + * acpi_video_device_notify() should only call backlight_force_update( + * BACKLIGHT_UPDATE_HOTKEY) and not do anything else. 
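
The two _CST helpers exported from acpi_processor.c above are meant to be used as a pair: a caller first claims _CST control from the platform firmware, then evaluates _CST for a given CPU to fill in its C-state table. A minimal sketch of that sequence, assuming kernel context and that both prototypes are visible via acpi/processor.h; the wrapper function itself is hypothetical:

#include <linux/acpi.h>
#include <acpi/processor.h>

static int example_get_cstates(acpi_handle handle, u32 cpu,
			       struct acpi_processor_power *power)
{
	int ret;

	/* Hand _CST control to the OS via the FADT SMI command, if required. */
	if (!acpi_processor_claim_cst_control())
		return -ENODEV;

	/* Fill power->states[] and power->count from the _CST package. */
	ret = acpi_processor_evaluate_cst(handle, cpu, power);
	if (ret)
		return ret;

	return power->count ? 0 : -ENODEV;
}
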
+ */ + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */ + .callback = video_hw_changes_brightness, + .ident = "Packard Bell EasyNote MZ35", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"), + DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"), + }, + }, {} }; @@ -1625,6 +1654,14 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) bus = video_device->video; input = bus->input; + if (hw_changes_brightness > 0) { + if (video_device->backlight) + backlight_force_update(video_device->backlight, + BACKLIGHT_UPDATE_HOTKEY); + acpi_notifier_call_chain(device, event, 0); + return; + } + switch (event) { case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */ brightness_switch_event(video_device, event); @@ -1735,12 +1772,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) return; count++; - acpi_get_parent(device->dev->handle, &acpi_parent); - - pdev = acpi_get_pci_dev(acpi_parent); - if (pdev) { - parent = &pdev->dev; - pci_dev_put(pdev); + if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) { + pdev = acpi_get_pci_dev(acpi_parent); + if (pdev) { + parent = &pdev->dev; + pci_dev_put(pdev); + } } memset(&props, 0, sizeof(struct backlight_properties)); @@ -2124,21 +2161,29 @@ static int __init intel_opregion_present(void) return opregion; } +/* Check if the chassis-type indicates there is no builtin LCD panel */ static bool dmi_is_desktop(void) { const char *chassis_type; + unsigned long type; chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); if (!chassis_type) return false; - if (!strcmp(chassis_type, "3") || /* 3: Desktop */ - !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ - !strcmp(chassis_type, "5") || /* 5: Pizza Box */ - !strcmp(chassis_type, "6") || /* 6: Mini Tower */ - !strcmp(chassis_type, "7") || /* 7: Tower */ - !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ + if (kstrtoul(chassis_type, 10, &type) != 0) + return false; + + switch (type) { + case 0x03: /* Desktop */ + case 0x04: /* Low Profile Desktop */ + case 0x05: /* Pizza Box */ + case 0x06: /* Mini Tower */ + case 0x07: /* Tower */ + case 0x10: /* Lunch Box */ + case 0x11: /* Main Server Chassis */ return true; + } return false; } diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 704bebbd35b06adc0d9faeb98f22535586330d61..bfcc68b9f708d12ea75bf94c076a3b7be285ada9 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -69,7 +69,8 @@ acpi_status acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked); acpi_status -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info, + u8 clear_on_enable); acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); @@ -229,6 +230,8 @@ acpi_ev_default_region_setup(acpi_handle handle, acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj); +u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); + /* * evsci - SCI (System Control Interrupt) handling/dispatch */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 0f28a38a43ea1dadaeafbbefa478637deb1de90d..99b0da89910989f8ef53caefd2e64333036a49a0 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -395,9 +395,9 @@ struct acpi_simple_repair_info { /* Info for running the _REG methods */ struct acpi_reg_walk_info { - acpi_adr_space_type space_id; 
u32 function; u32 reg_run_count; + acpi_adr_space_type space_id; }; /***************************************************************************** diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 9fd9a98a9cbe89fd033475eacadfae9a495e526b..5255a0837c82bfe9344730e2d60d8885fd55c2a7 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -170,6 +170,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object) elements = ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS * sizeof(union acpi_object)); + if (!elements) + return (AE_NO_MEMORY); this = string; for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) { diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index 992bd7b92540d6afff1083164e0f0e237a2f2a08..49afba8c916a71eaad08d201acf73ba5bdfa164c 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -571,6 +571,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg) object_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info)); + if (!object_info) + return (AE_NO_MEMORY); + /* Walk the namespace from the root */ (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 0da96268deb51bd631b175aae898222829c0bb74..ebaa74d81d7725eb82f077502b3f546e13724266 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -85,7 +85,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state, walk_state->parser_state.pkg_end; control_state->control.opcode = op->common.aml_opcode; control_state->control.loop_timeout = acpi_os_get_timer() + - (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC); + ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC); /* Push the control state on this walk's control stack */ diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 30fe89545d6ab614a32d8d563e2d335dc25613b2..bcc6a7acc576272763df04453bf780a29fd73c3e 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -244,7 +244,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index dd4deb678d13ea24df8de76cf6e4181a91bf99e1..a00516d9538c2b0a1ed792f07182d90faf538ca9 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; - goto cleanup; + goto pop_walk_state; } info->parameters = &this_walk_state->operands[0]; @@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, ACPI_FREE(info); if (ACPI_FAILURE(status)) { - goto cleanup; + goto pop_walk_state; } /* @@ -561,6 +561,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(status); +pop_walk_state: + + /* On error, pop the walk state to be deleted from thread */ + + acpi_ds_pop_walk_state(thread); + cleanup: /* On error, we must terminate the method properly */ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 
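
The dscontrol.c hunk above moves the u64 cast inside the multiplication, so the loop-timeout product is computed in 64-bit arithmetic instead of being truncated to 32 bits before the cast widens it. A small userspace illustration of the difference; the operand values are illustrative, not ACPICA defaults:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iterations = 0xffff;		/* illustrative 32-bit loop count */
	uint32_t per_sec = 10000000;		/* 100 ns ticks per second */

	/* The 32-bit product wraps before the cast can widen it. */
	uint64_t truncated = (uint64_t)(iterations * per_sec);
	/* Widening one operand first keeps the full 64-bit product. */
	uint64_t correct = (uint64_t)iterations * per_sec;

	printf("truncated: %llu\ncorrect:   %llu\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}
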
e9fb0bf3c8d250662b14e22cb5ff12ba3ac238d2..2f4641e5ecde800b3e40ef88ffb473f987bf3f28 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); + status = acpi_ut_add_address_range(obj_desc->region.space_id, + obj_desc->region.address, + obj_desc->region.length, node); + /* Now the address and length are valid for this opregion */ obj_desc->region.flags |= AOPOBJ_DATA_VALID; @@ -519,6 +523,10 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); + status = acpi_ut_add_address_range(obj_desc->region.space_id, + obj_desc->region.address, + obj_desc->region.length, node); + /* Now the address and length are valid for this opregion */ obj_desc->region.flags |= AOPOBJ_DATA_VALID; diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 8d1b754005158ff0d90329c80e908bebd473a142..515a16989dd3fb1f7d2cffcf1eb854262cd36d3a 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -670,6 +670,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS]; u32 arg_count = 0; u32 index = walk_state->num_operands; + u32 prev_num_operands = walk_state->num_operands; + u32 new_num_operands; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg); @@ -698,6 +700,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, /* Create the interpreter arguments, in reverse order */ + new_num_operands = index; index--; for (i = 0; i < arg_count; i++) { arg = arguments[index]; @@ -722,7 +725,11 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, * pop everything off of the operand stack and delete those * objects */ - acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); + walk_state->num_operands = i; + acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state); + + /* Restore operand count */ + walk_state->num_operands = prev_num_operands; ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index)); return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index d06c414462822dfb0ceb49691b0643309fa653e6..ba53662f121799a5c1d12945341dcc4eb1357184 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -412,6 +412,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); + /* + * Disassembler: handle create field operators here. + * + * create_buffer_field is a deferred op that is typically processed in load + * pass 2. However, disassembly of control method contents walk the parse + * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed + * in a later walk. This is a problem when there is a control method that + * has the same name as the AML_CREATE object. In this case, any use of the + * name segment will be detected as a method call rather than a reference + * to a buffer field. + * + * This earlier creation during disassembly solves this issue by inserting + * the named object in the ACPI namespace so that references to this name + * would be a name string rather than a method call. 
+ */ + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) && + (walk_state->op_info->flags & AML_CREATE)) { + status = acpi_ds_create_buffer_field(op, walk_state); + return_ACPI_STATUS(status); + } + /* We are only interested in opcodes that have an associated name */ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index e10fec99a182eca363167e27e1a6338e42586c09..4b5d3b4c627a723f931bc94713f3cacbb799e1bb 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked) * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE + * clear_on_enable - Clear GPE status before enabling it * * RETURN: Status * @@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked) ******************************************************************************/ acpi_status -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info, + u8 clear_on_enable) { acpi_status status = AE_OK; @@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) /* Enable on first reference */ + if (clear_on_enable) { + (void)acpi_hw_clear_gpe(gpe_event_info); + } + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { status = acpi_ev_enable_gpe(gpe_event_info); diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index b253063b09d39c1c3c5bb3c81a375c8cec8756cf..8d96270ed8c738e6376280877562745238a801cf 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, continue; } - status = acpi_ev_add_gpe_reference(gpe_event_info); + status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 70c2bd169f66923f3937f675c2254226af767cfe..49decca4e08ffeabbc0fc4f421a4ad88015884fe 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -653,6 +653,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, ACPI_FUNCTION_TRACE(ev_execute_reg_methods); + /* + * These address spaces do not need a call to _REG, since the ACPI + * specification defines them as: "must always be accessible". Since + * they never change state (never become unavailable), no need to ever + * call _REG on them. Also, a data_table is not a "real" address space, + * so do not call _REG. September 2018. 
+ */ + if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) || + (space_id == ACPI_ADR_SPACE_SYSTEM_IO) || + (space_id == ACPI_ADR_SPACE_DATA_TABLE)) { + return_VOID; + } + info.space_id = space_id; info.function = function; info.reg_run_count = 0; @@ -714,8 +727,8 @@ acpi_ev_reg_run(acpi_handle obj_handle, } /* - * We only care about regions.and objects that are allowed to have address - * space handlers + * We only care about regions and objects that are allowed to have + * address space handlers */ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { return (AE_OK); diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 39284deedd885f3c925de34bcf9c183e47a2bf70..17df5dacd43cf2c1687c4b0d0cf02c414f1f4706 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -16,9 +16,6 @@ #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evrgnini") -/* Local prototypes */ -static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); - /******************************************************************************* * * FUNCTION: acpi_ev_system_memory_region_setup @@ -33,7 +30,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); * DESCRIPTION: Setup a system_memory operation region * ******************************************************************************/ - acpi_status acpi_ev_system_memory_region_setup(acpi_handle handle, u32 function, @@ -313,7 +309,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, * ******************************************************************************/ -static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) +u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) { acpi_status status; struct acpi_pnp_device_id *hid; diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index febc332b00ac1313716b96e01c6fc3cc977e3ac3..841557bda64191602f2766425ee19d85a6d8f88b 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, ACPI_GPE_DISPATCH_METHOD) || (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) { - (void)acpi_ev_add_gpe_reference(gpe_event_info); + (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE); if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) { /* Poll edge triggered GPEs to handle existing events */ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index b2d5f66cc1b055863492567007fee2444bfa1150..4188731e7c406ea20d4be04c65eb3a478e121d40 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) if (gpe_event_info) { if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != ACPI_GPE_DISPATCH_NONE) { - status = acpi_ev_add_gpe_reference(gpe_event_info); + status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE); if (ACPI_SUCCESS(status) && ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) { diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 091415b14fbf1109e14e292c233f53f04dadf1e9..3b3a25d9f0e6d25717b3a0b6e43d4f79e525ce17 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -193,7 +193,6 @@ acpi_remove_address_space_handler(acpi_handle device, */ region_obj = handler_obj->address_space.region_list; - } /* Remove this Handler object from the list */ diff --git 
a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index ba9fbae0cf91fe87fb454ff53bfcc25885ed7503..319f4bc6a83947b93f2fc13b54f781ee5b80ea8e 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { - goto cleanup; + return_ACPI_STATUS + (status); } return_desc = temp_desc; diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 738f3c732363a06cd6d246b57eca2855906621e9..91143bcfe0904cdb51a34318caa61516e51fa780 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) if (info->connection_node) { second_desc = info->connection_node->object; + if (second_desc == NULL) { + break; + } if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_arguments(second_desc); @@ -473,10 +476,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 97bbfd07fcf759c86e033f3d2d83b6cb8b334a3b..2d99cbbf82d102a9daa4a1ecc8f38ffd3a0a1265 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -43,7 +43,6 @@ acpi_ex_system_memory_space_handler(u32 function, struct acpi_mem_space_context *mem_info = region_context; u32 length; acpi_size map_length; - acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif @@ -120,26 +119,8 @@ acpi_ex_system_memory_space_handler(u32 function, map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); - /* - * If mapping the entire remaining portion of the region will cross - * a page boundary, just map up to the page boundary, do not cross. - * On some systems, crossing a page boundary while mapping regions - * can cause warnings if the pages have different attributes - * due to resource management. - * - * This has the added benefit of constraining a single mapping to - * one page, which is similar to the original code that used a 4k - * maximum window. 
- */ - page_boundary_map_length = (acpi_size) - (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address); - if (page_boundary_map_length == 0) { - page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; - } - - if (map_length > page_boundary_map_length) { - map_length = page_boundary_map_length; - } + if (map_length > ACPI_DEFAULT_PAGE_SIZE) + map_length = ACPI_DEFAULT_PAGE_SIZE; /* Create a new mapping starting at the address given */ diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 5470213b8e645986a3c01b5e0e25ce7961d12dc6..6eb63db72249b0291be099310163841f2b59340e 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -74,6 +74,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node) ACPI_FUNCTION_NAME(ns_delete_node); + if (!node) { + return_VOID; + } + /* Detach an object if there is one */ acpi_ns_detach_object(node); diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 8638f43cfc3d87184c9a0cc91318f07ab5abff8a..79d86da1c8924a971bacf928008634c8aa3224ca 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) } } + if (obj_desc->common.type == ACPI_TYPE_REGION) { + acpi_ut_remove_address_range(obj_desc->region.space_id, node); + } + /* Clear the Node entry in all cases */ node->object = NULL; diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index ff2ab8fbec3841a13e8e7ca815aa0d23e99068f2..8de80bf7802b4a4ee21617743db3fc2c328e36dd 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, * Try to fix if there was no return object. Warning if failed to fix. 
*/ if (!return_object) { - if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { - if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { + if (expected_btypes) { + if (!(expected_btypes & ACPI_RTYPE_NONE) && + package_index != ACPI_NOT_PACKAGE_ELEMENT) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, @@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, if (ACPI_SUCCESS(status)) { return (AE_OK); /* Repair was successful */ } - } else { + } + + if (expected_btypes != ACPI_RTYPE_NONE) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, "Missing expected return value")); + return (AE_AML_NO_RETURN_VALUE); } - - return (AE_AML_NO_RETURN_VALUE); } } diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index e9a061da9bb20c5f0a2734469ee92dee2b8904db..c325789a62bf01a0c746afcf8eb93d4cdcb4b4d6 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } } /* Null child means "get first node" */ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 176d28d60125d9e2804e8af80fcbe41cc9cd51dc..956aaf6a3f3d041e26429b96c94c8fd95704e4b8 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state); static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state); +static void acpi_ps_free_field_list(union acpi_parse_object *start); + /******************************************************************************* * * FUNCTION: acpi_ps_get_next_package_length @@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state return_PTR(field); } +/******************************************************************************* + * + * FUNCTION: acpi_ps_free_field_list + * + * PARAMETERS: start - First Op in field list + * + * RETURN: None. + * + * DESCRIPTION: Free all Op objects inside a field list. 
+ * + ******************************************************************************/ + +static void acpi_ps_free_field_list(union acpi_parse_object *start) +{ + union acpi_parse_object *cur = start; + union acpi_parse_object *next; + union acpi_parse_object *arg; + + while (cur) { + next = cur->common.next; + + /* AML_INT_CONNECTION_OP can have a single argument */ + + arg = acpi_ps_get_arg(cur, 0); + if (arg) { + acpi_ps_free_op(arg); + } + + acpi_ps_free_op(cur); + cur = next; + } +} + /******************************************************************************* * * FUNCTION: acpi_ps_get_next_arg @@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, while (parser_state->aml < parser_state->pkg_end) { field = acpi_ps_get_next_field(parser_state); if (!field) { + if (arg) { + acpi_ps_free_field_list(arg); + } + return_ACPI_STATUS(AE_NO_MEMORY); } @@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_NOT_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } } else { /* Single complex argument, nothing returned */ @@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, acpi_ps_get_next_namepath(walk_state, parser_state, arg, ACPI_POSSIBLE_METHOD_CALL); + if (ACPI_FAILURE(status)) { + acpi_ps_free_op(arg); + return_ACPI_STATUS(status); + } if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 34fc2f7476eddadf678a85000a0c0023c9f3bb64..b0789c483b0f4bf3d0710bd467b46479200ac274 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -417,6 +417,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) union acpi_parse_object *op = NULL; /* current op */ struct acpi_parse_state *parser_state; u8 *aml_op_start = NULL; + u8 opcode_length; ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state); @@ -540,8 +541,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) "Skip parsing opcode %s", acpi_ps_get_opcode_name (walk_state->opcode))); + + /* + * Determine the opcode length before skipping the opcode. + * An opcode can be 1 byte or 2 bytes in length. + */ + opcode_length = 1; + if ((walk_state->opcode & 0xFF00) == + AML_EXTENDED_OPCODE) { + opcode_length = 2; + } walk_state->parser_state.aml = - walk_state->aml + 1; + walk_state->aml + opcode_length; + walk_state->parser_state.aml = acpi_ps_get_next_package_end (&walk_state->parser_state); diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 3138e7a00da815dc44f882b4be35f97b3250e82b..81515897ae618b849f87a0c344078704ea2ab81b 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -640,7 +640,8 @@ acpi_status acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, union acpi_parse_object *op, acpi_status status) { - acpi_status status2; + acpi_status return_status = AE_OK; + u8 ascending = TRUE; ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state); @@ -654,7 +655,8 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, op)); do { if (op) { - if (walk_state->ascending_callback != NULL) { + if (ascending && + walk_state->ascending_callback != NULL) { walk_state->op = op; walk_state->op_info = acpi_ps_get_opcode_info(op->common. 
@@ -676,49 +678,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } if (status == AE_CTRL_TERMINATE) { - status = AE_OK; - - /* Clean up */ - do { - if (op) { - status2 = - acpi_ps_complete_this_op - (walk_state, op); - if (ACPI_FAILURE - (status2)) { - return_ACPI_STATUS - (status2); - } - } - - acpi_ps_pop_scope(& - (walk_state-> - parser_state), - &op, - &walk_state-> - arg_types, - &walk_state-> - arg_count); - - } while (op); - - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = AE_CTRL_TERMINATE; } else if (ACPI_FAILURE(status)) { /* First error is most important */ - (void) - acpi_ps_complete_this_op(walk_state, - op); - return_ACPI_STATUS(status); + ascending = FALSE; + return_status = status; } } - status2 = acpi_ps_complete_this_op(walk_state, op); - if (ACPI_FAILURE(status2)) { - return_ACPI_STATUS(status2); + status = acpi_ps_complete_this_op(walk_state, op); + if (ACPI_FAILURE(status)) { + ascending = FALSE; + if (ACPI_SUCCESS(return_status) || + return_status == AE_CTRL_TERMINATE) { + return_status = status; + } } } @@ -728,5 +707,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state, } while (op); - return_ACPI_STATUS(status); + return_ACPI_STATUS(return_status); } diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 8d7dc98bad17b50d9224e4802ab8f90046250641..ca01e02af9cba241fa0d7d39aa904c7b2a84a47d 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, - AML_FLAGS_EXEC_0A_0T_1R), + AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE), /* ACPI 5.0 opcodes */ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index a872ed7879ca7a938d6ea0ea86fd512cabb381da..056c1741c1e39e5daec7273d971eb24250301437 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); - if (ACPI_FAILURE(status)) { - - /* On failure, delete the destination package object */ - - acpi_ut_remove_reference(dest_obj); - } - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8cc4392c61f33aea636251426e263b7017613498..db79a826e6b8dd008a607bdca02c0a07cd35ec39 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -410,6 +410,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); + return; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, @@ -563,11 +564,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -608,6 +604,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 12771fcf0417df30ed36ddceed49bea7532b9d8e..e5f326fdf5ec0f922d60747b3b6f0b8cd7086c05 
100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -31,6 +31,7 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt +#define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; @@ -42,15 +43,7 @@ static void __init bert_print_all(struct acpi_bert_region *region, int remain = region_len; u32 estatus_len; - if (!estatus->block_status) - return; - - while (remain > sizeof(struct acpi_bert_region)) { - if (cper_estatus_check(estatus)) { - pr_err(FW_BUG "Invalid error record.\n"); - return; - } - + while (remain >= sizeof(struct acpi_bert_region)) { estatus_len = cper_estatus_len(estatus); if (remain < estatus_len) { pr_err(FW_BUG "Truncated status block (length: %u).\n", @@ -58,9 +51,21 @@ static void __init bert_print_all(struct acpi_bert_region *region, return; } - pr_info_once("Error records from previous boot:\n"); + /* No more error records. */ + if (!estatus->block_status) + return; - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (cper_estatus_check(estatus)) { + pr_err(FW_BUG "Invalid error record.\n"); + return; + } + + pr_info_once("Error records from previous boot:\n"); + if (region_len < ACPI_BERT_PRINT_MAX_LEN) + cper_estatus_print(KERN_INFO HW_ERR, estatus); + else + pr_info_once("Max print length exceeded, table data is available at:\n" + "/sys/firmware/acpi/tables/data/BERT"); /* * Because the boot error source is "one-time polled" type, @@ -70,10 +75,6 @@ static void __init bert_print_all(struct acpi_bert_region *region, estatus->block_status = 0; estatus = (void *)estatus + estatus_len; - /* No more error records. */ - if (!estatus->block_status) - return; - remain -= estatus_len; } } @@ -82,7 +83,7 @@ static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 3c5ea7cb693ef558829ab15b930d234cd51b9d67..445e85394db6138138cfd31ee8fd8b334b1135e1 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -899,7 +899,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 0; + return 1; } __setup("erst_disable", setup_erst_disable); @@ -1176,7 +1176,6 @@ static int __init erst_init(void) "Error Record Serialization Table (ERST) support is initialized.\n"); buf = kmalloc(erst_erange.size, GFP_KERNEL); - spin_lock_init(&erst_info.buf_lock); if (buf) { erst_info.buf = buf + sizeof(struct cper_pstore_record); erst_info.bufsize = erst_erange.size - diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 02c6fd9caff7dc173c0d7552532e0c3588800e27..73a04c0b19eb93085723518e04ed30983a69511b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -85,6 +84,12 @@ ((struct acpi_hest_generic_status *) \ ((struct ghes_estatus_node *)(estatus_node) + 1)) +#define GHES_VENDOR_ENTRY_LEN(gdata_len) \ + (sizeof(struct ghes_vendor_record_entry) + (gdata_len)) +#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \ + ((struct acpi_hest_generic_data *) \ + ((struct ghes_vendor_record_entry *)(vendor_entry) + 1)) + static inline bool is_hest_type_generic_v2(struct ghes *ghes) { return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; @@ -109,6 +114,12 @@ module_param_named(disable, ghes_disable, bool, 0); static LIST_HEAD(ghes_hed); static DEFINE_MUTEX(ghes_list_mutex); +ATOMIC_NOTIFIER_HEAD(ghes_mem_err_chain); 
+EXPORT_SYMBOL(ghes_mem_err_chain); + +BLOCKING_NOTIFIER_HEAD(ghes_ts_err_chain); +EXPORT_SYMBOL(ghes_ts_err_chain); + /* * Because the memory area used to transfer hardware error information * from BIOS to Linux can be determined only in NMI, IRQ or timer @@ -121,6 +132,12 @@ static DEFINE_MUTEX(ghes_list_mutex); static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); +struct ghes_vendor_record_entry { + struct work_struct work; + int error_severity; + char vendor_record[]; +}; + static struct gen_pool *ghes_estatus_pool; static unsigned long ghes_estatus_pool_size_request; @@ -171,40 +188,40 @@ static int ghes_estatus_pool_init(void) return 0; } -static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool, +static void ghes_estatus_pool_free_chunk(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data) { - free_page(chunk->start_addr); + vfree((void *)chunk->start_addr); } static void ghes_estatus_pool_exit(void) { gen_pool_for_each_chunk(ghes_estatus_pool, - ghes_estatus_pool_free_chunk_page, NULL); + ghes_estatus_pool_free_chunk, NULL); gen_pool_destroy(ghes_estatus_pool); } static int ghes_estatus_pool_expand(unsigned long len) { - unsigned long i, pages, size, addr; - int ret; + unsigned long size, addr; ghes_estatus_pool_size_request += PAGE_ALIGN(len); size = gen_pool_size(ghes_estatus_pool); if (size >= ghes_estatus_pool_size_request) return 0; - pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE; - for (i = 0; i < pages; i++) { - addr = __get_free_page(GFP_KERNEL); - if (!addr) - return -ENOMEM; - ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1); - if (ret) - return ret; - } - return 0; + addr = (unsigned long)vmalloc(PAGE_ALIGN(len)); + if (!addr) + return -ENOMEM; + + /* + * New allocation must be visible in all pgd before it can be found by + * an NMI allocating from the pool. 
+ */ + vmalloc_sync_mappings(); + + return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); } static int map_gen_v2(struct ghes *ghes) @@ -459,6 +476,63 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) #endif } +static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list); + +int ghes_register_vendor_record_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier); + +void ghes_unregister_vendor_record_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&vendor_record_notify_list, nb); +} +EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier); + +static void ghes_vendor_record_work_func(struct work_struct *work) +{ + struct ghes_vendor_record_entry *entry; + struct acpi_hest_generic_data *gdata; + u32 len; + + entry = container_of(work, struct ghes_vendor_record_entry, work); + gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry); + + blocking_notifier_call_chain(&vendor_record_notify_list, + entry->error_severity, gdata); + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len); +} + +static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, + int sev) +{ + struct acpi_hest_generic_data *copied_gdata; + struct ghes_vendor_record_entry *entry; + u32 len; + + len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata)); + entry = (void *)gen_pool_alloc(ghes_estatus_pool, len); + if (!entry) + return; + + copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry); + memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata)); + entry->error_severity = sev; + + INIT_WORK(&entry->work, ghes_vendor_record_work_func); + schedule_work(&entry->work); +} + + +void __weak ghes_arm_process_error(struct ghes *ghes, + struct cper_sec_proc_arm *err, int sec_sev) +{ + log_arm_hw_error(err, sec_sev); +} + static void ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -480,6 +554,13 @@ static void ghes_do_proc(struct ghes *ghes, if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); + struct ghes_mem_err mem; + + mem.notify_type = ghes->generic->notify.type; + mem.severity = gdata->error_severity; + mem.mem_err = mem_err; + + atomic_notifier_call_chain(&ghes_mem_err_chain, 0, &mem); ghes_edac_report_mem_error(sev, mem_err); @@ -492,7 +573,10 @@ static void ghes_do_proc(struct ghes *ghes, else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata); - log_arm_hw_error(err); + ghes_arm_process_error(ghes, err, sec_sev); + } else if (guid_equal(sec_type, &CPER_SEC_TS_CORE)) { + blocking_notifier_call_chain(&ghes_ts_err_chain, + 0, acpi_hest_get_payload(gdata)); } else { void *err = acpi_hest_get_payload(gdata); @@ -500,6 +584,9 @@ static void ghes_do_proc(struct ghes *ghes, sec_sev, err, gdata->error_data_length); } + + /* Customization deliver all types error to driver. */ + ghes_defer_non_standard_event(gdata, sev); } } @@ -691,6 +778,8 @@ static void __ghes_panic(struct ghes *ghes) { __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus); + ghes_clear_estatus(ghes); + /* reboot to log the error! 
*/ if (!panic_timeout) panic_timeout = ghes_panic_timeout; @@ -947,7 +1036,6 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) sev = ghes_severity(ghes->estatus->error_severity); if (sev >= GHES_SEV_PANIC) { - oops_begin(); ghes_print_queued_estatus(); __ghes_panic(ghes); } diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index b1e9f81ebeea25b58ba6c0f9820ad38b4bc5b9e0..d536e4d132eb4f042ad302299fa712bb7c21d1fb 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -215,7 +215,7 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 5a6f80fce0d6c6cc56bd74ab018981d01e498009..4ef04f0ea94c66811433763e6c306b96cad2c0dd 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -7,3 +7,6 @@ config ACPI_IORT config ACPI_GTDT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 1017def2ea12edc0d8a3c87f2ba7f0f40e577f0e..81408ce4050650097346f6dcfd88ade7978bdc59 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c index 92f9edf9d11ed617bfeea402487d3fdcd779803d..7a181a8a9bf044fdd15fdd289df4c93db0b08c33 100644 --- a/drivers/acpi/arm64/gtdt.c +++ b/drivers/acpi/arm64/gtdt.c @@ -39,7 +39,7 @@ struct acpi_gtdt_descriptor { static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata; -static inline void *next_platform_timer(void *platform_timer) +static inline __init void *next_platform_timer(void *platform_timer) { struct acpi_gtdt_header *gh = platform_timer; @@ -332,7 +332,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, int index) { struct platform_device *pdev; - int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + int irq; /* * According to SBSA specification the size of refresh and control @@ -341,7 +341,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, struct resource res[] = { DEFINE_RES_MEM(wd->control_frame_address, SZ_4K), DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K), - DEFINE_RES_IRQ(irq), + {}, }; int nr_res = ARRAY_SIZE(res); @@ -351,10 +351,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, if (!(wd->refresh_frame_address && wd->control_frame_address)) { pr_err(FW_BUG "failed to get the Watchdog base address.\n"); - acpi_unregister_gsi(wd->timer_interrupt); return -EINVAL; } + irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + res[2] = (struct resource)DEFINE_RES_IRQ(irq); if (irq <= 0) { pr_warn("failed to map the Watchdog interrupt.\n"); nr_res--; @@ -367,7 +368,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, */ pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res); if (IS_ERR(pdev)) { - acpi_unregister_gsi(wd->timer_interrupt); + if (irq > 0) + acpi_unregister_gsi(wd->timer_interrupt); return PTR_ERR(pdev); } diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 08f26db2da7e13d0a6d8d69903f692e2cccb3f68..9ad4ee8884dedd881a57b2fa7e60b7fa1a862292 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -25,6 +25,7 @@ #include #include #include 
+#include #define IORT_TYPE_MASK(type) (1 << (type)) #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) @@ -306,6 +307,59 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node, return status; } +struct iort_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_id_count_workaround; + +static struct iort_workaround_oem_info wa_info[] __initdata = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static void __init +iort_check_id_count_workaround(struct acpi_table_header *tbl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_id_count_workaround = true; + pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); + break; + } + } +} + +static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) +{ + u32 map_max = map->input_base + map->id_count; + + /* + * The IORT specification revision D (Section 3, table 4, page 9) says + * Number of IDs = The number of IDs in the range minus one, but the + * IORT code ignored the "minus one", and some firmware did that too, + * so apply a workaround here to keep compatible with both the spec + * compliant and non-spec compliant firmwares. + */ + if (apply_id_count_workaround) + map_max--; + + return map_max; +} + static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) { @@ -322,8 +376,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return -ENXIO; } - if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) return -ENXIO; *rid_out = map->output_base + (rid_in - map->input_base); @@ -356,7 +409,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || - node->type == ACPI_IORT_NODE_SMMU_V3) { + node->type == ACPI_IORT_NODE_SMMU_V3 || + node->type == ACPI_IORT_NODE_PMCG) { *id_out = map->output_base; return parent; } @@ -368,6 +422,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, static int iort_get_id_mapping_index(struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + struct acpi_iort_pmcg *pmcg; switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: @@ -394,6 +449,12 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) } return smmu->id_mapping_index; + case ACPI_IORT_NODE_PMCG: + pmcg = (struct acpi_iort_pmcg *)node->node_data; + if (pmcg->overflow_gsiv || node->mapping_count == 0) + return -EINVAL; + + return 0; default: return -EINVAL; } @@ -616,8 +677,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, /* Move to ITS specific data */ its = (struct acpi_iort_its_group *)node->node_data; - if (idx > its->its_count) { - dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", + if (idx >= its->its_count) { + dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", idx, its->its_count); return -ENXIO; } @@ -700,7 +761,7 @@ static void 
iort_set_device_domain(struct device *dev, */ static struct irq_domain *iort_get_platform_device_domain(struct device *dev) { - struct acpi_iort_node *node, *msi_parent; + struct acpi_iort_node *node, *msi_parent = NULL; struct fwnode_handle *iort_fwnode; struct acpi_iort_its_group *its; int i; @@ -941,6 +1002,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size) ncomp = (struct acpi_iort_named_component *)node->node_data; + if (!ncomp->memory_address_limit) { + pr_warn(FW_BUG "Named component missing memory address limit\n"); + return -EINVAL; + } + *size = ncomp->memory_address_limit >= 64 ? U64_MAX : 1ULL<memory_address_limit; @@ -951,14 +1017,20 @@ static int rc_dma_get_range(struct device *dev, u64 *size) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; + struct pci_bus *pbus = to_pci_dev(dev)->bus; node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, - iort_match_node_callback, dev); + iort_match_node_callback, &pbus->dev); if (!node || node->revision < 1) return -ENODEV; rc = (struct acpi_iort_root_complex *)node->node_data; + if (!rc->memory_address_limit) { + pr_warn(FW_BUG "Root complex missing memory address limit\n"); + return -EINVAL; + } + *size = rc->memory_address_limit >= 64 ? U64_MAX : 1ULL<memory_address_limit; @@ -1015,8 +1087,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) * retrieved from firmware. */ dev->bus_dma_mask = mask; - dev->coherent_dma_mask = mask; - *dev->dma_mask = mask; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); + *dev->dma_mask = min(*dev->dma_mask, mask); } *dma_addr = dmaaddr; @@ -1026,6 +1098,16 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); } +#ifdef CONFIG_IOMMU_SVA +static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) +{ + struct acpi_iort_root_complex *pci_rc; + + pci_rc = (struct acpi_iort_root_complex *)node->node_data; + return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; +} +#endif + /** * iort_iommu_configure - Set-up IOMMU configuration for a device. 
* @@ -1061,6 +1143,10 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) info.node = node; err = pci_for_each_dma_alias(to_pci_dev(dev), iort_pci_iommu_init, &info); +#ifdef CONFIG_IOMMU_SVA + if (!err && !iort_pci_rc_supports_ats(node)) + dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_NO_ATS; +#endif } else { int i = 0; @@ -1070,12 +1156,27 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) return NULL; do { +#ifdef CONFIG_IOMMU_SVA + u32 sid; +#endif parent = iort_node_map_platform_id(node, &streamid, IORT_IOMMU_TYPE, i++); if (parent) err = iort_iommu_xlate(dev, parent, streamid); + +#ifdef CONFIG_IOMMU_SVA + if (!acpi_dev_prop_read_single(ACPI_COMPANION(dev), + "streamid", DEV_PROP_U32, &sid)) { + err = iommu_fwspec_add_ids(dev, &sid, 1); + if (err) + dev_info(dev, "failed to add ids\n"); + dev->iommu_fwspec->can_stall = true; + dev->iommu_fwspec->num_pasid_bits = 0x10; + } +#endif + } while (parent && !err); } @@ -1216,32 +1317,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, } } -static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) +static void __init arm_smmu_v3_dma_configure(struct device *dev, + struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + enum dev_dma_attr attr; /* Retrieve SMMUv3 specific data */ smmu = (struct acpi_iort_smmu_v3 *)node->node_data; - return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; + attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; + + /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ + dev->dma_mask = &dev->coherent_dma_mask; + + /* Configure DMA for the page table walker */ + acpi_dma_configure(dev, attr); } #if defined(CONFIG_ACPI_NUMA) /* * set numa proximity domain for smmuv3 device */ -static void __init arm_smmu_v3_set_proximity(struct device *dev, +static int __init arm_smmu_v3_set_proximity(struct device *dev, struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; smmu = (struct acpi_iort_smmu_v3 *)node->node_data; if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { - set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm)); + int dev_node = acpi_map_pxm_to_node(smmu->pxm); + + if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) + return -EINVAL; + + set_dev_node(dev, dev_node); pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", smmu->base_address, smmu->pxm); } + return 0; } #else #define arm_smmu_v3_set_proximity NULL @@ -1299,30 +1415,104 @@ static void __init arm_smmu_init_resources(struct resource *res, } } -static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) +static void __init arm_smmu_dma_configure(struct device *dev, + struct acpi_iort_node *node) { struct acpi_iort_smmu *smmu; + enum dev_dma_attr attr; /* Retrieve SMMU specific data */ smmu = (struct acpi_iort_smmu *)node->node_data; - return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; + attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? + DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; + + /* We expect the dma masks to be equivalent for SMMU set-ups */ + dev->dma_mask = &dev->coherent_dma_mask; + + /* Configure DMA for the page table walker */ + acpi_dma_configure(dev, attr); +} + +static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) +{ + struct acpi_iort_pmcg *pmcg; + + /* Retrieve PMCG specific data */ + pmcg = (struct acpi_iort_pmcg *)node->node_data; + + /* + * There are always 2 memory resources. + * If the overflow_gsiv is present then add that for a total of 3. 
+ */ + return pmcg->overflow_gsiv ? 3 : 2; +} + +static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, + struct acpi_iort_node *node) +{ + struct acpi_iort_pmcg *pmcg; + + /* Retrieve PMCG specific data */ + pmcg = (struct acpi_iort_pmcg *)node->node_data; + + res[0].start = pmcg->page0_base_address; + res[0].end = pmcg->page0_base_address + SZ_4K - 1; + res[0].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. + */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } + + if (pmcg->overflow_gsiv) + acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", + ACPI_EDGE_SENSITIVE, &res[2]); +} + +static struct acpi_platform_list pmcg_plat_info[] __initdata = { + /* HiSilicon Hip08 Platform */ + {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, 0, + IORT_SMMU_V3_PMCG_HISI_HIP08}, + { } +}; + +static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) +{ + u32 model; + int idx; + + idx = acpi_match_platform_list(pmcg_plat_info); + if (idx >= 0) + model = pmcg_plat_info[idx].data; + else + model = IORT_SMMU_V3_PMCG_GENERIC; + + return platform_device_add_data(pdev, &model, sizeof(model)); } struct iort_dev_config { const char *name; int (*dev_init)(struct acpi_iort_node *node); - bool (*dev_is_coherent)(struct acpi_iort_node *node); + void (*dev_dma_configure)(struct device *dev, + struct acpi_iort_node *node); int (*dev_count_resources)(struct acpi_iort_node *node); void (*dev_init_resources)(struct resource *res, struct acpi_iort_node *node); - void (*dev_set_proximity)(struct device *dev, + int (*dev_set_proximity)(struct device *dev, struct acpi_iort_node *node); + int (*dev_add_platdata)(struct platform_device *pdev); }; static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { .name = "arm-smmu-v3", - .dev_is_coherent = arm_smmu_v3_is_coherent, + .dev_dma_configure = arm_smmu_v3_dma_configure, .dev_count_resources = arm_smmu_v3_count_resources, .dev_init_resources = arm_smmu_v3_init_resources, .dev_set_proximity = arm_smmu_v3_set_proximity, @@ -1330,9 +1520,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { .name = "arm-smmu", - .dev_is_coherent = arm_smmu_is_coherent, + .dev_dma_configure = arm_smmu_dma_configure, .dev_count_resources = arm_smmu_count_resources, - .dev_init_resources = arm_smmu_init_resources + .dev_init_resources = arm_smmu_init_resources, +}; + +static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { + .name = "arm-smmu-v3-pmcg", + .dev_count_resources = arm_smmu_v3_pmcg_count_resources, + .dev_init_resources = arm_smmu_v3_pmcg_init_resources, + .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, }; static __init const struct iort_dev_config *iort_get_dev_cfg( @@ -1343,6 +1540,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg( return &iort_arm_smmu_v3_cfg; case ACPI_IORT_NODE_SMMU: return &iort_arm_smmu_cfg; + case ACPI_IORT_NODE_PMCG: + return &iort_arm_smmu_v3_pmcg_cfg; default: return NULL; } @@ -1360,15 +1559,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, struct fwnode_handle *fwnode; struct platform_device *pdev; struct 
resource *r; - enum dev_dma_attr attr; int ret, count; pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); if (!pdev) return -ENOMEM; - if (ops->dev_set_proximity) - ops->dev_set_proximity(&pdev->dev, node); + if (ops->dev_set_proximity) { + ret = ops->dev_set_proximity(&pdev->dev, node); + if (ret) + goto dev_put; + } count = ops->dev_count_resources(node); @@ -1391,19 +1592,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, goto dev_put; /* - * Add a copy of IORT node pointer to platform_data to - * be used to retrieve IORT data information. + * Platform devices based on PMCG nodes uses platform_data to + * pass the hardware model info to the driver. For others, add + * a copy of IORT node pointer to platform_data to be used to + * retrieve IORT data information. */ - ret = platform_device_add_data(pdev, &node, sizeof(node)); + if (ops->dev_add_platdata) + ret = ops->dev_add_platdata(pdev); + else + ret = platform_device_add_data(pdev, &node, sizeof(node)); + if (ret) goto dev_put; - /* - * We expect the dma masks to be equivalent for - * all SMMUs set-ups - */ - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - fwnode = iort_get_fwnode(node); if (!fwnode) { @@ -1413,11 +1614,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, pdev->dev.fwnode = fwnode; - attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ? - DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; - - /* Configure DMA for the page table walker */ - acpi_dma_configure(&pdev->dev, attr); + if (ops->dev_dma_configure) + ops->dev_dma_configure(&pdev->dev, node); iort_set_device_domain(&pdev->dev, node); @@ -1517,6 +1715,26 @@ static void __init iort_init_platform_devices(void) } } +/* + * This function detects the ascend platform by oem table id. + */ +static bool ascend_platform_detected(struct acpi_table_header *h) +{ + if (!memcmp(h->oem_table_id, "HI19801P", ACPI_OEM_TABLE_ID_SIZE)) + return true; + + if (!memcmp(h->oem_table_id, "HI19802P", ACPI_OEM_TABLE_ID_SIZE)) + return true; + + if (!memcmp(h->oem_table_id, "HI19804P", ACPI_OEM_TABLE_ID_SIZE)) + return true; + + if (!memcmp(h->oem_table_id, "HI1980\0\0", ACPI_OEM_TABLE_ID_SIZE)) + return true; + + return false; +} + void __init acpi_iort_init(void) { acpi_status status; @@ -1532,5 +1750,9 @@ void __init acpi_iort_init(void) return; } + if (ascend_platform_detected(iort_table)) + ascend_enable_all_features(); + + iort_check_id_count_workaround(iort_table); iort_init_platform_devices(); } diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 0000000000000000000000000000000000000000..6f4572193eb22bdd980af3f6834b81bcf3623756 --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM ACPI + * + * Copyright (C) 2019-2020 Huawei Technologies Co., Ltd + * + * Author: Wang ShaoBo + * + * Code was partially borrowed from http://www.linux-arm.org/git?p= + * linux-jm.git;a=commit;h=10fe7d6363ae96b25f584d4a91f9d0f2fd5faf3b. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * acpi_mpam_label_cache_component_id() - Recursively find @min_physid + * for all leaf CPUs below @cpu_node, use the numa node id of @min_cpu_node + * to label the mpam cache node, which is identified by @component_id. + * @table_hdr: Pointer to the head of the PPTT table + * @cpu_node: The point in the topology to start the walk + * @component_id: The id that labels the mpam_node cache structure + */ +static int +acpi_mpam_label_cache_component_id(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + u32 *component_id) +{ + phys_cpuid_t min_physid = PHYS_CPUID_INVALID; + struct acpi_pptt_processor *min_cpu_node = NULL; + u32 logical_cpuid; + u32 acpi_processor_id; + + acpi_pptt_find_min_physid_cpu_node(table_hdr, + cpu_node, + &min_physid, + &min_cpu_node); + WARN_ON_ONCE(invalid_phys_cpuid(min_physid)); + if (min_cpu_node == NULL) + return -EINVAL; + + acpi_processor_id = min_cpu_node->acpi_processor_id; + logical_cpuid = acpi_map_cpuid(min_physid, acpi_processor_id); + if (invalid_logical_cpuid(logical_cpuid) || + !cpu_present(logical_cpuid)) { + pr_err_once("Invalid logical cpuid.\n"); + return -EINVAL; + } + + *component_id = cpu_to_node(logical_cpuid); + + return 0; +} + +static int __init acpi_mpam_parse_memory(struct acpi_mpam_header *h) +{ + u32 component_id; + struct mpam_device *dev; + struct acpi_mpam_node_memory *node = (struct acpi_mpam_node_memory *)h; + + component_id = acpi_map_pxm_to_node(node->proximity_domain); + if (component_id == NUMA_NO_NODE) + component_id = 0; + + dev = mpam_device_create_memory(component_id, node->header.base_address); + if (IS_ERR(dev)) { + pr_err("Failed to create memory node\n"); + return -EINVAL; + } + + return mpam_register_device_irq(dev, + node->header.overflow_interrupt, node->header.overflow_flags, + node->header.error_interrupt, node->header.error_interrupt_flags); +} + +static int __init acpi_mpam_parse_cache(struct acpi_mpam_header *h, + struct acpi_table_header *pptt) +{ + int ret = 0; + int level; + u32 component_id; + struct mpam_device *dev; + struct cacheinfo *ci; + struct acpi_pptt_cache *pptt_cache; + struct acpi_pptt_processor *pptt_cpu_node; + struct acpi_mpam_node_cache *node = (struct acpi_mpam_node_cache *)h; + + if (!pptt) { + pr_err("No PPTT table found, MPAM cannot be configured\n"); + return -EINVAL; + } + + pptt_cache = acpi_pptt_validate_cache_node(pptt, node->PPTT_ref); + if (!pptt_cache) { + pr_err("Broken PPTT reference in the MPAM table\n"); + return -EINVAL; + } + + /* + * We actually need a cpu_node, as a pointer to the PPTT cache + * description isn't unique. + */ + pptt_cpu_node = acpi_pptt_find_cache_backwards(pptt, pptt_cache); + + ret = acpi_mpam_label_cache_component_id(pptt, pptt_cpu_node, + &component_id); + + if (ret) { + pr_err("Failed to label cache component id\n"); + return -EINVAL; + } + + cpus_read_lock(); + ci = cacheinfo_shared_cpu_map_search(pptt_cpu_node); + if (!ci) { + pr_err_once("No CPU has cache with PPTT reference 0x%x", + node->PPTT_ref); + pr_err_once("All CPUs must be online to probe mpam.\n"); + cpus_read_unlock(); + return -ENODEV; + } + + level = ci->level; + ci = NULL; + cpus_read_unlock(); + + /* + * We may get cpu affinity in a future MPAM ACPI version; for now + * set it to NULL and use the default possible affinity.
+ */ + dev = mpam_device_create_cache(level, component_id, NULL, + node->header.base_address); + if (IS_ERR(dev)) { + pr_err("Failed to create cache node\n"); + return -EINVAL; + } + + return mpam_register_device_irq(dev, + node->header.overflow_interrupt, node->header.overflow_flags, + node->header.error_interrupt, node->header.error_interrupt_flags); +} + +static int __init acpi_mpam_parse_table(struct acpi_table_header *table, + struct acpi_table_header *pptt) +{ + char *table_offset = (char *)(table + 1); + char *table_end = (char *)table + table->length; + struct acpi_mpam_header *node_hdr; + int ret = 0; + + ret = mpam_discovery_start(); + + if (ret) + return ret; + + node_hdr = (struct acpi_mpam_header *)table_offset; + while (table_offset < table_end) { + switch (node_hdr->type) { + + case ACPI_MPAM_TYPE_CACHE: + ret = acpi_mpam_parse_cache(node_hdr, pptt); + break; + case ACPI_MPAM_TYPE_MEMORY: + ret = acpi_mpam_parse_memory(node_hdr); + break; + default: + pr_warn_once("Unknown node type %u offset %ld.", + node_hdr->type, + (table_offset-(char *)table)); + /* fall through */ + case ACPI_MPAM_TYPE_SMMU: + /* not yet supported */ + /* fall through */ + case ACPI_MPAM_TYPE_UNKNOWN: + break; + } + if (ret) + break; + + table_offset += node_hdr->length; + node_hdr = (struct acpi_mpam_header *)table_offset; + } + + if (ret) { + pr_err("discovery failed: %d\n", ret); + mpam_discovery_failed(); + } else { + ret = mpam_discovery_complete(); + if (!ret) + pr_info("Successfully init mpam by ACPI.\n"); + } + + return ret; +} + +int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam, *pptt; + acpi_status status; + int ret; + + if (!cpus_have_const_cap(ARM64_HAS_MPAM)) + return 0; + + if (acpi_disabled || mpam_enabled != MPAM_ENABLE_ACPI) + return 0; + + status = acpi_get_table(ACPI_SIG_MPAM, 0, &mpam); + if (ACPI_FAILURE(status)) + return -ENOENT; + + /* PPTT is optional, there may be no mpam cache controls */ + acpi_get_table(ACPI_SIG_PPTT, 0, &pptt); + if (ACPI_FAILURE(status)) + pptt = NULL; + + ret = acpi_mpam_parse_table(mpam, pptt); + acpi_put_table(pptt); + acpi_put_table(mpam); + + return ret; +} diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index cb97b6105f5286fa6c11c8bf787a3e1b49baf52c..fc64c8fdef0b2eb312ddf79eb2a645c05ea55efe 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -448,7 +448,7 @@ static int extract_package(struct acpi_battery *battery, u8 *ptr = (u8 *)battery + offsets[i].offset; if (element->type == ACPI_TYPE_STRING || element->type == ACPI_TYPE_BUFFER) - strncpy(ptr, element->string.pointer, 32); + strscpy(ptr, element->string.pointer, 32); else if (element->type == ACPI_TYPE_INTEGER) { strncpy(ptr, (u8 *)&element->integer.value, sizeof(u64)); @@ -683,27 +683,34 @@ static LIST_HEAD(acpi_battery_list); static LIST_HEAD(battery_hook_list); static DEFINE_MUTEX(hook_mutex); -static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock) +static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook) { struct acpi_battery *battery; + /* * In order to remove a hook, we first need to * de-register all the batteries that are registered. 
*/ - if (lock) - mutex_lock(&hook_mutex); list_for_each_entry(battery, &acpi_battery_list, list) { hook->remove_battery(battery->bat); } - list_del(&hook->list); - if (lock) - mutex_unlock(&hook_mutex); + list_del_init(&hook->list); + pr_info("extension unregistered: %s\n", hook->name); } void battery_hook_unregister(struct acpi_battery_hook *hook) { - __battery_hook_unregister(hook, 1); + mutex_lock(&hook_mutex); + /* + * Ignore already unregistered battery hooks. This might happen + * if a battery hook was previously unloaded due to an error when + * adding a new battery. + */ + if (!list_empty(&hook->list)) + battery_hook_unregister_unlocked(hook); + + mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_unregister); @@ -712,7 +719,6 @@ void battery_hook_register(struct acpi_battery_hook *hook) struct acpi_battery *battery; mutex_lock(&hook_mutex); - INIT_LIST_HEAD(&hook->list); list_add(&hook->list, &battery_hook_list); /* * Now that the driver is registered, we need @@ -729,7 +735,7 @@ void battery_hook_register(struct acpi_battery_hook *hook) * hooks. */ pr_err("extension failed to load: %s", hook->name); - __battery_hook_unregister(hook, 0); + battery_hook_unregister_unlocked(hook); goto end; } } @@ -766,7 +772,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery) */ pr_err("error in extension, unloading: %s", hook_node->name); - __battery_hook_unregister(hook_node, 0); + battery_hook_unregister_unlocked(hook_node); } } mutex_unlock(&hook_mutex); @@ -799,7 +805,7 @@ static void __exit battery_hook_exit(void) * need to remove the hooks. */ list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) { - __battery_hook_unregister(hook, 1); + battery_hook_unregister(hook); } mutex_destroy(&hook_mutex); } diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 995c4d8922b12eef963a9cc1cab591ee7b404b1d..761f0c19a451266856838ad221afae623f7f6610 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -30,7 +30,9 @@ #include "internal.h" +#ifdef CONFIG_DMI static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; +#endif /* * POLICY: If *anything* doesn't work, put it on the blacklist. @@ -74,7 +76,9 @@ int __init acpi_blacklisted(void) } (void)early_acpi_osi_init(); +#ifdef CONFIG_DMI dmi_check_system(acpi_rev_dmi_table); +#endif return blacklisted; } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d2e29a19890d14db1a9e6fd85f207d558c6a3c6e..d60e57d14c85935300324d44d24617b465d68e39 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -166,7 +166,7 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data) { acpi_status status; - if (!*data) + if (!data) return -EINVAL; status = acpi_get_data(handle, acpi_bus_private_data_handler, data); @@ -1054,16 +1054,6 @@ void __init acpi_early_init(void) goto error0; } - if (!acpi_gbl_execute_tables_as_methods && - acpi_gbl_group_module_level_code) { - status = acpi_load_tables(); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to load the System Description Tables\n"); - goto error0; - } - } - #ifdef CONFIG_X86 if (!acpi_ioapic) { /* compatible (0) means level (3) */ @@ -1133,26 +1123,24 @@ static int __init acpi_bus_init(void) acpi_os_initialize1(); + status = acpi_load_tables(); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX + "Unable to load the System Description Tables\n"); + goto error1; + } + /* - * ACPI 2.0 requires the EC driver to be loaded and work before - * the EC device is found in the namespace (i.e. 
before - * acpi_load_tables() is called). + * ACPI 2.0 requires the EC driver to be loaded and work before the EC + * device is found in the namespace. + * + * This is accomplished by looking for the ECDT table and getting the EC + * parameters out of that. * - * This is accomplished by looking for the ECDT table, and getting - * the EC parameters out of that. + * Do that before calling acpi_initialize_objects() which may trigger EC + * address space accesses. */ - status = acpi_ec_ecdt_probe(); - /* Ignore result. Not having an ECDT is not fatal. */ - - if (acpi_gbl_execute_tables_as_methods || - !acpi_gbl_group_module_level_code) { - status = acpi_load_tables(); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Unable to load the System Description Tables\n"); - goto error1; - } - } + acpi_ec_ecdt_probe(); status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index a19ff3977ac4ae46deac7685ef017bf482c21330..a25d77b3a16adb5720c6dd7848b596380c03113a 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -91,6 +91,17 @@ static const struct dmi_system_id lid_blacklst[] = { DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"), }, }, + { + /* + * Medion Akoya E2215T, notification of the LID device only + * happens on close, not on open and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), + DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; @@ -456,8 +467,11 @@ static int acpi_button_resume(struct device *dev) struct acpi_button *button = acpi_driver_data(device); button->suspended = false; - if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) + if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) { + button->last_state = !!acpi_lid_evaluate_state(device); + button->last_time = ktime_get(); acpi_lid_initialize_state(device); + } return 0; } #endif diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index d9ce4b162e2ce0533039cbe2d5ef4704ecb24642..8ad8789ab16e1256572ee6b903ab112fdebf444f 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -122,23 +122,17 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); */ #define NUM_RETRIES 500ULL -struct cppc_attr { - struct attribute attr; - ssize_t (*show)(struct kobject *kobj, - struct attribute *attr, char *buf); - ssize_t (*store)(struct kobject *kobj, - struct attribute *attr, const char *c, ssize_t count); -}; +#define OVER_16BTS_MASK ~0xFFFFULL #define define_one_cppc_ro(_name) \ -static struct cppc_attr _name = \ +static struct kobj_attribute _name = \ __ATTR(_name, 0444, show_##_name, NULL) #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) #define show_cppc_data(access_fn, struct_name, member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ - struct attribute *attr, char *buf) \ + struct kobj_attribute *attr, char *buf) \ { \ struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ struct struct_name st_name = {0}; \ @@ -163,8 +157,15 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf); show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); +/* Check for valid access_width, otherwise, fallback to using bit_width */ +#define GET_BIT_WIDTH(reg) ((reg)->access_width ? 
(8 << ((reg)->access_width - 1)) : (reg)->bit_width) + +/* Shift and apply the mask for CPC reads/writes */ +#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \ + GENMASK(((reg)->bit_width) - 1, 0)) + static ssize_t show_feedback_ctrs(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); struct cppc_perf_fb_ctrs fb_ctrs = {0}; @@ -369,8 +370,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) union acpi_object *psd = NULL; struct acpi_psd_package *pdomain; - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer, - ACPI_TYPE_PACKAGE); + status = acpi_evaluate_object_typed(handle, "_PSD", NULL, + &buffer, ACPI_TYPE_PACKAGE); + if (status == AE_NOT_FOUND) /* _PSD is optional */ + return 0; if (ACPI_FAILURE(status)) return -ENODEV; @@ -421,7 +424,7 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) * * Return: 0 for success or negative value for err. */ -int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) +static int __acpi_get_psd_map(struct cppc_cpudata **all_cpu_data, struct cpc_desc **cpc_pptr) { int count_target; int retval = 0; @@ -447,7 +450,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) if (cpumask_test_cpu(i, covered_cpus)) continue; - cpc_ptr = per_cpu(cpc_desc_ptr, i); + cpc_ptr = cpc_pptr[i]; if (!cpc_ptr) { retval = -EFAULT; goto err_ret; @@ -472,7 +475,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) if (i == j) continue; - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); + match_cpc_ptr = cpc_pptr[j]; if (!match_cpc_ptr) { retval = -EFAULT; goto err_ret; @@ -505,7 +508,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) if (!match_pr) continue; - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); + match_cpc_ptr = cpc_pptr[j]; if (!match_cpc_ptr) { retval = -EFAULT; goto err_ret; @@ -538,6 +541,91 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) free_cpumask_var(covered_cpus); return retval; } + +static acpi_status acpi_parse_cpc(acpi_handle handle, u32 lvl, void *data, + void **ret_p) +{ + struct acpi_device *adev = NULL; + struct cpc_desc *cpc_ptr, **cpc_pptr; + acpi_status status = AE_OK; + const int device_declaration = 1; + unsigned long long uid; + phys_cpuid_t phys_id; + int logical_id, ret; + int *parsed_core_num = (int *)ret_p; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + if (strcmp(acpi_device_hid(adev), ACPI_PROCESSOR_DEVICE_HID)) + return AE_OK; + + status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid); + if (ACPI_FAILURE(status)) + return AE_OK; + phys_id = acpi_get_phys_id(handle, device_declaration, uid); + if (invalid_phys_cpuid(phys_id)) + return AE_OK; + logical_id = acpi_map_cpuid(phys_id, uid); + if (logical_id < 0) + return AE_OK; + + cpc_pptr = (struct cpc_desc **)data; + cpc_ptr = cpc_pptr[logical_id]; + cpc_ptr->cpu_id = logical_id; + + ret = acpi_get_psd(cpc_ptr, handle); + if (ret) + return ret; + + (*parsed_core_num)++; + + return AE_OK; +} + +int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) +{ + struct cpc_desc **cpc_pptr, *cpc_ptr; + int parsed_core_num = 0; + int i, ret; + + cpc_pptr = kcalloc(num_possible_cpus(), sizeof(void *), GFP_KERNEL); + if (!cpc_pptr) + return -ENOMEM; + for_each_possible_cpu(i) { + cpc_pptr[i] = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL); + if (!cpc_pptr[i]) { + ret = -ENOMEM; + goto out; + } + } + + /* + * We can not use acpi_get_devices() to walk the processor devices + * because 
some processor device is not present. + */ + ret = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, acpi_parse_cpc, NULL, + cpc_pptr, (void **)&parsed_core_num); + if (ret) + goto out; + if (parsed_core_num != num_possible_cpus()) { + ret = -EINVAL; + goto out; + } + + ret = __acpi_get_psd_map(all_cpu_data, cpc_pptr); + +out: + for_each_possible_cpu(i) { + cpc_ptr = cpc_pptr[i]; + if (cpc_ptr) + kfree(cpc_ptr); + } + kfree(cpc_pptr); + + return ret; +} EXPORT_SYMBOL_GPL(acpi_get_psd_map); static int register_pcc_channel(int pcc_ss_idx) @@ -748,6 +836,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj = &out_obj->package.elements[0]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { num_ent = cpc_obj->integer.value; + if (num_ent <= 1) { + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } } else { pr_debug("Unexpected entry type(%d) for NumEntries\n", cpc_obj->type); @@ -798,15 +891,34 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { if (gas_t->address) { void __iomem *addr; + size_t access_width; - addr = ioremap(gas_t->address, gas_t->bit_width/8); + access_width = GET_BIT_WIDTH(gas_t) / 8; + addr = ioremap(gas_t->address, access_width); if (!addr) goto out_free; cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; } + } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + if (gas_t->access_width < 1 || gas_t->access_width > 3) { + /* + * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit. + * SystemIO doesn't implement 64-bit + * registers. + */ + pr_debug("Invalid access width %d for SystemIO register\n", + gas_t->access_width); + goto out_free; + } + if (gas_t->address & OVER_16BTS_MASK) { + /* SystemIO registers use 16-bit integer addresses */ + pr_debug("Invalid IO port %llu for SystemIO register\n", + gas_t->address); + goto out_free; + } } else { if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { - /* Support only PCC ,SYS MEM and FFH type regs */ + /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. 
*/ pr_debug("Unsupported register type: %d\n", gas_t->space_id); goto out_free; } @@ -867,6 +979,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) "acpi_cppc"); if (ret) { per_cpu(cpc_desc_ptr, pr->id) = NULL; + kobject_put(&cpc_ptr->kobj); goto out_free; } @@ -907,8 +1020,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr) pcc_data[pcc_ss_id]->refcount--; if (!pcc_data[pcc_ss_id]->refcount) { pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel); - pcc_data[pcc_ss_id]->pcc_channel_acquired = 0; kfree(pcc_data[pcc_ss_id]); + pcc_data[pcc_ss_id] = NULL; } } } @@ -969,6 +1082,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) { int ret_val = 0; void __iomem *vaddr = 0; + int size; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; @@ -978,78 +1092,181 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) } *val = 0; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) + size = GET_BIT_WIDTH(reg); + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + u32 val_u32; + acpi_status status; + + status = acpi_os_read_port((acpi_io_address)reg->address, + &val_u32, size); + if (ACPI_FAILURE(status)) { + pr_debug("Error: Failed to read SystemIO port %llx\n", + reg->address); + return -EFAULT; + } + + *val = val_u32; + return 0; + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) { + /* + * For registers in PCC space, the register size is determined + * by the bit width field; the access size is used to indicate + * the PCC subspace id. + */ + size = reg->bit_width; vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) vaddr = reg_res->sys_mem_vaddr; else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) return cpc_read_ffh(cpu, reg, val); else return acpi_os_read_memory((acpi_physical_address)reg->address, - val, reg->bit_width); - - switch (reg->bit_width) { - case 8: - *val = readb_relaxed(vaddr); - break; - case 16: - *val = readw_relaxed(vaddr); - break; - case 32: - *val = readl_relaxed(vaddr); - break; - case 64: - *val = readq_relaxed(vaddr); - break; - default: + val, size); + + switch (size) { + case 8: + *val = readb_relaxed(vaddr); + break; + case 16: + *val = readw_relaxed(vaddr); + break; + case 32: + *val = readl_relaxed(vaddr); + break; + case 64: + *val = readq_relaxed(vaddr); + break; + default: + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n", + size, reg->address); + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n", - reg->bit_width, pcc_ss_id); - ret_val = -EFAULT; + size, pcc_ss_id); + } + return -EFAULT; } - return ret_val; + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + *val = MASK_VAL(reg, *val); + + return 0; } static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; void __iomem *vaddr = 0; + int size; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) + size = GET_BIT_WIDTH(reg); + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + acpi_status status; + + status = acpi_os_write_port((acpi_io_address)reg->address, + (u32)val, size); + if (ACPI_FAILURE(status)) { + pr_debug("Error: Failed to write SystemIO port %llx\n", + reg->address); 
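The GET_BIT_WIDTH()/MASK_VAL() helpers added above drive the reworked cpc_read()/cpc_write() paths: a non-zero access_width selects the 8/16/32/64-bit access size (bit_width is the fallback), and for SystemMemory registers bit_offset/bit_width then carve the field out of the raw value. The stand-alone sketch below walks through that arithmetic with a hypothetical register layout; the struct and the numbers are illustrative, not taken from the kernel.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

struct cpc_reg_sketch {
	uint8_t bit_width;	/* width of the field in bits */
	uint8_t bit_offset;	/* offset of the field within the register */
	uint8_t access_width;	/* 0 = use bit_width; 1/2/3/4 = 8/16/32/64-bit access */
};

/* Same shape as GET_BIT_WIDTH() in the patch. */
static unsigned int access_size_bits(const struct cpc_reg_sketch *reg)
{
	return reg->access_width ? (8u << (reg->access_width - 1)) : reg->bit_width;
}

/* Same shape as MASK_VAL() in the patch. */
static uint64_t mask_val(const struct cpc_reg_sketch *reg, uint64_t raw)
{
	return (raw >> reg->bit_offset) & GENMASK(reg->bit_width - 1, 0);
}

int main(void)
{
	/* hypothetical register: 8-bit field at bit offset 8, 32-bit access size */
	struct cpc_reg_sketch reg = { .bit_width = 8, .bit_offset = 8, .access_width = 3 };
	uint64_t raw = 0x00001234;

	printf("access size: %u bits\n", access_size_bits(&reg));			/* 32 */
	printf("field value: 0x%llx\n", (unsigned long long)mask_val(&reg, raw));	/* 0x12 */
	return 0;
}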
+ return -EFAULT; + } + + return 0; + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) { + /* + * For registers in PCC space, the register size is determined + * by the bit width field; the access size is used to indicate + * the PCC subspace id. + */ + size = reg->bit_width; vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id); + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) vaddr = reg_res->sys_mem_vaddr; else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) return cpc_write_ffh(cpu, reg, val); else return acpi_os_write_memory((acpi_physical_address)reg->address, - val, reg->bit_width); - - switch (reg->bit_width) { - case 8: - writeb_relaxed(val, vaddr); - break; - case 16: - writew_relaxed(val, vaddr); - break; - case 32: - writel_relaxed(val, vaddr); - break; - case 64: - writeq_relaxed(val, vaddr); - break; - default: + val, size); + + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + val = MASK_VAL(reg, val); + + switch (size) { + case 8: + writeb_relaxed(val, vaddr); + break; + case 16: + writew_relaxed(val, vaddr); + break; + case 32: + writel_relaxed(val, vaddr); + break; + case 64: + writeq_relaxed(val, vaddr); + break; + default: + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n", + size, reg->address); + } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n", - reg->bit_width, pcc_ss_id); - ret_val = -EFAULT; - break; + size, pcc_ss_id); + } + ret_val = -EFAULT; + break; } return ret_val; } +/** + * cppc_get_desired_perf - Get the value of desired performance register. + * @cpunum: CPU from which to get desired performance. + * @desired_perf: address of a variable to store the returned desired performance + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_desired_perf(int cpunum, u64 *desired_perf) +{ + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); + struct cpc_register_resource *desired_reg; + struct cppc_pcc_data *pcc_ss_data = NULL; + + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; + + if (CPC_IN_PCC(desired_reg)) { + int ret = 0; + + if (pcc_ss_id < 0) + return -EIO; + + pcc_ss_data = pcc_data[pcc_ss_id]; + + down_write(&pcc_ss_data->pcc_lock); + + if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) + cpc_read(cpunum, desired_reg, desired_perf); + else + ret = -EIO; + + up_write(&pcc_ss_data->pcc_lock); + + return ret; + } + + cpc_read(cpunum, desired_reg, desired_perf); + + return 0; +} +EXPORT_SYMBOL_GPL(cppc_get_desired_perf); + /** * cppc_get_perf_caps - Get a CPUs performance capabilities. * @cpunum: CPU from which to get capabilities info. 
@@ -1061,9 +1278,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) { struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); struct cpc_register_resource *highest_reg, *lowest_reg, - *lowest_non_linear_reg, *nominal_reg, + *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg, *low_freq_reg = NULL, *nom_freq_reg = NULL; - u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0; + u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); struct cppc_pcc_data *pcc_ss_data = NULL; int ret = 0, regs_in_pcc = 0; @@ -1079,6 +1296,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ]; nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ]; + guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF]; /* Are any of the regs PCC ?*/ if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || @@ -1107,6 +1325,14 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) cpc_read(cpunum, nominal_reg, &nom); perf_caps->nominal_perf = nom; + if (guaranteed_reg->type != ACPI_TYPE_BUFFER || + IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { + perf_caps->guaranteed_perf = 0; + } else { + cpc_read(cpunum, guaranteed_reg, &guaranteed); + perf_caps->guaranteed_perf = guaranteed; + } + cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); perf_caps->lowest_nonlinear_perf = min_nonlinear; diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index e967c1173ba3280ac1cb5b11c9785a785619b320..613041870872b87f024da944df74a992e5d90c62 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -37,6 +37,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, sizeof(struct acpi_table_header))) return -EFAULT; uncopied_bytes = max_size = table.length; + /* make sure the buf is not allocated */ + kfree(buf); buf = kzalloc(max_size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -48,8 +50,11 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, if ((*ppos > max_size) || (*ppos + count > max_size) || (*ppos + count < count) || - (count > uncopied_bytes)) + (count > uncopied_bytes)) { + kfree(buf); + buf = NULL; return -EINVAL; + } if (copy_from_user(buf + (*ppos), user_buf, count)) { kfree(buf); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a7c2673ffd36e8a8287a40182fae189e6b71688e..0fdd179bb3c7dd69a9ba5ed00e6728f0446f1838 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -53,6 +53,19 @@ const char *acpi_power_state_string(int state) } } +static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state) +{ + unsigned long long psc; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_PSC", NULL, &psc); + if (ACPI_FAILURE(status)) + return -ENODEV; + + *state = psc; + return 0; +} + /** * acpi_device_get_power - Get power state of an ACPI device. * @device: Device to get the power state of. @@ -65,6 +78,7 @@ const char *acpi_power_state_string(int state) int acpi_device_get_power(struct acpi_device *device, int *state) { int result = ACPI_STATE_UNKNOWN; + int error; if (!device || !state) return -EINVAL; @@ -81,18 +95,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state) * if available. 
*/ if (device->power.flags.power_resources) { - int error = acpi_power_get_inferred_state(device, &result); + error = acpi_power_get_inferred_state(device, &result); if (error) return error; } if (device->power.flags.explicit_get) { - acpi_handle handle = device->handle; - unsigned long long psc; - acpi_status status; + int psc; - status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc); - if (ACPI_FAILURE(status)) - return -ENODEV; + error = acpi_dev_pm_explicit_get(device, &psc); + if (error) + return error; /* * The power resources settings may indicate a power state @@ -159,7 +171,8 @@ int acpi_device_set_power(struct acpi_device *device, int state) /* Make sure this is a valid target state */ - if (state == device->power.state) { + /* There is a special case for D0 addressed below. */ + if (state > ACPI_STATE_D0 && state == device->power.state) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n", device->pnp.bus_id, acpi_power_state_string(state))); @@ -209,18 +222,50 @@ int acpi_device_set_power(struct acpi_device *device, int state) return -ENODEV; } - result = acpi_dev_pm_explicit_set(device, state); - if (result) - goto end; + /* + * If the device goes from D3hot to D3cold, _PS3 has been + * evaluated for it already, so skip it in that case. + */ + if (device->power.state < ACPI_STATE_D3_HOT) { + result = acpi_dev_pm_explicit_set(device, state); + if (result) + goto end; + } if (device->power.flags.power_resources) result = acpi_power_transition(device, target_state); } else { + int cur_state = device->power.state; + if (device->power.flags.power_resources) { result = acpi_power_transition(device, ACPI_STATE_D0); if (result) goto end; } + + if (cur_state == ACPI_STATE_D0) { + int psc; + + /* Nothing to do here if _PSC is not present. */ + if (!device->power.flags.explicit_get) + return 0; + + /* + * The power state of the device was set to D0 last + * time, but that might have happened before a + * system-wide transition involving the platform + * firmware, so it may be necessary to evaluate _PS0 + * for the device here. However, use extra care here + * and evaluate _PSC to check the device's current power + * state, and only invoke _PS0 if the evaluation of _PSC + * is successful and it returns a power state different + * from D0. + */ + result = acpi_dev_pm_explicit_get(device, &psc); + if (result || psc == ACPI_STATE_D0) + return 0; + } + result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); } @@ -948,8 +993,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) u32 sys_target = acpi_target_system_state(); int ret, state; - if (!pm_runtime_suspended(dev) || !adev || - device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid && + device_may_wakeup(dev) != !!adev->wakeup.prepare_count)) return true; if (sys_target == ACPI_STATE_S0) @@ -1116,13 +1161,14 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); int acpi_subsys_freeze(struct device *dev) { /* - * This used to be done in acpi_subsys_prepare() for all devices and - * some drivers may depend on it, so do it here. Ideally, however, - * runtime-suspended devices should not be touched during freeze/thaw - * transitions. 
+ * Resume all runtime-suspended devices before creating a snapshot + * image of system memory, because the restore kernel generally cannot + * be expected to always handle them consistently and they need to be + * put into the runtime-active metastate during system resume anyway, + * so it is better to ensure that the state saved in the image will be + * always consistent with that. */ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) - pm_runtime_resume(dev); + pm_runtime_resume(dev); return pm_generic_freeze(dev); } @@ -1254,9 +1300,19 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off) */ int acpi_dev_pm_attach(struct device *dev, bool power_on) { + /* + * Skip devices whose ACPI companions match the device IDs below, + * because they require special power management handling incompatible + * with the generic ACPI PM domain. + */ + static const struct acpi_device_id special_pm_ids[] = { + {"PNP0C0B", }, /* Generic ACPI fan */ + {"INT3404", }, /* Fan */ + {} + }; struct acpi_device *adev = ACPI_COMPANION(dev); - if (!adev) + if (!adev || !acpi_match_device_ids(adev, special_pm_ids)) return 0; /* diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 545e91420cded88ac3b2621d97b1f1176696f348..f8dfd15f5bd0e33c93d12be86399f91bb0dee513 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -164,8 +164,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, return 0; len = snprintf(modalias, size, "acpi:"); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; size -= len; @@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; const union acpi_object *of_compatible, *obj; + acpi_status status; int len, count; int i, nval; char *c; - acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); + status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); + if (ACPI_FAILURE(status)) + return -ENODEV; + /* DT strings are all in lower case */ for (c = buf.pointer; *c != '\0'; c++) *c = tolower(*c); @@ -214,8 +218,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; + + size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { @@ -456,7 +462,7 @@ static ssize_t description_show(struct device *dev, (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, acpi_dev->pnp.str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, - PAGE_SIZE); + PAGE_SIZE - 1); buf[result++] = '\n'; @@ -541,8 +547,9 @@ int acpi_device_setup_files(struct acpi_device *dev) * If device has _STR, 'description' file is created */ if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); + status = acpi_evaluate_object_typed(dev->handle, "_STR", + NULL, &buffer, + ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) buffer.pointer = NULL; dev->pnp.str_obj = buffer.pointer; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d4e5610e09c51beb8810a3bf5f4d42ee563288c5..e9b9844c391e4e16e0e145351c5c1e180afe5540 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT 
port address correction */ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- * Logging/Debugging @@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec) ec_log_drv("event blocked"); } +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. + */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_query(ec, &value); + if (status || !value) + break; + } + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + static void acpi_ec_enable_event(struct acpi_ec *ec) { unsigned long flags; @@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) if (acpi_ec_started(ec)) __acpi_ec_enable_event(ec); spin_unlock_irqrestore(&ec->lock, flags); + + /* Drain additional events if hardware requires that */ + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); } #ifdef CONFIG_PM_SLEEP @@ -782,6 +807,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; + if (t->rdata) + memset(t->rdata, 0, t->rlen); + /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -818,8 +846,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; - if (t->rdata) - memset(t->rdata, 0, t->rlen); mutex_lock(&ec->mutex); if (ec->global_lock) { @@ -846,7 +872,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec) .wdata = NULL, .rdata = &d, .wlen = 0, .rlen = 1}; - return acpi_ec_transaction(ec, &t); + return acpi_ec_transaction_unlocked(ec, &t); } static int acpi_ec_burst_disable(struct acpi_ec *ec) @@ -856,7 +882,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) .wlen = 0, .rlen = 0}; return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? 
- acpi_ec_transaction(ec, &t) : 0; + acpi_ec_transaction_unlocked(ec, &t) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) @@ -872,6 +898,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) return result; } +static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) +{ + int result; + u8 d; + struct transaction t = {.command = ACPI_EC_COMMAND_READ, + .wdata = &address, .rdata = &d, + .wlen = 1, .rlen = 1}; + + result = acpi_ec_transaction_unlocked(ec, &t); + *data = d; + return result; +} + static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; @@ -882,6 +921,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) return acpi_ec_transaction(ec, &t); } +static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) +{ + u8 wdata[2] = { address, data }; + struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, + .wdata = wdata, .rdata = NULL, + .wlen = 2, .rlen = 0}; + + return acpi_ec_transaction_unlocked(ec, &t); +} + int ec_read(u8 addr, u8 *val) { int err; @@ -1034,6 +1083,18 @@ void acpi_ec_unblock_transactions(void) acpi_ec_start(first_ec, true); } +void acpi_ec_mark_gpe_for_wake(void) +{ + if (first_ec && !ec_no_wakeup) + acpi_mark_gpe_for_wake(NULL, first_ec->gpe); +} + +void acpi_ec_set_gpe_wake_mask(u8 action) +{ + if (first_ec && !ec_no_wakeup) + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); +} + void acpi_ec_dispatch_gpe(void) { if (first_ec) @@ -1043,29 +1104,21 @@ void acpi_ec_dispatch_gpe(void) /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ -static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? 
acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) @@ -1124,6 +1177,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { acpi_ec_remove_query_handlers(ec, false, query_bit); + flush_workqueue(ec_query_wq); } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); @@ -1290,6 +1344,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, struct acpi_ec *ec = handler_context; int result = 0, i, bytes = bits / 8; u8 *value = (u8 *)value64; + u32 glk; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -1297,17 +1352,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; + mutex_lock(&ec->mutex); + + if (ec->global_lock) { + acpi_status status; + + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto unlock; + } + } + if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); - for (i = 0; i < bytes; ++i, ++address, ++value) + for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? - acpi_ec_read(ec, address, value) : - acpi_ec_write(ec, address, *value); + acpi_ec_read_unlocked(ec, address, value) : + acpi_ec_write_unlocked(ec, address, *value); + if (result < 0) + break; + } if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); + if (ec->global_lock) + acpi_release_global_lock(glk); + +unlock: + mutex_unlock(&ec->mutex); + switch (result) { case -EINVAL: return AE_BAD_PARAMETER; @@ -1315,8 +1391,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, return AE_NOT_FOUND; case -ETIME: return AE_TIME; - default: + case 0: return AE_OK; + default: + return AE_ERROR; } } @@ -1808,6 +1886,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id) } #endif +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. + */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; + return 0; +} + /* * Some ECDTs contain wrong register addresses. 
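The sketch below (stand-alone, with a simulated EC and made-up event numbers) mirrors the bounded drain that acpi_ec_clear() performs on machines matched by ec_clear_on_resume(): keep issuing query transactions while events are pending, but give up after a fixed number of iterations so a misbehaving controller cannot stall boot or resume.

#include <stdio.h>

#define EC_CLEAR_MAX 100

static int pending_events = 7;	/* pretend 7 stale _Q events accumulated over suspend */

/* Stand-in for acpi_ec_query(): success with value 0 means nothing is pending. */
static int fake_ec_query(unsigned char *value)
{
	if (pending_events > 0) {
		*value = (unsigned char)(0x50 + --pending_events);	/* arbitrary _Qxx numbers */
		return 0;
	}
	*value = 0;
	return 0;
}

int main(void)
{
	unsigned char value;
	int i;

	for (i = 0; i < EC_CLEAR_MAX; i++) {
		if (fake_ec_query(&value) || !value)
			break;
	}

	if (i == EC_CLEAR_MAX)
		printf("gave up, %d stale events cleared\n", i);
	else
		printf("%d stale event(s) cleared\n", i);
	return 0;
}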
* MSI MS-171F @@ -1857,6 +1960,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { ec_honor_ecdt_gpe, "ASUS X580VD", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, {}, }; diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/hmat/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..95a29964dbeae7a17892d1aad09fcfe4a4ba81e9 --- /dev/null +++ b/drivers/acpi/hmat/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +config ACPI_HMAT + bool "ACPI Heterogeneous Memory Attribute Table Support" + depends on ACPI_NUMA + select HMEM_REPORTING + help + If set, this option has the kernel parse and report the + platform's ACPI HMAT (Heterogeneous Memory Attributes Table), + register memory initiators with their targets, and export + performance attributes through the node's sysfs device if + provided. diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e909051d3d00f23d35a9a1446455bfdef846b9a6 --- /dev/null +++ b/drivers/acpi/hmat/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ACPI_HMAT) := hmat.o diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c new file mode 100644 index 0000000000000000000000000000000000000000..96b7d39a97c65db544a7e8ec32159f57c595d673 --- /dev/null +++ b/drivers/acpi/hmat/hmat.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Intel Corporation. + * + * Heterogeneous Memory Attributes Table (HMAT) representation + * + * This program parses and reports the platform's HMAT tables, and registers + * the applicable attributes with the node's interfaces. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static __initdata u8 hmat_revision; + +static __initdata LIST_HEAD(targets); +static __initdata LIST_HEAD(initiators); +static __initdata LIST_HEAD(localities); + +/* + * The defined enum order is used to prioritize attributes to break ties when + * selecting the best performing node. 
+ */ +enum locality_types { + WRITE_LATENCY, + READ_LATENCY, + WRITE_BANDWIDTH, + READ_BANDWIDTH, +}; + +static struct memory_locality *localities_types[4]; + +struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct node_hmem_attrs hmem_attrs; +}; + +struct memory_initiator { + struct list_head node; + unsigned int processor_pxm; +}; + +struct memory_locality { + struct list_head node; + struct acpi_hmat_locality *hmat_loc; +}; + +static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm) +{ + struct memory_initiator *initiator; + + list_for_each_entry(initiator, &initiators, node) + if (initiator->processor_pxm == cpu_pxm) + return initiator; + return NULL; +} + +static __init struct memory_target *find_mem_target(unsigned int mem_pxm) +{ + struct memory_target *target; + + list_for_each_entry(target, &targets, node) + if (target->memory_pxm == mem_pxm) + return target; + return NULL; +} + +static __init void alloc_memory_initiator(unsigned int cpu_pxm) +{ + struct memory_initiator *initiator; + + if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE) + return; + + initiator = find_mem_initiator(cpu_pxm); + if (initiator) + return; + + initiator = kzalloc(sizeof(*initiator), GFP_KERNEL); + if (!initiator) + return; + + initiator->processor_pxm = cpu_pxm; + list_add_tail(&initiator->node, &initiators); +} + +static __init void alloc_memory_target(unsigned int mem_pxm) +{ + struct memory_target *target; + + if (pxm_to_node(mem_pxm) == NUMA_NO_NODE) + return; + + target = find_mem_target(mem_pxm); + if (target) + return; + + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) + return; + + target->memory_pxm = mem_pxm; + target->processor_pxm = PXM_INVAL; + list_add_tail(&target->node, &targets); +} + +static __init const char *hmat_data_type(u8 type) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + return "Access Latency"; + case ACPI_HMAT_READ_LATENCY: + return "Read Latency"; + case ACPI_HMAT_WRITE_LATENCY: + return "Write Latency"; + case ACPI_HMAT_ACCESS_BANDWIDTH: + return "Access Bandwidth"; + case ACPI_HMAT_READ_BANDWIDTH: + return "Read Bandwidth"; + case ACPI_HMAT_WRITE_BANDWIDTH: + return "Write Bandwidth"; + default: + return "Reserved"; + } +} + +static __init const char *hmat_data_type_suffix(u8 type) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + return " nsec"; + case ACPI_HMAT_ACCESS_BANDWIDTH: + case ACPI_HMAT_READ_BANDWIDTH: + case ACPI_HMAT_WRITE_BANDWIDTH: + return " MB/s"; + default: + return ""; + } +} + +static __init u32 hmat_normalize(u16 entry, u64 base, u8 type) +{ + u32 value; + + /* + * Check for invalid and overflow values + */ + if (entry == 0xffff || !entry) + return 0; + else if (base > (UINT_MAX / (entry))) + return 0; + + /* + * Divide by the base unit for version 1, convert latency from + * picosenonds to nanoseconds if revision 2. 
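The conversion described here is easiest to see with concrete numbers. The sketch below (user-space, assumed inputs) mirrors hmat_normalize(): scale the raw entry by the table's base unit, then divide by 10 for revision 1, or convert latencies from picoseconds to nanoseconds for revision 2.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t normalize(uint16_t entry, uint64_t base, int revision, int is_latency)
{
	uint64_t value;

	/* 0xffff and 0 mean "no data"; also guard against overflow, as the patch does */
	if (entry == 0xffff || !entry || base > (UINT32_MAX / entry))
		return 0;

	value = (uint64_t)entry * base;
	if (revision == 1)
		value = value < 10 ? 0 : DIV_ROUND_UP(value, 10);
	else if (revision == 2 && is_latency)
		value = DIV_ROUND_UP(value, 1000);	/* picoseconds to nanoseconds */
	return (uint32_t)value;
}

int main(void)
{
	printf("%u\n", normalize(10, 100, 1, 1));	/* rev 1: 10 * 100 / 10 = 100 */
	printf("%u\n", normalize(100, 1000, 2, 1));	/* rev 2: 100000 ps = 100 ns */
	return 0;
}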
+ */ + value = entry * base; + if (hmat_revision == 1) { + if (value < 10) + return 0; + value = DIV_ROUND_UP(value, 10); + } else if (hmat_revision == 2) { + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + value = DIV_ROUND_UP(value, 1000); + break; + default: + break; + } + } + return value; +} + +static __init void hmat_update_target_access(struct memory_target *target, + u8 type, u32 value) +{ + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + target->hmem_attrs.read_latency = value; + target->hmem_attrs.write_latency = value; + break; + case ACPI_HMAT_READ_LATENCY: + target->hmem_attrs.read_latency = value; + break; + case ACPI_HMAT_WRITE_LATENCY: + target->hmem_attrs.write_latency = value; + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + target->hmem_attrs.read_bandwidth = value; + target->hmem_attrs.write_bandwidth = value; + break; + case ACPI_HMAT_READ_BANDWIDTH: + target->hmem_attrs.read_bandwidth = value; + break; + case ACPI_HMAT_WRITE_BANDWIDTH: + target->hmem_attrs.write_bandwidth = value; + break; + default: + break; + } +} + +static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc) +{ + struct memory_locality *loc; + + loc = kzalloc(sizeof(*loc), GFP_KERNEL); + if (!loc) { + pr_notice_once("Failed to allocate HMAT locality\n"); + return; + } + + loc->hmat_loc = hmat_loc; + list_add_tail(&loc->node, &localities); + + switch (hmat_loc->data_type) { + case ACPI_HMAT_ACCESS_LATENCY: + localities_types[READ_LATENCY] = loc; + localities_types[WRITE_LATENCY] = loc; + break; + case ACPI_HMAT_READ_LATENCY: + localities_types[READ_LATENCY] = loc; + break; + case ACPI_HMAT_WRITE_LATENCY: + localities_types[WRITE_LATENCY] = loc; + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + localities_types[READ_BANDWIDTH] = loc; + localities_types[WRITE_BANDWIDTH] = loc; + break; + case ACPI_HMAT_READ_BANDWIDTH: + localities_types[READ_BANDWIDTH] = loc; + break; + case ACPI_HMAT_WRITE_BANDWIDTH: + localities_types[WRITE_BANDWIDTH] = loc; + break; + default: + break; + } +} + +static __init int hmat_parse_locality(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_locality *hmat_loc = (void *)header; + struct memory_target *target; + unsigned int init, targ, total_size, ipds, tpds; + u32 *inits, *targs, value; + u16 *entries; + u8 type, mem_hier; + + if (hmat_loc->header.length < sizeof(*hmat_loc)) { + pr_notice("HMAT: Unexpected locality header length: %d\n", + hmat_loc->header.length); + return -EINVAL; + } + + type = hmat_loc->data_type; + mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY; + ipds = hmat_loc->number_of_initiator_Pds; + tpds = hmat_loc->number_of_target_Pds; + total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds + + sizeof(*inits) * ipds + sizeof(*targs) * tpds; + if (hmat_loc->header.length < total_size) { + pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n", + hmat_loc->header.length, total_size); + return -EINVAL; + } + + pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n", + hmat_loc->flags, hmat_data_type(type), ipds, tpds, + hmat_loc->entry_base_unit); + + inits = (u32 *)(hmat_loc + 1); + targs = inits + ipds; + entries = (u16 *)(targs + tpds); + for (init = 0; init < ipds; init++) { + alloc_memory_initiator(inits[init]); + for (targ = 0; targ < tpds; targ++) { + value = hmat_normalize(entries[init * tpds + targ], + hmat_loc->entry_base_unit, + type); + pr_info(" 
Initiator-Target[%d-%d]:%d%s\n", + inits[init], targs[targ], value, + hmat_data_type_suffix(type)); + + if (mem_hier == ACPI_HMAT_MEMORY) { + target = find_mem_target(targs[targ]); + if (target && target->processor_pxm == inits[init]) + hmat_update_target_access(target, type, value); + } + } + } + + if (mem_hier == ACPI_HMAT_MEMORY) + hmat_add_locality(hmat_loc); + + return 0; +} + +static __init int hmat_parse_cache(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_cache *cache = (void *)header; + struct node_cache_attrs cache_attrs; + u32 attrs; + + if (cache->header.length < sizeof(*cache)) { + pr_notice("HMAT: Unexpected cache header length: %d\n", + cache->header.length); + return -EINVAL; + } + + attrs = cache->cache_attributes; + pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n", + cache->memory_PD, cache->cache_size, attrs, + cache->number_of_SMBIOShandles); + + cache_attrs.size = cache->cache_size; + cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4; + cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16; + + switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) { + case ACPI_HMAT_CA_DIRECT_MAPPED: + cache_attrs.indexing = NODE_CACHE_DIRECT_MAP; + break; + case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING: + cache_attrs.indexing = NODE_CACHE_INDEXED; + break; + case ACPI_HMAT_CA_NONE: + default: + cache_attrs.indexing = NODE_CACHE_OTHER; + break; + } + + switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) { + case ACPI_HMAT_CP_WB: + cache_attrs.write_policy = NODE_CACHE_WRITE_BACK; + break; + case ACPI_HMAT_CP_WT: + cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH; + break; + case ACPI_HMAT_CP_NONE: + default: + cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER; + break; + } + + node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs); + return 0; +} + +static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_proximity_domain *p = (void *)header; + struct memory_target *target = NULL; + + if (p->header.length != sizeof(*p)) { + pr_notice("HMAT: Unexpected address range header length: %d\n", + p->header.length); + return -EINVAL; + } + + if (hmat_revision == 1) + pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n", + p->reserved3, p->reserved4, p->flags, p->processor_PD, + p->memory_PD); + else + pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n", + p->flags, p->processor_PD, p->memory_PD); + + if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) { + target = find_mem_target(p->memory_PD); + if (!target) { + pr_debug("HMAT: Memory Domain missing from SRAT\n"); + return -EINVAL; + } + } + if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) { + int p_node = pxm_to_node(p->processor_PD); + + if (p_node == NUMA_NO_NODE) { + pr_debug("HMAT: Invalid Processor Domain\n"); + return -EINVAL; + } + target->processor_pxm = p_node; + } + + return 0; +} + +static int __init hmat_parse_subtable(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_hmat_structure *hdr = (void *)header; + + if (!hdr) + return -EINVAL; + + switch (hdr->type) { + case ACPI_HMAT_TYPE_PROXIMITY: + return hmat_parse_proximity_domain(header, end); + case ACPI_HMAT_TYPE_LOCALITY: + return hmat_parse_locality(header, end); + case ACPI_HMAT_TYPE_CACHE: + return hmat_parse_cache(header, end); + default: + return -EINVAL; + } +} + +static __init int srat_parse_mem_affinity(union 
acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_srat_mem_affinity *ma = (void *)header; + + if (!ma) + return -EINVAL; + if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) + return 0; + alloc_memory_target(ma->proximity_domain); + return 0; +} + +static __init u32 hmat_initiator_perf(struct memory_target *target, + struct memory_initiator *initiator, + struct acpi_hmat_locality *hmat_loc) +{ + unsigned int ipds, tpds, i, idx = 0, tdx = 0; + u32 *inits, *targs; + u16 *entries; + + ipds = hmat_loc->number_of_initiator_Pds; + tpds = hmat_loc->number_of_target_Pds; + inits = (u32 *)(hmat_loc + 1); + targs = inits + ipds; + entries = (u16 *)(targs + tpds); + + for (i = 0; i < ipds; i++) { + if (inits[i] == initiator->processor_pxm) { + idx = i; + break; + } + } + + if (i == ipds) + return 0; + + for (i = 0; i < tpds; i++) { + if (targs[i] == target->memory_pxm) { + tdx = i; + break; + } + } + if (i == tpds) + return 0; + + return hmat_normalize(entries[idx * tpds + tdx], + hmat_loc->entry_base_unit, + hmat_loc->data_type); +} + +static __init bool hmat_update_best(u8 type, u32 value, u32 *best) +{ + bool updated = false; + + if (!value) + return false; + + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + if (!*best || *best > value) { + *best = value; + updated = true; + } + break; + case ACPI_HMAT_ACCESS_BANDWIDTH: + case ACPI_HMAT_READ_BANDWIDTH: + case ACPI_HMAT_WRITE_BANDWIDTH: + if (!*best || *best < value) { + *best = value; + updated = true; + } + break; + } + + return updated; +} + +static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct memory_initiator *ia; + struct memory_initiator *ib; + unsigned long *p_nodes = priv; + + ia = list_entry(a, struct memory_initiator, node); + ib = list_entry(b, struct memory_initiator, node); + + set_bit(ia->processor_pxm, p_nodes); + set_bit(ib->processor_pxm, p_nodes); + + return ia->processor_pxm - ib->processor_pxm; +} + +static __init void hmat_register_target_initiators(struct memory_target *target) +{ + static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); + struct memory_initiator *initiator; + unsigned int mem_nid, cpu_nid; + struct memory_locality *loc = NULL; + u32 best = 0; + int i; + + mem_nid = pxm_to_node(target->memory_pxm); + /* + * If the Address Range Structure provides a local processor pxm, link + * only that one. Otherwise, find the best performance attributes and + * register all initiators that match. + */ + if (target->processor_pxm != PXM_INVAL) { + cpu_nid = pxm_to_node(target->processor_pxm); + register_memory_node_under_compute_node(mem_nid, cpu_nid, 0); + return; + } + + if (list_empty(&localities)) + return; + + /* + * We need the initiator list sorted so we can use bitmap_clear for + * previously set initiators when we find a better memory accessor. + * We'll also use the sorting to prime the candidate nodes with known + * initiators. 
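As a concrete illustration of the selection loop that follows, the sketch below uses made-up proximity domains and latencies, and plain arrays in place of the kernel's sorted list and node bitmap: a newly found best value discards everything accepted earlier, anything worse than the current best is dropped, and whatever survives is the set of initiators sharing the best performance.

#include <stdio.h>

struct initiator {
	unsigned int pxm;	/* sorted ascending, as after list_sort() */
	unsigned int latency;	/* lower is better */
};

int main(void)
{
	struct initiator inits[] = {
		{ 0, 100 }, { 1, 80 }, { 2, 80 }, { 3, 120 },
	};
	unsigned int keep[4] = { 1, 1, 1, 1 };	/* all candidates primed, like p_nodes */
	unsigned int best = 0;
	unsigned int i, j;

	for (i = 0; i < 4; i++) {
		unsigned int value = inits[i].latency;

		if (!keep[i])
			continue;
		if (!best || value < best) {	/* better value: forget earlier picks */
			best = value;
			for (j = 0; j < i; j++)
				keep[j] = 0;
		}
		if (value != best)		/* worse than the current best: drop it */
			keep[i] = 0;
	}

	for (i = 0; i < 4; i++)
		if (keep[i])
			printf("initiator pxm %u shares best latency %u\n",
			       inits[i].pxm, best);
	return 0;
}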
+ */ + bitmap_zero(p_nodes, MAX_NUMNODES); + list_sort(p_nodes, &initiators, initiator_cmp); + for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { + loc = localities_types[i]; + if (!loc) + continue; + + best = 0; + list_for_each_entry(initiator, &initiators, node) { + u32 value; + + if (!test_bit(initiator->processor_pxm, p_nodes)) + continue; + + value = hmat_initiator_perf(target, initiator, loc->hmat_loc); + if (hmat_update_best(loc->hmat_loc->data_type, value, &best)) + bitmap_clear(p_nodes, 0, initiator->processor_pxm); + if (value != best) + clear_bit(initiator->processor_pxm, p_nodes); + } + if (best) + hmat_update_target_access(target, loc->hmat_loc->data_type, best); + } + + for_each_set_bit(i, p_nodes, MAX_NUMNODES) { + cpu_nid = pxm_to_node(i); + register_memory_node_under_compute_node(mem_nid, cpu_nid, 0); + } +} + +static __init void hmat_register_target_perf(struct memory_target *target) +{ + unsigned mem_nid = pxm_to_node(target->memory_pxm); + node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0); +} + +static __init void hmat_register_targets(void) +{ + struct memory_target *target; + + list_for_each_entry(target, &targets, node) { + hmat_register_target_initiators(target); + hmat_register_target_perf(target); + } +} + +static __init void hmat_free_structures(void) +{ + struct memory_target *target, *tnext; + struct memory_locality *loc, *lnext; + struct memory_initiator *initiator, *inext; + + list_for_each_entry_safe(target, tnext, &targets, node) { + list_del(&target->node); + kfree(target); + } + + list_for_each_entry_safe(initiator, inext, &initiators, node) { + list_del(&initiator->node); + kfree(initiator); + } + + list_for_each_entry_safe(loc, lnext, &localities, node) { + list_del(&loc->node); + kfree(loc); + } +} + +static __init int hmat_init(void) +{ + struct acpi_table_header *tbl; + enum acpi_hmat_type i; + acpi_status status; + + if (srat_disabled()) + return 0; + + status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl); + if (ACPI_FAILURE(status)) + return 0; + + if (acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_MEMORY_AFFINITY, + srat_parse_mem_affinity, 0) < 0) + goto out_put; + acpi_put_table(tbl); + + status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl); + if (ACPI_FAILURE(status)) + goto out_put; + + hmat_revision = tbl->revision; + switch (hmat_revision) { + case 1: + case 2: + break; + default: + pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision); + goto out_put; + } + + for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) { + if (acpi_table_parse_entries(ACPI_SIG_HMAT, + sizeof(struct acpi_table_hmat), i, + hmat_parse_subtable, 0) < 0) { + pr_notice("Ignoring HMAT: Invalid table"); + goto out_put; + } + } + hmat_register_targets(); +out_put: + hmat_free_structures(); + acpi_put_table(tbl); + return 0; +} +subsys_initcall(hmat_init); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 530a3f67549049c1600ccb2ac9af26a683a7099a..6def196cc23cf8efba0ce6b96cd543b158d8f486 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -98,7 +98,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); extern struct list_head acpi_bus_id_list; struct acpi_device_bus_id { - char bus_id[15]; + const char *bus_id; unsigned int instance_no; struct list_head node; }; @@ -188,6 +188,8 @@ int acpi_ec_ecdt_probe(void); int acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); +void acpi_ec_mark_gpe_for_wake(void); 
+void acpi_ec_set_gpe_wake_mask(u8 action); void acpi_ec_dispatch_gpe(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba052893bb59f135b597f797a345974cd4..8ac01375fe8fdd47452b67c09cadfaa15cafe714 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -55,6 +55,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { struct irq_fwspec fwspec; + unsigned int irq; if (WARN_ON(!acpi_gsi_domain_id)) { pr_warn("GSI: No registered irqchip, giving up\n"); @@ -66,7 +67,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param_count = 2; - return irq_create_fwspec_mapping(&fwspec); + irq = irq_create_fwspec_mapping(&fwspec); + if (!irq) + return -EINVAL; + + return irq; } EXPORT_SYMBOL_GPL(acpi_register_gsi); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b072cfc5f20ee0ec47ba83532a58eafdfcbd2534..e94f3d94c0b2130927d8c5e5c9daa1ae5047d0d8 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -367,7 +367,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle) static u8 nfit_dsm_revid(unsigned family, unsigned func) { - static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { + static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { [NVDIMM_INTEL_GET_MODES] = 2, [NVDIMM_INTEL_GET_FWINFO] = 2, @@ -383,7 +383,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) if (family > NVDIMM_FAMILY_MAX) return 0; - if (func > 31) + if (func > NVDIMM_CMD_MAX) return 0; id = revid_table[family][func]; if (id == 0) @@ -391,6 +391,36 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) return id; } +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, + struct nd_cmd_pkg *call_pkg) +{ + if (call_pkg) { + int i; + + if (nfit_mem && nfit_mem->family != call_pkg->nd_family) + return -ENOTTY; + + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) + if (call_pkg->nd_reserved2[i]) + return -EINVAL; + return call_pkg->nd_command; + } + + /* In the !call_pkg case, bus commands == bus functions */ + if (!nfit_mem) + return cmd; + + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) + return cmd; + + /* + * Force function number validation to fail since 0 is never + * published as a valid function in dsm_mask. 
+ */ + return 0; +} + int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { @@ -404,29 +434,28 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned long cmd_mask, dsm_mask; u32 offset, fw_status = 0; acpi_handle handle; - unsigned int func; const guid_t *guid; - int rc, i; + int func, rc, i; if (cmd_rc) *cmd_rc = -EINVAL; - func = cmd; + if (cmd == ND_CMD_CALL) { - call_pkg = buf; - func = call_pkg->nd_command; + if (!buf || buf_len < sizeof(*call_pkg)) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) - if (call_pkg->nd_reserved2[i]) - return -EINVAL; + call_pkg = buf; } + func = cmd_to_func(nfit_mem, cmd, call_pkg); + if (func < 0) + return func; + if (nvdimm) { struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; - if (call_pkg && nfit_mem->family != call_pkg->nd_family) - return -ENOTTY; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); @@ -440,9 +469,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = cmd_mask; - if (cmd == ND_CMD_CALL) - dsm_mask = nd_desc->bus_dsm_mask; + dsm_mask = nd_desc->bus_dsm_mask; desc = nd_cmd_bus_desc(cmd); guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; @@ -452,7 +479,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) return -ENOTTY; - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) + /* + * Check for a valid command. For ND_CMD_CALL, we also have to + * make sure that the DSM function is supported. + */ + if (cmd == ND_CMD_CALL && + (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) + return -ENOTTY; + else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; in_obj.type = ACPI_TYPE_PACKAGE; @@ -507,6 +541,19 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return -EINVAL; } + if (out_obj->type != ACPI_TYPE_BUFFER) { + dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", + dimm_name, cmd_name, out_obj->type); + rc = -EINVAL; + goto out; + } + + dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, + cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, + out_obj->buffer.pointer, + min_t(u32, 128, out_obj->buffer.length), true); + if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, @@ -525,19 +572,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return 0; } - if (out_obj->package.type != ACPI_TYPE_BUFFER) { - dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", - dimm_name, cmd_name, out_obj->type); - rc = -EINVAL; - goto out; - } - - dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, - cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, - out_obj->buffer.pointer, - min_t(u32, 128, out_obj->buffer.length), true); - for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer, @@ -693,6 +727,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) struct acpi_nfit_memory_map *memdev; struct acpi_nfit_desc *acpi_desc; struct nfit_mem *nfit_mem; + u16 physical_id; mutex_lock(&acpi_desc_lock); 
list_for_each_entry(acpi_desc, &acpi_descs, list) { @@ -700,10 +735,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { memdev = __to_nfit_memdev(nfit_mem); if (memdev->device_handle == device_handle) { + *flags = memdev->flags; + physical_id = memdev->physical_id; mutex_unlock(&acpi_desc->init_mutex); mutex_unlock(&acpi_desc_lock); - *flags = memdev->flags; - return memdev->physical_id; + return physical_id; } } mutex_unlock(&acpi_desc->init_mutex); @@ -1268,19 +1304,30 @@ static ssize_t scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus_descriptor *nd_desc; + struct acpi_nfit_desc *acpi_desc; ssize_t rc = -ENXIO; + bool busy; device_lock(dev); nd_desc = dev_get_drvdata(dev); - if (nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + if (!nd_desc) { + device_unlock(dev); + return rc; + } + acpi_desc = to_acpi_desc(nd_desc); - mutex_lock(&acpi_desc->init_mutex); - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - acpi_desc->scrub_busy - && !acpi_desc->cancel ? "+\n" : "\n"); - mutex_unlock(&acpi_desc->init_mutex); + mutex_lock(&acpi_desc->init_mutex); + busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) + && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); + /* Allow an admin to poll the busy state at a higher rate */ + if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, + &acpi_desc->scrub_flags)) { + acpi_desc->scrub_tmo = 1; + mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); } + + mutex_unlock(&acpi_desc->init_mutex); device_unlock(dev); return rc; } @@ -1303,7 +1350,7 @@ static ssize_t scrub_store(struct device *dev, if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - rc = acpi_nfit_ars_rescan(acpi_desc, 0); + rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); } device_unlock(dev); if (rc) @@ -1764,6 +1811,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, return 0; } + /* + * Function 0 is the command interrogation function, don't + * export it to potential userspace use, and enable it to be + * used as an error value in acpi_nfit_ctl(). 
+ */ + dsm_mask &= ~1UL; + guid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, guid, @@ -2466,7 +2520,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, return cmd_rc; } -static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) +static int ars_start(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) { int rc; int cmd_rc; @@ -2477,7 +2532,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa memset(&ars_start, 0, sizeof(ars_start)); ars_start.address = spa->address; ars_start.length = spa->length; - if (test_bit(ARS_SHORT, &nfit_spa->ars_state)) + if (req_type == ARS_REQ_SHORT) ars_start.flags = ND_ARS_RETURN_PREV_DATA; if (nfit_spa_type(spa) == NFIT_SPA_PM) ars_start.type = ND_ARS_PERSISTENT; @@ -2491,7 +2546,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa if (rc < 0) return rc; - return cmd_rc; + if (cmd_rc < 0) + return cmd_rc; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); + return 0; } static int ars_continue(struct acpi_nfit_desc *acpi_desc) @@ -2501,11 +2559,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc) struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; - memset(&ars_start, 0, sizeof(ars_start)); - ars_start.address = ars_status->restart_address; - ars_start.length = ars_status->restart_length; - ars_start.type = ars_status->type; - ars_start.flags = acpi_desc->ars_start_flags; + ars_start = (struct nd_cmd_ars_start) { + .address = ars_status->restart_address, + .length = ars_status->restart_length, + .type = ars_status->type, + }; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, sizeof(ars_start), &cmd_rc); if (rc < 0) @@ -2534,6 +2592,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc, struct nd_region *nd_region = nfit_spa->nd_region; struct device *dev; + lockdep_assert_held(&acpi_desc->init_mutex); + /* + * Only advance the ARS state for ARS runs initiated by the + * kernel, ignore ARS results from BIOS initiated runs for scrub + * completion tracking. + */ + if (acpi_desc->scrub_spa != nfit_spa) + return; + if ((ars_status->address >= spa->address && ars_status->address < spa->address + spa->length) || (ars_status->address < spa->address)) { @@ -2553,28 +2620,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc, } else return; - if (test_bit(ARS_DONE, &nfit_spa->ars_state)) - return; - - if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state)) - return; - + acpi_desc->scrub_spa = NULL; if (nd_region) { dev = nd_region_dev(nd_region); nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); } else dev = acpi_desc->dev; - - dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index, - test_bit(ARS_SHORT, &nfit_spa->ars_state) - ? 
"short" : "long"); - clear_bit(ARS_SHORT, &nfit_spa->ars_state); - if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) { - set_bit(ARS_SHORT, &nfit_spa->ars_state); - set_bit(ARS_REQ, &nfit_spa->ars_state); - dev_dbg(dev, "ARS: processing scrub request received while in progress\n"); - } else - set_bit(ARS_DONE, &nfit_spa->ars_state); + dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); } static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) @@ -2590,6 +2642,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) */ if (ars_status->out_length < 44) return 0; + + /* + * Ignore potentially stale results that are only refreshed + * after a start-ARS event. + */ + if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { + dev_dbg(acpi_desc->dev, "skip %d stale records\n", + ars_status->num_records); + return 0; + } + for (i = 0; i < ars_status->num_records; i++) { /* only process full records */ if (ars_status->out_length @@ -2747,11 +2810,15 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; - if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) + if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) { ndr_desc->numa_node = acpi_map_pxm_to_online_node( spa->proximity_domain); - else + ndr_desc->target_node = acpi_map_pxm_to_node( + spa->proximity_domain); + } else { ndr_desc->numa_node = NUMA_NO_NODE; + ndr_desc->target_node = NUMA_NO_NODE; + } /* * Persistence domain bits are hierarchical, if @@ -2767,6 +2834,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nd_mapping_desc *mapping; + /* range index 0 == unmapped in SPA or invalid-SPA */ + if (memdev->range_index == 0 || spa->range_index == 0) + continue; if (memdev->range_index != spa->range_index) continue; if (count >= ND_MAX_MAPPINGS) { @@ -2850,51 +2920,61 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) return rc; if (ars_status_process_records(acpi_desc)) - return -ENOMEM; + dev_err(acpi_desc->dev, "Failed to process ARS records\n"); - return 0; + return rc; } -static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, - int *query_rc) +static int ars_register(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa) { - int rc = *query_rc; + int rc; - if (no_init_ars) + if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) return acpi_nfit_register_region(acpi_desc, nfit_spa); - set_bit(ARS_REQ, &nfit_spa->ars_state); - set_bit(ARS_SHORT, &nfit_spa->ars_state); + set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); + if (!no_init_ars) + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); - switch (rc) { + switch (acpi_nfit_query_poison(acpi_desc)) { case 0: + case -ENOSPC: case -EAGAIN: - rc = ars_start(acpi_desc, nfit_spa); - if (rc == -EBUSY) { - *query_rc = rc; + rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); + /* shouldn't happen, try again later */ + if (rc == -EBUSY) break; - } else if (rc == 0) { - rc = acpi_nfit_query_poison(acpi_desc); - } else { + if (rc) { set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } - if (rc == -EAGAIN) - clear_bit(ARS_SHORT, &nfit_spa->ars_state); - else if (rc == 0) - ars_complete(acpi_desc, nfit_spa); + clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); + rc = acpi_nfit_query_poison(acpi_desc); + if (rc) + break; + acpi_desc->scrub_spa = nfit_spa; + ars_complete(acpi_desc, nfit_spa); + /* 
+ * If ars_complete() says we didn't complete the + * short scrub, we'll try again with a long + * request. + */ + acpi_desc->scrub_spa = NULL; break; case -EBUSY: - case -ENOSPC: + case -ENOMEM: + /* + * BIOS was using ARS, wait for it to complete (or + * resources to become available) and then perform our + * own scrubs. + */ break; default: set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } - if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state)) - set_bit(ARS_REQ, &nfit_spa->ars_state); - return acpi_nfit_register_region(acpi_desc, nfit_spa); } @@ -2916,7 +2996,9 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; - if (acpi_desc->cancel) + lockdep_assert_held(&acpi_desc->init_mutex); + + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) return 0; if (query_rc == -EBUSY) { @@ -2939,21 +3021,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, ars_complete_all(acpi_desc); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + enum nfit_ars_state req_type; + int rc; + if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; - if (test_bit(ARS_REQ, &nfit_spa->ars_state)) { - int rc = ars_start(acpi_desc, nfit_spa); - - clear_bit(ARS_DONE, &nfit_spa->ars_state); - dev = nd_region_dev(nfit_spa->nd_region); - dev_dbg(dev, "ARS: range %d ARS start (%d)\n", - nfit_spa->spa->range_index, rc); - if (rc == 0 || rc == -EBUSY) - return 1; - dev_err(dev, "ARS: range %d ARS failed (%d)\n", - nfit_spa->spa->range_index, rc); - set_bit(ARS_FAILED, &nfit_spa->ars_state); + + /* prefer short ARS requests first */ + if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) + req_type = ARS_REQ_SHORT; + else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) + req_type = ARS_REQ_LONG; + else + continue; + rc = ars_start(acpi_desc, nfit_spa, req_type); + + dev = nd_region_dev(nfit_spa->nd_region); + dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", + nfit_spa->spa->range_index, + req_type == ARS_REQ_SHORT ? "short" : "long", + rc); + /* + * Hmm, we raced someone else starting ARS? Try again in + * a bit. 
+ */ + if (rc == -EBUSY) + return 1; + if (rc == 0) { + dev_WARN_ONCE(dev, acpi_desc->scrub_spa, + "scrub start while range %d active\n", + acpi_desc->scrub_spa->spa->range_index); + clear_bit(req_type, &nfit_spa->ars_state); + acpi_desc->scrub_spa = nfit_spa; + /* + * Consider this spa last for future scrub + * requests + */ + list_move_tail(&nfit_spa->list, &acpi_desc->spas); + return 1; } + + dev_err(dev, "ARS: range %d ARS failed (%d)\n", + nfit_spa->spa->range_index, rc); + set_bit(ARS_FAILED, &nfit_spa->ars_state); } return 0; } @@ -2962,7 +3072,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 1; + set_bit(ARS_BUSY, &acpi_desc->scrub_flags); /* note this should only be set from within the workqueue */ if (tmo) acpi_desc->scrub_tmo = tmo; @@ -2978,7 +3088,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 0; + clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); acpi_desc->scrub_count++; if (acpi_desc->scrub_count_state) sysfs_notify_dirent(acpi_desc->scrub_count_state); @@ -2999,6 +3109,7 @@ static void acpi_nfit_scrub(struct work_struct *work) else notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); + clear_bit(ARS_POLL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); } @@ -3009,6 +3120,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap ars_cap; int rc; + set_bit(ARS_FAILED, &nfit_spa->ars_state); memset(&ars_cap, 0, sizeof(ars_cap)); rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); if (rc < 0) @@ -3025,16 +3137,15 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, nfit_spa->clear_err_unit = ars_cap.clear_err_unit; acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); clear_bit(ARS_FAILED, &nfit_spa->ars_state); - set_bit(ARS_REQ, &nfit_spa->ars_state); } static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc, query_rc; + int rc; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - set_bit(ARS_FAILED, &nfit_spa->ars_state); switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: @@ -3043,20 +3154,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) } } - /* - * Reap any results that might be pending before starting new - * short requests. 
- */ - query_rc = acpi_nfit_query_poison(acpi_desc); - if (query_rc == 0) - ars_complete_all(acpi_desc); - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: /* register regions and kick off initial ARS run */ - rc = ars_register(acpi_desc, nfit_spa, &query_rc); + rc = ars_register(acpi_desc, nfit_spa); if (rc) return rc; break; @@ -3251,14 +3354,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, + enum nfit_ars_state req_type) { struct device *dev = acpi_desc->dev; int scheduled = 0, busy = 0; struct nfit_spa *nfit_spa; mutex_lock(&acpi_desc->init_mutex); - if (acpi_desc->cancel) { + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { mutex_unlock(&acpi_desc->init_mutex); return 0; } @@ -3271,14 +3375,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; - if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) { + if (test_and_set_bit(req_type, &nfit_spa->ars_state)) busy++; - set_bit(ARS_REQ_REDO, &nfit_spa->ars_state); - } else { - if (test_bit(ARS_SHORT, &flags)) - set_bit(ARS_SHORT, &nfit_spa->ars_state); + else scheduled++; - } } if (scheduled) { sched_ars(acpi_desc); @@ -3341,9 +3441,9 @@ void acpi_nfit_shutdown(void *data) mutex_unlock(&acpi_desc_lock); mutex_lock(&acpi_desc->init_mutex); - acpi_desc->cancel = 1; - cancel_delayed_work_sync(&acpi_desc->dwork); + set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); + cancel_delayed_work_sync(&acpi_desc->dwork); /* * Bounce the nvdimm bus lock to make sure any in-flight @@ -3464,10 +3564,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); - unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ? - 0 : 1 << ARS_SHORT; - acpi_nfit_ars_rescan(acpi_desc, flags); + if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); + else + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT); } void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index e9626bf6ca2960a2398aeeefc0f4c9e814e60c1b..d6c1b10f6c2542a8cfbbac6dae31246cd35134f7 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, struct acpi_nfit_desc *acpi_desc; struct nfit_spa *nfit_spa; - /* We only care about memory errors */ - if (!mce_is_memory_error(mce)) + /* We only care about uncorrectable memory errors */ + if (!mce_is_memory_error(mce) || mce_is_correctable(mce)) + return NOTIFY_DONE; + + /* Verify the address reported in the MCE is valid. 
*/ + if (!mce_usable_address(mce)) return NOTIFY_DONE; /* diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index d1274ea2d251636115a645ff4ba4ed8eddb12798..0e2f11b1a388caaa9f9ecfcc562770d0c62057e0 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -39,6 +39,7 @@ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT +#define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ @@ -118,10 +119,8 @@ enum nfit_dimm_notifiers { }; enum nfit_ars_state { - ARS_REQ, - ARS_REQ_REDO, - ARS_DONE, - ARS_SHORT, + ARS_REQ_SHORT, + ARS_REQ_LONG, ARS_FAILED, }; @@ -183,6 +182,13 @@ struct nfit_mem { bool has_lsw; }; +enum scrub_flags { + ARS_BUSY, + ARS_CANCEL, + ARS_VALID, + ARS_POLL, +}; + struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; @@ -196,16 +202,15 @@ struct acpi_nfit_desc { struct list_head idts; struct nvdimm_bus *nvdimm_bus; struct device *dev; - u8 ars_start_flags; struct nd_cmd_ars_status *ars_status; + struct nfit_spa *scrub_spa; struct delayed_work dwork; struct list_head list; struct kernfs_node *scrub_count_state; unsigned int max_ars; unsigned int scrub_count; unsigned int scrub_mode; - unsigned int scrub_busy:1; - unsigned int cancel:1; + unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; unsigned long bus_nfit_cmd_force_en; @@ -252,7 +257,8 @@ struct nfit_blk { extern struct list_head acpi_descs; extern struct mutex acpi_desc_lock; -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags); +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, + enum nfit_ars_state req_type); #ifdef CONFIG_X86_MCE void nfit_mce_register(void); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 85167603b9c94318bcef7c260de689c13e4e4545..9d7acf895264ddecf8c98e195a2f9df31302cbdf 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -46,7 +46,7 @@ int acpi_numa __initdata; int pxm_to_node(int pxm) { - if (pxm < 0) + if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off) return NUMA_NO_NODE; return pxm_to_node_map[pxm]; } @@ -85,6 +85,7 @@ int acpi_map_pxm_to_node(int pxm) return node; } +EXPORT_SYMBOL(acpi_map_pxm_to_node); /** * acpi_map_pxm_to_online_node - Map proximity ID to online node @@ -147,9 +148,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) { struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; - pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", - (unsigned long)p->base_address, - (unsigned long)p->length, + pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", + (unsigned long long)p->base_address, + (unsigned long long)p->length, p->proximity_domain, (p->flags & ACPI_SRAT_MEM_ENABLED) ? 
"enabled" : "disabled", @@ -339,7 +340,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) } static int __init -acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, +acpi_parse_x2apic_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_x2apic_cpu_affinity *processor_affinity; @@ -348,7 +349,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, if (!processor_affinity) return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_x2apic_affinity_init(processor_affinity); @@ -357,7 +358,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, } static int __init -acpi_parse_processor_affinity(struct acpi_subtable_header *header, +acpi_parse_processor_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_cpu_affinity *processor_affinity; @@ -366,7 +367,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, if (!processor_affinity) return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_processor_affinity_init(processor_affinity); @@ -375,7 +376,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, } static int __init -acpi_parse_gicc_affinity(struct acpi_subtable_header *header, +acpi_parse_gicc_affinity(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_srat_gicc_affinity *processor_affinity; @@ -384,7 +385,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header, if (!processor_affinity) return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ acpi_numa_gicc_affinity_init(processor_affinity); @@ -395,7 +396,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header, static int __initdata parsed_numa_memblks; static int __init -acpi_parse_memory_affinity(struct acpi_subtable_header * header, +acpi_parse_memory_affinity(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_srat_mem_affinity *memory_affinity; @@ -404,7 +405,7 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, if (!memory_affinity) return -EINVAL; - acpi_table_print_srat_entry(header); + acpi_table_print_srat_entry(&header->common); /* let architecture-dependent part to do it */ if (!acpi_numa_memory_affinity_init(memory_affinity)) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8df9abfa947b0dca4719c674fd645d187ade242d..2261703650125985f9cd0b1900006ca0a5d883a3 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -374,19 +374,21 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) } EXPORT_SYMBOL_GPL(acpi_os_map_memory); -static void acpi_os_drop_map_ref(struct acpi_ioremap *map) +/* Must be called with mutex_lock(&acpi_ioremap_lock) */ +static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map) { - if (!--map->refcount) + unsigned long refcount = --map->refcount; + + if (!refcount) list_del_rcu(&map->list); + return refcount; } static void acpi_os_map_cleanup(struct acpi_ioremap *map) { - if (!map->refcount) { - synchronize_rcu_expedited(); - acpi_unmap(map->phys, map->virt); - kfree(map); - } + synchronize_rcu_expedited(); + acpi_unmap(map->phys, map->virt); + kfree(map); } /** @@ -406,6 +408,7 @@ static void acpi_os_map_cleanup(struct 
acpi_ioremap *map) void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) { struct acpi_ioremap *map; + unsigned long refcount; if (!acpi_permanent_mmap) { __acpi_unmap_table(virt, size); @@ -419,10 +422,11 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); return; } - acpi_os_drop_map_ref(map); + refcount = acpi_os_drop_map_ref(map); mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + if (!refcount) + acpi_os_map_cleanup(map); } EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); @@ -457,6 +461,7 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) { u64 addr; struct acpi_ioremap *map; + unsigned long refcount; if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) return; @@ -472,10 +477,11 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) mutex_unlock(&acpi_ioremap_lock); return; } - acpi_os_drop_map_ref(map); + refcount = acpi_os_drop_map_ref(map); mutex_unlock(&acpi_ioremap_lock); - acpi_os_map_cleanup(map); + if (!refcount) + acpi_os_map_cleanup(map); } EXPORT_SYMBOL(acpi_os_unmap_generic_address); @@ -617,15 +623,18 @@ void acpi_os_stall(u32 us) } /* - * Support ACPI 3.0 AML Timer operand - * Returns 64-bit free-running, monotonically increasing timer - * with 100ns granularity + * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running, + * monotonically increasing timer with 100ns granularity. Do not use + * ktime_get() to implement this function because this function may get + * called after timekeeping has been suspended. Note: calling this function + * after timekeeping has been suspended may lead to unexpected results + * because when timekeeping is suspended the jiffies counter is not + * incremented. See also timekeeping_suspend(). */ u64 acpi_os_get_timer(void) { - u64 time_ns = ktime_to_ns(ktime_get()); - do_div(time_ns, 100); - return time_ns; + return (get_jiffies_64() - INITIAL_JIFFIES) * + (ACPI_100NSEC_PER_SEC / HZ); } acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) @@ -1129,6 +1138,7 @@ void acpi_os_wait_events_complete(void) flush_workqueue(kacpid_wq); flush_workqueue(kacpi_notify_wq); } +EXPORT_SYMBOL(acpi_os_wait_events_complete); struct acpi_hp_work { struct work_struct work; diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index c576a6fe4ebb3044fc8f268fe338d3a50084cd6f..94ded9513c73b0bede043e85448251813b32806c 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. */ - if (!acpi_pci_irq_valid(dev, pin)) + if (!acpi_pci_irq_valid(dev, pin)) { + kfree(entry); return 0; + } if (acpi_isa_register_gsi(dev)) dev_warn(&dev->dev, "PCI INT %c: no GSI\n", diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 7433035ded95543e13f67fdc23a42667dcdd6be3..f39ab35e507dc5179b28d8ab6ddecbd3e0c15c7c 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -455,8 +455,9 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) decode_osc_support(root, "OS supports", support); status = acpi_pci_osc_support(root, support); if (ACPI_FAILURE(status)) { - dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n", - acpi_format_exception(status)); + dev_info(&device->dev, "_OSC failed (%s)%s\n", + acpi_format_exception(status), + pcie_aspm_support_enabled() ? 
"; disabling ASPM" : ""); *no_aspm = 1; return; } @@ -883,6 +884,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, int node = acpi_get_node(device->handle); struct pci_bus *bus; struct pci_host_bridge *host_bridge; + union acpi_object *obj; info->root = root; info->bridge = device; @@ -919,6 +921,17 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL)) host_bridge->native_ltr = 0; + /* + * Evaluate the "PCI Boot Configuration" _DSM Function. If it + * exists and returns 0, we must preserve any PCI resource + * assignments made by firmware for this host bridge. + */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1, + IGNORE_PCI_BOOT_CONFIG_DSM, NULL); + if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0) + host_bridge->preserve_config = 1; + ACPI_FREE(obj); + pci_scan_child_bus(bus); pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info, info); diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 316e55174aa970093fb0d26083dc2b3fb0acf408..bb5391f59b8b569198d807a3efd6e1f2ee5c6636 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -27,8 +27,11 @@ #define GPI1_LDO_ON (3 << 0) #define GPI1_LDO_OFF (4 << 0) -#define AXP288_ADC_TS_PIN_GPADC 0xf2 -#define AXP288_ADC_TS_PIN_ON 0xf3 +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) static struct pmic_table power_table[] = { { @@ -211,22 +214,44 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg, */ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) { + int ret, adc_ts_pin_ctrl; u8 buf[2]; - int ret; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, - AXP288_ADC_TS_PIN_GPADC); + /* + * The current-source used for the battery temp-sensor (TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporary switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + * + * Note that the switching from on to on-ondemand is not necessary + * when the TS current-source is off (this happens on devices which + * do not use the TS-pin). 
+ */ + ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl); if (ret) return ret; - /* After switching to the GPADC pin give things some time to settle */ - usleep_range(6000, 10000); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON_ONDEMAND); + if (ret) + return ret; + + /* Wait a bit after switching the current-source */ + usleep_range(6000, 10000); + } ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); if (ret == 0) ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); - regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { + regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } return ret; } diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c index a083de507009e6dcf22ab4726c555260d62319f4..564215948b5961116c2b3b0a03fad455015f10bd 100644 --- a/drivers/acpi/pmic/tps68470_pmic.c +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -376,7 +376,7 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev) struct tps68470_pmic_opregion *opregion; acpi_status status; - if (!dev || !tps68470_regmap) { + if (!tps68470_regmap) { dev_warn(dev, "dev or regmap is NULL\n"); return -EINVAL; } diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 1b475bc1ae169cb904f0c33cd32bd0600b520748..665e93ca0b40fb7b0d1b009b87fb1be64f45342f 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list) } } +static bool acpi_power_resource_is_dup(union acpi_object *package, + unsigned int start, unsigned int i) +{ + acpi_handle rhandle, dup; + unsigned int j; + + /* The caller is expected to check the package element types */ + rhandle = package->package.elements[i].reference.handle; + for (j = start; j < i; j++) { + dup = package->package.elements[j].reference.handle; + if (dup == rhandle) + return true; + } + + return false; +} + int acpi_extract_power_resources(union acpi_object *package, unsigned int start, struct list_head *list) { @@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, err = -ENODEV; break; } + + /* Some ACPI tables contain duplicate power resource references */ + if (acpi_power_resource_is_dup(package, start, i)) + continue; + err = acpi_add_power_resource(rhandle); if (err) break; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index d1e26cb599bfca340e076500b9e27ec2f3c0bc73..879b9155b7b4a0832ddad332c5e32b7ac6ee71ab 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -277,6 +277,94 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } + +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache, find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor nodes may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this.
+ */ +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_subtable_header *entry; + struct acpi_subtable_header *res; + unsigned long table_end; + u32 proc_sz; + int i; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor structure which points at this cache */ + while ((unsigned long)entry + proc_sz < table_end) { + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + + cpu_node = (struct acpi_pptt_processor *)entry; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR) + continue; + + for (i = 0; i < cpu_node->number_of_priv_resources; i++) { + res = acpi_get_pptt_resource(table_hdr, cpu_node, i); + if (&cache->header == res) + return cpu_node; + } + } + + return NULL; +} + +/** + * acpi_pptt_validate_cache_node() - Given an offset in the table, check this is + * a cache node. + * Used for cross-table pointers. + * + * Return the cache pointer for a valid cache, or NULL. + */ +struct acpi_pptt_cache * +acpi_pptt_validate_cache_node(struct acpi_table_header *table_hdr, u32 offset) +{ + struct acpi_subtable_header *entry, *cache; + unsigned long table_end; + + if ((offset < sizeof(*table_hdr)) || (offset >= table_hdr->length)) + return NULL; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + + cache = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, offset); + + /* Walk every node to check offset is on a node boundary */ + while ((unsigned long)(entry + 1) < table_end) { + if (entry->length == 0) { + pr_err("Invalid zero length subtable\n"); + break; + } + if ((entry->type == ACPI_PPTT_TYPE_CACHE) && (entry == cache)) + return (struct acpi_pptt_cache *)entry; + + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + return NULL; +} + + static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, u32 acpi_cpu_id) { @@ -338,8 +426,60 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta return found; } -/* total number of attributes checked by the properties code */ -#define PPTT_CHECKED_ATTRIBUTES 4 +/** + * acpi_pptt_find_min_physid_cpu_node() - Recursively find @min_physid for all + * leaf CPUs below @cpu_node. + * @table_hdr: Pointer to the head of the PPTT table + * @cpu_node: The point in the topology to start the walk + * @min_physid: The min_physid to update with leaf CPUs. + * @min_cpu_node: The min_cpu_node to update with leaf CPUs. + */ +void acpi_pptt_find_min_physid_cpu_node(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + phys_cpuid_t *min_physid, + struct acpi_pptt_processor **min_cpu_node) +{ + bool leaf = true; + u32 acpi_processor_id; + phys_cpuid_t cpu_node_phys_id; + struct acpi_subtable_header *iter; + struct acpi_pptt_processor *iter_node = NULL; + u32 target_node = ACPI_PTR_DIFF(cpu_node, table_hdr); + u32 proc_sz = sizeof(struct acpi_pptt_processor *); + unsigned long table_end = (unsigned long)table_hdr + table_hdr->length; + + /* + * Walk the PPTT, looking for nodes that reference cpu_node + * as parent.
+ */ + iter = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + + while ((unsigned long)iter + proc_sz < table_end) { + iter_node = (struct acpi_pptt_processor *)iter; + + if (iter->type == ACPI_PPTT_TYPE_PROCESSOR && + iter_node->parent == target_node) { + leaf = false; + acpi_pptt_find_min_physid_cpu_node(table_hdr, iter_node, + min_physid, min_cpu_node); + } + + if (iter->length == 0) + return; + iter = ACPI_ADD_PTR(struct acpi_subtable_header, iter, + iter->length); + } + + acpi_processor_id = cpu_node->acpi_processor_id; + cpu_node_phys_id = acpi_id_to_phys_cpuid(acpi_processor_id); + if (!invalid_phys_cpuid(cpu_node_phys_id) && + *min_physid > cpu_node_phys_id && + leaf == true) { + *min_physid = cpu_node_phys_id; + *min_cpu_node = cpu_node; + } +} /** * update_cache_properties() - Update cacheinfo for the given processor @@ -357,25 +497,15 @@ static void update_cache_properties(struct cacheinfo *this_leaf, struct acpi_pptt_cache *found_cache, struct acpi_pptt_processor *cpu_node) { - int valid_flags = 0; - this_leaf->fw_token = cpu_node; - if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) { + if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) this_leaf->size = found_cache->size; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) { + if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) this_leaf->coherency_line_size = found_cache->line_size; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) { + if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) this_leaf->number_of_sets = found_cache->number_of_sets; - valid_flags++; - } - if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) { + if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) this_leaf->ways_of_associativity = found_cache->associativity; - valid_flags++; - } if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) { switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) { case ACPI_PPTT_CACHE_POLICY_WT: @@ -402,11 +532,17 @@ static void update_cache_properties(struct cacheinfo *this_leaf, } } /* - * If the above flags are valid, and the cache type is NOCACHE - * update the cache type as well. + * If cache type is NOCACHE, then the cache hasn't been specified + * via other mechanisms. Update the type if a cache type has been + * provided. + * + * Note, we assume such caches are unified based on conventional system + * design and known examples. Significant work is required elsewhere to + * fully support data/instruction only type caches which are only + * specified in PPTT. 
*/ if (this_leaf->type == CACHE_TYPE_NOCACHE && - valid_flags == PPTT_CHECKED_ATTRIBUTES) + found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID) this_leaf->type = CACHE_TYPE_UNIFIED; } @@ -436,17 +572,40 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table, } } +static bool flag_identical(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu) +{ + struct acpi_pptt_processor *next; + + /* heterogeneous machines must use PPTT revision > 1 */ + if (table_hdr->revision < 2) + return false; + + /* Locate the last node in the tree with IDENTICAL set */ + if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) { + next = fetch_pptt_node(table_hdr, cpu->parent); + if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL)) + return true; + } + + return false; +} + /* Passing level values greater than this will result in search termination */ #define PPTT_ABORT_PACKAGE 0xFF -static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu, - int level, int flag) +static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu, + int level, int flag) { struct acpi_pptt_processor *prev_node; while (cpu && level) { - if (cpu->flags & flag) + /* special case the identical flag to find last identical */ + if (flag == ACPI_PPTT_ACPI_IDENTICAL) { + if (flag_identical(table_hdr, cpu)) + break; + } else if (cpu->flags & flag) break; pr_debug("level %d\n", level); prev_node = fetch_pptt_node(table_hdr, cpu->parent); @@ -479,8 +638,8 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, cpu_node = acpi_find_processor_node(table, acpi_cpu_id); if (cpu_node) { - cpu_node = acpi_find_processor_package_id(table, cpu_node, - level, flag); + cpu_node = acpi_find_processor_tag(table, cpu_node, + level, flag); /* * As per specification if the processor structure represents * an actual processor, then ACPI processor ID must be valid. @@ -516,6 +675,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) return retval; } +/** + * check_acpi_cpu_flag() - Determine if CPU node has a flag set + * @cpu: Kernel logical CPU number + * @rev: The minimum PPTT revision defining the flag + * @flag: The flag itself + * + * Check the node representing a CPU for a given flag. + * + * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. 
+ 1, any passed flag set + 0, flag unset + */ +static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) +{ + struct acpi_table_header *table; + acpi_status status; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_processor *cpu_node = NULL; + int ret = -ENOENT; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n"); + return ret; + } + + if (table->revision >= rev) + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + + if (cpu_node) + ret = (cpu_node->flags & flag) != 0; + + acpi_put_table(table); + + return ret; +} + /** * acpi_find_last_cache_level() - Determines the number of cache levels for a PE * @cpu: Kernel logical cpu number @@ -580,6 +777,20 @@ int cache_setup_acpi(unsigned int cpu) return status; } +/** + * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread + * @cpu: Kernel logical CPU number + * + * Return: 1, a thread + * 0, not a thread + * -ENOENT if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. + */ +int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD); +} + /** * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu * @cpu: Kernel logical cpu number @@ -659,3 +870,29 @@ int find_acpi_cpu_topology_package(unsigned int cpu) return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, ACPI_PPTT_PHYSICAL_PACKAGE); } + +/** + * find_acpi_cpu_topology_hetero_id() - Get a core architecture tag + * @cpu: Kernel logical CPU number + * + * Determine a unique heterogeneous tag for the given CPU. CPUs with the same + * implementation should have matching tags. + * + * The returned tag can be used to group peers with identical implementation. + * + * The search terminates when a level is found with the identical implementation + * flag set or we reach a root node. + * + * Due to limitations in the PPTT data structure, there may be rare situations + * where two cores in a heterogeneous machine may be identical, but won't have + * the same tag. + * + * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found. + * Otherwise returns a value which represents a group of identical cores + * similar to this CPU.
+ */ +int find_acpi_cpu_topology_hetero_id(unsigned int cpu) +{ + return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, + ACPI_PPTT_ACPI_IDENTICAL); +} diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 8c0a54d50d0e9b90eb7b7bf4d7e0ed2e9f97ee9b..333547bf784591ca2454c5455493f1f95eb540b8 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -9,7 +9,9 @@ * Yinghai Lu * Jiang Liu */ +#include #include +#include #include #include @@ -263,6 +265,20 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) } EXPORT_SYMBOL_GPL(acpi_get_cpuid); +phys_cpuid_t acpi_id_to_phys_cpuid(u32 acpi_id) +{ + int cpu; + struct acpi_processor *pr; + + for_each_possible_cpu(cpu) { + pr = per_cpu(processors, cpu); + if (pr && pr->acpi_id == acpi_id) + return pr->phys_id; + } + + return PHYS_CPUID_INVALID; +} + #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, u64 *phys_addr, int *ioapic_id) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index abb559cd28d793d052b6408e606469538e80c65b..190cf402f1c88cb394f8744591be07b9906cb97c 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -205,9 +205,11 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, static void tsc_check_state(int state) { switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: /* * AMD Fam10h TSC will tick in all * C/P/S0/S1 states when this bit is set. @@ -303,164 +305,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - } - - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } + /* + * It is expected that there will be at least 2 states, C1 and + * something else (C2 or C3), so fail if that is not the case. + */ + if (pr->power.count < 2) + return -EFAULT; - /* Tell driver that at least _CST is supported. 
*/ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -544,10 +406,33 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, return; } +static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length) +{ + int i, j, k; + + for (i = 1; i < length; i++) { + if (!states[i].valid) + continue; + + for (j = i - 1, k = i; j >= 0; j--) { + if (!states[j].valid) + continue; + + if (states[j].latency > states[k].latency) + swap(states[j].latency, states[k].latency); + + k = j; + } + } +} + static int acpi_processor_power_verify(struct acpi_processor *pr) { unsigned int i; unsigned int working = 0; + unsigned int last_latency = 0; + unsigned int last_type = 0; + bool buggy_latency = false; pr->power.timer_broadcast_on_state = INT_MAX; @@ -571,12 +456,21 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) } if (!cx->valid) continue; + if (cx->type >= last_type && cx->latency < last_latency) + buggy_latency = true; + last_latency = cx->latency; + last_type = cx->type; lapic_timer_check_state(i, pr, cx); tsc_check_state(cx->type); working++; } + if (buggy_latency) { + pr_notice("FW issue: working around C-state latencies out of order\n"); + acpi_cst_latency_sort(&pr->power.states[1], max_cstate); + } + lapic_timer_propagate_broadcast(pr); return (working); @@ -904,7 +798,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -916,13 +809,10 @@ static inline void acpi_processor_cstate_first_run_checks(void) max_cstate); first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else @@ -1490,6 +1380,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr) acpi_processor_registered--; if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + kfree(dev); } pr->flags.power_setup_done = 0; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 693cf05b0cc44ffba54427fa12abf3017830e0e1..b267d00f5c0f7c911ccb3a4699f6e273cada80b2 100644 --- a/drivers/acpi/property.c +++ 
b/drivers/acpi/property.c @@ -566,6 +566,7 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode, * @index: Index of the reference to return * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments + * (may be NULL) * * Find property with @name, verifify that it is a package containing at least * one object reference and if so, store the ACPI device object pointer to the @@ -618,12 +619,15 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, */ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { if (index) - return -EINVAL; + return -ENOENT; ret = acpi_bus_get_device(obj->reference.handle, &device); if (ret) return ret == -ENODEV ? -EINVAL : ret; + if (!args) + return 0; + args->fwnode = acpi_fwnode_handle(device); args->nargs = 0; return 0; @@ -720,9 +724,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, const union acpi_object *obj; int ret; - if (!val) - return -EINVAL; - if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj); if (ret) @@ -732,28 +733,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, case DEV_PROP_U8: if (obj->integer.value > U8_MAX) return -EOVERFLOW; - *(u8 *)val = obj->integer.value; + + if (val) + *(u8 *)val = obj->integer.value; + break; case DEV_PROP_U16: if (obj->integer.value > U16_MAX) return -EOVERFLOW; - *(u16 *)val = obj->integer.value; + + if (val) + *(u16 *)val = obj->integer.value; + break; case DEV_PROP_U32: if (obj->integer.value > U32_MAX) return -EOVERFLOW; - *(u32 *)val = obj->integer.value; + + if (val) + *(u32 *)val = obj->integer.value; + break; default: - *(u64 *)val = obj->integer.value; + if (val) + *(u64 *)val = obj->integer.value; + break; } + + if (!val) + return 1; } else if (proptype == DEV_PROP_STRING) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj); if (ret) return ret; - *(char **)val = obj->string.pointer; + if (val) + *(char **)val = obj->string.pointer; return 1; } else { @@ -767,7 +783,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, { int ret; - if (!adev) + if (!adev || !val) return -EINVAL; ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val); @@ -861,10 +877,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, const union acpi_object *items; int ret; - if (val && nval == 1) { + if (nval == 1 || !val) { ret = acpi_data_prop_read_single(data, propname, proptype, val); - if (ret >= 0) + /* + * The overflow error means that the property is there and it is + * single-value, but its type does not match, so return. + */ + if (ret >= 0 || ret == -EOVERFLOW) return ret; + + /* + * Reading this property as a single-value one failed, but its + * value may still be represented as one-element array, so + * continue. + */ } ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj); @@ -975,6 +1001,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, const struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; + /* + * We can have a combination of device and data nodes, e.g. with + * hierarchical _DSD properties. 
Make sure the adev pointer is + * restored before going through data nodes, otherwise we will + * be looking for data_nodes below the last device found instead + * of the common fwnode shared by device_nodes and data_nodes. + */ + adev = to_acpi_device_node(fwnode); if (adev) head = &adev->data.subnodes; else if (data) diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 316a0fc785e3629a4bdd6dadd031cbe6691cec70..d3f9a320e880ea7e536ed7bfcf7b4b7401c3635c 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -549,7 +549,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, ret = c->preproc(ares, c->preproc_data); if (ret < 0) { c->error = ret; - return AE_CTRL_TERMINATE; + return AE_ABORT_METHOD; } else if (ret > 0) { return AE_OK; } diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 295b59271189df46bf77296b78fdad1ec3e74fc7..96c5e27967f4f131078a8c5f3297a211f84adab9 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -441,9 +441,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs) /* * The spec requires that bit 4 always be 1. If it's not set, assume - * that the implementation doesn't support an SBS charger + * that the implementation doesn't support an SBS charger. + * + * And on some MacBooks a status of 0xffff is always returned, no + * matter whether the charger is plugged in or not, which is also + * wrong, so ignore the SBS charger for those too. */ - if (!((status >> 4) & 0x1)) + if (!((status >> 4) & 0x1) || status == 0xffff) return -ENODEV; sbs->charger_present = (status >> 15) & 0x1; diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 7a3431018e0ab4ce96dc055abf31444e498b04bb..5008ead4609a46edafe73b9481b9912c71efc464 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc) hc->callback = NULL; hc->context = NULL; mutex_unlock(&hc->lock); + acpi_os_wait_events_complete(); return 0; } @@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device) hc = acpi_driver_data(device); acpi_ec_remove_query_handler(hc->ec, hc->query_bit); + acpi_os_wait_events_complete(); kfree(hc); device->driver_data = NULL; return 0; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e1b6231cfa1c5b642e3b0ee78e81ff1c01278bb3..42ea4f784fa5ad3c8ed5c1b3ed7524d760103ceb 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device) acpi_device_bus_id->instance_no--; else { list_del(&acpi_device_bus_id->node); + kfree_const(acpi_device_bus_id->bus_id); kfree(acpi_device_bus_id); } break; @@ -585,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, if (!device) return -EINVAL; + *device = NULL; + status = acpi_get_data_full(handle, acpi_scan_drop_device, (void **)device, callback); if (ACPI_FAILURE(status) || !*device) { @@ -674,7 +677,14 @@ int acpi_device_add(struct acpi_device *device, } if (!found) { acpi_device_bus_id = new_bus_id; - strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); + acpi_device_bus_id->bus_id = + kstrdup_const(acpi_device_hid(device), GFP_KERNEL); + if (!acpi_device_bus_id->bus_id) { + pr_err(PREFIX "Memory allocation error for bus id\n"); + result = -ENOMEM; + goto err_free_new_bus_id; + } + acpi_device_bus_id->instance_no = 0; list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); } @@ -709,6 +719,11 @@ int acpi_device_add(struct acpi_device *device, if 
(device->parent) list_del(&device->node); list_del(&device->wakeup_list); + + err_free_new_bus_id: + if (!found) + kfree(new_bus_id); + mutex_unlock(&acpi_device_lock); err_detach: @@ -1550,6 +1565,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) */ static const struct acpi_device_id i2c_multi_instantiate_ids[] = { {"BSG1160", }, + {"INT33FE", }, {} }; @@ -2243,10 +2259,10 @@ static struct acpi_probe_entry *ape; static int acpi_probe_count; static DEFINE_MUTEX(acpi_probe_mutex); -static int __init acpi_match_madt(struct acpi_subtable_header *header, +static int __init acpi_match_madt(union acpi_subtable_headers *header, const unsigned long end) { - if (!ape->subtable_valid || ape->subtable_valid(header, ape)) + if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape)) if (!ape->probe_subtbl(header, end)) acpi_probe_count++; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 754d59f95500497b5dd914323f04926f253593ac..847db3edcb5b8e6f26c70379eaa131897e59961a 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -940,6 +940,8 @@ static int lps0_device_attach(struct acpi_device *adev, acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", bitmask); + + acpi_ec_mark_gpe_for_wake(); } else { acpi_handle_debug(adev->handle, "_DSM function 0 evaluation failed\n"); @@ -968,11 +970,18 @@ static int acpi_s2idle_prepare(void) if (lps0_device_handle) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY); + + acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE); } if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + acpi_enable_wakeup_devices(ACPI_STATE_S0); + + /* Change the configuration of GPEs to avoid spurious wakeup. */ + acpi_enable_all_wakeup_gpes(); + acpi_os_wait_events_complete(); return 0; } @@ -1017,10 +1026,16 @@ static void acpi_s2idle_sync(void) static void acpi_s2idle_restore(void) { + acpi_enable_all_runtime_gpes(); + + acpi_disable_wakeup_devices(ACPI_STATE_S0); + if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); if (lps0_device_handle) { + acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE); + acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT); acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON); } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 9d52743080a4f65200ea6e33f0d0d2fefdb881ea..c336784d0bcbeabd86a7bd27150342058b3c2af0 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -148,6 +148,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) } switch (table->baud_rate) { + case 0: + /* + * SPCR 1.04 defines 0 as a preconfigured state of UART. + * Assume firmware or bootloader configures console correctly. 
+ */ + baud_rate = 0; + break; case 3: baud_rate = 9600; break; @@ -196,6 +203,10 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) * UART so don't attempt to change to the baud rate state * in the table because driver cannot calculate the dividers */ + baud_rate = 0; + } + + if (!baud_rate) { snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype, table->serial_port.address); } else { diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 41324f0b1bee26b73a55ddc027fb8f52928a31a5..f0c64b6837d3e4d240ab3f237a4fcc123362b12c 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, { struct acpi_data_attr *data_attr; void __iomem *base; - ssize_t rc; + ssize_t size; data_attr = container_of(bin_attr, struct acpi_data_attr, attr); + size = data_attr->attr.size; + + if (offset < 0) + return -EINVAL; + + if (offset >= size) + return 0; + + if (count > size - offset) + count = size - offset; - base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); + base = acpi_os_map_iomem(data_attr->addr, size); if (!base) return -ENOMEM; - rc = memory_read_from_buffer(buf, count, &offset, base, - data_attr->attr.size); - acpi_os_unmap_memory(base, data_attr->attr.size); - return rc; + memcpy_fromio(buf, base + offset, count); + + acpi_os_unmap_iomem(base, size); + + return count; } static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) @@ -816,14 +827,14 @@ static ssize_t counter_set(struct kobject *kobj, * interface: * echo unmask > /sys/firmware/acpi/interrupts/gpe00 */ -#define ACPI_MASKABLE_GPE_MAX 0xFF +#define ACPI_MASKABLE_GPE_MAX 0x100 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata; static int __init acpi_gpe_set_masked_gpes(char *val) { u8 gpe; - if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX) + if (kstrtou8(val, 0, &gpe)) return -EINVAL; set_bit(gpe, acpi_masked_gpes_map); @@ -835,7 +846,7 @@ void __init acpi_gpe_apply_masked_gpes(void) { acpi_handle handle; acpi_status status; - u8 gpe; + u16 gpe; for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) { status = acpi_get_gpe_device(gpe, &handle); @@ -990,8 +1001,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, error = kobject_init_and_add(&hotplug->kobj, &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); - if (error) + if (error) { + kobject_put(&hotplug->kobj); goto err_out; + } kobject_uevent(&hotplug->kobj, KOBJ_ADD); return; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index a3d012b08fc5cc982e019f7049e90458012c8419..39fda61fba32d21d0d187bbbc41e074abb31ef70 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -50,6 +50,16 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata; static int acpi_apic_instance __initdata; +enum acpi_subtable_type { + ACPI_SUBTABLE_COMMON, + ACPI_SUBTABLE_HMAT, +}; + +struct acpi_subtable_entry { + union acpi_subtable_headers *hdr; + enum acpi_subtable_type type; +}; + /* * Disable table checksum verification for the early stage due to the size * limitation of the current x86 early mapping implementation. 
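/*
 * Editorial sketch, not part of the patch above: a minimal illustration of
 * why the sysfs GPE-mask changes grow ACPI_MASKABLE_GPE_MAX from 0xFF to
 * 0x100 and widen the loop variable to u16. A u8 GPE number can be
 * 0x00..0xFF, so the bitmap needs 256 bits, and the kstrtou8() parse alone
 * already guarantees the value is in range. for_each_set_bit() signals
 * "no more bits" by letting the iterator reach the bitmap size itself
 * (256 here), a value a u8 cannot hold, hence the u16 iterator. All
 * example_* names below are hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_GPE_MAX 0x100		/* room for GPE numbers 0x00..0xFF */
static DECLARE_BITMAP(example_masked_gpes, EXAMPLE_GPE_MAX);

static int example_mask_gpe(const char *val)
{
	u8 gpe;

	/* kstrtou8() rejects anything outside 0..0xFF, so no extra bound check */
	if (kstrtou8(val, 0, &gpe))
		return -EINVAL;
	set_bit(gpe, example_masked_gpes);
	return 0;
}

static void example_apply_masked_gpes(void)
{
	u16 gpe;	/* must be able to hold EXAMPLE_GPE_MAX (256) at loop exit */

	for_each_set_bit(gpe, example_masked_gpes, EXAMPLE_GPE_MAX)
		pr_info("masking GPE 0x%02x\n", gpe);
}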
@@ -218,6 +228,50 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) } } +static unsigned long __init +acpi_get_entry_type(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return entry->hdr->common.type; + case ACPI_SUBTABLE_HMAT: + return entry->hdr->hmat.type; + } + return 0; +} + +static unsigned long __init +acpi_get_entry_length(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return entry->hdr->common.length; + case ACPI_SUBTABLE_HMAT: + return entry->hdr->hmat.length; + } + return 0; +} + +static unsigned long __init +acpi_get_subtable_header_length(struct acpi_subtable_entry *entry) +{ + switch (entry->type) { + case ACPI_SUBTABLE_COMMON: + return sizeof(entry->hdr->common); + case ACPI_SUBTABLE_HMAT: + return sizeof(entry->hdr->hmat); + } + return 0; +} + +static enum acpi_subtable_type __init +acpi_get_subtable_type(char *id) +{ + if (strncmp(id, ACPI_SIG_HMAT, 4) == 0) + return ACPI_SUBTABLE_HMAT; + return ACPI_SUBTABLE_COMMON; +} + /** * acpi_parse_entries_array - for each proc_num find a suitable subtable * @@ -247,8 +301,8 @@ acpi_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries) { - struct acpi_subtable_header *entry; - unsigned long table_end; + struct acpi_subtable_entry entry; + unsigned long table_end, subtable_len, entry_len; int count = 0; int errs = 0; int i; @@ -271,19 +325,20 @@ acpi_parse_entries_array(char *id, unsigned long table_size, /* Parse all entries looking for a match. */ - entry = (struct acpi_subtable_header *) + entry.type = acpi_get_subtable_type(id); + entry.hdr = (union acpi_subtable_headers *) ((unsigned long)table_header + table_size); + subtable_len = acpi_get_subtable_header_length(&entry); - while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) < - table_end) { + while (((unsigned long)entry.hdr) + subtable_len < table_end) { if (max_entries && count >= max_entries) break; for (i = 0; i < proc_num; i++) { - if (entry->type != proc[i].id) + if (acpi_get_entry_type(&entry) != proc[i].id) continue; if (!proc[i].handler || - (!errs && proc[i].handler(entry, table_end))) { + (!errs && proc[i].handler(entry.hdr, table_end))) { errs++; continue; } @@ -298,13 +353,14 @@ acpi_parse_entries_array(char *id, unsigned long table_size, * If entry->length is 0, break from this loop to avoid * infinite loop. 
*/ - if (entry->length == 0) { + entry_len = acpi_get_entry_length(&entry); + if (entry_len == 0) { pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id); return -EINVAL; } - entry = (struct acpi_subtable_header *) - ((unsigned long)entry + entry->length); + entry.hdr = (union acpi_subtable_headers *) + ((unsigned long)entry.hdr + entry_len); } if (max_entries && count > max_entries) { @@ -713,6 +769,11 @@ acpi_os_physical_table_override(struct acpi_table_header *existing_table, table_length); } +#ifdef CONFIG_ACPI_CUSTOM_DSDT +static void *amlcode __attribute__ ((weakref("AmlCode"))); +static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code"))); +#endif + acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, struct acpi_table_header **new_table) @@ -723,8 +784,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table, *new_table = NULL; #ifdef CONFIG_ACPI_CUSTOM_DSDT - if (strncmp(existing_table->signature, "DSDT", 4) == 0) - *new_table = (struct acpi_table_header *)AmlCode; + if (!strncmp(existing_table->signature, "DSDT", 4)) { + *new_table = (struct acpi_table_header *)&amlcode; + if (!(*new_table)) + *new_table = (struct acpi_table_header *)&dsdt_amlcode; + } #endif if (*new_table != NULL) acpi_table_taint(existing_table); @@ -732,7 +796,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table, } /* - * acpi_table_init() + * acpi_locate_initial_tables() * * find RSDP, find and checksum SDT/XSDT. * checksum all tables, print SDT/XSDT @@ -740,7 +804,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table, * result: sdt_entry[] is initialized */ -int __init acpi_table_init(void) +int __init acpi_locate_initial_tables(void) { acpi_status status; @@ -755,9 +819,45 @@ int __init acpi_table_init(void) status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); if (ACPI_FAILURE(status)) return -EINVAL; - acpi_table_initrd_scan(); + return 0; +} + +void __init acpi_reserve_initial_tables(void) +{ + int i; + + for (i = 0; i < ACPI_MAX_TABLES; i++) { + struct acpi_table_desc *table_desc = &initial_tables[i]; + u64 start = table_desc->address; + u64 size = table_desc->length; + + if (!start || !size) + break; + + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", + table_desc->signature.ascii, start, start + size - 1); + + memblock_reserve(start, size); + } +} + +void __init acpi_table_init_complete(void) +{ + acpi_table_initrd_scan(); check_multiple_madt(); +} + +int __init acpi_table_init(void) +{ + int ret; + + ret = acpi_locate_initial_tables(); + if (ret) + return ret; + + acpi_table_init_complete(); + return 0; } diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 78db97687f26a1512130ffadda01f3372dd4ae34..c4b06cc075f937f8a4c8b4c7b76cb3344b25d0f0 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) match.hrv = hrv; dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + put_device(dev); return !!dev; } EXPORT_SYMBOL(acpi_dev_present); diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 06c31ec3cc705d0abb4f86da90e64a3930c2154d..9a8e286dd86fde392751864044ca09be170e0ce8 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -54,7 +54,7 @@ static const struct always_present_id always_present_ids[] = { * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. 
*/ - ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}), + ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}), ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), /* * The INT0002 device is necessary to clear wakeup interrupt sources diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d58763b6b009018e9a70d59db57acb44930f6c01..266ce581e75176e471ac290a19e742f13ceb7d59 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -71,6 +71,8 @@ #include #include #include +#include +#include #include @@ -115,10 +117,6 @@ BINDER_DEBUG_ENTRY(proc); #define SZ_1K 0x400 #endif -#ifndef SZ_4M -#define SZ_4M 0x400000 -#endif - #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) enum { @@ -285,7 +283,7 @@ struct binder_device { struct binder_work { struct list_head entry; - enum { + enum binder_work_type { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_RETURN_ERROR, @@ -354,6 +352,8 @@ struct binder_error { * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @txn_security_ctx: require sender's security context + * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -390,6 +390,7 @@ struct binder_node { * invariant after initialization */ u8 accept_fds:1; + u8 txn_security_ctx:1; u8 min_priority; }; bool has_async_transaction; @@ -457,9 +458,8 @@ struct binder_ref { }; enum binder_deferred_state { - BINDER_DEFERRED_PUT_FILES = 0x01, - BINDER_DEFERRED_FLUSH = 0x02, - BINDER_DEFERRED_RELEASE = 0x04, + BINDER_DEFERRED_FLUSH = 0x01, + BINDER_DEFERRED_RELEASE = 0x02, }; /** @@ -480,9 +480,9 @@ enum binder_deferred_state { * (invariant after initialized) * @tsk task_struct for group_leader of process * (invariant after initialized) - * @files files_struct for process - * (protected by @files_lock) - * @files_lock mutex to protect @files + * @cred struct cred associated with the `struct file` + * in binder_open() + * (invariant after initialized) * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -527,8 +527,7 @@ struct binder_proc { struct list_head waiting_threads; int pid; struct task_struct *tsk; - struct files_struct *files; - struct mutex files_lock; + const struct cred *cred; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; @@ -611,6 +610,23 @@ struct binder_thread { bool is_dead; }; +/** + * struct binder_txn_fd_fixup - transaction fd fixup list element + * @fixup_entry: list entry + * @file: struct file to be associated with new fd + * @offset: offset in buffer data to this fixup + * + * List element for fd fixups in a transaction. Since file + * descriptors need to be allocated in the context of the + * target process, we pass each fd to be processed in this + * struct. 
+ */ +struct binder_txn_fd_fixup { + struct list_head fixup_entry; + struct file *file; + size_t offset; +}; + struct binder_transaction { int debug_id; struct binder_work work; @@ -628,6 +644,8 @@ struct binder_transaction { long priority; long saved_priority; kuid_t sender_euid; + struct list_head fd_fixups; + binder_uintptr_t security_ctx; /** * @lock: protects @from, @to_proc, and @to_thread * @@ -637,6 +655,26 @@ struct binder_transaction { spinlock_t lock; }; +/** + * struct binder_object - union of flat binder object types + * @hdr: generic object header + * @fbo: binder object (nodes and refs) + * @fdo: file descriptor object + * @bbo: binder buffer pointer + * @fdao: file descriptor array + * + * Used for type-independent object copies + */ +struct binder_object { + union { + struct binder_object_header hdr; + struct flat_binder_object fbo; + struct binder_fd_object fdo; + struct binder_buffer_object bbo; + struct binder_fd_array_object fdao; + }; +}; + /** * binder_proc_lock() - Acquire outer lock for given binder_proc * @proc: struct binder_proc to acquire @@ -822,6 +860,7 @@ static void binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, struct binder_work *work) { + WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); } @@ -839,7 +878,18 @@ static void binder_enqueue_thread_work_ilocked(struct binder_thread *thread, struct binder_work *work) { + WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); + + /* (e)poll-based threads require an explicit wakeup signal when + * queuing their own work; they rely on these events to consume + * messages without I/O block. Without it, threads risk waiting + * indefinitely without handling the work. 
+ */ + if (thread->looper & BINDER_LOOPER_STATE_POLL && + thread->pid == current->pid && !thread->process_todo) + wake_up_interruptible_sync(&thread->wait); + thread->process_todo = true; } @@ -893,93 +943,12 @@ static struct binder_work *binder_dequeue_work_head_ilocked( return w; } -/** - * binder_dequeue_work_head() - Dequeues the item at head of list - * @proc: binder_proc associated with list - * @list: list to dequeue head - * - * Removes the head of the list if there are items on the list - * - * Return: pointer dequeued binder_work, NULL if list was empty - */ -static struct binder_work *binder_dequeue_work_head( - struct binder_proc *proc, - struct list_head *list) -{ - struct binder_work *w; - - binder_inner_proc_lock(proc); - w = binder_dequeue_work_head_ilocked(list); - binder_inner_proc_unlock(proc); - return w; -} - static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); static void binder_free_thread(struct binder_thread *thread); static void binder_free_proc(struct binder_proc *proc); static void binder_inc_node_tmpref_ilocked(struct binder_node *node); -static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) -{ - unsigned long rlim_cur; - unsigned long irqs; - int ret; - - mutex_lock(&proc->files_lock); - if (proc->files == NULL) { - ret = -ESRCH; - goto err; - } - if (!lock_task_sighand(proc->tsk, &irqs)) { - ret = -EMFILE; - goto err; - } - rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); - unlock_task_sighand(proc->tsk, &irqs); - - ret = __alloc_fd(proc->files, 0, rlim_cur, flags); -err: - mutex_unlock(&proc->files_lock); - return ret; -} - -/* - * copied from fd_install - */ -static void task_fd_install( - struct binder_proc *proc, unsigned int fd, struct file *file) -{ - mutex_lock(&proc->files_lock); - if (proc->files) - __fd_install(proc->files, fd, file); - mutex_unlock(&proc->files_lock); -} - -/* - * copied from sys_close - */ -static long task_close_fd(struct binder_proc *proc, unsigned int fd) -{ - int retval; - - mutex_lock(&proc->files_lock); - if (proc->files == NULL) { - retval = -ESRCH; - goto err; - } - retval = __close_fd(proc->files, fd); - /* can't restart close syscall because file table entry was cleared */ - if (unlikely(retval == -ERESTARTSYS || - retval == -ERESTARTNOINTR || - retval == -ERESTARTNOHAND || - retval == -ERESTART_RESTARTBLOCK)) - retval = -EINTR; -err: - mutex_unlock(&proc->files_lock); - return retval; -} - static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { @@ -1209,6 +1178,7 @@ static struct binder_node *binder_init_node_ilocked( node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -1270,19 +1240,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { + struct binder_thread *thread = container_of(target_list, + struct binder_thread, todo); binder_dequeue_work_ilocked(&node->work); - /* - * Note: this function is the only place where we queue - * directly to a thread->todo without using the - * corresponding binder_enqueue_thread_work() helper - * functions; in this case it's ok to not set the - * process_todo flag, since we know this node work will - * always be 
followed by other work that starts queue - * processing: in case of synchronous transactions, a - * BR_REPLY or BR_ERROR; in case of oneway - * transactions, a BR_TRANSACTION_COMPLETE. - */ - binder_enqueue_work_ilocked(&node->work, target_list); + BUG_ON(&thread->todo != target_list); + binder_enqueue_deferred_thread_work_ilocked(thread, + &node->work); } } else { if (!internal) @@ -1831,6 +1794,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, } ret = binder_inc_ref_olocked(ref, strong, target_list); *rdata = ref->data; + if (ret && ref == new_ref) { + /* + * Cleanup the failed reference here as the target + * could now be dead and have already released its + * references by now. Calling on the new reference + * with strong=0 and a tmp_refs will not decrement + * the node. The new_ref gets kfree'd below. + */ + binder_cleanup_ref_olocked(new_ref); + ref = NULL; + } + binder_proc_unlock(proc); if (new_ref && ref != new_ref) /* @@ -1958,10 +1933,42 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner( return NULL; } +/** + * binder_free_txn_fixups() - free unprocessed fd fixups + * @t: binder transaction for t->from + * + * If the transaction is being torn down prior to being + * processed by the target process, free all of the + * fd fixups and fput the file structs. It is safe to + * call this function after the fixups have been + * processed -- in that case, the list will be empty. + */ +static void binder_free_txn_fixups(struct binder_transaction *t) +{ + struct binder_txn_fd_fixup *fixup, *tmp; + + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { + fput(fixup->file); + list_del(&fixup->fixup_entry); + kfree(fixup); + } +} + static void binder_free_transaction(struct binder_transaction *t) { - if (t->buffer) - t->buffer->transaction = NULL; + struct binder_proc *target_proc = t->to_proc; + + if (target_proc) { + binder_inner_proc_lock(target_proc); + if (t->buffer) + t->buffer->transaction = NULL; + binder_inner_proc_unlock(target_proc); + } + /* + * If the transaction has no target_proc, then + * t->buffer->transaction has already been cleared. + */ + binder_free_txn_fixups(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } @@ -2044,26 +2051,47 @@ static void binder_cleanup_transaction(struct binder_transaction *t, } /** - * binder_validate_object() - checks for a valid metadata object in a buffer. + * binder_get_object() - gets object and checks for valid metadata + * @proc: binder_proc owning the buffer + * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. - * @offset: offset in the buffer at which to validate an object. + * @offset: offset in the @buffer at which to validate an object. + * @object: struct binder_object to read into + * + * Copy the binder object at the given offset into @object. If @u is + * provided then the copy is from the sender's buffer. If not, then + * it is copied from the target's @buffer. * - * Return: If there's a valid metadata object at @offset in @buffer, the - * size of that object. Otherwise, it returns zero. + * Return: If there's a valid metadata object at @offset, the + * size of that object. Otherwise, it returns zero. The object + * is read into the struct binder_object pointed to by @object. 
*/ -static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) +static size_t binder_get_object(struct binder_proc *proc, + const void __user *u, + struct binder_buffer *buffer, + unsigned long offset, + struct binder_object *object) { - /* Check if we can read a header first */ + size_t read_size; struct binder_object_header *hdr; size_t object_size = 0; - if (buffer->data_size < sizeof(*hdr) || - offset > buffer->data_size - sizeof(*hdr) || + read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); + if (offset > buffer->data_size || read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; - /* Ok, now see if we can read a complete object. */ - hdr = (struct binder_object_header *)(buffer->data + offset); + if (u) { + if (copy_from_user(object, u + offset, read_size)) + return 0; + } else { + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size)) + return 0; + } + + /* Ok, now see if we read a complete object. */ + hdr = &object->hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: @@ -2092,10 +2120,13 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) /** * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. + * @proc: binder_proc owning the buffer * @b: binder_buffer containing the object + * @object: struct binder_object to read into * @index: index in offset array at which the binder_buffer_object is * located - * @start: points to the start of the offset array + * @start_offset: points to the start of the offset array + * @object_offsetp: offset of @object read from @b * @num_valid: the number of valid offsets in the offset array * * Return: If @index is within the valid range of the offset array @@ -2106,34 +2137,48 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) * Note that the offset found in index @index itself is not * verified; this function assumes that @num_valid elements * from @start were previously verified to have valid offsets. + * If @object_offsetp is non-NULL, then the offset within + * @b is written to it. */ -static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, - binder_size_t index, - binder_size_t *start, - binder_size_t num_valid) +static struct binder_buffer_object *binder_validate_ptr( + struct binder_proc *proc, + struct binder_buffer *b, + struct binder_object *object, + binder_size_t index, + binder_size_t start_offset, + binder_size_t *object_offsetp, + binder_size_t num_valid) { - struct binder_buffer_object *buffer_obj; - binder_size_t *offp; + size_t object_size; + binder_size_t object_offset; + unsigned long buffer_offset; if (index >= num_valid) return NULL; - offp = start + index; - buffer_obj = (struct binder_buffer_object *)(b->data + *offp); - if (buffer_obj->hdr.type != BINDER_TYPE_PTR) + buffer_offset = start_offset + sizeof(binder_size_t) * index; + if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, + b, buffer_offset, + sizeof(object_offset))) + return NULL; + object_size = binder_get_object(proc, NULL, b, object_offset, object); + if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; + if (object_offsetp) + *object_offsetp = object_offset; - return buffer_obj; + return &object->bbo; } /** * binder_validate_fixup() - validates pointer/fd fixups happen in order. 
+ * @proc: binder_proc owning the buffer * @b: transaction buffer - * @objects_start start of objects buffer - * @buffer: binder_buffer_object in which to fix up - * @offset: start offset in @buffer to fix up - * @last_obj: last binder_buffer_object that we fixed up in - * @last_min_offset: minimum fixup offset in @last_obj + * @objects_start_offset: offset to start of objects buffer + * @buffer_obj_offset: offset to binder_buffer_object in which to fix up + * @fixup_offset: start offset in @buffer to fix up + * @last_obj_offset: offset to last binder_buffer_object that we fixed + * @last_min_offset: minimum fixup offset in object at @last_obj_offset * * Return: %true if a fixup in buffer @buffer at offset @offset is * allowed. @@ -2164,63 +2209,146 @@ static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, * C (parent = A, offset = 16) * D (parent = B, offset = 0) // B is not A or any of A's parents */ -static bool binder_validate_fixup(struct binder_buffer *b, - binder_size_t *objects_start, - struct binder_buffer_object *buffer, +static bool binder_validate_fixup(struct binder_proc *proc, + struct binder_buffer *b, + binder_size_t objects_start_offset, + binder_size_t buffer_obj_offset, binder_size_t fixup_offset, - struct binder_buffer_object *last_obj, + binder_size_t last_obj_offset, binder_size_t last_min_offset) { - if (!last_obj) { + if (!last_obj_offset) { /* Nothing to fix up in */ return false; } - while (last_obj != buffer) { + while (last_obj_offset != buffer_obj_offset) { + unsigned long buffer_offset; + struct binder_object last_object; + struct binder_buffer_object *last_bbo; + size_t object_size = binder_get_object(proc, NULL, b, + last_obj_offset, + &last_object); + if (object_size != sizeof(*last_bbo)) + return false; + + last_bbo = &last_object.bbo; /* * Safe to retrieve the parent of last_obj, since it * was already previously verified by the driver. */ - if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) + if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) + return false; + last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); + buffer_offset = objects_start_offset + + sizeof(binder_size_t) * last_bbo->parent; + if (binder_alloc_copy_from_buffer(&proc->alloc, + &last_obj_offset, + b, buffer_offset, + sizeof(last_obj_offset))) return false; - last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); - last_obj = (struct binder_buffer_object *) - (b->data + *(objects_start + last_obj->parent)); } return (fixup_offset >= last_min_offset); } +/** + * struct binder_task_work_cb - for deferred close + * + * @twork: callback_head for task work + * @fd: fd to close + * + * Structure to pass task work to be handled after + * returning from binder_ioctl() via task_work_add(). + */ +struct binder_task_work_cb { + struct callback_head twork; + struct file *file; +}; + +/** + * binder_do_fd_close() - close list of file descriptors + * @twork: callback head for task work + * + * It is not safe to call ksys_close() during the binder_ioctl() + * function if there is a chance that binder's own file descriptor + * might be closed. This is to meet the requirements for using + * fdget() (see comments for __fget_light()). Therefore use + * task_work_add() to schedule the close operation once we have + * returned from binder_ioctl(). This function is a callback + * for that mechanism and does the actual ksys_close() on the + * given file descriptor. 
+ */ +static void binder_do_fd_close(struct callback_head *twork) +{ + struct binder_task_work_cb *twcb = container_of(twork, + struct binder_task_work_cb, twork); + + fput(twcb->file); + kfree(twcb); +} + +/** + * binder_deferred_fd_close() - schedule a close for the given file-descriptor + * @fd: file-descriptor to close + * + * See comments in binder_do_fd_close(). This function is used to schedule + * a file-descriptor to be closed after returning from binder_ioctl(). + */ +static void binder_deferred_fd_close(int fd) +{ + struct binder_task_work_cb *twcb; + + twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); + if (!twcb) + return; + init_task_work(&twcb->twork, binder_do_fd_close); + close_fd_get_file(fd, &twcb->file); + if (twcb->file) { + filp_close(twcb->file, current->files); + task_work_add(current, &twcb->twork, true); + } else { + kfree(twcb); + } +} + static void binder_transaction_buffer_release(struct binder_proc *proc, + struct binder_thread *thread, struct binder_buffer *buffer, - binder_size_t *failed_at) + binder_size_t off_end_offset, + bool is_failure) { - binder_size_t *offp, *off_start, *off_end; int debug_id = buffer->debug_id; + binder_size_t off_start_offset, buffer_offset; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %pK\n", + "%d buffer release %d, size %zd-%zd, failed at %llx\n", proc->pid, buffer->debug_id, - buffer->data_size, buffer->offsets_size, failed_at); + buffer->data_size, buffer->offsets_size, + (unsigned long long)off_end_offset); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); - off_start = (binder_size_t *)(buffer->data + - ALIGN(buffer->data_size, sizeof(void *))); - if (failed_at) - off_end = failed_at; - else - off_end = (void *)off_start + buffer->offsets_size; - for (offp = off_start; offp < off_end; offp++) { - struct binder_object_header *hdr; - size_t object_size = binder_validate_object(buffer, *offp); + off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; + buffer_offset += sizeof(binder_size_t)) { + struct binder_object_header *hdr; + size_t object_size = 0; + struct binder_object object; + binder_size_t object_offset; + + if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, + buffer, buffer_offset, + sizeof(object_offset))) + object_size = binder_get_object(proc, NULL, buffer, + object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", - debug_id, (u64)*offp, buffer->data_size); + debug_id, (u64)object_offset, buffer->data_size); continue; } - hdr = (struct binder_object_header *)(buffer->data + *offp); + hdr = &object.hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { @@ -2262,12 +2390,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } break; case BINDER_TYPE_FD: { - struct binder_fd_object *fp = to_binder_fd_object(hdr); - - binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %d\n", fp->fd); - if (failed_at) - task_close_fd(proc, fp->fd); + /* + * No need to close the file here since user-space + * closes it for for successfully delivered + * transactions. For transactions that weren't + * delivered, the new fd was never allocated so + * there is no need to close and the fput on the + * file is done when the transaction is torn + * down. 
+ */ } break; case BINDER_TYPE_PTR: /* @@ -2278,28 +2409,33 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda; struct binder_buffer_object *parent; - uintptr_t parent_buffer; - u32 *fd_array; + struct binder_object ptr_object; + binder_size_t fda_offset; size_t fd_index; binder_size_t fd_buf_size; + binder_size_t num_valid; + if (is_failure) { + /* + * The fd fixups have not been applied so no + * fds need to be closed. + */ + continue; + } + + num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); fda = to_binder_fd_array_object(hdr); - parent = binder_validate_ptr(buffer, fda->parent, - off_start, - offp - off_start); + parent = binder_validate_ptr(proc, buffer, &ptr_object, + fda->parent, + off_start_offset, + NULL, + num_valid); if (!parent) { pr_err("transaction release %d bad parent offset\n", debug_id); continue; } - /* - * Since the parent was already fixed up, convert it - * back to kernel address space to access it - */ - parent_buffer = parent->buffer - - binder_alloc_get_user_buffer_offset( - &proc->alloc); - fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { pr_err("transaction release %d invalid number of fds (%lld)\n", @@ -2313,9 +2449,38 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, debug_id, (u64)fda->num_fds); continue; } - fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); - for (fd_index = 0; fd_index < fda->num_fds; fd_index++) - task_close_fd(proc, fd_array[fd_index]); + /* + * the source data for binder_buffer_object is visible + * to user-space and the @buffer element is the user + * pointer to the buffer_object containing the fd_array. + * Convert the address to an offset relative to + * the base of the transaction buffer. 
+ */ + fda_offset = + (parent->buffer - (uintptr_t)buffer->user_data) + + fda->parent_offset; + for (fd_index = 0; fd_index < fda->num_fds; + fd_index++) { + u32 fd; + int err; + binder_size_t offset = fda_offset + + fd_index * sizeof(fd); + + err = binder_alloc_copy_from_buffer( + &proc->alloc, &fd, buffer, + offset, sizeof(fd)); + WARN_ON(err); + if (!err) { + binder_deferred_fd_close(fd); + /* + * Need to make sure the thread goes + * back to userspace to complete the + * deferred close + */ + if (thread) + thread->looper_need_return = true; + } + } } break; default: pr_err("transaction release %d bad object type %x\n", @@ -2325,6 +2490,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } } +/* Clean up all the objects in the buffer */ +static inline void binder_release_entire_buffer(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_buffer *buffer, + bool is_failure) +{ + binder_size_t off_end_offset; + + off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); + off_end_offset += buffer->offsets_size; + + binder_transaction_buffer_release(proc, thread, buffer, + off_end_offset, is_failure); +} + static int binder_translate_binder(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) @@ -2349,7 +2529,7 @@ static int binder_translate_binder(struct flat_binder_object *fp, ret = -EINVAL; goto done; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } @@ -2395,7 +2575,7 @@ static int binder_translate_handle(struct flat_binder_object *fp, proc->pid, thread->pid, fp->handle); return -EINVAL; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } @@ -2447,16 +2627,16 @@ static int binder_translate_handle(struct flat_binder_object *fp, return ret; } -static int binder_translate_fd(int fd, +static int binder_translate_fd(u32 fd, binder_size_t fd_offset, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; - int target_fd; + struct binder_txn_fd_fixup *fixup; struct file *file; - int ret; + int ret = 0; bool target_allows_fd; if (in_reply_to) @@ -2479,25 +2659,30 @@ static int binder_translate_fd(int fd, ret = -EBADF; goto err_fget; } - ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); + ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); if (ret < 0) { ret = -EPERM; goto err_security; } - target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); - if (target_fd < 0) { + /* + * Add fixup record for this transaction. The allocation + * of the fd in the target needs to be done from a + * target thread. 
+ */ + fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); + if (!fixup) { ret = -ENOMEM; - goto err_get_unused_fd; + goto err_alloc; } - task_fd_install(target_proc, target_fd, file); - trace_binder_transaction_fd(t, fd, target_fd); - binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", - fd, target_fd); + fixup->file = file; + fixup->offset = fd_offset; + trace_binder_transaction_fd_send(t, fd, fixup->offset); + list_add_tail(&fixup->fixup_entry, &t->fd_fixups); - return target_fd; + return ret; -err_get_unused_fd: +err_alloc: err_security: fput(file); err_fget: @@ -2505,18 +2690,266 @@ static int binder_translate_fd(int fd, return ret; } -static int binder_translate_fd_array(struct binder_fd_array_object *fda, +/** + * struct binder_ptr_fixup - data to be fixed-up in target buffer + * @offset offset in target buffer to fixup + * @skip_size bytes to skip in copy (fixup will be written later) + * @fixup_data data to write at fixup offset + * @node list node + * + * This is used for the pointer fixup list (pf) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_ptr_fixup { + binder_size_t offset; + size_t skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +/** + * struct binder_sg_copy - scatter-gather data to be copied + * @offset offset in target buffer + * @sender_uaddr user address in source buffer + * @length bytes to copy + * @node list node + * + * This is used for the sg copy list (sgc) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_sg_copy { + binder_size_t offset; + const void __user *sender_uaddr; + size_t length; + struct list_head node; +}; + +/** + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data + * @alloc: binder_alloc associated with @buffer + * @buffer: binder buffer in target process + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Processes all elements of @sgc_head, applying fixups from @pf_head + * and copying the scatter-gather data from the source process' user + * buffer to the target's buffer. It is expected that the list creation + * and processing all occurs during binder_transaction() so these lists + * are only accessed in local context. + * + * Return: 0=success, else -errno + */ +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, + struct binder_buffer *buffer, + struct list_head *sgc_head, + struct list_head *pf_head) +{ + int ret = 0; + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *tmppf; + struct binder_ptr_fixup *pf = + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, + node); + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + size_t bytes_copied = 0; + + while (bytes_copied < sgc->length) { + size_t copy_size; + size_t bytes_left = sgc->length - bytes_copied; + size_t offset = sgc->offset + bytes_copied; + + /* + * We copy up to the fixup (pointed to by pf) + */ + copy_size = pf ? 
min(bytes_left, (size_t)pf->offset - offset) + : bytes_left; + if (!ret && copy_size) + ret = binder_alloc_copy_user_to_buffer( + alloc, buffer, + offset, + sgc->sender_uaddr + bytes_copied, + copy_size); + bytes_copied += copy_size; + if (copy_size != bytes_left) { + BUG_ON(!pf); + /* we stopped at a fixup offset */ + if (pf->skip_size) { + /* + * we are just skipping. This is for + * BINDER_TYPE_FDA where the translated + * fds will be fixed up when we get + * to target context. + */ + bytes_copied += pf->skip_size; + } else { + /* apply the fixup indicated by pf */ + if (!ret) + ret = binder_alloc_copy_to_buffer( + alloc, buffer, + pf->offset, + &pf->fixup_data, + sizeof(pf->fixup_data)); + bytes_copied += sizeof(pf->fixup_data); + } + list_del(&pf->node); + kfree(pf); + pf = list_first_entry_or_null(pf_head, + struct binder_ptr_fixup, node); + } + } + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + BUG_ON(pf->skip_size == 0); + list_del(&pf->node); + kfree(pf); + } + BUG_ON(!list_empty(sgc_head)); + + return ret > 0 ? -EINVAL : ret; +} + +/** + * binder_cleanup_deferred_txn_lists() - free specified lists + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Called to clean up @sgc_head and @pf_head if there is an + * error. + */ +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, + struct list_head *pf_head) +{ + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf, *tmppf; + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + list_del(&pf->node); + kfree(pf); + } +} + +/** + * binder_defer_copy() - queue a scatter-gather buffer for copy + * @sgc_head: list_head of scatter-gather copy list + * @offset: binder buffer offset in target process + * @sender_uaddr: user address in source process + * @length: bytes to copy + * + * Specify a scatter-gather block to be copied. The actual copy must + * be deferred until all the needed fixups are identified and queued. + * Then the copy and fixups are done together so un-translated values + * from the source are never visible in the target buffer. + * + * We are guaranteed that repeated calls to this function will have + * monotonically increasing @offset values so the list will naturally + * be ordered. + * + * Return: 0=success, else -errno + */ +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, + const void __user *sender_uaddr, size_t length) +{ + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); + + if (!bc) + return -ENOMEM; + + bc->offset = offset; + bc->sender_uaddr = sender_uaddr; + bc->length = length; + INIT_LIST_HEAD(&bc->node); + + /* + * We are guaranteed that the deferred copies are in-order + * so just add to the tail. + */ + list_add_tail(&bc->node, sgc_head); + + return 0; +} + +/** + * binder_add_fixup() - queue a fixup to be applied to sg copy + * @pf_head: list_head of binder ptr fixup list + * @offset: binder buffer offset in target process + * @fixup: bytes to be copied for fixup + * @skip_size: bytes to skip when copying (fixup will be applied later) + * + * Add the specified fixup to a list ordered by @offset. When copying + * the scatter-gather buffers, the fixup will be copied instead of + * data from the source buffer. 
For BINDER_TYPE_FDA fixups, the fixup + * will be applied later (in target process context), so we just skip + * the bytes specified by @skip_size. If @skip_size is 0, we copy the + * value in @fixup. + * + * This function is called *mostly* in @offset order, but there are + * exceptions. Since out-of-order inserts are relatively uncommon, + * we insert the new element by searching backward from the tail of + * the list. + * + * Return: 0=success, else -errno + */ +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, + binder_uintptr_t fixup, size_t skip_size) +{ + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); + struct binder_ptr_fixup *tmppf; + + if (!pf) + return -ENOMEM; + + pf->offset = offset; + pf->fixup_data = fixup; + pf->skip_size = skip_size; + INIT_LIST_HEAD(&pf->node); + + /* Fixups are *mostly* added in-order, but there are some + * exceptions. Look backwards through list for insertion point. + */ + list_for_each_entry_reverse(tmppf, pf_head, node) { + if (tmppf->offset < pf->offset) { + list_add(&pf->node, &tmppf->node); + return 0; + } + } + /* + * if we get here, then the new offset is the lowest so + * insert at the head + */ + list_add(&pf->node, pf_head); + return 0; +} + +static int binder_translate_fd_array(struct list_head *pf_head, + struct binder_fd_array_object *fda, + const void __user *sender_ubuffer, struct binder_buffer_object *parent, + struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { - binder_size_t fdi, fd_buf_size, num_installed_fds; - int target_fd; - uintptr_t parent_buffer; - u32 *fd_array; + binder_size_t fdi, fd_buf_size; + binder_size_t fda_offset; + const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; - struct binder_proc *target_proc = t->to_proc; + int ret; + + if (fda->num_fds == 0) + return 0; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -2532,64 +2965,74 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return -EINVAL; } /* - * Since the parent was already fixed up, convert it - * back to the kernel address space to access it + * the source data for binder_buffer_object is visible + * to user-space and the @buffer element is the user + * pointer to the buffer_object containing the fd_array. + * Convert the address to an offset relative to + * the base of the transaction buffer. 
*/ - parent_buffer = parent->buffer - - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); - if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { + fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + + fda->parent_offset; + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + + fda->parent_offset; + + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); + if (ret) + return ret; + for (fdi = 0; fdi < fda->num_fds; fdi++) { - target_fd = binder_translate_fd(fd_array[fdi], t, thread, - in_reply_to); - if (target_fd < 0) - goto err_translate_fd_failed; - fd_array[fdi] = target_fd; + u32 fd; + binder_size_t offset = fda_offset + fdi * sizeof(fd); + binder_size_t sender_uoffset = fdi * sizeof(fd); + + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); + if (!ret) + ret = binder_translate_fd(fd, offset, t, thread, + in_reply_to); + if (ret) + return ret > 0 ? -EINVAL : ret; } return 0; - -err_translate_fd_failed: - /* - * Failed to allocate fd or security error, free fds - * installed so far. - */ - num_installed_fds = fdi; - for (fdi = 0; fdi < num_installed_fds; fdi++) - task_close_fd(target_proc, fd_array[fdi]); - return target_fd; } -static int binder_fixup_parent(struct binder_transaction *t, +static int binder_fixup_parent(struct list_head *pf_head, + struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, - binder_size_t *off_start, + binder_size_t off_start_offset, binder_size_t num_valid, - struct binder_buffer_object *last_fixup_obj, + binder_size_t last_fixup_obj_off, binder_size_t last_fixup_min_off) { struct binder_buffer_object *parent; - u8 *parent_buffer; struct binder_buffer *b = t->buffer; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_object object; + binder_size_t buffer_offset; + binder_size_t parent_offset; if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) return 0; - parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); + parent = binder_validate_ptr(target_proc, b, &object, bp->parent, + off_start_offset, &parent_offset, + num_valid); if (!parent) { binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); return -EINVAL; } - if (!binder_validate_fixup(b, off_start, - parent, bp->parent_offset, - last_fixup_obj, + if (!binder_validate_fixup(target_proc, b, off_start_offset, + parent_offset, bp->parent_offset, + last_fixup_obj_off, last_fixup_min_off)) { binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); @@ -2603,12 +3046,9 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - parent_buffer = (u8 *)((uintptr_t)parent->buffer - - binder_alloc_get_user_buffer_offset( - &target_proc->alloc)); - *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; - - return 0; + buffer_offset = bp->parent_offset + + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); } /** @@ -2723,10 +3163,13 @@ static void binder_transaction(struct binder_proc *proc, { int ret; struct 
binder_transaction *t; + struct binder_work *w; struct binder_work *tcomplete; - binder_size_t *offp, *off_end, *off_start; + binder_size_t buffer_offset = 0; + binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; - u8 *sg_bufp, *sg_buf_end; + binder_size_t sg_buf_offset, sg_buf_end_offset; + binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2735,10 +3178,18 @@ static void binder_transaction(struct binder_proc *proc, uint32_t return_error = 0; uint32_t return_error_param = 0; uint32_t return_error_line = 0; - struct binder_buffer_object *last_fixup_obj = NULL; + binder_size_t last_fixup_obj_off = 0; binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); + char *secctx = NULL; + u32 secctx_sz = 0; + struct list_head sgc_head; + struct list_head pf_head; + const void __user *user_buffer = (const void __user *) + (uintptr_t)tr->data.ptr.buffer; + INIT_LIST_HEAD(&sgc_head); + INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2838,7 +3289,7 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); - if (target_node && target_proc == proc) { + if (target_node && target_proc->pid == proc->pid) { binder_user_error("%d:%d got transaction to context manager from process owning it\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; @@ -2856,14 +3307,37 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; - if (security_binder_transaction(proc->tsk, - target_proc->tsk) < 0) { + if (security_binder_transaction(proc->cred, + target_proc->cred) < 0) { return_error = BR_FAILED_REPLY; return_error_param = -EPERM; return_error_line = __LINE__; goto err_invalid_target_handle; } binder_inner_proc_lock(proc); + + w = list_first_entry_or_null(&thread->todo, + struct binder_work, entry); + if (!(tr->flags & TF_ONE_WAY) && w && + w->type == BINDER_WORK_TRANSACTION) { + /* + * Do not allow new outgoing transaction from a + * thread that has a transaction at the head of + * its todo list. Only need to check the head + * because binder_select_thread_ilocked picks a + * thread from proc->waiting_threads to enqueue + * the transaction, and nothing is queued to the + * todo list while the thread is on waiting_threads. 
+ */ + binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", + proc->pid, thread->pid); + binder_inner_proc_unlock(proc); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + goto err_bad_todo_list; + } + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; @@ -2911,6 +3385,7 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_alloc_t_failed; } + INIT_LIST_HEAD(&t->fd_fixups); binder_stats_created(BINDER_STAT_TRANSACTION); spin_lock_init(&t->lock); @@ -2955,6 +3430,29 @@ static void binder_transaction(struct binder_proc *proc, t->flags = tr->flags; t->priority = task_nice(current); + if (target_node && target_node->txn_security_ctx) { + u32 secid; + size_t added_size; + + security_cred_getsecid(proc->cred, &secid); + ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_get_secctx_failed; + } + added_size = ALIGN(secctx_sz, sizeof(u64)); + extra_buffers_size += added_size; + if (extra_buffers_size < added_size) { + /* integer overflow of extra_buffers_size */ + return_error = BR_FAILED_REPLY; + return_error_param = EINVAL; + return_error_line = __LINE__; + goto err_bad_extra_size; + } + } + trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, @@ -2971,26 +3469,36 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } - t->buffer->allow_user_free = 0; + if (secctx) { + int err; + size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + + ALIGN(tr->offsets_size, sizeof(void *)) + + ALIGN(extra_buffers_size, sizeof(void *)) - + ALIGN(secctx_sz, sizeof(u64)); + + t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; + err = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, buf_offset, + secctx, secctx_sz); + if (err) { + t->security_ctx = 0; + WARN_ON(1); + } + security_release_secctx(secctx, secctx_sz); + secctx = NULL; + } t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); - off_start = (binder_size_t *)(t->buffer->data + - ALIGN(tr->data_size, sizeof(void *))); - offp = off_start; - if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) - tr->data.ptr.buffer, tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EFAULT; - return_error_line = __LINE__; - goto err_copy_data_failed; - } - if (copy_from_user(offp, (const void __user *)(uintptr_t) - tr->data.ptr.offsets, tr->offsets_size)) { + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, + ALIGN(tr->data_size, sizeof(void *)), + (const void __user *) + (uintptr_t)tr->data.ptr.offsets, + tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; @@ -3015,17 +3523,57 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - off_end = (void *)off_start + tr->offsets_size; - sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); - sg_buf_end = sg_bufp + extra_buffers_size; + off_start_offset = 
ALIGN(tr->data_size, sizeof(void *)); + buffer_offset = off_start_offset; + off_end_offset = off_start_offset + tr->offsets_size; + sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); + sg_buf_end_offset = sg_buf_offset + extra_buffers_size - + ALIGN(secctx_sz, sizeof(u64)); off_min = 0; - for (; offp < off_end; offp++) { + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; + buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; - size_t object_size = binder_validate_object(t->buffer, *offp); + size_t object_size; + struct binder_object object; + binder_size_t object_offset; + binder_size_t copy_size; + + if (binder_alloc_copy_from_buffer(&target_proc->alloc, + &object_offset, + t->buffer, + buffer_offset, + sizeof(object_offset))) { + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_offset; + } - if (object_size == 0 || *offp < off_min) { + /* + * Copy the source user buffer up to the next object + * that will be processed. + */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || + object_offset > tr->data_size || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + copy_size))) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + object_size = binder_get_object(target_proc, user_buffer, + t->buffer, object_offset, &object); + if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", - proc->pid, thread->pid, (u64)*offp, + proc->pid, thread->pid, + (u64)object_offset, (u64)off_min, (u64)t->buffer->data_size); return_error = BR_FAILED_REPLY; @@ -3033,9 +3581,14 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } + /* + * Set offset to the next buffer fragment to be + * copied + */ + user_offset = object_offset + object_size; - hdr = (struct binder_object_header *)(t->buffer->data + *offp); - off_min = *offp + object_size; + hdr = &object.hdr; + off_min = object_offset + object_size; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { @@ -3043,7 +3596,12 @@ static void binder_transaction(struct binder_proc *proc, fp = to_flat_binder_object(hdr); ret = binder_translate_binder(fp, t, thread); - if (ret < 0) { + + if (ret < 0 || + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fp, sizeof(*fp))) { return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3056,7 +3614,11 @@ static void binder_transaction(struct binder_proc *proc, fp = to_flat_binder_object(hdr); ret = binder_translate_handle(fp, t, thread); - if (ret < 0) { + if (ret < 0 || + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fp, sizeof(*fp))) { return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3066,25 +3628,38 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FD: { struct binder_fd_object *fp = to_binder_fd_object(hdr); - int target_fd = binder_translate_fd(fp->fd, t, thread, - in_reply_to); + binder_size_t fd_offset = object_offset + + (uintptr_t)&fp->fd - (uintptr_t)fp; + int ret = 
binder_translate_fd(fp->fd, fd_offset, t, + thread, in_reply_to); - if (target_fd < 0) { + fp->pad_binder = 0; + if (ret < 0 || + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fp, sizeof(*fp))) { return_error = BR_FAILED_REPLY; - return_error_param = target_fd; + return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } - fp->pad_binder = 0; - fp->fd = target_fd; } break; case BINDER_TYPE_FDA: { + struct binder_object ptr_object; + binder_size_t parent_offset; + struct binder_object user_object; + size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); + size_t num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); struct binder_buffer_object *parent = - binder_validate_ptr(t->buffer, fda->parent, - off_start, - offp - off_start); + binder_validate_ptr(target_proc, t->buffer, + &ptr_object, fda->parent, + off_start_offset, + &parent_offset, + num_valid); if (!parent) { binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); @@ -3093,9 +3668,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - if (!binder_validate_fixup(t->buffer, off_start, - parent, fda->parent_offset, - last_fixup_obj, + if (!binder_validate_fixup(target_proc, t->buffer, + off_start_offset, + parent_offset, + fda->parent_offset, + last_fixup_obj_off, last_fixup_min_off)) { binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); @@ -3104,22 +3681,47 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - ret = binder_translate_fd_array(fda, parent, t, thread, - in_reply_to); - if (ret < 0) { + /* + * We need to read the user version of the parent + * object to get the original user offset + */ + user_parent_size = + binder_get_object(proc, user_buffer, t->buffer, + parent_offset, &user_object); + if (user_parent_size != sizeof(user_object.bbo)) { + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", + proc->pid, thread->pid, + user_parent_size, + sizeof(user_object.bbo)); return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = binder_translate_fd_array(&pf_head, fda, + user_buffer, parent, + &user_object.bbo, t, + thread, in_reply_to); + if (!ret) + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fda, sizeof(*fda)); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret > 0 ? 
-EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } - last_fixup_obj = parent; + last_fixup_obj_off = parent_offset; last_fixup_min_off = fda->parent_offset + sizeof(u32) * fda->num_fds; } break; case BINDER_TYPE_PTR: { struct binder_buffer_object *bp = to_binder_buffer_object(hdr); - size_t buf_left = sg_buf_end - sg_bufp; + size_t buf_left = sg_buf_end_offset - sg_buf_offset; + size_t num_valid; if (bp->length > buf_left) { binder_user_error("%d:%d got transaction with too large buffer\n", @@ -3129,33 +3731,39 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (copy_from_user(sg_bufp, - (const void __user *)(uintptr_t) - bp->buffer, bp->length)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error_param = -EFAULT; + ret = binder_defer_copy(&sgc_head, sg_buf_offset, + (const void __user *)(uintptr_t)bp->buffer, + bp->length); + if (ret) { return_error = BR_FAILED_REPLY; + return_error_param = ret; return_error_line = __LINE__; - goto err_copy_data_failed; + goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ - bp->buffer = (uintptr_t)sg_bufp + - binder_alloc_get_user_buffer_offset( - &target_proc->alloc); - sg_bufp += ALIGN(bp->length, sizeof(u64)); - - ret = binder_fixup_parent(t, thread, bp, off_start, - offp - off_start, - last_fixup_obj, + bp->buffer = (uintptr_t) + t->buffer->user_data + sg_buf_offset; + sg_buf_offset += ALIGN(bp->length, sizeof(u64)); + + num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); + ret = binder_fixup_parent(&pf_head, t, + thread, bp, + off_start_offset, + num_valid, + last_fixup_obj_off, last_fixup_min_off); - if (ret < 0) { + if (ret < 0 || + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + bp, sizeof(*bp))) { return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } - last_fixup_obj = bp; + last_fixup_obj_off = object_offset; last_fixup_min_off = 0; } break; default: @@ -3167,6 +3775,30 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_object_type; } } + /* Done processing objects, copy the rest of the buffer */ + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + tr->data_size - user_offset)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, + &sgc_head, &pf_head); + if (ret) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_copy_data_failed; + } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; t->work.type = BINDER_WORK_TRANSACTION; @@ -3233,20 +3865,28 @@ static void binder_transaction(struct binder_proc *proc, err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); + binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); - binder_transaction_buffer_release(target_proc, t->buffer, offp); + binder_transaction_buffer_release(target_proc, NULL, t->buffer, + buffer_offset, true); if 
(target_node) binder_dec_node_tmpref(target_node); target_node = NULL; t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: +err_bad_extra_size: + if (secctx) + security_release_secctx(secctx, secctx_sz); +err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: +err_bad_todo_list: err_bad_call_stack: err_empty_call_stack: err_dead_binder: @@ -3294,6 +3934,52 @@ static void binder_transaction(struct binder_proc *proc, } } +/** + * binder_free_buf() - free the specified buffer + * @proc: binder proc that owns buffer + * @buffer: buffer to be freed + * @is_failure: failed to send transaction + * + * If buffer for an async transaction, enqueue the next async + * transaction from the node. + * + * Cleanup buffer and free it. + */ +static void +binder_free_buf(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_buffer *buffer, bool is_failure) +{ + binder_inner_proc_lock(proc); + if (buffer->transaction) { + buffer->transaction->buffer = NULL; + buffer->transaction = NULL; + } + binder_inner_proc_unlock(proc); + if (buffer->async_transaction && buffer->target_node) { + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) { + buf_node->has_async_transaction = false; + } else { + binder_enqueue_work_ilocked( + w, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_node_inner_unlock(buf_node); + } + trace_binder_transaction_buffer_release(buffer); + binder_release_entire_buffer(proc, thread, buffer, is_failure); + binder_alloc_free_buf(&proc->alloc, buffer); +} + static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -3465,14 +4151,18 @@ static int binder_thread_write(struct binder_proc *proc, buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr); - if (buffer == NULL) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", - proc->pid, thread->pid, (u64)data_ptr); - break; - } - if (!buffer->allow_user_free) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", - proc->pid, thread->pid, (u64)data_ptr); + if (IS_ERR_OR_NULL(buffer)) { + if (PTR_ERR(buffer) == -EPERM) { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", + proc->pid, thread->pid, + (u64)data_ptr); + } else { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx no match\n", + proc->pid, thread->pid, + (u64)data_ptr); + } break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, @@ -3480,33 +4170,7 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); - - if (buffer->transaction) { - buffer->transaction->buffer = NULL; - buffer->transaction = NULL; - } - if (buffer->async_transaction && buffer->target_node) { - struct binder_node *buf_node; - struct binder_work *w; - - buf_node = buffer->target_node; - binder_node_inner_lock(buf_node); - BUG_ON(!buf_node->has_async_transaction); - BUG_ON(buf_node->proc != proc); - w = binder_dequeue_work_head_ilocked( - &buf_node->async_todo); - if (!w) { - buf_node->has_async_transaction = false; - } else { - binder_enqueue_work_ilocked( - w, &proc->todo); - binder_wakeup_proc_ilocked(proc); - } - binder_node_inner_unlock(buf_node); - } - trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, buffer, NULL); - binder_alloc_free_buf(&proc->alloc, buffer); + binder_free_buf(proc, thread, buffer, false); break; } @@ -3829,6 +4493,71 @@ static int binder_wait_for_work(struct binder_thread *thread, return ret; } +/** + * binder_apply_fd_fixups() - finish fd translation + * @proc: binder_proc associated @t->buffer + * @t: binder transaction with list of fd fixups + * + * Now that we are in the context of the transaction target + * process, we can allocate and install fds. Process the + * list of fds to translate and fixup the buffer with the + * new fds. + * + * If we fail to allocate an fd, then free the resources by + * fput'ing files that have not been processed and ksys_close'ing + * any fds that have already been allocated. + */ +static int binder_apply_fd_fixups(struct binder_proc *proc, + struct binder_transaction *t) +{ + struct binder_txn_fd_fixup *fixup, *tmp; + int ret = 0; + + list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { + int fd = get_unused_fd_flags(O_CLOEXEC); + + if (fd < 0) { + binder_debug(BINDER_DEBUG_TRANSACTION, + "failed fd fixup txn %d fd %d\n", + t->debug_id, fd); + ret = -ENOMEM; + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + "fd fixup txn %d fd %d\n", + t->debug_id, fd); + trace_binder_transaction_fd_recv(t, fd, fixup->offset); + fd_install(fd, fixup->file); + fixup->file = NULL; + if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, + fixup->offset, &fd, + sizeof(u32))) { + ret = -EINVAL; + break; + } + } + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { + if (fixup->file) { + fput(fixup->file); + } else if (ret) { + u32 fd; + int err; + + err = binder_alloc_copy_from_buffer(&proc->alloc, &fd, + t->buffer, + fixup->offset, + sizeof(fd)); + WARN_ON(err); + if (!err) + binder_deferred_fd_close(fd); + } + list_del(&fixup->fixup_entry); + kfree(fixup); + } + + return ret; +} + static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -3882,11 +4611,13 @@ static int binder_thread_read(struct binder_proc *proc, while (1) { uint32_t cmd; - struct binder_transaction_data tr; + struct binder_transaction_data_secctx tr; + struct binder_transaction_data *trd = &tr.transaction_data; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; + size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) @@ -3933,6 +4664,8 @@ static int binder_thread_read(struct binder_proc *proc, case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); if (put_user(cmd, (uint32_t __user *)ptr)) return 
-EFAULT; ptr += sizeof(uint32_t); @@ -3941,8 +4674,6 @@ static int binder_thread_read(struct binder_proc *proc, binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); - kfree(w); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); @@ -4081,8 +4812,8 @@ static int binder_thread_read(struct binder_proc *proc, if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; - tr.target.ptr = target_node->ptr; - tr.cookie = target_node->cookie; + trd->target.ptr = target_node->ptr; + trd->cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) @@ -4092,33 +4823,65 @@ static int binder_thread_read(struct binder_proc *proc, binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { - tr.target.ptr = 0; - tr.cookie = 0; + trd->target.ptr = 0; + trd->cookie = 0; cmd = BR_REPLY; } - tr.code = t->code; - tr.flags = t->flags; - tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + trd->code = t->code; + trd->flags = t->flags; + trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; - tr.sender_pid = task_tgid_nr_ns(sender, - task_active_pid_ns(current)); + trd->sender_pid = + task_tgid_nr_ns(sender, + task_active_pid_ns(current)); } else { - tr.sender_pid = 0; + trd->sender_pid = 0; } - tr.data_size = t->buffer->data_size; - tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t) - ((uintptr_t)t->buffer->data + - binder_alloc_get_user_buffer_offset(&proc->alloc)); - tr.data.ptr.offsets = tr.data.ptr.buffer + + ret = binder_apply_fd_fixups(proc, t); + if (ret) { + struct binder_buffer *buffer = t->buffer; + bool oneway = !!(t->flags & TF_ONE_WAY); + int tid = t->debug_id; + + if (t_from) + binder_thread_dec_tmpref(t_from); + buffer->transaction = NULL; + binder_cleanup_transaction(t, "fd fixups failed", + BR_FAILED_REPLY); + binder_free_buf(proc, thread, buffer, true); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", + proc->pid, thread->pid, + oneway ? "async " : + (cmd == BR_REPLY ? 
"reply " : ""), + tid, BR_FAILED_REPLY, ret, __LINE__); + if (cmd == BR_REPLY) { + cmd = BR_FAILED_REPLY; + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + binder_stat_br(proc, thread, cmd); + break; + } + continue; + } + trd->data_size = t->buffer->data_size; + trd->offsets_size = t->buffer->offsets_size; + trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; + trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); + tr.secctx = t->security_ctx; + if (t->security_ctx) { + cmd = BR_TRANSACTION_SEC_CTX; + trsize = sizeof(tr); + } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4129,7 +4892,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (copy_to_user(ptr, &tr, trsize)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4138,7 +4901,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } - ptr += sizeof(tr); + ptr += trsize; trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); @@ -4146,16 +4909,18 @@ static int binder_thread_read(struct binder_proc *proc, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - "BR_REPLY", + (cmd == BR_TRANSACTION_SEC_CTX) ? + "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, - (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + (u64)trd->data.ptr.buffer, + (u64)trd->data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; - if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; @@ -4194,13 +4959,17 @@ static void binder_release_work(struct binder_proc *proc, struct list_head *list) { struct binder_work *w; + enum binder_work_type wtype; while (1) { - w = binder_dequeue_work_head(proc, list); + binder_inner_proc_lock(proc); + w = binder_dequeue_work_head_ilocked(list); + wtype = w ? 
w->type : 0; + binder_inner_proc_unlock(proc); if (!w) return; - switch (w->type) { + switch (wtype) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; @@ -4234,9 +5003,11 @@ static void binder_release_work(struct binder_proc *proc, kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; + case BINDER_WORK_NODE: + break; default: pr_err("unexpected work type, %d, not freed\n", - w->type); + wtype); break; } } @@ -4308,6 +5079,7 @@ static void binder_free_proc(struct binder_proc *proc) BUG_ON(!list_empty(&proc->delivered_death)); binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); + put_cred(proc->cred); binder_stats_deleted(BINDER_STAT_PROC); kfree(proc); } @@ -4497,7 +5269,8 @@ static int binder_ioctl_write_read(struct file *filp, return ret; } -static int binder_ioctl_set_ctx_mgr(struct file *filp) +static int binder_ioctl_set_ctx_mgr(struct file *filp, + struct flat_binder_object *fbo) { int ret = 0; struct binder_proc *proc = filp->private_data; @@ -4511,7 +5284,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) ret = -EBUSY; goto out; } - ret = security_binder_set_context_mgr(proc->tsk); + ret = security_binder_set_context_mgr(proc->cred); if (ret < 0) goto out; if (uid_valid(context->binder_context_mgr_uid)) { @@ -4526,7 +5299,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - new_node = binder_new_node(proc, NULL); + new_node = binder_new_node(proc, fbo); if (!new_node) { ret = -ENOMEM; goto out; @@ -4544,6 +5317,42 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) return ret; } +static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, + struct binder_node_info_for_ref *info) +{ + struct binder_node *node; + struct binder_context *context = proc->context; + __u32 handle = info->handle; + + if (info->strong_count || info->weak_count || info->reserved1 || + info->reserved2 || info->reserved3) { + binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", + proc->pid); + return -EINVAL; + } + + /* This ioctl may only be used by the context manager */ + mutex_lock(&context->context_mgr_node_lock); + if (!context->binder_context_mgr_node || + context->binder_context_mgr_node->proc != proc) { + mutex_unlock(&context->context_mgr_node_lock); + return -EPERM; + } + mutex_unlock(&context->context_mgr_node_lock); + + node = binder_get_node_from_ref(proc, handle, true, NULL); + if (!node) + return -EINVAL; + + info->strong_count = node->local_strong_refs + + node->internal_strong_refs; + info->weak_count = node->local_weak_refs; + + binder_put_node(node); + + return 0; +} + static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, struct binder_node_debug_info *info) { @@ -4613,8 +5422,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_SET_CONTEXT_MGR_EXT: { + struct flat_binder_object fbo; + + if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { + ret = -EINVAL; + goto err; + } + ret = binder_ioctl_set_ctx_mgr(filp, &fbo); + if (ret) + goto err; + break; + } case BINDER_SET_CONTEXT_MGR: - ret = binder_ioctl_set_ctx_mgr(filp); + ret = binder_ioctl_set_ctx_mgr(filp, NULL); if (ret) goto err; break; @@ -4638,6 +5459,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; } + case BINDER_GET_NODE_INFO_FOR_REF: { + struct binder_node_info_for_ref info; + + if (copy_from_user(&info, ubuf, 
sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = binder_ioctl_get_node_info_for_ref(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + break; + } case BINDER_GET_NODE_DEBUG_INFO: { struct binder_node_debug_info info; @@ -4693,7 +5533,6 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); binder_alloc_vma_close(&proc->alloc); - binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } static vm_fault_t binder_vm_fault(struct vm_fault *vmf) @@ -4716,9 +5555,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) if (proc->tsk != current->group_leader) return -EINVAL; - if ((vma->vm_end - vma->vm_start) > SZ_4M) - vma->vm_end = vma->vm_start + SZ_4M; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", __func__, proc->pid, vma->vm_start, vma->vm_end, @@ -4739,9 +5575,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; - mutex_lock(&proc->files_lock); - proc->files = get_files_struct(current); - mutex_unlock(&proc->files_lock); return 0; err_bad_arg: @@ -4765,7 +5598,7 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; - mutex_init(&proc->files_lock); + proc->cred = get_cred(filp->f_cred); INIT_LIST_HEAD(&proc->todo); proc->default_priority = task_nice(current); binder_dev = container_of(filp->private_data, struct binder_device, @@ -4915,8 +5748,6 @@ static void binder_deferred_release(struct binder_proc *proc) struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; - BUG_ON(proc->files); - mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); @@ -4998,7 +5829,6 @@ static void binder_deferred_release(struct binder_proc *proc) static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; - struct files_struct *files; int defer; @@ -5016,23 +5846,11 @@ static void binder_deferred_func(struct work_struct *work) } mutex_unlock(&binder_deferred_lock); - files = NULL; - if (defer & BINDER_DEFERRED_PUT_FILES) { - mutex_lock(&proc->files_lock); - files = proc->files; - if (files) - proc->files = NULL; - mutex_unlock(&proc->files_lock); - } - if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ - - if (files) - put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); @@ -5087,7 +5905,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, - buffer->data); + buffer->user_data); } static void print_binder_work_ilocked(struct seq_file *m, diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 64fd96eada31f42e5677a72de837fafa2987165b..6f073d7fd7c66040445d2b4482c6d21800c20945 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -29,6 +29,9 @@ #include #include #include +#include +#include +#include #include "binder_alloc.h" #include "binder_trace.h" @@ -67,9 +70,8 @@ static size_t 
binder_alloc_buffer_size(struct binder_alloc *alloc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &alloc->buffers)) - return (u8 *)alloc->buffer + - alloc->buffer_size - (u8 *)buffer->data; - return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data; + return alloc->buffer + alloc->buffer_size - buffer->user_data; + return binder_buffer_next(buffer)->user_data - buffer->user_data; } static void binder_insert_free_buffer(struct binder_alloc *alloc, @@ -119,9 +121,9 @@ static void binder_insert_allocated_buffer_locked( buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (new_buffer->data < buffer->data) + if (new_buffer->user_data < buffer->user_data) p = &parent->rb_left; - else if (new_buffer->data > buffer->data) + else if (new_buffer->user_data > buffer->user_data) p = &parent->rb_right; else BUG(); @@ -136,31 +138,27 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( { struct rb_node *n = alloc->allocated_buffers.rb_node; struct binder_buffer *buffer; - void *kern_ptr; + void __user *uptr; - kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); + uptr = (void __user *)user_ptr; while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (kern_ptr < buffer->data) + if (uptr < buffer->user_data) n = n->rb_left; - else if (kern_ptr > buffer->data) + else if (uptr > buffer->user_data) n = n->rb_right; else { /* * Guard against user threads attempting to - * free the buffer twice + * free the buffer when in use by kernel or + * after it's already been freed. */ - if (buffer->free_in_progress) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", - alloc->pid, current->pid, - (u64)user_ptr); - return NULL; - } - buffer->free_in_progress = 1; + if (!buffer->allow_user_free) + return ERR_PTR(-EPERM); + buffer->allow_user_free = 0; return buffer; } } @@ -190,9 +188,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end) + void __user *start, void __user *end) { - void *page_addr; + void __user *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; struct vm_area_struct *vma = NULL; @@ -267,18 +265,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, page->alloc = alloc; INIT_LIST_HEAD(&page->lru); - ret = map_kernel_range_noflush((unsigned long)page_addr, - PAGE_SIZE, PAGE_KERNEL, - &page->page_ptr); - flush_cache_vmap((unsigned long)page_addr, - (unsigned long)page_addr + PAGE_SIZE); - if (ret != 1) { - pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", - alloc->pid, page_addr); - goto err_map_kernel_failed; - } - user_page_addr = - (uintptr_t)page_addr + alloc->user_buffer_offset; + user_page_addr = (uintptr_t)page_addr; ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", @@ -294,13 +281,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } if (mm) { up_read(&mm->mmap_sem); - mmput(mm); + mmput_async(mm); } return 0; free_range: - for (page_addr = end - PAGE_SIZE; page_addr >= start; - page_addr -= PAGE_SIZE) { + for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { bool ret; size_t index; @@ -313,21 +299,22 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, WARN_ON(!ret); 
trace_binder_free_lru_end(alloc, index); + if (page_addr == start) + break; continue; err_vm_insert_page_failed: - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); -err_map_kernel_failed: __free_page(page->page_ptr); page->page_ptr = NULL; err_alloc_page_failed: err_page_ptr_cleared: - ; + if (page_addr == start) + break; } err_no_vma: if (mm) { up_read(&mm->mmap_sem); - mmput(mm); + mmput_async(mm); } return vma ? -ENOMEM : -ESRCH; } @@ -372,8 +359,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; - void *has_page_addr; - void *end_page_addr; + void __user *has_page_addr; + void __user *end_page_addr; size_t size, data_offsets_size; int ret; @@ -471,15 +458,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked( "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); - has_page_addr = - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + has_page_addr = (void __user *) + (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); WARN_ON(n && buffer_size != size); end_page_addr = - (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; - ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); + ret = binder_update_page_range(alloc, 1, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); if (ret) return ERR_PTR(ret); @@ -492,7 +479,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( __func__, alloc->pid); goto err_alloc_buf_struct_failed; } - new_buffer->data = (u8 *)buffer->data + size; + new_buffer->user_data = (u8 __user *)buffer->user_data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; binder_insert_free_buffer(alloc, new_buffer); @@ -500,7 +487,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; - buffer->free_in_progress = 0; + buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", @@ -518,8 +505,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked( return buffer; err_alloc_buf_struct_failed: - binder_update_page_range(alloc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), + binder_update_page_range(alloc, 0, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); return ERR_PTR(-ENOMEM); } @@ -554,14 +541,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, return buffer; } -static void *buffer_start_page(struct binder_buffer *buffer) +static void __user *buffer_start_page(struct binder_buffer *buffer) { - return (void *)((uintptr_t)buffer->data & PAGE_MASK); + return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); } -static void *prev_buffer_end_page(struct binder_buffer *buffer) +static void __user *prev_buffer_end_page(struct binder_buffer *buffer) { - return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); + return (void __user *) + (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_alloc *alloc, @@ -576,7 +564,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, to_free = false; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK 
share page with %pK\n", - alloc->pid, buffer->data, prev->data); + alloc->pid, buffer->user_data, + prev->user_data); } if (!list_is_last(&buffer->entry, &alloc->buffers)) { @@ -586,23 +575,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK share page with %pK\n", alloc->pid, - buffer->data, - next->data); + buffer->user_data, + next->user_data); } } - if (PAGE_ALIGNED(buffer->data)) { + if (PAGE_ALIGNED(buffer->user_data)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer start %pK is page aligned\n", - alloc->pid, buffer->data); + alloc->pid, buffer->user_data); to_free = false; } if (to_free) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK do not share page with %pK or %pK\n", - alloc->pid, buffer->data, - prev->data, next ? next->data : NULL); + alloc->pid, buffer->user_data, + prev->user_data, + next ? next->user_data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), buffer_start_page(buffer) + PAGE_SIZE); } @@ -628,11 +618,11 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); - BUG_ON(buffer->data < alloc->buffer); - BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); + BUG_ON(buffer->user_data < alloc->buffer); + BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { - alloc->free_async_space += size + sizeof(struct binder_buffer); + alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", @@ -640,8 +630,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, } binder_update_page_range(alloc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), + (void __user *)(((uintptr_t) + buffer->user_data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -697,40 +688,22 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma) { int ret; - struct vm_struct *area; const char *failure_string; struct binder_buffer *buffer; mutex_lock(&binder_alloc_mmap_lock); - if (alloc->buffer) { + if (alloc->buffer_size) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } - - area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); - if (area == NULL) { - ret = -ENOMEM; - failure_string = "get_vm_area"; - goto err_get_vm_area_failed; - } - alloc->buffer = area->addr; - alloc->user_buffer_offset = - vma->vm_start - (uintptr_t)alloc->buffer; + alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, + SZ_4M); mutex_unlock(&binder_alloc_mmap_lock); -#ifdef CONFIG_CPU_CACHE_VIPT - if (cache_is_vipt_aliasing()) { - while (CACHE_COLOUR( - (vma->vm_start ^ (uint32_t)alloc->buffer))) { - pr_info("%s: %d %lx-%lx maps %pK bad alignment\n", - __func__, alloc->pid, vma->vm_start, - vma->vm_end, alloc->buffer); - vma->vm_start += PAGE_SIZE; - } - } -#endif - alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, + alloc->buffer = (void __user *)vma->vm_start; + + alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, sizeof(alloc->pages[0]), GFP_KERNEL); if (alloc->pages == NULL) { @@ -738,7 +711,6 @@ int 
binder_alloc_mmap_handler(struct binder_alloc *alloc, failure_string = "alloc page array"; goto err_alloc_pages_failed; } - alloc->buffer_size = vma->vm_end - vma->vm_start; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { @@ -747,7 +719,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_alloc_buf_struct_failed; } - buffer->data = alloc->buffer; + buffer->user_data = alloc->buffer; list_add(&buffer->entry, &alloc->buffers); buffer->free = 1; binder_insert_free_buffer(alloc, buffer); @@ -761,10 +733,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, kfree(alloc->pages); alloc->pages = NULL; err_alloc_pages_failed: - mutex_lock(&binder_alloc_mmap_lock); - vfree(alloc->buffer); alloc->buffer = NULL; -err_get_vm_area_failed: + mutex_lock(&binder_alloc_mmap_lock); + alloc->buffer_size = 0; err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); binder_alloc_debug(BINDER_DEBUG_USER_ERROR, @@ -810,7 +781,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int i; for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - void *page_addr; + void __user *page_addr; bool on_lru; if (!alloc->pages[i].page_ptr) @@ -823,12 +794,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) "%s: %d: page %d at %pK %s\n", __func__, alloc->pid, i, page_addr, on_lru ? "on lru" : "active"); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(alloc->pages[i].page_ptr); page_count++; } kfree(alloc->pages); - vfree(alloc->buffer); } mutex_unlock(&alloc->mutex); if (alloc->vma_vm_mm) @@ -843,7 +812,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, + prefix, buffer->debug_id, buffer->user_data, buffer->data_size, buffer->offsets_size, buffer->extra_buffers_size, buffer->transaction ? "active" : "delivered"); @@ -884,14 +853,20 @@ void binder_alloc_print_pages(struct seq_file *m, int free = 0; mutex_lock(&alloc->mutex); - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - page = &alloc->pages[i]; - if (!page->page_ptr) - free++; - else if (list_empty(&page->lru)) - active++; - else - lru++; + /* + * Make sure the binder_alloc is fully initialized, otherwise we might + * read inconsistent state. 
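+	 * A non-NULL vma means binder_alloc_mmap_handler() has completed,
+	 * so alloc->pages and alloc->buffer_size read below are valid.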
+ */ + if (binder_alloc_get_vma(alloc) != NULL) { + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + page = &alloc->pages[i]; + if (!page->page_ptr) + free++; + else if (list_empty(&page->lru)) + active++; + else + lru++; + } } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); @@ -962,14 +937,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + + mm = alloc->vma_vm_mm; + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; vma = binder_alloc_get_vma(alloc); - if (vma) { - if (!mmget_not_zero(alloc->vma_vm_mm)) - goto err_mmget; - mm = alloc->vma_vm_mm; - if (!down_write_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; - } list_lru_isolate(lru, item); spin_unlock(lock); @@ -977,19 +951,15 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (vma) { trace_binder_unmap_user_start(alloc, index); - zap_page_range(vma, - page_addr + alloc->user_buffer_offset, - PAGE_SIZE); + zap_page_range(vma, page_addr, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); - - up_write(&mm->mmap_sem); - mmput(mm); } + up_write(&mm->mmap_sem); + mmput(mm); trace_binder_unmap_kernel_start(alloc, index); - unmap_kernel_range(page_addr, PAGE_SIZE); __free_page(page->page_ptr); page->page_ptr = NULL; @@ -1056,3 +1026,174 @@ int binder_alloc_shrinker_init(void) } return ret; } + +/** + * check_buffer() - verify that buffer/offset is safe to access + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @offset: offset into @buffer data + * @bytes: bytes to access from offset + * + * Check that the @offset/@bytes are within the size of the given + * @buffer and that the buffer is currently active and not freeable. + * Offsets must also be multiples of sizeof(u32). The kernel is + * allowed to touch the buffer in two cases: + * + * 1) when the buffer is being created: + * (buffer->free == 0 && buffer->allow_user_free == 0) + * 2) when the buffer is being torn down: + * (buffer->free == 0 && buffer->transaction == NULL). + * + * Return: true if the buffer is safe to access + */ +static inline bool check_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t offset, size_t bytes) +{ + size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); + + return buffer_size >= bytes && + offset <= buffer_size - bytes && + IS_ALIGNED(offset, sizeof(u32)) && + !buffer->free && + (!buffer->allow_user_free || !buffer->transaction); +} + +/** + * binder_alloc_get_page() - get kernel pointer for given buffer offset + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @pgoffp: address to copy final page offset to + * + * Lookup the struct page corresponding to the address + * at @buffer_offset into @buffer->user_data. If @pgoffp is not + * NULL, the byte-offset into the page is written there. + * + * The caller is responsible to ensure that the offset points + * to a valid address within the @buffer and that @buffer is + * not freeable by the user. Since it can't be freed, we are + * guaranteed that the corresponding elements of @alloc->pages[] + * cannot change. 
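+ * Callers map the returned page with kmap() or kmap_atomic() to perform
+ * the actual byte copy; see the copy helpers below.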
+ * + * Return: struct page + */ +static struct page *binder_alloc_get_page(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + pgoff_t *pgoffp) +{ + binder_size_t buffer_space_offset = buffer_offset + + (buffer->user_data - alloc->buffer); + pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; + size_t index = buffer_space_offset >> PAGE_SHIFT; + struct binder_lru_page *lru_page; + + lru_page = &alloc->pages[index]; + *pgoffp = pgoff; + return lru_page->page_ptr; +} + +/** + * binder_alloc_copy_user_to_buffer() - copy src user to tgt user + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @from: userspace pointer to source buffer + * @bytes: bytes to copy + * + * Copy bytes from source userspace to target buffer. + * + * Return: bytes remaining to be copied + */ +unsigned long +binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + const void __user *from, + size_t bytes) +{ + if (!check_buffer(alloc, buffer, buffer_offset, bytes)) + return bytes; + + while (bytes) { + unsigned long size; + unsigned long ret; + struct page *page; + pgoff_t pgoff; + void *kptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + kptr = kmap(page) + pgoff; + ret = copy_from_user(kptr, from, size); + kunmap(page); + if (ret) + return bytes - size + ret; + bytes -= size; + from += size; + buffer_offset += size; + } + return 0; +} + +static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, + bool to_buffer, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *ptr, + size_t bytes) +{ + /* All copies must be 32-bit aligned and 32-bit size */ + if (!check_buffer(alloc, buffer, buffer_offset, bytes)) + return -EINVAL; + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + void *tmpptr; + void *base_ptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + base_ptr = kmap_atomic(page); + tmpptr = base_ptr + pgoff; + if (to_buffer) + memcpy(tmpptr, ptr, size); + else + memcpy(ptr, tmpptr, size); + /* + * kunmap_atomic() takes care of flushing the cache + * if this device has VIVT cache arch + */ + kunmap_atomic(base_ptr); + bytes -= size; + pgoff = 0; + ptr = ptr + size; + buffer_offset += size; + } + return 0; +} + +int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *src, + size_t bytes) +{ + return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, + src, bytes); +} + +int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, + void *dest, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + size_t bytes) +{ + return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, + dest, bytes); +} diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 9ef64e56385667a53abeab2f41f67b198a8ce86e..9a51b72624ae403a8832d681d68cc777f0df59dc 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -22,6 +22,7 @@ #include #include #include +#include extern struct list_lru binder_alloc_lru; struct binder_transaction; @@ -30,16 +31,16 @@ struct binder_transaction; * struct binder_buffer - buffer used for binder transactions * @entry: entry alloc->buffers * @rb_node: node for 
allocated_buffers/free_buffers rb trees - * @free: true if buffer is free - * @allow_user_free: describe the second member of struct blah, - * @async_transaction: describe the second member of struct blah, - * @debug_id: describe the second member of struct blah, - * @transaction: describe the second member of struct blah, - * @target_node: describe the second member of struct blah, - * @data_size: describe the second member of struct blah, - * @offsets_size: describe the second member of struct blah, - * @extra_buffers_size: describe the second member of struct blah, - * @data:i describe the second member of struct blah, + * @free: %true if buffer is free + * @allow_user_free: %true if user is allowed to free buffer + * @async_transaction: %true if buffer is in use for an async txn + * @debug_id: unique ID for debugging + * @transaction: pointer to associated struct binder_transaction + * @target_node: struct binder_node associated with this buffer + * @data_size: size of @transaction data + * @offsets_size: size of array of offsets + * @extra_buffers_size: size of space for other objects (like sg lists) + * @user_data: user pointer to base of buffer space * * Bookkeeping structure for binder transaction buffers */ @@ -50,8 +51,7 @@ struct binder_buffer { unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned free_in_progress:1; - unsigned debug_id:28; + unsigned debug_id:29; struct binder_transaction *transaction; @@ -59,7 +59,7 @@ struct binder_buffer { size_t data_size; size_t offsets_size; size_t extra_buffers_size; - void *data; + void __user *user_data; }; /** @@ -82,7 +82,6 @@ struct binder_lru_page { * (invariant after init) * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) * @buffer: base of per-proc address space mapped via mmap - * @user_buffer_offset: offset between user and kernel VAs for buffer * @buffers: list of all buffers for this proc * @free_buffers: rb tree of buffers available for allocation * sorted by size @@ -103,8 +102,7 @@ struct binder_alloc { struct mutex mutex; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; - void *buffer; - ptrdiff_t user_buffer_offset; + void __user *buffer; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; @@ -163,26 +161,24 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc) return free_async_space; } -/** - * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs - * @alloc: binder_alloc for this proc - * - * Return: the offset between kernel and user-space addresses to use for - * virtual address conversion - */ -static inline ptrdiff_t -binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) -{ - /* - * user_buffer_offset is constant if vma is set and - * undefined if vma is not set. It is possible to - * get here with !alloc->vma if the target process - * is dying while a transaction is being initiated. - * Returning the old value is ok in this case and - * the transaction will fail. 
- */ - return alloc->user_buffer_offset; -} +unsigned long +binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + const void __user *from, + size_t bytes); + +int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *src, + size_t bytes); + +int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, + void *dest, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + size_t bytes); #endif /* _LINUX_BINDER_ALLOC_H */ diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index 8bd7bcef967d28cf2921938ecd25dd8bea71b2c6..f0f4d7d0263516c29a1a4b878105aea6e928d67c 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -105,8 +105,8 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc, void *page_addr, *end; int page_index; - end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); - page_addr = buffer->data; + end = (void *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); + page_addr = buffer->user_data; for (; page_addr < end; page_addr += PAGE_SIZE) { page_index = (page_addr - alloc->buffer) / PAGE_SIZE; if (!alloc->pages[page_index].page_ptr || diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 588eb3ec35070554cfde9668d9838b16e076785b..83cc254d2335a018364f7e38e400b89908a0b74a 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -223,22 +223,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref, __entry->dest_ref_debug_id, __entry->dest_ref_desc) ); -TRACE_EVENT(binder_transaction_fd, - TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), - TP_ARGS(t, src_fd, dest_fd), +TRACE_EVENT(binder_transaction_fd_send, + TP_PROTO(struct binder_transaction *t, int fd, size_t offset), + TP_ARGS(t, fd, offset), TP_STRUCT__entry( __field(int, debug_id) - __field(int, src_fd) - __field(int, dest_fd) + __field(int, fd) + __field(size_t, offset) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->fd = fd; + __entry->offset = offset; + ), + TP_printk("transaction=%d src_fd=%d offset=%zu", + __entry->debug_id, __entry->fd, __entry->offset) +); + +TRACE_EVENT(binder_transaction_fd_recv, + TP_PROTO(struct binder_transaction *t, int fd, size_t offset), + TP_ARGS(t, fd, offset), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, fd) + __field(size_t, offset) ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->src_fd = src_fd; - __entry->dest_fd = dest_fd; + __entry->fd = fd; + __entry->offset = offset; ), - TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", - __entry->debug_id, __entry->src_fd, __entry->dest_fd) + TP_printk("transaction=%d dest_fd=%d offset=%zu", + __entry->debug_id, __entry->fd, __entry->offset) ); DECLARE_EVENT_CLASS(binder_buffer_class, @@ -275,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, TRACE_EVENT(binder_update_page_range, TP_PROTO(struct binder_alloc *alloc, bool allocate, - void *start, void *end), + void __user *start, void __user *end), TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 39b181d6bd0d8cf2cbcd9dde1cf89b373ecae6a4..78a6338d074ecaa63fa7706225ad8e9e0750210f 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -121,7 +121,8 @@ config SATA_AHCI_PLATFORM config AHCI_BRCM tristate 
"Broadcom AHCI SATA support" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP + depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ + ARCH_BCM_63XX help This option enables support for the AHCI SATA3 controller found on Broadcom SoC's. @@ -493,6 +494,14 @@ config SATA_VITESSE If unsure, say N. +config SATA_ZHAOXIN + tristate "ZhaoXin SATA support" + depends on PCI + help + This option enables support for ZhaoXin Serial ATA. + + If unsure, say N. + comment "PATA SFF controllers with BMDMA" config PATA_ALI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d21cdd83f7ab7ad0167d036d52344d065c968135..2d922031118733f65efaed1599c8948a5fd7ba93 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_SATA_SIL) += sata_sil.o obj-$(CONFIG_SATA_SIS) += sata_sis.o obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o +obj-$(CONFIG_SATA_ZHAOXIN) += sata_zhaoxin.o obj-$(CONFIG_SATA_VIA) += sata_via.o obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 021ce46e2e57343b181976a980abef2e16e798d5..8ed2f42b85fd3482fb9b54f5461ec19ef770a1a2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -81,6 +81,12 @@ enum board_ids { board_ahci_sb700, /* for SB700 and SB800 */ board_ahci_vt8251, + /* + * board IDs for Intel chipsets that support more than 6 ports + * *and* end up needing the PCS quirk. + */ + board_ahci_pcs7, + /* aliases */ board_ahci_mcp_linux = board_ahci_mcp65, board_ahci_mcp67 = board_ahci_mcp65, @@ -90,6 +96,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); +static void ahci_shutdown_one(struct pci_dev *dev); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -236,6 +243,12 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_vt8251_ops, }, + [board_ahci_pcs7] = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, }; static const struct pci_device_id ahci_pci_tbl[] = { @@ -280,26 +293,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ - { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* 
DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ @@ -597,6 +610,7 @@ static struct pci_driver ahci_pci_driver = { .id_table = ahci_pci_tbl, .probe = ahci_init_one, .remove = ahci_remove_one, + .shutdown = ahci_shutdown_one, .driver = { .pm = &ahci_pci_pm_ops, }, @@ -639,30 +653,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, hpriv); } -static int ahci_pci_reset_controller(struct ata_host *host) -{ - struct pci_dev *pdev = to_pci_dev(host->dev); - int rc; - - rc = ahci_reset_controller(host); - if (rc) - return rc; - - if (pdev->vendor == PCI_VENDOR_ID_INTEL) { - struct ahci_host_priv *hpriv = host->private_data; - u16 tmp16; - - /* configure PCS */ - pci_read_config_word(pdev, 0x92, &tmp16); - if ((tmp16 & hpriv->port_map) != hpriv->port_map) { - tmp16 |= hpriv->port_map; - pci_write_config_word(pdev, 0x92, tmp16); - } - } - - return 0; -} - static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -865,7 +855,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -900,7 +890,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; @@ -1635,6 +1625,36 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap, ap->target_lpm_policy = policy; } +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv) +{ + const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev); + u16 tmp16; + + /* + * Only apply the 6-port PCS quirk for known legacy 
platforms. + */ + if (!id || id->vendor != PCI_VENDOR_ID_INTEL) + return; + + /* Skip applying the quirk on Denverton and beyond */ + if (((enum board_ids) id->driver_data) >= board_ahci_pcs7) + return; + + /* + * port_map is determined from PORTS_IMPL PCI register which is + * implemented as write or write-once register. If the register + * isn't programmed, ahci automatically generates it from number + * of ports, which is good enough for PCS programming. It is + * otherwise expected that platform firmware enables the ports + * before the OS boots. + */ + pci_read_config_word(pdev, PCS_6, &tmp16); + if ((tmp16 & hpriv->port_map) != hpriv->port_map) { + tmp16 |= hpriv->port_map; + pci_write_config_word(pdev, PCS_6, tmp16); + } +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_id = ent->driver_data; @@ -1740,6 +1760,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; #ifdef CONFIG_ARM64 + if (pdev->vendor == PCI_VENDOR_ID_HUAWEI && + pdev->device == 0xa235 && + pdev->revision < 0x30) + hpriv->flags |= AHCI_HFLAG_NO_SXS; + if (pdev->vendor == 0x177d && pdev->device == 0xa01c) hpriv->irq_handler = ahci_thunderx_irq_handler; #endif @@ -1747,6 +1772,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); + /* + * If platform firmware failed to enable ports, try to enable + * them here. + */ + ahci_intel_pcs_quirk(pdev, hpriv); + /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1824,6 +1855,17 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n"); + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) { + if (hpriv->cap & HOST_CAP_PART) + host->flags |= ATA_HOST_PART; + + if (hpriv->cap & HOST_CAP_SSC) + host->flags |= ATA_HOST_SSC; + + if (hpriv->cap2 & HOST_CAP2_SDS) + host->flags |= ATA_HOST_DEVSLP; + } + if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); @@ -1856,7 +1898,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; @@ -1873,6 +1915,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } +static void ahci_shutdown_one(struct pci_dev *pdev) +{ + ata_pci_shutdown_one(pdev); +} + static void ahci_remove_one(struct pci_dev *pdev) { pm_runtime_get_noresume(&pdev->dev); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 6a1515f0da4021d755d433bf618cc423487fe930..52d0851653e5f9c33f76caa475179c15da04b321 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -254,6 +254,7 @@ enum { AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use SATA_MOBILE_LPM_POLICY as default lpm_policy */ + AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ /* ap->flags bits */ @@ -261,6 +262,8 @@ enum { ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, ICH_MAP = 0x90, /* ICH MAP register */ + PCS_6 = 0x92, /* 6 port PCS */ + PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ EM_MAX_SLOTS = 8, diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f3d557777d8292bd7739a477b70cc47f5df802e0..0192cab1b862fb7ba9a39bc335da899afd2912e8 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include 
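/*
 * The ahci.c hunks above move the Intel PCS fix-up out of the reset path
 * and into a one-shot quirk applied at probe time. A condensed sketch of
 * the resulting ordering inside ahci_init_one(); the sketch function name
 * is hypothetical and error handling is omitted:
 */
static void ahci_pcs_probe_order_sketch(struct pci_dev *pdev,
					struct ahci_host_priv *hpriv)
{
	/* 1. Read CAP/PI and apply any forced or masked port map. */
	ahci_pci_save_initial_config(pdev, hpriv);

	/*
	 * 2. For pre-Denverton Intel parts only, OR hpriv->port_map into the
	 *    6-port PCS register (PCS_6, offset 0x92) so ports the platform
	 *    firmware forgot to enable still come up; non-Intel IDs and
	 *    board_ahci_pcs7 and later are skipped inside the quirk.
	 */
	ahci_intel_pcs_quirk(pdev, hpriv);

	/* 3. ahci_reset_controller() itself no longer touches PCS at all. */
}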
#include "ahci.h" @@ -84,8 +85,7 @@ enum brcm_ahci_version { }; enum brcm_ahci_quirks { - BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), - BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0), }; struct brcm_ahci_priv { @@ -94,6 +94,7 @@ struct brcm_ahci_priv { u32 port_mask; u32 quirks; enum brcm_ahci_version version; + struct reset_control *rcdev; }; static inline u32 brcm_sata_readreg(void __iomem *addr) @@ -220,19 +221,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) brcm_sata_phy_disable(priv, i); } -static u32 brcm_ahci_get_portmask(struct platform_device *pdev, +static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, struct brcm_ahci_priv *priv) { - void __iomem *ahci; - struct resource *res; u32 impl; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); - ahci = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ahci)) - return 0; - - impl = readl(ahci + HOST_PORTS_IMPL); + impl = readl(hpriv->mmio + HOST_PORTS_IMPL); if (fls(impl) > SATA_TOP_MAX_PHYS) dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", @@ -240,9 +234,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, else if (!impl) dev_info(priv->dev, "no ports found\n"); - devm_iounmap(&pdev->dev, ahci); - devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); - return impl; } @@ -292,6 +283,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, /* Perform the SATA PHY reset sequence */ brcm_sata_phy_disable(priv, ap->port_no); + /* Reset the SATA clock */ + ahci_platform_disable_clks(hpriv); + msleep(10); + + ahci_platform_enable_clks(hpriv); + msleep(10); + /* Bring the PHY back on */ brcm_sata_phy_enable(priv, ap->port_no); @@ -354,11 +352,10 @@ static int brcm_ahci_suspend(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; - ret = ahci_platform_suspend(dev); brcm_sata_phys_disable(priv); - return ret; + + return ahci_platform_suspend(dev); } static int brcm_ahci_resume(struct device *dev) @@ -366,11 +363,44 @@ static int brcm_ahci_resume(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; + + /* Make sure clocks are turned on before re-configuration */ + ret = ahci_platform_enable_clks(hpriv); + if (ret) + return ret; brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); - return ahci_platform_resume(dev); + + /* Since we had to enable clocks earlier on, we cannot use + * ahci_platform_resume() as-is since a second call to + * ahci_platform_enable_resources() would bump up the resources + * (regulators, clocks, PHYs) count artificially so we copy the part + * after ahci_platform_enable_resources(). 
+ */ + ret = ahci_platform_enable_phys(hpriv); + if (ret) + goto out_disable_phys; + + ret = ahci_platform_resume_host(dev); + if (ret) + goto out_disable_platform_phys; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); + ahci_platform_disable_clks(hpriv); + return ret; } #endif @@ -411,44 +441,76 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); - if ((priv->version == BRCM_SATA_BCM7425) || - (priv->version == BRCM_SATA_NSP)) { - priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; + /* Reset is optional depending on platform */ + priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci"); + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_deassert(priv->rcdev); + + hpriv = ahci_platform_get_resources(pdev, 0); + if (IS_ERR(hpriv)) { + ret = PTR_ERR(hpriv); + goto out_reset; + } + + hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; + + switch (priv->version) { + case BRCM_SATA_BCM7425: + hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; + /* fall through */ + case BRCM_SATA_NSP: + hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + break; + default: + break; } + ret = ahci_platform_enable_clks(hpriv); + if (ret) + goto out_reset; + + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. + */ brcm_sata_init(priv); - priv->port_mask = brcm_ahci_get_portmask(pdev, priv); - if (!priv->port_mask) - return -ENODEV; + /* Initializes priv->port_mask which is used below */ + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; + goto out_disable_clks; + } + /* Must be done before ahci_platform_enable_phys() */ brcm_sata_phys_enable(priv); - hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) - return PTR_ERR(hpriv); - hpriv->plat_data = priv; - hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; - brcm_sata_alpm_init(hpriv); - ret = ahci_platform_enable_resources(hpriv); + ret = ahci_platform_enable_phys(hpriv); if (ret) - return ret; - - if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; + goto out_disable_phys; ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) - return ret; + goto out_disable_platform_phys; dev_info(dev, "Broadcom AHCI SATA3 registered\n"); return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); +out_disable_clks: + ahci_platform_disable_clks(hpriv); +out_reset: + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_assert(priv->rcdev); + return ret; } static int brcm_ahci_remove(struct platform_device *pdev) @@ -458,12 +520,12 @@ static int brcm_ahci_remove(struct platform_device *pdev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; + brcm_sata_phys_disable(priv); + ret = ata_platform_remove_one(pdev); if (ret) return ret; - brcm_sata_phys_disable(priv); - return 0; } diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index f9cb51be38ebfd099940c1836d34e6933275ec54..a54214291481ff04bf24d4a830a7eebd6a718b01 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -28,6 +28,10 @@ #define AHCI_WINDOW_BASE(win) (0x64 + 
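/*
 * The ahci_brcm.c resume hunk above cannot simply call
 * ahci_platform_resume(): the clocks have to be running before
 * brcm_sata_init() reprograms the bridge, and calling the generic resume
 * afterwards would enable the platform resources a second time and
 * unbalance their counts. A condensed sketch of the resulting order; the
 * function name is hypothetical and the unwind is trimmed to essentials:
 */
static int brcm_resume_order_sketch(struct device *dev,
				    struct ahci_host_priv *hpriv,
				    struct brcm_ahci_priv *priv)
{
	int ret;

	ret = ahci_platform_enable_clks(hpriv);	/* clocks first */
	if (ret)
		return ret;

	brcm_sata_init(priv);			/* bridge setup, endianness */
	brcm_sata_phys_enable(priv);		/* SoC-internal PHY power-up */
	brcm_sata_alpm_init(hpriv);

	ret = ahci_platform_enable_phys(hpriv);	/* generic PHY framework */
	if (ret)
		goto err_brcm_phys;

	ret = ahci_platform_resume_host(dev);	/* standard AHCI host resume */
	if (ret)
		goto err_platform_phys;

	return 0;

err_platform_phys:
	ahci_platform_disable_phys(hpriv);
err_brcm_phys:
	brcm_sata_phys_disable(priv);
	ahci_platform_disable_clks(hpriv);
	return ret;
}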
((win) << 4)) #define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4)) +struct ahci_mvebu_plat_data { + int (*plat_config)(struct ahci_host_priv *hpriv); +}; + static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, const struct mbus_dram_target_info *dram) { @@ -62,6 +66,22 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv) +{ + const struct mbus_dram_target_info *dram; + int rc = 0; + + dram = mv_mbus_dram_info(); + if (dram) + ahci_mvebu_mbus_config(hpriv, dram); + else + rc = -ENODEV; + + ahci_mvebu_regret_option(hpriv); + + return rc; +} + /** * ahci_mvebu_stop_engine * @@ -126,13 +146,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); struct ahci_host_priv *hpriv = host->private_data; - const struct mbus_dram_target_info *dram; + const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data; - dram = mv_mbus_dram_info(); - if (dram) - ahci_mvebu_mbus_config(hpriv, dram); - - ahci_mvebu_regret_option(hpriv); + if (pdata->plat_config) + pdata->plat_config(hpriv); return ahci_platform_resume_host(&pdev->dev); } @@ -154,28 +171,31 @@ static struct scsi_host_template ahci_platform_sht = { static int ahci_mvebu_probe(struct platform_device *pdev) { + const struct ahci_mvebu_plat_data *pdata; struct ahci_host_priv *hpriv; - const struct mbus_dram_target_info *dram; int rc; + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) + return -EINVAL; + hpriv = ahci_platform_get_resources(pdev, 0); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); + hpriv->plat_data = (void *)pdata; + rc = ahci_platform_enable_resources(hpriv); if (rc) return rc; hpriv->stop_engine = ahci_mvebu_stop_engine; - if (of_device_is_compatible(pdev->dev.of_node, - "marvell,armada-380-ahci")) { - dram = mv_mbus_dram_info(); - if (!dram) - return -ENODEV; - - ahci_mvebu_mbus_config(hpriv, dram); - ahci_mvebu_regret_option(hpriv); + pdata = hpriv->plat_data; + if (pdata->plat_config) { + rc = pdata->plat_config(hpriv); + if (rc) + goto disable_resources; } rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, @@ -190,9 +210,23 @@ static int ahci_mvebu_probe(struct platform_device *pdev) return rc; } +static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = { + .plat_config = ahci_mvebu_armada_380_config, +}; + +static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { + .plat_config = NULL, +}; + static const struct of_device_id ahci_mvebu_of_match[] = { - { .compatible = "marvell,armada-380-ahci", }, - { .compatible = "marvell,armada-3700-ahci", }, + { + .compatible = "marvell,armada-380-ahci", + .data = &ahci_mvebu_armada_380_plat_data, + }, + { + .compatible = "marvell,armada-3700-ahci", + .data = &ahci_mvebu_armada_3700_plat_data, + }, { }, }; MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match); diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 46f0bd75eff7984f9709d431aaa874fa951c3724..cf1e0e18a7a98af51d14d6bf4389f8c730751627 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -33,6 +33,13 @@ static const struct ata_port_info ahci_port_info = { .port_ops = &ahci_platform_ops, }; +static const struct ata_port_info ahci_port_info_nolpm = { + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_LPM, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_platform_ops, +}; + static struct scsi_host_template 
ahci_platform_sht = { AHCI_SHT(DRV_NAME), }; @@ -41,6 +48,7 @@ static int ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; + const struct ata_port_info *port; int rc; hpriv = ahci_platform_get_resources(pdev, @@ -58,7 +66,11 @@ static int ahci_probe(struct platform_device *pdev) if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; - rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, + port = acpi_device_get_match_data(dev); + if (!port) + port = &ahci_port_info; + + rc = ahci_platform_init_host(pdev, hpriv, port, &ahci_platform_sht); if (rc) goto disable_resources; @@ -85,6 +97,7 @@ static const struct of_device_id ahci_of_match[] = { MODULE_DEVICE_TABLE(of, ahci_of_match); static const struct acpi_device_id ahci_acpi_match[] = { + { "APMC0D33", (unsigned long)&ahci_port_info_nolpm }, { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) }, {}, }; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index b5f57c69c48786e7b795b399b8a828a111534ec9..8df2541f39bfb26a98270012cb8249749fa3219e 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -510,6 +510,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) cap |= HOST_CAP_ALPM; } + if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) { + dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n"); + cap &= ~HOST_CAP_SXS; + } + if (hpriv->force_port_map && port_map != hpriv->force_port_map) { dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", port_map, hpriv->force_port_map); @@ -1805,9 +1810,17 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) /* okay, let's hand over to EH */ - if (irq_stat & PORT_IRQ_FREEZE) + if (irq_stat & PORT_IRQ_FREEZE) { + /* + * EH already running, this may happen if the port is + * thawed in the EH. But we cannot freeze it again + * otherwise the port will never be thawed. + */ + if (ap->pflags & (ATA_PFLAG_EH_PENDING | + ATA_PFLAG_EH_IN_PROGRESS)) + return; ata_port_freeze(ap); - else if (fbs_need_dec) { + } else if (fbs_need_dec) { ata_link_abort(link); ahci_fbs_dec_intr(ap); } else diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index c92c10d553746da95702677b9b380a0da099b242..6a55aac0c60fcc9f4477b0dfc466558d6ff07dff 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -47,7 +47,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); * RETURNS: * 0 on success otherwise a negative error code */ -static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) { int rc, i; @@ -72,6 +72,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) } return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); /** * ahci_platform_disable_phys - Disable PHYs @@ -79,7 +80,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) * * This function disables all PHYs found in hpriv->phys. 
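/*
 * The ahci_mvebu.c and ahci_platform.c hunks above both switch from
 * compatible-string comparisons in probe to per-entry match data. A
 * minimal sketch of the OF flavour; the structure and its plat_config
 * field come from the mvebu hunk, while the function and variable names
 * here are illustrative only:
 */
static int example_mvebu_probe_fragment(struct platform_device *pdev,
					struct ahci_host_priv *hpriv)
{
	const struct ahci_mvebu_plat_data *pdata;

	pdata = of_device_get_match_data(&pdev->dev);
	if (!pdata)
		return -EINVAL;		/* table entry lacks .data */

	hpriv->plat_data = (void *)pdata;

	/* Armada 380 supplies a config hook; Armada 3700 leaves it NULL. */
	if (pdata->plat_config)
		return pdata->plat_config(hpriv);

	return 0;
}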
*/ -static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) { int i; @@ -88,6 +89,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) phy_exit(hpriv->phys[i]); } } +EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); /** * ahci_platform_enable_clks - Enable platform clocks @@ -313,6 +315,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, hpriv->phys[port] = NULL; rc = 0; break; + case -EPROBE_DEFER: + /* Do not complain yet */ + break; default: dev_err(dev, @@ -539,11 +544,13 @@ int ahci_platform_init_host(struct platform_device *pdev, int i, irq, n_ports, rc; irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + if (irq < 0) { if (irq != -EPROBE_DEFER) dev_err(dev, "no irq\n"); return irq; } + if (!irq) + return -EINVAL; hpriv->irq = irq; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a9dd4ea7467df60912baca1fd7ebfeb91c9d2988..e23ba7e4298d1301d64ac5dfc9d684cee1d84a56 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1665,6 +1665,13 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, */ if (qc->flags & ATA_QCFLAG_ACTIVE) { qc->err_mask |= AC_ERR_TIMEOUT; + qc->flags |= ATA_QCFLAG_FAILED; + + spin_unlock_irqrestore(ap->lock, flags); + /* do post_internal_cmd */ + if (ap->ops->post_internal_cmd) + ap->ops->post_internal_cmd(qc); + spin_lock_irqsave(ap->lock, flags); if (ap->ops->error_handler) ata_port_freeze(ap); @@ -1679,9 +1686,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); } - /* do post_internal_cmd */ - if (ap->ops->post_internal_cmd) - ap->ops->post_internal_cmd(qc); + if (!(qc->err_mask & AC_ERR_TIMEOUT)) + /* do post_internal_cmd */ + if (ap->ops->post_internal_cmd) + ap->ops->post_internal_cmd(qc); /* perform minimal error analysis */ if (qc->flags & ATA_QCFLAG_FAILED) { @@ -2266,7 +2274,41 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev) dev->flags &= ~ATA_DFLAG_NCQ_PRIO; ata_dev_dbg(dev, "SATA page does not support priority\n"); } +} + +static inline int ata_dev_config_aa(struct ata_device *dev, struct ata_port *ap, + char **aa_desc) +{ + unsigned int err_mask; + char *desc; + u8 enable; + + if (dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) + return 0; + + if (!ata_id_has_fpdma_aa(dev->id)) + return 0; + + if (ap->flags & ATA_FLAG_FPDMA_AA) { + enable = SETFEATURES_SATA_ENABLE; + desc = "enable"; + } else { + enable = SETFEATURES_SATA_DISABLE; + desc = "disalbe"; + } + + err_mask = ata_dev_set_feature(dev, enable, SATA_FPDMA_AA); + if (err_mask) { + ata_dev_err(dev, "failed to %s AA (error_mask=0x%x)\n", + desc, err_mask); + if (err_mask != AC_ERR_DEV) { + dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; + return -EIO; + } + } else + *aa_desc = ", AA"; + return 0; } static int ata_dev_config_ncq(struct ata_device *dev, @@ -2290,22 +2332,9 @@ static int ata_dev_config_ncq(struct ata_device *dev, dev->flags |= ATA_DFLAG_NCQ; } - if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && - (ap->flags & ATA_FLAG_FPDMA_AA) && - ata_id_has_fpdma_aa(dev->id)) { - err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, - SATA_FPDMA_AA); - if (err_mask) { - ata_dev_err(dev, - "failed to enable AA (error_mask=0x%x)\n", - err_mask); - if (err_mask != AC_ERR_DEV) { - dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; - return -EIO; - } - } else - aa_desc = ", AA"; - } + err_mask = ata_dev_config_aa(dev, ap, &aa_desc); + if (err_mask) + return err_mask; if (hdepth >= 
ddepth) snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); @@ -3946,6 +3975,9 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, bool spm_wakeup) { struct ata_eh_context *ehc = &link->eh_context; + struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *dev = ap ? ap->host->dev : NULL; + struct pci_dev *pdev = (!dev || !dev_is_pci(dev)) ? NULL : to_pci_dev(dev); bool woken_up = false; u32 scontrol; int rc; @@ -3972,10 +4004,20 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, case ATA_LPM_MED_POWER_WITH_DIPM: case ATA_LPM_MIN_POWER_WITH_PARTIAL: case ATA_LPM_MIN_POWER: - if (ata_link_nr_enabled(link) > 0) + if (ata_link_nr_enabled(link) > 0) { /* no restrictions on LPM transitions */ scontrol &= ~(0x7 << 8); - else { + /* + * If Host does not support partial, then disallows it, + * the same for slumber. + */ + if (pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) { + if (!(link->ap->host->flags & ATA_HOST_PART)) + scontrol |= (0x1 << 8); + if (!(link->ap->host->flags & ATA_HOST_SSC)) + scontrol |= (0x2 << 8); + } + } else { /* empty port, power off */ scontrol &= ~0xf; scontrol |= (0x1 << 2); @@ -4225,7 +4267,7 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes) * RETURNS: * 1 if @dev matches @new_class and @new_id, 0 otherwise. */ -static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, +int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, const u16 *new_id) { const u16 *old_id = dev->id; @@ -4476,9 +4518,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ - { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, - { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + /* drives which fail FPDMA_AA activation (some may freeze afterwards) + the ST disks also have LPM issues */ + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA | + ATA_HORKAGE_NOLPM, }, { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 @@ -4490,9 +4535,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. 
Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -4553,6 +4597,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* These specific Samsung models/firmware-revs do not handle LPM well */ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | @@ -4601,6 +4647,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* @@ -5337,6 +5384,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } } +/** + * ata_qc_get_active - get bitmask of active qcs + * @ap: port in question + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Bitmask of active qcs + */ +u64 ata_qc_get_active(struct ata_port *ap) +{ + u64 qc_active = ap->qc_active; + + /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ + if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (1 << 0); + qc_active &= ~(1ULL << ATA_TAG_INTERNAL); + } + + return qc_active; +} +EXPORT_SYMBOL_GPL(ata_qc_get_active); + /** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question @@ -6124,8 +6195,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) if (!host) return NULL; - if (!devres_open_group(dev, NULL, GFP_KERNEL)) - goto err_free; + if (!devres_open_group(dev, NULL, GFP_KERNEL)) { + kfree(host); + return NULL; + } dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL); if (!dr) @@ -6157,8 +6230,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) err_out: devres_release_group(dev, NULL); - err_free: - kfree(host); return NULL; } @@ -6182,7 +6253,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { - const struct ata_port_info *pi; + const struct ata_port_info *pi = &ata_dummy_port_info; struct ata_host *host; int i, j; @@ -6190,7 +6261,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, if (!host) return NULL; - for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + for (i = 0, j = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) @@ -6720,6 +6791,9 @@ void ata_host_detach(struct ata_host *host) { int i; + /* Ensure ata_port probe has completed */ + async_synchronize_full(); + for (i = 0; i < host->n_ports; i++) ata_port_detach(host->ports[i]); @@ -6747,6 +6821,26 @@ void ata_pci_remove_one(struct pci_dev *pdev) ata_host_detach(host); } +void ata_pci_shutdown_one(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + int i; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ap->pflags |= ATA_PFLAG_FROZEN; + + /* Disable port interrupts */ + if (ap->ops->freeze) + ap->ops->freeze(ap); + + /* 
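/*
 * Drivers that complete several commands at once pair the new
 * ata_qc_get_active() helper above with ata_qc_complete_multiple(), as the
 * sata_fsl/sata_mv/sata_nv hunks further down do. A usage sketch; the
 * handler name and the way done_mask is derived are illustrative:
 */
static void example_host_intr(struct ata_port *ap, u64 done_mask)
{
	/*
	 * ap->qc_active tracks the internal command at bit ATA_TAG_INTERNAL,
	 * but the hardware reports it as tag 0; ata_qc_get_active() folds it
	 * back to bit 0 so the XOR below compares like with like.
	 */
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
}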
Stop the port DMA engines */ + if (ap->ops->port_stop) + ap->ops->port_stop(ap); + } +} + /* move to PCI subsystem */ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) { @@ -7367,6 +7461,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); +EXPORT_SYMBOL_GPL(ata_pci_shutdown_one); EXPORT_SYMBOL_GPL(ata_pci_remove_one); #ifdef CONFIG_PM EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); @@ -7405,3 +7500,4 @@ EXPORT_SYMBOL_GPL(ata_cable_ignore); EXPORT_SYMBOL_GPL(ata_cable_sata); EXPORT_SYMBOL_GPL(ata_host_get); EXPORT_SYMBOL_GPL(ata_host_put); +EXPORT_SYMBOL_GPL(ata_dev_same_device); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 01306c018398fa16583cab46bd1e51b9ccf86309..e4e0e6d94741ea76a62056b227d88d3ac5392d8f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1490,7 +1490,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev, tf->hob_lbah = buf[10]; tf->nsect = buf[12]; tf->hob_nsect = buf[13]; - if (ata_id_has_ncq_autosense(dev->id)) + if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; return 0; @@ -1737,7 +1737,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) memcpy(&qc->result_tf, &tf, sizeof(tf)); qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; - if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) { + if (dev->class == ATA_DEV_ZAC && + ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { char sense_key, asc, ascq; sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; @@ -1791,10 +1792,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, } switch (qc->dev->class) { - case ATA_DEV_ATA: case ATA_DEV_ZAC: if (stat & ATA_SENSE) ata_eh_request_sense(qc, qc->scsicmd); + /* fall through */ + case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; if (err & (ATA_UNC | ATA_AMNF)) @@ -3446,6 +3448,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *device = ap->host->dev; + struct pci_dev *pdev = (!device || !dev_is_pci(device)) ? 
NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -3454,6 +3458,11 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, unsigned int err_mask; int rc; + /* if controller does not support lpm, then sets no LPM flags */ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(ap->host->flags & (ATA_HOST_PART | ATA_HOST_SSC | ATA_HOST_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) return 0; diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 2ae1799f49927231f16cf49cbfb872607c128ea3..51eeaea65833d19c73c2d9b0dce0b108ab1780d5 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, if (dev->flags & ATA_DFLAG_DETACH) { detach = 1; + rc = -ENODEV; goto fail; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 1984fc78c750b42505a5178761366dce33fa4089..16d76c8490c723059d360566dcd2d0a53dae8fb8 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -778,7 +778,7 @@ static int ata_ioc32(struct ata_port *ap) } int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, - int cmd, void __user *arg) + unsigned int cmd, void __user *arg) { unsigned long val; int rc = -EINVAL; @@ -829,7 +829,8 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, } EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl); -int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd, + void __user *arg) { return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), scsidev, cmd, arg); @@ -1803,6 +1804,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) return 1; } +static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks) +{ + struct request *rq = scmd->request; + u32 req_blocks; + + if (!blk_rq_is_passthrough(rq)) + return true; + + req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; + if (n_blocks > req_blocks) + return false; + + return true; +} + /** * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one * @qc: Storage for translated ATA taskfile @@ -1847,6 +1863,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_10_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_6: case WRITE_6: @@ -1861,6 +1879,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) */ if (!n_block) n_block = 256; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_16: case WRITE_16: @@ -1871,6 +1891,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_16_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; default: DPRINTK("no-byte command\n"); @@ -2370,6 +2392,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2395,7 +2418,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the 
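/*
 * The ata_check_nblocks() helper added in the libata-scsi.c hunk above
 * rejects SG_IO passthrough reads/writes whose CDB asks for more blocks
 * than the request actually carries, before the command reaches the
 * device. A worked example with illustrative figures:
 *
 *   payload supplied by caller = 8 KiB
 *   sector_size                = 512 bytes
 *   req_blocks                 = 8192 / 512 = 16
 *   n_block from CDB = 32         -> rejected (invalid field in CDB)
 *   n_block from CDB = 16 or less -> allowed through
 */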
unmap bit set. */ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } @@ -3974,12 +4002,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const u8 *p = buffer; VPRINTK("ENTER\n"); @@ -4013,12 +4042,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. */ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else @@ -4549,22 +4580,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked = 1; - rc = scsi_add_host_with_dma(ap->scsi_host, - &ap->tdev, ap->host->dev); + rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); if (rc) - goto err_add; + goto err_alloc; } return 0; - err_add: - scsi_host_put(host->ports[i]->scsi_host); err_alloc: while (--i >= 0) { struct Scsi_Host *shost = host->ports[i]->scsi_host; + /* scsi_host_put() is in ata_devres_release() */ scsi_remove_host(shost); - scsi_host_put(shost); } return rc; } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index c5ea0fc635e54eb800cb12d8812ea5e508c388cc..873cc0906055129eff641428db31c118cbdbeb21 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned int offset; unsigned char *buf; + if (!qc->cursg) { + qc->curbytes = qc->nbytes; + return; + } if (qc->curbytes == qc->nbytes - qc->sect_size) ap->hsm_task_state = HSM_ST_LAST; @@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) if (qc->cursg_ofs == qc->cursg->length) { qc->cursg = sg_next(qc->cursg); + if (!qc->cursg) + ap->hsm_task_state = HSM_ST_LAST; qc->cursg_ofs = 0; } } diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index a0b0b4d986f20fe813d565113e23097dee761102..c3f446fc24c4bc8ad17edb567c277a18814fb381 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -208,7 +208,7 @@ show_ata_port_##name(struct device *dev, \ { \ struct ata_port *ap = transport_class_to_port(dev); \ \ - return snprintf(buf, 20, format_string, cast ap->field); \ + return scnprintf(buf, 20, format_string, cast ap->field); \ } #define ata_port_simple_attr(field, name, format_string, type) \ @@ -479,7 +479,7 @@ show_ata_dev_##field(struct device *dev, \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ - return snprintf(buf, 20, format_string, cast ata_dev->field); \ + return scnprintf(buf, 20, format_string, cast ata_dev->field); \ } #define ata_dev_simple_attr(field, format_string, type) \ @@ -533,7 +533,7 @@ show_ata_dev_id(struct device *dev, if (ata_dev->class == ATA_DEV_PMP) return 0; for(i=0;iid[i], ((i+1) & 7) ? 
' ' : '\n'); } @@ -552,7 +552,7 @@ show_ata_dev_gscr(struct device *dev, if (ata_dev->class != ATA_DEV_PMP) return 0; for(i=0;igscr[i], ((i+1) & 3) ? ' ' : '\n'); } @@ -581,7 +581,7 @@ show_ata_dev_trim(struct device *dev, else mode = "unqueued"; - return snprintf(buf, 20, "%s\n", mode); + return scnprintf(buf, 20, "%s\n", mode); } static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL); diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index b3ed8f9953a862ea3ae67ef065ca5469330a44e0..eefda51f97d351bda5d8437e2d83aaeae16b30f6 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev) /* Per the spec, only slot type and drawer type ODD can be supported */ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) { - char buf[16]; + char *buf; unsigned int ret; - struct rm_feature_desc *desc = (void *)(buf + 8); + struct rm_feature_desc *desc; struct ata_taskfile tf; - static const char cdb[] = { GPCMD_GET_CONFIGURATION, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION, 2, /* only 1 feature descriptor requested */ 0, 3, /* 3, removable medium feature */ 0, 0, 0,/* reserved */ - 0, sizeof(buf), + 0, 16, 0, 0, 0, }; + buf = kzalloc(16, GFP_KERNEL); + if (!buf) + return ODD_MECH_TYPE_UNSUPPORTED; + desc = (void *)(buf + 8); + ata_tf_init(dev, &tf); tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; tf.protocol = ATAPI_PROT_PIO; - tf.lbam = sizeof(buf); + tf.lbam = 16; ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, - buf, sizeof(buf), 0); - if (ret) + buf, 16, 0); + if (ret) { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } - if (be16_to_cpu(desc->feature_code) != 3) + if (be16_to_cpu(desc->feature_code) != 3) { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } - if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) + if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) { + kfree(buf); return ODD_MECH_TYPE_SLOT; - else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) + } else if (desc->mech_type == 1 && desc->load == 0 && + desc->eject == 1) { + kfree(buf); return ODD_MECH_TYPE_DRAWER; - else + } else { + kfree(buf); return ODD_MECH_TYPE_UNSUPPORTED; + } } /* Test if ODD is zero power ready by sense code */ diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index 0a550190955ad26915842aff139bd9e09588257b..cc6d06c1b2c70a8e50c3f6384004f10d25edeba1 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) * start of new transfer. 
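/*
 * The libata-transport.c switch from snprintf() to scnprintf() above
 * matters because a sysfs ->show() routine must return the number of
 * bytes actually placed in the buffer. An illustrative sketch assuming a
 * 20-byte buffer (the function name is hypothetical):
 */
static void example_show_return_values(void)
{
	char buf[20];
	int len;

	/* snprintf() reports what it *wanted* to write: 28 here, past the
	 * end of buf, which is not a valid ->show() return value. */
	len = snprintf(buf, sizeof(buf), "%s", "a-very-long-mode-name-string");

	/* scnprintf() reports what it actually stored: 19 (buf minus NUL). */
	len = scnprintf(buf, sizeof(buf), "%s", "a-very-long-mode-name-string");
	(void)len;
}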
*/ drv_data->dma_rx_data.port = EP93XX_DMA_IDE; - drv_data->dma_rx_data.direction = DMA_FROM_DEVICE; + drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM; drv_data->dma_rx_data.name = "ep93xx-pata-rx"; drv_data->dma_rx_channel = dma_request_channel(mask, ep93xx_pata_dma_filter, &drv_data->dma_rx_data); @@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) return; drv_data->dma_tx_data.port = EP93XX_DMA_IDE; - drv_data->dma_tx_data.direction = DMA_TO_DEVICE; + drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV; drv_data->dma_tx_data.name = "ep93xx-pata-tx"; drv_data->dma_tx_channel = dma_request_channel(mask, ep93xx_pata_dma_filter, &drv_data->dma_tx_data); @@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) /* Configure receive channel direction and source address */ memset(&conf, 0, sizeof(conf)); - conf.direction = DMA_FROM_DEVICE; + conf.direction = DMA_DEV_TO_MEM; conf.src_addr = drv_data->udma_in_phys; conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) { @@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) /* Configure transmit channel direction and destination address */ memset(&conf, 0, sizeof(conf)); - conf.direction = DMA_TO_DEVICE; + conf.direction = DMA_MEM_TO_DEV; conf.dst_addr = drv_data->udma_out_phys; conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) { diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 4dc528bf8e85e3088fa55859d056613e8db73281..e4fd7489058e1cea80720114ac7c853157329b14 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1283,7 +1283,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); return; } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { @@ -1397,6 +1397,14 @@ static int sata_fsl_init_controller(struct ata_host *host) return 0; } +static void sata_fsl_host_stop(struct ata_host *host) +{ + struct sata_fsl_host_priv *host_priv = host->private_data; + + iounmap(host_priv->hcr_base); + kfree(host_priv); +} + /* * scsi mid-layer and libata interface structures */ @@ -1429,6 +1437,8 @@ static struct ata_port_operations sata_fsl_ops = { .port_start = sata_fsl_port_start, .port_stop = sata_fsl_port_stop, + .host_stop = sata_fsl_host_stop, + .pmp_attach = sata_fsl_pmp_attach, .pmp_detach = sata_fsl_pmp_detach, }; @@ -1561,8 +1571,6 @@ static int sata_fsl_remove(struct platform_device *ofdev) ata_host_detach(host); irq_dispose_mapping(host_priv->irq); - iounmap(host_priv->hcr_base); - kfree(host_priv); return 0; } diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 73ba8e134ca9a9054867ae4e32581f9cc825539d..ab2e9f62ddc1a693b0da58cb01d5f0f7632eea4b 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2840,7 +2840,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp } if (work_done) { - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 72c9b922a77bc7793bb20ccd6432f249bcce45e1..761577d57ff3720bc129c1ca16315447a4e2d5bf 100644 
--- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1000,7 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) check_commands = 0; check_commands &= ~(1 << pos); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); } } diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 10ecb232245db8c617ee808966db432ece834358..03867f539f3a88aec6484af55647d30f0106b0fd 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -895,7 +895,9 @@ static int sata_rcar_probe(struct platform_device *pdev) int ret = 0; irq = platform_get_irq(pdev, 0); - if (irq <= 0) + if (irq < 0) + return irq; + if (!irq) return -EINVAL; priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL); diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..ef8c73a37667e99a23128b4206dd9d558b64f669 --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, 0x9002), zx100s }, + { PCI_VDEVICE(ZHAOXIN, 0x9003), zx100s }, + + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + + +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + int tmprc; + + if (pmp) { + 
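/*
 * The libahci_platform.c and sata_rcar.c IRQ hunks above now separate
 * "look-up failed" from "IRQ 0". A sketch of the idiom as a probe
 * fragment; the function name is illustrative and error reporting is
 * trimmed:
 */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* real error, including -EPROBE_DEFER */
	if (!irq)
		return -EINVAL;	/* IRQ 0 is not usable as a SATA interrupt */

	return irq;
}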
ap->ops->sff_dev_select(ap, pmp); + tmprc = ata_sff_wait_ready(&ap->link, deadline); + } else { + tmprc = ata_sff_wait_ready(link, deadline); + } + if (tmprc) + ata_link_err(link, "COMRESET failed for wait (errno=%d)\n", + rc); + else + ata_link_err(link, "wait for bsy success\n"); + + ata_link_err(link, "COMRESET success (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } else { + ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } + return rc; +} + +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bit0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bit4 of the configure byte */ + v |= raw & 0x30; + + /* read the IPM field, bit2 and 3 of the config byte */ + v |= ((ipm_tbl[(raw >> 2) & 0x3])<<8); + break; + + case SCR_ERROR: + /* devices other than 5287 uses 0xA8 as base */ + WARN_ON(pdev->device != 0x9002 && pdev->device != 0x9003); + pci_write_config_byte(pdev, 0x42, slot); + pci_read_config_dword(pdev, 0xA8, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bit0 and bit1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bit2 and bit3 */ + v |= ((raw >> 2) & 0x03) << 8; + + break; + + default: + return -EINVAL; + } + + *val = v; + return 0; +} + +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + WARN_ON(pdev == NULL); + + switch (scr) { + case SCR_ERROR: + /* devices 0x9002 uses 0xA8 as base */ + WARN_ON(pdev->device != 0x9002 && pdev->device != 0x9003); + pci_write_config_byte(pdev, 0x42, slot); + pci_write_config_dword(pdev, 0xA8, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + + pci_write_config_byte(pdev, 0xA4 + slot, v); + + + return 0; + + default: + return -EINVAL; + } +} + + +/** + * zx_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This is to fix the internal bug of zx chipsets, which will + * reset the device register after changing the IEN bit on ctl + * register. 
+ */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == 0x9002) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == 0x9003) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == 0x9002 || pdev->device == 0x9003) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == 0x9002) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == 0x9003) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, + "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", + i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 2e2efa577437e82ecdeb20d9d41b6ce63635bf16..8c37294f1d1ee4c07dacaec843a0190b0320d5fc 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI make the card work). config ATM_NICSTAR_USE_IDT77105 - bool "Use IDT77015 PHY driver (25Mbps)" + bool "Use IDT77105 PHY driver (25Mbps)" depends on ATM_NICSTAR help Support for the PHYsical layer chip in ForeRunner LE25 cards. 
In diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 29f102dcfec499885fd9feed86fb54c7b3617059..329ce9072ee9f9e9d72caf5fae22c9279974ac19 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) instead of '/ 512', use '>> 9' to prevent a call to divdu3 on x86 platforms */ - rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; + rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; if (rate_cps < 10) rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 6e737142ceaab636f3f46a600b6ec7bb893115c8..9ae66595b6360291c08f2779aa6547e9916157bf 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3767,6 +3767,7 @@ static void __exit idt77252_exit(void) card = idt77252_chain; dev = card->atmdev; idt77252_chain = card->next; + del_timer_sync(&card->tst_timer); if (dev->phy->stop) dev->phy->stop(dev); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 82532c299bb5964a429e81353b9c5f94d9bb5ed2..827c6d5e6177422e6d1880274c295f9e4a439c64 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "iphase.h" #include "suni.h" #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) @@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) } if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; board = ia_cmds.status; - if ((board < 0) || (board > iadev_count)) - board = 0; + + if ((board < 0) || (board > iadev_count)) + board = 0; + board = array_index_nospec(board, iadev_count + 1); + iadev = ia_dev[board]; switch (ia_cmds.cmd) { case MEMDUMP: @@ -3297,7 +3301,7 @@ static void __exit ia_module_exit(void) { pci_unregister_driver(&ia_driver); - del_timer(&ia_timer); + del_timer_sync(&ia_timer); } module_init(ia_module_init); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index cbec9adc01c768e95cf8a3ad000697019f38f65c..2719f086abc78cb731efdfb2984dbe019d9953b2 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -296,7 +296,7 @@ static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); - del_timer(&ns_timer); + del_timer_sync(&ns_timer); pci_unregister_driver(&nicstar_driver); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index e89146ddede693a400b0d7019f14ca92aa4dff8e..d5c76b50d3575b753cb8e9f69de6031e145494f4 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) -#define zwait while (zin(CMR) & uPD98401_BUSY) +#define zwait() do {} while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; @@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { - zwait; + zwait(); zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); @@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { - zwait; + zwait(); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | 
addr,CMR); - zwait; + zwait(); return zin(CER); } @@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool) } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); @@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc) } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_OPEN_CHAN,CMR); - zwait; + zwait(); DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc) pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); - zwait; + zwait(); zout(uPD98401_NOP,CMR); - zwait; + zwait(); zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); udelay(10); /* why oh why ... ? */ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); @@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n"); skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); - zwait; + zwait(); zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc) } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 - zwait; + zwait(); zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif - zwait; + zwait(); zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); @@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc) zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_OPEN_CHAN,CMR); - zwait; + zwait(); DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -1557,7 +1557,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value, struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); - zwait; + zwait(); zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); @@ -1569,10 +1569,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); - zwait; + zwait(); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait; + zwait(); return zin(CER) & 0xff; } diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 81c22d20d9d9c568ffc2f6c191a407baee330719..60e0b772673f3bd0c631efb6e45b0f624f75aa8a 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -538,6 +538,9 @@ static 
inline int handle_lcd_special_code(struct charlcd *lcd) } case 'x': /* gotoxy : LxXXX[yYYY]; */ case 'y': /* gotoxy : LyYYY[xXXX]; */ + if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';') + break; + /* If the command is valid, move to the new address */ if (parse_xy(esc, &priv->addr.x, &priv->addr.y)) charlcd_gotoxy(lcd); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index f1a42f0f1ded658e196bcde56decfbd41d663267..df3da49ff9e88cfd4df64405df5439cf13176dfe 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -299,6 +299,8 @@ static int hd44780_remove(struct platform_device *pdev) struct charlcd *lcd = platform_get_drvdata(pdev); charlcd_unregister(lcd); + + kfree(lcd); return 0; } diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc688a5e158a0c930c720845fccbfd0..21393ec3b9a4a58fe13037f59be6a3d1466f747e 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) struct ht16k33_priv *priv = i2c_get_clientdata(client); struct ht16k33_fbdev *fbdev = &priv->fbdev; - cancel_delayed_work(&fbdev->work); + cancel_delayed_work_sync(&fbdev->work); unregister_framebuffer(fbdev->info); framebuffer_release(fbdev->info); free_page((unsigned long) fbdev->buffer); diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 3b25a643058c9dde38511d646da8700e53c46837..0b8e2a7d6e9344009c48d78115ebf853f8ce924e 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1618,6 +1618,8 @@ static void panel_attach(struct parport *port) return; err_lcd_unreg: + if (scan_timer.function) + del_timer_sync(&scan_timer); if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 3e63a900b330e9b40f934ffe4a9814c79daacb4a..32dc81bd7056eaf8333c1d8e88dbd711ccb28016 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -149,6 +149,14 @@ config DEBUG_TEST_DRIVER_REMOVE unusable. You should say N here unless you are explicitly looking to test this functionality. +config HMEM_REPORTING + bool + default n + depends on NUMA + help + Enable reporting for heterogenous memory access attributes under + their non-uniform memory nodes. 
+ source "drivers/base/test/Kconfig" config SYS_HYPERVISOR diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index e7cb0c6ade81ec7171a0a55fdb0cf0f5ab4cd199..729dded51e7b2c69f460680fdf78af69b4007c11 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -7,7 +7,6 @@ */ #include -#include #include #include #include @@ -30,7 +29,6 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, per_cpu(freq_scale, i) = scale; } -static DEFINE_MUTEX(cpu_scale_mutex); DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) @@ -44,38 +42,11 @@ static ssize_t cpu_capacity_show(struct device *dev, { struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); + return sysfs_emit(buf, "%lu\n", + topology_get_cpu_scale(NULL, cpu->dev.id)); } -static ssize_t cpu_capacity_store(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct cpu *cpu = container_of(dev, struct cpu, dev); - int this_cpu = cpu->dev.id; - int i; - unsigned long new_capacity; - ssize_t ret; - - if (!count) - return 0; - - ret = kstrtoul(buf, 0, &new_capacity); - if (ret) - return ret; - if (new_capacity > SCHED_CAPACITY_SCALE) - return -EINVAL; - - mutex_lock(&cpu_scale_mutex); - for_each_cpu(i, &cpu_topology[this_cpu].core_sibling) - topology_set_cpu_scale(i, new_capacity); - mutex_unlock(&cpu_scale_mutex); - - return count; -} - -static DEVICE_ATTR_RW(cpu_capacity); +static DEVICE_ATTR_RO(cpu_capacity); static int register_cpu_capacity_sysctl(void) { @@ -116,7 +87,6 @@ void topology_normalize_cpu_scale(void) return; pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); - mutex_lock(&cpu_scale_mutex); for_each_possible_cpu(cpu) { pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", cpu, raw_capacity[cpu]); @@ -126,7 +96,6 @@ void topology_normalize_cpu_scale(void) pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", cpu, topology_get_cpu_scale(NULL, cpu)); } - mutex_unlock(&cpu_scale_mutex); } bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) diff --git a/drivers/base/base.h b/drivers/base/base.h index 7a419a7a6235b166625bcc4216de79a66249188c..2d270b8c731a069a15261724a73cc2df0086b1b9 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -66,6 +66,9 @@ struct driver_private { * probed first. * @device - pointer back to the struct device that this structure is * associated with. + * @dead - This device is currently either in the process of or has been + * removed from the system. Any asynchronous events scheduled for this + * device should exit without taking any action. * * Nothing outside of the driver core should ever touch these fields. 
*/ @@ -76,6 +79,7 @@ struct device_private { struct klist_node knode_bus; struct list_head deferred_probe; struct device *device; + u8 dead:1; }; #define to_device_private_parent(obj) \ container_of(obj, struct device_private, knode_parent) @@ -124,6 +128,8 @@ extern int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); +int device_driver_attach(struct device_driver *drv, struct device *dev); +void device_driver_detach(struct device *dev); extern char *make_class_name(const char *name, struct kobject *kobj); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 8bfd27ec73d60d1d18a43f9bfcff29b4eb9a50a6..5f1966081c42a52ba2f11123f565f2637ac97e0a 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -31,6 +31,9 @@ static struct kset *system_kset; #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) +#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = \ + __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); @@ -184,18 +187,14 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + device_driver_detach(dev); err = count; } put_device(dev); bus_put(bus); return err; } -static DRIVER_ATTR_WO(unbind); +static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store); /* * Manually attach a device to a driver. @@ -211,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { - if (dev->parent && bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - err = driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && bus->need_parent_lock) - device_unlock(dev->parent); + err = device_driver_attach(drv, dev); if (err > 0) { /* success */ @@ -231,11 +224,11 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, bus_put(bus); return err; } -static DRIVER_ATTR_WO(bind); +static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) { - return sprintf(buf, "%d\n", bus->p->drivers_autoprobe); + return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe); } static ssize_t store_drivers_autoprobe(struct bus_type *bus, @@ -611,8 +604,10 @@ static void remove_probe_files(struct bus_type *bus) static ssize_t uevent_store(struct device_driver *drv, const char *buf, size_t count) { - kobject_synth_uevent(&drv->p->kobj, buf, count); - return count; + int rc; + + rc = kobject_synth_uevent(&drv->p->kobj, buf, count); + return rc ? 
rc : count; } static DRIVER_ATTR_WO(uevent); @@ -769,13 +764,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices); */ int device_reprobe(struct device *dev) { - if (dev->driver) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); - } + if (dev->driver) + device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); @@ -828,8 +818,10 @@ static void klist_devices_put(struct klist_node *n) static ssize_t bus_uevent_store(struct bus_type *bus, const char *buf, size_t count) { - kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); - return count; + int rc; + + rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); + return rc ? rc : count; } static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 5d5b5988e88be298141c569fa6853762165b0112..91a7e2b22e986cf6560716e90486e9f31b53289a 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) @@ -215,6 +213,46 @@ int __weak cache_setup_acpi(unsigned int cpu) return -ENOTSUPP; } +/** + * cacheinfo_shared_cpu_map_search() - find an instance of struct cacheinfo + * from the provided firmware description. + * Caller must hold cpus_read_lock() until its finished with the cacheinfo. + * + * Return a CPUs cache leaf described @fw_desc, or NULL. + */ +struct cacheinfo *cacheinfo_shared_cpu_map_search(void *fw_token) +{ + struct cacheinfo *iter; + unsigned int cpu, index; + struct cpu_cacheinfo *cpu_ci; + + for_each_online_cpu(cpu) { + cpu_ci = get_cpu_cacheinfo(cpu); + + /* + * info_list of this cacheinfo instance + * may not be initialized because sometimes + * free_cache_attributes() may free this + * info_list but not set num_leaves to zero, + * for example when PPTT is not supported. 
+ */ + if (!cpu_ci->info_list) + continue; + + for (index = 0; index < cache_leaves(cpu); index++) { + iter = cpu_ci->info_list + index; + + if (iter->fw_token == fw_token) { + return iter; + } + } + } + + return NULL; +} + +unsigned int coherency_max_size; + static int cache_shared_cpu_map_setup(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); @@ -253,6 +291,9 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) cpumask_set_cpu(i, &this_leaf->shared_cpu_map); } } + /* record the maximum cache line size */ + if (this_leaf->coherency_line_size > coherency_max_size) + coherency_max_size = this_leaf->coherency_line_size; } return 0; @@ -359,7 +400,7 @@ static ssize_t file_name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ - return sprintf(buf, "%u\n", this_leaf->object); \ + return sysfs_emit(buf, "%u\n", this_leaf->object); \ } show_one(id, id); @@ -374,44 +415,48 @@ static ssize_t size_show(struct device *dev, { struct cacheinfo *this_leaf = dev_get_drvdata(dev); - return sprintf(buf, "%uK\n", this_leaf->size >> 10); + return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10); } -static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf) +static ssize_t shared_cpu_map_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct cacheinfo *this_leaf = dev_get_drvdata(dev); const struct cpumask *mask = &this_leaf->shared_cpu_map; - return cpumap_print_to_pagebuf(list, buf, mask); -} - -static ssize_t shared_cpu_map_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return shared_cpumap_show_func(dev, false, buf); + return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask); } static ssize_t shared_cpu_list_show(struct device *dev, struct device_attribute *attr, char *buf) { - return shared_cpumap_show_func(dev, true, buf); + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const struct cpumask *mask = &this_leaf->shared_cpu_map; + + return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask); } static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const char *output; switch (this_leaf->type) { case CACHE_TYPE_DATA: - return sprintf(buf, "Data\n"); + output = "Data"; + break; case CACHE_TYPE_INST: - return sprintf(buf, "Instruction\n"); + output = "Instruction"; + break; case CACHE_TYPE_UNIFIED: - return sprintf(buf, "Unified\n"); + output = "Unified"; + break; default: return -EINVAL; } + + return sysfs_emit(buf, "%s\n", output); } static ssize_t allocation_policy_show(struct device *dev, @@ -419,15 +464,18 @@ static ssize_t allocation_policy_show(struct device *dev, { struct cacheinfo *this_leaf = dev_get_drvdata(dev); unsigned int ci_attr = this_leaf->attributes; - int n = 0; + const char *output; if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE)) - n = sprintf(buf, "ReadWriteAllocate\n"); + output = "ReadWriteAllocate"; else if (ci_attr & CACHE_READ_ALLOCATE) - n = sprintf(buf, "ReadAllocate\n"); + output = "ReadAllocate"; else if (ci_attr & CACHE_WRITE_ALLOCATE) - n = sprintf(buf, "WriteAllocate\n"); - return n; + output = "WriteAllocate"; + else + return 0; + + return sysfs_emit(buf, "%s\n", output); } static ssize_t write_policy_show(struct device *dev, @@ -438,9 +486,9 @@ static ssize_t write_policy_show(struct device *dev, int n = 0; if (ci_attr & CACHE_WRITE_THROUGH) - n = sprintf(buf, "WriteThrough\n"); + n 
= sysfs_emit(buf, "WriteThrough\n"); else if (ci_attr & CACHE_WRITE_BACK) - n = sprintf(buf, "WriteBack\n"); + n = sysfs_emit(buf, "WriteBack\n"); return n; } @@ -655,7 +703,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu) static int __init cacheinfo_sysfs_init(void) { - return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online", + return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE, + "base/cacheinfo:online", cacheinfo_cpu_online, cacheinfo_cpu_pre_down); } device_initcall(cacheinfo_sysfs_init); diff --git a/drivers/base/class.c b/drivers/base/class.c index 54def4e02f005875de73fffa53974374bdcda940..1dd058fa9bce6ad432e0c1837c0654c6ee97fb17 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -185,6 +185,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), cls->class_groups); class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } return error; } EXPORT_SYMBOL_GPL(__class_register); @@ -471,7 +476,7 @@ ssize_t show_class_attr_string(struct class *class, struct class_attribute_string *cs; cs = container_of(attr, struct class_attribute_string, attr); - return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); + return sysfs_emit(buf, "%s\n", cs->str); } EXPORT_SYMBOL_GPL(show_class_attr_string); diff --git a/drivers/base/component.c b/drivers/base/component.c index 8946dfee4768e8a82fb35f994b4e9ff2b88c245a..7f7c4233cd3142d559d1ea27b5b8eb3bf36e1910 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -74,11 +74,11 @@ static int component_devices_show(struct seq_file *s, void *data) seq_printf(s, "%-40s %20s\n", "device name", "status"); seq_puts(s, "-------------------------------------------------------------\n"); for (i = 0; i < match->num; i++) { - struct device *d = (struct device *)match->compare[i].data; + struct component *component = match->compare[i].component; - seq_printf(s, "%-40s %20s\n", dev_name(d), - match->compare[i].component ? - "registered" : "not registered"); + seq_printf(s, "%-40s %20s\n", + component ? dev_name(component->dev) : "(unknown)", + component ? (component->bound ? "bound" : "not bound") : "not registered"); } mutex_unlock(&component_mutex); @@ -536,9 +536,9 @@ int component_bind_all(struct device *master_dev, void *data) } if (ret != 0) { - for (; i--; ) - if (!master->match->compare[i].duplicate) { - c = master->match->compare[i].component; + for (; i > 0; i--) + if (!master->match->compare[i - 1].duplicate) { + c = master->match->compare[i - 1].component; component_unbind(c, master, data); } } diff --git a/drivers/base/core.c b/drivers/base/core.c index 04bbcd779e114ef1ecad0df77591c0801a0bf84b..a06e0cc4183e6461af429e707044d4644dec3e08 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -8,6 +8,7 @@ * Copyright (c) 2006 Novell, Inc. */ +#include #include #include #include @@ -23,6 +24,7 @@ #include #include #include +#include #include #include @@ -92,6 +94,16 @@ void device_links_read_unlock(int not_used) } #endif /* !CONFIG_SRCU */ +static bool device_is_ancestor(struct device *dev, struct device *target) +{ + while (target->parent) { + target = target->parent; + if (dev == target) + return true; + } + return false; +} + /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. 
@@ -105,7 +117,12 @@ static int device_is_dependent(struct device *dev, void *target) struct device_link *link; int ret; - if (dev == target) + /* + * The "ancestors" check is needed to catch the case when the target + * device has not been completely initialized yet and it is still + * missing from the list of children of its parent device. + */ + if (dev == target || device_is_ancestor(dev, target)) return 1; ret = device_for_each_child(dev, target, device_is_dependent); @@ -123,6 +140,50 @@ static int device_is_dependent(struct device *dev, void *target) return ret; } +static void device_link_init_status(struct device_link *link, + struct device *consumer, + struct device *supplier) +{ + switch (supplier->links.status) { + case DL_DEV_PROBING: + switch (consumer->links.status) { + case DL_DEV_PROBING: + /* + * A consumer driver can create a link to a supplier + * that has not completed its probing yet as long as it + * knows that the supplier is already functional (for + * example, it has just acquired some resources from the + * supplier). + */ + link->status = DL_STATE_CONSUMER_PROBE; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } + break; + case DL_DEV_DRIVER_BOUND: + switch (consumer->links.status) { + case DL_DEV_PROBING: + link->status = DL_STATE_CONSUMER_PROBE; + break; + case DL_DEV_DRIVER_BOUND: + link->status = DL_STATE_ACTIVE; + break; + default: + link->status = DL_STATE_AVAILABLE; + break; + } + break; + case DL_DEV_UNBINDING: + link->status = DL_STATE_SUPPLIER_UNBIND; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } +} + static int device_reorder_to_tail(struct device *dev, void *not_used) { struct device_link *link; @@ -164,6 +225,13 @@ void device_pm_move_to_tail(struct device *dev) device_links_read_unlock(idx); } +#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ + DL_FLAG_AUTOREMOVE_SUPPLIER | \ + DL_FLAG_AUTOPROBE_CONSUMER) + +#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) + /** * device_link_add - Create a link between two devices. * @consumer: Consumer end of the link. @@ -178,10 +246,38 @@ void device_pm_move_to_tail(struct device *dev) * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * - * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed - * automatically when the consumer device driver unbinds from it. - * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS - * set is invalid and will cause NULL to be returned. + * If DL_FLAG_STATELESS is set in @flags, the caller of this function is + * expected to release the link returned by it directly with the help of either + * device_link_del() or device_link_remove(). + * + * If that flag is not set, however, the caller of this function is handing the + * management of the link over to the driver core entirely and its return value + * can only be used to check whether or not the link is present. In that case, + * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link + * flags can be used to indicate to the driver core when the link can be safely + * deleted. Namely, setting one of them in @flags indicates to the driver core + * that the link is not going to be used (by the given caller of this function) + * after unbinding the consumer or supplier driver, respectively, from its + * device, so the link can be deleted at that point. 
If none of them is set, + * the link will be maintained until one of the devices pointed to by it (either + * the consumer or the supplier) is unregistered. + * + * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and + * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent + * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can + * be used to request the driver core to automatically probe for a consumer + * driver after successfully binding a driver to the supplier device. + * + * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER, + * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at + * the same time is invalid and will cause NULL to be returned upfront. + * However, if a device link between the given @consumer and @supplier pair + * exists already when this function is called for them, the existing link will + * be returned regardless of its current type and status (the link's flags may + * be modified then). The caller of this function is then expected to treat + * the link as though it has just been created, so (in particular) if + * DL_FLAG_STATELESS was passed in @flags, the link needs to be released + * explicitly when not needed any more (as stated above). * * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending @@ -197,11 +293,23 @@ struct device_link *device_link_add(struct device *consumer, { struct device_link *link; - if (!consumer || !supplier || - ((flags & DL_FLAG_STATELESS) && - (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) + if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS || + (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || + (flags & DL_FLAG_AUTOPROBE_CONSUMER && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER))) return NULL; + if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { + if (pm_runtime_get_sync(supplier) < 0) { + pm_runtime_put_noidle(supplier); + return NULL; + } + } + + if (!(flags & DL_FLAG_STATELESS)) + flags |= DL_FLAG_MANAGED; + device_links_write_lock(); device_pm_lock(); @@ -216,35 +324,68 @@ struct device_link *device_link_add(struct device *consumer, goto out; } - list_for_each_entry(link, &supplier->links.consumers, s_node) - if (link->consumer == consumer) { + /* + * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed + * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both + * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer != consumer) + continue; + + if (flags & DL_FLAG_PM_RUNTIME) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + pm_runtime_new_link(consumer); + link->flags |= DL_FLAG_PM_RUNTIME; + } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + } + + if (flags & DL_FLAG_STATELESS) { + link->flags |= DL_FLAG_STATELESS; kref_get(&link->kref); goto out; } + /* + * If the life time of the link following from the new flags is + * longer than indicated by the flags of the existing link, + * update the existing link to stay around longer. 
+ */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; + } + } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { + link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER); + } + if (!(link->flags & DL_FLAG_MANAGED)) { + kref_get(&link->kref); + link->flags |= DL_FLAG_MANAGED; + device_link_init_status(link, consumer, supplier); + } + goto out; + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) goto out; + refcount_set(&link->rpm_active, 1); + if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) { - if (pm_runtime_get_sync(supplier) < 0) { - pm_runtime_put_noidle(supplier); - kfree(link); - link = NULL; - goto out; - } - link->rpm_active = true; - } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + pm_runtime_new_link(consumer); - /* - * If the link is being added by the consumer driver at probe - * time, balance the decrementation of the supplier's runtime PM - * usage counter after consumer probe in driver_probe_device(). - */ - if (consumer->links.status == DL_DEV_PROBING) - pm_runtime_get_noresume(supplier); } + get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -255,39 +396,18 @@ struct device_link *device_link_add(struct device *consumer, kref_init(&link->kref); /* Determine the initial link state. */ - if (flags & DL_FLAG_STATELESS) { + if (flags & DL_FLAG_STATELESS) link->status = DL_STATE_NONE; - } else { - switch (supplier->links.status) { - case DL_DEV_DRIVER_BOUND: - switch (consumer->links.status) { - case DL_DEV_PROBING: - /* - * Some callers expect the link creation during - * consumer driver probe to resume the supplier - * even without DL_FLAG_RPM_ACTIVE. - */ - if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_resume(supplier); - - link->status = DL_STATE_CONSUMER_PROBE; - break; - case DL_DEV_DRIVER_BOUND: - link->status = DL_STATE_ACTIVE; - break; - default: - link->status = DL_STATE_AVAILABLE; - break; - } - break; - case DL_DEV_UNBINDING: - link->status = DL_STATE_SUPPLIER_UNBIND; - break; - default: - link->status = DL_STATE_DORMANT; - break; - } - } + else + device_link_init_status(link, consumer, supplier); + + /* + * Some callers expect the link creation during consumer driver probe to + * resume the supplier even without DL_FLAG_RPM_ACTIVE. + */ + if (link->status == DL_STATE_CONSUMER_PROBE && + flags & DL_FLAG_PM_RUNTIME) + pm_runtime_resume(supplier); /* * Move the consumer and all of the devices depending on it to the end @@ -306,12 +426,19 @@ struct device_link *device_link_add(struct device *consumer, out: device_pm_unlock(); device_links_write_unlock(); + + if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) + pm_runtime_put(supplier); + return link; } EXPORT_SYMBOL_GPL(device_link_add); static void device_link_free(struct device_link *link) { + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + put_device(link->consumer); put_device(link->supplier); kfree(link); @@ -354,8 +481,16 @@ static void __device_link_del(struct kref *kref) } #endif /* !CONFIG_SRCU */ +static void device_link_put_kref(struct device_link *link) +{ + if (link->flags & DL_FLAG_STATELESS) + kref_put(&link->kref, __device_link_del); + else + WARN(1, "Unable to drop a managed device link reference\n"); +} + /** - * device_link_del - Delete a link between two devices. 
+ * device_link_del - Delete a stateless link between two devices. * @link: Device link to delete. * * The caller must ensure proper synchronization of this function with runtime @@ -367,14 +502,14 @@ void device_link_del(struct device_link *link) { device_links_write_lock(); device_pm_lock(); - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); /** - * device_link_remove - remove a link between two devices. + * device_link_remove - Delete a stateless link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * @@ -393,7 +528,7 @@ void device_link_remove(void *consumer, struct device *supplier) list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); break; } } @@ -426,7 +561,7 @@ static void device_links_missing_supplier(struct device *dev) * mark the link as "consumer probe in progress" to make the supplier removal * wait for us to complete (or bad things may happen). * - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. */ int device_links_check_suppliers(struct device *dev) { @@ -436,7 +571,7 @@ int device_links_check_suppliers(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.suppliers, c_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE) { @@ -461,7 +596,7 @@ int device_links_check_suppliers(struct device *dev) * * Also change the status of @dev's links to suppliers to "active". * - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_driver_bound(struct device *dev) { @@ -470,15 +605,28 @@ void device_links_driver_bound(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + /* + * Links created during consumer probe may be in the "consumer + * probe" state to start with if the supplier is still probing + * when they are created and they may become "active" if the + * consumer probe returns first. Skip them here. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) continue; WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + + if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + driver_deferred_probe_add(link->consumer); } list_for_each_entry(link, &dev->links.suppliers, c_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); @@ -490,6 +638,13 @@ void device_links_driver_bound(struct device *dev) device_links_write_unlock(); } +static void device_link_drop_managed(struct device_link *link) +{ + link->flags &= ~DL_FLAG_MANAGED; + WRITE_ONCE(link->status, DL_STATE_NONE); + kref_put(&link->kref, __device_link_del); +} + /** * __device_links_no_driver - Update links of a device without a driver. * @dev: Device without a drvier. @@ -500,29 +655,60 @@ void device_links_driver_bound(struct device *dev) * unless they already are in the "supplier unbind in progress" state in which * case they need not be updated. 
* - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. */ static void __device_links_no_driver(struct device *dev) { struct device_link *link, *ln; list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) - kref_put(&link->kref, __device_link_del); - else if (link->status != DL_STATE_SUPPLIER_UNBIND) + device_link_drop_managed(link); + else if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } dev->links.status = DL_DEV_NO_DRIVER; } +/** + * device_links_no_driver - Update links after failing driver probe. + * @dev: Device whose driver has just failed to probe. + * + * Clean up leftover links to consumers for @dev and invoke + * %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ void device_links_no_driver(struct device *dev) { + struct device_link *link; + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + /* + * The probe has failed, so if the status of the link is + * "consumer probe" or "active", it must have been added by + * a probing consumer while this device was still probing. + * Change its state to "dormant", as it represents a valid + * relationship, but it is not functionally meaningful. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + __device_links_no_driver(dev); + device_links_write_unlock(); } @@ -534,16 +720,16 @@ void device_links_no_driver(struct device *dev) * invoke %__device_links_no_driver() to update links to suppliers for it as * appropriate. * - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_driver_cleanup(struct device *dev) { - struct device_link *link; + struct device_link *link, *ln; device_links_write_lock(); - list_for_each_entry(link, &dev->links.consumers, s_node) { - if (link->flags & DL_FLAG_STATELESS) + list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) continue; WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); @@ -556,7 +742,7 @@ void device_links_driver_cleanup(struct device *dev) */ if (link->status == DL_STATE_SUPPLIER_UNBIND && link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) - kref_put(&link->kref, __device_link_del); + device_link_drop_managed(link); WRITE_ONCE(link->status, DL_STATE_DORMANT); } @@ -578,7 +764,7 @@ void device_links_driver_cleanup(struct device *dev) * * Return 'false' if there are no probing or active consumers. * - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. 
*/ bool device_links_busy(struct device *dev) { @@ -588,7 +774,7 @@ bool device_links_busy(struct device *dev) device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status == DL_STATE_CONSUMER_PROBE @@ -618,7 +804,7 @@ bool device_links_busy(struct device *dev) * driver to unbind and start over (the consumer will not re-probe as we have * changed the state of the link already). * - * Links with the DL_FLAG_STATELESS flag set are ignored. + * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_unbind_consumers(struct device *dev) { @@ -630,7 +816,7 @@ void device_links_unbind_consumers(struct device *dev) list_for_each_entry(link, &dev->links.consumers, s_node) { enum device_link_state status; - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; status = link->status; @@ -809,7 +995,7 @@ ssize_t device_show_ulong(struct device *dev, char *buf) { struct dev_ext_attribute *ea = to_ext_attr(attr); - return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); + return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_ulong); @@ -834,7 +1020,7 @@ ssize_t device_show_int(struct device *dev, { struct dev_ext_attribute *ea = to_ext_attr(attr); - return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); + return sysfs_emit(buf, "%d\n", *(int *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_int); @@ -855,7 +1041,7 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, { struct dev_ext_attribute *ea = to_ext_attr(attr); - return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var)); + return sysfs_emit(buf, "%d\n", *(bool *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_bool); @@ -952,6 +1138,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, struct kobj_uevent_env *env) { struct device *dev = kobj_to_dev(kobj); + struct device_driver *driver; int retval = 0; /* add device node properties if present */ @@ -980,8 +1167,12 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Synchronize with module_remove_driver() */ + rcu_read_lock(); + driver = READ_ONCE(dev->driver); + if (driver) + add_uevent_var(env, "DRIVER=%s", driver->name); + rcu_read_unlock(); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -1028,7 +1219,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, struct kset *kset; struct kobj_uevent_env *env = NULL; int i; - size_t count = 0; + int len = 0; int retval; /* search the kset, the device belongs to */ @@ -1058,17 +1249,23 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, /* copy keys to file */ for (i = 0; i < env->envp_idx; i++) - count += sprintf(&buf[count], "%s\n", env->envp[i]); + len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]); out: kfree(env); - return count; + return len; } static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - if (kobject_synth_uevent(&dev->kobj, buf, count)) + int rc; + + rc = kobject_synth_uevent(&dev->kobj, buf, count); + + if (rc) { dev_err(dev, "uevent: failed to send synthetic uevent\n"); + return rc; + } return count; } @@ -1082,7 +1279,7 @@ static ssize_t 
online_show(struct device *dev, struct device_attribute *attr, device_lock(dev); val = !dev->offline; device_unlock(dev); - return sprintf(buf, "%u\n", val); + return sysfs_emit(buf, "%u\n", val); } static ssize_t online_store(struct device *dev, struct device_attribute *attr, @@ -1491,6 +1688,7 @@ void device_initialize(struct device *dev) device_pm_init(dev); set_dev_node(dev, -1); #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spin_lock_init(&dev->msi_lock); INIT_LIST_HEAD(&dev->msi_list); #endif INIT_LIST_HEAD(&dev->links.consumers); @@ -1642,12 +1840,63 @@ static inline struct kobject *get_glue_dir(struct device *dev) */ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) { + unsigned int ref; + /* see if we live in a "glue" directory */ if (!live_in_glue_dir(glue_dir, dev)) return; mutex_lock(&gdp_mutex); - if (!kobject_has_children(glue_dir)) + /** + * There is a race condition between removing glue directory + * and adding a new device under the glue directory. + * + * CPU1: CPU2: + * + * device_add() + * get_device_parent() + * class_dir_create_and_add() + * kobject_add_internal() + * create_dir() // create glue_dir + * + * device_add() + * get_device_parent() + * kobject_get() // get glue_dir + * + * device_del() + * cleanup_glue_dir() + * kobject_del(glue_dir) + * + * kobject_add() + * kobject_add_internal() + * create_dir() // in glue_dir + * sysfs_create_dir_ns() + * kernfs_create_dir_ns(sd) + * + * sysfs_remove_dir() // glue_dir->sd=NULL + * sysfs_put() // free glue_dir->sd + * + * // sd is freed + * kernfs_new_node(sd) + * kernfs_get(glue_dir) + * kernfs_add_one() + * kernfs_put() + * + * Before CPU1 remove last child device under glue dir, if CPU2 add + * a new device under glue dir, the glue_dir kobject reference count + * will be increase to 2 in kobject_get(k). And CPU2 has been called + * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir() + * and sysfs_put(). This result in glue_dir->sd is freed. + * + * Then the CPU2 will see a stale "empty" but still potentially used + * glue dir around in kernfs_new_node(). + * + * In order to avoid this happening, we also should make sure that + * kernfs_node for glue_dir is released in CPU1 only when refcount + * for glue_dir kobj is 1. + */ + ref = kref_read(&glue_dir->kref); + if (!kobject_has_children(glue_dir) && !--ref) kobject_del(glue_dir); kobject_put(glue_dir); mutex_unlock(&gdp_mutex); @@ -2025,6 +2274,24 @@ void put_device(struct device *dev) } EXPORT_SYMBOL_GPL(put_device); +bool kill_device(struct device *dev) +{ + /* + * Require the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + lockdep_assert_held(&dev->mutex); + + if (dev->p->dead) + return false; + dev->p->dead = true; + return true; +} +EXPORT_SYMBOL_GPL(kill_device); + /** * device_del - delete device from system. * @dev: device. @@ -2044,6 +2311,10 @@ void device_del(struct device *dev) struct kobject *glue_dir = NULL; struct class_interface *class_intf; + device_lock(dev); + kill_device(dev); + device_unlock(dev); + /* Notify clients of device removal. This call must come * before dpm_sysfs_remove(). 
*/ @@ -2864,6 +3135,8 @@ void device_shutdown(void) wait_for_device_probe(); device_block_probing(); + cpufreq_suspend(); + spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. @@ -3082,9 +3355,10 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct device *parent = dev->parent; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -3094,8 +3368,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? - dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + if (!(parent && fn == parent->fwnode)) + fn->secondary = ERR_PTR(-ENODEV); + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index eb9443d5bae1f8f6eb80632640b7c549f83e0043..c16b58adbdfe1f15089bce16ed9e30a30714ee8c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -139,11 +139,11 @@ EXPORT_SYMBOL_GPL(cpu_subsys); #ifdef CONFIG_KEXEC #include -static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr, +static ssize_t crash_notes_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); - ssize_t rc; unsigned long long addr; int cpunum; @@ -156,21 +156,18 @@ static ssize_t show_crash_notes(struct device *dev, struct device_attribute *att * operation should be safe. No locking required. */ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); - rc = sprintf(buf, "%Lx\n", addr); - return rc; + + return sysfs_emit(buf, "%llx\n", addr); } -static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL); +static DEVICE_ATTR(crash_notes, 0400, crash_notes_show, NULL); -static ssize_t show_crash_notes_size(struct device *dev, +static ssize_t crash_notes_size_show(struct device *dev, struct device_attribute *attr, char *buf) { - ssize_t rc; - - rc = sprintf(buf, "%zu\n", sizeof(note_buf_t)); - return rc; + return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t)); } -static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL); +static DEVICE_ATTR(crash_notes_size, 0400, crash_notes_size_show, NULL); static struct attribute *crash_note_cpu_attrs[] = { &dev_attr_crash_notes.attr, @@ -231,8 +228,7 @@ static struct cpu_attr cpu_attrs[] = { static ssize_t print_cpus_kernel_max(struct device *dev, struct device_attribute *attr, char *buf) { - int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); - return n; + return sysfs_emit(buf, "%d\n", NR_CPUS - 1); } static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); @@ -242,37 +238,37 @@ unsigned int total_cpus; static ssize_t print_cpus_offline(struct device *dev, struct device_attribute *attr, char *buf) { - int n = 0, len = PAGE_SIZE-2; + int len = 0; cpumask_var_t offline; /* display offline cpus < nr_cpu_ids */ if (!alloc_cpumask_var(&offline, GFP_KERNEL)) return -ENOMEM; cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask); - n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline)); + len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline)); free_cpumask_var(offline); /* display offline cpus >= nr_cpu_ids */ if (total_cpus && nr_cpu_ids < total_cpus) { - if (n && n < len) - 
buf[n++] = ','; + len += sysfs_emit_at(buf, len, ","); if (nr_cpu_ids == total_cpus-1) - n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids); + len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids); else - n += snprintf(&buf[n], len - n, "%u-%d", - nr_cpu_ids, total_cpus-1); + len += sysfs_emit_at(buf, len, "%u-%d", + nr_cpu_ids, total_cpus - 1); } - n += snprintf(&buf[n], len - n, "\n"); - return n; + len += sysfs_emit_at(buf, len, "\n"); + + return len; } static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); static ssize_t print_cpus_isolated(struct device *dev, struct device_attribute *attr, char *buf) { - int n = 0, len = PAGE_SIZE-2; + int len; cpumask_var_t isolated; if (!alloc_cpumask_var(&isolated, GFP_KERNEL)) @@ -280,11 +276,11 @@ static ssize_t print_cpus_isolated(struct device *dev, cpumask_andnot(isolated, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN)); - n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated)); + len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated)); free_cpumask_var(isolated); - return n; + return len; } static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); @@ -292,11 +288,7 @@ static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); static ssize_t print_cpus_nohz_full(struct device *dev, struct device_attribute *attr, char *buf) { - int n = 0, len = PAGE_SIZE-2; - - n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); - - return n; + return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); } static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL); #endif @@ -325,22 +317,23 @@ static ssize_t print_cpu_modalias(struct device *dev, struct device_attribute *attr, char *buf) { - ssize_t n; + int len = 0; u32 i; - n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", - CPU_FEATURE_TYPEVAL); + len += sysfs_emit_at(buf, len, + "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", + CPU_FEATURE_TYPEVAL); for (i = 0; i < MAX_CPU_FEATURES; i++) if (cpu_have_feature(i)) { - if (PAGE_SIZE < n + sizeof(",XXXX\n")) { + if (len + sizeof(",XXXX\n") >= PAGE_SIZE) { WARN(1, "CPU features overflow page\n"); break; } - n += sprintf(&buf[n], ",%04X", i); + len += sysfs_emit_at(buf, len, ",%04X", i); } - buf[n++] = '\n'; - return n; + len += sysfs_emit_at(buf, len, "\n"); + return len; } static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) @@ -519,31 +512,74 @@ static void __init cpu_dev_register_generic(void) ssize_t __weak cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); } ssize_t __weak cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); } ssize_t __weak cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); } ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); } ssize_t __weak cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_mds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak 
cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_srbds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); } static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); @@ -551,6 +587,13 @@ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); +static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); +static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); +static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); +static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); +static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -558,6 +601,13 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_spectre_v2.attr, &dev_attr_spec_store_bypass.attr, &dev_attr_l1tf.attr, + &dev_attr_mds.attr, + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, + &dev_attr_gather_data_sampling.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index edfc9f0b1180947a701cd08ca7473a6132f03999..432a5645bac361ca03f1c3c5083dcf196379c5e5 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -116,7 +116,7 @@ static void deferred_probe_work_func(struct work_struct *work) } static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func); -static void driver_deferred_probe_add(struct device *dev) +void driver_deferred_probe_add(struct device *dev) { mutex_lock(&deferred_probe_mutex); if (list_empty(&dev->p->deferred_probe)) { @@ -254,14 +254,16 @@ int driver_deferred_probe_check_state(struct device *dev) static void deferred_probe_timeout_work_func(struct work_struct *work) { - struct device_private *private, *p; + struct device_private *p; deferred_probe_timeout = 0; driver_deferred_probe_trigger(); flush_work(&deferred_probe_work); - list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe) - dev_info(private->device, "deferred probe pending"); + mutex_lock(&deferred_probe_mutex); + list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) + dev_info(p->device, "deferred probe pending\n"); + mutex_unlock(&deferred_probe_mutex); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); @@ -482,7 +484,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) ret = 
dma_configure(dev); if (ret) - goto dma_failed; + goto probe_failed; if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", @@ -537,14 +539,13 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto done; probe_failed: - dma_deconfigure(dev); -dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); pinctrl_bind_failed: device_links_no_driver(dev); devres_release_all(dev); + dma_deconfigure(dev); driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -726,15 +727,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) bool async_allowed; int ret; - /* - * Check if device has already been claimed. This may - * happen with driver loading, device discovery/registration, - * and deferred probe processing happens all at once with - * multiple threads. - */ - if (dev->driver) - return -EBUSY; - ret = driver_match_device(drv, dev); if (ret == 0) { /* no match */ @@ -742,6 +734,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) } else if (ret == -EPROBE_DEFER) { dev_dbg(dev, "Device match requests probe deferral\n"); driver_deferred_probe_add(dev); + /* + * Device can't match with a driver right now, so don't attempt + * to match or bind with other drivers on the bus. + */ + return ret; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d", ret); return ret; @@ -769,6 +766,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + /* + * Check if device has already been removed or claimed. This may + * happen with driver loading, device discovery/registration, + * and deferred probe processing happens all at once with + * multiple threads. + */ + if (dev->p->dead || dev->driver) + goto out_unlock; + if (dev->parent) pm_runtime_get_sync(dev->parent); @@ -779,7 +785,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) if (dev->parent) pm_runtime_put(dev->parent); - +out_unlock: device_unlock(dev); put_device(dev); @@ -790,7 +796,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; @@ -862,6 +870,64 @@ void device_initial_probe(struct device *dev) __device_attach(dev, true); } +/* + * __device_driver_lock - acquire locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will take the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a USB + * interface, @parent lock will be held as well. + */ +static void __device_driver_lock(struct device *dev, struct device *parent) +{ + if (parent && dev->bus->need_parent_lock) + device_lock(parent); + device_lock(dev); +} + +/* + * __device_driver_unlock - release locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will release the required locks for manipulating dev->drv. + * Normally this will just be the the @dev lock, but when called for a + * USB interface, @parent lock will be released as well. 
+ */ +static void __device_driver_unlock(struct device *dev, struct device *parent) +{ + device_unlock(dev); + if (parent && dev->bus->need_parent_lock) + device_unlock(parent); +} + +/** + * device_driver_attach - attach a specific driver to a specific device + * @drv: Driver to attach + * @dev: Device to attach it to + * + * Manually attach driver to a device. Will acquire both @dev lock and + * @dev->parent lock if needed. + */ +int device_driver_attach(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + return ret; +} + static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; @@ -884,19 +950,21 @@ static int __driver_attach(struct device *dev, void *data) } else if (ret == -EPROBE_DEFER) { dev_dbg(dev, "Device match requests probe deferral\n"); driver_deferred_probe_add(dev); + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } else if (ret < 0) { - dev_dbg(dev, "Bus failed to match device: %d", ret); - return ret; + dev_dbg(dev, "Bus failed to match device: %d\n", ret); + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } /* ret > 0 means positive match */ - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - if (!dev->driver) - driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + device_driver_attach(drv, dev); return 0; } @@ -926,19 +994,12 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { - if (driver_allows_async_probing(drv)) - async_synchronize_full(); - while (device_links_busy(dev)) { - device_unlock(dev); - if (parent) - device_unlock(parent); + __device_driver_unlock(dev, parent); device_links_unbind_consumers(dev); - if (parent) - device_lock(parent); - device_lock(dev); + __device_driver_lock(dev, parent); /* * A concurrent invocation of the same function might * have released the driver successfully while this one @@ -965,10 +1026,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) else if (drv->remove) drv->remove(dev); - device_links_driver_cleanup(dev); - dma_deconfigure(dev); - devres_release_all(dev); + dma_deconfigure(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) @@ -976,6 +1035,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) pm_runtime_reinit(dev); dev_pm_set_driver_flags(dev, 0); + device_links_driver_cleanup(dev); + klist_remove(&dev->p->knode_driver); device_pm_check_callbacks(dev); if (dev->bus) @@ -991,16 +1052,12 @@ void device_release_driver_internal(struct device *dev, struct device_driver *drv, struct device *parent) { - if (parent && dev->bus->need_parent_lock) - device_lock(parent); + __device_driver_lock(dev, parent); - device_lock(dev); if (!drv || drv == dev->driver) __device_release_driver(dev, parent); - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); } /** @@ -1025,6 +1082,18 @@ void 
device_release_driver(struct device *dev) } EXPORT_SYMBOL_GPL(device_release_driver); +/** + * device_driver_detach - detach driver from a specific device + * @dev: device to detach driver from + * + * Detach driver from device. Will acquire both @dev lock and @dev->parent + * lock if needed. + */ +void device_driver_detach(struct device *dev) +{ + device_release_driver_internal(dev, NULL, dev->parent); +} + /** * driver_detach - detach driver from all devices it controls. * @drv: driver. @@ -1034,6 +1103,9 @@ void driver_detach(struct device_driver *drv) struct device_private *dev_prv; struct device *dev; + if (driver_allows_async_probing(drv)) + async_synchronize_full(); + for (;;) { spin_lock(&drv->p->klist_devices.k_lock); if (list_empty(&drv->p->klist_devices.k_list)) { diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index f1a3353f349467eeabd2cc4680fb96337bbf0eec..7f74a1f61d0c2eb638ed23daeb409d49e490a3e3 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -29,6 +29,47 @@ struct devcd_entry { struct device devcd_dev; void *data; size_t datalen; + /* + * Here, a mutex is required to serialize the calls to the del_wk work between + * user and kernel space, which can race when devcd is added with device_add(): + * device_add() sends a uevent to user space. User space reads the uevent, + * and calls devcd_data_write(), which tries to modify a work item that + * devcoredump has not even initialized/queued yet. + * + * + * + * cpu0(X) cpu1(Y) + * + * dev_coredump() uevent sent to user space + * device_add() ======================> user space process Y reads the + * uevents writes to devcd fd + * which results into writes to + * + * devcd_data_write() + * mod_delayed_work() + * try_to_grab_pending() + * del_timer() + * debug_assert_init() + * INIT_DELAYED_WORK() + * schedule_delayed_work() + * + * + * Also, the mutex alone would not be enough to avoid scheduling the + * del_wk work after it has been flushed by a call to devcd_free(), + * as shown below. + * + * disabled_store() + * devcd_free() + * mutex_lock() devcd_data_write() + * flush_delayed_work() + * mutex_unlock() + * mutex_lock() + * mod_delayed_work() + * mutex_unlock() + * So, the delete_work flag is required. + */ + struct mutex mutex; + bool delete_work; struct module *owner; ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen); @@ -88,7 +129,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct devcd_entry *devcd = dev_to_devcd(dev); - mod_delayed_work(system_wq, &devcd->del_wk, 0); + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) { + devcd->delete_work = true; + mod_delayed_work(system_wq, &devcd->del_wk, 0); + } + mutex_unlock(&devcd->mutex); return count; } @@ -116,16 +162,45 @@ static int devcd_free(struct device *dev, void *data) { struct devcd_entry *devcd = dev_to_devcd(dev); + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) + devcd->delete_work = true; + flush_delayed_work(&devcd->del_wk); + mutex_unlock(&devcd->mutex); return 0; } static ssize_t disabled_show(struct class *class, struct class_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", devcd_disabled); + return sysfs_emit(buf, "%d\n", devcd_disabled); } +/* + * + * disabled_store() worker() + * class_for_each_device(&devcd_class, + * NULL, NULL, devcd_free) + * ... + * ...
+ * while ((dev = class_dev_iter_next(&iter)) + * devcd_del() + * device_del() + * put_device() <- last reference + * error = fn(dev, data) devcd_dev_release() + * devcd_free(dev, data) kfree(devcd) + * mutex_lock(&devcd->mutex); + * + * + * In the above diagram, it looks like disabled_store() could race with a + * concurrently running devcd_del() and trigger a memory abort while acquiring + * devcd->mutex, which is taken after the devcd memory has been kfree'd once + * its last reference is dropped with put_device(). However, this cannot happen, + * as fn(dev, data) runs with its own reference to the device via klist_node, + * so it is not the last reference; the situation above therefore does not occur. + */ + static ssize_t disabled_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { @@ -291,13 +366,17 @@ void dev_coredumpm(struct device *dev, struct module *owner, devcd->read = read; devcd->free = free; devcd->failing_dev = get_device(dev); + devcd->delete_work = false; + mutex_init(&devcd->mutex); device_initialize(&devcd->devcd_dev); dev_set_name(&devcd->devcd_dev, "devcd%d", atomic_inc_return(&devcd_count)); devcd->devcd_dev.class = &devcd_class; + mutex_lock(&devcd->mutex); + dev_set_uevent_suppress(&devcd->devcd_dev, true); if (device_add(&devcd->devcd_dev)) goto put_device; @@ -309,12 +388,15 @@ void dev_coredumpm(struct device *dev, struct module *owner, "devcoredump")) /* nothing - symlink will be missing */; + dev_set_uevent_suppress(&devcd->devcd_dev, false); + kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); - + mutex_unlock(&devcd->mutex); return; put_device: put_device(&devcd->devcd_dev); + mutex_unlock(&devcd->mutex); put_module: module_put(owner); free: diff --git a/drivers/base/devres.c b/drivers/base/devres.c index f98a097e73f29c92f0bbf86d805f4fd3fd64109d..0813c16e31a33c798083f45d71a9a139ebe73afc 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -24,8 +24,14 @@ struct devres_node { struct devres { struct devres_node node; - /* -- 3 pointers */ - unsigned long long data[]; /* guarantee ull alignment */ + /* + * Some archs want to perform DMA into kmalloc caches + * and need a guaranteed alignment larger than + * the alignment of a 64-bit integer. + * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same + * buffer alignment as if it was allocated by plain kmalloc(). + */ + u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; }; struct devres_group { @@ -1051,7 +1057,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu); */ void devm_free_percpu(struct device *dev, void __percpu *pdata) { - WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, + /* + * Use devres_release() to prevent memory leakage as + * devm_free_pages() does.
+ */ + WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match, (void *)pdata)); } EXPORT_SYMBOL_GPL(devm_free_percpu); diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index b5c865fe263b25303b72bc42a5715e4fba0e746d..949b472a310362c64dce6660b72a7b03f613ba1d 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -121,7 +121,7 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom) static ssize_t timeout_show(struct class *class, struct class_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", __firmware_loading_timeout()); + return sysfs_emit(buf, "%d\n", __firmware_loading_timeout()); } /** @@ -216,7 +216,7 @@ static ssize_t firmware_loading_show(struct device *dev, loading = fw_sysfs_loading(fw_sysfs->fw_priv); mutex_unlock(&fw_lock); - return sprintf(buf, "%d\n", loading); + return sysfs_emit(buf, "%d\n", loading); } /* one pages buffer should be mapped/unmapped only once */ @@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags) /* Also permit LSMs and IMA to fail firmware sysfs fallback */ ret = security_kernel_load_data(LOADING_FIRMWARE); if (ret < 0) - return ret; + return false; return fw_force_sysfs_fallback(opt_flags); } diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 8e9213b36e31371aed574de0252366f171e6104f..c79acb61b3aeabcc2796700f8a17d3b5ba87cf06 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -562,6 +562,26 @@ static void fw_abort_batch_reqs(struct firmware *fw) fw_state_aborted(fw_priv); } +/* + * Reject firmware file names with ".." path components. + * There are drivers that construct firmware file names from device-supplied + * strings, and we don't want some device to be able to tell us "I would like to + * be sent my firmware from ../../../etc/shadow, please". + * + * Search for ".." surrounded by either '/' or start/end of string. + * + * This intentionally only looks at the firmware name, not at the firmware base + * directory or at symlink contents. + */ +static bool name_contains_dotdot(const char *name) +{ + size_t name_len = strlen(name); + + return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || + strstr(name, "/../") != NULL || + (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -579,6 +599,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } + if (name_contains_dotdot(name)) { + dev_warn(device, + "Firmware load for '%s' refused, path contains '..' component\n", + name); + ret = -EINVAL; + goto out; + } + ret = _request_firmware_prepare(&fw, name, device, buf, size, opt_flags); if (ret <= 0) /* error or already assigned */ @@ -619,6 +647,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. + * It must not contain any ".." path components - "foo/bar..bin" is + * allowed, but "foo/../bar.bin" is not. * * Caller must hold the reference count of @device. 
* diff --git a/drivers/base/init.c b/drivers/base/init.c index 908e6520e804b2967ab029ca6a617cc0d1902f16..e086e0e441c750d887375ab508015c3580993bba 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "base.h" @@ -20,6 +21,7 @@ void __init driver_init(void) { /* These are the core pieces */ + bdi_init(&noop_backing_dev_info); devtmpfs_init(); devices_init(); buses_init(); diff --git a/drivers/base/map.c b/drivers/base/map.c index 5650ab2b247ada40dc7eb74e2214e444859291d6..551296d485020e1df128e2c761ecd0ef3f9ca536 100644 --- a/drivers/base/map.c +++ b/drivers/base/map.c @@ -92,6 +92,34 @@ void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) kfree(found); } +void kobj_delete(struct kobj_map *domain, dev_t dev, unsigned long range, + kobj_probe_t *probe) +{ + unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; + unsigned index = MAJOR(dev); + unsigned i; + struct probe *found = NULL; + + if (n > 255) + n = 255; + + mutex_lock(domain->lock); + for (i = 0; i < n; i++, index++) { + struct probe **s; + for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { + struct probe *p = *s; + if (p->dev == dev && p->range == range && p->get == probe) { + *s = p->next; + if (!found) + found = p; + break; + } + } + } + mutex_unlock(domain->lock); + kfree(found); +} + struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) { struct kobject *kobj; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 817320c7c4c1b72cf248b73a184bc8dee6ef28ac..de0b500aab3c4dd2f77c02156904b99272aa1d56 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -30,6 +30,24 @@ static DEFINE_MUTEX(mem_sysfs_mutex); #define MEMORY_CLASS_NAME "memory" +static const char *const online_type_to_str[] = { + [MMOP_OFFLINE] = "offline", + [MMOP_ONLINE] = "online", + [MMOP_ONLINE_KERNEL] = "online_kernel", + [MMOP_ONLINE_MOVABLE] = "online_movable", +}; + +int memhp_online_type_from_str(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) { + if (sysfs_streq(str, online_type_to_str[i])) + return i; + } + return -EINVAL; +} + #define to_memory_block(dev) container_of(dev, struct memory_block, dev) static int sections_per_block; @@ -39,6 +57,11 @@ static inline int base_memory_block_id(int section_nr) return section_nr / sections_per_block; } +static inline int pfn_to_block_id(unsigned long pfn) +{ + return base_memory_block_id(pfn_to_section_nr(pfn)); +} + static int memory_subsys_online(struct device *dev); static int memory_subsys_offline(struct device *dev); @@ -88,6 +111,7 @@ unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; } +EXPORT_SYMBOL_GPL(memory_block_size_bytes); static unsigned long get_memory_block_size(void) { @@ -109,21 +133,22 @@ static unsigned long get_memory_block_size(void) * uses. 
*/ -static ssize_t show_mem_start_phys_index(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t phys_index_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); unsigned long phys_index; phys_index = mem->start_section_nr / sections_per_block; - return sprintf(buf, "%08lx\n", phys_index); + + return sysfs_emit(buf, "%08lx\n", phys_index); } /* * Show whether the section of memory is likely to be hot-removable */ -static ssize_t show_mem_removable(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t removable_show(struct device *dev, struct device_attribute *attr, + char *buf) { unsigned long i, pfn; int ret = 1; @@ -140,17 +165,17 @@ static ssize_t show_mem_removable(struct device *dev, } out: - return sprintf(buf, "%d\n", ret); + return sysfs_emit(buf, "%d\n", ret); } /* * online, offline, going offline, etc. */ -static ssize_t show_mem_state(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct memory_block *mem = to_memory_block(dev); - ssize_t len = 0; + const char *output; /* * We can probably put these states in a nice little array @@ -158,22 +183,20 @@ static ssize_t show_mem_state(struct device *dev, */ switch (mem->state) { case MEM_ONLINE: - len = sprintf(buf, "online\n"); + output = "online"; break; case MEM_OFFLINE: - len = sprintf(buf, "offline\n"); + output = "offline"; break; case MEM_GOING_OFFLINE: - len = sprintf(buf, "going-offline\n"); + output = "going-offline"; break; default: - len = sprintf(buf, "ERROR-UNKNOWN-%ld\n", - mem->state); WARN_ON(1); - break; + return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state); } - return len; + return sysfs_emit(buf, "%s\n", output); } int memory_notify(unsigned long val, void *v) @@ -207,15 +230,15 @@ static bool pages_correctly_probed(unsigned long start_pfn) return false; if (!present_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) not present", + pr_warn("section %ld pfn[%lx, %lx) not present\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } else if (!valid_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) no valid memmap", + pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } else if (online_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) is already online", + pr_warn("section %ld pfn[%lx, %lx) is already online\n", section_nr, pfn, pfn + PAGES_PER_SECTION); return false; } @@ -228,16 +251,16 @@ static bool pages_correctly_probed(unsigned long start_pfn) /* * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is * OK to have direct references to sparsemem variables in here. - * Must already be protected by mem_hotplug_begin(). 
*/ static int -memory_block_action(unsigned long phys_index, unsigned long action, int online_type) +memory_block_action(unsigned long start_section_nr, unsigned long action, + int online_type) { unsigned long start_pfn; unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; int ret; - start_pfn = section_nr_to_pfn(phys_index); + start_pfn = section_nr_to_pfn(start_section_nr); switch (action) { case MEM_ONLINE: @@ -251,7 +274,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t break; default: WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " - "%ld\n", __func__, phys_index, action, action); + "%ld\n", __func__, start_section_nr, action, action); ret = -EINVAL; } @@ -287,18 +310,14 @@ static int memory_subsys_online(struct device *dev) return 0; /* - * If we are called from store_mem_state(), online_type will be - * set >= 0 Otherwise we were called from the device online - * attribute and need to set the online_type. + * When called via device_online() without configuring the online_type, + * we want to default to MMOP_ONLINE. */ - if (mem->online_type < 0) - mem->online_type = MMOP_ONLINE_KEEP; + if (mem->online_type == MMOP_OFFLINE) + mem->online_type = MMOP_ONLINE; - /* Already under protection of mem_hotplug_begin() */ ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); - - /* clear online_type */ - mem->online_type = -1; + mem->online_type = MMOP_OFFLINE; return ret; } @@ -317,43 +336,25 @@ static int memory_subsys_offline(struct device *dev) return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); } -static ssize_t -store_mem_state(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { + const int online_type = memhp_online_type_from_str(buf); struct memory_block *mem = to_memory_block(dev); - int ret, online_type; + int ret; + + if (online_type < 0) + return -EINVAL; ret = lock_device_hotplug_sysfs(); if (ret) return ret; - if (sysfs_streq(buf, "online_kernel")) - online_type = MMOP_ONLINE_KERNEL; - else if (sysfs_streq(buf, "online_movable")) - online_type = MMOP_ONLINE_MOVABLE; - else if (sysfs_streq(buf, "online")) - online_type = MMOP_ONLINE_KEEP; - else if (sysfs_streq(buf, "offline")) - online_type = MMOP_OFFLINE; - else { - ret = -EINVAL; - goto err; - } - - /* - * Memory hotplug needs to hold mem_hotplug_begin() for probe to find - * the correct memory block to online before doing device_online(dev), - * which will take dev->mutex. Take the lock early to prevent an - * inversion, memory_subsys_online() callbacks will be implemented by - * assuming it's already protected. - */ - mem_hotplug_begin(); - switch (online_type) { case MMOP_ONLINE_KERNEL: case MMOP_ONLINE_MOVABLE: - case MMOP_ONLINE_KEEP: + case MMOP_ONLINE: + /* mem->online_type is protected by device_hotplug_lock */ mem->online_type = online_type; ret = device_online(&mem->dev); break; @@ -364,8 +365,6 @@ store_mem_state(struct device *dev, ret = -EINVAL; /* should never happen */ } - mem_hotplug_done(); -err: unlock_device_hotplug(); if (ret < 0) @@ -385,28 +384,29 @@ store_mem_state(struct device *dev, * s.t. if I offline all of these sections I can then * remove the physical device? 
*/ -static ssize_t show_phys_device(struct device *dev, +static ssize_t phys_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); - return sprintf(buf, "%d\n", mem->phys_device); + + return sysfs_emit(buf, "%d\n", mem->phys_device); } #ifdef CONFIG_MEMORY_HOTREMOVE -static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn, - unsigned long nr_pages, int online_type, - struct zone *default_zone) +static int print_allowed_zone(char *buf, int len, int nid, + unsigned long start_pfn, unsigned long nr_pages, + int online_type, struct zone *default_zone) { struct zone *zone; zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); - if (zone != default_zone) { - strcat(buf, " "); - strcat(buf, zone->name); - } + if (zone == default_zone) + return 0; + + return sysfs_emit_at(buf, len, " %s", zone->name); } -static ssize_t show_valid_zones(struct device *dev, +static ssize_t valid_zones_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); @@ -414,6 +414,7 @@ static ssize_t show_valid_zones(struct device *dev, unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; unsigned long valid_start_pfn, valid_end_pfn; struct zone *default_zone; + int len = 0; int nid; /* @@ -427,75 +428,69 @@ static ssize_t show_valid_zones(struct device *dev, */ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) - return sprintf(buf, "none\n"); + return sysfs_emit(buf, "%s\n", "none"); start_pfn = valid_start_pfn; - strcat(buf, page_zone(pfn_to_page(start_pfn))->name); + len += sysfs_emit_at(buf, len, "%s", + page_zone(pfn_to_page(start_pfn))->name); goto out; } nid = mem->nid; - default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); - strcat(buf, default_zone->name); - - print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, - default_zone); - print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, - default_zone); + default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, + nr_pages); + len += sysfs_emit_at(buf, len, "%s", default_zone->name); + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, + MMOP_ONLINE_KERNEL, default_zone); + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, + MMOP_ONLINE_MOVABLE, default_zone); out: - strcat(buf, "\n"); - - return strlen(buf); + len += sysfs_emit_at(buf, len, "\n"); + return len; } -static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); +static DEVICE_ATTR_RO(valid_zones); #endif -static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); -static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); -static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); -static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); +static DEVICE_ATTR_RO(phys_index); +static DEVICE_ATTR_RW(state); +static DEVICE_ATTR_RO(phys_device); +static DEVICE_ATTR_RO(removable); /* * Block size attribute stuff */ -static ssize_t -print_block_size(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t block_size_bytes_show(struct device *dev, + struct device_attribute *attr, char *buf) { - return sprintf(buf, "%lx\n", get_memory_block_size()); + return sysfs_emit(buf, "%lx\n", get_memory_block_size()); } -static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL); +static DEVICE_ATTR_RO(block_size_bytes); /* * Memory auto online policy. 
*/ -static ssize_t -show_auto_online_blocks(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t auto_online_blocks_show(struct device *dev, + struct device_attribute *attr, char *buf) { - if (memhp_auto_online) - return sprintf(buf, "online\n"); - else - return sprintf(buf, "offline\n"); + return sysfs_emit(buf, "%s\n", + online_type_to_str[memhp_default_online_type]); } -static ssize_t -store_auto_online_blocks(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t auto_online_blocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - if (sysfs_streq(buf, "online")) - memhp_auto_online = true; - else if (sysfs_streq(buf, "offline")) - memhp_auto_online = false; - else + const int online_type = memhp_online_type_from_str(buf); + + if (online_type < 0) return -EINVAL; + memhp_default_online_type = online_type; return count; } -static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks, - store_auto_online_blocks); +static DEVICE_ATTR_RW(auto_online_blocks); /* * Some architectures will have custom drivers to do this, and @@ -504,9 +499,8 @@ static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks, * and will require this interface. */ #ifdef CONFIG_ARCH_MEMORY_PROBE -static ssize_t -memory_probe_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t probe_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { u64 phys_addr; int nid, ret; @@ -519,19 +513,24 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) return -EINVAL; + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + nid = memory_add_physaddr_to_nid(phys_addr); - ret = add_memory(nid, phys_addr, - MIN_MEMORY_BLOCK_SIZE * sections_per_block); + ret = __add_memory(nid, phys_addr, + MIN_MEMORY_BLOCK_SIZE * sections_per_block); if (ret) goto out; ret = count; out: + unlock_device_hotplug(); return ret; } -static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); +static DEVICE_ATTR_WO(probe); #endif #ifdef CONFIG_MEMORY_FAILURE @@ -540,10 +539,9 @@ static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); */ /* Soft offline a page */ -static ssize_t -store_soft_offline_page(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t soft_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { int ret; u64 pfn; @@ -554,15 +552,17 @@ store_soft_offline_page(struct device *dev, pfn >>= PAGE_SHIFT; if (!pfn_valid(pfn)) return -ENXIO; + /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */ + if (!pfn_to_online_page(pfn)) + return -EIO; ret = soft_offline_page(pfn_to_page(pfn), 0); return ret == 0 ? count : ret; } /* Forcibly offline a page, including killing processes. */ -static ssize_t -store_hard_offline_page(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t hard_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { int ret; u64 pfn; @@ -575,8 +575,8 @@ store_hard_offline_page(struct device *dev, return ret ? 
ret : count; } -static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page); -static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page); +static DEVICE_ATTR_WO(soft_offline_page); +static DEVICE_ATTR_WO(hard_offline_page); #endif /* @@ -593,10 +593,9 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn) * A reference for the returned object is held and the reference for the * hinted object is released. */ -struct memory_block *find_memory_block_hinted(struct mem_section *section, - struct memory_block *hint) +static struct memory_block *find_memory_block_by_id(int block_id, + struct memory_block *hint) { - int block_id = base_memory_block_id(__section_nr(section)); struct device *hintdev = hint ? &hint->dev : NULL; struct device *dev; @@ -608,6 +607,14 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section, return to_memory_block(dev); } +struct memory_block *find_memory_block_hinted(struct mem_section *section, + struct memory_block *hint) +{ + int block_id = base_memory_block_id(__section_nr(section)); + + return find_memory_block_by_id(block_id, hint); +} + /* * For now, we have a linear search to go find the appropriate * memory_block corresponding to a particular phys_index. If @@ -662,25 +669,28 @@ int register_memory(struct memory_block *memory) return ret; } -static int init_memory_block(struct memory_block **memory, - struct mem_section *section, unsigned long state) +static int init_memory_block(struct memory_block **memory, int block_id, + unsigned long state) { struct memory_block *mem; unsigned long start_pfn; - int scn_nr; int ret = 0; + mem = find_memory_block_by_id(block_id, NULL); + if (mem) { + put_device(&mem->dev); + return -EEXIST; + } mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; - scn_nr = __section_nr(section); - mem->start_section_nr = - base_memory_block_id(scn_nr) * sections_per_block; + mem->start_section_nr = block_id * sections_per_block; mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; mem->state = state; start_pfn = section_nr_to_pfn(mem->start_section_nr); mem->phys_device = arch_get_memory_phys_device(start_pfn); + mem->nid = NUMA_NO_NODE; ret = register_memory(mem); @@ -691,102 +701,99 @@ static int init_memory_block(struct memory_block **memory, static int add_memory_block(int base_section_nr) { struct memory_block *mem; - int i, ret, section_count = 0, section_nr; + int i, ret, section_count = 0; for (i = base_section_nr; - (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS; - i++) { - if (!present_section_nr(i)) - continue; - if (section_count == 0) - section_nr = i; - section_count++; - } + i < base_section_nr + sections_per_block; + i++) + if (present_section_nr(i)) + section_count++; if (section_count == 0) return 0; - ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE); + ret = init_memory_block(&mem, base_memory_block_id(base_section_nr), + MEM_ONLINE); if (ret) return ret; mem->section_count = section_count; return 0; } +static void unregister_memory(struct memory_block *memory) +{ + if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys)) + return; + + /* drop the ref. we got via find_memory_block() */ + put_device(&memory->dev); + device_unregister(&memory->dev); +} + /* - * need an interface for the VM to add new memory regions, - * but without onlining it. + * Create memory block devices for the given memory area. Start and size + * have to be aligned to memory block granularity. 
Memory block devices + * will be initialized as offline. */ -int hotplug_memory_register(int nid, struct mem_section *section) +int create_memory_block_devices(unsigned long start, unsigned long size) { - int ret = 0; + const int start_block_id = pfn_to_block_id(PFN_DOWN(start)); + int end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); struct memory_block *mem; + unsigned long block_id; + int ret = 0; - mutex_lock(&mem_sysfs_mutex); + if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || + !IS_ALIGNED(size, memory_block_size_bytes()))) + return -EINVAL; - mem = find_memory_block(section); - if (mem) { - mem->section_count++; - put_device(&mem->dev); - } else { - ret = init_memory_block(&mem, section, MEM_OFFLINE); + mutex_lock(&mem_sysfs_mutex); + for (block_id = start_block_id; block_id != end_block_id; block_id++) { + ret = init_memory_block(&mem, block_id, MEM_OFFLINE); if (ret) - goto out; - mem->section_count++; + break; + mem->section_count = sections_per_block; + } + if (ret) { + end_block_id = block_id; + for (block_id = start_block_id; block_id != end_block_id; + block_id++) { + mem = find_memory_block_by_id(block_id, NULL); + mem->section_count = 0; + unregister_memory(mem); + } } - -out: mutex_unlock(&mem_sysfs_mutex); return ret; } -#ifdef CONFIG_MEMORY_HOTREMOVE -static void -unregister_memory(struct memory_block *memory) -{ - BUG_ON(memory->dev.bus != &memory_subsys); - - /* drop the ref. we got in remove_memory_block() */ - put_device(&memory->dev); - device_unregister(&memory->dev); -} - -static int remove_memory_section(unsigned long node_id, - struct mem_section *section, int phys_device) +/* + * Remove memory block devices for the given memory area. Start and size + * have to be aligned to memory block granularity. Memory block devices + * have to be offline. + */ +void remove_memory_block_devices(unsigned long start, unsigned long size) { + const int start_block_id = pfn_to_block_id(PFN_DOWN(start)); + const int end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); struct memory_block *mem; + int block_id; - mutex_lock(&mem_sysfs_mutex); - - /* - * Some users of the memory hotplug do not want/need memblock to - * track all sections. Skip over those. 
- */ - mem = find_memory_block(section); - if (!mem) - goto out_unlock; - - unregister_mem_sect_under_nodes(mem, __section_nr(section)); + if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || + !IS_ALIGNED(size, memory_block_size_bytes()))) + return; - mem->section_count--; - if (mem->section_count == 0) + mutex_lock(&mem_sysfs_mutex); + for (block_id = start_block_id; block_id != end_block_id; block_id++) { + mem = find_memory_block_by_id(block_id, NULL); + if (WARN_ON_ONCE(!mem)) + continue; + mem->section_count = 0; + unregister_memory_block_under_nodes(mem); unregister_memory(mem); - else - put_device(&mem->dev); - -out_unlock: + } mutex_unlock(&mem_sysfs_mutex); - return 0; } -int unregister_memory_section(struct mem_section *section) -{ - if (!present_section(section)) - return -EINVAL; - - return remove_memory_section(0, section, 0); -} -#endif /* CONFIG_MEMORY_HOTREMOVE */ - /* return true if the memory block is offlined, otherwise, return false */ bool is_memblock_offlined(struct memory_block *mem) { @@ -852,3 +859,39 @@ int __init memory_dev_init(void) printk(KERN_ERR "%s() failed: %d\n", __func__, ret); return ret; } + +struct for_each_memory_block_cb_data { + walk_memory_blocks_func_t func; + void *arg; +}; + +static int for_each_memory_block_cb(struct device *dev, void *data) +{ + struct memory_block *mem = to_memory_block(dev); + struct for_each_memory_block_cb_data *cb_data = data; + + return cb_data->func(mem, cb_data->arg); +} + +/** + * for_each_memory_block - walk through all present memory blocks + * + * @arg: argument passed to func + * @func: callback for each memory block walked + * + * This function walks through all present memory blocks, calling func on + * each memory block. + * + * In case func() returns an error, walking is aborted and the error is + * returned. 
+ */ +int for_each_memory_block(void *arg, walk_memory_blocks_func_t func) +{ + struct for_each_memory_block_cb_data cb_data = { + .func = func, + .arg = arg, + }; + + return bus_for_each_dev(&memory_subsys, NULL, &cb_data, + for_each_memory_block_cb); +} diff --git a/drivers/base/module.c b/drivers/base/module.c index 46ad4d636731ddf8f20b3696b018b99cadcaeb88..851cc5367c04c044f32e8dd5c9a31c5cfcbdd027 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "base.h" static char *make_driver_name(struct device_driver *drv) @@ -77,6 +78,9 @@ void module_remove_driver(struct device_driver *drv) if (!drv) return; + /* Synchronize with dev_uevent() */ + synchronize_rcu(); + sysfs_remove_link(&drv->p->kobj, "module"); if (drv->owner) diff --git a/drivers/base/node.c b/drivers/base/node.c index 1ac4c36e13bbd9a70a07ba310e01bc053614c103..105a18b9788d73b4061b9a999ffe7c2f514c7a7e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -45,31 +46,331 @@ static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) return n; } -static inline ssize_t node_read_cpumask(struct device *dev, - struct device_attribute *attr, char *buf) +static inline ssize_t cpumap_show(struct device *dev, + struct device_attribute *attr, + char *buf) { return node_read_cpumap(dev, false, buf); } -static inline ssize_t node_read_cpulist(struct device *dev, - struct device_attribute *attr, char *buf) + +static DEVICE_ATTR_RO(cpumap); + +static inline ssize_t cpulist_show(struct device *dev, + struct device_attribute *attr, + char *buf) { return node_read_cpumap(dev, true, buf); } -static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL); -static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL); +static DEVICE_ATTR_RO(cpulist); + +/** + * struct node_access_nodes - Access class device to hold user visible + * relationships to other nodes. 
+ * @dev: Device for this memory access class + * @list_node: List element in the node's access list + * @access: The access class rank +*/ +struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned access; +#ifdef CONFIG_HMEM_REPORTING + struct node_hmem_attrs hmem_attrs; +#endif +}; +#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev) + +static struct attribute *node_init_access_node_attrs[] = { + NULL, +}; + +static struct attribute *node_targ_access_node_attrs[] = { + NULL, +}; + +static const struct attribute_group initiators = { + .name = "initiators", + .attrs = node_init_access_node_attrs, +}; + +static const struct attribute_group targets = { + .name = "targets", + .attrs = node_targ_access_node_attrs, +}; + +static const struct attribute_group *node_access_node_groups[] = { + &initiators, + &targets, + NULL, +}; + +static void node_remove_accesses(struct node *node) +{ + struct node_access_nodes *c, *cnext; + + list_for_each_entry_safe(c, cnext, &node->access_list, list_node) { + list_del(&c->list_node); + device_unregister(&c->dev); + } +} + +static void node_access_release(struct device *dev) +{ + kfree(to_access_nodes(dev)); +} + +static struct node_access_nodes *node_init_node_access(struct node *node, + unsigned access) +{ + struct node_access_nodes *access_node; + struct device *dev; + + list_for_each_entry(access_node, &node->access_list, list_node) + if (access_node->access == access) + return access_node; + + access_node = kzalloc(sizeof(*access_node), GFP_KERNEL); + if (!access_node) + return NULL; + + access_node->access = access; + dev = &access_node->dev; + dev->parent = &node->dev; + dev->release = node_access_release; + dev->groups = node_access_node_groups; + if (dev_set_name(dev, "access%u", access)) + goto free; + + if (device_register(dev)) + goto free_name; + + pm_runtime_no_callbacks(dev); + list_add_tail(&access_node->list_node, &node->access_list); + return access_node; +free_name: + kfree_const(dev->kobj.name); +free: + kfree(access_node); + return NULL; +} + +#ifdef CONFIG_HMEM_REPORTING +#define ACCESS_ATTR(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sysfs_emit(buf, "%u\n", \ + to_access_nodes(dev)->hmem_attrs.name); \ +} \ +static DEVICE_ATTR_RO(name) + +ACCESS_ATTR(read_bandwidth); +ACCESS_ATTR(read_latency); +ACCESS_ATTR(write_bandwidth); +ACCESS_ATTR(write_latency); + +static struct attribute *access_attrs[] = { + &dev_attr_read_bandwidth.attr, + &dev_attr_read_latency.attr, + &dev_attr_write_bandwidth.attr, + &dev_attr_write_latency.attr, + NULL, +}; + +/** + * node_set_perf_attrs - Set the performance values for given access class + * @nid: Node identifier to be set + * @hmem_attrs: Heterogeneous memory performance attributes + * @access: The access class for the given attributes + */ +void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, + unsigned access) +{ + struct node_access_nodes *c; + struct node *node; + int i; + + if (WARN_ON_ONCE(!node_online(nid))) + return; + + node = node_devices[nid]; + c = node_init_node_access(node, access); + if (!c) + return; + + c->hmem_attrs = *hmem_attrs; + for (i = 0; access_attrs[i] != NULL; i++) { + if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i], + "initiators")) { + pr_info("failed to add performance attribute to node %d\n", + nid); + break; + } + } +} + +/** + * struct node_cache_info - Internal tracking for memory node caches + * 
@dev: Device representing the cache level + * @node: List element for tracking in the node + * @cache_attrs: Attributes for this cache level + */ +struct node_cache_info { + struct device dev; + struct list_head node; + struct node_cache_attrs cache_attrs; +}; +#define to_cache_info(device) container_of(device, struct node_cache_info, dev) + +#define CACHE_ATTR(name, fmt) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sysfs_emit(buf, fmt "\n", \ + to_cache_info(dev)->cache_attrs.name); \ +} \ +DEVICE_ATTR_RO(name); + +CACHE_ATTR(size, "%llu") +CACHE_ATTR(line_size, "%u") +CACHE_ATTR(indexing, "%u") +CACHE_ATTR(write_policy, "%u") + +static struct attribute *cache_attrs[] = { + &dev_attr_indexing.attr, + &dev_attr_size.attr, + &dev_attr_line_size.attr, + &dev_attr_write_policy.attr, + NULL, +}; +ATTRIBUTE_GROUPS(cache); + +static void node_cache_release(struct device *dev) +{ + kfree(dev); +} + +static void node_cacheinfo_release(struct device *dev) +{ + struct node_cache_info *info = to_cache_info(dev); + kfree(info); +} + +static void node_init_cache_dev(struct node *node) +{ + struct device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return; + + device_initialize(dev); + dev->parent = &node->dev; + dev->release = node_cache_release; + if (dev_set_name(dev, "memory_side_cache")) + goto put_device; + + if (device_add(dev)) + goto put_device; + + pm_runtime_no_callbacks(dev); + node->cache_dev = dev; + return; +put_device: + put_device(dev); +} + +/** + * node_add_cache() - add cache attribute to a memory node + * @nid: Node identifier that has new cache attributes + * @cache_attrs: Attributes for the cache being added + */ +void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs) +{ + struct node_cache_info *info; + struct device *dev; + struct node *node; + + if (!node_online(nid) || !node_devices[nid]) + return; + + node = node_devices[nid]; + list_for_each_entry(info, &node->cache_attrs, node) { + if (info->cache_attrs.level == cache_attrs->level) { + dev_warn(&node->dev, + "attempt to add duplicate cache level:%d\n", + cache_attrs->level); + return; + } + } + + if (!node->cache_dev) + node_init_cache_dev(node); + if (!node->cache_dev) + return; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dev = &info->dev; + device_initialize(dev); + dev->parent = node->cache_dev; + dev->release = node_cacheinfo_release; + dev->groups = cache_groups; + if (dev_set_name(dev, "index%d", cache_attrs->level)) + goto put_device; + + info->cache_attrs = *cache_attrs; + if (device_add(dev)) { + dev_warn(&node->dev, "failed to add cache level:%d\n", + cache_attrs->level); + goto put_device; + } + pm_runtime_no_callbacks(dev); + list_add_tail(&info->node, &node->cache_attrs); + return; +put_device: + put_device(dev); +} + +static void node_remove_caches(struct node *node) +{ + struct node_cache_info *info, *next; + + if (!node->cache_dev) + return; + + list_for_each_entry_safe(info, next, &node->cache_attrs, node) { + list_del(&info->node); + device_unregister(&info->dev); + } + device_unregister(node->cache_dev); +} + +static void node_init_caches(unsigned int nid) +{ + INIT_LIST_HEAD(&node_devices[nid]->cache_attrs); +} +#else +static void node_init_caches(unsigned int nid) { } +static void node_remove_caches(struct node *node) { } +#endif #define K(x) ((x) << (PAGE_SHIFT - 10)) static ssize_t node_read_meminfo(struct device *dev, struct device_attribute *attr, char *buf) { - 
int n; + int len = 0; int nid = dev->id; struct pglist_data *pgdat = NODE_DATA(nid); struct sysinfo i; si_meminfo_node(&i, nid); - n = sprintf(buf, + len = sysfs_emit_at(buf, len, "Node %d MemTotal: %8lu kB\n" "Node %d MemFree: %8lu kB\n" "Node %d MemUsed: %8lu kB\n" @@ -96,7 +397,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(sum_zone_node_page_state(nid, NR_MLOCK))); #ifdef CONFIG_HIGHMEM - n += sprintf(buf + n, + len += sysfs_emit_at(buf, len, "Node %d HighTotal: %8lu kB\n" "Node %d HighFree: %8lu kB\n" "Node %d LowTotal: %8lu kB\n" @@ -106,7 +407,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(i.totalram - i.totalhigh), nid, K(i.freeram - i.freehigh)); #endif - n += sprintf(buf + n, + len += sysfs_emit_at(buf, len, "Node %d Dirty: %8lu kB\n" "Node %d Writeback: %8lu kB\n" "Node %d FilePages: %8lu kB\n" @@ -152,8 +453,8 @@ static ssize_t node_read_meminfo(struct device *dev, #else nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE))); #endif - n += hugetlb_report_node_meminfo(nid, buf + n); - return n; + len += hugetlb_report_node_meminfo(buf, len, nid); + return len; } #undef K @@ -162,21 +463,21 @@ static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL); static ssize_t node_read_numastat(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, - "numa_hit %lu\n" - "numa_miss %lu\n" - "numa_foreign %lu\n" - "interleave_hit %lu\n" - "local_node %lu\n" - "other_node %lu\n", - sum_zone_numa_state(dev->id, NUMA_HIT), - sum_zone_numa_state(dev->id, NUMA_MISS), - sum_zone_numa_state(dev->id, NUMA_FOREIGN), - sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT), - sum_zone_numa_state(dev->id, NUMA_LOCAL), - sum_zone_numa_state(dev->id, NUMA_OTHER)); -} -static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); + return sysfs_emit(buf, + "numa_hit %lu\n" + "numa_miss %lu\n" + "numa_foreign %lu\n" + "interleave_hit %lu\n" + "local_node %lu\n" + "other_node %lu\n", + sum_zone_numa_state(dev->id, NUMA_HIT), + sum_zone_numa_state(dev->id, NUMA_MISS), + sum_zone_numa_state(dev->id, NUMA_FOREIGN), + sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_numa_state(dev->id, NUMA_LOCAL), + sum_zone_numa_state(dev->id, NUMA_OTHER)); +} +static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL); static ssize_t node_read_vmstat(struct device *dev, struct device_attribute *attr, char *buf) @@ -184,26 +485,32 @@ static ssize_t node_read_vmstat(struct device *dev, int nid = dev->id; struct pglist_data *pgdat = NODE_DATA(nid); int i; - int n = 0; + int len = 0; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) - n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], - sum_zone_node_page_state(nid, i)); + len += sysfs_emit_at(buf, len, "%s %lu\n", + vmstat_text[i], + sum_zone_node_page_state(nid, i)); #ifdef CONFIG_NUMA for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) - n += sprintf(buf+n, "%s %lu\n", + len += sysfs_emit_at(buf, len, "%s %lu\n", vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], sum_zone_numa_state(nid, i)); #endif - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) - n += sprintf(buf+n, "%s %lu\n", + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { + /* Skip hidden vmstat items. 
*/ + if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS] == '\0') + continue; + len += sysfs_emit_at(buf, len, "%s %lu\n", vmstat_text[i + NR_VM_ZONE_STAT_ITEMS + NR_VM_NUMA_STAT_ITEMS], node_page_state(pgdat, i)); + } - return n; + return len; } static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); @@ -220,13 +527,15 @@ static ssize_t node_read_distance(struct device *dev, */ BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE); - for_each_online_node(i) - len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); + for_each_online_node(i) { + len += sysfs_emit_at(buf, len, "%s%d", + i ? " " : "", node_distance(nid, i)); + } - len += sprintf(buf + len, "\n"); + len += sysfs_emit_at(buf, len, "\n"); return len; } -static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL); +static DEVICE_ATTR(distance, 0444, node_read_distance, NULL); static struct attribute *node_dev_attrs[] = { &dev_attr_cpumap.attr, @@ -334,8 +643,10 @@ static int register_node(struct node *node, int num) */ void unregister_node(struct node *node) { + compaction_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ - + node_remove_accesses(node); + node_remove_caches(node); device_unregister(&node->dev); } @@ -367,6 +678,56 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid) kobject_name(&node_devices[nid]->dev.kobj)); } +/** + * register_memory_node_under_compute_node - link memory node to its compute + * node for a given access class. + * @mem_node: Memory node number + * @cpu_node: Cpu node number + * @access: Access class to register + * + * Description: + * For use with platforms that may have separate memory and compute nodes. + * This function will export node relationships linking which memory + * initiator nodes can access memory targets at a given ranked access + * class. + */ +int register_memory_node_under_compute_node(unsigned int mem_nid, + unsigned int cpu_nid, + unsigned access) +{ + struct node *init_node, *targ_node; + struct node_access_nodes *initiator, *target; + int ret; + + if (!node_online(cpu_nid) || !node_online(mem_nid)) + return -ENODEV; + + init_node = node_devices[cpu_nid]; + targ_node = node_devices[mem_nid]; + initiator = node_init_node_access(init_node, access); + target = node_init_node_access(targ_node, access); + if (!initiator || !target) + return -ENOMEM; + + ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets", + &targ_node->dev.kobj, + dev_name(&targ_node->dev)); + if (ret) + return ret; + + ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators", + &init_node->dev.kobj, + dev_name(&init_node->dev)); + if (ret) + goto err; + + return 0; + err: + sysfs_remove_link_from_group(&initiator->dev.kobj, "targets", + dev_name(&targ_node->dev)); + return ret; +} + int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { struct device *obj; @@ -398,14 +759,34 @@ static int __ref get_nid_for_pfn(unsigned long pfn) return pfn_to_nid(pfn); } -/* register memory section under specified node if it spans that node */ -int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) +static int do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) { - int ret, nid = *(int *)arg; - unsigned long pfn, sect_start_pfn, sect_end_pfn; + int ret; + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. 
+ */ mem_blk->nid = nid; + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + +/* register memory section under specified node if it spans that node */ +int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg) +{ + int nid = *(int *)arg; + unsigned long pfn, sect_start_pfn, sect_end_pfn; + sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr); sect_end_pfn += PAGES_PER_SECTION - 1; @@ -423,71 +804,59 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) } /* - * We need to check if page belongs to nid only for the boot - * case, during hotplug we know that all pages in the memory - * block belong to the same node. + * We need to check if page belongs to nid only at the boot + * case because node's ranges can be interleaved. */ - if (system_state == SYSTEM_BOOTING) { - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - } - ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, - &mem_blk->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - if (ret) - return ret; + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; - return sysfs_create_link_nowarn(&mem_blk->dev.kobj, - &node_devices[nid]->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); + return do_register_memory_block_under_node(nid, mem_blk); } /* mem section does not span the specified node */ return 0; } -/* unregister memory section under all nodes that it spans */ -int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, - unsigned long phys_index) +/* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) { - NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); - unsigned long pfn, sect_start_pfn, sect_end_pfn; + int nid = *(int *)arg; - if (!mem_blk) { - NODEMASK_FREE(unlinked_nodes); - return -EFAULT; - } - if (!unlinked_nodes) - return -ENOMEM; - nodes_clear(*unlinked_nodes); + return do_register_memory_block_under_node(nid, mem_blk); +} - sect_start_pfn = section_nr_to_pfn(phys_index); - sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; - for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { - int nid; +/* + * Unregister a memory block device under the node it spans. Memory blocks + * with multiple nodes cannot be offlined and therefore also never be removed. 
+ */ +void unregister_memory_block_under_nodes(struct memory_block *mem_blk) +{ + if (mem_blk->nid == NUMA_NO_NODE) + return; - nid = get_nid_for_pfn(pfn); - if (nid < 0) - continue; - if (!node_online(nid)) - continue; - if (node_test_and_set(nid, *unlinked_nodes)) - continue; - sysfs_remove_link(&node_devices[nid]->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - sysfs_remove_link(&mem_blk->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); - } - NODEMASK_FREE(unlinked_nodes); - return 0; + sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + sysfs_remove_link(&mem_blk->dev.kobj, + kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) +int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) { - return walk_memory_range(start_pfn, end_pfn, (void *)&nid, - register_mem_sect_under_node); + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + + return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func); } #ifdef CONFIG_HUGETLBFS @@ -575,8 +944,10 @@ int __register_one_node(int nid) register_cpu_under_node(cpu, nid); } + INIT_LIST_HEAD(&node_devices[nid]->access_list); /* initialize work queue for memory hot plug */ init_node_hugetlb_work(nid); + node_init_caches(nid); return error; } @@ -594,17 +965,6 @@ void unregister_one_node(int nid) * node states attributes */ -static ssize_t print_nodes_state(enum node_states state, char *buf) -{ - int n; - - n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", - nodemask_pr_args(&node_states[state])); - buf[n++] = '\n'; - buf[n] = '\0'; - return n; -} - struct node_attr { struct device_attribute attr; enum node_states state; @@ -614,7 +974,9 @@ static ssize_t show_node_state(struct device *dev, struct device_attribute *attr, char *buf) { struct node_attr *na = container_of(attr, struct node_attr, attr); - return print_nodes_state(na->state, buf); + + return sysfs_emit(buf, "%*pbl\n", + nodemask_pr_args(&node_states[na->state])); } #define _NODE_ATTR(name, state) \ @@ -629,6 +991,9 @@ static struct node_attr node_state_attr[] = { #endif [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY), [N_CPU] = _NODE_ATTR(has_cpu, N_CPU), +#ifdef CONFIG_COHERENT_DEVICE + [N_COHERENT_DEVICE] = _NODE_ATTR(is_cdm_node, N_COHERENT_DEVICE), +#endif }; static struct attribute *node_state_attrs[] = { @@ -640,6 +1005,9 @@ static struct attribute *node_state_attrs[] = { #endif &node_state_attr[N_MEMORY].attr.attr, &node_state_attr[N_CPU].attr.attr, +#ifdef CONFIG_COHERENT_DEVICE + &node_state_attr[N_COHERENT_DEVICE].attr.attr, +#endif NULL }; diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 60d6cc618f1c4c2d90dad17765494e9e26cbf869..6d54905c6263946da7b9ebe002214e3caf9f8a57 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -366,14 +366,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec) { struct platform_msi_priv_data *data = domain->host_data; - struct msi_desc *desc; - for_each_msi_entry(desc, data->dev) { + struct msi_desc *desc, *tmp; + for_each_msi_entry_safe(desc, tmp, data->dev) { if (WARN_ON(!desc->irq || desc->nvec_used != 1)) return; if (!(desc->irq >= virq && desc->irq < (virq + nvec))) continue; irq_domain_free_irqs_common(domain, desc->irq, 1); + list_del(&desc->list); + 
free_msi_entry(desc); } } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index dff82a3c2caa90162076a56c7911554d6fb2d09f..be8c82cc444502d6d64223cbff9102cbb00be09c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include "base.h" #include "power/power.h" @@ -66,7 +68,7 @@ void __weak arch_setup_pdev_archdata(struct platform_device *pdev) struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { - int i; + u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; @@ -161,7 +163,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name) { - int i; + u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; @@ -358,7 +360,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties); */ int platform_device_add(struct platform_device *pdev) { - int i, ret; + u32 i; + int ret; if (!pdev) return -EINVAL; @@ -424,7 +427,7 @@ int platform_device_add(struct platform_device *pdev) pdev->id = PLATFORM_DEVID_AUTO; } - while (--i >= 0) { + while (i--) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); @@ -445,7 +448,7 @@ EXPORT_SYMBOL_GPL(platform_device_add); */ void platform_device_del(struct platform_device *pdev) { - int i; + u32 i; if (pdev) { device_remove_properties(&pdev->dev); @@ -525,6 +528,8 @@ struct platform_device *platform_device_register_full( if (!pdev->dev.dma_mask) goto err; + kmemleak_ignore(pdev->dev.dma_mask); + *pdev->dev.dma_mask = pdevinfo->dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } @@ -695,6 +700,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); + if (retval) + return retval; /* * Fixup that section violation, being paranoid about code scanning @@ -839,10 +846,10 @@ EXPORT_SYMBOL_GPL(platform_unregister_drivers); * (b) sysfs attribute lets new-style coldplug recover from hotplug events * mishandled before system is fully running: "modprobe $(cat modalias)" */ -static ssize_t modalias_show(struct device *dev, struct device_attribute *a, - char *buf) +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct platform_device *pdev = to_platform_device(dev); + struct platform_device *pdev = to_platform_device(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); @@ -853,9 +860,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, if (len != -ENODEV) return len; - len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); - - return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; + return sysfs_emit(buf, "platform:%s\n", pdev->name); } static DEVICE_ATTR_RO(modalias); @@ -900,7 +905,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = sprintf(buf, "%s\n", pdev->driver_override); + len = sysfs_emit(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b413951c6abc8c5be2d2df2bd580d09947e89f5a..22aedb28aad7dabe9408ae24e8025e1355d87ab8 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); * For a detailed function description, see dev_pm_domain_attach_by_id(). */ struct device *dev_pm_domain_attach_by_name(struct device *dev, - char *name) + const char *name) { if (dev->pm_domain) return ERR_PTR(-EEXIST); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 4b5714199490802d626df3c34952925be0ee6e77..9e22b75c7f9f98dc57ea63fe8084d0df3de0401b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -467,6 +467,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, return -EAGAIN; } + /* Default to shallowest state. */ + if (!genpd->gov) + genpd->state_idx = 0; + if (genpd->power_off) { int ret; @@ -843,7 +847,7 @@ static int __init genpd_power_off_unused(void) return 0; } -late_initcall(genpd_power_off_unused); +late_initcall_sync(genpd_power_off_unused); #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF) @@ -1388,12 +1392,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_lock(genpd); - ret = genpd->attach_dev ? 
genpd->attach_dev(genpd, dev) : 0; if (ret) goto out; + genpd_lock(genpd); + dev_pm_domain_set(dev, &genpd->domain); genpd->device_count++; @@ -1401,9 +1405,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - out: genpd_unlock(genpd); - + out: if (ret) genpd_free_dev_data(dev, gpd_data); else @@ -1452,15 +1455,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, genpd->device_count--; genpd->max_off_time_changed = true; - if (genpd->detach_dev) - genpd->detach_dev(genpd, dev); - dev_pm_domain_set(dev, NULL); list_del_init(&pdd->list_node); genpd_unlock(genpd); + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + genpd_free_dev_data(dev, gpd_data); return 0; @@ -1687,6 +1690,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, ret = genpd_set_default_power_state(genpd); if (ret) return ret; + } else if (!gov) { + pr_warn("%s : no governor for states\n", genpd->name); } device_initialize(&genpd->dev); @@ -2235,7 +2240,8 @@ static void genpd_dev_pm_sync(struct device *dev) } static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, - unsigned int index, bool power_on) + struct device *base_dev, unsigned int index, + bool power_on) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; @@ -2253,7 +2259,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, mutex_unlock(&gpd_list_lock); dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - return driver_deferred_probe_check_state(dev); + return driver_deferred_probe_check_state(base_dev); } dev_dbg(dev, "adding to PM domain %s\n", pd->name); @@ -2309,7 +2315,7 @@ int genpd_dev_pm_attach(struct device *dev) "#power-domain-cells") != 1) return 0; - return __genpd_dev_pm_attach(dev, dev->of_node, 0, true); + return __genpd_dev_pm_attach(dev, dev->of_node, dev, 0, true); } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); @@ -2356,12 +2362,12 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, ret = device_register(genpd_dev); if (ret) { - kfree(genpd_dev); + put_device(genpd_dev); return ERR_PTR(ret); } /* Try to attach the device to the PM domain at the specified index. */ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); + ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, dev, index, false); if (ret < 1) { device_unregister(genpd_dev); return ret ? ERR_PTR(ret) : NULL; @@ -2383,7 +2389,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); * power-domain-names DT property. For further description see * genpd_dev_pm_attach_by_id(). */ -struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name) +struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) { int index; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a690fd40026051453ba138d4919811b726b9789b..fe7506aae13ca1ea7c7d09500398ba968768d45d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1719,13 +1719,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. 
+ * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. */ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; @@ -1736,6 +1740,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + /* Avoid direct_complete to let wakeup_path propagate. */ + if (device_may_wakeup(dev) || dev->power.wakeup_path) + dev->power.direct_complete = false; + if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); @@ -2105,7 +2113,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) void device_pm_check_callbacks(struct device *dev) { - spin_lock_irq(&dev->power.lock); + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && @@ -2114,7 +2124,7 @@ void device_pm_check_callbacks(struct device *dev) (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && !dev->driver->suspend && !dev->driver->resume)); - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); } bool dev_pm_smart_suspend_and_suspended(struct device *dev) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index beb85c31f3fa3b9f997a4f21c72891f40e7535ed..eaae4adf9ce4bc5a3c8b3ff12e2c99e86340e7f1 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -268,11 +268,8 @@ static int rpm_get_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME)) - continue; - - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND || - link->rpm_active) + if (!(link->flags & DL_FLAG_PM_RUNTIME) || + READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) continue; retval = pm_runtime_get_sync(link->supplier); @@ -281,7 +278,7 @@ static int rpm_get_suppliers(struct device *dev) pm_runtime_put_noidle(link->supplier); return retval; } - link->rpm_active = true; + refcount_inc(&link->rpm_active); } return 0; } @@ -290,12 +287,13 @@ static void rpm_put_suppliers(struct device *dev) { struct device_link *link; - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->rpm_active && - READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) { + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) + continue; + + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); - link->rpm_active = false; - } + } } /** @@ -1531,9 +1529,9 @@ void pm_runtime_remove(struct device *dev) * * Check links from this device to any consumers and if any of them have active * runtime PM references to the device, drop the usage counter of the device - * (once per link). + * (as many times as needed). * - * Links with the DL_FLAG_STATELESS flag set are ignored. 
+ * Links with the DL_FLAG_MANAGED flag unset are ignored. * * Since the device is guaranteed to be runtime-active at the point this is * called, nothing else needs to be done here. @@ -1550,13 +1548,11 @@ void pm_runtime_clean_up_links(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.consumers, s_node) { - if (link->flags & DL_FLAG_STATELESS) + if (!(link->flags & DL_FLAG_MANAGED)) continue; - if (link->rpm_active) { + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put_noidle(dev); - link->rpm_active = false; - } } device_links_read_unlock(idx); @@ -1574,8 +1570,11 @@ void pm_runtime_get_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) + if (link->flags & DL_FLAG_PM_RUNTIME) { + link->supplier_preactivated = true; pm_runtime_get_sync(link->supplier); + refcount_inc(&link->rpm_active); + } device_links_read_unlock(idx); } @@ -1587,13 +1586,22 @@ void pm_runtime_get_suppliers(struct device *dev) void pm_runtime_put_suppliers(struct device *dev) { struct device_link *link; + unsigned long flags; + bool put; int idx; idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_put(link->supplier); + if (link->supplier_preactivated) { + link->supplier_preactivated = false; + spin_lock_irqsave(&dev->power.lock, flags); + put = pm_runtime_status_suspended(dev) && + refcount_dec_not_one(&link->rpm_active); + spin_unlock_irqrestore(&dev->power.lock, flags); + if (put) + pm_runtime_put(link->supplier); + } device_links_read_unlock(idx); } @@ -1607,8 +1615,6 @@ void pm_runtime_new_link(struct device *dev) void pm_runtime_drop_link(struct device *dev) { - rpm_put_suppliers(dev); - spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); dev->power.links_count--; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d713738ce7967cdb8003166fc17bf63116d3b629..63375f80e241ef6c7aa4084ae458adca8b4cd6f7 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -101,7 +101,7 @@ static const char ctrl_on[] = "on"; static ssize_t control_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", + return sysfs_emit(buf, "%s\n", dev->power.runtime_auto ? 
ctrl_auto : ctrl_on); } @@ -127,7 +127,8 @@ static ssize_t runtime_active_time_show(struct device *dev, int ret; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); + ret = sysfs_emit(buf, "%i\n", + jiffies_to_msecs(dev->power.active_jiffies)); spin_unlock_irq(&dev->power.lock); return ret; } @@ -140,8 +141,8 @@ static ssize_t runtime_suspended_time_show(struct device *dev, int ret; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", - jiffies_to_msecs(dev->power.suspended_jiffies)); + ret = sysfs_emit(buf, "%i\n", + jiffies_to_msecs(dev->power.suspended_jiffies)); spin_unlock_irq(&dev->power.lock); return ret; } @@ -175,7 +176,7 @@ static ssize_t runtime_status_show(struct device *dev, return -EIO; } } - return sprintf(buf, p); + return sysfs_emit(buf, p); } static DEVICE_ATTR_RO(runtime_status); @@ -185,7 +186,7 @@ static ssize_t autosuspend_delay_ms_show(struct device *dev, { if (!dev->power.use_autosuspend) return -EIO; - return sprintf(buf, "%d\n", dev->power.autosuspend_delay); + return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay); } static ssize_t autosuspend_delay_ms_store(struct device *dev, @@ -214,11 +215,11 @@ static ssize_t pm_qos_resume_latency_us_show(struct device *dev, s32 value = dev_pm_qos_requested_resume_latency(dev); if (value == 0) - return sprintf(buf, "n/a\n"); + return sysfs_emit(buf, "n/a\n"); if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) value = 0; - return sprintf(buf, "%d\n", value); + return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_resume_latency_us_store(struct device *dev, @@ -258,11 +259,11 @@ static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, s32 value = dev_pm_qos_get_user_latency_tolerance(dev); if (value < 0) - return sprintf(buf, "auto\n"); + return sysfs_emit(buf, "%s\n", "auto"); if (value == PM_QOS_LATENCY_ANY) - return sprintf(buf, "any\n"); + return sysfs_emit(buf, "%s\n", "any"); - return sprintf(buf, "%d\n", value); + return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, @@ -294,8 +295,8 @@ static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) - & PM_QOS_FLAG_NO_POWER_OFF)); + return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) + & PM_QOS_FLAG_NO_POWER_OFF)); } static ssize_t pm_qos_no_power_off_store(struct device *dev, @@ -323,9 +324,9 @@ static const char _disabled[] = "disabled"; static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", device_can_wakeup(dev) - ? (device_may_wakeup(dev) ? _enabled : _disabled) - : ""); + return sysfs_emit(buf, "%s\n", device_can_wakeup(dev) + ? (device_may_wakeup(dev) ? _enabled : _disabled) + : ""); } static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, @@ -348,7 +349,7 @@ static DEVICE_ATTR_RW(wakeup); static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned long count = 0; + unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -357,7 +358,10 @@ static ssize_t wakeup_count_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? 
sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_count); @@ -366,7 +370,7 @@ static ssize_t wakeup_active_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned long count = 0; + unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -375,7 +379,10 @@ static ssize_t wakeup_active_count_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_active_count); @@ -384,7 +391,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned long count = 0; + unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -393,7 +400,10 @@ static ssize_t wakeup_abort_count_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_abort_count); @@ -402,7 +412,7 @@ static ssize_t wakeup_expire_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned long count = 0; + unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -411,7 +421,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_expire_count); @@ -419,7 +432,7 @@ static DEVICE_ATTR_RO(wakeup_expire_count); static ssize_t wakeup_active_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned int active = 0; + unsigned int active; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -428,7 +441,10 @@ static ssize_t wakeup_active_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%u\n", active); } static DEVICE_ATTR_RO(wakeup_active); @@ -437,7 +453,7 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { - s64 msec = 0; + s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -446,7 +462,10 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_total_time_ms); @@ -454,7 +473,7 @@ static DEVICE_ATTR_RO(wakeup_total_time_ms); static ssize_t wakeup_max_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { - s64 msec = 0; + s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -463,7 +482,10 @@ static ssize_t wakeup_max_time_ms_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? 
sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_max_time_ms); @@ -472,7 +494,7 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { - s64 msec = 0; + s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -481,7 +503,10 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_last_time_ms); @@ -491,7 +516,7 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { - s64 msec = 0; + s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); @@ -500,7 +525,10 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, enabled = true; } spin_unlock_irq(&dev->power.lock); - return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); @@ -511,7 +539,7 @@ static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); static ssize_t runtime_usage_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count)); + return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count)); } static DEVICE_ATTR_RO(runtime_usage); @@ -519,21 +547,26 @@ static ssize_t runtime_active_kids_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev->power.ignore_children ? - 0 : atomic_read(&dev->power.child_count)); + return sysfs_emit(buf, "%d\n", dev->power.ignore_children ? + 0 : atomic_read(&dev->power.child_count)); } static DEVICE_ATTR_RO(runtime_active_kids); static ssize_t runtime_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (dev->power.disable_depth && (dev->power.runtime_auto == false)) - return sprintf(buf, "disabled & forbidden\n"); - if (dev->power.disable_depth) - return sprintf(buf, "disabled\n"); - if (dev->power.runtime_auto == false) - return sprintf(buf, "forbidden\n"); - return sprintf(buf, "enabled\n"); + const char *output; + + if (dev->power.disable_depth && !dev->power.runtime_auto) + output = "disabled & forbidden"; + else if (dev->power.disable_depth) + output = "disabled"; + else if (!dev->power.runtime_auto) + output = "forbidden"; + else + output = "enabled"; + + return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(runtime_enabled); @@ -541,9 +574,9 @@ static DEVICE_ATTR_RO(runtime_enabled); static ssize_t async_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", - device_async_suspend_enabled(dev) ? - _enabled : _disabled); + return sysfs_emit(buf, "%s\n", + device_async_suspend_enabled(dev) ? 
+ _enabled : _disabled); } static ssize_t async_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3487862ea76d2a9e39dc03830ef7d..2dfa2e04874511a1c741c81d77b508b9f1fb9399 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -118,7 +118,6 @@ void wakeup_source_drop(struct wakeup_source *ws) if (!ws) return; - del_timer_sync(&ws->timer); __pm_relax(ws); } EXPORT_SYMBOL_GPL(wakeup_source_drop); @@ -205,6 +204,13 @@ void wakeup_source_remove(struct wakeup_source *ws) list_del_rcu(&ws->entry); raw_spin_unlock_irqrestore(&events_lock, flags); synchronize_srcu(&wakeup_srcu); + + del_timer_sync(&ws->timer); + /* + * Clear timer.function to make wakeup_source_not_registered() treat + * this wakeup source as not registered. + */ + ws->timer.function = NULL; } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -869,7 +875,7 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_system_cancel_wakeup(void) { - atomic_dec(&pm_abort_suspend); + atomic_dec_if_positive(&pm_abort_suspend); } void pm_wakeup_clear(bool reset) diff --git a/drivers/base/property.c b/drivers/base/property.c index 240ab5230ff6bad521c6d9d1ce99c5d6cbc6ca53..e5a58614f8e094f37b60249c8a6b2ec9b64020b2 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1341,7 +1341,7 @@ int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index) EXPORT_SYMBOL(fwnode_irq_get); /** - * device_graph_get_next_endpoint - Get next endpoint firmware node + * fwnode_graph_get_next_endpoint - Get next endpoint firmware node * @fwnode: Pointer to the parent firmware node * @prev: Previous endpoint node or %NULL to get the first * diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 6ad5ef48b61eee488e81ec4364b9097f31973a1f..8cd2ac650b50551bf716b8d9fd699bbec6d5b42f 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -44,7 +44,7 @@ config REGMAP_IRQ config REGMAP_SOUNDWIRE tristate - depends on SOUNDWIRE_BUS + depends on SOUNDWIRE config REGMAP_SCCB tristate diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index b1e9aae9a5d0be6e035c39472e8e3cdbbdc3abfb..b6f8f4059e255407d40a54a1d534835e4e8225ea 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -291,18 +291,18 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, blk = krealloc(rbnode->block, blklen * map->cache_word_size, - GFP_KERNEL); + map->alloc_flags); if (!blk) return -ENOMEM; + rbnode->block = blk; + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { present = krealloc(rbnode->cache_present, BITS_TO_LONGS(blklen) * sizeof(*present), - GFP_KERNEL); - if (!present) { - kfree(blk); + map->alloc_flags); + if (!present) return -ENOMEM; - } memset(present + BITS_TO_LONGS(rbnode->blklen), 0, (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) @@ -319,7 +319,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, } /* update the rbnode block, its size and the base register */ - rbnode->block = blk; rbnode->blklen = blklen; rbnode->base_reg = base_reg; rbnode->cache_present = present; @@ -335,7 +334,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) const struct regmap_range *range; int i; - rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); + rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags); if (!rbnode) return NULL; @@ -361,13 +360,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned 
int reg) } rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, - GFP_KERNEL); + map->alloc_flags); if (!rbnode->block) goto err_free; rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), sizeof(*rbnode->cache_present), - GFP_KERNEL); + map->alloc_flags); if (!rbnode->cache_present) goto err_free_block; @@ -468,7 +467,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, if (!rbnode) return -ENOMEM; regcache_rbtree_set_register(map, rbnode, - reg - rbnode->base_reg, value); + (reg - rbnode->base_reg) / map->reg_stride, + value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 87b562e49a435ffedac0f71b4652b669611e9a0e..c9687c8b23478138fe5824be4a7bf1737a384d88 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -575,6 +575,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name) } if (!strcmp(name, "dummy")) { + kfree(map->debugfs_name); + map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", dummy_index); name = map->debugfs_name; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 429ca8ed7e518087bc1ddaebd9f4c8393eb5179e..982c7ac311b8524eb2b5bbda1275d4a3c81aab50 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -91,6 +91,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * suppress pointless writes. */ for (i = 0; i < d->chip->num_regs; i++) { + if (!d->chip->mask_base) + continue; + reg = d->chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (d->chip->mask_invert) { @@ -526,6 +529,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; + if (!chip->mask_base) + continue; + reg = chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); if (chip->mask_invert) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0360a90ad6b623530f0053dbc3b3d29748266aae..3315bd1ddf306e6175e099cb85cebdd77dc00368 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1350,7 +1350,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return (*r)->name && !strcmp((*r)->name, data); else return 1; } @@ -1618,6 +1618,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, map->format.reg_bytes + map->format.pad_bytes, val, val_len); + else + ret = -ENOTSUPP; /* If that didn't work fall back on linearising by hand. 
*/ if (ret == -ENOTSUPP) { diff --git a/drivers/base/soc.c b/drivers/base/soc.c index 10b280f30217bcc6ec72cea2d7ca9612fc299293..55f196e19581bb9902a258a52cb5b6f70e1aad2f 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -17,9 +17,9 @@ static DEFINE_IDA(soc_ida); -static ssize_t soc_info_get(struct device *dev, - struct device_attribute *attr, - char *buf); +/* Prototype to allow declarations of DEVICE_ATTR() before soc_info_show */ +static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr, + char *buf); struct soc_device { struct device dev; @@ -31,10 +31,10 @@ static struct bus_type soc_bus_type = { .name = "soc", }; -static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL); -static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL); -static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL); -static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL); +static DEVICE_ATTR(machine, 0444, soc_info_show, NULL); +static DEVICE_ATTR(family, 0444, soc_info_show, NULL); +static DEVICE_ATTR(soc_id, 0444, soc_info_show, NULL); +static DEVICE_ATTR(revision, 0444, soc_info_show, NULL); struct device *soc_device_to_device(struct soc_device *soc_dev) { @@ -65,20 +65,20 @@ static umode_t soc_attribute_mode(struct kobject *kobj, return 0; } -static ssize_t soc_info_get(struct device *dev, +static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); if (attr == &dev_attr_machine) - return sprintf(buf, "%s\n", soc_dev->attr->machine); + return sysfs_emit(buf, "%s\n", soc_dev->attr->machine); if (attr == &dev_attr_family) - return sprintf(buf, "%s\n", soc_dev->attr->family); + return sysfs_emit(buf, "%s\n", soc_dev->attr->family); if (attr == &dev_attr_revision) - return sprintf(buf, "%s\n", soc_dev->attr->revision); + return sysfs_emit(buf, "%s\n", soc_dev->attr->revision); if (attr == &dev_attr_soc_id) - return sprintf(buf, "%s\n", soc_dev->attr->soc_id); + return sysfs_emit(buf, "%s\n", soc_dev->attr->soc_id); return -EINVAL; @@ -157,6 +157,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr out1: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(soc_device_register); /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. 
*/ void soc_device_unregister(struct soc_device *soc_dev) @@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev) device_unregister(&soc_dev->dev); early_soc_dev_attr = NULL; } +EXPORT_SYMBOL_GPL(soc_device_unregister); static int __init soc_bus_register(void) { diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 5fd9f167ecc10e68a53c29b0a0046206a5003879..da74231de498e5c938484d273e820820c775d339 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -14,11 +14,11 @@ #include #include -#define define_id_show_func(name) \ -static ssize_t name##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%d\n", topology_##name(dev->id)); \ +#define define_id_show_func(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \ } #define define_siblings_show_map(name, mask) \ @@ -43,6 +43,9 @@ static ssize_t name##_list_show(struct device *dev, \ define_id_show_func(physical_package_id); static DEVICE_ATTR_RO(physical_package_id); +define_id_show_func(die_id); +static DEVICE_ATTR_RO(die_id); + define_id_show_func(core_id); static DEVICE_ATTR_RO(core_id); @@ -72,6 +75,7 @@ static DEVICE_ATTR_RO(drawer_siblings_list); static struct attribute *default_attrs[] = { &dev_attr_physical_package_id.attr, + &dev_attr_die_id.attr, &dev_attr_core_id.attr, &dev_attr_thread_siblings.attr, &dev_attr_thread_siblings_list.attr, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index d4913516823f141663fac804f54d0f3c7cfec974..82f32f76ad19dad5eb5ea08c7f044f65bb3ca951 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -39,6 +39,22 @@ config BLK_DEV_FD To compile this driver as a module, choose M here: the module will be called floppy. +config BLK_DEV_FD_RAWCMD + bool "Support for raw floppy disk commands (DEPRECATED)" + depends on BLK_DEV_FD + help + If you want to use actual physical floppies and expect to do + special low-level hardware accesses to them (access and use + non-standard formats, for example), then enable this. + + Note that the code enabled by this option is rarely used and + might be unstable or insecure, and distros should not enable it. + + Note: FDRAWCMD is deprecated and will be removed from the kernel + in the near future. + + If unsure, say N. 
+ config AMIGA_FLOPPY tristate "Amiga floppy support" depends on AMIGA diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 3aaf6af3ec23d7d54d5f45deb97ede64bec7d3db..2158e130744e00d8584421020eab88b7fa26cc65 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1701,11 +1701,41 @@ static const struct block_device_operations floppy_fops = { .check_events = amiga_check_events, }; +static struct gendisk *fd_alloc_disk(int drive) +{ + struct gendisk *disk; + + disk = alloc_disk(1); + if (!disk) + goto out; + + disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); + if (IS_ERR(disk->queue)) { + disk->queue = NULL; + goto out_put_disk; + } + + unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL); + if (!unit[drive].trackbuf) + goto out_cleanup_queue; + + return disk; + +out_cleanup_queue: + blk_cleanup_queue(disk->queue); + disk->queue = NULL; +out_put_disk: + put_disk(disk); +out: + unit[drive].type->code = FD_NODRIVE; + return NULL; +} + static int __init fd_probe_drives(void) { int drive,drives,nomem; - printk(KERN_INFO "FD: probing units\nfound "); + pr_info("FD: probing units\nfound"); drives=0; nomem=0; for(drive=0;drive<FD_MAX_UNITS;drive++) { struct gendisk *disk; fd_probe(drive); if (unit[drive].type->code == FD_NODRIVE) continue; - disk = alloc_disk(1); + + disk = fd_alloc_disk(drive); if (!disk) { - unit[drive].type->code = FD_NODRIVE; + pr_cont(" no mem for fd%d", drive); + nomem = 1; continue; } unit[drive].gendisk = disk; - - disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); - if (!disk->queue) { - unit[drive].type->code = FD_NODRIVE; - continue; - } - drives++; - if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { - printk("no mem for "); - unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */ - drives--; - nomem = 1; - } - printk("fd%d ",drive); + + pr_cont(" fd%d",drive); disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; @@ -1744,11 +1764,11 @@ static int __init fd_probe_drives(void) } if ((drives > 0) || (nomem == 0)) { if (drives == 0) - printk("no drives"); - printk("\n"); + pr_cont(" no drives"); + pr_cont("\n"); return drives; } - printk("\n"); + pr_cont("\n"); return -ENOMEM; } @@ -1831,30 +1851,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev) return ret; } -#if 0 /* not safe to unload */ -static int __exit amiga_floppy_remove(struct platform_device *pdev) -{ - int i; - - for( i = 0; i < FD_MAX_UNITS; i++) { - if (unit[i].type->code != FD_NODRIVE) { - struct request_queue *q = unit[i].gendisk->queue; - del_gendisk(unit[i].gendisk); - put_disk(unit[i].gendisk); - kfree(unit[i].trackbuf); - if (q) - blk_cleanup_queue(q); - } - } - blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); - free_irq(IRQ_AMIGA_CIAA_TB, NULL); - free_irq(IRQ_AMIGA_DSKBLK, NULL); - custom.dmacon = DMAF_DISK; /* disable DMA */ - amiga_chip_free(raw_buf); - unregister_blkdev(FLOPPY_MAJOR, "fd"); -} -#endif - static struct platform_driver amiga_floppy_driver = { .driver = { .name = "amiga-floppy", diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 136dc507d0206dd0f097774f664c00b434d30bbe..c2b32c53da2bb750f4837bbfe390516e025e5a5c 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -420,13 +420,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu rcu_read_lock(); for_each_netdev_rcu(&init_net, ifp) { dev_hold(ifp); - if (!is_aoe_netif(ifp)) - goto cont; + if (!is_aoe_netif(ifp)) { + dev_put(ifp); + continue; + } skb = new_skb(sizeof *h + sizeof
*ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); - goto cont; + dev_put(ifp); + continue; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; @@ -441,9 +444,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu h->major = cpu_to_be16(aoemajor); h->minor = aoeminor; h->cmd = AOECMD_CFG; - -cont: - dev_put(ifp); } rcu_read_unlock(); } diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 63773a90581dd36818051a114e38b5b60af0a874..1e66c7a188a1217d7a7205230e2f9557f6ddcf31 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock) pr_warn("aoe: packet could not be sent on %s. %s\n", ifp ? ifp->name : "netif", "consider increasing tx_queue_len"); + dev_put(ifp); spin_lock_irq(&txlock); } return 0; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index dfb2c2622e5a64d77e85ca9d14059c25f1840878..822e3060d834820c40d98eeca1e92ef751c68fbd 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1935,6 +1935,11 @@ static int __init atari_floppy_init (void) unit[i].disk = alloc_disk(1); if (!unit[i].disk) goto Enomem; + + unit[i].disk->queue = blk_init_queue(do_fd_request, + &ataflop_lock); + if (!unit[i].disk->queue) + goto Enomem; } if (UseTrackbuffer < 0) @@ -1966,10 +1971,6 @@ static int __init atari_floppy_init (void) sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; unit[i].disk->private_data = &unit[i]; - unit[i].disk->queue = blk_init_queue(do_fd_request, - &ataflop_lock); - if (!unit[i].disk->queue) - goto Enomem; set_capacity(unit[i].disk, MAX_DISK_SIZE * 2); add_disk(unit[i].disk); } @@ -1984,13 +1985,17 @@ static int __init atari_floppy_init (void) return 0; Enomem: - while (i--) { - struct request_queue *q = unit[i].disk->queue; + do { + struct gendisk *disk = unit[i].disk; - put_disk(unit[i].disk); - if (q) - blk_cleanup_queue(q); - } + if (disk) { + if (disk->queue) { + blk_cleanup_queue(disk->queue); + disk->queue = NULL; + } + put_disk(unit[i].disk); + } + } while (i--); unregister_blkdev(FLOPPY_MAJOR, "fd"); return -ENOMEM; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index df8103dd40ac2d1f6e5c29ef7790160a3335b35b..fc1abcf760ad022a95d986fdf7e42fb8dd59f734 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) /* * Must use NOIO because we don't want to recurse back into the * block or filesystem layers from page reclaim. - * - * Cannot support DAX and highmem, because our ->direct_access - * routine for DAX must return memory that is always addressable. - * If DAX was reworked to use pfns and kmap throughout, this - * restriction might be able to be lifted. 
*/ - gfp_flags = GFP_NOIO | __GFP_ZERO; + gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM; page = alloc_page(gfp_flags); if (!page) return NULL; @@ -396,15 +391,14 @@ static struct brd_device *brd_alloc(int i) disk->first_minor = i * max_part; disk->fops = &brd_fops; disk->private_data = brd; - disk->queue = brd->brd_queue; disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "ram%d", i); set_capacity(disk, rd_size * 2); - disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; + brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue); return brd; @@ -424,24 +418,20 @@ static void brd_free(struct brd_device *brd) kfree(brd); } -static struct brd_device *brd_init_one(int i, bool *new) +static void brd_init_one(int i) { struct brd_device *brd; - *new = false; - list_for_each_entry(brd, &brd_devices, brd_list) { + list_for_each_entry(brd, &brd_devices, brd_list) if (brd->brd_number == i) - goto out; - } + return; brd = brd_alloc(i); if (brd) { + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); list_add_tail(&brd->brd_list, &brd_devices); } - *new = true; -out: - return brd; } static void brd_del_one(struct brd_device *brd) @@ -453,19 +443,30 @@ static void brd_del_one(struct brd_device *brd) static struct kobject *brd_probe(dev_t dev, int *part, void *data) { - struct brd_device *brd; - struct kobject *kobj; - bool new; - mutex_lock(&brd_devices_mutex); - brd = brd_init_one(MINOR(dev) / max_part, &new); - kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL; + brd_init_one(MINOR(dev) / max_part); mutex_unlock(&brd_devices_mutex); - if (new) - *part = 0; + return NULL; +} + +static inline void brd_check_and_reset_par(void) +{ + if (unlikely(!max_part)) + max_part = 1; + + /* + * make sure 'max_part' can be divided exactly by (1U << MINORBITS), + * otherwise, it is possible to get same dev_t when adding partitions.
+ */ + if ((1U << MINORBITS) % max_part != 0) + max_part = 1UL << fls(max_part); - return kobj; + if (max_part > DISK_MAX_PARTS) { + pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n", + DISK_MAX_PARTS, DISK_MAX_PARTS); + max_part = DISK_MAX_PARTS; + } } static int __init brd_init(void) @@ -491,8 +492,7 @@ static int __init brd_init(void) if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) return -EIO; - if (unlikely(!max_part)) - max_part = 1; + brd_check_and_reset_par(); for (i = 0; i < rd_nr; i++) { brd = brd_alloc(i); @@ -503,8 +503,14 @@ static int __init brd_init(void) /* point of no return */ - list_for_each_entry(brd, &brd_devices, brd_list) + list_for_each_entry(brd, &brd_devices, brd_list) { + /* + * associate with queue just before adding disk for + * avoiding to mess up failure path + */ + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); + } blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS, THIS_MODULE, brd_probe, NULL, NULL); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index ef8212a4b73ef5cf2a659ac62170d6efaec291f0..5e3885f5729b045fec3e3e8fe1e7237191dafd5b 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -334,6 +334,8 @@ static int drbd_thread_setup(void *arg) thi->name[0], resource->name); + allow_kernel_signal(DRBD_SIGKILL); + allow_kernel_signal(SIGXCPU); restart: retval = thi->function(thi); @@ -796,7 +798,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm if (nc->tentative && connection->agreed_pro_version < 92) { rcu_read_unlock(); - mutex_unlock(&sock->mutex); drbd_err(connection, "--dry-run is not supported by peer"); return -EOPNOTSUPP; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index b4f02768ba475c13620582e44332b32ac9a65b48..5b15ffd0c7f577112ecf5d0a2a2a13055505c682 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -668,14 +668,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for if (rv == SS_TWO_PRIMARIES) { /* Maybe the peer is detected as dead very soon... retry at most once more in this case. */ - int timeo; - rcu_read_lock(); - nc = rcu_dereference(connection->net_conf); - timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; - rcu_read_unlock(); - schedule_timeout_interruptible(timeo); - if (try < max_tries) + if (try < max_tries) { + int timeo; try = max_tries - 1; + rcu_read_lock(); + nc = rcu_dereference(connection->net_conf); + timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; + rcu_read_unlock(); + schedule_timeout_interruptible(timeo); + } continue; } if (rv < SS_SUCCESS) { @@ -1514,6 +1515,30 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis } } +static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc) +{ + int err = -EBUSY; + + if (device->act_log && + device->act_log->nr_elements == dc->al_extents) + return 0; + + drbd_suspend_io(device); + /* If IO completion is currently blocked, we would likely wait + * "forever" for the activity log to become unused. So we don't. 
*/ + if (atomic_read(&device->ap_bio_cnt)) + goto out; + + wait_event(device->al_wait, lc_try_lock(device->act_log)); + drbd_al_shrink(device); + err = drbd_check_al_size(device, dc); + lc_unlock(device->act_log); + wake_up(&device->al_wait); +out: + drbd_resume_io(device); + return err; +} + int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { struct drbd_config_context adm_ctx; @@ -1576,15 +1601,12 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) } } - drbd_suspend_io(device); - wait_event(device->al_wait, lc_try_lock(device->act_log)); - drbd_al_shrink(device); - err = drbd_check_al_size(device, new_disk_conf); - lc_unlock(device->act_log); - wake_up(&device->al_wait); - drbd_resume_io(device); - + err = disk_opts_check_al_size(device, new_disk_conf); if (err) { + /* Could be just "busy". Ignore? + * Introduce dedicated error code? */ + drbd_msg_put_info(adm_ctx.reply_skb, + "Try again without changing current al-extents setting"); retcode = ERR_NOMEM; goto fail_unlock; } @@ -1934,9 +1956,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) } } - if (device->state.conn < C_CONNECTED && - device->state.role == R_PRIMARY && device->ed_uuid && - (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { + if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid && + (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) && + (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { drbd_err(device, "Can only attach to data with current UUID=%016llX\n", (unsigned long long)device->ed_uuid); retcode = ERR_DATA_NOT_CURRENT; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 75f6b47169e65ac8141ec507a974fb3622d1b562..cbb6ef719978f8f44617895de6a2c0af495903b2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3364,7 +3364,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, enum drbd_conns rv = C_MASK; enum drbd_disk_state mydisk; struct net_conf *nc; - int hg, rule_nr, rr_conflict, tentative; + int hg, rule_nr, rr_conflict, tentative, always_asbp; mydisk = device->state.disk; if (mydisk == D_NEGOTIATING) @@ -3415,8 +3415,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, rcu_read_lock(); nc = rcu_dereference(peer_device->connection->net_conf); + always_asbp = nc->always_asbp; + rr_conflict = nc->rr_conflict; + tentative = nc->tentative; + rcu_read_unlock(); - if (hg == 100 || (hg == -100 && nc->always_asbp)) { + if (hg == 100 || (hg == -100 && always_asbp)) { int pcount = (device->state.role == R_PRIMARY) + (peer_role == R_PRIMARY); int forced = (hg == -100); @@ -3455,9 +3459,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, "Sync from %s node\n", (hg < 0) ? "peer" : "this"); } - rr_conflict = nc->rr_conflict; - tentative = nc->tentative; - rcu_read_unlock(); if (hg == -100) { /* FIXME this log message is not correct if we end up here @@ -3979,6 +3980,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? 
p->qlim : NULL; enum determine_dev_size dd = DS_UNCHANGED; sector_t p_size, p_usize, p_csize, my_usize; + sector_t new_size, cur_size; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; @@ -3986,6 +3988,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info if (!peer_device) return config_unknown_volume(connection, pi); device = peer_device->device; + cur_size = drbd_get_capacity(device->this_bdev); p_size = be64_to_cpu(p->d_size); p_usize = be64_to_cpu(p->u_size); @@ -3996,7 +3999,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info device->p_size = p_size; if (get_ldev(device)) { - sector_t new_size, cur_size; rcu_read_lock(); my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; rcu_read_unlock(); @@ -4014,7 +4016,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info /* Never shrink a device with usable data during connect. But allow online shrinking if we are connected. */ new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0); - cur_size = drbd_get_capacity(device->this_bdev); if (new_size < cur_size && device->state.disk >= D_OUTDATED && device->state.conn < C_CONNECTED) { @@ -4079,9 +4080,36 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info * * However, if he sends a zero current size, * take his (user-capped or) backing disk size anyways. + * + * Unless of course he does not have a disk himself. + * In which case we ignore this completely. */ + sector_t new_size = p_csize ?: p_usize ?: p_size; drbd_reconsider_queue_parameters(device, NULL, o); - drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size); + if (new_size == 0) { + /* Ignore, peer knows nothing. */ + } else if (new_size == cur_size) { + /* nothing to do */ + } else if (cur_size != 0 && p_size == 0) { + drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n", + (unsigned long long)new_size, (unsigned long long)cur_size); + } else if (new_size < cur_size && device->state.role == R_PRIMARY) { + drbd_err(device, "The peer's device size is too small! 
(%llu < %llu sectors); demote me first!\n", + (unsigned long long)new_size, (unsigned long long)cur_size); + conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); + return -EIO; + } else { + /* I believe the peer, if + * - I don't have a current size myself + * - we agree on the size anyways + * - I do have a current size, am Secondary, + * and he has the only disk + * - I do have a current size, am Primary, + * and he has the only disk, + * which is larger than my current size + */ + drbd_set_my_capacity(device, new_size); + } } if (get_ldev(device)) { @@ -4141,7 +4169,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info kfree(device->p_uuid); device->p_uuid = p_uuid; - if (device->state.conn < C_CONNECTED && + if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) && device->state.disk < D_INCONSISTENT && device->state.role == R_PRIMARY && (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { @@ -4367,6 +4395,25 @@ static int receive_state(struct drbd_connection *connection, struct packet_info if (peer_state.conn == C_AHEAD) ns.conn = C_BEHIND; + /* TODO: + * if (primary and diskless and peer uuid != effective uuid) + * abort attach on peer; + * + * If this node does not have good data, was already connected, but + * the peer did a late attach only now, trying to "negotiate" with me, + * AND I am currently Primary, possibly frozen, with some specific + * "effective" uuid, this should never be reached, really, because + * we first send the uuids, then the current state. + * + * In this scenario, we already dropped the connection hard + * when we received the unsuitable uuids (receive_uuids(). + * + * Should we want to change this, that is: not drop the connection in + * receive_uuids() already, then we would need to add a branch here + * that aborts the attach of "unsuitable uuids" on the peer in case + * this node is currently Diskless Primary. 
+ */ + if (device->p_uuid && peer_state.disk >= D_NEGOTIATING && get_ldev_if_state(device, D_NEGOTIATING)) { int cr; /* consider resync */ @@ -5239,7 +5286,7 @@ static int drbd_do_auth(struct drbd_connection *connection) unsigned int key_len; char secret[SHARED_SECRET_MAX]; /* 64 byte */ unsigned int resp_size; - SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); + struct shash_desc *desc; struct packet_info pi; struct net_conf *nc; int err, rv; @@ -5252,6 +5299,13 @@ static int drbd_do_auth(struct drbd_connection *connection) memcpy(secret, nc->shared_secret, key_len); rcu_read_unlock(); + desc = kmalloc(sizeof(struct shash_desc) + + crypto_shash_descsize(connection->cram_hmac_tfm), + GFP_KERNEL); + if (!desc) { + rv = -1; + goto fail; + } desc->tfm = connection->cram_hmac_tfm; desc->flags = 0; @@ -5394,7 +5448,10 @@ static int drbd_do_auth(struct drbd_connection *connection) kfree(peers_ch); kfree(response); kfree(right_response); - shash_desc_zero(desc); + if (desc) { + shash_desc_zero(desc); + kfree(desc); + } return rv; } diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 0813c654c89387e36e0174a453028e8c5b75f26b..b452359b6aae84f40e5602d16ae51859a42338c0 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -688,11 +688,9 @@ request_detach(struct drbd_device *device) CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO); } -enum drbd_state_rv -drbd_request_detach_interruptible(struct drbd_device *device) +int drbd_request_detach_interruptible(struct drbd_device *device) { - enum drbd_state_rv rv; - int ret; + int ret, rv; drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ wait_event_interruptible(device->state_wait, diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h index ea58301d0895c96212f2fdc38e39e9859d9c627d..f87371e55e682ac4deaa4ce69fb44765ceb8460b 100644 --- a/drivers/block/drbd/drbd_state.h +++ b/drivers/block/drbd/drbd_state.h @@ -131,7 +131,7 @@ extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state enum chg_state_flags, struct completion *done); extern void print_st_err(struct drbd_device *, union drbd_state, - union drbd_state, int); + union drbd_state, enum drbd_state_rv); enum drbd_state_rv _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val, @@ -162,8 +162,7 @@ static inline int drbd_request_state(struct drbd_device *device, } /* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */ -enum drbd_state_rv -drbd_request_detach_interruptible(struct drbd_device *device); +int drbd_request_detach_interruptible(struct drbd_device *device); enum drbd_role conn_highest_role(struct drbd_connection *connection); enum drbd_role conn_highest_peer(struct drbd_connection *connection); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index f2b6f4da10341e77cc0280f48560405fecf8b85e..9f63a5a7aad1160ceb84c550966061b8b080ae4c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -520,8 +520,8 @@ static unsigned long fdc_busy; static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); static DECLARE_WAIT_QUEUE_HEAD(command_done); -/* Errors during formatting are counted here. */ -static int format_errors; +/* errors encountered on the current (or last) request */ +static int floppy_errors; /* Format request descriptor. 
*/ static struct format_descr format_req; @@ -541,7 +541,6 @@ static struct format_descr format_req; static char *floppy_track_buffer; static int max_buffer_sectors; -static int *errors; typedef void (*done_f)(int); static const struct cont_t { void (*interrupt)(void); @@ -852,14 +851,17 @@ static void reset_fdc_info(int mode) /* selects the fdc and drive, and enables the fdc's input/dma. */ static void set_fdc(int drive) { + unsigned int new_fdc = fdc; + if (drive >= 0 && drive < N_DRIVE) { - fdc = FDC(drive); + new_fdc = FDC(drive); current_drive = drive; } - if (fdc != 1 && fdc != 0) { + if (new_fdc >= N_FDC) { pr_info("bad fdc value\n"); return; } + fdc = new_fdc; set_dor(fdc, ~0, 8); #if N_FDC > 1 set_dor(1 - fdc, ~8, 0); @@ -1431,7 +1433,7 @@ static int interpret_errors(void) if (DP->flags & FTD_MSG) DPRINT("Over/Underrun - retrying\n"); bad = 0; - } else if (*errors >= DP->max_errors.reporting) { + } else if (floppy_errors >= DP->max_errors.reporting) { print_errors(); } if (ST2 & ST2_WC || ST2 & ST2_BC) @@ -2051,7 +2053,7 @@ static void bad_flp_intr(void) if (!next_valid_format()) return; } - err_count = ++(*errors); + err_count = ++floppy_errors; INFBOUND(DRWE->badness, err_count); if (err_count > DP->max_errors.abort) cont->done(0); @@ -2119,6 +2121,9 @@ static void setup_format_params(int track) raw_cmd->kernel_data = floppy_track_buffer; raw_cmd->length = 4 * F_SECT_PER_TRACK; + if (!F_SECT_PER_TRACK) + return; + /* allow for about 30ms for data transport per track */ head_shift = (F_SECT_PER_TRACK + 5) / 6; @@ -2193,9 +2198,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req) return -EINVAL; } format_req = *tmp_format_req; - format_errors = 0; cont = &format_cont; - errors = &format_errors; + floppy_errors = 0; ret = wait_til_done(redo_format, true); if (ret == -EINTR) return -EINTR; @@ -2678,7 +2682,7 @@ static int make_raw_rw_request(void) */ if (!direct || (indirect * 2 > direct * 3 && - *errors < DP->max_errors.read_track && + floppy_errors < DP->max_errors.read_track && ((!probing || (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); @@ -2812,13 +2816,13 @@ static int set_next_request(void) if (q) { current_req = blk_fetch_request(q); if (current_req) { - current_req->error_count = 0; - break; + floppy_errors = 0; + return 1; } } } while (fdc_queue != old_pos); - return current_req != NULL; + return 0; } static void redo_fd_request(void) @@ -2874,7 +2878,6 @@ static void redo_fd_request(void) _floppy = floppy_type + DP->autodetect[DRS->probed_format]; } else probing = 0; - errors = &(current_req->error_count); tmp = make_raw_rw_request(); if (tmp < 2) { request_done(tmp); @@ -3017,6 +3020,8 @@ static const char *drive_name(int type, int drive) return "(null)"; } +#ifdef CONFIG_BLK_DEV_FD_RAWCMD + /* raw commands */ static void raw_cmd_done(int flag) { @@ -3226,6 +3231,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param) return ret; } +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + int ret; + + pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n"); + + if (type) + return -EINVAL; + if (lock_fdc(drive)) + return -EINTR; + set_floppy(drive); + ret = raw_cmd_ioctl(cmd, param); + if (ret == -EINTR) + return -EINTR; + process_fd_request(); + return ret; +} + +#else /* CONFIG_BLK_DEV_FD_RAWCMD */ + +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + return -EOPNOTSUPP; +} + +#endif + 
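set_fdc() above now computes the candidate controller index into a local, rejects it if out of range, and only then updates the global. A minimal userspace sketch of that validate-before-commit pattern, with invented constants and a simplified drive-to-controller mapping:

#include <stdio.h>

#define N_FDC   2
#define N_DRIVE 8
#define FDC(drive) ((drive) >> 2)   /* illustrative mapping only */

static unsigned int fdc;            /* currently selected controller */

static int set_fdc(int drive)
{
	unsigned int new_fdc = fdc;   /* default: keep current selection */

	if (drive >= 0 && drive < N_DRIVE)
		new_fdc = FDC(drive);
	if (new_fdc >= N_FDC) {
		fprintf(stderr, "bad fdc value\n");
		return -1;            /* global 'fdc' left untouched */
	}
	fdc = new_fdc;
	return 0;
}

int main(void)
{
	set_fdc(5);                   /* drive 5 -> controller 1 */
	printf("fdc=%u\n", fdc);
	set_fdc(42);                  /* invalid drive: selection kept as-is */
	printf("fdc=%u\n", fdc);
	return 0;
}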
static int invalidate_drive(struct block_device *bdev) { /* invalidate the buffer track to force a reread */ @@ -3241,8 +3275,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, int cnt; /* sanity checking for parameters. */ - if (g->sect <= 0 || - g->head <= 0 || + if ((int)g->sect <= 0 || + (int)g->head <= 0 || + /* check for overflow in max_sector */ + (int)(g->sect * g->head) <= 0 || + /* check for zero in F_SECT_PER_TRACK */ + (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 || g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || /* check if reserved bits are set */ (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0) @@ -3386,12 +3424,29 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } +static bool valid_floppy_drive_params(const short autodetect[8], + int native_format) +{ + size_t floppy_type_size = ARRAY_SIZE(floppy_type); + size_t i = 0; + + for (i = 0; i < 8; ++i) { + if (autodetect[i] < 0 || + autodetect[i] >= floppy_type_size) + return false; + } + + if (native_format < 0 || native_format >= floppy_type_size) + return false; + + return true; +} + static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long param) { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(UDRS->fd_device); - int i; int ret; int size; union inparam { @@ -3512,6 +3567,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int SUPBOUND(size, strlen((const char *)outparam) + 1); break; case FDSETDRVPRM: + if (!valid_floppy_drive_params(inparam.dp.autodetect, + inparam.dp.native_format)) + return -EINVAL; *UDP = inparam.dp; break; case FDGETDRVPRM: @@ -3539,16 +3597,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = UDRWE; break; case FDRAWCMD: - if (type) - return -EINVAL; - if (lock_fdc(drive)) - return -EINTR; - set_floppy(drive); - i = raw_cmd_ioctl(cmd, (void __user *)param); - if (i == -EINTR) - return -EINTR; - process_fd_request(); - return i; + return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param); case FDTWADDLE: if (lock_fdc(drive)) return -EINTR; @@ -3709,6 +3758,8 @@ static int compat_setdrvprm(int drive, return -EPERM; if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params))) return -EFAULT; + if (!valid_floppy_drive_params(v.autodetect, v.native_format)) + return -EINVAL; mutex_lock(&floppy_mutex); UDP->cmos = v.cmos; UDP->max_dtr = v.max_dtr; @@ -3761,7 +3812,7 @@ static int compat_getdrvprm(int drive, v.native_format = UDP->native_format; mutex_unlock(&floppy_mutex); - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params))) + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params))) return -EFAULT; return 0; } @@ -3797,7 +3848,7 @@ static int compat_getdrvstat(int drive, bool poll, v.bufblocks = UDRS->bufblocks; mutex_unlock(&floppy_mutex); - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) return -EFAULT; return 0; Eintr: @@ -4084,7 +4135,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { if (lock_fdc(drive)) - return -EINTR; + return 0; poll_drive(false, 0); process_fd_request(); } @@ -4151,10 +4202,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_end_io = floppy_rb0_cb; bio_set_op_attrs(&bio, REQ_OP_READ, 0); + 
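The extra set_geometry() checks above guard against negative-looking field values and a sect * head product that wraps. A small userspace illustration of the same idea, using a stand-in struct rather than the real struct floppy_struct:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct geom {
	unsigned int sect;
	unsigned int head;
	unsigned int track;
};

static bool geometry_ok(const struct geom *g)
{
	/* reinterpret as signed to catch absurdly large field values */
	if ((int)g->sect <= 0 || (int)g->head <= 0 || (int)g->track <= 0)
		return false;
	/* reject products that would wrap a signed int (future max_sector) */
	if (g->sect > (unsigned int)INT_MAX / g->head)
		return false;
	return true;
}

int main(void)
{
	struct geom sane  = { .sect = 18, .head = 2, .track = 80 };
	struct geom crazy = { .sect = 0x40000000, .head = 4, .track = 80 };

	printf("sane:  %d\n", geometry_ok(&sane));   /* 1 */
	printf("crazy: %d\n", geometry_ok(&crazy));  /* 0 */
	return 0;
}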
init_completion(&cbdata.complete); + submit_bio(&bio); process_fd_request(); - init_completion(&cbdata.complete); wait_for_completion(&cbdata.complete); __free_page(page); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ea9debf59b225c19d815e7ff1fd8aa950f5dcb1b..692c638826862d4c2ab837e07c80134a413fd166 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -83,7 +83,7 @@ #include static DEFINE_IDR(loop_index_idr); -static DEFINE_MUTEX(loop_index_mutex); +static DEFINE_MUTEX(loop_ctl_mutex); static int max_part; static int part_shift; @@ -241,7 +241,7 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) set_capacity(lo->lo_disk, x); bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); /* let user-space know about the new size */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); return 0; } @@ -416,19 +416,22 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq, return ret; } -static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos) +static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, + int mode) { /* - * We use punch hole to reclaim the free space used by the - * image a.k.a. discard. However we do not support discard if - * encryption is enabled, because it may give an attacker - * useful information. + * We use fallocate to manipulate the space mappings used by the image + * a.k.a. discard/zerorange. However we do not support this if + * encryption is enabled, because it may give an attacker useful + * information. */ struct file *file = lo->lo_backing_file; - int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; + struct request_queue *q = lo->lo_queue; int ret; - if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { + mode |= FALLOC_FL_KEEP_SIZE; + + if (!blk_queue_discard(q)) { ret = -EOPNOTSUPP; goto out; } @@ -458,7 +461,7 @@ static void lo_complete_rq(struct request *rq) if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || req_op(rq) != REQ_OP_READ) { if (cmd->ret < 0) - ret = BLK_STS_IOERR; + ret = errno_to_blk_status(cmd->ret); goto end_io; } @@ -596,9 +599,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); - case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: - return lo_discard(lo, rq, pos); + /* + * If the caller doesn't want deallocation, call zeroout to + * write zeroes the range. Otherwise, punch them out. + */ + return lo_fallocate(lo, rq, pos, + (rq->cmd_flags & REQ_NOUNMAP) ? + FALLOC_FL_ZERO_RANGE : + FALLOC_FL_PUNCH_HOLE); + case REQ_OP_DISCARD: + return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); case REQ_OP_WRITE: if (lo->transfer) return lo_write_transfer(lo, rq, pos); @@ -631,18 +642,7 @@ static void loop_reread_partitions(struct loop_device *lo, { int rc; - /* - * bd_mutex has been held already in release path, so don't - * acquire it if this function is called in such case. - * - * If the reread partition isn't from release path, lo_refcnt - * must be at least one and it can only become zero when the - * current holder is released. 
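lo_fallocate() above picks between two fallocate() modes: FALLOC_FL_PUNCH_HOLE deallocates the range for discard, while FALLOC_FL_ZERO_RANGE zeroes it without unmapping when REQ_NOUNMAP is set, both combined with FALLOC_FL_KEEP_SIZE. A minimal userspace demonstration of those two calls against an assumed backing file named backing.img:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("backing.img", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* discard: reclaim the blocks backing bytes 1 MiB .. 2 MiB */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20))
		perror("punch hole");

	/* write zeroes without deallocating (what REQ_NOUNMAP asks for) */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      2 << 20, 1 << 20))
		perror("zero range");

	close(fd);
	return 0;
}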
- */ - if (!atomic_read(&lo->lo_refcnt)) - rc = __blkdev_reread_part(bdev); - else - rc = blkdev_reread_part(bdev); + rc = blkdev_reread_part(bdev); if (rc) pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", __func__, lo->lo_number, lo->lo_file_name, rc); @@ -668,7 +668,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) return -EBADF; l = f->f_mapping->host->i_bdev->bd_disk->private_data; - if (l->lo_state == Lo_unbound) { + if (l->lo_state != Lo_bound) { return -EINVAL; } f = l->lo_backing_file; @@ -689,26 +689,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { - struct file *file, *old_file; + struct file *file = NULL, *old_file; int error; + bool partscan; + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + return error; error = -ENXIO; if (lo->lo_state != Lo_bound) - goto out; + goto out_err; /* the loop device has to be read-only */ error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) - goto out; + goto out_err; error = -EBADF; file = fget(arg); if (!file) - goto out; + goto out_err; error = loop_validate_file(file, bdev); if (error) - goto out_putf; + goto out_err; old_file = lo->lo_backing_file; @@ -716,7 +720,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) - goto out_putf; + goto out_err; /* and ... switch */ blk_mq_freeze_queue(lo->lo_queue); @@ -727,15 +731,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); - + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; + mutex_unlock(&loop_ctl_mutex); + /* + * We must drop file reference outside of loop_ctl_mutex as dropping + * the file ref can take bd_mutex which creates circular locking + * dependency. + */ fput(old_file); - if (lo->lo_flags & LO_FLAGS_PARTSCAN) + if (partscan) loop_reread_partitions(lo, bdev); return 0; - out_putf: - fput(file); - out: +out_err: + mutex_unlock(&loop_ctl_mutex); + if (file) + fput(file); return error; } @@ -853,6 +864,23 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; + + /* + * If the backing device is a block device, mirror its zeroing + * capability. Set the discard sectors to the block device's zeroing + * capabilities because loop discards result in blkdev_issue_zeroout(), + * not blkdev_issue_discard(). This maintains consistent behavior with + * file-backed loop devices: discarded regions read back as zero. + */ + if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) { + struct request_queue *backingq; + + backingq = bdev_get_queue(inode->i_bdev); + + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -860,22 +888,27 @@ static void loop_config_discard(struct loop_device *lo) * encryption is enabled, because it may give an attacker * useful information. 
*/ - if ((!file->f_op->fallocate) || - lo->lo_encrypt_key_size) { + } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { + max_discard_sectors = 0; + granularity = 0; + + } else { + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; + } + + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } else { q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); - return; } - - q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) @@ -886,7 +919,7 @@ static void loop_unprepare_queue(struct loop_device *lo) static int loop_kthread_worker_fn(void *worker_ptr) { - current->flags |= PF_LESS_THROTTLE; + current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO; return kthread_worker_fn(worker_ptr); } @@ -910,6 +943,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, int lo_flags = 0; int error; loff_t size; + bool partscan; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); @@ -919,13 +953,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, if (!file) goto out; + error = mutex_lock_killable(&loop_ctl_mutex); + if (error) + goto out_putf; + error = -EBUSY; if (lo->lo_state != Lo_unbound) - goto out_putf; + goto out_unlock; error = loop_validate_file(file, bdev); if (error) - goto out_putf; + goto out_unlock; mapping = file->f_mapping; inode = mapping->host; @@ -937,10 +975,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, error = -EFBIG; size = get_loop_size(lo, file); if ((loff_t)(sector_t)size != size) - goto out_putf; + goto out_unlock; error = loop_prepare_queue(lo); if (error) - goto out_putf; + goto out_unlock; error = 0; @@ -964,7 +1002,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, bd_set_size(bdev, size << 9); loop_sysfs_init(lo); /* let user-space know about the new size */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); set_blocksize(bdev, S_ISBLK(inode->i_mode) ? block_size(inode->i_bdev) : PAGE_SIZE); @@ -972,18 +1010,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_state = Lo_bound; if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; - if (lo->lo_flags & LO_FLAGS_PARTSCAN) - loop_reread_partitions(lo, bdev); + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; /* Grab the block_device to prevent its destruction after we - * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). + * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). */ bdgrab(bdev); + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); return 0; - out_putf: +out_unlock: + mutex_unlock(&loop_ctl_mutex); +out_putf: fput(file); - out: +out: /* This is safe: open() is still holding a reference. 
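loop_set_fd() above snapshots the partscan decision while loop_ctl_mutex is held and performs the rescan only after dropping the lock, since the rescan can itself take other locks. A pthreads sketch of that snapshot-then-work-outside-the-lock pattern; all names are invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int dev_flags;      /* protected by ctl_mutex */
#define FLAG_PARTSCAN 0x1

static void rescan_partitions(void)
{
	/* stands in for loop_reread_partitions(); may block, takes other locks */
	printf("rescanning partitions\n");
}

static void configure_device(bool want_partscan)
{
	bool partscan;

	pthread_mutex_lock(&ctl_mutex);
	if (want_partscan)
		dev_flags |= FLAG_PARTSCAN;
	partscan = dev_flags & FLAG_PARTSCAN;  /* snapshot while still locked */
	pthread_mutex_unlock(&ctl_mutex);

	if (partscan)                          /* heavy work outside the lock */
		rescan_partitions();
}

int main(void)
{
	configure_device(true);
	return 0;
}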
*/ module_put(THIS_MODULE); return error; @@ -1026,39 +1068,34 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, return err; } -static int loop_clr_fd(struct loop_device *lo) +static int __loop_clr_fd(struct loop_device *lo, bool release) { - struct file *filp = lo->lo_backing_file; + struct file *filp = NULL; gfp_t gfp = lo->old_gfp_mask; struct block_device *bdev = lo->lo_device; + int err = 0; + bool partscan = false; + int lo_number; - if (lo->lo_state != Lo_bound) - return -ENXIO; + mutex_lock(&loop_ctl_mutex); + if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { + err = -ENXIO; + goto out_unlock; + } - /* - * If we've explicitly asked to tear down the loop device, - * and it has an elevated reference count, set it for auto-teardown when - * the last reference goes away. This stops $!~#$@ udev from - * preventing teardown because it decided that it needs to run blkid on - * the loopback device whenever they appear. xfstests is notorious for - * failing tests because blkid via udev races with a losetup - * /do something like mkfs/losetup -d causing the losetup -d - * command to fail with EBUSY. - */ - if (atomic_read(&lo->lo_refcnt) > 1) { - lo->lo_flags |= LO_FLAGS_AUTOCLEAR; - mutex_unlock(&lo->lo_ctl_mutex); - return 0; + filp = lo->lo_backing_file; + if (filp == NULL) { + err = -EINVAL; + goto out_unlock; } - if (filp == NULL) - return -EINVAL; + if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) + blk_queue_write_cache(lo->lo_queue, false, false); /* freeze request queue during the transition */ blk_mq_freeze_queue(lo->lo_queue); spin_lock_irq(&lo->lo_lock); - lo->lo_state = Lo_rundown; lo->lo_backing_file = NULL; spin_unlock_irq(&lo->lo_lock); @@ -1086,29 +1123,95 @@ static int loop_clr_fd(struct loop_device *lo) if (bdev) { bd_set_size(bdev, 0); /* let user-space know about this change */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); } mapping_set_gfp_mask(filp->f_mapping, gfp); - lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) - loop_reread_partitions(lo, bdev); + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; + lo_number = lo->lo_number; + loop_unprepare_queue(lo); +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) { + /* + * bd_mutex has been held already in release path, so don't + * acquire it if this function is called in such case. + * + * If the reread partition isn't from release path, lo_refcnt + * must be at least one and it can only become zero when the + * current holder is released. + */ + if (release) + err = __blkdev_reread_part(bdev); + else + err = blkdev_reread_part(bdev); + if (err) + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", + __func__, lo_number, err); + /* Device is gone, no point in returning error */ + err = 0; + } + + /* + * lo->lo_state is set to Lo_unbound here after above partscan has + * finished. + * + * There cannot be anybody else entering __loop_clr_fd() as + * lo->lo_backing_file is already cleared and Lo_rundown state + * protects us from all the other places trying to change the 'lo' + * device. 
+ */ + mutex_lock(&loop_ctl_mutex); lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; - loop_unprepare_queue(lo); - mutex_unlock(&lo->lo_ctl_mutex); + lo->lo_state = Lo_unbound; + mutex_unlock(&loop_ctl_mutex); + /* - * Need not hold lo_ctl_mutex to fput backing file. - * Calling fput holding lo_ctl_mutex triggers a circular + * Need not hold loop_ctl_mutex to fput backing file. + * Calling fput holding loop_ctl_mutex triggers a circular * lock dependency possibility warning as fput can take - * bd_mutex which is usually taken before lo_ctl_mutex. + * bd_mutex which is usually taken before loop_ctl_mutex. */ - fput(filp); - return 0; + if (filp) + fput(filp); + return err; +} + +static int loop_clr_fd(struct loop_device *lo) +{ + int err; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + if (lo->lo_state != Lo_bound) { + mutex_unlock(&loop_ctl_mutex); + return -ENXIO; + } + /* + * If we've explicitly asked to tear down the loop device, + * and it has an elevated reference count, set it for auto-teardown when + * the last reference goes away. This stops $!~#$@ udev from + * preventing teardown because it decided that it needs to run blkid on + * the loopback device whenever they appear. xfstests is notorious for + * failing tests because blkid via udev races with a losetup + * /do something like mkfs/losetup -d causing the losetup -d + * command to fail with EBUSY. + */ + if (atomic_read(&lo->lo_refcnt) > 1) { + lo->lo_flags |= LO_FLAGS_AUTOCLEAR; + mutex_unlock(&loop_ctl_mutex); + return 0; + } + lo->lo_state = Lo_rundown; + mutex_unlock(&loop_ctl_mutex); + + return __loop_clr_fd(lo, false); } static int @@ -1117,47 +1220,77 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) int err; struct loop_func_table *xfer; kuid_t uid = current_uid(); + struct block_device *bdev; + bool partscan = false; + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; if (lo->lo_encrypt_key_size && !uid_eq(lo->lo_key_owner, uid) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - if (lo->lo_state != Lo_bound) - return -ENXIO; - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) - return -EINVAL; + !capable(CAP_SYS_ADMIN)) { + err = -EPERM; + goto out_unlock; + } + if (lo->lo_state != Lo_bound) { + err = -ENXIO; + goto out_unlock; + } + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) { + err = -EINVAL; + goto out_unlock; + } + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + sync_blockdev(lo->lo_device); + invalidate_bdev(lo->lo_device); + } /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); err = loop_release_xfer(lo); if (err) - goto exit; + goto out_unfreeze; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; if (type >= MAX_LO_CRYPT) { err = -EINVAL; - goto exit; + goto out_unfreeze; } xfer = xfer_funcs[type]; if (xfer == NULL) { err = -EINVAL; - goto exit; + goto out_unfreeze; } } else xfer = NULL; err = loop_init_xfer(lo, xfer, info); if (err) - goto exit; + goto out_unfreeze; if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { + /* kill_bdev should have truncated all the pages */ + if (lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + + /* Avoid assigning overflow 
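loop_clr_fd() above keeps its original policy: if other openers still hold references it only sets LO_FLAGS_AUTOCLEAR, otherwise it moves the device to Lo_rundown and tears it down immediately. A toy restatement of that decision with illustrative types:

#include <stdbool.h>
#include <stdio.h>

enum state { UNBOUND, BOUND, RUNDOWN };

struct dev {
	enum state state;
	int refcnt;        /* open references, including the caller's */
	bool autoclear;
};

static int clear_device(struct dev *d)
{
	if (d->state != BOUND)
		return -1;                /* -ENXIO in the driver */
	if (d->refcnt > 1) {
		d->autoclear = true;      /* the last close finishes the job */
		return 0;
	}
	d->state = RUNDOWN;               /* blocks concurrent reconfiguration */
	/* ... the real teardown (__loop_clr_fd) would run here ... */
	d->state = UNBOUND;
	return 0;
}

int main(void)
{
	struct dev d = { .state = BOUND, .refcnt = 2 };

	clear_device(&d);
	printf("autoclear=%d state=%d\n", d.autoclear, d.state);
	return 0;
}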
values */ + if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) + return -EOVERFLOW; + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { err = -EFBIG; - goto exit; + goto out_unfreeze; } } @@ -1189,15 +1322,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); - exit: +out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_flags |= LO_FLAGS_PARTSCAN; lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; - loop_reread_partitions(lo, lo->lo_device); + bdev = lo->lo_device; + partscan = true; } +out_unlock: + mutex_unlock(&loop_ctl_mutex); + if (partscan) + loop_reread_partitions(lo, bdev); return err; } @@ -1205,12 +1343,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { - struct file *file; + struct path path; struct kstat stat; int ret; + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; if (lo->lo_state != Lo_bound) { - mutex_unlock(&lo->lo_ctl_mutex); + mutex_unlock(&loop_ctl_mutex); return -ENXIO; } @@ -1229,17 +1370,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) lo->lo_encrypt_key_size); } - /* Drop lo_ctl_mutex while we call into the filesystem. */ - file = get_file(lo->lo_backing_file); - mutex_unlock(&lo->lo_ctl_mutex); - ret = vfs_getattr(&file->f_path, &stat, STATX_INO, - AT_STATX_SYNC_AS_STAT); + /* Drop loop_ctl_mutex while we call into the filesystem. */ + path = lo->lo_backing_file->f_path; + path_get(&path); + mutex_unlock(&loop_ctl_mutex); + ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); if (!ret) { info->lo_device = huge_encode_dev(stat.dev); info->lo_inode = stat.ino; info->lo_rdevice = huge_encode_dev(stat.rdev); } - fput(file); + path_put(&path); return ret; } @@ -1323,10 +1464,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); @@ -1341,10 +1480,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; @@ -1376,22 +1513,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) static int loop_set_block_size(struct loop_device *lo, unsigned long arg) { + int err = 0; + if (lo->lo_state != Lo_bound) return -ENXIO; if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) return -EINVAL; + if (lo->lo_queue->limits.logical_block_size != arg) { + sync_blockdev(lo->lo_device); + invalidate_bdev(lo->lo_device); + } + blk_mq_freeze_queue(lo->lo_queue); + /* invalidate_bdev should have truncated all the pages */ + if (lo->lo_queue->limits.logical_block_size != arg && + lo->lo_device->bd_inode->i_mapping->nrpages) { + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + blk_queue_logical_block_size(lo->lo_queue, arg); 
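loop_set_block_size() above accepts only a power-of-two size between 512 bytes and PAGE_SIZE. A userspace version of the same check, reading the page size at run time where the kernel uses its compile-time constant:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool valid_block_size(unsigned long arg)
{
	unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);

	if (arg < 512 || arg > page)
		return false;
	return (arg & (arg - 1)) == 0;   /* power of two; arg != 0 is implied */
}

int main(void)
{
	printf("512:  %d\n", valid_block_size(512));   /* 1 */
	printf("4096: %d\n", valid_block_size(4096));  /* 1 on 4K-page systems */
	printf("3000: %d\n", valid_block_size(3000));  /* 0 */
	return 0;
}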
blk_queue_physical_block_size(lo->lo_queue, arg); blk_queue_io_min(lo->lo_queue, arg); loop_update_dio(lo); - +out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); - return 0; + return err; +} + +static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, + unsigned long arg) +{ + int err; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + switch (cmd) { + case LOOP_SET_CAPACITY: + err = loop_set_capacity(lo); + break; + case LOOP_SET_DIRECT_IO: + err = loop_set_dio(lo, arg); + break; + case LOOP_SET_BLOCK_SIZE: + err = loop_set_block_size(lo, arg); + break; + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } + mutex_unlock(&loop_ctl_mutex); + return err; } static int lo_ioctl(struct block_device *bdev, fmode_t mode, @@ -1400,64 +1579,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, struct loop_device *lo = bdev->bd_disk->private_data; int err; - err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1); - if (err) - goto out_unlocked; - switch (cmd) { case LOOP_SET_FD: - err = loop_set_fd(lo, mode, bdev, arg); - break; + return loop_set_fd(lo, mode, bdev, arg); case LOOP_CHANGE_FD: - err = loop_change_fd(lo, bdev, arg); - break; + return loop_change_fd(lo, bdev, arg); case LOOP_CLR_FD: - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ - err = loop_clr_fd(lo); - if (!err) - goto out_unlocked; - break; + return loop_clr_fd(lo); case LOOP_SET_STATUS: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { err = loop_set_status_old(lo, (struct loop_info __user *)arg); + } break; case LOOP_GET_STATUS: - err = loop_get_status_old(lo, (struct loop_info __user *) arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - goto out_unlocked; + return loop_get_status_old(lo, (struct loop_info __user *) arg); case LOOP_SET_STATUS64: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { err = loop_set_status64(lo, (struct loop_info64 __user *) arg); + } break; case LOOP_GET_STATUS64: - err = loop_get_status64(lo, (struct loop_info64 __user *) arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - goto out_unlocked; + return loop_get_status64(lo, (struct loop_info64 __user *) arg); case LOOP_SET_CAPACITY: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_capacity(lo); - break; case LOOP_SET_DIRECT_IO: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_dio(lo, arg); - break; case LOOP_SET_BLOCK_SIZE: - err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) - err = loop_set_block_size(lo, arg); - break; + if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + /* Fall through */ default: - err = lo->ioctl ? 
lo->ioctl(lo, cmd, arg) : -EINVAL; + err = lo_simple_ioctl(lo, cmd, arg); + break; } - mutex_unlock(&lo->lo_ctl_mutex); -out_unlocked: return err; } @@ -1571,10 +1728,8 @@ loop_get_status_compat(struct loop_device *lo, struct loop_info64 info64; int err; - if (!arg) { - mutex_unlock(&lo->lo_ctl_mutex); + if (!arg) return -EINVAL; - } err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); @@ -1589,20 +1744,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, switch(cmd) { case LOOP_SET_STATUS: - err = mutex_lock_killable(&lo->lo_ctl_mutex); - if (!err) { - err = loop_set_status_compat(lo, - (const struct compat_loop_info __user *)arg); - mutex_unlock(&lo->lo_ctl_mutex); - } + err = loop_set_status_compat(lo, + (const struct compat_loop_info __user *)arg); break; case LOOP_GET_STATUS: - err = mutex_lock_killable(&lo->lo_ctl_mutex); - if (!err) { - err = loop_get_status_compat(lo, - (struct compat_loop_info __user *)arg); - /* loop_get_status() unlocks lo_ctl_mutex */ - } + err = loop_get_status_compat(lo, + (struct compat_loop_info __user *)arg); break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: @@ -1613,6 +1760,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: + case LOOP_SET_DIRECT_IO: err = lo_ioctl(bdev, mode, cmd, arg); break; default: @@ -1626,9 +1774,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo; - int err = 0; + int err; - mutex_lock(&loop_index_mutex); + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; lo = bdev->bd_disk->private_data; if (!lo) { err = -ENXIO; @@ -1637,26 +1787,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode) atomic_inc(&lo->lo_refcnt); out: - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); return err; } -static void __lo_release(struct loop_device *lo) +static void lo_release(struct gendisk *disk, fmode_t mode) { - int err; + struct loop_device *lo; + mutex_lock(&loop_ctl_mutex); + lo = disk->private_data; if (atomic_dec_return(&lo->lo_refcnt)) - return; + goto out_unlock; - mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { + if (lo->lo_state != Lo_bound) + goto out_unlock; + lo->lo_state = Lo_rundown; + mutex_unlock(&loop_ctl_mutex); /* * In autoclear mode, stop the loop thread * and remove configuration after last close. 
*/ - err = loop_clr_fd(lo); - if (!err) - return; + __loop_clr_fd(lo, true); + return; } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, @@ -1666,14 +1820,8 @@ static void __lo_release(struct loop_device *lo) blk_mq_unfreeze_queue(lo->lo_queue); } - mutex_unlock(&lo->lo_ctl_mutex); -} - -static void lo_release(struct gendisk *disk, fmode_t mode) -{ - mutex_lock(&loop_index_mutex); - __lo_release(disk->private_data); - mutex_unlock(&loop_index_mutex); +out_unlock: + mutex_unlock(&loop_ctl_mutex); } static const struct block_device_operations lo_fops = { @@ -1712,10 +1860,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data) struct loop_device *lo = ptr; struct loop_func_table *xfer = data; - mutex_lock(&lo->lo_ctl_mutex); + mutex_lock(&loop_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); - mutex_unlock(&lo->lo_ctl_mutex); + mutex_unlock(&loop_ctl_mutex); return 0; } @@ -1787,7 +1935,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd) failed: /* complete non-aio request */ if (!cmd->use_aio || ret) { - cmd->ret = ret ? -EIO : 0; + if (ret == -EOPNOTSUPP) + cmd->ret = ret; + else + cmd->ret = ret ? -EIO : 0; blk_mq_complete_request(rq); } } @@ -1821,6 +1972,17 @@ static int loop_add(struct loop_device **l, int i) struct gendisk *disk; int err; + /* + * i << part_shift is actually used as the first_minor. + * So here should avoid i << part_shift overflow. + * And, MKDEV() expect that the max bits of + * first_minor is 20. + */ + if (i > 0 && i > MINORMASK >> part_shift) { + err = -EINVAL; + goto out; + } + err = -ENOMEM; lo = kzalloc(sizeof(*lo), GFP_KERNEL); if (!lo) @@ -1834,7 +1996,8 @@ static int loop_add(struct loop_device **l, int i) if (err == -ENOSPC) err = -EEXIST; } else { - err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); + err = idr_alloc(&loop_index_idr, lo, 0, + (MINORMASK >> part_shift) + 1, GFP_KERNEL); } if (err < 0) goto out_free_dev; @@ -1896,7 +2059,6 @@ static int loop_add(struct loop_device **l, int i) if (!part_shift) disk->flags |= GENHD_FL_NO_PART_SCAN; disk->flags |= GENHD_FL_EXT_DEVT; - mutex_init(&lo->lo_ctl_mutex); atomic_set(&lo->lo_refcnt, 0); lo->lo_number = i; spin_lock_init(&lo->lo_lock); @@ -1975,7 +2137,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) struct kobject *kobj; int err; - mutex_lock(&loop_index_mutex); + mutex_lock(&loop_ctl_mutex); err = loop_lookup(&lo, MINOR(dev) >> part_shift); if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); @@ -1983,7 +2145,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) kobj = NULL; else kobj = get_disk_and_module(lo->lo_disk); - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); *part = 0; return kobj; @@ -1993,9 +2155,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { struct loop_device *lo; - int ret = -ENOSYS; + int ret; - mutex_lock(&loop_index_mutex); + ret = mutex_lock_killable(&loop_ctl_mutex); + if (ret) + return ret; + + ret = -ENOSYS; switch (cmd) { case LOOP_CTL_ADD: ret = loop_lookup(&lo, parm); @@ -2009,21 +2175,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, ret = loop_lookup(&lo, parm); if (ret < 0) break; - ret = mutex_lock_killable(&lo->lo_ctl_mutex); - if (ret) - break; if (lo->lo_state != Lo_unbound) { ret = -EBUSY; - mutex_unlock(&lo->lo_ctl_mutex); break; } if (atomic_read(&lo->lo_refcnt) > 0) { ret = -EBUSY; - mutex_unlock(&lo->lo_ctl_mutex); break; } 
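loop_add() above refuses indexes whose i << part_shift would not fit in the 20-bit minor space MKDEV() expects. The sketch below redefines MINORBITS/MINORMASK locally to show the bound:

#include <stdbool.h>
#include <stdio.h>

#define MINORBITS 20
#define MINORMASK ((1U << MINORBITS) - 1)

/* true if i << part_shift still fits in the minor number space */
static bool index_fits(unsigned int i, unsigned int part_shift)
{
	return i <= (MINORMASK >> part_shift);
}

int main(void)
{
	unsigned int part_shift = 4;     /* e.g. max_part=15 rounded up */

	printf("%d\n", index_fits(1000, part_shift));        /* 1 */
	printf("%d\n", index_fits(MINORMASK, part_shift));   /* 0 */
	return 0;
}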
lo->lo_disk->private_data = NULL; - mutex_unlock(&lo->lo_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); loop_remove(lo); break; @@ -2033,7 +2193,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, break; ret = loop_add(&lo, -1); } - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); return ret; } @@ -2117,10 +2277,10 @@ static int __init loop_init(void) THIS_MODULE, loop_probe, NULL, NULL); /* pre-create number of devices given by config or max_loop */ - mutex_lock(&loop_index_mutex); + mutex_lock(&loop_ctl_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); - mutex_unlock(&loop_index_mutex); + mutex_unlock(&loop_ctl_mutex); printk(KERN_INFO "loop: module loaded\n"); return 0; @@ -2145,6 +2305,8 @@ static void __exit loop_exit(void) range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2152,6 +2314,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 4d42c7af7de750e8f72e74bf0edb0b2ce26d2208..af75a5ee409440b24ab633d3a744ed9f8c307256 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -54,7 +54,6 @@ struct loop_device { spinlock_t lo_lock; int lo_state; - struct mutex lo_ctl_mutex; struct kthread_worker worker; struct task_struct *worker_task; bool use_dio; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 14a51254c3db7f19c94cdab62e1d9e192c7ae02f..fbc4cdb5a5b9458244eb20bbada56a91f94d710b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +62,7 @@ struct nbd_sock { struct recv_thread_args { struct work_struct work; struct nbd_device *nbd; + struct nbd_sock *nsock; int index; }; @@ -69,14 +71,16 @@ struct link_dead_args { int index; }; -#define NBD_TIMEDOUT 0 +#define NBD_RT_TIMEDOUT 0 +#define NBD_RT_DISCONNECT_REQUESTED 1 +#define NBD_RT_DISCONNECTED 2 +#define NBD_RT_HAS_PID_FILE 3 +#define NBD_RT_HAS_CONFIG_REF 4 +#define NBD_RT_BOUND 5 +#define NBD_RT_DISCONNECT_ON_CLOSE 6 + +#define NBD_DESTROY_ON_DISCONNECT 0 #define NBD_DISCONNECT_REQUESTED 1 -#define NBD_DISCONNECTED 2 -#define NBD_HAS_PID_FILE 3 -#define NBD_HAS_CONFIG_REF 4 -#define NBD_BOUND 5 -#define NBD_DESTROY_ON_DISCONNECT 6 -#define NBD_DISCONNECT_ON_CLOSE 7 struct nbd_config { u32 flags; @@ -106,13 +110,23 @@ struct nbd_device { struct nbd_config *config; struct mutex config_lock; struct gendisk *disk; + struct workqueue_struct *recv_workq; struct list_head list; - struct task_struct *task_recv; struct task_struct *task_setup; + + struct completion *destroy_complete; + unsigned long flags; + pid_t pid; /* pid of nbd-client, if attached */ }; #define NBD_CMD_REQUEUED 1 +/* + * This flag will be set if nbd_queue_rq() succeed, and will be checked and + * cleared in completion. Both setting and clearing of the flag are protected + * by cmd->lock. 
+ */ +#define NBD_CMD_INFLIGHT 2 struct nbd_cmd { struct nbd_device *nbd; @@ -132,9 +146,10 @@ static struct dentry *nbd_dbg_dir; #define NBD_MAGIC 0x68797548 +#define NBD_DEF_BLKSIZE 1024 + static unsigned int nbds_max = 16; static int max_part = 16; -static struct workqueue_struct *recv_workqueue; static int part_shift; static int nbd_dev_dbg_init(struct nbd_device *nbd); @@ -154,6 +169,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd) { struct request *req = blk_mq_rq_from_pdu(cmd); + lockdep_assert_held(&cmd->lock); + + /* + * Clear INFLIGHT flag so that this cmd won't be completed in + * normal completion path + * + * INFLIGHT flag will be set when the cmd is queued to nbd next + * time. + */ + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) blk_mq_requeue_request(req, true); } @@ -197,7 +223,7 @@ static ssize_t pid_show(struct device *dev, struct gendisk *disk = dev_to_disk(dev); struct nbd_device *nbd = (struct nbd_device *)disk->private_data; - return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); + return sprintf(buf, "%d\n", nbd->pid); } static const struct device_attribute pid_attr = { @@ -218,6 +244,16 @@ static void nbd_dev_remove(struct nbd_device *nbd) disk->private_data = NULL; put_disk(disk); } + + /* + * Place this in the last just before the nbd is freed to + * make sure that the disk and the related kobject are also + * totally removed to avoid duplicate creation of the same + * one. + */ + if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete) + complete(nbd->destroy_complete); + kfree(nbd); } @@ -226,15 +262,15 @@ static void nbd_put(struct nbd_device *nbd) if (refcount_dec_and_mutex_lock(&nbd->refs, &nbd_index_mutex)) { idr_remove(&nbd_index_idr, nbd->index); - mutex_unlock(&nbd_index_mutex); nbd_dev_remove(nbd); + mutex_unlock(&nbd_index_mutex); } } static int nbd_disconnected(struct nbd_config *config) { - return test_bit(NBD_DISCONNECTED, &config->runtime_flags) || - test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); + return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) || + test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); } static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, @@ -252,9 +288,9 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, if (!nsock->dead) { kernel_sock_shutdown(nsock->sock, SHUT_RDWR); if (atomic_dec_return(&nbd->config->live_connections) == 0) { - if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED, + if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED, &nbd->config->runtime_flags)) { - set_bit(NBD_DISCONNECTED, + set_bit(NBD_RT_DISCONNECTED, &nbd->config->runtime_flags); dev_info(nbd_to_dev(nbd), "Disconnected due to user request.\n"); @@ -274,37 +310,44 @@ static void nbd_size_clear(struct nbd_device *nbd) } } -static void nbd_size_update(struct nbd_device *nbd) +static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, + loff_t blksize) { - struct nbd_config *config = nbd->config; - struct block_device *bdev = bdget_disk(nbd->disk, 0); + struct block_device *bdev; + + if (!blksize) + blksize = NBD_DEF_BLKSIZE; + if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize)) + return -EINVAL; + + if (bytesize < 0) + return -EINVAL; + + nbd->config->bytesize = bytesize; + nbd->config->blksize = blksize; - if (config->flags & NBD_FLAG_SEND_TRIM) { - nbd->disk->queue->limits.discard_granularity = config->blksize; - nbd->disk->queue->limits.discard_alignment = 
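The NBD_CMD_INFLIGHT rules above mean the flag is set under cmd->lock once a send really succeeded, and every completion, requeue, or timeout path test-and-clears it under the same lock so a command is completed at most once. A pthreads model of that ownership handoff, with invented struct fields rather than the driver's real ones:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	pthread_mutex_t lock;
	bool inflight;
};

static void cmd_sent(struct cmd *c)
{
	pthread_mutex_lock(&c->lock);
	c->inflight = true;               /* only after the send succeeded */
	pthread_mutex_unlock(&c->lock);
}

/* returns true if this caller owns the completion */
static bool cmd_try_complete(struct cmd *c)
{
	bool owned;

	pthread_mutex_lock(&c->lock);
	owned = c->inflight;
	c->inflight = false;
	pthread_mutex_unlock(&c->lock);
	return owned;
}

int main(void)
{
	struct cmd c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	cmd_sent(&c);
	printf("reply path completes:   %d\n", cmd_try_complete(&c)); /* 1 */
	printf("timeout path completes: %d\n", cmd_try_complete(&c)); /* 0 */
	return 0;
}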
config->blksize; + if (!nbd->pid) + return 0; + + if (nbd->config->flags & NBD_FLAG_SEND_TRIM) { + nbd->disk->queue->limits.discard_granularity = blksize; + nbd->disk->queue->limits.discard_alignment = blksize; blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); } - blk_queue_logical_block_size(nbd->disk->queue, config->blksize); - blk_queue_physical_block_size(nbd->disk->queue, config->blksize); - set_capacity(nbd->disk, config->bytesize >> 9); + blk_queue_logical_block_size(nbd->disk->queue, blksize); + blk_queue_physical_block_size(nbd->disk->queue, blksize); + + set_capacity(nbd->disk, bytesize >> 9); + bdev = bdget_disk(nbd->disk, 0); if (bdev) { if (bdev->bd_disk) - bd_set_size(bdev, config->bytesize); + bd_set_size(bdev, bytesize); else bdev->bd_invalidated = 1; bdput(bdev); } kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); -} - -static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, - loff_t nr_blocks) -{ - struct nbd_config *config = nbd->config; - config->blksize = blocksize; - config->bytesize = blocksize * nr_blocks; - if (nbd->task_recv != NULL) - nbd_size_update(nbd); + return 0; } static void nbd_complete_rq(struct request *req) @@ -327,7 +370,7 @@ static void sock_shutdown(struct nbd_device *nbd) if (config->num_connections == 0) return; - if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags)) + if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) return; for (i = 0; i < config->num_connections; i++) { @@ -339,6 +382,22 @@ static void sock_shutdown(struct nbd_device *nbd) dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); } +static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd) +{ + if (refcount_inc_not_zero(&nbd->config_refs)) { + /* + * Add smp_mb__after_atomic to ensure that reading + * nbd->config_refs and reading nbd->config is ordered. + * The pair is the barrier in nbd_alloc_and_init_config(), + * avoid nbd->config_refs is set before nbd->config. 
+ */ + smp_mb__after_atomic(); + return nbd->config; + } + + return NULL; +} + static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, bool reserved) { @@ -346,14 +405,21 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, struct nbd_device *nbd = cmd->nbd; struct nbd_config *config; - if (!refcount_inc_not_zero(&nbd->config_refs)) { + if (!mutex_trylock(&cmd->lock)) + return BLK_EH_RESET_TIMER; + + if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { + mutex_unlock(&cmd->lock); + return BLK_EH_DONE; + } + + config = nbd_get_config_unlocked(nbd); + if (!config) { cmd->status = BLK_STS_TIMEOUT; + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); + mutex_unlock(&cmd->lock); goto done; } - config = nbd->config; - - if (!mutex_trylock(&cmd->lock)) - return BLK_EH_RESET_TIMER; if (config->num_connections > 1) { dev_err_ratelimited(nbd_to_dev(nbd), @@ -379,8 +445,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - mutex_unlock(&cmd->lock); nbd_requeue_cmd(cmd); + mutex_unlock(&cmd->lock); nbd_config_put(nbd); return BLK_EH_DONE; } @@ -388,8 +454,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); } - set_bit(NBD_TIMEDOUT, &config->runtime_flags); + set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); cmd->status = BLK_STS_IOERR; + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); mutex_unlock(&cmd->lock); sock_shutdown(nbd); nbd_config_put(nbd); @@ -398,14 +465,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, return BLK_EH_DONE; } -/* - * Send or receive packet. - */ -static int sock_xmit(struct nbd_device *nbd, int index, int send, - struct iov_iter *iter, int msg_flags, int *sent) +static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, + struct iov_iter *iter, int msg_flags, int *sent) { - struct nbd_config *config = nbd->config; - struct socket *sock = config->socks[index]->sock; int result; struct msghdr msg; unsigned int noreclaim_flag; @@ -447,6 +509,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, return result; } +/* + * Send or receive packet. Return a positive value on success and + * negtive value on failure, and never return 0. + */ +static int sock_xmit(struct nbd_device *nbd, int index, int send, + struct iov_iter *iter, int msg_flags, int *sent) +{ + struct nbd_config *config = nbd->config; + struct socket *sock = config->socks[index]->sock; + + return __sock_xmit(nbd, sock, send, iter, msg_flags, sent); +} + /* * Different settings for sk->sk_sndtimeo can result in different return values * if there is a signal pending when we enter sendmsg, because reasons? @@ -530,7 +605,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, index, 1, &from, (type == NBD_CMD_WRITE) ? 
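nbd_get_config_unlocked() above pairs an acquire-style barrier after the reference bump with a release on the publishing side so nbd->config is never read before it is fully set up. The C11-atomics sketch below models that publish/consume pairing; it is an illustration of the ordering argument, not the kernel's refcount_t.

#include <stdatomic.h>
#include <stdio.h>

struct config { int bytesize; };

static struct config cfg_storage;
static struct config *cfg;                 /* published pointer */
static atomic_int cfg_refs;

static void publish_config(int bytesize)
{
	cfg_storage.bytesize = bytesize;
	cfg = &cfg_storage;                /* initialise fully... */
	atomic_store_explicit(&cfg_refs, 1, memory_order_release); /* ...then publish */
}

/* inc-not-zero with acquire ordering: only dereference after a reference is held */
static struct config *get_config(void)
{
	int refs = atomic_load_explicit(&cfg_refs, memory_order_acquire);

	while (refs > 0) {
		if (atomic_compare_exchange_weak_explicit(&cfg_refs, &refs, refs + 1,
							  memory_order_acquire,
							  memory_order_acquire))
			return cfg;        /* acquire pairs with the release above */
	}
	return NULL;                       /* not configured (refs == 0) */
}

int main(void)
{
	publish_config(1 << 20);
	struct config *c = get_config();

	printf("%s\n", c ? "got config" : "no config");
	return 0;
}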
MSG_MORE : 0, &sent); - if (result <= 0) { + if (result < 0) { if (was_interrupted(result)) { /* If we havne't sent anything we can just return BUSY, * however if we have sent something we need to make @@ -575,7 +650,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) skip = 0; } result = sock_xmit(nbd, index, 1, &from, flags, &sent); - if (result <= 0) { + if (result < 0) { if (was_interrupted(result)) { /* We've already sent the header, we * have no choice but to set pending and @@ -608,38 +683,45 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return 0; } -/* NULL returned = something went wrong, inform userspace */ -static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) +static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, + struct nbd_reply *reply) { - struct nbd_config *config = nbd->config; - int result; - struct nbd_reply reply; - struct nbd_cmd *cmd; - struct request *req = NULL; - u64 handle; - u16 hwq; - u32 tag; - struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; + struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)}; struct iov_iter to; - int ret = 0; + int result; - reply.magic = 0; - iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); - result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); - if (result <= 0) { - if (!nbd_disconnected(config)) + reply->magic = 0; + iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(*reply)); + result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL); + if (result < 0) { + if (!nbd_disconnected(nbd->config)) dev_err(disk_to_dev(nbd->disk), "Receive control failed (result %d)\n", result); - return ERR_PTR(result); + return result; } - if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { + if (ntohl(reply->magic) != NBD_REPLY_MAGIC) { dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", - (unsigned long)ntohl(reply.magic)); - return ERR_PTR(-EPROTO); + (unsigned long)ntohl(reply->magic)); + return -EPROTO; } - memcpy(&handle, reply.handle, sizeof(handle)); + return 0; +} + +/* NULL returned = something went wrong, inform userspace */ +static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index, + struct nbd_reply *reply) +{ + int result; + struct nbd_cmd *cmd; + struct request *req = NULL; + u64 handle; + u16 hwq; + u32 tag; + int ret = 0; + + memcpy(&handle, reply->handle, sizeof(handle)); tag = nbd_handle_to_tag(handle); hwq = blk_mq_unique_tag_to_hwq(tag); if (hwq < nbd->tag_set.nr_hw_queues) @@ -653,21 +735,37 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) cmd = blk_mq_rq_to_pdu(req); mutex_lock(&cmd->lock); + if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { + dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)", + tag, cmd->status, cmd->flags); + ret = -ENOENT; + goto out; + } + if (cmd->index != index) { + dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)", + tag, index, cmd->index); + } if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); ret = -ENOENT; goto out; } + if (cmd->status != BLK_STS_OK) { + dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", + req); + ret = -ENOENT; + goto out; + } if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", req); ret = -ENOENT; goto out; } 
- if (ntohl(reply.error)) { + if (ntohl(reply->error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", - ntohl(reply.error)); + ntohl(reply->error)); cmd->status = BLK_STS_IOERR; goto out; } @@ -676,12 +774,13 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) if (rq_data_dir(req) != WRITE) { struct req_iterator iter; struct bio_vec bvec; + struct iov_iter to; rq_for_each_segment(bvec, req, iter) { iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, bvec.bv_len); result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); - if (result <= 0) { + if (result < 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); /* @@ -691,8 +790,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) * and let the timeout stuff handle resubmitting * this request onto another connection. */ - if (nbd_disconnected(config) || - config->num_connections <= 1) { + if (nbd_disconnected(nbd->config) || + nbd->config->num_connections <= 1) { cmd->status = BLK_STS_IOERR; goto out; } @@ -704,6 +803,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) } } out: + if (!ret) + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); mutex_unlock(&cmd->lock); return ret ? ERR_PTR(ret) : cmd; } @@ -715,24 +816,45 @@ static void recv_work(struct work_struct *work) work); struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; + struct request_queue *q = nbd->disk->queue; + struct nbd_sock *nsock = args->nsock; struct nbd_cmd *cmd; while (1) { - cmd = nbd_read_stat(nbd, args->index); - if (IS_ERR(cmd)) { - struct nbd_sock *nsock = config->socks[args->index]; + struct nbd_reply reply; - mutex_lock(&nsock->tx_lock); - nbd_mark_nsock_dead(nbd, nsock, 1); - mutex_unlock(&nsock->tx_lock); + if (nbd_read_reply(nbd, nsock->sock, &reply)) + break; + + /* + * Grab .q_usage_counter so request pool won't go away, then no + * request use-after-free is possible during nbd_handle_reply(). + * If queue is frozen, there won't be any inflight requests, we + * needn't to handle the incoming garbage message. 
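nbd_handle_reply() above recovers the block-layer tag and a per-send cookie from the 64-bit handle echoed by the server, and rejects replies whose cookie no longer matches. The exact bit layout is an assumption in the sketch below (32 bits each), used only to show why a stale reply for a reused tag is detectable:

#include <stdint.h>
#include <stdio.h>

static uint64_t make_handle(uint32_t tag, uint32_t cookie)
{
	return ((uint64_t)cookie << 32) | tag;
}

static uint32_t handle_to_tag(uint64_t handle)    { return (uint32_t)handle; }
static uint32_t handle_to_cookie(uint64_t handle) { return (uint32_t)(handle >> 32); }

int main(void)
{
	uint64_t h = make_handle(42, 7);

	printf("tag=%u cookie=%u\n", handle_to_tag(h), handle_to_cookie(h));
	/* a reply carrying cookie 6 for tag 42 would be rejected as stale */
	return 0;
}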
+ */ + if (!percpu_ref_tryget(&q->q_usage_counter)) { + dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n", + __func__); + break; + } + + cmd = nbd_handle_reply(nbd, args->index, &reply); + if (IS_ERR(cmd)) { + percpu_ref_put(&q->q_usage_counter); break; } blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); + percpu_ref_put(&q->q_usage_counter); } + + mutex_lock(&nsock->tx_lock); + nbd_mark_nsock_dead(nbd, nsock, 1); + mutex_unlock(&nsock->tx_lock); + + nbd_config_put(nbd); atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); - nbd_config_put(nbd); kfree(args); } @@ -740,7 +862,14 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); + mutex_lock(&cmd->lock); + if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { + mutex_unlock(&cmd->lock); + return; + } cmd->status = BLK_STS_IOERR; + mutex_unlock(&cmd->lock); + blk_mq_complete_request(req); } @@ -759,7 +888,7 @@ static int find_fallback(struct nbd_device *nbd, int index) struct nbd_sock *nsock = config->socks[index]; int fallback = nsock->fallback_index; - if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) + if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) return new_index; if (config->num_connections <= 1) { @@ -800,7 +929,7 @@ static int wait_for_reconnect(struct nbd_device *nbd) struct nbd_config *config = nbd->config; if (!config->dead_conn_timeout) return 0; - if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) + if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) return 0; return wait_event_timeout(config->conn_wait, atomic_read(&config->live_connections) > 0, @@ -815,19 +944,17 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) struct nbd_sock *nsock; int ret; - if (!refcount_inc_not_zero(&nbd->config_refs)) { + config = nbd_get_config_unlocked(nbd); + if (!config) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Socks array is empty\n"); - blk_mq_start_request(req); return -EINVAL; } - config = nbd->config; if (index >= config->num_connections) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); nbd_config_put(nbd); - blk_mq_start_request(req); return -EINVAL; } cmd->status = BLK_STS_OK; @@ -851,7 +978,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) */ sock_shutdown(nbd); nbd_config_put(nbd); - blk_mq_start_request(req); return -EIO; } goto again; @@ -873,7 +999,13 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) * returns EAGAIN can be retried on a different socket. */ ret = nbd_send_cmd(nbd, cmd, index); - if (ret == -EAGAIN) { + /* + * Access to this flag is protected by cmd->lock, thus it's safe to set + * the flag after nbd_send_cmd() succeed to send request to server. 
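recv_work() above only decodes a reply while holding a reference on q_usage_counter, and taking that reference fails once the queue is frozen, so a late or garbage reply cannot touch a freed request. A deliberately simplified, single-threaded model of that guard (the real counter is per-CPU and freezing waits for it to drain):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	bool frozen;
	int usage;       /* outstanding references */
};

static bool queue_tryget(struct queue *q)
{
	if (q->frozen)
		return false;
	q->usage++;
	return true;
}

static void queue_put(struct queue *q)
{
	q->usage--;
}

static void handle_reply(struct queue *q, int tag)
{
	if (!queue_tryget(q)) {
		fprintf(stderr, "no I/O in flight, dropping reply %d\n", tag);
		return;
	}
	printf("completing request %d\n", tag);   /* safe: request pool pinned */
	queue_put(q);
}

int main(void)
{
	struct queue q = { .frozen = false };

	handle_reply(&q, 1);
	q.frozen = true;
	handle_reply(&q, 2);
	return 0;
}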
+ */ + if (!ret) + __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); + else if (ret == -EAGAIN) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); @@ -919,6 +1051,26 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } +static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, + int *err) +{ + struct socket *sock; + + *err = 0; + sock = sockfd_lookup(fd, err); + if (!sock) + return NULL; + + if (sock->ops->shutdown == sock_no_shutdown) { + dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); + *err = -EINVAL; + sockfd_put(sock); + return NULL; + } + + return sock; +} + static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, bool netlink) { @@ -928,33 +1080,44 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, struct nbd_sock *nsock; int err; - sock = sockfd_lookup(arg, &err); + /* Arg will be cast to int, check it to avoid overflow */ + if (arg > INT_MAX) + return -EINVAL; + sock = nbd_get_socket(nbd, arg, &err); if (!sock) return err; + /* + * We need to make sure we don't get any errant requests while we're + * reallocating the ->socks array. + */ + blk_mq_freeze_queue(nbd->disk->queue); + if (!netlink && !nbd->task_setup && - !test_bit(NBD_BOUND, &config->runtime_flags)) + !test_bit(NBD_RT_BOUND, &config->runtime_flags)) nbd->task_setup = current; if (!netlink && (nbd->task_setup != current || - test_bit(NBD_BOUND, &config->runtime_flags))) { + test_bit(NBD_RT_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; - } - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; @@ -968,8 +1131,14 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, nsock->cookie = 0; socks[config->num_connections++] = nsock; atomic_inc(&config->live_connections); + blk_mq_unfreeze_queue(nbd->disk->queue); return 0; + +put_socket: + blk_mq_unfreeze_queue(nbd->disk->queue); + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) @@ -980,7 +1149,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) int i; int err; - sock = sockfd_lookup(arg, &err); + sock = nbd_get_socket(nbd, arg, &err); if (!sock) return err; @@ -1013,16 +1182,17 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) INIT_WORK(&args->work, recv_work); args->index = i; args->nbd = nbd; + args->nsock = nsock; nsock->cookie++; mutex_unlock(&nsock->tx_lock); sockfd_put(old); - clear_bit(NBD_DISCONNECTED, &config->runtime_flags); + clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); /* We take the tx_mutex in an error path in the recv_work, so we * need to queue_work outside of the tx_mutex. 
*/ - queue_work(recv_workqueue, &args->work); + queue_work(nbd->recv_workq, &args->work); atomic_inc(&config->live_connections); wake_up(&config->conn_wait); @@ -1076,7 +1246,7 @@ static void send_disconnects(struct nbd_device *nbd) iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); mutex_lock(&nsock->tx_lock); ret = sock_xmit(nbd, i, 1, &from, 0, NULL); - if (ret <= 0) + if (ret < 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret); mutex_unlock(&nsock->tx_lock); @@ -1088,7 +1258,8 @@ static int nbd_disconnect(struct nbd_device *nbd) struct nbd_config *config = nbd->config; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); - set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); + set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); + set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); send_disconnects(nbd); return 0; } @@ -1107,10 +1278,10 @@ static void nbd_config_put(struct nbd_device *nbd) struct nbd_config *config = nbd->config; nbd_dev_dbg_close(nbd); nbd_size_clear(nbd); - if (test_and_clear_bit(NBD_HAS_PID_FILE, + if (test_and_clear_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags)) device_remove_file(disk_to_dev(nbd->disk), &pid_attr); - nbd->task_recv = NULL; + nbd->pid = 0; nbd_clear_sock(nbd); if (config->num_connections) { int i; @@ -1123,6 +1294,10 @@ static void nbd_config_put(struct nbd_device *nbd) kfree(nbd->config); nbd->config = NULL; + if (nbd->recv_workq) + destroy_workqueue(nbd->recv_workq); + nbd->recv_workq = NULL; + nbd->tag_set.timeout = 0; nbd->disk->queue->limits.discard_granularity = 0; nbd->disk->queue->limits.discard_alignment = 0; @@ -1141,7 +1316,7 @@ static int nbd_start_device(struct nbd_device *nbd) int num_connections = config->num_connections; int error = 0, i; - if (nbd->task_recv) + if (nbd->pid) return -EBUSY; if (!config->socks) return -EINVAL; @@ -1151,8 +1326,16 @@ static int nbd_start_device(struct nbd_device *nbd) return -EINVAL; } + nbd->recv_workq = alloc_workqueue("knbd%d-recv", + WQ_MEM_RECLAIM | WQ_HIGHPRI | + WQ_UNBOUND, 0, nbd->index); + if (!nbd->recv_workq) { + dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); + return -ENOMEM; + } + blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); - nbd->task_recv = current; + nbd->pid = task_pid_nr(current); nbd_parse_flags(nbd); @@ -1161,7 +1344,7 @@ static int nbd_start_device(struct nbd_device *nbd) dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); return error; } - set_bit(NBD_HAS_PID_FILE, &config->runtime_flags); + set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags); nbd_dev_dbg_init(nbd); for (i = 0; i < num_connections; i++) { @@ -1170,6 +1353,16 @@ static int nbd_start_device(struct nbd_device *nbd) args = kzalloc(sizeof(*args), GFP_KERNEL); if (!args) { sock_shutdown(nbd); + /* + * If num_connections is m (2 < m), + * and NO.1 ~ NO.n(1 < n < m) kzallocs are successful. + * But NO.(n + 1) failed. We still have n recv threads. + * So, add flush_workqueue here to prevent recv threads + * dropping the last config_refs and trying to destroy + * the workqueue from inside the workqueue. 
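[Illustrative note] These hunks replace the global recv_workqueue with a per-device nbd->recv_workq and are careful to flush it from process context before the last config reference can go away, so a receive worker never ends up destroying the workqueue it is running on. A rough sketch of that lifecycle, with my_dev/my_start/my_stop as invented names:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct	*recv_workq;
	struct work_struct	recv_work;
	int			index;
};

static int my_start(struct my_dev *dev)
{
	dev->recv_workq = alloc_workqueue("my%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, dev->index);
	if (!dev->recv_workq)
		return -ENOMEM;

	queue_work(dev->recv_workq, &dev->recv_work);
	return 0;
}

static void my_stop(struct my_dev *dev)
{
	/* Wait for in-flight receive work from process context first ... */
	flush_workqueue(dev->recv_workq);
	/* ... then tear the workqueue down outside of any work item. */
	destroy_workqueue(dev->recv_workq);
	dev->recv_workq = NULL;
}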
+ */ + if (i) + flush_workqueue(nbd->recv_workq); return -ENOMEM; } sk_set_memalloc(config->socks[i]->sock->sk); @@ -1180,11 +1373,11 @@ static int nbd_start_device(struct nbd_device *nbd) refcount_inc(&nbd->config_refs); INIT_WORK(&args->work, recv_work); args->nbd = nbd; + args->nsock = config->socks[i]; args->index = i; - queue_work(recv_workqueue, &args->work); + queue_work(nbd->recv_workq, &args->work); } - nbd_size_update(nbd); - return error; + return nbd_set_size(nbd, config->bytesize, config->blksize); } static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) @@ -1201,14 +1394,21 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); + + /* + * recv_work in flush_workqueue will not get this lock, because nbd_open + * will hold nbd->config_refs + */ + mutex_lock(&nbd->config_lock); if (ret) sock_shutdown(nbd); - mutex_lock(&nbd->config_lock); + flush_workqueue(nbd->recv_workq); + nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ - if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) + if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags)) ret = 0; - if (test_bit(NBD_TIMEDOUT, &config->runtime_flags)) + if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags)) ret = -ETIMEDOUT; return ret; } @@ -1216,10 +1416,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b static void nbd_clear_sock_ioctl(struct nbd_device *nbd, struct block_device *bdev) { - sock_shutdown(nbd); - kill_bdev(bdev); + nbd_clear_sock(nbd); + __invalidate_device(bdev, true); nbd_bdev_reset(bdev); - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, &nbd->config->runtime_flags)) nbd_config_put(nbd); } @@ -1229,6 +1429,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, unsigned int cmd, unsigned long arg) { struct nbd_config *config = nbd->config; + loff_t bytesize; switch (cmd) { case NBD_DISCONNECT: @@ -1239,19 +1440,13 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, case NBD_SET_SOCK: return nbd_add_socket(nbd, arg, false); case NBD_SET_BLKSIZE: - if (!arg || !is_power_of_2(arg) || arg < 512 || - arg > PAGE_SIZE) - return -EINVAL; - nbd_size_set(nbd, arg, - div_s64(config->bytesize, arg)); - return 0; + return nbd_set_size(nbd, config->bytesize, arg); case NBD_SET_SIZE: - nbd_size_set(nbd, config->blksize, - div_s64(arg, config->blksize)); - return 0; + return nbd_set_size(nbd, arg, config->blksize); case NBD_SET_SIZE_BLOCKS: - nbd_size_set(nbd, config->blksize, arg); - return 0; + if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize)) + return -EINVAL; + return nbd_set_size(nbd, bytesize, config->blksize); case NBD_SET_TIMEOUT: if (arg) { nbd->tag_set.timeout = arg * HZ; @@ -1301,7 +1496,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, /* Don't allow ioctl operations on a nbd device that was created with * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine. 
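[Illustrative note] The NBD_SET_SIZE_BLOCKS change above stops trusting the multiplication of a user-supplied block count by the configured block size. A small sketch of the overflow-checked conversion (helper name is illustrative):

#include <linux/overflow.h>
#include <linux/types.h>

static int my_blocks_to_bytes(unsigned long nr_blocks, loff_t blksize,
			      loff_t *bytesize)
{
	/* check_mul_overflow() returns true when the product wraps. */
	if (check_mul_overflow((loff_t)nr_blocks, blksize, bytesize))
		return -EINVAL;
	return 0;
}

With a 4096-byte block size, for example, a block count whose product no longer fits in loff_t is rejected instead of silently wrapping into a bogus device size.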
*/ - if (!test_bit(NBD_BOUND, &config->runtime_flags) || + if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK)) error = __nbd_ioctl(bdev, nbd, cmd, arg); else @@ -1310,25 +1505,45 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, return error; } -static struct nbd_config *nbd_alloc_config(void) +static int nbd_alloc_and_init_config(struct nbd_device *nbd) { struct nbd_config *config; + if (WARN_ON(nbd->config)) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); - if (!config) - return NULL; + if (!config) { + module_put(THIS_MODULE); + return -ENOMEM; + } + atomic_set(&config->recv_threads, 0); init_waitqueue_head(&config->recv_wq); init_waitqueue_head(&config->conn_wait); - config->blksize = 1024; + config->blksize = NBD_DEF_BLKSIZE; atomic_set(&config->live_connections, 0); - try_module_get(THIS_MODULE); - return config; + + nbd->config = config; + /* + * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment, + * its pair is the barrier in nbd_get_config_unlocked(). + * So nbd_get_config_unlocked() won't see nbd->config as null after + * refcount_inc_not_zero() succeed. + */ + smp_mb__before_atomic(); + refcount_set(&nbd->config_refs, 1); + + return 0; } static int nbd_open(struct block_device *bdev, fmode_t mode) { struct nbd_device *nbd; + struct nbd_config *config; int ret = 0; mutex_lock(&nbd_index_mutex); @@ -1341,25 +1556,24 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) ret = -ENXIO; goto out; } - if (!refcount_inc_not_zero(&nbd->config_refs)) { - struct nbd_config *config; + config = nbd_get_config_unlocked(nbd); + if (!config) { mutex_lock(&nbd->config_lock); if (refcount_inc_not_zero(&nbd->config_refs)) { mutex_unlock(&nbd->config_lock); goto out; } - config = nbd->config = nbd_alloc_config(); - if (!config) { - ret = -ENOMEM; + ret = nbd_alloc_and_init_config(nbd); + if (ret) { mutex_unlock(&nbd->config_lock); goto out; } - refcount_set(&nbd->config_refs, 1); + refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); bdev->bd_invalidated = 1; - } else if (nbd_disconnected(nbd->config)) { + } else if (nbd_disconnected(config)) { bdev->bd_invalidated = 1; } out: @@ -1372,9 +1586,10 @@ static void nbd_release(struct gendisk *disk, fmode_t mode) struct nbd_device *nbd = disk->private_data; struct block_device *bdev = bdget_disk(disk, 0); - if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && + if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && bdev->bd_openers == 0) nbd_disconnect_and_put(nbd); + bdput(bdev); nbd_config_put(nbd); nbd_put(nbd); @@ -1395,8 +1610,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) { struct nbd_device *nbd = s->private; - if (nbd->task_recv) - seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); + if (nbd->pid) + seq_printf(s, "recv: %d\n", nbd->pid); return 0; } @@ -1556,7 +1771,8 @@ static int nbd_dev_add(int index) if (err == -ENOSPC) err = -EEXIST; } else { - err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); + err = idr_alloc(&nbd_index_idr, nbd, 0, + (MINORMASK >> part_shift) + 1, GFP_KERNEL); if (err >= 0) index = err; } @@ -1573,6 +1789,7 @@ static int nbd_dev_add(int index) nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; nbd->tag_set.driver_data = nbd; + nbd->destroy_complete = NULL; err = blk_mq_alloc_tag_set(&nbd->tag_set); if (err) @@ -1663,6 +1880,7 @@ 
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = { static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) { + DECLARE_COMPLETION_ONSTACK(destroy_complete); struct nbd_device *nbd = NULL; struct nbd_config *config; int index = -1; @@ -1672,8 +1890,20 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; - if (info->attrs[NBD_ATTR_INDEX]) + if (info->attrs[NBD_ATTR_INDEX]) { index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); + + /* + * Too big first_minor can cause duplicate creation of + * sysfs files/links, since index << part_shift might + * overflow, or MKDEV() expect that the max bits of + * first_minor is 20. + */ + if (index < 0 || index > MINORMASK >> part_shift) { + printk(KERN_ERR "nbd: illegal input index %d\n", index); + return -EINVAL; + } + } if (!info->attrs[NBD_ATTR_SOCKETS]) { printk(KERN_ERR "nbd: must specify at least one socket\n"); return -EINVAL; @@ -1714,6 +1944,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) mutex_unlock(&nbd_index_mutex); return -EINVAL; } + + if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && + test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) { + nbd->destroy_complete = &destroy_complete; + mutex_unlock(&nbd_index_mutex); + + /* Wait untill the the nbd stuff is totally destroyed */ + wait_for_completion(&destroy_complete); + goto again; + } + if (!refcount_inc_not_zero(&nbd->refs)) { mutex_unlock(&nbd_index_mutex); if (index == -1) @@ -1733,30 +1974,29 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) printk(KERN_ERR "nbd: nbd%d already in use\n", index); return -EBUSY; } - if (WARN_ON(nbd->config)) { - mutex_unlock(&nbd->config_lock); - nbd_put(nbd); - return -EINVAL; - } - config = nbd->config = nbd_alloc_config(); - if (!nbd->config) { + + ret = nbd_alloc_and_init_config(nbd); + if (ret) { mutex_unlock(&nbd->config_lock); nbd_put(nbd); printk(KERN_ERR "nbd: couldn't allocate config\n"); - return -ENOMEM; + return ret; } - refcount_set(&nbd->config_refs, 1); - set_bit(NBD_BOUND, &config->runtime_flags); + config = nbd->config; + set_bit(NBD_RT_BOUND, &config->runtime_flags); if (info->attrs[NBD_ATTR_SIZE_BYTES]) { u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); - nbd_size_set(nbd, config->blksize, - div64_u64(bytes, config->blksize)); + ret = nbd_set_size(nbd, bytes, config->blksize); + if (ret) + goto out; } if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) { u64 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); - nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize)); + ret = nbd_set_size(nbd, config->bytesize, bsize); + if (ret) + goto out; } if (info->attrs[NBD_ATTR_TIMEOUT]) { u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]); @@ -1774,12 +2014,24 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { - set_bit(NBD_DESTROY_ON_DISCONNECT, - &config->runtime_flags); - put_dev = true; + /* + * We have 1 ref to keep the device around, and then 1 + * ref for our current operation here, which will be + * inherited by the config. If we already have + * DESTROY_ON_DISCONNECT set then we know we don't have + * that extra ref already held so we don't need the + * put_dev. 
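[Illustrative note] nbd_alloc_and_init_config() and nbd_get_config_unlocked(), used in the hunks above, form a publish/consume pair: the writer stores nbd->config before making the refcount non-zero, and the lockless reader takes a reference before loading the pointer, so a reader that saw a non-zero refcount also sees the pointer. A minimal sketch of that ordering with invented my_dev/my_cfg types:

#include <linux/atomic.h>
#include <linux/refcount.h>

struct my_cfg {
	int placeholder;
};

struct my_dev {
	refcount_t	config_refs;
	struct my_cfg	*config;
};

static void my_publish_config(struct my_dev *dev, struct my_cfg *cfg)
{
	dev->config = cfg;
	/* Order the pointer store before the refcount becoming non-zero. */
	smp_mb__before_atomic();
	refcount_set(&dev->config_refs, 1);
}

static struct my_cfg *my_get_config(struct my_dev *dev)
{
	if (!refcount_inc_not_zero(&dev->config_refs))
		return NULL;
	/* Pairs with the barrier in my_publish_config(). */
	smp_mb__after_atomic();
	return dev->config;
}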
+ */ + if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) + put_dev = true; + } else { + if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, + &nbd->flags)) + refcount_inc(&nbd->refs); } if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { - set_bit(NBD_DISCONNECT_ON_CLOSE, + set_bit(NBD_RT_DISCONNECT_ON_CLOSE, &config->runtime_flags); } } @@ -1816,7 +2068,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) out: mutex_unlock(&nbd->config_lock); if (!ret) { - set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags); + set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags); refcount_inc(&nbd->config_refs); nbd_connect_reply(info, nbd->index); } @@ -1831,8 +2083,16 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) mutex_lock(&nbd->config_lock); nbd_disconnect(nbd); nbd_clear_sock(nbd); + clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags); mutex_unlock(&nbd->config_lock); - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, + /* + * Make sure recv thread has finished, so it does not drop the last + * config ref and try to destroy the workqueue from inside the work + * queue. + */ + if (nbd->recv_workq) + flush_workqueue(nbd->recv_workq); + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, &nbd->config->runtime_flags)) nbd_config_put(nbd); } @@ -1907,7 +2167,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) } mutex_unlock(&nbd_index_mutex); - if (!refcount_inc_not_zero(&nbd->config_refs)) { + config = nbd_get_config_unlocked(nbd); + if (!config) { dev_err(nbd_to_dev(nbd), "not configured, cannot reconfigure\n"); nbd_put(nbd); @@ -1915,9 +2176,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) } mutex_lock(&nbd->config_lock); - config = nbd->config; - if (!test_bit(NBD_BOUND, &config->runtime_flags) || - !nbd->task_recv) { + if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || + !nbd->pid) { dev_err(nbd_to_dev(nbd), "not configured, cannot reconfigure\n"); ret = -EINVAL; @@ -1938,19 +2198,19 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, - &config->runtime_flags)) + &nbd->flags)) put_dev = true; } else { if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, - &config->runtime_flags)) + &nbd->flags)) refcount_inc(&nbd->refs); } if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { - set_bit(NBD_DISCONNECT_ON_CLOSE, + set_bit(NBD_RT_DISCONNECT_ON_CLOSE, &config->runtime_flags); } else { - clear_bit(NBD_DISCONNECT_ON_CLOSE, + clear_bit(NBD_RT_DISCONNECT_ON_CLOSE, &config->runtime_flags); } } @@ -2099,6 +2359,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) } dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST); + if (!dev_list) { + nlmsg_free(reply); + ret = -EMSGSIZE; + goto out; + } + if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) { @@ -2212,20 +2478,12 @@ static int __init nbd_init(void) if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; - recv_workqueue = alloc_workqueue("knbd-recv", - WQ_MEM_RECLAIM | WQ_HIGHPRI | - WQ_UNBOUND, 0); - if (!recv_workqueue) - return -ENOMEM; - if (register_blkdev(NBD_MAJOR, "nbd")) { - destroy_workqueue(recv_workqueue); + if (register_blkdev(NBD_MAJOR, "nbd")) return -EIO; - } if (genl_register_family(&nbd_genl_family)) { unregister_blkdev(NBD_MAJOR, "nbd"); - destroy_workqueue(recv_workqueue); return -EINVAL; } 
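[Illustrative note] The NBD_DESTROY_ON_DISCONNECT handling above keeps one extra device reference owned by the flag itself: setting the flag for the first time means the caller's extra reference will be dropped (put_dev), clearing it means a reference has to be taken back. A compact sketch of that balance with illustrative names:

#include <linux/bitops.h>
#include <linux/refcount.h>

#define MY_DESTROY_ON_DISCONNECT	0

/* Returns true when the caller should drop its extra device reference. */
static bool my_update_destroy_on_disconnect(unsigned long *flags, bool enable,
					    refcount_t *dev_refs)
{
	if (enable)
		return !test_and_set_bit(MY_DESTROY_ON_DISCONNECT, flags);

	if (test_and_clear_bit(MY_DESTROY_ON_DISCONNECT, flags))
		refcount_inc(dev_refs);	/* the flag gives its reference back */
	return false;
}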
nbd_dbg_init(); @@ -2251,6 +2509,12 @@ static void __exit nbd_cleanup(void) struct nbd_device *nbd; LIST_HEAD(del_list); + /* + * Unregister netlink interface prior to waiting + * for the completion of netlink commands. + */ + genl_unregister_family(&nbd_genl_family); + nbd_dbg_close(); mutex_lock(&nbd_index_mutex); @@ -2260,14 +2524,15 @@ static void __exit nbd_cleanup(void) while (!list_empty(&del_list)) { nbd = list_first_entry(&del_list, struct nbd_device, list); list_del_init(&nbd->list); + if (refcount_read(&nbd->config_refs)) + printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n", + refcount_read(&nbd->config_refs)); if (refcount_read(&nbd->refs) != 1) printk(KERN_ERR "nbd: possibly leaking a device\n"); nbd_put(nbd); } idr_destroy(&nbd_index_idr); - genl_unregister_family(&nbd_genl_family); - destroy_workqueue(recv_workqueue); unregister_blkdev(NBD_MAJOR, "nbd"); } diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 093b614d652445a337db00ea8beaded767073415..c5c0b7c89481555074bda218c8a545a2539d1ee4 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -321,11 +321,12 @@ static ssize_t nullb_device_power_store(struct config_item *item, set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); dev->power = newp; } else if (dev->power && !newp) { - mutex_lock(&lock); - dev->power = newp; - null_del_dev(dev->nullb); - mutex_unlock(&lock); - clear_bit(NULLB_DEV_FL_UP, &dev->flags); + if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { + mutex_lock(&lock); + dev->power = newp; + null_del_dev(dev->nullb); + mutex_unlock(&lock); + } clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index 7c6b86d987002b73251a9725d71af5b281987c9f..f64e9122824b4265a2d107804f2eb99f0b31156c 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include "null_blk.h" -/* zone_size in MBs to sectors. 
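[Illustrative note] The null_blk power_store change above makes the power-off path idempotent: only the caller that actually clears the UP bit runs the teardown, so writing 0 to the power attribute twice cannot delete the device twice. A small sketch of that gate (all names invented):

#include <linux/bitops.h>
#include <linux/mutex.h>

#define MY_DEV_FL_UP	0

struct my_nullb_dev {
	unsigned long	flags;
	bool		power;
};

static DEFINE_MUTEX(my_lock);

static void my_del_dev(struct my_nullb_dev *dev)
{
	/* tear down the gendisk, queues, ... */
}

static void my_power_off(struct my_nullb_dev *dev)
{
	if (test_and_clear_bit(MY_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&my_lock);
		dev->power = false;
		my_del_dev(dev);
		mutex_unlock(&my_lock);
	}
}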
*/ -#define ZONE_SIZE_SHIFT 11 +#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT) static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) { @@ -12,7 +12,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) int null_zone_init(struct nullb_device *dev) { - sector_t dev_size = (sector_t)dev->size * 1024 * 1024; + sector_t dev_capacity_sects; sector_t sector = 0; unsigned int i; @@ -21,9 +21,12 @@ int null_zone_init(struct nullb_device *dev) return -EINVAL; } - dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; - dev->nr_zones = dev_size >> - (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); + dev_capacity_sects = MB_TO_SECTS(dev->size); + dev->zone_size_sects = MB_TO_SECTS(dev->zone_size); + dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects); + if (dev_capacity_sects & (dev->zone_size_sects - 1)) + dev->nr_zones++; + dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), GFP_KERNEL | __GFP_ZERO); if (!dev->zones) @@ -33,7 +36,10 @@ int null_zone_init(struct nullb_device *dev) struct blk_zone *zone = &dev->zones[i]; zone->start = zone->wp = sector; - zone->len = dev->zone_size_sects; + if (zone->start + dev->zone_size_sects > dev_capacity_sects) + zone->len = dev_capacity_sects - zone->start; + else + zone->len = dev->zone_size_sects; zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone->cond = BLK_ZONE_COND_EMPTY; @@ -46,6 +52,7 @@ int null_zone_init(struct nullb_device *dev) void null_zone_exit(struct nullb_device *dev) { kvfree(dev->zones); + dev->zones = NULL; } static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio, diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 6f1d25c1eb640b8a0cffe94c3bd7e0cfa0ea691b..0bc344d22f0135c68e66a36311d8cb35085d7c10 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) if (ret) return ret; if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { - WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); return -EINVAL; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 73ed5f3a862dfcde598227671d55c2491d33408b..48fb843d929a1005b6372cdb2616e57e582bc002 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4119,6 +4119,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -4230,6 +4233,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -5833,6 +5839,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -5982,10 +5991,12 @@ static ssize_t do_rbd_remove(struct bus_type *bus, struct list_head *tmp; int dev_id; char opt_buf[6]; - bool already = false; bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); @@ -6015,13 +6026,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus, spin_lock_irq(&rbd_dev->lock); if (rbd_dev->open_count && !force) ret = -EBUSY; - else - already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, 
- &rbd_dev->flags); + else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, + &rbd_dev->flags)) + ret = -EINPROGRESS; spin_unlock_irq(&rbd_dev->lock); } spin_unlock(&rbd_dev_list_lock); - if (ret < 0 || already) + if (ret) return ret; if (force) { diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index f2c631ce793cc8a342b44381592824bf902282f7..14056dc450642941a12d142ce899ea9c627a5d8e 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -1014,8 +1014,10 @@ static void rsxx_pci_remove(struct pci_dev *dev) cancel_work_sync(&card->event_work); + destroy_workqueue(card->event_wq); rsxx_destroy_dev(card); rsxx_dma_destroy(card); + destroy_workqueue(card->creg_ctrl.creg_wq); spin_lock_irqsave(&card->irq_lock, flags); rsxx_disable_ier_and_isr(card, CR_INTR_ALL); diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 87b9e7fbf0621af826e2b67cc9e518a60a5c6724..27323fa23997da763430533bfa799890483eeda4 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1416,7 +1416,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_BUSY_IMMINENT: skd_log_skreq(skdev, skreq, "retry(busy)"); - blk_requeue_request(skdev->queue, req); + blk_mq_requeue_request(req, true); dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; skdev->timer_countdown = SKD_TIMER_MINUTES(20); @@ -1426,7 +1426,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REQUEUE_REQUEST: if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { skd_log_skreq(skdev, skreq, "retry"); - blk_requeue_request(skdev->queue, req); + blk_mq_requeue_request(req, true); break; } /* fall through */ diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index f68e9baffad764a72e31fdb652b9707dbaf8e46e..5d7024057540aa154854686c3b48337d4226db5b 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define WAITING_FOR_GEN_CMD 0x04 #define WAITING_FOR_ANY -1 +#define VDC_MAX_RETRIES 10 + static struct workqueue_struct *sunvdc_wq; struct vdc_req_entry { @@ -431,6 +433,7 @@ static int __vdc_tx_trigger(struct vdc_port *port) .end_idx = dr->prod, }; int err, delay; + int retries = 0; hdr.seq = dr->snd_nxt; delay = 1; @@ -443,6 +446,8 @@ static int __vdc_tx_trigger(struct vdc_port *port) udelay(delay); if ((delay <<= 1) > 128) delay = 128; + if (retries++ > VDC_MAX_RETRIES) + break; } while (err == -EAGAIN); if (err == -ENOTCONN) diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 0e31884a9519614398c1f1f6b048934a09ac5906..cbe909c51847db4fb6732980624b209855e6442e 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -887,8 +887,17 @@ static int swim_floppy_init(struct swim_priv *swd) exit_put_disks: unregister_blkdev(FLOPPY_MAJOR, "fd"); - while (drive--) - put_disk(swd->unit[drive].disk); + do { + struct gendisk *disk = swd->unit[drive].disk; + + if (disk) { + if (disk->queue) { + blk_cleanup_queue(disk->queue); + disk->queue = NULL; + } + put_disk(disk); + } + } while (drive--); return err; } diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 469541c1e51eed13b589f47b295f0e916ade4170..20907a0a043b5eea2e03b4fcb0b6ac02cd4fa299 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1026,7 +1026,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode) struct swim3 __iomem *sw = fs->swim3; mutex_lock(&swim3_mutex); - if (fs->ref_count > 0 && --fs->ref_count == 0) { + 
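[Illustrative note] The sunvdc hunk above bounds a previously unbounded retry loop: the delay backs off exponentially up to 128us, and the whole attempt is abandoned after VDC_MAX_RETRIES tries instead of spinning forever. A generic sketch of the same loop shape, with my_try_send() standing in for the LDC write that can return -EAGAIN:

#include <linux/delay.h>
#include <linux/errno.h>

#define MY_MAX_RETRIES	10

static int my_send_with_retries(int (*my_try_send)(void *arg), void *arg)
{
	int delay = 1, retries = 0, err;

	do {
		err = my_try_send(arg);
		if (err != -EAGAIN)
			break;
		udelay(delay);
		if ((delay <<= 1) > 128)	/* cap the backoff */
			delay = 128;
	} while (retries++ <= MY_MAX_RETRIES);

	return err;
}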
if (fs->ref_count > 0) + --fs->ref_count; + else if (fs->ref_count == -1) + fs->ref_count = 0; + if (fs->ref_count == 0) { swim3_action(fs, MOTOR_OFF); out_8(&sw->control_bic, 0xff); swim3_select(fs, RELAX); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 23752dc99b008c4b2075ee03860627b729ea54ba..8ec6b00138265de6c1bcd64117991889339cdc46 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -18,6 +18,7 @@ #define PART_BITS 4 #define VQ_NAME_LEN 16 +#define MAX_DISCARD_SEGMENTS 256u static int major; static DEFINE_IDA(vd_index_ida); @@ -31,6 +32,15 @@ struct virtio_blk_vq { } ____cacheline_aligned_in_smp; struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; struct virtio_device *vdev; /* The disk structure for the kernel. */ @@ -42,6 +52,13 @@ struct virtio_blk { /* Process context for config space updates */ struct work_struct config_work; + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; @@ -172,10 +189,63 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } +static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) +{ + unsigned short segments = blk_rq_nr_discard_segments(req); + unsigned short n = 0; + struct virtio_blk_discard_write_zeroes *range; + struct bio *bio; + u32 flags = 0; + + if (unmap) + flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP; + + range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + /* + * Single max discard segment means multi-range discard isn't + * supported, and block layer only runs contiguity merge like + * normal RW request. So we can't reply on bio for retrieving + * each range info. 
+ */ + if (queue_max_discard_segments(req->q) == 1) { + range[0].flags = cpu_to_le32(flags); + range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); + range[0].sector = cpu_to_le64(blk_rq_pos(req)); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 sector = bio->bi_iter.bi_sector; + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; + + range[n].flags = cpu_to_le32(flags); + range[n].num_sectors = cpu_to_le32(num_sectors); + range[n].sector = cpu_to_le64(sector); + n++; + } + } + + WARN_ON_ONCE(n != segments); + + req->special_vec.bv_page = virt_to_page(range); + req->special_vec.bv_offset = offset_in_page(range); + req->special_vec.bv_len = sizeof(*range) * segments; + req->rq_flags |= RQF_SPECIAL_PAYLOAD; + + return 0; +} + static inline void virtblk_request_done(struct request *req) { struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + kfree(page_address(req->special_vec.bv_page) + + req->special_vec.bv_offset); + } + switch (req_op(req)) { case REQ_OP_SCSI_IN: case REQ_OP_SCSI_OUT: @@ -225,6 +295,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, int qid = hctx->queue_num; int err; bool notify = false; + bool unmap = false; u32 type; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -237,6 +308,13 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, case REQ_OP_FLUSH: type = VIRTIO_BLK_T_FLUSH; break; + case REQ_OP_DISCARD: + type = VIRTIO_BLK_T_DISCARD; + break; + case REQ_OP_WRITE_ZEROES: + type = VIRTIO_BLK_T_WRITE_ZEROES; + unmap = !(req->cmd_flags & REQ_NOUNMAP); + break; case REQ_OP_SCSI_IN: case REQ_OP_SCSI_OUT: type = VIRTIO_BLK_T_SCSI_CMD; @@ -256,6 +334,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(req); + if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) { + err = virtblk_setup_discard_write_zeroes(req, unmap); + if (err) + return BLK_STS_RESOURCE; + } + num = blk_rq_map_sg(hctx->queue, req, vbr->sg); if (num) { if (rq_data_dir(req) == WRITE) @@ -271,10 +355,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); - blk_mq_stop_hw_queue(hctx); + /* Don't stop the queue if -ENOMEM: we may have failed to + * bounce the buffer due to global resource outage. 
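[Illustrative note] virtblk_setup_discard_write_zeroes() above attaches the range array to the request as a special payload, and the completion path has to free it again; the two hunks form a pair. A reduced sketch of that attach/free protocol, with struct my_range standing in for the virtio discard/write-zeroes descriptor:

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_range {
	__le64 sector;
	__le32 num_sectors;
	__le32 flags;
};

static void my_attach_ranges(struct request *req, struct my_range *range,
			     unsigned short segments)
{
	/* Hand the kmalloc'ed array to the block layer as the data payload. */
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
}

static void my_free_ranges(struct request *req)
{
	/* Completion side: release the array attached above. */
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
}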
+ */ + if (err == -ENOSPC) + blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - /* Out of mem doesn't actually happen, since we fall back - * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) return BLK_STS_DEV_RESOURCE; return BLK_STS_IOERR; @@ -313,10 +399,55 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) return err; } +static void virtblk_get(struct virtio_blk *vblk) +{ + refcount_inc(&vblk->refs); +} + +static void virtblk_put(struct virtio_blk *vblk) +{ + if (refcount_dec_and_test(&vblk->refs)) { + ida_simple_remove(&vd_index_ida, vblk->index); + mutex_destroy(&vblk->vdev_mutex); + kfree(vblk); + } +} + +static int virtblk_open(struct block_device *bd, fmode_t mode) +{ + struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (vblk->vdev) + virtblk_get(vblk); + else + ret = -ENXIO; + + mutex_unlock(&vblk->vdev_mutex); + return ret; +} + +static void virtblk_release(struct gendisk *disk, fmode_t mode) +{ + struct virtio_blk *vblk = disk->private_data; + + virtblk_put(vblk); +} + /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (!vblk->vdev) { + ret = -ENXIO; + goto out; + } /* see if the host passed in geometry config */ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { @@ -332,12 +463,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } - return 0; +out: + mutex_unlock(&vblk->vdev_mutex); + return ret; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, + .open = virtblk_open, + .release = virtblk_release, .getgeo = virtblk_getgeo, }; @@ -351,8 +486,8 @@ static int minor_to_index(int minor) return minor >> PART_BITS; } -static ssize_t virtblk_serial_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t serial_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); int err; @@ -371,7 +506,7 @@ static ssize_t virtblk_serial_show(struct device *dev, return err; } -static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL); +static DEVICE_ATTR_RO(serial); /* The queue's logical block size must be set before calling this */ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) @@ -446,6 +581,12 @@ static int init_vq(struct virtio_blk *vblk) if (err) num_vqs = 1; + num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs); + if (!err && !num_vqs) { + dev_err(&vdev->dev, "MQ advertisted but zero queues reported\n"); + return -EINVAL; + } + vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); if (!vblk->vqs) return -ENOMEM; @@ -545,8 +686,8 @@ static const char *const virtblk_cache_types[] = { }; static ssize_t -virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +cache_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; @@ -564,8 +705,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, } static ssize_t -virtblk_cache_type_show(struct device *dev, struct 
device_attribute *attr, - char *buf) +cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; @@ -575,12 +715,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]); } -static const struct device_attribute dev_attr_cache_type_ro = - __ATTR(cache_type, 0444, - virtblk_cache_type_show, NULL); -static const struct device_attribute dev_attr_cache_type_rw = - __ATTR(cache_type, 0644, - virtblk_cache_type_show, virtblk_cache_type_store); +static DEVICE_ATTR_RW(cache_type); + +static struct attribute *virtblk_attrs[] = { + &dev_attr_serial.attr, + &dev_attr_cache_type.attr, + NULL, +}; + +static umode_t virtblk_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gendisk *disk = dev_to_disk(dev); + struct virtio_blk *vblk = disk->private_data; + struct virtio_device *vdev = vblk->vdev; + + if (a == &dev_attr_cache_type.attr && + !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) + return S_IRUGO; + + return a->mode; +} + +static const struct attribute_group virtblk_attr_group = { + .attrs = virtblk_attrs, + .is_visible = virtblk_attrs_are_visible, +}; + +static const struct attribute_group *virtblk_attr_groups[] = { + &virtblk_attr_group, + NULL, +}; static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) @@ -663,6 +829,10 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_free_index; } + /* This reference is dropped in virtblk_remove(). */ + refcount_set(&vblk->refs, 1); + mutex_init(&vblk->vdev_mutex); + vblk->vdev = vdev; vblk->sg_elems = sg_elems; @@ -746,9 +916,17 @@ static int virtblk_probe(struct virtio_device *vdev) err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, &blk_size); - if (!err) + if (!err) { + err = blk_validate_block_size(blk_size); + if (err) { + dev_err(&vdev->dev, + "virtio_blk: invalid block size: 0x%x\n", + blk_size); + goto out_cleanup_disk; + } + blk_queue_logical_block_size(q, blk_size); - else + } else blk_size = queue_logical_block_size(q); /* Use topology information if available */ @@ -777,26 +955,40 @@ static int virtblk_probe(struct virtio_device *vdev) if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); + if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { + q->limits.discard_granularity = blk_size; + + virtio_cread(vdev, struct virtio_blk_config, + discard_sector_alignment, &v); + q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0; + + virtio_cread(vdev, struct virtio_blk_config, + max_discard_sectors, &v); + blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); + + virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, + &v); + blk_queue_max_discard_segments(q, + min_not_zero(v, + MAX_DISCARD_SEGMENTS)); + + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } + + if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) { + virtio_cread(vdev, struct virtio_blk_config, + max_write_zeroes_sectors, &v); + blk_queue_max_write_zeroes_sectors(q, v ? 
v : UINT_MAX); + } + virtblk_update_capacity(vblk, false); virtio_device_ready(vdev); + disk_to_dev(vblk->disk)->groups = virtblk_attr_groups; device_add_disk(&vdev->dev, vblk->disk); - err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); - if (err) - goto out_del_disk; - - if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) - err = device_create_file(disk_to_dev(vblk->disk), - &dev_attr_cache_type_rw); - else - err = device_create_file(disk_to_dev(vblk->disk), - &dev_attr_cache_type_ro); - if (err) - goto out_del_disk; return 0; -out_del_disk: - del_gendisk(vblk->disk); +out_cleanup_disk: blk_cleanup_queue(vblk->disk->queue); out_free_tags: blk_mq_free_tag_set(&vblk->tag_set); @@ -804,6 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev) put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); + kfree(vblk->vqs); out_free_vblk: kfree(vblk); out_free_index: @@ -815,8 +1008,6 @@ static int virtblk_probe(struct virtio_device *vdev) static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; - int index = vblk->index; - int refc; /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); @@ -826,18 +1017,21 @@ static void virtblk_remove(struct virtio_device *vdev) blk_mq_free_tag_set(&vblk->tag_set); + mutex_lock(&vblk->vdev_mutex); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ + vblk->vdev = NULL; + put_disk(vblk->disk); vdev->config->del_vqs(vdev); kfree(vblk->vqs); - kfree(vblk); - /* Only free device id if we don't have any users */ - if (refc == 1) - ida_simple_remove(&vd_index_ida, index); + mutex_unlock(&vblk->vdev_mutex); + + virtblk_put(vblk); } #ifdef CONFIG_PM_SLEEP @@ -854,6 +1048,8 @@ static int virtblk_freeze(struct virtio_device *vdev) blk_mq_quiesce_queue(vblk->disk->queue); vdev->config->del_vqs(vdev); + kfree(vblk->vqs); + return 0; } @@ -885,14 +1081,14 @@ static unsigned int features_legacy[] = { VIRTIO_BLK_F_SCSI, #endif VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, - VIRTIO_BLK_F_MQ, + VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, } ; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, - VIRTIO_BLK_F_MQ, + VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES, }; static struct virtio_driver virtio_blk = { diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index fd1e19f1a49f5803ca6dd5d939ec41ec592af077..d98cfd3b64ff002b5b5704f33daf7aeeea9aec14 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -202,7 +202,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) -static int do_block_io_op(struct xen_blkif_ring *ring); +static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags); static int dispatch_rw_block_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req); @@ -615,6 +615,8 @@ int xen_blkif_schedule(void *arg) struct xen_vbd *vbd = &blkif->vbd; unsigned long timeout; int ret; + bool do_eoi; + unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; set_freezable(); while (!kthread_should_stop()) { @@ 
-639,16 +641,23 @@ int xen_blkif_schedule(void *arg) if (timeout == 0) goto purge_gnt_list; + do_eoi = ring->waiting_reqs; + ring->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ - ret = do_block_io_op(ring); + ret = do_block_io_op(ring, &eoi_flags); if (ret > 0) ring->waiting_reqs = 1; if (ret == -EACCES) wait_event_interruptible(ring->shutdown_wq, kthread_should_stop()); + if (do_eoi && !ring->waiting_reqs) { + xen_irq_lateeoi(ring->irq, eoi_flags); + eoi_flags |= XEN_EOI_FLAG_SPURIOUS; + } + purge_gnt_list: if (blkif->vbd.feature_gnt_persistent && time_after(jiffies, ring->next_lru)) { @@ -841,8 +850,11 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { - if (get_free_page(ring, &pages[i]->page)) - goto out_of_memory; + if (get_free_page(ring, &pages[i]->page)) { + put_free_pages(ring, pages_to_gnt, segs_to_map); + ret = -ENOMEM; + goto out; + } addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; @@ -858,10 +870,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, break; } - if (segs_to_map) { + if (segs_to_map) ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); - BUG_ON(ret); - } /* * Now swizzle the MFN in our domain with the MFN from the other domain @@ -876,7 +886,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(ring, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; - ret |= 1; + ret |= !ret; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; @@ -928,15 +938,18 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, } segs_to_map = 0; last_map = map_until; - if (map_until != num) + if (!ret && map_until != num) goto again; - return ret; +out: + for (i = last_map; i < num; i++) { + /* Don't zap current batch's valid persistent grants. */ + if(i >= map_until) + pages[i]->persistent_gnt = NULL; + pages[i]->handle = BLKBACK_INVALID_HANDLE; + } -out_of_memory: - pr_alert("%s: out of memory\n", __func__); - put_free_pages(ring, pages_to_gnt, segs_to_map); - return -ENOMEM; + return ret; } static int xen_blkbk_map_seg(struct pending_req *pending_req) @@ -1119,7 +1132,7 @@ static void end_block_io_op(struct bio *bio) * and transmute it to the block API to hand it over to the proper block disk. */ static int -__do_block_io_op(struct xen_blkif_ring *ring) +__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) { union blkif_back_rings *blk_rings = &ring->blk_rings; struct blkif_request req; @@ -1142,6 +1155,9 @@ __do_block_io_op(struct xen_blkif_ring *ring) if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; + /* We've seen a request, so clear spurious eoi flag. 
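[Illustrative note] The xen-blkback hunks above convert the event channel to the lateeoi model: the worker remembers whether it was woken by the event channel, drops the "spurious" hint as soon as a real request is seen, and signals EOI only once no further work is pending. A condensed sketch of that flow (my_ring/my_do_block_io_op are illustrative names):

#include <linux/smp.h>
#include <xen/events.h>

struct my_ring {
	unsigned int	irq;
	bool		waiting_reqs;
};

static int my_do_block_io_op(struct my_ring *ring, unsigned int *eoi_flags)
{
	/* Consume ring requests here; on the first real request do:
	 *	*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
	 * Return > 0 if more work is already pending. */
	return 0;
}

static void my_service_ring(struct my_ring *ring)
{
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
	bool do_eoi = ring->waiting_reqs;

	ring->waiting_reqs = 0;
	smp_mb();	/* clear the flag *before* checking for work */

	if (my_do_block_io_op(ring, &eoi_flags) > 0)
		ring->waiting_reqs = 1;

	if (do_eoi && !ring->waiting_reqs)
		xen_irq_lateeoi(ring->irq, eoi_flags);
}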
*/ + *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; + if (kthread_should_stop()) { more_to_do = 1; break; @@ -1200,13 +1216,13 @@ __do_block_io_op(struct xen_blkif_ring *ring) } static int -do_block_io_op(struct xen_blkif_ring *ring) +do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) { union blkif_back_rings *blk_rings = &ring->blk_rings; int more_to_do; do { - more_to_do = __do_block_io_op(ring); + more_to_do = __do_block_io_op(ring, eoi_flags); if (more_to_do) break; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index a4bc74e72c394965f31dcbe7b55c8e5cd0fc6cd5..42af2f37ba4e16efd33ff386b993e5cfc53cf923 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -179,6 +179,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->domid = domid; atomic_set(&blkif->refcnt, 1); init_completion(&blkif->drain_complete); + + /* + * Because freeing back to the cache may be deferred, it is not + * safe to unload the module (and hence destroy the cache) until + * this has completed. To prevent premature unloading, take an + * extra module reference here and release only when the object + * has been freed back to the cache. + */ + __module_get(THIS_MODULE); INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); return blkif; @@ -228,9 +237,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, BUG(); } - err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, - xen_blkif_be_int, 0, - "blkif-backend", ring); + err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid, + evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); if (err < 0) { xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); ring->blk_rings.common.sring = NULL; @@ -256,6 +264,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (ring->xenblkd) { kthread_stop(ring->xenblkd); + ring->xenblkd = NULL; wake_up(&ring->shutdown_wq); } @@ -328,6 +337,7 @@ static void xen_blkif_free(struct xen_blkif *blkif) /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); + module_put(THIS_MODULE); } int __init xen_blkif_interface_init(void) @@ -642,7 +652,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev, /* setup back pointer */ be->blkif->be = be; - err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, + err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL, + backend_changed, "%s/%s", dev->nodename, "physical-device"); if (err) goto fail; @@ -974,6 +985,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) } blkif->nr_ring_pages = nr_grefs; + err = -ENOMEM; for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) @@ -996,7 +1008,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn); if (err) { xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn); - return err; + goto fail; } return 0; @@ -1016,8 +1028,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir) } kfree(req); } - return -ENOMEM; - + return err; } static int connect_ring(struct backend_info *be) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 429d20131c7e228f81bcbd6dd72ed8a21290c14f..bc1d350c9bdc13241d83edab2dcd25995182dd12 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -47,6 +47,7 @@ #include #include #include +#include 
#include #include @@ -79,6 +80,7 @@ enum blkif_state { BLKIF_STATE_DISCONNECTED, BLKIF_STATE_CONNECTED, BLKIF_STATE_SUSPENDED, + BLKIF_STATE_ERROR, }; struct grant { @@ -88,6 +90,7 @@ struct grant { }; enum blk_req_status { + REQ_PROCESSING, REQ_WAITING, REQ_DONE, REQ_ERROR, @@ -148,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order; module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); +static bool __read_mostly xen_blkif_trusted = true; +module_param_named(trusted, xen_blkif_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define BLK_RING_SIZE(info) \ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) @@ -208,6 +215,7 @@ struct blkfront_info unsigned int feature_discard:1; unsigned int feature_secdiscard:1; unsigned int feature_persistent:1; + unsigned int bounce:1; unsigned int discard_granularity; unsigned int discard_alignment; /* Number of 4KB segments handled */ @@ -297,8 +305,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) if (!gnt_list_entry) goto out_of_memory; - if (info->feature_persistent) { - granted_page = alloc_page(GFP_NOIO); + if (info->bounce) { + granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; @@ -317,7 +325,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) list_for_each_entry_safe(gnt_list_entry, n, &rinfo->grants, node) { list_del(&gnt_list_entry->node); - if (info->feature_persistent) + if (info->bounce) __free_page(gnt_list_entry->page); kfree(gnt_list_entry); i--; @@ -363,7 +371,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (info->feature_persistent) + if (info->bounce) grant_foreign_access(gnt_list_entry, info); else { /* Grant access to the GFN passed by the caller */ @@ -387,7 +395,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (!info->feature_persistent) { + if (!info->bounce) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ @@ -532,10 +540,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, id = get_id_from_freelist(rinfo); rinfo->shadow[id].request = req; - rinfo->shadow[id].status = REQ_WAITING; + rinfo->shadow[id].status = REQ_PROCESSING; rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID; - (*ring_req)->u.rw.id = id; + rinfo->shadow[id].req.u.rw.id = id; return id; } @@ -543,11 +551,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo) { struct blkfront_info *info = rinfo->dev_info; - struct blkif_request *ring_req; + struct blkif_request *ring_req, *final_ring_req; unsigned long id; /* Fill out a communications ring structure. 
*/ - id = blkif_ring_get_request(rinfo, req, &ring_req); + id = blkif_ring_get_request(rinfo, req, &final_ring_req); + ring_req = &rinfo->shadow[id].req; ring_req->operation = BLKIF_OP_DISCARD; ring_req->u.discard.nr_sectors = blk_rq_sectors(req); @@ -558,8 +567,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf else ring_req->u.discard.flag = 0; - /* Keep a private copy so we can reissue requests when recovering. */ - rinfo->shadow[id].req = *ring_req; + /* Copy the request to the ring page. */ + *final_ring_req = *ring_req; + rinfo->shadow[id].status = REQ_WAITING; return 0; } @@ -692,6 +702,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri { struct blkfront_info *info = rinfo->dev_info; struct blkif_request *ring_req, *extra_ring_req = NULL; + struct blkif_request *final_ring_req, *final_extra_ring_req = NULL; unsigned long id, extra_id = NO_ASSOCIATED_ID; bool require_extra_req = false; int i; @@ -699,7 +710,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri .grant_idx = 0, .segments = NULL, .rinfo = rinfo, - .need_copy = rq_data_dir(req) && info->feature_persistent, + .need_copy = rq_data_dir(req) && info->bounce, }; /* @@ -736,7 +747,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri } /* Fill out a communications ring structure. */ - id = blkif_ring_get_request(rinfo, req, &ring_req); + id = blkif_ring_get_request(rinfo, req, &final_ring_req); + ring_req = &rinfo->shadow[id].req; num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); num_grant = 0; @@ -787,7 +799,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri ring_req->u.rw.nr_segments = num_grant; if (unlikely(require_extra_req)) { extra_id = blkif_ring_get_request(rinfo, req, - &extra_ring_req); + &final_extra_ring_req); + extra_ring_req = &rinfo->shadow[extra_id].req; + /* * Only the first request contains the scatter-gather * list. @@ -829,10 +843,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri if (setup.segments) kunmap_atomic(setup.segments); - /* Keep a private copy so we can reissue requests when recovering. */ - rinfo->shadow[id].req = *ring_req; - if (unlikely(require_extra_req)) - rinfo->shadow[extra_id].req = *extra_ring_req; + /* Copy request(s) to the ring page. */ + *final_ring_req = *ring_req; + rinfo->shadow[id].status = REQ_WAITING; + if (unlikely(require_extra_req)) { + *final_extra_ring_req = *extra_ring_req; + rinfo->shadow[extra_id].status = REQ_WAITING; + } if (new_persistent_gnts) gnttab_free_grant_references(setup.gref_head); @@ -1013,11 +1030,12 @@ static void xlvbd_flush(struct blkfront_info *info) { blk_queue_write_cache(info->rq, info->feature_flush ? true : false, info->feature_fua ? true : false); - pr_info("blkfront: %s: %s %s %s %s %s\n", + pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", - info->max_indirect_segments ? "enabled;" : "disabled;"); + info->max_indirect_segments ? "enabled;" : "disabled;", + "bounce buffer:", info->bounce ? 
"enabled" : "disabled;"); } static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) @@ -1252,7 +1270,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) if (!list_empty(&rinfo->indirect_pages)) { struct page *indirect_page, *n; - BUG_ON(info->feature_persistent); + BUG_ON(info->bounce); list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); @@ -1269,7 +1287,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) 0, 0UL); rinfo->persistent_gnts_c--; } - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1290,7 +1308,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) for (j = 0; j < segs; j++) { persistent_gnt = rinfo->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1310,11 +1328,11 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) } free_shadow: - kfree(rinfo->shadow[i].grants_used); + kvfree(rinfo->shadow[i].grants_used); rinfo->shadow[i].grants_used = NULL; - kfree(rinfo->shadow[i].indirect_grants); + kvfree(rinfo->shadow[i].indirect_grants); rinfo->shadow[i].indirect_grants = NULL; - kfree(rinfo->shadow[i].sg); + kvfree(rinfo->shadow[i].sg); rinfo->shadow[i].sg = NULL; } @@ -1331,7 +1349,8 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) rinfo->ring_ref[i] = GRANT_INVALID_REF; } } - free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); + free_pages_exact(rinfo->ring.sring, + info->nr_ring_pages * XEN_PAGE_SIZE); rinfo->ring.sring = NULL; if (rinfo->irq) @@ -1353,7 +1372,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) for (i = 0; i < info->nr_rings; i++) blkif_free_ring(&info->rinfo[i]); - kfree(info->rinfo); + kvfree(info->rinfo); info->rinfo = NULL; info->nr_rings = 0; } @@ -1405,8 +1424,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp) static int blkif_get_final_status(enum blk_req_status s1, enum blk_req_status s2) { - BUG_ON(s1 == REQ_WAITING); - BUG_ON(s2 == REQ_WAITING); + BUG_ON(s1 < REQ_DONE); + BUG_ON(s2 < REQ_DONE); if (s1 == REQ_ERROR || s2 == REQ_ERROR) return BLKIF_RSP_ERROR; @@ -1415,9 +1434,15 @@ static int blkif_get_final_status(enum blk_req_status s1, return BLKIF_RSP_OKAY; } -static bool blkif_completion(unsigned long *id, - struct blkfront_ring_info *rinfo, - struct blkif_response *bret) +/* + * Return values: + * 1 response processed. + * 0 missing further responses. + * -1 error while processing. + */ +static int blkif_completion(unsigned long *id, + struct blkfront_ring_info *rinfo, + struct blkif_response *bret) { int i = 0; struct scatterlist *sg; @@ -1439,8 +1464,8 @@ static bool blkif_completion(unsigned long *id, s->status = blkif_rsp_to_req_status(bret->status); /* Wait the second response if not yet here. 
*/ - if (s2->status == REQ_WAITING) - return false; + if (s2->status < REQ_DONE) + return 0; bret->status = blkif_get_final_status(s->status, s2->status); @@ -1473,7 +1498,7 @@ static bool blkif_completion(unsigned long *id, data.s = s; num_sg = s->num_sg; - if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { + if (bret->operation == BLKIF_OP_READ && info->bounce) { for_each_sg(s->sg, sg, num_sg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); @@ -1491,47 +1516,48 @@ static bool blkif_completion(unsigned long *id, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < num_grant; i++) { - if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { + if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->grants_used[i]->gref); + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->grants_used[i]->gref); + return -1; + } list_add(&s->grants_used[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { /* - * If the grant is not mapped by the backend we end the - * foreign access and add it to the tail of the list, - * so it will not be picked again unless we run out of - * persistent grants. + * If the grant is not mapped by the backend we add it + * to the tail of the list, so it will not be picked + * again unless we run out of persistent grants. */ - gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &rinfo->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(num_grant); i++) { - if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { - if (!info->feature_persistent) - pr_alert_ratelimited("backed has not unmapped grant: %u\n", - s->indirect_grants[i]->gref); + if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) { + if (!info->feature_persistent) { + pr_alert("backed has not unmapped grant: %u\n", + s->indirect_grants[i]->gref); + return -1; + } list_add(&s->indirect_grants[i]->node, &rinfo->grants); rinfo->persistent_gnts_c++; } else { struct page *indirect_page; - gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ - if (!info->feature_persistent) { + if (!info->bounce) { indirect_page = s->indirect_grants[i]->page; list_add(&indirect_page->lru, &rinfo->indirect_pages); } @@ -1541,71 +1567,103 @@ static bool blkif_completion(unsigned long *id, } } - return true; + return 1; } static irqreturn_t blkif_interrupt(int irq, void *dev_id) { struct request *req; - struct blkif_response *bret; + struct blkif_response bret; RING_IDX i, rp; unsigned long flags; struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; struct blkfront_info *info = rinfo->dev_info; + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; - if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); return IRQ_HANDLED; + } spin_lock_irqsave(&rinfo->ring_lock, flags); again: - rp = rinfo->ring.sring->rsp_prod; - rmb(); /* Ensure we see queued responses up to 'rp'. 
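[Illustrative note] The blkif_completion() hunks above switch grant teardown to gnttab_try_end_foreign_access(): a grant is revoked only if the backend has really unmapped it, and a still-mapped grant is tolerated only when persistent grants are in use. A rough sketch of that decision, assuming the call returns non-zero once access has been ended (my_reclaim_grant is an invented name):

#include <xen/grant_table.h>

/*
 * Return values:
 *  0  grant revoked, the page is ours again
 *  1  backend keeps it mapped as a persistent grant, keep it cached
 * -1  backend keeps an unexpected mapping, treat the device as broken
 */
static int my_reclaim_grant(grant_ref_t gref, bool feature_persistent)
{
	if (gnttab_try_end_foreign_access(gref))
		return 0;
	if (feature_persistent)
		return 1;
	return -1;
}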
*/ + rp = READ_ONCE(rinfo->ring.sring->rsp_prod); + virt_rmb(); /* Ensure we see queued responses up to 'rp'. */ + if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) { + pr_alert("%s: illegal number of responses %u\n", + info->gd->disk_name, rp - rinfo->ring.rsp_cons); + goto err; + } for (i = rinfo->ring.rsp_cons; i != rp; i++) { unsigned long id; + unsigned int op; + + eoiflag = 0; + + RING_COPY_RESPONSE(&rinfo->ring, i, &bret); + id = bret.id; - bret = RING_GET_RESPONSE(&rinfo->ring, i); - id = bret->id; /* * The backend has messed up and given us an id that we would * never have given to it (we stamp it up to BLK_RING_SIZE - * look in get_id_from_freelist. */ if (id >= BLK_RING_SIZE(info)) { - WARN(1, "%s: response to %s has incorrect id (%ld)\n", - info->gd->disk_name, op_name(bret->operation), id); - /* We can't safely get the 'struct request' as - * the id is busted. */ - continue; + pr_alert("%s: response has incorrect id (%ld)\n", + info->gd->disk_name, id); + goto err; + } + if (rinfo->shadow[id].status != REQ_WAITING) { + pr_alert("%s: response references no pending request\n", + info->gd->disk_name); + goto err; } + + rinfo->shadow[id].status = REQ_PROCESSING; req = rinfo->shadow[id].request; - if (bret->operation != BLKIF_OP_DISCARD) { + op = rinfo->shadow[id].req.operation; + if (op == BLKIF_OP_INDIRECT) + op = rinfo->shadow[id].req.u.indirect.indirect_op; + if (bret.operation != op) { + pr_alert("%s: response has wrong operation (%u instead of %u)\n", + info->gd->disk_name, bret.operation, op); + goto err; + } + + if (bret.operation != BLKIF_OP_DISCARD) { + int ret; + /* * We may need to wait for an extra response if the * I/O request is split in 2 */ - if (!blkif_completion(&id, rinfo, bret)) + ret = blkif_completion(&id, rinfo, &bret); + if (!ret) continue; + if (unlikely(ret < 0)) + goto err; } if (add_id_to_freelist(rinfo, id)) { WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n", - info->gd->disk_name, op_name(bret->operation), id); + info->gd->disk_name, op_name(bret.operation), id); continue; } - if (bret->status == BLKIF_RSP_OKAY) + if (bret.status == BLKIF_RSP_OKAY) blkif_req(req)->error = BLK_STS_OK; else blkif_req(req)->error = BLK_STS_IOERR; - switch (bret->operation) { + switch (bret.operation) { case BLKIF_OP_DISCARD: - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; - printk(KERN_WARNING "blkfront: %s: %s op failed\n", - info->gd->disk_name, op_name(bret->operation)); + + pr_warn_ratelimited("blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret.operation)); blkif_req(req)->error = BLK_STS_NOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; @@ -1615,15 +1673,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { - printk(KERN_WARNING "blkfront: %s: %s op failed\n", - info->gd->disk_name, op_name(bret->operation)); + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) { + pr_warn_ratelimited("blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret.operation)); blkif_req(req)->error = BLK_STS_NOTSUPP; } - if (unlikely(bret->status == BLKIF_RSP_ERROR && + if (unlikely(bret.status == BLKIF_RSP_ERROR && rinfo->shadow[id].req.u.rw.nr_segments == 0)) { - printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", - info->gd->disk_name, op_name(bret->operation)); + pr_warn_ratelimited("blkfront: 
%s: empty %s op failed\n", + info->gd->disk_name, op_name(bret.operation)); blkif_req(req)->error = BLK_STS_NOTSUPP; } if (unlikely(blkif_req(req)->error)) { @@ -1636,9 +1694,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: - if (unlikely(bret->status != BLKIF_RSP_OKAY)) - dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " - "request: %x\n", bret->status); + if (unlikely(bret.status != BLKIF_RSP_OKAY)) + dev_dbg_ratelimited(&info->xbdev->dev, + "Bad return from blkdev data request: %#x\n", + bret.status); break; default: @@ -1662,6 +1721,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&rinfo->ring_lock, flags); + xen_irq_lateeoi(irq, eoiflag); + + return IRQ_HANDLED; + + err: + info->connected = BLKIF_STATE_ERROR; + + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + + /* No EOI in order to avoid further interrupts. */ + + pr_alert("%s disabled for further use\n", info->gd->disk_name); return IRQ_HANDLED; } @@ -1678,8 +1749,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF; - sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH, - get_order(ring_size)); + sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; @@ -1689,7 +1759,7 @@ static int setup_blkring(struct xenbus_device *dev, err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); if (err < 0) { - free_pages((unsigned long)sring, get_order(ring_size)); + free_pages_exact(sring, ring_size); rinfo->ring.sring = NULL; goto fail; } @@ -1700,8 +1770,8 @@ static int setup_blkring(struct xenbus_device *dev, if (err) goto fail; - err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0, - "blkif", rinfo); + err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, + 0, "blkif", rinfo); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler failed"); @@ -1782,6 +1852,10 @@ static int talk_to_blkback(struct xenbus_device *dev, if (!info) return -ENODEV; + /* Check if backend is trusted. 
*/ + info->bounce = !xen_blkif_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); @@ -1914,11 +1988,12 @@ static int negotiate_mq(struct blkfront_info *info) if (!info->nr_rings) info->nr_rings = 1; - info->rinfo = kcalloc(info->nr_rings, - sizeof(struct blkfront_ring_info), - GFP_KERNEL); + info->rinfo = kvcalloc(info->nr_rings, + sizeof(struct blkfront_ring_info), + GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); + info->nr_rings = 0; return -ENOMEM; } @@ -2187,10 +2262,12 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) { - unsigned int psegs, grants; + unsigned int psegs, grants, memflags; int err, i; struct blkfront_info *info = rinfo->dev_info; + memflags = memalloc_noio_save(); + if (info->max_indirect_segments == 0) { if (!HAS_EXTRA_REQ) grants = BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -2212,17 +2289,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) if (err) goto out_of_memory; - if (!info->feature_persistent && info->max_indirect_segments) { + if (!info->bounce && info->max_indirect_segments) { /* - * We are using indirect descriptors but not persistent - * grants, we need to allocate a set of pages that can be + * We are using indirect descriptors but don't have a bounce + * buffer, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = alloc_page(GFP_NOIO); + struct page *indirect_page = alloc_page(GFP_KERNEL | + __GFP_ZERO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2231,17 +2309,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) for (i = 0; i < BLK_RING_SIZE(info); i++) { rinfo->shadow[i].grants_used = - kcalloc(grants, - sizeof(rinfo->shadow[i].grants_used[0]), - GFP_NOIO); - rinfo->shadow[i].sg = kcalloc(psegs, - sizeof(rinfo->shadow[i].sg[0]), - GFP_NOIO); + kvcalloc(grants, + sizeof(rinfo->shadow[i].grants_used[0]), + GFP_KERNEL); + rinfo->shadow[i].sg = kvcalloc(psegs, + sizeof(rinfo->shadow[i].sg[0]), + GFP_KERNEL); if (info->max_indirect_segments) rinfo->shadow[i].indirect_grants = - kcalloc(INDIRECT_GREFS(grants), - sizeof(rinfo->shadow[i].indirect_grants[0]), - GFP_NOIO); + kvcalloc(INDIRECT_GREFS(grants), + sizeof(rinfo->shadow[i].indirect_grants[0]), + GFP_KERNEL); if ((rinfo->shadow[i].grants_used == NULL) || (rinfo->shadow[i].sg == NULL) || (info->max_indirect_segments && @@ -2250,16 +2328,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) sg_init_table(rinfo->shadow[i].sg, psegs); } + memalloc_noio_restore(memflags); return 0; out_of_memory: for (i = 0; i < BLK_RING_SIZE(info); i++) { - kfree(rinfo->shadow[i].grants_used); + kvfree(rinfo->shadow[i].grants_used); rinfo->shadow[i].grants_used = NULL; - kfree(rinfo->shadow[i].sg); + kvfree(rinfo->shadow[i].sg); rinfo->shadow[i].sg = NULL; - kfree(rinfo->shadow[i].indirect_grants); + kvfree(rinfo->shadow[i].indirect_grants); rinfo->shadow[i].indirect_grants = NULL; } if (!list_empty(&rinfo->indirect_pages)) { @@ -2269,6 +2348,9 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) 
__free_page(indirect_page); } } + + memalloc_noio_restore(memflags); + return -ENOMEM; } @@ -2310,6 +2392,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) info->feature_persistent = !!xenbus_read_unsigned(info->xbdev->otherend, "feature-persistent", 0); + if (info->feature_persistent) + info->bounce = true; indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, "feature-max-indirect-segments", 0); @@ -2493,6 +2577,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); + if (!info) + return 0; + blkif_free(info, 0); mutex_lock(&info->mutex); @@ -2664,11 +2751,10 @@ static void purge_persistent_grants(struct blkfront_info *info) list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, node) { if (gnt_list_entry->gref == GRANT_INVALID_REF || - gnttab_query_foreign_access(gnt_list_entry->gref)) + !gnttab_try_end_foreign_access(gnt_list_entry->gref)) continue; list_del(&gnt_list_entry->node); - gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); rinfo->persistent_gnts_c--; gnt_list_entry->gref = GRANT_INVALID_REF; list_add_tail(&gnt_list_entry->node, &rinfo->grants); @@ -2683,6 +2769,13 @@ static void blkfront_delay_work(struct work_struct *work) struct blkfront_info *info; bool need_schedule_work = false; + /* + * Note that when using bounce buffers but not persistent grants + * there's no need to run blkfront_delay_work because grants are + * revoked in blkif_completion or else an error is reported and the + * connection is closed. + */ + mutex_lock(&blkfront_mutex); list_for_each_entry(info, &info_list, info_list) { diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index c24589414c75926b934b9bb117b237bcb686e736..0f36db0cf74a9c1bc8ad61e81b416da4b5d7a773 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1063,6 +1063,8 @@ static int ace_setup(struct ace_device *ace) return 0; err_read: + /* prevent double queue cleanup */ + ace->gd->queue = NULL; put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a1d6b5597c17bac113c062f7b3200e2926b9f0e5..dade3734a8caea82eb0046aeab9c9b901d17037b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -53,6 +53,11 @@ static size_t huge_class_size; static void zram_free_page(struct zram *zram, size_t index); +static int zram_slot_trylock(struct zram *zram, u32 index) +{ + return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value); +} + static void zram_slot_lock(struct zram *zram, u32 index) { bit_spin_lock(ZRAM_LOCK, &zram->table[index].value); @@ -307,13 +312,14 @@ static void reset_bdev(struct zram *zram) static ssize_t backing_dev_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct file *file; struct zram *zram = dev_to_zram(dev); - struct file *file = zram->backing_dev; char *p; ssize_t ret; down_read(&zram->init_lock); - if (!zram_wb_enabled(zram)) { + file = zram->backing_dev; + if (!file) { memcpy(buf, "none\n", 5); up_read(&zram->init_lock); return 5; @@ -382,8 +388,10 @@ static ssize_t backing_dev_store(struct device *dev, bdev = bdgrab(I_BDEV(inode)); err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); - if (err < 0) + if (err < 0) { + bdev = NULL; goto out; + } nr_pages = i_size_read(inode) >> PAGE_SHIFT; bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -399,7 +407,6 @@ static ssize_t backing_dev_store(struct device *dev, goto out; 
reset_bdev(zram); - spin_lock_init(&zram->bitmap_lock); zram->old_block_size = old_block_size; zram->bdev = bdev; @@ -443,29 +450,24 @@ static ssize_t backing_dev_store(struct device *dev, static unsigned long get_entry_bdev(struct zram *zram) { - unsigned long entry; - - spin_lock(&zram->bitmap_lock); + unsigned long blk_idx = 1; +retry: /* skip 0 bit to confuse zram.handle = 0 */ - entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1); - if (entry == zram->nr_pages) { - spin_unlock(&zram->bitmap_lock); + blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx); + if (blk_idx == zram->nr_pages) return 0; - } - set_bit(entry, zram->bitmap); - spin_unlock(&zram->bitmap_lock); + if (test_and_set_bit(blk_idx, zram->bitmap)) + goto retry; - return entry; + return blk_idx; } static void put_entry_bdev(struct zram *zram, unsigned long entry) { int was_set; - spin_lock(&zram->bitmap_lock); was_set = test_and_clear_bit(entry, zram->bitmap); - spin_unlock(&zram->bitmap_lock); WARN_ON_ONCE(!was_set); } @@ -514,18 +516,18 @@ struct zram_work { struct zram *zram; unsigned long entry; struct bio *bio; + struct bio_vec bvec; }; #if PAGE_SIZE != 4096 static void zram_sync_read(struct work_struct *work) { - struct bio_vec bvec; struct zram_work *zw = container_of(work, struct zram_work, work); struct zram *zram = zw->zram; unsigned long entry = zw->entry; struct bio *bio = zw->bio; - read_from_bdev_async(zram, &bvec, entry, bio); + read_from_bdev_async(zram, &zw->bvec, entry, bio); } /* @@ -538,6 +540,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, { struct zram_work work; + work.bvec = *bvec; work.zram = zram; work.entry = entry; work.bio = bio; @@ -696,7 +699,7 @@ static ssize_t read_block_state(struct file *file, char __user *buf, zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.'); - if (count < copied) { + if (count <= copied) { zram_slot_unlock(zram, index); break; } @@ -886,9 +889,10 @@ static ssize_t debug_stat_show(struct device *dev, down_read(&zram->init_lock); ret = scnprintf(buf, PAGE_SIZE, - "version: %d\n%8llu\n", + "version: %d\n%8llu %8llu\n", version, - (u64)atomic64_read(&zram->stats.writestall)); + (u64)atomic64_read(&zram->stats.writestall), + (u64)atomic64_read(&zram->stats.miss_free)); up_read(&zram->init_lock); return ret; @@ -1400,10 +1404,14 @@ static void zram_slot_free_notify(struct block_device *bdev, zram = bdev->bd_disk->private_data; - zram_slot_lock(zram, index); + atomic64_inc(&zram->stats.notify_free); + if (!zram_slot_trylock(zram, index)) { + atomic64_inc(&zram->stats.miss_free); + return; + } + zram_free_page(zram, index); zram_slot_unlock(zram, index); - atomic64_inc(&zram->stats.notify_free); } static int zram_rw_page(struct block_device *bdev, sector_t sector, @@ -1636,6 +1644,11 @@ static const struct attribute_group zram_disk_attr_group = { .attrs = zram_disk_attrs, }; +static const struct attribute_group *zram_disk_attr_groups[] = { + &zram_disk_attr_group, + NULL, +}; + /* * Allocate and initialize new zram device. the function returns * '>= 0' device_id upon success, and negative value otherwise. 
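For illustration, a minimal sketch of the lock-free bitmap allocation pattern that the zram hunks above switch to in get_entry_bdev(): the dedicated bitmap_lock spinlock is dropped and the race between scanning for a free bit and claiming it is resolved by test_and_set_bit() alone. This sketch is not part of the patch; the names alloc_slot and nr_slots are hypothetical.

/*
 * Illustrative only: claim a free slot in a bitmap without a dedicated
 * spinlock.  test_and_set_bit() resolves the race between the scan and
 * the claim; slot 0 stays reserved so a return value of 0 can mean
 * "no slot available".
 */
#include <linux/bitops.h>

static unsigned long alloc_slot(unsigned long *bitmap, unsigned long nr_slots)
{
	unsigned long idx = 1;		/* slot 0 is reserved */

retry:
	idx = find_next_zero_bit(bitmap, nr_slots, idx);
	if (idx == nr_slots)
		return 0;		/* no free slot left */

	/* Another CPU may have taken the bit since the scan; rescan. */
	if (test_and_set_bit(idx, bitmap))
		goto retry;

	return idx;
}

Freeing is the mirror image, as put_entry_bdev() above shows: test_and_clear_bit() on the slot index, with a WARN_ON_ONCE() if the bit was not actually set.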
@@ -1716,24 +1729,15 @@ static int zram_add(void) zram->disk->queue->backing_dev_info->capabilities |= (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO); + disk_to_dev(zram->disk)->groups = zram_disk_attr_groups; add_disk(zram->disk); - ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, - &zram_disk_attr_group); - if (ret < 0) { - pr_err("Error creating sysfs group for device %d\n", - device_id); - goto out_free_disk; - } strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); zram_debugfs_register(zram); pr_info("Added device: %s\n", zram->disk->disk_name); return device_id; -out_free_disk: - del_gendisk(zram->disk); - put_disk(zram->disk); out_free_queue: blk_cleanup_queue(queue); out_free_idr: @@ -1762,16 +1766,6 @@ static int zram_remove(struct zram *zram) mutex_unlock(&bdev->bd_mutex); zram_debugfs_unregister(zram); - /* - * Remove sysfs first, so no one will perform a disksize - * store while we destroy the devices. This also helps during - * hot_remove -- zram_reset_device() is the last holder of - * ->init_lock, no later/concurrent disksize_store() or any - * other sysfs handlers are possible. - */ - sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, - &zram_disk_attr_group); - /* Make sure all the pending I/O are finished */ fsync_bdev(bdev); zram_reset_device(zram); @@ -1808,7 +1802,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 72c8584b6dfff46847a73dbc066ceeff3ecbcec1..d1095dfdffa81471196f4b008a5b373ea1834e3b 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -79,6 +79,7 @@ struct zram_stats { atomic64_t pages_stored; /* no. of pages currently stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */ atomic64_t writestall; /* no. of write slow paths */ + atomic64_t miss_free; /* no. 
of missed free */ }; struct zram { @@ -110,7 +111,6 @@ struct zram { unsigned int old_block_size; unsigned long *bitmap; unsigned long nr_pages; - spinlock_t bitmap_lock; #endif #ifdef CONFIG_ZRAM_MEMORY_TRACKING struct dentry *debugfs_dir; diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 99cde1f9467d4edae71cc9820b35c00a75016650..e3e4d929e74f52566b385d56731c8e2d8fbf3daa 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ { 0x2209, "BCM43430A1" }, /* 001.002.009 */ diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index 4593baff2bc944f5a003bb666520062f768f52e5..19eecf1983216d3bff3b0ba2e663864ffa52fa42 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -115,11 +115,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen, TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); return err; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); return -ETIMEDOUT; } diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index ec9e03a6b7786084b634b73f24ee5cf0dfc2c70f..9e70f7c7e5659b87dedfaddc529c04bbd9cd7cb2 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -363,6 +363,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } + /* Give the controller some time to get ready to receive the NVM */ + msleep(10); + /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (soc_type == QCA_WCN3990) diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 60d1419590babb9f275e460756250b90b9234ab7..3951f7b23840447030eaabe617d125b97c25495e 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -21,8 +21,9 @@ #include #include -#define RSI_HEADROOM_FOR_BT_HAL 16 +#define RSI_DMA_ALIGN 8 #define RSI_FRAME_DESC_SIZE 16 +#define RSI_HEADROOM_FOR_BT_HAL (RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN) struct rsi_hci_adapter { void *priv; @@ -70,6 +71,16 @@ static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb) bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb); kfree_skb(skb); skb = new_skb; + if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) { + u8 *skb_data = skb->data; + int skb_len = skb->len; + + skb_push(skb, RSI_DMA_ALIGN); + skb_pull(skb, PTR_ALIGN(skb->data, + RSI_DMA_ALIGN) - skb->data); + memmove(skb->data, skb_data, skb_len); + skb_trim(skb, skb_len); + } } return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb, diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 7f9ea8e4c1b227b56182ba83751a2f03bc670a07..8d1cd2479e36f5dd5c0d057b3cdf4dd92d7ef019 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -544,10 +544,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hdev->bus); if (!btrtl_dev->ic_info) { - rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", + rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", 
lmp_subver, hci_rev, hci_ver); - ret = -EINVAL; - goto err_free; + return btrtl_dev; } if (btrtl_dev->ic_info->has_rom_version) { @@ -602,6 +601,11 @@ int btrtl_download_firmware(struct hci_dev *hdev, * standard btusb. Once that firmware is uploaded, the subver changes * to a different value. */ + if (!btrtl_dev->ic_info) { + rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); + return 0; + } + switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8723A: case RTL_ROM_LMP_3499: @@ -635,6 +639,26 @@ int btrtl_setup_realtek(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(btrtl_setup_realtek); +int btrtl_shutdown_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int ret; + + /* According to the vendor driver, BT must be reset on close to avoid + * firmware crash. + */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return ret; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek); + static unsigned int btrtl_convert_baudrate(u32 device_baudrate) { switch (device_baudrate) { diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h index f5e36f3993a8163e945619ede58a01e03496a9cc..852f27d4ee289ecf161245fb29b258776088b66c 100644 --- a/drivers/bluetooth/btrtl.h +++ b/drivers/bluetooth/btrtl.h @@ -65,6 +65,7 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev); int btrtl_download_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev); int btrtl_setup_realtek(struct hci_dev *hdev); +int btrtl_shutdown_realtek(struct hci_dev *hdev); int btrtl_get_uart_settings(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned int *controller_baudrate, @@ -93,6 +94,11 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev) return -EOPNOTSUPP; } +static inline int btrtl_shutdown_realtek(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + static inline int btrtl_get_uart_settings(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned int *controller_baudrate, diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 20142bc77554c0f3400da0506a6158e94722f54b..1325b1df4a8ed51ab78d8d8663c4b008302a053e 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -356,6 +356,7 @@ static void btsdio_remove(struct sdio_func *func) if (!data) return; + cancel_work_sync(&data->work); hdev = data->hdev; sdio_set_drvdata(func, NULL); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index cd2e5cf14ea5b9a6bba979bd0d822a46812d574f..1b0adf5c2376017b20417167c83ff571a3af1dfd 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -277,7 +277,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -343,6 +345,7 @@ static const struct usb_device_id blacklist_table[] = { /* Intel Bluetooth devices */ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW }, { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW }, + { USB_DEVICE(0x8087, 0x0029), .driver_info = 
BTUSB_INTEL_NEW }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, @@ -388,6 +391,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, @@ -1132,14 +1138,10 @@ static int btusb_open(struct hci_dev *hdev) if (data->setup_on_usb) { err = data->setup_on_usb(hdev); if (err < 0) - return err; + goto setup_fail; } data->intf->needs_remote_wakeup = 1; - /* device specific wakeup source enabled and required for USB - * remote wakeup while host is suspended - */ - device_wakeup_enable(&data->udev->dev); if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; @@ -1168,6 +1170,7 @@ static int btusb_open(struct hci_dev *hdev) failed: clear_bit(BTUSB_INTR_RUNNING, &data->flags); +setup_fail: usb_autopm_put_interface(data->intf); return err; } @@ -1203,7 +1206,6 @@ static int btusb_close(struct hci_dev *hdev) goto failed; data->intf->needs_remote_wakeup = 0; - device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf); failed: @@ -2054,6 +2056,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return -EILSEQ; } +static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, + struct intel_boot_params *params, + char *fw_name, size_t len, + const char *suffix) +{ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + snprintf(fw_name, len, "intel/ibt-%u-%u.%s", + le16_to_cpu(ver->hw_variant), + le16_to_cpu(params->dev_revid), + suffix); + break; + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s", + le16_to_cpu(ver->hw_variant), + le16_to_cpu(ver->hw_revision), + le16_to_cpu(ver->fw_revision), + suffix); + break; + default: + return false; + } + return true; +} + static int btusb_setup_intel_new(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); @@ -2105,7 +2136,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ + case 0x14: /* CcP */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", @@ -2189,23 +2220,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * ibt---.sfi. * */ - switch (ver.hw_variant) { - case 0x0b: /* SfP */ - case 0x0c: /* WsP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(params.dev_revid)); - break; - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(ver.hw_revision), - le16_to_cpu(ver.fw_revision)); - break; - default: + err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, + sizeof(fwname), "sfi"); + if (!err) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } @@ -2221,23 +2238,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Save the DDC file name for later use to apply once the firmware * downloading is done. 
*/ - switch (ver.hw_variant) { - case 0x0b: /* SfP */ - case 0x0c: /* WsP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(params.dev_revid)); - break; - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* QnJ, IcP */ - snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc", - le16_to_cpu(ver.hw_variant), - le16_to_cpu(ver.hw_revision), - le16_to_cpu(ver.fw_revision)); - break; - default: + err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, + sizeof(fwname), "ddc"); + if (!err) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } @@ -2886,6 +2889,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) return 0; } + irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, 0, "OOB Wake-on-BT", data); if (ret) { @@ -2900,7 +2904,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) } data->oob_wake_irq = irq; - disable_irq(irq); bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); return 0; } @@ -3129,6 +3132,7 @@ static int btusb_probe(struct usb_interface *intf, #ifdef CONFIG_BT_HCIBTUSB_RTL if (id->driver_info & BTUSB_REALTEK) { hdev->setup = btrtl_setup_realtek; + hdev->shutdown = btrtl_shutdown_realtek; /* Realtek devices lose their updated firmware over suspend, * but the USB hub doesn't notice any status change. diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h index b432651f823650a2ca45e6b1e3537a6179e3fa61..307d82166f480cb53e47c24703ff75188efdd000 100644 --- a/drivers/bluetooth/h4_recv.h +++ b/drivers/bluetooth/h4_recv.h @@ -60,6 +60,10 @@ static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev, const struct h4_recv_pkt *pkts, int pkts_count) { + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + while (count) { int i, len; diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index d568fbd94d6c84cc0045be68204f46b995e76cdb..20235925344dd224327a12ededac2589906be334 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -112,6 +112,9 @@ static int ath_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + ath = kzalloc(sizeof(*ath), GFP_KERNEL); if (!ath) return -ENOMEM; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index ddbd8c6a0cebd5cc7ef5d079533a31383518e5f4..59e5fc5eec8f878f5f02f5bcd7b41776b2ce039d 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -51,6 +51,12 @@ #define BCM_LM_DIAG_PKT 0x07 #define BCM_LM_DIAG_SIZE 63 +#define BCM_TYPE49_PKT 0x31 +#define BCM_TYPE49_SIZE 0 + +#define BCM_TYPE52_PKT 0x34 +#define BCM_TYPE52_SIZE 0 + #define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ /** @@ -369,6 +375,9 @@ static int bcm_open(struct hci_uart *hu) bt_dev_dbg(hu->hdev, "hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); if (!bcm) return -ENOMEM; @@ -561,12 +570,28 @@ static int bcm_setup(struct hci_uart *hu) .lsize = 0, \ .maxlen = BCM_NULL_SIZE +#define BCM_RECV_TYPE49 \ + .type = BCM_TYPE49_PKT, \ + .hlen = BCM_TYPE49_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_TYPE49_SIZE + +#define BCM_RECV_TYPE52 \ + .type = BCM_TYPE52_PKT, \ + .hlen = BCM_TYPE52_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_TYPE52_SIZE + static const struct h4_recv_pkt bcm_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { 
H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, { BCM_RECV_NULL, .recv = hci_recv_diag }, + { BCM_RECV_TYPE49, .recv = hci_recv_diag }, + { BCM_RECV_TYPE52, .recv = hci_recv_diag }, }; static int bcm_recv(struct hci_uart *hu, const void *data, int count) @@ -907,6 +932,10 @@ static int bcm_get_resources(struct bcm_device *dev) dev->clk = devm_clk_get(dev->dev, NULL); + /* Handle deferred probing */ + if (dev->clk == ERR_PTR(-EPROBE_DEFER)) + return PTR_ERR(dev->clk); + dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", GPIOD_OUT_LOW); if (IS_ERR(dev->device_wakeup)) diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index 1a7f0c82fb362ec2e200c2b6428b923fb24aa3ab..27829273f3c913fe53adef14ebd3ccb89b9d0fd7 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -606,6 +606,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else @@ -621,6 +622,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; @@ -645,6 +647,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; @@ -759,6 +762,11 @@ static int bcsp_close(struct hci_uart *hu) skb_queue_purge(&bcsp->rel); skb_queue_purge(&bcsp->unrel); + if (bcsp->rx_skb) { + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + } + kfree(bcsp); return 0; } diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index fb97a3bf069bbffbbee9feed7de1ffdb83e8a685..5d97d77627c1a087ee5450b327562ef993ce1887 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -174,6 +174,10 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, struct hci_uart *hu = hci_get_drvdata(hdev); u8 alignment = hu->alignment ? 
hu->alignment : 1; + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + while (count) { int i, len; diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 46ace321bf60ebb1a1cfa3446838765cb025d812..e9228520e4c7af163b9ddeefedd200a01d4ca46c 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + intel = kzalloc(sizeof(*intel), GFP_KERNEL); if (!intel) return -ENOMEM; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index ea6238ed5c0eaa095ada2c2b0f88aea27707dcbf..73ba2977350a973a66792d304d8840c9079d63ad 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_struct *work) err = hci_register_dev(hu->hdev); if (err < 0) { BT_ERR("Can't register HCI device"); + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + hu->proto->close(hu); hdev = hu->hdev; hu->hdev = NULL; hci_free_dev(hdev); - clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hu->proto->close(hu); return; } @@ -299,6 +299,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +/* Check the underlying device or tty has flow control support */ +bool hci_uart_has_flow_control(struct hci_uart *hu) +{ + /* serdev nodes check if the needed operations are present */ + if (hu->serdev) + return true; + + if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset) + return true; + + return false; +} + /* Flow control or un-flow control the device */ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) { @@ -616,6 +629,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, static int hci_uart_register_dev(struct hci_uart *hu) { struct hci_dev *hdev; + int err; BT_DBG(""); @@ -659,11 +673,22 @@ static int hci_uart_register_dev(struct hci_uart *hu) else hdev->dev_type = HCI_PRIMARY; + /* Only call open() for the protocol after hdev is fully initialized as + * open() (or a timer/workqueue it starts) may attempt to reference it. 
+ */ + err = hu->proto->open(hu); + if (err) { + hu->hdev = NULL; + hci_free_dev(hdev); + return err; + } + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); + hu->proto->close(hu); hu->hdev = NULL; hci_free_dev(hdev); return -ENODEV; @@ -683,20 +708,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) if (!p) return -EPROTONOSUPPORT; - err = p->open(hu); - if (err) - return err; - hu->proto = p; - set_bit(HCI_UART_PROTO_READY, &hu->flags); err = hci_uart_register_dev(hu); if (err) { - clear_bit(HCI_UART_PROTO_READY, &hu->flags); - p->close(hu); return err; } + set_bit(HCI_UART_PROTO_READY, &hu->flags); return 0; } @@ -753,7 +772,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file, break; case HCIUARTGETPROTO: - if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + if (test_bit(HCI_UART_PROTO_SET, &hu->flags) && + test_bit(HCI_UART_PROTO_READY, &hu->flags)) err = hu->proto->id; else err = -EUNATCH; diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index ffb00669346f0ee70045043e82af0651e80591ba..23791df081bab0935fed1a91ec3e36f67da9f3e7 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu) BT_DBG("hu %p", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); if (!mrvl) return -ENOMEM; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 2fee65886d50fd6a45ef14b365900ccdc5a4ffe6..f96e58de049b3b98ad46420695d7b9db2487e80f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -167,7 +167,7 @@ struct qca_serdev { }; static int qca_power_setup(struct hci_uart *hu, bool on); -static void qca_power_shutdown(struct hci_dev *hdev); +static void qca_power_shutdown(struct hci_uart *hu); static void __serial_clock_on(struct tty_struct *tty) { @@ -450,6 +450,9 @@ static int qca_open(struct hci_uart *hu) BT_DBG("hu %p qca_open", hu); + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); if (!qca) return -ENOMEM; @@ -504,6 +507,8 @@ static int qca_open(struct hci_uart *hu) qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type != QCA_WCN3990) { gpiod_set_value_cansleep(qcadev->bt_en, 1); + /* Controller needs time to bootup. 
*/ + msleep(150); } else { hu->init_speed = qcadev->init_speed; hu->oper_speed = qcadev->oper_speed; @@ -609,7 +614,7 @@ static int qca_close(struct hci_uart *hu) if (hu->serdev) { qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(hu->hdev); + qca_power_shutdown(hu); else gpiod_set_value_cansleep(qcadev->bt_en, 0); @@ -1232,12 +1237,15 @@ static const struct qca_vreg_data qca_soc_data = { .num_vregs = 4, }; -static void qca_power_shutdown(struct hci_dev *hdev) +static void qca_power_shutdown(struct hci_uart *hu) { - struct hci_uart *hu = hci_get_drvdata(hdev); + struct serdev_device *serdev = hu->serdev; + unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE; host_set_baudrate(hu, 2400); - qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + hci_uart_set_flow_control(hu, true); + serdev_device_write_buf(serdev, &cmd, sizeof(cmd)); + hci_uart_set_flow_control(hu, false); qca_power_setup(hu, false); } @@ -1413,7 +1421,7 @@ static void qca_serdev_remove(struct serdev_device *serdev) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); if (qcadev->btsoc_type == QCA_WCN3990) - qca_power_shutdown(qcadev->serdev_hu.hdev); + qca_power_shutdown(&qcadev->serdev_hu); else clk_disable_unprepare(qcadev->susclk); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index aa2543b3c286968fece4acee97953eb2c1fe0ba8..46e20444ba19bdbe85163eddac4c6d93b4cbab3c 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -368,6 +368,7 @@ void hci_uart_unregister_device(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; + clear_bit(HCI_UART_PROTO_READY, &hu->flags); hci_unregister_dev(hdev); hci_free_dev(hdev); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 00cab2fd7a1b8302ef164940a784485a9198784d..067a610f1372a4b171a9e0446f76d3eaf52fbfc8 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -118,6 +118,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); void hci_uart_init_work(struct work_struct *work); void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); +bool hci_uart_has_flow_control(struct hci_uart *hu); void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, unsigned int oper_speed); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index d5f85455fa6216fc2c7660e5f543f5aab71f50f5..0922cbe8900b46228cff31b79c0b90bc4cdb6c21 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; } +/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. Fixup to a proper range. This will probably + * never be fixed in firmware. 
+ */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + } acpi_dev_free_resource_list(&resource_list); @@ -456,6 +479,17 @@ struct hisi_lpc_acpi_cell { size_t pdata_size; }; +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ + struct acpi_device *adev = ACPI_COMPANION(hostdev); + struct acpi_device *child; + + device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev); + + list_for_each_entry(child, &adev->children, node) + acpi_device_clear_enumerated(child); +} + /* * hisi_lpc_acpi_probe - probe children for ACPI FW * @hostdev: LPC host device pointer @@ -470,13 +504,13 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) { struct acpi_device *adev = ACPI_COMPANION(hostdev); struct acpi_device *child; + struct platform_device *pdev; int ret; /* Only consider the children of the host */ list_for_each_entry(child, &adev->children, node) { const char *hid = acpi_device_hid(child); const struct hisi_lpc_acpi_cell *cell; - struct platform_device *pdev; const struct resource *res; bool found = false; int num_res; @@ -522,10 +556,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) if (!found) { dev_warn(hostdev, - "could not find cell for child device (%s)\n", + "could not find cell for child device (%s), discarding\n", hid); - ret = -ENODEV; - goto fail; + continue; } pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO); @@ -539,25 +572,26 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) ret = platform_device_add_resources(pdev, res, num_res); if (ret) - goto fail; + goto fail_put_device; ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size); if (ret) - goto fail; + goto fail_put_device; ret = platform_device_add(pdev); if (ret) - goto fail; + goto fail_put_device; acpi_device_set_enumerated(child); } return 0; +fail_put_device: + platform_device_put(pdev); fail: - device_for_each_child(hostdev, NULL, - hisi_lpc_acpi_remove_subdev); + hisi_lpc_acpi_remove(hostdev); return ret; } @@ -570,6 +604,10 @@ static int hisi_lpc_acpi_probe(struct device *dev) { return -ENODEV; } + +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ +} #endif // CONFIG_ACPI /* @@ -607,24 +645,27 @@ static int hisi_lpc_probe(struct platform_device *pdev) range->fwnode = dev->fwnode; range->flags = LOGIC_PIO_INDIRECT; range->size = PIO_INDIRECT_SIZE; + range->hostdata = lpcdev; + range->ops = &hisi_lpc_ops; + lpcdev->io_host = range; ret = logic_pio_register_range(range); if (ret) { dev_err(dev, "register IO range failed (%d)!\n", ret); return ret; } - lpcdev->io_host = range; /* register the LPC host PIO resources */ if (acpi_device) ret = hisi_lpc_acpi_probe(dev); else ret = of_platform_populate(dev->of_node, NULL, NULL, dev); - if (ret) + if (ret) { + logic_pio_unregister_range(range); return ret; + 
} - lpcdev->io_host->hostdata = lpcdev; - lpcdev->io_host->ops = &hisi_lpc_ops; + dev_set_drvdata(dev, lpcdev); io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; dev_info(dev, "registered range [%pa - %pa]\n", @@ -633,6 +674,23 @@ static int hisi_lpc_probe(struct platform_device *pdev) return ret; } +static int hisi_lpc_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct acpi_device *acpi_device = ACPI_COMPANION(dev); + struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev); + struct logic_pio_hwaddr *range = lpcdev->io_host; + + if (acpi_device) + hisi_lpc_acpi_remove(dev); + else + of_platform_depopulate(dev); + + logic_pio_unregister_range(range); + + return 0; +} + static const struct of_device_id hisi_lpc_of_match[] = { { .compatible = "hisilicon,hip06-lpc", }, { .compatible = "hisilicon,hip07-lpc", }, @@ -646,5 +704,6 @@ static struct platform_driver hisi_lpc_driver = { .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), }, .probe = hisi_lpc_probe, + .remove = hisi_lpc_remove, }; builtin_platform_driver(hisi_lpc_driver); diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c index a6444244c4111f7033eb04788f5a28cce00b9358..bfb67aa00becc5ef1abc0c439772f4c33ce2abc5 100644 --- a/drivers/bus/qcom-ebi2.c +++ b/drivers/bus/qcom-ebi2.c @@ -357,8 +357,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev) /* Figure out the chipselect */ ret = of_property_read_u32(child, "reg", &csindex); - if (ret) + if (ret) { + of_node_put(child); return ret; + } if (csindex > 5) { dev_err(dev, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index e4fe954e63a9be53b74397c825bf4f57b06dcca5..b6f63e7620214a7baf02fc491ea7fe118204958f 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -91,6 +91,9 @@ struct sysc { struct delayed_work idle_work; }; +static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, + bool is_child); + void sysc_write(struct sysc *ddata, int offset, u32 value) { writel_relaxed(value, ddata->module_va + offset); @@ -214,8 +217,13 @@ static int sysc_get_clocks(struct sysc *ddata) if (!ddata->clocks) return -ENOMEM; - for (i = 0; i < ddata->nr_clocks; i++) { - error = sysc_get_one_clock(ddata, ddata->clock_roles[i]); + for (i = 0; i < SYSC_MAX_CLOCKS; i++) { + const char *name = ddata->clock_roles[i]; + + if (!name) + continue; + + error = sysc_get_one_clock(ddata, name); if (error && error != -ENOENT) return error; } @@ -374,6 +382,7 @@ static int sysc_check_one_child(struct sysc *ddata, dev_warn(ddata->dev, "really a child ti,hwmods property?"); sysc_check_quirk_stdout(ddata, np); + sysc_parse_dts_quirks(ddata, np, true); return 0; } @@ -1022,10 +1031,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata) if (error) return 0; - if (val) - ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; - else - ddata->cfg.sysc_val = ddata->cap->sysc_mask; + ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; return 0; } @@ -1346,23 +1352,37 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = { .mask = SYSC_QUIRK_NO_RESET_ON_INIT, }, }; -static int sysc_init_dts_quirks(struct sysc *ddata) +static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, + bool is_child) { - struct device_node *np = ddata->dev->of_node; const struct property *prop; - int i, len, error; - u32 val; - - ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL); + int i, len; for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { - prop = of_get_property(np, sysc_dts_quirks[i].name, &len); + const char *name = 
sysc_dts_quirks[i].name; + + prop = of_get_property(np, name, &len); if (!prop) continue; ddata->cfg.quirks |= sysc_dts_quirks[i].mask; + if (is_child) { + dev_warn(ddata->dev, + "dts flag should be at module level for %s\n", + name); + } } +} + +static int sysc_init_dts_quirks(struct sysc *ddata) +{ + struct device_node *np = ddata->dev->of_node; + int error; + u32 val; + ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL); + + sysc_parse_dts_quirks(ddata, np, false); error = of_property_read_u32(np, "ti,sysc-delay-us", &val); if (!error) { if (val > 255) { @@ -1688,7 +1708,7 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_init_dts_quirks(ddata); if (error) - goto unprepare; + return error; error = sysc_get_clocks(ddata); if (error) @@ -1696,27 +1716,27 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_map_and_check_registers(ddata); if (error) - goto unprepare; + return error; error = sysc_init_sysc_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_idlemodes(ddata); if (error) - goto unprepare; + return error; error = sysc_init_syss_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_pdata(ddata); if (error) - goto unprepare; + return error; error = sysc_init_resets(ddata); if (error) - return error; + goto unprepare; pm_runtime_enable(ddata->dev); error = sysc_init_module(ddata); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index a5d5a96479bfe813449c7527024c52bf0d79c1b8..d3947388a3ef3b05a1a10401c772497386bd9fc8 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -265,6 +265,7 @@ /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */ /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */ +#include #include #include #include @@ -410,10 +411,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi, * hack to have the capability flags defined const, while we can still * change it here without gcc complaining at every line. 
*/ -#define ENSURE(call, bits) \ -do { \ - if (cdo->call == NULL) \ - *change_capability &= ~(bits); \ +#define ENSURE(cdo, call, bits) \ +do { \ + if (cdo->call == NULL) \ + WARN_ON_ONCE((cdo)->capability & (bits)); \ } while (0) /* @@ -589,7 +590,6 @@ int register_cdrom(struct cdrom_device_info *cdi) { static char banner_printed; const struct cdrom_device_ops *cdo = cdi->ops; - int *change_capability = (int *)&cdo->capability; /* hack */ cd_dbg(CD_OPEN, "entering register_cdrom\n"); @@ -601,16 +601,16 @@ int register_cdrom(struct cdrom_device_info *cdi) cdrom_sysctl_register(); } - ENSURE(drive_status, CDC_DRIVE_STATUS); + ENSURE(cdo, drive_status, CDC_DRIVE_STATUS); if (cdo->check_events == NULL && cdo->media_changed == NULL) - *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); - ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); - ENSURE(lock_door, CDC_LOCK); - ENSURE(select_speed, CDC_SELECT_SPEED); - ENSURE(get_last_session, CDC_MULTI_SESSION); - ENSURE(get_mcn, CDC_MCN); - ENSURE(reset, CDC_RESET); - ENSURE(generic_packet, CDC_GENERIC_PACKET); + WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC)); + ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); + ENSURE(cdo, lock_door, CDC_LOCK); + ENSURE(cdo, select_speed, CDC_SELECT_SPEED); + ENSURE(cdo, get_last_session, CDC_MULTI_SESSION); + ENSURE(cdo, get_mcn, CDC_MCN); + ENSURE(cdo, reset, CDC_RESET); + ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; cdi->options = CDO_USE_FFLAGS; @@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks) tracks->xa = 0; tracks->error = 0; cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); + + if (!CDROM_CAN(CDC_PLAY_AUDIO)) { + tracks->error = CDS_NO_INFO; + return; + } + /* Grab the TOC header so we can see how many tracks there are */ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); if (ret) { @@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, ret = open_for_data(cdi); if (ret) goto err; - cdrom_mmc3_profile(cdi); + if (CDROM_CAN(CDC_GENERIC_PACKET)) + cdrom_mmc3_profile(cdi); if (mode & FMODE_WRITE) { ret = -EROFS; if (cdrom_open_write(cdi)) @@ -2445,7 +2452,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, return -ENOSYS; if (arg != CDSL_CURRENT && arg != CDSL_NONE) { - if ((int)arg >= cdi->capacity) + if (arg >= cdi->capacity) return -EINVAL; } @@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) it doesn't give enough information or fails. then we return the toc contents. 
*/ use_toc: + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + toc.cdte_format = CDROM_MSF; toc.cdte_track = CDROM_LEADOUT; if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) @@ -3693,9 +3703,9 @@ static struct ctl_table_header *cdrom_sysctl_header; static void cdrom_sysctl_register(void) { - static int initialized; + static atomic_t initialized = ATOMIC_INIT(0); - if (initialized == 1) + if (!atomic_add_unless(&initialized, 1, 1)) return; cdrom_sysctl_header = register_sysctl_table(cdrom_root_table); @@ -3706,8 +3716,6 @@ static void cdrom_sysctl_register(void) cdrom_sysctl_settings.debug = debug; cdrom_sysctl_settings.lock = lockdoor; cdrom_sysctl_settings.check = check_media_type; - - initialized = 1; } static void cdrom_sysctl_unregister(void) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 40728491f37b6a46be7e59399c6530263d2d40c1..5df4aa56b1c11e50cbe83ca1ace9a7fdfd16a86b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -343,7 +343,7 @@ config XILINX_HWICAP config R3964 tristate "Siemens R3964 line discipline" - depends on TTY + depends on TTY && BROKEN ---help--- This driver allows synchronous communication with devices using the Siemens R3964 packet protocol. Unless you are dealing with special @@ -387,8 +387,6 @@ config GPIO_TB0219 depends on TANBAC_TB022X select GPIO_VR41XX -source "drivers/char/pcmcia/Kconfig" - config MWAVE tristate "ACP Modem (Mwave) support" depends on X86 && TTY @@ -552,6 +550,23 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. +config HISI_SVM + tristate "Hisilicon svm driver" + depends on ARM64 && ARM_SMMU_V3 && MMU_NOTIFIER && HUGETLBFS + default m + help + This driver provides character-level access to Hisilicon + SVM chipset. Typically, you can bind a task to the + svm and share the virtual memory with hisilicon svm device. + When in doubt, say "N". 
+ +config PIN_MEMORY_DEV + tristate "/dev/pinmem character device" + depends on PIN_MEMORY + default m + help + pin memory driver + endmenu config RANDOM_TRUST_CPU diff --git a/drivers/char/Makefile b/drivers/char/Makefile index b8d42b4e979bbc225bec63b6f5ea1dcf10981c50..f235072410c253f426bc3337b7e523b2cdea0f74 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -45,7 +45,6 @@ obj-$(CONFIG_TELCLOCK) += tlclk.o obj-$(CONFIG_MWAVE) += mwave/ obj-y += agp/ -obj-$(CONFIG_PCMCIA) += pcmcia/ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ @@ -58,3 +57,5 @@ js-rtc-y = rtc.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o obj-$(CONFIG_ADI) += adi.o +obj-$(CONFIG_HISI_SVM) += svm.o +obj-$(CONFIG_PIN_MEMORY_DEV) += pin_memory.o diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index c0a5b1f3a9863b17bf232d7d432a16943a90d252..4ccc39e00ced33fa1942be81fe6f87f7658adbab 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, TicCard = st_loc.tic_des_from_pc; /* tic number to send */ IndexCard = NumCard - 1; - if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO) + if (IndexCard >= MAX_BOARD) + return -EINVAL; + IndexCard = array_index_nospec(IndexCard, MAX_BOARD); + + if (!apbs[IndexCard].RamIO) return -EINVAL; #ifdef DEBUG @@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) unsigned char IndexCard; void __iomem *pmem; int ret = 0; + static int warncount = 10; volatile unsigned char byte_reset_it; struct st_ram_io *adgl; void __user *argp = (void __user *)arg; @@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) mutex_lock(&ac_mutex); IndexCard = adgl->num_card-1; - if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { - static int warncount = 10; - if (warncount) { - printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1); - warncount--; - } - kfree(adgl); - mutex_unlock(&ac_mutex); - return -EINVAL; - } + if (cmd != 6 && IndexCard >= MAX_BOARD) + goto err; + IndexCard = array_index_nospec(IndexCard, MAX_BOARD); + + if (cmd != 6 && !apbs[IndexCard].RamIO) + goto err; switch (cmd) { @@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) kfree(adgl); mutex_unlock(&ac_mutex); return 0; + +err: + if (warncount) { + pr_warn("APPLICOM driver IOCTL, bad board number %d\n", + (int)IndexCard + 1); + warncount--; + } + kfree(adgl); + mutex_unlock(&ac_mutex); + return -EINVAL; + } diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index 14e728fbb8a009bcb8988c63f52c812c122943b7..ff5394f475875b6fe0c666d760a1a45daae8a0db 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c @@ -44,7 +44,7 @@ static ssize_t read_nvram(struct file *file, char __user *buf, unsigned int i; char __user *p = buf; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; if (*ppos >= nvram_len) return 0; @@ -62,7 +62,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf, const char __user *p = buf; char c; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; if (*ppos >= nvram_len) return 0; diff --git a/drivers/char/hpet.c 
b/drivers/char/hpet.c index 4a22b4b41aefdfb743fc04922318cc1901514c6b..c0732f032248479fb6ea2386ec76bb46e3800798 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str) pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled"); return 1; } -__setup("hpet_mmap", hpet_mmap_enable); +__setup("hpet_mmap=", hpet_mmap_enable); static int hpet_mmap(struct file *file, struct vm_area_struct *vma) { @@ -570,8 +570,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, unsigned long long m; m = hpets->hp_tick_freq + (dis >> 1); - do_div(m, dis); - return (unsigned long)m; + return div64_ul(m, dis); } static int diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index dac895dc01b956882486b90e0ea75813b93ea1c7..7caa16e8689c8f79976eb81c27af08b012c15b28 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -126,6 +126,20 @@ config HW_RANDOM_N2RNG If unsure, say Y. + +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. + config HW_RANDOM_VIA tristate "VIA HW Random Number Generator support" depends on X86 @@ -307,6 +321,19 @@ config HW_RANDOM_HISI If unsure, say Y. +config HW_RANDOM_HISI_V2 + tristate "HiSilicon True Random Number Generator V2 support" + depends on HW_RANDOM && ARM64 && ACPI + default HW_RANDOM + help + This driver provides kernel-side support for the True Random Number + Generator V2 hardware found on HiSilicon Hi1620 SoC. + + To compile this driver as a module, choose M here: the + module will be called hisi-trng-v2. + + If unsure, say Y. + config HW_RANDOM_ST tristate "ST Microelectronics HW Random Number Generator support" depends on HW_RANDOM && ARCH_STI @@ -424,6 +451,19 @@ config HW_RANDOM_EXYNOS will be called exynos-trng. If unsure, say Y. + +config HW_RANDOM_PHYTIUM + tristate "Phytium Random Number Generator support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Phytium SoCs. + + To compile this driver as a module, choose M here: the + module will be called phytium-rng. + + If unsure, say Y. 
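The HW_RANDOM_ZHAOXIN, HW_RANDOM_HISI_V2 and HW_RANDOM_PHYTIUM entries added above all gate drivers that register with the hwrng core; the HiSilicon and Phytium files further down do so from a platform-driver probe via devm_hwrng_register(). A minimal sketch of that platform flavour, with all example_* names as illustrative placeholders rather than code from the patch:

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* A real driver fills buf from hardware; return the number of bytes produced. */
	memset(buf, 0, max);		/* placeholder only */
	return max;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};

static int example_rng_probe(struct platform_device *pdev)
{
	/* devm_hwrng_register() unregisters the device automatically on driver detach. */
	return devm_hwrng_register(&pdev->dev, &example_rng);
}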
+ endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e35ec3ce3a20426afb7d464784242a7a22faebb0..d38a2dfb98dfe415565f8ba040286b2fd25b3c18 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o @@ -27,6 +28,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o +obj-$(CONFIG_HW_RANDOM_HISI_V2) += hisi-trng-v2.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o @@ -38,3 +40,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o +obj-$(CONFIG_HW_RANDOM_PHYTIUM) += phytium-rng.o diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index aaf9e5afaad435e2342a15fc963aa91367079957..e33b71659215bf7b2b9f57b3ec6720de077e6dc1 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -23,10 +23,13 @@ #include #include #include +#include #include #define RNG_MODULE_NAME "hw_random" +#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES) + static struct hwrng *current_rng; /* the current rng has been explicitly chosen by user via sysfs */ static int cur_rng_set_by_user; @@ -58,7 +61,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, static size_t rng_buffer_size(void) { - return SMP_CACHE_BYTES < 32 ? 
32 : SMP_CACHE_BYTES; + return RNG_BUFFER_SIZE; } static void add_early_randomness(struct hwrng *rng) @@ -67,7 +70,7 @@ static void add_early_randomness(struct hwrng *rng) size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_buffer, size, 1); + bytes_read = rng_get_data(rng, rng_buffer, size, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) add_device_randomness(rng_buffer, bytes_read); @@ -201,6 +204,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, static ssize_t rng_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *offp) { + u8 buffer[RNG_BUFFER_SIZE]; ssize_t ret = 0; int err = 0; int bytes_read, len; @@ -228,34 +232,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, if (bytes_read < 0) { err = bytes_read; goto out_unlock_reading; + } else if (bytes_read == 0 && + (filp->f_flags & O_NONBLOCK)) { + err = -EAGAIN; + goto out_unlock_reading; } + data_avail = bytes_read; } - if (!data_avail) { - if (filp->f_flags & O_NONBLOCK) { - err = -EAGAIN; - goto out_unlock_reading; - } - } else { - len = data_avail; + len = data_avail; + if (len) { if (len > size) len = size; data_avail -= len; - if (copy_to_user(buf + ret, rng_buffer + data_avail, - len)) { + memcpy(buffer, rng_buffer + data_avail, len); + } + mutex_unlock(&reading_mutex); + put_rng(rng); + + if (len) { + if (copy_to_user(buf + ret, buffer, len)) { err = -EFAULT; - goto out_unlock_reading; + goto out; } size -= len; ret += len; } - mutex_unlock(&reading_mutex); - put_rng(rng); if (need_resched()) schedule_timeout_interruptible(1); @@ -266,6 +273,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, } } out: + memzero_explicit(buffer, sizeof(buffer)); return ret ? : err; out_unlock_reading: diff --git a/drivers/char/hw_random/hisi-trng-v2.c b/drivers/char/hw_random/hisi-trng-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..17f7a3f9a2f0ee4b7f07852fe8fdf062d7acbde8 --- /dev/null +++ b/drivers/char/hw_random/hisi-trng-v2.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HISI_TRNG_SMC_CMD 0x83000109 +#define HISI_TRNG_SMC_BYTES 32 +#define HISI_TRNG_REG 0x00F0 +#define HISI_TRNG_BYTES 4 +#define HISI_TRNG_QUALITY 512 +#define SLEEP_US 10 +#define TIMEOUT_US 10000 +#define RAND_DATA_NORMAL 0 +#define RAND_DATA_POSTPRO 1 + +struct hisi_trng { + void __iomem *base; + struct hwrng rng; + void *va; + phys_addr_t pa; +}; + +static int data_mode_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0 || (n != RAND_DATA_NORMAL && n != RAND_DATA_POSTPRO)) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops data_mode_ops = { + .set = data_mode_set, + .get = param_get_int, +}; + +static int data_mode = RAND_DATA_NORMAL; +module_param_cb(data_mode, &data_mode_ops, &data_mode, 0444); +MODULE_PARM_DESC(data_mode, "Rand data with post process or not, 0(default), 1"); + +static int hisi_trng_read_v2(struct hwrng *rng, void *buf, size_t max, + bool wait) +{ + struct arm_smccc_res res = {0}; + struct hisi_trng *trng; + int currsize = 0; + + trng = container_of(rng, struct hisi_trng, rng); + + do { + arm_smccc_smc(HISI_TRNG_SMC_CMD, trng->pa, 0, 0, 0, 0, 0, 0, + &res); + if (res.a0) + return currsize; + + if (max - currsize >= HISI_TRNG_SMC_BYTES) { + memcpy(buf + currsize, trng->va, HISI_TRNG_SMC_BYTES); + currsize += HISI_TRNG_SMC_BYTES; + if (currsize == max) + return currsize; + continue; + } + + memcpy(buf + currsize, trng->va, max - currsize); + currsize = max; + } while (currsize < max); + + return currsize; +} + +static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct hisi_trng *trng; + int currsize = 0; + u32 val = 0; + u32 ret; + + trng = container_of(rng, struct hisi_trng, rng); + + do { + ret = readl_poll_timeout(trng->base + HISI_TRNG_REG, val, + val, SLEEP_US, TIMEOUT_US); + if (ret) + return currsize; + + if (max - currsize >= HISI_TRNG_BYTES) { + memcpy(buf + currsize, &val, HISI_TRNG_BYTES); + currsize += HISI_TRNG_BYTES; + if (currsize == max) + return currsize; + continue; + } + + /* copy remaining bytes */ + memcpy(buf + currsize, &val, max - currsize); + currsize = max; + } while (currsize < max); + + return currsize; +} + +static int hisi_trng_probe(struct platform_device *pdev) +{ + struct hisi_trng *trng; + struct resource *res; + int ret; + + trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); + if (!trng) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + trng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(trng->base)) + return PTR_ERR(trng->base); + + trng->rng.name = pdev->name; + trng->rng.quality = HISI_TRNG_QUALITY; + + if (data_mode) { + trng->rng.read = hisi_trng_read_v2; + trng->va = devm_kzalloc(&pdev->dev, HISI_TRNG_SMC_BYTES, + GFP_KERNEL); + if (!trng->va) + return -ENOMEM; + + trng->pa = virt_to_phys(trng->va); + } else + trng->rng.read = hisi_trng_read; + + ret = devm_hwrng_register(&pdev->dev, &trng->rng); + if (ret) + dev_err(&pdev->dev, "failed to register hwrng!\n"); + + return ret; +} + +static const struct acpi_device_id hisi_trng_acpi_match[] = { + { "HISI02B3", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match); + +static struct platform_driver hisi_trng_driver = { + .probe = hisi_trng_probe, + .driver = { + .name = "hisi-trng-v2", + .acpi_match_table = 
ACPI_PTR(hisi_trng_acpi_match), + }, +}; + +module_platform_driver(hisi_trng_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yuan Wang "); +MODULE_AUTHOR("Weili Qian "); +MODULE_AUTHOR("Zaibo Xu "); +MODULE_DESCRIPTION("HiSilicon true random number generator V2 driver"); diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index b65ff696289954a76ba9fcf2a706a4edd7aad320..49882524263421bdefea2ec69cd304eaf98a02c9 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -66,6 +66,13 @@ #define OMAP4_RNG_OUTPUT_SIZE 0x8 #define EIP76_RNG_OUTPUT_SIZE 0x10 +/* + * EIP76 RNG takes approx. 700us to produce 16 bytes of output data + * as per testing results. And to account for the lack of udelay()'s + * reliability, we keep the timeout as 1000us. + */ +#define RNG_DATA_FILL_TIMEOUT 100 + enum { RNG_OUTPUT_0_REG = 0, RNG_OUTPUT_1_REG, @@ -176,7 +183,7 @@ static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max, if (max < priv->pdata->data_size) return 0; - for (i = 0; i < 20; i++) { + for (i = 0; i < RNG_DATA_FILL_TIMEOUT; i++) { present = priv->pdata->data_present(priv); if (present || !wait) break; @@ -443,6 +450,7 @@ static int omap_rng_probe(struct platform_device *pdev) priv->rng.read = omap_rng_do_read; priv->rng.init = omap_rng_init; priv->rng.cleanup = omap_rng_cleanup; + priv->rng.quality = 900; priv->rng.priv = (unsigned long)priv; platform_set_drvdata(pdev, priv); diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 38b719017186ef4f27e3b93a013165f5d9bd8d5d..648e39ce6bd95bcfb34de1624d5240ef343badd9 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -121,7 +121,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev) { cancel_delayed_work_sync(&idle_work); hwrng_unregister(&omap3_rom_rng_ops); - clk_disable_unprepare(rng_clk); + if (!rng_idle) + clk_disable_unprepare(rng_clk); return 0; } diff --git a/drivers/char/hw_random/phytium-rng.c b/drivers/char/hw_random/phytium-rng.c new file mode 100644 index 0000000000000000000000000000000000000000..ef4c137c17cccfefc268c71d545836236eaa543f --- /dev/null +++ b/drivers/char/hw_random/phytium-rng.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC RNG Driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRNG_CR 0x00 +#define TRNG_CR_RNGEN BIT(0) +#define TRNG_CR_ROSEN_MASK GENMASK(7, 4) +#define TRNG_CR_DIEN BIT(16) +#define TRNG_CR_ERIEN BIT(17) +#define TRNG_CR_IRQEN BIT(24) +#define TRNG_MSEL 0x04 +#define TRNG_MSEL_MSEL BIT(0) +#define TRNG_SR 0x08 +#define TRNG_SR_HTF BIT(0) +#define TRNG_SR_DRDY BIT(1) +#define TRNG_SR_ERERR BIT(3) +#define TRNG_DR 0x0C +#define TRNG_RESEED 0x40 +#define TRNG_RESEED_RSED BIT(0) + +#define DELAY 10 +#define TIMEOUT 100 + +static int msel; +module_param(msel, int, 0444); +MODULE_PARM_DESC(msel, "Phytium RNG mode selection: 0 - TRNG. 1 - PRNG."); + +struct phytium_rng { + struct hwrng rng; + void __iomem *base; +}; + +static int phytium_rng_init(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + + /* Mode Selection */ + reg = msel ? 
TRNG_MSEL_MSEL : 0; + writel(reg, priv->base + TRNG_MSEL); + + /* If PRGN mode is on, do reseed operations */ + if (msel) + writel(TRNG_RESEED_RSED, priv->base + TRNG_RESEED); + + /* Clear status */ + writel(0x7, priv->base + TRNG_SR); + + /* Enable TRNG */ + reg = readl(priv->base + TRNG_CR) | TRNG_CR_ROSEN_MASK | TRNG_CR_RNGEN; + writel(reg, priv->base + TRNG_CR); + + return 0; +} + +static void phytium_rng_cleanup(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + + writel(0x7, priv->base + TRNG_SR); +} + +static int phytium_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + int ret = 0; + + /* TRNG can generate at most 8*32bit random number per time */ + max = max > 8 ? 8 : max; + + reg = readl(priv->base + TRNG_SR); + if (!(reg & TRNG_SR_DRDY) && wait) { + ret = readl_poll_timeout(priv->base + TRNG_SR, reg, + reg & TRNG_SR_DRDY, DELAY, TIMEOUT); + if (ret) { + dev_err((struct device *)priv->rng.priv, + "%s: timeout %x!\n", __func__, reg); + return -EIO; + } + } + + while (max > 4) { + *(u32 *)buf = readl(priv->base + TRNG_DR); + + ret += sizeof(u32); + buf += sizeof(u32); + max -= sizeof(u32); + } + + /* Clear DRDY by writing 1 */ + writel(reg | TRNG_SR_DRDY, priv->base + TRNG_SR); + + return ret; +} + +static int scto_rng_probe(struct platform_device *pdev) +{ + struct phytium_rng *priv; + struct resource *mem; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->rng.name = pdev->name; + priv->rng.init = phytium_rng_init; + priv->rng.cleanup = phytium_rng_cleanup; + priv->rng.read = phytium_rng_read; + priv->rng.priv = (unsigned long)&pdev->dev; + + return devm_hwrng_register(&pdev->dev, &priv->rng); +} + +static const struct of_device_id phytium_rng_dt_ids[] = { + { .compatible = "phytium,rng" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_rng_dt_ids); + +static struct platform_driver phytium_rng_driver = { + .probe = scto_rng_probe, + .driver = { + .name = "phytium-rng", + .of_match_table = of_match_ptr(phytium_rng_dt_ids), + } +}; +module_platform_driver(phytium_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium random number generator driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index 042860d97b1560a1943b9c28e57d48c1e77536b8..37b338a76ba4670a583260555573f27937fac1ae 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -169,6 +169,13 @@ static int stm32_rng_probe(struct platform_device *ofdev) return devm_hwrng_register(dev, &priv->rng); } +static int stm32_rng_remove(struct platform_device *ofdev) +{ + pm_runtime_disable(&ofdev->dev); + + return 0; +} + #ifdef CONFIG_PM static int stm32_rng_runtime_suspend(struct device *dev) { @@ -210,6 +217,7 @@ static struct platform_driver stm32_rng_driver = { .of_match_table = stm32_rng_match, }, .probe = stm32_rng_probe, + .remove = stm32_rng_remove, }; module_platform_driver(stm32_rng_driver); diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index ffe9b0c6c647c2ef897e8726043d79b5e2b48ca4..a76a70a170a342c2c9ce961177b6198aad6aa303 100644 --- a/drivers/char/hw_random/via-rng.c +++ 
b/drivers/char/hw_random/via-rng.c @@ -191,12 +191,17 @@ static struct hwrng via_rng = { .data_read = via_rng_data_read, }; +static struct x86_cpu_id via_rng_ids[] = { + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XSTORE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, via_rng_ids); static int __init mod_init(void) { int err; - if (!boot_cpu_has(X86_FEATURE_XSTORE)) + if (!x86_match_cpu(via_rng_ids)) return -ENODEV; pr_info("VIA RNG detected\n"); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index b89df66ea1aefd4659c50258ef28bb970eb45db7..7abd604e938c278825ba6efdf7c65190f3c0b5c2 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) if (!vi->busy) { vi->busy = true; - init_completion(&vi->have_data); + reinit_completion(&vi->have_data); register_buffer(vi, buf, size); } diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c new file mode 100644 index 0000000000000000000000000000000000000000..f4e1f58494af4bd8cc0111b36d6339106cffac0b --- /dev/null +++ b/drivers/char/hw_random/zhaoxin-rng.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RNG driver for Zhaoxin RNGs + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + ZHAOXIN_STRFILT_CNT_SHIFT = 16, + ZHAOXIN_STRFILT_FAIL = (1 << 15), + ZHAOXIN_STRFILT_ENABLE = (1 << 14), + ZHAOXIN_RAWBITS_ENABLE = (1 << 13), + ZHAOXIN_RNG_ENABLE = (1 << 6), + ZHAOXIN_NOISESRC1 = (1 << 8), + ZHAOXIN_NOISESRC2 = (1 << 9), + ZHAOXIN_XSTORE_CNT_MASK = 0x0F, + + ZHAOXIN_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + ZHAOXIN_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_4_MASK = 0xFFFFFFFF, + ZHAOXIN_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_2_MASK = 0xFFFF, + ZHAOXIN_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_1_MASK = 0xFF, +}; + +/* + * Investigate using the 'rep' prefix to obtain 32 bits of random data + * in one insn. The upside is potentially better performance. The + * downside is that the instruction becomes no longer atomic. Due to + * this, just like familiar issues with /dev/random itself, the worst + * case of a 'rep xstore' could potentially pause a cpu for an + * unreasonably long time. In practice, this condition would likely + * only occur when the hardware is failing. (or so we hope :)) + * + * Another possible performance boost may come from simply buffering + * until we have 4 bytes, thus returning a u32 at a time, + * instead of the current u8-at-a-time. + * + * Padlock instructions can generate a spurious DNA fault, but the + * kernel doesn't use CR0.TS, so this doesn't matter. + */ + +static inline u32 xstore(u32 *addr, u32 edx_in) +{ + u32 eax_out; + + asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" + : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr)); + + return eax_out; +} + +static int zhaoxin_rng_data_present(struct hwrng *rng, int wait) +{ + char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u32 *zhaoxin_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + u32 bytes_out; + int i; + + /* We choose the recommended 1-byte-per-instruction RNG rate, + * for greater randomness at the expense of speed. Larger + * values 2, 4, or 8 bytes-per-instruction yield greater + * speed at lesser randomness. 
+ * + * If you change this to another ZHAOXIN_CHUNK_n, you must also + * change the ->n_bytes values in rng_vendor_ops[] tables. + * ZHAOXIN_CHUNK_8 requires further code changes. + * + * A copy of MSR_ZHAOXIN_RNG is placed in eax_out when xstore + * completes. + */ + + for (i = 0; i < 20; i++) { + *zhaoxin_rng_datum = 0; /* paranoia, not really necessary */ + bytes_out = xstore(zhaoxin_rng_datum, ZHAOXIN_RNG_CHUNK_1); + bytes_out &= ZHAOXIN_XSTORE_CNT_MASK; + if (bytes_out || !wait) + break; + udelay(10); + } + rng->priv = *zhaoxin_rng_datum; + return bytes_out ? 1 : 0; +} + +static int zhaoxin_rng_data_read(struct hwrng *rng, u32 *data) +{ + u32 zhaoxin_rng_datum = (u32)rng->priv; + + *data = zhaoxin_rng_datum; + + return 1; +} + +static int zhaoxin_rng_init(struct hwrng *rng) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + /* + * Zhaoxin CPUs don't have the MSR_ZHAOXIN_RNG anymore. The RNG + * is always enabled if CPUID rng_en is set. There is no + * RNG configuration like it used to be the case in this + * register + */ + if (c->x86 > 6) { + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { + pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n"); + return -ENODEV; + } + return 0; + } + return 0; +} + + +static struct hwrng zhaoxin_rng = { + .name = "zhaoxin", + .init = zhaoxin_rng_init, + .data_present = zhaoxin_rng_data_present, + .data_read = zhaoxin_rng_data_read, +}; + +static struct x86_cpu_id zhaoxin_rng_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XSTORE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XSTORE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_ids); + +static int __init mod_init(void) +{ + int err; + + if (!x86_match_cpu(zhaoxin_rng_ids)) + return -ENODEV; + + pr_info("RNG detected\n"); + err = hwrng_register(&zhaoxin_rng); + if (err) { + pr_err(PFX "RNG registering failed (%d)\n", err); + goto out; + } +out: + return err; +} + +static void __exit mod_exit(void) +{ + hwrng_unregister(&zhaoxin_rng); +} + +module_init(mod_init); +module_exit(mod_exit); + +static struct x86_cpu_id __maybe_unused zhaoxin_rng_cpu_id[] = { + X86_FEATURE_MATCH(X86_FEATURE_XSTORE), + {} +}; + +MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPU with PadLock"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_id); diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 1a486aec99b6a9119784337545892253e70a724d..8250453129914690bcfaf53edd63fc3a44f5a398 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -207,7 +207,7 @@ static int handle_recv(struct ipmi_file_private *priv, struct list_head *entry; struct ipmi_recv_msg *msg; unsigned long flags; - int rv = 0; + int rv = 0, rv2 = 0; /* We claim a mutex because we don't want two users getting something from the queue at a time. 
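Both the via-rng change above and the new zhaoxin-rng.c follow the same autoload pattern: a struct x86_cpu_id table exported through MODULE_DEVICE_TABLE(x86cpu, ...) lets udev load the module on matching CPUs, while x86_match_cpu() makes the init path return -ENODEV everywhere else. A condensed sketch using the 4-field x86_cpu_id initializer from the via-rng hunk (the exact field layout varies by kernel version, which is why the two tables in this patch differ; example_* names are placeholders):

#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct x86_cpu_id example_rng_ids[] = {
	{ X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XSTORE },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_rng_ids);

static int __init example_rng_init(void)
{
	/* Bail out cleanly on CPUs that are not in the table. */
	if (!x86_match_cpu(example_rng_ids))
		return -ENODEV;

	return 0;
}
module_init(example_rng_init);

MODULE_LICENSE("GPL");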
@@ -250,7 +250,7 @@ static int handle_recv(struct ipmi_file_private *priv, if (msg->msg.data_len > 0) { if (rsp->msg.data_len < msg->msg.data_len) { - rv = -EMSGSIZE; + rv2 = -EMSGSIZE; if (trunc) msg->msg.data_len = rsp->msg.data_len; else @@ -274,7 +274,7 @@ static int handle_recv(struct ipmi_file_private *priv, mutex_unlock(&priv->recv_mutex); ipmi_free_recv_msg(msg); - return 0; + return rv2; recv_putback_on_err: /* If we got an error, put the message back onto diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index e2c143861b1e5aa7e4ee7899477750acb7fff358..cfdcb1422611a1240d6266757beef13a27534808 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -41,7 +41,7 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, unsigned int num_r = 1, size; struct property_entry p[5]; unsigned int pidx = 0; - char *name, *override; + char *name; int rv; enum si_type si_type; struct ipmi_dmi_info *info; @@ -49,11 +49,9 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, memset(p, 0, sizeof(p)); name = "dmi-ipmi-si"; - override = "ipmi_si"; switch (type) { case IPMI_DMI_TYPE_SSIF: name = "dmi-ipmi-ssif"; - override = "ipmi_ssif"; offset = 1; size = 1; si_type = SI_TYPE_INVALID; @@ -98,10 +96,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, pr_err("ipmi:dmi: Error allocation IPMI platform device\n"); return; } - pdev->driver_override = kasprintf(GFP_KERNEL, "%s", - override); - if (!pdev->driver_override) - goto err; if (type == IPMI_DMI_TYPE_SSIF) { p[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", base_addr); @@ -217,6 +211,10 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm) slave_addr = data[DMI_IPMI_SLAVEADDR]; memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long)); + if (!base_addr) { + pr_err("Base address is zero, assuming no IPMI interface\n"); + return; + } if (len >= DMI_IPMI_VER2_LENGTH) { if (type == IPMI_DMI_TYPE_SSIF) { offset = 0; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 7fc9612070a1f1abe43b489f226c84a9c4c50632..9aedd3588eccc9d85bea9d7fa3918d34efcd7c49 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #define PFX "IPMI message handler: " @@ -42,26 +44,8 @@ static void need_waiter(struct ipmi_smi *intf); static int handle_one_recv_msg(struct ipmi_smi *intf, struct ipmi_smi_msg *msg); -#ifdef DEBUG -static void ipmi_debug_msg(const char *title, unsigned char *data, - unsigned int len) -{ - int i, pos; - char buf[100]; - - pos = snprintf(buf, sizeof(buf), "%s: ", title); - for (i = 0; i < len; i++) - pos += snprintf(buf + pos, sizeof(buf) - pos, - " %2.2x", data[i]); - pr_debug("%s\n", buf); -} -#else -static void ipmi_debug_msg(const char *title, unsigned char *data, - unsigned int len) -{ } -#endif - -static int initialized; +static bool initialized; +static bool drvregistered; enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, @@ -211,8 +195,13 @@ struct ipmi_user { /* Does this interface receive IPMI events? */ bool gets_events; + + /* Free must run in process context for RCU cleanup. 
*/ + struct work_struct remove_work; }; +static struct workqueue_struct *remove_work_wq; + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) __acquires(user->release_barrier) { @@ -442,6 +431,8 @@ enum ipmi_stat_indexes { #define IPMI_IPMB_NUM_SEQ 64 struct ipmi_smi { + struct module *owner; + /* What interface number are we? */ int intf_num; @@ -611,7 +602,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); +static struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -719,7 +710,15 @@ struct watcher_entry { int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { struct ipmi_smi *intf; - int index; + int index, rv; + + /* + * Make sure the driver is actually initialized, this handles + * problems with initialization order. + */ + rv = ipmi_init_msghandler(); + if (rv) + return rv; mutex_lock(&smi_watchers_mutex); @@ -883,7 +882,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) if (user) { user->handler->ipmi_recv_hndl(msg, user->handler_data); - release_ipmi_user(msg->user, index); + release_ipmi_user(user, index); } else { /* User went away, give up. */ ipmi_free_recv_msg(msg); @@ -1068,6 +1067,15 @@ static int intf_err_seq(struct ipmi_smi *intf, } +static void free_user_work(struct work_struct *work) +{ + struct ipmi_user *user = container_of(work, struct ipmi_user, + remove_work); + + cleanup_srcu_struct(&user->release_barrier); + vfree(user); +} + int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, @@ -1075,7 +1083,7 @@ int ipmi_create_user(unsigned int if_num, { unsigned long flags; struct ipmi_user *new_user; - int rv = 0, index; + int rv, index; struct ipmi_smi *intf; /* @@ -1093,20 +1101,11 @@ int ipmi_create_user(unsigned int if_num, * Make sure the driver is actually initialized, this handles * problems with initialization order. */ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. - */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; - new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); + new_user = vzalloc(sizeof(*new_user)); if (!new_user) return -ENOMEM; @@ -1120,10 +1119,17 @@ int ipmi_create_user(unsigned int if_num, goto out_kfree; found: + INIT_WORK(&new_user->remove_work, free_user_work); + rv = init_srcu_struct(&new_user->release_barrier); if (rv) goto out_kfree; + if (!try_module_get(intf->owner)) { + rv = -ENODEV; + goto out_kfree; + } + /* Note that each existing user holds a refcount to the interface. */ kref_get(&intf->refcount); @@ -1148,7 +1154,7 @@ int ipmi_create_user(unsigned int if_num, out_kfree: srcu_read_unlock(&ipmi_interfaces_srcu, index); - kfree(new_user); + vfree(new_user); return rv; } EXPORT_SYMBOL(ipmi_create_user); @@ -1182,7 +1188,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - kfree(user); + + /* SRCU cleanup must happen in task context. 
*/ + queue_work(remove_work_wq, &user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) @@ -1192,6 +1200,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; + struct module *owner; if (!acquire_ipmi_user(user, &i)) { /* @@ -1251,14 +1260,15 @@ static void _ipmi_destroy_user(struct ipmi_user *user) kfree(rcvr); } + owner = intf->owner; kref_put(&intf->refcount, intf_free); + module_put(owner); } int ipmi_destroy_user(struct ipmi_user *user) { _ipmi_destroy_user(user); - cleanup_srcu_struct(&user->release_barrier); kref_put(&user->refcount, free_user); return 0; @@ -1297,10 +1307,12 @@ int ipmi_set_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].address = address; + } release_ipmi_user(user, index); return rv; @@ -1317,10 +1329,12 @@ int ipmi_get_my_address(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].address; + } release_ipmi_user(user, index); return rv; @@ -1337,13 +1351,15 @@ int ipmi_set_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); user->intf->addrinfo[channel].lun = LUN & 0x3; + } release_ipmi_user(user, index); - return 0; + return rv; } EXPORT_SYMBOL(ipmi_set_my_LUN); @@ -1357,10 +1373,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user, if (!user) return -ENODEV; - if (channel >= IPMI_MAX_CHANNELS) + if (channel >= IPMI_MAX_CHANNELS) { rv = -EINVAL; - else + } else { + channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); *address = user->intf->addrinfo[channel].lun; + } release_ipmi_user(user, index); return rv; @@ -2167,7 +2185,7 @@ static int i_ipmi_request(struct ipmi_user *user, ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); } else { - ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size); + pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); smi_send(intf, intf->handlers, smi_msg, priority); } @@ -2184,6 +2202,7 @@ static int check_addr(struct ipmi_smi *intf, { if (addr->channel >= IPMI_MAX_CHANNELS) return -EINVAL; + addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); *lun = intf->addrinfo[addr->channel].lun; *saddr = intf->addrinfo[addr->channel].address; return 0; @@ -2359,7 +2378,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) * been recently fetched, this will just use the cached data. Otherwise * it will run a new fetch. * - * Except for the first time this is called (in ipmi_register_smi()), + * Except for the first time this is called (in ipmi_add_smi()), * this will always return good data; */ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, @@ -2827,7 +2846,7 @@ cleanup_bmc_device(struct kref *ref) * with removing the device attributes while reading a device * attribute. 
*/ - schedule_work(&bmc->remove_work); + queue_work(remove_work_wq, &bmc->remove_work); } /* @@ -2932,8 +2951,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, bmc->pdev.name = "ipmi_bmc"; rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); - if (rv < 0) + if (rv < 0) { + kfree(bmc); goto out; + } + bmc->pdev.dev.driver = &ipmidriver.driver; bmc->pdev.id = rv; bmc->pdev.dev.release = release_bmc_device; @@ -2984,8 +3006,6 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->my_dev_name); - intf->my_dev_name = NULL; dev_err(intf->si_dev, PFX "Unable to create symlink to bmc: %d\n", rv); goto out_free_my_dev_name; @@ -3098,8 +3118,8 @@ static void __get_guid(struct ipmi_smi *intf) if (rv) /* Send failed, no GUID available. */ bmc->dyn_guid_set = 0; - - wait_event(intf->waitq, bmc->dyn_guid_set != 2); + else + wait_event(intf->waitq, bmc->dyn_guid_set != 2); /* dyn_guid_set makes the guid data available. */ smp_rmb(); @@ -3279,10 +3299,11 @@ static void redo_bmc_reg(struct work_struct *work) kref_put(&intf->refcount, intf_free); } -int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, - void *send_info, - struct device *si_dev, - unsigned char slave_addr) +int ipmi_add_smi(struct module *owner, + const struct ipmi_smi_handlers *handlers, + void *send_info, + struct device *si_dev, + unsigned char slave_addr) { int i, j; int rv; @@ -3294,17 +3315,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, * Make sure the driver is actually initialized, this handles * problems with initialization order. */ - if (!initialized) { - rv = ipmi_init_msghandler(); - if (rv) - return rv; - /* - * The init code doesn't return an error if it was turned - * off, but it won't initialize. Check that. - */ - if (!initialized) - return -ENODEV; - } + rv = ipmi_init_msghandler(); + if (rv) + return rv; intf = kzalloc(sizeof(*intf), GFP_KERNEL); if (!intf) @@ -3316,7 +3329,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, return rv; } - + intf->owner = owner; intf->bmc = &intf->tmp_bmc; INIT_LIST_HEAD(&intf->bmc->intfs); mutex_init(&intf->bmc->dyn_mutex); @@ -3423,18 +3436,22 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, return rv; } -EXPORT_SYMBOL(ipmi_register_smi); +EXPORT_SYMBOL(ipmi_add_smi); static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, unsigned char err) { + int rv; msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = err; msg->rsp_size = 3; - /* It's an error, so it will never requeue, no need to check return. */ - handle_one_recv_msg(intf, msg); + + /* This will never requeue, but it may ask us to free the message. */ + rv = handle_one_recv_msg(intf, msg); + if (rv == 0) + ipmi_free_smi_msg(msg); } static void cleanup_smi_msgs(struct ipmi_smi *intf) @@ -3639,7 +3656,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, msg->data[10] = ipmb_checksum(&msg->data[6], 4); msg->data_size = 11; - ipmi_debug_msg("Invalid command:", msg->data, msg->data_size); + pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); rcu_read_lock(); if (!intf->in_shutdown) { @@ -4126,7 +4143,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, int requeue; int chan; - ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); + pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); if (msg->rsp_size < 2) { /* Message is too small to be correct. 
*/ dev_warn(intf->si_dev, @@ -4484,7 +4501,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, smi_msg->data_size = recv_msg->msg.data_len; smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); - ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size); + pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); return smi_msg; } @@ -5020,6 +5037,22 @@ static int panic_event(struct notifier_block *this, return NOTIFY_DONE; } +/* Must be called with ipmi_interfaces_mutex held. */ +static int ipmi_register_driver(void) +{ + int rv; + + if (drvregistered) + return 0; + + rv = driver_register(&ipmidriver.driver); + if (rv) + pr_err("Could not register IPMI driver\n"); + else + drvregistered = true; + return rv; +} + static struct notifier_block panic_block = { .notifier_call = panic_event, .next = NULL, @@ -5030,66 +5063,88 @@ static int ipmi_init_msghandler(void) { int rv; + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + if (rv) + goto out; if (initialized) - return 0; + goto out; - rv = driver_register(&ipmidriver.driver); - if (rv) { - pr_err(PFX "Could not register IPMI driver\n"); - return rv; - } + rv = init_srcu_struct(&ipmi_interfaces_srcu); + if (rv) + goto out; - pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n"); + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); + if (!remove_work_wq) { + pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); + rv = -ENOMEM; + goto out_wq; + } timer_setup(&ipmi_timer, ipmi_timeout, 0); mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - initialized = 1; + initialized = true; - return 0; +out_wq: + if (rv) + cleanup_srcu_struct(&ipmi_interfaces_srcu); +out: + mutex_unlock(&ipmi_interfaces_mutex); + return rv; } static int __init ipmi_init_msghandler_mod(void) { - ipmi_init_msghandler(); - return 0; + int rv; + + pr_info("version " IPMI_DRIVER_VERSION "\n"); + + mutex_lock(&ipmi_interfaces_mutex); + rv = ipmi_register_driver(); + mutex_unlock(&ipmi_interfaces_mutex); + + return rv; } static void __exit cleanup_ipmi(void) { int count; - if (!initialized) - return; - - atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); - - /* - * This can't be called if any interfaces exist, so no worry - * about shutting down the interfaces. - */ - - /* - * Tell the timer to stop, then wait for it to stop. This - * avoids problems with race conditions removing the timer - * here. - */ - atomic_inc(&stop_operation); - del_timer_sync(&ipmi_timer); + if (initialized) { + destroy_workqueue(remove_work_wq); - driver_unregister(&ipmidriver.driver); + atomic_notifier_chain_unregister(&panic_notifier_list, + &panic_block); - initialized = 0; + /* + * This can't be called if any interfaces exist, so no worry + * about shutting down the interfaces. + */ - /* Check for buffer leaks. */ - count = atomic_read(&smi_msg_inuse_count); - if (count != 0) - pr_warn(PFX "SMI message count %d at exit\n", count); - count = atomic_read(&recv_msg_inuse_count); - if (count != 0) - pr_warn(PFX "recv message count %d at exit\n", count); + /* + * Tell the timer to stop, then wait for it to stop. This + * avoids problems with race conditions removing the timer + * here. + */ + atomic_inc(&stop_operation); + del_timer_sync(&ipmi_timer); + + initialized = false; + + /* Check for buffer leaks. 
*/ + count = atomic_read(&smi_msg_inuse_count); + if (count != 0) + pr_warn(PFX "SMI message count %d at exit\n", count); + count = atomic_read(&recv_msg_inuse_count); + if (count != 0) + pr_warn(PFX "recv message count %d at exit\n", count); + cleanup_srcu_struct(&ipmi_interfaces_srcu); + } + if (drvregistered) + driver_unregister(&ipmidriver.driver); } module_exit(cleanup_ipmi); diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index 52f6152d1fcbf628f6f4bee39d3a5a4b36e8b0c6..7ae52c17618ed754d1ddf244fd791833a053738b 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -25,7 +25,9 @@ void ipmi_irq_finish_setup(struct si_sm_io *io); int ipmi_si_remove_by_dev(struct device *dev); void ipmi_si_remove_by_data(int addr_space, enum si_type si_type, unsigned long addr); -int ipmi_si_hardcode_find_bmc(void); +void ipmi_hardcode_init(void); +void ipmi_si_hardcode_exit(void); +int ipmi_si_hardcode_match(int addr_type, unsigned long addr); void ipmi_si_platform_init(void); void ipmi_si_platform_shutdown(void); diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 10219f24546be0d6bccc965ef11393a8da681359..7abe549fd803d51c15dcc9a174fdc204d86df15a 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ #include +#include #include "ipmi_si.h" #define PFX "ipmi_hardcode: " @@ -11,23 +12,22 @@ #define SI_MAX_PARMS 4 -static char *si_type[SI_MAX_PARMS]; #define MAX_SI_TYPE_STR 30 -static char si_type_str[MAX_SI_TYPE_STR]; +static char si_type_str[MAX_SI_TYPE_STR] __initdata; static unsigned long addrs[SI_MAX_PARMS]; static unsigned int num_addrs; static unsigned int ports[SI_MAX_PARMS]; static unsigned int num_ports; -static int irqs[SI_MAX_PARMS]; -static unsigned int num_irqs; -static int regspacings[SI_MAX_PARMS]; -static unsigned int num_regspacings; -static int regsizes[SI_MAX_PARMS]; -static unsigned int num_regsizes; -static int regshifts[SI_MAX_PARMS]; -static unsigned int num_regshifts; -static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */ -static unsigned int num_slave_addrs; +static int irqs[SI_MAX_PARMS] __initdata; +static unsigned int num_irqs __initdata; +static int regspacings[SI_MAX_PARMS] __initdata; +static unsigned int num_regspacings __initdata; +static int regsizes[SI_MAX_PARMS] __initdata; +static unsigned int num_regsizes __initdata; +static int regshifts[SI_MAX_PARMS] __initdata; +static unsigned int num_regshifts __initdata; +static int slave_addrs[SI_MAX_PARMS] __initdata; +static unsigned int num_slave_addrs __initdata; module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); MODULE_PARM_DESC(type, "Defines the type of each interface, each" @@ -72,12 +72,135 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" " overridden by this parm. 
This is an array indexed" " by interface number."); -int ipmi_si_hardcode_find_bmc(void) +static struct platform_device *ipmi_hc_pdevs[SI_MAX_PARMS]; + +static void __init ipmi_hardcode_init_one(const char *si_type_str, + unsigned int i, + unsigned long addr, + unsigned int flags) +{ + struct platform_device *pdev; + unsigned int num_r = 1, size; + struct resource r[4]; + struct property_entry p[6]; + enum si_type si_type; + unsigned int regspacing, regsize; + int rv; + + memset(p, 0, sizeof(p)); + memset(r, 0, sizeof(r)); + + if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) { + size = 2; + si_type = SI_KCS; + } else if (strcmp(si_type_str, "smic") == 0) { + size = 2; + si_type = SI_SMIC; + } else if (strcmp(si_type_str, "bt") == 0) { + size = 3; + si_type = SI_BT; + } else if (strcmp(si_type_str, "invalid") == 0) { + /* + * Allow a firmware-specified interface to be + * disabled. + */ + size = 1; + si_type = SI_TYPE_INVALID; + } else { + pr_warn("Interface type specified for interface %d, was invalid: %s\n", + i, si_type_str); + return; + } + + regsize = regsizes[i]; + if (regsize == 0) + regsize = DEFAULT_REGSIZE; + + p[0] = PROPERTY_ENTRY_U8("ipmi-type", si_type); + p[1] = PROPERTY_ENTRY_U8("slave-addr", slave_addrs[i]); + p[2] = PROPERTY_ENTRY_U8("addr-source", SI_HARDCODED); + p[3] = PROPERTY_ENTRY_U8("reg-shift", regshifts[i]); + p[4] = PROPERTY_ENTRY_U8("reg-size", regsize); + /* Last entry must be left NULL to terminate it. */ + + /* + * Register spacing is derived from the resources in + * the IPMI platform code. + */ + regspacing = regspacings[i]; + if (regspacing == 0) + regspacing = regsize; + + r[0].start = addr; + r[0].end = r[0].start + regsize - 1; + r[0].name = "IPMI Address 1"; + r[0].flags = flags; + + if (size > 1) { + r[1].start = r[0].start + regspacing; + r[1].end = r[1].start + regsize - 1; + r[1].name = "IPMI Address 2"; + r[1].flags = flags; + num_r++; + } + + if (size > 2) { + r[2].start = r[1].start + regspacing; + r[2].end = r[2].start + regsize - 1; + r[2].name = "IPMI Address 3"; + r[2].flags = flags; + num_r++; + } + + if (irqs[i]) { + r[num_r].start = irqs[i]; + r[num_r].end = irqs[i]; + r[num_r].name = "IPMI IRQ"; + r[num_r].flags = IORESOURCE_IRQ; + num_r++; + } + + pdev = platform_device_alloc("hardcode-ipmi-si", i); + if (!pdev) { + pr_err("Error allocating IPMI platform device %d\n", i); + return; + } + + rv = platform_device_add_resources(pdev, r, num_r); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code resources: %d\n", rv); + goto err; + } + + rv = platform_device_add_properties(pdev, p); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code properties: %d\n", rv); + goto err; + } + + rv = platform_device_add(pdev); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code device: %d\n", rv); + goto err; + } + + ipmi_hc_pdevs[i] = pdev; + return; + +err: + platform_device_put(pdev); +} + +void __init ipmi_hardcode_init(void) { - int ret = -ENODEV; - int i; - struct si_sm_io io; + unsigned int i; char *str; + char *si_type[SI_MAX_PARMS]; + + memset(si_type, 0, sizeof(si_type)); /* Parse out the si_type string into its components. 
*/ str = si_type_str; @@ -94,54 +217,45 @@ int ipmi_si_hardcode_find_bmc(void) } } - memset(&io, 0, sizeof(io)); for (i = 0; i < SI_MAX_PARMS; i++) { - if (!ports[i] && !addrs[i]) - continue; - - io.addr_source = SI_HARDCODED; - pr_info(PFX "probing via hardcoded address\n"); - - if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { - io.si_type = SI_KCS; - } else if (strcmp(si_type[i], "smic") == 0) { - io.si_type = SI_SMIC; - } else if (strcmp(si_type[i], "bt") == 0) { - io.si_type = SI_BT; - } else { - pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n", - i, si_type[i]); - continue; - } - if (ports[i]) { - /* An I/O port */ - io.addr_data = ports[i]; - io.addr_type = IPMI_IO_ADDR_SPACE; - } else if (addrs[i]) { - /* A memory port */ - io.addr_data = addrs[i]; - io.addr_type = IPMI_MEM_ADDR_SPACE; - } else { - pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n", - i); - continue; - } + if (i < num_ports && ports[i]) + ipmi_hardcode_init_one(si_type[i], i, ports[i], + IORESOURCE_IO); + if (i < num_addrs && addrs[i]) + ipmi_hardcode_init_one(si_type[i], i, addrs[i], + IORESOURCE_MEM); + } +} + +void ipmi_si_hardcode_exit(void) +{ + unsigned int i; + for (i = 0; i < SI_MAX_PARMS; i++) { + if (ipmi_hc_pdevs[i]) + platform_device_unregister(ipmi_hc_pdevs[i]); + } +} - io.addr = NULL; - io.regspacing = regspacings[i]; - if (!io.regspacing) - io.regspacing = DEFAULT_REGSPACING; - io.regsize = regsizes[i]; - if (!io.regsize) - io.regsize = DEFAULT_REGSIZE; - io.regshift = regshifts[i]; - io.irq = irqs[i]; - if (io.irq) - io.irq_setup = ipmi_std_irq_setup; - io.slave_addr = slave_addrs[i]; - - ret = ipmi_si_add_smi(&io); +/* + * Returns true of the given address exists as a hardcoded address, + * false if not. + */ +int ipmi_si_hardcode_match(int addr_type, unsigned long addr) +{ + unsigned int i; + + if (addr_type == IPMI_IO_ADDR_SPACE) { + for (i = 0; i < num_ports; i++) { + if (ports[i] == addr) + return 1; + } + } else { + for (i = 0; i < num_addrs; i++) { + if (addrs[i] == addr) + return 1; + } } - return ret; + + return 0; } diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5faa917df1b629647cb7fecd291a1d1b8d80eae9..955bb53def9011ec81c6d3fe6a60558140ca5ccd 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -221,6 +221,9 @@ struct smi_info { */ bool irq_enable_broken; + /* Is the driver in maintenance mode? */ + bool in_maintenance_mode; + /* * Did we get an attention that we did not handle? */ @@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data) spin_unlock_irqrestore(&(smi_info->si_lock), flags); busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, &busy_until); - if (smi_result == SI_SM_CALL_WITHOUT_DELAY) + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { ; /* do nothing */ - else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) - schedule(); - else if (smi_result == SI_SM_IDLE) { + } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) { + /* + * In maintenance mode we run as fast as + * possible to allow firmware updates to + * complete as fast as possible, but normally + * don't bang on the scheduler. 
+ */ + if (smi_info->in_maintenance_mode) + schedule(); + else + usleep_range(100, 200); + } else if (smi_result == SI_SM_IDLE) { if (atomic_read(&smi_info->need_watch)) { schedule_timeout_interruptible(100); } else { @@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data) __set_current_state(TASK_INTERRUPTIBLE); schedule(); } - } else + } else { schedule_timeout_interruptible(1); + } } return 0; } @@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable) if (!enable) atomic_set(&smi_info->req_events, 0); + smi_info->in_maintenance_mode = enable; } static void shutdown_smi(void *send_info); @@ -1831,8 +1845,7 @@ static inline void stop_timer_and_thread(struct smi_info *smi_info) } smi_info->timer_can_start = false; - if (smi_info->timer_running) - del_timer_sync(&smi_info->si_timer); + del_timer_sync(&smi_info->si_timer); } static struct smi_info *find_dup_si(struct smi_info *info) @@ -1862,6 +1875,18 @@ int ipmi_si_add_smi(struct si_sm_io *io) int rv = 0; struct smi_info *new_smi, *dup; + /* + * If the user gave us a hard-coded device at the same + * address, they presumably want us to use it and not what is + * in the firmware. + */ + if (io->addr_source != SI_HARDCODED && + ipmi_si_hardcode_match(io->addr_type, io->addr_data)) { + dev_info(io->dev, + "Hard-coded device at this address already exists"); + return -ENODEV; + } + if (!io->io_setup) { if (io->addr_type == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; @@ -2085,11 +2110,21 @@ static int try_smi_init(struct smi_info *new_smi) WARN_ON(new_smi->io.dev->init_name != NULL); out_err: + if (rv && new_smi->io.io_cleanup) { + new_smi->io.io_cleanup(&new_smi->io); + new_smi->io.io_cleanup = NULL; + } + + if (rv && new_smi->si_sm) { + kfree(new_smi->si_sm); + new_smi->si_sm = NULL; + } + kfree(init_name); return rv; } -static int init_ipmi_si(void) +static int __init init_ipmi_si(void) { struct smi_info *e; enum ipmi_addr_src type = SI_INVALID; @@ -2097,11 +2132,9 @@ static int init_ipmi_si(void) if (initialized) return 0; - pr_info("IPMI System Interface driver.\n"); + ipmi_hardcode_init(); - /* If the user gave us a device, they presumably want us to use it */ - if (!ipmi_si_hardcode_find_bmc()) - goto do_scan; + pr_info("IPMI System Interface driver\n"); ipmi_si_platform_init(); @@ -2113,7 +2146,6 @@ static int init_ipmi_si(void) with multiple BMCs we assume that there will be several instances of a given type so if we succeed in registering a type then also try to register everything else of the same type */ -do_scan: mutex_lock(&smi_infos_lock); list_for_each_entry(e, &smi_infos, link) { /* Try to register a device if it has an IRQ and we either @@ -2160,6 +2192,20 @@ static int init_ipmi_si(void) } module_init(init_ipmi_si); +static void wait_msg_processed(struct smi_info *smi_info) +{ + unsigned long jiffies_now; + long time_diff; + + while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { + jiffies_now = jiffies; + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_event_handler(smi_info, time_diff); + schedule_timeout_uninterruptible(1); + } +} + static void shutdown_smi(void *send_info) { struct smi_info *smi_info = send_info; @@ -2194,16 +2240,13 @@ static void shutdown_smi(void *send_info) * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. 
*/ - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + wait_msg_processed(smi_info); + if (smi_info->handlers) disable_si_irq(smi_info); - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + + wait_msg_processed(smi_info); + if (smi_info->handlers) smi_info->handlers->cleanup(smi_info->si_sm); @@ -2299,6 +2342,8 @@ static void cleanup_ipmi_si(void) list_for_each_entry_safe(e, tmp_e, &smi_infos, link) cleanup_one_si(e); mutex_unlock(&smi_infos_lock); + + ipmi_si_hardcode_exit(); } module_exit(cleanup_ipmi_si); diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index 1b869d530884c47c4763a29019339d91866d3da6..bb4ed90a115352f19269235724dbbd624ffc8448 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -3,9 +3,75 @@ #include #include "ipmi_si.h" +#ifdef CONFIG_ARM_GIC_PHYTIUM_2500 +#include + +#define CTL_RST_FUNC_ID 0xC2000011 + +static bool apply_phytium2500_workaround; + +struct ipmi_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; +}; + +static struct ipmi_workaround_oem_info wa_info[] = { + { + .oem_id = "KPSVVJ", + } +}; + +static void ipmi_check_phytium_workaround(void) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header tbl; + int i; + + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl))) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + apply_phytium2500_workaround = true; + break; + } +#endif +} + +static void ctl_smc(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + struct arm_smccc_res res; + + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res); + if (res.a0 != 0) + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n", + (int)res.a0, arg2); +} + +static void ctl_timeout_reset(void) +{ + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1); + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1); +} + +static inline void ipmi_phytium_workaround(void) +{ + if (apply_phytium2500_workaround) + ctl_timeout_reset(); +} + +#else +static inline void ipmi_check_phytium_workaround(void) {} +static inline void ipmi_phytium_workaround(void) {} +#endif + static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return readb((io->addr)+(offset * io->regspacing)); } @@ -18,6 +84,8 @@ static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -31,6 +99,8 @@ static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -44,6 +114,8 @@ static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -51,7 +123,7 @@ static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) static void mem_outq(const struct si_sm_io *io, unsigned 
int offset, unsigned char b) { - writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); + writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing)); } #endif @@ -81,7 +153,7 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; - io->io_cleanup = mem_cleanup; + ipmi_check_phytium_workaround(); /* * Figure out the actual readb/readw/readl/etc routine to use based @@ -141,5 +213,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) mem_region_cleanup(io, io->io_size); return -EIO; } + + io->io_cleanup = mem_cleanup; + return 0; } diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index f54ca6869ed2c3f62d3c2d31daea3c5044642b9f..022e03634ce2ac0a987bace4eeaff5f8f6d45fed 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c @@ -120,6 +120,8 @@ static int ipmi_pci_probe(struct pci_dev *pdev, } io.addr_data = pci_resource_start(pdev, 0); + io.dev = &pdev->dev; + io.regspacing = ipmi_pci_probe_regspacing(&io); io.regsize = DEFAULT_REGSIZE; io.regshift = 0; @@ -128,8 +130,6 @@ static int ipmi_pci_probe(struct pci_dev *pdev, if (io.irq) io.irq_setup = ipmi_std_irq_setup; - io.dev = &pdev->dev; - dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", &pdev->resource[0], io.regsize, io.regspacing, io.irq); diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index bf69927502bd2a079546218c5f281dc9b5703bc9..08d941e791eb16dbb5f5b74f3379b7aa889a1c70 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -17,6 +17,7 @@ #define PFX "ipmi_platform: " +static bool platform_registered; static bool si_tryplatform = true; #ifdef CONFIG_ACPI static bool si_tryacpi = true; @@ -126,8 +127,6 @@ ipmi_get_info_from_resources(struct platform_device *pdev, if (res_second->start > io->addr_data) io->regspacing = res_second->start - io->addr_data; } - io->regsize = DEFAULT_REGSIZE; - io->regshift = 0; return res; } @@ -135,7 +134,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev, static int platform_ipmi_probe(struct platform_device *pdev) { struct si_sm_io io; - u8 type, slave_addr, addr_source; + u8 type, slave_addr, addr_source, regsize, regshift; int rv; rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source); @@ -147,7 +146,7 @@ static int platform_ipmi_probe(struct platform_device *pdev) if (addr_source == SI_SMBIOS) { if (!si_trydmi) return -ENODEV; - } else { + } else if (addr_source != SI_HARDCODED) { if (!si_tryplatform) return -ENODEV; } @@ -167,11 +166,23 @@ static int platform_ipmi_probe(struct platform_device *pdev) case SI_BT: io.si_type = type; break; + case SI_TYPE_INVALID: /* User disabled this in hardcode. */ + return -ENODEV; default: dev_err(&pdev->dev, "ipmi-type property is invalid\n"); return -EINVAL; } + io.regsize = DEFAULT_REGSIZE; + rv = device_property_read_u8(&pdev->dev, "reg-size", ®size); + if (!rv) + io.regsize = regsize; + + io.regshift = 0; + rv = device_property_read_u8(&pdev->dev, "reg-shift", ®shift); + if (!rv) + io.regshift = regshift; + if (!ipmi_get_info_from_resources(pdev, &io)) return -EINVAL; @@ -191,7 +202,8 @@ static int platform_ipmi_probe(struct platform_device *pdev) io.dev = &pdev->dev; - pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", + pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n", + ipmi_addr_src_to_str(addr_source), (io.addr_type == IPMI_IO_ADDR_SPACE) ? 
"io" : "mem", io.addr_data, io.regsize, io.regspacing, io.irq); @@ -356,6 +368,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev) goto err_free; } + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + res = ipmi_get_info_from_resources(pdev, &io); if (!res) { rv = -EINVAL; @@ -417,6 +432,12 @@ static int ipmi_remove(struct platform_device *pdev) return ipmi_si_remove_by_dev(&pdev->dev); } +static const struct platform_device_id si_plat_ids[] = { + { "dmi-ipmi-si", 0 }, + { "hardcode-ipmi-si", 0 }, + { } +}; + struct platform_driver ipmi_platform_driver = { .driver = { .name = DEVICE_NAME, @@ -425,6 +446,7 @@ struct platform_driver ipmi_platform_driver = { }, .probe = ipmi_probe, .remove = ipmi_remove, + .id_table = si_plat_ids }; void ipmi_si_platform_init(void) @@ -432,9 +454,12 @@ void ipmi_si_platform_init(void) int rv = platform_driver_register(&ipmi_platform_driver); if (rv) pr_err(PFX "Unable to register driver: %d\n", rv); + else + platform_registered = true; } void ipmi_si_platform_shutdown(void) { - platform_driver_unregister(&ipmi_platform_driver); + if (platform_registered) + platform_driver_unregister(&ipmi_platform_driver); } diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c index ef6dffcea9fa698827fc4cf766cc8e4397b7a287..03924c32b6e98035ad3b873c4d19320d8aa89567 100644 --- a/drivers/char/ipmi/ipmi_si_port_io.c +++ b/drivers/char/ipmi/ipmi_si_port_io.c @@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io) if (!addr) return -ENODEV; - io->io_cleanup = port_cleanup; - /* * Figure out the actual inb/inw/inl/etc routine to use based * upon the register size. @@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io) return -EIO; } } + + io->io_cleanup = port_cleanup; + return 0; } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 29e67a80fb208f804e4ed1fb0a560142002c7ffc..3160f6565f330ca8d54bf6cb6e53e81f5ad33a94 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -88,8 +88,14 @@ #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) +/* + * Timeout for the watch, only used for get flag timer. + */ +#define SSIF_WATCH_TIMEOUT_MSEC 100 +#define SSIF_WATCH_TIMEOUT_JIFFIES msecs_to_jiffies(SSIF_WATCH_TIMEOUT_MSEC) + enum ssif_intf_state { - SSIF_NORMAL, + SSIF_IDLE, SSIF_GETTING_FLAGS, SSIF_GETTING_EVENTS, SSIF_CLEARING_FLAGS, @@ -97,8 +103,8 @@ enum ssif_intf_state { /* FIXME - add watchdog stuff. */ }; -#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \ - && (ssif)->curr_msg == NULL) +#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \ + && (ssif)->curr_msg == NULL) /* * Indexes into stats[] in ssif_info below. @@ -268,6 +274,9 @@ struct ssif_info { struct timer_list retry_timer; int retries_left; + bool need_watch; /* Need to look for flags? */ + struct timer_list watch_timer; /* Flag fetch timer. */ + /* Info from SSIF cmd */ unsigned char max_xmit_msg_size; unsigned char max_recv_msg_size; @@ -290,6 +299,7 @@ struct ssif_info { ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat])) static bool initialized; +static bool platform_registered; static void return_hosed_msg(struct ssif_info *ssif_info, struct ipmi_smi_msg *msg); @@ -340,9 +350,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. 
Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -359,7 +369,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) if (start_send(ssif_info, msg, 3) != 0) { /* Error, just go to normal state. */ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } } @@ -374,7 +384,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) mb[0] = (IPMI_NETFN_APP_REQUEST << 2); mb[1] = IPMI_GET_MSG_FLAGS_CMD; if (start_send(ssif_info, mb, 2) != 0) - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, @@ -385,7 +395,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->curr_msg = NULL; - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); ipmi_free_smi_msg(msg); } @@ -399,7 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -422,7 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -440,9 +450,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -458,7 +468,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) /* Events available. 
*/ start_event_fetch(ssif_info, flags); else { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); } } @@ -558,6 +568,26 @@ static void retry_timeout(struct timer_list *t) start_get(ssif_info); } +static void watch_timeout(struct timer_list *t) +{ + struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer); + unsigned long oflags, *flags; + + if (ssif_info->stopping) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (ssif_info->need_watch) { + mod_timer(&ssif_info->watch_timer, + jiffies + SSIF_WATCH_TIMEOUT_JIFFIES); + if (IS_SSIF_IDLE(ssif_info)) { + start_flag_fetch(ssif_info, flags); /* Releases lock */ + return; + } + ssif_info->req_flags = true; + } + ipmi_ssif_unlock_cond(ssif_info, flags); +} static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, unsigned int data) @@ -606,8 +636,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->waiting_alert = true; ssif_info->rtc_us_timer = SSIF_MSG_USEC; - mod_timer(&ssif_info->retry_timer, - jiffies + SSIF_MSG_JIFFIES); + if (!ssif_info->stopping) + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -629,8 +660,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* Remove the multi-part read marker. */ len -= 2; + data += 2; for (i = 0; i < len; i++) - ssif_info->data[i] = data[i+2]; + ssif_info->data[i] = data[i]; ssif_info->multi_len = len; ssif_info->multi_pos = 1; @@ -658,8 +690,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } blocknum = data[0]; + len--; + data++; - if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { + if (blocknum != 0xff && len != 31) { + /* All blocks but the last must have 31 data bytes. */ + result = -EIO; + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + pr_info("Received middle message <31\n"); + + goto continue_op; + } + + if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) @@ -668,10 +711,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, goto continue_op; } - /* Remove the blocknum from the data. */ - len--; for (i = 0; i < len; i++) - ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; + ssif_info->data[i + ssif_info->multi_len] = data[i]; ssif_info->multi_len += len; if (blocknum == 0xff) { /* End of read */ @@ -683,6 +724,10 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, * numbers start at zero for the second block, * but multi_pos starts at one, so the +1. 
*/ + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) + dev_dbg(&ssif_info->client->dev, + "Received message out of sequence, expected %u, got %u\n", + ssif_info->multi_pos - 1, blocknum); result = -EIO; } else { ssif_inc_stat(ssif_info, received_message_parts); @@ -705,6 +750,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } } + continue_op: if (result < 0) { ssif_inc_stat(ssif_info, receive_errors); } else { @@ -712,8 +758,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, received_message_parts); } - - continue_op: if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) pr_info(PFX "DONE 1: state = %d, result=%d.\n", ssif_info->ssif_state, result); @@ -721,15 +765,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); msg = ssif_info->curr_msg; if (msg) { + if (data) { + if (len > IPMI_MAX_MSG_LENGTH) + len = IPMI_MAX_MSG_LENGTH; + memcpy(msg->rsp, data, len); + } else { + len = 0; + } msg->rsp_size = len; - if (msg->rsp_size > IPMI_MAX_MSG_LENGTH) - msg->rsp_size = IPMI_MAX_MSG_LENGTH; - memcpy(msg->rsp, data, msg->rsp_size); ssif_info->curr_msg = NULL; } switch (ssif_info->ssif_state) { - case SSIF_NORMAL: + case SSIF_IDLE: ipmi_ssif_unlock_cond(ssif_info, flags); if (!msg) break; @@ -747,16 +795,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, * Error fetching flags, or invalid length, * just give up for now. */ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Error getting flags: %d %d, %x\n", result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { /* - * Don't abort here, maybe it was a queued - * response to a previous command. + * Recv error response, give up. 
*/ + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Invalid response getting flags: %x %x\n", data[0], data[1]); @@ -778,7 +826,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, pr_warn(PFX "Invalid response clearing flags: %x %x\n", data[0], data[1]); } - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); break; @@ -831,7 +879,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { + if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) { if (ssif_info->req_events) start_event_fetch(ssif_info, flags); else if (ssif_info->req_flags) @@ -939,8 +987,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; - mod_timer(&ssif_info->retry_timer, - jiffies + SSIF_MSG_PART_JIFFIES); + if (!ssif_info->stopping) + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_PART_JIFFIES); ipmi_ssif_unlock_cond(ssif_info, flags); } } @@ -999,7 +1048,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) unsigned long oflags; restart: - if (!SSIF_IDLE(ssif_info)) { + if (!IS_SSIF_IDLE(ssif_info)) { ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -1061,8 +1110,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data) } /* - * Instead of having our own timer to periodically check the message - * flags, we let the message handler drive us. + * Upper layer wants us to request events. */ static void request_events(void *send_info) { @@ -1073,18 +1121,27 @@ static void request_events(void *send_info) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - /* - * Request flags first, not events, because the lower layer - * doesn't have a way to send an attention. But make sure - * event checking still happens. - */ ssif_info->req_events = true; - if (SSIF_IDLE(ssif_info)) - start_flag_fetch(ssif_info, flags); - else { - ssif_info->req_flags = true; - ipmi_ssif_unlock_cond(ssif_info, flags); + ipmi_ssif_unlock_cond(ssif_info, flags); +} + +/* + * Upper layer is changing the flag saying whether we need to request + * flags periodically or not. + */ +static void ssif_set_need_watch(void *send_info, bool enable) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (enable != ssif_info->need_watch) { + ssif_info->need_watch = enable; + if (ssif_info->need_watch) + mod_timer(&ssif_info->watch_timer, + jiffies + SSIF_WATCH_TIMEOUT_JIFFIES); } + ipmi_ssif_unlock_cond(ssif_info, flags); } static int ssif_start_processing(void *send_info, @@ -1207,10 +1264,11 @@ static void shutdown_ssif(void *send_info) dev_set_drvdata(&ssif_info->client->dev, NULL); /* make sure the driver is not looking for flags any more. 
*/ - while (ssif_info->ssif_state != SSIF_NORMAL) + while (ssif_info->ssif_state != SSIF_IDLE) schedule_timeout(1); ssif_info->stopping = true; + del_timer_sync(&ssif_info->watch_timer); del_timer_sync(&ssif_info->retry_timer); if (ssif_info->thread) { complete(&ssif_info->wake_thread); @@ -1588,8 +1646,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } spin_lock_init(&ssif_info->lock); - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; timer_setup(&ssif_info->retry_timer, retry_timeout, 0); + timer_setup(&ssif_info->watch_timer, watch_timeout, 0); for (i = 0; i < SSIF_NUM_STATS; i++) atomic_set(&ssif_info->stats[i], 0); @@ -1603,6 +1662,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info->handlers.get_smi_info = get_smi_info; ssif_info->handlers.sender = sender; ssif_info->handlers.request_events = request_events; + ssif_info->handlers.set_need_watch = ssif_set_need_watch; { unsigned int thread_num; @@ -1847,12 +1907,18 @@ static int ssif_platform_remove(struct platform_device *dev) return 0; } +static const struct platform_device_id ssif_plat_ids[] = { + { "dmi-ipmi-ssif", 0 }, + { } +}; + static struct platform_driver ipmi_driver = { .driver = { .name = DEVICE_NAME, }, .probe = ssif_platform_probe, .remove = ssif_platform_remove, + .id_table = ssif_plat_ids }; static int init_ipmi_ssif(void) @@ -1884,6 +1950,8 @@ static int init_ipmi_ssif(void) rv = platform_driver_register(&ipmi_driver); if (rv) pr_err(PFX "Unable to register driver: %d\n", rv); + else + platform_registered = true; } ssif_i2c_driver.address_list = ssif_address_list(); @@ -1905,7 +1973,10 @@ static void cleanup_ipmi_ssif(void) i2c_del_driver(&ssif_i2c_driver); - platform_driver_unregister(&ipmi_driver); + kfree(ssif_i2c_driver.address_list); + + if (ssif_trydmi && platform_registered) + platform_driver_unregister(&ipmi_driver); free_ssif_clients(); } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index ca1c5c5109f04f90c096929d0153ad27326288e5..f016d54b25929c4168b44b05e66807a749277583 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -366,16 +366,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, data[0] = 0; WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); - if ((ipmi_version_major > 1) - || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { - /* This is an IPMI 1.5-only feature. */ - data[0] |= WDOG_DONT_STOP_ON_SET; - } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - /* - * In ipmi 1.0, setting the timer stops the watchdog, we - * need to start it back up again. - */ - hbnow = 1; + if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { + if ((ipmi_version_major > 1) || + ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { + /* This is an IPMI 1.5-only feature. */ + data[0] |= WDOG_DONT_STOP_ON_SET; + } else { + /* + * In ipmi 1.0, setting the timer stops the watchdog, we + * need to start it back up again. 
+ */ + hbnow = 1; + } } data[1] = 0; diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index e6124bd548df211317e7a69fa42da99849382e69..ed4dc3b1843e3a06995750deda672b5bb830444a 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -440,12 +440,13 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); - if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer) - return NULL; kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", DEVICE_NAME, channel); + if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer || + !kcs_bmc->miscdev.name) + return NULL; kcs_bmc->miscdev.fops = &kcs_bmc_fops; return kcs_bmc; diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 8c4dd1a3bb6ab562136e32bad330fd1da2fdb1c5..767740d758dd6f916a99719ad447a1fb806db763 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -708,6 +708,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg) if (copy_from_user(karg, arg, sizeof(karg))) return -EFAULT; + /* sparc64 suseconds_t is 32-bit only */ + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + karg[1] >>= 32; + return lp_set_timeout(minor, karg[0], karg[1]); } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 7b4e4de778e45f7900732a243f6d53f783089d32..9eb564c002f66e221646c5225e69ff71b38f913e 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) } #endif +static inline bool should_stop_iteration(void) +{ + if (need_resched()) + cond_resched(); + return fatal_signal_pending(current); +} + /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. 
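For context on the drivers/char/mem.c hunks immediately above and below: the new should_stop_iteration() helper is polled inside the long /dev/mem and /dev/kmem copy loops so that a large transfer yields the CPU when rescheduling is requested and bails out once a fatal signal is pending. The following is a minimal, self-contained userspace sketch of that pattern, not the driver's actual code; the stub functions and the copy_in_chunks() loop are hypothetical stand-ins for the kernel's need_resched()/cond_resched()/fatal_signal_pending(current) and the bounce-buffer loops patched below.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stand-ins for the kernel helpers; in the driver these are
 * need_resched(), cond_resched() and fatal_signal_pending(current). */
static bool need_resched_stub(void) { return false; }
static void cond_resched_stub(void) { }
static bool fatal_signal_pending_stub(void) { return false; }

/* Same shape as the helper added in drivers/char/mem.c: yield the CPU
 * if asked to, and report whether the caller should abort its loop. */
static bool should_stop_iteration(void)
{
	if (need_resched_stub())
		cond_resched_stub();
	return fatal_signal_pending_stub();
}

/* Hypothetical bulk copy loop showing where the check slots in:
 * once per chunk, before starting the next one. */
static size_t copy_in_chunks(char *dst, const char *src, size_t count)
{
	size_t done = 0;

	while (count) {
		size_t sz = count > 4096 ? 4096 : count; /* one "page" at a time */

		memcpy(dst + done, src + done, sz);
		done += sz;
		count -= sz;

		if (should_stop_iteration())
			break; /* fatal signal pending: return what was copied so far */
	}
	return done;
}

The hunks that follow wire the real helper into read_mem(), write_mem(), read_kmem(), do_write_kmem() and write_kmem() in exactly this position, at the bottom of each per-chunk iteration.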
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, p += sz; count -= sz; read += sz; + if (should_stop_iteration()) + break; } kfree(bounce); @@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf, read += sz; low_count -= sz; count -= sz; + if (should_stop_iteration()) { + count = 0; + break; + } } } @@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, buf += sz; read += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } @@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, buf += sz; virtr += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } @@ -609,7 +630,7 @@ static ssize_t read_port(struct file *file, char __user *buf, unsigned long i = *ppos; char __user *tmp = buf; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { if (__put_user(inb(i), tmp) < 0) @@ -627,7 +648,7 @@ static ssize_t write_port(struct file *file, const char __user *buf, unsigned long i = *ppos; const char __user *tmp = buf; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index b5e3103c1175575a0e16d3f4168b4d343aca7b6f..e43c876a92232d9fc0e8a18269c5c4fd7342a3ff 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -59,6 +59,7 @@ #include #include #include +#include #include "smapi.h" #include "mwavedd.h" #include "3780i.h" @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { pDrvData->IPCs[ipcnum].bIsEnabled = false; diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index a284ae25e69a1bcee2b4eda407c51b616e9432cc..76fb434068d4f7b85be5ab6b2abf3ebd5c58ae4a 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -167,7 +167,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, if (count > gbFlashSize - p) count = gbFlashSize - p; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; /* diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig deleted file mode 100644 index 1d1e7da8ad270acf877aa99021f98bdc01cf9ac7..0000000000000000000000000000000000000000 --- 
a/drivers/char/pcmcia/Kconfig +++ /dev/null @@ -1,67 +0,0 @@ -# -# PCMCIA character device configuration -# - -menu "PCMCIA character devices" - depends on PCMCIA!=n - -config SYNCLINK_CS - tristate "SyncLink PC Card support" - depends on PCMCIA && TTY - help - Enable support for the SyncLink PC Card serial adapter, running - asynchronous and HDLC communications up to 512Kbps. The port is - selectable for RS-232, V.35, RS-449, RS-530, and X.21 - - This driver may be built as a module ( = code which can be - inserted in and removed from the running kernel whenever you want). - The module will be called synclink_cs. If you want to do that, say M - here. - -config CARDMAN_4000 - tristate "Omnikey Cardman 4000 support" - depends on PCMCIA - select BITREVERSE - help - Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard - reader. - - This kernel driver requires additional userspace support, either - by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/), - or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc). - -config CARDMAN_4040 - tristate "Omnikey CardMan 4040 support" - depends on PCMCIA - help - Enable support for the Omnikey CardMan 4040 PCMCIA Smartcard - reader. - - This card is basically a USB CCID device connected to a FIFO - in I/O space. To use the kernel driver, you will need either the - PC/SC ifdhandler provided from the Omnikey homepage - (http://www.omnikey.com/), or a current development version of OpenCT - (http://www.opensc-project.org/opensc). - -config SCR24X - tristate "SCR24x Chip Card Interface support" - depends on PCMCIA - help - Enable support for the SCR24x PCMCIA Chip Card Interface. - - To compile this driver as a module, choose M here. - The module will be called scr24x_cs.. - - If unsure say N. - -config IPWIRELESS - tristate "IPWireless 3G UMTS PCMCIA card support" - depends on PCMCIA && NETDEVICES && TTY - select PPP - help - This is a driver for 3G UMTS PCMCIA card from IPWireless company. In - some countries (for example Czech Republic, T-Mobile ISP) this card - is shipped for service called UMTS 4G. - -endmenu - diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile deleted file mode 100644 index 5b836bc21406076450806253c877857a63a5f780..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# -# drivers/char/pcmcia/Makefile -# -# Makefile for the Linux PCMCIA char device drivers. 
-# - -obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o -obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o -obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o -obj-$(CONFIG_SCR24X) += scr24x_cs.o diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c deleted file mode 100644 index a219964cb77005ff2462f85edf9106b3c7fd0d41..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/cm4000_cs.c +++ /dev/null @@ -1,1920 +0,0 @@ - /* - * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000" - * - * cm4000_cs.c support.linux@omnikey.com - * - * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files - * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files - * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality - * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionailty - * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments - * - * current version: 2.4.0gm4 - * - * (C) 2000,2001,2002,2003,2004 Omnikey AG - * - * (C) 2005-2006 Harald Welte - * - Adhere to Kernel process/coding-style.rst - * - Port to 2.6.13 "new" style PCMCIA - * - Check for copy_{from,to}_user return values - * - Use nonseekable_open() - * - add class interface for udev device creation - * - * All rights reserved. Licensed under dual BSD/GPL license. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -/* #define ATR_CSUM */ - -#define reader_to_dev(x) (&x->p_dev->dev) - -/* n (debug level) is ignored */ -/* additional debug output may be enabled by re-compiling with - * CM4000_DEBUG set */ -/* #define CM4000_DEBUG */ -#define DEBUGP(n, rdr, x, args...) do { \ - dev_dbg(reader_to_dev(rdr), "%s:" x, \ - __func__ , ## args); \ - } while (0) - -static DEFINE_MUTEX(cmm_mutex); - -#define T_1SEC (HZ) -#define T_10MSEC msecs_to_jiffies(10) -#define T_20MSEC msecs_to_jiffies(20) -#define T_40MSEC msecs_to_jiffies(40) -#define T_50MSEC msecs_to_jiffies(50) -#define T_100MSEC msecs_to_jiffies(100) -#define T_500MSEC msecs_to_jiffies(500) - -static void cm4000_release(struct pcmcia_device *link); - -static int major; /* major number we get from the kernel */ - -/* note: the first state has to have number 0 always */ - -#define M_FETCH_ATR 0 -#define M_TIMEOUT_WAIT 1 -#define M_READ_ATR_LEN 2 -#define M_READ_ATR 3 -#define M_ATR_PRESENT 4 -#define M_BAD_CARD 5 -#define M_CARDOFF 6 - -#define LOCK_IO 0 -#define LOCK_MONITOR 1 - -#define IS_AUTOPPS_ACT 6 -#define IS_PROCBYTE_PRESENT 7 -#define IS_INVREV 8 -#define IS_ANY_T0 9 -#define IS_ANY_T1 10 -#define IS_ATR_PRESENT 11 -#define IS_ATR_VALID 12 -#define IS_CMM_ABSENT 13 -#define IS_BAD_LENGTH 14 -#define IS_BAD_CSUM 15 -#define IS_BAD_CARD 16 - -#define REG_FLAGS0(x) (x + 0) -#define REG_FLAGS1(x) (x + 1) -#define REG_NUM_BYTES(x) (x + 2) -#define REG_BUF_ADDR(x) (x + 3) -#define REG_BUF_DATA(x) (x + 4) -#define REG_NUM_SEND(x) (x + 5) -#define REG_BAUDRATE(x) (x + 6) -#define REG_STOPBITS(x) (x + 7) - -struct cm4000_dev { - struct pcmcia_device *p_dev; - - unsigned char atr[MAX_ATR]; - unsigned char rbuf[512]; - unsigned char sbuf[512]; - - wait_queue_head_t devq; /* when removing cardman must not be - zeroed! */ - - wait_queue_head_t ioq; /* if IO is locked, wait on this Q */ - wait_queue_head_t atrq; /* wait for ATR valid */ - wait_queue_head_t readq; /* used by write to wake blk.read */ - - /* warning: do not move this fields. - * initialising to zero depends on it - see ZERO_DEV below. 
*/ - unsigned char atr_csum; - unsigned char atr_len_retry; - unsigned short atr_len; - unsigned short rlen; /* bytes avail. after write */ - unsigned short rpos; /* latest read pos. write zeroes */ - unsigned char procbyte; /* T=0 procedure byte */ - unsigned char mstate; /* state of card monitor */ - unsigned char cwarn; /* slow down warning */ - unsigned char flags0; /* cardman IO-flags 0 */ - unsigned char flags1; /* cardman IO-flags 1 */ - unsigned int mdelay; /* variable monitor speeds, in jiffies */ - - unsigned int baudv; /* baud value for speed */ - unsigned char ta1; - unsigned char proto; /* T=0, T=1, ... */ - unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent - access */ - - unsigned char pts[4]; - - struct timer_list timer; /* used to keep monitor running */ - int monitor_running; -}; - -#define ZERO_DEV(dev) \ - memset(&dev->atr_csum,0, \ - sizeof(struct cm4000_dev) - \ - offsetof(struct cm4000_dev, atr_csum)) - -static struct pcmcia_device *dev_table[CM4000_MAX_DEV]; -static struct class *cmm_class; - -/* This table doesn't use spaces after the comma between fields and thus - * violates process/coding-style.rst. However, I don't really think wrapping it around will - * make it any clearer to read -HW */ -static unsigned char fi_di_table[10][14] = { -/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */ -/*DI */ -/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, -/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11}, -/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11}, -/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3}, -/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4}, -/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5}, -/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6}, -/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, -/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8}, -/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} -}; - -#ifndef CM4000_DEBUG -#define xoutb outb -#define xinb inb -#else -static inline void xoutb(unsigned char val, unsigned short port) -{ - pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); - outb(val, port); -} -static inline unsigned char xinb(unsigned short port) -{ - unsigned char val; - - val = inb(port); - pr_debug("%.2x=inb(%.4x)\n", val, port); - - return val; -} -#endif - -static inline unsigned char invert_revert(unsigned char ch) -{ - return bitrev8(~ch); -} - -static void str_invert_revert(unsigned char *b, int len) -{ - int i; - - for (i = 0; i < len; i++) - b[i] = invert_revert(b[i]); -} - -#define ATRLENCK(dev,pos) \ - if (pos>=dev->atr_len || pos>=MAX_ATR) \ - goto return_0; - -static unsigned int calc_baudv(unsigned char fidi) -{ - unsigned int wcrcf, wbrcf, fi_rfu, di_rfu; - - fi_rfu = 372; - di_rfu = 1; - - /* FI */ - switch ((fidi >> 4) & 0x0F) { - case 0x00: - wcrcf = 372; - break; - case 0x01: - wcrcf = 372; - break; - case 0x02: - wcrcf = 558; - break; - case 0x03: - wcrcf = 744; - break; - case 0x04: - wcrcf = 1116; - break; - case 0x05: - wcrcf = 1488; - break; - case 0x06: - wcrcf = 1860; - break; - case 0x07: - wcrcf = fi_rfu; - break; - case 0x08: - wcrcf = fi_rfu; - break; - case 0x09: - wcrcf = 512; - break; - case 0x0A: - wcrcf = 768; - break; - case 0x0B: - wcrcf = 1024; - break; - case 0x0C: - wcrcf = 1536; - 
break; - case 0x0D: - wcrcf = 2048; - break; - default: - wcrcf = fi_rfu; - break; - } - - /* DI */ - switch (fidi & 0x0F) { - case 0x00: - wbrcf = di_rfu; - break; - case 0x01: - wbrcf = 1; - break; - case 0x02: - wbrcf = 2; - break; - case 0x03: - wbrcf = 4; - break; - case 0x04: - wbrcf = 8; - break; - case 0x05: - wbrcf = 16; - break; - case 0x06: - wbrcf = 32; - break; - case 0x07: - wbrcf = di_rfu; - break; - case 0x08: - wbrcf = 12; - break; - case 0x09: - wbrcf = 20; - break; - default: - wbrcf = di_rfu; - break; - } - - return (wcrcf / wbrcf); -} - -static unsigned short io_read_num_rec_bytes(unsigned int iobase, - unsigned short *s) -{ - unsigned short tmp; - - tmp = *s = 0; - do { - *s = tmp; - tmp = inb(REG_NUM_BYTES(iobase)) | - (inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0); - } while (tmp != *s); - - return *s; -} - -static int parse_atr(struct cm4000_dev *dev) -{ - unsigned char any_t1, any_t0; - unsigned char ch, ifno; - int ix, done; - - DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len); - - if (dev->atr_len < 3) { - DEBUGP(5, dev, "parse_atr: atr_len < 3\n"); - return 0; - } - - if (dev->atr[0] == 0x3f) - set_bit(IS_INVREV, &dev->flags); - else - clear_bit(IS_INVREV, &dev->flags); - ix = 1; - ifno = 1; - ch = dev->atr[1]; - dev->proto = 0; /* XXX PROTO */ - any_t1 = any_t0 = done = 0; - dev->ta1 = 0x11; /* defaults to 9600 baud */ - do { - if (ifno == 1 && (ch & 0x10)) { - /* read first interface byte and TA1 is present */ - dev->ta1 = dev->atr[2]; - DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1); - ifno++; - } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */ - dev->ta1 = 0x11; - ifno++; - } - - DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0); - ix += ((ch & 0x10) >> 4) /* no of int.face chars */ - +((ch & 0x20) >> 5) - + ((ch & 0x40) >> 6) - + ((ch & 0x80) >> 7); - /* ATRLENCK(dev,ix); */ - if (ch & 0x80) { /* TDi */ - ch = dev->atr[ix]; - if ((ch & 0x0f)) { - any_t1 = 1; - DEBUGP(5, dev, "card is capable of T=1\n"); - } else { - any_t0 = 1; - DEBUGP(5, dev, "card is capable of T=0\n"); - } - } else - done = 1; - } while (!done); - - DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n", - ix, dev->atr[1] & 15, any_t1); - if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) { - DEBUGP(5, dev, "length error\n"); - return 0; - } - if (any_t0) - set_bit(IS_ANY_T0, &dev->flags); - - if (any_t1) { /* compute csum */ - dev->atr_csum = 0; -#ifdef ATR_CSUM - for (i = 1; i < dev->atr_len; i++) - dev->atr_csum ^= dev->atr[i]; - if (dev->atr_csum) { - set_bit(IS_BAD_CSUM, &dev->flags); - DEBUGP(5, dev, "bad checksum\n"); - goto return_0; - } -#endif - if (any_t0 == 0) - dev->proto = 1; /* XXX PROTO */ - set_bit(IS_ANY_T1, &dev->flags); - } - - return 1; -} - -struct card_fixup { - char atr[12]; - u_int8_t atr_len; - u_int8_t stopbits; -}; - -static struct card_fixup card_fixups[] = { - { /* ACOS */ - .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 }, - .atr_len = 7, - .stopbits = 0x03, - }, - { /* Motorola */ - .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07, - 0x41, 0x81, 0x81 }, - .atr_len = 11, - .stopbits = 0x04, - }, -}; - -static void set_cardparameter(struct cm4000_dev *dev) -{ - int i; - unsigned int iobase = dev->p_dev->resource[0]->start; - u_int8_t stopbits = 0x02; /* ISO default */ - - DEBUGP(3, dev, "-> set_cardparameter\n"); - - dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8); - xoutb(dev->flags1, REG_FLAGS1(iobase)); - DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1); - - /* set baudrate */ - xoutb((unsigned char)((dev->baudv - 1) & 
0xFF), REG_BAUDRATE(iobase)); - - DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv, - ((dev->baudv - 1) & 0xFF)); - - /* set stopbits */ - for (i = 0; i < ARRAY_SIZE(card_fixups); i++) { - if (!memcmp(dev->atr, card_fixups[i].atr, - card_fixups[i].atr_len)) - stopbits = card_fixups[i].stopbits; - } - xoutb(stopbits, REG_STOPBITS(iobase)); - - DEBUGP(3, dev, "<- set_cardparameter\n"); -} - -static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) -{ - - unsigned long tmp, i; - unsigned short num_bytes_read; - unsigned char pts_reply[4]; - ssize_t rc; - unsigned int iobase = dev->p_dev->resource[0]->start; - - rc = 0; - - DEBUGP(3, dev, "-> set_protocol\n"); - DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, " - "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, " - "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol, - (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2, - ptsreq->pts3); - - /* Fill PTS structure */ - dev->pts[0] = 0xff; - dev->pts[1] = 0x00; - tmp = ptsreq->protocol; - while ((tmp = (tmp >> 1)) > 0) - dev->pts[1]++; - dev->proto = dev->pts[1]; /* Set new protocol */ - dev->pts[1] = (0x01 << 4) | (dev->pts[1]); - - /* Correct Fi/Di according to CM4000 Fi/Di table */ - DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1); - /* set Fi/Di according to ATR TA(1) */ - dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F]; - - /* Calculate PCK character */ - dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2]; - - DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n", - dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]); - - /* check card convention */ - if (test_bit(IS_INVREV, &dev->flags)) - str_invert_revert(dev->pts, 4); - - /* reset SM */ - xoutb(0x80, REG_FLAGS0(iobase)); - - /* Enable access to the message buffer */ - DEBUGP(5, dev, "Enable access to the messages buffer\n"); - dev->flags1 = 0x20 /* T_Active */ - | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */ - | ((dev->baudv >> 8) & 0x01); /* MSB-baud */ - xoutb(dev->flags1, REG_FLAGS1(iobase)); - - DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n", - dev->flags1); - - /* write challenge to the buffer */ - DEBUGP(5, dev, "Write challenge to buffer: "); - for (i = 0; i < 4; i++) { - xoutb(i, REG_BUF_ADDR(iobase)); - xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ -#ifdef CM4000_DEBUG - pr_debug("0x%.2x ", dev->pts[i]); - } - pr_debug("\n"); -#else - } -#endif - - /* set number of bytes to write */ - DEBUGP(5, dev, "Set number of bytes to write\n"); - xoutb(0x04, REG_NUM_SEND(iobase)); - - /* Trigger CARDMAN CONTROLLER */ - xoutb(0x50, REG_FLAGS0(iobase)); - - /* Monitor progress */ - /* wait for xmit done */ - DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n"); - - for (i = 0; i < 100; i++) { - if (inb(REG_FLAGS0(iobase)) & 0x08) { - DEBUGP(5, dev, "NumRecBytes is valid\n"); - break; - } - mdelay(10); - } - if (i == 100) { - DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting " - "valid\n"); - rc = -EIO; - goto exit_setprotocol; - } - - DEBUGP(5, dev, "Reading NumRecBytes\n"); - for (i = 0; i < 100; i++) { - io_read_num_rec_bytes(iobase, &num_bytes_read); - if (num_bytes_read >= 4) { - DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); - break; - } - mdelay(10); - } - - /* check whether it is a short PTS reply? 
*/ - if (num_bytes_read == 3) - i = 0; - - if (i == 100) { - DEBUGP(5, dev, "Timeout reading num_bytes_read\n"); - rc = -EIO; - goto exit_setprotocol; - } - - DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n"); - xoutb(0x80, REG_FLAGS0(iobase)); - - /* Read PPS reply */ - DEBUGP(5, dev, "Read PPS reply\n"); - for (i = 0; i < num_bytes_read; i++) { - xoutb(i, REG_BUF_ADDR(iobase)); - pts_reply[i] = inb(REG_BUF_DATA(iobase)); - } - -#ifdef CM4000_DEBUG - DEBUGP(2, dev, "PTSreply: "); - for (i = 0; i < num_bytes_read; i++) { - pr_debug("0x%.2x ", pts_reply[i]); - } - pr_debug("\n"); -#endif /* CM4000_DEBUG */ - - DEBUGP(5, dev, "Clear Tactive in Flags1\n"); - xoutb(0x20, REG_FLAGS1(iobase)); - - /* Compare ptsreq and ptsreply */ - if ((dev->pts[0] == pts_reply[0]) && - (dev->pts[1] == pts_reply[1]) && - (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) { - /* setcardparameter according to PPS */ - dev->baudv = calc_baudv(dev->pts[2]); - set_cardparameter(dev); - } else if ((dev->pts[0] == pts_reply[0]) && - ((dev->pts[1] & 0xef) == pts_reply[1]) && - ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) { - /* short PTS reply, set card parameter to default values */ - dev->baudv = calc_baudv(0x11); - set_cardparameter(dev); - } else - rc = -EIO; - -exit_setprotocol: - DEBUGP(3, dev, "<- set_protocol\n"); - return rc; -} - -static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev) -{ - - /* note: statemachine is assumed to be reset */ - if (inb(REG_FLAGS0(iobase)) & 8) { - clear_bit(IS_ATR_VALID, &dev->flags); - set_bit(IS_CMM_ABSENT, &dev->flags); - return 0; /* detect CMM = 1 -> failure */ - } - /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */ - xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase)); - if ((inb(REG_FLAGS0(iobase)) & 8) == 0) { - clear_bit(IS_ATR_VALID, &dev->flags); - set_bit(IS_CMM_ABSENT, &dev->flags); - return 0; /* detect CMM=0 -> failure */ - } - /* clear detectCMM again by restoring original flags1 */ - xoutb(dev->flags1, REG_FLAGS1(iobase)); - return 1; -} - -static void terminate_monitor(struct cm4000_dev *dev) -{ - - /* tell the monitor to stop and wait until - * it terminates. - */ - DEBUGP(3, dev, "-> terminate_monitor\n"); - wait_event_interruptible(dev->devq, - test_and_set_bit(LOCK_MONITOR, - (void *)&dev->flags)); - - /* now, LOCK_MONITOR has been set. - * allow a last cycle in the monitor. - * the monitor will indicate that it has - * finished by clearing this bit. - */ - DEBUGP(5, dev, "Now allow last cycle of monitor!\n"); - while (test_bit(LOCK_MONITOR, (void *)&dev->flags)) - msleep(25); - - DEBUGP(5, dev, "Delete timer\n"); - del_timer_sync(&dev->timer); -#ifdef CM4000_DEBUG - dev->monitor_running = 0; -#endif - - DEBUGP(3, dev, "<- terminate_monitor\n"); -} - -/* - * monitor the card every 50msec. as a side-effect, retrieve the - * atr once a card is inserted. another side-effect of retrieving the - * atr is that the card will be powered on, so there is no need to - * power on the card explicitly from the application: the driver - * is already doing that for you. - */ - -static void monitor_card(struct timer_list *t) -{ - struct cm4000_dev *dev = from_timer(dev, t, timer); - unsigned int iobase = dev->p_dev->resource[0]->start; - unsigned short s; - struct ptsreq ptsreq; - int i, atrc; - - DEBUGP(7, dev, "-> monitor_card\n"); - - /* if someone has set the lock for us: we're done! 
*/ - if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) { - DEBUGP(4, dev, "About to stop monitor\n"); - /* no */ - dev->rlen = - dev->rpos = - dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; - dev->mstate = M_FETCH_ATR; - clear_bit(LOCK_MONITOR, &dev->flags); - /* close et al. are sleeping on devq, so wake it */ - wake_up_interruptible(&dev->devq); - DEBUGP(2, dev, "<- monitor_card (we are done now)\n"); - return; - } - - /* try to lock io: if it is already locked, just add another timer */ - if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) { - DEBUGP(4, dev, "Couldn't get IO lock\n"); - goto return_with_timer; - } - - /* is a card/a reader inserted at all ? */ - dev->flags0 = xinb(REG_FLAGS0(iobase)); - DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0); - DEBUGP(7, dev, "smartcard present: %s\n", - dev->flags0 & 1 ? "yes" : "no"); - DEBUGP(7, dev, "cardman present: %s\n", - dev->flags0 == 0xff ? "no" : "yes"); - - if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ - || dev->flags0 == 0xff) { /* no cardman inserted */ - /* no */ - dev->rlen = - dev->rpos = - dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; - dev->mstate = M_FETCH_ATR; - - dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */ - - if (dev->flags0 == 0xff) { - DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n"); - set_bit(IS_CMM_ABSENT, &dev->flags); - } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) { - DEBUGP(4, dev, "clear IS_CMM_ABSENT bit " - "(card is removed)\n"); - clear_bit(IS_CMM_ABSENT, &dev->flags); - } - - goto release_io; - } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) { - /* cardman and card present but cardman was absent before - * (after suspend with inserted card) */ - DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n"); - clear_bit(IS_CMM_ABSENT, &dev->flags); - } - - if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { - DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n"); - goto release_io; - } - - switch (dev->mstate) { - unsigned char flags0; - case M_CARDOFF: - DEBUGP(4, dev, "M_CARDOFF\n"); - flags0 = inb(REG_FLAGS0(iobase)); - if (flags0 & 0x02) { - /* wait until Flags0 indicate power is off */ - dev->mdelay = T_10MSEC; - } else { - /* Flags0 indicate power off and no card inserted now; - * Reset CARDMAN CONTROLLER */ - xoutb(0x80, REG_FLAGS0(iobase)); - - /* prepare for fetching ATR again: after card off ATR - * is read again automatically */ - dev->rlen = - dev->rpos = - dev->atr_csum = - dev->atr_len_retry = dev->cwarn = 0; - dev->mstate = M_FETCH_ATR; - - /* minimal gap between CARDOFF and read ATR is 50msec */ - dev->mdelay = T_50MSEC; - } - break; - case M_FETCH_ATR: - DEBUGP(4, dev, "M_FETCH_ATR\n"); - xoutb(0x80, REG_FLAGS0(iobase)); - DEBUGP(4, dev, "Reset BAUDV to 9600\n"); - dev->baudv = 0x173; /* 9600 */ - xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */ - xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */ - xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud - value */ - /* warm start vs. power on: */ - xoutb(dev->flags0 & 2 ? 
0x46 : 0x44, REG_FLAGS0(iobase)); - dev->mdelay = T_40MSEC; - dev->mstate = M_TIMEOUT_WAIT; - break; - case M_TIMEOUT_WAIT: - DEBUGP(4, dev, "M_TIMEOUT_WAIT\n"); - /* numRecBytes */ - io_read_num_rec_bytes(iobase, &dev->atr_len); - dev->mdelay = T_10MSEC; - dev->mstate = M_READ_ATR_LEN; - break; - case M_READ_ATR_LEN: - DEBUGP(4, dev, "M_READ_ATR_LEN\n"); - /* infinite loop possible, since there is no timeout */ - -#define MAX_ATR_LEN_RETRY 100 - - if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) { - if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */ - dev->mdelay = T_10MSEC; - dev->mstate = M_READ_ATR; - } - } else { - dev->atr_len = s; - dev->atr_len_retry = 0; /* set new timeout */ - } - - DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len); - break; - case M_READ_ATR: - DEBUGP(4, dev, "M_READ_ATR\n"); - xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ - for (i = 0; i < dev->atr_len; i++) { - xoutb(i, REG_BUF_ADDR(iobase)); - dev->atr[i] = inb(REG_BUF_DATA(iobase)); - } - /* Deactivate T_Active flags */ - DEBUGP(4, dev, "Deactivate T_Active flags\n"); - dev->flags1 = 0x01; - xoutb(dev->flags1, REG_FLAGS1(iobase)); - - /* atr is present (which doesn't mean it's valid) */ - set_bit(IS_ATR_PRESENT, &dev->flags); - if (dev->atr[0] == 0x03) - str_invert_revert(dev->atr, dev->atr_len); - atrc = parse_atr(dev); - if (atrc == 0) { /* atr invalid */ - dev->mdelay = 0; - dev->mstate = M_BAD_CARD; - } else { - dev->mdelay = T_50MSEC; - dev->mstate = M_ATR_PRESENT; - set_bit(IS_ATR_VALID, &dev->flags); - } - - if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { - DEBUGP(4, dev, "monitor_card: ATR valid\n"); - /* if ta1 == 0x11, no PPS necessary (default values) */ - /* do not do PPS with multi protocol cards */ - if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) && - (dev->ta1 != 0x11) && - !(test_bit(IS_ANY_T0, &dev->flags) && - test_bit(IS_ANY_T1, &dev->flags))) { - DEBUGP(4, dev, "Perform AUTOPPS\n"); - set_bit(IS_AUTOPPS_ACT, &dev->flags); - ptsreq.protocol = (0x01 << dev->proto); - ptsreq.flags = 0x01; - ptsreq.pts1 = 0x00; - ptsreq.pts2 = 0x00; - ptsreq.pts3 = 0x00; - if (set_protocol(dev, &ptsreq) == 0) { - DEBUGP(4, dev, "AUTOPPS ret SUCC\n"); - clear_bit(IS_AUTOPPS_ACT, &dev->flags); - wake_up_interruptible(&dev->atrq); - } else { - DEBUGP(4, dev, "AUTOPPS failed: " - "repower using defaults\n"); - /* prepare for repowering */ - clear_bit(IS_ATR_PRESENT, &dev->flags); - clear_bit(IS_ATR_VALID, &dev->flags); - dev->rlen = - dev->rpos = - dev->atr_csum = - dev->atr_len_retry = dev->cwarn = 0; - dev->mstate = M_FETCH_ATR; - - dev->mdelay = T_50MSEC; - } - } else { - /* for cards which use slightly different - * params (extra guard time) */ - set_cardparameter(dev); - if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1) - DEBUGP(4, dev, "AUTOPPS already active " - "2nd try:use default values\n"); - if (dev->ta1 == 0x11) - DEBUGP(4, dev, "No AUTOPPS necessary " - "TA(1)==0x11\n"); - if (test_bit(IS_ANY_T0, &dev->flags) - && test_bit(IS_ANY_T1, &dev->flags)) - DEBUGP(4, dev, "Do NOT perform AUTOPPS " - "with multiprotocol cards\n"); - clear_bit(IS_AUTOPPS_ACT, &dev->flags); - wake_up_interruptible(&dev->atrq); - } - } else { - DEBUGP(4, dev, "ATR invalid\n"); - wake_up_interruptible(&dev->atrq); - } - break; - case M_BAD_CARD: - DEBUGP(4, dev, "M_BAD_CARD\n"); - /* slow down warning, but prompt immediately after insertion */ - if (dev->cwarn == 0 || dev->cwarn == 10) { - set_bit(IS_BAD_CARD, &dev->flags); - dev_warn(&dev->p_dev->dev, MODULE_NAME ": "); - if (test_bit(IS_BAD_CSUM, 
&dev->flags)) { - DEBUGP(4, dev, "ATR checksum (0x%.2x, should " - "be zero) failed\n", dev->atr_csum); - } -#ifdef CM4000_DEBUG - else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { - DEBUGP(4, dev, "ATR length error\n"); - } else { - DEBUGP(4, dev, "card damaged or wrong way " - "inserted\n"); - } -#endif - dev->cwarn = 0; - wake_up_interruptible(&dev->atrq); /* wake open */ - } - dev->cwarn++; - dev->mdelay = T_100MSEC; - dev->mstate = M_FETCH_ATR; - break; - default: - DEBUGP(7, dev, "Unknown action\n"); - break; /* nothing */ - } - -release_io: - DEBUGP(7, dev, "release_io\n"); - clear_bit(LOCK_IO, &dev->flags); - wake_up_interruptible(&dev->ioq); /* whoever needs IO */ - -return_with_timer: - DEBUGP(7, dev, "<- monitor_card (returns with timer)\n"); - mod_timer(&dev->timer, jiffies + dev->mdelay); - clear_bit(LOCK_MONITOR, &dev->flags); -} - -/* Interface to userland (file_operations) */ - -static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, - loff_t *ppos) -{ - struct cm4000_dev *dev = filp->private_data; - unsigned int iobase = dev->p_dev->resource[0]->start; - ssize_t rc; - int i, j, k; - - DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid); - - if (count == 0) /* according to manpage */ - return 0; - - if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ - test_bit(IS_CMM_ABSENT, &dev->flags)) - return -ENODEV; - - if (test_bit(IS_BAD_CSUM, &dev->flags)) - return -EIO; - - /* also see the note about this in cmm_write */ - if (wait_event_interruptible - (dev->atrq, - ((filp->f_flags & O_NONBLOCK) - || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - return -ERESTARTSYS; - } - - if (test_bit(IS_ATR_VALID, &dev->flags) == 0) - return -EIO; - - /* this one implements blocking IO */ - if (wait_event_interruptible - (dev->readq, - ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) { - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - return -ERESTARTSYS; - } - - /* lock io */ - if (wait_event_interruptible - (dev->ioq, - ((filp->f_flags & O_NONBLOCK) - || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - return -ERESTARTSYS; - } - - rc = 0; - dev->flags0 = inb(REG_FLAGS0(iobase)); - if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ - || dev->flags0 == 0xff) { /* no cardman inserted */ - clear_bit(IS_ATR_VALID, &dev->flags); - if (dev->flags0 & 1) { - set_bit(IS_CMM_ABSENT, &dev->flags); - rc = -ENODEV; - } else { - rc = -EIO; - } - goto release_io; - } - - DEBUGP(4, dev, "begin read answer\n"); - j = min(count, (size_t)(dev->rlen - dev->rpos)); - k = dev->rpos; - if (k + j > 255) - j = 256 - k; - DEBUGP(4, dev, "read1 j=%d\n", j); - for (i = 0; i < j; i++) { - xoutb(k++, REG_BUF_ADDR(iobase)); - dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); - } - j = min(count, (size_t)(dev->rlen - dev->rpos)); - if (k + j > 255) { - DEBUGP(4, dev, "read2 j=%d\n", j); - dev->flags1 |= 0x10; /* MSB buf addr set */ - xoutb(dev->flags1, REG_FLAGS1(iobase)); - for (; i < j; i++) { - xoutb(k++, REG_BUF_ADDR(iobase)); - dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); - } - } - - if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) { - DEBUGP(4, dev, "T=0 and count > buffer\n"); - dev->rbuf[i] = dev->rbuf[i - 1]; - dev->rbuf[i - 1] = dev->procbyte; - j++; - } - count = j; - - dev->rpos = dev->rlen + 1; - - /* Clear T1Active */ - DEBUGP(4, dev, "Clear T1Active\n"); - dev->flags1 &= 0xdf; - xoutb(dev->flags1, REG_FLAGS1(iobase)); 
- - xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ - /* last check before exit */ - if (!io_detect_cm4000(iobase, dev)) { - rc = -ENODEV; - goto release_io; - } - - if (test_bit(IS_INVREV, &dev->flags) && count > 0) - str_invert_revert(dev->rbuf, count); - - if (copy_to_user(buf, dev->rbuf, count)) - rc = -EFAULT; - -release_io: - clear_bit(LOCK_IO, &dev->flags); - wake_up_interruptible(&dev->ioq); - - DEBUGP(2, dev, "<- cmm_read returns: rc = %zi\n", - (rc < 0 ? rc : count)); - return rc < 0 ? rc : count; -} - -static ssize_t cmm_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct cm4000_dev *dev = filp->private_data; - unsigned int iobase = dev->p_dev->resource[0]->start; - unsigned short s; - unsigned char tmp; - unsigned char infolen; - unsigned char sendT0; - unsigned short nsend; - unsigned short nr; - ssize_t rc; - int i; - - DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid); - - if (count == 0) /* according to manpage */ - return 0; - - if (dev->proto == 0 && count < 4) { - /* T0 must have at least 4 bytes */ - DEBUGP(4, dev, "T0 short write\n"); - return -EIO; - } - - nr = count & 0x1ff; /* max bytes to write */ - - sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0; - - if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ - test_bit(IS_CMM_ABSENT, &dev->flags)) - return -ENODEV; - - if (test_bit(IS_BAD_CSUM, &dev->flags)) { - DEBUGP(4, dev, "bad csum\n"); - return -EIO; - } - - /* - * wait for atr to become valid. - * note: it is important to lock this code. if we dont, the monitor - * could be run between test_bit and the call to sleep on the - * atr-queue. if *then* the monitor detects atr valid, it will wake up - * any process on the atr-queue, *but* since we have been interrupted, - * we do not yet sleep on this queue. this would result in a missed - * wake_up and the calling process would sleep forever (until - * interrupted). also, do *not* restore_flags before sleep_on, because - * this could result in the same situation! - */ - if (wait_event_interruptible - (dev->atrq, - ((filp->f_flags & O_NONBLOCK) - || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - return -ERESTARTSYS; - } - - if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */ - DEBUGP(4, dev, "invalid ATR\n"); - return -EIO; - } - - /* lock io */ - if (wait_event_interruptible - (dev->ioq, - ((filp->f_flags & O_NONBLOCK) - || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - return -ERESTARTSYS; - } - - if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count))) - return -EFAULT; - - rc = 0; - dev->flags0 = inb(REG_FLAGS0(iobase)); - if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ - || dev->flags0 == 0xff) { /* no cardman inserted */ - clear_bit(IS_ATR_VALID, &dev->flags); - if (dev->flags0 & 1) { - set_bit(IS_CMM_ABSENT, &dev->flags); - rc = -ENODEV; - } else { - DEBUGP(4, dev, "IO error\n"); - rc = -EIO; - } - goto release_io; - } - - xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ - - if (!io_detect_cm4000(iobase, dev)) { - rc = -ENODEV; - goto release_io; - } - - /* reflect T=0 send/read mode in flags1 */ - dev->flags1 |= (sendT0); - - set_cardparameter(dev); - - /* dummy read, reset flag procedure received */ - tmp = inb(REG_FLAGS1(iobase)); - - dev->flags1 = 0x20 /* T_Active */ - | (sendT0) - | (test_bit(IS_INVREV, &dev->flags) ? 
2 : 0)/* inverse parity */ - | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */ - DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1); - xoutb(dev->flags1, REG_FLAGS1(iobase)); - - /* xmit data */ - DEBUGP(4, dev, "Xmit data\n"); - for (i = 0; i < nr; i++) { - if (i >= 256) { - dev->flags1 = 0x20 /* T_Active */ - | (sendT0) /* SendT0 */ - /* inverse parity: */ - | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0) - | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */ - | 0x10; /* set address high */ - DEBUGP(4, dev, "dev->flags = 0x%.2x - set address " - "high\n", dev->flags1); - xoutb(dev->flags1, REG_FLAGS1(iobase)); - } - if (test_bit(IS_INVREV, &dev->flags)) { - DEBUGP(4, dev, "Apply inverse convention for 0x%.2x " - "-> 0x%.2x\n", (unsigned char)dev->sbuf[i], - invert_revert(dev->sbuf[i])); - xoutb(i, REG_BUF_ADDR(iobase)); - xoutb(invert_revert(dev->sbuf[i]), - REG_BUF_DATA(iobase)); - } else { - xoutb(i, REG_BUF_ADDR(iobase)); - xoutb(dev->sbuf[i], REG_BUF_DATA(iobase)); - } - } - DEBUGP(4, dev, "Xmit done\n"); - - if (dev->proto == 0) { - /* T=0 proto: 0 byte reply */ - if (nr == 4) { - DEBUGP(4, dev, "T=0 assumes 0 byte reply\n"); - xoutb(i, REG_BUF_ADDR(iobase)); - if (test_bit(IS_INVREV, &dev->flags)) - xoutb(0xff, REG_BUF_DATA(iobase)); - else - xoutb(0x00, REG_BUF_DATA(iobase)); - } - - /* numSendBytes */ - if (sendT0) - nsend = nr; - else { - if (nr == 4) - nsend = 5; - else { - nsend = 5 + (unsigned char)dev->sbuf[4]; - if (dev->sbuf[4] == 0) - nsend += 0x100; - } - } - } else - nsend = nr; - - /* T0: output procedure byte */ - if (test_bit(IS_INVREV, &dev->flags)) { - DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) " - "0x%.2x\n", invert_revert(dev->sbuf[1])); - xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase)); - } else { - DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]); - xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase)); - } - - DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n", - (unsigned char)(nsend & 0xff)); - xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase)); - - DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n", - 0x40 /* SM_Active */ - | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ - |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */ - |(nsend & 0x100) >> 8 /* MSB numSendBytes */ ); - xoutb(0x40 /* SM_Active */ - | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ - |(dev->proto ? 
0x10 : 0x08) /* T=1/T=0 */ - |(nsend & 0x100) >> 8, /* MSB numSendBytes */ - REG_FLAGS0(iobase)); - - /* wait for xmit done */ - if (dev->proto == 1) { - DEBUGP(4, dev, "Wait for xmit done\n"); - for (i = 0; i < 1000; i++) { - if (inb(REG_FLAGS0(iobase)) & 0x08) - break; - msleep_interruptible(10); - } - if (i == 1000) { - DEBUGP(4, dev, "timeout waiting for xmit done\n"); - rc = -EIO; - goto release_io; - } - } - - /* T=1: wait for infoLen */ - - infolen = 0; - if (dev->proto) { - /* wait until infoLen is valid */ - for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */ - io_read_num_rec_bytes(iobase, &s); - if (s >= 3) { - infolen = inb(REG_FLAGS1(iobase)); - DEBUGP(4, dev, "infolen=%d\n", infolen); - break; - } - msleep_interruptible(10); - } - if (i == 6000) { - DEBUGP(4, dev, "timeout waiting for infoLen\n"); - rc = -EIO; - goto release_io; - } - } else - clear_bit(IS_PROCBYTE_PRESENT, &dev->flags); - - /* numRecBytes | bit9 of numRecytes */ - io_read_num_rec_bytes(iobase, &dev->rlen); - for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */ - if (dev->proto) { - if (dev->rlen >= infolen + 4) - break; - } - msleep_interruptible(10); - /* numRecBytes | bit9 of numRecytes */ - io_read_num_rec_bytes(iobase, &s); - if (s > dev->rlen) { - DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n"); - i = 0; /* reset timeout */ - dev->rlen = s; - } - /* T=0: we are done when numRecBytes doesn't - * increment any more and NoProcedureByte - * is set and numRecBytes == bytes sent + 6 - * (header bytes + data + 1 for sw2) - * except when the card replies an error - * which means, no data will be sent back. - */ - else if (dev->proto == 0) { - if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) { - /* no procedure byte received since last read */ - DEBUGP(1, dev, "NoProcedure byte set\n"); - /* i=0; */ - } else { - /* procedure byte received since last read */ - DEBUGP(1, dev, "NoProcedure byte unset " - "(reset timeout)\n"); - dev->procbyte = inb(REG_FLAGS1(iobase)); - DEBUGP(1, dev, "Read procedure byte 0x%.2x\n", - dev->procbyte); - i = 0; /* resettimeout */ - } - if (inb(REG_FLAGS0(iobase)) & 0x08) { - DEBUGP(1, dev, "T0Done flag (read reply)\n"); - break; - } - } - if (dev->proto) - infolen = inb(REG_FLAGS1(iobase)); - } - if (i == 600) { - DEBUGP(1, dev, "timeout waiting for numRecBytes\n"); - rc = -EIO; - goto release_io; - } else { - if (dev->proto == 0) { - DEBUGP(1, dev, "Wait for T0Done bit to be set\n"); - for (i = 0; i < 1000; i++) { - if (inb(REG_FLAGS0(iobase)) & 0x08) - break; - msleep_interruptible(10); - } - if (i == 1000) { - DEBUGP(1, dev, "timeout waiting for T0Done\n"); - rc = -EIO; - goto release_io; - } - - dev->procbyte = inb(REG_FLAGS1(iobase)); - DEBUGP(4, dev, "Read procedure byte 0x%.2x\n", - dev->procbyte); - - io_read_num_rec_bytes(iobase, &dev->rlen); - DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen); - - } - } - /* T=1: read offset=zero, T=0: read offset=after challenge */ - dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 
5 : nr; - DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n", - dev->rlen, dev->rpos, nr); - -release_io: - DEBUGP(4, dev, "Reset SM\n"); - xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ - - if (rc < 0) { - DEBUGP(4, dev, "Write failed but clear T_Active\n"); - dev->flags1 &= 0xdf; - xoutb(dev->flags1, REG_FLAGS1(iobase)); - } - - clear_bit(LOCK_IO, &dev->flags); - wake_up_interruptible(&dev->ioq); - wake_up_interruptible(&dev->readq); /* tell read we have data */ - - /* ITSEC E2: clear write buffer */ - memset((char *)dev->sbuf, 0, 512); - - /* return error or actually written bytes */ - DEBUGP(2, dev, "<- cmm_write\n"); - return rc < 0 ? rc : nr; -} - -static void start_monitor(struct cm4000_dev *dev) -{ - DEBUGP(3, dev, "-> start_monitor\n"); - if (!dev->monitor_running) { - DEBUGP(5, dev, "create, init and add timer\n"); - timer_setup(&dev->timer, monitor_card, 0); - dev->monitor_running = 1; - mod_timer(&dev->timer, jiffies); - } else - DEBUGP(5, dev, "monitor already running\n"); - DEBUGP(3, dev, "<- start_monitor\n"); -} - -static void stop_monitor(struct cm4000_dev *dev) -{ - DEBUGP(3, dev, "-> stop_monitor\n"); - if (dev->monitor_running) { - DEBUGP(5, dev, "stopping monitor\n"); - terminate_monitor(dev); - /* reset monitor SM */ - clear_bit(IS_ATR_VALID, &dev->flags); - clear_bit(IS_ATR_PRESENT, &dev->flags); - } else - DEBUGP(5, dev, "monitor already stopped\n"); - DEBUGP(3, dev, "<- stop_monitor\n"); -} - -static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - struct cm4000_dev *dev = filp->private_data; - unsigned int iobase = dev->p_dev->resource[0]->start; - struct inode *inode = file_inode(filp); - struct pcmcia_device *link; - int size; - int rc; - void __user *argp = (void __user *)arg; -#ifdef CM4000_DEBUG - char *ioctl_names[CM_IOC_MAXNR + 1] = { - [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", - [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", - [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF", - [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", - [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", - }; - DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), - iminor(inode), ioctl_names[_IOC_NR(cmd)]); -#endif - - mutex_lock(&cmm_mutex); - rc = -ENODEV; - link = dev_table[iminor(inode)]; - if (!pcmcia_dev_present(link)) { - DEBUGP(4, dev, "DEV_OK false\n"); - goto out; - } - - if (test_bit(IS_CMM_ABSENT, &dev->flags)) { - DEBUGP(4, dev, "CMM_ABSENT flag set\n"); - goto out; - } - rc = -EINVAL; - - if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { - DEBUGP(4, dev, "ioctype mismatch\n"); - goto out; - } - if (_IOC_NR(cmd) > CM_IOC_MAXNR) { - DEBUGP(4, dev, "iocnr mismatch\n"); - goto out; - } - size = _IOC_SIZE(cmd); - rc = -EFAULT; - DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", - _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); - - if (_IOC_DIR(cmd) & _IOC_READ) { - if (!access_ok(VERIFY_WRITE, argp, size)) - goto out; - } - if (_IOC_DIR(cmd) & _IOC_WRITE) { - if (!access_ok(VERIFY_READ, argp, size)) - goto out; - } - rc = 0; - - switch (cmd) { - case CM_IOCGSTATUS: - DEBUGP(4, dev, " ... 
in CM_IOCGSTATUS\n"); - { - int status; - - /* clear other bits, but leave inserted & powered as - * they are */ - status = dev->flags0 & 3; - if (test_bit(IS_ATR_PRESENT, &dev->flags)) - status |= CM_ATR_PRESENT; - if (test_bit(IS_ATR_VALID, &dev->flags)) - status |= CM_ATR_VALID; - if (test_bit(IS_CMM_ABSENT, &dev->flags)) - status |= CM_NO_READER; - if (test_bit(IS_BAD_CARD, &dev->flags)) - status |= CM_BAD_CARD; - if (copy_to_user(argp, &status, sizeof(int))) - rc = -EFAULT; - } - break; - case CM_IOCGATR: - DEBUGP(4, dev, "... in CM_IOCGATR\n"); - { - struct atreq __user *atreq = argp; - int tmp; - /* allow nonblocking io and being interrupted */ - if (wait_event_interruptible - (dev->atrq, - ((filp->f_flags & O_NONBLOCK) - || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) - != 0)))) { - if (filp->f_flags & O_NONBLOCK) - rc = -EAGAIN; - else - rc = -ERESTARTSYS; - break; - } - - rc = -EFAULT; - if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { - tmp = -1; - if (copy_to_user(&(atreq->atr_len), &tmp, - sizeof(int))) - break; - } else { - if (copy_to_user(atreq->atr, dev->atr, - dev->atr_len)) - break; - - tmp = dev->atr_len; - if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) - break; - } - rc = 0; - break; - } - case CM_IOCARDOFF: - -#ifdef CM4000_DEBUG - DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); - if (dev->flags0 & 0x01) { - DEBUGP(4, dev, " Card inserted\n"); - } else { - DEBUGP(2, dev, " No card inserted\n"); - } - if (dev->flags0 & 0x02) { - DEBUGP(4, dev, " Card powered\n"); - } else { - DEBUGP(2, dev, " Card not powered\n"); - } -#endif - - /* is a card inserted and powered? */ - if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) { - - /* get IO lock */ - if (wait_event_interruptible - (dev->ioq, - ((filp->f_flags & O_NONBLOCK) - || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) - == 0)))) { - if (filp->f_flags & O_NONBLOCK) - rc = -EAGAIN; - else - rc = -ERESTARTSYS; - break; - } - /* Set Flags0 = 0x42 */ - DEBUGP(4, dev, "Set Flags0=0x42 \n"); - xoutb(0x42, REG_FLAGS0(iobase)); - clear_bit(IS_ATR_PRESENT, &dev->flags); - clear_bit(IS_ATR_VALID, &dev->flags); - dev->mstate = M_CARDOFF; - clear_bit(LOCK_IO, &dev->flags); - if (wait_event_interruptible - (dev->atrq, - ((filp->f_flags & O_NONBLOCK) - || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != - 0)))) { - if (filp->f_flags & O_NONBLOCK) - rc = -EAGAIN; - else - rc = -ERESTARTSYS; - break; - } - } - /* release lock */ - clear_bit(LOCK_IO, &dev->flags); - wake_up_interruptible(&dev->ioq); - - rc = 0; - break; - case CM_IOCSPTS: - { - struct ptsreq krnptsreq; - - if (copy_from_user(&krnptsreq, argp, - sizeof(struct ptsreq))) { - rc = -EFAULT; - break; - } - - rc = 0; - DEBUGP(4, dev, "... 
in CM_IOCSPTS\n"); - /* wait for ATR to get valid */ - if (wait_event_interruptible - (dev->atrq, - ((filp->f_flags & O_NONBLOCK) - || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) - != 0)))) { - if (filp->f_flags & O_NONBLOCK) - rc = -EAGAIN; - else - rc = -ERESTARTSYS; - break; - } - /* get IO lock */ - if (wait_event_interruptible - (dev->ioq, - ((filp->f_flags & O_NONBLOCK) - || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) - == 0)))) { - if (filp->f_flags & O_NONBLOCK) - rc = -EAGAIN; - else - rc = -ERESTARTSYS; - break; - } - - if ((rc = set_protocol(dev, &krnptsreq)) != 0) { - /* auto power_on again */ - dev->mstate = M_FETCH_ATR; - clear_bit(IS_ATR_VALID, &dev->flags); - } - /* release lock */ - clear_bit(LOCK_IO, &dev->flags); - wake_up_interruptible(&dev->ioq); - - } - break; -#ifdef CM4000_DEBUG - case CM_IOSDBGLVL: - rc = -ENOTTY; - break; -#endif - default: - DEBUGP(4, dev, "... in default (unknown IOCTL code)\n"); - rc = -ENOTTY; - } -out: - mutex_unlock(&cmm_mutex); - return rc; -} - -static int cmm_open(struct inode *inode, struct file *filp) -{ - struct cm4000_dev *dev; - struct pcmcia_device *link; - int minor = iminor(inode); - int ret; - - if (minor >= CM4000_MAX_DEV) - return -ENODEV; - - mutex_lock(&cmm_mutex); - link = dev_table[minor]; - if (link == NULL || !pcmcia_dev_present(link)) { - ret = -ENODEV; - goto out; - } - - if (link->open) { - ret = -EBUSY; - goto out; - } - - dev = link->priv; - filp->private_data = dev; - - DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n", - imajor(inode), minor, current->comm, current->pid); - - /* init device variables, they may be "polluted" after close - * or, the device may never have been closed (i.e. open failed) - */ - - ZERO_DEV(dev); - - /* opening will always block since the - * monitor will be started by open, which - * means we have to wait for ATR becoming - * valid = block until valid (or card - * inserted) - */ - if (filp->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - goto out; - } - - dev->mdelay = T_50MSEC; - - /* start monitoring the cardstatus */ - start_monitor(dev); - - link->open = 1; /* only one open per device */ - - DEBUGP(2, dev, "<- cmm_open\n"); - ret = nonseekable_open(inode, filp); -out: - mutex_unlock(&cmm_mutex); - return ret; -} - -static int cmm_close(struct inode *inode, struct file *filp) -{ - struct cm4000_dev *dev; - struct pcmcia_device *link; - int minor = iminor(inode); - - if (minor >= CM4000_MAX_DEV) - return -ENODEV; - - link = dev_table[minor]; - if (link == NULL) - return -ENODEV; - - dev = link->priv; - - DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n", - imajor(inode), minor); - - stop_monitor(dev); - - ZERO_DEV(dev); - - link->open = 0; /* only one open per device */ - wake_up(&dev->devq); /* socket removed? */ - - DEBUGP(2, dev, "cmm_close\n"); - return 0; -} - -static void cmm_cm4000_release(struct pcmcia_device * link) -{ - struct cm4000_dev *dev = link->priv; - - /* dont terminate the monitor, rather rely on - * close doing that for us. - */ - DEBUGP(3, dev, "-> cmm_cm4000_release\n"); - while (link->open) { - printk(KERN_INFO MODULE_NAME ": delaying release until " - "process has terminated\n"); - /* note: don't interrupt us: - * close the applications which own - * the devices _first_ ! 
- */ - wait_event(dev->devq, (link->open == 0)); - } - /* dev->devq=NULL; this cannot be zeroed earlier */ - DEBUGP(3, dev, "<- cmm_cm4000_release\n"); - return; -} - -/*==== Interface to PCMCIA Layer =======================================*/ - -static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data) -{ - return pcmcia_request_io(p_dev); -} - -static int cm4000_config(struct pcmcia_device * link, int devno) -{ - link->config_flags |= CONF_AUTO_SET_IO; - - /* read the config-tuples */ - if (pcmcia_loop_config(link, cm4000_config_check, NULL)) - goto cs_release; - - if (pcmcia_enable_device(link)) - goto cs_release; - - return 0; - -cs_release: - cm4000_release(link); - return -ENODEV; -} - -static int cm4000_suspend(struct pcmcia_device *link) -{ - struct cm4000_dev *dev; - - dev = link->priv; - stop_monitor(dev); - - return 0; -} - -static int cm4000_resume(struct pcmcia_device *link) -{ - struct cm4000_dev *dev; - - dev = link->priv; - if (link->open) - start_monitor(dev); - - return 0; -} - -static void cm4000_release(struct pcmcia_device *link) -{ - cmm_cm4000_release(link); /* delay release until device closed */ - pcmcia_disable_device(link); -} - -static int cm4000_probe(struct pcmcia_device *link) -{ - struct cm4000_dev *dev; - int i, ret; - - for (i = 0; i < CM4000_MAX_DEV; i++) - if (dev_table[i] == NULL) - break; - - if (i == CM4000_MAX_DEV) { - printk(KERN_NOTICE MODULE_NAME ": all devices in use\n"); - return -ENODEV; - } - - /* create a new cm4000_cs device */ - dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL); - if (dev == NULL) - return -ENOMEM; - - dev->p_dev = link; - link->priv = dev; - dev_table[i] = link; - - init_waitqueue_head(&dev->devq); - init_waitqueue_head(&dev->ioq); - init_waitqueue_head(&dev->atrq); - init_waitqueue_head(&dev->readq); - - ret = cm4000_config(link, i); - if (ret) { - dev_table[i] = NULL; - kfree(dev); - return ret; - } - - device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i); - - return 0; -} - -static void cm4000_detach(struct pcmcia_device *link) -{ - struct cm4000_dev *dev = link->priv; - int devno; - - /* find device */ - for (devno = 0; devno < CM4000_MAX_DEV; devno++) - if (dev_table[devno] == link) - break; - if (devno == CM4000_MAX_DEV) - return; - - stop_monitor(dev); - - cm4000_release(link); - - dev_table[devno] = NULL; - kfree(dev); - - device_destroy(cmm_class, MKDEV(major, devno)); - - return; -} - -static const struct file_operations cm4000_fops = { - .owner = THIS_MODULE, - .read = cmm_read, - .write = cmm_write, - .unlocked_ioctl = cmm_ioctl, - .open = cmm_open, - .release= cmm_close, - .llseek = no_llseek, -}; - -static const struct pcmcia_device_id cm4000_ids[] = { - PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002), - PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, cm4000_ids); - -static struct pcmcia_driver cm4000_driver = { - .owner = THIS_MODULE, - .name = "cm4000_cs", - .probe = cm4000_probe, - .remove = cm4000_detach, - .suspend = cm4000_suspend, - .resume = cm4000_resume, - .id_table = cm4000_ids, -}; - -static int __init cmm_init(void) -{ - int rc; - - cmm_class = class_create(THIS_MODULE, "cardman_4000"); - if (IS_ERR(cmm_class)) - return PTR_ERR(cmm_class); - - major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); - if (major < 0) { - printk(KERN_WARNING MODULE_NAME - ": could not get major number\n"); - class_destroy(cmm_class); - return major; - } - - rc = pcmcia_register_driver(&cm4000_driver); - if (rc < 0) { 
- unregister_chrdev(major, DEVICE_NAME); - class_destroy(cmm_class); - return rc; - } - - return 0; -} - -static void __exit cmm_exit(void) -{ - pcmcia_unregister_driver(&cm4000_driver); - unregister_chrdev(major, DEVICE_NAME); - class_destroy(cmm_class); -}; - -module_init(cmm_init); -module_exit(cmm_exit); -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c deleted file mode 100644 index f80965407d3ccc74d132cb8817aa886661581067..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/cm4040_cs.c +++ /dev/null @@ -1,685 +0,0 @@ -/* - * A driver for the Omnikey PCMCIA smartcard reader CardMan 4040 - * - * (c) 2000-2004 Omnikey AG (http://www.omnikey.com/) - * - * (C) 2005-2006 Harald Welte - * - add support for poll() - * - driver cleanup - * - add waitqueues - * - adhere to linux kernel coding style and policies - * - support 2.6.13 "new style" pcmcia interface - * - add class interface for udev device creation - * - * The device basically is a USB CCID compliant device that has been - * attached to an I/O-Mapped FIFO. - * - * All rights reserved, Dual BSD/GPL Licensed. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "cm4040_cs.h" - - -#define reader_to_dev(x) (&x->p_dev->dev) - -/* n (debug level) is ignored */ -/* additional debug output may be enabled by re-compiling with - * CM4040_DEBUG set */ -/* #define CM4040_DEBUG */ -#define DEBUGP(n, rdr, x, args...) do { \ - dev_dbg(reader_to_dev(rdr), "%s:" x, \ - __func__ , ## args); \ - } while (0) - -static DEFINE_MUTEX(cm4040_mutex); - -#define CCID_DRIVER_BULK_DEFAULT_TIMEOUT (150*HZ) -#define CCID_DRIVER_ASYNC_POWERUP_TIMEOUT (35*HZ) -#define CCID_DRIVER_MINIMUM_TIMEOUT (3*HZ) -#define READ_WRITE_BUFFER_SIZE 512 -#define POLL_LOOP_COUNT 1000 - -/* how often to poll for fifo status change */ -#define POLL_PERIOD msecs_to_jiffies(10) - -static void reader_release(struct pcmcia_device *link); - -static int major; -static struct class *cmx_class; - -#define BS_READABLE 0x01 -#define BS_WRITABLE 0x02 - -struct reader_dev { - struct pcmcia_device *p_dev; - wait_queue_head_t devq; - wait_queue_head_t poll_wait; - wait_queue_head_t read_wait; - wait_queue_head_t write_wait; - unsigned long buffer_status; - unsigned long timeout; - unsigned char s_buf[READ_WRITE_BUFFER_SIZE]; - unsigned char r_buf[READ_WRITE_BUFFER_SIZE]; - struct timer_list poll_timer; -}; - -static struct pcmcia_device *dev_table[CM_MAX_DEV]; - -#ifndef CM4040_DEBUG -#define xoutb outb -#define xinb inb -#else -static inline void xoutb(unsigned char val, unsigned short port) -{ - pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); - outb(val, port); -} - -static inline unsigned char xinb(unsigned short port) -{ - unsigned char val; - - val = inb(port); - pr_debug("%.2x=inb(%.4x)\n", val, port); - return val; -} -#endif - -/* poll the device fifo status register. not to be confused with - * the poll syscall. 
*/ -static void cm4040_do_poll(struct timer_list *t) -{ - struct reader_dev *dev = from_timer(dev, t, poll_timer); - unsigned int obs = xinb(dev->p_dev->resource[0]->start - + REG_OFFSET_BUFFER_STATUS); - - if ((obs & BSR_BULK_IN_FULL)) { - set_bit(BS_READABLE, &dev->buffer_status); - DEBUGP(4, dev, "waking up read_wait\n"); - wake_up_interruptible(&dev->read_wait); - } else - clear_bit(BS_READABLE, &dev->buffer_status); - - if (!(obs & BSR_BULK_OUT_FULL)) { - set_bit(BS_WRITABLE, &dev->buffer_status); - DEBUGP(4, dev, "waking up write_wait\n"); - wake_up_interruptible(&dev->write_wait); - } else - clear_bit(BS_WRITABLE, &dev->buffer_status); - - if (dev->buffer_status) - wake_up_interruptible(&dev->poll_wait); - - mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); -} - -static void cm4040_stop_poll(struct reader_dev *dev) -{ - del_timer_sync(&dev->poll_timer); -} - -static int wait_for_bulk_out_ready(struct reader_dev *dev) -{ - int i, rc; - int iobase = dev->p_dev->resource[0]->start; - - for (i = 0; i < POLL_LOOP_COUNT; i++) { - if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) - & BSR_BULK_OUT_FULL) == 0) { - DEBUGP(4, dev, "BulkOut empty (i=%d)\n", i); - return 1; - } - } - - DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n", - dev->timeout); - rc = wait_event_interruptible_timeout(dev->write_wait, - test_and_clear_bit(BS_WRITABLE, - &dev->buffer_status), - dev->timeout); - - if (rc > 0) - DEBUGP(4, dev, "woke up: BulkOut empty\n"); - else if (rc == 0) - DEBUGP(4, dev, "woke up: BulkOut full, returning 0 :(\n"); - else if (rc < 0) - DEBUGP(4, dev, "woke up: signal arrived\n"); - - return rc; -} - -/* Write to Sync Control Register */ -static int write_sync_reg(unsigned char val, struct reader_dev *dev) -{ - int iobase = dev->p_dev->resource[0]->start; - int rc; - - rc = wait_for_bulk_out_ready(dev); - if (rc <= 0) - return rc; - - xoutb(val, iobase + REG_OFFSET_SYNC_CONTROL); - rc = wait_for_bulk_out_ready(dev); - if (rc <= 0) - return rc; - - return 1; -} - -static int wait_for_bulk_in_ready(struct reader_dev *dev) -{ - int i, rc; - int iobase = dev->p_dev->resource[0]->start; - - for (i = 0; i < POLL_LOOP_COUNT; i++) { - if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) - & BSR_BULK_IN_FULL) == BSR_BULK_IN_FULL) { - DEBUGP(3, dev, "BulkIn full (i=%d)\n", i); - return 1; - } - } - - DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld\n", - dev->timeout); - rc = wait_event_interruptible_timeout(dev->read_wait, - test_and_clear_bit(BS_READABLE, - &dev->buffer_status), - dev->timeout); - if (rc > 0) - DEBUGP(4, dev, "woke up: BulkIn full\n"); - else if (rc == 0) - DEBUGP(4, dev, "woke up: BulkIn not full, returning 0 :(\n"); - else if (rc < 0) - DEBUGP(4, dev, "woke up: signal arrived\n"); - - return rc; -} - -static ssize_t cm4040_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) -{ - struct reader_dev *dev = filp->private_data; - int iobase = dev->p_dev->resource[0]->start; - size_t bytes_to_read; - unsigned long i; - size_t min_bytes_to_read; - int rc; - unsigned char uc; - - DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid); - - if (count == 0) - return 0; - - if (count < 10) - return -EFAULT; - - if (filp->f_flags & O_NONBLOCK) { - DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); - DEBUGP(2, dev, "<- cm4040_read (failure)\n"); - return -EAGAIN; - } - - if (!pcmcia_dev_present(dev->p_dev)) - return -ENODEV; - - for (i = 0; i < 5; i++) { - rc = wait_for_bulk_in_ready(dev); - if (rc <= 0) { - DEBUGP(5, dev, 
"wait_for_bulk_in_ready rc=%.2x\n", rc); - DEBUGP(2, dev, "<- cm4040_read (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - return -EIO; - } - dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); -#ifdef CM4040_DEBUG - pr_debug("%lu:%2x ", i, dev->r_buf[i]); - } - pr_debug("\n"); -#else - } -#endif - - bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); - - DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read); - - min_bytes_to_read = min(count, bytes_to_read + 5); - min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); - - DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read); - - for (i = 0; i < (min_bytes_to_read-5); i++) { - rc = wait_for_bulk_in_ready(dev); - if (rc <= 0) { - DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc); - DEBUGP(2, dev, "<- cm4040_read (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - return -EIO; - } - dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); -#ifdef CM4040_DEBUG - pr_debug("%lu:%2x ", i, dev->r_buf[i]); - } - pr_debug("\n"); -#else - } -#endif - - *ppos = min_bytes_to_read; - if (copy_to_user(buf, dev->r_buf, min_bytes_to_read)) - return -EFAULT; - - rc = wait_for_bulk_in_ready(dev); - if (rc <= 0) { - DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc); - DEBUGP(2, dev, "<- cm4040_read (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - return -EIO; - } - - rc = write_sync_reg(SCR_READER_TO_HOST_DONE, dev); - if (rc <= 0) { - DEBUGP(5, dev, "write_sync_reg c=%.2x\n", rc); - DEBUGP(2, dev, "<- cm4040_read (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - else - return -EIO; - } - - uc = xinb(iobase + REG_OFFSET_BULK_IN); - - DEBUGP(2, dev, "<- cm4040_read (successfully)\n"); - return min_bytes_to_read; -} - -static ssize_t cm4040_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct reader_dev *dev = filp->private_data; - int iobase = dev->p_dev->resource[0]->start; - ssize_t rc; - int i; - unsigned int bytes_to_write; - - DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid); - - if (count == 0) { - DEBUGP(2, dev, "<- cm4040_write empty read (successfully)\n"); - return 0; - } - - if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) { - DEBUGP(2, dev, "<- cm4040_write buffersize=%zd < 5\n", count); - return -EIO; - } - - if (filp->f_flags & O_NONBLOCK) { - DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); - DEBUGP(4, dev, "<- cm4040_write (failure)\n"); - return -EAGAIN; - } - - if (!pcmcia_dev_present(dev->p_dev)) - return -ENODEV; - - bytes_to_write = count; - if (copy_from_user(dev->s_buf, buf, bytes_to_write)) - return -EFAULT; - - switch (dev->s_buf[0]) { - case CMD_PC_TO_RDR_XFRBLOCK: - case CMD_PC_TO_RDR_SECURE: - case CMD_PC_TO_RDR_TEST_SECURE: - case CMD_PC_TO_RDR_OK_SECURE: - dev->timeout = CCID_DRIVER_BULK_DEFAULT_TIMEOUT; - break; - - case CMD_PC_TO_RDR_ICCPOWERON: - dev->timeout = CCID_DRIVER_ASYNC_POWERUP_TIMEOUT; - break; - - case CMD_PC_TO_RDR_GETSLOTSTATUS: - case CMD_PC_TO_RDR_ICCPOWEROFF: - case CMD_PC_TO_RDR_GETPARAMETERS: - case CMD_PC_TO_RDR_RESETPARAMETERS: - case CMD_PC_TO_RDR_SETPARAMETERS: - case CMD_PC_TO_RDR_ESCAPE: - case CMD_PC_TO_RDR_ICCCLOCK: - default: - dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT; - break; - } - - rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); - if (rc <= 0) { - DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); - DEBUGP(2, dev, "<- cm4040_write (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - else - return -EIO; - } - - DEBUGP(4, dev, "start \n"); - - for (i = 0; i < bytes_to_write; i++) { 
- rc = wait_for_bulk_out_ready(dev); - if (rc <= 0) { - DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", - rc); - DEBUGP(2, dev, "<- cm4040_write (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - else - return -EIO; - } - - xoutb(dev->s_buf[i],iobase + REG_OFFSET_BULK_OUT); - } - DEBUGP(4, dev, "end\n"); - - rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); - - if (rc <= 0) { - DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); - DEBUGP(2, dev, "<- cm4040_write (failed)\n"); - if (rc == -ERESTARTSYS) - return rc; - else - return -EIO; - } - - DEBUGP(2, dev, "<- cm4040_write (successfully)\n"); - return count; -} - -static __poll_t cm4040_poll(struct file *filp, poll_table *wait) -{ - struct reader_dev *dev = filp->private_data; - __poll_t mask = 0; - - poll_wait(filp, &dev->poll_wait, wait); - - if (test_and_clear_bit(BS_READABLE, &dev->buffer_status)) - mask |= EPOLLIN | EPOLLRDNORM; - if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status)) - mask |= EPOLLOUT | EPOLLWRNORM; - - DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask); - - return mask; -} - -static int cm4040_open(struct inode *inode, struct file *filp) -{ - struct reader_dev *dev; - struct pcmcia_device *link; - int minor = iminor(inode); - int ret; - - if (minor >= CM_MAX_DEV) - return -ENODEV; - - mutex_lock(&cm4040_mutex); - link = dev_table[minor]; - if (link == NULL || !pcmcia_dev_present(link)) { - ret = -ENODEV; - goto out; - } - - if (link->open) { - ret = -EBUSY; - goto out; - } - - dev = link->priv; - filp->private_data = dev; - - if (filp->f_flags & O_NONBLOCK) { - DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n"); - ret = -EAGAIN; - goto out; - } - - link->open = 1; - - mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD); - - DEBUGP(2, dev, "<- cm4040_open (successfully)\n"); - ret = nonseekable_open(inode, filp); -out: - mutex_unlock(&cm4040_mutex); - return ret; -} - -static int cm4040_close(struct inode *inode, struct file *filp) -{ - struct reader_dev *dev = filp->private_data; - struct pcmcia_device *link; - int minor = iminor(inode); - - DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode), - iminor(inode)); - - if (minor >= CM_MAX_DEV) - return -ENODEV; - - link = dev_table[minor]; - if (link == NULL) - return -ENODEV; - - cm4040_stop_poll(dev); - - link->open = 0; - wake_up(&dev->devq); - - DEBUGP(2, dev, "<- cm4040_close\n"); - return 0; -} - -static void cm4040_reader_release(struct pcmcia_device *link) -{ - struct reader_dev *dev = link->priv; - - DEBUGP(3, dev, "-> cm4040_reader_release\n"); - while (link->open) { - DEBUGP(3, dev, KERN_INFO MODULE_NAME ": delaying release " - "until process has terminated\n"); - wait_event(dev->devq, (link->open == 0)); - } - DEBUGP(3, dev, "<- cm4040_reader_release\n"); - return; -} - -static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data) -{ - return pcmcia_request_io(p_dev); -} - - -static int reader_config(struct pcmcia_device *link, int devno) -{ - struct reader_dev *dev; - int fail_rc; - - link->config_flags |= CONF_AUTO_SET_IO; - - if (pcmcia_loop_config(link, cm4040_config_check, NULL)) - goto cs_release; - - fail_rc = pcmcia_enable_device(link); - if (fail_rc != 0) { - dev_info(&link->dev, "pcmcia_enable_device failed 0x%x\n", - fail_rc); - goto cs_release; - } - - dev = link->priv; - - DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno, - link->resource[0]); - DEBUGP(2, dev, "<- reader_config (succ)\n"); - - return 0; - -cs_release: - reader_release(link); - return -ENODEV; -} - -static void reader_release(struct 
pcmcia_device *link) -{ - cm4040_reader_release(link); - pcmcia_disable_device(link); -} - -static int reader_probe(struct pcmcia_device *link) -{ - struct reader_dev *dev; - int i, ret; - - for (i = 0; i < CM_MAX_DEV; i++) { - if (dev_table[i] == NULL) - break; - } - - if (i == CM_MAX_DEV) - return -ENODEV; - - dev = kzalloc(sizeof(struct reader_dev), GFP_KERNEL); - if (dev == NULL) - return -ENOMEM; - - dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT; - dev->buffer_status = 0; - - link->priv = dev; - dev->p_dev = link; - - dev_table[i] = link; - - init_waitqueue_head(&dev->devq); - init_waitqueue_head(&dev->poll_wait); - init_waitqueue_head(&dev->read_wait); - init_waitqueue_head(&dev->write_wait); - timer_setup(&dev->poll_timer, cm4040_do_poll, 0); - - ret = reader_config(link, i); - if (ret) { - dev_table[i] = NULL; - kfree(dev); - return ret; - } - - device_create(cmx_class, NULL, MKDEV(major, i), NULL, "cmx%d", i); - - return 0; -} - -static void reader_detach(struct pcmcia_device *link) -{ - struct reader_dev *dev = link->priv; - int devno; - - /* find device */ - for (devno = 0; devno < CM_MAX_DEV; devno++) { - if (dev_table[devno] == link) - break; - } - if (devno == CM_MAX_DEV) - return; - - reader_release(link); - - dev_table[devno] = NULL; - kfree(dev); - - device_destroy(cmx_class, MKDEV(major, devno)); - - return; -} - -static const struct file_operations reader_fops = { - .owner = THIS_MODULE, - .read = cm4040_read, - .write = cm4040_write, - .open = cm4040_open, - .release = cm4040_close, - .poll = cm4040_poll, - .llseek = no_llseek, -}; - -static const struct pcmcia_device_id cm4040_ids[] = { - PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200), - PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040", - 0xE32CDD8C, 0x8F23318B), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, cm4040_ids); - -static struct pcmcia_driver reader_driver = { - .owner = THIS_MODULE, - .name = "cm4040_cs", - .probe = reader_probe, - .remove = reader_detach, - .id_table = cm4040_ids, -}; - -static int __init cm4040_init(void) -{ - int rc; - - cmx_class = class_create(THIS_MODULE, "cardman_4040"); - if (IS_ERR(cmx_class)) - return PTR_ERR(cmx_class); - - major = register_chrdev(0, DEVICE_NAME, &reader_fops); - if (major < 0) { - printk(KERN_WARNING MODULE_NAME - ": could not get major number\n"); - class_destroy(cmx_class); - return major; - } - - rc = pcmcia_register_driver(&reader_driver); - if (rc < 0) { - unregister_chrdev(major, DEVICE_NAME); - class_destroy(cmx_class); - return rc; - } - - return 0; -} - -static void __exit cm4040_exit(void) -{ - pcmcia_unregister_driver(&reader_driver); - unregister_chrdev(major, DEVICE_NAME); - class_destroy(cmx_class); -} - -module_init(cm4040_init); -module_exit(cm4040_exit); -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/pcmcia/cm4040_cs.h b/drivers/char/pcmcia/cm4040_cs.h deleted file mode 100644 index e2ffff995d512a04ea36c3286d5117c0861b0183..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/cm4040_cs.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CM4040_H_ -#define _CM4040_H_ - -#define CM_MAX_DEV 4 - -#define DEVICE_NAME "cmx" -#define MODULE_NAME "cm4040_cs" - -#define REG_OFFSET_BULK_OUT 0 -#define REG_OFFSET_BULK_IN 0 -#define REG_OFFSET_BUFFER_STATUS 1 -#define REG_OFFSET_SYNC_CONTROL 2 - -#define BSR_BULK_IN_FULL 0x02 -#define BSR_BULK_OUT_FULL 0x01 - -#define SCR_HOST_TO_READER_START 0x80 -#define SCR_ABORT 0x40 -#define SCR_EN_NOTIFY 0x20 -#define SCR_ACK_NOTIFY 0x10 -#define 
SCR_READER_TO_HOST_DONE 0x08 -#define SCR_HOST_TO_READER_DONE 0x04 -#define SCR_PULSE_INTERRUPT 0x02 -#define SCR_POWER_DOWN 0x01 - - -#define CMD_PC_TO_RDR_ICCPOWERON 0x62 -#define CMD_PC_TO_RDR_GETSLOTSTATUS 0x65 -#define CMD_PC_TO_RDR_ICCPOWEROFF 0x63 -#define CMD_PC_TO_RDR_SECURE 0x69 -#define CMD_PC_TO_RDR_GETPARAMETERS 0x6C -#define CMD_PC_TO_RDR_RESETPARAMETERS 0x6D -#define CMD_PC_TO_RDR_SETPARAMETERS 0x61 -#define CMD_PC_TO_RDR_XFRBLOCK 0x6F -#define CMD_PC_TO_RDR_ESCAPE 0x6B -#define CMD_PC_TO_RDR_ICCCLOCK 0x6E -#define CMD_PC_TO_RDR_TEST_SECURE 0x74 -#define CMD_PC_TO_RDR_OK_SECURE 0x89 - - -#define CMD_RDR_TO_PC_SLOTSTATUS 0x81 -#define CMD_RDR_TO_PC_DATABLOCK 0x80 -#define CMD_RDR_TO_PC_PARAMETERS 0x82 -#define CMD_RDR_TO_PC_ESCAPE 0x83 -#define CMD_RDR_TO_PC_OK_SECURE 0x89 - -#endif /* _CM4040_H_ */ diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c deleted file mode 100644 index f6b43d9350f0f6877ccbc4235df903885390b2a0..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/scr24x_cs.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * SCR24x PCMCIA Smart Card Reader Driver - * - * Copyright (C) 2005-2006 TL Sudheendran - * Copyright (C) 2016 Lubomir Rintel - * - * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define CCID_HEADER_SIZE 10 -#define CCID_LENGTH_OFFSET 1 -#define CCID_MAX_LEN 271 - -#define SCR24X_DATA(n) (1 + n) -#define SCR24X_CMD_STATUS 7 -#define CMD_START 0x40 -#define CMD_WRITE_BYTE 0x41 -#define CMD_READ_BYTE 0x42 -#define STATUS_BUSY 0x80 - -struct scr24x_dev { - struct device *dev; - struct cdev c_dev; - unsigned char buf[CCID_MAX_LEN]; - int devno; - struct mutex lock; - struct kref refcnt; - u8 __iomem *regs; -}; - -#define SCR24X_DEVS 8 -static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS); - -static struct class *scr24x_class; -static dev_t scr24x_devt; - -static void scr24x_delete(struct kref *kref) -{ - struct scr24x_dev *dev = container_of(kref, struct scr24x_dev, - refcnt); - - kfree(dev); -} - -static int scr24x_wait_ready(struct scr24x_dev *dev) -{ - u_char status; - int timeout = 100; - - do { - status = ioread8(dev->regs + SCR24X_CMD_STATUS); - if (!(status & STATUS_BUSY)) - return 0; - - msleep(20); - } while (--timeout); - - return -EIO; -} - -static int scr24x_open(struct inode *inode, struct file *filp) -{ - struct scr24x_dev *dev = container_of(inode->i_cdev, - struct scr24x_dev, c_dev); - - kref_get(&dev->refcnt); - filp->private_data = dev; - - return nonseekable_open(inode, filp); -} - -static int scr24x_release(struct inode *inode, struct file *filp) -{ - struct scr24x_dev *dev = filp->private_data; - - /* We must not take the dev->lock here as scr24x_delete() - * might be called to remove the dev structure altogether. - * We don't need the lock anyway, since after the reference - * acquired in probe() is released in remove() the chrdev - * is already unregistered and noone can possibly acquire - * a reference via open() anymore. 
*/ - kref_put(&dev->refcnt, scr24x_delete); - return 0; -} - -static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit) -{ - size_t i, y; - int ret; - - for (i = offset; i < limit; i += 5) { - iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS); - ret = scr24x_wait_ready(dev); - if (ret < 0) - return ret; - - for (y = 0; y < 5 && i + y < limit; y++) - dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y)); - } - - return 0; -} - -static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count, - loff_t *ppos) -{ - struct scr24x_dev *dev = filp->private_data; - int ret; - int len; - - if (count < CCID_HEADER_SIZE) - return -EINVAL; - - if (mutex_lock_interruptible(&dev->lock)) - return -ERESTARTSYS; - - if (!dev->dev) { - ret = -ENODEV; - goto out; - } - - ret = scr24x_wait_ready(dev); - if (ret < 0) - goto out; - len = CCID_HEADER_SIZE; - ret = read_chunk(dev, 0, len); - if (ret < 0) - goto out; - - len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET])); - if (len > sizeof(dev->buf)) { - ret = -EIO; - goto out; - } - ret = read_chunk(dev, CCID_HEADER_SIZE, len); - if (ret < 0) - goto out; - - if (len < count) - count = len; - - if (copy_to_user(buf, dev->buf, count)) { - ret = -EFAULT; - goto out; - } - - ret = count; -out: - mutex_unlock(&dev->lock); - return ret; -} - -static ssize_t scr24x_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct scr24x_dev *dev = filp->private_data; - size_t i, y; - int ret; - - if (mutex_lock_interruptible(&dev->lock)) - return -ERESTARTSYS; - - if (!dev->dev) { - ret = -ENODEV; - goto out; - } - - if (count > sizeof(dev->buf)) { - ret = -EINVAL; - goto out; - } - - if (copy_from_user(dev->buf, buf, count)) { - ret = -EFAULT; - goto out; - } - - ret = scr24x_wait_ready(dev); - if (ret < 0) - goto out; - - iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS); - ret = scr24x_wait_ready(dev); - if (ret < 0) - goto out; - - for (i = 0; i < count; i += 5) { - for (y = 0; y < 5 && i + y < count; y++) - iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y)); - - iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS); - ret = scr24x_wait_ready(dev); - if (ret < 0) - goto out; - } - - ret = count; -out: - mutex_unlock(&dev->lock); - return ret; -} - -static const struct file_operations scr24x_fops = { - .owner = THIS_MODULE, - .read = scr24x_read, - .write = scr24x_write, - .open = scr24x_open, - .release = scr24x_release, - .llseek = no_llseek, -}; - -static int scr24x_config_check(struct pcmcia_device *link, void *priv_data) -{ - if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11) - return -ENODEV; - return pcmcia_request_io(link); -} - -static int scr24x_probe(struct pcmcia_device *link) -{ - struct scr24x_dev *dev; - int ret; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS); - if (dev->devno >= SCR24X_DEVS) { - ret = -EBUSY; - goto err; - } - - mutex_init(&dev->lock); - kref_init(&dev->refcnt); - - link->priv = dev; - link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; - - ret = pcmcia_loop_config(link, scr24x_config_check, NULL); - if (ret < 0) - goto err; - - dev->dev = &link->dev; - dev->regs = devm_ioport_map(&link->dev, - link->resource[PCMCIA_IOPORT_0]->start, - resource_size(link->resource[PCMCIA_IOPORT_0])); - if (!dev->regs) { - ret = -EIO; - goto err; - } - - cdev_init(&dev->c_dev, &scr24x_fops); - dev->c_dev.owner = THIS_MODULE; - dev->c_dev.ops = &scr24x_fops; - ret = 
cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1); - if (ret < 0) - goto err; - - ret = pcmcia_enable_device(link); - if (ret < 0) { - pcmcia_disable_device(link); - goto err; - } - - device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno), - NULL, "scr24x%d", dev->devno); - - dev_info(&link->dev, "SCR24x Chip Card Interface\n"); - return 0; - -err: - if (dev->devno < SCR24X_DEVS) - clear_bit(dev->devno, scr24x_minors); - kfree (dev); - return ret; -} - -static void scr24x_remove(struct pcmcia_device *link) -{ - struct scr24x_dev *dev = (struct scr24x_dev *)link->priv; - - device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno)); - mutex_lock(&dev->lock); - pcmcia_disable_device(link); - cdev_del(&dev->c_dev); - clear_bit(dev->devno, scr24x_minors); - dev->dev = NULL; - mutex_unlock(&dev->lock); - - kref_put(&dev->refcnt, scr24x_delete); -} - -static const struct pcmcia_device_id scr24x_ids[] = { - PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader", - 0x53cb94f9, 0xbfdf89a5), - PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3), - PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de), - PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665), - PCMCIA_DEVICE_NULL -}; -MODULE_DEVICE_TABLE(pcmcia, scr24x_ids); - -static struct pcmcia_driver scr24x_driver = { - .owner = THIS_MODULE, - .name = "scr24x_cs", - .probe = scr24x_probe, - .remove = scr24x_remove, - .id_table = scr24x_ids, -}; - -static int __init scr24x_init(void) -{ - int ret; - - scr24x_class = class_create(THIS_MODULE, "scr24x"); - if (IS_ERR(scr24x_class)) - return PTR_ERR(scr24x_class); - - ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x"); - if (ret < 0) { - class_destroy(scr24x_class); - return ret; - } - - ret = pcmcia_register_driver(&scr24x_driver); - if (ret < 0) { - unregister_chrdev_region(scr24x_devt, SCR24X_DEVS); - class_destroy(scr24x_class); - } - - return ret; -} - -static void __exit scr24x_exit(void) -{ - pcmcia_unregister_driver(&scr24x_driver); - unregister_chrdev_region(scr24x_devt, SCR24X_DEVS); - class_destroy(scr24x_class); -} - -module_init(scr24x_init); -module_exit(scr24x_exit); - -MODULE_AUTHOR("Lubomir Rintel"); -MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c deleted file mode 100644 index 66b04194aa9f8e1ea174ee7e2e73e5af6eb06ab6..0000000000000000000000000000000000000000 --- a/drivers/char/pcmcia/synclink_cs.c +++ /dev/null @@ -1,4305 +0,0 @@ -/* - * linux/drivers/char/pcmcia/synclink_cs.c - * - * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $ - * - * Device driver for Microgate SyncLink PC Card - * multiprotocol serial adapter. - * - * written by Paul Fulghum for Microgate Corporation - * paulkf@microgate.com - * - * Microgate and SyncLink are trademarks of Microgate Corporation - * - * This code is released under the GNU General Public License (GPL) - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) -#if defined(__i386__) -# define BREAKPOINT() asm(" int $3"); -#else -# define BREAKPOINT() { } -#endif - -#define MAX_DEVICE_COUNT 4 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) -#define SYNCLINK_GENERIC_HDLC 1 -#else -#define SYNCLINK_GENERIC_HDLC 0 -#endif - -#define GET_USER(error,value,addr) error = get_user(value,addr) -#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 -#define PUT_USER(error,value,addr) error = put_user(value,addr) -#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 - -#include - -static MGSL_PARAMS default_params = { - MGSL_MODE_HDLC, /* unsigned long mode */ - 0, /* unsigned char loopback; */ - HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ - HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ - 0, /* unsigned long clock_speed; */ - 0xff, /* unsigned char addr_filter; */ - HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ - HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ - HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ - 9600, /* unsigned long data_rate; */ - 8, /* unsigned char data_bits; */ - 1, /* unsigned char stop_bits; */ - ASYNC_PARITY_NONE /* unsigned char parity; */ -}; - -typedef struct { - int count; - unsigned char status; - char data[1]; -} RXBUF; - -/* The queue of BH actions to be performed */ - -#define BH_RECEIVE 1 -#define BH_TRANSMIT 2 -#define BH_STATUS 4 - -#define IO_PIN_SHUTDOWN_LIMIT 100 - -#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) - -struct _input_signal_events { - int ri_up; - int ri_down; - int dsr_up; - int dsr_down; - int dcd_up; - int dcd_down; - int cts_up; - int cts_down; -}; - - -/* - * Device instance data structure - */ - -typedef struct _mgslpc_info { - struct tty_port port; - void *if_ptr; /* General purpose pointer (used by SPPP) */ - int magic; - int line; - - struct mgsl_icount icount; - - int timeout; - int x_char; /* xon/xoff character */ - unsigned char read_status_mask; - unsigned char ignore_status_mask; - - unsigned char *tx_buf; - int tx_put; - int tx_get; - int tx_count; - - /* circular list of fixed length rx buffers */ - - unsigned char *rx_buf; /* memory allocated for all rx buffers */ - int rx_buf_total_size; /* size of memory allocated for rx buffers */ - int rx_put; /* index of next empty rx buffer */ - int rx_get; /* index of next full rx buffer */ - int rx_buf_size; /* size in bytes of single rx buffer */ - int rx_buf_count; /* total number of rx buffers */ - int 
rx_frame_count; /* number of full rx buffers */ - - wait_queue_head_t status_event_wait_q; - wait_queue_head_t event_wait_q; - struct timer_list tx_timer; /* HDLC transmit timeout timer */ - struct _mgslpc_info *next_device; /* device list link */ - - unsigned short imra_value; - unsigned short imrb_value; - unsigned char pim_value; - - spinlock_t lock; - struct work_struct task; /* task structure for scheduling bh */ - - u32 max_frame_size; - - u32 pending_bh; - - bool bh_running; - bool bh_requested; - - int dcd_chkcount; /* check counts to prevent */ - int cts_chkcount; /* too many IRQs if a signal */ - int dsr_chkcount; /* is floating */ - int ri_chkcount; - - bool rx_enabled; - bool rx_overflow; - - bool tx_enabled; - bool tx_active; - bool tx_aborting; - u32 idle_mode; - - int if_mode; /* serial interface selection (RS-232, v.35 etc) */ - - char device_name[25]; /* device instance name */ - - unsigned int io_base; /* base I/O address of adapter */ - unsigned int irq_level; - - MGSL_PARAMS params; /* communications parameters */ - - unsigned char serial_signals; /* current serial signal states */ - - bool irq_occurred; /* for diagnostics use */ - char testing_irq; - unsigned int init_error; /* startup error (DIAGS) */ - - char *flag_buf; - bool drop_rts_on_tx_done; - - struct _input_signal_events input_signal_events; - - /* PCMCIA support */ - struct pcmcia_device *p_dev; - int stop; - - /* SPPP/Cisco HDLC device parts */ - int netcount; - spinlock_t netlock; - -#if SYNCLINK_GENERIC_HDLC - struct net_device *netdev; -#endif - -} MGSLPC_INFO; - -#define MGSLPC_MAGIC 0x5402 - -/* - * The size of the serial xmit buffer is 1 page, or 4096 bytes - */ -#define TXBUFSIZE 4096 - - -#define CHA 0x00 /* channel A offset */ -#define CHB 0x40 /* channel B offset */ - -/* - * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it. 
- */ -#undef PVR - -#define RXFIFO 0 -#define TXFIFO 0 -#define STAR 0x20 -#define CMDR 0x20 -#define RSTA 0x21 -#define PRE 0x21 -#define MODE 0x22 -#define TIMR 0x23 -#define XAD1 0x24 -#define XAD2 0x25 -#define RAH1 0x26 -#define RAH2 0x27 -#define DAFO 0x27 -#define RAL1 0x28 -#define RFC 0x28 -#define RHCR 0x29 -#define RAL2 0x29 -#define RBCL 0x2a -#define XBCL 0x2a -#define RBCH 0x2b -#define XBCH 0x2b -#define CCR0 0x2c -#define CCR1 0x2d -#define CCR2 0x2e -#define CCR3 0x2f -#define VSTR 0x34 -#define BGR 0x34 -#define RLCR 0x35 -#define AML 0x36 -#define AMH 0x37 -#define GIS 0x38 -#define IVA 0x38 -#define IPC 0x39 -#define ISR 0x3a -#define IMR 0x3a -#define PVR 0x3c -#define PIS 0x3d -#define PIM 0x3d -#define PCR 0x3e -#define CCR4 0x3f - -// IMR/ISR - -#define IRQ_BREAK_ON BIT15 // rx break detected -#define IRQ_DATAOVERRUN BIT14 // receive data overflow -#define IRQ_ALLSENT BIT13 // all sent -#define IRQ_UNDERRUN BIT12 // transmit data underrun -#define IRQ_TIMER BIT11 // timer interrupt -#define IRQ_CTS BIT10 // CTS status change -#define IRQ_TXREPEAT BIT9 // tx message repeat -#define IRQ_TXFIFO BIT8 // transmit pool ready -#define IRQ_RXEOM BIT7 // receive message end -#define IRQ_EXITHUNT BIT6 // receive frame start -#define IRQ_RXTIME BIT6 // rx char timeout -#define IRQ_DCD BIT2 // carrier detect status change -#define IRQ_OVERRUN BIT1 // receive frame overflow -#define IRQ_RXFIFO BIT0 // receive pool full - -// STAR - -#define XFW BIT6 // transmit FIFO write enable -#define CEC BIT2 // command executing -#define CTS BIT1 // CTS state - -#define PVR_DTR BIT0 -#define PVR_DSR BIT1 -#define PVR_RI BIT2 -#define PVR_AUTOCTS BIT3 -#define PVR_RS232 0x20 /* 0010b */ -#define PVR_V35 0xe0 /* 1110b */ -#define PVR_RS422 0x40 /* 0100b */ - -/* Register access functions */ - -#define write_reg(info, reg, val) outb((val),(info)->io_base + (reg)) -#define read_reg(info, reg) inb((info)->io_base + (reg)) - -#define read_reg16(info, reg) inw((info)->io_base + (reg)) -#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg)) - -#define set_reg_bits(info, reg, mask) \ - write_reg(info, (reg), \ - (unsigned char) (read_reg(info, (reg)) | (mask))) -#define clear_reg_bits(info, reg, mask) \ - write_reg(info, (reg), \ - (unsigned char) (read_reg(info, (reg)) & ~(mask))) -/* - * interrupt enable/disable routines - */ -static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) -{ - if (channel == CHA) { - info->imra_value |= mask; - write_reg16(info, CHA + IMR, info->imra_value); - } else { - info->imrb_value |= mask; - write_reg16(info, CHB + IMR, info->imrb_value); - } -} -static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) -{ - if (channel == CHA) { - info->imra_value &= ~mask; - write_reg16(info, CHA + IMR, info->imra_value); - } else { - info->imrb_value &= ~mask; - write_reg16(info, CHB + IMR, info->imrb_value); - } -} - -#define port_irq_disable(info, mask) \ - { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); } - -#define port_irq_enable(info, mask) \ - { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); } - -static void rx_start(MGSLPC_INFO *info); -static void rx_stop(MGSLPC_INFO *info); - -static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty); -static void tx_stop(MGSLPC_INFO *info); -static void tx_set_idle(MGSLPC_INFO *info); - -static void get_signals(MGSLPC_INFO *info); -static void set_signals(MGSLPC_INFO *info); - -static void 
reset_device(MGSLPC_INFO *info); - -static void hdlc_mode(MGSLPC_INFO *info); -static void async_mode(MGSLPC_INFO *info); - -static void tx_timeout(struct timer_list *t); - -static int carrier_raised(struct tty_port *port); -static void dtr_rts(struct tty_port *port, int onoff); - -#if SYNCLINK_GENERIC_HDLC -#define dev_to_port(D) (dev_to_hdlc(D)->priv) -static void hdlcdev_tx_done(MGSLPC_INFO *info); -static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); -static int hdlcdev_init(MGSLPC_INFO *info); -static void hdlcdev_exit(MGSLPC_INFO *info); -#endif - -static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit); - -static bool register_test(MGSLPC_INFO *info); -static bool irq_test(MGSLPC_INFO *info); -static int adapter_test(MGSLPC_INFO *info); - -static int claim_resources(MGSLPC_INFO *info); -static void release_resources(MGSLPC_INFO *info); -static int mgslpc_add_device(MGSLPC_INFO *info); -static void mgslpc_remove_device(MGSLPC_INFO *info); - -static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty); -static void rx_reset_buffers(MGSLPC_INFO *info); -static int rx_alloc_buffers(MGSLPC_INFO *info); -static void rx_free_buffers(MGSLPC_INFO *info); - -static irqreturn_t mgslpc_isr(int irq, void *dev_id); - -/* - * Bottom half interrupt handlers - */ -static void bh_handler(struct work_struct *work); -static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty); -static void bh_status(MGSLPC_INFO *info); - -/* - * ioctl handlers - */ -static int tiocmget(struct tty_struct *tty); -static int tiocmset(struct tty_struct *tty, - unsigned int set, unsigned int clear); -static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount); -static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params); -static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty); -static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode); -static int set_txidle(MGSLPC_INFO *info, int idle_mode); -static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty); -static int tx_abort(MGSLPC_INFO *info); -static int set_rxenable(MGSLPC_INFO *info, int enable); -static int wait_events(MGSLPC_INFO *info, int __user *mask); - -static MGSLPC_INFO *mgslpc_device_list = NULL; -static int mgslpc_device_count = 0; - -/* - * Set this param to non-zero to load eax with the - * .text section address and breakpoint on module load. - * This is useful for use with gdb and add-symbol-file command. - */ -static bool break_on_load; - -/* - * Driver major number, defaults to zero to get auto - * assigned major number. May be forced as module parameter. 
- */ -static int ttymajor=0; - -static int debug_level = 0; -static int maxframe[MAX_DEVICE_COUNT] = {0,}; - -module_param(break_on_load, bool, 0); -module_param(ttymajor, int, 0); -module_param(debug_level, int, 0); -module_param_array(maxframe, int, NULL, 0); - -MODULE_LICENSE("GPL"); - -static char *driver_name = "SyncLink PC Card driver"; -static char *driver_version = "$Revision: 4.34 $"; - -static struct tty_driver *serial_driver; - -/* number of characters left in xmit buffer before we ask for more */ -#define WAKEUP_CHARS 256 - -static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty); -static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout); - -/* PCMCIA prototypes */ - -static int mgslpc_config(struct pcmcia_device *link); -static void mgslpc_release(u_long arg); -static void mgslpc_detach(struct pcmcia_device *p_dev); - -/* - * 1st function defined in .text section. Calling this function in - * init_module() followed by a breakpoint allows a remote debugger - * (gdb) to get the .text address for the add-symbol-file command. - * This allows remote debugging of dynamically loadable modules. - */ -static void* mgslpc_get_text_ptr(void) -{ - return mgslpc_get_text_ptr; -} - -/** - * line discipline callback wrappers - * - * The wrappers maintain line discipline references - * while calling into the line discipline. - * - * ldisc_receive_buf - pass receive data to line discipline - */ - -static void ldisc_receive_buf(struct tty_struct *tty, - const __u8 *data, char *flags, int count) -{ - struct tty_ldisc *ld; - if (!tty) - return; - ld = tty_ldisc_ref(tty); - if (ld) { - if (ld->ops->receive_buf) - ld->ops->receive_buf(tty, data, flags, count); - tty_ldisc_deref(ld); - } -} - -static const struct tty_port_operations mgslpc_port_ops = { - .carrier_raised = carrier_raised, - .dtr_rts = dtr_rts -}; - -static int mgslpc_probe(struct pcmcia_device *link) -{ - MGSLPC_INFO *info; - int ret; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("mgslpc_attach\n"); - - info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); - if (!info) { - printk("Error can't allocate device instance data\n"); - return -ENOMEM; - } - - info->magic = MGSLPC_MAGIC; - tty_port_init(&info->port); - info->port.ops = &mgslpc_port_ops; - INIT_WORK(&info->task, bh_handler); - info->max_frame_size = 4096; - info->port.close_delay = 5*HZ/10; - info->port.closing_wait = 30*HZ; - init_waitqueue_head(&info->status_event_wait_q); - init_waitqueue_head(&info->event_wait_q); - spin_lock_init(&info->lock); - spin_lock_init(&info->netlock); - memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); - info->idle_mode = HDLC_TXIDLE_FLAGS; - info->imra_value = 0xffff; - info->imrb_value = 0xffff; - info->pim_value = 0xff; - - info->p_dev = link; - link->priv = info; - - /* Initialize the struct pcmcia_device structure */ - - ret = mgslpc_config(link); - if (ret != 0) - goto failed; - - ret = mgslpc_add_device(info); - if (ret != 0) - goto failed_release; - - return 0; - -failed_release: - mgslpc_release((u_long)link); -failed: - tty_port_destroy(&info->port); - kfree(info); - return ret; -} - -/* Card has been inserted. 
- */ - -static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data) -{ - return pcmcia_request_io(p_dev); -} - -static int mgslpc_config(struct pcmcia_device *link) -{ - MGSLPC_INFO *info = link->priv; - int ret; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("mgslpc_config(0x%p)\n", link); - - link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; - - ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); - if (ret != 0) - goto failed; - - link->config_index = 8; - link->config_regs = PRESENT_OPTION; - - ret = pcmcia_request_irq(link, mgslpc_isr); - if (ret) - goto failed; - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - info->io_base = link->resource[0]->start; - info->irq_level = link->irq; - return 0; - -failed: - mgslpc_release((u_long)link); - return -ENODEV; -} - -/* Card has been removed. - * Unregister device and release PCMCIA configuration. - * If device is open, postpone until it is closed. - */ -static void mgslpc_release(u_long arg) -{ - struct pcmcia_device *link = (struct pcmcia_device *)arg; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("mgslpc_release(0x%p)\n", link); - - pcmcia_disable_device(link); -} - -static void mgslpc_detach(struct pcmcia_device *link) -{ - if (debug_level >= DEBUG_LEVEL_INFO) - printk("mgslpc_detach(0x%p)\n", link); - - ((MGSLPC_INFO *)link->priv)->stop = 1; - mgslpc_release((u_long)link); - - mgslpc_remove_device((MGSLPC_INFO *)link->priv); -} - -static int mgslpc_suspend(struct pcmcia_device *link) -{ - MGSLPC_INFO *info = link->priv; - - info->stop = 1; - - return 0; -} - -static int mgslpc_resume(struct pcmcia_device *link) -{ - MGSLPC_INFO *info = link->priv; - - info->stop = 0; - - return 0; -} - - -static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info, - char *name, const char *routine) -{ -#ifdef MGSLPC_PARANOIA_CHECK - static const char *badmagic = - "Warning: bad magic number for mgsl struct (%s) in %s\n"; - static const char *badinfo = - "Warning: null mgslpc_info for (%s) in %s\n"; - - if (!info) { - printk(badinfo, name, routine); - return true; - } - if (info->magic != MGSLPC_MAGIC) { - printk(badmagic, name, routine); - return true; - } -#else - if (!info) - return true; -#endif - return false; -} - - -#define CMD_RXFIFO BIT7 // release current rx FIFO -#define CMD_RXRESET BIT6 // receiver reset -#define CMD_RXFIFO_READ BIT5 -#define CMD_START_TIMER BIT4 -#define CMD_TXFIFO BIT3 // release current tx FIFO -#define CMD_TXEOM BIT1 // transmit end message -#define CMD_TXRESET BIT0 // transmit reset - -static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel) -{ - int i = 0; - /* wait for command completion */ - while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) { - udelay(1); - if (i++ == 1000) - return false; - } - return true; -} - -static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd) -{ - wait_command_complete(info, channel); - write_reg(info, (unsigned char) (channel + CMDR), cmd); -} - -static void tx_pause(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (mgslpc_paranoia_check(info, tty->name, "tx_pause")) - return; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("tx_pause(%s)\n", info->device_name); - - spin_lock_irqsave(&info->lock, flags); - if (info->tx_enabled) - tx_stop(info); - spin_unlock_irqrestore(&info->lock, flags); -} - -static void tx_release(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - 
if (mgslpc_paranoia_check(info, tty->name, "tx_release")) - return; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("tx_release(%s)\n", info->device_name); - - spin_lock_irqsave(&info->lock, flags); - if (!info->tx_enabled) - tx_start(info, tty); - spin_unlock_irqrestore(&info->lock, flags); -} - -/* Return next bottom half action to perform. - * or 0 if nothing to do. - */ -static int bh_action(MGSLPC_INFO *info) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&info->lock, flags); - - if (info->pending_bh & BH_RECEIVE) { - info->pending_bh &= ~BH_RECEIVE; - rc = BH_RECEIVE; - } else if (info->pending_bh & BH_TRANSMIT) { - info->pending_bh &= ~BH_TRANSMIT; - rc = BH_TRANSMIT; - } else if (info->pending_bh & BH_STATUS) { - info->pending_bh &= ~BH_STATUS; - rc = BH_STATUS; - } - - if (!rc) { - /* Mark BH routine as complete */ - info->bh_running = false; - info->bh_requested = false; - } - - spin_unlock_irqrestore(&info->lock, flags); - - return rc; -} - -static void bh_handler(struct work_struct *work) -{ - MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); - struct tty_struct *tty; - int action; - - if (debug_level >= DEBUG_LEVEL_BH) - printk("%s(%d):bh_handler(%s) entry\n", - __FILE__,__LINE__,info->device_name); - - info->bh_running = true; - tty = tty_port_tty_get(&info->port); - - while((action = bh_action(info)) != 0) { - - /* Process work item */ - if (debug_level >= DEBUG_LEVEL_BH) - printk("%s(%d):bh_handler() work item action=%d\n", - __FILE__,__LINE__,action); - - switch (action) { - - case BH_RECEIVE: - while(rx_get_frame(info, tty)); - break; - case BH_TRANSMIT: - bh_transmit(info, tty); - break; - case BH_STATUS: - bh_status(info); - break; - default: - /* unknown work item ID */ - printk("Unknown work item ID=%08X!\n", action); - break; - } - } - - tty_kref_put(tty); - if (debug_level >= DEBUG_LEVEL_BH) - printk("%s(%d):bh_handler(%s) exit\n", - __FILE__,__LINE__,info->device_name); -} - -static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty) -{ - if (debug_level >= DEBUG_LEVEL_BH) - printk("bh_transmit() entry on %s\n", info->device_name); - - if (tty) - tty_wakeup(tty); -} - -static void bh_status(MGSLPC_INFO *info) -{ - info->ri_chkcount = 0; - info->dsr_chkcount = 0; - info->dcd_chkcount = 0; - info->cts_chkcount = 0; -} - -/* eom: non-zero = end of frame */ -static void rx_ready_hdlc(MGSLPC_INFO *info, int eom) -{ - unsigned char data[2]; - unsigned char fifo_count, read_count, i; - RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size)); - - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom); - - if (!info->rx_enabled) - return; - - if (info->rx_frame_count >= info->rx_buf_count) { - /* no more free buffers */ - issue_command(info, CHA, CMD_RXRESET); - info->pending_bh |= BH_RECEIVE; - info->rx_overflow = true; - info->icount.buf_overrun++; - return; - } - - if (eom) { - /* end of frame, get FIFO count from RBCL register */ - fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f); - if (fifo_count == 0) - fifo_count = 32; - } else - fifo_count = 32; - - do { - if (fifo_count == 1) { - read_count = 1; - data[0] = read_reg(info, CHA + RXFIFO); - } else { - read_count = 2; - *((unsigned short *) data) = read_reg16(info, CHA + RXFIFO); - } - fifo_count -= read_count; - if (!fifo_count && eom) - buf->status = data[--read_count]; - - for (i = 0; i < read_count; i++) { - if (buf->count >= info->max_frame_size) { - /* frame too large, reset receiver and reset current 
buffer */ - issue_command(info, CHA, CMD_RXRESET); - buf->count = 0; - return; - } - *(buf->data + buf->count) = data[i]; - buf->count++; - } - } while (fifo_count); - - if (eom) { - info->pending_bh |= BH_RECEIVE; - info->rx_frame_count++; - info->rx_put++; - if (info->rx_put >= info->rx_buf_count) - info->rx_put = 0; - } - issue_command(info, CHA, CMD_RXFIFO); -} - -static void rx_ready_async(MGSLPC_INFO *info, int tcd) -{ - struct tty_port *port = &info->port; - unsigned char data, status, flag; - int fifo_count; - int work = 0; - struct mgsl_icount *icount = &info->icount; - - if (tcd) { - /* early termination, get FIFO count from RBCL register */ - fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f); - - /* Zero fifo count could mean 0 or 32 bytes available. - * If BIT5 of STAR is set then at least 1 byte is available. - */ - if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5)) - fifo_count = 32; - } else - fifo_count = 32; - - tty_buffer_request_room(port, fifo_count); - /* Flush received async data to receive data buffer. */ - while (fifo_count) { - data = read_reg(info, CHA + RXFIFO); - status = read_reg(info, CHA + RXFIFO); - fifo_count -= 2; - - icount->rx++; - flag = TTY_NORMAL; - - // if no frameing/crc error then save data - // BIT7:parity error - // BIT6:framing error - - if (status & (BIT7 + BIT6)) { - if (status & BIT7) - icount->parity++; - else - icount->frame++; - - /* discard char if tty control flags say so */ - if (status & info->ignore_status_mask) - continue; - - status &= info->read_status_mask; - - if (status & BIT7) - flag = TTY_PARITY; - else if (status & BIT6) - flag = TTY_FRAME; - } - work += tty_insert_flip_char(port, data, flag); - } - issue_command(info, CHA, CMD_RXFIFO); - - if (debug_level >= DEBUG_LEVEL_ISR) { - printk("%s(%d):rx_ready_async", - __FILE__,__LINE__); - printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", - __FILE__,__LINE__,icount->rx,icount->brk, - icount->parity,icount->frame,icount->overrun); - } - - if (work) - tty_flip_buffer_push(port); -} - - -static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty) -{ - if (!info->tx_active) - return; - - info->tx_active = false; - info->tx_aborting = false; - - if (info->params.mode == MGSL_MODE_ASYNC) - return; - - info->tx_count = info->tx_put = info->tx_get = 0; - del_timer(&info->tx_timer); - - if (info->drop_rts_on_tx_done) { - get_signals(info); - if (info->serial_signals & SerialSignal_RTS) { - info->serial_signals &= ~SerialSignal_RTS; - set_signals(info); - } - info->drop_rts_on_tx_done = false; - } - -#if SYNCLINK_GENERIC_HDLC - if (info->netcount) - hdlcdev_tx_done(info); - else -#endif - { - if (tty && (tty->stopped || tty->hw_stopped)) { - tx_stop(info); - return; - } - info->pending_bh |= BH_TRANSMIT; - } -} - -static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty) -{ - unsigned char fifo_count = 32; - int c; - - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name); - - if (info->params.mode == MGSL_MODE_HDLC) { - if (!info->tx_active) - return; - } else { - if (tty && (tty->stopped || tty->hw_stopped)) { - tx_stop(info); - return; - } - if (!info->tx_count) - info->tx_active = false; - } - - if (!info->tx_count) - return; - - while (info->tx_count && fifo_count) { - c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get))); - - if (c == 1) { - write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get)); - } else { - write_reg16(info, CHA + TXFIFO, - *((unsigned 
short*)(info->tx_buf + info->tx_get))); - } - info->tx_count -= c; - info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1); - fifo_count -= c; - } - - if (info->params.mode == MGSL_MODE_ASYNC) { - if (info->tx_count < WAKEUP_CHARS) - info->pending_bh |= BH_TRANSMIT; - issue_command(info, CHA, CMD_TXFIFO); - } else { - if (info->tx_count) - issue_command(info, CHA, CMD_TXFIFO); - else - issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM); - } -} - -static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty) -{ - get_signals(info); - if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) - irq_disable(info, CHB, IRQ_CTS); - info->icount.cts++; - if (info->serial_signals & SerialSignal_CTS) - info->input_signal_events.cts_up++; - else - info->input_signal_events.cts_down++; - wake_up_interruptible(&info->status_event_wait_q); - wake_up_interruptible(&info->event_wait_q); - - if (tty && tty_port_cts_enabled(&info->port)) { - if (tty->hw_stopped) { - if (info->serial_signals & SerialSignal_CTS) { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("CTS tx start..."); - tty->hw_stopped = 0; - tx_start(info, tty); - info->pending_bh |= BH_TRANSMIT; - return; - } - } else { - if (!(info->serial_signals & SerialSignal_CTS)) { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("CTS tx stop..."); - tty->hw_stopped = 1; - tx_stop(info); - } - } - } - info->pending_bh |= BH_STATUS; -} - -static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty) -{ - get_signals(info); - if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) - irq_disable(info, CHB, IRQ_DCD); - info->icount.dcd++; - if (info->serial_signals & SerialSignal_DCD) { - info->input_signal_events.dcd_up++; - } - else - info->input_signal_events.dcd_down++; -#if SYNCLINK_GENERIC_HDLC - if (info->netcount) { - if (info->serial_signals & SerialSignal_DCD) - netif_carrier_on(info->netdev); - else - netif_carrier_off(info->netdev); - } -#endif - wake_up_interruptible(&info->status_event_wait_q); - wake_up_interruptible(&info->event_wait_q); - - if (tty_port_check_carrier(&info->port)) { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s CD now %s...", info->device_name, - (info->serial_signals & SerialSignal_DCD) ? "on" : "off"); - if (info->serial_signals & SerialSignal_DCD) - wake_up_interruptible(&info->port.open_wait); - else { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("doing serial hangup..."); - if (tty) - tty_hangup(tty); - } - } - info->pending_bh |= BH_STATUS; -} - -static void dsr_change(MGSLPC_INFO *info) -{ - get_signals(info); - if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) - port_irq_disable(info, PVR_DSR); - info->icount.dsr++; - if (info->serial_signals & SerialSignal_DSR) - info->input_signal_events.dsr_up++; - else - info->input_signal_events.dsr_down++; - wake_up_interruptible(&info->status_event_wait_q); - wake_up_interruptible(&info->event_wait_q); - info->pending_bh |= BH_STATUS; -} - -static void ri_change(MGSLPC_INFO *info) -{ - get_signals(info); - if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) - port_irq_disable(info, PVR_RI); - info->icount.rng++; - if (info->serial_signals & SerialSignal_RI) - info->input_signal_events.ri_up++; - else - info->input_signal_events.ri_down++; - wake_up_interruptible(&info->status_event_wait_q); - wake_up_interruptible(&info->event_wait_q); - info->pending_bh |= BH_STATUS; -} - -/* Interrupt service routine entry point. 
- * - * Arguments: - * - * irq interrupt number that caused interrupt - * dev_id device ID supplied during interrupt registration - */ -static irqreturn_t mgslpc_isr(int dummy, void *dev_id) -{ - MGSLPC_INFO *info = dev_id; - struct tty_struct *tty; - unsigned short isr; - unsigned char gis, pis; - int count=0; - - if (debug_level >= DEBUG_LEVEL_ISR) - printk("mgslpc_isr(%d) entry.\n", info->irq_level); - - if (!(info->p_dev->_locked)) - return IRQ_HANDLED; - - tty = tty_port_tty_get(&info->port); - - spin_lock(&info->lock); - - while ((gis = read_reg(info, CHA + GIS))) { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis); - - if ((gis & 0x70) || count > 1000) { - printk("synclink_cs:hardware failed or ejected\n"); - break; - } - count++; - - if (gis & (BIT1 | BIT0)) { - isr = read_reg16(info, CHB + ISR); - if (isr & IRQ_DCD) - dcd_change(info, tty); - if (isr & IRQ_CTS) - cts_change(info, tty); - } - if (gis & (BIT3 | BIT2)) - { - isr = read_reg16(info, CHA + ISR); - if (isr & IRQ_TIMER) { - info->irq_occurred = true; - irq_disable(info, CHA, IRQ_TIMER); - } - - /* receive IRQs */ - if (isr & IRQ_EXITHUNT) { - info->icount.exithunt++; - wake_up_interruptible(&info->event_wait_q); - } - if (isr & IRQ_BREAK_ON) { - info->icount.brk++; - if (info->port.flags & ASYNC_SAK) - do_SAK(tty); - } - if (isr & IRQ_RXTIME) { - issue_command(info, CHA, CMD_RXFIFO_READ); - } - if (isr & (IRQ_RXEOM | IRQ_RXFIFO)) { - if (info->params.mode == MGSL_MODE_HDLC) - rx_ready_hdlc(info, isr & IRQ_RXEOM); - else - rx_ready_async(info, isr & IRQ_RXEOM); - } - - /* transmit IRQs */ - if (isr & IRQ_UNDERRUN) { - if (info->tx_aborting) - info->icount.txabort++; - else - info->icount.txunder++; - tx_done(info, tty); - } - else if (isr & IRQ_ALLSENT) { - info->icount.txok++; - tx_done(info, tty); - } - else if (isr & IRQ_TXFIFO) - tx_ready(info, tty); - } - if (gis & BIT7) { - pis = read_reg(info, CHA + PIS); - if (pis & BIT1) - dsr_change(info); - if (pis & BIT2) - ri_change(info); - } - } - - /* Request bottom half processing if there's something - * for it to do and the bh is not already running - */ - - if (info->pending_bh && !info->bh_running && !info->bh_requested) { - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):%s queueing bh task.\n", - __FILE__,__LINE__,info->device_name); - schedule_work(&info->task); - info->bh_requested = true; - } - - spin_unlock(&info->lock); - tty_kref_put(tty); - - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):mgslpc_isr(%d)exit.\n", - __FILE__, __LINE__, info->irq_level); - - return IRQ_HANDLED; -} - -/* Initialize and start device. 
- */ -static int startup(MGSLPC_INFO * info, struct tty_struct *tty) -{ - int retval = 0; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name); - - if (tty_port_initialized(&info->port)) - return 0; - - if (!info->tx_buf) { - /* allocate a page of memory for a transmit buffer */ - info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); - if (!info->tx_buf) { - printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", - __FILE__, __LINE__, info->device_name); - return -ENOMEM; - } - } - - info->pending_bh = 0; - - memset(&info->icount, 0, sizeof(info->icount)); - - timer_setup(&info->tx_timer, tx_timeout, 0); - - /* Allocate and claim adapter resources */ - retval = claim_resources(info); - - /* perform existence check and diagnostics */ - if (!retval) - retval = adapter_test(info); - - if (retval) { - if (capable(CAP_SYS_ADMIN) && tty) - set_bit(TTY_IO_ERROR, &tty->flags); - release_resources(info); - return retval; - } - - /* program hardware for current parameters */ - mgslpc_change_params(info, tty); - - if (tty) - clear_bit(TTY_IO_ERROR, &tty->flags); - - tty_port_set_initialized(&info->port, 1); - - return 0; -} - -/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware - */ -static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty) -{ - unsigned long flags; - - if (!tty_port_initialized(&info->port)) - return; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_shutdown(%s)\n", - __FILE__, __LINE__, info->device_name); - - /* clear status wait queue because status changes */ - /* can't happen after shutting down the hardware */ - wake_up_interruptible(&info->status_event_wait_q); - wake_up_interruptible(&info->event_wait_q); - - del_timer_sync(&info->tx_timer); - - if (info->tx_buf) { - free_page((unsigned long) info->tx_buf); - info->tx_buf = NULL; - } - - spin_lock_irqsave(&info->lock, flags); - - rx_stop(info); - tx_stop(info); - - /* TODO:disable interrupts instead of reset to preserve signal states */ - reset_device(info); - - if (!tty || C_HUPCL(tty)) { - info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - set_signals(info); - } - - spin_unlock_irqrestore(&info->lock, flags); - - release_resources(info); - - if (tty) - set_bit(TTY_IO_ERROR, &tty->flags); - - tty_port_set_initialized(&info->port, 0); -} - -static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty) -{ - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - - rx_stop(info); - tx_stop(info); - info->tx_count = info->tx_put = info->tx_get = 0; - - if (info->params.mode == MGSL_MODE_HDLC || info->netcount) - hdlc_mode(info); - else - async_mode(info); - - set_signals(info); - - info->dcd_chkcount = 0; - info->cts_chkcount = 0; - info->ri_chkcount = 0; - info->dsr_chkcount = 0; - - irq_enable(info, CHB, IRQ_DCD | IRQ_CTS); - port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI); - get_signals(info); - - if (info->netcount || (tty && C_CREAD(tty))) - rx_start(info); - - spin_unlock_irqrestore(&info->lock, flags); -} - -/* Reconfigure adapter based on new parameters - */ -static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty) -{ - unsigned cflag; - int bits_per_char; - - if (!tty) - return; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_change_params(%s)\n", - __FILE__, __LINE__, info->device_name); - - cflag = tty->termios.c_cflag; - - /* if B0 rate (hangup) specified then negate RTS and DTR */ - /* otherwise assert RTS and DTR */ - if 
(cflag & CBAUD) - info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; - else - info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - - /* byte size and parity */ - - switch (cflag & CSIZE) { - case CS5: info->params.data_bits = 5; break; - case CS6: info->params.data_bits = 6; break; - case CS7: info->params.data_bits = 7; break; - case CS8: info->params.data_bits = 8; break; - default: info->params.data_bits = 7; break; - } - - if (cflag & CSTOPB) - info->params.stop_bits = 2; - else - info->params.stop_bits = 1; - - info->params.parity = ASYNC_PARITY_NONE; - if (cflag & PARENB) { - if (cflag & PARODD) - info->params.parity = ASYNC_PARITY_ODD; - else - info->params.parity = ASYNC_PARITY_EVEN; -#ifdef CMSPAR - if (cflag & CMSPAR) - info->params.parity = ASYNC_PARITY_SPACE; -#endif - } - - /* calculate number of jiffies to transmit a full - * FIFO (32 bytes) at specified data rate - */ - bits_per_char = info->params.data_bits + - info->params.stop_bits + 1; - - /* if port data rate is set to 460800 or less then - * allow tty settings to override, otherwise keep the - * current data rate. - */ - if (info->params.data_rate <= 460800) { - info->params.data_rate = tty_get_baud_rate(tty); - } - - if (info->params.data_rate) { - info->timeout = (32*HZ*bits_per_char) / - info->params.data_rate; - } - info->timeout += HZ/50; /* Add .02 seconds of slop */ - - tty_port_set_cts_flow(&info->port, cflag & CRTSCTS); - tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL); - - /* process tty input control flags */ - - info->read_status_mask = 0; - if (I_INPCK(tty)) - info->read_status_mask |= BIT7 | BIT6; - if (I_IGNPAR(tty)) - info->ignore_status_mask |= BIT7 | BIT6; - - mgslpc_program_hw(info, tty); -} - -/* Add a character to the transmit buffer - */ -static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) { - printk("%s(%d):mgslpc_put_char(%d) on %s\n", - __FILE__, __LINE__, ch, info->device_name); - } - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) - return 0; - - if (!info->tx_buf) - return 0; - - spin_lock_irqsave(&info->lock, flags); - - if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) { - if (info->tx_count < TXBUFSIZE - 1) { - info->tx_buf[info->tx_put++] = ch; - info->tx_put &= TXBUFSIZE-1; - info->tx_count++; - } - } - - spin_unlock_irqrestore(&info->lock, flags); - return 1; -} - -/* Enable transmitter so remaining characters in the - * transmit buffer are sent. 
- */ -static void mgslpc_flush_chars(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n", - __FILE__, __LINE__, info->device_name, info->tx_count); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars")) - return; - - if (info->tx_count <= 0 || tty->stopped || - tty->hw_stopped || !info->tx_buf) - return; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n", - __FILE__, __LINE__, info->device_name); - - spin_lock_irqsave(&info->lock, flags); - if (!info->tx_active) - tx_start(info, tty); - spin_unlock_irqrestore(&info->lock, flags); -} - -/* Send a block of data - * - * Arguments: - * - * tty pointer to tty information structure - * buf pointer to buffer containing send data - * count size of send data in bytes - * - * Returns: number of characters written - */ -static int mgslpc_write(struct tty_struct * tty, - const unsigned char *buf, int count) -{ - int c, ret = 0; - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_write(%s) count=%d\n", - __FILE__, __LINE__, info->device_name, count); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") || - !info->tx_buf) - goto cleanup; - - if (info->params.mode == MGSL_MODE_HDLC) { - if (count > TXBUFSIZE) { - ret = -EIO; - goto cleanup; - } - if (info->tx_active) - goto cleanup; - else if (info->tx_count) - goto start; - } - - for (;;) { - c = min(count, - min(TXBUFSIZE - info->tx_count - 1, - TXBUFSIZE - info->tx_put)); - if (c <= 0) - break; - - memcpy(info->tx_buf + info->tx_put, buf, c); - - spin_lock_irqsave(&info->lock, flags); - info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1); - info->tx_count += c; - spin_unlock_irqrestore(&info->lock, flags); - - buf += c; - count -= c; - ret += c; - } -start: - if (info->tx_count && !tty->stopped && !tty->hw_stopped) { - spin_lock_irqsave(&info->lock, flags); - if (!info->tx_active) - tx_start(info, tty); - spin_unlock_irqrestore(&info->lock, flags); - } -cleanup: - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_write(%s) returning=%d\n", - __FILE__, __LINE__, info->device_name, ret); - return ret; -} - -/* Return the count of free bytes in transmit buffer - */ -static int mgslpc_write_room(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - int ret; - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room")) - return 0; - - if (info->params.mode == MGSL_MODE_HDLC) { - /* HDLC (frame oriented) mode */ - if (info->tx_active) - return 0; - else - return HDLC_MAX_FRAME_SIZE; - } else { - ret = TXBUFSIZE - info->tx_count - 1; - if (ret < 0) - ret = 0; - } - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_write_room(%s)=%d\n", - __FILE__, __LINE__, info->device_name, ret); - return ret; -} - -/* Return the count of bytes in transmit buffer - */ -static int mgslpc_chars_in_buffer(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - int rc; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_chars_in_buffer(%s)\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer")) - return 0; - - if (info->params.mode == MGSL_MODE_HDLC) - rc = info->tx_active ? 
info->max_frame_size : 0; - else - rc = info->tx_count; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n", - __FILE__, __LINE__, info->device_name, rc); - - return rc; -} - -/* Discard all data in the send buffer - */ -static void mgslpc_flush_buffer(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_flush_buffer(%s) entry\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer")) - return; - - spin_lock_irqsave(&info->lock, flags); - info->tx_count = info->tx_put = info->tx_get = 0; - del_timer(&info->tx_timer); - spin_unlock_irqrestore(&info->lock, flags); - - wake_up_interruptible(&tty->write_wait); - tty_wakeup(tty); -} - -/* Send a high-priority XON/XOFF character - */ -static void mgslpc_send_xchar(struct tty_struct *tty, char ch) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_send_xchar(%s,%d)\n", - __FILE__, __LINE__, info->device_name, ch); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar")) - return; - - info->x_char = ch; - if (ch) { - spin_lock_irqsave(&info->lock, flags); - if (!info->tx_enabled) - tx_start(info, tty); - spin_unlock_irqrestore(&info->lock, flags); - } -} - -/* Signal remote device to throttle send data (our receive data) - */ -static void mgslpc_throttle(struct tty_struct * tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_throttle(%s) entry\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle")) - return; - - if (I_IXOFF(tty)) - mgslpc_send_xchar(tty, STOP_CHAR(tty)); - - if (C_CRTSCTS(tty)) { - spin_lock_irqsave(&info->lock, flags); - info->serial_signals &= ~SerialSignal_RTS; - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - } -} - -/* Signal remote device to stop throttling send data (our receive data) - */ -static void mgslpc_unthrottle(struct tty_struct * tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_unthrottle(%s) entry\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle")) - return; - - if (I_IXOFF(tty)) { - if (info->x_char) - info->x_char = 0; - else - mgslpc_send_xchar(tty, START_CHAR(tty)); - } - - if (C_CRTSCTS(tty)) { - spin_lock_irqsave(&info->lock, flags); - info->serial_signals |= SerialSignal_RTS; - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - } -} - -/* get the current serial statistics - */ -static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount) -{ - int err; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("get_params(%s)\n", info->device_name); - if (!user_icount) { - memset(&info->icount, 0, sizeof(info->icount)); - } else { - COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); - if (err) - return -EFAULT; - } - return 0; -} - -/* get the current serial parameters - */ -static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params) -{ - int err; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("get_params(%s)\n", info->device_name); - COPY_TO_USER(err,user_params, &info->params, 
sizeof(MGSL_PARAMS)); - if (err) - return -EFAULT; - return 0; -} - -/* set the serial parameters - * - * Arguments: - * - * info pointer to device instance data - * new_params user buffer containing new serial params - * - * Returns: 0 if success, otherwise error code - */ -static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) -{ - unsigned long flags; - MGSL_PARAMS tmp_params; - int err; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):set_params %s\n", __FILE__,__LINE__, - info->device_name); - COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); - if (err) { - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):set_params(%s) user buffer copy failed\n", - __FILE__, __LINE__, info->device_name); - return -EFAULT; - } - - spin_lock_irqsave(&info->lock, flags); - memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); - spin_unlock_irqrestore(&info->lock, flags); - - mgslpc_change_params(info, tty); - - return 0; -} - -static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode) -{ - int err; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode); - COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); - if (err) - return -EFAULT; - return 0; -} - -static int set_txidle(MGSLPC_INFO * info, int idle_mode) -{ - unsigned long flags; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("set_txidle(%s,%d)\n", info->device_name, idle_mode); - spin_lock_irqsave(&info->lock, flags); - info->idle_mode = idle_mode; - tx_set_idle(info); - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -static int get_interface(MGSLPC_INFO * info, int __user *if_mode) -{ - int err; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("get_interface(%s)=%d\n", info->device_name, info->if_mode); - COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int)); - if (err) - return -EFAULT; - return 0; -} - -static int set_interface(MGSLPC_INFO * info, int if_mode) -{ - unsigned long flags; - unsigned char val; - if (debug_level >= DEBUG_LEVEL_INFO) - printk("set_interface(%s,%d)\n", info->device_name, if_mode); - spin_lock_irqsave(&info->lock, flags); - info->if_mode = if_mode; - - val = read_reg(info, PVR) & 0x0f; - switch (info->if_mode) - { - case MGSL_INTERFACE_RS232: val |= PVR_RS232; break; - case MGSL_INTERFACE_V35: val |= PVR_V35; break; - case MGSL_INTERFACE_RS422: val |= PVR_RS422; break; - } - write_reg(info, PVR, val); - - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) -{ - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("set_txenable(%s,%d)\n", info->device_name, enable); - - spin_lock_irqsave(&info->lock, flags); - if (enable) { - if (!info->tx_enabled) - tx_start(info, tty); - } else { - if (info->tx_enabled) - tx_stop(info); - } - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -static int tx_abort(MGSLPC_INFO * info) -{ - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("tx_abort(%s)\n", info->device_name); - - spin_lock_irqsave(&info->lock, flags); - if (info->tx_active && info->tx_count && - info->params.mode == MGSL_MODE_HDLC) { - /* clear data count so FIFO is not filled on next IRQ. - * This results in underrun and abort transmission. 
- */ - info->tx_count = info->tx_put = info->tx_get = 0; - info->tx_aborting = true; - } - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -static int set_rxenable(MGSLPC_INFO * info, int enable) -{ - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("set_rxenable(%s,%d)\n", info->device_name, enable); - - spin_lock_irqsave(&info->lock, flags); - if (enable) { - if (!info->rx_enabled) - rx_start(info); - } else { - if (info->rx_enabled) - rx_stop(info); - } - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -/* wait for specified event to occur - * - * Arguments: info pointer to device instance data - * mask pointer to bitmask of events to wait for - * Return Value: 0 if successful and bit mask updated with - * of events triggerred, - * otherwise error code - */ -static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr) -{ - unsigned long flags; - int s; - int rc=0; - struct mgsl_icount cprev, cnow; - int events; - int mask; - struct _input_signal_events oldsigs, newsigs; - DECLARE_WAITQUEUE(wait, current); - - COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); - if (rc) - return -EFAULT; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("wait_events(%s,%d)\n", info->device_name, mask); - - spin_lock_irqsave(&info->lock, flags); - - /* return immediately if state matches requested events */ - get_signals(info); - s = info->serial_signals; - events = mask & - ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + - ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + - ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + - ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); - if (events) { - spin_unlock_irqrestore(&info->lock, flags); - goto exit; - } - - /* save current irq counts */ - cprev = info->icount; - oldsigs = info->input_signal_events; - - if ((info->params.mode == MGSL_MODE_HDLC) && - (mask & MgslEvent_ExitHuntMode)) - irq_enable(info, CHA, IRQ_EXITHUNT); - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&info->event_wait_q, &wait); - - spin_unlock_irqrestore(&info->lock, flags); - - - for(;;) { - schedule(); - if (signal_pending(current)) { - rc = -ERESTARTSYS; - break; - } - - /* get current irq counts */ - spin_lock_irqsave(&info->lock, flags); - cnow = info->icount; - newsigs = info->input_signal_events; - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&info->lock, flags); - - /* if no change, wait aborted for some reason */ - if (newsigs.dsr_up == oldsigs.dsr_up && - newsigs.dsr_down == oldsigs.dsr_down && - newsigs.dcd_up == oldsigs.dcd_up && - newsigs.dcd_down == oldsigs.dcd_down && - newsigs.cts_up == oldsigs.cts_up && - newsigs.cts_down == oldsigs.cts_down && - newsigs.ri_up == oldsigs.ri_up && - newsigs.ri_down == oldsigs.ri_down && - cnow.exithunt == cprev.exithunt && - cnow.rxidle == cprev.rxidle) { - rc = -EIO; - break; - } - - events = mask & - ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + - (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + - (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + - (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + - (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + - (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + - (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + - (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + - (cnow.exithunt != cprev.exithunt ? 
MgslEvent_ExitHuntMode:0) + - (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); - if (events) - break; - - cprev = cnow; - oldsigs = newsigs; - } - - remove_wait_queue(&info->event_wait_q, &wait); - set_current_state(TASK_RUNNING); - - if (mask & MgslEvent_ExitHuntMode) { - spin_lock_irqsave(&info->lock, flags); - if (!waitqueue_active(&info->event_wait_q)) - irq_disable(info, CHA, IRQ_EXITHUNT); - spin_unlock_irqrestore(&info->lock, flags); - } -exit: - if (rc == 0) - PUT_USER(rc, events, mask_ptr); - return rc; -} - -static int modem_input_wait(MGSLPC_INFO *info,int arg) -{ - unsigned long flags; - int rc; - struct mgsl_icount cprev, cnow; - DECLARE_WAITQUEUE(wait, current); - - /* save current irq counts */ - spin_lock_irqsave(&info->lock, flags); - cprev = info->icount; - add_wait_queue(&info->status_event_wait_q, &wait); - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&info->lock, flags); - - for(;;) { - schedule(); - if (signal_pending(current)) { - rc = -ERESTARTSYS; - break; - } - - /* get new irq counts */ - spin_lock_irqsave(&info->lock, flags); - cnow = info->icount; - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&info->lock, flags); - - /* if no change, wait aborted for some reason */ - if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && - cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { - rc = -EIO; - break; - } - - /* check for change in caller specified modem input */ - if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || - (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || - (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || - (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { - rc = 0; - break; - } - - cprev = cnow; - } - remove_wait_queue(&info->status_event_wait_q, &wait); - set_current_state(TASK_RUNNING); - return rc; -} - -/* return the state of the serial control and status signals - */ -static int tiocmget(struct tty_struct *tty) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned int result; - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - get_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - - result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + - ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + - ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + - ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + - ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + - ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):%s tiocmget() value=%08X\n", - __FILE__, __LINE__, info->device_name, result); - return result; -} - -/* set modem control signals (DTR/RTS) - */ -static int tiocmset(struct tty_struct *tty, - unsigned int set, unsigned int clear) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):%s tiocmset(%x,%x)\n", - __FILE__, __LINE__, info->device_name, set, clear); - - if (set & TIOCM_RTS) - info->serial_signals |= SerialSignal_RTS; - if (set & TIOCM_DTR) - info->serial_signals |= SerialSignal_DTR; - if (clear & TIOCM_RTS) - info->serial_signals &= ~SerialSignal_RTS; - if (clear & TIOCM_DTR) - info->serial_signals &= ~SerialSignal_DTR; - - spin_lock_irqsave(&info->lock, flags); - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - - return 0; -} - -/* Set or clear transmit break condition - * - * Arguments: tty pointer to tty instance data - * break_state -1=set break condition, 0=clear - */ -static int mgslpc_break(struct tty_struct *tty, int break_state) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_break(%s,%d)\n", - __FILE__, __LINE__, info->device_name, break_state); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break")) - return -EINVAL; - - spin_lock_irqsave(&info->lock, flags); - if (break_state == -1) - set_reg_bits(info, CHA+DAFO, BIT6); - else - clear_reg_bits(info, CHA+DAFO, BIT6); - spin_unlock_irqrestore(&info->lock, flags); - return 0; -} - -static int mgslpc_get_icount(struct tty_struct *tty, - struct serial_icounter_struct *icount) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - struct mgsl_icount cnow; /* kernel counter temps */ - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - cnow = info->icount; - spin_unlock_irqrestore(&info->lock, flags); - - icount->cts = cnow.cts; - icount->dsr = cnow.dsr; - icount->rng = cnow.rng; - icount->dcd = cnow.dcd; - icount->rx = cnow.rx; - icount->tx = cnow.tx; - icount->frame = cnow.frame; - icount->overrun = cnow.overrun; - icount->parity = cnow.parity; - icount->brk = cnow.brk; - icount->buf_overrun = cnow.buf_overrun; - - return 0; -} - -/* Service an IOCTL request - * - * Arguments: - * - * tty pointer to tty instance data - * cmd IOCTL command code - * arg command argument/context - * - * Return Value: 0 if success, otherwise error code - */ -static int mgslpc_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - void __user *argp = (void __user *)arg; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__, - info->device_name, cmd); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl")) - return -ENODEV; - - if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && - (cmd != TIOCMIWAIT)) { - if (tty_io_error(tty)) - return -EIO; - } - - switch (cmd) { - case MGSL_IOCGPARAMS: - return get_params(info, argp); - case MGSL_IOCSPARAMS: - return set_params(info, argp, tty); - case MGSL_IOCGTXIDLE: - return get_txidle(info, argp); - case MGSL_IOCSTXIDLE: - return set_txidle(info, (int)arg); - case MGSL_IOCGIF: - return get_interface(info, argp); - case MGSL_IOCSIF: - return set_interface(info,(int)arg); - case MGSL_IOCTXENABLE: - return set_txenable(info,(int)arg, tty); - case MGSL_IOCRXENABLE: - return 
set_rxenable(info,(int)arg); - case MGSL_IOCTXABORT: - return tx_abort(info); - case MGSL_IOCGSTATS: - return get_stats(info, argp); - case MGSL_IOCWAITEVENT: - return wait_events(info, argp); - case TIOCMIWAIT: - return modem_input_wait(info,(int)arg); - default: - return -ENOIOCTLCMD; - } - return 0; -} - -/* Set new termios settings - * - * Arguments: - * - * tty pointer to tty structure - * termios pointer to buffer to hold returned old termios - */ -static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) -{ - MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__, - tty->driver->name); - - /* just return if nothing has changed */ - if ((tty->termios.c_cflag == old_termios->c_cflag) - && (RELEVANT_IFLAG(tty->termios.c_iflag) - == RELEVANT_IFLAG(old_termios->c_iflag))) - return; - - mgslpc_change_params(info, tty); - - /* Handle transition to B0 status */ - if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) { - info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - spin_lock_irqsave(&info->lock, flags); - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - } - - /* Handle transition away from B0 status */ - if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) { - info->serial_signals |= SerialSignal_DTR; - if (!C_CRTSCTS(tty) || !tty_throttled(tty)) - info->serial_signals |= SerialSignal_RTS; - spin_lock_irqsave(&info->lock, flags); - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - } - - /* Handle turning off CRTSCTS */ - if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) { - tty->hw_stopped = 0; - tx_release(tty); - } -} - -static void mgslpc_close(struct tty_struct *tty, struct file * filp) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - struct tty_port *port = &info->port; - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) - return; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", - __FILE__, __LINE__, info->device_name, port->count); - - if (tty_port_close_start(port, tty, filp) == 0) - goto cleanup; - - if (tty_port_initialized(port)) - mgslpc_wait_until_sent(tty, info->timeout); - - mgslpc_flush_buffer(tty); - - tty_ldisc_flush(tty); - shutdown(info, tty); - - tty_port_close_end(port, tty); - tty_port_tty_set(port, NULL); -cleanup: - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__, - tty->driver->name, port->count); -} - -/* Wait until the transmitter is empty. - */ -static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - unsigned long orig_jiffies, char_time; - - if (!info) - return; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) - return; - - if (!tty_port_initialized(&info->port)) - goto exit; - - orig_jiffies = jiffies; - - /* Set check interval to 1/5 of estimated time to - * send a character, and make it at least 1. The check - * interval should also be less than the timeout. - * Note: use tight timings here to satisfy the NIST-PCTS. 
- */ - - if (info->params.data_rate) { - char_time = info->timeout/(32 * 5); - if (!char_time) - char_time++; - } else - char_time = 1; - - if (timeout) - char_time = min_t(unsigned long, char_time, timeout); - - if (info->params.mode == MGSL_MODE_HDLC) { - while (info->tx_active) { - msleep_interruptible(jiffies_to_msecs(char_time)); - if (signal_pending(current)) - break; - if (timeout && time_after(jiffies, orig_jiffies + timeout)) - break; - } - } else { - while ((info->tx_count || info->tx_active) && - info->tx_enabled) { - msleep_interruptible(jiffies_to_msecs(char_time)); - if (signal_pending(current)) - break; - if (timeout && time_after(jiffies, orig_jiffies + timeout)) - break; - } - } - -exit: - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n", - __FILE__, __LINE__, info->device_name); -} - -/* Called by tty_hangup() when a hangup is signaled. - * This is the same as closing all open files for the port. - */ -static void mgslpc_hangup(struct tty_struct *tty) -{ - MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_hangup(%s)\n", - __FILE__, __LINE__, info->device_name); - - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup")) - return; - - mgslpc_flush_buffer(tty); - shutdown(info, tty); - tty_port_hangup(&info->port); -} - -static int carrier_raised(struct tty_port *port) -{ - MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - get_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - - if (info->serial_signals & SerialSignal_DCD) - return 1; - return 0; -} - -static void dtr_rts(struct tty_port *port, int onoff) -{ - MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - if (onoff) - info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; - else - info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); - set_signals(info); - spin_unlock_irqrestore(&info->lock, flags); -} - - -static int mgslpc_open(struct tty_struct *tty, struct file * filp) -{ - MGSLPC_INFO *info; - struct tty_port *port; - int retval, line; - unsigned long flags; - - /* verify range of specified line number */ - line = tty->index; - if (line >= mgslpc_device_count) { - printk("%s(%d):mgslpc_open with invalid line #%d.\n", - __FILE__, __LINE__, line); - return -ENODEV; - } - - /* find the info structure for the specified line */ - info = mgslpc_device_list; - while(info && info->line != line) - info = info->next_device; - if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) - return -ENODEV; - - port = &info->port; - tty->driver_data = info; - tty_port_tty_set(port, tty); - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", - __FILE__, __LINE__, tty->driver->name, port->count); - - port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; - - spin_lock_irqsave(&info->netlock, flags); - if (info->netcount) { - retval = -EBUSY; - spin_unlock_irqrestore(&info->netlock, flags); - goto cleanup; - } - spin_lock(&port->lock); - port->count++; - spin_unlock(&port->lock); - spin_unlock_irqrestore(&info->netlock, flags); - - if (port->count == 1) { - /* 1st open on this device, init hardware */ - retval = startup(info, tty); - if (retval < 0) - goto cleanup; - } - - retval = tty_port_block_til_ready(&info->port, tty, filp); - if (retval) { - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):block_til_ready(%s) returned %d\n", - __FILE__, __LINE__, info->device_name, retval); - goto cleanup; - } - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):mgslpc_open(%s) success\n", - __FILE__, __LINE__, info->device_name); - retval = 0; - -cleanup: - return retval; -} - -/* - * /proc fs routines.... - */ - -static inline void line_info(struct seq_file *m, MGSLPC_INFO *info) -{ - char stat_buf[30]; - unsigned long flags; - - seq_printf(m, "%s:io:%04X irq:%d", - info->device_name, info->io_base, info->irq_level); - - /* output current serial signal states */ - spin_lock_irqsave(&info->lock, flags); - get_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - - stat_buf[0] = 0; - stat_buf[1] = 0; - if (info->serial_signals & SerialSignal_RTS) - strcat(stat_buf, "|RTS"); - if (info->serial_signals & SerialSignal_CTS) - strcat(stat_buf, "|CTS"); - if (info->serial_signals & SerialSignal_DTR) - strcat(stat_buf, "|DTR"); - if (info->serial_signals & SerialSignal_DSR) - strcat(stat_buf, "|DSR"); - if (info->serial_signals & SerialSignal_DCD) - strcat(stat_buf, "|CD"); - if (info->serial_signals & SerialSignal_RI) - strcat(stat_buf, "|RI"); - - if (info->params.mode == MGSL_MODE_HDLC) { - seq_printf(m, " HDLC txok:%d rxok:%d", - info->icount.txok, info->icount.rxok); - if (info->icount.txunder) - seq_printf(m, " txunder:%d", info->icount.txunder); - if (info->icount.txabort) - seq_printf(m, " txabort:%d", info->icount.txabort); - if (info->icount.rxshort) - seq_printf(m, " rxshort:%d", info->icount.rxshort); - if (info->icount.rxlong) - seq_printf(m, " rxlong:%d", info->icount.rxlong); - if (info->icount.rxover) - seq_printf(m, " rxover:%d", info->icount.rxover); - if (info->icount.rxcrc) - seq_printf(m, " rxcrc:%d", info->icount.rxcrc); - } else { - seq_printf(m, " ASYNC tx:%d rx:%d", - info->icount.tx, info->icount.rx); - if (info->icount.frame) - seq_printf(m, " fe:%d", info->icount.frame); - if (info->icount.parity) - seq_printf(m, " pe:%d", info->icount.parity); - if (info->icount.brk) - seq_printf(m, " brk:%d", info->icount.brk); - if (info->icount.overrun) - seq_printf(m, " oe:%d", info->icount.overrun); - } - - /* Append serial signal status to end */ - seq_printf(m, " %s\n", stat_buf+1); - - seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", - info->tx_active,info->bh_requested,info->bh_running, - info->pending_bh); -} - -/* Called to print information about devices - */ -static int mgslpc_proc_show(struct seq_file *m, void *v) -{ - MGSLPC_INFO *info; - - seq_printf(m, "synclink driver:%s\n", driver_version); - - info = mgslpc_device_list; - while (info) { - line_info(m, info); - info = info->next_device; - } - return 0; -} - -static int rx_alloc_buffers(MGSLPC_INFO *info) -{ - /* each buffer has header and data */ - info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size; - - /* calculate total allocation size for 8 buffers */ - info->rx_buf_total_size = info->rx_buf_size * 8; - - /* limit total 
allocated memory */ - if (info->rx_buf_total_size > 0x10000) - info->rx_buf_total_size = 0x10000; - - /* calculate number of buffers */ - info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size; - - info->rx_buf = kmalloc(info->rx_buf_total_size, GFP_KERNEL); - if (info->rx_buf == NULL) - return -ENOMEM; - - /* unused flag buffer to satisfy receive_buf calling interface */ - info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL); - if (!info->flag_buf) { - kfree(info->rx_buf); - info->rx_buf = NULL; - return -ENOMEM; - } - - rx_reset_buffers(info); - return 0; -} - -static void rx_free_buffers(MGSLPC_INFO *info) -{ - kfree(info->rx_buf); - info->rx_buf = NULL; - kfree(info->flag_buf); - info->flag_buf = NULL; -} - -static int claim_resources(MGSLPC_INFO *info) -{ - if (rx_alloc_buffers(info) < 0) { - printk("Can't allocate rx buffer %s\n", info->device_name); - release_resources(info); - return -ENODEV; - } - return 0; -} - -static void release_resources(MGSLPC_INFO *info) -{ - if (debug_level >= DEBUG_LEVEL_INFO) - printk("release_resources(%s)\n", info->device_name); - rx_free_buffers(info); -} - -/* Add the specified device instance data structure to the - * global linked list of devices and increment the device count. - * - * Arguments: info pointer to device instance data - */ -static int mgslpc_add_device(MGSLPC_INFO *info) -{ - MGSLPC_INFO *current_dev = NULL; - struct device *tty_dev; - int ret; - - info->next_device = NULL; - info->line = mgslpc_device_count; - sprintf(info->device_name,"ttySLP%d",info->line); - - if (info->line < MAX_DEVICE_COUNT) { - if (maxframe[info->line]) - info->max_frame_size = maxframe[info->line]; - } - - mgslpc_device_count++; - - if (!mgslpc_device_list) - mgslpc_device_list = info; - else { - current_dev = mgslpc_device_list; - while (current_dev->next_device) - current_dev = current_dev->next_device; - current_dev->next_device = info; - } - - if (info->max_frame_size < 4096) - info->max_frame_size = 4096; - else if (info->max_frame_size > 65535) - info->max_frame_size = 65535; - - printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n", - info->device_name, info->io_base, info->irq_level); - -#if SYNCLINK_GENERIC_HDLC - ret = hdlcdev_init(info); - if (ret != 0) - goto failed; -#endif - - tty_dev = tty_port_register_device(&info->port, serial_driver, info->line, - &info->p_dev->dev); - if (IS_ERR(tty_dev)) { - ret = PTR_ERR(tty_dev); -#if SYNCLINK_GENERIC_HDLC - hdlcdev_exit(info); -#endif - goto failed; - } - - return 0; - -failed: - if (current_dev) - current_dev->next_device = NULL; - else - mgslpc_device_list = NULL; - mgslpc_device_count--; - return ret; -} - -static void mgslpc_remove_device(MGSLPC_INFO *remove_info) -{ - MGSLPC_INFO *info = mgslpc_device_list; - MGSLPC_INFO *last = NULL; - - while(info) { - if (info == remove_info) { - if (last) - last->next_device = info->next_device; - else - mgslpc_device_list = info->next_device; - tty_unregister_device(serial_driver, info->line); -#if SYNCLINK_GENERIC_HDLC - hdlcdev_exit(info); -#endif - release_resources(info); - tty_port_destroy(&info->port); - kfree(info); - mgslpc_device_count--; - return; - } - last = info; - info = info->next_device; - } -} - -static const struct pcmcia_device_id mgslpc_ids[] = { - PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050), - PCMCIA_DEVICE_NULL -}; -MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids); - -static struct pcmcia_driver mgslpc_driver = { - .owner = THIS_MODULE, - .name = "synclink_cs", - .probe = mgslpc_probe, - .remove = mgslpc_detach, - .id_table = mgslpc_ids, - 
.suspend = mgslpc_suspend, - .resume = mgslpc_resume, -}; - -static const struct tty_operations mgslpc_ops = { - .open = mgslpc_open, - .close = mgslpc_close, - .write = mgslpc_write, - .put_char = mgslpc_put_char, - .flush_chars = mgslpc_flush_chars, - .write_room = mgslpc_write_room, - .chars_in_buffer = mgslpc_chars_in_buffer, - .flush_buffer = mgslpc_flush_buffer, - .ioctl = mgslpc_ioctl, - .throttle = mgslpc_throttle, - .unthrottle = mgslpc_unthrottle, - .send_xchar = mgslpc_send_xchar, - .break_ctl = mgslpc_break, - .wait_until_sent = mgslpc_wait_until_sent, - .set_termios = mgslpc_set_termios, - .stop = tx_pause, - .start = tx_release, - .hangup = mgslpc_hangup, - .tiocmget = tiocmget, - .tiocmset = tiocmset, - .get_icount = mgslpc_get_icount, - .proc_show = mgslpc_proc_show, -}; - -static int __init synclink_cs_init(void) -{ - int rc; - - if (break_on_load) { - mgslpc_get_text_ptr(); - BREAKPOINT(); - } - - serial_driver = tty_alloc_driver(MAX_DEVICE_COUNT, - TTY_DRIVER_REAL_RAW | - TTY_DRIVER_DYNAMIC_DEV); - if (IS_ERR(serial_driver)) { - rc = PTR_ERR(serial_driver); - goto err; - } - - /* Initialize the tty_driver structure */ - serial_driver->driver_name = "synclink_cs"; - serial_driver->name = "ttySLP"; - serial_driver->major = ttymajor; - serial_driver->minor_start = 64; - serial_driver->type = TTY_DRIVER_TYPE_SERIAL; - serial_driver->subtype = SERIAL_TYPE_NORMAL; - serial_driver->init_termios = tty_std_termios; - serial_driver->init_termios.c_cflag = - B9600 | CS8 | CREAD | HUPCL | CLOCAL; - tty_set_operations(serial_driver, &mgslpc_ops); - - rc = tty_register_driver(serial_driver); - if (rc < 0) { - printk(KERN_ERR "%s(%d):Couldn't register serial driver\n", - __FILE__, __LINE__); - goto err_put_tty; - } - - rc = pcmcia_register_driver(&mgslpc_driver); - if (rc < 0) - goto err_unreg_tty; - - printk(KERN_INFO "%s %s, tty major#%d\n", driver_name, driver_version, - serial_driver->major); - - return 0; -err_unreg_tty: - tty_unregister_driver(serial_driver); -err_put_tty: - put_tty_driver(serial_driver); -err: - return rc; -} - -static void __exit synclink_cs_exit(void) -{ - pcmcia_unregister_driver(&mgslpc_driver); - tty_unregister_driver(serial_driver); - put_tty_driver(serial_driver); -} - -module_init(synclink_cs_init); -module_exit(synclink_cs_exit); - -static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate) -{ - unsigned int M, N; - unsigned char val; - - /* note:standard BRG mode is broken in V3.2 chip - * so enhanced mode is always used - */ - - if (rate) { - N = 3686400 / rate; - if (!N) - N = 1; - N >>= 1; - for (M = 1; N > 64 && M < 16; M++) - N >>= 1; - N--; - - /* BGR[5..0] = N - * BGR[9..6] = M - * BGR[7..0] contained in BGR register - * BGR[9..8] contained in CCR2[7..6] - * divisor = (N+1)*2^M - * - * Note: M *must* not be zero (causes asymetric duty cycle) - */ - write_reg(info, (unsigned char) (channel + BGR), - (unsigned char) ((M << 6) + N)); - val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f; - val |= ((M << 4) & 0xc0); - write_reg(info, (unsigned char) (channel + CCR2), val); - } -} - -/* Enabled the AUX clock output at the specified frequency. 
- */ -static void enable_auxclk(MGSLPC_INFO *info) -{ - unsigned char val; - - /* MODE - * - * 07..06 MDS[1..0] 10 = transparent HDLC mode - * 05 ADM Address Mode, 0 = no addr recognition - * 04 TMD Timer Mode, 0 = external - * 03 RAC Receiver Active, 0 = inactive - * 02 RTS 0=RTS active during xmit, 1=RTS always active - * 01 TRS Timer Resolution, 1=512 - * 00 TLP Test Loop, 0 = no loop - * - * 1000 0010 - */ - val = 0x82; - - /* channel B RTS is used to enable AUXCLK driver on SP505 */ - if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) - val |= BIT2; - write_reg(info, CHB + MODE, val); - - /* CCR0 - * - * 07 PU Power Up, 1=active, 0=power down - * 06 MCE Master Clock Enable, 1=enabled - * 05 Reserved, 0 - * 04..02 SC[2..0] Encoding - * 01..00 SM[1..0] Serial Mode, 00=HDLC - * - * 11000000 - */ - write_reg(info, CHB + CCR0, 0xc0); - - /* CCR1 - * - * 07 SFLG Shared Flag, 0 = disable shared flags - * 06 GALP Go Active On Loop, 0 = not used - * 05 GLP Go On Loop, 0 = not used - * 04 ODS Output Driver Select, 1=TxD is push-pull output - * 03 ITF Interframe Time Fill, 0=mark, 1=flag - * 02..00 CM[2..0] Clock Mode - * - * 0001 0111 - */ - write_reg(info, CHB + CCR1, 0x17); - - /* CCR2 (Channel B) - * - * 07..06 BGR[9..8] Baud rate bits 9..8 - * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value - * 04 SSEL Clock source select, 1=submode b - * 03 TOE 0=TxCLK is input, 1=TxCLK is output - * 02 RWX Read/Write Exchange 0=disabled - * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 - * 00 DIV, data inversion 0=disabled, 1=enabled - * - * 0011 1000 - */ - if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) - write_reg(info, CHB + CCR2, 0x38); - else - write_reg(info, CHB + CCR2, 0x30); - - /* CCR4 - * - * 07 MCK4 Master Clock Divide by 4, 1=enabled - * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled - * 05 TST1 Test Pin, 0=normal operation - * 04 ICD Ivert Carrier Detect, 1=enabled (active low) - * 03..02 Reserved, must be 0 - * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes - * - * 0101 0000 - */ - write_reg(info, CHB + CCR4, 0x50); - - /* if auxclk not enabled, set internal BRG so - * CTS transitions can be detected (requires TxC) - */ - if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) - mgslpc_set_rate(info, CHB, info->params.clock_speed); - else - mgslpc_set_rate(info, CHB, 921600); -} - -static void loopback_enable(MGSLPC_INFO *info) -{ - unsigned char val; - - /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */ - val = read_reg(info, CHA + CCR1) | (BIT2 | BIT1 | BIT0); - write_reg(info, CHA + CCR1, val); - - /* CCR2:04 SSEL Clock source select, 1=submode b */ - val = read_reg(info, CHA + CCR2) | (BIT4 | BIT5); - write_reg(info, CHA + CCR2, val); - - /* set LinkSpeed if available, otherwise default to 2Mbps */ - if (info->params.clock_speed) - mgslpc_set_rate(info, CHA, info->params.clock_speed); - else - mgslpc_set_rate(info, CHA, 1843200); - - /* MODE:00 TLP Test Loop, 1=loopback enabled */ - val = read_reg(info, CHA + MODE) | BIT0; - write_reg(info, CHA + MODE, val); -} - -static void hdlc_mode(MGSLPC_INFO *info) -{ - unsigned char val; - unsigned char clkmode, clksubmode; - - /* disable all interrupts */ - irq_disable(info, CHA, 0xffff); - irq_disable(info, CHB, 0xffff); - port_irq_disable(info, 0xff); - - /* assume clock mode 0a, rcv=RxC xmt=TxC */ - clkmode = clksubmode = 0; - if (info->params.flags & HDLC_FLAG_RXC_DPLL - && info->params.flags & HDLC_FLAG_TXC_DPLL) { - /* clock mode 7a, rcv = DPLL, xmt = DPLL */ - clkmode = 7; - } 
else if (info->params.flags & HDLC_FLAG_RXC_BRG - && info->params.flags & HDLC_FLAG_TXC_BRG) { - /* clock mode 7b, rcv = BRG, xmt = BRG */ - clkmode = 7; - clksubmode = 1; - } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) { - if (info->params.flags & HDLC_FLAG_TXC_BRG) { - /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */ - clkmode = 6; - clksubmode = 1; - } else { - /* clock mode 6a, rcv = DPLL, xmt = TxC */ - clkmode = 6; - } - } else if (info->params.flags & HDLC_FLAG_TXC_BRG) { - /* clock mode 0b, rcv = RxC, xmt = BRG */ - clksubmode = 1; - } - - /* MODE - * - * 07..06 MDS[1..0] 10 = transparent HDLC mode - * 05 ADM Address Mode, 0 = no addr recognition - * 04 TMD Timer Mode, 0 = external - * 03 RAC Receiver Active, 0 = inactive - * 02 RTS 0=RTS active during xmit, 1=RTS always active - * 01 TRS Timer Resolution, 1=512 - * 00 TLP Test Loop, 0 = no loop - * - * 1000 0010 - */ - val = 0x82; - if (info->params.loopback) - val |= BIT0; - - /* preserve RTS state */ - if (info->serial_signals & SerialSignal_RTS) - val |= BIT2; - write_reg(info, CHA + MODE, val); - - /* CCR0 - * - * 07 PU Power Up, 1=active, 0=power down - * 06 MCE Master Clock Enable, 1=enabled - * 05 Reserved, 0 - * 04..02 SC[2..0] Encoding - * 01..00 SM[1..0] Serial Mode, 00=HDLC - * - * 11000000 - */ - val = 0xc0; - switch (info->params.encoding) - { - case HDLC_ENCODING_NRZI: - val |= BIT3; - break; - case HDLC_ENCODING_BIPHASE_SPACE: - val |= BIT4; - break; // FM0 - case HDLC_ENCODING_BIPHASE_MARK: - val |= BIT4 | BIT2; - break; // FM1 - case HDLC_ENCODING_BIPHASE_LEVEL: - val |= BIT4 | BIT3; - break; // Manchester - } - write_reg(info, CHA + CCR0, val); - - /* CCR1 - * - * 07 SFLG Shared Flag, 0 = disable shared flags - * 06 GALP Go Active On Loop, 0 = not used - * 05 GLP Go On Loop, 0 = not used - * 04 ODS Output Driver Select, 1=TxD is push-pull output - * 03 ITF Interframe Time Fill, 0=mark, 1=flag - * 02..00 CM[2..0] Clock Mode - * - * 0001 0000 - */ - val = 0x10 + clkmode; - write_reg(info, CHA + CCR1, val); - - /* CCR2 - * - * 07..06 BGR[9..8] Baud rate bits 9..8 - * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value - * 04 SSEL Clock source select, 1=submode b - * 03 TOE 0=TxCLK is input, 0=TxCLK is input - * 02 RWX Read/Write Exchange 0=disabled - * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 - * 00 DIV, data inversion 0=disabled, 1=enabled - * - * 0000 0000 - */ - val = 0x00; - if (clkmode == 2 || clkmode == 3 || clkmode == 6 - || clkmode == 7 || (clkmode == 0 && clksubmode == 1)) - val |= BIT5; - if (clksubmode) - val |= BIT4; - if (info->params.crc_type == HDLC_CRC_32_CCITT) - val |= BIT1; - if (info->params.encoding == HDLC_ENCODING_NRZB) - val |= BIT0; - write_reg(info, CHA + CCR2, val); - - /* CCR3 - * - * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8 - * 05 EPT Enable preamble transmission, 1=enabled - * 04 RADD Receive address pushed to FIFO, 0=disabled - * 03 CRL CRC Reset Level, 0=FFFF - * 02 RCRC Rx CRC 0=On 1=Off - * 01 TCRC Tx CRC 0=On 1=Off - * 00 PSD DPLL Phase Shift Disable - * - * 0000 0000 - */ - val = 0x00; - if (info->params.crc_type == HDLC_CRC_NONE) - val |= BIT2 | BIT1; - if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE) - val |= BIT5; - switch (info->params.preamble_length) - { - case HDLC_PREAMBLE_LENGTH_16BITS: - val |= BIT6; - break; - case HDLC_PREAMBLE_LENGTH_32BITS: - val |= BIT6; - break; - case HDLC_PREAMBLE_LENGTH_64BITS: - val |= BIT7 | BIT6; - break; - } - write_reg(info, CHA + CCR3, val); - - /* PRE - Preamble pattern */ - val = 0; - switch (info->params.preamble) - 
{ - case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break; - case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break; - case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break; - case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break; - } - write_reg(info, CHA + PRE, val); - - /* CCR4 - * - * 07 MCK4 Master Clock Divide by 4, 1=enabled - * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled - * 05 TST1 Test Pin, 0=normal operation - * 04 ICD Ivert Carrier Detect, 1=enabled (active low) - * 03..02 Reserved, must be 0 - * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes - * - * 0101 0000 - */ - val = 0x50; - write_reg(info, CHA + CCR4, val); - if (info->params.flags & HDLC_FLAG_RXC_DPLL) - mgslpc_set_rate(info, CHA, info->params.clock_speed * 16); - else - mgslpc_set_rate(info, CHA, info->params.clock_speed); - - /* RLCR Receive length check register - * - * 7 1=enable receive length check - * 6..0 Max frame length = (RL + 1) * 32 - */ - write_reg(info, CHA + RLCR, 0); - - /* XBCH Transmit Byte Count High - * - * 07 DMA mode, 0 = interrupt driven - * 06 NRM, 0=ABM (ignored) - * 05 CAS Carrier Auto Start - * 04 XC Transmit Continuously (ignored) - * 03..00 XBC[10..8] Transmit byte count bits 10..8 - * - * 0000 0000 - */ - val = 0x00; - if (info->params.flags & HDLC_FLAG_AUTO_DCD) - val |= BIT5; - write_reg(info, CHA + XBCH, val); - enable_auxclk(info); - if (info->params.loopback || info->testing_irq) - loopback_enable(info); - if (info->params.flags & HDLC_FLAG_AUTO_CTS) - { - irq_enable(info, CHB, IRQ_CTS); - /* PVR[3] 1=AUTO CTS active */ - set_reg_bits(info, CHA + PVR, BIT3); - } else - clear_reg_bits(info, CHA + PVR, BIT3); - - irq_enable(info, CHA, - IRQ_RXEOM | IRQ_RXFIFO | IRQ_ALLSENT | - IRQ_UNDERRUN | IRQ_TXFIFO); - issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); - wait_command_complete(info, CHA); - read_reg16(info, CHA + ISR); /* clear pending IRQs */ - - /* Master clock mode enabled above to allow reset commands - * to complete even if no data clocks are present. - * - * Disable master clock mode for normal communications because - * V3.2 of the ESCC2 has a bug that prevents the transmit all sent - * IRQ when in master clock mode. - * - * Leave master clock mode enabled for IRQ test because the - * timer IRQ used by the test can only happen in master clock mode. - */ - if (!info->testing_irq) - clear_reg_bits(info, CHA + CCR0, BIT6); - - tx_set_idle(info); - - tx_stop(info); - rx_stop(info); -} - -static void rx_stop(MGSLPC_INFO *info) -{ - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):rx_stop(%s)\n", - __FILE__, __LINE__, info->device_name); - - /* MODE:03 RAC Receiver Active, 0=inactive */ - clear_reg_bits(info, CHA + MODE, BIT3); - - info->rx_enabled = false; - info->rx_overflow = false; -} - -static void rx_start(MGSLPC_INFO *info) -{ - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):rx_start(%s)\n", - __FILE__, __LINE__, info->device_name); - - rx_reset_buffers(info); - info->rx_enabled = false; - info->rx_overflow = false; - - /* MODE:03 RAC Receiver Active, 1=active */ - set_reg_bits(info, CHA + MODE, BIT3); - - info->rx_enabled = true; -} - -static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty) -{ - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):tx_start(%s)\n", - __FILE__, __LINE__, info->device_name); - - if (info->tx_count) { - /* If auto RTS enabled and RTS is inactive, then assert */ - /* RTS and set a flag indicating that the driver should */ - /* negate RTS when the transmission completes. 
*/ - info->drop_rts_on_tx_done = false; - - if (info->params.flags & HDLC_FLAG_AUTO_RTS) { - get_signals(info); - if (!(info->serial_signals & SerialSignal_RTS)) { - info->serial_signals |= SerialSignal_RTS; - set_signals(info); - info->drop_rts_on_tx_done = true; - } - } - - if (info->params.mode == MGSL_MODE_ASYNC) { - if (!info->tx_active) { - info->tx_active = true; - tx_ready(info, tty); - } - } else { - info->tx_active = true; - tx_ready(info, tty); - mod_timer(&info->tx_timer, jiffies + - msecs_to_jiffies(5000)); - } - } - - if (!info->tx_enabled) - info->tx_enabled = true; -} - -static void tx_stop(MGSLPC_INFO *info) -{ - if (debug_level >= DEBUG_LEVEL_ISR) - printk("%s(%d):tx_stop(%s)\n", - __FILE__, __LINE__, info->device_name); - - del_timer(&info->tx_timer); - - info->tx_enabled = false; - info->tx_active = false; -} - -/* Reset the adapter to a known state and prepare it for further use. - */ -static void reset_device(MGSLPC_INFO *info) -{ - /* power up both channels (set BIT7) */ - write_reg(info, CHA + CCR0, 0x80); - write_reg(info, CHB + CCR0, 0x80); - write_reg(info, CHA + MODE, 0); - write_reg(info, CHB + MODE, 0); - - /* disable all interrupts */ - irq_disable(info, CHA, 0xffff); - irq_disable(info, CHB, 0xffff); - port_irq_disable(info, 0xff); - - /* PCR Port Configuration Register - * - * 07..04 DEC[3..0] Serial I/F select outputs - * 03 output, 1=AUTO CTS control enabled - * 02 RI Ring Indicator input 0=active - * 01 DSR input 0=active - * 00 DTR output 0=active - * - * 0000 0110 - */ - write_reg(info, PCR, 0x06); - - /* PVR Port Value Register - * - * 07..04 DEC[3..0] Serial I/F select (0000=disabled) - * 03 AUTO CTS output 1=enabled - * 02 RI Ring Indicator input - * 01 DSR input - * 00 DTR output (1=inactive) - * - * 0000 0001 - */ -// write_reg(info, PVR, PVR_DTR); - - /* IPC Interrupt Port Configuration - * - * 07 VIS 1=Masked interrupts visible - * 06..05 Reserved, 0 - * 04..03 SLA Slave address, 00 ignored - * 02 CASM Cascading Mode, 1=daisy chain - * 01..00 IC[1..0] Interrupt Config, 01=push-pull output, active low - * - * 0000 0101 - */ - write_reg(info, IPC, 0x05); -} - -static void async_mode(MGSLPC_INFO *info) -{ - unsigned char val; - - /* disable all interrupts */ - irq_disable(info, CHA, 0xffff); - irq_disable(info, CHB, 0xffff); - port_irq_disable(info, 0xff); - - /* MODE - * - * 07 Reserved, 0 - * 06 FRTS RTS State, 0=active - * 05 FCTS Flow Control on CTS - * 04 FLON Flow Control Enable - * 03 RAC Receiver Active, 0 = inactive - * 02 RTS 0=Auto RTS, 1=manual RTS - * 01 TRS Timer Resolution, 1=512 - * 00 TLP Test Loop, 0 = no loop - * - * 0000 0110 - */ - val = 0x06; - if (info->params.loopback) - val |= BIT0; - - /* preserve RTS state */ - if (!(info->serial_signals & SerialSignal_RTS)) - val |= BIT6; - write_reg(info, CHA + MODE, val); - - /* CCR0 - * - * 07 PU Power Up, 1=active, 0=power down - * 06 MCE Master Clock Enable, 1=enabled - * 05 Reserved, 0 - * 04..02 SC[2..0] Encoding, 000=NRZ - * 01..00 SM[1..0] Serial Mode, 11=Async - * - * 1000 0011 - */ - write_reg(info, CHA + CCR0, 0x83); - - /* CCR1 - * - * 07..05 Reserved, 0 - * 04 ODS Output Driver Select, 1=TxD is push-pull output - * 03 BCR Bit Clock Rate, 1=16x - * 02..00 CM[2..0] Clock Mode, 111=BRG - * - * 0001 1111 - */ - write_reg(info, CHA + CCR1, 0x1f); - - /* CCR2 (channel A) - * - * 07..06 BGR[9..8] Baud rate bits 9..8 - * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value - * 04 SSEL Clock source select, 1=submode b - * 03 TOE 0=TxCLK is input, 0=TxCLK is input - * 02 RWX Read/Write 
Exchange 0=disabled - * 01 Reserved, 0 - * 00 DIV, data inversion 0=disabled, 1=enabled - * - * 0001 0000 - */ - write_reg(info, CHA + CCR2, 0x10); - - /* CCR3 - * - * 07..01 Reserved, 0 - * 00 PSD DPLL Phase Shift Disable - * - * 0000 0000 - */ - write_reg(info, CHA + CCR3, 0); - - /* CCR4 - * - * 07 MCK4 Master Clock Divide by 4, 1=enabled - * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled - * 05 TST1 Test Pin, 0=normal operation - * 04 ICD Ivert Carrier Detect, 1=enabled (active low) - * 03..00 Reserved, must be 0 - * - * 0101 0000 - */ - write_reg(info, CHA + CCR4, 0x50); - mgslpc_set_rate(info, CHA, info->params.data_rate * 16); - - /* DAFO Data Format - * - * 07 Reserved, 0 - * 06 XBRK transmit break, 0=normal operation - * 05 Stop bits (0=1, 1=2) - * 04..03 PAR[1..0] Parity (01=odd, 10=even) - * 02 PAREN Parity Enable - * 01..00 CHL[1..0] Character Length (00=8, 01=7) - * - */ - val = 0x00; - if (info->params.data_bits != 8) - val |= BIT0; /* 7 bits */ - if (info->params.stop_bits != 1) - val |= BIT5; - if (info->params.parity != ASYNC_PARITY_NONE) - { - val |= BIT2; /* Parity enable */ - if (info->params.parity == ASYNC_PARITY_ODD) - val |= BIT3; - else - val |= BIT4; - } - write_reg(info, CHA + DAFO, val); - - /* RFC Rx FIFO Control - * - * 07 Reserved, 0 - * 06 DPS, 1=parity bit not stored in data byte - * 05 DXS, 0=all data stored in FIFO (including XON/XOFF) - * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO - * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte - * 01 Reserved, 0 - * 00 TCDE Terminate Char Detect Enable, 0=disabled - * - * 0101 1100 - */ - write_reg(info, CHA + RFC, 0x5c); - - /* RLCR Receive length check register - * - * Max frame length = (RL + 1) * 32 - */ - write_reg(info, CHA + RLCR, 0); - - /* XBCH Transmit Byte Count High - * - * 07 DMA mode, 0 = interrupt driven - * 06 NRM, 0=ABM (ignored) - * 05 CAS Carrier Auto Start - * 04 XC Transmit Continuously (ignored) - * 03..00 XBC[10..8] Transmit byte count bits 10..8 - * - * 0000 0000 - */ - val = 0x00; - if (info->params.flags & HDLC_FLAG_AUTO_DCD) - val |= BIT5; - write_reg(info, CHA + XBCH, val); - if (info->params.flags & HDLC_FLAG_AUTO_CTS) - irq_enable(info, CHA, IRQ_CTS); - - /* MODE:03 RAC Receiver Active, 1=active */ - set_reg_bits(info, CHA + MODE, BIT3); - enable_auxclk(info); - if (info->params.flags & HDLC_FLAG_AUTO_CTS) { - irq_enable(info, CHB, IRQ_CTS); - /* PVR[3] 1=AUTO CTS active */ - set_reg_bits(info, CHA + PVR, BIT3); - } else - clear_reg_bits(info, CHA + PVR, BIT3); - irq_enable(info, CHA, - IRQ_RXEOM | IRQ_RXFIFO | IRQ_BREAK_ON | IRQ_RXTIME | - IRQ_ALLSENT | IRQ_TXFIFO); - issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); - wait_command_complete(info, CHA); - read_reg16(info, CHA + ISR); /* clear pending IRQs */ -} - -/* Set the HDLC idle mode for the transmitter. - */ -static void tx_set_idle(MGSLPC_INFO *info) -{ - /* Note: ESCC2 only supports flags and one idle modes */ - if (info->idle_mode == HDLC_TXIDLE_FLAGS) - set_reg_bits(info, CHA + CCR1, BIT3); - else - clear_reg_bits(info, CHA + CCR1, BIT3); -} - -/* get state of the V24 status (input) signals. 
- */ -static void get_signals(MGSLPC_INFO *info) -{ - unsigned char status = 0; - - /* preserve RTS and DTR */ - info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR; - - if (read_reg(info, CHB + VSTR) & BIT7) - info->serial_signals |= SerialSignal_DCD; - if (read_reg(info, CHB + STAR) & BIT1) - info->serial_signals |= SerialSignal_CTS; - - status = read_reg(info, CHA + PVR); - if (!(status & PVR_RI)) - info->serial_signals |= SerialSignal_RI; - if (!(status & PVR_DSR)) - info->serial_signals |= SerialSignal_DSR; -} - -/* Set the state of RTS and DTR based on contents of - * serial_signals member of device extension. - */ -static void set_signals(MGSLPC_INFO *info) -{ - unsigned char val; - - val = read_reg(info, CHA + MODE); - if (info->params.mode == MGSL_MODE_ASYNC) { - if (info->serial_signals & SerialSignal_RTS) - val &= ~BIT6; - else - val |= BIT6; - } else { - if (info->serial_signals & SerialSignal_RTS) - val |= BIT2; - else - val &= ~BIT2; - } - write_reg(info, CHA + MODE, val); - - if (info->serial_signals & SerialSignal_DTR) - clear_reg_bits(info, CHA + PVR, PVR_DTR); - else - set_reg_bits(info, CHA + PVR, PVR_DTR); -} - -static void rx_reset_buffers(MGSLPC_INFO *info) -{ - RXBUF *buf; - int i; - - info->rx_put = 0; - info->rx_get = 0; - info->rx_frame_count = 0; - for (i=0 ; i < info->rx_buf_count ; i++) { - buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size)); - buf->status = buf->count = 0; - } -} - -/* Attempt to return a received HDLC frame - * Only frames received without errors are returned. - * - * Returns true if frame returned, otherwise false - */ -static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty) -{ - unsigned short status; - RXBUF *buf; - unsigned int framesize = 0; - unsigned long flags; - bool return_frame = false; - - if (info->rx_frame_count == 0) - return false; - - buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size)); - - status = buf->status; - - /* 07 VFR 1=valid frame - * 06 RDO 1=data overrun - * 05 CRC 1=OK, 0=error - * 04 RAB 1=frame aborted - */ - if ((status & 0xf0) != 0xA0) { - if (!(status & BIT7) || (status & BIT4)) - info->icount.rxabort++; - else if (status & BIT6) - info->icount.rxover++; - else if (!(status & BIT5)) { - info->icount.rxcrc++; - if (info->params.crc_type & HDLC_CRC_RETURN_EX) - return_frame = true; - } - framesize = 0; -#if SYNCLINK_GENERIC_HDLC - { - info->netdev->stats.rx_errors++; - info->netdev->stats.rx_frame_errors++; - } -#endif - } else - return_frame = true; - - if (return_frame) - framesize = buf->count; - - if (debug_level >= DEBUG_LEVEL_BH) - printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n", - __FILE__, __LINE__, info->device_name, status, framesize); - - if (debug_level >= DEBUG_LEVEL_DATA) - trace_block(info, buf->data, framesize, 0); - - if (framesize) { - if ((info->params.crc_type & HDLC_CRC_RETURN_EX && - framesize+1 > info->max_frame_size) || - framesize > info->max_frame_size) - info->icount.rxlong++; - else { - if (status & BIT5) - info->icount.rxok++; - - if (info->params.crc_type & HDLC_CRC_RETURN_EX) { - *(buf->data + framesize) = status & BIT5 ? 
RX_OK:RX_CRC_ERROR; - ++framesize; - } - -#if SYNCLINK_GENERIC_HDLC - if (info->netcount) - hdlcdev_rx(info, buf->data, framesize); - else -#endif - ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize); - } - } - - spin_lock_irqsave(&info->lock, flags); - buf->status = buf->count = 0; - info->rx_frame_count--; - info->rx_get++; - if (info->rx_get >= info->rx_buf_count) - info->rx_get = 0; - spin_unlock_irqrestore(&info->lock, flags); - - return true; -} - -static bool register_test(MGSLPC_INFO *info) -{ - static unsigned char patterns[] = - { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f }; - static unsigned int count = ARRAY_SIZE(patterns); - unsigned int i; - bool rc = true; - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - reset_device(info); - - for (i = 0; i < count; i++) { - write_reg(info, XAD1, patterns[i]); - write_reg(info, XAD2, patterns[(i + 1) % count]); - if ((read_reg(info, XAD1) != patterns[i]) || - (read_reg(info, XAD2) != patterns[(i + 1) % count])) { - rc = false; - break; - } - } - - spin_unlock_irqrestore(&info->lock, flags); - return rc; -} - -static bool irq_test(MGSLPC_INFO *info) -{ - unsigned long end_time; - unsigned long flags; - - spin_lock_irqsave(&info->lock, flags); - reset_device(info); - - info->testing_irq = true; - hdlc_mode(info); - - info->irq_occurred = false; - - /* init hdlc mode */ - - irq_enable(info, CHA, IRQ_TIMER); - write_reg(info, CHA + TIMR, 0); /* 512 cycles */ - issue_command(info, CHA, CMD_START_TIMER); - - spin_unlock_irqrestore(&info->lock, flags); - - end_time=100; - while(end_time-- && !info->irq_occurred) { - msleep_interruptible(10); - } - - info->testing_irq = false; - - spin_lock_irqsave(&info->lock, flags); - reset_device(info); - spin_unlock_irqrestore(&info->lock, flags); - - return info->irq_occurred; -} - -static int adapter_test(MGSLPC_INFO *info) -{ - if (!register_test(info)) { - info->init_error = DiagStatus_AddressFailure; - printk("%s(%d):Register test failure for device %s Addr=%04X\n", - __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base)); - return -ENODEV; - } - - if (!irq_test(info)) { - info->init_error = DiagStatus_IrqFailure; - printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n", - __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level)); - return -ENODEV; - } - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):device %s passed diagnostics\n", - __FILE__, __LINE__, info->device_name); - return 0; -} - -static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit) -{ - int i; - int linecount; - if (xmit) - printk("%s tx data:\n", info->device_name); - else - printk("%s rx data:\n", info->device_name); - - while(count) { - if (count > 16) - linecount = 16; - else - linecount = count; - - for(i=0;i<linecount;i++) - printk("%02X ", (unsigned char)data[i]); - printk(" "); - for(i=0;i<linecount;i++) { - if (data[i]>=040 && data[i]<=0176) - printk("%c", data[i]); - else - printk("."); - } - printk("\n"); - - data += linecount; - count -= linecount; - } -} - -/* HDLC frame time out - * update stats and do tx completion processing - */ -static void tx_timeout(struct timer_list *t) -{ - MGSLPC_INFO *info = from_timer(info, t, tx_timer); - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s(%d):tx_timeout(%s)\n", - __FILE__, __LINE__, info->device_name); - if (info->tx_active && - info->params.mode == MGSL_MODE_HDLC) { - info->icount.txtimeout++; - } - spin_lock_irqsave(&info->lock, flags); - info->tx_active = false; - info->tx_count = info->tx_put = info->tx_get = 0; - - spin_unlock_irqrestore(&info->lock, flags); - -#if
SYNCLINK_GENERIC_HDLC - if (info->netcount) - hdlcdev_tx_done(info); - else -#endif - { - struct tty_struct *tty = tty_port_tty_get(&info->port); - bh_transmit(info, tty); - tty_kref_put(tty); - } -} - -#if SYNCLINK_GENERIC_HDLC - -/** - * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.) - * set encoding and frame check sequence (FCS) options - * - * dev pointer to network device structure - * encoding serial encoding setting - * parity FCS setting - * - * returns 0 if success, otherwise error code - */ -static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - MGSLPC_INFO *info = dev_to_port(dev); - struct tty_struct *tty; - unsigned char new_encoding; - unsigned short new_crctype; - - /* return error if TTY interface open */ - if (info->port.count) - return -EBUSY; - - switch (encoding) - { - case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; - case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; - case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; - case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; - case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; - default: return -EINVAL; - } - - switch (parity) - { - case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; - case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; - case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; - default: return -EINVAL; - } - - info->params.encoding = new_encoding; - info->params.crc_type = new_crctype; - - /* if network interface up, reprogram hardware */ - if (info->netcount) { - tty = tty_port_tty_get(&info->port); - mgslpc_program_hw(info, tty); - tty_kref_put(tty); - } - - return 0; -} - -/** - * called by generic HDLC layer to send frame - * - * skb socket buffer containing HDLC frame - * dev pointer to network device structure - */ -static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - MGSLPC_INFO *info = dev_to_port(dev); - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name); - - /* stop sending until this frame completes */ - netif_stop_queue(dev); - - /* copy data to device buffers */ - skb_copy_from_linear_data(skb, info->tx_buf, skb->len); - info->tx_get = 0; - info->tx_put = info->tx_count = skb->len; - - /* update network statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - /* done with socket buffer, so free it */ - dev_kfree_skb(skb); - - /* save start time for transmit timeout detection */ - netif_trans_update(dev); - - /* start hardware transmitter if necessary */ - spin_lock_irqsave(&info->lock, flags); - if (!info->tx_active) { - struct tty_struct *tty = tty_port_tty_get(&info->port); - tx_start(info, tty); - tty_kref_put(tty); - } - spin_unlock_irqrestore(&info->lock, flags); - - return NETDEV_TX_OK; -} - -/** - * called by network layer when interface enabled - * claim resources and initialize hardware - * - * dev pointer to network device structure - * - * returns 0 if success, otherwise error code - */ -static int hdlcdev_open(struct net_device *dev) -{ - MGSLPC_INFO *info = dev_to_port(dev); - struct tty_struct *tty; - int rc; - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name); - - /* generic HDLC layer open processing */ - rc = hdlc_open(dev); - if (rc != 0) - return rc; - - /* arbitrate 
between network and tty opens */ - spin_lock_irqsave(&info->netlock, flags); - if (info->port.count != 0 || info->netcount != 0) { - printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); - spin_unlock_irqrestore(&info->netlock, flags); - return -EBUSY; - } - info->netcount=1; - spin_unlock_irqrestore(&info->netlock, flags); - - tty = tty_port_tty_get(&info->port); - /* claim resources and init adapter */ - rc = startup(info, tty); - if (rc != 0) { - tty_kref_put(tty); - spin_lock_irqsave(&info->netlock, flags); - info->netcount=0; - spin_unlock_irqrestore(&info->netlock, flags); - return rc; - } - /* assert RTS and DTR, apply hardware settings */ - info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; - mgslpc_program_hw(info, tty); - tty_kref_put(tty); - - /* enable network layer transmit */ - netif_trans_update(dev); - netif_start_queue(dev); - - /* inform generic HDLC layer of current DCD status */ - spin_lock_irqsave(&info->lock, flags); - get_signals(info); - spin_unlock_irqrestore(&info->lock, flags); - if (info->serial_signals & SerialSignal_DCD) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - return 0; -} - -/** - * called by network layer when interface is disabled - * shutdown hardware and release resources - * - * dev pointer to network device structure - * - * returns 0 if success, otherwise error code - */ -static int hdlcdev_close(struct net_device *dev) -{ - MGSLPC_INFO *info = dev_to_port(dev); - struct tty_struct *tty = tty_port_tty_get(&info->port); - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name); - - netif_stop_queue(dev); - - /* shutdown adapter and release resources */ - shutdown(info, tty); - tty_kref_put(tty); - hdlc_close(dev); - - spin_lock_irqsave(&info->netlock, flags); - info->netcount=0; - spin_unlock_irqrestore(&info->netlock, flags); - - return 0; -} - -/** - * called by network layer to process IOCTL call to network device - * - * dev pointer to network device structure - * ifr pointer to network interface request structure - * cmd IOCTL command code - * - * returns 0 if success, otherwise error code - */ -static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - const size_t size = sizeof(sync_serial_settings); - sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - MGSLPC_INFO *info = dev_to_port(dev); - unsigned int flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name); - - /* return error if TTY interface open */ - if (info->port.count) - return -EBUSY; - - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); - - memset(&new_line, 0, size); - - switch(ifr->ifr_settings.type) { - case IF_GET_IFACE: /* return current sync_serial_settings */ - - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ - return -ENOBUFS; - } - - flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | - HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | - HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | - HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); - - switch (flags){ - case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; - case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; - case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; - case 
(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; - default: new_line.clock_type = CLOCK_DEFAULT; - } - - new_line.clock_rate = info->params.clock_speed; - new_line.loopback = info->params.loopback ? 1:0; - - if (copy_to_user(line, &new_line, size)) - return -EFAULT; - return 0; - - case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ - - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(&new_line, line, size)) - return -EFAULT; - - switch (new_line.clock_type) - { - case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; - case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; - case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; - case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; - case CLOCK_DEFAULT: flags = info->params.flags & - (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | - HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | - HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | - HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; - default: return -EINVAL; - } - - if (new_line.loopback != 0 && new_line.loopback != 1) - return -EINVAL; - - info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | - HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | - HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | - HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); - info->params.flags |= flags; - - info->params.loopback = new_line.loopback; - - if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) - info->params.clock_speed = new_line.clock_rate; - else - info->params.clock_speed = 0; - - /* if network interface up, reprogram hardware */ - if (info->netcount) { - struct tty_struct *tty = tty_port_tty_get(&info->port); - mgslpc_program_hw(info, tty); - tty_kref_put(tty); - } - return 0; - - default: - return hdlc_ioctl(dev, ifr, cmd); - } -} - -/** - * called by network layer when transmit timeout is detected - * - * dev pointer to network device structure - */ -static void hdlcdev_tx_timeout(struct net_device *dev) -{ - MGSLPC_INFO *info = dev_to_port(dev); - unsigned long flags; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("hdlcdev_tx_timeout(%s)\n", dev->name); - - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - - spin_lock_irqsave(&info->lock, flags); - tx_stop(info); - spin_unlock_irqrestore(&info->lock, flags); - - netif_wake_queue(dev); -} - -/** - * called by device driver when transmit completes - * reenable network layer transmit if stopped - * - * info pointer to device instance information - */ -static void hdlcdev_tx_done(MGSLPC_INFO *info) -{ - if (netif_queue_stopped(info->netdev)) - netif_wake_queue(info->netdev); -} - -/** - * called by device driver when frame received - * pass frame to network layer - * - * info pointer to device instance information - * buf pointer to buffer contianing frame data - * size count of data bytes in buf - */ -static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) -{ - struct sk_buff *skb = dev_alloc_skb(size); - struct net_device *dev = info->netdev; - - if (debug_level >= DEBUG_LEVEL_INFO) - printk("hdlcdev_rx(%s)\n", dev->name); - - if (skb == NULL) { - printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); - dev->stats.rx_dropped++; - return; - } - - skb_put_data(skb, buf, size); - - skb->protocol = hdlc_type_trans(skb, dev); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; - - netif_rx(skb); -} - -static const struct net_device_ops hdlcdev_ops = { - .ndo_open = hdlcdev_open, 
- .ndo_stop = hdlcdev_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = hdlcdev_ioctl, - .ndo_tx_timeout = hdlcdev_tx_timeout, -}; - -/** - * called by device driver when adding device instance - * do generic HDLC initialization - * - * info pointer to device instance information - * - * returns 0 if success, otherwise error code - */ -static int hdlcdev_init(MGSLPC_INFO *info) -{ - int rc; - struct net_device *dev; - hdlc_device *hdlc; - - /* allocate and initialize network and HDLC layer objects */ - - dev = alloc_hdlcdev(info); - if (dev == NULL) { - printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__); - return -ENOMEM; - } - - /* for network layer reporting purposes only */ - dev->base_addr = info->io_base; - dev->irq = info->irq_level; - - /* network layer callbacks and settings */ - dev->netdev_ops = &hdlcdev_ops; - dev->watchdog_timeo = 10 * HZ; - dev->tx_queue_len = 50; - - /* generic HDLC layer callbacks and settings */ - hdlc = dev_to_hdlc(dev); - hdlc->attach = hdlcdev_attach; - hdlc->xmit = hdlcdev_xmit; - - /* register objects with HDLC layer */ - rc = register_hdlc_device(dev); - if (rc) { - printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__); - free_netdev(dev); - return rc; - } - - info->netdev = dev; - return 0; -} - -/** - * called by device driver when removing device instance - * do generic HDLC cleanup - * - * info pointer to device instance information - */ -static void hdlcdev_exit(MGSLPC_INFO *info) -{ - unregister_hdlc_device(info->netdev); - free_netdev(info->netdev); - info->netdev = NULL; -} - -#endif /* CONFIG_HDLC */ - diff --git a/drivers/char/pin_memory.c b/drivers/char/pin_memory.c new file mode 100644 index 0000000000000000000000000000000000000000..e6ed26c18135b086651e1d50b410732f23781720 --- /dev/null +++ b/drivers/char/pin_memory.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright @ Huawei Technologies Co., Ltd. 2020-2020. ALL rights reserved. 
+ * Description: Euler pin memory driver + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_PIN_MEM_AREA_NUM 16 +struct _pin_mem_area { + unsigned long virt_start; + unsigned long virt_end; +}; + +struct pin_mem_area_set { + unsigned int pid; + unsigned int area_num; + struct _pin_mem_area mem_area[MAX_PIN_MEM_AREA_NUM]; +}; + +#define PIN_MEM_MAGIC 0x59 +#define _SET_PIN_MEM_AREA 1 +#define _CLEAR_PIN_MEM_AREA 2 +#define _REMAP_PIN_MEM_AREA 3 +#define _FINISH_PIN_MEM_DUMP 4 +#define _INIT_PAGEMAP_READ 5 +#ifdef CONFIG_PID_RESERVE +#define _SET_FORK_PID 6 +#define _PIN_MEM_IOC_MAX_NR 6 +#else +#define _PIN_MEM_IOC_MAX_NR 5 +#endif +#define SET_PIN_MEM_AREA _IOW(PIN_MEM_MAGIC, _SET_PIN_MEM_AREA, struct pin_mem_area_set) +#define CLEAR_PIN_MEM_AREA _IOW(PIN_MEM_MAGIC, _CLEAR_PIN_MEM_AREA, int) +#define REMAP_PIN_MEM_AREA _IOW(PIN_MEM_MAGIC, _REMAP_PIN_MEM_AREA, int) +#define FINISH_PIN_MEM_DUMP _IOW(PIN_MEM_MAGIC, _FINISH_PIN_MEM_DUMP, int) +#define INIT_PAGEMAP_READ _IOW(PIN_MEM_MAGIC, _INIT_PAGEMAP_READ, int) +#ifdef CONFIG_PID_RESERVE +#define SET_FORK_PID _IOW(PIN_MEM_MAGIC, _SET_FORK_PID, int) +#endif +static int set_pin_mem(struct pin_mem_area_set *pmas) +{ + int i; + int ret = 0; + struct _pin_mem_area *pma; + struct mm_struct *mm; + struct task_struct *task; + struct pid *pid_s; + + pid_s = find_get_pid(pmas->pid); + if (!pid_s) { + pr_warn("Get pid struct fail:%d.\n", pmas->pid); + return -EFAULT; + } + rcu_read_lock(); + task = pid_task(pid_s, PIDTYPE_PID); + if (!task) { + pr_warn("Get task struct fail:%d.\n", pmas->pid); + goto fail; + } + mm = get_task_mm(task); + for (i = 0; i < pmas->area_num; i++) { + pma = &(pmas->mem_area[i]); + ret = pin_mem_area(task, mm, pma->virt_start, pma->virt_end); + if (ret) { + mmput(mm); + goto fail; + } + } + mmput(mm); + rcu_read_unlock(); + put_pid(pid_s); + return ret; + +fail: + rcu_read_unlock(); + put_pid(pid_s); + return -EFAULT; +} + +static int set_pin_mem_area(unsigned long arg) +{ + struct pin_mem_area_set pmas; + void __user *buf = (void __user *)arg; + + if (copy_from_user(&pmas, buf, sizeof(pmas))) + return -EINVAL; + if (pmas.area_num > MAX_PIN_MEM_AREA_NUM) { + pr_warn("Input area_num is too large.\n"); + return -EINVAL; + } + + return set_pin_mem(&pmas); +} + +static int pin_mem_remap(unsigned long arg) +{ + int pid; + struct task_struct *task; + struct mm_struct *mm; + vm_fault_t ret; + void __user *buf = (void __user *)arg; + struct pid *pid_s; + + if (copy_from_user(&pid, buf, sizeof(int))) + return -EINVAL; + + pid_s = find_get_pid(pid); + if (!pid_s) { + pr_warn("Get pid struct fail:%d.\n", pid); + return -EINVAL; + } + rcu_read_lock(); + task = pid_task(pid_s, PIDTYPE_PID); + if (!task) { + pr_warn("Get task struct fail:%d.\n", pid); + goto fault; + } + mm = get_task_mm(task); + ret = do_mem_remap(pid, mm); + if (ret) { + pr_warn("Handle pin memory remap fail.\n"); + mmput(mm); + goto fault; + } + mmput(mm); + rcu_read_unlock(); + put_pid(pid_s); + return 0; + +fault: + rcu_read_unlock(); + put_pid(pid_s); + return -EFAULT; +} + +#ifdef CONFIG_PID_RESERVE +static int set_fork_pid(unsigned long arg) +{ + int pid; + void __user *buf = (void __user *)arg; + + if (copy_from_user(&pid, buf, sizeof(int))) + goto fault; + current->fork_pid_union.fork_pid = pid; + return 0; +fault: + return -EFAULT; +} +#endif + +static long pin_memory_ioctl(struct file *file, unsigned int cmd, unsigned long 
arg) +{ + long ret = 0; + + if (_IOC_TYPE(cmd) != PIN_MEM_MAGIC) + return -EINVAL; + if (_IOC_NR(cmd) > _PIN_MEM_IOC_MAX_NR) + return -EINVAL; + + switch (cmd) { + case SET_PIN_MEM_AREA: + ret = set_pin_mem_area(arg); + break; + case CLEAR_PIN_MEM_AREA: + clear_pin_memory_record(); + break; + case REMAP_PIN_MEM_AREA: + ret = pin_mem_remap(arg); + break; + case FINISH_PIN_MEM_DUMP: + ret = finish_pin_mem_dump(); + break; + case INIT_PAGEMAP_READ: + ret = init_pagemap_read(); + break; +#ifdef CONFIG_PID_RESERVE + case SET_FORK_PID: + ret = set_fork_pid(arg); + break; +#endif + default: + return -EINVAL; + } + return ret; +} + +static const struct file_operations pin_memory_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = pin_memory_ioctl, + .compat_ioctl = pin_memory_ioctl, +}; + +static struct miscdevice pin_memory_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "pinmem", + .fops = &pin_memory_fops, +}; + +static int pin_memory_init(void) +{ + int err = misc_register(&pin_memory_miscdev); + + if (!err) + pr_info("pin_memory init\n"); + else + pr_warn("pin_memory init failed!\n"); + return err; +} + +static void pin_memory_exit(void) +{ + misc_deregister(&pin_memory_miscdev); + pr_info("pin_memory ko exists!\n"); +} + +module_init(pin_memory_init); +module_exit(pin_memory_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Euler"); +MODULE_DESCRIPTION("pin memory"); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 1ae77b41050abb8404046261718cf6e8a6386da5..6422a184a58aa9386d5d5609b01cb76d4d18d7e4 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -300,28 +300,35 @@ static int register_device(int minor, struct pp_struct *pp) if (!port) { pr_warn("%s: no associated port!\n", name); rc = -ENXIO; - goto err; + goto err_free_name; + } + + index = ida_alloc(&ida_index, GFP_KERNEL); + if (index < 0) { + pr_warn("%s: failed to get index!\n", name); + rc = index; + goto err_put_port; } - index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, index); - parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; - ida_simple_remove(&ida_index, index); - goto err; + ida_free(&ida_index, index); + goto err_put_port; } pp->pdev = pdev; pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); -err: +err_put_port: + parport_put_port(port); +err_free_name: kfree(name); return rc; } @@ -623,20 +630,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (copy_from_user(time32, argp, sizeof(time32))) return -EFAULT; + if ((time32[0] < 0) || (time32[1] < 0)) + return -EINVAL; + return pp_set_timeout(pp->pdev, time32[0], time32[1]); case PPSETTIME64: if (copy_from_user(time64, argp, sizeof(time64))) return -EFAULT; + if ((time64[0] < 0) || (time64[1] < 0)) + return -EINVAL; + + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + time64[1] >>= 32; + return pp_set_timeout(pp->pdev, time64[0], time64[1]); case PPGETTIME32: jiffies_to_timespec64(pp->pdev->timeout, &ts); time32[0] = ts.tv_sec; time32[1] = ts.tv_nsec / NSEC_PER_USEC; - if ((time32[0] < 0) || (time32[1] < 0)) - return -EINVAL; if (copy_to_user(argp, time32, sizeof(time32))) return -EFAULT; @@ -647,8 +661,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) jiffies_to_timespec64(pp->pdev->timeout, &ts); time64[0] = ts.tv_sec; time64[1] = ts.tv_nsec / NSEC_PER_USEC; - if ((time64[0] < 0) || (time64[1] < 0)) - return -EINVAL; + + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall()) + time64[1] <<= 32; if (copy_to_user(argp, time64, sizeof(time64))) return -EFAULT; @@ -758,7 +773,7 @@ static int pp_release(struct inode *inode, struct file *file) if (pp->pdev) { parport_unregister_device(pp->pdev); - ida_simple_remove(&ida_index, pp->index); + ida_free(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } diff --git a/drivers/char/random.c b/drivers/char/random.c index c75b6cdf00533ac82ec6fb957bc1669f1f44a34d..a008d816cf2bdc74908d12b864462de8e9535717 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -433,9 +433,9 @@ static int crng_init_cnt = 0; static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, - __u32 out[CHACHA20_BLOCK_WORDS]); + __u8 out[CHACHA20_BLOCK_SIZE]); static void _crng_backtrack_protect(struct crng_state *crng, - __u32 tmp[CHACHA20_BLOCK_WORDS], int used); + __u8 tmp[CHACHA20_BLOCK_SIZE], int used); static void process_random_ready_list(void); static void _get_random_bytes(void *buf, int nbytes); @@ -471,7 +471,6 @@ struct entropy_store { unsigned short add_ptr; unsigned short input_rotate; int entropy_count; - int entropy_total; unsigned int initialized:1; unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; @@ -644,7 +643,7 @@ static void process_random_ready_list(void) */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { - int entropy_count, orig; + int entropy_count, orig, has_initialized = 0; const int pool_size = r->poolinfo->poolfracbits; int nfrac = nbits << ENTROPY_SHIFT; @@ -699,47 +698,53 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) entropy_count = 0; } else if (entropy_count > pool_size) entropy_count = pool_size; + if ((r == &blocking_pool) && !r->initialized && + (entropy_count >> ENTROPY_SHIFT) > 128) + has_initialized = 1; 
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; - r->entropy_total += nbits; - if (!r->initialized && r->entropy_total > 128) { + if (has_initialized) { r->initialized = 1; - r->entropy_total = 0; + wake_up_interruptible(&random_read_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); } trace_credit_entropy_bits(r->name, nbits, - entropy_count >> ENTROPY_SHIFT, - r->entropy_total, _RET_IP_); + entropy_count >> ENTROPY_SHIFT, _RET_IP_); if (r == &input_pool) { int entropy_bits = entropy_count >> ENTROPY_SHIFT; + struct entropy_store *other = &blocking_pool; - if (crng_init < 2 && entropy_bits >= 128) { + if (crng_init < 2) { + if (entropy_bits < 128) + return; crng_reseed(&primary_crng, r); entropy_bits = r->entropy_count >> ENTROPY_SHIFT; } + /* initialize the blocking pool if necessary */ + if (entropy_bits >= random_read_wakeup_bits && + !other->initialized) { + schedule_work(&other->push_work); + return; + } + /* should we wake readers? */ if (entropy_bits >= random_read_wakeup_bits && wq_has_sleeper(&random_read_wait)) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } - /* If the input pool is getting full, send some - * entropy to the blocking pool until it is 75% full. + /* If the input pool is getting full, and the blocking + * pool has room, send some entropy to the blocking + * pool. */ - if (entropy_bits > random_write_wakeup_bits && - r->initialized && - r->entropy_total >= 2*random_read_wakeup_bits) { - struct entropy_store *other = &blocking_pool; - - if (other->entropy_count <= - 3 * other->poolinfo->poolfracbits / 4) { - schedule_work(&other->push_work); - r->entropy_total = 0; - } - } + if (!work_pending(&other->push_work) && + (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) && + (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes)) + schedule_work(&other->push_work); } } @@ -778,6 +783,7 @@ static struct crng_state **crng_node_pool __read_mostly; #endif static void invalidate_batched_entropy(void); +static void numa_crng_init(void); static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); static int __init parse_trust_cpu(char *arg) @@ -806,7 +812,9 @@ static void crng_initialize(struct crng_state *crng) } crng->state[i] ^= rv; } - if (trust_cpu && arch_init) { + if (trust_cpu && arch_init && crng == &primary_crng) { + invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; pr_notice("random: crng done (trusting CPU's manufacturer)\n"); } @@ -828,8 +836,8 @@ static void do_numa_crng_init(struct work_struct *work) crng_initialize(crng); pool[i] = crng; } - mb(); - if (cmpxchg(&crng_node_pool, NULL, pool)) { + /* pairs with READ_ONCE() in select_crng() */ + if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) { for_each_node(i) kfree(pool[i]); kfree(pool); @@ -842,8 +850,26 @@ static void numa_crng_init(void) { schedule_work(&numa_crng_init_work); } + +static struct crng_state *select_crng(void) +{ + struct crng_state **pool; + int nid = numa_node_id(); + + /* pairs with cmpxchg_release() in do_numa_crng_init() */ + pool = READ_ONCE(crng_node_pool); + if (pool && pool[nid]) + return pool[nid]; + + return &primary_crng; +} #else static void numa_crng_init(void) {} + +static struct crng_state *select_crng(void) +{ + return &primary_crng; +} #endif /* @@ -926,7 +952,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) unsigned long flags; int i, num; union { - __u32 block[CHACHA20_BLOCK_WORDS]; + __u8 block[CHACHA20_BLOCK_SIZE]; __u32 key[8]; } buf; @@ -973,7 
+999,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } static void _extract_crng(struct crng_state *crng, - __u32 out[CHACHA20_BLOCK_WORDS]) + __u8 out[CHACHA20_BLOCK_SIZE]) { unsigned long v, flags; @@ -990,17 +1016,9 @@ static void _extract_crng(struct crng_state *crng, spin_unlock_irqrestore(&crng->lock, flags); } -static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS]) +static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE]) { - struct crng_state *crng = NULL; - -#ifdef CONFIG_NUMA - if (crng_node_pool) - crng = crng_node_pool[numa_node_id()]; - if (crng == NULL) -#endif - crng = &primary_crng; - _extract_crng(crng, out); + _extract_crng(select_crng(), out); } /* @@ -1008,7 +1026,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS]) * enough) to mutate the CRNG key to provide backtracking protection. */ static void _crng_backtrack_protect(struct crng_state *crng, - __u32 tmp[CHACHA20_BLOCK_WORDS], int used) + __u8 tmp[CHACHA20_BLOCK_SIZE], int used) { unsigned long flags; __u32 *s, *d; @@ -1020,30 +1038,22 @@ static void _crng_backtrack_protect(struct crng_state *crng, used = 0; } spin_lock_irqsave(&crng->lock, flags); - s = &tmp[used / sizeof(__u32)]; + s = (__u32 *) &tmp[used]; d = &crng->state[4]; for (i=0; i < 8; i++) *d++ ^= *s++; spin_unlock_irqrestore(&crng->lock, flags); } -static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used) +static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used) { - struct crng_state *crng = NULL; - -#ifdef CONFIG_NUMA - if (crng_node_pool) - crng = crng_node_pool[numa_node_id()]; - if (crng == NULL) -#endif - crng = &primary_crng; - _crng_backtrack_protect(crng, tmp, used); + _crng_backtrack_protect(select_crng(), tmp, used); } static ssize_t extract_crng_user(void __user *buf, size_t nbytes) { ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; - __u32 tmp[CHACHA20_BLOCK_WORDS]; + __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4); int large_request = (nbytes > 256); while (nbytes) { @@ -1147,14 +1157,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. 
*/ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies); - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; @@ -1266,7 +1276,8 @@ void add_interrupt_randomness(int irq, int irq_flags) } if ((fast_pool->count < 64) && - !time_after(now, fast_pool->last + HZ)) + !time_after(now, fast_pool->last + HZ) && + crng_ready()) return; r = &input_pool; @@ -1554,6 +1565,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, int large_request = (nbytes > 256); trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); + if (!r->initialized && r->pull) { + xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8); + if (!r->initialized) + return 0; + } xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, 0, 0); @@ -1622,7 +1638,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, */ static void _get_random_bytes(void *buf, int nbytes) { - __u32 tmp[CHACHA20_BLOCK_WORDS]; + __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4); trace_get_random_bytes(nbytes, _RET_IP_); @@ -1866,8 +1882,8 @@ _random_read(int nonblock, char __user *buf, size_t nbytes) return -EAGAIN; wait_event_interruptible(random_read_wait, - ENTROPY_BITS(&input_pool) >= - random_read_wakeup_bits); + blocking_pool.initialized && + (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)); if (signal_pending(current)) return -ERESTARTSYS; } @@ -2008,7 +2024,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(&primary_crng, NULL); + crng_reseed(&primary_crng, &input_pool); crng_global_init_time = jiffies - 1; return 0; default: @@ -2212,79 +2228,62 @@ struct batched_entropy { u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; }; unsigned int position; + spinlock_t batch_lock; }; -static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock); /* * Get a random word for internal kernel use only. The quality of the random - * number is either as good as RDRAND or as good as /dev/urandom, with the - * goal of being quite fast and not depleting entropy. In order to ensure + * number is good as /dev/urandom, but there is no backtrack protection, with + * the goal of being quite fast and not depleting entropy. In order to ensure * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * wait_for_random_bytes() should be called and return 0 at least once at any + * point prior. 
*/ -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), +}; + u64 get_random_u64(void) { u64 ret; - bool use_lock; - unsigned long flags = 0; + unsigned long flags; struct batched_entropy *batch; static void *previous; -#if BITS_PER_LONG == 64 - if (arch_get_random_long((unsigned long *)&ret)) - return ret; -#else - if (arch_get_random_long((unsigned long *)&ret) && - arch_get_random_long((unsigned long *)&ret + 1)) - return ret; -#endif - warn_unseeded_randomness(&previous); - use_lock = READ_ONCE(crng_init) < 2; - batch = &get_cpu_var(batched_entropy_u64); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); + batch = raw_cpu_ptr(&batched_entropy_u64); + spin_lock_irqsave(&batch->batch_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { - extract_crng((__u32 *)batch->entropy_u64); + extract_crng((u8 *)batch->entropy_u64); batch->position = 0; } ret = batch->entropy_u64[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); - put_cpu_var(batched_entropy_u64); + spin_unlock_irqrestore(&batch->batch_lock, flags); return ret; } EXPORT_SYMBOL(get_random_u64); -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), +}; u32 get_random_u32(void) { u32 ret; - bool use_lock; - unsigned long flags = 0; + unsigned long flags; struct batched_entropy *batch; static void *previous; - if (arch_get_random_int(&ret)) - return ret; - warn_unseeded_randomness(&previous); - use_lock = READ_ONCE(crng_init) < 2; - batch = &get_cpu_var(batched_entropy_u32); - if (use_lock) - read_lock_irqsave(&batched_entropy_reset_lock, flags); + batch = raw_cpu_ptr(&batched_entropy_u32); + spin_lock_irqsave(&batch->batch_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { - extract_crng(batch->entropy_u32); + extract_crng((u8 *)batch->entropy_u32); batch->position = 0; } ret = batch->entropy_u32[batch->position++]; - if (use_lock) - read_unlock_irqrestore(&batched_entropy_reset_lock, flags); - put_cpu_var(batched_entropy_u32); + spin_unlock_irqrestore(&batch->batch_lock, flags); return ret; } EXPORT_SYMBOL(get_random_u32); @@ -2298,12 +2297,19 @@ static void invalidate_batched_entropy(void) int cpu; unsigned long flags; - write_lock_irqsave(&batched_entropy_reset_lock, flags); for_each_possible_cpu (cpu) { - per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0; - per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0; + struct batched_entropy *batched_entropy; + + batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); + spin_lock_irqsave(&batched_entropy->batch_lock, flags); + batched_entropy->position = 0; + spin_unlock(&batched_entropy->batch_lock); + + batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); + spin_lock(&batched_entropy->batch_lock); + batched_entropy->position = 0; + spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); } - write_unlock_irqrestore(&batched_entropy_reset_lock, flags); } /** diff --git a/drivers/char/svm.c b/drivers/char/svm.c new file mode 100644 index 0000000000000000000000000000000000000000..d7f70f2019e2286848a384c584187bbfa6b1bdf8 --- /dev/null +++ b/drivers/char/svm.c @@ -0,0 +1,1943 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2018 Hisilicon Limited. 
+ * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SVM_DEVICE_NAME "svm" +#define ASID_SHIFT 48 + +#define SVM_IOCTL_PROCESS_BIND 0xffff +#define SVM_IOCTL_GET_PHYS 0xfff9 +#define SVM_IOCTL_SET_RC 0xfffc +#define SVM_IOCTL_LOAD_FLAG 0xfffa +#define SVM_IOCTL_PIN_MEMORY 0xfff7 +#define SVM_IOCTL_UNPIN_MEMORY 0xfff5 +#define SVM_IOCTL_REMAP_PROC 0xfff4 + +#define SVM_REMAP_MEM_LEN_MAX (16 * 1024 * 1024) + +#define SVM_IOCTL_RELEASE_PHYS32 0xfff3 +#define MMAP_PHY32_MAX (16 * 1024 * 1024) + +#define SVM_IOCTL_SP_ALLOC 0xfff2 +#define SVM_IOCTL_SP_FREE 0xfff1 +#define SPG_DEFAULT_ID 0 +#define CORE_SID 0 +static int probe_index; +static LIST_HEAD(child_list); +static DECLARE_RWSEM(svm_sem); +static struct rb_root svm_process_root = RB_ROOT; +static struct mutex svm_process_mutex; + +struct core_device { + struct device dev; + struct iommu_group *group; + struct iommu_domain *domain; + u8 smmu_bypass; + struct list_head entry; +}; + +struct svm_device { + unsigned long long id; + struct miscdevice miscdev; + struct device *dev; + phys_addr_t l2buff; + unsigned long l2size; +}; + +struct svm_bind_process { + pid_t vpid; + u64 ttbr; + u64 tcr; + int pasid; + u32 flags; +#define SVM_BIND_PID (1 << 0) +}; + +/* + *svm_process is released in svm_notifier_release() when mm refcnt + *goes down zero. We should access svm_process only in the context + *where mm_struct is valid, which means we should always get mm + *refcnt first. + */ +struct svm_process { + struct pid *pid; + struct mm_struct *mm; + unsigned long asid; + struct rb_node rb_node; + struct mmu_notifier notifier; + /* For postponed release */ + struct rcu_head rcu; + int pasid; + struct mutex mutex; + struct rb_root sdma_list; + struct svm_device *sdev; +}; + +struct svm_sdma { + struct rb_node node; + unsigned long addr; + int nr_pages; + struct page **pages; + atomic64_t ref; +}; + +struct svm_proc_mem { + u32 dev_id; + u32 len; + u64 pid; + u64 vaddr; + u64 buf; +}; + +struct spalloc { + unsigned long addr; + unsigned long size; + unsigned long flag; +}; + +struct addr_trans_args { + unsigned long vptr; + unsigned long *pptr; + unsigned int device_id; +}; + +static struct bus_type svm_bus_type = { + .name = "svm_bus", +}; + +static char *svm_cmd_to_string(unsigned int cmd) +{ + switch (cmd) { + case SVM_IOCTL_PROCESS_BIND: + return "bind"; + case SVM_IOCTL_GET_PHYS: + return "get phys"; + case SVM_IOCTL_SET_RC: + return "set rc"; + case SVM_IOCTL_PIN_MEMORY: + return "pin memory"; + case SVM_IOCTL_UNPIN_MEMORY: + return "unpin memory"; + case SVM_IOCTL_REMAP_PROC: + return "remap proc"; + case SVM_IOCTL_LOAD_FLAG: + return "load flag"; + case SVM_IOCTL_RELEASE_PHYS32: + return "release phys"; + default: + return "unsupported"; + } + + return NULL; +} + +/* + * image word of slot + * SVM_IMAGE_WORD_INIT: initial value, indicating that the slot is not used. + * SVM_IMAGE_WORD_VALID: valid data is filled in the slot + * SVM_IMAGE_WORD_DONE: the DMA operation is complete when the TS uses this address, + so, this slot can be freed. 
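+ * A slot therefore cycles INIT -> VALID -> DONE and back to INIT once
+ * svm_clean_done_slots() reclaims it and drops the page reference that was
+ * taken when the translation was published.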
+ */ +#define SVM_IMAGE_WORD_INIT 0x0 +#define SVM_IMAGE_WORD_VALID 0xaa55aa55 +#define SVM_IMAGE_WORD_DONE 0x55ff55ff + +/* + * The length of this structure must be 64 bytes, which is the agreement with the TS. + * And the data type and sequence cannot be changed, because the TS core reads data + * based on the data type and sequence. + * image_word: slot status. For details, see SVM_IMAGE_WORD_xxx + * pid: pid of process which ioctl svm device to get physical addr, it is used for + verification by TS. + * data_type: used to determine the data type by TS. Currently, data type must be + SVM_VA2PA_TYPE_DMA. + * char data[48]: for the data type SVM_VA2PA_TYPE_DMA, the DMA address is stored. + */ +struct svm_va2pa_slot { + int image_word; + int resv; + int pid; + int data_type; + union { + char user_defined_data[48]; + struct { + unsigned long phys; + unsigned long len; + char reserved[32]; + }; + }; +}; + +struct svm_va2pa_trunk { + struct svm_va2pa_slot *slots; + int slot_total; + int slot_used; + unsigned long *bitmap; + struct mutex mutex; + phys_addr_t base; + unsigned long size; +}; + +#define SVM_VA2PA_TRUNK_SIZE_MAX 0x3200000 +#define SVM_VA2PA_MEMORY_ALIGN 64 +#define SVM_VA2PA_SLOT_SIZE sizeof(struct svm_va2pa_slot) +#define SVM_VA2PA_TYPE_DMA 0x1 +#define SVM_MEM_REG "va2pa trunk" +#define SVM_VA2PA_CLEAN_BATCH_NUM 0x80 +#define SVM_VA2PA_TRUNK_COUNT_MAX 0x8 + +static struct svm_va2pa_trunk va2pa_trunk[SVM_VA2PA_TRUNK_COUNT_MAX]; + +struct device_node *svm_find_mem_reg_node(struct device *dev, const char *compat) +{ + int index = 0; + struct device_node *tmp = NULL; + struct device_node *np = dev->of_node; + + for (; ; index++) { + tmp = of_parse_phandle(np, "memory-region", index); + if (!tmp) + break; + + if (of_device_is_compatible(tmp, compat)) + return tmp; + + of_node_put(tmp); + } + + return NULL; +} + +static int svm_parse_trunk_memory(struct device *dev) +{ + int err, count; + struct resource r; + struct device_node *trunk = NULL; + + trunk = svm_find_mem_reg_node(dev, SVM_MEM_REG); + if (!trunk) { + dev_err(dev, "Didn't find reserved memory\n"); + return -EINVAL; + } + + for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) { + err = of_address_to_resource(trunk, count, &r); + if (err) + break; + + va2pa_trunk[count].base = r.start; + va2pa_trunk[count].size = resource_size(&r); + } + + if (!count) { + dev_err(dev, "Couldn't address to resource for reserved memory\n"); + return -ENODEV; + } + + return 0; +} + +static int __svm_setup_trunk(struct device *dev, struct svm_va2pa_trunk *trunk) +{ + int slot_total; + unsigned long *bitmap = NULL; + struct svm_va2pa_slot *slot = NULL; + phys_addr_t base = trunk->base; + unsigned long size = trunk->size; + + if (!IS_ALIGNED(base, SVM_VA2PA_MEMORY_ALIGN)) { + dev_err(dev, "Didn't aligned to %u\n", SVM_VA2PA_MEMORY_ALIGN); + return -EINVAL; + } + + if ((size == 0) || (size > SVM_VA2PA_TRUNK_SIZE_MAX)) { + dev_err(dev, "Size of reserved memory is not right\n"); + return -EINVAL; + } + + slot_total = size / SVM_VA2PA_SLOT_SIZE; + if (slot_total < BITS_PER_LONG) + return -EINVAL; + + bitmap = kvcalloc(slot_total / BITS_PER_LONG, sizeof(unsigned long), GFP_KERNEL); + if (!bitmap) { + dev_err(dev, "alloc memory failed\n"); + return -ENOMEM; + } + + slot = ioremap(base, size); + if (!slot) { + kvfree(bitmap); + dev_err(dev, "Ioremap trunk failed\n"); + return -ENXIO; + } + + trunk->slots = slot; + trunk->slot_used = 0; + trunk->slot_total = slot_total; + trunk->bitmap = bitmap; + mutex_init(&trunk->mutex); + + return 0; +} + 
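+/*
+ * Illustrative check (editor's sketch, not part of the original submission):
+ * the slot layout above is a 64-byte ABI shared with the TS, and userspace
+ * gets back index * SVM_VA2PA_SLOT_SIZE as a slot offset, so a build-time
+ * assertion would catch an accidental layout change:
+ */
+_Static_assert(sizeof(struct svm_va2pa_slot) == 64,
+	       "svm_va2pa_slot is a 64-byte ABI shared with the TS");
+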
+static int svm_setup_trunk(struct device *dev) +{ + int err = 0; + int count; + + for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) { + if (!va2pa_trunk[count].base) + break; + + err = __svm_setup_trunk(dev, &va2pa_trunk[count]); + if (err) + break; + } + + return err; +} + +static void svm_remove_trunk(struct device *dev) +{ + int count; + + for (count = 0; count < SVM_VA2PA_TRUNK_COUNT_MAX; count++) { + if (!va2pa_trunk[count].base) + break; + + iounmap(va2pa_trunk[count].slots); + kvfree(va2pa_trunk[count].bitmap); + va2pa_trunk[count].slots = NULL; + va2pa_trunk[count].bitmap = NULL; + } +} + +static void svm_set_slot_valid(struct svm_va2pa_trunk *trunk, unsigned long index, + unsigned long phys, unsigned long len) +{ + struct svm_va2pa_slot *slot = &trunk->slots[index]; + + slot->phys = phys; + slot->len = len; + slot->image_word = SVM_IMAGE_WORD_VALID; + slot->pid = current->tgid; + slot->data_type = SVM_VA2PA_TYPE_DMA; + __bitmap_set(trunk->bitmap, index, 1); + trunk->slot_used++; +} + +static void svm_set_slot_init(struct svm_va2pa_trunk *trunk, unsigned long index) +{ + struct svm_va2pa_slot *slot = &trunk->slots[index]; + + slot->image_word = SVM_IMAGE_WORD_INIT; + __bitmap_clear(trunk->bitmap, index, 1); + trunk->slot_used--; +} + +static void svm_clean_done_slots(struct svm_va2pa_trunk *trunk) +{ + int used = trunk->slot_used; + int count = 0; + long temp = -1; + phys_addr_t addr; + unsigned long *bitmap = trunk->bitmap; + + for (; count < used && count < SVM_VA2PA_CLEAN_BATCH_NUM;) { + temp = find_next_bit(bitmap, trunk->slot_total, temp + 1); + if (temp == trunk->slot_total) + break; + + count++; + if (trunk->slots[temp].image_word != SVM_IMAGE_WORD_DONE) + continue; + + addr = (phys_addr_t)trunk->slots[temp].phys; + put_page(pfn_to_page(PHYS_PFN(addr))); + svm_set_slot_init(trunk, temp); + } +} + +static int svm_find_slot_init(struct svm_va2pa_trunk *trunk, unsigned long *index) +{ + int temp; + unsigned long *bitmap = trunk->bitmap; + + temp = find_first_zero_bit(bitmap, trunk->slot_total); + if (temp == trunk->slot_total) + return -ENOSPC; + + *index = temp; + return 0; +} + +static int svm_va2pa_trunk_init(struct device *dev) +{ + int err; + + memset(va2pa_trunk, 0, sizeof(va2pa_trunk)); + + err = svm_parse_trunk_memory(dev); + if (err) + return err; + + err = svm_setup_trunk(dev); + if (err) + return err; + + return 0; +} + +static struct svm_process *find_svm_process(unsigned long asid) +{ + struct rb_node *node = svm_process_root.rb_node; + + while (node) { + struct svm_process *process = NULL; + + process = rb_entry(node, struct svm_process, rb_node); + if (asid < process->asid) + node = node->rb_left; + else if (asid > process->asid) + node = node->rb_right; + else + return process; + } + + return NULL; +} + +static void insert_svm_process(struct svm_process *process) +{ + struct rb_node **p = &svm_process_root.rb_node; + struct rb_node *parent = NULL; + + while (*p) { + struct svm_process *tmp_process = NULL; + + parent = *p; + tmp_process = rb_entry(parent, struct svm_process, rb_node); + if (process->asid < tmp_process->asid) + p = &(*p)->rb_left; + else if (process->asid > tmp_process->asid) + p = &(*p)->rb_right; + else { + WARN_ON_ONCE("asid already in the tree"); + return; + } + } + + rb_link_node(&process->rb_node, parent, p); + rb_insert_color(&process->rb_node, &svm_process_root); +} + +static void delete_svm_process(struct svm_process *process) +{ + rb_erase(&process->rb_node, &svm_process_root); + RB_CLEAR_NODE(&process->rb_node); +} + +static 
struct svm_device *file_to_sdev(struct file *file) +{ + return container_of(file->private_data, + struct svm_device, miscdev); +} + +static int svm_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static inline struct core_device *to_core_device(struct device *d) +{ + return container_of(d, struct core_device, dev); +} + +static void cdev_device_release(struct device *dev) +{ + struct core_device *cdev = to_core_device(dev); + + if (!acpi_disabled) + list_del(&cdev->entry); + + kfree(cdev); +} + +static int svm_remove_core(struct device *dev, void *data) +{ + struct core_device *cdev = to_core_device(dev); + + if (!cdev->smmu_bypass && cdev->group && cdev->domain) { + iommu_sva_device_shutdown(dev); + iommu_detach_group(cdev->domain, cdev->group); + iommu_group_put(cdev->group); + iommu_domain_free(cdev->domain); + } + + device_unregister(&cdev->dev); + + return 0; +} + +static struct svm_sdma *svm_find_sdma(struct svm_process *process, + unsigned long addr, int nr_pages) +{ + struct rb_node *node = process->sdma_list.rb_node; + + while (node) { + struct svm_sdma *sdma = NULL; + + sdma = rb_entry(node, struct svm_sdma, node); + if (addr < sdma->addr) + node = node->rb_left; + else if (addr > sdma->addr) + node = node->rb_right; + else if (nr_pages < sdma->nr_pages) + node = node->rb_left; + else if (nr_pages > sdma->nr_pages) + node = node->rb_right; + else { + return sdma; + } + } + + return NULL; +} + +static int svm_insert_sdma(struct svm_process *process, struct svm_sdma *sdma) +{ + struct rb_node **p = &process->sdma_list.rb_node; + struct rb_node *parent = NULL; + + while (*p) { + struct svm_sdma *tmp_sdma = NULL; + + parent = *p; + tmp_sdma = rb_entry(parent, struct svm_sdma, node); + if (sdma->addr < tmp_sdma->addr) + p = &(*p)->rb_left; + else if (sdma->addr > tmp_sdma->addr) + p = &(*p)->rb_right; + else if (sdma->nr_pages < tmp_sdma->nr_pages) + p = &(*p)->rb_left; + else if (sdma->nr_pages > tmp_sdma->nr_pages) + p = &(*p)->rb_right; + else { + /* + * add reference count and return -EBUSY + * to free former alloced one. 
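+ * The caller (svm_add_sdma) treats that -EBUSY as success: it unpins and
+ * frees the duplicate it just built and relies on the reference taken here.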
+ */ + atomic64_inc(&tmp_sdma->ref); + return -EBUSY; + } + } + + rb_link_node(&sdma->node, parent, p); + rb_insert_color(&sdma->node, &process->sdma_list); + + return 0; +} + +static void svm_remove_sdma(struct svm_process *process, + struct svm_sdma *sdma, bool try_rm) +{ + int null_count = 0; + + if (try_rm && (!atomic64_dec_and_test(&sdma->ref))) { + return; + } + + rb_erase(&sdma->node, &process->sdma_list); + RB_CLEAR_NODE(&sdma->node); + + while (sdma->nr_pages--) { + if (sdma->pages[sdma->nr_pages] == NULL) { + pr_err("null pointer, nr_pages:%d.\n", sdma->nr_pages); + null_count++; + continue; + } + + put_page(sdma->pages[sdma->nr_pages]); + } + + if (null_count) + dump_stack(); + + kvfree(sdma->pages); + kfree(sdma); +} + +static int svm_pin_pages(unsigned long addr, int nr_pages, + struct page **pages) +{ + int err; + + err = get_user_pages_fast(addr, nr_pages, 1, pages); + if (err > 0 && err < nr_pages) { + while (err--) + put_page(pages[err]); + err = -EFAULT; + } else if (err == 0) { + err = -EFAULT; + } + + return err; +} + +static int svm_add_sdma(struct svm_process *process, + unsigned long addr, unsigned long size) +{ + int err; + struct svm_sdma *sdma = NULL; + + sdma = kzalloc(sizeof(struct svm_sdma), GFP_KERNEL); + if (sdma == NULL) + return -ENOMEM; + + atomic64_set(&sdma->ref, 1); + sdma->addr = addr & PAGE_MASK; + sdma->nr_pages = (PAGE_ALIGN(size + addr) >> PAGE_SHIFT) - + (sdma->addr >> PAGE_SHIFT); + sdma->pages = kvcalloc(sdma->nr_pages, sizeof(char *), GFP_KERNEL); + if (sdma->pages == NULL) { + err = -ENOMEM; + goto err_free_sdma; + } + + /* + * If always pin the same addr with the same nr_pages, pin pages + * maybe should move after insert sdma with mutex lock. + */ + err = svm_pin_pages(sdma->addr, sdma->nr_pages, sdma->pages); + if (err < 0) { + pr_err("%s: failed to pin pages addr 0x%pK, size 0x%lx\n", + __func__, (void *)addr, size); + goto err_free_pages; + } + + err = svm_insert_sdma(process, sdma); + if (err < 0) { + err = 0; + pr_debug("%s: sdma already exist!\n", __func__); + goto err_unpin_pages; + } + + return err; + +err_unpin_pages: + while (sdma->nr_pages--) + put_page(sdma->pages[sdma->nr_pages]); +err_free_pages: + kvfree(sdma->pages); +err_free_sdma: + kfree(sdma); + + return err; +} + +static int svm_pin_memory(unsigned long __user *arg) +{ + int err; + struct svm_process *process = NULL; + unsigned long addr, size, asid; + + if (!acpi_disabled) + return -EPERM; + + if (arg == NULL) + return -EINVAL; + + if (get_user(addr, arg)) + return -EFAULT; + + if (get_user(size, arg + 1)) + return -EFAULT; + + if ((addr + size <= addr) || (size >= (u64)UINT_MAX) || (addr == 0)) + return -EINVAL; + + asid = mm_context_get(current->mm); + if (!asid) + return -ENOSPC; + + mutex_lock(&svm_process_mutex); + process = find_svm_process(asid); + if (process == NULL) { + mutex_unlock(&svm_process_mutex); + err = -ESRCH; + goto out; + } + mutex_unlock(&svm_process_mutex); + + mutex_lock(&process->mutex); + err = svm_add_sdma(process, addr, size); + mutex_unlock(&process->mutex); + +out: + mm_context_put(current->mm); + + return err; +} + +static int svm_unpin_memory(unsigned long __user *arg) +{ + int err = 0, nr_pages; + struct svm_sdma *sdma = NULL; + unsigned long addr, size, asid; + struct svm_process *process = NULL; + + if (!acpi_disabled) + return -EPERM; + + if (arg == NULL) + return -EINVAL; + + if (get_user(addr, arg)) + return -EFAULT; + + if (get_user(size, arg + 1)) + return -EFAULT; + + if (ULONG_MAX - addr < size) + return -EINVAL; + + asid = 
mm_context_get(current->mm); + if (!asid) + return -ENOSPC; + + nr_pages = (PAGE_ALIGN(size + addr) >> PAGE_SHIFT) - + ((addr & PAGE_MASK) >> PAGE_SHIFT); + addr &= PAGE_MASK; + + mutex_lock(&svm_process_mutex); + process = find_svm_process(asid); + if (process == NULL) { + mutex_unlock(&svm_process_mutex); + err = -ESRCH; + goto out; + } + mutex_unlock(&svm_process_mutex); + + mutex_lock(&process->mutex); + sdma = svm_find_sdma(process, addr, nr_pages); + if (sdma == NULL) { + mutex_unlock(&process->mutex); + err = -ESRCH; + goto out; + } + + svm_remove_sdma(process, sdma, true); + mutex_unlock(&process->mutex); + +out: + mm_context_put(current->mm); + + return err; +} + +static void svm_unpin_all(struct svm_process *process) +{ + struct rb_node *node = NULL; + + while ((node = rb_first(&process->sdma_list))) + svm_remove_sdma(process, + rb_entry(node, struct svm_sdma, node), + false); +} + +static int svm_acpi_bind_core(struct core_device *cdev, void *data) +{ + int err; + struct task_struct *task = NULL; + struct svm_process *process = data; + + if (cdev->smmu_bypass) + return 0; + + task = get_pid_task(process->pid, PIDTYPE_PID); + if (!task) { + pr_err("failed to get task_struct\n"); + return -ESRCH; + } + + err = iommu_sva_bind_device(&cdev->dev, task->mm, + &process->pasid, IOMMU_SVA_FEAT_IOPF, NULL); + if (err) + pr_err("failed to get the pasid\n"); + + put_task_struct(task); + + return err; +} + +static int svm_dt_bind_core(struct device *dev, void *data) +{ + int err; + struct task_struct *task = NULL; + struct svm_process *process = data; + struct core_device *cdev = to_core_device(dev); + + if (cdev->smmu_bypass) + return 0; + + task = get_pid_task(process->pid, PIDTYPE_PID); + if (!task) { + pr_err("failed to get task_struct\n"); + return -ESRCH; + } + + err = iommu_sva_bind_device(&cdev->dev, task->mm, + &process->pasid, IOMMU_SVA_FEAT_IOPF, NULL); + if (err) + pr_err("failed to get the pasid\n"); + + put_task_struct(task); + + return err; +} + +static void svm_dt_bind_cores(struct svm_process *process) +{ + device_for_each_child(process->sdev->dev, process, svm_dt_bind_core); +} + +static void svm_acpi_bind_cores(struct svm_process *process) +{ + struct core_device *pos = NULL; + + list_for_each_entry(pos, &child_list, entry) { + svm_acpi_bind_core(pos, process); + } +} + +static void svm_process_free(struct rcu_head *rcu) +{ + struct svm_process *process = NULL; + + process = container_of(rcu, struct svm_process, rcu); + svm_unpin_all(process); + mm_context_put(process->mm); + kfree(process); +} + +static void svm_process_release(struct svm_process *process) +{ + delete_svm_process(process); + put_pid(process->pid); + + /* + * If we're being released from process exit, the notifier callback + * ->release has already been called. Otherwise we don't need to go + * through there, the process isn't attached to anything anymore. Hence + * no_release. + */ + mmu_notifier_unregister_no_release(&process->notifier, process->mm); + + /* + * We can't free the structure here, because ->release might be + * attempting to grab it concurrently. And in the other case, if the + * structure is being released from within ->release, then + * __mmu_notifier_release expects to still have a valid mn when + * returning. So free the structure when it's safe, after the RCU grace + * period elapsed. 
+ */ + mmu_notifier_call_srcu(&process->rcu, svm_process_free); +} + +static void svm_notifier_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct svm_process *process = NULL; + + process = container_of(mn, struct svm_process, notifier); + + /* + * No need to call svm_unbind_cores(), as iommu-sva will do the + * unbind in its mm_notifier callback. + */ + + mutex_lock(&svm_process_mutex); + svm_process_release(process); + mutex_unlock(&svm_process_mutex); +} + +static struct mmu_notifier_ops svm_process_mmu_notifier = { + .release = svm_notifier_release, +}; + +static struct svm_process * +svm_process_alloc(struct svm_device *sdev, struct pid *pid, + struct mm_struct *mm, unsigned long asid) +{ + struct svm_process *process = kzalloc(sizeof(*process), GFP_ATOMIC); + + if (!process) + return ERR_PTR(-ENOMEM); + + process->sdev = sdev; + process->pid = pid; + process->mm = mm; + process->asid = asid; + process->sdma_list = RB_ROOT; //lint !e64 + mutex_init(&process->mutex); + process->notifier.ops = &svm_process_mmu_notifier; + + return process; +} + +static struct task_struct *svm_get_task(struct svm_bind_process params) +{ + struct task_struct *task = NULL; + + if (params.flags & ~SVM_BIND_PID) + return ERR_PTR(-EINVAL); + + if (params.flags & SVM_BIND_PID) { + struct mm_struct *mm = NULL; + + task = find_get_task_by_vpid(params.vpid); + if (task == NULL) + return ERR_PTR(-ESRCH); + + /* check the permission */ + mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); + if (IS_ERR_OR_NULL(mm)) { + pr_err("cannot access mm\n"); + put_task_struct(task); + return ERR_PTR(-ESRCH); + } + + mmput(mm); + } else { + get_task_struct(current); + task = current; + } + + return task; +} + +static int svm_process_bind(struct task_struct *task, + struct svm_device *sdev, u64 *ttbr, u64 *tcr, int *pasid) +{ + int err; + unsigned long asid; + struct pid *pid = NULL; + struct svm_process *process = NULL; + struct mm_struct *mm = NULL; + + if ((ttbr == NULL) || (tcr == NULL) || (pasid == NULL)) + return -EINVAL; + + pid = get_task_pid(task, PIDTYPE_PID); + if (pid == NULL) + return -EINVAL; + + mm = get_task_mm(task); + if (!mm) { + err = -EINVAL; + goto err_put_pid; + } + + asid = mm_context_get(mm); + if (!asid) { + err = -ENOSPC; + goto err_put_mm; + } + + /* If a svm_process already exists, use it */ + mutex_lock(&svm_process_mutex); + process = find_svm_process(asid); + if (process == NULL) { + process = svm_process_alloc(sdev, pid, mm, asid); + if (IS_ERR(process)) { + err = PTR_ERR(process); + mutex_unlock(&svm_process_mutex); + goto err_put_mm_context; + } + err = mmu_notifier_register(&process->notifier, mm); + if (err) { + mutex_unlock(&svm_process_mutex); + goto err_free_svm_process; + } + + insert_svm_process(process); + + if (acpi_disabled) + svm_dt_bind_cores(process); + else + svm_acpi_bind_cores(process); + + mutex_unlock(&svm_process_mutex); + } else { + mutex_unlock(&svm_process_mutex); + mm_context_put(mm); + put_pid(pid); + } + + + *ttbr = virt_to_phys(mm->pgd) | asid << ASID_SHIFT; + *tcr = read_sysreg(tcr_el1); + *pasid = process->pasid; + + mmput(mm); + return 0; + +err_free_svm_process: + kfree(process); +err_put_mm_context: + mm_context_put(mm); +err_put_mm: + mmput(mm); +err_put_pid: + put_pid(pid); + + return err; +} + +#ifdef CONFIG_ACPI +static int svm_acpi_add_core(struct svm_device *sdev, + struct acpi_device *children, int id) +{ + int err; + struct core_device *cdev = NULL; + char *name = NULL; + enum dev_dma_attr attr; + const union acpi_object *obj; + + name = 
devm_kasprintf(sdev->dev, GFP_KERNEL, "svm_child_dev%d", id); + if (name == NULL) + return -ENOMEM; + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (cdev == NULL) + return -ENOMEM; + cdev->dev.fwnode = &children->fwnode; + cdev->dev.parent = sdev->dev; + cdev->dev.bus = &svm_bus_type; + cdev->dev.release = cdev_device_release; + cdev->smmu_bypass = 0; + list_add(&cdev->entry, &child_list); + dev_set_name(&cdev->dev, "%s", name); + + err = device_register(&cdev->dev); + if (err) { + dev_info(&cdev->dev, "core_device register failed\n"); + list_del(&cdev->entry); + kfree(cdev); + return err; + } + + attr = device_get_dma_attr(&cdev->dev); + if (attr != DEV_DMA_NOT_SUPPORTED) { + err = acpi_dma_configure(&cdev->dev, attr); + if (err) { + dev_dbg(&cdev->dev, "acpi_dma_configure failed\n"); + return err; + } + } + + err = acpi_dev_get_property(children, "hisi,smmu-bypass", + DEV_PROP_U8, &obj); + if (err) { + dev_info(&children->dev, "read smmu bypass failed\n"); + } + + cdev->smmu_bypass = (u8)obj->integer.value; + + cdev->group = iommu_group_get(&cdev->dev); + if (IS_ERR_OR_NULL(cdev->group)) { + dev_err(&cdev->dev, "smmu is not right configured\n"); + return -ENXIO; + } + + cdev->domain = iommu_domain_alloc(sdev->dev->bus); + if (cdev->domain == NULL) { + dev_info(&cdev->dev, "failed to alloc domain\n"); + return -ENOMEM; + } + + err = iommu_attach_group(cdev->domain, cdev->group); + if (err) { + dev_err(&cdev->dev, "failed group to domain\n"); + return err; + } + + err = iommu_sva_device_init(&cdev->dev, IOMMU_SVA_FEAT_IOPF, + UINT_MAX, 0); + if (err) { + dev_err(&cdev->dev, "failed to init sva device\n"); + return err; + } + + return 0; +} + +static int svm_acpi_init_core(struct svm_device *sdev) +{ + int err = 0; + struct device *dev = sdev->dev; + struct acpi_device *adev = ACPI_COMPANION(sdev->dev); + struct acpi_device *cdev = NULL; + int id = 0; + + down_write(&svm_sem); + if (!svm_bus_type.iommu_ops) { + err = bus_register(&svm_bus_type); + if (err) { + up_write(&svm_sem); + dev_err(dev, "failed to register svm_bus_type\n"); + return err; + } + + err = bus_set_iommu(&svm_bus_type, dev->bus->iommu_ops); + if (err) { + up_write(&svm_sem); + dev_err(dev, "failed to set iommu for svm_bus_type\n"); + goto err_unregister_bus; + } + } else if (svm_bus_type.iommu_ops != dev->bus->iommu_ops) { + err = -EBUSY; + up_write(&svm_sem); + dev_err(dev, "iommu_ops configured, but changed!\n"); + return err; + } + up_write(&svm_sem); + + list_for_each_entry(cdev, &adev->children, node) { + err = svm_acpi_add_core(sdev, cdev, id++); + if (err) + device_for_each_child(dev, NULL, svm_remove_core); + } + + return err; + +err_unregister_bus: + bus_unregister(&svm_bus_type); + + return err; +} +#else +static int svm_acpi_init_core(struct svm_device *sdev) { return 0; } +#endif + +static int svm_of_add_core(struct svm_device *sdev, struct device_node *np) +{ + int err; + struct resource res; + struct core_device *cdev = NULL; + char *name = NULL; + + name = devm_kasprintf(sdev->dev, GFP_KERNEL, "svm%llu_%s", + sdev->id, np->name); + if (name == NULL) + return -ENOMEM; + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (cdev == NULL) + return -ENOMEM; + + cdev->dev.of_node = np; + cdev->dev.parent = sdev->dev; + cdev->dev.bus = &svm_bus_type; + cdev->dev.release = cdev_device_release; + cdev->smmu_bypass = of_property_read_bool(np, "hisi,smmu_bypass"); + dev_set_name(&cdev->dev, "%s", name); + + err = device_register(&cdev->dev); + if (err) { + dev_info(&cdev->dev, "core_device register failed\n"); + 
kfree(cdev); + return err; + } + + err = of_dma_configure(&cdev->dev, np, true); + if (err) { + dev_dbg(&cdev->dev, "of_dma_configure failed\n"); + return err; + } + + err = of_address_to_resource(np, 0, &res); + if (err) { + dev_info(&cdev->dev, "no reg, FW should install the sid\n"); + } else { + /* If the reg specified, install sid for the core */ + void __iomem *core_base = NULL; + int sid = cdev->dev.iommu_fwspec->ids[0]; + + core_base = ioremap(res.start, resource_size(&res)); + if (core_base == NULL) { + dev_err(&cdev->dev, "ioremap failed\n"); + return -ENOMEM; + } + + writel_relaxed(sid, core_base + CORE_SID); + iounmap(core_base); + } + + /* If core device is smmu bypass, request direct map. */ + if (cdev->smmu_bypass) { + err = iommu_request_dm_for_dev(&cdev->dev); + return err; + } + + cdev->group = iommu_group_get(&cdev->dev); + if (IS_ERR_OR_NULL(cdev->group)) { + dev_err(&cdev->dev, "smmu is not right configured\n"); + return -ENXIO; + } + + cdev->domain = iommu_domain_alloc(sdev->dev->bus); + if (cdev->domain == NULL) { + dev_info(&cdev->dev, "failed to alloc domain\n"); + return -ENOMEM; + } + + err = iommu_attach_group(cdev->domain, cdev->group); + if (err) { + dev_err(&cdev->dev, "failed group to domain\n"); + return err; + } + + err = iommu_sva_device_init(&cdev->dev, IOMMU_SVA_FEAT_IOPF, + UINT_MAX, 0); + if (err) { + dev_err(&cdev->dev, "failed to init sva device\n"); + return err; + } + + return 0; +} + +static int svm_dt_init_core(struct svm_device *sdev, struct device_node *np) +{ + int err = 0; + struct device_node *child = NULL; + struct device *dev = sdev->dev; + + down_write(&svm_sem); + if (svm_bus_type.iommu_ops == NULL) { + err = bus_register(&svm_bus_type); + if (err) { + up_write(&svm_sem); + dev_err(dev, "failed to register svm_bus_type\n"); + return err; + } + + err = bus_set_iommu(&svm_bus_type, dev->bus->iommu_ops); + if (err) { + up_write(&svm_sem); + dev_err(dev, "failed to set iommu for svm_bus_type\n"); + goto err_unregister_bus; + } + } else if (svm_bus_type.iommu_ops != dev->bus->iommu_ops) { + err = -EBUSY; + up_write(&svm_sem); + dev_err(dev, "iommu_ops configured, but changed!\n"); + return err; + } + up_write(&svm_sem); + + for_each_available_child_of_node(np, child) { + err = svm_of_add_core(sdev, child); + if (err) + device_for_each_child(dev, NULL, svm_remove_core); + } + + return err; + +err_unregister_bus: + bus_unregister(&svm_bus_type); + + return err; +} + +static pte_t *svm_get_pte(struct vm_area_struct *vma, + pud_t *pud, + unsigned long addr, + unsigned long *page_size, + unsigned long *offset) +{ + pte_t *pte = NULL; + unsigned long size = 0; + + if (is_vm_hugetlb_page(vma)) { + if (pud_present(*pud)) { + if (pud_val(*pud) && !(pud_val(*pud) & PUD_TABLE_BIT)) { + pte = (pte_t *)pud; + *offset = addr & (PUD_SIZE - 1); + size = PUD_SIZE; + } else { + pte = (pte_t *)pmd_offset(pud, addr); + *offset = addr & (PMD_SIZE - 1); + size = PMD_SIZE; + } + } else { + pr_err("%s:hugetlb but pud not present\n", __func__); + } + } else { + pmd_t *pmd = pmd_offset(pud, addr); + + if (pmd_none(*pmd)) + return NULL; + + if (pmd_trans_huge(*pmd)) { + pte = (pte_t *)pmd; + *offset = addr & (PMD_SIZE - 1); + size = PMD_SIZE; + } else if (pmd_trans_unstable(pmd)) { + pr_warn("%s: thp unstable\n", __func__); + } else { + pte = pte_offset_map(pmd, addr); + *offset = addr & (PAGE_SIZE - 1); + size = PAGE_SIZE; + } + } + + if (page_size) + *page_size = size; + + return pte; +} + +/* Must be called with mmap_sem held */ +static pte_t *svm_walk_pt(unsigned 
long addr, unsigned long *page_size, + unsigned long *offset) +{ + pgd_t *pgd = NULL; + pud_t *pud = NULL; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = NULL; + + vma = find_vma(mm, addr); + if (!vma) + return NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) + return NULL; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + + return svm_get_pte(vma, pud, addr, page_size, offset); +} + +static int svm_get_phys(unsigned long __user *arg) +{ + int err; + pte_t *ptep = NULL; + pte_t pte; + unsigned long index = 0; + struct page *page; + struct addr_trans_args args; + unsigned long addr, phys, offset; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = NULL; + unsigned long len; + unsigned int trunk_id; + struct svm_va2pa_trunk *trunk; + + if (!acpi_disabled) + return -EPERM; + + if (copy_from_user(&args, (void __user *)arg, sizeof(args))) + return -EFAULT; + + addr = args.vptr; + down_read(&mm->mmap_sem); + ptep = svm_walk_pt(addr, NULL, &offset); + if (!ptep) { + up_read(&mm->mmap_sem); + return -EINVAL; + } + + pte = READ_ONCE(*ptep); + if (!pte_present(pte) || !(pfn_present(pte_pfn(pte)))) { + up_read(&mm->mmap_sem); + return -EINVAL; + } + + page = pte_page(pte); + get_page(page); + + phys = PFN_PHYS(pte_pfn(pte)) + offset; + + /* fix ts problem, which need the len to check out memory */ + len = 0; + vma = find_vma(mm, addr); + if (vma) + len = vma->vm_end - addr; + + up_read(&mm->mmap_sem); + + trunk_id = args.device_id; + if (trunk_id >= SVM_VA2PA_TRUNK_COUNT_MAX) + return -EINVAL; + trunk = &va2pa_trunk[trunk_id]; + mutex_lock(&trunk->mutex); + svm_clean_done_slots(trunk); + if (trunk->slot_used == trunk->slot_total) { + err = -ENOSPC; + goto err_mutex_unlock; + } + + err = svm_find_slot_init(trunk, &index); + if (err) + goto err_mutex_unlock; + + svm_set_slot_valid(trunk, index, phys, len); + + err = put_user(index * SVM_VA2PA_SLOT_SIZE, (unsigned long __user *)args.pptr); + if (err) + goto err_slot_init; + + mutex_unlock(&trunk->mutex); + return 0; + +err_slot_init: + svm_set_slot_init(trunk, index); +err_mutex_unlock: + mutex_unlock(&trunk->mutex); + put_page(page); + return err; +} + +int svm_get_pasid(pid_t vpid, int dev_id __maybe_unused) +{ + int pasid; + unsigned long asid; + struct task_struct *task = NULL; + struct mm_struct *mm = NULL; + struct svm_process *process = NULL; + struct svm_bind_process params; + + params.flags = SVM_BIND_PID; + params.vpid = vpid; + params.pasid = -1; + params.ttbr = 0; + params.tcr = 0; + task = svm_get_task(params); + if (IS_ERR(task)) + return PTR_ERR(task); + + mm = get_task_mm(task); + if (mm == NULL) { + pasid = -EINVAL; + goto put_task; + } + + asid = mm_context_get(mm); + if (!asid) { + pasid = -ENOSPC; + goto put_mm; + } + + mutex_lock(&svm_process_mutex); + process = find_svm_process(asid); + mutex_unlock(&svm_process_mutex); + if (process) + pasid = process->pasid; + else + pasid = -ESRCH; + + mm_context_put(mm); +put_mm: + mmput(mm); +put_task: + put_task_struct(task); + + return pasid; +} +EXPORT_SYMBOL_GPL(svm_get_pasid); + +static int svm_proc_load_flag(int __user *arg) +{ + static atomic_t l2buf_load_flag = ATOMIC_INIT(0); + int flag; + + if (!acpi_disabled) + return -EPERM; + + if (arg == NULL) + return -EINVAL; + + if (0 == (atomic_cmpxchg(&l2buf_load_flag, 0, 1))) + flag = 0; + else + flag = 1; + + return put_user(flag, arg); +} + +static void svm_vma_open(struct vm_area_struct *vma) +{ + struct page *page = vma->vm_private_data; + + if (page) + 
get_page(page); +} + +static void svm_vma_close(struct vm_area_struct *vma) +{ + struct page *page = vma->vm_private_data; + + put_page(page); +} + +/* avoid split */ +static int svm_vma_split(struct vm_area_struct *area, unsigned long addr) +{ + return -EINVAL; +} + +/* avoid mremap */ +static int svm_vma_mremap(struct vm_area_struct *area) +{ + return -EINVAL; +} + +static const struct vm_operations_struct svm_vma_ops = { + .open = svm_vma_open, + .close = svm_vma_close, + .split = svm_vma_split, + .mremap = svm_vma_mremap, +}; + +static int svm_mmap(struct file *file, struct vm_area_struct *vma) +{ + int err; + struct svm_device *sdev = file_to_sdev(file); + + if (!acpi_disabled) + return -EPERM; + + if (vma->vm_flags & VM_PA32BIT) { + unsigned long vm_size = vma->vm_end - vma->vm_start; + struct page *page = NULL; + + if ((vma->vm_end < vma->vm_start) || (vm_size > MMAP_PHY32_MAX)) + return -EINVAL; + + /* vma->vm_pgoff transfer the nid */ + if (vma->vm_pgoff == 0) + page = alloc_pages(GFP_KERNEL | GFP_DMA32, + get_order(vm_size)); + else + page = alloc_pages_node((int)vma->vm_pgoff, + GFP_KERNEL | __GFP_THISNODE, + get_order(vm_size)); + if (!page) { + dev_err(sdev->dev, "fail to alloc page on node 0x%lx\n", + vma->vm_pgoff); + return -ENOMEM; + } + + err = remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(page), + vm_size, vma->vm_page_prot); + if (err) + dev_err(sdev->dev, + "fail to remap 0x%pK err=%d\n", + (void *)vma->vm_start, err); + + vma->vm_private_data = page; + vma->vm_ops = &svm_vma_ops; + } else { + if ((vma->vm_end < vma->vm_start) || + ((vma->vm_end - vma->vm_start) > sdev->l2size)) + return -EINVAL; + + vma->vm_page_prot = __pgprot((~PTE_SHARED) & + vma->vm_page_prot.pgprot); + + err = remap_pfn_range(vma, + vma->vm_start, + sdev->l2buff >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + __pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY)); + if (err) + dev_err(sdev->dev, + "fail to remap 0x%pK err=%d\n", + (void *)vma->vm_start, err); + } + + return err; +} + +static int svm_release_phys32(unsigned long __user *arg) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = NULL; + unsigned long addr; + unsigned int len = 0; + + if (arg == NULL) + return -EINVAL; + + if (get_user(addr, arg)) + return -EFAULT; + + down_read(&mm->mmap_sem); + + vma = find_vma(mm, addr); + if (!vma || addr != vma->vm_start) { + up_read(&mm->mmap_sem); + return -EFAULT; + } + + len = vma->vm_end - vma->vm_start; + + up_read(&mm->mmap_sem); + + return vm_munmap(addr, len); +} + +static unsigned long svm_sp_alloc_mem(unsigned long __user *arg) +{ + struct spalloc spallocinfo; + void *addr; + int ret; + + if (arg == NULL) { + pr_err("arg is invalid value.\n"); + return EFAULT; + } + + ret = copy_from_user(&spallocinfo, (void __user *)arg, sizeof(spallocinfo)); + if (ret) { + pr_err("failed to copy args from user space.\n"); + return EFAULT; + } + + addr = sp_alloc(spallocinfo.size, spallocinfo.flag, SPG_DEFAULT_ID); + if (IS_ERR_VALUE(addr)) { + pr_err("svm: sp alloc failed with %ld\n", PTR_ERR(addr)); + return EFAULT; + } + + sp_dump_stack(); + + spallocinfo.addr = (uintptr_t)addr; + if (copy_to_user((void __user *)arg, &spallocinfo, sizeof(struct spalloc))) { + sp_free(spallocinfo.addr, SPG_DEFAULT_ID); + return EFAULT; + } + + return 0; +} + +static int svm_sp_free_mem(unsigned long __user *arg) +{ + int ret; + struct spalloc spallocinfo; + + if (arg == NULL) { + pr_err("arg ivalue.\n"); + return -EFAULT; + } + + ret = copy_from_user(&spallocinfo, (void __user *)arg, 
sizeof(spallocinfo)); + if (ret) { + pr_err("failed to copy args from user space.\n"); + return -EFAULT; + } + + ret = is_sharepool_addr(spallocinfo.addr); + if (ret == FALSE){ + pr_err("svm: sp free failed because the addr is not from sp.\n"); + return -EINVAL; + } + + ret = sp_free(spallocinfo.addr, SPG_DEFAULT_ID); + if (ret != 0) { + pr_err("svm: sp free failed with %d.\n", ret); + return -EFAULT; + } + + sp_dump_stack(); + + return 0; +} + +/*svm ioctl will include some case for HI1980 and HI1910*/ +static long svm_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int err = -EINVAL; + struct svm_bind_process params; + struct svm_device *sdev = file_to_sdev(file); + struct task_struct *task; + + if (!arg) + return -EINVAL; + + if (cmd == SVM_IOCTL_PROCESS_BIND) { + err = copy_from_user(&params, (void __user *)arg, + sizeof(params)); + if (err) { + dev_err(sdev->dev, "fail to copy params %d\n", err); + return -EFAULT; + } + } + + switch (cmd) { + case SVM_IOCTL_PROCESS_BIND: + task = svm_get_task(params); + if (IS_ERR(task)) { + dev_err(sdev->dev, "failed to get task\n"); + return PTR_ERR(task); + } + + err = svm_process_bind(task, sdev, &params.ttbr, + &params.tcr, &params.pasid); + if (err) { + put_task_struct(task); + dev_err(sdev->dev, "failed to bind task %d\n", err); + return err; + } + + put_task_struct(task); + err = copy_to_user((void __user *)arg, &params, + sizeof(params)); + if (err) { + dev_err(sdev->dev, "failed to copy to user!\n"); + return -EFAULT; + } + break; + case SVM_IOCTL_GET_PHYS: + err = svm_get_phys((unsigned long __user *)arg); + break; + case SVM_IOCTL_PIN_MEMORY: + err = svm_pin_memory((unsigned long __user *)arg); + break; + case SVM_IOCTL_UNPIN_MEMORY: + err = svm_unpin_memory((unsigned long __user *)arg); + break; + case SVM_IOCTL_LOAD_FLAG: + err = svm_proc_load_flag((int __user *)arg); + break; + case SVM_IOCTL_RELEASE_PHYS32: + err = svm_release_phys32((unsigned long __user *)arg); + break; + case SVM_IOCTL_SP_ALLOC: + err = svm_sp_alloc_mem((unsigned long __user *)arg); + break; + case SVM_IOCTL_SP_FREE: + err = svm_sp_free_mem((unsigned long __user *)arg); + break; + default: + err = -EINVAL; + } + + if (err) + dev_err(sdev->dev, "%s: %s failed err = %d\n", __func__, + svm_cmd_to_string(cmd), err); + + return err; +} + +static const struct file_operations svm_fops = { + .owner = THIS_MODULE, + .open = svm_open, + .mmap = svm_mmap, + .unlocked_ioctl = svm_ioctl, +}; + +static int svm_dt_setup_l2buff(struct svm_device *sdev, struct device_node *np) +{ + struct device_node *l2buff = of_parse_phandle(np, "memory-region", 0); + + if (l2buff) { + struct resource r; + int err = of_address_to_resource(l2buff, 0, &r); + + if (err) { + of_node_put(l2buff); + return err; + } + + sdev->l2buff = r.start; + sdev->l2size = resource_size(&r); + } + + of_node_put(l2buff); + return 0; +} + +/*svm device probe this is init the svm device*/ +static int svm_device_probe(struct platform_device *pdev) +{ + int err = -1; + struct device *dev = &pdev->dev; + struct svm_device *sdev = NULL; + struct device_node *np = dev->of_node; + int alias_id; + + if (acpi_disabled && np == NULL) + return -ENODEV; + + if (!dev->bus) { + dev_dbg(dev, "this dev bus is NULL\n"); + return -EPROBE_DEFER; + } + + if (!dev->bus->iommu_ops) { + dev_dbg(dev, "defer probe svm device\n"); + return -EPROBE_DEFER; + } + + sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL); + if (sdev == NULL) + return -ENOMEM; + + if (!acpi_disabled) { + err = device_property_read_u64(dev, "svmid", &sdev->id); + if
(err) { + dev_err(dev, "failed to get this svm device id\n"); + return err; + } + } else { + alias_id = of_alias_get_id(np, "svm"); + if (alias_id < 0) + sdev->id = probe_index; + else + sdev->id = alias_id; + } + + sdev->dev = dev; + sdev->miscdev.minor = MISC_DYNAMIC_MINOR; + sdev->miscdev.fops = &svm_fops; + sdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, + SVM_DEVICE_NAME"%llu", sdev->id); + if (sdev->miscdev.name == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, sdev); + err = misc_register(&sdev->miscdev); + if (err) { + dev_err(dev, "Unable to register misc device\n"); + return err; + } + + if (!acpi_disabled) { + err = svm_acpi_init_core(sdev); + if (err) { + dev_err(dev, "failed to init acpi cores\n"); + goto err_unregister_misc; + } + } else { + /* + * Get the l2buff phys address and size, if it do not exist + * just warn and continue, and runtime can not use L2BUFF. + */ + err = svm_dt_setup_l2buff(sdev, np); + if (err) + dev_warn(dev, "Cannot get l2buff\n"); + + if (svm_va2pa_trunk_init(dev)) { + dev_err(dev, "failed to init va2pa trunk\n"); + goto err_unregister_misc; + } + + err = svm_dt_init_core(sdev, np); + if (err) { + dev_err(dev, "failed to init dt cores\n"); + goto err_remove_trunk; + } + + probe_index++; + } + + mutex_init(&svm_process_mutex); + + return err; + +err_remove_trunk: + svm_remove_trunk(dev); + +err_unregister_misc: + misc_deregister(&sdev->miscdev); + + return err; +} + +static int svm_device_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct svm_device *sdev = dev_get_drvdata(dev); + + device_for_each_child(sdev->dev, NULL, svm_remove_core); + misc_deregister(&sdev->miscdev); + + return 0; +} + +static void svm_device_shutdown(struct platform_device *pdev) +{ + svm_device_remove(pdev); +} + +static const struct acpi_device_id svm_acpi_match[] = { + { "HSVM1980", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, svm_acpi_match); + +static const struct of_device_id svm_of_match[] = { + { .compatible = "hisilicon,svm" }, + { } +}; +MODULE_DEVICE_TABLE(of, svm_of_match); + +/*svm acpi probe and remove*/ +static struct platform_driver svm_driver = { + .probe = svm_device_probe, + .remove = svm_device_remove, + .shutdown = svm_device_shutdown, + .driver = { + .name = SVM_DEVICE_NAME, + .acpi_match_table = ACPI_PTR(svm_acpi_match), + .of_match_table = svm_of_match, + }, +}; + +module_platform_driver(svm_driver); + +MODULE_DESCRIPTION("Hisilicon SVM driver"); +MODULE_AUTHOR("JianKang Chen "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 5a8720df2b51384930220a8111e306aaf58196f9..7d70b654df04d44c974ee069407a0294a4b9da78 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -104,11 +104,8 @@ static int tpm_read_log(struct tpm_chip *chip) * * If an event log is found then the securityfs files are setup to * export it to userspace, otherwise nothing is done. - * - * Returns -ENODEV if the firmware has no event log or securityfs is not - * supported. 
*/ -int tpm_bios_log_setup(struct tpm_chip *chip) +void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; @@ -117,7 +114,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip) rc = tpm_read_log(chip); if (rc < 0) - return rc; + return; log_version = rc; cnt = 0; @@ -163,13 +160,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip) cnt++; } - return 0; + return; err: - rc = PTR_ERR(chip->bios_dir[cnt]); chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); - return rc; + return; } void tpm_bios_log_teardown(struct tpm_chip *chip) diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c index 3e673ab22cb453ab37d8e77246429e6b995470db..abd3beeb515898120c774621cb0c21a2b21f61ff 100644 --- a/drivers/char/tpm/eventlog/efi.c +++ b/drivers/char/tpm/eventlog/efi.c @@ -43,6 +43,11 @@ int tpm_read_log_efi(struct tpm_chip *chip) log_size = log_tbl->size; memunmap(log_tbl); + if (!log_size) { + pr_warn("UEFI TPM log area empty\n"); + return -EIO; + } + log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size, MEMREMAP_WB); if (!log_tbl) { diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 58c84784ba25c72020e233fd3cbe82f12945aa16..a4621c83e2bf7cd1344378a15c04e24e5e0b0290 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -129,6 +129,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, u32 converted_event_size; u32 converted_event_type; + (*pos)++; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; @@ -146,7 +147,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index 1b8fa9de2cacedbbc2a290f4f75ccc75fd4c270d..820128167607e7a8b492b96f8f0d76aa037a54d4 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -37,7 +37,7 @@ * * Returns size of the event. If it is an invalid event, returns 0. */ -static int calc_tpm2_event_size(struct tcg_pcr_event2 *event, +static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event, struct tcg_pcr_event *event_header) { struct tcg_efi_specid_event *efispecid; @@ -143,6 +143,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, size_t event_size; void *marker; + (*pos)++; event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { @@ -167,7 +168,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, if (((v + event_size) >= limit) || (event_size == 0)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index be5d1abd3e8ef05d9ce66ba8d61d120750303c9e..8390c5b54c3bedbdd712bc55de00769d7fd9ca0a 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -33,7 +33,7 @@ struct st33zp24_i2c_phy { struct i2c_client *client; - u8 buf[TPM_BUFSIZE + 1]; + u8 buf[ST33ZP24_BUFSIZE + 1]; int io_lpcpd; }; diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index d7909ab287a85c7b75d2f44e70a443451e84a22f..ff019a1e3c68f9b9a672c9fe5946acaaea624504 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -63,7 +63,7 @@ * some latency byte before the answer is available (max 15). * We have 2048 + 1024 + 15. 
*/ -#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\ +#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\ MAX_SPI_LATENCY) diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index abd675bec88c8435a6b1277b3fac6336d0339c21..694fc58888c17e6688e548084ef53fbc9517f711 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, goto out_err; } - return len; + return 0; out_err: st33zp24_cancel(chip); release_locality(chip); diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h index 6f4a4198af6aa2637fb2347dff6cdb9109054b24..20da0a84988d6bde88c2bd95296484c366d1b206 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.h +++ b/drivers/char/tpm/st33zp24/st33zp24.h @@ -18,8 +18,8 @@ #ifndef __LOCAL_ST33ZP24_H__ #define __LOCAL_ST33ZP24_H__ -#define TPM_WRITE_DIRECTION 0x80 -#define TPM_BUFSIZE 2048 +#define TPM_WRITE_DIRECTION 0x80 +#define ST33ZP24_BUFSIZE 2048 struct st33zp24_dev { struct tpm_chip *chip; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 46caadca916a0a0d3e23b35ca53d3c87142dc97e..f3d194b9630541d5051baac6050ff45c0e5c1da6 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -163,14 +163,6 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } -static void tpm_devs_release(struct device *dev) -{ - struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - - /* release the master device reference */ - put_device(&chip->dev); -} - /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. @@ -187,12 +179,11 @@ static int tpm_class_shutdown(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - down_write(&chip->ops_sem); + down_write(&chip->ops_sem); + if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_CLEAR); - chip->ops = NULL; - up_write(&chip->ops_sem); - } + chip->ops = NULL; + up_write(&chip->ops_sem); return 0; } @@ -233,7 +224,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev_num = rc; device_initialize(&chip->dev); - device_initialize(&chip->devs); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; @@ -241,29 +231,12 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev.parent = pdev; chip->dev.groups = chip->groups; - chip->devs.parent = pdev; - chip->devs.class = tpmrm_class; - chip->devs.release = tpm_devs_release; - /* get extra reference on main device to hold on - * behalf of devs. This holds the chip structure - * while cdevs is in use. 
The corresponding put - * is in the tpm_devs_release (TPM2 only) - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - get_device(&chip->dev); - if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - chip->devs.devt = - MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); - if (rc) - goto out; - rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); if (rc) goto out; @@ -271,17 +244,10 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdev.owner = THIS_MODULE; - chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } @@ -290,7 +256,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, return chip; out: - put_device(&chip->devs); put_device(&chip->dev); return ERR_PTR(rc); } @@ -339,14 +304,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) } if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = cdev_device_add(&chip->cdevs, &chip->devs); - if (rc) { - dev_err(&chip->devs, - "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", - dev_name(&chip->devs), MAJOR(chip->devs.devt), - MINOR(chip->devs.devt), rc); - return rc; - } + rc = tpm_devs_add(chip); + if (rc) + goto err_del_cdev; } /* Make the chip available. */ @@ -354,6 +314,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); + return 0; + +err_del_cdev: + cdev_device_del(&chip->cdev, &chip->dev); return rc; } @@ -462,9 +426,7 @@ int tpm_chip_register(struct tpm_chip *chip) tpm_sysfs_add_device(chip); - rc = tpm_bios_log_setup(chip); - if (rc != 0 && rc != -ENODEV) - return rc; + tpm_bios_log_setup(chip); tpm_add_ppi(chip); @@ -514,7 +476,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2) - cdev_device_del(&chip->cdevs, &chip->devs); + tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1a803b0cf980882f4eeb509c99379e5085c4ca6b..43c3f9b876140135939fa77dc5615550ffbc2d9b 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, if (need_locality) { rc = tpm_request_locality(chip, flags); - if (rc < 0) - goto out_no_locality; + if (rc < 0) { + need_locality = false; + goto out_locality; + } } rc = tpm_cmd_ready(chip, flags); if (rc) - goto out; + goto out_locality; rc = tpm2_prepare_space(chip, space, ordinal, buf); if (rc) @@ -493,10 +495,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, if (rc < 0) { if (rc != -EPIPE) dev_err(&chip->dev, - "%s: tpm_send: error %d\n", __func__, rc); + "%s: send(): error %d\n", __func__, rc); goto out; } + /* A sanity check. send() should just return zero on success e.g. + * not the command length. 
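+ * Drivers that still return the command length (the old convention) are
+ * tolerated: the value is warned about below and then normalised to zero.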
+ */ + if (rc > 0) { + dev_warn(&chip->dev, + "%s: send(): invalid value %d\n", __func__, rc); + rc = 0; + } + if (chip->flags & TPM_CHIP_FLAG_IRQ) goto out_recv; @@ -547,14 +558,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); out: - rc = tpm_go_idle(chip, flags); - if (rc) - goto out; + /* may fail but do not override previous error value in rc */ + tpm_go_idle(chip, flags); +out_locality: if (need_locality) tpm_relinquish_locality(chip, flags); -out_no_locality: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); @@ -663,7 +673,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, return len; err = be32_to_cpu(header->return_code); - if (err != 0 && desc) + if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED + && desc) dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); if (err) @@ -1321,7 +1332,8 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) } rlength = be32_to_cpu(tpm_cmd.header.out.length); - if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + + if (rlength < TPM_HEADER_SIZE + + offsetof(struct tpm_getrandom_out, rng_data) + recd) { total = -EFAULT; break; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 83a77a4455380276ef8d4786cac2c330f3242460..177a60e5c6ec9a725c48c1d8e53e7fe0e7c22326 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, { struct tpm_buf tpm_buf; struct tpm_readpubek_out *out; - ssize_t rc; int i; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); @@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, memset(&anti_replay, 0, sizeof(anti_replay)); - rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK); - if (rc) - return rc; + if (tpm_try_get_ops(chip)) + return 0; + + if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK)) + goto out_ops; tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay)); - rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE, + if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE, READ_PUBEK_RESULT_MIN_BODY_SIZE, 0, - "attempting to read the PUBEK"); - if (rc) { - tpm_buf_destroy(&tpm_buf); - return 0; - } + "attempting to read the PUBEK")) + goto out_buf; out = (struct tpm_readpubek_out *)&tpm_buf.data[10]; str += @@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, str += sprintf(str, "\n"); } - rc = str - buf; +out_buf: tpm_buf_destroy(&tpm_buf); - return rc; +out_ops: + tpm_put_ops(chip); + return str - buf; } static DEVICE_ATTR_RO(pubek); @@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); - rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap, - "attempting to determine the number of PCRS", - sizeof(cap.num_pcrs)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap, + "attempting to determine the number of PCRS", + sizeof(cap.num_pcrs))) { + tpm_put_ops(chip); + return 0; + } + num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { rc = tpm_pcr_read_dev(chip, i, digest); @@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, str += sprintf(str, "%02X ", digest[j]); str += sprintf(str, "\n"); } + 
tpm_put_ops(chip); return str - buf; } static DEVICE_ATTR_RO(pcrs); @@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs); static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent enabled state", - sizeof(cap.perm_flags)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent enabled state", + sizeof(cap.perm_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(enabled); @@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled); static ssize_t active_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent active state", - sizeof(cap.perm_flags)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent active state", + sizeof(cap.perm_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(active); @@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active); static ssize_t owned_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, - "attempting to determine the owner state", - sizeof(cap.owned)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, + "attempting to determine the owner state", + sizeof(cap.owned))) + goto out_ops; + rc = sprintf(buf, "%d\n", cap.owned); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(owned); @@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned); static ssize_t temp_deactivated_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, - "attempting to determine the temporary state", - sizeof(cap.stclear_flags)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, + "attempting to determine the temporary state", + sizeof(cap.stclear_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(temp_deactivated); @@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); - cap_t cap; - ssize_t rc; + ssize_t rc = 0; char *str = buf; + cap_t cap; - rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer", - sizeof(cap.manufacturer_id)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + + if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, + "attempting to determine the manufacturer", + sizeof(cap.manufacturer_id))) + goto out_ops; + str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); @@ -223,20 +251,22 @@ static ssize_t 
caps_show(struct device *dev, struct device_attribute *attr, cap.tpm_version_1_2.revMinor); } else { /* Otherwise just use TPM_STRUCT_VER */ - rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version", - sizeof(cap.tpm_version)); - if (rc) - return 0; + if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version", + sizeof(cap.tpm_version))) + goto out_ops; + str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version.Major, cap.tpm_version.Minor, cap.tpm_version.revMajor, cap.tpm_version.revMinor); - } - - return str - buf; +} + rc = str - buf; +out_ops: + tpm_put_ops(chip); + return rc; } static DEVICE_ATTR_RO(caps); @@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpm_chip *chip = to_tpm_chip(dev); - if (chip == NULL) + + if (tpm_try_get_ops(chip)) return 0; chip->ops->cancel(chip); + tpm_put_ops(chip); return count; } static DEVICE_ATTR_WO(cancel); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index f3501d05264f51f87cda0795cd111554eaa4b062..7b8e279c180b164fb2dd6c7942fbab761dca3f04 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -188,6 +188,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; enum tpm_chip_flags { @@ -278,6 +279,9 @@ struct tpm_output_header { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -595,13 +599,15 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, u8 *cmd); int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, u8 *buf, size_t *bufsiz); +int tpm_devs_add(struct tpm_chip *chip); +void tpm_devs_remove(struct tpm_chip *chip); -int tpm_bios_log_setup(struct tpm_chip *chip); +void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); #endif diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c31b490bd41d97ebc94d3b27fc6080ee4acaf4b4..fcf07452ec1aa872fa347a3111281acfea264d81 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -200,7 +200,7 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7); tpm_buf_append_u32(&buf, 1); - tpm_buf_append_u16(&buf, TPM2_ALG_SHA1); + tpm_buf_append_u16(&buf, TPM2_ALG_SHA256); tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN); tpm_buf_append(&buf, (const unsigned char *)pcr_select, sizeof(pcr_select)); @@ -209,7 +209,7 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) res_buf ? 
"attempting to read a pcr value" : NULL); if (rc == 0 && res_buf) { out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE]; - memcpy(res_buf, out->digest, SHA1_DIGEST_SIZE); + memcpy(res_buf, out->digest, SHA256_DIGEST_SIZE); } tpm_buf_destroy(&buf); @@ -329,7 +329,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) &buf.data[TPM_HEADER_SIZE]; recd = min_t(u32, be16_to_cpu(out->size), num_bytes); if (tpm_buf_length(&buf) < - offsetof(struct tpm2_get_random_out, buffer) + recd) { + TPM_HEADER_SIZE + + offsetof(struct tpm2_get_random_out, buffer) + + recd) { err = -EFAULT; goto out; } @@ -936,6 +938,10 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands, GFP_KERNEL); + if (!chip->cc_attrs_tbl) { + rc = -ENOMEM; + goto out; + } rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); if (rc) @@ -954,6 +960,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) if (nr_commands != be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) { + rc = -EFAULT; tpm_buf_destroy(&buf); goto out; } diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index d2e101b32482f83ca85c2ca5c0f5cac85f46efe5..a54bb904b2816583b507a91a94b13ffdfbb84f88 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -43,18 +43,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -276,8 +279,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -414,6 +419,9 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp, if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES) return 0; + if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4) + return -EFAULT; + if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count)) return -EFAULT; @@ -456,7 +464,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -474,9 +482,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -522,8 +529,75 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; } + +/* + * Put the reference to the main device. + */ +static void tpm_devs_release(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); + + /* release the master device reference */ + put_device(&chip->dev); +} + +/* + * Remove the device file for exposed TPM spaces and release the device + * reference. This may also release the reference to the master device. + */ +void tpm_devs_remove(struct tpm_chip *chip) +{ + cdev_device_del(&chip->cdevs, &chip->devs); + put_device(&chip->devs); +} + +/* + * Add a device file to expose TPM spaces. Also take a reference to the + * main device. + */ +int tpm_devs_add(struct tpm_chip *chip) +{ + int rc; + + device_initialize(&chip->devs); + chip->devs.parent = chip->dev.parent; + chip->devs.class = tpmrm_class; + + /* + * Get extra reference on main device to hold on behalf of devs. + * This holds the chip structure while cdevs is in use. The + * corresponding put is in the tpm_devs_release. 
+ */ + get_device(&chip->dev); + chip->devs.release = tpm_devs_release; + chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); + cdev_init(&chip->cdevs, &tpmrm_fops); + chip->cdevs.owner = THIS_MODULE; + + rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); + if (rc) + goto err_put_devs; + + rc = cdev_device_add(&chip->cdevs, &chip->devs); + if (rc) { + dev_err(&chip->devs, + "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", + dev_name(&chip->devs), MAJOR(chip->devs.devt), + MINOR(chip->devs.devt), rc); + goto err_put_devs; + } + + return 0; + +err_put_devs: + put_device(&chip->devs); + + return rc; +} diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 66a14526aaf4c811f7f15d0bd94c195ab2dabd3e..a290b30a0c3570b11db5a81b7369e331adc66c34 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) iowrite8(buf[i], priv->iobase); } - return count; + return 0; } static void tpm_atml_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 36952ef98f904d2d17337560c502042b8242e8c0..763fc7e6c0058825761b3730917c1d44338f3a0f 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct crb_priv *priv = dev_get_drvdata(&chip->dev); unsigned int expected; - /* sanity check */ - if (count < 6) + /* A sanity check that the upper layer wants to get at least the header + * as that is the minimum size for any TPM response. + */ + if (count < TPM_HEADER_SIZE) return -EIO; + /* If this bit is set, according to the spec, the TPM is in + * unrecoverable condition. + */ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR) return -EIO; - memcpy_fromio(buf, priv->rsp, 6); - expected = be32_to_cpup((__be32 *) &buf[2]); - if (expected > count || expected < 6) + /* Read the first 8 bytes in order to get the length of the response. + * We read exactly a quad word in order to make sure that the remaining + * reads will be aligned. + */ + memcpy_fromio(buf, priv->rsp, 8); + + expected = be32_to_cpup((__be32 *)&buf[2]); + if (expected > count || expected < TPM_HEADER_SIZE) return -EIO; - memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6); + memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8); return expected; } diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 95ce2e9ccdc6e2ec40c7d52b0f35d17ba260001e..cc4e642d3180a89977c1fae3634f3968acb5125b 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -65,7 +65,15 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); - return status; + + if (status < 0) + return status; + + /* The upper layer does not support incomplete sends. */ + if (status != len) + return -E2BIG; + + return 0; } static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 9086edc9066b53786708df7b275a382b2b2abf3d..3b4e9672ff6cdb622fd063021c146975120f8fb8 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -26,8 +26,7 @@ #include #include "tpm.h" -/* max. 
buffer size supported by our TPM */ -#define TPM_BUFSIZE 1260 +#define TPM_I2C_INFINEON_BUFSIZE 1260 /* max. number of iterations after I2C NAK */ #define MAX_COUNT 3 @@ -63,11 +62,13 @@ enum i2c_chip_type { UNKNOWN, }; -/* Structure to store I2C TPM specific stuff */ struct tpm_inf_dev { struct i2c_client *client; int locality; - u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ + /* In addition to the data itself, the buffer must fit the 7-bit I2C + * address and the direction bit. + */ + u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1]; struct tpm_chip *chip; enum i2c_chip_type chip_type; unsigned int adapterlimit; @@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, .buf = tpm_dev.buf }; - if (len > TPM_BUFSIZE) + if (len > TPM_I2C_INFINEON_BUFSIZE) return -EINVAL; if (!tpm_dev.client->adapter->algo->master_xfer) @@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) u8 retries = 0; u8 sts = TPM_STS_GO; - if (len > TPM_BUFSIZE) - return -E2BIG; /* command is too long for our tpm, sorry */ + if (len > TPM_I2C_INFINEON_BUFSIZE) + return -E2BIG; if (request_locality(chip, 0) < 0) return -EBUSY; @@ -587,7 +588,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) /* go and do it */ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1); - return len; + return 0; out_err: tpm_tis_i2c_ready(chip); /* The TPM needs some time to clean up here, diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index caa86b19c76dd7007a3975756dec8fe069a31db3..2803080097841ed96f1b7f3dbf177c060517d18d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -35,14 +35,12 @@ #include "tpm.h" /* I2C interface offsets */ -#define TPM_STS 0x00 -#define TPM_BURST_COUNT 0x01 -#define TPM_DATA_FIFO_W 0x20 -#define TPM_DATA_FIFO_R 0x40 -#define TPM_VID_DID_RID 0x60 -/* TPM command header size */ -#define TPM_HEADER_SIZE 10 -#define TPM_RETRY 5 +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +#define TPM_I2C_RETRIES 5 /* * I2C bus device maximum buffer size w/o counting I2C address or command * i.e. 
max size required for I2C write is 34 = addr, command, 32 bytes data @@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) dev_err(dev, "%s() count < header size\n", __func__); return -EIO; } - for (retries = 0; retries < TPM_RETRY; retries++) { + for (retries = 0; retries < TPM_I2C_RETRIES; retries++) { if (retries > 0) { /* if this is not the first trial, set responseRetry */ i2c_nuvoton_write_status(client, @@ -369,6 +367,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; + unsigned long duration; size_t count = 0; int burst_count, bytes2write, retries, rc = -EIO; @@ -455,10 +454,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) return rc; } ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); - rc = i2c_nuvoton_wait_for_data_avail(chip, - tpm_calc_ordinal_duration(chip, - ordinal), - &priv->read_queue); + if (chip->flags & TPM_CHIP_FLAG_TPM2) + duration = tpm2_calc_ordinal_duration(chip, ordinal); + else + duration = tpm_calc_ordinal_duration(chip, ordinal); + + rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue); if (rc) { dev_err(dev, "%s() timeout command duration\n", __func__); i2c_nuvoton_ready(chip); @@ -466,7 +467,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) } dev_dbg(dev, "%s() -> %zd\n", __func__, len); - return len; + return 0; } static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 25f6e2665385d063d47c50ad9f21e25591f828c9..77e47dc5aacc5705b465583d2c02f26f8b393d56 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -141,14 +141,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * tpm_ibmvtpm_send - Send tpm request - * + * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct * @buf: buffer contains data to send * @count: size of buffer * * Return: - * Number of bytes sent or < 0 on error. 
+ * 0 on success, + * -errno on error */ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { @@ -194,7 +194,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) rc = 0; ibmvtpm->tpm_processing_cmd = false; } else - rc = count; + rc = 0; spin_unlock(&ibmvtpm->rtce_lock); return rc; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index d8f10047fbbaf1f2f965a795358e60be859fb759..97f6d4fe0aee14d8a49d88480f7b78bb84462f67 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) for (i = 0; i < count; i++) { wait_and_send(chip, buf[i]); } - return count; + return 0; } static void tpm_inf_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 5d6cce74cd3fa3e7f71cf207454dd1e5e492e560..9bee3c5eb4bf342dc41cfdd29d1a6f01ee4a67ca 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) } outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND); - return count; + return 0; } static void tpm_nsc_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index d2345d9fd7b51390200375f318179e46bef01568..a8e8289e0b358dfdda8c15d3a3aa6c69834d3a2d 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -437,6 +437,9 @@ static void disable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (priv->irq == 0) + return; + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) intmask = 0; @@ -485,7 +488,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) goto out_err; } } - return len; + return 0; out_err: tpm_tis_ready(chip); return rc; @@ -915,7 +918,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; intmask &= ~TPM_GLOBAL_INT_ENABLE; + + rc = request_locality(chip, 0); + if (rc < 0) { + rc = -ENODEV; + goto out_err; + } + tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + release_locality(chip, 0); rc = tpm2_probe(chip); if (rc) @@ -981,12 +992,16 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } + chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); - if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); + + disable_interrupts(chip); + } } else { tpm_tis_probe_irq(chip, intmask); } diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 87a0ce47f2018adc6e0d92e3400f6daed1283e37..ecbb63f8d2318241134b5d515103a4012b5a50ef 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); - int rc = 0; if (count > sizeof(proxy_dev->buffer)) { dev_err(&chip->dev, @@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) wake_up_interruptible(&proxy_dev->wq); - 
return rc; + return 0; } static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 1a0e97a5da5a452d8aa7b486392ebcad4e1654bc..162fb16243d030655dc53100ff2ddef72711f395 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -22,7 +22,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 911475d3680028e71ff75cc70411c1da4939494c..5a327eb7f63ac2855b5322d456c21fac1aa8a4dd 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) return -ETIME; } - return count; + return 0; } static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) @@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv) return -ENOMEM; } - rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref); + rv = xenbus_grant_ring(dev, priv->shr, 1, &gref); if (rv < 0) return rv; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5b5b5d72eab7f47d1082ad980e04790dfe03c028..46188d0a54b8ad1eb5b19e2aaff39e36a0795096 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -75,7 +75,7 @@ struct ports_driver_data { /* All the console devices handled by this driver */ struct list_head consoles; }; -static struct ports_driver_data pdrvdata; +static struct ports_driver_data pdrvdata = { .next_vtermno = 1}; static DEFINE_SPINLOCK(pdrvdata_lock); static DECLARE_COMPLETION(early_console_added); @@ -488,7 +488,7 @@ static struct port_buffer *get_inbuf(struct port *port) buf = virtqueue_get_buf(port->in_vq, &len); if (buf) { - buf->len = len; + buf->len = min_t(size_t, len, buf->size); buf->offset = 0; port->stats.bytes_received += len; } @@ -1349,24 +1349,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols) port->cons.ws.ws_col = cols; } -static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +static int fill_queue(struct virtqueue *vq, spinlock_t *lock) { struct port_buffer *buf; - unsigned int nr_added_bufs; + int nr_added_bufs; int ret; nr_added_bufs = 0; do { buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) - break; + return -ENOMEM; spin_lock_irq(lock); ret = add_inbuf(vq, buf); if (ret < 0) { spin_unlock_irq(lock); free_buf(buf, true); - break; + return ret; } nr_added_bufs++; spin_unlock_irq(lock); @@ -1386,7 +1386,6 @@ static int add_port(struct ports_device *portdev, u32 id) char debugfs_name[16]; struct port *port; dev_t devt; - unsigned int nr_added_bufs; int err; port = kmalloc(sizeof(*port), GFP_KERNEL); @@ -1405,6 +1404,7 @@ static int add_port(struct ports_device *portdev, u32 id) port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; + port->cons.vtermno = 0; port->host_connected = port->guest_connected = false; port->stats = (struct port_stats) { 0 }; @@ -1444,11 +1444,13 @@ static int add_port(struct ports_device *portdev, u32 id) spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); - /* Fill the in_vq with buffers so the host can send us data. 
*/ - nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); - if (!nr_added_bufs) { + /* We can safely ignore ENOSPC because it means + * the queue already has buffers. Buffers are removed + * only by virtcons_remove(), not by unplug_port() + */ + err = fill_queue(port->in_vq, &port->inbuf_lock); + if (err < 0 && err != -ENOSPC) { dev_err(port->dev, "Error allocating inbufs\n"); - err = -ENOMEM; goto free_device; } @@ -1736,7 +1738,7 @@ static void control_work_handler(struct work_struct *work) while ((buf = virtqueue_get_buf(vq, &len))) { spin_unlock(&portdev->c_ivq_lock); - buf->len = len; + buf->len = min_t(size_t, len, buf->size); buf->offset = 0; handle_control_message(vq->vdev, portdev, buf); @@ -2082,14 +2084,11 @@ static int virtcons_probe(struct virtio_device *vdev) INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { - unsigned int nr_added_bufs; - spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); - nr_added_bufs = fill_queue(portdev->c_ivq, - &portdev->c_ivq_lock); - if (!nr_added_bufs) { + err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + if (err < 0) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); /* @@ -2100,7 +2099,7 @@ static int virtcons_probe(struct virtio_device *vdev) VIRTIO_CONSOLE_DEVICE_READY, 0); /* Device was functional: we need full cleanup. */ virtcons_remove(vdev); - return -ENOMEM; + return err; } } else { /* diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index a84c5573cabeae1ede83c2e597e8d310bc98d307..ed344eb717cc440dd1c5cbbd2c4daccba3e7691e 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/ obj-y += imgtec/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_MACH_INGENIC) += ingenic/ +obj-$(CONFIG_ARCH_K3) += keystone/ obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ obj-y += mediatek/ diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c index 61c1071b5180a8168cff7382e31f1d81ee536b49..e9be34b17f3f565e370b8e44230bc2fb4eebcd5e 100644 --- a/drivers/clk/actions/owl-common.c +++ b/drivers/clk/actions/owl-common.c @@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks) struct clk_hw *hw; for (i = 0; i < hw_clks->num; i++) { + const char *name; hw = hw_clks->hws[i]; - if (IS_ERR_OR_NULL(hw)) continue; + name = hw->init->name; ret = devm_clk_hw_register(dev, hw); if (ret) { dev_err(dev, "Couldn't register clock %d - %s\n", - i, hw->init->name); + i, name); return ret; } } diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c index da7bafcfbe706cef631e86f3df8dc86536f5900c..b3eaf654fac980ed01cee5e467ad089cb561f066 100644 --- a/drivers/clk/at91/clk-audio-pll.c +++ b/drivers/clk/at91/clk-audio-pll.c @@ -509,7 +509,7 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np) static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np) { - struct clk_audio_pad *apmc_ck; + struct clk_audio_pmc *apmc_ck; struct clk_init_data init = {}; apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL); diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 33481368740e7dc038fe1e32065e2659d8cec730..ea23002be4de15c7265ee209b0683261ec7d6903 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw, continue; div = DIV_ROUND_CLOSEST(parent_rate, 
req->rate); + if (div > GENERATED_MAX_DIV + 1) + div = GENERATED_MAX_DIV + 1; clk_generated_best_diff(req, parent, parent_rate, div, &best_diff, &best_rate); @@ -282,7 +284,7 @@ static void clk_generated_startup(struct clk_generated *gck) static struct clk_hw * __init at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char *name, const char **parent_names, - u8 num_parents, u8 id, + u8 num_parents, u8 id, bool pll_audio, const struct clk_range *range) { struct clk_generated *gck; @@ -306,6 +308,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, gck->regmap = regmap; gck->lock = lock; gck->range = *range; + gck->audio_pll_allowed = pll_audio; clk_generated_startup(gck); hw = &gck->hw; @@ -331,7 +334,6 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) struct device_node *gcknp; struct clk_range range = CLK_RANGE(0, 0); struct regmap *regmap; - struct clk_generated *gck; num_parents = of_clk_get_parent_count(np); if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX) @@ -348,6 +350,8 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) return; for_each_child_of_node(np, gcknp) { + bool pll_audio = false; + if (of_property_read_u32(gcknp, "reg", &id)) continue; @@ -360,24 +364,14 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) of_at91_get_clk_range(gcknp, "atmel,clk-output-range", &range); + if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") && + (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 || + id == GCK_ID_CLASSD)) + pll_audio = true; + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, parent_names, num_parents, - id, &range); - - gck = to_clk_generated(hw); - - if (of_device_is_compatible(np, - "atmel,sama5d2-clk-generated")) { - if (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 || - gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 || - gck->id == GCK_ID_CLASSD) - gck->audio_pll_allowed = true; - else - gck->audio_pll_allowed = false; - } else { - gck->audio_pll_allowed = false; - } - + id, pll_audio, &range); if (IS_ERR(hw)) continue; diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index c813c27f2e58c6f8e62c523a1a9a8b1d212a96d8..90988e7a5b47fb28e1cd07734135bcff63c0e513 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -27,6 +27,10 @@ #define MOR_KEY_MASK (0xff << 16) +#define clk_main_parent_select(s) (((s) & \ + (AT91_PMC_MOSCEN | \ + AT91_PMC_OSCBYPASS)) ? 
1 : 0) + struct clk_main_osc { struct clk_hw hw; struct regmap *regmap; @@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw) regmap_read(regmap, AT91_PMC_SR, &status); - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN); + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp); } static const struct clk_ops main_osc_ops = { @@ -158,7 +162,7 @@ at91_clk_register_main_osc(struct regmap *regmap, if (bypass) regmap_update_bits(regmap, AT91_CKGR_MOR, MOR_KEY_MASK | - AT91_PMC_MOSCEN, + AT91_PMC_OSCBYPASS, AT91_PMC_OSCBYPASS | AT91_PMC_KEY); hw = &osc->hw; @@ -350,7 +354,10 @@ static int clk_main_probe_frequency(struct regmap *regmap) regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); if (mcfr & AT91_PMC_MAINRDY) return 0; - usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); + if (system_state < SYSTEM_RUNNING) + udelay(MAINF_LOOP_MIN_WAIT); + else + usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); } while (time_before(prep_time, timeout)); return -ETIMEDOUT; @@ -530,7 +537,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - return status & AT91_PMC_MOSCEN ? 1 : 0; + return clk_main_parent_select(status); } static const struct clk_ops sam9x5_main_ops = { @@ -572,7 +579,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, clkmain->hw.init = &init; clkmain->regmap = regmap; regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0; + clkmain->parent = clk_main_parent_select(status); hw = &clkmain->hw; ret = clk_hw_register(NULL, &clkmain->hw); diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 72b6091eb7b944f50b6a2e3d9ceafa8077a851d2..dc7fbc796cb652156cbec8f1483a462948955e40 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, { struct clk_pll *pll = to_clk_pll(hw); + if (!pll->div || !pll->mul) + return 0; + return (parent_rate / pll->div) * (pll->mul + 1); } diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c index ab6ecefc49ad85424a02dda7b3d3342d76598884..43ba2a8b03faf1bceeddbd2870deb4936b2fb451 100644 --- a/drivers/clk/at91/sckc.c +++ b/drivers/clk/at91/sckc.c @@ -74,7 +74,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw) writel(tmp | AT91_SCKC_OSC32EN, sckcr); - usleep_range(osc->startup_usec, osc->startup_usec + 1); + if (system_state < SYSTEM_RUNNING) + udelay(osc->startup_usec); + else + usleep_range(osc->startup_usec, osc->startup_usec + 1); return 0; } @@ -197,7 +200,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw) writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr); - usleep_range(osc->startup_usec, osc->startup_usec + 1); + if (system_state < SYSTEM_RUNNING) + udelay(osc->startup_usec); + else + usleep_range(osc->startup_usec, osc->startup_usec + 1); return 0; } @@ -310,7 +316,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index) writel(tmp, sckcr); - usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1); + if (system_state < SYSTEM_RUNNING) + udelay(SLOWCK_SW_TIME_USEC); + else + usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1); return 0; } @@ -443,7 +452,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw) return 0; } - usleep_range(osc->startup_usec, osc->startup_usec + 1); + if (system_state < SYSTEM_RUNNING) + udelay(osc->startup_usec); + else + usleep_range(osc->startup_usec, osc->startup_usec + 1); osc->prepared = true; return 
0; diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 20724abd38bd132205713cce2e7960324652d12b..7df6b5b1e7ee0e47178f2c54786bf9933f53b0cc 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_factor(clk); return 0; diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index b5c46b3f8764d491da0f8586de8b18eb6d6f4527..6d6475c32ee51612df97f7fcce8c952a004ccdeb 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_rate(clk); return 0; diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index fdf625fb10faa03fc8c394555612fdcdf855ac09..083daa293280e7069b3643274345b36d1524d4aa 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -77,7 +77,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long m, n; u64 ret; - if (!rate || rate >= *parent_rate) + if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate)) return *parent_rate; if (fd->approximation) diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 40af4fbab4d23f24acda1ab07fe1df887131c243..af9cc00d2d920a5565a3ffca2d0176bfc62b3d8b 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -248,7 +248,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev) else clk = clk_register_gpio_gate(&pdev->dev, node->name, parent_names ? parent_names[0] : NULL, gpiod, - 0); + CLK_SET_RATE_PARENT); if (IS_ERR(clk)) return PTR_ERR(clk); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 3a1812f65e5d823242d4428672736ce04f865a33..8abc5c8cb8b8c2535bc355f9c03ed7c9cd234b57 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = { .guts_compat = "fsl,qoriq-device-config-1.0", .init_periph = p5020_init_periph, .cmux_groups = { - &p2041_cmux_grp1, &p2041_cmux_grp2 + &p5020_cmux_grp1, &p5020_cmux_grp2 }, .cmux_to_group = { 0, 1, -1 diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index d44e0eea31ec6de81e471469eee884f37a559fca..4080d4e78e8e404b0af645619442508992c4e06f 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = { }; MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); +#ifdef CONFIG_OF +/* + * The device is instantiated through the parent MFD device and device matching + * is done through platform_device_id. + * + * However, if the device's DT node contains a proper clock compatible and the + * driver is built as a module, then the *module* matching will be done through + * DT aliases. This requires an of_device_id table. At the same time this will + * not change the actual *device* matching, so do not add .of_match_table. 
+ */ +static const struct of_device_id s2mps11_dt_match[] __used = { + { + .compatible = "samsung,s2mps11-clk", + .data = (void *)S2MPS11X, + }, { + .compatible = "samsung,s2mps13-clk", + .data = (void *)S2MPS13X, + }, { + .compatible = "samsung,s2mps14-clk", + .data = (void *)S2MPS14X, + }, { + .compatible = "samsung,s5m8767-clk", + .data = (void *)S5M8767X, + }, { + /* Sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, s2mps11_dt_match); +#endif + static struct platform_driver s2mps11_clk_driver = { .driver = { .name = "s2mps11-clk", diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c index a907555b2a3d8f5c7882e5be1e080eed5fda1767..bf3b6a4c78d0c06390fffe3f8b2c49f8afba9387 100644 --- a/drivers/clk/clk-stm32mp1.c +++ b/drivers/clk/clk-stm32mp1.c @@ -121,7 +121,7 @@ static const char * const cpu_src[] = { }; static const char * const axi_src[] = { - "ck_hsi", "ck_hse", "pll2_p", "pll3_p" + "ck_hsi", "ck_hse", "pll2_p" }; static const char * const per_src[] = { @@ -225,19 +225,19 @@ static const char * const usart6_src[] = { }; static const char * const fdcan_src[] = { - "ck_hse", "pll3_q", "pll4_q" + "ck_hse", "pll3_q", "pll4_q", "pll4_r" }; static const char * const sai_src[] = { - "pll4_q", "pll3_q", "i2s_ckin", "ck_per" + "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r" }; static const char * const sai2_src[] = { - "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb" + "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r" }; static const char * const adc12_src[] = { - "pll4_q", "ck_per" + "pll4_r", "ck_per", "pll3_q" }; static const char * const dsi_src[] = { @@ -269,7 +269,7 @@ static const struct clk_div_table axi_div_table[] = { static const struct clk_div_table mcu_div_table[] = { { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 }, - { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 }, + { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 }, { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 }, { 0 }, }; @@ -1286,10 +1286,11 @@ _clk_stm32_register_composite(struct device *dev, MGATE_MP1(_id, _name, _parent, _flags, _mgate) #define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\ - COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\ - _MGATE_MP1(_mgate),\ - _MMUX(_mmux),\ - _NO_DIV) + COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\ + CLK_SET_RATE_NO_REPARENT | _flags,\ + _MGATE_MP1(_mgate),\ + _MMUX(_mmux),\ + _NO_DIV) enum { G_SAI1, @@ -1655,8 +1656,8 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { static const struct clock_config stm32mp1_clock_cfg[] = { /* Oscillator divider */ - DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2, - CLK_DIVIDER_READ_ONLY), + DIV(NO_ID, "clk-hsi-div", "clk-hsi", CLK_DIVIDER_POWER_OF_TWO, + RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY), /* External / Internal Oscillators */ GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0), @@ -1952,7 +1953,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = { MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU), MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12), - COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE, + COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE | + CLK_SET_RATE_NO_REPARENT, _NO_GATE, _MMUX(M_ETHCK), _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)), diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c index 25dfe050ae9f8d3dae9cb393a94590dfad52c7dd..4bd1b32a4f93f9c123409718792a766eeec7c691 100644 --- a/drivers/clk/clk-twl6040.c +++ 
b/drivers/clk/clk-twl6040.c @@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw) return pdmclk->enabled; } +static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk, + unsigned int reg) +{ + const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */ + int ret; + + ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask); + if (ret < 0) + return ret; + + ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask); + if (ret < 0) + return ret; + + return 0; +} + +/* + * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At + * Cold Temperature". This affects cold boot and deeper idle states it + * seems. The workaround consists of resetting HPPLL and LPPLL. + */ +static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk) +{ + int ret; + + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL); + if (ret) + return ret; + + ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL); + if (ret) + return ret; + + return 0; +} + static int twl6040_pdmclk_prepare(struct clk_hw *hw) { struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk, @@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw) int ret; ret = twl6040_power(pdmclk->twl6040, 1); - if (!ret) - pdmclk->enabled = 1; + if (ret) + return ret; + + ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk); + if (ret) + goto out_err; + + pdmclk->enabled = 1; + + return 0; + +out_err: + dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret); + twl6040_power(pdmclk->twl6040, 0); return ret; } diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index decffb3826ece8be3206c8c9da2d535ddcbf5be1..a738af893532ffdedcb00dd8f9e5554aa01fbcc1 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) src = VC5_PRIM_SRC_SHDN_EN_XTAL; - if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) + else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) src = VC5_PRIM_SRC_SHDN_EN_CLKIN; + else /* Invalid; should have been caught by vc5_probe() */ + return -EINVAL; } return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d31055ae6ec6f94c23409df9f47936f09b54c1bf..5413ffaf02e234b1c87015339305ccdfd416db76 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2687,7 +2687,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"protect_count\": %d,", c->protect_count); seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); - seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); + seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); seq_printf(s, "\"duty_cycle\": %u", clk_core_get_scaled_duty_cycle(c, 100000)); } diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index f404199596563e34f7b62eead4fc539f084ca79b..794eeff0d5d2d548eb7fa91be8131e48d7299ca4 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = { "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, }, { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2", "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, }, + /* + * clk_gate_ufs_subsys is a system bus clock, mark it as critical + * clock and keep it on for 
system suspend and resume. + */ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus", - CLK_SET_RATE_PARENT, 0x50, 21, 0, }, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, }, { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus", CLK_SET_RATE_PARENT, 0x50, 28, 0, }, { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus", diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c index 2a5015c736ce6d604d371c0bcd2a894d1ae4015f..43e82fa644226894bb520f4134740f98577ac297 100644 --- a/drivers/clk/hisilicon/reset.c +++ b/drivers/clk/hisilicon/reset.c @@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) return NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rstc->membase = devm_ioremap(&pdev->dev, - res->start, resource_size(res)); - if (!rstc->membase) + rstc->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rstc->membase)) return NULL; spin_lock_init(&rstc->lock); diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c index 15af423cc0c907c89affc1c48f910723dbd516ce..dddda45127a809288dc8c4c5bf4c7ba4acbdc624 100644 --- a/drivers/clk/imgtec/clk-boston.c +++ b/drivers/clk/imgtec/clk-boston.c @@ -73,27 +73,40 @@ static void __init clk_boston_setup(struct device_node *np) hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq); if (IS_ERR(hw)) { pr_err("failed to register input clock: %ld\n", PTR_ERR(hw)); - return; + goto fail_input; } onecell->hws[BOSTON_CLK_INPUT] = hw; hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq); if (IS_ERR(hw)) { pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw)); - return; + goto fail_sys; } onecell->hws[BOSTON_CLK_SYS] = hw; hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq); if (IS_ERR(hw)) { pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw)); - return; + goto fail_cpu; } onecell->hws[BOSTON_CLK_CPU] = hw; err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell); - if (err) + if (err) { pr_err("failed to add DT provider: %d\n", err); + goto fail_clk_add; + } + + return; + +fail_clk_add: + clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_CPU]); +fail_cpu: + clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_SYS]); +fail_sys: + clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_INPUT]); +fail_input: + kfree(onecell); } /* diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c index 99036527eb0d8f1fc7d418d1f284dbe172fc21b3..e695622c5aa56e2407959f25e6e3d191c0086f56 100644 --- a/drivers/clk/imx/clk-busy.c +++ b/drivers/clk/imx/clk-busy.c @@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = { struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift, - const char **parent_names, int num_parents) + const char * const *parent_names, int num_parents) { struct clk_busy_mux *busy; struct clk *clk; diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c index c9b327e0a8dd9b2fd6f9c939f762c6dd46ae9917..44817c1b0b88cc740f3fbfad68b6be38ed295918 100644 --- a/drivers/clk/imx/clk-fixup-mux.c +++ b/drivers/clk/imx/clk-fixup-mux.c @@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = { }; struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, + u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)) { struct clk_fixup_mux *fixup_mux; diff --git 
a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 8c7c2fcb8d9495a6597fa7020d3b297278ec961e..c509324f63385c3690540bf4966865226fbbdbdb 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) * lvds1_gate and lvds2_gate are pseudo-gates. Both can be * independently configured as clock inputs or outputs. We treat * the "output_enable" bit as a gate, even though it's really just - * enabling clock output. + * enabling clock output. Initially the gate bits are cleared, as + * otherwise the exclusive configuration gets locked in the setup done + * by software running before the clock driver, with no way to change + * it. */ + writel(readl(base + 0x160) & ~0x3c00, base + 0x160); clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12)); clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13)); diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index eb6bcbf345a3bac4686f57343555a258f036e09b..390e3e0ecc4549b26617478df84cffcd836087f1 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -17,6 +17,8 @@ #include "clk.h" +#define CCDR 0x4 +#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17) #define CCSR 0xc #define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2) #define CACRR 0x10 @@ -409,6 +411,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); + /* Ensure the MMDC CH0 handshake is bypassed */ + writel_relaxed(readl_relaxed(base + CCDR) | + BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); + imx_check_clocks(clks, ARRAY_SIZE(clks)); clk_data.clks = clks; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 8076ec040f375f2715c227d64f909e562ae178b5..e65c1115d97883c452887e411a4aa3f6e029d5ca 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift, - const char **parent_names, int num_parents); + const char * const *parent_names, int num_parents); struct clk *imx_clk_fixup_divider(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width, void (*fixup)(u32 *val)); struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, + u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)); static inline struct clk *imx_clk_fixed(const char *name, int rate) @@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate) } static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg, @@ -192,7 +193,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent, } static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { 
return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT, reg, shift, @@ -200,7 +202,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, } static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parents, int num_parents) + u8 shift, u8 width, const char * const *parents, + int num_parents) { return clk_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE, @@ -208,8 +211,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg, } static inline struct clk *imx_clk_mux_flags(const char *name, - void __iomem *reg, u8 shift, u8 width, const char **parents, - int num_parents, unsigned long flags) + void __iomem *reg, u8 shift, u8 width, + const char * const *parents, int num_parents, + unsigned long flags) { return clk_register_mux(NULL, name, parents, num_parents, flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0, diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 5ef7d9ba2195d42b19a03b3bcb5f7ee12b4c26bc..b40160eb337290cdfb97440b05a9f1a340800849 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -426,16 +426,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate, struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); struct ingenic_cgu *cgu = ingenic_clk->cgu; const struct ingenic_cgu_clk_info *clk_info; - long rate = *parent_rate; + unsigned int div = 1; clk_info = &cgu->clock_info[ingenic_clk->idx]; if (clk_info->type & CGU_CLK_DIV) - rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); + div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); else if (clk_info->type & CGU_CLK_FIXDIV) - rate /= clk_info->fixdiv.div; + div = clk_info->fixdiv.div; - return rate; + return DIV_ROUND_UP(*parent_rate, div); } static int @@ -455,7 +455,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate, if (clk_info->type & CGU_CLK_DIV) { div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate); - rate = parent_rate / div; + rate = DIV_ROUND_UP(parent_rate, div); if (rate != req_rate) return -EINVAL; diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index 502bcbb61b047a5331b56d7bdfafaeee2e3cb61b..e12716d8ce3cf13ef002a8159a99568178b625cc 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info { * @reg: offset of the divider control register within the CGU * @shift: number of bits to left shift the divide value by (ie. the index of * the lowest bit of the divide value within its control register) - * @div: number of bits to divide the divider value by (i.e. if the + * @div: number to divide the divider value by (i.e. 
if the * effective divider value is the value written to the register * multiplied by some constant) * @bits: the size of the divide value in bits diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig index 7e9f0176578a6d09e2170105943d0c6505602f69..b04927d06cd1033924483ff33318d1e82f439f98 100644 --- a/drivers/clk/keystone/Kconfig +++ b/drivers/clk/keystone/Kconfig @@ -7,7 +7,7 @@ config COMMON_CLK_KEYSTONE config TI_SCI_CLK tristate "TI System Control Interface clock drivers" - depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF + depends on (ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST) && OF depends on TI_SCI_PROTOCOL default ARCH_KEYSTONE ---help--- diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c index 16e56772d280ba29d56ae18ec90e626a77c28906..6c7eaa21e662b8343e6fc5d6269fbd6fb3460215 100644 --- a/drivers/clk/mediatek/clk-cpumux.c +++ b/drivers/clk/mediatek/clk-cpumux.c @@ -53,7 +53,7 @@ static const struct clk_ops clk_cpumux_ops = { .set_parent = clk_cpumux_set_parent, }; -static struct clk __init * +static struct clk * mtk_clk_register_cpumux(const struct mtk_composite *mux, struct regmap *regmap) { @@ -84,9 +84,9 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux, return clk; } -int __init mtk_clk_register_cpumuxes(struct device_node *node, - const struct mtk_composite *clks, int num, - struct clk_onecell_data *clk_data) +int mtk_clk_register_cpumuxes(struct device_node *node, + const struct mtk_composite *clks, int num, + struct clk_onecell_data *clk_data) { int i; struct clk *clk; diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index 4dda8988b2f091a665860ae5b2f5dc4cc0492a23..00e52a94e34fd90eb6d914e3476ad2714815ff6f 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -688,6 +688,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); @@ -755,6 +757,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); @@ -781,6 +785,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) @@ -909,6 +915,8 @@ static int mtk_pericfg_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_PERI_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c index 5702bc974ed9904756fd069509416034d6bbeaf9..1ee45f32c1d4eeadbfbb0035155e82d6f94aee9d 100644 --- a/drivers/clk/mediatek/clk-mt6797.c +++ b/drivers/clk/mediatek/clk-mt6797.c @@ -396,6 +396,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs), clk_data); @@ -554,6 +556,8 @@ static void mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { 
infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); @@ -578,6 +582,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c index 92f7e32770c6a9905c79b2a84f2075c0dcd2be05..a8aecef1ba89ac1589fb248ffa120e24d8de7e6a 100644 --- a/drivers/clk/mediatek/clk-mt7622.c +++ b/drivers/clk/mediatek/clk-mt7622.c @@ -513,7 +513,7 @@ static const struct mtk_gate peri_clks[] = { GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2), }; -static struct mtk_composite infra_muxes[] __initdata = { +static struct mtk_composite infra_muxes[] = { MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents, 0x000, 2, 2), }; @@ -652,7 +652,7 @@ static int mtk_topckgen_init(struct platform_device *pdev) return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); } -static int __init mtk_infrasys_init(struct platform_device *pdev) +static int mtk_infrasys_init(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct clk_onecell_data *clk_data; diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index f54e4015b0b1f3c005e7d82b5fdffa6dc826a8ad..18842d66031766f90206f7b661edb814f9f8d6b1 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, return ((unsigned long)vco + postdiv - 1) / postdiv; } +static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + +static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll) +{ + u32 r; + + if (pll->tuner_en_addr) { + r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit); + writel(r, pll->tuner_en_addr); + } else if (pll->tuner_addr) { + r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } +} + static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, int postdiv) { @@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; + /* disable tuner */ + __mtk_pll_tuner_disable(pll); + /* set postdiv */ val = readl(pll->pd_addr); val &= ~(POSTDIV_MASK << pll->data->pd_shift); @@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, if (pll->tuner_addr) writel(con1 + 1, pll->tuner_addr); + /* restore tuner_en */ + __mtk_pll_tuner_enable(pll); + if (pll_en) udelay(20); } @@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw) r |= pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_enable(pll); udelay(20); @@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw) writel(r, pll->base_addr + 
REG_CON0); } - if (pll->tuner_en_addr) { - r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit); - writel(r, pll->tuner_en_addr); - } else if (pll->tuner_addr) { - r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; - writel(r, pll->tuner_addr); - } + __mtk_pll_tuner_disable(pll); r = readl(pll->base_addr + REG_CON0); r &= ~CON0_BASE_EN; diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 00ce62ad6416cb5514d792db648e90e42c76e764..02229d051d77879f0ebf19842c0e13e8695c39b7 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -96,7 +96,6 @@ static struct clk_regmap axg_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -319,6 +318,7 @@ static struct clk_regmap axg_fclk_div2 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div2_div" }, .num_parents = 1, + .flags = CLK_IS_CRITICAL, }, }; @@ -343,6 +343,18 @@ static struct clk_regmap axg_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. + * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; @@ -700,12 +712,14 @@ static struct clk_regmap axg_pcie_mux = { .offset = HHI_PCIE_PLL_CNTL6, .mask = 0x1, .shift = 2, + /* skip the parent mpll3, reserved for debug */ + .table = (u32[]){ 1 }, }, .hw.init = &(struct clk_init_data){ .name = "pcie_mux", .ops = &clk_regmap_mux_ops, - .parent_names = (const char *[]){ "mpll3", "pcie_pll" }, - .num_parents = 2, + .parent_names = (const char *[]){ "pcie_pll" }, + .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 86d3ae58e84c280c8ba982cf52e6848fd743db4e..b039909e03cf855e53874f6d871744b1dc715377 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -213,7 +213,6 @@ static struct clk_regmap gxbb_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -276,6 +275,10 @@ static struct clk_regmap gxbb_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "hdmi_pll_pre_mult" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -292,6 +295,12 @@ static struct clk_regmap gxl_hdmi_pll = { .shift = 9, .width = 5, }, + /* + * On gxl, there is a register shift due to + * HHI_HDMI_PLL_CNTL1 which does not exist on gxbb, + * so we use the HHI_HDMI_PLL_CNTL2 define from GXBB + * instead which is defined at the same offset. 
+ */ .frac = { /* * On gxl, there is a register shift due to @@ -301,7 +310,7 @@ static struct clk_regmap gxl_hdmi_pll = { */ .reg_off = HHI_HDMI_PLL_CNTL + 4, .shift = 0, - .width = 12, + .width = 10, }, .od = { .reg_off = HHI_HDMI_PLL_CNTL + 8, @@ -334,6 +343,10 @@ static struct clk_regmap gxl_hdmi_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -371,7 +384,6 @@ static struct clk_regmap gxbb_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -418,7 +430,6 @@ static struct clk_regmap gxbb_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -472,7 +483,6 @@ static struct clk_regmap gxl_gp0_pll = { .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -522,6 +532,18 @@ static struct clk_regmap gxbb_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * FIXME: + * This clock, as fdiv2, is used by the SCPI FW and is required + * by the platform to operate correctly. + * Until the following condition are met, we need this clock to + * be marked as critical: + * a) The SCPI generic driver claims and enable all the clocks + * it needs + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; @@ -820,6 +842,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = { .ops = &clk_regmap_divider_ops, .parent_names = (const char *[]){ "sar_adc_clk_sel" }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1555,6 +1578,7 @@ static struct clk_regmap gxbb_vdec_1_div = { .offset = HHI_VDEC_CLK_CNTL, .shift = 0, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_1_div", @@ -1600,6 +1624,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = { .offset = HHI_VDEC2_CLK_CNTL, .shift = 16, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_hevc_div", diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 7447d96a265f72e7d4b4277c29f5839df3ff43b6..e90af556ff90face1ba236755efc9880dbfaaa8a 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -132,7 +132,6 @@ static struct clk_regmap meson8b_fixed_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -145,7 +144,7 @@ static struct clk_regmap meson8b_vid_pll = { }, .n = { .reg_off = HHI_VID_PLL_CNTL, - .shift = 9, + .shift = 10, .width = 5, }, .od = { @@ -169,7 +168,6 @@ static struct clk_regmap meson8b_vid_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -207,7 +205,6 @@ static struct clk_regmap meson8b_sys_pll = { .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -571,13 +568,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = { }; static const struct clk_div_table cpu_scale_table[] = { - { .val = 2, .div = 
4 }, - { .val = 3, .div = 6 }, - { .val = 4, .div = 8 }, - { .val = 5, .div = 10 }, - { .val = 6, .div = 12 }, - { .val = 7, .div = 14 }, - { .val = 8, .div = 16 }, + { .val = 1, .div = 4 }, + { .val = 2, .div = 6 }, + { .val = 3, .div = 8 }, + { .val = 4, .div = 10 }, + { .val = 5, .div = 12 }, + { .val = 6, .div = 14 }, + { .val = 7, .div = 16 }, + { .val = 8, .div = 18 }, { /* sentinel */ }, }; @@ -585,7 +583,7 @@ static struct clk_regmap meson8b_cpu_scale_div = { .data = &(struct clk_regmap_div_data){ .offset = HHI_SYS_CPU_CLK_CNTL1, .shift = 20, - .width = 9, + .width = 10, .table = cpu_scale_table, .flags = CLK_DIVIDER_ALLOW_ZERO, }, @@ -598,20 +596,27 @@ static struct clk_regmap meson8b_cpu_scale_div = { }, }; +static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 }; static struct clk_regmap meson8b_cpu_scale_out_sel = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_SYS_CPU_CLK_CNTL0, .mask = 0x3, .shift = 2, + .table = mux_table_cpu_scale_out_sel, }, .hw.init = &(struct clk_init_data){ .name = "cpu_scale_out_sel", .ops = &clk_regmap_mux_ro_ops, + /* + * NOTE: We are skipping the parent with value 0x2 (which is + * "cpu_div3") because it results in a duty cycle of 33% which + * makes the system unstable and can result in a lockup of the + * whole system. + */ .parent_names = (const char *[]) { "cpu_in_sel", "cpu_div2", - "cpu_div3", "cpu_scale_div" }, - .num_parents = 4, + .num_parents = 3, .flags = CLK_SET_RATE_PARENT, }, }; @@ -629,7 +634,8 @@ static struct clk_regmap meson8b_cpu_clk = { "cpu_scale_out_sel" }, .num_parents = 2, .flags = (CLK_SET_RATE_PARENT | - CLK_SET_RATE_NO_REPARENT), + CLK_SET_RATE_NO_REPARENT | + CLK_IS_CRITICAL), }, }; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 0fc75c39595708f0d5c6296fcbeea4e3d61f5731..d083b860f08333ad1caf8082664efdeaf1e0099d 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { /* The gate clocks has mux parent. 
*/ {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index ad8d483a35cd5c16d3cbae4019dd345e3c7bb69b..ca7d37e2c7be6bb6aba2a7d4e1459d8c4369fcca 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c @@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id, pr_err("CLK %d has invalid pointer %p\n", id, clk); return; } - if (id > unit->nr_clks) { + if (id >= unit->nr_clks) { pr_err("CLK %d is invalid\n", id); return; } diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 75bf7b8f282fc4e4cc0e342e16d5594b7cba1274..0153c76d4a20a56122d9161d511cf6512923d519 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -202,11 +202,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec, unsigned int idx = clkspec->args[1]; if (type == CP110_CLK_TYPE_CORE) { - if (idx > CP110_MAX_CORE_CLOCKS) + if (idx >= CP110_MAX_CORE_CLOCKS) return ERR_PTR(-EINVAL); return clk_data->hws[idx]; } else if (type == CP110_CLK_TYPE_GATABLE) { - if (idx > CP110_MAX_GATABLE_CLOCKS) + if (idx >= CP110_MAX_GATABLE_CLOCKS) return ERR_PTR(-EINVAL); return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx]; } diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c index d40b63e7bbce906850e2628c47c77b0836742209..b44c4cf8011a345724f8352164f7271575e21681 100644 --- a/drivers/clk/pxa/clk-pxa27x.c +++ b/drivers/clk/pxa/clk-pxa27x.c @@ -463,6 +463,7 @@ struct dummy_clk { }; static struct dummy_clk dummy_clks[] __initdata = { DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"), + DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"), DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"), DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"), }; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 52208d4165f432ac7398e423139af52f84722844..51b2388d80ac92f561ddadaee0dd522dedf628e3 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -206,6 +206,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, if (clk_flags & CLK_SET_RATE_PARENT) { rate = f->freq; if (f->pre_div) { + if (!rate) + rate = req->rate; rate /= 2; rate *= f->pre_div + 1; } diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index db9b2471ac401fbb12c02ffb537c64fe718f430d..bfb6d6065a90c16bd9ac43f74365047562e0c2c9 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, 
unsigned long rate) if (!f) return NULL; + if (!f->freq) + return f; + for (; f->freq; f++) if (rate <= f->freq) return f; diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 505c6263141d9e31fd7ff325976aa569dab7adfa..9a6f0c1159aa9fbc36295f775374c108d043426d 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -985,6 +985,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + { } }; static struct clk_rcg2 pcie0_aux_clk_src = { @@ -1090,6 +1091,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(308570000, P_GPLL6, 3.5, 0, 0), + { } }; static struct clk_rcg2 sdcc1_ice_core_clk_src = { diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 9f0ae403d5f53eb54ede64891e4a6cc573f04e6e..4e23973b6cd161354193e61cd6bf6b65446f0d89 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -1101,6 +1101,7 @@ static struct clk_rcg2 ufs_axi_clk_src = { static const struct freq_tbl ftbl_usb30_master_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), { } @@ -2401,7 +2402,7 @@ static struct clk_branch gcc_ufs_phy_aux_clk = { static struct clk_branch gcc_ufs_rx_symbol_0_clk = { .halt_reg = 0x75014, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x75014, .enable_mask = BIT(0), @@ -2414,7 +2415,7 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = { static struct clk_branch gcc_ufs_rx_symbol_1_clk = { .halt_reg = 0x7605c, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x7605c, .enable_mask = BIT(0), @@ -2427,7 +2428,7 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = { static struct clk_branch gcc_ufs_tx_symbol_0_clk = { .halt_reg = 0x75010, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x75010, .enable_mask = BIT(0), @@ -2742,25 +2743,25 @@ static struct gdsc *gcc_msm8998_gdscs[] = { }; static const struct qcom_reset_map gcc_msm8998_resets[] = { - [GCC_BLSP1_QUP1_BCR] = { 0x102400 }, - [GCC_BLSP1_QUP2_BCR] = { 0x110592 }, - [GCC_BLSP1_QUP3_BCR] = { 0x118784 }, - [GCC_BLSP1_QUP4_BCR] = { 0x126976 }, - [GCC_BLSP1_QUP5_BCR] = { 0x135168 }, - [GCC_BLSP1_QUP6_BCR] = { 0x143360 }, - [GCC_BLSP2_QUP1_BCR] = { 0x155648 }, - [GCC_BLSP2_QUP2_BCR] = { 0x163840 }, - [GCC_BLSP2_QUP3_BCR] = { 0x172032 }, - [GCC_BLSP2_QUP4_BCR] = { 0x180224 }, - [GCC_BLSP2_QUP5_BCR] = { 0x188416 }, - [GCC_BLSP2_QUP6_BCR] = { 0x196608 }, - [GCC_PCIE_0_BCR] = { 0x438272 }, - [GCC_PDM_BCR] = { 0x208896 }, - [GCC_SDCC2_BCR] = { 0x81920 }, - [GCC_SDCC4_BCR] = { 0x90112 }, - [GCC_TSIF_BCR] = { 0x221184 }, - [GCC_UFS_BCR] = { 0x479232 }, - [GCC_USB_30_BCR] = { 0x61440 }, + [GCC_BLSP1_QUP1_BCR] = { 0x19000 }, + [GCC_BLSP1_QUP2_BCR] = { 0x1b000 }, + [GCC_BLSP1_QUP3_BCR] = { 0x1d000 }, + [GCC_BLSP1_QUP4_BCR] = { 0x1f000 }, + [GCC_BLSP1_QUP5_BCR] = { 0x21000 }, + [GCC_BLSP1_QUP6_BCR] = { 0x23000 }, + [GCC_BLSP2_QUP1_BCR] = { 0x26000 }, + [GCC_BLSP2_QUP2_BCR] = { 0x28000 }, + [GCC_BLSP2_QUP3_BCR] = { 0x2a000 }, + [GCC_BLSP2_QUP4_BCR] = { 0x2c000 }, + [GCC_BLSP2_QUP5_BCR] = { 0x2e000 }, + [GCC_BLSP2_QUP6_BCR] = { 0x30000 }, + [GCC_PCIE_0_BCR] = { 0x6b000 }, + [GCC_PDM_BCR] = { 0x33000 }, + [GCC_SDCC2_BCR] = { 0x14000 }, + [GCC_SDCC4_BCR] = { 
0x16000 }, + [GCC_TSIF_BCR] = { 0x36000 }, + [GCC_UFS_BCR] = { 0x75000 }, + [GCC_USB_30_BCR] = { 0xf000 }, }; static const struct regmap_config gcc_msm8998_regmap_config = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index fa1a196350f1542ab5acf95528b7b329ecaddb98..ada3e4aeb38f96d039dac687a44bd80bf8e28d2b 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -131,8 +131,8 @@ static const char * const gcc_parent_names_6[] = { "core_bi_pll_test_se", }; -static const char * const gcc_parent_names_7[] = { - "bi_tcxo", +static const char * const gcc_parent_names_7_ao[] = { + "bi_tcxo_ao", "gpll0", "gpll0_out_even", "core_bi_pll_test_se", @@ -144,6 +144,12 @@ static const char * const gcc_parent_names_8[] = { "core_bi_pll_test_se", }; +static const char * const gcc_parent_names_8_ao[] = { + "bi_tcxo_ao", + "gpll0", + "core_bi_pll_test_se", +}; + static const struct parent_map gcc_parent_map_10[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -226,7 +232,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_ahb_clk_src", - .parent_names = gcc_parent_names_7, + .parent_names = gcc_parent_names_7_ao, .num_parents = 4, .ops = &clk_rcg2_ops, }, @@ -245,7 +251,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_rbcpr_clk_src", - .parent_names = gcc_parent_names_8, + .parent_names = gcc_parent_names_8_ao, .num_parents = 3, .ops = &clk_rcg2_ops, }, @@ -641,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { .name = "gcc_sdcc2_apps_clk_src", .parent_names = gcc_parent_names_10, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -665,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { .name = "gcc_sdcc4_apps_clk_src", .parent_names = gcc_parent_names_0, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index 4ce1d7c88377fb75d7b7d2223f833fcb6614e3ef..36a40b0ddfa1f99a1a81454dadc4eaed6a56e818 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -341,6 +341,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(333430000, P_MMPLL1, 3.5, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -365,6 +366,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(228570000, P_MMPLL0, 3.5, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index 91818516c3e0c3c0b1b476c06714a18d1b15225a..124d21f19e2c734f36338e171b60bb5792d28478 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -291,6 +291,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -315,6 +316,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index 
e82adcb16a52a3b790c34fc2a49079615a7bf907..45d94fb9703d27e818a8fb25ef869aa8d74eccab 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np) return; pd->name = np->name; - pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | + GENPD_FLAG_ACTIVE_WAKEUP; pd->attach_dev = cpg_mstp_attach_dev; pd->detach_dev = cpg_mstp_detach_dev; pm_genpd_init(pd, &pm_domain_always_on_gov, false); diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index 9e14f1486fbb93ccb887c9b85cae0a61757741f3..81569767025cc4f23458c18848d536c8c671e3a6 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -175,8 +175,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4), DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4), DEF_MOD("csi40", 716, R8A77990_CLK_CSI0), - DEF_MOD("du1", 723, R8A77990_CLK_S2D1), - DEF_MOD("du0", 724, R8A77990_CLK_S2D1), + DEF_MOD("du1", 723, R8A77990_CLK_S1D1), + DEF_MOD("du0", 724, R8A77990_CLK_S1D1), DEF_MOD("lvds", 727, R8A77990_CLK_S2D1), DEF_MOD("vin5", 806, R8A77990_CLK_S1D2), diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index ea4cafbe6e851aca89c24f79b4912b1a2278d774..9e16931e6f28aa7de918ea65a143fbd3e53893b4 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -141,8 +141,8 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { DEF_MOD("vspbs", 627, R8A77995_CLK_S0D1), DEF_MOD("ehci0", 703, R8A77995_CLK_S3D2), DEF_MOD("hsusb", 704, R8A77995_CLK_S3D2), - DEF_MOD("du1", 723, R8A77995_CLK_S2D1), - DEF_MOD("du0", 724, R8A77995_CLK_S2D1), + DEF_MOD("du1", 723, R8A77995_CLK_S1D1), + DEF_MOD("du0", 724, R8A77995_CLK_S1D1), DEF_MOD("lvds", 727, R8A77995_CLK_S2D1), DEF_MOD("vin7", 804, R8A77995_CLK_S1D2), DEF_MOD("vin6", 805, R8A77995_CLK_S1D2), diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index a0b6ecdc63dd3bba3af1675da498c6429fb99904..6d2b56891559725d3d22bf53444b5854feb37a82 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw, * several uarts attached to this divider, and changing this impacts * everyone. 
*/ - if (clk->index == R9A06G032_DIV_UART) { + if (clk->index == R9A06G032_DIV_UART || + clk->index == R9A06G032_DIV_P2_PG) { pr_devel("%s div uart hack!\n", __func__); return clk_get_rate(hw->clk); } diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 628b63b85d3f09c5cfdf37dea4b565953e41264a..9ace7d39cd1b53bde1b204fe80a80d9bcfaa318f 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -361,7 +361,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, struct sd_clock *clock; struct clk *clk; unsigned int i; - u32 sd_fc; + u32 val; clock = kzalloc(sizeof(*clock), GFP_KERNEL); if (!clock) @@ -378,17 +378,9 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core, clock->div_table = cpg_sd_div_table; clock->div_num = ARRAY_SIZE(cpg_sd_div_table); - sd_fc = readl(clock->csn.reg) & CPG_SD_FC_MASK; - for (i = 0; i < clock->div_num; i++) - if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK)) - break; - - if (WARN_ON(i >= clock->div_num)) { - kfree(clock); - return ERR_PTR(-EINVAL); - } - - clock->cur_div_idx = i; + val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK; + val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK); + writel(val, clock->csn.reg); clock->div_max = clock->div_table[0].div; clock->div_min = clock->div_max; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index f4b013e9352d9efca6260c5ca763e8fcc53eb6f5..d7a2ad6173694c2f3dfaf54812356da11299c78e 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev, genpd = &pd->genpd; genpd->name = np->name; - genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON | + GENPD_FLAG_ACTIVE_WAKEUP; genpd->attach_dev = cpg_mssr_attach_dev; genpd->detach_dev = cpg_mssr_detach_dev; pm_genpd_init(genpd, &pm_domain_always_on_gov, false); @@ -535,17 +536,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev, unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); /* Reset module */ - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); /* Wait for at least one cycle of the RCLK clock (@ ca. 
32 kHz) */ udelay(35); @@ -562,16 +557,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id) unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); return 0; } diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index e8075359366b0d9ef9cf84611d6c36b19fc22c4a..ebce5260068b72a9e2013e3aeaddc61f7cf252d8 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw, static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) { struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); - int num_parents = clk_hw_get_num_parents(hw); u32 val; val = clk_readl(ddrclk->reg_base + ddrclk->mux_offset) >> ddrclk->mux_shift; val &= GENMASK(ddrclk->mux_width - 1, 0); - if (val >= num_parents) - return -EINVAL; - return val; } diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 026a26bb702d9b9f6ab53a6ffaa8864149dfc01b..dbec84238ecdc7936ddc0c098dcb174a5d70e65a 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) u32 delay_num = 0; /* See the comment for rockchip_mmc_set_phase below */ - if (!rate) { - pr_err("%s: invalid clk rate\n", __func__); + if (!rate) return -EINVAL; - } raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 67e73fd71f095c9b1164e26a5422c4d8f5edd6ee..2ca7e2be2f09e50e8b63ad3642fc78775017256a 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -362,8 +362,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { RK2928_CLKGATE_CON(2), 5, GFLAGS), MUX(SCLK_MAC, "sclk_macref", mux_sclk_macref_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(21), 4, 1, MFLAGS), - GATE(0, "sclk_mac_lbtest", "sclk_macref", - RK2928_CLKGATE_CON(2), 12, 0, GFLAGS), + GATE(0, "sclk_mac_lbtest", "sclk_macref", 0, + RK2928_CLKGATE_CON(2), 12, GFLAGS), COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0, RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS, @@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0, RK2928_CLKSEL_CON(5), 0, 7, DFLAGS, RK2928_CLKGATE_CON(0), 13, GFLAGS), - COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT, + COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(9), 0, RK2928_CLKGATE_CON(0), 14, GFLAGS, &common_spdif_fracmux), @@ -391,8 +391,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { * Clock-Architecture Diagram 4 */ - GATE(SCLK_SMC, "sclk_smc", "hclk_peri", - RK2928_CLKGATE_CON(2), 4, 0, GFLAGS), + GATE(SCLK_SMC, "sclk_smc", "hclk_peri", 0, + RK2928_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_NOMUX(SCLK_SPI0, "sclk_spi0", "pclk_peri", 0, RK2928_CLKSEL_CON(25), 0, 7, DFLAGS, diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 450de24a1b4224cd9386881c7c25471f32899060..9cfdbea493bb37926c2ab9a190cbeae227a8c020 
100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" }; PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" }; PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" }; -PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" }; +PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" }; PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m", "sclk_otgphy0_480m" }; PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" }; @@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 6, GFLAGS), - COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED, + COMPOSITE_NOMUX(0, "atclk", "armclk", 0, RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 7, GFLAGS), COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, RK3288_CLKGATE_CON(12), 8, GFLAGS), - GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED, + GATE(0, "pclk_dbg", "pclk_dbg_pre", 0, RK3288_CLKGATE_CON(12), 9, GFLAGS), GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(12), 10, GFLAGS), @@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0, RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(3), 11, GFLAGS), - MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0, + MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT, RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS), GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0, RK3288_CLKGATE_CON(9), 0, GFLAGS), @@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out", RK3288_CLKSEL_CON(22), 7, IFLAGS), - GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED, + GATE(0, "jtag", "ext_jtag", 0, RK3288_CLKGATE_CON(4), 14, GFLAGS), COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0, @@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, RK3288_CLKSEL_CON(29), 0, 2, MFLAGS, RK3288_CLKGATE_CON(3), 6, GFLAGS), - GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED, + GATE(0, "hsicphy12m_xin12m", "xin12m", 0, RK3288_CLKGATE_CON(13), 9, GFLAGS), DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0, RK3288_CLKSEL_CON(11), 8, 6, DFLAGS), @@ -676,7 +676,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS), GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS), GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS), - GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS), + GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS), /* ddrctrl [DDR Controller PHY clock] gates */ GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS), @@ -816,12 +816,9 @@ static const char *const rk3288_critical_clocks[] __initconst = { "pclk_alive_niu", "pclk_pd_pmu", "pclk_pmu_niu", - 
"pclk_core_niu", - "pclk_ddrupctl0", - "pclk_publ0", - "pclk_ddrupctl1", - "pclk_publ1", "pmu_hclk_otg0", + /* pwm-regulators on some boards, so handoff-critical later */ + "pclk_rkpwm", }; static void __iomem *rk3288_cru_base; @@ -838,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = { RK3288_CLKSEL_CON(10), RK3288_CLKSEL_CON(33), RK3288_CLKSEL_CON(37), + + /* We turn aclk_dmac1 on for suspend; this will restore it */ + RK3288_CLKGATE_CON(10), }; static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; @@ -853,6 +853,14 @@ static int rk3288_clk_suspend(void) readl_relaxed(rk3288_cru_base + reg_id); } + /* + * Going into deep sleep (specifically setting PMU_CLR_DMA in + * RK3288_PMU_PWRMODE_CON1) appears to fail unless + * "aclk_dmac1" is on. + */ + writel_relaxed(1 << (12 + 16), + rk3288_cru_base + RK3288_CLKGATE_CON(10)); + /* * Switch PLLs other than DPLL (for SDRAM) to slow mode to * avoid crashes on resume. The Mask ROM on the system will diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 252366a5231f766dd6a9df28fb7a755b7a6021f7..f2f13b603ae9b811d4a8e52464c58c71ff153b1e 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = { static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = { /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ - RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217), + RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218), /* vco = 1016064000 */ - RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088), + RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089), /* vco = 983040000 */ - RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088), + RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089), /* vco = 983040000 */ - RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088), + RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089), /* vco = 860156000 */ - RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894), + RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895), /* vco = 903168000 */ - RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329), + RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330), /* vco = 819200000 */ { /* sentinel */ }, }; @@ -392,7 +392,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { RK3328_CLKGATE_CON(1), 5, GFLAGS, &rk3328_i2s1_fracmux), GATE(SCLK_I2S1, "clk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, - RK3328_CLKGATE_CON(0), 6, GFLAGS), + RK3328_CLKGATE_CON(1), 6, GFLAGS), COMPOSITE_NODIV(SCLK_I2S1_OUT, "i2s1_out", mux_i2s1out_p, 0, RK3328_CLKSEL_CON(8), 12, 1, MFLAGS, RK3328_CLKGATE_CON(1), 7, GFLAGS), @@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS, RK3328_CLKGATE_CON(2), 12, GFLAGS), COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0, - RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS, + RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS, RK3328_CLKGATE_CON(2), 4, GFLAGS), COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0, RK3328_CLKSEL_CON(22), 0, 10, DFLAGS, @@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0, RK3328_CLKGATE_CON(25), 1, GFLAGS), GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 2, GFLAGS), GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 1, GFLAGS), + 
RK3328_CLKGATE_CON(25), 3, GFLAGS), GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 4, GFLAGS), GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0, - RK3328_CLKGATE_CON(25), 1, GFLAGS), + RK3328_CLKGATE_CON(25), 5, GFLAGS), GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED, - RK3328_CLKGATE_CON(25), 0, GFLAGS), + RK3328_CLKGATE_CON(25), 6, GFLAGS), COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0, RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS, @@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { /* PD_GMAC */ COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0, - RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, + RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3328_CLKGATE_CON(3), 2, GFLAGS), COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0, RK3328_CLKSEL_CON(25), 8, 3, DFLAGS, @@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { /* PD_PERI */ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS), - GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS), + GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS), GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS), @@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3328_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", - RK3328_SDMMC_CON1, 1), + RK3328_SDMMC_CON1, 0), MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3328_SDIO_CON0, 1), MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", - RK3328_SDIO_CON1, 1), + RK3328_SDIO_CON1, 0), MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RK3328_EMMC_CON0, 1), MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", - RK3328_EMMC_CON1, 1), + RK3328_EMMC_CON1, 0), MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext", RK3328_SDMMC_EXT_CON0, 1), MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext", - RK3328_SDMMC_EXT_CON1, 1), + RK3328_SDMMC_EXT_CON1, 0), }; static const char *const rk3328_critical_clocks[] __initconst = { @@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np) &rk3328_cpuclk_data, rk3328_cpuclk_rates, ARRAY_SIZE(rk3328_cpuclk_rates)); - rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0), + rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0), ROCKCHIP_SOFTRST_HIWORD_MASK); rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL); diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index d2c99d8916b83e48c1b23d6c49dd98f43f81f2db..a5fddebbe530532be49c3c4dc7e3cfdf6d6deea0 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, 
void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, else cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; - cpuclk->alt_parent = __clk_lookup(alt_parent); + cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent)); if (!cpuclk->alt_parent) { pr_err("%s: could not lookup alternate parent %s\n", __func__, alt_parent); diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h index d4b6b517fe1b44689df28853cf894baa3799153d..bd38c6aa389706c92261cd72f4f0c8fd8906ba67 100644 --- a/drivers/clk/samsung/clk-cpu.h +++ b/drivers/clk/samsung/clk-cpu.h @@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data { */ struct exynos_cpuclk { struct clk_hw hw; - struct clk *alt_parent; + struct clk_hw *alt_parent; void __iomem *ctrl_base; spinlock_t *lock; const struct exynos_cpuclk_cfg_data *cfg; diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c index 93306283d764de29a379236961b430904e8c50a7..8ae44b5db4c2f59dbdb821e0564e6e82c6a5efd2 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.c +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c @@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent, { struct of_phandle_args genpdspec = { .np = pd_node }; struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO); + if (!pdev) + return -ENOMEM; - pdev = platform_device_alloc(info->pd_name, -1); pdev->dev.parent = parent; - pdev->driver_override = "exynos5-subcmu"; platform_set_drvdata(pdev, (void *)info); of_genpd_add_device(&genpdspec, &pdev->dev); - platform_device_add(pdev); + ret = platform_device_add(pdev); + if (ret) + platform_device_put(pdev); - return 0; + return ret; } static int __init exynos5_clk_probe(struct platform_device *pdev) diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 95e1bf69449b75c1812c095f2db2483deac91dae..d5af937212992e2308abc1e912e681e4af255f74 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -171,12 +171,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = { GATE_BUS_CPU, GATE_SCLK_CPU, CLKOUT_CMU_CPU, + CPLL_CON0, + DPLL_CON0, EPLL_CON0, EPLL_CON1, EPLL_CON2, RPLL_CON0, RPLL_CON1, RPLL_CON2, + IPLL_CON0, + SPLL_CON0, + VPLL_CON0, + MPLL_CON0, SRC_TOP0, SRC_TOP1, SRC_TOP2, @@ -281,6 +287,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, + { .offset = GATE_IP_PERIS, .value = 0xffffffff, }, }; static int exynos5420_clk_suspend(void) @@ -633,6 +640,7 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { }; static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { + GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), }; @@ -1162,8 +1170,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_TMU, "tmu", "aclk66_psgen", 
GATE_IP_PERIS, 21, 0, 0), GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0), - GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), - /* GEN Block */ GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0), GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0), diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 162de44df099bff5be3a70e4fd5b010e1fc584f5..302596dc79a2c1283963615c1d82063bfa67dc5a 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -5527,6 +5528,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev) data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs, info->nr_clk_regs); + if (!data->clk_save) + return -ENOMEM; data->nr_clk_save = info->nr_clk_regs; data->clk_suspend = info->suspend_regs; data->nr_clk_suspend = info->nr_suspend_regs; @@ -5535,12 +5538,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev) if (data->nr_pclks > 0) { data->pclks = devm_kcalloc(dev, sizeof(struct clk *), data->nr_pclks, GFP_KERNEL); - + if (!data->pclks) { + kfree(data->clk_save); + return -ENOMEM; + } for (i = 0; i < data->nr_pclks; i++) { struct clk *clk = of_clk_get(dev->of_node, i); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + kfree(data->clk_save); + while (--i >= 0) + clk_put(data->pclks[i]); return PTR_ERR(clk); + } data->pclks[i] = clk; } } @@ -5630,7 +5640,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = { static const struct dev_pm_ops exynos5433_cmu_pm_ops = { SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index d8f9efa5129adf4d7e9731ba1dd7967f71d9c946..25351d6a55ba24bc2dbf357d6649b800c33db01e 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return 4; WARN_ON((cfg & (BIT(3) - 1)) > 4); @@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return -EINVAL; cfg &= ~(BIT(3) - 1); @@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate, { unsigned long fin; unsigned ratio, wait, hold; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4; fin = *parent_rate; ratio = fin / rate; @@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_dmn *clk = to_dmnclk(hw); unsigned long fin; unsigned ratio, wait, hold, reg; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 
3 : 4; fin = parent_rate; ratio = fin / rate; diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c index 568f59b58ddfa94852462ed8fd3531d48fff151c..e7c877d354c7bff5777fec95eef40a48b3879188 100644 --- a/drivers/clk/socfpga/clk-periph-s10.c +++ b/drivers/clk/socfpga/clk-periph-s10.c @@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk, if (socfpgaclk->fixed_div) { div = socfpgaclk->fixed_div; } else { - if (!socfpgaclk->bypass_reg) + if (socfpgaclk->hw.reg) div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); } diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 2d5d8b43727e95a5bd8f9af70c4452fedc1c9c19..c4d0b6f6abf2e1bb1a6027cd19bf0c22dc1b5cfe 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate * (mdiv + 6); + vco_freq = (unsigned long long)vco_freq * (mdiv + 6); return (unsigned long)vco_freq; } diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5b238fc314ac65c83f6bbc44af222ad1f0037110..5bed36e129516a91a46d7edbb8950da25507a3bd 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -12,17 +12,17 @@ #include "stratix10-clk.h" -static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk",}; +static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk",}; static const char * const cntr_mux[] = { "main_pll", "periph_pll", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; -static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; +static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; static const char * const noc_free_mux[] = {"main_noc_base_clk", "peri_noc_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; -static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; +static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; static const char * const mpu_free_mux[] = {"main_mpu_base_clk", "peri_mpu_base_clk", - "osc1", "cb_intosc_hs_div2_clk", - "f2s_free_clk"}; + "osc1", "cb-intosc-hs-div2-clk", + "f2s-free-clk"}; /* clocks in AO (always on) controller */ static const struct stratix10_pll_clock s10_pll_clks[] = { @@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0, 0, 0x3C, 1}, { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux), - 0, 0, 4, 0xB0, 0}, + 0, 0, 2, 0xB0, 0}, { 
STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), - 0, 0, 4, 0xB0, 1}, + 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig index 87892471eb96c3549ced203976eee9cbf7cfbb43..bad8099832d4806cd1a300549b2df66efc1df6f8 100644 --- a/drivers/clk/sprd/Kconfig +++ b/drivers/clk/sprd/Kconfig @@ -2,6 +2,7 @@ config SPRD_COMMON_CLK tristate "Clock support for Spreadtrum SoCs" depends on ARCH_SPRD || COMPILE_TEST default ARCH_SPRD + select REGMAP_MMIO if SPRD_COMMON_CLK diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index e038b044720611ab5a30382a3bc7772e2649fbe8..8bdab1c3013b82859df8a0391b54c1f61bf607fa 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw) struct clk_hw *hw; for (i = 0; i < clkhw->num; i++) { + const char *name; hw = clkhw->hws[i]; - if (!hw) continue; + name = hw->init->name; ret = devm_clk_hw_register(dev, hw); if (ret) { dev_err(dev, "Couldn't register clock %d - %s\n", - i, hw->init->name); + i, name); return ret; } } diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 36b4402bf09e363618ff3453e43ba6a02158cc73..640270f51aa56ce413c5528a9854ad4919005f86 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, k2 + refin * nint * CLK_PLL_1M; } + kfree(cfg); return rate; } @@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll, if (!ret) udelay(pll->udelay); + kfree(cfg); return ret; } diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c index 9980ab55271ba2f71f7ad09ba04d2bbd23468eb7..f76305b4bc8df9488bf099a3fa2e5e2d7f09a56d 100644 --- a/drivers/clk/sprd/sc9860-clk.c +++ b/drivers/clk/sprd/sc9860-clk.c @@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev) { const struct of_device_id *match; const struct sprd_clk_desc *desc; + int ret; match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node); if (!match) { @@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev) } desc = match->data; - sprd_clk_regmap_init(pdev, desc); + ret = sprd_clk_regmap_init(pdev, desc); + if (ret) + return ret; return sprd_clk_probe(&pdev->dev, desc->hw_clks); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index ee9c12cf3f08c38d6c1757a646c043b25f7dd90e..dec4a130390a3d46e33543465e9a9dd40d374515 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -158,7 +158,12 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", #define SUN50I_A64_PLL_MIPI_REG 0x040 static struct ccu_nkm pll_mipi_clk = { - .enable = BIT(31), + /* + * The bit 23 and 22 are called "LDO{1,2}_EN" on the SoC's + * user manual, and by experiments the PLL doesn't work without + * these bits toggled. 
+ */ + .enable = BIT(31) | BIT(23) | BIT(22), .lock = BIT(28), .n = _SUNXI_CCU_MULT(8, 4), .k = _SUNXI_CCU_MULT_MIN(4, 2, 2), @@ -577,7 +582,7 @@ static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" }; static const u8 dsi_dphy_table[] = { 0, 2, }; static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy", dsi_dphy_parents, dsi_dphy_table, - 0x168, 0, 4, 8, 2, BIT(31), CLK_SET_RATE_PARENT); + 0x168, 0, 4, 8, 2, BIT(15), CLK_SET_RATE_PARENT); static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index bdbfe78fe1333c944434c8f66b130923617dc671..d425b47cef1797e9aef965f17dd90250baa7e5c8 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", psi_ahb1_ahb2_parents, 0x510, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", "pll-periph0" }; static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -352,7 +352,7 @@ static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2", static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2", 0x79c, BIT(0), 0); -static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x79c, BIT(0), 0); +static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0); static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 3b97f60540ad8cd29aeb06a69620dcf940a15213..609970c0b6665caa2e1a8babedffe2c5c771588a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1", static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 0x060, BIT(10), 0); static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", - 0x060, BIT(12), 0); + 0x060, BIT(11), 0); static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", - 0x060, BIT(13), 0); + 0x060, BIT(12), 0); static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 0x060, BIT(13), 0); static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 13eb5b23c5e7fb2b2aa741a8b0291c2f1ca30d76..c40d572a760292cf30cc3aea9b694c5b468a1918 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -366,10 +366,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4, static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x", "pll-audio-2x", "pll-audio" }; static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents, - 0x0b0, 16, 2, BIT(31), 0); + 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents, - 0x0b4, 16, 2, BIT(31), 0); + 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT); /* TODO: the parent for most of the USB clocks is not 
known */ static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", @@ -446,7 +446,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio", 0x140, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x", - 0x140, BIT(30), 0); + 0x140, BIT(30), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x144, BIT(31), 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 77ed0b0ba6819d94317e12f31ac25896143e2a77..61e3ba12773eac5b40d0bae5b19395d6d0fc1914 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -475,7 +475,7 @@ static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" }; static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents, 0x134, 16, 4, 24, 3, BIT(31), 0); -static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph0" }; +static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph1" }; static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, 0x134, 0, 5, 8, 3, BIT(15), 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 621b1cd996dbb4e5e4d1e621172e4a7e53e746f5..9e3f4088724b430f111c2996e8d5e51cf368877b 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MMC1] = &mmc1_clk.common.hw, [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw, [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw, + [CLK_MMC2] = &mmc2_clk.common.hw, + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw, + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw, [CLK_CE] = &ce_clk.common.hw, [CLK_SPI0] = &spi0_clk.common.hw, [CLK_USB_PHY0] = &usb_phy0_clk.common.hw, @@ -542,7 +545,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, [RST_BUS_VE] = { 0x2c4, BIT(0) }, - [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, [RST_BUS_CSI] = { 0x2c4, BIT(8) }, [RST_BUS_DE] = { 0x2c4, BIT(12) }, [RST_BUS_DBG] = { 0x2c4, BIT(31) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c index 8936ef87652c093aade64063c9aa710d8064fbc7..c14bf782b2b33a91869dca97c4cc4ac2fcc8a2b8 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c @@ -1231,7 +1231,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev) /* Enforce d1 = 0, d2 = 0 for Audio PLL */ val = readl(reg + SUN9I_A80_PLL_AUDIO_REG); - val &= (BIT(16) & BIT(18)); + val &= ~(BIT(16) | BIT(18)); writel(val, reg + SUN9I_A80_PLL_AUDIO_REG); /* Enforce P = 1 for both CPU cluster PLLs */ diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index ebd9436d2c7cd382ed86a3bee1957a5db48b7530..1ad53d1016a3e86ae4eeda7f0492b6501761f363 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -160,7 +160,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); - u32 n_mask, k_mask, m_mask, p_mask; + u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0; struct _ccu_nkmp _nkmp; unsigned long flags; u32 reg; @@ -179,10 +179,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, ccu_nkmp_find_best(parent_rate, rate, &_nkmp); - n_mask = 
GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift); - k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift); - m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); - p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); + if (nkmp->n.width) + n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, + nkmp->n.shift); + if (nkmp->k.width) + k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, + nkmp->k.shift); + if (nkmp->m.width) + m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, + nkmp->m.shift); + if (nkmp->p.width) + p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, + nkmp->p.shift); spin_lock_irqsave(nkmp->common.lock, flags); diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index 4e2073307f34013e215fd827cc7048f6d4608bc6..9e3944f868ffffb471f4d6f99dc3589b382f6f8d 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -19,6 +19,17 @@ struct _ccu_nm { unsigned long m, min_m, max_m; }; +static unsigned long ccu_nm_calc_rate(unsigned long parent, + unsigned long n, unsigned long m) +{ + u64 rate = parent; + + rate *= n; + do_div(rate, m); + + return rate; +} + static void ccu_nm_find_best(unsigned long parent, unsigned long rate, struct _ccu_nm *nm) { @@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate, for (_n = nm->min_n; _n <= nm->max_n; _n++) { for (_m = nm->min_m; _m <= nm->max_m; _m++) { - unsigned long tmp_rate = parent * _n / _m; + unsigned long tmp_rate = ccu_nm_calc_rate(parent, + _n, _m); if (tmp_rate > rate) continue; @@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw, if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n); else - rate = parent_rate * n / m; + rate = ccu_nm_calc_rate(parent_rate, n, m); if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nm->fixed_post_div; @@ -142,7 +154,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, _nm.max_m = nm->m.max ?: 1 << nm->m.width; ccu_nm_find_best(*parent_rate, rate, &_nm); - rate = *parent_rate * _nm.n / _nm.m; + rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m); if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nm->fixed_post_div; diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 012714d94b429bcca04b277aed39983a140b08d0..19ad33c97d6d6013f8723fef7adf9982c7cc31aa 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -988,6 +988,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node, if (endp) { derived_name = kstrndup(clk_name, endp - clk_name, GFP_KERNEL); + if (!derived_name) + return NULL; factors.name = derived_name; } else { factors.name = clk_name; @@ -1086,8 +1088,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node, rate_hw, rate_ops, gate_hw, &clk_gate_ops, clkflags | - data->div[i].critical ? - CLK_IS_CRITICAL : 0); + (data->div[i].critical ? 
+ CLK_IS_CRITICAL : 0)); WARN_ON(IS_ERR(clk_data->clks[i])); } diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c index 92d04ce2dee6b7e4e5131a1f9052bbee7d5de134..53cdc0ec40f33a752aef895f8157a4f65258ed1d 100644 --- a/drivers/clk/tegra/clk-audio-sync.c +++ b/drivers/clk/tegra/clk-audio-sync.c @@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = { }; struct clk *tegra_clk_register_sync_source(const char *name, - unsigned long rate, unsigned long max_rate) + unsigned long max_rate) { struct tegra_clk_sync_source *sync; struct clk_init_data init; @@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name, return ERR_PTR(-ENOMEM); } - sync->rate = rate; sync->max_rate = max_rate; init.ops = &tegra_clk_sync_source_ops; diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 830d1c87fa7cb6d089ff95992a3e2592691801ea..dc87866233b94b22a7214babea13a3aa7e816c6f 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -662,8 +662,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll, pll_override_writel(val, params->pmc_divp_reg, pll); val = pll_override_readl(params->pmc_divnm_reg, pll); - val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) | - ~(divn_mask(pll) << div_nmp->override_divn_shift); + val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) | + (divn_mask(pll) << div_nmp->override_divn_shift)); val |= (cfg->m << div_nmp->override_divm_shift) | (cfg->n << div_nmp->override_divn_shift); pll_override_writel(val, params->pmc_divnm_reg, pll); diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c index b37cae7af26da031c01a6fb584802bb4640ba099..02dd6487d855d001083579b9c7841383bd8e380b 100644 --- a/drivers/clk/tegra/clk-tegra-audio.c +++ b/drivers/clk/tegra/clk-tegra-audio.c @@ -49,8 +49,6 @@ struct tegra_sync_source_initdata { #define SYNC(_name) \ {\ .name = #_name,\ - .rate = 24000000,\ - .max_rate = 24000000,\ .clk_id = tegra_clk_ ## _name,\ } @@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base, void __init tegra_audio_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_audio_clk_info *audio_info, - unsigned int num_plls) + unsigned int num_plls, unsigned long sync_max_rate) { struct clk *clk; struct clk **dt_clk; @@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base, if (!dt_clk) continue; - clk = tegra_clk_register_sync_source(data->name, - data->rate, data->max_rate); + clk = tegra_clk_register_sync_source(data->name, sync_max_rate); *dt_clk = clk; } diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 1824f014202b0351f2587a7dcc3b9e666a69b15a..625d11091330896149c051910b3f5b791117d034 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 }, { TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 }, { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 }, + { TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_I2S4_SYNC, 
TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, /* must be the last entry */ { TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 }, }; @@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np) tegra114_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks, tegra114_audio_plls, - ARRAY_SIZE(tegra114_audio_plls)); + ARRAY_SIZE(tegra114_audio_plls), 24000000); tegra_pmc_clk_init(pmc_base, tegra114_clks); tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks, &pll_x_params); diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c index 269d3595758bebabf0f72d6448ba6633e9cd3c8f..edc31bb56674ad1ea425c93ab4ceb44bf183730b 100644 --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c @@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev) struct tegra_dfll_soc_data *soc; soc = tegra_dfll_unregister(pdev); - if (IS_ERR(soc)) + if (IS_ERR(soc)) { dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", PTR_ERR(soc)); + return PTR_ERR(soc); + } tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index b6cf28ca2ed291174e7a24576f89afe90638f2d6..df0018f7bf7ed8668d04a6ab5ef47994b4ef3ac3 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = { { TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 }, { TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 }, { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 }, + { TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, /* must be the last entry */ { TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 }, }; @@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np) tegra124_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, tegra124_audio_plls, - ARRAY_SIZE(tegra124_audio_plls)); + ARRAY_SIZE(tegra124_audio_plls), 24576000); tegra_pmc_clk_init(pmc_base, tegra124_clks); /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */ diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index cc857d4d4a86e9985876b439ecd21f43579446f3..68551effb5ca2f3956080953226289ea9886f78f 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -578,7 +578,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, - [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true }, }; static unsigned long tegra20_clk_measure_input_freq(void) @@ -799,6 +798,31 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = { TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 
26, 0, TEGRA20_CLK_DISP2), }; +static void __init tegra20_emc_clk_init(void) +{ + struct clk *clk; + + clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, + ARRAY_SIZE(mux_pllmcp_clkm), + CLK_SET_RATE_NO_REPARENT, + clk_base + CLK_SOURCE_EMC, + 30, 2, 0, &emc_lock); + + clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC, + &emc_lock); + clks[TEGRA20_CLK_MC] = clk; + + /* + * Note that 'emc_mux' source and 'emc' rate shouldn't be changed at + * the same time due to a HW bug, this won't happen because we're + * defining 'emc_mux' and 'emc' as distinct clocks. + */ + clk = tegra_clk_register_divider("emc", "emc_mux", + clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL, + TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock); + clks[TEGRA20_CLK_EMC] = clk; +} + static void __init tegra20_periph_clk_init(void) { struct tegra_periph_init_data *data; @@ -812,15 +836,7 @@ static void __init tegra20_periph_clk_init(void) clks[TEGRA20_CLK_AC97] = clk; /* emc */ - clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, - ARRAY_SIZE(mux_pllmcp_clkm), - CLK_SET_RATE_NO_REPARENT, - clk_base + CLK_SOURCE_EMC, - 30, 2, 0, &emc_lock); - - clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC, - &emc_lock); - clks[TEGRA20_CLK_MC] = clk; + tegra20_emc_clk_init(); /* dsi */ clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0, diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 9eb1cb14fce11ca5dd0ddd725102a343712d3b24..7264e9731034829e39f4b8649cd40f59f51988fb 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2214,9 +2214,9 @@ static struct div_nmp pllu_nmp = { }; static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { - { 12000000, 480000000, 40, 1, 0, 0 }, - { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */ - { 38400000, 480000000, 25, 2, 0, 0 }, + { 12000000, 480000000, 40, 1, 1, 0 }, + { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */ + { 38400000, 480000000, 25, 2, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -2603,7 +2603,7 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = { [TEGRA_POWERGATE_MPE] = { .handle_lvl2_ovr = tegra210_generic_mbist_war, .lvl2_offset = LVL2_CLK_GATE_OVRE, - .lvl2_mask = BIT(2), + .lvl2_mask = BIT(29), }, [TEGRA_POWERGATE_SOR] = { .handle_lvl2_ovr = tegra210_generic_mbist_war, @@ -2654,14 +2654,14 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = { .num_clks = ARRAY_SIZE(nvdec_slcg_clkids), .clk_init_data = nvdec_slcg_clkids, .handle_lvl2_ovr = tegra210_generic_mbist_war, - .lvl2_offset = LVL2_CLK_GATE_OVRC, + .lvl2_offset = LVL2_CLK_GATE_OVRE, .lvl2_mask = BIT(9) | BIT(31), }, [TEGRA_POWERGATE_NVJPG] = { .num_clks = ARRAY_SIZE(nvjpg_slcg_clkids), .clk_init_data = nvjpg_slcg_clkids, .handle_lvl2_ovr = tegra210_generic_mbist_war, - .lvl2_offset = LVL2_CLK_GATE_OVRC, + .lvl2_offset = LVL2_CLK_GATE_OVRE, .lvl2_mask = BIT(9) | BIT(31), }, [TEGRA_POWERGATE_AUD] = { @@ -3343,6 +3343,7 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, + { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, @@ -3367,8 +3368,16 @@ static struct 
tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 }, { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 }, { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 }, - { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 }, + { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 }, + { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 }, /* This MUST be the last entry. */ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, }; @@ -3562,7 +3571,7 @@ static void __init tegra210_clock_init(struct device_node *np) tegra210_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks, tegra210_audio_plls, - ARRAY_SIZE(tegra210_audio_plls)); + ARRAY_SIZE(tegra210_audio_plls), 24576000); tegra_pmc_clk_init(pmc_base, tegra210_clks); /* For Tegra210, PLLD is the only source for DSIA & DSIB */ diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index acfe661b2ae7249db0c130e0897e1b6fea164d67..e0aaecd98fbff1635c2ee78f877ad84df734eed5 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 }, { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 }, { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 }, + { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, /* must be the last entry */ { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 }, }; @@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np) tegra30_periph_clk_init(); tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks, tegra30_audio_plls, - ARRAY_SIZE(tegra30_audio_plls)); + ARRAY_SIZE(tegra30_audio_plls), 24000000); tegra_pmc_clk_init(pmc_base, tegra30_clks); tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX); diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index d2c3a010f8e9b358906cc99f4eaf80b54fb1c18a..09bccbb9640c48c802dff657d166645641ebada0 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops; extern int *periph_clk_enb_refcnt; struct clk *tegra_clk_register_sync_source(const char *name, - unsigned long fixed_rate, unsigned long max_rate); + unsigned long max_rate); /** * struct tegra_clk_frac_div - fractional divider clock @@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num); void tegra_audio_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk 
*tegra_clks, struct tegra_audio_clk_info *audio_info, - unsigned int num_plls); + unsigned int num_plls, unsigned long sync_max_rate); void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 14881547043130d1e686055387a6276e49fd11f9..beb672a215b6cca9dbc8f1c58dd7bf1bbc57864b 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node) struct clk_init_data init = { NULL }; const char **parent_names = NULL; struct clk *clk; - int ret; clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); if (!clk_hw) { @@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node) clk = ti_clk_register(NULL, &clk_hw->hw, node->name); if (!IS_ERR(clk)) { - ret = ti_clk_add_alias(NULL, clk, node->name); - if (ret) { - clk_unregister(clk); - goto cleanup; - } of_clk_add_provider(node, of_clk_src_simple_get, clk); kfree(parent_names); return; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 7d22e1af224770d7084cd7d8ec29e24d3f87368f..27e0979b315860dbddf032d11e7826891ede1bd9 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node; + struct device_node *node, *parent; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -164,8 +164,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) continue; node = of_find_node_by_name(NULL, buf); - if (num_args) - node = of_find_node_by_name(node, "clk"); + if (num_args) { + parent = node; + node = of_get_child_by_name(parent, "clk"); + of_node_put(parent); + } + clkspec.np = node; clkspec.args_count = num_args; for (i = 0; i < num_args; i++) { @@ -173,11 +177,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) if (ret) { pr_warn("Bad tag in %s at %d: %s\n", c->node_name, i, tags[i]); + of_node_put(node); return; } } clk = of_clk_get_from_provider(&clkspec); - + of_node_put(node); if (!IS_ERR(clk)) { c->lk.clk = clk; clkdev_add(&c->lk); diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 421b0539222058354d94ae6c29347d03b02ddd93..2c2564acad227a9626b9b9c333b3eb9157b2db5f 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout) * can be from a timer that requires pm_runtime access, which * will eventually bring us here with timekeeping_suspended, * during both suspend entry and resume paths. This happens - * at least on am43xx platform. + * at least on am43xx platform. Account for flakeyness + * with udelay() by multiplying the timeout value by 2. 
*/ if (unlikely(_early_timeout || timekeeping_suspended)) { if (time->cycles++ < timeout) { - udelay(1); + udelay(1 * 2); return false; } } else { @@ -137,9 +138,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) int ret; union omap4_timeout timeout = { 0 }; - if (!clk->enable_bit) - return 0; - if (clk->clkdm) { ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); if (ret) { @@ -151,6 +149,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw) } } + if (!clk->enable_bit) + return 0; + val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); val &= ~OMAP4_MODULEMODE_MASK; @@ -179,7 +180,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw) union omap4_timeout timeout = { 0 }; if (!clk->enable_bit) - return; + goto exit; val = ti_clk_ll_ops->clk_readl(&clk->enable_reg); @@ -229,6 +230,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, { struct omap_clkctrl_provider *provider = data; struct omap_clkctrl_clk *entry; + bool found = false; if (clkspec->args_count != 2) return ERR_PTR(-EINVAL); @@ -238,11 +240,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, list_for_each_entry(entry, &provider->clocks, node) { if (entry->reg_offset == clkspec->args[0] && - entry->bit_offset == clkspec->args[1]) + entry->bit_offset == clkspec->args[1]) { + found = true; break; + } } - if (!entry) + if (!found) return ERR_PTR(-EINVAL); return entry->clk; diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index ccfb4d9a152aea55c3e414dc26d2ddd3ce792ee4..079f0beda8b6919a9c31e68ae8ae6ba531bc6afd 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -367,8 +367,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, num_dividers = i; tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (!tmp) { + *table = ERR_PTR(-ENOMEM); return -ENOMEM; + } valid_div = 0; *width = 0; @@ -403,6 +405,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) { struct clk_omap_divider *div; struct clk_omap_reg *reg; + int ret; if (!setup) return NULL; @@ -422,6 +425,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) div->flags |= CLK_DIVIDER_POWER_OF_TWO; div->table = _get_div_table_from_setup(setup, &div->width); + if (IS_ERR(div->table)) { + ret = PTR_ERR(div->table); + kfree(div); + return ERR_PTR(ret); + } + div->shift = setup->bit_shift; div->latch = -EINVAL; diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c index ec11f55594ad0d9ded3eb97ca2d3b2cd1f4e96f6..5d2d42b7e182b82dceffdb4329aa75a28950b54b 100644 --- a/drivers/clk/uniphier/clk-uniphier-cpugear.c +++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c @@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index) return ret; ret = regmap_write_bits(gear->regmap, - gear->regbase + UNIPHIER_CLK_CPUGEAR_SET, + gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD, UNIPHIER_CLK_CPUGEAR_UPD_BIT, UNIPHIER_CLK_CPUGEAR_UPD_BIT); if (ret) diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index d977193842dfed1fead553dd2240013c5a0d380a..19174835693b91cd473b59c7fed787f128f268d1 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = { }; static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, - void __iomem *base, + const struct pmc_clk_data *pmc_data, const char **parent_names, 
int num_parents) { @@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, init.num_parents = num_parents; pclk->hw.init = &init; - pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; + pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); + /* + * On some systems, the pmc_plt_clocks already enabled by the + * firmware are being marked as critical to avoid them being + * gated by the clock framework. + */ + if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw)) + init.flags |= CLK_IS_CRITICAL; + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); @@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev) return PTR_ERR(parent_names); for (i = 0; i < PMC_CLK_NUM; i++) { - data->clks[i] = plt_clk_register(pdev, i, pmc_data->base, + data->clks[i] = plt_clk_register(pdev, i, pmc_data, parent_names, data->nparents); if (IS_ERR(data->clks[i])) { err = PTR_ERR(data->clks[i]); diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 354dd508c51692b540255edaa3edde19cf56d506..8dfb8523b79db137a2af81784de34b2a62f7f330 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np) for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) { zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base; + name = zx296718_pll_clk[i].hw.init->name; ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw); - if (ret) { - pr_warn("top clk %s init error!\n", - zx296718_pll_clk[i].hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) { @@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np) top_hw_onecell_data.hws[top_ffactor_clk[i].id] = &top_ffactor_clk[i].factor.hw; + name = top_ffactor_clk[i].factor.hw.init->name; ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_ffactor_clk[i].factor.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) { @@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np) &top_mux_clk[i].mux.hw; top_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = top_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) { @@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np) &top_gate_clk[i].gate.hw; top_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = top_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) { @@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np) &top_div_clk[i].div.hw; top_div_clk[i].div.reg += (uintptr_t)reg_base; + name = top_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, 
&top_div_clk[i].div.hw); - if (ret) { - pr_warn("top clk %s init error!\n", - top_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("top clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_mux_clk[i].mux.hw; lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = lsp0_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw); - if (ret) { - pr_warn("lsp0 clk %s init error!\n", - lsp0_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) { @@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_gate_clk[i].gate.hw; lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = lsp0_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw); - if (ret) { - pr_warn("lsp0 clk %s init error!\n", - lsp0_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) { @@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np) &lsp0_div_clk[i].div.hw; lsp0_div_clk[i].div.reg += (uintptr_t)reg_base; + name = lsp0_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw); - if (ret) { - pr_warn("lsp0 clk %s init error!\n", - lsp0_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("lsp0 clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp0_mux_clk[i].mux.hw; lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = lsp1_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) { @@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp1_gate_clk[i].gate.hw; lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = lsp1_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) { @@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np) &lsp1_div_clk[i].div.hw; lsp1_div_clk[i].div.reg += (uintptr_t)reg_base; + name = lsp1_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw); - if (ret) { - pr_warn("lsp1 clk %s init error!\n", - lsp1_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("lsp1 clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, @@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np) { void __iomem *reg_base; int i, ret; + const char *name; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -995,11 
+988,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_mux_clk[i].mux.hw; audio_mux_clk[i].mux.reg += (uintptr_t)reg_base; + name = audio_mux_clk[i].mux.hw.init->name; ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_mux_clk[i].mux.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) { @@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_adiv_clk[i].hw; audio_adiv_clk[i].reg_base += (uintptr_t)reg_base; + name = audio_adiv_clk[i].hw.init->name; ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_adiv_clk[i].hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) { @@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_div_clk[i].div.hw; audio_div_clk[i].div.reg += (uintptr_t)reg_base; + name = audio_div_clk[i].div.hw.init->name; ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_div_clk[i].div.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) { @@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np) &audio_gate_clk[i].gate.hw; audio_gate_clk[i].gate.reg += (uintptr_t)reg_base; + name = audio_gate_clk[i].gate.hw.init->name; ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw); - if (ret) { - pr_warn("audio clk %s init error!\n", - audio_gate_clk[i].gate.hw.init->name); - } + if (ret) + pr_warn("audio clk %s init error!\n", name); } ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 88a2cab37f627b86bebe9512ffee9db720b327e1..35c8557087417f4b423f0f6d299c9ec4fe680bae 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -53,6 +53,7 @@ static void __iomem *zynq_clkc_base; #define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204) #define NUM_MIO_PINS 54 +#define CLK_NAME_LEN 16 #define DBG_CLK_CTRL_CLKACT_TRC BIT(0) #define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1) @@ -229,7 +230,7 @@ static void __init zynq_clk_setup(struct device_node *np) u32 tmp; int ret; struct clk *clk; - char *clk_name; + char clk_name[CLK_NAME_LEN]; unsigned int fclk_enable = 0; const char *clk_output_name[clk_max]; const char *cpu_parents[4]; @@ -439,12 +440,10 @@ static void __init zynq_clk_setup(struct device_node *np) "gem1_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); - tmp = strlen("mio_clk_00x"); - clk_name = kmalloc(tmp, GFP_KERNEL); for (i = 0; i < NUM_MIO_PINS; i++) { int idx; - snprintf(clk_name, tmp, "mio_clk_%2.2d", i); + snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i); idx = of_property_match_string(np, "clock-names", clk_name); if (idx >= 0) can_mio_mux_parents[i] = of_clk_get_parent_name(np, @@ -452,7 +451,6 @@ static void __init zynq_clk_setup(struct device_node *np) else can_mio_mux_parents[i] = dummy_nm; } - kfree(clk_name); clk = clk_register_mux(NULL, "can_mux", periph_parents, 4, CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a11f4ba98b05c57d08b211ac933f93fcf7cb4616..4d37f018d846c7c207409340d47461297cefde94 100644 --- a/drivers/clocksource/Kconfig +++ 
b/drivers/clocksource/Kconfig @@ -136,6 +136,7 @@ config VT8500_TIMER config NPCM7XX_TIMER bool "NPCM7xx timer driver" if COMPILE_TEST depends on HAS_IOMEM + select TIMER_OF select CLKSRC_MMIO help Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, @@ -290,6 +291,7 @@ config CLKSRC_MPS2 config ARC_TIMERS bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST + depends on GENERIC_SCHED_CLOCK select TIMER_OF help These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores @@ -364,6 +366,16 @@ config ARM64_ERRATUM_858921 The workaround will be dynamically enabled when an affected core is detected. +config SUN50I_ERRATUM_UNKNOWN1 + bool "Workaround for Allwinner A64 erratum UNKNOWN1" + default y + depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI + select ARM_ARCH_TIMER_OOL_WORKAROUND + help + This option enables a workaround for instability in the timer on + the Allwinner A64 SoC. The workaround will only be active if the + allwinner,erratum-unknown1 property is found in the timer node. + config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select TIMER_OF if OF diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c index 20da9b1d7f7d01c0a537dd6a421b3eb7ea2e547e..b28970ca4a7a985f7d8aec23c3c7049851b68eae 100644 --- a/drivers/clocksource/arc_timer.c +++ b/drivers/clocksource/arc_timer.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs) return (((u64)h) << 32) | l; } +static notrace u64 arc_gfrc_clock_read(void) +{ + return arc_read_gfrc(NULL); +} + static struct clocksource arc_counter_gfrc = { .name = "ARConnect GFRC", .rating = 400, @@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node) if (ret) return ret; + sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq); + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); } TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); @@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs) return (((u64)h) << 32) | l; } +static notrace u64 arc_rtc_clock_read(void) +{ + return arc_read_rtc(NULL); +} + static struct clocksource arc_counter_rtc = { .name = "ARCv2 RTC", .rating = 350, @@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node) write_aux_reg(AUX_RTC_CTRL, 1); + sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq); + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); } TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc); @@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs) return (u64) read_aux_reg(ARC_REG_TIMER1_CNT); } +static notrace u64 arc_timer1_clock_read(void) +{ + return arc_read_timer1(NULL); +} + static struct clocksource arc_counter_timer1 = { .name = "ARC Timer1", .rating = 300, @@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node) write_aux_reg(ARC_REG_TIMER1_CNT, 0); write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); + sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq); + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); } diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index d8c7f5750cdb025dfd3eae42d691318fc472e29b..58863fd9c91be5562739b492bc8dd2479a31e1ac 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -76,6 +76,7 @@ static bool 
arch_timer_c3stop; static bool arch_timer_mem_use_virtual; static bool arch_counter_suspend_stop; static bool vdso_default = true; +static bool vdso_fix; static cpumask_t evtstrm_available = CPU_MASK_NONE; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); @@ -319,6 +320,48 @@ static u64 notrace arm64_858921_read_cntvct_el0(void) } #endif +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 +/* + * The low bits of the counter registers are indeterminate while bit 10 or + * greater is rolling over. Since the counter value can jump both backward + * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values + * with all ones or all zeros in the low bits. Bound the loop by the maximum + * number of CPU cycles in 3 consecutive 24 MHz counter periods. + */ +#define __sun50i_a64_read_reg(reg) ({ \ + u64 _val; \ + int _retries = 150; \ + \ + do { \ + _val = read_sysreg(reg); \ + _retries--; \ + } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \ + \ + WARN_ON_ONCE(!_retries); \ + _val; \ +}) + +static u64 notrace sun50i_a64_read_cntpct_el0(void) +{ + return __sun50i_a64_read_reg(cntpct_el0); +} + +static u64 notrace sun50i_a64_read_cntvct_el0(void) +{ + return __sun50i_a64_read_reg(cntvct_el0); +} + +static u32 notrace sun50i_a64_read_cntp_tval_el0(void) +{ + return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0(); +} + +static u32 notrace sun50i_a64_read_cntv_tval_el0(void) +{ + return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0(); +} +#endif + #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); @@ -408,6 +451,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .read_cntvct_el0 = arm64_858921_read_cntvct_el0, }, #endif +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 + { + .match_type = ate_match_dt, + .id = "allwinner,erratum-unknown1", + .desc = "Allwinner erratum UNKNOWN1", + .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0, + .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0, + .read_cntpct_el0 = sun50i_a64_read_cntpct_el0, + .read_cntvct_el0 = sun50i_a64_read_cntvct_el0, + .set_next_event_phys = erratum_set_next_event_tval_phys, + .set_next_event_virt = erratum_set_next_event_tval_virt, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, @@ -495,8 +551,14 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa * change both the default value and the vdso itself. 
*/ if (wa->read_cntvct_el0) { - clocksource_counter.archdata.vdso_direct = false; - vdso_default = false; + if (wa->read_cntvct_el0 == hisi_161010101_read_cntvct_el0) { + clocksource_counter.archdata.vdso_direct = true; + vdso_default = true; + vdso_fix = true; + } else { + clocksource_counter.archdata.vdso_direct = false; + vdso_default = false; + } } } @@ -764,23 +826,32 @@ static void arch_timer_evtstrm_enable(int divider) | ARCH_TIMER_VIRT_EVT_EN; arch_timer_set_cntkctl(cntkctl); elf_hwcap |= HWCAP_EVTSTRM; -#ifdef CONFIG_COMPAT - compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; +#ifdef CONFIG_AARCH32_EL0 + a32_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; #endif cpumask_set_cpu(smp_processor_id(), &evtstrm_available); } static void arch_timer_configure_evtstream(void) { - int evt_stream_div, pos; + int evt_stream_div, lsb; + + /* + * As the event stream can at most be generated at half the frequency + * of the counter, use half the frequency when computing the divider. + */ + evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; + + /* + * Find the closest power of two to the divisor. If the adjacent bit + * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). + */ + lsb = fls(evt_stream_div) - 1; + if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) + lsb++; - /* Find the closest power of two to the divisor */ - evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; - pos = fls(evt_stream_div); - if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) - pos--; /* enable event stream */ - arch_timer_evtstrm_enable(min(pos, 15)); + arch_timer_evtstrm_enable(max(0, min(lsb, 15))); } static void arch_counter_set_user_access(void) @@ -800,7 +871,7 @@ static void arch_counter_set_user_access(void) * need to be workaround. The vdso may have been already * disabled though. 
*/ - if (arch_timer_this_cpu_has_cntvct_wa()) + if (arch_timer_this_cpu_has_cntvct_wa() && !vdso_fix) pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id()); else cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; @@ -935,6 +1006,9 @@ static void __init arch_counter_register(unsigned type) arch_timer_read_counter = arch_counter_get_cntpct; clocksource_counter.archdata.vdso_direct = vdso_default; +#ifdef CONFIG_ARM64 + clocksource_counter.archdata.vdso_fix = vdso_fix; +#endif } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; } diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c index 38cd2feb87c42e35ab1a9153c9bb18438abd16b9..0ce760776406b3cc02ba2233c402410801b6fcf4 100644 --- a/drivers/clocksource/asm9260_timer.c +++ b/drivers/clocksource/asm9260_timer.c @@ -198,6 +198,10 @@ static int __init asm9260_timer_init(struct device_node *np) } clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get clk!\n"); + return PTR_ERR(clk); + } ret = clk_prepare_enable(clk); if (ret) { diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 7a244b681876d3895e1e8f010f58c14e21c4d535..aaf5bfa9bd9c915efdca1b2ac5fba58d86199a01 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -211,7 +211,7 @@ static void exynos4_frc_resume(struct clocksource *cs) static struct clocksource mct_frc = { .name = "mct-frc", - .rating = 400, + .rating = 450, /* use value higher than ARM arch timer */ .read = exynos4_frc_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, @@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles, exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); } +static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +{ + /* Clear the MCT tick interrupt */ + if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); +} + static int exynos4_tick_set_next_event(unsigned long cycles, struct clock_event_device *evt) { @@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt) mevt = container_of(evt, struct mct_clock_event_device, evt); exynos4_mct_tick_stop(mevt); + exynos4_mct_tick_clear(mevt); return 0; } @@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt) return 0; } -static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) { + struct mct_clock_event_device *mevt = dev_id; + struct clock_event_device *evt = &mevt->evt; + /* * This is for supporting oneshot mode. 
* Mct would generate interrupt periodically @@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) if (!clockevent_state_periodic(&mevt->evt)) exynos4_mct_tick_stop(mevt); - /* Clear the MCT tick interrupt */ - if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) - exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); -} - -static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) -{ - struct mct_clock_event_device *mevt = dev_id; - struct clock_event_device *evt = &mevt->evt; - exynos4_mct_tick_clear(mevt); evt->event_handler(evt); @@ -465,7 +466,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu) evt->set_state_oneshot_stopped = set_state_shutdown; evt->tick_resume = set_state_shutdown; evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; - evt->rating = 450; + evt->rating = 500; /* use value higher than ARM arch timer */ exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 9c38895542f4abb5bff8c487ff22701e008443a2..d4350bb10b83a26aa1c9a56555ff8a20e949148a 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -20,6 +20,13 @@ DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); +/* + * Handle PIT quirk in pit_shutdown() where zeroing the counter register + * restarts the PIT, negating the shutdown. On platforms with the quirk, + * platform specific code can set this to false. + */ +bool i8253_clear_counter_on_shutdown __ro_after_init = true; + #ifdef CONFIG_CLKSRC_I8253 /* * Since the PIT overflows every tick, its not very useful @@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt) raw_spin_lock(&i8253_lock); outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); + + if (i8253_clear_counter_on_shutdown) { + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); + } raw_spin_unlock(&i8253_lock); return 0; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index bbbf37c471a3919f7fb177f0439cd15d020d8d08..cec90a4c79b34b270a552a4db42e2b3540112cf4 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -78,18 +78,17 @@ struct sh_cmt_info { unsigned int channels_mask; unsigned long width; /* 16 or 32 bit version of hardware block */ - unsigned long overflow_bit; - unsigned long clear_bits; + u32 overflow_bit; + u32 clear_bits; /* callbacks for CMSTR and CMCSR access */ - unsigned long (*read_control)(void __iomem *base, unsigned long offs); + u32 (*read_control)(void __iomem *base, unsigned long offs); void (*write_control)(void __iomem *base, unsigned long offs, - unsigned long value); + u32 value); /* callbacks for CMCNT and CMCOR access */ - unsigned long (*read_count)(void __iomem *base, unsigned long offs); - void (*write_count)(void __iomem *base, unsigned long offs, - unsigned long value); + u32 (*read_count)(void __iomem *base, unsigned long offs); + void (*write_count)(void __iomem *base, unsigned long offs, u32 value); }; struct sh_cmt_channel { @@ -103,13 +102,13 @@ struct sh_cmt_channel { unsigned int timer_bit; unsigned long flags; - unsigned long match_value; - unsigned long next_match_value; - unsigned long max_match_value; + u32 match_value; + u32 next_match_value; + u32 max_match_value; raw_spinlock_t lock; struct clock_event_device ced; struct clocksource cs; - unsigned long total_cycles; + u64 total_cycles; bool cs_enabled; }; @@ -160,24 +159,22 @@ struct sh_cmt_device { #define SH_CMT32_CMCSR_CKS_RCLK1 
(7 << 0) #define SH_CMT32_CMCSR_CKS_MASK (7 << 0) -static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs) +static u32 sh_cmt_read16(void __iomem *base, unsigned long offs) { return ioread16(base + (offs << 1)); } -static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs) +static u32 sh_cmt_read32(void __iomem *base, unsigned long offs) { return ioread32(base + (offs << 2)); } -static void sh_cmt_write16(void __iomem *base, unsigned long offs, - unsigned long value) +static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value) { iowrite16(value, base + (offs << 1)); } -static void sh_cmt_write32(void __iomem *base, unsigned long offs, - unsigned long value) +static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value) { iowrite32(value, base + (offs << 2)); } @@ -242,7 +239,7 @@ static const struct sh_cmt_info sh_cmt_info[] = { #define CMCNT 1 /* channel register */ #define CMCOR 2 /* channel register */ -static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch) +static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) { if (ch->iostart) return ch->cmt->info->read_control(ch->iostart, 0); @@ -250,8 +247,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch) return ch->cmt->info->read_control(ch->cmt->mapbase, 0); } -static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, - unsigned long value) +static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) { if (ch->iostart) ch->cmt->info->write_control(ch->iostart, 0, value); @@ -259,39 +255,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); } -static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) +static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) { return ch->cmt->info->read_control(ch->ioctrl, CMCSR); } -static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, - unsigned long value) +static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) { ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); } -static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) +static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) { return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, - unsigned long value) +static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) { ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); } -static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, - unsigned long value) +static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) { ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); } -static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, - int *has_wrapped) +static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) { - unsigned long v1, v2, v3; - int o1, o2; + u32 v1, v2, v3; + u32 o1, o2; o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit; @@ -311,7 +303,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) { - unsigned long flags, value; + unsigned long flags; + u32 value; /* start stop register shared by multiple timer channels */ raw_spin_lock_irqsave(&ch->cmt->lock, flags); @@ -418,11 +411,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch) static void 
sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch, int absolute) { - unsigned long new_match; - unsigned long value = ch->next_match_value; - unsigned long delay = 0; - unsigned long now = 0; - int has_wrapped; + u32 value = ch->next_match_value; + u32 new_match; + u32 delay = 0; + u32 now = 0; + u32 has_wrapped; now = sh_cmt_get_counter(ch, &has_wrapped); ch->flags |= FLAG_REPROGRAM; /* force reprogram */ @@ -619,9 +612,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) static u64 sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - unsigned long flags, raw; - unsigned long value; - int has_wrapped; + unsigned long flags; + u32 has_wrapped; + u64 value; + u32 raw; raw_spin_lock_irqsave(&ch->lock, flags); value = ch->total_cycles; @@ -694,7 +688,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, cs->disable = sh_cmt_clocksource_disable; cs->suspend = sh_cmt_clocksource_suspend; cs->resume = sh_cmt_clocksource_resume; - cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); + cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c index cf93f6419b5142e397747be406138dacf3278a5c..fadff7915dd9cb8df40493a1db52db8a06e3af16 100644 --- a/drivers/clocksource/timer-fttmr010.c +++ b/drivers/clocksource/timer-fttmr010.c @@ -21,7 +21,7 @@ #include /* - * Register definitions for the timers + * Register definitions common for all the timer variants. */ #define TIMER1_COUNT (0x00) #define TIMER1_LOAD (0x04) @@ -36,9 +36,10 @@ #define TIMER3_MATCH1 (0x28) #define TIMER3_MATCH2 (0x2c) #define TIMER_CR (0x30) -#define TIMER_INTR_STATE (0x34) -#define TIMER_INTR_MASK (0x38) +/* + * Control register (TMC30) bit fields for fttmr010/gemini/moxart timers. + */ #define TIMER_1_CR_ENABLE BIT(0) #define TIMER_1_CR_CLOCK BIT(1) #define TIMER_1_CR_INT BIT(2) @@ -53,8 +54,9 @@ #define TIMER_3_CR_UPDOWN BIT(11) /* - * The Aspeed AST2400 moves bits around in the control register - * and lacks bits for setting the timer to count upwards. + * Control register (TMC30) bit fields for aspeed ast2400/ast2500 timers. + * The aspeed timers move bits around in the control register and lacks + * bits for setting the timer to count upwards. */ #define TIMER_1_CR_ASPEED_ENABLE BIT(0) #define TIMER_1_CR_ASPEED_CLOCK BIT(1) @@ -66,6 +68,18 @@ #define TIMER_3_CR_ASPEED_CLOCK BIT(9) #define TIMER_3_CR_ASPEED_INT BIT(10) +/* + * Interrupt status/mask register definitions for fttmr010/gemini/moxart + * timers. + * The registers don't exist and they are not needed on aspeed timers + * because: + * - aspeed timer overflow interrupt is controlled by bits in Control + * Register (TMC30). + * - aspeed timers always generate interrupt when either one of the + * Match registers equals to Status register. 
+ */ +#define TIMER_INTR_STATE (0x34) +#define TIMER_INTR_MASK (0x38) #define TIMER_1_INT_MATCH1 BIT(0) #define TIMER_1_INT_MATCH2 BIT(1) #define TIMER_1_INT_OVERFLOW BIT(2) @@ -80,7 +94,7 @@ struct fttmr010 { void __iomem *base; unsigned int tick_rate; - bool count_down; + bool is_aspeed; u32 t1_enable_val; struct clock_event_device clkevt; #ifdef CONFIG_ARM @@ -130,7 +144,7 @@ static int fttmr010_timer_set_next_event(unsigned long cycles, cr &= ~fttmr010->t1_enable_val; writel(cr, fttmr010->base + TIMER_CR); - if (fttmr010->count_down) { + if (fttmr010->is_aspeed) { /* * ASPEED Timer Controller will load TIMER1_LOAD register * into TIMER1_COUNT register when the timer is re-enabled. @@ -175,16 +189,17 @@ static int fttmr010_timer_set_oneshot(struct clock_event_device *evt) /* Setup counter start from 0 or ~0 */ writel(0, fttmr010->base + TIMER1_COUNT); - if (fttmr010->count_down) + if (fttmr010->is_aspeed) { writel(~0, fttmr010->base + TIMER1_LOAD); - else + } else { writel(0, fttmr010->base + TIMER1_LOAD); - /* Enable interrupt */ - cr = readl(fttmr010->base + TIMER_INTR_MASK); - cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2); - cr |= TIMER_1_INT_MATCH1; - writel(cr, fttmr010->base + TIMER_INTR_MASK); + /* Enable interrupt */ + cr = readl(fttmr010->base + TIMER_INTR_MASK); + cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2); + cr |= TIMER_1_INT_MATCH1; + writel(cr, fttmr010->base + TIMER_INTR_MASK); + } return 0; } @@ -201,9 +216,8 @@ static int fttmr010_timer_set_periodic(struct clock_event_device *evt) writel(cr, fttmr010->base + TIMER_CR); /* Setup timer to fire at 1/HZ intervals. */ - if (fttmr010->count_down) { + if (fttmr010->is_aspeed) { writel(period, fttmr010->base + TIMER1_LOAD); - writel(0, fttmr010->base + TIMER1_MATCH1); } else { cr = 0xffffffff - (period - 1); writel(cr, fttmr010->base + TIMER1_COUNT); @@ -281,23 +295,21 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) } /* - * The Aspeed AST2400 moves bits around in the control register, - * otherwise it works the same. + * The Aspeed timers move bits around in the control register. 
*/ if (is_aspeed) { fttmr010->t1_enable_val = TIMER_1_CR_ASPEED_ENABLE | TIMER_1_CR_ASPEED_INT; - /* Downward not available */ - fttmr010->count_down = true; + fttmr010->is_aspeed = true; } else { fttmr010->t1_enable_val = TIMER_1_CR_ENABLE | TIMER_1_CR_INT; - } - /* - * Reset the interrupt mask and status - */ - writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK); - writel(0, fttmr010->base + TIMER_INTR_STATE); + /* + * Reset the interrupt mask and status + */ + writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK); + writel(0, fttmr010->base + TIMER_INTR_STATE); + } /* * Enable timer 1 count up, timer 2 count up, except on Aspeed, @@ -306,9 +318,8 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) if (is_aspeed) val = TIMER_2_CR_ASPEED_ENABLE; else { - val = TIMER_2_CR_ENABLE; - if (!fttmr010->count_down) - val |= TIMER_1_CR_UPDOWN | TIMER_2_CR_UPDOWN; + val = TIMER_2_CR_ENABLE | TIMER_1_CR_UPDOWN | + TIMER_2_CR_UPDOWN; } writel(val, fttmr010->base + TIMER_CR); @@ -321,7 +332,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) writel(0, fttmr010->base + TIMER2_MATCH1); writel(0, fttmr010->base + TIMER2_MATCH2); - if (fttmr010->count_down) { + if (fttmr010->is_aspeed) { writel(~0, fttmr010->base + TIMER2_LOAD); clocksource_mmio_init(fttmr010->base + TIMER2_COUNT, "FTTMR010-TIMER2", @@ -371,7 +382,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) #ifdef CONFIG_ARM /* Also use this timer for delays */ - if (fttmr010->count_down) + if (fttmr010->is_aspeed) fttmr010->delay_timer.read_current_timer = fttmr010_read_current_timer_down; else diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 62d24690ba0205df997f588207bfe3e9b649c325..9701107806a738e112e7986553f3212da278cd05 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) int irq; struct clk *clk; unsigned long rate; - struct device_node *pri_node; - struct device_node *sec_node; + struct device_node *alias_node; base = of_io_request_and_map(node, 0, "integrator-timer"); if (IS_ERR(base)) @@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) return err; } - pri_node = of_find_node_by_path(path); + alias_node = of_find_node_by_path(path); + + /* + * The pointer is used as an identifier, not as a pointer, so we + * can drop the refcount on the of_node immediately after + * getting it. 
+ */ + of_node_put(alias_node); + + if (node == alias_node) + /* The primary timer lacks IRQ, use as clocksource */ + return integrator_clocksource_init(rate, base); err = of_property_read_string(of_aliases, "arm,timer-secondary", &path); @@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node) return err; } + alias_node = of_find_node_by_path(path); - sec_node = of_find_node_by_path(path); - - if (node == pri_node) - /* The primary timer lacks IRQ, use as clocksource */ - return integrator_clocksource_init(rate, base); + of_node_put(alias_node); - if (node == sec_node) { + if (node == alias_node) { /* The secondary timer will drive the clock event */ irq = irq_of_parse_and_map(node, 0); return integrator_clockevent_init(rate, base, irq); diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c index eb10321f85178bce0bcf2af255e1070e92bf6253..8e7894a026acebb61ba401dc8410951484dcc0de 100644 --- a/drivers/clocksource/timer-mediatek.c +++ b/drivers/clocksource/timer-mediatek.c @@ -277,15 +277,12 @@ static int __init mtk_syst_init(struct device_node *node) ret = timer_of_init(node, &to); if (ret) - goto err; + return ret; clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), TIMER_SYNC_TICKS, 0xffffffff); return 0; -err: - timer_of_cleanup(&to); - return ret; } static int __init mtk_gpt_init(struct device_node *node) @@ -302,7 +299,7 @@ static int __init mtk_gpt_init(struct device_node *node) ret = timer_of_init(node, &to); if (ret) - goto err; + return ret; /* Configure clock source */ mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN); @@ -320,9 +317,6 @@ static int __init mtk_gpt_init(struct device_node *node) mtk_gpt_enable_irq(&to, TIMER_CLK_EVT); return 0; -err: - timer_of_cleanup(&to); - return ret; } TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init); TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init); diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 06ed88a2a8a0d4944937be3884784e773237e732..6e2cb3693ed89272f48dcece6f87fe777bf736a4 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -199,7 +199,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to) } if (!to->clkevt.name) - to->clkevt.name = np->name; + to->clkevt.name = np->full_name; to->np = np; diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c index eed6feff8b5f23673de989932afcd806e858ecfc..30c6f4ce672b3b1ac16645159398c66e3129aae9 100644 --- a/drivers/clocksource/timer-oxnas-rps.c +++ b/drivers/clocksource/timer-oxnas-rps.c @@ -296,4 +296,4 @@ static int __init oxnas_rps_timer_init(struct device_node *np) TIMER_OF_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); TIMER_OF_DECLARE(ox820_rps, - "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init); + "oxsemi,ox820-rps-timer", oxnas_rps_timer_init); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 4cce6b224b87c1104daecfe0f4769344853b8dbf..2e074c66818d67ec0ded259749a635360d0e9e54 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) if (IS_ERR(parent)) return -ENODEV; + /* Bail out if both clocks point to fck */ + if (clk_is_match(parent, timer->fclk)) + return 0; + ret = clk_set_parent(timer->fclk, parent); if (ret < 0) pr_err("%s: failed to set parent\n", __func__); 
@@ -996,5 +1000,4 @@ module_platform_driver(omap_dm_timer_driver); MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Texas Instruments Inc"); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index ed5e42461094476d76bcee450794ab81a7fe6d5a..ad48fd52cb5373d6758e5483d442138de7a18bf5 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; + struct task_struct *parent; __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) @@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task) ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; - ev->event_data.coredump.parent_pid = task->real_parent->pid; - ev->event_data.coredump.parent_tgid = task->real_parent->tgid; + + rcu_read_lock(); + if (pid_alive(task)) { + parent = rcu_dereference(task->real_parent); + ev->event_data.coredump.parent_pid = parent->pid; + ev->event_data.coredump.parent_tgid = parent->tgid; + } + rcu_read_unlock(); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; + struct task_struct *parent; __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) @@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task) ev->event_data.exit.process_tgid = task->tgid; ev->event_data.exit.exit_code = task->exit_code; ev->event_data.exit.exit_signal = task->exit_signal; - ev->event_data.exit.parent_pid = task->real_parent->pid; - ev->event_data.exit.parent_tgid = task->real_parent->tgid; + + rcu_read_lock(); + if (pid_alive(task)) { + parent = rcu_dereference(task->real_parent); + ev->event_data.exit.parent_pid = parent->pid; + ev->event_data.exit.parent_tgid = parent->tgid; + } + rcu_read_unlock(); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index b61f4ec43e0685f6b1ea28035cff16a19a8247d9..ce0a51849f66a8bcb10523265f2bf420f16aa808 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -47,6 +47,7 @@ #include #include #include +#include MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); @@ -61,6 +62,7 @@ enum { #define INTEL_MSR_RANGE (0xffff) #define AMD_MSR_RANGE (0x7) +#define HYGON_MSR_RANGE (0x7) #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) @@ -92,9 +94,12 @@ static bool boost_state(unsigned int cpu) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); msr = lo | ((u64)hi << 32); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); msr = lo | ((u64)hi << 32); @@ -110,9 +115,12 @@ static int boost_set_msr(bool enable) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: + case X86_VENDOR_ZHAOXIN: msr_addr = MSR_IA32_MISC_ENABLE; msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; break; + case X86_VENDOR_HYGON: case X86_VENDOR_AMD: msr_addr = MSR_K7_HWCR; msr_mask = 
MSR_K7_HWCR_CPB_DIS; @@ -225,6 +233,8 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr) if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) msr &= AMD_MSR_RANGE; + else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + msr &= HYGON_MSR_RANGE; else msr &= INTEL_MSR_RANGE; @@ -911,8 +921,10 @@ static void __init acpi_cpufreq_boost_init(void) { int ret; - if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) + if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) { + pr_debug("Boost capabilities not present in the processor\n"); return; + } acpi_cpufreq_driver.set_boost = set_boost; acpi_cpufreq_driver.boost_enabled = boost_state(0); diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index be926d9a66e574604758fb40300504bec9ca3039..0e55be99fadbc544ab6bd905579f41489bcd1a2f 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -21,6 +21,7 @@ #include #include +#include #include "cpufreq_ondemand.h" @@ -111,11 +112,16 @@ static int __init amd_freq_sensitivity_init(void) { u64 val; struct pci_dev *pcidev; + unsigned int pci_vendor; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + pci_vendor = PCI_VENDOR_ID_AMD; + else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + pci_vendor = PCI_VENDOR_ID_HYGON; + else return -ENODEV; - pcidev = pci_get_device(PCI_VENDOR_ID_AMD, + pcidev = pci_get_device(pci_vendor, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); if (!pcidev) { diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 75491fc841a6b7e6b6a621e7bb3c45e215fd466b..0df16eb1eb3cebcac2953fa52f70bf13c1a6d0b7 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void) struct armada_37xx_dvfs *dvfs; struct platform_device *pdev; unsigned long freq; - unsigned int cur_frequency; + unsigned int cur_frequency, base_frequency; struct regmap *nb_pm_base, *avs_base; struct device *cpu_dev; int load_lvl, ret; - struct clk *clk; + struct clk *clk, *parent; nb_pm_base = syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm"); @@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void) return PTR_ERR(clk); } + parent = clk_get_parent(clk); + if (IS_ERR(parent)) { + dev_err(cpu_dev, "Cannot get parent clock for CPU0\n"); + clk_put(clk); + return PTR_ERR(parent); + } + + /* Get parent CPU frequency */ + base_frequency = clk_get_rate(parent); + + if (!base_frequency) { + dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n"); + clk_put(clk); + return -EINVAL; + } + /* Get nominal (current) CPU frequency */ cur_frequency = clk_get_rate(clk); if (!cur_frequency) { @@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void) for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; load_lvl++) { unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000; - freq = cur_frequency / dvfs->divider[load_lvl]; + freq = base_frequency / dvfs->divider[load_lvl]; ret = dev_pm_opp_add(cpu_dev, freq, u_volt); if (ret) goto remove_opp; diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index e6f9cbe5835f96883599d86006c54fd805f4e7e7..c28ec30604694f31f6a005bfed567f74aa7363c8 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -453,6 +453,8 @@ static bool 
brcm_avs_is_firmware_loaded(struct private_data *priv) static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (!policy) + return 0; struct private_data *priv = policy->driver_data; return brcm_avs_get_frequency(priv->base); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 30f3021497304a5abc9868e54df2c41d17b093e3..0a245f1caa956d96f4fbd2f27fe43aa579f125bb 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -42,6 +42,70 @@ */ static struct cppc_cpudata **all_cpu_data; +struct cppc_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_hisi_workaround; + +static struct cppc_workaround_oem_info wa_info[] = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, + unsigned int perf); + +/* + * HISI platform does not support delivered performance counter and + * reference performance counter. It can calculate the performance using the + * platform specific mechanism. We reuse the desired performance register to + * store the real performance calculated by the platform. + */ +static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum) +{ + struct cppc_cpudata *cpudata = all_cpu_data[cpunum]; + u64 desired_perf; + int ret; + + ret = cppc_get_desired_perf(cpunum, &desired_perf); + if (ret < 0) + return -EIO; + + return cppc_cpufreq_perf_to_khz(cpudata, desired_perf); +} + +static void cppc_check_hisi_workaround(void) +{ + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + int i; + + status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); + if (ACPI_FAILURE(status) || !tbl) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_hisi_workaround = true; + break; + } + } + + acpi_put_table(tbl); +} + /* Callback function used to retrieve the max frequency from DMI */ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) { @@ -334,6 +398,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) struct cppc_cpudata *cpu = all_cpu_data[cpunum]; int ret; + if (apply_hisi_workaround) + return hisi_cppc_cpufreq_get_rate(cpunum); + ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); if (ret) return ret; @@ -386,6 +453,8 @@ static int __init cppc_cpufreq_init(void) goto out; } + cppc_check_hisi_workaround(); + ret = cpufreq_register_driver(&cppc_cpufreq_driver); if (ret) goto out; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 0a9ebf00be468e2d78aad853d1dcb7fd0d97558e..e58bfcb1169ebb0acc4d5da303b0754192196130 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -32,6 +32,7 @@ struct private_data { struct device *cpu_dev; struct thermal_cooling_device *cdev; const char *reg_name; + bool have_static_opps; }; static struct freq_attr *cpufreq_dt_attr[] = { @@ -204,6 +205,15 @@ static int cpufreq_init(struct cpufreq_policy *policy) } } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_put_regulator; + } + + priv->reg_name = name; + priv->opp_table = opp_table; + /* * Initialize 
OPP tables for all policy->cpus. They will be shared by * all CPUs which have marked their CPUs shared with OPP bindings. @@ -214,7 +224,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) * * OPPs might be populated at runtime, don't check for error here */ - dev_pm_opp_of_cpumask_add_table(policy->cpus); + if (!dev_pm_opp_of_cpumask_add_table(policy->cpus)) + priv->have_static_opps = true; /* * But we need OPP table to function so if it is not there let's @@ -240,19 +251,10 @@ static int cpufreq_init(struct cpufreq_policy *policy) __func__, ret); } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto out_free_opp; - } - - priv->reg_name = name; - priv->opp_table = opp_table; - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_priv; + goto out_free_opp; } priv->cpu_dev = cpu_dev; @@ -282,10 +284,11 @@ static int cpufreq_init(struct cpufreq_policy *policy) out_free_cpufreq_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); -out_free_priv: - kfree(priv); out_free_opp: - dev_pm_opp_of_cpumask_remove_table(policy->cpus); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(policy->cpus); + kfree(priv); +out_put_regulator: if (name) dev_pm_opp_put_regulators(opp_table); out_put_clk: @@ -300,7 +303,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); + if (priv->have_static_opps) + dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); if (priv->reg_name) dev_pm_opp_put_regulators(priv->opp_table); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index f53fb41efb7bf7dceac8733f3d95c0da614a88dd..4bd9fc287c822d0096d0abe415b36650fabe96ce 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); * SYSFS INTERFACE * *********************************************************************/ static ssize_t show_boost(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); } -static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) +static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { int ret, enable; @@ -907,10 +907,14 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret; + ssize_t ret = -EBUSY; + + if (!fattr->show) + return -EIO; down_read(&policy->rwsem); - ret = fattr->show(policy, buf); + if (likely(!policy_is_inactive(policy))) + ret = fattr->show(policy, buf); up_read(&policy->rwsem); return ret; @@ -921,7 +925,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); - ssize_t ret = -EINVAL; + ssize_t ret = -EBUSY; + + if (!fattr->store) + return -EIO; /* * cpus_read_trylock() is used here to work around a circular lock @@ -932,7 +939,8 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, if (cpu_online(policy->cpu)) { down_write(&policy->rwsem); - ret = fattr->store(policy, buf, count); + if 
(likely(!policy_is_inactive(policy))) + ret = fattr->store(policy, buf, count); up_write(&policy->rwsem); } @@ -959,10 +967,9 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; -static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) +static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu, + struct device *dev) { - struct device *dev = get_cpu_device(cpu); - if (!dev) return; @@ -1103,6 +1110,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) cpufreq_global_kobject, "policy%u", cpu); if (ret) { pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); + kobject_put(&policy->kobj); goto err_free_real_cpus; } @@ -1192,12 +1200,12 @@ static int cpufreq_online(unsigned int cpu) down_write(&policy->rwsem); policy->cpu = cpu; policy->governor = NULL; - up_write(&policy->rwsem); } else { new_policy = true; policy = cpufreq_policy_alloc(cpu); if (!policy) return -ENOMEM; + down_write(&policy->rwsem); } cpumask_copy(policy->cpus, cpumask_of(cpu)); @@ -1215,8 +1223,6 @@ static int cpufreq_online(unsigned int cpu) if (ret) goto out_exit_policy; - down_write(&policy->rwsem); - if (new_policy) { /* related_cpus should at least include policy->cpus. */ cpumask_copy(policy->related_cpus, policy->cpus); @@ -1234,7 +1240,7 @@ static int cpufreq_online(unsigned int cpu) for_each_cpu(j, policy->related_cpus) { per_cpu(cpufreq_cpu_data, j) = policy; - add_cpu_dev_symlink(policy, j); + add_cpu_dev_symlink(policy, j, get_cpu_device(j)); } } else { policy->min = policy->user_policy.min; @@ -1326,13 +1332,15 @@ static int cpufreq_online(unsigned int cpu) for_each_cpu(j, policy->real_cpus) remove_cpu_dev_symlink(policy, get_cpu_device(j)); - up_write(&policy->rwsem); + cpumask_clear(policy->cpus); out_exit_policy: if (cpufreq_driver->exit) cpufreq_driver->exit(policy); out_free_policy: + up_write(&policy->rwsem); + cpufreq_policy_free(policy); return ret; } @@ -1359,7 +1367,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* Create sysfs link on CPU registration */ policy = per_cpu(cpufreq_cpu_data, cpu); if (policy) - add_cpu_dev_symlink(policy, cpu); + add_cpu_dev_symlink(policy, cpu, dev); return 0; } @@ -1530,17 +1538,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) { unsigned int ret_freq = 0; - if (!cpufreq_driver->get) + if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) return ret_freq; ret_freq = cpufreq_driver->get(policy->cpu); /* - * Updating inactive policies is invalid, so avoid doing that. Also - * if fast frequency switching is used with the given policy, the check + * If fast frequency switching is used with the given policy, the check * against policy->cur is pointless, so skip it in that case too. */ - if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) + if (policy->fast_switch_enabled) return ret_freq; if (ret_freq && policy->cur && @@ -1569,10 +1576,7 @@ unsigned int cpufreq_get(unsigned int cpu) if (policy) { down_read(&policy->rwsem); - - if (!policy_is_inactive(policy)) - ret_freq = __cpufreq_get(policy); - + ret_freq = __cpufreq_get(policy); up_read(&policy->rwsem); cpufreq_cpu_put(policy); @@ -2477,6 +2481,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (cpufreq_disabled()) return -ENODEV; + /* + * The cpufreq core depends heavily on the availability of device + * structure, make sure they are available before proceeding further. 
+ */ + if (!get_cpu_device(0)) + return -EPROBE_DEFER; + if (!driver_data || !driver_data->verify || !driver_data->init || !(driver_data->setpolicy || driver_data->target_index || driver_data->target) || @@ -2581,14 +2592,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) } EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); -/* - * Stop cpufreq at shutdown to make sure it isn't holding any locks - * or mutexes when secondary CPUs are halted. - */ -static struct syscore_ops cpufreq_syscore_ops = { - .shutdown = cpufreq_suspend, -}; - struct kobject *cpufreq_global_kobject; EXPORT_SYMBOL(cpufreq_global_kobject); @@ -2600,8 +2603,6 @@ static int __init cpufreq_core_init(void) cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); BUG_ON(!cpufreq_global_kobject); - register_syscore_ops(&cpufreq_syscore_ops); - return 0; } module_param(off, int, 0444); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index f20f20a77d4d37cad47eea9673552478dd1ccf32..6649ed519f40358e920ec5fbf3eb57327f2fcf14 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -28,6 +28,7 @@ static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol struct cs_dbs_tuners { unsigned int down_threshold; unsigned int freq_step; + unsigned int fast_mode; }; /* Conservative governor macros */ @@ -36,6 +37,8 @@ struct cs_dbs_tuners { #define DEF_FREQUENCY_STEP (5) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) +/* 50000 is the recommended hardware supported frequency step, 50MHz */ +#define RECOMMAND_FREQUENCY_STEP (50000) static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners, struct cpufreq_policy *policy) @@ -49,6 +52,46 @@ static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners, return freq_step; } +static unsigned int fast_dbs_update(struct cpufreq_policy *policy, const unsigned int load) +{ + struct policy_dbs_info *policy_dbs = policy->governor_data; + struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); + unsigned int requested_freq = dbs_info->requested_freq; + struct dbs_data *dbs_data = policy_dbs->dbs_data; + unsigned int min_f, max_f; + + /* + * If requested_freq is out of range, it is likely that the limits + * changed in the meantime, so fall back to current frequency in that + * case. + */ + if (requested_freq > policy->max || requested_freq < policy->min) { + requested_freq = policy->cur; + dbs_info->requested_freq = requested_freq; + } + + /* Check for frequency increase */ + if (load > dbs_data->up_threshold) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + policy_dbs->rate_mult = dbs_data->sampling_down_factor; + requested_freq = policy->max; + } else { + /* Calculate the next frequency proportional to load */ + min_f = policy->cpuinfo.min_freq; + max_f = policy->cpuinfo.max_freq; + /* The range of load is 0 ~ 100, divide by 100 for percentage */ + requested_freq = min_f + load * (max_f - min_f) / 100; + + /* No longer fully busy, reset rate_mult */ + policy_dbs->rate_mult = 1; + } + requested_freq = (requested_freq / RECOMMAND_FREQUENCY_STEP) * RECOMMAND_FREQUENCY_STEP; + __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_C); + + return dbs_data->sampling_rate * policy_dbs->rate_mult; +} + /* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency. 
Every sampling_rate * @@ -75,13 +118,19 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy) if (cs_tuners->freq_step == 0) goto out; + /* If seek to reduce performance loss */ + if (cs_tuners->fast_mode == 1) + return fast_dbs_update(policy, load); + /* * If requested_freq is out of range, it is likely that the limits * changed in the meantime, so fall back to current frequency in that * case. */ - if (requested_freq > policy->max || requested_freq < policy->min) + if (requested_freq > policy->max || requested_freq < policy->min) { requested_freq = policy->cur; + dbs_info->requested_freq = requested_freq; + } freq_step = get_freq_step(cs_tuners, policy); @@ -92,7 +141,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy) if (policy_dbs->idle_periods < UINT_MAX) { unsigned int freq_steps = policy_dbs->idle_periods * freq_step; - if (requested_freq > freq_steps) + if (requested_freq > policy->min + freq_steps) requested_freq -= freq_steps; else requested_freq = policy->min; @@ -242,12 +291,30 @@ static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf, return count; } +static ssize_t store_fast_mode(struct gov_attr_set *attr_set, + const char *buf, size_t count) +{ + struct dbs_data *dbs_data = to_dbs_data(attr_set); + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || (input != 0 && input != 1)) + return -EINVAL; + + cs_tuners->fast_mode = input; + return count; +} + gov_show_one_common(sampling_rate); gov_show_one_common(sampling_down_factor); gov_show_one_common(up_threshold); gov_show_one_common(ignore_nice_load); gov_show_one(cs, down_threshold); gov_show_one(cs, freq_step); +gov_show_one(cs, fast_mode); gov_attr_rw(sampling_rate); gov_attr_rw(sampling_down_factor); @@ -255,6 +322,7 @@ gov_attr_rw(up_threshold); gov_attr_rw(ignore_nice_load); gov_attr_rw(down_threshold); gov_attr_rw(freq_step); +gov_attr_rw(fast_mode); static struct attribute *cs_attributes[] = { &sampling_rate.attr, @@ -263,6 +331,7 @@ static struct attribute *cs_attributes[] = { &down_threshold.attr, &ignore_nice_load.attr, &freq_step.attr, + &fast_mode.attr, NULL }; @@ -291,6 +360,7 @@ static int cs_init(struct dbs_data *dbs_data) tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->freq_step = DEF_FREQUENCY_STEP; + tuners->fast_mode = 0; dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; dbs_data->ignore_nice_load = 0; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 6d53f7d9fc7a92d415b507f9b8facdb3f4143b17..e5b01abd1da91863ad4147aa3f36747318d958e3 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -114,12 +114,23 @@ void gov_update_cpu_data(struct dbs_data *dbs_data) } EXPORT_SYMBOL_GPL(gov_update_cpu_data); +struct cpumask cpus_for_counting_load; +bool powersave_first; +EXPORT_SYMBOL_GPL(powersave_first); + +static int __init enable_powersave_first(char *str) +{ + powersave_first = true; + return 0; +} +early_param("powersave_first_for_ondemand", enable_powersave_first); + unsigned int dbs_update(struct cpufreq_policy *policy) { struct policy_dbs_info *policy_dbs = policy->governor_data; struct dbs_data *dbs_data = policy_dbs->dbs_data; unsigned int ignore_nice = dbs_data->ignore_nice_load; - unsigned int max_load = 0, idle_periods = UINT_MAX; + unsigned int max_load = 0, idle_periods = UINT_MAX, total_load = 0; unsigned 
int sampling_rate, io_busy, j; /* @@ -136,8 +147,13 @@ unsigned int dbs_update(struct cpufreq_policy *policy) */ io_busy = dbs_data->io_is_busy; + if (unlikely(powersave_first)) + cpumask_copy(&cpus_for_counting_load, cpu_online_mask); + else + cpumask_copy(&cpus_for_counting_load, policy->cpus); + /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { + for_each_cpu(j, &cpus_for_counting_load) { struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); u64 update_time, cur_idle_time; unsigned int idle_time, time_elapsed; @@ -222,13 +238,18 @@ unsigned int dbs_update(struct cpufreq_policy *policy) idle_periods = periods; } + total_load += load; + if (load > max_load) max_load = load; } policy_dbs->idle_periods = idle_periods; - return max_load; + if (unlikely(powersave_first)) + return total_load / cpumask_weight(cpu_online_mask); + else + return max_load; } EXPORT_SYMBOL_GPL(dbs_update); @@ -459,6 +480,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) /* Failure, so roll back. */ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret); + kobject_put(&dbs_data->attr_set.kobj); + policy->governor_data = NULL; if (!have_governor_per_policy()) diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index 52841f807a7eb329d9ce8a976dca405d1b29bbba..45fdf30cade395fe41256c442ac0846c97908e6f 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -77,8 +77,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l if (count) return count; - kobject_put(&attr_set->kobj); mutex_destroy(&attr_set->update_lock); + kobject_put(&attr_set->kobj); return 0; } EXPORT_SYMBOL_GPL(gov_attr_set_put); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 6b423eebfd5dbefccc2f20e52825271d0b2a7114..c695d4f489818158757cad92660f89bc2b641f92 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -129,6 +129,56 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); } +/* the value is in kHz */ +#define CPU_FREQ_400M (400*1000) +#define CPU_FREQ_600M (600*1000) +#define CPU_FREQ_800M (800*1000) +#define CPU_FREQ_900M (900*1000) + +static unsigned int get_next_powersave_first_freq(unsigned int load, + struct cpufreq_policy *policy) +{ + unsigned int freq_next, min_f, max_f; + static unsigned int freq_pre; + static bool flag; + + min_f = policy->cpuinfo.min_freq; + max_f = policy->cpuinfo.max_freq; + + if (load <= 40) { + /* next freq will maintain 90% cpu load */ + freq_next = (load * policy->cur / 100) * 10 / 9; + if (freq_next < CPU_FREQ_400M) + freq_next = CPU_FREQ_400M; + + freq_pre = freq_next; + flag = true; + return freq_next; + } + + if (flag && load <= 90) { + freq_next = freq_pre; + } else { + if (load > 95) + freq_next = min_f + load * (max_f - min_f) / 100; + else if (load > 90 && (policy->cur >= CPU_FREQ_400M + && policy->cur < CPU_FREQ_600M)) + freq_next = CPU_FREQ_600M; + else if (load > 90 && (policy->cur >= CPU_FREQ_600M + && policy->cur < CPU_FREQ_800M)) + freq_next = CPU_FREQ_800M; + else if (load > 90 && (policy->cur >= CPU_FREQ_800M + && policy->cur < CPU_FREQ_900M)) + freq_next = CPU_FREQ_900M; + else + freq_next = min_f + load * (max_f - min_f) / 100; + + flag = false; + } + + return freq_next; +} + /* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to 
increase frequency. Else, we adjust the frequency @@ -153,10 +203,15 @@ static void od_update(struct cpufreq_policy *policy) } else { /* Calculate the next frequency proportional to load */ unsigned int freq_next, min_f, max_f; - - min_f = policy->cpuinfo.min_freq; - max_f = policy->cpuinfo.max_freq; - freq_next = min_f + load * (max_f - min_f) / 100; + extern bool powersave_first; + + if (likely(!powersave_first)) { + min_f = policy->cpuinfo.min_freq; + max_f = policy->cpuinfo.max_freq; + freq_next = min_f + load * (max_f - min_f) / 100; + } else { + freq_next = get_next_powersave_first_freq(load, policy); + } /* No longer fully busy, reset rate_mult */ policy_dbs->rate_mult = 1; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b2ff423ad7f82f6252d2c3ff7a0a38811ca2449b..d8c3595e90236e5f9d87ca9b5f55a7cbdb76ccdc 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -159,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) /* Ensure the arm clock divider is what we expect */ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000); if (ret) { + int ret1; + dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); - regulator_set_voltage_tol(arm_reg, volt_old, 0); + ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0); + if (ret1) + dev_warn(cpu_dev, + "failed to restore vddarm voltage: %d\n", ret1); return ret; } @@ -290,20 +296,32 @@ static void imx6q_opp_check_speed_grading(struct device *dev) #define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2 #define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3 -static void imx6ul_opp_check_speed_grading(struct device *dev) +static int imx6ul_opp_check_speed_grading(struct device *dev) { - struct device_node *np; - void __iomem *base; u32 val; + int ret = 0; - np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); - if (!np) - return; + if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { + ret = nvmem_cell_read_u32(dev, "speed_grade", &val); + if (ret) + return ret; + } else { + struct device_node *np; + void __iomem *base; + + np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); + if (!np) + return -ENOENT; + + base = of_iomap(np, 0); + of_node_put(np); + if (!base) { + dev_err(dev, "failed to map ocotp\n"); + return -EFAULT; + } - base = of_iomap(np, 0); - if (!base) { - dev_err(dev, "failed to map ocotp\n"); - goto put_node; + val = readl_relaxed(base + OCOTP_CFG3); + iounmap(base); } /* @@ -314,7 +332,6 @@ static void imx6ul_opp_check_speed_grading(struct device *dev) * 2b'11: 900000000Hz on i.MX6ULL only; * We need to set the max speed of ARM according to fuse map. 
*/ - val = readl_relaxed(base + OCOTP_CFG3); val >>= OCOTP_CFG3_SPEED_SHIFT; val &= 0x3; @@ -334,9 +351,7 @@ static void imx6ul_opp_check_speed_grading(struct device *dev) dev_warn(dev, "failed to disable 900MHz OPP\n"); } - iounmap(base); -put_node: - of_node_put(np); + return ret; } static int imx6q_cpufreq_probe(struct platform_device *pdev) @@ -394,10 +409,18 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) } if (of_machine_is_compatible("fsl,imx6ul") || - of_machine_is_compatible("fsl,imx6ull")) - imx6ul_opp_check_speed_grading(cpu_dev); - else + of_machine_is_compatible("fsl,imx6ull")) { + ret = imx6ul_opp_check_speed_grading(cpu_dev); + if (ret == -EPROBE_DEFER) + return ret; + if (ret) { + dev_err(cpu_dev, "failed to read ocotp: %d\n", + ret); + return ret; + } + } else { imx6q_opp_check_speed_grading(cpu_dev); + } /* Because we have added the OPPs here, we must free them */ free_opp = true; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b6a1aadaff9f353611c3c64c712aa0300626a5af..96e2a0875798e4e97e3f0b58b227856d944d2259 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -373,11 +373,27 @@ static void intel_pstate_set_itmt_prio(int cpu) } } } -#else + +static int intel_pstate_get_cppc_guranteed(int cpu) +{ + struct cppc_perf_caps cppc_perf; + int ret; + + ret = cppc_get_perf_caps(cpu, &cppc_perf); + if (ret) + return ret; + + if (cppc_perf.guaranteed_perf) + return cppc_perf.guaranteed_perf; + + return cppc_perf.nominal_perf; +} + +#else /* CONFIG_ACPI_CPPC_LIB */ static void intel_pstate_set_itmt_prio(int cpu) { } -#endif +#endif /* CONFIG_ACPI_CPPC_LIB */ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) { @@ -459,7 +475,7 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) acpi_processor_unregister_performance(policy->cpu); } -#else +#else /* CONFIG_ACPI */ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) { } @@ -472,7 +488,14 @@ static inline bool intel_pstate_acpi_pm_profile_server(void) { return false; } -#endif +#endif /* CONFIG_ACPI */ + +#ifndef CONFIG_ACPI_CPPC_LIB +static int intel_pstate_get_cppc_guranteed(int cpu) +{ + return -ENOTSUPP; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ static inline void update_turbo_state(void) { @@ -699,9 +722,29 @@ static ssize_t show_energy_performance_preference( cpufreq_freq_attr_rw(energy_performance_preference); +static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) +{ + struct cpudata *cpu; + u64 cap; + int ratio; + + ratio = intel_pstate_get_cppc_guranteed(policy->cpu); + if (ratio <= 0) { + rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); + ratio = HWP_GUARANTEED_PERF(cap); + } + + cpu = all_cpu_data[policy->cpu]; + + return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling); +} + +cpufreq_freq_attr_ro(base_frequency); + static struct freq_attr *hwp_cpufreq_attrs[] = { &energy_performance_preference, &energy_performance_available_preferences, + &base_frequency, NULL, }; @@ -833,7 +876,7 @@ static void intel_pstate_update_policies(void) /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ - (struct kobject *kobj, struct attribute *attr, char *buf) \ + (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", global.object); \ } @@ -842,7 +885,7 @@ static ssize_t intel_pstate_show_status(char *buf); static int 
intel_pstate_update_status(const char *buf, size_t size); static ssize_t show_status(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { ssize_t ret; @@ -853,7 +896,7 @@ static ssize_t show_status(struct kobject *kobj, return ret; } -static ssize_t store_status(struct kobject *a, struct attribute *b, +static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { char *p = memchr(buf, '\n', count); @@ -867,7 +910,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b, } static ssize_t show_turbo_pct(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total, no_turbo, turbo_pct; @@ -893,7 +936,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj, } static ssize_t show_num_pstates(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total; @@ -914,7 +957,7 @@ static ssize_t show_num_pstates(struct kobject *kobj, } static ssize_t show_no_turbo(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { ssize_t ret; @@ -936,7 +979,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return ret; } -static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, +static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -983,7 +1026,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1013,7 +1056,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1045,12 +1088,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, } static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", hwp_boost); } -static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b, +static ssize_t store_hwp_dynamic_boost(struct kobject *a, + struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1778,7 +1822,7 @@ static const struct pstate_funcs knl_funcs = { static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), - ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs), + ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), @@ -1795,7 +1839,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), - ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs), ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), {} }; diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index 
c2dd43f3f5d8a3092e6847f18d124f0631bdf065..8d63a6dc8383ce2964e31a53070d4bb0a1a5fcf8 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); if (IS_ERR(priv.cpu_clk)) { dev_err(priv.dev, "Unable to get cpuclk\n"); - return PTR_ERR(priv.cpu_clk); + err = PTR_ERR(priv.cpu_clk); + goto out_node; } err = clk_prepare_enable(priv.cpu_clk); if (err) { dev_err(priv.dev, "Unable to prepare cpuclk\n"); - return err; + goto out_node; } kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; @@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) goto out_ddr; } - of_node_put(np); - np = NULL; - err = cpufreq_register_driver(&kirkwood_cpufreq_driver); - if (!err) - return 0; + if (err) { + dev_err(priv.dev, "Failed to register cpufreq driver\n"); + goto out_powersave; + } - dev_err(priv.dev, "Failed to register cpufreq driver\n"); + of_node_put(np); + return 0; +out_powersave: clk_disable_unprepare(priv.powersave_clk); out_ddr: clk_disable_unprepare(priv.ddr_clk); out_cpu: clk_disable_unprepare(priv.cpu_clk); +out_node: of_node_put(np); return err; diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 75dfbd2a58ea6f1df8db4e89a8d47883e29ecfea..a0620c9ec064957c963f31a665a92106def6c929 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -145,10 +145,19 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) int err = -ENODEV; cpu = of_get_cpu_node(policy->cpu, NULL); - if (!cpu) goto out; + max_freqp = of_get_property(cpu, "clock-frequency", NULL); + of_node_put(cpu); + if (!max_freqp) { + err = -EINVAL; + goto out; + } + + /* we need the freq in kHz */ + max_freq = *max_freqp / 1000; + dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); if (!dn) dn = of_find_compatible_node(NULL, NULL, @@ -184,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) } pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - if (!max_freqp) { - err = -EINVAL; - goto out_unmap_sdcpwr; - } - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - pr_debug("max clock-frequency is at %u kHz\n", max_freq); pr_debug("initializing frequency table\n"); @@ -211,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); -out_unmap_sdcpwr: - iounmap(sdcpwr_mapbase); - out_unmap_sdcasr: iounmap(sdcasr_mapbase); out: diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 61ae06ca008e746ac7d166f502ac212ab86148fc..e225edb5c35934ada65cd17d6f046e727ae1fdaa 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode) volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); + of_node_put(volt_gpio_np); if (!voltage_gpio){ pr_err("missing cpu-vcore-select gpio\n"); return 1; @@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode) if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); + of_node_put(volt_gpio_np); pvr = mfspr(SPRN_PVR); has_cpu_l2lve = !((pvr & 0xf00) == 0x100); diff --git a/drivers/cpufreq/powernv-cpufreq.c 
b/drivers/cpufreq/powernv-cpufreq.c index bf6519cf64bc2e040b6e004f1771e47e5691c7e0..5fff39dae625777b25663f06c5183289ef4cd8c2 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -1042,9 +1042,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = { static int init_chip_info(void) { - unsigned int chip[256]; + unsigned int *chip; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; + int ret = 0; + + chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); @@ -1056,8 +1061,10 @@ static int init_chip_info(void) } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); - if (!chips) - return -ENOMEM; + if (!chips) { + ret = -ENOMEM; + goto free_and_return; + } for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; @@ -1067,7 +1074,9 @@ static int init_chip_info(void) per_cpu(chip_info, cpu) = &chips[i]; } - return 0; +free_and_return: + kfree(chip); + return ret; } static inline void clean_chip_info(void) diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index 41a0f0be3f9ff12cb38211b3b0eae5ec56880892..8414c3a4ea08cdef5d58c7e4e522e8bba9a23d70 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) if (!cbe_get_cpu_pmd_regs(policy->cpu) || !cbe_get_cpu_mic_tm_regs(policy->cpu)) { pr_info("invalid CBE regs pointers for cpufreq\n"); + of_node_put(cpu); return -EINVAL; } diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 46254e5839829cd25971efe1b8d91879e9bb3fc7..74e0e0c20c4625b9baea26d8aa29408db3a2ddcc 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) return ret; } -static void __init pxa_cpufreq_init_voltages(void) +static void pxa_cpufreq_init_voltages(void) { vcc_core = regulator_get(NULL, "vcc_core"); if (IS_ERR(vcc_core)) { @@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) return 0; } -static void __init pxa_cpufreq_init_voltages(void) { } +static void pxa_cpufreq_init_voltages(void) { } #endif static void find_freq_tables(struct cpufreq_frequency_table **freq_table, diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index 2a3675c24032bc8059c4c591698d6a7b5218cf1d..a472b814058f7649d4ae5049b38f19e1b285f6cb 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) { - struct opp_table *opp_tables[NR_CPUS] = {0}; + struct opp_table **opp_tables; enum _msm8996_version msm8996_version; struct nvmem_cell *speedbin_nvmem; struct device_node *np; @@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) } kfree(speedbin); + opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL); + if (!opp_tables) + return -ENOMEM; + for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); if (NULL == cpu_dev) { @@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); - if (!IS_ERR(cpufreq_dt_pdev)) + if (!IS_ERR(cpufreq_dt_pdev)) { + 
platform_set_drvdata(pdev, opp_tables); return 0; + } ret = PTR_ERR(cpufreq_dt_pdev); dev_err(cpu_dev, "Failed to register platform device\n"); @@ -163,13 +169,23 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) break; dev_pm_opp_put_supported_hw(opp_tables[cpu]); } + kfree(opp_tables); return ret; } static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) { + struct opp_table **opp_tables = platform_get_drvdata(pdev); + unsigned int cpu; + platform_device_unregister(cpufreq_dt_pdev); + + for_each_possible_cpu(cpu) + dev_pm_opp_put_supported_hw(opp_tables[cpu]); + + kfree(opp_tables); + return 0; } diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 50b1551ba8942d43d0a3f28824c83d0e0b837b26..3f06934394869bb9f7756a2bf2ddaef5f470fbad 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) int ret; struct scmi_data *priv = policy->driver_data; struct scmi_perf_ops *perf_ops = handle->perf_ops; - u64 freq = policy->freq_table[index].frequency * 1000; + u64 freq = policy->freq_table[index].frequency; - ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); + ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); if (!ret) arch_set_freq_scale(policy->related_cpus, freq, policy->cpuinfo.max_freq); diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index 43530254201a8b3a5f98fdcb032ea6c3b635bb96..4bb154f6c54cdcef9ca19ed3a071e1a2f674b474 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); + of_node_put(np); + return 0; out_switch_to_pllx: diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 3f0e2a14895a03dc0c4bc1d01c032d0f50a4b4ae..22b53bf268179ad8c2e873dd9637af61adf2f54d 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = { {}, }; +static const struct of_device_id *ti_cpufreq_match_node(void) +{ + struct device_node *np; + const struct of_device_id *match; + + np = of_find_node_by_path("/"); + match = of_match_node(ti_cpufreq_of_match, np); + of_node_put(np); + + return match; +} + static int ti_cpufreq_probe(struct platform_device *pdev) { u32 version[VERSION_COUNT]; - struct device_node *np; const struct of_device_id *match; struct opp_table *ti_opp_table; struct ti_cpufreq_data *opp_data; const char * const reg_names[] = {"vdd", "vbb"}; int ret; - np = of_find_node_by_path("/"); - match = of_match_node(ti_cpufreq_of_match, np); - of_node_put(np); + match = dev_get_platdata(&pdev->dev); if (!match) return -ENODEV; @@ -290,7 +299,14 @@ static int ti_cpufreq_probe(struct platform_device *pdev) static int ti_cpufreq_init(void) { - platform_device_register_simple("ti-cpufreq", -1, NULL, 0); + const struct of_device_id *match; + + /* Check to ensure we are on a compatible platform */ + match = ti_cpufreq_match_node(); + if (match) + platform_device_register_data(NULL, "ti-cpufreq", -1, match, + sizeof(*match)); + return 0; } module_init(ti_cpufreq_init); diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 7e48eb5bf0a7a1aebb958157c38a0d6b1228a437..b379635f193679f19b2b82ec9eeaf7b0b83835ae 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -23,6 +23,17 @@ 
config CPU_IDLE_GOV_LADDER config CPU_IDLE_GOV_MENU bool "Menu governor (for tickless system)" +config CPU_IDLE_GOV_HALTPOLL + bool "Haltpoll governor (for virtualized systems)" + depends on KVM_GUEST || ARM64 + help + This governor implements haltpoll idle state selection, to be + used in conjunction with the haltpoll cpuidle driver, allowing + for polling for a certain amount of time before entering idle + state. + + Some virtualized workloads benefit from using it. + config DT_IDLE_STATES bool @@ -41,6 +52,15 @@ depends on PPC source "drivers/cpuidle/Kconfig.powerpc" endmenu +config HALTPOLL_CPUIDLE + tristate "Halt poll cpuidle driver" + depends on (X86 && KVM_GUEST) || ARM64 + default y + help + This option enables halt poll cpuidle driver, which allows to poll + before halting in the guest (more efficient than polling in the + host via halt_poll_ns for some scenarios). + endif config ARCH_NEEDS_CPU_IDLE_COUPLED diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 9d7176cee3d376195a6bbe7ebbe67956e4a5ca52..240227474cd987a07f3547fa755fcbbcfba1ac40 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -7,6 +7,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o +obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o ################################################################################## # ARM SoC drivers diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 073557f433eb1be630a7f64b8cfc9930771eff2a..df564d78321617670b947eafe81aa76050db7e6f 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -103,13 +103,6 @@ static int __init arm_idle_init_cpu(int cpu) goto out_kfree_drv; } - ret = cpuidle_register_driver(drv); - if (ret) { - if (ret != -EBUSY) - pr_err("Failed to register cpuidle driver\n"); - goto out_kfree_drv; - } - /* * Call arch CPU operations in order to initialize * idle states suspend back-end specific data @@ -117,15 +110,20 @@ static int __init arm_idle_init_cpu(int cpu) ret = arm_cpuidle_init(cpu); /* - * Skip the cpuidle device initialization if the reported + * Allow the initialization to continue for other CPUs, if the reported * failure is a HW misconfiguration/breakage (-ENXIO). */ - if (ret == -ENXIO) - return 0; - if (ret) { pr_err("CPU %d failed to init idle CPU ops\n", cpu); - goto out_unregister_drv; + ret = ret == -ENXIO ? 
0 : ret; + goto out_kfree_drv; + } + + ret = cpuidle_register_driver(drv); + if (ret) { + if (ret != -EBUSY) + pr_err("Failed to register cpuidle driver\n"); + goto out_kfree_drv; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index db2ede565f1aab2228d818d38ee41a8a358a3fc8..b44476a1b7ad8a8806e63f48897b383e736d1677 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -167,6 +167,7 @@ static int __init bl_idle_init(void) { int ret; struct device_node *root = of_find_node_by_path("/"); + const struct of_device_id *match_id; if (!root) return -ENODEV; @@ -174,7 +175,11 @@ static int __init bl_idle_init(void) /* * Initialize the driver just for a compliant set of machines */ - if (!of_match_node(compatible_machine_match, root)) + match_id = of_match_node(compatible_machine_match, root); + + of_node_put(root); + + if (!match_id) return -ENODEV; if (!mcpm_is_available()) diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c new file mode 100644 index 0000000000000000000000000000000000000000..ae4f06f0507987c5db02a454b7886e764ef82806 --- /dev/null +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * cpuidle driver for haltpoll governor. + * + * Copyright 2019 Red Hat, Inc. and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Authors: Marcelo Tosatti + */ + +#include +#include +#include +#include +#include +#include +#include + +static bool force __read_mostly; +module_param(force, bool, 0444); +MODULE_PARM_DESC(force, "Load unconditionally"); + +static struct cpuidle_device_wrapper __percpu *haltpoll_cpuidle_dev_wrap; +static enum cpuhp_state haltpoll_hp_state; + +static int default_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + if (current_clr_polling_and_test()) { + local_irq_enable(); + return index; + } + arch_cpu_idle(); + return index; +} + +static struct cpuidle_driver_wrapper haltpoll_driver_wrapper = { + .drv = { + .name = "haltpoll", + .states = { + { /* entry 0 is for polling */ }, + { + .enter = default_enter_idle, + .exit_latency = 1, + .target_residency = 1, + .power_usage = -1, + .name = "haltpoll idle", + .desc = + "default architecture idle", + }, + }, + .safe_state_index = 0, + .state_count = 2, + }, + .governor = "haltpoll", +}; + +static int haltpoll_cpu_online(unsigned int cpu) +{ + struct cpuidle_device *dev; + struct cpuidle_device_wrapper *devw; + + devw = per_cpu_ptr(haltpoll_cpuidle_dev_wrap, cpu); + dev = &(devw->dev); + if (!dev->registered) { + dev->cpu = cpu; + if (cpuidle_register_device(dev)) { + pr_notice("cpuidle_register_device %d failed!\n", cpu); + return -EIO; + } + arch_haltpoll_enable(cpu); + } + + return 0; +} + +static int haltpoll_cpu_offline(unsigned int cpu) +{ + struct cpuidle_device *dev; + struct cpuidle_device_wrapper *devw; + + devw = per_cpu_ptr(haltpoll_cpuidle_dev_wrap, cpu); + dev = &(devw->dev); + if (dev->registered) { + arch_haltpoll_disable(cpu); + cpuidle_unregister_device(dev); + } + + return 0; +} + +static void haltpoll_uninit(void) +{ + if (haltpoll_hp_state) + cpuhp_remove_state(haltpoll_hp_state); + cpuidle_unregister_driver(&(haltpoll_driver_wrapper.drv)); + + free_percpu(haltpoll_cpuidle_dev_wrap); + haltpoll_cpuidle_dev_wrap = NULL; +} + +static bool haltpoll_want(void) +{ + return 
kvm_para_has_hint(KVM_HINTS_REALTIME); +} + +static int __init haltpoll_init(void) +{ + int ret; + struct cpuidle_driver *drv = &(haltpoll_driver_wrapper.drv); + + cpuidle_poll_state_init(drv); + + if (!force && (!kvm_para_available() || !haltpoll_want())) + return -ENODEV; + + ret = cpuidle_register_driver(drv); + if (ret == 0) + haltpoll_switch_governor(drv); + else if (ret < 0) + return ret; + + haltpoll_cpuidle_dev_wrap = alloc_percpu(struct cpuidle_device_wrapper); + if (haltpoll_cpuidle_dev_wrap == NULL) { + cpuidle_unregister_driver(drv); + return -ENOMEM; + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpuidle/haltpoll:online", + haltpoll_cpu_online, haltpoll_cpu_offline); + if (ret < 0) { + haltpoll_uninit(); + } else { + haltpoll_hp_state = ret; + ret = 0; + } + + return ret; +} + +static void __exit haltpoll_exit(void) +{ + haltpoll_uninit(); +} + +module_init(haltpoll_init); +module_exit(haltpoll_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marcelo Tosatti "); diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 9e56bc411061fd573bf06b56acf8dc4e6235ecd8..74c247972bb363e6cf952773ad86b9a48b26b19d 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -247,7 +247,13 @@ static int pseries_idle_probe(void) return -ENODEV; if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - if (lppaca_shared_proc(get_lppaca())) { + /* + * Use local_paca instead of get_lppaca() since + * preemption is not disabled, and it is not required in + * fact, since lppaca_ptr does not need to be the value + * associated to the current CPU, it can be from any CPU. + */ + if (lppaca_shared_proc(local_paca->lppaca_ptr)) { cpuidle_state_table = shared_states; max_idle_state = ARRAY_SIZE(shared_states); } else { diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 6df894d65d9e270efe800ff0c371d8c775183f08..b9990189afbda18174daba608bb50a638ba4b16d 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -140,21 +140,24 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, * executing it contains RCU usage regarded as invalid in the idle * context, so tell RCU about that. */ - RCU_NONIDLE(tick_freeze()); + tick_freeze(); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping * suspended is generally unsafe. */ stop_critical_timings(); + rcu_idle_enter(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. */ - RCU_NONIDLE(tick_unfreeze()); + rcu_idle_exit(); + tick_unfreeze(); start_critical_timings(); time_end = ns_to_ktime(local_clock()); @@ -223,16 +226,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, /* Take note of the planned idle state. 
*/ sched_idle_set_state(target_state); - trace_cpu_idle_rcuidle(index, dev->cpu); + trace_cpu_idle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); stop_critical_timings(); + rcu_idle_enter(); entered_state = target_state->enter(dev, drv, index); + rcu_idle_exit(); start_critical_timings(); sched_clock_idle_wakeup_event(); time_end = ns_to_ktime(local_clock()); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ sched_idle_set_state(NULL); @@ -318,6 +323,46 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index) cpuidle_curr_governor->reflect(dev, index); } +/** + * cpuidle_poll_time - return amount of time to poll for, + * governors can override dev->poll_limit_ns if necessary + * + * @drv: the cpuidle driver tied with the cpu + * @dev: the cpuidle device + * + */ +u64 cpuidle_poll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + u64 limit_ns = TICK_NSEC; + int i; + + for (i = 1; i < drv->state_count; i++) { + if (drv->states[i].disabled || dev->states_usage[i].disable) + continue; + + limit_ns = (u64)drv->states[i].target_residency + * NSEC_PER_USEC; + break; + } + + return limit_ns; +} + +u64 cpuidle_haltpoll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + struct cpuidle_device_wrapper *devw = + container_of(dev, struct cpuidle_device_wrapper, dev); + + if (devw->poll_limit_ns) + return devw->poll_limit_ns; + + devw->poll_limit_ns = cpuidle_poll_time(drv, dev); + + return devw->poll_limit_ns; +} + /** * cpuidle_install_idle_handler - installs the cpuidle idle loop handler */ @@ -493,8 +538,8 @@ static void __cpuidle_device_init(struct cpuidle_device *dev) */ static int __cpuidle_register_device(struct cpuidle_device *dev) { - int ret; struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + int i, ret; if (!try_module_get(drv->owner)) return -EINVAL; @@ -502,6 +547,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); + for (i = 0; i < drv->state_count; i++) { + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + ret = cpuidle_coupled_register_device(dev); if (ret) __cpuidle_unregister_device(dev); @@ -702,4 +752,5 @@ static int __init cpuidle_init(void) } module_param(off, int, 0444); +module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444); core_initcall(cpuidle_init); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 2965ab32a583f769ea919c30427eb7255b04909a..9f336af17fa6038abe7b939b6c4718fd6965c540 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -7,7 +7,9 @@ #define __DRIVER_CPUIDLE_H /* For internal use only */ +extern char param_governor[]; extern struct cpuidle_governor *cpuidle_curr_governor; +extern struct cpuidle_governor *cpuidle_prev_governor; extern struct list_head cpuidle_governors; extern struct list_head cpuidle_detected_devices; extern struct mutex cpuidle_lock; @@ -21,6 +23,7 @@ extern void cpuidle_install_idle_handler(void); extern void cpuidle_uninstall_idle_handler(void); /* governors */ +extern struct cpuidle_governor *cpuidle_find_governor(const char *str); extern int cpuidle_switch_governor(struct cpuidle_governor *gov); /* sysfs */ diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 
dc32f34e68d9379dceee6f04795fe81473a8f9ca..deb800be4902aadab5c1bf6ef32bdc1a9563c24c 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -62,24 +62,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv) * __cpuidle_set_driver - set per CPU driver variables for the given driver. * @drv: a valid pointer to a struct cpuidle_driver * - * For each CPU in the driver's cpumask, unset the registered driver per CPU - * to @drv. - * - * Returns 0 on success, -EBUSY if the CPUs have driver(s) already. + * Returns 0 on success, -EBUSY if any CPU in the cpumask have a driver + * different from drv already. */ static inline int __cpuidle_set_driver(struct cpuidle_driver *drv) { int cpu; for_each_cpu(cpu, drv->cpumask) { + struct cpuidle_driver *old_drv; - if (__cpuidle_get_cpu_driver(cpu)) { - __cpuidle_unset_driver(drv); + old_drv = __cpuidle_get_cpu_driver(cpu); + if (old_drv && old_drv != drv) return -EBUSY; - } + } + for_each_cpu(cpu, drv->cpumask) per_cpu(cpuidle_drivers, cpu) = drv; - } return 0; } @@ -242,6 +241,26 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) __cpuidle_unset_driver(drv); } +void haltpoll_switch_governor(struct cpuidle_driver *drv) +{ + struct cpuidle_governor *gov; + struct cpuidle_driver_wrapper *drvw; + + drvw = container_of(drv, struct cpuidle_driver_wrapper, drv); + if (!strlen(param_governor) && drvw->governor && + (cpuidle_get_driver() == drv)) { + mutex_lock(&cpuidle_lock); + gov = cpuidle_find_governor(drvw->governor); + if (gov) { + cpuidle_prev_governor = cpuidle_curr_governor; + if (cpuidle_switch_governor(gov) < 0) + cpuidle_prev_governor = NULL; + } + mutex_unlock(&cpuidle_lock); + } +} +EXPORT_SYMBOL_GPL(haltpoll_switch_governor); + /** * cpuidle_register_driver - registers a driver * @drv: a pointer to a valid struct cpuidle_driver @@ -262,6 +281,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) return ret; } + EXPORT_SYMBOL_GPL(cpuidle_register_driver); /** @@ -274,9 +294,21 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver); */ void cpuidle_unregister_driver(struct cpuidle_driver *drv) { + bool enabled = (cpuidle_get_driver() == drv); + spin_lock(&cpuidle_driver_lock); __cpuidle_unregister_driver(drv); spin_unlock(&cpuidle_driver_lock); + + if (!enabled) + return; + + mutex_lock(&cpuidle_lock); + if (cpuidle_prev_governor) { + if (!cpuidle_switch_governor(cpuidle_prev_governor)) + cpuidle_prev_governor = NULL; + } + mutex_unlock(&cpuidle_lock); } EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 53342b7f10105b605bbc59014e3a1178c496923b..ea3c59d3fdadd66d6be531c6e0aa5eb0ee073d49 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -224,6 +224,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, * also be 0 on platforms with missing DT idle states or legacy DT * configuration predating the DT idle states bindings. 
*/ - return i; + return state_idx - start_idx; } EXPORT_SYMBOL_GPL(dt_init_idle_driver); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 9fed1b82929278d08d3aa8ef26c77ebd705fbd1f..349be85446256ac87a172dbd6e56065f71903027 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -11,20 +11,24 @@ #include #include #include +#include #include #include "cpuidle.h" +char param_governor[CPUIDLE_NAME_LEN]; + LIST_HEAD(cpuidle_governors); struct cpuidle_governor *cpuidle_curr_governor; +struct cpuidle_governor *cpuidle_prev_governor; /** - * __cpuidle_find_governor - finds a governor of the specified name + * cpuidle_find_governor - finds a governor of the specified name * @str: the name * * Must be called with cpuidle_lock acquired. */ -static struct cpuidle_governor * __cpuidle_find_governor(const char *str) +struct cpuidle_governor *cpuidle_find_governor(const char *str) { struct cpuidle_governor *gov; @@ -84,11 +88,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) return -ENODEV; mutex_lock(&cpuidle_lock); - if (__cpuidle_find_governor(gov->name) == NULL) { + if (cpuidle_find_governor(gov->name) == NULL) { ret = 0; list_add_tail(&gov->governor_list, &cpuidle_governors); if (!cpuidle_curr_governor || - cpuidle_curr_governor->rating < gov->rating) + !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) || + (cpuidle_curr_governor->rating < gov->rating && + strncasecmp(param_governor, cpuidle_curr_governor->name, + CPUIDLE_NAME_LEN))) cpuidle_switch_governor(gov); } mutex_unlock(&cpuidle_lock); diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile index 1b512722689f01f0871fb91b72e76cf52dac8f93..88773655737212f8d4e8ebae58480430b8503e0e 100644 --- a/drivers/cpuidle/governors/Makefile +++ b/drivers/cpuidle/governors/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o +obj-$(CONFIG_CPU_IDLE_GOV_HALTPOLL) += haltpoll.o diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c new file mode 100644 index 0000000000000000000000000000000000000000..3490ba7de14151d8a9ca0609889e4676589c261e --- /dev/null +++ b/drivers/cpuidle/governors/haltpoll.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * haltpoll.c - haltpoll idle governor + * + * Copyright 2019 Red Hat, Inc. and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + * Authors: Marcelo Tosatti + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int guest_halt_poll_ns __read_mostly = 200000; +module_param(guest_halt_poll_ns, uint, 0644); + +/* division factor to shrink halt_poll_ns */ +static unsigned int guest_halt_poll_shrink __read_mostly = 2; +module_param(guest_halt_poll_shrink, uint, 0644); + +/* multiplication factor to grow per-cpu poll_limit_ns */ +static unsigned int guest_halt_poll_grow __read_mostly = 2; +module_param(guest_halt_poll_grow, uint, 0644); + +/* value in us to start growing per-cpu halt_poll_ns */ +static unsigned int guest_halt_poll_grow_start __read_mostly = 50000; +module_param(guest_halt_poll_grow_start, uint, 0644); + +/* allow shrinking guest halt poll */ +static bool guest_halt_poll_allow_shrink __read_mostly = true; +module_param(guest_halt_poll_allow_shrink, bool, 0644); + +static bool enable __read_mostly; +module_param(enable, bool, 0444); +MODULE_PARM_DESC(enable, "Load unconditionally"); + +/** + * haltpoll_select - selects the next idle state to enter + * @drv: cpuidle driver containing state data + * @dev: the CPU + * @stop_tick: indication on whether or not to stop the tick + */ +static int haltpoll_select(struct cpuidle_driver *drv, + struct cpuidle_device *dev, + bool *stop_tick) +{ + struct cpuidle_device_wrapper *devw = + container_of(dev, struct cpuidle_device_wrapper, dev); + int latency_req = cpuidle_governor_latency_req(dev->cpu); + + if (!drv->state_count || latency_req == 0) { + *stop_tick = false; + return 0; + } + + if (devw->poll_limit_ns == 0) + return 1; + + /* Last state was poll? */ + if (devw->last_state_idx == 0) { + /* Halt if no event occurred on poll window */ + if (dev->poll_time_limit == true) + return 1; + + *stop_tick = false; + /* Otherwise, poll again */ + return 0; + } + + *stop_tick = false; + /* Last state was halt: poll */ + return 0; +} + +static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us) +{ + struct cpuidle_device_wrapper *devw = + container_of(dev, struct cpuidle_device_wrapper, dev); + unsigned int val; + u64 block_ns = block_us*NSEC_PER_USEC; + + /* Grow cpu_halt_poll_us if + * cpu_halt_poll_us < block_ns < guest_halt_poll_us + */ + if (block_ns > devw->poll_limit_ns && block_ns <= guest_halt_poll_ns) { + val = devw->poll_limit_ns * guest_halt_poll_grow; + + if (val < guest_halt_poll_grow_start) + val = guest_halt_poll_grow_start; + if (val > guest_halt_poll_ns) + val = guest_halt_poll_ns; + + devw->poll_limit_ns = val; + } else if (block_ns > guest_halt_poll_ns && + guest_halt_poll_allow_shrink) { + unsigned int shrink = guest_halt_poll_shrink; + + val = devw->poll_limit_ns; + if (shrink == 0) + val = 0; + else + val /= shrink; + devw->poll_limit_ns = val; + } +} + +/** + * haltpoll_reflect - update variables and update poll time + * @dev: the CPU + * @index: the index of actual entered state + */ +static void haltpoll_reflect(struct cpuidle_device *dev, int index) +{ + struct cpuidle_device_wrapper *devw = + container_of(dev, struct cpuidle_device_wrapper, dev); + devw->last_state_idx = index; + + if (index != 0) + adjust_poll_limit(dev, dev->last_residency); +} + +/** + * haltpoll_enable_device - scans a CPU's states and does setup + * @drv: cpuidle driver + * @dev: the CPU + */ +static int haltpoll_enable_device(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + struct cpuidle_device_wrapper *devw = + container_of(dev, struct cpuidle_device_wrapper, dev); + + 
devw->poll_limit_ns = 0; + + return 0; +} + +static struct cpuidle_governor haltpoll_governor = { + .name = "haltpoll", + .rating = 9, + .enable = haltpoll_enable_device, + .select = haltpoll_select, + .reflect = haltpoll_reflect, +}; + +static int __init init_haltpoll(void) +{ + if (kvm_para_available() || enable) + return cpuidle_register_governor(&haltpoll_governor); + + return 0; +} + +postcore_initcall(init_haltpoll); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index e26a40971b263ed5f5cb113c938f4d9f84109862..955e7762dcd1751663a6a7baf420ee01c5d54ea4 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -197,17 +197,19 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); * of points is below a threshold. If it is... then use the * average of these 8 points as the estimated value. */ -static unsigned int get_typical_interval(struct menu_device *data) +static unsigned int get_typical_interval(struct menu_device *data, + unsigned int predicted_us) { int i, divisor; - unsigned int max, thresh, avg; + unsigned int max, min, thresh, avg; uint64_t sum, variance; - thresh = UINT_MAX; /* Discard outliers above this value */ + thresh = INT_MAX; /* Discard outliers above this value */ again: /* First calculate the average of past intervals */ + min = UINT_MAX; max = 0; sum = 0; divisor = 0; @@ -218,8 +220,18 @@ static unsigned int get_typical_interval(struct menu_device *data) divisor++; if (value > max) max = value; + if (value < min) + min = value; } } + + /* + * If the result of the computation is going to be discarded anyway, + * avoid the computation altogether. + */ + if (min >= predicted_us) + return UINT_MAX; + if (divisor == INTERVALS) avg = sum >> INTERVAL_SHIFT; else @@ -319,7 +331,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, data->correction_factor[data->bucket], RESOLUTION * DECAY); - expected_interval = get_typical_interval(data); + expected_interval = get_typical_interval(data, data->predicted_us); expected_interval = min(expected_interval, data->next_timer_us); first_idx = 0; @@ -403,7 +415,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, s->target_residency <= ktime_to_us(delta_next)) idx = i; - goto out; + return idx; } if (s->exit_latency > latency_req) { /* @@ -450,10 +462,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, } } -out: - data->last_state_idx = idx; - - return data->last_state_idx; + return idx; } /** @@ -512,6 +521,16 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * duration predictor do a better job next time. */ measured_us = 9 * MAX_INTERESTING / 10; + } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) && + dev->poll_time_limit) { + /* + * The CPU exited the "polling" state due to a time limit, so + * the idle duration prediction leading to the selection of that + * state was inaccurate. If a better prediction had been made, + * the CPU might have been woken up from idle by the next timer. + * Assume that to be the case. 
+ */ + measured_us = data->next_timer_us; } else { /* measured value */ measured_us = cpuidle_get_last_residency(dev); diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index 3f86d23c592ec0cdce4b6e8019a02256f4146d78..a73c09464429f4c3a574c6813de26ae03c8f3aa3 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -8,8 +8,10 @@ #include #include #include +#ifdef CONFIG_ARM64 +#include +#endif -#define POLL_IDLE_TIME_LIMIT (TICK_NSEC / 16) #define POLL_IDLE_RELAX_COUNT 200 static int __cpuidle poll_idle(struct cpuidle_device *dev, @@ -17,9 +19,17 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, { u64 time_start = local_clock(); + dev->poll_time_limit = false; + local_irq_enable(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; + u64 limit; + + if (drv->name && !strcmp(drv->name, "haltpoll")) + limit = cpuidle_haltpoll_time(drv, dev); + else + limit = cpuidle_poll_time(drv, dev); while (!need_resched()) { cpu_relax(); @@ -27,8 +37,10 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, continue; loop_count = 0; - if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT) + if (local_clock() - time_start > limit) { + dev->poll_time_limit = true; break; + } } } current_clr_polling(); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index e754c7aae7f7bba331459f2500c7339e2321a7ba..76fcd45eadb50f71b050d491e18b1828d39c84a4 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -292,6 +292,14 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ return sprintf(buf, "%s\n", state->_name);\ } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? 
"disabled" : "enabled"); +} + define_show_state_function(exit_latency) define_show_state_function(target_residency) define_show_state_function(power_usage) @@ -310,6 +318,7 @@ define_one_state_ro(power, show_state_power_usage); define_one_state_ro(usage, show_state_usage); define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -320,6 +329,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_usage.attr, &attr_time.attr, &attr_disable.attr, + &attr_default_status.attr, NULL }; @@ -328,6 +338,7 @@ struct cpuidle_state_kobj { struct cpuidle_state_usage *state_usage; struct completion kobj_unregister; struct kobject kobj; + struct cpuidle_device *device; }; #ifdef CONFIG_SUSPEND @@ -385,6 +396,7 @@ static inline void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *k #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) #define kobj_to_state(k) (kobj_to_state_obj(k)->state) #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) +#define kobj_to_device(k) (kobj_to_state_obj(k)->device) #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr, @@ -462,11 +474,13 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) } kobj->state = &drv->states[i]; kobj->state_usage = &device->states_usage[i]; + kobj->device = device; init_completion(&kobj->kobj_unregister); ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &kdev->kobj, "state%d", i); if (ret) { + kobject_put(&kobj->kobj); kfree(kobj); goto error_state; } @@ -598,6 +612,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, &kdev->kobj, "driver"); if (ret) { + kobject_put(&kdrv->kobj); kfree(kdrv); return ret; } @@ -685,17 +700,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) if (!kdev) return -ENOMEM; kdev->dev = dev; - dev->kobj_dev = kdev; init_completion(&kdev->kobj_unregister); error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (error) { + kobject_put(&kdev->kobj); kfree(kdev); return error; } + dev->kobj_dev = kdev; kobject_uevent(&kdev->kobj, KOBJ_ADD); return 0; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index a8c4ce07fc9d635661652d1b96675d3cabbf2343..e352428c6fa05b17fdaca72e787901ea2234e28b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -10,6 +10,32 @@ menuconfig CRYPTO_HW if CRYPTO_HW +config CRYPTO_DEV_ZHAOXIN_SM3 + tristate "Zhaoxin GMI driver for SM3 algorithm" + depends on CRYPTO && X86 + select CRYPTO_HASH + help + Use Zhaoxin GMI for SM3 algorithm. + + Available in ZX-C+ and newer CPUs. + + If unsure say M. The compiled module will be + called zhaoxin_gmi_sm3. + +config CRYPTO_DEV_ZHAOXIN_SM4 + tristate "Zhaoxin GMI driver for SM4 algorithm" + depends on CRYPTO && X86 + select CRYPTO_SIMD + select CRYPTO_SKCIPHER + select CRYPTO_ALGAPI + help + Use Zhaoxin GMI for SM4 algorithm. + + Available in ZX-C+ and newer CPUs. + + If unsure say M. The compiled module will be + called zhaoxin_gmi_sm4. + config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" depends on X86 && !UML @@ -49,6 +75,45 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. 
The compiled module will be called padlock-sha. +config CRYPTO_DEV_ZHAOXIN + tristate "Support for Zhaoxin ACE" + depends on X86 && !UML + help + Some Zhaoxin processors come with an integrated crypto engine + (so called Zhaoxin ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. + +config CRYPTO_DEV_ZHAOXIN_AES + tristate "Zhaoxin ACE driver for AES algorithm" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use Zhaoxin ACE for AES algorithm. + + Available in Zhaoxin CPUs. + + If unsure say M. The compiled module will be + called zhaoxin-aes. + +config CRYPTO_DEV_ZHAOXIN_SHA + tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use Zhaoxin ACE for SHA1/SHA256 algorithms. + + Available in Zhaoxin processors. + + If unsure say M. The compiled module will be + called zhaoxin-sha. + config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" depends on X86_32 && PCI @@ -681,6 +746,7 @@ config CRYPTO_DEV_BCM_SPU depends on ARCH_BCM_IPROC depends on MAILBOX default m + select CRYPTO_AUTHENC select CRYPTO_DES select CRYPTO_MD5 select CRYPTO_SHA1 diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c23396f32c8a74f8c723842b19c8c7c0bc618fb8..9805083d5d56a8ff4d3881606488e2c9f0e90fe4 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ @@ -46,4 +48,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SM3) += zhaoxin-gmi-sm3.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SM4) += zhaoxin-gmi-sm4.o obj-y += hisilicon/ diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index f5c07498ea4f08541b47040f3356bb00cfbc35ff..1d87deca32ed55be2290e186073b6ed183804566 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, } static inline int crypto4xx_crypt(struct skcipher_request *req, - const unsigned int ivlen, bool decrypt) + const unsigned int ivlen, bool decrypt, + bool check_blocksize) { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); __le32 iv[AES_IV_SIZE]; + if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) + return -EINVAL; + if (ivlen) crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); @@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, ctx->sa_len, 0, NULL); } -int crypto4xx_encrypt_noiv(struct skcipher_request *req) +int crypto4xx_encrypt_noiv_block(struct skcipher_request *req) +{ + return crypto4xx_crypt(req, 0, false, true); +} + +int 
crypto4xx_encrypt_iv_stream(struct skcipher_request *req) { - return crypto4xx_crypt(req, 0, false); + return crypto4xx_crypt(req, AES_IV_SIZE, false, false); } -int crypto4xx_encrypt_iv(struct skcipher_request *req) +int crypto4xx_decrypt_noiv_block(struct skcipher_request *req) { - return crypto4xx_crypt(req, AES_IV_SIZE, false); + return crypto4xx_crypt(req, 0, true, true); } -int crypto4xx_decrypt_noiv(struct skcipher_request *req) +int crypto4xx_decrypt_iv_stream(struct skcipher_request *req) { - return crypto4xx_crypt(req, 0, true); + return crypto4xx_crypt(req, AES_IV_SIZE, true, false); } -int crypto4xx_decrypt_iv(struct skcipher_request *req) +int crypto4xx_encrypt_iv_block(struct skcipher_request *req) { - return crypto4xx_crypt(req, AES_IV_SIZE, true); + return crypto4xx_crypt(req, AES_IV_SIZE, false, true); +} + +int crypto4xx_decrypt_iv_block(struct skcipher_request *req) +{ + return crypto4xx_crypt(req, AES_IV_SIZE, true, true); } /** @@ -141,9 +155,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, /* Setup SA */ sa = ctx->sa_in; - set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ? - SA_SAVE_IV : SA_NOT_SAVE_IV), - SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ? + SA_NOT_SAVE_IV : SA_SAVE_IV), + SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ? + SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE), SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT, @@ -162,6 +177,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); sa = ctx->sa_out; sa->sa_command_0.bf.dir = DIR_OUTBOUND; + /* + * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT. + * it's the DIR_(IN|OUT)BOUND that matters + */ + sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT; return 0; } @@ -272,8 +292,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt) return ret; } - return encrypt ? crypto4xx_encrypt_iv(req) - : crypto4xx_decrypt_iv(req); + return encrypt ? crypto4xx_encrypt_iv_stream(req) + : crypto4xx_decrypt_iv_stream(req); } static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 6eaec9ba0f68baafbb233bc3a670282d20be098a..68d5ea818b6c014c1bbe9d49c3f458eafc927df5 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -373,12 +373,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) dma_alloc_coherent(dev->core_dev->device, PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, &dev->scatter_buffer_pa, GFP_ATOMIC); - if (!dev->scatter_buffer_va) { - dma_free_coherent(dev->core_dev->device, - sizeof(struct ce_sd) * PPC4XX_NUM_SD, - dev->sdr, dev->sdr_pa); + if (!dev->scatter_buffer_va) return -ENOMEM; - } for (i = 0; i < PPC4XX_NUM_SD; i++) { dev->sdr[i].ptr = dev->scatter_buffer_pa + @@ -712,7 +708,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req, size_t offset_to_sr_ptr; u32 gd_idx = 0; int tmp; - bool is_busy; + bool is_busy, force_sd; + + /* + * There's a very subtile/disguised "bug" in the hardware that + * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption + * of the hardware spec: + * *drum roll* the AES/(T)DES OFB and CFB modes are listed as + * operation modes for >>> "Block ciphers" <<<. 
+ * + * To workaround this issue and stop the hardware from causing + * "overran dst buffer" on crypttexts that are not a multiple + * of 16 (AES_BLOCK_SIZE), we force the driver to use the + * scatter buffers. + */ + force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB + || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB) + && (datalen % AES_BLOCK_SIZE); /* figure how many gd are needed */ tmp = sg_nents_for_len(src, assoclen + datalen); @@ -730,7 +742,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, } /* figure how many sd are needed */ - if (sg_is_last(dst)) { + if (sg_is_last(dst) && force_sd == false) { num_sd = 0; } else { if (datalen > PPC4XX_SD_BUFFER_SIZE) { @@ -805,9 +817,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req, pd->sa_len = sa_len; pd_uinfo = &dev->pdr_uinfo[pd_entry]; - pd_uinfo->async_req = req; pd_uinfo->num_gd = num_gd; pd_uinfo->num_sd = num_sd; + pd_uinfo->dest_va = dst; + pd_uinfo->async_req = req; if (iv_len) memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len); @@ -826,7 +839,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req, /* get first gd we are going to use */ gd_idx = fst_gd; pd_uinfo->first_gd = fst_gd; - pd_uinfo->num_gd = num_gd; gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); pd->src = gd_dma; /* enable gather */ @@ -863,17 +875,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req, * Indicate gather array is not used */ pd_uinfo->first_gd = 0xffffffff; - pd_uinfo->num_gd = 0; } - if (sg_is_last(dst)) { + if (!num_sd) { /* * we know application give us dst a whole piece of memory * no need to use scatter ring. */ pd_uinfo->using_sd = 0; pd_uinfo->first_sd = 0xffffffff; - pd_uinfo->num_sd = 0; - pd_uinfo->dest_va = dst; sa->sa_command_0.bf.scatter = 0; pd->dest = (u32)dma_map_page(dev->core_dev->device, sg_page(dst), dst->offset, @@ -887,9 +896,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, nbytes = datalen; sa->sa_command_0.bf.scatter = 1; pd_uinfo->using_sd = 1; - pd_uinfo->dest_va = dst; pd_uinfo->first_sd = fst_sd; - pd_uinfo->num_sd = num_sd; sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); pd->dest = sd_dma; /* setup scatter descriptor */ @@ -1142,8 +1149,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_cbc, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, + .encrypt = crypto4xx_encrypt_iv_block, + .decrypt = crypto4xx_decrypt_iv_block, .init = crypto4xx_sk_init, .exit = crypto4xx_sk_exit, } }, @@ -1162,8 +1169,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_cfb, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, + .encrypt = crypto4xx_encrypt_iv_stream, + .decrypt = crypto4xx_decrypt_iv_stream, .init = crypto4xx_sk_init, .exit = crypto4xx_sk_exit, } }, @@ -1175,7 +1182,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_module = THIS_MODULE, }, @@ -1195,7 +1202,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_module = 
THIS_MODULE, }, @@ -1215,15 +1222,15 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = crypto4xx_setkey_aes_ecb, - .encrypt = crypto4xx_encrypt_noiv, - .decrypt = crypto4xx_decrypt_noiv, + .encrypt = crypto4xx_encrypt_noiv_block, + .decrypt = crypto4xx_decrypt_noiv_block, .init = crypto4xx_sk_init, .exit = crypto4xx_sk_exit, } }, @@ -1234,7 +1241,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_module = THIS_MODULE, }, @@ -1242,8 +1249,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_ofb, - .encrypt = crypto4xx_encrypt_iv, - .decrypt = crypto4xx_decrypt_iv, + .encrypt = crypto4xx_encrypt_iv_stream, + .decrypt = crypto4xx_decrypt_iv_stream, .init = crypto4xx_sk_init, .exit = crypto4xx_sk_exit, } }, diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index e2ca56722f077242a0a356d14e15638db48ab49c..21a6bbcedc55db7741b770b91ab33e3a4750d86b 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -179,10 +179,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_encrypt_ctr(struct skcipher_request *req); int crypto4xx_decrypt_ctr(struct skcipher_request *req); -int crypto4xx_encrypt_iv(struct skcipher_request *req); -int crypto4xx_decrypt_iv(struct skcipher_request *req); -int crypto4xx_encrypt_noiv(struct skcipher_request *req); -int crypto4xx_decrypt_noiv(struct skcipher_request *req); +int crypto4xx_encrypt_iv_stream(struct skcipher_request *req); +int crypto4xx_decrypt_iv_stream(struct skcipher_request *req); +int crypto4xx_encrypt_iv_block(struct skcipher_request *req); +int crypto4xx_decrypt_iv_block(struct skcipher_request *req); +int crypto4xx_encrypt_noiv_block(struct skcipher_request *req); +int crypto4xx_decrypt_noiv_block(struct skcipher_request *req); int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c index 5e63742b0d22f09cd35f26d0334b3eed69bf3825..8a3ed40312061f9e877c2219602a177a3491d5a0 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.c +++ b/drivers/crypto/amcc/crypto4xx_trng.c @@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev) /* Find the TRNG device node and map it */ trng = of_find_matching_node(NULL, ppc4xx_trng_match); - if (!trng || !of_device_is_available(trng)) + if (!trng || !of_device_is_available(trng)) { + of_node_put(trng); return; + } dev->trng_base = of_iomap(trng, 0); of_node_put(trng); @@ -109,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev) return; err_out: - of_node_put(trng); iounmap(dev->trng_base); kfree(rng); dev->trng_base = NULL; diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 
801aeab5ab1e6898c8eb172a3a95a1daf989584b..53a78035381d5c9098b882993fdebe89e19dcfa4 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -148,7 +148,7 @@ struct atmel_aes_xts_ctx { u32 key2[AES_KEYSIZE_256 / sizeof(u32)]; }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) struct atmel_aes_authenc_ctx { struct atmel_aes_base_ctx base; struct atmel_sha_authenc_ctx *auth; @@ -160,7 +160,7 @@ struct atmel_aes_reqctx { u32 lastc[AES_BLOCK_SIZE / sizeof(u32)]; }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) struct atmel_aes_authenc_reqctx { struct atmel_aes_reqctx base; @@ -489,13 +489,36 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) return (dd->flags & AES_FLAGS_ENCRYPT); } -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); #endif +static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + if (req->nbytes < ivsize) + return; + + if (rctx->mode & AES_FLAGS_ENCRYPT) { + scatterwalk_map_and_copy(req->info, req->dst, + req->nbytes - ivsize, ivsize, 0); + } else { + if (req->src == req->dst) + memcpy(req->info, rctx->lastc, ivsize); + else + scatterwalk_map_and_copy(req->info, req->src, + req->nbytes - ivsize, + ivsize, 0); + } +} + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->ctx->is_aead) atmel_aes_authenc_complete(dd, err); #endif @@ -503,26 +526,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) clk_disable(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; - if (!dd->ctx->is_aead) { - struct ablkcipher_request *req = - ablkcipher_request_cast(dd->areq); - struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); - struct crypto_ablkcipher *ablkcipher = - crypto_ablkcipher_reqtfm(req); - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - - if (rctx->mode & AES_FLAGS_ENCRYPT) { - scatterwalk_map_and_copy(req->info, req->dst, - req->nbytes - ivsize, ivsize, 0); - } else { - if (req->src == req->dst) { - memcpy(req->info, rctx->lastc, ivsize); - } else { - scatterwalk_map_and_copy(req->info, req->src, - req->nbytes - ivsize, ivsize, 0); - } - } - } + if (!dd->ctx->is_aead) + atmel_aes_set_iv_as_last_ciphertext_block(dd); if (dd->is_async) dd->areq->complete(dd->areq, err); @@ -1128,10 +1133,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) rctx->mode = mode; if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { - int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - scatterwalk_map_and_copy(rctx->lastc, req->src, - (req->nbytes - ivsize), ivsize, 0); + if (req->nbytes >= ivsize) + scatterwalk_map_and_copy(rctx->lastc, req->src, + req->nbytes - ivsize, + ivsize, 0); } return atmel_aes_handle_queue(dd, &req->base); @@ -1976,7 +1983,7 @@ static struct crypto_alg aes_xts_alg = { } }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* authenc aead functions */ static int 
atmel_aes_authenc_start(struct atmel_aes_dev *dd); @@ -2463,7 +2470,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->caps.has_authenc) for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) crypto_unregister_aead(&aes_authenc_algs[i]); @@ -2510,7 +2517,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) goto err_aes_xts_alg; } -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->caps.has_authenc) { for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) { err = crypto_register_aead(&aes_authenc_algs[i]); @@ -2522,7 +2529,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) return 0; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* i = ARRAY_SIZE(aes_authenc_algs); */ err_aes_authenc_alg: for (j = 0; j < i; j++) @@ -2713,7 +2720,7 @@ static int atmel_aes_probe(struct platform_device *pdev) atmel_aes_get_cap(aes_dd); -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { err = -EPROBE_DEFER; goto iclk_unprepare; diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h index 2a60d1224143a6e150ccc8ca36bf7e2ecafb0172..7f6742d35dd5a12ebf5696b28abcab4c0f26c105 100644 --- a/drivers/crypto/atmel-authenc.h +++ b/drivers/crypto/atmel-authenc.h @@ -23,7 +23,7 @@ #ifndef __ATMEL_AUTHENC_H__ #define __ATMEL_AUTHENC_H__ -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) #include #include diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 8a19df2fba6a3e11d7bbe770a0089d75bdf30017..ef125d4be8fc4e65ca45baf6d7ad1abca40e7d22 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2215,7 +2215,7 @@ static struct ahash_alg sha_hmac_algs[] = { }, }; -#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC +#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) /* authenc functions */ static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 7f07a5085e9bdbb866d08402a947b295c8c16466..fdcdc751d03bdd400457ee39493a5c349a8bc81b 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags { struct artpec6_crypto_req_common { struct list_head list; + struct list_head complete_in_progress; struct artpec6_crypto_dma_descriptors *dma; struct crypto_async_request *req; void (*complete)(struct crypto_async_request *req); @@ -2046,7 +2047,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) return artpec6_crypto_dma_map_descs(common); } -static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, + struct list_head *completions) { struct artpec6_crypto_req_common *req; @@ -2057,7 +2059,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) list_move_tail(&req->list, &ac->pending); artpec6_crypto_start_dma(req); - req->req->complete(req->req, -EINPROGRESS); + list_add_tail(&req->complete_in_progress, completions); } /* @@ -2087,6 +2089,11 @@ static void artpec6_crypto_task(unsigned long data) struct artpec6_crypto *ac = (struct artpec6_crypto *)data; struct artpec6_crypto_req_common *req; struct artpec6_crypto_req_common *n; + struct 
list_head complete_done; + struct list_head complete_in_progress; + + INIT_LIST_HEAD(&complete_done); + INIT_LIST_HEAD(&complete_in_progress); if (list_empty(&ac->pending)) { pr_debug("Spurious IRQ\n"); @@ -2120,19 +2127,30 @@ static void artpec6_crypto_task(unsigned long data) pr_debug("Completing request %p\n", req); - list_del(&req->list); + list_move_tail(&req->list, &complete_done); artpec6_crypto_dma_unmap_all(req); artpec6_crypto_copy_bounce_buffers(req); ac->pending_count--; artpec6_crypto_common_destroy(req); - req->complete(req->req); } - artpec6_crypto_process_queue(ac); + artpec6_crypto_process_queue(ac, &complete_in_progress); spin_unlock_bh(&ac->queue_lock); + + /* Perform the completion callbacks without holding the queue lock + * to allow new request submissions from the callbacks. + */ + list_for_each_entry_safe(req, n, &complete_done, list) { + req->complete(req->req); + } + + list_for_each_entry_safe(req, n, &complete_in_progress, + complete_in_progress) { + req->req->complete(req->req, -EINPROGRESS); + } } static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 2d1f1db9f807448c45bc228d7c4f500d3ac6152c..4a2fc97a2d1863caf1d1a5d87568d8a0280b22a8 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2514,6 +2514,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, static int ahash_hmac_init(struct ahash_request *req) { + int ret; struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); @@ -2523,7 +2524,9 @@ static int ahash_hmac_init(struct ahash_request *req) flow_log("ahash_hmac_init()\n"); /* init the context as a hash */ - ahash_init(req); + ret = ahash_init(req); + if (ret) + return ret; if (!spu_no_incr_hash(ctx)) { /* SPU-M can do incr hashing but needs sw for outer HMAC */ @@ -2845,44 +2848,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - const u8 *origkey = key; - const unsigned int origkeylen = keylen; - - int ret = 0; + struct crypto_authenc_keys keys; + int ret; flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, keylen); flow_dump(" key: ", key, keylen); - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) goto badkey; - param = RTA_DATA(rta); - ctx->enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < ctx->enckeylen) - goto badkey; - if (ctx->enckeylen > MAX_KEY_SIZE) + if (keys.enckeylen > MAX_KEY_SIZE || + keys.authkeylen > MAX_KEY_SIZE) goto badkey; - ctx->authkeylen = keylen - ctx->enckeylen; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; - if (ctx->authkeylen > MAX_KEY_SIZE) - goto badkey; - - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); /* May end up padding auth key. So make sure it's zeroed. 
*/ memset(ctx->authkey, 0, sizeof(ctx->authkey)); - memcpy(ctx->authkey, key, ctx->authkeylen); + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: @@ -2890,7 +2877,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, u32 tmp[DES_EXPKEY_WORDS]; u32 flags = CRYPTO_TFM_RES_WEAK_KEY; - if (des_ekey(tmp, key) == 0) { + if (des_ekey(tmp, keys.enckey) == 0) { if (crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY) { crypto_aead_set_flags(cipher, flags); @@ -2905,7 +2892,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, break; case CIPHER_ALG_3DES: if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { - const u32 *K = (const u32 *)key; + const u32 *K = (const u32 *)keys.enckey; u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || @@ -2956,9 +2943,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; - ret = - crypto_aead_setkey(ctx->fallback_cipher, origkey, - origkeylen); + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); if (ret) { flow_log(" fallback setkey() returned:%d\n", ret); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; @@ -4652,12 +4637,16 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg) hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { - hash->setkey = ahash_setkey; hash->init = ahash_init; hash->update = ahash_update; hash->final = ahash_final; hash->finup = ahash_finup; hash->digest = ahash_digest; + if ((driver_alg->auth_info.alg == HASH_ALG_AES) && + ((driver_alg->auth_info.mode == HASH_MODE_XCBC) || + (driver_alg->auth_info.mode == HASH_MODE_CMAC))) { + hash->setkey = ahash_setkey; + } } else { hash->setkey = ahash_hmac_setkey; hash->init = ahash_hmac_init; diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index bf7ac621c591dcad6737c42edb06d9392c27c8f9..0f6023347cc89c610f251a2884c7635cfe1b5e4a 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -506,7 +506,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len, if (hash_iv_len) { packet_log(" Hash IV Length %u bytes\n", hash_iv_len); packet_dump(" hash IV: ", ptr, hash_iv_len); - ptr += ciph_key_len; + ptr += hash_iv_len; } if (ciph_iv_len) { diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ec40f991e6c63c4e6be98df8fc964312bc9afbf2..1907945f82b787bf4baa49fa4aaf818d458b4839 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -887,6 +887,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); #ifdef DEBUG @@ -911,10 +912,11 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, /* * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. This is used e.g. by the CTS mode. + * ciphertext block when running in CBC mode. 
*/ - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, - ivsize, 0); + if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC) + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - + ivsize, ivsize, 0); /* In case initial IV was generated, copy it in GIVCIPHER request */ if (edesc->iv_dir == DMA_FROM_DEVICE) { @@ -1005,6 +1007,7 @@ static void init_aead_job(struct aead_request *req, if (unlikely(req->src != req->dst)) { if (edesc->dst_nents == 1) { dst_dma = sg_dma_address(req->dst); + out_options = 0; } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * @@ -1650,10 +1653,11 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) /* * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. + * ciphertext block when running in CBC mode. */ - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, - ivsize, 0); + if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC) + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - + ivsize, ivsize, 0); /* Create and submit job descriptor*/ init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index a408edd84f34634b8cae6bfa6af2035c8788394e..edacf9b39b638ce543a20eec9b0259654c11e088 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, const bool is_qi, int era) { u32 geniv, moveiv; + u32 *wait_cmd; /* Note: Context registers are saved. */ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); @@ -604,6 +605,14 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* + * Wait for IV transfer (ofifo -> class2) to finish before starting + * ciphertext transfer (ofifo -> external memory). 
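The caam fix above restricts the IV write-back to CBC: as the updated comment says, the crypto API expects req->info to hold the last ciphertext block when running in CBC mode so back-to-back requests can chain, and doing the copy unconditionally clobbered the IV for other modes. A small sketch of the guarded copy, with a hypothetical mode enum standing in for the driver's OP_ALG_AAI_* encoding:

    #include <crypto/scatterwalk.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    /* Hypothetical mode enum standing in for the driver's algorithm flags. */
    enum cipher_mode { MODE_ECB, MODE_CBC, MODE_CTR };

    /*
     * After a cipher operation, CBC callers expect the IV buffer to contain
     * the last ciphertext block; other modes must leave it untouched.
     */
    static void update_iv_if_cbc(enum cipher_mode mode, u8 *iv,
                                 struct scatterlist *ciphertext,
                                 unsigned int nbytes, unsigned int ivsize)
    {
        if (mode != MODE_CBC || nbytes < ivsize)
            return;

        /* Copy the final ivsize bytes of ciphertext into the IV buffer. */
        scatterwalk_map_and_copy(iv, ciphertext, nbytes - ivsize, ivsize, 0);
    }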
+ */ + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_cmd); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index a917af5776ce160dba8d5208f6ff9a4874d5998c..05516b0a4240c379b02da04ab5760453abcdd68d 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h @@ -12,7 +12,7 @@ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ) #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 43975ab5f09c12c391a8830da71584d76076dda8..f5fd00065650ab6525c75aafd45f176a5783f6b3 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -118,6 +118,7 @@ struct caam_hash_ctx { struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; + int ctx_dma_len; u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen_0; u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; @@ -170,6 +171,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, struct caam_hash_state *state, int ctx_len) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, state->ctx_dma)) { @@ -183,18 +185,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, return 0; } -/* Map req->result, and append seq_out_ptr command that points to it */ -static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, - u8 *result, int digestsize) -{ - dma_addr_t dst_dma; - - dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); - append_seq_out_ptr(desc, dst_dma, digestsize, 0); - - return dst_dma; -} - /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, @@ -223,6 +213,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, struct caam_hash_state *state, int ctx_len, struct sec4_sg_entry *sec4_sg, u32 flag) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); if (dma_mapping_error(jrdev, state->ctx_dma)) { dev_err(jrdev, "unable to map ctx\n"); @@ -485,7 +476,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, /* * ahash_edesc - s/w-extended ahash descriptor - * @dst_dma: physical mapped address of req->result * @sec4_sg_dma: physical mapped address of h/w link table * @src_nents: number of segments in input scatterlist * @sec4_sg_bytes: length of dma mapped sec4_sg space @@ -493,7 +483,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, * @sec4_sg: h/w link table */ struct ahash_edesc { - dma_addr_t dst_dma; dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; @@ -509,8 +498,6 @@ static inline void ahash_unmap(struct device *dev, if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, 
DMA_TO_DEVICE); - if (edesc->dst_dma) - dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, @@ -527,12 +514,10 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); if (state->ctx_dma) { - dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); state->ctx_dma = 0; } ahash_unmap(dev, edesc, req, dst_len); @@ -545,9 +530,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -556,17 +541,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif req->base.complete(&req->base, err); @@ -614,9 +596,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -625,17 +607,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif req->base.complete(&req->base, err); @@ -896,7 +875,7 @@ static int ahash_final_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -916,14 +895,7 @@ static int ahash_final_ctx(struct ahash_request *req) append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); - - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto 
unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -936,7 +908,7 @@ static int ahash_final_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -990,7 +962,7 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->src_nents = src_nents; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -1004,13 +976,7 @@ static int ahash_finup_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1023,7 +989,7 @@ static int ahash_finup_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -1082,10 +1048,8 @@ static int ahash_digest(struct ahash_request *req) desc = edesc->hw_desc; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) { ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; @@ -1100,7 +1064,7 @@ static int ahash_digest(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1131,20 +1095,20 @@ static int ahash_final_no_ctx(struct ahash_request *req) desc = edesc->hw_desc; - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, state->buf_dma)) { - dev_err(jrdev, "unable to map src\n"); - goto unmap; - } + if (buflen) { + state->buf_dma = dma_map_single(jrdev, buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + goto unmap; + } - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1155,7 +1119,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1354,12 +1318,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) goto unmap; } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, 
digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1370,7 +1331,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1502,6 +1463,7 @@ static int ahash_init(struct ahash_request *req) state->final = ahash_final_no_ctx; state->ctx_dma = 0; + state->ctx_dma_len = 0; state->current_buf = 0; state->buf_dma = 0; state->buflen_0 = 0; diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4fb91ba39c36b32a95ec3190d1296c9ec8a2eefa..ce3f9ad7120f0f882c8c8a008ac8f1f10ddb19c3 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -70,22 +70,22 @@ extern bool caam_little_end; extern bool caam_imx; -#define caam_to_cpu(len) \ -static inline u##len caam##len ## _to_cpu(u##len val) \ -{ \ - if (caam_little_end) \ - return le##len ## _to_cpu(val); \ - else \ - return be##len ## _to_cpu(val); \ +#define caam_to_cpu(len) \ +static inline u##len caam##len ## _to_cpu(u##len val) \ +{ \ + if (caam_little_end) \ + return le##len ## _to_cpu((__force __le##len)val); \ + else \ + return be##len ## _to_cpu((__force __be##len)val); \ } -#define cpu_to_caam(len) \ -static inline u##len cpu_to_caam##len(u##len val) \ -{ \ - if (caam_little_end) \ - return cpu_to_le##len(val); \ - else \ - return cpu_to_be##len(val); \ +#define cpu_to_caam(len) \ +static inline u##len cpu_to_caam##len(u##len val) \ +{ \ + if (caam_little_end) \ + return (__force u##len)cpu_to_le##len(val); \ + else \ + return (__force u##len)cpu_to_be##len(val); \ } caam_to_cpu(16) diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c index 2ae6124e5da673e1d73fe0141beed7adaf5e5360..5d54ebc20cb30b2a30102ac7f1a1b1419d481471 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c @@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen) static int nitrox_skcipher_init(struct crypto_skcipher *tfm) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); - void *fctx; + struct crypto_ctx_hdr *chdr; /* get the first device */ nctx->ndev = nitrox_get_first_device(); @@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm) return -ENODEV; /* allocate nitrox crypto context */ - fctx = crypto_alloc_context(nctx->ndev); - if (!fctx) { + chdr = crypto_alloc_context(nctx->ndev); + if (!chdr) { nitrox_put_device(nctx->ndev); return -ENOMEM; } - nctx->u.ctx_handle = (uintptr_t)fctx; + nctx->chdr = chdr; + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + + sizeof(struct ctx_hdr)); crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + sizeof(struct nitrox_kcrypt_request)); return 0; @@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); memset(&fctx->auth, 0, sizeof(struct auth_keys)); - crypto_free_context((void *)fctx); + crypto_free_context((void *)nctx->chdr); } nitrox_put_device(nctx->ndev); diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 4d31df07777f63129715175f7aa4bc84b50fe49b..28baf1a19d0a3ce26def18591e2243b308e931c9 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -146,12 +146,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev) 
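The nitrox change that follows wraps each dma_pool allocation in a small header recording the owning pool, the DMA handle and the CPU address, so the free path can hand dma_pool_free() exactly the values dma_pool_alloc() returned instead of reconstructing them from an offset pointer. A hedged sketch of that wrapper, independent of the driver's actual structures:

    #include <linux/dmapool.h>
    #include <linux/slab.h>

    /* Bookkeeping kept outside the DMA-coherent buffer itself. */
    struct ctx_wrapper {
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;
    };

    static struct ctx_wrapper *ctx_alloc(struct dma_pool *pool)
    {
        struct ctx_wrapper *w;

        w = kmalloc(sizeof(*w), GFP_KERNEL);
        if (!w)
            return NULL;

        w->vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &w->dma);
        if (!w->vaddr) {
            kfree(w);
            return NULL;
        }
        w->pool = pool;
        return w;
    }

    static void ctx_free(struct ctx_wrapper *w)
    {
        if (!w)
            return;
        /* Free with the same pool/vaddr/dma triple used at allocation. */
        dma_pool_free(w->pool, w->vaddr, w->dma);
        kfree(w);
    }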
void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; + struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); + if (!chdr) + return NULL; + vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma); - if (!vaddr) + if (!vaddr) { + kfree(chdr); return NULL; + } /* fill meta data */ ctx = vaddr; @@ -159,7 +166,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev) ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); - return ((u8 *)vaddr + sizeof(struct ctx_hdr)); + chdr->pool = ndev->ctx_pool; + chdr->dma = dma; + chdr->vaddr = vaddr; + + return chdr; } /** @@ -168,13 +179,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev) */ void crypto_free_context(void *ctx) { - struct ctx_hdr *ctxp; + struct crypto_ctx_hdr *ctxp; if (!ctx) return; - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr)); - dma_pool_free(ctxp->pool, ctxp, ctxp->dma); + ctxp = ctx; + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); + kfree(ctxp); } /** diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h index d091b6f5f5dd697d56ec01e1afd2449d5d03127e..19f0a20e3bb3b125fdccf7e65e627bbea0935ca0 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_req.h +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h @@ -181,12 +181,19 @@ struct flexi_crypto_context { struct auth_keys auth; }; +struct crypto_ctx_hdr { + struct dma_pool *pool; + dma_addr_t dma; + void *vaddr; +}; + struct nitrox_crypto_ctx { struct nitrox_device *ndev; union { u64 ctx_handle; struct flexi_crypto_context *fctx; } u; + struct crypto_ctx_hdr *chdr; }; struct nitrox_kcrypt_request { diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index be055b9547f6252cb6187f360e88f6d8960efec0..ea901bc5733ccd479966d3ca0bd24f646abc1c22 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -351,6 +351,7 @@ static struct pci_driver zip_driver = { static struct crypto_alg zip_comp_deflate = { .cra_name = "deflate", + .cra_driver_name = "deflate-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = { static struct crypto_alg zip_comp_lzs = { .cra_name = "lzs", + .cra_driver_name = "lzs-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = { .decompress = zip_scomp_decompress, .base = { .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", + .cra_driver_name = "deflate-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = { .decompress = zip_scomp_decompress, .base = { .cra_name = "lzs", - .cra_driver_name = "lzs-scomp", + .cra_driver_name = "lzs-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -591,6 +593,7 @@ static const struct file_operations zip_stats_fops = { .owner = THIS_MODULE, .open = zip_stats_open, .read = seq_read, + .release = single_release, }; static int zip_clear_open(struct inode *inode, struct file *file) @@ -602,6 +605,7 @@ static const struct file_operations zip_clear_fops = { .owner = THIS_MODULE, .open = zip_clear_open, .read = seq_read, + .release = single_release, }; static int zip_regs_open(struct inode *inode, struct file *file) @@ -613,6 +617,7 @@ static const struct 
file_operations zip_regs_fops = { .owner = THIS_MODULE, .open = zip_regs_open, .read = seq_read, + .release = single_release, }; /* Root directory for thunderx_zip debugfs entry */ diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index ca1f0d780b61cee959cb326d60bbde7c43782a78..e5dcb29b687f6324621405218ed20ab2d781894e 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + return 0; } @@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; + rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm); rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = ctx->u.aes.mode; rctx->cmd.u.aes.action = encrypt; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 1b5035d562880a6d0b66458fae9fa15adda33c1c..b8c94a01cfc941c0808b24d88c639216086cdd8c 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -35,56 +35,62 @@ struct ccp_tasklet_data { }; /* Human-readable error strings */ +#define CCP_MAX_ERROR_CODE 64 static char *ccp_error_codes[] = { "", - "ERR 01: ILLEGAL_ENGINE", - "ERR 02: ILLEGAL_KEY_ID", - "ERR 03: ILLEGAL_FUNCTION_TYPE", - "ERR 04: ILLEGAL_FUNCTION_MODE", - "ERR 05: ILLEGAL_FUNCTION_ENCRYPT", - "ERR 06: ILLEGAL_FUNCTION_SIZE", - "ERR 07: Zlib_MISSING_INIT_EOM", - "ERR 08: ILLEGAL_FUNCTION_RSVD", - "ERR 09: ILLEGAL_BUFFER_LENGTH", - "ERR 10: VLSB_FAULT", - "ERR 11: ILLEGAL_MEM_ADDR", - "ERR 12: ILLEGAL_MEM_SEL", - "ERR 13: ILLEGAL_CONTEXT_ID", - "ERR 14: ILLEGAL_KEY_ADDR", - "ERR 15: 0xF Reserved", - "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE", - "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE", - "ERR 18: CMD_TIMEOUT", - "ERR 19: IDMA0_AXI_SLVERR", - "ERR 20: IDMA0_AXI_DECERR", - "ERR 21: 0x15 Reserved", - "ERR 22: IDMA1_AXI_SLAVE_FAULT", - "ERR 23: IDMA1_AIXI_DECERR", - "ERR 24: 0x18 Reserved", - "ERR 25: ZLIBVHB_AXI_SLVERR", - "ERR 26: ZLIBVHB_AXI_DECERR", - "ERR 27: 0x1B Reserved", - "ERR 27: ZLIB_UNEXPECTED_EOM", - "ERR 27: ZLIB_EXTRA_DATA", - "ERR 30: ZLIB_BTYPE", - "ERR 31: ZLIB_UNDEFINED_SYMBOL", - "ERR 32: ZLIB_UNDEFINED_DISTANCE_S", - "ERR 33: ZLIB_CODE_LENGTH_SYMBOL", - "ERR 34: ZLIB _VHB_ILLEGAL_FETCH", - "ERR 35: ZLIB_UNCOMPRESSED_LEN", - "ERR 36: ZLIB_LIMIT_REACHED", - "ERR 37: ZLIB_CHECKSUM_MISMATCH0", - "ERR 38: ODMA0_AXI_SLVERR", - "ERR 39: ODMA0_AXI_DECERR", - "ERR 40: 0x28 Reserved", - "ERR 41: ODMA1_AXI_SLVERR", - "ERR 42: ODMA1_AXI_DECERR", - "ERR 43: LSB_PARITY_ERR", + "ILLEGAL_ENGINE", + "ILLEGAL_KEY_ID", + "ILLEGAL_FUNCTION_TYPE", + "ILLEGAL_FUNCTION_MODE", + "ILLEGAL_FUNCTION_ENCRYPT", + "ILLEGAL_FUNCTION_SIZE", + "Zlib_MISSING_INIT_EOM", + "ILLEGAL_FUNCTION_RSVD", + "ILLEGAL_BUFFER_LENGTH", + "VLSB_FAULT", + "ILLEGAL_MEM_ADDR", + "ILLEGAL_MEM_SEL", + "ILLEGAL_CONTEXT_ID", + "ILLEGAL_KEY_ADDR", + "0xF Reserved", + "Zlib_ILLEGAL_MULTI_QUEUE", + "Zlib_ILLEGAL_JOBID_CHANGE", + "CMD_TIMEOUT", + "IDMA0_AXI_SLVERR", + "IDMA0_AXI_DECERR", + "0x15 Reserved", + "IDMA1_AXI_SLAVE_FAULT", + "IDMA1_AIXI_DECERR", + "0x18 Reserved", + "ZLIBVHB_AXI_SLVERR", + "ZLIBVHB_AXI_DECERR", + "0x1B 
Reserved", + "ZLIB_UNEXPECTED_EOM", + "ZLIB_EXTRA_DATA", + "ZLIB_BTYPE", + "ZLIB_UNDEFINED_SYMBOL", + "ZLIB_UNDEFINED_DISTANCE_S", + "ZLIB_CODE_LENGTH_SYMBOL", + "ZLIB _VHB_ILLEGAL_FETCH", + "ZLIB_UNCOMPRESSED_LEN", + "ZLIB_LIMIT_REACHED", + "ZLIB_CHECKSUM_MISMATCH0", + "ODMA0_AXI_SLVERR", + "ODMA0_AXI_DECERR", + "0x28 Reserved", + "ODMA1_AXI_SLVERR", + "ODMA1_AXI_DECERR", }; -void ccp_log_error(struct ccp_device *d, int e) +void ccp_log_error(struct ccp_device *d, unsigned int e) { - dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e); + if (WARN_ON(e >= CCP_MAX_ERROR_CODE)) + return; + + if (e < ARRAY_SIZE(ccp_error_codes)) + dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]); + else + dev_err(d->dev, "CCP error %d: Unknown Error\n", e); } /* List of CCPs, CCP count, read-write access lock, and access functions @@ -537,6 +543,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 1; @@ -561,6 +571,10 @@ int ccp_dev_resume(struct sp_device *sp) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 0; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 6810b65c1939c88abee1448f06e9d9c2b71460df..7442b0422f8ac032f3c0c7ee87f352b522671927 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -632,7 +632,7 @@ struct ccp5_desc { void ccp_add_device(struct ccp_device *ccp); void ccp_del_device(struct ccp_device *ccp); -extern void ccp_log_error(struct ccp_device *, int); +extern void ccp_log_error(struct ccp_device *, unsigned int); struct ccp_device *ccp_alloc_struct(struct sp_device *sp); bool ccp_queues_suspended(struct ccp_device *ccp); diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 67155cb21636917456941c346457b300116d61f1..a83588d6ba72c9ee2628fb1eb9c9372f95ad143d 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -340,6 +340,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, desc->tx_desc.flags = flags; desc->tx_desc.tx_submit = ccp_tx_submit; desc->ccp = chan->ccp; + INIT_LIST_HEAD(&desc->entry); INIT_LIST_HEAD(&desc->pending); INIT_LIST_HEAD(&desc->active); desc->status = DMA_IN_PROGRESS; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 0ea43cdeb05f0f4c5c38a332b30faf1cc67f4c6e..cdffff7c8abb9e8bf4f4b812b03e45907747432c 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -625,6 +625,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, unsigned long long *final; unsigned int dm_offset; + unsigned int authsize; + unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ int ret; @@ -645,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, if (!aes->key) /* Gotta have a key SGL */ return -EINVAL; + /* Zero defaults to 16 bytes, the maximum size */ + authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; + switch (authsize) { + case 16: + case 15: + case 14: + case 13: + case 12: + case 8: + case 4: + break; + default: + return -EINVAL; + } + /* First, decompose the source buffer into AAD & PT, * and the destination buffer into AAD, CT & tag, or * the input into CT & tag. 
@@ -659,13 +676,15 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); } else { /* Input length for decryption includes tag */ - ilen = aes->src_len - AES_BLOCK_SIZE; + ilen = aes->src_len - authsize; p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); } + jobid = CCP_NEW_JOBID(cmd_q->ccp); + memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.jobid = jobid; op.sb_key = cmd_q->sb_key; /* Pre-allocated */ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.init = 1; @@ -749,7 +768,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) - goto e_ctx; + goto e_aad; if (in_place) { dst = src; @@ -766,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { - unsigned int nbytes = aes->src_len - % AES_BLOCK_SIZE; + unsigned int nbytes = ilen % AES_BLOCK_SIZE; if (nbytes) { op.eom = 1; @@ -816,6 +834,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, final[0] = cpu_to_be64(aes->aad_len * 8); final[1] = cpu_to_be64(ilen * 8); + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = jobid; + op.sb_key = cmd_q->sb_key; /* Pre-allocated */ + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ + op.init = 1; + op.u.aes.type = aes->type; op.u.aes.mode = CCP_AES_MODE_GHASH; op.u.aes.action = CCP_AES_GHASHFINAL; op.src.type = CCP_MEMTYPE_SYSTEM; @@ -828,34 +853,37 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, op.u.aes.size = 0; ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) - goto e_dst; + goto e_final_wa; if (aes->action == CCP_AES_ACTION_ENCRYPT) { /* Put the ciphered tag after the ciphertext. */ - ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE); + ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize); } else { /* Does this ciphered tag match the input? */ - ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE, + ret = ccp_init_dm_workarea(&tag, cmd_q, authsize, DMA_BIDIRECTIONAL); if (ret) - goto e_tag; - ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); - if (ret) - goto e_tag; + goto e_final_wa; + ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize); + if (ret) { + ccp_dm_free(&tag); + goto e_final_wa; + } - ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE); + ret = crypto_memneq(tag.address, final_wa.address, + authsize) ? 
-EBADMSG : 0; ccp_dm_free(&tag); } -e_tag: +e_final_wa: ccp_dm_free(&final_wa); e_dst: - if (aes->src_len && !in_place) + if (ilen > 0 && !in_place) ccp_free_data(&dst, cmd_q); e_src: - if (aes->src_len) + if (ilen > 0) ccp_free_data(&src, cmd_q); e_aad: @@ -1721,7 +1749,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) break; default: ret = -EINVAL; - goto e_ctx; + goto e_data; } } else { /* Stash the context */ @@ -1767,8 +1795,9 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) LSB_ITEM_SIZE); break; default: + kfree(hmac_buf); ret = -EINVAL; - goto e_ctx; + goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 72790d88236d4d73b56c5f1d26efcbfadc033c6b..5874c80683fcb0d67ecc26d90d62005c678b2870 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -363,7 +363,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) goto cmd; /* allocate a physically contiguous buffer to store the CSR blob */ - if (!access_ok(VERIFY_WRITE, input.address, input.length) || + if (!access_ok(input.address, input.length) || input.length > SEV_FW_BLOB_MAX_SIZE) { ret = -EFAULT; goto e_free; @@ -607,14 +607,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp) /* Allocate a physically contiguous buffer to store the PDH blob. */ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) { + !access_ok(input.pdh_cert_address, input.pdh_cert_len)) { ret = -EFAULT; goto e_free; } /* Allocate a physically contiguous buffer to store the cert chain blob. */ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) { + !access_ok(input.cert_chain_address, input.cert_chain_len)) { ret = -EFAULT; goto e_free; } @@ -935,7 +935,7 @@ void psp_pci_init(void) rc = sev_platform_init(&error); if (rc) { dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error); - goto err; + return; } dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major, diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 01b82b82f8b8761fa775952ec4165b63b9522eec..aa6b45bc13b983ca382df769d4e0df9b2935d603 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -227,7 +227,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) /* In case of payload authentication failure, MUST NOT * revealed the decrypted message --> zero its memory. 
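The tag check completed just above switches from memcmp() to crypto_memneq(), which compares in constant time and therefore does not leak how many leading bytes of the authentication tag matched; a mismatch is reported as -EBADMSG. The verification step in isolation, with buffer handling simplified:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <crypto/algapi.h>      /* crypto_memneq() */

    /*
     * Compare a computed authentication tag with the expected one.
     * Returns 0 on match, -EBADMSG on mismatch, with no early exit.
     */
    static int verify_tag(const u8 *computed, const u8 *expected,
                          unsigned int authsize)
    {
        return crypto_memneq(computed, expected, authsize) ? -EBADMSG : 0;
    }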
*/ - cc_zero_sgl(areq->dst, areq_ctx->cryptlen); + cc_zero_sgl(areq->dst, areq->cryptlen); err = -EBADMSG; } } else { /*ENCRYPT*/ @@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx) /* This function prepers the user key so it can pass to the hmac processing * (copy to intenral buffer or hash in case of key longer than block */ -static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, +static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, unsigned int keylen) { dma_addr_t key_dma_addr = 0; @@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int hashmode; unsigned int idx = 0; int rc = 0; + u8 *key = NULL; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; dma_addr_t padded_authkey_dma_addr = ctx->auth_state.hmac.padded_authkey_dma_addr; @@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, } if (keylen != 0) { + + key = kmemdup(authkey, keylen, GFP_KERNEL); + if (!key) + return -ENOMEM; + key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); + kzfree(key); return -ENOMEM; } if (keylen > blocksize) { @@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, if (key_dma_addr) dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); + kzfree(key); + return rc; } @@ -540,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; struct cc_crypto_req cc_req = {}; - struct crypto_authenc_key_param *param; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; - int rc = -EINVAL; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); + const u8 *enckey, *authkey; + int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); @@ -554,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
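In the cc_get_plain_hmac_key() hunk that follows (and in the cc_hash setkey paths further down), the caller's key is duplicated with kmemdup() before being DMA-mapped: the buffer handed to setkey may live on the stack, in vmalloc space or in read-only data and is not guaranteed to be DMA-able, so the driver maps a private kmalloc copy and zeroes it afterwards. A hedged sketch of that idiom, using kzfree() to match the calls in this series:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int map_key_for_dma(struct device *dev, const u8 *key,
                               unsigned int keylen, u8 **copy_out,
                               dma_addr_t *dma_out)
    {
        u8 *copy;
        dma_addr_t dma;

        /* Duplicate into kmalloc memory, which is always DMA-able. */
        copy = kmemdup(key, keylen, GFP_KERNEL);
        if (!copy)
            return -ENOMEM;

        dma = dma_map_single(dev, copy, keylen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
            kzfree(copy);           /* zero before freeing key material */
            return -ENOMEM;
        }

        *copy_out = copy;
        *dma_out = dma;
        return 0;
    }

The caller is expected to dma_unmap_single() and kzfree() the copy once the hardware is done with it.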
*/ - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - param = RTA_DATA(rta); - ctx->enc_keylen = be32_to_cpu(param->enckeylen); - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < ctx->enc_keylen) + struct crypto_authenc_keys keys; + + rc = crypto_authenc_extractkeys(&keys, key, keylen); + if (rc) goto badkey; - ctx->auth_keylen = keylen - ctx->enc_keylen; + enckey = keys.enckey; + authkey = keys.authkey; + ctx->enc_keylen = keys.enckeylen; + ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ + rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) goto badkey; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, - CTR_RFC3686_NONCE_SIZE); + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ + enckey = key; + authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } @@ -594,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); + memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, + ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) goto badkey; } diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index dd948e1df9e5c5a8f2aac0dba9aacfa87a0f5aa6..90b4870078fb724e63de45518c47c6b40d56fd08 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req, */ static unsigned int cc_get_sgl_nents(struct device *dev, struct scatterlist *sg_list, - unsigned int nbytes, u32 *lbytes, - bool *is_chained) + unsigned int nbytes, u32 *lbytes) { unsigned int nents = 0; while (nbytes && sg_list) { - if (sg_list->length) { - nents++; - /* get the number of bytes in the last entry */ - *lbytes = nbytes; - nbytes -= (sg_list->length > nbytes) ? - nbytes : sg_list->length; - sg_list = sg_next(sg_list); - } else { - sg_list = (struct scatterlist *)sg_page(sg_list); - if (is_chained) - *is_chained = true; - } + nents++; + /* get the number of bytes in the last entry */ + *lbytes = nbytes; + nbytes -= (sg_list->length > nbytes) ? 
+ nbytes : sg_list->length; + sg_list = sg_next(sg_list); } dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; @@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, { u32 nents, lbytes; - nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL); + nents = cc_get_sgl_nents(dev, sg, end, &lbytes); sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, (direct == CC_SG_TO_BUF)); } @@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, sgl_data->num_of_buffers++; } -static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents, - enum dma_data_direction direction) -{ - u32 i, j; - struct scatterlist *l_sg = sg; - - for (i = 0; i < nents; i++) { - if (!l_sg) - break; - if (dma_map_sg(dev, l_sg, 1, direction) != 1) { - dev_err(dev, "dma_map_page() sg buffer failed\n"); - goto err; - } - l_sg = sg_next(l_sg); - } - return nents; - -err: - /* Restore mapped parts */ - for (j = 0; j < i; j++) { - if (!sg) - break; - dma_unmap_sg(dev, sg, 1, direction); - sg = sg_next(sg); - } - return 0; -} - static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { - bool is_chained = false; - if (sg_is_last(sg)) { /* One entry only case -set to DLLI */ if (dma_map_sg(dev, sg, 1, direction) != 1) { @@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, *nents = 1; *mapped_nents = 1; } else { /*sg_is_last*/ - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes, - &is_chained); + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; dev_err(dev, "Too many fragments. 
current %d max %d\n", *nents, max_sg_nents); return -ENOMEM; } - if (!is_chained) { - /* In case of mmu the number of mapped nents might - * be changed from the original sgl nents - */ - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); - if (*mapped_nents == 0) { - *nents = 0; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } - } else { - /*In this case the driver maps entry by entry so it - * must have the same nents before and after map - */ - *mapped_nents = cc_dma_map_sg(dev, sg, *nents, - direction); - if (*mapped_nents != *nents) { - *nents = *mapped_nents; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } + /* In case of mmu the number of mapped nents might + * be changed from the original sgl nents + */ + *mapped_nents = dma_map_sg(dev, sg, *nents, direction); + if (*mapped_nents == 0) { + *nents = 0; + dev_err(dev, "dma_map_sg() sg buffer failed\n"); + return -ENOMEM; } } @@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_drvdata *drvdata = dev_get_drvdata(dev); u32 dummy; - bool chained; u32 size_to_unmap = 0; if (areq_ctx->mac_buf_dma_addr) { @@ -612,12 +560,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) if (areq_ctx->gen_ctx.iv_dma_addr) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size, DMA_BIDIRECTIONAL); + kzfree(areq_ctx->gen_ctx.iv); } - /*In case a pool was set, a table was - *allocated and should be released - */ - if (areq_ctx->mlli_params.curr_pool) { + /* Release pool */ + if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || + areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && + (areq_ctx->mlli_params.mlli_virt_addr)) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); @@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) size_to_unmap += crypto_aead_ivsize(tfm); dma_unmap_sg(dev, req->src, - cc_get_sgl_nents(dev, req->src, size_to_unmap, - &dummy, &chained), + cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy), DMA_BIDIRECTIONAL); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); dma_unmap_sg(dev, req->dst, cc_get_sgl_nents(dev, req->dst, size_to_unmap, - &dummy, &chained), + &dummy), DMA_BIDIRECTIONAL); } if (drvdata->coherent && @@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata, struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct device *dev = drvdata_to_dev(drvdata); + gfp_t flags = cc_gfp_flags(&req->base); int rc = 0; if (!req->iv) { areq_ctx->gen_ctx.iv_dma_addr = 0; + areq_ctx->gen_ctx.iv = NULL; goto chain_iv_exit; } - areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, - hw_iv_size, - DMA_BIDIRECTIONAL); + areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); + if (!areq_ctx->gen_ctx.iv) + return -ENOMEM; + + areq_ctx->gen_ctx.iv_dma_addr = + dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, + DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", hw_iv_size, req->iv); + kzfree(areq_ctx->gen_ctx.iv); + areq_ctx->gen_ctx.iv = NULL; rc = -ENOMEM; goto chain_iv_exit; } @@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, unsigned int size_for_map = req->assoclen + req->cryptlen; 
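With scatterlist chaining handled transparently by sg_next(), the simplified cc_get_sgl_nents() above no longer needs an is_chained special case: it walks the list until the requested byte count is consumed and remembers how many bytes fall in the final entry. The equivalent walk in isolation, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>

    /*
     * Count how many scatterlist entries cover @nbytes and report, via
     * @lbytes, how many bytes of the request land in the last entry.
     */
    static unsigned int sgl_nents_for_len(struct scatterlist *sg,
                                          unsigned int nbytes,
                                          unsigned int *lbytes)
    {
        unsigned int nents = 0;

        while (nbytes && sg) {
            nents++;
            *lbytes = nbytes;       /* bytes still needed on entering this entry */
            nbytes -= min(nbytes, sg->length);
            sg = sg_next(sg);       /* follows chained scatterlists too */
        }
        return nents;
    }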
struct crypto_aead *tfm = crypto_aead_reqtfm(req); u32 sg_index = 0; - bool chained = false; bool is_gcm4543 = areq_ctx->is_gcm4543; u32 size_to_skip = req->assoclen; @@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0; src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, - &src_last_bytes, &chained); + &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts while (sg_index <= size_to_skip) { @@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, - &dst_last_bytes, &chained); + &dst_last_bytes); sg_index = areq_ctx->dst_sgl->length; offset = size_to_skip; @@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); areq_ctx->in_nents = - cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL); + cc_get_sgl_nents(dev, src, nbytes, &dummy); sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 7623b29911af443ed62491acad2916f078e58a01..54a39164aab8f483d1c15ddbaa973eece6f6e506 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -79,6 +79,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) default: break; } + break; case S_DIN_to_DES: if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE) return 0; @@ -634,6 +635,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); unsigned int len; + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); + switch (ctx_p->cipher_mode) { case DRV_CIPHER_CBC: /* @@ -663,7 +666,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) break; } - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); kzfree(req_ctx->iv); skcipher_request_complete(req, err); @@ -781,7 +783,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req) memset(req_ctx, 0, sizeof(*req_ctx)); - if (ctx_p->cipher_mode == DRV_CIPHER_CBC) { + if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) && + (req->cryptlen >= ivsize)) { /* Allocate and save the last IV sized bytes of the source, * which will be lost in case of in-place decryption. diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 1ff229c2aeab13a464ad573af163fb61783959cf..186a2536fb8b9c8a3cbb44e5f33a329175ea4490 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_ivgen_init(new_drvdata); if (rc) { dev_err(dev, "cc_ivgen_init failed\n"); - goto post_power_mgr_err; + goto post_buf_mgr_err; } /* Allocate crypto algs */ @@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_hash_err; } + /* All set, we can allow autosuspend */ + cc_pm_go(new_drvdata); + /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. 
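The init_cc_resources() changes in the next hunks tighten the probe sequence: runtime autosuspend is only enabled (cc_pm_go()) once every algorithm has been registered, and the error labels unwind in exactly the reverse order of initialization. A compact illustration of that goto-unwind convention, with placeholder step names that do not correspond to the driver's functions:

    #include <linux/errno.h>

    /* Placeholder init/teardown steps, stubbed out for illustration. */
    static int setup_step_a(void) { return 0; }
    static int setup_step_b(void) { return 0; }
    static int register_algs(void) { return 0; }
    static void teardown_step_b(void) { }
    static void teardown_step_a(void) { }
    static void enable_autosuspend(void) { }

    static int my_probe(void)
    {
        int rc;

        rc = setup_step_a();
        if (rc)
            return rc;

        rc = setup_step_b();
        if (rc)
            goto err_a;

        rc = register_algs();
        if (rc)
            goto err_b;

        /* Only allow runtime suspend once the device is fully usable. */
        enable_autosuspend();
        return 0;

    err_b:
        teardown_step_b();          /* unwind in reverse order of setup */
    err_a:
        teardown_step_a();
        return rc;
    }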
@@ -401,8 +404,6 @@ static int init_cc_resources(struct platform_device *plat_dev) cc_cipher_free(new_drvdata); post_ivgen_err: cc_ivgen_fini(new_drvdata); -post_power_mgr_err: - cc_pm_fini(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index d608a4faf662255260b23c6a508e11a12c34e3e8..be7f9bd5c55990f6215b23c2612131acd19d002e 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -162,6 +162,7 @@ struct cc_alg_template { struct async_gen_req_ctx { dma_addr_t iv_dma_addr; + u8 *iv; enum drv_crypto_direction op_type; }; diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index b4d0a6d983e0cfe0c35653d3f087c36fc8090cf8..bac278d274b0fde15e711c950f45e02e3c151c85 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c @@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata) u32 reg; reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); - return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)); + /* Did the TEE report status? */ + if (reg & CC_FIPS_SYNC_TEE_STATUS) + /* Yes. Is it OK? */ + return (reg & CC_FIPS_SYNC_MODULE_OK); + + /* No. It's either not in use or will be reported later */ + return true; } /* @@ -72,20 +78,28 @@ static inline void tee_fips_error(struct device *dev) dev_err(dev, "TEE reported error!\n"); } +/* + * This function check if cryptocell tee fips error occurred + * and in such case triggers system error + */ +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) +{ + struct device *dev = drvdata_to_dev(p_drvdata); + + if (!cc_get_tee_fips_status(p_drvdata)) + tee_fips_error(dev); +} + /* Deferred service handler, run as interrupt-fired tasklet */ static void fips_dsr(unsigned long devarg) { struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; - struct device *dev = drvdata_to_dev(drvdata); - u32 irq, state, val; + u32 irq, val; irq = (drvdata->irq & (CC_GPR0_IRQ_MASK)); if (irq) { - state = cc_ioread(drvdata, CC_REG(GPR_HOST)); - - if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) - tee_fips_error(dev); + cc_tee_handle_fips_error(drvdata); } /* after verifing that there is nothing to do, @@ -113,8 +127,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata) dev_dbg(dev, "Initializing fips tasklet\n"); tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); - if (!cc_get_tee_fips_status(p_drvdata)) - tee_fips_error(dev); + cc_tee_handle_fips_error(p_drvdata); return 0; } diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h index 645e096a7a823a5e8dbd3ea57b257cd868aef311..67d5fbfa09b56a04b4b12d51c9ef551e033a78ab 100644 --- a/drivers/crypto/ccree/cc_fips.h +++ b/drivers/crypto/ccree/cc_fips.h @@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata); void cc_fips_fini(struct cc_drvdata *drvdata); void fips_handler(struct cc_drvdata *drvdata); void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok); +void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata); #else /* CONFIG_CRYPTO_FIPS */ @@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {} static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok) {} static inline void fips_handler(struct cc_drvdata *drvdata) {} +static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {} #endif /* CONFIG_CRYPTO_FIPS */ diff --git a/drivers/crypto/ccree/cc_hash.c 
b/drivers/crypto/ccree/cc_hash.c index b9313306c36feb729b2aa3f560d452d2bfa6577e..2cadd7a218445f13c4aab9b13d46c59c501349d8 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -64,6 +64,7 @@ struct cc_hash_alg { struct hash_key_req_ctx { u32 keylen; dma_addr_t key_dma_addr; + u8 *key; }; /* hash per-session context */ @@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, ctx->key_params.keylen = keylen; ctx->key_params.key_dma_addr = 0; ctx->is_hmac = true; + ctx->key_params.key = NULL; if (keylen) { + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); + if (!ctx->key_params.key) + return -ENOMEM; + ctx->key_params.key_dma_addr = - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); + dma_map_single(dev, (void *)ctx->key_params.key, keylen, + DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", - key, keylen); + ctx->key_params.key, keylen); + kzfree(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -881,6 +889,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); } + + kzfree(ctx->key_params.key); + return rc; } @@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, ctx->key_params.keylen = keylen; + ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); + if (!ctx->key_params.key) + return -ENOMEM; + ctx->key_params.key_dma_addr = - dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); + dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); + kzfree(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); + kzfree(ctx->key_params.key); + return rc; } @@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = { .setkey = cc_hash_setkey, .halg = { .digestsize = SHA224_DIGEST_SIZE, - .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE), + .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA224, @@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = { .setkey = cc_hash_setkey, .halg = { .digestsize = SHA384_DIGEST_SIZE, - .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE), + .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA384, diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h index a091ae57f9024f723adb1820b71bc80d030bd199..45985b955d2c899c1478ce43d2d279c6590d9e0a 100644 --- a/drivers/crypto/ccree/cc_hw_queue_defs.h +++ b/drivers/crypto/ccree/cc_hw_queue_defs.h @@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc, * @pdesc: pointer HW descriptor struct * @mode: Any one of the modes defined in [CC7x-DESC] */ -static inline void set_cipher_mode(struct cc_hw_desc *pdesc, - enum drv_cipher_mode mode) +static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode) { pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode); } @@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct 
cc_hw_desc *pdesc, * @pdesc: pointer HW descriptor struct * @mode: Any one of the modes defined in [CC7x-DESC] */ -static inline void set_cipher_config0(struct cc_hw_desc *pdesc, - enum drv_crypto_direction mode) +static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode) { pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode); } diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c index 7694583233944df97713dab29ce081dd6d51176e..1abec3896a7800260e0d435d5f9b17ba2c1c0498 100644 --- a/drivers/crypto/ccree/cc_ivgen.c +++ b/drivers/crypto/ccree/cc_ivgen.c @@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata) } ivgen_ctx->pool = NULL_SRAM_ADDR; - - /* release "this" context */ - kfree(ivgen_ctx); } /*! @@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata) int rc; /* Allocate "this" context */ - ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL); + ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL); if (!ivgen_ctx) return -ENOMEM; + drvdata->ivgen_handle = ivgen_ctx; + /* Allocate pool's header for initial enc. key/IV */ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE, &ivgen_ctx->pool_meta_dma, @@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata) goto out; } - drvdata->ivgen_handle = ivgen_ctx; - return cc_init_iv_sram(drvdata); out: diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89fb35265de1872c31f5b08399cc3a5..638082dff183ac7c92ed7f4c5fc183fff21dcab4 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -11,6 +11,7 @@ #include "cc_ivgen.h" #include "cc_hash.h" #include "cc_pm.h" +#include "cc_fips.h" #define POWER_DOWN_ENABLE 0x01 #define POWER_DOWN_DISABLE 0x00 @@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev) int rc; dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); rc = cc_suspend_req_queue(drvdata); if (rc) { dev_err(dev, "cc_suspend_req_queue (%x)\n", rc); return rc; } fini_cc_regs(drvdata); + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); cc_clk_off(drvdata); return 0; } @@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev) struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); - cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); - + /* Enables the device source clk */ rc = cc_clk_on(drvdata); if (rc) { dev_err(dev, "failed getting clock back on. 
We're toast.\n"); return rc; } + cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); rc = init_cc_regs(drvdata, false); if (rc) { dev_err(dev, "init_cc_regs (%x)\n", rc); return rc; } + /* check if tee fips error occurred during power down */ + cc_tee_handle_fips_error(drvdata); rc = cc_resume_req_queue(drvdata); if (rc) { @@ -100,20 +103,19 @@ int cc_pm_put_suspend(struct device *dev) int cc_pm_init(struct cc_drvdata *drvdata) { - int rc = 0; struct device *dev = drvdata_to_dev(drvdata); /* must be before the enabling to avoid resdundent suspending */ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); /* activate the PM module */ - rc = pm_runtime_set_active(dev); - if (rc) - return rc; - /* enable the PM module*/ - pm_runtime_enable(dev); + return pm_runtime_set_active(dev); +} - return rc; +/* enable the PM module*/ +void cc_pm_go(struct cc_drvdata *drvdata) +{ + pm_runtime_enable(drvdata_to_dev(drvdata)); } void cc_pm_fini(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58bac36961632075856a916d87fca60..907a6db4d6c036fe1c5433d5bda095d157e166b0 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -16,6 +16,7 @@ extern const struct dev_pm_ops ccree_pm; int cc_pm_init(struct cc_drvdata *drvdata); +void cc_pm_go(struct cc_drvdata *drvdata); void cc_pm_fini(struct cc_drvdata *drvdata); int cc_pm_suspend(struct device *dev); int cc_pm_resume(struct device *dev); @@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } +static inline void cc_pm_go(struct cc_drvdata *drvdata) {} + static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} static inline int cc_pm_suspend(struct device *dev) diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 461b97e2f1fdcc34462aaa67bb0f5a6b036c5b08..1ff8738631a38d9f7dbcca296b0dfdac5270ac6c 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len) { - int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len; + int hdrlen; + + hdrlen = sizeof(struct fw_ulptx_wr) + + sizeof(struct chcr_ipsec_req) + kctx_len; hdrlen += sizeof(struct cpl_tx_pkt); if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index 7725b6ee14efb2ecc89d9c0822aa19903284d3f5..59bb67d5a7cede7198c652b53550941754cb9a3e 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -153,6 +153,11 @@ struct chtls_dev { unsigned int cdev_state; }; +struct chtls_listen { + struct chtls_dev *cdev; + struct sock *sk; +}; + struct chtls_hws { struct sk_buff_head sk_recv_queue; u8 txqid; @@ -215,6 +220,8 @@ struct chtls_sock { u16 resv2; u32 delack_mode; u32 delack_seq; + u32 snd_win; + u32 rcv_win; void *passive_reap_next; /* placeholder for passive */ struct chtls_hws tlshws; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 0997e166ea57755c4d0178702995cce489b06d7e..3d3c8b3f4ea39a5e8042d70b371077f198aa702a 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -890,24 +891,6 @@ static unsigned int 
chtls_select_mss(const struct chtls_sock *csk, return mtu_idx; } -static unsigned int select_rcv_wnd(struct chtls_sock *csk) -{ - unsigned int rcvwnd; - unsigned int wnd; - struct sock *sk; - - sk = csk->sk; - wnd = tcp_full_space(sk); - - if (wnd < MIN_RCV_WND) - wnd = MIN_RCV_WND; - - rcvwnd = MAX_RCV_WND; - - csk_set_flag(csk, CSK_UPDATE_RCV_WND); - return min(wnd, rcvwnd); -} - static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp) { int wscale = 0; @@ -954,7 +937,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb, csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)), req); opt0 = TCAM_BYPASS_F | - WND_SCALE_V((tp)->rx_opt.rcv_wscale) | + WND_SCALE_V(RCV_WSCALE(tp)) | MSS_IDX_V(csk->mtu_idx) | L2T_IDX_V(csk->l2t_entry->idx) | NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) | @@ -1008,6 +991,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb) return 0; } +static void chtls_set_tcp_window(struct chtls_sock *csk) +{ + struct net_device *ndev = csk->egress_dev; + struct port_info *pi = netdev_priv(ndev); + unsigned int linkspeed; + u8 scale; + + linkspeed = pi->link_cfg.speed; + scale = linkspeed / SPEED_10000; +#define CHTLS_10G_RCVWIN (256 * 1024) + csk->rcv_win = CHTLS_10G_RCVWIN; + if (scale) + csk->rcv_win *= scale; +#define CHTLS_10G_SNDWIN (256 * 1024) + csk->snd_win = CHTLS_10G_SNDWIN; + if (scale) + csk->snd_win *= scale; +} + static struct sock *chtls_recv_sock(struct sock *lsk, struct request_sock *oreq, void *network_hdr, @@ -1072,6 +1074,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->port_id = port_id; csk->egress_dev = ndev; csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); + chtls_set_tcp_window(csk); + tp->rcv_wnd = csk->rcv_win; + csk->sndbuf = csk->snd_win; csk->ulp_mode = ULP_MODE_TLS; step = cdev->lldi->nrxq / cdev->lldi->nchan; csk->rss_qid = cdev->lldi->rxq_ids[port_id * step]; @@ -1081,9 +1086,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->sndbuf = newsk->sk_sndbuf; csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type, cxgb4_port_viid(ndev)); - tp->rcv_wnd = select_rcv_wnd(csk); RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), - WSCALE_OK(tp), + sock_net(newsk)-> + ipv4.sysctl_tcp_window_scaling, tp->window_clamp); neigh_release(n); inet_inherit_port(&tcp_hashinfo, lsk, newsk); @@ -1135,6 +1140,7 @@ static void chtls_pass_accept_request(struct sock *sk, struct cpl_t5_pass_accept_rpl *rpl; struct cpl_pass_accept_req *req; struct listen_ctx *listen_ctx; + struct vlan_ethhdr *vlan_eh; struct request_sock *oreq; struct sk_buff *reply_skb; struct chtls_sock *csk; @@ -1147,6 +1153,10 @@ static void chtls_pass_accept_request(struct sock *sk, unsigned int stid; unsigned int len; unsigned int tid; + bool th_ecn, ect; + __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ + u16 eth_hdr_len; + bool ecn_ok; req = cplhdr(skb) + RSS_HDR; tid = GET_TID(req); @@ -1185,24 +1195,40 @@ static void chtls_pass_accept_request(struct sock *sk, oreq->mss = 0; oreq->ts_recent = 0; - eh = (struct ethhdr *)(req + 1); - iph = (struct iphdr *)(eh + 1); + eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len)); + if (eth_hdr_len == ETH_HLEN) { + eh = (struct ethhdr *)(req + 1); + iph = (struct iphdr *)(eh + 1); + network_hdr = (void *)(eh + 1); + } else { + vlan_eh = (struct vlan_ethhdr *)(req + 1); + iph = (struct iphdr *)(vlan_eh + 1); + network_hdr = (void *)(vlan_eh + 1); + } if (iph->version != 0x4) goto free_oreq; - network_hdr = (void *)(eh + 1); tcph = (struct tcphdr *)(iph + 1); + 
skb_set_network_header(skb, (void *)iph - (void *)req); tcp_rsk(oreq)->tfo_listener = false; tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq); chtls_set_req_port(oreq, tcph->source, tcph->dest); - inet_rsk(oreq)->ecn_ok = 0; chtls_set_req_addr(oreq, iph->daddr, iph->saddr); - if (req->tcpopt.wsf <= 14) { + ip_dsfield = ipv4_get_dsfield(iph); + if (req->tcpopt.wsf <= 14 && + sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { inet_rsk(oreq)->wscale_ok = 1; inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; } inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if; + th_ecn = tcph->ece && tcph->cwr; + if (th_ecn) { + ect = !INET_ECN_is_not_ect(ip_dsfield); + ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn; + if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk)) + inet_rsk(oreq)->ecn_ok = 1; + } newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev); if (!newsk) @@ -1276,7 +1302,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt) tp->write_seq = snd_isn; tp->snd_nxt = snd_isn; tp->snd_una = snd_isn; - inet_sk(sk)->inet_id = tp->write_seq ^ jiffies; + inet_sk(sk)->inet_id = prandom_u32(); assign_rxopt(sk, opt); if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10)) diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index afebbd87c4aa1d22ca179f558552cb2f410fcc0a..61b309c7b2fcaa00b3b407483b771c5b79542020 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb, req_wr->lsodisable_to_flags = htonl(TX_ULP_MODE_V(ULP_MODE_TLS) | - FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) | + TX_URG_V(skb_urgent(skb)) | T6_TX_FORCE_F | wr_ulp_mode_force | TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && skb_queue_empty(&csk->txq))); @@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb, FW_OFLD_TX_DATA_WR_SHOVE_F); req->tunnel_to_proxy = htonl(wr_ulp_mode_force | - FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) | - FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag - (sk, CSK_TX_MORE_DATA)) && - skb_queue_empty(&csk->txq))); + TX_URG_V(skb_urgent(skb)) | + TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && + skb_queue_empty(&csk->txq))); req->plen = htonl(len); } @@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int mss, flags, err; int recordsz = 0; int copied = 0; - int hdrlen = 0; long timeo; lock_sock(sk); @@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) recordsz = tls_header_read(&hdr, &msg->msg_iter); size -= TLS_HEADER_LENGTH; - hdrlen += TLS_HEADER_LENGTH; + copied += TLS_HEADER_LENGTH; csk->tlshws.txleft = recordsz; csk->tlshws.type = hdr.type; if (skb) @@ -1083,10 +1081,8 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int off = TCP_OFF(sk); bool merge; - if (!page) - goto wait_for_memory; - - pg_size <<= compound_order(page); + if (page) + pg_size <<= compound_order(page); if (off < pg_size && skb_can_coalesce(skb, i, page, off)) { merge = 1; @@ -1187,7 +1183,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) chtls_tcp_push(sk, flags); done: release_sock(sk); - return copied + hdrlen; + return copied; do_fault: if (!skb->len) { __skb_unlink(skb, &csk->txq); @@ -1449,7 +1445,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if 
(copied) { @@ -1482,7 +1478,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); @@ -1627,7 +1623,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, break; } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); @@ -1716,7 +1712,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return peekmsg(sk, msg, len, nonblock, flags); if (sk_can_busy_loop(sk) && - skb_queue_empty(&sk->sk_receive_queue) && + skb_queue_empty_lockless(&sk->sk_receive_queue) && sk->sk_state == TCP_ESTABLISHED) sk_busy_loop(sk, nonblock); @@ -1759,7 +1755,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1790,7 +1786,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index f59b044ebd25528864d055c04b90f67b83248eac..563f8fe7686adc9c895dbb623cac3f7c9ae5a3a6 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb) static int listen_notify_handler(struct notifier_block *this, unsigned long event, void *data) { - struct chtls_dev *cdev; - struct sock *sk; - int ret; + struct chtls_listen *clisten; + int ret = NOTIFY_DONE; - sk = data; - ret = NOTIFY_DONE; + clisten = (struct chtls_listen *)data; switch (event) { case CHTLS_LISTEN_START: + ret = chtls_listen_start(clisten->cdev, clisten->sk); + kfree(clisten); + break; case CHTLS_LISTEN_STOP: - mutex_lock(&cdev_list_lock); - list_for_each_entry(cdev, &cdev_list, list) { - if (event == CHTLS_LISTEN_START) - ret = chtls_listen_start(cdev, sk); - else - chtls_listen_stop(cdev, sk); - } - mutex_unlock(&cdev_list_lock); + chtls_listen_stop(clisten->cdev, clisten->sk); + kfree(clisten); break; } return ret; @@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb) return 0; } -static int chtls_start_listen(struct sock *sk) +static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk) { + struct chtls_listen *clisten; int err; if (sk->sk_protocol != IPPROTO_TCP) @@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk) return -EADDRNOTAVAIL; sk->sk_backlog_rcv = listen_backlog_rcv; + clisten = kmalloc(sizeof(*clisten), GFP_KERNEL); + if (!clisten) + return -ENOMEM; + clisten->cdev = cdev; + clisten->sk = sk; mutex_lock(¬ify_mutex); err = raw_notifier_call_chain(&listen_notify_list, - CHTLS_LISTEN_START, sk); + CHTLS_LISTEN_START, clisten); mutex_unlock(¬ify_mutex); return err; } -static void chtls_stop_listen(struct sock *sk) +static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk) { + struct chtls_listen *clisten; + if (sk->sk_protocol != IPPROTO_TCP) return; + clisten = kmalloc(sizeof(*clisten), GFP_KERNEL); + if (!clisten) + return; + clisten->cdev = cdev; + clisten->sk = sk; mutex_lock(¬ify_mutex); raw_notifier_call_chain(&listen_notify_list, - 
CHTLS_LISTEN_STOP, sk); + CHTLS_LISTEN_STOP, clisten); mutex_unlock(¬ify_mutex); } @@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev) static int chtls_create_hash(struct tls_device *dev, struct sock *sk) { + struct chtls_dev *cdev = to_chtls_dev(dev); + if (sk->sk_state == TCP_LISTEN) - return chtls_start_listen(sk); + return chtls_start_listen(cdev, sk); return 0; } static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk) { + struct chtls_dev *cdev = to_chtls_dev(dev); + if (sk->sk_state == TCP_LISTEN) - chtls_stop_listen(sk); + chtls_stop_listen(cdev, sk); +} + +static void chtls_free_uld(struct chtls_dev *cdev) +{ + int i; + + tls_unregister_device(&cdev->tlsdev); + kvfree(cdev->kmap.addr); + idr_destroy(&cdev->hwtid_idr); + for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) + kfree_skb(cdev->rspq_skb_cache[i]); + kfree(cdev->lldi); + kfree_skb(cdev->askb); + kfree(cdev); +} + +static inline void chtls_dev_release(struct kref *kref) +{ + struct chtls_dev *cdev; + struct tls_device *dev; + + dev = container_of(kref, struct tls_device, kref); + cdev = to_chtls_dev(dev); + chtls_free_uld(cdev); } static void chtls_register_dev(struct chtls_dev *cdev) @@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev) tlsdev->feature = chtls_inline_feature; tlsdev->hash = chtls_create_hash; tlsdev->unhash = chtls_destroy_hash; - tls_register_device(&cdev->tlsdev); + tlsdev->release = chtls_dev_release; + kref_init(&tlsdev->kref); + tls_register_device(tlsdev); cdev->cdev_state = CHTLS_CDEV_STATE_UP; } -static void chtls_unregister_dev(struct chtls_dev *cdev) -{ - tls_unregister_device(&cdev->tlsdev); -} - static void process_deferq(struct work_struct *task_param) { struct chtls_dev *cdev = container_of(task_param, @@ -262,29 +295,16 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) return NULL; } -static void chtls_free_uld(struct chtls_dev *cdev) -{ - int i; - - chtls_unregister_dev(cdev); - kvfree(cdev->kmap.addr); - idr_destroy(&cdev->hwtid_idr); - for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) - kfree_skb(cdev->rspq_skb_cache[i]); - kfree(cdev->lldi); - if (cdev->askb) - kfree_skb(cdev->askb); - kfree(cdev); -} - static void chtls_free_all_uld(void) { struct chtls_dev *cdev, *tmp; mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { - if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) - chtls_free_uld(cdev); + if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) { + list_del(&cdev->list); + kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release); + } } mutex_unlock(&cdev_mutex); } @@ -305,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state) mutex_lock(&cdev_mutex); list_del(&cdev->list); mutex_unlock(&cdev_mutex); - chtls_free_uld(cdev); + kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release); break; default: break; diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 8ca9c503bcb0de8309663e8ca7690817212993df..a40648d7acafce5d1bc8cbd170decd260e3a8c55 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -12,3 +12,42 @@ config CRYPTO_DEV_HISI_SEC To compile this as a module, choose M here: the module will be called hisi_sec. + +config CRYPTO_DEV_HISI_QM + tristate + depends on ARM64 && PCI + select UACCE + +config CRYPTO_DEV_HISI_ZIP + tristate "Support for HISI ZIP Driver" + depends on ARM64 && ACPI + select CRYPTO_DEV_HISI_QM + help + Support for HiSilicon HIP08 ZIP Driver. 
+ +config CRYPTO_DEV_HISI_HPRE + tristate "Support for HISI HPRE accelerator" + depends on PCI && PCI_MSI && ACPI + depends on ARM64 + select CRYPTO_DEV_HISI_QM + select CRYPTO_DH + select CRYPTO_RSA + help + Support for HiSilicon HPRE(High Performance RSA Engine) + accelerator, which can accelerate RSA and DH algorithms. + +config CRYPTO_DEV_HISI_SEC2 + tristate "Support for HISI SEC Driver" + depends on ARM64 && ACPI + select CRYPTO_DEV_HISI_QM + select CRYPTO_BLKCIPHER + select CRYPTO_ALGAPI + help + Support for HiSilicon HIP09 SEC Driver. + +config CRYPTO_DEV_HISI_RDE + tristate "Support for HISI RDE Driver" + depends on ARM64 && ACPI + select CRYPTO_DEV_HISI_QM + help + Support for HiSilicon HIP09 RDE Driver. diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile index 463f46ace1820e26131f29d6028e8cd26dfe71a1..c766a09b0be96e3143a85085d8486eeb48f8c444 100644 --- a/drivers/crypto/hisilicon/Makefile +++ b/drivers/crypto/hisilicon/Makefile @@ -1,2 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ +obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o +hisi_qm-objs = qm.o sgl.o +obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/ +obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/ +obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/ +obj-$(CONFIG_CRYPTO_DEV_HISI_RDE) += rde/ diff --git a/drivers/crypto/hisilicon/hpre/Makefile b/drivers/crypto/hisilicon/hpre/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4fd32b789e1efaf11d63bd88fe9d9ec5da468eb4 --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hisi_hpre.o +hisi_hpre-objs = hpre_main.o hpre_crypto.o diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h new file mode 100644 index 0000000000000000000000000000000000000000..14d60b48afc1fc1f0b6668f9d8d44f1b7841ba33 --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2019 HiSilicon Limited. */ +#ifndef __HISI_HPRE_H +#define __HISI_HPRE_H + +#include +#include "../qm.h" + +#define HPRE_SQE_SIZE sizeof(struct hpre_sqe) +#define HPRE_PF_DEF_Q_NUM 64 +#define HPRE_PF_DEF_Q_BASE 0 + +enum { + HPRE_CLUSTER0, + HPRE_CLUSTER1, + HPRE_CLUSTER2, + HPRE_CLUSTER3, + HPRE_CLUSTERS_NUM, +}; + +enum hpre_ctrl_dbgfs_file { + HPRE_CURRENT_QM, + HPRE_CLEAR_ENABLE, + HPRE_CLUSTER_CTRL, + HPRE_DEBUG_FILE_NUM, +}; + +enum hpre_dfx_dbgfs_file { + HPRE_SEND_CNT, + HPRE_RECV_CNT, + HPRE_SEND_FAIL_CNT, + HPRE_SEND_BUSY_CNT, + HPRE_OVER_THRHLD_CNT, + HPRE_OVERTIME_THRHLD, + HPRE_INVALID_REQ_CNT, + HPRE_DFX_FILE_NUM +}; + +#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1) + +struct hpre_debugfs_file { + int index; + enum hpre_ctrl_dbgfs_file type; + spinlock_t lock; + struct hpre_debug *debug; +}; + +struct hpre_dfx { + atomic64_t value; + enum hpre_dfx_dbgfs_file type; +}; + +/* + * One HPRE controller has one PF and multiple VFs, some global configurations + * which PF has need this structure. + * Just relevant for PF. 
+ */ +struct hpre_debug { + struct hpre_dfx dfx[HPRE_DFX_FILE_NUM]; + struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM]; +}; + +struct hpre { + struct hisi_qm qm; + struct hpre_debug debug; +}; + +enum hpre_alg_type { + HPRE_ALG_NC_NCRT = 0x0, + HPRE_ALG_NC_CRT = 0x1, + HPRE_ALG_KG_STD = 0x2, + HPRE_ALG_KG_CRT = 0x3, + HPRE_ALG_DH_G2 = 0x4, + HPRE_ALG_DH = 0x5, +}; + +struct hpre_sqe { + __le32 dw0; + __u8 task_len1; + __u8 task_len2; + __u8 mrttest_num; + __u8 resv1; + __le64 key; + __le64 in; + __le64 out; + __le16 tag; + __le16 resv2; +#define _HPRE_SQE_ALIGN_EXT 7 + __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; +}; + +struct hisi_qp *hpre_create_qp(void); +int hpre_algs_register(void); +void hpre_algs_unregister(void); + +#endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..5031d64789e68c5f3e5044d1e01df6a4dc44252b --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -0,0 +1,1194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hpre.h" + +struct hpre_ctx; + +#define HPRE_CRYPTO_ALG_PRI 1000 +#define HPRE_ALIGN_SZ 64 +#define HPRE_BITS_2_BYTES_SHIFT 3 +#define HPRE_RSA_512BITS_KSZ 64 +#define HPRE_RSA_1536BITS_KSZ 192 +#define HPRE_CRT_PRMS 5 +#define HPRE_CRT_Q 2 +#define HPRE_CRT_P 3 +#define HPRE_CRT_INV 4 +#define HPRE_DH_G_FLAG 0x02 +#define HPRE_TRY_SEND_TIMES 100 +#define HPRE_INVLD_REQ_ID (-1) +#define HPRE_DEV(ctx) (&((ctx)->qp->qm->pdev->dev)) + +#define HPRE_SQE_ALG_BITS 5 +#define HPRE_SQE_DONE_SHIFT 30 +#define HPRE_DH_MAX_P_SZ 512 + +typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); + +struct hpre_rsa_ctx { + /* low address: e--->n */ + char *pubkey; + dma_addr_t dma_pubkey; + + /* low address: d--->n */ + char *prikey; + dma_addr_t dma_prikey; + + /* low address: dq->dp->q->p->qinv */ + char *crt_prikey; + dma_addr_t dma_crt_prikey; + + struct crypto_akcipher *soft_tfm; +}; + +struct hpre_dh_ctx { + /* + * If base is g we compute the public key + * ya = g^xa mod p; [RFC2631 sec 2.1.1] + * else if base if the counterpart public key we + * compute the shared secret + * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + */ + char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */ + dma_addr_t dma_xa_p; + + char *g; /* m */ + dma_addr_t dma_g; +}; + +struct hpre_ctx { + struct hisi_qp *qp; + struct hpre_asym_request **req_list; + struct hpre *hpre; + spinlock_t req_lock; + unsigned int key_sz; + bool crt_g2_mode; + struct idr req_idr; + union { + struct hpre_rsa_ctx rsa; + struct hpre_dh_ctx dh; + }; +}; + +struct hpre_asym_request { + char *src; + char *dst; + struct hpre_sqe req; + struct hpre_ctx *ctx; + union { + struct akcipher_request *rsa; + struct kpp_request *dh; + } areq; + int err; + int req_id; + hpre_cb cb; + struct timespec64 req_time; +}; + +static DEFINE_MUTEX(hpre_alg_lock); +static unsigned int hpre_active_devs; + +static int hpre_alloc_req_id(struct hpre_ctx *ctx) +{ + unsigned long flags; + int id; + + spin_lock_irqsave(&ctx->req_lock, flags); + id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); + spin_unlock_irqrestore(&ctx->req_lock, flags); + + return id; +} + +static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->req_lock, flags); + idr_remove(&ctx->req_idr, req_id); + 
spin_unlock_irqrestore(&ctx->req_lock, flags); +} + +static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) +{ + struct hpre_ctx *ctx; + struct hpre_dfx *dfx; + int id; + + ctx = hpre_req->ctx; + id = hpre_alloc_req_id(ctx); + if (unlikely(id < 0)) + return -EINVAL; + + ctx->req_list[id] = hpre_req; + hpre_req->req_id = id; + + dfx = ctx->hpre->debug.dfx; + if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value)) + ktime_get_ts64(&hpre_req->req_time); + + return id; +} + +static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + int id = hpre_req->req_id; + + if (hpre_req->req_id >= 0) { + hpre_req->req_id = HPRE_INVLD_REQ_ID; + ctx->req_list[id] = NULL; + hpre_free_req_id(ctx, id); + } +} + +static struct hisi_qp *hpre_get_qp_and_start(void) +{ + struct hisi_qp *qp; + int ret; + + qp = hpre_create_qp(); + if (!qp) { + pr_err("Can not create hpre qp!\n"); + return ERR_PTR(-ENODEV); + } + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + hisi_qm_free_qps(&qp, 1); + pci_err(qp->qm->pdev, "Can not start qp!\n"); + return ERR_PTR(-EINVAL); + } + + return qp; +} + +static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + enum dma_data_direction dma_dir; + + if (is_src) { + hpre_req->src = NULL; + dma_dir = DMA_TO_DEVICE; + } else { + hpre_req->dst = NULL; + dma_dir = DMA_FROM_DEVICE; + } + *tmp = dma_map_single(dev, sg_virt(data), + len, dma_dir); + if (unlikely(dma_mapping_error(dev, *tmp))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + return 0; +} + +static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) +{ + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + void *ptr; + int shift; + + shift = ctx->key_sz - len; + if (unlikely(shift < 0)) + return -EINVAL; + + ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); + if (unlikely(!ptr)) + return -ENOMEM; + + if (is_src) { + scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0); + hpre_req->src = ptr; + } else { + hpre_req->dst = ptr; + } + + return 0; +} + +static int hpre_hw_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len, + int is_src, int is_dh) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + dma_addr_t tmp = 0; + int ret; + + /* when the data is dh's source, we should format it */ + if ((sg_is_last(data) && len == ctx->key_sz) && + ((is_dh && !is_src) || !is_dh)) + ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp); + else + ret = hpre_prepare_dma_buf(hpre_req, data, len, + is_src, &tmp); + if (unlikely(ret)) + return ret; + + if (is_src) + msg->in = cpu_to_le64(tmp); + else + msg->out = cpu_to_le64(tmp); + + return 0; +} + +static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t tmp; + + tmp = le64_to_cpu(sqe->in); + if (unlikely(!tmp)) + return; + + if (src) { + if (req->src) + dma_free_coherent(dev, ctx->key_sz, + req->src, tmp); + else + dma_unmap_single(dev, tmp, + ctx->key_sz, DMA_TO_DEVICE); + } + + tmp = le64_to_cpu(sqe->out); + if (unlikely(!tmp)) + return; + + if (req->dst) { + if (dst) 
+ scatterwalk_map_and_copy(req->dst, dst, 0, + ctx->key_sz, 1); + dma_free_coherent(dev, ctx->key_sz, req->dst, tmp); + } else { + dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE); + } +} + +static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, + void **kreq) +{ + struct hpre_asym_request *req; + int err, id, done; + +#define HPRE_NO_HW_ERR 0 +#define HPRE_HW_TASK_DONE 3 +#define HREE_HW_ERR_MASK 0x7ff +#define HREE_SQE_DONE_MASK 0x3 + id = (int)le16_to_cpu(sqe->tag); + req = ctx->req_list[id]; + hpre_rm_req_from_ctx(req); + *kreq = req; + + err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) & + HREE_HW_ERR_MASK; + + done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & + HREE_SQE_DONE_MASK; + if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) + return 0; + + return -EINVAL; +} + +static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) +{ + struct hpre *hpre; + + if (!ctx || !qp || qlen < 0) + return -EINVAL; + + spin_lock_init(&ctx->req_lock); + ctx->qp = qp; + + hpre = container_of(ctx->qp->qm, struct hpre, qm); + ctx->hpre = hpre; + ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); + if (!ctx->req_list) + return -ENOMEM; + ctx->key_sz = 0; + ctx->crt_g2_mode = false; + idr_init(&ctx->req_idr); + + return 0; +} + +static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) +{ + if (is_clear_all) { + idr_destroy(&ctx->req_idr); + kfree(ctx->req_list); + hisi_qm_free_qps(&ctx->qp, 1); + } + + ctx->crt_g2_mode = false; + ctx->key_sz = 0; +} + +static bool hpre_is_bd_timeout(struct hpre_asym_request *req, + u64 overtime_thrhld) +{ + struct timespec64 reply_time; + u64 time_use_us; + +#define HPRE_DFX_SEC_TO_US 1000000 +#define HPRE_DFX_US_TO_NS 1000 + + ktime_get_ts64(&reply_time); + time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) * + HPRE_DFX_SEC_TO_US + + (reply_time.tv_nsec - req->req_time.tv_nsec) / + HPRE_DFX_US_TO_NS; + + if (time_use_us <= overtime_thrhld) + return false; + + return true; +} + +static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req; + struct kpp_request *areq; + u64 overtime_thrhld; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.dh; + areq->dst_len = ctx->key_sz; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req; + struct akcipher_request *areq; + u64 overtime_thrhld; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + areq = req->areq.rsa; + areq->dst_len = ctx->key_sz; + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); + akcipher_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static void hpre_alg_cb(struct hisi_qp *qp, void *resp) +{ + struct hpre_ctx *ctx = qp->qp_ctx; + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_sqe *sqe = resp; + struct hpre_asym_request *req = 
ctx->req_list[le16_to_cpu(sqe->tag)]; + + + if (unlikely(!req)) { + atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); + return; + } + + req->cb(ctx, resp); +} + +static int hpre_ctx_init(struct hpre_ctx *ctx) +{ + struct hisi_qp *qp; + + qp = hpre_get_qp_and_start(); + if (IS_ERR(qp)) + return PTR_ERR(qp); + + qp->qp_ctx = ctx; + qp->req_cb = hpre_alg_cb; + + return hpre_ctx_set(ctx, qp, QM_Q_DEPTH); +} + +static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + if (is_rsa) { + struct akcipher_request *akreq = req; + + if (akreq->dst_len < ctx->key_sz) { + akreq->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + + tmp = akcipher_request_ctx(akreq); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_rsa_cb; + h_req->areq.rsa = akreq; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + } else { + struct kpp_request *kreq = req; + + if (kreq->dst_len < ctx->key_sz) { + kreq->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + + tmp = kpp_request_ctx(kreq); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_dh_cb; + h_req->areq.dh = kreq; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p); + } + + msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + + return 0; +} + +static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + int ctr = 0; + int ret; + + do { + atomic64_inc(&dfx[HPRE_SEND_CNT].value); + ret = hisi_qp_send(ctx->qp, msg); + if (ret != -EBUSY) + break; + atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); + } while (ctr++ < HPRE_TRY_SEND_TIMES); + + if (likely(!ret)) + return ret; + + if (ret != -EBUSY) + atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value); + + return ret; +} + +#ifdef CONFIG_CRYPTO_DH +static int hpre_dh_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_msg_request_set(ctx, req, false); + if (unlikely(ret)) + return ret; + + if (req->src) { + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1); + if (unlikely(ret)) + goto clear_all; + } + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1); + if (unlikely(ret)) + goto clear_all; + + if (ctx->crt_g2_mode && !req->src) + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); + else + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); + + /* success */ + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_is_dh_params_length_valid(unsigned int key_sz) +{ +#define _HPRE_DH_GRP1 768 +#define _HPRE_DH_GRP2 1024 +#define _HPRE_DH_GRP5 1536 +#define _HPRE_DH_GRP14 2048 +#define _HPRE_DH_GRP15 3072 +#define _HPRE_DH_GRP16 4096 + switch (key_sz) { + case _HPRE_DH_GRP1: + case _HPRE_DH_GRP2: + case _HPRE_DH_GRP5: + case _HPRE_DH_GRP14: + case _HPRE_DH_GRP15: + case _HPRE_DH_GRP16: + return 0; + } + + return -EINVAL; +} + +static int 
hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz; + + if (params->p_size > HPRE_DH_MAX_P_SZ) + return -EINVAL; + + if (hpre_is_dh_params_length_valid(params->p_size << + HPRE_BITS_2_BYTES_SHIFT)) + return -EINVAL; + + sz = ctx->key_sz = params->p_size; + ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, + &ctx->dh.dma_xa_p, GFP_KERNEL); + if (!ctx->dh.xa_p) + return -ENOMEM; + + memcpy(ctx->dh.xa_p + sz, params->p, sz); + + /* If g equals 2 don't copy it */ + if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) { + ctx->crt_g2_mode = true; + return 0; + } + + ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL); + if (!ctx->dh.g) { + dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, + ctx->dh.dma_xa_p); + ctx->dh.xa_p = NULL; + return -ENOMEM; + } + + memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size); + + return 0; +} + +static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (ctx->dh.g) { + dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); + ctx->dh.g = NULL; + } + + if (ctx->dh.xa_p) { + memzero_explicit(ctx->dh.xa_p, sz); + dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, + ctx->dh.dma_xa_p); + ctx->dh.xa_p = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct dh params; + int ret; + + if (crypto_dh_decode_key(buf, len, ¶ms) < 0) + return -EINVAL; + + /* Free old secret if any */ + hpre_dh_clear_ctx(ctx, false); + + ret = hpre_dh_set_params(ctx, ¶ms); + if (ret < 0) + goto err_clear_ctx; + + memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, + params.key_size); + + return 0; + +err_clear_ctx: + hpre_dh_clear_ctx(ctx, false); + return ret; +} + +static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->key_sz; +} + +static int hpre_dh_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return hpre_ctx_init(ctx); +} + +static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_dh_clear_ctx(ctx, true); +} +#endif + +static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) +{ + while (!**ptr && *len) { + (*ptr)++; + (*len)--; + } +} + +static bool hpre_rsa_key_size_is_support(unsigned int len) +{ + unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT; + +#define _RSA_1024BITS_KEY_WDTH 1024 +#define _RSA_2048BITS_KEY_WDTH 2048 +#define _RSA_3072BITS_KEY_WDTH 3072 +#define _RSA_4096BITS_KEY_WDTH 4096 + + switch (bits) { + case _RSA_1024BITS_KEY_WDTH: + case _RSA_2048BITS_KEY_WDTH: + case _RSA_3072BITS_KEY_WDTH: + case _RSA_4096BITS_KEY_WDTH: + return true; + default: + return false; + } +} + +static int hpre_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + void *tmp = akcipher_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); + ret = 
crypto_akcipher_encrypt(req); + akcipher_request_set_tfm(req, tfm); + return ret; + } + + if (unlikely(!ctx->rsa.pubkey)) + return -EINVAL; + + ret = hpre_msg_request_set(ctx, req, true); + if (unlikely(ret)) + return ret; + + msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT); + msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey); + + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); + if (unlikely(ret)) + goto clear_all; + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); + if (unlikely(ret)) + goto clear_all; + + /* success */ + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + void *tmp = akcipher_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); + ret = crypto_akcipher_decrypt(req); + akcipher_request_set_tfm(req, tfm); + return ret; + } + + if (unlikely(!ctx->rsa.prikey)) + return -EINVAL; + + ret = hpre_msg_request_set(ctx, req, true); + if (unlikely(ret)) + return ret; + + if (ctx->crt_g2_mode) { + msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey); + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | + HPRE_ALG_NC_CRT); + } else { + msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey); + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | + HPRE_ALG_NC_NCRT); + } + + ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); + if (unlikely(ret)) + goto clear_all; + + ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); + if (unlikely(ret)) + goto clear_all; + + /* success */ + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + + return ret; +} + +static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, + size_t vlen, bool private) +{ + const char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + ctx->key_sz = vlen; + + /* if invalid key size provided, we use software tfm */ + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) + return 0; + + ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1, + &ctx->rsa.dma_pubkey, + GFP_KERNEL); + if (!ctx->rsa.pubkey) + return -ENOMEM; + + if (private) { + ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1, + &ctx->rsa.dma_prikey, + GFP_KERNEL); + if (!ctx->rsa.prikey) { + dma_free_coherent(HPRE_DEV(ctx), vlen << 1, + ctx->rsa.pubkey, + ctx->rsa.dma_pubkey); + ctx->rsa.pubkey = NULL; + return -ENOMEM; + } + memcpy(ctx->rsa.prikey + vlen, ptr, vlen); + } + memcpy(ctx->rsa.pubkey + vlen, ptr, vlen); + + /* Using hardware HPRE to do RSA */ + return 1; +} + +static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value, + size_t vlen) +{ + const char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) + return -EINVAL; + + memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen); + + return 0; +} + +static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value, + size_t vlen) +{ + const 
char *ptr = value; + + hpre_rsa_drop_leading_zeros(&ptr, &vlen); + + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) + return -EINVAL; + + memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen); + + return 0; +} + +static int hpre_crt_para_get(char *para, size_t para_sz, + const char *raw, size_t raw_sz) +{ + const char *ptr = raw; + size_t len = raw_sz; + + hpre_rsa_drop_leading_zeros(&ptr, &len); + if (!len || len > para_sz) + return -EINVAL; + + memcpy(para + para_sz - len, ptr, len); + + return 0; +} + +static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key) +{ + unsigned int hlf_ksz = ctx->key_sz >> 1; + struct device *dev = HPRE_DEV(ctx); + u64 offset; + int ret; + + ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, + &ctx->rsa.dma_crt_prikey, + GFP_KERNEL); + if (!ctx->rsa.crt_prikey) + return -ENOMEM; + + ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz, + rsa_key->dq, rsa_key->dq_sz); + if (ret) + goto free_key; + + offset = hlf_ksz; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, + rsa_key->dp, rsa_key->dp_sz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_Q; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, + rsa_key->q, rsa_key->q_sz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_P; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, + rsa_key->p, rsa_key->p_sz); + if (ret) + goto free_key; + + offset = hlf_ksz * HPRE_CRT_INV; + ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, + rsa_key->qinv, rsa_key->qinv_sz); + if (ret) + goto free_key; + + ctx->crt_g2_mode = true; + + return 0; + +free_key: + offset = hlf_ksz * HPRE_CRT_PRMS; + memzero_explicit(ctx->rsa.crt_prikey, offset); + dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey, + ctx->rsa.dma_crt_prikey); + ctx->rsa.crt_prikey = NULL; + ctx->crt_g2_mode = false; + + return ret; +} + +/* If it is clear all, all the resources of the QP will be cleaned. */ +static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) +{ + unsigned int half_key_sz = ctx->key_sz >> 1; + struct device *dev = HPRE_DEV(ctx); + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (ctx->rsa.pubkey) { + dma_free_coherent(dev, ctx->key_sz << 1, + ctx->rsa.pubkey, ctx->rsa.dma_pubkey); + ctx->rsa.pubkey = NULL; + } + + if (ctx->rsa.crt_prikey) { + memzero_explicit(ctx->rsa.crt_prikey, + half_key_sz * HPRE_CRT_PRMS); + dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS, + ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey); + ctx->rsa.crt_prikey = NULL; + } + + if (ctx->rsa.prikey) { + memzero_explicit(ctx->rsa.prikey, ctx->key_sz); + dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, + ctx->rsa.dma_prikey); + ctx->rsa.prikey = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +/* + * we should judge if it is CRT or not, + * CRT: return true, N-CRT: return false . 
+ */ +static bool hpre_is_crt_key(struct rsa_key *key) +{ + u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz + + key->qinv_sz; + +#define LEN_OF_NCRT_PARA 5 + + /* N-CRT less than 5 parameters */ + return len > LEN_OF_NCRT_PARA; +} + +static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, + unsigned int keylen, bool private) +{ + struct rsa_key rsa_key; + int ret; + + hpre_rsa_clear_ctx(ctx, false); + + if (private) + ret = rsa_parse_priv_key(&rsa_key, key, keylen); + else + ret = rsa_parse_pub_key(&rsa_key, key, keylen); + if (ret < 0) + return ret; + + ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private); + if (ret <= 0) + return ret; + + if (private) { + ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz); + if (ret < 0) + goto free; + + if (hpre_is_crt_key(&rsa_key)) { + ret = hpre_rsa_setkey_crt(ctx, &rsa_key); + if (ret < 0) + goto free; + } + } + + ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz); + if (ret < 0) + goto free; + + if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) { + ret = -EINVAL; + goto free; + } + + return 0; + +free: + hpre_rsa_clear_ctx(ctx, false); + return ret; +} + +static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen); + if (ret) + return ret; + + return hpre_rsa_setkey(ctx, key, keylen, false); +} + +static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen); + if (ret) + return ret; + + return hpre_rsa_setkey(ctx, key, keylen, true); +} + +static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) + return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); + + return ctx->key_sz; +} + +static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0); + if (IS_ERR(ctx->rsa.soft_tfm)) { + pr_err("Can not alloc_akcipher!\n"); + return PTR_ERR(ctx->rsa.soft_tfm); + } + + ret = hpre_ctx_init(ctx); + if (ret) + crypto_free_akcipher(ctx->rsa.soft_tfm); + + return ret; +} + +static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + + hpre_rsa_clear_ctx(ctx, true); + crypto_free_akcipher(ctx->rsa.soft_tfm); +} + +static struct akcipher_alg rsa = { + .sign = hpre_rsa_dec, + .verify = hpre_rsa_enc, + .encrypt = hpre_rsa_enc, + .decrypt = hpre_rsa_dec, + .set_pub_key = hpre_rsa_setpubkey, + .set_priv_key = hpre_rsa_setprivkey, + .max_size = hpre_rsa_max_size, + .init = hpre_rsa_init_tfm, + .exit = hpre_rsa_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "rsa", + .cra_driver_name = "hpre-rsa", + .cra_module = THIS_MODULE, + }, +}; + +#ifdef CONFIG_CRYPTO_DH +static struct kpp_alg dh = { + .set_secret = hpre_dh_set_secret, + .generate_public_key = hpre_dh_compute_value, + .compute_shared_secret = hpre_dh_compute_value, + .max_size = hpre_dh_max_size, + .init = hpre_dh_init_tfm, + .exit = 
hpre_dh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "dh", + .cra_driver_name = "hpre-dh", + .cra_module = THIS_MODULE, + }, +}; +#endif + +int hpre_algs_register(void) +{ + int ret = 0; + + mutex_lock(&hpre_alg_lock); + if (++hpre_active_devs == 1) { + rsa.base.cra_flags = 0; + ret = crypto_register_akcipher(&rsa); + if (ret) + goto unlock; +#ifdef CONFIG_CRYPTO_DH + ret = crypto_register_kpp(&dh); + if (ret) { + crypto_unregister_akcipher(&rsa); + goto unlock; + } +#endif + } + +unlock: + mutex_unlock(&hpre_alg_lock); + return ret; +} + +void hpre_algs_unregister(void) +{ + mutex_lock(&hpre_alg_lock); + if (--hpre_active_devs == 0) { + crypto_unregister_akcipher(&rsa); +#ifdef CONFIG_CRYPTO_DH + crypto_unregister_kpp(&dh); +#endif + } + mutex_unlock(&hpre_alg_lock); +} diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c new file mode 100644 index 0000000000000000000000000000000000000000..d8f5a505472c00c6de54890d00616d08e33a9dc1 --- /dev/null +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -0,0 +1,1053 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 HiSilicon Limited. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hpre.h" + +#define HPRE_QUEUE_NUM_V2 1024 +#define HPRE_QUEUE_NUM_V1 4096 +#define HPRE_QM_ABNML_INT_MASK 0x100004 +#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) +#define HPRE_COMM_CNT_CLR_CE 0x0 +#define HPRE_CTRL_CNT_CLR_CE 0x301000 +#define HPRE_FSM_MAX_CNT 0x301008 +#define HPRE_VFG_AXQOS 0x30100c +#define HPRE_VFG_AXCACHE 0x301010 +#define HPRE_RDCHN_INI_CFG 0x301014 +#define HPRE_AWUSR_FP_CFG 0x301018 +#define HPRE_BD_ENDIAN 0x301020 +#define HPRE_ECC_BYPASS 0x301024 +#define HPRE_RAS_WIDTH_CFG 0x301028 +#define HPRE_POISON_BYPASS 0x30102c +#define HPRE_BD_ARUSR_CFG 0x301030 +#define HPRE_BD_AWUSR_CFG 0x301034 +#define HPRE_TYPES_ENB 0x301038 +#define HPRE_DATA_RUSER_CFG 0x30103c +#define HPRE_DATA_WUSER_CFG 0x301040 +#define HPRE_INT_MASK 0x301400 +#define HPRE_INT_STATUS 0x301800 +#define HPRE_CORE_INT_ENABLE 0 +#define HPRE_CORE_INT_DISABLE 0x003fffff +#define HPRE_RAS_ECC_1BIT_TH 0x30140c +#define HPRE_RDCHN_INI_ST 0x301a00 +#define HPRE_CLSTR_BASE 0x302000 +#define HPRE_CORE_EN_OFFSET 0x04 +#define HPRE_CORE_INI_CFG_OFFSET 0x20 +#define HPRE_CORE_INI_STATUS_OFFSET 0x80 +#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c +#define HPRE_CORE_IS_SCHD_OFFSET 0x90 + +#define HPRE_RAS_CE_ENB 0x301410 +#define HPRE_HAC_RAS_CE_ENABLE 0x1 +#define HPRE_RAS_NFE_ENB 0x301414 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe +#define HPRE_RAS_FE_ENB 0x301418 +#define HPRE_HAC_RAS_FE_ENABLE 0 + +#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET) +#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET) +#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET) +#define HPRE_HAC_ECC1_CNT 0x301a04 +#define HPRE_HAC_ECC2_CNT 0x301a08 +#define HPRE_HAC_INT_STATUS 0x301800 +#define HPRE_HAC_SOURCE_INT 0x301600 +#define HPRE_CLSTR_ADDR_INTRVL 0x1000 +#define HPRE_CLUSTER_INQURY 0x100 +#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 +#define HPRE_TIMEOUT_ABNML_BIT 6 +#define HPRE_PASID_EN_BIT 9 +#define HPRE_REG_RD_INTVRL_US 10 +#define HPRE_REG_RD_TMOUT_US 1000 +#define HPRE_DBGFS_VAL_MAX_LEN 20 +#define HPRE_PCI_DEVICE_ID 0xa258 +#define HPRE_PCI_VF_DEVICE_ID 0xa259 +#define HPRE_ADDR(qm, offset) 
((qm)->io_base + (offset)) +#define HPRE_QM_USR_CFG_MASK 0xfffffffe +#define HPRE_QM_AXI_CFG_MASK 0xffff +#define HPRE_QM_VFG_AX_MASK 0xff +#define HPRE_BD_USR_MASK 0x3 +#define HPRE_CLUSTER_CORE_MASK 0xf + +#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 +#define AM_OOO_SHUTDOWN_ENABLE BIT(0) +#define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE +#define HPRE_WR_MSI_PORT BIT(2) + +#define HPRE_CORE_ECC_2BIT_ERR BIT(1) +#define HPRE_OOO_ECC_2BIT_ERR BIT(5) + +#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + +#define HPRE_SQE_MASK_OFFSET 8 +#define HPRE_SQE_MASK_LEN 24 + +/* function index: + * 1 for hpre bypass mode, + * 2 for RDE bypass mode; + */ +#define HPRE_VIA_MSI_DSM 1 + +static struct hisi_qm_list hpre_devices; +static const char hpre_name[] = "hisi_hpre"; +static struct dentry *hpre_debugfs_root; +static const struct pci_device_id hpre_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, hpre_dev_ids); + +struct hpre_hw_error { + u32 int_msk; + const char *msg; +}; + +static const char * const hpre_debug_file_name[] = { + [HPRE_CURRENT_QM] = "current_qm", + [HPRE_CLEAR_ENABLE] = "rdclr_en", + [HPRE_CLUSTER_CTRL] = "cluster_ctrl", +}; + +static const struct hpre_hw_error hpre_hw_errors[] = { + { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" }, + { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" }, + { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" }, + { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" }, + { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" }, + { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" }, + { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" }, + { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" }, + { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" }, + { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" }, + { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" }, + { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" }, + { /* sentinel */ } +}; + +static const u64 hpre_cluster_offsets[] = { + [HPRE_CLUSTER0] = + HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER1] = + HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER2] = + HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL, + [HPRE_CLUSTER3] = + HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL, +}; + +static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { + {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, + {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, + {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, + {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, + {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, +}; + +static struct debugfs_reg32 hpre_com_dfx_regs[] = { + {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, + {"AXQOS ", HPRE_VFG_AXQOS}, + {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, + {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1}, + {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1}, + {"BD_ENDIAN ", HPRE_BD_ENDIAN}, + {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, + {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, + {"POISON_BYPASS ", HPRE_POISON_BYPASS}, + {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, + {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, + {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, + {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, + {"INT_STATUS ", HPRE_INT_STATUS}, +}; + +static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { + "send_cnt", + "recv_cnt", + "send_fail_cnt", + "send_busy_cnt", + 
"over_thrhld_cnt", + "overtime_thrhld", + "invalid_req_cnt" +}; + +static int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static const struct kernel_param_ops uacce_mode_ops = { + .set = uacce_mode_set, + .get = param_get_int, +}; + +static int uacce_mode = UACCE_MODE_NOUACCE; +module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444); +MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2"); + +static int pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); +} + +static const struct kernel_param_ops hpre_pf_q_num_ops = { + .set = pf_q_num_set, + .get = param_get_int, +}; + +static u32 pf_q_num = HPRE_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)"); + +static int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + return vf_num_set(val, kp); +} + +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + +struct hisi_qp *hpre_create_qp(void) +{ + int node = cpu_to_node(raw_smp_processor_id()); + struct hisi_qp *qp = NULL; + int ret; + + ret = hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0); + if (!ret) + return qp; + + return NULL; +} + +static void hpre_pasid_enable(struct hisi_qm *qm) +{ + u32 val; + + val = readl_relaxed(HPRE_ADDR(qm, HPRE_DATA_RUSER_CFG)); + val |= BIT(HPRE_PASID_EN_BIT); + writel_relaxed(val, HPRE_ADDR(qm, HPRE_DATA_RUSER_CFG)); + val = readl_relaxed(HPRE_ADDR(qm, HPRE_DATA_WUSER_CFG)); + val |= BIT(HPRE_PASID_EN_BIT); + writel_relaxed(val, HPRE_ADDR(qm, HPRE_DATA_WUSER_CFG)); +} + +static int hpre_cfg_by_dsm(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + union acpi_object *obj; + guid_t guid; + + if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) { + dev_err(dev, "Hpre GUID failed\n"); + return -EINVAL; + } + + /* Switch over to MSI handling due to non-standard PCI implementation */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, + 0, HPRE_VIA_MSI_DSM, NULL); + if (!obj) { + dev_err(dev, "ACPI handle failed!\n"); + return -EIO; + } + + ACPI_FREE(obj); + + return 0; +} + +static int hpre_set_cluster(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + unsigned long offset; + u32 val = 0; + int ret, i; + + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + offset = i * HPRE_CLSTR_ADDR_INTRVL; + + /* clusters initiating */ + writel(HPRE_CLUSTER_CORE_MASK, + HPRE_ADDR(qm, offset + HPRE_CORE_ENB)); + writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG)); + ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset + + HPRE_CORE_INI_STATUS), val, + ((val & HPRE_CLUSTER_CORE_MASK) == + HPRE_CLUSTER_CORE_MASK), + HPRE_REG_RD_INTVRL_US, + HPRE_REG_RD_TMOUT_US); + if (ret) { + dev_err(dev, + "cluster %d int st status timeout!\n", i); + return -ETIMEDOUT; + } + } + + return 0; +} + +static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 val; + int ret; + + writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE)); + writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); + writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG)); + + /* HPRE need more time, we close this interrupt */ + val = readl_relaxed(HPRE_ADDR(qm, 
HPRE_QM_ABNML_INT_MASK)); + val |= BIT(HPRE_TIMEOUT_ABNML_BIT); + writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); + + writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB)); + writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE)); + writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN)); + writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK)); + writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH)); + writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS)); + writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE)); + writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS)); + + /* Enable data buffer pasid */ + if (qm->use_sva) + hpre_pasid_enable(qm); + + writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG)); + writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG)); + writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG)); + ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val, + val & BIT(0), + HPRE_REG_RD_INTVRL_US, + HPRE_REG_RD_TMOUT_US); + if (ret) { + pci_err(pdev, "read rd channel timeout fail!\n"); + return -ETIMEDOUT; + } + + ret = hpre_set_cluster(qm); + if (ret) { + pci_err(pdev, "set hpre cluster err!\n"); + return -ETIMEDOUT; + } + + ret = hpre_cfg_by_dsm(qm); + if (ret) + pci_err(pdev, "acpi_evaluate_dsm err.\n"); + + /* disable FLR triggered by BME(bus master enable) */ + val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); + val |= HPRE_QM_PM_FLR; + writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); + + return ret; +} + +static void hpre_cnt_regs_clear(struct hisi_qm *qm) +{ + unsigned long offset; + int i; + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* clear clusterX/cluster_ctrl */ + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; + writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); + } + + /* clear rdclr_en */ + writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static void hpre_hw_error_disable(struct hisi_qm *qm) +{ + u32 val; + + /* disable hpre hw error interrupts */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); + + /* disable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val &= AM_OOO_SHUTDOWN_DISABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); +} + +static void hpre_hw_error_enable(struct hisi_qm *qm) +{ + u32 val; + + /* clear HPRE hw error source if having */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + + /* enable hpre hw error interrupts */ + writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); + writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); + writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); + + /* enable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val |= AM_OOO_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); +} + +static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) +{ + struct hpre *hpre = container_of(file->debug, struct hpre, debug); + + return &hpre->qm; +} + +static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int hpre_current_qm_write(struct hpre_debugfs_file 
*file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + u32 num_vfs = qm->vfs_num; + u32 vfq_num, tmp; + + if (val > num_vfs) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (val == 0) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_q_num - qm->qp_num) / num_vfs; + if (val == num_vfs) { + qm->debug.curr_qm_qp_num = + qm->ctrl_q_num - qm->qp_num - (num_vfs - 1) * vfq_num; + } else { + qm->debug.curr_qm_qp_num = vfq_num; + } + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + + return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & + HPRE_CTRL_CNT_CLR_CE_BIT; +} + +static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + u32 tmp; + + if (val != 1 && val != 0) + return -EINVAL; + + tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & + ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); + + return 0; +} + +static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + int cluster_index = file->index - HPRE_CLUSTER_CTRL; + unsigned long offset = HPRE_CLSTR_BASE + + cluster_index * HPRE_CLSTR_ADDR_INTRVL; + + return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); +} + +static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = hpre_file_to_qm(file); + int cluster_index = file->index - HPRE_CLUSTER_CTRL; + unsigned long offset = HPRE_CLSTR_BASE + cluster_index * + HPRE_CLSTR_ADDR_INTRVL; + + writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); + + return 0; +} + +static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct hpre_debugfs_file *file = filp->private_data; + char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + switch (file->type) { + case HPRE_CURRENT_QM: + val = hpre_current_qm_read(file); + break; + case HPRE_CLEAR_ENABLE: + val = hpre_clear_enable_read(file); + break; + case HPRE_CLUSTER_CTRL: + val = hpre_cluster_inqry_read(file); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + spin_unlock_irq(&file->lock); + ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct hpre_debugfs_file *file = filp->private_data; + char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; + unsigned long val = 0; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= HPRE_DBGFS_VAL_MAX_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1, + pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + switch (file->type) { + case HPRE_CURRENT_QM: + ret = hpre_current_qm_write(file, val); + if (ret) + goto err_input; + break; + case 
HPRE_CLEAR_ENABLE: + ret = hpre_clear_enable_write(file, val); + if (ret) + goto err_input; + break; + case HPRE_CLUSTER_CTRL: + ret = hpre_cluster_inqry_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + spin_unlock_irq(&file->lock); + + return count; + +err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations hpre_ctrl_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hpre_ctrl_debug_read, + .write = hpre_ctrl_debug_write, +}; + +static int hpre_debugfs_atomic64_get(void *data, u64 *val) +{ + struct hpre_dfx *dfx_item = data; + + *val = atomic64_read(&dfx_item->value); + return 0; +} + +static int hpre_debugfs_atomic64_set(void *data, u64 val) +{ + struct hpre_dfx *dfx_item = data; + + if (dfx_item->type == HPRE_OVERTIME_THRHLD) { + struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + + atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); + } else if (val) { + return -EINVAL; + } + + atomic64_set(&dfx_item->value, val); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, + hpre_debugfs_atomic64_set, "%llu\n"); + +static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, + enum hpre_ctrl_dbgfs_file type, int indx) +{ + struct dentry *file_dir; + struct hpre *hpre; + + if (dir) { + file_dir = dir; + } else { + hpre = container_of(dbg, struct hpre, debug); + file_dir = hpre->qm.debug.debug_root; + } + + if (type >= HPRE_DEBUG_FILE_NUM) + return -EINVAL; + + spin_lock_init(&dbg->files[indx].lock); + dbg->files[indx].debug = dbg; + dbg->files[indx].type = type; + dbg->files[indx].index = indx; + + debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir, + dbg->files + indx, &hpre_ctrl_debug_fops); + + return 0; +} + +static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + struct debugfs_regset32 *regset; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOMEM; + + regset->regs = hpre_com_dfx_regs; + regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); + regset->base = qm->io_base; + + debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset); + return 0; +} + +static int hpre_cluster_debugfs_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hisi_qm *qm = &hpre->qm; + struct device *dev = &qm->pdev->dev; + char buf[HPRE_DBGFS_VAL_MAX_LEN]; + struct debugfs_regset32 *regset; + struct dentry *tmp_d; + int i, ret; + + for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { + ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); + if (ret < 0) + return -EINVAL; + + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOMEM; + + regset->regs = hpre_cluster_dfx_regs; + regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs); + regset->base = qm->io_base + hpre_cluster_offsets[i]; + + debugfs_create_regset32("regs", 0444, tmp_d, regset); + + ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, + i + HPRE_CLUSTER_CTRL); + if (ret) + return ret; + } + + return 0; +} + +static int hpre_ctrl_debug_init(struct hpre_debug *debug) +{ + int ret; + + ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM, + HPRE_CURRENT_QM); + if (ret) + return ret; + + ret = 
hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE, + HPRE_CLEAR_ENABLE); + if (ret) + return ret; + + ret = hpre_pf_comm_regs_debugfs_init(debug); + if (ret) + return ret; + + return hpre_cluster_debugfs_init(debug); +} + +static void hpre_dfx_debug_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre_dfx *dfx = hpre->debug.dfx; + struct hisi_qm *qm = &hpre->qm; + struct dentry *parent; + int i; + + parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); + for (i = 0; i < HPRE_DFX_FILE_NUM; i++) { + dfx[i].type = i; + debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], + &hpre_atomic64_ops); + } +} + +static int hpre_debugfs_init(struct hisi_qm *qm) +{ + struct hpre *hpre = container_of(qm, struct hpre, qm); + struct device *dev = &qm->pdev->dev; + int ret; + + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + hpre_debugfs_root); + + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { + ret = hpre_ctrl_debug_init(&hpre->debug); + if (ret) + goto failed_to_create; + } + + hpre_dfx_debug_init(&hpre->debug); + + return 0; + +failed_to_create: + debugfs_remove_recursive(qm->debug.debug_root); + return ret; +} + +static void hpre_debugfs_exit(struct hisi_qm *qm) +{ + debugfs_remove_recursive(qm->debug.debug_root); +} + +static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hpre_hw_error *err = hpre_hw_errors; + struct device *dev = &qm->pdev->dev; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } +} + +static u32 hpre_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HPRE_HAC_INT_STATUS); +} + +static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); +} + +static void hpre_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 value; + + value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + writel(value & AM_OOO_SHUTDOWN_DISABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); + writel(value | AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); +} + +static void hpre_err_ini_set(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status; + qm->err_ini.err_info.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR; + qm->err_ini.err_info.ce = QM_BASE_CE; + qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; + qm->err_ini.err_info.fe = 0; + qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID; + qm->err_ini.err_info.acpi_rst = "HRST"; + qm->err_ini.hw_err_disable = hpre_hw_error_disable; + qm->err_ini.hw_err_enable = hpre_hw_error_enable; + qm->err_ini.set_usr_domain_cache = hpre_set_user_domain_and_cache; + qm->err_ini.log_dev_hw_err = hpre_log_hw_error; + qm->err_ini.open_axi_master_ooo = hpre_open_axi_master_ooo; + qm->err_ini.err_info.msi_wr_port = HPRE_WR_MSI_PORT; +} + +static int hpre_pf_probe_init(struct hisi_qm *qm) +{ + int ret; + + if (qm->ver != QM_HW_V2) + return -EINVAL; + + qm->ctrl_q_num = HPRE_QUEUE_NUM_V2; + + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) + return ret; + + hisi_qm_dev_err_init(qm); + + return 0; +} + +static int hpre_qm_pre_init(struct 
hisi_qm *qm, struct pci_dev *pdev) +{ + int ret; + + qm->algs = "rsa\ndh\n"; + qm->uacce_mode = uacce_mode; + qm->pdev = pdev; + ret = hisi_qm_pre_init(qm, pf_q_num, HPRE_PF_DEF_Q_BASE); + if (ret) + return ret; + + if (qm->ver == QM_HW_V1) { + pci_warn(pdev, "HPRE version 1 is not supported!\n"); + return -EINVAL; + } + + qm->qm_list = &hpre_devices; + qm->sqe_size = HPRE_SQE_SIZE; + qm->dev_name = hpre_name; + hpre_err_ini_set(qm); + + return 0; +} + +static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_qm *qm; + struct hpre *hpre; + int ret; + + hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL); + if (!hpre) + return -ENOMEM; + qm = &hpre->qm; + qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? + QM_HW_PF : QM_HW_VF; + + ret = hpre_qm_pre_init(qm, pdev); + if (ret) + return ret; + + ret = hisi_qm_init(qm); + if (ret) { + pci_err(pdev, "Failed to init qm (%d)!\n", ret); + return ret; + } + + if (pdev->is_physfn) { + ret = hpre_pf_probe_init(qm); + if (ret) { + pci_err(pdev, "Failed to init pf probe (%d)!\n", ret); + goto err_with_qm_init; + } + } + + ret = hisi_qm_start(qm); + if (ret) { + pci_err(pdev, "Failed to start qm (%d)!\n", ret); + goto err_with_err_init; + } + + ret = hpre_debugfs_init(qm); + if (ret) + pci_warn(pdev, "init debugfs fail!\n"); + + hisi_qm_add_to_list(qm, &hpre_devices); + + ret = hpre_algs_register(); + if (ret < 0) { + pci_err(pdev, "fail to register algs to crypto!\n"); + goto err_with_qm_start; + } + + if (qm->fun_type == QM_HW_PF && vfs_num > 0) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_with_crypto_register; + } + + return 0; + +err_with_crypto_register: + hpre_algs_unregister(); + +err_with_qm_start: + hisi_qm_del_from_list(qm, &hpre_devices); + hisi_qm_stop(qm, QM_NORMAL); + +err_with_err_init: + hisi_qm_dev_err_uninit(qm); + +err_with_qm_init: + hisi_qm_uninit(qm); + + return ret; +} + +static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs) + return hisi_qm_sriov_enable(pdev, num_vfs); + else + return hisi_qm_sriov_disable(pdev, &hpre_devices); +} + +static void hpre_remove(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + hisi_qm_remove_wait_delay(qm, &hpre_devices); + + hpre_algs_unregister(); + hisi_qm_del_from_list(qm, &hpre_devices); + if (qm->fun_type == QM_HW_PF && qm->vfs_num) { + ret = hisi_qm_sriov_disable(pdev, NULL); + if (ret) { + pci_err(pdev, "Disable SRIOV fail!\n"); + return; + } + } + if (qm->fun_type == QM_HW_PF) { + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + } + + hpre_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + + if (qm->fun_type == QM_HW_PF) + hisi_qm_dev_err_uninit(qm); + + hisi_qm_uninit(qm); +} + +static const struct pci_error_handlers hpre_err_handler = { + .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, +}; + +static struct pci_driver hpre_pci_driver = { + .name = hpre_name, + .id_table = hpre_dev_ids, + .probe = hpre_probe, + .remove = hpre_remove, + .sriov_configure = hpre_sriov_configure, + .err_handler = &hpre_err_handler, + .shutdown = hisi_qm_dev_shutdown, +}; + +static void hpre_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL); + if (IS_ERR_OR_NULL(hpre_debugfs_root)) + hpre_debugfs_root = NULL; +} + +static void hpre_unregister_debugfs(void) +{ + 
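+ /* debugfs_remove_recursive() is a no-op on a NULL dentry, so this is safe even when hpre_register_debugfs() left hpre_debugfs_root unset. */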
debugfs_remove_recursive(hpre_debugfs_root); +} + +static int __init hpre_init(void) +{ + int ret; + + INIT_LIST_HEAD(&hpre_devices.list); + mutex_init(&hpre_devices.lock); + + hpre_register_debugfs(); + + ret = pci_register_driver(&hpre_pci_driver); + if (ret) { + hpre_unregister_debugfs(); + pr_err("hpre: can't register hisi hpre driver.\n"); + } + + return ret; +} + +static void __exit hpre_exit(void) +{ + pci_unregister_driver(&hpre_pci_driver); + hpre_unregister_debugfs(); +} + +module_init(hpre_init); +module_exit(hpre_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zaibo Xu "); +MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator"); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c new file mode 100644 index 0000000000000000000000000000000000000000..5cab0bd771698f919eefd302f9fe69fd7d57a5e3 --- /dev/null +++ b/drivers/crypto/hisilicon/qm.c @@ -0,0 +1,4269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018-2019 HiSilicon Limited. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qm.h" + +/* eq/aeq irq enable */ +#define QM_VF_AEQ_INT_SOURCE 0x0 +#define QM_VF_AEQ_INT_MASK 0x4 +#define QM_VF_EQ_INT_SOURCE 0x8 +#define QM_VF_EQ_INT_MASK 0xc +#define QM_IRQ_NUM_V1 1 +#define QM_IRQ_NUM_PF_V2 4 +#define QM_IRQ_NUM_VF_V2 2 + +/* mailbox */ +#define QM_MB_CMD_SQC 0x0 +#define QM_MB_CMD_CQC 0x1 +#define QM_MB_CMD_EQC 0x2 +#define QM_MB_CMD_AEQC 0x3 +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_EVENT_SHIFT 8 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_CMD_DATA_ADDR_L 0x304 +#define QM_MB_CMD_DATA_ADDR_H 0x308 +#define QM_MB_STATUS_MASK GENMASK(12, 9) + +/* sqc shift */ +#define QM_SQ_HOP_NUM_SHIFT 0 +#define QM_SQ_PAGE_SIZE_SHIFT 4 +#define QM_SQ_BUF_SIZE_SHIFT 8 +#define QM_SQ_SQE_SIZE_SHIFT 12 +#define QM_SQ_PRIORITY_SHIFT 0 +#define QM_SQ_ORDERS_SHIFT 4 +#define QM_SQ_TYPE_SHIFT 8 + +#define QM_SQ_TYPE_MASK 0xf + +#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) + +/* cqc shift */ +#define QM_CQ_HOP_NUM_SHIFT 0 +#define QM_CQ_PAGE_SIZE_SHIFT 4 +#define QM_CQ_BUF_SIZE_SHIFT 8 +#define QM_CQ_CQE_SIZE_SHIFT 12 +#define QM_CQ_PHASE_SHIFT 0 +#define QM_CQ_FLAG_SHIFT 1 + +#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) + +#define QM_QC_CQE_SIZE 4 + +#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1) + +/* eqc shift */ +#define QM_EQE_AEQE_SIZE (2UL << 12) +#define QM_EQC_PHASE_SHIFT 16 + +#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) +#define QM_EQE_CQN_MASK 0xffff + +#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) +#define QM_AEQE_TYPE_SHIFT 17 + +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_CMD_EQ 2 +#define QM_DOORBELL_CMD_AEQ 3 + +#define QM_DOORBELL_BASE_V1 0x340 +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 + +#define QM_MEM_START_INIT 0x100040 +#define QM_MEM_INIT_DONE 0x100044 +#define QM_VFT_CFG_RDY 0x10006c +#define QM_VFT_CFG_OP_WR 0x100058 +#define QM_VFT_CFG_TYPE 0x10005c +#define QM_SQC_VFT 0x0 +#define QM_CQC_VFT 0x1 +#define QM_VFT_CFG_ADDRESS 0x100060 +#define QM_VFT_CFG_OP_ENABLE 0x100054 + +#define QM_VFT_CFG_DATA_L 0x100064 +#define QM_VFT_CFG_DATA_H 0x100068 +#define QM_SQC_VFT_BUF_SIZE (7ULL << 8) +#define QM_SQC_VFT_SQC_SIZE (5ULL << 12) +#define QM_SQC_VFT_INDEX_NUMBER (1ULL 
<< 16) +#define QM_SQC_VFT_START_SQN_SHIFT 28 +#define QM_SQC_VFT_VALID (1ULL << 44) +#define QM_SQC_VFT_SQN_SHIFT 45 +#define QM_CQC_VFT_BUF_SIZE (7ULL << 8) +#define QM_CQC_VFT_SQC_SIZE (5ULL << 12) +#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) +#define QM_CQC_VFT_VALID (1ULL << 28) + +#define QM_SQC_VFT_BASE_SHIFT_V2 28 +#define QM_SQC_VFT_BASE_MASK_V2 0x3ff +#define QM_SQC_VFT_NUM_SHIFT_V2 45 +#define QM_SQC_VFT_NUM_MASK_v2 0x3ff + +#define QM_DFX_CNT_CLR_CE 0x100118 +#define QM_IN_IDLE_ST_REG 0x1040e4 + +#define QM_ABNORMAL_INT_SOURCE 0x100000 +#define QM_ABNORMAL_INT_MASK 0x100004 +#define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0) +#define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_PF_ABNORMAL_INT_SET 0x10000c +#define QM_ABNORMAL_INF00 0x100010 +#define QM_FIFO_OVERFLOW_TYPE 0xc0 +#define QM_FIFO_OVERFLOW_VF 0x3f +#define QM_ABNORMAL_INF01 0x100014 +#define QM_DB_TIMEOUT_TYPE 0xc0 +#define QM_DB_TIMEOUT_VF 0x3f +#define QM_RAS_CE_ENABLE 0x1000ec +#define QM_RAS_FE_ENABLE 0x1000f0 +#define QM_RAS_NFE_ENABLE 0x1000f4 +#define QM_RAS_CE_THRESHOLD 0x1000f8 +#define QM_RAS_MSI_INT_SEL 0x1040f4 +#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0) + +#define QM_PEH_VENDOR_ID 0x1000d8 +#define VENDOR_ID_TEST_VALUE 0x5a5a +#define QM_PEH_DFX_INFO0 0x1000FC +#define PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 +#define PEH_MSI_DISABLE GENMASK(31, 0) + +#define QM_CACHE_WB_START 0x204 +#define QM_CACHE_WB_DONE 0x208 +#define QM_V2_BASE_OFFSET 0x1000 + +#define QM_DB_CMD_SHIFT_V1 16 +#define QM_DB_INDEX_SHIFT_V1 32 +#define QM_DB_PRIORITY_SHIFT_V1 48 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 + +#define QM_EQ_EVENT_IRQ_VECTOR 0 +#define QM_AEQ_EVENT_IRQ_VECTOR 1 +#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 + +#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff + +#define QM_SQE_DATA_ALIGN_MASK 0x7f + +#define POLL_PERIOD 10 +#define POLL_TIMEOUT 1000 +#define TEMPBUFFER_LEN 22 + +#define QM_DB_TIMEOUT_TYPE_SHIFT 6 +#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 + +#define TASK_TIMEOUT 10000 + +#define WAIT_PERIOD 20 +#define QM_MB_WAIT_READY_CNT 10 +#define QM_MB_MAX_WAIT_CNT 21000 +#define WAIT_PERIOD_US_MAX 200 +#define WAIT_PERIOD_US_MIN 100 +#define REMOVE_WAIT_DELAY 10 +#define MAX_WAIT_COUNTS 10000 +#define QM_DEV_RESET_STATUS 0 +#define QM_RESET_WAIT_TIMEOUT 400 +#define QM_PCI_COMMAND_INVALID 0xFFFFFFFF +#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 +#define MASTER_TRANS_RETURN_RW 3 +#define MASTER_TRANS_RETURN 0x300150 +#define MASTER_GLOBAL_CTRL 0x300000 +#define QM_REG_RD_INTVRL_US 10 +#define QM_REG_RD_TMOUT_US 1000 +#define AM_CFG_PORT_RD_EN 0x300018 +#define AM_CFG_PORT_WR_EN 0x30001C +#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT +#define AM_ROB_ECC_INT_STS 0x300104 +#define ROB_ECC_ERR_MULTPL BIT(1) + +#define QM_RESET_STOP_TX_OFFSET 1 +#define QM_RESET_STOP_RX_OFFSET 2 + +#define QM_DBG_READ_LEN 256 +#define QM_DBG_WRITE_LEN 1024 +#define QM_DBG_SHOW_SHIFT 16 + +#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ + (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ + ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) +#define QM_MK_CQC_DW3_V2(cqe_sz) \ + ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) +#define QM_MK_SQC_W13(priority, orders, alg_type) \ + (((priority) << QM_SQ_PRIORITY_SHIFT) | \ + ((orders) << QM_SQ_ORDERS_SHIFT) | \ + (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) +#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ + (((hop_num) << 
QM_SQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ + ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) +#define QM_MK_SQC_DW3_V2(sqe_sz) \ + ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) + +#define QMC_ALIGN(sz) ALIGN(sz, 32) + +static int __hisi_qm_start(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm); + +enum vft_type { + SQC_VFT = 0, + CQC_VFT, +}; + +struct hisi_qm_resource { + struct hisi_qm *qm; + int distance; + struct list_head list; +}; + +struct hisi_qm_hw_ops { + int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); + void (*qm_db)(struct hisi_qm *qm, u16 qn, + u8 cmd, u16 index, u8 priority); + u32 (*get_irq_num)(struct hisi_qm *qm); + int (*debug_init)(struct hisi_qm *qm); + void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi); + void (*hw_error_uninit)(struct hisi_qm *qm); + pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); +}; + +struct qm_dfx_item { + const char *name; + u32 offset; +}; + +static struct qm_dfx_item qm_dfx_files[] = { + {"qm_err_irq", offsetof(struct qm_dfx, qm_err_irq_cnt)}, + {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, + {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, + {"qp_err", offsetof(struct qm_dfx, qp_err_cnt)}, + {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, +}; + +static const char * const qm_debug_file_name[] = { + [CURRENT_Q] = "current_q", + [CLEAR_ENABLE] = "clear_enable", + [QM_STATE] = "qm_state", +}; + +static const struct hisi_qm_hw_error qm_hw_error[] = { + { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, + { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, + { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, + { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, + { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, + { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, + { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, + { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, + { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, + { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, + { .int_msk = BIT(10), .msg = "qm_db_timeout" }, + { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, + { .int_msk = BIT(12), .msg = "qm_db_random_invalid" } +}; + +static const char * const qm_db_timeout[] = { + "sq", "cq", "eq", "aeq", +}; + +static const char * const qm_fifo_overflow[] = { + "cq", "eq", "aeq", +}; + +static const char * const qm_s[] = { + "init", "start", "close", "stop", +}; + +static const char * const qp_s[] = { + "none", "init", "start", "stop", "close", +}; + +static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) +{ + enum qm_state curr = atomic_read(&qm->status.flags); + bool avail = false; + + switch (curr) { + case QM_INIT: + if (new == QM_START || new == QM_CLOSE) + avail = true; + break; + case QM_START: + if (new == QM_STOP) + avail = true; + break; + case QM_STOP: + if (new == QM_CLOSE || new == QM_START) + avail = true; + break; + default: + break; + } + dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + if (!avail) + dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + return avail; +} + +static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, + enum qp_state new) +{ + enum qm_state qm_curr = atomic_read(&qm->status.flags); + enum qp_state qp_curr = 0; + bool avail = false; + + if (qp) + qp_curr = atomic_read(&qp->qp_status.flags); + + switch (new) { + case QP_INIT: + if (qm_curr == 
QM_START || qm_curr == QM_INIT) + avail = true; + break; + case QP_START: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP)) + avail = true; + break; + case QP_STOP: + if ((qm_curr == QM_START && qp_curr == QP_START) || + (qp_curr == QP_INIT)) + avail = true; + break; + case QP_CLOSE: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_INIT)) + avail = true; + break; + default: + break; + } + + dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + + if (!avail) + dev_warn(&qm->pdev->dev, + "Can not change qp state from %s to %s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + return avail; +} + +/* 128 bit should be wrote to hardware at one time to trigger a mailbox */ +static void qm_mb_write(struct hisi_qm *qm, const void *src) +{ + void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + unsigned long tmp0 = 0, tmp1 = 0; + + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dmb oshst\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char __iomem *)fun_base)) + : "Q" (*((char *)src)) + : "memory"); +} + +/* 128 bit should be read from hardware at one time*/ +static void qm_mb_read(struct hisi_qm *qm, void *dst) +{ + const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + unsigned long tmp0 = 0, tmp1 = 0; + + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dmb oshst\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char *)dst)) + : "Q" (*((char __iomem *)fun_base)) + : "memory"); +} + +static int qm_wait_mb_ready(struct hisi_qm *qm) +{ + struct qm_mailbox mailbox; + int i = 0; + + while (i++ < QM_MB_WAIT_READY_CNT) { + qm_mb_read(qm, &mailbox); + if (!((le16_to_cpu(mailbox.w0) >> QM_MB_BUSY_SHIFT) & 0x1)) + return 0; + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); + + return -EBUSY; +} + +static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + int i = 0; + + while (++i) { + qm_mb_read(qm, mailbox); + if (!((le16_to_cpu(mailbox->w0) >> QM_MB_BUSY_SHIFT) & 0x1)) + break; + + if (i > QM_MB_MAX_WAIT_CNT) { + dev_err(&qm->pdev->dev, + "QM mailbox operation timeout!\n"); + return -ETIMEDOUT; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) { + dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); + return -EIO; + } + + return 0; +} + +static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + int ret; + + ret = qm_wait_mb_ready(qm); + if (ret) + goto mb_busy; + + qm_mb_write(qm, mailbox); + + ret = qm_wait_mb_finish(qm, mailbox); + if (ret) + goto mb_busy; + + return 0; + +mb_busy: + atomic64_inc(&qm->debug.dfx.mb_err_cnt); + return ret; +} + +static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, + u64 base, u16 queue, bool op) +{ + mailbox->w0 = cpu_to_le16((cmd) | + ((op) ? 
0x1 << QM_MB_OP_SHIFT : 0) | + (0x1 << QM_MB_BUSY_SHIFT)); + mailbox->queue_num = cpu_to_le16(queue); + mailbox->base_l = cpu_to_le32(lower_32_bits(base)); + mailbox->base_h = cpu_to_le32(upper_32_bits(base)); + mailbox->rsvd = 0; +} + +static int qm_check_dev_error(struct hisi_qm *qm); +static int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, + u16 queue, bool op) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + struct qm_mailbox mailbox; + int ret; + + dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u.\n", queue, cmd); + + /* No need to judge if master OOO is blocked. */ + if (qm_check_dev_error(pf_qm)) { + dev_err(&qm->pdev->dev, + "QM mailbox operation failed since qm is stop!\n"); + return -EIO; + } + + qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op); + mutex_lock(&qm->mailbox_lock); + ret = qm_mb_nolock(qm, &mailbox); + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + +static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +{ + struct qm_mailbox mailbox; + int ret; + + qm_mb_pre_init(&mailbox, cmd, 0, queue, 1); + mutex_lock(&qm->mailbox_lock); + ret = qm_mb_nolock(qm, &mailbox); + mutex_unlock(&qm->mailbox_lock); + if (ret) + return ret; + + *base = le32_to_cpu(mailbox.base_l) | + ((u64)le32_to_cpu(mailbox.base_h) << 32); + + return 0; +} + +/* op 0: set xqc info to hardware, 1: get xqc info from hardware. */ +static int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, + bool op) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + struct qm_mailbox mailbox; + dma_addr_t xqc_dma; + void *tmp_xqc; + size_t size; + int ret; + + switch (cmd) { + case QM_MB_CMD_SQC: + size = sizeof(struct qm_sqc); + tmp_xqc = qm->xqc_buf.sqc; + xqc_dma = qm->xqc_buf.sqc_dma; + break; + case QM_MB_CMD_CQC: + size = sizeof(struct qm_cqc); + tmp_xqc = qm->xqc_buf.cqc; + xqc_dma = qm->xqc_buf.cqc_dma; + break; + case QM_MB_CMD_EQC: + size = sizeof(struct qm_eqc); + tmp_xqc = qm->xqc_buf.eqc; + xqc_dma = qm->xqc_buf.eqc_dma; + break; + case QM_MB_CMD_AEQC: + size = sizeof(struct qm_aeqc); + tmp_xqc = qm->xqc_buf.aeqc; + xqc_dma = qm->xqc_buf.aeqc_dma; + break; + } + + /* No need to judge if master OOO is blocked. 
*/ + if (qm_check_dev_error(pf_qm)) { + dev_err(&qm->pdev->dev, + "QM mailbox operation failed since qm is stop!\n"); + return -EIO; + } + + mutex_lock(&qm->mailbox_lock); + if (!op) + memcpy(tmp_xqc, xqc, size); + + qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op); + ret = qm_mb_nolock(qm, &mailbox); + if (!ret && op) + memcpy(xqc, tmp_xqc, size); + + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + +static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + u64 doorbell; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) | + ((u64)index << QM_DB_INDEX_SHIFT_V1) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V1); + + writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); +} + +static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + u64 doorbell; + u64 dbase; + u16 randata = 0; + + if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) + dbase = QM_DOORBELL_SQ_CQ_BASE_V2; + else + dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; + + doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | + ((u64)randata << QM_DB_RAND_SHIFT_V2) | + ((u64)index << QM_DB_INDEX_SHIFT_V2) | + ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); + + writeq(doorbell, qm->io_base + dbase); +} + +static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) +{ + dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", + qn, cmd, index); + + qm->ops->qm_db(qm, qn, cmd, index, priority); +} + +/* Put qm memory into active, so that other configs become available */ +static int qm_dev_mem_reset(struct hisi_qm *qm) +{ + u32 val; + + writel(0x1, qm->io_base + QM_MEM_START_INIT); + return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); +} + +static u32 qm_get_irq_num_v1(struct hisi_qm *qm) +{ + return QM_IRQ_NUM_V1; +} + +static u32 qm_get_irq_num_v2(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_PF) + return QM_IRQ_NUM_PF_V2; + else + return QM_IRQ_NUM_VF_V2; +} + +static void qm_cq_head_update(struct hisi_qp *qp) +{ + if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { + qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; + qp->qp_status.cq_head = 0; + } else { + qp->qp_status.cq_head++; + } +} + +static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) +{ + struct qm_cqe *cqe; + + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) + return; + + if (qp->event_cb) + qp->event_cb(qp); + else { + cqe = qp->cqe + qp->qp_status.cq_head; + + if (qp->req_cb) { + while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { + dma_rmb(); + qp->req_cb(qp, qp->sqe + qm->sqe_size * + le16_to_cpu(cqe->sq_head)); + qm_cq_head_update(qp); + cqe = qp->cqe + qp->qp_status.cq_head; + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, + qp->qp_status.cq_head, 0); + atomic_dec(&qp->qp_status.used); + + cond_resched(); + } + /* set c_flag */ + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, + qp->qp_status.cq_head, 1); + } + } +} + +static void qm_work_process(struct work_struct *work) +{ + struct hisi_qm *qm = container_of(work, struct hisi_qm, work); + struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; + struct hisi_qp *qp; + int eqe_num = 0; + + while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + eqe_num++; + qp = &qm->qp_array[le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK]; + qm_poll_qp(qp, qm); + + if (qm->status.eq_head == QM_EQ_DEPTH - 1) { + qm->status.eqc_phase = !qm->status.eqc_phase; + eqe = qm->eqe; + qm->status.eq_head = 0; + } else { + eqe++; + qm->status.eq_head++; + } + + if (eqe_num == QM_Q_DEPTH / 2 - 1) { 
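+ /* Periodically ack the events handled so far by writing the current EQ head to the doorbell, instead of waiting until the loop ends. */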
+ eqe_num = 0; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + } + } + + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); +} + +static irqreturn_t do_qm_irq(int irq, void *data) +{ + struct hisi_qm *qm = (struct hisi_qm *)data; + + if (qm->wq) + queue_work(qm->wq, &qm->work); + else + schedule_work(&qm->work); + + return IRQ_HANDLED; +} + +static irqreturn_t qm_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + + if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) + return do_qm_irq(irq, data); + + atomic64_inc(&qm->debug.dfx.qm_err_irq_cnt); + dev_err(&qm->pdev->dev, "invalid int source\n"); + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + return IRQ_NONE; +} + +static irqreturn_t qm_aeq_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; + u32 type; + + atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); + if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) + return IRQ_NONE; + + while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { + type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; + if (type < ARRAY_SIZE(qm_fifo_overflow)) + dev_err(&qm->pdev->dev, "%s overflow\n", + qm_fifo_overflow[type]); + else + dev_err(&qm->pdev->dev, "unknown error type %d\n", + type); + + if (qm->status.aeq_head == QM_Q_DEPTH - 1) { + qm->status.aeqc_phase = !qm->status.aeqc_phase; + aeqe = qm->aeqe; + qm->status.aeq_head = 0; + } else { + aeqe++; + qm->status.aeq_head++; + } + + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); + } + + return IRQ_HANDLED; +} + +static irqreturn_t qm_abnormal_irq(int irq, void *data) +{ + const struct hisi_qm_hw_error *err; + struct hisi_qm *qm = data; + struct device *dev = &qm->pdev->dev; + u32 error_status, tmp; + int i; + + atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); + if (qm->abnormal_fix) { + qm->abnormal_fix(qm); + return IRQ_HANDLED; + } + + /* read err sts */ + tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + error_status = qm->msi_mask & tmp; + + for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { + err = &qm_hw_error[i]; + if (!(err->int_msk & error_status)) + continue; + + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + } + + /* clear err sts */ + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + return IRQ_HANDLED; +} + +static int qm_irq_register(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), + qm_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + return ret; + + if (qm->ver == QM_HW_V2) { + ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), + qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + goto err_aeq_irq; + + if (qm->fun_type == QM_HW_PF) { + ret = request_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), + qm_abnormal_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + goto err_abonormal_irq; + } + } + + return 0; + +err_abonormal_irq: + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); +err_aeq_irq: + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + return ret; +} + +static void qm_irq_unregister(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + + if (qm->ver == QM_HW_V2) { + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + + if (qm->fun_type == QM_HW_PF) + free_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); + } +} + +static void qm_init_qp_status(struct hisi_qp *qp) +{ + 
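+ /* Reset the ring indices, the expected CQ phase and the usage/send reference counters for a newly configured queue pair. */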
struct hisi_qp_status *qp_status = &qp->qp_status; + + qp_status->sq_tail = 0; + qp_status->cq_head = 0; + qp_status->cqc_phase = true; + atomic_set(&qp_status->used, 0); + atomic_set(&qp_status->send_ref, 0); +} + +static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, + u32 number) +{ + u64 tmp = 0; + + if (number > 0) { + switch (type) { + case SQC_VFT: + switch (qm->ver) { + case QM_HW_V1: + tmp = QM_SQC_VFT_BUF_SIZE | + QM_SQC_VFT_SQC_SIZE | + QM_SQC_VFT_INDEX_NUMBER | + QM_SQC_VFT_VALID | + (u64)base << QM_SQC_VFT_START_SQN_SHIFT; + break; + case QM_HW_V2: + tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | + QM_SQC_VFT_VALID | + (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; + break; + case QM_HW_UNKNOWN: + break; + } + break; + case CQC_VFT: + switch (qm->ver) { + case QM_HW_V1: + tmp = QM_CQC_VFT_BUF_SIZE | + QM_CQC_VFT_SQC_SIZE | + QM_CQC_VFT_INDEX_NUMBER | + QM_CQC_VFT_VALID; + break; + case QM_HW_V2: + tmp = QM_CQC_VFT_VALID; + break; + case QM_HW_UNKNOWN: + break; + } + break; + } + } + + writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); + writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); +} + +static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, + u32 fun_num, u32 base, u32 number) +{ + int ret; + unsigned int val; + + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); + if (ret) + return ret; + + writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); + writel(type, qm->io_base + QM_VFT_CFG_TYPE); + writel(fun_num, qm->io_base + QM_VFT_CFG_ADDRESS); + + qm_vft_data_cfg(qm, type, base, number); + + writel(0x0, qm->io_base + QM_VFT_CFG_RDY); + writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); + + return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, + val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT); +} + +/* The config should be conducted after qm_dev_mem_reset() */ +static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, + u32 number) +{ + int ret, i; + + for (i = SQC_VFT; i <= CQC_VFT; i++) { + ret = qm_set_vft_common(qm, i, fun_num, base, number); + if (ret) + return ret; + } + + return 0; +} + +static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) +{ + u64 sqc_vft; + int ret; + + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); + if (ret) + return ret; + + *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); + *number = (QM_SQC_VFT_NUM_MASK_v2 & + (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; + + return 0; +} + +static struct hisi_qm *file_to_qm(struct debugfs_file *file) +{ + struct qm_debug *debug = file->debug; + + return container_of(debug, struct hisi_qm, debug); +} + +static u32 current_q_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT); +} + +static int current_q_write(struct debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val >= qm->debug.curr_qm_qp_num) + return -EINVAL; + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val << QM_DFX_QN_SHIFT | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 clear_enable_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_CNT_CLR_CE); +} + 
+/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */ +static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl) +{ + struct hisi_qm *qm = file_to_qm(file); + + if (rd_clr_ctrl > 1) + return -EINVAL; + + writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); + + return 0; +} + +static u32 qm_state_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_IN_IDLE_ST_REG); +} + +static ssize_t qm_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + char tbuf[TEMPBUFFER_LEN]; + u32 val; + int ret; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_Q: + val = current_q_read(file); + break; + case CLEAR_ENABLE: + val = clear_enable_read(file); + break; + case QM_STATE: + val = qm_state_read(file); + break; + default: + mutex_unlock(&file->lock); + return -EINVAL; + } + mutex_unlock(&file->lock); + ret = snprintf(tbuf, TEMPBUFFER_LEN, "%u\n", val); + + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t qm_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct debugfs_file *file = filp->private_data; + enum qm_debug_file index = file->index; + unsigned long val; + char tbuf[TEMPBUFFER_LEN]; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= TEMPBUFFER_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, TEMPBUFFER_LEN - 1, pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + mutex_lock(&file->lock); + switch (index) { + case CURRENT_Q: + ret = current_q_write(file, val); + if (ret) + goto err_input; + break; + case CLEAR_ENABLE: + ret = clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + mutex_unlock(&file->lock); + + return count; + +err_input: + mutex_unlock(&file->lock); + return ret; +} + +static const struct file_operations qm_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_debug_read, + .write = qm_debug_write, +}; + +struct qm_dfx_registers { + char *reg_name; + u64 reg_offset; +}; + +static struct qm_dfx_registers qm_dfx_regs[] = { + /* these regs are read clear */ + {"QM_ECC_1BIT_CNT ", 0x104000ull}, + {"QM_ECC_MBIT_CNT ", 0x104008ull}, + {"QM_DFX_MB_CNT ", 0x104018ull}, + {"QM_DFX_DB_CNT ", 0x104028ull}, + {"QM_DFX_SQE_CNT ", 0x104038ull}, + {"QM_DFX_CQE_CNT ", 0x104048ull}, + {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, + {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, + {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, + {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, + + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, + {"QM_ECC_1BIT_INF ", 0x104004ull}, + {"QM_ECC_MBIT_INF ", 0x10400cull}, + {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, + {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, + {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, + {"QM_DFX_FF_ST0 ", 0x1040c8ull}, + {"QM_DFX_FF_ST1 ", 0x1040ccull}, + {"QM_DFX_FF_ST2 ", 0x1040d0ull}, + {"QM_DFX_FF_ST3 ", 0x1040d4ull}, + {"QM_DFX_FF_ST4 ", 0x1040d8ull}, + {"QM_DFX_FF_ST5 ", 0x1040dcull}, + {"QM_DFX_FF_ST6 ", 0x1040e0ull}, + {"QM_IN_IDLE_ST ", 0x1040e4ull}, + { NULL, 0} +}; + +static struct qm_dfx_registers qm_vf_dfx_regs[] = { + {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, + { NULL, 0} +}; + +static int qm_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + struct qm_dfx_registers *regs; + u32 val; + + 
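+ /* A VF exposes only the function-active status register; the PF dumps the full DFX set, part of which is read-clear. */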
if (qm->fun_type == QM_HW_PF) + regs = qm_dfx_regs; + else + regs = qm_vf_dfx_regs; + + while (regs->reg_name) { + val = readl(qm->io_base + regs->reg_offset); + seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val); + regs++; + } + + return 0; +} + +static int qm_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, qm_regs_show, inode->i_private); +} + +static const struct file_operations qm_regs_fops = { + .owner = THIS_MODULE, + .open = qm_regs_open, + .read = seq_read, + .release = single_release, +}; + +static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + char buf[QM_DBG_READ_LEN]; + int uncopy_bytes, len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + + uncopy_bytes = copy_to_user(buffer, buf, len); + if (uncopy_bytes) + return -EFAULT; + + return (*pos = len); +} + +static int dump_show(struct hisi_qm *qm, void *info, + unsigned int info_size, char *info_name) +{ + struct device *dev = &qm->pdev->dev; + u8 *info_curr = info; + u8 *info_buf; + u32 i; +#define BYTE_PER_DW 4 + + info_buf = kzalloc(info_size, GFP_KERNEL); + if (!info_buf) + return -ENOMEM; + + for (i = 0; i < info_size; i++, info_curr++) { + if (i % BYTE_PER_DW == 0) + info_buf[i + 3UL] = *info_curr; + else if (i % BYTE_PER_DW == 1) + info_buf[i + 1UL] = *info_curr; + else if (i % BYTE_PER_DW == 2) + info_buf[i - 1] = *info_curr; + else if (i % BYTE_PER_DW == 3) + info_buf[i - 3] = *info_curr; + } + + dev_info(dev, "%s DUMP\n", info_name); + for (i = 0; i < info_size; i += BYTE_PER_DW) { + pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW, + info_buf[i], info_buf[i + 1UL], + info_buf[i + 2UL], info_buf[i + 3UL]); + } + + kfree(info_buf); + return 0; +} + +static int qm_sqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc_curr; + struct qm_sqc sqc; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); + if (!ret) + return dump_show(qm, &sqc, sizeof(struct qm_sqc), "SQC"); + + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; + ret = dump_show(qm, sqc_curr, sizeof(struct qm_sqc), + "SOFT SQC"); + if (ret) + dev_info(dev, "Show soft sqc failed!\n"); + } + up_read(&qm->qps_lock); + + return ret; +} + +static int qm_cqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqc *cqc_curr; + struct qm_cqc cqc; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); + if (!ret) + return dump_show(qm, &cqc, sizeof(struct qm_cqc), "CQC"); + + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; + + ret = dump_show(qm, cqc_curr, sizeof(struct qm_cqc), + "SOFT CQC"); + if (ret) + dev_info(dev, "Show soft cqc failed!\n"); + } + up_read(&qm->qps_lock); + + return ret; +} + +static int qm_eqc_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_eqc eqc; + int ret; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input 
extra characters!\n"); + return -EINVAL; + } + + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 1); + if (ret) + return ret; + + return dump_show(qm, &eqc, sizeof(struct qm_eqc), "EQC"); +} + +static int qm_aeqc_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_aeqc aeqc; + int ret; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 1); + if (ret) + return ret; + + return dump_show(qm, &aeqc, sizeof(struct qm_aeqc), "AEQC"); +} + +static int q_dump_param_parse(struct hisi_qm *qm, char *s, + u32 *e_id, u32 *q_id) +{ + struct device *dev = &qm->pdev->dev; + unsigned int qp_num = qm->qp_num; + char *presult; + int ret; + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input qp number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, q_id); + if (ret || *q_id >= qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qp_num - 1); + return -EINVAL; + } + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input sqe number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, e_id); + if (ret || *e_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_sq_dump(struct hisi_qm *qm, char *s) +{ + struct hisi_qp *qp; + u32 qp_id, sqe_id; + void *sqe_curr; + int ret; + + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); + if (ret) + return ret; + + qp = &qm->qp_array[qp_id]; + sqe_curr = qp->sqe + (u32)(sqe_id * qm->sqe_size); + memset(sqe_curr + qm->debug.sqe_mask_offset, SQE_ADDR_MASK, + qm->debug.sqe_mask_len); + + return dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); +} + +static int qm_cq_dump(struct hisi_qm *qm, char *s) +{ + struct qm_cqe *cqe_curr; + struct hisi_qp *qp; + u32 qp_id, cqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); + if (ret) + return ret; + + qp = &qm->qp_array[qp_id]; + cqe_curr = qp->cqe + cqe_id; + + return dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); +} + +static int qm_eq_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_eqe *eqe; + u32 eqe_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &eqe_id); + if (ret || eqe_id >= QM_EQ_DEPTH) { + dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); + return -EINVAL; + } + + down_read(&qm->qps_lock); + if (qm->eqe) { + eqe = qm->eqe + eqe_id; + ret = dump_show(qm, eqe, sizeof(struct qm_eqe), "EQE"); + if (ret) + dev_info(dev, "Show eqe failed!\n"); + } + up_read(&qm->qps_lock); + + return ret; +} + +static int qm_aeq_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_aeqe *aeqe; + u32 aeqe_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &aeqe_id); + if (ret || aeqe_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + down_read(&qm->qps_lock); + if (qm->aeqe) { + aeqe = qm->aeqe + aeqe_id; + ret = dump_show(qm, aeqe, sizeof(struct qm_aeqe), "AEQE"); + if (ret) + dev_info(dev, "Show aeqe failed!\n"); + } + up_read(&qm->qps_lock); + + return ret; +} + +static int qm_dbg_help(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input 
extra characters!\n"); + return -EINVAL; + } + + dev_info(dev, "available commands\n"); + dev_info(dev, "sqc \n"); + dev_info(dev, "cqc \n"); + dev_info(dev, "eqc\n"); + dev_info(dev, "aeqc\n"); + dev_info(dev, "sq \n"); + dev_info(dev, "cq \n"); + dev_info(dev, "eq \n"); + dev_info(dev, "aeq \n"); + + return 0; +} + +static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) +{ + struct device *dev = &qm->pdev->dev; + char *presult, *s, *s_tmp; + int ret; + + s = kstrdup(cmd_buf, GFP_KERNEL); + if (!s) + return -ENOMEM; + + s_tmp = s; + presult = strsep(&s, " "); + if (!presult) { + kfree(s_tmp); + return -EINVAL; + } + + if (!strcmp(presult, "sqc")) + ret = qm_sqc_dump(qm, s); + else if (!strcmp(presult, "cqc")) + ret = qm_cqc_dump(qm, s); + else if (!strcmp(presult, "eqc")) + ret = qm_eqc_dump(qm, s); + else if (!strcmp(presult, "aeqc")) + ret = qm_aeqc_dump(qm, s); + else if (!strcmp(presult, "sq")) + ret = qm_sq_dump(qm, s); + else if (!strcmp(presult, "cq")) + ret = qm_cq_dump(qm, s); + else if (!strcmp(presult, "eq")) + ret = qm_eq_dump(qm, s); + else if (!strcmp(presult, "aeq")) + ret = qm_aeq_dump(qm, s); + else if (!strcmp(presult, "help")) + ret = qm_dbg_help(qm, s); + else + ret = -EINVAL; + + if (ret) + dev_info(dev, "Please echo help\n"); + + kfree(s_tmp); + + return ret; +} + +static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int uncopied_bytes; + int ret; + + if (*pos) + return 0; + + /* Judge if the instance is being reset. */ + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) + return 0; + + if (count > QM_DBG_WRITE_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return -ENOMEM; + + uncopied_bytes = copy_from_user(cmd_buf, buffer, count); + if (uncopied_bytes) { + kfree(cmd_buf); + return -EFAULT; + } + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + ret = qm_cmd_write_dump(qm, cmd_buf); + if (ret) { + kfree(cmd_buf); + return ret; + } + + kfree(cmd_buf); + return count; +} + +static const struct file_operations qm_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_cmd_read, + .write = qm_cmd_write, +}; + +static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) +{ + struct dentry *qm_d = qm->debug.qm_d, *tmp; + struct debugfs_file *file = qm->debug.files + index; + + tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, + &qm_debug_fops); + if (IS_ERR(tmp)) + return -ENOENT; + + file->index = index; + mutex_init(&file->lock); + file->debug = &qm->debug; + + return 0; +} + +static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi) +{ + dev_info(&qm->pdev->dev, + "QM v%d does not support hw error handle\n", qm->ver); + + writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); +} + +static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, + u32 msi) +{ + u32 irq_enable = ce | nfe | fe | msi; + u32 irq_unmask = ~irq_enable; + u32 error_status; + + qm->error_mask = ce | nfe | fe; + qm->msi_mask = msi; + + /* clear QM hw residual error source */ + error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + if (!(qm->hw_status & BIT(QM_DEV_RESET_STATUS)) + || !error_status) + error_status = QM_ABNORMAL_INT_SOURCE_CLR; + else + error_status &= qm->error_mask; 
+ + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + /* configure error type */ + writel(ce, qm->io_base + QM_RAS_CE_ENABLE); + writel(0x1, qm->io_base + QM_RAS_CE_THRESHOLD); + writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(fe, qm->io_base + QM_RAS_FE_ENABLE); + + /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */ + writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL); + + irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); + writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); +} + +static void qm_hw_error_uninit_v2(struct hisi_qm *qm) +{ + writel(QM_HW_ERROR_IRQ_DISABLE, qm->io_base + QM_ABNORMAL_INT_MASK); +} + +static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) +{ + const struct hisi_qm_hw_error *err; + struct device *dev = &qm->pdev->dev; + u32 reg_val, type, vf_num; + int i; + + for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { + err = &qm_hw_error[i]; + if (!(err->int_msk & error_status)) + continue; + + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (err->int_msk & QM_DB_TIMEOUT) { + reg_val = readl(qm->io_base + + QM_ABNORMAL_INF01); + type = (reg_val & QM_DB_TIMEOUT_TYPE) >> + QM_DB_TIMEOUT_TYPE_SHIFT; + vf_num = reg_val & QM_DB_TIMEOUT_VF; + dev_err(dev, "qm %s doorbell timeout in function %u\n", + qm_db_timeout[type], vf_num); + } else if (err->int_msk & QM_OF_FIFO_OF) { + reg_val = readl(qm->io_base + + QM_ABNORMAL_INF00); + type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> + QM_FIFO_OVERFLOW_TYPE_SHIFT; + vf_num = reg_val & QM_FIFO_OVERFLOW_VF; + + if (type < ARRAY_SIZE(qm_fifo_overflow)) + dev_err(dev, "qm %s fifo overflow in function %u\n", + qm_fifo_overflow[type], + vf_num); + else + dev_err(dev, "unknown error type\n"); + } + } +} + +static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) +{ + u32 error_status, tmp; + + /* read err sts */ + tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + error_status = qm->error_mask & tmp; + if (error_status) { + if (error_status & QM_ECC_MBIT) + qm->err_ini.err_info.is_qm_ecc_mbit = true; + + qm_log_hw_error(qm, error_status); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { + .qm_db = qm_db_v1, + .get_irq_num = qm_get_irq_num_v1, + .hw_error_init = qm_hw_error_init_v1, +}; + +static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { + .get_vft = qm_get_vft_v2, + .qm_db = qm_db_v2, + .get_irq_num = qm_get_irq_num_v2, + .hw_error_init = qm_hw_error_init_v2, + .hw_error_uninit = qm_hw_error_uninit_v2, + .hw_error_handle = qm_hw_error_handle_v2, +}; + +static void *qm_get_avail_sqe(struct hisi_qp *qp) +{ + struct hisi_qp_status *qp_status = &qp->qp_status; + u16 sq_tail = qp_status->sq_tail; + + if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) + return NULL; + + return qp->sqe + sq_tail * qp->qm->sqe_size; +} + +static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) +{ + u64 *addr; + + /* Use last 32 bits of DUS to reset status. 
*/ + addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; + *addr = 0; +} + +static struct hisi_qp *hisi_qm_create_qp_nolock(struct hisi_qm *qm, + u8 alg_type) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int qp_id; + + if (!qm_qp_avail_state(qm, NULL, QP_INIT)) + return ERR_PTR(-EPERM); + + if (!qm->free_qp_num) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + atomic64_inc(&qm->debug.dfx.qp_err_cnt); + return ERR_PTR(-EBUSY); + } + + qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, + 0, qm->qp_num, GFP_ATOMIC); + if (qp_id < 0) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + atomic64_inc(&qm->debug.dfx.qp_err_cnt); + return ERR_PTR(-EBUSY); + } + + qp = &qm->qp_array[qp_id]; + if (!qp->is_in_kernel) + hisi_qm_unset_hw_reset(qp); + + memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); + qp->event_cb = NULL; + qp->req_cb = NULL; + qp->alg_type = alg_type; + qp->c_flag = 1; + qp->is_in_kernel = true; + qm->free_qp_num--; + atomic_set(&qp->qp_status.flags, QP_INIT); + + return qp; +} + +/** + * hisi_qm_create_qp() - Create a queue pair from qm. + * @qm: The qm we create a qp from. + * @alg_type: Accelerator specific algorithm type in sqc. + * + * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating + * qp memory fails. + */ +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +{ + struct hisi_qp *qp; + + down_write(&qm->qps_lock); + qp = hisi_qm_create_qp_nolock(qm, alg_type); + up_write(&qm->qps_lock); + + return qp; +} +EXPORT_SYMBOL_GPL(hisi_qm_create_qp); + +/** + * hisi_qm_release_qp() - Release a qp back to its qm. + * @qp: The qp we want to release. + * + * This function releases the resource of a qp. + */ +void hisi_qm_release_qp(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + + down_write(&qm->qps_lock); + if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + qm->free_qp_num++; + idr_remove(&qm->qp_idr, qp->qp_id); + up_write(&qm->qps_lock); +} +EXPORT_SYMBOL_GPL(hisi_qm_release_qp); + +static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +{ + struct hisi_qm *qm = qp->qm; + enum qm_hw_ver ver = qm->ver; + struct qm_sqc sqc = {0}; + + if (ver == QM_HW_V1) { + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); + sqc.w8 = cpu_to_le16(QM_Q_DEPTH - 1); + } else if (ver == QM_HW_V2) { + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); + sqc.w8 = 0; /* rand_qc */ + } + sqc.cq_num = cpu_to_le16(qp_id); + sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); + sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); + sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); + sqc.pasid = cpu_to_le16(pasid); + + return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0); +} + +static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +{ + struct hisi_qm *qm = qp->qm; + enum qm_hw_ver ver = qm->ver; + struct qm_cqc cqc = {0}; + + if (ver == QM_HW_V1) { + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, + QM_QC_CQE_SIZE)); + cqc.w8 = cpu_to_le16(QM_Q_DEPTH - 1); + } else if (ver == QM_HW_V2) { + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE)); + cqc.w8 = 0; /* rand_qc */ + } + cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | + qp->c_flag << QM_CQ_FLAG_SHIFT); + cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); + cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); + cqc.pasid = cpu_to_le16(pasid); + + return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); 
+} + +static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) +{ + int ret; + + qm_init_qp_status(qp); + + ret = qm_sq_ctx_cfg(qp, qp_id, pasid); + if (ret) + return ret; + + return qm_cq_ctx_cfg(qp, qp_id, pasid); +} + +static int hisi_qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) +{ + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + int qp_id = qp->qp_id; + int pasid = arg; + int ret; + + if (!qm_qp_avail_state(qm, qp, QP_START)) + return -EPERM; + + ret = qm_qp_ctx_cfg(qp, qp_id, pasid); + if (ret) + return ret; + atomic_set(&qp->qp_status.flags, QP_START); + dev_dbg(dev, "queue %d started\n", qp_id); + + return qp_id; +} + +/** + * hisi_qm_start_qp() - Start a qp into running. + * @qp: The qp we want to start to run. + * @arg: Accelerator specific argument. + * + * After this function, qp can receive request from user. Return qp_id if + * successful, Return -EBUSY if failed. + */ +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +{ + struct hisi_qm *qm = qp->qm; + int ret; + + down_write(&qm->qps_lock); + ret = hisi_qm_start_qp_nolock(qp, arg); + up_write(&qm->qps_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_start_qp); + +/* Callback function should be called whether task completed or not. */ +static void qp_stop_fail_cb(struct hisi_qp *qp) +{ + int qp_used = atomic_read(&qp->qp_status.used); + u16 cur_tail = qp->qp_status.sq_tail; + u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH; + struct hisi_qm *qm = qp->qm; + u16 pos; + int i; + + for (i = 0; i < qp_used; i++) { + pos = (i + cur_head) % QM_Q_DEPTH; + qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); + atomic_dec(&qp->qp_status.used); + } +} + +static void qm_qp_has_no_task(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + struct qm_sqc sqc; + struct qm_cqc cqc; + int ret, i = 0; + + if (qp->qm->err_ini.err_info.is_qm_ecc_mbit || + qp->qm->err_ini.err_info.is_dev_ecc_mbit) + return; + + while (++i) { + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1); + if (ret) { + dev_err_ratelimited(dev, "Fail to dump sqc!\n"); + return; + } + + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1); + if (ret) { + dev_err_ratelimited(dev, "Fail to dump cqc!\n"); + return; + } + + if ((QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)) && + (sqc.tail == cqc.tail)) + return; + + if (i == MAX_WAIT_COUNTS) { + dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); + return; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } +} + +static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp) +{ + struct device *dev = &qp->qm->pdev->dev; + + /* it is stopped */ + if (atomic_read(&qp->qp_status.flags) == QP_STOP) { + qp->is_resetting = false; + return 0; + } + if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) + return -EPERM; + + atomic_set(&qp->qp_status.flags, QP_STOP); + + qm_qp_has_no_task(qp); + + if (qp->qm->wq) + flush_workqueue(qp->qm->wq); + else + flush_work(&qp->qm->work); + + /* waiting for increase used count in qp send and last poll qp finish */ + while (atomic_read(&qp->qp_status.send_ref)) + udelay(WAIT_PERIOD); + + if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) + qp_stop_fail_cb(qp); + + dev_dbg(dev, "stop queue %u!", qp->qp_id); + + return 0; +} + +/** + * hisi_qm_stop_qp() - Stop a qp in qm. + * @qp: The qp we want to stop. + * + * This function is reverse of hisi_qm_start_qp. Return 0 if successful. 
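+ *
+ * The stop path (hisi_qm_stop_qp_nolock()) first waits for the queue to
+ * drain: qm_qp_has_no_task() dumps the SQC/CQC until the SQ and CQ tails
+ * match. It then flushes the completion work, waits for in-flight
+ * hisi_qp_send() callers via send_ref, and completes any leftover requests
+ * through qp_stop_fail_cb() when the qp is being reset.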
+ */ +int hisi_qm_stop_qp(struct hisi_qp *qp) +{ + int ret; + + down_write(&qp->qm->qps_lock); + ret = hisi_qm_stop_qp_nolock(qp); + up_write(&qp->qm->qps_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); + +/** + * hisi_qp_send() - Queue up a task in the hardware queue. + * @qp: The qp in which to put the message. + * @msg: The message. + * + * This function will return -EBUSY if qp is currently full, and -EAGAIN + * if qp related qm is resetting. + * + * Note: This function may run with qm_irq_thread and ACC reset at same time. + * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC + * reset may happen, we have no lock here considering performance. This + * causes current qm_db sending fail or can not receive sended sqe. QM + * sync/async receive function should handle the error sqe. ACC reset + * done function should clear used sqe to 0. + */ +int hisi_qp_send(struct hisi_qp *qp, const void *msg) +{ + struct hisi_qp_status *qp_status = &qp->qp_status; + void *sqe; + + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || + atomic_read(&qp->qm->status.flags) == QM_STOP) || + qp->is_resetting) { + dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); + return -EAGAIN; + } + + atomic_inc(&qp->qp_status.send_ref); + sqe = qm_get_avail_sqe(qp); + if (!sqe) { + atomic_dec(&qp->qp_status.send_ref); + return -EBUSY; + } + + memcpy(sqe, msg, qp->qm->sqe_size); + qp_status->sq_tail = (qp_status->sq_tail + 1) % QM_Q_DEPTH; + qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp_status->sq_tail, 0); + atomic_inc(&qp->qp_status.used); + atomic_dec(&qp->qp_status.send_ref); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qp_send); + +static void hisi_qm_cache_wb(struct hisi_qm *qm) +{ + unsigned int val; + + if (qm->ver == QM_HW_V2) { + writel(0x1, qm->io_base + QM_CACHE_WB_START); + if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, + val, val & BIT(0), POLL_PERIOD, + POLL_TIMEOUT)) + dev_err(&qm->pdev->dev, + "QM writeback sqc cache fail!\n"); + } +} + +int hisi_qm_get_free_qp_num(struct hisi_qm *qm) +{ + int ret; + + down_read(&qm->qps_lock); + ret = qm->free_qp_num; + up_read(&qm->qps_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); + +static void qm_qp_event_notifier(struct hisi_qp *qp) +{ + uacce_wake_up(qp->uacce_q); +} + +static int hisi_qm_get_available_instances(struct uacce *uacce) +{ + return hisi_qm_get_free_qp_num(uacce->priv); +} + +static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) +{ + struct hisi_qp *qp; + u32 *addr; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = &qm->qp_array[i]; + if (!qp->is_in_kernel) { + /* Use last 32 bits of DUS to save reset status. 
*/ + addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; + *addr = 1; + + /* make sure setup is completed */ + mb(); + } + } +} + +static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg, + struct uacce_queue **q) +{ + struct hisi_qm *qm = uacce->priv; + struct hisi_qp *qp; + struct uacce_queue *wd_q; + u8 alg_type = 0; + + down_write(&qm->qps_lock); + qp = hisi_qm_create_qp_nolock(qm, alg_type); + if (IS_ERR(qp)) { + up_write(&qm->qps_lock); + return PTR_ERR(qp); + } + + wd_q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL); + if (!wd_q) { + up_write(&qm->qps_lock); + hisi_qm_release_qp(qp); + return -ENOMEM; + } + + wd_q->priv = qp; + wd_q->uacce = uacce; + *q = wd_q; + qp->uacce_q = wd_q; + qp->event_cb = qm_qp_event_notifier; + qp->pasid = arg; + qp->is_in_kernel = false; + + up_write(&qm->qps_lock); + return 0; +} + +static void hisi_qm_uacce_put_queue(struct uacce_queue *q) +{ + struct hisi_qp *qp = q->priv; + + /* need to stop hardware, but can not support in v1 */ + hisi_qm_release_qp(qp); +} + +/* map sq/cq/doorbell to user space */ +static int hisi_qm_uacce_mmap(struct uacce_queue *q, + struct vm_area_struct *vma, + struct uacce_qfile_region *qfr) +{ + struct hisi_qp *qp = (struct hisi_qp *)q->priv; + struct hisi_qm *qm = qp->qm; + size_t sz = vma->vm_end - vma->vm_start; + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned long vm_pgoff; + int ret; + + switch (qfr->type) { + case UACCE_QFRT_MMIO: + if (qm->ver == QM_HW_V2) { + if (WARN_ON(sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + + QM_V2_DOORBELL_OFFSET / PAGE_SIZE))) + return -EINVAL; + } else { + if (WARN_ON(sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)) + return -EINVAL; + } + + vma->vm_flags |= VM_IO; + + /* + * Warning: This is not safe as multiple processes use the same + * doorbell, v1/v2 hardware interface problem. It will be fixed + * it in next version. 
+ */ + return remap_pfn_range(vma, vma->vm_start, + qm->phys_base >> PAGE_SHIFT, + sz, pgprot_noncached(vma->vm_page_prot)); + case UACCE_QFRT_DUS: + if (sz != qp->qdma.size) { + dev_err(dev, "wrong queue size %ld vs %ld\n", + sz, qp->qdma.size); + return -EINVAL; + } + + /* dma_mmap_coherent() requires vm_pgoff as 0; + * restore vm_pgoff to its initial value for mmap() + */ + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = 0; + ret = dma_mmap_coherent(dev, vma, qp->qdma.va, + qp->qdma.dma, sz); + vma->vm_pgoff = vm_pgoff; + + return ret; + default: + return -EINVAL; + } +} + +static int hisi_qm_uacce_start_queue(struct uacce_queue *q) +{ + struct hisi_qp *qp = q->priv; + + return hisi_qm_start_qp(qp, qp->pasid); +} + +static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) +{ + struct hisi_qp *qp = q->priv; + + hisi_qm_stop_qp(qp); +} + +static int qm_set_sqctype(struct uacce_queue *q, u16 type) +{ + struct hisi_qm *qm = q->uacce->priv; + struct hisi_qp *qp = q->priv; + + down_write(&qm->qps_lock); + qp->alg_type = type; + up_write(&qm->qps_lock); + + return 0; +} + +static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, + unsigned long arg) +{ + struct hisi_qp *qp = q->priv; + struct hisi_qp_ctx qp_ctx; + + if (cmd == UACCE_CMD_QM_SET_QP_CTX) { + if (copy_from_user(&qp_ctx, (void __user *)arg, + sizeof(struct hisi_qp_ctx))) + return -EFAULT; + + if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) + return -EINVAL; + + qm_set_sqctype(q, qp_ctx.qc_type); + qp_ctx.id = qp->qp_id; + qp->c_flag = 0; + if (copy_to_user((void __user *)arg, &qp_ctx, + sizeof(struct hisi_qp_ctx))) + return -EFAULT; + } else { + return -EINVAL; + } + + return 0; +} + +static enum uacce_dev_state hisi_qm_get_state(struct uacce *uacce) +{ + struct hisi_qm *qm = uacce->priv; + enum qm_state curr; + + curr = atomic_read(&qm->status.flags); + if (curr == QM_STOP) + return UACCE_DEV_ERR; + else + return UACCE_DEV_NORMAL; +} + +static void hisi_qm_uacce_memory_init(struct hisi_qm *qm) +{ + unsigned long dus_page_nr, mmio_page_nr; + struct uacce *uacce = &qm->uacce; + + /* Add one more page for device or qp status */ + dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + + sizeof(struct cqe) * QM_Q_DEPTH + PAGE_SIZE) >> + PAGE_SHIFT; + + if (qm->ver == QM_HW_V2) + mmio_page_nr = QM_DOORBELL_PAGE_NR + + QM_V2_DOORBELL_OFFSET / PAGE_SIZE; + else + mmio_page_nr = QM_DOORBELL_PAGE_NR; + + uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0; + uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr; + uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr + dus_page_nr; +} + +/* + * the device is set with UACCE_DEV_SVA, but the flag will be cleared if the + * SVA patch is not available + */ +static struct uacce_ops uacce_qm_ops = { + .get_available_instances = hisi_qm_get_available_instances, + .get_queue = hisi_qm_uacce_get_queue, + .put_queue = hisi_qm_uacce_put_queue, + .start_queue = hisi_qm_uacce_start_queue, + .stop_queue = hisi_qm_uacce_stop_queue, + .mmap = hisi_qm_uacce_mmap, + .ioctl = hisi_qm_uacce_ioctl, + .get_dev_state = hisi_qm_get_state, +}; + +static int qm_register_uacce(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct uacce *uacce = &qm->uacce; + int i; + + uacce->name = dev_name(&pdev->dev); + uacce->drv_name = pdev->driver->name; + uacce->pdev = &pdev->dev; + uacce->is_vf = pdev->is_virtfn; + uacce->priv = qm; + uacce->ops = &uacce_qm_ops; + uacce->algs = qm->algs; + + if (uacce->is_vf) { + struct uacce *pf_uacce; + struct device *pf_dev = &(pci_physfn(pdev)->dev); + + /* VF uses PF's isolate data */ + pf_uacce
= dev_to_uacce(pf_dev); + if (!pf_uacce) { + dev_err(&pdev->dev, "fail to get PF device\n"); + return -ENODEV; + } + + uacce->isolate = &pf_uacce->isolate_data; + } else { + uacce->isolate = &uacce->isolate_data; + } + + if (qm->ver == QM_HW_V1) + uacce->api_ver = HISI_QM_API_VER_BASE; + else + uacce->api_ver = HISI_QM_API_VER2_BASE; + + if (qm->use_sva) { + uacce->flags = UACCE_DEV_SVA; + } else { + uacce->flags = UACCE_DEV_NOIOMMU; + if (qm->ver == QM_HW_V1) + uacce->api_ver = HISI_QM_API_VER_BASE + UACCE_API_VER_NOIOMMU_SUBFIX; + else + uacce->api_ver = HISI_QM_API_VER2_BASE + UACCE_API_VER_NOIOMMU_SUBFIX; + } + + for (i = 0; i < UACCE_QFRT_MAX; i++) + uacce->qf_pg_start[i] = UACCE_QFR_NA; + + return uacce_register(uacce); +} + +static int qm_unregister_uacce(struct hisi_qm *qm) +{ + int ret; + + ret = uacce_unregister(&qm->uacce); + if (ret) + return ret; + + memset(&qm->uacce, 0, sizeof(qm->uacce)); + + return 0; +} + +/** + * hisi_qm_frozen() - Try to freeze QM to cut off continuous queue requests. If + * there is a user on the QM, return failure without doing anything. + * @qm: The qm that needs to be frozen. + * + * This function freezes QM, then we can do SRIOV disabling. + */ +static int hisi_qm_frozen(struct hisi_qm *qm) +{ + down_write(&qm->qps_lock); + if (qm->is_frozen) { + up_write(&qm->qps_lock); + return 0; + } + + if (qm->free_qp_num == qm->qp_num) { + qm->free_qp_num = 0; + qm->is_frozen = true; + up_write(&qm->qps_lock); + return 0; + } + + up_write(&qm->qps_lock); + + return -EBUSY; +} + +static int qm_try_frozen_vfs(struct pci_dev *pdev, + struct hisi_qm_list *qm_list) +{ + struct hisi_qm *qm, *vf_qm; + struct pci_dev *dev; + int ret = 0; + + if (!qm_list || !pdev) + return -EINVAL; + + /* Try to freeze all the VFs before disabling SRIOV */ + mutex_lock(&qm_list->lock); + list_for_each_entry(qm, &qm_list->list, list) { + dev = qm->pdev; + if (dev == pdev) + continue; + if (pci_physfn(dev) == pdev) { + vf_qm = pci_get_drvdata(dev); + ret = hisi_qm_frozen(vf_qm); + if (ret) + goto frozen_fail; + } + } + +frozen_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + +void hisi_qm_remove_wait_delay(struct hisi_qm *qm, + struct hisi_qm_list *qm_list) +{ + while (hisi_qm_frozen(qm) || + ((qm->fun_type == QM_HW_PF) && + qm_try_frozen_vfs(qm->pdev, qm_list))) { + msleep(WAIT_PERIOD); + } + udelay(REMOVE_WAIT_DELAY); +} +EXPORT_SYMBOL_GPL(hisi_qm_remove_wait_delay); + +static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) +{ + struct device *dev = &qm->pdev->dev; + struct qm_dma *qdma; + int i; + + for (i = num - 1; i >= 0; i--) { + qdma = &qm->qp_array[i].qdma; + dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + } + + kfree(qm->qp_array); +} + +static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) +{ + struct device *dev = &qm->pdev->dev; + size_t off = qm->sqe_size * QM_Q_DEPTH; + struct hisi_qp *qp; + + qp = &qm->qp_array[id]; + qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, + GFP_KERNEL); + if (!qp->qdma.va) { + dev_err(dev, "Fail to alloc qp dma buf size=%zx\n", dma_size); + return -ENOMEM; + } + + qp->sqe = qp->qdma.va; + qp->sqe_dma = qp->qdma.dma; + qp->cqe = qp->qdma.va + off; + qp->cqe_dma = qp->qdma.dma + off; + qp->qdma.size = dma_size; + qp->qm = qm; + qp->qp_id = id; + + return 0; +} + +static int hisi_qp_alloc_memory(struct hisi_qm *qm) +{ + size_t qp_dma_size; + int i, ret; + + qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); + if (!qm->qp_array) + return -ENOMEM; + + /* one more page for device or qp
statuses */ + qp_dma_size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct cqe) * QM_Q_DEPTH; + qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; + for (i = 0; i < qm->qp_num; i++) { + ret = hisi_qp_memory_init(qm, qp_dma_size, i); + if (ret) + goto err_init_qp_mem; + } + + return 0; + +err_init_qp_mem: + hisi_qp_memory_uninit(qm, i); + + return ret; +} + +static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) +{ + struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; + struct device *dev = &qm->pdev->dev; + + dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); +} + +static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) +{ + struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; + struct qm_dma *xqc_dma = &xqc_buf->qcdma; + struct device *dev = &qm->pdev->dev; + size_t off = 0; + +#define QM_XQC_BUF_INIT(xqc_buf, type) do { \ + (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ + (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type)); \ +} while (0) + + xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + + QMC_ALIGN(sizeof(struct qm_aeqc)) + + QMC_ALIGN(sizeof(struct qm_sqc)) + + QMC_ALIGN(sizeof(struct qm_cqc)); + + xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, &xqc_dma->dma, + GFP_ATOMIC); + if (!xqc_dma->va) { + dev_err(dev, "Fail to alloc qcdma size=%zx\n", xqc_dma->size); + return -ENOMEM; + } + + QM_XQC_BUF_INIT(xqc_buf, eqc); + QM_XQC_BUF_INIT(xqc_buf, aeqc); + QM_XQC_BUF_INIT(xqc_buf, sqc); + QM_XQC_BUF_INIT(xqc_buf, cqc); + + return 0; +} + +static int hisi_qm_memory_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + int ret = -ENOMEM; + size_t off = 0; + +#define QM_INIT_BUF(qm, type, num) do { \ + (qm)->type = ((qm)->qdma.va + (off)); \ + (qm)->type##_dma = (qm)->qdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ +} while (0) + + if (qm->use_uacce) + hisi_qm_uacce_memory_init(qm); + + idr_init(&qm->qp_idr); + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); + qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, + GFP_ATOMIC | __GFP_ZERO); + if (!qm->qdma.va) { + dev_err(dev, "Fail to alloc qdma size=%zx\n", qm->qdma.size); + goto err_destroy_idr; + } + + QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); + QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, sqc, qm->qp_num); + QM_INIT_BUF(qm, cqc, qm->qp_num); + + ret = hisi_qm_alloc_rsv_buf(qm); + if (ret) + goto err_free_qdma; + + ret = hisi_qp_alloc_memory(qm); + if (ret) + goto err_free_reserve_buf; + + return 0; + +err_free_reserve_buf: + hisi_qm_free_rsv_buf(qm); +err_free_qdma: + dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); +err_destroy_idr: + idr_destroy(&qm->qp_idr); + + return ret; +} + +static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); + u32 val; + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + if (!handle) { + pci_warn(qm->pdev, "Device does not support reset, return.\n"); + return 0; + } + + if (!acpi_has_method(handle, qm->err_ini.err_info.acpi_rst)) { + pci_warn(qm->pdev, "BIOS has no reset method, return.\n"); + return 0; + } + + /* OOO register set and check */ + writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL); + + ret = readl_relaxed_poll_timeout(qm->io_base + MASTER_TRANS_RETURN, + val, (val == MASTER_TRANS_RETURN_RW), + QM_REG_RD_INTVRL_US, + 
QM_REG_RD_TMOUT_US); + if (ret) { + pci_warn(qm->pdev, "Device is busy, can not clear device.\n"); + writel(0x0, qm->io_base + MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + +static int hisi_qm_pci_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned int num_vec; + int ret; + + ret = pci_enable_device_mem(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable device mem!\n"); + return ret; + } + + ret = pci_request_mem_regions(pdev, qm->dev_name); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request mem regions!\n"); + goto err_request_mem_regions; + } + + qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); + qm->size = pci_resource_len(qm->pdev, PCI_BAR_2); + qm->io_base = devm_ioremap(dev, pci_resource_start(pdev, PCI_BAR_2), + pci_resource_len(qm->pdev, PCI_BAR_2)); + if (!qm->io_base) { + ret = -EIO; + goto err_ioremap; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret < 0) { + dev_err(dev, "Failed to set 64 bit dma mask %d", ret); + goto err_set_mask_and_coherent; + } + pci_set_master(pdev); + + num_vec = qm->ops->get_irq_num(qm); + ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to enable MSI vectors!\n"); + goto err_set_mask_and_coherent; + } + + ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + + return 0; + +err_free_vectors: + pci_free_irq_vectors(pdev); +err_set_mask_and_coherent: + devm_iounmap(dev, qm->io_base); +err_ioremap: + pci_release_mem_regions(pdev); +err_request_mem_regions: + pci_disable_device(pdev); + return ret; +} + +static void hisi_qm_pci_uninit(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + + pci_free_irq_vectors(pdev); + devm_iounmap(dev, qm->io_base); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +/** + * hisi_qm_init() - Initialize configures about qm. + * @qm: The qm needed init. + * + * This function init qm, then we can call hisi_qm_start to put qm into work. + */ +int hisi_qm_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + int ret; + + switch (qm->ver) { + case QM_HW_V1: + qm->ops = &qm_hw_ops_v1; + break; + case QM_HW_V2: + qm->ops = &qm_hw_ops_v2; + break; + default: + return -EINVAL; + } + + if (qm->use_uacce) { + dev_info(dev, "qm register to uacce\n"); + ret = qm_register_uacce(qm); + if (ret < 0) { + dev_err(dev, "fail to register uacce (%d)\n", ret); + return ret; + } + } + + ret = hisi_qm_pci_init(qm); + if (ret) + goto err_pci_init; + + ret = qm_irq_register(qm); + if (ret) + goto err_irq_register; + + mutex_init(&qm->mailbox_lock); + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 or v3 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_get_vft; + } + + ret = hisi_qm_memory_init(qm); + if (ret) + goto err_get_vft; + + qm->free_qp_num = qm->qp_num; + qm->is_frozen = false; + + init_rwsem(&qm->qps_lock); + atomic_set(&qm->status.flags, QM_INIT); + INIT_WORK(&qm->work, qm_work_process); + + dev_dbg(dev, "init qm %s\n", pdev->is_physfn ? "pf" : "vf"); + + return 0; + +err_get_vft: + qm_irq_unregister(qm); +err_irq_register: + hisi_qm_pci_uninit(qm); +err_pci_init: + if (qm->use_uacce) + qm_unregister_uacce(qm); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_init); + +/** + * hisi_qm_uninit() - Uninitialize qm. + * @qm: The qm needed uninit. 
+ * + * This function uninits qm related device resources. + */ +void hisi_qm_uninit(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + + down_write(&qm->qps_lock); + if (!qm_avail_state(qm, QM_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + + hisi_qp_memory_uninit(qm, qm->qp_num); + idr_destroy(&qm->qp_idr); + hisi_qm_free_rsv_buf(qm); + if (qm->qdma.va) { + hisi_qm_cache_wb(qm); + dma_free_coherent(dev, qm->qdma.size, + qm->qdma.va, qm->qdma.dma); + memset(&qm->qdma, 0, sizeof(qm->qdma)); + } + + qm_irq_unregister(qm); + hisi_qm_pci_uninit(qm); + up_write(&qm->qps_lock); + + if (qm->use_uacce) + uacce_unregister(&qm->uacce); +} +EXPORT_SYMBOL_GPL(hisi_qm_uninit); + +/** + * hisi_qm_dev_shutdown() - shutdown device. + * @pdev: The device will be shutdown. + * + * This function will stop qm when OS shutdown or rebooting. + */ +void hisi_qm_dev_shutdown(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + ret = hisi_qm_stop(qm, QM_NORMAL); + if (ret) + dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); + + hisi_qm_cache_wb(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); + +/** + * hisi_qm_get_vft() - Get vft from a qm. + * @qm: The qm we want to get its vft. + * @base: The base number of queue in vft. + * @number: The number of queues in vft. + * + * We can allocate multiple queues to a qm by configuring virtual function + * table. We get related configures by this function. Normally, we call this + * function in VF driver to get the queue information. + * + * qm hw v1 does not support this interface. + */ +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) +{ + if (!base || !number) + return -EINVAL; + + if (!qm->ops->get_vft) { + dev_err(&qm->pdev->dev, "Don't support vft read!\n"); + return -EINVAL; + } + + return qm->ops->get_vft(qm, base, number); +} +EXPORT_SYMBOL_GPL(hisi_qm_get_vft); + +/** + * hisi_qm_set_vft() - Set "virtual function table" for a qm. + * @fun_num: Number of operated function. + * @qm: The qm in which to set vft, alway in a PF. + * @base: The base number of queue in vft. + * @number: The number of queues in vft. + * + * This function is alway called in PF driver, it is used to assign queues + * among PF and VFs. Number is zero means invalid corresponding entry. 
+ * + * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) + * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) + * (VF function number 0x2) + */ +static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, + u32 number) +{ + u32 max_q_num = qm->ctrl_q_num; + + if (base >= max_q_num || number > max_q_num || + (base + number) > max_q_num) + return -EINVAL; + + return qm_set_sqc_cqc_vft(qm, fun_num, base, number); +} + +static void qm_init_eq_aeq_status(struct hisi_qm *qm) +{ + struct hisi_qm_status *status = &qm->status; + + status->eq_head = 0; + status->aeq_head = 0; + status->eqc_phase = true; + status->aeqc_phase = true; +} + +static int qm_eq_ctx_cfg(struct hisi_qm *qm) +{ + struct qm_eqc eqc = {0}; + + eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); + eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); + if (qm->ver == QM_HW_V1) + eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); + eqc.dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); + + return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); +} + +static int qm_aeq_ctx_cfg(struct hisi_qm *qm) +{ + struct qm_aeqc aeqc = {0}; + + aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); + aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); + aeqc.dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); + + return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); +} + +static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + qm_init_eq_aeq_status(qm); + + ret = qm_eq_ctx_cfg(qm); + if (ret) { + dev_err(dev, "Set eqc failed!\n"); + return ret; + } + + return qm_aeq_ctx_cfg(qm); +} + +static int __hisi_qm_start(struct hisi_qm *qm) +{ + int ret; + + /* dma must be ready before start, nomatter by init or by uacce mmap */ + WARN_ON(!qm->qdma.dma); + + if (qm->qp_num == 0) + return -EINVAL; + + if (qm->fun_type == QM_HW_PF) { + ret = qm_dev_mem_reset(qm); + if (ret) + return ret; + + ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); + if (ret) + return ret; + } + + ret = qm_eq_aeq_ctx_cfg(qm); + if (ret) + return ret; + + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + if (ret) + return ret; + + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + if (ret) + return ret; + + writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); + + return 0; +} + +/* restart stopped qm and qps in reset flow */ +int hisi_qm_restart(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int ret, i; + + ret = hisi_qm_start(qm); + if (ret < 0) + return ret; + + down_write(&qm->qps_lock); + for (i = 0; i < qm->qp_num; i++) { + qp = &qm->qp_array[i]; + + if (atomic_read(&qp->qp_status.flags) == QP_STOP && + qp->is_resetting && qp->is_in_kernel) { + ret = hisi_qm_start_qp_nolock(qp, 0); + if (ret < 0) { + dev_err(dev, "Failed to start qp%d!\n", i); + + up_write(&qm->qps_lock); + return ret; + } + qp->is_resetting = false; + } + } + up_write(&qm->qps_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_restart); + +/** + * hisi_qm_start() - start qm + * @qm: The qm to be started. + * + * This function starts a qm, then we can allocate qp from this qm. 
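+ *
+ * Typical usage in this driver (for reference, not a fixed requirement):
+ * hisi_qm_init() -> hisi_qm_start() -> hisi_qm_create_qp() ->
+ * hisi_qm_start_qp() -> hisi_qp_send(), with hisi_qm_stop() and
+ * hisi_qm_uninit() on the teardown path.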
+ */ +int hisi_qm_start(struct hisi_qm *qm) +{ + int ret; + + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_START)) { + up_write(&qm->qps_lock); + return -EPERM; + } + + ret = __hisi_qm_start(qm); + if (!ret) + atomic_set(&qm->status.flags, QM_START); + + up_write(&qm->qps_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_start); + +/* Stop started qps in reset flow */ +static int qm_stop_started_qp(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int i, ret; + + for (i = 0; i < qm->qp_num; i++) { + qp = &qm->qp_array[i]; + if (atomic_read(&qp->qp_status.flags) == QP_START) { + qp->is_resetting = true; + ret = hisi_qm_stop_qp_nolock(qp); + if (ret < 0) { + dev_err(dev, "Failed to stop qp%d!\n", i); + return ret; + } + } + } + + return 0; +} + +/** + * qm_clear_queues() - Clear memory of queues in a qm. + * @qm: The qm which memory needs clear. + * + * This function clears all queues memory in a qm. Reset of accelerator can + * use this to clear queues. + */ +static void qm_clear_queues(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = &qm->qp_array[i]; + if (qp->is_in_kernel && qp->is_resetting) + /* device state use the last page */ + memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE); + } + + memset(qm->qdma.va, 0, qm->qdma.size); +} + +/** + * hisi_qm_stop() - Stop a qm. + * @qm: The qm which will be stopped. + * @r: The reason to stop qm. + * + * This function stops qm and its qps, then qm can not accept request. + * Related resources are not released at this state, we can use hisi_qm_start + * to let qm start again. + */ +int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) +{ + struct device *dev = &qm->pdev->dev; + int ret = 0; + + down_write(&qm->qps_lock); + + qm->status.stop_reason = r; + + if (!qm_avail_state(qm, QM_STOP)) { + ret = -EPERM; + goto err_unlock; + } + + if (qm->status.stop_reason == QM_SOFT_RESET || + qm->status.stop_reason == QM_FLR) { + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + ret = qm_stop_started_qp(qm); + if (ret < 0) + goto err_unlock; + + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); + } + + /* Mask eq and aeq irq */ + writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); + + if (qm->fun_type == QM_HW_PF) { + ret = hisi_qm_set_vft(qm, 0, 0, 0); + if (ret) { + dev_err(dev, "Failed to set vft!\n"); + ret = -EBUSY; + goto err_unlock; + } + } + + qm_clear_queues(qm); + atomic_set(&qm->status.flags, QM_STOP); + +err_unlock: + up_write(&qm->qps_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_stop); + +/** + * hisi_qm_debug_regs_clear() - clear qm debug related registers. + * @qm: The qm for which we want to clear. 
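+ *
+ * The first CNT_CYC_REGS_NUM entries of qm_dfx_regs are read-clear counters,
+ * so they are drained by reading each of them once while QM_DFX_CNT_CLR_CE
+ * is set, after which clear mode is disabled again.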
+ */ +void hisi_qm_debug_regs_clear(struct hisi_qm *qm) +{ + struct qm_dfx_registers *regs; + int i; + + /* clear current_q */ + writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + /* clear regs, these cnt regs are read_clear */ + writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); + + regs = qm_dfx_regs; +#define CNT_CYC_REGS_NUM 10 + for (i = 0; i < CNT_CYC_REGS_NUM; i++) { + readl(qm->io_base + regs->reg_offset); + regs++; + } + + /* clear clear_enable */ + writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); + +static ssize_t qm_status_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char buf[QM_DBG_READ_LEN]; + int val, cp_len, len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + val = atomic_read(&qm->status.flags); + len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); + if (!len) + return -EFAULT; + + cp_len = copy_to_user(buffer, buf, len); + if (cp_len) + return -EFAULT; + + return (*pos = len); +} + +static const struct file_operations qm_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_status_read, +}; + +static int qm_debugfs_atomic64_set(void *data, u64 val) +{ + if (!val) + atomic64_set((atomic64_t *)data, 0); + else + return -EINVAL; + + return 0; +} + +static int qm_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, + qm_debugfs_atomic64_set, "%llu\n"); + +/** + * hisi_qm_debug_init() - Initialize qm related debugfs files. + * @qm: The qm for which we want to add debugfs files. + * + * Create qm related debugfs files. + */ +int hisi_qm_debug_init(struct hisi_qm *qm) +{ + struct qm_dfx *dfx = &qm->debug.dfx; + struct dentry *qm_d; + void *data; + int i, ret; + + qm_d = debugfs_create_dir("qm", qm->debug.debug_root); + if (IS_ERR(qm_d)) + return -ENOENT; + qm->debug.qm_d = qm_d; + + /* only show this in PF */ + if (qm->fun_type == QM_HW_PF) + for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) + if (qm_create_debugfs_file(qm, i)) { + ret = -ENOENT; + goto failed_to_create; + } + + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + + debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); + + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, + &qm_status_fops); + + for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); + debugfs_create_file(qm_dfx_files[i].name, + 0644, + qm_d, + data, + &qm_atomic64_ops); + } + + return 0; + +failed_to_create: + debugfs_remove_recursive(qm_d); + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_debug_init); + +/** + * qm_hw_error_init() - Configure qm hardware error report method. + * @qm: The qm which we want to configure. + * @ce: Correctable error configure. + * @nfe: Non-fatal error configure. + * @fe: Fatal error configure. + * @msi: Error reported by message signal interrupt. + * + * Hardware errors of qm can be reported either by RAS interrupts which will + * be handled by UEFI and then PCIe AER or by device MSI. User can configure + * each error to use either of above two methods. For RAS interrupts, we can + * configure an error as one of correctable error, non-fatal error or + * fatal error. + * + * Bits indicating errors can be configured to ce, nfe, fe and msi to enable + * related report methods. 
Error report will be masked if related error bit + * does not configure. + */ +static void qm_hw_error_init(struct hisi_qm *qm) +{ + u32 nfe = qm->err_ini.err_info.nfe; + u32 msi = qm->err_ini.err_info.msi; + u32 ce = qm->err_ini.err_info.ce; + u32 fe = qm->err_ini.err_info.fe; + + if (!qm->ops->hw_error_init) { + dev_err(&qm->pdev->dev, + "QM version %d doesn't support hw error handling!\n", + qm->ver); + return; + } + + qm->ops->hw_error_init(qm, ce, nfe, fe, msi); +} + +static void qm_hw_error_uninit(struct hisi_qm *qm) +{ + if (!qm->ops->hw_error_uninit) { + dev_err(&qm->pdev->dev, + "QM version %d doesn't support hw error handling!\n", + qm->ver); + return; + } + + qm->ops->hw_error_uninit(qm); +} + +/** + * qm_hw_error_handle() - Handle qm non-fatal hardware errors. + * @qm: The qm which has non-fatal hardware errors. + * + * Accelerators use this function to handle qm non-fatal hardware errors. + */ +static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm) +{ + if (!qm->ops->hw_error_handle) { + dev_err(&qm->pdev->dev, + "QM version %d doesn't support hw error report!\n", + qm->ver); + return PCI_ERS_RESULT_NONE; + } + + return qm->ops->hw_error_handle(qm); +} + +static int qm_get_hw_error_status(struct hisi_qm *qm) +{ + u32 err_sts; + + err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) & QM_ECC_MBIT; + if (err_sts) + return err_sts; + + return 0; +} + +static int qm_reg_test(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + writel(VENDOR_ID_TEST_VALUE, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == VENDOR_ID_TEST_VALUE), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) { + dev_err(&pdev->dev, "Fails to read QM reg!\n"); + return ret; + } + + writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == PCI_VENDOR_ID_HUAWEI), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); + + return ret; +} + +static int qm_set_pf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 cmd; + int i; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set) + cmd |= PCI_COMMAND_MEMORY; + else + cmd &= ~PCI_COMMAND_MEMORY; + + pci_write_config_word(pdev, PCI_COMMAND, cmd); + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_vf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 sriov_ctrl; + int pos; + int i; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set) + sriov_ctrl |= PCI_SRIOV_CTRL_MSE; + else + sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; + pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); + + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> + PEH_SRIOV_CTRL_VF_MSE_SHIFT) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_msi(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + + if (set) { + pci_write_config_dword(pdev, pdev->msi_cap + + PCI_MSI_MASK_64, 0); + } else { + pci_write_config_dword(pdev, pdev->msi_cap + + PCI_MSI_MASK_64, PEH_MSI_DISABLE); + if (qm->err_ini.err_info.is_qm_ecc_mbit || + 
qm->err_ini.err_info.is_dev_ecc_mbit) + return 0; + + mdelay(1); + if (readl(qm->io_base + QM_PEH_DFX_INFO0)) + return -EFAULT; + } + + return 0; +} + +void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) +{ + int i; + + if (!qps || qp_num < 0) + return; + + for (i = qp_num - 1; i >= 0; i--) + hisi_qm_release_qp(qps[i]); +} +EXPORT_SYMBOL_GPL(hisi_qm_free_qps); + +static void free_list(struct list_head *head) +{ + struct hisi_qm_resource *res, *tmp; + + list_for_each_entry_safe(res, tmp, head, list) { + list_del(&res->list); + kfree(res); + } +} + +static int hisi_qm_sort_devices(int node, struct list_head *head, + struct hisi_qm_list *qm_list) +{ + struct hisi_qm_resource *res, *tmp; + struct hisi_qm *qm; + struct list_head *n; + struct device *dev; + int dev_node; + + list_for_each_entry(qm, &qm_list->list, list) { + dev = &qm->pdev->dev; + + dev_node = dev_to_node(dev); + if (dev_node < 0) + dev_node = 0; + + if (qm_list->check && !qm_list->check(qm)) + continue; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + res->qm = qm; + res->distance = node_distance(dev_node, node); + n = head; + list_for_each_entry(tmp, head, list) { + if (res->distance < tmp->distance) { + n = &tmp->list; + break; + } + } + list_add_tail(&res->list, n); + } + + return 0; +} + +int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list, + struct hisi_qp **qps, int qp_num, u8 alg_type) +{ + struct hisi_qm_resource *tmp; + int ret = -ENODEV; + LIST_HEAD(head); + int i; + + if (!qps || !qm_list || qp_num <= 0) + return -EINVAL; + + mutex_lock(&qm_list->lock); + if (hisi_qm_sort_devices(node, &head, qm_list)) { + mutex_unlock(&qm_list->lock); + goto err; + } + + list_for_each_entry(tmp, &head, list) { + for (i = 0; i < qp_num; i++) { + qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); + if (IS_ERR(qps[i])) { + hisi_qm_free_qps(qps, i); + break; + } + } + + if (i == qp_num) { + ret = 0; + break; + } + } + + mutex_unlock(&qm_list->lock); + if (ret) + pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n", + node, alg_type, qp_num); + +err: + free_list(&head); + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); + +static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) +{ + u32 q_num, i, remain_q_num; + u32 q_base = qm->qp_num; + int ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_q_num - qm->qp_num; + + /* If remain queues not enough, return error. */ + if (qm->ctrl_q_num < qm->qp_num || remain_q_num < num_vfs) + return -EINVAL; + + q_num = remain_q_num / num_vfs; + for (i = 1; i <= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, q_num); + if (ret) + return ret; + q_base += q_num; + } + + return 0; +} + +static int qm_clear_vft_config(struct hisi_qm *qm) +{ + int ret; + u32 i; + + for (i = 1; i <= qm->vfs_num; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + qm->vfs_num = 0; + + return 0; +} + +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int pre_existing_vfs, num_vfs, ret; + int total_vfs; + + total_vfs = pci_sriov_get_totalvfs(pdev); + pre_existing_vfs = pci_num_vf(pdev); + if (pre_existing_vfs) { + pci_err(pdev, + "Can't enable VF. 
Please disable pre-enabled VFs!\n"); + return 0; + } + + num_vfs = min_t(int, max_vfs, total_vfs); + ret = qm_vf_q_assign(qm, num_vfs); + if (ret) { + pci_err(pdev, "Can't assign queues for VF!\n"); + return ret; + } + + qm->vfs_num = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + pci_err(pdev, "Can't enable VF!\n"); + qm_clear_vft_config(qm); + return ret; + } + + pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); + + return num_vfs; +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); + +int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); + return -EPERM; + } + + /* While a VF is in use, SRIOV cannot be disabled. + * However, there is a risk that the behavior is uncertain if the + * device is in hardware resetting. + */ + if (qm_list && qm_try_frozen_vfs(pdev, qm_list)) { + pci_err(pdev, "Uacce user space task is using its VF!\n"); + return -EBUSY; + } + + /* remove in hpre_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + return qm_clear_vft_config(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); + +void hisi_qm_dev_err_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + + if (pf_qm->fun_type == QM_HW_VF) + return; + + qm_hw_error_init(pf_qm); + pf_qm->err_ini.hw_err_enable(pf_qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); + +/** + * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. + * @qm: The qm for which we want to do error uninitialization. + * + * Uninitialize QM and device error related configuration. It may be called + * by PF/VF; the caller should ensure the scene explicitly. + */ +void hisi_qm_dev_err_uninit(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + + if (pf_qm->fun_type == QM_HW_VF) + return; + + qm_hw_error_uninit(pf_qm); + pf_qm->err_ini.hw_err_disable(pf_qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); + +static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) +{ + u32 err_sts; + + /* read err sts */ + err_sts = qm->err_ini.get_dev_hw_err_status(qm); + if (err_sts) { + if (err_sts & qm->err_ini.err_info.ecc_2bits_mask) + qm->err_ini.err_info.is_dev_ecc_mbit = true; + + qm->err_ini.log_dev_hw_err(qm, err_sts); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + pci_ers_result_t qm_ret, dev_ret; + + /* log qm error */ + qm_ret = qm_hw_error_handle(qm); + + /* log device error */ + dev_ret = qm_dev_err_handle(qm); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error); + +pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + if (pdev->is_virtfn) + return PCI_ERS_RESULT_NONE; + + pci_info(pdev, "PCI error detected, state(=%d)!!\n", state); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + return hisi_qm_process_dev_error(pdev); +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); + +static int qm_vf_reset_prepare(struct pci_dev *pdev, + struct hisi_qm_list *qm_list, + enum qm_stop_reason stop_reason) +{ + struct pci_dev *dev; + struct hisi_qm *qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(qm, &qm_list->list, list) { + dev = qm->pdev; + if (dev == pdev) + continue; + + if (pci_physfn(dev) == pdev) { + /* save VFs PCIE BAR configuration */ + pci_save_state(dev); + + ret = hisi_qm_stop(qm, stop_reason); + if (ret) { + hisi_qm_set_hw_reset(qm, + QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, + QM_RESET_STOP_RX_OFFSET); + atomic_set(&qm->status.flags, QM_STOP); + } + } + } + + mutex_unlock(&qm_list->lock); + return ret; +} + +static int qm_reset_prepare_ready(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + int delay = 0; + + while (test_and_set_bit(QM_DEV_RESET_STATUS, &pf_qm->hw_status)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return -EBUSY; + } + + return 0; +} + +static int qm_controller_reset_prepare(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "Controller reset not ready!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_SOFT_RESET); + if (ret) { + pci_err(pdev, "Fails to stop VFs!\n"); + return ret; + } + } + + ret = hisi_qm_stop(qm, QM_SOFT_RESET); + if (ret) { + pci_err(pdev, "Fails to stop QM!\n"); + return ret; + } + + if (qm->use_uacce) { + ret = uacce_hw_err_isolate(&qm->uacce); + if (ret) { + pci_err(pdev, "Fails to isolate hw err!\n"); + return ret; + } + } + + return 0; +} + +static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + if (!qm->err_ini.err_info.is_dev_ecc_mbit && + qm->err_ini.err_info.is_qm_ecc_mbit && + qm->err_ini.close_axi_master_ooo) { + + qm->err_ini.close_axi_master_ooo(qm); + + } else if (qm->err_ini.err_info.is_dev_ecc_mbit && + !qm->err_ini.err_info.is_qm_ecc_mbit && + !qm->err_ini.close_axi_master_ooo) { + + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_PF_ABNORMAL_INT_SET); + } +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + unsigned long long value = 0; + acpi_status s; + + /* The reset related sub-control registers are not in PCI BAR */ + if (ACPI_HANDLE(&pdev->dev)) { + s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + qm->err_ini.err_info.acpi_rst, + NULL, &value); + if (ACPI_FAILURE(s)) { + pci_err(pdev, "NO controller reset method!\n"); + return -EIO; + } + + if (value) { + pci_err(pdev, "Reset step %llu failed!\n", value); + return -EIO; + } + + return 0; + } + + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + ret = qm_reg_test(qm); + if (ret) + return ret; + + if 
(qm->vfs_num) { + ret = qm_set_vf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable vf mse bit.\n"); + return ret; + } + } + + ret = qm_set_msi(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable peh msi bit.\n"); + return ret; + } + + qm_dev_ecc_mbit_handle(qm); + + /* OOO register set and check */ + writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL); + + /* If bus lock, reset chip */ + ret = readl_relaxed_poll_timeout(qm->io_base + MASTER_TRANS_RETURN, + val, (val == MASTER_TRANS_RETURN_RW), + QM_REG_RD_INTVRL_US, + QM_REG_RD_TMOUT_US); + if (ret) { + pci_emerg(pdev, "Bus lock! Please reset system.\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable pf mse bit.\n"); + return ret; + } + + return qm_reset_device(qm); +} + +static int qm_vf_reset_done(struct pci_dev *pdev, + struct hisi_qm_list *qm_list) +{ + struct pci_dev *dev; + struct hisi_qm *qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(qm, &qm_list->list, list) { + dev = qm->pdev; + if (dev == pdev) + continue; + + if (pci_physfn(dev) == pdev) { + /* enable VFs PCIE BAR configuration */ + pci_restore_state(dev); + + ret = hisi_qm_restart(qm); + if (ret) + goto reset_fail; + } + } + +reset_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + +static int qm_get_dev_err_status(struct hisi_qm *qm) +{ + u32 err_sts; + + err_sts = qm->err_ini.get_dev_hw_err_status(qm) & + qm->err_ini.err_info.ecc_2bits_mask; + if (err_sts) + return err_sts; + + return 0; +} + +static void hisi_qm_restart_prepare(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_ini.err_info.is_qm_ecc_mbit && + !qm->err_ini.err_info.is_dev_ecc_mbit) + return; + + value = readl(qm->io_base + AM_CFG_PORT_WR_EN); + writel(value & ~qm->err_ini.err_info.msi_wr_port, + qm->io_base + AM_CFG_PORT_WR_EN); + + /* clear dev ecc 2bit error source if having */ + value = qm_get_dev_err_status(qm); + if (value && qm->err_ini.clear_dev_hw_err_status) + qm->err_ini.clear_dev_hw_err_status(qm, value); + + /* clear QM ecc mbit error source */ + writel(QM_ECC_MBIT, qm->io_base + + QM_ABNORMAL_INT_SOURCE); + + /* clear AM Reorder Buffer ecc mbit source */ + writel(ROB_ECC_ERR_MULTPL, qm->io_base + + AM_ROB_ECC_INT_STS); + + if (qm->err_ini.open_axi_master_ooo) + qm->err_ini.open_axi_master_ooo(qm); +} + +static void hisi_qm_restart_done(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_ini.err_info.is_qm_ecc_mbit && + !qm->err_ini.err_info.is_dev_ecc_mbit) + return; + + value = readl(qm->io_base + AM_CFG_PORT_WR_EN); + value |= qm->err_ini.err_info.msi_wr_port; + + writel(value, qm->io_base + AM_CFG_PORT_WR_EN); + qm->err_ini.err_info.is_qm_ecc_mbit = false; + qm->err_ini.err_info.is_dev_ecc_mbit = false; +} + +static int qm_controller_reset_done(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_set_msi(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable peh msi bit!\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable pf mse bit!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_set_vf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable vf mse bit!\n"); + return ret; + } + } + + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) + return ret; + + hisi_qm_restart_prepare(qm); + + ret = hisi_qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_q_assign(qm, qm->vfs_num); + if (ret) { + 
pci_err(pdev, "Failed to assign queue!\n"); + return ret; + } + } + + ret = qm_vf_reset_done(pdev, qm->qm_list); + if (ret) { + pci_err(pdev, "Failed to start VFs!\n"); + return -EPERM; + } + + hisi_qm_dev_err_init(qm); + + hisi_qm_restart_done(qm); + + return 0; +} + +int hisi_qm_controller_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + pci_info(pdev, "Controller resetting...\n"); + + ret = qm_controller_reset_prepare(qm); + if (ret) + goto err_prepare; + + ret = qm_soft_reset(qm); + if (ret) { + pci_err(pdev, "Controller reset failed (%d)\n", ret); + goto err_reset; + } + + ret = qm_controller_reset_done(qm); + if (ret) + goto err_reset; + + clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status); + pci_info(pdev, "Controller reset complete\n"); + + return 0; + +err_prepare: + pci_info(pdev, "Controller reset_prepare failed\n"); + writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); + +err_reset: + pci_info(pdev, "Controller reset failed\n"); + clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status); + /* if resetting fails, isolate the device */ + if (qm->use_uacce && !qm->uacce.is_vf) + atomic_set(&qm->uacce.isolate->is_isolate, 1); + + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_controller_reset); + +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + if (pdev->is_virtfn) + return PCI_ERS_RESULT_RECOVERED; + + pci_info(pdev, "Requesting reset due to PCI error\n"); + pci_cleanup_aer_uncorrect_error_status(pdev); + + /* reset pcie device controller */ + ret = hisi_qm_controller_reset(qm); + if (ret) { + pci_err(pdev, "controller reset failed (%d)\n", ret); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); + +/* check the interrupt is ecc-mbit error or not */ +static int qm_check_dev_error(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + int ret; + + if (pf_qm->fun_type == QM_HW_VF) + return 0; + + ret = qm_get_hw_error_status(pf_qm); + if (ret) + return ret; + + return qm_get_dev_err_status(pf_qm); +} + +void hisi_qm_reset_prepare(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + u32 delay = 0; + int ret; + + hisi_qm_dev_err_uninit(qm); + + while (qm_check_dev_error(qm)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return; + } + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "FLR not ready!\n"); + return; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_FLR); + if (ret) + pci_err(pdev, "Failed to stop vfs!\n"); + } + + ret = hisi_qm_stop(qm, QM_FLR); + if (ret) { + pci_err(pdev, "Failed to stop QM!\n"); + goto err_prepare; + } + + hisi_qm_cache_wb(qm); + pci_info(pdev, "FLR resetting...\n"); + return; + +err_prepare: + pci_info(pdev, "FLR resetting prepare failed!\n"); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); + atomic_set(&qm->status.flags, QM_STOP); + hisi_qm_cache_wb(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); + +static bool qm_flr_reset_complete(struct pci_dev *pdev) +{ + struct pci_dev *pf_pdev = pci_physfn(pdev); + struct hisi_qm *qm = pci_get_drvdata(pf_pdev); + u32 id; + + pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); + if (id == QM_PCI_COMMAND_INVALID) { + pci_err(pdev, 
"Device can not be used!\n"); + return false; + } + + clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status); + return true; +} + +void hisi_qm_reset_done(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + hisi_qm_dev_err_init(qm); + + ret = hisi_qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM!\n"); + goto flr_done; + } + + if (qm->fun_type == QM_HW_PF) { + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) { + pci_err(pdev, "Failed to start QM!\n"); + goto flr_done; + } + + if (qm->vfs_num) + qm_vf_q_assign(qm, qm->vfs_num); + + ret = qm_vf_reset_done(pdev, qm->qm_list); + if (ret) { + pci_err(pdev, "Failed to start VFs!\n"); + goto flr_done; + } + } + +flr_done: + if (qm_flr_reset_complete(pdev)) + pci_info(pdev, "FLR reset complete\n"); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_done); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zhou Wang "); +MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h new file mode 100644 index 0000000000000000000000000000000000000000..9f5e440d739679352907236545de343007ba98e8 --- /dev/null +++ b/drivers/crypto/hisilicon/qm.h @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */ +#ifndef HISI_ACC_QM_H +#define HISI_ACC_QM_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "qm_usr_if.h" + +#define QNUM_V1 4096 +#define QNUM_V2 1024 +#define QM_MAX_VFS_NUM_V2 63 +/* qm user domain */ +#define QM_ARUSER_M_CFG_1 0x100088 +#define AXUSER_SNOOP_ENABLE BIT(30) +#define AXUSER_CMD_TYPE GENMASK(14, 12) +#define AXUSER_CMD_SMMU_NORMAL 1 +#define AXUSER_NS BIT(6) +#define AXUSER_NO BIT(5) +#define AXUSER_FP BIT(4) +#define AXUSER_SSV BIT(0) +#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ + FIELD_PREP(AXUSER_CMD_TYPE, \ + AXUSER_CMD_SMMU_NORMAL) | \ + AXUSER_NS | AXUSER_NO | AXUSER_FP) +#define QM_ARUSER_M_CFG_ENABLE 0x100090 +#define ARUSER_M_CFG_ENABLE 0xfffffffe +#define QM_AWUSER_M_CFG_1 0x100098 +#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 +#define AWUSER_M_CFG_ENABLE 0xfffffffe +#define QM_WUSER_M_CFG_ENABLE 0x1000a8 +#define WUSER_M_CFG_ENABLE 0xffffffff + +/* qm cache */ +#define QM_CACHE_CTL 0x100050 +#define SQC_CACHE_ENABLE BIT(0) +#define CQC_CACHE_ENABLE BIT(1) +#define SQC_CACHE_WB_ENABLE BIT(4) +#define SQC_CACHE_WB_THRD GENMASK(10, 5) +#define CQC_CACHE_WB_ENABLE BIT(11) +#define CQC_CACHE_WB_THRD GENMASK(17, 12) +#define QM_AXI_M_CFG 0x1000ac +#define AXI_M_CFG 0xffff +#define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 +#define AXI_M_CFG_ENABLE 0xffffffff +#define QM_PEH_AXUSER_CFG 0x1000cc +#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 +#define PEH_AXUSER_CFG 0x400801 +#define PEH_AXUSER_CFG_ENABLE 0xffffffff + +#define QM_DFX_MB_CNT_VF 0x104010 +#define QM_DFX_DB_CNT_VF 0x104020 + +#define QM_DFX_SQE_CNT_VF_SQN 0x104030 +#define QM_DFX_CQE_CNT_VF_CQN 0x104040 + +#define QM_AXI_RRESP BIT(0) +#define QM_AXI_BRESP BIT(1) +#define QM_ECC_MBIT BIT(2) +#define QM_ECC_1BIT BIT(3) +#define QM_ACC_GET_TASK_TIMEOUT BIT(4) +#define QM_ACC_DO_TASK_TIMEOUT BIT(5) +#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) +#define QM_SQ_CQ_VF_INVALID BIT(7) +#define QM_CQ_VF_INVALID BIT(8) +#define QM_SQ_VF_INVALID BIT(9) +#define QM_DB_TIMEOUT BIT(10) +#define QM_OF_FIFO_OF BIT(11) +#define QM_DB_RANDOM_INVALID BIT(12) + +#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ + QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ + 
QM_OF_FIFO_OF) +#define QM_BASE_CE QM_ECC_1BIT + +#define HISI_ACC_SGL_SGE_NR_MAX 255 +#define QM_DFX_QN_SHIFT 16 + +#define CURRENT_FUN_MASK GENMASK(5, 0) +#define CURRENT_Q_MASK GENMASK(31, 16) + +#define SQE_ADDR_MASK GENMASK(7, 0) + +#define PCI_BAR_2 2 + +enum qm_stop_reason { + QM_NORMAL, + QM_SOFT_RESET, + QM_FLR, +}; + +enum qm_state { + QM_INIT = 0, + QM_START, + QM_CLOSE, + QM_STOP, +}; + +enum qp_state { + QP_INIT = 1, + QP_START, + QP_STOP, + QP_CLOSE, +}; + +enum qm_hw_ver { + QM_HW_UNKNOWN = -1, + QM_HW_V1 = 0x20, + QM_HW_V2 = 0x21, +}; + +enum qm_fun_type { + QM_HW_PF, + QM_HW_VF, +}; + +enum qm_debug_file { + CURRENT_Q, + CLEAR_ENABLE, + QM_STATE, + DEBUG_FILE_NUM, +}; + +struct qm_dfx { + atomic64_t qm_err_irq_cnt; + atomic64_t aeq_irq_cnt; + atomic64_t abnormal_irq_cnt; + atomic64_t qp_err_cnt; + atomic64_t mb_err_cnt; +}; + +struct debugfs_file { + enum qm_debug_file index; + struct mutex lock; + struct qm_debug *debug; +}; + +struct qm_debug { + u32 sqe_mask_len; + u32 sqe_mask_offset; + u32 curr_qm_qp_num; + struct qm_dfx dfx; + struct dentry *debug_root; + struct dentry *qm_d; + struct debugfs_file files[DEBUG_FILE_NUM]; +}; + +struct qm_cqe { + __le32 rsvd0; + __le16 cmd_id; + __le16 rsvd1; + __le16 sq_head; + __le16 sq_num; + __le16 rsvd2; + __le16 w7; +}; + +struct qm_eqe { + __le32 dw0; +}; + +struct qm_aeqe { + __le32 dw0; +}; + +struct qm_sqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le16 cq_num; + __le16 w13; + __le32 rsvd1; +}; + +struct qm_cqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le16 w8; + __le16 rsvd0; + __le16 pasid; + __le16 w11; + __le32 dw6; + __le32 rsvd1; +}; + +struct qm_eqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +struct qm_aeqc { + __le16 head; + __le16 tail; + __le32 base_l; + __le32 base_h; + __le32 dw3; + __le32 rsvd[2]; + __le32 dw6; +}; + +struct qm_mailbox { + __le16 w0; + __le16 queue_num; + __le32 base_l; + __le32 base_h; + __le32 rsvd; +}; + +struct qm_doorbell { + __le16 queue_num; + __le16 cmd; + __le16 index; + __le16 priority; +}; + +struct qm_dma { + void *va; + dma_addr_t dma; + size_t size; +}; + +struct hisi_qm_status { + u32 eq_head; + bool eqc_phase; + u32 aeq_head; + bool aeqc_phase; + atomic_t flags; + int stop_reason; +}; + +struct hisi_qm_hw_error { + u32 int_msk; + const char *msg; +}; + +struct hisi_qm; + +struct hisi_qm_err_info { + char *acpi_rst; + u32 msi_wr_port; + u32 ecc_2bits_mask; + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; + u32 ce; + u32 nfe; + u32 fe; + u32 msi; +}; + +struct hisi_qm_err_ini { + u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*hw_err_enable)(struct hisi_qm *qm); + void (*hw_err_disable)(struct hisi_qm *qm); + int (*set_usr_domain_cache)(struct hisi_qm *qm); + void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + void (*open_axi_master_ooo)(struct hisi_qm *qm); + void (*close_axi_master_ooo)(struct hisi_qm *qm); + struct hisi_qm_err_info err_info; +}; + +struct hisi_qm_list { + struct mutex lock; + struct list_head list; + bool (*check)(struct hisi_qm *qm); +}; + +struct qm_rsv_buf { + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqc *eqc; + struct qm_aeqc *aeqc; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqc_dma; + dma_addr_t aeqc_dma; + struct qm_dma qcdma; +}; + 
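+/*
+ * struct hisi_qm is the per-PCI-function context of the queue management
+ * (QM) hardware. Accelerator drivers embed it in their own device
+ * structure, for example (from rde.h later in this patch):
+ *
+ *	struct hisi_rde {
+ *		struct hisi_qm qm;
+ *		...
+ *	};
+ *
+ * and recover the outer structure with container_of(), as acc_init() in
+ * rde_api.c does. The QM context holds the mapped MMIO base, the
+ * SQC/CQC/EQE/AEQE DMA buffers, the queue pair array, the error-handling
+ * callbacks (err_ini) and the uacce user-space interface state.
+ */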
+struct hisi_qm { + enum qm_hw_ver ver; + enum qm_fun_type fun_type; + const char *dev_name; + struct pci_dev *pdev; + void __iomem *io_base; + u32 sqe_size; + u32 qp_base; + u32 qp_num; + u32 ctrl_q_num; + u32 vfs_num; + u32 free_qp_num; + struct list_head list; + struct hisi_qm_list *qm_list; + struct qm_dma qdma; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqe *eqe; + struct qm_aeqe *aeqe; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqe_dma; + dma_addr_t aeqe_dma; + struct qm_rsv_buf xqc_buf; + + struct hisi_qm_status status; + struct hisi_qm_err_ini err_ini; + struct rw_semaphore qps_lock; + struct idr qp_idr; + struct hisi_qp *qp_array; + + struct mutex mailbox_lock; + + const struct hisi_qm_hw_ops *ops; + + struct qm_debug debug; + + u32 error_mask; + u32 msi_mask; + unsigned long hw_status; + bool use_uacce; /* register to uacce */ + bool use_sva; + bool is_frozen; + + resource_size_t phys_base; + resource_size_t size; + struct uacce uacce; + const char *algs; + int uacce_mode; + + struct workqueue_struct *wq; + struct work_struct work; + /* design for module not support aer, such as rde */ + int (*abnormal_fix)(struct hisi_qm *qm); +}; + +struct hisi_qp_status { + atomic_t used; + atomic_t send_ref; + u16 sq_tail; + u16 cq_head; + bool cqc_phase; + atomic_t flags; +}; + +struct hisi_qp_ops { + int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); +}; + +struct hisi_qp { + u32 qp_id; + u8 alg_type; + u8 req_type; + u8 c_flag; + + struct qm_dma qdma; + void *sqe; + struct qm_cqe *cqe; + dma_addr_t sqe_dma; + dma_addr_t cqe_dma; + + struct hisi_qp_status qp_status; + struct hisi_qp_ops *hw_ops; + void *qp_ctx; + void (*req_cb)(struct hisi_qp *qp, void *data); + void (*event_cb)(struct hisi_qp *qp); + + struct hisi_qm *qm; + bool is_resetting; + bool is_in_kernel; + u16 pasid; + struct uacce_queue *uacce_q; +}; + +static inline int q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + device, NULL); + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, QNUM_V1, QNUM_V2); + pr_info("No device found currently, suppose queue number is %d\n", + q_num); + } else { + rev_id = pdev->revision; + switch (rev_id) { + case QM_HW_V1: + q_num = QNUM_V1; + break; + case QM_HW_V2: + q_num = QNUM_V2; + break; + default: + return -EINVAL; + } + } + + ret = kstrtou32(val, 10, &n); + if (ret || !n || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int vf_num_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0) + return ret; + + if (n > QM_MAX_VFS_NUM_V2) + return -ERANGE; + + return param_set_int(val, kp); +} + +static inline int mode_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || (n != UACCE_MODE_NOIOMMU && + n != UACCE_MODE_NOUACCE)) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline void hisi_qm_add_to_list(struct hisi_qm *qm, + struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_add_tail(&qm->list, &qm_list->list); + mutex_unlock(&qm_list->lock); +} + +static inline void hisi_qm_del_from_list(struct hisi_qm *qm, + struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_del(&qm->list); + mutex_unlock(&qm_list->lock); +} + 
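+/*
+ * hisi_qm_pre_init() validates the PCI revision against the supported QM
+ * hardware versions, saves the qm pointer as PCI driver data, translates
+ * qm->uacce_mode into qm->use_uacce and, for PFs, records the default
+ * queue base and count. A typical (hypothetical) call sequence from a
+ * driver's probe path might look like:
+ *
+ *	qm->pdev = pdev;
+ *	qm->fun_type = QM_HW_PF;
+ *	qm->uacce_mode = UACCE_MODE_NOUACCE;
+ *	ret = hisi_qm_pre_init(qm, pf_q_num, def_q_num);
+ *	if (!ret)
+ *		ret = hisi_qm_init(qm);
+ *
+ * where pf_q_num and def_q_num are driver-chosen queue counts.
+ */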
+static inline int hisi_qm_pre_init(struct hisi_qm *qm, + u32 pf_q_num, u32 def_q_num) +{ + struct pci_dev *pdev = qm->pdev; + + switch (pdev->revision) { + case QM_HW_V1: + case QM_HW_V2: + qm->ver = pdev->revision; + break; + default: + pci_err(pdev, "hardware version err!\n"); + return -ENODEV; + } + + pci_set_drvdata(pdev, qm); + + switch (qm->uacce_mode) { + case UACCE_MODE_NOUACCE: + qm->use_uacce = false; + break; + case UACCE_MODE_NOIOMMU: + qm->use_uacce = true; + break; + default: + pci_err(pdev, "uacce mode error!\n"); + return -EINVAL; + } + + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = def_q_num; + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + } + + return 0; +} + +void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); +int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list, + struct hisi_qp **qps, int qp_num, u8 alg_type); +int hisi_qm_init(struct hisi_qm *qm); +void hisi_qm_uninit(struct hisi_qm *qm); +void hisi_qm_dev_shutdown(struct pci_dev *pdev); +void hisi_qm_remove_wait_delay(struct hisi_qm *qm, + struct hisi_qm_list *qm_list); +int hisi_qm_start(struct hisi_qm *qm); +int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); +int hisi_qm_stop_qp(struct hisi_qp *qp); +void hisi_qm_release_qp(struct hisi_qp *qp); +int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_get_free_qp_num(struct hisi_qm *qm); +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); +void hisi_qm_debug_regs_clear(struct hisi_qm *qm); +int hisi_qm_debug_init(struct hisi_qm *qm); +int hisi_qm_restart(struct hisi_qm *qm); +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); +int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list); +void hisi_qm_dev_err_init(struct hisi_qm *qm); +void hisi_qm_dev_err_uninit(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, + pci_channel_state_t state); +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); +void hisi_qm_reset_prepare(struct pci_dev *pdev); +void hisi_qm_reset_done(struct pci_dev *pdev); +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev); +int hisi_qm_controller_reset(struct hisi_qm *qm); + +struct hisi_acc_sgl_pool; +struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma); +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl); +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr); +void hisi_acc_free_sgl_pool(struct device *dev, + struct hisi_acc_sgl_pool *pool); +#endif diff --git a/drivers/crypto/hisilicon/qm_usr_if.h b/drivers/crypto/hisilicon/qm_usr_if.h new file mode 100644 index 0000000000000000000000000000000000000000..e12ef639954732b1efaf5fb44809ed7f9e9120f9 --- /dev/null +++ b/drivers/crypto/hisilicon/qm_usr_if.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 HiSilicon Limited. 
*/ +#ifndef HISI_QM_USR_IF_H +#define HISI_QM_USR_IF_H + +#define QM_CQE_SIZE 16 + +/* default queue depth for sq/cq/eq */ +#define QM_Q_DEPTH 1024 +#define QM_EQ_DEPTH (1024 * 2) + +/* page number for queue file region */ +#define QM_DOORBELL_PAGE_NR 1 + + +#define QM_DOORBELL_OFFSET 0x340 +#define QM_V2_DOORBELL_OFFSET 0x1000 + +struct cqe { + __le32 rsvd0; + __le16 cmd_id; + __le16 rsvd1; + __le16 sq_head; + __le16 sq_num; + __le16 rsvd2; + __le16 w7; +}; + +struct hisi_qp_ctx { + __u16 id; + __u16 qc_type; +}; + +#define HISI_QM_API_VER_BASE "hisi_qm_v1" +#define HISI_QM_API_VER2_BASE "hisi_qm_v2" + +#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx) + +#endif diff --git a/drivers/crypto/hisilicon/rde/Makefile b/drivers/crypto/hisilicon/rde/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f5d6c9b456e0fd4a691a8d3a38cec698293de537 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_RDE) += hisi_rde.o +hisi_rde-objs = rde_main.o rde_api.o rde_data.o diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h new file mode 100644 index 0000000000000000000000000000000000000000..b481a0d8fc6b5edf0a2e256c751bae82384975d3 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde.h @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __RDE_H__ +#define __RDE_H__ + +#include +#include "../qm.h" +#include "rde_usr_if.h" +#include "rde_api.h" + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_rde: " fmt + +struct hisi_rde_ctrl; + +struct hisi_rde { + struct hisi_qm qm; + struct hisi_rde_ctrl *ctrl; + struct work_struct reset_work; + u32 smmu_state; +}; + +#define RDE_CM_LOAD_ENABLE 1 +#define RDE_MPCC_MAX_SRC_NUM 17 +#define RDE_FLEXEC_MAX_SRC_NUM 32 +#define RDE_MPCC_CMSIZE 2176 +#define RDE_FLEXEC_CMSIZE 1024 +#define RDE_MEM_SAVE_SHIFT 2 +#define RDE_BUF_TYPE_SHIFT 3 +#define SGL_DATA_OFFSET_SHIFT 8 +#define DIF_GEN_PAD_CTRL_SHIFT 32 +#define DIF_GEN_REF_CTRL_SHIFT 35 +#define DIF_GEN_APP_CTRL_SHIFT 38 +#define DIF_GEN_VER_CTRL_SHIFT 41 +#define DIF_GEN_GRD_CTRL_SHIFT 44 +#define DIF_APP_TAG_SHIFT 48 +#define DIF_VERSION_SHIFT 56 +#define RDE_TASK_DONE_STATUS 0x80 +#define RDE_CRC16_IV 0x310004 +#define RDE_PRP_PAGE_SIZE 0x31022c +#define RDE_SGL_SGE_OFFSET 0x310228 +#define RDE_INVLD_REQ_ID (-1) +#define RDE_ALG_TYPE_MSK 0x60 +#define RDE_BUF_TYPE_MSK 0x18 +#define RDE_MAX_SRC_PLATE_NUM 32 +#define RDE_MAX_DST_PLATE_NUM 17 +#define SRC_ADDR_TABLE_NUM 48 +#define DST_ADDR_TABLE_NUM 26 +#define SRC_DIF_TABLE_NUM 20 +#define DST_DIF_TABLE_NUM 17 +#define RDE_STATUS_MSK 0x7f +#define RDE_DONE_MSK 0x1 +#define RDE_DONE_SHIFT 7 +#define RDE_PER_SRC_COEF_SIZE 32 +#define RDE_PER_SRC_COEF_TIMES 4 +#define RDE_TASK_TMOUT_MS 3000 + + +#define RDE_GN_WITH_MODE(column, mode, parity) \ + ((u8)column + ((ACC_OPT_UPD ^ mode) ? 0 : (0x80 & (parity << 7)))) +#define RDE_GN_CNT(i) (((i + 1) % 2 == 0) ? ((i + 1) >> 1) : ((i + 2) >> 1)) +#define RDE_GN_FLAG(i) (((i + 1) % 2 == 0) ? 2 : 1) +#define RDE_GN_SHIFT(i) (32 * (i == 1 ? 1 : 0)) +#define RDE_CHK_CTRL_CNT(i) ((i / 8) * 5) +#define RDE_LBA_CNT(i) ((i / 8 + 1) + \ + ((i % 2 == 0) ? 
(i >> 1) : ((i - 1) >> 1))) +#define RDE_CHK_CTRL_VALUE(grd, ref, i) \ + ((u64)(grd << 4 | ref) << (8 * (i % 8))) +#define RDE_LBA_SHIFT(i) (32 * ((i % 2) ^ 1)) + +struct hisi_rde_hw_error { + u8 status; + u32 int_msk; + const char *msg; +}; + +/* src data addr table, should be 64byte aligned */ +struct rde_src_tbl { + u64 content[SRC_ADDR_TABLE_NUM]; +}; + +/* src data dif table, should be 64byte aligned */ +struct rde_src_tag_tbl { + u64 content[SRC_DIF_TABLE_NUM]; +}; + +/* dst data addr table, should be 64byte aligned */ +struct rde_dst_tbl { + u64 content[DST_ADDR_TABLE_NUM]; +}; + +/* dst data dif table, should be 64byte aligned */ +struct rde_dst_tag_tbl { + u64 content[DST_DIF_TABLE_NUM]; +}; + +/* inner msg structure, keep addr info */ +struct hisi_rde_msg { + struct rde_src_tbl *src_addr; + dma_addr_t src_dma_addr; + struct rde_dst_tbl *dst_addr; + dma_addr_t dst_dma_addr; + struct rde_src_tag_tbl *src_tag_addr; + dma_addr_t src_tag_dma_addr; + struct rde_dst_tag_tbl *dst_tag_addr; + dma_addr_t dst_tag_dma_addr; + u64 src_record[RDE_MAX_SRC_PLATE_NUM]; + u64 dst_record[RDE_MAX_DST_PLATE_NUM]; + struct hisi_rde_sqe sqe; + struct raid_ec_ctrl *udata; + struct completion completion; + u32 req_id; + int result; +}; + +/* rde ctx structure, acc_init api can alloc and init this structure */ +struct hisi_rde_ctx { + struct device *dev; + struct hisi_qp *qp; + struct hisi_rde_msg *req_list; + unsigned long *req_bitmap; + spinlock_t req_lock; + u32 smmu_state; + u32 session_num; + u8 addr_type; +}; + +/* inner structure, to distinguish diffenernt alg and operation */ +struct rde_type { + u8 alg_mode; + u8 mem_mode; + u8 buf_mode; + u8 alg_type; +}; + +/* RDE hardware error status */ +enum { + RDE_STATUS_NULL = 0, + RDE_BD_ADDR_NO_ALIGN = 0x2, + RDE_BD_RD_BUS_ERR = 0x3, + RDE_IO_ABORT = 0x4, + RDE_BD_ERR = 0x5, + RDE_ECC_ERR = 0x6, + RDE_SGL_ADDR_ERR = 0x7, + RDE_SGL_PARA_ERR = 0x8, + RDE_DATA_RD_BUS_ERR = 0x1c, + RDE_DATA_WR_BUS_ERR = 0x1d, + RDE_CRC_CHK_ERR = 0x1e, + RDE_REF_CHK_ERR = 0x1f, + RDE_DISK0_VERIFY = 0x20, + RDE_DISK1_VERIFY = 0x21, + RDE_DISK2_VERIFY = 0x22, + RDE_DISK3_VERIFY = 0x23, + RDE_DISK4_VERIFY = 0x24, + RDE_DISK5_VERIFY = 0x25, + RDE_DISK6_VERIFY = 0x26, + RDE_DISK7_VERIFY = 0x27, + RDE_DISK8_VERIFY = 0x28, + RDE_DISK9_VERIFY = 0x29, + RDE_DISK10_VERIFY = 0x2a, + RDE_DISK11_VERIFY = 0x2b, + RDE_DISK12_VERIFY = 0x2c, + RDE_DISK13_VERIFY = 0x2d, + RDE_DISK14_VERIFY = 0x2e, + RDE_DISK15_VERIFY = 0x2f, + RDE_DISK16_VERIFY = 0x30, + RDE_CHAN_TMOUT = 0x31, +}; + +/* RDE algorithm types */ +enum { + MPCC = 0x00, /* EC */ + PQ_FLEXEC = 0x40, /* RAID5/RAID6/FlexEC */ + XOR = 0x60, /* XOR */ +}; + +/* RDE buffer access types */ +enum { + PBUF = 0x00, /* Direct Access */ + SGL = 0x08, /* Scatter Gather List */ + PRP = 0x10, /* Physical Region Page List */ + REVD = 0x18, /* Reserved */ +}; + +/* RDE DIF GRD types */ +enum { + NO_GRD = 0, /* no GRD domain */ + GRD = 1, /* GRD domain without checking */ + GRD_CHECK = 2, /* GRD domain with checking */ +}; + +/* RDE DIF REF types */ +enum { + NO_REF = 0, /* no REF domain */ + REF = 1, /* REF domain without checking */ + REF_CHECK_LBA = 2, /* REF domain checking with lab */ + REF_CHECK_PRI = 3, /* REF domain checking with individual information*/ +}; + +/* RDE IO abort switch */ +enum { + NO_ABORT = 0, /* don't abort the io */ + ABORT = 1, /* abort the io */ +}; + +/* RDE coefficient matrix load enable */ +enum { + NO_CM_LOAD = 0, /* don't load matrix */ + CM_LOAD = 1, /* load matrix */ +}; + +/* RDE coefficient matrix types */ 
+enum { + CM_ENCODE = 0, /* encode type */ + CM_DECODE = 1, /* decode type */ +}; + +/* RDE algorithms block size */ +enum { + ALG_BLK_512B = 0, /* 512 bytes */ + ALG_BLK_4K = 1, /* 4K bytes */ +}; + +/* RDE crc iv enable */ +enum { + NO_CRCIV = 0, /* default IV is 0 */ + CRCIV = 1, /* IV is register's value */ +}; + +/* RDE crc iv switch */ +enum { + CRCIV0 = 0, /* select crc16_iv0 of register */ + CRCIV1 = 1, /* select crc16_iv1 of register */ +}; + +/* RDE DIF types */ +enum { + NO_RDE_DIF = 0, /* without DIF */ + RDE_DIF = 1, /* DIF */ +}; + +/* RDE page padding types */ +enum { + NO_PAD = 0, /* without padding */ + PRE_PAD = 1, /* padding before DIF */ + POST_PAD = 2, /* padding after DIF */ +}; + +enum { + QNUM_64 = 64, + QNUM_128 = 128, + QNUM_256 = 256, + QNUM_512 = 512, + QNUM_1024 = 1024, + QNUM_BUTT +}; + +enum { + QDEPTH_64 = 64, + QDEPTH_128 = 128, + QDEPTH_256 = 256, + QDEPTH_512 = 512, + QDEPTH_1024 = 1024, + QDEPTH_BUTT +}; + +static inline void rde_bd_dump(struct hisi_rde_sqe *bd) +{ + int i; + + pr_info_ratelimited("====== BD info start======\n"); + for (i = 0; i < sizeof(struct hisi_rde_sqe) / sizeof(u64); i++) + pr_info_ratelimited("sqe-word[%d]: 0x%llx.\n", + i, *((u64 *)bd + i)); + + pr_info_ratelimited("====== BD info end======\n"); +} + +static inline void rde_table_dump(const struct hisi_rde_msg *req) +{ + int i; + + for (i = 0; i < SRC_ADDR_TABLE_NUM; i++) { + if (req->src_addr->content[i]) + pr_info_ratelimited("Table0 info[%d] is 0x%llx.\n", + i, req->src_addr->content[i]); + } + + for (i = 0; i < SRC_DIF_TABLE_NUM; i++) { + if (req->src_tag_addr->content[i]) + pr_info_ratelimited("Table1 info[%d] is 0x%llx.\n", + i, req->src_tag_addr->content[i]); + } + + for (i = 0; i < DST_ADDR_TABLE_NUM; i++) { + if (req->dst_addr->content[i]) + pr_info_ratelimited("Table2 info[%d] is 0x%llx.\n", + i, req->dst_addr->content[i]); + } + + for (i = 0; i < DST_DIF_TABLE_NUM; i++) { + if (req->dst_tag_addr->content[i]) + pr_info_ratelimited("Table3 info[%d] is 0x%llx.\n", + i, req->dst_tag_addr->content[i]); + } +} + +struct hisi_qp *rde_create_qp(void); +int hisi_rde_abnormal_fix(struct hisi_qm *qm); + +#endif diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c new file mode 100644 index 0000000000000000000000000000000000000000..f1330f1f6bad0318d411d95a895be8e97ba435c2 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_api.c @@ -0,0 +1,1127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rde_data.h" +#include "rde.h" + +static const struct hisi_rde_hw_error rde_task_error[] = { + {.status = RDE_BD_ADDR_NO_ALIGN, .msg = "Rde bd addr no align err"}, + {.status = RDE_BD_RD_BUS_ERR, .msg = "Rde bd read bus err"}, + {.status = RDE_IO_ABORT, .msg = "Rde io abort err"}, + {.status = RDE_BD_ERR, .msg = "Rde bd config err"}, + {.status = RDE_ECC_ERR, .msg = "Rde ecc err"}, + {.status = RDE_SGL_ADDR_ERR, .msg = "Rde sgl/prp read bus err"}, + {.status = RDE_SGL_PARA_ERR, .msg = "Rde sgl/prp config err"}, + {.status = RDE_DATA_RD_BUS_ERR, .msg = "Rde data read bus err"}, + {.status = RDE_DATA_WR_BUS_ERR, .msg = "Rde data write bus err"}, + {.status = RDE_CRC_CHK_ERR, .msg = "Rde data or parity disk grd err"}, + {.status = RDE_REF_CHK_ERR, .msg = "Rde data or parity disk ref err"}, + {.status = RDE_DISK0_VERIFY, .msg = "Rde parity disk0 err"}, + {.status = RDE_DISK1_VERIFY, .msg = "Rde parity disk1 err"}, + {.status = RDE_DISK2_VERIFY, .msg = "Rde parity disk2 err"}, + {.status = RDE_DISK3_VERIFY, .msg = "Rde parity disk3 err"}, + {.status = RDE_DISK4_VERIFY, .msg = "Rde parity disk4 err"}, + {.status = RDE_DISK5_VERIFY, .msg = "Rde parity disk5 err"}, + {.status = RDE_DISK6_VERIFY, .msg = "Rde parity disk6 err"}, + {.status = RDE_DISK7_VERIFY, .msg = "Rde parity disk7 err"}, + {.status = RDE_DISK8_VERIFY, .msg = "Rde parity disk8 err"}, + {.status = RDE_DISK9_VERIFY, .msg = "Rde parity disk9 err"}, + {.status = RDE_DISK10_VERIFY, .msg = "Rde parity disk10 err"}, + {.status = RDE_DISK11_VERIFY, .msg = "Rde parity disk11 err"}, + {.status = RDE_DISK12_VERIFY, .msg = "Rde parity disk12 err"}, + {.status = RDE_DISK13_VERIFY, .msg = "Rde parity disk13 err"}, + {.status = RDE_DISK14_VERIFY, .msg = "Rde parity disk14 err"}, + {.status = RDE_DISK15_VERIFY, .msg = "Rde parity disk15 err"}, + {.status = RDE_DISK16_VERIFY, .msg = "Rde parity disk16 err"}, + {.status = RDE_CHAN_TMOUT, .msg = "Rde channel timeout err"}, + { /* sentinel */ } +}; + +static u32 rde_matrix_len(u8 alg_type, u8 cm_len) +{ + u32 len = 0; + + switch (alg_type) { + case PQ_FLEXEC: + len = RDE_PER_SRC_COEF_SIZE * cm_len; + break; + case MPCC: + len = (RDE_PER_SRC_COEF_SIZE * + RDE_PER_SRC_COEF_TIMES * cm_len); + break; + default: + pr_err("[%s] Err alg type.\n", __func__); + break; + } + + return len; +} + +static int rde_sgl_src_scatterlist_release(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) +{ + u32 i; + int ret; + + for (i = 0; i < num; i++) { + if (req->src_record[i]) { + ret = acc_sgl_phys_to_virt(pdev, + (void *)req->src_record[i], + rde_ctx->smmu_state); + if (ret) { + dev_err(&pdev->dev, + "[%s] Src[%d] fail.\n", __func__, i); + return ret; + } + } + } + + return 0; +} + +static int rde_sgl_dst_scatterlist_release(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, + u32 num) +{ + u32 i; + int ret; + + for (i = 0; i < num; i++) { + if (req->dst_record[i]) { + ret = acc_sgl_phys_to_virt(pdev, + (void *)req->dst_record[i], + rde_ctx->smmu_state); + if (ret) { + dev_err(&pdev->dev, + "[%s] Dst[%d] fail.\n", __func__, i); + return ret; + } + } + } + + return 0; +} + +static void rde_pbuf_src_addr_unmap(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) +{ + u32 i; + u32 gn_cnt; + struct raid_ec_ctrl *udata = req->udata; + + if (!rde_ctx->smmu_state) + return; + + for (i = 0; i < num; i++) { + gn_cnt 
= RDE_GN_CNT(i) + i; + if (req->src_addr->content[gn_cnt]) { + acc_phys_to_virt(pdev, + req->src_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); + } + } +} + +static void rde_pbuf_dst_addr_unmap(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) +{ + u32 i; + u32 gn_cnt; + struct raid_ec_ctrl *udata = req->udata; + + if (!rde_ctx->smmu_state) + return; + + for (i = 0; i < num; i++) { + gn_cnt = RDE_GN_CNT(i) + i; + if (req->dst_addr->content[gn_cnt]) { + acc_phys_to_virt(pdev, + req->dst_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); + } + } +} + +static void rde_cm_addr_unmap(struct pci_dev *pdev, struct hisi_rde_sqe *bd, + u8 alg_type, struct hisi_rde_ctx *rde_ctx) +{ + u32 matrix_len; + + if (!rde_ctx->smmu_state) + return; + + matrix_len = rde_matrix_len(alg_type, (u8)bd->cm_len); + if (bd->coef_matrix_addr && matrix_len) + acc_phys_to_virt(pdev, (dma_addr_t)bd->coef_matrix_addr, + (size_t)matrix_len, rde_ctx->smmu_state); +} + +static void rde_bd_addr_release(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 buf_mode) +{ + int ret = 0; + struct raid_ec_ctrl *udata = req->udata; + u32 src_num = udata->src_num; + u32 dst_num = udata->dst_num; + + if (buf_mode == PBUF) { + rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, src_num); + rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, dst_num); + } else if (buf_mode == SGL) { + ret = rde_sgl_src_scatterlist_release(pdev, + rde_ctx, req, src_num); + if (ret) + dev_err(&pdev->dev, + "[%s] Src release fail.\n", __func__); + + ret = rde_sgl_dst_scatterlist_release(pdev, + rde_ctx, req, dst_num); + if (ret) + dev_err(&pdev->dev, + "[%s] Dst release fail.\n", __func__); + } +} + +static int rde_cm_len_check(struct device *dev, struct raid_ec_ctrl *req, + u8 alg_type) +{ + if (unlikely(req->src_num > RDE_MAX_SRC_PLATE_NUM || + req->dst_num > RDE_MAX_DST_PLATE_NUM)) { + dev_err(dev, "Error!Invalid disk num.\n"); + return -EINVAL; + } + + if (alg_type == MPCC) { + if (unlikely(req->cm_len > RDE_MPCC_MAX_SRC_NUM)) { + dev_err(dev, + "Error!mpcc cmlen should smaller than 17.\n"); + return -EINVAL; + } + } else if (alg_type == PQ_FLEXEC) { + if (unlikely(req->cm_len > RDE_FLEXEC_MAX_SRC_NUM)) { + dev_err(dev, + "Error!flexec cmlen should smaller than 32.\n"); + return -EINVAL; + } + } else { + dev_err(dev, "No support alg_type.\n"); + return -EINVAL; + } + + return 0; +} + +static int rde_io_para_check(struct acc_ctx *ctx, struct raid_ec_ctrl *req, + u8 op_type, u8 alg_type) +{ + struct hisi_rde_ctx *rde_ctx; + + if (unlikely(!ctx || !req || op_type >= ACC_OPT_RAID_BUTT)) { + pr_err("[%s] Invalid input para.\n", __func__); + return -EINVAL; + } + + rde_ctx = (struct hisi_rde_ctx *)ctx->inner; + if (unlikely(!rde_ctx)) { + pr_err("[%s] Rde_ctx without init.\n", __func__); + return -EINVAL; + } + + if ((rde_ctx->addr_type == VA_FROM_HIGHMEM_ZONE && + req->buf_type != ACC_BUF_TYPE_PBUFFER) || + (rde_ctx->addr_type == VA_FROM_HIGHMEM_ZONE && + rde_ctx->smmu_state)) { + dev_err(rde_ctx->dev, "Error!invalid addr & buf type.\n"); + return -EINVAL; + } + + if (unlikely(!req->input_block || !req->data_len)) { + dev_err(rde_ctx->dev, "Error!invalid input block.\n"); + return -EINVAL; + } + + if (unlikely(!req->src_data || !req->dst_data || !req->coe_matrix)) { + dev_err(rde_ctx->dev, "Error!input addr is NULL.\n"); + return -EINVAL; + } + + return rde_cm_len_check(rde_ctx->dev, req, alg_type); +} + +static void src_dif_package(struct hisi_rde_msg 
*req) +{ + u32 i; + u32 lba_info_cnt = 0; + u32 chk_info_cnt = 0; + struct raid_ec_ctrl *udata = req->udata; + u8 grd = udata->src_dif.ctrl.verify.grd_verify_type; + u8 ref = udata->src_dif.ctrl.verify.ref_verify_type; + u32 num = udata->src_num; + + for (i = 0; i < num; i++) { + chk_info_cnt = RDE_CHK_CTRL_CNT(i); + lba_info_cnt = RDE_LBA_CNT(i); + req->src_tag_addr->content[chk_info_cnt] |= + RDE_CHK_CTRL_VALUE(grd, ref, i); + req->src_tag_addr->content[lba_info_cnt] |= + ((u64)udata->src_dif.priv << RDE_LBA_SHIFT(i)); + } +} + +static void dst_dif_package(struct hisi_rde_msg *req) +{ + u32 i; + struct dif_ctrl ctrl; + struct raid_ec_ctrl *udata = req->udata; + u32 num = udata->dst_num; + + ctrl = udata->dst_dif.ctrl; + for (i = 0; i < num; i++) { + req->dst_tag_addr->content[i] |= + ((u64)(ctrl.gen.page_layout_gen_type) << + DIF_GEN_PAD_CTRL_SHIFT); + req->dst_tag_addr->content[i] |= + ((u64)(ctrl.gen.ref_gen_type) << + DIF_GEN_REF_CTRL_SHIFT); + req->dst_tag_addr->content[i] |= + ((u64)(ctrl.gen.app_gen_type) << + DIF_GEN_APP_CTRL_SHIFT); + req->dst_tag_addr->content[i] |= + ((u64)(ctrl.gen.ver_gen_type) << + DIF_GEN_VER_CTRL_SHIFT); + req->dst_tag_addr->content[i] |= + ((u64)(ctrl.gen.grd_gen_type) << + DIF_GEN_GRD_CTRL_SHIFT); + req->dst_tag_addr->content[i] |= (u64)udata->dst_dif.priv; + req->dst_tag_addr->content[i] |= + ((u64)(udata->dst_dif.app) << DIF_APP_TAG_SHIFT); + req->dst_tag_addr->content[i] |= + ((u64)(udata->dst_dif.ver) << DIF_VERSION_SHIFT); + } +} + +static int rde_disk_sgl_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct sgl_hw *sgl_addr, + u64 *content, u64 *record) +{ + int ret; + void *sg_head = NULL; + + switch (rde_ctx->addr_type) { + case VA_FROM_NORMAL_DMA_ZONE: + ret = acc_sgl_virt_to_phys(pdev, sgl_addr, &sg_head, + rde_ctx->smmu_state); + if (unlikely(ret)) + return ret; + break; + case PA_PASS_THROUGH: + *content = (u64)sgl_addr; + return 0; + default: + dev_err(rde_ctx->dev, "[%s] Invalid addr type.\n", __func__); + return -EINVAL; + } + + *content = sg_dma_address((struct scatterlist *)sg_head); + *record = (u64)sg_head; + + return 0; +} + +static int sgl_src_addr_package(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) +{ + int ret, r_ret; + u32 i; + u8 gn; + u32 sgl_data; + u32 gn_cnt, gn_flag, cur_cnt; + struct raid_ec_ctrl *ctrl = req->udata; + u32 num = ctrl->src_num; + struct rde_sgl *rde_sgl_src = (struct rde_sgl *)(ctrl->src_data); + + if (unlikely(!rde_sgl_src->ctrl)) { + dev_err(rde_ctx->dev, "[%s] Sgl_hw is NULL.\n", __func__); + return -EFAULT; + } + + memset(&req->src_record[0], 0, num * sizeof(u64)); + for (i = 0; i < num; i++) { + gn = RDE_GN_WITH_MODE(rde_sgl_src->column, mode, + rde_sgl_src->parity); + sgl_data = (rde_sgl_src->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; + gn_cnt = RDE_GN_CNT(i) + i; + gn_flag = RDE_GN_FLAG(i); + cur_cnt = gn_cnt - gn_flag; + req->src_addr->content[cur_cnt] |= + ((u64)sgl_data << RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_src->ctrl, + &req->src_addr->content[gn_cnt], + &req->src_record[i]); + if (ret) { + r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, + req, i); + if (r_ret) + return r_ret; + return ret; + } + + rde_sgl_src++; + } + + return 0; +} + +static int sgl_dst_addr_package(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) +{ + int ret, r_ret; + u32 i; + u8 gn; + u32 sgl_data; + u32 gn_cnt, gn_flag, cur_cnt; + struct raid_ec_ctrl *ctrl = req->udata; 
+ u32 num = ctrl->dst_num; + struct rde_sgl *rde_sgl_dst = (struct rde_sgl *)(ctrl->dst_data); + + if (unlikely(!rde_sgl_dst->ctrl)) { + dev_err(rde_ctx->dev, "[%s] Dst sgl_addr->ctrl is NULL.\n", + __func__); + return -EFAULT; + } + + memset(&req->dst_record[0], 0, num * sizeof(u64)); + for (i = 0; i < num; i++) { + gn = (u8)(rde_sgl_dst->column); + sgl_data = (rde_sgl_dst->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; + gn_cnt = RDE_GN_CNT(i) + i; + gn_flag = RDE_GN_FLAG(i); + cur_cnt = gn_cnt - gn_flag; + req->dst_addr->content[cur_cnt] |= ((u64)sgl_data << + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_dst->ctrl, + &req->dst_addr->content[gn_cnt], + &req->dst_record[i]); + if (ret) { + r_ret = rde_sgl_dst_scatterlist_release(pdev, rde_ctx, + req, i); + if (r_ret) + return r_ret; + return ret; + } + + rde_sgl_dst++; + } + + return 0; +} + +static int rde_disk_pbuf_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + u64 *content, char *addr, u32 data_len) +{ + dma_addr_t pa = 0; + + switch (rde_ctx->addr_type) { + case VA_FROM_NORMAL_DMA_ZONE: + pa = acc_virt_to_phys(pdev, addr, (size_t)data_len, + rde_ctx->smmu_state); + break; + case VA_FROM_HIGHMEM_ZONE: + pa = acc_pfn_to_phys(addr); + break; + case PA_PASS_THROUGH: + *content = (u64)addr; + return 0; + default: + dev_err(rde_ctx->dev, "[%s] Invalid addr type.\n", __func__); + return -EINVAL; + } + + if (unlikely(!pa)) { + dev_err(rde_ctx->dev, "[%s] Addr map fail.\n", __func__); + return -EFAULT; + } + + *content = pa; + + return 0; +} + +static int pbuf_src_addr_package(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) +{ + u32 i; + int ret; + u8 gn; + u32 gn_cnt, gn_flag, cur_cnt; + struct raid_ec_ctrl *ctrl = req->udata; + u32 data_len_nbytes = ctrl->data_len; + u32 num = ctrl->src_num; + struct rde_pbuf *rde_pbuf_src = (struct rde_pbuf *)(ctrl->src_data); + + for (i = 0; i < num; i++) { + gn = RDE_GN_WITH_MODE(rde_pbuf_src->column, mode, + rde_pbuf_src->parity); + gn_cnt = RDE_GN_CNT(i) + i; + gn_flag = RDE_GN_FLAG(i); + cur_cnt = gn_cnt - gn_flag; + req->src_addr->content[cur_cnt] |= ((u64)gn << + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->src_addr->content[gn_cnt], + rde_pbuf_src->pbuf, + data_len_nbytes); + if (ret) { + rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, i); + return ret; + } + + rde_pbuf_src++; + } + + return 0; +} + +static int pbuf_dst_addr_package(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) +{ + u32 i; + int ret; + u8 gf_coef; + u32 gf_cnt, gf_flag, cur_cnt; + struct raid_ec_ctrl *ctrl = req->udata; + u32 num = ctrl->dst_num; + u32 data_len_nbytes = ctrl->data_len; + struct rde_pbuf *rde_pbuf_dst = (struct rde_pbuf *)(ctrl->dst_data); + + for (i = 0; i < num; i++) { + gf_coef = (u8)rde_pbuf_dst->column; + gf_cnt = RDE_GN_CNT(i) + i; + gf_flag = RDE_GN_FLAG(i); + cur_cnt = gf_cnt - gf_flag; + req->dst_addr->content[cur_cnt] |= ((u64)gf_coef << + RDE_GN_SHIFT(gf_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->dst_addr->content[gf_cnt], + rde_pbuf_dst->pbuf, data_len_nbytes); + if (ret) { + rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, i); + return ret; + } + + rde_pbuf_dst++; + } + + return 0; +} + +static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, + struct rde_type *type) +{ + int ret, r_ret; + u32 num = req->udata->src_num; + + if (type->buf_mode == PBUF) { + ret = 
pbuf_src_addr_package(pdev, rde_ctx, req, type->alg_mode); + if (ret) { + dev_err(&pdev->dev, "Pbuf src addr package fail.\n"); + return ret; + } + ret = pbuf_dst_addr_package(pdev, rde_ctx, req); + if (ret) { + dev_err(&pdev->dev, "Pbuf dst addr package fail.\n"); + rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, num); + return ret; + } + } else if (type->buf_mode == SGL) { + ret = sgl_src_addr_package(pdev, rde_ctx, req, type->alg_mode); + if (ret) { + dev_err(&pdev->dev, "Sgl src addr package fail.\n"); + return ret; + } + ret = sgl_dst_addr_package(pdev, rde_ctx, req); + if (ret) { + dev_err(&pdev->dev, "Sgl dst addr package fail.\n"); + r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, + req, num); + if (r_ret) + return r_ret; + return ret; + } + } else { + dev_err(&pdev->dev, "[%s] Invalid buf type.\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int rde_cm_addr_translation(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct raid_ec_ctrl *ctrl, + struct hisi_rde_sqe *bd, u8 alg_type) +{ + u32 matrix_len = 0; + dma_addr_t pa = 0; + + if (rde_ctx->addr_type != PA_PASS_THROUGH) { + matrix_len = rde_matrix_len(alg_type, ctrl->cm_len); + pa = acc_virt_to_phys(pdev, ctrl->coe_matrix, + (size_t)matrix_len, rde_ctx->smmu_state); + if (unlikely(!pa)) { + dev_err(rde_ctx->dev, + "[%s] Coe_matrix virt to phys fail.\n", + __func__); + return -EFAULT; + } + bd->coef_matrix_addr = pa; + } else + bd->coef_matrix_addr = (u64)ctrl->coe_matrix; + + return 0; +} + +int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, + struct rde_type *type) +{ + int ret; + struct raid_ec_ctrl *ctrl = req->udata; + struct hisi_rde_sqe *bd = &req->sqe; + struct pci_dev *pdev = rde_ctx->qp->qm->pdev; + u32 q_id = rde_ctx->qp->qp_id; + + memset(bd, 0, sizeof(struct hisi_rde_sqe)); + bd->op_tag = q_id * rde_ctx->session_num + req->req_id; + bd->alg_blk_size = ctrl->alg_blk_size; + bd->cm_type = (type->alg_mode == + ACC_OPT_RCT) ? CM_DECODE : CM_ENCODE; + bd->cm_le = ctrl->cm_load; + bd->abort = NO_ABORT; + bd->src_nblks = ctrl->src_num; + bd->dst_nblks = ctrl->dst_num; + if (type->alg_mode == ACC_OPT_VLD) { + bd->chk_dst_ref_ctrl = + ctrl->dst_dif.ctrl.verify.ref_verify_type; + bd->chk_dst_grd_ctrl = + ctrl->dst_dif.ctrl.verify.grd_verify_type; + } + bd->op_type = type->alg_mode | type->mem_mode | + type->buf_mode | type->alg_type; + bd->block_size = ctrl->block_size; + bd->page_pad_type = ctrl->dst_dif.ctrl.gen.page_layout_pad_type; + bd->dif_type = ((ctrl->dst_dif.ctrl.gen.grd_gen_type) ? 
+ RDE_DIF : NO_RDE_DIF); + bd->crciv_sel = CRCIV1; + bd->crciv_en = CRCIV; + bd->cm_len = ctrl->cm_len; + bd->transfer_size = ctrl->input_block - 1; + + ret = rde_cm_addr_translation(pdev, rde_ctx, ctrl, bd, + type->alg_type); + if (ret) + return ret; + bd->src_addr = req->src_dma_addr; + bd->dst_addr = req->dst_dma_addr; + bd->src_tag_addr = req->src_tag_dma_addr; + bd->dst_tag_addr = req->dst_tag_dma_addr; + memset(req->src_addr, 0, sizeof(struct rde_src_tbl)); + memset(req->dst_addr, 0, sizeof(struct rde_dst_tbl)); + ret = hisi_rde_fill_addr_tlb(pdev, rde_ctx, req, type); + if (ret) { + if (rde_ctx->addr_type != PA_PASS_THROUGH) + rde_cm_addr_unmap(pdev, bd, type->alg_type, rde_ctx); + return ret; + } + + memset(req->src_tag_addr, 0, sizeof(struct rde_src_tag_tbl)); + memset(req->dst_tag_addr, 0, sizeof(struct rde_dst_tag_tbl)); + if (bd->dif_type) { + src_dif_package(req); + dst_dif_package(req); + } + + return 0; +} + +static int hisi_rde_alloc_req_id(struct hisi_rde_ctx *rde_ctx) +{ + int req_id; + unsigned long flags; + struct device *dev = rde_ctx->dev; + + spin_lock_irqsave(&rde_ctx->req_lock, flags); + req_id = find_first_zero_bit(rde_ctx->req_bitmap, rde_ctx->session_num); + if ((u32)req_id >= rde_ctx->session_num) { + spin_unlock_irqrestore(&rde_ctx->req_lock, flags); + dev_err(dev, "[%s] No free req id.\n", __func__); + return -EBUSY; + } + set_bit(req_id, rde_ctx->req_bitmap); + spin_unlock_irqrestore(&rde_ctx->req_lock, flags); + dev_dbg(dev, "Alloc_id is %d.\n", req_id); + + return req_id; +} + +static void hisi_rde_free_req_id(struct hisi_rde_ctx *rde_ctx, int req_id) +{ + unsigned long flags; + + dev_dbg(rde_ctx->dev, "Free_id is %d.\n", req_id); + spin_lock_irqsave(&rde_ctx->req_lock, flags); + clear_bit(req_id, rde_ctx->req_bitmap); + spin_unlock_irqrestore(&rde_ctx->req_lock, flags); +} + +static int rde_task_error_log(struct pci_dev *pdev, u8 err_sts) +{ + const struct hisi_rde_hw_error *err = rde_task_error; + + while (err->msg) { + if (err_sts == err->status) { + dev_err_ratelimited(&pdev->dev, + "[%s][Error status=0x%x] found.\n", + err->msg, err->status); + break; + } + + err++; + } + + /* err_sts is 0, fatal engine*/ + if (err_sts == RDE_STATUS_NULL) + return -EAGAIN; + else if (err_sts < RDE_CRC_CHK_ERR || err_sts > RDE_DISK16_VERIFY) + return ACC_INVALID_PARAM; + else if (err_sts >= RDE_CRC_CHK_ERR && err_sts <= RDE_REF_CHK_ERR) + return ACC_RDE_DIF_ERR; + else + return ACC_RDE_DISK_VERIFY_ERR; +} + +static void rde_cb(struct hisi_qp *qp, void *resp) +{ + struct hisi_rde_sqe *wb_sqe = (struct hisi_rde_sqe *)resp; + struct acc_ctx *ctx = qp->qp_ctx; + struct hisi_rde_ctx *rde_ctx = (struct hisi_rde_ctx *)ctx->inner; + u16 req_id = wb_sqe->op_tag % (rde_ctx->session_num); + struct pci_dev *pdev = qp->qm->pdev; + u8 alg_type = wb_sqe->op_type & RDE_ALG_TYPE_MSK; + u8 buf_mode = wb_sqe->op_type & RDE_BUF_TYPE_MSK; + struct hisi_rde_msg *req; + struct raid_ec_ctrl *ctrl; + u8 err_status; + + req = &rde_ctx->req_list[req_id]; + ctrl = req->udata; + err_status = wb_sqe->status & RDE_STATUS_MSK; + if (wb_sqe->status != RDE_TASK_DONE_STATUS) + req->result = rde_task_error_log(pdev, err_status); + + if (ctx->cb) { + if (rde_ctx->addr_type != PA_PASS_THROUGH) { + rde_cm_addr_unmap(pdev, wb_sqe, alg_type, rde_ctx); + rde_bd_addr_release(pdev, rde_ctx, req, buf_mode); + } + hisi_rde_free_req_id(rde_ctx, (int)req_id); + + ctx->cb((void *)ctx, (void *)ctrl, + req->result, ctrl->data_len); + } else + complete(&rde_ctx->req_list[req_id].completion); +} + +int 
hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, + u8 op_type, u8 alg_type, bool sync) +{ + int ret, id; + struct hisi_rde_ctx *rde_ctx; + struct hisi_qp *qp; + struct pci_dev *pdev; + struct hisi_rde_msg *req; + struct rde_type type; + unsigned long flags; + + ret = rde_io_para_check(ctx, ctrl, op_type, alg_type); + if (ret) + return ret; + rde_ctx = (struct hisi_rde_ctx *)ctx->inner; + qp = rde_ctx->qp; + qp->req_cb = rde_cb; + pdev = qp->qm->pdev; + type.alg_mode = op_type; + type.mem_mode = ctrl->mem_saving << RDE_MEM_SAVE_SHIFT; + type.buf_mode = (u8)(ctrl->buf_type) << RDE_BUF_TYPE_SHIFT; + type.alg_type = alg_type; + + id = hisi_rde_alloc_req_id(rde_ctx); + if (id < 0) + return id; + req = &rde_ctx->req_list[id]; + req->req_id = (u32)id; + req->udata = ctrl; + req->result = 0; + init_completion(&req->completion); + + ret = hisi_rde_fill_sqe(rde_ctx, req, &type); + if (ret) { + dev_err(rde_ctx->dev, "[%s] Package sqe failed.\n", __func__); + goto req_free; + } + + spin_lock_irqsave(&rde_ctx->req_lock, flags); + ret = hisi_qp_send(qp, &req->sqe); + if (ret < 0) { + spin_unlock_irqrestore(&rde_ctx->req_lock, flags); + goto addr_unmap; + } + spin_unlock_irqrestore(&rde_ctx->req_lock, flags); + + if (!sync) + return ret; + + if (wait_for_completion_timeout(&req->completion, + msecs_to_jiffies(RDE_TASK_TMOUT_MS)) + == 0) { + dev_err_ratelimited(rde_ctx->dev, "Sync mode task timeout.\n"); + ret = -ETIME; + goto addr_unmap; + } + + ret = req->result; + +addr_unmap: + if (rde_ctx->addr_type != PA_PASS_THROUGH) { + rde_cm_addr_unmap(pdev, &req->sqe, type.alg_type, rde_ctx); + rde_bd_addr_release(pdev, rde_ctx, req, type.buf_mode); + } +req_free: + hisi_rde_free_req_id(rde_ctx, id); + return ret; +} + +static int hisi_rde_start_qp(struct hisi_qp *qp, struct acc_ctx *ctx, + int req_type) +{ + struct hisi_rde_ctx *rde_ctx; + int ret; + + qp->req_type = req_type; + qp->qp_ctx = ctx; + + rde_ctx = (struct hisi_rde_ctx *)ctx->inner; + rde_ctx->qp = qp; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_release_qp; + + return 0; + +err_release_qp: + hisi_qm_release_qp(qp); + return ret; +} + +static void hisi_rde_release_qp(struct hisi_rde_ctx *rde_ctx) +{ + hisi_qm_stop_qp(rde_ctx->qp); + hisi_qm_release_qp(rde_ctx->qp); +} + +static int hisi_rde_tbl_init(struct device *dev, struct hisi_rde_msg *req) +{ + req->src_addr = dma_alloc_coherent(dev, + (size_t)sizeof(struct rde_src_tbl), + &req->src_dma_addr, GFP_KERNEL); + if (!req->src_addr) { + dev_err(dev, "[%s] Alloc rde_src_tlb failed.\n", __func__); + return -ENOMEM; + } + + req->dst_addr = dma_alloc_coherent(dev, + (size_t)sizeof(struct rde_dst_tbl), + &req->dst_dma_addr, GFP_KERNEL); + if (!req->dst_addr) { + dev_err(dev, "[%s] Alloc rde_dst_tlb failed.\n", __func__); + return -ENOMEM; + } + + req->src_tag_addr = dma_alloc_coherent(dev, + (size_t)sizeof(struct rde_src_tag_tbl), + &req->src_tag_dma_addr, GFP_KERNEL); + if (!req->src_tag_addr) { + dev_err(dev, "[%s] Alloc rde_src_tag_tlb failed.\n", __func__); + return -ENOMEM; + } + + req->dst_tag_addr = dma_alloc_coherent(dev, + (size_t)sizeof(struct rde_dst_tag_tbl), + &req->dst_tag_dma_addr, GFP_KERNEL); + if (!req->dst_tag_addr) { + dev_err(dev, "[%s] Alloc rde_dst_tag_tlb failed.\n", __func__); + return -ENOMEM; + } + + return 0; +} + +static void hisi_rde_tbl_deinit(struct device *dev, struct hisi_rde_msg *req) +{ + if (!dev || !req) { + pr_err("[%s][%d] Invalid para.\n", __func__, __LINE__); + return; + } + + if (req->src_addr) { + dma_free_coherent(dev, 
(size_t)sizeof(struct rde_src_tbl), + req->src_addr, req->src_dma_addr); + req->src_addr = NULL; + } + + if (req->dst_addr) { + dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tbl), + req->dst_addr, req->dst_dma_addr); + req->dst_addr = NULL; + } + + if (req->src_tag_addr) { + dma_free_coherent(dev, (size_t)sizeof(struct rde_src_tag_tbl), + req->src_tag_addr, req->src_tag_dma_addr); + req->src_tag_addr = NULL; + } + + if (req->dst_tag_addr) { + dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tag_tbl), + req->dst_tag_addr, req->dst_tag_dma_addr); + req->dst_tag_addr = NULL; + } +} + +static void hisi_rde_session_init(struct hisi_rde_ctx *rde_ctx) +{ + u32 num = rde_ctx->qp->qm->qp_num; + + if (num <= QNUM_64) + rde_ctx->session_num = QDEPTH_1024; + else if (num > QNUM_64 && num <= QNUM_128) + rde_ctx->session_num = QDEPTH_512; + else if (num > QNUM_128 && num <= QNUM_256) + rde_ctx->session_num = QDEPTH_256; + else if (num > QNUM_256 && num <= QNUM_512) + rde_ctx->session_num = QDEPTH_128; + else + rde_ctx->session_num = QDEPTH_64; +} + +static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen) +{ + struct device *dev = rde_ctx->dev; + int i, j; + int ret; + + spin_lock_init(&rde_ctx->req_lock); + rde_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), sizeof(long), + GFP_KERNEL); + if (!rde_ctx->req_bitmap) + return -ENOMEM; + + rde_ctx->req_list = kcalloc(qlen, sizeof(struct hisi_rde_msg), + GFP_KERNEL); + if (!rde_ctx->req_list) { + kfree(rde_ctx->req_bitmap); + rde_ctx->req_bitmap = NULL; + return -ENOMEM; + } + + for (i = 0; i < qlen; i++) { + ret = hisi_rde_tbl_init(dev, &rde_ctx->req_list[i]); + if (ret) + goto err_proc; + } + + return 0; + +err_proc: + for (j = 0; j <= i; j++) + hisi_rde_tbl_deinit(dev, &rde_ctx->req_list[j]); + kfree(rde_ctx->req_list); + rde_ctx->req_list = NULL; + kfree(rde_ctx->req_bitmap); + rde_ctx->req_bitmap = NULL; + return ret; +} + +int acc_init(struct acc_ctx *ctx) +{ + struct hisi_rde_ctx *rde_ctx; + struct hisi_rde *hisi_rde; + struct hisi_qp *qp; + struct hisi_qm *qm; + int ret; + + if (unlikely(!ctx)) { + pr_err("[%s] Acc_ctx is NULL.\n", __func__); + return -EINVAL; + } + + qp = rde_create_qp(); + if (unlikely(!qp)) { + pr_err("[%s]Can not create RDE qp.\n", __func__); + return -ENODEV; + } + /* alloc inner private struct */ + rde_ctx = kzalloc(sizeof(*rde_ctx), GFP_KERNEL); + if (unlikely(!rde_ctx)) { + pr_err("[%s] Alloc rde_ctx failed.\n", __func__); + return -ENOMEM; + } + ctx->inner = (void *)rde_ctx; + + qm = qp->qm; + if (unlikely(!qm->pdev)) { + pr_err("[%s] Pdev is NULL.\n", __func__); + return -ENODEV; + } + rde_ctx->dev = &qm->pdev->dev; + + ret = hisi_rde_start_qp(qp, ctx, 0); + if (ret) { + dev_err(rde_ctx->dev, "[%s] start qp failed.\n", __func__); + goto qp_err; + } + + hisi_rde = container_of(qm, struct hisi_rde, qm); + rde_ctx->smmu_state = hisi_rde->smmu_state; + rde_ctx->addr_type = ctx->addr_type; + hisi_rde_session_init(rde_ctx); + ret = hisi_rde_ctx_init(rde_ctx, (int)rde_ctx->session_num); + if (ret) { + dev_err(rde_ctx->dev, "[%s] Init rde ctx failed.\n", __func__); + goto ctx_err; + } + + return 0; + +ctx_err: + hisi_rde_release_qp(rde_ctx); +qp_err: + kfree(rde_ctx); + ctx->inner = NULL; + return ret; +} +EXPORT_SYMBOL(acc_init); + +int acc_clear(struct acc_ctx *ctx) +{ + struct hisi_rde_ctx *rde_ctx; + u32 i; + + if (unlikely(!ctx)) { + pr_err("[%s] Acc_ctx is NULL.\n", __func__); + return -EINVAL; + } + + rde_ctx = (struct hisi_rde_ctx *)ctx->inner; + if (unlikely(!rde_ctx)) { + pr_err("[%s] Rde ctx is 
NULL.\n", __func__); + return -EINVAL; + } + + if (unlikely(!rde_ctx->dev)) { + pr_err("[%s] Dev is NULL.\n", __func__); + return -EINVAL; + } + + for (i = 0; i < rde_ctx->session_num; i++) + hisi_rde_tbl_deinit(rde_ctx->dev, &rde_ctx->req_list[i]); + + kfree(rde_ctx->req_bitmap); + rde_ctx->req_bitmap = NULL; + kfree(rde_ctx->req_list); + rde_ctx->req_list = NULL; + hisi_rde_release_qp(rde_ctx); + + kfree(rde_ctx); + ctx->inner = NULL; + + return 0; +} +EXPORT_SYMBOL(acc_clear); + +int acc_setup_callback(struct acc_ctx *ctx, acc_callback cb) +{ + if (!ctx) { + pr_err("[%s] Hisi_rde acc_ctx is NULL.\n", __func__); + return -EINVAL; + } + + ctx->cb = cb; + + return 0; +} +EXPORT_SYMBOL(acc_setup_callback); + +int acc_do_flexec_asyn(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, + uint8_t op_type) +{ + return hisi_rde_io_proc(ctx, ctrl, op_type, PQ_FLEXEC, false); +} +EXPORT_SYMBOL(acc_do_flexec_asyn); + +int acc_do_flexec(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, + uint8_t op_type) +{ + return hisi_rde_io_proc(ctx, ctrl, op_type, PQ_FLEXEC, true); +} +EXPORT_SYMBOL(acc_do_flexec); + +int acc_do_mpcc_asyn(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, + uint8_t op_type) +{ + return hisi_rde_io_proc(ctx, ctrl, op_type, MPCC, false); +} +EXPORT_SYMBOL(acc_do_mpcc_asyn); + +int acc_do_mpcc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, + uint8_t op_type) +{ + return hisi_rde_io_proc(ctx, ctrl, op_type, MPCC, true); +} +EXPORT_SYMBOL(acc_do_mpcc); + diff --git a/drivers/crypto/hisilicon/rde/rde_api.h b/drivers/crypto/hisilicon/rde/rde_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b284a5b112c1b8b5ff5dd5f254a6a49fa951dbed --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_api.h @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __RDE_API_H__ +#define __RDE_API_H__ + +/** + * @brief dif pad type + */ +enum DIF_PAGE_LAYOUT_PAD_TYPE_E { + DIF_PAGE_LAYOUT_PAD_NONE = 0x0, + DIF_PAGE_LAYOUT_PAD_AHEAD_DIF = 0x1, /* 4096+56+8 */ + DIF_PAGE_LAYOUT_PAD_BEHIND_DIF = 0x2, /* 4096+8+56 */ + DIF_PAGE_LAYOUT_PAD_BUTT +}; + +/** + * @brief dif pad gen mode enumeration, rde only support 0,3,5. + */ +enum DIF_PAGE_LAYOUT_PAD_GEN_CTRL_E { + DIF_PAGE_LAYOUT_PAD_GEN_NONE = 0x0, + DIF_PAGE_LAYOUT_PAD_GEN_FROM_ZERO = 0x3, + DIF_PAGE_LAYOUT_PAD_GEN_FROM_SOURCE_DATA = 0x4, + DIF_PAGE_LAYOUT_PAD_GEN_FROM_RAID_OR_EC = 0x5, + DIF_PAGE_LAYOUT_PAD_GEN_BUTT +}; + +/** + * @brief dif grd gen mode enumeration. + */ +enum DIF_GRD_GEN_CTRL_E { + DIF_GRD_GEN_NONE = 0x0, + DIF_GRD_GEN_FROM_T10CRC = 0x1, + DIF_GRD_GEN_FROM_RAID_OR_EC = 0x5, + DIF_GRD_GEN_BUTT +}; + +/** + * @brief dif ver gen mode enumeration, rde only support 0 or 1. + */ +enum DIF_VER_GEN_CTRL_E { + DIF_VER_GEN_NONE = 0x0, + DIF_VER_GEN_FROM_INPUT = 0x1, + DIF_VER_GEN_FROM_ZERO = 0x3, + DIF_VER_GEN_FROM_SOURCE_DATA = 0x4, + DIF_VER_GEN_BUTT +}; + +/** + * @brief dif app gen mode enumeration, rde only support 0,1,5. 
+ */ +enum DIF_APP_GEN_CTRL_E { + DIF_APP_GEN_NONE = 0x0, + DIF_APP_GEN_FROM_INPUT = 0x1, + DIF_APP_GEN_FROM_ZERO = 0x3, + DIF_APP_GEN_FROM_SOURCE_DATA = 0x4, + DIF_APP_GEN_FROM_RAID_OR_EC = 0x5, + DIF_APP_GEN_BUTT +}; + +/** + * @brief dif ref gen mode enumeration, rde only support 0,1,2,5. + */ +enum DIF_REF_GEN_CTRL_E { + DIF_REF_GEN_NONE = 0x0, + DIF_REF_GEN_FROM_INPUT_LBA = 0x1, + DIF_REF_GEN_FROM_INDIVIDUAL_INFO = 0x2, + DIF_REF_GEN_FROM_ZERO = 0x3, + DIF_REF_GEN_FROM_SOURCE_DATA = 0x4, + DIF_REF_GEN_FROM_RAID_OR_EC = 0x5, + DIF_REF_GEN_BUTT +}; + +/** + * @brief dif verify mode enumeration, grd: rde only support 0,1,2. + */ +enum DIF_VERIFY_CTRL_E { + DIF_VERIFY_NONE = 0x0, + DIF_VERIFY_DO_NOT_VERIFY = 0x1, + DIF_VERIFY_ALL_BLOCK = 0x2, + DIF_VERIFY_BY_INDIVIDUAL_INFO = 0x3, + DIF_VERIFY_BUTT +}; + +/** + * @brief data store mode, sdk do not support prp temporarily. + */ +enum ACC_BUF_TYPE_E { + ACC_BUF_TYPE_PBUFFER = 0x0, + ACC_BUF_TYPE_SGL = 0x1, + ACC_BUF_TYPE_PRP = 0x2, + ACC_BUF_TYPE_BUTT +}; + +/** + * @brief rde operation enumeration. + */ +enum ACC_OPT_RAID_E { + ACC_OPT_GEN = 0x0, /* generate */ + ACC_OPT_VLD = 0x1, /* validate */ + ACC_OPT_UPD = 0x2, /* update */ + ACC_OPT_RCT = 0x3, /* reconstruct */ + ACC_OPT_RAID_BUTT +}; + +/** + * @brief input addr type mode + * @note + * value 0 means input virt addr from + * kzalloc/get_free_pages/dma_alloc_coherent + * value 1 means input phy addr directly without tranform + * value 2 means input virt addr from vmalloc, + * and this addr type only supports pbuf data store mode + * in smmu bypass mode + */ +enum ACC_ADDR_TYPE_E { + VA_FROM_NORMAL_DMA_ZONE = 0x0, + PA_PASS_THROUGH = 0x1, + VA_FROM_HIGHMEM_ZONE = 0x2, + ACC_ADDR_TYPE_BUTT +}; + +/** + * @brief WRR sched, weights is 1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16. + */ +enum ACC_PRT_E { + ACC_PRT_WEIGHTS_1 = 0x0, + ACC_PRT_WEIGHTS_2, + ACC_PRT_WEIGHTS_3, + ACC_PRT_WEIGHTS_4, + ACC_PRT_WEIGHTS_5, + ACC_PRT_WEIGHTS_6, + ACC_PRT_WEIGHTS_7, + ACC_PRT_WEIGHTS_8, + ACC_PRT_WEIGHTS_9, + ACC_PRT_WEIGHTS_10, + ACC_PRT_WEIGHTS_11, + ACC_PRT_WEIGHTS_12, + ACC_PRT_WEIGHTS_13, + ACC_PRT_WEIGHTS_14, + ACC_PRT_WEIGHTS_15, + ACC_PRT_WEIGHTS_16, + ACC_PRT_BUTT, +}; + +/** + * @brief sge structure, should fill buf and len. + * @buf: page data start address, 64bit + * @len: valid data len, Byte + * @note + * usually, just need to fill buf and len + */ +struct sgl_entry_hw { + char *buf; + void *page_ctrl; + uint32_t len; + uint32_t pad; + uint32_t pad0; + uint32_t pad1; +}; + +/** + * @brief sgl structure. + * @next: next sgl point, to make up chain, 64bit + * @entry_sum_in_chain: sum of entry_sum_in_sgl in sgl chain + * @entry_sum_in_sgl: valid sgl_entry num in this sgl + * @entry_num_in_sgl: sgl_entry num in this sgl + * @entries: sgl_entry point + * @note + * usually, just need to fill next, entry_sum_in_chain, + * entry_sum_in_sgl, entry_num_in_sgl and entry + * entry_sum_in_chain is valid from the first sgl + * entry_sum_in_sgl <= entry_num_in_sgl + * sgl_entry point is determined by entry_sum_in_sgl + */ +struct sgl_hw { + struct sgl_hw *next; + uint16_t entry_sum_in_chain; + uint16_t entry_sum_in_sgl; + uint16_t entry_num_in_sgl; + uint8_t pad0[2]; + uint64_t serial_num; + uint32_t flag; + uint32_t cpu_id; + uint8_t pad1[8]; + uint8_t reserved[24]; + struct sgl_entry_hw entries[0]; +}; + +/** + * @brief sgl structure for rde. 
+ * @ctrl: source and destination data block SGL address + * @buf_offset: offset of each data disk in the SGL chain + * @parity: 0 means data disk, 1 means parity disk + * @column: the index corresponding to src and dst disk + * @note + * parity is only valid in update mode + */ +struct rde_sgl { + struct sgl_hw *ctrl; + uint32_t buf_offset; + uint8_t parity; + uint8_t reserve; + uint16_t column; +}; + +/** + * @brief pbuf structure for rde. + * @note + * parity is only valid in update mode + */ +struct rde_pbuf { + char *pbuf; + uint32_t reserve1; + uint8_t parity; + uint8_t reserve2; + uint16_t column; +}; + +/** + * @brief dif data structure. + * @grd: 16bit guard tag + * @ver: 8bit version + * @app: 8bit application information field + * @ref: 32bit reference tag + */ +struct dif_data { + uint16_t grd; + uint8_t ver; + uint8_t app; + uint32_t ref; +}; + +/** + * @brief dif gen ctrl structure. + * @page_layout_gen_type: denoted by enum DIF_PAGE_LAYOUT_PAD_GEN_CTRL_E + * @grd_gen_type: denoted by enum DIF_GRD_GEN_CTRL_E + * @ver_gen_type: denoted by enum DIF_VER_GEN_CTRL_E + * @app_gen_type: denoted by enum DIF_APP_GEN_CTRL_E + * @ref_gen_type: denoted by enum DIF_REF_GEN_CTRL_E + * @page_layout_pad_type: denoted by enum DIF_PAGE_LAYOUT_PAD_TYPE_E + */ +struct dif_gen { + uint32_t page_layout_gen_type:4; + uint32_t grd_gen_type:4; + uint32_t ver_gen_type:4; + uint32_t app_gen_type:4; + uint32_t ref_gen_type:4; + uint32_t page_layout_pad_type:2; + uint32_t reserved:10; +}; + +/** + * @brief dif verify ctrl structure. + * @grd_verify_type: denoted by enum DIF_VERIFY_CTRL_E + * @ref_verify_type: denoted by enum DIF_VERIFY_CTRL_E + * @note + * only grd_verify_type and ref_verify_type need to be filled + */ +struct dif_verify { + uint16_t page_layout_pad_type:2; + uint16_t grd_verify_type:4; + uint16_t ref_verify_type:4; + uint16_t reserved:6; +}; + +/** + * @brief dif ctrl structure. + */ +struct dif_ctrl { + struct dif_gen gen; + struct dif_verify verify; +}; + +/** + * @brief general dif structure. + * @lba: lba for dif ref field + * @priv: individual info for dif ref field + * @ver: 8bit version + * @app: 8bit application information field + * @note + * RDE does not need lba to be filled + */ +struct acc_dif { + uint64_t lba; + uint32_t priv; + uint8_t ver; + uint8_t app; + struct dif_ctrl ctrl; +}; + +/** + * @brief ctrl information for each request; + * the user should allocate and initialize this structure.
+ * @src_data: src data address, reference rde data structure + * @dst_data: dst data address, reference rde data structure + * @src_num: number of source disks + * @dst_num: number of dst disks + * @block_size: support 512,520,4096,4104,4160 + * @input_block: number of sector + * @data_len: data len of per disk, block_size (with dif)* input_block + * @buf_type: denoted by ACC_BUF_TYPE_E + * @src_dif: dif information of source disks + * @dst_dif: dif information of dest disks + * @cm_load: coe_matrix reload control, 0: do not load, 1: load + * @cm_len: length of loaded coe_matrix, equal to src_num + * @alg_blk_size: algorithm granularity, 0: 512 gran, 1: 4096 gran + * @mem_saving: mem saving or not, default 0 + * @coe_matrix: coe matrix address, should be 64byte aligned + * @priv: design for user + * @note + * only mpcc support mem_saving mode, no mem_saving is 0x0, mem_saving is 0x1 + */ +struct raid_ec_ctrl { + void *src_data; + void *dst_data; + uint32_t src_num; + uint32_t dst_num; + uint32_t block_size; + uint32_t input_block; + uint32_t data_len; + uint32_t buf_type; + struct acc_dif src_dif; + struct acc_dif dst_dif; + uint8_t cm_load; + uint8_t cm_len; + uint8_t alg_blk_size; + uint8_t mem_saving; + void *coe_matrix; + void *priv; +}; + +/** + * @brief acc_callback of user. + * @note + * ctx means struct acc_ctx + * tag means struct raid_ec_ctrl + */ +typedef void (*acc_callback)(void *ctx, void *tag, int status, size_t len); + +/** + * @brief acc ctx structure, acc_init api will init this structure + * @inner: reserved for SDK to point to hisi_rde_ctx structure + * @cb: callback function for pool and asynchronously api + * @priority: denoted by ACC_PRT_E + * @addr_type: denoted by ACC_ADDR_TYPE_E + */ +struct acc_ctx { + void *inner; + acc_callback cb; + uint8_t priority; + uint8_t addr_type; +}; + +/** + * @brief return value. + */ +enum ACC_STATUS_E { + ACC_SUCCESS = 0, + ACC_INVALID_PARAM = (-103), /*!< parameter error */ + ACC_RDE_DIF_ERR = (-113), /*!< Input or Output dif check error */ + ACC_RDE_DISK_VERIFY_ERR = (-114) /*!< Output data verify error */ +}; + +/** + * + * @brief initialization before you call the other api. + * + * @param [in] ctx is the context which manage the instance. + * @retval 0 is success, else is a negative number that is error code. + * + * @note + * Be sure you will fill para cb and addr_type, then call this function. + * + */ +int acc_init(struct acc_ctx *ctx); + +/** + * + * @brief reconfig callback of ctx. + * + * @param [in] ctx is the context which manage the instance. + * @retval 0 is success, else is a negative number that is error code. + * + * @note + * + */ +int acc_setup_callback(struct acc_ctx *ctx, acc_callback cb); + +/** + * + * @brief release resource that alloced by acc_init(). + * + * @param [in] ctx is the context which manage the instance. + * @retval 0 is success, else is a negative number that is error code. + * + * @note + * + */ +int acc_clear(struct acc_ctx *ctx); + +/** + * + * @brief flexec/raid5/raid6 operation asynchronously. + * + * @param [in] ctx is the context which manage the instance. + * @param [in] ctrl is the parameter data of current io. + * @param [in] op_type is from ACC_OPT_RAID_E + * @retval 0 is success, else is a negative number that is error code. + * + * @note + *Multiple concurrent processing is not supported for the same instance. + */ +int acc_do_flexec_asyn(struct acc_ctx *ctx, + struct raid_ec_ctrl *ctrl, uint8_t op_type); + +/** + * + * @brief mpcc operation asynchronously. 
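+ *
+ * A minimal usage sketch of the asynchronous flow (illustrative only:
+ * my_done_cb and ctrl are hypothetical caller-side names, and all error
+ * handling and raid_ec_ctrl/data buffer setup are omitted):
+ *
+ *	struct acc_ctx ctx = { .cb = my_done_cb,
+ *			       .addr_type = VA_FROM_NORMAL_DMA_ZONE };
+ *
+ *	ret = acc_init(&ctx);
+ *	ret = acc_do_mpcc_asyn(&ctx, &ctrl, ACC_OPT_GEN);
+ *
+ * On completion my_done_cb(&ctx, &ctrl, status, len) is invoked, after
+ * which acc_clear(&ctx) releases the instance.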
+ * + * @param [in] ctx is the context which manage the instance. + * @param [in] ctrl is the parameter data of current io. + * @param [in] op_type is from ACC_OPT_RAID_E + * @retval 0 is success, else is a negative number that is error code. + * + * @note + *Multiple concurrent processing is not supported for the same instance. + */ +int acc_do_mpcc_asyn(struct acc_ctx *ctx, + struct raid_ec_ctrl *ctrl, uint8_t op_type); + +/** + * + * @brief flexec/raid5/raid6 operation synchronously. + * + * @param [in] ctx is the context which manage the instance. + * @param [in] ctrl is the parameter data of current io. + * @param [in] op_type is from ACC_OPT_RAID_E + * @retval 0 is success, else is a negative number that is error code. + * + * @note + *Multiple concurrent processing is not supported for the same instance. + */ +int acc_do_flexec(struct acc_ctx *ctx, + struct raid_ec_ctrl *ctrl, uint8_t op_type); + +/** + * + * @brief mpcc operation synchronously. + * + * @param [in] ctx is the context which manage the instance. + * @param [in] ctrl is the parameter data of current io. + * @param [in] op_type is from ACC_OPT_RAID_E + * @retval 0 is success, else is a negative number that is error code. + * + * @note + *Multiple concurrent processing is not supported for the same instance. + */ +int acc_do_mpcc(struct acc_ctx *ctx, + struct raid_ec_ctrl *ctrl, uint8_t op_type); + +#endif /* __ACC_API_H__ */ diff --git a/drivers/crypto/hisilicon/rde/rde_data.c b/drivers/crypto/hisilicon/rde/rde_data.c new file mode 100644 index 0000000000000000000000000000000000000000..c25d3f36b9bf52c3a64bc21edad2ece3ddff1497 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_data.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "rde_data.h" + +static inline u32 sgl_addr_cnt(struct sgl_hw *sgl) +{ + u32 cnt = 0; + struct sgl_hw *cur_sgl = sgl; + + if (!sgl) { + pr_err("[%s] Sgl address is NULL.\n", __func__); + return 0; + } + + while (cur_sgl) { + cnt += 1; + cnt += cur_sgl->entry_sum_in_sgl; + cur_sgl = cur_sgl->next; + } + + return cnt; +} + +int acc_sgl_dump(struct sgl_hw *data) +{ + u32 i; + u32 cnt_entries; + struct sgl_hw *cur_sgl; + struct sgl_hw *next_sgl; + struct sgl_entry_hw *entry; + + if (unlikely(!data->entry_sum_in_sgl)) { + pr_err("Error! 
The entrysum of sgl is zero.\n"); + return -EINVAL; + } + cnt_entries = sgl_addr_cnt(data); + pr_info("Sgl entries:%d.\n", cnt_entries); + + for (cur_sgl = data; cur_sgl; ) { + pr_info("Sgl addr: 0x%pK.\n", cur_sgl); + pr_info("NextSgl: 0x%pK.\n", cur_sgl->next); + pr_info("EntrySumInChain: %u.\n", cur_sgl->entry_sum_in_chain); + pr_info("EntrySumInSgl: %u.\n", cur_sgl->entry_sum_in_sgl); + + entry = cur_sgl->entries; + for (i = 0; (i < cur_sgl->entry_sum_in_sgl && + entry->buf); i++) { + pr_info("Entries[%d]:addr = 0x%pK.\n", i, entry->buf); + entry++; + } + if (cur_sgl->next) + next_sgl = cur_sgl->next; + else + next_sgl = NULL; + + cur_sgl = next_sgl; + } + + return 0; +} + +static void acc_sgl_to_scatterlist(struct pci_dev *pdev, struct sgl_hw *data, + struct scatterlist *sglist, u32 smmu_state) +{ + u16 i; + struct sgl_hw *cur_sgl; + struct sgl_hw *next_sgl; + struct sgl_entry_hw *entry; + dma_addr_t pa; + + cur_sgl = data; + while (cur_sgl) { + entry = cur_sgl->entries; + for (i = 0; (i < cur_sgl->entry_sum_in_sgl && + entry->buf); i++) { + sg_set_buf(sglist, (void *)entry->buf, entry->len); + pa = acc_virt_to_phys(pdev, sg_virt(sglist), + (size_t)sglist->length, + smmu_state); + sg_dma_address(sglist) = pa; + sglist++; + entry->buf = (char *)pa; + entry++; + } + if (cur_sgl->next) { + next_sgl = cur_sgl->next; + sg_set_buf(sglist, (void *)next_sgl, + (u32)(sizeof(struct sgl_hw) + + sizeof(struct sgl_entry_hw) * + (next_sgl->entry_sum_in_sgl))); + pa = acc_virt_to_phys(pdev, sg_virt(sglist), + (size_t)sglist->length, + smmu_state); + sg_dma_address(sglist) = pa; + sglist++; + cur_sgl->next = (struct sgl_hw *)pa; + } else { + next_sgl = NULL; + } + cur_sgl = next_sgl; + } +} + +int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, + void **sglist_head, u32 smmu_state) +{ + u32 addr_cnt; + struct scatterlist *sglist; + + if (!data) { + pr_err("[%s] Para sgl_s is NULL.\n", __func__); + return -EINVAL; + } + + if (unlikely(!data->entry_sum_in_sgl) || + data->entry_sum_in_sgl > data->entry_num_in_sgl) { + pr_err("[%s] Para sge num is wrong.\n", __func__); + return -EINVAL; + } + + addr_cnt = sgl_addr_cnt(data); + sglist = kcalloc(addr_cnt, sizeof(*sglist), GFP_KERNEL); + if (unlikely(!sglist)) { + pr_err("[%s] Malloc sglist fail.\n", __func__); + return -ENOMEM; + } + + *sglist_head = sglist; + sg_init_table(sglist, addr_cnt); + sg_set_buf(sglist, (void *)data, (u32)(sizeof(struct sgl_hw) + + sizeof(struct sgl_entry_hw) * (data->entry_sum_in_sgl))); + sg_dma_address(sglist) = acc_virt_to_phys(pdev, sg_virt(sglist), + (size_t)sglist->length, smmu_state); + sglist++; + acc_sgl_to_scatterlist(pdev, data, sglist, smmu_state); + + return 0; +} + +int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, + u32 smmu_state) +{ + int i; + struct sgl_hw *cur_sgl; + struct sgl_hw *next_sgl; + struct sgl_entry_hw *entry; + struct scatterlist *sglist; + struct scatterlist *sg; + int ret = -EFAULT; + + if (!sglist_head) { + pr_err("[%s] Para sglist_head is NULL.\n", __func__); + return -EINVAL; + } + + sglist = (struct scatterlist *)sglist_head; + sg = sglist; + cur_sgl = (struct sgl_hw *)sg_virt(sg); + acc_phys_to_virt(pdev, sg_dma_address(sg), + (size_t)sg->length, smmu_state); + while (cur_sgl) { + entry = cur_sgl->entries; + for (i = 0; (i < cur_sgl->entry_sum_in_sgl && + entry->buf); i++) { + sg = sg_next(sg); + if (unlikely(!sg)) { + pr_err("[%s][%d]Scatterlist happens to be NULL.\n", + __func__, __LINE__); + goto FAIL; + } + entry->buf = (char *)sg_virt(sg); + 
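			/*
+			 * The bus address that acc_sgl_virt_to_phys() stored
+			 * in entry->buf has just been replaced with the CPU
+			 * virtual address; now release the per-entry DMA
+			 * mapping (a no-op when the SMMU is disabled).
+			 */
+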
acc_phys_to_virt(pdev, sg_dma_address(sg), + (size_t)sg->length, smmu_state); + entry++; + } + + if (cur_sgl->next) { + sg = sg_next(sg); + if (unlikely(!sg)) { + pr_err("[%s][%d]Scatterlist happens to be NULL.\n", + __func__, __LINE__); + goto FAIL; + } + next_sgl = (struct sgl_hw *)sg_virt(sg); + acc_phys_to_virt(pdev, sg_dma_address(sg), + (size_t)sg->length, smmu_state); + cur_sgl->next = next_sgl; + } else { + next_sgl = NULL; + } + + cur_sgl = next_sgl; + } + + ret = 0; + +FAIL: + kfree(sglist); + return ret; +} + diff --git a/drivers/crypto/hisilicon/rde/rde_data.h b/drivers/crypto/hisilicon/rde/rde_data.h new file mode 100644 index 0000000000000000000000000000000000000000..fddc92e53010c9408a05f64122fa8cb5a6b2fead --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_data.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __ACC_DATA_H__ +#define __ACC_DATA_H__ + +#include +#include +#include "rde_api.h" + +static inline dma_addr_t acc_virt_to_phys(struct pci_dev *pdev, void *va, + size_t size, u32 smmu_state) +{ + dma_addr_t pa; + + if (!smmu_state) + pa = (dma_addr_t)virt_to_phys(va); + else + pa = pci_map_single(pdev, va, size, DMA_BIDIRECTIONAL); + + return pa; +} + +static inline dma_addr_t acc_pfn_to_phys(void *va) +{ + unsigned long pfn; + unsigned long off; + unsigned long pa; + + off = (uintptr_t)va % PAGE_SIZE; + pfn = vmalloc_to_pfn(va); + pa = (pfn << PAGE_SHIFT) + off; + + return pa; +} + +static inline void acc_phys_to_virt(struct pci_dev *pdev, dma_addr_t pa, + size_t size, u32 smmu_state) +{ + if (smmu_state) + pci_unmap_single(pdev, pa, size, DMA_BIDIRECTIONAL); +} + +int acc_sgl_dump(struct sgl_hw *data); +int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, + void **sglist_head, u32 smmu_state); +int acc_sgl_phys_to_virt(struct pci_dev *pdev, + void *sglist_head, u32 smmu_state); + +#endif diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c new file mode 100644 index 0000000000000000000000000000000000000000..a47be8f720649d6cddb1f0f33c36111d70376f76 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_main.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rde.h" + +#define HRDE_QUEUE_NUM_V1 4096 +#define HRDE_QUEUE_NUM_V2 1024 +#define HRDE_PCI_DEVICE_ID 0xa25a +#define HRDE_SQE_SIZE 64 +#define HRDE_PF_DEF_Q_NUM 64 +#define HRDE_PF_DEF_Q_BASE 0 + +#define HRDE_RAS_INT_MSK 0x310290 +#define HRDE_RAS_CE_MSK BIT(2) +#define HRDE_RAS_NFE_MSK BIT(1) +#define HRDE_RAS_ENABLE BIT(0) +#define HRDE_INT_MSK 0x310314 +#define HRDE_INT_ENABLE 0x0 +#define HRDE_INT_DISABLE 0x3ffff +#define HRDE_BD_PREFETCH BIT(8) +#define HRDE_INT_SOURCE 0x31030c +#define HRDE_INT_SOURCE_CLEAR GENMASK(17, 0) +#define HRDE_INT_STATUS 0x310318 +#define HRDE_DFX_CTRL_0 0x310240 +#define HRDE_ECC_ERR 0x310234 +#define HRDE_ECC_ERR_CNT 0x310238 +#define HRDE_OP_DONE_CNT 0x310250 +#define HRDE_OP_ERR_CNT 0x310254 +#define HRDE_OP_ABORT_CNT 0x310258 +#define HRDE_FIFO_STAT_0 0x310200 +#define HRDE_DFX_STAT_7 0x310334 +#define HRDE_DFX_STAT_8 0x310338 +#define DFX_CTRL0 0x3 +#define WRITE_CLEAR_VAL GENMASK(31, 0) +#define HRDE_AWCACHE 0x310154 +#define HRDE_ARCACHE 0x31015c +#define AWCACHE 0xff0 +#define ARCACHE 0xfff0 +#define HRDE_CFG 0x310000 +#define CHN_CFG 0x5010101 +#define HRDE_AXI_SHUTDOWN_EN BIT(26) +#define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF +#define HRDE_WR_MSI_PORT BIT(0) +#define HRDE_AWUSER_BD_1 0x310104 +#define HRDE_ARUSER_BD_1 0x310114 +#define HRDE_ARUSER_SGL_1 0x310124 +#define HRDE_AWUSER_DAT_1 0x310134 +#define HRDE_ARUSER_DAT_1 0x310144 +#define HRDE_USER_SMMU 0x40001070 +#define HRDE_ERR_CNT 0x310238 +#define HRDE_ECC_1BIT_ERR BIT(0) +#define HRDE_ECC_2BIT_ERR BIT(1) +#define HRDE_ECC_1BIT_SHIFT 16 +#define HRDE_ECC_2BIT_CNT_MSK GENMASK(15, 0) +#define HRDE_STATE_INT_ERR GENMASK(11, 2) +#define HRDE_AM_CURR_PORT_STS 0x300100 +#define HRDE_MASTER_TRANS_RET 0x300150 +#define HRDE_FSM_MAX_CNT 0x310280 +#define HRDE_QM_IDEL_STATUS 0x1040e4 +#define HRDE_QM_PEH_DFX_INFO0 0x1000fc +#define PEH_MSI_MASK_SHIFT 0x90 +#define CACHE_CTL 0x1833 +#define HRDE_DBGFS_VAL_MAX_LEN 20 +#define HRDE_PROBE_ADDR 0x31025c +#define HRDE_PROBE_DATA 0x310260 +#define HRDE_PROBE_EN BIT(16) +#define HRDE_PROBE_DATA_EN BIT(17) +#define HRDE_STRB_CS_SHIFT 9 + +static const char hisi_rde_name[] = "hisi_rde"; +static struct dentry *hrde_debugfs_root; +static struct hisi_qm_list rde_devices; +static void hisi_rde_ras_proc(struct work_struct *work); + +static const struct hisi_rde_hw_error rde_hw_error[] = { + {.int_msk = BIT(0), .msg = "Rde_ecc_1bit_err"}, + {.int_msk = BIT(1), .msg = "Rde_ecc_2bit_err"}, + {.int_msk = BIT(2), .msg = "Rde_stat_mgmt_state_timeout_err"}, + {.int_msk = BIT(3), .msg = "Rde_data_wr_state_timeout_err"}, + {.int_msk = BIT(4), .msg = "Rde_alg_state_timeout_err"}, + {.int_msk = BIT(5), .msg = "Rde_data_ar_state_timeout_err"}, + {.int_msk = BIT(6), .msg = "Rde_bd_mgmt_state_timeout_err"}, + {.int_msk = BIT(7), .msg = "Rde_list_parse_ar_state_timeout_err"}, + {.int_msk = BIT(8), .msg = "Rde_bd_prefetch_state_timeout_err"}, + {.int_msk = BIT(9), .msg = "Rde_dst_buf_parse_state_timeout_err"}, + {.int_msk = BIT(10), .msg = "Rde_src_buf_parse_state_timeout_err"}, + {.int_msk = BIT(11), .msg = "Rde_chn_timeout_err"}, + {.int_msk = BIT(12), .msg = "Rde_bd_bresp_err"}, + {.int_msk = BIT(13), .msg = "Rde_data_bresp_err"}, + {.int_msk = BIT(14), .msg = "Rde_data_rresp_err"}, + {.int_msk = BIT(15), .msg = "Rde_sgl_rresp_err"}, + {.int_msk = BIT(16), .msg = "Rde_list_rresp_err"}, + {.int_msk = BIT(17), .msg = 
"Rde_bd_rresp_err"}, + { /* sentinel */ } +}; + +enum ctrl_debug_file_index { + HRDE_CURRENT_FUNCTION, + HRDE_CURRENT_BD, + HRDE_DEBUG_FILE_NUM, + /* RDE not support CNT_CLR_CE config, default enable */ +}; + +static const char *const ctrl_debug_file_name[] = { + [HRDE_CURRENT_FUNCTION] = "current_function_id", + [HRDE_CURRENT_BD] = "current_bd", +}; + +struct ctrl_debug_file { + enum ctrl_debug_file_index index; + spinlock_t lock; + struct hisi_rde_ctrl *ctrl; +}; + +/* + * One RDE controller has one PF and multiple VFs, some global configurations + * which PF has need this structure. + * Just relevant for PF. + */ +struct hisi_rde_ctrl { + struct hisi_rde *hisi_rde; + struct ctrl_debug_file files[HRDE_DEBUG_FILE_NUM]; +}; + +static struct debugfs_reg32 hrde_dfx_regs[] = { + {"HRDE_DFX_STAT_0", 0x310220ull}, + {"HRDE_DFX_STAT_1", 0x310224ull}, + {"HRDE_DFX_STAT_2", 0x310320ull}, + {"HRDE_DFX_STAT_3", 0x310324ull}, + {"HRDE_DFX_STAT_4", 0x310328ull}, + {"HRDE_DFX_STAT_5", 0x31032cull}, + {"HRDE_DFX_STAT_6", 0x310330ull}, + {"HRDE_DFX_STAT_7", 0x310334ull}, + {"HRDE_DFX_STAT_8", 0x310338ull}, + {"HRDE_FIFO_STAT_0", 0x310200ull}, + {"HRDE_FIFO_STAT_1", 0x310204ull}, + {"HRDE_OP_TAG_0", 0x310214ull}, + {"HRDE_OP_TAG_1", 0x310218ull}, + {"HRDE_OP_TAG_2", 0x31021cull}, + {"HRDE_ECC_ERR", 0x310234ull}, + {"HRDE_ECC_ERR_CNT", 0x310238ull}, + {"HRDE_OP_DONE_CNT", 0x310250ull}, + {"HRDE_OP_ERR_CNT", 0x310254ull}, + {"HRDE_OP_ABORT_CNT", 0x310258ull}, + {"HRDE_TMP_ADDR_HIGH", 0x310270ull}, + {"HRDE_TMP_ADDR_LOW", 0x310274ull}, + {"HRDE_TMP_LENGTH", 0x310278ull}, + {"HRDE_INT_STATUS", 0x310318ull}, +}; + +static struct debugfs_reg32 hrde_ooo_dfx_regs[] = { + {"HRDE_AM_CURR_PORT_STS", 0x300100ull}, + {"HRDE_AM_ROB_ECC_ERR_ADDR", 0x30010cull}, + {"HRDE_AM_CURR_TRANS_RETURN", 0x300150ull}, + {"HRDE_AM_CURR_RD_TXID_STS_0", 0x300160ull}, + {"HRDE_AM_CURR_RD_TXID_STS_1", 0x300164ull}, + {"HRDE_AM_CURR_RD_TXID_STS_2", 0x300168ull}, + {"HRDE_AM_CURR_WR_TXID_STS_0", 0x300170ull}, + {"HRDE_AM_CURR_WR_TXID_STS_1", 0x300174ull}, + {"HRDE_AM_CURR_WR_TXID_STS_2", 0x300178ull}, +}; + +static int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static const struct kernel_param_ops uacce_mode_ops = { + .set = uacce_mode_set, + .get = param_get_int, +}; + +static int uacce_mode = UACCE_MODE_NOUACCE; +module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444); +MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2"); + +static int pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + return q_num_set(val, kp, HRDE_PCI_DEVICE_ID); +} + +static const struct kernel_param_ops pf_q_num_ops = { + .set = pf_q_num_set, + .get = param_get_int, +}; + +static u32 pf_q_num = HRDE_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); + +static const struct pci_device_id hisi_rde_dev_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, hisi_rde_dev_ids); + +struct hisi_qp *rde_create_qp(void) +{ + int node = cpu_to_node(raw_smp_processor_id()); + struct hisi_qp *qp; + int ret; + + ret = hisi_qm_alloc_qps_node(node, &rde_devices, &qp, 1, 0); + if (!ret) + return qp; + + return NULL; +} + +static int hisi_rde_engine_init(struct hisi_qm *qm) +{ + writel(DFX_CTRL0, qm->io_base + HRDE_DFX_CTRL_0); + + /* usr domain */ + writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_BD_1); + writel(HRDE_USER_SMMU, qm->io_base 
+ HRDE_ARUSER_BD_1); + writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_DAT_1); + writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_DAT_1); + writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_SGL_1); + /* rde cache */ + writel(AWCACHE, qm->io_base + HRDE_AWCACHE); + writel(ARCACHE, qm->io_base + HRDE_ARCACHE); + + /* rde chn enable + outstanding config */ + writel(CHN_CFG, qm->io_base + HRDE_CFG); + + return 0; +} + +static int hisi_rde_set_user_domain_and_cache(struct hisi_qm *qm) +{ + /* qm user domain */ + writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); + writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); + writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); + writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); + writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); + + /* qm cache */ + writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); + writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); + + /* disable BME/PM/SRIOV FLR */ + writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); + + writel(CACHE_CTL, qm->io_base + QM_CACHE_CTL); + + return hisi_rde_engine_init(qm); +} + +static void hisi_rde_debug_regs_clear(struct hisi_qm *qm) +{ + /* clear rde debug regs */ + readl(qm->io_base + HRDE_ECC_ERR); + readl(qm->io_base + HRDE_ECC_ERR_CNT); + readl(qm->io_base + HRDE_OP_DONE_CNT); + readl(qm->io_base + HRDE_OP_ERR_CNT); + readl(qm->io_base + HRDE_OP_ABORT_CNT); + writel(WRITE_CLEAR_VAL, qm->io_base + HRDE_FIFO_STAT_0); + writel(WRITE_CLEAR_VAL, qm->io_base + HRDE_DFX_STAT_7); + writel(WRITE_CLEAR_VAL, qm->io_base + HRDE_DFX_STAT_8); + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + hisi_qm_debug_regs_clear(qm); +} + +static void hisi_rde_hw_error_enable(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HRDE_CFG); + + /* clear RDE hw error source if any */ + writel(HRDE_INT_SOURCE_CLEAR, qm->io_base + HRDE_INT_SOURCE); + writel(HRDE_RAS_ENABLE, qm->io_base + HRDE_RAS_INT_MSK); + + /* bd prefetch should be masked to prevent misreport */ + writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH), + qm->io_base + HRDE_INT_MSK); + + /* when an m-bit error occurs, master ooo will close */ + val = val | HRDE_AXI_SHUTDOWN_EN; + writel(val, qm->io_base + HRDE_CFG); +} + +static void hisi_rde_hw_error_disable(struct hisi_qm *qm) +{ + u32 ras_msk = HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK; + u32 val; + + val = readl(qm->io_base + HRDE_CFG); + + writel(ras_msk, qm->io_base + HRDE_RAS_INT_MSK); + writel(HRDE_INT_DISABLE, qm->io_base + HRDE_INT_MSK); + + /* when an m-bit error occurs, master ooo will not close */ + val = val & HRDE_AXI_SHUTDOWN_DIS; + writel(val, qm->io_base + HRDE_CFG); +} + +static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) +{ + struct hisi_rde *hisi_rde = file->ctrl->hisi_rde; + + return &hisi_rde->qm; +} + +static u32 current_qm_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int current_qm_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val > 0) { + pr_err("Function id should be equal to 0.\n"); + return -EINVAL; + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base +
QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static int current_bd_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + HRDE_PROBE_DATA); +} + +static int current_bd_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp = 0; + + if (val >= (HRDE_SQE_SIZE / sizeof(u32))) { + pr_err("Width index should be smaller than 16.\n"); + return -EINVAL; + } + + tmp = HRDE_PROBE_DATA_EN | HRDE_PROBE_EN | (val << HRDE_STRB_CS_SHIFT); + writel(tmp, qm->io_base + HRDE_PROBE_ADDR); + + return 0; +} + +static ssize_t ctrl_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HRDE_DBGFS_VAL_MAX_LEN]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + switch (file->index) { + case HRDE_CURRENT_FUNCTION: + val = current_qm_read(file); + ret = snprintf(tbuf, HRDE_DBGFS_VAL_MAX_LEN, "%u\n", val); + break; + case HRDE_CURRENT_BD: + val = current_bd_read(file); + ret = snprintf(tbuf, HRDE_DBGFS_VAL_MAX_LEN, "%x\n", val); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + spin_unlock_irq(&file->lock); + + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HRDE_DBGFS_VAL_MAX_LEN]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= HRDE_DBGFS_VAL_MAX_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, HRDE_DBGFS_VAL_MAX_LEN - 1, + pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + switch (file->index) { + case HRDE_CURRENT_FUNCTION: + ret = current_qm_write(file, val); + if (ret) + goto err_input; + break; + case HRDE_CURRENT_BD: + ret = current_bd_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + spin_unlock_irq(&file->lock); + + return count; + + err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations ctrl_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ctrl_debug_read, + .write = ctrl_debug_write, +}; + +static int hisi_rde_chn_debug_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct debugfs_regset32 *regset, *regset_ooo; + struct dentry *tmp_d, *tmp; + char buf[HRDE_DBGFS_VAL_MAX_LEN]; + int ret; + + ret = snprintf(buf, HRDE_DBGFS_VAL_MAX_LEN, "rde_dfx"); + if (ret < 0) + return -ENOENT; + + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); + if (!tmp_d) + return -ENOENT; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOENT; + regset->regs = hrde_dfx_regs; + regset->nregs = ARRAY_SIZE(hrde_dfx_regs); + regset->base = qm->io_base; + tmp = debugfs_create_regset32("chn_regs", 0444, tmp_d, regset); + if (!tmp) + return -ENOENT; + + regset_ooo = devm_kzalloc(dev, sizeof(*regset_ooo), GFP_KERNEL); + if (!regset_ooo) + return -ENOENT; + regset_ooo->regs = hrde_ooo_dfx_regs; + regset_ooo->nregs = ARRAY_SIZE(hrde_ooo_dfx_regs); + regset_ooo->base = qm->io_base; + tmp = debugfs_create_regset32("ooo_regs", 0444, tmp_d, regset_ooo); + if (!tmp) + return 
-ENOENT; + + return 0; +} + +static int hisi_rde_ctrl_debug_init(struct hisi_qm *qm) +{ + struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm); + struct dentry *tmp; + int i; + + for (i = HRDE_CURRENT_FUNCTION; i < HRDE_DEBUG_FILE_NUM; i++) { + spin_lock_init(&hisi_rde->ctrl->files[i].lock); + hisi_rde->ctrl->files[i].ctrl = hisi_rde->ctrl; + hisi_rde->ctrl->files[i].index = i; + + tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600, + qm->debug.debug_root, + hisi_rde->ctrl->files + i, + &ctrl_debug_fops); + if (!tmp) + return -ENOENT; + } + + return hisi_rde_chn_debug_init(qm); +} + +static int hisi_rde_debugfs_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct dentry *dev_d; + int ret; + + dev_d = debugfs_create_dir(dev_name(dev), hrde_debugfs_root); + if (!dev_d) + return -ENOENT; + + qm->debug.debug_root = dev_d; + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->pdev->device == HRDE_PCI_DEVICE_ID) { + ret = hisi_rde_ctrl_debug_init(qm); + if (ret) + goto failed_to_create; + } + + return 0; + + failed_to_create: + debugfs_remove_recursive(qm->debug.debug_root); + return ret; +} + +static void hisi_rde_debugfs_exit(struct hisi_qm *qm) +{ + debugfs_remove_recursive(qm->debug.debug_root); + + if (qm->fun_type == QM_HW_PF) { + hisi_rde_debug_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + } +} + +void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_rde_hw_error *err = rde_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_err_ratelimited(dev, + "[%s] [Error status=0x%x] found.\n", + err->msg, err->int_msk); + err++; + } + + if (HRDE_ECC_2BIT_ERR & err_sts) { + err_val = (readl(qm->io_base + HRDE_ERR_CNT) & + HRDE_ECC_2BIT_CNT_MSK); + dev_err_ratelimited(dev, + "Rde ecc 2bit sram num=0x%x.\n", err_val); + } + + if (HRDE_STATE_INT_ERR & err_sts) { + err_val = readl(qm->io_base + HRDE_AM_CURR_PORT_STS); + dev_err_ratelimited(dev, + "Rde ooo cur port sts=0x%x.\n", err_val); + err_val = readl(qm->io_base + HRDE_MASTER_TRANS_RET); + dev_err_ratelimited(dev, + "Rde ooo outstanding sts=0x%x.\n", err_val); + } +} + +u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HRDE_INT_STATUS); +} + +void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HRDE_INT_SOURCE); +} + +static void hisi_rde_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HRDE_CFG); + writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG); + writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG); +} + +static void hisi_rde_err_ini_set(struct hisi_qm *qm) +{ + qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status; + qm->err_ini.err_info.ecc_2bits_mask = HRDE_ECC_2BIT_ERR; + qm->err_ini.err_info.ce = QM_BASE_CE; + qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; + qm->err_ini.err_info.fe = 0; + qm->err_ini.err_info.msi = 0; + qm->err_ini.err_info.acpi_rst = "RRST"; + qm->err_ini.hw_err_disable = hisi_rde_hw_error_disable; + qm->err_ini.hw_err_enable = hisi_rde_hw_error_enable; + qm->err_ini.set_usr_domain_cache = hisi_rde_set_user_domain_and_cache; + qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log; + qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo; + qm->err_ini.err_info.msi_wr_port = HRDE_WR_MSI_PORT; +} + +static int 
hisi_rde_pf_probe_init(struct hisi_qm *qm) +{ + struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm); + struct hisi_rde_ctrl *ctrl; + int ret; + + ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + hisi_rde->ctrl = ctrl; + ctrl->hisi_rde = hisi_rde; + + switch (qm->ver) { + case QM_HW_V1: + qm->ctrl_q_num = HRDE_QUEUE_NUM_V1; + break; + + case QM_HW_V2: + qm->ctrl_q_num = HRDE_QUEUE_NUM_V2; + break; + + default: + return -EINVAL; + } + + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) + return ret; + + hisi_qm_dev_err_init(qm); + qm->err_ini.open_axi_master_ooo(qm); + hisi_rde_debug_regs_clear(qm); + + return 0; +} + +static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +{ + int ret; + + qm->algs = "ec\n"; + qm->uacce_mode = uacce_mode; + qm->pdev = pdev; + ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE); + if (ret) + return ret; + + qm->qm_list = &rde_devices; + qm->sqe_size = HRDE_SQE_SIZE; + qm->dev_name = hisi_rde_name; + qm->abnormal_fix = hisi_rde_abnormal_fix; + hisi_rde_err_ini_set(qm); + + return 0; +} + +static u32 hisi_rde_smmu_state(struct device *dev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (domain) { + if (domain->type == IOMMU_DOMAIN_DMA) + return true; + else + return false; + } else { + return false; + } +} + +static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_rde *hisi_rde; + struct hisi_qm *qm; + int ret; + + hisi_rde = devm_kzalloc(&pdev->dev, sizeof(*hisi_rde), GFP_KERNEL); + if (!hisi_rde) + return -ENOMEM; + + INIT_WORK(&hisi_rde->reset_work, hisi_rde_ras_proc); + hisi_rde->smmu_state = hisi_rde_smmu_state(&pdev->dev); + + qm = &hisi_rde->qm; + qm->fun_type = QM_HW_PF; + + ret = hisi_rde_qm_pre_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to pre init qm!\n"); + return ret; + } + + ret = hisi_qm_init(qm); + if (ret) { + pci_err(pdev, "Failed to init qm!\n"); + return ret; + } + + ret = hisi_rde_pf_probe_init(qm); + if (ret) { + pci_err(pdev, "Failed to init pf!\n"); + goto err_qm_uninit; + } + + ret = hisi_qm_start(qm); + if (ret) { + pci_err(pdev, "Failed to start qm!\n"); + goto err_qm_uninit; + } + + ret = hisi_rde_debugfs_init(qm); + if (ret) + pci_warn(pdev, "Failed to init debugfs!\n"); + + hisi_qm_add_to_list(qm, &rde_devices); + + return 0; + +err_qm_uninit: + hisi_qm_uninit(qm); + + return ret; +} + +static void hisi_rde_remove(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm); + + hisi_qm_remove_wait_delay(qm, &rde_devices); + + qm->abnormal_fix = NULL; + hisi_qm_dev_err_uninit(qm); + cancel_work_sync(&hisi_rde->reset_work); + hisi_qm_del_from_list(qm, &rde_devices); + hisi_rde_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + hisi_qm_uninit(qm); +} + +static void hisi_rde_ras_proc(struct work_struct *work) +{ + struct pci_dev *pdev; + struct hisi_rde *hisi_rde; + pci_ers_result_t ret; + + hisi_rde = container_of(work, struct hisi_rde, reset_work); + pdev = hisi_rde->qm.pdev; + if (!pdev) + return; + + ret = hisi_qm_process_dev_error(pdev); + if (ret == PCI_ERS_RESULT_NEED_RESET) + if (hisi_qm_controller_reset(&hisi_rde->qm)) + dev_err(&pdev->dev, "Failed to reset device!\n"); +} + +int hisi_rde_abnormal_fix(struct hisi_qm *qm) +{ + struct hisi_rde *hisi_rde; + + if (!qm) + return -EINVAL; + + hisi_rde = container_of(qm, struct hisi_rde, qm); + + return 
schedule_work(&hisi_rde->reset_work); +} + +static const struct pci_error_handlers hisi_rde_err_handler = { + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, +}; + +static struct pci_driver hisi_rde_pci_driver = { + .name = "hisi_rde", + .id_table = hisi_rde_dev_ids, + .probe = hisi_rde_probe, + .remove = hisi_rde_remove, + .err_handler = &hisi_rde_err_handler, + .shutdown = hisi_qm_dev_shutdown, +}; + +static void hisi_rde_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + hrde_debugfs_root = debugfs_create_dir("hisi_rde", NULL); + if (IS_ERR_OR_NULL(hrde_debugfs_root)) + hrde_debugfs_root = NULL; +} + +static void hisi_rde_unregister_debugfs(void) +{ + debugfs_remove_recursive(hrde_debugfs_root); +} + +static int __init hisi_rde_init(void) +{ + int ret; + + INIT_LIST_HEAD(&rde_devices.list); + mutex_init(&rde_devices.lock); + rde_devices.check = NULL; + hisi_rde_register_debugfs(); + + ret = pci_register_driver(&hisi_rde_pci_driver); + if (ret < 0) { + hisi_rde_unregister_debugfs(); + pr_err("Failed to register pci driver!\n"); + } + + return ret; +} + +static void __exit hisi_rde_exit(void) +{ + pci_unregister_driver(&hisi_rde_pci_driver); + hisi_rde_unregister_debugfs(); +} + +module_init(hisi_rde_init); +module_exit(hisi_rde_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yu'an Wang"); +MODULE_DESCRIPTION("Driver for HiSilicon RDE accelerator"); diff --git a/drivers/crypto/hisilicon/rde/rde_usr_if.h b/drivers/crypto/hisilicon/rde/rde_usr_if.h new file mode 100644 index 0000000000000000000000000000000000000000..c67732bab75e66e82ea0d9dd9a989972b6cf6653 --- /dev/null +++ b/drivers/crypto/hisilicon/rde/rde_usr_if.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#ifndef __RDE_USR_IF_H__ +#define __RDE_USR_IF_H__ + +struct hisi_rde_sqe { + __u64 rsvd0: 16; + __u64 op_tag: 16; + __u64 alg_blk_size: 2; + __u64 cm_type: 1; + __u64 cm_le: 1; + __u64 abort: 1; + __u64 src_nblks: 6; + __u64 dst_nblks: 5; + __u64 chk_dst_ref_ctrl: 4; + __u64 chk_dst_grd_ctrl: 4; + __u64 op_type: 8; + __u64 block_size: 16; + __u64 page_pad_type: 2; + __u64 dif_type: 1; + __u64 rsvd1: 3; + __u64 crciv_sel: 1; + __u64 crciv_en: 1; + __u64 status: 8; + __u64 rsvd2: 10; + __u64 cm_len: 6; + __u64 transfer_size: 16; + __u64 coef_matrix_addr; + __u64 src_addr; + __u64 src_tag_addr; + __u64 dst_addr; + __u64 dst_tag_addr; + __u64 dw7; +}; + +#endif diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index f7d6d690116ee8f32bada36c6b25520976c219c5..cdc4f9a171d986625352319d76ccf243e417410a 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_in_nents; int *splits_out_nents = NULL; struct sec_request_el *el, *temp; + bool split = skreq->src != skreq->dst; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, if (ret) goto err_free_split_sizes; - if (skreq->src != skreq->dst) { + if (split) { sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, @@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, split_sizes[i], skreq->src != skreq->dst, splits_in[i], splits_in_nents[i], - splits_out[i], - splits_out_nents[i], info); + split ? splits_out[i] : NULL, + split ? splits_out_nents[i] : 0, + info); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; @@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, * more refined but this is unlikely to happen so no need. 
*/ - /* Cleanup - all elements in pointer arrays have been coppied */ - kfree(splits_in_nents); - kfree(splits_in); - kfree(splits_out_nents); - kfree(splits_out); - kfree(split_sizes); - /* Grab a big lock for a long time to avoid concurrency issues */ mutex_lock(&queue->queuelock); @@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, (!queue->havesoftqueue || kfifo_avail(&queue->softqueue) > steps)) || !list_empty(&ctx->backlog)) { + ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); mutex_unlock(&queue->queuelock); - return -EBUSY; + goto out; } - ret = -EBUSY; mutex_unlock(&queue->queuelock); goto err_free_elements; } @@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, if (ret) goto err_free_elements; - return -EINPROGRESS; + ret = -EINPROGRESS; +out: + /* Cleanup - all elements in pointer arrays have been copied */ + kfree(splits_in_nents); + kfree(splits_in); + kfree(splits_out_nents); + kfree(splits_out); + kfree(split_sizes); + return ret; err_free_elements: list_for_each_entry_safe(el, temp, &sec_req->elements, head) { @@ -854,7 +857,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, crypto_skcipher_ivsize(atfm), DMA_BIDIRECTIONAL); err_unmap_out_sg: - if (skreq->src != skreq->dst) + if (split) sec_unmap_sg_on_err(skreq->dst, steps, splits_out, splits_out_nents, sec_req->len_out, info->dev); diff --git a/drivers/crypto/hisilicon/sec2/Makefile b/drivers/crypto/hisilicon/sec2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b4f6cf14be3a4f9e76e3d057ac833f7f3e7c6128 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += hisi_sec2.o +hisi_sec2-objs = sec_main.o sec_crypto.o diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h new file mode 100644 index 0000000000000000000000000000000000000000..450c08d13c7669435282e27b4c7bd1f33eccb341 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 HiSilicon Limited. 
*/ + +#ifndef __HISI_SEC_V2_H +#define __HISI_SEC_V2_H + +#include + +#include "../qm.h" +#include "sec_crypto.h" + +/* Algorithm resource per hardware SEC queue */ +struct sec_alg_res { + u8 *pbuf; + dma_addr_t pbuf_dma; + u8 *c_ivin; + dma_addr_t c_ivin_dma; + u8 *out_mac; + dma_addr_t out_mac_dma; +}; + +/* Cipher request of SEC private */ +struct sec_cipher_req { + struct hisi_acc_hw_sgl *c_in; + dma_addr_t c_in_dma; + struct hisi_acc_hw_sgl *c_out; + dma_addr_t c_out_dma; + u8 *c_ivin; + dma_addr_t c_ivin_dma; + struct skcipher_request *sk_req; + u32 c_len; + bool encrypt; +}; + +/* SEC request of Crypto */ +struct sec_req { + struct sec_sqe sec_sqe; + struct sec_ctx *ctx; + struct sec_qp_ctx *qp_ctx; + + struct sec_cipher_req c_req; + + int err_type; + int req_id; + u32 flag; + + /* Status of the SEC request */ + bool fake_busy; + bool use_pbuf; +}; + +/** + * struct sec_req_op - Operations for SEC request + * @buf_map: DMA map the SGL buffers of the request + * @buf_unmap: DMA unmap the SGL buffers of the request + * @bd_fill: Fill the SEC queue BD + * @bd_send: Send the SEC BD into the hardware queue + * @callback: Call back for the request + * @process: Main processing logic of Skcipher + */ +struct sec_req_op { + int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req); + void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req); + void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req); + int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req); + int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req); + void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err); + int (*process)(struct sec_ctx *ctx, struct sec_req *req); +}; + +/* SEC cipher context which cipher's relatives */ +struct sec_cipher_ctx { + u8 *c_key; + dma_addr_t c_key_dma; + sector_t iv_offset; + u32 c_gran_size; + u32 ivsize; + u8 c_mode; + u8 c_alg; + u8 c_key_len; +}; + +/* SEC queue context which defines queue's relatives */ +struct sec_qp_ctx { + struct hisi_qp *qp; + struct sec_req *req_list[QM_Q_DEPTH]; + struct idr req_idr; + struct sec_alg_res res[QM_Q_DEPTH]; + struct sec_ctx *ctx; + spinlock_t req_lock; + struct hisi_acc_sgl_pool *c_in_pool; + struct hisi_acc_sgl_pool *c_out_pool; +}; + +enum sec_alg_type { + SEC_SKCIPHER, + SEC_AEAD +}; + +/* SEC Crypto TFM context which defines queue and cipher .etc relatives */ +struct sec_ctx { + struct sec_qp_ctx *qp_ctx; + struct sec_dev *sec; + const struct sec_req_op *req_op; + struct hisi_qp **qps; + + /* Half queues for encipher, and half for decipher */ + u32 hlf_q_num; + + /* Threshold for fake busy, trigger to return -EBUSY to user */ + u32 fake_req_limit; + + /* Currrent cyclic index to select a queue for encipher */ + atomic_t enc_qcyclic; + + /* Currrent cyclic index to select a queue for decipher */ + atomic_t dec_qcyclic; + + enum sec_alg_type alg_type; + bool pbuf_supported; + struct sec_cipher_ctx c_ctx; +}; + +enum sec_endian { + SEC_LE = 0, + SEC_32BE, + SEC_64BE +}; + +enum sec_debug_file_index { + SEC_CURRENT_QM, + SEC_CLEAR_ENABLE, + SEC_DEBUG_FILE_NUM, +}; + +struct sec_debug_file { + enum sec_debug_file_index index; + spinlock_t lock; + struct hisi_qm *qm; +}; + +struct sec_dfx { + atomic64_t send_cnt; + atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t recv_busy_cnt; + atomic64_t err_bd_cnt; + atomic64_t invalid_req_cnt; + atomic64_t done_flag_cnt; +}; + +struct sec_debug { + struct sec_dfx dfx; + struct sec_debug_file files[SEC_DEBUG_FILE_NUM]; +}; + +struct sec_dev { + struct hisi_qm qm; + struct sec_debug 
debug; + u32 ctx_q_num; + bool iommu_used; +}; + +void sec_destroy_qps(struct hisi_qp **qps, int qp_num); +struct hisi_qp **sec_create_qps(void); +int sec_register_to_crypto(void); +void sec_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..c08812be6540b9674d08f3f6ab0e2cd4f845c79e --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -0,0 +1,1084 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sec.h" +#include "sec_crypto.h" + +#define SEC_PRIORITY 4001 +#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) +#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) +#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) +#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) + +/* SEC sqe(bd) bit operational relative MACRO */ +#define SEC_DE_OFFSET 1 +#define SEC_CI_GEN_OFFSET 6 +#define SEC_CIPHER_OFFSET 4 +#define SEC_SCENE_OFFSET 3 +#define SEC_DST_SGL_OFFSET 2 +#define SEC_SRC_SGL_OFFSET 7 +#define SEC_CKEY_OFFSET 9 +#define SEC_CMODE_OFFSET 12 +#define SEC_AKEY_OFFSET 5 +#define SEC_AEAD_ALG_OFFSET 11 +#define SEC_AUTH_OFFSET 6 + +#define SEC_FLAG_OFFSET 7 +#define SEC_FLAG_MASK 0x0780 +#define SEC_TYPE_MASK 0x0F +#define SEC_DONE_MASK 0x0001 + +#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) +#define SEC_SGL_SGE_NR 128 +#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) +#define SEC_CIPHER_AUTH 0xfe +#define SEC_AUTH_CIPHER 0x1 +#define SEC_MAX_MAC_LEN 64 +#define SEC_MAX_AAD_LEN 65535 +#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) + +#define SEC_PBUF_SZ 512 +#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ +#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE) +#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ + SEC_MAX_MAC_LEN * 2) +#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) +#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM) +#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \ + SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM)) +#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \ + SEC_PBUF_LEFT_SZ) + +#define SEC_SQE_LEN_RATE 4 +#define SEC_SQE_CFLAG 2 +#define SEC_SQE_AEAD_FLAG 3 +#define SEC_SQE_DONE 0x1 + +static atomic_t sec_active_devs; + +/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ +static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->c_req.encrypt) + return atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num; + + return atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + + ctx->hlf_q_num; +} + +static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->c_req.encrypt) + atomic_dec(&ctx->enc_qcyclic); + else + atomic_dec(&ctx->dec_qcyclic); +} + +static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) +{ + int req_id; + + spin_lock_bh(&qp_ctx->req_lock); + + req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, + 0, QM_Q_DEPTH, GFP_ATOMIC); + spin_unlock_bh(&qp_ctx->req_lock); + if (unlikely(req_id < 0)) { + dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + return req_id; + } + + req->qp_ctx = qp_ctx; + qp_ctx->req_list[req_id] = req; + return req_id; +} + +static void sec_free_req_id(struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int req_id = req->req_id; + + if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { + 
dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + return; + } + + qp_ctx->req_list[req_id] = NULL; + req->qp_ctx = NULL; + + spin_lock_bh(&qp_ctx->req_lock); + idr_remove(&qp_ctx->req_idr, req_id); + spin_unlock_bh(&qp_ctx->req_lock); +} + +static void sec_req_cb(struct hisi_qp *qp, void *resp) +{ + struct sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; + struct sec_sqe *bd = resp; + struct sec_ctx *ctx; + struct sec_req *req; + u16 done, flag; + int err = 0; + u8 type; + + type = bd->type_cipher_auth & SEC_TYPE_MASK; + if (unlikely(type != SEC_BD_TYPE2)) { + atomic64_inc(&dfx->err_bd_cnt); + pr_err("err bd type [%d]\n", type); + return; + } + + req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; + if (unlikely(!req)) { + atomic64_inc(&dfx->invalid_req_cnt); + atomic_inc(&qp->qp_status.used); + return; + } + + req->err_type = bd->type2.error_type; + ctx = req->ctx; + done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; + flag = (le16_to_cpu(bd->type2.done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + if (unlikely(req->err_type || done != SEC_SQE_DONE || + (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG))) { + dev_err_ratelimited(SEC_CTX_DEV(ctx), + "err_type[%d],done[%d],flag[%d]\n", + req->err_type, done, flag); + err = -EIO; + atomic64_inc(&dfx->done_flag_cnt); + } + + atomic64_inc(&dfx->recv_cnt); + + ctx->req_op->buf_unmap(ctx, req); + + ctx->req_op->callback(ctx, req, err); +} + +static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int ret; + + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && + !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + + spin_lock_bh(&qp_ctx->req_lock); + ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + if (ctx->fake_req_limit <= + atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { + req->fake_busy = true; + spin_unlock_bh(&qp_ctx->req_lock); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); + return -EBUSY; + } + spin_unlock_bh(&qp_ctx->req_lock); + + if (unlikely(ret == -EBUSY)) + return -ENOBUFS; + + if (likely(!ret)) { + ret = -EINPROGRESS; + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); + } + + return ret; +} + +/* Get DMA memory resources */ +static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) +{ + int i; + + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + &res->c_ivin_dma, GFP_KERNEL); + if (!res->c_ivin) + return -ENOMEM; + + for (i = 1; i < QM_Q_DEPTH; i++) { + res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; + res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; + } + + return 0; +} + +static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->c_ivin) + dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + res->c_ivin, res->c_ivin_dma); +} + +static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->pbuf) + dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ, + res->pbuf, res->pbuf_dma); +} + +/* + * To improve performance, pbuffer is used for + * small packets (< 512Bytes) as IOMMU translation using. 
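+ * Instead of mapping the request scatterlist through the IOMMU, the
+ * payload is copied into a per-request, DMA-coherent pbuf slice with
+ * sg_copy_to_buffer() (see sec_cipher_pbuf_map() below).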
+ */ +static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) +{ + int pbuf_page_offset; + int i, j, k; + + res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, + &res->pbuf_dma, GFP_KERNEL); + if (!res->pbuf) + return -ENOMEM; + + /* + * SEC_PBUF_PKG contains data pbuf, iv and + * out_mac : + * Every PAGE contains six SEC_PBUF_PKG + * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG + * So we need SEC_PBUF_PAGE_NUM numbers of PAGE + * for the SEC_TOTAL_PBUF_SZ + */ + for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) { + pbuf_page_offset = PAGE_SIZE * i; + for (j = 0; j < SEC_PBUF_NUM; j++) { + k = i * SEC_PBUF_NUM + j; + if (k == QM_Q_DEPTH) + break; + res[k].pbuf = res->pbuf + + j * SEC_PBUF_PKG + pbuf_page_offset; + res[k].pbuf_dma = res->pbuf_dma + + j * SEC_PBUF_PKG + pbuf_page_offset; + } + } + return 0; +} + +static int sec_alg_resource_alloc(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_alg_res *res = qp_ctx->res; + int ret; + + ret = sec_alloc_civ_resource(dev, res); + if (ret) + return ret; + + if (ctx->pbuf_supported) { + ret = sec_alloc_pbuf_resource(dev, res); + if (ret) { + dev_err(dev, "fail to alloc pbuf dma resource!\n"); + goto alloc_fail; + } + } + + return 0; +alloc_fail: + sec_free_civ_resource(dev, res); + + return ret; +} + +static void sec_alg_resource_free(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + + sec_free_civ_resource(dev, qp_ctx->res); + + if (ctx->pbuf_supported) + sec_free_pbuf_resource(dev, qp_ctx->res); +} + +static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_qp_ctx *qp_ctx; + struct hisi_qp *qp; + int ret = -ENOMEM; + + qp_ctx = &ctx->qp_ctx[qp_ctx_id]; + qp = ctx->qps[qp_ctx_id]; + qp->req_type = 0; + qp->qp_ctx = qp_ctx; + qp->req_cb = sec_req_cb; + qp_ctx->qp = qp; + qp_ctx->ctx = ctx; + + spin_lock_init(&qp_ctx->req_lock); + idr_init(&qp_ctx->req_idr); + + qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, + SEC_SGL_SGE_NR); + if (IS_ERR(qp_ctx->c_in_pool)) { + dev_err(dev, "fail to create sgl pool for input!\n"); + goto err_destroy_idr; + } + + qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, + SEC_SGL_SGE_NR); + if (IS_ERR(qp_ctx->c_out_pool)) { + dev_err(dev, "fail to create sgl pool for output!\n"); + goto err_free_c_in_pool; + } + + ret = sec_alg_resource_alloc(ctx, qp_ctx); + if (ret) + goto err_free_c_out_pool; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_queue_free; + + return 0; + +err_queue_free: + sec_alg_resource_free(ctx, qp_ctx); +err_free_c_out_pool: + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); +err_free_c_in_pool: + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); +err_destroy_idr: + idr_destroy(&qp_ctx->req_idr); + + return ret; +} + +static void sec_release_qp_ctx(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + + hisi_qm_stop_qp(qp_ctx->qp); + sec_alg_resource_free(ctx, qp_ctx); + + hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); + hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); + + idr_destroy(&qp_ctx->req_idr); +} + +static int sec_ctx_base_init(struct sec_ctx *ctx) +{ + struct sec_dev *sec; + int i, ret; + + ctx->qps = sec_create_qps(); + if (!ctx->qps) { + pr_err("Can not create sec qps!\n"); + return -ENODEV; + } + + sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); + ctx->sec = sec; + ctx->hlf_q_num = 
sec->ctx_q_num >> 1; + + ctx->pbuf_supported = ctx->sec->iommu_used; + + /* Half of queue depth is taken as fake requests limit in the queue. */ + ctx->fake_req_limit = QM_Q_DEPTH >> 1; + ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), + GFP_KERNEL); + if (!ctx->qp_ctx) + return -ENOMEM; + + for (i = 0; i < sec->ctx_q_num; i++) { + ret = sec_create_qp_ctx(ctx, i, 0); + if (ret) + goto err_sec_release_qp_ctx; + } + return 0; +err_sec_release_qp_ctx: + for (i = i - 1; i >= 0; i--) + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); + + sec_destroy_qps(ctx->qps, sec->ctx_q_num); + kfree(ctx->qp_ctx); + + return ret; +} + +static void sec_ctx_base_uninit(struct sec_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->sec->ctx_q_num; i++) + sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); + + sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); + kfree(ctx->qp_ctx); +} + +static int sec_cipher_init(struct sec_ctx *ctx) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + &c_ctx->c_key_dma, GFP_KERNEL); + if (!c_ctx->c_key) + return -ENOMEM; + + return 0; +} + +static void sec_cipher_uninit(struct sec_ctx *ctx) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); + dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key, c_ctx->c_key_dma); +} + +static int sec_skcipher_init(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + ctx->alg_type = SEC_SKCIPHER; + crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); + ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); + if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { + dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + return -EINVAL; + } + + ret = sec_ctx_base_init(ctx); + if (ret) + return ret; + + ret = sec_cipher_init(ctx); + if (ret) + goto err_cipher_init; + + return 0; +err_cipher_init: + sec_ctx_base_uninit(ctx); + + return ret; +} + +static void sec_skcipher_uninit(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + + sec_cipher_uninit(ctx); + sec_ctx_base_uninit(ctx); +} + +static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, + const u32 keylen, + const enum sec_cmode c_mode) +{ + switch (keylen) { + case SEC_DES3_2KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; + break; + case SEC_DES3_3KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, + const u32 keylen, + const enum sec_cmode c_mode) +{ + if (c_mode == SEC_CMODE_XTS) { + switch (keylen) { + case SEC_XTS_MIN_KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case SEC_XTS_MAX_KEY_SIZE: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: xts mode key error!\n"); + return -EINVAL; + } + } else { + switch (keylen) { + case AES_KEYSIZE_128: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case AES_KEYSIZE_192: + c_ctx->c_key_len = SEC_CKEY_192BIT; + break; + case AES_KEYSIZE_256: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: aes key error!\n"); + return -EINVAL; + } + } + + return 0; +} + +static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + const u32 keylen, const enum sec_calg c_alg, + const enum sec_cmode c_mode) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int ret; + + if (c_mode == SEC_CMODE_XTS) 
{ + ret = xts_verify_key(tfm, key, keylen); + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + return ret; + } + } + + c_ctx->c_alg = c_alg; + c_ctx->c_mode = c_mode; + + switch (c_alg) { + case SEC_CALG_3DES: + ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); + break; + case SEC_CALG_AES: + case SEC_CALG_SM4: + ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); + break; + default: + return -EINVAL; + } + + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + return ret; + } + + memcpy(c_ctx->c_key, key, keylen); + + return 0; +} + +#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \ +static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ + u32 keylen) \ +{ \ + return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \ +} + +GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) +GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) + +GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) +GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) + +GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) +GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) + +static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src) +{ + struct sec_cipher_req *c_req = &req->c_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct device *dev = SEC_CTX_DEV(ctx); + int copy_size, pbuf_length; + int req_id = req->req_id; + + copy_size = c_req->c_len; + + pbuf_length = sg_copy_to_buffer(src, sg_nents(src), + qp_ctx->res[req_id].pbuf, + copy_size); + + if (unlikely(pbuf_length != copy_size)) { + dev_err(dev, "copy src data to pbuf error!\n"); + return -EINVAL; + } + + c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; + if (!c_req->c_in_dma) { + dev_err(dev, "fail to set pbuffer address!\n"); + return -ENOMEM; + } + + c_req->c_out_dma = c_req->c_in_dma; + + return 0; +} + +static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *dst) +{ + struct sec_cipher_req *c_req = &req->c_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct device *dev = SEC_CTX_DEV(ctx); + int copy_size, pbuf_length; + int req_id = req->req_id; + + copy_size = c_req->c_len; + + pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), + qp_ctx->res[req_id].pbuf, + copy_size); + + if (unlikely(pbuf_length != copy_size)) + dev_err(dev, "copy pbuf data to dst error!\n"); +} + +static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_cipher_req *c_req = &req->c_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct sec_alg_res *res = &qp_ctx->res[req->req_id]; + struct device *dev = SEC_CTX_DEV(ctx); + int ret; + + if (req->use_pbuf) { + ret = sec_cipher_pbuf_map(ctx, req, src); + c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; + c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; + + return ret; + } + c_req->c_ivin = res->c_ivin; + c_req->c_ivin_dma = res->c_ivin_dma; + + c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, + qp_ctx->c_in_pool, + req->req_id, + &c_req->c_in_dma); + + if (IS_ERR(c_req->c_in)) { + dev_err(dev, "fail to dma map input sgl buffers!\n"); + return PTR_ERR(c_req->c_in); + } + + if (dst == src) { + c_req->c_out = c_req->c_in; + c_req->c_out_dma = c_req->c_in_dma; + } else { + c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, + qp_ctx->c_out_pool, + req->req_id, + &c_req->c_out_dma); + + if 
(IS_ERR(c_req->c_out)) { + dev_err(dev, "fail to dma map output sgl buffers!\n"); + hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + return PTR_ERR(c_req->c_out); + } + } + + return 0; +} + +static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_cipher_req *c_req = &req->c_req; + struct device *dev = SEC_CTX_DEV(ctx); + + if (req->use_pbuf) { + sec_cipher_pbuf_unmap(ctx, req, dst); + } else { + if (dst != src) + hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + + hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); + } +} + +static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) +{ + struct skcipher_request *sq = req->c_req.sk_req; + + return sec_cipher_map(ctx, req, sq->src, sq->dst); +} + +static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) +{ + struct skcipher_request *sq = req->c_req.sk_req; + + sec_cipher_unmap(ctx, req, sq->src, sq->dst); +} + +static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) +{ + int ret; + + ret = ctx->req_op->buf_map(ctx, req); + if (unlikely(ret)) + return ret; + + ctx->req_op->do_transfer(ctx, req); + + ret = ctx->req_op->bd_fill(ctx, req); + if (unlikely(ret)) + goto unmap_req_buf; + + return ret; + +unmap_req_buf: + ctx->req_op->buf_unmap(ctx, req); + + return ret; +} + +static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) +{ + ctx->req_op->buf_unmap(ctx, req); +} + +static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + struct sec_cipher_req *c_req = &req->c_req; + + memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); +} + +static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_cipher_req *c_req = &req->c_req; + struct sec_sqe *sec_sqe = &req->sec_sqe; + u8 scene, sa_type, da_type; + u8 bd_type, cipher; + u8 de = 0; + + memset(sec_sqe, 0, sizeof(struct sec_sqe)); + + sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); + sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); + sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + + sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << + SEC_CMODE_OFFSET); + sec_sqe->type2.c_alg = c_ctx->c_alg; + sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << + SEC_CKEY_OFFSET); + + bd_type = SEC_BD_TYPE2; + if (c_req->encrypt) + cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET; + else + cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; + sec_sqe->type_cipher_auth = bd_type | cipher; + + if (req->use_pbuf) + sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; + else + sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; + scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; + if (c_req->c_in_dma != c_req->c_out_dma) + de = 0x1 << SEC_DE_OFFSET; + + sec_sqe->sds_sa_type = (de | scene | sa_type); + + /* Just set DST address type */ + if (req->use_pbuf) + da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; + else + da_type = SEC_SGL << SEC_DST_SGL_OFFSET; + sec_sqe->sdm_addr_type |= da_type; + + sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); + sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); + + return 0; +} + +static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + u32 iv_size = req->ctx->c_ctx.ivsize; + struct scatterlist *sgl; + unsigned int 
cryptlen; + size_t sz; + u8 *iv; + + if (req->c_req.encrypt) + sgl = sk_req->dst; + else + sgl = sk_req->src; + + iv = sk_req->iv; + cryptlen = sk_req->cryptlen; + + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, + cryptlen - iv_size); + if (unlikely(sz != iv_size)) + dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); +} + +static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, + int err) +{ + struct skcipher_request *sk_req = req->c_req.sk_req; + + sec_free_req_id(req); + + /* IV is output after encryption in CBC mode */ + if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) + sec_update_iv(req, SEC_SKCIPHER); + + if (req->fake_busy) { + sk_req->base.complete(&sk_req->base, -EINPROGRESS); + atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); + } + + sk_req->base.complete(&sk_req->base, err); +} + +static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) +{ + sec_free_req_id(req); + sec_free_queue_id(ctx, req); +} + +static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx; + int queue_id; + + /* To load balance */ + queue_id = sec_alloc_queue_id(ctx, req); + qp_ctx = &ctx->qp_ctx[queue_id]; + + req->req_id = sec_alloc_req_id(req, qp_ctx); + if (unlikely(req->req_id < 0)) { + sec_free_queue_id(ctx, req); + return req->req_id; + } + + return 0; +} + +static int sec_process(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_cipher_req *c_req = &req->c_req; + int ret; + + ret = sec_request_init(ctx, req); + if (unlikely(ret)) + return ret; + + ret = sec_request_transfer(ctx, req); + if (unlikely(ret)) + goto err_uninit_req; + + /* Save the output IV before decryption in CBC mode */ + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) + sec_update_iv(req, ctx->alg_type); + + ret = ctx->req_op->bd_send(ctx, req); + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || + (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { + dev_err_ratelimited(SEC_CTX_DEV(ctx), + "send sec request failed!\n"); + goto err_send_req; + } + + return ret; + +err_send_req: + /* On failure, restore the IV originally supplied by the user */ + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { + if (ctx->alg_type == SEC_SKCIPHER) + memcpy(req->c_req.sk_req->iv, c_req->c_ivin, + ctx->c_ctx.ivsize); + } + + sec_request_untransfer(ctx, req); +err_uninit_req: + sec_request_uninit(ctx, req); + + return ret; +} + +static const struct sec_req_op sec_skcipher_req_ops = { + .buf_map = sec_skcipher_sgl_map, + .buf_unmap = sec_skcipher_sgl_unmap, + .do_transfer = sec_skcipher_copy_iv, + .bd_fill = sec_skcipher_bd_fill, + .bd_send = sec_bd_send, + .callback = sec_skcipher_callback, + .process = sec_process, +}; + +static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->req_op = &sec_skcipher_req_ops; + + return sec_skcipher_init(tfm); +} + +static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) +{ + sec_skcipher_uninit(tfm); +} + +static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + struct skcipher_request *sk_req = sreq->c_req.sk_req; + struct device *dev = SEC_CTX_DEV(ctx); + u8 c_alg = ctx->c_ctx.c_alg; + + if (unlikely(!sk_req->src || !sk_req->dst)) { + dev_err(dev, "skcipher input param error!\n"); + return -EINVAL; + } + sreq->c_req.c_len = sk_req->cryptlen; + + if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) + sreq->use_pbuf = true; + else + sreq->use_pbuf = false; + + if (c_alg == SEC_CALG_3DES) { + if
(unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { + dev_err(dev, "skcipher 3des input length error!\n"); + return -EINVAL; + } + return 0; + } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { + if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "skcipher aes input length error!\n"); + return -EINVAL; + } + return 0; + } + + dev_err(dev, "skcipher algorithm error!\n"); + return -EINVAL; +} + +static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); + struct sec_req *req = skcipher_request_ctx(sk_req); + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + if (!sk_req->cryptlen) + return 0; + + req->flag = sk_req->base.flags; + req->c_req.sk_req = sk_req; + req->c_req.encrypt = encrypt; + req->ctx = ctx; + req->fake_busy = false; + + ret = sec_skcipher_param_check(ctx, req); + if (unlikely(ret)) + return -EINVAL; + + return ctx->req_op->process(ctx, req); +} + +static int sec_skcipher_encrypt(struct skcipher_request *sk_req) +{ + return sec_skcipher_crypto(sk_req, true); +} + +static int sec_skcipher_decrypt(struct skcipher_request *sk_req) +{ + return sec_skcipher_crypto(sk_req, false); +} + +#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ + sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ +{\ + .base = {\ + .cra_name = sec_cra_name,\ + .cra_driver_name = "hisi_sec_"sec_cra_name,\ + .cra_priority = SEC_PRIORITY,\ + .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_blocksize = blk_size,\ + .cra_ctxsize = sizeof(struct sec_ctx),\ + .cra_module = THIS_MODULE,\ + },\ + .init = ctx_init,\ + .exit = ctx_exit,\ + .setkey = sec_set_key,\ + .decrypt = sec_skcipher_decrypt,\ + .encrypt = sec_skcipher_encrypt,\ + .min_keysize = sec_min_key_size,\ + .max_keysize = sec_max_key_size,\ + .ivsize = iv_size,\ +}, + +#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ + max_key_size, blk_size, iv_size) \ + SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ + sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) + +static struct skcipher_alg sec_skciphers[] = { + SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + AES_BLOCK_SIZE, 0) + + SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, + SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, + SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + DES3_EDE_BLOCK_SIZE, 0) + + SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, + SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, + SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, + AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, + AES_BLOCK_SIZE, AES_BLOCK_SIZE) + +}; + +int sec_register_to_crypto(void) +{ + /* To avoid repeat register */ + if (atomic_add_return(1, &sec_active_devs) == 1) + return crypto_register_skciphers(sec_skciphers, + ARRAY_SIZE(sec_skciphers)); + + return 0; +} + +void sec_unregister_from_crypto(void) +{ + if (atomic_sub_return(1, &sec_active_devs) == 0) + crypto_unregister_skciphers(sec_skciphers, + ARRAY_SIZE(sec_skciphers)); +} diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h 
b/drivers/crypto/hisilicon/sec2/sec_crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..221257ef371f37195d33eaad6852fe73b28d7499 --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */ + +#ifndef __HISI_SEC_V2_CRYPTO_H +#define __HISI_SEC_V2_CRYPTO_H + +#define SEC_IV_SIZE 24 +#define SEC_MAX_KEY_SIZE 64 +#define SEC_MAX_AUTH_KEY_SIZE 64 + +#define SEC_COMM_SCENE 0 + +enum sec_calg { + SEC_CALG_3DES = 0x1, + SEC_CALG_AES = 0x2, + SEC_CALG_SM4 = 0x3, +}; + +enum sec_hash_alg { + SEC_A_HMAC_SHA1 = 0x10, + SEC_A_HMAC_SHA256 = 0x11, + SEC_A_HMAC_SHA512 = 0x15, +}; + +enum sec_mac_len { + SEC_HMAC_SHA1_MAC = 20, + SEC_HMAC_SHA256_MAC = 32, + SEC_HMAC_SHA512_MAC = 64, +}; + +enum sec_cmode { + SEC_CMODE_ECB = 0x0, + SEC_CMODE_CBC = 0x1, + SEC_CMODE_CTR = 0x4, + SEC_CMODE_XTS = 0x7, +}; + +enum sec_ckey_type { + SEC_CKEY_128BIT = 0x0, + SEC_CKEY_192BIT = 0x1, + SEC_CKEY_256BIT = 0x2, + SEC_CKEY_3DES_3KEY = 0x1, + SEC_CKEY_3DES_2KEY = 0x3, +}; + +enum sec_bd_type { + SEC_BD_TYPE1 = 0x1, + SEC_BD_TYPE2 = 0x2, +}; + +enum sec_auth { + SEC_NO_AUTH = 0x0, + SEC_AUTH_TYPE1 = 0x1, + SEC_AUTH_TYPE2 = 0x2, +}; + +enum sec_cipher_dir { + SEC_CIPHER_ENC = 0x1, + SEC_CIPHER_DEC = 0x2, +}; + +enum sec_addr_type { + SEC_PBUF = 0x0, + SEC_SGL = 0x1, + SEC_PRP = 0x2, +}; + +enum sec_ci_gen { + SEC_CI_GEN_BY_ADDR = 0x0, + SEC_CI_GEN_BY_LBA = 0X3, +}; + +enum sec_scene { + SEC_SCENE_IPSEC = 0x1, + SEC_SCENE_STORAGE = 0x5, +}; + +enum sec_work_mode { + SEC_NO_FUSION = 0x0, + SEC_IV_FUSION = 0x1, + SEC_FUSION_BUTT +}; + +enum sec_req_ops_type { + SEC_OPS_SKCIPHER_ALG = 0x0, + SEC_OPS_DMCRYPT = 0x1, + SEC_OPS_MULTI_IV = 0x2, + SEC_OPS_BUTT +}; + +struct sec_sqe_type2 { + /* + * mac_len: 0~4 bits + * a_key_len: 5~10 bits + * a_alg: 11~16 bits + */ + __le32 mac_key_alg; + + /* + * c_icv_len: 0~5 bits + * c_width: 6~8 bits + * c_key_len: 9~11 bits + * c_mode: 12~15 bits + */ + __le16 icvw_kmode; + + /* c_alg: 0~3 bits */ + __u8 c_alg; + __u8 rsvd4; + + /* + * a_len: 0~23 bits + * iv_offset_l: 24~31 bits + */ + __le32 alen_ivllen; + + /* + * c_len: 0~23 bits + * iv_offset_h: 24~31 bits + */ + __le32 clen_ivhlen; + + __le16 auth_src_offset; + __le16 cipher_src_offset; + __le16 cs_ip_header_offset; + __le16 cs_udp_header_offset; + __le16 pass_word_len; + __le16 dk_len; + __u8 salt3; + __u8 salt2; + __u8 salt1; + __u8 salt0; + + __le16 tag; + __le16 rsvd5; + + /* + * c_pad_type: 0~3 bits + * c_pad_len: 4~11 bits + * c_pad_data_type: 12~15 bits + */ + __le16 cph_pad; + + /* c_pad_len_field: 0~1 bits */ + __le16 c_pad_len_field; + + __le64 long_a_data_len; + __le64 a_ivin_addr; + __le64 a_key_addr; + __le64 mac_addr; + __le64 c_ivin_addr; + __le64 c_key_addr; + + __le64 data_src_addr; + __le64 data_dst_addr; + + /* + * done: 0 bit + * icv: 1~3 bits + * csc: 4~6 bits + * flag: 7-10 bits + * dif_check: 11~13 bits + */ + __le16 done_flag; + + __u8 error_type; + __u8 warning_type; + __u8 mac_i3; + __u8 mac_i2; + __u8 mac_i1; + __u8 mac_i0; + __le16 check_sum_i; + __u8 tls_pad_len_i; + __u8 rsvd12; + __le32 counter; +}; + +struct sec_sqe { + /* + * type: 0~3 bits + * cipher: 4~5 bits + * auth: 6~7 bit s + */ + __u8 type_cipher_auth; + + /* + * seq: 0 bit + * de: 1~2 bits + * scene: 3~6 bits + * src_addr_type: ~7 bit, with sdm_addr_type 0-1 bits + */ + __u8 sds_sa_type; + + /* + * src_addr_type: 0~1 bits, not used now, + * if support PRP, set this field, or set zero. 
+ * dst_addr_type: 2~4 bits + * mac_addr_type: 5~7 bits + */ + __u8 sdm_addr_type; + __u8 rsvd0; + + /* + * nonce_len(type2): 0~3 bits + * huk(type2): 4 bit + * key_s(type2): 5 bit + * ci_gen: 6~7 bits + */ + __u8 huk_key_ci; + + /* + * ai_gen: 0~1 bits + * a_pad(type2): 2~3 bits + * c_s(type2): 4~5 bits + */ + __u8 ai_apd_cs; + + /* + * rhf(type2): 0 bit + * c_key_type: 1~2 bits + * a_key_type: 3~4 bits + * write_frame_len(type2): 5~7 bits + */ + __u8 rca_key_frm; + + /* + * cal_iv_addr_en(type2): 0 bit + * tls_up(type2): 1 bit + * inveld: 7 bit + */ + __u8 iv_tls_ld; + + struct sec_sqe_type2 type2; +}; + +int sec_register_to_crypto(void); +void sec_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c new file mode 100644 index 0000000000000000000000000000000000000000..438f0a206a1b1cfaac18e443140d06ca81d51b1b --- /dev/null +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 HiSilicon Limited. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sec.h" + +#define SEC_QUEUE_NUM_V1 4096 +#define SEC_QUEUE_NUM_V2 1024 +#define SEC_PF_PCI_DEVICE_ID 0xa255 +#define SEC_VF_PCI_DEVICE_ID 0xa256 + +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd +#define SEC_BD_ERR_CHK_EN3 0xffffbfff + +#define SEC_SQE_SIZE 128 +#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) +#define SEC_PF_DEF_Q_NUM 64 +#define SEC_PF_DEF_Q_BASE 0 +#define SEC_CTX_Q_NUM_DEF 24 +#define SEC_CTX_Q_NUM_MAX 32 + +#define SEC_CTRL_CNT_CLR_CE 0x301120 +#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) +#define SEC_ENGINE_PF_CFG_OFF 0x300000 +#define SEC_ACC_COMMON_REG_OFF 0x1000 +#define SEC_CORE_INT_SOURCE 0x301010 +#define SEC_CORE_INT_MASK 0x301000 +#define SEC_CORE_INT_STATUS 0x301008 +#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 +#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFFFF) +#define SEC_ECC_ADDR(err) ((err) >> 0) +#define SEC_CORE_INT_DISABLE 0x0 +#define SEC_CORE_INT_ENABLE 0x1ff +#define SEC_CORE_INT_CLEAR 0x1ff +#define SEC_SAA_ENABLE 0x17f + +#define SEC_RAS_CE_REG 0x301050 +#define SEC_RAS_FE_REG 0x301054 +#define SEC_RAS_NFE_REG 0x301058 +#define SEC_RAS_CE_ENB_MSK 0x88 +#define SEC_RAS_FE_ENB_MSK 0x0 +#define SEC_RAS_NFE_ENB_MSK 0x177 +#define SEC_RAS_DISABLE 0x0 +#define SEC_MEM_START_INIT_REG 0x0100 +#define SEC_MEM_INIT_DONE_REG 0x0104 + +#define SEC_CONTROL_REG 0x0200 +#define SEC_TRNG_EN_SHIFT 8 +#define SEC_CLK_GATE_ENABLE BIT(3) +#define SEC_CLK_GATE_DISABLE (~BIT(3)) +#define SEC_AXI_SHUTDOWN_ENABLE BIT(12) +#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF +#define SEC_WR_MSI_PORT BIT(0) + +#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 +#define SEC_INTERFACE_USER_CTRL1_REG 0x0224 +#define SEC_SAA_EN_REG 0x0270 +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 +#define SEC_BD_ERR_CHK_EN_REG1 0x0384 +#define SEC_BD_ERR_CHK_EN_REG3 0x038c + +#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) +#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) +#define SEC_CORE_INT_STATUS_M_ECC BIT(2) + +#define SEC_DELAY_10_US 10 +#define SEC_POLL_TIMEOUT_US 1000 +#define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SINGLE_PORT_MAX_TRANS 0x2060 + +#define SEC_SQE_MASK_OFFSET 64 +#define SEC_SQE_MASK_LEN 48 + +#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ + SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) + +struct sec_hw_error { + u32 int_msk; + const char *msg; +}; 
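+/* + * Engine (non-QM) registers sit behind the PF engine configuration window, + * so they are reached through SEC_ADDR() rather than through qm->io_base + * directly. A minimal usage sketch, mirroring sec_engine_init() below + * (illustrative only, not an additional code path): + * + * u32 reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + * reg &= SEC_CLK_GATE_DISABLE; + * writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + */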
+ +struct sec_dfx_item { + const char *name; + u32 offset; +}; + +static const char sec_name[] = "hisi_sec2"; +static struct dentry *sec_debugfs_root; +static struct hisi_qm_list sec_devices; + +static const struct sec_hw_error sec_hw_errors[] = { + {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"}, + {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"}, + {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"}, + {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"}, + {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"}, + {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"}, + {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"}, + {.int_msk = BIT(7), .msg = "sec_bd_err_rint"}, + {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"}, + { /* sentinel */ } +}; + +static const char * const sec_dbg_file_name[] = { + [SEC_CURRENT_QM] = "current_qm", + [SEC_CLEAR_ENABLE] = "clear_enable", +}; + +static struct sec_dfx_item sec_dfx_labels[] = { + {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, + {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, + {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, + {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, +}; + +static struct debugfs_reg32 sec_dfx_regs[] = { + {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, + {"SEC_SAA_EN ", 0x301270}, + {"SEC_BD_LATENCY_MIN ", 0x301600}, + {"SEC_BD_LATENCY_MAX ", 0x301608}, + {"SEC_BD_LATENCY_AVG ", 0x30160C}, + {"SEC_BD_NUM_IN_SAA0 ", 0x301670}, + {"SEC_BD_NUM_IN_SAA1 ", 0x301674}, + {"SEC_BD_NUM_IN_SEC ", 0x301680}, + {"SEC_ECC_1BIT_CNT ", 0x301C00}, + {"SEC_ECC_1BIT_INFO ", 0x301C04}, + {"SEC_ECC_2BIT_CNT ", 0x301C10}, + {"SEC_ECC_2BIT_INFO ", 0x301C14}, + {"SEC_BD_SAA0 ", 0x301C20}, + {"SEC_BD_SAA1 ", 0x301C24}, + {"SEC_BD_SAA2 ", 0x301C28}, + {"SEC_BD_SAA3 ", 0x301C2C}, + {"SEC_BD_SAA4 ", 0x301C30}, + {"SEC_BD_SAA5 ", 0x301C34}, + {"SEC_BD_SAA6 ", 0x301C38}, + {"SEC_BD_SAA7 ", 0x301C3C}, + {"SEC_BD_SAA8 ", 0x301C40}, +}; + +static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp) +{ + u32 ctx_q_num; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &ctx_q_num); + if (ret) + return -EINVAL; + + if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) { + pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num); + return -EINVAL; + } + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops sec_ctx_q_num_ops = { + .set = sec_ctx_q_num_set, + .get = param_get_int, +}; +static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; +module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); +MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); + +void sec_destroy_qps(struct hisi_qp **qps, int qp_num) +{ + hisi_qm_free_qps(qps, qp_num); + kfree(qps); +} + +struct hisi_qp **sec_create_qps(void) +{ + int node = cpu_to_node(raw_smp_processor_id()); + u32 ctx_num = ctx_q_num; + struct hisi_qp **qps; + int ret; + + qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL); + if (!qps) + return NULL; + + ret = hisi_qm_alloc_qps_node(node, &sec_devices, qps, ctx_num, 0); + if (!ret) + return qps; + + kfree(qps); + return NULL; +} + +static int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static const struct kernel_param_ops sec_uacce_mode_ops = { + .set = uacce_mode_set, + .get = 
param_get_int, +}; + +static u32 uacce_mode = UACCE_MODE_NOUACCE; +module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); +MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2"); + +static int pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); +} + +static const struct kernel_param_ops sec_pf_q_num_ops = { + .set = pf_q_num_set, + .get = param_get_int, +}; + +static u32 pf_q_num = SEC_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); + +static int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + return vf_num_set(val, kp); +} + +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + +static const struct pci_device_id sec_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, sec_dev_ids); + +static void sec_set_endian(struct hisi_qm *qm) +{ + u32 reg; + + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg &= ~(BIT(1) | BIT(0)); + if (IS_ENABLED(CONFIG_64BIT)) + reg |= BIT(1); + + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + reg |= BIT(0); + + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + +static int sec_engine_init(struct hisi_qm *qm) +{ + int ret; + u32 reg; + + /* disable clock gate control */ + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg &= SEC_CLK_GATE_DISABLE; + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); + + ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), + reg, reg & 0x1, SEC_DELAY_10_US, + SEC_POLL_TIMEOUT_US); + if (ret) { + pci_err(qm->pdev, "fail to init sec mem\n"); + return ret; + } + + reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg |= (0x1 << SEC_TRNG_EN_SHIFT); + writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + + reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + + reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + reg |= SEC_USER1_SMMU_NORMAL; + writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + + writel(SEC_SINGLE_PORT_MAX_TRANS, + qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); + + writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + + /* Enable sm4 extra mode, as ctr/ecb */ + writel_relaxed(SEC_BD_ERR_CHK_EN0, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + /* Enable sm4 xts mode multiple iv */ + writel_relaxed(SEC_BD_ERR_CHK_EN1, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); + + /* config endian */ + sec_set_endian(qm); + + return 0; +} + +static int sec_set_user_domain_and_cache(struct hisi_qm *qm) +{ + /* qm user domain */ + writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); + writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); + writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); + writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); + writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); + + /* qm cache */ + writel(AXI_M_CFG, qm->io_base + 
QM_AXI_M_CFG); + writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); + + /* enable sqc,cqc writeback */ + writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | + CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | + FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); + + return sec_engine_init(qm); +} + +/* sec_debug_regs_clear() - clear the sec debug regs */ +static void sec_debug_regs_clear(struct hisi_qm *qm) +{ + int i; + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* clear sec dfx regs */ + writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + readl(qm->io_base + sec_dfx_regs[i].offset); + + /* clear rdclr_en */ + writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static void sec_hw_error_enable(struct hisi_qm *qm) +{ + u32 val; + + if (qm->ver == QM_HW_V1) { + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); + pci_info(qm->pdev, "V1 not support hw error handle\n"); + return; + } + + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + + /* clear SEC hw error source if having */ + writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); + + /* enable SEC hw error interrupts */ + writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); + + /* enable RAS int */ + writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); + writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); + writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); + + /* enable SEC block master OOO when m-bit error occur */ + val = val | SEC_AXI_SHUTDOWN_ENABLE; + + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + +static void sec_hw_error_disable(struct hisi_qm *qm) +{ + u32 val; + + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + + /* disable RAS int */ + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); + writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); + + /* disable SEC hw error interrupts */ + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); + + /* disable SEC block master OOO when m-bit error occur */ + val = val & SEC_AXI_SHUTDOWN_DISABLE; + + writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + +static u32 sec_current_qm_read(struct sec_debug_file *file) +{ + struct hisi_qm *qm = file->qm; + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int sec_current_qm_write(struct sec_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file->qm; + u32 vfq_num; + u32 tmp; + + if (val > qm->vfs_num) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (!val) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num; + + if (val == qm->vfs_num) + qm->debug.curr_qm_qp_num = + qm->ctrl_q_num - qm->qp_num - + (qm->vfs_num - 1) * vfq_num; + else + qm->debug.curr_qm_qp_num = vfq_num; + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + 
QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 sec_clear_enable_read(struct sec_debug_file *file) +{ + struct hisi_qm *qm = file->qm; + + return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & + SEC_CTRL_CNT_CLR_CE_BIT; +} + +static int sec_clear_enable_write(struct sec_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file->qm; + u32 tmp; + + if (val != 1 && val) + return -EINVAL; + + tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & + ~SEC_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE); + + return 0; +} + +static ssize_t sec_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct sec_debug_file *file = filp->private_data; + char tbuf[SEC_DBGFS_VAL_MAX_LEN]; + u32 val; + int ret; + + spin_lock_irq(&file->lock); + + switch (file->index) { + case SEC_CURRENT_QM: + val = sec_current_qm_read(file); + break; + case SEC_CLEAR_ENABLE: + val = sec_clear_enable_read(file); + break; + default: + spin_unlock_irq(&file->lock); + return -EINVAL; + } + + spin_unlock_irq(&file->lock); + ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val); + + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t sec_debug_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct sec_debug_file *file = filp->private_data; + char tbuf[SEC_DBGFS_VAL_MAX_LEN]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= SEC_DBGFS_VAL_MAX_LEN) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1, + pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock_irq(&file->lock); + + switch (file->index) { + case SEC_CURRENT_QM: + ret = sec_current_qm_write(file, val); + if (ret) + goto err_input; + break; + case SEC_CLEAR_ENABLE: + ret = sec_clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + + spin_unlock_irq(&file->lock); + + return count; + + err_input: + spin_unlock_irq(&file->lock); + return ret; +} + +static const struct file_operations sec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sec_debug_read, + .write = sec_debug_write, +}; + +static int sec_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +static int sec_debugfs_atomic64_set(void *data, u64 val) +{ + if (!val) { + atomic64_set((atomic64_t *)data, 0); + return 0; + } + + return -EINVAL; +} + +DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, + sec_debugfs_atomic64_set, "%lld\n"); + +static int sec_core_debug_init(struct hisi_qm *qm) +{ + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); + struct device *dev = &qm->pdev->dev; + struct sec_dfx *dfx = &sec->debug.dfx; + struct debugfs_regset32 *regset; + struct dentry *tmp_d; + int i; + + tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOMEM; + + regset->regs = sec_dfx_regs; + regset->nregs = ARRAY_SIZE(sec_dfx_regs); + regset->base = qm->io_base; + + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) + debugfs_create_regset32("regs", 0444, tmp_d, regset); + + for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { + atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + + sec_dfx_labels[i].offset); + debugfs_create_file(sec_dfx_labels[i].name, 0644, + tmp_d, data, &sec_atomic64_ops); + } + + return 
0; +} + +static int sec_debug_init(struct hisi_qm *qm) +{ + struct sec_dev *sec = container_of(qm, struct sec_dev, qm); + int i; + + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + spin_lock_init(&sec->debug.files[i].lock); + sec->debug.files[i].index = i; + sec->debug.files[i].qm = qm; + + debugfs_create_file(sec_dbg_file_name[i], 0600, + qm->debug.debug_root, + sec->debug.files + i, + &sec_dbg_fops); + } + } + + return sec_core_debug_init(qm); +} + +static int sec_debugfs_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + int ret; + + qm->debug.debug_root = debugfs_create_dir(dev_name(dev), + sec_debugfs_root); + qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + ret = sec_debug_init(qm); + if (ret) + goto failed_to_create; + + return 0; + +failed_to_create: + debugfs_remove_recursive(sec_debugfs_root); + + return ret; +} + +static void sec_debugfs_exit(struct hisi_qm *qm) +{ + debugfs_remove_recursive(qm->debug.debug_root); +} + +static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct sec_hw_error *errs = sec_hw_errors; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (errs->msg) { + if (errs->int_msk & err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + errs->msg, errs->int_msk); + + if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) { + err_val = readl(qm->io_base + + SEC_CORE_SRAM_ECC_ERR_INFO); + dev_err(dev, "multi ecc sram num=0x%x\n", + SEC_ECC_NUM(err_val)); + } + } + errs++; + } +} + +static u32 sec_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + SEC_CORE_INT_STATUS); +} + +static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); +} + +static void sec_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + +static void sec_err_ini_set(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + qm->err_ini.get_dev_hw_err_status = sec_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = sec_clear_hw_err_status; + qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; + qm->err_ini.err_info.ce = QM_BASE_CE; + qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | + QM_ACC_WB_NOT_READY_TIMEOUT; + qm->err_ini.err_info.fe = 0; + qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID; + qm->err_ini.err_info.acpi_rst = "SRST"; + qm->err_ini.hw_err_disable = sec_hw_error_disable; + qm->err_ini.hw_err_enable = sec_hw_error_enable; + qm->err_ini.set_usr_domain_cache = sec_set_user_domain_and_cache; + qm->err_ini.log_dev_hw_err = sec_log_hw_error; + qm->err_ini.open_axi_master_ooo = sec_open_axi_master_ooo; + qm->err_ini.err_info.msi_wr_port = SEC_WR_MSI_PORT; +} + +static int sec_pf_probe_init(struct hisi_qm *qm) +{ + int ret; + + switch (qm->ver) { + case QM_HW_V1: + qm->ctrl_q_num = SEC_QUEUE_NUM_V1; + break; + + case QM_HW_V2: + qm->ctrl_q_num = SEC_QUEUE_NUM_V2; + break; + + default: + return -EINVAL; + } + + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) + return ret; + + hisi_qm_dev_err_init(qm); + sec_debug_regs_clear(qm); + + return 0; +} + +static int sec_probe_init(struct hisi_qm *qm) +{ + int ret; + + /* + * WQ_HIGHPRI: SEC 
requests must be handled with low latency, + * so a high-priority workqueue is needed. + * WQ_UNBOUND: SEC tasks are likely to be long-running, + * CPU-intensive workloads. + */ + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | + WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + pci_err(qm->pdev, "fail to alloc workqueue\n"); + return -ENOMEM; + } + + if (qm->fun_type == QM_HW_PF) { + ret = sec_pf_probe_init(qm); + if (ret) + goto err_probe_uninit; + } + + return 0; + +err_probe_uninit: + destroy_workqueue(qm->wq); + return ret; +} + +static void sec_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_PF) + hisi_qm_dev_err_uninit(qm); + destroy_workqueue(qm->wq); +} + +static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +{ + int ret; + + qm->algs = "cipher\ndigest\naead\n"; + qm->uacce_mode = uacce_mode; + qm->pdev = pdev; + ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE); + if (ret) + return ret; + + qm->qm_list = &sec_devices; + qm->sqe_size = SEC_SQE_SIZE; + qm->dev_name = sec_name; + sec_err_ini_set(qm); + + return 0; +} + +static void sec_iommu_used_check(struct sec_dev *sec) +{ + struct iommu_domain *domain; + struct device *dev = &sec->qm.pdev->dev; + + domain = iommu_get_domain_for_dev(dev); + + /* Check if iommu is used */ + sec->iommu_used = false; + if (domain) { + if (domain->type & __IOMMU_DOMAIN_PAGING) + sec->iommu_used = true; + dev_info(dev, "SMMU Opened, the iommu type = %u\n", + domain->type); + } +} + +static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct sec_dev *sec; + struct hisi_qm *qm; + int ret; + + sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + qm = &sec->qm; + qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
+ QM_HW_PF : QM_HW_VF; + + ret = sec_qm_pre_init(qm, pdev); + if (ret) + return ret; + + sec->ctx_q_num = ctx_q_num; + sec_iommu_used_check(sec); + + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + qm->qp_base = SEC_PF_DEF_Q_NUM; + qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; + } + + ret = hisi_qm_init(qm); + if (ret) { + pci_err(pdev, "Failed to init qm (%d)!\n", ret); + return ret; + } + + ret = sec_probe_init(qm); + if (ret) { + pci_err(pdev, "Failed to probe init (%d)!\n", ret); + goto err_qm_uninit; + } + + ret = hisi_qm_start(qm); + if (ret) { + pci_err(pdev, "Failed to start qm (%d)!\n", ret); + goto err_probe_uninit; + } + + ret = sec_debugfs_init(qm); + if (ret) + pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret); + + hisi_qm_add_to_list(qm, &sec_devices); + + ret = sec_register_to_crypto(); + if (ret < 0) { + pr_err("Failed to register driver to crypto!\n"); + goto err_remove_from_list; + } + + if (qm->fun_type == QM_HW_PF && vfs_num > 0) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_crypto_unregister; + } + + return 0; + +err_crypto_unregister: + sec_unregister_from_crypto(); + +err_remove_from_list: + hisi_qm_del_from_list(qm, &sec_devices); + sec_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + +err_probe_uninit: + sec_probe_uninit(qm); + +err_qm_uninit: + hisi_qm_uninit(qm); + + return ret; +} + +static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs) + return hisi_qm_sriov_enable(pdev, num_vfs); + else + return hisi_qm_sriov_disable(pdev, &sec_devices); +} + +static void sec_remove(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + + hisi_qm_remove_wait_delay(qm, &sec_devices); + + sec_unregister_from_crypto(); + + hisi_qm_del_from_list(qm, &sec_devices); + + if (qm->fun_type == QM_HW_PF && qm->vfs_num) + (void)hisi_qm_sriov_disable(pdev, NULL); + + sec_debugfs_exit(qm); + + (void)hisi_qm_stop(qm, QM_NORMAL); + + if (qm->fun_type == QM_HW_PF) + sec_debug_regs_clear(qm); + + sec_probe_uninit(qm); + + hisi_qm_uninit(qm); +} + +static const struct pci_error_handlers sec_err_handler = { + .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, +}; + +static struct pci_driver sec_pci_driver = { + .name = "hisi_sec2", + .id_table = sec_dev_ids, + .probe = sec_probe, + .remove = sec_remove, + .err_handler = &sec_err_handler, + .sriov_configure = sec_sriov_configure, + .shutdown = hisi_qm_dev_shutdown, +}; + +static void sec_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); +} + +static void sec_unregister_debugfs(void) +{ + debugfs_remove_recursive(sec_debugfs_root); +} + +static int __init sec_init(void) +{ + int ret; + + INIT_LIST_HEAD(&sec_devices.list); + mutex_init(&sec_devices.lock); + sec_devices.check = NULL; + sec_register_debugfs(); + + ret = pci_register_driver(&sec_pci_driver); + if (ret < 0) { + sec_unregister_debugfs(); + pr_err("Failed to register pci driver.\n"); + return ret; + } + + return 0; +} + +static void __exit sec_exit(void) +{ + pci_unregister_driver(&sec_pci_driver); + sec_unregister_debugfs(); +} + +module_init(sec_init); +module_exit(sec_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zaibo Xu "); +MODULE_AUTHOR("Longfang Liu "); +MODULE_AUTHOR("Zhang Wei "); +MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator"); diff --git a/drivers/crypto/hisilicon/sgl.c 
b/drivers/crypto/hisilicon/sgl.c new file mode 100644 index 0000000000000000000000000000000000000000..7aca6f79dcfc85332f36b5a698e2eaaaba437f77 --- /dev/null +++ b/drivers/crypto/hisilicon/sgl.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. */ +#include +#include +#include +#include "qm.h" + +#define HISI_ACC_SGL_SGE_NR_MIN 1 +#define HISI_ACC_SGL_NR_MAX 256 +#define HISI_ACC_SGL_ALIGN_SIZE 64 +#define HISI_ACC_MEM_BLOCK_NR 5 +#define HISI_ACC_BLOCK_SIZE_MAX_SHIFT 31 + +struct acc_hw_sge { + dma_addr_t buf; + void *page_ctrl; + __le32 len; + __le32 pad; + __le32 pad0; + __le32 pad1; +}; + +/* use default sgl head size 64B */ +struct hisi_acc_hw_sgl { + dma_addr_t next_dma; + __le16 entry_sum_in_chain; + __le16 entry_sum_in_sgl; + __le16 entry_length_in_sgl; + __le16 pad0; + __le64 pad1[5]; + struct hisi_acc_hw_sgl *next; + struct acc_hw_sge sge_entries[]; +} __aligned(1); + +struct hisi_acc_sgl_pool { + struct mem_block { + struct hisi_acc_hw_sgl *sgl; + dma_addr_t sgl_dma; + size_t size; + } mem_block[HISI_ACC_MEM_BLOCK_NR]; + u32 sgl_num_per_block; + u32 block_num; + u32 count; + u32 sge_nr; + size_t sgl_size; +}; + +/** + * hisi_acc_create_sgl_pool() - Create a hw sgl pool. + * @dev: The device which hw sgl pool belongs to. + * @count: Count of hisi_acc_hw_sgl in pool. + * @sge_nr: The count of sge in hw_sgl + * + * This function creates a hw sgl pool, after this user can get hw sgl memory + * from it. + */ +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr) +{ + u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl; + struct hisi_acc_sgl_pool *pool; + struct mem_block *block; + u32 i, j; + + if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) + return ERR_PTR(-EINVAL); + + sgl_size = sizeof(struct acc_hw_sge) * sge_nr + + sizeof(struct hisi_acc_hw_sgl); + block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ? + PAGE_SHIFT + MAX_ORDER - 1 : + HISI_ACC_BLOCK_SIZE_MAX_SHIFT); + sgl_num_per_block = block_size / sgl_size; + block_num = count / sgl_num_per_block; + remain_sgl = count % sgl_num_per_block; + + if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) || + (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1)) + return ERR_PTR(-EINVAL); + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + block = pool->mem_block; + + for (i = 0; i < block_num; i++) { + block[i].sgl = dma_alloc_coherent(dev, block_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate hw SG buffer!\n"); + goto err_free_mem; + } + + block[i].size = block_size; + } + + if (remain_sgl > 0) { + block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, + &block[i].sgl_dma, + GFP_KERNEL); + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate remained hw SG buffer!\n"); + goto err_free_mem; + } + + block[i].size = remain_sgl * sgl_size; + } + + pool->sgl_num_per_block = sgl_num_per_block; + pool->block_num = remain_sgl ? block_num + 1 : block_num; + pool->count = count; + pool->sgl_size = sgl_size; + pool->sge_nr = sge_nr; + + return pool; + +err_free_mem: + for (j = 0; j < i; j++) { + dma_free_coherent(dev, block_size, block[j].sgl, + block[j].sgl_dma); + memset(block + j, 0, sizeof(*block)); + } + kfree(pool); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); + +/** + * hisi_acc_free_sgl_pool() - Free a hw sgl pool. + * @dev: The device which hw sgl pool belongs to. 
+ * @pool: Pointer of pool. + * + * This function frees memory of a hw sgl pool. + */ +void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool) +{ + struct mem_block *block; + int i; + + if (!dev || !pool) + return; + + block = pool->mem_block; + + for (i = 0; i < pool->block_num; i++) + dma_free_coherent(dev, block[i].size, block[i].sgl, + block[i].sgl_dma); + + kfree(pool); +} +EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool); + +static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma) +{ + struct mem_block *block; + u32 block_index, offset; + + if (!pool || !hw_sgl_dma || index >= pool->count) + return ERR_PTR(-EINVAL); + + block = pool->mem_block; + block_index = index / pool->sgl_num_per_block; + offset = index % pool->sgl_num_per_block; + + *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset; + return (void *)block[block_index].sgl + pool->sgl_size * offset; +} + +static void sg_map_to_hw_sg(struct scatterlist *sgl, + struct acc_hw_sge *hw_sge) +{ + hw_sge->buf = sg_dma_address(sgl); + hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); +} + +static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) +{ + u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl); + + var++; + hw_sgl->entry_sum_in_sgl = cpu_to_le16(var); +} + +static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) +{ + hw_sgl->entry_sum_in_chain = cpu_to_le16(sum); +} + +/** + * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. + * @dev: The device which hw sgl belongs to. + * @sgl: Scatterlist which will be mapped to hw sgl. + * @pool: Pool which hw sgl memory will be allocated in. + * @index: Index of hisi_acc_hw_sgl in pool. + * @hw_sgl_dma: The dma address of allocated hw sgl. + * + * This function builds hw sgl according input sgl, user can use hw_sgl_dma + * as src/dst in its BD. Only support single hw sgl currently. + */ +struct hisi_acc_hw_sgl * +hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, + struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma) +{ + struct hisi_acc_hw_sgl *curr_hw_sgl; + dma_addr_t curr_sgl_dma = 0; + struct acc_hw_sge *curr_hw_sge; + struct scatterlist *sg; + int i, sg_n, sg_n_mapped; + + if (!dev || !sgl || !pool || !hw_sgl_dma) + return ERR_PTR(-EINVAL); + + sg_n = sg_nents(sgl); + + sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + if (!sg_n_mapped) { + dev_err(dev, "DMA mapping for SG error!\n"); + return ERR_PTR(-EINVAL); + } + + if (sg_n_mapped > pool->sge_nr) { + dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); + return ERR_PTR(-EINVAL); + } + + curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); + if (IS_ERR(curr_hw_sgl)) { + dev_err(dev, "Get SGL error!\n"); + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + return ERR_PTR(-ENOMEM); + } + + curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); + curr_hw_sge = curr_hw_sgl->sge_entries; + + for_each_sg(sgl, sg, sg_n_mapped, i) { + sg_map_to_hw_sg(sg, curr_hw_sge); + inc_hw_sgl_sge(curr_hw_sgl); + curr_hw_sge++; + } + + update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr); + *hw_sgl_dma = curr_sgl_dma; + + return curr_hw_sgl; +} +EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); + +/** + * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl. + * @dev: The device which hw sgl belongs to. + * @sgl: Related scatterlist. + * @hw_sgl: Virtual address of hw sgl. + * @hw_sgl_dma: DMA address of hw sgl. 
+ * @pool: Pool which hw sgl is allocated in. + * + * This function unmaps allocated hw sgl. + */ +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl) +{ + if (!dev || !sgl || !hw_sgl) + return; + + dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); + + hw_sgl->entry_sum_in_chain = 0; + hw_sgl->entry_sum_in_sgl = 0; + hw_sgl->entry_length_in_sgl = 0; +} +EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap); diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a936f099ee22eadfbf5a9cb8fcea2049a30255ef --- /dev/null +++ b/drivers/crypto/hisilicon/zip/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o +hisi_zip-objs = zip_main.o zip_crypto.o diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h new file mode 100644 index 0000000000000000000000000000000000000000..81bfcfe6617f57d2457dd2577d70668aafd10cc3 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2019 HiSilicon Limited. */ + +#ifndef HISI_ZIP_H +#define HISI_ZIP_H + +#include +#include "../qm.h" +#include "zip_usr_if.h" + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_zip: " fmt +#define ZIP_WAIT_DELAY 1000 + +enum hisi_zip_error_type { + /* negative compression */ + HZIP_NC_ERR = 0x0d, +}; + +struct zip_dfx { + atomic64_t send_cnt; + atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t err_bd_cnt; +}; + +struct hisi_zip_ctrl; +struct hisi_zip { + struct hisi_qm qm; + struct hisi_zip_ctrl *ctrl; + struct zip_dfx dfx; +}; + +int zip_create_qps(struct hisi_qp **qps, int ctx_num); +int hisi_zip_register_to_crypto(void); +void hisi_zip_unregister_from_crypto(void); +#endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..1cf8844736b2c547cad0770d980c0221abd57c86 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -0,0 +1,698 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#include +#include +#include +#include +#include "zip.h" + +/* hisi_zip_sqe dw3 */ +#define HZIP_BD_STATUS_M GENMASK(7, 0) +/* hisi_zip_sqe dw7 */ +#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) +/* hisi_zip_sqe dw8 */ +#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) +/* hisi_zip_sqe dw9 */ +#define HZIP_REQ_TYPE_M GENMASK(7, 0) +#define HZIP_ALG_TYPE_ZLIB 0x02 +#define HZIP_ALG_TYPE_GZIP 0x03 +#define HZIP_BUF_TYPE_M GENMASK(11, 8) +#define HZIP_PBUFFER 0x0 +#define HZIP_SGL 0x1 + +#define GZIP_HEAD_FHCRC_BIT BIT(1) +#define GZIP_HEAD_FEXTRA_BIT BIT(2) +#define GZIP_HEAD_FNAME_BIT BIT(3) +#define GZIP_HEAD_FCOMMENT_BIT BIT(4) + +#define GZIP_HEAD_FLG_SHIFT 3 +#define GZIP_HEAD_FEXTRA_SHIFT 10 +#define GZIP_HEAD_FEXTRA_XLEN 2UL +#define GZIP_HEAD_FHCRC_SIZE 2 + +#define HZIP_ZLIB_HEAD_SIZE 2 +#define HZIP_GZIP_HEAD_SIZE 10 +#define HZIP_GZIP_HEAD_BUF 256 +#define HZIP_ALG_PRIORITY 300 + +#define HZIP_SGL_SGE_NR 10 +#define HZIP_SGL_SGE_MAX 255 + +static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; +static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = { + 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03 +}; +enum hisi_zip_alg_type { + HZIP_ALG_TYPE_COMP = 0, + HZIP_ALG_TYPE_DECOMP = 1, +}; + +enum { + QPC_COMP, + QPC_DECOMP, + HZIP_CTX_Q_NUM +}; + +#define COMP_NAME_TO_TYPE(alg_name) \ + (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \ + !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \ + +#define TO_HEAD_SIZE(req_type) \ + (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \ + ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \ + +#define TO_HEAD(req_type) \ + (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ + ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \ + +struct hisi_zip_req { + struct acomp_req *req; + u32 sskip; + u32 dskip; + struct hisi_acc_hw_sgl *hw_src; + struct hisi_acc_hw_sgl *hw_dst; + dma_addr_t dma_src; + dma_addr_t dma_dst; + u16 req_id; +}; + +struct hisi_zip_req_q { + struct hisi_zip_req *q; + unsigned long *req_bitmap; + rwlock_t req_lock; + u16 size; +}; + +struct hisi_zip_qp_ctx { + struct hisi_qp *qp; + struct hisi_zip_sqe zip_sqe; + struct hisi_zip_req_q req_q; + struct hisi_acc_sgl_pool *sgl_pool; + struct hisi_zip *zip_dev; + struct hisi_zip_ctx *ctx; +}; + +struct hisi_zip_ctx { + struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; +}; + +static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) +{ + int ret; + u16 n; + + if (!val) + return -EINVAL; + + ret = kstrtou16(val, 10, &n); + if (ret || n == 0 || n > HZIP_SGL_SGE_MAX) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops sgl_sge_nr_ops = { + .set = sgl_sge_nr_set, + .get = param_get_int, +}; + +static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; +module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); +MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); +static DEFINE_MUTEX(hisi_zip_alg_lock); +static unsigned int hisi_zip_active_devs; + +static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) +{ + u32 val; + + val = (sqe->dw9) & ~HZIP_BUF_TYPE_M; + val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); + sqe->dw9 = val; +} + +static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) +{ + sqe->tag = tag; +} + +static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, + dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, + u32 dlen, u32 sskip, u32 dskip) +{ + memset(sqe, 0, sizeof(struct hisi_zip_sqe)); + + sqe->input_data_length = slen - sskip; + 
sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip); + sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip); + sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); + sqe->dest_avail_out = dlen - dskip; + sqe->source_addr_l = lower_32_bits(s_addr); + sqe->source_addr_h = upper_32_bits(s_addr); + sqe->dest_addr_l = lower_32_bits(d_addr); + sqe->dest_addr_h = upper_32_bits(d_addr); +} + +static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, + int alg_type, int req_type) +{ + struct device *dev = &qp->qm->pdev->dev; + int ret; + + qp->req_type = req_type; + qp->alg_type = alg_type; + qp->qp_ctx = ctx; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + dev_err(dev, "start qp failed!\n"); + return ret; + } + + ctx->qp = qp; + return 0; +} + +static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) +{ + hisi_qm_stop_qp(ctx->qp); + hisi_qm_release_qp(ctx->qp); +} + +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type) +{ + struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; + struct hisi_zip *hisi_zip; + int ret, i, j; + + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM); + if (ret) { + pr_err("Can not create zip qps!\n"); + return -ENODEV; + } + + hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm); + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + /* alg_type = 0 for compress, 1 for decompress in hw sqe */ + ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i, + req_type); + if (ret) { + for (j = i - 1; j >= 0; j--) + hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); + + hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); + return ret; + } + + hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip; + } + + return 0; +} + +static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) +{ + int i; + + for (i = 1; i >= 0; i--) + hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); +} + +static u16 get_extra_field_size(const u8 *start) +{ + return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; +} + +static u32 get_name_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 get_comment_field_size(const u8 *start) +{ + return strlen(start) + 1; +} + +static u32 __get_gzip_head_size(const u8 *src) +{ + u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); + u32 size = GZIP_HEAD_FEXTRA_SHIFT; + + if (head_flg & GZIP_HEAD_FEXTRA_BIT) + size += get_extra_field_size(src + size); + if (head_flg & GZIP_HEAD_FNAME_BIT) + size += get_name_field_size(src + size); + if (head_flg & GZIP_HEAD_FCOMMENT_BIT) + size += get_comment_field_size(src + size); + if (head_flg & GZIP_HEAD_FHCRC_BIT) + size += GZIP_HEAD_FHCRC_SIZE; + + return size; +} + +static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) +{ + struct hisi_zip_req_q *req_q; + int i, ret; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + req_q = &ctx->qp_ctx[i].req_q; + req_q->size = QM_Q_DEPTH; + + req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size), + sizeof(long), GFP_KERNEL); + if (!req_q->req_bitmap) { + ret = -ENOMEM; + if (i == 0) + return ret; + + goto err_free_loop0; + } + rwlock_init(&req_q->req_lock); + + req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), + GFP_KERNEL); + if (!req_q->q) { + ret = -ENOMEM; + if (i == 0) + goto err_free_bitmap; + else + goto err_free_loop1; + } + } + + return 0; + +err_free_loop1: + kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap); +err_free_loop0: + kfree(ctx->qp_ctx[QPC_COMP].req_q.q); +err_free_bitmap: + kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap); + return ret; +} + +static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) +{ + int i; + + for (i = 0; i < 
HZIP_CTX_Q_NUM; i++) { + kfree(ctx->qp_ctx[i].req_q.q); + kfree(ctx->qp_ctx[i].req_q.req_bitmap); + } +} + +static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) +{ + struct hisi_zip_qp_ctx *tmp; + struct device *dev; + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + tmp = &ctx->qp_ctx[i]; + dev = &tmp->qp->qm->pdev->dev; + tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1, + sgl_sge_nr); + if (IS_ERR(tmp->sgl_pool)) { + if (i == 1) + goto err_free_sgl_pool0; + return -ENOMEM; + } + } + + return 0; + +err_free_sgl_pool0: + hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev, + ctx->qp_ctx[QPC_COMP].sgl_pool); + return -ENOMEM; +} + +static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) +{ + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) + hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev, + ctx->qp_ctx[i].sgl_pool); +} + +static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, + struct hisi_zip_req *req) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + + write_lock(&req_q->req_lock); + clear_bit(req->req_id, req_q->req_bitmap); + memset(req, 0, sizeof(struct hisi_zip_req)); + write_unlock(&req_q->req_lock); +} + +static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) +{ + struct hisi_zip_sqe *sqe = data; + struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + struct zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct hisi_zip_req *req = req_q->q + sqe->tag; + struct acomp_req *acomp_req = req->req; + struct device *dev = &qp->qm->pdev->dev; + u32 status, dlen, head_size; + int err = 0; + + atomic64_inc(&dfx->recv_cnt); + status = sqe->dw3 & HZIP_BD_STATUS_M; + if (status != 0 && status != HZIP_NC_ERR) { + dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", + (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, + sqe->produced); + atomic64_inc(&dfx->err_bd_cnt); + err = -EIO; + } + dlen = sqe->produced; + + hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); + + head_size = (qp->alg_type == 0) ? 
TO_HEAD_SIZE(qp->req_type) : 0; + acomp_req->dlen = dlen + head_size; + + if (acomp_req->base.complete) + acomp_request_complete(acomp_req, err); + + hisi_zip_remove_req(qp_ctx, req); +} + +static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, + void (*fn)(struct hisi_qp *, void *)) +{ + int i; + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) + ctx->qp_ctx[i].qp->req_cb = fn; +} + +static int hisi_zip_acomp_init(struct crypto_acomp *tfm) +{ + const char *alg_name = crypto_tfm_alg_name(&tfm->base); + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); + struct device *dev; + int ret; + + ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name)); + if (ret) { + pr_err("Init ctx failed!\n"); + return ret; + } + + dev = &ctx->qp_ctx[0].qp->qm->pdev->dev; + ret = hisi_zip_create_req_q(ctx); + if (ret) { + dev_err(dev, "Create request queue failed!\n "); + goto err_ctx_exit; + } + + ret = hisi_zip_create_sgl_pool(ctx); + if (ret) { + dev_err(dev, "Create sgl pool failed!\n "); + goto err_release_req_q; + } + + hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb); + + return 0; + +err_release_req_q: + hisi_zip_release_req_q(ctx); +err_ctx_exit: + hisi_zip_ctx_exit(ctx); + return ret; +} + +static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + hisi_zip_set_acomp_cb(ctx, NULL); + hisi_zip_release_sgl_pool(ctx); + hisi_zip_release_req_q(ctx); + hisi_zip_ctx_exit(ctx); +} + +static int add_comp_head(struct scatterlist *dst, u8 req_type) +{ + int head_size = TO_HEAD_SIZE(req_type); + const u8 *head = TO_HEAD(req_type); + int ret; + + ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); + if (ret != head_size) { + pr_err("The head size of buffer is wrong!\n"); + return -ENOMEM; + } + + return head_size; +} + +static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) +{ + char buf[HZIP_GZIP_HEAD_BUF]; + + sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); + + return __get_gzip_head_size(buf); +} + +static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) +{ + if (!acomp_req->src || !acomp_req->slen) + return -EINVAL; + + if ((req_type == HZIP_ALG_TYPE_GZIP) && + (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) + return -EINVAL; + + switch (req_type) { + case HZIP_ALG_TYPE_ZLIB: + return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); + case HZIP_ALG_TYPE_GZIP: + return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); + default: + pr_err("request type does not support!\n"); + return -EINVAL; + } +} + +static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, + struct hisi_zip_qp_ctx *qp_ctx, + size_t head_size, bool is_comp) +{ + struct hisi_zip_req_q *req_q = &qp_ctx->req_q; + struct hisi_zip_req *q = req_q->q; + struct hisi_zip_req *req_cache; + int req_id; + + write_lock(&req_q->req_lock); + + req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); + if (req_id >= req_q->size) { + write_unlock(&req_q->req_lock); + dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); + return ERR_PTR(-EPERM); + } + set_bit(req_id, req_q->req_bitmap); + + req_cache = q + req_id; + req_cache->req_id = req_id; + req_cache->req = req; + + if (is_comp) { + req_cache->sskip = 0; + req_cache->dskip = head_size; + } else { + req_cache->sskip = head_size; + req_cache->dskip = 0; + } + + write_unlock(&req_q->req_lock); + + return req_cache; +} + +static int hisi_zip_do_work(struct hisi_zip_req *req, + struct hisi_zip_qp_ctx *qp_ctx) +{ + struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; + struct acomp_req *a_req = req->req; + 
struct hisi_qp *qp = qp_ctx->qp; + struct device *dev = &qp->qm->pdev->dev; + struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct zip_dfx *dfx = &qp_ctx->zip_dev->dfx; + dma_addr_t input; + dma_addr_t output; + int ret; + + if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) + return -EINVAL; + + req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, + req->req_id << 1, &input); + if (IS_ERR(req->hw_src)) { + dev_err(dev, "the src map to hw SGL failed!\n"); + return PTR_ERR(req->hw_src); + } + req->dma_src = input; + + req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, + (req->req_id << 1) + 1, + &output); + if (IS_ERR(req->hw_dst)) { + dev_err(dev, "the dst map to hw SGL failed!\n"); + ret = PTR_ERR(req->hw_dst); + goto err_unmap_input; + } + req->dma_dst = output; + + hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen, + a_req->dlen, req->sskip, req->dskip); + hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); + hisi_zip_config_tag(zip_sqe, req->req_id); + + /* send command to start a task */ + atomic64_inc(&dfx->send_cnt); + ret = hisi_qp_send(qp, zip_sqe); + if (ret < 0) { + atomic64_inc(&dfx->send_busy_cnt); + ret = -EPERM; + dev_dbg_ratelimited(dev, "send task message failed!\n"); + goto err_unmap_output; + } + + return -EINPROGRESS; + +err_unmap_output: + hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); +err_unmap_input: + hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); + return ret; +} + +static int hisi_zip_acompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size; + int ret; + + /* let's output compression head now */ + head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); + if (head_size < 0) + return -ENOMEM; + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); + if (IS_ERR(req)) { + dev_err_ratelimited(dev, "create request before compress failed!\n"); + return PTR_ERR(req); + } + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_err_ratelimited(dev, "do compress work failed!\n"); + hisi_zip_remove_req(qp_ctx, req); + } + + return ret; +} + +static int hisi_zip_adecompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size; + int ret; + + head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); + if (head_size < 0) + return -ENOMEM; + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); + if (IS_ERR(req)) { + dev_err_ratelimited(dev, "create request before decompress failed!\n"); + return PTR_ERR(req); + } + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_err_ratelimited(dev, "do decompress work failed!\n"); + hisi_zip_remove_req(qp_ctx, req); + } + + return ret; +} + +static struct acomp_alg hisi_zip_acomp_zlib = { + .init = hisi_zip_acomp_init, + .exit = hisi_zip_acomp_exit, + .compress = hisi_zip_acompress, + .decompress = hisi_zip_adecompress, + .base = { + .cra_name = "zlib-deflate", + .cra_driver_name = "hisi-zlib-acomp", + .cra_module = THIS_MODULE, + .cra_priority = HZIP_ALG_PRIORITY, + .cra_ctxsize = sizeof(struct hisi_zip_ctx), + } +}; + +static struct acomp_alg hisi_zip_acomp_gzip = { + .init = 
hisi_zip_acomp_init, + .exit = hisi_zip_acomp_exit, + .compress = hisi_zip_acompress, + .decompress = hisi_zip_adecompress, + .base = { + .cra_name = "gzip", + .cra_driver_name = "hisi-gzip-acomp", + .cra_module = THIS_MODULE, + .cra_priority = HZIP_ALG_PRIORITY, + .cra_ctxsize = sizeof(struct hisi_zip_ctx), + } +}; + +int hisi_zip_register_to_crypto(void) +{ + int ret = 0; + + mutex_lock(&hisi_zip_alg_lock); + if (++hisi_zip_active_devs == 1) { + ret = crypto_register_acomp(&hisi_zip_acomp_zlib); + if (ret) { + pr_err("Zlib acomp algorithm registration failed\n"); + goto err_unlock; + } + + ret = crypto_register_acomp(&hisi_zip_acomp_gzip); + if (ret) { + pr_err("Gzip acomp algorithm registration failed\n"); + crypto_unregister_acomp(&hisi_zip_acomp_zlib); + } + } + +err_unlock: + mutex_unlock(&hisi_zip_alg_lock); + return ret; +} + +void hisi_zip_unregister_from_crypto(void) +{ + mutex_lock(&hisi_zip_alg_lock); + if (--hisi_zip_active_devs == 0) { + crypto_unregister_acomp(&hisi_zip_acomp_gzip); + crypto_unregister_acomp(&hisi_zip_acomp_zlib); + } + mutex_unlock(&hisi_zip_alg_lock); +} diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c new file mode 100644 index 0000000000000000000000000000000000000000..e9e4b43acdbf64fd7a9be51ba356eece579a7be3 --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018-2019 HiSilicon Limited. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zip.h" + +#define HZIP_QUEUE_NUM_V1 4096 +#define HZIP_QUEUE_NUM_V2 1024 + +#define PCI_DEVICE_ID_ZIP_PF 0xa250 +#define PCI_DEVICE_ID_ZIP_VF 0xa251 + +#define HZIP_CLOCK_GATE_CTRL 0x301004 +#define COMP0_ENABLE BIT(0) +#define COMP1_ENABLE BIT(1) +#define DECOMP0_ENABLE BIT(2) +#define DECOMP1_ENABLE BIT(3) +#define DECOMP2_ENABLE BIT(4) +#define DECOMP3_ENABLE BIT(5) +#define DECOMP4_ENABLE BIT(6) +#define DECOMP5_ENABLE BIT(7) +#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \ + DECOMP0_ENABLE | DECOMP1_ENABLE | \ + DECOMP2_ENABLE | DECOMP3_ENABLE | \ + DECOMP4_ENABLE | DECOMP5_ENABLE) +#define HZIP_DECOMP_CHECK_ENABLE BIT(16) +#define HZIP_FSM_MAX_CNT 0x301008 + +#define HZIP_PORT_ARCA_CHE_0 0x301040 +#define HZIP_PORT_ARCA_CHE_1 0x301044 +#define HZIP_PORT_AWCA_CHE_0 0x301060 +#define HZIP_PORT_AWCA_CHE_1 0x301064 +#define HZIP_CACHE_ALL_EN 0xffffffff + +#define HZIP_BD_RUSER_32_63 0x301110 +#define HZIP_SGL_RUSER_32_63 0x30111c +#define HZIP_DATA_RUSER_32_63 0x301128 +#define HZIP_DATA_WUSER_32_63 0x301134 +#define HZIP_BD_WUSER_32_63 0x301140 + +#define HZIP_QM_IDEL_STATUS 0x3040e4 +#define HZIP_MASTER_GLOBAL_CTRL 0x300000 +#define HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 +#define HZIP_MASTER_TRANS_RETURN 0x300150 +#define HZIP_MASTER_TRANS_RETURN_RW 0x3 + +#define HZIP_CORE_DEBUG_COMP_0 0x302000 +#define HZIP_CORE_DEBUG_COMP_1 0x303000 +#define HZIP_CORE_DEBUG_DECOMP_0 0x304000 +#define HZIP_CORE_DEBUG_DECOMP_1 0x305000 +#define HZIP_CORE_DEBUG_DECOMP_2 0x306000 +#define HZIP_CORE_DEBUG_DECOMP_3 0x307000 +#define HZIP_CORE_DEBUG_DECOMP_4 0x308000 +#define HZIP_CORE_DEBUG_DECOMP_5 0x309000 + +#define HZIP_CORE_INT_SOURCE 0x3010A0 +#define HZIP_CORE_INT_MASK 0x3010A4 +#define HZIP_CORE_INT_SET 0x3010A8 +#define HZIP_HW_ERROR_IRQ_ENABLE 1 +#define HZIP_HW_ERROR_IRQ_DISABLE 0 +#define HZIP_CORE_INT_STATUS 0x3010AC +#define HZIP_CORE_INT_STATUS_M_ECC BIT(1) +#define HZIP_CORE_SRAM_ECC_ERR_INFO 
0x301148 +#define HZIP_CORE_INT_RAS_CE_ENB 0x301160 +#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 +#define HZIP_CORE_INT_RAS_FE_ENB 0x301168 +#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE +#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 +#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 +#define HZIP_CORE_INT_DISABLE 0x000007FF +#define HZIP_COMP_CORE_NUM 2 +#define HZIP_DECOMP_CORE_NUM 6 +#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ + HZIP_DECOMP_CORE_NUM) +#define HZIP_SQE_SIZE 128 +#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH) +#define HZIP_PF_DEF_Q_NUM 64 +#define HZIP_PF_DEF_Q_BASE 0 + +#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 +#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) +#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C +#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) +#define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF +#define HZIP_WR_PORT BIT(11) + +#define HZIP_BUF_SIZE 22 +#define FORMAT_DECIMAL 10 + +#define HZIP_SQE_MASK_OFFSET 64 +#define HZIP_SQE_MASK_LEN 48 + +#define HZIP_CNT_CLR_CE_EN BIT(0) +#define HZIP_RO_CNT_CLR_CE_EN BIT(2) +#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \ + HZIP_RO_CNT_CLR_CE_EN) + +static const char hisi_zip_name[] = "hisi_zip"; +static struct dentry *hzip_debugfs_root; +static struct hisi_qm_list zip_devices; + +struct hisi_zip_hw_error { + u32 int_msk; + const char *msg; +}; + +struct zip_dfx_item { + const char *name; + u32 offset; +}; + +static struct zip_dfx_item zip_dfx_files[] = { + {"send_cnt", offsetof(struct zip_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct zip_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct zip_dfx, send_busy_cnt)}, + {"err_bd_cnt", offsetof(struct zip_dfx, err_bd_cnt)}, +}; + +static const struct hisi_zip_hw_error zip_hw_error[] = { + { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" }, + { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, + { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" }, + { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" }, + { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" }, + { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" }, + { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" }, + { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" }, + { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, + { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, + { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, + { /* sentinel */ } +}; + +enum ctrl_debug_file_index { + HZIP_CURRENT_QM, + HZIP_CLEAR_ENABLE, + HZIP_DEBUG_FILE_NUM, +}; + +static const char * const ctrl_debug_file_name[] = { + [HZIP_CURRENT_QM] = "current_qm", + [HZIP_CLEAR_ENABLE] = "clear_enable", +}; + +struct ctrl_debug_file { + enum ctrl_debug_file_index index; + spinlock_t lock; + struct hisi_zip_ctrl *ctrl; +}; + +/* + * One ZIP controller has one PF and multiple VFs, some global configurations + * which PF has need this structure. + * + * Just relevant for PF. 
+ */ +struct hisi_zip_ctrl { + struct hisi_zip *hisi_zip; + struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; +}; + +enum { + HZIP_COMP_CORE0, + HZIP_COMP_CORE1, + HZIP_DECOMP_CORE0, + HZIP_DECOMP_CORE1, + HZIP_DECOMP_CORE2, + HZIP_DECOMP_CORE3, + HZIP_DECOMP_CORE4, + HZIP_DECOMP_CORE5, +}; + +static const u64 core_offsets[] = { + [HZIP_COMP_CORE0] = 0x302000, + [HZIP_COMP_CORE1] = 0x303000, + [HZIP_DECOMP_CORE0] = 0x304000, + [HZIP_DECOMP_CORE1] = 0x305000, + [HZIP_DECOMP_CORE2] = 0x306000, + [HZIP_DECOMP_CORE3] = 0x307000, + [HZIP_DECOMP_CORE4] = 0x308000, + [HZIP_DECOMP_CORE5] = 0x309000, +}; + +static struct debugfs_reg32 hzip_dfx_regs[] = { + {"HZIP_GET_BD_NUM ", 0x00ull}, + {"HZIP_GET_RIGHT_BD ", 0x04ull}, + {"HZIP_GET_ERROR_BD ", 0x08ull}, + {"HZIP_DONE_BD_NUM ", 0x0cull}, + {"HZIP_WORK_CYCLE ", 0x10ull}, + {"HZIP_IDLE_CYCLE ", 0x18ull}, + {"HZIP_MAX_DELAY ", 0x20ull}, + {"HZIP_MIN_DELAY ", 0x24ull}, + {"HZIP_AVG_DELAY ", 0x28ull}, + {"HZIP_MEM_VISIBLE_DATA ", 0x30ull}, + {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull}, + {"HZIP_CONSUMED_BYTE ", 0x38ull}, + {"HZIP_PRODUCED_BYTE ", 0x40ull}, + {"HZIP_COMP_INF ", 0x70ull}, + {"HZIP_PRE_OUT ", 0x78ull}, + {"HZIP_BD_RD ", 0x7cull}, + {"HZIP_BD_WR ", 0x80ull}, + {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull}, + {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull}, + {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull}, + {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull}, + {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, +}; + +static int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static const struct kernel_param_ops uacce_mode_ops = { + .set = uacce_mode_set, + .get = param_get_int, +}; + +static int uacce_mode = UACCE_MODE_NOUACCE; +module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444); +MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2"); + +static int pf_q_num_set(const char *val, const struct kernel_param *kp) +{ + return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF); +} + +static const struct kernel_param_ops pf_q_num_ops = { + .set = pf_q_num_set, + .get = param_get_int, +}; + +static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); +MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); + +static int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + return vf_num_set(val, kp); +} + +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + +static const struct pci_device_id hisi_zip_dev_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); + +int zip_create_qps(struct hisi_qp **qps, int ctx_num) +{ + int node = cpu_to_node(raw_smp_processor_id()); + + return hisi_qm_alloc_qps_node(node, &zip_devices, + qps, ctx_num, 0); +} + +static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) +{ + void __iomem *base = qm->io_base; + + /* qm user domain */ + writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); + writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE); + writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1); + writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE); + writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE); + + /* qm cache */ + writel(AXI_M_CFG, base + QM_AXI_M_CFG); + 
writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE); + + /* cache */ + writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0); + writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1); + writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0); + writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1); + + /* user domain configurations */ + writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); + + if (qm->use_sva) { + writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63); + writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63); + } else { + writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63); + writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); + } + + /* let's open all compression/decompression cores */ + writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN, + base + HZIP_CLOCK_GATE_CTRL); + + /* enable sqc,cqc writeback */ + writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | + CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | + FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + + return 0; +} + +/* hisi_zip_debug_regs_clear() - clear the zip debug regs */ +static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) +{ + int i, j; + + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + + /* enable rdclr_en */ + writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); + for (i = 0; i < ARRAY_SIZE(core_offsets); i++) + for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++) + readl(qm->io_base + core_offsets[i] + + hzip_dfx_regs[j].offset); + + /* clear rdclr_en */ + writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); + + hisi_qm_debug_regs_clear(qm); +} + +static int hisi_zip_hw_err_pre_set(struct hisi_qm *qm, u32 *val) +{ + if (qm->ver == QM_HW_V1) { + writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK); + pci_info(qm->pdev, "ZIP v%d cannot support hw error handle!\n", + qm->ver); + return -EINVAL; + } + + /* configure error type */ + writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); + writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); + writel(HZIP_CORE_INT_RAS_NFE_ENABLE, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + + *val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + return 0; +} + +static void hisi_zip_hw_error_enable(struct hisi_qm *qm) +{ + u32 val; + int ret; + + ret = hisi_zip_hw_err_pre_set(qm, &val); + if (ret) + return; + + /* clear ZIP hw error source if having */ + writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_SOURCE); + + /* enable ZIP hw error interrupts */ + writel(0, qm->io_base + HZIP_CORE_INT_MASK); + + /* enable ZIP block master OOO when m-bit error occur */ + val = val | HZIP_AXI_SHUTDOWN_ENABLE; + + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); +} + +static void hisi_zip_hw_error_disable(struct hisi_qm *qm) +{ + u32 val; + int ret; + + ret = hisi_zip_hw_err_pre_set(qm, &val); + if (ret) + return; + + /* disable ZIP hw error interrupts */ + writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK); + + /* disable ZIP block master OOO when m-bit error occur */ + val = val & HZIP_AXI_SHUTDOWN_DISABLE; + + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); +} + +static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file 
*file) +{ + struct hisi_zip *zip = file->ctrl->hisi_zip; + + return &zip->qm; +} + +static u32 current_qm_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int current_qm_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 vfq_num; + u32 tmp; + + if (val > qm->vfs_num) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (val == 0) { + qm->debug.curr_qm_qp_num = qm->qp_num; + } else { + vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num; + if (val == qm->vfs_num) { + qm->debug.curr_qm_qp_num = + qm->ctrl_q_num - qm->qp_num - + (qm->vfs_num - 1) * vfq_num; + } else { + qm->debug.curr_qm_qp_num = vfq_num; + } + } + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + +static u32 clear_enable_read(struct ctrl_debug_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & + HZIP_SOFT_CTRL_CNT_CLR_CE_BIT; +} + +static int clear_enable_write(struct ctrl_debug_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val != 1 && val != 0) + return -EINVAL; + + tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & + ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val; + writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); + + return 0; +} + +static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HZIP_BUF_SIZE]; + u32 val; + int ret; + + spin_lock(&file->lock); + switch (file->index) { + case HZIP_CURRENT_QM: + val = current_qm_read(file); + break; + case HZIP_CLEAR_ENABLE: + val = clear_enable_read(file); + break; + default: + spin_unlock(&file->lock); + return -EINVAL; + } + spin_unlock(&file->lock); + ret = snprintf(tbuf, HZIP_BUF_SIZE, "%u\n", val); + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct ctrl_debug_file *file = filp->private_data; + char tbuf[HZIP_BUF_SIZE]; + unsigned long val; + int len, ret; + + if (*pos != 0) + return 0; + + if (count >= HZIP_BUF_SIZE) + return -ENOSPC; + + len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count); + if (len < 0) + return len; + + tbuf[len] = '\0'; + if (kstrtoul(tbuf, 0, &val)) + return -EFAULT; + + spin_lock(&file->lock); + switch (file->index) { + case HZIP_CURRENT_QM: + ret = current_qm_write(file, val); + if (ret) + goto err_input; + break; + case HZIP_CLEAR_ENABLE: + ret = clear_enable_write(file, val); + if (ret) + goto err_input; + break; + default: + ret = -EINVAL; + goto err_input; + } + spin_unlock(&file->lock); + + return count; + +err_input: + spin_unlock(&file->lock); + return ret; +} + +static const struct file_operations ctrl_debug_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hisi_zip_ctrl_debug_read, + .write = hisi_zip_ctrl_debug_write, +}; + +static int zip_debugfs_atomic64_set(void *data, u64 val) +{ + if (!val) + atomic64_set((atomic64_t *)data, 0); + else + 
return -EINVAL; + + return 0; +} + +static int zip_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get, + zip_debugfs_atomic64_set, "%llu\n"); + +static int hisi_zip_core_debug_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct debugfs_regset32 *regset; + struct dentry *tmp_d; + char buf[HZIP_BUF_SIZE]; + int i, ret; + + for (i = 0; i < HZIP_CORE_NUM; i++) { + if (i < HZIP_COMP_CORE_NUM) + ret = snprintf(buf, HZIP_BUF_SIZE, "comp_core%d", i); + else + ret = snprintf(buf, HZIP_BUF_SIZE, + "decomp_core%d", i - HZIP_COMP_CORE_NUM); + if (ret < 0) + return -EINVAL; + + tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); + if (!tmp_d) + return -ENOENT; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return -ENOENT; + + regset->regs = hzip_dfx_regs; + regset->nregs = ARRAY_SIZE(hzip_dfx_regs); + regset->base = qm->io_base + core_offsets[i]; + + debugfs_create_regset32("regs", 0444, tmp_d, regset); + } + + return 0; +} + +static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) +{ + struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); + struct zip_dfx *dfx = &zip->dfx; + struct dentry *tmp_dir; + void *data; + int i; + + tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root); + for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset); + debugfs_create_file(zip_dfx_files[i].name, + 0644, + tmp_dir, + data, + &zip_atomic64_ops); + } +} + +static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) +{ + struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); + struct dentry *tmp; + int i; + + for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { + spin_lock_init(&zip->ctrl->files[i].lock); + zip->ctrl->files[i].ctrl = zip->ctrl; + zip->ctrl->files[i].index = i; + + tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600, + qm->debug.debug_root, zip->ctrl->files + i, + &ctrl_debug_fops); + if (!tmp) + return -ENOENT; + } + + return hisi_zip_core_debug_init(qm); +} + +static int hisi_zip_debugfs_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct dentry *dev_d; + int ret; + + dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); + if (!dev_d) + return -ENOENT; + + qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; + qm->debug.debug_root = dev_d; + ret = hisi_qm_debug_init(qm); + if (ret) + goto failed_to_create; + + if (qm->fun_type == QM_HW_PF) { + ret = hisi_zip_ctrl_debug_init(qm); + if (ret) + goto failed_to_create; + } + + hisi_zip_dfx_debug_init(qm); + + return 0; + +failed_to_create: + debugfs_remove_recursive(hzip_debugfs_root); + return ret; +} + +static void hisi_zip_debugfs_exit(struct hisi_qm *qm) +{ + debugfs_remove_recursive(qm->debug.debug_root); + + if (qm->fun_type == QM_HW_PF) { + hisi_zip_debug_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + } +} + +static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_zip_hw_error *err = zip_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { + err_val = readl(qm->io_base + + HZIP_CORE_SRAM_ECC_ERR_INFO); + dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", + ((err_val >> + 
HZIP_SRAM_ECC_ERR_NUM_SHIFT) & + 0xFF)); + } + } + err++; + } +} + +static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HZIP_CORE_INT_STATUS); +} + +static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); +} + +static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val & HZIP_AXI_SHUTDOWN_DISABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val | HZIP_AXI_SHUTDOWN_ENABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); +} + +static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) +{ + u32 nfe_enb; + + nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + + writel(HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_SET); +} + +static void hisi_zip_err_ini_set(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status; + qm->err_ini.err_info.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + qm->err_ini.err_info.ce = QM_BASE_CE; + qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT; + qm->err_ini.err_info.fe = 0; + qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID; + qm->err_ini.err_info.acpi_rst = "ZRST"; + qm->err_ini.hw_err_disable = hisi_zip_hw_error_disable; + qm->err_ini.hw_err_enable = hisi_zip_hw_error_enable; + qm->err_ini.set_usr_domain_cache = hisi_zip_set_user_domain_and_cache; + qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error; + qm->err_ini.open_axi_master_ooo = hisi_zip_open_axi_master_ooo; + qm->err_ini.close_axi_master_ooo = hisi_zip_close_axi_master_ooo; + qm->err_ini.err_info.msi_wr_port = HZIP_WR_PORT; +} + +static int hisi_zip_pf_probe_init(struct hisi_qm *qm) +{ + struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); + struct hisi_zip_ctrl *ctrl; + int ret; + + ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + zip->ctrl = ctrl; + ctrl->hisi_zip = zip; + + switch (qm->ver) { + case QM_HW_V1: + qm->ctrl_q_num = HZIP_QUEUE_NUM_V1; + break; + + case QM_HW_V2: + qm->ctrl_q_num = HZIP_QUEUE_NUM_V2; + break; + + default: + return -EINVAL; + } + + ret = qm->err_ini.set_usr_domain_cache(qm); + if (ret) + return ret; + + hisi_qm_dev_err_init(qm); + + hisi_zip_debug_regs_clear(qm); + + return 0; +} + +static int hisi_zip_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +{ + int ret; + + qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n"; + qm->uacce_mode = uacce_mode; + qm->pdev = pdev; + ret = hisi_qm_pre_init(qm, pf_q_num, HZIP_PF_DEF_Q_BASE); + if (ret) + return ret; + qm->sqe_size = HZIP_SQE_SIZE; + qm->dev_name = hisi_zip_name; + qm->qm_list = &zip_devices; + hisi_zip_err_ini_set(qm); + + return 0; +} + +static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_zip *zip; + struct hisi_qm *qm; + int ret; + + zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL); + if (!zip) + return -ENOMEM; + + qm = &zip->qm; + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? 
+ QM_HW_PF : QM_HW_VF; + + ret = hisi_zip_qm_pre_init(qm, pdev); + if (ret) + return ret; + + hisi_qm_add_to_list(qm, &zip_devices); + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + qm->qp_base = HZIP_PF_DEF_Q_NUM; + qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; + } + + ret = hisi_qm_init(qm); + if (ret) { + pci_err(pdev, "Failed to init qm (%d)!\n", ret); + goto err_remove_from_list; + } + + if (qm->fun_type == QM_HW_PF) { + ret = hisi_zip_pf_probe_init(qm); + if (ret) { + pci_err(pdev, "Failed to init pf probe (%d)!\n", ret); + goto err_remove_from_list; + } + } + + ret = hisi_qm_start(qm); + if (ret) { + pci_err(pdev, "Failed to start qm (%d)!\n", ret); + goto err_qm_uninit; + } + + ret = hisi_zip_debugfs_init(qm); + if (ret) + pci_err(pdev, "Failed to init debugfs (%d)!\n", ret); + + ret = hisi_zip_register_to_crypto(); + if (ret < 0) { + pci_err(pdev, "Failed to register driver to crypto!\n"); + goto err_qm_stop; + } + + if (qm->fun_type == QM_HW_PF && vfs_num > 0) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_crypto_unregister; + } + + return 0; + +err_crypto_unregister: + hisi_zip_unregister_from_crypto(); +err_qm_stop: + hisi_zip_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); +err_qm_uninit: + hisi_qm_uninit(qm); +err_remove_from_list: + hisi_qm_del_from_list(qm, &zip_devices); + return ret; +} + +static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return hisi_qm_sriov_disable(pdev, &zip_devices); + else + return hisi_qm_sriov_enable(pdev, num_vfs); +} + +static void hisi_zip_remove(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + + hisi_qm_remove_wait_delay(qm, &zip_devices); + + if (qm->fun_type == QM_HW_PF && qm->vfs_num) + hisi_qm_sriov_disable(pdev, NULL); + + hisi_zip_unregister_from_crypto(); + + hisi_zip_debugfs_exit(qm); + hisi_qm_stop(qm, QM_NORMAL); + + if (qm->fun_type == QM_HW_PF) + hisi_qm_dev_err_uninit(qm); + + hisi_qm_uninit(qm); + hisi_qm_del_from_list(qm, &zip_devices); +} + +static const struct pci_error_handlers hisi_zip_err_handler = { + .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, +}; + +static struct pci_driver hisi_zip_pci_driver = { + .name = "hisi_zip", + .id_table = hisi_zip_dev_ids, + .probe = hisi_zip_probe, + .remove = hisi_zip_remove, + .sriov_configure = hisi_zip_sriov_configure, + .err_handler = &hisi_zip_err_handler, + .shutdown = hisi_qm_dev_shutdown, +}; + +static void hisi_zip_register_debugfs(void) +{ + if (!debugfs_initialized()) + return; + + hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); + if (IS_ERR_OR_NULL(hzip_debugfs_root)) + hzip_debugfs_root = NULL; +} + +static void hisi_zip_unregister_debugfs(void) +{ + debugfs_remove_recursive(hzip_debugfs_root); +} + +static int __init hisi_zip_init(void) +{ + int ret; + + INIT_LIST_HEAD(&zip_devices.list); + mutex_init(&zip_devices.lock); + zip_devices.check = NULL; + + hisi_zip_register_debugfs(); + + ret = pci_register_driver(&hisi_zip_pci_driver); + if (ret < 0) { + hisi_zip_unregister_debugfs(); + pr_err("Failed to register pci driver.\n"); + } + + return ret; +} + +static void __exit hisi_zip_exit(void) +{ + pci_unregister_driver(&hisi_zip_pci_driver); + hisi_zip_unregister_debugfs(); +} + +module_init(hisi_zip_init); +module_exit(hisi_zip_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zhou Wang "); +MODULE_DESCRIPTION("Driver for HiSilicon ZIP 
accelerator"); diff --git a/drivers/crypto/hisilicon/zip/zip_usr_if.h b/drivers/crypto/hisilicon/zip/zip_usr_if.h new file mode 100644 index 0000000000000000000000000000000000000000..d5dbaa844ed03bff2b0966ea63f0dd1fc7d1bffd --- /dev/null +++ b/drivers/crypto/hisilicon/zip/zip_usr_if.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018-2019 HiSilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef HISI_ZIP_USR_IF_H +#define HISI_ZIP_USR_IF_H + +struct hisi_zip_sqe { + __u32 consumed; + __u32 produced; + __u32 comp_data_length; + __u32 dw3; + __u32 input_data_length; + __u32 lba_l; + __u32 lba_h; + __u32 dw7; + __u32 dw8; + __u32 dw9; + __u32 dw10; + __u32 dif_info; + __u32 dw12; + __u32 tag; + __u32 dest_avail_out; + __u32 rsvd0; + __u32 comp_head_addr_l; + __u32 comp_head_addr_h; + __u32 source_addr_l; + __u32 source_addr_h; + __u32 dest_addr_l; + __u32 dest_addr_h; + __u32 stream_ctx_addr_l; + __u32 stream_ctx_addr_h; + __u32 cipher_key1_addr_l; + __u32 cipher_key1_addr_h; + __u32 cipher_key2_addr_l; + __u32 cipher_key2_addr_h; + __u32 rsvd1[4]; +}; + +#endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 3aef1d43e43510130dc359d61c911e51209e9a7f..42a3830fbd1901f02529d10071a9a6453bf42cbe 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -51,6 +51,8 @@ struct safexcel_cipher_ctx { struct safexcel_cipher_req { enum safexcel_cipher_direction direction; + /* Number of result descriptors associated to the request */ + unsigned int rdescs; bool needs_inv; }; @@ -333,7 +335,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin *ret = 0; - do { + if (unlikely(!sreq->rdescs)) + return 0; + + while (sreq->rdescs--) { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, @@ -346,7 +351,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; - } while (!rdesc->last_seg); + } safexcel_complete(priv, ring); @@ -501,6 +506,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *base, + struct safexcel_cipher_req *sreq, bool *should_complete, int *ret) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); @@ -509,7 +515,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = 0; - do { + if (unlikely(!sreq->rdescs)) + return 0; + + while (sreq->rdescs--) { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, @@ -522,7 +531,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; - } while (!rdesc->last_seg); + } safexcel_complete(priv, ring); @@ -564,7 +573,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, if (sreq->needs_inv) { sreq->needs_inv = false; - err = safexcel_handle_inv_result(priv, ring, async, + err = safexcel_handle_inv_result(priv, ring, async, sreq, should_complete, ret); } else { err = 
safexcel_handle_req_result(priv, ring, async, req->src, @@ -587,7 +596,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, if (sreq->needs_inv) { sreq->needs_inv = false; - err = safexcel_handle_inv_result(priv, ring, async, + err = safexcel_handle_inv_result(priv, ring, async, sreq, should_complete, ret); } else { err = safexcel_handle_req_result(priv, ring, async, req->src, @@ -633,6 +642,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, ret = safexcel_send_req(async, ring, sreq, req->src, req->dst, req->cryptlen, 0, 0, req->iv, commands, results); + + sreq->rdescs = *results; return ret; } @@ -655,6 +666,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, req->cryptlen, req->assoclen, crypto_aead_authsize(tfm), req->iv, commands, results); + sreq->rdescs = *results; return ret; } diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index e01c46387df8d8823f4a9434ea0db3b9992583c4..519086730791b9ab53898e7f0464f0f3742cc552 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c @@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx, else from = scc->black_memory; - dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from, + dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from, ctx->dst_nents * 8); len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents, from, ctx->size, ctx->offset); if (!len) { - dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len); + dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len); return -EINVAL; } @@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx, len = sg_pcopy_to_buffer(req->src, ctx->src_nents, to, len, ctx->offset); if (!len) { - dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len); + dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len); return -EINVAL; } @@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx, return; } - dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n", - (void *)readl(scc->base + SCC_SCM_RED_START), - (void *)readl(scc->base + SCC_SCM_BLACK_START)); + dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n", + readl(scc->base + SCC_SCM_RED_START), + readl(scc->base + SCC_SCM_BLACK_START)); /* clear interrupt control registers */ writel(SCC_SCM_INTR_CTRL_CLR_INTR, diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 56bd28174f5251c11c8996a959fc7e96160ee6ac..b926098f70ffdb213bbfb1267cf6e473cfd880b4 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -28,9 +28,24 @@ #define DCP_MAX_CHANS 4 #define DCP_BUF_SZ PAGE_SIZE +#define DCP_SHA_PAY_SZ 64 #define DCP_ALIGNMENT 64 +/* + * Null hashes to align with hw behavior on imx6sl and ull + * these are flipped for consistency with hw output + */ +const uint8_t sha1_null_hash[] = + "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf" + "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda"; + +const uint8_t sha256_null_hash[] = + "\x55\xb8\x52\x78\x1b\x99\x95\xa4" + "\x4c\x93\x9b\x64\xe4\x41\xae\x27" + "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a" + "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3"; + /* DCP DMA descriptor. 
*/ struct dcp_dma_desc { uint32_t next_cmd_addr; @@ -48,6 +63,7 @@ struct dcp_coherent_block { uint8_t aes_in_buf[DCP_BUF_SZ]; uint8_t aes_out_buf[DCP_BUF_SZ]; uint8_t sha_in_buf[DCP_BUF_SZ]; + uint8_t sha_out_buf[DCP_SHA_PAY_SZ]; uint8_t aes_key[2 * AES_KEYSIZE_128]; @@ -209,6 +225,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, DCP_BUF_SZ, DMA_FROM_DEVICE); + if (actx->fill % AES_BLOCK_SIZE) { + dev_err(sdcp->dev, "Invalid block size!\n"); + ret = -EINVAL; + goto aes_done_run; + } + /* Fill in the DMA descriptor. */ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | MXS_DCP_CONTROL0_INTERRUPT | @@ -238,6 +260,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, ret = mxs_dcp_start_dma(actx); +aes_done_run: dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); @@ -264,13 +287,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) uint8_t *out_tmp, *src_buf, *dst_buf = NULL; uint32_t dst_off = 0; + uint32_t last_out_len = 0; uint8_t *key = sdcp->coh->aes_key; int ret = 0; int split = 0; - unsigned int i, len, clen, rem = 0; + unsigned int i, len, clen, rem = 0, tlen = 0; int init = 0; + bool limit_hit = false; actx->fill = 0; @@ -289,6 +314,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) for_each_sg(req->src, src, nents, i) { src_buf = sg_virt(src); len = sg_dma_len(src); + tlen += len; + limit_hit = tlen > req->nbytes; + + if (limit_hit) + len = req->nbytes - (tlen - len); do { if (actx->fill + len > out_off) @@ -305,13 +335,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * If we filled the buffer or this is the last SG, * submit the buffer. */ - if (actx->fill == out_off || sg_is_last(src)) { + if (actx->fill == out_off || sg_is_last(src) || + limit_hit) { ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; out_tmp = out_buf; + last_out_len = actx->fill; while (dst && actx->fill) { if (!split) { dst_buf = sg_virt(dst); @@ -334,6 +366,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) } } } while (len); + + if (limit_hit) + break; + } + + /* Copy the IV for CBC for chaining */ + if (!rctx->ecb) { + if (rctx->enc) + memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE), + AES_BLOCK_SIZE); + else + memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE), + AES_BLOCK_SIZE); } return ret; @@ -513,8 +558,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); - struct hash_alg_common *halg = crypto_hash_alg_common(tfm); - struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; dma_addr_t digest_phys = 0; @@ -536,10 +579,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req) desc->payload = 0; desc->status = 0; + /* + * Align driver with hw behavior when generating null hashes + */ + if (rctx->init && rctx->fini && desc->size == 0) { + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); + const uint8_t *sha_buf = + (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ? + sha1_null_hash : sha256_null_hash; + memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize); + ret = 0; + goto done_run; + } + /* Set HASH_TERM bit for last transfer block. 
*/ if (rctx->fini) { - digest_phys = dma_map_single(sdcp->dev, req->result, - halg->digestsize, DMA_FROM_DEVICE); + digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, + DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; desc->payload = digest_phys; } @@ -547,9 +603,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req) ret = mxs_dcp_start_dma(actx); if (rctx->fini) - dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, + dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); +done_run: dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); return ret; @@ -567,6 +624,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) const int nents = sg_nents(req->src); uint8_t *in_buf = sdcp->coh->sha_in_buf; + uint8_t *out_buf = sdcp->coh->sha_out_buf; uint8_t *src_buf; @@ -621,11 +679,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) actx->fill = 0; - /* For some reason, the result is flipped. */ - for (i = 0; i < halg->digestsize / 2; i++) { - swap(req->result[i], - req->result[halg->digestsize - i - 1]); - } + /* For some reason the result is flipped */ + for (i = 0; i < halg->digestsize; i++) + req->result[i] = out_buf[halg->digestsize - i - 1]; } return 0; diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index c68df7e8bee185487cd6e9544fd6c4f31ab0a9c2..7ce2467c771eb63d7a2b932329670904ec40b027 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -36,8 +36,6 @@ MODULE_ALIAS_CRYPTO("842-nx"); #define WORKMEM_ALIGN (CRB_ALIGN) #define CSB_WAIT_MAX (5000) /* ms */ #define VAS_RETRIES (10) -/* # of requests allowed per RxFIFO at a time. 0 for unlimited */ -#define MAX_CREDITS_PER_RXFIFO (1024) struct nx842_workmem { /* Below fields must be properly aligned */ @@ -821,7 +819,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, rxattr.lnotify_lpid = lpid; rxattr.lnotify_pid = pid; rxattr.lnotify_tid = tid; - rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; + /* + * Maximum RX window credits can not be more than #CRBs in + * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns. 
+ */ + rxattr.wcreds_max = fifo_size / CRB_SIZE; /* * Open a VAS receice window which is used to configure RxFIFO diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 09d823d36d3a4ec71913e235a82e4fd101132728..358ebf61fca52748a275b8815bb217f3f1777fe8 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -487,7 +487,7 @@ static struct crypto_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 21e5cae0a1e046ee84176569cd23c908af736a4d..d05f9ffb7a7bd27a1aabb35ef46b42ba36ae97cb 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -506,7 +506,7 @@ static struct shash_alg sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_FEATURE_MATCH(X86_FEATURE_PHE), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 613c7d5644ced6d250adefdf3d49a7c4d0876938..e87b7c466bdbf2b8d9de7e534aadab7992abe6c7 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 278452b8ef81c8a5e5aa296a38bb8caf6402fdd3..a8f3f2ecae70dda2211b0a528fd8080d5370c8f8 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 9225d060e18f45cae459be381b0d2a0d79e25793..3811ca0b6e0bb9e523a814d13134f8fd65a6d5c6 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -139,7 +139,8 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - kfree(reset_data); + if (reset_data->mode == ADF_DEV_RESET_ASYNC) + kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } @@ -147,10 +148,10 @@ static void adf_device_reset_worker(struct work_struct *work) clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); /* The dev is back alive. 
Notify the caller if in sync mode */ - if (reset_data->mode == ADF_DEV_RESET_SYNC) - complete(&reset_data->compl); - else + if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); + else + complete(&reset_data->compl); } static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, @@ -182,6 +183,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, if (!timeout) { dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); + cancel_work_sync(&reset_data->reset_work); ret = -EFAULT; } kfree(reset_data); diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 5c4c0a2531296a157ae146f19226d9d8f608830f..d78f8d5c89c3fe4f6906fca888ca4bc192255ef4 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -95,7 +95,7 @@ struct service_hndl { static inline int get_current_node(void) { - return topology_physical_package_id(smp_processor_id()); + return topology_physical_package_id(raw_smp_processor_id()); } int adf_service_register(struct service_hndl *service); diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 3da0f951cb590a555fea8a9d848c657888a305d9..1b954abf67fb8250bb4d7d025a79c69dfff06f48 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index d8a5db11b7ea1f3b3fec471b2cca1e11a84d6d1e..bffd4d15145d95841c006535c216a24e5383cb6e 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -521,8 +521,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, ret = crypto_register_ahash(alg); if (ret) { - kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", base->cra_name); + kfree(tmpl); return ret; } diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index e54249ccc009202973fcbaf4635ac563ab174cbf..818e3e9479fe672f6c46e70f7af735d013763273 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -42,16 +43,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) { unsigned int currsize = 0; u32 val; + int ret; /* read random data from hardware */ do { - val = readl_relaxed(rng->base + PRNG_STATUS); - if (!(val & PRNG_STATUS_DATA_AVAIL)) - break; + ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, + val & PRNG_STATUS_DATA_AVAIL, + 200, 10000); + if (ret) + return ret; val = readl_relaxed(rng->base + PRNG_DATA_OUT); if (!val) - break; + return -EINVAL; if ((max - currsize) >= WORD_SZ) { memcpy(data, &val, WORD_SZ); @@ -64,7 +68,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } } while (currsize < max); - return currsize; + return 0; } static int qcom_rng_generate(struct crypto_rng *tfm, @@ -86,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, mutex_unlock(&rng->lock); clk_disable_unprepare(rng->clk); - return 0; + return ret; } static int qcom_rng_seed(struct crypto_rng *tfm, const u8 
*seed, diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index c9d622abd90c09c5c1c87b8564502460c0456dc2..0ce4a65b95f5da82a5e14e0d843b38440512dcce 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev, count = (dev->left_bytes > PAGE_SIZE) ? PAGE_SIZE : dev->left_bytes; - if (!sg_pcopy_to_buffer(dev->first, dev->nents, + if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, dev->addr_vir, count, dev->total - dev->left_bytes)) { dev_err(dev->dev, "[%s:%d] pcopy err\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index d5fb4013fb42a23d520009df3e9f7a524eb2a5f3..54ee5b3ed9db8d23b13c0f34dc82d146503ec491 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -207,7 +207,8 @@ struct rk_crypto_info { void *addr_vir; int aligned; int align_size; - size_t nents; + size_t src_nents; + size_t dst_nents; unsigned int total; unsigned int count; dma_addr_t addr_in; @@ -244,6 +245,7 @@ struct rk_cipher_ctx { struct rk_crypto_info *dev; unsigned int keylen; u32 mode; + u8 iv[AES_BLOCK_SIZE]; }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 639c15c5364b4d081d7443f2e75a8b91133a5557..204e4ad62c3843b74c091ad7666ca6878fec54d8 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -242,6 +242,22 @@ static void crypto_dma_start(struct rk_crypto_info *dev) static int rk_set_data_start(struct rk_crypto_info *dev) { int err; + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + + dev->sg_src->offset + dev->sg_src->length - ivsize; + + /* Store the iv that need to be updated in chain mode. + * And update the IV buffer to contain the next IV for decryption mode. + */ + if (ctx->mode & RK_CRYPTO_DEC) { + memcpy(ctx->iv, src_last_blk, ivsize); + sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info, + ivsize, dev->total - ivsize); + } err = dev->load_data(dev, dev->sg_src, dev->sg_dst); if (!err) @@ -260,8 +276,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev) dev->total = req->nbytes; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); dev->sg_dst = req->dst; + dev->dst_nents = sg_nents(req->dst); dev->aligned = 1; spin_lock_irqsave(&dev->lock, flags); @@ -276,13 +293,41 @@ static void rk_iv_copyback(struct rk_crypto_info *dev) struct ablkcipher_request *req = ablkcipher_request_cast(dev->async_req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + + /* Update the IV buffer to contain the next IV for encryption mode. 
*/ + if (!(ctx->mode & RK_CRYPTO_DEC)) { + if (dev->aligned) { + memcpy(req->info, sg_virt(dev->sg_dst) + + dev->sg_dst->length - ivsize, ivsize); + } else { + memcpy(req->info, dev->addr_vir + + dev->count - ivsize, ivsize); + } + } +} + +static void rk_update_iv(struct rk_crypto_info *dev) +{ + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *new_iv = NULL; + + if (ctx->mode & RK_CRYPTO_DEC) { + new_iv = ctx->iv; + } else { + new_iv = page_address(sg_page(dev->sg_dst)) + + dev->sg_dst->offset + dev->sg_dst->length - ivsize; + } if (ivsize == DES_BLOCK_SIZE) - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0, - ivsize); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); else if (ivsize == AES_BLOCK_SIZE) - memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); } /* return: @@ -297,7 +342,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) dev->unload_data(dev); if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->nents, + if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, dev->addr_vir, dev->count, dev->total - dev->left_bytes - dev->count)) { @@ -306,6 +351,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) } } if (dev->left_bytes) { + rk_update_iv(dev); if (dev->aligned) { if (sg_is_last(dev->sg_src)) { dev_err(dev->dev, "[%s:%d] Lack of data\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 821a506b9e17f663bb2fa721ecb3e50057696c6b..c336ae75e361fa6338139fa4a14066c107ed4657 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev) dev->sg_dst = NULL; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); rctx = ahash_request_ctx(req); rctx->mode = 0; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index faa282074e5aa4a611b9ee137fc393d3fecc4a3c..b7216935236f03fc1892f9ec3e4b8bff7a48f07e 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev) } /* Calls the completion. Cannot be called with dev->lock hold. 
*/ -static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) +static void s5p_aes_complete(struct ablkcipher_request *req, int err) { - dev->req->base.complete(&dev->req->base, err); + req->base.complete(&req->base, err); } static void s5p_unset_outdata(struct s5p_aes_dev *dev) @@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev) } static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src, - struct scatterlist **dst) + struct scatterlist **dst) { void *pages; int len; @@ -655,6 +655,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct s5p_aes_dev *dev = platform_get_drvdata(pdev); + struct ablkcipher_request *req; int err_dma_tx = 0; int err_dma_rx = 0; int err_dma_hx = 0; @@ -727,7 +728,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&dev->lock, flags); - s5p_aes_complete(dev, 0); + s5p_aes_complete(dev->req, 0); /* Device is still busy */ tasklet_schedule(&dev->tasklet); } else { @@ -752,11 +753,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) error: s5p_sg_done(dev); dev->busy = false; + req = dev->req; if (err_dma_hx == 1) s5p_set_dma_hashdata(dev, dev->hash_sg_iter); spin_unlock_irqrestore(&dev->lock, flags); - s5p_aes_complete(dev, err); + s5p_aes_complete(req, err); hash_irq_end: /* @@ -1887,7 +1889,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev, } static int s5p_set_outdata_start(struct s5p_aes_dev *dev, - struct ablkcipher_request *req) + struct ablkcipher_request *req) { struct scatterlist *sg; int err; @@ -1983,7 +1985,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) s5p_sg_done(dev); dev->busy = false; spin_unlock_irqrestore(&dev->lock, flags); - s5p_aes_complete(dev, err); + s5p_aes_complete(req, err); } static void s5p_tasklet_cb(unsigned long data) diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 23b0b7bd64c7f8388e0e641f86cd7ecbdd95b40e..b3b49dce1136955e84af78c824f576a28452ef89 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -2036,8 +2036,6 @@ static int stm32_cryp_probe(struct platform_device *pdev) list_del(&cryp->list); spin_unlock(&cryp_list.lock); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 590d7352837e50e0a37ee6f84af4a9cff2dcc6db..641b11077f479df05544e35dacc482ffe56e79ca 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -365,7 +365,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, return -ETIMEDOUT; if ((hdev->flags & HASH_FLAGS_HMAC) && - (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { + (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { hdev->flags |= HASH_FLAGS_HMAC_KEY; stm32_hash_write_key(hdev); if (stm32_hash_wait_busy(hdev)) diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index 29d2095d9dfda8eccb40eafb097fdaf7baeb7b35..48c4a71d1cb328ba00538bef357cd53317f216b3 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -217,7 +217,7 @@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, @@ -239,7 +239,7 
@@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32c", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 5cf64746731a331e5a4e6099320b966beac5a702..22e49185792545d9909bf8495c423dd2c0276895 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -81,7 +81,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) oi = 0; oo = 0; do { - todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); + todo = min(rx_cnt, ileft); + todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo) { ileft -= todo; writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); @@ -96,7 +97,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - todo = min3(tx_cnt, oleft, (mo.length - oo) / 4); + todo = min(tx_cnt, oleft); + todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { oleft -= todo; readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); @@ -220,7 +222,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * todo is the number of consecutive 4byte word that we * can read from current SG */ - todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); + todo = min(rx_cnt, ileft / 4); + todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo && !ob) { writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); @@ -234,8 +237,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * we need to be able to write all buf in one * pass, so it is why we min() with rx_cnt */ - todo = min3(rx_cnt * 4 - ob, ileft, - mi.length - oi); + todo = min(rx_cnt * 4 - ob, ileft); + todo = min_t(size_t, todo, mi.length - oi); memcpy(buf + ob, mi.addr + oi, todo); ileft -= todo; oi += todo; @@ -255,7 +258,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", + dev_dbg(ss->dev, + "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n", mode, oi, mi.length, ileft, areq->cryptlen, rx_cnt, oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); @@ -263,7 +267,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) if (!tx_cnt) continue; /* todo in 4bytes word */ - todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); + todo = min(tx_cnt, oleft / 4); + todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; @@ -287,7 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * no more than remaining buffer * no need to test against oleft */ - todo = min(mo.length - oo, obl - obo); + todo = min_t(size_t, + mo.length - oo, obl - obo); memcpy(mo.addr + oo, bufo + obo, todo); oleft -= todo; obo += todo; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index a4b5ff2b72f874ff71726372942cae4d54cef570..1a724263761bc52c5f2a518f8e9560e5262c9474 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq) } } else { /* Since we have the flag final, we can go up to modulo 4 */ - end = ((areq->nbytes + op->len) / 
4) * 4 - op->len; + if (areq->nbytes < 4) + end = 0; + else + end = ((areq->nbytes + op->len) / 4) * 4 - op->len; } /* TODO if SGlen % 4 and !op->len then DMA */ @@ -273,8 +276,8 @@ static int sun4i_hash(struct ahash_request *areq) */ while (op->len < 64 && i < end) { /* how many bytes we can read from current SG */ - in_r = min3(mi.length - in_i, end - i, - 64 - op->len); + in_r = min(end - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; @@ -294,8 +297,8 @@ static int sun4i_hash(struct ahash_request *areq) } if (mi.length - in_i > 3 && i < end) { /* how many bytes we can read from current SG */ - in_r = min3(mi.length - in_i, areq->nbytes - i, - ((mi.length - in_i) / 4) * 4); + in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i); + in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r); /* how many bytes we can write in the device*/ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4); writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo); @@ -321,8 +324,8 @@ static int sun4i_hash(struct ahash_request *areq) if ((areq->nbytes - i) < 64) { while (i < areq->nbytes && in_i < mi.length && op->len < 64) { /* how many bytes we can read from current SG */ - in_r = min3(mi.length - in_i, areq->nbytes - i, - 64 - op->len); + in_r = min(areq->nbytes - i, 64 - op->len); + in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6988012deca4cdd8f0f2bc6dd21ca790e4c98f6a..634ae487c372e0213ae9d8b123d8def8a6abcd80 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -334,6 +334,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, } EXPORT_SYMBOL(talitos_submit); +static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1) +{ + struct talitos_edesc *edesc; + + if (!is_sec1) + return request->desc->hdr; + + if (!request->desc->next_desc) + return request->desc->hdr1; + + edesc = container_of(request->desc, struct talitos_edesc, desc); + + return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1; +} + /* * process what was done, notify callback of error if not */ @@ -355,12 +370,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) /* descriptors with their done bits set don't get the error */ rmb(); - if (!is_sec1) - hdr = request->desc->hdr; - else if (request->desc->next_desc) - hdr = (request->desc + 1)->hdr1; - else - hdr = request->desc->hdr1; + hdr = get_request_hdr(request, is_sec1); if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) status = 0; @@ -490,8 +500,14 @@ static u32 current_desc_hdr(struct device *dev, int ch) } } - if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) - return (priv->chan[ch].fifo[iter].desc + 1)->hdr; + if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { + struct talitos_edesc *edesc; + + edesc = container_of(priv->chan[ch].fifo[iter].desc, + struct talitos_edesc, desc); + return ((struct talitos_desc *) + (edesc->buf + edesc->dma_len))->hdr; + } return priv->chan[ch].fifo[iter].desc->hdr; } @@ -913,36 +929,6 @@ static int aead_setkey(struct crypto_aead *authenc, return -EINVAL; } -/* - * talitos_edesc - s/w-extended descriptor - * @src_nents: number of segments in input scatterlist - * @dst_nents: number of segments in output scatterlist - * @icv_ool: whether ICV is out-of-line - * @iv_dma: dma address of iv for checking 
continuity and link table - * @dma_len: length of dma mapped link_tbl space - * @dma_link_tbl: bus physical address of link_tbl/buf - * @desc: h/w descriptor - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) - * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) - * - * if decrypting (with authcheck), or either one of src_nents or dst_nents - * is greater than 1, an integrity check value is concatenated to the end - * of link_tbl data - */ -struct talitos_edesc { - int src_nents; - int dst_nents; - bool icv_ool; - dma_addr_t iv_dma; - int dma_len; - dma_addr_t dma_link_tbl; - struct talitos_desc desc; - union { - struct talitos_ptr link_tbl[0]; - u8 buf[0]; - }; -}; - static void talitos_sg_unmap(struct device *dev, struct talitos_edesc *edesc, struct scatterlist *src, @@ -973,11 +959,13 @@ static void talitos_sg_unmap(struct device *dev, static void ipsec_esp_unmap(struct device *dev, struct talitos_edesc *edesc, - struct aead_request *areq) + struct aead_request *areq, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(aead); unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; @@ -986,7 +974,7 @@ static void ipsec_esp_unmap(struct device *dev, DMA_FROM_DEVICE); unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen, areq->assoclen); if (edesc->dma_len) @@ -997,7 +985,7 @@ static void ipsec_esp_unmap(struct device *dev, unsigned int dst_nents = edesc->dst_nents ? : 1; sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, - areq->assoclen + areq->cryptlen - ivsize); + areq->assoclen + cryptlen - ivsize); } } @@ -1015,12 +1003,11 @@ static void ipsec_esp_encrypt_done(struct device *dev, unsigned int authsize = crypto_aead_authsize(authenc); unsigned int ivsize = crypto_aead_ivsize(authenc); struct talitos_edesc *edesc; - struct scatterlist *sg; void *icvdata; edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, areq); + ipsec_esp_unmap(dev, edesc, areq, true); /* copy the generated ICV to dst */ if (edesc->icv_ool) { @@ -1029,9 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev, else icvdata = &edesc->link_tbl[edesc->src_nents + edesc->dst_nents + 2]; - sg = sg_last(areq->dst, edesc->dst_nents); - memcpy((char *)sg_virt(sg) + sg->length - authsize, - icvdata, authsize); + sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata, + authsize, areq->assoclen + areq->cryptlen); } dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); @@ -1049,19 +1035,27 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, struct crypto_aead *authenc = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_edesc *edesc; - struct scatterlist *sg; char *oicv, *icv; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, req); + ipsec_esp_unmap(dev, edesc, req, false); if (!err) { + char icvdata[SHA512_DIGEST_SIZE]; + int nents = edesc->dst_nents ? 
: 1; + unsigned int len = req->assoclen + req->cryptlen; + /* auth check */ - sg = sg_last(req->dst, edesc->dst_nents ? : 1); - icv = (char *)sg_virt(sg) + sg->length - authsize; + if (nents > 1) { + sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize, + len - authsize); + icv = icvdata; + } else { + icv = (char *)sg_virt(req->dst) + len - authsize; + } if (edesc->dma_len) { if (is_sec1) @@ -1093,7 +1087,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, req); + ipsec_esp_unmap(dev, edesc, req, false); /* check ICV auth status */ if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != @@ -1196,6 +1190,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, * fill in and submit ipsec_esp descriptor */ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, + bool encrypt, void (*callback)(struct device *dev, struct talitos_desc *desc, void *context, int error)) @@ -1205,7 +1200,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, struct talitos_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; - unsigned int cryptlen = areq->cryptlen; + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); unsigned int ivsize = crypto_aead_ivsize(aead); int tbl_off = 0; int sg_count, ret; @@ -1332,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { - ipsec_esp_unmap(dev, edesc, areq); + ipsec_esp_unmap(dev, edesc, areq, encrypt); kfree(edesc); } return ret; @@ -1361,23 +1356,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } - if (ivsize) - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (!dst || dst == src) { src_len = assoclen + cryptlen + authsize; src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; @@ -1387,16 +1377,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } src_nents = (src_nents == 1) ? 0 : src_nents; dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); - err = ERR_PTR(-EINVAL); - goto error_sg; + return ERR_PTR(-EINVAL); } dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } @@ -1423,11 +1411,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, /* if its a ahash, add space for a second desc next to the first one */ if (is_sec1 && !dst) alloc_len += sizeof(struct talitos_desc); + alloc_len += ivsize; edesc = kmalloc(alloc_len, GFP_DMA | flags); - if (!edesc) { - err = ERR_PTR(-ENOMEM); - goto error_sg; + if (!edesc) + return ERR_PTR(-ENOMEM); + if (ivsize) { + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); } memset(&edesc->desc, 0, sizeof(edesc->desc)); @@ -1435,20 +1426,12 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; edesc->dma_len = dma_len; - if (dma_len) { - void *addr = &edesc->link_tbl[0]; - - if (is_sec1 && !dst) - addr += sizeof(struct talitos_desc); - edesc->dma_link_tbl = dma_map_single(dev, addr, + if (dma_len) + edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], edesc->dma_len, DMA_BIDIRECTIONAL); - } + return edesc; -error_sg: - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, @@ -1458,9 +1441,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); unsigned int ivsize = crypto_aead_ivsize(authenc); + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, - iv, areq->assoclen, areq->cryptlen, + iv, areq->assoclen, cryptlen, authsize, ivsize, icv_stashing, areq->base.flags, encrypt); } @@ -1479,7 +1463,7 @@ static int aead_encrypt(struct aead_request *req) /* set encrypt */ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; - return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); + return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done); } static int aead_decrypt(struct aead_request *req) @@ -1489,17 +1473,15 @@ static int aead_decrypt(struct aead_request *req) struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_private *priv = dev_get_drvdata(ctx->dev); struct talitos_edesc *edesc; - struct scatterlist *sg; void *icvdata; - req->cryptlen -= authsize; - /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, req->iv, 1, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); - if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && + (priv->features & TALITOS_FTR_HW_AUTH_CHECK) && ((!edesc->src_nents && !edesc->dst_nents) || priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { @@ -1510,7 +1492,8 @@ static int aead_decrypt(struct aead_request *req) /* reset integrity check result bits */ - return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); + return ipsec_esp(edesc, req, false, + ipsec_esp_decrypt_hwauth_done); } /* Have to check the ICV with software */ @@ -1523,11 +1506,10 @@ static int aead_decrypt(struct aead_request *req) else icvdata = &edesc->link_tbl[0]; - sg = sg_last(req->src, edesc->src_nents ? : 1); - - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); + sg_pcopy_to_buffer(req->src, edesc->src_nents ? 
: 1, icvdata, authsize, + req->assoclen + req->cryptlen - authsize); - return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); + return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done); } static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, @@ -1560,6 +1542,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } +static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 || + keylen == AES_KEYSIZE_256) + return ablkcipher_setkey(cipher, key, keylen); + + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + + return -EINVAL; +} + static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) @@ -1579,11 +1573,15 @@ static void ablkcipher_done(struct device *dev, int err) { struct ablkcipher_request *areq = context; + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); struct talitos_edesc *edesc; edesc = container_of(desc, struct talitos_edesc, desc); common_nonsnoop_unmap(dev, edesc, areq); + memcpy(areq->info, ctx->iv, ivsize); kfree(edesc); @@ -1678,6 +1676,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); + + if (!areq->nbytes) + return 0; + + if (areq->nbytes % blocksize) + return -EINVAL; /* allocate extended descriptor */ edesc = ablkcipher_edesc_alloc(areq, true); @@ -1695,6 +1701,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); + + if (!areq->nbytes) + return 0; + + if (areq->nbytes % blocksize) + return -EINVAL; /* allocate extended descriptor */ edesc = ablkcipher_edesc_alloc(areq, false); @@ -1714,14 +1728,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); struct talitos_desc *desc = &edesc->desc; - struct talitos_desc *desc2 = desc + 1; + struct talitos_desc *desc2 = (struct talitos_desc *) + (edesc->buf + edesc->dma_len); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); if (desc->next_desc && desc->ptr[5].ptr != desc2->ptr[5].ptr) unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); + if (req_ctx->psrc) + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); /* When using hashctx-in, must unmap it. 
*/ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) @@ -1788,7 +1804,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct ahash_request *areq, unsigned int length, - unsigned int offset, void (*callback) (struct device *dev, struct talitos_desc *desc, void *context, int error)) @@ -1827,9 +1842,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, sg_count = edesc->src_nents ?: 1; if (is_sec1 && sg_count > 1) - sg_pcopy_to_buffer(req_ctx->psrc, sg_count, - edesc->buf + sizeof(struct talitos_desc), - length, req_ctx->nbuf); + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); else if (length) sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, DMA_TO_DEVICE); @@ -1842,7 +1855,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, DMA_TO_DEVICE); } else { sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, - &desc->ptr[3], sg_count, offset, 0); + &desc->ptr[3], sg_count, 0, 0); if (sg_count > 1) sync_needed = true; } @@ -1866,7 +1879,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); if (is_sec1 && req_ctx->nbuf && length) { - struct talitos_desc *desc2 = desc + 1; + struct talitos_desc *desc2 = (struct talitos_desc *) + (edesc->buf + edesc->dma_len); dma_addr_t next_desc; memset(desc2, 0, sizeof(*desc2)); @@ -1887,7 +1901,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, DMA_TO_DEVICE); copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, - &desc2->ptr[3], sg_count, offset, 0); + &desc2->ptr[3], sg_count, 0, 0); if (sg_count > 1) sync_needed = true; copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); @@ -1998,7 +2012,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) struct device *dev = ctx->dev; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); - int offset = 0; u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { @@ -2038,6 +2051,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) sg_chain(req_ctx->bufsl, 2, areq->src); req_ctx->psrc = req_ctx->bufsl; } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { + int offset; + if (nbytes_to_hash > blocksize) offset = blocksize - req_ctx->nbuf; else @@ -2050,7 +2065,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) sg_copy_to_buffer(areq->src, nents, ctx_buf + req_ctx->nbuf, offset); req_ctx->nbuf += offset; - req_ctx->psrc = areq->src; + req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, + offset); } else req_ctx->psrc = areq->src; @@ -2090,8 +2106,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) if (ctx->keylen && (req_ctx->first || req_ctx->last)) edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; - return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, - ahash_done); + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); } static int ahash_update(struct ahash_request *areq) @@ -2294,7 +2309,7 @@ static struct talitos_alg_template driver_algs[] = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-" - "cbc-aes-talitos", + "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2338,7 +2353,7 @@ static struct 
talitos_alg_template driver_algs[] = { .cra_name = "authenc(hmac(sha1)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-" - "cbc-3des-talitos", + "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2380,7 +2395,7 @@ static struct talitos_alg_template driver_algs[] = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-" - "cbc-aes-talitos", + "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2424,7 +2439,7 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "authenc(hmac(sha224)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-" - "cbc-3des-talitos", + "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2466,7 +2481,7 @@ static struct talitos_alg_template driver_algs[] = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-" - "cbc-aes-talitos", + "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2510,7 +2525,7 @@ static struct talitos_alg_template driver_algs[] = { .cra_name = "authenc(hmac(sha256)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-" - "cbc-3des-talitos", + "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2636,7 +2651,7 @@ static struct talitos_alg_template driver_algs[] = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-" - "cbc-aes-talitos", + "cbc-aes-talitos-hsna", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2678,7 +2693,7 @@ static struct talitos_alg_template driver_algs[] = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-" - "cbc-3des-talitos", + "cbc-3des-talitos-hsna", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_ASYNC, }, @@ -2722,6 +2737,7 @@ static struct talitos_alg_template driver_algs[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2732,13 +2748,13 @@ static struct talitos_alg_template driver_algs[] = { .alg.crypto = { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, } }, .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | @@ -3108,6 +3124,7 @@ static int talitos_remove(struct platform_device *ofdev) break; case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&t_alg->algt.alg.aead); + break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&t_alg->algt.alg.hash); break; @@ -3210,7 +3227,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, alg->cra_priority = t_alg->algt.priority; else alg->cra_priority = TALITOS_CRA_PRIORITY; - alg->cra_alignmask = 0; + if (has_ftr_sec1(priv)) + alg->cra_alignmask = 3; + else + alg->cra_alignmask = 0; alg->cra_ctxsize = sizeof(struct talitos_ctx); alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index a65a63e0d6c10e7d31c0010a97344802896f0679..979f6a61e545f1aa54d9d895932b838572f3fd86 100644 --- 
a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -65,6 +65,36 @@ struct talitos_desc { #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) +/* + * talitos_edesc - s/w-extended descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @icv_ool: whether ICV is out-of-line + * @iv_dma: dma address of iv for checking continuity and link table + * @dma_len: length of dma mapped link_tbl space + * @dma_link_tbl: bus physical address of link_tbl/buf + * @desc: h/w descriptor + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) + * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) + * + * if decrypting (with authcheck), or either one of src_nents or dst_nents + * is greater than 1, an integrity check value is concatenated to the end + * of link_tbl data + */ +struct talitos_edesc { + int src_nents; + int dst_nents; + bool icv_ool; + dma_addr_t iv_dma; + int dma_len; + dma_addr_t dma_link_tbl; + struct talitos_desc desc; + union { + struct talitos_ptr link_tbl[0]; + u8 buf[0]; + }; +}; + /** * talitos_request - descriptor submission request * @desc: descriptor pointer (kernel virtual) diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index d2663a4e1f5eb8050589c82e8c89406df4bd3df8..a92a66b1ff46ed689a71d02dc4ab41481284ed1c 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg_src, ctx->device->dma.sg_src_len, - direction, DMA_CTRL_ACK); + DMA_MEM_TO_DEV, DMA_CTRL_ACK); break; case DMA_FROM_DEVICE: @@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg_dst, ctx->device->dma.sg_dst_len, - direction, + DMA_DEV_TO_MEM, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 633321a8dd034390b7d809aaeef792e8d971a21b..a0bb8a6eec3fd954e16ad601d2a1d16ebbbd4f0b 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, __func__); desc = dmaengine_prep_slave_sg(channel, ctx->device->dma.sg, ctx->device->dma.sg_len, - direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(ctx->device->dev, "%s: dmaengine_prep_slave_sg() failed!\n", __func__); diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 2c573d1aaa64f2bd2f7c70088c6ddec7ad81f35b..523b712770ac50df05faea1f66f1b997a7fdc32a 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -117,8 +117,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg) *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC; break; default: - pr_err("virtio_crypto: Unsupported key length: %d\n", - key_len); return -EINVAL; } return 0; @@ -498,6 +496,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; + if (!req->nbytes) + return 0; + if (req->nbytes % AES_BLOCK_SIZE) + return -EINVAL; + vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; 
vc_sym_req->ablkcipher_ctx = ctx; @@ -518,6 +521,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) /* Use the first data virtqueue as default */ struct data_queue *data_vq = &vcrypto->data_vq[0]; + if (!req->nbytes) + return 0; + if (req->nbytes % AES_BLOCK_SIZE) + return -EINVAL; + vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; vc_sym_req->ablkcipher_ctx = ctx; diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index cab32cfec9c45d0e90d930404462743beab2d542..709670d2b553abb7eb7510621b0ee29196b2945b 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -TARGET := linux-ppc64le +override flavour := linux-ppc64le else -TARGET := linux-ppc64 +override flavour := linux-ppc64 endif quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $(<) $(TARGET) > $(@) + cmd_perl = $(PERL) $(<) $(flavour) > $(@) targets += aesp8-ppc.S ghashp8-ppc.S diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index d6a9f63d65ba2d6d3a0d903d5167808351cd603c..9c6b5c1d6a1a61c0408a55822a816516057a486f 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl @@ -1357,7 +1357,7 @@ Loop_ctr32_enc: addi $idx,$idx,16 bdnz Loop_ctr32_enc - vadduwm $ivec,$ivec,$one + vadduqm $ivec,$ivec,$one vmr $dat,$inptail lvx $inptail,0,$inp addi $inp,$inp,16 @@ -1854,7 +1854,7 @@ Lctr32_enc8x_three: stvx_u $out1,$x10,$out stvx_u $out2,$x20,$out addi $out,$out,0x30 - b Lcbc_dec8x_done + b Lctr32_enc8x_done .align 5 Lctr32_enc8x_two: @@ -1866,7 +1866,7 @@ Lctr32_enc8x_two: stvx_u $out0,$x00,$out stvx_u $out1,$x10,$out addi $out,$out,0x20 - b Lcbc_dec8x_done + b Lctr32_enc8x_done .align 5 Lctr32_enc8x_one: diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index dd8b8716467a2cdd314683541436de0742b0106e..2d1a8cd355090701454d73b5adfb728e080e8473 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -1,22 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 /** * GHASH routines supporting VMX instructions on the Power 8 * - * Copyright (C) 2015 International Business Machines Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 only. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright (C) 2015, 2019 International Business Machines Inc. * * Author: Marcelo Henrique Cerri + * + * Extended by Daniel Axtens to replace the fallback + * mechanism. The new approach is based on arm64 code, which is: + * Copyright (C) 2014 - 2018 Linaro Ltd. 
*/ #include @@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], const u8 *in, size_t len); struct p8_ghash_ctx { + /* key used by vector asm */ u128 htable[16]; - struct crypto_shash *fallback; + /* key used by software fallback */ + be128 key; }; struct p8_ghash_desc_ctx { u64 shash[2]; u8 buffer[GHASH_DIGEST_SIZE]; int bytes; - struct shash_desc fallback_desc; }; -static int p8_ghash_init_tfm(struct crypto_tfm *tfm) -{ - const char *alg = "ghash-generic"; - struct crypto_shash *fallback; - struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR - "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - - crypto_shash_set_flags(fallback, - crypto_shash_get_flags((struct crypto_shash - *) tfm)); - - /* Check if the descsize defined in the algorithm is still enough. */ - if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) - + crypto_shash_descsize(fallback)) { - printk(KERN_ERR - "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", - alg, - shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), - crypto_shash_descsize(fallback)); - return -EINVAL; - } - ctx->fallback = fallback; - - return 0; -} - -static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) -{ - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - if (ctx->fallback) { - crypto_free_shash(ctx->fallback); - ctx->fallback = NULL; - } -} - static int p8_ghash_init(struct shash_desc *desc) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); - dctx->fallback_desc.tfm = ctx->fallback; - dctx->fallback_desc.flags = desc->flags; - return crypto_shash_init(&dctx->fallback_desc); + return 0; } static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, disable_kernel_vsx(); pagefault_enable(); preempt_enable(); - return crypto_shash_setkey(ctx->fallback, key, keylen); + + memcpy(&ctx->key, key, GHASH_BLOCK_SIZE); + + return 0; +} + +static inline void __ghash_block(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx) +{ + if (!IN_INTERRUPT) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + } else { + crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); + gf128mul_lle((be128 *)dctx->shash, &ctx->key); + } +} + +static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + if (!IN_INTERRUPT) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + gcm_ghash_p8(dctx->shash, ctx->htable, + src, srclen); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + } else { + while (srclen >= GHASH_BLOCK_SIZE) { + crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); + gf128mul_lle((be128 *)dctx->shash, &ctx->key); + srclen -= GHASH_BLOCK_SIZE; + src += GHASH_BLOCK_SIZE; + } + } } static int p8_ghash_update(struct shash_desc *desc, @@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc, struct p8_ghash_ctx *ctx = 
crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (IN_INTERRUPT) { - return crypto_shash_update(&dctx->fallback_desc, src, - srclen); - } else { - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, - srclen); - dctx->bytes += srclen; - return 0; - } + if (dctx->bytes) { + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, src, len); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; + srclen); + dctx->bytes += srclen; + return 0; } - return 0; + memcpy(dctx->buffer + dctx->bytes, src, + GHASH_DIGEST_SIZE - dctx->bytes); + + __ghash_block(ctx, dctx); + + src += GHASH_DIGEST_SIZE - dctx->bytes; + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; + dctx->bytes = 0; + } + len = srclen & ~(GHASH_DIGEST_SIZE - 1); + if (len) { + __ghash_blocks(ctx, dctx, src, len); + src += len; + srclen -= len; } + if (srclen) { + memcpy(dctx->buffer, src, srclen); + dctx->bytes = srclen; + } + return 0; } static int p8_ghash_final(struct shash_desc *desc, u8 *out) @@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (IN_INTERRUPT) { - return crypto_shash_final(&dctx->fallback_desc, out); - } else { - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); - dctx->bytes = 0; - } - memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); - return 0; + if (dctx->bytes) { + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) + dctx->buffer[i] = 0; + __ghash_block(ctx, dctx); + dctx->bytes = 0; } + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); + return 0; } struct shash_alg p8_ghash_alg = { @@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", .cra_priority = 1000, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_ghash_ctx), .cra_module = THIS_MODULE, - .cra_init = p8_ghash_init_tfm, - .cra_exit = p8_ghash_exit_tfm, }, }; diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c new file mode 100644 index 0000000000000000000000000000000000000000..928135be49d664719b17d62f816a19873cd336e7 --- /dev/null +++ b/drivers/crypto/zhaoxin-aes.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Support for ACE hardware crypto engine. 
+ * + * Copyright (c) 2004 Michal Ludvig + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __attribute__ ((__packed__)) + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + +/* Whenever making any changes to the following + * structure *make sure* you keep E, d_data + * and cword aligned on 16 Bytes boundaries and + * the Hardware can access 16 * 16 bytes of E and d_data + * (only the first 15 * 16 bytes matter but the HW reads + * more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + u32 d_data[AES_MAX_KEYLENGTH_U32] + __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, paes_last_cword); + +/* + * Tells whether the ACE is capable to generate + * the extended key for a given key_len. + */ +static inline int +aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping + * as it's possible that the capability will be + * added in the next CPU revisions. + */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) +{ + return aes_ctx_common(crypto_blkcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * If the hardware is capable of generating the extended key + * itself we must supply the plain key for both encryption + * and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. */ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. 
*/ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) + per_cpu(paes_last_cword, cpu) = NULL; + + return 0; +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they + * generate a spurious DNA fault when CR0.TS is '1'. Fortunately, + * the kernel doesn't use CR0.TS. + */ + +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, + u8 *iv, struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) +{ + /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. + */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. 
*/ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, + void *control_word, u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, + u8 *iv, void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + 
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct crypto_alg ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, + } + } +}; + +static int cbc_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct crypto_alg cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, + } + } +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
Hmm, strange...\n"); + return -ENODEV; + } + + ret = crypto_register_alg(&aes_alg); + if (ret) + goto aes_err; + + ret = crypto_register_alg(&ecb_aes_alg); + if (ret) + goto ecb_aes_err; + + ret = crypto_register_alg(&cbc_aes_alg); + if (ret) + goto cbc_aes_err; + + pr_notice("Using ACE for AES algorithm.\n"); + +out: + return ret; + +cbc_aes_err: + crypto_unregister_alg(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + pr_err("ACE AES initialization failed.\n"); + goto out; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_alg(&cbc_aes_alg); + crypto_unregister_alg(&ecb_aes_alg); + crypto_unregister_alg(&aes_alg); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE AES algorithm support"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS_CRYPTO("aes"); diff --git a/drivers/crypto/zhaoxin-gmi-sm3.c b/drivers/crypto/zhaoxin-gmi-sm3.c new file mode 100644 index 0000000000000000000000000000000000000000..6f6f362900ab79cbef4dde438c372b3f96a76a1b --- /dev/null +++ b/drivers/crypto/zhaoxin-gmi-sm3.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zx-gmi-sm4.c - wrapper code for Zhaoxin GMI. + * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static u8 use_ccs; + +const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B +}; +EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash); + +u32 zx_gmi_capability(void) +{ + u32 eax = 0; + u32 ebx, ecx, edx = 0; + + // 1. check vendor ID string + __asm__ __volatile__ ("cpuid" : "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(eax) : ); + + if (((ebx == 0x746e6543) && (ecx == 0x736c7561) && (edx == 0x48727561)) || + ((ebx == 0x68532020) && (ecx == 0x20206961) && (edx == 0x68676e61))) { + // 2. check whether support SM3/SM4/SM2 Instructions + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + } else { + pr_warn("This is not a ZX CPU! Return!\n"); + return 0; + } + + return edx; +} + +static u32 get_cpu_fms(u32 *eax, u32 *leaf) +{ + u32 eax_tmp = *eax, leaf_tmp = *leaf; + + __asm__ __volatile__ ( + "cpuid" + : "=a"(eax_tmp) + : "0"(leaf_tmp) + : "ebx", "ecx"); + + *eax = eax_tmp; + return eax_tmp; +} + +/* + * Load supported features of the CPU to see if the SM3/SM4 is available. 
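+ * The family/model from CPUID leaf 1 is checked first; CPUID leaf
+ * 0xC0000001 must then report both CCS bits (EDX bits 4 and 5) before
+ * use_ccs is set.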
+ */ +static int gmi_available(void) +{ + u32 eax = 0; + u32 edx = 0; + u8 family, model; + + /* Diff ZXC with ZXD */ + u32 leaf = 0x1; + + get_cpu_fms(&eax, &leaf); + family = (eax & 0xf00) >> 8; /* bit 11-08 */ + model = (eax & 0xf0) >> 4; /* bit 7-4 */ + + edx = zx_gmi_capability(); + + if (((family == 7) && (model == 0xb)) + || ((family == 6) && (model == 0xf)) + || ((family == 6) && (model == 9))) + use_ccs = ((edx & (0x3 << 4)) == (0x3 << 4)); + else + use_ccs = 0; + + return use_ccs; +} + +void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) +{ + u64 in, out, cnt; + + if (!inp) { + pr_warn("GMI-SM3: input is null\n"); + return; + } + + if (!(sst)) { + pr_warn("GMI-SM3: sst is null\n"); + return; + } + + if (!blockcnt) { + pr_warn("GMI-SM3: cnt is 0\n"); + return; + } + + in = (u64)inp; + out = (u64)(sst->state); + cnt = (u64)blockcnt; + + //printk(KERN_INFO "ZX-GMI-SM3 is called\n"); + + __asm__ __volatile__( + "movq %2, %%rdi\n" + "movq %0, %%rsi\n" + "movq %1, %%rcx\n" + "movq $-1, %%rax\n" + "movq $0x20, %%rbx\n" + ".byte 0xf3, 0x0f, 0xa6, 0xe8" + : + : "r"(in), "r"(cnt), "r"(out) + : "%rdi", "%rsi", "%rcx", "rbx", "%rax", "memory" + ); +} + +static inline int zx_sm3_init(struct shash_desc *desc) +{ + struct sm3_state *sctx; + + if (!desc) + return -EINVAL; + + sctx = shash_desc_ctx(desc); + + sctx->state[0] = 0x6f168073UL; + sctx->state[1] = 0xb9b21449UL; + sctx->state[2] = 0xd7422417UL; + sctx->state[3] = 0x00068adaUL; + sctx->state[4] = 0xbc306fa9UL; + sctx->state[5] = 0xaa383116UL; + sctx->state[6] = 0x4dee8de3UL; + sctx->state[7] = 0x4e0efbb0UL; + + sctx->count = 0; + + return 0; +} + +static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + + memcpy(digest, sctx->state, 32); + + *sctx = (struct sm3_state){}; + return 0; +} + +int zx_sm3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + if (!data || !len) + return -EINVAL; + + return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); +} +EXPORT_SYMBOL(zx_sm3_update); + +static int zx_sm3_final(struct shash_desc *desc, u8 *out) +{ + if (!desc || !out) + return -EINVAL; + + sm3_base_do_finalize(desc, sm3_generic_block_fn); + + return zx_sm3_base_finish(desc, out); +} + +int zx_sm3_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash) +{ + if (!desc || !data || !len || !hash) + return -EINVAL; + + sm3_base_do_update(desc, data, len, sm3_generic_block_fn); + + return zx_sm3_final(desc, hash); +} +EXPORT_SYMBOL(zx_sm3_finup); + +static struct shash_alg zx_sm3_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = zx_sm3_init, + .update = zx_sm3_update, + .final = zx_sm3_final, + .finup = zx_sm3_finup, + .descsize = sizeof(struct sm3_state), + .base = { + .cra_name = "sm3", + .cra_driver_name = "zhaoxin-gmi-sm3", + .cra_priority = 300, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init zx_sm3_generic_mod_init(void) +{ + if (!gmi_available()) { + pr_warn("GMI is unavailable on this platform."); + return -ENODEV; + } + return crypto_register_shash(&zx_sm3_alg); +} + +static void __exit zx_sm3_generic_mod_fini(void) +{ + crypto_unregister_shash(&zx_sm3_alg); +} + +module_init(zx_sm3_generic_mod_init); +module_exit(zx_sm3_generic_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SM3 Secure Hash Algorithm"); + +MODULE_ALIAS_CRYPTO("zx-sm3"); +MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm3"); diff --git 
a/drivers/crypto/zhaoxin-gmi-sm4.c b/drivers/crypto/zhaoxin-gmi-sm4.c new file mode 100644 index 0000000000000000000000000000000000000000..7cfccf78f91270e75698951a0b3c428a515e02d5 --- /dev/null +++ b/drivers/crypto/zhaoxin-gmi-sm4.c @@ -0,0 +1,869 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-gmi-sm4.c - wrapper code for Zhaoxin GMI. + * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SM4_ECB (1<<6) +#define SM4_CBC (1<<7) +#define SM4_CFB (1<<8) +#define SM4_OFB (1<<9) +#define SM4_CTR (1<<10) + +#define ZX_GMI_ALIGNMENT 16 + +#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1]) +#define GETU32(p) ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3]) + +/* Control word. */ +struct sm4_cipher_data { + u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */ + union { + u32 pad; + struct { + u32 encdec:1; + u32 func:5; + u32 mode:5; + u32 digest:1; + } b; + } cword; /* Control word */ + struct crypto_sm4_ctx keys; /* Encryption key */ +}; + +static inline u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, int count) +{ + int eax = sm4_data->cword.pad; + + // Set the flag for encryption or decryption + if (sm4_data->cword.b.encdec == 1) + eax &= ~0x01; + else + eax |= 0x01; + + asm volatile (".byte 0xf3, 0x0f, 0xa7, 0xf0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (eax) + : "d" (iv), "b" (key), "c" (count)); + + return iv; +} + +static inline u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, int count) +{ + int eax = sm4_data->cword.pad; + u8 oiv[SM4_BLOCK_SIZE] = {0}; + u32 cnt_tmp; + u32 i; + + //Backup the original IV if it is not NULL. + if (iv) + memcpy(oiv, iv, SM4_BLOCK_SIZE); + + // Set the flag for encryption or decryption + if (sm4_data->cword.b.encdec == 1) + eax &= ~0x01; + else + eax |= 0x01; + + // Get the current counter. + cnt_tmp = GETU16(&iv[14]); + + // Get the available counter space before overflow. + cnt_tmp = 0x10000 - cnt_tmp; + + // + // Check there is enough counter space for the required blocks. + // + if (cnt_tmp < count) { + + // Process the first part of data blocks. + asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (eax) + : "d" (iv), "b" (key), "c" (cnt_tmp)); + + // The IV's lower 16 bits should be 0x0000 NOW. Check it + //if (GETU16(&iv[14]) != 0) + // printk(KERN_WARNING "ZX-GMI: Counter should be 0, please check\n"); + + // Only increase the counter by SW when overflow occurs. + memcpy(iv, oiv, SM4_BLOCK_SIZE); + for (i = 0; i < cnt_tmp; i++) + crypto_inc(iv, SM4_BLOCK_SIZE); + + // Get the number of data blocks that have not beed encrypted. + cnt_tmp = count - cnt_tmp; + + // Process the remaining part of data blocks. + asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (eax) + : "d" (iv), "b" (key), "c" (cnt_tmp)); + } else { + // Counter space is big enough, the counter will not overflow. 
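+		// A single REP XCRYPT invocation therefore handles all of the
+		// requested blocks without a software counter fix-up.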
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xf0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (eax) + : "d" (iv), "b" (key), "c" (count)); + } + + // Restore the iv if not null + if (iv) + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + return iv; +} + +static u8 *rep_xcrypt_ebc_ONE(const u8 *input, u8 *output, void *key, + u8 *iv, struct sm4_cipher_data *sm4_data, int count) +{ + u64 in, out, enkey, ivec; + + in = (u64)input; + out = (u64)(output); + enkey = (u64)key; + ivec = (u64)iv; + + __asm__ __volatile__( + "movq %2, %%rdi\n" + "movq %0, %%rsi\n" + "movq $1, %%rcx\n" + "movq $0x60, %%rax\n" + "movq %1, %%rbx\n" + "movq %3, %%rdx\n" + ".byte 0xf3, 0x0f, 0xa7, 0xf0" + : + : "r"(in), "r"(enkey), "r"(out), "r"(ivec) + : "%rdi", "%rsi", "%rdx", "%rcx", "rbx", "%rax", "memory" + ); + + return iv; +} + +/** + * gmi_sm4_set_key - Set the sm4 key. + * @tfm: The %crypto_skcipher that is used in the context. + * @in_key: The input key. + * @key_len:The size of the key. + */ +int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) { + pr_err("The key_len must be 16 bytes. please check\n"); + return -EINVAL; + } + + memcpy(ctx->rkey_enc, in_key, key_len); + memcpy(ctx->rkey_dec, in_key, key_len); + + return 0; +} +EXPORT_SYMBOL_GPL(gmi_sm4_set_key); + + +static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + + +static int ebc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int ebc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static void ctr_crypt_final(struct crypto_sm4_ctx *ctx, + struct skcipher_walk *walk) +{ + u8 *ctrblk = walk->iv; + u8 keystream[SM4_BLOCK_SIZE]; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 iv_temp[16]; + unsigned int nbytes = walk->nbytes; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + memcpy(iv_temp, ctrblk, 16); + + rep_xcrypt_ebc_ONE(ctrblk, keystream, ctx->rkey_enc, walk->iv, &cw, 1); + + crypto_xor_cpy(dst, keystream, src, nbytes); + + crypto_inc(ctrblk, SM4_BLOCK_SIZE); +} + +/* + * sm4_cipher_ctr is usef for ZX-E or newer + */ +static int sm4_cipher_ctr(struct skcipher_request *req, 
struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv; + u32 i; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, + ctx->rkey_enc, walk.iv, cw, blocks); + + // Update the counter. + for (i = 0; i < blocks; i++) + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + if (walk.nbytes) { + ctr_crypt_final(ctx, &walk); + err = skcipher_walk_done(&walk, 0); + } + + return err; +} + +/* + * ctr_encrypt is usef for ZX-E or newer + */ +static int ctr_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * ctr_decrypt is usef for ZX-E or newer + */ +static int ctr_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * sm4_ctr_zxc is used for ZXC+ + */ +static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv = NULL; + u32 n; + u8 en_iv[SM4_BLOCK_SIZE] = {0}; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + + iv = rep_xcrypt_ebc_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + for (n = 0; n < 16; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = *(size_t *)(en_iv + n) + ^ *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + if (walk.nbytes) { + ctr_crypt_final(ctx, &walk); + err = skcipher_walk_done(&walk, 0); + } + + return err; +} + +/* + * ctr_encrypt_zxc is usef for ZX-C+ + */ +static int ctr_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ctr_decrypt_zxc is usef for ZX-C+ + */ +static int ctr_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ofb_encrypt is usef for ZX-E or newer + */ +static int ofb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * ofb_decrypt is usef for ZX-E or newer + */ +static int ofb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_ofb_zxc is usef for ZX-C+ + */ +static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct 
crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv = NULL; + u32 n; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + iv = rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, walk.iv, cw, 1); + + for (n = 0; n < 16; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = *(size_t *)(walk.iv + n) + ^ *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * ofb_encrypt_zxc is usef for ZX-C+ + */ +static int ofb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + +/* + * ofb_decrypt_zxc is usef for ZX-C+ + */ +static int ofb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + + +/* + * cfb_encrypt is usef for ZX-E or newer. + */ +static int cfb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * cfb_decrypt is usef for ZX-E or newer. + */ + +static int cfb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; + +} + +/* + * sm4_cfb_zxc is usef for ZX-C+ + */ +static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv = NULL; + u32 n; + size_t t; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + + iv = rep_xcrypt_ebc_ONE(walk.iv, walk.iv, ctx->rkey_enc, walk.iv, cw, 1); + + if (cw->cword.b.encdec) + for (n = 0; n < 16; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) + ^= *(size_t *)(walk.src.virt.addr + n); + else + for (n = 0; n < 16; n += sizeof(size_t)) { + t = *(size_t *)(walk.src.virt.addr + n); + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ t; + *(size_t *)(walk.iv + n) = t; + } + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * cfb_encrypt_zxc is usef for ZX-C+ + */ +static int cfb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +/* + * cfb_decrypt_zxc is usef for ZX-C+ + */ +static int cfb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + + +static struct skcipher_alg 
aes_algs[] = { + { + .base = { + .cra_name = "__ecb(sm4)", + .cra_driver_name = "__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ebc_encrypt, + .decrypt = ebc_decrypt, + }, + + { + .base = { + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + + { + .base = { + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, + }, + + { + .base = { + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, + }, + + { + .base = { + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, + } +}; + +static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(aes_algs)]; + + +static int zx_gmi_capability(void) +{ + int eax = 0; + int ebx, ecx, edx = 0; + // 1. check vendor ID string + asm volatile ("cpuid":"=b"(ebx), "=c"(ecx), "=d"(edx):"a"(eax) : ); + + if (((ebx == 0x746e6543) && (ecx == 0x736c7561) && (edx == 0x48727561)) || + ((ebx == 0x68532020) && (ecx == 0x20206961) && (edx == 0x68676e61))) { + // 2. check whether support SM3/SM4/SM2 Instructions + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + } else { + pr_warn("This is not a ZX CPU!\n"); + } + return edx; +} + +static u32 get_cpu_fms(u32 *eax, u32 *leaf) +{ + u32 eax_tmp = *eax, leaf_tmp = *leaf; + + __asm__ __volatile__ ( + "cpuid" + : "=a"(eax_tmp) + : "0"(leaf_tmp) + : "ebx", "ecx"); + + *eax = eax_tmp; + return eax_tmp; +} + +static int gmi_zxc_check(void) +{ + u32 eax = 0; + char family, model; + u32 leaf = 0x1; + int f_zxc = 0; + + get_cpu_fms(&eax, &leaf); + family = (eax & 0xf00) >> 8; /* bit 11-08 */ + model = (eax & 0xf0) >> 4; /* bit 7-4 */ + + if ((family == 7) && (model == 0xb)) + f_zxc = 0; + else if (((family == 6) && (model == 0xf)) || + ((family == 6) && (model == 9))) + f_zxc = 1; + + return f_zxc; +} + +/* + * Load supported features of the CPU to see if the SM3/SM4 is available. 
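+ * Both CCS feature bits (EDX bits 4 and 5 of CPUID leaf 0xC0000001) must be
+ * reported for the GMI SM3/SM4 instructions to be used.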
+ */ +static int gmi_ccs_available(void) +{ + unsigned int zx_gmi_use_ccs = 0; /* Chinese Cipher Standard SM3 and SM4 Support */ + + zx_gmi_use_ccs = ((zx_gmi_capability() & (0x3 << 4)) == (0x3 << 4)); + + return zx_gmi_use_ccs; +} + +static void gmi_sm4_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++) + simd_skcipher_free(sm4_simd_algs[i]); + + crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); +} +static int __init gmi_sm4_init(void) +{ + struct simd_skcipher_alg *simd; + const char *basename; + const char *algname; + const char *drvname; + int err; + int i; + + if (!gmi_ccs_available()) + return -ENODEV; + + if (gmi_zxc_check()) { + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + if (!strcmp(aes_algs[i].base.cra_name, "__ctr(sm4)")) { + pr_info("GRX: zxc gmi sm4 ctr FOUND\n"); + aes_algs[i].encrypt = ctr_encrypt_zxc; + aes_algs[i].decrypt = ctr_decrypt_zxc; + } else if (!strcmp(aes_algs[i].base.cra_name, "__cfb(sm4)")) { + pr_info("GRX: zxc gmi sm4 cfb FOUND\n"); + aes_algs[i].encrypt = cfb_encrypt_zxc; + aes_algs[i].decrypt = cfb_decrypt_zxc; + } else if (!strcmp(aes_algs[i].base.cra_name, "__ofb(sm4)")) { + pr_info("GRX: zxc gmi sm4 ofb FOUND\n"); + aes_algs[i].encrypt = ofb_encrypt_zxc; + aes_algs[i].decrypt = ofb_decrypt_zxc; + } + } + } + + err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + algname = aes_algs[i].base.cra_name + 2; + drvname = aes_algs[i].base.cra_driver_name + 2; + basename = aes_algs[i].base.cra_driver_name; + simd = simd_skcipher_create_compat(algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; + + sm4_simd_algs[i] = simd; + } + + return 0; + +unregister_simds: + gmi_sm4_exit(); + return err; +} + +late_initcall(gmi_sm4_init); +module_exit(gmi_sm4_exit); + +MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using Zhaoxin GMI"); +MODULE_AUTHOR("GRX"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c new file mode 100644 index 0000000000000000000000000000000000000000..a2758801aa2db59a8c67fdf866d9ff375f6a115e --- /dev/null +++ b/drivers/crypto/zhaoxin-sha.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Cryptographic API. + * + * Support for ACE hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline void padlock_output_block(uint32_t *src, + uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +/* + * Add two shash_alg instance for hardware-implemented + * multiple-parts hash supported by Zhaoxin Processor. 
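+ * The update helpers buffer any partial block in the shash state and only
+ * pass whole 64-byte blocks to the hardware.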
+ */ +static int padlock_sha1_init_zhaoxin(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, + const u8 *data, unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); + + if ((partial + len) >= SHA1_BLOCK_SIZE) { + + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, + done + SHA1_BLOCK_SIZE); + src = sctx->buffer; + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)1)); + done += SHA1_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from the input data */ + if (len - done >= SHA1_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), + "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); + done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); + memcpy(sctx->buffer + partial, src, len - done); + + return 0; +} + +static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? 
(56 - partial) : ((64+56) - partial); + padlock_sha1_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); + + return 0; +} + +static int padlock_sha256_init_zhaoxin(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha256_state){ + .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}, + }; + + return 0; +} + +static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); + + if ((partial + len) >= SHA256_BLOCK_SIZE) { + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, + done + SHA256_BLOCK_SIZE); + src = sctx->buf; + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)1)); + done += SHA256_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from input data*/ + if (len - done >= SHA256_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), + "c"((unsigned long)((len - done) / 64))); + done += ((len - done) - (len - done) % 64); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); + memcpy(sctx->buf + partial, src, len - done); + + return 0; +} + +static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *state = + (struct sha256_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? 
(56 - partial) : ((64+56) - partial); + padlock_sha256_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_zhaoxin(struct shash_desc *desc, + void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_zhaoxin(struct shash_desc *desc, + const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_zhaoxin = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_zhaoxin, + .update = padlock_sha1_update_zhaoxin, + .final = padlock_sha1_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_zhaoxin = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_zhaoxin, + .update = padlock_sha256_update_zhaoxin, + .final = padlock_sha256_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id zhaoxin_sha_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + return -ENODEV; + + sha1 = &sha1_alg_zhaoxin; + sha256 = &sha256_alg_zhaoxin; + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + pr_notice("Using ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + pr_err("ACE SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_shash(&sha1_alg_zhaoxin); + crypto_unregister_shash(&sha256_alg_zhaoxin); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); +MODULE_ALIAS_CRYPTO("sha256-padlock"); diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index e0700bf4893a31e4e4f148ae26e95b509c8b6cbd..a59f338f520f528157dd5e21d381aec8c1fd1bda 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -29,6 +29,31 @@ config DEV_DAX_PMEM driver consumes memory ranges allocated and exported by the libnvdimm sub-system. 
- Say Y if unsure + Say M if unsure + +config DEV_DAX_KMEM + tristate "KMEM DAX: volatile-use of persistent memory" + default DEV_DAX + depends on DEV_DAX + depends on MEMORY_HOTPLUG # for add_memory() and friends + help + Support access to persistent memory as if it were RAM. This + allows easier use of persistent memory by unmodified + applications. + + To use this feature, a DAX device must be unbound from the + device_dax driver (PMEM DAX) and bound to this kmem driver + on each boot. + + Say N if unsure. + +config DEV_DAX_PMEM_COMPAT + tristate "PMEM DAX: support the deprecated /sys/class/dax interface" + depends on m && DEV_DAX_PMEM=m + default DEV_DAX_PMEM + help + Older versions of the libdaxctl library expect to find all + device-dax instances under /sys/class/dax. If libdaxctl in + your distribution is older than v58 say M, otherwise say N. endif diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile index 574286fac87ce71b88e61bf5935e3905e226638b..81f7d54dadfb34ed470ee90627556172c9fc3f7a 100644 --- a/drivers/dax/Makefile +++ b/drivers/dax/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DAX) += dax.o obj-$(CONFIG_DEV_DAX) += device_dax.o -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o +obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o dax-y := super.o -dax_pmem-y := pmem.o +dax-y += bus.o device_dax-y := device.o + +obj-y += pmem/ diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c new file mode 100644 index 0000000000000000000000000000000000000000..2109cfe80219db2ff4bb9fa516bbbfefbed10045 --- /dev/null +++ b/drivers/dax/bus.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */ +#include +#include +#include +#include +#include +#include +#include "dax-private.h" +#include "bus.h" + +static struct class *dax_class; + +static DEFINE_MUTEX(dax_bus_lock); + +#define DAX_NAME_LEN 30 +struct dax_id { + struct list_head list; + char dev_name[DAX_NAME_LEN]; +}; + +static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + /* + * We only ever expect to handle device-dax instances, i.e. 
the + * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero + */ + return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0); +} + +static struct dax_device_driver *to_dax_drv(struct device_driver *drv) +{ + return container_of(drv, struct dax_device_driver, drv); +} + +static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv, + const char *dev_name) +{ + struct dax_id *dax_id; + + lockdep_assert_held(&dax_bus_lock); + + list_for_each_entry(dax_id, &dax_drv->ids, list) + if (sysfs_streq(dax_id->dev_name, dev_name)) + return dax_id; + return NULL; +} + +static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev) +{ + int match; + + mutex_lock(&dax_bus_lock); + match = !!__dax_match_id(dax_drv, dev_name(dev)); + mutex_unlock(&dax_bus_lock); + + return match; +} + +enum id_action { + ID_REMOVE, + ID_ADD, +}; + +static ssize_t do_id_store(struct device_driver *drv, const char *buf, + size_t count, enum id_action action) +{ + struct dax_device_driver *dax_drv = to_dax_drv(drv); + unsigned int region_id, id; + char devname[DAX_NAME_LEN]; + struct dax_id *dax_id; + ssize_t rc = count; + int fields; + + fields = sscanf(buf, "dax%d.%d", ®ion_id, &id); + if (fields != 2) + return -EINVAL; + sprintf(devname, "dax%d.%d", region_id, id); + if (!sysfs_streq(buf, devname)) + return -EINVAL; + + mutex_lock(&dax_bus_lock); + dax_id = __dax_match_id(dax_drv, buf); + if (!dax_id) { + if (action == ID_ADD) { + dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL); + if (dax_id) { + strncpy(dax_id->dev_name, buf, DAX_NAME_LEN); + list_add(&dax_id->list, &dax_drv->ids); + } else + rc = -ENOMEM; + } else + /* nothing to remove */; + } else if (action == ID_REMOVE) { + list_del(&dax_id->list); + kfree(dax_id); + } else + /* dax_id already added */; + mutex_unlock(&dax_bus_lock); + + if (rc < 0) + return rc; + if (action == ID_ADD) + rc = driver_attach(drv); + if (rc) + return rc; + return count; +} + +static ssize_t new_id_store(struct device_driver *drv, const char *buf, + size_t count) +{ + return do_id_store(drv, buf, count, ID_ADD); +} +static DRIVER_ATTR_WO(new_id); + +static ssize_t remove_id_store(struct device_driver *drv, const char *buf, + size_t count) +{ + return do_id_store(drv, buf, count, ID_REMOVE); +} +static DRIVER_ATTR_WO(remove_id); + +static struct attribute *dax_drv_attrs[] = { + &driver_attr_new_id.attr, + &driver_attr_remove_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dax_drv); + +static int dax_bus_match(struct device *dev, struct device_driver *drv); + +static struct bus_type dax_bus_type = { + .name = "dax", + .uevent = dax_bus_uevent, + .match = dax_bus_match, + .drv_groups = dax_drv_groups, +}; + +static int dax_bus_match(struct device *dev, struct device_driver *drv) +{ + struct dax_device_driver *dax_drv = to_dax_drv(drv); + + /* + * All but the 'device-dax' driver, which has 'match_always' + * set, requires an exact id match. + */ + if (dax_drv->match_always) + return 1; + + return dax_match_id(dax_drv, dev); +} + +/* + * Rely on the fact that drvdata is set before the attributes are + * registered, and that the attributes are unregistered before drvdata + * is cleared to assume that drvdata is always valid. 
+ */ +static ssize_t id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", dax_region->id); +} +static DEVICE_ATTR_RO(id); + +static ssize_t region_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%llu\n", (unsigned long long) + resource_size(&dax_region->res)); +} +static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, + region_size_show, NULL); + +static ssize_t align_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dax_region *dax_region = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", dax_region->align); +} +static DEVICE_ATTR_RO(align); + +static struct attribute *dax_region_attributes[] = { + &dev_attr_region_size.attr, + &dev_attr_align.attr, + &dev_attr_id.attr, + NULL, +}; + +static const struct attribute_group dax_region_attribute_group = { + .name = "dax_region", + .attrs = dax_region_attributes, +}; + +static const struct attribute_group *dax_region_attribute_groups[] = { + &dax_region_attribute_group, + NULL, +}; + +static void dax_region_free(struct kref *kref) +{ + struct dax_region *dax_region; + + dax_region = container_of(kref, struct dax_region, kref); + kfree(dax_region); +} + +void dax_region_put(struct dax_region *dax_region) +{ + kref_put(&dax_region->kref, dax_region_free); +} +EXPORT_SYMBOL_GPL(dax_region_put); + +static void dax_region_unregister(void *region) +{ + struct dax_region *dax_region = region; + + sysfs_remove_groups(&dax_region->dev->kobj, + dax_region_attribute_groups); + dax_region_put(dax_region); +} + +struct dax_region *alloc_dax_region(struct device *parent, int region_id, + struct resource *res, int target_node, unsigned int align, + unsigned long pfn_flags) +{ + struct dax_region *dax_region; + + /* + * The DAX core assumes that it can store its private data in + * parent->driver_data. This WARN is a reminder / safeguard for + * developers of device-dax drivers. 
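+	 * If drvdata is already populated, the region allocation is refused.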
+ */ + if (dev_get_drvdata(parent)) { + dev_WARN(parent, "dax core failed to setup private data\n"); + return NULL; + } + + if (!IS_ALIGNED(res->start, align) + || !IS_ALIGNED(resource_size(res), align)) + return NULL; + + dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); + if (!dax_region) + return NULL; + + dev_set_drvdata(parent, dax_region); + memcpy(&dax_region->res, res, sizeof(*res)); + dax_region->pfn_flags = pfn_flags; + kref_init(&dax_region->kref); + dax_region->id = region_id; + dax_region->align = align; + dax_region->dev = parent; + dax_region->target_node = target_node; + if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { + kfree(dax_region); + return NULL; + } + + kref_get(&dax_region->kref); + if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) + return NULL; + return dax_region; +} +EXPORT_SYMBOL_GPL(alloc_dax_region); + +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + unsigned long long size = resource_size(&dev_dax->region->res); + + return sprintf(buf, "%llu\n", size); +} +static DEVICE_ATTR_RO(size); + +static int dev_dax_target_node(struct dev_dax *dev_dax) +{ + struct dax_region *dax_region = dev_dax->region; + + return dax_region->target_node; +} + +static ssize_t target_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + + return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax)); +} +static DEVICE_ATTR_RO(target_node); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + /* + * We only ever expect to handle device-dax instances, i.e. the + * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero + */ + return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0); +} +static DEVICE_ATTR_RO(modalias); + +static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dev_dax *dev_dax = to_dev_dax(dev); + + if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0) + return 0; + return a->mode; +} + +static struct attribute *dev_dax_attributes[] = { + &dev_attr_modalias.attr, + &dev_attr_size.attr, + &dev_attr_target_node.attr, + NULL, +}; + +static const struct attribute_group dev_dax_attribute_group = { + .attrs = dev_dax_attributes, + .is_visible = dev_dax_visible, +}; + +static const struct attribute_group *dax_attribute_groups[] = { + &dev_dax_attribute_group, + NULL, +}; + +void kill_dev_dax(struct dev_dax *dev_dax) +{ + struct dax_device *dax_dev = dev_dax->dax_dev; + struct inode *inode = dax_inode(dax_dev); + + kill_dax(dax_dev); + unmap_mapping_range(inode->i_mapping, 0, 0, 1); +} +EXPORT_SYMBOL_GPL(kill_dev_dax); + +static void dev_dax_release(struct device *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + struct dax_region *dax_region = dev_dax->region; + struct dax_device *dax_dev = dev_dax->dax_dev; + + dax_region_put(dax_region); + put_dax(dax_dev); + kfree(dev_dax); +} + +static void unregister_dev_dax(void *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + + dev_dbg(dev, "%s\n", __func__); + + kill_dev_dax(dev_dax); + device_del(dev); + put_device(dev); +} + +struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id, + struct dev_pagemap *pgmap, enum dev_dax_subsys subsys) +{ + struct device *parent = dax_region->dev; + struct dax_device *dax_dev; + struct dev_dax 
*dev_dax; + struct inode *inode; + struct device *dev; + int rc = -ENOMEM; + + if (id < 0) + return ERR_PTR(-EINVAL); + + dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL); + if (!dev_dax) + return ERR_PTR(-ENOMEM); + + memcpy(&dev_dax->pgmap, pgmap, sizeof(*pgmap)); + + /* + * No 'host' or dax_operations since there is no access to this + * device outside of mmap of the resulting character device. + */ + dax_dev = alloc_dax(dev_dax, NULL, NULL); + if (!dax_dev) + goto err; + + /* a device_dax instance is dead while the driver is not attached */ + kill_dax(dax_dev); + + /* from here on we're committed to teardown via dax_dev_release() */ + dev = &dev_dax->dev; + device_initialize(dev); + + dev_dax->dax_dev = dax_dev; + dev_dax->region = dax_region; + dev_dax->target_node = dax_region->target_node; + kref_get(&dax_region->kref); + + inode = dax_inode(dax_dev); + dev->devt = inode->i_rdev; + if (subsys == DEV_DAX_BUS) + dev->bus = &dax_bus_type; + else + dev->class = dax_class; + dev->parent = parent; + dev->groups = dax_attribute_groups; + dev->release = dev_dax_release; + dev_set_name(dev, "dax%d.%d", dax_region->id, id); + + rc = device_add(dev); + if (rc) { + kill_dev_dax(dev_dax); + put_device(dev); + return ERR_PTR(rc); + } + + rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); + if (rc) + return ERR_PTR(rc); + + return dev_dax; + + err: + kfree(dev_dax); + + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(__devm_create_dev_dax); + +static int match_always_count; + +int __dax_driver_register(struct dax_device_driver *dax_drv, + struct module *module, const char *mod_name) +{ + struct device_driver *drv = &dax_drv->drv; + int rc = 0; + + INIT_LIST_HEAD(&dax_drv->ids); + drv->owner = module; + drv->name = mod_name; + drv->mod_name = mod_name; + drv->bus = &dax_bus_type; + + /* there can only be one default driver */ + mutex_lock(&dax_bus_lock); + match_always_count += dax_drv->match_always; + if (match_always_count > 1) { + match_always_count--; + WARN_ON(1); + rc = -EINVAL; + } + mutex_unlock(&dax_bus_lock); + if (rc) + return rc; + return driver_register(drv); +} +EXPORT_SYMBOL_GPL(__dax_driver_register); + +void dax_driver_unregister(struct dax_device_driver *dax_drv) +{ + struct device_driver *drv = &dax_drv->drv; + struct dax_id *dax_id, *_id; + + mutex_lock(&dax_bus_lock); + match_always_count -= dax_drv->match_always; + list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) { + list_del(&dax_id->list); + kfree(dax_id); + } + mutex_unlock(&dax_bus_lock); + driver_unregister(drv); +} +EXPORT_SYMBOL_GPL(dax_driver_unregister); + +int __init dax_bus_init(void) +{ + int rc; + + if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) { + dax_class = class_create(THIS_MODULE, "dax"); + if (IS_ERR(dax_class)) + return PTR_ERR(dax_class); + } + + rc = bus_register(&dax_bus_type); + if (rc) + class_destroy(dax_class); + return rc; +} + +void __exit dax_bus_exit(void) +{ + bus_unregister(&dax_bus_type); + class_destroy(dax_class); +} diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h new file mode 100644 index 0000000000000000000000000000000000000000..8619e32999436da995a8d17926f9cd0977ee3140 --- /dev/null +++ b/drivers/dax/bus.h @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ +#ifndef __DAX_BUS_H__ +#define __DAX_BUS_H__ +#include + +struct dev_dax; +struct resource; +struct dax_device; +struct dax_region; +void dax_region_put(struct dax_region *dax_region); +struct dax_region *alloc_dax_region(struct device *parent, int region_id, + struct resource *res, int target_node, unsigned int align, + unsigned long flags); + +enum dev_dax_subsys { + DEV_DAX_BUS, + DEV_DAX_CLASS, +}; + +struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id, + struct dev_pagemap *pgmap, enum dev_dax_subsys subsys); + +static inline struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, + int id, struct dev_pagemap *pgmap) +{ + return __devm_create_dev_dax(dax_region, id, pgmap, DEV_DAX_BUS); +} + +/* to be deleted when DEV_DAX_CLASS is removed */ +struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys); + +struct dax_device_driver { + struct device_driver drv; + struct list_head ids; + int match_always; +}; + +int __dax_driver_register(struct dax_device_driver *dax_drv, + struct module *module, const char *mod_name); +#define dax_driver_register(driver) \ + __dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +void dax_driver_unregister(struct dax_device_driver *dax_drv); +void kill_dev_dax(struct dev_dax *dev_dax); + +#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) +int dev_dax_probe(struct device *dev); +#endif + +/* + * While run_dax() is potentially a generic operation that could be + * defined in include/linux/dax.h we don't want to grow any users + * outside of drivers/dax/ + */ +void run_dax(struct dax_device *dax_dev); + +#define MODULE_ALIAS_DAX_DEVICE(type) \ + MODULE_ALIAS("dax:t" __stringify(type) "*") +#define DAX_DEVICE_MODALIAS_FMT "dax:t%d" + +#endif /* __DAX_BUS_H__ */ diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index b6fc4f04636de1cd52d4195331b1e4168847bb1e..a45612148ca021e96ff3c8cf60eab9b4071fb858 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -16,10 +16,17 @@ #include #include +/* private routines between core files */ +struct dax_device; +struct dax_device *inode_dax(struct inode *inode); +struct inode *dax_inode(struct dax_device *dax_dev); +int dax_bus_init(void); +void dax_bus_exit(void); + /** * struct dax_region - mapping infrastructure for dax devices * @id: kernel-wide unique region for a memory range - * @base: linear address corresponding to @res + * @target_node: effective numa node if this memory range is onlined * @kref: to pin while other agents have a need to do lookups * @dev: parent device backing this region * @align: allocation and mapping alignment for child dax devices @@ -28,8 +35,7 @@ */ struct dax_region { int id; - struct ida ida; - void *base; + int target_node; struct kref kref; struct device *dev; unsigned int align; @@ -38,20 +44,28 @@ struct dax_region { }; /** - * struct dev_dax - instance data for a subdivision of a dax region + * struct dev_dax - instance data for a subdivision of a dax region, and + * data while the device is activated in the driver. 
* @region - parent region * @dax_dev - core dax functionality + * @target_node: effective numa node if dev_dax memory range is onlined * @dev - device core - * @id - child id in the region - * @num_resources - number of physical address extents in this device - * @res - array of physical address ranges + * @pgmap - pgmap for memmap setup / lifetime (driver owned) + * @ref: pgmap reference count (driver owned) + * @cmp: @ref final put completion (driver owned) */ struct dev_dax { struct dax_region *region; struct dax_device *dax_dev; + int target_node; struct device dev; - int id; - int num_resources; - struct resource res[0]; + struct dev_pagemap pgmap; + struct percpu_ref ref; + struct completion cmp; }; + +static inline struct dev_dax *to_dev_dax(struct device *dev) +{ + return container_of(dev, struct dev_dax, dev); +} #endif diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h deleted file mode 100644 index f9e5feea742cd1e7872d4c855565ad6ef2764123..0000000000000000000000000000000000000000 --- a/drivers/dax/dax.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __DAX_H__ -#define __DAX_H__ -struct dax_device; -struct dax_device *inode_dax(struct inode *inode); -struct inode *dax_inode(struct dax_device *dax_dev); -#endif /* __DAX_H__ */ diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h deleted file mode 100644 index 688b051750bd7cc615849491605bc0ecf50d9101..0000000000000000000000000000000000000000 --- a/drivers/dax/device-dax.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __DEVICE_DAX_H__ -#define __DEVICE_DAX_H__ -struct device; -struct dev_dax; -struct resource; -struct dax_region; -void dax_region_put(struct dax_region *dax_region); -struct dax_region *alloc_dax_region(struct device *parent, - int region_id, struct resource *res, unsigned int align, - void *addr, unsigned long flags); -struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - int id, struct resource *res, int count); -#endif /* __DEVICE_DAX_H__ */ diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 948806e57cee33f74024adb442f398579319b89d..996d68ff992a88bf63d9f6ca1a6a617a83b5be6d 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -1,15 +1,6 @@ -/* - * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */ +#include #include #include #include @@ -21,161 +12,39 @@ #include #include #include "dax-private.h" -#include "dax.h" +#include "bus.h" -static struct class *dax_class; - -/* - * Rely on the fact that drvdata is set before the attributes are - * registered, and that the attributes are unregistered before drvdata - * is cleared to assume that drvdata is always valid. - */ -static ssize_t id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", dax_region->id); -} -static DEVICE_ATTR_RO(id); - -static ssize_t region_size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%llu\n", (unsigned long long) - resource_size(&dax_region->res)); -} -static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, - region_size_show, NULL); - -static ssize_t align_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dax_region *dax_region = dev_get_drvdata(dev); - - return sprintf(buf, "%u\n", dax_region->align); -} -static DEVICE_ATTR_RO(align); - -static struct attribute *dax_region_attributes[] = { - &dev_attr_region_size.attr, - &dev_attr_align.attr, - &dev_attr_id.attr, - NULL, -}; - -static const struct attribute_group dax_region_attribute_group = { - .name = "dax_region", - .attrs = dax_region_attributes, -}; - -static const struct attribute_group *dax_region_attribute_groups[] = { - &dax_region_attribute_group, - NULL, -}; - -static void dax_region_free(struct kref *kref) -{ - struct dax_region *dax_region; - - dax_region = container_of(kref, struct dax_region, kref); - kfree(dax_region); -} - -void dax_region_put(struct dax_region *dax_region) +static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref) { - kref_put(&dax_region->kref, dax_region_free); + return container_of(ref, struct dev_dax, ref); } -EXPORT_SYMBOL_GPL(dax_region_put); -static void dax_region_unregister(void *region) +static void dev_dax_percpu_release(struct percpu_ref *ref) { - struct dax_region *dax_region = region; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - sysfs_remove_groups(&dax_region->dev->kobj, - dax_region_attribute_groups); - dax_region_put(dax_region); + dev_dbg(&dev_dax->dev, "%s\n", __func__); + complete(&dev_dax->cmp); } -struct dax_region *alloc_dax_region(struct device *parent, int region_id, - struct resource *res, unsigned int align, void *addr, - unsigned long pfn_flags) +static void dev_dax_percpu_exit(void *data) { - struct dax_region *dax_region; - - /* - * The DAX core assumes that it can store its private data in - * parent->driver_data. This WARN is a reminder / safeguard for - * developers of device-dax drivers. 
- */ - if (dev_get_drvdata(parent)) { - dev_WARN(parent, "dax core failed to setup private data\n"); - return NULL; - } - - if (!IS_ALIGNED(res->start, align) - || !IS_ALIGNED(resource_size(res), align)) - return NULL; - - dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); - if (!dax_region) - return NULL; - - dev_set_drvdata(parent, dax_region); - memcpy(&dax_region->res, res, sizeof(*res)); - dax_region->pfn_flags = pfn_flags; - kref_init(&dax_region->kref); - dax_region->id = region_id; - ida_init(&dax_region->ida); - dax_region->align = align; - dax_region->dev = parent; - dax_region->base = addr; - if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { - kfree(dax_region); - return NULL; - } + struct percpu_ref *ref = data; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - kref_get(&dax_region->kref); - if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) - return NULL; - return dax_region; + dev_dbg(&dev_dax->dev, "%s\n", __func__); + wait_for_completion(&dev_dax->cmp); + percpu_ref_exit(ref); } -EXPORT_SYMBOL_GPL(alloc_dax_region); -static struct dev_dax *to_dev_dax(struct device *dev) +static void dev_dax_percpu_kill(struct percpu_ref *data) { - return container_of(dev, struct dev_dax, dev); -} - -static ssize_t size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_dax *dev_dax = to_dev_dax(dev); - unsigned long long size = 0; - int i; + struct percpu_ref *ref = data; + struct dev_dax *dev_dax = ref_to_dev_dax(ref); - for (i = 0; i < dev_dax->num_resources; i++) - size += resource_size(&dev_dax->res[i]); - - return sprintf(buf, "%llu\n", size); + dev_dbg(&dev_dax->dev, "%s\n", __func__); + percpu_ref_kill(ref); } -static DEVICE_ATTR_RO(size); - -static struct attribute *dev_dax_attributes[] = { - &dev_attr_size.attr, - NULL, -}; - -static const struct attribute_group dev_dax_attribute_group = { - .attrs = dev_dax_attributes, -}; - -static const struct attribute_group *dax_attribute_groups[] = { - &dev_dax_attribute_group, - NULL, -}; static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, const char *func) @@ -226,21 +95,11 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size) { - struct resource *res; - /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ - phys_addr_t uninitialized_var(phys); - int i; - - for (i = 0; i < dev_dax->num_resources; i++) { - res = &dev_dax->res[i]; - phys = pgoff * PAGE_SIZE + res->start; - if (phys >= res->start && phys <= res->end) - break; - pgoff -= PHYS_PFN(resource_size(res)); - } + struct resource *res = &dev_dax->region->res; + phys_addr_t phys; - if (i < dev_dax->num_resources) { - res = &dev_dax->res[i]; + phys = pgoff * PAGE_SIZE + res->start; + if (phys >= res->start && phys <= res->end) { if (phys + size - 1 <= res->end) return phys; } @@ -325,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); - return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn, - vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -376,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); - return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn, - vmf->flags & 
FAULT_FLAG_WRITE); + return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, @@ -576,152 +433,100 @@ static const struct file_operations dax_fops = { .mmap_supported_flags = MAP_SYNC, }; -static void dev_dax_release(struct device *dev) +static void dev_dax_cdev_del(void *cdev) { - struct dev_dax *dev_dax = to_dev_dax(dev); - struct dax_region *dax_region = dev_dax->region; - struct dax_device *dax_dev = dev_dax->dax_dev; - - if (dev_dax->id >= 0) - ida_simple_remove(&dax_region->ida, dev_dax->id); - dax_region_put(dax_region); - put_dax(dax_dev); - kfree(dev_dax); + cdev_del(cdev); } -static void kill_dev_dax(struct dev_dax *dev_dax) +static void dev_dax_kill(void *dev_dax) { - struct dax_device *dax_dev = dev_dax->dax_dev; - struct inode *inode = dax_inode(dax_dev); - - kill_dax(dax_dev); - unmap_mapping_range(inode->i_mapping, 0, 0, 1); + kill_dev_dax(dev_dax); } -static void unregister_dev_dax(void *dev) +int dev_dax_probe(struct device *dev) { struct dev_dax *dev_dax = to_dev_dax(dev); struct dax_device *dax_dev = dev_dax->dax_dev; - struct inode *inode = dax_inode(dax_dev); - struct cdev *cdev = inode->i_cdev; - - dev_dbg(dev, "trace\n"); - - kill_dev_dax(dev_dax); - cdev_device_del(cdev, dev); - put_device(dev); -} - -struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - int id, struct resource *res, int count) -{ - struct device *parent = dax_region->dev; - struct dax_device *dax_dev; - struct dev_dax *dev_dax; + struct resource *res = &dev_dax->region->res; struct inode *inode; - struct device *dev; struct cdev *cdev; - int rc, i; - - if (!count) - return ERR_PTR(-EINVAL); - - dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL); - if (!dev_dax) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < count; i++) { - if (!IS_ALIGNED(res[i].start, dax_region->align) - || !IS_ALIGNED(resource_size(&res[i]), - dax_region->align)) { - rc = -EINVAL; - break; - } - dev_dax->res[i].start = res[i].start; - dev_dax->res[i].end = res[i].end; + void *addr; + int rc; + + /* 1:1 map region resource range to device-dax instance range */ + if (!devm_request_mem_region(dev, res->start, resource_size(res), + dev_name(dev))) { + dev_warn(dev, "could not reserve region %pR\n", res); + return -EBUSY; } - if (i < count) - goto err_id; + init_completion(&dev_dax->cmp); + rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0, + GFP_KERNEL); + if (rc) + return rc; - if (id < 0) { - id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); - dev_dax->id = id; - if (id < 0) { - rc = id; - goto err_id; - } - } else { - /* region provider owns @id lifetime */ - dev_dax->id = -1; - } + rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref); + if (rc) + return rc; - /* - * No 'host' or dax_operations since there is no access to this - * device outside of mmap of the resulting character device. 
- */ - dax_dev = alloc_dax(dev_dax, NULL, NULL); - if (!dax_dev) { - rc = -ENOMEM; - goto err_dax; + dev_dax->pgmap.ref = &dev_dax->ref; + dev_dax->pgmap.kill = dev_dax_percpu_kill; + addr = devm_memremap_pages(dev, &dev_dax->pgmap); + if (IS_ERR(addr)) { + devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref); + percpu_ref_exit(&dev_dax->ref); + return PTR_ERR(addr); } - /* from here on we're committed to teardown via dax_dev_release() */ - dev = &dev_dax->dev; - device_initialize(dev); - inode = dax_inode(dax_dev); cdev = inode->i_cdev; cdev_init(cdev, &dax_fops); - cdev->owner = parent->driver->owner; - - dev_dax->num_resources = count; - dev_dax->dax_dev = dax_dev; - dev_dax->region = dax_region; - kref_get(&dax_region->kref); - - dev->devt = inode->i_rdev; - dev->class = dax_class; - dev->parent = parent; - dev->groups = dax_attribute_groups; - dev->release = dev_dax_release; - dev_set_name(dev, "dax%d.%d", dax_region->id, id); - - rc = cdev_device_add(cdev, dev); - if (rc) { - kill_dev_dax(dev_dax); - put_device(dev); - return ERR_PTR(rc); - } - - rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); + if (dev->class) { + /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */ + cdev->owner = dev->parent->driver->owner; + } else + cdev->owner = dev->driver->owner; + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); if (rc) - return ERR_PTR(rc); + return rc; - return dev_dax; + rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev); + if (rc) + return rc; - err_dax: - if (dev_dax->id >= 0) - ida_simple_remove(&dax_region->ida, dev_dax->id); - err_id: - kfree(dev_dax); + run_dax(dax_dev); + return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax); +} +EXPORT_SYMBOL_GPL(dev_dax_probe); - return ERR_PTR(rc); +static int dev_dax_remove(struct device *dev) +{ + /* all probe actions are unwound by devm */ + return 0; } -EXPORT_SYMBOL_GPL(devm_create_dev_dax); + +static struct dax_device_driver device_dax_driver = { + .drv = { + .probe = dev_dax_probe, + .remove = dev_dax_remove, + }, + .match_always = 1, +}; static int __init dax_init(void) { - dax_class = class_create(THIS_MODULE, "dax"); - return PTR_ERR_OR_ZERO(dax_class); + return dax_driver_register(&device_dax_driver); } static void __exit dax_exit(void) { - class_destroy(dax_class); + dax_driver_unregister(&device_dax_driver); } MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); -subsys_initcall(dax_init); +module_init(dax_init); module_exit(dax_exit); +MODULE_ALIAS_DAX_DEVICE(0); diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c new file mode 100644 index 0000000000000000000000000000000000000000..4c0131857133d646f207305283a4014c4e3a3a58 --- /dev/null +++ b/drivers/dax/kmem.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dax-private.h" +#include "bus.h" + +int dev_dax_kmem_probe(struct device *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + struct resource *res = &dev_dax->region->res; + resource_size_t kmem_start; + resource_size_t kmem_size; + resource_size_t kmem_end; + struct resource *new_res; + int numa_node; + int rc; + + /* + * Ensure good NUMA information for the persistent memory. + * Without this check, there is a risk that slow memory + * could be mixed in a node with faster memory, causing + * unavoidable performance issues. 
+ */ + numa_node = dev_dax->target_node; + if (numa_node < 0) { + dev_warn(dev, "rejecting DAX region %pR with invalid node: %d\n", + res, numa_node); + return -EINVAL; + } + + /* Hotplug starting at the beginning of the next block: */ + kmem_start = ALIGN(res->start, memory_block_size_bytes()); + + kmem_size = resource_size(res); + /* Adjust the size down to compensate for moving up kmem_start: */ + kmem_size -= kmem_start - res->start; + /* Align the size down to cover only complete blocks: */ + kmem_size &= ~(memory_block_size_bytes() - 1); + kmem_end = kmem_start + kmem_size; + + /* Region is permanently reserved. Hot-remove not yet implemented. */ + new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev)); + if (!new_res) { + dev_warn(dev, "could not reserve region [%pa-%pa]\n", + &kmem_start, &kmem_end); + return -EBUSY; + } + + /* + * Set flags appropriate for System RAM. Leave ..._BUSY clear + * so that add_memory() can add a child resource. Do not + * inherit flags from the parent since it may set new flags + * unknown to us that will break add_memory() below. + */ + new_res->flags = IORESOURCE_SYSTEM_RAM; + new_res->name = dev_name(dev); + + rc = add_memory(numa_node, new_res->start, resource_size(new_res)); + if (rc) { + release_resource(new_res); + kfree(new_res); + return rc; + } + + return 0; +} + +static int dev_dax_kmem_remove(struct device *dev) +{ + /* + * Purposely leak the request_mem_region() for the device-dax + * range and return '0' to ->remove() attempts. The removal of + * the device from the driver always succeeds, but the region + * is permanently pinned as reserved by the unreleased + * request_mem_region(). + */ + return 0; +} + +static struct dax_device_driver device_dax_kmem_driver = { + .drv = { + .probe = dev_dax_kmem_probe, + .remove = dev_dax_kmem_remove, + }, +}; + +static int __init dax_kmem_init(void) +{ + return dax_driver_register(&device_dax_kmem_driver); +} + +static void __exit dax_kmem_exit(void) +{ + dax_driver_unregister(&device_dax_kmem_driver); +} + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +module_init(dax_kmem_init); +module_exit(dax_kmem_exit); +MODULE_ALIAS_DAX_DEVICE(0); diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c deleted file mode 100644 index 99e2aace8078c87fde81019ccd3380fcccc666ec..0000000000000000000000000000000000000000 --- a/drivers/dax/pmem.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#include -#include -#include -#include -#include "../nvdimm/pfn.h" -#include "../nvdimm/nd.h" -#include "device-dax.h" - -struct dax_pmem { - struct device *dev; - struct percpu_ref ref; - struct dev_pagemap pgmap; - struct completion cmp; -}; - -static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref) -{ - return container_of(ref, struct dax_pmem, ref); -} - -static void dax_pmem_percpu_release(struct percpu_ref *ref) -{ - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - complete(&dax_pmem->cmp); -} - -static void dax_pmem_percpu_exit(void *data) -{ - struct percpu_ref *ref = data; - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - wait_for_completion(&dax_pmem->cmp); - percpu_ref_exit(ref); -} - -static void dax_pmem_percpu_kill(void *data) -{ - struct percpu_ref *ref = data; - struct dax_pmem *dax_pmem = to_dax_pmem(ref); - - dev_dbg(dax_pmem->dev, "trace\n"); - percpu_ref_kill(ref); -} - -static int dax_pmem_probe(struct device *dev) -{ - void *addr; - struct resource res; - int rc, id, region_id; - struct nd_pfn_sb *pfn_sb; - struct dev_dax *dev_dax; - struct dax_pmem *dax_pmem; - struct nd_namespace_io *nsio; - struct dax_region *dax_region; - struct nd_namespace_common *ndns; - struct nd_dax *nd_dax = to_nd_dax(dev); - struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; - - ndns = nvdimm_namespace_common_probe(dev); - if (IS_ERR(ndns)) - return PTR_ERR(ndns); - nsio = to_nd_namespace_io(&ndns->dev); - - dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL); - if (!dax_pmem) - return -ENOMEM; - - /* parse the 'pfn' info block via ->rw_bytes */ - rc = devm_nsio_enable(dev, nsio); - if (rc) - return rc; - rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap); - if (rc) - return rc; - devm_nsio_disable(dev, nsio); - - pfn_sb = nd_pfn->pfn_sb; - - if (!devm_request_mem_region(dev, nsio->res.start, - resource_size(&nsio->res), - dev_name(&ndns->dev))) { - dev_warn(dev, "could not reserve region %pR\n", &nsio->res); - return -EBUSY; - } - - dax_pmem->dev = dev; - init_completion(&dax_pmem->cmp); - rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0, - GFP_KERNEL); - if (rc) - return rc; - - rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); - if (rc) { - percpu_ref_exit(&dax_pmem->ref); - return rc; - } - - dax_pmem->pgmap.ref = &dax_pmem->ref; - addr = devm_memremap_pages(dev, &dax_pmem->pgmap); - if (IS_ERR(addr)) { - devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); - percpu_ref_exit(&dax_pmem->ref); - return PTR_ERR(addr); - } - - rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, - &dax_pmem->ref); - if (rc) - return rc; - - /* adjust the dax_region resource to the start of data */ - memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); - res.start += le64_to_cpu(pfn_sb->dataoff); - - rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); - if (rc != 2) - return -EINVAL; - - dax_region = alloc_dax_region(dev, region_id, &res, - le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); - if (!dax_region) - return -ENOMEM; - - /* TODO: support for subdividing a dax region... 
*/ - dev_dax = devm_create_dev_dax(dax_region, id, &res, 1); - - /* child dev_dax instances now own the lifetime of the dax_region */ - dax_region_put(dax_region); - - return PTR_ERR_OR_ZERO(dev_dax); -} - -static struct nd_device_driver dax_pmem_driver = { - .probe = dax_pmem_probe, - .drv = { - .name = "dax_pmem", - }, - .type = ND_DRIVER_DAX_PMEM, -}; - -module_nd_driver(dax_pmem_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e2e79bd3fdcf91c6d35613757c75c933c3ebd950 --- /dev/null +++ b/drivers/dax/pmem/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o +obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o +obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o + +dax_pmem-y := pmem.o +dax_pmem_core-y := core.o +dax_pmem_compat-y := compat.o diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c new file mode 100644 index 0000000000000000000000000000000000000000..d7b15e6f30c5b3f696bb593980abe84f9b1e7017 --- /dev/null +++ b/drivers/dax/pmem/compat.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */ +#include +#include +#include +#include +#include +#include "../bus.h" + +/* we need the private definitions to implement compat support */ +#include "../dax-private.h" + +static int dax_pmem_compat_probe(struct device *dev) +{ + struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS); + int rc; + + if (IS_ERR(dev_dax)) + return PTR_ERR(dev_dax); + + if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL)) + return -ENOMEM; + + device_lock(&dev_dax->dev); + rc = dev_dax_probe(&dev_dax->dev); + device_unlock(&dev_dax->dev); + + devres_close_group(&dev_dax->dev, dev_dax); + if (rc) + devres_release_group(&dev_dax->dev, dev_dax); + + return rc; +} + +static int dax_pmem_compat_release(struct device *dev, void *data) +{ + device_lock(dev); + devres_release_group(dev, to_dev_dax(dev)); + device_unlock(dev); + + return 0; +} + +static int dax_pmem_compat_remove(struct device *dev) +{ + device_for_each_child(dev, NULL, dax_pmem_compat_release); + return 0; +} + +static struct nd_device_driver dax_pmem_compat_driver = { + .probe = dax_pmem_compat_probe, + .remove = dax_pmem_compat_remove, + .drv = { + .name = "dax_pmem_compat", + }, + .type = ND_DRIVER_DAX_PMEM, +}; + +static int __init dax_pmem_compat_init(void) +{ + return nd_driver_register(&dax_pmem_compat_driver); +} +module_init(dax_pmem_compat_init); + +static void __exit dax_pmem_compat_exit(void) +{ + driver_unregister(&dax_pmem_compat_driver.drv); +} +module_exit(dax_pmem_compat_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c new file mode 100644 index 0000000000000000000000000000000000000000..f71019ce06470019caff8207d7c1fb566206f9eb --- /dev/null +++ b/drivers/dax/pmem/core.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved.
*/ +#include +#include +#include +#include "../../nvdimm/pfn.h" +#include "../../nvdimm/nd.h" +#include "../bus.h" + +struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) +{ + struct resource res; + int rc, id, region_id; + resource_size_t offset; + struct nd_pfn_sb *pfn_sb; + struct dev_dax *dev_dax; + struct nd_namespace_io *nsio; + struct dax_region *dax_region; + struct dev_pagemap pgmap = { 0 }; + struct nd_namespace_common *ndns; + struct nd_dax *nd_dax = to_nd_dax(dev); + struct nd_pfn *nd_pfn = &nd_dax->nd_pfn; + struct nd_region *nd_region = to_nd_region(dev->parent); + + ndns = nvdimm_namespace_common_probe(dev); + if (IS_ERR(ndns)) + return ERR_CAST(ndns); + nsio = to_nd_namespace_io(&ndns->dev); + + /* parse the 'pfn' info block via ->rw_bytes */ + rc = devm_nsio_enable(dev, nsio); + if (rc) + return ERR_PTR(rc); + rc = nvdimm_setup_pfn(nd_pfn, &pgmap); + if (rc) + return ERR_PTR(rc); + devm_nsio_disable(dev, nsio); + + /* reserve the metadata area, device-dax will reserve the data */ + pfn_sb = nd_pfn->pfn_sb; + offset = le64_to_cpu(pfn_sb->dataoff); + if (!devm_request_mem_region(dev, nsio->res.start, offset, + dev_name(&ndns->dev))) { + dev_warn(dev, "could not reserve metadata\n"); + return ERR_PTR(-EBUSY); + } + + rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); + if (rc != 2) + return ERR_PTR(-EINVAL); + + /* adjust the dax_region resource to the start of data */ + memcpy(&res, &pgmap.res, sizeof(res)); + res.start += offset; + dax_region = alloc_dax_region(dev, region_id, &res, + nd_region->target_node, le32_to_cpu(pfn_sb->align), + PFN_DEV|PFN_MAP); + if (!dax_region) + return ERR_PTR(-ENOMEM); + + dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys); + + /* child dev_dax instances now own the lifetime of the dax_region */ + dax_region_put(dax_region); + + return dev_dax; +} +EXPORT_SYMBOL_GPL(__dax_pmem_probe); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c new file mode 100644 index 0000000000000000000000000000000000000000..0ae4238a0ef88dd9be32dcea7bef9b2abd8a8331 --- /dev/null +++ b/drivers/dax/pmem/pmem.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ +#include +#include +#include +#include +#include +#include "../bus.h" + +static int dax_pmem_probe(struct device *dev) +{ + return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS)); +} + +static struct nd_device_driver dax_pmem_driver = { + .probe = dax_pmem_probe, + .drv = { + .name = "dax_pmem", + }, + .type = ND_DRIVER_DAX_PMEM, +}; + +static int __init dax_pmem_init(void) +{ + return nd_driver_register(&dax_pmem_driver); +} +module_init(dax_pmem_init); + +static void __exit dax_pmem_exit(void) +{ + driver_unregister(&dax_pmem_driver.drv); +} +module_exit(dax_pmem_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); +#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) +/* For compat builds, don't load this module by default */ +MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); +#endif diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 6e928f37d08429defdcecf71d1bf35c0ef5b613e..a5588c00c8dd060f166d971e9b107a054ca3d44e 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -22,6 +22,7 @@ #include #include #include +#include "dax-private.h" static dev_t dax_devt; DEFINE_STATIC_SRCU(dax_srcu); @@ -365,11 +366,15 @@ void kill_dax(struct dax_device *dax_dev) spin_lock(&dax_host_lock); hlist_del_init(&dax_dev->list); spin_unlock(&dax_host_lock); - - dax_dev->private = NULL; } EXPORT_SYMBOL_GPL(kill_dax); +void run_dax(struct dax_device *dax_dev) +{ + set_bit(DAXDEV_ALIVE, &dax_dev->flags); +} +EXPORT_SYMBOL_GPL(run_dax); + static struct inode *dax_alloc_inode(struct super_block *sb) { struct dax_device *dax_dev; @@ -584,6 +589,8 @@ EXPORT_SYMBOL_GPL(dax_inode); void *dax_get_private(struct dax_device *dax_dev) { + if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags)) + return NULL; return dax_dev->private; } EXPORT_SYMBOL_GPL(dax_get_private); @@ -597,7 +604,7 @@ static void init_once(void *_dax_dev) inode_init_once(inode); } -static int __dax_fs_init(void) +static int dax_fs_init(void) { int rc; @@ -629,35 +636,46 @@ static int __dax_fs_init(void) return rc; } -static void __dax_fs_exit(void) +static void dax_fs_exit(void) { kern_unmount(dax_mnt); unregister_filesystem(&dax_fs_type); kmem_cache_destroy(dax_cache); } -static int __init dax_fs_init(void) +static int __init dax_core_init(void) { int rc; - rc = __dax_fs_init(); + rc = dax_fs_init(); if (rc) return rc; rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); if (rc) - __dax_fs_exit(); - return rc; + goto err_chrdev; + + rc = dax_bus_init(); + if (rc) + goto err_bus; + return 0; + +err_bus: + unregister_chrdev_region(dax_devt, MINORMASK+1); +err_chrdev: + dax_fs_exit(); + return 0; } -static void __exit dax_fs_exit(void) +static void __exit dax_core_exit(void) { + dax_bus_exit(); unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); - __dax_fs_exit(); + dax_fs_exit(); } MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); -subsys_initcall(dax_fs_init); -module_exit(dax_fs_exit); +subsys_initcall(dax_core_init); +module_exit(dax_core_exit); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 4c49bb1330b527ecb443536f6d1109f1dee2aa66..57589022d45e5dff72d77ff2b456214890bb4105 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -162,6 +163,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) int lev, prev_lev, ret = 0; unsigned long cur_time; + lockdep_assert_held(&devfreq->lock); cur_time = jiffies; /* Immediately exit if previous_freq is not 
initialized yet. */ @@ -221,6 +223,49 @@ static struct devfreq_governor *find_devfreq_governor(const char *name) return ERR_PTR(-ENODEV); } +/** + * try_then_request_governor() - Try to find the governor and request the + * module if it is not found. + * @name: name of the governor + * + * Search the list of devfreq governors and request the module and try again + * if it is not found. This can happen when both drivers (the governor driver + * and the driver that calls devfreq_add_device) are built as modules. + * devfreq_list_lock should be held by the caller. Returns the matched + * governor's pointer or an error pointer. + */ +static struct devfreq_governor *try_then_request_governor(const char *name) +{ + struct devfreq_governor *governor; + int err = 0; + + if (IS_ERR_OR_NULL(name)) { + pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); + return ERR_PTR(-EINVAL); + } + WARN(!mutex_is_locked(&devfreq_list_lock), + "devfreq_list_lock must be locked."); + + governor = find_devfreq_governor(name); + if (IS_ERR(governor)) { + mutex_unlock(&devfreq_list_lock); + + if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND, + DEVFREQ_NAME_LEN)) + err = request_module("governor_%s", "simpleondemand"); + else + err = request_module("governor_%s", name); + /* Restore previous state before return */ + mutex_lock(&devfreq_list_lock); + if (err) + return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL); + + governor = find_devfreq_governor(name); + } + + return governor; +} + static int devfreq_notify_transition(struct devfreq *devfreq, struct devfreq_freqs *freqs, unsigned int state) { @@ -283,11 +328,11 @@ int update_devfreq(struct devfreq *devfreq) max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq); min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq); - if (min_freq && freq < min_freq) { + if (freq < min_freq) { freq = min_freq; flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */ } - if (max_freq && freq > max_freq) { + if (freq > max_freq) { freq = max_freq; flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */ } @@ -336,8 +381,13 @@ static void devfreq_monitor(struct work_struct *work) if (err) dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); + if (devfreq->stop_polling) + goto out; + queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); + +out: mutex_unlock(&devfreq->lock); } @@ -352,10 +402,18 @@ static void devfreq_monitor(struct work_struct *work) */ void devfreq_monitor_start(struct devfreq *devfreq) { + mutex_lock(&devfreq->lock); + if (delayed_work_pending(&devfreq->work)) + goto out; + INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); + +out: + devfreq->stop_polling = false; + mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_monitor_start); @@ -369,6 +427,14 @@ EXPORT_SYMBOL(devfreq_monitor_start); */ void devfreq_monitor_stop(struct devfreq *devfreq) { + mutex_lock(&devfreq->lock); + if (devfreq->stop_polling) { + mutex_unlock(&devfreq->lock); + return; + } + + devfreq->stop_polling = true; + mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_stop); @@ -493,26 +559,30 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp) { struct devfreq *devfreq = container_of(nb, struct devfreq, nb); - int ret; + int err = -EINVAL; mutex_lock(&devfreq->lock); devfreq->scaling_min_freq =
find_available_min_freq(devfreq); - if (!devfreq->scaling_min_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; - } + if (!devfreq->scaling_min_freq) + goto out; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; + devfreq->scaling_max_freq = ULONG_MAX; + goto out; } - ret = update_devfreq(devfreq); + err = update_devfreq(devfreq); + +out: mutex_unlock(&devfreq->lock); + if (err) + dev_err(devfreq->dev.parent, + "failed to update frequency from OPP notifier (%d)\n", + err); - return ret; + return NOTIFY_OK; } /** @@ -526,18 +596,9 @@ static void devfreq_dev_release(struct device *dev) struct devfreq *devfreq = to_devfreq(dev); mutex_lock(&devfreq_list_lock); - if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { - mutex_unlock(&devfreq_list_lock); - dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); - return; - } list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); - if (devfreq->governor) - devfreq->governor->event_handler(devfreq, - DEVFREQ_GOV_STOP, NULL); - if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); @@ -589,6 +650,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; + INIT_LIST_HEAD(&devfreq->node); devfreq->profile = profile; strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; @@ -646,9 +708,8 @@ struct devfreq *devfreq_add_device(struct device *dev, mutex_unlock(&devfreq->lock); mutex_lock(&devfreq_list_lock); - list_add(&devfreq->node, &devfreq_list); - governor = find_devfreq_governor(devfreq->governor_name); + governor = try_then_request_governor(devfreq->governor_name); if (IS_ERR(governor)) { dev_err(dev, "%s: Unable to find governor for the device\n", __func__); @@ -664,15 +725,17 @@ struct devfreq *devfreq_add_device(struct device *dev, __func__); goto err_init; } + + list_add(&devfreq->node, &devfreq_list); + mutex_unlock(&devfreq_list_lock); return devfreq; err_init: - list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); - device_unregister(&devfreq->dev); + devfreq_remove_device(devfreq); devfreq = NULL; err_dev: if (devfreq) @@ -693,6 +756,9 @@ int devfreq_remove_device(struct devfreq *devfreq) if (!devfreq) return -EINVAL; + if (devfreq->governor) + devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_STOP, NULL); device_unregister(&devfreq->dev); return 0; @@ -991,7 +1057,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, return -EINVAL; mutex_lock(&devfreq_list_lock); - governor = find_devfreq_governor(str_governor); + governor = try_then_request_governor(str_governor); if (IS_ERR(governor)) { ret = PTR_ERR(governor); goto out; @@ -1041,7 +1107,7 @@ static ssize_t available_governors_show(struct device *d, * The devfreq with immutable governor (e.g., passive) shows * only own governor. 
*/ - if (df->governor->immutable) { + if (df->governor && df->governor->immutable) { count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, "%s ", df->governor_name); /* @@ -1126,17 +1192,26 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, struct devfreq *df = to_devfreq(dev); unsigned long value; int ret; - unsigned long max; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; mutex_lock(&df->lock); - max = df->max_freq; - if (value && max && value > max) { - ret = -EINVAL; - goto unlock; + + if (value) { + if (value > df->max_freq) { + ret = -EINVAL; + goto unlock; + } + } else { + unsigned long *freq_table = df->profile->freq_table; + + /* Get minimum frequency according to sorting order */ + if (freq_table[0] < freq_table[df->profile->max_state - 1]) + value = freq_table[0]; + else + value = freq_table[df->profile->max_state - 1]; } df->min_freq = value; @@ -1161,17 +1236,26 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, struct devfreq *df = to_devfreq(dev); unsigned long value; int ret; - unsigned long min; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; mutex_lock(&df->lock); - min = df->min_freq; - if (value && min && value < min) { - ret = -EINVAL; - goto unlock; + + if (value) { + if (value < df->min_freq) { + ret = -EINVAL; + goto unlock; + } + } else { + unsigned long *freq_table = df->profile->freq_table; + + /* Get maximum frequency according to sorting order */ + if (freq_table[0] < freq_table[df->profile->max_state - 1]) + value = freq_table[df->profile->max_state - 1]; + else + value = freq_table[0]; } df->max_freq = value; @@ -1225,12 +1309,17 @@ static ssize_t trans_stat_show(struct device *dev, int i, j; unsigned int max_state = devfreq->profile->max_state; - if (!devfreq->stop_polling && - devfreq_update_status(devfreq, devfreq->previous_freq)) - return 0; if (max_state == 0) return sprintf(buf, "Not Supported.\n"); + mutex_lock(&devfreq->lock); + if (!devfreq->stop_polling && + devfreq_update_status(devfreq, devfreq->previous_freq)) { + mutex_unlock(&devfreq->lock); + return 0; + } + mutex_unlock(&devfreq->lock); + len = sprintf(buf, " From : To\n"); len += sprintf(buf + len, " :"); for (i = 0; i < max_state; i++) diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index c25658b265988b4fdd2a3f4304fdbfab83b7b9f3..24a9658348d7861470354af7246ae632f25815cf 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev) if (ret < 0) dev_warn(dev, "failed to disable the devfreq-event devices\n"); - if (bus->regulator) - regulator_disable(bus->regulator); - dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); + if (bus->regulator) + regulator_disable(bus->regulator); } /* @@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev) struct exynos_bus *bus; int ret, max_state; unsigned long min_freq, max_freq; + bool passive = false; if (!np) { dev_err(dev, "failed to find devicetree node\n"); @@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev) bus->dev = &pdev->dev; platform_set_drvdata(pdev, bus); - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - return ret; - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) { - ret = -ENOMEM; - goto err; - } + if (!profile) + return -ENOMEM; node = of_parse_phandle(dev->of_node, "devfreq", 0); if 
(node) { of_node_put(node); - goto passive; + passive = true; } else { ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; } + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); if (ret < 0) - goto err; + goto err_reg; + + if (passive) + goto passive; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -510,6 +510,9 @@ static int exynos_bus_probe(struct platform_device *pdev) err: dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); +err_reg: + if (!passive) + regulator_disable(bus->regulator); return ret; } diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 3bc29acbd54e85480d375514fd56e2878681fa96..8cfb69749d4984036ee29de4a403361fadbdebdd 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb, static int devfreq_passive_event_handler(struct devfreq *devfreq, unsigned int event, void *data) { - struct device *dev = devfreq->dev.parent; struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent = (struct devfreq *)p_data->parent; @@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, p_data->this = devfreq; nb->notifier_call = devfreq_passive_notifier_call; - ret = devm_devfreq_register_notifier(dev, parent, nb, + ret = devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER); break; case DEVFREQ_GOV_STOP: - devm_devfreq_unregister_notifier(dev, parent, nb, - DEVFREQ_TRANSITION_NOTIFIER); + WARN_ON(devfreq_unregister_notifier(parent, nb, + DEVFREQ_TRANSITION_NOTIFIER)); break; default: break; diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index c59d2eee5d3091ce0315092975a2692e1589ce34..06768074d2d822cb0e837e953edb68ddcb02917d 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq, { struct tegra_devfreq *tegra = dev_get_drvdata(dev); struct dev_pm_opp *opp; - unsigned long rate = *freq * KHZ; + unsigned long rate; - opp = devfreq_recommended_opp(dev, &rate, flags); + opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) { - dev_err(dev, "Failed to find opp for %lu KHz\n", *freq); + dev_err(dev, "Failed to find opp for %lu Hz\n", *freq); return PTR_ERR(opp); } rate = dev_pm_opp_get_freq(opp); @@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq, clk_set_min_rate(tegra->emc_clock, rate); clk_set_rate(tegra->emc_clock, 0); - *freq = rate; - return 0; } @@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev, struct tegra_devfreq *tegra = dev_get_drvdata(dev); struct tegra_devfreq_device *actmon_dev; - stat->current_frequency = tegra->cur_freq; + stat->current_frequency = tegra->cur_freq * KHZ; /* To be used by the tegra governor */ stat->private_data = tegra; @@ -565,7 +563,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq, target_freq = max(target_freq, dev->target_freq); } - *freq = target_freq; + *freq = target_freq * KHZ; return 0; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 13884474d1588f7a086154d3b06dbd09ff28e881..69842145c223fd73f8228f37ab668774ac55c112 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1069,6 +1069,7 
@@ static int dma_buf_debug_show(struct seq_file *s, void *unused) fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), dma_fence_is_signaled(fence) ? "" : "un"); + dma_fence_put(fence); } rcu_read_unlock(); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 6c95f61a32e73d54ed70f461e676826075419f45..49ab09468ba15c12ec80e3e34b1656e450f1fa70 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -416,6 +416,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, GFP_NOWAIT | __GFP_NOWARN); if (!nshared) { rcu_read_unlock(); + + dma_fence_put(fence_excl); + fence_excl = NULL; + nshared = krealloc(shared, sz, GFP_KERNEL); if (nshared) { shared = nshared; diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a642f04ec49aa0e3cfeabeb84205609..81ba4eb34890952468b877fa2107363adcdc42bb 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); struct sync_timeline *parent = dma_fence_parent(fence); + unsigned long flags; + spin_lock_irqsave(fence->lock, flags); if (!list_empty(&pt->link)) { - unsigned long flags; - - spin_lock_irqsave(fence->lock, flags); - if (!list_empty(&pt->link)) { - list_del(&pt->link); - rb_erase(&pt->node, &parent->pt_tree); - } - spin_unlock_irqrestore(fence->lock, flags); + list_del(&pt->link); + rb_erase(&pt->node, &parent->pt_tree); } + spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); dma_fence_free(fence); @@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, p = &parent->rb_left; } else { if (dma_fence_get_rcu(&other->base)) { - dma_fence_put(&pt->base); + sync_timeline_put(obj); + kfree(pt); pt = other; goto unlock; } diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index c4c8ecb24aa9b4e9eb233847dd9a7bbfa1cb7fe4..cfe31e52d78d83d714f07f91f80d779e7ad56c19 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -119,12 +119,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) seq_printf(s, "%s: %d\n", obj->name, obj->value); - spin_lock_irq(&obj->lock); + spin_lock(&obj->lock); /* Caller already disabled IRQ. 
*/ list_for_each(pos, &obj->pt_list) { struct sync_pt *pt = container_of(pos, struct sync_pt, link); sync_print_fence(s, &pt->base, false); } - spin_unlock_irq(&obj->lock); + spin_unlock(&obj->lock); } static void sync_print_sync_file(struct seq_file *s, diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 35dd06479867fad9636db14fc58747ee60dbf198..b0d2563cde5d2ac6277087a00cf409d6640e5b6a 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -220,8 +220,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct dma_fence **fences, **nfences, **a_fences, **b_fences; - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; + struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; + int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); if (!sync_file) @@ -230,7 +230,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, a_fences = get_fences(a, &a_num_fences); b_fences = get_fences(b, &b_num_fences); if (a_num_fences > INT_MAX - b_num_fences) - return NULL; + goto err; num_fences = a_num_fences + b_num_fences; @@ -245,7 +245,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * If a sync_file can only be created with sync_file_merge * and sync_file_create, this is a reasonable assumption. */ - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { + for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { struct dma_fence *pt_a = a_fences[i_a]; struct dma_fence *pt_b = b_fences[i_b]; @@ -286,15 +286,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, fences = nfences; } - if (sync_file_set_fence(sync_file, fences, i) < 0) { - kfree(fences); + if (sync_file_set_fence(sync_file, fences, i) < 0) goto err; - } strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: + while (i) + dma_fence_put(fences[--i]); + kfree(fences); fput(sync_file->file); return NULL; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index dacf3f42426de9e54a2c255248e422e313096acc..a4f95574eb9adc9cb4030c81e4f142e34b340642 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -143,7 +143,7 @@ config DMA_JZ4740 config DMA_JZ4780 tristate "JZ4780 DMA support" - depends on MACH_JZ4780 || COMPILE_TEST + depends on MIPS || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 4a748c3435d7d0b5e4ddd5a5a5a70771d9b7ba3c..02149742b334c73e37c8223e1b191b898964a0db 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -72,10 +72,14 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, si = (const struct acpi_csrt_shared_info *)&grp[1]; - /* Match device by MMIO and IRQ */ + /* Match device by MMIO */ if (si->mmio_base_low != lower_32_bits(mem) || - si->mmio_base_high != upper_32_bits(mem) || - si->gsi_interrupt != irq) + si->mmio_base_high != upper_32_bits(mem)) + return 0; + + /* Match device by Linux vIRQ */ + ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity); + if (ret != irq) return 0; dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 75f38d19fcbed5dac47e4fd8b7d8b78cef228e66..dbc51154f12294b418c72f73459f1a02d25b173a 100644 --- a/drivers/dma/at_hdmac.c +++ 
b/drivers/dma/at_hdmac.c @@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan) atchan->descs_allocated = 0; atchan->status = 0; + /* + * Free atslave allocated in at_dma_xlate() + */ + kfree(chan->private); + chan->private = NULL; + dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } @@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); + atslave = kzalloc(sizeof(*atslave), GFP_KERNEL); if (!atslave) return NULL; @@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev) struct resource *io; at_dma_off(atdma); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&atdma->dma_common); dma_pool_destroy(atdma->memset_pool); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 4bf72561667c7bd636749f37373477d5a8862f99..7db66f974041e9eb859f7879f38354320503a742 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -203,6 +203,7 @@ struct at_xdmac_chan { u32 save_cim; u32 save_cnda; u32 save_cndc; + u32 irq_status; unsigned long status; struct tasklet_struct tasklet; struct dma_slave_config sconfig; @@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) struct at_xdmac_desc *desc; u32 error_mask; - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", - __func__, atchan->status); + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", + __func__, atchan->irq_status); error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS @@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) if (at_xdmac_chan_is_cyclic(atchan)) { at_xdmac_handle_cyclic(atchan); - } else if ((atchan->status & AT_XDMAC_CIS_LIS) - || (atchan->status & error_mask)) { + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) + || (atchan->irq_status & error_mask)) { struct dma_async_tx_descriptor *txd; - if (atchan->status & AT_XDMAC_CIS_RBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) dev_err(chan2dev(&atchan->chan), "read bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_WBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) dev_err(chan2dev(&atchan->chan), "write bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_ROIS) + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); spin_lock_bh(&atchan->lock); @@ -1605,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data) struct at_xdmac_desc, xfer_node); dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); - BUG_ON(!desc->active_xfer); + if (!desc->active_xfer) { + dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting"); + spin_unlock(&atchan->lock); + return; + } txd = &desc->tx_dma_desc; @@ -1652,7 +1657,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) atchan = &atxdmac->chan[i]; chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); - atchan->status = chan_status & chan_imr; + atchan->irq_status = chan_status & chan_imr; dev_vdbg(atxdmac->dma.dev, "%s: chan%d: imr=0x%x, status=0x%x\n", __func__, i, chan_imr, chan_status); @@ -1666,7 +1671,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) at_xdmac_chan_read(atchan, AT_XDMAC_CDA), at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | 
AT_XDMAC_CIS_WBEIS)) at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); tasklet_schedule(&atchan->tasklet); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 847f84a41a692b48a653745052a4d222b262f18d..9d782cc95c6a0589a9db6be6fc495e4a73baa33a 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -415,38 +415,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( } } -static int bcm2835_dma_abort(void __iomem *chan_base) +static int bcm2835_dma_abort(struct bcm2835_chan *c) { - unsigned long cs; + void __iomem *chan_base = c->chan_base; long int timeout = 10000; - cs = readl(chan_base + BCM2835_DMA_CS); - if (!(cs & BCM2835_DMA_ACTIVE)) + /* + * A zero control block address means the channel is idle. + * (The ACTIVE flag in the CS register is not a reliable indicator.) + */ + if (!readl(chan_base + BCM2835_DMA_ADDR)) return 0; /* Write 0 to the active bit - Pause the DMA */ writel(0, chan_base + BCM2835_DMA_CS); /* Wait for any current AXI transfer to complete */ - while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { + while ((readl(chan_base + BCM2835_DMA_CS) & + BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) cpu_relax(); - cs = readl(chan_base + BCM2835_DMA_CS); - } - /* We'll un-pause when we set of our next DMA */ + /* Peripheral might be stuck and fail to signal AXI write responses */ if (!timeout) - return -ETIMEDOUT; - - if (!(cs & BCM2835_DMA_ACTIVE)) - return 0; - - /* Terminate the control block chain */ - writel(0, chan_base + BCM2835_DMA_NEXTCB); - - /* Abort the whole DMA */ - writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, - chan_base + BCM2835_DMA_CS); + dev_err(c->vc.chan.device->dev, + "failed to complete outstanding writes\n"); + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); return 0; } @@ -485,8 +479,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) spin_lock_irqsave(&c->vc.lock, flags); - /* Acknowledge interrupt */ - writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); + /* + * Clear the INT flag to receive further interrupts. Keep the channel + * active in case the descriptor is cyclic or in case the client has + * already terminated the descriptor and issued a new one. (May happen + * if this IRQ handler is threaded.) If the channel is finished, it + * will remain idle despite the ACTIVE flag being set. + */ + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, + c->chan_base + BCM2835_DMA_CS); d = c->desc; @@ -494,11 +495,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) if (d->cyclic) { /* call the cyclic callback */ vchan_cyclic_callback(&d->vd); - - /* Keep the DMA engine running */ - writel(BCM2835_DMA_ACTIVE, - c->chan_base + BCM2835_DMA_CS); - } else { + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { vchan_cookie_complete(&c->desc->vd); bcm2835_dma_start_desc(c); } @@ -796,7 +793,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); unsigned long flags; - int timeout = 10000; LIST_HEAD(head); spin_lock_irqsave(&c->vc.lock, flags); @@ -806,27 +802,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) list_del_init(&c->node); spin_unlock(&d->lock); - /* - * Stop DMA activity: we assume the callback will not be called - * after bcm_dma_abort() returns (even if it does, it will see - * c->desc is NULL and exit.) 
- */ + /* stop DMA activity */ if (c->desc) { vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; - bcm2835_dma_abort(c->chan_base); - - /* Wait for stopping */ - while (--timeout) { - if (!(readl(c->chan_base + BCM2835_DMA_CS) & - BCM2835_DMA_ACTIVE)) - break; - - cpu_relax(); - } - - if (!timeout) - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + bcm2835_dma_abort(c); } vchan_get_all_descriptors(&c->vc, &head); @@ -918,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (rc) + if (rc) { + dev_err(&pdev->dev, "Unable to set DMA mask\n"); return rc; + } od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index da74fd74636b41f14b40b17abc1022ab23de3593..cee78f9c4794c387ec51b8f1a55bd8d2e20bc0fc 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1797,13 +1797,10 @@ static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, static int coh901318_config(struct coh901318_chan *cohc, struct coh901318_params *param) { - unsigned long flags; const struct coh901318_params *p; int channel = cohc->id; void __iomem *virtbase = cohc->base->virtbase; - spin_lock_irqsave(&cohc->lock, flags); - if (param) p = param; else @@ -1823,8 +1820,6 @@ static int coh901318_config(struct coh901318_chan *cohc, coh901318_set_conf(cohc, p->config); coh901318_set_ctrl(cohc, p->ctrl_lli_last); - spin_unlock_irqrestore(&cohc->lock, flags); - return 0; } diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 85820a2d69d48a104ca8063e796f8870e3daef2c..edff93aacad36b23891c793ab48faaa9f61d12dc 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -587,7 +587,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, to_jz4780_dma_desc(vdesc), 0); } else if (cookie == jzchan->desc->vdesc.tx.cookie) { txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc, - (jzchan->curr_hwdesc + 1) % jzchan->desc->count); + jzchan->curr_hwdesc + 1); } else txstate->residue = 0; @@ -761,6 +761,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) struct resource *res; int i, ret; + if (!dev->of_node) { + dev_err(dev, "This driver must be probed from devicetree\n"); + return -EINVAL; + } + jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL); if (!jzdma) return -ENOMEM; diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f852d9a38a619940699d71b146399f4..302f13efd35d98de6e970374b84f26ed4b53faab 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -168,7 +168,7 @@ dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, static inline bool dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) { - return (cb->callback) ? 
true : false; + return cb->callback || cb->callback_result; } #endif diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index aa1712beb0cc355d454948dddb42d615c10b9762..7b7fba0c92532fe32258b6f039709b82355c82cb 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -642,11 +642,9 @@ static int dmatest_func(void *data) srcs[i] = um->addr[i] + src_off; ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - dmaengine_unmap_put(um); result("src mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->to_cnt++; } @@ -661,11 +659,9 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - dmaengine_unmap_put(um); result("dst mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->bidi_cnt++; } @@ -693,12 +689,10 @@ static int dmatest_func(void *data) } if (!tx) { - dmaengine_unmap_put(um); result("prep error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } done->done = false; @@ -707,12 +701,10 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - dmaengine_unmap_put(um); result("submit error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } dma_async_issue_pending(chan); @@ -725,16 +717,14 @@ static int dmatest_func(void *data) dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); - failed_tests++; - continue; + goto error_unmap_continue; } else if (status != DMA_COMPLETE) { dmaengine_unmap_put(um); result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } dmaengine_unmap_put(um); @@ -779,6 +769,12 @@ static int dmatest_func(void *data) verbose_result("test passed", total_tests, src_off, dst_off, len, 0); } + + continue; + +error_unmap_continue: + dmaengine_unmap_put(um); + failed_tests++; } ktime = ktime_sub(ktime_get(), ktime); ktime = ktime_sub(ktime, comparetime); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index c4eb55e3011c9fe26d65d168d91fd893a0efefe4..99a40385267cd5025444ae9527f1742b2a047d78 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, return vchan_tx_prep(&chan->vc, &first->vd, flags); err_desc_get: - axi_desc_put(first); + if (first) + axi_desc_put(first); return NULL; } @@ -550,6 +551,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) /* The bad descriptor currently is in the head of vc list */ vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n", + axi_chan_name(chan)); + goto out; + } /* Remove the completed descriptor from issued list */ list_del(&vd->node); @@ -564,6 +570,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) /* Try to restart the controller */ axi_chan_start_first_queued(chan); +out: spin_unlock_irqrestore(&chan->vc.lock, flags); } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index f43e6dafe446d47dc906137b3fe9733fef1182f4..055d83b6cb68af4bbf6798abf09c7ff749607224 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -160,12 +160,14 @@ static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 cfghi = DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); + cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); /* Set polarity of handshake interface */ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; @@ -1064,12 +1066,12 @@ static void dwc_issue_pending(struct dma_chan *chan) /* * Program FIFO size of channels. * - * By default full FIFO (1024 bytes) is assigned to channel 0. Here we + * By default full FIFO (512 bytes) is assigned to channel 0. Here we * slice FIFO on equal parts between channels. 
*/ static void idma32_fifo_partition(struct dw_dma *dw) { - u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) | + u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | IDMA32C_FP_UPDATE; u64 fifo_partition = 0; @@ -1082,7 +1084,7 @@ static void idma32_fifo_partition(struct dw_dma *dw) /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ fifo_partition |= value << 32; - /* Program FIFO Partition registers - 128 bytes for each channel */ + /* Program FIFO Partition registers - 64 bytes per channel */ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); } diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index f62dd0944908d2015032859c20a2580a67fcf189..c299ff181bb68bcbc9ee53bd7bec96ad357ab059 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -162,6 +162,12 @@ dw_dma_parse_dt(struct platform_device *pdev) pdata->multi_block[tmp] = 1; } + if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) { + if (tmp > CHAN_PROTCTL_MASK) + return NULL; + pdata->protctl = tmp; + } + return pdata; } #else diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 09e7dfdbb7907df6217b65fb6b2ff3a96520e2ec..646c9c960c071a40b9dad231c5298f44106aa2f4 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -200,6 +200,10 @@ enum dw_dma_msize { #define DWC_CFGH_FCMODE (1 << 0) #define DWC_CFGH_FIFO_MODE (1 << 1) #define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_PROTCTL_DATA (0 << 2) /* data access - always set */ +#define DWC_CFGH_PROTCTL_PRIV (1 << 2) /* privileged -> AHB HPROT[1] */ +#define DWC_CFGH_PROTCTL_BUFFER (2 << 2) /* bufferable -> AHB HPROT[2] */ +#define DWC_CFGH_PROTCTL_CACHE (4 << 2) /* cacheable -> AHB HPROT[3] */ #define DWC_CFGH_DS_UPD_EN (1 << 5) #define DWC_CFGH_SS_UPD_EN (1 << 6) #define DWC_CFGH_SRC_PER(x) ((x) << 7) diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1fbf9cb9b74297c9daa6005c520fa54f90c8dc45..89c5e5b46068704d9374227855cb46739467686d 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - idma64->dma.dev = chip->dev; + idma64->dma.dev = chip->sysdev; dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); @@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev) { struct idma64_chip *chip; struct device *dev = &pdev->dev; + struct device *sysdev = dev->parent; struct resource *mem; int ret; @@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev) if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); if (ret) return ret; chip->dev = dev; + chip->sysdev = sysdev; ret = idma64_probe(chip); if (ret) diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index 6b816878e5e7a79d9e4fb14f982e3aa21e3e34a2..baa32e1425de31dcfbf88f8ea10d0002a5fe3e13 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) /** * struct idma64_chip - representation of iDMA 64-bit controller hardware * @dev: struct device of the DMA controller + * @sysdev: struct device of the physical device that does DMA * @irq: irq line * @regs: memory 
mapped I/O space * @idma64: struct idma64 that is filed by idma64_probe() */ struct idma64_chip { struct device *dev; + struct device *sysdev; int irq; void __iomem *regs; struct idma64 *idma64; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 75b6ff0415ee0206c8d2ef70399153eae169a6c5..dfee0d895ce39a35dbed415c595cab8f8120ea61 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -284,7 +284,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) struct scatterlist *sg = d->sg; unsigned long now; - now = min(d->len, sg_dma_len(sg)); + now = min_t(size_t, d->len, sg_dma_len(sg)); if (d->len != IMX_DMA_LENGTH_LOOP) d->len -= now; @@ -617,7 +617,7 @@ static void imxdma_tasklet(unsigned long data) { struct imxdma_channel *imxdmac = (void *)data; struct imxdma_engine *imxdma = imxdmac->imxdma; - struct imxdma_desc *desc; + struct imxdma_desc *desc, *next_desc; unsigned long flags; spin_lock_irqsave(&imxdma->lock, flags); @@ -647,10 +647,10 @@ static void imxdma_tasklet(unsigned long data) list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); if (!list_empty(&imxdmac->ld_queue)) { - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, - node); + next_desc = list_first_entry(&imxdmac->ld_queue, + struct imxdma_desc, node); list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); - if (imxdma_xfer_desc(desc) < 0) + if (imxdma_xfer_desc(next_desc) < 0) dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", __func__, imxdmac->channel); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e66167786939ae867de0d93378ae4054..3f5a01cb4ab45b05c37aaac65512be88587615db 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -33,6 +32,7 @@ #include #include #include +#include #include #include @@ -376,7 +376,7 @@ struct sdma_channel { u32 shp_addr, per_addr; enum dma_status status; struct imx_dma_data data; - struct dma_pool *bd_pool; + struct work_struct terminate_worker; }; #define IMX_DMA_SG_LOOP BIT(0) @@ -681,7 +681,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, spin_lock_irqsave(&sdma->channel_0_lock, flags); bd0->mode.command = C0_SETPM; - bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; + bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD; bd0->mode.count = size / 2; bd0->buffer_addr = buf_phys; bd0->ext_buffer_addr = address; @@ -1000,7 +1000,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) context->gReg[7] = sdmac->watermark_level; bd0->mode.command = C0_SETDM; - bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; + bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD; bd0->mode.count = sizeof(*context) / 4; bd0->buffer_addr = sdma->context_phys; bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; @@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan) return 0; } - -static int sdma_disable_channel_with_delay(struct dma_chan *chan) +static void sdma_channel_terminate_work(struct work_struct *work) { - struct sdma_channel *sdmac = to_sdma_chan(chan); + struct sdma_channel *sdmac = container_of(work, struct sdma_channel, + terminate_worker); unsigned long flags; LIST_HEAD(head); - sdma_disable_channel(chan); - spin_lock_irqsave(&sdmac->vc.lock, flags); - vchan_get_all_descriptors(&sdmac->vc, &head); - sdmac->desc = NULL; - spin_unlock_irqrestore(&sdmac->vc.lock, flags); - vchan_dma_desc_free_list(&sdmac->vc, &head); - /* * According to NXP 
R&D team a delay of one BD SDMA cost time * (maximum is 1ms) should be added after disable of the channel * bit, to ensure SDMA core has really been stopped after SDMA * clients call .device_terminate_all. */ - mdelay(1); + usleep_range(1000, 2000); + + spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_get_all_descriptors(&sdmac->vc, &head); + sdmac->desc = NULL; + spin_unlock_irqrestore(&sdmac->vc.lock, flags); + vchan_dma_desc_free_list(&sdmac->vc, &head); +} + +static int sdma_disable_channel_async(struct dma_chan *chan) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + sdma_disable_channel(chan); + + if (sdmac->desc) + schedule_work(&sdmac->terminate_worker); return 0; } +static void sdma_channel_synchronize(struct dma_chan *chan) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + vchan_synchronize(&sdmac->vc); + + flush_work(&sdmac->terminate_worker); +} + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) { struct sdma_engine *sdma = sdmac->sdma; @@ -1192,10 +1210,11 @@ static int sdma_request_channel0(struct sdma_engine *sdma) static int sdma_alloc_bd(struct sdma_desc *desc) { + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); int ret = 0; - desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT, - &desc->bd_phys); + desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, + GFP_NOWAIT); if (!desc->bd) { ret = -ENOMEM; goto out; @@ -1206,7 +1225,9 @@ static int sdma_alloc_bd(struct sdma_desc *desc) static void sdma_free_bd(struct sdma_desc *desc) { - dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys); + u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); + + dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); } static void sdma_desc_free(struct virt_dma_desc *vd) @@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) if (ret) goto disable_clk_ahb; - sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev, - sizeof(struct sdma_buffer_descriptor), - 32, 0); - return 0; disable_clk_ahb: @@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_with_delay(chan); + sdma_disable_channel_async(chan); + + sdma_channel_synchronize(chan); if (sdmac->event_id0) sdma_event_disable(sdmac, sdmac->event_id0); @@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan) clk_disable(sdma->clk_ipg); clk_disable(sdma->clk_ahb); - - dma_pool_destroy(sdmac->bd_pool); - sdmac->bd_pool = NULL; } static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, @@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev) sdmac->channel = i; sdmac->vc.desc_free = sdma_desc_free; + INIT_WORK(&sdmac->terminate_worker, + sdma_channel_terminate_work); /* * Add the channel to the DMAC list. Do not add channel 0 though * because we need it internally in the SDMA driver. This also means @@ -2021,27 +2039,6 @@ static int sdma_probe(struct platform_device *pdev) if (pdata && pdata->script_addrs) sdma_add_scripts(sdma, pdata->script_addrs); - if (pdata) { - ret = sdma_get_firmware(sdma, pdata->fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); - } else { - /* - * Because that device tree does not encode ROM script address, - * the RAM script in firmware is mandatory for device tree - * probe, otherwise it fails. 
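/*
 * A usage sketch, not part of the patch, of what the asynchronous terminate
 * added above implies for SDMA clients: the descriptor cleanup and the ~1ms
 * settling delay now run in terminate_worker, so a client that must know the
 * channel is fully stopped pairs terminate with dmaengine_synchronize(),
 * which reaches sdma_channel_synchronize() and flushes the worker.
 * my_stop_sdma_channel() is a hypothetical client helper.
 */
#include <linux/dmaengine.h>

static void my_stop_sdma_channel(struct dma_chan *chan)
{
        dmaengine_terminate_async(chan); /* -> sdma_disable_channel_async() */
        dmaengine_synchronize(chan);     /* -> sdma_channel_synchronize() */
}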
- */ - ret = of_property_read_string(np, "fsl,sdma-ram-script-name", - &fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware name\n"); - else { - ret = sdma_get_firmware(sdma, fw_name); - if (ret) - dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); - } - } - sdma->dma_device.dev = &pdev->dev; sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; @@ -2050,7 +2047,8 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; + sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; @@ -2084,6 +2082,33 @@ static int sdma_probe(struct platform_device *pdev) of_node_put(spba_bus); } + /* + * Kick off firmware loading as the very last step: + * attempt to load firmware only if we're not on the error path, because + * the firmware callback requires a fully functional and allocated sdma + * instance. + */ + if (pdata) { + ret = sdma_get_firmware(sdma, pdata->fw_name); + if (ret) + dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); + } else { + /* + * Because that device tree does not encode ROM script address, + * the RAM script in firmware is mandatory for device tree + * probe, otherwise it fails. + */ + ret = of_property_read_string(np, "fsl,sdma-ram-script-name", + &fw_name); + if (ret) { + dev_warn(&pdev->dev, "failed to get firmware name\n"); + } else { + ret = sdma_get_firmware(sdma, fw_name); + if (ret) + dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); + } + } + return 0; err_register: diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 23fb2fa040002daeb3e248036efa261b63b3769e..f373a139e0c37b175dbce3a1c2a9658ef7a1bb05 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -372,6 +372,7 @@ struct ioat_ring_ent ** ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) { struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; struct ioat_ring_ent **ring; int total_descs = 1 << order; int i, chunks; @@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) } ring[i]->hw->next = ring[0]->txd.phys; + /* setup descriptor pre-fetching for v3.4 */ + if (ioat_dma->cap & IOAT_CAP_DPS) { + u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN; + + if (chunks == 1) + drsctl |= IOAT_CHAN_DRS_AUTOWRAP; + + writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET); + + } + return ring; } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1ab42ec2b7ff12bf5634c422b93200b585be0a0a..aaafd0e882b5dd63282333393c17c51240c88d75 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -27,7 +27,7 @@ #include "registers.h" #include "hw.h" -#define IOAT_DMA_VERSION "4.00" +#define IOAT_DMA_VERSION "5.00" #define IOAT_DMA_DCA_ANY_CPU ~0 diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index abcc51b343cecd1629700233e8ca9d8882da2dff..781c94de8e810b5fb31c07301aba8c4b1a738d4b 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -66,11 +66,14 @@ #define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 +#define 
PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ #define IOAT_VER_3_2 0x32 /* Version 3.2 */ #define IOAT_VER_3_3 0x33 /* Version 3.3 */ +#define IOAT_VER_3_4 0x34 /* Version 3.4 */ int system_has_dca_enabled(struct pci_dev *pdev); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 4fa4c06c9edb9809675ea98a2d058e3c8b5dfa35..5d0ee7a30275eec3fbcbfb27a7e3111b447b3cc1 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, + /* I/OAT v3.4 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); @@ -129,16 +132,16 @@ static void ioat_init_channel(struct ioatdma_device *ioat_dma, struct ioatdma_chan *ioat_chan, int idx); static void ioat_intr_quirk(struct ioatdma_device *ioat_dma); -static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); +static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma); static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); static int ioat_dca_enabled = 1; module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); -int ioat_pending_level = 4; +int ioat_pending_level = 7; module_param(ioat_pending_level, int, 0644); MODULE_PARM_DESC(ioat_pending_level, - "high-water mark for pushing ioat descriptors (default: 4)"); + "high-water mark for pushing ioat descriptors (default: 7)"); static char ioat_interrupt_style[32] = "msix"; module_param_string(ioat_interrupt_style, ioat_interrupt_style, sizeof(ioat_interrupt_style), 0644); @@ -575,7 +578,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) * ioat_enumerate_channels - find and initialize the device's channels * @ioat_dma: the ioat dma device to be enumerated */ -static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) +static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) { struct ioatdma_chan *ioat_chan; struct device *dev = &ioat_dma->pdev->dev; @@ -594,7 +597,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); xfercap_log &= 0x1f; /* bits [4:0] valid */ if (xfercap_log == 0) - return 0; + return; dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { @@ -611,7 +614,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) } } dma->chancnt = i; - return i; } /** @@ -636,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c) ioat_stop(ioat_chan); ioat_reset_hw(ioat_chan); + /* Put LTR to idle */ + if (ioat_dma->version >= IOAT_VER_3_4) + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, + ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); + spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); descs = ioat_ring_space(ioat_chan); @@ -725,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->cleanup_lock); + /* Setting up LTR values for 3.4 or later */ + if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) { + u32 lat_val; + + lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL | + IOAT_CHAN_LTR_ACTIVE_SNLATSCALE | + IOAT_CHAN_LTR_ACTIVE_SNREQMNT; + writel(lat_val, 
ioat_chan->reg_base + + IOAT_CHAN_LTR_ACTIVE_OFFSET); + + lat_val = IOAT_CHAN_LTR_IDLE_SNVAL | + IOAT_CHAN_LTR_IDLE_SNLATSCALE | + IOAT_CHAN_LTR_IDLE_SNREQMNT; + writel(lat_val, ioat_chan->reg_base + + IOAT_CHAN_LTR_IDLE_OFFSET); + + /* Select to active */ + writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE, + ioat_chan->reg_base + + IOAT_CHAN_LTR_SWSEL_OFFSET); + } + ioat_start_null_desc(ioat_chan); /* check that we got off the ground */ @@ -1186,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) if (err) return err; + if (ioat_dma->cap & IOAT_CAP_DPS) + writeb(ioat_pending_level + 1, + ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); + return 0; } @@ -1205,8 +1238,15 @@ static void ioat_shutdown(struct pci_dev *pdev) spin_lock_bh(&ioat_chan->prep_lock); set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); - del_timer_sync(&ioat_chan->timer); spin_unlock_bh(&ioat_chan->prep_lock); + /* + * Synchronization rule for del_timer_sync(): + * - The caller must not hold locks which would prevent + * completion of the timer's handler. + * So prep_lock cannot be held before calling it. + */ + del_timer_sync(&ioat_chan->timer); + /* this should quiesce then reset */ ioat_reset_hw(ioat_chan); } @@ -1351,6 +1391,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, device); device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version >= IOAT_VER_3_4) + ioat_dca_enabled = 0; if (device->version >= IOAT_VER_3_0) { if (is_skx_ioat(pdev)) device->version = IOAT_VER_3_2; diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 2f3bbc88ff2a0faca2bed832525c7a6fd1c475b3..99c1c24d465dade2e2a7093a9c2eb2ba9a420d37 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -84,6 +84,9 @@ #define IOAT_CAP_PQ 0x00000200 #define IOAT_CAP_DWBES 0x00002000 #define IOAT_CAP_RAID16SS 0x00020000 +#define IOAT_CAP_DPS 0x00800000 + +#define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */ #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ @@ -243,4 +246,25 @@ #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ +#define IOAT_CHAN_DRSCTL_OFFSET 0xB6 +#define IOAT_CHAN_DRSZ_4KB 0x0000 +#define IOAT_CHAN_DRSZ_8KB 0x0001 +#define IOAT_CHAN_DRSZ_2MB 0x0009 +#define IOAT_CHAN_DRS_EN 0x0100 +#define IOAT_CHAN_DRS_AUTOWRAP 0x0200 + +#define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC +#define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0 +#define IOAT_CHAN_LTR_SWSEL_IDLE 0x1 + +#define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0 +#define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */ +#define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */ +#define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */ + +#define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4 +#define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */ +#define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */ +#define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */ + #endif /* _IOAT_REGISTERS_H_ */ diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index a410657f7bcd6f61c0335281e1d0997e540f6dbc..012584cf3c17bf727f843afe70505c59255b0a9d 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) list_for_each_entry_safe(iter, _iter, &iop_chan->chain, chain_node) { pr_debug("\tcookie: %d slot: %d busy: %d " - "this_desc: %#x next_desc: %#x ack: %d\n", + "this_desc: %#x next_desc: %#llx ack: %d\n", iter->async_tx.cookie, 
iter->idx, busy, - iter->async_tx.phys, iop_desc_get_next_desc(iter), + iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter), async_tx_test_ack(&iter->async_tx)); prefetch(_iter); prefetch(&_iter->async_tx); @@ -315,9 +315,9 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, int i; dev_dbg(iop_chan->device->common.dev, "allocated slot: %d " - "(desc %p phys: %#x) slots_per_op %d\n", + "(desc %p phys: %#llx) slots_per_op %d\n", iter->idx, iter->hw_desc, - iter->async_tx.phys, slots_per_op); + (u64)iter->async_tx.phys, slots_per_op); /* pre-ack all but the last descriptor */ if (num_slots != slots_per_op) @@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, return NULL; BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n", __func__, len); spin_lock_bh(&iop_chan->lock); @@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: %lx\n", __func__, src_cnt, len, flags); spin_lock_bh(&iop_chan->lock); @@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, if (unlikely(!len)) return NULL; - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); @@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: %lx\n", __func__, src_cnt, len, flags); if (dmaf_p_disabled_continue(flags)) @@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, return NULL; BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index eb3a1f42ab065793fbd4c30197b7cbc2300ec7f7..e8b2d3e31de802b4e5d321383d01994298e76012 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -722,12 +722,6 @@ static int mmp_pdma_config(struct dma_chan *dchan, chan->dir = cfg->direction; chan->dev_addr = addr; - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. 
- */ - if (cfg->slave_id) - chan->drcmr = cfg->slave_id; return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 88750a34e85987f352ef0e32ded6b22fb8f5115f..bc8050c025b7b8e323c04f298c850fab138b0a67 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -961,6 +961,7 @@ static void _stop(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; + u32 inten = readl(regs + INTEN); if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); @@ -973,10 +974,13 @@ static void _stop(struct pl330_thread *thrd) _emit_KILL(0, insn); - /* Stop generating interrupts for SEV */ - writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); - _execute_DBGINSN(thrd, insn, is_manager(thrd)); + + /* clear the event */ + if (inten & (1 << thrd->ev)) + writel(1 << thrd->ev, regs + INTCLR); + /* Stop generating interrupts for SEV */ + writel(inten & ~(1 << thrd->ev), regs + INTEN); } /* Start doing req 'idx' of thread 'thrd' */ diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 4cf0d4d0cecfb92175610461d5c984592017fd70..25610286979f67e2835a98be26435a1ec0e671a9 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf, } static DRIVER_ATTR_RW(enable); -static ssize_t poly_store(struct device_driver *dev, char *buf) +static ssize_t poly_show(struct device_driver *dev, char *buf) { ssize_t size = 0; u32 reg; diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index b31c28b67ad3ec6aef3babf5402defaac504f680..c54986902b9d28f8378ccc65fe6b01ee5937e6ae 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -960,13 +960,6 @@ static void pxad_get_config(struct pxad_chan *chan, *dcmd |= PXA_DCMD_BURST16; else if (maxburst == 32) *dcmd |= PXA_DCMD_BURST32; - - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. - */ - if (chan->cfg.slave_id) - chan->drcmr = chan->cfg.slave_id; } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 1617715aa6e072616374bfbb45089f4b119ad9c6..4451ccfaf7c9292ae461c38cbfae0423ce7ffb32 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -703,6 +703,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan) /* remove all transactions, including active transaction */ spin_lock_irqsave(&bchan->vc.lock, flag); + /* + * If we have transactions queued, then some might be committed to the + * hardware in the desc fifo. The only way to reset the desc fifo is + * to do a hardware reset (either by pipe or the entire block). + * bam_chan_init_hw() will trigger a pipe reset, and also reinit the + * pipe. If the pipe is left disabled (default state after pipe reset) + * and is accessed by a connected hardware engine, a fatal error in + * the BAM will occur. There is a small window where this could happen + * with bam_chan_init_hw(), but it is assumed that the caller has + * stopped activity on any attached hardware engine. Make sure to do + * this first so that the BAM hardware doesn't cause memory corruption + * by accessing freed resources. 
+ */ + if (!list_empty(&bchan->desc_list)) { + async_desc = list_first_entry(&bchan->desc_list, + struct bam_async_desc, desc_node); + bam_chan_init_hw(bchan, async_desc->dir); + } + list_for_each_entry_safe(async_desc, tmp, &bchan->desc_list, desc_node) { list_add(&async_desc->vd.node, &bchan->vc.desc_issued); @@ -808,6 +827,9 @@ static u32 process_channel_irqs(struct bam_device *bdev) /* Number of bytes available to read */ avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); + if (offset < bchan->head) + avail--; + list_for_each_entry_safe(async_desc, tmp, &bchan->desc_list, desc_node) { /* Not enough data to read */ diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 43d4b00b81388e061b8b9bc2f251617509030821..411f91fde734584e533d49168a167181f78c0fb3 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan) desc = &mdesc->desc; last_cookie = desc->cookie; + llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); + spin_lock_irqsave(&mchan->lock, irqflags); + if (llstat == DMA_COMPLETE) { + mchan->last_success = last_cookie; + result.result = DMA_TRANS_NOERROR; + } else { + result.result = DMA_TRANS_ABORTED; + } + dma_cookie_complete(desc); spin_unlock_irqrestore(&mchan->lock, irqflags); - llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); dmaengine_desc_get_callback(desc, &cb); dma_run_dependencies(desc); spin_lock_irqsave(&mchan->lock, irqflags); list_move(&mdesc->node, &mchan->free); - - if (llstat == DMA_COMPLETE) { - mchan->last_success = last_cookie; - result.result = DMA_TRANS_NOERROR; - } else - result.result = DMA_TRANS_ABORTED; - spin_unlock_irqrestore(&mchan->lock, irqflags); dmaengine_desc_callback_invoke(&cb, &result); @@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, if (!mdesc) return NULL; + mdesc->desc.flags = flags; hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, src, dest, len, flags, HIDMA_TRE_MEMCPY); @@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, if (!mdesc) return NULL; + mdesc->desc.flags = flags; hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, value, dest, len, flags, HIDMA_TRE_MEMSET); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 48ee35e2bce6dd90165e8bdf085f6f1eccf5090a..29c51762336d7ed31acaa3c20fcc61de8340a863 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -198,6 +198,7 @@ struct rcar_dmac { struct dma_device engine; struct device *dev; void __iomem *iomem; + struct device_dma_parameters parms; unsigned int n_channels; struct rcar_dmac_chan *channels; @@ -1164,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); /* Someone calling slave DMA on a generic channel? */ - if (rchan->mid_rid < 0 || !sg_len) { + if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { dev_warn(chan->device->dev, "%s: bad parameter: len=%d, id=%d\n", __func__, sg_len, rchan->mid_rid); @@ -1281,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; + unsigned int chcrb; + unsigned int tcrb; + unsigned int i; if (!desc) return 0; @@ -1328,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, return 0; } + /* + * We need to read two registers. 
+ * Make sure the control register does not skip to next chunk + * while reading the counter. + * Trying it 3 times should be enough: Initial read, retry, retry + * for the paranoid. + */ + for (i = 0; i < 3; i++) { + chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK; + tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); + /* Still the same? */ + if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK)) + break; + } + WARN_ONCE(i >= 3, "residue might be not continuous!"); + /* * In descriptor mode the descriptor running pointer is not maintained * by the interrupt handler, find the running descriptor from the @@ -1335,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, * mode just use the running descriptor pointer. */ if (desc->hwdescs.use) { - dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & - RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; + dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; if (dptr == 0) dptr = desc->nchunks; dptr--; @@ -1354,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, } /* Add the residue for the current chunk. */ - residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; + residue += tcrb << desc->xfer_shift; return residue; } @@ -1367,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, enum dma_status status; unsigned long flags; unsigned int residue; + bool cyclic; status = dma_cookie_status(chan, cookie, txstate); if (status == DMA_COMPLETE || !txstate) @@ -1374,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, spin_lock_irqsave(&rchan->lock, flags); residue = rcar_dmac_chan_get_residue(rchan, cookie); + cyclic = rchan->desc.running ? 
rchan->desc.running->cyclic : false; spin_unlock_irqrestore(&rchan->lock, flags); /* if there's no residue, the cookie is complete */ - if (!residue) + if (!residue && !cyclic) return DMA_COMPLETE; dma_set_residue(txstate, residue); @@ -1792,7 +1815,11 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + dmac->dev->dma_parms = &dmac->parms; + dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 1bb1a8e09025feb0d4d24c3a282d6001895b0ed8..6c94ed7500494104664fca4c02dc0f1f2993ef02 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -697,6 +697,8 @@ static int usb_dmac_runtime_resume(struct device *dev) #endif /* CONFIG_PM */ static const struct dev_pm_ops usb_dmac_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, NULL) }; diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 55df0d41355b0622f4fb9ede60fd6bddbfe5a4e6..9e8ce56a83d8a65e4e9410ddbbea32edd38388ab 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -181,6 +181,7 @@ struct sprd_dma_dev { struct sprd_dma_chn channels[0]; }; +static void sprd_dma_free_desc(struct virt_dma_desc *vd); static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param); static struct of_dma_filter_info sprd_dma_info = { .filter_fn = sprd_dma_filter_fn, @@ -493,12 +494,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) static void sprd_dma_free_chan_resources(struct dma_chan *chan) { struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct virt_dma_desc *cur_vd = NULL; unsigned long flags; spin_lock_irqsave(&schan->vc.lock, flags); + if (schan->cur_desc) + cur_vd = &schan->cur_desc->vd; + sprd_dma_stop(schan); spin_unlock_irqrestore(&schan->vc.lock, flags); + if (cur_vd) + sprd_dma_free_desc(cur_vd); + vchan_free_chan_resources(&schan->vc); pm_runtime_put(chan->device->dev); } @@ -663,7 +671,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; hw->frg_len = temp; - hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK; + hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK; hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK; temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET; @@ -814,15 +822,22 @@ static int sprd_dma_resume(struct dma_chan *chan) static int sprd_dma_terminate_all(struct dma_chan *chan) { struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct virt_dma_desc *cur_vd = NULL; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&schan->vc.lock, flags); + if (schan->cur_desc) + cur_vd = &schan->cur_desc->vd; + sprd_dma_stop(schan); vchan_get_all_descriptors(&schan->vc, &head); spin_unlock_irqrestore(&schan->vc.lock, flags); + if (cur_vd) + sprd_dma_free_desc(cur_vd); + vchan_dma_desc_free_list(&schan->vc, &head); return 0; } diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index f4edfc56f34ef65dc34e50e38fd3c9aa258364fd..3d55405c49cacc409c937a0048f31d0de18edd64 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -142,7 +142,7 @@ enum d40_events { * when the DMA hw is powered off. 
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. */ -static u32 d40_backup_regs[] = { +static __maybe_unused u32 d40_backup_regs[] = { D40_DREG_LCPA, D40_DREG_LCLA, D40_DREG_PRMSE, @@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = { #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) -static u32 d40_backup_regs_chan[] = { +static __maybe_unused u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SSCFG, D40_CHAN_REG_SSELT, D40_CHAN_REG_SSPTR, diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 379e8d534e615cc23d4d3a0281ff9498fedd7e00..4903a408fc146eae9c5b3ac00a6e00e87046b4d6 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold, static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold) { - switch (threshold) { - case STM32_DMA_FIFO_THRESHOLD_FULL: - if (buf_len >= STM32_DMA_MAX_BURST) - return true; - else - return false; - case STM32_DMA_FIFO_THRESHOLD_HALFFULL: - if (buf_len >= STM32_DMA_MAX_BURST / 2) - return true; - else - return false; - default: - return false; - } + /* + * Buffer or period length has to be aligned on FIFO depth. + * Otherwise bytes may be stuck within FIFO at buffer or period + * length. + */ + return ((buf_len % ((threshold + 1) * 4)) == 0); } static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold, diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 06dd1725375e514710c245305069d61586ac2961..8c3c3e5b812a85d66c1b6b04797cdd2bf5ff6e64 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1376,7 +1376,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) chan = &dmadev->chan[id]; if (!chan) { - dev_err(chan2dev(chan), "MDMA channel not initialized\n"); + dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n"); goto exit; } diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 9a558e30c461c4f5b9d26ecf4b8602e05a3a2e85..fb23993430d31490b66da870954187f0b80e05f7 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -636,7 +636,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); dma_desc = sgreq->dma_desc; - dma_desc->bytes_transferred += sgreq->req_len; + /* if we dma for long enough the transfer count will wrap */ + dma_desc->bytes_transferred = + (dma_desc->bytes_transferred + sgreq->req_len) % + dma_desc->bytes_requested; /* Callback need to be call */ if (!dma_desc->cb_count) @@ -978,8 +981,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1121,8 +1128,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b26256f23d67fbdf58206adf2a253fcf40c50091..09b6756366c30ec70c0976850abbfd9c703be0a8 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ 
-22,7 +22,6 @@ #include #include #include -#include #include #include @@ -141,6 +140,7 @@ struct tegra_adma { struct dma_device dma_dev; struct device *dev; void __iomem *base_addr; + struct clk *ahub_clk; unsigned int nr_channels; unsigned long rx_requests_reserved; unsigned long tx_requests_reserved; @@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); + clk_disable_unprepare(tdma->ahub_clk); - return pm_clk_suspend(dev); + return 0; } static int tegra_adma_runtime_resume(struct device *dev) @@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev) struct tegra_adma *tdma = dev_get_drvdata(dev); int ret; - ret = pm_clk_resume(dev); - if (ret) + ret = clk_prepare_enable(tdma->ahub_clk); + if (ret) { + dev_err(dev, "ahub clk_enable failed: %d\n", ret); return ret; - + } tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); return 0; @@ -692,13 +694,11 @@ static int tegra_adma_probe(struct platform_device *pdev) if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); - ret = pm_clk_create(&pdev->dev); - if (ret) - return ret; - - ret = of_pm_clk_add_clk(&pdev->dev, "d_audio"); - if (ret) - goto clk_destroy; + tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio"); + if (IS_ERR(tdma->ahub_clk)) { + dev_err(&pdev->dev, "Error: Missing ahub controller clock\n"); + return PTR_ERR(tdma->ahub_clk); + } pm_runtime_enable(&pdev->dev); @@ -775,8 +775,6 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); rpm_disable: pm_runtime_disable(&pdev->dev); -clk_destroy: - pm_clk_destroy(&pdev->dev); return ret; } @@ -786,6 +784,7 @@ static int tegra_adma_remove(struct platform_device *pdev) struct tegra_adma *tdma = platform_get_drvdata(pdev); int i; + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&tdma->dma_dev); for (i = 0; i < tdma->nr_channels; ++i) @@ -793,7 +792,6 @@ static int tegra_adma_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - pm_clk_destroy(&pdev->dev); return 0; } diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 1497da3677109c78949cb91adf0907a30eb9b32f..f8fa99402f12bc13220bbfe834ebe096ea185d1a 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -585,9 +585,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct cppi41_channel *c = to_cpp41_chan(chan); + struct dma_async_tx_descriptor *txd = NULL; + struct cppi41_dd *cdd = c->cdd; struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; + int error; + + error = pm_runtime_get(cdd->ddev.dev); + if (error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + + return NULL; + } + + if (cdd->is_suspended) + goto err_out_not_ready; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { @@ -610,7 +623,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( d++; } - return &c->txd; + txd = &c->txd; + +err_out_not_ready: + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); + + return txd; } static void cppi41_compute_td_desc(struct cppi41_desc *d) @@ -723,8 +742,22 @@ static int cppi41_stop_chan(struct dma_chan *chan) desc_phys = lower_32_bits(c->desc_phys); desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); - if (!cdd->chan_busy[desc_num]) + if (!cdd->chan_busy[desc_num]) { + struct 
cppi41_channel *cc, *_ct; + + /* + * channels might still be in the pendling list if + * cppi41_dma_issue_pending() is called after + * cppi41_runtime_suspend() is called + */ + list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { + if (cc != c) + continue; + list_del(&cc->node); + break; + } return 0; + } ret = cppi41_tear_down_chan(c); if (ret) diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c index 9272b173c74655203ec4c1997988dccaa9ba02d3..6574cb5a12fee8dd30d2ec4a20b10c9bb7065146 100644 --- a/drivers/dma/ti/dma-crossbar.c +++ b/drivers/dma/ti/dma-crossbar.c @@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); - if (ret) + if (ret) { + kfree(rsv_events); return ret; + } for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index ceabdea40ae0fd20aca3e54fb30f2d1e7f562a28..2824f8cf414b0695d9b93b4e18f17ccc0993accc 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2218,13 +2218,6 @@ static int edma_probe(struct platform_device *pdev) if (!info) return -ENODEV; - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - return ret; - } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2255,27 +2248,32 @@ static int edma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ecc); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + pm_runtime_disable(dev); + return ret; + } + /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) - return ret; + goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); - if (!ecc->slave_chans) - return -ENOMEM; ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->slot_inuse) - return -ENOMEM; + if (!ecc->slave_chans || !ecc->slot_inuse) { + ret = -ENOMEM; + goto err_disable_pm; + } ecc->default_queue = info->default_queue; - for (i = 0; i < ecc->num_slots; i++) - edma_write_slot(ecc, i, &dummy_paramset); - if (info->rsv) { /* Set the reserved slots in inuse list */ rsv_slots = info->rsv->rsv_slots; @@ -2288,6 +2286,12 @@ static int edma_probe(struct platform_device *pdev) } } + for (i = 0; i < ecc->num_slots; i++) { + /* Reset only unused - not reserved - paRAM slots */ + if (!test_bit(i, ecc->slot_inuse)) + edma_write_slot(ecc, i, &dummy_paramset); + } + /* Clear the xbar mapped channels in unused list */ xbar_chans = info->xbar_chans; if (xbar_chans) { @@ -2303,11 +2307,16 @@ static int edma_probe(struct platform_device *pdev) if (irq >= 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccint = irq; } @@ -2319,11 +2328,16 @@ static int edma_probe(struct platform_device *pdev) if (irq >= 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, 
irq, dma_ccerr_handler, 0, irq_name, ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccerrint = irq; } @@ -2331,7 +2345,8 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); - return ecc->dummy_slot; + ret = ecc->dummy_slot; + goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; @@ -2342,8 +2357,10 @@ static int edma_probe(struct platform_device *pdev) ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, sizeof(*ecc->tc_list), GFP_KERNEL); - if (!ecc->tc_list) - return -ENOMEM; + if (!ecc->tc_list) { + ret = -ENOMEM; + goto err_reg1; + } for (i = 0;; i++) { ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", @@ -2413,6 +2430,9 @@ static int edma_probe(struct platform_device *pdev) err_reg1: edma_free_slot(ecc, ecc->dummy_slot); +err_disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -2443,6 +2463,8 @@ static int edma_remove(struct platform_device *pdev) if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return 0; } diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index a4a931ddf6f695fa21a25a359a94ee0c57f92beb..c192bdc30aae1170ca6d3a3df6e3dd6a0519c659 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1237,7 +1237,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( if (src_icg) { d->ccr |= CCR_SRC_AMODE_DBLIDX; d->ei = 1; - d->fi = src_icg; + d->fi = src_icg + 1; } else if (xt->src_inc) { d->ccr |= CCR_SRC_AMODE_POSTINC; d->fi = 0; @@ -1252,7 +1252,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( if (dst_icg) { d->ccr |= CCR_DST_AMODE_DBLIDX; sg->ei = 1; - sg->fi = dst_icg; + sg->fi = dst_icg + 1; } else if (xt->dst_inc) { d->ccr |= CCR_DST_AMODE_POSTINC; sg->fi = 0; @@ -1543,8 +1543,10 @@ static int omap_dma_probe(struct platform_device *pdev) rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, IRQF_SHARED, "omap-dma-engine", od); - if (rc) + if (rc) { + omap_dma_free(od); return rc; + } } if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 395c698edb4d7f1d762497f96c0d9f55c2a29130..fc0f9c8766a87c35c17b7e42e95d1ca2db7fd20a 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, } dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, - td_desc->desc_list_len, DMA_MEM_TO_DEV); + td_desc->desc_list_len, DMA_TO_DEVICE); return &td_desc->txd; } diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index c1244231259518d08dda4427c977fac160aee76b..d56b6b0e22a847e211e8203126bd4a67b997d73c 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -72,6 +72,9 @@ #define XILINX_DMA_DMACR_CIRC_EN BIT(1) #define XILINX_DMA_DMACR_RUNSTOP BIT(0) #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) +#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16) +#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8) #define XILINX_DMA_REG_DMASR 0x0004 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) @@ -1424,6 +1427,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan 
*chan) chan->err = false; chan->idle = true; + chan->desc_pendingcount = 0; chan->desc_submitcount = 0; return err; @@ -2112,8 +2116,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, chan->config.gen_lock = cfg->gen_lock; chan->config.master = cfg->master; + dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; if (cfg->gen_lock && chan->genlock) { dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; + dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; } @@ -2129,11 +2135,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, chan->config.delay = cfg->delay; if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { + dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; chan->config.coalesc = cfg->coalesc; } if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { + dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; chan->config.delay = cfg->delay; } diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index c74a88b650396e34cb713e069ffb4640060d94f8..73de6a6179fcd106f7fbaff8de4cede47304f665 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll { u32 ctrl; u64 nxtdscraddr; u64 rsvd; -}; __aligned(64) +}; /** * struct zynqmp_dma_desc_sw - Per Transaction structure diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 57304b2e989f2ca6ba45fe808cb933240273e7a0..1886c8e496048f1c421a52d898da19aa53f8afdc 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -231,15 +231,28 @@ config EDAC_SBRIDGE config EDAC_SKX tristate "Intel Skylake server Integrated MC" - depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG + depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG && ACPI depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y select DMI + select ACPI_ADXL help Support for error detection and correction the Intel Skylake server Integrated Memory Controllers. If your system has non-volatile DIMMs you should also manually select CONFIG_ACPI_NFIT. +config EDAC_I10NM + tristate "Intel 10nm server Integrated MC" + depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG + depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_I10NM can't be y + select DMI + select ACPI_ADXL if ACPI + help + Support for error detection and correction the Intel + 10nm server Integrated Memory Controllers. If your + system has non-volatile DIMMs you should also manually + select CONFIG_ACPI_NFIT. + config EDAC_PND2 tristate "Intel Pondicherry2" depends on PCI && X86_64 && X86_MCE_INTEL @@ -250,8 +263,8 @@ config EDAC_PND2 micro-server but may appear on others in the future. 
config EDAC_MPC85XX - tristate "Freescale MPC83xx / MPC85xx" - depends on FSL_SOC + bool "Freescale MPC83xx / MPC85xx" + depends on FSL_SOC && EDAC=y help Support for error detection and correction on the Freescale MPC8349, MPC8560, MPC8540, MPC8548, T4240 diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 02b43a7d8c3ee3072863c8560d08eb871962feb7..3cd809d92c747569a3d4ac63de7062cb9569df4a 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o obj-$(CONFIG_EDAC_I7300) += i7300_edac.o obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o -obj-$(CONFIG_EDAC_SKX) += skx_edac.o obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o @@ -58,6 +57,12 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o +skx_edac-y := skx_common.o skx_base.o +obj-$(CONFIG_EDAC_SKX) += skx_edac.o + +i10nm_edac-y := skx_common.o i10nm_base.o +obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o + obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 5762c3c383f2ee05d67641bc1e2bd18f60c07f92..56de378ad13dce008e92131df715b22c8b580100 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); int irq = irq_desc_get_irq(desc); + unsigned long bits; dberr = (irq == edac->db_irq) ? 1 : 0; sm_offset = dberr ? 
A10_SYSMGR_ECC_INTSTAT_DERR_OFST : @@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status); - for_each_set_bit(bit, (unsigned long *)&irq_status, 32) { + bits = irq_status; + for_each_set_bit(bit, &bits, 32) { irq = irq_linear_revmap(edac->domain, dberr * 32 + bit); if (irq) generic_handle_irq(irq); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 18aeabb1d5ee4afdb14051d42adad6121d83cc43..8d9f4100628733f7902d6771a56299099d086739 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -15,6 +15,23 @@ module_param(ecc_enable_override, int, 0644); static struct msr __percpu *msrs; +static struct amd64_family_type *fam_type; + +static inline u32 get_umc_reg(u32 reg) +{ + if (!fam_type->flags.zn_regs_v2) + return reg; + + switch (reg) { + case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5; + case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5; + case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5; + } + + WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg); + return 0; +} + /* Per-node stuff */ static struct ecc_settings **ecc_stngs; @@ -80,6 +97,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return err; } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -211,7 +239,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -253,17 +281,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -272,11 +290,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -448,6 +470,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, #define for_each_chip_select_mask(i, dct, pvt) \ for (i = 0; i < pvt->csels[dct].m_cnt; i++) +#define for_each_umc(i) \ + for (i = 0; i < fam_type->max_mcs; i++) + /* * @input_addr is an InputAddr associated with the node given by mci. Return the * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 
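Editor's note (not part of the patch): the altr_edac_a10_irq_handler() change above stops casting a u32's address to unsigned long * before handing it to for_each_set_bit(), which can over-read on 64-bit targets. A minimal sketch of the safe pattern the fix adopts; the helper name is hypothetical:

#include <linux/bitops.h>
#include <linux/printk.h>

/* Widen the 32-bit status word into a local unsigned long so that
 * for_each_set_bit() operates on a correctly sized object instead of
 * reading past a u32 on 64-bit machines.
 */
static void report_status_bits(u32 irq_status)
{
	unsigned long bits = irq_status;
	int bit;

	for_each_set_bit(bit, &bits, 32)
		pr_debug("status bit %d is set\n", bit);
}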
@@ -721,7 +746,7 @@ static unsigned long determine_edac_cap(struct amd64_pvt *pvt) if (pvt->umc) { u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0; - for (i = 0; i < NUM_UMCS; i++) { + for_each_umc(i) { if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT)) continue; @@ -780,24 +805,61 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) (dclr & BIT(15)) ? "yes" : "no"); } +#define CS_EVEN_PRIMARY BIT(0) +#define CS_ODD_PRIMARY BIT(1) +#define CS_EVEN_SECONDARY BIT(2) +#define CS_ODD_SECONDARY BIT(3) +#define CS_3R_INTERLEAVE BIT(4) + +#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY) +#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY) + +static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) +{ + u8 base, count = 0; + int cs_mode = 0; + + if (csrow_enabled(2 * dimm, ctrl, pvt)) + cs_mode |= CS_EVEN_PRIMARY; + + if (csrow_enabled(2 * dimm + 1, ctrl, pvt)) + cs_mode |= CS_ODD_PRIMARY; + + /* Asymmetric dual-rank DIMM support. */ + if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt)) + cs_mode |= CS_ODD_SECONDARY; + + /* + * 3 Rank inteleaving support. + * There should be only three bases enabled and their two masks should + * be equal. + */ + for_each_chip_select(base, ctrl, pvt) + count += csrow_enabled(base, ctrl, pvt); + + if (count == 3 && + pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) { + edac_dbg(1, "3R interleaving in use.\n"); + cs_mode |= CS_3R_INTERLEAVE; + } + + return cs_mode; +} + static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) { - int dimm, size0, size1, cs0, cs1; + int dimm, size0, size1, cs0, cs1, cs_mode; edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); - for (dimm = 0; dimm < 4; dimm++) { - size0 = 0; + for (dimm = 0; dimm < 2; dimm++) { cs0 = dimm * 2; - - if (csrow_enabled(cs0, ctrl, pvt)) - size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0); - - size1 = 0; cs1 = dimm * 2 + 1; - if (csrow_enabled(cs1, ctrl, pvt)) - size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1); + cs_mode = f17_get_cs_mode(dimm, ctrl, pvt); + + size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0); + size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1); amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", cs0, size0, @@ -810,8 +872,11 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt) struct amd64_umc *umc; u32 i, tmp, umc_base; - for (i = 0; i < NUM_UMCS; i++) { - umc_base = get_umc_base(i); + for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -836,8 +901,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt) edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); - if (pvt->dram_type == MEM_LRDDR4) { - amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp); + if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { + amd_smn_read(pvt->mc_node_id, + umc_base + get_umc_reg(UMCCH_ADDR_CFG), + &tmp); edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n", i, 1 << ((tmp >> 4) & 0x3)); } @@ -893,8 +960,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt) edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); - amd64_info("using %s syndromes.\n", - ((pvt->ecc_sym_sz == 8) ? 
"x8" : "x4")); + amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz); } /* @@ -908,89 +974,163 @@ static void prep_chip_selects(struct amd64_pvt *pvt) } else if (pvt->fam == 0x15 && pvt->model == 0x30) { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; + } else if (pvt->fam >= 0x17) { + int umc; + + for_each_umc(umc) { + pvt->csels[umc].b_cnt = 4; + pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2; + } + } else { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; } } +static void read_umc_base_mask(struct amd64_pvt *pvt) +{ + u32 umc_base_reg, umc_base_reg_sec; + u32 umc_mask_reg, umc_mask_reg_sec; + u32 base_reg, base_reg_sec; + u32 mask_reg, mask_reg_sec; + u32 *base, *base_sec; + u32 *mask, *mask_sec; + u32 umc_base; + int cs, umc; + + for_each_umc(umc) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; + + for_each_chip_select(cs, umc, pvt) { + base = &pvt->csels[umc].csbases[cs]; + base_sec = &pvt->csels[umc].csbases_sec[cs]; + + base_reg = umc_base_reg + (cs * 4); + base_reg_sec = umc_base_reg_sec + (cs * 4); + + if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) + edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *base, base_reg); + + if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec)) + edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *base_sec, base_reg_sec); + } + + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(UMCCH_ADDR_MASK_SEC); + + for_each_chip_select_mask(cs, umc, pvt) { + mask = &pvt->csels[umc].csmasks[cs]; + mask_sec = &pvt->csels[umc].csmasks_sec[cs]; + + mask_reg = umc_mask_reg + (cs * 4); + mask_reg_sec = umc_mask_reg_sec + (cs * 4); + + if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) + edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *mask, mask_reg); + + if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec)) + edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *mask_sec, mask_reg_sec); + } + } +} + /* * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers */ static void read_dct_base_mask(struct amd64_pvt *pvt) { - int base_reg0, base_reg1, mask_reg0, mask_reg1, cs; + int cs; prep_chip_selects(pvt); - if (pvt->umc) { - base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR; - base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR; - mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK; - mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK; - } else { - base_reg0 = DCSB0; - base_reg1 = DCSB1; - mask_reg0 = DCSM0; - mask_reg1 = DCSM1; - } + if (pvt->umc) + return read_umc_base_mask(pvt); for_each_chip_select(cs, 0, pvt) { - int reg0 = base_reg0 + (cs * 4); - int reg1 = base_reg1 + (cs * 4); + int reg0 = DCSB0 + (cs * 4); + int reg1 = DCSB1 + (cs * 4); u32 *base0 = &pvt->csels[0].csbases[cs]; u32 *base1 = &pvt->csels[1].csbases[cs]; - if (pvt->umc) { - if (!amd_smn_read(pvt->mc_node_id, reg0, base0)) - edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n", - cs, *base0, reg0); + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) + edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", + cs, *base0, reg0); - if (!amd_smn_read(pvt->mc_node_id, reg1, base1)) - edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n", - cs, *base1, reg1); - } else { - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) - edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", - cs, 
*base0, reg0); - - if (pvt->fam == 0xf) - continue; + if (pvt->fam == 0xf) + continue; - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) - edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", - cs, *base1, (pvt->fam == 0x10) ? reg1 - : reg0); - } + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) + edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", + cs, *base1, (pvt->fam == 0x10) ? reg1 + : reg0); } for_each_chip_select_mask(cs, 0, pvt) { - int reg0 = mask_reg0 + (cs * 4); - int reg1 = mask_reg1 + (cs * 4); + int reg0 = DCSM0 + (cs * 4); + int reg1 = DCSM1 + (cs * 4); u32 *mask0 = &pvt->csels[0].csmasks[cs]; u32 *mask1 = &pvt->csels[1].csmasks[cs]; - if (pvt->umc) { - if (!amd_smn_read(pvt->mc_node_id, reg0, mask0)) - edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n", - cs, *mask0, reg0); + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) + edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", + cs, *mask0, reg0); - if (!amd_smn_read(pvt->mc_node_id, reg1, mask1)) - edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n", - cs, *mask1, reg1); - } else { - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) - edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", - cs, *mask0, reg0); + if (pvt->fam == 0xf) + continue; - if (pvt->fam == 0xf) - continue; + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) + edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", + cs, *mask1, (pvt->fam == 0x10) ? reg1 + : reg0); + } +} + +static void determine_memory_type_df(struct amd64_pvt *pvt) +{ + struct amd64_umc *umc; + u32 i; + + for_each_umc(i) { + umc = &pvt->umc[i]; - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) - edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", - cs, *mask1, (pvt->fam == 0x10) ? reg1 - : reg0); + if (!(umc->sdp_ctrl & UMC_SDP_INIT)) { + umc->dram_type = MEM_EMPTY; + continue; } + + /* + * Check if the system supports the "DDR Type" field in UMC Config + * and has DDR5 DIMMs in use. + */ + if ((fam_type->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR5; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR5; + else + umc->dram_type = MEM_DDR5; + } else { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR4; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR4; + else + umc->dram_type = MEM_DDR4; + } + + edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]); } } @@ -998,6 +1138,9 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) + return determine_memory_type_df(pvt); + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1043,15 +1186,6 @@ static void determine_memory_type(struct amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 
0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -1386,7 +1520,7 @@ static int f17_early_channel_count(struct amd64_pvt *pvt) int i, channels = 0; /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */ - for (i = 0; i < NUM_UMCS; i++) + for_each_umc(i) channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT); amd64_info("MCT channel count: %d\n", channels); @@ -1521,18 +1655,79 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, return ddr3_cs_size(cs_mode, false); } -static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc, +static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, unsigned int cs_mode, int csrow_nr) { - u32 base_addr = pvt->csels[umc].csbases[csrow_nr]; + u32 addr_mask_orig, addr_mask_deinterleaved; + u32 msb, weight, num_zero_bits; + int cs_mask_nr = csrow_nr; + int dimm, size = 0; - /* Each mask is used for every two base addresses. */ - u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1]; + /* No Chip Selects are enabled. */ + if (!cs_mode) + return size; - /* Register [31:1] = Address [39:9]. Size is in kBs here. */ - u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1; + /* Requested size of an even CS but none are enabled. */ + if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1)) + return size; - edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask); + /* Requested size of an odd CS but none are enabled. */ + if (!(cs_mode & CS_ODD) && (csrow_nr & 1)) + return size; + + /* + * Family 17h introduced systems with one mask per DIMM, + * and two Chip Selects per DIMM. + * + * CS0 and CS1 -> MASK0 / DIMM0 + * CS2 and CS3 -> MASK1 / DIMM1 + * + * Family 19h Model 10h introduced systems with one mask per Chip Select, + * and two Chip Selects per DIMM. + * + * CS0 -> MASK0 -> DIMM0 + * CS1 -> MASK1 -> DIMM0 + * CS2 -> MASK2 -> DIMM1 + * CS3 -> MASK3 -> DIMM1 + * + * Keep the mask number equal to the Chip Select number for newer systems, + * and shift the mask number for older systems. + */ + dimm = csrow_nr >> 1; + + if (!fam_type->flags.zn_regs_v2) + cs_mask_nr >>= 1; + + /* Asymmetric dual-rank DIMM support. */ + if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY)) + addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; + else + addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; + + /* + * The number of zero bits in the mask is equal to the number of bits + * in a full mask minus the number of bits in the current mask. + * + * The MSB is the number of bits in the full mask because BIT[0] is + * always 0. + * + * In the special 3 Rank interleaving case, a single bit is flipped + * without swapping with the most significant bit. This can be handled + * by keeping the MSB where it is and ignoring the single zero bit. + */ + msb = fls(addr_mask_orig) - 1; + weight = hweight_long(addr_mask_orig); + num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); + + /* Take the number of zero bits off from the top of the mask. */ + addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); + + edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm); + edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig); + edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved); + + /* Register [31:1] = Address [39:9]. Size is in kBs here. */ + size = (addr_mask_deinterleaved >> 2) + 1; /* Return size in MBs. 
*/ return size >> 10; @@ -2125,6 +2320,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "K8", .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, + .max_mcs = 2, .ops = { .early_channel_count = k8_early_channel_count, .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, @@ -2135,6 +2331,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F10h", .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2145,6 +2342,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h", .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2155,6 +2353,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h_M30h", .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2165,6 +2364,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h_M60h", .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2175,6 +2375,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F16h", .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2185,6 +2386,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F16h_M30h", .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2195,9 +2397,81 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h", .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F17_M10H_CPUS] = { + .ctl_name = "F17h_M10h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F17_M30H_CPUS] = { + .ctl_name = "F17h_M30h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F17_M70H_CPUS] = { + .ctl_name = "F17h_M70h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F18_M06H_CPUS] = { + .ctl_name = "F18h_M06h", + .f0_id = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F0, + .f6_id = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F18_M10H_CPUS] = { + .ctl_name = "F18h_M10h", + .f0_id = 
PCI_DEVICE_ID_HYGON_18H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_HYGON_18H_M10H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, .ops = { .early_channel_count = f17_early_channel_count, - .dbam_to_cs = f17_base_addr_to_cs_size, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F19_M10H_CPUS] = { + .ctl_name = "F19h_M10h", + .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6, + .max_mcs = 12, + .flags.zn_regs_v2 = 1, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, } }, }; @@ -2453,18 +2727,17 @@ static inline void decode_bus_error(int node_id, struct mce *m) * To find the UMC channel represented by this bank we need to match on its * instance_id. The instance_id of a bank is held in the lower 32 bits of its * IPID. + * + * Currently, we can derive the channel number by looking at the 6th nibble in + * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel + * number. */ -static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m) +static int find_umc_channel(struct mce *m) { - u32 umc_instance_id[] = {0x50f00, 0x150f00}; - u32 instance_id = m->ipid & GENMASK(31, 0); - int i, channel = -1; - - for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++) - if (umc_instance_id[i] == instance_id) - channel = i; - - return channel; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + return (m->ipid & GENMASK(23, 0)) >> 20; + return (m->ipid & GENMASK(31, 0)) >> 20; } static void decode_umc_error(int node_id, struct mce *m) @@ -2474,6 +2747,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; mci = edac_mc_find(node_id); if (!mci) @@ -2486,18 +2760,7 @@ static void decode_umc_error(int node_id, struct mce *m) if (m->status & MCI_STATUS_DEFERRED) ecc_type = 3; - err.channel = find_umc_channel(pvt, m); - if (err.channel < 0) { - err.err_code = ERR_CHANNEL; - goto log_error; - } - - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { - err.err_code = ERR_NORM_ADDR; - goto log_error; - } - - error_address_to_page_and_offset(sys_addr, &err); + err.channel = find_umc_channel(m); if (!(m->status & MCI_STATUS_SYNDV)) { err.err_code = ERR_SYND; @@ -2515,6 +2778,18 @@ static void decode_umc_error(int node_id, struct mce *m) err.csrow = m->synd & 0x7; + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { + err.err_code = ERR_NORM_ADDR; + goto log_error; + } + + error_address_to_page_and_offset(sys_addr, &err); + log_error: __log_ecc_error(mci, &err, ecc_type); } @@ -2585,6 +2860,14 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) } } +static void determine_ecc_sym_sz_f18h_m4h(struct amd64_pvt *pvt, int channel) +{ + if (pvt->umc[channel].ecc_ctrl & BIT(8)) + pvt->ecc_sym_sz = 16; + else if (pvt->umc[channel].ecc_ctrl & BIT(7)) + pvt->ecc_sym_sz = 8; +} + static void determine_ecc_sym_sz(struct amd64_pvt *pvt) { pvt->ecc_sym_sz = 4; @@ -2592,19 +2875,28 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt) if (pvt->umc) { u8 i; - for (i = 0; i < NUM_UMCS; i++) { + for_each_umc(i) { /* Check enabled 
channels only: */ - if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) && - (pvt->umc[i].ecc_ctrl & BIT(7))) { - pvt->ecc_sym_sz = 8; - break; + if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) && + (pvt->umc[i].umc_cfg & GENMASK(2, 0)) == 0x1) { + determine_ecc_sym_sz_f18h_m4h(pvt, i); + return; + } + + if (pvt->umc[i].ecc_ctrl & BIT(9)) { + pvt->ecc_sym_sz = 16; + return; + } else if (pvt->umc[i].ecc_ctrl & BIT(7)) { + pvt->ecc_sym_sz = 8; + return; + } } } - - return; - } - - if (pvt->fam >= 0x10) { + } else if (pvt->fam >= 0x10) { u32 tmp; amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); @@ -2628,12 +2920,15 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt) u32 i, umc_base; /* Read registers from each UMC */ - for (i = 0; i < NUM_UMCS; i++) { + for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; - amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg); + amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg); amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); @@ -2717,7 +3012,9 @@ static void read_mc_regs(struct amd64_pvt *pvt) read_dct_base_mask(pvt); determine_memory_type(pvt); - edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); + + if (!pvt->umc) + edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); determine_ecc_sym_sz(pvt); @@ -2764,10 +3061,12 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) int csrow_nr = csrow_nr_orig; u32 cs_mode, nr_pages; - if (!pvt->umc) + if (!pvt->umc) { csrow_nr >>= 1; - - cs_mode = DBAM_DIMM(csrow_nr, dbam); + cs_mode = DBAM_DIMM(csrow_nr, dbam); + } else { + cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt); + } nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); nr_pages <<= 20 - PAGE_SHIFT; @@ -2779,6 +3078,50 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) return nr_pages; } +static int init_csrows_df(struct mem_ctl_info *mci) +{ + struct amd64_pvt *pvt = mci->pvt_info; + enum edac_type edac_mode = EDAC_NONE; + enum dev_type dev_type = DEV_UNKNOWN; + struct dimm_info *dimm; + int empty = 1; + u8 umc, cs; + + if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) { + edac_mode = EDAC_S16ECD16ED; + dev_type = DEV_X16; + } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) { + edac_mode = EDAC_S8ECD8ED; + dev_type = DEV_X8; + } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) { + edac_mode = EDAC_S4ECD4ED; + dev_type = DEV_X4; + } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) { + edac_mode = EDAC_SECDED; + } + + for_each_umc(umc) { + for_each_chip_select(cs, umc, pvt) { + if (!csrow_enabled(cs, umc, pvt)) + continue; + + empty = 0; + dimm = mci->csrows[cs]->channels[umc]->dimm; + + edac_dbg(1, "MC node: %d, csrow: %d\n", + pvt->mc_node_id, cs); + + dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs); + dimm->mtype = pvt->umc[umc].dram_type; + dimm->edac_mode = edac_mode; + dimm->dtype = dev_type; + dimm->grain = 64; + } + } + + return empty; +} + /* * Initialize the array of csrow attribute instances, based on the values * from pci config hardware registers. 
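Editor's note (not part of the patch): in get_csrow_nr_pages() above, dbam_to_cs() reports the chip-select size in MiB while EDAC accounts for it in pages, hence the shift by (20 - PAGE_SHIFT). An illustrative sketch of that conversion with a hypothetical helper name:

#include <linux/types.h>
#include <asm/page.h>

/* MiB -> bytes is a shift by 20, bytes -> pages is a shift by PAGE_SHIFT,
 * so the combined shift is (20 - PAGE_SHIFT), e.g. 8 with 4 KiB pages:
 * 1024 MiB -> 262144 pages.
 */
static u32 csrow_size_mb_to_pages(u32 size_mb)
{
	return size_mb << (20 - PAGE_SHIFT);
}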
@@ -2793,15 +3136,16 @@ static int init_csrows(struct mem_ctl_info *mci) int nr_pages = 0; u32 val; - if (!pvt->umc) { - amd64_read_pci_cfg(pvt->F3, NBCFG, &val); + if (pvt->umc) + return init_csrows_df(mci); - pvt->nbcfg = val; + amd64_read_pci_cfg(pvt->F3, NBCFG, &val); - edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", - pvt->mc_node_id, val, - !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); - } + pvt->nbcfg = val; + + edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", + pvt->mc_node_id, val, + !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); /* * We iterate over DCT0 here but we look at DCT1 in parallel, if needed. @@ -2838,13 +3182,7 @@ static int init_csrows(struct mem_ctl_info *mci) edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); /* Determine DIMM ECC mode: */ - if (pvt->umc) { - if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) - edac_mode = EDAC_S4ECD4ED; - else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) - edac_mode = EDAC_SECDED; - - } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) { + if (pvt->nbcfg & NBCFG_ECC_ENABLE) { edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ? EDAC_S4ECD4ED : EDAC_SECDED; @@ -2854,6 +3192,7 @@ static int init_csrows(struct mem_ctl_info *mci) dimm = csrow->channels[j]->dimm; dimm->mtype = pvt->dram_type; dimm->edac_mode = edac_mode; + dimm->grain = 64; } } @@ -3041,7 +3380,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) if (boot_cpu_data.x86 >= 0x17) { u8 umc_en_mask = 0, ecc_en_mask = 0; - for (i = 0; i < NUM_UMCS; i++) { + for_each_umc(i) { u32 base = get_umc_base(i); /* Only check enabled UMCs. */ @@ -3092,12 +3431,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) static inline void f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) { - u8 i, ecc_en = 1, cpk_en = 1; + u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1; - for (i = 0; i < NUM_UMCS; i++) { + for_each_umc(i) { if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED); cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP); + + dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6)); + dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7)); } } @@ -3105,13 +3447,19 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) if (ecc_en) { mci->edac_ctl_cap |= EDAC_FLAG_SECDED; - if (cpk_en) + if (!cpk_en) + return; + + if (dev_x4) mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; + else if (dev_x16) + mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED; + else + mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED; } } -static void setup_mci_misc_attrs(struct mem_ctl_info *mci, - struct amd64_family_type *fam) +static void setup_mci_misc_attrs(struct mem_ctl_info *mci) { struct amd64_pvt *pvt = mci->pvt_info; @@ -3130,7 +3478,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, mci->edac_cap = determine_edac_cap(pvt); mci->mod_name = EDAC_MOD_STR; - mci->ctl_name = fam->ctl_name; + mci->ctl_name = fam_type->ctl_name; mci->dev_name = pci_name(pvt->F3); mci->ctl_page_to_phys = NULL; @@ -3144,8 +3492,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, */ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) { - struct amd64_family_type *fam_type = NULL; - pvt->ext_model = boot_cpu_data.x86_model >> 4; pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; @@ -3188,10 +3534,76 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) break; case 0x17: + if (pvt->model >= 0x10 && pvt->model <= 0x2f) { + 
fam_type = &family_types[F17_M10H_CPUS]; + pvt->ops = &family_types[F17_M10H_CPUS].ops; + break; + } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) { + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + break; + } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { + fam_type = &family_types[F17_M70H_CPUS]; + pvt->ops = &family_types[F17_M70H_CPUS].ops; + break; + } fam_type = &family_types[F17_CPUS]; pvt->ops = &family_types[F17_CPUS].ops; break; + case 0x18: + if (pvt->model == 0x4) { + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + family_types[F17_M30H_CPUS].max_mcs = 3; + family_types[F17_M30H_CPUS].ctl_name = "F18h_M04h"; + break; + } else if (pvt->model == 0x5) { + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + family_types[F17_M30H_CPUS].max_mcs = 1; + family_types[F17_M30H_CPUS].ctl_name = "F18h_M05h"; + break; + } else if (pvt->model == 0x6) { + fam_type = &family_types[F18_M06H_CPUS]; + pvt->ops = &family_types[F18_M06H_CPUS].ops; + break; + } else if (pvt->model == 0x7) { + fam_type = &family_types[F18_M06H_CPUS]; + pvt->ops = &family_types[F18_M06H_CPUS].ops; + family_types[F18_M06H_CPUS].ctl_name = "F18h_M07h"; + break; + } else if (pvt->model == 0x10) { + fam_type = &family_types[F18_M10H_CPUS]; + pvt->ops = &family_types[F18_M10H_CPUS].ops; + break; + } + fam_type = &family_types[F17_CPUS]; + pvt->ops = &family_types[F17_CPUS].ops; + family_types[F17_CPUS].ctl_name = "F18h"; + break; + + case 0x19: + if (pvt->model >= 0x10 && pvt->model <= 0x1f) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + break; + } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) { + fam_type = &family_types[F17_M70H_CPUS]; + pvt->ops = &family_types[F17_M70H_CPUS].ops; + fam_type->ctl_name = "F19h_M20h"; + break; + } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + fam_type->ctl_name = "F19h_MA0h"; + break; + } + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3215,35 +3627,15 @@ static const struct attribute_group *amd64_edac_attr_groups[] = { NULL }; -static int init_one_instance(unsigned int nid) +static int hw_info_get(struct amd64_pvt *pvt) { - struct pci_dev *F3 = node_to_amd_nb(nid)->misc; - struct amd64_family_type *fam_type = NULL; - struct mem_ctl_info *mci = NULL; - struct edac_mc_layer layers[2]; - struct amd64_pvt *pvt = NULL; u16 pci_id1, pci_id2; - int err = 0, ret; - - ret = -ENOMEM; - pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); - if (!pvt) - goto err_ret; - - pvt->mc_node_id = nid; - pvt->F3 = F3; - - ret = -EINVAL; - fam_type = per_family_init(pvt); - if (!fam_type) - goto err_free; + int ret = -EINVAL; if (pvt->fam >= 0x17) { - pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL); - if (!pvt->umc) { - ret = -ENOMEM; - goto err_free; - } + pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); + if (!pvt->umc) + return -ENOMEM; pci_id1 = fam_type->f0_id; pci_id2 = fam_type->f6_id; @@ -3252,21 +3644,37 @@ static int init_one_instance(unsigned int nid) pci_id2 = fam_type->f2_id; } - err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2); - if (err) - goto err_post_init; + ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2); + if 
(ret) + return ret; read_mc_regs(pvt); + return 0; +} + +static void hw_info_put(struct amd64_pvt *pvt) +{ + if (pvt->F0 || pvt->F1) + free_mc_sibling_devs(pvt); + + kfree(pvt->umc); +} + +static int init_one_instance(struct amd64_pvt *pvt) +{ + struct mem_ctl_info *mci = NULL; + struct edac_mc_layer layers[2]; + int ret = -EINVAL; + /* * We need to determine how many memory channels there are. Then use * that information for calculating the size of the dynamic instance * tables in the 'mci' structure. */ - ret = -EINVAL; pvt->channel_count = pvt->ops->early_channel_count(pvt); if (pvt->channel_count < 0) - goto err_siblings; + return ret; ret = -ENOMEM; layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; @@ -3279,17 +3687,17 @@ static int init_one_instance(unsigned int nid) * only one channel. Also, this simplifies handling later for the price * of a couple of KBs tops. */ - layers[1].size = 2; + layers[1].size = fam_type->max_mcs; layers[1].is_virt_csrow = false; - mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); + mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); if (!mci) - goto err_siblings; + return ret; mci->pvt_info = pvt; mci->pdev = &pvt->F3->dev; - setup_mci_misc_attrs(mci, fam_type); + setup_mci_misc_attrs(mci); if (init_csrows(mci)) mci->edac_cap = EDAC_FLAG_NONE; @@ -3297,31 +3705,17 @@ static int init_one_instance(unsigned int nid) ret = -ENODEV; if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) { edac_dbg(1, "failed edac_mc_add_mc()\n"); - goto err_add_mc; + edac_mc_free(mci); + return ret; } return 0; - -err_add_mc: - edac_mc_free(mci); - -err_siblings: - free_mc_sibling_devs(pvt); - -err_post_init: - if (pvt->fam >= 0x17) - kfree(pvt->umc); - -err_free: - kfree(pvt); - -err_ret: - return ret; } static int probe_one_instance(unsigned int nid) { struct pci_dev *F3 = node_to_amd_nb(nid)->misc; + struct amd64_pvt *pvt = NULL; struct ecc_settings *s; int ret; @@ -3332,6 +3726,21 @@ static int probe_one_instance(unsigned int nid) ecc_stngs[nid] = s; + pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); + if (!pvt) + goto err_settings; + + pvt->mc_node_id = nid; + pvt->F3 = F3; + + fam_type = per_family_init(pvt); + if (!fam_type) + goto err_enable; + + ret = hw_info_get(pvt); + if (ret < 0) + goto err_enable; + if (!ecc_enabled(F3, nid)) { ret = 0; @@ -3348,7 +3757,7 @@ static int probe_one_instance(unsigned int nid) goto err_enable; } - ret = init_one_instance(nid); + ret = init_one_instance(pvt); if (ret < 0) { amd64_err("Error probing instance: %d\n", nid); @@ -3361,6 +3770,10 @@ static int probe_one_instance(unsigned int nid) return ret; err_enable: + hw_info_put(pvt); + kfree(pvt); + +err_settings: kfree(s); ecc_stngs[nid] = NULL; @@ -3387,14 +3800,13 @@ static void remove_one_instance(unsigned int nid) restore_ecc_error_reporting(s, nid, F3); - free_mc_sibling_devs(pvt); - kfree(ecc_stngs[nid]); ecc_stngs[nid] = NULL; /* Free the EDAC CORE resources */ mci->pvt_info = NULL; + hw_info_put(pvt); kfree(pvt); edac_mc_free(mci); } @@ -3428,6 +3840,8 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); @@ -3436,6 +3850,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = 
-ENODEV; + u16 instance_num; int i; owner = edac_get_owner(); @@ -3450,8 +3865,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -3459,7 +3879,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -3507,6 +3927,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -3520,7 +3941,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 1d4b74e9a037f70bd68bd8e8c615976691c52f9c..bfac6734f155328d793cd2c40846ceb42e19bf46 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -96,6 +96,7 @@ /* Hardware limit on ChipSelect rows per MC and processors per system */ #define NUM_CHIPSELECTS 8 #define DRAM_RANGES 8 +#define NUM_CONTROLLERS 12 #define ON true #define OFF false @@ -115,6 +116,21 @@ #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582 #define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460 #define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490 +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3 + +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F0 0x14b0 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F6 0x14b6 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F0 0x14d0 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F6 0x14d6 /* * Function 1 - Address Map @@ -164,7 +180,8 @@ #define DCSM0 0x60 #define DCSM1 0x160 -#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) +#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) +#define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE) #define DRAM_CONTROL 0x78 @@ -254,9 +271,14 @@ /* UMC CH register offsets */ #define UMCCH_BASE_ADDR 0x0 +#define UMCCH_BASE_ADDR_SEC 0x10 #define UMCCH_ADDR_MASK 0x20 +#define UMCCH_ADDR_MASK_SEC 0x28 +#define UMCCH_ADDR_MASK_SEC_DDR5 0x30 #define UMCCH_ADDR_CFG 0x30 +#define UMCCH_ADDR_CFG_DDR5 0x40 #define UMCCH_DIMM_CFG 0x80 +#define UMCCH_DIMM_CFG_DDR5 0x90 #define UMCCH_UMC_CFG 0x100 #define UMCCH_SDP_CTRL 0x104 #define UMCCH_ECC_CTRL 0x14C @@ -270,8 +292,6 @@ #define UMC_SDP_INIT BIT(31) -#define NUM_UMCS 2 - enum amd_families { K8_CPUS = 0, F10_CPUS, @@ -281,6 +301,13 @@ enum amd_families { F16_CPUS, F16_M30H_CPUS, F17_CPUS, + F17_M10H_CPUS, + F17_M30H_CPUS, + F17_M70H_CPUS, + F18_M06H_CPUS, + F18_M10H_CPUS, + F19_CPUS, + F19_M10H_CPUS, NUM_FAMILIES, }; @@ -307,9 +334,11 @@ 
struct dram_range { /* A DCT chip selects collection */ struct chip_select { u32 csbases[NUM_CHIPSELECTS]; + u32 csbases_sec[NUM_CHIPSELECTS]; u8 b_cnt; u32 csmasks[NUM_CHIPSELECTS]; + u32 csmasks_sec[NUM_CHIPSELECTS]; u8 m_cnt; }; @@ -319,6 +348,9 @@ struct amd64_umc { u32 sdp_ctrl; /* SDP Control reg */ u32 ecc_ctrl; /* DRAM ECC Control reg */ u32 umc_cap_hi; /* Capabilities High reg */ + + /* cache the dram_type */ + enum mem_type dram_type; }; struct amd64_pvt { @@ -347,8 +379,8 @@ struct amd64_pvt { u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ - /* one for each DCT */ - struct chip_select csels[2]; + /* one for each DCT/UMC */ + struct chip_select csels[NUM_CONTROLLERS]; /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */ struct dram_range ranges[DRAM_RANGES]; @@ -360,13 +392,18 @@ struct amd64_pvt { u32 dct_sel_hi; /* DRAM Controller Select High */ u32 online_spare; /* On-Line spare Reg */ - /* x4 or x8 syndromes in use */ + /* x4, x8, or x16 syndromes in use */ u8 ecc_sym_sz; /* place to store error injection parameters prior to issue */ struct error_injection injection; - /* cache the dram_type */ + /* + * cache the dram_type + * + * NOTE: Don't use this for Family 17h and later. + * Use dram_type in struct amd64_umc instead. + */ enum mem_type dram_type; struct amd64_umc *umc; /* UMC registers */ @@ -393,8 +430,8 @@ struct err_info { static inline u32 get_umc_base(u8 channel) { - /* ch0: 0x50000, ch1: 0x150000 */ - return 0x50000 + (!!channel << 20); + /* chY: 0xY50000 */ + return 0x50000 + (channel << 20); } static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i) @@ -463,9 +500,22 @@ struct low_ops { unsigned cs_mode, int cs_mask_nr); }; +struct amd64_family_flags { + /* + * Indicates that the system supports the new register offsets, etc. + * first introduced with Family 19h Model 10h. + */ + __u64 zn_regs_v2 : 1, + + __reserved : 63; +}; + struct amd64_family_type { const char *ctl_name; u16 f0_id, f1_id, f2_id, f6_id; + /* Maximum number of memory controllers per die/node. */ + u8 max_mcs; + struct amd64_family_flags flags; struct low_ops ops; }; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 7d3edd7139328b5466dfe2041d24e353e166ea7a..2056e3d6b6494042c30f0cad7eebc831b1cd8c7a 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -216,6 +216,9 @@ const char * const edac_mem_types[] = { [MEM_DDR4] = "Unbuffered-DDR4", [MEM_RDDR4] = "Registered-DDR4", [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", + [MEM_DDR5] = "Unbuffered-DDR5", + [MEM_RDDR5] = "Registered-DDR5", + [MEM_LRDDR5] = "Load-Reduced-DDR5-RAM", [MEM_NVDIMM] = "Non-volatile-RAM", }; EXPORT_SYMBOL_GPL(edac_mem_types); @@ -681,22 +684,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci) struct mem_ctl_info *edac_mc_find(int idx) { - struct mem_ctl_info *mci = NULL; + struct mem_ctl_info *mci; struct list_head *item; mutex_lock(&mem_ctls_mutex); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - - if (mci->mc_idx >= idx) { - if (mci->mc_idx == idx) { - goto unlock; - } - break; - } + if (mci->mc_idx == idx) + goto unlock; } + mci = NULL; unlock: mutex_unlock(&mem_ctls_mutex); return mci; @@ -1246,9 +1245,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, if (p > e->location) *(p - 1) = '\0'; - /* Report the error via the trace interface */ - grain_bits = fls_long(e->grain) + 1; + /* Sanity-check driver-supplied grain value. 
*/ + if (WARN_ON_ONCE(!e->grain)) + e->grain = 1; + grain_bits = fls_long(e->grain - 1); + + /* Report the error via the trace interface */ if (IS_ENABLED(CONFIG_RAS)) trace_mc_event(type, e->msg, e->label, e->error_count, mci->mc_idx, e->top_layer, e->mid_layer, diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 20374b8248f087e343738bee152a8b34e746d6d6..19158595f5a30d0a812184090a2992e9716ede4c 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -26,7 +26,7 @@ static int edac_mc_log_ue = 1; static int edac_mc_log_ce = 1; static int edac_mc_panic_on_ue; -static int edac_mc_poll_msec = 1000; +static unsigned int edac_mc_poll_msec = 1000; /* Getter functions for above */ int edac_mc_get_log_ue(void) @@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void) } /* this is temporary */ -int edac_mc_get_poll_msec(void) +unsigned int edac_mc_get_poll_msec(void) { return edac_mc_poll_msec; } static int edac_set_poll_msec(const char *val, const struct kernel_param *kp) { - unsigned long l; + unsigned int i; int ret; if (!val) return -EINVAL; - ret = kstrtoul(val, 0, &l); + ret = kstrtouint(val, 0, &i); if (ret) return ret; - if (l < 1000) + if (i < 1000) return -EINVAL; - *((unsigned long *)kp->arg) = l; + *((unsigned int *)kp->arg) = i; /* notify edac_mc engine to reset the poll period */ - edac_mc_reset_delay_period(l); + edac_mc_reset_delay_period(i); return 0; } @@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue, module_param(edac_mc_log_ce, int, 0644); MODULE_PARM_DESC(edac_mc_log_ce, "Log correctable error to console: 0=off 1=on"); -module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, +module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint, &edac_mc_poll_msec, 0644); MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); @@ -307,6 +307,14 @@ DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 6); DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 7); +DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 8); +DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 9); +DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 10); +DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 11); /* Total possible dynamic DIMM Label attribute file table */ static struct attribute *dynamic_csrow_dimm_attr[] = { @@ -318,6 +326,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { &dev_attr_legacy_ch5_dimm_label.attr.attr, &dev_attr_legacy_ch6_dimm_label.attr.attr, &dev_attr_legacy_ch7_dimm_label.attr.attr, + &dev_attr_legacy_ch8_dimm_label.attr.attr, + &dev_attr_legacy_ch9_dimm_label.attr.attr, + &dev_attr_legacy_ch10_dimm_label.attr.attr, + &dev_attr_legacy_ch11_dimm_label.attr.attr, NULL }; @@ -338,6 +350,14 @@ DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, channel_ce_count_show, NULL, 6); DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, channel_ce_count_show, NULL, 7); +DEVICE_CHANNEL(ch8_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 8); +DEVICE_CHANNEL(ch9_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 9); +DEVICE_CHANNEL(ch10_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 10); +DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 11); /* Total possible dynamic ce_count attribute file table */ 
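Editor's note (not part of the patch): the edac_mc.c hunk above reports the grain as fls_long(e->grain - 1), i.e. log2 of a power-of-two grain, instead of the old fls_long(e->grain) + 1, which over-reported by two bits. A small sketch of the corrected computation; it assumes the grain is a non-zero power of two, which the WARN_ON_ONCE() guard enforces by falling back to 1:

#include <linux/bitops.h>

/* For a power-of-two grain, fls_long(grain - 1) is log2(grain):
 * grain = 64   -> 6
 * grain = 4096 -> 12
 * The previous fls_long(grain) + 1 would have reported 8 and 14 here.
 */
static unsigned int grain_to_bits(unsigned long grain)
{
	return fls_long(grain - 1);
}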
static struct attribute *dynamic_csrow_ce_count_attr[] = { @@ -349,6 +369,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { &dev_attr_legacy_ch5_ce_count.attr.attr, &dev_attr_legacy_ch6_ce_count.attr.attr, &dev_attr_legacy_ch7_ce_count.attr.attr, + &dev_attr_legacy_ch8_ce_count.attr.attr, + &dev_attr_legacy_ch9_ce_count.attr.attr, + &dev_attr_legacy_ch10_ce_count.attr.attr, + &dev_attr_legacy_ch11_ce_count.attr.attr, NULL }; @@ -404,6 +428,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow) static int edac_create_csrow_object(struct mem_ctl_info *mci, struct csrow_info *csrow, int index) { + int err; + csrow->dev.type = &csrow_attr_type; csrow->dev.bus = mci->bus; csrow->dev.groups = csrow_dev_groups; @@ -416,7 +442,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci, edac_dbg(0, "creating (virtual) csrow node %s\n", dev_name(&csrow->dev)); - return device_add(&csrow->dev); + err = device_add(&csrow->dev); + if (err) + put_device(&csrow->dev); + + return err; } /* Create a CSROW object under specifed edac_mc_device */ diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index dec88dcea036f5b7672409fb2f2cfd78bc863609..c9f0e73872a6445b66e7669937fe5e014f8cff4a 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void); extern int edac_mc_get_log_ce(void); extern int edac_mc_get_panic_on_ue(void); extern int edac_get_poll_msec(void); -extern int edac_mc_get_poll_msec(void); +extern unsigned int edac_mc_get_poll_msec(void); unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, unsigned len); diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 473aeec4b1da44ec4c069d1225db85c67ea691a8..f7f8feb4498bca858367042f80f9c61a89a1ef45 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -23,8 +23,7 @@ struct ghes_edac_pvt { struct mem_ctl_info *mci; /* Buffers for the error handling routine */ - char detail_location[240]; - char other_detail[160]; + char other_detail[400]; char msg[80]; }; @@ -210,6 +209,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) /* Cleans the error report buffer */ memset(e, 0, sizeof (*e)); e->error_count = 1; + e->grain = 1; strcpy(e->label, "unknown label"); e->msg = pvt->msg; e->other_detail = pvt->other_detail; @@ -305,7 +305,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) /* Error grain */ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK) - e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK); + e->grain = ~mem_err->physical_addr_mask + 1; /* Memory error location, mapped on e->location */ p = e->location; @@ -339,6 +339,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) /* All other fields are mapped on e->other_detail */ p = pvt->other_detail; + p += snprintf(p, sizeof(pvt->other_detail), + "APEI location: %s ", e->location); if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) { u64 status = mem_err->error_status; @@ -412,14 +414,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) if (p > pvt->other_detail) *(p - 1) = '\0'; + /* Sanity-check driver-supplied grain value. 
*/ + if (WARN_ON_ONCE(!e->grain)) + e->grain = 1; + + grain_bits = fls_long(e->grain - 1); + /* Generate the trace event */ - grain_bits = fls_long(e->grain); - snprintf(pvt->detail_location, sizeof(pvt->detail_location), - "APEI location: %s %s", e->location, e->other_detail); trace_mc_event(type, e->msg, e->label, e->error_count, mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, - grain_bits, e->syndrome, pvt->detail_location); + grain_bits, e->syndrome, e->other_detail); edac_raw_mc_handle_error(type, mci, e); spin_unlock_irqrestore(&ghes_lock, flags); @@ -532,7 +537,11 @@ void ghes_edac_unregister(struct ghes *ghes) if (!ghes_pvt) return; + if (atomic_dec_return(&ghes_init)) + return; + mci = ghes_pvt->mci; + ghes_pvt = NULL; edac_mc_del_mc(mci->pdev); edac_mc_free(mci); } diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c new file mode 100644 index 0000000000000000000000000000000000000000..1c644cc024875b92db581828fca56d90e606f688 --- /dev/null +++ b/drivers/edac/i10nm_base.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Intel(R) 10nm server memory controller. + * Copyright (c) 2019, Intel Corporation. + * + */ + +#include +#include +#include +#include +#include "edac_module.h" +#include "skx_common.h" + +#define I10NM_REVISION "v0.0.3" +#define EDAC_MOD_STR "i10nm_edac" + +/* Debug macros */ +#define i10nm_printk(level, fmt, arg...) \ + edac_printk(level, "i10nm", fmt, ##arg) + +#define I10NM_GET_SCK_BAR(d, reg) \ + pci_read_config_dword((d)->uracu, 0xd0, &(reg)) +#define I10NM_GET_IMC_BAR(d, i, reg) \ + pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg)) +#define I10NM_GET_DIMMMTR(m, i, j) \ + (*(u32 *)((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)) +#define I10NM_GET_MCDDRTCFG(m, i, j) \ + (*(u32 *)((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)) + +#define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23) +#define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12) +#define I10NM_GET_IMC_MMIO_SIZE(reg) ((GET_BITFIELD(reg, 13, 23) - \ + GET_BITFIELD(reg, 0, 10) + 1) << 12) + +static struct list_head *i10nm_edac_list; + +static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, + unsigned int dev, unsigned int fun) +{ + struct pci_dev *pdev; + + pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun)); + if (!pdev) { + edac_dbg(2, "No device %02x:%02x.%x\n", + bus, dev, fun); + return NULL; + } + + if (unlikely(pci_enable_device(pdev) < 0)) { + edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", + bus, dev, fun); + return NULL; + } + + pci_dev_get(pdev); + + return pdev; +} + +static int i10nm_get_all_munits(void) +{ + struct pci_dev *mdev; + void __iomem *mbase; + unsigned long size; + struct skx_dev *d; + int i, j = 0; + u32 reg, off; + u64 base; + + list_for_each_entry(d, i10nm_edac_list, list) { + d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1); + if (!d->util_all) + return -ENODEV; + + d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1); + if (!d->uracu) + return -ENODEV; + + if (I10NM_GET_SCK_BAR(d, reg)) { + i10nm_printk(KERN_ERR, "Failed to socket bar\n"); + return -ENODEV; + } + + base = I10NM_GET_SCK_MMIO_BASE(reg); + edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n", + j++, base, reg); + + for (i = 0; i < I10NM_NUM_IMC; i++) { + mdev = pci_get_dev_wrapper(d->seg, d->bus[0], + 12 + i, 0); + if (i == 0 && !mdev) { + i10nm_printk(KERN_ERR, "No IMC found\n"); + return -ENODEV; + } + if (!mdev) + 
continue; + + d->imc[i].mdev = mdev; + + if (I10NM_GET_IMC_BAR(d, i, reg)) { + i10nm_printk(KERN_ERR, "Failed to get mc bar\n"); + return -ENODEV; + } + + off = I10NM_GET_IMC_MMIO_OFFSET(reg); + size = I10NM_GET_IMC_MMIO_SIZE(reg); + edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n", + i, base + off, size, reg); + + mbase = ioremap(base + off, size); + if (!mbase) { + i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", + base + off); + return -ENODEV; + } + + d->imc[i].mbase = mbase; + } + } + + return 0; +} + +static struct res_config i10nm_cfg0 = { + .type = I10NM, + .decs_did = 0x3452, + .busno_cfg_offset = 0xcc, +}; + +static struct res_config i10nm_cfg1 = { + .type = I10NM, + .decs_did = 0x3452, + .busno_cfg_offset = 0xd0, +}; + +static const struct x86_cpu_id i10nm_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, 0, (kernel_ulong_t)&i10nm_cfg0}, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, (kernel_ulong_t)&i10nm_cfg0}, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_XEON_D, 0, (kernel_ulong_t)&i10nm_cfg1}, + { } +}; +MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); + +static bool i10nm_check_ecc(struct skx_imc *imc, int chan) +{ + u32 mcmtr; + + mcmtr = *(u32 *)(imc->mbase + 0x20ef8 + chan * 0x4000); + edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr); + + return !!GET_BITFIELD(mcmtr, 2, 2); +} + +static int i10nm_get_dimm_config(struct mem_ctl_info *mci) +{ + struct skx_pvt *pvt = mci->pvt_info; + struct skx_imc *imc = pvt->imc; + struct dimm_info *dimm; + u32 mtr, mcddrtcfg; + int i, j, ndimms; + + for (i = 0; i < I10NM_NUM_CHANNELS; i++) { + if (!imc->mbase) + continue; + + ndimms = 0; + for (j = 0; j < I10NM_NUM_DIMMS; j++) { + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, + mci->n_layers, i, j, 0); + mtr = I10NM_GET_DIMMMTR(imc, i, j); + mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j); + edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n", + mtr, mcddrtcfg, imc->mc, i, j); + + if (IS_DIMM_PRESENT(mtr)) + ndimms += skx_get_dimm_info(mtr, 0, dimm, + imc, i, j); + else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, + EDAC_MOD_STR); + } + if (ndimms && !i10nm_check_ecc(imc, i)) { + i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n", + imc->mc, i); + return -ENODEV; + } + } + + return 0; +} + +static struct notifier_block i10nm_mce_dec = { + .notifier_call = skx_mce_check_error, + .priority = MCE_PRIO_EDAC, +}; + +#ifdef CONFIG_EDAC_DEBUG +/* + * Debug feature. + * Exercise the address decode logic by writing an address to + * /sys/kernel/debug/edac/i10nm_test/addr. 
+ */ +static struct dentry *i10nm_test; + +static int debugfs_u64_set(void *data, u64 val) +{ + struct mce m; + + pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); + + memset(&m, 0, sizeof(m)); + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x90; + /* One corrected error */ + m.status |= BIT_ULL(38); + m.addr = val; + skx_mce_check_error(NULL, 0, &m); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); + +static void setup_i10nm_debug(void) +{ + i10nm_test = edac_debugfs_create_dir("i10nm_test"); + if (!i10nm_test) + return; + + if (!edac_debugfs_create_file("addr", 0200, i10nm_test, + NULL, &fops_u64_wo)) { + debugfs_remove(i10nm_test); + i10nm_test = NULL; + } +} + +static void teardown_i10nm_debug(void) +{ + debugfs_remove_recursive(i10nm_test); +} +#else +static inline void setup_i10nm_debug(void) {} +static inline void teardown_i10nm_debug(void) {} +#endif /*CONFIG_EDAC_DEBUG*/ + +static int __init i10nm_init(void) +{ + u8 mc = 0, src_id = 0, node_id = 0; + const struct x86_cpu_id *id; + struct res_config *cfg; + const char *owner; + struct skx_dev *d; + int rc, i, off[3] = {0xd0, 0xc8, 0xcc}; + u64 tolm, tohm; + + edac_dbg(2, "\n"); + + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + + id = x86_match_cpu(i10nm_cpuids); + if (!id) + return -ENODEV; + + cfg = (struct res_config *)id->driver_data; + + /* Newer steppings have different offset for ATOM_TREMONT_D/ICELAKE_X */ + if (boot_cpu_data.x86_stepping >= 4) + cfg->busno_cfg_offset = 0xd0; + + rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm); + if (rc) + return rc; + + rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list); + if (rc < 0) + goto fail; + if (rc == 0) { + i10nm_printk(KERN_ERR, "No memory controllers found\n"); + return -ENODEV; + } + + rc = i10nm_get_all_munits(); + if (rc < 0) + goto fail; + + list_for_each_entry(d, i10nm_edac_list, list) { + rc = skx_get_src_id(d, 0xf8, &src_id); + if (rc < 0) + goto fail; + + rc = skx_get_node_id(d, &node_id); + if (rc < 0) + goto fail; + + edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id); + for (i = 0; i < I10NM_NUM_IMC; i++) { + if (!d->imc[i].mdev) + continue; + + d->imc[i].mc = mc++; + d->imc[i].lmc = i; + d->imc[i].src_id = src_id; + d->imc[i].node_id = node_id; + + rc = skx_register_mci(&d->imc[i], d->imc[i].mdev, + "Intel_10nm Socket", EDAC_MOD_STR, + i10nm_get_dimm_config); + if (rc < 0) + goto fail; + } + } + + rc = skx_adxl_get(); + if (rc) + goto fail; + + opstate_init(); + mce_register_decode_chain(&i10nm_mce_dec); + setup_i10nm_debug(); + + i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION); + + return 0; +fail: + skx_remove(); + return rc; +} + +static void __exit i10nm_exit(void) +{ + edac_dbg(2, "\n"); + teardown_i10nm_debug(); + mce_unregister_decode_chain(&i10nm_mce_dec); + skx_adxl_put(); + skx_remove(); +} + +module_init(i10nm_init); +module_exit(i10nm_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors"); diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index d92d56cee10170df7600d3812dd1b1c1ad698abe..299b441647cd5c62740403c7476e79bf35983e13 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -399,7 +399,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) if (nr_pages == 0) continue; - edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j, + edac_dbg(0, "csrow %d, channel %d%s, size = %ld MiB\n", i, j, stacked ? 
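Illustrative sketch (not part of the patch; register values are made up): the I10NM_GET_*_MMIO_* macros in the new i10nm driver above carve the per-socket and per-IMC MMIO windows out of the uracu config registers with GET_BITFIELD(). The same bit layout, compiled stand-alone:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					  GET_BITFIELD(reg, 0, 10) + 1) << 12)

int main(void)
{
	uint64_t sck_reg = 0x7e0;	/* hypothetical socket BAR register */
	uint64_t imc_reg = 0x7ff010;	/* hypothetical per-IMC BAR register */

	printf("socket mmio base 0x%llx\n",
	       (unsigned long long)I10NM_GET_SCK_MMIO_BASE(sck_reg));
	printf("imc mmio offset 0x%llx size 0x%llx\n",
	       (unsigned long long)I10NM_GET_IMC_MMIO_OFFSET(imc_reg),
	       (unsigned long long)I10NM_GET_IMC_MMIO_SIZE(imc_reg));
	return 0;
}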
" (stacked)" : "", PAGES_TO_MiB(nr_pages)); dimm->nr_pages = nr_pages; diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 8e120bf6062434a92cfa46657da849d95e3294dd..4a3300c2da333311d29ca0dacab86882215b0156 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -597,7 +597,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) /* DDR3 has 8 I/O banks */ size = (rows * cols * banks * ranks) >> (20 - 3); - edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n", + edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n", j, size, RANKOFFSET(dimm_dod[j]), banks, ranks, rows, cols); @@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, u32 errnum = find_first_bit(&error, 32); if (uncorrected_error) { + core_err_cnt = 1; if (ripv) tp_event = HW_EVENT_ERR_FATAL; else diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 2ab4d61ee47e86d41a730302632ebb0a7cd4b4be..886a6b61dbd8cae7d733ea41b3d2c643e5decf49 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -155,7 +155,7 @@ static const char * const smca_ls_mce_desc[] = { "Store queue parity", "Miss address buffer payload parity", "L1 TLB parity", - "Reserved", + "DC Tag error type 5", "DC tag error type 6", "DC tag error type 1", "Internal error type 1", @@ -174,6 +174,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 fill data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "microtag probe port parity error", "IC microtag or full tag multi-hit error", @@ -222,6 +249,7 @@ static const char * const smca_ex_mce_desc[] = { "Retire status queue parity error", "Scheduling queue parity error", "Branch 
buffer queue parity error", + "Hardware Assertion error", }; static const char * const smca_fp_mce_desc[] = { @@ -257,11 +285,29 @@ static const char * const smca_cs_mce_desc[] = { "ECC error on probe filter access", }; +static const char * const smca_cs2_mce_desc[] = { + "Illegal Request", + "Address Violation", + "Security Violation", + "Illegal Response", + "Unexpected Response", + "Request or Probe Parity Error", + "Read Response Parity Error", + "Atomic Request Parity Error", + "SDP read response had no match in the CS queue", + "Probe Filter Protocol Error", + "Probe Filter ECC Error", + "SDP read response had an unexpected RETRY error", + "Counter overflow error", + "Counter underflow error", +}; + static const char * const smca_pie_mce_desc[] = { "HW assert", "Internal PIE register security violation", "Error on GMI link", "Poison data written to internal PIE register", + "A deferred error was detected in the DF" }; static const char * const smca_umc_mce_desc[] = { @@ -271,6 +317,8 @@ static const char * const smca_umc_mce_desc[] = { "Advanced peripheral bus error", "Command/address parity error", "Write data CRC error", + "DCQ SRAM ECC error", + "AES SRAM ECC error", }; static const char * const smca_pb_mce_desc[] = { @@ -281,10 +329,74 @@ static const char * const smca_psp_mce_desc[] = { "PSP RAM ECC or parity error", }; +static const char * const smca_psp2_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Instruction Cache Bank 0 ECC or parity error", + "Instruction Cache Bank 1 ECC or parity error", + "Instruction Tag Ram 0 parity error", + "Instruction Tag Ram 1 parity error", + "Data Cache Bank 0 ECC or parity error", + "Data Cache Bank 1 ECC or parity error", + "Data Cache Bank 2 ECC or parity error", + "Data Cache Bank 3 ECC or parity error", + "Data Tag Bank 0 parity error", + "Data Tag Bank 1 parity error", + "Data Tag Bank 2 parity error", + "Data Tag Bank 3 parity error", + "Dirty Data Ram parity error", + "TLB Bank 0 parity error", + "TLB Bank 1 parity error", + "System Hub Read Buffer ECC or parity error", +}; + static const char * const smca_smu_mce_desc[] = { "SMU RAM ECC or parity error", }; +static const char * const smca_smu2_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", + "System Hub Read Buffer ECC or parity error", +}; + +static const char * const smca_mp5_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", +}; + +static const char * const smca_nbio_mce_desc[] = { + "ECC or Parity error", + "PCIE error", + "SDP ErrEvent error", + "SDP Egress Poison Error", + "IOHC Internal Poison Error", +}; + +static const char * const smca_pcie_mce_desc[] = { + "CCIX PER Message logging", + "CCIX Read 
Response with Status: Non-Data Error", + "CCIX Write Response with Status: Non-Data Error", + "CCIX Read Response with Status: Data Error", + "CCIX Non-okay write response with data error", +}; + struct smca_mce_desc { const char * const *descs; unsigned int num_descs; @@ -292,6 +404,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -299,11 +412,17 @@ static struct smca_mce_desc smca_mce_descs[] = { [SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) }, [SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) }, [SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) }, + [SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) }, [SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) }, [SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) }, [SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) }, [SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) }, + [SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) }, [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) }, + [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) }, + [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) }, + [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) }, + [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) }, }; static bool f12h_mc0_mce(u16 ec, u8 xec) @@ -883,8 +1002,13 @@ static void decode_smca_error(struct mce *m) pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]); } - if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) - decode_dram_ecc(cpu_to_node(m->extcpu), m); + if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + decode_dram_ecc(topology_logical_die_id(m->extcpu), m); + else + decode_dram_ecc(topology_die_id(m->extcpu), m); + } } static inline void amd_decode_err_code(u16 ec) @@ -1059,13 +1183,19 @@ static int __init mce_amd_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; - if (c->x86_vendor != X86_VENDOR_AMD) + if (c->x86_vendor != X86_VENDOR_AMD && + c->x86_vendor != X86_VENDOR_HYGON) return -ENODEV; fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); if (!fam_ops) return -ENOMEM; + if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } + switch (c->x86) { case 0xf: fam_ops->mc0_mce = k8_mc0_mce; @@ -1113,11 +1243,9 @@ static int __init mce_amd_init(void) break; case 0x17: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } + case 0x18: + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + goto err_out; break; default: @@ -1125,6 +1253,7 @@ static int __init mce_amd_init(void) goto err_out; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index df28b65358d26f26a6ec054222752fb2e497ba5f..0153c730750e5b10ea91280d77cdf87db09fe0f8 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -268,11 +268,14 @@ 
static u64 get_sideband_reg_base_addr(void) } } +#define DNV_MCHBAR_SIZE 0x8000 +#define DNV_SB_PORT_SIZE 0x10000 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) { struct pci_dev *pdev; char *base; u64 addr; + unsigned long size; if (op == 4) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); @@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na addr = get_mem_ctrl_hub_base_addr(); if (!addr) return -ENODEV; + size = DNV_MCHBAR_SIZE; } else { /* MMIO via sideband register base address */ addr = get_sideband_reg_base_addr(); if (!addr) return -ENODEV; addr += (port << 16); + size = DNV_SB_PORT_SIZE; } - base = ioremap((resource_size_t)addr, 0x10000); + base = ioremap((resource_size_t)addr, size); if (!base) return -ENODEV; @@ -1541,7 +1546,7 @@ static struct dunit_ops dnv_ops = { static const struct x86_cpu_id pnd2_cpuids[] = { { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops }, { } }; MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 07726fb00321e8cb5a9e53a9d73ac01cfbbc1cfa..53074ad361e58c179fe205bc5cf0b7876ad9fa16 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -1622,7 +1622,7 @@ static int __populate_dimms(struct mem_ctl_info *mci, size = ((u64)rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); - edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", + edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j, size, npages, banks, ranks, rows, cols); @@ -2888,6 +2888,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, recoverable = GET_BITFIELD(m->status, 56, 56); if (uncorrected_error) { + core_err_cnt = 1; if (ripv) { type = "FATAL"; tp_event = HW_EVENT_ERR_FATAL; @@ -2911,35 +2912,27 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, * cccc = channel * If the mask doesn't match, report an error to the parsing logic */ - if (! 
((errcode & 0xef80) == 0x80)) { - optype = "Can't parse: it is not a mem"; - } else { - switch (optypenum) { - case 0: - optype = "generic undef request error"; - break; - case 1: - optype = "memory read error"; - break; - case 2: - optype = "memory write error"; - break; - case 3: - optype = "addr/cmd error"; - break; - case 4: - optype = "memory scrubbing error"; - break; - default: - optype = "reserved"; - break; - } + switch (optypenum) { + case 0: + optype = "generic undef request error"; + break; + case 1: + optype = "memory read error"; + break; + case 2: + optype = "memory write error"; + break; + case 3: + optype = "addr/cmd error"; + break; + case 4: + optype = "memory scrubbing error"; + break; + default: + optype = "reserved"; + break; } - /* Only decode errors with an valid address (ADDRV) */ - if (!GET_BITFIELD(m->status, 58, 58)) - return; - if (pvt->info.type == KNIGHTS_LANDING) { if (channel == 14) { edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n", @@ -3045,17 +3038,11 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, { struct mce *mce = (struct mce *)data; struct mem_ctl_info *mci; - struct sbridge_pvt *pvt; char *type; if (edac_get_report_status() == EDAC_REPORTING_DISABLED) return NOTIFY_DONE; - mci = get_mci_for_node_id(mce->socketid, IMC0); - if (!mci) - return NOTIFY_DONE; - pvt = mci->pvt_info; - /* * Just let mcelog handle it if the error is * outside the memory controller. A memory error @@ -3065,6 +3052,22 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, if ((mce->status & 0xefff) >> 7 != 1) return NOTIFY_DONE; + /* Check ADDRV bit in STATUS */ + if (!GET_BITFIELD(mce->status, 58, 58)) + return NOTIFY_DONE; + + /* Check MISCV bit in STATUS */ + if (!GET_BITFIELD(mce->status, 59, 59)) + return NOTIFY_DONE; + + /* Check address type in MISC (physical address only) */ + if (GET_BITFIELD(mce->misc, 6, 8) != 2) + return NOTIFY_DONE; + + mci = get_mci_for_node_id(mce->socketid, IMC0); + if (!mci) + return NOTIFY_DONE; + if (mce->mcgstatus & MCG_STATUS_MCIP) type = "Exception"; else diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_base.c similarity index 48% rename from drivers/edac/skx_edac.c rename to drivers/edac/skx_base.c index fae095162c0175d860bfb9669b6036e4773282fd..ebde975b178b61cda214c26985aa65b8aa8981e5 100644 --- a/drivers/edac/skx_edac.c +++ b/drivers/edac/skx_base.c @@ -1,38 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 /* * EDAC driver for Intel(R) Xeon(R) Skylake processors * Copyright (c) 2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include #include -#include #include #include "edac_module.h" +#include "skx_common.h" #define EDAC_MOD_STR "skx_edac" @@ -45,82 +24,20 @@ #define skx_mc_printk(mci, level, fmt, arg...) 
\ edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) -/* - * Get a bit field at register value , from bit to bit - */ -#define GET_BITFIELD(v, lo, hi) \ - (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) - -static LIST_HEAD(skx_edac_list); +static struct list_head *skx_edac_list; static u64 skx_tolm, skx_tohm; - -#define NUM_IMC 2 /* memory controllers per socket */ -#define NUM_CHANNELS 3 /* channels per memory controller */ -#define NUM_DIMMS 2 /* Max DIMMS per channel */ +static int skx_num_sockets; +static unsigned int nvdimm_count; #define MASK26 0x3FFFFFF /* Mask for 2^26 */ #define MASK29 0x1FFFFFFF /* Mask for 2^29 */ -/* - * Each cpu socket contains some pci devices that provide global - * information, and also some that are local to each of the two - * memory controllers on the die. - */ -struct skx_dev { - struct list_head list; - u8 bus[4]; - int seg; - struct pci_dev *sad_all; - struct pci_dev *util_all; - u32 mcroute; - struct skx_imc { - struct mem_ctl_info *mci; - u8 mc; /* system wide mc# */ - u8 lmc; /* socket relative mc# */ - u8 src_id, node_id; - struct skx_channel { - struct pci_dev *cdev; - struct skx_dimm { - u8 close_pg; - u8 bank_xor_enable; - u8 fine_grain_bank; - u8 rowbits; - u8 colbits; - } dimms[NUM_DIMMS]; - } chan[NUM_CHANNELS]; - } imc[NUM_IMC]; -}; -static int skx_num_sockets; - -struct skx_pvt { - struct skx_imc *imc; -}; - -struct decoded_addr { - struct skx_dev *dev; - u64 addr; - int socket; - int imc; - int channel; - u64 chan_addr; - int sktways; - int chanways; - int dimm; - int rank; - int channel_rank; - u64 rank_address; - int row; - int column; - int bank_address; - int bank_group; -}; - static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) { struct skx_dev *d; - list_for_each_entry(d, &skx_edac_list, list) { + list_for_each_entry(d, skx_edac_list, list) { if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number) return d; } @@ -129,12 +46,13 @@ static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) } enum munittype { - CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD + CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD, + ERRCHAN0, ERRCHAN1, ERRCHAN2, }; struct munit { u16 did; - u16 devfn[NUM_IMC]; + u16 devfn[SKX_NUM_IMC]; u8 busidx; u8 per_socket; enum munittype mtype; @@ -151,49 +69,13 @@ static const struct munit skx_all_munits[] = { { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 }, { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 }, { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 }, + { 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 }, + { 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 }, + { 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 }, { 0x208e, { }, 1, 0, SAD }, { } }; -/* - * We use the per-socket device 0x2016 to count how many sockets are present, - * and to detemine which PCI buses are associated with each socket. Allocate - * and build the full list of all the skx_dev structures that we need here. 
- */ -static int get_all_bus_mappings(void) -{ - struct pci_dev *pdev, *prev; - struct skx_dev *d; - u32 reg; - int ndev = 0; - - prev = NULL; - for (;;) { - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev); - if (!pdev) - break; - ndev++; - d = kzalloc(sizeof(*d), GFP_KERNEL); - if (!d) { - pci_dev_put(pdev); - return -ENOMEM; - } - d->seg = pci_domain_nr(pdev->bus); - pci_read_config_dword(pdev, 0xCC, ®); - d->bus[0] = GET_BITFIELD(reg, 0, 7); - d->bus[1] = GET_BITFIELD(reg, 8, 15); - d->bus[2] = GET_BITFIELD(reg, 16, 23); - d->bus[3] = GET_BITFIELD(reg, 24, 31); - edac_dbg(2, "busses: %x, %x, %x, %x\n", - d->bus[0], d->bus[1], d->bus[2], d->bus[3]); - list_add_tail(&d->list, &skx_edac_list); - skx_num_sockets++; - prev = pdev; - } - - return ndev; -} - static int get_all_munits(const struct munit *m) { struct pci_dev *pdev, *prev; @@ -207,11 +89,11 @@ static int get_all_munits(const struct munit *m) if (!pdev) break; ndev++; - if (m->per_socket == NUM_IMC) { - for (i = 0; i < NUM_IMC; i++) + if (m->per_socket == SKX_NUM_IMC) { + for (i = 0; i < SKX_NUM_IMC; i++) if (m->devfn[i] == pdev->devfn) break; - if (i == NUM_IMC) + if (i == SKX_NUM_IMC) goto fail; } d = get_skx_dev(pdev->bus, m->busidx); @@ -220,16 +102,24 @@ static int get_all_munits(const struct munit *m) /* Be sure that the device is enabled */ if (unlikely(pci_enable_device(pdev) < 0)) { - skx_printk(KERN_ERR, - "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did); + skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n", + PCI_VENDOR_ID_INTEL, m->did); goto fail; } switch (m->mtype) { - case CHAN0: case CHAN1: case CHAN2: + case CHAN0: + case CHAN1: + case CHAN2: pci_dev_get(pdev); d->imc[i].chan[m->mtype].cdev = pdev; break; + case ERRCHAN0: + case ERRCHAN1: + case ERRCHAN2: + pci_dev_get(pdev); + d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev; + break; case SAD_ALL: pci_dev_get(pdev); d->sad_all = pdev; @@ -247,11 +137,10 @@ static int get_all_munits(const struct munit *m) */ pci_read_config_dword(pdev, 0xB4, ®); if (reg != 0) { - if (d->mcroute == 0) + if (d->mcroute == 0) { d->mcroute = reg; - else if (d->mcroute != reg) { - skx_printk(KERN_ERR, - "mcroute mismatch\n"); + } else if (d->mcroute != reg) { + skx_printk(KERN_ERR, "mcroute mismatch\n"); goto fail; } } @@ -268,173 +157,20 @@ static int get_all_munits(const struct munit *m) return -ENODEV; } +static struct res_config skx_cfg = { + .type = SKX, + .decs_did = 0x2016, + .busno_cfg_offset = 0xcc, +}; + static const struct x86_cpu_id skx_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, (kernel_ulong_t)&skx_cfg}, { } }; MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); -static u8 get_src_id(struct skx_dev *d) -{ - u32 reg; - - pci_read_config_dword(d->util_all, 0xF0, ®); - - return GET_BITFIELD(reg, 12, 14); -} - -static u8 skx_get_node_id(struct skx_dev *d) -{ - u32 reg; - - pci_read_config_dword(d->util_all, 0xF4, ®); - - return GET_BITFIELD(reg, 0, 2); -} - -static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, - int maxval, char *name) -{ - u32 val = GET_BITFIELD(reg, lobit, hibit); - - if (val < minval || val > maxval) { - edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg); - return -EINVAL; - } - return val + add; -} - -#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15) -#define IS_NVDIMM_PRESENT(mcddrtcfg, i) GET_BITFIELD((mcddrtcfg), (i), (i)) - -#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks") -#define numrow(reg) 
get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows") -#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols") - -static int get_width(u32 mtr) -{ - switch (GET_BITFIELD(mtr, 8, 9)) { - case 0: - return DEV_X4; - case 1: - return DEV_X8; - case 2: - return DEV_X16; - } - return DEV_UNKNOWN; -} - -static int skx_get_hi_lo(void) -{ - struct pci_dev *pdev; - u32 reg; - - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL); - if (!pdev) { - edac_dbg(0, "Can't get tolm/tohm\n"); - return -ENODEV; - } - - pci_read_config_dword(pdev, 0xD0, ®); - skx_tolm = reg; - pci_read_config_dword(pdev, 0xD4, ®); - skx_tohm = reg; - pci_read_config_dword(pdev, 0xD8, ®); - skx_tohm |= (u64)reg << 32; - - pci_dev_put(pdev); - edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm); - - return 0; -} - -static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, - struct skx_imc *imc, int chan, int dimmno) -{ - int banks = 16, ranks, rows, cols, npages; - u64 size; - - ranks = numrank(mtr); - rows = numrow(mtr); - cols = numcol(mtr); - - /* - * Compute size in 8-byte (2^3) words, then shift to MiB (2^20) - */ - size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3); - npages = MiB_TO_PAGES(size); - - edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", - imc->mc, chan, dimmno, size, npages, - banks, 1 << ranks, rows, cols); - - imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); - imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); - imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); - imc->chan[chan].dimms[dimmno].rowbits = rows; - imc->chan[chan].dimms[dimmno].colbits = cols; - - dimm->nr_pages = npages; - dimm->grain = 32; - dimm->dtype = get_width(mtr); - dimm->mtype = MEM_DDR4; - dimm->edac_mode = EDAC_SECDED; /* likely better than this */ - snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", - imc->src_id, imc->lmc, chan, dimmno); - - return 1; -} - -static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, - int chan, int dimmno) -{ - int smbios_handle; - u32 dev_handle; - u16 flags; - u64 size = 0; - - dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, - imc->src_id, 0); - - smbios_handle = nfit_get_smbios_id(dev_handle, &flags); - if (smbios_handle == -EOPNOTSUPP) { - pr_warn_once(EDAC_MOD_STR ": Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n"); - goto unknown_size; - } - - if (smbios_handle < 0) { - skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=%x\n", dev_handle); - goto unknown_size; - } - - if (flags & ACPI_NFIT_MEM_MAP_FAILED) { - skx_printk(KERN_ERR, "NVDIMM ADR=%x is not mapped\n", dev_handle); - goto unknown_size; - } - - size = dmi_memdev_size(smbios_handle); - if (size == ~0ull) - skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=%x/SMBIOS=%x\n", - dev_handle, smbios_handle); - -unknown_size: - dimm->nr_pages = size >> PAGE_SHIFT; - dimm->grain = 32; - dimm->dtype = DEV_UNKNOWN; - dimm->mtype = MEM_NVDIMM; - dimm->edac_mode = EDAC_SECDED; /* likely better than this */ - - edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu Mb (%u pages)\n", - imc->mc, chan, dimmno, size >> 20, dimm->nr_pages); - - snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", - imc->src_id, imc->lmc, chan, dimmno); - - return (size == 0 || size == ~0ull) ? 
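Illustrative sketch (not part of the patch; register values are made up): skx_get_hi_lo(), removed here and re-added in generalized form in skx_common.c further down, builds the 64-bit top-of-high-memory limit from two 32-bit config reads, while top-of-low-memory fits in a single register:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tolm_reg = 0x80000000;	/* hypothetical TOLM config value */
	uint32_t tohm_lo = 0xc0000000;	/* hypothetical lower TOHM */
	uint32_t tohm_hi = 0x00000008;	/* hypothetical upper TOHM */

	uint64_t tolm = tolm_reg;
	uint64_t tohm = tohm_lo;

	tohm |= (uint64_t)tohm_hi << 32;	/* same "|= (u64)reg << 32" as the driver */

	printf("tolm=0x%llx tohm=0x%llx\n",
	       (unsigned long long)tolm, (unsigned long long)tohm);
	return 0;
}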
0 : 1; -} - #define SKX_GET_MTMTR(dev, reg) \ - pci_read_config_dword((dev), 0x87c, ®) + pci_read_config_dword((dev), 0x87c, &(reg)) static bool skx_check_ecc(struct pci_dev *pdev) { @@ -454,19 +190,22 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) int i, j; int ndimms; - for (i = 0; i < NUM_CHANNELS; i++) { + for (i = 0; i < SKX_NUM_CHANNELS; i++) { ndimms = 0; pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg); - for (j = 0; j < NUM_DIMMS; j++) { + for (j = 0; j < SKX_NUM_DIMMS; j++) { dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); pci_read_config_dword(imc->chan[i].cdev, - 0x80 + 4*j, &mtr); - if (IS_DIMM_PRESENT(mtr)) - ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j); - else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) - ndimms += get_nvdimm_info(dimm, imc, i, j); + 0x80 + 4 * j, &mtr); + if (IS_DIMM_PRESENT(mtr)) { + ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j); + } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) { + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, + EDAC_MOD_STR); + nvdimm_count++; + } } if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); @@ -477,95 +216,12 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) return 0; } -static void skx_unregister_mci(struct skx_imc *imc) -{ - struct mem_ctl_info *mci = imc->mci; - - if (!mci) - return; - - edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); - - /* Remove MC sysfs nodes */ - edac_mc_del_mc(mci->pdev); - - edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); - kfree(mci->ctl_name); - edac_mc_free(mci); -} - -static int skx_register_mci(struct skx_imc *imc) -{ - struct mem_ctl_info *mci; - struct edac_mc_layer layers[2]; - struct pci_dev *pdev = imc->chan[0].cdev; - struct skx_pvt *pvt; - int rc; - - /* allocate a new MC control structure */ - layers[0].type = EDAC_MC_LAYER_CHANNEL; - layers[0].size = NUM_CHANNELS; - layers[0].is_virt_csrow = false; - layers[1].type = EDAC_MC_LAYER_SLOT; - layers[1].size = NUM_DIMMS; - layers[1].is_virt_csrow = true; - mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, - sizeof(struct skx_pvt)); - - if (unlikely(!mci)) - return -ENOMEM; - - edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); - - /* Associate skx_dev and mci for future usage */ - imc->mci = mci; - pvt = mci->pvt_info; - pvt->imc = imc; - - mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", - imc->node_id, imc->lmc); - if (!mci->ctl_name) { - rc = -ENOMEM; - goto fail0; - } - - mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM; - mci->edac_ctl_cap = EDAC_FLAG_NONE; - mci->edac_cap = EDAC_FLAG_NONE; - mci->mod_name = EDAC_MOD_STR; - mci->dev_name = pci_name(imc->chan[0].cdev); - mci->ctl_page_to_phys = NULL; - - rc = skx_get_dimm_config(mci); - if (rc < 0) - goto fail; - - /* record ptr to the generic device */ - mci->pdev = &pdev->dev; - - /* add this new MC control structure to EDAC's list of MCs */ - if (unlikely(edac_mc_add_mc(mci))) { - edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); - rc = -EINVAL; - goto fail; - } - - return 0; - -fail: - kfree(mci->ctl_name); -fail0: - edac_mc_free(mci); - imc->mci = NULL; - return rc; -} - #define SKX_MAX_SAD 24 #define SKX_GET_SAD(d, i, reg) \ - pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), ®) + pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &(reg)) #define SKX_GET_ILV(d, i, reg) \ - pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), ®) + pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), 
&(reg)) #define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31) #define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27) @@ -578,9 +234,42 @@ static int skx_register_mci(struct skx_imc *imc) #define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) #define SKX_ILV_TARGET(tgt) ((tgt) & 7) +static void skx_show_retry_rd_err_log(struct decoded_addr *res, + char *msg, int len) +{ + u32 log0, log1, log2, log3, log4; + u32 corr0, corr1, corr2, corr3; + struct pci_dev *edev; + int n; + + edev = res->dev->imc[res->imc].chan[res->channel].edev; + + pci_read_config_dword(edev, 0x154, &log0); + pci_read_config_dword(edev, 0x148, &log1); + pci_read_config_dword(edev, 0x150, &log2); + pci_read_config_dword(edev, 0x15c, &log3); + pci_read_config_dword(edev, 0x114, &log4); + + n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]", + log0, log1, log2, log3, log4); + + pci_read_config_dword(edev, 0x104, &corr0); + pci_read_config_dword(edev, 0x108, &corr1); + pci_read_config_dword(edev, 0x10c, &corr2); + pci_read_config_dword(edev, 0x110, &corr3); + + if (len - n > 0) + snprintf(msg + n, len - n, + " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]", + corr0 & 0xffff, corr0 >> 16, + corr1 & 0xffff, corr1 >> 16, + corr2 & 0xffff, corr2 >> 16, + corr3 & 0xffff, corr3 >> 16); +} + static bool skx_sad_decode(struct decoded_addr *res) { - struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list); + struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list); u64 addr = res->addr; int i, idx, tgt, lchan, shift; u32 sad, ilv; @@ -589,7 +278,7 @@ static bool skx_sad_decode(struct decoded_addr *res) /* Simple sanity check for I/O space or out of range */ if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { - edac_dbg(0, "Address %llx out of range\n", addr); + edac_dbg(0, "Address 0x%llx out of range\n", addr); return false; } @@ -604,7 +293,7 @@ static bool skx_sad_decode(struct decoded_addr *res) } prev_limit = limit + 1; } - edac_dbg(0, "No SAD entry for %llx\n", addr); + edac_dbg(0, "No SAD entry for 0x%llx\n", addr); return false; sad_found: @@ -634,7 +323,7 @@ static bool skx_sad_decode(struct decoded_addr *res) return false; } remote = 1; - list_for_each_entry(d, &skx_edac_list, list) { + list_for_each_entry(d, skx_edac_list, list) { if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) goto restart; } @@ -642,9 +331,9 @@ static bool skx_sad_decode(struct decoded_addr *res) return false; } - if (SKX_SAD_MOD3(sad) == 0) + if (SKX_SAD_MOD3(sad) == 0) { lchan = SKX_ILV_TARGET(tgt); - else { + } else { switch (SKX_SAD_MOD3MODE(sad)) { case 0: shift = 6; @@ -668,7 +357,7 @@ static bool skx_sad_decode(struct decoded_addr *res) break; case 2: lchan = (addr >> shift) % 2; - lchan = (lchan << 1) | ~lchan; + lchan = (lchan << 1) | !lchan; break; case 3: lchan = ((addr >> shift) % 2) << 1; @@ -682,7 +371,7 @@ static bool skx_sad_decode(struct decoded_addr *res) res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); - edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n", + edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n", res->addr, res->socket, res->imc, res->channel); return true; } @@ -690,11 +379,11 @@ static bool skx_sad_decode(struct decoded_addr *res) #define SKX_MAX_TAD 8 #define SKX_GET_TADBASE(d, mc, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), ®) + pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &(reg)) #define SKX_GET_TADWAYNESS(d, mc, i, reg) \ 
- pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), ®) + pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &(reg)) #define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), ®) + pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &(reg)) #define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26) #define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5) @@ -729,7 +418,7 @@ static bool skx_tad_decode(struct decoded_addr *res) if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness)) goto tad_found; } - edac_dbg(0, "No TAD entry for %llx\n", res->addr); + edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr); return false; tad_found: @@ -757,7 +446,7 @@ static bool skx_tad_decode(struct decoded_addr *res) res->chan_addr = channel_addr; - edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n", + edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n", res->addr, res->chan_addr, res->sktways, res->chanways); return true; } @@ -766,10 +455,10 @@ static bool skx_tad_decode(struct decoded_addr *res) #define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ - 0x108 + 4 * (i), ®) + 0x108 + 4 * (i), &(reg)) #define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ - 0x120 + 16 * idx + 4 * (i), ®) + 0x120 + 16 * (idx) + 4 * (i), &(reg)) #define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31) #define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29) @@ -799,7 +488,7 @@ static bool skx_rir_decode(struct decoded_addr *res) } prev_limit = limit; } - edac_dbg(0, "No RIR entry for %llx\n", res->addr); + edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr); return false; rir_found: @@ -818,7 +507,7 @@ static bool skx_rir_decode(struct decoded_addr *res) res->dimm = chan_rank / 4; res->rank = chan_rank % 4; - edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n", + edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n", res->addr, res->dimm, res->rank, res->channel_rank, res->rank_address); return true; @@ -827,15 +516,19 @@ static bool skx_rir_decode(struct decoded_addr *res) static u8 skx_close_row[] = { 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 }; + static u8 skx_close_column[] = { 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 }; + static u8 skx_open_row[] = { 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 }; + static u8 skx_open_column[] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }; + static u8 skx_open_fine_column[] = { 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 }; @@ -881,7 +574,7 @@ static bool skx_mad_decode(struct decoded_addr *r) } r->row &= (1u << dimm->rowbits) - 1; - edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n", + edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n", r->addr, r->row, r->column, r->bank_address, r->bank_group); return true; @@ -889,42 +582,52 @@ static bool skx_mad_decode(struct decoded_addr *r) static bool skx_decode(struct decoded_addr *res) { - return skx_sad_decode(res) && skx_tad_decode(res) && skx_rir_decode(res) && skx_mad_decode(res); } +static struct notifier_block skx_mce_dec = { + .notifier_call = skx_mce_check_error, + .priority = MCE_PRIO_EDAC, +}; + #ifdef CONFIG_EDAC_DEBUG /* - * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr. - * Write an address to this file to exercise the address decode - * logic in this driver. + * Debug feature. 
+ * Exercise the address decode logic by writing an address to + * /sys/kernel/debug/edac/skx_test/addr. */ static struct dentry *skx_test; -static u64 skx_fake_addr; static int debugfs_u64_set(void *data, u64 val) { - struct decoded_addr res; + struct mce m; + + pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); - res.addr = val; - skx_decode(&res); + memset(&m, 0, sizeof(m)); + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x90; + /* One corrected error */ + m.status |= BIT_ULL(38); + m.addr = val; + skx_mce_check_error(NULL, 0, &m); return 0; } - DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); -static struct dentry *mydebugfs_create(const char *name, umode_t mode, - struct dentry *parent, u64 *value) -{ - return debugfs_create_file(name, mode, parent, value, &fops_u64_wo); -} - static void setup_skx_debug(void) { - skx_test = debugfs_create_dir("skx_edac_test", NULL); - mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr); + skx_test = edac_debugfs_create_dir("skx_test"); + if (!skx_test) + return; + + if (!edac_debugfs_create_file("addr", 0200, skx_test, + NULL, &fops_u64_wo)) { + debugfs_remove(skx_test); + skx_test = NULL; + } } static void teardown_skx_debug(void) @@ -932,167 +635,10 @@ static void teardown_skx_debug(void) debugfs_remove_recursive(skx_test); } #else -static void setup_skx_debug(void) -{ -} - -static void teardown_skx_debug(void) -{ -} +static inline void setup_skx_debug(void) {} +static inline void teardown_skx_debug(void) {} #endif /*CONFIG_EDAC_DEBUG*/ -static void skx_mce_output_error(struct mem_ctl_info *mci, - const struct mce *m, - struct decoded_addr *res) -{ - enum hw_event_mc_err_type tp_event; - char *type, *optype, msg[256]; - bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); - bool overflow = GET_BITFIELD(m->status, 62, 62); - bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); - bool recoverable; - u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); - u32 mscod = GET_BITFIELD(m->status, 16, 31); - u32 errcode = GET_BITFIELD(m->status, 0, 15); - u32 optypenum = GET_BITFIELD(m->status, 4, 6); - - recoverable = GET_BITFIELD(m->status, 56, 56); - - if (uncorrected_error) { - if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; - tp_event = HW_EVENT_ERR_UNCORRECTED; - } - } else { - type = "CORRECTED"; - tp_event = HW_EVENT_ERR_CORRECTED; - } - - /* - * According with Table 15-9 of the Intel Architecture spec vol 3A, - * memory errors should fit in this mask: - * 000f 0000 1mmm cccc (binary) - * where: - * f = Correction Report Filtering Bit. If 1, subsequent errors - * won't be shown - * mmm = error type - * cccc = channel - * If the mask doesn't match, report an error to the parsing logic - */ - if (!((errcode & 0xef80) == 0x80)) { - optype = "Can't parse: it is not a mem"; - } else { - switch (optypenum) { - case 0: - optype = "generic undef request error"; - break; - case 1: - optype = "memory read error"; - break; - case 2: - optype = "memory write error"; - break; - case 3: - optype = "addr/cmd error"; - break; - case 4: - optype = "memory scrubbing error"; - break; - default: - optype = "reserved"; - break; - } - } - - snprintf(msg, sizeof(msg), - "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) ? 
" recoverable" : "", - mscod, errcode, - res->socket, res->imc, res->rank, - res->bank_group, res->bank_address, res->row, res->column); - - edac_dbg(0, "%s\n", msg); - - /* Call the helper to output message */ - edac_mc_handle_error(tp_event, mci, core_err_cnt, - m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, - res->channel, res->dimm, -1, - optype, msg); -} - -static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct mce *mce = (struct mce *)data; - struct decoded_addr res; - struct mem_ctl_info *mci; - char *type; - - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - return NOTIFY_DONE; - - /* ignore unless this is memory related with an address */ - if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) - return NOTIFY_DONE; - - res.addr = mce->addr; - if (!skx_decode(&res)) - return NOTIFY_DONE; - mci = res.dev->imc[res.imc].mci; - - if (mce->mcgstatus & MCG_STATUS_MCIP) - type = "Exception"; - else - type = "Event"; - - skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); - - skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " - "Bank %d: %016Lx\n", mce->extcpu, type, - mce->mcgstatus, mce->bank, mce->status); - skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); - skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); - skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); - - skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " - "%u APIC %x\n", mce->cpuvendor, mce->cpuid, - mce->time, mce->socketid, mce->apicid); - - skx_mce_output_error(mci, mce, &res); - - return NOTIFY_DONE; -} - -static struct notifier_block skx_mce_dec = { - .notifier_call = skx_mce_check_error, - .priority = MCE_PRIO_EDAC, -}; - -static void skx_remove(void) -{ - int i, j; - struct skx_dev *d, *tmp; - - edac_dbg(0, "\n"); - - list_for_each_entry_safe(d, tmp, &skx_edac_list, list) { - list_del(&d->list); - for (i = 0; i < NUM_IMC; i++) { - skx_unregister_mci(&d->imc[i]); - for (j = 0; j < NUM_CHANNELS; j++) - pci_dev_put(d->imc[i].chan[j].cdev); - } - pci_dev_put(d->util_all); - pci_dev_put(d->sad_all); - - kfree(d); - } -} - /* * skx_init: * make sure we are running on the correct cpu model @@ -1102,9 +648,10 @@ static void skx_remove(void) static int __init skx_init(void) { const struct x86_cpu_id *id; + struct res_config *cfg; const struct munit *m; const char *owner; - int rc = 0, i; + int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8}; u8 mc = 0, src_id, node_id; struct skx_dev *d; @@ -1118,45 +665,59 @@ static int __init skx_init(void) if (!id) return -ENODEV; - rc = skx_get_hi_lo(); + cfg = (struct res_config *)id->driver_data; + + rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm); if (rc) return rc; - rc = get_all_bus_mappings(); + rc = skx_get_all_bus_mappings(cfg, &skx_edac_list); if (rc < 0) goto fail; if (rc == 0) { edac_dbg(2, "No memory controllers found\n"); return -ENODEV; } + skx_num_sockets = rc; for (m = skx_all_munits; m->did; m++) { rc = get_all_munits(m); if (rc < 0) goto fail; if (rc != m->per_socket * skx_num_sockets) { - edac_dbg(2, "Expected %d, got %d of %x\n", + edac_dbg(2, "Expected %d, got %d of 0x%x\n", m->per_socket * skx_num_sockets, rc, m->did); rc = -ENODEV; goto fail; } } - list_for_each_entry(d, &skx_edac_list, list) { - src_id = get_src_id(d); - node_id = skx_get_node_id(d); + list_for_each_entry(d, skx_edac_list, list) { + rc = skx_get_src_id(d, 0xf0, &src_id); + if (rc < 0) + goto fail; + rc = skx_get_node_id(d, &node_id); + if (rc < 0) + goto fail; 
edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id); - for (i = 0; i < NUM_IMC; i++) { + for (i = 0; i < SKX_NUM_IMC; i++) { d->imc[i].mc = mc++; d->imc[i].lmc = i; d->imc[i].src_id = src_id; d->imc[i].node_id = node_id; - rc = skx_register_mci(&d->imc[i]); + rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev, + "Skylake Socket", EDAC_MOD_STR, + skx_get_dimm_config); if (rc < 0) goto fail; } } + skx_set_decode(skx_decode, skx_show_retry_rd_err_log); + + if (nvdimm_count && skx_adxl_get() == -ENODEV) + skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n"); + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1174,8 +735,10 @@ static void __exit skx_exit(void) { edac_dbg(2, "\n"); mce_unregister_decode_chain(&skx_mce_dec); - skx_remove(); teardown_skx_debug(); + if (nvdimm_count) + skx_adxl_put(); + skx_remove(); } module_init(skx_init); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c new file mode 100644 index 0000000000000000000000000000000000000000..1f49f5f2f3f7cc39a88cc2cd652c8b69fd21bf0d --- /dev/null +++ b/drivers/edac/skx_common.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * Shared code by both skx_edac and i10nm_edac. Originally split out + * from the skx_edac driver. + * + * This file is linked into both skx_edac and i10nm_edac drivers. In + * order to avoid link errors, this file must be like a pure library + * without including symbols and defines which would otherwise conflict, + * when linked once into a module and into a built-in object, at the + * same time. For example, __this_module symbol references when that + * file is being linked into a built-in object. + * + * Copyright (c) 2018, Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include "edac_module.h" +#include "skx_common.h" + +static const char * const component_names[] = { + [INDEX_SOCKET] = "ProcessorSocketId", + [INDEX_MEMCTRL] = "MemoryControllerId", + [INDEX_CHANNEL] = "ChannelId", + [INDEX_DIMM] = "DimmSlotId", +}; + +static int component_indices[ARRAY_SIZE(component_names)]; +static int adxl_component_count; +static const char * const *adxl_component_names; +static u64 *adxl_values; +static char *adxl_msg; + +static char skx_msg[MSG_SIZE]; +static skx_decode_f skx_decode; +static skx_show_retry_log_f skx_show_retry_rd_err_log; +static u64 skx_tolm, skx_tohm; +static LIST_HEAD(dev_edac_list); + +int __init skx_adxl_get(void) +{ + const char * const *names; + int i, j; + + names = adxl_get_component_names(); + if (!names) { + skx_printk(KERN_NOTICE, "No firmware support for address translation.\n"); + return -ENODEV; + } + + for (i = 0; i < INDEX_MAX; i++) { + for (j = 0; names[j]; j++) { + if (!strcmp(component_names[i], names[j])) { + component_indices[i] = j; + break; + } + } + + if (!names[j]) + goto err; + } + + adxl_component_names = names; + while (*names++) + adxl_component_count++; + + adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values), + GFP_KERNEL); + if (!adxl_values) { + adxl_component_count = 0; + return -ENOMEM; + } + + adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!adxl_msg) { + adxl_component_count = 0; + kfree(adxl_values); + return -ENOMEM; + } + + return 0; +err: + skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ", + component_names[i]); + for (j = 0; names[j]; j++) + skx_printk(KERN_CONT, "%s ", names[j]); + skx_printk(KERN_CONT, "\n"); + + return -ENODEV; +} + +void __exit skx_adxl_put(void) +{ + kfree(adxl_values); + kfree(adxl_msg); +} + +static 
bool skx_adxl_decode(struct decoded_addr *res) +{ + struct skx_dev *d; + int i, len = 0; + + if (res->addr >= skx_tohm || (res->addr >= skx_tolm && + res->addr < BIT_ULL(32))) { + edac_dbg(0, "Address 0x%llx out of range\n", res->addr); + return false; + } + + if (adxl_decode(res->addr, adxl_values)) { + edac_dbg(0, "Failed to decode 0x%llx\n", res->addr); + return false; + } + + res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]]; + res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; + res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; + res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; + + if (res->imc > NUM_IMC - 1) { + skx_printk(KERN_ERR, "Bad imc %d\n", res->imc); + return false; + } + + list_for_each_entry(d, &dev_edac_list, list) { + if (d->imc[0].src_id == res->socket) { + res->dev = d; + break; + } + } + + if (!res->dev) { + skx_printk(KERN_ERR, "No device for src_id %d imc %d\n", + res->socket, res->imc); + return false; + } + + for (i = 0; i < adxl_component_count; i++) { + if (adxl_values[i] == ~0x0ull) + continue; + + len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx", + adxl_component_names[i], adxl_values[i]); + if (MSG_SIZE - len <= 0) + break; + } + + return true; +} + +void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log) +{ + skx_decode = decode; + skx_show_retry_rd_err_log = show_retry_log; +} + +int skx_get_src_id(struct skx_dev *d, int off, u8 *id) +{ + u32 reg; + + if (pci_read_config_dword(d->util_all, off, ®)) { + skx_printk(KERN_ERR, "Failed to read src id\n"); + return -ENODEV; + } + + *id = GET_BITFIELD(reg, 12, 14); + return 0; +} + +int skx_get_node_id(struct skx_dev *d, u8 *id) +{ + u32 reg; + + if (pci_read_config_dword(d->util_all, 0xf4, ®)) { + skx_printk(KERN_ERR, "Failed to read node id\n"); + return -ENODEV; + } + + *id = GET_BITFIELD(reg, 0, 2); + return 0; +} + +static int get_width(u32 mtr) +{ + switch (GET_BITFIELD(mtr, 8, 9)) { + case 0: + return DEV_X4; + case 1: + return DEV_X8; + case 2: + return DEV_X16; + } + return DEV_UNKNOWN; +} + +/* + * We use the per-socket device @cfg->did to count how many sockets are present, + * and to detemine which PCI buses are associated with each socket. Allocate + * and build the full list of all the skx_dev structures that we need here. 
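Illustrative sketch (not part of the patch; the firmware name list is made up): skx_adxl_get() above records, for each component the driver needs (socket, memory controller, channel, DIMM slot), its position in the firmware-provided ADXL name list, so that later adxl_decode() results can be picked out by index:

#include <stdio.h>
#include <string.h>

static const char * const wanted[] = {
	"ProcessorSocketId", "MemoryControllerId", "ChannelId", "DimmSlotId",
};

int main(void)
{
	const char * const fw_names[] = {
		"ProcessorSocketId", "MemoryControllerId", "MemoryControllerDie",
		"ChannelId", "DimmSlotId", NULL,
	};
	int indices[4];

	for (int i = 0; i < 4; i++) {
		int j;

		for (j = 0; fw_names[j]; j++)
			if (!strcmp(wanted[i], fw_names[j]))
				break;
		if (!fw_names[j])
			return 1;	/* firmware lacks a required component */
		indices[i] = j;
		printf("%s -> index %d\n", wanted[i], indices[i]);
	}
	return 0;
}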
+ */ +int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) +{ + struct pci_dev *pdev, *prev; + struct skx_dev *d; + u32 reg; + int ndev = 0; + + prev = NULL; + for (;;) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, cfg->decs_did, prev); + if (!pdev) + break; + ndev++; + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + pci_dev_put(pdev); + return -ENOMEM; + } + + if (pci_read_config_dword(pdev, cfg->busno_cfg_offset, ®)) { + kfree(d); + pci_dev_put(pdev); + skx_printk(KERN_ERR, "Failed to read bus idx\n"); + return -ENODEV; + } + + d->bus[0] = GET_BITFIELD(reg, 0, 7); + d->bus[1] = GET_BITFIELD(reg, 8, 15); + if (cfg->type == SKX) { + d->seg = pci_domain_nr(pdev->bus); + d->bus[2] = GET_BITFIELD(reg, 16, 23); + d->bus[3] = GET_BITFIELD(reg, 24, 31); + } else { + d->seg = GET_BITFIELD(reg, 16, 23); + } + + edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n", + d->bus[0], d->bus[1], d->bus[2], d->bus[3]); + list_add_tail(&d->list, &dev_edac_list); + prev = pdev; + } + + if (list) + *list = &dev_edac_list; + return ndev; +} + +int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) +{ + struct pci_dev *pdev; + u32 reg; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); + if (!pdev) { + edac_dbg(2, "Can't get tolm/tohm\n"); + return -ENODEV; + } + + if (pci_read_config_dword(pdev, off[0], ®)) { + skx_printk(KERN_ERR, "Failed to read tolm\n"); + goto fail; + } + skx_tolm = reg; + + if (pci_read_config_dword(pdev, off[1], ®)) { + skx_printk(KERN_ERR, "Failed to read lower tohm\n"); + goto fail; + } + skx_tohm = reg; + + if (pci_read_config_dword(pdev, off[2], ®)) { + skx_printk(KERN_ERR, "Failed to read upper tohm\n"); + goto fail; + } + skx_tohm |= (u64)reg << 32; + + pci_dev_put(pdev); + *tolm = skx_tolm; + *tohm = skx_tohm; + edac_dbg(2, "tolm = 0x%llx tohm = 0x%llx\n", skx_tolm, skx_tohm); + return 0; +fail: + pci_dev_put(pdev); + return -ENODEV; +} + +static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, + int minval, int maxval, const char *name) +{ + u32 val = GET_BITFIELD(reg, lobit, hibit); + + if (val < minval || val > maxval) { + edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg); + return -EINVAL; + } + return val + add; +} + +#define numrank(reg) skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks") +#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows") +#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols") + +int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno) +{ + int banks = 16, ranks, rows, cols, npages; + u64 size; + + ranks = numrank(mtr); + rows = numrow(mtr); + cols = numcol(mtr); + + /* + * Compute size in 8-byte (2^3) words, then shift to MiB (2^20) + */ + size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3); + npages = MiB_TO_PAGES(size); + + edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%x, col: 0x%x\n", + imc->mc, chan, dimmno, size, npages, + banks, 1 << ranks, rows, cols); + + imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); + imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); + imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); + imc->chan[chan].dimms[dimmno].rowbits = rows; + imc->chan[chan].dimms[dimmno].colbits = cols; + + dimm->nr_pages = npages; + dimm->grain = 32; + dimm->dtype = get_width(mtr); + dimm->mtype = MEM_DDR4; + dimm->edac_mode = EDAC_SECDED; /* likely better than this */ + snprintf(dimm->label, 
sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", + imc->src_id, imc->lmc, chan, dimmno); + + return 1; +} + +int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, + int chan, int dimmno, const char *mod_str) +{ + int smbios_handle; + u32 dev_handle; + u16 flags; + u64 size = 0; + + dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, + imc->src_id, 0); + + smbios_handle = nfit_get_smbios_id(dev_handle, &flags); + if (smbios_handle == -EOPNOTSUPP) { + pr_warn_once("%s: Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n", mod_str); + goto unknown_size; + } + + if (smbios_handle < 0) { + skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle); + goto unknown_size; + } + + if (flags & ACPI_NFIT_MEM_MAP_FAILED) { + skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle); + goto unknown_size; + } + + size = dmi_memdev_size(smbios_handle); + if (size == ~0ull) + skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n", + dev_handle, smbios_handle); + +unknown_size: + dimm->nr_pages = size >> PAGE_SHIFT; + dimm->grain = 32; + dimm->dtype = DEV_UNKNOWN; + dimm->mtype = MEM_NVDIMM; + dimm->edac_mode = EDAC_SECDED; /* likely better than this */ + + edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n", + imc->mc, chan, dimmno, size >> 20, dimm->nr_pages); + + snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", + imc->src_id, imc->lmc, chan, dimmno); + + return (size == 0 || size == ~0ull) ? 0 : 1; +} + +int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, + const char *ctl_name, const char *mod_str, + get_dimm_config_f get_dimm_config) +{ + struct mem_ctl_info *mci; + struct edac_mc_layer layers[2]; + struct skx_pvt *pvt; + int rc; + + /* Allocate a new MC control structure */ + layers[0].type = EDAC_MC_LAYER_CHANNEL; + layers[0].size = NUM_CHANNELS; + layers[0].is_virt_csrow = false; + layers[1].type = EDAC_MC_LAYER_SLOT; + layers[1].size = NUM_DIMMS; + layers[1].is_virt_csrow = true; + mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, + sizeof(struct skx_pvt)); + + if (unlikely(!mci)) + return -ENOMEM; + + edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); + + /* Associate skx_dev and mci for future usage */ + imc->mci = mci; + pvt = mci->pvt_info; + pvt->imc = imc; + + mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name, + imc->node_id, imc->lmc); + if (!mci->ctl_name) { + rc = -ENOMEM; + goto fail0; + } + + mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM; + mci->edac_ctl_cap = EDAC_FLAG_NONE; + mci->edac_cap = EDAC_FLAG_NONE; + mci->mod_name = mod_str; + mci->dev_name = pci_name(pdev); + mci->ctl_page_to_phys = NULL; + + rc = get_dimm_config(mci); + if (rc < 0) + goto fail; + + /* Record ptr to the generic device */ + mci->pdev = &pdev->dev; + + /* Add this new MC control structure to EDAC's list of MCs */ + if (unlikely(edac_mc_add_mc(mci))) { + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); + rc = -EINVAL; + goto fail; + } + + return 0; + +fail: + kfree(mci->ctl_name); +fail0: + edac_mc_free(mci); + imc->mci = NULL; + return rc; +} + +static void skx_unregister_mci(struct skx_imc *imc) +{ + struct mem_ctl_info *mci = imc->mci; + + if (!mci) + return; + + edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); + + /* Remove MC sysfs nodes */ + edac_mc_del_mc(mci->pdev); + + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); + kfree(mci->ctl_name); + edac_mc_free(mci); +} + +static void skx_mce_output_error(struct mem_ctl_info *mci, + const 
struct mce *m, + struct decoded_addr *res) +{ + enum hw_event_mc_err_type tp_event; + char *type, *optype; + bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); + bool overflow = GET_BITFIELD(m->status, 62, 62); + bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); + bool recoverable; + int len; + u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); + u32 mscod = GET_BITFIELD(m->status, 16, 31); + u32 errcode = GET_BITFIELD(m->status, 0, 15); + u32 optypenum = GET_BITFIELD(m->status, 4, 6); + + recoverable = GET_BITFIELD(m->status, 56, 56); + + if (uncorrected_error) { + core_err_cnt = 1; + if (ripv) { + type = "FATAL"; + tp_event = HW_EVENT_ERR_FATAL; + } else { + type = "NON_FATAL"; + tp_event = HW_EVENT_ERR_UNCORRECTED; + } + } else { + type = "CORRECTED"; + tp_event = HW_EVENT_ERR_CORRECTED; + } + + /* + * According to Intel Architecture spec vol 3B, + * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding" + * memory errors should fit one of these masks: + * 000f 0000 1mmm cccc (binary) + * 000f 0010 1mmm cccc (binary) [RAM used as cache] + * where: + * f = Correction Report Filtering Bit. If 1, subsequent errors + * won't be shown + * mmm = error type + * cccc = channel + * If the mask doesn't match, report an error to the parsing logic + */ + if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) { + optype = "Can't parse: it is not a mem"; + } else { + switch (optypenum) { + case 0: + optype = "generic undef request error"; + break; + case 1: + optype = "memory read error"; + break; + case 2: + optype = "memory write error"; + break; + case 3: + optype = "addr/cmd error"; + break; + case 4: + optype = "memory scrubbing error"; + break; + default: + optype = "reserved"; + break; + } + } + if (adxl_component_count) { + len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, adxl_msg); + } else { + len = snprintf(skx_msg, MSG_SIZE, + "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? 
" recoverable" : "", + mscod, errcode, + res->socket, res->imc, res->rank, + res->bank_group, res->bank_address, res->row, res->column); + } + + if (skx_show_retry_rd_err_log) + skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len); + + edac_dbg(0, "%s\n", skx_msg); + + /* Call the helper to output message */ + edac_mc_handle_error(tp_event, mci, core_err_cnt, + m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, + res->channel, res->dimm, -1, + optype, skx_msg); +} + +int skx_mce_check_error(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + struct decoded_addr res; + struct mem_ctl_info *mci; + char *type; + + if (edac_get_report_status() == EDAC_REPORTING_DISABLED) + return NOTIFY_DONE; + + /* ignore unless this is memory related with an address */ + if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) + return NOTIFY_DONE; + + memset(&res, 0, sizeof(res)); + res.addr = mce->addr; + + if (adxl_component_count) { + if (!skx_adxl_decode(&res)) + return NOTIFY_DONE; + } else if (!skx_decode || !skx_decode(&res)) { + return NOTIFY_DONE; + } + + mci = res.dev->imc[res.imc].mci; + + if (!mci) + return NOTIFY_DONE; + + if (mce->mcgstatus & MCG_STATUS_MCIP) + type = "Exception"; + else + type = "Event"; + + skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); + + skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx " + "Bank %d: 0x%llx\n", mce->extcpu, type, + mce->mcgstatus, mce->bank, mce->status); + skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc); + skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr); + skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc); + + skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET " + "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid, + mce->time, mce->socketid, mce->apicid); + + skx_mce_output_error(mci, mce, &res); + + return NOTIFY_DONE; +} + +void skx_remove(void) +{ + int i, j; + struct skx_dev *d, *tmp; + + edac_dbg(0, "\n"); + + list_for_each_entry_safe(d, tmp, &dev_edac_list, list) { + list_del(&d->list); + for (i = 0; i < NUM_IMC; i++) { + if (d->imc[i].mci) + skx_unregister_mci(&d->imc[i]); + + if (d->imc[i].mdev) + pci_dev_put(d->imc[i].mdev); + + if (d->imc[i].mbase) + iounmap(d->imc[i].mbase); + + for (j = 0; j < NUM_CHANNELS; j++) { + if (d->imc[i].chan[j].cdev) + pci_dev_put(d->imc[i].chan[j].cdev); + } + } + if (d->util_all) + pci_dev_put(d->util_all); + if (d->sad_all) + pci_dev_put(d->sad_all); + if (d->uracu) + pci_dev_put(d->uracu); + + kfree(d); + } +} diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h new file mode 100644 index 0000000000000000000000000000000000000000..19dd8c0995200cfcae257ddc203bcca700d8f40b --- /dev/null +++ b/drivers/edac/skx_common.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver. + * Originally split out from the skx_edac driver. + * + * Copyright (c) 2018, Intel Corporation. + */ + +#ifndef _SKX_COMM_EDAC_H +#define _SKX_COMM_EDAC_H + +#define MSG_SIZE 1024 + +/* + * Debug macros + */ +#define skx_printk(level, fmt, arg...) \ + edac_printk(level, "skx", fmt, ##arg) + +#define skx_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) + +/* + * Get a bit field at register value <v>, from bit <lo> to bit <hi> + */ +#define GET_BITFIELD(v, lo, hi) \ + (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) + +#define SKX_NUM_IMC 2 /* Memory controllers per socket */ +#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */ +#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */ + +#define I10NM_NUM_IMC 4 +#define I10NM_NUM_CHANNELS 2 +#define I10NM_NUM_DIMMS 2 + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC) +#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS) +#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS) + +#define IS_DIMM_PRESENT(r) GET_BITFIELD(r, 15, 15) +#define IS_NVDIMM_PRESENT(r, i) GET_BITFIELD(r, i, i) + +/* + * Each cpu socket contains some pci devices that provide global + * information, and also some that are local to each of the two + * memory controllers on the die. + */ +struct skx_dev { + struct list_head list; + u8 bus[4]; + int seg; + struct pci_dev *sad_all; + struct pci_dev *util_all; + struct pci_dev *uracu; /* for i10nm CPU */ + u32 mcroute; + struct skx_imc { + struct mem_ctl_info *mci; + struct pci_dev *mdev; /* for i10nm CPU */ + void __iomem *mbase; /* for i10nm CPU */ + u8 mc; /* system wide mc# */ + u8 lmc; /* socket relative mc# */ + u8 src_id, node_id; + struct skx_channel { + struct pci_dev *cdev; + struct pci_dev *edev; + struct skx_dimm { + u8 close_pg; + u8 bank_xor_enable; + u8 fine_grain_bank; + u8 rowbits; + u8 colbits; + } dimms[NUM_DIMMS]; + } chan[NUM_CHANNELS]; + } imc[NUM_IMC]; +}; + +struct skx_pvt { + struct skx_imc *imc; +}; + +enum type { + SKX, + I10NM +}; + +enum { + INDEX_SOCKET, + INDEX_MEMCTRL, + INDEX_CHANNEL, + INDEX_DIMM, + INDEX_MAX +}; + +struct decoded_addr { + struct skx_dev *dev; + u64 addr; + int socket; + int imc; + int channel; + u64 chan_addr; + int sktways; + int chanways; + int dimm; + int rank; + int channel_rank; + u64 rank_address; + int row; + int column; + int bank_address; + int bank_group; +}; + +struct res_config { + enum type type; + /* Configuration agent device ID */ + unsigned int decs_did; + /* Default bus number configuration register offset */ + int busno_cfg_offset; +}; + +typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci); +typedef bool (*skx_decode_f)(struct decoded_addr *res); +typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len); + +int __init skx_adxl_get(void); +void __exit skx_adxl_put(void); +void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); + +int skx_get_src_id(struct skx_dev *d, int off, u8 *id); +int skx_get_node_id(struct skx_dev *d, u8 *id); + +int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list); + +int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm); + +int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno); + +int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, + int chan, int dimmno, const char *mod_str); + +int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, + const char *ctl_name, const char *mod_str, + get_dimm_config_f get_dimm_config); + +int skx_mce_check_error(struct notifier_block *nb, unsigned long val, + void *data); + +void skx_remove(void); + +#endif /* _SKX_COMM_EDAC_H */ diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index 
c009d94f40c52982546fd8354c0b613d0b30237c..0fffb393415bbd1dea2bac451159229bf369b53b 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) decode_register(other, OCX_OTHER_SIZE, ocx_com_errors, ctx->reg_com_int); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); for (lane = 0; lane < OCX_RX_LANES; lane++) if (ctx->reg_com_int & BIT(lane)) { @@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) lane, ctx->reg_lane_int[lane], lane, ctx->reg_lane_stat11[lane]); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); decode_register(other, OCX_OTHER_SIZE, ocx_lane_errors, ctx->reg_lane_int[lane]); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); } if (ctx->reg_com_int & OCX_COM_INT_CE) @@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id) decode_register(other, OCX_OTHER_SIZE, ocx_com_link_errors, ctx->reg_com_link_int); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE) edac_device_handle_ue(ocx->edac_dev, 0, 0, msg); @@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) default: dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n", l2c->pdev->device); - return IRQ_NONE; + goto err_free; } while (CIRC_CNT(l2c->ring_head, l2c->ring_tail, @@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int); - strncat(msg, other, L2C_MESSAGE_SIZE); + strlcat(msg, other, L2C_MESSAGE_SIZE); if (ctx->reg_int & mask_ue) edac_device_handle_ue(l2c->edac_dev, 0, 0, msg); @@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id) l2c->ring_tail++; } - return IRQ_HANDLED; + ret = IRQ_HANDLED; err_free: kfree(other); diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index da0e9bc4262fad1a0eac36b0747766655b9d7fe7..9327479c719c23b3653eb0572a1750ed5b06ee4a 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev) struct arizona_extcon_info *info = platform_get_drvdata(pdev); struct arizona *arizona = info->arizona; int jack_irq_rise, jack_irq_fall; + bool change; + + regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1, + ARIZONA_MICD_ENA, 0, + &change); + + if (change) { + regulator_disable(info->micvdd); + pm_runtime_put(info->dev); + } gpiod_put(info->micd_pol_gpio); diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c index 5e1dd2772278147cf1d7ab51333540a13eb52df3..bdb67878179ed7970f428fce6de5269231c3c541 100644 --- a/drivers/extcon/extcon-intel-cht-wc.c +++ b/drivers/extcon/extcon-intel-cht-wc.c @@ -156,7 +156,7 @@ static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext, dev_warn(ext->dev, "Unhandled charger type %d, defaulting to SDP\n", ret); - /* Fall through, treat as SDP */ + return EXTCON_CHG_USB_SDP; case CHT_WC_USBSRC_TYPE_SDP: case CHT_WC_USBSRC_TYPE_FLOAT_DP_DN: case CHT_WC_USBSRC_TYPE_OTHER: diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index 9f30f4929b72999be05a896fc22340b1a50bb1c0..7a767b66dd8656fee5dfa72c709016f030fece19 100644 --- 
a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c @@ -321,12 +321,10 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info, { int ret = 0; - if (usb_type == MAX8997_USB_HOST) { - ret = max8997_muic_set_path(info, info->path_usb, attached); - if (ret < 0) { - dev_err(info->dev, "failed to update muic register\n"); - return ret; - } + ret = max8997_muic_set_path(info, info->path_usb, attached); + if (ret < 0) { + dev_err(info->dev, "failed to update muic register\n"); + return ret; } switch (usb_type) { diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index 0cfb5a3efdf65ad3637fc479c45cf53ebc43933c..2efcd94f74fc6bc808344166d711d2181fc2ec50 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c @@ -69,6 +69,10 @@ struct sm5502_muic_info { /* Default value of SM5502 register to bring up MUIC device. */ static struct reg_data sm5502_reg_data[] = { { + .reg = SM5502_REG_RESET, + .val = SM5502_REG_RESET_MASK, + .invert = true, + }, { .reg = SM5502_REG_CONTROL, .val = SM5502_REG_CONTROL_MASK_INT_MASK, .invert = false, diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h index 974b53222f568df3aefcc25c9fe7c732eec7ec4d..12f8b01e575387be81bde88395d7a2f1e57433ac 100644 --- a/drivers/extcon/extcon-sm5502.h +++ b/drivers/extcon/extcon-sm5502.h @@ -241,6 +241,8 @@ enum sm5502_reg { #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <packets); - if (!access_ok(VERIFY_READ, p, a->size)) + if (!access_ok(p, a->size)) return -EFAULT; end = (void __user *)p + a->size; @@ -1495,6 +1495,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, { struct outbound_phy_packet_event *e = container_of(packet, struct outbound_phy_packet_event, p); + struct client *e_client; switch (status) { /* expected: */ @@ -1511,9 +1512,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, } e->phy_packet.data[0] = packet->timestamp; + e_client = e->client; queue_event(e->client, &e->event, &e->phy_packet, sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); - client_put(e->client); + client_put(e_client); } static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 82ba110d9d1ad29600c006dbe2cc350616a099b8..bbabfca812bbcd80698bfcd9f91ab8a992c864df 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -249,7 +249,11 @@ static int fwnet_header_cache(const struct neighbour *neigh, h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h))); h->h_proto = type; memcpy(h->h_dest, neigh->ha, net->addr_len); - hh->hh_len = FWNET_HLEN; + + /* Pairs with the READ_ONCE() in neigh_resolve_output(), + * neigh_hh_output() and neigh_update_hhs(). + */ + smp_store_release(&hh->hh_len, FWNET_HLEN); return 0; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index a128dd1126ae458c323d7ca6a43293bef05dd14f..f3784c054dd6e1e8f6f8b8905584ea16b7c0ee0d 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -161,10 +161,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length) if (atomic_read(&buffer->size) == 0) return -ENODEV; - /* FIXME: Check length <= user_length. 
*/ + length = buffer->head->length; + + if (length > user_length) + return 0; end = buffer->data + buffer->capacity; - length = buffer->head->length; if (&buffer->head->data[length] < end) { if (copy_to_user(data, buffer->head->data, length)) @@ -359,6 +361,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; + int ret; switch (cmd) { case NOSY_IOC_GET_STATS: @@ -373,11 +376,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; case NOSY_IOC_START: + ret = -EBUSY; spin_lock_irq(client_list_lock); - list_add_tail(&client->link, &client->lynx->client_list); + if (list_empty(&client->link)) { + list_add_tail(&client->link, &client->lynx->client_list); + ret = 0; + } spin_unlock_irq(client_list_lock); - return 0; + return ret; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 45c048751f3bd8a9527631d26673cc7f603fe85e..11f53e5a4060b03a9cc3913a7d7b8b2cdd2d978d 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2018,6 +2018,8 @@ static void bus_reset_work(struct work_struct *work) ohci->generation = generation; reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); if (ohci->quirks & QUIRK_RESET_PACKET) ohci->request_generation = generation; @@ -2084,12 +2086,14 @@ static irqreturn_t irq_handler(int irq, void *data) return IRQ_NONE; /* - * busReset and postedWriteErr must not be cleared yet + * busReset and postedWriteErr events must not be cleared yet * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) */ reg_write(ohci, OHCI1394_IntEventClear, event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); log_irqs(ohci, event); + if (event & OHCI1394_busReset) + reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset); if (event & OHCI1394_selfIDComplete) queue_work(selfid_workqueue, &ohci->bus_reset_work); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 6e83880046d787d978cddaa1fcf2c5b930843186..ed212c8b4108370152fda28275843e65fc4d50c9 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -198,7 +198,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK config ISCSI_IBFT_FIND bool "iSCSI Boot Firmware Table Attributes" - depends on X86 && ACPI + depends on X86 && ISCSI_IBFT default n help This option enables the kernel to find the region of memory @@ -209,7 +209,8 @@ config ISCSI_IBFT_FIND config ISCSI_IBFT tristate "iSCSI Boot Firmware Table Attributes module" select ISCSI_BOOT_SYSFS - depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL + select ISCSI_IBFT_FIND if X86 + depends on ACPI && SCSI && SCSI_LOWLEVEL default n help This option enables support for detection and exposing of iSCSI diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 9dff33ea6416f66879ea4de32e8dba6a22e37c01..204390297f4bd4e249a6f3987e992ff1eb2b7d56 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -208,7 +208,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle, ret = scmi_do_xfer(handle, t); if (!ret) - memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); + strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); scmi_xfer_put(handle, t); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 
472c88ae1c0f9d37ffae6d094a8500ca85ed3146..7a30952b463d58329edd05980409b40b6b856c74 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) } EXPORT_SYMBOL_GPL(scmi_driver_unregister); +static void scmi_device_release(struct device *dev) +{ + kfree(to_scmi_dev(dev)); +} + struct scmi_device * scmi_device_create(struct device_node *np, struct device *parent, int protocol) { @@ -130,14 +135,17 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) return NULL; id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); - if (id < 0) - goto free_mem; + if (id < 0) { + kfree(scmi_dev); + return NULL; + } scmi_dev->id = id; scmi_dev->protocol_id = protocol; scmi_dev->dev.parent = parent; scmi_dev->dev.of_node = np; scmi_dev->dev.bus = &scmi_bus_type; + scmi_dev->dev.release = scmi_device_release; dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); retval = device_register(&scmi_dev->dev); @@ -148,17 +156,14 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) put_dev: put_device(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, id); -free_mem: - kfree(scmi_dev); return NULL; } void scmi_device_destroy(struct scmi_device *scmi_dev) { scmi_handle_put(scmi_dev->handle); - device_unregister(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, scmi_dev->id); - kfree(scmi_dev); + device_unregister(&scmi_dev->dev); } void scmi_set_handle(struct scmi_device *scmi_dev) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index e4119eb34986cb5219b648d1b900a23c8686329b..30fc04e284312e9f19c165994dad1170966304b8 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -111,7 +111,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle, ret = scmi_do_xfer(handle, t); if (!ret) - memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); + strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); else clk->name[0] = '\0'; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 8f952f2f1a29203f8b7729cbfd10aafeba3cd9f6..09119e3f5c018b9495068eeda50ef6ad63534d89 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); struct scmi_shared_mem __iomem *mem = cinfo->payload; + /* + * Ideally channel must be free by now unless OS timeout last + * request and platform continued to process the same, wait + * until it releases the shared memory, otherwise we may endup + * overwriting its response with new message payload or vice-versa + */ + spin_until_cond(ioread32(&mem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); /* Mark channel busy + clear error */ iowrite32(0x0, &mem->channel_status); iowrite32(t->hdr.poll_completion ? 
0 : SCMI_SHMEM_FLAG_INTR_ENABLED, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 64342944d9175c54918100c36f0b43a20e6acae3..87c99d296ecd3331218b41b65b19d487b039e62e 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -174,7 +174,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / dom_info->sustained_perf_level; - memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); } scmi_xfer_put(handle, t); diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index cfa033b05aed5e568b2510e2a7dace5162d13327..62f3401a1f01e90d9ddceabbe462416471d67614 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -106,7 +106,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain, dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); - memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); } scmi_xfer_put(handle, t); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 27f2092b9882aef307763e214eb74c69f4db2af5..b53d5cc9c9f6c57ebae04f6a43e42fb814fc27d9 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -140,7 +140,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, s = &si->sensors[desc_index + cnt]; s->id = le32_to_cpu(buf->desc[cnt].id); s->type = SENSOR_TYPE(attrh); - memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); + strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); } desc_index += num_returned; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index c7d06a36b23a5670f4e2ddbe339698e38bce5796..e3fff7b29045bf43e32dd5d65feb0038796b69e1 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -563,8 +563,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id) ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, sizeof(le_clk_id), &rate, sizeof(rate)); + if (ret) + return 0; - return ret ? 
ret : le32_to_cpu(rate); + return le32_to_cpu(rate); } static int scpi_clk_set_val(u16 clk_id, unsigned long rate) @@ -636,6 +638,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (ret) return ERR_PTR(ret); + if (!buf.opp_count) + return ERR_PTR(-ENOENT); + info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 1ea71640fdc21dd6b8b96c93d1383babc6e89a33..2b5f277819fd8de45383080cd128034455dc7eed 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -164,6 +164,7 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0, return err; } +NOKPROBE_SYMBOL(invoke_sdei_fn); static struct sdei_event *sdei_event_find(u32 event_num) { @@ -190,6 +191,28 @@ int sdei_api_event_context(u32 query, u64 *result) } NOKPROBE_SYMBOL(sdei_api_event_context); +int sdei_api_event_interrupt_bind(int hwirq) +{ + u64 event_number; + + invoke_sdei_fn(SDEI_1_0_FN_SDEI_INTERRUPT_BIND, hwirq, 0, 0, 0, 0, + &event_number); + + return (int)event_number; +} + +int sdei_api_clear_eoi(int hwirq) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SDEI_CLEAR_EOI, hwirq, 0, 0, 0, 0, + NULL); +} + +int sdei_api_set_secure_timer_period(int sec) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD, sec, 0, 0, 0, + 0, NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, @@ -379,7 +402,7 @@ static int sdei_platform_reset(void) return err; } -static int sdei_api_event_enable(u32 event_num) +int sdei_api_event_enable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0, 0, NULL); @@ -424,7 +447,7 @@ int sdei_event_enable(u32 event_num) } EXPORT_SYMBOL(sdei_event_enable); -static int sdei_api_event_disable(u32 event_num) +int sdei_api_event_disable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0, 0, 0, NULL); @@ -489,11 +512,6 @@ static int _sdei_event_unregister(struct sdei_event *event) { lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_unregister(event->event_num); @@ -516,6 +534,11 @@ int sdei_event_unregister(u32 event_num) break; } + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + err = _sdei_event_unregister(event); if (err) break; @@ -583,26 +606,15 @@ static int _sdei_event_register(struct sdei_event *event) lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_register(event->event_num, sdei_entry_point, event->registered, SDEI_EVENT_REGISTER_RM_ANY, 0); - err = sdei_do_cross_call(_local_event_register, event); - if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - + if (err) sdei_do_cross_call(_local_event_unregister, event); - } return err; } @@ -630,8 +642,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); + err = _sdei_event_register(event); if (err) { + spin_lock(&sdei_list_lock); + event->reregister = false; + 
event->reenable = false; + spin_unlock(&sdei_list_lock); + sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); @@ -878,6 +899,7 @@ static void sdei_smccc_smc(unsigned long function_id, { arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); } +NOKPROBE_SYMBOL(sdei_smccc_smc); static void sdei_smccc_hvc(unsigned long function_id, unsigned long arg0, unsigned long arg1, @@ -886,6 +908,7 @@ static void sdei_smccc_hvc(unsigned long function_id, { arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); } +NOKPROBE_SYMBOL(sdei_smccc_hvc); static int sdei_get_conduit(struct platform_device *pdev) { @@ -1009,7 +1032,6 @@ static struct platform_driver sdei_driver = { static bool __init sdei_present_dt(void) { - struct platform_device *pdev; struct device_node *np, *fw_np; fw_np = of_find_node_by_name(NULL, "firmware"); @@ -1017,14 +1039,9 @@ static bool __init sdei_present_dt(void) return false; np = of_find_matching_node(fw_np, sdei_of_match); - of_node_put(fw_np); if (!np) return false; - - pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL); of_node_put(np); - if (!pdev) - return false; return true; } diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index fb8af5cb7c9bffef3f4446baf2e94045685e6f3e..ccefa84f730576874cfa5f78751088612cafc632 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c @@ -45,6 +45,7 @@ #include #include #include +#include MODULE_AUTHOR("Abhay Salunke "); MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems"); @@ -181,6 +182,11 @@ static int create_packet(void *data, size_t length) packet_data_temp_buf = NULL; } } + /* + * set to uncachable or it may never get written back before reboot + */ + set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum); + spin_lock(&rbu_data.lock); newpacket->data = packet_data_temp_buf; @@ -349,6 +355,8 @@ static void packet_empty_list(void) * to make sure there are no stale RBU packets left in memory */ memset(newpacket->data, 0, rbu_data.packetsize); + set_memory_wb((unsigned long)newpacket->data, + 1 << newpacket->ordernum); free_pages((unsigned long) newpacket->data, newpacket->ordernum); kfree(newpacket); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index f2483548cde92d692f748d6a9c7da0cbf98274a3..0dc0c78f1fdb2db40fd9dedb4b179999ac98fc60 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -407,7 +407,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) bytes = ~0ull; else if (size & 0x8000) bytes = (u64)(size & 0x7fff) << 10; - else if (size != 0x7fff) + else if (size != 0x7fff || dm->length < 0x20) bytes = (u64)size << 20; else bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 89110dfc7127c5f13d3b8fbdf29a10507d6f16d4..65ae56c6c040fe4dfad5dae71ea200586dfdbd42 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -54,7 +54,7 @@ config EFI_RUNTIME_MAP config EFI_FAKE_MEMMAP bool "Enable EFI fake memory map" - depends on EFI && X86 + depends on EFI && (X86 || ARM64) default n help Saying Y here will enable "efi_fake_mem" boot option. 
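The dmi_scan hunk above stops trusting the SMBIOS Type 17 extended-size field when the record is too short to contain it. A rough, self-contained sketch of that size decoding (offsets per the SMBIOS spec; the helper name is made up, not the kernel's):

	/* Decode an SMBIOS "Memory Device" (Type 17) size into bytes - sketch only. */
	static u64 memdev_size_bytes(const u8 *rec, u8 rec_len)
	{
		u16 size = rec[0x0C] | (rec[0x0D] << 8);	/* "Size" word at offset 0x0C */

		if (size == 0)					/* no module installed */
			return 0;
		if (size == 0xffff)				/* size unknown */
			return ~0ull;
		if (size & 0x8000)				/* bit 15 set: value is in KiB */
			return (u64)(size & 0x7fff) << 10;
		if (size != 0x7fff || rec_len < 0x20)		/* value is in MiB */
			return (u64)size << 20;
		/* 0x7fff: real size is the 32-bit "Extended Size" (MiB) at offset 0x1C */
		return ((u64)rec[0x1C] | ((u64)rec[0x1D] << 8) |
			((u64)rec[0x1E] << 16) | ((u64)rec[0x1F] << 24)) << 20;
	}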
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 388a929baf95d1e1107ab4b0ab20516dea3cfdf7..1a6a77df8a5e8aea45f3cbc2bac9c5d0883b0edb 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -265,6 +265,10 @@ void __init efi_init(void) (params.mmap & ~PAGE_MASK))); init_screen_info(); + + /* ARM does not permit early mappings to persist across paging_init() */ + if (IS_ENABLED(CONFIG_ARM)) + efi_memmap_unmap(); } static int __init register_gop_device(void) diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 922cfb813109a3c14a88a0bb054a09534e813ec9..a00934d263c519a9d476a57a5bb2388c6041b810 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void) { u64 mapsize; - if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { + if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); return 0; } diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 96688986da56ab48dd24059d1289618a89eaa492..43fefab755242f0e0fc899d196682589b41a576f 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -243,29 +243,6 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, return ret; } -/** - * efi_capsule_flush - called by file close or file flush - * @file: file pointer - * @id: not used - * - * If a capsule is being partially uploaded then calling this function - * will be treated as upload termination and will free those completed - * buffer pages and -ECANCELED will be returned. - **/ -static int efi_capsule_flush(struct file *file, fl_owner_t id) -{ - int ret = 0; - struct capsule_info *cap_info = file->private_data; - - if (cap_info->index > 0) { - pr_err("capsule upload not complete\n"); - efi_free_all_buff_pages(cap_info); - ret = -ECANCELED; - } - - return ret; -} - /** * efi_capsule_release - called by file close * @inode: not used @@ -278,6 +255,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) { struct capsule_info *cap_info = file->private_data; + if (cap_info->index > 0 && + (cap_info->header.headersize == 0 || + cap_info->count < cap_info->total_size)) { + pr_err("capsule upload not complete\n"); + efi_free_all_buff_pages(cap_info); + } + kfree(cap_info->pages); kfree(cap_info->phys); kfree(file->private_data); @@ -309,7 +293,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } - cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL); if (!cap_info->phys) { kfree(cap_info->pages); kfree(cap_info); @@ -325,7 +309,6 @@ static const struct file_operations efi_capsule_fops = { .owner = THIS_MODULE, .open = efi_capsule_open, .write = efi_capsule_write, - .flush = efi_capsule_flush, .release = efi_capsule_release, .llseek = no_llseek, }; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index a7902fccdcfa4c5f39e42d6a4561d46a5a2a017b..97da083afd324d7100257eb957108f6395f21a0b 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -275,8 +275,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) return 0; - n = 0; - len = CPER_REC_LEN - 1; + len = CPER_REC_LEN; dmi_memdev_name(mem->mem_dev_handle, &bank, &device); 
if (bank && device) n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); @@ -285,7 +284,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) "DIMM location: not present. DMI handle: 0x%.4x ", mem->mem_dev_handle); - msg[n] = '\0'; return n; } @@ -393,7 +391,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, pcie->device_id.vendor_id, pcie->device_id.device_id); p = pcie->device_id.class_code; - printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); + printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]); } if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, @@ -402,6 +400,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk( "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); + + /* Fatal errors call __ghes_panic() before AER handler prints this */ + if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && + (gdata->error_severity & CPER_SEV_FATAL)) { + struct aer_capability_regs *aer; + + aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", + pfx, aer->uncor_status, aer->uncor_mask); + printk("%saer_uncor_severity: 0x%08x\n", + pfx, aer->uncor_severity); + printk("%sTLP Header: %08x %08x %08x %08x\n", pfx, + aer->header_log.dw0, aer->header_log.dw1, + aer->header_log.dw2, aer->header_log.dw3); + } } static void cper_print_tstamp(const char *pfx, @@ -546,19 +559,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header); int cper_estatus_check(const struct acpi_hest_generic_status *estatus) { struct acpi_hest_generic_data *gdata; - unsigned int data_len, gedata_len; + unsigned int data_len, record_size; int rc; rc = cper_estatus_check_header(estatus); if (rc) return rc; + data_len = estatus->data_length; apei_estatus_for_each_section(estatus, gdata) { - gedata_len = acpi_hest_get_error_length(gdata); - if (gedata_len > data_len - acpi_hest_get_size(gdata)) + if (sizeof(struct acpi_hest_generic_data) > data_len) + return -EINVAL; + + record_size = acpi_hest_get_record_size(gdata); + if (record_size > data_len) return -EINVAL; - data_len -= acpi_hest_get_record_size(gdata); + + data_len -= record_size; } if (data_len) return -EINVAL; diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index b22ccfb0c991bde8c6d222d0a44ce527e304c7ae..2bf4d31f4967566e2cb5e72853ddad0876e3aad1 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c @@ -50,11 +50,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table) bgrt->version); goto out; } - if (bgrt->status & 0xfe) { - pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", - bgrt->status); - goto out; - } if (bgrt->image_type != 0) { pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", bgrt->image_type); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index cfe87b465819f8ef42d0da6a60ca2a2294433ab7..0f7d97917197d5a03053548bcd253f2e9cb4c660 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - !pstore_cannot_block_path(record->reason), - record->size, record->psi->buf); + 
preemptible(), record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS) efivar_run_worker(); @@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void) return -ENOMEM; efi_pstore_info.bufsize = 1024; - spin_lock_init(&efi_pstore_info.buf_lock); if (pstore_register(&efi_pstore_info)) { kfree(efi_pstore_info.buf); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2a29dd9c986d4e2df7663aa1305df8adbaa59739..e71037917dd5b24cf63960bd4b0518b85bd5d492 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -52,7 +53,8 @@ struct efi __read_mostly efi = { .properties_table = EFI_INVALID_TABLE_ADDR, .mem_attr_table = EFI_INVALID_TABLE_ADDR, .rng_seed = EFI_INVALID_TABLE_ADDR, - .tpm_log = EFI_INVALID_TABLE_ADDR + .tpm_log = EFI_INVALID_TABLE_ADDR, + .mem_reserve = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -281,6 +283,9 @@ static __init int efivar_ssdt_load(void) void *data; int ret; + if (!efivar_ssdt[0]) + return 0; + ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); list_for_each_entry_safe(entry, aux, &entries, list) { @@ -356,7 +361,8 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_destroy_wq; } error = generic_ops_register(); @@ -392,11 +398,38 @@ static int __init efisubsys_init(void) generic_ops_unregister(); err_put: kobject_put(efi_kobj); +err_destroy_wq: + if (efi_rts_wq) + destroy_workqueue(efi_rts_wq); + return error; } subsys_initcall(efisubsys_init); +void __init efi_find_mirror(void) +{ + efi_memory_desc_t *md; + u64 mirror_size = 0, total_size = 0; + + if (!mirrored_kernelcore) + return; + + for_each_efi_memory_desc(md) { + unsigned long long start = md->phys_addr; + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; + + total_size += size; + if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { + memblock_mark_mirror(start, size); + mirror_size += size; + } + } + if (mirror_size) + pr_info("Memory: %lldM/%lldM mirrored memory\n", + mirror_size>>20, total_size>>20); +} + /* * Find the efi memory descriptor for a given physical address. Given a * physical address, determine if it exists within an EFI Memory Map entry, @@ -484,6 +517,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table}, {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed}, {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log}, + {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve}, {NULL_GUID, NULL, NULL}, }; @@ -591,6 +625,41 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, early_memunmap(tbl, sizeof(*tbl)); } + if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { + unsigned long prsv = efi.mem_reserve; + + while (prsv) { + struct linux_efi_memreserve *rsv; + u8 *p; + int i; + + /* + * Just map a full page: that is what we will get + * anyway, and it permits us to map the entire entry + * before knowing its size. 
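To make the arithmetic that follows concrete (made-up address, assuming 4 KiB pages): if prsv were 0x87654234, the code maps the page at ALIGN_DOWN(0x87654234, 0x1000) == 0x87654000 and then locates the entry at p + (0x87654234 % 0x1000), i.e. p + 0x234, so the whole entry can be read before its size is known.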
+ */ + p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE), + PAGE_SIZE); + if (p == NULL) { + pr_err("Could not map UEFI memreserve entry!\n"); + return -ENOMEM; + } + + rsv = (void *)(p + prsv % PAGE_SIZE); + + /* reserve the entry itself */ + memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); + + for (i = 0; i < atomic_read(&rsv->count); i++) { + memblock_reserve(rsv->entry[i].base, + rsv->entry[i].size); + } + + prsv = rsv->next; + early_memunmap(p, PAGE_SIZE); + } + } + return 0; } @@ -937,6 +1006,109 @@ bool efi_is_table_address(unsigned long phys_addr) return false; } +static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); +static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; + +static int __init efi_memreserve_map_root(void) +{ + if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) + return -ENODEV; + + efi_memreserve_root = memremap(efi.mem_reserve, + sizeof(*efi_memreserve_root), + MEMREMAP_WB); + if (WARN_ON_ONCE(!efi_memreserve_root)) + return -ENOMEM; + return 0; +} + +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? request_resource(parent, res) : 0; +} + +int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) +{ + struct linux_efi_memreserve *rsv; + unsigned long prsv; + int rc, index; + + if (efi_memreserve_root == (void *)ULONG_MAX) + return -ENODEV; + + if (!efi_memreserve_root) { + rc = efi_memreserve_map_root(); + if (rc) + return rc; + } + + /* first try to find a slot in an existing linked list entry */ + for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); + index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); + if (index < rsv->size) { + rsv->entry[index].base = addr; + rsv->entry[index].size = size; + + memunmap(rsv); + return efi_mem_reserve_iomem(addr, size); + } + memunmap(rsv); + } + + /* no slot found - allocate a new linked list entry */ + rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC); + if (!rsv) + return -ENOMEM; + + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + + /* + * The memremap() call above assumes that a linux_efi_memreserve entry + * never crosses a page boundary, so let's ensure that this remains true + * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by + * using SZ_4K explicitly in the size calculation below. 
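Assuming EFI_MEMRESERVE_COUNT() is simply the inverse of the EFI_MEMRESERVE_SIZE() used above (how many entry[] slots fit after the struct linux_efi_memreserve header), the capacity/claim scheme in this function boils down to roughly:

	/* sketch under that assumption; not the literal macro definitions */
	rsv->size = (SZ_4K - sizeof(struct linux_efi_memreserve)) /
		    sizeof(rsv->entry[0]);	/* slots available in this page */
	/*
	 * The lookup loop earlier claims a slot with
	 *   index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
	 * and only uses it if index < rsv->size.
	 */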
+ */ + rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); + atomic_set(&rsv->count, 1); + rsv->entry[0].base = addr; + rsv->entry[0].size = size; + + spin_lock(&efi_mem_reserve_persistent_lock); + rsv->next = efi_memreserve_root->next; + efi_memreserve_root->next = __pa(rsv); + spin_unlock(&efi_mem_reserve_persistent_lock); + + return efi_mem_reserve_iomem(addr, size); +} + +static int __init efi_memreserve_root_init(void) +{ + if (efi_memreserve_root) + return 0; + if (efi_memreserve_map_root()) + efi_memreserve_root = (void *)ULONG_MAX; + return 0; +} +early_initcall(efi_memreserve_root_init); + #ifdef CONFIG_KEXEC static int update_efi_random_seed(struct notifier_block *nb, unsigned long code, void *unused) diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9bd4e1fafe6e0f4da4597ed66a978bd08..8cdd01117731c98b4accb495a998758fed3faf3a 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -139,13 +139,16 @@ static ssize_t efivar_attr_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) @@ -172,13 +175,16 @@ static ssize_t efivar_size_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; str += sprintf(str, "0x%lx\n", var->DataSize); @@ -189,12 +195,15 @@ static ssize_t efivar_data_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; memcpy(buf, var->Data, var->DataSize); @@ -314,14 +323,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; struct compat_efi_variable *compat; + unsigned long datasize = sizeof(var->Data); size_t size; + int ret; if (!entry || !buf) return 0; - var->DataSize = 1024; - if (efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data)) + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); + var->DataSize = datasize; + if (ret) return -EIO; if (is_compat()) { @@ -572,8 +583,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (ret) + if (ret) { + kobject_put(&new_var->kobj); return ret; + } kobject_uevent(&new_var->kobj, KOBJ_ADD); if (efivar_entry_add(new_var, &efivar_sysfs_list)) { diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 5d06bd247d0731a652424d504293f028d291c1b8..2f6204b2fdd38a66db2ed4bfb1e0c2066d44d12c 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ 
-180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 14c40a7750d1d4017edbc386d4e02df7949149fe..d9845099635e08919414c17ec9421b1d83e6cd1d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -9,14 +9,18 @@ cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ - -mno-mmx -mno-sse -fshort-wchar + -mno-mmx -mno-sse -fshort-wchar \ + -Wno-pointer-sign \ + $(call cc-disable-warning, address-of-packed-member) \ + $(call cc-disable-warning, gnu) # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly # disable the stackleak plugin cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \ $(DISABLE_STACKLEAK_PLUGIN) cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ - -fno-builtin -fpic -mno-single-pic-base + -fno-builtin -fpic \ + $(call cc-option,-mno-single-pic-base) cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 6920033de6d411689719e64226112a19a8c8021d..296b3211f689dd46598d8335d1b6bcb85a56bf81 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -69,6 +69,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) return si; } +void install_memreserve_table(efi_system_table_t *sys_table_arg) +{ + struct linux_efi_memreserve *rsv; + efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; + efi_status_t status; + + status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + return; + } + + rsv->next = 0; + rsv->size = 0; + atomic_set(&rsv->count, 0); + + status = efi_call_early(install_configuration_table, + &memreserve_table_guid, + rsv); + if (status != EFI_SUCCESS) + pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); +} + + /* * This function handles the architcture specific differences between arm and * arm64 regarding where the kernel image must be loaded and any memory that @@ -235,6 +260,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, } } + install_memreserve_table(sys_table); + new_fdt_addr = fdt_addr; status = allocate_new_fdt_and_exit_boot(sys_table, handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), @@ -340,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, paddr = in->phys_addr; size = in->num_pages * EFI_PAGE_SIZE; + if (novamap()) { + in->virt_addr = in->phys_addr; + continue; + } + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index e94975f4655bdd50d1db4b4a8be728d266d7d02b..442f51c2a53db154f971bdc077077e0fca2f8f22 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -34,6 +34,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; static int 
__section(.data) __nokaslr; static int __section(.data) __quiet; +static int __section(.data) __novamap; int __pure nokaslr(void) { @@ -43,6 +44,10 @@ int __pure is_quiet(void) { return __quiet; } +int __pure novamap(void) +{ + return __novamap; +} #define EFI_MMAP_NR_SLACK_SLOTS 8 @@ -482,6 +487,11 @@ efi_status_t efi_parse_options(char const *cmdline) __chunk_size = -1UL; } + if (!strncmp(str, "novamap", 7)) { + str += strlen("novamap"); + __novamap = 1; + } + /* Group words together, delimited by "," */ while (*str && *str != ' ' && *str != ',') str++; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 32799cf039ef1562afc8f124ab7eb9d64342083c..337b52c4702c0f69b13477897e3c2ef0dcf9b976 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -27,6 +27,7 @@ extern int __pure nokaslr(void); extern int __pure is_quiet(void); +extern int __pure novamap(void); #define pr_efi(sys_table, msg) do { \ if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 8830fa601e45d9a1b1094419cd1ec66f41a25e49..dba296a44f4ec27dda6f73a00d716837c9aa106c 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, return efi_status; } } + + /* shrink the FDT back to its minimum size */ + fdt_pack(fdt); + return EFI_SUCCESS; fdt_set_fail: @@ -323,6 +327,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; + if (novamap()) + return EFI_SUCCESS; + /* Install the new virtual address map */ svam = sys_table->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 24c461dea7afb146a509e097b581aa2fdaede132..fd8053f9556e72090d595a736621333aa5be546a 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -85,30 +85,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -__gop_query32(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_32 *gop32, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_32 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop32->mode; - mode = (struct efi_graphics_output_protocol_mode_32 *)m; - query_mode = (void *)(unsigned long)gop32->query_mode; - - status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - static efi_status_t setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, efi_guid_t *proto, unsigned long size, void **gop_handle) @@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u32 *handles = (u32 *)(unsigned long)gop_handle; int i; @@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u32); for (i = 0; i < nr_gops; i++) { + struct 
efi_graphics_output_protocol_mode_32 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(sys_table_arg, gop32, &info, &size, - &current_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop32->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? */ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; -} - -static efi_status_t -__gop_query64(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_64 *gop64, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_64 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop64->mode; - mode = (struct efi_graphics_output_protocol_mode_64 *)m; - query_mode = (void *)(unsigned long)gop64->query_mode; - - status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - *fb_base = mode->frame_buffer_base; - return status; + return EFI_SUCCESS; } static efi_status_t @@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u64 *handles = (u64 *)(unsigned long)gop_handle; int i; @@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u64); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_64 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(sys_table_arg, gop64, &info, &size, - &current_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop64->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs?
*/ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; + + return EFI_SUCCESS; } /* diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 8986757eafafa29dbf287b9e45b4de592cbc9570..e0889922cc6d731d78170436c374b7bf94e35811 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -69,11 +69,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) return false; } - if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) { - pr_warn("Entry attributes invalid: RO and XP bits both cleared\n"); - return false; - } - if (PAGE_SIZE > EFI_PAGE_SIZE && (!PAGE_ALIGNED(in->phys_addr) || !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) { @@ -94,7 +89,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) { + if (md->virt_addr == 0 && md->phys_addr != 0) { /* no virtual mapping has been installed by the stub */ break; } diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 5fc70520e04c4b57cd047c9dc6c0f5f7fab590f3..16c160569e1a124031b0da519c398b8e47fcb1d0 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -29,6 +29,22 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) return PFN_PHYS(page_to_pfn(p)); } +void __init efi_print_memmap(void) +{ + efi_memory_desc_t *md; + int i = 0; + + for_each_efi_memory_desc(md) { + char buf[64]; + + pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n", + i++, efi_md_typeattr_format(buf, sizeof(buf), md), + md->phys_addr, + md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1, + (md->num_pages >> (20 - EFI_PAGE_SHIFT))); + } +} + /** * efi_memmap_alloc - Allocate memory for the EFI memory map * @num_entries: Number of entries in the allocated map. 
@@ -118,6 +134,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data) void __init efi_memmap_unmap(void) { + if (!efi_enabled(EFI_MEMMAP)) + return; + if (!efi.memmap.late) { unsigned long size; diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index aa66cbf23512af3c6e9195cd5b3471533a8f8e42..cec6558bf33a6c16a3c6340993487426a761382f 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -115,11 +115,24 @@ struct efi_runtime_work { efi_rts_work.status; \ }) +#ifndef arch_efi_save_flags +#define arch_efi_save_flags(state_flags) local_save_flags(state_flags) +#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags) +#endif + +unsigned long efi_call_virt_save_flags(void) +{ + unsigned long flags; + + arch_efi_save_flags(flags); + return flags; +} + void efi_call_virt_check_flags(unsigned long flags, const char *call) { unsigned long cur_flags, mismatch; - local_save_flags(cur_flags); + cur_flags = efi_call_virt_save_flags(); mismatch = flags ^ cur_flags; if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK)) @@ -128,7 +141,7 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE); pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n", flags, cur_flags, call); - local_irq_restore(flags); + arch_efi_restore_flags(flags); } /* @@ -172,6 +185,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) */ static DEFINE_SEMAPHORE(efi_runtime_lock); +/* + * Expose the EFI runtime lock to the UV platform + */ +#ifdef CONFIG_X86_UV +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); +#endif + /* * Calls the appropriate efi_runtime_service() with the appropriate * arguments. 
@@ -423,7 +443,7 @@ static void virt_efi_reset_system(int reset_type, unsigned long data_size, efi_char16_t *data) { - if (down_interruptible(&efi_runtime_lock)) { + if (down_trylock(&efi_runtime_lock)) { pr_warn("failed to invoke the reset_system() runtime service:\n" "could not get exclusive access to the firmware\n"); return; diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 41c48a1e8baaa8e46e9609c0b76a2698b5ac5c49..04ca155fabfaa555ebd2abf34432fffa68fc3e2f 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -68,7 +68,7 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src, return 0; } - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return -EFAULT; buf = memdup_user(src, len); @@ -89,7 +89,7 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src, static inline int get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len) { - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return -EFAULT; *len = user_ucs2_strsize(src); @@ -116,7 +116,7 @@ copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src) { size_t len; - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return -EFAULT; len = user_ucs2_strsize(src); @@ -140,7 +140,7 @@ copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len) if (!src) return 0; - if (!access_ok(VERIFY_WRITE, dst, 1)) + if (!access_ok(dst, 1)) return -EFAULT; return copy_to_user(dst, src, len); diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 9336ffdf6e2c6482a0ecc07b3423272292b65320..fceaafd67ec61d3f80a8a584e18b7d132a1592ff 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable); static efi_status_t check_var_size(u32 attributes, unsigned long size) { - const struct efivar_operations *fops = __efivars->ops; + const struct efivar_operations *fops; + + if (!__efivars) + return EFI_UNSUPPORTED; + + fops = __efivars->ops; if (!fops->query_variable_store) return EFI_UNSUPPORTED; @@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size) static efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) { - const struct efivar_operations *fops = __efivars->ops; + const struct efivar_operations *fops; + + if (!__efivars) + return EFI_UNSUPPORTED; + + fops = __efivars->ops; if (!fops->query_variable_store) return EFI_UNSUPPORTED; @@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; unsigned long variable_name_size = 1024; efi_char16_t *variable_name; efi_status_t status; efi_guid_t vendor_guid; int err = 0; + if (!__efivars) + return -EFAULT; + + ops = __efivars->ops; + variable_name = kzalloc(variable_name_size, GFP_KERNEL); if (!variable_name) { printk(KERN_ERR "efivars: Memory allocation failed.\n"); @@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry) */ int __efivar_entry_delete(struct efivar_entry *entry) { - const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - status = ops->set_variable(entry->var.VariableName, - &entry->var.VendorGuid, - 0, 0, NULL); + if (!__efivars) + return -EINVAL; + + 
status = __efivars->ops->set_variable(entry->var.VariableName, + &entry->var.VendorGuid, + 0, 0, NULL); return efi_status_to_err(status); } @@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete); */ int efivar_entry_delete(struct efivar_entry *entry) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_status_t status; if (down_interruptible(&efivars_lock)) return -EINTR; + if (!__efivars) { + up(&efivars_lock); + return -EINVAL; + } + ops = __efivars->ops; status = ops->set_variable(entry->var.VariableName, &entry->var.VendorGuid, 0, 0, NULL); @@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete); int efivar_entry_set(struct efivar_entry *entry, u32 attributes, unsigned long size, void *data, struct list_head *head) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_status_t status; efi_char16_t *name = entry->var.VariableName; efi_guid_t vendor = entry->var.VendorGuid; if (down_interruptible(&efivars_lock)) return -EINTR; + + if (!__efivars) { + up(&efivars_lock); + return -EINVAL; + } + ops = __efivars->ops; if (head && efivar_entry_find(name, vendor, head, false)) { up(&efivars_lock); return -EEXIST; @@ -687,12 +715,17 @@ static int efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, u32 attributes, unsigned long size, void *data) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_status_t status; if (down_trylock(&efivars_lock)) return -EBUSY; + if (!__efivars) { + up(&efivars_lock); + return -EINVAL; + } + status = check_var_size_nonblocking(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { @@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, return -ENOSPC; } + ops = __efivars->ops; status = ops->set_variable_nonblocking(name, &vendor, attributes, size, data); @@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, bool block, unsigned long size, void *data) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_status_t status; + if (!__efivars) + return -EINVAL; + + ops = __efivars->ops; if (!ops->query_variable_store) return -ENOSYS; @@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find); */ int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_status_t status; *size = 0; if (down_interruptible(&efivars_lock)) return -EINTR; + if (!__efivars) { + up(&efivars_lock); + return -EINVAL; + } + ops = __efivars->ops; status = ops->get_variable(entry->var.VariableName, &entry->var.VendorGuid, NULL, size, NULL); up(&efivars_lock); @@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size); int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, unsigned long *size, void *data) { - const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); + if (!__efivars) + return -EINVAL; + + status = __efivars->ops->get_variable(entry->var.VariableName, + &entry->var.VendorGuid, + attributes, size, data); return efi_status_to_err(status); } @@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get); int efivar_entry_get(struct efivar_entry *entry, u32 
*attributes, unsigned long *size, void *data) { - const struct efivar_operations *ops = __efivars->ops; efi_status_t status; if (down_interruptible(&efivars_lock)) return -EINTR; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); + + if (!__efivars) { + up(&efivars_lock); + return -EINVAL; + } + + status = __efivars->ops->get_variable(entry->var.VariableName, + &entry->var.VendorGuid, + attributes, size, data); up(&efivars_lock); return efi_status_to_err(status); @@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get); int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, unsigned long *size, void *data, bool *set) { - const struct efivar_operations *ops = __efivars->ops; + const struct efivar_operations *ops; efi_char16_t *name = entry->var.VariableName; efi_guid_t *vendor = &entry->var.VendorGuid; efi_status_t status; @@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, if (down_interruptible(&efivars_lock)) return -EINTR; + if (!__efivars) { + err = -EINVAL; + goto out; + } + /* * Ensure that the available space hasn't shrunk below the safe level */ @@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, } } + ops = __efivars->ops; + status = ops->set_variable(name, vendor, attributes, *size, data); if (status != EFI_SUCCESS) { err = efi_status_to_err(status); diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 19db5709ae2886efeafd1df94353e9f85c3fee6a..898bb9abc41f153204284a625edea91e4631b3fd 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -110,7 +110,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr) if (strncmp(header.signature, "LBIO", sizeof(header.signature))) { pr_warn("coreboot_table: coreboot table missing or corrupt!\n"); - return -ENODEV; + ret = -ENODEV; + goto out; } ptr_entry = (void *)ptr_header + header.header_bytes; @@ -137,7 +138,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr) ptr_entry += entry.size; } - +out: + iounmap(ptr); return ret; } EXPORT_SYMBOL(coreboot_table_init); @@ -146,7 +148,6 @@ int coreboot_table_exit(void) { if (ptr_header) { bus_unregister(&coreboot_bus_type); - iounmap(ptr_header); ptr_header = NULL; } diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c8f169bf2e27dc0264785f402759a3d15f307ef0..4fe66fb4dce4c212ffc819ade7e80fda15553af9 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -343,9 +343,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name, memcpy(data, gsmi_dev.data_buf->start, *data_size); /* All variables are have the following attributes */ - *attr = EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS; + if (attr) + *attr = EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS; } spin_unlock_irqrestore(&gsmi_dev.lock, flags); @@ -480,11 +481,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, if (count < sizeof(u32)) return -EINVAL; param.type = *(u32 *)buf; - count -= sizeof(u32); buf += sizeof(u32); /* The remaining buffer is the data payload */ - if (count > gsmi_dev.data_buf->length) + if ((count - sizeof(u32)) > gsmi_dev.data_buf->length) return -EINVAL; param.data_len = count - sizeof(u32); @@ -504,7 +504,7 @@ static ssize_t eventlog_write(struct file *filp, 
struct kobject *kobj, spin_unlock_irqrestore(&gsmi_dev.lock, flags); - return rc; + return (rc == 0) ? count : rc; } diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 1aa67bb5d8c0b02f10f49f0f8cdf6a17aca56955..ebd3ffc7ca0063f0ef9b525334e543578b45e4c7 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -100,8 +100,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len) return VPD_OK; } -static int vpd_section_attrib_add(const u8 *key, s32 key_len, - const u8 *value, s32 value_len, +static int vpd_section_attrib_add(const u8 *key, u32 key_len, + const u8 *value, u32 value_len, void *arg) { int ret; diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c index 943acaa8aa765f8ee433954306822f6149b19f94..6c7ab2ba85d2fa8942361b563b222f73d54e18e9 100644 --- a/drivers/firmware/google/vpd_decode.c +++ b/drivers/firmware/google/vpd_decode.c @@ -19,8 +19,8 @@ #include "vpd_decode.h" -static int vpd_decode_len(const s32 max_len, const u8 *in, - s32 *length, s32 *decoded_len) +static int vpd_decode_len(const u32 max_len, const u8 *in, + u32 *length, u32 *decoded_len) { u8 more; int i = 0; @@ -40,18 +40,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in, } while (more); *decoded_len = i; + return VPD_OK; +} + +static int vpd_decode_entry(const u32 max_len, const u8 *input_buf, + u32 *_consumed, const u8 **entry, u32 *entry_len) +{ + u32 decoded_len; + u32 consumed = *_consumed; + + if (vpd_decode_len(max_len - consumed, &input_buf[consumed], + entry_len, &decoded_len) != VPD_OK) + return VPD_FAIL; + if (max_len - consumed < decoded_len) + return VPD_FAIL; + + consumed += decoded_len; + *entry = input_buf + consumed; + + /* entry_len is untrusted data and must be checked again. 
*/ + if (max_len - consumed < *entry_len) + return VPD_FAIL; + consumed += *entry_len; + *_consumed = consumed; return VPD_OK; } -int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, +int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, vpd_decode_callback callback, void *callback_arg) { int type; - int res; - s32 key_len; - s32 value_len; - s32 decoded_len; + u32 key_len; + u32 value_len; const u8 *key; const u8 *value; @@ -66,26 +87,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, case VPD_TYPE_STRING: (*consumed)++; - /* key */ - res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], - &key_len, &decoded_len); - if (res != VPD_OK || *consumed + decoded_len >= max_len) + if (vpd_decode_entry(max_len, input_buf, consumed, &key, + &key_len) != VPD_OK) return VPD_FAIL; - *consumed += decoded_len; - key = &input_buf[*consumed]; - *consumed += key_len; - - /* value */ - res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], - &value_len, &decoded_len); - if (res != VPD_OK || *consumed + decoded_len > max_len) + if (vpd_decode_entry(max_len, input_buf, consumed, &value, + &value_len) != VPD_OK) return VPD_FAIL; - *consumed += decoded_len; - value = &input_buf[*consumed]; - *consumed += value_len; - if (type == VPD_TYPE_STRING) return callback(key, key_len, value, value_len, callback_arg); diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h index be3d62c5ca2fb967e51f7ae292627f014ac90970..e921456b8e78a34dcf1a280481c37377b09802ad 100644 --- a/drivers/firmware/google/vpd_decode.h +++ b/drivers/firmware/google/vpd_decode.h @@ -33,8 +33,8 @@ enum { }; /* Callback for vpd_decode_string to invoke. */ -typedef int vpd_decode_callback(const u8 *key, s32 key_len, - const u8 *value, s32 value_len, +typedef int vpd_decode_callback(const u8 *key, u32 key_len, + const u8 *value, u32 value_len, void *arg); /* @@ -52,7 +52,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len, * If one entry is successfully decoded, sends it to callback and returns the * result. 
*/ -int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, +int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, vpd_decode_callback callback, void *callback_arg); #endif /* __VPD_DECODE_H */ diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 6bc8e6640d713eb890feb37b95083b1303f364a5..966aef334c420f3015e475c349dae84adb151f41 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBFT_ISCSI_VERSION); +#ifndef CONFIG_ISCSI_IBFT_FIND +struct acpi_table_ibft *ibft_addr; +#endif + struct ibft_hdr { u8 id; u8 version; @@ -542,6 +546,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type) case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_CHAP_TYPE: rc = S_IRUGO; + break; case ISCSI_BOOT_TGT_NAME: if (tgt->tgt_name_len) rc = S_IRUGO; diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index c80ec1d03274413863fb8e3f917f26daa8f868c2..2b0283b06dba31866aaa6a3d2565dc6ed982959f 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -63,6 +63,7 @@ struct psci_operations psci_ops = { .conduit = PSCI_CONDUIT_NONE, .smccc_version = SMCCC_VERSION_1_0, }; +EXPORT_SYMBOL(psci_ops); typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c index 3469436579622b957fcf805aff1edb0da41adc1c..cbd53cb1b2d4783bbacca41f4ada309f24af642f 100644 --- a/drivers/firmware/psci_checker.c +++ b/drivers/firmware/psci_checker.c @@ -366,16 +366,16 @@ static int suspend_test_thread(void *arg) for (;;) { /* Needs to be set first to avoid missing a wakeup. */ set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) { - __set_current_state(TASK_RUNNING); + if (kthread_should_park()) break; - } schedule(); } pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", cpu, nb_suspend, nb_shallow_sleep, nb_err); + kthread_parkme(); + return nb_err; } @@ -440,8 +440,10 @@ static int suspend_tests(void) /* Stop and destroy all threads, get return status. 
*/ - for (i = 0; i < nb_threads; ++i) + for (i = 0; i < nb_threads; ++i) { + err += kthread_park(threads[i]); err += kthread_stop(threads[i]); + } out: cpuidle_resume_and_unlock(); kfree(threads); diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 688525dd4aee599548c52502168b3f04f62388fb..367e727a8f93ed24aaea8469c3af42dcc768df41 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -158,7 +158,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, kfree(args_virt); } - if (res->a0 < 0) + if ((long)res->a0 < 0) return qcom_scm_remap_error(res->a0); return 0; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index e778af766fae3c2c88d20e8f7ae6f47f9114935c..98c987188835bcf8e5fb0e39ef3218fd6cab7370 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; phys_addr_t ptr_phys; + dma_addr_t ptr_dma; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; @@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); if (!ptr) return -ENOMEM; + ptr_phys = dma_to_phys(__scm->dev, ptr_dma); /* Fill source vmid detail */ src = ptr; @@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys); + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d.\n", ret); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8f5229da95bbaf6fcc9fc55349d27..6945c3c966375a2a7c96991d3094fad5dfd194c7 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index a200a217461191b796105682e599582bb736683e..44eb99807e337c20b6e72c7ec712593ea85629dd 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf)) @@ -21,8 +22,6 @@ #define MBOX_DATA28(msg) ((msg) & ~0xf) #define MBOX_CHAN_PROPERTY 8 -#define MAX_RPI_FW_PROP_BUF_SIZE 32 - static struct platform_device *rpi_hwmon; struct rpi_firmware { @@ -144,28 +143,30 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property_list); int rpi_firmware_property(struct rpi_firmware 
*fw, u32 tag, void *tag_data, size_t buf_size) { - /* Single tags are very small (generally 8 bytes), so the - * stack should be safe. - */ - u8 data[sizeof(struct rpi_firmware_property_tag_header) + - MAX_RPI_FW_PROP_BUF_SIZE]; - struct rpi_firmware_property_tag_header *header = - (struct rpi_firmware_property_tag_header *)data; + struct rpi_firmware_property_tag_header *header; int ret; - if (WARN_ON(buf_size > sizeof(data) - sizeof(*header))) - return -EINVAL; + /* Some mailboxes can use over 1k bytes. Rather than checking + * size and using stack or kmalloc depending on requirements, + * just use kmalloc. Mailboxes don't get called enough to worry + * too much about the time taken in the allocation. + */ + void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + header = data; header->tag = tag; header->buf_size = buf_size; header->req_resp_size = 0; - memcpy(data + sizeof(struct rpi_firmware_property_tag_header), - tag_data, buf_size); + memcpy(data + sizeof(*header), tag_data, buf_size); + + ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header)); + + memcpy(tag_data, data + sizeof(*header), buf_size); - ret = rpi_firmware_property_list(fw, &data, buf_size + sizeof(*header)); - memcpy(tag_data, - data + sizeof(struct rpi_firmware_property_tag_header), - buf_size); + kfree(data); return ret; } diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c index f395dec271131a435240bcdd6a8d84236240a8dd..a6e62a793fbe608be4ea9689e304ae51063d581f 100644 --- a/drivers/firmware/scpi_pm_domain.c +++ b/drivers/firmware/scpi_pm_domain.c @@ -27,7 +27,6 @@ struct scpi_pm_domain { struct generic_pm_domain genpd; struct scpi_ops *ops; u32 domain; - char name[30]; }; /* @@ -121,8 +120,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev) scpi_pd->domain = i; scpi_pd->ops = scpi_ops; - sprintf(scpi_pd->name, "%s.%d", np->name, i); - scpi_pd->genpd.name = scpi_pd->name; + scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL, + "%s.%d", np->name, i); + if (!scpi_pd->genpd.name) { + dev_err(dev, "Failed to allocate genpd name:%s.%d\n", + np->name, i); + continue; + } scpi_pd->genpd.power_off = scpi_pd_power_off; scpi_pd->genpd.power_on = scpi_pd_power_on; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 7fa744793bc5c900eb5d6998848aba233bd1875f..5e35a66ed0ae974b3133899e813442838250d155 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -463,9 +463,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info) struct ti_sci_xfer *xfer; int ret; - /* No need to setup flags since it is expected to respond */ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, - 0x0, sizeof(struct ti_sci_msg_hdr), + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(struct ti_sci_msg_hdr), sizeof(*rev_info)); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); @@ -593,9 +593,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle, info = handle_to_ti_sci_info(handle); dev = info->dev; - /* Response is expected, so need of any flags */ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, - 0, sizeof(*req), sizeof(*resp)); + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); dev_err(dev, "Message alloc failed(%d)\n", ret); diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 1ebcef4bab5b8f52b4676ab63d732b42ffd8d349..87337fcfbc0d2bd862127bea2208b8afa143ee81 100644 --- a/drivers/fpga/Kconfig +++ 
b/drivers/fpga/Kconfig @@ -39,6 +39,7 @@ config ALTERA_PR_IP_CORE_PLAT config FPGA_MGR_ALTERA_PS_SPI tristate "Altera FPGA Passive Serial over SPI" depends on SPI + select BITREVERSE help FPGA manager driver support for Altera Arria/Cyclone/Stratix using the passive serial interface over SPI. diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 7fa793672a7a969239329ef1fccc5a2391c764c8..7a42c194b944bb241d13ef74ccf76e0d5f54e10a 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev, struct altera_cvp_conf *conf; struct fpga_manager *mgr; u16 cmd, val; + u32 regval; int ret; /* @@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev, return -ENODEV; } + pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval); + if (!(regval & VSE_CVP_STATUS_CVP_EN)) { + dev_err(&pdev->dev, + "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n", + regval); + return -ENODEV; + } + conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; @@ -468,18 +477,11 @@ static int altera_cvp_probe(struct pci_dev *pdev, goto err_unmap; } - ret = driver_create_file(&altera_cvp_driver.driver, - &driver_attr_chkcfg); - if (ret) { - dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n"); - fpga_mgr_unregister(mgr); - goto err_unmap; - } - return 0; err_unmap: - pci_iounmap(pdev, conf->map); + if (conf->map) + pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); err_disable: cmd &= ~PCI_COMMAND_MEMORY; @@ -493,16 +495,39 @@ static void altera_cvp_remove(struct pci_dev *pdev) struct altera_cvp_conf *conf = mgr->priv; u16 cmd; - driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); fpga_mgr_unregister(mgr); - pci_iounmap(pdev, conf->map); + if (conf->map) + pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); pci_read_config_word(pdev, PCI_COMMAND, &cmd); cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(pdev, PCI_COMMAND, cmd); } -module_pci_driver(altera_cvp_driver); +static int __init altera_cvp_init(void) +{ + int ret; + + ret = pci_register_driver(&altera_cvp_driver); + if (ret) + return ret; + + ret = driver_create_file(&altera_cvp_driver.driver, + &driver_attr_chkcfg); + if (ret) + pr_warn("Can't create sysfs chkcfg file\n"); + + return 0; +} + +static void __exit altera_cvp_exit(void) +{ + driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); + pci_unregister_driver(&altera_cvp_driver); +} + +module_init(altera_cvp_init); +module_exit(altera_cvp_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Anatolij Gustschin "); diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 24b25c62603665aee105e394121110c59f65ad91..4925cae7dcddeb7aa6c8c47ca954f6ca6cf5aefd 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -207,7 +207,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, return -EIO; } - if (!IS_ERR(conf->confd)) { + if (conf->confd) { if (!gpiod_get_raw_value_cansleep(conf->confd)) { dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); return -EIO; @@ -265,10 +265,13 @@ static int altera_ps_probe(struct spi_device *spi) return PTR_ERR(conf->status); } - conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); + conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN); if (IS_ERR(conf->confd)) { - dev_warn(&spi->dev, "Not using confd gpio: %ld\n", - PTR_ERR(conf->confd)); + dev_err(&spi->dev, "Failed to get confd gpio: %ld\n", + PTR_ERR(conf->confd)); +
return PTR_ERR(conf->confd); + } else if (!conf->confd) { + dev_warn(&spi->dev, "Not using confd gpio"); } /* Register manager with unique name */ diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 0e81d33af856a6f51751762bd84fd61baebdf162..53f277cf1f169dc58ce4251c58452ca8963033be 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -45,6 +45,7 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata) static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) { unsigned long locked, lock_limit; + long locked_vm; int ret = 0; /* the task is exiting. */ @@ -53,24 +54,25 @@ static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) down_write(&current->mm->mmap_sem); + locked_vm = atomic_long_read(&current->mm->locked_vm); if (incr) { - locked = current->mm->locked_vm + npages; + locked = locked_vm + npages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - current->mm->locked_vm += npages; + atomic_long_add(npages, &current->mm->locked_vm); } else { - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) - npages = current->mm->locked_vm; - current->mm->locked_vm -= npages; + if (WARN_ON_ONCE(npages > locked_vm)) + npages = locked_vm; + atomic_long_sub(npages, &current->mm->locked_vm); } dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid, incr ? '+' : '-', npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), - ret ? "- execeeded" : ""); + atomic_long_read(&current->mm->locked_vm) << PAGE_SHIFT, + rlimit(RLIMIT_MEMLOCK), ret ? "- exceeded" : ""); up_write(&current->mm->mmap_sem); @@ -369,7 +371,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, if (user_addr + length < user_addr) return -EINVAL; - if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr, + if (!access_ok((void __user *)(unsigned long)user_addr, length)) return -EINVAL; @@ -399,7 +401,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata, region->pages[0], 0, region->length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&pdata->dev->dev, region->iova)) { + if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) { dev_err(&pdata->dev->dev, "failed to map for dma\n"); ret = -EFAULT; goto unpin_pages; diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index 0b840531ef33a2e1f387d15f550fb808df5ae8bf..38871bb52c29a48dd6045468ddacf9d4c40f20c4 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -99,8 +99,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) return -EINVAL; } - if (!access_ok(VERIFY_READ, - (void __user *)(unsigned long)port_pr.buffer_address, + if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address, port_pr.buffer_size)) return -EFAULT; diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index a9b521bccb06dbf8e0e9c20489241a1f71b963bd..ab361ec78df4c3a5c8d09db031f99f76a6e9aa08 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -40,6 +40,13 @@ enum dfl_fpga_devt_type { DFL_FPGA_DEVT_MAX, }; +static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX]; + +static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { + "dfl-fme-pdata", + "dfl-port-pdata", +}; + /** * dfl_dev_info - dfl feature device information. * @name: name string of the feature platform device.
@@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) struct platform_device *fdev = binfo->feature_dev; struct dfl_feature_platform_data *pdata; struct dfl_feature_info *finfo, *p; + enum dfl_id_type type; int ret, index = 0; if (!fdev) return 0; + type = feature_dev_id_type(fdev); + if (WARN_ON_ONCE(type >= DFL_ID_MAX)) + return -EINVAL; + /* * we do not need to care for the memory which is associated with * the platform device. After calling platform_device_unregister(), @@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) pdata->num = binfo->feature_num; pdata->dfl_cdev = binfo->cdev; mutex_init(&pdata->lock); + lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], + dfl_pdata_key_strings[type]); /* * the count should be initialized to 0 to make sure @@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) ret = platform_device_add(binfo->feature_dev); if (!ret) { - if (feature_dev_id_type(binfo->feature_dev) == PORT_ID) + if (type == PORT_ID) dfl_fpga_cdev_add_port_dev(binfo->cdev, binfo->feature_dev); else diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig index af3a20dd5aa4a504524c0bd36f932ed328903c2c..99c99a5d57fe26d573310f7f30b50e7bf11cecf7 100644 --- a/drivers/fsi/Kconfig +++ b/drivers/fsi/Kconfig @@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF tristate "FSI master based on Aspeed ColdFire coprocessor" depends on GPIOLIB depends on GPIO_ASPEED + select GENERIC_ALLOCATOR ---help--- This option enables a FSI master using the AST2400 and AST2500 GPIO lines driven by the internal ColdFire coprocessor. This requires diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index 2c31563fdcae7ced8c1496054830efa3b5f3e5dc..c6fa9b393e84baaca4fc0326902b2d5bba9b1799 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -552,6 +552,31 @@ static int fsi_slave_scan(struct fsi_slave *slave) return 0; } +static unsigned long aligned_access_size(size_t offset, size_t count) +{ + unsigned long offset_unit, count_unit; + + /* Criteria: + * + * 1. Access size must be less than or equal to the maximum access + * width or the highest power-of-two factor of offset + * 2. Access size must be less than or equal to the amount specified by + * count + * + * The access width is optimal if we can calculate 1 to be strictly + * equal while still satisfying 2. 
+ */ + + /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */ + offset_unit = BIT(__builtin_ctzl(offset | 4)); + + /* Find 2 by the top bit of count */ + count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count)); + + /* Constrain the maximum access width to the minimum of both criteria */ + return BIT(__builtin_ctzl(offset_unit | count_unit)); +} + static ssize_t fsi_slave_sysfs_raw_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) @@ -567,8 +592,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file, return -EINVAL; for (total_len = 0; total_len < count; total_len += read_len) { - read_len = min_t(size_t, count, 4); - read_len -= off & 0x3; + read_len = aligned_access_size(off, count - total_len); rc = fsi_slave_read(slave, off, buf + total_len, read_len); if (rc) @@ -595,8 +619,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file, return -EINVAL; for (total_len = 0; total_len < count; total_len += write_len) { - write_len = min_t(size_t, count, 4); - write_len -= off & 0x3; + write_len = aligned_access_size(off, count - total_len); rc = fsi_slave_write(slave, off, buf + total_len, write_len); if (rc) diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c index df94021dd9d12bc32b18873076151d3fccbae5c7..fdc0e458dbaaf9c2b59488e6b1a4d7102926657a 100644 --- a/drivers/fsi/fsi-scom.c +++ b/drivers/fsi/fsi-scom.c @@ -47,8 +47,7 @@ #define SCOM_STATUS_PIB_RESP_MASK 0x00007000 #define SCOM_STATUS_PIB_RESP_SHIFT 12 -#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ - SCOM_STATUS_PROTECTION | \ +#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \ SCOM_STATUS_PARITY | \ SCOM_STATUS_PIB_ABORT | \ SCOM_STATUS_PIB_RESP_MASK) @@ -260,11 +259,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status) /* Return -EBUSY on PIB abort to force a retry */ if (status & SCOM_STATUS_PIB_ABORT) return -EBUSY; - if (status & SCOM_STATUS_ERR_SUMMARY) { - fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy, - sizeof(uint32_t)); - return -EIO; - } return 0; } diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c index b01ba4438501a959de7796dc2eff67a6126d88e3..31e891f00175c635a9ee92c7e0f090eb135fc29b 100644 --- a/drivers/gnss/serial.c +++ b/drivers/gnss/serial.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev, int ret; /* write is only buffered synchronously */ - ret = serdev_device_write(serdev, buf, count, 0); + ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 79cb98950013bbb60f4ff4126cd562453441045a..4596fde16dfe622ca01324685512022b51800077 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf, int ret; /* write is only buffered synchronously */ - ret = serdev_device_write(serdev, buf, count, 0); + ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; @@ -167,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active) else timeout = SIRF_HIBERNATE_TIMEOUT; - while (retries-- > 0) { + do { sirf_pulse_on_off(data); ret = sirf_wait_for_power_state(data, active, timeout); if (ret < 0) { @@ -178,9 +179,9 @@ static int 
sirf_set_active(struct sirf_data *data, bool active) } break; - } + } while (retries--); - if (retries == 0) + if (retries < 0) return -ETIMEDOUT; return 0; @@ -309,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev) ret = -ENODEV; goto err_put_device; } + + ret = regulator_enable(data->vcc); + if (ret) + goto err_put_device; + + /* Wait for chip to boot into hibernate mode. */ + msleep(SIRF_BOOT_DELAY); } if (data->wakeup) { ret = gpiod_to_irq(data->wakeup); if (ret < 0) - goto err_put_device; - + goto err_disable_vcc; data->irq = ret; - ret = devm_request_threaded_irq(dev, data->irq, NULL, - sirf_wakeup_handler, + ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "wakeup", data); if (ret) - goto err_put_device; - } - - if (data->on_off) { - ret = regulator_enable(data->vcc); - if (ret) - goto err_put_device; - - /* Wait for chip to boot into hibernate mode */ - msleep(SIRF_BOOT_DELAY); + goto err_disable_vcc; } if (IS_ENABLED(CONFIG_PM)) { @@ -341,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev) } else { ret = sirf_runtime_resume(dev); if (ret < 0) - goto err_disable_vcc; + goto err_free_irq; } ret = gnss_register_device(gdev); @@ -355,6 +352,9 @@ static int sirf_probe(struct serdev_device *serdev) pm_runtime_disable(dev); else sirf_runtime_suspend(dev); +err_free_irq: + if (data->wakeup) + free_irq(data->irq, data); err_disable_vcc: if (data->on_off) regulator_disable(data->vcc); @@ -375,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev) else sirf_runtime_suspend(&serdev->dev); + if (data->wakeup) + free_irq(data->irq, data); + if (data->on_off) regulator_disable(data->vcc); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4f52c3a8ec99bf0801ac1a2a3c7e01a9b9be1453..71c28b6ecc89be43a7c0b2a7fb2d88bbc2f51360 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -79,6 +79,10 @@ config GPIO_GENERIC # put drivers in the right section, in alphabetical order +# This symbol is selected by both MMIO and PCI expanders +config GPIO_PHYTIUM_CORE + tristate + # This symbol is selected by both I2C and SPI expanders config GPIO_MAX730X tristate @@ -245,6 +249,17 @@ config GPIO_GRGPIO Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB VHDL IP core library. +config GPIO_HISI + tristate "HiSilicon GPIO controller driver" + depends on (ARM64 || COMPILE_TEST) && ACPI + select GPIO_GENERIC + select GPIOLIB_IRQCHIP + help + Say Y or M here to build support for the HiSilicon GPIO controller + driver GPIO block. + This GPIO controller supports double-edge interrupts and multi-core + concurrent access. + config GPIO_HLWD tristate "Nintendo Wii (Hollywood) GPIO" depends on OF_GPIO @@ -404,6 +419,25 @@ config GPIO_OMAP help Say yes here to enable GPIO support for TI OMAP SoCs. +config GPIO_PHYTIUM_PLAT + tristate "Phytium GPIO Platform support" + depends on ARM64 + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say yes here to support the on-chip GPIO controller for the + Phytium SoC family. + +config GPIO_PHYTIUM_SGPIO + tristate "Phytium SGPIO support" + depends on ARM64 + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Say yes here to enable SGPIO support for Phytium SoCs.
+ config GPIO_PL061 bool "PrimeCell PL061 GPIO support" depends on ARM_AMBA @@ -784,6 +818,7 @@ config GPIO_ADP5588 config GPIO_ADP5588_IRQ bool "Interrupt controller support for ADP5588" depends on GPIO_ADP5588=y + select GPIOLIB_IRQCHIP help Say yes here to enable the adp5588 to be used as an interrupt controller. It requires the driver to be built in the kernel. @@ -1306,6 +1341,19 @@ config GPIO_PCIE_IDIO_24 Input filter control is not supported by this driver, and the input filters are deactivated by this driver. +config GPIO_PHYTIUM_PCI + tristate "Phytium GPIO PCI support" + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say Y here to support Phytium PCI GPIO controller on Px210 chipset. + An interrupt is generated when any of the inputs change state + (low to high or high to low). + + This driver can be used for Phytium Px210. + config GPIO_RDC321X tristate "RDC R-321x GPIO support" select MFD_CORE diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index c256aff66a6567e22ff8e80cfc73fffe224ac1d1..3151436029ff126aed1a16eb22197a284b766a27 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o +obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o obj-$(CONFIG_GPIO_ICH) += gpio-ich.o @@ -95,6 +96,10 @@ obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o +obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o +obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o +obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o +obj-$(CONFIG_GPIO_PHYTIUM_SGPIO) += gpio-phytium-sgpio.o obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c index 91b90c0cea731778bd524a13d801433e7df0a2ab..12acdac858208979438491e90c3782b064f9e952 100644 --- a/drivers/gpio/gpio-adnp.c +++ b/drivers/gpio/gpio-adnp.c @@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset) if (err < 0) goto out; - if (err & BIT(pos)) - err = -EACCES; + if (value & BIT(pos)) { + err = -EPERM; + goto out; + } err = 0; diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 6b11f131424848b9cd911936f7a124e2edcf32ee..7f9e0304b5109a8b51fc108679cc32d4d8fbb9e8 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc, static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, unsigned int nr, int value) { - if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) + if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) { + altr_a10sr_gpio_set(gc, nr, value); return 0; + } return -EINVAL; } diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c index fdcebe59510dd798c651993a4b805439cdd7a2b1..68d95051dd0e670636913c0528536540e69d8675 100644 --- a/drivers/gpio/gpio-amd8111.c +++ b/drivers/gpio/gpio-amd8111.c @@ -231,7 +231,10 @@ static int __init amd_gpio_init(void) ioport_unmap(gp.pm); goto out; } + return 0; + out: + pci_dev_put(pdev); return 
err; } @@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void) { gpiochip_remove(&gp.chip); ioport_unmap(gp.pm); + pci_dev_put(gp.pdev); } module_init(amd_gpio_init); diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 2342e154029b0ad3aa7a849ec1d981a1e33d9e2b..b696ec35efb38a540c91a01f4c3d29b1954e539d 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -1225,6 +1225,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) gpio->offset_timer = devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); + if (!gpio->offset_timer) + return -ENOMEM; return aspeed_gpio_setup_irqs(gpio, pdev); } diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 16c7f9f4941644b64c8fa71f4b0b2c7393e99b6a..af936dcca6596f2e60fb66f5615ff80210e1905d 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -664,6 +664,18 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) struct brcmstb_gpio_bank *bank; struct gpio_chip *gc; + /* + * If bank_width is 0, then there is an empty bank in the + * register block. Special handling for this case. + */ + if (bank_width == 0) { + dev_dbg(dev, "Width 0 found: Empty bank @ %d\n", + num_banks); + num_banks++; + gpio_base += MAX_GPIO_PER_BANK; + continue; + } + bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL); if (!bank) { err = -ENOMEM; @@ -740,9 +752,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) goto fail; } - dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", - num_banks, priv->gpio_base, gpio_base - 1); - if (priv->parent_wake_irq && need_wakeup_event) pm_wakeup_event(dev, 0); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index a5ece8ea79bc83837838760dd449e238e9570f3a..b77e1c6b099ea44614a85ee58d701f0bafa5a980 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -207,6 +207,11 @@ static int davinci_gpio_probe(struct platform_device *pdev) else nirq = DIV_ROUND_UP(ngpio, 16); + if (nirq > MAX_INT_PER_BANK) { + dev_err(dev, "Too many IRQs!\n"); + return -EINVAL; + } + nbank = DIV_ROUND_UP(ngpio, 32); chips = devm_kcalloc(dev, nbank, sizeof(struct davinci_gpio_controller), @@ -222,8 +227,9 @@ static int davinci_gpio_probe(struct platform_device *pdev) for (i = 0; i < nirq; i++) { chips->irqs[i] = platform_get_irq(pdev, i); if (chips->irqs[i] < 0) { - dev_info(dev, "IRQ not populated, err = %d\n", - chips->irqs[i]); + if (chips->irqs[i] != -EPROBE_DEFER) + dev_info(dev, "IRQ not populated, err = %d\n", + chips->irqs[i]); return chips->irqs[i]; } } diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 044888fd96a1f642617cba9a9dfc98655c5493aa..53eb5c506fea022db693641f0fad28049fd4610a 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -50,6 +50,9 @@ #define GPIO_EXT_PORTB 0x54 #define GPIO_EXT_PORTC 0x58 #define GPIO_EXT_PORTD 0x5c +#define GPIO_INTCOMB_MASK 0xffc +#define GPIO_INT_MASK_REG 0x3804 +#define MEM_PERI_SUBCTRL_IOBASE 0x1 #define DWAPB_MAX_PORTS 4 #define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ @@ -64,8 +67,18 @@ #define GPIO_INTSTATUS_V2 0x3c #define GPIO_PORTA_EOI_V2 0x40 +bool enable_ascend_mini_gpio_dwapb; +bool enable_ascend_gpio_dwapb; struct dwapb_gpio; +static int __init enable_ascend_gpio_dwapb_setup(char *str) +{ + enable_ascend_gpio_dwapb = true; + + return 1; +} +__setup("enable_ascend_gpio_dwapb", enable_ascend_gpio_dwapb_setup); + #ifdef CONFIG_PM_SLEEP /* Store GPIO context across system-wide suspend/resume 
transitions */ struct dwapb_context { @@ -77,10 +90,11 @@ struct dwapb_context { u32 int_type; u32 int_pol; u32 int_deb; + u32 int_comb_mask; u32 wake_en; }; #endif - +static void __iomem *peri_subctrl_base_addr; struct dwapb_gpio_port { struct gpio_chip gc; bool is_registered; @@ -232,6 +246,11 @@ static void dwapb_irq_enable(struct irq_data *d) val = dwapb_read(gpio, GPIO_INTEN); val |= BIT(d->hwirq); dwapb_write(gpio, GPIO_INTEN, val); + if (enable_ascend_gpio_dwapb) { + val = dwapb_read(gpio, GPIO_INTMASK); + val &= ~BIT(d->hwirq); + dwapb_write(gpio, GPIO_INTMASK, val); + } spin_unlock_irqrestore(&gc->bgpio_lock, flags); } @@ -244,6 +263,11 @@ static void dwapb_irq_disable(struct irq_data *d) u32 val; spin_lock_irqsave(&gc->bgpio_lock, flags); + if (enable_ascend_gpio_dwapb) { + val = dwapb_read(gpio, GPIO_INTMASK); + val |= BIT(d->hwirq); + dwapb_write(gpio, GPIO_INTMASK, val); + } val = dwapb_read(gpio, GPIO_INTEN); val &= ~BIT(d->hwirq); dwapb_write(gpio, GPIO_INTEN, val); @@ -393,6 +417,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, unsigned int hwirq, ngpio = gc->ngpio; struct irq_chip_type *ct; int err, i; + u32 val; gpio->domain = irq_domain_create_linear(fwnode, ngpio, &irq_generic_chip_ops, gpio); @@ -470,6 +495,12 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, irq_create_mapping(gpio->domain, hwirq); port->gc.to_irq = dwapb_gpio_to_irq; + + if (enable_ascend_gpio_dwapb) { + val = dwapb_read(gpio, GPIO_INTCOMB_MASK); + val |= BIT(0); + dwapb_write(gpio, GPIO_INTCOMB_MASK, val); + } } static void dwapb_irq_teardown(struct dwapb_gpio *gpio) @@ -478,6 +509,7 @@ static void dwapb_irq_teardown(struct dwapb_gpio *gpio) struct gpio_chip *gc = &port->gc; unsigned int ngpio = gc->ngpio; irq_hw_number_t hwirq; + u32 val; if (!gpio->domain) return; @@ -487,6 +519,12 @@ static void dwapb_irq_teardown(struct dwapb_gpio *gpio) irq_domain_remove(gpio->domain); gpio->domain = NULL; + + if (enable_ascend_gpio_dwapb) { + val = dwapb_read(gpio, GPIO_INTCOMB_MASK); + val &= ~BIT(0); + dwapb_write(gpio, GPIO_INTCOMB_MASK, val); + } } static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, @@ -660,6 +698,22 @@ static int dwapb_gpio_probe(struct platform_device *pdev) int err; struct device *dev = &pdev->dev; struct dwapb_platform_data *pdata = dev_get_platdata(dev); + struct device_node *np = dev->of_node; + unsigned int value; + + if (enable_ascend_mini_gpio_dwapb && enable_ascend_gpio_dwapb) { + peri_subctrl_base_addr = of_iomap(np, MEM_PERI_SUBCTRL_IOBASE); + if (!peri_subctrl_base_addr) { + dev_err(&pdev->dev, "sysctrl iomap not found!\n"); + } else { + dev_dbg(&pdev->dev, "sysctrl iomap found!\n"); + value = readl(peri_subctrl_base_addr + + GPIO_INT_MASK_REG); + value &= ~1UL; + writel(value, peri_subctrl_base_addr + + GPIO_INT_MASK_REG); + } + } if (!pdata) { pdata = dwapb_gpio_get_pdata(dev); @@ -742,6 +796,10 @@ static int dwapb_gpio_remove(struct platform_device *pdev) reset_control_assert(gpio->rst); clk_disable_unprepare(gpio->clk); + if ((peri_subctrl_base_addr != NULL) && enable_ascend_mini_gpio_dwapb && + enable_ascend_gpio_dwapb) + iounmap(peri_subctrl_base_addr); + return 0; } @@ -778,6 +836,9 @@ static int dwapb_gpio_suspend(struct device *dev) ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY); ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL); ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE); + if (enable_ascend_gpio_dwapb) + ctx->int_comb_mask = + dwapb_read(gpio, GPIO_INTCOMB_MASK); /* Mask out interrupts */ dwapb_write(gpio, GPIO_INTMASK, @@
-798,6 +859,7 @@ static int dwapb_gpio_resume(struct device *dev) struct gpio_chip *gc = &gpio->ports[0].gc; unsigned long flags; int i; + unsigned int value; if (!IS_ERR(gpio->clk)) clk_prepare_enable(gpio->clk); @@ -826,6 +888,9 @@ static int dwapb_gpio_resume(struct device *dev) dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb); dwapb_write(gpio, GPIO_INTEN, ctx->int_en); dwapb_write(gpio, GPIO_INTMASK, ctx->int_mask); + if (enable_ascend_gpio_dwapb) + dwapb_write(gpio, + GPIO_INTCOMB_MASK, ctx->int_comb_mask); /* Clear out spurious interrupts */ dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff); @@ -833,6 +898,13 @@ static int dwapb_gpio_resume(struct device *dev) } spin_unlock_irqrestore(&gc->bgpio_lock, flags); + if ((peri_subctrl_base_addr != NULL) && enable_ascend_mini_gpio_dwapb && + enable_ascend_gpio_dwapb) { + value = readl(peri_subctrl_base_addr + GPIO_INT_MASK_REG); + value &= ~1UL; + writel(value, peri_subctrl_base_addr + GPIO_INT_MASK_REG); + } + return 0; } #endif diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index e0d6a0a7bc697d0de1e3f1b364a0dc4041ad9b2a..4935cda5301ea8e7ff414807a3675b10a1337062 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset) static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) { - return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + struct sprd_eic *sprd_eic = gpiochip_get_data(chip); + + switch (sprd_eic->type) { + case SPRD_EIC_DEBOUNCE: + return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + case SPRD_EIC_ASYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); + case SPRD_EIC_SYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); + default: + return -ENOTSUPP; + } } static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; @@ -402,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0); sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; @@ -516,11 +529,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip) } for_each_set_bit(n, ®, SPRD_EIC_PER_BANK_NR) { - girq = irq_find_mapping(chip->irq.domain, - bank * SPRD_EIC_PER_BANK_NR + n); + u32 offset = bank * SPRD_EIC_PER_BANK_NR + n; + + girq = irq_find_mapping(chip->irq.domain, offset); generic_handle_irq(girq); - sprd_eic_toggle_trigger(chip, girq, n); + sprd_eic_toggle_trigger(chip, girq, offset); } } } diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 0ecd2369c2cad0daa5e08696ab85b91af5235a26..a09d2f9ebacc8d4909d79119333e344453ea6e0a 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); + if (index < 0) + goto err_destroy; sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; 
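The Spreadtrum EIC handler change above dispatches on the bank-absolute line number (bank * SPRD_EIC_PER_BANK_NR + n) rather than the in-bank bit index when re-arming the trigger. Below is a minimal standalone sketch of that mapping, not driver code; LINES_PER_BANK and eic_absolute_line() are illustrative names, and the per-bank count of 8 is assumed to mirror SPRD_EIC_PER_BANK_NR:

#include <stdio.h>

#define LINES_PER_BANK 8	/* assumed to mirror SPRD_EIC_PER_BANK_NR */

/* Map an interrupt-status bit within a bank to the gpiochip line number. */
static unsigned int eic_absolute_line(unsigned int bank, unsigned int bit)
{
	return bank * LINES_PER_BANK + bit;
}

int main(void)
{
	/* Bit 3 pending in bank 2: the line to re-arm is 19, not 3. */
	unsigned int bank = 2, bit = 3;

	printf("bank %u, bit %u -> gpio line %u\n",
	       bank, bit, eic_absolute_line(bank, bit));
	return 0;
}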
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c new file mode 100644 index 0000000000000000000000000000000000000000..cb5265fcae95b2711742bbdb387250b0aa47f3ec --- /dev/null +++ b/drivers/gpio/gpio-hisi.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 HiSilicon Limited. */ +#include +#include +#include +#include +#include + +#define HISI_GPIO_SWPORT_DR_SET_WX 0x000 +#define HISI_GPIO_SWPORT_DR_CLR_WX 0x004 +#define HISI_GPIO_SWPORT_DDR_SET_WX 0x010 +#define HISI_GPIO_SWPORT_DDR_CLR_WX 0x014 +#define HISI_GPIO_SWPORT_DDR_ST_WX 0x018 +#define HISI_GPIO_INTEN_SET_WX 0x020 +#define HISI_GPIO_INTEN_CLR_WX 0x024 +#define HISI_GPIO_INTMASK_SET_WX 0x030 +#define HISI_GPIO_INTMASK_CLR_WX 0x034 +#define HISI_GPIO_INTTYPE_EDGE_SET_WX 0x040 +#define HISI_GPIO_INTTYPE_EDGE_CLR_WX 0x044 +#define HISI_GPIO_INT_POLARITY_SET_WX 0x050 +#define HISI_GPIO_INT_POLARITY_CLR_WX 0x054 +#define HISI_GPIO_DEBOUNCE_SET_WX 0x060 +#define HISI_GPIO_DEBOUNCE_CLR_WX 0x064 +#define HISI_GPIO_INTSTATUS_WX 0x070 +#define HISI_GPIO_PORTA_EOI_WX 0x078 +#define HISI_GPIO_EXT_PORT_WX 0x080 +#define HISI_GPIO_INTCOMB_MASK_WX 0x0a0 +#define HISI_GPIO_INT_DEDGE_SET 0x0b0 +#define HISI_GPIO_INT_DEDGE_CLR 0x0b4 +#define HISI_GPIO_INT_DEDGE_ST 0x0b8 + +#define HISI_GPIO_LINE_NUM_MAX 32 +#define HISI_GPIO_DRIVER_NAME "gpio-hisi" + +struct hisi_gpio { + struct gpio_chip chip; + struct device *dev; + void __iomem *reg_base; + unsigned int line_num; + struct irq_chip irq_chip; + int irq; +}; + +static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip, + unsigned int off) +{ + struct hisi_gpio *hisi_gpio = + container_of(chip, struct hisi_gpio, chip); + void __iomem *reg = hisi_gpio->reg_base + off; + + return readl(reg); +} + +static inline void hisi_gpio_write_reg(struct gpio_chip *chip, + unsigned int off, u32 val) +{ + struct hisi_gpio *hisi_gpio = + container_of(chip, struct hisi_gpio, chip); + void __iomem *reg = hisi_gpio->reg_base + off; + + writel(val, reg); +} + +static void hisi_gpio_set_debounce(struct gpio_chip *chip, unsigned int off, + u32 debounce) +{ + if (debounce) + hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_SET_WX, BIT(off)); + else + hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_CLR_WX, BIT(off)); +} + +static int hisi_gpio_set_config(struct gpio_chip *chip, unsigned int offset, + unsigned long config) +{ + u32 config_para = pinconf_to_config_param(config); + u32 config_arg; + + switch (config_para) { + case PIN_CONFIG_INPUT_DEBOUNCE: + config_arg = pinconf_to_config_argument(config); + hisi_gpio_set_debounce(chip, offset, config_arg); + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +static void hisi_gpio_set_ack(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + hisi_gpio_write_reg(chip, HISI_GPIO_PORTA_EOI_WX, BIT(irqd_to_hwirq(d))); +} + +static void hisi_gpio_irq_set_mask(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_SET_WX, BIT(irqd_to_hwirq(d))); +} + +static void hisi_gpio_irq_clr_mask(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_CLR_WX, BIT(irqd_to_hwirq(d))); +} + +static int hisi_gpio_irq_set_type(struct irq_data *d, u32 type) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + unsigned int mask = BIT(irqd_to_hwirq(d)); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: + hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_SET, mask); + 
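/* A single write to the dedicated dual-edge register arms both edges; the edge-type and polarity registers used by the other cases are skipped here. */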
break; + case IRQ_TYPE_EDGE_RISING: + hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask); + hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask); + break; + case IRQ_TYPE_EDGE_FALLING: + hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask); + hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask); + break; + case IRQ_TYPE_LEVEL_HIGH: + hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask); + hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask); + break; + case IRQ_TYPE_LEVEL_LOW: + hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask); + hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask); + break; + default: + return -EINVAL; + } + + /* + * The dual-edge interrupt and other interrupt's registers do not + * take effect at the same time. The registers of the two-edge + * interrupts have higher priorities, the configuration of + * the dual-edge interrupts must be disabled before the configuration + * of other kind of interrupts. + */ + if (type != IRQ_TYPE_EDGE_BOTH) { + unsigned int both = hisi_gpio_read_reg(chip, HISI_GPIO_INT_DEDGE_ST); + + if (both & mask) + hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_CLR, mask); + } + + if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + else if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + + return 0; +} + +static void hisi_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + hisi_gpio_irq_clr_mask(d); + hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_SET_WX, BIT(irqd_to_hwirq(d))); +} + +static void hisi_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + hisi_gpio_irq_set_mask(d); + hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_CLR_WX, BIT(irqd_to_hwirq(d))); +} + +static void hisi_gpio_irq_handler(struct irq_desc *desc) +{ + struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc); + unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip, + HISI_GPIO_INTSTATUS_WX); + struct irq_chip *irq_c = irq_desc_get_chip(desc); + int hwirq; + + chained_irq_enter(irq_c, desc); + for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX) + generic_handle_irq(irq_find_mapping(hisi_gpio->chip.irq.domain, + hwirq)); + chained_irq_exit(irq_c, desc); +} + +static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio) +{ + struct gpio_chip *chip = &hisi_gpio->chip; + struct gpio_irq_chip *girq_chip = &chip->irq; + + /* Set hooks for irq_chip */ + hisi_gpio->irq_chip.irq_ack = hisi_gpio_set_ack; + hisi_gpio->irq_chip.irq_mask = hisi_gpio_irq_set_mask; + hisi_gpio->irq_chip.irq_unmask = hisi_gpio_irq_clr_mask; + hisi_gpio->irq_chip.irq_set_type = hisi_gpio_irq_set_type; + hisi_gpio->irq_chip.irq_enable = hisi_gpio_irq_enable; + hisi_gpio->irq_chip.irq_disable = hisi_gpio_irq_disable; + + girq_chip->chip = &hisi_gpio->irq_chip; + girq_chip->default_type = IRQ_TYPE_NONE; + girq_chip->num_parents = 1; + girq_chip->parents = &hisi_gpio->irq; + girq_chip->parent_handler = hisi_gpio_irq_handler; + girq_chip->parent_handler_data = hisi_gpio; + + /* Clear Mask of GPIO controller combine IRQ */ + hisi_gpio_write_reg(chip, HISI_GPIO_INTCOMB_MASK_WX, 1); +} + +static const struct acpi_device_id hisi_gpio_acpi_match[] = { + {"HISI0184", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_gpio_acpi_match); + +static void hisi_gpio_get_pdata(struct device *dev, + struct hisi_gpio *hisi_gpio) +{ + struct platform_device *pdev = to_platform_device(dev); + struct 
fwnode_handle *fwnode; + int idx = 0; + + device_for_each_child_node(dev, fwnode) { + /* Loop runs only once; no need for an array to save line_num */ + if (fwnode_property_read_u32(fwnode, "ngpios", + &hisi_gpio->line_num)) { + dev_err(dev, + "failed to get number of lines for port%d, using default value instead\n", + idx); + hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX; + } + + if (WARN_ON(hisi_gpio->line_num > HISI_GPIO_LINE_NUM_MAX)) + hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX; + + hisi_gpio->irq = platform_get_irq(pdev, idx); + + dev_info(dev, + "get hisi_gpio[%d] with %d lines\n", idx, + hisi_gpio->line_num); + + idx++; + } +} + +static int hisi_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + unsigned long flags; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + gc->bgpio_dir |= BIT(gpio); + + hisi_gpio_write_reg(gc, HISI_GPIO_SWPORT_DDR_CLR_WX, ~gc->bgpio_dir); + hisi_gpio_write_reg(gc, HISI_GPIO_SWPORT_DDR_SET_WX, gc->bgpio_dir); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + gc->set(gc, gpio, val); + + return 0; +} + +static int hisi_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + unsigned long flags; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + gc->bgpio_dir &= ~BIT(gpio); + + hisi_gpio_write_reg(gc, HISI_GPIO_SWPORT_DDR_CLR_WX, ~gc->bgpio_dir); + hisi_gpio_write_reg(gc, HISI_GPIO_SWPORT_DDR_SET_WX, gc->bgpio_dir); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +static int hisi_gpio_get_dir(struct gpio_chip *gc, unsigned int gpio) +{ + /* Return 0 if output, 1 if input */ + return !(gc->read_reg(gc->reg_dir) & BIT(gpio)); +} + +static int hisi_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_gpio *hisi_gpio; + struct resource *res; + int port_num; + int ret; + + /* + * One GPIO controller owns one port currently; + * if we get more from the ACPI table, return an error. 
+ */ + port_num = device_get_child_node_count(dev); + if (WARN_ON(port_num != 1)) + return -ENODEV; + + hisi_gpio = devm_kzalloc(dev, sizeof(*hisi_gpio), GFP_KERNEL); + if (!hisi_gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hisi_gpio->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hisi_gpio->reg_base)) + return PTR_ERR(hisi_gpio->reg_base); + + hisi_gpio_get_pdata(dev, hisi_gpio); + + hisi_gpio->dev = dev; + + hisi_gpio->chip.reg_dir = hisi_gpio->reg_base + + HISI_GPIO_SWPORT_DDR_ST_WX; + + ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4, + hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX, + hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX, + hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX, + NULL, NULL, 0); + if (ret) { + dev_err(dev, "failed to init, ret = %d\n", ret); + return ret; + } + + hisi_gpio->chip.direction_output = hisi_gpio_dir_out; + hisi_gpio->chip.direction_input = hisi_gpio_dir_in; + hisi_gpio->chip.get_direction = hisi_gpio_get_dir; + hisi_gpio->chip.set_config = hisi_gpio_set_config; + hisi_gpio->chip.ngpio = hisi_gpio->line_num; + hisi_gpio->chip.base = -1; + + if (hisi_gpio->irq > 0) + hisi_gpio_init_irq(hisi_gpio); + + ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio); + if (ret) { + dev_err(dev, "failed to register gpiochip, ret = %d\n", ret); + return ret; + } + + return 0; +} + +static struct platform_driver hisi_gpio_driver = { + .driver = { + .name = HISI_GPIO_DRIVER_NAME, + .acpi_match_table = hisi_gpio_acpi_match, + }, + .probe = hisi_gpio_probe, +}; + +module_platform_driver(hisi_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Luo Jiaxing "); +MODULE_DESCRIPTION("HiSilicon GPIO controller driver"); +MODULE_ALIAS("platform:" HISI_GPIO_DRIVER_NAME); diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c index 05813fbf3daf25f4aeb6ea0233f13c13dd95ad73..647dfbbc4e1cf44ac989a1562e2c2f711fca5d36 100644 --- a/drivers/gpio/gpio-max7301.c +++ b/drivers/gpio/gpio-max7301.c @@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg, struct spi_device *spi = to_spi_device(dev); u16 word = ((reg & 0x7F) << 8) | (val & 0xFF); - return spi_write(spi, (const u8 *)&word, sizeof(word)); + return spi_write_then_read(spi, &word, sizeof(word), NULL, 0); } /* A read from the MAX7301 means two transfers; here, one message each */ @@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg) struct spi_device *spi = to_spi_device(dev); word = 0x8000 | (reg << 8); - ret = spi_write(spi, (const u8 *)&word, sizeof(word)); - if (ret) - return ret; - /* - * This relies on the fact, that a transfer with NULL tx_buf shifts out - * zero bytes (=NOOP for MAX7301) - */ - ret = spi_read(spi, (u8 *)&word, sizeof(word)); + ret = spi_write_then_read(spi, &word, sizeof(word), &word, + sizeof(word)); if (ret) return ret; return word & 0xff; diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c index 538bce4b5b4271b5d830623840c5e71aa7527017..78254ed93206b19f648863390ca4a389e290dee9 100644 --- a/drivers/gpio/gpio-max77620.c +++ b/drivers/gpio/gpio-max77620.c @@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio, case 0: val = MAX77620_CNFG_GPIO_DBNC_None; break; - case 1 ... 8: + case 1 ... 8000: val = MAX77620_CNFG_GPIO_DBNC_8ms; break; - case 9 ... 16: + case 8001 ... 16000: val = MAX77620_CNFG_GPIO_DBNC_16ms; break; - case 17 ... 32: + case 16001 ... 
32000: val = MAX77620_CNFG_GPIO_DBNC_32ms; break; default: diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index d66b7a768ecd264d5872997e6acf8715344640e8..945bd13e5e7918c014a1d44792c0d67393f50629 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -32,8 +32,8 @@ #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) enum { - GPIO_MOCKUP_DIR_OUT = 0, - GPIO_MOCKUP_DIR_IN = 1, + GPIO_MOCKUP_DIR_IN = 0, + GPIO_MOCKUP_DIR_OUT = 1, }; /* @@ -135,7 +135,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); - return chip->lines[offset].dir; + return !chip->lines[offset].dir; } static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index c8673a5d941223c0b27e3a4062f2d7685e0ed178..3f10f9599f2cb6b99b258d1817fe51b847322bb5 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -348,7 +348,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) * It's assumed that only a single type of gpio controller is available * on the current machine, so overwriting global data is fine. */ - mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; + if (devtype->irq_set_type) + mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type; if (devtype->gpio_dir_out) gc->direction_output = devtype->gpio_dir_out; diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index d72af6f6cdbda0870266b7531c878f7a2f954968..74401e0adb29ce044c3534745e5f0218ab563965 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -30,6 +30,7 @@ #define GPIO_REG_EDGE 0xA0 struct mtk_gc { + struct irq_chip irq_chip; struct gpio_chip chip; spinlock_t lock; int bank; @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip mediatek_gpio_irq_chip = { - .irq_unmask = mediatek_gpio_irq_unmask, - .irq_mask = mediatek_gpio_irq_mask, - .irq_mask_ack = mediatek_gpio_irq_mask, - .irq_set_type = mediatek_gpio_irq_type, -}; - static int mediatek_gpio_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) @@ -244,6 +238,8 @@ mediatek_gpio_bank_probe(struct device *dev, rg->chip.of_xlate = mediatek_gpio_xlate; rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d", dev_name(dev), bank); + if (!rg->chip.label) + return -ENOMEM; ret = devm_gpiochip_add_data(dev, &rg->chip, mtk); if (ret < 0) { @@ -252,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } + rg->irq_chip.name = dev_name(dev); + rg->irq_chip.parent_device = dev; + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; + rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; + if (mtk->gpio_irq) { /* * Manually request the irq here instead of passing @@ -268,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add gpiochip_irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, mtk->gpio_irq, NULL); } @@ -295,6 +298,7 @@ mediatek_gpio_probe(struct platform_device *pdev) struct device_node *np = 
dev->of_node; struct mtk *mtk; int i; + int ret; mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL); if (!mtk) @@ -307,10 +311,12 @@ mediatek_gpio_probe(struct platform_device *pdev) mtk->gpio_irq = irq_of_parse_and_map(np, 0); mtk->dev = dev; platform_set_drvdata(pdev, mtk); - mediatek_gpio_irq_chip.name = dev_name(dev); - for (i = 0; i < MTK_BANK_CNT; i++) - mediatek_gpio_bank_probe(dev, np, i); + for (i = 0; i < MTK_BANK_CNT; i++) { + ret = mediatek_gpio_bank_probe(dev, np, i); + if (ret) + return ret; + } return 0; } diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 6e02148c208b2cc600d75263d1c87064db2ebc23..adc768f908f1ae1937f3d245088c590b449e574f 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, "marvell,armada-370-gpio")) return 0; - if (IS_ERR(mvchip->clk)) - return PTR_ERR(mvchip->clk); - /* * There are only two sets of PWM configuration registers for * all the GPIO lines on those SoCs which this driver reserves @@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev, if (!res) return 0; + if (IS_ERR(mvchip->clk)) + return PTR_ERR(mvchip->clk); + /* * Use set A for lines of GPIO chip with id 0, B for GPIO chip * with id 1. Don't allow further GPIO chips to be used for PWM. diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 995cf0b9e0b1baa5364dc525850a665ba34c5a98..e86e61dda4b757fa04c3616aab6c7ffb14554a53 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -437,8 +438,11 @@ static int mxc_gpio_probe(struct platform_device *pdev) /* the controller clock is optional */ port->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(port->clk)) + if (IS_ERR(port->clk)) { + if (PTR_ERR(port->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; port->clk = NULL; + } err = clk_prepare_enable(port->clk); if (err) { @@ -550,33 +554,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port) writel(port->gpio_saved_reg.dr, port->base + GPIO_DR); } -static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev) +static int mxc_gpio_syscore_suspend(void) { - struct platform_device *pdev = to_platform_device(dev); - struct mxc_gpio_port *port = platform_get_drvdata(pdev); + struct mxc_gpio_port *port; - mxc_gpio_save_regs(port); - clk_disable_unprepare(port->clk); + /* walk through all ports */ + list_for_each_entry(port, &mxc_gpio_ports, node) { + mxc_gpio_save_regs(port); + clk_disable_unprepare(port->clk); + } return 0; } -static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev) +static void mxc_gpio_syscore_resume(void) { - struct platform_device *pdev = to_platform_device(dev); - struct mxc_gpio_port *port = platform_get_drvdata(pdev); + struct mxc_gpio_port *port; int ret; - ret = clk_prepare_enable(port->clk); - if (ret) - return ret; - mxc_gpio_restore_regs(port); - - return 0; + /* walk through all ports */ + list_for_each_entry(port, &mxc_gpio_ports, node) { + ret = clk_prepare_enable(port->clk); + if (ret) { + pr_err("mxc: failed to enable gpio clock %d\n", ret); + return; + } + mxc_gpio_restore_regs(port); + } } -static const struct dev_pm_ops mxc_gpio_dev_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume) +static struct syscore_ops mxc_gpio_syscore_ops = { + .suspend = mxc_gpio_syscore_suspend, + .resume = mxc_gpio_syscore_resume, }; static struct platform_driver mxc_gpio_driver 
= { @@ -584,7 +593,6 @@ static struct platform_driver mxc_gpio_driver = { .name = "gpio-mxc", .of_match_table = mxc_gpio_dt_ids, .suppress_bind_attrs = true, - .pm = &mxc_gpio_dev_pm_ops, }, .probe = mxc_gpio_probe, .id_table = mxc_gpio_devtype, @@ -592,6 +600,8 @@ static struct platform_driver mxc_gpio_driver = { static int __init gpio_mxc_init(void) { + register_syscore_ops(&mxc_gpio_syscore_ops); + return platform_driver_register(&mxc_gpio_driver); } subsys_initcall(gpio_mxc_init); diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index df30490da820da8f4c31fce7df386e2e0efb50a8..ea874fd033a5e2da19b4d3d8e6fb02afd598180d 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -18,8 +18,6 @@ #include #include #include -/* FIXME: for gpio_get_value(), replace this by direct register read */ -#include #include #define MXS_SET 0x4 @@ -86,7 +84,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) port->both_edges &= ~pin_mask; switch (type) { case IRQ_TYPE_EDGE_BOTH: - val = gpio_get_value(port->gc.base + d->hwirq); + val = port->gc.get(&port->gc, d->hwirq); if (val) edge = GPIO_INT_FALL_EDGE; else diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index e81008678a38f5c36987b37a83bac80ccaccae8a..feabac40743ee04acdcc8f62b68ccd068f1e4a96 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) } } +/* + * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain. + * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs + * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none + * are capable waking up the system from off mode. + */ +static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask) +{ + u32 no_wake = bank->non_wakeup_gpios; + + if (no_wake) + return !!(~no_wake & gpio_mask); + + return false; +} + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { @@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, } /* This part needs to be executed always for OMAP{34xx, 44xx} */ - if (!bank->regs->irqctrl) { - /* On omap24xx proceed only when valid GPIO bit is set */ - if (bank->non_wakeup_gpios) { - if (!(bank->non_wakeup_gpios & gpio_bit)) - goto exit; - } - + if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) { /* * Log the edge gpio and manually trigger the IRQ * after resume if the input level changes @@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, bank->enabled_non_wakeup_gpios &= ~gpio_bit; } -exit: bank->level_mask = readl_relaxed(bank->base + bank->regs->leveldetect0) | readl_relaxed(bank->base + bank->regs->leveldetect1); @@ -828,9 +837,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d) raw_spin_lock_irqsave(&bank->lock, flags); bank->irq_usage &= ~(BIT(offset)); - omap_set_gpio_irqenable(bank, offset, 0); - omap_clear_gpio_irqstatus(bank, offset); omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); + omap_clear_gpio_irqstatus(bank, offset); + omap_set_gpio_irqenable(bank, offset, 0); if (!LINE_USED(bank->mod_usage, offset)) omap_clear_gpio_debounce(bank, offset); omap_disable_gpio_module(bank, offset); @@ -872,8 +881,8 @@ static void omap_gpio_mask_irq(struct irq_data *d) unsigned long flags; raw_spin_lock_irqsave(&bank->lock, flags); - omap_set_gpio_irqenable(bank, offset, 
0); omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); + omap_set_gpio_irqenable(bank, offset, 0); raw_spin_unlock_irqrestore(&bank->lock, flags); } @@ -885,17 +894,20 @@ static void omap_gpio_unmask_irq(struct irq_data *d) unsigned long flags; raw_spin_lock_irqsave(&bank->lock, flags); - if (trigger) - omap_set_gpio_triggering(bank, offset, trigger); + omap_set_gpio_irqenable(bank, offset, 1); - /* For level-triggered GPIOs, the clearing must be done after - * the HW source is cleared, thus after the handler has run */ - if (bank->level_mask & BIT(offset)) { - omap_set_gpio_irqenable(bank, offset, 0); + /* + * For level-triggered GPIOs, clearing must be done after the source + * is cleared, thus after the handler has run. OMAP4 needs this done + * after enabing the interrupt to clear the wakeup status. + */ + if (bank->regs->leveldetect0 && bank->regs->wkup_en && + trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) omap_clear_gpio_irqstatus(bank, offset); - } - omap_set_gpio_irqenable(bank, offset, 1); + if (trigger) + omap_set_gpio_triggering(bank, offset, trigger); + raw_spin_unlock_irqrestore(&bank->lock, flags); } @@ -1676,6 +1688,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = { .clr_dataout = OMAP4_GPIO_CLEARDATAOUT, .irqstatus = OMAP4_GPIO_IRQSTATUS0, .irqstatus2 = OMAP4_GPIO_IRQSTATUS1, + .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0, + .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1, .irqenable = OMAP4_GPIO_IRQSTATUSSET0, .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1, .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 023a32cfac42da79789af9026f394eb1fe4a5d5c..0232c25a158645f588d2d9826ef1e2a36311f605 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -58,7 +58,7 @@ #define PCA_GPIO_MASK 0x00FF #define PCAL_GPIO_MASK 0x1f -#define PCAL_PINCTRL_MASK 0xe0 +#define PCAL_PINCTRL_MASK 0x60 #define PCA_INT 0x0100 #define PCA_PCAL 0x0200 @@ -543,7 +543,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) static void pca953x_irq_shutdown(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = gpiochip_get_data(gc); u8 mask = 1 << (d->hwirq % BANK_SZ); chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index adf72dda25a2b543e32e6e2aee1d2f4d1b82d352..68a35b65925aca762ca349f0f54704b306b72190 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table); */ struct pcf857x { struct gpio_chip chip; + struct irq_chip irqchip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ unsigned out; /* software latch */ @@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) mutex_unlock(&gpio->lock); } -static struct irq_chip pcf857x_irq_chip = { - .name = "pcf857x", - .irq_enable = pcf857x_irq_enable, - .irq_disable = pcf857x_irq_disable, - .irq_ack = noop, - .irq_mask = noop, - .irq_unmask = noop, - .irq_set_wake = pcf857x_irq_set_wake, - .irq_bus_lock = pcf857x_irq_bus_lock, - .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, -}; - /*-------------------------------------------------------------------------*/ static int pcf857x_probe(struct i2c_client *client, @@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client, /* Enable irqchip if we have an interrupt */ if 
(client->irq) { + gpio->irqchip.name = "pcf857x", + gpio->irqchip.irq_enable = pcf857x_irq_enable, + gpio->irqchip.irq_disable = pcf857x_irq_disable, + gpio->irqchip.irq_ack = noop, + gpio->irqchip.irq_mask = noop, + gpio->irqchip.irq_unmask = noop, + gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, + gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, + gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, status = gpiochip_irqchip_add_nested(&gpio->chip, - &pcf857x_irq_chip, + &gpio->irqchip, 0, handle_level_irq, IRQ_TYPE_NONE); if (status) { @@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client, if (status) goto fail; - gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, + gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, client->irq); gpio->irq_parent = client->irq; } diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c new file mode 100644 index 0000000000000000000000000000000000000000..8d30642d1fcd97b51718b32e07d6799e428f4eab --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, + struct pin_loc *pl) +{ + int ret; + + if (offset < gpio->ngpio[0]) { + pl->port = 0; + pl->offset = offset; + ret = 0; + } else if (offset < (gpio->ngpio[0] + gpio->ngpio[1])) { + pl->port = 1; + pl->offset = offset - gpio->ngpio[0]; + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void phytium_gpio_toggle_trigger(struct phytium_gpio *gpio, + unsigned int offset) +{ + struct gpio_chip *gc; + u32 pol; + int val; + + /* Only port A can provide interrupt source */ + if (offset >= gpio->ngpio[0]) + return; + + gc = &gpio->gc; + + pol = readl(gpio->regs + GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offset); + if (val) + pol &= ~BIT(offset); + else + pol |= BIT(offset); + + writel(pol, gpio->regs + GPIO_INT_POLARITY); +} + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dat; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + + dat = gpio->regs + GPIO_EXT_PORTA + (loc.port * GPIO_PORT_STRIDE); + + return !!(readl(dat) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get); + +void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dr; + unsigned long flags; + u32 mask; + + if (get_pin_location(gpio, offset, &loc)) + return; + dr = gpio->regs + GPIO_SWPORTA_DR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + if (value) + mask = readl(dr) | BIT(loc.offset); + else + mask = readl(dr) & ~BIT(loc.offset); + + writel(mask, dr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_set); + +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + 
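/* Clearing the port's data-direction bit switches the line to input; direction_output below sets the same bit before driving the value. */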
writel(readl(ddr) & ~(BIT(loc.offset)), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_input); + +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) | BIT(loc.offset), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + phytium_gpio_set(gc, offset, value); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); + +void phytium_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val = BIT(irqd_to_hwirq(d)); + + raw_spin_lock(&gpio->lock); + + writel(val, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_ack); + +void phytium_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_mask); + +void phytium_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_unmask); + +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + int hwirq = irqd_to_hwirq(d); + unsigned long flags, lvl, pol; + + if (hwirq < 0 || hwirq >= gpio->ngpio[0]) + return -EINVAL; + + if ((flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) && + (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { + dev_err(gc->parent, + "trying to configure line %d for both level and edge detection, choose one!\n", + hwirq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&gpio->lock, flags); + + lvl = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + pol = readl(gpio->regs + GPIO_INT_POLARITY); + + switch (flow_type) { + case IRQ_TYPE_EDGE_BOTH: + lvl |= BIT(hwirq); + phytium_gpio_toggle_trigger(gpio, hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on both edges\n", hwirq); + break; + case IRQ_TYPE_EDGE_RISING: + lvl |= BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on RISING edge\n", hwirq); + break; + case IRQ_TYPE_EDGE_FALLING: + lvl |= BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on FALLING edge\n", hwirq); + break; + case IRQ_TYPE_LEVEL_HIGH: + lvl &= ~BIT(hwirq); + pol |= 
BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on HIGH level\n", hwirq); + break; + case IRQ_TYPE_LEVEL_LOW: + lvl &= ~BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on LOW level\n", hwirq); + break; + } + + writel(lvl, gpio->regs + GPIO_INTTYPE_LEVEL); + if (flow_type != IRQ_TYPE_EDGE_BOTH) + writel(pol, gpio->regs + GPIO_INT_POLARITY); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_type); + +void phytium_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_enable); + +void phytium_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_disable); + +void phytium_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned long pending; + int offset; + + chained_irq_enter(irqchip, desc); + + pending = readl(gpio->regs + GPIO_INTSTATUS); + if (pending) { + for_each_set_bit(offset, &pending, gpio->ngpio[0]) { + int gpio_irq = irq_find_mapping(gc->irq.domain, + offset); + generic_handle_irq(gpio_irq); + + if ((irq_get_trigger_type(gpio_irq) & + IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) + phytium_gpio_toggle_trigger(gpio, offset); + } + } + + chained_irq_exit(irqchip, desc); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_handler); + +int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + return !(readl(ddr) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get_direction); + +#if CONFIG_SMP +int +phytium_gpio_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + struct gpio_chip *chip_data = irq_data_get_irq_chip_data(d); + struct irq_chip *chip = irq_get_chip(chip_data->irq.num_parents); + struct irq_data *data = irq_get_irq_data(chip_data->irq.num_parents); + + if (chip && chip->irq_set_affinity) + return chip->irq_set_affinity(data, mask_val, force); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_affinity); +#endif + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium GPIO Controller core"); diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h new file mode 100644 index 
0000000000000000000000000000000000000000..8eb978c03c37d6d7947f891360b30736a07188a6 --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _GPIO_PHYTIUM_H +#define _GPIO_PHYTIUM_H + +#include +#include + +#include "gpiolib.h" + +#define GPIO_SWPORTA_DR 0x00 /* WR Port A Output Data Register */ +#define GPIO_SWPORTA_DDR 0x04 /* WR Port A Data Direction Register */ +#define GPIO_EXT_PORTA 0x08 /* RO Port A Input Data Register */ +#define GPIO_SWPORTB_DR 0x0c /* WR Port B Output Data Register */ +#define GPIO_SWPORTB_DDR 0x10 /* WR Port B Data Direction Register */ +#define GPIO_EXT_PORTB 0x14 /* RO Port B Input Data Register */ + +#define GPIO_INTEN 0x18 /* WR Port A Interrput Enable Register */ +#define GPIO_INTMASK 0x1c /* WR Port A Interrupt Mask Register */ +#define GPIO_INTTYPE_LEVEL 0x20 /* WR Port A Interrupt Level Register */ +#define GPIO_INT_POLARITY 0x24 /* WR Port A Interrupt Polarity Register */ +#define GPIO_INTSTATUS 0x28 /* RO Port A Interrupt Status Register */ +#define GPIO_RAW_INTSTATUS 0x2c +/* RO Port A Raw Interrupt Status Register */ +#define GPIO_LS_SYNC 0x30 +/* WR Level-sensitive Synchronization Enable Register */ +#define GPIO_DEBOUNCE 0x34 /* WR Debounce Enable Register */ +#define GPIO_PORTA_EOI 0x38 /* WO Port A Clear Interrupt Register */ + +#define MAX_NPORTS 2 +#define NGPIO_DEFAULT 8 +#define NGPIO_MAX 32 +#define GPIO_PORT_STRIDE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) + +struct pin_loc { + unsigned int port; + unsigned int offset; +}; + +#ifdef CONFIG_PM_SLEEP +struct phytium_gpio_ctx { + u32 swporta_dr; + u32 swporta_ddr; + u32 ext_porta; + u32 swportb_dr; + u32 swportb_ddr; + u32 ext_portb; + u32 inten; + u32 intmask; + u32 inttype_level; + u32 int_polarity; + u32 intstatus; + u32 raw_intstatus; + u32 ls_sync; + u32 debounce; +}; +#endif + +struct phytium_gpio { + raw_spinlock_t lock; + void __iomem *regs; + struct gpio_chip gc; + struct irq_chip irq_chip; + unsigned int ngpio[2]; + int irq[32]; +#ifdef CONFIG_PM_SLEEP + struct phytium_gpio_ctx ctx; +#endif +}; + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset); +void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value); + +int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset); +int phytium_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset, int value); + +void phytium_gpio_irq_ack(struct irq_data *d); +void phytium_gpio_irq_mask(struct irq_data *d); +void phytium_gpio_irq_unmask(struct irq_data *d); +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type); +void phytium_gpio_irq_enable(struct irq_data *d); +void phytium_gpio_irq_disable(struct irq_data *d); +void phytium_gpio_irq_handler(struct irq_desc *desc); +int phytium_gpio_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force); +#endif diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..e85a170297737bba38201a71246f51a20e48ec1f --- /dev/null +++ b/drivers/gpio/gpio-phytium-pci.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static int phytium_gpio_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct phytium_gpio *gpio; + struct gpio_irq_chip *girq; + int err; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device: err %d\n", err); + goto out; + } + + err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (err) { + dev_err(dev, "Failed to iomap PCI device: err %d\n", err); + goto out; + } + + gpio->regs = pcim_iomap_table(pdev)[0]; + if (!gpio->regs) { + dev_err(dev, "Cannot map PCI resource\n"); + err = -ENOMEM; + goto out; + } + + err = pci_enable_msi(pdev); + if (err < 0) + goto out; + + gpio->irq[0] = pdev->irq; + if (gpio->irq < 0) + dev_warn(dev, "no irq is found.\n"); + + /* There is only one group of Pins at the moment. */ + gpio->ngpio[0] = NGPIO_MAX; + + /* irq_chip support */ + gpio->irq_chip.name = dev_name(dev); + gpio->irq_chip.irq_ack = phytium_gpio_irq_ack; + gpio->irq_chip.irq_mask = phytium_gpio_irq_mask; + gpio->irq_chip.irq_unmask = phytium_gpio_irq_unmask; + gpio->irq_chip.irq_set_type = phytium_gpio_irq_set_type; + gpio->irq_chip.irq_enable = phytium_gpio_irq_enable; + gpio->irq_chip.irq_disable = phytium_gpio_irq_disable; + + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + girq = &gpio->gc.irq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + girq->num_parents = 1; + girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = gpio->irq[0]; + girq->parent_handler = phytium_gpio_irq_handler; + + girq->chip = &gpio->irq_chip; + + err = devm_gpiochip_add_data(dev, &gpio->gc, gpio); + if (err) + goto out; + + dev_info(dev, "Phytium PCI GPIO controller @%pa registered\n", + &gpio->regs); + + pci_set_drvdata(pdev, gpio); + +out: + return err; +} + +static const struct pci_device_id phytium_gpio_pci_ids[] = { + { PCI_DEVICE(0x1DB7, 0xDC31) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, phytium_gpio_pci_ids); + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + 
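/* Pending interrupt status is not saved; resume clears it by writing GPIO_PORTA_EOI once the configuration has been restored. */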
gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pci_pm_ops, + phytium_gpio_pci_suspend, + phytium_gpio_pci_resume); + +static struct pci_driver phytium_gpio_pci_driver = { + .name = "gpio-phytium-pci", + .id_table = phytium_gpio_pci_ids, + .probe = phytium_gpio_pci_probe, + .driver = { + .pm = &phytium_gpio_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_gpio_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..546ca4cb32aee0d4cfae580ec65ad5056f77326a --- /dev/null +++ b/drivers/gpio/gpio-phytium-platform.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support functions for Phytium GPIO + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/gpio/gpio-pl061.c + * Copyright (C) 2008, 2009 Provigent Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static const struct of_device_id phytium_gpio_of_match[] = { + { .compatible = "phytium,gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_gpio_of_match); + +static const struct acpi_device_id phytium_gpio_acpi_match[] = { + { "PHYT0001", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_gpio_acpi_match); + +static int phytium_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_gpio *gpio; + struct gpio_irq_chip *girq; + struct fwnode_handle *fwnode; + int err, irq_count; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + if (!device_get_child_node_count(dev)) + return -ENODEV; + + device_for_each_child_node(dev, fwnode) { + int idx; + + if (fwnode_property_read_u32(fwnode, "reg", &idx) || + idx >= MAX_NPORTS) { + dev_err(dev, "missing/invalid port index\n"); + fwnode_handle_put(fwnode); + return -EINVAL; + } + + if (fwnode_property_read_u32(fwnode, "nr-gpios", + &gpio->ngpio[idx])) { + dev_info(dev, + "failed to get number of gpios for Port%c\n", + idx ? 
'B' : 'A'); + gpio->ngpio[idx] = NGPIO_DEFAULT; + } + } + + /* irq_chip support */ + gpio->irq_chip.name = dev_name(dev); + gpio->irq_chip.irq_ack = phytium_gpio_irq_ack; + gpio->irq_chip.irq_mask = phytium_gpio_irq_mask; + gpio->irq_chip.irq_unmask = phytium_gpio_irq_unmask; + gpio->irq_chip.irq_set_type = phytium_gpio_irq_set_type; + gpio->irq_chip.irq_enable = phytium_gpio_irq_enable; + gpio->irq_chip.irq_disable = phytium_gpio_irq_disable; +#ifdef CONFIG_SMP + gpio->irq_chip.irq_set_affinity = phytium_gpio_irq_set_affinity; +#endif + raw_spin_lock_init(&gpio->lock); + + gpio->gc.base = -1; + gpio->gc.get_direction = phytium_gpio_get_direction; + gpio->gc.direction_input = phytium_gpio_direction_input; + gpio->gc.direction_output = phytium_gpio_direction_output; + gpio->gc.get = phytium_gpio_get; + gpio->gc.set = phytium_gpio_set; + gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1]; + gpio->gc.label = dev_name(dev); + gpio->gc.parent = dev; + gpio->gc.owner = THIS_MODULE; + + girq = &gpio->gc.irq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + for (irq_count = 0; irq_count < gpio->ngpio[0]; irq_count++) { + gpio->irq[irq_count] = -ENXIO; + gpio->irq[irq_count] = platform_get_irq(pdev, irq_count); + if (gpio->irq[irq_count] < 0) { + dev_warn(dev, "no irq is found.\n"); + break; + } + }; + + girq->num_parents = irq_count; + girq->parents = gpio->irq; + girq->parent_handler = phytium_gpio_irq_handler; + + girq->chip = &gpio->irq_chip; + + err = devm_gpiochip_add_data(dev, &gpio->gc, gpio); + if (err) + return err; + + platform_set_drvdata(pdev, gpio); + dev_info(dev, "Phytium GPIO controller @%pa registered\n", + &res->start); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR); + gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR); + gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA); + gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR); + gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR); + gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB); + + gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN); + gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK); + gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); + gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_gpio *gpio = platform_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + 
GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pm_ops, phytium_gpio_suspend, + phytium_gpio_resume); + +static struct platform_driver phytium_gpio_driver = { + .driver = { + .name = "gpio-phytium-platform", + .pm = &phytium_gpio_pm_ops, + .of_match_table = of_match_ptr(phytium_gpio_of_match), + .acpi_match_table = ACPI_PTR(phytium_gpio_acpi_match), + }, + .probe = phytium_gpio_probe, +}; + +module_platform_driver(phytium_gpio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium GPIO driver"); diff --git a/drivers/gpio/gpio-phytium-sgpio.c b/drivers/gpio/gpio-phytium-sgpio.c new file mode 100644 index 0000000000000000000000000000000000000000..4ec30f3f01ab1039f31b3460ddbcead53c2ebc15 --- /dev/null +++ b/drivers/gpio/gpio-phytium-sgpio.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SGPIO Driver + * + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SGPIO_CTL0_REG 0x00 +#define SGPIO_CTL0_REG_ENABLE BIT(0) +#define SGPIO_CTL0_REG_RX_DISABLE BIT(1) +#define SGPIO_CTL0_REG_L3_L0 GENMASK(11, 8) +#define SGPIO_CTL0_REG_CLK_DIV_NUM GENMASK(31, 12) +#define SGPIO_CTL1_REG 0x04 +#define SGPIO_CTL1_REG_READY BIT(0) +#define SGPIO_CTL1_REG_W_UPDATA BIT(1) +#define SGPIO_CTL1_REG_OP_MODE BIT(2) +#define SGPIO_CTL1_REG_OP_STATE BIT(3) +#define SGPIO_CTL1_REG_BIT_NUM GENMASK(14, 8) +#define SGPIO_CTL1_REG_INTERVAL_TIMER GENMASK(31, 16) +#define SGPIO_SOFT_RESET_REG 0x08 +#define SGPIO_SOFT_RESET_REG_MASK BIT(0) +#define SGPIO_IRQ_REG 0x0c +#define SGPIO_IRQ_REG_MASK BIT(0) +#define SGPIO_IRQ_M_REG 0x10 +#define SGPIO_IRQ_M_REG_MASK BIT(0) +#define SGPIO_WDATA0_REG 0x14 +#define SGPIO_WDATA_REG(x) (SGPIO_WDATA0_REG + (x) * 4) +#define SGPIO_RDATA0_REG 0x24 +#define SGPIO_RDATA_REG(x) (SGPIO_RDATA0_REG + (x) * 4) + +#define DEFAULT_L3_L0 0 + +#define GPIO_GROUP(x) ((x) >> 6) +#define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) +#define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) + +struct phytium_sgpio { + struct gpio_chip gc; + void __iomem *regs; + unsigned int ngpios; + struct clk *pclk; + + struct mutex lock; + struct completion completion; +}; + +static bool phytium_sgpio_is_input(unsigned int offset) +{ + return !(offset % 2); +} + +static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + u32 reg; + int rc = 0; + + if (phytium_sgpio_is_input(offset)) + return -EINVAL; + + reinit_completion(&gpio->completion); + + /* + * Since this is an output, read the cached value from rdata, + * then update value. + */ + reg = readl(gpio->regs + SGPIO_RDATA_REG(GPIO_GROUP(offset))); + if (val) + reg |= GPIO_BIT(offset); + else + reg &= GPIO_BIT(offset); + writel(reg, gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset))); + + /* Start transmission and wait for completion */ + writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA, + gpio->regs + SGPIO_CTL1_REG); + if (!wait_for_completion_timeout(&gpio->completion, + msecs_to_jiffies(1000))) + rc = -EINVAL; + + return rc; +} + +static int phytium_sgpio_direction_input(struct gpio_chip *gc, + unsigned int offset) +{ + return phytium_sgpio_is_input(offset) ? 
+ +static int phytium_sgpio_direction_output(struct gpio_chip *gc, + unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + int rc; + + mutex_lock(&gpio->lock); + + /* + * No special action is required for setting the direction; we'll + * error-out in sgpio_set_value if this isn't an output GPIO + */ + rc = sgpio_set_value(&gpio->gc, offset, val); + + mutex_unlock(&gpio->lock); + + return rc; +} + +static int phytium_sgpio_get_direction(struct gpio_chip *gc, + unsigned int offset) +{ + return !!phytium_sgpio_is_input(offset); +} + +static int phytium_sgpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + int rc = 0; + u32 val, ctl0; + + mutex_lock(&gpio->lock); + + if (!phytium_sgpio_is_input(offset)) { + val = readl(gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset))); + rc = !!(val & GPIO_BIT(offset)); + mutex_unlock(&gpio->lock); + return rc; + } + + reinit_completion(&gpio->completion); + + /* Enable Rx */ + ctl0 = readl(gpio->regs + SGPIO_CTL0_REG); + writel(ctl0 & ~SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG); + + /* Start reading transaction and wait for completion */ + writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA, + gpio->regs + SGPIO_CTL1_REG); + if (!wait_for_completion_timeout(&gpio->completion, + msecs_to_jiffies(1000))) { + rc = -EINVAL; + goto err; + } + + val = readl(gpio->regs + SGPIO_RDATA_REG(GPIO_GROUP(offset))); + rc = !!(val & GPIO_BIT(offset)); + +err: + /* Disable Rx to hold the value */ + writel(ctl0 | SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG); + mutex_unlock(&gpio->lock); + + return rc; +} + +static void phytium_sgpio_set(struct gpio_chip *gc, + unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + + mutex_lock(&gpio->lock); + + sgpio_set_value(gc, offset, val); + + mutex_unlock(&gpio->lock); +} + +static irqreturn_t phytium_sgpio_irq_handler(int irq, void *data) +{ + struct phytium_sgpio *gpio = data; + + if (!readl(gpio->regs + SGPIO_IRQ_REG)) + return IRQ_NONE; + + /* Clear the interrupt */ + writel(0, gpio->regs + SGPIO_IRQ_REG); + + /* Check if tx/rx has been done */ + if (!(readl(gpio->regs + SGPIO_CTL1_REG) & SGPIO_CTL1_REG_OP_STATE)) + complete(&gpio->completion); + + return IRQ_HANDLED; +} + +static int phytium_sgpio_probe(struct platform_device *pdev) +{ + u32 pclk_freq, sclk_freq, clk_div; + struct phytium_sgpio *gpio; + struct resource *res; + struct device *dev = &pdev->dev; + int rc; + + gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + if (devm_request_irq(dev, platform_get_irq(pdev, 0), + phytium_sgpio_irq_handler, + IRQF_SHARED, dev_name(dev), gpio)) { + dev_err(dev, "failed to request IRQ\n"); + return -ENOENT; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), "ngpios", &gpio->ngpios); + if (rc < 0) { + dev_err(dev, "Could not read ngpios property\n"); + return -EINVAL; + } else if (gpio->ngpios % 32) { + dev_err(&pdev->dev, "Number of GPIOs not multiple of 32: %d\n", + gpio->ngpios); + return -EINVAL; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), + "bus-frequency", &sclk_freq); + if (rc < 0) { + dev_err(dev, "Could not read bus-frequency property\n"); + return -EINVAL; + } + + gpio->pclk = devm_clk_get(dev, NULL); + if (IS_ERR(gpio->pclk)) { +
dev_err(dev, "Could not get the APB clock property\n"); + return PTR_ERR(gpio->pclk); + } + rc = clk_prepare_enable(gpio->pclk); + if (rc) { + dev_err(dev, "failed to enable pclk: %d\n", rc); + return rc; + } + pclk_freq = clk_get_rate(gpio->pclk); + + /* + * From the datasheet: + * (pclk / 2) / (clk_div + 1) = sclk + */ + if (sclk_freq == 0) { + dev_err(dev, "SCLK should not be 0\n"); + return -EINVAL; + } + + clk_div = (pclk_freq / (sclk_freq * 2)) - 1; + if (clk_div > (1 << 20) - 1) { + dev_err(dev, "clk_div is overflow\n"); + return -EINVAL; + } + + writel(FIELD_PREP(SGPIO_CTL0_REG_CLK_DIV_NUM, clk_div) | + FIELD_PREP(SGPIO_CTL0_REG_L3_L0, DEFAULT_L3_L0) | + SGPIO_CTL0_REG_RX_DISABLE | SGPIO_CTL0_REG_ENABLE, + gpio->regs + SGPIO_CTL0_REG); + + writel(FIELD_PREP(SGPIO_CTL1_REG_BIT_NUM, gpio->ngpios) | + SGPIO_CTL1_REG_READY, gpio->regs + SGPIO_CTL1_REG); + + mutex_init(&gpio->lock); + init_completion(&gpio->completion); + platform_set_drvdata(pdev, gpio); + + gpio->gc.parent = dev; + gpio->gc.base = -1; + gpio->gc.ngpio = gpio->ngpios * 2; + gpio->gc.label = dev_name(dev); + gpio->gc.direction_input = phytium_sgpio_direction_input; + gpio->gc.direction_output = phytium_sgpio_direction_output; + gpio->gc.get_direction = phytium_sgpio_get_direction; + gpio->gc.get = phytium_sgpio_get; + gpio->gc.set = phytium_sgpio_set; + + return devm_gpiochip_add_data(dev, &gpio->gc, gpio); +} + +static const struct of_device_id phytium_sgpio_of_match[] = { + { .compatible = "phytium,sgpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sgpio_of_match); + +static struct platform_driver phytium_sgpio_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_sgpio_of_match), + }, + .probe = phytium_sgpio_probe, +}; +module_platform_driver(phytium_sgpio_driver); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SGPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 2afd9de84a0d051fed6ca12e7c97e0e266e314fd..dc42571e6fdc8619bd2a01016c33c51f621bc37d 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -54,6 +54,7 @@ struct pl061 { void __iomem *base; struct gpio_chip gc; + struct irq_chip irq_chip; int parent_irq; #ifdef CONFIG_PM @@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state) return irq_set_irq_wake(pl061->parent_irq, state); } -static struct irq_chip pl061_irqchip = { - .name = "pl061", - .irq_ack = pl061_irq_ack, - .irq_mask = pl061_irq_mask, - .irq_unmask = pl061_irq_unmask, - .irq_set_type = pl061_irq_type, - .irq_set_wake = pl061_irq_set_wake, -}; - static int pl061_probe(struct amba_device *adev, const struct amba_id *id) { struct device *dev = &adev->dev; @@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) /* * irq_chip support */ + pl061->irq_chip.name = dev_name(dev); + pl061->irq_chip.irq_ack = pl061_irq_ack; + pl061->irq_chip.irq_mask = pl061_irq_mask; + pl061->irq_chip.irq_unmask = pl061_irq_unmask; + pl061->irq_chip.irq_set_type = pl061_irq_type; + pl061->irq_chip.irq_set_wake = pl061_irq_set_wake; + writeb(0, pl061->base + GPIOIE); /* disable irqs */ irq = adev->irq[0]; if (irq < 0) { @@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) } pl061->parent_irq = irq; - ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip, + ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_info(&adev->dev, 
"could not add irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip, + gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip, irq, pl061_irq_handler); amba_set_drvdata(adev, pl061); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index c18712dabf93d359a9fc34ed4431d1791dc36aef..bcc6be4a5cb2ed38c000c6e15a99e91692c81c2c 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) { switch (gpio_type) { case PXA3XX_GPIO: + case MMP2_GPIO: return false; default: @@ -268,8 +269,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset) if (pxa_gpio_has_pinctrl()) { ret = pinctrl_gpio_direction_input(chip->base + offset); - if (!ret) - return 0; + if (ret) + return ret; } spin_lock_irqsave(&gpio_lock, flags); @@ -776,6 +777,9 @@ static int pxa_gpio_suspend(void) struct pxa_gpio_bank *c; int gpio; + if (!pchip) + return 0; + for_each_gpio_bank(gpio, c, pchip) { c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET); c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET); @@ -794,6 +798,9 @@ static void pxa_gpio_resume(void) struct pxa_gpio_bank *c; int gpio; + if (!pchip) + return; + for_each_gpio_bank(gpio, c, pchip) { /* restore level with set/clear */ writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET); diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c index d6d36d537e3736955881944cbf2aae53326841bf..b77ea16ffa031e4cbe291d8f5d6463338eaed536 100644 --- a/drivers/gpio/gpio-raspberrypi-exp.c +++ b/drivers/gpio/gpio-raspberrypi-exp.c @@ -206,6 +206,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev) } fw = rpi_firmware_get(fw_node); + of_node_put(fw_node); if (!fw) return -EPROBE_DEFER; diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 87c18a544513768ce03aa38464a77125e6b1c867..7f3da34c78746b7549d8d394337e408e1c2a9768 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val) BIT(offs % SYSCON_REG_BITS)); } - priv->data->set(chip, offset, val); + chip->set(chip, offset, val); return 0; } diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index d4ad6d0e02a25be60755b39b600c16cbbcd43981..a9cb5571de5459760e726fa75f2ffd0b3bac3310 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -37,6 +37,7 @@ struct fsl_gpio_soc_data { struct vf610_gpio_port { struct gpio_chip gc; + struct irq_chip ic; void __iomem *base; void __iomem *gpio_base; const struct fsl_gpio_soc_data *sdata; @@ -66,8 +67,6 @@ struct vf610_gpio_port { #define PORT_INT_EITHER_EDGE 0xb #define PORT_INT_LOGIC_ONE 0xc -static struct irq_chip vf610_gpio_irq_chip; - static const struct fsl_gpio_soc_data imx_data = { .have_paddr = true, }; @@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) return 0; } -static struct irq_chip vf610_gpio_irq_chip = { - .name = "gpio-vf610", - .irq_ack = vf610_gpio_irq_ack, - .irq_mask = vf610_gpio_irq_mask, - .irq_unmask = vf610_gpio_irq_unmask, - .irq_set_type = vf610_gpio_irq_set_type, - .irq_set_wake = vf610_gpio_irq_set_wake, -}; - static int vf610_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -259,6 +249,8 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + struct irq_chip *ic; + int i; int 
ret; port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); @@ -294,21 +286,32 @@ static int vf610_gpio_probe(struct platform_device *pdev) gc->direction_output = vf610_gpio_direction_output; gc->set = vf610_gpio_set; + ic = &port->ic; + ic->name = "gpio-vf610"; + ic->irq_ack = vf610_gpio_irq_ack; + ic->irq_mask = vf610_gpio_irq_mask; + ic->irq_unmask = vf610_gpio_irq_unmask; + ic->irq_set_type = vf610_gpio_irq_set_type; + ic->irq_set_wake = vf610_gpio_irq_set_wake; + ret = gpiochip_add_data(gc, port); if (ret < 0) return ret; + /* Mask all GPIO interrupts */ + for (i = 0; i < gc->ngpio; i++) + vf610_gpio_writel(0, port->base + PORT_PCR(i)); + /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, port->base + PORT_ISFR); - ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0, - handle_edge_irq, IRQ_TYPE_NONE); + ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add irqchip\n"); gpiochip_remove(gc); return ret; } - gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq, + gpiochip_set_chained_irqchip(gc, ic, port->irq, vf610_gpio_irq_handler); return 0; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 8b9d7e42c600b60d26bad7f26c32ed938be6e7a2..8edbb3f0c1013bdabab88b20f9acc7facea60963 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -23,11 +24,33 @@ #include "gpiolib.h" +static int run_edge_events_on_boot = -1; +module_param(run_edge_events_on_boot, int, 0444); +MODULE_PARM_DESC(run_edge_events_on_boot, + "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); + +/** + * struct acpi_gpio_event - ACPI GPIO event handler data + * + * @node: list-entry of the events list of the struct acpi_gpio_chip + * @handle: handle of ACPI method to execute when the IRQ triggers + * @handler: irq_handler to pass to request_irq when requesting the IRQ + * @pin: GPIO pin number on the gpio_chip + * @irq: Linux IRQ number for the event, for request_ / free_irq + * @irqflags: flags to pass to request_irq when requesting the IRQ + * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source + * @is_requested: True if request_irq has been done + * @desc: gpio_desc for the GPIO pin for this event + */ struct acpi_gpio_event { struct list_head node; acpi_handle handle; + irq_handler_t handler; unsigned int pin; unsigned int irq; + unsigned long irqflags; + bool irq_is_wake; + bool irq_requested; struct gpio_desc *desc; }; @@ -53,10 +76,10 @@ struct acpi_gpio_chip { /* * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init - * (so builtin drivers) we register the ACPI GpioInt event handlers from a + * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a * late_initcall_sync handler, so that other builtin drivers can register their * OpRegions before the event handlers can run. This list contains gpiochips - * for which the acpi_gpiochip_request_interrupts() has been deferred. + * for which the acpi_gpiochip_request_irqs() call has been deferred. 
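 * The deferred chips are processed from acpi_gpio_handle_deferred_request_irqs(),
 * which runs as a late_initcall_sync (see the bottom of this file).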
*/ static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); @@ -137,8 +160,45 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, } EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); -static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, - void *context) +static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, + struct acpi_gpio_event *event) +{ + int ret, value; + + ret = request_threaded_irq(event->irq, NULL, event->handler, + event->irqflags, "ACPI:Event", event); + if (ret) { + dev_err(acpi_gpio->chip->parent, + "Failed to setup interrupt handler for %d\n", + event->irq); + return; + } + + if (event->irq_is_wake) + enable_irq_wake(event->irq); + + event->irq_requested = true; + + /* Make sure we trigger the initial state of edge-triggered IRQs */ + if (run_edge_events_on_boot && + (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { + value = gpiod_get_raw_value_cansleep(event->desc); + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + event->handler(event->irq, event); + } +} + +static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) +{ + struct acpi_gpio_event *event; + + list_for_each_entry(event, &acpi_gpio->events, node) + acpi_gpiochip_request_irq(acpi_gpio, event); +} + +static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, + void *context) { struct acpi_gpio_chip *acpi_gpio = context; struct gpio_chip *chip = acpi_gpio->chip; @@ -147,8 +207,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, struct acpi_gpio_event *event; irq_handler_t handler = NULL; struct gpio_desc *desc; - unsigned long irqflags; - int ret, pin, irq, value; + int ret, pin, irq; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -179,8 +238,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); - value = gpiod_get_value_cansleep(desc); - ret = gpiochip_lock_as_irq(chip, pin); if (ret) { dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); @@ -193,64 +250,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, goto fail_unlock_irq; } - irqflags = IRQF_ONESHOT; + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + goto fail_unlock_irq; + + event->irqflags = IRQF_ONESHOT; if (agpio->triggering == ACPI_LEVEL_SENSITIVE) { if (agpio->polarity == ACPI_ACTIVE_HIGH) - irqflags |= IRQF_TRIGGER_HIGH; + event->irqflags |= IRQF_TRIGGER_HIGH; else - irqflags |= IRQF_TRIGGER_LOW; + event->irqflags |= IRQF_TRIGGER_LOW; } else { switch (agpio->polarity) { case ACPI_ACTIVE_HIGH: - irqflags |= IRQF_TRIGGER_RISING; + event->irqflags |= IRQF_TRIGGER_RISING; break; case ACPI_ACTIVE_LOW: - irqflags |= IRQF_TRIGGER_FALLING; + event->irqflags |= IRQF_TRIGGER_FALLING; break; default: - irqflags |= IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING; + event->irqflags |= IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING; break; } } - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - goto fail_unlock_irq; - event->handle = evt_handle; + event->handler = handler; event->irq = irq; + event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; - ret = request_threaded_irq(event->irq, NULL, handler, irqflags, - "ACPI:Event", event); - if (ret) { - dev_err(chip->parent, - "Failed to setup interrupt handler for %d\n", - event->irq); - 
goto fail_free_event; - } - - if (agpio->wake_capable == ACPI_WAKE_CAPABLE) - enable_irq_wake(irq); - list_add_tail(&event->node, &acpi_gpio->events); - /* - * Make sure we trigger the initial state of the IRQ when using RISING - * or FALLING. Note we run the handlers on late_init, the AML code - * may refer to OperationRegions from other (builtin) drivers which - * may be probed after us. - */ - if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) - handler(event->irq, event); - return AE_OK; -fail_free_event: - kfree(event); fail_unlock_irq: gpiochip_unlock_as_irq(chip, pin); fail_free_desc: @@ -287,6 +322,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + acpi_walk_resources(handle, "_AEI", + acpi_gpiochip_alloc_event, acpi_gpio); + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); defer = !acpi_gpio_deferred_req_irqs_done; if (defer) @@ -297,8 +335,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (defer) return; - acpi_walk_resources(handle, "_AEI", - acpi_gpiochip_request_interrupt, acpi_gpio); + acpi_gpiochip_request_irqs(acpi_gpio); } EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); @@ -335,10 +372,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; - if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) - disable_irq_wake(event->irq); + if (event->irq_requested) { + if (event->irq_is_wake) + disable_irq_wake(event->irq); + + free_irq(event->irq, event); + } - free_irq(event->irq, event); desc = event->desc; if (WARN_ON(IS_ERR(desc))) continue; @@ -1204,23 +1244,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) return con_id == NULL; } -/* Run deferred acpi_gpiochip_request_interrupts() */ -static int acpi_gpio_handle_deferred_request_interrupts(void) +/* Run deferred acpi_gpiochip_request_irqs() */ +static int acpi_gpio_handle_deferred_request_irqs(void) { struct acpi_gpio_chip *acpi_gpio, *tmp; mutex_lock(&acpi_gpio_deferred_req_irqs_lock); list_for_each_entry_safe(acpi_gpio, tmp, &acpi_gpio_deferred_req_irqs_list, - deferred_req_irqs_list_entry) { - acpi_handle handle; - - handle = ACPI_HANDLE(acpi_gpio->chip->parent); - acpi_walk_resources(handle, "_AEI", - acpi_gpiochip_request_interrupt, acpi_gpio); - - list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); - } + deferred_req_irqs_list_entry) + acpi_gpiochip_request_irqs(acpi_gpio); acpi_gpio_deferred_req_irqs_done = true; mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); @@ -1228,4 +1261,46 @@ static int acpi_gpio_handle_deferred_request_interrupts(void) return 0; } /* We must use _sync so that this runs after the first deferred_probe run */ -late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); +late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); + +static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { + { + /* + * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for + * a non existing micro-USB-B connector which puts the HDMI + * DDC pins in GPIO mode, breaking HDMI support. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), + DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), + } + }, + { + /* + * The Terra Pad 1061 has a micro-USB-B id-pin handler, which + * instead of controlling the actual micro-USB-B turns the 5V + * boost for its USB-A connector off. The actual micro-USB-B + * connector is wired for charging only. 
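+ * Running the boot-time edge handler for this pin would therefore wrongly
+ * switch off the 5V boost, so edge events are not run at boot on this model.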
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), + } + }, + {} /* Terminating entry */ +}; + +static int acpi_gpio_setup_params(void) +{ + if (run_edge_events_on_boot < 0) { + if (dmi_check_system(run_edge_events_on_boot_blacklist)) + run_edge_events_on_boot = 0; + else + run_edge_events_on_boot = 1; + } + + return 0; +} + +/* Directly after dmi_setup() which runs as core_initcall() */ +postcore_initcall(acpi_gpio_setup_params); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d4e7a09598faedbecb1ccdee24e6138b70e4e7d8..e0f149bdf98ff22638cc1e86bd0915077cbf7592 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -646,7 +646,13 @@ int of_gpiochip_add(struct gpio_chip *chip) of_node_get(chip->of_node); - return of_gpiochip_scan_gpios(chip); + status = of_gpiochip_scan_gpios(chip); + if (status) { + of_node_put(chip->of_node); + gpiochip_remove_pin_ranges(chip); + } + + return status; } void of_gpiochip_remove(struct gpio_chip *chip) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 25187403e3ace0d891feaa3b59923efbcdb45a7a..4040cca95532da65b6260423c8bc58bfd6329863 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -217,6 +217,14 @@ int gpiod_get_direction(struct gpio_desc *desc) chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); + /* + * Open drain emulation using input mode may incorrectly report + * input here, fix that up. + */ + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && + test_bit(FLAG_IS_OUT, &desc->flags)) + return 0; + if (!chip->get_direction) return status; @@ -359,7 +367,7 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip) return p; } -static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip) +static int gpiochip_alloc_valid_mask(struct gpio_chip *gpiochip) { #ifdef CONFIG_OF_GPIO int size; @@ -380,6 +388,14 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip) return 0; } +static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip) +{ + if (gpiochip->init_valid_mask) + return gpiochip->init_valid_mask(gpiochip); + + return 0; +} + static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip) { kfree(gpiochip->valid_mask); @@ -524,6 +540,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) return -EINVAL; + /* + * Do not allow both INPUT & OUTPUT flags to be set as they are + * contradictory. + */ + if ((lflags & GPIOHANDLE_REQUEST_INPUT) && + (lflags & GPIOHANDLE_REQUEST_OUTPUT)) + return -EINVAL; + /* * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If * the hardware actually supports enabling both at the same time the @@ -817,7 +841,15 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) /* Do not leak kernel stack to userspace */ memset(&ge, 0, sizeof(ge)); - ge.timestamp = le->timestamp; + /* + * We may be running from a nested threaded interrupt in which case + * we didn't get the timestamp from lineevent_irq_handler(). 
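+ * In that case le->timestamp is still zero, so fall back to the current
+ * time from ktime_get_real_ns() when filling in the event below.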
+ */ + if (!le->timestamp) + ge.timestamp = ktime_get_real_ns(); + else + ge.timestamp = le->timestamp; + level = gpiod_get_value_cansleep(le->desc); if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE @@ -908,7 +940,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } /* This is just wrong: we don't look for events on output lines */ - if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) || + (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || + (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) { ret = -EINVAL; goto out_free_label; } @@ -922,10 +956,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); ret = gpiod_direction_input(desc); if (ret) @@ -938,9 +968,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) - irqflags |= IRQF_TRIGGER_RISING; + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? + IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) - irqflags |= IRQF_TRIGGER_FALLING; + irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? + IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; irqflags |= IRQF_ONESHOT; irqflags |= IRQF_SHARED; @@ -1072,9 +1104,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN | + GPIOLINE_FLAG_IS_OUT); if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE | + GPIOLINE_FLAG_IS_OUT); if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) return -EFAULT; @@ -1285,7 +1319,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); if (!gdev->descs) { status = -ENOMEM; - goto err_free_gdev; + goto err_free_ida; } if (chip->ngpio == 0) { @@ -1341,19 +1375,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, spin_unlock_irqrestore(&gpio_lock, flags); - for (i = 0; i < chip->ngpio; i++) { - struct gpio_desc *desc = &gdev->descs[i]; - - desc->gdev = gdev; - - /* REVISIT: most hardware initializes GPIOs as inputs (often - * with pullups enabled) so power usage is minimized. Linux - * code should set the gpio direction first thing; but until - * it does, and in case chip->get_direction is not set, we may - * expose the wrong direction in sysfs. - */ - desc->flags = !chip->direction_input ? 
(1 << FLAG_IS_OUT) : 0; - } + for (i = 0; i < chip->ngpio; i++) + gdev->descs[i].gdev = gdev; #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); @@ -1363,26 +1386,46 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (status) goto err_remove_from_list; - status = gpiochip_irqchip_init_valid_mask(chip); + status = gpiochip_alloc_valid_mask(chip); if (status) goto err_remove_from_list; - status = gpiochip_init_valid_mask(chip); + status = of_gpiochip_add(chip); if (status) - goto err_remove_irqchip_mask; + goto err_free_gpiochip_mask; - status = gpiochip_add_irqchip(chip, lock_key, request_key); + status = gpiochip_init_valid_mask(chip); if (status) - goto err_remove_chip; + goto err_remove_of_chip; - status = of_gpiochip_add(chip); - if (status) - goto err_remove_chip; + for (i = 0; i < chip->ngpio; i++) { + struct gpio_desc *desc = &gdev->descs[i]; + + if (chip->get_direction && gpiochip_line_is_valid(chip, i)) { + if (!chip->get_direction(chip, i)) + set_bit(FLAG_IS_OUT, &desc->flags); + else + clear_bit(FLAG_IS_OUT, &desc->flags); + } else { + if (!chip->direction_input) + set_bit(FLAG_IS_OUT, &desc->flags); + else + clear_bit(FLAG_IS_OUT, &desc->flags); + } + } acpi_gpiochip_add(chip); machine_gpiochip_add(chip); + status = gpiochip_irqchip_init_valid_mask(chip); + if (status) + goto err_remove_acpi_chip; + + status = gpiochip_add_irqchip(chip, lock_key, request_key); + if (status) + goto err_remove_irqchip_mask; + /* * By first adding the chardev, and then adding the device, * we get a device node entry in sysfs under @@ -1394,17 +1437,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (gpiolib_initialized) { status = gpiochip_setup_dev(gdev); if (status) - goto err_remove_chip; + goto err_remove_irqchip; } return 0; -err_remove_chip: +err_remove_irqchip: + gpiochip_irqchip_remove(chip); +err_remove_irqchip_mask: + gpiochip_irqchip_free_valid_mask(chip); +err_remove_acpi_chip: acpi_gpiochip_remove(chip); +err_remove_of_chip: gpiochip_free_hogs(chip); of_gpiochip_remove(chip); +err_free_gpiochip_mask: gpiochip_free_valid_mask(chip); -err_remove_irqchip_mask: - gpiochip_irqchip_free_valid_mask(chip); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); @@ -1413,8 +1460,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, kfree_const(gdev->label); err_free_descs: kfree(gdev->descs); -err_free_gdev: +err_free_ida: ida_simple_remove(&gpio_ida, gdev->id); +err_free_gdev: /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, gdev->base, gdev->base + gdev->ngpio - 1, @@ -2270,6 +2318,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) unsigned long flags; unsigned offset; + if (label) { + label = kstrdup_const(label, GFP_KERNEL); + if (!label) + return -ENOMEM; + } + spin_lock_irqsave(&gpio_lock, flags); /* NOTE: gpio_request() can be called in early boot, @@ -2280,6 +2334,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) desc_set_label(desc, label ? 
: "?"); status = 0; } else { + kfree_const(label); status = -EBUSY; goto done; } @@ -2296,6 +2351,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) if (status < 0) { desc_set_label(desc, NULL); + kfree_const(label); clear_bit(FLAG_REQUESTED, &desc->flags); goto done; } @@ -2391,6 +2447,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc) chip->free(chip, gpio_chip_hwgpio(desc)); spin_lock_irqsave(&gpio_lock, flags); } + kfree_const(desc->label); desc_set_label(desc, NULL); clear_bit(FLAG_ACTIVE_LOW, &desc->flags); clear_bit(FLAG_REQUESTED, &desc->flags); @@ -2513,19 +2570,27 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc); int gpiod_direction_input(struct gpio_desc *desc) { struct gpio_chip *chip; - int status = -EINVAL; + int status = 0; VALIDATE_DESC(desc); chip = desc->gdev->chip; - if (!chip->get || !chip->direction_input) { + if (!chip->get && chip->direction_input) { gpiod_warn(desc, - "%s: missing get() or direction_input() operations\n", + "%s: missing get() and direction_input() operations\n", __func__); return -EIO; } - status = chip->direction_input(chip, gpio_chip_hwgpio(desc)); + if (chip->direction_input) { + status = chip->direction_input(chip, gpio_chip_hwgpio(desc)); + } else if (chip->get_direction && + (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) { + gpiod_warn(desc, + "%s: missing direction_input() operation\n", + __func__); + return -EIO; + } if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); @@ -2547,16 +2612,28 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) { struct gpio_chip *gc = desc->gdev->chip; int val = !!value; - int ret; + int ret = 0; - if (!gc->set || !gc->direction_output) { + if (!gc->set && !gc->direction_output) { gpiod_warn(desc, - "%s: missing set() or direction_output() operations\n", + "%s: missing set() and direction_output() operations\n", __func__); return -EIO; } - ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val); + if (gc->direction_output) { + ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val); + } else { + if (gc->get_direction && + gc->get_direction(gc, gpio_chip_hwgpio(desc))) { + gpiod_warn(desc, + "%s: missing direction_output() operation\n", + __func__); + return -EIO; + } + gc->set(gc, gpio_chip_hwgpio(desc), val); + } + if (!ret) set_bit(FLAG_IS_OUT, &desc->flags); trace_gpio_value(desc_to_gpio(desc), 0, val); @@ -2621,8 +2698,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) if (!ret) goto set_output_value; /* Emulate open drain by not actively driving the line high */ - if (value) - return gpiod_direction_input(desc); + if (value) { + ret = gpiod_direction_input(desc); + goto set_output_flag; + } } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc), @@ -2630,8 +2709,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) if (!ret) goto set_output_value; /* Emulate open source by not actively driving the line low */ - if (!value) - return gpiod_direction_input(desc); + if (!value) { + ret = gpiod_direction_input(desc); + goto set_output_flag; + } } else { gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc), PIN_CONFIG_DRIVE_PUSH_PULL); @@ -2639,6 +2720,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) set_output_value: return gpiod_direction_output_raw_commit(desc, value); + +set_output_flag: + /* + * When emulating open-source or open-drain functionalities by not + * actively driving the line (setting mode to input) 
we still need to + * set the IS_OUT flag or otherwise we won't be able to set the line + * value anymore. + */ + if (ret == 0) + set_bit(FLAG_IS_OUT, &desc->flags); + return ret; } EXPORT_SYMBOL_GPL(gpiod_direction_output); @@ -2859,7 +2951,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, int gpiod_get_raw_value(const struct gpio_desc *desc) { VALIDATE_DESC(desc); - /* Should be using gpio_get_value_cansleep() */ + /* Should be using gpiod_get_raw_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); return gpiod_get_raw_value_commit(desc); } @@ -2880,7 +2972,7 @@ int gpiod_get_value(const struct gpio_desc *desc) int value; VALIDATE_DESC(desc); - /* Should be using gpio_get_value_cansleep() */ + /* Should be using gpiod_get_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); value = gpiod_get_raw_value_commit(desc); @@ -2952,8 +3044,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) if (value) { err = chip->direction_input(chip, offset); - if (!err) - clear_bit(FLAG_IS_OUT, &desc->flags); } else { err = chip->direction_output(chip, offset, 0); if (!err) @@ -2983,8 +3073,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value set_bit(FLAG_IS_OUT, &desc->flags); } else { err = chip->direction_input(chip, offset); - if (!err) - clear_bit(FLAG_IS_OUT, &desc->flags); } trace_gpio_direction(desc_to_gpio(desc), !value, err); if (err < 0) @@ -3105,7 +3193,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, void gpiod_set_raw_value(struct gpio_desc *desc, int value) { VALIDATE_DESC_VOID(desc); - /* Should be using gpiod_set_value_cansleep() */ + /* Should be using gpiod_set_raw_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); gpiod_set_raw_value_commit(desc, value); } @@ -3146,6 +3234,7 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value) void gpiod_set_value(struct gpio_desc *desc, int value) { VALIDATE_DESC_VOID(desc); + /* Should be using gpiod_set_value_cansleep() */ WARN_ON(desc->gdev->chip->can_sleep); gpiod_set_value_nocheck(desc, value); } @@ -3212,11 +3301,19 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep); * @desc: gpio to set the consumer name on * @name: the new consumer name */ -void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) +int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { - VALIDATE_DESC_VOID(desc); - /* Just overwrite whatever the previous name was */ - desc->label = name; + VALIDATE_DESC(desc); + if (name) { + name = kstrdup_const(name, GFP_KERNEL); + if (!name) + return -ENOMEM; + } + + kfree_const(desc->label); + desc_set_label(desc, name); + + return 0; } EXPORT_SYMBOL_GPL(gpiod_set_consumer_name); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cb88528e7b10c0aff35dc17f3996a451a064e85a..3eef585b6f64f742b55b3d8a2d03b33ca3b8ddf8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC is 100. Typical values for double buffering will be 200, triple buffering 300. +config DRM_FBDEV_LEAK_PHYS_SMEM + bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)" + depends on DRM_FBDEV_EMULATION && EXPERT + default n + help + In order to keep user-space compatibility, we want in certain + use-cases to keep leaking the fbdev physical address to the + user-space program handling the fbdev buffer. + This affects, not only, Amlogic, Allwinner or Rockchip devices + with ARM Mali GPUs using an userspace Blob. 
+ This option is not supported by upstream developers and should be + removed as soon as possible and be considered as a broken and + legacy behaviour from a modern fbdev device driver. + + Please send any bug reports when using this to your proprietary + software vendor that requires this. + + If in doubt, say "N" or spread the word to your closed source + library vendor. + config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" depends on DRM @@ -315,6 +335,8 @@ source "drivers/gpu/drm/tve200/Kconfig" source "drivers/gpu/drm/xen/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a6771cef85e25d74b73f1a2ff29bd34110605488..003ad888722984908b95c658e215a0ab322f2b56 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -106,4 +106,5 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ obj-$(CONFIG_DRM_XEN) += xen/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 71efcf38f11beb2c628cee39ff4bae7fa50bedbd..94cd8a2610912d0eb813909de7e9ce3e02bdc1b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -276,7 +276,7 @@ static int acp_hw_init(void *handle) u32 val = 0; u32 count = 0; struct device *dev; - struct i2s_platform_data *i2s_pdata; + struct i2s_platform_data *i2s_pdata = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -317,20 +317,21 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); - if (adev->acp.acp_cell == NULL) - return -ENOMEM; + if (adev->acp.acp_cell == NULL) { + r = -ENOMEM; + goto failure; + } adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { - kfree(adev->acp.acp_res); - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } switch (adev->asic_type) { @@ -427,7 +428,7 @@ static int acp_hw_init(void *handle) r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) - return r; + goto failure; if (adev->asic_type != CHIP_STONEY) { for (i = 0; i < ACP_DEVS ; i++) { @@ -435,7 +436,7 @@ static int acp_hw_init(void *handle) r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); if (r) { dev_err(dev, "Failed to add dev to genpd\n"); - return r; + goto failure; } } } @@ -454,7 +455,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto failure; } udelay(100); } @@ -471,7 +473,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto failure; } udelay(100); } @@ -480,6 +483,13 @@ static int acp_hw_init(void *handle) val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); return 0; + +failure: + kfree(i2s_pdata); + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_cell); + kfree(adev->acp.acp_genpd); + return r; } /** diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 353993218f213ff6e3686edfc9444b3296ba31f9..cb27016b2a769de539a3dcf7c61a9b2fa3411583 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -90,6 +90,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, struct acpi_buffer *params) { acpi_status status; + union acpi_object *obj; union acpi_object atif_arg_elements[2]; struct acpi_object_list atif_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -112,16 +113,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, &buffer); + obj = (union acpi_object *)buffer.pointer; - /* Fail only if calling the method fails and ATIF is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + /* Fail if calling the method fails */ + if (ACPI_FAILURE(status)) { DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", acpi_format_exception(status)); - kfree(buffer.pointer); + kfree(obj); return NULL; } - return buffer.pointer; + if (obj->type != ACPI_TYPE_BUFFER) { + DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n", + obj->type); + kfree(obj); + return NULL; + } + + return obj; } /** @@ -358,7 +367,9 @@ static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, * * Checks the acpi event and if it matches an atif event, * handles it. - * Returns NOTIFY code + * + * Returns: + * NOTIFY_BAD or NOTIFY_DONE, depending on the event. */ static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) @@ -372,11 +383,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; + /* Is this actually our event? 
*/ if (!atif || !atif->notification_cfg.enabled || - event->type != atif->notification_cfg.command_code) - /* Not our event */ - return NOTIFY_DONE; + event->type != atif->notification_cfg.command_code) { + /* These events will generate keypresses otherwise */ + if (event->type == ACPI_VIDEO_NOTIFY_PROBE) + return NOTIFY_BAD; + else + return NOTIFY_DONE; + } if (atif->functions.sbios_requests) { struct atif_sbios_requests req; @@ -385,7 +401,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, count = amdgpu_atif_get_sbios_requests(atif, &req); if (count <= 0) - return NOTIFY_DONE; + return NOTIFY_BAD; DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); @@ -409,8 +425,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, } } if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { - if ((adev->flags & AMD_IS_PX) && - amdgpu_atpx_dgpu_req_power_for_displays()) { + if (adev->flags & AMD_IS_PX) { pm_runtime_get_sync(adev->ddev->dev); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(adev->ddev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0c791e35acf02f3c71e17c77126ec1f9e97c6fe5..b0a7c3403f02d30ecdd412561bea8f709ca4c7c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -342,15 +342,15 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, return r; } -void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) +void free_gtt_mem(struct kgd_dev *kgd, void **mem_obj) { - struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; + struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj; - amdgpu_bo_reserve(bo, true); - amdgpu_bo_kunmap(bo); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&(bo)); + amdgpu_bo_reserve(*bo, true); + amdgpu_bo_kunmap(*bo); + amdgpu_bo_unpin(*bo); + amdgpu_bo_unreserve(*bo); + amdgpu_bo_unref(bo); } void get_local_mem_info(struct kgd_dev *kgd, @@ -496,8 +496,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_dpm_switch_power_profile(adev, - PP_SMC_POWER_PROFILE_COMPUTE, !idle); + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->switch_power_profile) + amdgpu_dpm_switch_power_profile(adev, + PP_SMC_POWER_PROFILE_COMPUTE, + !idle); } bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index cc9aeab5468c8559b5b47fa56e452d6b2feb4d11..b45a45efcfe94185ab87e203bda71e6b2b2ae004 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -137,7 +137,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); -void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); +void free_gtt_mem(struct kgd_dev *kgd, void **mem_obj); void get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index bf872f694f5090da9ebc1fc5b227bb45364ef7d4..653651a5b91079aadc5ed59ce892dc0d5424933c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1641,6 +1641,8 @@ int amdgpu_atombios_init_mc_reg_table(struct 
amdgpu_device *adev, (u32)le32_to_cpu(*((u32 *)reg_data + j)); j++; } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) { + if (i == 0) + continue; reg_table->mc_reg_table_entry[num_ranges].mc_data[i] = reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1]; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a028661d9e2013dd2a6e5611448438c7590fec82..354c8b6106dc273b52f1d9e898d060038e4d6214 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -575,7 +575,9 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index d472a2c8399febe576f29f0522ddaafeb0eabebe..ce7f18c5ccb26e861ef625aed1434ef0941e17ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -67,7 +67,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, unsigned i; int r; - if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry)) + if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list)) + / sizeof(struct amdgpu_bo_list_entry)) return -EINVAL; size = sizeof(struct amdgpu_bo_list); @@ -263,7 +264,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, r = amdgpu_bo_create_list_entry_array(&args->in, &info); if (r) - goto error_free; + return r; switch (args->in.operation) { case AMDGPU_BO_LIST_OP_CREATE: @@ -276,8 +277,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); mutex_unlock(&fpriv->bo_list_lock); if (r < 0) { - amdgpu_bo_list_put(list); - return r; + goto error_put_list; } handle = r; @@ -299,9 +299,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, mutex_unlock(&fpriv->bo_list_lock); if (IS_ERR(old)) { - amdgpu_bo_list_put(list); r = PTR_ERR(old); - goto error_free; + goto error_put_list; } amdgpu_bo_list_put(old); @@ -318,8 +317,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, return 0; +error_put_list: + amdgpu_bo_list_put(list); + error_free: - if (info) - kvfree(info); + kvfree(info); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 8816c697b2053c7c28f119f1362443d7b9ad6e98..9e768ff392fec058c21947ebb47ab84dd8d2e246 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -212,6 +212,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, struct amdgpu_firmware_info *ucode; id = fw_type_convert(cgs_device, type); + if (id >= AMDGPU_UCODE_ID_MAXIMUM) + return -EINVAL; + ucode = &adev->firmware.ucode[id]; if (ucode->fw == NULL) return -EINVAL; @@ -330,7 +333,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - 
((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); } else @@ -351,7 +356,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (type == CGS_UCODE_ID_SMU) { if (((adev->pdev->device == 0x67ef) && ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe2) || (adev->pdev->revision == 0xe5))) || ((adev->pdev->device == 0x67ff) && ((adev->pdev->revision == 0xcf) || @@ -359,8 +363,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); - } else + } else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); } @@ -375,17 +384,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xe7) || (adev->pdev->revision == 0xef))) || ((adev->pdev->device == 0x6fdf) && - (adev->pdev->revision == 0xef))) { + ((adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); - } else + } else if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin"); + } else { strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + } } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); } break; case CHIP_POLARIS12: - strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + (adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris12_k_smc.bin"); + } else { + strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + } break; case CHIP_VEGAM: strcpy(fw_name, "amdgpu/vegam_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b31d121a876bf32e64c51ad23ead9b08713f74bb..aa2731848c3a1d8cf3ddb7948e8e6f9c28c00a62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -122,14 +122,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs goto free_chunk; } + mutex_lock(&p->ctx->lock); + /* skip guilty context job */ if (atomic_read(&p->ctx->guilty) == 1) { ret = -ECANCELED; goto free_chunk; } - mutex_lock(&p->ctx->lock); - /* get chunks */ chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, @@ -1501,15 +1501,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r; if (r == 0) break; - - if (fence->error) - return fence->error; } memset(wait, 0, sizeof(*wait)); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f5fb93795a69a8955cd0327db6e6eddac37a7c1e..ccdd7f4b029cbd4910b232437d753fb2de108398 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -391,6 +391,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + if (!adev->smc_wreg) + return -EOPNOTSUPP; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -430,6 +433,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * ssize_t result = 0; int r; + if (!adev->smc_rreg) + return -EOPNOTSUPP; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -707,7 +713,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 39bf2ce548c61e2cabb2ad65aa9036d07f963093..7f6af421d3e98998dd534da186ea89b2f86bbdc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1653,8 +1653,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_amdkfd_device_init(adev); - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); + } return 0; } @@ -2555,9 +2557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_init_data_exchange(adev); - amdgpu_fbdev_init(adev); r = amdgpu_pm_sysfs_init(adev); @@ -3269,6 +3268,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, r = amdgpu_ib_ring_tests(adev); error: + amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { atomic_inc(&adev->vram_lost_counter); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6748cd7fc129b0e7b83966da865f674c676c04e1..686a26de50f91e816471548bf3c1a0fc3f86db86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); + if (amdgpu_device_has_dc_support(adev)) { + adev->mode_info.max_bpc_property = + drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); + if (!adev->mode_info.max_bpc_property) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0f41d8647376a23234905751fdef534bfed8bd5a..5e29f14f4b301bea74c28153724668c9c6c0bef3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -753,6 +753,7 @@ static const struct pci_device_id pciidlist[] = { /* VEGAM */ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, @@ -761,7 +762,13 @@ static const struct pci_device_id pciidlist[] = { 
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, /* Vega 12 */ {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, @@ -834,6 +841,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret == -EPROBE_DEFER) return ret; +#ifdef CONFIG_DRM_AMDGPU_SI + if (!amdgpu_si_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + dev_info(&pdev->dev, + "SI support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + if (!amdgpu_cik_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + dev_info(&pdev->dev, + "CIK support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif + /* Get rid of things like offb */ ret = amdgpu_kick_out_firmware_fb(pdev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 69c5d22f29bdf96fb44aba06111d7a347c91f533..d55ff59584c82bb4551c8853d72c435b60cbbae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -297,10 +297,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper, static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) { struct amdgpu_framebuffer *rfb = &rfbdev->rfb; + int i; drm_fb_helper_unregister_fbi(&rfbdev->helper); if (rfb->base.obj[0]) { + for (i = 0; i < rfb->base.format->num_planes; i++) + drm_gem_object_put(rfb->base.obj[0]); amdgpufb_destroy_pinned_object(rfb->base.obj[0]); rfb->base.obj[0] = NULL; drm_framebuffer_unregister_private(&rfb->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7056925eb38606fcd896ea2525643261f8495575..869ff624b108c5c6241470427bd5ff5bb19d6ca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct dma_fence *old, **ptr; + struct dma_fence __rcu **ptr; uint32_t seq; + int r; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); if (fence == NULL) @@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, seq, flags | AMDGPU_FENCE_FLAG_INT); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; + if (unlikely(rcu_dereference_protected(*ptr, 1))) { + struct dma_fence *old; + + rcu_read_lock(); + old = dma_fence_get_rcu_safe(ptr); + rcu_read_unlock(); + + if (old) { + r = dma_fence_wait(old, false); + dma_fence_put(old); + if 
(r) + return r; + } + } + /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. */ - old = rcu_dereference_protected(*ptr, 1); - if (old && !dma_fence_is_signaled(old)) { - DRM_INFO("rcu slot is busy\n"); - dma_fence_wait(old, false); - } - rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); *f = &fence->base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 51b5e977ca885ef1f7d7df49698f3c6843bab437..f4e9d1b10e3edef7de36431f2a0085958b9a1278 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -139,7 +139,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* ring tests don't use a job */ if (job) { vm = job->vm; - fence_ctx = job->base.s_fence->scheduled.context; + fence_ctx = job->base.s_fence ? + job->base.s_fence->scheduled.context : 0; } else { vm = NULL; fence_ctx = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 3a072a7a39f0faba89936f5735b349e282cf229a..df9b173c3d0b000462ce0731bbe2695c92f35d59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) /* skip over VMID 0, since it is the system VM */ for (j = 1; j < id_mgr->num_ids; ++j) { amdgpu_vmid_reset(adev, i, j); - amdgpu_sync_create(&id_mgr->ids[i].active); + amdgpu_sync_create(&id_mgr->ids[j].active); list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 391e2f7c03aacdfae679057204e02e10e756cb8a..cf582cc46d53e05b4e5ba1064ce0ca046368afe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -66,6 +66,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sched_sync); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); + (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; return 0; } @@ -202,7 +203,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); struct dma_fence *fence = NULL, *finished; struct amdgpu_job *job; - int r; + int r = 0; job = to_amdgpu_job(sched_job); finished = &job->base.s_fence->finished; @@ -227,6 +228,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) job->fence = dma_fence_get(fence); amdgpu_job_free_resources(job); + + fence = r ? 
ERR_PTR(r) : fence; return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bd98cc5fb97bcab725c18f240fa19658da961354..ba10577569f856d42e8393a8d7e6010430c3a803 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) struct amdgpu_device *adev; int r, acpi_status; -#ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(dev->dev, - "SI support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(dev->dev, - "CIK support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif - adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); if (adev == NULL) { return -ENOMEM; @@ -159,6 +124,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (amdgpu_device_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); @@ -292,9 +258,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (!info->return_size || !info->return_pointer) return -EINVAL; - /* Ensure IB tests are run on ring */ - flush_delayed_work(&adev->late_init_work); - switch (info->query) { case AMDGPU_INFO_ACCEL_WORKING: ui32 = adev->accel_working; @@ -564,6 +527,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + if (info->read_mmr_reg.count > 128) + return -EINVAL; + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; @@ -861,6 +827,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) struct amdgpu_fpriv *fpriv; int r, pasid; + /* Ensure IB tests are run on ring */ + flush_delayed_work(&adev->late_init_work); + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb756a0d7291c605353d59cd228826e..d1b4d9b6aae0d1743f77dc2373d0c9159d03f937 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -339,6 +339,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* maximum number of bits per channel for monitor color */ + struct drm_property *max_bpc_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b0e14a3d54efd44e86580f816f41f05f16368b08..b14ce112703f06d6367ca9e3f6b99d87e91025d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -428,7 +428,8 @@ static int amdgpu_bo_do_create(struct 
amdgpu_device *adev, .interruptible = (bp->type != ttm_bo_type_kernel), .no_wait_gpu = false, .resv = bp->resv, - .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT + .flags = bp->type != ttm_bo_type_kernel ? + TTM_OPT_FLAG_ALLOW_RES_EVICT : 0 }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7b4e657a95c700561298346654118a9915c8caec..c3df75a9f65d98f213e24aa5e63261b36636de2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1443,7 +1443,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + (attr == &sensor_dev_attr_power1_average.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 1c5d97f4b4dde4e9a7b53c7ea70fc5746327640e..8dcf6227ab99076e07343d4ff456ef7dc440c307 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -37,6 +37,7 @@ #include "amdgpu_display.h" #include #include +#include static const struct dma_buf_ops amdgpu_dmabuf_ops; @@ -188,6 +189,48 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); } +static int +__reservation_object_make_exclusive(struct reservation_object *obj) +{ + struct dma_fence **fences; + unsigned int count; + int r; + + if (!reservation_object_get_list(obj)) /* no shared fences to convert */ + return 0; + + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); + if (r) + return r; + + if (count == 0) { + /* Now that was unexpected. */ + } else if (count == 1) { + reservation_object_add_excl_fence(obj, fences[0]); + dma_fence_put(fences[0]); + kfree(fences); + } else { + struct dma_fence_array *array; + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), 0, + false); + if (!array) + goto err_fences_put; + + reservation_object_add_excl_fence(obj, &array->base); + dma_fence_put(&array->base); + } + + return 0; + +err_fences_put: + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; +} + /** * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation * @dma_buf: shared DMA buffer @@ -219,16 +262,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, if (attach->dev->driver != adev->dev->driver) { /* - * Wait for all shared fences to complete before we switch to future - * use of exclusive fence on this prime shared bo. + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. 
*/ - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, - MAX_SCHEDULE_TIMEOUT); - if (unlikely(r < 0)) { - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); + r = __reservation_object_make_exclusive(bo->tbo.resv); + if (r) goto error_unreserve; - } } /* pin buffer into GTT */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 5b39d139963046bb851df3c0ebdb4746c50ed8ae..5be82e4fd1da6005d48a4eb2a43728059e1e1835 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev); static int psp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; psp_set_funcs(adev); - return 0; -} - -static int psp_sw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct psp_context *psp = &adev->psp; - int ret; - switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: @@ -67,6 +59,15 @@ static int psp_sw_init(void *handle) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) return 0; + return 0; +} + +static int psp_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; + int ret; + ret = psp_init_microcode(psp); if (ret) { DRM_ERROR("Failed to load psp firmware!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 8904e62dca7ae277143f4a3cbdffaa28ef120bf9..41d3142ef3cf048c8deb43910109344f70a482de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fcf421263fd9689226de77738da98f09a6d4b280..47728a7bd83736a98af59878c667c01a20e2bee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1276,6 +1276,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) if (gtt && gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); + ttm->sg = NULL; ttm->page_flags &= ~TTM_PAGE_FLAG_SG; return; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index f55f72a37ca838d223cd9759ae6c8b2fddff0875..c29d519fa381a8fbe91a7703374a10ef0c75d1f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + case CHIP_HAINAN: return AMDGPU_FW_LOAD_DIRECT; #endif #ifdef CONFIG_DRM_AMDGPU_CIK diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 5f3f540738187c6db03a7975bced71ea4163c9e0..b480ae86ca2226efe134e7bb928a83a1ba078713 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -213,15 +213,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) drm_sched_entity_destroy(&adev->vce.entity); - amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, 
&adev->vce.gpu_addr, - (void **)&adev->vce.cpu_addr); - for (i = 0; i < adev->vce.num_rings; i++) amdgpu_ring_fini(&adev->vce.ring[i]); release_firmware(adev->vce.fw); mutex_destroy(&adev->vce.idle_mutex); + amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, + (void **)&adev->vce.cpu_addr); + return 0; } @@ -714,7 +714,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t created = 0; uint32_t allocated = 0; uint32_t tmp, handle = 0; - uint32_t *size = &tmp; + uint32_t dummy = 0xffffffff; + uint32_t *size = &dummy; unsigned idx; int i, r = 0; @@ -1070,7 +1071,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t rptr = amdgpu_ring_get_rptr(ring); + uint32_t rptr; unsigned i; int r, timeout = adev->usec_timeout; @@ -1084,6 +1085,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) ring->idx, r); return r; } + + rptr = amdgpu_ring_get_rptr(ring); + amdgpu_ring_write(ring, VCE_CMD_END); amdgpu_ring_commit(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 400fc74bbae27e878aebe4e6e27f6eaf22ca8e15..205e683fb92060ad407fdbb18696fe7cf2a699df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -431,7 +431,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t rptr = amdgpu_ring_get_rptr(ring); + uint32_t rptr; unsigned i; int r; @@ -441,6 +441,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) ring->idx, r); return r; } + + rptr = amdgpu_ring_get_rptr(ring); + amdgpu_ring_write(ring, VCN_ENC_CMD_END); amdgpu_ring_commit(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b17771dd5ce732620e8c058f788516b7ef9b6fee..ffc2823cb16b0a24b1d3a43a3423ccf938a4e34c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -700,10 +700,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; - bool pasid_mapping_needed = id->pasid != job->pasid || - !id->pasid_mapping || - !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; + bool pasid_mapping_needed = false; unsigned patch_offset = 0; int r; @@ -713,8 +711,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ pasid_mapping_needed = true; } + mutex_lock(&id_mgr->lock); + if (id->pasid != job->pasid || !id->pasid_mapping || + !dma_fence_is_signaled(id->pasid_mapping)) + pasid_mapping_needed = true; + mutex_unlock(&id_mgr->lock); + gds_switch_needed &= !!ring->funcs->emit_gds_switch; - vm_flush_needed &= !!ring->funcs->emit_vm_flush; + vm_flush_needed &= !!ring->funcs->emit_vm_flush && + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; @@ -751,9 +756,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } if (pasid_mapping_needed) { + mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); + mutex_unlock(&id_mgr->lock); } dma_fence_put(fence); @@ -2041,6 
+2048,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -2067,20 +2105,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo))) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -2133,16 +2165,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo))) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -2156,7 +2181,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -2243,10 +2268,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); @@ -3010,14 +3039,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info) { struct amdgpu_vm *vm; + unsigned long flags; - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); if (vm) *task_info = vm->task_info; - spin_unlock(&adev->vm_manager.pasid_lock); + 
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 44d10c2172f69b822695a71b4fd7efb1831a8107..581ecbe5bbd17551f7d37dba8b4297b68c607ac1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -201,6 +201,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32(mmIH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 960c29e17da6574da108b0e77979557d94a3bcab..cf7beff60058db0183cfa2b9f6bdcf1d898b6eb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -203,6 +203,12 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 9935371db7ceba44457d58fde6b3d8abca30cadf..7aecb52365e914f2de643391e0bceb2837d450ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -63,6 +63,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev) int fb_channel_number; fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number)) + fb_channel_number = 0; return df_v1_7_channel_number[fb_channel_number]; } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index d5ebe566809b22e401f88582a7b73d9caf4faf81..a1c941229f4b342b228f39005088a2b199cfa7a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -75,23 +75,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, { u32 tmp; - /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } else { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_DISABLE; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable) { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, 
tmp); + } + + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); } - - /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); } static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5a9534a82d40911cebb02462ba0cc5a995a5bfca..e1cb7fa89e4d69f7eb3fe6df079f505b89f19665 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6405,7 +6405,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + amdgpu_ring_write(ring, lower_32_bits(seq - 1)); + amdgpu_ring_write(ring, upper_32_bits(seq - 1)); + + /* Then send the real EOP event down the pipe: + * EVENT_WRITE_EOP - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -7154,7 +7170,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 5 + /* COND_EXEC */ 7 + /* PIPELINE_SYNC */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ - 8 + /* FENCE for VM_FLUSH */ + 12 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, the first COND_EXEC jump to the place just @@ -7166,7 +7182,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 31 + /* DE_META */ 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ - 8 + 8 + /* FENCE x2 */ + 12 + 12 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ef00d14f86453bb1e0c4fd3653be6bb144e412f1..28794b1b15c10d28e4af8910c3f9219cd4b9d01f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -82,7 +82,8 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_9_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), @@ -1801,25 +1802,6 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v9_0_init_compute_vmid(adev); - - mutex_lock(&adev->grbm_idx_mutex); - /* - * making sure that the following register writes will be broadcasted - * to all the shaders - */ - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, - (adev->gfx.config.sc_prim_fifo_size_frontend << - 
PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_prim_fifo_size_backend << - PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_hiz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | - (adev->gfx.config.sc_earlyz_tile_fifo_size << - PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); - mutex_unlock(&adev->grbm_idx_mutex); - } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) @@ -2205,7 +2187,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. */ if (adev->gfx.rlc.is_rlc_v2_1) { - gfx_v9_1_init_rlc_save_restore_list(adev); + if (adev->asic_type == CHIP_VEGA12) + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -2243,12 +2226,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) #endif WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); + udelay(50); /* carrizo do enable cp interrupt after cp inited */ - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { gfx_v9_0_enable_gui_idle_interrupt(adev, true); - - udelay(50); + udelay(50); + } #ifdef AMDGPU_RLC_DEBUG_RETRY /* RLC_GPM_GENERAL_6 : RLC Ucode version */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index ad151fefa41f1ed1d6f19ae1783b13b1b76b4f2e..db406a35808f69f3ffd84d6064ed32a8cee4526a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); MODULE_FIRMWARE("amdgpu/verde_mc.bin"); MODULE_FIRMWARE("amdgpu/oland_mc.bin"); +MODULE_FIRMWARE("amdgpu/hainan_mc.bin"); MODULE_FIRMWARE("amdgpu/si58_mc.bin"); #define MC_SEQ_MISC0__MT__MASK 0xf0000000 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9333109b210de810119f0d15d94ec5d125a84cf7..1a744f964b301fd739704d7f20a52106d60d4ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -55,6 +55,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -223,13 +226,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) chip_name = "tonga"; break; case CHIP_POLARIS11: - chip_name = "polaris11"; + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) && + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) + chip_name = "polaris11_k"; + else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) + chip_name = "polaris11_k"; + else + chip_name = "polaris11"; break; case CHIP_POLARIS10: - chip_name = "polaris10"; + if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) + chip_name = "polaris10_k"; + else + chip_name = "polaris10"; break; case CHIP_POLARIS12: - chip_name = "polaris12"; + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + 
(adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) + chip_name = "polaris12_k"; + else + chip_name = "polaris12"; break; case CHIP_FIJI: case CHIP_CARRIZO: @@ -336,7 +365,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 data, vbios_version; + u32 data; int i, ucode_size, regs_size; /* Skip MC ucode loading on SR-IOV capable boards. @@ -347,13 +376,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) if (amdgpu_sriov_bios(adev)) return 0; - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - data = RREG32(mmMC_SEQ_IO_DEBUG_DATA); - vbios_version = data & 0xf; - - if (vbios_version == 0) - return 0; - if (!adev->gmc.fw) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 72f8018fa2a836572b9c898785bb99deecc1ca91..ede27dab675facf137f2a80b2b738b485c864874 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1037,6 +1037,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); + /* After HDP is initialized, flush HDP.*/ adev->nbio_funcs->hdp_flush(adev, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 842c4b677b4d96b083ff2481dd87b5215a043c26..36b771837ec6bfdbe3eb58de7add3ceb2972bf6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -203,6 +203,12 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index cb79a93c2eb73a5f23fb008cee50e80325ada627..e74e53bdec49926ffca47ce8ff0ae329697235b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = @@ -2746,10 +2748,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev) non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } adev->pm.dpm.ps[i].ps_priv = ps; k = 0; idx = (u8 *)&power_state->v2.clockInfoIndex[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index e70a0d4d6db4111a59239accef4d47d0d6676d72..c963eec58c7028de58bb4cb67044e92bb01b170b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -164,6 +164,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); } + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 078f70faedcbb1e15dbf8d737090071d10ea403a..d06332be59d3291bae68f3a1282095f0337352fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, return r; } /* Retrieve checksum from mailbox2 */ - if (req == IDH_REQ_GPU_INIT_ACCESS) { + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { adev->virt.fw_reserve.checksum_key = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 7c3b634d8d5f4117698ca46542eba887b85c4692..de5a689e19250fff05b96f195c593808fac6fc87 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -71,7 +71,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), @@ -89,6 +88,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) }; @@ -96,6 +96,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) }; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index c364ef94cc366e515533728dc05997492de58cab..77c9f4d8668adf96e2042240206c90d606ec1b7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev) if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) { + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) { orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); data &= ~PLL_RAMP_UP_TIME_0_MASK; if (orig != data) @@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev) orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data); orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 1de96995e6900c934c91cf610160768b6c08cd37..0b80cd38f9684d381acf8aa2f9f39924b85fe2da 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6888,6 +6888,8 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); + ni_update_current_ps(adev, boot_ps); + return 0; } @@ -7348,10 +7350,9 @@ static int si_dpm_init(struct amdgpu_device *adev) kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) return -ENOMEM; - } + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 
60dad63098a2aa4db47de4804174a992fb78039e..3d02afe40f17a87f729280179778676a84c21622 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -62,7 +62,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) u64 wptr_off; si_ih_disable_interrupts(adev); - WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; @@ -114,6 +115,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(IH_RB_CNTL, tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 52853d8a8fdda04a98bbedde7b70ee3a52d21b18..6375109e1e72f4d82c99df3c1f7cec94e1e5c46e 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -214,6 +214,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 8a926d1df939a43a2531f2a3d5c9fbf550dff1d9..2b4199adcd946bf238721e953dbe09f189860f74 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 50248059412e78353d2c653819f8b3b311c8214e..88c006c5ee2cdf9657386b3f3ec4b4faa8140462 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 6ae82cc2e55e007cd8b4af958f6e0104510455ae..80613a74df4207827281f3c17b691fb81763419e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -170,7 +170,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t rptr = amdgpu_ring_get_rptr(ring); + uint32_t rptr; unsigned i; int r; @@ -180,6 +180,9 @@ static int 
uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) ring->idx, r); return r; } + + rptr = amdgpu_ring_get_rptr(ring); + amdgpu_ring_write(ring, HEVC_ENC_CMD_END); amdgpu_ring_commit(ring); @@ -420,16 +423,16 @@ static int uvd_v6_0_sw_init(void *handle) DRM_INFO("UVD ENC is disabled\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9b7f8469bc5c081baae8955b23240a8b57628cbe..ce16b8329af044da43d19a000bd6e0848dc80ab9 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t rptr = amdgpu_ring_get_rptr(ring); + uint32_t rptr; unsigned i; int r; @@ -188,6 +188,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) ring->me, ring->idx, r); return r; } + + rptr = amdgpu_ring_get_rptr(ring); + amdgpu_ring_write(ring, HEVC_ENC_CMD_END); amdgpu_ring_commit(ring); @@ -444,10 +447,6 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) continue; @@ -479,6 +478,10 @@ static int uvd_v7_0_sw_init(void *handle) } } + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 072371ef597595505be617b680916266086a0eba..4f8f3bb2183200c0c435d31486a861cbad1b3ceb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -43,6 +43,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); +static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); /** * vcn_v1_0_early_init - set function pointers @@ -216,7 +217,7 @@ static int vcn_v1_0_hw_fini(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) - vcn_v1_0_stop(adev); + vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 5ae5ed2e62d63ba6c667f9ca232501908896b9c5..900bf3f3a09ab2258939041ab24a1472c9502fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) else wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF); /* set rptr, wptr to 0 */ WREG32_SOC15(OSSSYS, 
0, mmIH_RB_RPTR, 0); @@ -215,6 +215,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); } return (wptr & adev->irq.ih.ptr_mask); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 297b36c26a05c819634f9cdeb9426aa243f76fe4..1e8f7d5f67fbbaaaa3da690300c24e9ed78c6c06 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -122,9 +122,6 @@ static int kfd_open(struct inode *inode, struct file *filep) if (IS_ERR(process)) return PTR_ERR(process); - if (kfd_is_locked()) - return -EAGAIN; - dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", process->pasid, process->is_32bit_user_mode); @@ -156,8 +153,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, } if ((args->ring_base_address) && - (!access_ok(VERIFY_WRITE, - (const void __user *) args->ring_base_address, + (!access_ok((const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("Can't access ring base address\n"); return -EFAULT; @@ -168,31 +164,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return -EINVAL; } - if (!access_ok(VERIFY_WRITE, - (const void __user *) args->read_pointer_address, + if (!access_ok((const void __user *) args->read_pointer_address, sizeof(uint32_t))) { pr_err("Can't access read pointer\n"); return -EFAULT; } - if (!access_ok(VERIFY_WRITE, - (const void __user *) args->write_pointer_address, + if (!access_ok((const void __user *) args->write_pointer_address, sizeof(uint32_t))) { pr_err("Can't access write pointer\n"); return -EFAULT; } if (args->eop_buffer_address && - !access_ok(VERIFY_WRITE, - (const void __user *) args->eop_buffer_address, + !access_ok((const void __user *) args->eop_buffer_address, sizeof(uint32_t))) { pr_debug("Can't access eop buffer"); return -EFAULT; } if (args->ctx_save_restore_address && - !access_ok(VERIFY_WRITE, - (const void __user *) args->ctx_save_restore_address, + !access_ok((const void __user *) args->ctx_save_restore_address, sizeof(uint32_t))) { pr_debug("Can't access ctx save restore buffer"); return -EFAULT; @@ -363,8 +355,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, } if ((args->ring_base_address) && - (!access_ok(VERIFY_WRITE, - (const void __user *) args->ring_base_address, + (!access_ok((const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("Can't access ring base address\n"); return -EFAULT; @@ -923,8 +914,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, * nodes, but not more than args->num_of_nodes as that is * the amount of memory allocated by user */ - pa = kzalloc((sizeof(struct kfd_process_device_apertures) * - args->num_of_nodes), GFP_KERNEL); + pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures), + GFP_KERNEL); if (!pa) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index ee4996029a86866fc05807e60ec956e2ddad80df..e84285130bdebf1fcb507db7b5a3db96218cd9de 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -397,6 +397,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); + if (!props2) + return -ENOMEM; + props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 29ac74f40dceb729436907215427298ae9190909..5777b3fff549c4175196419e19ee5f40e86b00b8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -36,7 +36,7 @@ * once locked, kfd driver will stop any further GPU execution. * create process (open) will return -EAGAIN. */ -static atomic_t kfd_locked = ATOMIC_INIT(0); +static int kfd_locked; #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { @@ -310,6 +310,7 @@ static const struct kfd_deviceid supported_devices[] = { { 0x67CF, &polaris10_device_info }, /* Polaris10 */ { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ { 0x67DF, &polaris10_device_info }, /* Polaris10 */ + { 0x6FDF, &polaris10_device_info }, /* Polaris10 */ { 0x67E0, &polaris11_device_info }, /* Polaris11 */ { 0x67E1, &polaris11_device_info }, /* Polaris11 */ { 0x67E3, &polaris11_device_info }, /* Polaris11 */ @@ -326,7 +327,13 @@ static const struct kfd_deviceid supported_devices[] = { { 0x6864, &vega10_device_info }, /* Vega10 */ { 0x6867, &vega10_device_info }, /* Vega10 */ { 0x6868, &vega10_device_info }, /* Vega10 */ + { 0x6869, &vega10_device_info }, /* Vega10 */ + { 0x686A, &vega10_device_info }, /* Vega10 */ + { 0x686B, &vega10_device_info }, /* Vega10 */ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ + { 0x686D, &vega10_device_info }, /* Vega10 */ + { 0x686E, &vega10_device_info }, /* Vega10 */ + { 0x686F, &vega10_device_info }, /* Vega10 */ { 0x687F, &vega10_device_info }, /* Vega10 */ }; @@ -526,7 +533,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: - kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, &kfd->gtt_mem); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->pdev->vendor, kfd->pdev->device); @@ -543,7 +550,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) kfd_topology_remove_device(kfd); kfd_doorbell_fini(kfd); kfd_gtt_sa_fini(kfd); - kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, &kfd->gtt_mem); } kfree(kfd); @@ -570,7 +577,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { - int ret, count; + int ret; if (!kfd->init_complete) return 0; @@ -580,14 +587,18 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) ret = kfd_resume(kfd); if (ret) return ret; - count = atomic_dec_return(&kfd_locked); - WARN_ONCE(count != 0, "KFD reset ref. 
error"); + + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); return 0; } bool kfd_is_locked(void) { - return (atomic_read(&kfd_locked) > 0); + lockdep_assert_held(&kfd_processes_mutex); + return (kfd_locked > 0); + } void kgd2kfd_suspend(struct kfd_dev *kfd) @@ -595,9 +606,11 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) if (!kfd->init_complete) return; + mutex_lock(&kfd_processes_mutex); /* For first KFD device suspend all the KFD processes */ - if (atomic_inc_return(&kfd_locked) == 1) + if (++kfd_locked == 1) kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); kfd->dqm->ops.stop(kfd->dqm); @@ -606,7 +619,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) int kgd2kfd_resume(struct kfd_dev *kfd) { - int ret, count; + int ret; if (!kfd->init_complete) return 0; @@ -615,10 +628,11 @@ int kgd2kfd_resume(struct kfd_dev *kfd) if (ret) return ret; - count = atomic_dec_return(&kfd_locked); - WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); - if (count == 0) + mutex_lock(&kfd_processes_mutex); + if (--kfd_locked == 0) ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); return ret; } @@ -655,6 +669,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; bool is_patched = false; + unsigned long flags; if (!kfd->init_complete) return; @@ -664,7 +679,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock(&kfd->interrupt_lock); + spin_lock_irqsave(&kfd->interrupt_lock, flags); if (kfd->interrupts_active && interrupt_is_wanted(kfd, ih_ring_entry, @@ -673,7 +688,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) is_patched ? 
patched_ihre : ih_ring_entry)) queue_work(kfd->ih_wq, &kfd->interrupt_work); - spin_unlock(&kfd->interrupt_lock); + spin_unlock_irqrestore(&kfd->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 4f22e745df51b4c2aad4ec842f04ac96b4070f68..189212cb3547585b4c5f6f6e64309953a4982a5e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr, return 0; } -static int unmap_sdma_queues(struct device_queue_manager *dqm, - unsigned int sdma_engine) +static int unmap_sdma_queues(struct device_queue_manager *dqm) { - return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, - sdma_engine); + int i, retval = 0; + + for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) { + retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i); + if (retval) + return retval; + } + return retval; } /* dqm->lock mutex has to be locked before calling this function */ @@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, pr_debug("Before destroying queues, sdma queue count is : %u\n", dqm->sdma_queue_count); - if (dqm->sdma_queue_count > 0) { - unmap_sdma_queues(dqm, 0); - unmap_sdma_queues(dqm, 1); - } + if (dqm->sdma_queue_count > 0) + unmap_sdma_queues(dqm); retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, filter, filter_param, false, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index c56ac47cd3189779333acc9e8da89a8f9ad7c5be..c07c9bf58d5f891dcf4ae191bef897a21a37bcb4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd) } kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (!kfd->ih_wq) { + kfifo_free(&kfd->ih_fifo); + dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n"); + return -ENOMEM; + } spin_lock_init(&kfd->interrupt_lock); INIT_WORK(&kfd->interrupt_work, interrupt_wq); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 47243165a082a5221b56fa05929f4f5b46fa8081..ae90a99909efeced0641a2283863bc679e03555e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { - uint64_t addr; - struct cik_mqd *m; - int retval; - - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), - mqd_mem_obj); - - if (retval != 0) - return -ENOMEM; - - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; - addr = (*mqd_mem_obj)->gpu_addr; - - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); - - m->header = 0xC0310800; - m->compute_pipelinestat_enable = 1; - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; - - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | - PRELOAD_REQ; - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | - 
QUANTUM_DURATION(10); - - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; - m->cp_mqd_base_addr_lo = lower_32_bits(addr); - m->cp_mqd_base_addr_hi = upper_32_bits(addr); - - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; - - /* - * Pipe Priority - * Identifies the pipe relative priority when this queue is connected - * to the pipeline. The pipe priority is against the GFX pipe and HP3D. - * In KFD we are using a fixed pipe priority set to CS_MEDIUM. - * 0 = CS_LOW (typically below GFX) - * 1 = CS_MEDIUM (typically between HP3D and GFX - * 2 = CS_HIGH (typically above HP3D) - */ - m->cp_hqd_pipe_priority = 1; - m->cp_hqd_queue_priority = 15; - - *mqd = m; - if (gart_addr) - *gart_addr = addr; - retval = mm->update_mqd(mm, m, q); - - return retval; + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); } static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 0cedb37cf513563dc6fea50e6b40ef0889c3bb61..c1c43c2ea33f1a3cbee7e0256768d8204e797dc3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -75,6 +75,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, struct v9_mqd *m; struct kfd_dev *kfd = mm->dev; + *mqd_mem_obj = NULL; /* From V9, for CWSR, the control stack is located on the next page * boundary after the mqd, we will use the gtt allocation function * instead of sub-allocation function. @@ -92,8 +93,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, } else retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), mqd_mem_obj); - if (retval != 0) + if (retval) { + kfree(*mqd_mem_obj); return -ENOMEM; + } m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr; addr = (*mqd_mem_obj)->gpu_addr; @@ -250,7 +253,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd, struct kfd_dev *kfd = mm->dev; if (mqd_mem_obj->gtt_mem) { - kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, &mqd_mem_obj->gtt_mem); kfree(mqd_mem_obj); } else { kfd_gtt_sa_free(mm->dev, mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 92b285ca73aabb1b225f37e17bd36a9f05e9641e..e04b2bf61a3be87c64c43048a2d3a4e1044ee048 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -148,6 +148,7 @@ extern int noretry; * Halt if HWS hang is detected */ extern int halt_if_hws_hang; +extern struct mutex kfd_processes_mutex; /** * enum kfd_sched_policy diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 4694386cc6236238df2230be18a7173ab1bd86af..458e7af54a0c70bc74914f00a34a53ce056245ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -44,7 +44,7 @@ struct mm_struct; * Unique/indexed by mm_struct* */ DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); -static DEFINE_MUTEX(kfd_processes_mutex); +DEFINE_MUTEX(kfd_processes_mutex); DEFINE_SRCU(kfd_processes_srcu); @@ -220,6 +220,12 @@ struct kfd_process *kfd_create_process(struct file *filep) */ mutex_lock(&kfd_processes_mutex); + if (kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + pr_debug("KFD is locked! Cannot create process"); + return ERR_PTR(-EINVAL); + } + /* A prior open of /dev/kfd could have already created the process. 
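
The kfd_locked rework in the hunks above drops the atomic_t in favour of a plain counter that is only touched under kfd_processes_mutex, so the "is KFD locked?" check done at process-creation time can no longer race with the first-suspend/last-resume transitions. A minimal user-space sketch of that same pattern, with hypothetical names and pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int suspend_count;		/* analogous to kfd_locked */

static void suspend_all(void) { puts("suspend all processes"); }
static void resume_all(void)  { puts("resume all processes"); }

/* First suspender quiesces everything; later ones only bump the count. */
static void device_suspend(void)
{
	pthread_mutex_lock(&lock);
	if (++suspend_count == 1)
		suspend_all();
	pthread_mutex_unlock(&lock);
}

/* Last resumer wakes everything back up; underflow indicates a bug. */
static void device_resume(void)
{
	pthread_mutex_lock(&lock);
	if (--suspend_count == 0)
		resume_all();
	if (suspend_count < 0)
		fprintf(stderr, "suspend/resume refcount underflow\n");
	pthread_mutex_unlock(&lock);
}

/* New work is refused while any device is suspended. */
static int can_create_process(void)
{
	int ok;

	pthread_mutex_lock(&lock);
	ok = (suspend_count == 0);
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	device_suspend();
	printf("create allowed: %d\n", can_create_process());	/* 0 */
	device_resume();
	printf("create allowed: %d\n", can_create_process());	/* 1 */
	return 0;
}
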
*/ process = find_process(thread); if (process) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 80f5db4ef75fd03ded8df14abce8348964386d69..0805c423a5ce08d032c432dfa8133afb0177f02a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * the GPU device is not already present in the topology device * list then return NULL. This means a new topology device has to * be created for this GPU. - * TODO: Rather than assiging @gpu to first topology device withtout - * gpu attached, it will better to have more stringent check. */ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { @@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) struct kfd_topology_device *out_dev = NULL; down_write(&topology_lock); - list_for_each_entry(dev, &topology_device_list, list) + list_for_each_entry(dev, &topology_device_list, list) { + /* Discrete GPUs need their own topology device list + * entries. Don't assign them to CPU/APU nodes. + */ + if (!gpu->device_info->needs_iommu_device && + dev->node_props.cpu_cores_count) + continue; + if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; break; } + } up_write(&topology_lock); return out_dev; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6903fe6c894ba053693c16d3dd23538d58026ea2..65fb64c8727f49dfd235708d0f76dd7dfdb76428 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -565,22 +565,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + int ret; + bool need_hotplug = false; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->dc_link->type == dc_connection_mst_branch && - !aconnector->mst_port) { + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->dc_link->type != dc_connection_mst_branch || + aconnector->mst_port) + continue; + + mgr = &aconnector->mst_mgr; - if (suspend) - drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); - else - drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); - } + if (suspend) { + drm_dp_mst_topology_mgr_suspend(mgr); + } else { + ret = drm_dp_mst_topology_mgr_resume(mgr); + if (ret < 0) { + drm_dp_mst_topology_mgr_set_mst(mgr, false); + need_hotplug = true; + } + } } drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (need_hotplug) + drm_kms_helper_hotplug_event(dev); } static int dm_hw_init(void *handle) @@ -610,12 +624,13 @@ static int dm_suspend(void *handle) struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); + s3_handle_mst(adev->ddev, true); amdgpu_dm_irq_suspend(adev); - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); @@ -736,7 +751,6 @@ static int dm_resume(void *handle) struct drm_plane_state *new_plane_state; struct 
dm_plane_state *dm_new_plane_state; enum dc_connection_type new_connection_type = dc_connection_none; - int ret; int i; /* power on hardware */ @@ -756,6 +770,9 @@ static int dm_resume(void *handle) /* Do detection*/ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); /* @@ -809,13 +826,13 @@ static int dm_resume(void *handle) } } - ret = drm_atomic_helper_resume(ddev, dm->cached_state); + drm_atomic_helper_resume(ddev, dm->cached_state); dm->cached_state = NULL; amdgpu_dm_irq_resume_late(adev); - return ret; + return 0; } static const struct amd_ip_funcs amdgpu_dm_funcs = { @@ -1448,6 +1465,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) } static const struct backlight_ops amdgpu_dm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, .get_brightness = amdgpu_dm_backlight_get_brightness, .update_status = amdgpu_dm_backlight_update_status, }; @@ -1590,17 +1608,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) dm->display_indexes_num = dm->dc->caps.max_streams; + if (link_cnt > MAX_PIPES * 2) { + DRM_ERROR( + "KMS: Cannot support more than %d display indexes\n", + MAX_PIPES * 2); + goto fail; + } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; - if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { - DRM_ERROR( - "KMS: Cannot support more than %d display indexes\n", - AMDGPU_DM_MAX_DISPLAY_INDEX); - continue; - } - aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; @@ -2213,8 +2231,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector) { + struct dm_connector_state *dm_conn_state = + to_dm_connector_state(connector->state); uint32_t bpc = connector->display_info.bpc; + /* TODO: Remove this when there's support for max_bpc in drm */ + if (dm_conn_state && bpc > dm_conn_state->max_bpc) + /* Round down to nearest even number. 
*/ + bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1); + switch (bpc) { case 0: /* Temporary Work around, DRM don't parse color depth for @@ -2796,6 +2821,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; + } else if (property == adev->mode_info.max_bpc_property) { + dm_new_state->max_bpc = val; + ret = 0; } return ret; @@ -2838,6 +2866,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; + } else if (property == adev->mode_info.max_bpc_property) { + *val = dm_state->max_bpc; + ret = 0; } return ret; } @@ -2881,6 +2912,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_enable = false; state->underscan_hborder = 0; state->underscan_vborder = 0; + state->max_bpc = 8; __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -2898,6 +2930,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) if (new_state) { __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); + new_state->max_bpc = state->max_bpc; return &new_state->base; } @@ -3167,7 +3200,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, + .destroy = drm_primary_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, @@ -3615,6 +3648,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, { struct amdgpu_device *adev = dm->ddev->dev_private; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. 
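
The max_bpc clamp near the top of this hunk ("Round down to nearest even number") relies on clearing bit 0 of the cap, i.e. bpc = max_bpc - (max_bpc & 1). A quick stand-alone check of that arithmetic, using made-up sample values:

#include <stdio.h>

/* Round an unsigned value down to the nearest even number: clear bit 0. */
static unsigned int round_down_even(unsigned int v)
{
	return v - (v & 1);		/* equivalently: v & ~1u */
}

int main(void)
{
	unsigned int samples[] = { 6, 7, 8, 10, 11, 12 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %u\n", samples[i], round_down_even(samples[i]));
	return 0;
}
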
+ */ + if (aconnector->base.funcs->reset) + aconnector->base.funcs->reset(&aconnector->base); + aconnector->connector_id = link_index; aconnector->dc_link = link; aconnector->base.interlace_allowed = false; @@ -3658,6 +3698,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.max_bpc_property, + 0); } @@ -3779,9 +3822,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &aconnector->base, &amdgpu_dm_connector_helper_funcs); - if (aconnector->base.funcs->reset) - aconnector->base.funcs->reset(&aconnector->base); - amdgpu_dm_connector_init_helper( dm, aconnector, @@ -3996,6 +4036,7 @@ static void handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; + memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; @@ -4336,7 +4377,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, struct dc_stream_state *stream_state) { - stream_state->mode_changed = crtc_state->mode_changed; + stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } static int amdgpu_dm_atomic_commit(struct drm_device *dev, @@ -4357,10 +4398,22 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream) + if (drm_atomic_crtc_needs_modeset(new_crtc_state) + && dm_old_crtc_state->stream) { + /* + * CRC capture was enabled but not disabled. + * Release the vblank reference. 
+ */ + if (dm_new_crtc_state->crc_enabled) { + drm_crtc_vblank_put(crtc); + dm_new_crtc_state->crc_enabled = false; + } + manage_dm_interrupts(adev, acrtc, false); + } } /* Add check here for SoC's that support hardware cursor plane, to * unset legacy_cursor_update */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index a29dc35954c9a9ec09e5585b132a3098e87f4b3e..74aedcffc4bb7e9fc5e1f63a2ff9e75046cb971c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -173,8 +173,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; - - bool mst_connected; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -215,6 +213,7 @@ struct dm_connector_state { enum amdgpu_rmx_type scaling; uint8_t underscan_vborder; uint8_t underscan_hborder; + uint8_t max_bpc; bool underscan_enable; struct mod_freesync_user_enable user_enable; bool freesync_capable; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 9bfb040352e9875584b4efbb137c84e2fb7a0bf9..36a0bed9af07f826499bb822edb28339bb419f39 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -51,6 +51,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name, { struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state); struct dc_stream_state *stream_state = crtc_state->stream; + bool enable; enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); @@ -60,29 +61,33 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name, return -EINVAL; } - /* When enabling CRC, we should also disable dithering. */ - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { - if (dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, - true, true)) { - crtc_state->crc_enabled = true; - dc_stream_set_dither_option(stream_state, - DITHER_OPTION_TRUN8); - } - else - return -EINVAL; - } else { - if (dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, - false, false)) { - crtc_state->crc_enabled = false; - dc_stream_set_dither_option(stream_state, - DITHER_OPTION_DEFAULT); - } - else - return -EINVAL; + if (!stream_state) { + DRM_ERROR("No stream state for CRTC%d\n", crtc->index); + return -EINVAL; } + enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO); + + if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, + enable, enable)) + return -EINVAL; + + /* When enabling CRC, we should also disable dithering. */ + dc_stream_set_dither_option(stream_state, + enable ? DITHER_OPTION_TRUN8 + : DITHER_OPTION_DEFAULT); + + /* + * Reading the CRC requires the vblank interrupt handler to be + * enabled. Keep a reference until CRC capture stops. 
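
Both the CRC hunks here and the atomic-commit hunk above follow the same rule: the vblank reference is taken only on the disabled-to-enabled edge and dropped on the enabled-to-disabled edge (or during teardown if capture was still on), so the drm_crtc_vblank_get()/put() calls stay balanced. A small self-contained sketch of that edge-triggered get/put pattern, with stub functions standing in for the DRM helpers:

#include <stdbool.h>
#include <stdio.h>

static int vblank_refs;			/* stand-in for the DRM vblank refcount */

static void vblank_get(void) { vblank_refs++; }
static void vblank_put(void) { vblank_refs--; }

struct crtc_state {
	bool crc_enabled;
};

/* Take or drop the reference only on an enable/disable edge so the
 * get/put calls stay balanced no matter how often this is called. */
static void set_crc_capture(struct crtc_state *st, bool enable)
{
	if (!st->crc_enabled && enable)
		vblank_get();
	else if (st->crc_enabled && !enable)
		vblank_put();
	st->crc_enabled = enable;
}

int main(void)
{
	struct crtc_state st = { 0 };

	set_crc_capture(&st, true);
	set_crc_capture(&st, true);		/* no second reference taken */
	set_crc_capture(&st, false);
	printf("outstanding refs: %d\n", vblank_refs);	/* 0 */
	return 0;
}
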
+ */ + if (!crtc_state->crc_enabled && enable) + drm_crtc_vblank_get(crtc); + else if (crtc_state->crc_enabled && !enable) + drm_crtc_vblank_put(crtc); + + crtc_state->crc_enabled = enable; + *values_cnt = 3; /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9a300732ba3747a86541d8b62c2562e27e96b9fc..c85bea70d96522a841d61543e5a8674d244f420e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -317,12 +317,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder; struct drm_encoder *encoder; - const struct drm_connector_helper_funcs *connector_funcs = - connector->base.helper_private; - struct drm_encoder *enc_master = - connector_funcs->best_encoder(&connector->base); - DRM_DEBUG_KMS("enc master is %p\n", enc_master); amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); if (!amdgpu_encoder) return NULL; @@ -352,25 +347,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->mst_port == master - && !aconnector->port) { - DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - - aconnector->port = port; - drm_connector_set_path_property(connector, pathprop); - - drm_connector_list_iter_end(&conn_iter); - aconnector->mst_connected = true; - return &aconnector->base; - } - } - drm_connector_list_iter_end(&conn_iter); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) @@ -398,10 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, master->connector_id); aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); + drm_connector_attach_encoder(&aconnector->base, + &aconnector->mst_encoder->base); - /* - * TODO: understand why this one is needed - */ drm_object_attach_property( &connector->base, dev->mode_config.path_property, @@ -419,8 +394,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); - aconnector->mst_connected = true; - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", aconnector, connector->base.id, aconnector->mst_port); @@ -432,6 +405,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { + struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", @@ -445,7 +421,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink = NULL; } - aconnector->mst_connected = false; + drm_connector_unregister(connector); + if (adev->mode_info.rfbdev) + 
drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); + drm_connector_put(connector); } static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) @@ -456,18 +435,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static void dm_dp_mst_link_status_reset(struct drm_connector *connector) -{ - mutex_lock(&connector->dev->mode_config.mutex); - drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); - mutex_unlock(&connector->dev->mode_config.mutex); -} - static void dm_dp_mst_register_connector(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (adev->mode_info.rfbdev) drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); @@ -475,9 +446,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); drm_connector_register(connector); - - if (aconnector->mst_connected) - dm_dp_mst_link_status_reset(connector); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 95f332ee3e7e6e3858294d485c03c23c1c25f613..16614d73a5fcf61535fc1aac6a3a8e75753c589c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -32,6 +32,10 @@ endif calcs_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +calcs_ccflags += -msse2 +endif + CFLAGS_dcn_calcs.o := $(calcs_ccflags) CFLAGS_dcn_calc_auto.o := $(calcs_ccflags) CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index bd039322f697b4727a8ef985416d3f5d0355de7b..6342f649935123c7e442bd839ab6a6f93d694b6c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1347,12 +1347,12 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; - kernel_fpu_begin(); - /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&fclks); @@ -1371,9 +1371,13 @@ void dcn_bw_update_from_pplib(struct dc *dc) } else BREAK_TO_DEBUGGER(); + kernel_fpu_end(); + res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&dcfclks); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6ae050dc32209e0b3bbad59842bc2b0c01c7210c..2b2efe443c36d27b70b4591193efe2a19648378f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link, static void destruct(struct dc *dc) { - dc_release_state(dc->current_state); - dc->current_state = NULL; + if (dc->current_state) { + dc_release_state(dc->current_state); + dc->current_state = NULL; + } destroy_links(dc); @@ -958,6 +960,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* pplib is 
notified if disp_num changed */ dc->hwss.set_bandwidth(dc, context, true); + for (i = 0; i < context->stream_count; i++) + context->streams[i]->mode_changed = false; + dc_release_state(dc->current_state); dc->current_state = context; @@ -1120,9 +1125,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa */ update_flags->bits.bpp_change = 1; - if (u->gamma && dce_use_lut(u->plane_info->format)) - update_flags->bits.gamma_change = 1; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, sizeof(union dc_tiling_info)) != 0) { update_flags->bits.swizzle_change = 1; @@ -1139,7 +1141,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa if (update_flags->bits.rotation_change || update_flags->bits.stereo_format_change || update_flags->bits.pixel_format_change - || update_flags->bits.gamma_change || update_flags->bits.bpp_change || update_flags->bits.bandwidth_change || update_flags->bits.output_tf_change) @@ -1214,6 +1215,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return UPDATE_TYPE_FULL; } + if (u->surface->force_full_update) { + update_flags->bits.full_update = 1; + return UPDATE_TYPE_FULL; + } + type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1229,13 +1235,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; + if (u->gamma) { + enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; + + if (u->plane_info) + format = u->plane_info->format; + else if (u->surface) + format = u->surface->format; + + if (dce_use_lut(format)) + update_flags->bits.gamma_change = 1; + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); } if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change) { + || update_flags->bits.coeff_reduction_change + || update_flags->bits.gamma_change) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -1455,6 +1474,14 @@ void dc_commit_updates_for_stream(struct dc *dc, } dc_resource_state_copy_construct(state, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) + new_pipe->plane_state->force_full_update = true; + } } @@ -1498,6 +1525,12 @@ void dc_commit_updates_for_stream(struct dc *dc, dc->current_state = context; dc_release_state(old); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } } /*let's use current_state to update watermark etc*/ if (update_type >= UPDATE_TYPE_FULL) @@ -1552,6 +1585,14 @@ void dc_set_power_state( dc_resource_state_construct(dc, dc->current_state); dc->hwss.init_hw(dc); + +#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + if (dc->hwss.init_sys_ctx != NULL && + dc->vm_pa_config.valid) { + dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); + } +#endif + break; default: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fced3c1c2ef5f6ac117a993714de8bf24da2258a..2f42964fb9f4541d88a8cdcac685636c2650fe0e 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -324,7 +324,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) { enum gpio_result gpio_result; uint32_t clock_pin = 0; - + uint8_t retry = 0; struct ddc *ddc; enum connector_id connector_id = @@ -348,16 +348,27 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) if (GPIO_RESULT_OK != dal_ddc_open( ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { - dal_gpio_destroy_ddc(&ddc); + dal_ddc_close(ddc); return present; } - /* Read GPIO: DP sink is present if both clock and data pins are zero */ - /* [anaumov] in DAL2, there was no check for GPIO failure */ - - gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); - ASSERT(gpio_result == GPIO_RESULT_OK); + /* + * Read GPIO: DP sink is present if both clock and data pins are zero + * + * [W/A] plug-unplug DP cable, sometimes customer board has + * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI + * then monitor can't br light up. Add retry 3 times + * But in real passive dongle, it need additional 3ms to detect + */ + do { + gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); + ASSERT(gpio_result == GPIO_RESULT_OK); + if (clock_pin) + udelay(1000); + else + break; + } while (retry++ < 3); present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; @@ -1939,7 +1950,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2457,11 +2468,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + core_dc->hwss.blank_stream(pipe_ctx); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); - core_dc->hwss.blank_stream(pipe_ctx); - core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 8def0d9fa0ff0586343a13d08a9bea59c1a2da32..46c9cb47a96e58b156a9162a984c1eccbd50a2fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -433,6 +433,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( enum display_dongle_type *dongle = &sink_cap->dongle_type; uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; bool is_type2_dongle = false; + int retry_count = 2; struct dp_hdmi_dongle_signature_data *dongle_signature; /* Assume we have no valid DP passive dongle connected */ @@ -445,13 +446,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( DP_HDMI_DONGLE_ADDRESS, type2_dongle_buf, sizeof(type2_dongle_buf))) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - return; + CONN_DATA_DETECT(ddc->link, 
type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + return; + } } /* Check if Type 2 dongle.*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a7553b6d59c28d49a60e6adbdd0925fbd6ca13c9..122249da03ab7f55707a155fe42d8ac93b9d7ae0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2172,6 +2172,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2240,7 +2241,8 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - link->dpcd_caps.dongle_caps.extendedCapValid = true; + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; } break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ea6beccfd89d3ac0e3aa2119e77f3f133195f89d..8b4337794d1ef99af4a628efc30266fb67044b14 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -222,19 +222,17 @@ bool resource_construct( * PORT_CONNECTIVITY == 1 (as instructed by HW team). */ update_num_audio(&straps, &num_audio, &pool->audio_support); - for (i = 0; i < pool->pipe_count && i < num_audio; i++) { + for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { DC_ERR("DC: failed to create audio!\n"); return false; } - if (!aud->funcs->endpoint_valid(aud)) { aud->funcs->destroy(&aud); break; } - pool->audios[i] = aud; pool->audio_count++; } @@ -1401,10 +1399,12 @@ bool dc_remove_plane_from_context( * For head pipe detach surfaces from pipe for tail * pipe just zero it out */ - if (!pipe_ctx->top_pipe) { + if (!pipe_ctx->top_pipe || (!pipe_ctx->top_pipe->top_pipe && + pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) { + pipe_ctx->top_pipe = NULL; pipe_ctx->plane_state = NULL; pipe_ctx->bottom_pipe = NULL; - } else { + } else { memset(pipe_ctx, 0, sizeof(*pipe_ctx)); } } @@ -1701,18 +1701,28 @@ static struct audio *find_first_free_audio( const struct resource_pool *pool, enum engine_id id) { - int i; - for (i = 0; i < pool->audio_count; i++) { + int i, available_audio_count; + + if (id == ENGINE_ID_UNKNOWN) + return NULL; + + available_audio_count = pool->audio_count; + + for (i = 0; i < available_audio_count; i++) { if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { /*we have enough audio endpoint, find the matching inst*/ if (id != i) continue; - return pool->audios[i]; } } + + /* use engine id to find free audio */ + if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } /*not found the matching one, first come first serve*/ - for (i = 0; i < pool->audio_count; i++) { + for (i = 0; i < available_audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { return pool->audios[i]; } @@ -1796,8 +1806,6 @@ enum dc_status dc_remove_stream_from_ctx( dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream); memset(del_pipe, 0, sizeof(*del_pipe)); - - break; } } @@ -1864,6 +1872,7 @@ static 
int get_norm_pix_clk(const struct dc_crtc_timing *timing) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { + case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; @@ -1917,6 +1926,8 @@ enum dc_status resource_map_pool_resources( } */ + calculate_phy_pix_clks(stream); + /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); @@ -1945,7 +1956,7 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->sink->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count) { + stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index fdcc8ab19bf3f3eb979ab643f7b1d217551f6900..25b8a8f933821def07f12e551835d18c2c188e90 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -281,7 +281,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; return tg->funcs->get_frame_count(tg); @@ -305,7 +305,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; tg->funcs->get_scanoutpos(tg, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6c9990bef267e1cd469de9ab10e60b603e809435..4094b4f5011173ad075eab529ac02fc94d054721 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -505,6 +505,9 @@ struct dc_plane_state { struct dc_plane_status status; struct dc_context *ctx; + /* HACK: Workaround for forcing full reprogramming under some conditions */ + bool force_full_update; + /* private to dc_surface.c */ enum dc_irq_source irq_source; struct kref refcount; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 29294db1a96b775013635d080725e392cda4766b..da8b198538e5fdea8ba6dadbab666589ab28c1da 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -242,6 +242,10 @@ static void dmcu_set_backlight_level( s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, + 0, 1, 80000); } static void dce_abm_init(struct abm *abm) @@ -474,6 +478,8 @@ void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); + abm_dce->base.funcs->set_abm_immediate_disable(*abm); + kfree(abm_dce); *abm = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 7f6d724686f1adc7255c244568d140565b3a8fd3..abb559ce640850f38a6a9cb8c36a673c1730a30c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -611,6 +611,8 @@ void 
dce_aud_az_configure( AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1, value); + DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n", + audio->inst, value, audio_info->display_name); /* *write the port ID: @@ -922,7 +924,6 @@ static const struct audio_funcs funcs = { .az_configure = dce_aud_az_configure, .destroy = dce_aud_destroy, }; - void dce_aud_destroy(struct audio **audio) { struct dce_audio *aud = DCE_AUD(*audio); @@ -953,7 +954,6 @@ struct audio *dce_audio_create( audio->regs = reg; audio->shifts = shifts; audio->masks = masks; - return &audio->base; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 3f5b2e6f7553f56488412b03be5a900f529a935e..df936edac5c76839db2b48aaab0cae194462164e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -189,6 +189,12 @@ static void submit_channel_request( 1, 0); } + + REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); + + REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, + 10, aux110->timeout_period/10); + /* set the delay and the number of bytes to write */ /* The length include @@ -241,9 +247,6 @@ static void submit_channel_request( } } - REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); - REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index f7caab85dc801d75d260cf5d31aa2d65cc04718a..2c6f50b4245a438d4dc946e003946ca687460a11 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -69,11 +69,11 @@ enum { /* This is the timeout as defined in DP 1.2a, * at most within ~240usec. That means, * increasing this timeout will not affect normal operation, * and we'll timeout after - * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. + * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec. * This timeout is especially important for - * resume from S3 and CTS. + * converters, resume from S3, and CTS. */ - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6 }; struct aux_engine_dce110 { struct aux_engine base; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index fb1f373d08a12f082b3d312bd6abbdc2502f532d..e798241fae37ae19d6c5226fa573592c2d865790 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -466,6 +466,9 @@ static void dce12_update_clocks(struct dccg *dccg, { struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + /* TODO: Investigate why this is needed to fix display corruption. 
*/ + new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3f76e6019546f029739bfd182036b37fac3b671d..2c89ba5a084cc5bd8987c800b913daf9de616fa8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -622,6 +622,7 @@ struct clock_source *dce100_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } @@ -1001,6 +1002,7 @@ struct resource_pool *dce100_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b2f308766a9e8e5cfc6c34dea6e9b4515cf7fc8d..c3ad2bbec1a5278beb92a46c5c84204455f8ee01 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params, prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED; switch (plane_state->format) { + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + prescale_params->scale = 0x2082; + break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: prescale_params->scale = 0x2020; @@ -1000,7 +1003,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); - if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* un-mute audio */ @@ -1017,6 +1020,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; + if (option != KEEP_ACQUIRED_RESOURCE || !dc->debug.az_endpoint_mute_only) { /*only disalbe az_endpoint if power down or free*/ @@ -1036,6 +1041,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); pipe_ctx->stream_res.audio = NULL; } + if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) + /*this is the first audio. 
apply the PME w/a in order to wake AZ from D3*/ + pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); /* TODO: notify audio driver for if audio modes list changed * add audio mode list change flag */ @@ -1268,10 +1276,19 @@ static void program_scaler(const struct dc *dc, pipe_ctx->plane_res.scl_data.lb_params.depth, &pipe_ctx->stream->bit_depth_params); - if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) + if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) { + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + color.color_r_cr = color.color_g_y; + pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color( pipe_ctx->stream_res.tg, &color); + } pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data); @@ -2530,6 +2547,8 @@ static void pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); + pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e5e9e92521e91fab5afd921d245a575802a8b330..4e87a0bd4ad46f27d512dbcac6ebf8a36bfe97fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -640,6 +640,7 @@ struct clock_source *dce110_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } @@ -1344,6 +1345,7 @@ struct resource_pool *dce110_create_resource_pool( if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 288129343c77893f7d3db3011e3bfab835a75edc..b1e5dfa335b3d32dadae95c073d938f4e7729eab 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -640,6 +640,7 @@ struct clock_source *dce112_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } @@ -1287,6 +1288,7 @@ struct resource_pool *dce112_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index d43f37d99c7d9d2ed7bb0e37bfb58b91981914e6..dbebaabd3ab7ae745b31b97f1e262d145b864794 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -427,6 +427,7 @@ struct clock_source *dce120_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } @@ -1076,6 +1077,7 @@ struct resource_pool *dce120_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 604c62969ead727a225a57eaea4d6c8198c24ac1..17cbb416965c8635cc3c7b8bc67270b6232ab911 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -640,6 +640,7 @@ struct clock_source *dce80_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 5d95a997fd9f96e5539cf6e91fa3d05483cf9a8f..ad42470613441212eece3205bf60095690897462 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format( seg_distr[7] = 4; seg_distr[8] = 4; seg_distr[9] = 4; + seg_distr[10] = 1; region_start = -10; - region_end = 0; + region_end = 1; } for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) @@ -314,6 +315,11 @@ bool cm_helper_translate_curve_to_hw_format( i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) { + DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n", + i, TRANSFER_FUNC_POINTS); + return false; + } rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; @@ -476,6 +482,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format( i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) + return false; rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index bf8b68f8db4f7ffab3329b7170906abbbdc42d2d..bce5741f2952efed9edab98f5405260123551744 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -388,6 +388,10 @@ void dpp1_cnv_setup ( default: break; } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, CNVC_SURFACE_PIXEL_FORMAT, pixel_format); REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); @@ -399,7 +403,7 @@ void dpp1_cnv_setup ( for (i = 0; i < 12; i++) tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - tbl_entry.color_space = input_color_space; + tbl_entry.color_space = color_space; if (color_space >= COLOR_SPACE_YCBCR601) select = INPUT_CSC_SELECT_ICSC; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 4a863a5dab4178103698f574e1fa64b9dd85a625..321af9af95e8616dbbccea866c0372c0cd7b7674 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions( int *num_part_y, int *num_part_c) { + int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, + lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; + int line_size = scl_data->viewport.width < scl_data->recout.width ? scl_data->viewport.width : scl_data->recout.width; int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
scl_data->viewport_c.width : scl_data->recout.width; - int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); - int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ - int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ - int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + + lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); + memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ if (lb_config == LB_MEMORY_CONFIG_1) { lb_memory_size = 816; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index cfcc54f2ce653b6ad968a0f1550eba19ab505416..ead221ccb93e0aa78f84c5152a1716c13ca85983 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -23,6 +23,7 @@ * */ +#include #include "dm_services.h" #include "core_types.h" #include "resource.h" @@ -1190,7 +1191,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, tf = plane_state->in_transfer_func; if (plane_state->gamma_correction && - !plane_state->gamma_correction->is_identity + !dpp_base->ctx->dc->debug.always_use_regamma + && !plane_state->gamma_correction->is_identity && dce_use_lut(plane_state->format)) dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); @@ -1889,7 +1891,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, - COLOR_SPACE_YCBCR601_LIMITED); + plane_state->color_space); //set scale and bias registers build_prescale_params(&bns_params, plane_state); @@ -2120,6 +2122,15 @@ static void dcn10_blank_pixel_data( color_space = stream->output_color_space; color_space_to_black_color(dc, color_space, &black_color); + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + black_color.color_r_cr = black_color.color_g_y; + + if (stream_res->tg->funcs->set_blank_color) stream_res->tg->funcs->set_blank_color( stream_res->tg, @@ -2326,9 +2337,10 @@ static void dcn10_apply_ctx_for_surface( } } - if (!pipe_ctx->plane_state && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if ((!pipe_ctx->plane_state || + pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && + old_pipe_ctx->plane_state && + old_pipe_ctx->stream_res.tg == tg) { dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 6b44ed3697a4f4d5fafdc2bd9febede20e4cbae7..105c628eb5cff89358e744acc5e098da90a53c60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -717,6 +717,7 @@ struct clock_source *dcn10_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } @@ -1361,6 +1362,7 @@ struct resource_pool *dcn10_create_resource_pool( if 
(construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index d97ca6528f9d9d943f41bb416466277d2fcdf2bc..934ffe1b4b00e2e11423292021a7388432c4dc2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -32,6 +32,10 @@ endif dml_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +dml_ccflags += -msse2 +endif + CFLAGS_display_mode_lib.o := $(dml_ccflags) CFLAGS_display_pipe_clocks.o := $(dml_ccflags) CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c0b9ca13393b61a502902b4950dc43e462b8c718..f4469fa5afb553e9828c5c25e412926bb6c9508f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -159,7 +159,7 @@ struct resource_pool { struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; unsigned int clk_src_count; - struct audio *audios[MAX_PIPES]; + struct audio *audios[MAX_AUDIOS]; unsigned int audio_count; struct audio_support audio_support; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index cf7433ebf91a07557b1f4328980ba4bdcf15ab07..71901743a9387b705e5e54b465f2ffbea1afa333 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -34,6 +34,7 @@ * Data types shared between different Virtual HW blocks ******************************************************************************/ +#define MAX_AUDIOS 7 #define MAX_PIPES 6 struct gamma_curve { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index bf29733958c37c8added89243fd78d59d1e33d3a..962900932beedd8c02c3df1675fd0569dac343df 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1069,10 +1069,14 @@ static void build_evenly_distributed_points( struct dividers dividers) { struct gamma_pixel *p = points; - struct gamma_pixel *p_last = p + numberof_points - 1; + struct gamma_pixel *p_last; uint32_t i = 0; + // This function should not gets called with 0 as a parameter + ASSERT(numberof_points > 0); + p_last = p + numberof_points - 1; + do { struct fixed31_32 value = dc_fixpt_from_fraction(i, numberof_points - 1); @@ -1083,7 +1087,7 @@ static void build_evenly_distributed_points( ++p; ++i; - } while (i != numberof_points); + } while (i < numberof_points); p->r = dc_fixpt_div(p_last->r, dividers.divider1); p->g = dc_fixpt_div(p_last->g, dividers.divider1); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 43b82e14007ef0d7d1f9ac9bf8d8a121cb490c28..4be8627cc810709ea8613584850a8bebf10a5569 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -294,7 +294,7 @@ struct kfd2kgd_calls { void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); - void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); + void (*free_gtt_mem)(struct kgd_dev *kgd, void **mem_obj); void (*get_local_mem_info)(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); diff --git a/drivers/gpu/drm/amd/include/pptable.h 
b/drivers/gpu/drm/amd/include/pptable.h index 0b6a057e0a4c487d553bab851c80185b8b4eee33..5aac8d545bdc6d45ef4719011307c308579f730e 100644 --- a/drivers/gpu/drm/amd/include/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER typedef struct _ATOM_PPLIB_STATE { UCHAR ucNonClockStateIndex; - UCHAR ucClockStateIndices[1]; // variable-sized + UCHAR ucClockStateIndices[]; // variable-sized } ATOM_PPLIB_STATE; @@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2 /** * Driver will read the first ucNumDPMLevels in this array */ - UCHAR clockInfoIndex[1]; + UCHAR clockInfoIndex[]; } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 8994aa5c8cf80cb56734a3737c6bffc39bb3cfe0..64596029b696399a21e3752971c5a6d748994631 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -365,6 +365,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + ret = phm_pre_display_configuration_changed(hwmgr); + if (ret) + return ret; ret = phm_set_cpu_power_state(hwmgr); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 91ffb7bc4ee72512f9a31aebbce9eaec3939d9d7..56437866d1206c163f36593e2764bfb6bfd96170 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, if (skip) return 0; - phm_pre_display_configuration_changed(hwmgr); - phm_display_configuration_changed(hwmgr); if (hwmgr->ps) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h index 1e870f58dd12ababe9887083624b10b5d2deedf9..0c61e2bc14cdef8c2b97f34f27400d848a64b311 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h @@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State { typedef struct _ATOM_Tonga_State_Array { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_State_Array; typedef struct _ATOM_Tonga_MCLK_Dependency_Record { @@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record { typedef struct _ATOM_Tonga_MCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_MCLK_Dependency_Table; typedef struct _ATOM_Tonga_SCLK_Dependency_Record { @@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record { typedef struct _ATOM_Tonga_SCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. 
*/ } ATOM_Tonga_SCLK_Dependency_Table; typedef struct _ATOM_Polaris_SCLK_Dependency_Record { @@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record { typedef struct _ATOM_Polaris_SCLK_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Polaris_SCLK_Dependency_Table; typedef struct _ATOM_Tonga_PCIE_Record { @@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record { typedef struct _ATOM_Tonga_PCIE_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_PCIE_Table; typedef struct _ATOM_Polaris10_PCIE_Record { @@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record { typedef struct _ATOM_Polaris10_PCIE_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Polaris10_PCIE_Table; @@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record { typedef struct _ATOM_Tonga_MM_Dependency_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */ } ATOM_Tonga_MM_Dependency_Table; typedef struct _ATOM_Tonga_Voltage_Lookup_Record { @@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record { typedef struct _ATOM_Tonga_Voltage_Lookup_Table { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. 
*/ } ATOM_Tonga_Voltage_Lookup_Table; typedef struct _ATOM_Tonga_Fan_Table { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 4e1fd53938458ed3ff488ce5ad451abf12fe0480..fbbd5a4877e9ab7cd1dfd65a5c7ee423cdb17ab0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -916,8 +916,10 @@ static int init_thermal_controller( PHM_PlatformCaps_ThermalController ); - if (0 == powerplay_table->usFanTableOffset) + if (0 == powerplay_table->usFanTableOffset) { + hwmgr->thermal_controller.use_hw_fan_control = 1; return 0; + } fan_table = (const PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index a63e006533243ba1621d851e05e0f5f5434f056e..1546bc49004f8c84dbadd64c7ea32cae6a1e4aa9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -984,6 +984,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, break; case amd_pp_dpp_clock: pclk_vol_table = pinfo->vdd_dep_on_dppclk; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 052e60dfaf9fd77003bc49e82eb2d35646fdace2..6bf032e81e39fd9610e6a612e5e81b397634da39 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3472,29 +3472,42 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) { + struct amdgpu_device *adev = hwmgr->adev; int i; u32 tmp = 0; if (!query) return -EINVAL; - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - *query = tmp; + /* + * PPSMC_MSG_GetCurrPkgPwr is not supported on: + * - Hawaii + * - Bonaire + * - Fiji + * - Tonga + */ + if ((adev->asic_type != CHIP_HAWAII) && + (adev->asic_type != CHIP_BONAIRE) && + (adev->asic_type != CHIP_FIJI) && + (adev->asic_type != CHIP_TONGA)) { + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); + tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *query = tmp; - if (tmp != 0) - return 0; + if (tmp != 0) + return 0; + } smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94, 0); + ixSMU_PM_STATUS_95, 0); for (i = 0; i < 10; i++) { - mdelay(1); + mdelay(500); smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94); + ixSMU_PM_STATUS_95); if (tmp != 0) break; } @@ -4052,6 +4065,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) data->frame_time_x2 = frame_time_in_us * 2 / 100; + if (data->frame_time_x2 < 280) { + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); + data->frame_time_x2 = 280; + } + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 2aab1b4759459fb421443b30d563a1a4e1860e3a..cede78cdf28db5c271895be9b94857717a1974ab 100644 
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -669,20 +669,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { table->WatermarkRow[1][i].MinClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz / + 1000)); table->WatermarkRow[1][i].MaxClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / - 100); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz / + 1000)); table->WatermarkRow[1][i].MinUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); table->WatermarkRow[1][i].MaxUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); table->WatermarkRow[1][i].WmSetting = (uint8_t) wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; } @@ -690,20 +690,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { table->WatermarkRow[0][i].MinClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz / + 1000)); table->WatermarkRow[0][i].MaxClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz / + 1000)); table->WatermarkRow[0][i].MinUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); table->WatermarkRow[0][i].MaxUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) / - 1000); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); table->WatermarkRow[0][i].WmSetting = (uint8_t) wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index fb86c24394ff463f3ee565125d72e27434889038..ce459ea4ec3ad1858ba8a6bc9e7f209a9c509a64 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4751,9 +4751,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; - for (i = 0; i < podn_vdd_dep->count - 1; i++) - od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; - if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) + for (i = 0; i < podn_vdd_dep->count; i++) od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 
16b1a9cf6cf08219db8b4724cb29a55d8bef68f4..743d3c983082def14ff7aea92e6f2f01d5ad7f35 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -32,6 +32,7 @@ #include "vega10_pptable.h" #define NUM_DSPCLK_LEVELS 8 +#define VEGA10_ENGINECLOCK_HARDMAX 198000 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) @@ -258,7 +259,26 @@ static int init_over_drive_limits( struct pp_hwmgr *hwmgr, const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) { - hwmgr->platform_descriptor.overdriveLimit.engineClock = + const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (const ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + bool is_acg_enabled = false; + ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; + + if (gfxclk_dep_table->ucRevId == 1) { + patom_record_v2 = + (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; + is_acg_enabled = + (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable; + } + + if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX && + !is_acg_enabled) + hwmgr->platform_descriptor.overdriveLimit.engineClock = + VEGA10_ENGINECLOCK_HARDMAX; + else + hwmgr->platform_descriptor.overdriveLimit.engineClock = le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = le32_to_cpu(powerplay_table->ulMaxODMemoryClock); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index d3d96260f440673ed39960d999c4094577904282..6ee864455a12ab35057cefcb305d21aedbdc67e9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -677,6 +677,7 @@ struct pp_thermal_controller_info { uint8_t ucType; uint8_t ucI2cLine; uint8_t ucI2cAddress; + uint8_t use_hw_fan_control; struct pp_fan_info fanInfo; struct pp_advance_fan_control_parameters advanceFanControlParameters; }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index 62f36ba2435be4770e9a14f9dd8a779a2802fa79..c1a99dfe4913f247d20ce8cafab4dd5e0499715f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) +#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) + #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index fbe3ef4ee45c66b01e9e1f0ea0e3021854c6be21..924788772b07f6c293dfb1cb8b2d3f43007f0ec3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2268,11 +2268,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); } + break; } pr_debug("can't get the offset of type %x member %x\n", type, member); return 0; diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 18048f8e2f130ec27bf993a68373636c0518c171..40df5c2706ccedf3554c5410797a0c23c0c35360 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -2330,6 +2330,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2339,6 +2340,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 9299b93aa09af87e28d9fd3e4e08b27916d4be62..302ca7745723eef8354ab2b71b5cddb0786ec636 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -2236,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 1276f168ff68d75a742d4101d7edb3fcf7a31b1d..0dbca38658514c2fbff3c9a0abcc6e5e9fe1b05e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) efuse = efuse >> 24; if (hwmgr->chip_id == CHIP_POLARIS10) { - min = 1000; - max = 2300; + if (hwmgr->is_kicker) { + min = 1200; + max = 2500; + } else { + min = 1000; + max = 2300; + } + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + if (hwmgr->is_kicker) { + min = 900; + max = 2100; + } else { + min = 1100; + max = 2100; + } } else { min = 1100; max = 2100; @@ -1984,6 +1997,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + /* Apply avfs cks-off voltages to avoid the overshoot + * when switching to the highest sclk frequency + */ + if (data->apply_avfs_cks_off_voltage) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + return 0; } @@ -2019,6 +2038,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return 0; } + /* use hardware fan control */ + if (hwmgr->thermal_controller.use_hw_fan_control) + return 0; + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
usPWMMin * duty100; do_div(tmp64, 10000); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 99d5e4f98f49cd7ec103a70eee060b8c9e0241e4..a6edd5df33b0fa0cf9b4b3ed8dd694ba9898b14b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin"); MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 7dabc6c456e120b9e9e16dac8cd6ceb2c5b59100..697c8d92bd531b9bcbed63c224b3c735bf012ba5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -2618,6 +2618,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2627,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 57420d7caa4e968181b93cdf69e7eded94f7571a..59113fdd1c1c13ce7b26795c072ddea52050122c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -2184,6 +2184,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2193,6 +2194,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index e4d67b70244d5716764a6afa1ef2ae991e2e51c6..e69d996eabadce0e31747300681204eba5d1d30b 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, clk_disable_unprepare(hdlcd->clk); } -static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) +static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - struct drm_display_mode *mode = &state->adjusted_mode; long rate, clk_rate = mode->clock * 1000; rate = clk_round_rate(hdlcd->clk, clk_rate); - if (rate != clk_rate) { + /* 0.1% 
seems a close enough tolerance for the TDA19988 on Juno */ + if (abs(rate - clk_rate) * 1000 > clk_rate) { /* clock required by mode not supported by hardware */ - return -EINVAL; + return MODE_NOCLOCK; } - return 0; + return MODE_OK; } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, @@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .atomic_check = hdlcd_crtc_atomic_check, + .mode_valid = hdlcd_crtc_mode_valid, .atomic_begin = hdlcd_crtc_atomic_begin, .atomic_enable = hdlcd_crtc_atomic_enable, .atomic_disable = hdlcd_crtc_atomic_disable, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index ef44202fb43f8135dbb0386560b2f466ae87d400..8d722d2ce20a14549f20dbf2f11cf726909c1cd3 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -473,6 +473,8 @@ static void malidp_crtc_reset(struct drm_crtc *crtc) if (state) { crtc->state = &state->base; crtc->state->crtc = crtc; + } else { + crtc->state = NULL; } } diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 94d6dabec2dc80ee47794430d0642de6846fbfe4..1ab511e332432187be677a56538d337203ccfb30 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -190,6 +190,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; struct malidp_drm *malidp = drm->dev_private; + int loop = 5; malidp->event = malidp->crtc.state->event; malidp->crtc.state->event = NULL; @@ -204,8 +205,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) drm_crtc_vblank_get(&malidp->crtc); /* only set config_valid if the CRTC is enabled */ - if (malidp_set_and_wait_config_valid(drm) < 0) + if (malidp_set_and_wait_config_valid(drm) < 0) { + /* + * make a loop around the second CVAL setting and + * try 5 times before giving up. 
+ */ + while (loop--) { + if (!malidp_set_and_wait_config_valid(drm)) + break; + } DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + } + } else if (malidp->event) { /* CRTC inactive means vblank IRQ is disabled, send event directly */ spin_lock_irq(&drm->event_lock); diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 91472e5e0c8b8a3b0c19a2fb5f860fac83ffcb5b..cc27ac25372fcc770f4336d901ad750afcb9b9c4 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -69,7 +69,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector) __drm_atomic_helper_connector_destroy_state(connector->state); kfree(connector->state); - __drm_atomic_helper_connector_reset(connector, &mw_state->base); + connector->state = NULL; + + if (mw_state) + __drm_atomic_helper_connector_reset(connector, &mw_state->base); } static enum drm_connector_status diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 892c1d9304bb7640d46f192c587958c0946c48b1..642d0e70d0f8ffe4634046b5327039321e08a1a4 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -334,7 +334,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, ptr = (char __user *)(uintptr_t)args->ptr; - if (!access_ok(VERIFY_READ, ptr, args->size)) + if (!access_ok(ptr, args->size)) return -EFAULT; ret = fault_in_pages_readable(ptr, args->size); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 69dab82a37714853b5dfdb74dbe479f7d6c10fb1..bf589c53b908d66789679df6f4098c883150fa87 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); +static void ast_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); + +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary); + kfree(ap); +} + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + ast_kick_out_firmware_fb(pdev); + return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 0cd827e11fa20d8af7f038cecb8ea12465014523..de26df0c6044de127422999c669eeb5d68304a66 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev, { struct ast_framebuffer *afb = &afbdev->afb; + drm_crtc_force_disable_all(dev); drm_fb_helper_unregister_fbi(&afbdev->helper); if (afb->obj) { diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index dac355812adcbdcea7d68a40ff44bca90b0e0dcf..224fa1ef87ff92125c5ada667f41c0defac43e86 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) /* Enable extended register access */ - ast_enable_mmio(dev); ast_open_key(ast); + ast_enable_mmio(dev); /* Find out whether P2A works or whether to use device-tree */ ast_detect_config_mode(dev, &scu_rev); @@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev) { struct ast_private *ast = 
dev->dev_private; + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); + ast_release_firmware(dev); kfree(ast->dp501_fw_addr); ast_mode_fini(dev); @@ -583,7 +586,8 @@ void ast_driver_unload(struct drm_device *dev) drm_mode_config_cleanup(dev); ast_mm_fini(ast); - pci_iounmap(dev->pdev, ast->ioregs); + if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET) + pci_iounmap(dev->pdev, ast->ioregs); pci_iounmap(dev->pdev, ast->regs); kfree(ast); } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 5e77d456d9bb9434040107a69536815a270c7865..9d92d2d2fcfc7c1ccb85fa6ffce1485fc874f979 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, } ast_bo_unreserve(bo); + ast_set_offset_reg(crtc); ast_set_start_address_crt1(crtc, (u32)gpu_addr); return 0; @@ -599,7 +600,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; ast_open_key(ast); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); @@ -972,9 +973,21 @@ static int get_clock(void *i2c_priv) { struct ast_i2c_chan *i2c = i2c_priv; struct ast_private *ast = i2c->dev->dev_private; - uint32_t val; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4; return val & 1 ? 1 : 0; } @@ -982,9 +995,21 @@ static int get_data(void *i2c_priv) { struct ast_i2c_chan *i2c = i2c_priv; struct ast_private *ast = i2c->dev->dev_private; - uint32_t val; + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); - val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5; return val & 1 ? 1 : 0; } @@ -997,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock) for (i = 0; i < 0x10000; i++) { ujcrb7 = ((clock & 0x01) ? 0 : 1); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); if (ujcrb7 == jtemp) break; @@ -1013,7 +1038,7 @@ static void set_data(void *i2c_priv, int data) for (i = 0; i < 0x10000; i++) { ujcrb7 = ((data & 0x01) ? 
0 : 1) << 2; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); if (ujcrb7 == jtemp) break; @@ -1254,7 +1279,7 @@ static int ast_cursor_move(struct drm_crtc *crtc, ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); /* dummy write to fire HWC */ - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); + ast_show_cursor(crtc); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index f7d421359d564756ff86d78c64293e37eea74c7e..c1d1ac51d1c207c0cb0b2f08825aa19ca7761bde 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 04440064b9b7baaeaec4a9d19d48a33ed2b5dbdb..e5b3ba73e6617af5e8c2a7c037e124240de53149 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -382,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane, cfg |= ATMEL_HLCDC_LAYER_LAEN; else cfg |= ATMEL_HLCDC_LAYER_GAEN | - ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8); + ATMEL_HLCDC_LAYER_GA(state->base.alpha); } if (state->disc_h && state->disc_w) diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index bf6cad6c9178b10eda1e639ac0d785631d1f6518..7a3e5a8f6439b547ea7f978921fef43bae557328 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -46,6 +46,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 85c2d407a52e1a5476b3269d13655606d10478fd..faf64f6c2a53f7bc1b7e69dd1d538b3f402584eb 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511, vsync_polarity = 1; } - if (mode->vrefresh <= 24000) + if (drm_mode_vrefresh(mode) <= 24) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ; - else if (mode->vrefresh <= 25000) + else if (drm_mode_vrefresh(mode) <= 25) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ; - else if (mode->vrefresh <= 30000) + else if (drm_mode_vrefresh(mode) <= 30) low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ; else low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; @@ -1125,7 +1125,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) ret = adv7511_init_regulators(adv7511); if (ret) { dev_err(dev, "failed to init regulators\n"); - return ret; + goto err_of_node_put; } /* @@ -1194,17 +1194,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); - if (i2c->irq) { - init_waitqueue_head(&adv7511->wq); - - ret = devm_request_threaded_irq(dev, i2c->irq, NULL, - adv7511_irq_handler, - IRQF_ONESHOT, dev_name(dev), - adv7511); - if (ret) - goto err_unregister_cec; - 
} - adv7511_power_off(adv7511); i2c_set_clientdata(i2c, adv7511); @@ -1222,6 +1211,18 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); + + if (i2c->irq) { + init_waitqueue_head(&adv7511->wq); + + ret = devm_request_threaded_irq(dev, i2c->irq, NULL, + adv7511_irq_handler, + IRQF_ONESHOT, dev_name(dev), + adv7511); + if (ret) + goto err_unregister_cec; + } + return 0; err_unregister_cec: @@ -1234,6 +1235,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) i2c_unregister_device(adv7511->i2c_edid); uninit_regulators: adv7511_uninit_regulators(adv7511); +err_of_node_put: + of_node_put(adv7511->host_node); return ret; } @@ -1248,6 +1251,7 @@ static int adv7511_remove(struct i2c_client *i2c) if (adv7511->cec_clk) clk_disable_unprepare(adv7511->cec_clk); + of_node_put(adv7511->host_node); adv7511_uninit_regulators(adv7511); drm_bridge_remove(&adv7511->bridge); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index 185b6d84216653003bdd0cd26df89bd6e598b3ad..de0bd603baf1748f09a789e077c2e945192365e9 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -210,8 +210,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) if (!adv->host_node) return -ENODEV; - of_node_put(adv->host_node); - adv->use_timing_gen = !of_property_read_bool(np, "adi,disable-timing-generator"); diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index f8433c93f4634620c177c77ac67aea70337288ec..cc820e9aea1db066cdd54a0e60ad305c605d2ae3 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -725,7 +725,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx) /* 1.0V digital core power regulator */ pdata->dvdd10 = devm_regulator_get(dev, "dvdd10"); if (IS_ERR(pdata->dvdd10)) { - DRM_ERROR("DVDD10 regulator not found\n"); + if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER) + DRM_ERROR("DVDD10 regulator not found\n"); + return PTR_ERR(pdata->dvdd10); } @@ -1341,7 +1343,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client, err = anx78xx_init_pdata(anx78xx); if (err) { - DRM_ERROR("Failed to initialize pdata: %d\n", err); + if (err != -EPROBE_DEFER) + DRM_ERROR("Failed to initialize pdata: %d\n", err); + return err; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index d68986cea13258bed331d8d023db7b493f720e5b..84abf5d6f760a4c6530c613e718baf512e599258 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1040,16 +1040,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) if (ret) return ret; + /* Check whether panel supports fast training */ + ret = analogix_dp_fast_link_train_detection(dp); + if (ret) + dp->psr_enable = false; + if (dp->psr_enable) { ret = analogix_dp_enable_sink_psr(dp); if (ret) return ret; } - /* Check whether panel supports fast training */ - ret = analogix_dp_fast_link_train_detection(dp); - if (ret) - dp->psr_enable = false; return ret; } diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 7cbaba213ef693d11c430533df45c838a56c1229..dbeaf1257668cd7c5681707f240d122a3f6b9f4e 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -92,6 +92,17 @@ static int 
panel_bridge_attach(struct drm_bridge *bridge) static void panel_bridge_detach(struct drm_bridge *bridge) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_connector *connector = &panel_bridge->connector; + + /* + * Cleanup the connector if we know it was initialized. + * + * FIXME: This wouldn't be needed if the panel_bridge structure was + * allocated with drmm_kzalloc(). This might be tricky since the + * drm_device pointer can only be retrieved when the bridge is attached. + */ + if (connector->dev) + drm_connector_cleanup(connector); drm_panel_detach(panel_bridge->panel); } diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index e59a135423336bd187f0038956f06ac4574d94dc..0cc6dbbcddcf5630f2abf9871d7ce205790c91e9 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -261,10 +261,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, struct regmap *regmap = sii902x->regmap; u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; struct hdmi_avi_infoframe frame; + u16 pixel_clock_10kHz = adj->clock / 10; int ret; - buf[0] = adj->clock; - buf[1] = adj->clock >> 8; + buf[0] = pixel_clock_10kHz & 0xff; + buf[1] = pixel_clock_10kHz >> 8; buf[2] = adj->vrefresh; buf[3] = 0x00; buf[4] = adj->hdisplay; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 5971976284bf9ddb34434485624d531313791ee3..2a0a1654d3ce5b769049a9f75ddd743eeb21c1bd 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -39,6 +39,7 @@ #include +#define DDC_CI_ADDR 0x37 #define DDC_SEGMENT_ADDR 0x30 #define HDMI_EDID_LEN 512 @@ -320,6 +321,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap, u8 addr = msgs[0].addr; int i, ret = 0; + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * TOFIX: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. 
+ */ + return -EOPNOTSUPP; + dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr); for (i = 0; i < num; i++) { @@ -1747,7 +1757,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); - hdmi_enable_audio_clk(hdmi, true); + hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } /* not for DVI mode */ diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8e28e738cb52dec6ee8ea7eda2d655fc7035be93..d728b6cf6109673dc8c13df3b49509a1ccd5abc7 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -98,6 +98,8 @@ #define DP0_STARTVAL 0x064c #define DP0_ACTIVEVAL 0x0650 #define DP0_SYNCVAL 0x0654 +#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) +#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) #define DP0_MISC 0x0658 #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define BPC_6 (0 << 5) @@ -142,6 +144,8 @@ #define DP0_LTLOOPCTRL 0x06d8 #define DP0_SNKLTCTRL 0x06e4 +#define DP1_SRCCTRL 0x07a0 + /* PHY */ #define DP_PHY_CTRL 0x0800 #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ @@ -150,6 +154,7 @@ #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ +#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ @@ -297,7 +302,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct tc_data *tc = aux_to_tc(aux); - size_t size = min_t(size_t, 8, msg->size); + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size); u8 request = msg->request & ~DP_AUX_I2C_MOT; u8 *buf = msg->buffer; u32 tmp = 0; @@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc) unsigned long rate; u32 value; int ret; + u32 dp_phy_ctrl; rate = clk_get_rate(tc->refclk); switch (rate) { @@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc) value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; tc_write(SYS_PLLPARAM, value); - tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; + if (tc->link.base.num_lanes == 2) + dp_phy_ctrl |= PHY_2LANE; + tc_write(DP_PHY_CTRL, dp_phy_ctrl); /* * Initially PLLs are in bypass. Force PLL parameter update, @@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); - tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); + tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | + ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | + ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
SYNCVAL_VS_POL_ACTIVE_LOW : 0)); tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); @@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc) if (!tc->mode) return -EINVAL; - /* from excel file - DP0_SrcCtrl */ - tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | - DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | - DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); - /* from excel file - DP1_SrcCtrl */ - tc_write(0x07a0, 0x00003083); + tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); + /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ + tc_write(DP1_SRCCTRL, + (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | + ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); rate = clk_get_rate(tc->refclk); switch (rate) { @@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc) } value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; tc_write(SYS_PLLPARAM, value); + /* Setup Main Link */ - dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; + dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; + if (tc->link.base.num_lanes == 2) + dp_phy_ctrl |= PHY_2LANE; tc_write(DP_PHY_CTRL, dp_phy_ctrl); msleep(100); @@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct tc_data *tc = connector_to_tc(connector); + u32 req, avail; + u32 bits_per_pixel = 24; + /* DPI interface clock limitation: upto 154 MHz */ if (mode->clock > 154000) return MODE_CLOCK_HIGH; + req = mode->clock * bits_per_pixel / 8; + avail = tc->link.base.num_lanes * tc->link.base.rate; + + if (req > avail) + return MODE_BAD; + return MODE_OK; } @@ -1126,6 +1149,13 @@ static int tc_connector_get_modes(struct drm_connector *connector) struct tc_data *tc = connector_to_tc(connector); struct edid *edid; unsigned int count; + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: %d\n", ret); + return 0; + } if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) { count = tc->panel->funcs->get_modes(tc->panel); @@ -1195,6 +1225,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge) drm_display_info_set_bus_formats(&tc->connector.display_info, &bus_format, 1); + tc->connector.display_info.bus_flags = + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE | + DRM_BUS_FLAG_SYNC_NEGEDGE; drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index c3e32138c6bb08c5cdb6c75e7d6624984914e0c0..9dc109df0808c70d193377b8217ff37f9d93bd35 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -64,7 +64,12 @@ static int tfp410_get_modes(struct drm_connector *connector) drm_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return ret; + fallback: /* No EDID, fallback on the XGA standard modes */ ret = drm_add_modes_noedid(connector, 1920, 1200); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index ce9db7aab2255c773218778aab7fd62e607a071d..a29f87e98d9d2a224cfb699528e38f4cbdb44c7b 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -146,7 +146,7 @@ struct cirrus_device { struct cirrus_fbdev { struct 
drm_fb_helper helper; - struct drm_framebuffer gfb; + struct drm_framebuffer *gfb; void *sysram; int size; int x1, y1, x2, y2; /* dirty rect */ diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index b643ac92801c81cacae37fc876497cb9a52accdf..82cc82e0bd80db980f76f37cbc49ce176a268025 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, struct drm_gem_object *obj; struct cirrus_bo *bo; int src_offset, dst_offset; - int bpp = afbdev->gfb.format->cpp[0]; + int bpp = afbdev->gfb->format->cpp[0]; int ret = -EBUSY; bool unmap = false; bool store_for_later = false; int x2, y2; unsigned long flags; - obj = afbdev->gfb.obj[0]; + obj = afbdev->gfb->obj[0]; bo = gem_to_cirrus_bo(obj); /* @@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, } for (i = y; i < y + height; i++) { /* assume equal stride for now */ - src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp); + src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp); memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp); } @@ -192,23 +192,26 @@ static int cirrusfb_create(struct drm_fb_helper *helper, return -ENOMEM; info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) - return PTR_ERR(info); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_vfree; + } info->par = gfbdev; - ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj); + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) { + ret = -ENOMEM; + goto err_drm_gem_object_put_unlocked; + } + + ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj); if (ret) - return ret; + goto err_kfree; gfbdev->sysram = sysram; gfbdev->size = size; - - fb = &gfbdev->gfb; - if (!fb) { - DRM_INFO("fb is NULL\n"); - return -EINVAL; - } + gfbdev->gfb = fb; /* setup helper */ gfbdev->helper.fb = fb; @@ -241,24 +244,27 @@ static int cirrusfb_create(struct drm_fb_helper *helper, DRM_INFO(" pitch is %d\n", fb->pitches[0]); return 0; + +err_kfree: + kfree(fb); +err_drm_gem_object_put_unlocked: + drm_gem_object_put_unlocked(gobj); +err_vfree: + vfree(sysram); + return ret; } static int cirrus_fbdev_destroy(struct drm_device *dev, struct cirrus_fbdev *gfbdev) { - struct drm_framebuffer *gfb = &gfbdev->gfb; + struct drm_framebuffer *gfb = gfbdev->gfb; drm_fb_helper_unregister_fbi(&gfbdev->helper); - if (gfb->obj[0]) { - drm_gem_object_put_unlocked(gfb->obj[0]); - gfb->obj[0] = NULL; - } - vfree(gfbdev->sysram); drm_fb_helper_fini(&gfbdev->helper); - drm_framebuffer_unregister_private(gfb); - drm_framebuffer_cleanup(gfb); + if (gfb) + drm_framebuffer_put(gfb); return 0; } diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 336bfda401257f60a17bfa350529e4dc3555b9f9..90a4e641d3fb94791846ffba1c8d5b43879ac1a0 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -127,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc, return ret; } - if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) { + if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) { /* if pushing console in kmap it */ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 281cf9cbb44c41981b7541408c97fda822fb0061..e703341e4cb2a034ca75379de983f5b640c6b638 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ 
b/drivers/gpu/drm/drm_atomic.c @@ -91,6 +91,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) if (!state->planes) goto fail; + /* + * Because drm_atomic_state can be committed asynchronously we need our + * own reference and cannot rely on the on implied by drm_file in the + * ioctl call. + */ + drm_dev_get(dev); state->dev = dev; DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); @@ -250,7 +256,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear); void __drm_atomic_state_free(struct kref *ref) { struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); - struct drm_mode_config *config = &state->dev->mode_config; + struct drm_device *dev = state->dev; + struct drm_mode_config *config = &dev->mode_config; drm_atomic_state_clear(state); @@ -262,6 +269,8 @@ void __drm_atomic_state_free(struct kref *ref) drm_atomic_state_default_release(state); kfree(state); } + + drm_dev_put(dev); } EXPORT_SYMBOL(__drm_atomic_state_free); @@ -1702,6 +1711,27 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, struct drm_connector *connector = conn_state->connector; struct drm_crtc_state *crtc_state; + /* + * For compatibility with legacy users, we want to make sure that + * we allow DPMS On<->Off modesets on unregistered connectors, since + * legacy modesetting users will not be expecting these to fail. We do + * not however, want to allow legacy users to assign a connector + * that's been unregistered from sysfs to another CRTC, since doing + * this with a now non-existent connector could potentially leave us + * in an invalid state. + * + * Since the connector can be unregistered at any point during an + * atomic check or commit, this is racy. But that's OK: all we care + * about is ensuring that userspace can't use this connector for new + * configurations after it's been notified that the connector is no + * longer present. + */ + if (!READ_ONCE(connector->registered) && crtc) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); + return -EINVAL; + } + if (conn_state->crtc == crtc) return 0; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 1bb4c318bdd4d36ae6e1b666176bd6134bf1e81f..f9306c3e9babf699b91ec60fcc756bc76987a201 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1380,7 +1380,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, ret = wait_event_timeout(dev->vblank[i].queue, old_state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc), - msecs_to_jiffies(50)); + msecs_to_jiffies(100)); WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n", crtc->base.id, crtc->name); @@ -1425,6 +1425,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", crtc->base.id, crtc->name); } + + if (old_state->fake_commit) + complete_all(&old_state->fake_commit->flip_done); } EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); @@ -1561,6 +1564,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev, old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; + /* + * FIXME: Since prepare_fb and cleanup_fb are always called on + * the new_plane_state for async updates we need to block framebuffer + * changes. This prevents use of a fb that's been cleaned up and + * double cleanups from occuring. 
+ */ + if (old_plane_state->fb != new_plane_state->fb) + return -EINVAL; + funcs = plane->helper_private; if (!funcs->atomic_async_update) return -EINVAL; @@ -1591,6 +1603,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check); * drm_atomic_async_check() succeeds. Async commits are not supposed to swap * the states like normal sync commits, but just do in-place changes on the * current state. + * + * TODO: Implement full swap instead of doing in-place changes. */ void drm_atomic_helper_async_commit(struct drm_device *dev, struct drm_atomic_state *state) @@ -1601,6 +1615,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, int i; for_each_new_plane_in_state(state, plane, plane_state, i) { + struct drm_framebuffer *new_fb = plane_state->fb; + struct drm_framebuffer *old_fb = plane->state->fb; + funcs = plane->helper_private; funcs->atomic_async_update(plane, plane_state); @@ -1609,11 +1626,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, * plane->state in-place, make sure at least common * properties have been properly updated. */ - WARN_ON_ONCE(plane->state->fb != plane_state->fb); + WARN_ON_ONCE(plane->state->fb != new_fb); WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x); WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y); WARN_ON_ONCE(plane->state->src_x != plane_state->src_x); WARN_ON_ONCE(plane->state->src_y != plane_state->src_y); + + /* + * Make sure the FBs have been swapped so that cleanups in the + * new_state performs a cleanup in the old FB. + */ + WARN_ON_ONCE(plane_state->fb != old_fb); } } EXPORT_SYMBOL(drm_atomic_helper_async_commit); @@ -3189,7 +3212,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend); int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { - int i; + int i, ret; struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct drm_connector *connector; @@ -3208,7 +3231,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, for_each_new_connector_in_state(state, connector, new_conn_state, i) state->connectors[i].old_state = connector->state; - return drm_atomic_commit(state); + ret = drm_atomic_commit(state); + + state->acquire_ctx = NULL; + + return ret; } EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index d9c0f75739054d88f9533a47684ec4c6f9fb30bf..ce098eb57de54c0290b14605ce7363e8ce2fcec8 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) lockdep_assert_held_once(&dev->master_mutex); + WARN_ON(fpriv->is_master); old_master = fpriv->master; fpriv->master = drm_master_create(dev); if (!fpriv->master) { @@ -170,6 +171,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) /* drop references and restore old master on failure */ drm_master_put(&fpriv->master); fpriv->master = old_master; + fpriv->is_master = 0; return ret; } @@ -263,9 +265,10 @@ int drm_master_open(struct drm_file *file_priv) void drm_master_release(struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; - struct drm_master *master = file_priv->master; + struct drm_master *master; mutex_lock(&dev->master_mutex); + master = file_priv->master; if (file_priv->magic) idr_remove(&file_priv->master->magic_map, file_priv->magic); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 
ba8cfe65c65bd424297e2610ee0975e8b693862b..21bec45480927eb5b4ea3770876f5dc798ef506a 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -36,6 +36,8 @@ #include #include "drm_legacy.h" +#include + static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, struct drm_local_map *map) { @@ -1319,7 +1321,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) .size = from->buf_size, .low_mark = from->low_mark, .high_mark = from->high_mark}; - return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)); + + if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags))) + return -EFAULT; + return 0; } int drm_legacy_infobufs(struct drm_device *dev, void *data, @@ -1417,6 +1422,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data, idx, dma->buf_count - 1); return -EINVAL; } + idx = array_index_nospec(idx, dma->buf_count); buf = dma->buflist[idx]; if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9cbe8f5c9acafedf5bee808e82b205984528f1b8..6e241a3c31ee3abd1c4609ab376d7c8cde34beb2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -595,6 +595,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, plane = crtc->primary; + /* allow disabling with the primary plane leased */ + if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) + return -EACCES; + mutex_lock(&crtc->dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); retry: diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 99961192bf034f893cbac5521c996dc98aa49887..c88e5ff41add6a4898186672070b03293c3e9ffa 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -379,12 +379,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, struct drm_crtc_crc *crc = &crtc->crc; struct drm_crtc_crc_entry *entry; int head, tail; + unsigned long flags; - spin_lock(&crc->lock); + spin_lock_irqsave(&crc->lock, flags); /* Caller may not have noticed yet that userspace has stopped reading */ if (!crc->entries) { - spin_unlock(&crc->lock); + spin_unlock_irqrestore(&crc->lock, flags); return -EINVAL; } @@ -395,7 +396,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, bool was_overflow = crc->overflow; crc->overflow = true; - spin_unlock(&crc->lock); + spin_unlock_irqrestore(&crc->lock, flags); if (!was_overflow) DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n"); @@ -411,7 +412,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1); crc->head = head; - spin_unlock(&crc->lock); + spin_unlock_irqrestore(&crc->lock, flags); wake_up_interruptible(&crc->wq); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 7780567aa6692fa56450aed78ff101b294fe7da6..77347a258f6cad45b8e73dd5f1cae4df5586dd88 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1274,6 +1274,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ mutex_lock(&mgr->lock); mstb = mgr->mst_primary; + if (!mstb) + goto out; + for (i = 0; i < lct - 1; i++) { int shift = (i % 2) ? 
0 : 4; int port_num = (rad[i / 2] >> shift) & 0xf; @@ -1579,7 +1582,11 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, if (ret != 1) DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); - txmsg->dst->tx_slots[txmsg->seqno] = NULL; + if (txmsg->seqno != -1) { + WARN_ON((unsigned int)txmsg->seqno > + ARRAY_SIZE(txmsg->dst->tx_slots)); + txmsg->dst->tx_slots[txmsg->seqno] = NULL; + } } static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, @@ -3275,6 +3282,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; + msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP); } msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ea4941da9b273f39fb81ab238eae769589f6a1c7..d8ae4ca129c70192e76bcae2b1cca6d4a6ef8246 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -381,11 +381,7 @@ void drm_dev_unplug(struct drm_device *dev) synchronize_srcu(&drm_unplug_srcu); drm_dev_unregister(dev); - - mutex_lock(&drm_global_mutex); - if (dev->open_count == 0) - drm_dev_put(dev); - mutex_unlock(&drm_global_mutex); + drm_dev_put(dev); } EXPORT_SYMBOL(drm_dev_unplug); @@ -503,7 +499,7 @@ int drm_dev_init(struct drm_device *dev, } kref_init(&dev->ref); - dev->dev = parent; + dev->dev = get_device(parent); dev->driver = driver; INIT_LIST_HEAD(&dev->filelist); @@ -572,6 +568,7 @@ int drm_dev_init(struct drm_device *dev, drm_minor_free(dev, DRM_MINOR_RENDER); drm_fs_inode_free(dev->anon_inode); err_free: + put_device(dev->dev); mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); @@ -607,6 +604,8 @@ void drm_dev_fini(struct drm_device *dev) drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); + put_device(dev->dev); + mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ff0bfc65a8c1dbbbbe99ac77aeb2e122cdaf3026..f5926bf5dabd9a80d1d71b596aa550d07c8a2db2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -122,6 +122,9 @@ static const struct edid_quirk { /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -163,12 +166,34 @@ static const struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + /* Lenovo G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, + /* Valve Index Headset */ + { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b5, 
EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP }, + { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP }, + /* HTC Vive and Vive Pro VR Headsets */ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, @@ -190,6 +215,12 @@ static const struct edid_quirk { /* Sony PlayStation VR Headset */ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, + + /* Sensics VR Headsets */ + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP }, + + /* OSVR HDK and HDK2 VR Headsets */ + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP }, }; /* @@ -1321,6 +1352,7 @@ MODULE_PARM_DESC(edid_fixup, static void drm_get_displayid(struct drm_connector *connector, struct edid *edid); +static int validate_displayid(u8 *displayid, int length, int idx); static int drm_edid_block_checksum(const u8 *raw_edid) { @@ -1552,6 +1584,50 @@ static void connector_bad_edid(struct drm_connector *connector, } } +/* Get override or firmware EDID */ +static struct edid *drm_get_override_edid(struct drm_connector *connector) +{ + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate(connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + return IS_ERR(override) ? NULL : override; +} + +/** + * drm_add_override_edid_modes - add modes from override/firmware EDID + * @connector: connector we're probing + * + * Add modes from the override/firmware EDID, if available. Only to be used from + * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe + * failed during drm_get_edid() and caused the override/firmware EDID to be + * skipped. + * + * Return: The number of modes added or 0 if we couldn't find any. 
+ */ +int drm_add_override_edid_modes(struct drm_connector *connector) +{ + struct edid *override; + int num_modes = 0; + + override = drm_get_override_edid(connector); + if (override) { + drm_connector_update_edid_property(connector, override); + num_modes = drm_add_edid_modes(connector, override); + kfree(override); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n", + connector->base.id, connector->name, num_modes); + } + + return num_modes; +} +EXPORT_SYMBOL(drm_add_override_edid_modes); + /** * drm_do_get_edid - get EDID data using a custom EDID block read function * @connector: connector we're probing @@ -1579,15 +1655,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; - struct edid *override = NULL; - - if (connector->override_edid) - override = drm_edid_duplicate(connector->edid_blob_ptr->data); - - if (!override) - override = drm_load_edid_firmware(connector); + struct edid *override; - if (!IS_ERR_OR_NULL(override)) + override = drm_get_override_edid(connector); + if (override) return override; if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) @@ -2865,16 +2936,46 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id) return edid_ext; } -static u8 *drm_find_cea_extension(const struct edid *edid) -{ - return drm_find_edid_extension(edid, CEA_EXT); -} static u8 *drm_find_displayid_extension(const struct edid *edid) { return drm_find_edid_extension(edid, DISPLAYID_EXT); } +static u8 *drm_find_cea_extension(const struct edid *edid) +{ + int ret; + int idx = 1; + int length = EDID_LENGTH; + struct displayid_block *block; + u8 *cea; + u8 *displayid; + + /* Look for a top level CEA extension block */ + cea = drm_find_edid_extension(edid, CEA_EXT); + if (cea) + return cea; + + /* CEA blocks can also be found embedded in a DisplayID block */ + displayid = drm_find_displayid_extension(edid); + if (!displayid) + return NULL; + + ret = validate_displayid(displayid, length, idx); + if (ret) + return NULL; + + idx += sizeof(struct displayid_hdr); + for_each_displayid_db(displayid, block, idx, length) { + if (block->tag == DATA_BLOCK_CTA) { + cea = (u8 *)block; + break; + } + } + + return cea; +} + /* * Calculate the alternate clock for the CEA mode * (60Hz vs. 59.94Hz etc.) @@ -3598,13 +3699,38 @@ cea_revision(const u8 *cea) static int cea_db_offsets(const u8 *cea, int *start, int *end) { - /* Data block offset in CEA extension block */ - *start = 4; - *end = cea[2]; - if (*end == 0) - *end = 127; - if (*end < 4 || *end > 127) - return -ERANGE; + /* DisplayID CTA extension blocks and top-level CEA EDID + * block header definitions differ in the following bytes: + * 1) Byte 2 of the header specifies length differently, + * 2) Byte 3 is only present in the CEA top level block. + * + * The different definitions for byte 2 follow. + * + * DisplayID CTA extension block defines byte 2 as: + * Number of payload bytes + * + * CEA EDID block defines byte 2 as: + * Byte number (decimal) within this block where the 18-byte + * DTDs begin. If no non-DTD data is present in this extension + * block, the value should be set to 04h (the byte after next). + * If set to 00h, there are no DTDs present in this block and + * no non-DTD data. 
+ */ + if (cea[0] == DATA_BLOCK_CTA) { + *start = 3; + *end = *start + cea[2]; + } else if (cea[0] == CEA_EXT) { + /* Data block offset in CEA extension block */ + *start = 4; + *end = cea[2]; + if (*end == 0) + *end = 127; + if (*end < 4 || *end > 127) + return -ERANGE; + } else { + return -ENOTSUPP; + } + return 0; } @@ -5151,6 +5277,9 @@ static int drm_parse_display_id(struct drm_connector *connector, case DATA_BLOCK_TYPE_1_DETAILED_TIMING: /* handled in mode gathering code. */ break; + case DATA_BLOCK_CTA: + /* handled in the cea parser code. */ + break; default: DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag); break; diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index a4915099aaa99223b56de4181c26510303769966..a0e107abc40d722c7d5bf06c74c5c4bbb509dd68 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector) * the last one found one as a fallback. */ fwstr = kstrdup(edid_firmware, GFP_KERNEL); + if (!fwstr) + return ERR_PTR(-ENOMEM); edidstr = fwstr; while ((edidname = strsep(&edidstr, ","))) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 9628dd61782698cf71d3e6687df3dc6ff8053d09..4f5e3b3513d87b1944f73340e2c9443c0f44a636 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, "Overallocation of the fbdev buffer (%) [default=" __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); +/* + * In order to keep user-space compatibility, we want in certain use-cases + * to keep leaking the fbdev physical address to the user-space program + * handling the fbdev buffer. + * This is a bad habit essentially kept into closed source opengl driver + * that should really be moved into open-source upstream projects instead + * of using legacy physical addresses in user space to communicate with + * other out-of-tree kernel modules. + * + * This module_param *should* be removed as soon as possible and be + * considered as a broken and legacy behaviour from a modern fbdev device. 
+ */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) +static bool drm_leak_fbdev_smem = false; +module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); +MODULE_PARM_DESC(fbdev_emulation, + "Allow unsafe leaking fbdev physical smem address [default=false]"); +#endif + static LIST_HEAD(kernel_fb_helper_list); static DEFINE_MUTEX(kernel_fb_helper_lock); @@ -200,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) mutex_lock(&fb_helper->lock); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + ret = __drm_fb_helper_add_one_connector(fb_helper, connector); if (ret) goto fail; @@ -1599,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, var_1->transp.msb_right == var_2->transp.msb_right; } +static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, + u8 depth) +{ + switch (depth) { + case 8: + var->red.offset = 0; + var->green.offset = 0; + var->blue.offset = 0; + var->red.length = 8; /* 8bit DAC */ + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 15: + var->red.offset = 10; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 5; + var->blue.length = 5; + var->transp.offset = 15; + var->transp.length = 1; + break; + case 16: + var->red.offset = 11; + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = 5; + var->green.length = 6; + var->blue.length = 5; + var->transp.offset = 0; + break; + case 24: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + default: + break; + } +} + /** * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var * @var: screeninfo to check @@ -1610,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - if (var->pixclock != 0 || in_dbg_master()) + if (in_dbg_master()) return -EINVAL; + if (var->pixclock != 0) { + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); + var->pixclock = 0; + } + /* * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. @@ -1628,6 +1713,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, return -EINVAL; } + /* + * Workaround for SDL 1.2, which is known to be setting all pixel format + * fields values to zero in some cases. We treat this situation as a + * kind of "use some reasonable autodetected values". + */ + if (!var->red.offset && !var->green.offset && + !var->blue.offset && !var->transp.offset && + !var->red.length && !var->green.length && + !var->blue.length && !var->transp.length && + !var->red.msb_right && !var->green.msb_right && + !var->blue.msb_right && !var->transp.msb_right) { + drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); + } + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. 
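The SDL 1.2 workaround in the hunk above treats an FBIOPUT_VSCREENINFO request whose pixel-format bitfields are all zero as "pick reasonable values for the framebuffer's depth" instead of rejecting it; previously such a request failed the pixel-format comparison and returned -EINVAL. The following is a minimal userspace sketch of that kind of legacy client, not part of the patch itself; the /dev/fb0 path and the bare-bones error handling are illustrative assumptions.

#include <fcntl.h>
#include <linux/fb.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);	/* assumed fbdev node */

	if (fd < 0)
		return 1;

	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) == 0) {
		/* Zero every bitfield, as SDL-1.2-style clients are known to do. */
		memset(&var.red, 0, sizeof(var.red));
		memset(&var.green, 0, sizeof(var.green));
		memset(&var.blue, 0, sizeof(var.blue));
		memset(&var.transp, 0, sizeof(var.transp));
		var.pixclock = 0;	/* also ignored by the emulation now */

		/*
		 * With the workaround above, drm_fb_helper_check_var() fills in
		 * offsets/lengths matching the framebuffer's depth rather than
		 * failing the request with -EINVAL.
		 */
		if (ioctl(fd, FBIOPUT_VSCREENINFO, &var))
			perror("FBIOPUT_VSCREENINFO");
	}

	close(fd);
	return 0;
}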
@@ -1939,59 +2038,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; - switch (fb->format->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 15: - info->var.red.offset = 10; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 5; - info->var.blue.length = 5; - info->var.transp.offset = 15; - info->var.transp.length = 1; - break; - case 16: - info->var.red.offset = 11; - info->var.green.offset = 5; - info->var.blue.offset = 0; - info->var.red.length = 5; - info->var.green.length = 6; - info->var.blue.length = 5; - info->var.transp.offset = 0; - break; - case 24: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 16; - info->var.green.offset = 8; - info->var.blue.offset = 0; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 24; - info->var.transp.length = 8; - break; - default: - break; - } + drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); info->var.xres = fb_width; info->var.yres = fb_height; @@ -2460,6 +2507,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, { struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_crtc **crtcs; + /* points to modes protected by mode_config.mutex */ struct drm_display_mode **modes; struct drm_fb_offset *offsets; bool *enabled; @@ -2506,7 +2554,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); } - mutex_unlock(&fb_helper->dev->mode_config.mutex); /* need to set the modesets up here for use later */ /* fill out the connector<->crtc mappings into the modesets */ @@ -2540,6 +2587,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, modeset->y = offset->y; } } + mutex_unlock(&fb_helper->dev->mode_config.mutex); + out: kfree(crtcs); kfree(modes); @@ -2830,7 +2879,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev, return 0; err_drm_fb_helper_fini: - drm_fb_helper_fini(fb_helper); + drm_fb_helper_fbdev_teardown(dev); return ret; } @@ -2910,7 +2959,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - if (!try_module_get(fb_helper->dev->driver->fops->owner)) + /* No need to take a ref for fbcon because it unbinds on unregister */ + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) return -ENODEV; return 0; @@ -2920,7 +2970,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - module_put(fb_helper->dev->driver->fops->owner); + if (user) + module_put(fb_helper->dev->driver->fops->owner); return 0; } @@ -3038,6 +3089,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; fbi->screen_buffer = buffer->vaddr; + /* Shamelessly leak the physical address to user-space */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if 
(drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif strcpy(fbi->fix.id, "DRM emulated"); drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); @@ -3103,9 +3160,7 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client) static int drm_fbdev_client_restore(struct drm_client_dev *client) { - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + drm_fb_helper_lastclose(client->dev); return 0; } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index ffa8dc35515ffaddf0f87c56cc2108e0be4151f6..9c5bc0121ff99a0f28282c80254a963b6e46be0b 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -479,11 +479,9 @@ int drm_release(struct inode *inode, struct file *filp) drm_file_free(file_priv); - if (!--dev->open_count) { + if (!--dev->open_count) drm_lastclose(dev); - if (drm_dev_is_unplugged(dev)) - drm_put_dev(dev); - } + mutex_unlock(&drm_global_mutex); drm_minor_release(minor); @@ -525,7 +523,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, struct drm_device *dev = file_priv->minor->dev; ssize_t ret; - if (!access_ok(VERIFY_WRITE, buffer, count)) + if (!access_ok(buffer, count)) return -EFAULT; ret = mutex_lock_interruptible(&file_priv->event_read_lock); @@ -569,6 +567,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, file_priv->event_space -= length; list_add(&e->link, &file_priv->event_list); spin_unlock_irq(&dev->event_lock); + wake_up_interruptible(&file_priv->event_wait); break; } diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 781af1d42d766bf63db12801ace4703132db84fa..b64a6ffc0aed72c6429e81b68527bb60b4a447a7 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -793,7 +793,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) struct drm_device *dev = fb->dev; struct drm_atomic_state *state; struct drm_plane *plane; - struct drm_connector *conn; + struct drm_connector *conn __maybe_unused; struct drm_connector_state *conn_state; int i, ret; unsigned plane_mask; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 40179c5fc6b87981353181ce41529a85a1cc8100..8750f3f02b3fe4b5841b1121bc3c2ee96497a17d 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -99,6 +99,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); int drm_sysfs_connector_add(struct drm_connector *connector); void drm_sysfs_connector_remove(struct drm_connector *connector); +void drm_sysfs_lease_event(struct drm_device *dev); + /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); void drm_gem_destroy(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 67b1fca39aa6c76755816dee80da87805fb8d70d..ed20dbae660708087bce224cc1c61f7af3a7598c 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -105,7 +105,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, .desc = compat_ptr(v32.desc), }; err = drm_ioctl_kernel(file, drm_version, &v, - DRM_UNLOCKED|DRM_RENDER_ALLOW); + DRM_RENDER_ALLOW); if (err) return err; @@ -139,7 +139,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, .unique = compat_ptr(uq32.unique), }; - err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_getunique, &uq, 
0); if (err) return err; @@ -177,7 +177,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, return -EFAULT; map.offset = m32.offset; - err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); if (err) return err; @@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, m32.size = map.size; m32.type = map.type; m32.flags = map.flags; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); m32.mtrr = map.mtrr; if (copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; @@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, m32.offset = map.offset; m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); if (map.handle != compat_ptr(m32.handle)) pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", map.handle, m32.type, m32.offset); @@ -262,7 +262,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, client.idx = c32.idx; - err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_getclient, &client, 0); if (err) return err; @@ -292,7 +292,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, drm_stats32_t __user *argp = (void __user *)arg; int err; - err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_noop, NULL, 0); if (err) return err; @@ -372,7 +372,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) .size = from->buf_size, .low_mark = from->low_mark, .high_mark = from->high_mark}; - return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)); + + if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) + return -EFAULT; + return 0; } static int drm_legacy_infobufs32(struct drm_device *dev, void *data, @@ -526,7 +529,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, if (err) return err; - req32.handle = ptr_to_compat(req.handle); + req32.handle = ptr_to_compat((void __user *)req.handle); if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; @@ -884,8 +887,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, sizeof(req64.modifier))) return -EFAULT; - err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, - DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 0); if (err) return err; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index ea10e9a26aadd5e8598e837ccd4375a15bfbe3dc..d15d95e3bbf440eac9e212aafe2deceab49ffcd0 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -37,6 +37,7 @@ #include #include +#include /** * DOC: getunique and setversion story @@ -111,17 +112,18 @@ int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_unique *u = data; - struct drm_master *master = file_priv->master; + struct drm_master *master; - mutex_lock(&master->dev->master_mutex); + mutex_lock(&dev->master_mutex); + master = file_priv->master; if (u->unique_len >= master->unique_len) { if (copy_to_user(u->unique, master->unique, master->unique_len)) { - mutex_unlock(&master->dev->master_mutex); + mutex_unlock(&dev->master_mutex); return -EFAULT; } } u->unique_len = master->unique_len; - mutex_unlock(&master->dev->master_mutex); + mutex_unlock(&dev->master_mutex); return 0; } @@ 
-548,22 +550,21 @@ EXPORT_SYMBOL(drm_ioctl_permit); /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, - DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), @@ -571,8 +572,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -619,66 +620,66 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0), + + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), + 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, - DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, 
drm_mode_revoke_lease_ioctl, DRM_MASTER), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -746,7 +747,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, return retcode; /* Enforce sane locking for modern driver ioctls. */ - if (!drm_core_check_feature(dev, DRIVER_LEGACY) || + if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) || (flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { @@ -794,13 +795,17 @@ long drm_ioctl(struct file *filp, if (is_driver_ioctl) { /* driver ioctl */ - if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls) + unsigned int index = nr - DRM_COMMAND_BASE; + + if (index >= dev->driver->num_ioctls) goto err_i1; - ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; + index = array_index_nospec(index, dev->driver->num_ioctls); + ioctl = &dev->driver->ioctls[index]; } else { /* core ioctl */ if (nr >= DRM_CORE_IOCTL_COUNT) goto err_i1; + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT); ioctl = &drm_ioctls[nr]; } @@ -882,6 +887,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) if (nr >= DRM_CORE_IOCTL_COUNT) return false; + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT); *flags = drm_ioctls[nr].flags; return true; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index b82da96ded5c85d847c9c7566236f1deaf48ffc2..086f2adc541b02d61eb222517d622e931e926692 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master) if (master->lessor) { /* Tell the master to check the lessee list */ - drm_sysfs_hotplug_event(dev); + drm_sysfs_lease_event(dev); drm_master_put(&master->lessor); } @@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, object_count = cl->object_count; - object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); + object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), + array_size(object_count, sizeof(__u32))); if (IS_ERR(object_ids)) return PTR_ERR(object_ids); diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index fcb0ab0abb75572a165341d00de8f83b01393aa9..57cc9aa6683a09cd65e29e08fb185b2946c68afa 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -458,12 +458,13 @@ static int set_property_atomic(struct drm_mode_object *obj, struct drm_modeset_acquire_ctx ctx; int ret; - drm_modeset_acquire_init(&ctx, 0); - state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); state->acquire_ctx = &ctx; + retry: if (prop == state->dev->mode_config.dpms_property) { if (obj->type != DRM_MODE_OBJECT_CONNECTOR) { diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 02db9ac82d7a91a9b7de054a71a92e377d27a3d5..a3104d79b48f07d229264d050f993e515d78e396 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) if (mode->hsync) return mode->hsync; - if (mode->htotal < 0) + if (mode->htotal <= 0) return 0; calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index fe9c6c731e8787e1480a436bceb544a811c9b8c5..cc354b49177422e265c96d00ef2afaef3d1b66e5 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,12 +30,26 @@ struct 
drm_dmi_panel_orientation_data { int orientation; }; +static const struct drm_dmi_panel_orientation_data acer_s1003 = { + .width = 800, + .height = 1280, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, }; +static const struct drm_dmi_panel_orientation_data gpd_micropc = { + .width = 720, + .height = 1280, + .bios_dates = (const char * const []){ "04/26/2019", + NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data gpd_pocket = { .width = 1200, .height = 1920, @@ -44,6 +58,14 @@ static const struct drm_dmi_panel_orientation_data gpd_pocket = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data gpd_pocket2 = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "06/28/2018", "08/28/2018", + "12/07/2018", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data gpd_win = { .width = 720, .height = 1280, @@ -60,6 +82,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { + .width = 720, + .height = 1280, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { .width = 800, .height = 1280, @@ -67,12 +95,32 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { }; static const struct dmi_system_id orientation_data[] = { - { /* Asus T100HA */ + { /* Acer One 10 (S1003) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), + }, + .driver_data = (void *)&acer_s1003, + }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), }, .driver_data = (void *)&asus_t100ha, + }, { /* GPD MicroPC (generic strings, also match on bios date) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_micropc, + }, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" @@ -86,6 +134,14 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), }, .driver_data = (void *)&gpd_pocket, + }, { /* GPD Pocket 2 (generic strings, also match on bios date) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_pocket2, }, { /* GPD Win (same note on DMI match as GPD Pocket) */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), diff --git a/drivers/gpu/drm/drm_plane.c 
b/drivers/gpu/drm/drm_plane.c index 6153cbda239fe6e6d6506503946c4119d77b7c46..4c530900a2084f6ba109f43e87ecddf2b3c842dc 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -211,6 +211,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, format_modifier_count++; } + if (format_modifier_count) + config->allow_fb_modifiers = true; + plane->modifier_count = format_modifier_count; plane->modifiers = kmalloc_array(format_modifier_count, sizeof(format_modifiers[0]), @@ -937,6 +940,11 @@ static int drm_mode_cursor_common(struct drm_device *dev, if (ret) goto out; + if (!drm_lease_held(file_priv, crtc->cursor->base.id)) { + ret = -EACCES; + goto out; + } + ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx); goto out; } @@ -1039,6 +1047,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, plane = crtc->primary; + if (!drm_lease_held(file_priv, plane->base.id)) + return -EACCES; + if (crtc->funcs->page_flip_target) { u32 current_vblank; int r; @@ -1172,6 +1183,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, out: if (fb) drm_framebuffer_put(fb); + fb = NULL; if (plane->old_fb) drm_framebuffer_put(plane->old_fb); plane->old_fb = NULL; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a1bb157bfdfaeb9bad32c5f39b200970547ec039..c0b26135dbd5b52f7dd1aa64e49dbce003a7238a 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -479,6 +479,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, count = (*connector_funcs->get_modes)(connector); + /* + * Fallback for when DDC probe failed in drm_get_edid() and thus skipped + * override/firmware EDID. + */ + if (count == 0 && connector->status == connector_status_connected) + count = drm_add_override_edid_modes(connector); + if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); count += drm_helper_probe_add_cmdline_mode(connector); @@ -574,6 +581,9 @@ static void output_poll_execute(struct work_struct *work) enum drm_connector_status old_status; bool repoll = false, changed; + if (!dev->mode_config.poll_enabled) + return; + /* Pick up any changes detected by the probe functions. 
*/ changed = dev->mode_config.delayed_event; dev->mode_config.delayed_event = false; @@ -728,7 +738,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init); */ void drm_kms_helper_poll_fini(struct drm_device *dev) { - drm_kms_helper_poll_disable(dev); + if (!dev->mode_config.poll_enabled) + return; + + dev->mode_config.poll_enabled = false; + cancel_delayed_work_sync(&dev->mode_config.output_poll_work); } EXPORT_SYMBOL(drm_kms_helper_poll_fini); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index cdb10f885a4febea85fc5272e22f1378d770da8b..69dfed57c2f844de3fb475d20500a51b89434170 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -556,7 +556,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) + if (!length || length > INT_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index b3c1daad1169b806271691c7e373900ef6b27e5e..ecb7b33002bb27de0af599702a354e7c241cd6ed 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) connector->kdev = NULL; } +void drm_sysfs_lease_event(struct drm_device *dev) +{ + char *event_string = "LEASE=1"; + char *envp[] = { event_string, NULL }; + + DRM_DEBUG("generating lease event\n"); + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); +} + /** * drm_sysfs_hotplug_event - generate a DRM uevent * @dev: DRM device diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 28cdcf76b6f9988a24d6126090264d99fd8bd4c3..33a72a84361ef672431740c4e954a67618dc2989 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, write_sequnlock(&vblank->seqlock); } +static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe) +{ + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + + return vblank->max_vblank_count ?: dev->max_vblank_count; +} + /* * "No hw counter" fallback implementation of .get_vblank_counter() hook, * if there is no useable hardware frame counter available. 
*/ static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe) { - WARN_ON_ONCE(dev->max_vblank_count != 0); + WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0); return 0; } @@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, ktime_t t_vblank; int count = DRM_TIMESTAMP_MAXRETRIES; int framedur_ns = vblank->framedur_ns; + u32 max_vblank_count = drm_max_vblank_count(dev, pipe); /* * Interrupts were disabled prior to this call, so deal with counter @@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq); } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); - if (dev->max_vblank_count != 0) { + if (max_vblank_count) { /* trust the hw counter when it's around */ - diff = (cur_vblank - vblank->last) & dev->max_vblank_count; + diff = (cur_vblank - vblank->last) & max_vblank_count; } else if (rc && framedur_ns) { u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time)); @@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_reset); +/** + * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value + * @crtc: CRTC in question + * @max_vblank_count: max hardware vblank counter value + * + * Update the maximum hardware vblank counter value for @crtc + * at runtime. Useful for hardware where the operation of the + * hardware vblank counter depends on the currently active + * display configuration. + * + * For example, if the hardware vblank counter does not work + * when a specific connector is active the maximum can be set + * to zero. And when that specific connector isn't active the + * maximum can again be set to the appropriate non-zero value. + * + * If used, must be called before drm_vblank_on(). 
+ */ +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, + u32 max_vblank_count) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + + WARN_ON(dev->max_vblank_count); + WARN_ON(!READ_ONCE(vblank->inmodeset)); + + vblank->max_vblank_count = max_vblank_count; +} +EXPORT_SYMBOL(drm_crtc_set_max_vblank_count); + /** * drm_crtc_vblank_on - enable vblank events on a CRTC * @crtc: CRTC in question @@ -1533,7 +1572,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, unsigned int flags, pipe, high_pipe; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; @@ -1774,7 +1813,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, return -EINVAL; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) @@ -1832,7 +1871,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, return -EINVAL; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 83c1f46670bfea9dcbe95e42da598c8665576b03..53bbe78d6f74837fdbfb430454fcfa9766a53634 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -345,7 +345,6 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_etnaviv_gem_userptr *args = data; - int access; if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || args->flags == 0) @@ -357,12 +356,7 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, args->user_ptr & ~PAGE_MASK) return -EINVAL; - if (args->flags & ETNA_USERPTR_WRITE) - access = VERIFY_WRITE; - else - access = VERIFY_READ; - - if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr, + if (!access_ok((void __user *)(unsigned long)args->user_ptr, args->user_size)) return -EFAULT; @@ -527,6 +521,9 @@ static int etnaviv_bind(struct device *dev) } drm->dev_private = priv; + dev->dma_parms = &priv->dma_parms; + dma_set_max_seg_size(dev, SZ_2G); + mutex_init(&priv->gem_lock); INIT_LIST_HEAD(&priv->gem_list); priv->num_gpus = 0; @@ -564,6 +561,8 @@ static void etnaviv_unbind(struct device *dev) component_unbind_all(dev, drm); + dev->dma_parms = NULL; + drm->dev_private = NULL; kfree(priv); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 8d02d1b7dcf5a54b5bc1623b847f73dad1b493d0..b2930d1fe97c04f1de0f1ff208d422af6b6c181b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -43,6 +43,7 @@ struct etnaviv_file_private { struct etnaviv_drm_private { int num_gpus; + struct device_dma_parameters dma_parms; struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; /* list of GEM objects: */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 9146e30e24a6de20d8bda5f80c7ea7470ddca334..468dff2f79040e32ed972a01481370c317d11b79 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) return; etnaviv_dump_core = false; + mutex_lock(&gpu->mmu->lock); + mmu_size = etnaviv_iommu_dump_size(gpu->mmu); /* We always dump registers, mmu, ring and end marker 
*/ @@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL); if (!iter.start) { + mutex_unlock(&gpu->mmu->lock); dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); return; } @@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) obj->base.size); } + mutex_unlock(&gpu->mmu->lock); + etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index f225fbc6edd2d94c7b82a37b56c401a66966fd95..6a859e077ea0290d115031da247d4dc69033d1eb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (IS_ERR(gpu->cmdbuf_suballoc)) { dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n"); ret = PTR_ERR(gpu->cmdbuf_suballoc); - goto fail; + goto destroy_iommu; } /* Create buffer: */ @@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) PAGE_SIZE); if (ret) { dev_err(gpu->dev, "could not create command buffer\n"); - goto destroy_iommu; + goto destroy_suballoc; } if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && @@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) free_buffer: etnaviv_cmdbuf_free(&gpu->buffer); gpu->buffer.suballoc = NULL; +destroy_suballoc: + etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); + gpu->cmdbuf_suballoc = NULL; destroy_iommu: etnaviv_iommu_destroy(gpu->mmu); gpu->mmu = NULL; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 69e9b431bf1f02ec7c87e5f740f80595ad98dd53..e5a9fae31ab7b7bbba5d33573e65a136a5bf83fa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) * If the GPU managed to complete this jobs fence, the timout is * spurious. Bail out. 
*/ - if (fence_completed(gpu, submit->out_fence->seqno)) + if (dma_fence_is_signaled(submit->out_fence)) return; /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 94529aa8233922b71cc36011fff305280651be53..aef487dd873153d77fd602726ab6bd92256ab593 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end) return frm; } -static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc) -{ - struct decon_context *ctx = crtc->ctx; - - return decon_get_frame_count(ctx, false); -} - static void decon_setup_trigger(struct decon_context *ctx) { if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) @@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = { .disable = decon_disable, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, - .get_vblank_counter = decon_get_vblank_counter, .atomic_begin = decon_atomic_begin, .update_plane = decon_update_plane, .disable_plane = decon_disable_plane, @@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data) int ret; ctx->drm_dev = drm_dev; - drm_dev->max_vblank_count = 0xffffffff; for (win = ctx->first_win; win < WINDOWS_NR; win++) { ctx->configs[win].pixel_formats = decon_formats; diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index eea90251808fa2e58398fdcb1cac01d160307320..b3e23ace5869cbf61a6cd0cf2d3801167ef14a29 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -43,13 +43,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, if (exynos_crtc->ops->disable) exynos_crtc->ops->disable(exynos_crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_send_vblank_event(crtc, crtc->state->event); - spin_unlock_irq(&crtc->dev->event_lock); - crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, @@ -162,16 +161,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc) exynos_crtc->ops->disable_vblank(exynos_crtc); } -static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->get_vblank_counter) - return exynos_crtc->ops->get_vblank_counter(exynos_crtc); - - return 0; -} - static const struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -181,7 +170,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, .enable_vblank = exynos_drm_crtc_enable_vblank, .disable_vblank = exynos_drm_crtc_disable_vblank, - .get_vblank_counter = exynos_drm_crtc_get_vblank_counter, }; struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c737c4bd2c19b3f2cf141fadc7e1b1d4344b88db..630f1edc5de224282f5797733102169882cdee33 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -133,7 +133,6 @@ struct exynos_drm_crtc_ops { void (*disable)(struct exynos_drm_crtc *crtc); int 
(*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); - u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc); enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, const struct drm_display_mode *mode); bool (*mode_fixup)(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7ba414b52faa940595a028db7fa3e959bc7a58cd..d71188b982cb858061a51c8e41f0cc027df58138 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1292,6 +1292,7 @@ static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 0ddb6eec7b113ea306fea4bde563e8ecb9945495..df228436a03d92965d65178598df9c923a21f7c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler) scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); do { cpu_relax(); - } while (retry > 1 && + } while (--retry > 1 && scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); do { cpu_relax(); scaler_write(1, SCALER_INT_EN); - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); return retry ? 0 : -EIO; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 19697c1362d8facf536a55485c45b2b2aaae6a85..362400eaa68840415c087e55a81587844a5c5929 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -302,6 +302,7 @@ static int vidi_get_modes(struct drm_connector *connector) struct vidi_context *ctx = ctx_from_connector(connector); struct edid *edid; int edid_len; + int count; /* * the edid data comes from user side and it would be set @@ -321,7 +322,11 @@ static int vidi_get_modes(struct drm_connector *connector) drm_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + count = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return count; } static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index ffbf4a950f696d13476b0bf641b9586ce8cc44d7..522d6c46d7b14bf0991d4c6c9aba8ea500234178 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -20,6 +20,7 @@ #include "regs-vp.h" #include +#include #include #include #include @@ -337,15 +338,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx) mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) +static bool mixer_is_synced(struct mixer_context *ctx) { - /* block update on vsync */ - mixer_reg_writemask(ctx, MXR_STATUS, enable ? 
- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); + u32 base, shadow; + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + return !(mixer_reg_read(ctx, MXR_CFG) & + MXR_CFG_LAYER_UPDATE_COUNT_MASK); + + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + return false; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); + if (base != shadow) + return false; + + return true; +} + +static int mixer_wait_for_sync(struct mixer_context *ctx) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100000); + + while (!mixer_is_synced(ctx)) { + usleep_range(1000, 2000); + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + return 0; +} + +static void mixer_disable_sync(struct mixer_context *ctx) +{ + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); +} + +static void mixer_enable_sync(struct mixer_context *ctx) +{ + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? - VP_SHADOW_UPDATE_ENABLE : 0); + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); } static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) @@ -482,7 +530,6 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -537,11 +584,6 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_regs_dump(ctx); } -static void mixer_layer_update(struct mixer_context *ctx) -{ - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { @@ -618,11 +660,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha); - /* layer update mandatory for mixer 16.0.33.0 */ - if (ctx->mxr_ver == MXR_VER_16_0_33_0 || - ctx->mxr_ver == MXR_VER_128_0_0_184) - mixer_layer_update(ctx); - spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); @@ -687,7 +724,7 @@ static void mixer_win_reset(struct mixer_context *ctx) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - u32 val, base, shadow; + u32 val; spin_lock(&ctx->reg_slock); @@ -701,26 +738,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && - vp_reg_read(ctx, VP_SHADOW_UPDATE)) - goto out; - - base = mixer_reg_read(ctx, MXR_CFG); - shadow = mixer_reg_read(ctx, MXR_CFG_S); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); - if (base != shadow) - goto out; - } + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) + && !mixer_is_synced(ctx)) + goto out; drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -895,12 +915,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) { - struct mixer_context *mixer_ctx = crtc->ctx; + struct mixer_context *ctx = crtc->ctx; - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, false); + if (mixer_wait_for_sync(ctx)) + dev_err(ctx->dev, "timeout waiting for VSYNC\n"); + mixer_disable_sync(ctx); } static void mixer_update_plane(struct exynos_drm_crtc *crtc, @@ -942,7 +964,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, true); + mixer_enable_sync(mixer_ctx); exynos_crtc_handle_event(crtc); } @@ -957,7 +979,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) exynos_drm_pipe_clk_enable(crtc, true); - mixer_vsync_set_update(ctx, false); + mixer_disable_sync(ctx); mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -970,7 +992,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_commit(ctx); - mixer_vsync_set_update(ctx, true); + mixer_enable_sync(ctx); set_bit(MXR_BIT_POWERED, &ctx->flags); } diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index de9531caaca0e0ae5bb36f8bb348b445f43c8757..4f96cd10971f5b25e45a96489b1a1c83690c7bfd 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -404,6 +404,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct 
drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } @@ -594,6 +597,9 @@ void cdv_intel_lvds_init(struct drm_device *dev, int pipe; u8 pin; + if (!dev_priv->lvds_enabled_in_vbt) + return; + pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 63bde4e86c6a11643419a099b2e3b4216a0c038b..e019ea271ffc432262f3743d8e548cf14c76d518 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv, if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) dev_priv->edp.support = 1; + dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0; + DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config); + /* This bit means to use 96Mhz for DPLL_A or not */ if (driver->primary_lfp_id) dev_priv->dplla_96mhz = true; diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 1b7fd6a9d8a518ba5ad635a8524a109d822b1c54..f73a02a2a5b39f79f940d93e3babd59f581e6602 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -139,6 +139,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit, s32 freq_error, min_error = 100000; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; @@ -195,6 +196,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit, int err = target; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 93d2f4000d2f90beb943539dd0bdfc0f668c7707..be3cf9b348bd4c19b240c74de77cccbd94eba89d 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -538,6 +538,7 @@ struct drm_psb_private { int lvds_ssc_freq; bool is_lvds_on; bool is_mipi_on; + bool lvds_enabled_in_vbt; u32 mipi_ctrl_display; unsigned int core_freq; diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile index 3df726696372fdbfce0a8631e7590a7f0a100fa9..71c248f4c75624f46a10f8c5c959282a0dbc3bbe 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Makefile +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -1,3 +1,4 @@ -hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o +hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o \ + hibmc_drm_fbdev.o hibmc_ttm.o hibmc_drm_i2c.o obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index a956545774a39225702ac05243769ed7cc3e76af..3a0c0c650eed636e903cce7468a202597a528caf 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -39,6 +39,7 @@ struct hibmc_dislay_pll_config { }; static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { + {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ}, {800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ}, {1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ}, 
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ}, @@ -46,6 +47,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { {1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ}, {1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, {1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, + {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ}, + {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ}, {1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ}, {1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ}, {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ}, @@ -123,11 +126,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); reg = state->fb->width * (state->fb->format->cpp[0]); - /* now line_pad is 16 */ - reg = PADDING(16, reg); line_l = state->fb->width * state->fb->format->cpp[0]; - line_l = PADDING(16, line_l); + line_l = PADDING(128, line_l); writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), priv->mmio + HIBMC_CRT_FB_WIDTH); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index d4f6f1f9df5b73e06fd7f19999aa345f1b46798a..7be784a77efa31e9c96005f8e51e16a9cfe802f9 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -55,9 +55,27 @@ irqreturn_t hibmc_drm_interrupt(int irq, void *arg) return IRQ_HANDLED; } +static void hibmc_remove_framebuffers(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); + + drm_fb_helper_remove_conflicting_framebuffers(ap, "hibmcdrmfb", false); + + kfree(ap); +} + static struct drm_driver hibmc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_HAVE_IRQ, + .load = hibmc_load, + .unload = hibmc_unload, .fops = &hibmc_fops, .name = "hibmc", .date = "20160828", @@ -70,7 +88,7 @@ static struct drm_driver hibmc_driver = { .irq_handler = hibmc_drm_interrupt, }; -static int __maybe_unused hibmc_pm_suspend(struct device *dev) +static int hibmc_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); @@ -88,7 +106,7 @@ static int __maybe_unused hibmc_pm_suspend(struct device *dev) return 0; } -static int __maybe_unused hibmc_pm_resume(struct device *dev) +static int hibmc_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); @@ -115,11 +133,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) priv->dev->mode_config.min_width = 0; priv->dev->mode_config.min_height = 0; priv->dev->mode_config.max_width = 1920; - priv->dev->mode_config.max_height = 1440; + priv->dev->mode_config.max_height = 1200; priv->dev->mode_config.fb_base = priv->fb_base; priv->dev->mode_config.preferred_depth = 24; - priv->dev->mode_config.prefer_shadow = 0; + priv->dev->mode_config.prefer_shadow = 1; priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; @@ -138,6 +156,21 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) return 0; } +static void hibmc_hw_unmap(struct hibmc_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + + if (priv->mmio) { + devm_iounmap(dev->dev, priv->mmio); + priv->mmio = NULL; + } + + if (priv->fb_map) { + devm_iounmap(dev->dev, priv->fb_map); + priv->fb_map = NULL; + } +} + 
static void hibmc_kms_fini(struct hibmc_drm_private *priv) { if (priv->mode_config_initialized) { @@ -269,7 +302,7 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv) return 0; } -static int hibmc_unload(struct drm_device *dev) +void hibmc_unload(struct drm_device *dev) { struct hibmc_drm_private *priv = dev->dev_private; @@ -284,11 +317,11 @@ static int hibmc_unload(struct drm_device *dev) hibmc_kms_fini(priv); hibmc_mm_fini(priv); + hibmc_hw_unmap(priv); dev->dev_private = NULL; - return 0; } -static int hibmc_load(struct drm_device *dev) +int hibmc_load(struct drm_device *dev, unsigned long flags) { struct hibmc_drm_private *priv; int ret; @@ -350,55 +383,22 @@ static int hibmc_load(struct drm_device *dev) static int hibmc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct drm_device *dev; - int ret; - - dev = drm_dev_alloc(&hibmc_driver, &pdev->dev); - if (IS_ERR(dev)) { - DRM_ERROR("failed to allocate drm_device\n"); - return PTR_ERR(dev); - } + hibmc_remove_framebuffers(pdev); - dev->pdev = pdev; - pci_set_drvdata(pdev, dev); - - ret = pci_enable_device(pdev); - if (ret) { - DRM_ERROR("failed to enable pci device: %d\n", ret); - goto err_free; - } - - ret = hibmc_load(dev); - if (ret) { - DRM_ERROR("failed to load hibmc: %d\n", ret); - goto err_disable; - } - - ret = drm_dev_register(dev, 0); - if (ret) { - DRM_ERROR("failed to register drv for userspace access: %d\n", - ret); - goto err_unload; - } - return 0; - -err_unload: - hibmc_unload(dev); -err_disable: - pci_disable_device(pdev); -err_free: - drm_dev_unref(dev); - - return ret; + return drm_get_pci_dev(pdev, ent, &hibmc_driver); } static void hibmc_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - drm_dev_unregister(dev); - hibmc_unload(dev); - drm_dev_unref(dev); + drm_put_dev(dev); + pci_disable_device(pdev); +} + +static void hibmc_pci_shutdown(struct pci_dev *pdev) +{ + hibmc_pci_remove(pdev); } static struct pci_device_id hibmc_pci_table[] = { @@ -411,6 +411,7 @@ static struct pci_driver hibmc_pci_driver = { .id_table = hibmc_pci_table, .probe = hibmc_pci_probe, .remove = hibmc_pci_remove, + .shutdown = hibmc_pci_shutdown, .driver.pm = &hibmc_pm_ops, }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index e195521eb41e9b54c89c428afc3bfbf7c4a41216..c246151b29942a9e083e7d282f031e9778a6408c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -19,12 +19,18 @@ #ifndef HIBMC_DRM_DRV_H #define HIBMC_DRM_DRV_H +#include +#include +#include + +#include #include #include #include #include #include + struct hibmc_framebuffer { struct drm_framebuffer fb; struct drm_gem_object *obj; @@ -36,6 +42,13 @@ struct hibmc_fbdev { int size; }; +struct hibmc_connector { + struct drm_connector base; + + struct i2c_adapter adapter; + struct i2c_algo_bit_data bit_data; +}; + struct hibmc_drm_private { /* hw */ void __iomem *mmio; @@ -46,6 +59,7 @@ struct hibmc_drm_private { /* drm */ struct drm_device *dev; + struct hibmc_connector connector; bool mode_config_initialized; struct drm_atomic_state *suspend_state; @@ -60,6 +74,16 @@ struct hibmc_drm_private { bool mm_inited; }; +static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector) +{ + return container_of(connector, struct hibmc_connector, base); +} + +static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev) +{ + return dev->dev_private; +} + 
#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb) struct hibmc_bo { @@ -85,6 +109,8 @@ void hibmc_set_power_mode(struct hibmc_drm_private *priv, unsigned int power_mode); void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate); +int hibmc_load(struct drm_device *dev, unsigned long flags); +void hibmc_unload(struct drm_device *dev); int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); @@ -108,6 +134,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev, u32 handle, u64 *offset); int hibmc_mmap(struct file *filp, struct vm_area_struct *vma); +int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector); extern const struct drm_mode_config_funcs hibmc_mode_funcs; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index b92595c477ef63038811e1af940822599eb577e3..6217962c12892144e46e64ae6cb63ee9e562bda7 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -71,13 +71,12 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - sizes->surface_depth = 32; bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel; + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * bytes_per_pixel, 128); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); @@ -122,6 +121,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); if (IS_ERR(hi_fbdev->fb)) { ret = PTR_ERR(hi_fbdev->fb); + hi_fbdev->fb = NULL; DRM_ERROR("failed to initialize framebuffer: %d\n", ret); goto out_release_fbi; } @@ -209,7 +209,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv) goto fini; } - ret = drm_fb_helper_initial_config(&hifbdev->helper, 16); + ret = drm_fb_helper_initial_config(&hifbdev->helper, 32); if (ret) { DRM_ERROR("failed to setup initial conn config: %d\n", ret); goto fini; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c new file mode 100644 index 0000000000000000000000000000000000000000..ffd7c7bf4b7d8dc306b24133d0a02f7e0a0b63bf --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. 
+ * + * Author: + * Tian Tao + */ + +#include +#include + +#include + +#include "hibmc_drm_drv.h" + +#define GPIO_DATA 0x0802A0 +#define GPIO_DATA_DIRECTION 0x0802A4 + +#define I2C_SCL_MASK BIT(0) +#define I2C_SDA_MASK BIT(1) + +static void hibmc_set_i2c_signal(void *data, u32 mask, int value) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if (value) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } else { + u32 tmp_data = readl(priv->mmio + GPIO_DATA); + + tmp_data &= ~mask; + writel(tmp_data, priv->mmio + GPIO_DATA); + + tmp_dir |= mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } +} + +static int hibmc_get_i2c_signal(void *data, u32 mask) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if ((tmp_dir & mask) != mask) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } + + return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0; +} + +static void hibmc_ddc_setsda(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SDA_MASK, state); +} + +static void hibmc_ddc_setscl(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SCL_MASK, state); +} + +static int hibmc_ddc_getsda(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SDA_MASK); +} + +static int hibmc_ddc_getscl(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SCL_MASK); +} + +int hibmc_ddc_create(struct drm_device *drm_dev, + struct hibmc_connector *connector) +{ + connector->adapter.owner = THIS_MODULE; + connector->adapter.class = I2C_CLASS_DDC; + snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus"); + connector->adapter.dev.parent = &drm_dev->pdev->dev; + i2c_set_adapdata(&connector->adapter, connector); + connector->adapter.algo_data = &connector->bit_data; + + connector->bit_data.udelay = 20; + connector->bit_data.timeout = usecs_to_jiffies(2000); + connector->bit_data.data = connector; + connector->bit_data.setsda = hibmc_ddc_setsda; + connector->bit_data.setscl = hibmc_ddc_setscl; + connector->bit_data.getsda = hibmc_ddc_getsda; + connector->bit_data.getscl = hibmc_ddc_getscl; + + return i2c_bit_add_bus(&connector->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h index f7035bf3ec1fa689696b08d3f6fbdbbfd47560c3..e2c6155152f8151002bdedf4d96974fd87090e72 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h @@ -174,11 +174,12 @@ #define CRT_PLL1_HS_78MHZ 0x23540F82 #define CRT_PLL1_HS_74MHZ 0x23941dc2 #define CRT_PLL1_HS_80MHZ 0x23941001 -#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2 +#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2 #define CRT_PLL1_HS_108MHZ 0x23b41b01 #define CRT_PLL1_HS_162MHZ 0x23480681 #define CRT_PLL1_HS_148MHZ 0x23541dc2 #define CRT_PLL1_HS_193MHZ 0x234807c1 +#define CRT_PLL1_HS_106MHZ 0x237C1641 #define CRT_PLL2_HS 0x802ac #define CRT_PLL2_HS_25MHZ 0x206B851E @@ -191,6 +192,7 @@ #define CRT_PLL2_HS_162MHZ 0xA0000000 #define CRT_PLL2_HS_148MHZ 0xB0CCCCCD #define CRT_PLL2_HS_193MHZ 0xC0872B02 +#define CRT_PLL2_HS_106MHZ 0x0075c28f #define HIBMC_FIELD(field, value) (field(value) & field##_MASK) #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c 
b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index 744956cea7496afd8ec2672bd953eaf67ef661c3..aeef73037da1e4ed55aec578925c367a92e1c977 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -22,15 +22,74 @@ #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" +struct hibmc_resolution { + int w; + int h; +}; + +static const struct hibmc_resolution hibmc_modetables[] = { + {640, 480}, + {800, 600}, + {1024, 768}, + {1152, 864}, + {1280, 768}, + {1280, 720}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1600, 900}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} +}; + +static int hibmc_valid_mode(int w, int h) +{ + int i; + + for (i = 0; i < sizeof(hibmc_modetables) / + sizeof(struct hibmc_resolution); i++) { + if (hibmc_modetables[i].w == w && hibmc_modetables[i].h == h) + return 0; + } + + return -1; +} + static int hibmc_connector_get_modes(struct drm_connector *connector) { - return drm_add_modes_noedid(connector, 800, 600); + int count; + void *edid; + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + edid = drm_get_edid(connector, &hibmc_connector->adapter); + if (edid) { + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + if (count) + goto out; + } + + drm_connector_update_edid_property(connector, NULL); + count = drm_add_modes_noedid(connector, 1920, 1200); + drm_set_preferred_mode(connector, 1024, 768); + +out: + kfree(edid); + return count; } static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - return MODE_OK; + int vrefresh = drm_mode_vrefresh(mode); + + if (vrefresh < 59 || vrefresh > 61) + return MODE_NOMODE; + else if (hibmc_valid_mode(mode->hdisplay, mode->vdisplay) != 0) + return MODE_NOMODE; + else + return MODE_OK; } static struct drm_encoder * @@ -46,38 +105,41 @@ static const struct drm_connector_helper_funcs .best_encoder = hibmc_connector_best_encoder, }; +static void hibmc_connector_destroy(struct drm_connector *connector) +{ + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + i2c_del_adapter(&hibmc_connector->adapter); + drm_connector_cleanup(connector); +} + static const struct drm_connector_funcs hibmc_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, + .destroy = hibmc_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static struct drm_connector * +static int hibmc_connector_init(struct hibmc_drm_private *priv) { struct drm_device *dev = priv->dev; - struct drm_connector *connector; + struct drm_connector *connector = &priv->connector.base; int ret; - connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL); - if (!connector) { - DRM_ERROR("failed to alloc memory when init connector\n"); - return ERR_PTR(-ENOMEM); - } - ret = drm_connector_init(dev, connector, &hibmc_connector_funcs, DRM_MODE_CONNECTOR_VGA); if (ret) { DRM_ERROR("failed to init connector: %d\n", ret); - return ERR_PTR(ret); + return ret; } drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); + drm_connector_register(connector); - return connector; + return 0; } static void hibmc_encoder_mode_set(struct drm_encoder *encoder, @@ -107,15 +169,21 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = { int 
hibmc_vdac_init(struct hibmc_drm_private *priv) { struct drm_device *dev = priv->dev; + struct hibmc_connector *hibmc_connector = &priv->connector; struct drm_encoder *encoder; - struct drm_connector *connector; + struct drm_connector *connector = &hibmc_connector->base; int ret; - connector = hibmc_connector_init(priv); - if (IS_ERR(connector)) { - DRM_ERROR("failed to create connector: %ld\n", - PTR_ERR(connector)); - return PTR_ERR(connector); + ret = hibmc_connector_init(priv); + if (ret) { + DRM_ERROR("failed to init connector: %d\n", ret); + return ret; + } + + ret = hibmc_ddc_create(dev, hibmc_connector); + if (ret) { + DRM_ERROR("failed to create ddc: %d\n", ret); + return ret; } encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 4871025f7573aede35d63db6623ddecd837ff9d9..83fd77cc41e9259fa4060d6b92d017b9d42a18c7 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -421,7 +421,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, u32 handle; int ret; - args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16); + args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 128); args->size = args->pitch * args->height; ret = hibmc_gem_create(dev, args->size, false, diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 3b378936f57559fbfe80a30a6b14ee00aa9f7136..a9b15001416a729bca83439647cdd52ecf2b9965 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -721,7 +721,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev, if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; if (sarea_priv->dirty) @@ -1041,7 +1041,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in if (u != I810_BUF_CLIENT) DRM_DEBUG("MC found buffer that isn't mine!\n"); - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; sarea_priv->dirty = 0x7f; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a614db310ea276a5deca674363c09db44e79ac79..be15289bff9c187d3926bd066498d9a89bad45a9 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1446,7 +1446,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, } if (index_mode) { - if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { + if (guest_gma >= I915_GTT_PAGE_SIZE) { ret = -EFAULT; goto err; } diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 481896fb712abf4c178b28af2f224a369d0aefd9..85e6736f0a327742329dc9ee3bc8cf2dce294ed3 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = !!(val & DISPPLANE_TILED); + plane->tiled = val & DISPPLANE_TILED; fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 00aad8164dec2037f8fc8709298aa3dba2c8c0fa..afbc648befeccb2da9b3c86482db632658292a3e 100644 --- 
a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1155,10 +1155,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
 		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-		if (ret) {
-			ppgtt_invalidate_spt(spt);
-			return ret;
-		}
+		if (ret)
+			goto err;
 		sub_se.val64 = se->val64;
 		/* Copy the PAT field from PDE. */
@@ -1177,6 +1175,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
+err:
+	/* Cancel the existing address mappings of DMA addr. */
+	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+		gvt_vdbg_mm("invalidate 4K entry\n");
+		ppgtt_invalidate_pte(sub_spt, &sub_se);
+	}
+	/* Release the new allocated spt. */
+	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+		sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+	ppgtt_free_spt(sub_spt);
+	return ret;
 }
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
@@ -1940,7 +1949,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
 */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-	atomic_dec(&mm->pincount);
+	atomic_dec_if_positive(&mm->pincount);
 }
 /**
@@ -2161,7 +2170,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
-	struct intel_gvt_gtt_entry e, m;
+	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
 	dma_addr_t dma_addr;
 	int ret;
@@ -2237,7 +2247,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
-		m = e;
+		m.val64 = e.val64;
+		m.type = e.type;
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb7fca7198527512f8873ad21248cb..bfb6f652b09fc39b6f9329dd409216bdc3903a1b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
 #define _GVT_GTT_H_
 #define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
 struct intel_vgpu_mm;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9ad89e38f6c07643f2176afade5c223388ada755..66abe061f07b09397c228d76a177bea184ac2329 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff =
(gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); } @@ -1731,6 +1741,18 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, entry = __gvt_cache_find_gfn(info->vgpu, gfn); if (!entry) { + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); + if (ret) + goto err_unlock; + + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + if (ret) + goto err_unmap; + } else if (entry->size != size) { + /* the same gfn with different size: unmap and re-map */ + gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); + __gvt_cache_remove_entry(vgpu, entry); + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43aa058e29fca92368c55aa3b458393951de72e8..d0e216d85a22e80896aac3effa29a422741a13f2 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1276,9 +1276,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload) #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) -#define get_last_workload(q) \ - (list_empty(q) ? NULL : container_of(q->prev, \ - struct intel_vgpu_workload, list)) /** * intel_vgpu_create_workload - create a vGPU workload * @vgpu: a vGPU @@ -1297,7 +1294,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, { struct intel_vgpu_submission *s = &vgpu->submission; struct list_head *q = workload_q_head(vgpu, ring_id); - struct intel_vgpu_workload *last_workload = get_last_workload(q); + struct intel_vgpu_workload *last_workload = NULL; struct intel_vgpu_workload *workload = NULL; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; u64 ring_context_gpa; @@ -1320,15 +1317,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, head &= RB_HEAD_OFF_MASK; tail &= RB_TAIL_OFF_MASK; - if (last_workload && same_context(&last_workload->ctx_desc, desc)) { - gvt_dbg_el("ring id %d cur workload == last\n", ring_id); - gvt_dbg_el("ctx head %x real head %lx\n", head, - last_workload->rb_tail); - /* - * cannot use guest context head pointer here, - * as it might not be updated at this time - */ - head = last_workload->rb_tail; + list_for_each_entry_reverse(last_workload, q, list) { + + if (same_context(&last_workload->ctx_desc, desc)) { + gvt_dbg_el("ring id %d cur workload == last\n", + ring_id); + gvt_dbg_el("ctx head %x real head %lx\n", head, + last_workload->rb_tail); + /* + * cannot use guest context head pointer here, + * as it might not be updated at this time + */ + head = last_workload->rb_tail; + break; + } } gvt_dbg_el("ring id %d begin a new workload\n", ring_id); @@ -1389,8 +1391,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_runtime_pm_put(dev_priv); } - if (ret && (vgpu_is_vm_unhealthy(ret))) { - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + if (ret) { + if (vgpu_is_vm_unhealthy(ret)) + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); intel_vgpu_destroy_workload(workload); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 95478db9998b51a410b927d654990967c74c5fdc..e4b9eb1f6b6021b0563341aff68ae4eab3c75f98 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -51,13 +51,11 @@ * granting userspace undue privileges. There are three categories of privilege. 
 *
 * First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
@@ -82,9 +80,9 @@
 * in the per-engine command tables.
 *
 * Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
 */
 /*
@@ -102,8 +100,6 @@ struct drm_i915_cmd_descriptor {
 *	CMD_DESC_REJECT: The command is never allowed
 *	CMD_DESC_REGISTER: The command should be checked against the
 *	register whitelist for the appropriate ring
- *	CMD_DESC_MASTER: The command is allowed if the submitting process
- *	is the DRM master
 */
	u32 flags;
#define CMD_DESC_FIXED (1<<0)
@@ -111,7 +107,6 @@ struct drm_i915_cmd_descriptor {
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
 /*
 * The command's unique identification bits and the bitmask to get them.
@@ -192,7 +187,7 @@ struct drm_i915_cmd_table {
#define CMD(op, opm, f, lm, fl, ...) \
	{ \
		.flags = (fl) | ((f) ?
CMD_DESC_FIXED : 0), \ - .cmd = { (op), ~0u << (opm) }, \ + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } @@ -207,14 +202,13 @@ struct drm_i915_cmd_table { #define R CMD_DESC_REJECT #define W CMD_DESC_REGISTER #define B CMD_DESC_BITMASK -#define M CMD_DESC_MASTER /* Command Mask Fixed Len Action ---------------------------------------------------------- */ -static const struct drm_i915_cmd_descriptor common_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = { CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ), CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), @@ -244,7 +238,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), }; -static const struct drm_i915_cmd_descriptor render_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = { CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_PREDICATE, SMI, F, 1, S ), @@ -311,7 +305,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ), - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), @@ -328,7 +322,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), }; -static const struct drm_i915_cmd_descriptor video_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -372,7 +366,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = { CMD( MFX_WAIT, SMFX, F, 1, S ), }; -static const struct drm_i915_cmd_descriptor vecs_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -410,7 +404,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = { }}, ), }; -static const struct drm_i915_cmd_descriptor blt_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = { CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, .bits = {{ @@ -444,10 +438,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = { }; static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +/* + * For Gen9 we can still rely on the h/w to enforce cmd security, and only + * need to re-enforce the register access checks. We therefore only need to + * teach the cmdparser how to find the end of each command, and identify + * register accesses. The table doesn't need to reject any commands, and so + * the only commands listed here are: + * 1) Those that touch registers + * 2) Those that do not have the default 8-bit length + * + * Note that the default MI length mask chosen for this table is 0xFF, not + * the 0x3F used on older devices. 
This is because the vast majority of MI + * cmds on Gen9 use a standard 8-bit Length field. + * All the Gen9 blitter instructions are standard 0xFF length mask, and + * none allow access to non-general registers, so in fact no BLT cmds are + * included in the table at all. + * + */ +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = { + CMD( MI_NOOP, SMI, F, 1, S ), + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ), + CMD( MI_FLUSH, SMI, F, 1, S ), + CMD( MI_ARB_CHECK, SMI, F, 1, S ), + CMD( MI_REPORT_HEAD, SMI, F, 1, S ), + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ), + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ), + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ), + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ), + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), + + /* + * We allow BB_START but apply further checks. We just sanitize the + * basic fields here. + */ +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1) + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B, + .bits = {{ + .offset = 0, + .mask = MI_BB_START_OPERAND_MASK, + .expected = MI_BB_START_OPERAND_EXPECT, + }}, ), +}; + static const struct drm_i915_cmd_descriptor noop_desc = CMD(MI_NOOP, SMI, F, 1, S); @@ -461,40 +509,44 @@ static const struct drm_i915_cmd_descriptor noop_desc = #undef R #undef W #undef B -#undef M -static const struct drm_i915_cmd_table gen7_render_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, }; -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, }; -static const struct drm_i915_cmd_table gen7_video_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { video_cmds, ARRAY_SIZE(video_cmds) }, +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) }, }; -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { vecs_cmds, ARRAY_SIZE(vecs_cmds) }, +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) }, }; -static const struct drm_i915_cmd_table gen7_blt_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, 
}; -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, }; +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = { + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) }, +}; + + /* * Register whitelists, sorted by increasing register offset. */ @@ -610,17 +662,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; -static const struct drm_i915_reg_descriptor ivb_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), -}; - -static const struct drm_i915_reg_descriptor hsw_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), + REG64_IDX(BCS_GPR, 3), + REG64_IDX(BCS_GPR, 4), + REG64_IDX(BCS_GPR, 5), + REG64_IDX(BCS_GPR, 6), + REG64_IDX(BCS_GPR, 7), + REG64_IDX(BCS_GPR, 8), + REG64_IDX(BCS_GPR, 9), + REG64_IDX(BCS_GPR, 10), + REG64_IDX(BCS_GPR, 11), + REG64_IDX(BCS_GPR, 12), + REG64_IDX(BCS_GPR, 13), + REG64_IDX(BCS_GPR, 14), + REG64_IDX(BCS_GPR, 15), }; #undef REG64 @@ -629,28 +691,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { struct drm_i915_reg_table { const struct drm_i915_reg_descriptor *regs; int num_regs; - bool master; }; static const struct drm_i915_reg_table ivb_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, }; static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, }; static const struct drm_i915_reg_table hsw_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) }, }; static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, +}; + +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = { + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) }, }; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) @@ -708,6 +769,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header) +{ + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; + + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) + return 0xFF; + + DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 
0x%08X\n", cmd_header); + return 0; +} + static bool validate_cmds_sorted(const struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) @@ -865,18 +937,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->i915)) + if (!IS_GEN7(engine->i915) && !(IS_GEN9(engine->i915) && + engine->id == BCS)) return; switch (engine->id) { case RCS: if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_render_ring_cmds; + cmd_tables = hsw_render_ring_cmd_table; cmd_table_count = - ARRAY_SIZE(hsw_render_ring_cmds); + ARRAY_SIZE(hsw_render_ring_cmd_table); } else { - cmd_tables = gen7_render_cmds; - cmd_table_count = ARRAY_SIZE(gen7_render_cmds); + cmd_tables = gen7_render_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table); } if (IS_HASWELL(engine->i915)) { @@ -886,36 +959,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->reg_tables = ivb_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables); } - engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VCS: - cmd_tables = gen7_video_cmds; - cmd_table_count = ARRAY_SIZE(gen7_video_cmds); + cmd_tables = gen7_video_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_blt_ring_cmds; - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + if (IS_GEN9(engine->i915)) { + cmd_tables = gen9_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table); + engine->get_cmd_length_mask = + gen9_blt_get_cmd_length_mask; + + /* BCS Engine unsafe without parser */ + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER; + } else if (IS_HASWELL(engine->i915)) { + cmd_tables = hsw_blt_ring_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table); } else { - cmd_tables = gen7_blt_cmds; - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); + cmd_tables = gen7_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table); } - if (IS_HASWELL(engine->i915)) { + if (IS_GEN9(engine->i915)) { + engine->reg_tables = gen9_blt_reg_tables; + engine->reg_table_count = + ARRAY_SIZE(gen9_blt_reg_tables); + } else if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { engine->reg_tables = ivb_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VECS: - cmd_tables = hsw_vebox_cmds; - cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); + cmd_tables = hsw_vebox_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; @@ -941,7 +1024,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) return; } - engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER; + engine->flags |= I915_ENGINE_USING_CMD_PARSER; } /** @@ -953,7 +1036,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) */ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { - if (!intel_engine_needs_cmd_parser(engine)) + if (!intel_engine_using_cmd_parser(engine)) return; fini_hash_table(engine); @@ -1027,22 +1110,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) } 
static const struct drm_i915_reg_descriptor * -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, u32 addr) { const struct drm_i915_reg_table *table = engine->reg_tables; + const struct drm_i915_reg_descriptor *reg = NULL; int count = engine->reg_table_count; - for (; count > 0; ++table, --count) { - if (!table->master || is_master) { - const struct drm_i915_reg_descriptor *reg; + for (; !reg && (count > 0); ++table, --count) + reg = __find_reg(table->regs, table->num_regs, addr); - reg = __find_reg(table->regs, table->num_regs, addr); - if (reg != NULL) - return reg; - } - } - - return NULL; + return reg; } /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ @@ -1127,8 +1204,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, - const u32 *cmd, u32 length, - const bool is_master) + const u32 *cmd, u32 length) { if (desc->flags & CMD_DESC_SKIP) return true; @@ -1138,12 +1214,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - if ((desc->flags & CMD_DESC_MASTER) && !is_master) { - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", - *cmd); - return false; - } - if (desc->flags & CMD_DESC_REGISTER) { /* * Get the distance between individual register offset @@ -1157,7 +1227,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(engine, is_master, reg_addr); + find_reg(engine, reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n", @@ -1235,16 +1305,112 @@ static bool check_cmd(const struct intel_engine_cs *engine, return true; } +static int check_bbstart(const struct i915_gem_context *ctx, + u32 *cmd, u32 offset, u32 length, + u32 batch_len, + u64 batch_start, + u64 shadow_batch_start) +{ + u64 jump_offset, jump_target; + u32 target_cmd_offset, target_cmd_index; + + /* For igt compatibility on older platforms */ + if (CMDPARSER_USES_GGTT(ctx->i915)) { + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); + return -EACCES; + } + + if (length != 3) { + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n", + length); + return -EINVAL; + } + + jump_target = *(u64*)(cmd+1); + jump_offset = jump_target - batch_start; + + /* + * Any underflow of jump_target is guaranteed to be outside the range + * of a u32, so >= test catches both too large and too small + */ + if (jump_offset >= batch_len) { + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n", + jump_target); + return -EINVAL; + } + + /* + * This cannot overflow a u32 because we already checked jump_offset + * is within the BB, and the batch_len is a u32 + */ + target_cmd_offset = lower_32_bits(jump_offset); + target_cmd_index = target_cmd_offset / sizeof(u32); + + *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset; + + if (target_cmd_index == offset) + return 0; + + if (ctx->jump_whitelist_cmds <= target_cmd_index) { + DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n"); + return -EINVAL; + } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) { + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", + jump_target); + return -EINVAL; + } + + return 0; +} + +static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len) +{ + const u32 batch_cmds = 
DIV_ROUND_UP(batch_len, sizeof(u32)); + const u32 exact_size = BITS_TO_LONGS(batch_cmds); + u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds)); + unsigned long *next_whitelist; + + if (CMDPARSER_USES_GGTT(ctx->i915)) + return; + + if (batch_cmds <= ctx->jump_whitelist_cmds) { + bitmap_zero(ctx->jump_whitelist, batch_cmds); + return; + } + +again: + next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL); + if (next_whitelist) { + kfree(ctx->jump_whitelist); + ctx->jump_whitelist = next_whitelist; + ctx->jump_whitelist_cmds = + next_size * BITS_PER_BYTE * sizeof(long); + return; + } + + if (next_size > exact_size) { + next_size = exact_size; + goto again; + } + + DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n"); + bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds); + + return; +} + #define LENGTH_BIAS 2 /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations + * @ctx: the context in which the batch is to execute * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question - * @shadow_batch_obj: copy of the batch buffer in question + * @batch_start: Canonical base address of batch * @batch_start_offset: byte offset in the batch at which execution starts * @batch_len: length of the commands in batch_obj - * @is_master: is the submitting process the drm master? + * @shadow_batch_obj: copy of the batch buffer in question + * @shadow_batch_start: Canonical base address of shadow_batch_obj * * Parses the specified batch buffer looking for privilege violations as * described in the overview. @@ -1252,14 +1418,17 @@ static bool check_cmd(const struct intel_engine_cs *engine, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int intel_engine_cmd_parser(struct intel_engine_cs *engine, + +int intel_engine_cmd_parser(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master) + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start) { - u32 *cmd, *batch_end; + u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; bool needs_clflush_after = false; @@ -1273,6 +1442,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, return PTR_ERR(cmd); } + init_whitelist(ctx, batch_len); + /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra @@ -1282,31 +1453,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, do { u32 length; - if (*cmd == MI_BATCH_BUFFER_END) { - if (needs_clflush_after) { - void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); - drm_clflush_virt_range(ptr, - (void *)(cmd + 1) - ptr); - } + if (*cmd == MI_BATCH_BUFFER_END) break; - } desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); ret = -EINVAL; - break; - } - - /* - * If the batch buffer contains a chained batch, return an - * error that tells the caller to abort and dispatch the - * workload as a non-secure batch. 
- */ - if (desc->cmd.value == MI_BATCH_BUFFER_START) { - ret = -EACCES; - break; + goto err; } if (desc->flags & CMD_DESC_FIXED) @@ -1320,22 +1475,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, length, batch_end - cmd); ret = -EINVAL; - break; + goto err; } - if (!check_cmd(engine, desc, cmd, length, is_master)) { + if (!check_cmd(engine, desc, cmd, length)) { ret = -EACCES; + goto err; + } + + if (desc->cmd.value == MI_BATCH_BUFFER_START) { + ret = check_bbstart(ctx, cmd, offset, length, + batch_len, batch_start, + shadow_batch_start); + + if (ret) + goto err; break; } + if (ctx->jump_whitelist_cmds > offset) + set_bit(offset, ctx->jump_whitelist); + cmd += length; + offset += length; if (cmd >= batch_end) { DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); ret = -EINVAL; - break; + goto err; } } while (1); + if (needs_clflush_after) { + void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); + + drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr); + } + +err: i915_gem_object_unpin_map(shadow_batch_obj); return ret; } @@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_engine(engine, dev_priv, id) { - if (intel_engine_needs_cmd_parser(engine)) { + if (intel_engine_using_cmd_parser(engine)) { active = true; break; } @@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * the parser enabled. * 9. Don't whitelist or handle oacontrol specially, as ownership * for oacontrol state is moving to i915-perf. + * 10. Support for Gen9 BCS Parsing */ - return 9; + return 10; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9ce35da4123ec52657f55f6a704c12c9c286080..e063e98d1e82ec993ce357898952318f38f9c324 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) if (!IS_GEN5(dev_priv)) return -ENODEV; + intel_runtime_pm_get(dev_priv); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) seq_printf(m, "GFX power: %ld\n", gfx); seq_printf(m, "Total power: %ld\n", chipset + gfx); + intel_runtime_pm_put(dev_priv); + return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f8cfd16be534cf3eece97c4a59a456e5d8769bde..b0d76a7a0946fdb5c8e9ca32ec6b7be5cc2422a9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -351,7 +351,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_LEGACY_SEMAPHORES(dev_priv); break; case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); + value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN); break; case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(dev_priv); @@ -1120,6 +1120,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(pdev); + /* + * We don't have a max segment size, so set it to the max so sg's + * debugging layer doesn't complain + */ + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + /* overlay on gen2 is broken and can't address above 1G */ if (IS_GEN2(dev_priv)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); @@ -1621,6 +1627,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) 
i915_gem_suspend_late(dev_priv); intel_display_set_init_power(dev_priv, false); + i915_rc6_ctx_wa_suspend(dev_priv); intel_uncore_suspend(dev_priv); /* @@ -1847,6 +1854,8 @@ static int i915_drm_resume_early(struct drm_device *dev) else intel_display_set_init_power(dev_priv, true); + i915_rc6_ctx_wa_resume(dev_priv); + intel_engines_sanitize(dev_priv); enable_rpm_wakeref_asserts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4aca5344863d6fc013470b41a706c4e7bd18d567..b65f3e38208a307e2be3bb1846c976e09af29162 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -801,6 +801,7 @@ struct intel_rps { struct intel_rc6 { bool enabled; + bool ctx_corrupted; u64 prev_hw_residency[4]; u64 cur_residency[4]; }; @@ -1592,6 +1593,8 @@ struct drm_i915_private { struct intel_uncore uncore; + struct mutex tlb_invalidate_lock; + struct i915_virtual_gpu vgpu; struct intel_gvt *gvt; @@ -2248,7 +2251,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) #define for_each_sgt_dma(__dmap, __iter, __sgt) \ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ ((__dmap) = (__iter).dma + (__iter).curr); \ - (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ + (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) /** @@ -2496,6 +2499,12 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) +/* + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution + * All later gens can run the final buffer from the ppgtt + */ +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv) + #define ENGINE_MASK(id) BIT(id) #define RENDER_RING ENGINE_MASK(RCS) #define BSD_RING ENGINE_MASK(VCS) @@ -2517,6 +2526,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv) +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) + #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) @@ -2549,10 +2560,12 @@ intel_info(const struct drm_i915_private *dev_priv) /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ + (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9) + /* WaRsDisableCoarsePowerGating:skl,cnl */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_CANNONLAKE(dev_priv) || \ - IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) + (IS_CANNONLAKE(dev_priv) || INTEL_GEN(dev_priv) == 9) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ @@ -2944,6 +2957,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); +struct i915_vma * __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags); + int i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); @@ -3337,12 +3358,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, +int intel_engine_cmd_parser(struct i915_gem_context *cxt, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 user_batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master); + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start); /* i915_perf.c */ extern void i915_perf_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503e2f4ffd5b9a5967b36d6ba5f1958..0835643e9495c5c37effb23acec8b30d94c2bfc2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -174,6 +174,11 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) if (INTEL_GEN(i915) >= 6) gen6_rps_idle(i915); + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) { + i915_rc6_ctx_wa_check(i915); + intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + } + intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ); intel_runtime_pm_put(i915); @@ -220,6 +225,9 @@ void i915_gem_unpark(struct drm_i915_private *i915) */ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + i915->gt.awake = true; if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ i915->gt.epoch = 1; @@ -1122,11 +1130,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, offset = offset_in_page(args->offset); for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { struct page *page = i915_gem_object_get_page(obj, idx); - int length; - - length = remain; - if (offset + length > PAGE_SIZE) - length = PAGE_SIZE - offset; + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pread(page, offset, length, user_data, page_to_phys(page) & obj_do_bit17_swizzling, @@ -1281,8 +1285,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (args->size == 0) return 0; - if (!access_ok(VERIFY_WRITE, - u64_to_user_ptr(args->data_ptr), + if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; @@ -1570,11 +1573,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, offset = offset_in_page(args->offset); for (idx = 
args->offset >> PAGE_SHIFT; remain; idx++) { struct page *page = i915_gem_object_get_page(obj, idx); - int length; - - length = remain; - if (offset + length > PAGE_SIZE) - length = PAGE_SIZE - offset; + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); ret = shmem_pwrite(page, offset, length, user_data, page_to_phys(page) & obj_do_bit17_swizzling, @@ -1612,9 +1611,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (args->size == 0) return 0; - if (!access_ok(VERIFY_READ, - u64_to_user_ptr(args->data_ptr), - args->size)) + if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; obj = i915_gem_object_lookup(file, args->handle); @@ -1829,6 +1826,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return 0; } +static inline bool +__vma_matches(struct vm_area_struct *vma, struct file *filp, + unsigned long addr, unsigned long size) +{ + if (vma->vm_file != filp) + return false; + + return vma->vm_start == addr && + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); +} + /** * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address * it is mapped to. @@ -1871,39 +1879,50 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * pages from. */ if (!obj->base.filp) { - i915_gem_object_put(obj); - return -ENXIO; + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); + if (IS_ERR_VALUE(addr)) + goto err; + if (args->flags & I915_MMAP_WC) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - i915_gem_object_put(obj); - return -EINTR; + addr = -EINTR; + goto err; } vma = find_vma(mm, addr); - if (vma) + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else addr = -ENOMEM; up_write(&mm->mmap_sem); + if (IS_ERR_VALUE(addr)) + goto err; /* This may race, but that's ok, it only gets set */ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); } i915_gem_object_put(obj); - if (IS_ERR((void *)addr)) - return addr; args->addr_ptr = (uint64_t) addr; - return 0; + +err: + i915_gem_object_put(obj); + return addr; } static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) @@ -1987,6 +2006,39 @@ compute_partial_view(struct drm_i915_gem_object *obj, return view; } +static void set_address_limits(struct vm_area_struct *area, + struct i915_vma *vma, + unsigned long *start_vaddr, + unsigned long *end_vaddr) +{ + unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ + long start, end; /* memory boundaries */ + + /* + * Let's move into the ">> PAGE_SHIFT" + * domain to be sure not to lose bits + */ + vm_start = area->vm_start >> PAGE_SHIFT; + vm_end = area->vm_end >> PAGE_SHIFT; + vma_size = vma->size >> PAGE_SHIFT; + + /* + * Calculate the memory boundaries by considering the offset + * provided by the user during memory mapping and the offset + * provided for the partial mapping. 
+ */ + start = vm_start; + start += vma->ggtt_view.partial.offset; + end = start + vma_size; + + start = max_t(long, start, vm_start); + end = min_t(long, end, vm_end); + + /* Let's move back into the "<< PAGE_SHIFT" domain */ + *start_vaddr = (unsigned long)start << PAGE_SHIFT; + *end_vaddr = (unsigned long)end << PAGE_SHIFT; +} + /** * i915_gem_fault - fault a page into the GTT * @vmf: fault info @@ -2014,8 +2066,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + unsigned long start, end; /* memory boundaries */ struct i915_vma *vma; pgoff_t page_offset; + unsigned long pfn; int ret; /* Sanity check that we allow writing into this object */ @@ -2097,12 +2151,14 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err_unpin; + set_address_limits(area, vma, &start, &end); + + pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; + pfn += (start - area->vm_start) >> PAGE_SHIFT; + pfn -= vma->ggtt_view.partial.offset; + /* Finally, remap it using the new GTT offset */ - ret = remap_io_mapping(area, - area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start), - &ggtt->iomap); + ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); if (ret) goto err_fence; @@ -2424,6 +2480,78 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) rcu_read_unlock(); } +struct reg_and_bit { + i915_reg_t reg; + u32 bit; +}; + +static struct reg_and_bit +get_reg_and_bit(const struct intel_engine_cs *engine, + const i915_reg_t *regs, const unsigned int num) +{ + const unsigned int class = engine->class; + struct reg_and_bit rb = { .bit = 1 }; + + if (WARN_ON_ONCE(class >= num || !regs[class].reg)) + return rb; + + rb.reg = regs[class]; + if (class == VIDEO_DECODE_CLASS) + rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ + + return rb; +} + +static void invalidate_tlbs(struct drm_i915_private *dev_priv) +{ + static const i915_reg_t gen8_regs[] = { + [RENDER_CLASS] = GEN8_RTCR, + [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ + [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, + [COPY_ENGINE_CLASS] = GEN8_BTCR, + }; + const unsigned int num = ARRAY_SIZE(gen8_regs); + const i915_reg_t *regs = gen8_regs; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + if (INTEL_GEN(dev_priv) < 8) + return; + + GEM_TRACE("\n"); + + assert_rpm_wakelock_held(dev_priv); + + mutex_lock(&dev_priv->tlb_invalidate_lock); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + for_each_engine(engine, dev_priv, id) { + /* + * HW architecture suggest typical invalidation time at 40us, + * with pessimistic cases up to 100us and a recommendation to + * cap at 1ms. We go a bit higher just in case. 
+ */ + const unsigned int timeout_us = 100; + const unsigned int timeout_ms = 4; + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + I915_WRITE_FW(rb.reg, rb.bit); + if (__intel_wait_for_register_fw(dev_priv, + rb.reg, rb.bit, 0, + timeout_us, timeout_ms, + NULL)) + DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n", + engine->name, timeout_ms); + } + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + mutex_unlock(&dev_priv->tlb_invalidate_lock); +} + static struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { @@ -2453,6 +2581,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) __i915_gem_object_reset_page_iter(obj); obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (intel_runtime_pm_get_if_in_use(i915)) { + invalidate_tlbs(i915); + intel_runtime_pm_put(i915); + } + } + return pages; } @@ -4411,6 +4548,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_address_space *vm = &dev_priv->ggtt.vm; + + return i915_gem_object_pin(obj, vm, view, size, alignment, + flags | PIN_GLOBAL); +} + +struct i915_vma * +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; int ret; @@ -4474,7 +4625,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin(vma, size, alignment, flags); if (ret) return ERR_PTR(ret); @@ -5592,6 +5743,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) i915_gem_cleanup_userptr(dev_priv); if (ret == -EIO) { + mutex_lock(&dev_priv->drm.struct_mutex); + /* * Allow engine initialisation to fail by marking the GPU as * wedged. But we only want to do this where the GPU is angry, @@ -5602,7 +5755,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv) "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); } - ret = 0; + + /* Minimal basic recovery for KMS */ + ret = i915_ggtt_enable_hw(dev_priv); + i915_gem_restore_gtt_mappings(dev_priv); + i915_gem_restore_fences(dev_priv); + intel_init_clock_gating(dev_priv); + + mutex_unlock(&dev_priv->drm.struct_mutex); } i915_gem_drain_freed_objects(dev_priv); @@ -5612,6 +5772,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) void i915_gem_fini(struct drm_i915_private *dev_priv) { i915_gem_suspend_late(dev_priv); + intel_disable_gt_powersave(dev_priv); /* Flush any outstanding unpin_work. 
*/ i915_gem_drain_workqueue(dev_priv); @@ -5623,6 +5784,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) i915_gem_contexts_fini(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); + intel_cleanup_gt_powersave(dev_priv); + intel_uc_fini_misc(dev_priv); i915_gem_cleanup_userptr(dev_priv); @@ -5744,6 +5907,8 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) spin_lock_init(&dev_priv->fb_tracking.lock); + mutex_init(&dev_priv->tlb_invalidate_lock); + err = i915_gemfs_init(dev_priv); if (err) DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b10770cfccd24bedd80a7fd67ac06d78dde695c1..ef383fd429885d8e52a2abb7626a9afb6ffd4fe8 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -124,6 +124,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) i915_ppgtt_put(ctx->ppgtt); + kfree(ctx->jump_whitelist); + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; @@ -339,6 +341,9 @@ __create_hw_context(struct drm_i915_private *dev_priv, else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; + ctx->jump_whitelist = NULL; + ctx->jump_whitelist_cmds = 0; + return ctx; err_pid: @@ -765,18 +770,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) return -ENOENT; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) + if (!ctx) { + mutex_unlock(&dev->struct_mutex); return -ENOENT; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - goto out; + } __destroy_hw_context(ctx, file_priv); mutex_unlock(&dev->struct_mutex); -out: i915_gem_context_put(ctx); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index b116e4942c10d13eb7e9771a4f50e737e47d185a..834d3951d8a9b9d4344b48476e59ee3291759e39 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -183,6 +183,12 @@ struct i915_gem_context { /** remap_slice: Bitmask of cache lines that need remapping */ u8 remap_slice; + /** jump_whitelist: Bit array for tracking cmds during cmdparsing */ + unsigned long *jump_whitelist; + + /** jump_whitelist_cmds: No of cmd slots available */ + u32 jump_whitelist_cmds; + /** handles_vma: rbtree to look up our context specific obj/vma for * the user handle. (user handles are per fd, but the binding is * per vm, which may be one per context or shared with the global GTT) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3f0c612d42e786d44cff5c86b59bb1da27c0fea9..c57fdb5befa1d10e6496bb489dd9e85d94588946 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -309,7 +309,9 @@ static inline u64 gen8_noncanonical_addr(u64 address) static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { - return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len; + return intel_engine_requires_cmd_parser(eb->engine) || + (intel_engine_using_cmd_parser(eb->engine) && + eb->args->batch_len); } static int eb_create(struct i915_execbuffer *eb) @@ -458,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb, * any non-page-aligned or non-canonical addresses. 
*/ if (unlikely(entry->flags & EXEC_OBJECT_PINNED && - entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) return -EINVAL; /* pad_to_size was once a reserved field, so sanitize it */ @@ -1428,7 +1430,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. */ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) + if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { @@ -1533,7 +1535,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) addr = u64_to_user_ptr(entry->relocs_ptr); size *= sizeof(struct drm_i915_gem_relocation_entry); - if (!access_ok(VERIFY_READ, addr, size)) + if (!access_ok(addr, size)) return -EFAULT; end = addr + size; @@ -1602,7 +1604,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) * happened we would make the mistake of assuming that the * relocations were valid. */ - user_access_begin(); + if (!user_access_begin(urelocs, size)) + goto end_user; + for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, &urelocs[copied].presumed_offset, @@ -1893,10 +1897,38 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) return 0; } -static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) +static struct i915_vma * +shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = eb->i915; + struct i915_address_space *vm; + u64 flags; + + /* + * PPGTT backed shadow buffers must be mapped RO, to prevent + * post-scan tampering + */ + if (CMDPARSER_USES_GGTT(dev_priv)) { + flags = PIN_GLOBAL; + vm = &dev_priv->ggtt.vm; + } else if (eb->vm->has_read_only) { + flags = PIN_USER; + vm = eb->vm; + i915_gem_object_set_readonly(obj); + } else { + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + return ERR_PTR(-EINVAL); + } + + return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags); +} + +static struct i915_vma *eb_parse(struct i915_execbuffer *eb) { struct drm_i915_gem_object *shadow_batch_obj; struct i915_vma *vma; + u64 batch_start; + u64 shadow_batch_start; int err; shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, @@ -1904,29 +1936,54 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) if (IS_ERR(shadow_batch_obj)) return ERR_CAST(shadow_batch_obj); - err = intel_engine_cmd_parser(eb->engine, + vma = shadow_batch_pin(eb, shadow_batch_obj); + if (IS_ERR(vma)) + goto out; + + batch_start = gen8_canonical_addr(eb->batch->node.start) + + eb->batch_start_offset; + + shadow_batch_start = gen8_canonical_addr(vma->node.start); + + err = intel_engine_cmd_parser(eb->ctx, + eb->engine, eb->batch->obj, - shadow_batch_obj, + batch_start, eb->batch_start_offset, eb->batch_len, - is_master); + shadow_batch_obj, + shadow_batch_start); + if (err) { - if (err == -EACCES) /* unhandled chained batch */ + i915_vma_unpin(vma); + + /* + * Unsafe GGTT-backed buffers can still be submitted safely + * as non-secure. 
+ * For PPGTT backing however, we have no choice but to forcibly + * reject unsafe buffers + */ + if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES)) + /* Execute original buffer non-secure */ vma = NULL; else vma = ERR_PTR(err); - goto out; - } - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) goto out; + } eb->vma[eb->buffer_count] = i915_vma_get(vma); eb->flags[eb->buffer_count] = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; vma->exec_flags = &eb->flags[eb->buffer_count]; eb->buffer_count++; + eb->batch_start_offset = 0; + eb->batch = vma; + + /* eb->batch_len unchanged */ + + if (CMDPARSER_USES_GGTT(eb->i915)) + eb->batch_flags |= I915_DISPATCH_SECURE; out: i915_gem_object_unpin_pages(shadow_batch_obj); @@ -2070,7 +2127,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args, return ERR_PTR(-EINVAL); user = u64_to_user_ptr(args->cliprects_ptr); - if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user))) + if (!access_ok(user, nfences * sizeof(*user))) return ERR_PTR(-EFAULT); fences = kvmalloc_array(nfences, sizeof(*fences), @@ -2177,6 +2234,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec, struct drm_syncobj **fences) { + struct drm_i915_private *i915 = to_i915(dev); struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; @@ -2187,7 +2245,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); - eb.i915 = to_i915(dev); + eb.i915 = i915; eb.file = file; eb.args = args; if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) @@ -2209,8 +2267,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { + if (INTEL_GEN(i915) >= 11) + return -ENODEV; + + /* Return -EPERM to trigger fallback code on old binaries. */ + if (!HAS_SECURE_BATCHES(i915)) + return -EPERM; + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; eb.batch_flags |= I915_DISPATCH_SECURE; } @@ -2297,34 +2362,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } + if (eb.batch_len == 0) + eb.batch_len = eb.batch->size - eb.batch_start_offset; + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; - vma = eb_parse(&eb, drm_is_current_master(file)); + vma = eb_parse(&eb); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_vma; } - - if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. - */ - eb.batch_flags |= I915_DISPATCH_SECURE; - eb.batch_start_offset = 0; - eb.batch = vma; - } } - if (eb.batch_len == 0) - eb.batch_len = eb.batch->size - eb.batch_start_offset; - /* * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. @@ -2601,7 +2651,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, unsigned int i; /* Copy the new buffer offsets back to the user's exec list. */ - user_access_begin(); + /* + * Note: count * sizeof(*user_exec_list) does not overflow, + * because we checked 'count' in check_buffer_count(). + * + * And this range already got effectively checked earlier + * when we did the "copy_from_user()" above. 
+ */ + if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) + goto end_user; + for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f00c7fbef79efc6886116e01dc023fe9008c78f4..d4c6aa7fbac8d57084c052100e77243658a50a14 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -158,7 +158,8 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; - if (enable_ppgtt == 1) + /* Full PPGTT is required by the Gen9 cmdparser */ + if (enable_ppgtt == 1 && INTEL_GEN(dev_priv) != 9) return 1; if (enable_ppgtt == 2 && has_full_ppgtt) @@ -1058,7 +1059,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, do { vaddr[idx->pte] = pte_encode | iter->dma; - iter->dma += PAGE_SIZE; + iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { iter->sg = __sg_next(iter->sg); if (!iter->sg) { @@ -1768,9 +1769,9 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) if (i == 4) continue; - seq_printf(m, "\t\t(%03d, %04d) %08lx: ", + seq_printf(m, "\t\t(%03d, %04d) %08llx: ", pde, pte, - (pde * GEN6_PTES + pte) * PAGE_SIZE); + (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); for (i = 0; i < 4; i++) { if (vaddr[pte + i] != scratch_pte) seq_printf(m, " %08x", vaddr[pte + i]); @@ -1910,7 +1911,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, do { vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); - iter.dma += PAGE_SIZE; + iter.dma += I915_GTT_PAGE_SIZE; if (iter.dma == iter.max) { iter.sg = __sg_next(iter.sg); if (!iter.sg) @@ -2048,7 +2049,7 @@ static int pd_vma_bind(struct i915_vma *vma, { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); struct gen6_hw_ppgtt *ppgtt = vma->private; - u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE; + u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; struct i915_page_table *pt; unsigned int pde; @@ -2128,6 +2129,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + int err; /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt @@ -2143,9 +2145,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. 
*/ - return i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + return 0; + +unpin: + ppgtt->pin_count = 0; + return err; } void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) @@ -2174,7 +2184,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.i915 = i915; ppgtt->base.vm.dma = &i915->drm.pdev->dev; - ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; + ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; i915_address_space_init(&ppgtt->base.vm, i915); @@ -3031,7 +3041,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) bdw_gmch_ctl = 1 << bdw_gmch_ctl; #ifdef CONFIG_X86_32 - /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ + /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ if (bdw_gmch_ctl > 4) bdw_gmch_ctl = 4; #endif @@ -3729,9 +3739,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, * the entries so the sg list can be happily traversed. * The only thing we need are DMA addresses. */ - sg_set_page(sg, NULL, PAGE_SIZE, 0); + sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); sg_dma_address(sg) = in[offset + src_idx]; - sg_dma_len(sg) = PAGE_SIZE; + sg_dma_len(sg) = I915_GTT_PAGE_SIZE; sg = sg_next(sg); src_idx -= stride; } @@ -3744,7 +3754,7 @@ static noinline struct sg_table * intel_rotate_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - const unsigned long n_pages = obj->base.size / PAGE_SIZE; + const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE; unsigned int size = intel_rotation_info_size(rot_info); struct sgt_iter sgt_iter; dma_addr_t dma_addr; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 2a116a91420bc6fab6b31c3638a596bb64b61a33..680e0dc5db4bb83b0d43dd0995213b995f321c73 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -42,13 +42,15 @@ #include "i915_selftest.h" #include "i915_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT(12) -#define I915_GTT_PAGE_SIZE_64K BIT(16) -#define I915_GTT_PAGE_SIZE_2M BIT(21) +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE #define I915_FENCE_REG_NONE -1 @@ -662,20 +664,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, u64 start, u64 end, unsigned int flags); /* Flags used by pin/bind&friends. 
*/ -#define PIN_NONBLOCK BIT(0) -#define PIN_MAPPABLE BIT(1) -#define PIN_ZONE_4G BIT(2) -#define PIN_NONFAULT BIT(3) -#define PIN_NOEVICT BIT(4) - -#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT(8) - -#define PIN_HIGH BIT(9) -#define PIN_OFFSET_BIAS BIT(10) -#define PIN_OFFSET_FIXED BIT(11) +#define PIN_NONBLOCK BIT_ULL(0) +#define PIN_MAPPABLE BIT_ULL(1) +#define PIN_ZONE_4G BIT_ULL(2) +#define PIN_NONFAULT BIT_ULL(3) +#define PIN_NOEVICT BIT_ULL(4) + +#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT_ULL(8) + +#define PIN_HIGH BIT_ULL(9) +#define PIN_OFFSET_BIAS BIT_ULL(10) +#define PIN_OFFSET_FIXED BIT_ULL(11) #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 83e5e01fa9eaa9c8586445329089959290f2f32d..2e3a713e9bcd8bcdd385402fc2c9a4ef65247729 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -136,6 +136,7 @@ struct drm_i915_gem_object { * activity? */ #define I915_BO_ACTIVE_REF 0 +#define I915_BO_WAS_BOUND_BIT 1 /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 2c9b284036d10217a013e146eba8704123006d65..8761df92e7e094e2f60def2612a360c59a56ab23 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -691,8 +691,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, i915_gem_gtt_finish_pages(obj, pages); for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) + if (obj->mm.dirty && trylock_page(page)) { + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + * + * However...! + * + * The mmu-notifier can be invalidated for a + * migrate_page that is already holding the lock + * on the page. Such a try_to_unmap() will result + * in us calling put_pages() and so recursively try + * to lock the page. We avoid that deadlock with + * a trylock_page() and in exchange we risk missing + * some page dirtying. + */ set_page_dirty(page); + unlock_page(page); + } mark_page_accessed(page); put_page(page); @@ -791,8 +811,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, if (offset_in_page(args->user_ptr | args->user_size)) return -EINVAL; - if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ?
VERIFY_READ : VERIFY_WRITE, - (char __user *)(unsigned long)args->user_ptr, args->user_size)) + if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 0e5c580d117cfcced6254ddab4dfcec04611bea9..e869daf9c8a9e0c21f409506ab60240f76bd95c7 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -52,7 +52,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || + if (!access_ok(request, sizeof(*request)) || __put_user(req32.param, &request->param) || __put_user((void __user *)(unsigned long)req32.value, &request->value)) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 6bf10952c7240363fdcd2df907979ef9aef0d247..64031fcce30cafba6b6b772d02558653007a0062 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3149,7 +3149,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, if (!n_regs) return NULL; - if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2)) + if (!access_ok(regs, n_regs * sizeof(u32) * 2)) return ERR_PTR(-EFAULT); /* No is_valid function means we're not allowing any register to be programmed. */ diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5f106776e0a148e034e10e64ccbb7..b7fda69342fcb0e71fca62339d4d50639c5805fd 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -827,8 +827,8 @@ create_event_attributes(struct drm_i915_private *i915) const char *name; const char *unit; } events[] = { - __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), - __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), + __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), + __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), __event(I915_PMU_INTERRUPTS, "interrupts", NULL), __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), }; diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 3f502eef243166612321dd7cc219374f1daae43b..757b8ab799280c1c238200005970cb435d5102ef 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -47,7 +47,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, if (topo.flags != 0) return -EINVAL; - if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr), + if (!access_ok(u64_to_user_ptr(query_item->data_ptr), total_length)) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9e63cd47b60f30a5b0f7682d2c3038dd81e2b3ce..830049985e56dfa475c236a47ee0f7baf0e1bbcd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -32,7 +32,7 @@ * macros. Do **not** mass change existing definitions just to update the style. * * Layout - * '''''' + * ~~~~~~ * * Keep helper macros near the top. For example, _PIPE() and friends. * @@ -78,7 +78,7 @@ * style. Use lower case in hexadecimal values. * * Naming - * '''''' + * ~~~~~~ * * Try to name registers according to the specs. If the register name changes in * the specs from platform to another, stick to the original name. @@ -96,7 +96,7 @@ * suffix to the name. For example, ``_SKL`` or ``_GEN8``. 
* * Examples - * '''''''' + * ~~~~~~~~ * * (Note that the values in the example are indented using spaces instead of * TABs to avoid misalignment in generated documentation. Use TABs in the @@ -387,6 +387,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOCHK_PPGTT_WT_HSW (0x2 << 3) #define ECOCHK_PPGTT_WB_HSW (0x3 << 3) +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1 << 13) #define ECOBITS_PPGTT_CACHE64B (3 << 8) @@ -471,6 +473,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define BCS_SWCTRL _MMIO(0x22200) +/* There are 16 GPR registers */ +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) + #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -2097,8 +2103,12 @@ enum i915_power_well_id { /* ICL PHY DFLEX registers */ #define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) -#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) -#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) +#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -2421,6 +2431,12 @@ enum i915_power_well_id { #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) +#define GEN8_RTCR _MMIO(0x4260) +#define GEN8_M1TCR _MMIO(0x4264) +#define GEN8_M2TCR _MMIO(0x4268) +#define GEN8_BTCR _MMIO(0x426c) +#define GEN8_VTCR _MMIO(0x4270) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -6527,7 +6543,7 @@ enum { #define PLANE_CTL_YUV422_UYVY (1 << 16) #define PLANE_CTL_YUV422_YVYU (2 << 16) #define PLANE_CTL_YUV422_VYUY (3 << 16) -#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15) +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ #define PLANE_CTL_TILED_MASK (0x7 << 10) @@ -7001,6 +7017,10 @@ enum { #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C) #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) +/* Display Internal Timeout Register */ +#define RM_TIMEOUT _MMIO(0x42060) +#define MMIO_TIMEOUT_US(us) ((us) << 0) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 869cf4a3b6de75fee593c0f66c953cc1035434a6..a6cb3e034dd5a72bfb388c7e414ba6005f97e507 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -100,6 +100,9 @@ static struct _balloon_info_ bl_info; static void vgt_deballoon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + if (!drm_mm_node_allocated(node)) + return; + DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", node->start, node->start + node->size, diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 98358b4b36dea7e13177bdf38554ffaad4f994e9..9aceacc43f4b749f5eb0f22786cbd9b4483e0a0f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -335,6 +335,10 
@@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, return ret; vma->flags |= bind_flags; + + if (vma->obj) + set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); + return 0; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 769f3f5866611174cbabeca5e4d1fbb0711b9b86..ee3ca2de983b96ea52ffda2c794963f2b23d705d 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -144,6 +144,9 @@ static const struct { /* HDMI N/CTS table */ #define TMDS_297M 297000 #define TMDS_296M 296703 +#define TMDS_594M 594000 +#define TMDS_593M 593407 + static const struct { int sample_rate; int clock; @@ -164,6 +167,20 @@ static const struct { { 176400, TMDS_297M, 18816, 247500 }, { 192000, TMDS_296M, 23296, 281250 }, { 192000, TMDS_297M, 20480, 247500 }, + { 44100, TMDS_593M, 8918, 937500 }, + { 44100, TMDS_594M, 9408, 990000 }, + { 48000, TMDS_593M, 5824, 562500 }, + { 48000, TMDS_594M, 6144, 594000 }, + { 32000, TMDS_593M, 5824, 843750 }, + { 32000, TMDS_594M, 3072, 445500 }, + { 88200, TMDS_593M, 17836, 937500 }, + { 88200, TMDS_594M, 18816, 990000 }, + { 96000, TMDS_593M, 11648, 562500 }, + { 96000, TMDS_594M, 12288, 594000 }, + { 176400, TMDS_593M, 35672, 937500 }, + { 176400, TMDS_594M, 37632, 990000 }, + { 192000, TMDS_593M, 23296, 562500 }, + { 192000, TMDS_594M, 24576, 594000 }, }; /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 29075c763428055ddb3625a80b59643e694f3d76..7b4906ede148b425b663c894573d1fc613b36f15 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2208,6 +2208,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); + /* + * "For DP audio configuration, cdclk frequency shall be set to + * meet the following requirements: + * DP Link Frequency(MHz) | Cdclk frequency(MHz) + * 270 | 320 or higher + * 162 | 200 or higher" + */ + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) + min_cdclk = max(crtc_state->port_clock, min_cdclk); + /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index cf9b600cca79f22adbe01ecf05fede19c5560392..ca1a578d790d31ff1c55213d8db661f2d5d7f0ea 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -282,10 +282,17 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, uint32_t i; uint32_t *dmc_payload; uint32_t required_version; + size_t fsize; if (!fw) return NULL; + fsize = sizeof(struct intel_css_header) + + sizeof(struct intel_package_header) + + sizeof(struct intel_dmc_header); + if (fsize > fw->size) + goto error_truncated; + /* Extract CSS Header information*/ css_header = (struct intel_css_header *)fw->data; if (sizeof(struct intel_css_header) != @@ -360,6 +367,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return NULL; } readcount += dmc_offset; + fsize += dmc_offset; + if (fsize > fw->size) + goto error_truncated; /* Extract dmc_header information. */ dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; @@ -391,6 +401,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes. 
*/ nbytes = dmc_header->fw_size * 4; + fsize += nbytes; + if (fsize > fw->size) + goto error_truncated; + if (nbytes > CSR_MAX_FW_SIZE) { DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes); return NULL; @@ -404,6 +418,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, } return memcpy(dmc_payload, &fw->data[readcount], nbytes); + +error_truncated: + DRM_ERROR("Truncated DMC firmware, rejecting.\n"); + return NULL; } static void csr_load_work_fn(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index c9af34861d9e3a5bf3c090f4d5cf7afafed83dfa..b4b1f9ca05b68307be26172b17b5d0b4d85c8820 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, return DDI_CLK_SEL_TBT_810; default: MISSING_CASE(clock); - break; + return DDI_CLK_SEL_NONE; } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0ef0c6448d53a835fbdf5319a8010c64d613bd0f..01fa98299bae65a125862e57c307cdbce07c3d32 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_disabled_mask; u32 n_disabled; - if (!(sseu->subslice_mask[ss] & BIT(ss))) + if (!(sseu->subslice_mask[s] & BIT(ss))) /* skip disabled subslice */ continue; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d2951096bca0d48caaf10abf1827ef81152abdc8..6902fd2da19ca43eb8337774d79f732a24723716 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2712,6 +2712,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, if (size_aligned * 2 > dev_priv->stolen_usable_size) return false; + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + break; + default: + DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", + fb->modifier); + return false; + } + mutex_lock(&dev->struct_mutex); obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, base_aligned, @@ -2721,8 +2732,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, if (!obj) return false; - if (plane_config->tiling == I915_TILING_X) - obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; + switch (plane_config->tiling) { + case I915_TILING_NONE: + break; + case I915_TILING_X: + case I915_TILING_Y: + obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; + break; + default: + MISSING_CASE(plane_config->tiling); + return false; + } mode_cmd.pixel_format = fb->format->format; mode_cmd.width = fb->width; @@ -2754,20 +2774,33 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, plane_state->base.visible = visible; - /* FIXME pre-g4x don't work like this */ - if (visible) { + if (visible) crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); - crtc_state->active_planes |= BIT(plane->id); - } else { + else crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); - crtc_state->active_planes &= ~BIT(plane->id); - } DRM_DEBUG_KMS("%s active planes 0x%x\n", crtc_state->base.crtc->name, crtc_state->active_planes); } +static void fixup_active_planes(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_plane *plane; + + 
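The fixup_active_planes() helper being added here rebuilds the CRTC's active_planes bitmask from the plane_mask of attached planes, since plane ids are unique per pipe. A stand-alone sketch of that reconstruction, using simplified plane ids that are assumptions for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the per-pipe plane ids (illustrative only). */
	enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_SPRITE1, PLANE_CURSOR };

	/* Rebuild the active-plane bitmask by OR-ing one bit per visible plane id. */
	static uint32_t rebuild_active_planes(const enum plane_id *visible, unsigned int n)
	{
		uint32_t active = 0;
		unsigned int i;

		for (i = 0; i < n; i++)
			active |= 1u << visible[i];
		return active;
	}

	int main(void)
	{
		enum plane_id visible[] = { PLANE_PRIMARY, PLANE_CURSOR };

		/* PLANE_PRIMARY = bit 0, PLANE_CURSOR = bit 3 -> 0x9 */
		printf("active_planes = 0x%x\n", rebuild_active_planes(visible, 2));
		return 0;
	}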
/* + * Active_planes aliases if multiple "primary" or cursor planes + * have been used on the same (or wrong) pipe. plane_mask uses + * unique ids, hence we can use that to reconstruct active_planes. + */ + crtc_state->active_planes = 0; + + drm_for_each_plane_mask(plane, &dev_priv->drm, + crtc_state->base.plane_mask) + crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); +} + static void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane) { @@ -2777,6 +2810,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, to_intel_plane_state(plane->base.state); intel_set_plane_visible(crtc_state, plane_state, false); + fixup_active_planes(crtc_state); if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); @@ -2795,7 +2829,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; - struct drm_crtc_state *crtc_state = intel_crtc->base.state; struct intel_plane *intel_plane = to_intel_plane(primary); struct intel_plane_state *intel_state = to_intel_plane_state(plane_state); @@ -2885,10 +2918,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, plane_state->fb = fb; plane_state->crtc = &intel_crtc->base; - intel_set_plane_visible(to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state), - true); - atomic_or(to_intel_plane(primary)->frontbuffer_bit, &obj->frontbuffer_bits); } @@ -3552,11 +3581,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) case I915_FORMAT_MOD_Y_TILED: return PLANE_CTL_TILED_Y; case I915_FORMAT_MOD_Y_TILED_CCS: - return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: - return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; default: MISSING_CASE(fb_modifier); } @@ -8803,13 +8832,14 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + plane_config->tiling = I915_TILING_Y; + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Yf_TILED; @@ -12630,17 +12660,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!new_crtc_state->active) { - /* - * Make sure we don't call initial_watermarks - * for ILK-style watermark updates. - * - * No clue what this is supposed to achieve. 
- */ - if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.initial_watermarks(intel_state, - to_intel_crtc_state(new_crtc_state)); - } + /* FIXME unify this for all platforms */ + if (!new_crtc_state->active && + !HAS_GMCH_DISPLAY(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(intel_state, + to_intel_crtc_state(new_crtc_state)); } } @@ -14573,7 +14598,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, fb->height < SKL_MIN_YUV_420_SRC_H || (fb->width % 4) != 0 || (fb->height % 4) != 0)) { DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); - return -EINVAL; + goto err; } for (i = 0; i < fb->format->num_planes; i++) { @@ -15365,17 +15390,6 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) POSTING_READ(DPLL(pipe)); } -static bool intel_plane_mapping_ok(struct intel_crtc *crtc, - struct intel_plane *plane) -{ - enum pipe pipe; - - if (!plane->get_hw_state(plane, &pipe)) - return true; - - return pipe == crtc->pipe; -} - static void intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) { @@ -15387,13 +15401,20 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) for_each_intel_crtc(&dev_priv->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct intel_crtc *plane_crtc; + enum pipe pipe; + + if (!plane->get_hw_state(plane, &pipe)) + continue; - if (intel_plane_mapping_ok(crtc, plane)) + if (pipe == crtc->pipe) continue; DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", plane->base.name); - intel_plane_disable_noatomic(crtc, plane); + + plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -15441,13 +15462,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); } - /* restore vblank interrupts to correct state */ - drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { struct intel_plane *plane; - drm_crtc_vblank_on(&crtc->base); - /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { const struct intel_plane_state *plane_state = @@ -15565,23 +15582,32 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) } /* FIXME read out full plane state for all planes */ -static void readout_plane_state(struct intel_crtc *crtc) +static void readout_plane_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; + struct intel_crtc *crtc; - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + for_each_intel_plane(&dev_priv->drm, plane) { struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - enum pipe pipe; + struct intel_crtc_state *crtc_state; + enum pipe pipe = PIPE_A; bool visible; visible = plane->get_hw_state(plane, &pipe); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = to_intel_crtc_state(crtc->base.state); + intel_set_plane_visible(crtc_state, plane_state, visible); } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + fixup_active_planes(crtc_state); + } } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15613,13 +15639,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active) dev_priv->active_crtcs |= 1 << crtc->pipe; 
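	/*
	 * A minimal sketch of the mask bookkeeping above, assuming the
	 * per-crtc state has just been read out: like fixup_active_planes(),
	 * the bitmask is reconstructed from unique hardware ids rather than
	 * trusted from stale software state.
	 *
	 *	u32 active = 0;
	 *	for_each_intel_crtc(&dev_priv->drm, crtc)
	 *		if (to_intel_crtc_state(crtc->base.state)->base.active)
	 *			active |= BIT(crtc->pipe);
	 */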
- readout_plane_state(crtc); - DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, enableddisabled(crtc_state->base.active)); } + readout_plane_state(dev_priv); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -15789,7 +15815,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; int i; @@ -15800,15 +15825,23 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* HW state is read out, now we need to sanitize this mess. */ get_encoder_power_domains(dev_priv); - intel_sanitize_plane_mapping(dev_priv); + /* + * intel_sanitize_plane_mapping() may need to do vblank + * waits, so we need vblank interrupts restored beforehand. + */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + drm_crtc_vblank_reset(&crtc->base); - for_each_intel_encoder(dev, encoder) { - intel_sanitize_encoder(encoder); + if (crtc->active) + drm_crtc_vblank_on(&crtc->base); } - for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + intel_sanitize_plane_mapping(dev_priv); + for_each_intel_encoder(dev, encoder) + intel_sanitize_encoder(encoder); + + for_each_intel_crtc(&dev_priv->drm, crtc) { intel_sanitize_crtc(crtc, ctx); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); @@ -15939,8 +15972,6 @@ void intel_modeset_cleanup(struct drm_device *dev) flush_work(&dev_priv->atomic_helper.free_work); WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); - intel_disable_gt_powersave(dev_priv); - /* * Interrupts and polling as the first thing to avoid creating havoc. * Too much stuff here (turning of connectors, ...) 
would @@ -15968,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev_priv); - intel_teardown_gmbus(dev_priv); destroy_workqueue(dev_priv->modeset_wq); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1193202766a2cb86721cc8aa918c5f692c822c7e..20cd4c8acecc31d22da45116738a82259971cfa6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -401,6 +401,22 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, return true; } +static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, + int link_rate, + uint8_t lane_count) +{ + const struct drm_display_mode *fixed_mode = + intel_dp->attached_connector->panel.fixed_mode; + int mode_rate, max_rate; + + mode_rate = intel_dp_link_required(fixed_mode->clock, 18); + max_rate = intel_dp_max_data_rate(link_rate, lane_count); + if (mode_rate > max_rate) + return false; + + return true; +} + int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, uint8_t lane_count) { @@ -410,9 +426,23 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, intel_dp->num_common_rates, link_rate); if (index > 0) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp->common_rates[index - 1], + lane_count)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp_max_common_rate(intel_dp), + lane_count >> 1)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); intel_dp->max_link_lane_count = lane_count >> 1; } else { @@ -4727,6 +4757,16 @@ intel_dp_long_pulse(struct intel_connector *connector, intel_dp_retrain_link(encoder, ctx); } + /* + * Some external monitors do not signal loss of link synchronization + * with an IRQ_HPD, so force a link status check. 
+ */ + if (!intel_dp_is_edp(intel_dp)) { + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + intel_dp_retrain_link(encoder, ctx); + } + /* * Clearing NACK and defer counts to get their exact values * while reading EDID which are required by Compliance tests diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 4da6e33c7fa1c9a06839fc3777f074e66309f096..329309a085cb7bd940f6782d2034ea9b6e1728e5 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -352,22 +352,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) return; failure_handling: - /* Dont fallback and prune modes if its eDP */ - if (!intel_dp_is_edp(intel_dp)) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - if (!intel_dp_get_link_train_fallback_values(intel_dp, - intel_dp->link_rate, - intel_dp->lane_count)) - /* Schedule a Hotplug Uevent to userspace to start modeset */ - schedule_work(&intel_connector->modeset_retry_work); - } else { - DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + if (!intel_dp_get_link_train_fallback_values(intel_dp, + intel_dp->link_rate, + intel_dp->lane_count)) + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&intel_connector->modeset_retry_work); return; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 4ecd65375603391ee02f6a11831feecdae397b0b..58ba14966d4f1128ec396da03db01370033f6212 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -38,11 +38,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; - struct intel_connector *connector = - to_intel_connector(conn_state->connector); + struct drm_connector *connector = conn_state->connector; + void *port = to_intel_connector(connector)->port; struct drm_atomic_state *state = pipe_config->base.state; int bpp; - int lane_count, slots; + int lane_count, slots = 0; const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int mst_pbn; bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, @@ -70,17 +70,23 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port)) + if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port)) pipe_config->has_audio = true; mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); pipe_config->pbn = mst_pbn; - slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, - connector->port, mst_pbn); - if (slots < 0) { - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); - return false; + /* Zombie connectors can't have VCPI slots */ + if (READ_ONCE(connector->registered)) { + slots = 
drm_dp_atomic_find_vcpi_slots(state, + &intel_dp->mst_mgr, + port, + mst_pbn); + if (slots < 0) { + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", + slots); + return false; + } } intel_link_compute_m_n(bpp, lane_count, @@ -311,9 +317,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) struct edid *edid; int ret; - if (!intel_dp) { + if (!READ_ONCE(connector->registered)) return intel_connector_update_modes(connector, NULL); - } edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); ret = intel_connector_update_modes(connector, edid); @@ -328,9 +333,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - if (!intel_dp) + if (!READ_ONCE(connector->registered)) return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); + return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, + intel_connector->port); } static void @@ -370,7 +376,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; - if (!intel_dp) + if (!READ_ONCE(connector->registered)) return MODE_ERROR; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -402,8 +408,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c struct intel_dp *intel_dp = intel_connector->mst_port; struct intel_crtc *crtc = to_intel_crtc(state->crtc); - if (!intel_dp) - return NULL; return &intel_dp->mst_encoders[crtc->pipe]->base.base; } @@ -452,6 +456,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo if (!intel_connector) return NULL; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); @@ -462,10 +470,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); - intel_connector->get_hw_state = intel_dp_mst_get_hw_state; - intel_connector->mst_port = intel_dp; - intel_connector->port = port; - for_each_pipe(dev_priv, pipe) { struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; @@ -503,7 +507,6 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector) static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -512,10 +515,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, if (dev_priv->fbdev) drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, connector); - /* prevent race with the check in ->detect */ - drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL); - intel_connector->mst_port = NULL; - drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); drm_connector_put(connector); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8fc61e96754ff540626ce6421a275408359be6e4..b1154d80356428ecb9b9b591935846fdfdae8c91 100644 
--- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -209,6 +209,16 @@ struct intel_fbdev { unsigned long vma_flags; async_cookie_t cookie; int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended : 1; + /* Set when a hotplug was received while HPD processing was + * suspended + */ + bool hpd_waiting : 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; }; struct intel_encoder { @@ -2054,6 +2064,9 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 01d1d2088f0488d1a8a8de4219c0c6307e0916ea..728a20e1f638c75d513601140111041832e5d468 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -1267,6 +1267,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (!HAS_FBC(dev_priv)) return 0; + /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */ + if (IS_GEMINILAKE(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) return 1; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb2f9fce34cd2a3e627d183d2bca1028d7f48282..2d6506c08bf720bab19c174e3a938ab8951a818d 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -677,6 +677,7 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; + mutex_init(&ifbdev->hpd_lock); drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) @@ -750,6 +751,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) intel_fbdev_destroy(ifbdev); } +/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD + * processing, fbdev will perform a full connector reprobe if a hotplug event + * was received while HPD was suspended. 
+ */ +static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) +{ + bool send_hpd = false; + + mutex_lock(&ifbdev->hpd_lock); + ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; + send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; + ifbdev->hpd_waiting = false; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd) { + DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); + drm_fb_helper_hotplug_event(&ifbdev->helper); + } +} + void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -771,6 +792,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous */ if (state != FBINFO_STATE_RUNNING) flush_work(&dev_priv->fbdev_suspend_work); + console_lock(); } else { /* @@ -798,17 +820,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); + + intel_fbdev_hpd_set_suspend(ifbdev, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + bool send_hpd; if (!ifbdev) return; intel_fbdev_sync(ifbdev); - if (ifbdev->vma || ifbdev->helper.deferred_setup) + + mutex_lock(&ifbdev->hpd_lock); + send_hpd = !ifbdev->hpd_suspended; + ifbdev->hpd_waiting = true; + mutex_unlock(&ifbdev->hpd_lock); + + if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 648a13c6043c0071ddd495424691d795b39b96a1..9a801813023728e2e0a05bb5feba1f8415eb3269 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_for_each_connector_iter(connector, &conn_iter) { struct intel_connector *intel_connector = to_intel_connector(connector); - if (intel_connector->encoder->hpd_pin == pin) { + /* Don't check MST ports, they don't have pins */ + if (!intel_connector->mst_port && + intel_connector->encoder->hpd_pin == pin) { if (connector->polled != intel_connector->polled) DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", connector->name); @@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; + u32 long_hpd_pulse_mask = 0; + u32 short_hpd_pulse_mask = 0; + enum hpd_pin pin; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); + + /* + * Determine whether ->hpd_pulse() exists for each pin, and + * whether we have a short or a long pulse. This is needed + * as each pin may have up to two encoders (HDMI and DP) and + * only the one of them (DP) will have ->hpd_pulse(). + */ for_each_intel_encoder(&dev_priv->drm, encoder) { - enum hpd_pin pin = encoder->hpd_pin; bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); + enum port port = encoder->port; + bool long_hpd; + pin = encoder->hpd_pin; if (!(BIT(pin) & pin_mask)) continue; - if (has_hpd_pulse) { - bool long_hpd = long_mask & BIT(pin); - enum port port = encoder->port; + if (!has_hpd_pulse) + continue; - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), - long_hpd ? "long" : "short"); - /* - * For long HPD pulses we want to have the digital queue happen, - * but we still want HPD storm detection to function. 
- */ - queue_dig = true; - if (long_hpd) { - dev_priv->hotplug.long_port_mask |= (1 << port); - } else { - /* for short HPD just trigger the digital queue */ - dev_priv->hotplug.short_port_mask |= (1 << port); - continue; - } + long_hpd = long_mask & BIT(pin); + + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + long_hpd ? "long" : "short"); + queue_dig = true; + + if (long_hpd) { + long_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.long_port_mask |= BIT(port); + } else { + short_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.short_port_mask |= BIT(port); } + } + + /* Now process each pin just once */ + for_each_hpd_pin(pin) { + bool long_hpd; + + if (!(BIT(pin) & pin_mask)) + continue; if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { /* @@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) continue; - if (!has_hpd_pulse) { + /* + * Delegate to ->hpd_pulse() if one of the encoders for this + * pin has it, otherwise let the hotplug_work deal with this + * pin directly. + */ + if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { + long_hpd = long_hpd_pulse_mask & BIT(pin); + } else { dev_priv->hotplug.event_bits |= BIT(pin); + long_hpd = true; queue_hp = true; } + if (!long_hpd) + continue; + if (intel_hpd_irq_storm_detect(dev_priv, pin)) { dev_priv->hotplug.event_bits &= ~BIT(pin); storm_detected = true; diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index cdf19553ffacd28f1097bb2096b8cc35d4654b84..5d5336fbe7b05836b7bedc28bffbfef9e6b08b4f 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->lpe_audio.irq); -} + dev_priv->lpe_audio.irq = -1; + dev_priv->lpe_audio.platdev = NULL; +} /** * intel_lpe_audio_notify() - notify lpe audio event diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 174479232e94312bc232a7792ded191e960fee49..13e97faabaa74d808ede2f59b69a8bd4247111c9 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq) reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); - /* True 32b PPGTT with dynamic page allocation: update PDP + /* + * True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. * PML4 is allocated during ppgtt init, so this is not needed * in 48-bit mode. @@ -432,6 +433,22 @@ static u64 execlists_update_context(struct i915_request *rq) if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) execlists_update_context_pdps(ppgtt, reg_state); + /* + * Make sure the context image is complete before we submit it to HW. + * + * Ostensibly, writes (including the WCB) should be flushed prior to + * an uncached write such as our mmio register access, the empirical + * evidence (esp. on Braswell) suggests that the WC write into memory + * may not be visible to the HW prior to the completion of the UC + * register write and that we may begin execution from the context + * before its image is complete leading to invalid PD chasing. 
+ * + * Furthermore, Braswell, at least, wants a full mb to be sure that + * the writes are coherent in memory (visible to the GPU) prior to + * execution, and not just visible to other CPUs (as is the result of + * wmb). + */ + mb(); return ce->lrc_desc; } @@ -1545,6 +1562,15 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + i915_ggtt_offset(engine->scratch) + + 2 * CACHELINE_BYTES); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaClearSlmSpaceAtContextSwitch:kbl */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 43ae9de12ba3eb821c12d63e9c7b1ec923eace48..8d731eb1de69cb1177a6564d324e4a5d56d9ee75 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -114,6 +114,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); + + /* + * Lower the display internal timeout. + * This is needed to avoid any hard hangs when DSI port PLL + * is off and a MMIO access is attempted by any privilege + * application, using batch buffers or any other means. + */ + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -2492,6 +2500,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2521,6 +2532,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2544,6 +2558,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, { int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2942,8 +2959,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_ERROR("%s WM%d latency not provided\n", - name, level); + DRM_DEBUG_KMS("%s WM%d latency not provided\n", + name, level); continue; } @@ -2998,6 +3015,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); } +static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +{ + /* + * On some SNB machines (Thinkpad X220 Tablet at least) + * LP3 usage can cause vblank interrupts to be lost. + * The DEIIR bit will go high but it looks like the CPU + * never gets interrupted. + * + * It's not clear whether other interrupt source could + * be affected or if this is somehow limited to vblank + * interrupts only. To play it safe we disable LP3 + * watermarks entirely. 
+ */ + if (dev_priv->wm.pri_latency[3] == 0 && + dev_priv->wm.spr_latency[3] == 0 && + dev_priv->wm.cur_latency[3] == 0) + return; + + dev_priv->wm.pri_latency[3] = 0; + dev_priv->wm.spr_latency[3] = 0; + dev_priv->wm.cur_latency[3] = 0; + + DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); @@ -3014,8 +3059,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); - if (IS_GEN6(dev_priv)) + if (IS_GEN6(dev_priv)) { snb_wm_latency_quirk(dev_priv); + snb_wm_lp3_irq_quirk(dev_priv); + } } static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) @@ -8149,6 +8196,95 @@ static void intel_init_emon(struct drm_i915_private *dev_priv) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } +static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv) +{ + return !I915_READ(GEN8_RC6_CTX_INFO); +} + +static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + i915->gt_pm.rc6.ctx_corrupted = true; + intel_runtime_pm_get(i915); + } +} + +static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915) +{ + if (i915->gt_pm.rc6.ctx_corrupted) { + intel_runtime_pm_put(i915); + i915->gt_pm.rc6.ctx_corrupted = false; + } +} + +/** + * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to clean up the RC6 CTX WA before system suspend. + */ +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915) +{ + if (i915->gt_pm.rc6.ctx_corrupted) + intel_runtime_pm_put(i915); +} + +/** + * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. + */ +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915) +{ + if (!i915->gt_pm.rc6.ctx_corrupted) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + intel_runtime_pm_get(i915); + return; + } + + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + i915->gt_pm.rc6.ctx_corrupted = false; +} + +static void intel_disable_rc6(struct drm_i915_private *dev_priv); + +/** + * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @i915: i915 device + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. + * + * Return false if no context corruption has happened since the last call of + * this function, true otherwise. 
+*/ +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return false; + + if (i915->gt_pm.rc6.ctx_corrupted) + return false; + + if (!i915_rc6_ctx_corrupted(i915)) + return false; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_disable_rc6(i915); + i915->gt_pm.rc6.ctx_corrupted = true; + intel_runtime_pm_get_noresume(i915); + + return true; +} + void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -8164,6 +8300,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->pcu_lock); + i915_rc6_ctx_wa_init(dev_priv); + /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) cherryview_init_gt_powersave(dev_priv); @@ -8210,6 +8348,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); + i915_rc6_ctx_wa_cleanup(dev_priv); + if (!HAS_RC6(dev_priv)) intel_runtime_pm_put(dev_priv); } @@ -8254,7 +8394,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915) i915->gt_pm.llc_pstate.enabled = false; } -static void intel_disable_rc6(struct drm_i915_private *dev_priv) +static void __intel_disable_rc6(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->pcu_lock); @@ -8273,6 +8413,13 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv) dev_priv->gt_pm.rc6.enabled = false; } +static void intel_disable_rc6(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->pcu_lock); + __intel_disable_rc6(dev_priv); + mutex_unlock(&dev_priv->pcu_lock); +} + static void intel_disable_rps(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->pcu_lock); @@ -8298,7 +8445,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->pcu_lock); - intel_disable_rc6(dev_priv); + __intel_disable_rc6(dev_priv); intel_disable_rps(dev_priv); if (HAS_LLC(dev_priv)) intel_disable_llc_pstate(dev_priv); @@ -8325,6 +8472,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv) if (dev_priv->gt_pm.rc6.enabled) return; + if (dev_priv->gt_pm.rc6.ctx_corrupted) + return; + if (IS_CHERRYVIEW(dev_priv)) cherryview_enable_rc6(dev_priv); else if (IS_VALLEYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6a8f27d0a7429e6d3a01f46e9b768031680519b6..3b8218dd9bb1463cafb975b8e87afc4723ad9598 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -91,6 +91,7 @@ static int gen4_render_ring_flush(struct i915_request *rq, u32 mode) { u32 cmd, *cs; + int i; /* * read/write caches: @@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_ISP; } - cs = intel_ring_begin(rq, 2); + i = 2; + if (mode & EMIT_INVALIDATE) + i += 20; + + cs = intel_ring_begin(rq, i); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = cmd; - *cs++ = MI_NOOP; + + /* + * A random delay to let the CS invalidate take effect? Without this + * delay, the GPU relocation path fails as the CS does not see + * the updated contents. Just as important, if we apply the flushes + * to the EMIT_FLUSH branch (i.e. immediately after the relocation + * write and before the invalidate on the next batch), the relocations + * still fail. 
This implies that is a delay following invalidation + * that is required to reset the caches as opposed to a delay to + * ensure the memory is written. + */ + if (mode & EMIT_INVALIDATE) { + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_ggtt_offset(rq->engine->scratch) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + + for (i = 0; i < 12; i++) + *cs++ = MI_FLUSH; + + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_ggtt_offset(rq->engine->scratch) | + PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + } + + *cs++ = cmd; + intel_ring_advance(rq, cs); return 0; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index f5ffa6d31e82c3d19a4ceb8e1b0e78956d0eae3d..eaf1a161bc96c279a8949c46a1ed314b617a2e56 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -584,9 +584,10 @@ struct intel_engine_cs { struct intel_engine_hangcheck hangcheck; -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) -#define I915_ENGINE_SUPPORTS_STATS BIT(1) -#define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_USING_CMD_PARSER BIT(0) +#define I915_ENGINE_SUPPORTS_STATS BIT(1) +#define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3) unsigned int flags; /* @@ -647,9 +648,15 @@ struct intel_engine_cs { }; static inline bool -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) { - return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; } static inline bool diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 812fe7b06f87389414c4c0cf0ae159de17fbc03d..1817a5c0c80fd13e28466d8b949410d8402aa2f4 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -925,6 +925,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, + u8 audio_state) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, + &audio_state, 1); +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -1371,11 +1378,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else sdvox |= SDVO_PIPE_SEL(crtc->pipe); - if (crtc_state->has_audio) { - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4); - sdvox |= SDVO_AUDIO_ENABLE; - } - if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || @@ -1515,8 +1517,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - if (sdvox & SDVO_AUDIO_ENABLE) - pipe_config->has_audio = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, + &val, 1)) { + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT; + + if ((val & mask) == mask) + pipe_config->has_audio = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { @@ -1529,6 +1536,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier, 
encoder_pixel_multiplier); } +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +{ + intel_sdvo_set_audio_state(intel_sdvo, 0); +} + +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + u8 *eld = connector->eld; + + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; + + intel_sdvo_set_audio_state(intel_sdvo, 0); + + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, + SDVO_HBUF_TX_DISABLED, + eld, drm_eld_size(eld)); + + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | + SDVO_AUDIO_PRESENCE_DETECT); +} + static void intel_disable_sdvo(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) @@ -1538,6 +1571,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; + if (old_crtc_state->has_audio) + intel_sdvo_disable_audio(intel_sdvo); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1623,6 +1659,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + + if (pipe_config->has_audio) + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ -2514,7 +2553,6 @@ static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; - struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; @@ -2551,9 +2589,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - /* gen3 doesn't do the hdmi bits in the SDVO register */ - if (INTEL_GEN(dev_priv) >= 4 && - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index db0ed499268ae216cb0ee8185467078907d954a6..e9ba3b047f932123f1a41342b9abf528713c4758 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 + #define SDVO_AUDIO_ELD_VALID (1 << 0) + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1) + #define SDVO_AUDIO_CP_READY (1 << 2) #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_HBUF_INDEX_ELD 0 #define SDVO_HBUF_INDEX_AVI_IF 1 diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4bcdeaf8d98fa3de5aec7790971098905b5a688b..c44bb37e434c180a686ed7c94ba6155ddbc41069 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -37,7 +37,7 @@ * costly and simplifies things. We can revisit this in the future. 
* * Layout - * '''''' + * ~~~~~~ * * Keep things in this file ordered by WA type, as per the above (context, GT, * display, register whitelist, batchbuffer). Then, inside each type, keep the diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7efb326badcd677e98ddfe5c6d1e869d201492ba..704572c2e6a231fb1eed95bb559e3e93adc22f19 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -549,7 +549,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) err = igt_check_page_sizes(vma); if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { - pr_err("page_sizes.gtt=%u, expected %lu\n", + pr_err("page_sizes.gtt=%u, expected %llu\n", vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8e2e269db97e82917b299afbe680fc008b8c90a1..127d8151367177dea04bf5c121b2331e46d1accc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != offset || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, offset, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index a132a8037ecc6b2a317229918e8d2471cdbcb9dd..77df7903e071e3e74550ee1567fdfa970b44a650 100644 --- a/drivers/gpu/drm/i915/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c @@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 3bd0f8a18e748ca4be125d02a12bc50f7fe1bebc..42daa5c9ff8e630f1c598c5488b5f291de0ad230 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -651,8 +651,10 @@ static int 
imx_ldb_bind(struct device *dev, struct device *master, void *data) int bus_format; ret = of_property_read_u32(child, "reg", &i); - if (ret || i < 0 || i > 1) - return -EINVAL; + if (ret || i < 0 || i > 1) { + ret = -EINVAL; + goto free_child; + } if (!of_device_is_available(child)) continue; @@ -665,7 +667,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; - channel->child = child; /* * The output port is port@4 with an external 4-port mux or @@ -675,13 +676,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) imx_ldb->lvds_mux ? 4 : 2, 0, &channel->panel, &channel->bridge); if (ret && ret != -ENODEV) - return ret; + goto free_child; /* panel ddc only if there is no bridge */ if (!channel->bridge) { ret = imx_ldb_panel_ddc(dev, channel, child); if (ret) - return ret; + goto free_child; } bus_format = of_get_bus_format(dev, child); @@ -697,18 +698,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); - return bus_format; + ret = bus_format; + goto free_child; } channel->bus_format = bus_format; + channel->child = child; ret = imx_ldb_register(drm, channel); - if (ret) - return ret; + if (ret) { + channel->child = NULL; + goto free_child; + } } dev_set_drvdata(dev, imx_ldb); return 0; + +free_child: + of_node_put(child); + return ret; } static void imx_ldb_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7d4b710b837ac40d90704484c4b2dc731df851bf..ff34f9bb55a1e8fc44f12a80b16383b40f441042 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -78,7 +78,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, if (disable_partial) ipu_plane_disable(ipu_crtc->plane[1], true); if (disable_full) - ipu_plane_disable(ipu_crtc->plane[0], false); + ipu_plane_disable(ipu_crtc->plane[0], true); } static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, @@ -98,14 +98,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, ipu_dc_disable(ipu); ipu_prg_disable(ipu); + drm_crtc_vblank_off(crtc); + spin_lock_irq(&crtc->dev->event_lock); - if (crtc->state->event) { + if (crtc->state->event && !crtc->state->active) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&crtc->dev->event_lock); - - drm_crtc_vblank_off(crtc); } static void imx_drm_crtc_reset(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 203f247d485471346af9e001c21b8b01cc0a51a0..a323a0db2fc10fcc712df2cc9a90a3ad7ed6db87 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -375,9 +375,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - /* CRTC should be enabled */ + /* nothing to check when disabling or disabled */ if (!crtc_state->enable) - return -EINVAL; + return 0; switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 92ecb9bf982cfe7398eefe3993966fc2acf28b0c..42da8bb4b7c3e2c1e648001ac1778fa295a917bd 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -79,11 +79,13 @@ static void mtk_drm_crtc_finish_page_flip(struct 
mtk_drm_crtc *mtk_crtc) struct drm_crtc *crtc = &mtk_crtc->base; unsigned long flags; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, mtk_crtc->event); - drm_crtc_vblank_put(crtc); - mtk_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } } static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 47ec604289b712148fdfa740a2d6c64ca4d0810f..947bc6d6230205eb141df8b6ea471ca7053dc35c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct mtk_drm_private *private = drm->dev_private; struct platform_device *pdev; struct device_node *np; + struct device *dma_dev; int ret; if (!iommu_present(&platform_bus_type)) @@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; } - private->dma_dev = &pdev->dev; + dma_dev = &pdev->dev; + private->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + if (!dma_dev->dma_parms) { + private->dma_parms_allocated = true; + dma_dev->dma_parms = + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), + GFP_KERNEL); + } + if (!dma_dev->dma_parms) { + ret = -ENOMEM; + goto err_component_unbind; + } + + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + if (ret) { + dev_err(dma_dev, "Failed to set DMA segment size\n"); + goto err_unset_dma_parms; + } /* * We don't use the drm_irq_install() helpers provided by the DRM @@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_component_unbind; + goto err_unset_dma_parms; drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm); return 0; +err_unset_dma_parms: + if (private->dma_parms_allocated) + dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); err_config_cleanup: @@ -309,7 +335,13 @@ static int mtk_drm_kms_init(struct drm_device *drm) static void mtk_drm_kms_deinit(struct drm_device *drm) { + struct mtk_drm_private *private = drm->dev_private; + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + + if (private->dma_parms_allocated) + private->dma_dev->dma_parms = NULL; component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); @@ -326,6 +358,18 @@ static const struct file_operations mtk_drm_fops = { .compat_ioctl = drm_compat_ioctl, }; +/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. 
+ */ +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, @@ -337,7 +381,7 @@ static struct drm_driver mtk_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, @@ -390,7 +434,9 @@ static void mtk_drm_unbind(struct device *dev) struct mtk_drm_private *private = dev_get_drvdata(dev); drm_dev_unregister(private->drm); + mtk_drm_kms_deinit(private->drm); drm_dev_put(private->drm); + private->num_pipes = 0; private->drm = NULL; } @@ -520,12 +566,15 @@ static int mtk_drm_probe(struct platform_device *pdev) comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; + of_node_put(node); goto err_node; } ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); - if (ret) + if (ret) { + of_node_put(node); goto err_node; + } private->ddp_comp[comp_id] = comp; } @@ -559,13 +608,8 @@ static int mtk_drm_probe(struct platform_device *pdev) static int mtk_drm_remove(struct platform_device *pdev) { struct mtk_drm_private *private = platform_get_drvdata(pdev); - struct drm_device *drm = private->drm; int i; - drm_dev_unregister(drm); - mtk_drm_kms_deinit(drm); - drm_dev_put(drm); - component_master_del(&pdev->dev, &mtk_drm_ops); pm_runtime_disable(&pdev->dev); of_node_put(private->mutex_node); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index ecc00ca3221daa80f6952f3083f37dc5f1fad0cf..8fa60d46f8605c05203670b35204b5fabd3ddffb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -59,6 +59,8 @@ struct mtk_drm_private { } commit; struct drm_atomic_state *suspend_state; + + bool dma_parms_allocated; }; extern struct platform_driver mtk_ddp_driver; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 259b7b0de1d22d7beb052c19e2ef06e4859afcff..fec81d92815a6c61ec6e309b8273b00e45fdcae1 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -26,6 +26,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, size = round_up(size, PAGE_SIZE); + if (size == 0) + return ERR_PTR(-EINVAL); + mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); if (!mtk_gem_obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 66df1b1779592195e38fe0437874c39241a2f297..0dd317ac5fe57092b43c908371bfe9b8fa426ce2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -630,6 +630,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return; + /* + * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since + * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), + * which needs irq for vblank, and mtk_dsi_stop() will disable irq. + * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), + * after dsi is fully set. 
+ */ + mtk_dsi_stop(dsi); + if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) { if (dsi->panel) { if (drm_panel_unprepare(dsi->panel)) { @@ -696,7 +705,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) } } - mtk_dsi_stop(dsi); mtk_dsi_poweroff(dsi); dsi->enabled = false; @@ -841,6 +849,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) /* Skip connector cleanup if creation was delegated to the bridge */ if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); + if (dsi->panel) + drm_panel_detach(dsi->panel); } static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 2d45d1dd9554a6c8cea17a3c52c8554af94a613e..62444a3a5742a5d15c0cf175235296ec28dd6bdc 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, } /* The CEC module handles HDMI hotplug detection */ - cec_np = of_find_compatible_node(np->parent, NULL, - "mediatek,mt8173-cec"); + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec"); if (!cec_np) { dev_err(dev, "Failed to find CEC node\n"); return -EINVAL; @@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, if (!cec_pdev) { dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", cec_np); + of_node_put(cec_np); return -EPROBE_DEFER; } + of_node_put(cec_np); hdmi->cec_dev = &cec_pdev->dev; /* @@ -1472,7 +1473,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, if (IS_ERR(regmap)) ret = PTR_ERR(regmap); if (ret) { - ret = PTR_ERR(regmap); dev_err(dev, "Failed to get system configuration registers: %d\n", ret); @@ -1508,6 +1508,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(remote); hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); return -EINVAL; diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 05520202c96778c1401dac07a9b9ff768ba97b91..709475d5cc30e1fea852763b688543e4676fe2f0 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -101,6 +101,8 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc, writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, priv->io_base + _REG(VPP_MISC)); + drm_crtc_vblank_on(crtc); + priv->viu.osd1_enabled = true; } @@ -110,6 +112,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; + drm_crtc_vblank_off(crtc); + priv->viu.osd1_enabled = false; priv->viu.osd1_commit = false; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index d3443125e66164a863fb41bfdb435a1ce13340b6..588b3b0c8315d224738c7f8e41fafd08650a50f7 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { .fb_create = drm_gem_fb_create, }; +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static irqreturn_t meson_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; 
drm->mode_config.funcs = &meson_mode_config_funcs; + drm->mode_config.helper_private = &meson_mode_config_helpers; /* Hardware Initialization */ @@ -295,10 +300,12 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) ret = drm_dev_register(drm, 0); if (ret) - goto free_drm; + goto uninstall_irq; return 0; +uninstall_irq: + drm_irq_uninstall(drm); free_drm: drm_dev_put(drm); @@ -312,10 +319,11 @@ static int meson_drv_bind(struct device *dev) static void meson_drv_unbind(struct device *dev) { - struct drm_device *drm = dev_get_drvdata(dev); - struct meson_drm *priv = drm->dev_private; + struct meson_drm *priv = dev_get_drvdata(dev); + struct drm_device *drm = priv->drm; drm_dev_unregister(drm); + drm_irq_uninstall(drm); drm_kms_helper_poll_fini(drm); drm_fbdev_cma_fini(priv->fbdev); drm_mode_config_cleanup(drm); @@ -363,8 +371,10 @@ static int meson_probe_remote(struct platform_device *pdev, remote_node = of_graph_get_remote_port_parent(ep); if (!remote_node || remote_node == parent || /* Ignore parent endpoint */ - !of_device_is_available(remote_node)) + !of_device_is_available(remote_node)) { + of_node_put(remote_node); continue; + } count += meson_probe_remote(pdev, match, remote, remote_node); @@ -383,10 +393,13 @@ static int meson_drv_probe(struct platform_device *pdev) for_each_endpoint_of_node(np, ep) { remote = of_graph_get_remote_port_parent(ep); - if (!remote || !of_device_is_available(remote)) + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); continue; + } count += meson_probe_remote(pdev, &match, np, remote); + of_node_put(remote); } if (count && !match) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index df7247cd93f98f9f237721bb1eddd104cbbdc345..2cb2ad26d71670c387b144dc34668567f1fa9c34 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -706,6 +706,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = { .reg_read = meson_dw_hdmi_reg_read, .reg_write = meson_dw_hdmi_reg_write, .max_register = 0x10000, + .fast_io = true, }; static bool meson_hdmi_connector_is_available(struct device *dev) diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 12c80dfcff59bc9cb40d2e4ccbf2dbb98b6af3a5..c7daae53fa1f5f6b541f45da3b68193ba32f897f 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -120,6 +120,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane, priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; + case DRM_FORMAT_XBGR8888: + /* For XRGB, replace the pixel's alpha by 0xFF */ + writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN, + priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | + OSD_COLOR_MATRIX_32_ABGR; + break; case DRM_FORMAT_ARGB8888: /* For ARGB, use the pixel's alpha */ writel_bits_relaxed(OSD_REPLACE_EN, 0, @@ -127,6 +134,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane, priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | OSD_COLOR_MATRIX_32_ARGB; break; + case DRM_FORMAT_ABGR8888: + /* For ARGB, use the pixel's alpha */ + writel_bits_relaxed(OSD_REPLACE_EN, 0, + priv->io_base + _REG(VIU_OSD1_CTRL_STAT2)); + priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 | + OSD_COLOR_MATRIX_32_ABGR; + break; case DRM_FORMAT_RGB888: priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 | OSD_COLOR_MATRIX_24_RGB; @@ -196,7 +210,9 @@ static const struct drm_plane_funcs 
meson_plane_funcs = { static const uint32_t supported_drm_formats[] = { DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, DRM_FORMAT_RGB888, DRM_FORMAT_RGB565, }; diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index 514245e69b3847d1dc1d5f96249d5e49f849a4ca..7a3a6ed9f27bb561fe4a399b1d35c049c855a0c0 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -71,6 +71,7 @@ */ /* HHI Registers */ +#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */ #define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ #define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ @@ -714,6 +715,7 @@ struct meson_hdmi_venc_vic_mode { { 5, &meson_hdmi_encp_mode_1080i60 }, { 20, &meson_hdmi_encp_mode_1080i50 }, { 32, &meson_hdmi_encp_mode_1080p24 }, + { 33, &meson_hdmi_encp_mode_1080p50 }, { 34, &meson_hdmi_encp_mode_1080p30 }, { 31, &meson_hdmi_encp_mode_1080p50 }, { 16, &meson_hdmi_encp_mode_1080p60 }, @@ -1529,10 +1531,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv) void meson_venc_enable_vsync(struct meson_drm *priv) { writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL)); + regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25)); } void meson_venc_disable_vsync(struct meson_drm *priv) { + regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0); writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL)); } diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index f7945bae3b4a9e74b7400b463c95984b93cabc59..e1760140e3c26ab4c0f9f324e8e5a2e91134224d 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -75,6 +75,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { }, }; +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + /* Connector */ static void meson_cvbs_connector_destroy(struct drm_connector *connector) @@ -147,14 +166,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode)) - return 0; - } + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; return -EINVAL; } @@ -192,24 +205,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); struct meson_venc_cvbs *meson_venc_cvbs = encoder_to_meson_venc_cvbs(encoder); struct meson_drm *priv = meson_venc_cvbs->priv; - int i; - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + if (meson_mode) { + meson_venci_cvbs_mode_set(priv, meson_mode->enci); - if (drm_mode_equal(mode, &meson_mode->mode)) { - meson_venci_cvbs_mode_set(priv, - meson_mode->enci); - - /* Setup 27MHz vclk2 
for ENCI and VDAC */ - meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, true); - break; - } + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, true); } } diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index 6bcfa527c1801045569496712c904b3a26f8c518..26a0857878bfd520fe3ebe43cfa85c60534ff66a 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel, if (lut_sel == VIU_LUT_OSD_OETF) { writel(0, priv->io_base + _REG(addr_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++) writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); @@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel, } else if (lut_sel == VIU_LUT_OSD_EOTF) { writel(0, priv->io_base + _REG(addr_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16), priv->io_base + _REG(data_port)); - for (i = 0; i < 20; i++) + for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++) writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16), priv->io_base + _REG(data_port)); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ab1d9308c31146d968c5b431587b895df9d517a8..ba6f3c14495c0ceb605276e81edce5ddb055a1c3 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -35,7 +35,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname) { struct device *dev = &gpu->pdev->dev; const struct firmware *fw; - struct device_node *np; + struct device_node *np, *mem_np; struct resource r; phys_addr_t mem_phys; ssize_t mem_size; @@ -49,11 +49,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname) if (!np) return -ENODEV; - np = of_parse_phandle(np, "memory-region", 0); - if (!np) + mem_np = of_parse_phandle(np, "memory-region", 0); + of_node_put(np); + if (!mem_np) return -EINVAL; - ret = of_address_to_resource(np, 0, &r); + ret = of_address_to_resource(mem_np, 0, &r); + of_node_put(mem_np); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index bbb8126ec5c573ba7d58610832606e3eaa18410c..9cde79a7335c825de1f59c059ad13ac2d50eb30a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -896,7 +896,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) np = dev_pm_opp_get_of_node(opp); if (np) { - of_property_read_u32(np, "qcom,level", &val); + of_property_read_u32(np, "opp-level", &val); of_node_put(np); } @@ 
-1140,7 +1140,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->dev = &pdev->dev; - of_dma_configure(gmu->dev, node, false); + of_dma_configure(gmu->dev, node, true); /* Fow now, don't do anything fancy until we get our feet under us */ gmu->idle_level = GMU_IDLE_STATE_ACTIVE; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index da1363a0c54d61b6d50fe77f5fa3993ee37668b4..93d70f4a2154e289be09a8dd6ae8a038e007ec7b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) struct device_node *child, *node; int ret; - node = of_find_compatible_node(dev->of_node, NULL, - "qcom,gpu-pwrlevels"); + node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); if (!node) { dev_err(dev, "Could not find the GPU powerlevels\n"); return -ENXIO; @@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) dev_pm_opp_add(dev, val, 0); } + of_node_put(node); + return 0; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 80cbf75bc2ff2bd2ca45cdb23f0363c6a69937fe..5852e1d356e13e5c041dc52192c25fb7a2f11fc8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1477,6 +1477,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, } pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); + if (!pstates) + return -ENOMEM; dpu_crtc = to_dpu_crtc(crtc); cstate = to_dpu_crtc_state(state); @@ -1535,8 +1537,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, cnt++; dst = drm_plane_state_dest(pstate); - if (!drm_rect_intersect(&clip, &dst) || - !drm_rect_equals(&clip, &dst)) { + if (!drm_rect_intersect(&clip, &dst)) { DPU_ERROR("invalid vertical/horizontal destination\n"); DPU_ERROR("display: " DRM_RECT_FMT " plane: " DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), @@ -2123,7 +2124,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane) NULL); drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); - plane->crtc = crtc; /* save user friendly CRTC name for later */ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c index ae2aee7ed9e1938f25e2aa11a40e640dcecf8e55..e741d26185df63ceb59c36399cfb2e60359b5192 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c @@ -1962,7 +1962,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus) u32 *dump_addr = NULL; u32 status = 0; struct dpu_debug_bus_entry *head; - phys_addr_t phys = 0; + dma_addr_t dma = 0; int list_size; int i; u32 offset; @@ -2000,7 +2000,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus) if (in_mem) { if (!(*dump_mem)) *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev, - list_size, &phys, GFP_KERNEL); + list_size, &dma, GFP_KERNEL); if (*dump_mem) { dump_addr = *dump_mem; @@ -2101,7 +2101,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus) u32 value, d0, d1; unsigned long reg, reg1, reg2; struct vbif_debug_bus_entry *head; - phys_addr_t phys = 0; + dma_addr_t dma = 0; int i, list_size = 0; void __iomem *mem_base = NULL; struct vbif_debug_bus_entry *dbg_bus; @@ -2151,7 +2151,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus) if (in_mem) { if 
(!(*dump_mem)) *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev, - list_size, &phys, GFP_KERNEL); + list_size, &dma, GFP_KERNEL); if (*dump_mem) { dump_addr = *dump_mem; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 1b4de3486ef9e14fa8b172de29d68db2df41e802..ec3fd67378c187467f54e3eda1b6da8df09ac846 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -503,8 +503,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc) drm_encoder_cleanup(drm_enc); mutex_destroy(&dpu_enc->enc_lock); - - kfree(dpu_enc); } void dpu_encoder_helper_split_config( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index c7df8aad66137c137b1fb7039b23faf35c3fa468..f376e87e0768f02671cc68b8d31aa5abce6a4e7b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -215,6 +215,7 @@ struct dpu_encoder_irq { * @hw_pp: Hardware interface to the ping pong registers * @dpu_kms: Pointer to the dpu_kms top level * @cached_mode: DRM mode cached at mode_set time, acted on in enable + * @vblank_ctl_lock: Vblank ctl mutex lock to protect vblank_refcount * @enabled: Whether the encoder has enabled and running a mode * @split_role: Role to play in a split-panel configuration * @intf_mode: Interface mode @@ -246,13 +247,14 @@ struct dpu_encoder_phys { struct dpu_hw_pingpong *hw_pp; struct dpu_kms *dpu_kms; struct drm_display_mode cached_mode; + struct mutex vblank_ctl_lock; enum dpu_enc_split_role split_role; enum dpu_intf_mode intf_mode; enum dpu_intf intf_idx; enum dpu_rm_topology_name topology_name; spinlock_t *enc_spinlock; enum dpu_enc_enable_state enable_state; - atomic_t vblank_refcount; + int vblank_refcount; atomic_t vsync_cnt; atomic_t underrun_cnt; atomic_t pending_ctlstart_cnt; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 3084675ed42578ed70b22ce40241673b782f5435..ec88fafc9cb3425d8966a72908f56c2b661ddac6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -316,7 +316,8 @@ static int dpu_encoder_phys_cmd_control_vblank_irq( return -EINVAL; } - refcount = atomic_read(&phys_enc->vblank_refcount); + mutex_lock(&phys_enc->vblank_ctl_lock); + refcount = phys_enc->vblank_refcount; /* Slave encoders don't report vblank */ if (!dpu_encoder_phys_cmd_is_master(phys_enc)) @@ -332,13 +333,21 @@ static int dpu_encoder_phys_cmd_control_vblank_irq( phys_enc->hw_pp->idx - PINGPONG_0, enable ? 
"true" : "false", refcount); - if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) - ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR); - else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) - ret = dpu_encoder_helper_unregister_irq(phys_enc, - INTR_IDX_RDPTR); + if (enable) { + if (phys_enc->vblank_refcount == 0) + ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR); + if (!ret) + phys_enc->vblank_refcount++; + } else if (!enable) { + if (phys_enc->vblank_refcount == 1) + ret = dpu_encoder_helper_unregister_irq(phys_enc, + INTR_IDX_RDPTR); + if (!ret) + phys_enc->vblank_refcount--; + } end: + mutex_unlock(&phys_enc->vblank_ctl_lock); if (ret) { DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n", DRMID(phys_enc->parent), @@ -361,7 +370,7 @@ static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, - enable, atomic_read(&phys_enc->vblank_refcount)); + enable, phys_enc->vblank_refcount); if (enable) { dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG); @@ -846,6 +855,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( phys_enc->hw_mdptop = hw_mdp; phys_enc->intf_idx = p->intf_idx; + mutex_init(&phys_enc->vblank_ctl_lock); + phys_enc->vblank_refcount = 0; + dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); phys_enc->parent = p->parent; phys_enc->parent_ops = p->parent_ops; @@ -887,7 +899,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( irq->intr_idx = INTR_IDX_UNDERRUN; irq->cb.func = dpu_encoder_phys_cmd_underrun_irq; - atomic_set(&phys_enc->vblank_refcount, 0); atomic_set(&phys_enc->pending_kickoff_cnt, 0); atomic_set(&phys_enc->pending_ctlstart_cnt, 0); atomic_set(&cmd_enc->pending_vblank_cnt, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 14fc7c2a6bb764a5e6bfff2842bc82d7e301bc98..7173169d2f3be2ccc156a65c332d65f2bd2efaf3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) if (hw_ctl && hw_ctl->ops.get_flush_register) flush_register = hw_ctl->ops.get_flush_register(hw_ctl); - if (flush_register == 0) + if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); @@ -445,9 +445,11 @@ static int dpu_encoder_phys_vid_control_vblank_irq( return -EINVAL; } - refcount = atomic_read(&phys_enc->vblank_refcount); vid_enc = to_dpu_encoder_phys_vid(phys_enc); + mutex_lock(&phys_enc->vblank_ctl_lock); + refcount = phys_enc->vblank_refcount; + /* Slave encoders don't report vblank */ if (!dpu_encoder_phys_vid_is_master(phys_enc)) goto end; @@ -458,16 +460,24 @@ static int dpu_encoder_phys_vid_control_vblank_irq( goto end; } - DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable, - atomic_read(&phys_enc->vblank_refcount)); + DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable, + refcount); - if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) - ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC); - else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) - ret = dpu_encoder_helper_unregister_irq(phys_enc, - INTR_IDX_VSYNC); + if (enable) { + if 
(phys_enc->vblank_refcount == 0) + ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC); + if (!ret) + phys_enc->vblank_refcount++; + } else if (!enable) { + if (phys_enc->vblank_refcount == 1) + ret = dpu_encoder_helper_unregister_irq(phys_enc, + INTR_IDX_VSYNC); + if (!ret) + phys_enc->vblank_refcount--; + } end: + mutex_unlock(&phys_enc->vblank_ctl_lock); if (ret) { DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n", DRMID(phys_enc->parent), @@ -742,7 +752,7 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0, enable, - atomic_read(&phys_enc->vblank_refcount)); + phys_enc->vblank_refcount); if (enable) { ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true); @@ -877,6 +887,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( DPU_DEBUG_VIDENC(vid_enc, "\n"); + mutex_init(&phys_enc->vblank_ctl_lock); + phys_enc->vblank_refcount = 0; + dpu_encoder_phys_vid_init_ops(&phys_enc->ops); phys_enc->parent = p->parent; phys_enc->parent_ops = p->parent_ops; @@ -904,7 +917,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( irq->intr_idx = INTR_IDX_UNDERRUN; irq->cb.func = dpu_encoder_phys_vid_underrun_irq; - atomic_set(&phys_enc->vblank_refcount, 0); atomic_set(&phys_enc->pending_kickoff_cnt, 0); init_waitqueue_head(&phys_enc->pending_kickoff_wq); phys_enc->enable_state = DPU_ENC_DISABLED; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 7dd6bd2d6d378b8351b4dd88116ea12690da8d4d..2d9b7b5fb49c867a4ff9ee271ff98b59addb3670 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -442,35 +442,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms, } } -static void _dpu_kms_initialize_dsi(struct drm_device *dev, +static int _dpu_kms_initialize_dsi(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { struct drm_encoder *encoder = NULL; - int i, rc; + int i, rc = 0; + + if (!(priv->dsi[0] || priv->dsi[1])) + return rc; /*TODO: Support two independent DSI connectors */ - encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI); - if (IS_ERR_OR_NULL(encoder)) { + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); + if (IS_ERR(encoder)) { DPU_ERROR("encoder init failed for dsi display\n"); - return; + return PTR_ERR(encoder); } priv->encoders[priv->num_encoders++] = encoder; for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { - if (!priv->dsi[i]) { - DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i); - return; - } + if (!priv->dsi[i]) + continue; rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); if (rc) { DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", i, rc); - continue; + break; } } + + return rc; } /** @@ -481,16 +484,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, * @dpu_kms: Pointer to dpu kms structure * Returns: Zero on success */ -static void _dpu_kms_setup_displays(struct drm_device *dev, +static int _dpu_kms_setup_displays(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { - _dpu_kms_initialize_dsi(dev, priv, dpu_kms); - /** * Extend this function to initialize other * types of displays */ + + return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); } static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) @@ -552,7 +555,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) * Create encoder and query display drivers to create * bridges and 
connectors */ - _dpu_kms_setup_displays(dev, priv, dpu_kms); + ret = _dpu_kms_setup_displays(dev, priv, dpu_kms); + if (ret) + goto fail; max_crtc_count = min(catalog->mixer_count, priv->num_encoders); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index b640e39ebaca28c86015d32e11d68cdf876c687f..4ac2b0c669b74ac24a1dc39d0413c219412ee570 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1254,7 +1254,7 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, const struct dpu_format *fmt; struct drm_crtc *crtc; struct drm_framebuffer *fb; - struct drm_rect src, dst; + int ret, min_scale; if (!plane) { DPU_ERROR("invalid plane\n"); @@ -1293,21 +1293,29 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); - src.x1 = state->src_x >> 16; - src.y1 = state->src_y >> 16; - src.x2 = src.x1 + (state->src_w >> 16); - src.y2 = src.y1 + (state->src_h >> 16); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale, + pdpu->pipe_sblk->maxupscale << 16, + true, false); + if (ret) { + DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); + return ret; + } - dst = drm_plane_state_dest(state); + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT + ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), + crtc->base.id, DRM_RECT_ARG(&state->dst), + (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); - DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT - ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src), - crtc->base.id, DRM_RECT_ARG(&dst), - (char *)&fmt->base.pixel_format, - DPU_FORMAT_IS_UBWC(fmt)); + pdpu->pipe_cfg.src_rect = state->src; + + /* state->src is 16.16, src_rect is not */ + pdpu->pipe_cfg.src_rect.x1 >>= 16; + pdpu->pipe_cfg.src_rect.x2 >>= 16; + pdpu->pipe_cfg.src_rect.y1 >>= 16; + pdpu->pipe_cfg.src_rect.y2 >>= 16; - pdpu->pipe_cfg.src_rect = src; - pdpu->pipe_cfg.dst_rect = dst; + pdpu->pipe_cfg.dst_rect = state->dst; _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 7d306c5acd09644ff62b7e7d62f6590390a90344..1ddf07514de6d9b9271ae4b8206630a5e445f63e 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, msm_framebuffer_cleanup(fb, kms->aspace); } -#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, struct drm_plane_state *state) { @@ -504,6 +503,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane, static void mdp5_plane_atomic_async_update(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct drm_framebuffer *old_fb = plane->state->fb; + plane->state->src_x = new_state->src_x; plane->state->src_y = new_state->src_y; plane->state->crtc_x = new_state->crtc_x; @@ -526,6 +527,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane, *to_mdp5_plane_state(plane->state) = *to_mdp5_plane_state(new_state); + + new_state->fb = old_fb; } static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { diff --git 
a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 96fb5f63531482fcf688cb4d69b56f9eec2d719c..cc4ea5502d6c3ac57a32728aea7857d823e630bf 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -429,15 +429,15 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) } msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); - if (!msm_host->byte_clk_src) { - ret = -ENODEV; + if (IS_ERR(msm_host->byte_clk_src)) { + ret = PTR_ERR(msm_host->byte_clk_src); pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret); goto exit; } msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk); - if (!msm_host->pixel_clk_src) { - ret = -ENODEV; + if (IS_ERR(msm_host->pixel_clk_src)) { + ret = PTR_ERR(msm_host->pixel_clk_src); pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret); goto exit; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 9a9fa0c75a131083f32c57f5cd89a0e02c7b87d1..fa0e33ca91ca12a1635ee96b0f2f4d3ad400859a 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -667,12 +667,14 @@ void __exit msm_dsi_phy_driver_unregister(void) int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { - struct device *dev = &phy->pdev->dev; + struct device *dev; int ret; if (!phy || !phy->cfg->ops.enable) return -EINVAL; + dev = &phy->pdev->dev; + ret = dsi_phy_enable_resource(phy); if (ret) { dev_err(dev, "%s: resource enable failed, %d\n", diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 4c03f0b7343ed655c60111be4d09249bde463b28..31205625c7346220dab656e51a0730c16013c124 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -17,7 +17,7 @@ * | | * | | * +---------+ | +----------+ | +----+ - * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk * +---------+ | +----------+ | +----+ * | | * | | dsi0_pll_by_2_bit_clk @@ -25,7 +25,7 @@ * | | +----+ | |\ dsi0_pclk_mux * | |--| /2 |--o--| \ | * | | +----+ | \ | +---------+ - * | --------------| |--o--| div_7_4 |-- dsi0pll + * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk * |------------------------------| / +---------+ * | +-----+ | / * -----------| /4? 
|--o----------|/ @@ -39,6 +39,8 @@ #define DSI_PIXEL_PLL_CLK 1 #define NUM_PROVIDED_CLKS 2 +#define VCO_REF_CLK_RATE 19200000 + struct dsi_pll_regs { u32 pll_prop_gain_rate; u32 pll_lockdet_rate; @@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, parent_rate); pll_10nm->vco_current_rate = rate; - pll_10nm->vco_ref_clk_rate = parent_rate; + pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; dsi_pll_setup_config(pll_10nm); @@ -688,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm) hws[num++] = hw; - snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id); + snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id); snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ @@ -737,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm) hws[num++] = hw; - snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id); + snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id); snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id); /* PIX CLK DIV : DIV_CTRL_7_4*/ diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 7c72264101ff4239c79ecbfd44f3f97b82014bf3..2589b23c413185285dfb580b2c14bda9dc46fef8 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -1090,7 +1090,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) int msm_edp_ctrl_init(struct msm_edp *edp) { struct edp_ctrl *ctrl = NULL; - struct device *dev = &edp->pdev->dev; + struct device *dev; int ret; if (!edp) { @@ -1098,6 +1098,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp) return -EINVAL; } + dev = &edp->pdev->dev; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index c79659ca570655da77888052fc47a16bc53cf409..33e083f71a170e5c2c05823be647df5a1081fd7d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } + ret = msm_hdmi_hpd_enable(hdmi->connector); + if (ret < 0) { + DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); + goto fail; + } + encoder->bridge = hdmi->bridge; priv->bridges[priv->num_bridges++] = hdmi->bridge; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index accc9a61611d35bf9718d66fe09ce6c6e40a549f..5c5df6ab2a573421726a5ca69fbf619841c8303a 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); void msm_hdmi_connector_irq(struct drm_connector *connector); struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); +int msm_hdmi_hpd_enable(struct drm_connector *connector); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index e9c9a0af508e8c41bc12e91fc13d2f23b5041f33..30e908dfded7ed888267d2c7a4a8211764fc6a22 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) } } -static int hpd_enable(struct hdmi_connector *hdmi_connector) +int msm_hdmi_hpd_enable(struct drm_connector *connector) { + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = 
hdmi->config; struct device *dev = &hdmi->pdev->dev; @@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) { struct drm_connector *connector = NULL; struct hdmi_connector *hdmi_connector; - int ret; hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); if (!hdmi_connector) @@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - ret = hpd_enable(hdmi_connector); - if (ret) { - dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); - return ERR_PTR(ret); - } - drm_connector_attach_encoder(connector, hdmi->encoder); return connector; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index c1f1779c980f615ce8b339d0f34f351990d8ff5d..2b7bb6e166d3f8b6fae39fddad5ea33a779d0874 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -32,7 +32,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, if (!new_crtc_state->active) continue; + if (drm_crtc_vblank_get(crtc)) + continue; + kms->funcs->wait_for_crtc_commit_done(kms, crtc); + + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index f0da0d3c8a80f7cf9ab5082095aed6df3e3c9529..989465e5f9d91ac00824cb9cd60a38fa55a2dfd1 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -53,12 +53,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_gpu_show_priv *show_priv = m->private; struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - int ret; - - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&show_priv->dev->struct_mutex); gpu->funcs->gpu_state_put(show_priv->state); mutex_unlock(&show_priv->dev->struct_mutex); @@ -84,7 +80,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) - return ret; + goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); show_priv->state = gpu->funcs->gpu_state_get(gpu); @@ -94,13 +90,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file) if (IS_ERR(show_priv->state)) { ret = PTR_ERR(show_priv->state); - kfree(show_priv); - return ret; + goto free_priv; } show_priv->dev = dev; - return single_open(file, msm_gpu_show, show_priv); + ret = single_open(file, msm_gpu_show, show_priv); + if (ret) + goto free_priv; + + return 0; + +free_priv: + kfree(show_priv); + return ret; } static const struct file_operations msm_gpu_fops = { diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c1abad8a8612683a237dfae4fec766c806c9caf8..dbfd2c006f7406ed31614dc3de1b412c8ee37b09 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1284,7 +1284,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); @@ -1321,16 +1322,24 @@ static int msm_pdev_probe(struct platform_device *pdev) ret = add_gpu_components(&pdev->dev, &match); if (ret) - return ret; + goto fail; /* on all devices that I am aware of, iommu's which can map * any address the cpu can see are used: */ ret = dma_set_mask_and_coherent(&pdev->dev, ~0); if (ret) - return ret; + goto fail; + + ret = 
component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); + if (ret) + goto fail; - return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); + return 0; + +fail: + of_platform_depopulate(&pdev->dev); + return ret; } static int msm_pdev_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8e510d5c758a59b5922ba8357f68ce33ca404eae..9d11f321f5a9286c5b046c8fc46f04622ed22d2c 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -62,6 +62,8 @@ struct msm_gem_vma; #define MAX_BRIDGES 8 #define MAX_CONNECTORS 8 +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + struct msm_file_private { rwlock_t queuelock; struct list_head submitqueues; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 7bd83e0afa971e13de295e7595ac1abd0902c2a9..18fee169b236a1b05c9b7d92e9411a0f14f3fc34 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -77,7 +77,7 @@ void msm_gem_submit_free(struct msm_gem_submit *submit) static inline unsigned long __must_check copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) + if (access_ok(from, n)) return __copy_from_user_inatomic(to, from, n); return -EFAULT; } @@ -410,7 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_file_private *ctx = file->driver_priv; struct msm_gem_submit *submit; struct msm_gpu *gpu = priv->gpu; - struct dma_fence *in_fence = NULL; struct sync_file *sync_file = NULL; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; @@ -443,6 +442,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ring = gpu->rb[queue->prio]; if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { + struct dma_fence *in_fence; + in_fence = sync_file_get_fence(args->fence_fd); if (!in_fence) @@ -452,11 +453,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, * Wait if the fence is from a foreign context, or if the fence * array contains any fence from a foreign context. 
*/ - if (!dma_fence_match_context(in_fence, ring->fctx->context)) { + ret = 0; + if (!dma_fence_match_context(in_fence, ring->fctx->context)) ret = dma_fence_wait(in_fence, true); - if (ret) - return ret; - } + + dma_fence_put(in_fence); + if (ret) + return ret; } ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -582,8 +585,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } out: - if (in_fence) - dma_fence_put(in_fence); submit_cleanup(submit); if (ret) msm_gem_submit_free(submit); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5e808cfec345f55e27d31cede99089d6a65d4017..5e6c78ea3a3f67d46d53da25749dc423e71ea57b 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -25,6 +25,7 @@ #include #include #include +#include /* * Power Management: @@ -367,8 +368,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); } #else -static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm, - char *cmd) +static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, + struct msm_gem_submit *submit, char *comm, char *cmd) { } #endif @@ -425,10 +426,9 @@ static void recover_worker(struct work_struct *work) if (submit) { struct task_struct *task; - rcu_read_lock(); - task = pid_task(submit->pid, PIDTYPE_PID); + task = get_pid_task(submit->pid, PIDTYPE_PID); if (task) { - comm = kstrdup(task->comm, GFP_ATOMIC); + comm = kstrdup(task->comm, GFP_KERNEL); /* * So slightly annoying, in other paths like @@ -441,10 +441,10 @@ static void recover_worker(struct work_struct *work) * about the submit going away. */ mutex_unlock(&dev->struct_mutex); - cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC); + cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL); + put_task_struct(task); mutex_lock(&dev->struct_mutex); } - rcu_read_unlock(); if (comm && cmd) { dev_err(dev->dev, "%s: offending task: %s (%s)\n", diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 9122ee6e55e4c30907de8fdd533a29682e8800ba..1fe93920fb2574cc3261dcf159995d0a7c3dcb33 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -63,7 +63,7 @@ struct msm_gpu_funcs { struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu); -#ifdef CONFIG_DEBUG_FS +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) /* show GPU status in debugfs: */ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, struct drm_printer *p); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index b23d33622f374b0ce88791914b53cb126899676b..2a90aa4caec081b2349ce115d77f4225d22ab3a4 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, // pm_runtime_get_sync(mmu->dev); ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); // pm_runtime_put_sync(mmu->dev); - WARN_ON(ret < 0); + WARN_ON(!ret); return (ret == len) ? 
0 : -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 3aa8a8576abea8323a4c30534837a3defcac9b17..d4cc5ceb22d01bb4627c27f9a2c63104ea5765ff 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) char *fptr = &fifo->buf[fifo->head]; int n; - wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); + wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); + if (!rd->open) + return; /* Note that smp_load_acquire() is not strictly required * as CIRC_SPACE_TO_END() does not access the tail more @@ -213,7 +215,10 @@ static int rd_open(struct inode *inode, struct file *file) static int rd_release(struct inode *inode, struct file *file) { struct msm_rd_state *rd = inode->i_private; + rd->open = false; + wake_up_all(&rd->fifo_event); + return 0; } @@ -316,10 +321,11 @@ static void snapshot_buf(struct msm_rd_state *rd, uint64_t iova, uint32_t size) { struct msm_gem_object *obj = submit->bos[idx].obj; + unsigned offset = 0; const char *buf; if (iova) { - buf += iova - submit->bos[idx].iova; + offset = iova - submit->bos[idx].iova; } else { iova = submit->bos[idx].iova; size = obj->base.size; @@ -340,6 +346,8 @@ static void snapshot_buf(struct msm_rd_state *rd, if (IS_ERR(buf)) return; + buf += offset; + rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); msm_gem_put_vaddr(&obj->base); diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 4b75ad40dd80e562a6692e90393de581455ef501..00d9d77f583a7bb52c68779b1a4a4f508938ebd1 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -16,10 +16,21 @@ config DRM_NOUVEAU select INPUT if ACPI && X86 select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 - select DRM_VM help Choose this option for open-source NVIDIA support. +config NOUVEAU_LEGACY_CTX_SUPPORT + bool "Nouveau legacy context support" + depends on DRM_NOUVEAU + select DRM_VM + default y + help + There was a version of the nouveau DDX that relied on legacy + ctx ioctls not erroring out. But that was back in time a long + ways, so offer a way to disable it now. For uapi compat with + old nouveau ddx this should be on by default, but modern distros + should consider turning it off. + config NOUVEAU_PLATFORM_DRIVER bool "Nouveau (NVIDIA) SoC GPUs" depends on DRM_NOUVEAU && ARCH_TEGRA diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 6a4ca139cf5d71efb67427dd2f79eb886482d817..5f16fddca81ee02e19b2a1d03df995b675c074a5 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -208,6 +208,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, struct drm_display_mode *mode; mode = drm_mode_duplicate(encoder->dev, tv_mode); + if (!mode) + continue; mode->clock = tv_norm->tv_enc_mode.vrefresh * mode->htotal / 1000 * @@ -257,6 +259,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, if (modes[i].hdisplay == output_mode->hdisplay && modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); + if (!mode) + continue; mode->type |= DRM_MODE_TYPE_PREFERRED; } else { @@ -264,6 +268,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, modes[i].vdisplay, 60, false, (output_mode->flags & DRM_MODE_FLAG_INTERLACE), false); + if (!mode) + continue; } /* CVT modes are sometimes unsuitable... 
*/ @@ -750,7 +756,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder, /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. */ if (crtc) - drm_crtc_force_disable(crtc); + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->primary->fb); } return 0; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 041e7daf8a337f8204107ff02582d8fcfa499b83..10107e551fac35d5c18a1e92402df77c05174acc 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -197,6 +197,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, /****************************************************************************** * EVO channel helpers *****************************************************************************/ +static void +evo_flush(struct nv50_dmac *dmac) +{ + /* Push buffer fetches are not coherent with BAR1, we need to ensure + * writes have been flushed right through to VRAM before writing PUT. + */ + if (dmac->push.type & NVIF_MEM_VRAM) { + struct nvif_device *device = dmac->base.device; + nvif_wr32(&device->object, 0x070000, 0x00000001); + nvif_msec(device, 2000, + if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) + break; + ); + } +} + u32 * evo_wait(struct nv50_dmac *evoc, int nr) { @@ -207,6 +223,7 @@ evo_wait(struct nv50_dmac *evoc, int nr) mutex_lock(&dmac->lock); if (put + nr >= (PAGE_SIZE / 4) - 8) { dmac->ptr[put] = 0x20000000; + evo_flush(dmac); nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); if (nvif_msec(device, 2000, @@ -229,17 +246,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc) { struct nv50_dmac *dmac = evoc; - /* Push buffer fetches are not coherent with BAR1, we need to ensure - * writes have been flushed right through to VRAM before writing PUT. 
- */ - if (dmac->push.type & NVIF_MEM_VRAM) { - struct nvif_device *device = dmac->base.device; - nvif_wr32(&device->object, 0x070000, 0x00000001); - nvif_msec(device, 2000, - if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002)) - break; - ); - } + evo_flush(dmac); nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); mutex_unlock(&dmac->lock); @@ -752,7 +759,8 @@ nv50_msto_enable(struct drm_encoder *encoder) slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn); r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots); - WARN_ON(!r); + if (!r) + DRM_DEBUG_KMS("Failed to allocate VCPI\n"); if (!mstm->links++) nv50_outp_acquire(mstm->outp); @@ -843,22 +851,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector, { struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[head->base.index]->encoder; - } - return NULL; + + return &mstc->mstm->msto[head->base.index]->encoder; } static struct drm_encoder * nv50_mstc_best_encoder(struct drm_connector *connector) { struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[0]->encoder; - } - return NULL; + + return &mstc->mstm->msto[0]->encoder; } static enum drm_mode_status @@ -1223,8 +1225,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm) static void nv50_mstm_init(struct nv50_mstm *mstm) { - if (mstm && mstm->mgr.mst_state) - drm_dp_mst_topology_mgr_resume(&mstm->mgr); + int ret; + + if (!mstm || !mstm->mgr.mst_state) + return; + + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + if (ret == -1) { + drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); + drm_kms_helper_hotplug_event(mstm->mgr.dev); + } } static void @@ -1232,6 +1242,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm) { struct nv50_mstm *mstm = *pmstm; if (mstm) { + drm_dp_mst_topology_mgr_destroy(&mstm->mgr); kfree(*pmstm); *pmstm = NULL; } @@ -1506,7 +1517,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) nv_encoder->aux = aux; } - if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) && + if (nv_connector->type != DCB_CONNECTOR_eDP && + (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) { ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, nv_connector->base.base.id, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index e48c5eb35b49883d015d3688a58888d9fa0b80c5..66c125a6b0b3c34221bf8a36559fbca5290addd4 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -41,6 +41,7 @@ struct nv50_disp_interlock { NV50_DISP_INTERLOCK__SIZE } type; u32 data; + u32 wimm; }; void corec37d_ntfy_init(struct nouveau_bo *, u32); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 4f57e53797968e845175bb6b0689aa73e9d2bc29..b041ffb3af27049c8a4df327b9d51657eefe22e1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -169,14 +169,34 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh, */ switch (mode) { case DRM_MODE_SCALE_CENTER: - asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW); - asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH); - /* fall-through */ + /* NOTE: This will cause scaling when the input is + * larger than the output. 
+ */ + asyh->view.oW = min(asyh->view.iW, asyh->view.oW); + asyh->view.oH = min(asyh->view.iH, asyh->view.oH); + break; case DRM_MODE_SCALE_ASPECT: - if (asyh->view.oH < asyh->view.oW) { + /* Determine whether the scaling should be on width or on + * height. This is done by comparing the aspect ratios of the + * sizes. If the output AR is larger than input AR, that means + * we want to change the width (letterboxed on the + * left/right), otherwise on the height (letterboxed on the + * top/bottom). + * + * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR + * screen will have letterboxes on the left/right. However a + * 16:9 (1.777) AR image on that same screen will have + * letterboxes on the top/bottom. + * + * inputAR = iW / iH; outputAR = oW / oH + * outputAR > inputAR is equivalent to oW * iH > iW * oH + */ + if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) { + /* Recompute output width, i.e. left/right letterbox */ u32 r = (asyh->view.iW << 19) / asyh->view.iH; asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19; } else { + /* Recompute output height, i.e. top/bottom letterbox */ u32 r = (asyh->view.iH << 19) / asyh->view.iW; asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19; } @@ -306,7 +326,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) asyh->set.or = head->func->or != NULL; } - if (asyh->state.mode_changed) + if (asyh->state.mode_changed || asyh->state.connectors_changed) nv50_head_atomic_check_mode(head, asyh); if (asyh->state.color_mgmt_changed || diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c index 9103b8494279c6273c85867d6e183e73f507ed06..f7dbd965e4e729ccd98b4e7ed153bba6a327f893 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c @@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, return ret; } + wndw->interlock.wimm = wndw->interlock.data; wndw->immd = func; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 2187922e8dc28d4d11df28bc33aec3f91281791c..b3db4553098d5736aeb09b27e5eda55d5e0bda61 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -151,7 +151,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock, if (asyw->set.point) { if (asyw->set.point = false, asyw->set.mask) interlock[wndw->interlock.type] |= wndw->interlock.data; - interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data; + interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm; wndw->immd->point(wndw, asyw); wndw->immd->update(wndw, interlock); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index 757fac823a10fa9f0de9c4962829bab818044615..7e0bee3e81672c3e1dd44ad5d6a64a7299d1e22e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -12,6 +12,7 @@ struct nvkm_client { struct nvkm_client_notify *notify[32]; struct rb_root objroot; + spinlock_t obj_lock; bool super; void *data; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h index eef54e9b5d77d29fd6e745543b60ea1b4624406b..7957eafa5f0edd212ab9b976f99d04eb44e8de92 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h @@ -38,6 +38,7 @@ struct nvkm_i2c_bus { 
struct mutex mutex; struct list_head head; struct i2c_adapter i2c; + u8 enabled; }; int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *); @@ -57,6 +58,7 @@ struct nvkm_i2c_aux { struct mutex mutex; struct list_head head; struct i2c_adapter i2c; + u8 enabled; u32 intr; }; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h index 8a0f85f5fc1a86b4e47b4e06411c80a89c891bc1..6a765682fbfa2a61d252fcd880978745b922322c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h @@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp, int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **); +int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **); int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **); int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 408b955e5c39a6b41043c18fb37ae8dc9de42c04..6dd72bc32897a6099557ad4a6bf683f1c03d9868 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -116,7 +116,7 @@ nv40_backlight_init(struct drm_connector *connector) &nv40_bl_ops, &props); if (IS_ERR(bd)) { - if (bl_connector.id > 0) + if (bl_connector.id >= 0) ida_simple_remove(&bl_ida, bl_connector.id); return PTR_ERR(bd); } @@ -249,7 +249,7 @@ nv50_backlight_init(struct drm_connector *connector) nv_encoder, ops, &props); if (IS_ERR(bd)) { - if (bl_connector.id > 0) + if (bl_connector.id >= 0) ida_simple_remove(&bl_ida, bl_connector.id); return PTR_ERR(bd); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 66bf2aff4a3ed17b46e7d9cbb3dab34f6f66745b..adecda51a306d1c5dcbdf0d11b5ace6d19d709ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -25,6 +25,7 @@ #include #include "nouveau_drv.h" +#include "nouveau_bios.h" #include "nouveau_reg.h" #include "dispnv04/hw.h" #include "nouveau_encoder.h" @@ -1674,7 +1675,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) */ if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) { if (*conn == 0xf2005014 && *conf == 0xffffffff) { - fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1); + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B); return false; } } @@ -1760,26 +1761,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios) #ifdef __powerpc__ /* Apple iMac G4 NV17 */ if (of_machine_is_compatible("PowerMac4,5")) { - fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1); - fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2); + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B); + fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C); return; } #endif /* Make up some sane defaults */ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, - bios->legacy.i2c_indices.crt, 1, 1); + bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B); if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) fabricate_dcb_output(dcb, DCB_OUTPUT_TV, bios->legacy.i2c_indices.tv, - all_heads, 0); + all_heads, DCB_OUTPUT_A); else if (bios->tmds.output0_script_ptr || bios->tmds.output1_script_ptr) 
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, bios->legacy.i2c_indices.panel, - all_heads, 1); + all_heads, DCB_OUTPUT_B); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 247f72cc4d10a4547309effb6b8904e9c0d60d49..fb0094fc55834aa7ba69090a517d38ad2b332001 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector) return; if (connector->state) - __drm_atomic_helper_connector_destroy_state(connector->state); + nouveau_conn_atomic_destroy_state(connector, connector->state); __drm_atomic_helper_connector_reset(connector, &asyc->state); asyc->dither.mode = DITHERING_MODE_AUTO; asyc->dither.depth = DITHERING_DEPTH_AUTO; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index dc7454e7f19aa0ec9f22e279015a0966eedbd531..b46e99f7641ed6c48f60bb0d583a729816170d0d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -29,6 +29,7 @@ #include +#include #include #include #include @@ -37,6 +38,60 @@ struct nvkm_i2c_port; +#define nouveau_conn_atom(p) \ + container_of((p), struct nouveau_conn_atom, state) + +struct nouveau_conn_atom { + struct drm_connector_state state; + + struct { + /* The enum values specifically defined here match nv50/gf119 + * hw values, and the code relies on this. + */ + enum { + DITHERING_MODE_OFF = 0x00, + DITHERING_MODE_ON = 0x01, + DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, + DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, + DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, + DITHERING_MODE_AUTO + } mode; + enum { + DITHERING_DEPTH_6BPC = 0x00, + DITHERING_DEPTH_8BPC = 0x02, + DITHERING_DEPTH_AUTO + } depth; + } dither; + + struct { + int mode; /* DRM_MODE_SCALE_* */ + struct { + enum { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, + } mode; + u32 hborder; + u32 vborder; + } underscan; + bool full; + } scaler; + + struct { + int color_vibrance; + int vibrant_hue; + } procamp; + + union { + struct { + bool dither:1; + bool scaler:1; + bool procamp:1; + }; + u8 mask; + } set; +}; + struct nouveau_connector { struct drm_connector base; enum dcb_connector_type type; @@ -111,61 +166,6 @@ extern int nouveau_ignorelid; extern int nouveau_duallink; extern int nouveau_hdmimhz; -#include -#define nouveau_conn_atom(p) \ - container_of((p), struct nouveau_conn_atom, state) - -struct nouveau_conn_atom { - struct drm_connector_state state; - - struct { - /* The enum values specifically defined here match nv50/gf119 - * hw values, and the code relies on this. 
- */ - enum { - DITHERING_MODE_OFF = 0x00, - DITHERING_MODE_ON = 0x01, - DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, - DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, - DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, - DITHERING_MODE_AUTO - } mode; - enum { - DITHERING_DEPTH_6BPC = 0x00, - DITHERING_DEPTH_8BPC = 0x02, - DITHERING_DEPTH_AUTO - } depth; - } dither; - - struct { - int mode; /* DRM_MODE_SCALE_* */ - struct { - enum { - UNDERSCAN_OFF, - UNDERSCAN_ON, - UNDERSCAN_AUTO, - } mode; - u32 hborder; - u32 vborder; - } underscan; - bool full; - } scaler; - - struct { - int color_vibrance; - int vibrant_hue; - } procamp; - - union { - struct { - bool dither:1; - bool scaler:1; - bool procamp:1; - }; - u8 mask; - } set; -}; - void nouveau_conn_attach_properties(struct drm_connector *); void nouveau_conn_reset(struct drm_connector *); struct drm_connector_state * diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 9109b69cd052958bbc126b4bad4f490720e11f4a..41f9c53302abbfcb25e387d2cf0027583b65c928 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -161,7 +161,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (IS_ERR_VALUE(ret) && ret != -EACCES) + if (ret < 0 && ret != -EACCES) return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); @@ -182,6 +182,7 @@ static const struct file_operations nouveau_pstate_fops = { .open = nouveau_debugfs_pstate_open, .read = seq_read, .write = nouveau_debugfs_pstate_set, + .release = single_release, }; static struct drm_info_list nouveau_debugfs_list[] = { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 74d2283f2c28e7bb06f97856f93b610d12522a04..2b7a54cc3c9ef419677cd6284b215ca9f759732d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1015,8 +1015,11 @@ nouveau_driver_fops = { static struct drm_driver driver_stub = { .driver_features = - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | - DRIVER_KMS_LEGACY_CONTEXT, + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) + | DRIVER_KMS_LEGACY_CONTEXT +#endif + , .load = nouveau_drm_load, .unload = nouveau_drm_unload, diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 8ebdc74cc0ad71b66347206134eb1972ac380ee3..326948b655428dd0ff56c986f7c34d3fa57e1049 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) else nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) - /* - * A failing ttm_dma_tt_init() will call ttm_tt_destroy() - * and thus our nouveau_sgdma_destroy() hook, so we don't need - * to free nvbe here. 
- */ + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + kfree(nvbe); return NULL; + } return &nvbe->ttm.ttm; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 8edb9f2a426945be9bf88ff167cc9ccebfa2171b..e4b977cc8452895f6155212869b4c129640092bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -169,7 +169,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) +#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) return drm_legacy_mmap(filp, vma); +#else + return -EINVAL; +#endif return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index ac671202919e885fd8b19957f63c6f6217786b2e..5d745d22b5c31aeffcacb471d73b0644e0ee05e5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c @@ -300,6 +300,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, client->device = device; client->debug = nvkm_dbgopt(dbg, "CLIENT"); client->objroot = RB_ROOT; + spin_lock_init(&client->obj_lock); client->ntfy = ntfy; INIT_LIST_HEAD(&client->umem); spin_lock_init(&client->lock); diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index 301a5e5b5f7f9ca94b2a1817f05107cc9cdc490c..4c038d5715b921d31ad56e9642130910f99c92bd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c @@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle, const struct nvkm_object_func *func) { struct nvkm_object *object; + unsigned long flags; if (handle) { + spin_lock_irqsave(&client->obj_lock, flags); struct rb_node *node = client->objroot.rb_node; while (node) { object = rb_entry(node, typeof(*object), node); @@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle, else if (handle > object->object) node = node->rb_right; - else + else { + spin_unlock_irqrestore(&client->obj_lock, flags); goto done; + } } + spin_unlock_irqrestore(&client->obj_lock, flags); return ERR_PTR(-ENOENT); } else { object = &client->object; @@ -57,30 +62,39 @@ nvkm_object_search(struct nvkm_client *client, u64 handle, void nvkm_object_remove(struct nvkm_object *object) { + unsigned long flags; + + spin_lock_irqsave(&object->client->obj_lock, flags); if (!RB_EMPTY_NODE(&object->node)) rb_erase(&object->node, &object->client->objroot); + spin_unlock_irqrestore(&object->client->obj_lock, flags); } bool nvkm_object_insert(struct nvkm_object *object) { - struct rb_node **ptr = &object->client->objroot.rb_node; + struct rb_node **ptr; struct rb_node *parent = NULL; + unsigned long flags; + spin_lock_irqsave(&object->client->obj_lock, flags); + ptr = &object->client->objroot.rb_node; while (*ptr) { struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node); parent = *ptr; - if (object->object < this->object) + if (object->object < this->object) { ptr = &parent->rb_left; - else - if (object->object > this->object) + } else if (object->object > this->object) { ptr = &parent->rb_right; - else + } else { + spin_unlock_irqrestore(&object->client->obj_lock, flags); return false; + } } rb_link_node(&object->node, parent, ptr); rb_insert_color(&object->node, &object->client->objroot); + spin_unlock_irqrestore(&object->client->obj_lock, flags); return true; } diff 
--git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index e294013426ced84844d6950facd1737f63f3205c..347a6a4cb3397bea1044ad7a9d730db98b5d39d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -1613,7 +1613,7 @@ nvd7_chipset = { .pci = gf106_pci_new, .therm = gf119_therm_new, .timer = nv41_timer_new, - .volt = gf100_volt_new, + .volt = gf117_volt_new, .ce[0] = gf100_ce_new, .disp = gf119_disp_new, .dma = gf119_dma_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 5f301e632599b471c54291797b59c0d51e18be9a..818d21bd28d3122898f11d88c87bc0fec348c5ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) * and it's better to have a failed modeset than that. */ for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) - failsafe = cfg; + if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { + /* Try to respect sink limits too when selecting + * lowest link configuration. + */ + if (!failsafe || + (cfg->nr <= sink_nr && cfg->bw <= sink_bw)) + failsafe = cfg; + } + if (failsafe && cfg[1].rate < dataKBps) break; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index 816ccaedfc7326581befc7fb7f2b39607325c903..8675613e142b64a54ac6a2acf8dd0afb934dc7e5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) } } - nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); - nvkm_wr32(device, base + 0x014, 0xffffffff); + if (nvkm_mc_enabled(device, engine->subdev.index)) { + nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); + nvkm_wr32(device, base + 0x014, 0xffffffff); + } return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index 157b076a1272300f648cd0502eafee675f69f270..38c9c086754b689698950e44b801b38ed7fc671d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c @@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) struct nvkm_device *device = bar->base.subdev.device; static struct lock_class_key bar1_lock; static struct lock_class_key bar2_lock; - u64 start, limit; + u64 start, limit, size; int ret; ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem); @@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ start = 0x0100000000ULL; - limit = start + device->func->resource_size(device, 3); + size = device->func->resource_size(device, 3); + if (!size) + return -ENOMEM; + limit = start + size; ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, &bar2_lock, "bar2", &bar->bar2_vmm); @@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR1 */ start = 0x0000000000ULL; - limit = start + device->func->resource_size(device, 1); + size = device->func->resource_size(device, 1); + if (!size) + return -ENOMEM; + limit = start + size; ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0, &bar1_lock, "bar1", &bar->bar1_vmm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c 
index f3c30b2a788e8534fe72c23729779afd5e39d573..8bff14ae16b0e5bd76ef0f7541040dda3cb80e70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) *addr += bios->imaged_addr; } - if (unlikely(*addr + size >= bios->size)) { + if (unlikely(*addr + size > bios->size)) { nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); return false; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c index 7143ea4611aa3ef7d809d1d1b90148a97b1c7c67..33a9fb5ac558577fe7879c2b760d5b5d1beef3aa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c @@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, info->min = min(info->base, info->base + info->step * info->vidmask); info->max = nvbios_rd32(bios, volt + 0x0e); + if (!info->max) + info->max = max(info->base, info->base + info->step * info->vidmask); break; case 0x50: info->min = nvbios_rd32(bios, volt + 0x0a); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index 4c1f547da463afff3dee772446f92f13b2f4043e..a11637b0f6ccf43cc39c417dc64a8fb349815a22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) u8 *ptr = msg->buf; while (remaining) { - u8 cnt = (remaining > 16) ? 16 : remaining; - u8 cmd; + u8 cnt, retries, cmd; if (msg->flags & I2C_M_RD) cmd = 1; @@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if (mcnt || remaining > 16) cmd |= 4; /* MOT */ - ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); - if (ret < 0) { - nvkm_i2c_aux_release(aux); - return ret; + for (retries = 0, cnt = 0; + retries < 32 && !cnt; + retries++) { + cnt = min_t(u8, remaining, 16); + ret = aux->func->xfer(aux, true, cmd, + msg->addr, ptr, &cnt); + if (ret < 0) + goto out; + } + if (!cnt) { + AUX_TRACE(aux, "no data after 32 retries"); + ret = -EIO; + goto out; } ptr += cnt; @@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) msg++; } + ret = num; +out: nvkm_i2c_aux_release(aux); - return num; + return ret; } static u32 @@ -105,9 +115,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux) { struct nvkm_i2c_pad *pad = aux->pad; int ret; + AUX_TRACE(aux, "acquire"); mutex_lock(&aux->mutex); - ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX); + + if (aux->enabled) + ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX); + else + ret = -EIO; + if (ret) mutex_unlock(&aux->mutex); return ret; @@ -145,6 +161,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux) } } +void +nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux) +{ + AUX_TRACE(aux, "init"); + mutex_lock(&aux->mutex); + aux->enabled = true; + mutex_unlock(&aux->mutex); +} + +void +nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux) +{ + AUX_TRACE(aux, "fini"); + mutex_lock(&aux->mutex); + aux->enabled = false; + mutex_unlock(&aux->mutex); +} + int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func, struct nvkm_i2c_pad *pad, int id, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 7d56c4ba693cf6b69d7441f52faebb940a0b364d..08f6b2ee64abf01c30f8fc7d3a547af4c5291539 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h @@ -16,6 +16,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, int id, struct nvkm_i2c_aux **); void nvkm_i2c_aux_del(struct nvkm_i2c_aux **); +void nvkm_i2c_aux_init(struct nvkm_i2c_aux *); +void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *); int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 4f197b15acf6138c415185de24cbb3eb04887295..719345074711144d13b85b7e75ccac2c8eeb0c5b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_i2c *i2c = nvkm_i2c(subdev); struct nvkm_i2c_pad *pad; + struct nvkm_i2c_bus *bus; + struct nvkm_i2c_aux *aux; u32 mask; + list_for_each_entry(aux, &i2c->aux, head) { + nvkm_i2c_aux_fini(aux); + } + + list_for_each_entry(bus, &i2c->bus, head) { + nvkm_i2c_bus_fini(bus); + } + if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) { i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0); i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask); @@ -174,12 +184,32 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } +static int +nvkm_i2c_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_i2c *i2c = nvkm_i2c(subdev); + struct nvkm_i2c_bus *bus; + struct nvkm_i2c_pad *pad; + + /* + * We init our i2c busses as early as possible, since they may be + * needed by the vbios init scripts on some cards + */ + list_for_each_entry(pad, &i2c->pad, head) + nvkm_i2c_pad_init(pad); + list_for_each_entry(bus, &i2c->bus, head) + nvkm_i2c_bus_init(bus); + + return 0; +} + static int nvkm_i2c_init(struct nvkm_subdev *subdev) { struct nvkm_i2c *i2c = nvkm_i2c(subdev); struct nvkm_i2c_bus *bus; struct nvkm_i2c_pad *pad; + struct nvkm_i2c_aux *aux; list_for_each_entry(pad, &i2c->pad, head) { nvkm_i2c_pad_init(pad); @@ -189,6 +219,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev) nvkm_i2c_bus_init(bus); } + list_for_each_entry(aux, &i2c->aux, head) { + nvkm_i2c_aux_init(aux); + } + return 0; } @@ -223,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev) static const struct nvkm_subdev_func nvkm_i2c = { .dtor = nvkm_i2c_dtor, + .preinit = nvkm_i2c_preinit, .init = nvkm_i2c_init, .fini = nvkm_i2c_fini, .intr = nvkm_i2c_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c index 807a2b67bd641e28df0820a5cf7723e9f508152e..ed50cc3736b925fe7295e122aba3b17e1e09b362 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c @@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus) BUS_TRACE(bus, "init"); if (bus->func->init) bus->func->init(bus); + + mutex_lock(&bus->mutex); + bus->enabled = true; + mutex_unlock(&bus->mutex); +} + +void +nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus) +{ + BUS_TRACE(bus, "fini"); + mutex_lock(&bus->mutex); + bus->enabled = false; + mutex_unlock(&bus->mutex); } void @@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus) { struct nvkm_i2c_pad *pad = bus->pad; int ret; + BUS_TRACE(bus, "acquire"); mutex_lock(&bus->mutex); - ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C); + + if (bus->enabled) + ret = 
nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C); + else + ret = -EIO; + if (ret) mutex_unlock(&bus->mutex); return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h index bea0dd33961e1e09537c15863d9881cf3183423c..465464bba58b6eb1d88259e717f2dcd2a0c95d79 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h @@ -18,6 +18,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *, int id, struct nvkm_i2c_bus **); void nvkm_i2c_bus_del(struct nvkm_i2c_bus **); void nvkm_i2c_bus_init(struct nvkm_i2c_bus *); +void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *); int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index db48a1daca0c7a3d786332ce25435839fcc10760..f8ca79eaa7f7bc7bb13e5c0e6cd2145650c85124 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory) void __iomem *map = NULL; /* Already mapped? */ - if (refcount_inc_not_zero(&iobj->maps)) + if (refcount_inc_not_zero(&iobj->maps)) { + /* read barrier match the wmb on refcount set */ + smp_rmb(); return iobj->map; + } /* Take the lock, and re-check that another thread hasn't * already mapped the object in the meantime. @@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory) iobj->base.memory.ptrs = &nv50_instobj_fast; else iobj->base.memory.ptrs = &nv50_instobj_slow; + /* barrier to ensure the ptrs are written before refcount is set */ + smp_wmb(); refcount_set(&iobj->maps, 1); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c index d02e183717dc4575bd667116ab90caaaa8289968..5c14d6ac855d2c96b1622f6bc83392cc02cd52f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c @@ -801,6 +801,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, bl = acr->hsbl_unload_blob; } else { nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); + kfree(bl_desc); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index 3695cde669f881335445fb14db5e15f8c26f565a..07914e36939e3d80b557ee1267ab3f9f0f648ca2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) duty = nvkm_therm_update_linear(therm); break; case NVBIOS_THERM_FAN_OTHER: - if (therm->cstate) + if (therm->cstate) { duty = therm->cstate; - else + poll = false; + } else { duty = nvkm_therm_update_linear_fallback(therm); - poll = false; + } break; } immd = false; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild index bcd179ba11d0d2cd11fa40ef1c7559843a8a1881..146adcdd316a40fc6265aa572fddeb73425e7ac9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild @@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o nvkm-y += nvkm/subdev/volt/gpio.o nvkm-y += nvkm/subdev/volt/nv40.o nvkm-y += nvkm/subdev/volt/gf100.o +nvkm-y += nvkm/subdev/volt/gf117.o nvkm-y += nvkm/subdev/volt/gk104.o nvkm-y += 
nvkm/subdev/volt/gk20a.o nvkm-y += nvkm/subdev/volt/gm20b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c new file mode 100644 index 0000000000000000000000000000000000000000..547a58f0aeac326b3b7685d5a04fa1956e77b1c6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c @@ -0,0 +1,60 @@ +/* + * Copyright 2019 Ilia Mirkin + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ilia Mirkin + */ +#include "priv.h" + +#include + +static int +gf117_volt_speedo_read(struct nvkm_volt *volt) +{ + struct nvkm_device *device = volt->subdev.device; + struct nvkm_fuse *fuse = device->fuse; + + if (!fuse) + return -EINVAL; + + return nvkm_fuse_read(fuse, 0x3a8); +} + +static const struct nvkm_volt_func +gf117_volt = { + .oneinit = gf100_volt_oneinit, + .vid_get = nvkm_voltgpio_get, + .vid_set = nvkm_voltgpio_set, + .speedo_read = gf117_volt_speedo_read, +}; + +int +gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) +{ + struct nvkm_volt *volt; + int ret; + + ret = nvkm_volt_new_(&gf117_volt, device, index, &volt); + *pvolt = volt; + if (ret) + return ret; + + return nvkm_voltgpio_init(volt); +} diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 74467b308721863986e3c492a8f97d7214b1c7f3..8160954ebc25713722c23c1cc57018012fe5b35a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1386,12 +1386,9 @@ static int dsi_pll_enable(struct dss_pll *pll) */ dsi_enable_scp_clk(dsi); - if (!dsi->vdds_dsi_enabled) { - r = regulator_enable(dsi->vdds_dsi_reg); - if (r) - goto err0; - dsi->vdds_dsi_enabled = true; - } + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; /* XXX PLL does not come out of reset without this... 
*/ dispc_pck_free_enable(dsi->dss->dispc, 1); @@ -1416,36 +1413,25 @@ static int dsi_pll_enable(struct dss_pll *pll) return 0; err1: - if (dsi->vdds_dsi_enabled) { - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + regulator_disable(dsi->vdds_dsi_reg); err0: dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); return r; } -static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes) +static void dsi_pll_disable(struct dss_pll *pll) { + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + dsi_pll_power(dsi, DSI_PLL_POWER_OFF); - if (disconnect_lanes) { - WARN_ON(!dsi->vdds_dsi_enabled); - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + + regulator_disable(dsi->vdds_dsi_reg); dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); - DSSDBG("PLL uninit done\n"); -} - -static void dsi_pll_disable(struct dss_pll *pll) -{ - struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); - - dsi_pll_uninit(dsi, true); + DSSDBG("PLL disable done\n"); } static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) @@ -4195,11 +4181,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) r = dss_pll_enable(&dsi->pll); if (r) - goto err0; + return r; r = dsi_configure_dsi_clocks(dsi); if (r) - goto err1; + goto err0; dss_select_dsi_clk_source(dsi->dss, dsi->module_id, dsi->module_id == 0 ? @@ -4207,6 +4193,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) DSSDBG("PLL OK\n"); + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err1; + + dsi->vdds_dsi_enabled = true; + } + r = dsi_cio_init(dsi); if (r) goto err2; @@ -4235,10 +4229,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) err3: dsi_cio_uninit(dsi); err2: - dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; err1: - dss_pll_disable(&dsi->pll); + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); err0: + dss_pll_disable(&dsi->pll); + return r; } @@ -4257,7 +4254,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsi); - dsi_pll_uninit(dsi, disconnect_lanes); + dss_pll_disable(&dsi->pll); + + if (disconnect_lanes) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } } static int dsi_display_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index cb80ddaa19d265e63f739e32d96ea0b7aecfcf65..7e9e2f064454485b78bbf20fe678f80d365b0ae0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1110,7 +1110,7 @@ static const struct dss_features omap34xx_dss_feats = { static const struct dss_features omap3630_dss_feats = { .model = DSS_MODEL_OMAP3, - .fck_div_max = 32, + .fck_div_max = 31, .fck_freq_max = 173000000, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 340383150fb98d24567b6ca74cae0298cda806b6..ebf9c96d43eee56649e510a5ca8c53a045b10c67 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3); hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE); 
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE); + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); hdmi4_core_disable(core); return 0; } @@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) if (err) return err; + /* + * Initialize CEC clock divider: CEC needs 2MHz clock hence + * set the divider to 24 to get 48/24=2MHz clock + */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Clear TX FIFO */ if (!hdmi_cec_clear_tx_fifo(adap)) { pr_err("cec-%s: could not clear TX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear RX FIFO */ if (!hdmi_cec_clear_rx_fifo(adap)) { pr_err("cec-%s: could not clear RX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear CEC interrupts */ @@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp); } return 0; + +err_disable_clk: + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); + hdmi4_core_disable(core); + + return err; } static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) @@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core, return ret; core->wp = wp; - /* - * Initialize CEC clock divider: CEC needs 2MHz clock hence - * set the devider to 24 to get 48/24=2MHz clock - */ - REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Disable clock initially, hdmi_cec_adap_enable() manages it */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); ret = cec_register_adapter(core->adap, &pdev->dev); if (ret < 0) { diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f92fe205550bc9c65df236e97be485fbb428ee87..e884183c018ac6c0d915073257116072537700c5 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -285,6 +285,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) } txn->last_pat->next_pa = 0; + /* ensure that the written descriptors are visible to DMM */ + wmb(); + + /* + * NOTE: the wmb() above should be enough, but there seems to be a bug + * in OMAP's memory barrier implementation, which in some rare cases may + * cause the writes not to be observable after wmb(). 
+ */ + + /* read back to ensure the data is in RAM */ + readl(&txn->last_pat->next_pa); /* write to PAT_DESCR to clear out any pending transaction */ dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 3ad4a46c4e945d2b49785c8d7298cb1d7d26bb30..cc11cf41d392cee2a9033ecb25d513cd96bef3a8 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -307,10 +307,10 @@ static int ili9881c_prepare(struct drm_panel *panel) msleep(5); /* And reset it */ - gpiod_set_value(ctx->reset, 1); + gpiod_set_value_cansleep(ctx->reset, 1); msleep(20); - gpiod_set_value(ctx->reset, 0); + gpiod_set_value_cansleep(ctx->reset, 0); msleep(20); for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) { @@ -367,7 +367,7 @@ static int ili9881c_unprepare(struct drm_panel *panel) mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); regulator_disable(ctx->power); - gpiod_set_value(ctx->reset, 1); + gpiod_set_value_cansleep(ctx->reset, 1); return 0; } diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index 72edb334d9976840b0649debe31ea2819b5d6c74..88c7d035ace66c592b32d330be5b1a33e44a30e4 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel) static int innolux_panel_disable(struct drm_panel *panel) { struct innolux_panel *innolux = to_innolux_panel(panel); - int err; if (!innolux->enabled) return 0; backlight_disable(innolux->backlight); - err = mipi_dsi_dcs_set_display_off(innolux->link); - if (err < 0) - DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n", - err); - innolux->enabled = false; return 0; @@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel) if (!innolux->prepared) return 0; + err = mipi_dsi_dcs_set_display_off(innolux->link); + if (err < 0) + DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n", + err); + err = mipi_dsi_dcs_enter_sleep_mode(innolux->link); if (err < 0) { DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n", diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index 87fa316e1d7b09a60778751acc51972c759a1c84..58ccf648b70fbde9cbed5241e99ab8119a9af416 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx) /* Send Command GRAM memory write (no parameters) */ dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START); + /* Wait a short while to let the panel be ready before the 1st frame */ + mdelay(10); + return 0; } diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 2c9c9722734f586d27af64d9df5ca156a46a31fd..aab6a70ece7f05d33d713cb49d5d65932112617a 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, /* Look up the DSI host. It needs to probe before we do. 
*/ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (!endpoint) + return -ENODEV; + dsi_host_node = of_graph_get_remote_port_parent(endpoint); + if (!dsi_host_node) + goto error; + host = of_find_mipi_dsi_host_by_node(dsi_host_node); of_node_put(dsi_host_node); if (!host) { @@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, } info.node = of_graph_get_remote_port(endpoint); + if (!info.node) + goto error; + of_node_put(endpoint); ts->dsi = mipi_dsi_device_register_full(host, &info); @@ -418,6 +427,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, return PTR_ERR(ts->dsi); } + drm_panel_init(&ts->base); ts->base.dev = dev; ts->base.funcs = &rpi_touchscreen_funcs; @@ -429,6 +439,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, return ret; return 0; + +error: + of_node_put(endpoint); + return -ENODEV; } static int rpi_touchscreen_remove(struct i2c_client *i2c) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 97964f7f2acee08350101947a4ccd9a717f5f199..654fea2b43124777b7afad6bb8d920da90d61cd5 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -436,6 +436,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct display_timing santek_st0700i5y_rbslw_f_timing = { + .pixelclock = { 26400000, 33300000, 46800000 }, + .hactive = { 800, 800, 800 }, + .hfront_porch = { 16, 210, 354 }, + .hback_porch = { 45, 36, 6 }, + .hsync_len = { 1, 10, 40 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 7, 22, 147 }, + .vback_porch = { 22, 13, 3 }, + .vsync_len = { 1, 10, 20 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE +}; + +static const struct panel_desc armadeus_st0700_adapt = { + .timings = &santek_st0700i5y_rbslw_f_timing, + .num_timings = 1, + .bpc = 6, + .size = { + .width = 154, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, +}; + static const struct drm_display_mode auo_b101aw03_mode = { .clock = 51450, .hdisplay = 1024, @@ -663,9 +689,9 @@ static const struct panel_desc auo_g133han01 = { static const struct display_timing auo_g185han01_timings = { .pixelclock = { 120000000, 144000000, 175000000 }, .hactive = { 1920, 1920, 1920 }, - .hfront_porch = { 18, 60, 74 }, - .hback_porch = { 12, 44, 54 }, - .hsync_len = { 10, 24, 32 }, + .hfront_porch = { 36, 120, 148 }, + .hback_porch = { 24, 88, 108 }, + .hsync_len = { 20, 48, 64 }, .vactive = { 1080, 1080, 1080 }, .vfront_porch = { 6, 10, 40 }, .vback_porch = { 2, 5, 20 }, @@ -2330,6 +2356,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "ampire,am800480r3tmqwa1h", .data = &ire_am800480r3tmqwa1h, + }, { + .compatible = "armadeus,st0700-adapt", + .data = &armadeus_st0700_adapt, }, { .compatible = "auo,b101aw03", .data = &auo_b101aw03, @@ -2803,7 +2832,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = desc->format; dsi->lanes = desc->lanes; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err) { + struct panel_simple *panel = dev_get_drvdata(&dsi->dev); + + drm_panel_remove(&panel->base); + } + + return err; } static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 
74284e5afc5d9f78b6746554ecc124f641b89276..89fa17877b336b4d6e47bcd2d20a99a9c4d79865 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -380,6 +380,7 @@ static int st7789v_probe(struct spi_device *spi) spi_set_drvdata(spi, ctx); ctx->spi = spi; + drm_panel_init(&ctx->panel); ctx->panel.dev = &spi->dev; ctx->panel.funcs = &st7789v_drm_funcs; diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..3619790fa8e88028a33f2f9e40bb69e1a73bd037 --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,7 @@ +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM && ARCH_PHYTIUM + select DRM_KMS_HELPER + help + Choose this option if you have a phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f1b25a511fef0bf7dd5ef7fd64c28e37e42b6ff6 --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,18 @@ +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + x100_dp.o \ + phytium_panel.o \ + x100_dc.o \ + phytium_pci.o \ + e2000_dp.o \ + e2000_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/e2000_dc.c b/drivers/gpu/drm/phytium/e2000_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..8d7a6070f54864711b76ed4d69e1d09bc6981456 --- /dev/null +++ b/drivers/gpu/drm/phytium/e2000_dc.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium E2000 display controller DRM driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "e2000_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void e2000_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int e2000_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t e2000_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int e2000_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void e2000_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, E2000_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, E2000_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, E2000_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, E2000_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void e2000_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, E2000_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void e2000_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + e2000_dc_hw_disable(crtc); + + config = phytium_readl_reg(priv, 0, E2000_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + 
phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + } +} + +void e2000_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + e2000_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, E2000_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, E2000_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, E2000_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, E2000_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 0, E2000_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int e2000_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void e2000_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = e2000_primary_formats_modifiers; + *formats = e2000_primary_formats; + *format_count = ARRAY_SIZE(e2000_primary_formats); +} + +void e2000_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = NULL; + *formats = e2000_cursor_formats; + *format_count = ARRAY_SIZE(e2000_cursor_formats); +} + +void e2000_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], 
E2000_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], E2000_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], E2000_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void e2000_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], E2000_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/e2000_dc.h b/drivers/gpu/drm/phytium/e2000_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..87c24ab63aebb9e6b305956de1f7b59cc57737d3 --- /dev/null +++ b/drivers/gpu/drm/phytium/e2000_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium E2000 display controller DRM driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __E2000_DC_H__ +#define __E2000_DC_H__ + +#define E2000_DC_PIX_CLOCK_MAX (594000) +#define E2000_DC_HDISPLAY_MAX 3840 +#define E2000_DC_VDISPLAY_MAX 2160 +#define E2000_DC_ADDRESS_MASK 0x3f + +extern void e2000_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void e2000_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void e2000_dc_hw_disable(struct drm_crtc *crtc); +extern int e2000_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void e2000_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void e2000_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void e2000_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void e2000_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova); +void e2000_dc_hw_reset(struct drm_crtc *crtc); +#endif /* __E2000_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/e2000_dp.c b/drivers/gpu/drm/phytium/e2000_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..89b66b420fc7b95b2b57254ab086f8f391b399f0 --- /dev/null +++ b/drivers/gpu/drm/phytium/e2000_dp.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "e2000_reg.h" +#include "phytium_dp.h" +#include "e2000_dp.h" + +static uint8_t e2000_dp_source_lane_count[2] = {1, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, /* CP_PADJ */ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, /* CP_IADJ */ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, /* FILT_PADJ */ + {0x0061, 0x006C, 0x006C, 0x0051}, /* INTDIV */ + {0x3333, 0x0000, 0x0000, 0x0000}, /* FRACDIVL */ + {0x0000, 0x0000, 0x0000, 0x0000}, /* FRACDIVH */ + {0x0042, 0x0048, 0x0048, 0x0036}, /* HIGH_THR */ + {0x0002, 0x0002, 0x0002, 0x0002}, /* PDIAG_CTRL */ + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, /* VCOCAL_PLLCNT_START */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PEFCNT */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PLLCNT_START */ + {0x0005, 0x0005, 0x0005, 0x0005}, /* LOCK_PLLCNT_THR */ +}; + +/* [link_rate][swing][emphasis] */ +static int mgnfs_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x001f, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +/* [link_rate][swing][emphasis] */ +static int cpost_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int e2000_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port%2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, E2000_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 
0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void e2000_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int e2000_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, E2000_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, E2000_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, E2000_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, E2000_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, E2000_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, E2000_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, E2000_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, E2000_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + 
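+		/* PMA_CONTROL2 never reported PLL0_LOCK_DONE after the pipe reset */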
DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void e2000_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, E2000_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void e2000_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, E2000_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void e2000_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, E2000_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void e2000_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, E2000_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t e2000_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, E2000_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int e2000_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0; + int ret = 0; + + if (level > E2000_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, E2000_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool e2000_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int e2000_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = 
phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, E2000_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t e2000_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return e2000_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func e2000_dp_funcs = { + .dp_hw_get_source_lane_count = e2000_dp_hw_get_source_lane_count, + .dp_hw_reset = e2000_dp_hw_reset, + .dp_hw_spread_is_enable = e2000_dp_hw_spread_is_enable, + .dp_hw_set_backlight = e2000_dp_hw_set_backlight, + .dp_hw_get_backlight = e2000_dp_hw_get_backlight, + .dp_hw_disable_backlight = e2000_dp_hw_disable_backlight, + .dp_hw_enable_backlight = e2000_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = e2000_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = e2000_dp_hw_poweron_panel, + .dp_hw_init_phy = e2000_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = e2000_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = e2000_dp_hw_set_phy_lane_and_rate, +}; + +void e2000_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &e2000_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/e2000_dp.h b/drivers/gpu/drm/phytium/e2000_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..4db96044c167f4c8baf3edc7a6171c6682f9b7fa --- /dev/null +++ b/drivers/gpu/drm/phytium/e2000_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __E2000_DP_H__ +#define __E2000_DP_H__ + +#define E2000_DP_BACKLIGHT_MAX 100 + +void e2000_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __E2000_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/e2000_reg.h b/drivers/gpu/drm/phytium/e2000_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..a9ed601e1674d7f35283b1f9e9200c98ab72ec1a --- /dev/null +++ b/drivers/gpu/drm/phytium/e2000_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium E2000 display engine register + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
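+ *
+ * The DC command register defined below implements a request/reply handshake
+ * with firmware: the driver writes FLAG_REQUEST plus a command code and
+ * payload, then polls the same register until FLAG_REPLY is set (see
+ * phytium_wait_cmd_done()).  A minimal illustrative sketch, assuming a
+ * backlight level of 75 on pipe 0:
+ *
+ *   config = FLAG_REQUEST | CMD_BACKLIGHT | ((75 & BACKLIGHT_MASK) << BACKLIGHT_SHIFT);
+ *   phytium_writel_reg(priv, config, 0, E2000_DC_CMD_REGISTER(0));
+ *   phytium_wait_cmd_done(priv, E2000_DC_CMD_REGISTER(0), FLAG_REQUEST, FLAG_REPLY);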
+ */ + +#ifndef __E2000_REG_H__ +#define __E2000_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define E2000_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define E2000_DC_CMD_REGISTER(pipe) (E2000_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define E2000_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define E2000_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define E2000_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define E2000_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define E2000_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define E2000_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define E2000_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define E2000_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define E2000_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define E2000_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* phy register start */ +#define E2000_PHY_BASE(pipe) (0x100000*pipe) + +#define E2000_PHY_PIPE_RESET(pipe) (E2000_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define E2000_PHY_MODE(pipe) (E2000_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define E2000_PHY_LINK_CFG(pipe) (E2000_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define E2000_PHY_PLL_EN(pipe) (E2000_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define E2000_PHY_PMA_WIDTH(pipe) (E2000_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define E2000_PHY_PLL_SOURCE_SEL(pipe) (E2000_PHY_BASE(pipe) + 0x4004C) + +#define E2000_PHY_PMA0_POWER(pipe) (E2000_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define E2000_PHY_LINK_RESET(pipe) (E2000_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define E2000_PHY_SGMII_DPSEL_INIT(pipe) (E2000_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define E2000_PHY_APB_RESET(pipe) (E2000_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define E2000_PHY_PLL_CFG(pipe) (E2000_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define E2000_PHY_PMA_CONTROL(pipe) (E2000_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define CONTROL_ENABLE_SHIFT 0x1 + +#define 
E2000_PHY_PMA_CONTROL2(pipe) (E2000_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define E2000_PHY_PLL0_CLK_SEL(pipe) (E2000_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define E2000_PHY_HSCLK0_SEL(pipe) (E2000_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define HSCLK_LINK_1 0x1 + +#define E2000_PHY_HSCLK0_DIV(pipe) (E2000_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define E2000_PHY_PLLDRC0_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define E2000_PHY_PLL0_DSM_M0(pipe) (E2000_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define E2000_PHY_PLL0_VCOCAL_START(pipe) (E2000_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define E2000_PHY_PLL0_VCOCAL_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define E2000_PHY_PLL0_CP_PADJ(pipe) (E2000_PHY_BASE(pipe) + 0x690) +#define E2000_PHY_PLL0_CP_IADJ(pipe) (E2000_PHY_BASE(pipe) + 0x694) +#define E2000_PHY_PLL0_CP_FILT_PADJ(pipe) (E2000_PHY_BASE(pipe) + 0x698) +#define E2000_PHY_PLL0_INTDIV(pipe) (E2000_PHY_BASE(pipe) + 0x240) +#define E2000_PHY_PLL0_FRACDIVL(pipe) (E2000_PHY_BASE(pipe) + 0x244) +#define E2000_PHY_PLL0_FRACDIVH(pipe) (E2000_PHY_BASE(pipe) + 0x248) +#define E2000_PHY_PLL0_HIGH_THR(pipe) (E2000_PHY_BASE(pipe) + 0x24c) +#define E2000_PHY_PLL0_PDIAG_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x680) +#define E2000_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (E2000_PHY_BASE(pipe) + 0x220) +#define E2000_PHY_PLL0_LOCK_PEFCNT(pipe) (E2000_PHY_BASE(pipe) + 0x270) +#define E2000_PHY_PLL0_LOCK_PLLCNT_START(pipe) (E2000_PHY_BASE(pipe) + 0x278) +#define E2000_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (E2000_PHY_BASE(pipe) + 0x27c) + +#define E2000_PHY_PLL0_TX_PSC_A0(pipe) (E2000_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define E2000_PHY_PLL0_TX_PSC_A2(pipe) (E2000_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define E2000_PHY_PLL0_TX_PSC_A3(pipe) (E2000_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define E2000_PHY_PLL0_RX_PSC_A0(pipe) (E2000_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define E2000_PHY_PLL0_RX_PSC_A2(pipe) (E2000_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define E2000_PHY_PLL0_RX_PSC_A3(pipe) (E2000_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define E2000_PHY_PLL0_RX_PSC_CAL(pipe) (E2000_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define E2000_PHY_PLL0_XCVR_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define E2000_PHY_PLL0_RX_GCSM1_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x28420) +#define PLL0_RX_GCSM1_CTRL 0x0 +#define E2000_PHY_PLL0_RX_GCSM2_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define E2000_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define E2000_PHY_PLL0_TX_DIAG_ACYA(pipe) (E2000_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define E2000_PHY_PLL0_TX_TXCC_CTRL(pipe) (E2000_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define E2000_PHY_PLL0_TX_DRV(pipe) (E2000_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + +#define E2000_PHY_PLL0_TX_MGNFS(pipe) (E2000_PHY_BASE(pipe) + 0x18140) + +#define 
E2000_PHY_PLL0_TX_CPOST(pipe) (E2000_PHY_BASE(pipe) + 0x18130) + +#endif /* __E2000_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c new file mode 100644 index 0000000000000000000000000000000000000000..a967b1aa3f9cd8f12e9678b3370ae3c1e80a6971 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_dp.h" +#include "x100_dc.h" +#include "e2000_dc.h" +#include "phytium_reg.h" + +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define MATH_Add(X, Y) (float)((X) + (Y)) +#define MATH_Multiply(X, Y) (float)((X) * (Y)) +#define MATH_Divide(X, Y) (float)((X) / (Y)) +#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define MATH_I2Float(X) (float)(X) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static void phytium_crtc_gamma_set(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut; + int i; + + if (state->gamma_lut) { + if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), + "gamma size is not match\n")) + return; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } + } +} + +static void phytium_crtc_gamma_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + uint16_t *red, *green, *blue; + int i; + + if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) + return; + + red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, 
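+		/* each 16-bit gamma channel from gamma_store is truncated (>> 6) before being packed into one LUT word */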
group_offset, PHYTIUM_DC_GAMMA_DATA); + } +} + +static void phytium_crtc_destroy(struct drm_crtc *crtc) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(phytium_crtc); +} + +struct drm_crtc_state * +phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct phytium_crtc_state *phytium_crtc_state = NULL; + + phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), + GFP_KERNEL); + if (!phytium_crtc_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, + &phytium_crtc_state->base); + + return &phytium_crtc_state->base; +} + +void +phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct phytium_crtc_state *phytium_crtc_state = + to_phytium_crtc_state(state); + + phytium_crtc_state = to_phytium_crtc_state(state); + __drm_atomic_helper_crtc_destroy_state(state); + kfree(phytium_crtc_state); +} + +static const struct drm_crtc_funcs phytium_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .set_config = drm_atomic_helper_set_config, + .destroy = phytium_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, + .atomic_destroy_state = phytium_crtc_atomic_destroy_state, +}; + +static void +phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_atomic_state *state = old_state->state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + phytium_crtc->bpc = DP_RGB888; + break; + } + } + + /* config pix clock */ + phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
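+	/* modes without DRM_MODE_FLAG_PVSYNC program active-low (NEGATIVE) vsync */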
0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + config |= FRAMEBUFFER_GAMMA_ENABLE; + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if 
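+	/* arm VALID_PENDING only while the framebuffer output path is up (FRAMEBUFFER_RESET set); atomic_flush clears it */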
(config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_X100(priv)) { + phytium_crtc->dc_hw_config_pix_clock = x100_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = x100_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + 
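+		/* X100 has no dedicated DC reset hook; its per-pipe register bases come from the X100 memory map below */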
priv->dc_reg_base[phys_pipe] = X100_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = X100_DCREQ_BASE(phys_pipe); + priv->address_transform_base = X100_ADDRESS_TRANSFORM_BASE; + } else if (IS_E2000(priv)) { + phytium_crtc->dc_hw_config_pix_clock = e2000_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = e2000_dc_hw_disable; + phytium_crtc->dc_hw_reset = e2000_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = E2000_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = E2000_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 0000000000000000000000000000000000000000..a1ae9a9736e996a0c14e083f8aa51e78fcce7a42 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
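+ *
+ * The dc_hw_* callbacks in struct phytium_crtc are filled in per SoC
+ * (X100 or E2000) by phytium_crtc_init(); for example, atomic_enable
+ * calls phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock).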
+ */ + +#ifndef __PHYTIUM_CRTC_H__ +#define __PHYTIUM_CRTC_H__ + +struct phytium_crtc { + struct drm_crtc base; + int phys_pipe; + unsigned int bpc; + + /* scale */ + uint32_t src_width; + uint32_t src_height; + uint32_t dst_width; + uint32_t dst_height; + uint32_t dst_x; + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; + + void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); + void (*dc_hw_disable)(struct drm_crtc *crtc); + void (*dc_hw_reset)(struct drm_crtc *crtc); +}; + +struct phytium_crtc_state { + struct drm_crtc_state base; +}; + +#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) +#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + +void phytium_crtc_resume(struct drm_device *drm_dev); +int phytium_crtc_init(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..d43fd120be59773aba5528c93d8cf5025436f699 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_reg.h" + +const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = { + "Memory_Vram_Total", + "Memory_Vram_Alloc", + "Memory_System_Carveout_Total", + "Memory_System_Carveout_Alloc", + "Memory_System_Alloc", +}; + +static ssize_t +phytium_dp_register_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_register_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); + seq_printf(m, 
"addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = 
seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. 
*/ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + 
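+	/* writes are accepted but currently discarded by phytium_dp_state_write() */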
.write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} + +static int phytium_mem_state_show(struct seq_file *m, void *data) +{ + struct phytium_display_private *priv = m->private; + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(mem_state); i++) + seq_printf(m, "%-34s %10lld\n", mem_state[i], priv->mem_state[i]); + + return 0; +} + +static int phytium_mem_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_mem_state_show, inode->i_private); +} + +static const struct file_operations phytium_mem_state_fops = { + .owner = THIS_MODULE, + .open = phytium_mem_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct phytium_debugfs_files phytium_debugfs_display_files[] = { + {"mem_state", &phytium_mem_state_fops}, +}; + +int phytium_debugfs_display_register(struct phytium_display_private *priv) +{ + struct drm_minor *minor = priv->dev->primary; + struct dentry *root = minor->debugfs_root; + struct dentry *ent; + + if (!root) + return -ENODEV; + + ent = debugfs_create_file(phytium_debugfs_display_files[0].name, + 0644, + root, + priv, + phytium_debugfs_display_files[0].fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..303068dca04940e55e6db4806b13b8f4a6730b65 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
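+ *
+ * phytium_debugfs_connector_add() creates dp_state, dpcd, dp_register and
+ * dp_trigger_train_fail under the connector's debugfs directory (typically
+ * /sys/kernel/debug/dri/<card>/<connector>/), plus edp_power and
+ * edp_backlight for eDP connectors; phytium_debugfs_display_register()
+ * adds a device-level mem_state file.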
+ */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_connector_add(struct drm_connector *connector); +int phytium_debugfs_display_register(struct phytium_display_private *priv); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..a4beb7110b96a410aa93ea35d5372d99ecba9027 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_crtc.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "phytium_fb.h" +#include "phytium_fbdev.h" +#include "phytium_reg.h" +#include "phytium_pci.h" +#include "phytium_platform.h" +#include "phytium_debugfs.h" + +int dc_fake_mode_enable; +module_param(dc_fake_mode_enable, int, 0644); +MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)"); + +int dc_fast_training_check = 1; +module_param(dc_fast_training_check, int, 0644); +MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)"); + +int num_source_rates = 4; +module_param(num_source_rates, int, 0644); +MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)"); + +int source_max_lane_count = 4; +module_param(source_max_lane_count, int, 0644); +MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); + +int link_dynamic_adjust; +module_param(link_dynamic_adjust, int, 0644); +MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); + +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit) +{ + int timeout = 500, config = 0, ret = 0; + + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((!(config & reply_bit)) && timeout); + + phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); + + if (timeout == 0) { + DRM_ERROR("wait cmd reply timeout\n"); + ret = -EBUSY; + } else { + timeout = 500; + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((config & reply_bit) && timeout); + if (timeout == 0) { + DRM_ERROR("clear cmd timeout\n"); + ret = -EBUSY; + } + } + mdelay(5); + + return ret; +} + +static void phytium_irq_preinstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static void phytium_irq_uninstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static 
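+/*
+ * Shared interrupt handler: scan every pipe in the pipe mask for a pending
+ * DC vblank interrupt, then let the DP code service its HPD sources.
+ */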
irqreturn_t phytium_display_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct phytium_display_private *priv = dev->dev_private; + bool enabled = 0; + int i = 0, virt_pipe = 0; + irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; + + for_each_pipe_masked(priv, i) { + enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + if (enabled & INT_STATUS) { + virt_pipe = phytium_get_virt_pipe(priv, i); + if (virt_pipe < 0) + return IRQ_NONE; + drm_handle_vblank(dev, virt_pipe); + ret = IRQ_HANDLED; + if (priv->dc_hw_clear_msi_irq) + priv->dc_hw_clear_msi_irq(priv, i); + } + } + + ret1 = phytium_dp_hpd_irq_handler(priv); + if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe < 0) + return phys_pipe; + + phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; +} + +static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe >= 0) + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_INT_ENABLE); +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.allow_fb_modifiers = true; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + 
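+	/* the virtual pipe index counts only the pipes visited by for_each_pipe_masked(), in ascending physical order */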
for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} + +static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_irq_install(dev, priv->irq); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + + phytium_debugfs_display_register(priv); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + phytium_drm_fbdev_fini(dev); + drm_irq_uninstall(dev); + drm_mode_config_cleanup(dev); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* for test, none so far */ +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | + DRIVER_PRIME | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + .lastclose = drm_fb_helper_lastclose, + .irq_handler = phytium_display_irq_handler, + .irq_preinstall = phytium_irq_preinstall, + .irq_uninstall = phytium_irq_uninstall, + .enable_vblank = phytium_enable_vblank, + .disable_vblank = phytium_disable_vblank, + .gem_free_object = phytium_gem_free_object, + .gem_vm_ops = &phytium_vm_ops, + .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, + .gem_prime_vmap = phytium_gem_prime_vmap, + .gem_prime_vunmap = phytium_gem_prime_vunmap, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .gem_prime_mmap = phytium_gem_prime_mmap, + .dumb_create = phytium_gem_dumb_create, + .dumb_destroy = phytium_gem_dumb_destroy, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct 
drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&phytium_platform_driver); + if (ret) + return ret; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); + + platform_driver_unregister(&phytium_platform_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..396cbe29f21a3cddeecef445483b996c205c752b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_X100, + PHYTIUM_PLATFORM_E2000, +}; + +enum phytium_mem_state_type { + PHYTIUM_MEM_VRAM_TOTAL = 0, + PHYTIUM_MEM_VRAM_ALLOC, + PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL, + PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC, + PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC, + PHYTIUM_MEM_STATE_TYPE_COUNT, +}; + +#define MEMORY_TYPE_VRAM 0x1 +#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 +#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_X100(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_X100) +#define IS_E2000(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_E2000) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + char support_memory_type; + char reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void (*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + 
pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..ae1415c139474e31bb864261db110c3d4b049c9e --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2590 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "x100_dp.h" +#include "e2000_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + uint32_t data; + + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); + data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); +#if 
DEBUG_LOG + pr_info("phy address read: 0x%x data:0x%x\n", address, data); +#endif + + return data; +} + +static int +phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0, j = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + /* clear X100_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + for (j = 0; j < msg->size; j++) + phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); + + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= (msg->size-1) & BYTE_COUNT_MASK; + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux write reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux write reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux write reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = msg->size; +out: + return ret; +} + +static int +phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= ((msg->size-1) & BYTE_COUNT_MASK); + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = 
phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux read reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux read reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux read reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); + + if (ret > msg->size) { + ret = msg->size; + } else if (ret != msg->size) { + DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ret; i++) + data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); + +out: + return ret; +} + +static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_mode *t, *mode; + struct drm_connector *connector = &phytium_dp->connector; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) { + memcpy(native_mode, mode, sizeof(*mode)); + drm_mode_set_crtcinfo(native_mode, 0); + } + break; + } + } + + if (&mode->head == &connector->probed_modes) + native_mode->clock = 0; +} + +static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) +{ + int i = 0, ret = 0; + struct drm_device *dev = phytium_dp->dev; + struct drm_display_mode *mode = NULL, *current_mode = NULL; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + bool mode_existed = false; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_mode[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + if (native_mode->clock == 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(common_mode); i++) { + mode_existed = false; + + if (common_mode[i].w > native_mode->hdisplay || + common_mode[i].h > native_mode->vdisplay || + (common_mode[i].w == native_mode->hdisplay && + common_mode[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { + if (common_mode[i].w == current_mode->hdisplay && + common_mode[i].h == current_mode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = drm_mode_duplicate(dev, 
native_mode); + if (mode == NULL) + continue; + + mode->hdisplay = common_mode[i].w; + mode->vdisplay = common_mode[i].h; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strncpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); + drm_mode_probed_add(&phytium_dp->connector, mode); + ret++; + } + + return ret; +} + +static int phytium_connector_get_modes(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct edid *edid; + int ret = 0; + + edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + if (edid && drm_edid_is_valid(edid)) { + drm_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + phytium_dp->has_audio = drm_detect_monitor_audio(edid); + phytium_get_native_mode(phytium_dp); + if (dc_fake_mode_enable) + ret += phytium_connector_add_common_modes(phytium_dp); + } else { + drm_connector_update_edid_property(connector, NULL); + phytium_dp->has_audio = false; + } + + kfree(edid); + + return ret; +} + +static int +phytium_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned int requested, actual; + + switch (display_info->bpc) { + case 10: + case 6: + case 8: + break; + default: + DRM_INFO("not support bpc(%d)\n", display_info->bpc); + display_info->bpc = 8; + break; + } + + if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { + DRM_INFO("not support color_format(%d)\n", display_info->color_formats); + display_info->color_formats = DRM_COLOR_FORMAT_RGB444; + } + + requested = mode->clock * display_info->bpc * 3 / 1000; + actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; + actual = actual * 8 / 10; + if (requested >= actual) { + DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, + mode->clock); + return MODE_CLOCK_HIGH; + } + + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay == 1024) && (mode->clock > 78000)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay < 640) || (mode->vdisplay < 480)) + return MODE_BAD_HVALUE; + + return MODE_OK; +} + +static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + return &phytium_dp->encoder; +} + +static const +struct drm_connector_helper_funcs phytium_connector_helper_funcs = { + .get_modes = phytium_connector_get_modes, + .mode_valid = phytium_connector_mode_valid, + .best_encoder = phytium_dp_best_encoder, +}; + +static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp) +{ + static const int dp_rates[] = {162000, 270000, 540000, 810000}; + int i, max_rate; + + max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]); + for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { + if (dp_rates[i] > max_rate) + break; + phytium_dp->sink_rates[i] = dp_rates[i]; + } + phytium_dp->num_sink_rates = i; +} + +static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates, + int sink_len, int *common_rates) +{ + int i = 0, j = 0, k = 0; + + while (i < source_len && j < sink_len) { + if (source_rates[i] == sink_rates[j]) { + if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) + return k; + common_rates[k] = source_rates[i]; + ++k; + ++i; + ++j; + } else if (source_rates[i] < sink_rates[j]) 
{ + ++i; + } else { + ++j; + } + } + return k; +} + +static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp) +{ + WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates); + + phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates, + phytium_dp->num_source_rates, + phytium_dp->sink_rates, + phytium_dp->num_sink_rates, + phytium_dp->common_rates); + + if (WARN_ON(phytium_dp->num_common_rates == 0)) { + phytium_dp->common_rates[0] = 162000; + phytium_dp->num_common_rates = 1; + } +} + +static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp) +{ + int ret; + unsigned char sink_count = 0; + + /* get dpcd capability,but don't check data error; so check revision */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd, + sizeof(phytium_dp->dpcd)); + if (ret < 0) { + DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port); + return false; + } + + if (phytium_dp->dpcd[DP_DPCD_REV] == 0) { + DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]); + return false; + } + + /* parse sink support link */ + phytium_dp_set_sink_rates(phytium_dp); + phytium_dp_set_common_rates(phytium_dp); + phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd); + phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count, + phytium_dp->sink_max_lane_count); + + /* get dpcd sink count */ + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) { + DRM_ERROR("get DPCD sink_count fail\n"); + return false; + } + + phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count); + if (!phytium_dp->sink_count) { + DRM_ERROR("DPCD sink_count should not be zero\n"); + return false; + } + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return true; + + if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10) + return true; + + /* get downstream port for branch device */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0, + phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS); + if (ret < 0) { + DRM_ERROR("get DPCD DFP fail\n"); + return false; + } + + return true; +} + +static enum drm_connector_status +phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp) +{ + if (!phytium_dp_get_dpcd(phytium_dp)) + return connector_status_disconnected; + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return connector_status_connected; + + if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) { + return phytium_dp->sink_count ? 
connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + 
val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, 
PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); +} + +static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + if (enable) + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + else + phytium_writel_reg(priv, SEC_AUDIO_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static int +phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + int ret = 0, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t fs, ws, fs_accurac; + uint32_t group_offset = priv->dp_reg_base[port]; + + DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", + __func__, phytium_dp->port, audio_info.sample_rate, + audio_info.channels, audio_info.sample_width); + + phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); + phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, + group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); + phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, + group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); + phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE); + + phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE); + phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, + group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT); + + switch (audio_info.sample_rate) { + case 32000: + fs = ORIG_FREQ_32000; + fs_accurac = SAMPLING_FREQ_32000; + break; + case 44100: + fs = ORIG_FREQ_44100; + fs_accurac = SAMPLING_FREQ_44100; + break; + case 48000: + fs = ORIG_FREQ_48000; + fs_accurac = SAMPLING_FREQ_48000; + break; + case 96000: + fs = ORIG_FREQ_96000; + fs_accurac = SAMPLING_FREQ_96000; + break; + case 176400: + fs = ORIG_FREQ_176400; + fs_accurac = SAMPLING_FREQ_176400; + break; + case 192000: + fs = ORIG_FREQ_192000; + fs_accurac = SAMPLING_FREQ_192000; + break; + default: + DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate); + goto out; + } + + switch (audio_info.sample_width) { + case 16: + ws = WORD_LENGTH_16; + break; + case 18: + ws = WORD_LENGTH_18; + break; + case 20: + ws = WORD_LENGTH_20; + break; + case 24: + ws = WORD_LENGTH_24; + break; + default: + DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width); + goto out; + } + + phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<link_rate, audio_info.sample_rate); + if (n_m == 
NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_dp->audio_info = audio_info; + + return 0; + +out: + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + + return ret; +} + +void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, + group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); +} + +bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + return config ? true : false; +} + +void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, + group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned long link_bw, date_rate = 0; + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned char tu_size = 64; + unsigned long data_per_tu = 0; + int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; + + /* cal M/N and tu_size */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); + phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + /* mul 10 for register setting */ + data_per_tu = 10*tu_size * date_rate/link_bw; + symbols_per_tu = (data_per_tu/10)&0xff; + frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; + phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, + group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); + + symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; + udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; + phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); + + /* config main stream attributes */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, + group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, + group_offset, 
PHYTIUM_DP_MAIN_LINK_HRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_VRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); + + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); + + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct 
drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, + priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE); +} + +bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + int config = 0; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + return config ? true : false; +} + +static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t val = 0, raw_state = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS); + + /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE); + if (val & HPD_EVENT) + phytium_dp->dp_hpd_state.hpd_event_state = true; + + if (val & HPD_IRQ) + phytium_dp->dp_hpd_state.hpd_irq_state = true; + + if (raw_state & HPD_CONNECT) + phytium_dp->dp_hpd_state.hpd_raw_state = true; + else + phytium_dp->dp_hpd_state.hpd_raw_state = false; +} + +void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_dp->dp_hpd_state.hpd_irq_enable = enable; + if (enable) + phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK); + else + phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, + group_offset, PHYTIUM_DP_INTERRUPT_MASK); +} + +int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + uint8_t count = 0; + + phytium_dp->source_rates = phytium_rate; + phytium_dp->num_source_rates = num_source_rates; + count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp); + phytium_dp->source_max_lane_count = count; + + ret = phytium_dp->funcs->dp_hw_reset(phytium_dp); + if (ret) + goto out; + ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp); + if (ret) + goto out; + + phytium_dp->fast_train_support = false; + phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp); + +out: + return ret; +} + +static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp, + uint8_t *test_lane_count, + uint32_t *test_link_rate) +{ + uint8_t test_link_bw; + int ret; + + ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT, + test_lane_count); + if (ret <= 0) { + DRM_DEBUG_KMS("test pattern Lane count read failed(%d)\n", ret); + goto failed; + } + + ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE, + &test_link_bw); + if (ret <= 0) { + DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret); + goto failed; + } + *test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); + + return 0; +failed: + return ret; +} + +static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, uint32_t link_rate) +{ + uint8_t 
link_config[2]; + int ret = 0; + + link_config[0] = drm_dp_link_rate_to_bw_code(link_rate); + link_config[1] = lane_count; + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (ret < 0) { + DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); + goto failed; + } + + if (phytium_dp->hw_spread_enable) + link_config[0] = DP_SPREAD_AMP_0_5; + else + link_config[0] = 0; + link_config[1] = DP_SET_ANSI_8B10B; + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); + if (ret < 0) { + DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t test_pattern) +{ + unsigned char value; + int ret; + + if (phytium_dp_coding_8b10b_need_enable(test_pattern)) + value = DP_SET_ANSI_8B10B; + else + value = 0; + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); + if (ret < 0) { + DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret); + goto failed; + } + + if (phytium_dp_scrambled_need_enable(test_pattern)) + value = DP_TRAINING_PATTERN_DISABLE; + else + value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE); + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + uint8_t value; + int ret; + + /* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE) + value = train_pattern; + else + value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE); + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { + DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +static int +phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set) +{ + int ret = 0; + + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET, + phytium_dp->train_set, 4); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +static int +phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count) +{ + int ret = 0; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + goto failed; + } + phytium_get_adjust_train(phytium_dp, link_status, lane_count); + + return 0; +failed: + return ret; +} + +void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode) +{ + int ret, i; + + if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + if (mode != DRM_MODE_DPMS_ON) { + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + } else { + for (i = 0; i < 3; i++) { + ret = 
drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(20);
+ }
+ }
+
+ if (ret != 1)
+ DRM_DEBUG_KMS("failed to %s sink power state\n",
+ mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+}
+
+static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp)
+{
+ int ret;
+ unsigned char voltage, max_vswing_tries;
+ int voltage_tries;
+
+ /* clear the test pattern */
+ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count,
+ PHYTIUM_PHY_TP_NONE, NULL, 0);
+
+ /* config source and sink's link rate and lane count */
+ phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate);
+ ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count,
+ phytium_dp->link_rate);
+ if (ret < 0) {
+ DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret);
+ return false;
+ }
+
+ /* config source's voltage swing and pre-emphasis(103-106) */
+ memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set));
+ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+ phytium_dp->train_set[0]);
+
+ /* config train pattern */
+ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+ ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret);
+ return false;
+ }
+
+ /* config sink's voltage swing and pre-emphasis(103-106) */
+ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+ return false;
+ }
+
+ voltage_tries = 1;
+ max_vswing_tries = 0;
+ for (;;) {
+ unsigned char link_status[DP_LINK_STATUS_SIZE];
+
+ drm_dp_link_train_clock_recovery_delay(phytium_dp->dpcd);
+
+ /* get link status 0x202-0x207 */
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret < 0) {
+ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+ return false;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+ DRM_DEBUG_KMS("clock recovery ok\n");
+ return true;
+ }
+
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ return false;
+ }
+
+ if (max_vswing_tries == 1) {
+ DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ return false;
+ }
+
+ voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* config source and sink's voltage swing and pre-emphasis(103-106) */
+ phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count);
+ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+ phytium_dp->train_set[0]);
+ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+ return false;
+ }
+
+ if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage)
+ ++voltage_tries;
+ else
+ voltage_tries = 1;
+
+ if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED)
+ ++max_vswing_tries;
+
+ DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n",
+ phytium_dp->train_set[0], voltage_tries, max_vswing_tries);
+ }
+}
+
+static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp)
+{
+ bool sink_tps3, sink_tps4;
+
+ sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd);
+ if (sink_tps4)
+ return DP_TRAINING_PATTERN_4;
+ else if (phytium_dp->link_rate == 810000)
+ 
DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = 
%d\n", ret); + goto out; + } + + if (phytium_dp->trigger_train_fail) { + phytium_dp->trigger_train_fail--; + goto failure_handling; + } + phytium_dp->train_retry_count = 0; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n", + phytium_dp->connector.base.id, + phytium_dp->connector.name, phytium_dp->link_rate, + phytium_dp->link_lane_count); + + return 0; + +failure_handling: + DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d", + phytium_dp->connector.base.id, + phytium_dp->connector.name, + phytium_dp->link_rate, phytium_dp->link_lane_count); + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + + phytium_dp_get_link_train_fallback_values(phytium_dp); + if (phytium_dp->train_retry_count < 5) + schedule_work(&phytium_dp->train_retry_work); + else + DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n", + phytium_dp->port); + +out: + return -1; +} + +static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) +{ + unsigned char link_status[DP_LINK_STATUS_SIZE]; + int ret = 0; + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return true; + } + + if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) { + DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + return true; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Clock recovery check failed\n"); + return true; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Channel EQ check failed\n"); + return true; + } + + if (!phytium_dp_hw_output_is_enable(phytium_dp)) { + DRM_DEBUG_KMS("check DP output enable failed\n"); + return true; + } + return false; +} + +static bool +phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector) +{ + return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector) == 1; +} + +static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp) +{ + union phytium_phy_tp phytium_phy_tp; + int ret; + unsigned char test_80_bit_pattern[ + (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned char test_pattern; + unsigned int offset; + + offset = DP_TEST_PHY_PATTERN; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, offset, + &phytium_phy_tp.raw, + sizeof(phytium_phy_tp)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + + test_pattern = phytium_phy_tp.bits.PATTERN; + + if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) { + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + } + + /* config source and sink's link rate and link count */ + ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count, + &phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret); + goto failed; + } + + phytium_dp_hw_set_link(phytium_dp, 
phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret); + goto failed_dpcd_set_link; + } + + /* config source and sink's lane setting: voltage swing and pre-emphasis */ + ret = phytium_dp_dpcd_get_adjust_request(phytium_dp, + phytium_dp->compliance.test_lane_count); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret); + goto failed_dpcd_get_adjust_request; + } + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + goto failed_dpcd_set_lane_setting; + } + + /* config test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + test_pattern, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret); + goto failed_dpcd_set_tp; + } + + return DP_TEST_ACK; + +failed_dpcd_set_tp: + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + PHYTIUM_PHY_TP_NONE, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); +failed_dpcd_set_link: +failed_dpcd_set_lane_setting: +failed_dpcd_get_adjust_request: +failed: + return DP_TEST_NAK; +} + +static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp) +{ + uint8_t response = DP_TEST_NAK; + uint8_t request = 0; + int status; + + status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + DRM_DEBUG_KMS("Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + case DP_TEST_LINK_VIDEO_PATTERN: + case DP_TEST_LINK_EDID_READ: + DRM_DEBUG_KMS("Not support test request '%02x'\n", request); + response = DP_TEST_NAK; + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); + response = phytium_dp_autotest_phy_pattern(phytium_dp); + break; + default: + DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); + break; + } + +update_status: + status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + DRM_DEBUG_KMS("Could not write test response to sink\n"); + +} + +static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + bool video_enable = false; + uint32_t index = 0; + + if (phytium_dp->is_edp) + status = connector_status_connected; + else if (hpd_raw_state) { + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + } else { + status = connector_status_disconnected; + goto out; + } + + if (!phytium_dp->is_edp) { + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + goto out; + + index = phytium_dp->num_common_rates-1; + phytium_dp->max_link_rate = phytium_dp->common_rates[index]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + 
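+		/*
+		 * phytium_dp_start_link_train() below starts from the highest
+		 * rate and lane count common to source and sink; on failure it
+		 * falls back through phytium_dp_get_link_train_fallback_values(),
+		 * which steps the link rate down first and only halves the lane
+		 * count (back at the maximum rate) once the lowest rate has been
+		 * tried, e.g. x4 at 8.1/5.4/2.7/1.62 Gbps, then x2, then x1.
+		 */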
phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + 
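+		/*
+		 * Apply the new HPD interrupt enable state to every port's
+		 * hardware, independent of whether a missed hotplug was just
+		 * replayed via phytium_dp_hpd_poll_handler() above.
+		 */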
phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight 
device(ret=%d)\n", + phytium_dp->port, ret); + } + + ret = phytium_debugfs_connector_add(connector); + if (ret) + DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); + + return 0; +} + +static void +phytium_dp_connector_unregister(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->is_edp) { + phytium_edp_backlight_device_unregister(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + drm_dp_aux_unregister(&phytium_dp->aux); +} + +static const struct drm_connector_funcs phytium_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = phytium_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = phytium_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = phytium_dp_connector_register, + .early_unregister = phytium_dp_connector_unregister, +}; + +static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct phytium_dp_device *dp = encoder_to_dp_device(encoder); + + drm_mode_copy(&dp->mode, adjusted); +} + +static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweron(&phytium_dp->panel); +} + +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweroff(&phytium_dp->panel); +} + +static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_enable_backlight(&phytium_dp->panel); +} + +static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_disable_backlight(&phytium_dp->panel); +} + +static void phytium_encoder_disable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + + phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + + if (phytium_dp->is_edp) + phytium_edp_panel_poweroff(phytium_dp); +} + +void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + for (;;) { + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; + DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", + phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); + if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || + ((!link_dynamic_adjust) && (rate < 10))) + break; + phytium_dp_get_link_train_fallback_values(phytium_dp); + } + + DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); +} + +static void phytium_encoder_enable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + + phytium_dp_hw_disable_video(phytium_dp); 
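+
+	/*
+	 * Keep the video stream gated while the link is (re)trained: eDP uses
+	 * fast link training when the panel advertises it, external DP first
+	 * adjusts rate and lane count for the mode via
+	 * phytium_dp_adjust_link_train_parameter(), and video (plus audio,
+	 * when present) is only re-enabled once training succeeds.
+	 */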
+ + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) + phytium_edp_backlight_on(phytium_dp); + +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = phytium_encoder_enable, +}; + +static const struct drm_encoder_funcs phytium_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { + { 32000, 162000, 1024, 10125 }, + { 44100, 162000, 784, 5625 }, + { 48000, 162000, 512, 3375 }, + { 64000, 162000, 2048, 10125 }, + { 88200, 162000, 1568, 5625 }, + { 96000, 162000, 1024, 3375 }, + { 128000, 162000, 4096, 10125 }, + { 176400, 162000, 3136, 5625 }, + { 192000, 162000, 2048, 3375 }, + { 32000, 270000, 1024, 16875 }, + { 44100, 270000, 784, 9375 }, + { 48000, 270000, 512, 5625 }, + { 64000, 270000, 2048, 16875 }, + { 88200, 270000, 1568, 9375 }, + { 96000, 270000, 1024, 5625 }, + { 128000, 270000, 4096, 16875 }, + { 176400, 270000, 3136, 9375 }, + { 192000, 270000, 2048, 5625 }, + { 32000, 540000, 1024, 33750 }, + { 44100, 540000, 784, 18750 }, + { 48000, 540000, 512, 11250 }, + { 64000, 540000, 2048, 33750 }, + { 88200, 540000, 1568, 18750 }, + { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = 
phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .digital_mute = phytium_dp_audio_digital_mute, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); + phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + 
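+	/*
+	 * An eDP panel must be powered before its DPCD is reachable over AUX,
+	 * so power it on first, then probe the sink and latch the highest
+	 * common link rate and lane count for later training.
+	 */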
phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + return false; + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_X100(priv)) { + x100_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = X100_DP_BASE(port); + priv->phy_access_base[port] = X100_PHY_ACCESS_BASE(port); + } else if (IS_E2000(priv)) { + e2000_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = E2000_DP_BASE(port); + priv->phy_access_base[port] = E2000_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + 
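+	/*
+	 * Registering the connector exposes it to userspace and runs the
+	 * .late_register hook (phytium_dp_connector_register), which sets up
+	 * the AUX channel, the eDP panel/backlight device and the connector
+	 * debugfs entries.
+	 */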
drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..532c04152b99ba6d179cd8cdfcf7c85ce95085d3 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* 
DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. + */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 0000000000000000000000000000000000000000..8349ff2873a9fb44dcfcecbbbf717623e85c59f7 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? 
info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + obj = &phytium_fb->phytium_gem_obj[i]->base; + if (obj) + drm_gem_object_unreference_unlocked(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static struct drm_framebuffer_funcs viv_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + const struct drm_format_info *info; + + info = drm_format_info(mode_cmd->pixel_format); + hsub = info ? info->hsub : 1; + vsub = info ? info->vsub : 1; + num_planes = info ? info->num_planes : 1; + num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE); + + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_unreference_unlocked(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) + goto error; + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_unreference_unlocked(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 0000000000000000000000000000000000000000..c11c6c009b137d10261746cfd95cbe1add5b2967 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_FB_H__ +#define __PHYTIUM_FB_H__ + +struct phytium_framebuffer { + struct drm_framebuffer base; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE]; +}; + +#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base) + +struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, + unsigned int num_planes); + +struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); +#endif /* __PHYTIUM_FB_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c new file mode 100644 index 0000000000000000000000000000000000000000..0e5c1eac59fc48440694ce5e335d22297ea287ce --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" +#include "phytium_fb.h" + + +#define PHYTIUM_MAX_CONNECTOR 1 +#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper) + +static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *helper = info->par; + struct phytium_display_private *priv = helper_to_drm_private(helper); + + return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma); +} + +static struct fb_ops phytium_fbdev_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_mmap = phytium_fbdev_mmap, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, +}; + +static int +phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) +{ + struct phytium_display_private *priv = helper_to_drm_private(helper); + struct drm_device *dev = helper->dev; + unsigned int bytes_per_pixel; + struct drm_mode_fb_cmd2 mode_cmd = {0}; + struct phytium_framebuffer *phytium_fb = NULL; + struct fb_info *fbi = NULL; + struct drm_framebuffer *fb = NULL; + size_t size = 0; + int ret = 0; + unsigned long offset; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret < 0) { + DRM_ERROR("failed to get mutex lock\n"); + return ret; + } + + priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size); + if (!priv->fbdev_phytium_gem) { + DRM_ERROR("failed to create gem object\n"); + return -ENOMEM; + } + mutex_unlock(&dev->struct_mutex); + + fbi = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(fbi)) { + DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info."); + ret = PTR_ERR(fbi); + goto out; + } + + phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1); + if (IS_ERR(phytium_fb)) { + DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n"); + ret = PTR_ERR(phytium_fb); + goto out; + } + + helper->fb = &(phytium_fb->base); + fbi->par = helper; + fbi->flags = FBINFO_FLAG_DEFAULT; + fbi->fbops = 
&phytium_fbdev_ops; + + fb = helper->fb; + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); + + offset = fbi->var.xoffset * bytes_per_pixel; + offset += fbi->var.yoffset * fb->pitches[0]; + dev->mode_config.fb_base = 0; + fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset; + fbi->screen_size = priv->fbdev_phytium_gem->base.size; + fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size; + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height, + fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size); + fbi->skip_vt_switch = true; + + return 0; +out: + phytium_gem_free_object(&priv->fbdev_phytium_gem->base); + return ret; +} + +static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { + .fb_probe = phytium_drm_fbdev_create, +}; + +int phytium_drm_fbdev_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + int ret; + + if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + return -EINVAL; + + helper = &priv->fbdev_helper; + drm_fb_helper_prepare(dev, helper, &phytium_drm_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, helper, PHYTIUM_MAX_CONNECTOR); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to add connectors - %d/\n", ret); + goto err_drm_fb_helper_fini; + } + ret = drm_fb_helper_initial_config(helper, 32); + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(helper); + return ret; +} + +void phytium_drm_fbdev_fini(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; + drm_fb_helper_unregister_fbi(helper); + + if (helper->fb) + drm_framebuffer_put(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h new file mode 100644 index 0000000000000000000000000000000000000000..d291d82c2706543c12727b36329eda1ba5bdc84b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_FBDEV_H +#define _PHYTIUM_FBDEV_H + +int phytium_drm_fbdev_init(struct drm_device *dev); +void phytium_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _PHYTIUM_FBDEV_H */ diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c new file mode 100644 index 0000000000000000000000000000000000000000..90ac2d3b340b3cf44416adcdcfa2f1d893c466ec --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + 
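+	/*
+	 * Only the first segment's DMA address is kept as the buffer address
+	 * below, so an imported dma-buf is rejected unless its scatterlist is
+	 * contiguous in DMA address space.
+	 */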
phytium_gem_obj->iova = sg_dma_address(sgt->sgl); + phytium_gem_obj->sgt = sgt; + + return &phytium_gem_obj->base; +failed_check_continue: + drm_gem_object_release(&phytium_gem_obj->base); +failed_object_init: + kfree(phytium_gem_obj); +failed_malloc: + return ERR_PTR(ret); +} + +void *phytium_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj); + + return phytium_obj->vaddr; +} + +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ +} + +int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap_obj(obj, obj->size, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(obj, vma); +} + +static void phytium_dma_callback(void *callback_param) +{ + struct completion *comp = callback_param; + + complete(comp); +} + +int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr, + dma_addr_t iova, uint64_t size) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct dma_chan *dma_chan = priv->dma_chan; + struct sg_table st; + struct scatterlist *sgl; + int ret = 0, timeout; + uint32_t nents, i; + struct dma_slave_config cfg = {0}; + struct dma_async_tx_descriptor *desc; + struct completion comp; + enum dma_data_direction dir; + size_t min = 0; + + nents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(&st, nents, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg_table\n"); + ret = -ENOMEM; + goto failed_sg_alloc_table; + } + + for_each_sg(st.sgl, sgl, st.nents, i) { + min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr)); + sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr)); + addr += min; + size -= min; + } + + memset(&cfg, 0, sizeof(cfg)); + if (dev_to_mem) { + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = iova; + cfg.dst_addr = 0; + dir = DMA_FROM_DEVICE; + } else { + cfg.direction = DMA_MEM_TO_DEV; + cfg.src_addr = 0; + cfg.dst_addr = iova; + dir = DMA_TO_DEVICE; + } + + dmaengine_slave_config(dma_chan, &cfg); + + nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir); + if (!nents) { + DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n"); + ret = -EINVAL; + goto failed_dma_map_sg; + } + st.nents = nents; + dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir); + + sgl = st.sgl; + desc = dmaengine_prep_slave_sg(dma_chan, + st.sgl, + st.nents, + cfg.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + DRM_DEV_ERROR(drm_dev->dev, "failed to dmaengine_prep_slave_sg\n"); + ret = -EINVAL; + goto failed_prep_slave_sg; + } + init_completion(&comp); + desc->callback = phytium_dma_callback; + desc->callback_param = ∁ + + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + + timeout = wait_for_completion_timeout(&comp, 2 * HZ); + if (timeout == 0) { + DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n"); + ret = -EIO; + } + dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir); + +failed_prep_slave_sg: + dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir); +failed_dma_map_sg: + sg_free_table(&st); +failed_sg_alloc_table: + return ret; +} + +int phytium_gem_suspend(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + 
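+		/*
+		 * Stage VRAM contents into a vmalloc'ed shadow buffer so they
+		 * survive suspend: copy through the DMA engine when it is
+		 * available, fall back to a CPU memcpy otherwise. The shadow is
+		 * written back and freed in phytium_gem_resume().
+		 */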
phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + if (priv->dma_inited) + ret = phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) +{ + return drm_gem_dumb_destroy(file, dev, handle); +} + + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->size = 
size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 0000000000000000000000000000000000000000..310bcb4c21d0e2aada5c221b4e209c47ea0bc574 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_GEM_H__ +#define __PHYTIUM_GEM_H__ + +#include + +struct phytium_gem_object { + struct drm_gem_object base; + phys_addr_t phys_addr; + dma_addr_t iova; + void *vaddr; + unsigned long size; + struct sg_table *sgt; + char memory_type; + char reserve[3]; + struct list_head list; + void *vaddr_save; +}; + +#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv); +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv); +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); +void phytium_gem_free_object(struct drm_gem_object *obj); +struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt); +void phytium_gem_free_object(struct drm_gem_object *obj); +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +void *phytium_gem_prime_vmap(struct drm_gem_object *obj); +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_suspend(struct drm_device *drm_dev); +void phytium_gem_resume(struct drm_device *drm_dev); +#endif /* __PHYTIUM_GEM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c new file mode 100644 index 
0000000000000000000000000000000000000000..9481a7611124892aaa500df49c25a5ccb1749a57 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_panel.h" + +static int +phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + unsigned char vals[2] = { 0x0 }; + + vals[0] = level; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + + if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return -EIO; + } + + return 0; +} + +static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel) +{ + unsigned char read_val[2] = { 0x0 }; + unsigned char level = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + + level = read_val[0]; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable) +{ + u8 reg_val = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) + return; + + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ? 
"enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; + } else { + DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; + } + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); + phytium_dp->panel.dev = phytium_dp->dev; + + /* Upper limits from eDP 1.3 spec */ + phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ + phytium_dp->panel.backlight_on_delay = 50; /* t7 */ + phytium_dp->panel.backlight_off_delay = 50; + phytium_dp->panel.panel_power_down_delay = 0; /* t10 */ + phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +} + +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->panel.setup_backlight = NULL; + phytium_dp->panel.enable_backlight = NULL; + phytium_dp->panel.disable_backlight = NULL; + phytium_dp->panel.set_backlight = NULL; + phytium_dp->panel.get_backlight = NULL; + phytium_dp->panel.poweron = NULL; + phytium_dp->panel.poweroff = NULL; +} + +void phytium_panel_enable_backlight(struct phytium_panel *panel) +{ + + if (panel->enable_backlight) { + mutex_lock(&panel->panel_lock); + msleep(panel->backlight_on_delay); + panel->enable_backlight(panel); + panel->backlight_enabled = true; + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_disable_backlight(struct phytium_panel *panel) +{ + if (panel->disable_backlight) { + mutex_lock(&panel->panel_lock); + panel->disable_backlight(panel); + panel->backlight_enabled = false; + msleep(panel->backlight_off_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweron(struct phytium_panel *panel) +{ + if (panel->poweron) { + mutex_lock(&panel->panel_lock); + panel->poweron(panel); + panel->power_enabled = true; + msleep(panel->panel_power_up_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweroff(struct phytium_panel *panel) +{ + if (panel->poweroff) { + mutex_lock(&panel->panel_lock); + msleep(panel->panel_power_down_delay); + panel->poweroff(panel); + panel->power_enabled = false; + mutex_unlock(&panel->panel_lock); + } +} + +static uint32_t phytium_scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, target_max - 
target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + 
backlight_device_unregister(phytium_dp->panel.bl_device); + phytium_dp->panel.bl_device = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h new file mode 100644 index 0000000000000000000000000000000000000000..e2d5f068064a7818c6ed553b1bdc11fe89f73ed3 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PANEL_H__ +#define __PHYTIUM_PANEL_H__ +#include "phytium_dp.h" + +#define PHYTIUM_MAX_BL_LEVEL 0xFF + +struct phytium_panel { + struct drm_device *dev; + bool backlight_enabled; + bool power_enabled; + bool reserve1[2]; + unsigned int min; + unsigned int level; + unsigned int max; + struct backlight_device *bl_device; + void (*setup_backlight)(struct phytium_panel *panel); + uint32_t (*get_backlight)(struct phytium_panel *panel); + int (*set_backlight)(struct phytium_panel *panel, uint32_t level); + void (*disable_backlight)(struct phytium_panel *panel); + void (*enable_backlight)(struct phytium_panel *panel); + void (*poweron)(struct phytium_panel *panel); + void (*poweroff)(struct phytium_panel *panel); + struct mutex panel_lock; + uint32_t panel_power_up_delay; + uint32_t backlight_on_delay; + uint32_t backlight_off_delay; + uint32_t panel_power_down_delay; + uint32_t panel_power_cycle_delay; +}; + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); +void phytium_panel_enable_backlight(struct phytium_panel *panel); +void phytium_panel_disable_backlight(struct phytium_panel *panel); +void phytium_panel_poweron(struct phytium_panel *panel); +void phytium_panel_poweroff(struct phytium_panel *panel); + +#endif /* __PHYTIUM_PANEL_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..0fe066ad54f2aa762e28c0f08f63d694bddda7dd --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_pci.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "x100_dc.h" +#include "x100_dp.h" +#include "e2000_dc.h" +#include "e2000_dp.h" + +int dc_msi_enable; +module_param(dc_msi_enable, int, 0644); +MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); + +void phytium_pci_vram_hw_init(struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size); +} + +int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->pool_phys_addr = pci_resource_start(pdev, 2); + priv->pool_size = pci_resource_len(pdev, 2); + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n", + priv->pool_phys_addr, priv->pool_size); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_VRAM; + priv->vram_hw_init = phytium_pci_vram_hw_init; + } else { + DRM_DEBUG_KMS("not support vram\n"); + priv->pool_virt_addr = NULL; + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + devm_iounmap(&pdev->dev, priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_fini(&pdev->dev, priv); + devm_iounmap(&pdev->dev, priv->pool_virt_addr); + } +} + +static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct phytium_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev) + return false; + + if (s->chan_id == chan->chan_id) + return true; + else + return false; +} + +int phytium_pci_dma_init(struct phytium_display_private *priv) +{ + struct pci_dev *dma_dev, *gpu_dev; + struct drm_device *drm_dev = priv->dev; + dma_cap_mask_t mask; + struct phytium_dma_slave s; + int ret = 0; + u16 cmd; + + /* check x100 gpu enable */ + gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL); + if (!gpu_dev) { + DRM_INFO("failed to get gpu_dev\n"); + ret = -ENODEV; + goto failed; + } + + pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + DRM_INFO("gpu_dev master is disabled\n"); + ret = -ENODEV; + goto failed; + } + + dma_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL); + if (!dma_dev) { + DRM_INFO("failed to get dma_dev\n"); + ret = -ENODEV; + goto failed; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + s.dma_dev = &dma_dev->dev; + s.chan_id = 2; + priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s); + if (!priv->dma_chan) { + DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n"); + ret = -EBUSY; + goto failed; + } + priv->dma_inited = 1; + +failed: + return ret; +} + +void phytium_pci_dma_fini(struct phytium_display_private *priv) +{ + if (priv->dma_inited) + dma_release_channel(priv->dma_chan); + priv->dma_inited = 0; + priv->dma_chan 
= NULL; +} + +static struct phytium_display_private* +phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = NULL; + struct phytium_pci_private *pci_priv = NULL; + struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; + int i = 0; + resource_size_t io_addr, io_size; + + pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); + if (!pci_priv) { + DRM_ERROR("no memory to allocate for drm_display_private\n"); + goto failed_malloc_priv; + } + + memset(pci_priv, 0, sizeof(*pci_priv)); + priv = &pci_priv->base; + phytium_display_private_init(priv, dev); + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); + priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); + priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed_init_numpipe; + } + + io_addr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + priv->regs = ioremap(io_addr, io_size); + if (priv->regs == NULL) { + DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); + goto failed_ioremap; + } + + priv->irq = pdev->irq; + if (IS_X100(priv)) { + pci_priv->dc_hw_vram_init = x100_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = x100_dc_hw_clear_msi_irq; + priv->dc_hw_fb_format_check = x100_dc_hw_fb_format_check; + } else if (IS_E2000(priv)) { + pci_priv->dc_hw_vram_init = e2000_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = e2000_dc_hw_fb_format_check; + } + + return priv; + +failed_ioremap: +failed_init_numpipe: + devm_kfree(&pdev->dev, pci_priv); +failed_malloc_priv: + return NULL; +} + +static void +phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + if (priv->regs) + iounmap(priv->regs); + + devm_kfree(&pdev->dev, pci_priv); +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + dev->pdev = pdev; + pci_set_drvdata(pdev, dev); + pci_set_master(pdev); + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("pci enable device fail\n"); + goto failed_enable_device; + } + + if (dc_msi_enable) { + ret = pci_enable_msi(pdev); + if (ret) + DRM_ERROR("pci enable msi fail\n"); + } + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_pci_private_init(pdev, ent); + if (priv) + dev->dev_private = priv; + else + goto failed_pci_private_init; + + ret = phytium_pci_vram_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init pci vram\n"); + goto failed_pci_vram_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_pci_vram_fini(pdev, priv); +failed_pci_vram_init: + phytium_pci_private_fini(pdev, priv); 
+failed_pci_private_init: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); +failed_enable_device: + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); + + return -1; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_pci_vram_fini(pdev, priv); + phytium_pci_private_fini(pdev, priv); + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + if (IS_X100(priv)) + phytium_pci_dma_init(priv); + + ret = priv->display_pm_suspend(drm_dev); + if (ret < 0) + goto out; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + udelay(200); + +out: + return ret; +} + +static int phytium_pci_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = priv->display_pm_resume(drm_dev); + if (IS_X100(priv)) + phytium_pci_dma_fini(priv); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) +}; + +static const struct phytium_device_info x100_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_X100), + .total_pipes = 3, + .crtc_clock_max = X100_DC_PIX_CLOCK_MAX, + .hdisplay_max = X100_DC_HDISPLAY_MAX, + .vdisplay_max = X100_DC_VDISPLAY_MAX, + .address_mask = X100_DC_ADDRESS_MASK, + .backlight_max = X100_DP_BACKLIGHT_MAX, +}; + +static const struct phytium_device_info e2000_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_E2000), + .total_pipes = 2, + .crtc_clock_max = E2000_DC_PIX_CLOCK_MAX, + .hdisplay_max = E2000_DC_HDISPLAY_MAX, + .vdisplay_max = E2000_DC_VDISPLAY_MAX, + .address_mask = E2000_DC_ADDRESS_MASK, + .backlight_max = E2000_DP_BACKLIGHT_MAX, +}; + +static const struct pci_device_id phytium_display_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&x100_info }, + { PCI_VDEVICE(PHYTIUM, 0xdc3e), (kernel_ulong_t)&e2000_info }, + { /* End: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); + +struct pci_driver phytium_pci_driver = { + .name = "phytium_display_pci", + .id_table = phytium_display_pci_ids, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, + .driver.pm = &phytium_pci_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..316c313050a8f6ae844605299265608fcedee664 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display 
drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PCI_H__ +#define __PHYTIUM_PCI_H__ + +#include "phytium_display_drv.h" + +struct phytium_pci_private { + struct phytium_display_private base; + void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size); +}; + +struct phytium_dma_slave { + struct device *dma_dev; + u32 chan_id; +}; + +#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) + +extern struct pci_driver phytium_pci_driver; +#endif /* __PHYTIUM_PCI_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c new file mode 100644 index 0000000000000000000000000000000000000000..a34f27943345572f031bda2425e58b4fe0141357 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include + +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" +#include "phytium_crtc.h" +#include "x100_dc.h" +#include "e2000_dc.h" +#include "phytium_reg.h" + +#define PHYTIUM_CURS_W_SIZE 32 +#define PHYTIUM_CURS_H_SIZE 32 + +void phytium_plane_destroy(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + + drm_plane_cleanup(plane); + kfree(phytium_plane); +} + +/** + * phytium_plane_atomic_get_property - fetch plane property value + * @plane: plane to fetch property for + * @state: state containing the property value + * @property: property to look up + * @val: pointer to write property value into + * + * The DRM core does not store shadow copies of properties for + * atomic-capable drivers. This entrypoint is used to fetch + * the current value of a driver-specific plane property. + */ +static int +phytium_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +/** + * phytium_plane_atomic_set_property - set plane property value + * @plane: plane to set property for + * @state: state to update property value in + * @property: property to set + * @val: value to set property to + * + * Writes the specified property value for a plane into the provided atomic + * state object. 
+ * + * Returns 0 on success, -EINVAL on unrecognized properties + */ +int +phytium_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +struct drm_plane_state * +phytium_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_plane_state *state = NULL; + struct phytium_plane_state *phytium_state = NULL; + + phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); + + if (!phytium_state) + return NULL; + + state = &phytium_state->base; + if (state->fb) + drm_framebuffer_get(state->fb); + + state->fence = NULL; + state->commit = NULL; + + return state; +} + +void +phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(phytium_state); +} + +const struct drm_plane_funcs phytium_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = phytium_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = phytium_plane_atomic_get_property, + .atomic_set_property = phytium_plane_atomic_set_property, + .atomic_duplicate_state = phytium_plane_atomic_duplicate_state, + .atomic_destroy_state = phytium_plane_atomic_destroy_state, +}; + +static int phytium_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dma_buf *dma_buf; + struct dma_fence *fence; + + if (!state->fb) + return 0; + dma_buf = to_phytium_framebuffer(state->fb)->phytium_gem_obj[0]->base.dma_buf; + if (dma_buf) { + fence = reservation_object_get_excl_rcu(dma_buf->resv); + drm_atomic_set_fence_for_plane(state, fence); + } + + return 0; +} + +static int +phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + int src_x, src_y, src_w, src_h; + unsigned long base_offset; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + if ((!fb) || (!crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + if (phytium_crtc->scale_enable) + return -EINVAL; + if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_W_SIZE)) { + DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h); + return -EINVAL; + } + } else if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + if (base_offset & (priv->info.address_mask)) { + DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", + priv->info.address_mask); + return -EINVAL; + } + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_ERROR("scale not support: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n", + state->crtc_w, state->crtc_h, src_w, src_h); + return -EINVAL; + } + + if ((state->crtc_x < 0) || (state->crtc_y < 0)) { + 
DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", + state->crtc_x, state->crtc_y); + return -EINVAL; + } + + if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { + DRM_ERROR("plane out of crtc region\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + 
case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= 
~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + 
phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + .prepare_fb = phytium_plane_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_X100(priv)) { + phytium_plane->dc_hw_plane_get_format = x100_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = x100_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = x100_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_E2000(priv)) { + phytium_plane->dc_hw_plane_get_format = e2000_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = e2000_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_X100(priv)) { + phytium_plane->dc_hw_plane_get_format = 
x100_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_E2000(priv)) { + phytium_plane->dc_hw_plane_get_format = e2000_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = e2000_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 0000000000000000000000000000000000000000..41bb607d857e45e885ebb2157468501971c60144 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLANE_H__ +#define __PHYTIUM_PLANE_H__ + +struct phytium_plane { + struct drm_plane base; + int phys_pipe; + unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int format; + unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int swizzle; + unsigned int uv_swizzle; + unsigned int rot_angle; + + /* only for cursor */ + bool enable; + bool reserve[3]; + unsigned int cursor_x; + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; + + void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); + void (*dc_hw_update_dcreq)(struct drm_plane *plane); + void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); + void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); +}; + +struct phytium_plane_state { + struct drm_plane_state base; +}; + +#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) +#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_PLANE_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..a9c3e38493957580bb21c2da3e8914c4df7bcf98 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display engine DRM driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_platform.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "e2000_dc.h" +#include "e2000_dp.h" + +int phytium_platform_carveout_mem_init(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + struct resource *res; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + priv->pool_size = resource_size(res); + priv->pool_phys_addr = res->start; + } + + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->vram_hw_init = NULL; + } else { + DRM_DEBUG_KMS("not support carveout memory\n"); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + iounmap(priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_platform_carveout_mem_fini(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + phytium_memory_pool_fini(&pdev->dev, priv); + iounmap(priv->pool_virt_addr); + } +} + +static struct phytium_display_private * +phytium_platform_private_init(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct device_node *node; + struct fwnode_handle *np; + struct phytium_display_private *priv = NULL; + struct phytium_platform_private *platform_priv = NULL; + struct phytium_device_info *phytium_info = NULL; + int i = 0, ret = 0; + struct resource *res; + + platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL); + if (!platform_priv) { + DRM_ERROR("no memory to allocate for phytium_platform_private\n"); + goto exit; + } + + memset(platform_priv, 0, sizeof(*platform_priv)); + priv = &platform_priv->base; + phytium_display_private_init(priv, dev); + + if (pdev->dev.of_node) { + phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get dts id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + node = pdev->dev.of_node; + ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from dts\n"); + goto failed; + } + + ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from dts\n"); + goto failed; + } + } else if (has_acpi_companion(&pdev->dev)) { + phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get acpi id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + np = dev_fwnode(&(pdev->dev)); + ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from 
acpi\n"); + goto failed; + } + ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from acpi\n"); + goto failed; + } + } + + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (priv->regs == NULL) { + DRM_ERROR("ioremap fail, addr:0x%llx, size:0x%llx\n", res->start, res->end); + goto failed; + } + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + goto failed; + } + + if (IS_E2000(priv)) { + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = e2000_dc_hw_fb_format_check; + } + + return priv; + +failed: + devm_kfree(&pdev->dev, platform_priv); +exit: + return NULL; +} + +static void phytium_platform_private_fini(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + struct phytium_platform_private *platform_priv = to_platform_priv(priv); + + devm_kfree(&pdev->dev, platform_priv); +} + +static int phytium_platform_probe(struct platform_device *pdev) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + + dev_set_drvdata(&pdev->dev, dev); + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_platform_private_init(pdev); + if (priv) + dev->dev_private = priv; + else + goto failed_platform_private_init; + + ret = phytium_platform_carveout_mem_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init system carveout memory\n"); + goto failed_carveout_mem_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_platform_carveout_mem_fini(pdev, priv); +failed_carveout_mem_init: + phytium_platform_private_fini(pdev); +failed_platform_private_init: + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + return -1; +} + +static int phytium_platform_remove(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_platform_private_fini(pdev); + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + + return 0; +} + +static void phytium_platform_shutdown(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_platform_pm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_suspend(drm_dev); +} + +static int phytium_platform_pm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_resume(drm_dev); +} + +static const struct 
dev_pm_ops phytium_platform_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume) +}; + +static const struct phytium_device_info e2000_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_E2000), + .total_pipes = 2, + .crtc_clock_max = E2000_DC_PIX_CLOCK_MAX, + .hdisplay_max = E2000_DC_HDISPLAY_MAX, + .vdisplay_max = E2000_DC_VDISPLAY_MAX, + .address_mask = E2000_DC_ADDRESS_MASK, + .backlight_max = E2000_DP_BACKLIGHT_MAX, +}; + +static const struct of_device_id display_of_match[] = { + { + .compatible = "phytium,dc", + .data = &e2000_info, + }, + { } +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id display_acpi_ids[] = { + { + .id = "PHYT0015", + .driver_data = (kernel_ulong_t)&e2000_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, display_acpi_ids); +#else +#define display_acpi_ids NULL +#endif + +struct platform_driver phytium_platform_driver = { + .driver = { + .name = "phytium_display_platform", + .of_match_table = of_match_ptr(display_of_match), + .acpi_match_table = ACPI_PTR(display_acpi_ids), + }, + .probe = phytium_platform_probe, + .remove = phytium_platform_remove, + .shutdown = phytium_platform_shutdown, + .driver.pm = &phytium_platform_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..eadec4ac26c3c8e27f93260147c63f0a1d6a0a23 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLATFORM_H__ +#define __PHYTIUM_PLATFORM_H__ + +struct phytium_platform_private { + struct phytium_display_private base; +}; + +#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base) + +extern struct platform_driver phytium_platform_driver; + +#endif /* __PHYTIUM_PLATFORM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..1034aaf448a8c45f8b92fae0c85f823ffb3c675c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_REG_H__ +#define __PHYTIUM_REG_H__ + +/******************************register base******************************************/ +#define X100_PIPE_BASE(pipe) (0x8000*pipe) +#define X100_DC_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x0000) +#define X100_DCREQ_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x2000) +#define X100_DP_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x3000) +#define X100_ADDRESS_TRANSFORM_BASE 0x4000 +#define X100_PHY_ACCESS_BASE(pipe) (X100_PIPE_BASE(pipe) + 0x5000) + +#define E2000_DC_BASE(pipe) (0x1000*pipe) +#define E2000_DP_BASE(pipe) (0x4000 + 0x1000*pipe) +#define E2000_ADDRESS_TRANSFORM_BASE 0x8000 +#define E2000_PHY_ACCESS_BASE(pipe) (0x6000 + 0x1000*pipe) +/******************************register base end******************************************/ + +/******************************dc register start******************************************/ +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 +#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 +#define PHYTIUM_DC_GAMMA_INDEX 0x1458 + #define GAMMA_INDEX_MAX 256 +#define PHYTIUM_DC_GAMMA_DATA 0x1460 + #define GAMMA_BLUE_SHIFT 0 + #define GAMMA_BLUE_MASK 0x3ff + #define GAMMA_GREEN_SHIFT 10 + #define GAMMA_GREEN_MASK 0x3ff + #define GAMMA_RED_SHIFT 20 + #define GAMMA_RED_MASK 0x3ff +#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c +#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define 
FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define X100_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define 
PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define 
VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dc.c b/drivers/gpu/drm/phytium/x100_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..483f8de523edffcd70a8843811b4b63e858ffec5 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dc.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "x100_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int x100_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t x100_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int x100_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void x100_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + X100_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, X100_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, X100_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_MSI_CLEAR); +} + +void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void x100_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc 
*phytium_crtc = to_phytium_crtc(crtc); + int reset_timeout = 100; + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + // reset dc + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + if (config & IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset pix clock */ + x100_dc_hw_config_pix_clock(crtc, 0); + + // reset dc + reset_timeout = 100; + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + X100_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], X100_DC_CLOCK_IDLE); + if (config & IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset dcreq */ + phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PLAN); + phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_CONTROL); + phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); + msleep(20); + phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, + priv->dcreq_reg_base[phys_pipe], X100_DCREQ_RESET); +} + +int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + switch (mode_cmd->modifier[count]) { + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + break; + default: + DRM_ERROR("TILE_MODE0_FBCDC does not support DRM_FORMAT %d\n", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + break; + default: + DRM_ERROR("TILE_MODE3_FBCDC does not support DRM_FORMAT %d\n", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_LINEAR: + break; + default: + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); + ret = -EINVAL; + goto error; + } + + return 0; +error: + return ret; +} + +void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = x100_primary_formats_modifiers; + *formats = x100_primary_formats; + *format_count = 
ARRAY_SIZE(x100_primary_formats); +} + +void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = NULL; + *formats = x100_cursor_formats; + *format_count = ARRAY_SIZE(x100_cursor_formats); +} + +void x100_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, X100_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, X100_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, X100_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, X100_DCREQ_PLANE0_CONFIG); + } +} + +void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], X100_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/x100_dc.h b/drivers/gpu/drm/phytium/x100_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..ae98b4ffe0cfa4c4b6bf58f756e666e19e9c8369 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __X100_DC_H__ +#define __X100_DC_H__ + +#define X100_DC_PIX_CLOCK_MAX (594000) +#define x100_DC_HDISPLAY_MAX 3840 +#define X100_DC_VDISPLAY_MAX 2160 +#define X100_DC_ADDRESS_MASK 0x3f + +extern void x100_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void x100_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void x100_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void x100_dc_hw_disable(struct drm_crtc *crtc); +extern int x100_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void x100_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void x100_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void x100_dc_hw_update_dcreq(struct drm_plane *plane); +void x100_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __X100_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_dp.c b/drivers/gpu/drm/phytium/x100_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..45e138321953c7182f170a8c3b215493acada6f1 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dp.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#include "phytium_display_drv.h" +#include "x100_reg.h" +#include "phytium_dp.h" +#include "x100_dp.h" + +static uint8_t x100_dp_source_lane_count[3] = {4, 4, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 
0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int x100_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_MGNFS, + 
mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, X100_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int x100_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, X100_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, X100_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void x100_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void x100_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void x100_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t x100_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t 
group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int x100_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > X100_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, X100_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + X100_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool x100_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +} + +int x100_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], X100_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t x100_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return x100_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func x100_dp_funcs = { + .dp_hw_get_source_lane_count = x100_dp_hw_get_source_lane_count, + .dp_hw_reset = x100_dp_hw_reset, + .dp_hw_spread_is_enable = x100_dp_hw_spread_is_enable, + .dp_hw_set_backlight = x100_dp_hw_set_backlight, + .dp_hw_get_backlight = x100_dp_hw_get_backlight, + .dp_hw_disable_backlight = x100_dp_hw_disable_backlight, + .dp_hw_enable_backlight = x100_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = x100_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = x100_dp_hw_poweron_panel, + .dp_hw_init_phy = x100_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = x100_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = x100_dp_hw_set_phy_lane_and_rate, +}; + +void x100_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &x100_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/x100_dp.h b/drivers/gpu/drm/phytium/x100_dp.h new file 
mode 100644 index 0000000000000000000000000000000000000000..a7a0fc48a58b035007e7a781d040a9b4858ae614 --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __X100_DP_H__ +#define __X100_DP_H__ + +#define X100_DP_BACKLIGHT_MAX 100 + +void x100_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __X100_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/x100_reg.h b/drivers/gpu/drm/phytium/x100_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..130430e924b5b39ca90617aacdea43bd407a458f --- /dev/null +++ b/drivers/gpu/drm/phytium/x100_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021 Phytium Technology Co., Ltd. + */ + +#ifndef __X100_REG_H__ +#define __X100_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define X100_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define X100_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define X100_DCREQ_PLANE0_ADDR_START 0x00 +#define X100_DCREQ_PLANE0_ADDR_END 0x04 +#define X100_DCREQ_PLANE1_ADDR_START 0x08 +#define X100_DCREQ_PLANE1_ADDR_END 0x0c +#define X100_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define X100_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define X100_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define X100_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define X100_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define X100_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define X100_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define X100_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define X100_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define X100_DCREQ_FRAME_START 0x54 +#define X100_DCREQ_FILTER_CONFIG 0x58 +#define X100_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define X100_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define X100_DCREQ_RESET 0x68 + 
#define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define X100_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define X100_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define X100_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define X100_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define X100_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define X100_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define X100_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define X100_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define X100_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define X100_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define X100_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define X100_PHY1_EN_REFCLK 0x100070 + +#define X100_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define X100_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define X100_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define X100_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define X100_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define X100_PHY0_PMA0_POWER 0x40014 +#define X100_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define X100_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define X100_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define X100_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define X100_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define X100_PHY0_PLL_CFG 0x30038 +#define X100_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define X100_PHY0_PMA_CONTROL 0x3800c +#define X100_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define X100_PHY0_PMA_CONTROL2 0x38004 +#define X100_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define X100_PHY0_PLL0_CLK_SEL 0X684 +#define X100_PHY0_PLL1_CLK_SEL 0x704 +#define X100_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define 
X100_PHY0_HSCLK0_SEL 0x18398 +#define X100_PHY0_HSCLK1_SEL 0x1a398 +#define X100_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define X100_PHY0_HSCLK0_DIV 0x1839c +#define X100_PHY0_HSCLK1_DIV 0x1a39c +#define X100_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define X100_PHY0_PLLDRC0_CTRL 0x18394 +#define X100_PHY0_PLLDRC1_CTRL 0x1a394 +#define X100_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define X100_PHY0_PLL0_DSM_M0 0x250 +#define X100_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define X100_PHY0_PLL0_VCOCAL_START 0x218 +#define X100_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define X100_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define X100_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define X100_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define X100_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define X100_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define X100_PHY0_PLL0_CP_PADJ 0x690 +#define X100_PHY0_PLL0_CP_IADJ 0x694 +#define X100_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define X100_PHY0_PLL0_INTDIV 0x240 +#define X100_PHY0_PLL0_FRACDIVL 0x244 +#define X100_PHY0_PLL0_FRACDIVH 0x248 +#define X100_PHY0_PLL0_HIGH_THR 0x24c +#define X100_PHY0_PLL0_PDIAG_CTRL 0x680 +#define X100_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define X100_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define X100_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define X100_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define X100_PHY0_PLL1_CP_PADJ 0x710 +#define X100_PHY0_PLL1_CP_IADJ 0x714 +#define X100_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define X100_PHY0_PLL1_INTDIV 0x340 +#define X100_PHY0_PLL1_FRACDIVL 0x344 +#define X100_PHY0_PLL1_FRACDIVH 0x348 +#define X100_PHY0_PLL1_HIGH_THR 0x34c +#define X100_PHY0_PLL1_PDIAG_CTRL 0x700 +#define X100_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define X100_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define X100_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define X100_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define X100_PHY1_PLL0_CP_PADJ 0x80690 +#define X100_PHY1_PLL0_CP_IADJ 0x80694 +#define X100_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define X100_PHY1_PLL0_INTDIV 0x80240 +#define X100_PHY1_PLL0_FRACDIVL 0x80244 +#define X100_PHY1_PLL0_FRACDIVH 0x80248 +#define X100_PHY1_PLL0_HIGH_THR 0x8024c +#define X100_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define X100_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define X100_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define X100_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define X100_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define X100_PHY0_PLL0_TX_PSC_A0 0x18400 +#define X100_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define X100_PHY0_PLL0_TX_PSC_A2 0x18408 +#define X100_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define X100_PHY0_PLL0_TX_PSC_A3 0x1840c +#define X100_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define X100_PHY0_PLL0_RX_PSC_A0 0x28000 +#define X100_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define X100_PHY0_PLL0_RX_PSC_A2 0x28008 +#define X100_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define X100_PHY0_PLL0_RX_PSC_A3 0x2800C +#define X100_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define X100_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define X100_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define X100_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 
0xfb +#define X100_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define X100_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define X100_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define X100_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define X100_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define X100_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define X100_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define X100_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define X100_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define X100_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define X100_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define X100_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define X100_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define X100_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define X100_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define X100_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define X100_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define X100_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define X100_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define X100_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define X100_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define X100_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define X100_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define X100_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define X100_PHY0_PLL0_TX_DRV 0x18318 +#define X100_PHY0_PLL1_TX_DRV 0x1a318 +#define X100_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define X100_PHY0_PLL0_TX_MGNFS 0x18140 +#define X100_PHY0_PLL1_TX_MGNFS 0x1a140 +#define X100_PHY1_PLL0_TX_MGNFS 0x90140 + +#define X100_PHY0_PLL0_TX_CPOST 0x18130 +#define X100_PHY0_PLL1_TX_CPOST 0x1a130 +#define X100_PHY0_PLL1_TX_CPOST1 0x1a13c +#define X100_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ +#endif /* __X100_REG_H__ */ diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 754f6b25f2652ee83e578c1a6786190eb554e70f..6d9f78612deeb81cd8ec8d1adf71299d28efd2ad 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm) dev_err(drm->dev, "CLCD: unable to get clcdclk.\n"); return PTR_ERR(parent); } + + spin_lock_init(&priv->tim2_lock); + /* If the clock divider is broken, use the parent directly */ if (priv->variant->broken_clockdivider) { priv->clk = parent; return 0; } parent_name = __clk_get_name(parent); - - spin_lock_init(&priv->tim2_lock); div->init = &init; ret = devm_clk_hw_register(drm->dev, div); diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index b9baefdba38a17e5c2647075e588748ecfecf980..1c318ad32a8cd39c48b354312277ea924d20f94f 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) ret = vexpress_muxfpga_init(); if (ret) { dev_err(dev, "unable to initialize muxfpga driver\n"); + of_node_put(np); return ret; } @@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) pdev = 
of_find_device_by_node(np); if (!pdev) { dev_err(dev, "can't find the sysreg device, deferring\n"); + of_node_put(np); return -EPROBE_DEFER; } map = dev_get_drvdata(&pdev->dev); if (!map) { dev_err(dev, "sysreg has not yet probed\n"); platform_device_put(pdev); + of_node_put(np); return -EPROBE_DEFER; } } else { map = syscon_node_to_regmap(np); } + of_node_put(np); if (IS_ERR(map)) { dev_err(dev, "no Versatile syscon regmap\n"); diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 2445e75cf7ea6664854325a6383fe0e32e2bdd57..d00f45eed03cac4655b8bd6bea72781b32365403 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -136,20 +136,11 @@ static int qxl_drm_freeze(struct drm_device *dev) { struct pci_dev *pdev = dev->pdev; struct qxl_device *qdev = dev->dev_private; - struct drm_crtc *crtc; - - drm_kms_helper_poll_disable(dev); - - console_lock(); - qxl_fbdev_set_suspend(qdev, 1); - console_unlock(); + int ret; - /* unpin the front buffers */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - if (crtc->enabled) - (*crtc_funcs->disable)(crtc); - } + ret = drm_mode_config_helper_suspend(dev); + if (ret) + return ret; qxl_destroy_monitors_object(qdev); qxl_surf_evict(qdev); @@ -175,14 +166,7 @@ static int qxl_drm_resume(struct drm_device *dev, bool thaw) } qxl_create_monitors_object(qdev); - drm_helper_resume_force_mode(dev); - - console_lock(); - qxl_fbdev_set_suspend(qdev, 0); - console_unlock(); - - drm_kms_helper_poll_enable(dev); - return 0; + return drm_mode_config_helper_resume(dev); } static int qxl_pm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 01220d386b0aed446e43fb04db7f485122ffc5b9..1414907ddb503c8e1755d2195213a8a067f53a57 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -379,7 +379,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index c666b89eed5d12d77c3be77ac3c23290895a9424..e5d5ab5b64ee95d796d98869141d1f8351bc4c33 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, { struct qxl_device *qdev = dev->dev_private; struct qxl_bo *qobj; + struct drm_gem_object *gobj; uint32_t handle; int r; struct qxl_surface surf; @@ -59,11 +60,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, surf.format = format; r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_VRAM, - args->size, &surf, &qobj, + args->size, &surf, &gobj, &handle); if (r) return r; + qobj = gem_to_qxl_bo(gobj); qobj->is_dumb = true; + drm_gem_object_put_unlocked(gobj); args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index f5c1e7872e920d3a0a3c9e7ec0b0bcfa5779def8..886f0cae2dc5756bf15ba95af88126198daad378 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -73,32 +73,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, return 0; } +/* + * If the caller passed a valid gobj pointer, it is 
responsible for calling + * drm_gem_object_put() when it no longer needs to access the object. + * + * If gobj is NULL, it is handled internally. + */ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, struct drm_file *file_priv, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle) { - struct drm_gem_object *gobj; int r; + struct drm_gem_object *local_gobj; - BUG_ON(!qobj); BUG_ON(!handle); r = qxl_gem_object_create(qdev, size, 0, domain, false, false, surf, - &gobj); + &local_gobj); if (r) return -ENOMEM; - r = drm_gem_handle_create(file_priv, gobj, handle); + r = drm_gem_handle_create(file_priv, local_gobj, handle); if (r) return r; - /* drop reference from allocate - handle holds it now */ - *qobj = gem_to_qxl_bo(gobj); - drm_gem_object_put_unlocked(gobj); + + if (gobj) + *gobj = local_gobj; + else + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put_unlocked(local_gobj); + return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 6cc9f3367fa05581a90280b7cc111259cc2692fa..eb4d6827f74a3e6443f00c68c511b291878d0924 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -36,7 +36,6 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct qxl_device *qdev = dev->dev_private; struct drm_qxl_alloc *qxl_alloc = data; int ret; - struct qxl_bo *qobj; uint32_t handle; u32 domain = QXL_GEM_DOMAIN_VRAM; @@ -48,7 +47,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, domain, qxl_alloc->size, NULL, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); @@ -162,8 +161,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) return -EINVAL; - if (!access_ok(VERIFY_READ, - u64_to_user_ptr(cmd->command), + if (!access_ok(u64_to_user_ptr(cmd->command), cmd->command_size)) return -EFAULT; @@ -392,7 +390,6 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_alloc_surf *param = data; - struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; @@ -412,7 +409,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, QXL_GEM_DOMAIN_SURFACE, size, &surf, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4d0748785b5430078a638ef290798..a97294ac96d5914b2cfdd561649a0ccdf56a9fb6 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) u16 data_offset, size; u8 frev, crev; struct ci_power_info *pi; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == PCI_SPEED_UNKNOWN) { pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 5712d63dca2070c43fc3b85f9b6f214e9db3e1c9..da728f7fc42bec71a7fbc9398da4833dcf844d57 100644 --- 
a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4815,14 +4815,15 @@ int evergreen_irq_process(struct radeon_device *rdev) break; case 44: /* hdmi */ afmt_idx = src_data; - if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - if (afmt_idx > 5) { DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); break; } + + if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG; queue_hdmi = true; DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 54324330b91f43a25f33617e8bb47b7203339f98..2f0a5bd5017460ea2cbbff5c6187e51494f71324 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); + break; case CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 7d39ed63e5be755e7d3de41899b46663086bf52b..b24401f21e934cbc02ef730c46ce9409a78f182a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1820,8 +1820,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) track->textures[i].tex_coord_type = 2; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index c22321cc5a4158fd9fc628e1c0ac2195247aed55..c2b506c707a28dc45f8b0c790fa4056e7d82d130 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) track->textures[i].lookup_disable = true; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 414642e5b7a3110353bafb3ad022f0d7fee4e78f..263f1d5c10866049d615c91f4a76a7f16bf38836 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -476,6 +476,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + 
return NULL; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -490,6 +492,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode * simpler. */ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } @@ -751,7 +755,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_encoder->output_csc = val; - if (connector->encoder->crtc) { + if (connector->encoder && connector->encoder->crtc) { struct drm_crtc *crtc = connector->encoder->crtc; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 59c8a6647ff210346b4e4bd7916b2c5909ba6b22..cc1c07963116c3a60ad920c1cd41fd41b8e4f91e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1625,6 +1625,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, if (r) { /* delay GPU reset to resume */ radeon_fence_driver_force_completion(rdev, i); + } else { + /* finish executing delayed work */ + flush_delayed_work(&rdev->fence_drv[i].lockup_work); } } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 9d3ac8b981dab3d324e2864495c5fe739f4d675f..09522b9154c20314d371f2002b2c927d883b749b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -672,11 +672,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index) if (radeon_crtc == NULL) return; + radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); + if (!radeon_crtc->flip_queue) { + kfree(radeon_crtc); + return; + } + drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; - radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); rdev->mode_info.crtcs[index] = radeon_crtc; if (rdev->family >= CHIP_BONAIRE) { @@ -921,12 +926,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, ref_div_max = max(min(100 / post_div, ref_div_max), 1u); /* get matching reference and feedback divider */ - *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); + *ref_div = min(max(den/post_div, 1u), ref_div_max); *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); /* limit fb divider to its maximum */ if (*fb_div > fb_div_max) { - *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); + *ref_div = (*ref_div * fb_div_max)/(*fb_div); *fb_div = fb_div_max; } } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 2a7977a23b31cdf84c391ab86d55406ccc74d591..c26f09b47ecb2b1d305de20cbded4332047d69c0 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -340,8 +340,39 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + unsigned long flags = 0; int ret; + if (!ent) + return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */ + + flags = ent->driver_data; + + if (!radeon_si_support) { + switch (flags & RADEON_FAMILY_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case 
CHIP_HAINAN: + dev_info(&pdev->dev, + "SI support disabled by module param\n"); + return -ENODEV; + } + } + if (!radeon_cik_support) { + switch (flags & RADEON_FAMILY_MASK) { + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + dev_info(&pdev->dev, + "CIK support disabled by module param\n"); + return -ENODEV; + } + } + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dec1e081f52958e9f1d5cfe7813c95ef021ca320..3ff835767ac58fbab6526d7cb4df6ab7b53b217a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -95,31 +95,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) struct radeon_device *rdev; int r, acpi_status; - if (!radeon_si_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(dev->dev, - "SI support disabled by module param\n"); - return -ENODEV; - } - } - if (!radeon_cik_support) { - switch (flags & RADEON_FAMILY_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(dev->dev, - "CIK support disabled by module param\n"); - return -ENODEV; - } - } - rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); if (rdev == NULL) { return -ENOMEM; @@ -172,6 +147,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (radeon_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015804d6d5ee3ef5d6f24ba74e2b1d9..db2d8b84e137be8182b32cf6c0df1a78498eeec6 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: @@ -6899,7 +6900,7 @@ int si_dpm_init(struct radeon_device *rdev) struct ni_power_info *ni_pi; struct si_power_info *si_pi; struct atom_clock_dividers dividers; - enum pci_bus_speed speed_cap; + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; struct pci_dev *root = rdev->pdev->bus->self; int ret; @@ -6911,7 +6912,8 @@ int si_dpm_init(struct radeon_device *rdev) eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; - speed_cap = pcie_get_speed_cap(root); + if (!pci_is_root_bus(rdev->pdev->bus)) + speed_cap = pcie_get_speed_cap(root); if (speed_cap == PCI_SPEED_UNKNOWN) { si_pi->sys_pcie_mask = 0; } else { diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 1e4975f3374ce8ba24df5a3f25320b1709dfba47..b9eb7e2d3352175eae46e3f17de9702345fd5c21 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1620,6 +1620,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = diff --git 
a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index f0bc7cc0e913f020ec0a25dfc65a7c0f65503ab0..0386b454e221847b26b6c33977245a812983fbcd 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -300,6 +300,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, dev_dbg(rcdu->dev, "connected entity %pOF is disabled, skipping\n", entity); + of_node_put(entity); return -ENODEV; } @@ -335,6 +336,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, dev_warn(rcdu->dev, "no encoder found for endpoint %pOF, skipping\n", ep->local_node); + of_node_put(entity); return -ENODEV; } @@ -516,12 +518,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = 4095; - dev->mode_config.max_height = 2047; dev->mode_config.normalize_zpos = true; dev->mode_config.funcs = &rcar_du_mode_config_funcs; dev->mode_config.helper_private = &rcar_du_mode_config_helper; + if (rcdu->info->gen < 3) { + dev->mode_config.max_width = 4095; + dev->mode_config.max_height = 2047; + } else { + /* + * The Gen3 DU uses the VSP1 for memory access, and is limited + * to frame sizes of 8190x8190. + */ + dev->mode_config.max_width = 8190; + dev->mode_config.max_height = 8190; + } + rcdu->num_crtcs = hweight8(rcdu->info->channels_mask); ret = rcar_du_properties_init(rcdu); diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 080f0535219502306774ea799c10cc30793006ca..6a4da3a0ff1c3f8c4fb765d6d03fcf3c0428200c 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev) static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP - .suspend = rockchip_dp_suspend, + .suspend_late = rockchip_dp_suspend, .resume_early = rockchip_dp_resume, #endif }; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c index 3105965fc26034e921de4c86d39475180e70b03c..6c8b14fb1d2f3c11ddefff385bff3a070f99ba3b 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -113,7 +113,7 @@ static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val) static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp, u8 module_id, u8 opcode, - u8 req_size) + u16 req_size) { u32 mbox_size, i; u8 header[4]; @@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp, } static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp, - u8 *buff, u8 buff_size) + u8 *buff, u16 buff_size) { u32 i; int ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index f814d37b1db21df942125d8bcdffab107813139c..00a06768edb2016423cd107456db44d4e2000bef 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -442,6 +442,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev) return 0; } +static void rockchip_drm_platform_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + if (drm) + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id rockchip_drm_dt_ids[] = { { .compatible = "rockchip,display-subsystem", }, { /* sentinel */ }, @@ -451,6 +459,7 @@ MODULE_DEVICE_TABLE(of, 
rockchip_drm_dt_ids); static struct platform_driver rockchip_drm_platform_driver = { .probe = rockchip_drm_platform_probe, .remove = rockchip_drm_platform_remove, + .shutdown = rockchip_drm_platform_shutdown, .driver = { .name = "rockchip-drm", .of_match_table = rockchip_drm_dt_ids, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c index 79d00d861a31f701451d3387a6cc6e8fc8e71234..01ff3c8588750ea466be307a1f820f4ed73f2fff 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c @@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all); int rockchip_drm_psr_register(struct drm_encoder *encoder, int (*psr_set)(struct drm_encoder *, bool enable)) { - struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; + struct rockchip_drm_private *drm_drv; struct psr_drv *psr; if (!encoder || !psr_set) return -EINVAL; + drm_drv = encoder->dev->dev_private; + psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL); if (!psr) return -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 1359e5c773e4fe5bb9d31b465073897c3c741739..873624a11ce8805c702e3544eae227af957edb27 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -505,6 +505,18 @@ static void vop_core_clks_disable(struct vop *vop) clk_disable(vop->hclk); } +static void vop_win_disable(struct vop *vop, const struct vop_win_data *win) +{ + if (win->phy->scl && win->phy->scl->ext) { + VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE); + VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE); + } + + VOP_WIN_SET(vop, win, enable, 0); +} + static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); @@ -550,7 +562,7 @@ static int vop_enable(struct drm_crtc *crtc) struct vop_win *vop_win = &vop->win[i]; const struct vop_win_data *win = vop_win->data; - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); } spin_unlock(&vop->reg_lock); @@ -694,7 +706,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, spin_lock(&vop->reg_lock); - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); spin_unlock(&vop->reg_lock); } @@ -868,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, struct vop *vop = to_vop(crtc); adjusted_mode->clock = - clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; + DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000), + 1000); return true; } @@ -1449,7 +1462,7 @@ static int vop_initial(struct vop *vop) int channel = i * 2 + 1; VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); - VOP_WIN_SET(vop, win, enable, 0); + vop_win_disable(vop, win); VOP_WIN_SET(vop, win, gate, 1); } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 4fc211e19d6e5aac3dd0cfddc3b72f4a6f858730..e7485cb688df9abca75799393004816779854b04 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -309,6 +309,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) */ if (spsc_queue_peek(&entity->job_queue)) { struct drm_sched_job *job; + struct dma_fence *f; int r; /* Park the kernel for a moment to make sure it isn't processing @@ -325,6 +326,10 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) while ((job = 
to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { struct drm_sched_fence *s_fence = job->s_fence; + /* Wait for all dependencies to avoid data corruptions */ + while ((f = job->sched->ops->dependency(job, entity))) + dma_fence_wait(f, false); + drm_sched_fence_scheduled(s_fence); dma_fence_set_error(&s_fence->finished, -ESRCH); diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index c32de6cbf06164cb9a95542dfcdc2173976249e6..0bd3230b2caacba13cd9264bbc948bba2d0ef779 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -629,6 +629,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, mixer = to_sti_mixer(crtc); crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = state->crtc_x; dst_y = state->crtc_y; diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 808d9fb627e97ab07562c17183ade0508abfe0b7..477d0a27b9a5d7a0a4a4ee919e4515845015d6f0 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -825,6 +826,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = { }; static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = ltdc_plane_atomic_check, .atomic_update = ltdc_plane_atomic_update, .atomic_disable = ltdc_plane_atomic_disable, diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index d7950b52a1fd996cdbbaba632dce92db3cfe3842..e30b1f5b9d91afeafe289b695d9f1210f645e3e6 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, remote = of_graph_get_remote_port_parent(ep); if (!remote) continue; + of_node_put(remote); /* does this node match any registered engines? 
*/ list_for_each_entry(frontend, &drv->frontend_list, list) { if (remote == frontend->node) { - of_node_put(remote); of_node_put(port); + of_node_put(ep); return frontend; } } } - + of_node_put(port); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 8b0cd08034e0c74bb80f57fca43710228077b29c..57f61ec4bc6becade7f9c98ec81f2e8aff405c48 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -92,6 +92,8 @@ static int sun4i_drv_bind(struct device *dev) ret = -ENOMEM; goto free_drm; } + + dev_set_drvdata(dev, drm); drm->dev_private = drv; INIT_LIST_HEAD(&drv->frontend_list); INIT_LIST_HEAD(&drv->engine_list); @@ -156,7 +158,10 @@ static void sun4i_drv_unbind(struct device *dev) drm_kms_helper_poll_fini(drm); sun4i_framebuffer_free(drm); drm_mode_config_cleanup(drm); + + component_unbind_all(dev, NULL); of_reserved_mem_device_release(dev); + drm_dev_put(drm); } @@ -405,6 +410,8 @@ static int sun4i_drv_probe(struct platform_device *pdev) static int sun4i_drv_remove(struct platform_device *pdev) { + component_master_del(&pdev->dev, &sun4i_drv_master_ops); + return 0; } diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 061d2e0d9011ee88991b3f0fb1b4e2dd54925bee..8ad36f574df8cd93789205675ca8d59044b6237d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); + + clk_disable_unprepare(hdmi->tmds_clk); } static void sun4i_hdmi_enable(struct drm_encoder *encoder) @@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); + clk_prepare_enable(hdmi->tmds_clk); + sun4i_hdmi_setup_avi_infoframes(hdmi, mode); val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); @@ -647,8 +651,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); cec_unregister_adapter(hdmi->cec_adap); - drm_connector_cleanup(&hdmi->connector); - drm_encoder_cleanup(&hdmi->encoder); i2c_del_adapter(hdmi->i2c); clk_disable_unprepare(hdmi->mod_clk); clk_disable_unprepare(hdmi->bus_clk); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 3fb084f802e298948d2115995dec660d1b1d22b0..fda1ae12069a7fae1aff579bbc90af283a992eed 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -423,7 +423,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 6; + tcon->dclk_min_div = 1; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); @@ -672,6 +672,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, return PTR_ERR(tcon->sclk0); } } + clk_prepare_enable(tcon->sclk0); if (tcon->quirks->has_channel_1) { tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); @@ -686,6 +687,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) { + clk_disable_unprepare(tcon->sclk0); clk_disable_unprepare(tcon->clk); } diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 
e3b34a3455460fbab688148f6d19c2ed0607f4f4..97a0573cc51459aa81a9dff31534b35b7d581239 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -357,7 +357,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi, static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { - return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1; + u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); + u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; + + if (delay > mode->vtotal) + delay = delay % mode->vtotal; + + return max_t(u16, delay, 1); } static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index a564b5dfe082839896833db50ffbabbcfbe014ee..dc9b1398adb91f010dabdc847f5e0167881041f5 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -177,7 +177,8 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi, SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW | SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4); ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(9) | - SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13); + SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13) | + SUN8I_HDMI_PHY_ANA_CFG3_REG_EMP(3); } regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, @@ -501,22 +502,13 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node) goto err_put_clk_pll0; } } - - ret = sun8i_phy_clk_create(phy, dev, - phy->variant->has_second_pll); - if (ret) { - dev_err(dev, "Couldn't create the PHY clock\n"); - goto err_put_clk_pll1; - } - - clk_prepare_enable(phy->clk_phy); } phy->rst_phy = of_reset_control_get_shared(node, "phy"); if (IS_ERR(phy->rst_phy)) { dev_err(dev, "Could not get phy reset control\n"); ret = PTR_ERR(phy->rst_phy); - goto err_disable_clk_phy; + goto err_put_clk_pll1; } ret = reset_control_deassert(phy->rst_phy); @@ -537,18 +529,29 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node) goto err_disable_clk_bus; } + if (phy->variant->has_phy_clk) { + ret = sun8i_phy_clk_create(phy, dev, + phy->variant->has_second_pll); + if (ret) { + dev_err(dev, "Couldn't create the PHY clock\n"); + goto err_disable_clk_mod; + } + + clk_prepare_enable(phy->clk_phy); + } + hdmi->phy = phy; return 0; +err_disable_clk_mod: + clk_disable_unprepare(phy->clk_mod); err_disable_clk_bus: clk_disable_unprepare(phy->clk_bus); err_deassert_rst_phy: reset_control_assert(phy->rst_phy); err_put_rst_phy: reset_control_put(phy->rst_phy); -err_disable_clk_phy: - clk_disable_unprepare(phy->clk_phy); err_put_clk_pll1: clk_put(phy->clk_pll1); err_put_clk_pll0: diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index d5240b777a8fdfc6da3aa5b9f57acab5597985ab..dfbcd1ad81a5442e05b9f81878d27351e2d9dff3 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -168,6 +168,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, goto err_assert_reset; } + /* + * At least on H6, some registers have some bits set by default + * which may cause issues. Clear them here. + */ + writel(0, regs + TCON_TOP_PORT_SEL_REG); + writel(0, regs + TCON_TOP_GATE_SRC_REG); + /* * TCON TOP has two muxes, which select parent clock for each TCON TV * channel clock. Parent could be either TCON TV or TVE clock. 
For now @@ -210,7 +217,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, err_unregister_gates: for (i = 0; i < CLK_NUM; i++) - if (clk_data->hws[i]) + if (!IS_ERR_OR_NULL(clk_data->hws[i])) clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); err_assert_reset: @@ -228,7 +235,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master, of_clk_del_provider(dev->of_node); for (i = 0; i < CLK_NUM; i++) - clk_hw_unregister_gate(clk_data->hws[i]); + if (clk_data->hws[i]) + clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); reset_control_assert(tcon_top->rst); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index ee6ca8fa1c6554d89b5f0ff0c8cb35f8bb09a5ab..b842a50ba0138820a3ba750851a3069f5c056e9a 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1452,9 +1452,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0); if (np) { struct platform_device *gangster = of_find_device_by_node(np); + of_node_put(np); + if (!gangster) + return -EPROBE_DEFER; dsi->slave = platform_get_drvdata(gangster); - of_node_put(np); if (!dsi->slave) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 4f80100ff5f34b7ca997fb5a31a8c3c6788e59cf..4cce11fd8836f93ec76cc43a0a535a9764617a09 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -204,7 +204,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); @@ -230,7 +230,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) } err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (err == 0) { err = -EFAULT; goto free_sgt; diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 8f4fcbb515fb4d16e27ed89f378993c214577efe..bb97cad1eb699007f6457d7cabac3ea78d576653 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct tegra_dc *dc = to_tegra_dc(old_state->crtc); struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc *dc; u32 value; /* rien ne va plus */ if (!old_state || !old_state->crtc) return; + dc = to_tegra_dc(old_state->crtc); + /* * XXX Legacy helpers seem to sometimes call ->atomic_disable() even * on planes that are already disabled. 
Make sure we fallback to the diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index d7fe9f15def1dbf426b4f23bb7fe46cfa64248af..89cb70da2bfe6456dbf3f9befdc6e92e9923b555 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2922,6 +2922,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor) * earlier */ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index; + } else { + if (sor->soc->supports_edp) + sor->index = 0; + else + sor->index = 1; } return 0; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 0fb300d41a09c02508e70e89678695e09a8ff0f9..e1868776da2524be333c33f89cb4864ccdf11043 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -184,6 +184,12 @@ static void tilcdc_fini(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; +#ifdef CONFIG_CPU_FREQ + if (priv->freq_transition.notifier_call) + cpufreq_unregister_notifier(&priv->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +#endif + if (priv->crtc) tilcdc_crtc_shutdown(priv->crtc); @@ -198,12 +204,6 @@ static void tilcdc_fini(struct drm_device *dev) drm_mode_config_cleanup(dev); tilcdc_remove_external_device(dev); -#ifdef CONFIG_CPU_FREQ - if (priv->freq_transition.notifier_call) - cpufreq_unregister_notifier(&priv->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); -#endif - if (priv->clk) clk_put(priv->clk); @@ -274,17 +274,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) goto init_failed; } -#ifdef CONFIG_CPU_FREQ - priv->freq_transition.notifier_call = cpufreq_transition; - ret = cpufreq_register_notifier(&priv->freq_transition, - CPUFREQ_TRANSITION_NOTIFIER); - if (ret) { - dev_err(dev, "failed to register cpufreq notifier\n"); - priv->freq_transition.notifier_call = NULL; - goto init_failed; - } -#endif - if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH; @@ -361,6 +350,17 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) } modeset_init(ddev); +#ifdef CONFIG_CPU_FREQ + priv->freq_transition.notifier_call = cpufreq_transition; + ret = cpufreq_register_notifier(&priv->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); + if (ret) { + dev_err(dev, "failed to register cpufreq notifier\n"); + priv->freq_transition.notifier_call = NULL; + goto init_failed; + } +#endif + if (priv->is_componentized) { ret = component_bind_all(dev, ddev); if (ret < 0) diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c index 455fefe012f5912e825b4296e8c96b0129b814ea..6044a01069ce2d8821d19c4b6d9a52c5848a683b 100644 --- a/drivers/gpu/drm/tinydrm/ili9225.c +++ b/drivers/gpu/drm/tinydrm/ili9225.c @@ -278,7 +278,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) mipi->enabled = false; } -static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, +static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -288,11 +288,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) + if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) bpw = 16; 
gpiod_set_value_cansleep(mipi->dc, 1); diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c index cb3441e51d5f03f4c3e1f6b82cb091ef64af7637..e772a8a9da80eceb73c17918b760907b3f9059da 100644 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c @@ -144,16 +144,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read); */ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) { + u8 *cmdbuf; int ret; + /* SPI requires dma-safe buffers */ + cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL); + if (!cmdbuf) + return -ENOMEM; + mutex_lock(&mipi->cmdlock); - ret = mipi->command(mipi, cmd, data, len); + ret = mipi->command(mipi, cmdbuf, data, len); mutex_unlock(&mipi->cmdlock); + kfree(cmdbuf); + return ret; } EXPORT_SYMBOL(mipi_dbi_command_buf); +/* This should only be used by mipi_dbi_command() */ +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) +{ + u8 *buf; + int ret; + + buf = kmemdup(data, len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mipi_dbi_command_buf(mipi, cmd, buf, len); + + kfree(buf); + + return ret; +} +EXPORT_SYMBOL(mipi_dbi_command_stackbuf); + /** * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary * @dst: The destination buffer @@ -741,18 +767,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc, return 0; } -static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd, u8 *parameters, size_t num) { - unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; + unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return -ENOTSUPP; - MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); - ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8); + ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8); if (ret || !num) return ret; @@ -761,7 +787,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, /* MIPI DBI Type C Option 3 */ -static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd, u8 *data, size_t len) { struct spi_device *spi = mipi->spi; @@ -770,7 +796,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, struct spi_transfer tr[2] = { { .speed_hz = speed_hz, - .tx_buf = &cmd, + .tx_buf = cmd, .len = 1, }, { .speed_hz = speed_hz, @@ -788,8 +814,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, * Support non-standard 24-bit and 32-bit Nokia read commands which * start with a dummy clock, so we need to read an extra byte. 
*/ - if (cmd == MIPI_DCS_GET_DISPLAY_ID || - cmd == MIPI_DCS_GET_DISPLAY_STATUS) { + if (*cmd == MIPI_DCS_GET_DISPLAY_ID || + *cmd == MIPI_DCS_GET_DISPLAY_STATUS) { if (!(len == 3 || len == 4)) return -EINVAL; @@ -819,7 +845,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); } - MIPI_DBI_DEBUG_COMMAND(cmd, data, len); + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); err_free: kfree(buf); @@ -827,7 +853,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, return ret; } -static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -835,18 +861,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, u32 speed_hz; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return mipi_dbi_typec3_command_read(mipi, cmd, par, num); - MIPI_DBI_DEBUG_COMMAND(cmd, par, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, par, num); gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7c484729f9b21ad9f7bcf99b810261e8b5d8f68b..268f5a3b312245e9a450cf49e7a515daa0076a8c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1445,7 +1445,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj) container_of(kobj, struct ttm_bo_global, kobj); __free_page(glob->dummy_read_page); - kfree(glob); } void ttm_bo_global_release(struct drm_global_reference *ref) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 046a6dda690a268df98159e5a8014b6826a739c9..40904e84f883acc24cb89da22714028fb46da0a3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, if (!fbo) return -ENOMEM; - ttm_bo_get(bo); fbo->base = *bo; + fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; + + ttm_bo_get(bo); fbo->bo = bo; /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 6fe91c1b692d6fd547fcfff08b1633c229e77b0a..2119ae715962013e1629c0ee53d361c7da637fb6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -61,9 +61,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, /* * If possible, avoid waiting for GPU with mmap_sem - * held. + * held. We only do this if the fault allows retry and this + * is the first attempt. 
*/ - if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault_flag_allow_retry_first(vmf->flags)) { ret = VM_FAULT_RETRY; if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) goto out_unlock; @@ -136,7 +137,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) if (err != -EBUSY) return VM_FAULT_NOPAGE; - if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + /* + * If the fault allows retry and this is the first + * fault attempt, we try to release the mmap_sem + * before waiting + */ + if (fault_flag_allow_retry_first(vmf->flags)) { if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ttm_bo_get(bo); up_read(&vmf->vma->vm_mm->mmap_sem); @@ -273,15 +279,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) else ret = vmf_insert_pfn(&cvma, address, pfn); - /* - * Somebody beat us to this PTE or prefaulting to - * an already populated PTE, or prefaulting error. - */ - - if (unlikely((ret == VM_FAULT_NOPAGE && i > 0))) - break; - else if (unlikely(ret & VM_FAULT_ERROR)) - goto out_io_unlock; + /* Never error on prefaulted PTEs */ + if (unlikely((ret & VM_FAULT_ERROR))) { + if (i == 0) + goto out_io_unlock; + else + break; + } address += PAGE_SIZE; if (unlikely(++page_offset >= page_last)) diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 450387c92b63510df8b9e8ff340b511472b69462..df73d5ff84a868c93b0a7a9e3193e7cbb726e54f 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -216,14 +216,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj, return size; } -static void ttm_mem_global_kobj_release(struct kobject *kobj) -{ - struct ttm_mem_global *glob = - container_of(kobj, struct ttm_mem_global, kobj); - - kfree(glob); -} - static struct attribute *ttm_mem_global_attrs[] = { &ttm_mem_global_lower_mem_limit, NULL @@ -235,7 +227,6 @@ static const struct sysfs_ops ttm_mem_global_ops = { }; static struct kobj_type ttm_mem_glob_kobj_type = { - .release = &ttm_mem_global_kobj_release, .sysfs_ops = &ttm_mem_global_ops, .default_attrs = ttm_mem_global_attrs, }; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f841accc2c0064a3edd865423a10818480477f39..627f8dc91d0ed23e0958dfc39d106c967dcd376a 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, } #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(flags & TTM_PAGE_FLAG_DMA32)) { - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + if (!(flags & TTM_PAGE_FLAG_DMA32) && + (npages - i) >= HPAGE_PMD_NR) { + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j == HPAGE_PMD_NR) @@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); - while (i < npages) { + while ((npages - i) >= HPAGE_PMD_NR) { struct page *p = pages[i]; unsigned j; if (!p) break; - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j != HPAGE_PMD_NR) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e3a0691582ffdf0d85bc7c84ab424efc310e01be..68cfa25674e509960e51f9c6c27a9d40cea1325c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ttm_tt_init_fields(ttm, bo, page_flags); if 
(ttm_tt_alloc_page_directory(ttm)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, else ret = ttm_dma_tt_alloc_page_directory(ttm_dma); if (ret) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 9ef515df724b14ae38b2061a26ce2fa2663b8678..f28703db8dbd65a4f68b2f7d960a3157858b72e5 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -47,10 +47,17 @@ static const struct file_operations udl_driver_fops = { .llseek = noop_llseek, }; +static void udl_driver_release(struct drm_device *dev) +{ + udl_fini(dev); + udl_modeset_cleanup(dev); + drm_dev_fini(dev); + kfree(dev); +} + static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = udl_driver_load, - .unload = udl_driver_unload, + .release = udl_driver_release, /* gem hooks */ .gem_free_object_unlocked = udl_gem_free_object, @@ -73,28 +80,56 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static struct udl_device *udl_driver_create(struct usb_interface *interface) +{ + struct usb_device *udev = interface_to_usbdev(interface); + struct udl_device *udl; + int r; + + udl = kzalloc(sizeof(*udl), GFP_KERNEL); + if (!udl) + return ERR_PTR(-ENOMEM); + + r = drm_dev_init(&udl->drm, &driver, &interface->dev); + if (r) { + kfree(udl); + return ERR_PTR(r); + } + + udl->udev = udev; + udl->drm.dev_private = udl; + + r = udl_init(udl); + if (r) { + drm_dev_fini(&udl->drm); + kfree(udl); + return ERR_PTR(r); + } + + usb_set_intfdata(interface, udl); + return udl; +} + static int udl_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { - struct usb_device *udev = interface_to_usbdev(interface); - struct drm_device *dev; int r; + struct udl_device *udl; - dev = drm_dev_alloc(&driver, &interface->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + udl = udl_driver_create(interface); + if (IS_ERR(udl)) + return PTR_ERR(udl); - r = drm_dev_register(dev, (unsigned long)udev); + r = drm_dev_register(&udl->drm, 0); if (r) goto err_free; - usb_set_intfdata(interface, dev); - DRM_INFO("Initialized udl on minor %d\n", dev->primary->index); + DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); return 0; err_free: - drm_dev_unref(dev); + drm_dev_put(&udl->drm); return r; } diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index e9e9b1ff678ee0a81d0d4b100b816b19122c7f0c..35c1f33fbc1a0b455c9d2dcf3d23d638923bc863 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,8 +50,8 @@ struct urb_list { struct udl_fbdev; struct udl_device { + struct drm_device drm; struct device *dev; - struct drm_device *ddev; struct usb_device *udev; struct drm_crtc *crtc; @@ -71,6 +71,8 @@ struct udl_device { atomic_t cpu_kcycles_used; /* transpired during pixel processing */ }; +#define to_udl(x) container_of(x, struct udl_device, drm) + struct udl_gem_object { struct drm_gem_object base; struct page **pages; @@ -102,8 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev); int 
udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); void udl_urb_completion(struct urb *urb); -int udl_driver_load(struct drm_device *dev, unsigned long flags); -void udl_driver_unload(struct drm_device *dev); +int udl_init(struct udl_device *udl); +void udl_fini(struct drm_device *dev); int udl_fbdev_init(struct drm_device *dev); void udl_fbdev_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dd9ffded223b5fb09c025d518d5090b27716b560..4ab101bf1df010b58c377079e892fc8142c7064d 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, int width, int height) { struct drm_device *dev = fb->base.dev; - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int i, ret; char *cmd; cycles_t start_cycles, end_cycles; @@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user) { struct udl_fbdev *ufbdev = info->par; struct drm_device *dev = ufbdev->ufb.base.dev; - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); /* If the USB device is gone, we don't accept new opens */ - if (drm_dev_is_unplugged(udl->ddev)) + if (drm_dev_is_unplugged(&udl->drm)) return -ENODEV; ufbdev->fb_count++; @@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev, int udl_fbdev_init(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int bpp_sel = fb_bpp; struct udl_fbdev *ufbdev; int ret; @@ -480,7 +480,7 @@ int udl_fbdev_init(struct drm_device *dev) void udl_fbdev_cleanup(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); if (!udl->fbdev) return; @@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev) void udl_fbdev_unplug(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); struct udl_fbdev *ufbdev; if (!udl->fbdev) return; diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index d5a23295dd80c1a9c1f2cc202c4c93048fc163ef..3b3e17652bb20f341e68fb33302c8635a8db18c9 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, { struct udl_gem_object *gobj; struct drm_gem_object *obj; - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int ret = 0; mutex_lock(&udl->gem_lock); @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_put(&gobj->base); + drm_gem_object_put_unlocked(&gobj->base); unlock: mutex_unlock(&udl->gem_lock); return ret; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index f455f095a14685d234569eaed56121d6269393b1..8d22b6cd524123f60d2220bc32441cbf5ff8c285 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -29,7 +29,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev, struct usb_device *usbdev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); char *desc; char *buf; char *desc_end; @@ -165,7 +165,7 @@ void udl_urb_completion(struct urb *urb) static void udl_free_urb_list(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); 
int count = udl->urbs.count; struct list_head *node; struct urb_node *unode; @@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev) static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); struct urb *urb; struct urb_node *unode; char *buf; @@ -262,7 +262,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) struct urb *udl_get_urb(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int ret = 0; struct list_head *entry; struct urb_node *unode; @@ -295,7 +295,7 @@ struct urb *udl_get_urb(struct drm_device *dev) int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); int ret; BUG_ON(len > udl->urbs.size); @@ -310,20 +310,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) return ret; } -int udl_driver_load(struct drm_device *dev, unsigned long flags) +int udl_init(struct udl_device *udl) { - struct usb_device *udev = (void*)flags; - struct udl_device *udl; + struct drm_device *dev = &udl->drm; int ret = -ENOMEM; DRM_DEBUG("\n"); - udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL); - if (!udl) - return -ENOMEM; - - udl->udev = udev; - udl->ddev = dev; - dev->dev_private = udl; mutex_init(&udl->gem_lock); @@ -350,19 +342,13 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto err; - ret = drm_vblank_init(dev, 1); - if (ret) - goto err_fb; - drm_kms_helper_poll_init(dev); return 0; -err_fb: - udl_fbdev_cleanup(dev); + err: if (udl->urbs.count) udl_free_urb_list(dev); - kfree(udl); DRM_ERROR("%d\n", ret); return ret; } @@ -373,9 +359,9 @@ int udl_drop_usb(struct drm_device *dev) return 0; } -void udl_driver_unload(struct drm_device *dev) +void udl_fini(struct drm_device *dev) { - struct udl_device *udl = dev->dev_private; + struct udl_device *udl = to_udl(dev); drm_kms_helper_poll_fini(dev); @@ -383,6 +369,4 @@ void udl_driver_unload(struct drm_device *dev) udl_free_urb_list(dev); udl_fbdev_cleanup(dev); - udl_modeset_cleanup(dev); - kfree(udl); } diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 54d96518a131672ae02223be468fc6e6ca71fa96..a08766d39eab535d1c62d091cbed11ae781b4074 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev, bo->resv = attach->dmabuf->resv; bo->sgt = sgt; + obj->import_attach = attach; v3d_bo_get_pages(bo); v3d_mmu_insert_ptes(bo); diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 4db62c54574828315f56215bf59ca5986cb7995e..26470c77eb6e5217910a075ee43409ecf413740e 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) V3D_READ(v3d_hub_reg_defs[i].reg)); } - for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { - seq_printf(m, "%s (0x%04x): 0x%08x\n", - v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg, - V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + if (v3d->ver < 41) { + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_gca_reg_defs[i].name, + v3d_gca_reg_defs[i].reg, + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + } } for (core = 0; core < v3d->cores; core++) { diff --git 
a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 2a85fa68ffea51042b4c08dd8dc3497153590197..2a4c6187e675f713204efee885f0d97c6c568546 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -305,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) goto dev_destroy; - v3d_irq_init(v3d); + ret = v3d_irq_init(v3d); + if (ret) + goto gem_destroy; ret = drm_dev_register(drm, 0); if (ret) - goto gem_destroy; + goto irq_disable; return 0; +irq_disable: + v3d_irq_disable(v3d); gem_destroy: v3d_gem_destroy(drm); dev_destroy: diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index e6fed696ad869e2d700a94c2e6f4a10d36bf7529..0ad73f4b7509a08bff4c558f602c5db7a604a038 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -284,7 +284,7 @@ void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_flush_caches(struct v3d_dev *v3d); /* v3d_irq.c */ -void v3d_irq_init(struct v3d_dev *v3d); +int v3d_irq_init(struct v3d_dev *v3d); void v3d_irq_enable(struct v3d_dev *v3d); void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index e07514eb11b511ddaacd9ddd0cf0c3e3fbbbb1a7..22be0f2dff99c79010f9cd4f05ef669a5a20c4af 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -137,7 +137,7 @@ v3d_hub_irq(int irq, void *arg) return status; } -void +int v3d_irq_init(struct v3d_dev *v3d) { int ret, core; @@ -154,13 +154,22 @@ v3d_irq_init(struct v3d_dev *v3d) ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d); + if (ret) + goto fail; + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), v3d_irq, IRQF_SHARED, "v3d_core0", v3d); if (ret) - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + goto fail; v3d_irq_enable(v3d); + return 0; + +fail: + if (ret != -EPROBE_DEFER) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + return ret; } void diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 0e6a121858d13ec335ae0a1f19ad839123f9cbe6..5615ceb15708f331911dae4cf81750d2c076e5cd 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -998,7 +998,7 @@ static void vc4_crtc_reset(struct drm_crtc *crtc) { if (crtc->state) - __drm_atomic_helper_crtc_destroy_state(crtc->state); + vc4_crtc_destroy_state(crtc, crtc->state); crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); if (crtc->state) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fd5522fd179e56399c63ce819e0f2a3580bc9ec1..b187a46cc4bc6d551a38d41754161bd543eba65f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1086,6 +1086,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) * This VC/MMU should probably be exposed to avoid this kind of hacks. 
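v3d_irq_init() above now returns the devm_request_irq() status instead of only logging it, and stays silent on -EPROBE_DEFER so deferred probes do not spam the log. A short sketch of that pattern, with placeholder names (example_isr, "example"):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_irq_init(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_isr, IRQF_SHARED,
			       "example", pdev);
	if (ret) {
		/* Probe deferral is routine; only report real failures. */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "IRQ setup failed: %d\n", ret);
		return ret;
	}

	return 0;
}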
*/ addr = of_get_address(dev->of_node, 1, NULL, NULL); + if (!addr) + return -EINVAL; hdmi->audio.dma_data.addr = be32_to_cpup(addr) + VC4_HD_MAI_DATA; hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; hdmi->audio.dma_data.maxburst = 2; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index ca5aa7fba7694e35e0fe4d543a9b01aa0b897424..f4d8a730e821b6af218d4eadc87802a979d3c22e 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -216,6 +216,12 @@ static int vc4_atomic_commit(struct drm_device *dev, return 0; } + /* We know for sure we don't want an async update here. Set + * state->legacy_cursor_update to false to prevent + * drm_atomic_helper_setup_commit() from auto-completing + * commit->flip_done. + */ + state->legacy_cursor_update = false; ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index a3275fa66b7b9754c32580857e5b23fba0c58ec5..39e608271263dbde1454757018e5cfb655e1cd66 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -315,13 +315,16 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_get_scaling_mode(vc4_state->src_h[1], vc4_state->crtc_h); - /* YUV conversion requires that horizontal scaling be enabled, - * even on a plane that's otherwise 1:1. Looks like only PPF - * works in that case, so let's pick that one. + /* YUV conversion requires that horizontal scaling be enabled + * on the UV plane even if vc4_get_scaling_mode() returned + * VC4_SCALING_NONE (which can happen when the down-scaling + * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this + * case. */ - if (vc4_state->is_unity) - vc4_state->x_scaling[0] = VC4_SCALING_PPF; + if (vc4_state->x_scaling[1] == VC4_SCALING_NONE) + vc4_state->x_scaling[1] = VC4_SCALING_PPF; } else { + vc4_state->is_yuv = false; vc4_state->x_scaling[1] = VC4_SCALING_NONE; vc4_state->y_scaling[1] = VC4_SCALING_NONE; } @@ -815,6 +818,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, drm_atomic_set_fb_for_plane(plane->state, state->fb); } + swap(plane->state->fb, state->fb); /* Set the cursor's position on the screen. This is the * expected change from the drm_mode_cursor_universal() * helper. 
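The vc4_hdmi fix above refuses to continue when the expected "reg" entry is missing rather than dereferencing a NULL return from of_get_address(). The same guard in isolation, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical helper: fail cleanly when the second "reg" entry is
 * absent instead of dereferencing a NULL pointer from of_get_address().
 */
static int example_get_second_reg(struct device_node *np, u32 *out)
{
	const __be32 *addr = of_get_address(np, 1, NULL, NULL);

	if (!addr)
		return -EINVAL;

	*out = be32_to_cpup(addr);
	return 0;
}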
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 0e5620f76ee09b43d3fbc2b03ad20e3797bd1d2a..02f5188379784ad32e0a348bccbc074e9ab8d29e 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -189,15 +189,12 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, return ERR_CAST(obj); ret = drm_gem_handle_create(file, &obj->base, handle); - drm_gem_object_put_unlocked(&obj->base); - if (ret) - goto err; + if (ret) { + drm_gem_object_put_unlocked(&obj->base); + return ERR_PTR(ret); + } return &obj->base; - -err: - __vgem_gem_destroy(obj); - return ERR_PTR(ret); } static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, @@ -218,7 +215,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, args->size = gem_object->size; args->pitch = pitch; - DRM_DEBUG_DRIVER("Created object of size %lld\n", size); + drm_gem_object_put_unlocked(gem_object); + + DRM_DEBUG_DRIVER("Created object of size %llu\n", args->size); return 0; } @@ -471,31 +470,31 @@ static int __init vgem_init(void) if (!vgem_device) return -ENOMEM; - ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL); - if (ret) - goto out_free; - vgem_device->platform = platform_device_register_simple("vgem", -1, NULL, 0); if (IS_ERR(vgem_device->platform)) { ret = PTR_ERR(vgem_device->platform); - goto out_fini; + goto out_free; } dma_coerce_mask_and_coherent(&vgem_device->platform->dev, DMA_BIT_MASK(64)); + ret = drm_dev_init(&vgem_device->drm, &vgem_driver, + &vgem_device->platform->dev); + if (ret) + goto out_unregister; /* Final step: expose the device/driver to userspace */ ret = drm_dev_register(&vgem_device->drm, 0); if (ret) - goto out_unregister; + goto out_fini; return 0; -out_unregister: - platform_device_unregister(vgem_device->platform); out_fini: drm_dev_fini(&vgem_device->drm); +out_unregister: + platform_device_unregister(vgem_device->platform); out_free: kfree(vgem_device); return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 0f2768eacaee58b12dfd4f48c8bacdc286f8209f..692776abdcf19a2f5ec15cd32adb7b4f8d3f387e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -71,9 +71,6 @@ int virtio_gpu_gem_create(struct drm_file *file, *obj_p = &obj->gem_base; - /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&obj->gem_base); - *handle_p = handle; return 0; } @@ -107,6 +104,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, /* attach the object to the resource */ obj = gem_to_virtio_gpu_obj(gobj); ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL); + drm_gem_object_put_unlocked(&obj->gem_base); if (ret) goto fail; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 7bdf6f0e58a5343a880470e61e69ee2292b7bd42..a539843a03ba3254c7e72b4b7df238e1092315b9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -309,7 +309,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, } return ret; } - drm_gem_object_put_unlocked(obj); rc->res_handle = res_id; /* similiar to a VM address */ rc->bo_handle = handle; @@ -318,6 +317,15 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, virtio_gpu_unref_list(&validate_list); dma_fence_put(&fence->f); } + + /* + * The handle owns the reference now. 
But we must drop our + * remaining reference *after* we no longer need to dereference + * the obj. Otherwise userspace could guess the handle and + * race closing it from another thread. + */ + drm_gem_object_put_unlocked(obj); + return 0; fail_unref: if (vgdev->has_virgl_3d) { @@ -528,6 +536,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, if (!ret) return -EBUSY; + /* is_valid check must proceed before copy of the cache entry. */ + smp_rmb(); + ptr = cache_ent->caps_cache; copy_exit: diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 020070d483d350a58695c7fa9f21f7c2be4db463..c8a581b1f4c40355a4bcb0eacc58d8043712bbbb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -588,6 +588,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, cache_ent->id == le32_to_cpu(cmd->capset_id)) { memcpy(cache_ent->caps_cache, resp->capset_data, cache_ent->size); + /* Copy must occur before is_valid is signalled. */ + smp_wmb(); atomic_set(&cache_ent->is_valid, 1); break; } diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 875fca662ac0c69c6dc56c0e0d4b1d48568140c4..0a271f762a0ab6489d291b73b00b5054e93691eb 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include @@ -61,6 +55,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, *vblank_time = output->vblank_hrtimer.node.expires; + if (!in_vblank_irq) + *vblank_time -= output->period_ns; + return true; } diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 6e728b8252596e02745831eee0a5351f698d8c11..d32e08f17427ca4f3517ae50029129718fd62ead 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -1,9 +1,4 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
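The virtio-gpu hunks above pair an smp_wmb() after filling the capset cache with an smp_rmb() on the reader side, so the is_valid flag can never be observed ahead of the data it guards. A generic sketch of that publish/consume ordering; the structure and names are illustrative, not the driver's:

#include <linux/atomic.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_cache_ent {
	atomic_t is_valid;
	char data[64];
};

/* Producer: fill the payload, then publish the flag. */
static void example_publish(struct example_cache_ent *ent,
			    const void *src, size_t len)
{
	memcpy(ent->data, src, len);
	smp_wmb();			/* order the copy before the flag */
	atomic_set(&ent->is_valid, 1);
}

/* Consumer: observe the flag, then order the flag read before the copy. */
static bool example_consume(struct example_cache_ent *ent,
			    void *dst, size_t len)
{
	if (!atomic_read(&ent->is_valid))
		return false;

	smp_rmb();			/* pairs with the smp_wmb() above */
	memcpy(dst, ent->data, len);
	return true;
}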
- */ +// SPDX-License-Identifier: GPL-2.0+ #include #include @@ -44,7 +39,6 @@ static void vkms_release(struct drm_device *dev) struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); platform_device_unregister(vkms->platform); - drm_atomic_helper_shutdown(&vkms->drm); drm_mode_config_cleanup(&vkms->drm); drm_dev_fini(&vkms->drm); } @@ -142,6 +136,7 @@ static void __exit vkms_exit(void) } drm_dev_unregister(&vkms_device->drm); + drm_atomic_helper_shutdown(&vkms_device->drm); drm_dev_put(&vkms_device->drm); kfree(vkms_device); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 07be29f2dc44115f64c0e09875892c89223301d8..e018752d57bbf7e30dd55b978cf941d9ba458b0e 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + #ifndef _VKMS_DRV_H_ #define _VKMS_DRV_H_ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index c7e38368602b7a4dd0ee679062fd9ee56388ae64..ce394009a36c6739519183b57ff347b559a24ff6 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include @@ -116,11 +110,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, ret = drm_gem_handle_create(file, &obj->gem, handle); drm_gem_object_put_unlocked(&obj->gem); - if (ret) { - drm_gem_object_release(&obj->gem); - kfree(obj); + if (ret) return ERR_PTR(ret); - } return &obj->gem; } diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 901012cb1af1dfdbc29eb3bb458afe1bd0f1e68c..5697148e0b73532314a0204f1831a22b5324c998 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 9f75b1e2c1c4bdcba7601e1ca93e8ed99fa1fbfa..ce043b721e0c620e97175835287f3a732bc63321 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,10 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
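The vkms change above moves drm_atomic_helper_shutdown() out of the ->release() callback and into module exit, after drm_dev_unregister() but before the final reference drop. A sketch of that teardown order under an assumed function name:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>

/* Teardown order established above: shut the pipeline down after
 * unregistering but before the last reference is dropped, not from
 * the ->release() callback.
 */
static void example_unload(struct drm_device *drm)
{
	drm_dev_unregister(drm);		/* no new userspace access */
	drm_atomic_helper_shutdown(drm);	/* CRTCs off while state is valid */
	drm_dev_put(drm);			/* ->release() may free the device */
}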
- */ +// SPDX-License-Identifier: GPL-2.0+ #include "vkms_drv.h" #include diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index bb6dbbe188358f040c1a1e17425874b9b581e367..05a800807c2685c6e92d1fe4cdbeb3ecd25ab381 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -627,13 +627,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) static int vmw_dma_masks(struct vmw_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + int ret = 0; - if (intel_iommu_enabled && + ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); + if (dev_priv->map_mode != vmw_dma_phys && (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { DRM_INFO("Restricting DMA addresses to 44 bits.\n"); - return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); + return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); } - return 0; + + return ret; } #else static int vmw_dma_masks(struct vmw_private *dev_priv) @@ -786,6 +789,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (unlikely(ret != 0)) goto out_err0; + dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, + SCATTERLIST_MAX_SEGMENT)); + if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); @@ -1288,7 +1294,13 @@ static int vmw_master_set(struct drm_device *dev, } dev_priv->active_master = vmaster; - drm_sysfs_hotplug_event(dev); + + /* + * Inform a new master that the layout may have changed while + * it was gone. + */ + if (!from_open) + drm_sysfs_hotplug_event(dev); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1abe21758b0d7523a0b66631f615f55e0d07796a..bca0b8980c0e7ef654fabccb0372f7314408ab61 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -855,15 +855,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle); -extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, +extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, - struct sync_file *sync_file); + int32_t out_fence_fd); extern int vmw_validate_single_buffer(struct vmw_private *dev_priv, struct ttm_buffer_object *bo, bool interruptible, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f0ab6b2313bbed89f5879fd27a6e2c268fc8c613..e65554f5a89d509b9c722adf398d2b4e25d44d35 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX || + cmd->body.type < SVGA3D_SHADERTYPE_MIN) { DRM_ERROR("Illegal shader type %u.\n", (unsigned) cmd->body.type); return -EINVAL; @@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, if (view_type == vmw_view_max) return -EINVAL; cmd = container_of(header, typeof(*cmd), header); + if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) { + DRM_ERROR("Invalid surface id.\n"); + return -EINVAL; + } ret = vmw_cmd_res_check(dev_priv, sw_context, 
vmw_res_surface, user_surface_converter, &cmd->sid, &srf_node); @@ -3843,7 +3848,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, *p_fence = NULL; } - return 0; + return ret; } /** @@ -3868,20 +3873,19 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, * object so we wait for it immediately, and then unreference the * user-space reference. */ -void +int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, - struct sync_file *sync_file) + int32_t out_fence_fd) { struct drm_vmw_fence_rep fence_rep; if (user_fence_rep == NULL) - return; + return 0; memset(&fence_rep, 0, sizeof(fence_rep)); @@ -3909,20 +3913,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, * and unreference the handle. */ if (unlikely(ret != 0) && (fence_rep.error == 0)) { - if (sync_file) - fput(sync_file->file); - - if (fence_rep.fd != -1) { - put_unused_fd(fence_rep.fd); - fence_rep.fd = -1; - } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, TTM_REF_USAGE); DRM_ERROR("Fence copy error. Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } + + return ret ? -EFAULT : 0; } /** @@ -4282,16 +4280,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); + } + } + + ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, + user_fence_rep, fence, handle, out_fence_fd); + + if (sync_file) { + if (ret) { + /* usercopy of fence failed, put the file object */ + fput(sync_file->file); + put_unused_fd(out_fence_fd); } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); } } - vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, - user_fence_rep, fence, handle, - out_fence_fd, sync_file); - /* Don't unreference when handing fence out */ if (unlikely(out_fence != NULL)) { *out_fence = fence; @@ -4310,7 +4315,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, */ vmw_resource_list_unreference(sw_context, &resource_list); - return 0; + return ret; out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b913a56f3426669f21582e271fac9add830bb91d..2a9112515f464c320628d64b8a9d92c645f730dd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info) 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; - struct drm_display_mode *old_mode; struct drm_display_mode *mode; int ret; - old_mode = par->set_mode; mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->vdisplay = var->yres; vmw_guess_mode_timing(mode); - if (old_mode && drm_mode_equal(old_mode, mode)) { - drm_mode_destroy(vmw_priv->dev, mode); - mode = old_mode; - old_mode = NULL; - } else if (!vmw_kms_validate_mode_vram(vmw_priv, + if (!vmw_kms_validate_mode_vram(vmw_priv, mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info) schedule_delayed_work(&par->local_work, 0); out_unlock: - if (old_mode) - drm_mode_destroy(vmw_priv->dev, old_mode); + if (par->set_mode) + 
drm_mode_destroy(vmw_priv->dev, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 3d546d4093341fd6ba329c06830f173666e2f707..e1b4f9612f5a33496e68a04f8e742f05e39dac68 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1064,7 +1064,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, } event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; - event->event.base.length = sizeof(*event); + event->event.base.length = sizeof(event->event); event->event.user_data = user_data; ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); @@ -1169,7 +1169,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, } vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); vmw_fence_obj_unreference(&fence); return 0; out_no_create: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b93c558dd86e0121741284becc87434a27b39d2a..7da752ca1c34bd06497e1491d264921c33011c80 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return id; + return (id != -ENOMEM ? 0 : id); spin_lock(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6a712a8d59e93b68fb68c98358af37b5e20d7680..befef87e0bb8f477d7b277809b9b50dd845cf529 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -179,7 +179,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, if (cmd->dma.guest.ptr.offset % PAGE_SIZE || box->x != 0 || box->y != 0 || box->z != 0 || box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1) { + box->d != 1 || box_count != 1 || + box->w > 64 || box->h > 64) { /* TODO handle none page aligned offsets */ /* TODO handle more dst & src != 0 */ /* TODO handle more then one copy */ @@ -2662,7 +2663,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); if (out_fence) *out_fence = fence; else diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 8b9270f314091a82f27d659baf9305cc37aa1782..0af048d1a8156acfdb24af337f3b3479c4bb1004 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel) return 0; } +/** + * vmw_port_hb_out - Send the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @msg: NULL-terminated message. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. 
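The helpers documented above send or receive the payload through the high-bandwidth port when it is available and otherwise fall back to the backdoor, moving four bytes per hypercall. A stripped-down sketch of the word-at-a-time fallback; put_word() stands in for the VMW_PORT call and is not a real interface:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stand-in for the backdoor fallback: push an arbitrary-length message
 * through a 32-bit-wide interface one word at a time, zero-padding the
 * final short word.
 */
static void example_send_words(void (*put_word)(u32 word),
			       const char *msg, size_t len)
{
	while (len) {
		size_t bytes = min_t(size_t, len, 4);
		u32 word = 0;

		memcpy(&word, msg, bytes);
		put_word(word);

		msg += bytes;
		len -= bytes;
	}
}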
+ */ +static unsigned long vmw_port_hb_out(struct rpc_channel *channel, + const char *msg, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + unsigned long msg_len = strlen(msg); + + if (hb) { + unsigned long bp = channel->cookie_high; + + si = (uintptr_t) msg; + di = channel->cookie_low; + + VMW_PORT_HB_OUT( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + msg_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Send the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) { + unsigned int bytes = min_t(size_t, msg_len, 4); + unsigned long word = 0; + + memcpy(&word, msg, bytes); + msg_len -= bytes; + msg += bytes; + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16), + word, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + } + + return ecx; +} + +/** + * vmw_port_hb_in - Receive the message payload either through the + * high-bandwidth port if available, or through the backdoor otherwise. + * @channel: The rpc channel. + * @reply: Pointer to buffer holding reply. + * @reply_len: Length of the reply. + * @hb: Whether the high-bandwidth port is available. + * + * Return: The port status. + */ +static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, + unsigned long reply_len, bool hb) +{ + unsigned long si, di, eax, ebx, ecx, edx; + + if (hb) { + unsigned long bp = channel->cookie_low; + + si = channel->cookie_high; + di = (uintptr_t) reply; + + VMW_PORT_HB_IN( + (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, + reply_len, si, di, + VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, bp, + eax, ebx, ecx, edx, si, di); + + return ebx; + } + + /* HB port not available. Retrieve the message 4 bytes at a time. */ + ecx = MESSAGE_STATUS_SUCCESS << 16; + while (reply_len) { + unsigned int bytes = min_t(unsigned long, reply_len, 4); + + si = channel->cookie_high; + di = channel->cookie_low; + + VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16), + MESSAGE_STATUS_SUCCESS, si, di, + VMW_HYPERVISOR_PORT | (channel->channel_id << 16), + VMW_HYPERVISOR_MAGIC, + eax, ebx, ecx, edx, si, di); + + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) + break; + + memcpy(reply, &ebx, bytes); + reply_len -= bytes; + reply += bytes; + } + + return ecx; +} /** @@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel) */ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; size_t msg_len = strlen(msg); int retries = 0; - while (retries < RETRIES) { retries++; @@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { - /* Expected success + high-bandwidth. Give up. */ + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { + /* Expected success. Give up. 
*/ return -EINVAL; } /* Send msg */ - si = (uintptr_t) msg; - di = channel->cookie_low; - bp = channel->cookie_high; - - VMW_PORT_HB_OUT( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - msg_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); + ebx = vmw_port_hb_out(channel, msg, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) { return 0; @@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg); static int vmw_recv_msg(struct rpc_channel *channel, void **msg, size_t *msg_len) { - unsigned long eax, ebx, ecx, edx, si, di, bp; + unsigned long eax, ebx, ecx, edx, si, di; char *reply; size_t reply_len; int retries = 0; @@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, VMW_HYPERVISOR_MAGIC, eax, ebx, ecx, edx, si, di); - if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 || - (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) { + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { DRM_ERROR("Failed to get reply size for host message.\n"); return -EINVAL; } @@ -252,20 +349,11 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, /* Receive buffer */ - si = channel->cookie_high; - di = (uintptr_t) reply; - bp = channel->cookie_low; - - VMW_PORT_HB_IN( - (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG, - reply_len, si, di, - VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16), - VMW_HYPERVISOR_MAGIC, bp, - eax, ebx, ecx, edx, si, di); - + ebx = vmw_port_hb_in(channel, reply, reply_len, + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB)); if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. */ continue; @@ -289,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) { kfree(reply); - + reply = NULL; if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry. 
*/ continue; @@ -301,7 +389,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } - if (retries == RETRIES) + if (!reply) return -EINVAL; *msg_len = reply_len; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 31786b200afc470d73d4f661a4e9358959d686f8..f388ad51e72b4461bffbd6bc1753ebdf38533d6a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -448,11 +448,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) return ret; - ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, - vsgt->num_pages, 0, - (unsigned long) - vsgt->num_pages << PAGE_SHIFT, - GFP_KERNEL); + ret = __sg_alloc_table_from_pages + (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, + (unsigned long) vsgt->num_pages << PAGE_SHIFT, + dma_get_max_seg_size(dev_priv->dev->dev), + GFP_KERNEL); if (unlikely(ret != 0)) goto out_sg_alloc_fail; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 815bdb42e3f0368f78c8397f4b2fbda3e94f8ccf..0121fe7a4548dbbfbc6a24ba21667db8f0ff93e7 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -423,6 +423,9 @@ static int host1x_device_add(struct host1x *host1x, of_dma_configure(&device->dev, host1x->dev->of_node, true); + device->dev.dma_parms = &device->dma_parms; + dma_set_max_seg_size(&device->dev, SZ_4M); + err = host1x_device_parse_dt(device, driver); if (err < 0) { kfree(device); diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h index 4457486c72b05e0dd5f2b3ddade15276e5fa98c7..e599e15bf999aa44a70d0c796956739b02adc126 100644 --- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h +++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h @@ -59,7 +59,7 @@ static inline u32 host1x_uclass_incr_syncpt_r(void) host1x_uclass_incr_syncpt_r() static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) { - return (v & 0xff) << 8; + return (v & 0xff) << 10; } #define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \ host1x_uclass_incr_syncpt_cond_f(v) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 527a1cddb14fd5d2a23fa96559ed0e33c54bd690..916b2355e11a6a04f14a705760054f446f9bcc7b 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -447,7 +447,8 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g) return err; } -static inline int copy_gathers(struct host1x_job *job, struct device *dev) +static inline int copy_gathers(struct device *host, struct host1x_job *job, + struct device *dev) { struct host1x_firewall fw; size_t size = 0; @@ -470,12 +471,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) * Try a non-blocking allocation from a higher priority pools first, * as awaiting for the allocation here is a major performance hit. 
*/ - job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_NOWAIT); /* the higher priority allocation failed, try the generic-blocking */ if (!job->gather_copy_mapped) - job->gather_copy_mapped = dma_alloc_wc(dev, size, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) @@ -523,7 +524,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev) goto out; if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) { - err = copy_gathers(job, dev); + err = copy_gathers(host->dev, job, dev); if (err) goto out; } @@ -584,7 +585,7 @@ void host1x_job_unpin(struct host1x_job *job) job->num_unpins = 0; if (job->gather_copy_size) - dma_free_wc(job->channel->dev, job->gather_copy_size, + dma_free_wc(host->dev, job->gather_copy_size, job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697d90e7a60ee60fb8f8badd330f81e..0a7d4395d427bacf0e27cb5251ab74bb6e816ac1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { .cpmem_ofs = 0x1f000000, .srm_ofs = 0x1f040000, .tpm_ofs = 0x1f060000, - .csi0_ofs = 0x1f030000, - .csi1_ofs = 0x1f038000, + .csi0_ofs = 0x1e030000, + .csi1_ofs = 0x1e038000, .ic_ofs = 0x1e020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, @@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { .cpmem_ofs = 0x07000000, .srm_ofs = 0x07040000, .tpm_ofs = 0x07060000, - .csi0_ofs = 0x07030000, - .csi1_ofs = 0x07038000, + .csi0_ofs = 0x06030000, + .csi1_ofs = 0x06038000, .ic_ofs = 0x06020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c index 9b2b3fa479c462d1c4d7b8b02180ad22eb20a715..5e44ff1f20851a16afdb42bfdaf73caab97ebff5 100644 --- a/drivers/gpu/ipu-v3/ipu-dp.c +++ b/drivers/gpu/ipu-v3/ipu-dp.c @@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp, ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs, DP_COM_CONF_CSC_DEF_BOTH); } else { - if (flow->foreground.in_cs == flow->out_cs) + if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN || + flow->foreground.in_cs == flow->out_cs) /* * foreground identical to output, apply color * conversion on background @@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) struct ipu_dp_priv *priv = flow->priv; u32 reg, csc; + dp->in_cs = IPUV3_COLORSPACE_UNKNOWN; + if (!dp->foreground) return; @@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) reg = readl(flow->base + DP_COM_CONF); csc = reg & DP_COM_CONF_CSC_DEF_MASK; - if (csc == DP_COM_CONF_CSC_DEF_FG) - reg &= ~DP_COM_CONF_CSC_DEF_MASK; + reg &= ~DP_COM_CONF_CSC_DEF_MASK; + if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG) + reg |= DP_COM_CONF_CSC_DEF_BG; reg &= ~DP_COM_CONF_FG_EN; writel(reg, flow->base + DP_COM_CONF); @@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base) mutex_init(&priv->mutex); for (i = 0; i < IPUV3_NUM_FLOWS; i++) { + priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN; + priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN; priv->flow[i].foreground.foreground = true; priv->flow[i].base = priv->base + ipu_dp_flow_base[i]; priv->flow[i].priv = priv; diff --git a/drivers/gpu/ipu-v3/ipu-ic.c 
b/drivers/gpu/ipu-v3/ipu-ic.c index 67cc820253a99b84341825437e9e0bfce3cd16d6..fb79e118f26c8759cc37ec1b222b89f609a53fea 100644 --- a/drivers/gpu/ipu-v3/ipu-ic.c +++ b/drivers/gpu/ipu-v3/ipu-ic.c @@ -257,7 +257,7 @@ static int init_csc(struct ipu_ic *ic, writel(param, base++); param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) | - (params->sat << 9); + (params->sat << 10); writel(param, base++); param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) | diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index f4081962784ccee322282a5099590c42dcbcccc9..91653adc41cc437fd857931ec29c597c5220f856 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -1524,7 +1524,7 @@ int ipu_image_convert_queue(struct ipu_image_convert_run *run) EXPORT_SYMBOL_GPL(ipu_image_convert_queue); /* Abort any active or pending conversions for this context */ -void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) +static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) { struct ipu_image_convert_chan *chan = ctx->chan; struct ipu_image_convert_priv *priv = chan->priv; @@ -1551,7 +1551,7 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) need_abort = (run_count || active_run); - ctx->aborting = need_abort; + ctx->aborting = true; spin_unlock_irqrestore(&chan->irqlock, flags); @@ -1572,7 +1572,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) dev_warn(priv->ipu->dev, "%s: timeout\n", __func__); force_abort(ctx); } +} +void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx) +{ + __ipu_image_convert_abort(ctx); ctx->aborting = false; } EXPORT_SYMBOL_GPL(ipu_image_convert_abort); @@ -1586,7 +1590,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx) bool put_res; /* make sure no runs are hanging around */ - ipu_image_convert_abort(ctx); + __ipu_image_convert_abort(ctx); dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__, chan->ic_task, ctx); diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d625514928006c6b31adeccbe312514ef3..4a28f3fbb0a28c55630be20edbb8c2df4de74645 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -106,6 +106,7 @@ struct ipu_pre { void *buffer_virt; bool in_use; unsigned int safe_window_end; + unsigned int last_bufaddr; }; static DEFINE_MUTEX(ipu_pre_list_mutex); @@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | @@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) unsigned short current_yblock; u32 val; + if (bufaddr == pre->last_bufaddr) + return; + writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); + pre->last_bufaddr = bufaddr; do { if (time_after(jiffies, timeout)) { diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cf2a18571d484d078dc1eabc59a3d6ff0f11ab07..a132c37d733490fa70af2674237162d73459a31d 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, mutex_unlock(&vgasr_mutex); return -EINVAL; } + /* notify if GPU has been already bound */ + if (ops->gpu_bound) + ops->gpu_bound(pdev, id); } 
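The ipu-pre change above keeps a shadow copy of the last buffer address written to IPU_PRE_NEXT_BUF and skips the update when nothing changed. A small sketch of that write-caching pattern with made-up register and structure names:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_NEXT_BUF	0x24	/* made-up register offset */

struct example_pre {
	void __iomem *regs;
	unsigned int last_bufaddr;	/* shadow of the last value written */
};

/* Skip the double-buffered register update when the engine is already
 * fetching from the requested address, as ipu_pre_update() now does.
 */
static void example_update(struct example_pre *pre, unsigned int bufaddr)
{
	if (bufaddr == pre->last_bufaddr)
		return;

	writel(bufaddr, pre->regs + EXAMPLE_NEXT_BUF);
	pre->last_bufaddr = bufaddr;
}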
mutex_unlock(&vgasr_mutex); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 61e1953ff9219db2783d5c1b7b2af43a0d39d27a..98e91e14cefd3848c08410312da5b8ef7ab11a06 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -191,14 +191,14 @@ config HID_CHERRY config HID_CHICONY tristate "Chicony devices" - depends on HID + depends on USB_HID default !EXPERT ---help--- Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" - depends on HID && USB && LEDS_CLASS + depends on USB_HID && LEDS_CLASS ---help--- Support for Corsair devices that are not fully compliant with the HID standard. @@ -219,7 +219,7 @@ config HID_COUGAR config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" - depends on HID && SND + depends on USB_HID && SND select SND_RAWMIDI ---help--- Support for Prodikeys PC-MIDI Keyboard device support. @@ -484,7 +484,7 @@ config HID_LENOVO config HID_LOGITECH tristate "Logitech devices" - depends on HID + depends on USB_HID default !EXPERT ---help--- Support for Logitech devices that are not fully compliant with HID standard. @@ -822,7 +822,7 @@ config HID_SAITEK config HID_SAMSUNG tristate "Samsung InfraRed remote control or keyboards" - depends on HID + depends on USB_HID ---help--- Support for Samsung InfraRed remote control or keyboards. diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c index 9428ea7cdf8a00dc686e00c6da77be5cb60215b7..c52bd163abb3e1a93714f0ddc64a418858f52881 100644 --- a/drivers/hid/hid-a4tech.c +++ b/drivers/hid/hid-a4tech.c @@ -26,12 +26,36 @@ #define A4_2WHEEL_MOUSE_HACK_7 0x01 #define A4_2WHEEL_MOUSE_HACK_B8 0x02 +#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8) + struct a4tech_sc { unsigned long quirks; unsigned int hw_wheel; __s32 delayed_value; }; +static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + struct a4tech_sc *a4 = hid_get_drvdata(hdev); + + if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 && + usage->hid == A4_WHEEL_ORIENTATION) { + /* + * We do not want to have this usage mapped to anything as it's + * nonstandard and doesn't really behave like an HID report. + * It's only selecting the orientation (vertical/horizontal) of + * the previous mouse wheel report. The input_events will be + * generated once both reports are recorded in a4_event(). + */ + return -1; + } + + return 0; + +} + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, struct a4tech_sc *a4 = hid_get_drvdata(hdev); struct input_dev *input; - if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || - !usage->type) + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) return 0; input = field->hidinput->input; @@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, return 1; } - if (usage->hid == 0x000100b8) { + if (usage->hid == A4_WHEEL_ORIENTATION) { input_event(input, EV_REL, value ? 
REL_HWHEEL : REL_WHEEL, a4->delayed_value); return 1; @@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices); static struct hid_driver a4_driver = { .name = "a4tech", .id_table = a4_devices, + .input_mapping = a4_input_mapping, .input_mapped = a4_input_mapped, .event = a4_event, .probe = a4_probe, diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index aec253b44156891bf71c192c1cd1e41b2e5801a3..3cd7229b6e5465b88759d42a9a12084cd948eeab 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -660,6 +660,20 @@ static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data) return ret; } +static int alps_sp_open(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + return hid_hw_open(hid); +} + +static void alps_sp_close(struct input_dev *dev) +{ + struct hid_device *hid = input_get_drvdata(dev); + + hid_hw_close(hid); +} + static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) { struct alps_dev *data = hid_get_drvdata(hdev); @@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) input2->id.version = input->id.version; input2->dev.parent = input->dev.parent; + input_set_drvdata(input2, hdev); + input2->open = alps_sp_open; + input2->close = alps_sp_close; + __set_bit(EV_KEY, input2->evbit); data->sp_btn_cnt = (data->sp_btn_info & 0x0F); for (i = 0; i < data->sp_btn_cnt; i++) diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 1cb41992aaa1f650f89cbf5ace72bd46786d7abc..d0a81a03ddbdd5cb206db6760a2954feaff0de41 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") struct apple_sc { unsigned long quirks; unsigned int fn_on; - DECLARE_BITMAP(pressed_fn, KEY_CNT); DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, { struct apple_sc *asc = hid_get_drvdata(hid); const struct apple_key_translation *trans, *table; + bool do_translate; + u16 code = 0; if (usage->code == KEY_FN) { asc->fn_on = !!value; @@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, } if (fnmode) { - int do_translate; - if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI && hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) table = macbookair_fn_keys; @@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, trans = apple_find_translation (table, usage->code); if (trans) { - if (test_bit(usage->code, asc->pressed_fn)) - do_translate = 1; - else if (trans->flags & APPLE_FLAG_FKEY) - do_translate = (fnmode == 2 && asc->fn_on) || - (fnmode == 1 && !asc->fn_on); - else - do_translate = asc->fn_on; - - if (do_translate) { - if (value) - set_bit(usage->code, asc->pressed_fn); - else - clear_bit(usage->code, asc->pressed_fn); - - input_event(input, usage->type, trans->to, - value); - - return 1; + if (test_bit(trans->from, input->key)) + code = trans->from; + else if (test_bit(trans->to, input->key)) + code = trans->to; + + if (!code) { + if (trans->flags & APPLE_FLAG_FKEY) { + switch (fnmode) { + case 1: + do_translate = !asc->fn_on; + break; + case 2: + do_translate = asc->fn_on; + break; + default: + /* should never happen */ + do_translate = false; + } + } else { + do_translate = asc->fn_on; + } + + code = do_translate ? 
trans->to : trans->from; } + + input_event(input, usage->type, code, value); + return 1; } if (asc->quirks & APPLE_NUMLOCK_EMULATION && diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 88a5672f42cd814187ede87e8783414023824bf2..4dddf3ce32d73a1e75f24226dfa97401c1fff8a7 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -84,6 +84,7 @@ struct asus_kbd_leds { struct hid_device *hdev; struct work_struct work; unsigned int brightness; + spinlock_t lock; bool removed; }; @@ -313,24 +314,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev, return ret; } +static void asus_schedule_work(struct asus_kbd_leds *led) +{ + unsigned long flags; + + spin_lock_irqsave(&led->lock, flags); + if (!led->removed) + schedule_work(&led->work); + spin_unlock_irqrestore(&led->lock, flags); +} + static void asus_kbd_backlight_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); - if (led->brightness == brightness) - return; + unsigned long flags; + spin_lock_irqsave(&led->lock, flags); led->brightness = brightness; - schedule_work(&led->work); + spin_unlock_irqrestore(&led->lock, flags); + + asus_schedule_work(led); } static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); + enum led_brightness brightness; + unsigned long flags; + + spin_lock_irqsave(&led->lock, flags); + brightness = led->brightness; + spin_unlock_irqrestore(&led->lock, flags); - return led->brightness; + return brightness; } static void asus_kbd_backlight_work(struct work_struct *work) @@ -338,11 +357,11 @@ static void asus_kbd_backlight_work(struct work_struct *work) struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work); u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 }; int ret; + unsigned long flags; - if (led->removed) - return; - + spin_lock_irqsave(&led->lock, flags); buf[4] = led->brightness; + spin_unlock_irqrestore(&led->lock, flags); ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf)); if (ret < 0) @@ -383,6 +402,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev) drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set; drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get; INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work); + spin_lock_init(&drvdata->kbd_backlight->lock); ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev); if (ret < 0) { @@ -622,7 +642,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) if (drvdata->quirks & QUIRK_IS_MULTITOUCH) drvdata->tp = &asus_i2c_tp; - if (drvdata->quirks & QUIRK_T100_KEYBOARD) { + if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) { @@ -692,9 +712,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) static void asus_remove(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); + unsigned long flags; if (drvdata->kbd_backlight) { + spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags); drvdata->kbd_backlight->removed = true; + spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags); + cancel_work_sync(&drvdata->kbd_backlight->work); } diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index 
a594e478a1e218abd629b69f8ca9b19b9335e753..843aed4dec80a8378f42f24d71f56ed0c604e0b5 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid) { struct axff_device *axff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int field_count = 0; int i, j; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c index 69cfc8dc6af1e3459951898b9dc2cfe098cf4e6d..9b60efe6ec441e45a6b21f93047dcb88bf0cdf41 100644 --- a/drivers/hid/hid-betopff.c +++ b/drivers/hid/hid-betopff.c @@ -59,15 +59,22 @@ static int betopff_init(struct hid_device *hid) { struct betopff_device *betopff; struct hid_report *report; - struct hid_input *hidinput = - list_first_entry(&hid->inputs, struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int field_count = 0; int error; int i, j; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index 397a789a41be942a24a8673767da277869a9b670..218f0e090f638e48b587f70e5bf7e946d9ba0ce4 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -61,8 +61,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - + struct usb_interface *intf; + + if (!hid_is_usb(hdev)) + return rdesc; + + intf = to_usb_interface(hdev->dev.parent); if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { /* Change usage maximum and logical maximum from 0x7fff to * 0x2fff, so they don't exceed HID_MAX_USAGES */ diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 44564f61e9cc3c85250e2e6d1e5ff47ed95dcd4d..7dd30deceda4676b204e71fc5ebb40047f3d45d9 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -93,7 +93,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. 
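The hid-axff and hid-betopff fixes above check that hid->inputs is non-empty before calling list_first_entry(), which has undefined results on an empty list. The guard in generic form, with an illustrative node type:

#include <linux/errno.h>
#include <linux/list.h>

struct example_node {
	struct list_head list;
	int value;
};

/* list_first_entry() is only defined for a non-empty list, so check
 * list_empty() first (or use list_first_entry_or_null()).
 */
static int example_first_value(struct list_head *head, int *out)
{
	struct example_node *node;

	if (list_empty(head))
		return -ENODEV;

	node = list_first_entry(head, struct example_node, list);
	*out = node->value;
	return 0;
}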
*/ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) { struct hid_field *field; @@ -104,7 +104,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - values * sizeof(unsigned)), GFP_KERNEL); + usages * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -211,17 +211,38 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) return 0; /* we know nothing about this usage type */ } +/* + * Concatenate usage which defines 16 bits or less with the + * currently defined usage page to form a 32 bit usage + */ + +static void complete_usage(struct hid_parser *parser, unsigned int index) +{ + parser->local.usage[index] &= 0xFFFF; + parser->local.usage[index] |= + (parser->global.usage_page & 0xFFFF) << 16; +} + /* * Add a usage to the temporary parser table. */ -static int hid_add_usage(struct hid_parser *parser, unsigned usage) +static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) { if (parser->local.usage_index >= HID_MAX_USAGES) { hid_err(parser->device, "usage index exceeded\n"); return -1; } parser->local.usage[parser->local.usage_index] = usage; + + /* + * If Usage item only includes usage id, concatenate it with + * currently defined usage page + */ + if (size <= 2) + complete_usage(parser, parser->local.usage_index); + + parser->local.usage_size[parser->local.usage_index] = size; parser->local.collection_index[parser->local.usage_index] = parser->collection_stack_ptr ? parser->collection_stack[parser->collection_stack_ptr - 1] : 0; @@ -273,7 +294,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages, parser->global.report_count); + field = hid_register_field(report, usages); if (!field) return 0; @@ -482,10 +503,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - - return hid_add_usage(parser, data); + return hid_add_usage(parser, data, item->size); case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: @@ -494,9 +512,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - parser->local.usage_minimum = data; return 0; @@ -507,9 +522,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } - if (item->size <= 2) - data = (parser->global.usage_page << 16) + data; - count = data - parser->local.usage_minimum; if (count + parser->local.usage_index >= HID_MAX_USAGES) { /* @@ -529,7 +541,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) } for (n = parser->local.usage_minimum; n <= data; n++) - if (hid_add_usage(parser, n)) { + if (hid_add_usage(parser, n, item->size)) { dbg_hid("hid_add_usage failed\n"); return -1; } @@ -543,6 +555,41 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) return 0; } +/* + * Concatenate Usage Pages into Usages where relevant: + * As per specification, 6.2.2.8: "When the parser encounters a main item it + * concatenates the last declared Usage Page with a Usage to form a complete + * usage 
value." + */ + +static void hid_concatenate_last_usage_page(struct hid_parser *parser) +{ + int i; + unsigned int usage_page; + unsigned int current_page; + + if (!parser->local.usage_index) + return; + + usage_page = parser->global.usage_page; + + /* + * Concatenate usage page again only if last declared Usage Page + * has not been already used in previous usages concatenation + */ + for (i = parser->local.usage_index - 1; i >= 0; i--) { + if (parser->local.usage_size[i] > 2) + /* Ignore extended usages */ + continue; + + current_page = parser->local.usage[i] >> 16; + if (current_page == usage_page) + break; + + complete_usage(parser, i); + } +} + /* * Process a main item. */ @@ -552,6 +599,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int ret; + hid_concatenate_last_usage_page(parser); + data = item_udata(item); switch (item->tag) { @@ -731,6 +780,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) if (usage == 0xff0000c5 && parser->global.report_count == 256 && parser->global.report_size == 8) parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; + + if (usage == 0xff0000c6 && parser->global.report_count == 1 && + parser->global.report_size == 8) + parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; } static void hid_scan_collection(struct hid_parser *parser, unsigned type) @@ -761,6 +814,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) __u32 data; int i; + hid_concatenate_last_usage_page(parser); + data = item_udata(item); switch (item->tag) { @@ -919,8 +974,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid, * Validating on id 0 means we should examine the first * report in the list. */ - report = list_entry( - hid->report_enum[type].report_list.next, + report = list_first_entry_or_null( + &hid->report_enum[type].report_list, struct hid_report, list); } else { report = hid->report_enum[type].report_id_hash[id]; @@ -963,6 +1018,7 @@ int hid_open_report(struct hid_device *device) __u8 *start; __u8 *buf; __u8 *end; + __u8 *next; int ret; static int (*dispatch_type[])(struct hid_parser *parser, struct hid_item *item) = { @@ -1016,7 +1072,8 @@ int hid_open_report(struct hid_device *device) device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; ret = -EINVAL; - while ((start = fetch_item(start, end, &item)) != NULL) { + while ((next = fetch_item(start, end, &item)) != NULL) { + start = next; if (item.format != HID_ITEM_FORMAT_SHORT) { hid_err(device, "unexpected long global item\n"); @@ -1046,7 +1103,8 @@ int hid_open_report(struct hid_device *device) } } - hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); + hid_err(device, "item fetching failed at offset %u/%u\n", + size - (unsigned int)(end - start), size); err: kfree(parser->collection_stack); alloc_err: @@ -1064,6 +1122,9 @@ EXPORT_SYMBOL_GPL(hid_open_report); static s32 snto32(__u32 value, unsigned n) { + if (n > 32) + n = 32; + switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); @@ -1183,7 +1244,6 @@ static void implement(const struct hid_device *hid, u8 *report, hid_warn(hid, "%s() called with too large value %d (n: %d)! (%s)\n", __func__, value, n, current->comm); - WARN_ON(1); value &= m; } } @@ -1361,6 +1421,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. 
+ */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1373,7 +1444,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1391,7 +1462,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) u32 len = hid_report_len(report) + 7; - return kmalloc(len, flags); + return kzalloc(len, flags); } EXPORT_SYMBOL_GPL(hid_alloc_report_buf); @@ -1500,7 +1571,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (rsize > HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE; diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index ec9e060ec46cc8ea381dfb70284e2c827eceee64..6ede03c9550d33dfb79bcf41f0e244ae72e14061 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -556,7 +556,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; - struct usb_interface *usbif = to_usb_interface(dev->dev.parent); + struct usb_interface *usbif; + + if (!hid_is_usb(dev)) + return -EINVAL; + + usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c index ad2e87de7dc53ee2f2cf93e3e59a171b47d995b6..d58a108a96c0df7934918ad6afc39f4082d7672a 100644 --- a/drivers/hid/hid-cougar.c +++ b/drivers/hid/hid-cougar.c @@ -104,7 +104,7 @@ static void cougar_fix_g6_mapping(struct hid_device *hdev) static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (rdesc[2] == 0x09 && rdesc[3] == 0x02 && + if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 && (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) { hid_info(hdev, "usage count exceeds max: fixing up report descriptor\n"); diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 271f31461da427d93459632b096c578d71f3ee44..09760ef85b68fba996769f70f0cbba661942fa35 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1158,10 +1158,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); - INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); - - cp2112_gpio_direction_input(gc, d->hwirq); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1209,6 +1205,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, return PTR_ERR(dev->desc[pin]); } + ret = cp2112_gpio_direction_input(&dev->gc, pin); + if (ret < 0) { + dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n"); + goto err_desc; + } + ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); @@ -1339,6 +1341,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) dev->gc.can_sleep = 1; 
dev->gc.parent = &hdev->dev; + INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); + ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index b48100236df890cdd1bbffa0daac97257357a38d..a353a011fbdff7ce44387f2bfd7f18965f64cd06 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { - unsigned i; struct hid_debug_list *list; unsigned long flags; spin_lock_irqsave(&hdev->debug_list_lock, flags); - list_for_each_entry(list, &hdev->debug_list, node) { - for (i = 0; buf[i]; i++) - list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = - buf[i]; - list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; - } + list_for_each_entry(list, &hdev->debug_list, node) + kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); @@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu hid_debug_event(hdev, buf); kfree(buf); - wake_up_interruptible(&hdev->debug_wait); - + wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); @@ -1065,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ + if (down_interruptible(&hdev->driver_input_lock)) + return 0; + hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); + up(&hdev->driver_input_lock); + return 0; } @@ -1088,8 +1088,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) goto out; } - if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { - err = -ENOMEM; + err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); + if (err) { kfree(list); goto out; } @@ -1109,77 +1109,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; - int ret = 0, len; + int ret = 0, copied; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); - while (ret == 0) { - if (list->head == list->tail) { - add_wait_queue(&list->hdev->debug_wait, &wait); - set_current_state(TASK_INTERRUPTIBLE); - - while (list->head == list->tail) { - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - if (signal_pending(current)) { - ret = -ERESTARTSYS; - break; - } + if (kfifo_is_empty(&list->hid_debug_fifo)) { + add_wait_queue(&list->hdev->debug_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + while (kfifo_is_empty(&list->hid_debug_fifo)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } - if (!list->hdev || !list->hdev->debug) { - ret = -EIO; - set_current_state(TASK_RUNNING); - goto out; - } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } - /* allow O_NONBLOCK from other threads */ - mutex_unlock(&list->read_mutex); - schedule(); - mutex_lock(&list->read_mutex); - set_current_state(TASK_INTERRUPTIBLE); + /* if list->hdev is NULL we cannot remove_wait_queue(). + * if list->hdev->debug is 0 then hid_debug_unregister() + * was already called and list->hdev is being destroyed. + * if we add remove_wait_queue() here we can hit a race. 
+ */ + if (!list->hdev || !list->hdev->debug) { + ret = -EIO; + set_current_state(TASK_RUNNING); + goto out; } - set_current_state(TASK_RUNNING); - remove_wait_queue(&list->hdev->debug_wait, &wait); + /* allow O_NONBLOCK from other threads */ + mutex_unlock(&list->read_mutex); + schedule(); + mutex_lock(&list->read_mutex); + set_current_state(TASK_INTERRUPTIBLE); } - if (ret) - goto out; + __set_current_state(TASK_RUNNING); + remove_wait_queue(&list->hdev->debug_wait, &wait); - /* pass the ringbuffer contents to userspace */ -copy_rest: - if (list->tail == list->head) + if (ret) goto out; - if (list->tail > list->head) { - len = list->tail - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - ret += len; - list->head += len; - } else { - len = HID_DEBUG_BUFSIZE - list->head; - if (len > count) - len = count; - - if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { - ret = -EFAULT; - goto out; - } - list->head = 0; - ret += len; - count -= len; - if (count > 0) - goto copy_rest; - } - } + + /* pass the fifo content to userspace, locking is not needed with only + * one concurrent reader and one concurrent writer + */ + ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); + if (ret) + goto out; + ret = copied; out: mutex_unlock(&list->read_mutex); return ret; @@ -1190,7 +1170,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); - if (list->head != list->tail) + if (!kfifo_is_empty(&list->hid_debug_fifo)) return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) return EPOLLERR | EPOLLHUP; @@ -1205,7 +1185,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); - kfree(list->hid_debug_buf); + kfifo_free(&list->hid_debug_fifo); kfree(list); return 0; @@ -1256,4 +1236,3 @@ void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); } - diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 818ea7d935333046adc5036d141ffa6cf6be5c40..309969b8dc2ecfb5353d5fbb5799c02ee087f0be 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid) { struct drff_device *drff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 07e26c3567eb979666be9c7966b67d6a5dbf56d0..896f4aceeef979f560dad0ab3f9a53566f1148ca 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -54,7 +54,7 @@ struct elan_drvdata { static int is_not_elan_touchpad(struct hid_device *hdev) { - if (hdev->bus == BUS_USB) { + if (hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); return 
(intf->altsetting->desc.bInterfaceNumber != diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 5eea6fe0d7bd8181082599a4ed0f05eda3268d5c..c3ecac13e62035a0ff761da030494e4d20d7e00f 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) struct elo_priv *priv; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c index d82d75bb11f78b591483233bc294aaf3a5939a72..80f9a02dfa69b212b523a9e19a724a14c9edf3a2 100644 --- a/drivers/hid/hid-emsff.c +++ b/drivers/hid/hid-emsff.c @@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid) { struct emsff_device *emsff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c index 2d8cead3adcaadf68e30ea9a4f37c7fc21452d50..5a02c50443cb70057c6c6ae0a0c9fc10f6564781 100644 --- a/drivers/hid/hid-gaff.c +++ b/drivers/hid/hid-gaff.c @@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid) { struct gaff_device *gaff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 6bf4da7ad63a51f3b9aa6713552c96be6042bba2..8cb63ea9977d6807a10e86b568157d8f0f2f9762 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -120,6 +120,10 @@ static int hammer_input_configured(struct hid_device *hdev, static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 6e1a4a4fc0c109f7bf60083963d8c239a10939cf..2f8eb663974445f88d18e56e3d07a61b9a5385bb 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, /* Locate the boot interface, to receive the LED change 
events */ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0); + struct hid_device *boot_hid; + struct hid_input *boot_hid_input; - struct hid_device *boot_hid = usb_get_intfdata(boot_interface); - struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs, + if (unlikely(boot_interface == NULL)) + return -ENODEV; + + boot_hid = usb_get_intfdata(boot_interface); + boot_hid_input = list_first_entry(&boot_hid->inputs, struct hid_input, list); return boot_hid_input->input->event(boot_hid_input->input, type, code, @@ -138,12 +143,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, static int holtek_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - int ret = hid_parse(hdev); + struct usb_interface *intf; + int ret; + + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (!ret) ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + intf = to_usb_interface(hdev->dev.parent); if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { struct hid_input *hidinput; list_for_each_entry(hidinput, &hdev->inputs, list) { diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 78b3a0c767751534c16167846cfb3571641ce6af..96db7e96fcea9940c8a7848f9438d1cedb630908 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -65,6 +65,29 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +static int holtek_mouse_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int ret; + + if (!hid_is_usb(hdev)) + return -EINVAL; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "hid parse failed: %d\n", ret); + return ret; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed: %d\n", ret); + return ret; + } + + return 0; +} + static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, @@ -86,6 +109,7 @@ static struct hid_driver holtek_mouse_driver = { .name = "holtek_mouse", .id_table = holtek_mouse_devices, .report_fixup = holtek_mouse_report_fixup, + .probe = holtek_mouse_probe, }; module_hid_driver(holtek_mouse_driver); diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c index edc0f64bb584806f080c1b6f682c53a20acc8466..c68486ee203c73bb5e3195afad10971257a9bf74 100644 --- a/drivers/hid/hid-holtekff.c +++ b/drivers/hid/hid-holtekff.c @@ -136,13 +136,19 @@ static int holtekff_init(struct hid_device *hid) { struct holtekff_device *holtekff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index b372854cf38d3221d8598affb395845fb42ce930..4d1496f60071fdeb195ebb03f60c50c0013df55a 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device, 
hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, input_dev->input_buf, len, 1); - pm_wakeup_event(&input_dev->device->device, 0); + pm_wakeup_hard_event(&input_dev->device->device); break; default: @@ -322,60 +322,24 @@ static void mousevsc_on_receive(struct hv_device *device, static void mousevsc_on_channel_callback(void *context) { - const int packet_size = 0x100; - int ret; struct hv_device *device = context; - u32 bytes_recvd; - u64 req_id; struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = packet_size; - - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - do { - ret = vmbus_recvpacket_raw(device->channel, buffer, - bufferlen, &bytes_recvd, &req_id); - - switch (ret) { - case 0: - if (bytes_recvd <= 0) { - kfree(buffer); - return; - } - desc = (struct vmpacket_descriptor *)buffer; - - switch (desc->type) { - case VM_PKT_COMP: - break; - - case VM_PKT_DATA_INBAND: - mousevsc_on_receive(device, desc); - break; - - default: - pr_err("unhandled packet type %d, tid %llx len %d\n", - desc->type, req_id, bytes_recvd); - break; - } + foreach_vmbus_pkt(desc, device->channel) { + switch (desc->type) { + case VM_PKT_COMP: break; - case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - - if (!buffer) - return; + case VM_PKT_DATA_INBAND: + mousevsc_on_receive(device, desc); + break; + default: + pr_err("Unhandled packet type %d, tid %llx len %d\n", + desc->type, desc->trans_id, desc->len8 * 8); break; } - } while (1); - + } } static int mousevsc_connect_to_vsp(struct hv_device *device) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index bc49909aba8e664b6675fcac13921128c661dce9..1949d6fca53e562d8fb86184c5dc9488752790be 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -17,6 +17,9 @@ #ifndef HID_IDS_H_FILE #define HID_IDS_H_FILE +#define USB_VENDOR_ID_258A 0x258a +#define USB_DEVICE_ID_258A_6A88 0x6a88 + #define USB_VENDOR_ID_3M 0x0596 #define USB_DEVICE_ID_3M1968 0x0500 #define USB_DEVICE_ID_3M2256 0x0502 @@ -79,6 +82,7 @@ #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 #define HID_DEVICE_ID_ALPS_U1 0x1215 #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C +#define HID_DEVICE_ID_ALPS_1222 0x1222 #define USB_VENDOR_ID_AMI 0x046b @@ -262,6 +266,7 @@ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 +#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 #define USB_DEVICE_ID_ASUS_AK1D 0x1125 #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421 @@ -271,6 +276,9 @@ #define USB_VENDOR_ID_CIDC 0x1677 +#define I2C_VENDOR_ID_CIRQUE 0x0488 +#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F + #define USB_VENDOR_ID_CJTOUCH 0x24b8 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040 @@ -342,6 +350,7 @@ #define USB_DEVICE_ID_DMI_ENC 0x5fab #define USB_VENDOR_ID_DRAGONRISE 0x0079 +#define USB_DEVICE_ID_REDRAGON_SEYMUR2 0x0006 #define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800 #define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801 #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803 @@ -457,6 +466,8 @@ #define USB_DEVICE_ID_GOOGLE_STAFF 0x502b #define USB_DEVICE_ID_GOOGLE_WAND 0x502d #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030 +#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c +#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_VENDOR_ID_GOTOP 0x08f2 #define 
USB_DEVICE_ID_SUPER_Q2 0x007f @@ -550,6 +561,9 @@ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941 +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641 +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e @@ -702,6 +716,7 @@ #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 +#define I2C_DEVICE_ID_LG_8001 0x8001 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -799,6 +814,7 @@ #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_DEVICE_ID_MS_POWER_COVER 0x07da +#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -921,12 +937,19 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define I2C_VENDOR_ID_RAYDIUM 0x2386 +#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 + #define USB_VENDOR_ID_RAZER 0x1532 #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D #define USB_VENDOR_ID_REALTEK 0x0bda #define USB_DEVICE_ID_REALTEK_READER 0x0152 +#define USB_VENDOR_ID_RETROUSB 0xf000 +#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003 +#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1 + #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c @@ -953,6 +976,7 @@ #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 +#define USB_DEVICE_ID_SAITEK_X52 0x075c #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 @@ -1034,6 +1058,7 @@ #define USB_VENDOR_ID_SYMBOL 0x05e0 #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 +#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 @@ -1195,6 +1220,9 @@ #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 +#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f +#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65 0x4d65 +#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22 #define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. 
*/ diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index a481eaf39e887bad41d89bf251c0dc4a8f2a2096..0062b37ef98fd38df26a21280cab8a6f848fc1b2 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -325,6 +325,12 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084), HID_BATTERY_QUIRK_IGNORE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, + USB_DEVICE_ID_SYMBOL_SCANNER_3), + HID_BATTERY_QUIRK_IGNORE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), + HID_BATTERY_QUIRK_IGNORE }, {} }; @@ -674,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ + switch (usage->hid & 0xf) { + case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; + default: goto ignore; + } + break; + } + /* * Some lazy vendors declare 255 usages for System Control, * leading to the creation of ABS_X|Y axis and too many others. @@ -892,6 +906,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; + case 0x079: map_key_clear(KEY_KBDILLUMUP); break; + case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break; + case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break; + case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; case 0x083: map_key_clear(KEY_LAST); break; case 0x084: map_key_clear(KEY_ENTER); break; @@ -979,6 +997,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x1b8: map_key_clear(KEY_VIDEO); break; case 0x1bc: map_key_clear(KEY_MESSENGER); break; case 0x1bd: map_key_clear(KEY_INFO); break; + case 0x1cb: map_key_clear(KEY_ASSISTANT); break; case 0x201: map_key_clear(KEY_NEW); break; case 0x202: map_key_clear(KEY_OPEN); break; case 0x203: map_key_clear(KEY_CLOSE); break; @@ -1022,6 +1041,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; + case 0x29f: map_key_clear(KEY_SCALE); break; + default: map_key_clear(KEY_UNKNOWN); } break; @@ -1104,9 +1125,19 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: - if (device->driver->input_mapped && device->driver->input_mapped(device, - hidinput, field, usage, &bit, &max) < 0) - goto ignore; + /* Mapping failed, bail out */ + if (!bit) + return; + + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { + /* + * The driver indicated that no further generic handling + * of the usage is desired. 
+ */ + return; + } set_bit(usage->type, input->evbit); @@ -1187,9 +1218,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel set_bit(MSC_SCAN, input->mscbit); } -ignore: return; +ignore: + usage->type = 0; + usage->code = 0; } void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 1882a4ab0f29f48c8bed3dffa3cecc61a22d1661..98b059d79bc891948695fe53a6a774bf171376c8 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 643b6eb54442ed4bc297e182ad1b7c77a25e82c0..eacc76d2ab96019564ed6ca3b7442faf7d7b623d 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -743,7 +743,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd; data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd; data_pointer->led_mute.dev = dev; - led_classdev_register(dev, &data_pointer->led_mute); + ret = led_classdev_register(dev, &data_pointer->led_mute); + if (ret < 0) + goto err; data_pointer->led_micmute.name = name_micmute; data_pointer->led_micmute.brightness_get = @@ -751,7 +753,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) data_pointer->led_micmute.brightness_set = lenovo_led_brightness_set_tpkbd; data_pointer->led_micmute.dev = dev; - led_classdev_register(dev, &data_pointer->led_micmute); + ret = led_classdev_register(dev, &data_pointer->led_micmute); + if (ret < 0) { + led_classdev_unregister(&data_pointer->led_mute); + goto err; + } lenovo_features_set_tpkbd(hdev); diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 596227ddb6e078af028b2d83102d40e5006b147a..ea4e10070851a86e4d83997b4ad7b7b6b790a8a6 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -714,12 +714,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *iface; + __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + + iface = to_usb_interface(hdev->dev.parent); + iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { @@ -763,7 +769,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) if (!buf) { ret = -ENOMEM; - goto err_free; + goto err_stop; } ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), @@ -795,9 +801,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = lg4ff_init(hdev); if (ret) - goto err_free; + goto err_stop; return 0; + +err_stop: + hid_hw_stop(hdev); err_free: kfree(drv_data); return ret; @@ -808,8 +817,7 @@ static void lg_remove(struct hid_device *hdev) struct lg_drv_data *drv_data = hid_get_drvdata(hdev); 
if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); - else - hid_hw_stop(hdev); + hid_hw_stop(hdev); kfree(drv_data); } diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c index 0e3fb1a7e42174dd1dfb953e7209fc14ab50ed7a..6909d9c2fc67a692a4b580b9749876658371c234 100644 --- a/drivers/hid/hid-lg2ff.c +++ b/drivers/hid/hid-lg2ff.c @@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid) { struct lg2ff_device *lg2ff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7); if (!report) diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c index 8c2da183d3bc71354d82b635b5234a70ca106850..acf739fc40603dcc9fdd4e44668231b760f931c5 100644 --- a/drivers/hid/hid-lg3ff.c +++ b/drivers/hid/hid-lg3ff.c @@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = { int lg3ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 512d67e1aae386e37b90288f800734b5a826aebe..ef80c592b88a3ef7b62f06d1737a3a8c71b16b27 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -1259,8 +1259,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc int lg4ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); @@ -1272,6 +1272,13 @@ int lg4ff_init(struct hid_device *hid) int mmode_ret, mmode_idx = -1; u16 real_product_id; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -1; @@ -1483,7 +1490,6 @@ int lg4ff_deinit(struct hid_device *hid) } } #endif - hid_hw_stop(hid); drv_data->device_props = NULL; kfree(entry); diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index e1394af0ae7ba06701ad106896fb9391c417d6c0..1871cdcd1e0a8c5d93273b4dd58174efcaabdb14 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude) int lgff_init(struct hid_device* hid) { - struct hid_input *hidinput = 
list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff_joystick; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -ENODEV; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 19cc980eebce6a3019c44d55dcbef0002e1cda10..eda8833b8c7f1832899ec8c4d33cd571774a2140 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -725,13 +725,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature, static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) { + const u8 ping_byte = 0x5a; + u8 ping_data[3] = { 0, 0, ping_byte }; struct hidpp_report response; int ret; - ret = hidpp_send_fap_command_sync(hidpp, + ret = hidpp_send_rap_command_sync(hidpp, + REPORT_ID_HIDPP_SHORT, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_PROTOCOL_VERSION, - NULL, 0, &response); + ping_data, sizeof(ping_data), &response); if (ret == HIDPP_ERROR_INVALID_SUBID) { hidpp->protocol_major = 1; @@ -751,8 +754,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) if (ret) return ret; - hidpp->protocol_major = response.fap.params[0]; - hidpp->protocol_minor = response.fap.params[1]; + if (response.rap.params[2] != ping_byte) { + hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n", + __func__, response.rap.params[2], ping_byte); + return -EPROTO; + } + + hidpp->protocol_major = response.rap.params[0]; + hidpp->protocol_minor = response.rap.params[1]; return ret; } @@ -901,7 +910,11 @@ static int hidpp_map_battery_level(int capacity) { if (capacity < 11) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; - else if (capacity < 31) + /* + * The spec says this should be < 31 but some devices report 30 + * with brand new batteries and Windows reports 30 as "Good". 
+ */ + else if (capacity < 30) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (capacity < 81) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; @@ -965,6 +978,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp, ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS, NULL, 0, &response); + /* Ignore these intermittent errors */ + if (ret == HIDPP_ERROR_RESOURCE_ERROR) + return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); @@ -1854,8 +1870,8 @@ static void hidpp_ff_destroy(struct ff_device *ff) static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) { struct hid_device *hid = hidpp->hid_dev; - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice); struct ff_device *ff; @@ -1864,6 +1880,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) int error, j, num_slots; u8 version; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (!dev) { hid_err(hid, "Struct input_dev not set!\n"); return -EINVAL; @@ -1907,6 +1930,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) kfree(data); return -ENOMEM; } + data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); + if (!data->wq) { + kfree(data->effect_ids); + kfree(data); + return -ENOMEM; + } + data->hidpp = hidpp; data->feature_index = feature_index; data->version = version; @@ -1951,7 +1981,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) /* ignore boost value at response.fap.params[2] */ /* init the hardware command queue */ - data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); atomic_set(&data->workqueue_size, 0); /* initialize with zero autocenter to get wheel in usable state */ @@ -3051,7 +3080,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* Allow incoming packets */ hid_device_io_start(hdev); - hidpp_connect_event(hidpp); + schedule_work(&hidpp->work); + flush_work(&hidpp->work); return ret; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index da954f3f4da7fcf5071522eec3a6b5b62dd46eed..dfb2548e00529cc14ca60e5188cb13b91b481ce7 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -641,6 +641,13 @@ static void mt_store_field(struct hid_device *hdev, if (*target != DEFAULT_TRUE && *target != DEFAULT_FALSE && *target != DEFAULT_ZERO) { + if (usage->contactid == DEFAULT_ZERO || + usage->x == DEFAULT_ZERO || + usage->y == DEFAULT_ZERO) { + hid_dbg(hdev, + "ignoring duplicate usage on incomplete"); + return; + } usage = mt_allocate_usage(hdev, application); if (!usage) return; @@ -834,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; @@ -1781,6 +1790,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, + 
{ .driver_data = MT_CLS_WIN_8_DUAL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ALPS_JP, + HID_DEVICE_ID_ALPS_1222) }, /* Lenovo X1 TAB Gen 2 */ { .driver_data = MT_CLS_WIN_8_DUAL, @@ -1822,6 +1835,12 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, + /* Cirque devices */ + { .driver_data = MT_CLS_WIN_8_DUAL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + I2C_VENDOR_ID_CIRQUE, + I2C_PRODUCT_ID_CIRQUE_121F) }, + /* CJTouch panels */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH, diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 87eda34ea2f86aa2abb26ac40793ad2e9df40f56..efc995543aa112f63e347b80e7b9aed106f2e0c6 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -555,10 +555,14 @@ static void pcmidi_setup_extra_keys( static int pcmidi_set_operational(struct pcmidi_snd *pm) { + int rc; + if (pm->ifnum != 1) return 0; /* only set up ONCE for interace 1 */ - pcmidi_get_output_report(pm); + rc = pcmidi_get_output_report(pm); + if (rc < 0) + return rc; pcmidi_submit_output_report(pm, 0xc1); return 0; } @@ -687,7 +691,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm) spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); - pcmidi_set_operational(pm); + err = pcmidi_set_operational(pm); + if (err < 0) { + pk_error("failed to find output report\n"); + goto fail_register; + } /* register it */ err = snd_card_register(card); @@ -794,12 +802,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *intf; + unsigned short ifnum; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; + if (!hid_is_usb(hdev)) + return -EINVAL; + + intf = to_usb_interface(hdev->dev.parent); + ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 249d49b6b16c76c11ee1e8f9752cf755b6747b64..57d6fe9ed4163161d8d7b6fb278c0491bafb3d8d 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -45,6 +45,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET }, @@ -70,6 +71,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, 
USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT }, @@ -92,6 +94,9 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT }, @@ -106,7 +111,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, - { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, @@ -129,12 +134,18 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, 
USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, @@ -739,7 +750,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -1020,6 +1030,10 @@ bool hid_ignore(struct hid_device *hdev) if (hdev->product == 0x0401 && strncmp(hdev->name, "ELAN0800", 8) != 0) return true; + /* Same with product id 0x0400 */ + if (hdev->product == 0x0400 && + strncmp(hdev->name, "QTEC0001", 8) != 0) + return true; break; } diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 9e33165250a34c7acf49e5e43c0a8ea482fdb1ec..a5b6b2be9cda8cea678e20b44dc050c2d1913ddb 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -737,7 +737,8 @@ static void rmi_remove(struct hid_device *hdev) { struct rmi_data *hdata = hid_get_drvdata(hdev); - if (hdata->device_flags & RMI_DEVICE) { + if ((hdata->device_flags & RMI_DEVICE) + && test_bit(RMI_STARTED, &hdata->flags)) { clear_bit(RMI_STARTED, &hdata->flags); cancel_work_sync(&hdata->reset_work); rmi_unregister_transport_device(&hdata->xport); diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c index 329c5d1270f94331e609b01d48361e4575772bab..fb545a11214f05e6b6cce667dc1f5fa28b39a5e0 100644 --- a/drivers/hid/hid-roccat-arvo.c +++ b/drivers/hid/hid-roccat-arvo.c @@ -347,6 +347,9 @@ static int arvo_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c index 02db537f8f3eaf79e50ed9126559735d7c6e388d..c07a7ea8a6873174fda73aade07c07694eca0f53 100644 --- a/drivers/hid/hid-roccat-isku.c +++ b/drivers/hid/hid-roccat-isku.c @@ -327,6 +327,9 @@ static int isku_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index bf4675a2739657cf735d74aeea2477ef181fe090..e102e06ad14c16ea0fab5cf19ebbb174835fa0ff 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -743,6 +743,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = 
hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 09e8fc72aa1d4a683f2e467baf3c9a5d5abe6518..b63de4c5b5dd38bac877141d04f45ba7fd3117ec 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -434,6 +434,9 @@ static int koneplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index 07de2f9014c67f914209a5dfc08185892955bd79..ef9508822e5f0b788efefa430c1d0c64e450e252 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -136,6 +136,9 @@ static int konepure_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 317c9c2c0a7ce9d019ece767a140a451ad249379..6256c211398a199fd29413cf285392df109528ba 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -504,6 +504,9 @@ static int kovaplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c index ac1a7313e25964c2e9922d2b9d43a71f41c1c40b..13ae2a7d176d34acb196a960d405d3837246ac54 100644 --- a/drivers/hid/hid-roccat-lua.c +++ b/drivers/hid/hid-roccat-lua.c @@ -163,6 +163,9 @@ static int lua_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index b30aa7b82bf8729def53a7a0632ad8100d57e67f..027aa9d0ec1f27b89ad289f9a0661a91e63e3600 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -452,6 +452,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c index 47cc8f30ff6d4fa4784dd96f7b31d2de8ecb9944..fda4a396a12e82a1e795f1c1c488f182723f51e9 100644 --- a/drivers/hid/hid-roccat-ryos.c +++ b/drivers/hid/hid-roccat-ryos.c @@ -144,6 +144,9 @@ static int ryos_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 6dbf6e04dce75c82dd32109d46b330d5a60cd043..0230fb54f08a5c44757712b2dcfd497eddd0554d 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -116,6 +116,9 @@ static int savu_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 5be8de70c65172c23bf3c6b7e761219f534dc978..c9cec00b4e6e303031b6491f808cb47219364eec 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -260,6 +260,8 @@ int roccat_report_event(int minor, u8 const *data) if (!new_value) 
return -ENOMEM; + mutex_lock(&device->cbuf_lock); + report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ @@ -279,6 +281,8 @@ int roccat_report_event(int minor, u8 const *data) reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; } + mutex_unlock(&device->cbuf_lock); + wake_up_interruptible(&device->wait); return 0; } diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index 7cbb067d4a9e399f62b57b54830d384956037cbc..89bb2260367f3ce3c77bf73873b9b0bcc6ef111c 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -157,6 +157,9 @@ static int samsung_probe(struct hid_device *hdev, int ret; unsigned int cmask = HID_CONNECT_DEFAULT; + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index e8a114157f87b81593225469dbb7f838cb450480..bb012bc032e02635a5501dd6f9961b0da49fabc2 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr, sensor_inst->hsdev, sensor_inst->hsdev->usage, usage, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, false); } else if (!strncmp(name, "units", strlen("units"))) value = sensor_inst->fields[field_index].attribute.units; else if (!strncmp(name, "unit-expo", strlen("unit-expo"))) diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 2b63487057c25b7fb931b8823db31d15f69667be..4256fdc5cd6d50db32f40447a37c2d310d8f1579 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature); int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, - enum sensor_hub_read_flags flag) + enum sensor_hub_read_flags flag, + bool is_signed) { struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev); unsigned long flags; @@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, &hsdev->pending.ready, HZ*5); switch (hsdev->pending.raw_size) { case 1: - ret_val = *(u8 *)hsdev->pending.raw_data; + if (is_signed) + ret_val = *(s8 *)hsdev->pending.raw_data; + else + ret_val = *(u8 *)hsdev->pending.raw_data; break; case 2: - ret_val = *(u16 *)hsdev->pending.raw_data; + if (is_signed) + ret_val = *(s16 *)hsdev->pending.raw_data; + else + ret_val = *(u16 *)hsdev->pending.raw_data; break; case 4: ret_val = *(u32 *)hsdev->pending.raw_data; diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 9671a4bad64392b2348d977c424027b559994c6d..d05c387a588edf6627b232e8a88a408928155be7 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc); static inline void sony_schedule_work(struct sony_sc *sc, enum sony_worker which) { + unsigned long flags; + switch (which) { case SONY_WORKER_STATE: - if (!sc->defer_initialization) + spin_lock_irqsave(&sc->lock, flags); + if (!sc->defer_initialization && sc->state_worker_initialized) schedule_work(&sc->state_worker); + spin_unlock_irqrestore(&sc->lock, flags); break; case SONY_WORKER_HOTPLUG: if (sc->hotplug_worker_initialized) @@ -2245,9 +2249,15 @@ static int sony_play_effect(struct input_dev *dev, void *data, static int sony_init_ff(struct sony_sc *sc) { - struct hid_input *hidinput = list_entry(sc->hdev->inputs.next, - struct hid_input, list); - struct 
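A small self-contained sketch of the widening the sensor-hub hunk above introduces: 1- and 2-byte raw samples are sign-extended only when the caller declares the field signed. The helper name example_widen_raw() is hypothetical.

#include <linux/types.h>

/* Widen a 1/2/4-byte raw sample to s32, sign-extending only when the
 * report field is known to be signed. */
static s32 example_widen_raw(const void *raw, size_t size, bool is_signed)
{
	switch (size) {
	case 1:
		return is_signed ? *(const s8 *)raw : *(const u8 *)raw;
	case 2:
		return is_signed ? *(const s16 *)raw : *(const u16 *)raw;
	case 4:
		return *(const u32 *)raw;
	default:
		return 0;
	}
}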
input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; + + if (list_empty(&sc->hdev->inputs)) { + hid_err(sc->hdev, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list); + input_dev = hidinput->input; input_set_capability(input_dev, EV_FF, FF_RUMBLE); return input_ff_create_memless(input_dev, NULL, sony_play_effect); @@ -2553,13 +2563,18 @@ static inline void sony_init_output_report(struct sony_sc *sc, static inline void sony_cancel_work_sync(struct sony_sc *sc) { + unsigned long flags; + if (sc->hotplug_worker_initialized) cancel_work_sync(&sc->hotplug_worker); - if (sc->state_worker_initialized) + if (sc->state_worker_initialized) { + spin_lock_irqsave(&sc->lock, flags); + sc->state_worker_initialized = 0; + spin_unlock_irqrestore(&sc->lock, flags); cancel_work_sync(&sc->state_worker); + } } - static int sony_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { @@ -2797,7 +2812,6 @@ static int sony_input_configured(struct hid_device *hdev, sony_cancel_work_sync(sc); sony_remove_dev_list(sc); sony_release_device_id(sc); - hid_hw_stop(hdev); return ret; } @@ -2859,6 +2873,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) */ if (!(hdev->claimed & HID_CLAIMED_INPUT)) { hid_err(hdev, "failed to claim input\n"); + hid_hw_stop(hdev); return -ENODEV; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 0422ec2b13d208d98acdf22c5eb97b6393c5f530..8dae0f9b819e011d6695462fea7e88e85cd16669 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -23,8 +23,9 @@ * In order to avoid breaking them this driver creates a layered hidraw device, * so it can detect when the client is running and then: * - it will not send any command to the controller. - * - this input device will be disabled, to avoid double input of the same + * - this input device will be removed, to avoid double input of the same * user action. + * When the client is closed, this input device will be created again. 
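The hid-sony change above (and the matching hid-tmff and hid-zpff changes later in this series) stops taking the first entry of hdev->inputs unconditionally. A minimal sketch of the guarded form, with a hypothetical example_ff_init(); assume a force-feedback init that only needs the first input device.

#include <linux/hid.h>
#include <linux/input.h>
#include <linux/list.h>

/* Take the first hid_input only after confirming the list is non-empty;
 * otherwise list_entry() on an empty list hands back a bogus pointer
 * computed from the list head itself. */
static int example_ff_init(struct hid_device *hid)
{
	struct hid_input *hidinput;
	struct input_dev *input_dev;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	input_dev = hidinput->input;

	input_set_capability(input_dev, EV_FF, FF_RUMBLE);
	return 0;
}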
* * For additional functions, such as changing the right-pad margin or switching * the led, you can use the user-space tool at: @@ -113,7 +114,7 @@ struct steam_device { spinlock_t lock; struct hid_device *hdev, *client_hdev; struct mutex mutex; - bool client_opened, input_opened; + bool client_opened; struct input_dev __rcu *input; unsigned long quirks; struct work_struct work_connect; @@ -279,29 +280,11 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable) } } -static void steam_update_lizard_mode(struct steam_device *steam) -{ - mutex_lock(&steam->mutex); - if (!steam->client_opened) { - if (steam->input_opened) - steam_set_lizard_mode(steam, false); - else - steam_set_lizard_mode(steam, lizard_mode); - } - mutex_unlock(&steam->mutex); -} - static int steam_input_open(struct input_dev *dev) { struct steam_device *steam = input_get_drvdata(dev); - int ret; - - ret = hid_hw_open(steam->hdev); - if (ret) - return ret; mutex_lock(&steam->mutex); - steam->input_opened = true; if (!steam->client_opened && lizard_mode) steam_set_lizard_mode(steam, false); mutex_unlock(&steam->mutex); @@ -313,12 +296,9 @@ static void steam_input_close(struct input_dev *dev) struct steam_device *steam = input_get_drvdata(dev); mutex_lock(&steam->mutex); - steam->input_opened = false; if (!steam->client_opened && lizard_mode) steam_set_lizard_mode(steam, true); mutex_unlock(&steam->mutex); - - hid_hw_close(steam->hdev); } static enum power_supply_property steam_battery_props[] = { @@ -400,7 +380,7 @@ static int steam_battery_register(struct steam_device *steam) return 0; } -static int steam_register(struct steam_device *steam) +static int steam_input_register(struct steam_device *steam) { struct hid_device *hdev = steam->hdev; struct input_dev *input; @@ -414,17 +394,6 @@ static int steam_register(struct steam_device *steam) return 0; } - /* - * Unlikely, but getting the serial could fail, and it is not so - * important, so make up a serial number and go on. 
- */ - if (steam_get_serial(steam) < 0) - strlcpy(steam->serial_no, "XXXXXXXXXX", - sizeof(steam->serial_no)); - - hid_info(hdev, "Steam Controller '%s' connected", - steam->serial_no); - input = input_allocate_device(); if (!input) return -ENOMEM; @@ -492,11 +461,6 @@ static int steam_register(struct steam_device *steam) goto input_register_fail; rcu_assign_pointer(steam->input, input); - - /* ignore battery errors, we can live without it */ - if (steam->quirks & STEAM_QUIRK_WIRELESS) - steam_battery_register(steam); - return 0; input_register_fail: @@ -504,27 +468,93 @@ static int steam_register(struct steam_device *steam) return ret; } -static void steam_unregister(struct steam_device *steam) +static void steam_input_unregister(struct steam_device *steam) { struct input_dev *input; + rcu_read_lock(); + input = rcu_dereference(steam->input); + rcu_read_unlock(); + if (!input) + return; + RCU_INIT_POINTER(steam->input, NULL); + synchronize_rcu(); + input_unregister_device(input); +} + +static void steam_battery_unregister(struct steam_device *steam) +{ struct power_supply *battery; rcu_read_lock(); - input = rcu_dereference(steam->input); battery = rcu_dereference(steam->battery); rcu_read_unlock(); - if (battery) { - RCU_INIT_POINTER(steam->battery, NULL); - synchronize_rcu(); - power_supply_unregister(battery); + if (!battery) + return; + RCU_INIT_POINTER(steam->battery, NULL); + synchronize_rcu(); + power_supply_unregister(battery); +} + +static int steam_register(struct steam_device *steam) +{ + int ret; + bool client_opened; + + /* + * This function can be called several times in a row with the + * wireless adaptor, without steam_unregister() between them, because + * another client send a get_connection_status command, for example. + * The battery and serial number are set just once per device. + */ + if (!steam->serial_no[0]) { + /* + * Unlikely, but getting the serial could fail, and it is not so + * important, so make up a serial number and go on. 
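A condensed sketch of the RCU-protected teardown that steam_input_unregister() and steam_battery_unregister() above both follow; struct example_dev and example_input_unregister() are hypothetical stand-ins.

#include <linux/rcupdate.h>
#include <linux/input.h>

struct example_dev {
	struct input_dev __rcu *input;
};

/* Detach the RCU-published input device, wait for in-flight readers
 * (for example a raw-event path) to drop out, then unregister it. */
static void example_input_unregister(struct example_dev *d)
{
	struct input_dev *input;

	rcu_read_lock();
	input = rcu_dereference(d->input);
	rcu_read_unlock();

	if (!input)
		return;

	RCU_INIT_POINTER(d->input, NULL);
	synchronize_rcu();
	input_unregister_device(input);
}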
+ */ + mutex_lock(&steam->mutex); + if (steam_get_serial(steam) < 0) + strlcpy(steam->serial_no, "XXXXXXXXXX", + sizeof(steam->serial_no)); + mutex_unlock(&steam->mutex); + + hid_info(steam->hdev, "Steam Controller '%s' connected", + steam->serial_no); + + /* ignore battery errors, we can live without it */ + if (steam->quirks & STEAM_QUIRK_WIRELESS) + steam_battery_register(steam); + + mutex_lock(&steam_devices_lock); + list_add(&steam->list, &steam_devices); + mutex_unlock(&steam_devices_lock); } - if (input) { - RCU_INIT_POINTER(steam->input, NULL); - synchronize_rcu(); + + mutex_lock(&steam->mutex); + client_opened = steam->client_opened; + if (!client_opened) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + if (!client_opened) + ret = steam_input_register(steam); + else + ret = 0; + + return ret; +} + +static void steam_unregister(struct steam_device *steam) +{ + steam_battery_unregister(steam); + steam_input_unregister(steam); + if (steam->serial_no[0]) { hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); - input_unregister_device(input); + mutex_lock(&steam_devices_lock); + list_del(&steam->list); + mutex_unlock(&steam_devices_lock); + steam->serial_no[0] = 0; } } @@ -591,31 +621,35 @@ static void steam_client_ll_stop(struct hid_device *hdev) static int steam_client_ll_open(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; - int ret; - - ret = hid_hw_open(steam->hdev); - if (ret) - return ret; mutex_lock(&steam->mutex); steam->client_opened = true; mutex_unlock(&steam->mutex); - return ret; + + steam_input_unregister(steam); + + return 0; } static void steam_client_ll_close(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; + unsigned long flags; + bool connected; + + spin_lock_irqsave(&steam->lock, flags); + connected = steam->connected; + spin_unlock_irqrestore(&steam->lock, flags); + mutex_lock(&steam->mutex); steam->client_opened = false; - if (steam->input_opened) - steam_set_lizard_mode(steam, false); - else + if (connected) steam_set_lizard_mode(steam, lizard_mode); mutex_unlock(&steam->mutex); - hid_hw_close(steam->hdev); + if (connected) + steam_input_register(steam); } static int steam_client_ll_raw_request(struct hid_device *hdev, @@ -724,14 +758,15 @@ static int steam_probe(struct hid_device *hdev, if (ret) goto client_hdev_add_fail; + ret = hid_hw_open(hdev); + if (ret) { + hid_err(hdev, + "%s:hid_hw_open\n", + __func__); + goto hid_hw_open_fail; + } + if (steam->quirks & STEAM_QUIRK_WIRELESS) { - ret = hid_hw_open(hdev); - if (ret) { - hid_err(hdev, - "%s:hid_hw_open for wireless\n", - __func__); - goto hid_hw_open_fail; - } hid_info(hdev, "Steam wireless receiver connected"); steam_request_conn_status(steam); } else { @@ -744,15 +779,10 @@ static int steam_probe(struct hid_device *hdev, } } - mutex_lock(&steam_devices_lock); - steam_update_lizard_mode(steam); - list_add(&steam->list, &steam_devices); - mutex_unlock(&steam_devices_lock); - return 0; -hid_hw_open_fail: input_register_fail: +hid_hw_open_fail: client_hdev_add_fail: hid_hw_stop(hdev); hid_hw_start_fail: @@ -774,17 +804,13 @@ static void steam_remove(struct hid_device *hdev) return; } - mutex_lock(&steam_devices_lock); - list_del(&steam->list); - mutex_unlock(&steam_devices_lock); - hid_destroy_device(steam->client_hdev); steam->client_opened = false; cancel_work_sync(&steam->work_connect); if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver disconnected"); - 
hid_hw_close(hdev); } + hid_hw_close(hdev); hid_hw_stop(hdev); steam_unregister(steam); } @@ -792,12 +818,14 @@ static void steam_remove(struct hid_device *hdev) static void steam_do_connect_event(struct steam_device *steam, bool connected) { unsigned long flags; + bool changed; spin_lock_irqsave(&steam->lock, flags); + changed = steam->connected != connected; steam->connected = connected; spin_unlock_irqrestore(&steam->lock, flags); - if (schedule_work(&steam->work_connect) == 0) + if (changed && schedule_work(&steam->work_connect) == 0) dbg_hid("%s: connected=%d event already queued\n", __func__, connected); } @@ -1019,13 +1047,8 @@ static int steam_raw_event(struct hid_device *hdev, return 0; rcu_read_lock(); input = rcu_dereference(steam->input); - if (likely(input)) { + if (likely(input)) steam_do_input_event(steam, input, data); - } else { - dbg_hid("%s: input data without connect event\n", - __func__); - steam_do_connect_event(steam, true); - } rcu_read_unlock(); break; case STEAM_EV_CONNECT: @@ -1074,7 +1097,10 @@ static int steam_param_set_lizard_mode(const char *val, mutex_lock(&steam_devices_lock); list_for_each_entry(steam, &steam_devices, list) { - steam_update_lizard_mode(steam); + mutex_lock(&steam->mutex); + if (!steam->client_opened) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); } mutex_unlock(&steam_devices_lock); return 0; diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index bea8def64f437ed13ed5576a479634cee0a27ca4..efe8c2a0261ef0c5b275a26df21fb62c88b4c0f3 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -34,6 +34,8 @@ #include "hid-ids.h" +#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320 + static const signed short ff_rumble[] = { FF_RUMBLE, -1 @@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data, struct hid_field *ff_field = tmff->ff_field; int x, y; int left, right; /* Rumbling */ + int motor_swap; switch (effect->type) { case FF_CONSTANT: @@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data, ff_field->logical_minimum, ff_field->logical_maximum); + /* 2-in-1 strong motor is left */ + if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) { + motor_swap = left; + left = right; + right = motor_swap; + } + dbg_hid("(left,right)=(%08x, %08x)\n", left, right); ff_field->value[0] = left; ff_field->value[1] = right; @@ -126,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits) struct tmff_device *tmff; struct hid_report *report; struct list_head *report_list; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + input_dev = hidinput->input; + tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL); if (!tmff) return -ENOMEM; @@ -238,6 +254,8 @@ static const struct hid_device_id tm_devices[] = { .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ .driver_data = (unsigned long)ff_rumble }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */ + .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ .driver_data = 
(unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */ diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 56b196d600411ec7e5b18170b058b6445a97e5ae..e0bc31ee1576946c65ca6c7375bdde6a37492709 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -791,6 +791,9 @@ static int uclogic_tablet_enable(struct hid_device *hdev) __u8 *p; s32 v; + if (!hid_is_usb(hdev)) + return -EINVAL; + /* * Read string descriptor containing tablet parameters. The specific * string descriptor and data were discovered by sniffing the Windows diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c index a29756c6ca02d064faee371143e750b19094f26c..4e7e01be99b13fcd28edc814f9d0a7425a74c1c0 100644 --- a/drivers/hid/hid-zpff.c +++ b/drivers/hid/hid-zpff.c @@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid) { struct zpff_device *zpff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; int i, error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + for (i = 0; i < 4; i++) { report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); if (!report) diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 4a44e48e08b225a6180ad014604dabc83ce65c2d..c7cff929b4190874790602119498e9331537956d 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -378,7 +378,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, mutex_lock(&minors_lock); dev = hidraw_table[minor]; - if (!dev) { + if (!dev || !dev->exist) { ret = -ENODEV; goto out; } diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile index 832d8f9aaba27fed7926177383f96ec1edaf9666..099e1ce2f2347592f8cf00cff8f620cc4d170301 100644 --- a/drivers/hid/i2c-hid/Makefile +++ b/drivers/hid/i2c-hid/Makefile @@ -3,3 +3,6 @@ # obj-$(CONFIG_I2C_HID) += i2c-hid.o + +i2c-hid-objs = i2c-hid-core.o +i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c similarity index 93% rename from drivers/hid/i2c-hid/i2c-hid.c rename to drivers/hid/i2c-hid/i2c-hid-core.c index 4e3592e7a3f7217f86fe0fba59d3ea73551dcab2..41771da10682b0fb2b2b18301bfafbd5373153a3 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -43,16 +43,19 @@ #include #include "../hid-ids.h" +#include "i2c-hid.h" /* quirks to control the device */ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2) +#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3) +#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) +#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) /* flags */ #define I2C_HID_STARTED 0 #define I2C_HID_RESET_PENDING 1 -#define I2C_HID_READ_PENDING 2 #define I2C_HID_PWR_ON 0x00 #define I2C_HID_PWR_SLEEP 0x01 @@ -157,6 +160,8 @@ struct i2c_hid { bool irq_wake_enabled; struct mutex reset_lock; + + unsigned long sleep_delay; }; static const struct i2c_hid_quirks { @@ -171,6 +176,14 @@ static const struct i2c_hid_quirks { { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | I2C_HID_QUIRK_NO_RUNTIME_PM }, + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33, + 
I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, + { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, + I2C_HID_QUIRK_NO_RUNTIME_PM }, + { USB_VENDOR_ID_ELAN, HID_ANY_ID, + I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, + I2C_HID_QUIRK_RESET_ON_RESUME }, { 0, 0 } }; @@ -239,7 +252,6 @@ static int __i2c_hid_command(struct i2c_client *client, msg[1].len = data_len; msg[1].buf = buf_recv; msg_num = 2; - set_bit(I2C_HID_READ_PENDING, &ihid->flags); } if (wait) @@ -247,9 +259,6 @@ static int __i2c_hid_command(struct i2c_client *client, ret = i2c_transfer(client->adapter, msg, msg_num); - if (data_len > 0) - clear_bit(I2C_HID_READ_PENDING, &ihid->flags); - if (ret != msg_num) return ret < 0 ? ret : -EIO; @@ -386,6 +395,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) { struct i2c_hid *ihid = i2c_get_clientdata(client); int ret; + unsigned long now, delay; i2c_hid_dbg(ihid, "%s\n", __func__); @@ -403,9 +413,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) goto set_pwr_exit; } + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_ON) { + now = jiffies; + if (time_after(ihid->sleep_delay, now)) { + delay = jiffies_to_usecs(ihid->sleep_delay - now); + usleep_range(delay, delay + 1); + } + } + ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state, 0, NULL, 0, NULL, 0); + if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && + power_state == I2C_HID_PWR_SLEEP) + ihid->sleep_delay = jiffies + msecs_to_jiffies(20); + if (ret) dev_err(&client->dev, "failed to change power setting.\n"); @@ -481,6 +504,12 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) return; } + if (ihid->quirks & I2C_HID_QUIRK_BOGUS_IRQ && ret_size == 0xffff) { + dev_warn_once(&ihid->client->dev, "%s: IRQ triggered but " + "there's no data\n", __func__); + return; + } + if ((ret_size > size) || (ret_size < 2)) { dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", __func__, size, ret_size); @@ -500,9 +529,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id) { struct i2c_hid *ihid = dev_id; - if (test_bit(I2C_HID_READ_PENDING, &ihid->flags)) - return IRQ_HANDLED; - i2c_hid_get_input(ihid); return IRQ_HANDLED; @@ -668,6 +694,7 @@ static int i2c_hid_parse(struct hid_device *hid) char *rdesc; int ret; int tries = 3; + char *use_override; i2c_hid_dbg(ihid, "entering %s\n", __func__); @@ -686,26 +713,37 @@ static int i2c_hid_parse(struct hid_device *hid) if (ret) return ret; - rdesc = kzalloc(rsize, GFP_KERNEL); - - if (!rdesc) { - dbg_hid("couldn't allocate rdesc memory\n"); - return -ENOMEM; - } - - i2c_hid_dbg(ihid, "asking HID report descriptor\n"); + use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name, + &rsize); - ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize); - if (ret) { - hid_err(hid, "reading report descriptor failed\n"); - kfree(rdesc); - return -EIO; + if (use_override) { + rdesc = use_override; + i2c_hid_dbg(ihid, "Using a HID report descriptor override\n"); + } else { + rdesc = kzalloc(rsize, GFP_KERNEL); + + if (!rdesc) { + dbg_hid("couldn't allocate rdesc memory\n"); + return -ENOMEM; + } + + i2c_hid_dbg(ihid, "asking HID report descriptor\n"); + + ret = i2c_hid_command(client, &hid_report_descr_cmd, + rdesc, rsize); + if (ret) { + hid_err(hid, "reading report descriptor failed\n"); + kfree(rdesc); + return -EIO; + } } i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc); ret = hid_parse_report(hid, rdesc, rsize); - kfree(rdesc); + if (!use_override) + 
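The I2C_HID_QUIRK_DELAY_AFTER_SLEEP handling above records when the device was last put to sleep and stalls the next power-on until roughly 20 ms have passed. A small sketch of that jiffies bookkeeping with hypothetical names; only time_after(), jiffies_to_usecs(), msecs_to_jiffies() and usleep_range() are taken from the hunk.

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Hypothetical state mirroring ihid->sleep_delay: the earliest jiffies
 * value at which the device may be powered back on. */
static unsigned long example_wake_not_before;

static void example_note_sleep(void)
{
	/* Device was just put to sleep: allow wake only after ~20 ms. */
	example_wake_not_before = jiffies + msecs_to_jiffies(20);
}

static void example_wait_before_wake(void)
{
	unsigned long now = jiffies;
	unsigned long delay;

	if (time_after(example_wake_not_before, now)) {
		delay = jiffies_to_usecs(example_wake_not_before - now);
		usleep_range(delay, delay + 1);
	}
}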
kfree(rdesc); + if (ret) { dbg_hid("parsing report descriptor failed\n"); return ret; @@ -832,12 +870,19 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid) int ret; /* i2c hid fetch using a fixed descriptor size (30 bytes) */ - i2c_hid_dbg(ihid, "Fetching the HID descriptor\n"); - ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer, - sizeof(struct i2c_hid_desc)); - if (ret) { - dev_err(&client->dev, "hid_descr_cmd failed\n"); - return -ENODEV; + if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) { + i2c_hid_dbg(ihid, "Using a HID descriptor override\n"); + ihid->hdesc = + *i2c_hid_get_dmi_i2c_hid_desc_override(client->name); + } else { + i2c_hid_dbg(ihid, "Fetching the HID descriptor\n"); + ret = i2c_hid_command(client, &hid_descr_cmd, + ihid->hdesc_buffer, + sizeof(struct i2c_hid_desc)); + if (ret) { + dev_err(&client->dev, "hid_descr_cmd failed\n"); + return -ENODEV; + } } /* Validate the length of HID descriptor, the 4 first bytes: @@ -1240,8 +1285,15 @@ static int i2c_hid_resume(struct device *dev) * solves "incomplete reports" on Raydium devices 2386:3118 and * 2386:4B33 and fixes various SIS touchscreens no longer sending * data after a suspend/resume. + * + * However some ALPS touchpads generate IRQ storm without reset, so + * let's still reset them here. */ - ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME) + ret = i2c_hid_hwreset(client); + else + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + if (ret) return ret; diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c new file mode 100644 index 0000000000000000000000000000000000000000..10af8585c820d253b71301f23efae5db3b40fc14 --- /dev/null +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Quirks for I2C-HID devices that do not supply proper descriptors + * + * Copyright (c) 2018 Julian Sax + * + */ + +#include +#include +#include + +#include "i2c-hid.h" + + +struct i2c_hid_desc_override { + union { + struct i2c_hid_desc *i2c_hid_desc; + uint8_t *i2c_hid_desc_buffer; + }; + uint8_t *hid_report_desc; + unsigned int hid_report_desc_size; + uint8_t *i2c_name; +}; + + +/* + * descriptors for the SIPODEV SP1064 touchpad + * + * This device does not supply any descriptors and on windows a filter + * driver operates between the i2c-hid layer and the device and injects + * these descriptors when the device is prompted. The descriptors were + * extracted by listening to the i2c-hid traffic that occurs between the + * windows filter driver and the windows i2c-hid driver. 
+ */ + +static const struct i2c_hid_desc_override sipodev_desc = { + .i2c_hid_desc_buffer = (uint8_t []) + {0x1e, 0x00, /* Length of descriptor */ + 0x00, 0x01, /* Version of descriptor */ + 0xdb, 0x01, /* Length of report descriptor */ + 0x21, 0x00, /* Location of report descriptor */ + 0x24, 0x00, /* Location of input report */ + 0x1b, 0x00, /* Max input report length */ + 0x25, 0x00, /* Location of output report */ + 0x11, 0x00, /* Max output report length */ + 0x22, 0x00, /* Location of command register */ + 0x23, 0x00, /* Location of data register */ + 0x11, 0x09, /* Vendor ID */ + 0x88, 0x52, /* Product ID */ + 0x06, 0x00, /* Version ID */ + 0x00, 0x00, 0x00, 0x00 /* Reserved */ + }, + + .hid_report_desc = (uint8_t []) + {0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x02, /* Usage (Mouse), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x01, /* Report ID (1), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xA1, 0x00, /* Collection (Physical), */ + 0x05, 0x09, /* Usage Page (Button), */ + 0x19, 0x01, /* Usage Minimum (01h), */ + 0x29, 0x02, /* Usage Maximum (02h), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x02, /* Report Count (2), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x06, /* Report Count (6), */ + 0x81, 0x01, /* Input (Constant), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x30, /* Usage (X), */ + 0x09, 0x31, /* Usage (Y), */ + 0x15, 0x81, /* Logical Minimum (-127), */ + 0x25, 0x7F, /* Logical Maximum (127), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x02, /* Report Count (2), */ + 0x81, 0x06, /* Input (Variable, Relative), */ + 0xC0, /* End Collection, */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x05, /* Usage (Touchpad), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x04, /* Report ID (4), */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x09, 0x47, /* Usage (Touch Valid), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x95, 0x02, /* Report Count (2), */ + 0x75, 0x01, /* Report Size (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x01, /* Report Count (1), */ + 0x75, 0x03, /* Report Size (3), */ + 0x25, 0x05, /* Logical Maximum (5), */ + 0x09, 0x51, /* Usage (Contact Identifier), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */ + 0x75, 0x10, /* Report Size (16), */ + 0x55, 0x0E, /* Unit Exponent (14), */ + 0x65, 0x11, /* Unit (Centimeter), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */ + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x09, 0x47, /* Usage (Touch Valid), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x95, 0x02, /* Report Count (2), */ + 0x75, 0x01, /* Report Size (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x01, /* Report Count (1), */ + 
0x75, 0x03, /* Report Size (3), */ + 0x25, 0x05, /* Logical Maximum (5), */ + 0x09, 0x51, /* Usage (Contact Identifier), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */ + 0x75, 0x10, /* Report Size (16), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */ + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x09, 0x47, /* Usage (Touch Valid), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x95, 0x02, /* Report Count (2), */ + 0x75, 0x01, /* Report Size (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x01, /* Report Count (1), */ + 0x75, 0x03, /* Report Size (3), */ + 0x25, 0x05, /* Logical Maximum (5), */ + 0x09, 0x51, /* Usage (Contact Identifier), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */ + 0x75, 0x10, /* Report Size (16), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */ + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x09, 0x47, /* Usage (Touch Valid), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x95, 0x02, /* Report Count (2), */ + 0x75, 0x01, /* Report Size (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x01, /* Report Count (1), */ + 0x75, 0x03, /* Report Size (3), */ + 0x25, 0x05, /* Logical Maximum (5), */ + 0x09, 0x51, /* Usage (Contact Identifier), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */ + 0x75, 0x10, /* Report Size (16), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x46, 0xBC, 0x02, /* Physical Maximum (700), */ + 0x26, 0x34, 0x05, /* Logical Maximum (1332), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x55, 0x0C, /* Unit Exponent (12), */ + 0x66, 0x01, 0x10, /* Unit (Seconds), */ + 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */ + 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */ + 0x75, 0x10, /* Report Size (16), */ + 0x95, 0x01, /* Report Count (1), */ + 0x09, 0x56, /* Usage (Scan Time), */ + 0x81, 0x02, /* Input (Variable), */ + 0x09, 0x54, /* Usage (Contact Count), 
*/ + 0x25, 0x7F, /* Logical Maximum (127), */ + 0x75, 0x08, /* Report Size (8), */ + 0x81, 0x02, /* Input (Variable), */ + 0x05, 0x09, /* Usage Page (Button), */ + 0x09, 0x01, /* Usage (01h), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x07, /* Report Count (7), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x85, 0x02, /* Report ID (2), */ + 0x09, 0x55, /* Usage (Contact Count Maximum), */ + 0x09, 0x59, /* Usage (59h), */ + 0x75, 0x04, /* Report Size (4), */ + 0x95, 0x02, /* Report Count (2), */ + 0x25, 0x0F, /* Logical Maximum (15), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x85, 0x07, /* Report ID (7), */ + 0x09, 0x60, /* Usage (60h), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x01, /* Report Count (1), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0x95, 0x07, /* Report Count (7), */ + 0xB1, 0x03, /* Feature (Constant, Variable), */ + 0x85, 0x06, /* Report ID (6), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x09, 0xC5, /* Usage (C5h), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x75, 0x08, /* Report Size (8), */ + 0x96, 0x00, 0x01, /* Report Count (256), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x09, 0x01, /* Usage (01h), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x0D, /* Report ID (13), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x19, 0x01, /* Usage Minimum (01h), */ + 0x29, 0x02, /* Usage Maximum (02h), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x02, /* Report Count (2), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x0E, /* Usage (Configuration), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x03, /* Report ID (3), */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x09, 0x52, /* Usage (Device Mode), */ + 0x25, 0x0A, /* Logical Maximum (10), */ + 0x95, 0x01, /* Report Count (1), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0x09, 0x22, /* Usage (Finger), */ + 0xA1, 0x00, /* Collection (Physical), */ + 0x85, 0x05, /* Report ID (5), */ + 0x09, 0x57, /* Usage (57h), */ + 0x09, 0x58, /* Usage (58h), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x02, /* Report Count (2), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0x95, 0x06, /* Report Count (6), */ + 0xB1, 0x03, /* Feature (Constant, Variable),*/ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ + }, + .hid_report_desc_size = 475, + .i2c_name = "SYNA3602:00" +}; + + +static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + { + .ident = "Teclast F6 Pro", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Teclast F7", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Trekstor Primebook C13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Trekstor Primebook C11", + 
.matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + /* + * There are at least 2 Primebook C11B versions, the older + * version has a product-name of "Primebook C11B", and a + * bios version / release / firmware revision of: + * V2.1.2 / 05/03/2018 / 18.2 + * The new version has "PRIMEBOOK C11B" as product-name and a + * bios version / release / firmware revision of: + * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2 + * Only the older version needs this quirk, note the newer + * version will not match as it has a different product-name. + */ + .ident = "Trekstor Primebook C11B", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Direkt-Tek DTLAPY116-2", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Direkt-Tek DTLAPY133-1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Mediacom Flexbook Edge 11", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"), + }, + .driver_data = (void *)&sipodev_desc + }, + { + .ident = "Odys Winbook 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"), + }, + .driver_data = (void *)&sipodev_desc + }, + { } /* Terminate list */ +}; + + +struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name) +{ + struct i2c_hid_desc_override *override; + const struct dmi_system_id *system_id; + + system_id = dmi_first_match(i2c_hid_dmi_desc_override_table); + if (!system_id) + return NULL; + + override = system_id->driver_data; + if (strcmp(override->i2c_name, i2c_name)) + return NULL; + + return override->i2c_hid_desc; +} + +char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name, + unsigned int *size) +{ + struct i2c_hid_desc_override *override; + const struct dmi_system_id *system_id; + + system_id = dmi_first_match(i2c_hid_dmi_desc_override_table); + if (!system_id) + return NULL; + + override = system_id->driver_data; + if (strcmp(override->i2c_name, i2c_name)) + return NULL; + + *size = override->hid_report_desc_size; + return override->hid_report_desc; +} diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c19aef5824c5fd0f592cd84a596fc435c4bff2 --- /dev/null +++ b/drivers/hid/i2c-hid/i2c-hid.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef I2C_HID_H +#define I2C_HID_H + + +#ifdef CONFIG_DMI +struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name); +char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name, + unsigned int *size); +#else +static inline struct i2c_hid_desc + *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name) +{ return NULL; } +static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name, + unsigned int *size) +{ return NULL; } +#endif + +#endif diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index 
bfbca7ec54ce4a9c97228368a8264c693d3e38da..e00b9dbe220f190d3b7dae9871cec78ec7a784b7 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c @@ -91,7 +91,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev) IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val); } else { pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT); - interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val); + interrupt_generated = !!pisr_val; + /* only busy-clear bit is RW, others are RO */ + if (pisr_val) + ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val); } return interrupt_generated; @@ -843,11 +846,11 @@ int ish_hw_start(struct ishtp_device *dev) { ish_set_host_rdy(dev); + set_host_ready(dev); + /* After that we can enable ISH DMA operation and wakeup ISHFW */ ish_wakeup(dev); - set_host_ready(dev); - /* wait for FW-initiated reset flow */ if (!dev->recvd_hw_ready) wait_event_interruptible_timeout(dev->wait_hw_ready, diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c index cd23903ddcf194e581902102ebcd56f70459ac9a..e918d78e541c0d072ea4fe89cdfec2eb9628da4c 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid.c @@ -222,7 +222,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev, err_hid_device: kfree(hid_data); err_hid_data: - kfree(hid); + hid_destroy_device(hid); return rv; } diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index 2623a567ffba5ae51e90653e47bea42127ea9b02..f546635e9ac9daf26d2de2984fb2972e69781a9f 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -623,7 +623,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl) spin_lock_irqsave(&cl->dev->device_list_lock, flags); list_for_each_entry(cl_device, &cl->dev->device_list, device_link) { - if (cl_device->fw_client->client_id == cl->fw_client_id) { + if (cl_device->fw_client && + cl_device->fw_client->client_id == cl->fw_client_id) { cl->device = cl_device; rv = 0; break; @@ -683,6 +684,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev, spin_lock_irqsave(&ishtp_dev->device_list_lock, flags); list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list, device_link) { + cl_device->fw_client = NULL; if (warm_reset && cl_device->reference_count) continue; diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c index b9b917d2d50db3fedaa17ce8f6fcb3a05e2f38c3..c41dbb167c91ba23f3159ba1a629b118e26a0abd 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c +++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c @@ -90,7 +90,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl) return 0; out: dev_err(&cl->device->dev, "error in allocating Tx pool\n"); - ishtp_cl_free_rx_ring(cl); + ishtp_cl_free_tx_ring(cl); return -ENOMEM; } diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c index 2783f366611496c5e31783011d3827961674075c..ff4419c8ed4f6c2223e242249c0017dbade85f78 100644 --- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c +++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c @@ -113,6 +113,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev, int required_slots = (size / DMA_SLOT_SIZE) + 1 * (size % DMA_SLOT_SIZE != 0); + if (!dev->ishtp_dma_tx_map) { + dev_err(dev->devc, "Fail to allocate Tx map\n"); + return NULL; + } + spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags); for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) { free 
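The ishtp-hid fix above replaces a bare kfree() of a hid_device with hid_destroy_device(). A minimal sketch of the allocate/add/error-unwind pairing, assuming a much-simplified hypothetical probe; a real probe would also set up ll_driver and other fields before hid_add_device().

#include <linux/hid.h>
#include <linux/err.h>

/* hid_allocate_device() returns a refcounted device; on error paths it
 * must be released with hid_destroy_device(), never plain kfree(). */
static int example_hid_probe(void)
{
	struct hid_device *hid;
	int rv;

	hid = hid_allocate_device();
	if (IS_ERR(hid))
		return PTR_ERR(hid);

	rv = hid_add_device(hid);
	if (rv) {
		hid_destroy_device(hid);
		return rv;
	}

	return 0;
}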
= 1; @@ -159,6 +164,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev, return; } + if (!dev->ishtp_dma_tx_map) { + dev_err(dev->devc, "Fail to allocate Tx map\n"); + return; + } + i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE; spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags); for (j = 0; j < acked_slots; j++) { diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 3c55073136064263c0b1175cbcce60acd81d34ba..840634e0f1e3cc46235cf456a04e547f2be65ba8 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -496,12 +497,13 @@ static int uhid_dev_create2(struct uhid_device *uhid, goto err_free; } - len = min(sizeof(hid->name), sizeof(ev->u.create2.name)); - strlcpy(hid->name, ev->u.create2.name, len); - len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)); - strlcpy(hid->phys, ev->u.create2.phys, len); - len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)); - strlcpy(hid->uniq, ev->u.create2.uniq, len); + /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */ + len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1; + strncpy(hid->name, ev->u.create2.name, len); + len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1; + strncpy(hid->phys, ev->u.create2.phys, len); + len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1; + strncpy(hid->uniq, ev->u.create2.uniq, len); hid->ll_driver = &uhid_hid_driver; hid->bus = ev->u.create2.bus; @@ -722,6 +724,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer, switch (uhid->input_buf.type) { case UHID_CREATE: + /* + * 'struct uhid_create_req' contains a __user pointer which is + * copied from, so it's unsafe to allow this with elevated + * privileges (e.g. from a setuid binary) or via kernel_write(). 
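A sketch of the copy pattern the uhid hunk above switches to; struct example_dst/example_src and example_copy_name() are hypothetical. The point is that the destination is already zeroed, so strncpy() bounded to size - 1 keeps a terminating NUL while never running strlen() over a possibly unterminated, user-supplied source the way strlcpy() would.

#include <linux/kernel.h>
#include <linux/string.h>

struct example_dst {
	char name[128];		/* zero-initialized (e.g. from kzalloc()) */
};

struct example_src {
	char name[128];		/* not guaranteed to be NUL-terminated */
};

/* Copy at most sizeof(dst->name) - 1 bytes; the pre-existing zero byte
 * at the end of the destination stays in place even when the source
 * fills its buffer completely. */
static void example_copy_name(struct example_dst *dst,
			      const struct example_src *src)
{
	size_t len = min(sizeof(dst->name), sizeof(src->name)) - 1;

	strncpy(dst->name, src->name, len);
}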
+ */ + if (file->f_cred != current_cred() || uaccess_kernel()) { + pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n", + task_tgid_vnr(current), current->comm); + ret = -EACCES; + goto unlock; + } ret = uhid_dev_create(uhid, &uhid->input_buf); break; case UHID_CREATE2: diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 11103efebbaa86e5bf156de6a40544f0278026ca..0502696b2138e57ff9feee446a8e17bc9caa48b6 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -377,7 +377,7 @@ static int hid_submit_ctrl(struct hid_device *hid) raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report; dir = usbhid->ctrl[usbhid->ctrltail].dir; - len = ((report->size - 1) >> 3) + 1 + (report->id > 0); + len = hid_report_len(report); if (dir == USB_DIR_OUT) { usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0); usbhid->urbctrl->transfer_buffer_length = len; @@ -506,7 +506,7 @@ static void hid_ctrl(struct urb *urb) if (unplug) { usbhid->ctrltail = usbhid->ctrlhead; - } else { + } else if (usbhid->ctrlhead != usbhid->ctrltail) { usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1); if (usbhid->ctrlhead != usbhid->ctrltail && @@ -1206,9 +1206,20 @@ static void usbhid_stop(struct hid_device *hid) } clear_bit(HID_STARTED, &usbhid->iofl); + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); + while (usbhid->ctrltail != usbhid->ctrlhead) { + if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) { + kfree(usbhid->ctrl[usbhid->ctrltail].raw_report); + usbhid->ctrl[usbhid->ctrltail].raw_report = NULL; + } + + usbhid->ctrltail = (usbhid->ctrltail + 1) & + (HID_CONTROL_FIFO_SIZE - 1); + } spin_unlock_irq(&usbhid->lock); + usb_kill_urb(usbhid->urbin); usb_kill_urb(usbhid->urbout); usb_kill_urb(usbhid->urbctrl); diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 23872d08308cdb5857d53b5bcdf907e20d74c345..5a949ca42b1d06c4279e08cbe0674f7fa85ef676 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file) spin_unlock_irq(&list->hiddev->list_lock); mutex_lock(&hiddev->existancelock); + /* + * recheck exist with existance lock held to + * avoid opening a disconnected device + */ + if (!list->hiddev->exist) { + res = -ENODEV; + goto bail_unlock; + } if (!list->hiddev->open++) if (list->hiddev->exist) { struct hid_device *hid = hiddev->hid; @@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file) hid_hw_power(hid, PM_HINT_NORMAL); bail_unlock: mutex_unlock(&hiddev->existancelock); + + spin_lock_irq(&list->hiddev->list_lock); + list_del(&list->node); + spin_unlock_irq(&list->hiddev->list_lock); bail: file->private_data = NULL; vfree(list); @@ -512,14 +524,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (cmd == HIDIOCGCOLLECTIONINDEX) { if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = + array_index_nospec(uref->usage_index, + field->maxusage); } else if (uref->usage_index >= field->report_count) goto inval; } - if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; + if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { + if (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + 
uref_multi->num_values > + field->report_count) + goto inval; + + uref->usage_index = + array_index_nospec(uref->usage_index, + field->report_count - + uref_multi->num_values); + } switch (cmd) { case HIDIOCGUSAGE: diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 3c37c3cbf6f10013b928e18c1ef245f9eac3d295..9c0900c35b236b71b9b89ab5d33a922f2f15c7b9 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -205,6 +205,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac, } } +/* + * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes + * the normally-helpful work of 'hid_snto32' for fields that use signed + * ranges for questionable reasons. + */ +static inline __u32 wacom_s32tou(s32 value, __u8 n) +{ + switch (n) { + case 8: return ((__u8)value); + case 16: return ((__u16)value); + case 32: return ((__u32)value); + } + return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value; +} + extern const struct hid_device_id wacom_ids[]; void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 0bdd85d486feeb2fec33f7651f201df146c8cd71..a3cc30d15629267b2b5ec96c6238a83b70645a03 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -91,7 +91,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, } static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, - struct hid_report *report, u8 *raw_data, int size) + struct hid_report *report, u8 *raw_data, int report_size) { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; @@ -152,7 +152,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, if (flush) wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); else if (insert) - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size); + wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, + raw_data, report_size); return insert && !flush; } @@ -275,18 +276,23 @@ static void wacom_feature_mapping(struct hid_device *hdev, wacom_hid_usage_quirk(hdev, field, usage); switch (equivalent_usage) { + case WACOM_HID_WD_TOUCH_RING_SETTING: + wacom->generic_has_leds = true; + break; case HID_DG_CONTACTMAX: /* leave touch_max as is if predefined */ if (!features->touch_max) { /* read manually */ - data = kzalloc(2, GFP_KERNEL); + n = hid_report_len(field->report); + data = hid_alloc_report_buf(field->report, GFP_KERNEL); if (!data) break; data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, - data, 2, WAC_CMD_RETRIES); - if (ret == 2) { - features->touch_max = data[1]; + data, n, WAC_CMD_RETRIES); + if (ret == n) { + ret = hid_report_raw_event(hdev, + HID_FEATURE_REPORT, data, n, 0); } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " @@ -689,7 +695,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, * Skip the query for this type and modify defaults based on * interface number. 
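The hiddev change above clamps the usage index with array_index_nospec() after the architectural bounds check, so the index cannot be used speculatively past the array. A minimal sketch of the same pattern with hypothetical names.

#include <linux/nospec.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Bounds-check first, then clamp the index for the speculative path
 * before using it to address the array. */
static int example_read_usage(const s32 *values, unsigned int nvalues,
			      unsigned int index, s32 *out)
{
	if (index >= nvalues)
		return -EINVAL;

	index = array_index_nospec(index, nvalues);
	*out = values[index];
	return 0;
}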
*/ - if (features->type == WIRELESS) { + if (features->type == WIRELESS && intf) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else @@ -2142,13 +2148,13 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; - char name[WACOM_NAME_MAX]; + char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */ /* Generic devices name unspecified */ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { char *product_name = wacom->hdev->name; - if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) { + if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); product_name = dev->product; @@ -2379,6 +2385,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); + if (!usbdev) + return; + /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); @@ -2658,8 +2667,6 @@ static void wacom_mode_change_work(struct work_struct *work) static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; @@ -2696,8 +2703,14 @@ static int wacom_probe(struct hid_device *hdev, wacom_wac->hid_data.inputmode = -1; wacom_wac->mode_report = -1; - wacom->usbdev = dev; - wacom->intf = intf; + if (hid_is_usb(hdev)) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); + + wacom->usbdev = dev; + wacom->intf = intf; + } + mutex_init(&wacom->lock); INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); INIT_WORK(&wacom->wireless_work, wacom_wireless_work); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index e0a06be5ef5c0898d645464e807fd762c52e041f..77bb46948eea83251f98b9b8061f0f26f1a0fd0c 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -255,7 +255,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom) static int wacom_dtus_irq(struct wacom_wac *wacom) { - char *data = wacom->data; + unsigned char *data = wacom->data; struct input_dev *input = wacom->pen_input; unsigned short prox, pressure = 0; @@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) */ buttons = (data[4] << 1) | (data[3] & 0x01); } else if (features->type == CINTIQ_COMPANION_2) { - /* d-pad right -> data[4] & 0x10 - * d-pad up -> data[4] & 0x20 - * d-pad left -> data[4] & 0x40 - * d-pad down -> data[4] & 0x80 - * d-pad center -> data[3] & 0x01 + /* d-pad right -> data[2] & 0x10 + * d-pad up -> data[2] & 0x20 + * d-pad left -> data[2] & 0x40 + * d-pad down -> data[2] & 0x80 + * d-pad center -> data[1] & 0x01 */ buttons = ((data[2] >> 4) << 7) | - ((data[1] & 0x04) << 6) | + ((data[1] & 0x04) << 4) | ((data[2] & 0x0F) << 2) | (data[1] & 0x03); } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { @@ -576,7 +576,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) strip2 = ((data[3] & 0x1f) << 8) | data[4]; } - prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) | + prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) | (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2; 
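The pad-prox computation above switches from ~0 to ~0U before shifting. A one-line sketch of why: left-shifting the signed all-ones value (-1) is undefined behaviour in C, while the unsigned form yields a well-defined mask of the n low bits. Assumes 0 < n < 32.

/* Build a mask of the n low bits without shifting a negative value. */
static inline unsigned int example_low_bits_mask(unsigned int n)
{
	return ~(~0U << n);
}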
wacom_report_numbered_buttons(input, nbuttons, buttons); @@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom) y >>= 1; distance >>= 1; } + if (features->type == INTUOSHT2) + distance = features->distance_max - distance; input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_DISTANCE, distance); @@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) input_report_key(input, BTN_BASE2, (data[11] & 0x02)); if (data[12] & 0x80) - input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); + input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1); else input_report_abs(input, ABS_WHEEL, 0); @@ -1234,13 +1236,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) /* Add back in missing bits of ID for non-USI pens */ wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF; } - wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0])); for (i = 0; i < pen_frames; i++) { unsigned char *frame = &data[i*pen_frame_len + 1]; bool valid = frame[0] & 0x80; bool prox = frame[0] & 0x40; bool range = frame[0] & 0x20; + bool invert = frame[0] & 0x10; if (!valid) continue; @@ -1249,9 +1251,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) wacom->shared->stylus_in_proximity = false; wacom_exit_report(wacom); input_sync(pen_input); + + wacom->tool[0] = 0; + wacom->id[0] = 0; + wacom->serial[0] = 0; return; } + if (range) { + if (!wacom->tool[0]) { /* first in range */ + /* Going into range select tool */ + if (invert) + wacom->tool[0] = BTN_TOOL_RUBBER; + else if (wacom->id[0]) + wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]); + else + wacom->tool[0] = BTN_TOOL_PEN; + } + input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); @@ -1273,23 +1290,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) get_unaligned_le16(&frame[11])); } } - input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); - if (wacom->features.type == INTUOSP2_BT) { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[13] : wacom->features.distance_max); - } else { - input_report_abs(pen_input, ABS_DISTANCE, - range ? frame[7] : wacom->features.distance_max); - } - input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01); - input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); - input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + if (wacom->tool[0]) { + input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); + if (wacom->features.type == INTUOSP2_BT) { + input_report_abs(pen_input, ABS_DISTANCE, + range ? frame[13] : wacom->features.distance_max); + } else { + input_report_abs(pen_input, ABS_DISTANCE, + range ? 
frame[7] : wacom->features.distance_max); + } - input_report_key(pen_input, wacom->tool[0], prox); - input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); - input_report_abs(pen_input, ABS_MISC, - wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09); + input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02); + input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04); + + input_report_key(pen_input, wacom->tool[0], prox); + input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]); + input_report_abs(pen_input, ABS_MISC, + wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */ + } wacom->shared->stylus_in_proximity = prox; @@ -1351,11 +1371,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom) if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); + input_sync(touch_input); } } - input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); - input_sync(touch_input); + if (wacom->num_contacts_left == 0) { + // Be careful that we don't accidentally call input_sync with + // only a partial set of fingers of processed + input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7)); + input_sync(touch_input); + } + } static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) @@ -1363,7 +1389,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; - int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); + int buttons = data[282] | ((data[281] & 0x40) << 2); int ring = data[285] & 0x7F; bool ringstatus = data[285] & 0x80; bool prox = buttons || ringstatus; @@ -1904,8 +1930,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev, features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_BUTTONCENTER: - wacom->generic_has_leds = true; - /* fall through */ case WACOM_HID_WD_BUTTONHOME: case WACOM_HID_WD_BUTTONUP: case WACOM_HID_WD_BUTTONDOWN: @@ -2097,14 +2121,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev, bool active = wacom_wac->hid_data.inrange_state != 0; /* report prox for expresskey events */ - if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) && - wacom_wac->hid_data.pad_input_event_flag) { + if (wacom_wac->hid_data.pad_input_event_flag) { input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0); input_sync(input); if (!active) wacom_wac->hid_data.pad_input_event_flag = false; } - } static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, @@ -2249,7 +2271,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field case HID_DG_TOOLSERIALNUMBER: if (value) { wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); - wacom_wac->serial[0] |= (__u32)value; + wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size); } return; case HID_DG_TWIST: @@ -2265,15 +2287,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field return; case WACOM_HID_WD_SERIALHI: if (value) { + __u32 raw_value = wacom_s32tou(value, field->report_size); + wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); - wacom_wac->serial[0] |= ((__u64)value) << 32; + wacom_wac->serial[0] |= ((__u64)raw_value) << 32; /* * Non-USI EMR devices may contain additional tool type * information here. See WACOM_HID_WD_TOOLTYPE case for * more details. 
*/ if (value >> 20 == 1) { - wacom_wac->id[0] |= value & 0xFFFFF; + wacom_wac->id[0] |= raw_value & 0xFFFFF; } } return; @@ -2285,7 +2309,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field * bitwise OR so the complete value can be built * up over time :( */ - wacom_wac->id[0] |= value; + wacom_wac->id[0] |= wacom_s32tou(value, field->report_size); return; case WACOM_HID_WD_OFFSETLEFT: if (features->offset_left && value != features->offset_left) @@ -2511,6 +2535,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev, struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); + struct wacom_features *features = &wacom->wacom_wac.features; switch (equivalent_usage) { case HID_GD_X: @@ -2531,6 +2556,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev, case HID_DG_TIPSWITCH: wacom_wac->hid_data.tipswitch = value; break; + case HID_DG_CONTACTMAX: + features->touch_max = value; + return; } @@ -2701,9 +2729,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo if (report->type != HID_INPUT_REPORT) return -1; - if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input) - wacom_wac_pad_report(hdev, report, field); - else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) + if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) wacom_wac_pen_report(hdev, report); else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input) wacom_wac_finger_report(hdev, report); @@ -2717,7 +2743,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct hid_field *field; bool pad_in_hid_field = false, pen_in_hid_field = false, - finger_in_hid_field = false; + finger_in_hid_field = false, true_pad = false; int r; int prev_collection = -1; @@ -2733,6 +2759,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) pen_in_hid_field = true; if (WACOM_FINGER_FIELD(field)) finger_in_hid_field = true; + if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) + true_pad = true; } wacom_wac_battery_pre_report(hdev, report); @@ -2756,6 +2784,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) } wacom_wac_battery_report(hdev, report); + + if (true_pad && wacom->wacom_wac.pad_input) + wacom_wac_pad_report(hdev, report, field); } static int wacom_bpt_touch(struct wacom_wac *wacom) @@ -3335,6 +3366,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac) void wacom_setup_device_quirks(struct wacom *wacom) { + struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom->wacom_wac.features; /* The pen and pad share the same interface on most devices */ @@ -3464,6 +3496,24 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->type == REMOTE) features->device_type |= WACOM_DEVICETYPE_WL_MONITOR; + + /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots + * of things it shouldn't. Lets fix up the damage... 
+ */ + if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) { + features->quirks &= ~WACOM_QUIRK_TOOLSERIAL; + __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit); + __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit); + __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit); + __clear_bit(ABS_Z, wacom_wac->pen_input->absbit); + __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit); + __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit); + __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit); + __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit); + __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit); + __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit); + __clear_bit(EV_MSC, wacom_wac->pen_input->evbit); + } } int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, @@ -3692,7 +3742,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, 0, 5920, 4, 0); } input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40); - input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40); + input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40); /* fall through */ @@ -3813,7 +3863,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group) static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, int mask, int group) { - int button_per_group; + int group_button; /* * 21UX2 has LED group 1 to the left and LED group 0 @@ -3823,9 +3873,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count, if (wacom->wacom_wac.features.type == WACOM_21UX2) group = 1 - group; - button_per_group = button_count/wacom->led.count; + group_button = group * (button_count/wacom->led.count); + + if (wacom->wacom_wac.features.type == INTUOSP2_BT) + group_button = 8; - return mask & (1 << (group * button_per_group)); + return mask & (1 << group_button); } static void wacom_update_led(struct wacom *wacom, int button_count, int mask, diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 295fd3718caa0bed767683775c852d47de92fd7b..f67d871841c0c62859a963e2566b23835b7e2b27 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -145,6 +145,7 @@ #define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33) #define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002) #define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013) +#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032) #define WACOM_HID_UP_G9 0xff090000 #define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02) #define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11) diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 97954f575c3f691df222aa7102f01b5d296ff893..1c1a2514d6f31b8a2e8ce04293c949922b4e92ee 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support" config HYPERV tristate "Microsoft Hyper-V client drivers" - depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST + depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST select PARAVIRT help Select this option to run Linux as a Hyper-V client operating diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 741857d80da11d9a09444a2ad6c4eda32920854a..756d2fff50dcf155c02d1d9070907db2a459f8ba 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -38,7 +38,7 @@ static unsigned long virt_to_hvpfn(void *addr) { - unsigned long paddr; + phys_addr_t paddr; if (is_vmalloc_addr(addr)) paddr = page_to_phys(vmalloc_to_page(addr)) + @@ -91,11 +91,14 @@ int 
vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, unsigned long flags; int ret, err = 0; struct page *page; + unsigned int order; if (send_ringbuffer_size % PAGE_SIZE || recv_ringbuffer_size % PAGE_SIZE) return -EINVAL; + order = get_order(send_ringbuffer_size + recv_ringbuffer_size); + spin_lock_irqsave(&newchannel->lock, flags); if (newchannel->state == CHANNEL_OPEN_STATE) { newchannel->state = CHANNEL_OPENING_STATE; @@ -110,21 +113,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, /* Allocate the ring buffer */ page = alloc_pages_node(cpu_to_node(newchannel->target_cpu), - GFP_KERNEL|__GFP_ZERO, - get_order(send_ringbuffer_size + - recv_ringbuffer_size)); + GFP_KERNEL|__GFP_ZERO, order); if (!page) - page = alloc_pages(GFP_KERNEL|__GFP_ZERO, - get_order(send_ringbuffer_size + - recv_ringbuffer_size)); + page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order); if (!page) { err = -ENOMEM; goto error_set_chnstate; } - newchannel->ringbuffer_pages = page_address(page); + newchannel->ringbuffer_page = page; newchannel->ringbuffer_pagecount = (send_ringbuffer_size + recv_ringbuffer_size) >> PAGE_SHIFT; @@ -195,7 +194,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, if (newchannel->rescind) { err = -ENODEV; - goto error_free_gpadl; + goto error_clean_msglist; } ret = vmbus_post_msg(open_msg, @@ -239,8 +238,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size, error_free_pages: hv_ringbuffer_cleanup(&newchannel->outbound); hv_ringbuffer_cleanup(&newchannel->inbound); - __free_pages(page, - get_order(send_ringbuffer_size + recv_ringbuffer_size)); + __free_pages(page, order); error_set_chnstate: newchannel->state = CHANNEL_OPEN_STATE; return err; @@ -482,6 +480,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, } wait_for_completion(&msginfo->waitevent); + if (msginfo->response.gpadl_created.creation_status != 0) { + pr_err("Failed to establish GPADL: err = 0x%x\n", + msginfo->response.gpadl_created.creation_status); + + ret = -EDQUOT; + goto cleanup; + } + if (channel->rescind) { ret = -ENODEV; goto cleanup; @@ -658,8 +664,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel) hv_ringbuffer_cleanup(&channel->outbound); hv_ringbuffer_cleanup(&channel->inbound); - free_pages((unsigned long)channel->ringbuffer_pages, - get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); + __free_pages(channel->ringbuffer_page, + get_order(channel->ringbuffer_pagecount << PAGE_SHIFT)); out: return ret; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 0f0e091c117c6b4a1991573e607f9f2dec5e6196..16eb9b3f1cb1b57cb244ed5b7f02f82b18d15a13 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -447,61 +447,16 @@ void vmbus_free_channels(void) } } -/* - * vmbus_process_offer - Process the offer by creating a channel/device - * associated with this offer - */ -static void vmbus_process_offer(struct vmbus_channel *newchannel) +/* Note: the function can run concurrently for primary/sub channels. 
*/ +static void vmbus_add_channel_work(struct work_struct *work) { - struct vmbus_channel *channel; - bool fnew = true; + struct vmbus_channel *newchannel = + container_of(work, struct vmbus_channel, add_channel_work); + struct vmbus_channel *primary_channel = newchannel->primary_channel; unsigned long flags; u16 dev_type; int ret; - /* Make sure this is a new offer */ - mutex_lock(&vmbus_connection.channel_mutex); - - /* - * Now that we have acquired the channel_mutex, - * we can release the potentially racing rescind thread. - */ - atomic_dec(&vmbus_connection.offer_in_progress); - - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { - if (!uuid_le_cmp(channel->offermsg.offer.if_type, - newchannel->offermsg.offer.if_type) && - !uuid_le_cmp(channel->offermsg.offer.if_instance, - newchannel->offermsg.offer.if_instance)) { - fnew = false; - break; - } - } - - if (fnew) - list_add_tail(&newchannel->listentry, - &vmbus_connection.chn_list); - - mutex_unlock(&vmbus_connection.channel_mutex); - - if (!fnew) { - /* - * Check to see if this is a sub-channel. - */ - if (newchannel->offermsg.offer.sub_channel_index != 0) { - /* - * Process the sub-channel. - */ - newchannel->primary_channel = channel; - spin_lock_irqsave(&channel->lock, flags); - list_add_tail(&newchannel->sc_list, &channel->sc_list); - channel->num_sc++; - spin_unlock_irqrestore(&channel->lock, flags); - } else { - goto err_free_chan; - } - } - dev_type = hv_get_dev_type(newchannel); init_vp_index(newchannel, dev_type); @@ -519,27 +474,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) /* * This state is used to indicate a successful open * so that when we do close the channel normally, we - * can cleanup properly + * can cleanup properly. */ newchannel->state = CHANNEL_OPEN_STATE; - if (!fnew) { - struct hv_device *dev - = newchannel->primary_channel->device_obj; + if (primary_channel != NULL) { + /* newchannel is a sub-channel. */ + struct hv_device *dev = primary_channel->device_obj; if (vmbus_add_channel_kobj(dev, newchannel)) - goto err_free_chan; + goto err_deq_chan; + + if (primary_channel->sc_creation_callback != NULL) + primary_channel->sc_creation_callback(newchannel); - if (channel->sc_creation_callback != NULL) - channel->sc_creation_callback(newchannel); newchannel->probe_done = true; return; } /* - * Start the process of binding this offer to the driver - * We need to set the DeviceObject field before calling - * vmbus_child_dev_add() + * Start the process of binding the primary channel to the driver */ newchannel->device_obj = vmbus_device_create( &newchannel->offermsg.offer.if_type, @@ -568,13 +522,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) err_deq_chan: mutex_lock(&vmbus_connection.channel_mutex); - list_del(&newchannel->listentry); + + /* + * We need to set the flag, otherwise + * vmbus_onoffer_rescind() can be blocked. 
+ */ + newchannel->probe_done = true; + + if (primary_channel == NULL) { + list_del(&newchannel->listentry); + } else { + spin_lock_irqsave(&primary_channel->lock, flags); + list_del(&newchannel->sc_list); + spin_unlock_irqrestore(&primary_channel->lock, flags); + } + mutex_unlock(&vmbus_connection.channel_mutex); if (newchannel->target_cpu != get_cpu()) { put_cpu(); smp_call_function_single(newchannel->target_cpu, - percpu_channel_deq, newchannel, true); + percpu_channel_deq, + newchannel, true); } else { percpu_channel_deq(newchannel); put_cpu(); @@ -582,14 +551,104 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) vmbus_release_relid(newchannel->offermsg.child_relid); -err_free_chan: free_channel(newchannel); } +/* + * vmbus_process_offer - Process the offer by creating a channel/device + * associated with this offer + */ +static void vmbus_process_offer(struct vmbus_channel *newchannel) +{ + struct vmbus_channel *channel; + struct workqueue_struct *wq; + unsigned long flags; + bool fnew = true; + + mutex_lock(&vmbus_connection.channel_mutex); + + /* + * Now that we have acquired the channel_mutex, + * we can release the potentially racing rescind thread. + */ + atomic_dec(&vmbus_connection.offer_in_progress); + + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { + if (!uuid_le_cmp(channel->offermsg.offer.if_type, + newchannel->offermsg.offer.if_type) && + !uuid_le_cmp(channel->offermsg.offer.if_instance, + newchannel->offermsg.offer.if_instance)) { + fnew = false; + break; + } + } + + if (fnew) + list_add_tail(&newchannel->listentry, + &vmbus_connection.chn_list); + else { + /* + * Check to see if this is a valid sub-channel. + */ + if (newchannel->offermsg.offer.sub_channel_index == 0) { + mutex_unlock(&vmbus_connection.channel_mutex); + /* + * Don't call free_channel(), because newchannel->kobj + * is not initialized yet. + */ + kfree(newchannel); + WARN_ON_ONCE(1); + return; + } + /* + * Process the sub-channel. + */ + newchannel->primary_channel = channel; + spin_lock_irqsave(&channel->lock, flags); + list_add_tail(&newchannel->sc_list, &channel->sc_list); + spin_unlock_irqrestore(&channel->lock, flags); + } + + mutex_unlock(&vmbus_connection.channel_mutex); + + /* + * vmbus_process_offer() mustn't call channel->sc_creation_callback() + * directly for sub-channels, because sc_creation_callback() -> + * vmbus_open() may never get the host's response to the + * OPEN_CHANNEL message (the host may rescind a channel at any time, + * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind() + * may not wake up the vmbus_open() as it's blocked due to a non-zero + * vmbus_connection.offer_in_progress, and finally we have a deadlock. + * + * The above is also true for primary channels, if the related device + * drivers use sync probing mode by default. + * + * And, usually the handling of primary channels and sub-channels can + * depend on each other, so we should offload them to different + * workqueues to avoid possible deadlock, e.g. in sync-probing mode, + * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() -> + * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock + * and waits for all the sub-channels to appear, but the latter + * can't get the rtnl_lock and this blocks the handling of + * sub-channels. + */ + INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work); + wq = fnew ? 
vmbus_connection.handle_primary_chan_wq : + vmbus_connection.handle_sub_chan_wq; + queue_work(wq, &newchannel->add_channel_work); +} + /* * We use this state to statically distribute the channel interrupt load. */ static int next_numa_node_id; +/* + * init_vp_index() accesses global variables like next_numa_node_id, and + * it can run concurrently for primary channels and sub-channels: see + * vmbus_process_offer(), so we need the lock to protect the global + * variables. + */ +static DEFINE_SPINLOCK(bind_channel_to_cpu_lock); /* * Starting with Win8, we can statically distribute the incoming @@ -606,16 +665,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) bool perf_chn = vmbus_devs[dev_type].perf_device; struct vmbus_channel *primary = channel->primary_channel; int next_node; - struct cpumask available_mask; + cpumask_var_t available_mask; struct cpumask *alloced_mask; if ((vmbus_proto_version == VERSION_WS2008) || - (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { + (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) || + !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { /* * Prior to win8, all channel interrupts are * delivered on cpu 0. * Also if the channel is not a performance critical * channel, bind it to cpu 0. + * In case alloc_cpumask_var() fails, bind it to cpu 0. */ channel->numa_node = 0; channel->target_cpu = 0; @@ -623,6 +684,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) return; } + spin_lock(&bind_channel_to_cpu_lock); + /* * Based on the channel affinity policy, we will assign the NUMA * nodes. @@ -653,7 +716,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) cpumask_clear(alloced_mask); } - cpumask_xor(&available_mask, alloced_mask, + cpumask_xor(available_mask, alloced_mask, cpumask_of_node(primary->numa_node)); cur_cpu = -1; @@ -671,10 +734,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) } while (true) { - cur_cpu = cpumask_next(cur_cpu, &available_mask); + cur_cpu = cpumask_next(cur_cpu, available_mask); if (cur_cpu >= nr_cpu_ids) { cur_cpu = -1; - cpumask_copy(&available_mask, + cpumask_copy(available_mask, cpumask_of_node(primary->numa_node)); continue; } @@ -704,6 +767,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) channel->target_cpu = cur_cpu; channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu); + + spin_unlock(&bind_channel_to_cpu_lock); + + free_cpumask_var(available_mask); } static void vmbus_wait_for_unload(void) diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index f4d08c8ac7f8ff8f101cbe477826a0924b63170d..4fe117b761ce03a3d6351b86270d08853131d355 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -190,6 +190,20 @@ int vmbus_connect(void) goto cleanup; } + vmbus_connection.handle_primary_chan_wq = + create_workqueue("hv_pri_chan"); + if (!vmbus_connection.handle_primary_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + + vmbus_connection.handle_sub_chan_wq = + create_workqueue("hv_sub_chan"); + if (!vmbus_connection.handle_sub_chan_wq) { + ret = -ENOMEM; + goto cleanup; + } + INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); @@ -280,10 +294,14 @@ void vmbus_disconnect(void) */ vmbus_initiate_unload(false); - if (vmbus_connection.work_queue) { - drain_workqueue(vmbus_connection.work_queue); + if (vmbus_connection.handle_sub_chan_wq) + destroy_workqueue(vmbus_connection.handle_sub_chan_wq); + + if 
(vmbus_connection.handle_primary_chan_wq) + destroy_workqueue(vmbus_connection.handle_primary_chan_wq); + + if (vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); - } if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 748a1c4172a6466b1ffd710942a50b378c202c26..12bc9fa2111178cd59e36cf81ebc9e4c70af89b6 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -189,6 +189,17 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) int hv_synic_alloc(void) { int cpu; + struct hv_per_cpu_context *hv_cpu; + + /* + * First, zero all per-cpu memory areas so hv_synic_free() can + * detect what memory has been allocated and cleanup properly + * after any failures. + */ + for_each_present_cpu(cpu) { + hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); + memset(hv_cpu, 0, sizeof(*hv_cpu)); + } hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask), GFP_KERNEL); @@ -198,10 +209,8 @@ int hv_synic_alloc(void) } for_each_present_cpu(cpu) { - struct hv_per_cpu_context *hv_cpu - = per_cpu_ptr(hv_context.cpu_context, cpu); + hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); - memset(hv_cpu, 0, sizeof(*hv_cpu)); tasklet_init(&hv_cpu->msg_dpc, vmbus_on_msg_dpc, (unsigned long) hv_cpu); @@ -402,7 +411,6 @@ int hv_synic_cleanup(unsigned int cpu) clockevents_unbind_device(hv_cpu->clk_evt, cpu); hv_ce_shutdown(hv_cpu->clk_evt); - put_cpu_ptr(hv_cpu); } hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64); diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index b1b7880827931b3eab896c80d16865cfa207c746..d1c9a6adf485c221b5057fb56899d8f6f2992500 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -541,7 +541,6 @@ struct hv_dynmem_device { * State to synchronize hot-add. */ struct completion ol_waitevent; - bool ha_waiting; /* * This thread handles hot-add * requests from the host as well as notifying @@ -642,10 +641,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val, switch (val) { case MEM_ONLINE: case MEM_CANCEL_ONLINE: - if (dm_device.ha_waiting) { - dm_device.ha_waiting = false; - complete(&dm_device.ol_waitevent); - } + complete(&dm_device.ol_waitevent); break; case MEM_OFFLINE: @@ -731,8 +727,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, has->covered_end_pfn += processed_pfn; spin_unlock_irqrestore(&dm_device.ha_lock, flags); - init_completion(&dm_device.ol_waitevent); - dm_device.ha_waiting = !memhp_auto_online; + reinit_completion(&dm_device.ol_waitevent); nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); ret = add_memory(nid, PFN_PHYS((start_pfn)), @@ -758,20 +753,19 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, } /* - * Wait for the memory block to be onlined when memory onlining - * is done outside of kernel (memhp_auto_online). Since the hot - * add has succeeded, it is ok to proceed even if the pages in - * the hot added region have not been "onlined" within the - * allowed time. + * Wait for memory to get onlined. If the kernel onlined the + * memory when adding it, this will return directly. Otherwise, + * it will wait for user space to online the memory. This helps + * to avoid adding memory faster than it is getting onlined. As + * adding succeeded, it is ok to proceed even if the memory was + * not onlined in time. 
*/ - if (dm_device.ha_waiting) - wait_for_completion_timeout(&dm_device.ol_waitevent, - 5*HZ); + wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ); post_status(&dm_device); } } -static void hv_online_page(struct page *pg) +static void hv_online_page(struct page *pg, unsigned int order) { struct hv_hotadd_state *has; unsigned long flags; @@ -780,10 +774,11 @@ static void hv_online_page(struct page *pg) spin_lock_irqsave(&dm_device.ha_lock, flags); list_for_each_entry(has, &dm_device.ha_region_list, list) { /* The page belongs to a different HAS. */ - if ((pfn < has->start_pfn) || (pfn >= has->end_pfn)) + if ((pfn < has->start_pfn) || + (pfn + (1UL << order) > has->end_pfn)) continue; - hv_page_online_one(has, pg); + hv_bring_pgs_online(has, pfn, 1UL << order); break; } spin_unlock_irqrestore(&dm_device.ha_lock, flags); @@ -888,12 +883,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, pfn_cnt -= pgs_ol; /* * Check if the corresponding memory block is already - * online by checking its last previously backed page. - * In case it is we need to bring rest (which was not - * backed previously) online too. + * online. It is possible to observe struct pages still + * being uninitialized here so check section instead. + * In case the section is online we need to bring the + * rest of pfns (which were not backed previously) + * online too. */ if (start_pfn > has->start_pfn && - !PageReserved(pfn_to_page(start_pfn - 1))) + online_section_nr(pfn_to_section_nr(start_pfn))) hv_bring_pgs_online(has, start_pfn, pgs_ol); } @@ -1608,6 +1605,7 @@ static int balloon_probe(struct hv_device *dev, #ifdef CONFIG_MEMORY_HOTPLUG set_online_page_callback(&hv_online_page); + init_completion(&dm_device.ol_waitevent); register_memory_notifier(&hv_memory_nb); #endif diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index 5eed1e7da15c4c2eff2302504078db826ab5bd7e..d6106e1a0d4af597d04cb833215c3b27c0b2aa0d 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -353,7 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op) out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; - default: + /* fallthrough */ + + case KVP_OP_GET_IP_INFO: utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, MAX_ADAPTER_ID_SIZE, UTF16_LITTLE_ENDIAN, @@ -406,6 +408,10 @@ kvp_send_key(struct work_struct *dummy) process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO); break; case KVP_OP_GET_IP_INFO: + /* + * We only need to pass on the info of operation, adapter_id + * and addr_family to the userland kvp daemon. + */ process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO); break; case KVP_OP_SET: @@ -421,7 +427,7 @@ kvp_send_key(struct work_struct *dummy) UTF16_LITTLE_ENDIAN, message->body.kvp_set.data.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1; - break; + break; case REG_U32: /* @@ -446,7 +452,10 @@ kvp_send_key(struct work_struct *dummy) break; } - case KVP_OP_GET: + + /* + * The key is always a string - utf16 encoding. 
+ */ message->body.kvp_set.data.key_size = utf16s_to_utf8s( (wchar_t *)in_msg->body.kvp_set.data.key, @@ -454,7 +463,18 @@ kvp_send_key(struct work_struct *dummy) UTF16_LITTLE_ENDIAN, message->body.kvp_set.data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; - break; + + break; + + case KVP_OP_GET: + message->body.kvp_get.data.key_size = + utf16s_to_utf8s( + (wchar_t *)in_msg->body.kvp_get.data.key, + in_msg->body.kvp_get.data.key_size, + UTF16_LITTLE_ENDIAN, + message->body.kvp_get.data.key, + HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; + break; case KVP_OP_DELETE: message->body.kvp_delete.key_size = @@ -464,12 +484,12 @@ kvp_send_key(struct work_struct *dummy) UTF16_LITTLE_ENDIAN, message->body.kvp_delete.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; - break; + break; case KVP_OP_ENUMERATE: message->body.kvp_enum_data.index = in_msg->body.kvp_enum_data.index; - break; + break; } kvp_transaction.state = HVUTIL_USERSPACE_REQ; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 72eaba3d50fc26da141993c5f1eadb9916d1f94d..87d3d7da78f876198e0a160f3c39b60044fbcad4 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -335,7 +335,14 @@ struct vmbus_connection { struct list_head chn_list; struct mutex channel_mutex; + /* + * An offer message is handled first on the work_queue, and then + * is further handled on handle_primary_chan_wq or + * handle_sub_chan_wq. + */ struct workqueue_struct *work_queue; + struct workqueue_struct *handle_primary_chan_wq; + struct workqueue_struct *handle_sub_chan_wq; }; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 3e90eb91db45a76c22134d8991ab2bd0a143aa71..6cb45f256107e019e283d71c0068c1e35244b486 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, } /* Get various debug metrics for the specified ring buffer. 
*/ -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, - struct hv_ring_buffer_debug_info *debug_info) +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, + struct hv_ring_buffer_debug_info *debug_info) { u32 bytes_avail_towrite; u32 bytes_avail_toread; - if (ring_info->ring_buffer) { - hv_get_ringbuffer_availbytes(ring_info, - &bytes_avail_toread, - &bytes_avail_towrite); - - debug_info->bytes_avail_toread = bytes_avail_toread; - debug_info->bytes_avail_towrite = bytes_avail_towrite; - debug_info->current_read_index = - ring_info->ring_buffer->read_index; - debug_info->current_write_index = - ring_info->ring_buffer->write_index; - debug_info->current_interrupt_mask = - ring_info->ring_buffer->interrupt_mask; - } + if (!ring_info->ring_buffer) + return -EINVAL; + + hv_get_ringbuffer_availbytes(ring_info, + &bytes_avail_toread, + &bytes_avail_towrite); + debug_info->bytes_avail_toread = bytes_avail_toread; + debug_info->bytes_avail_towrite = bytes_avail_towrite; + debug_info->current_read_index = ring_info->ring_buffer->read_index; + debug_info->current_write_index = ring_info->ring_buffer->write_index; + debug_info->current_interrupt_mask + = ring_info->ring_buffer->interrupt_mask; + return 0; } EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index c71cc857b649ddc23f3289e23ec43e898901fa1a..796764ff858157af1d7334055244156dd08c3034 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -313,10 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", outbound.current_interrupt_mask); } static DEVICE_ATTR_RO(out_intr_mask); @@ -326,10 +332,15 @@ static ssize_t out_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_read_index); } static DEVICE_ATTR_RO(out_read_index); @@ -340,10 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.current_write_index); } static DEVICE_ATTR_RO(out_write_index); @@ -354,10 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_toread); } static 
DEVICE_ATTR_RO(out_read_bytes_avail); @@ -368,10 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info outbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, + &outbound); + if (ret < 0) + return ret; return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); } static DEVICE_ATTR_RO(out_write_bytes_avail); @@ -381,10 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_interrupt_mask); } static DEVICE_ATTR_RO(in_intr_mask); @@ -394,10 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_read_index); } static DEVICE_ATTR_RO(in_read_index); @@ -407,10 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.current_write_index); } static DEVICE_ATTR_RO(in_write_index); @@ -421,10 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_toread); } static DEVICE_ATTR_RO(in_read_bytes_avail); @@ -435,10 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_ring_buffer_debug_info inbound; + int ret; if (!hv_dev->channel) return -ENODEV; - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); } static DEVICE_ATTR_RO(in_write_bytes_avail); @@ -1417,8 +1468,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } kobject_uevent(kobj, KOBJ_ADD); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 81da17a42dc92830bef2968a06d153a51c759f59..1ef7c384d652ed34e4379598e7daf2ca809103d3 100644 --- a/drivers/hwmon/Kconfig +++ 
b/drivers/hwmon/Kconfig @@ -1720,6 +1720,15 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. + config SENSORS_VIA686A tristate "VIA686A" depends on PCI @@ -1755,6 +1764,7 @@ config SENSORS_VT8231 config SENSORS_W83773G tristate "Nuvoton W83773G" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the Nuvoton W83773G hardware monitoring chip. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 93f7f41ea4ad514f88075ccd631358ae6b17ac93..4fe7d2f76f0d6b39ab73d91dce5130409e397a51 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -168,6 +168,7 @@ obj-$(CONFIG_SENSORS_TMP401) += tmp401.o obj-$(CONFIG_SENSORS_TMP421) += tmp421.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP)+= zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 34e45b97629ed73869baf28edf3acef0376290c5..2f2fb19669580a1de5a17939e3bc0e9a87be5951 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -694,8 +694,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource) if (resource->caps.flags & POWER_METER_CAN_CAP) { if (!can_cap_in_hardware()) { - dev_err(&resource->acpi_dev->dev, - "Ignoring unsafe software power cap!\n"); + dev_warn(&resource->acpi_dev->dev, + "Ignoring unsafe software power cap!\n"); goto skip_unsafe_cap; } diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index bd2ca315c9d8bfeab1e937c8fc3f2f105ff04678..5abb28cd81bf8d333165e0e487bf91e59d791646 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c @@ -184,7 +184,7 @@ static ssize_t adc128_set_in(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); /* 10 mV LSB on limit registers */ - regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255); + regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10); data->in[index][nr] = regval << 4; reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr); i2c_smbus_write_byte_data(data->client, reg, regval); @@ -222,7 +222,7 @@ static ssize_t adc128_set_temp(struct device *dev, return err; mutex_lock(&data->update_lock); - regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); data->temp[index] = regval << 1; i2c_smbus_write_byte_data(data->client, index == 1 ? 
ADC128_REG_TEMP_MAX diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 10645c9bb7be14abd077bb73418503067eb21157..22ce0d9bf6822bae427e3aff1957ec19470509f5 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -256,10 +256,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) */ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) { for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) { - if (host_bridge->device == tjmax_pci_table[i].device) + if (host_bridge->device == tjmax_pci_table[i].device) { + pci_dev_put(host_bridge); return tjmax_pci_table[i].tjmax; + } } } + pci_dev_put(host_bridge); for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { if (strstr(c->x86_model_id, tjmax_table[i].id)) @@ -532,6 +535,10 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) { struct temp_data *tdata = pdata->core_data[indx]; + /* if we errored on add then this is already gone */ + if (!tdata) + return; + /* Remove the sysfs attributes */ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group); diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c index 73c681162653bf5ff5be48b3d0f1d327f1511f68..623736d2a7c1d75b89b2729e0a23c01d5de547eb 100644 --- a/drivers/hwmon/f71805f.c +++ b/drivers/hwmon/f71805f.c @@ -96,17 +96,23 @@ superio_select(int base, int ld) outb(ld, base + 1); } -static inline void +static inline int superio_enter(int base) { + if (!request_muxed_region(base, 2, DRVNAME)) + return -EBUSY; + outb(0x87, base); outb(0x87, base); + + return 0; } static inline void superio_exit(int base) { outb(0xaa, base); + release_region(base, 2); } /* @@ -1561,7 +1567,7 @@ static int __init f71805f_device_add(unsigned short address, static int __init f71805f_find(int sioaddr, unsigned short *address, struct f71805f_sio_data *sio_data) { - int err = -ENODEV; + int err; u16 devid; static const char * const names[] = { @@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address, "F71872F/FG or F71806F/FG", }; - superio_enter(sioaddr); + err = superio_enter(sioaddr); + if (err) + return err; + err = -ENODEV; devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) goto exit; diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 33d51281272bb066762d80b46161b4f7f44113b6..6b3559f58b67d8861f04add30aa35e191a5f945b 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -619,7 +619,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (err) goto free_hwmon; - if (dev && chip && chip->ops->read && + if (dev && dev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip && (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { const struct hwmon_channel_info **info = chip->info; @@ -635,8 +635,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (info[i]->config[j] & HWMON_T_INPUT) { err = hwmon_thermal_add_sensor(dev, hwdev, j); - if (err) - goto free_device; + if (err) { + device_unregister(hdev); + goto ida_remove; + } } } } @@ -644,8 +646,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; -free_device: - device_unregister(hdev); free_hwmon: kfree(hwdev); ida_remove: diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c index ab72cabf5a9556cf38025683dd5a2bc28112e028..e289c845f970440df1d2e6f4b99cd7394b2c3b29 100644 --- a/drivers/hwmon/ibmpex.c +++ b/drivers/hwmon/ibmpex.c @@ -517,6 +517,7 @@ static void 
ibmpex_register_bmc(int iface, struct device *dev) return; out_register: + list_del(&data->list); hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c index 83472808c8163275d3f95e136b8aa3eb0291ead2..64d05edff130470d4d85c0c429cfdb5312124e3f 100644 --- a/drivers/hwmon/ibmpowernv.c +++ b/drivers/hwmon/ibmpowernv.c @@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr, return sprintf(buf, "%s\n", sdata->label); } -static int __init get_logical_cpu(int hwcpu) +static int get_logical_cpu(int hwcpu) { int cpu; @@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu) return -ENOENT; } -static void __init make_sensor_label(struct device_node *np, - struct sensor_data *sdata, - const char *label) +static void make_sensor_label(struct device_node *np, + struct sensor_data *sdata, const char *label) { u32 id; size_t n; diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index 71d3445ba869c85654ae3dcaf3a5460e8fadb268..07ee19573b3f0f8d65d6a64710fb1bea557b42eb 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, break; case INA2XX_CURRENT: /* signed register, result in mA */ - val = regval * data->current_lsb_uA; + val = (s16)regval * data->current_lsb_uA; val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: @@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client, } data->groups[group++] = &ina2xx_group; - if (id->driver_data == ina226) + if (chip == ina226) data->groups[group++] = &ina226_group; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, @@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client, return PTR_ERR(hwmon_dev); dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", - id->name, data->rshunt); + client->name, data->rshunt); return 0; } diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index e6b49500c52aedc8751afd7114ed2283e505ea56..8c9555313fc3db87be75921a24280bdf1f95e26d 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -38,9 +38,9 @@ #define INA3221_WARN3 0x0c #define INA3221_MASK_ENABLE 0x0f -#define INA3221_CONFIG_MODE_SHUNT BIT(1) -#define INA3221_CONFIG_MODE_BUS BIT(2) -#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3) +#define INA3221_CONFIG_MODE_SHUNT BIT(0) +#define INA3221_CONFIG_MODE_BUS BIT(1) +#define INA3221_CONFIG_MODE_CONTINUOUS BIT(2) #define INA3221_RSHUNT_DEFAULT 10000 diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index bb15d7816a294f0fd3c9b99d4bc04195e374698e..11daf27bd7cafd86909c1a1b6386fc2da2bef33b 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,8 +1,15 @@ /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch + * Copyright (c) 2020 Guenter Roeck * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. 
* * This driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License; either @@ -17,12 +24,13 @@ * along with this driver; if not, see . */ +#include #include #include -#include #include #include #include +#include #include #include @@ -41,31 +49,23 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3 #endif -#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3 -#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 -#endif - -#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 -#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb -#endif - /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -76,8 +76,21 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64 #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 -/* F17h M01h Access througn SMN */ -#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +/* Common for Zen CPU families (Family 17h and 18h and 19h) */ +#define ZEN_REPORTED_TEMP_CTRL_BASE 0x00059800 + +#define ZEN_CCD_TEMP(offset, x) (ZEN_REPORTED_TEMP_CTRL_BASE + \ + (offset) + ((x) * 4)) +#define ZEN_CCD_TEMP_VALID BIT(11) +#define ZEN_CCD_TEMP_MASK GENMASK(10, 0) + +#define ZEN_CUR_TEMP_SHIFT 21 +#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19) + +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; struct k10temp_data { struct pci_dev *pdev; @@ -85,9 +98,19 @@ struct k10temp_data { void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); int temp_offset; u32 temp_adjust_mask; - bool show_tdie; + u32 show_temp; + bool is_zen; + u32 ccd_offset; + void *priv; }; +#define TCTL_BIT 0 +#define TDIE_BIT 1 +#define TCCD_BIT(x) ((x) + 2) + +#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel)) +#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT) + struct tctl_offset { u8 model; char const *id; @@ -136,136 +159,178 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) +static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval) { amd_smn_read(amd_pci_dev_to_node_id(pdev), - F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); + ZEN_REPORTED_TEMP_CTRL_BASE, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, ®val); - temp = (regval >> 21) * 125; + temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - if (temp > data->temp_offset) - temp -= data->temp_offset; - else - temp = 0; - - return sprintf(buf, "%u\n", temp); -} +static const char *k10temp_temp_label[] 
= { + "Tctl", + "Tdie", + "Tccd1", + "Tccd2", + "Tccd3", + "Tccd4", + "Tccd5", + "Tccd6", + "Tccd7", + "Tccd8", +}; -static ssize_t temp2_input_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_labels(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) { - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - return sprintf(buf, "%u\n", temp); + switch (type) { + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_label_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static void hygon_read_temp(struct k10temp_data *data, int channel, + u32 *regval) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - - return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie"); -} + struct hygon_private *h_priv; -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", 70 * 1000); + h_priv = (struct hygon_private *)data->priv; + if ((channel - 2) < h_priv->index_2nd) + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + channel - 2 - h_priv->index_2nd), + regval); } -static ssize_t show_temp_crit(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct k10temp_data *data = dev_get_drvdata(dev); - int show_hyst = attr->index; u32 regval; - int value; - data->read_htcreg(data->pdev, ®val); - value = ((regval >> 16) & 0x7f) * 500 + 52000; - if (show_hyst) - value -= ((regval >> 24) & 0xf) * 500; - return sprintf(buf, "%d\n", value); + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tctl */ + *val = get_raw_temp(data); + if (*val < 0) + *val = 0; + break; + case 1: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; + if (*val < 0) + *val = 0; + break; + case 2 ... 
9: /* Tccd{1-8} */ + if (hygon_f18h_m4h()) + hygon_read_temp(data, channel, ®val); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + ®val); + *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp_max: + *val = 70 * 1000; + break; + case hwmon_temp_crit: + data->read_htcreg(data->pdev, ®val); + *val = ((regval >> 16) & 0x7f) * 500 + 52000; + break; + case hwmon_temp_crit_hyst: + data->read_htcreg(data->pdev, ®val); + *val = (((regval >> 16) & 0x7f) + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static DEVICE_ATTR_RO(temp1_input); -static DEVICE_ATTR_RO(temp1_max); -static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1); - -static SENSOR_DEVICE_ATTR(temp1_label, 0444, temp_label_show, NULL, 0); -static DEVICE_ATTR_RO(temp2_input); -static SENSOR_DEVICE_ATTR(temp2_label, 0444, temp_label_show, NULL, 1); +static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} -static umode_t k10temp_is_visible(struct kobject *kobj, - struct attribute *attr, int index) +static umode_t k10temp_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct device *dev = container_of(kobj, struct device, kobj); - struct k10temp_data *data = dev_get_drvdata(dev); + const struct k10temp_data *data = _data; struct pci_dev *pdev = data->pdev; u32 reg; - switch (index) { - case 0 ... 1: /* temp1_input, temp1_max */ - default: - break; - case 2 ... 3: /* temp1_crit, temp1_crit_hyst */ - if (!data->read_htcreg) - return 0; - - pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, - ®); - if (!(reg & NB_CAP_HTC)) - return 0; - - data->read_htcreg(data->pdev, ®); - if (!(reg & HTC_ENABLE)) - return 0; - break; - case 4 ... 
6: /* temp1_label, temp2_input, temp2_label */ - if (!data->show_tdie) + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + if (!HAVE_TEMP(data, channel)) + return 0; + break; + case hwmon_temp_max: + if (channel || data->is_zen) + return 0; + break; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel || !data->read_htcreg) + return 0; + + pci_read_config_dword(pdev, + REG_NORTHBRIDGE_CAPABILITIES, + ®); + if (!(reg & NB_CAP_HTC)) + return 0; + + data->read_htcreg(data->pdev, ®); + if (!(reg & HTC_ENABLE)) + return 0; + break; + case hwmon_temp_label: + /* Show temperature labels only on Zen CPUs */ + if (!data->is_zen || !HAVE_TEMP(data, channel)) + return 0; + break; + default: return 0; + } break; + default: + return 0; } - return attr->mode; + return 0444; } -static struct attribute *k10temp_attrs[] = { - &dev_attr_temp1_input.attr, - &dev_attr_temp1_max.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &dev_attr_temp2_input.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL -}; - -static const struct attribute_group k10temp_group = { - .attrs = k10temp_attrs, - .is_visible = k10temp_is_visible, -}; -__ATTRIBUTE_GROUPS(k10temp); - static bool has_erratum_319(struct pci_dev *pdev) { u32 pkg_type, reg_dram_cfg; @@ -300,15 +365,90 @@ static bool has_erratum_319(struct pci_dev *pdev) (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } -static int k10temp_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static const struct hwmon_channel_info *k10temp_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops k10temp_hwmon_ops = { + .is_visible = k10temp_is_visible, + .read = k10temp_read, + .read_string = k10temp_read_labels, +}; + +static const struct hwmon_chip_info k10temp_chip_info = { + .ops = &k10temp_hwmon_ops, + .info = k10temp_info, +}; + +static void k10temp_get_ccd_support(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + u32 regval; + int i; + + for (i = 0; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(data->ccd_offset, i), ®val); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + +static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + struct hygon_private *h_priv; + u32 regval; + int i; + + h_priv = (struct hygon_private *)data->priv; + for (i = h_priv->index_2nd; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + i - h_priv->index_2nd), + ®val); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + +static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct hygon_private *h_priv; struct k10temp_data *data; struct device *hwmon_dev; + u8 df_id; int i; + if (hygon_f18h_m4h()) { + if (get_df_id(pdev, &df_id)) { + pr_err("Get DF ID failed.\n"); + return -ENODEV; + } + + /* + * The temperature 
should be get from the devices + * with id < 4. + */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -324,15 +464,62 @@ static int k10temp_probe(struct pci_dev *pdev, return -ENOMEM; data->pdev = pdev; + data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */ - if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || - boot_cpu_data.x86_model == 0x70)) { + if (boot_cpu_data.x86 == 0x15 && + ((boot_cpu_data.x86_model & 0xf0) == 0x60 || + (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; } else if (boot_cpu_data.x86 == 0x17) { - data->temp_adjust_mask = 0x80000; - data->read_tempreg = read_tempreg_nb_f17; - data->show_tdie = true; + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + switch (boot_cpu_data.x86_model) { + case 0x1: /* Zen */ + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ + data->ccd_offset = 0x154; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ + data->ccd_offset = 0x154; + k10temp_get_ccd_support(pdev, data, 8); + break; + } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } + } else if (boot_cpu_data.x86 == 0x19) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + switch (boot_cpu_data.x86_model) { + case 0x0 ... 
0x1: /* Zen3 */ + data->ccd_offset = 0x154; + k10temp_get_ccd_support(pdev, data, 8); + break; + } } else { data->read_htcreg = read_htcreg_pci; data->read_tempreg = read_tempreg_pci; @@ -343,13 +530,15 @@ static int k10temp_probe(struct pci_dev *pdev, if (boot_cpu_data.x86 == entry->model && strstr(boot_cpu_data.x86_model_id, entry->id)) { + data->show_temp |= BIT(TDIE_BIT); /* show Tdie */ data->temp_offset = entry->offset; break; } } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, - k10temp_groups); + hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data, + &k10temp_chip_info, + NULL); return PTR_ERR_OR_ZERO(hwmon_dev); } @@ -366,6 +555,11 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 08e3945a6fbfdebbfdb67cfa7fa5696814cca27b..dc2bd82b320217827beb8c470ffbb9e981620002 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, struct i2c_client *client = data->client; unsigned long min, val; u8 reg; - int err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; + int rv; + + rv = kstrtoul(buf, 10, &val); + if (rv < 0) + return rv; /* Save fan_min */ mutex_lock(&data->update_lock); @@ -390,8 +392,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, return -EINVAL; } - reg = (lm80_read_value(client, LM80_REG_FANDIV) & - ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1))); + rv = lm80_read_value(client, LM80_REG_FANDIV); + if (rv < 0) { + mutex_unlock(&data->update_lock); + return rv; + } + reg = (rv & ~(3 << (2 * (nr + 1)))) + | (data->fan_div[nr] << (2 * (nr + 1))); lm80_write_value(client, LM80_REG_FANDIV, reg); /* Restore fan_min */ diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c index c7fcc9e7f57a233e2e823faa6ebaa01628641a53..13912ac7c69fc1fa3b07cf96e9ca730c241aa1b2 100644 --- a/drivers/hwmon/lm95234.c +++ b/drivers/hwmon/lm95234.c @@ -310,7 +310,8 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 
255 : 127) * 1000), + 1000); mutex_lock(&data->update_lock); data->tcrit2[index] = val; @@ -359,7 +360,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); mutex_lock(&data->update_lock); data->tcrit1[index] = val; @@ -400,7 +401,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev, if (ret < 0) return ret; - val = DIV_ROUND_CLOSEST(val, 1000); + val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000); val = clamp_val((int)data->tcrit1[index] - val, 0, 31); mutex_lock(&data->update_lock); @@ -440,7 +441,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr, return ret; /* Accuracy is 1/2 degrees C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500); mutex_lock(&data->update_lock); data->toffset[index] = val; diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c index de46577c7d5a1711447f0f249604635260c8f958..e57b0c5119ce440320d3389dde1f0ad867e971ee 100644 --- a/drivers/hwmon/mlxreg-fan.c +++ b/drivers/hwmon/mlxreg-fan.c @@ -51,7 +51,7 @@ */ #define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \ ((rval) + (s)) * (d))) -#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask))) +#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask))) #define MLXREG_FAN_PWM_DUTY2STATE(duty) (DIV_ROUND_CLOSEST((duty) * \ MLXREG_FAN_MAX_STATE, \ MLXREG_FAN_MAX_DUTY)) @@ -307,8 +307,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, { struct mlxreg_fan *fan = cdev->devdata; unsigned long cur_state; + int i, config = 0; u32 regval; - int i; int err; /* @@ -321,6 +321,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, * overwritten. */ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) { + /* + * This is configuration change, which is only supported through sysfs. + * For configuration non-zero value is to be returned to avoid thermal + * statistics update. 
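(Illustrative aside, not part of the patch: the lm95234 limit-store hunks above, and the matching nct6775/w83627ehf hunks further down, all move clamp_val() inside DIV_ROUND_CLOSEST() so the raw millidegree input is bounded before it is scaled. A minimal user-space sketch of the two orderings, using simplified stand-ins for the kernel macros and assuming non-negative values after clamping:)

#include <stdio.h>

#define CLAMP(v, lo, hi)  ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
#define DIV_RND(x, d)     (((x) + (d) / 2) / (d))

int main(void)
{
        long input = 9000000;   /* userspace writes 9000 degC as millidegrees */

        /* new order (as in the hunks): bound the raw value, then scale */
        long new_way = DIV_RND(CLAMP(input, 0L, 127000L), 1000L);

        /* old order: scale first, clamp the result afterwards */
        long old_way = CLAMP(DIV_RND(input, 1000L), 0L, 127L);

        /* Both print 127 for this input; clamping first keeps the
         * intermediate "value + divisor/2" sum in range, so extreme
         * inputs (e.g. near LONG_MAX) can no longer overflow before
         * the limit is applied. */
        printf("new=%ld old=%ld\n", new_way, old_way);
        return 0;
}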
+ */ + config = 1; state -= MLXREG_FAN_MAX_STATE; for (i = 0; i < state; i++) fan->cooling_levels[i] = state; @@ -335,7 +341,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval); if (state < cur_state) - return 0; + return config; state = cur_state; } @@ -351,7 +357,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev, dev_err(fan->dev, "Failed to write PWM duty\n"); return err; } - return 0; + return config; } static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = { diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 78603b78cf410de903aa22d55147e6b600ab0398..23581dc62246cf21e459042fdb45a19ae7c57bef 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -704,10 +704,10 @@ static const char *const nct6795_temp_label[] = { "PCH_CHIP_TEMP", "PCH_CPU_TEMP", "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", + "Agent0 Dimm0", + "Agent0 Dimm1", + "Agent1 Dimm0", + "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", "PECI Agent 0 Calibration", @@ -742,10 +742,10 @@ static const char *const nct6796_temp_label[] = { "PCH_CHIP_TEMP", "PCH_CPU_TEMP", "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", + "Agent0 Dimm0", + "Agent0 Dimm1", + "Agent1 Dimm0", + "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", "PECI Agent 0 Calibration", @@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 }; static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; -static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c }; +static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b }; static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; @@ -2264,7 +2264,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000); mutex_lock(&data->update_lock); data->temp_offset[nr] = val; @@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; + data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H; data->REG_PWM[0] = NCT6106_REG_PWM; data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 2876c18ed84115dbd97e7eb2c4f9b899ebf2cfe7..38ffbdb0a85fba288cdcac8e76cc7ff3eadec65d 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = { &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in3_beep.dev_attr.attr, - &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */ + &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */ &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, @@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj, if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */ return 0; - if 
(index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */ + if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */ return 0; - if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */ + if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */ return 0; return attr->mode; diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c index b998f9fbed41e3fd854625f1a399936efd84835b..979b579bc118fe690d0d82f59841eb9e1ea9497a 100644 --- a/drivers/hwmon/npcm750-pwm-fan.c +++ b/drivers/hwmon/npcm750-pwm-fan.c @@ -52,7 +52,7 @@ /* Define the Counter Register, value = 100 for match 100% */ #define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255 -#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127 +#define NPCM7XX_PWM_CMR_DEFAULT_NUM 255 #define NPCM7XX_PWM_CMR_MAX 255 /* default all PWM channels PRESCALE2 = 1 */ diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c index dc5a9d5ada516c840f73ee7f33971ec1bcdddcfc..81a05cd1a5121afd608a003abbf18dacdaf35fc9 100644 --- a/drivers/hwmon/pc87427.c +++ b/drivers/hwmon/pc87427.c @@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" }; #define LD_IN 1 #define LD_TEMP 1 +static inline int superio_enter(int sioaddr) +{ + if (!request_muxed_region(sioaddr, 2, DRVNAME)) + return -EBUSY; + return 0; +} + static inline void superio_outb(int sioaddr, int reg, int val) { outb(reg, sioaddr); @@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr) { outb(0x02, sioaddr); outb(0x02, sioaddr + 1); + release_region(sioaddr, 2); } /* @@ -1220,7 +1228,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data) { u16 val; u8 cfg, cfg_b; - int i, err = 0; + int i, err; + + err = superio_enter(sioaddr); + if (err) + return err; /* Identify device */ val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID); diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index 7718e58dbda543d0687af136c53e551f799f349d..7688dab32f6e6088725d75427d9dccabba99f1fd 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_client *client, } else { info->pages = 1; } + + pmbus_clear_faults(client); } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 82c3754e21e337c83b4d7e3a12236da78500f370..cd24b375df1eea3598d4c74995ba533f97e4b9a9 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -1230,7 +1230,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, const struct pmbus_driver_info *info, const char *name, int index, int page, - const struct pmbus_sensor_attr *attr) + const struct pmbus_sensor_attr *attr, + bool paged) { struct pmbus_sensor *base; bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */ @@ -1238,7 +1239,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, if (attr->label) { ret = pmbus_add_label(data, name, index, attr->label, - attr->paged ? page + 1 : 0); + paged ? page + 1 : 0); if (ret) return ret; } @@ -1271,6 +1272,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client, return 0; } +static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info, + const struct pmbus_sensor_attr *attr) +{ + int p; + + if (attr->paged) + return true; + + /* + * Some attributes may be present on more than one page despite + * not being marked with the paged attribute. 
If that is the case, + * then treat the sensor as being paged and add the page suffix to the + * attribute name. + * We don't just add the paged attribute to all such attributes, in + * order to maintain the un-suffixed labels in the case where the + * attribute is only on page 0. + */ + for (p = 1; p < info->pages; p++) { + if (info->func[p] & attr->func) + return true; + } + return false; +} + static int pmbus_add_sensor_attrs(struct i2c_client *client, struct pmbus_data *data, const char *name, @@ -1284,14 +1309,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client, index = 1; for (i = 0; i < nattrs; i++) { int page, pages; + bool paged = pmbus_sensor_is_paged(info, attrs); - pages = attrs->paged ? info->pages : 1; + pages = paged ? info->pages : 1; for (page = 0; page < pages; page++) { if (!(info->func[page] & attrs->func)) continue; ret = pmbus_add_sensor_attrs_one(client, data, info, name, index, page, - attrs); + attrs, paged); if (ret) return ret; index++; @@ -2015,7 +2041,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; - pmbus_clear_faults(client); + if (data->info->pages) + pmbus_clear_faults(client); + else + pmbus_clear_fault_page(client, -1); if (info->identify) { ret = (*info->identify)(client, info); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 7838af58f92d51639570d4134b3a95e488d91225..65de80bd63d8cd90fd085911099752220af59d49 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -221,8 +221,12 @@ static int pwm_fan_probe(struct platform_device *pdev) ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL); if (IS_ERR(ctx->pwm)) { - dev_err(&pdev->dev, "Could not get PWM\n"); - return PTR_ERR(ctx->pwm); + ret = PTR_ERR(ctx->pwm); + + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get PWM: %d\n", ret); + + return ret; } platform_set_drvdata(pdev, ctx); @@ -250,7 +254,7 @@ static int pwm_fan_probe(struct platform_device *pdev) ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx); if (ret) - return ret; + goto err_pwm_disable; ctx->pwm_fan_state = ctx->pwm_fan_max_state; if (IS_ENABLED(CONFIG_THERMAL)) { @@ -290,9 +294,19 @@ static int pwm_fan_remove(struct platform_device *pdev) static int pwm_fan_suspend(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); + struct pwm_args args; + int ret; + + pwm_get_args(ctx->pwm, &args); + + if (ctx->pwm_value) { + ret = pwm_config(ctx->pwm, 0, args.period); + if (ret < 0) + return ret; - if (ctx->pwm_value) pwm_disable(ctx->pwm); + } + return 0; } diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index be5ba469089531b26b0ba2d0b1866c20f3e5312b..0d0457245e7d0e19f1acad510cb5a3d295ed812f 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c @@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rpi_hwmon_data *data; - int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev) /* Parent driver assure that firmware is correct */ data->fw = dev_get_drvdata(dev->parent); - /* Init throttled */ - ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED, - &data->last_throttled, - sizeof(data->last_throttled)); - data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt", data, &rpi_chip_info, diff --git 
a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c index 6bd2007565603efde3b012865fd7091fff5478e9..cbdb5c4991ae3132538d36d54fb40aa2943bca1a 100644 --- a/drivers/hwmon/smsc47b397.c +++ b/drivers/hwmon/smsc47b397.c @@ -72,14 +72,19 @@ static inline void superio_select(int ld) superio_outb(0x07, ld); } -static inline void superio_enter(void) +static inline int superio_enter(void) { + if (!request_muxed_region(REG, 2, DRVNAME)) + return -EBUSY; + outb(0x55, REG); + return 0; } static inline void superio_exit(void) { outb(0xAA, REG); + release_region(REG, 2); } #define SUPERIO_REG_DEVID 0x20 @@ -300,8 +305,12 @@ static int __init smsc47b397_find(void) u8 id, rev; char *name; unsigned short addr; + int err; + + err = superio_enter(); + if (err) + return err; - superio_enter(); id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID); switch (id) { diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index c7b6a425e2c0270f5b20a7b09241a0eadce94ce8..5eeac9853d0ae8dd4deeef82d2137d2c35c21314 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c @@ -73,16 +73,21 @@ superio_inb(int reg) /* logical device for fans is 0x0A */ #define superio_select() superio_outb(0x07, 0x0A) -static inline void +static inline int superio_enter(void) { + if (!request_muxed_region(REG, 2, DRVNAME)) + return -EBUSY; + outb(0x55, REG); + return 0; } static inline void superio_exit(void) { outb(0xAA, REG); + release_region(REG, 2); } #define SUPERIO_REG_ACT 0x30 @@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data) { u8 val; unsigned short addr; + int err; + + err = superio_enter(); + if (err) + return err; - superio_enter(); val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID); /* @@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data) static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) { if ((sio_data->activate & 0x01) == 0) { - superio_enter(); - superio_select(); - - pr_info("Disabling device\n"); - superio_outb(SUPERIO_REG_ACT, sio_data->activate); - - superio_exit(); + if (!superio_enter()) { + superio_select(); + pr_info("Disabling device\n"); + superio_outb(SUPERIO_REG_ACT, sio_data->activate); + superio_exit(); + } else { + pr_warn("Failed to disable device\n"); + } } } diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index e36399213324d9ab99e6d59f1ac1621445e2a9e2..ceb3db6f3fddac60fce847000e39f1de5ea16b39 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c @@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = { .data = (void *)2 }, { - .compatible = "ti,tmp422", + .compatible = "ti,tmp442", .data = (void *)3 }, { }, diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 0e81f287d3051930b5edc4d459d8957b05bfa979..3c960b999958b4b7fa842a32c0f8f2d783a1109a 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -287,7 +287,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { { X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */ { X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */ { X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */ - { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, }, {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c index 3a6bfa51cb94f69cb8323ee69f32c88c62360575..95d5e8ec8b7fcbf84275e9ca15476ab3ecee0e22 100644 --- a/drivers/hwmon/vt1211.c +++ b/drivers/hwmon/vt1211.c @@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn) outb(ldn, 
sio_cip + 1); } -static inline void superio_enter(int sio_cip) +static inline int superio_enter(int sio_cip) { + if (!request_muxed_region(sio_cip, 2, DRVNAME)) + return -EBUSY; + outb(0x87, sio_cip); outb(0x87, sio_cip); + + return 0; } static inline void superio_exit(int sio_cip) { outb(0xaa, sio_cip); + release_region(sio_cip, 2); } /* --------------------------------------------------------------------- @@ -1282,11 +1288,14 @@ static int __init vt1211_device_add(unsigned short address) static int __init vt1211_find(int sio_cip, unsigned short *address) { - int err = -ENODEV; + int err; int devid; - superio_enter(sio_cip); + err = superio_enter(sio_cip); + if (err) + return err; + err = -ENODEV; devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID); if (devid != SIO_VT1211_ID) goto EXIT; diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index ad68b6d9ff17e8adeeac55124845ab86d0577374..8da5f77b8987ce65f52ae10db5c34707cbc8ecbf 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -1519,7 +1519,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000); mutex_lock(&data->update_lock); data->target_temp[nr] = val; @@ -1545,7 +1545,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr, return err; /* Limit the temp to 0C - 15C */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000); mutex_lock(&data->update_lock); if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 49276bbdac3ddf0dc1153cb1941cf28f0ac10ea7..1bb80f992aa8609ed6df25973548efdfdbc7fe8c 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, * somewhere else in the code */ #define SENSOR_ATTR_TEMP(index) { \ - SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ + SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \ show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \ NULL, TEMP_READ, index - 1), \ diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index a3cd91f232679afd21c9ebbd206be377f5394333..2dd19a42030520c1b1835e7e8cac9cee07470158 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -780,6 +780,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev) { struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev); + cancel_work_sync(&ctx->workq); hwmon_device_unregister(ctx->hwmon_dev); kfifo_free(&ctx->async_msg_fifo); if (acpi_disabled) diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 0000000000000000000000000000000000000000..8b88228a334deea778f766ae0a95cf1b1a9b0cb3 --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,311 @@ +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + * + * based on existing coretemp.c, which is + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
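(Illustrative aside, not part of the patch: the Super-I/O drivers touched above — pc87427, smsc47b397, smsc47m1 and vt1211 — all gain the same pattern: superio_enter() now reserves the two configuration ports with request_muxed_region() and reports -EBUSY on contention, and superio_exit() releases them. A hedged composite sketch of that pattern; DRVNAME, the port count and the 0x55/0xaa keys are placeholders that vary per chip:)

#include <linux/ioport.h>
#include <linux/io.h>

#define DRVNAME "example_sio"           /* placeholder driver name */

static int superio_enter(int sioaddr)
{
        /* Reserve the index/data port pair for the whole config session */
        if (!request_muxed_region(sioaddr, 2, DRVNAME))
                return -EBUSY;

        outb(0x55, sioaddr);            /* chip-specific "enter config" key */
        return 0;
}

static void superio_exit(int sioaddr)
{
        outb(0xaa, sioaddr);            /* chip-specific "exit config" key */
        release_region(sioaddr, 2);
}

Callers such as the *_find() routines above must now check the return value instead of assuming the ports are free.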
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301 USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME }; + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u8 vrm; + u32 id; + u32 msr_temp; + u32 msr_vid; +}; + +static ssize_t show_name(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static ssize_t show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t cpu0_vid_show(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_vid, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%d\n", vid_from_reg(~edx & 0x7f, data->vrm)); +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, + SHOW_TEMP); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); +static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +/* Optional attributes */ +static DEVICE_ATTR_RO(cpu0_vid); + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + + data->msr_temp = 0x1423; + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, + "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + if (data->msr_vid) + data->vrm = vid_which_vrm(); + + if (data->vrm) { + err = device_create_file(&pdev->dev, &dev_attr_cpu0_vid); + if (err) + goto exit_remove; + } + + data->hwmon_dev = hwmon_device_register(&pdev->dev); + if 
(IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", + err); + goto exit_remove; + } + + return 0; + +exit_remove: + if (data->vrm) + device_remove_file(&pdev->dev, &dev_attr_cpu0_vid); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + if (data->vrm) + device_remove_file(&pdev->dev, &dev_attr_cpu0_vid); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id __initconst cputemp_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c 
b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 45b2460f31663e4d110ae3294cbbe0d9693c2faa..e8819d7509387a065842f577c20781757e0a94c3 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = { .id = 0x000bbd08, .mask = 0x000fffff, }, + { /* Debug for Cortex-A73 */ + .id = 0x000bbd09, + .mask = 0x000fffff, + }, { 0, 0 }, }; diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c index f6d0571ab9dd59e10066f6143d6206f69b022dc7..d31f1d8758b243253d985ea36859dbebee6d26e0 100644 --- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c +++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c @@ -34,26 +34,42 @@ struct replicator_state { struct coresight_device *csdev; }; +/* + * replicator_reset : Reset the replicator configuration to sane values. + */ +static void replicator_reset(struct replicator_state *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0); + writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1); + + CS_LOCK(drvdata->base); +} + static int replicator_enable(struct coresight_device *csdev, int inport, int outport) { + u32 reg; struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); + switch (outport) { + case 0: + reg = REPLICATOR_IDFILTER0; + break; + case 1: + reg = REPLICATOR_IDFILTER1; + break; + default: + WARN_ON(1); + return -EINVAL; + } + CS_UNLOCK(drvdata->base); - /* - * Ensure that the other port is disabled - * 0x00 - passing through the replicator unimpeded - * 0xff - disable (or impede) the flow of ATB data - */ - if (outport == 0) { - writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0); - writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1); - } else { - writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1); - writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0); - } + /* Ensure that the outport is enabled. 
*/ + writel_relaxed(0x00, drvdata->base + reg); CS_LOCK(drvdata->base); dev_info(drvdata->dev, "REPLICATOR enabled\n"); @@ -63,15 +79,25 @@ static int replicator_enable(struct coresight_device *csdev, int inport, static void replicator_disable(struct coresight_device *csdev, int inport, int outport) { + u32 reg; struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); + switch (outport) { + case 0: + reg = REPLICATOR_IDFILTER0; + break; + case 1: + reg = REPLICATOR_IDFILTER1; + break; + default: + WARN_ON(1); + return; + } + CS_UNLOCK(drvdata->base); /* disable the flow of ATB data through port */ - if (outport == 0) - writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0); - else - writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1); + writel_relaxed(0xff, drvdata->base + reg); CS_LOCK(drvdata->base); @@ -156,7 +182,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id) desc.groups = replicator_groups; drvdata->csdev = coresight_register(&desc); - return PTR_ERR_OR_ZERO(drvdata->csdev); + if (!IS_ERR(drvdata->csdev)) { + replicator_reset(drvdata); + return 0; + } + return PTR_ERR(drvdata->csdev); } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 306119eaf16a626c10133ff50b7423f7bc4bc35f..0dad8626bcfbf422f2a04c73304ab169ddb028eb 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -147,6 +147,10 @@ static int etb_enable(struct coresight_device *csdev, u32 mode) if (val == CS_MODE_PERF) return -EBUSY; + /* Don't let perf disturb sysFS sessions */ + if (val == CS_MODE_SYSFS && mode == CS_MODE_PERF) + return -EBUSY; + /* Nothing to do, the tracer is already enabled. 
*/ if (val == CS_MODE_SYSFS) goto out; diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 6776956352114b78f9aa9bc5457424c9915b99b3..567f46ca259980918aec8fcecb936737e59fc972 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -33,7 +34,7 @@ struct etm_event_data { struct work_struct work; cpumask_t mask; void *snk_config; - struct list_head **path; + struct list_head * __percpu *path; }; static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); @@ -61,6 +62,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = { NULL, }; +static inline struct list_head ** +etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu) +{ + return per_cpu_ptr(data->path, cpu); +} + +static inline struct list_head * +etm_event_cpu_path(struct etm_event_data *data, int cpu) +{ + return *etm_event_cpu_path_ptr(data, cpu); +} + static void etm_event_read(struct perf_event *event) {} static int etm_addr_filters_alloc(struct perf_event *event) @@ -120,23 +133,26 @@ static void free_event_data(struct work_struct *work) */ if (event_data->snk_config) { cpu = cpumask_first(mask); - sink = coresight_get_sink(event_data->path[cpu]); + sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu)); if (sink_ops(sink)->free_buffer) sink_ops(sink)->free_buffer(event_data->snk_config); } for_each_cpu(cpu, mask) { - if (!(IS_ERR_OR_NULL(event_data->path[cpu]))) - coresight_release_path(event_data->path[cpu]); + struct list_head **ppath; + + ppath = etm_event_cpu_path_ptr(event_data, cpu); + if (!(IS_ERR_OR_NULL(*ppath))) + coresight_release_path(*ppath); + *ppath = NULL; } - kfree(event_data->path); + free_percpu(event_data->path); kfree(event_data); } static void *alloc_event_data(int cpu) { - int size; cpumask_t *mask; struct etm_event_data *event_data; @@ -147,7 +163,6 @@ static void *alloc_event_data(int cpu) /* Make sure nothing disappears under us */ get_online_cpus(); - size = num_online_cpus(); mask = &event_data->mask; if (cpu != -1) @@ -164,8 +179,8 @@ static void *alloc_event_data(int cpu) * unused memory when dealing with single CPU trace scenarios is small * compared to the cost of searching through an optimized array. */ - event_data->path = kcalloc(size, - sizeof(struct list_head *), GFP_KERNEL); + event_data->path = alloc_percpu(struct list_head *); + if (!event_data->path) { kfree(event_data); return NULL; @@ -181,15 +196,15 @@ static void etm_free_aux(void *data) schedule_work(&event_data->work); } -static void *etm_setup_aux(int event_cpu, void **pages, +static void *etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { - int cpu; + int cpu = event->cpu; cpumask_t *mask; struct coresight_device *sink; struct etm_event_data *event_data = NULL; - event_data = alloc_event_data(event_cpu); + event_data = alloc_event_data(cpu); if (!event_data) return NULL; INIT_WORK(&event_data->work, free_event_data); @@ -213,6 +228,7 @@ static void *etm_setup_aux(int event_cpu, void **pages, /* Setup the path for each CPU in a trace session */ for_each_cpu(cpu, mask) { + struct list_head *path; struct coresight_device *csdev; csdev = per_cpu(csdev_src, cpu); @@ -224,9 +240,11 @@ static void *etm_setup_aux(int event_cpu, void **pages, * list of devices from source to sink that can be * referenced later when the path is actually needed. 
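(Illustrative aside, not part of the patch: the etm-perf changes above replace the kcalloc()'d array of per-CPU session paths with a true per-CPU pointer plus two small accessors. A hedged kernel-style sketch of that allocation/accessor pattern with made-up names; it only builds in kernel context:)

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/errno.h>

/* One 'struct list_head *' slot per CPU, mirroring etm_event_data::path */
struct event_paths {
        struct list_head * __percpu *path;
};

static int event_paths_alloc(struct event_paths *ep)
{
        ep->path = alloc_percpu(struct list_head *);
        return ep->path ? 0 : -ENOMEM;
}

static void event_paths_set(struct event_paths *ep, int cpu, struct list_head *p)
{
        *per_cpu_ptr(ep->path, cpu) = p;        /* this CPU's source->sink path */
}

static struct list_head *event_paths_get(struct event_paths *ep, int cpu)
{
        return *per_cpu_ptr(ep->path, cpu);
}

static void event_paths_free(struct event_paths *ep)
{
        free_percpu(ep->path);                  /* frees every CPU's slot at once */
}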
*/ - event_data->path[cpu] = coresight_build_path(csdev, sink); - if (IS_ERR(event_data->path[cpu])) + path = coresight_build_path(csdev, sink); + if (IS_ERR(path)) goto err; + + *etm_event_cpu_path_ptr(event_data, cpu) = path; } if (!sink_ops(sink)->alloc_buffer) @@ -255,6 +273,7 @@ static void etm_event_start(struct perf_event *event, int flags) struct etm_event_data *event_data; struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); + struct list_head *path; if (!csdev) goto fail; @@ -267,8 +286,9 @@ static void etm_event_start(struct perf_event *event, int flags) if (!event_data) goto fail; + path = etm_event_cpu_path(event_data, cpu); /* We need a sink, no need to continue without one */ - sink = coresight_get_sink(event_data->path[cpu]); + sink = coresight_get_sink(path); if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer)) goto fail_end_stop; @@ -278,7 +298,7 @@ static void etm_event_start(struct perf_event *event, int flags) goto fail_end_stop; /* Nothing will happen without a path */ - if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF)) + if (coresight_enable_path(path, CS_MODE_PERF)) goto fail_end_stop; /* Tell the perf core the event is alive */ @@ -286,11 +306,13 @@ static void etm_event_start(struct perf_event *event, int flags) /* Finally enable the tracer */ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) - goto fail_end_stop; + goto fail_disable_path; out: return; +fail_disable_path: + coresight_disable_path(path); fail_end_stop: perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); perf_aux_output_end(handle, 0); @@ -306,6 +328,7 @@ static void etm_event_stop(struct perf_event *event, int mode) struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); struct etm_event_data *event_data = perf_get_aux(handle); + struct list_head *path; if (event->hw.state == PERF_HES_STOPPED) return; @@ -313,7 +336,11 @@ static void etm_event_stop(struct perf_event *event, int mode) if (!csdev) return; - sink = coresight_get_sink(event_data->path[cpu]); + path = etm_event_cpu_path(event_data, cpu); + if (!path) + return; + + sink = coresight_get_sink(path); if (!sink) return; @@ -344,7 +371,7 @@ static void etm_event_stop(struct perf_event *event, int mode) } /* Disabling the path make its elements available to other sessions */ - coresight_disable_path(event_data->path[cpu]); + coresight_disable_path(path); } static int etm_event_add(struct perf_event *event, int mode) @@ -410,15 +437,16 @@ static int etm_addr_filters_validate(struct list_head *filters) static void etm_addr_filters_sync(struct perf_event *event) { struct perf_addr_filters_head *head = perf_event_addr_filters(event); - unsigned long start, stop, *offs = event->addr_filters_offs; + unsigned long start, stop; + struct perf_addr_filter_range *fr = event->addr_filter_ranges; struct etm_filters *filters = event->hw.addr_filters; struct etm_filter *etm_filter; struct perf_addr_filter *filter; int i = 0; list_for_each_entry(filter, &head->list, entry) { - start = filter->offset + offs[i]; - stop = start + filter->size; + start = fr[i].start; + stop = start + fr[i].size; etm_filter = &filters->etm_filter[i]; switch (filter->action) { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index a0365e23678e27fb968c47b76a5c2ba424ca07a4..f5fb1e7a9c17259c1f2a1daae0e7283e86e901c1 100644 --- 
a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -655,10 +655,13 @@ static ssize_t cyc_threshold_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; + + /* mask off max threshold before checking min value */ + val &= ETM_CYC_THRESHOLD_MASK; if (val < drvdata->ccitmin) return -EINVAL; - config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; + config->ccctlr = val; return size; } static DEVICE_ATTR_RW(cyc_threshold); @@ -689,14 +692,16 @@ static ssize_t bb_ctrl_store(struct device *dev, return -EINVAL; if (!drvdata->nr_addr_cmp) return -EINVAL; + /* - * Bit[7:0] selects which address range comparator is used for - * branch broadcast control. + * Bit[8] controls include(1) / exclude(0), bits[0-7] select + * individual range comparators. If include then at least 1 + * range must be selected. */ - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) + if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0)) return -EINVAL; - config->bb_ctrl = val; + config->bb_ctrl = val & GENMASK(8, 0); return size; } static DEVICE_ATTR_RW(bb_ctrl); @@ -1329,8 +1334,8 @@ static ssize_t seq_event_store(struct device *dev, spin_lock(&drvdata->spinlock); idx = config->seq_idx; - /* RST, bits[7:0] */ - config->seq_ctrl[idx] = val & 0xFF; + /* Seq control has two masks B[15:8] F[7:0] */ + config->seq_ctrl[idx] = val & 0xFFFF; spin_unlock(&drvdata->spinlock); return size; } @@ -1585,7 +1590,7 @@ static ssize_t res_ctrl_store(struct device *dev, if (idx % 2 != 0) /* PAIRINV, bit[21] */ val &= ~BIT(21); - config->res_ctrl[idx] = val; + config->res_ctrl[idx] = val & GENMASK(21, 0); spin_unlock(&drvdata->spinlock); return size; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 1d94ebec027b52054f6aaf22a54dd29c96db535a..b7bc08cf90c6970bb9d415eebca2040f1eb8d242 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "coresight-etm4x.h" #include "coresight-etm-perf.h" @@ -54,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata) static bool etm4_arch_supported(u8 arch) { - switch (arch) { + /* Mask out the minor version number */ + switch (arch & 0xf0) { case ETM_ARCH_V4: break; default: @@ -173,6 +175,12 @@ static void etm4_enable_hw(void *info) if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) dev_err(drvdata->dev, "timeout while waiting for Idle Trace Status\n"); + /* + * As recommended by section 4.3.7 ("Synchronization when using the + * memory-mapped interface") of ARM IHI 0064D + */ + dsb(sy); + isb(); CS_LOCK(drvdata->base); @@ -323,8 +331,12 @@ static void etm4_disable_hw(void *info) /* EN, bit[0] Trace unit enable bit */ control &= ~0x1; - /* make sure everything completes before disabling */ - mb(); + /* + * Make sure everything completes before disabling, as recommended + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, + * SSTATUS") of ARM IHI 0064D + */ + dsb(sy); isb(); writel_relaxed(control, drvdata->base + TRCPRGCTLR); @@ -605,7 +617,7 @@ static void etm4_set_default_config(struct etmv4_config *config) config->vinst_ctrl |= BIT(0); } -static u64 etm4_get_access_type(struct etmv4_config *config) +static u64 etm4_get_ns_access_type(struct etmv4_config *config) { u64 access_type = 0; @@ -616,17 +628,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config) * Bit[13] Exception level 1 - OS * Bit[14] Exception 
level 2 - Hypervisor * Bit[15] Never implemented - * - * Always stay away from hypervisor mode. */ - access_type = ETM_EXLEVEL_NS_HYP; - - if (config->mode & ETM_MODE_EXCL_KERN) - access_type |= ETM_EXLEVEL_NS_OS; + if (!is_kernel_in_hyp_mode()) { + /* Stay away from hypervisor mode for non-VHE */ + access_type = ETM_EXLEVEL_NS_HYP; + if (config->mode & ETM_MODE_EXCL_KERN) + access_type |= ETM_EXLEVEL_NS_OS; + } else if (config->mode & ETM_MODE_EXCL_KERN) { + access_type = ETM_EXLEVEL_NS_HYP; + } if (config->mode & ETM_MODE_EXCL_USER) access_type |= ETM_EXLEVEL_NS_APP; + return access_type; +} + +static u64 etm4_get_access_type(struct etmv4_config *config) +{ + u64 access_type = etm4_get_ns_access_type(config); + /* * EXLEVEL_S, bits[11:8], don't trace anything happening * in secure state. @@ -880,20 +901,10 @@ void etm4_config_trace_mode(struct etmv4_config *config) addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; /* clear default config */ - addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); + addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS | + ETM_EXLEVEL_NS_HYP); - /* - * EXLEVEL_NS, bits[15:12] - * The Exception levels are: - * Bit[12] Exception level 0 - Application - * Bit[13] Exception level 1 - OS - * Bit[14] Exception level 2 - Hypervisor - * Bit[15] Never implemented - */ - if (mode & ETM_MODE_EXCL_KERN) - addr_acc |= ETM_EXLEVEL_NS_OS; - else - addr_acc |= ETM_EXLEVEL_NS_APP; + addr_acc |= etm4_get_ns_access_type(config); config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 0549249f4b3988050224ee016281922434bfde16..988cbb5f3e0bdad02f5fa2dbca9ff5e656785330 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -438,10 +438,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev, case TMC_MEM_INTF_WIDTH_32BITS: case TMC_MEM_INTF_WIDTH_64BITS: case TMC_MEM_INTF_WIDTH_128BITS: - mask = GENMASK(31, 5); + mask = GENMASK(31, 4); break; case TMC_MEM_INTF_WIDTH_256BITS: - mask = GENMASK(31, 6); + mask = GENMASK(31, 5); break; } @@ -472,7 +472,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev, buf_ptr = buf->data_pages[cur] + offset; *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); - if (lost && *barrier) { + if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) { *buf_ptr = *barrier; barrier++; } diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 2eda5de304c20b98f135382991246031fafc4736..2d6f428176ff86aa9438d8fde335bb509b1290a3 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -536,7 +536,7 @@ tmc_init_etr_sg_table(struct device *dev, int node, sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages); if (IS_ERR(sg_table)) { kfree(etr_table); - return ERR_PTR(PTR_ERR(sg_table)); + return ERR_CAST(sg_table); } etr_table->sg_table = sg_table; @@ -895,10 +895,15 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); } -static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) +static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata, + struct etr_buf *etr_buf) { u32 axictl, sts; - struct etr_buf *etr_buf = drvdata->etr_buf; + + /* Callers should provide an appropriate buffer for use */ + if (WARN_ON(!etr_buf 
|| drvdata->etr_buf)) + return; + drvdata->etr_buf = etr_buf; /* * If this ETR is connected to a CATU, enable it before we turn @@ -960,13 +965,16 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) * also updating the @bufpp on where to find it. Since the trace data * starts at anywhere in the buffer, depending on the RRP, we adjust the * @len returned to handle buffer wrapping around. + * + * We are protected here by drvdata->reading != 0, which ensures the + * sysfs_buf stays alive. */ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, loff_t pos, size_t len, char **bufpp) { s64 offset; ssize_t actual = len; - struct etr_buf *etr_buf = drvdata->etr_buf; + struct etr_buf *etr_buf = drvdata->sysfs_buf; if (pos + actual > etr_buf->len) actual = etr_buf->len - pos; @@ -996,7 +1004,14 @@ tmc_etr_free_sysfs_buf(struct etr_buf *buf) static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) { - tmc_sync_etr_buf(drvdata); + struct etr_buf *etr_buf = drvdata->etr_buf; + + if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { + tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); + drvdata->sysfs_buf = NULL; + } else { + tmc_sync_etr_buf(drvdata); + } } static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) @@ -1017,6 +1032,8 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) /* Disable CATU device if this ETR is connected to one */ tmc_etr_disable_catu(drvdata); + /* Reset the ETR buf used by hardware */ + drvdata->etr_buf = NULL; } static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) @@ -1024,7 +1041,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) int ret = 0; unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - struct etr_buf *new_buf = NULL, *free_buf = NULL; + struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL; /* * If we are enabling the ETR from disabled state, we need to make @@ -1035,7 +1052,8 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) * with the lock released. */ spin_lock_irqsave(&drvdata->spinlock, flags); - if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) { + sysfs_buf = READ_ONCE(drvdata->sysfs_buf); + if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Allocate memory with the locks released */ @@ -1064,14 +1082,14 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) * If we don't have a buffer or it doesn't match the requested size, * use the buffer allocated above. Otherwise reuse the existing buffer. 
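(Illustrative aside, not part of the patch: the ETR rework above separates the long-lived sysfs buffer, drvdata->sysfs_buf, from the buffer the hardware is currently writing, drvdata->etr_buf, which is now passed explicitly to tmc_etr_enable_hw() and cleared again on disable. A hedged sketch of that hand-off; the function names are hypothetical and the types are assumed to come from coresight-tmc.h:)

#include <linux/bug.h>
#include <linux/errno.h>
#include "coresight-tmc.h"

/* Hardware may only run against one buffer at a time; the sysfs (or perf)
 * side keeps its own long-lived reference and merely lends the buffer to
 * the hardware for the duration of a session. */
static int etr_hw_claim_buf(struct tmc_drvdata *drvdata, struct etr_buf *buf)
{
        if (WARN_ON(!buf || drvdata->etr_buf))
                return -EBUSY;          /* no buffer given, or hardware already busy */

        drvdata->etr_buf = buf;         /* hardware owns 'buf' until disable */
        return 0;
}

static void etr_hw_release_buf(struct tmc_drvdata *drvdata)
{
        drvdata->etr_buf = NULL;        /* ownership returns to sysfs_buf / perf */
}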
*/ - if (!drvdata->etr_buf || - (new_buf && drvdata->etr_buf->size != new_buf->size)) { - free_buf = drvdata->etr_buf; - drvdata->etr_buf = new_buf; + sysfs_buf = READ_ONCE(drvdata->sysfs_buf); + if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) { + free_buf = sysfs_buf; + drvdata->sysfs_buf = new_buf; } drvdata->mode = CS_MODE_SYSFS; - tmc_etr_enable_hw(drvdata); + tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); out: spin_unlock_irqrestore(&drvdata->spinlock, flags); @@ -1156,13 +1174,13 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) goto out; } - /* If drvdata::etr_buf is NULL the trace data has been read already */ - if (drvdata->etr_buf == NULL) { + /* If sysfs_buf is NULL the trace data has been read already */ + if (!drvdata->sysfs_buf) { ret = -EINVAL; goto out; } - /* Disable the TMC if need be */ + /* Disable the TMC if we are trying to read from a running session */ if (drvdata->mode == CS_MODE_SYSFS) tmc_etr_disable_hw(drvdata); @@ -1176,7 +1194,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) { unsigned long flags; - struct etr_buf *etr_buf = NULL; + struct etr_buf *sysfs_buf = NULL; /* config types are set a boot time and never change */ if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) @@ -1191,22 +1209,22 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) * buffer. Since the tracer is still enabled drvdata::buf can't * be NULL. */ - tmc_etr_enable_hw(drvdata); + tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); } else { /* * The ETR is not tracing and the buffer was just read. * As such prepare to free the trace buffer. */ - etr_buf = drvdata->etr_buf; - drvdata->etr_buf = NULL; + sysfs_buf = drvdata->sysfs_buf; + drvdata->sysfs_buf = NULL; } drvdata->reading = false; spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Free allocated memory out side of the spinlock */ - if (etr_buf) - tmc_free_etr_buf(etr_buf); + if (sysfs_buf) + tmc_etr_free_sysfs_buf(sysfs_buf); return 0; } diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 7027bd60c4cc8f73934ff58d613afbcbb32e303c..872f63e3651ba4de5ba225f74cb223b6b1e61168 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -170,6 +170,7 @@ struct etr_buf { * @trigger_cntr: amount of words to store after a trigger. * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the * device configuration register (DEVID) + * @sysfs_data: SYSFS buffer for ETR. */ struct tmc_drvdata { void __iomem *base; @@ -189,6 +190,7 @@ struct tmc_drvdata { enum tmc_mem_intf_width memwidth; u32 trigger_cntr; u32 etr_caps; + struct etr_buf *sysfs_buf; }; struct etr_buf_operations { diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 3e07fd335f8cf287a19e6e74977a99ed8d9de6b4..c0dabbddc1e49dbdb66b0aa1944b23a7f8d87607 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -132,12 +132,14 @@ static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) { int ret; - if (!csdev->enable) { - if (sink_ops(csdev)->enable) { - ret = sink_ops(csdev)->enable(csdev, mode); - if (ret) - return ret; - } + /* + * We need to make sure the "new" session is compatible with the + * existing "mode" of operation. 
+ */ + if (sink_ops(csdev)->enable) { + ret = sink_ops(csdev)->enable(csdev, mode); + if (ret) + return ret; csdev->enable = true; } @@ -339,8 +341,14 @@ int coresight_enable_path(struct list_head *path, u32 mode) switch (type) { case CORESIGHT_DEV_TYPE_SINK: ret = coresight_enable_sink(csdev, mode); + /* + * Sink is the first component turned on. If we + * failed to enable the sink, there are no components + * that need disabling. Disabling the path here + * would mean we could disrupt an existing session. + */ if (ret) - goto err; + goto out; break; case CORESIGHT_DEV_TYPE_SOURCE: /* sources are enabled from either sysFS or Perf */ diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile index d9252fa8d9ca19142bb72e808636de10006457fd..b63eb8f309ad116068b9f6ac1cf10d277a2cfed2 100644 --- a/drivers/hwtracing/intel_th/Makefile +++ b/drivers/hwtracing/intel_th/Makefile @@ -20,3 +20,6 @@ intel_th_msu-y := msu.o obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o intel_th_pti-y := pti.o + +obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu_sink.o +intel_th_msu_sink-y := msu-sink.o diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c index 87bc3744755f25e2bc60b951d9e2180a0912c993..87f9024e4bbb7c945fb62b81540fd9b5b2cca209 100644 --- a/drivers/hwtracing/intel_th/acpi.c +++ b/drivers/hwtracing/intel_th/acpi.c @@ -37,15 +37,21 @@ MODULE_DEVICE_TABLE(acpi, intel_th_acpi_ids); static int intel_th_acpi_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + struct resource resource[TH_MMIO_END]; const struct acpi_device_id *id; struct intel_th *th; + int i, r; id = acpi_match_device(intel_th_acpi_ids, &pdev->dev); if (!id) return -ENODEV; - th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, - pdev->resource, pdev->num_resources, -1); + for (i = 0, r = 0; i < pdev->num_resources && r < TH_MMIO_END; i++) + if (pdev->resource[i].flags & + (IORESOURCE_IRQ | IORESOURCE_MEM)) + resource[r++] = pdev->resource[i]; + + th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, resource, r); if (IS_ERR(th)) return PTR_ERR(th); diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index fc6b7f8b62fb888238b83178212a16f9478564c8..52f000d37bacc6853a289912996f3ac19d4d871f 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -422,6 +422,7 @@ static const struct intel_th_subdevice { unsigned nres; unsigned type; unsigned otype; + bool mknode; unsigned scrpd; int id; } intel_th_subdevices[] = { @@ -429,9 +430,9 @@ static const struct intel_th_subdevice { .nres = 1, .res = { { - /* Handle TSCU from GTH driver */ + /* Handle TSCU and CTS from GTH driver */ .start = REG_GTH_OFFSET, - .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1, + .end = REG_CTS_OFFSET + REG_CTS_LENGTH - 1, .flags = IORESOURCE_MEM, }, }, @@ -456,6 +457,7 @@ static const struct intel_th_subdevice { .name = "msc", .id = 0, .type = INTEL_TH_OUTPUT, + .mknode = true, .otype = GTH_MSU, .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED, }, @@ -476,6 +478,7 @@ static const struct intel_th_subdevice { .name = "msc", .id = 1, .type = INTEL_TH_OUTPUT, + .mknode = true, .otype = GTH_MSU, .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED, }, @@ -488,7 +491,7 @@ static const struct intel_th_subdevice { .flags = IORESOURCE_MEM, }, { - .start = 1, /* use resource[1] */ + .start = TH_MMIO_SW, .end = 0, .flags = IORESOURCE_MEM, }, @@ -497,6 +500,24 @@ static const struct intel_th_subdevice { .name = "sth", 
.type = INTEL_TH_SOURCE, }, + { + .nres = 2, + .res = { + { + .start = REG_STH_OFFSET, + .end = REG_STH_OFFSET + REG_STH_LENGTH - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = TH_MMIO_RTIT, + .end = 0, + .flags = IORESOURCE_MEM, + }, + }, + .id = -1, + .name = "rtit", + .type = INTEL_TH_SOURCE, + }, { .nres = 1, .res = { @@ -581,7 +602,6 @@ intel_th_subdevice_alloc(struct intel_th *th, struct intel_th_device *thdev; struct resource res[3]; unsigned int req = 0; - bool is64bit = false; int r, err; thdev = intel_th_device_alloc(th, subdev->type, subdev->name, @@ -591,18 +611,12 @@ intel_th_subdevice_alloc(struct intel_th *th, thdev->drvdata = th->drvdata; - for (r = 0; r < th->num_resources; r++) - if (th->resource[r].flags & IORESOURCE_MEM_64) { - is64bit = true; - break; - } - memcpy(res, subdev->res, sizeof(struct resource) * subdev->nres); for (r = 0; r < subdev->nres; r++) { struct resource *devres = th->resource; - int bar = 0; /* cut subdevices' MMIO from resource[0] */ + int bar = TH_MMIO_CONFIG; /* * Take .end == 0 to mean 'take the whole bar', @@ -611,8 +625,9 @@ intel_th_subdevice_alloc(struct intel_th *th, */ if (!res[r].end && res[r].flags == IORESOURCE_MEM) { bar = res[r].start; - if (is64bit) - bar *= 2; + err = -ENODEV; + if (bar >= th->num_resources) + goto fail_put_device; res[r].start = 0; res[r].end = resource_size(&devres[bar]) - 1; } @@ -624,18 +639,22 @@ intel_th_subdevice_alloc(struct intel_th *th, dev_dbg(th->dev, "%s:%d @ %pR\n", subdev->name, r, &res[r]); } else if (res[r].flags & IORESOURCE_IRQ) { - res[r].start = th->irq; + /* + * Only pass on the IRQ if we have useful interrupts: + * the ones that can be configured via MINTCTL. + */ + if (INTEL_TH_CAP(th, has_mintctl) && th->irq != -1) + res[r].start = th->irq; } } err = intel_th_device_add_resources(thdev, res, subdev->nres); - if (err) { - put_device(&thdev->dev); + if (err) goto fail_put_device; - } if (subdev->type == INTEL_TH_OUTPUT) { - thdev->dev.devt = MKDEV(th->major, th->num_thdevs); + if (subdev->mknode) + thdev->dev.devt = MKDEV(th->major, th->num_thdevs); thdev->output.type = subdev->otype; thdev->output.port = -1; thdev->output.scratchpad = subdev->scrpd; @@ -646,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th, } err = device_add(&thdev->dev); - if (err) { - put_device(&thdev->dev); + if (err) goto fail_free_res; - } /* need switch driver to be loaded to enumerate the rest */ if (subdev->type == INTEL_TH_SWITCH && !req) { @@ -754,8 +771,13 @@ static int intel_th_populate(struct intel_th *th) thdev = intel_th_subdevice_alloc(th, subdev); /* note: caller should free subdevices from th::thdev[] */ - if (IS_ERR(thdev)) + if (IS_ERR(thdev)) { + /* ENODEV for individual subdevices is allowed */ + if (PTR_ERR(thdev) == -ENODEV) + continue; + return PTR_ERR(thdev); + } th->thdev[th->num_thdevs++] = thdev; } @@ -805,26 +827,37 @@ static const struct file_operations intel_th_output_fops = { .llseek = noop_llseek, }; +static irqreturn_t intel_th_irq(int irq, void *data) +{ + struct intel_th *th = data; + irqreturn_t ret = IRQ_NONE; + struct intel_th_driver *d; + int i; + + for (i = 0; i < th->num_thdevs; i++) { + if (th->thdev[i]->type != INTEL_TH_OUTPUT) + continue; + + d = to_intel_th_driver(th->thdev[i]->dev.driver); + if (d && d->irq) + ret |= d->irq(th->thdev[i]); + } + + return ret; +} + /** * intel_th_alloc() - allocate a new Intel TH device and its subdevices * @dev: parent device - * @devres: parent's resources - * @ndevres: number of resources + * @devres: resources indexed by 
th_mmio_idx * @irq: irq number */ struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq) + struct resource *devres, unsigned int ndevres) { + int err, r, nr_mmios = 0; struct intel_th *th; - int err, r; - - if (irq == -1) - for (r = 0; r < ndevres; r++) - if (devres[r].flags & IORESOURCE_IRQ) { - irq = devres[r].start; - break; - } th = kzalloc(sizeof(*th), GFP_KERNEL); if (!th) @@ -842,12 +875,33 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, err = th->major; goto err_ida; } + th->irq = -1; th->dev = dev; th->drvdata = drvdata; - th->resource = devres; - th->num_resources = ndevres; - th->irq = irq; + for (r = 0; r < ndevres; r++) + switch (devres[r].flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_MEM: + th->resource[nr_mmios++] = devres[r]; + break; + case IORESOURCE_IRQ: + err = devm_request_irq(dev, devres[r].start, + intel_th_irq, IRQF_SHARED, + dev_name(dev), th); + if (err) + goto err_chrdev; + + if (th->irq == -1) + th->irq = devres[r].start; + th->num_irqs++; + break; + default: + dev_warn(dev, "Unknown resource type %lx\n", + devres[r].flags); + break; + } + + th->num_resources = nr_mmios; dev_set_drvdata(dev, th); @@ -864,6 +918,10 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, return th; +err_chrdev: + __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS, + "intel_th/output"); + err_ida: ida_simple_remove(&intel_th_ida, th->id); @@ -889,6 +947,9 @@ void intel_th_free(struct intel_th *th) th->num_thdevs = 0; + for (i = 0; i < th->num_irqs; i++) + devm_free_irq(th->dev, th->irq + i, th); + pm_runtime_get_sync(th->dev); pm_runtime_forbid(th->dev); @@ -923,6 +984,27 @@ int intel_th_trace_enable(struct intel_th_device *thdev) } EXPORT_SYMBOL_GPL(intel_th_trace_enable); +/** + * intel_th_trace_switch() - execute a switch sequence + * @thdev: output device that requests tracing switch + */ +int intel_th_trace_switch(struct intel_th_device *thdev) +{ + struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return -EINVAL; + + if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; + + hubdrv->trig_switch(hub, &thdev->output); + + return 0; +} +EXPORT_SYMBOL_GPL(intel_th_trace_switch); + /** * intel_th_trace_disable() - disable tracing for an output device * @thdev: output device that requests tracing be disabled diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 8426b7970c148a6104fa6986050db4ea0dbf9cbf..4a77e35173113b177920741585a631b8012652c4 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -308,6 +308,11 @@ static int intel_th_gth_reset(struct gth_device *gth) iowrite32(0, gth->base + REG_GTH_SCR); iowrite32(0xfc, gth->base + REG_GTH_SCR2); + /* setup CTS for single trigger */ + iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN); + iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) | + CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT); + return 0; } @@ -456,6 +461,68 @@ static int intel_th_output_attributes(struct gth_device *gth) return sysfs_create_group(&gth->dev->kobj, &gth->output_group); } +/** + * intel_th_gth_stop() - stop tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * @capture_done: set when no more traces will be captured + * + * This will 
stop tracing using force storeEn off signal and wait for the + pipelines to be empty for the corresponding output port. + */ +static void intel_th_gth_stop(struct gth_device *gth, + struct intel_th_output *output, + bool capture_done) +{ + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + struct intel_th_driver *outdrv = + to_intel_th_driver(outdev->dev.driver); + unsigned long count; + u32 reg; + u32 scr2 = 0xfc | (capture_done ? 1 : 0); + + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(scr2, gth->base + REG_GTH_SCR2); + + /* wait on pipeline empty for the given port */ + for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; + count && !(reg & BIT(output->port)); count--) { + reg = ioread32(gth->base + REG_GTH_STAT); + cpu_relax(); + } + + if (!count) + dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n", + output->port); + + /* wait on output pipeline empty */ + if (outdrv->wait_empty) + outdrv->wait_empty(outdev); + + /* clear force capture done for next captures */ + iowrite32(0xfc, gth->base + REG_GTH_SCR2); +} + +/** + * intel_th_gth_start() - start tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * + * This will start tracing using force storeEn signal. + */ +static void intel_th_gth_start(struct gth_device *gth, + struct intel_th_output *output) +{ + u32 scr = 0xfc0000; + + if (output->multiblock) + scr |= 0xff; + + iowrite32(scr, gth->base + REG_GTH_SCR); + iowrite32(0, gth->base + REG_GTH_SCR2); +} + /** * intel_th_gth_disable() - disable tracing to an output device * @thdev: GTH device @@ -469,7 +536,6 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - unsigned long count; int master; u32 reg; @@ -477,27 +543,12 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, output->active = false; for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS) { + TH_CONFIGURABLE_MASTERS + 1) { gth_master_set(gth, master, -1); } spin_unlock(&gth->gth_lock); - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfd, gth->base + REG_GTH_SCR2); - - /* wait on pipeline empty for the given port */ - for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; - count && !(reg & BIT(output->port)); count--) { - reg = ioread32(gth->base + REG_GTH_STAT); - cpu_relax(); - } - - /* clear force capture done for next captures */ - iowrite32(0xfc, gth->base + REG_GTH_SCR2); - - if (!count) - dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", - output->port); + intel_th_gth_stop(gth, output, true); reg = ioread32(gth->base + REG_GTH_SCRPD0); reg &= ~output->scratchpad; @@ -526,8 +577,8 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, { struct gth_device *gth = dev_get_drvdata(&thdev->dev); struct intel_th *th = to_intel_th(thdev); - u32 scr = 0xfc0000, scrpd; int master; + u32 scrpd; spin_lock(&gth->gth_lock); for_each_set_bit(master, gth->output[output->port].master, @@ -535,9 +586,6 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, gth_master_set(gth, master, output->port); } - if (output->multiblock) - scr |= 0xff; - output->active = true; spin_unlock(&gth->gth_lock); @@ -548,8 +596,41 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, scrpd |= output->scratchpad; iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); - iowrite32(scr, gth->base + REG_GTH_SCR); - iowrite32(0, gth->base + REG_GTH_SCR2); + intel_th_gth_start(gth, output); +} + +/** + * 
intel_th_gth_switch() - execute a switch sequence + * @thdev: GTH device + * @output: output device's descriptor + * + * This will execute a switch sequence that will trigger a switch window + * when tracing to MSC in multi-block mode. + */ +static void intel_th_gth_switch(struct intel_th_device *thdev, + struct intel_th_output *output) +{ + struct gth_device *gth = dev_get_drvdata(&thdev->dev); + unsigned long count; + u32 reg; + + /* trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL); + /* wait on trigger status */ + for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH; + count && !(reg & BIT(4)); count--) { + reg = ioread32(gth->base + REG_CTS_STAT); + cpu_relax(); + } + if (!count) + dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); + + /* De-assert the trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + + intel_th_gth_stop(gth, output, false); + intel_th_gth_start(gth, output); } /** @@ -607,6 +688,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, { struct gth_device *gth = dev_get_drvdata(&thdev->dev); int port = othdev->output.port; + int master; if (thdev->host_mode) return; @@ -615,6 +697,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, othdev->output.port = -1; othdev->output.active = false; gth->output[port].output = NULL; + for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++) + if (gth->master[master] == port) + gth->master[master] = -1; spin_unlock(&gth->gth_lock); } @@ -731,6 +816,7 @@ static struct intel_th_driver intel_th_gth_driver = { .unassign = intel_th_gth_unassign, .set_output = intel_th_gth_set_output, .enable = intel_th_gth_enable, + .trig_switch = intel_th_gth_switch, .disable = intel_th_gth_disable, .driver = { .name = "gth", diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index 6f2b0b9308755ad2bfba17cf085418293ddd1e24..bfcc0fd011774cc2d96791e098174af06726cd42 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h @@ -49,6 +49,12 @@ enum { REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */ + + /* Common Capture Sequencer (CTS) registers */ + REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */ + REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */ + REG_CTS_STAT = 0x32a0, /* cts_status */ + REG_CTS_CTL = 0x32a4, /* cts_control */ }; /* waiting for Pipeline Empty bit(s) to assert for GTH */ @@ -57,4 +63,17 @@ enum { #define TSUCTRL_CTCRESYNC BIT(0) #define TSCUSTAT_CTCSYNCING BIT(1) +/* waiting for Trigger status to assert for CTS */ +#define CTS_TRIG_WAITLOOP_DEPTH 10000 + +#define CTS_EVENT_ENABLE_IF_ANYTHING BIT(31) +#define CTS_ACTION_CONTROL_STATE_OFF 27 +#define CTS_ACTION_CONTROL_SET_STATE(x) \ + (((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF) +#define CTS_ACTION_CONTROL_TRIGGER BIT(4) + +#define CTS_STATE_IDLE 0x10u + +#define CTS_CTL_SEQUENCER_ENABLE BIT(0) + #endif /* __INTEL_TH_GTH_H__ */ diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 780206dc9012c26c534df934b593eaa455bec780..6f4f5486fe6dcccb3f9570cf3c0eb7da24b22b0c 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -8,6 +8,8 @@ #ifndef __INTEL_TH_H__ #define __INTEL_TH_H__ +#include + /* intel_th_device device types */ enum { /* Devices that generate trace data */ @@ -18,6 +20,8 @@ enum { INTEL_TH_SWITCH, }; +struct 
intel_th_device; + /** * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices * @port: output port number, assigned by the switch @@ -25,6 +29,7 @@ enum { * @scratchpad: scratchpad bits to flag when this output is enabled * @multiblock: true for multiblock output configuration * @active: true when this output is enabled + * @wait_empty: wait for device pipeline to be empty * * Output port descriptor, used by switch driver to tell which output * port this output device corresponds to. Filled in at output device's @@ -42,10 +47,12 @@ struct intel_th_output { /** * struct intel_th_drvdata - describes hardware capabilities and quirks * @tscu_enable: device needs SW to enable time stamping unit + * @has_mintctl: device has interrupt control (MINTCTL) register * @host_mode_only: device can only operate in 'host debugger' mode */ struct intel_th_drvdata { unsigned int tscu_enable : 1, + has_mintctl : 1, host_mode_only : 1; }; @@ -157,10 +164,13 @@ struct intel_th_driver { struct intel_th_device *othdev); void (*enable)(struct intel_th_device *thdev, struct intel_th_output *output); + void (*trig_switch)(struct intel_th_device *thdev, + struct intel_th_output *output); void (*disable)(struct intel_th_device *thdev, struct intel_th_output *output); /* output ops */ - void (*irq)(struct intel_th_device *thdev); + irqreturn_t (*irq)(struct intel_th_device *thdev); + void (*wait_empty)(struct intel_th_device *thdev); int (*activate)(struct intel_th_device *thdev); void (*deactivate)(struct intel_th_device *thdev); /* file_operations for those who want a device node */ @@ -213,21 +223,23 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev) struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq); + struct resource *devres, unsigned int ndevres); void intel_th_free(struct intel_th *th); int intel_th_driver_register(struct intel_th_driver *thdrv); void intel_th_driver_unregister(struct intel_th_driver *thdrv); int intel_th_trace_enable(struct intel_th_device *thdev); +int intel_th_trace_switch(struct intel_th_device *thdev); int intel_th_trace_disable(struct intel_th_device *thdev); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master); int intel_th_output_enable(struct intel_th *th, unsigned int otype); -enum { +enum th_mmio_idx { TH_MMIO_CONFIG = 0, - TH_MMIO_SW = 2, + TH_MMIO_SW = 1, + TH_MMIO_RTIT = 2, TH_MMIO_END, }; @@ -237,6 +249,9 @@ enum { #define TH_CONFIGURABLE_MASTERS 256 #define TH_MSC_MAX 2 +/* Maximum IRQ vectors */ +#define TH_NVEC_MAX 8 + /** * struct intel_th - Intel TH controller * @dev: driver core's device @@ -244,8 +259,9 @@ enum { * @hub: "switch" subdevice (GTH) * @resource: resources of the entire controller * @num_thdevs: number of devices in the @thdev array - * @num_resources: number or resources in the @resource array + * @num_resources: number of resources in the @resource array * @irq: irq number + * @num_irqs: number of IRQs is use * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -256,12 +272,13 @@ struct intel_th { struct intel_th_device *hub; struct intel_th_drvdata *drvdata; - struct resource *resource; + struct resource resource[TH_MMIO_END]; int (*activate)(struct intel_th *); void (*deactivate)(struct intel_th *); unsigned int num_thdevs; unsigned int num_resources; int irq; + int num_irqs; int id; int major; @@ -296,6 +313,9 @@ enum { REG_TSCU_OFFSET = 0x2000, 
REG_TSCU_LENGTH = 0x1000, + REG_CTS_OFFSET = 0x3000, + REG_CTS_LENGTH = 0x1000, + /* Software Trace Hub (STH) [0x4000..0x4fff] */ REG_STH_OFFSET = 0x4000, REG_STH_LENGTH = 0x2000, diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c new file mode 100644 index 0000000000000000000000000000000000000000..2c7f5116be126609714bb5f290289447b8b40d76 --- /dev/null +++ b/drivers/hwtracing/intel_th/msu-sink.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * An example software sink buffer for Intel TH MSU. + * + * Copyright (C) 2019 Intel Corporation. + */ + +#include +#include +#include +#include +#include + +#define MAX_SGTS 16 + +struct msu_sink_private { + struct device *dev; + struct sg_table **sgts; + unsigned int nr_sgts; +}; + +static void *msu_sink_assign(struct device *dev, int *mode) +{ + struct msu_sink_private *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + priv->sgts = kcalloc(MAX_SGTS, sizeof(void *), GFP_KERNEL); + if (!priv->sgts) { + kfree(priv); + return NULL; + } + + priv->dev = dev; + *mode = MSC_MODE_MULTI; + + return priv; +} + +static void msu_sink_unassign(void *data) +{ + struct msu_sink_private *priv = data; + + kfree(priv->sgts); + kfree(priv); +} + +/* See also: msc.c: __msc_buffer_win_alloc() */ +static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size) +{ + struct msu_sink_private *priv = data; + unsigned int nents; + struct scatterlist *sg_ptr; + void *block; + int ret, i; + + if (priv->nr_sgts == MAX_SGTS) + return -ENOMEM; + + nents = DIV_ROUND_UP(size, PAGE_SIZE); + + ret = sg_alloc_table(*sgt, nents, GFP_KERNEL); + if (ret) + return -ENOMEM; + + priv->sgts[priv->nr_sgts++] = *sgt; + + for_each_sg((*sgt)->sgl, sg_ptr, nents, i) { + block = dma_alloc_coherent(priv->dev->parent->parent, + PAGE_SIZE, &sg_dma_address(sg_ptr), + GFP_KERNEL); + sg_set_buf(sg_ptr, block, PAGE_SIZE); + } + + return nents; +} + +/* See also: msc.c: __msc_buffer_win_free() */ +static void msu_sink_free_window(void *data, struct sg_table *sgt) +{ + struct msu_sink_private *priv = data; + struct scatterlist *sg_ptr; + int i; + + for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) { + dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE, + sg_virt(sg_ptr), sg_dma_address(sg_ptr)); + } + + sg_free_table(sgt); + priv->nr_sgts--; +} + +static int msu_sink_ready(void *data, struct sg_table *sgt, size_t bytes) +{ + struct msu_sink_private *priv = data; + + intel_th_msc_window_unlock(priv->dev, sgt); + + return 0; +} + +static const struct msu_buffer sink_mbuf = { + .name = "sink", + .assign = msu_sink_assign, + .unassign = msu_sink_unassign, + .alloc_window = msu_sink_alloc_window, + .free_window = msu_sink_free_window, + .ready = msu_sink_ready, +}; + +module_intel_th_msu_buffer(sink_mbuf); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index d293e55553bd690ffc54f97a2b5e6b85916e2865..a0e61b8c966bd83492410f4c051c7b41fa7e9140 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -17,40 +17,63 @@ #include #include #include +#include #include #ifdef CONFIG_X86 #include #endif +#include #include "intel_th.h" #include "msu.h" #define msc_dev(x) (&(x)->thdev->dev) -/** - * struct msc_block - multiblock mode block descriptor - * @bdesc: pointer to hardware descriptor (beginning of the block) - * @addr: physical address of the block +/* + * Lockout state transitions: + * READY -> INUSE -+-> LOCKED -+-> READY 
-> etc. + * \-----------/ + * WIN_READY: window can be used by HW + * WIN_INUSE: window is in use + * WIN_LOCKED: window is filled up and is being processed by the buffer + * handling code + * + * All state transitions happen automatically, except for the LOCKED->READY, + * which needs to be signalled by the buffer code by calling + * intel_th_msc_window_unlock(). + * + * When the interrupt handler has to switch to the next window, it checks + * whether it's READY, and if it is, it performs the switch and tracing + * continues. If it's LOCKED, it stops the trace. */ -struct msc_block { - struct msc_block_desc *bdesc; - dma_addr_t addr; +enum lockout_state { + WIN_READY = 0, + WIN_INUSE, + WIN_LOCKED }; /** * struct msc_window - multiblock mode window descriptor * @entry: window list linkage (msc::win_list) * @pgoff: page offset into the buffer that this window starts at + * @lockout: lockout state, see comment below + * @lo_lock: lockout state serialization * @nr_blocks: number of blocks (pages) in this window - * @block: array of block descriptors + * @nr_segs: number of segments in this window (<= @nr_blocks) + * @_sgt: array of block descriptors + * @sgt: array of block descriptors */ struct msc_window { struct list_head entry; unsigned long pgoff; + enum lockout_state lockout; + spinlock_t lo_lock; unsigned int nr_blocks; + unsigned int nr_segs; struct msc *msc; - struct msc_block block[0]; + struct sg_table _sgt; + struct sg_table *sgt; }; /** @@ -83,7 +106,11 @@ struct msc_iter { * struct msc - MSC device representation * @reg_base: register window base address * @thdev: intel_th_device pointer + * @mbuf: MSU buffer, if assigned + * @mbuf_priv MSU buffer's private data, if @mbuf * @win_list: list of windows in multiblock mode + * @single_sgt: single mode buffer + * @cur_win: current window * @nr_pages: total number of pages allocated for this buffer * @single_sz: amount of data in single mode * @single_wrap: single mode wrap occurred @@ -101,9 +128,16 @@ struct msc_iter { */ struct msc { void __iomem *reg_base; + void __iomem *msu_base; struct intel_th_device *thdev; + const struct msu_buffer *mbuf; + void *mbuf_priv; + + struct work_struct work; struct list_head win_list; + struct sg_table single_sgt; + struct msc_window *cur_win; unsigned long nr_pages; unsigned long single_sz; unsigned int single_wrap : 1; @@ -120,12 +154,108 @@ struct msc { /* config */ unsigned int enabled : 1, - wrap : 1; + wrap : 1, + do_irq : 1; unsigned int mode; unsigned int burst_len; unsigned int index; }; +static LIST_HEAD(msu_buffer_list); +static DEFINE_MUTEX(msu_buffer_mutex); + +/** + * struct msu_buffer_entry - internal MSU buffer bookkeeping + * @entry: link to msu_buffer_list + * @mbuf: MSU buffer object + * @owner: module that provides this MSU buffer + */ +struct msu_buffer_entry { + struct list_head entry; + const struct msu_buffer *mbuf; + struct module *owner; +}; + +static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name) +{ + struct msu_buffer_entry *mbe; + + lockdep_assert_held(&msu_buffer_mutex); + + list_for_each_entry(mbe, &msu_buffer_list, entry) { + if (!strcmp(mbe->mbuf->name, name)) + return mbe; + } + + return NULL; +} + +static const struct msu_buffer * +msu_buffer_get(const char *name) +{ + struct msu_buffer_entry *mbe; + + mutex_lock(&msu_buffer_mutex); + mbe = __msu_buffer_entry_find(name); + if (mbe && !try_module_get(mbe->owner)) + mbe = NULL; + mutex_unlock(&msu_buffer_mutex); + + return mbe ? 
mbe->mbuf : NULL; +} + +static void msu_buffer_put(const struct msu_buffer *mbuf) +{ + struct msu_buffer_entry *mbe; + + mutex_lock(&msu_buffer_mutex); + mbe = __msu_buffer_entry_find(mbuf->name); + if (mbe) + module_put(mbe->owner); + mutex_unlock(&msu_buffer_mutex); +} + +int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, + struct module *owner) +{ + struct msu_buffer_entry *mbe; + int ret = 0; + + mbe = kzalloc(sizeof(*mbe), GFP_KERNEL); + if (!mbe) + return -ENOMEM; + + mutex_lock(&msu_buffer_mutex); + if (__msu_buffer_entry_find(mbuf->name)) { + ret = -EEXIST; + kfree(mbe); + goto unlock; + } + + mbe->mbuf = mbuf; + mbe->owner = owner; + list_add_tail(&mbe->entry, &msu_buffer_list); +unlock: + mutex_unlock(&msu_buffer_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register); + +void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf) +{ + struct msu_buffer_entry *mbe; + + mutex_lock(&msu_buffer_mutex); + mbe = __msu_buffer_entry_find(mbuf->name); + if (mbe) { + list_del(&mbe->entry); + kfree(mbe); + } + mutex_unlock(&msu_buffer_mutex); +} +EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister); + static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) { /* header hasn't been written */ @@ -139,20 +269,87 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) return false; } +static inline struct msc_block_desc * +msc_win_block(struct msc_window *win, unsigned int block) +{ + return sg_virt(&win->sgt->sgl[block]); +} + +static inline size_t +msc_win_actual_bsz(struct msc_window *win, unsigned int block) +{ + return win->sgt->sgl[block].length; +} + +static inline dma_addr_t +msc_win_baddr(struct msc_window *win, unsigned int block) +{ + return sg_dma_address(&win->sgt->sgl[block]); +} + +static inline unsigned long +msc_win_bpfn(struct msc_window *win, unsigned int block) +{ + return msc_win_baddr(win, block) >> PAGE_SHIFT; +} + /** - * msc_oldest_window() - locate the window with oldest data - * @msc: MSC device + * msc_is_last_win() - check if a window is the last one for a given MSC + * @win: window + * Return: true if @win is the last window in MSC's multiblock buffer + */ +static inline bool msc_is_last_win(struct msc_window *win) +{ + return win->entry.next == &win->msc->win_list; +} + +/** + * msc_next_window() - return next window in the multiblock buffer + * @win: current window * - * This should only be used in multiblock mode. Caller should hold the - * msc::user_count reference. + * Return: window following the current one + */ +static struct msc_window *msc_next_window(struct msc_window *win) +{ + if (msc_is_last_win(win)) + return list_first_entry(&win->msc->win_list, struct msc_window, + entry); + + return list_next_entry(win, entry); +} + +static size_t msc_win_total_sz(struct msc_window *win) +{ + unsigned int blk; + size_t size = 0; + + for (blk = 0; blk < win->nr_segs; blk++) { + struct msc_block_desc *bdesc = msc_win_block(win, blk); + + if (msc_block_wrapped(bdesc)) + return (size_t)win->nr_blocks << PAGE_SHIFT; + + size += msc_total_sz(bdesc); + if (msc_block_last_written(bdesc)) + break; + } + + return size; +} + +/** + * msc_find_window() - find a window matching a given sg_table + * @msc: MSC device + * @sgt: SG table of the window + * @nonempty: skip over empty windows * - * Return: the oldest window with valid data + * Return: MSC window structure pointer or NULL if the window + * could not be found. 
*/ -static struct msc_window *msc_oldest_window(struct msc *msc) +static struct msc_window * +msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty) { struct msc_window *win; - u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); - unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT; unsigned int found = 0; if (list_empty(&msc->win_list)) @@ -164,18 +361,41 @@ static struct msc_window *msc_oldest_window(struct msc *msc) * something like 2, in which case we're good */ list_for_each_entry(win, &msc->win_list, entry) { - if (win->block[0].addr == win_addr) + if (win->sgt == sgt) found++; /* skip the empty ones */ - if (msc_block_is_empty(win->block[0].bdesc)) + if (nonempty && msc_block_is_empty(msc_win_block(win, 0))) continue; if (found) return win; } - return list_entry(msc->win_list.next, struct msc_window, entry); + return NULL; +} + +/** + * msc_oldest_window() - locate the window with oldest data + * @msc: MSC device + * + * This should only be used in multiblock mode. Caller should hold the + * msc::user_count reference. + * + * Return: the oldest window with valid data + */ +static struct msc_window *msc_oldest_window(struct msc *msc) +{ + struct msc_window *win; + + if (list_empty(&msc->win_list)) + return NULL; + + win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true); + if (win) + return win; + + return list_first_entry(&msc->win_list, struct msc_window, entry); } /** @@ -187,7 +407,7 @@ static struct msc_window *msc_oldest_window(struct msc *msc) static unsigned int msc_win_oldest_block(struct msc_window *win) { unsigned int blk; - struct msc_block_desc *bdesc = win->block[0].bdesc; + struct msc_block_desc *bdesc = msc_win_block(win, 0); /* without wrapping, first block is the oldest */ if (!msc_block_wrapped(bdesc)) @@ -197,8 +417,8 @@ static unsigned int msc_win_oldest_block(struct msc_window *win) * with wrapping, last written block contains both the newest and the * oldest data for this window. 
*/ - for (blk = 0; blk < win->nr_blocks; blk++) { - bdesc = win->block[blk].bdesc; + for (blk = 0; blk < win->nr_segs; blk++) { + bdesc = msc_win_block(win, blk); if (msc_block_last_written(bdesc)) return blk; @@ -207,34 +427,9 @@ static unsigned int msc_win_oldest_block(struct msc_window *win) return 0; } -/** - * msc_is_last_win() - check if a window is the last one for a given MSC - * @win: window - * Return: true if @win is the last window in MSC's multiblock buffer - */ -static inline bool msc_is_last_win(struct msc_window *win) -{ - return win->entry.next == &win->msc->win_list; -} - -/** - * msc_next_window() - return next window in the multiblock buffer - * @win: current window - * - * Return: window following the current one - */ -static struct msc_window *msc_next_window(struct msc_window *win) -{ - if (msc_is_last_win(win)) - return list_entry(win->msc->win_list.next, struct msc_window, - entry); - - return list_entry(win->entry.next, struct msc_window, entry); -} - static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) { - return iter->win->block[iter->block].bdesc; + return msc_win_block(iter->win, iter->block); } static void msc_iter_init(struct msc_iter *iter) @@ -354,7 +549,7 @@ static int msc_iter_block_advance(struct msc_iter *iter) return msc_iter_win_advance(iter); /* block advance */ - if (++iter->block == iter->win->nr_blocks) + if (++iter->block == iter->win->nr_segs) iter->block = 0; /* no wrapping, sanity check in case there is no last written block */ @@ -466,14 +661,90 @@ static void msc_buffer_clear_hw_header(struct msc *msc) size_t hw_sz = sizeof(struct msc_block_desc) - offsetof(struct msc_block_desc, hw_tag); - for (blk = 0; blk < win->nr_blocks; blk++) { - struct msc_block_desc *bdesc = win->block[blk].bdesc; + for (blk = 0; blk < win->nr_segs; blk++) { + struct msc_block_desc *bdesc = msc_win_block(win, blk); memset(&bdesc->hw_tag, 0, hw_sz); } } } +static int intel_th_msu_init(struct msc *msc) +{ + u32 mintctl, msusts; + + if (!msc->do_irq) + return 0; + + if (!msc->mbuf) + return 0; + + mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); + mintctl |= msc->index ? M1BLIE : M0BLIE; + iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); + if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) { + dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n"); + msc->do_irq = 0; + return 0; + } + + msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); + iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); + + return 0; +} + +static void intel_th_msu_deinit(struct msc *msc) +{ + u32 mintctl; + + if (!msc->do_irq) + return; + + mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); + mintctl &= msc->index ? 
~M1BLIE : ~M0BLIE; + iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); +} + +static int msc_win_set_lockout(struct msc_window *win, + enum lockout_state expect, + enum lockout_state new) +{ + enum lockout_state old; + unsigned long flags; + int ret = 0; + + if (!win->msc->mbuf) + return 0; + + spin_lock_irqsave(&win->lo_lock, flags); + old = win->lockout; + + if (old != expect) { + ret = -EINVAL; + goto unlock; + } + + win->lockout = new; + +unlock: + spin_unlock_irqrestore(&win->lo_lock, flags); + + if (ret) { + if (expect == WIN_READY && old == WIN_LOCKED) + return -EBUSY; + + /* from intel_th_msc_window_unlock(), don't warn if not locked */ + if (expect == WIN_LOCKED && old == new) + return 0; + + dev_warn_ratelimited(msc_dev(win->msc), + "expected lockout state %d, got %d\n", + expect, old); + } + + return ret; +} /** * msc_configure() - set up MSC hardware * @msc: the MSC device to configure @@ -491,8 +762,12 @@ static int msc_configure(struct msc *msc) if (msc->mode > MSC_MODE_MULTI) return -ENOTSUPP; - if (msc->mode == MSC_MODE_MULTI) - msc_buffer_clear_hw_header(msc); + if (msc->mode == MSC_MODE_MULTI) { + if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) + return -EBUSY; + + msc_buffer_clear_hw_header(msc); + } reg = msc->base_addr >> PAGE_SHIFT; iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); @@ -514,10 +789,14 @@ static int msc_configure(struct msc *msc) iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); + intel_th_msu_init(msc); + msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; intel_th_trace_enable(msc->thdev); msc->enabled = 1; + if (msc->mbuf && msc->mbuf->activate) + msc->mbuf->activate(msc->mbuf_priv); return 0; } @@ -531,23 +810,21 @@ static int msc_configure(struct msc *msc) */ static void msc_disable(struct msc *msc) { - unsigned long count; + struct msc_window *win = msc->cur_win; u32 reg; lockdep_assert_held(&msc->buf_mutex); - intel_th_trace_disable(msc->thdev); - - for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; - count && !(reg & MSCSTS_PLE); count--) { - reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); - cpu_relax(); - } + if (msc->mode == MSC_MODE_MULTI) + msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); - if (!count) - dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); + if (msc->mbuf && msc->mbuf->deactivate) + msc->mbuf->deactivate(msc->mbuf_priv); + intel_th_msu_deinit(msc); + intel_th_trace_disable(msc->thdev); if (msc->mode == MSC_MODE_SINGLE) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); @@ -559,6 +836,11 @@ static void msc_disable(struct msc *msc) reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); reg &= ~MSC_EN; iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); + + if (msc->mbuf && msc->mbuf->ready) + msc->mbuf->ready(msc->mbuf_priv, win->sgt, + msc_win_total_sz(win)); + msc->enabled = 0; iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); @@ -569,6 +851,10 @@ static void msc_disable(struct msc *msc) reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); + + reg = ioread32(msc->reg_base + REG_MSU_MSUSTS); + reg &= msc->index ? 
MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; + iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS); } static int intel_th_msc_activate(struct intel_th_device *thdev) @@ -617,22 +903,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev) */ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) { + unsigned long nr_pages = size >> PAGE_SHIFT; unsigned int order = get_order(size); struct page *page; + int ret; if (!size) return 0; - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL); + if (ret) + goto err_out; + + ret = -ENOMEM; + page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order); if (!page) - return -ENOMEM; + goto err_free_sgt; split_page(page, order); - msc->nr_pages = size >> PAGE_SHIFT; + sg_set_buf(msc->single_sgt.sgl, page_address(page), size); + + ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1, + DMA_FROM_DEVICE); + if (ret < 0) + goto err_free_pages; + + msc->nr_pages = nr_pages; msc->base = page_address(page); - msc->base_addr = page_to_phys(page); + msc->base_addr = sg_dma_address(msc->single_sgt.sgl); return 0; + +err_free_pages: + __free_pages(page, order); + +err_free_sgt: + sg_free_table(&msc->single_sgt); + +err_out: + return ret; } /** @@ -643,6 +952,10 @@ static void msc_buffer_contig_free(struct msc *msc) { unsigned long off; + dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, + 1, DMA_FROM_DEVICE); + sg_free_table(&msc->single_sgt); + for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { struct page *page = virt_to_page(msc->base + off); @@ -669,6 +982,64 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc, return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); } +static int __msc_buffer_win_alloc(struct msc_window *win, + unsigned int nr_segs) +{ + struct scatterlist *sg_ptr; + void *block; + int i, ret; + + ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL); + if (ret) + return -ENOMEM; + + for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { + block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent, + PAGE_SIZE, &sg_dma_address(sg_ptr), + GFP_KERNEL); + if (!block) + goto err_nomem; + + sg_set_buf(sg_ptr, block, PAGE_SIZE); + } + + return nr_segs; + +err_nomem: + for (i--; i >= 0; i--) + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, + msc_win_block(win, i), + msc_win_baddr(win, i)); + + sg_free_table(win->sgt); + + return -ENOMEM; +} + +#ifdef CONFIG_X86 +static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) +{ + int i; + + for (i = 0; i < nr_segs; i++) + /* Set the page as uncached */ + set_memory_uc((unsigned long)msc_win_block(win, i), 1); +} + +static void msc_buffer_set_wb(struct msc_window *win) +{ + int i; + + for (i = 0; i < win->nr_segs; i++) + /* Reset the page to write-back */ + set_memory_wb((unsigned long)msc_win_block(win, i), 1); +} +#else /* !X86 */ +static inline void +msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {} +static inline void msc_buffer_set_wb(struct msc_window *win) {} +#endif /* CONFIG_X86 */ + /** * msc_buffer_win_alloc() - alloc a window for a multiblock mode * @msc: MSC device @@ -682,44 +1053,53 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc, static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) { struct msc_window *win; - unsigned long size = PAGE_SIZE; - int i, ret = -ENOMEM; + int ret = -ENOMEM; if (!nr_blocks) return 0; - win = kzalloc(offsetof(struct msc_window, 
block[nr_blocks]), - GFP_KERNEL); + /* + * This limitation hold as long as we need random access to the + * block. When that changes, this can go away. + */ + if (nr_blocks > SG_MAX_SINGLE_ALLOC) + return -EINVAL; + + win = kzalloc(sizeof(*win), GFP_KERNEL); if (!win) return -ENOMEM; + win->msc = msc; + win->sgt = &win->_sgt; + win->lockout = WIN_READY; + spin_lock_init(&win->lo_lock); + if (!list_empty(&msc->win_list)) { - struct msc_window *prev = list_entry(msc->win_list.prev, - struct msc_window, entry); + struct msc_window *prev = list_last_entry(&msc->win_list, + struct msc_window, + entry); win->pgoff = prev->pgoff + prev->nr_blocks; } - for (i = 0; i < nr_blocks; i++) { - win->block[i].bdesc = - dma_alloc_coherent(msc_dev(msc)->parent->parent, size, - &win->block[i].addr, GFP_KERNEL); + if (msc->mbuf && msc->mbuf->alloc_window) + ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt, + nr_blocks << PAGE_SHIFT); + else + ret = __msc_buffer_win_alloc(win, nr_blocks); - if (!win->block[i].bdesc) - goto err_nomem; + if (ret <= 0) + goto err_nomem; -#ifdef CONFIG_X86 - /* Set the page as uncached */ - set_memory_uc((unsigned long)win->block[i].bdesc, 1); -#endif - } + msc_buffer_set_uc(win, ret); - win->msc = msc; + win->nr_segs = ret; win->nr_blocks = nr_blocks; if (list_empty(&msc->win_list)) { - msc->base = win->block[0].bdesc; - msc->base_addr = win->block[0].addr; + msc->base = msc_win_block(win, 0); + msc->base_addr = msc_win_baddr(win, 0); + msc->cur_win = win; } list_add_tail(&win->entry, &msc->win_list); @@ -728,19 +1108,25 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) return 0; err_nomem: - for (i--; i >= 0; i--) { -#ifdef CONFIG_X86 - /* Reset the page to write-back before releasing */ - set_memory_wb((unsigned long)win->block[i].bdesc, 1); -#endif - dma_free_coherent(msc_dev(msc)->parent->parent, size, - win->block[i].bdesc, win->block[i].addr); - } kfree(win); return ret; } +static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) +{ + int i; + + for (i = 0; i < win->nr_segs; i++) { + struct page *page = sg_page(&win->sgt->sgl[i]); + + page->mapping = NULL; + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, + msc_win_block(win, i), msc_win_baddr(win, i)); + } + sg_free_table(win->sgt); +} + /** * msc_buffer_win_free() - free a window from MSC's window list * @msc: MSC device @@ -751,8 +1137,6 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) */ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) { - int i; - msc->nr_pages -= win->nr_blocks; list_del(&win->entry); @@ -761,17 +1145,12 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) msc->base_addr = 0; } - for (i = 0; i < win->nr_blocks; i++) { - struct page *page = virt_to_page(win->block[i].bdesc); + msc_buffer_set_wb(win); - page->mapping = NULL; -#ifdef CONFIG_X86 - /* Reset the page to write-back before releasing */ - set_memory_wb((unsigned long)win->block[i].bdesc, 1); -#endif - dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, - win->block[i].bdesc, win->block[i].addr); - } + if (msc->mbuf && msc->mbuf->free_window) + msc->mbuf->free_window(msc->mbuf_priv, win->sgt); + else + __msc_buffer_win_free(msc, win); kfree(win); } @@ -798,35 +1177,32 @@ static void msc_buffer_relink(struct msc *msc) */ if (msc_is_last_win(win)) { sw_tag |= MSC_SW_TAG_LASTWIN; - next_win = list_entry(msc->win_list.next, - struct msc_window, entry); + next_win = list_first_entry(&msc->win_list, 
+ struct msc_window, entry); } else { - next_win = list_entry(win->entry.next, - struct msc_window, entry); + next_win = list_next_entry(win, entry); } - for (blk = 0; blk < win->nr_blocks; blk++) { - struct msc_block_desc *bdesc = win->block[blk].bdesc; + for (blk = 0; blk < win->nr_segs; blk++) { + struct msc_block_desc *bdesc = msc_win_block(win, blk); memset(bdesc, 0, sizeof(*bdesc)); - bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT; + bdesc->next_win = msc_win_bpfn(next_win, 0); /* * Similarly to last window, last block should point * to the first one. */ - if (blk == win->nr_blocks - 1) { + if (blk == win->nr_segs - 1) { sw_tag |= MSC_SW_TAG_LASTBLK; - bdesc->next_blk = - win->block[0].addr >> PAGE_SHIFT; + bdesc->next_blk = msc_win_bpfn(win, 0); } else { - bdesc->next_blk = - win->block[blk + 1].addr >> PAGE_SHIFT; + bdesc->next_blk = msc_win_bpfn(win, blk + 1); } bdesc->sw_tag = sw_tag; - bdesc->block_sz = PAGE_SIZE / 64; + bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64; } } @@ -985,6 +1361,7 @@ static int msc_buffer_free_unless_used(struct msc *msc) static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) { struct msc_window *win; + unsigned int blk; if (msc->mode == MSC_MODE_SINGLE) return msc_buffer_contig_get_page(msc, pgoff); @@ -997,7 +1374,18 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) found: pgoff -= win->pgoff; - return virt_to_page(win->block[pgoff].bdesc); + + for (blk = 0; blk < win->nr_segs; blk++) { + struct page *page = sg_page(&win->sgt->sgl[blk]); + size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk)); + + if (pgoff < pgsz) + return page + pgoff; + + pgoff -= pgsz; + } + + return NULL; } /** @@ -1250,6 +1638,22 @@ static const struct file_operations intel_th_msc_fops = { .owner = THIS_MODULE, }; +static void intel_th_msc_wait_empty(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + unsigned long count; + u32 reg; + + for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; + count && !(reg & MSCSTS_PLE); count--) { + reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS); + cpu_relax(); + } + + if (!count) + dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); +} + static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1); @@ -1266,6 +1670,102 @@ static int intel_th_msc_init(struct msc *msc) return 0; } +static int msc_win_switch(struct msc *msc) +{ + struct msc_window *first; + + if (list_empty(&msc->win_list)) + return -EINVAL; + + first = list_first_entry(&msc->win_list, struct msc_window, entry); + + if (msc_is_last_win(msc->cur_win)) + msc->cur_win = first; + else + msc->cur_win = list_next_entry(msc->cur_win, entry); + + msc->base = msc_win_block(msc->cur_win, 0); + msc->base_addr = msc_win_baddr(msc->cur_win, 0); + + intel_th_trace_switch(msc->thdev); + + return 0; +} + +/** + * intel_th_msc_window_unlock - put the window back in rotation + * @dev: MSC device to which this relates + * @sgt: buffer's sg_table for the window, does nothing if NULL + */ +void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt) +{ + struct msc *msc = dev_get_drvdata(dev); + struct msc_window *win; + + if (!sgt) + return; + + win = msc_find_window(msc, sgt, false); + if (!win) + return; + + msc_win_set_lockout(win, WIN_LOCKED, WIN_READY); +} +EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock); + +static void msc_work(struct work_struct *work) +{ + struct msc *msc = container_of(work, struct msc, work); + + intel_th_msc_deactivate(msc->thdev); +} + 
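Editor's note: the lockout comment and intel_th_msc_window_unlock() above define the contract for external MSU buffers: a window handed to the buffer's ->ready() callback stays LOCKED, and if the hardware cycles back to it before it is released, tracing stops; the consumer must eventually call intel_th_msc_window_unlock() to put the window back in rotation. The fragment below is a minimal, hypothetical sketch of that flow, not part of the patch: the my_sink_* names are invented, only the msu_buffer ->ready() signature and intel_th_msc_window_unlock() come from the code above, it tracks a single outstanding window for brevity, and it assumes the work item was set up with INIT_WORK() in the buffer's ->assign() callback. Since ->ready() may be invoked from the MSC interrupt handler (see intel_th_msc_interrupt() below), the actual processing is deferred to process context.

#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/intel_th.h>	/* assumed location of the msu_buffer interface */

struct my_sink {
	struct device *dev;		/* MSC device passed to ->assign() */
	struct work_struct work;	/* assumed INIT_WORK()ed in ->assign() */
	struct sg_table *pending;	/* window handed over by ->ready() */
};

static void my_sink_work(struct work_struct *work)
{
	struct my_sink *sink = container_of(work, struct my_sink, work);

	/* ... consume the trace data described by sink->pending ... */

	/* LOCKED -> READY: put the window back in rotation */
	intel_th_msc_window_unlock(sink->dev, sink->pending);
}

static int my_sink_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
	struct my_sink *sink = priv;

	/* may run in interrupt context; defer the actual processing */
	sink->pending = sgt;
	schedule_work(&sink->work);

	return 0;
}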
+static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); + u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; + struct msc_window *win, *next_win; + + if (!msc->do_irq || !msc->mbuf) + return IRQ_NONE; + + msusts &= mask; + + if (!msusts) + return msc->enabled ? IRQ_HANDLED : IRQ_NONE; + + iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); + + if (!msc->enabled) + return IRQ_NONE; + + /* grab the window before we do the switch */ + win = msc->cur_win; + if (!win) + return IRQ_HANDLED; + next_win = msc_next_window(win); + if (!next_win) + return IRQ_HANDLED; + + /* next window: if READY, proceed, if LOCKED, stop the trace */ + if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) { + schedule_work(&msc->work); + return IRQ_HANDLED; + } + + /* current window: INUSE -> LOCKED */ + msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); + + msc_win_switch(msc); + + if (msc->mbuf && msc->mbuf->ready) + msc->mbuf->ready(msc->mbuf_priv, win->sgt, + msc_win_total_sz(win)); + + return IRQ_HANDLED; +} + static const char * const msc_mode[] = { [MSC_MODE_SINGLE] = "single", [MSC_MODE_MULTI] = "multi", @@ -1300,21 +1800,43 @@ wrap_store(struct device *dev, struct device_attribute *attr, const char *buf, static DEVICE_ATTR_RW(wrap); +static void msc_buffer_unassign(struct msc *msc) +{ + lockdep_assert_held(&msc->buf_mutex); + + if (!msc->mbuf) + return; + + msc->mbuf->unassign(msc->mbuf_priv); + msu_buffer_put(msc->mbuf); + msc->mbuf_priv = NULL; + msc->mbuf = NULL; +} + static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct msc *msc = dev_get_drvdata(dev); + const char *mode = msc_mode[msc->mode]; + ssize_t ret; + + mutex_lock(&msc->buf_mutex); + if (msc->mbuf) + mode = msc->mbuf->name; + ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode); + mutex_unlock(&msc->buf_mutex); - return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]); + return ret; } static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { + const struct msu_buffer *mbuf = NULL; struct msc *msc = dev_get_drvdata(dev); size_t len = size; - char *cp; + char *cp, *mode; int i, ret; if (!capable(CAP_SYS_RAWIO)) @@ -1324,17 +1846,64 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf, if (cp) len = cp - buf; - for (i = 0; i < ARRAY_SIZE(msc_mode); i++) - if (!strncmp(msc_mode[i], buf, len)) - goto found; + mode = kstrndup(buf, len, GFP_KERNEL); + if (!mode) + return -ENOMEM; + + i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode); + if (i >= 0) { + kfree(mode); + goto found; + } + + /* Buffer sinks only work with a usable IRQ */ + if (!msc->do_irq) { + kfree(mode); + return -EINVAL; + } + + mbuf = msu_buffer_get(mode); + kfree(mode); + if (mbuf) + goto found; return -EINVAL; found: mutex_lock(&msc->buf_mutex); + ret = 0; + + /* Same buffer: do nothing */ + if (mbuf && mbuf == msc->mbuf) { + /* put the extra reference we just got */ + msu_buffer_put(mbuf); + goto unlock; + } + ret = msc_buffer_unlocked_free_unless_used(msc); - if (!ret) - msc->mode = i; + if (ret) + goto unlock; + + if (mbuf) { + void *mbuf_priv = mbuf->assign(dev, &i); + + if (!mbuf_priv) { + ret = -ENOMEM; + goto unlock; + } + + msc_buffer_unassign(msc); + msc->mbuf_priv = mbuf_priv; + msc->mbuf = mbuf; + } else { + msc_buffer_unassign(msc); + } + + msc->mode = i; + +unlock: + if (ret && mbuf) + msu_buffer_put(mbuf); 
mutex_unlock(&msc->buf_mutex); return ret ? ret : size; @@ -1423,7 +1992,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, if (!end) break; - len -= end - p; + /* consume the number and the following comma, hence +1 */ + len -= end - p + 1; p = end + 1; } while (len); @@ -1439,10 +2009,42 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(nr_pages); +static ssize_t +win_switch_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct msc *msc = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val != 1) + return -EINVAL; + + ret = -EINVAL; + mutex_lock(&msc->buf_mutex); + /* + * Window switch can only happen in the "multi" mode. + * If a external buffer is engaged, they have the full + * control over window switching. + */ + if (msc->mode == MSC_MODE_MULTI && !msc->mbuf) + ret = msc_win_switch(msc); + mutex_unlock(&msc->buf_mutex); + + return ret ? ret : size; +} + +static DEVICE_ATTR_WO(win_switch); + static struct attribute *msc_output_attrs[] = { &dev_attr_wrap.attr, &dev_attr_mode.attr, &dev_attr_nr_pages.attr, + &dev_attr_win_switch.attr, NULL, }; @@ -1470,11 +2072,17 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (!msc) return -ENOMEM; + res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1); + if (!res) + msc->do_irq = 1; + msc->index = thdev->id; msc->thdev = thdev; msc->reg_base = base + msc->index * 0x100; + msc->msu_base = base; + INIT_WORK(&msc->work, msc_work); err = intel_th_msc_init(msc); if (err) return err; @@ -1503,6 +2111,8 @@ static void intel_th_msc_remove(struct intel_th_device *thdev) static struct intel_th_driver intel_th_msc_driver = { .probe = intel_th_msc_probe, .remove = intel_th_msc_remove, + .irq = intel_th_msc_interrupt, + .wait_empty = intel_th_msc_wait_empty, .activate = intel_th_msc_activate, .deactivate = intel_th_msc_deactivate, .fops = &intel_th_msc_fops, diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h index 9cc8aced6116ac863bfdbd5beb4e2634829a54be..3f527dd4d72711eea1c0f1e2162286ad62cfa85a 100644 --- a/drivers/hwtracing/intel_th/msu.h +++ b/drivers/hwtracing/intel_th/msu.h @@ -11,6 +11,7 @@ enum { REG_MSU_MSUPARAMS = 0x0000, REG_MSU_MSUSTS = 0x0008, + REG_MSU_MINTCTL = 0x0004, /* MSU-global interrupt control */ REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */ REG_MSU_MSC0STS = 0x0104, /* MSC0 status */ REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */ @@ -28,6 +29,8 @@ enum { /* MSUSTS bits */ #define MSUSTS_MSU_INT BIT(0) +#define MSUSTS_MSC0BLAST BIT(16) +#define MSUSTS_MSC1BLAST BIT(24) /* MSCnCTL bits */ #define MSC_EN BIT(0) @@ -36,13 +39,10 @@ enum { #define MSC_MODE (BIT(4) | BIT(5)) #define MSC_LEN (BIT(8) | BIT(9) | BIT(10)) -/* MSC operating modes (MSC_MODE) */ -enum { - MSC_MODE_SINGLE = 0, - MSC_MODE_MULTI, - MSC_MODE_EXI, - MSC_MODE_DEBUG, -}; +/* MINTCTL bits */ +#define MICDE BIT(0) +#define M0BLIE BIT(16) +#define M1BLIE BIT(24) /* MSCnSTS bits */ #define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */ @@ -85,9 +85,19 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc) return bdesc->valid_dw * 4 - MSC_BDESC; } +static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc) +{ + return bdesc->valid_dw * 4; +} + +static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc) +{ + return bdesc->block_sz * 64 - MSC_BDESC; +} + static inline bool msc_block_wrapped(struct 
msc_block_desc *bdesc) { - if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP) + if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP)) return true; return false; @@ -96,7 +106,7 @@ static inline bool msc_block_wrapped(struct msc_block_desc *bdesc) static inline bool msc_block_last_written(struct msc_block_desc *bdesc) { if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) || - (msc_data_sz(bdesc) != DATA_IN_PAGE)) + (msc_data_sz(bdesc) != msc_block_sz(bdesc))) return true; return false; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 1cf6290d643555d1cd9edfd114f2756b66d1b4bb..e9d90b53bbc46325f199a9d684cdf9f5e12430cc 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -17,7 +17,13 @@ #define DRIVER_NAME "intel_th_pci" -#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW)) +enum { + TH_PCI_CONFIG_BAR = 0, + TH_PCI_STH_SW_BAR = 2, + TH_PCI_RTIT_BAR = 4, +}; + +#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR)) #define PCI_REG_NPKDSC 0x80 #define NPKDSC_TSACT BIT(5) @@ -66,8 +72,12 @@ static int intel_th_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_th_drvdata *drvdata = (void *)id->driver_data; + struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = { + [TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR], + [TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR], + }; + int err, r = TH_MMIO_SW + 1, i; struct intel_th *th; - int err; err = pcim_enable_device(pdev); if (err) @@ -77,8 +87,19 @@ static int intel_th_pci_probe(struct pci_dev *pdev, if (err) return err; - th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource, - DEVICE_COUNT_RESOURCE, pdev->irq); + if (pdev->resource[TH_PCI_RTIT_BAR].start) { + resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR]; + r++; + } + + err = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES); + if (err > 0) + for (i = 0; i < err; i++, r++) { + resource[r].flags = IORESOURCE_IRQ; + resource[r].start = pci_irq_vector(pdev, i); + } + + th = intel_th_alloc(&pdev->dev, drvdata, resource, r); if (IS_ERR(th)) return PTR_ERR(th); @@ -95,10 +116,13 @@ static void intel_th_pci_remove(struct pci_dev *pdev) struct intel_th *th = pci_get_drvdata(pdev); intel_th_free(th); + + pci_free_irq_vectors(pdev); } static const struct intel_th_drvdata intel_th_2x = { .tscu_enable = 1, + .has_mintctl = 1, }; static const struct pci_device_id intel_th_pci_id_table[] = { @@ -140,6 +164,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6), .driver_data = (kernel_ulong_t)0, }, + { + /* Lewisburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226), + .driver_data = (kernel_ulong_t)0, + }, { /* Gemini Lake */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), @@ -165,6 +194,51 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Comet Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Comet Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Comet Lake PCH-V */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Ice Lake NNPI */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Ice Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* 
Tiger Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Tiger Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Jasper Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Elkhart Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c index 56694339cb068591d255666244a7875a1c43be6e..0da6b787f55367ff0a1e07b07e4da673c2656be4 100644 --- a/drivers/hwtracing/intel_th/pti.c +++ b/drivers/hwtracing/intel_th/pti.c @@ -272,19 +272,17 @@ static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct pti_device *pti = dev_get_drvdata(dev); - ssize_t ret = -EINVAL; int i; - for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++) - if (sysfs_streq(buf, lpp_dest_str[i])) - break; + i = sysfs_match_string(lpp_dest_str, buf); + if (i < 0) + return i; - if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) { - pti->lpp_dest = i; - ret = size; - } + if (!(pti->lpp_dest_mask & BIT(i))) + return -EINVAL; - return ret; + pti->lpp_dest = i; + return size; } static DEVICE_ATTR_RW(lpp_dest); diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 4b7ae47789d215d77ec834c243d2d1c4994e16c6..3a1f4e65037841eb8a878c3f1fc4177483566c52 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -84,8 +84,12 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data, /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */ case STP_PACKET_GERR: reg += 4; + /* fall through */ + case STP_PACKET_XSYNC: reg += 8; + /* fall through */ + case STP_PACKET_TRIG: if (flags & STP_PACKET_TIMESTAMPED) reg += 4; diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 10bcb5d73f90ed45119f473b63ec3d6668c3c073..9bb85d20934a098f9a7f6f9ddd42bc74ee5b508b 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx) static int stp_master_alloc(struct stm_device *stm, unsigned int idx) { struct stp_master *master; - size_t size; - size = ALIGN(stm->data->sw_nchannels, 8) / 8; - size += sizeof(struct stp_master); - master = kzalloc(size, GFP_ATOMIC); + master = kzalloc(struct_size(master, chan_map, + BITS_TO_LONGS(stm->data->sw_nchannels)), + GFP_ATOMIC); if (!master) return -ENOMEM; @@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) bitmap_release_region(&master->chan_map[0], output->channel, ilog2(output->nr_chans)); - output->nr_chans = 0; master->nr_free += output->nr_chans; + output->nr_chans = 0; } /* @@ -244,6 +243,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start, ; if (i == width) return pos; + + /* step over [pos..pos+i) to continue search */ + pos += i; } return -1; @@ -550,7 +552,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) { struct stm_device *stm = stmf->stm; struct stp_policy_id *id; - int ret = -EINVAL; + int ret = -EINVAL, wlimit = 1; u32 size; if (stmf->output.nr_chans) @@ -578,8 +580,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) if (id->__reserved_0 || id->__reserved_1) goto err_free; - if (id->width < 1 || - 
id->width > PAGE_SIZE / stm->data->sw_mmiosz) + if (stm->data->sw_mmiosz) + wlimit = PAGE_SIZE / stm->data->sw_mmiosz; + + if (id->width < 1 || id->width > wlimit) goto err_free; ret = stm_file_assign(stmf, id->id, id->width); @@ -697,8 +701,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -ENOMEM; stm->major = register_chrdev(0, stm_data->name, &stm_fops); - if (stm->major < 0) - goto err_free; + if (stm->major < 0) { + err = stm->major; + vfree(stm); + return err; + } device_initialize(&stm->dev); stm->dev.devt = MKDEV(stm->major, 0); @@ -742,10 +749,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, err_device: unregister_chrdev(stm->major, stm_data->name); - /* matches device_initialize() above */ + /* calls stm_device_release() */ put_device(&stm->dev); -err_free: - vfree(stm); return err; } @@ -1094,7 +1099,6 @@ int stm_source_register_device(struct device *parent, err: put_device(&src->dev); - kfree(src); return err; } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 451d4ae50e665bfcbcdabe7d7a0aceb8a0e1e29a..d2a6e42d75699bdc4c19c759ecc0fe80c7131ebd 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -131,6 +131,7 @@ config I2C_I801 Cannon Lake (PCH) Cedar Fork (PCH) Ice Lake (PCH) + Comet Lake (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -175,6 +176,7 @@ config I2C_PIIX4 AMD Hudson-2 AMD ML AMD CZ + Hygon CZ Serverworks OSB4 Serverworks CSB5 Serverworks CSB6 @@ -295,6 +297,18 @@ config I2C_VIAPRO This driver can also be built as a module. If so, the module will be called i2c-viapro. +config I2C_ZHAOXIN + tristate "Zhaoxin I2C controller driver" + depends on PCI && ACPI + select I2C_ALGOBIT + default m + help + If you say yes to this option, support will be included for the + Zhaoxin I2C interface + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin. + if ACPI comment "ACPI drivers" @@ -432,12 +446,13 @@ config I2C_BCM_KONA If you do not need KONA I2C interface, say N. config I2C_BRCMSTB - tristate "BRCM Settop I2C controller" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + tristate "BRCM Settop/DSL I2C controller" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \ + COMPILE_TEST default y help If you say yes to this option, support will be included for the - I2C interface on the Broadcom Settop SoCs. + I2C interface on the Broadcom Settop/DSL SoCs. If you do not need I2C interface, say N. @@ -529,6 +544,34 @@ config I2C_DESIGNWARE_BAYTRAIL the platform firmware controlling it. You should say Y if running on a BayTrail system using the AXP288. +config I2C_PHYTIUM_CORE + tristate + +config I2C_PHYTIUM_PCI + tristate "Phytium I2C PCI" + depends on PCI && ARCH_PHYTIUM + select I2C_PHYTIUM_CORE + select I2C_SMBUS + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-phytium-pci. + +config I2C_PHYTIUM_PLATFORM + tristate "Phytium I2C Platform" + depends on (ACPI && COMMON_CLK) || !ACPI + select I2C_SLAVE + select I2C_PHYTIUM_CORE + select I2C_SMBUS + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-phytium-platform. 
+ config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" depends on ARCH_DIGICOLOR @@ -603,6 +646,16 @@ config I2C_HIGHLANDER This driver can also be built as a module. If so, the module will be called i2c-highlander. +config I2C_HISI + tristate "HiSilicon I2C controller" + depends on ARM64 || COMPILE_TEST + help + Say Y here if you want to have Hisilicon I2C controller support + available on the Kunpeng Server. + + This driver can also be built as a module. If so, the module + will be called i2c-hisi. + config I2C_IBM_IIC tristate "IBM PPC 4xx on-chip I2C interface" depends on 4xx @@ -751,7 +804,7 @@ config I2C_OCORES config I2C_OMAP tristate "OMAP I2C adapter" - depends on ARCH_OMAP + depends on ARCH_OMAP || ARCH_K3 default y if MACH_OMAP_H3 || MACH_OMAP_OSK help If you say yes to this option, support will be included for the diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 18b26af82b1c5425a9dcec9c61cca3cdff694d60..6e692f1cd0f235cb6376fac583a843025e38b3aa 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o +obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o @@ -53,12 +54,17 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o +obj-$(CONFIG_I2C_PHYTIUM_CORE) += i2c-phytium-core.o +i2c-phytium-core-objs := i2c-phytium-common.o i2c-phytium-master.o i2c-phytium-slave.o +obj-$(CONFIG_I2C_PHYTIUM_PCI) += i2c-phytium-pci.o +obj-$(CONFIG_I2C_PHYTIUM_PLATFORM) += i2c-phytium-platform.o obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o +obj-$(CONFIG_I2C_HISI) += i2c-hisi.o obj-$(CONFIG_I2C_HIX5HD2) += i2c-hix5hd2.o obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o obj-$(CONFIG_I2C_IMG) += i2c-img-scb.o diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c index f4a5ae69bf6a48775f9bb58c488eeb158174a797..fa3763e4b3ee26ca470175937e56ac0a75313292 100644 --- a/drivers/i2c/busses/i2c-acorn.c +++ b/drivers/i2c/busses/i2c-acorn.c @@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = { static struct i2c_adapter ioc_ops = { .nr = 0, + .name = "ioc", .algo_data = &ioc_data, }; diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index a4f956c6d567d4b380505b0da1eedba97b795360..d9401b519106920655bb0469e83868a6f258f9c4 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -137,7 +137,8 @@ struct aspeed_i2c_bus { /* Synchronizes I/O mem access to base. */ spinlock_t lock; struct completion cmd_complete; - u32 (*get_clk_reg_val)(u32 divisor); + u32 (*get_clk_reg_val)(struct device *dev, + u32 divisor); unsigned long parent_clk_frequency; u32 bus_frequency; /* Transaction state. 
*/ @@ -555,7 +556,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) spin_lock(&bus->lock); #if IS_ENABLED(CONFIG_I2C_SLAVE) - if (aspeed_i2c_slave_irq(bus)) { + if (IS_ENABLED(CONFIG_I2C_SLAVE) && aspeed_i2c_slave_irq(bus)) { dev_dbg(bus->dev, "irq handled by slave.\n"); ret = true; goto out; @@ -564,7 +565,9 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) ret = aspeed_i2c_master_irq(bus); +#if IS_ENABLED(CONFIG_I2C_SLAVE) out: +#endif spin_unlock(&bus->lock); return ret ? IRQ_HANDLED : IRQ_NONE; } @@ -684,16 +687,27 @@ static const struct i2c_algorithm aspeed_i2c_algo = { #endif /* CONFIG_I2C_SLAVE */ }; -static u32 aspeed_i2c_get_clk_reg_val(u32 clk_high_low_max, u32 divisor) +static u32 aspeed_i2c_get_clk_reg_val(struct device *dev, + u32 clk_high_low_mask, + u32 divisor) { - u32 base_clk, clk_high, clk_low, tmp; + u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp; + + /* + * SCL_high and SCL_low represent a value 1 greater than what is stored + * since a zero divider is meaningless. Thus, the max value each can + * store is every bit set + 1. Since SCL_high and SCL_low are added + * together (see below), the max value of both is the max value of one of + * them times two. + */ + clk_high_low_max = (clk_high_low_mask + 1) * 2; /* * The actual clock frequency of SCL is: * SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low)) * = APB_freq / divisor * where base_freq is a programmable clock divider; its value is - * base_freq = 1 << base_clk + * base_freq = 1 << base_clk_divisor * SCL_high is the number of base_freq clock cycles that SCL stays high * and SCL_low is the number of base_freq clock cycles that SCL stays * low for a period of SCL. @@ -703,47 +717,59 @@ static u32 aspeed_i2c_get_clk_reg_val(u32 clk_high_low_max, u32 divisor) * SCL_low = clk_low + 1 * Thus, * SCL_freq = APB_freq / - * ((1 << base_clk) * (clk_high + 1 + clk_low + 1)) + * ((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1)) * The documentation recommends clk_high >= clk_high_max / 2 and * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint * gives us the following solution: */ - base_clk = divisor > clk_high_low_max ? + base_clk_divisor = divisor > clk_high_low_max ? ilog2((divisor - 1) / clk_high_low_max) + 1 : 0; - tmp = (divisor + (1 << base_clk) - 1) >> base_clk; - clk_low = tmp / 2; - clk_high = tmp - clk_low; - if (clk_high) - clk_high--; + if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) { + base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK; + clk_low = clk_high_low_mask; + clk_high = clk_high_low_mask; + dev_err(dev, + "clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n", + divisor, (1 << base_clk_divisor) * clk_high_low_max); + } else { + tmp = (divisor + (1 << base_clk_divisor) - 1) + >> base_clk_divisor; + clk_low = tmp / 2; + clk_high = tmp - clk_low; + + if (clk_high) + clk_high--; - if (clk_low) - clk_low--; + if (clk_low) + clk_low--; + } return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT) & ASPEED_I2CD_TIME_SCL_HIGH_MASK) | ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT) & ASPEED_I2CD_TIME_SCL_LOW_MASK) - | (base_clk & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); + | (base_clk_divisor + & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); } -static u32 aspeed_i2c_24xx_get_clk_reg_val(u32 divisor) +static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor) { /* * clk_high and clk_low are each 3 bits wide, so each can hold a max * value of 8 giving a clk_high_low_max of 16.
*/ - return aspeed_i2c_get_clk_reg_val(16, divisor); + return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor); } -static u32 aspeed_i2c_25xx_get_clk_reg_val(u32 divisor) +static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor) { /* * clk_high and clk_low are each 4 bits wide, so each can hold a max * value of 16 giving a clk_high_low_max of 32. */ - return aspeed_i2c_get_clk_reg_val(32, divisor); + return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor); } /* precondition: bus.lock has been acquired. */ @@ -756,7 +782,7 @@ static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus) clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK | ASPEED_I2CD_TIME_THDSTA_MASK | ASPEED_I2CD_TIME_TACST_MASK); - clk_reg_val |= bus->get_clk_reg_val(divisor); + clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor); writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1); writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2); @@ -872,7 +898,8 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev) if (!match) bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; else - bus->get_clk_reg_val = (u32 (*)(u32))match->data; + bus->get_clk_reg_val = (u32 (*)(struct device *, u32)) + match->data; /* Initialize the I2C adapter */ spin_lock_init(&bus->lock); diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 3f3e8b3bf5ff9df550991d18530fe45f41c870d3..d51bf536bdf7509b6e1ce70fd51f60c02a761272 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -270,9 +270,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); /* send stop when last byte has been written */ - if (--dev->buf_len == 0) + if (--dev->buf_len == 0) { if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); + at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY); + } dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); @@ -690,9 +692,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } else { at91_twi_write_next_byte(dev); at91_twi_write(dev, AT91_TWI_IER, - AT91_TWI_TXCOMP | - AT91_TWI_NACK | - AT91_TWI_TXRDY); + AT91_TWI_TXCOMP | AT91_TWI_NACK | + (dev->buf_len ? 
AT91_TWI_TXRDY : 0)); } } @@ -913,7 +914,7 @@ static struct at91_twi_pdata sama5d4_config = { static struct at91_twi_pdata sama5d2_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index 8e60048a33f8f88b5e10cf48d0cfc3a84f781424..fb5bac079e83f5e9c22f2ecfc5da0d0350c2da76 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -74,8 +74,7 @@ MST_STATUS_ND) #define MST_STATUS_ERR (MST_STATUS_NAK | \ MST_STATUS_AL | \ - MST_STATUS_IP | \ - MST_STATUS_TSS) + MST_STATUS_IP) #define MST_TX_BYTES_XFRD 0x50 #define MST_RX_BYTES_XFRD 0x54 #define SCL_HIGH_PERIOD 0x80 @@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) */ if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { idev->msg_err = -EPROTO; - i2c_int_disable(idev, ~0); + i2c_int_disable(idev, ~MST_STATUS_TSS); complete(&idev->msg_complete); break; } @@ -297,17 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) i2c_int_disable(idev, MST_STATUS_TFL); } - if (status & MST_STATUS_SCC) { - /* Stop completed */ - i2c_int_disable(idev, ~0); - complete(&idev->msg_complete); - } else if (status & MST_STATUS_SNS) { - /* Transfer done */ - i2c_int_disable(idev, ~0); - if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) - axxia_i2c_empty_rx_fifo(idev); - complete(&idev->msg_complete); - } else if (unlikely(status & MST_STATUS_ERR)) { + if (unlikely(status & MST_STATUS_ERR)) { /* Transfer error */ i2c_int_disable(idev, ~0); if (status & MST_STATUS_AL) @@ -324,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev) readl(idev->base + MST_TX_BYTES_XFRD), readl(idev->base + MST_TX_XFER)); complete(&idev->msg_complete); + } else if (status & MST_STATUS_SCC) { + /* Stop completed */ + i2c_int_disable(idev, ~MST_STATUS_TSS); + complete(&idev->msg_complete); + } else if (status & MST_STATUS_SNS) { + /* Transfer done */ + i2c_int_disable(idev, ~MST_STATUS_TSS); + if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) + axxia_i2c_empty_rx_fifo(idev); + complete(&idev->msg_complete); + } else if (status & MST_STATUS_TSS) { + /* Transfer timeout */ + idev->msg_err = -ETIMEDOUT; + i2c_int_disable(idev, ~MST_STATUS_TSS); + complete(&idev->msg_complete); } out: @@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) u32 rx_xfer, tx_xfer; u32 addr_1, addr_2; unsigned long time_left; + unsigned int wt_value; idev->msg = msg; idev->msg_xfrd = 0; - idev->msg_err = 0; reinit_completion(&idev->msg_complete); if (i2c_m_ten(msg)) { @@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) else if (axxia_i2c_fill_tx_fifo(idev) != 0) int_mask |= MST_STATUS_TFL; + wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL)); + /* Disable wait timer temporarily */ + writel(wt_value, idev->base + WAIT_TIMER_CONTROL); + /* Check if timeout error happened */ + if (idev->msg_err) + goto out; + /* Start manual mode */ writel(CMD_MANUAL, idev->base + MST_COMMAND); + writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL); + i2c_int_enable(idev, int_mask); time_left = wait_for_completion_timeout(&idev->msg_complete, @@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) if (readl(idev->base + MST_COMMAND) & CMD_BUSY) dev_warn(idev->dev, "busy after xfer\n"); - if (time_left == 0) + if (time_left == 0) { idev->msg_err
= -ETIMEDOUT; - - if (idev->msg_err == -ETIMEDOUT) i2c_recover_bus(&idev->adapter); + axxia_i2c_init(idev); + } - if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) +out: + if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO && + idev->msg_err != -ETIMEDOUT) axxia_i2c_init(idev); return idev->msg_err; @@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) static int axxia_i2c_stop(struct axxia_i2c_dev *idev) { - u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC; + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS; unsigned long time_left; reinit_completion(&idev->msg_complete); @@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) int i; int ret = 0; + idev->msg_err = 0; + i2c_int_enable(idev, MST_STATUS_TSS); + for (i = 0; ret == 0 && i < num; ++i) ret = axxia_i2c_xfer_msg(idev, &msgs[i]); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 44deae78913e5fa259927d1b5e8b6bf9aee60138..4d19254f78c8a233920da9e25e0c64355bfc1d4f 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -191,6 +191,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); } +static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) +{ + i2c_dev->curr_msg = NULL; + i2c_dev->num_msgs = 0; + + i2c_dev->msg_buf = NULL; + i2c_dev->msg_buf_remaining = 0; +} + /* * Note about I2C_C_CLEAR on error: * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in @@ -291,6 +300,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], time_left = wait_for_completion_timeout(&i2c_dev->completion, adap->timeout); + + bcm2835_i2c_finish_transfer(i2c_dev); + if (!time_left) { bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR); diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291619f29e8fc6d21bb513ef513b21e..f64c7f8b924e6ff5540b7536f67c1b926f959928 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->recv_count > CDNS_I2C_FIFO_DEPTH) + if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if (id->send_count > CDNS_I2C_FIFO_DEPTH) + if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ctrl_reg |= CDNS_I2C_CR_HOLD; + else + ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. 
*/ @@ -573,7 +578,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, struct cdns_i2c *id = adap->algo_data; bool hold_quirk; - ret = pm_runtime_get_sync(id->dev); + ret = pm_runtime_resume_and_get(id->dev); if (ret < 0) return ret; /* Check if the bus is free */ diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index c4d176f5ed793c76c78c412d081c21bc8dff2327..f890af67f50173b727c5b388a29735bea3a481db 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -187,6 +187,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = { .smbus_xfer = cht_wc_i2c_adap_smbus_xfer, }; +/* + * We are an i2c-adapter which itself is part of an i2c-client. This means that + * transfers done through us take adapter->bus_lock twice, once for our parent + * i2c-adapter and once to take our own bus_lock. Lockdep does not like this + * nested locking, to make lockdep happy in the case of busses with muxes, the + * i2c-core's i2c_adapter_lock_bus function calls: + * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter)); + * + * But i2c_adapter_depth only works when the direct parent of the adapter is + * another adapter, as it is only meant for muxes. In our case there is an + * i2c-client and MFD instantiated platform_device in the parent->child chain + * between the 2 devices. + * + * So we override the default i2c_lock_operations and pass a hardcoded + * depth of 1 to rt_mutex_lock_nested, to make lockdep happy. + * + * Note that if there were to be a mux attached to our adapter, this would + * break things again since the i2c-mux code expects the root-adapter to have + * a locking depth of 0. But we always have only 1 client directly attached + * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC. 
+ */ +static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter, + unsigned int flags) +{ + rt_mutex_lock_nested(&adapter->bus_lock, 1); +} + +static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter, + unsigned int flags) +{ + return rt_mutex_trylock(&adapter->bus_lock); +} + +static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter, + unsigned int flags) +{ + rt_mutex_unlock(&adapter->bus_lock); +} + +static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = { + .lock_bus = cht_wc_i2c_adap_lock_bus, + .trylock_bus = cht_wc_i2c_adap_trylock_bus, + .unlock_bus = cht_wc_i2c_adap_unlock_bus, +}; + /**** irqchip for the client connected to the extchgr i2c adapter ****/ static void cht_wc_i2c_irq_lock(struct irq_data *data) { @@ -295,6 +340,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) adap->adapter.owner = THIS_MODULE; adap->adapter.class = I2C_CLASS_HWMON; adap->adapter.algo = &cht_wc_i2c_adap_algo; + adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops; strlcpy(adap->adapter.name, "PMIC I2C Adapter", sizeof(adap->adapter.name)); adap->adapter.dev.parent = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index b5750fd851251e74b0558576774da4a82d81c757..03f4e6f3a9386d27277467f8b671f1490a2eecd9 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -151,6 +151,9 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "APMC0D0F", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, + { "HISI02A3", 0 }, + { "PHYT0003", 0 }, + { "HYGO0010", ACCESS_INTR_MASK }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index e7f9305b2dd9f661c8863839dee346aa49438c4e..f5f001738df5e2b1b8c6e9db4973a9ce8f065140 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) dev->disable_int(dev); dev->disable(dev); + synchronize_irq(dev->irq); dev->slave = NULL; pm_runtime_put(dev->dev); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 35b302d983e0d93d74b255d57f113226f9c799f9..959d4912ec0d5ce1082c4713cee86e13b10a228d 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -69,6 +69,7 @@ struct em_i2c_device { struct completion msg_done; struct clk *sclk; struct i2c_client *slave; + int irq; }; static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) @@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave) writeb(0, priv->base + I2C_OFS_SVA0); + /* + * Wait for interrupt to finish. New slave irqs cannot happen because we + * cleared the slave address and, thus, only extension codes will be + * detected which do not use the slave ptr. 
+ */ + synchronize_irq(priv->irq); priv->slave = NULL; return 0; @@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev) { struct em_i2c_device *priv; struct resource *r; - int irq, ret; + int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev) em_i2c_reset(&priv->adap); - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0, + priv->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, "em_i2c", priv); if (ret) goto err_clk; @@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev) if (ret) goto err_clk; - dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq); + dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, + priv->irq); return 0; diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c new file mode 100644 index 0000000000000000000000000000000000000000..8c6ca1ec401a652f5719ea97e6105b7313de2dce --- /dev/null +++ b/drivers/i2c/busses/i2c-hisi.c @@ -0,0 +1,619 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HiSilicon I2C Controller Driver for Kunpeng SoC + * + * Copyright (c) 2021 HiSilicon Technologies Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HISI_I2C_FRAME_CTRL 0x0000 +#define HISI_I2C_FRAME_CTRL_SPEED_MODE GENMASK(1, 0) +#define HISI_I2C_FRAME_CTRL_ADDR_TEN BIT(2) +#define HISI_I2C_SLV_ADDR 0x0004 +#define HISI_I2C_SLV_ADDR_VAL GENMASK(9, 0) +#define HISI_I2C_SLV_ADDR_GC_S_MODE BIT(10) +#define HISI_I2C_SLV_ADDR_GC_S_EN BIT(11) +#define HISI_I2C_CMD_TXDATA 0x0008 +#define HISI_I2C_CMD_TXDATA_DATA GENMASK(7, 0) +#define HISI_I2C_CMD_TXDATA_RW BIT(8) +#define HISI_I2C_CMD_TXDATA_P_EN BIT(9) +#define HISI_I2C_CMD_TXDATA_SR_EN BIT(10) +#define HISI_I2C_RXDATA 0x000c +#define HISI_I2C_RXDATA_DATA GENMASK(7, 0) +#define HISI_I2C_SS_SCL_HCNT 0x0010 +#define HISI_I2C_SS_SCL_LCNT 0x0014 +#define HISI_I2C_FS_SCL_HCNT 0x0018 +#define HISI_I2C_FS_SCL_LCNT 0x001c +#define HISI_I2C_HS_SCL_HCNT 0x0020 +#define HISI_I2C_HS_SCL_LCNT 0x0024 +#define HISI_I2C_FIFO_CTRL 0x0028 +#define HISI_I2C_FIFO_RX_CLR BIT(0) +#define HISI_I2C_FIFO_TX_CLR BIT(1) +#define HISI_I2C_FIFO_RX_AF_THRESH GENMASK(7, 2) +#define HISI_I2C_FIFO_TX_AE_THRESH GENMASK(13, 8) +#define HISI_I2C_FIFO_STATE 0x002c +#define HISI_I2C_FIFO_STATE_RX_RERR BIT(0) +#define HISI_I2C_FIFO_STATE_RX_WERR BIT(1) +#define HISI_I2C_FIFO_STATE_RX_EMPTY BIT(3) +#define HISI_I2C_FIFO_STATE_TX_RERR BIT(6) +#define HISI_I2C_FIFO_STATE_TX_WERR BIT(7) +#define HISI_I2C_FIFO_STATE_TX_FULL BIT(11) +#define HISI_I2C_SDA_HOLD 0x0030 +#define HISI_I2C_SDA_HOLD_TX GENMASK(15, 0) +#define HISI_I2C_SDA_HOLD_RX GENMASK(23, 16) +#define HISI_I2C_FS_SPK_LEN 0x0038 +#define HISI_I2C_FS_SPK_LEN_CNT GENMASK(7, 0) +#define HISI_I2C_HS_SPK_LEN 0x003c +#define HISI_I2C_HS_SPK_LEN_CNT GENMASK(7, 0) +#define HISI_I2C_TX_INT_CLR 0x0040 +#define HISI_I2C_TX_AEMPTY_INT BIT(0) +#define HISI_I2C_INT_MSTAT 0x0044 +#define HISI_I2C_INT_CLR 0x0048 +#define HISI_I2C_INT_MASK 0x004C +#define HISI_I2C_TRANS_STATE 0x0050 +#define HISI_I2C_TRANS_ERR 0x0054 +#define HISI_I2C_VERSION 0x0058 + +#define HISI_I2C_INT_ALL GENMASK(4, 0) +#define HISI_I2C_INT_TRANS_CPLT BIT(0) +#define HISI_I2C_INT_TRANS_ERR BIT(1) +#define HISI_I2C_INT_FIFO_ERR BIT(2) +#define HISI_I2C_INT_RX_FULL BIT(3) +#define 
HISI_I2C_INT_TX_EMPTY BIT(4) +#define HISI_I2C_INT_ERR \ + (HISI_I2C_INT_TRANS_ERR | HISI_I2C_INT_FIFO_ERR) + +#define HISI_I2C_STD_SPEED_MODE 0 +#define HISI_I2C_FAST_SPEED_MODE 1 +#define HISI_I2C_HIGH_SPEED_MODE 2 + +#define HISI_I2C_TX_FIFO_DEPTH 64 +#define HISI_I2C_RX_FIFO_DEPTH 64 +#define HISI_I2C_TX_F_AE_THRESH 1 +#define HISI_I2C_RX_F_AF_THRESH 60 + +#define HZ_PER_KHZ 1000 + +#define NSEC_TO_CYCLES(ns, clk_rate_khz) \ + DIV_ROUND_UP_ULL((clk_rate_khz) * (ns), NSEC_PER_MSEC) + +struct hisi_i2c_controller { + struct i2c_adapter adapter; + void __iomem *iobase; + struct device *dev; + int irq; + + /* Intermediates for recording the transfer process */ + struct completion *completion; + struct i2c_msg *msgs; + int msg_num; + int msg_tx_idx; + int buf_tx_idx; + int msg_rx_idx; + int buf_rx_idx; + u16 tar_addr; + u32 xfer_err; + + /* I2C bus configuration */ + struct i2c_timings t; + u32 clk_rate_khz; + u32 spk_len; + + /* Bus recovery method */ + struct i2c_bus_recovery_info rinfo; +}; + +static void hisi_i2c_enable_int(struct hisi_i2c_controller *ctlr, u32 mask) +{ + writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_MASK); +} + +static void hisi_i2c_disable_int(struct hisi_i2c_controller *ctlr, u32 mask) +{ + writel_relaxed((~mask) & HISI_I2C_INT_ALL, ctlr->iobase + HISI_I2C_INT_MASK); +} + +static void hisi_i2c_clear_int(struct hisi_i2c_controller *ctlr, u32 mask) +{ + writel_relaxed(mask, ctlr->iobase + HISI_I2C_INT_CLR); +} + +static void hisi_i2c_clear_tx_int(struct hisi_i2c_controller *ctlr, u32 mask) +{ + writel_relaxed(mask, ctlr->iobase + HISI_I2C_TX_INT_CLR); +} + +static void hisi_i2c_handle_errors(struct hisi_i2c_controller *ctlr) +{ + u32 int_err = ctlr->xfer_err, reg; + + if (int_err & HISI_I2C_INT_FIFO_ERR) { + reg = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); + + if (reg & HISI_I2C_FIFO_STATE_RX_RERR) + dev_err(ctlr->dev, "rx fifo error read\n"); + + if (reg & HISI_I2C_FIFO_STATE_RX_WERR) + dev_err(ctlr->dev, "rx fifo error write\n"); + + if (reg & HISI_I2C_FIFO_STATE_TX_RERR) + dev_err(ctlr->dev, "tx fifo error read\n"); + + if (reg & HISI_I2C_FIFO_STATE_TX_WERR) + dev_err(ctlr->dev, "tx fifo error write\n"); + } +} + +static int hisi_i2c_start_xfer(struct hisi_i2c_controller *ctlr) +{ + struct i2c_msg *msg = ctlr->msgs; + u32 reg; + + reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL); + reg &= ~HISI_I2C_FRAME_CTRL_ADDR_TEN; + if (msg->flags & I2C_M_TEN) + reg |= HISI_I2C_FRAME_CTRL_ADDR_TEN; + writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL); + + reg = readl(ctlr->iobase + HISI_I2C_SLV_ADDR); + reg &= ~HISI_I2C_SLV_ADDR_VAL; + reg |= FIELD_PREP(HISI_I2C_SLV_ADDR_VAL, msg->addr); + writel(reg, ctlr->iobase + HISI_I2C_SLV_ADDR); + + reg = readl(ctlr->iobase + HISI_I2C_FIFO_CTRL); + reg |= HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR; + writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); + reg &= ~(HISI_I2C_FIFO_RX_CLR | HISI_I2C_FIFO_TX_CLR); + writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); + + hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL); + hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT); + hisi_i2c_enable_int(ctlr, HISI_I2C_INT_ALL); + + return 0; +} + +static void hisi_i2c_reset_xfer(struct hisi_i2c_controller *ctlr) +{ + ctlr->msg_num = 0; + ctlr->xfer_err = 0; + ctlr->msg_tx_idx = 0; + ctlr->msg_rx_idx = 0; + ctlr->buf_tx_idx = 0; + ctlr->buf_rx_idx = 0; +} + +/* + * Initialize the transfer information and start the I2C bus transfer. + * We only configure the transfer and do some pre/post works here, and + * wait for the transfer done. 
The major transfer process is performed + * in the IRQ handler. + */ +static int hisi_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap); + DECLARE_COMPLETION_ONSTACK(done); + int ret = num; + + hisi_i2c_reset_xfer(ctlr); + ctlr->completion = &done; + ctlr->msg_num = num; + ctlr->msgs = msgs; + + hisi_i2c_start_xfer(ctlr); + + if (!wait_for_completion_timeout(ctlr->completion, adap->timeout)) { + hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); + synchronize_irq(ctlr->irq); + i2c_recover_bus(&ctlr->adapter); + dev_err(ctlr->dev, "bus transfer timeout\n"); + ret = -EIO; + } + + if (ctlr->xfer_err) { + hisi_i2c_handle_errors(ctlr); + ret = -EIO; + } + + hisi_i2c_reset_xfer(ctlr); + ctlr->completion = NULL; + + return ret; +} + +static u32 hisi_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm hisi_i2c_algo = { + .master_xfer = hisi_i2c_master_xfer, + .functionality = hisi_i2c_functionality, +}; + +static int hisi_i2c_read_rx_fifo(struct hisi_i2c_controller *ctlr) +{ + struct i2c_msg *cur_msg; + u32 fifo_state; + + while (ctlr->msg_rx_idx < ctlr->msg_num) { + cur_msg = ctlr->msgs + ctlr->msg_rx_idx; + + if (!(cur_msg->flags & I2C_M_RD)) { + ctlr->msg_rx_idx++; + continue; + } + + fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); + while (!(fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) && + ctlr->buf_rx_idx < cur_msg->len) { + cur_msg->buf[ctlr->buf_rx_idx++] = readl(ctlr->iobase + HISI_I2C_RXDATA); + fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); + } + + if (ctlr->buf_rx_idx == cur_msg->len) { + ctlr->buf_rx_idx = 0; + ctlr->msg_rx_idx++; + } + + if (fifo_state & HISI_I2C_FIFO_STATE_RX_EMPTY) + break; + } + + return 0; +} + +static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr) +{ + int max_write = HISI_I2C_TX_FIFO_DEPTH - HISI_I2C_TX_F_AE_THRESH; + bool need_restart = false, last_msg; + struct i2c_msg *cur_msg; + u32 cmd, fifo_state; + + while (ctlr->msg_tx_idx < ctlr->msg_num) { + cur_msg = ctlr->msgs + ctlr->msg_tx_idx; + last_msg = (ctlr->msg_tx_idx == ctlr->msg_num - 1); + + /* Signal the SR bit when we start transferring a new message */ + if (ctlr->msg_tx_idx && !ctlr->buf_tx_idx) + need_restart = true; + + fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); + while (!(fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) && + ctlr->buf_tx_idx < cur_msg->len && max_write) { + cmd = 0; + + if (need_restart) { + cmd |= HISI_I2C_CMD_TXDATA_SR_EN; + need_restart = false; + } + + /* Signal the STOP bit at the last frame of the last message */ + if (ctlr->buf_tx_idx == cur_msg->len - 1 && last_msg) + cmd |= HISI_I2C_CMD_TXDATA_P_EN; + + if (cur_msg->flags & I2C_M_RD) + cmd |= HISI_I2C_CMD_TXDATA_RW; + else + cmd |= FIELD_PREP(HISI_I2C_CMD_TXDATA_DATA, + cur_msg->buf[ctlr->buf_tx_idx]); + + writel(cmd, ctlr->iobase + HISI_I2C_CMD_TXDATA); + ctlr->buf_tx_idx++; + max_write--; + + fifo_state = readl(ctlr->iobase + HISI_I2C_FIFO_STATE); + } + + /* Update the transfer index after per message transfer is done. */ + if (ctlr->buf_tx_idx == cur_msg->len) { + ctlr->buf_tx_idx = 0; + ctlr->msg_tx_idx++; + } + + if ((fifo_state & HISI_I2C_FIFO_STATE_TX_FULL) || + max_write == 0) + break; + } + + /* + * Disable the TX_EMPTY interrupt after finishing all the messages to + * avoid overwhelming the CPU. 
+ */ + if (ctlr->msg_tx_idx == ctlr->msg_num) + hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY); + + hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT); +} + +static irqreturn_t hisi_i2c_irq(int irq, void *context) +{ + struct hisi_i2c_controller *ctlr = context; + u32 int_stat; + + /* + * Don't handle the interrupt if ctlr->completion is NULL. We may + * reach here because the interrupt is spurious or the transfer is + * started by another port (e.g. firmware) rather than us. + */ + if (!ctlr->completion) + return IRQ_NONE; + + int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); + hisi_i2c_clear_int(ctlr, int_stat); + if (!(int_stat & HISI_I2C_INT_ALL)) + return IRQ_NONE; + + if (int_stat & HISI_I2C_INT_TX_EMPTY) + hisi_i2c_xfer_msg(ctlr); + + if (int_stat & HISI_I2C_INT_ERR) { + ctlr->xfer_err = int_stat; + goto out; + } + + /* Drain the rx fifo before finishing the transfer */ + if (int_stat & (HISI_I2C_INT_TRANS_CPLT | HISI_I2C_INT_RX_FULL)) + hisi_i2c_read_rx_fifo(ctlr); + +out: + /* + * Only use TRANS_CPLT to indicate the completion. On error cases we'll + * get two interrupts, INT_ERR first then TRANS_CPLT. + */ + if (int_stat & HISI_I2C_INT_TRANS_CPLT) { + hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); + hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL); + hisi_i2c_clear_tx_int(ctlr, HISI_I2C_TX_AEMPTY_INT); + complete(ctlr->completion); + } + + return IRQ_HANDLED; +} + +/* + * Helper function for calculating and configuring the HIGH and LOW + * periods of SCL clock. The caller will pass the ratio of the + * counts (divide / divisor) according to the target speed mode, + * and the target registers. + */ +static void hisi_i2c_set_scl(struct hisi_i2c_controller *ctlr, + u32 divide, u32 divisor, + u32 reg_hcnt, u32 reg_lcnt) +{ + u32 total_cnt, t_scl_hcnt, t_scl_lcnt, scl_fall_cnt, scl_rise_cnt; + u32 scl_hcnt, scl_lcnt; + + /* Total SCL clock cycles per speed period */ + total_cnt = DIV_ROUND_UP_ULL(ctlr->clk_rate_khz * HZ_PER_KHZ, ctlr->t.bus_freq_hz); + /* Total HIGH level SCL clock cycles including edges */ + t_scl_hcnt = DIV_ROUND_UP_ULL(total_cnt * divide, divisor); + /* Total LOW level SCL clock cycles including edges */ + t_scl_lcnt = total_cnt - t_scl_hcnt; + /* Fall edge SCL clock cycles */ + scl_fall_cnt = NSEC_TO_CYCLES(ctlr->t.scl_fall_ns, ctlr->clk_rate_khz); + /* Rise edge SCL clock cycles */ + scl_rise_cnt = NSEC_TO_CYCLES(ctlr->t.scl_rise_ns, ctlr->clk_rate_khz); + + /* Calculated HIGH and LOW periods of SCL clock */ + scl_hcnt = t_scl_hcnt - ctlr->spk_len - 7 - scl_fall_cnt; + scl_lcnt = t_scl_lcnt - 1 - scl_rise_cnt; + + writel(scl_hcnt, ctlr->iobase + reg_hcnt); + writel(scl_lcnt, ctlr->iobase + reg_lcnt); +} + +static void hisi_i2c_configure_bus(struct hisi_i2c_controller *ctlr) +{ + u32 reg, sda_hold_cnt, speed_mode, digital_filter_width_ns; + + i2c_parse_fw_timings(ctlr->dev, &ctlr->t, true); + device_property_read_u32(ctlr->dev, "i2c-digital-filter-width-ns", + &digital_filter_width_ns); + ctlr->spk_len = NSEC_TO_CYCLES(digital_filter_width_ns, + ctlr->clk_rate_khz); + + switch (ctlr->t.bus_freq_hz) { + case I2C_MAX_FAST_MODE_FREQ: + speed_mode = HISI_I2C_FAST_SPEED_MODE; + hisi_i2c_set_scl(ctlr, 26, 76, HISI_I2C_FS_SCL_HCNT, HISI_I2C_FS_SCL_LCNT); + break; + case I2C_MAX_HIGH_SPEED_MODE_FREQ: + speed_mode = HISI_I2C_HIGH_SPEED_MODE; + hisi_i2c_set_scl(ctlr, 6, 22, HISI_I2C_HS_SCL_HCNT, HISI_I2C_HS_SCL_LCNT); + break; + case I2C_MAX_STANDARD_MODE_FREQ: + default: + speed_mode = HISI_I2C_STD_SPEED_MODE; + + /* For default condition force the bus speed to standard mode.
*/ + ctlr->t.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; + hisi_i2c_set_scl(ctlr, 40, 87, HISI_I2C_SS_SCL_HCNT, HISI_I2C_SS_SCL_LCNT); + break; + } + + reg = readl(ctlr->iobase + HISI_I2C_FRAME_CTRL); + reg &= ~HISI_I2C_FRAME_CTRL_SPEED_MODE; + reg |= FIELD_PREP(HISI_I2C_FRAME_CTRL_SPEED_MODE, speed_mode); + writel(reg, ctlr->iobase + HISI_I2C_FRAME_CTRL); + + sda_hold_cnt = NSEC_TO_CYCLES(ctlr->t.sda_hold_ns, ctlr->clk_rate_khz); + + reg = FIELD_PREP(HISI_I2C_SDA_HOLD_TX, sda_hold_cnt); + writel(reg, ctlr->iobase + HISI_I2C_SDA_HOLD); + + writel(ctlr->spk_len, ctlr->iobase + HISI_I2C_FS_SPK_LEN); + + reg = FIELD_PREP(HISI_I2C_FIFO_RX_AF_THRESH, HISI_I2C_RX_F_AF_THRESH); + reg |= FIELD_PREP(HISI_I2C_FIFO_TX_AE_THRESH, HISI_I2C_TX_F_AE_THRESH); + writel(reg, ctlr->iobase + HISI_I2C_FIFO_CTRL); +} + +#ifdef CONFIG_ACPI +#define HISI_I2C_PIN_MUX_METHOD "PMUX" + +/** + * i2c_hisi_pin_mux_change - Change the I2C controller's pin mux through ACPI + * @dev: device that owns the SCL/SDA pin + * @to_gpio: true to switch to GPIO, false to switch to SCL/SDA + * + * The function invokes the specific ACPI method "PMUX" for changing the + * pin mux of I2C controller between SCL/SDA and GPIO in order to help on + * the generic GPIO recovery process. + */ +static void i2c_hisi_pin_mux_change(struct device *dev, bool to_gpio) +{ + acpi_handle handle = ACPI_HANDLE(dev); + struct acpi_object_list arg_list; + unsigned long long data; + union acpi_object arg; + + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = to_gpio; + arg_list.count = 1; + arg_list.pointer = &arg; + + acpi_evaluate_integer(handle, HISI_I2C_PIN_MUX_METHOD, + &arg_list, &data); +} + +static void i2c_hisi_prepare_recovery(struct i2c_adapter *adap) +{ + struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap); + + i2c_hisi_pin_mux_change(ctlr->dev, true); +} + +static void i2c_hisi_unprepare_recovery(struct i2c_adapter *adap) +{ + struct hisi_i2c_controller *ctlr = i2c_get_adapdata(adap); + + i2c_hisi_pin_mux_change(ctlr->dev, false); +} + +static void hisi_i2c_init_recovery_info(struct hisi_i2c_controller *ctlr) +{ + struct i2c_bus_recovery_info *rinfo = &ctlr->rinfo; + struct acpi_device *adev = ACPI_COMPANION(ctlr->dev); + struct gpio_desc *gpio; + + if (!acpi_has_method(adev->handle, HISI_I2C_PIN_MUX_METHOD)) + return; + + gpio = devm_gpiod_get_optional(ctlr->dev, "scl", GPIOD_OUT_HIGH); + if (IS_ERR_OR_NULL(gpio)) + return; + + rinfo->scl_gpiod = gpio; + + gpio = devm_gpiod_get_optional(ctlr->dev, "sda", GPIOD_IN); + if (IS_ERR(gpio)) + return; + + rinfo->sda_gpiod = gpio; + rinfo->recover_bus = i2c_generic_scl_recovery; + rinfo->prepare_recovery = i2c_hisi_prepare_recovery; + rinfo->unprepare_recovery = i2c_hisi_unprepare_recovery; + + ctlr->adapter.bus_recovery_info = rinfo; +} +#else +static inline +void hisi_i2c_init_recovery_info(struct hisi_i2c_controller *ctlr) { } +#endif /* CONFIG_ACPI */ + +static int hisi_i2c_probe(struct platform_device *pdev) +{ + struct hisi_i2c_controller *ctlr; + struct device *dev = &pdev->dev; + struct i2c_adapter *adapter; + struct resource *res; + u64 clk_rate_hz; + u32 hw_version; + int ret; + + ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL); + if (!ctlr) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctlr->iobase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctlr->iobase)) + return PTR_ERR(ctlr->iobase); + + ctlr->irq = platform_get_irq(pdev, 0); + if (ctlr->irq < 0) + return ctlr->irq; + + ctlr->dev = dev; + + hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ + ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr); + if (ret) { + dev_err(dev, "failed to request irq handler, ret = %d\n", ret); + return ret; + } + + ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz); + if (ret) { + dev_err(dev, "failed to get clock frequency, ret = %d\n", ret); + return ret; + } + + ctlr->clk_rate_khz = DIV_ROUND_UP_ULL(clk_rate_hz, HZ_PER_KHZ); + + hisi_i2c_configure_bus(ctlr); + + adapter = &ctlr->adapter; + snprintf(adapter->name, sizeof(adapter->name), + "HiSilicon I2C Controller %s", dev_name(dev)); + adapter->owner = THIS_MODULE; + adapter->algo = &hisi_i2c_algo; + adapter->dev.parent = dev; + i2c_set_adapdata(adapter, ctlr); + + hisi_i2c_init_recovery_info(ctlr); + + ret = devm_i2c_add_adapter(dev, adapter); + if (ret) + return ret; + + hw_version = readl(ctlr->iobase + HISI_I2C_VERSION); + dev_info(ctlr->dev, "speed mode is %s. hw version 0x%x\n", + i2c_freq_mode_string(ctlr->t.bus_freq_hz), hw_version); + + return 0; +} + +static const struct acpi_device_id hisi_i2c_acpi_ids[] = { + { "HISI03D1", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, hisi_i2c_acpi_ids); + +static struct platform_driver hisi_i2c_driver = { + .probe = hisi_i2c_probe, + .driver = { + .name = "hisi-i2c", + .acpi_match_table = hisi_i2c_acpi_ids, + }, +}; +module_platform_driver(hisi_i2c_driver); + +MODULE_AUTHOR("Yicong Yang "); +MODULE_DESCRIPTION("HiSilicon I2C Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c91e145ef5a56dbb1a512c23611f06ad7d22aa05..d3c33e8b9a928e713bfdbff7b90b8b1169111828 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -71,6 +71,7 @@ * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes * Cedar Fork (PCH) 0x18df 32 hard yes yes yes * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes + * Comet Lake (PCH) 0x02a3 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -240,6 +241,7 @@ #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 +#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3 struct i801_mux_config { char *gpio_chip; @@ -382,11 +384,9 @@ static int i801_check_post(struct i801_priv *priv, int status) dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); /* try to stop the current command */ dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); - outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, - SMBHSTCNT(priv)); + outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv)); usleep_range(1000, 2000); - outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), - SMBHSTCNT(priv)); + outb_p(0, SMBHSTCNT(priv)); /* Check if it worked */ status = inb_p(SMBHSTSTS(priv)); @@ -1038,6 +1038,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, { 0, } }; @@ -1534,6 +1535,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_DNV_SMBUS: case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: + case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS: priv->features |= FEATURE_I2C_BLOCK_READ; priv->features |= FEATURE_IRQ; priv->features |= 
FEATURE_SMBUS_PEC; diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index f038858b6c5495fe700f177ffc862968d5d2b8b7..fecafb6493e2ac3bb888eb936b056b846f3eb6b9 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1060,7 +1060,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, atomic = true; } - ret = pm_runtime_get_sync(adap->dev.parent); + ret = pm_runtime_resume_and_get(adap->dev.parent); if (ret < 0) return ret; @@ -1161,7 +1161,7 @@ static int img_i2c_init(struct img_i2c *i2c) u32 rev; int ret; - ret = pm_runtime_get_sync(i2c->adap.dev.parent); + ret = pm_runtime_resume_and_get(i2c->adap.dev.parent); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 06c4c767af322aa2704d5ae51a59e5c103579fc5..d5182c446b79f145e91743b27e4620ba8eb4fbb2 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -265,7 +265,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx) unsigned int temp; int ret; - ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent); + ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index c406700789e1f9c5a3af5c3fa62f045180702bb1..d4b72e4ffd71f7b97e172f51ea95a96b0d29f8fe 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -510,9 +510,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct clk_notifier_data *ndata = data; - struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk, + struct imx_i2c_struct *i2c_imx = container_of(nb, struct imx_i2c_struct, - clk); + clk_change_nb); if (action & POST_RATE_CHANGE) i2c_imx_set_clk(i2c_imx, ndata->new_rate); @@ -1090,7 +1090,8 @@ static int i2c_imx_probe(struct platform_device *pdev) /* Get I2C clock */ i2c_imx->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_imx->clk)) { - dev_err(&pdev->dev, "can't get I2C clock\n"); + if (PTR_ERR(i2c_imx->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "can't get I2C clock\n"); return PTR_ERR(i2c_imx->clk); } diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 0d1c3ec8cb40734f96529dffadffc28d686c22ac..538883c6b83624a02e1fc796a2d6a0dd945d975d 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -498,6 +498,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, if (read_write == I2C_SMBUS_WRITE) { /* Block Write */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n"); + if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + dma_size = data->block[0] + 1; dma_direction = DMA_TO_DEVICE; desc->wr_len_cmd = dma_size; diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 745ed43a22d65cba3920aced8aa2f525d5e4e9a9..2fd717d8dd30eca66aa03305a3f01ce5d24a8774 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -503,6 +503,7 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->dev = &pdev->dev; + priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR; /* Register with i2c layer */ mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO); @@ -518,7 +519,6 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev) mlxcpld_i2c_adapter.nr = pdev->id; priv->adap = mlxcpld_i2c_adapter; 
priv->adap.dev.parent = &pdev->dev; - priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR; i2c_set_adapdata(&priv->adap, priv); err = i2c_add_numbered_adapter(&priv->adap); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 1e57f58fcb00151e9ef274e25e3adf22f2c197b8..2bb4d20ead32b8895afc77024c1a01bfd05006ea 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -441,6 +441,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, u16 control_reg; u16 restart_flag = 0; u32 reg_4g_mode; + u8 *dma_rd_buf = NULL; + u8 *dma_wr_buf = NULL; dma_addr_t rpaddr = 0; dma_addr_t wpaddr = 0; int ret; @@ -500,10 +502,18 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, if (i2c->op == I2C_MASTER_RD) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON); - rpaddr = dma_map_single(i2c->dev, msgs->buf, + + dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1); + if (!dma_rd_buf) + return -ENOMEM; + + rpaddr = dma_map_single(i2c->dev, dma_rd_buf, msgs->len, DMA_FROM_DEVICE); - if (dma_mapping_error(i2c->dev, rpaddr)) + if (dma_mapping_error(i2c->dev, rpaddr)) { + i2c_put_dma_safe_msg_buf(dma_rd_buf, msgs, false); + return -ENOMEM; + } if (i2c->dev_comp->support_33bits) { reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr); @@ -515,10 +525,18 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, } else if (i2c->op == I2C_MASTER_WR) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON); - wpaddr = dma_map_single(i2c->dev, msgs->buf, + + dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); + if (!dma_wr_buf) + return -ENOMEM; + + wpaddr = dma_map_single(i2c->dev, dma_wr_buf, msgs->len, DMA_TO_DEVICE); - if (dma_mapping_error(i2c->dev, wpaddr)) + if (dma_mapping_error(i2c->dev, wpaddr)) { + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); + return -ENOMEM; + } if (i2c->dev_comp->support_33bits) { reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr); @@ -530,16 +548,39 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, } else { writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG); writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON); - wpaddr = dma_map_single(i2c->dev, msgs->buf, + + dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); + if (!dma_wr_buf) + return -ENOMEM; + + wpaddr = dma_map_single(i2c->dev, dma_wr_buf, msgs->len, DMA_TO_DEVICE); - if (dma_mapping_error(i2c->dev, wpaddr)) + if (dma_mapping_error(i2c->dev, wpaddr)) { + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); + return -ENOMEM; - rpaddr = dma_map_single(i2c->dev, (msgs + 1)->buf, + } + + dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1); + if (!dma_rd_buf) { + dma_unmap_single(i2c->dev, wpaddr, + msgs->len, DMA_TO_DEVICE); + + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); + + return -ENOMEM; + } + + rpaddr = dma_map_single(i2c->dev, dma_rd_buf, (msgs + 1)->len, DMA_FROM_DEVICE); if (dma_mapping_error(i2c->dev, rpaddr)) { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); + + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, false); + i2c_put_dma_safe_msg_buf(dma_rd_buf, (msgs + 1), false); + return -ENOMEM; } @@ -578,14 +619,21 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, if (i2c->op == I2C_MASTER_WR) { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); + + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, true); } else if (i2c->op == 
I2C_MASTER_RD) { dma_unmap_single(i2c->dev, rpaddr, msgs->len, DMA_FROM_DEVICE); + + i2c_put_dma_safe_msg_buf(dma_rd_buf, msgs, true); } else { dma_unmap_single(i2c->dev, wpaddr, msgs->len, DMA_TO_DEVICE); dma_unmap_single(i2c->dev, rpaddr, (msgs + 1)->len, DMA_FROM_DEVICE); + + i2c_put_dma_safe_msg_buf(dma_wr_buf, msgs, true); + i2c_put_dma_safe_msg_buf(dma_rd_buf, (msgs + 1), true); } if (ret == 0) { diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 65d06a8193074814cb18a0864eaea9cbc3c044de..cd9c65f3d404ff92f0eef87d4d99236b370e82cc 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -661,9 +661,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); - if (msg->len == 0) - return -EINVAL; - omap->receiver = !!(msg->flags & I2C_M_RD); omap_i2c_resize_fifo(omap, msg->len, omap->receiver); @@ -1179,6 +1176,10 @@ static const struct i2c_algorithm omap_i2c_algo = { .functionality = omap_i2c_func, }; +static const struct i2c_adapter_quirks omap_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + #ifdef CONFIG_OF static struct omap_i2c_bus_platform_data omap2420_pdata = { .rev = OMAP_I2C_IP_VERSION_1, @@ -1453,6 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev) adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; + adap->quirks = &omap_i2c_quirks; adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; adap->bus_recovery_info = &omap_i2c_bus_recovery_info; @@ -1498,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int omap_i2c_runtime_suspend(struct device *dev) +static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1525,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev) return 0; } -static int omap_i2c_runtime_resume(struct device *dev) +static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) { struct omap_i2c_dev *omap = dev_get_drvdata(dev); @@ -1540,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev) } static const struct dev_pm_ops omap_i2c_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, omap_i2c_runtime_resume, NULL) }; -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) -#else -#define OMAP_I2C_PM_OPS NULL -#endif /* CONFIG_PM */ static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", - .pm = OMAP_I2C_PM_OPS, + .pm = &omap_i2c_pm_ops, .of_match_table = of_match_ptr(omap_i2c_of_match), }, }; diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index de3fe6e828cbdcf585aae98989ea418e6c8725fd..f50afa8e3cbad7879ab0d6f8f454946cd941eaee 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -173,7 +172,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = np; - i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW); + i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(i2c->gpio)) return PTR_ERR(i2c->gpio); diff 
--git a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c new file mode 100644 index 0000000000000000000000000000000000000000..86c7c771e2f8be7bbe8c6b09d50e7b698257fa2d --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-common.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Derived from Synopysys I2C driver. + * Copyright (C) 2006 Texas Instruments. + * Copyright (C) 2007 MontaVista Software Inc. + * Copyright (C) 2009 Provigent Ltd. + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +static char *abort_sources[] = { + [ABRT_7B_ADDR_NOACK] = + "slave address not acknowledged (7bit mode)", + [ABRT_10ADDR1_NOACK] = + "first address byte not acknowledged (10bit mode)", + [ABRT_10ADDR2_NOACK] = + "second address byte not acknowledged (10bit mode)", + [ABRT_TXDATA_NOACK] = + "data not acknowledged", + [ABRT_GCALL_NOACK] = + "no acknowledgment for a general call", + [ABRT_GCALL_READ] = + "read after general call", + [ABRT_SBYTE_ACKDET] = + "start byte acknowledged", + [ABRT_SBYTE_NORSTRT] = + "trying to send start byte when restart is disabled", + [ABRT_10B_RD_NORSTRT] = + "trying to read when restart is disabled (10bit mode)", + [ABRT_MASTER_DIS] = + "trying to use disabled adapter", + [ARB_LOST] = + "lost arbitration", + [ABRT_SLAVE_FLUSH_TXFIFO] = + "read command so flush old data in the TX FIFO", + [ABRT_SLAVE_ARBLOST] = + "slave lost the bus while transmitting data to a remote master", + [ABRT_SLAVE_RD_INTX] = + "incorrect slave-transmitter mode configuration", +}; + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset) +{ + return readl_relaxed(dev->base + offset); +} + +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset) +{ + writel_relaxed(b, dev->base + offset); +} + +u32 i2c_phytium_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) +{ + if (cond) + return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + else + return (ic_clk * (tSYMBOL + tf) + 500000) / + 1000000 - 3 + offset; +} + +u32 i2c_phytium_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) +{ + return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; +} + +int i2c_phytium_set_sda_hold(struct phytium_i2c_dev *dev) +{ + if (!dev->sda_hold_time) { + /* Keep previous hold time setting if no one set it */ + dev->sda_hold_time = phytium_readl(dev, IC_SDA_HOLD); + } + + if (!(dev->sda_hold_time & IC_SDA_HOLD_RX_MASK)) + dev->sda_hold_time |= 1 << IC_SDA_HOLD_RX_SHIFT; + + dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n", + dev->sda_hold_time & ~(u32)IC_SDA_HOLD_RX_MASK, + dev->sda_hold_time >> IC_SDA_HOLD_RX_SHIFT); + + return 0; +} + +void __i2c_phytium_disable(struct phytium_i2c_dev *dev) +{ + int timeout = 100; + + do { + __i2c_phytium_disable_nowait(dev); + if ((phytium_readl(dev, IC_ENABLE_STATUS) & 1) == 0) + return; + + /* + * Wait 10 times the signaling period of the highest I2C + * transfer supported by the driver (for 400KHz this is + * 25us). 
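+ *
+ * With the timeout above set to 100 iterations of usleep_range(25, 250)
+ * below, the adapter is given roughly 2.5 ms to 25 ms in total before
+ * the "timeout in disabling adapter" warning is emitted.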
+ */
+ usleep_range(25, 250);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout in disabling adapter\n");
+}
+
+unsigned long i2c_phytium_clk_rate(struct phytium_i2c_dev *dev)
+{
+ if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ return 0;
+ return dev->get_clk_rate_khz(dev);
+}
+
+int i2c_phytium_prepare_clk(struct phytium_i2c_dev *dev, bool prepare)
+{
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+
+ if (prepare)
+ return clk_prepare_enable(dev->clk);
+
+ clk_disable_unprepare(dev->clk);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_phytium_prepare_clk);
+
+int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev)
+{
+ int timeout = 20; /* 20 ms */
+
+ while (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) {
+ if (timeout <= 0) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ i2c_recover_bus(&dev->adapter);
+
+ if (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY)
+ return -ETIMEDOUT;
+ return 0;
+ }
+ timeout--;
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev)
+{
+ unsigned long abort_source = dev->abort_source;
+ int i;
+
+ if (abort_source & IC_TX_ABRT_NOACK) {
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ dev_dbg(dev->dev,
+ "%s: %s\n", __func__, abort_sources[i]);
+ return -EREMOTEIO;
+ }
+
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+
+ if (abort_source & IC_TX_ARB_LOST)
+ return -EAGAIN;
+ else if (abort_source & IC_TX_ABRT_GCALL_READ)
+ return -EINVAL;
+ else
+ return -EIO;
+}
+
+u32 i2c_phytium_func(struct i2c_adapter *adapter)
+{
+ struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter);
+
+ return dev->functionality;
+}
+
+void i2c_phytium_disable(struct phytium_i2c_dev *dev)
+{
+ /* Disable controller */
+ __i2c_phytium_disable(dev);
+
+ /* Disable all interrupts */
+ phytium_writel(dev, 0, IC_INTR_MASK);
+ phytium_readl(dev, IC_CLR_INTR);
+}
+
+void i2c_phytium_disable_int(struct phytium_i2c_dev *dev)
+{
+ phytium_writel(dev, 0, IC_INTR_MASK);
+}
+
+MODULE_AUTHOR("Cheng Quan ");
+MODULE_DESCRIPTION("Phytium I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h
new file mode 100644
index 0000000000000000000000000000000000000000..93a1dc4e6ac42fe54a9f15e3436e644b97b12943
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-core.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Phytium I2C adapter driver.
+ *
+ * Derived from Synopsys I2C driver.
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#include +#include +#include + +#define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ + I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | \ + I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_BLOCK_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK) + +#define IC_CON_MASTER 0x1 +#define IC_CON_SPEED_STD 0x2 +#define IC_CON_SPEED_FAST 0x4 +#define IC_CON_SPEED_HIGH 0x6 +#define IC_CON_SPEED_MASK 0x6 +#define IC_CON_10BITADDR_SLAVE 0x8 +#define IC_CON_10BITADDR_MASTER 0x10 +#define IC_CON_RESTART_EN 0x20 +#define IC_CON_SLAVE_DISABLE 0x40 +#define IC_CON_STOP_DET_IFADDRESSED 0x80 +#define IC_CON_TX_EMPTY_CTRL 0x100 +#define IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 + +#define IC_CON 0x0 +#define IC_TAR 0x4 +#define IC_SAR 0x8 +#define IC_DATA_CMD 0x10 +#define IC_SS_SCL_HCNT 0x14 +#define IC_SS_SCL_LCNT 0x18 +#define IC_FS_SCL_HCNT 0x1c +#define IC_FS_SCL_LCNT 0x20 +#define IC_HS_SCL_HCNT 0x24 +#define IC_HS_SCL_LCNT 0x28 +#define IC_INTR_STAT 0x2c +#define IC_INTR_MASK 0x30 +#define IC_RAW_INTR_STAT 0x34 +#define IC_RX_TL 0x38 +#define IC_TX_TL 0x3c +#define IC_CLR_INTR 0x40 +#define IC_CLR_RX_UNDER 0x44 +#define IC_CLR_RX_OVER 0x48 +#define IC_CLR_TX_OVER 0x4c +#define IC_CLR_RD_REQ 0x50 +#define IC_CLR_TX_ABRT 0x54 +#define IC_CLR_RX_DONE 0x58 +#define IC_CLR_ACTIVITY 0x5c +#define IC_CLR_STOP_DET 0x60 +#define IC_CLR_START_DET 0x64 +#define IC_CLR_GEN_CALL 0x68 +#define IC_ENABLE 0x6c +#define IC_STATUS 0x70 +#define IC_TXFLR 0x74 +#define IC_RXFLR 0x78 +#define IC_SDA_HOLD 0x7c +#define IC_TX_ABRT_SOURCE 0x80 +#define IC_ENABLE_STATUS 0x9c +#define IC_SMBCLK_LOW_MEXT 0xa8 +#define IC_SMBCLK_LOW_TIMEOUT 0xac +#define IC_SMBDAT_STUCK_TIMEOUT 0xb4 +#define IC_CLR_SMBCLK_EXT_LOW_TIMEOUT 0xbc +#define IC_CLR_SMBCLK_TMO_LOW_TIMEOUT 0xc0 +#define IC_CLR_SMBDAT_LOW_TIMEOUT 0xc4 +#define IC_CLR_SMBALERT_IN_N 0xd0 + +#define IC_INTR_RX_UNDER 0x001 +#define IC_INTR_RX_OVER 0x002 +#define IC_INTR_RX_FULL 0x004 +#define IC_INTR_TX_OVER 0x008 +#define IC_INTR_TX_EMPTY 0x010 +#define IC_INTR_RD_REQ 0x020 +#define IC_INTR_TX_ABRT 0x040 +#define IC_INTR_RX_DONE 0x080 +#define IC_INTR_ACTIVITY 0x100 +#define IC_INTR_STOP_DET 0x200 +#define IC_INTR_START_DET 0x400 +#define IC_INTR_GEN_CALL 0x800 +#define IC_INTR_SMBCLK_EXT_LOW_TIMEOUT 0x1000 +#define IC_INTR_SMBCLK_TMO_LOW_TIMEOUT 0x2000 +#define IC_INTR_SMBSDA_LOW_TIMEOUT 0x4000 +#define IC_INTR_SMBALERT_IN_N 0x20000 + +#define IC_INTR_DEFAULT_MASK (IC_INTR_RX_FULL | \ + IC_INTR_TX_ABRT | \ + IC_INTR_STOP_DET) +#define IC_INTR_MASTER_MASK (IC_INTR_DEFAULT_MASK | \ + IC_INTR_TX_EMPTY) +#define IC_INTR_SLAVE_MASK (IC_INTR_DEFAULT_MASK | \ + IC_INTR_RX_DONE | \ + IC_INTR_RX_UNDER | \ + IC_INTR_RD_REQ) +#define IC_INTR_SMBUS_MASK (IC_INTR_MASTER_MASK | \ + IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ + IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | \ + IC_INTR_SMBSDA_LOW_TIMEOUT) + +#define IC_STATUS_ACTIVITY 0x1 +#define IC_STATUS_TFE BIT(2) +#define IC_STATUS_MASTER_ACTIVITY BIT(5) +#define IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define IC_SDA_HOLD_RX_SHIFT 16 +#define IC_SDA_HOLD_RX_MASK GENMASK(23, IC_SDA_HOLD_RX_SHIFT) + +#define IC_ERR_TX_ABRT 0x1 + +#define IC_TAR_10BITADDR_MASTER BIT(12) + +#define IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) +#define IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) + +#define STATUS_IDLE 0x0 +#define STATUS_WRITE_IN_PROGRESS 0x1 +#define STATUS_READ_IN_PROGRESS 0x2 + +/* + * operation modes + */ +#define PHYTIUM_IC_MASTER 0 +#define PHYTIUM_IC_SLAVE 1 + +#define ABRT_7B_ADDR_NOACK 0 +#define ABRT_10ADDR1_NOACK 1 +#define ABRT_10ADDR2_NOACK 2 +#define 
ABRT_TXDATA_NOACK 3 +#define ABRT_GCALL_NOACK 4 +#define ABRT_GCALL_READ 5 +#define ABRT_SBYTE_ACKDET 7 +#define ABRT_SBYTE_NORSTRT 9 +#define ABRT_10B_RD_NORSTRT 10 +#define ABRT_MASTER_DIS 11 +#define ARB_LOST 12 +#define ABRT_SLAVE_FLUSH_TXFIFO 13 +#define ABRT_SLAVE_ARBLOST 14 +#define ABRT_SLAVE_RD_INTX 15 + +#define IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) +#define IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) +#define IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) +#define IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) +#define IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) +#define IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) +#define IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) +#define IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) +#define IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) +#define IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) +#define IC_TX_ARB_LOST (1UL << ARB_LOST) +#define IC_RX_ABRT_SLAVE_RD_INTX (1UL << ABRT_SLAVE_RD_INTX) +#define IC_RX_ABRT_SLAVE_ARBLOST (1UL << ABRT_SLAVE_ARBLOST) +#define IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << ABRT_SLAVE_FLUSH_TXFIFO) + +#define IC_TX_ABRT_NOACK (IC_TX_ABRT_7B_ADDR_NOACK | \ + IC_TX_ABRT_10ADDR1_NOACK | \ + IC_TX_ABRT_10ADDR2_NOACK | \ + IC_TX_ABRT_TXDATA_NOACK | \ + IC_TX_ABRT_GCALL_NOACK) +#define CONTROLLER_TYPE_IIC 0 +#define CONTROLLER_TYPE_SMBUS 1 + +struct phytium_i2c_dev { + struct device *dev; + void __iomem *base; + int irq; + u32 flags; + struct completion cmd_complete; + struct clk *clk; + struct reset_control *rst; + int mode; + struct i2c_client *slave; + u32 (*get_clk_rate_khz)(struct phytium_i2c_dev *dev); + + struct i2c_adapter adapter; + struct i2c_client *ara; + struct i2c_smbus_alert_setup alert_data; + + struct phytium_pci_i2c *controller; + + unsigned int status; + int cmd_err; + u32 abort_source; + + struct i2c_msg *msgs; + int msgs_num; + int msg_write_idx; + int msg_read_idx; + int msg_err; + u32 tx_buf_len; + u8 *tx_buf; + u32 rx_buf_len; + u8 *rx_buf; + + u32 master_cfg; + u32 slave_cfg; + u32 functionality; + unsigned int tx_fifo_depth; + unsigned int rx_fifo_depth; + int rx_outstanding; + + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + + bool pm_disabled; + void (*disable)(struct phytium_i2c_dev *dev); + void (*disable_int)(struct phytium_i2c_dev *dev); + int (*init)(struct phytium_i2c_dev *dev); +}; + +#define ACCESS_INTR_MASK 0x00000004 + +#define DEFAULT_CLOCK_FREQUENCY 48000000 + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset); +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset); +unsigned long i2c_phytium_clk_rate(struct phytium_i2c_dev *dev); +int i2c_phytium_prepare_clk(struct phytium_i2c_dev *dev, bool prepare); +int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev); +int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev); +u32 i2c_phytium_func(struct i2c_adapter *adap); +void i2c_phytium_disable(struct phytium_i2c_dev *dev); +void i2c_phytium_disable_int(struct phytium_i2c_dev *dev); +int i2c_phytium_set_sda_hold(struct phytium_i2c_dev *dev); +u32 i2c_phytium_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); +u32 i2c_phytium_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); + +static inline void __i2c_phytium_enable(struct phytium_i2c_dev *dev) +{ + phytium_writel(dev, 1, IC_ENABLE); +} + +static inline void __i2c_phytium_disable_nowait(struct 
phytium_i2c_dev *dev)
+{
+ phytium_writel(dev, 0, IC_ENABLE);
+}
+
+void __i2c_phytium_disable(struct phytium_i2c_dev *dev);
+
+extern int i2c_phytium_probe(struct phytium_i2c_dev *dev);
+
+extern int i2c_phytium_probe_slave(struct phytium_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-phytium-master.c b/drivers/i2c/busses/i2c-phytium-master.c
new file mode 100644
index 0000000000000000000000000000000000000000..1be44002054c38386574be13ab1150b2b578e446
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-master.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2C adapter driver.
+ *
+ * Derived from Synopsys I2C driver.
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "i2c-phytium-core.h"
+
+static int i2c_phytium_init_master(struct phytium_i2c_dev *dev)
+{
+ /* Disable the adapter */
+ __i2c_phytium_disable(dev);
+
+ /* Write standard speed timing parameters */
+ phytium_writel(dev, dev->ss_hcnt, IC_SS_SCL_HCNT);
+ phytium_writel(dev, dev->ss_lcnt, IC_SS_SCL_LCNT);
+
+ /* Write fast mode/fast mode plus timing parameters */
+ phytium_writel(dev, dev->fs_hcnt, IC_FS_SCL_HCNT);
+ phytium_writel(dev, dev->fs_lcnt, IC_FS_SCL_LCNT);
+
+ /* Write high speed timing parameters if supported */
+ if (dev->hs_hcnt && dev->hs_lcnt) {
+ phytium_writel(dev, dev->hs_hcnt, IC_HS_SCL_HCNT);
+ phytium_writel(dev, dev->hs_lcnt, IC_HS_SCL_LCNT);
+ }
+
+ /* Write SDA hold time if supported */
+ if (dev->sda_hold_time)
+ phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD);
+
+ /* Configure Tx/Rx FIFO threshold levels */
+ phytium_writel(dev, dev->tx_fifo_depth >> 1, IC_TX_TL);
+ phytium_writel(dev, 0, IC_RX_TL);
+
+ /* Configure the I2C master */
+ phytium_writel(dev, dev->master_cfg, IC_CON);
+
+ return 0;
+}
+
+static void i2c_phytium_xfer_init(struct phytium_i2c_dev *dev)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 ic_con, ic_tar = 0;
+
+ /* Disable the adapter */
+ __i2c_phytium_disable(dev);
+
+ /* If the slave address is 10-bit address, enable 10BITADDR */
+ ic_con = phytium_readl(dev, IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+ ic_con |= IC_CON_10BITADDR_MASTER;
+ ic_tar = IC_TAR_10BITADDR_MASTER;
+ } else {
+ ic_con &= ~IC_CON_10BITADDR_MASTER;
+ }
+
+ phytium_writel(dev, ic_con, IC_CON);
+
+ /*
+ * Set the slave (target) address and enable 10-bit addressing mode
+ * if applicable.
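+ *
+ * Note: on DesignWare-derived controllers such as this one, IC_TAR is
+ * normally only writable while the adapter is disabled, which is why
+ * the adapter is disabled at the top of this function.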
+ */ + phytium_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, IC_TAR); + + /* Enforce disabled interrupts */ + i2c_phytium_disable_int(dev); + + /* Enable the adapter */ + __i2c_phytium_enable(dev); + + /* Dummy read */ + phytium_readl(dev, IC_ENABLE_STATUS); + + /* Clear and enable interrupts */ + phytium_readl(dev, IC_CLR_INTR); + phytium_writel(dev, IC_INTR_SMBUS_MASK, IC_INTR_MASK); +} + +static void i2c_phytium_xfer_msg(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + u32 intr_mask; + int tx_limit, rx_limit; + u32 addr = msgs[dev->msg_write_idx].addr; + u32 buf_len = dev->tx_buf_len; + u8 *buf = dev->tx_buf; + bool need_restart = false; + + intr_mask = IC_INTR_MASTER_MASK; + + for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { + u32 flags = msgs[dev->msg_write_idx].flags; + + if (msgs[dev->msg_write_idx].addr != addr) { + dev_err(dev->dev, + "%s: invalid target address\n", __func__); + dev->msg_err = -EINVAL; + break; + } + + if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { + /* new i2c_msg */ + buf = msgs[dev->msg_write_idx].buf; + buf_len = msgs[dev->msg_write_idx].len; + + if ((dev->master_cfg & IC_CON_RESTART_EN) && + (dev->msg_write_idx > 0)) + need_restart = true; + } + + tx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_TXFLR); + rx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_RXFLR); + + while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { + u32 cmd = 0; + + if (dev->msg_write_idx == dev->msgs_num - 1 && + buf_len == 1 && !(flags & I2C_M_RECV_LEN)) + cmd |= BIT(9); + if (need_restart) { + cmd |= BIT(10); + need_restart = false; + } + + if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { + /* avoid rx buffer overrun */ + if (dev->rx_outstanding >= dev->rx_fifo_depth) + break; + + phytium_writel(dev, cmd | 0x100, IC_DATA_CMD); + rx_limit--; + dev->rx_outstanding++; + } else { + phytium_writel(dev, cmd | *buf++, IC_DATA_CMD); + } + tx_limit--; + buf_len--; + } + + dev->tx_buf = buf; + dev->tx_buf_len = buf_len; + + /* + * Because we don't know the buffer length in the + * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop + * the transaction here. + */ + if (buf_len > 0 || flags & I2C_M_RECV_LEN) { + /* more bytes to be written */ + dev->status |= STATUS_WRITE_IN_PROGRESS; + break; + } + dev->status &= ~STATUS_WRITE_IN_PROGRESS; + } + + if (dev->msg_write_idx == dev->msgs_num) + intr_mask &= ~IC_INTR_TX_EMPTY; + + if (dev->msg_err) + intr_mask = 0; + + phytium_writel(dev, intr_mask, IC_INTR_MASK); +} + +static u8 i2c_phytium_recv_len(struct phytium_i2c_dev *dev, u8 len) +{ + struct i2c_msg *msgs = dev->msgs; + u32 flags = msgs[dev->msg_read_idx].flags; + + /* + * Adjust the buffer length and mask the flag + * after receiving the first byte. + */ + len += (flags & I2C_CLIENT_PEC) ? 
2 : 1; + dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); + msgs[dev->msg_read_idx].len = len; + msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; + + return len; +} + +static void i2c_phytium_read(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + int rx_valid; + + for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { + u32 len; + u8 *buf; + + if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) + continue; + + if (!(dev->status & STATUS_READ_IN_PROGRESS)) { + len = msgs[dev->msg_read_idx].len; + buf = msgs[dev->msg_read_idx].buf; + } else { + len = dev->rx_buf_len; + buf = dev->rx_buf; + } + + rx_valid = phytium_readl(dev, IC_RXFLR); + + for (; len > 0 && rx_valid > 0; len--, rx_valid--) { + u32 flags = msgs[dev->msg_read_idx].flags; + + *buf = phytium_readl(dev, IC_DATA_CMD); + /* Ensure length byte is a valid value */ + if (flags & I2C_M_RECV_LEN && + *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) { + len = i2c_phytium_recv_len(dev, *buf); + } + buf++; + dev->rx_outstanding--; + } + + if (len > 0) { + dev->status |= STATUS_READ_IN_PROGRESS; + dev->rx_buf_len = len; + dev->rx_buf = buf; + return; + } + + dev->status &= ~STATUS_READ_IN_PROGRESS; + } +} + +static int i2c_phytium_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); + int ret; + + dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); + + pm_runtime_get_sync(dev->dev); + + reinit_completion(&dev->cmd_complete); + dev->msgs = msgs; + dev->msgs_num = num; + dev->cmd_err = 0; + dev->msg_write_idx = 0; + dev->msg_read_idx = 0; + dev->msg_err = 0; + dev->status = STATUS_IDLE; + dev->abort_source = 0; + dev->rx_outstanding = 0; + + ret = i2c_phytium_wait_bus_not_busy(dev); + if (ret < 0) + goto done; + + /* Start the transfers */ + i2c_phytium_xfer_init(dev); + + /* Wait for tx to complete */ + if (!wait_for_completion_timeout + (&dev->cmd_complete, adapter->timeout)) { + dev_err(dev->dev, "controller timed out\n"); + i2c_recover_bus(&dev->adapter); + i2c_phytium_init_master(dev); + ret = -ETIMEDOUT; + goto done; + } + + __i2c_phytium_disable_nowait(dev); + + if (dev->msg_err) { + ret = dev->msg_err; + goto done; + } + + if (likely(!dev->cmd_err && !dev->status)) { + ret = num; + goto done; + } + + /* We have got an error */ + if (dev->cmd_err == IC_ERR_TX_ABRT) { + ret = i2c_phytium_handle_tx_abort(dev); + goto done; + } + + if (dev->status) + dev_err(dev->dev, "transfer terminated early.\n"); + + ret = -EIO; + +done: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return ret; +} + +static const struct i2c_algorithm i2c_phytium_algo = { + .master_xfer = i2c_phytium_xfer, + .functionality = i2c_phytium_func, +}; + +static const struct i2c_adapter_quirks i2c_phytium_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + +static u32 i2c_phytium_read_clear_intrbits(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = phytium_readl(dev, IC_INTR_STAT); + + if (stat & IC_INTR_RX_UNDER) + phytium_readl(dev, IC_CLR_RX_UNDER); + if (stat & IC_INTR_RX_OVER) + phytium_readl(dev, IC_CLR_RX_OVER); + if (stat & IC_INTR_TX_OVER) + phytium_readl(dev, IC_CLR_TX_OVER); + if (stat & IC_INTR_RD_REQ) + phytium_readl(dev, IC_CLR_RD_REQ); + if (stat & IC_INTR_TX_ABRT) { + dev->abort_source = phytium_readl(dev, IC_TX_ABRT_SOURCE); + phytium_readl(dev, IC_CLR_TX_ABRT); + } + if (stat & IC_INTR_RX_DONE) + phytium_readl(dev, IC_CLR_RX_DONE); + if (stat & IC_INTR_ACTIVITY) + phytium_readl(dev, IC_CLR_ACTIVITY); + if (stat & 
IC_INTR_STOP_DET) + phytium_readl(dev, IC_CLR_STOP_DET); + if (stat & IC_INTR_START_DET) + phytium_readl(dev, IC_CLR_START_DET); + if (stat & IC_INTR_GEN_CALL) + phytium_readl(dev, IC_CLR_GEN_CALL); + if (stat & IC_INTR_SMBCLK_EXT_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_EXT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBCLK_TMO_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_TMO_LOW_TIMEOUT); + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBDAT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBALERT_IN_N) + phytium_readl(dev, IC_CLR_SMBALERT_IN_N); + + return stat; +} + +static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = i2c_phytium_read_clear_intrbits(dev); + + /* SMBus interrupt */ + if (stat & (IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | + IC_INTR_SMBCLK_TMO_LOW_TIMEOUT)) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) & (~BIT(6)), + IC_ENABLE); + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(4), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(6), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBALERT_IN_N && dev->ara) + i2c_handle_smbus_alert(dev->ara); + + if (stat & IC_INTR_TX_ABRT) { + dev->cmd_err |= IC_ERR_TX_ABRT; + dev->status = STATUS_IDLE; + + /* Anytime TX_ABRT is set, the contents of the tx/rx + * buffers are flushed. Make sure to skip them. + */ + phytium_writel(dev, 0, IC_INTR_MASK); + goto abort; + } + + if (stat & IC_INTR_RX_FULL) + i2c_phytium_read(dev); + + if (stat & IC_INTR_TX_EMPTY) + i2c_phytium_xfer_msg(dev); + +abort: + if ((stat & (IC_INTR_TX_ABRT | IC_INTR_STOP_DET)) || + dev->msg_err) + complete(&dev->cmd_complete); + else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { + /* Workaround to trigger pending interrupt */ + stat = phytium_readl(dev, IC_INTR_MASK); + i2c_phytium_disable_int(dev); + phytium_writel(dev, stat, IC_INTR_MASK); + } + + return 0; +} + +static int i2c_phytium_set_timings_master(struct phytium_i2c_dev *dev) +{ + const char *mode_str, *fp_str = ""; + u32 sda_falling_time, scl_falling_time; + struct i2c_timings *t = &dev->timings; + u32 ic_clk; + int ret; + + /* Set standard and fast speed dividers for high/low periods */ + sda_falling_time = t->sda_fall_ns ?: 300; /* ns */ + scl_falling_time = t->scl_fall_ns ?: 300; /* ns */ + + /* Calculate SCL timing parameters for standard mode if not set */ + if (!dev->ss_hcnt || !dev->ss_lcnt) { + ic_clk = i2c_phytium_clk_rate(dev); + dev->ss_hcnt = + i2c_phytium_scl_hcnt(ic_clk, + 4000, /* tHD;STA = tHIGH = 4.0 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->ss_lcnt = + i2c_phytium_scl_lcnt(ic_clk, + 4700, /* tLOW = 4.7 us */ + scl_falling_time, + 0); /* No offset */ + } + dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n", + dev->ss_hcnt, dev->ss_lcnt); + /* + * Set SCL timing parameters for fast mode or fast mode plus. Only + * difference is the timing parameter values since the registers are + * the same. + */ + if (t->bus_freq_hz == 1000000) { + /* + * Check are fast mode plus parameters available and use + * fast mode if not. + */ + if (dev->fp_hcnt && dev->fp_lcnt) { + dev->fs_hcnt = dev->fp_hcnt; + dev->fs_lcnt = dev->fp_lcnt; + fp_str = " Plus"; + } + } + /* + * Calculate SCL timing parameters for fast mode if not set. They are + * needed also in high speed mode. 
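+ *
+ * As a purely illustrative example (assumed 48 MHz controller clock,
+ * i.e. ic_clk = 48000 kHz, and the default 300 ns fall times), the
+ * i2c_phytium_scl_hcnt()/i2c_phytium_scl_lcnt() helpers yield
+ * fs_hcnt = (48000 * (600 + 300) + 500000) / 1000000 - 3 = 40 and
+ * fs_lcnt = (48000 * (1300 + 300) + 500000) / 1000000 - 1 = 76,
+ * corresponding to an SCL rate of roughly 400 kHz.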
+ */ + if (!dev->fs_hcnt || !dev->fs_lcnt) { + ic_clk = i2c_phytium_clk_rate(dev); + dev->fs_hcnt = + i2c_phytium_scl_hcnt(ic_clk, + 600, /* tHD;STA = tHIGH = 0.6 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->fs_lcnt = + i2c_phytium_scl_lcnt(ic_clk, + 1300, /* tLOW = 1.3 us */ + scl_falling_time, + 0); /* No offset */ + } + dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", + fp_str, dev->fs_hcnt, dev->fs_lcnt); + + if (dev->hs_hcnt && dev->hs_lcnt) + dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", + dev->hs_hcnt, dev->hs_lcnt); + + ret = i2c_phytium_set_sda_hold(dev); + if (ret) + goto out; + + switch (dev->master_cfg & IC_CON_SPEED_MASK) { + case IC_CON_SPEED_STD: + mode_str = "Standard Mode"; + break; + case IC_CON_SPEED_HIGH: + mode_str = "High Speed Mode"; + break; + default: + mode_str = "Fast Mode"; + } + dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str); + +out: + return ret; +} + +static irqreturn_t i2c_phytium_isr(int this_irq, void *dev_id) +{ + struct phytium_i2c_dev *dev = dev_id; + u32 stat, enabled; + + enabled = phytium_readl(dev, IC_ENABLE); + stat = phytium_readl(dev, IC_RAW_INTR_STAT); + if (!enabled || !(stat & ~IC_INTR_ACTIVITY)) + return IRQ_NONE; + + i2c_phytium_irq_handler_master(dev); + + return IRQ_HANDLED; +} + +int i2c_phytium_probe(struct phytium_i2c_dev *dev) +{ + struct i2c_adapter *adapter = &dev->adapter; + unsigned long irq_flags; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_phytium_init_master; + dev->disable = i2c_phytium_disable; + dev->disable_int = i2c_phytium_disable_int; + + ret = i2c_phytium_set_timings_master(dev); + if (ret) + return ret; + + ret = dev->init(dev); + if (ret) + return ret; + + /* XXX: should be initialized in firmware, remove it in future */ +#define DEFAULT_TIMEOUT (DEFAULT_CLOCK_FREQUENCY / 1000 * 35) + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_MEXT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_TIMEOUT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBDAT_STUCK_TIMEOUT); + + snprintf(adapter->name, sizeof(adapter->name), "Phytium I2C adapter"); + adapter->retries = 3; + adapter->algo = &i2c_phytium_algo; + adapter->quirks = &i2c_phytium_quirks; + adapter->dev.parent = dev->dev; + i2c_set_adapdata(adapter, dev); + + irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; + + i2c_phytium_disable_int(dev); + ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr, irq_flags, + dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failed to request irq %i: %d\n", + dev->irq, ret); + return ret; + } + + /* + * Increment PM usage count during adapter registration in order to + * avoid possible spurious runtime suspend when adapter device is + * registered to the device core and immediate resume in case bus has + * registered I2C slaves that do I2C transfers in their probe. 
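+ *
+ * The matching pm_runtime_put_noidle() after i2c_add_numbered_adapter()
+ * drops the usage count again without scheduling an immediate idle, so
+ * runtime PM behaves normally once registration has completed.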
+ */ + pm_runtime_get_noresume(dev->dev); + ret = i2c_add_numbered_adapter(adapter); + if (ret) + dev_err(dev->dev, "fail to add adapter: %d\n", ret); + pm_runtime_put_noidle(dev->dev); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phytium_probe); + +MODULE_DESCRIPTION("Phytium I2C bus master adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..426f6eb17bce750701df548763c6475e559b0f4c --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-pci.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PCI driver for Phytium I2C adapter. + * + * Derived from Synopysys I2C driver. + * Copyright (C) 2006 Texas Instruments. + * Copyright (C) 2007 MontaVista Software Inc. + * Copyright (C) 2009 Provigent Ltd. + * Copyright (C) 2011, 2015, 2016 Intel Corporation. + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +#define DRV_NAME "i2c-phytium-pci" + +enum phytium_pci_ctl_id_t { + octopus_i2c, +}; + +struct scl_sda_cfg { + u32 ss_hcnt; + u32 fs_hcnt; + u32 ss_lcnt; + u32 fs_lcnt; + u32 sda_hold; +}; + +struct phytium_pci_i2c { + u32 bus_num; + u32 bus_cfg; + u32 tx_fifo_depth; + u32 rx_fifo_depth; + u32 clk_khz; + u32 functionality; + u32 flags; + struct scl_sda_cfg *scl_sda_cfg; + int (*setup)(struct pci_dev *pdev, struct phytium_pci_i2c *c); +}; + +/* Octopus HCNT/LCNT/SDA hold time */ +static struct scl_sda_cfg octopus_config = { + .ss_hcnt = 0x190, + .ss_lcnt = 0x1d6, + .fs_hcnt = 0x3c, + .fs_lcnt = 0x82, + .sda_hold = 0x0, // XXX +}; + +static int octopus_setup(struct pci_dev *pdev, struct phytium_pci_i2c *c) +{ + struct phytium_i2c_dev *i2c = pci_get_drvdata(pdev); + + if (pdev->device == 0xdc32) { + /* + * Since we have already register the adapter, the dev->irq + * must be valid. 
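+ *
+ * i2c_setup_smbus_alert() registers an "smbus_alert" client at the SMBus
+ * Alert Response Address (0x0c) on this adapter; client drivers that
+ * implement the .alert() callback are then notified when the alert
+ * interrupt fires.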
+ */ + i2c->alert_data.irq = i2c->irq; + + i2c->ara = i2c_setup_smbus_alert(&i2c->adapter, + &i2c->alert_data); + if (!i2c->ara) + return -ENODEV; + } + + return 0; +} + +static struct phytium_pci_i2c pci_ctrl_info[] = { + [octopus_i2c] = { + .bus_num = -1, + .bus_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE | + IC_CON_RESTART_EN | IC_CON_SPEED_FAST, + .tx_fifo_depth = 7, + .rx_fifo_depth = 7, + .functionality = I2C_FUNC_10BIT_ADDR, + .clk_khz = 48000000, + .scl_sda_cfg = &octopus_config, + .setup = octopus_setup, + }, +}; + +#ifdef CONFIG_PM +static int i2c_phytium_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + i_dev->disable(i_dev); + + return 0; +} + +static int i2c_phytium_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + return i_dev->init(i_dev); +} +#endif + +static UNIVERSAL_DEV_PM_OPS(i2c_phytium_pm_ops, i2c_phytium_pci_suspend, + i2c_phytium_pci_resume, NULL); + +static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) +{ + return dev->controller->clk_khz; +} + +static int i2c_phytium_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_i2c_dev *dev; + struct i2c_adapter *adapter; + struct phytium_pci_i2c *controller; + struct scl_sda_cfg *cfg; + int ret; + + if (id->driver_data >= ARRAY_SIZE(pci_ctrl_info)) { + dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__, + id->driver_data); + ret = -EINVAL; + goto out; + } + + controller = &pci_ctrl_info[id->driver_data]; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable I2C PCI device (%d)\n", ret); + goto out; + } + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out; + } + + dev = devm_kzalloc(&pdev->dev, + sizeof(struct phytium_i2c_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto out; + } + + dev->controller = controller; + dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; + dev->base = pcim_iomap_table(pdev)[0]; + dev->dev = &pdev->dev; + dev->irq = pdev->irq; + dev->flags |= controller->flags; + + dev->functionality = controller->functionality | + IC_DEFAULT_FUNCTIONALITY; + dev->master_cfg = controller->bus_cfg; + if (controller->scl_sda_cfg) { + cfg = controller->scl_sda_cfg; + dev->ss_hcnt = cfg->ss_hcnt; + dev->fs_hcnt = cfg->fs_hcnt; + dev->ss_lcnt = cfg->ss_lcnt; + dev->fs_lcnt = cfg->fs_lcnt; + dev->sda_hold_time = cfg->sda_hold; + } + + pci_set_drvdata(pdev, dev); + + dev->tx_fifo_depth = controller->tx_fifo_depth; + dev->rx_fifo_depth = controller->rx_fifo_depth; + + adapter = &dev->adapter; + adapter->owner = THIS_MODULE; + adapter->class = 0; + ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); + adapter->nr = controller->bus_num; + + ret = i2c_phytium_probe(dev); + if (ret) + goto out; + + if (controller->setup) { + ret = controller->setup(pdev, controller); + if (ret) + goto out; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); + +out: + return ret; +} + +static void i2c_phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_i2c_dev *dev = pci_get_drvdata(pdev); + + dev->disable(dev); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + i2c_del_adapter(&dev->adapter); +} + +static const struct 
pci_device_id i2_phytium_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc32), octopus_i2c }, + { PCI_VDEVICE(PHYTIUM, 0xdc30), octopus_i2c }, + { } +}; +MODULE_DEVICE_TABLE(pci, i2_phytium_pci_ids); + +static struct pci_driver phytium_i2c_driver = { + .name = DRV_NAME, + .id_table = i2_phytium_pci_ids, + .probe = i2c_phytium_pci_probe, + .remove = i2c_phytium_pci_remove, + .driver = { + .pm = &i2c_phytium_pm_ops, + }, +}; + +module_pci_driver(phytium_i2c_driver); + +MODULE_ALIAS("i2c-phytium-pci"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium PCI I2C bus adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-platform.c b/drivers/i2c/busses/i2c-phytium-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..68f022d71767e965f48df744d30061643cf30d1e --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-platform.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Derived from Synopysys I2C driver. + * Copyright (C) 2006 Texas Instruments. + * Copyright (C) 2007 MontaVista Software Inc. + * Copyright (C) 2009 Provigent Ltd. + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +#define DRV_NAME "i2c-phytium-platform" + +static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) +{ + return clk_get_rate(dev->clk)/1000; +} + +#ifdef CONFIG_ACPI +static void phytium_i2c_acpi_params(struct platform_device *pdev, char method[], + u16 *hcnt, u16 *lcnt, u32 *sda_hold) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *obj; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) + return; + + obj = (union acpi_object *)buf.pointer; + if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { + const union acpi_object *objs = obj->package.elements; + + *hcnt = (u16)objs[0].integer.value; + *lcnt = (u16)objs[1].integer.value; + *sda_hold = (u32)objs[2].integer.value; + } + + kfree(buf.pointer); +} + +static int phytium_i2c_acpi_configure(struct platform_device *pdev) +{ + struct phytium_i2c_dev *dev = platform_get_drvdata(pdev); + struct i2c_timings *t = &dev->timings; + u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + const struct acpi_device_id *id; + struct acpi_device *adev; + + dev->adapter.nr = -1; + dev->tx_fifo_depth = 32; + dev->rx_fifo_depth = 32; + + /* + * Try to get SDA hold time and *CNT values from an ACPI method for + * selected speed modes. 
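+ *
+ * Each method (SSCN, FMCN, FPCN, HSCN) is expected to return a package of
+ * three integers, which phytium_i2c_acpi_params() above interprets as
+ * { HCNT, LCNT, SDA hold time }. A method that is missing or returns a
+ * different layout leaves the corresponding fields untouched.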
+ */ + phytium_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, + &dev->ss_lcnt, &ss_ht); + phytium_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, + &dev->fp_lcnt, &fp_ht); + phytium_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, + &dev->hs_lcnt, &hs_ht); + phytium_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, + &dev->fs_lcnt, &fs_ht); + + switch (t->bus_freq_hz) { + case 100000: + dev->sda_hold_time = ss_ht; + break; + case 1000000: + dev->sda_hold_time = fp_ht; + break; + case 3400000: + dev->sda_hold_time = hs_ht; + break; + case 400000: + default: + dev->sda_hold_time = fs_ht; + break; + } + + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (id && id->driver_data) + dev->flags |= (u32)id->driver_data; + + if (acpi_bus_get_device(handle, &adev)) + return -ENODEV; + + return 0; +} + +static const struct acpi_device_id phytium_i2c_acpi_match[] = { + { "PHYT0038", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_i2c_acpi_match); +#else +static inline int phytium_i2c_acpi_configure(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif + +static void i2c_phytium_configure_master(struct phytium_i2c_dev *dev) +{ + struct i2c_timings *t = &dev->timings; + + dev->functionality = I2C_FUNC_10BIT_ADDR | IC_DEFAULT_FUNCTIONALITY; + + dev->master_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE | + IC_CON_RESTART_EN; + + dev->mode = PHYTIUM_IC_MASTER; + + switch (t->bus_freq_hz) { + case 100000: + dev->master_cfg |= IC_CON_SPEED_STD; + break; + case 3400000: + dev->master_cfg |= IC_CON_SPEED_HIGH; + break; + default: + dev->master_cfg |= IC_CON_SPEED_FAST; + } +} + +static void i2c_phytium_configure_slave(struct phytium_i2c_dev *dev) +{ + dev->functionality = I2C_FUNC_SLAVE | IC_DEFAULT_FUNCTIONALITY; + + dev->slave_cfg = IC_CON_RX_FIFO_FULL_HLD_CTRL | + IC_CON_RESTART_EN | IC_CON_STOP_DET_IFADDRESSED; + + dev->mode = PHYTIUM_IC_SLAVE; +} + +static int phytium_i2c_plat_probe(struct platform_device *pdev) +{ + struct i2c_adapter *adap; + struct phytium_i2c_dev *dev; + struct i2c_timings *t; + u32 acpi_speed; + struct resource *mem; + int irq, ret, i; + static const int supported_speeds[] = { + 0, 100000, 400000, 1000000, 3400000 + }; + + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + dev = devm_kzalloc(&pdev->dev, + sizeof(struct phytium_i2c_dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dev->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dev->base)) + return PTR_ERR(dev->base); + + dev->dev = &pdev->dev; + dev->irq = irq; + platform_set_drvdata(pdev, dev); + + dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(dev->rst)) { + if (PTR_ERR(dev->rst) == -EPROBE_DEFER) + return -EPROBE_DEFER; + } else { + reset_control_deassert(dev->rst); + } + + t = &dev->timings; + i2c_parse_fw_timings(&pdev->dev, t, false); + + acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); + /* + * Some DSTDs use a non standard speed, round down to the lowest + * standard speed. + */ + for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) { + if (acpi_speed < supported_speeds[i]) + break; + } + acpi_speed = supported_speeds[i - 1]; + + /* + * Find bus speed from the "clock-frequency" device property, ACPI + * or by using fast mode if neither is set. 
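+ *
+ * For example (hypothetical firmware values): if ACPI reports 400 kHz
+ * while the "clock-frequency" property asks for 100 kHz, the lower of
+ * the two (100 kHz) is used; if only one source provides a speed, that
+ * value is taken as-is.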
+ */ + if (acpi_speed && t->bus_freq_hz) + t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); + else if (acpi_speed || t->bus_freq_hz) + t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); + else + t->bus_freq_hz = 400000; + + if (has_acpi_companion(&pdev->dev)) + phytium_i2c_acpi_configure(pdev); + + /* + * Only standard mode at 100kHz, fast mode at 400kHz, + * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. + */ + if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 && + t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) { + dev_err(&pdev->dev, + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + t->bus_freq_hz); + ret = -EINVAL; + goto exit_reset; + } + + if (i2c_detect_slave_mode(&pdev->dev)) + i2c_phytium_configure_slave(dev); + else + i2c_phytium_configure_master(dev); + + dev->clk = devm_clk_get(&pdev->dev, NULL); + if (!i2c_phytium_prepare_clk(dev, true)) { + u64 clk_khz; + + dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; + clk_khz = dev->get_clk_rate_khz(dev); + + if (!dev->sda_hold_time && t->sda_hold_ns) + dev->sda_hold_time = + div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); + } + + dev->tx_fifo_depth = 7; + dev->rx_fifo_depth = 7; + dev->adapter.nr = pdev->id; + + adap = &dev->adapter; + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_DEPRECATED; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + adap->dev.of_node = pdev->dev.of_node; + + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED); + + /* The code below assumes runtime PM to be disabled. */ + WARN_ON(pm_runtime_enabled(&pdev->dev)); + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + + pm_runtime_enable(&pdev->dev); + + if (dev->mode == PHYTIUM_IC_SLAVE) + ret = i2c_phytium_probe_slave(dev); + else + ret = i2c_phytium_probe(dev); + + if (ret) + goto exit_probe; + + return ret; + +exit_probe: + pm_runtime_disable(dev->dev); +exit_reset: + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + return ret; +} + +static int phytium_i2c_plat_remove(struct platform_device *pdev) +{ + struct phytium_i2c_dev *dev = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + i2c_del_adapter(&dev->adapter); + + dev->disable(dev); + + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(dev->dev); + + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id phytium_i2c_of_match[] = { + { .compatible = "phytium,i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, phytium_i2c_of_match); +#endif + +static int __maybe_unused phytium_i2c_plat_suspend(struct device *dev) +{ + struct phytium_i2c_dev *idev = dev_get_drvdata(dev); + + idev->disable(idev); + i2c_phytium_prepare_clk(idev, false); + + return 0; +} + +static int __maybe_unused phytium_i2c_plat_resume(struct device *dev) +{ + struct phytium_i2c_dev *idev = dev_get_drvdata(dev); + + i2c_phytium_prepare_clk(idev, true); + + idev->init(idev); + + return 0; +} + +static const struct dev_pm_ops phytium_i2c_dev_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_i2c_plat_suspend, + phytium_i2c_plat_resume) + SET_RUNTIME_PM_OPS(phytium_i2c_plat_suspend, + phytium_i2c_plat_resume, NULL) +}; + +static struct platform_driver phytium_i2c_driver = { + .probe = phytium_i2c_plat_probe, + .remove = phytium_i2c_plat_remove, + 
.driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(phytium_i2c_of_match), + .acpi_match_table = ACPI_PTR(phytium_i2c_acpi_match), + .pm = &phytium_i2c_dev_pm_ops, + }, +}; +module_platform_driver(phytium_i2c_driver); + +MODULE_ALIAS("platform:i2c-phytium"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium I2C bus adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-slave.c b/drivers/i2c/busses/i2c-phytium-slave.c new file mode 100644 index 0000000000000000000000000000000000000000..eaadbd03dddd7dbcf26ff864e9b1b7c610bee198 --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-slave.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2C adapter driver (slave only). + * + * Derived from Synopysys I2C driver. + * Copyright (C) 2016 Synopsys Inc. + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +static void i2c_phytium_configure_fifo_slave(struct phytium_i2c_dev *dev) +{ + /* Configure Tx/Rx FIFO threshold levels. */ + phytium_writel(dev, 0, IC_TX_TL); + phytium_writel(dev, 0, IC_RX_TL); + + /* Configure the I2C slave. */ + phytium_writel(dev, dev->slave_cfg, IC_CON); + phytium_writel(dev, IC_INTR_SLAVE_MASK, IC_INTR_MASK); +} + +static int i2c_phytium_init_slave(struct phytium_i2c_dev *dev) +{ + /* Disable the adapter. */ + __i2c_phytium_disable(dev); + + /* Write SDA hold time if supported */ + if (dev->sda_hold_time) + phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD); + + i2c_phytium_configure_fifo_slave(dev); + + return 0; +} + +static int i2c_phytium_reg_slave(struct i2c_client *slave) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(slave->adapter); + + if (dev->slave) + return -EBUSY; + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + pm_runtime_get_sync(dev->dev); + + /* + * Set slave address in the IC_SAR register, + * the address to which the i2c responds. + */ + __i2c_phytium_disable_nowait(dev); + phytium_writel(dev, slave->addr, IC_SAR); + dev->slave = slave; + + __i2c_phytium_enable(dev); + + dev->cmd_err = 0; + dev->msg_write_idx = 0; + dev->msg_read_idx = 0; + dev->msg_err = 0; + dev->status = STATUS_IDLE; + dev->abort_source = 0; + dev->rx_outstanding = 0; + + return 0; +} + +static int i2c_phytium_unreg_slave(struct i2c_client *slave) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(slave->adapter); + + dev->disable_int(dev); + dev->disable(dev); + dev->slave = NULL; + pm_runtime_put(dev->dev); + + return 0; +} + +static u32 i2c_phytium_read_clear_intrbits_slave(struct phytium_i2c_dev *dev) +{ + u32 stat; + + /* + * The IC_INTR_STAT register just indicates "enabled" interrupts. + * Ths unmasked raw version of interrupt status bits are available + * in the IC_RAW_INTR_STAT register. + * + * That is, + * stat = phytium_readl(IC_INTR_STAT); + * equals to, + * stat = phytium_readl(IC_RAW_INTR_STAT) & + * phytium_readl(IC_INTR_MASK); + * + * The raw version might be useful for debugging purposes. + */ + stat = phytium_readl(dev, IC_INTR_STAT); + + /* + * Do not use the IC_CLR_INTR register to clear interrupts, or + * you'll miss some interrupts, triggered during the period from + * phytium_readl(IC_INTR_STAT) to phytium_readl(IC_CLR_INTR). + * + * Instead, use the separately-prepared IC_CLR_* registers. 
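+ *
+ * Reading IC_CLR_TX_ABRT, for example, clears only the TX_ABRT status,
+ * so an interrupt that arrives between the IC_INTR_STAT read and the
+ * clearing of an unrelated bit is not lost.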
+ */ + if (stat & IC_INTR_TX_ABRT) + phytium_readl(dev, IC_CLR_TX_ABRT); + if (stat & IC_INTR_RX_UNDER) + phytium_readl(dev, IC_CLR_RX_UNDER); + if (stat & IC_INTR_RX_OVER) + phytium_readl(dev, IC_CLR_RX_OVER); + if (stat & IC_INTR_TX_OVER) + phytium_readl(dev, IC_CLR_TX_OVER); + if (stat & IC_INTR_RX_DONE) + phytium_readl(dev, IC_CLR_RX_DONE); + if (stat & IC_INTR_ACTIVITY) + phytium_readl(dev, IC_CLR_ACTIVITY); + if (stat & IC_INTR_STOP_DET) + phytium_readl(dev, IC_CLR_STOP_DET); + if (stat & IC_INTR_START_DET) + phytium_readl(dev, IC_CLR_START_DET); + if (stat & IC_INTR_GEN_CALL) + phytium_readl(dev, IC_CLR_GEN_CALL); + + return stat; +} + +/* + * Interrupt service routine. This gets called whenever an I2C slave interrupt + * occurs. + */ +static int i2c_phytium_irq_handler_slave(struct phytium_i2c_dev *dev) +{ + u32 raw_stat, st, enabled; + u8 val, slave_activity; + + st = phytium_readl(dev, IC_INTR_STAT); + enabled = phytium_readl(dev, IC_ENABLE); + raw_stat = phytium_readl(dev, IC_RAW_INTR_STAT); + slave_activity = ((phytium_readl(dev, IC_STATUS) & + IC_STATUS_SLAVE_ACTIVITY) >> 6); + + if (!enabled || !(raw_stat & ~IC_INTR_ACTIVITY) || !dev->slave) + return 0; + + dev_dbg(dev->dev, + "%#x STATUS SLAVE_ACT=%#x : RAW_INTR_ST=%#x : INTR_ST=%#x\n", + enabled, slave_activity, raw_stat, st); + + if ((st & IC_INTR_RX_FULL) && (st & IC_INTR_STOP_DET)) + i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); + + if (st & IC_INTR_RD_REQ) { + if (slave_activity) { + if (st & IC_INTR_RX_FULL) { + val = phytium_readl(dev, IC_DATA_CMD); + + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_WRITE_RECEIVED, + &val)) { + dev_vdbg(dev->dev, "Byte %X acked!", + val); + } + phytium_readl(dev, IC_CLR_RD_REQ); + st = i2c_phytium_read_clear_intrbits_slave(dev); + } else { + phytium_readl(dev, IC_CLR_RD_REQ); + phytium_readl(dev, IC_CLR_RX_UNDER); + st = i2c_phytium_read_clear_intrbits_slave(dev); + } + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_READ_REQUESTED, + &val)) + phytium_writel(dev, val, IC_DATA_CMD); + } + } + + if (st & IC_INTR_RX_DONE) { + if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, + &val)) + phytium_readl(dev, IC_CLR_RX_DONE); + + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + st = i2c_phytium_read_clear_intrbits_slave(dev); + return 1; + } + + if (st & IC_INTR_RX_FULL) { + val = phytium_readl(dev, IC_DATA_CMD); + if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, + &val)) + dev_vdbg(dev->dev, "Byte %X acked!", val); + } else { + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + st = i2c_phytium_read_clear_intrbits_slave(dev); + } + + return 1; +} + +static irqreturn_t i2c_phytium_isr_slave(int this_irq, void *dev_id) +{ + struct phytium_i2c_dev *dev = dev_id; + int ret; + + i2c_phytium_read_clear_intrbits_slave(dev); + ret = i2c_phytium_irq_handler_slave(dev); + if (ret > 0) + complete(&dev->cmd_complete); + + return IRQ_RETVAL(ret); +} + +static const struct i2c_algorithm i2c_phytium_algo = { + .functionality = i2c_phytium_func, + .reg_slave = i2c_phytium_reg_slave, + .unreg_slave = i2c_phytium_unreg_slave, +}; + +int i2c_phytium_probe_slave(struct phytium_i2c_dev *dev) +{ + struct i2c_adapter *adap = &dev->adapter; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_phytium_init_slave; + dev->disable = i2c_phytium_disable; + dev->disable_int = i2c_phytium_disable_int; + + ret = dev->init(dev); + if (ret) + return ret; + + snprintf(adap->name, sizeof(adap->name), + "Synopsys DesignWare I2C Slave adapter"); + adap->retries = 3; + 
adap->algo = &i2c_phytium_algo; + adap->dev.parent = dev->dev; + i2c_set_adapdata(adap, dev); + + ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr_slave, + IRQF_SHARED, dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev->irq, ret); + return ret; + } + + ret = i2c_add_numbered_adapter(adap); + if (ret) + dev_err(dev->dev, "failure adding adapter: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phytium_probe_slave); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 90946a8b9a75a94a8eadc4d50071b166a9d44c81..d5feabe8bcbb8d3323507da0a128b17b85d88c84 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -19,6 +19,7 @@ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800 AMD Hudson-2, ML, CZ + Hygon CZ SMSC Victory66 Note: we assume there can only be one device, with one or more @@ -98,7 +99,7 @@ #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 -/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 @@ -289,7 +290,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, PIIX4_dev->revision >= 0x41) || (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && - PIIX4_dev->revision >= 0x49)) + PIIX4_dev->revision >= 0x49) || + (PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON && + PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) smb_en = 0x00; else smb_en = (aux) ? 0x28 : 0x2c; @@ -361,19 +364,18 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, piix4_smba, i2ccfg >> 4); /* Find which register is used for port selection */ - if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { - switch (PIIX4_dev->device) { - case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: + if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || + PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { + if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS || + (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && + PIIX4_dev->revision >= 0x1F)) { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; - break; - case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: - default: + } else { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; - break; } } else { if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, @@ -794,6 +796,7 @@ static const struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, @@ -904,7 +907,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision >= 0x40) || - dev->vendor == PCI_VENDOR_ID_AMD) { + dev->vendor == PCI_VENDOR_ID_AMD || + dev->vendor == PCI_VENDOR_ID_HYGON) { bool notify_imc = false; is_sb800 = 
true; diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 6e0e546ef83fcabe595f6591867850b8730b2e14..4d09665a72e59b0c68fe0d1673dbaeaaf30e6587 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -32,7 +31,6 @@ struct i2c_pnx_mif { int ret; /* Return value */ int mode; /* Interface mode */ struct completion complete; /* I/O completion */ - struct timer_list timer; /* Timeout */ u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ int order; /* RX Bytes to order via TX */ @@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data) return (timeout <= 0); } -static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) -{ - struct timer_list *timer = &alg_data->mif.timer; - unsigned long expires = msecs_to_jiffies(alg_data->timeout); - - if (expires <= 1) - expires = 2; - - del_timer_sync(timer); - - dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", - jiffies, expires); - - timer->expires = jiffies + expires; - - add_timer(timer); -} - /** * i2c_pnx_start - start a device * @slave_addr: slave address @@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - del_timer_sync(&alg_data->mif.timer); - dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); @@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - /* Stop timer. */ - del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); @@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Kill timer. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } @@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Stop timer, to prevent timeout. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ @@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) /* Our return value. */ alg_data->mif.ret = -EIO; - /* Stop timer, to prevent timeout. 
*/ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* @@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void i2c_pnx_timeout(struct timer_list *t) +static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data) { - struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer); u32 ctl; dev_err(&alg_data->adapter.dev, @@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t) iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(alg_data); alg_data->mif.ret = -EIO; - complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) @@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; + unsigned long time_left; u32 stat; dev_dbg(&alg_data->adapter.dev, @@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); - i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); @@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) break; /* Wait for completion */ - wait_for_completion(&alg_data->mif.complete); + time_left = wait_for_completion_timeout(&alg_data->mif.complete, + alg_data->timeout); + if (time_left == 0) + i2c_pnx_timeout(alg_data); if (!(rc = alg_data->mif.ret)) completed++; @@ -657,7 +628,10 @@ static int i2c_pnx_probe(struct platform_device *pdev) alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; - alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT; + alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT); + if (alg_data->timeout <= 1) + alg_data->timeout = 2; + #ifdef CONFIG_OF alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node); if (pdev->dev.of_node) { @@ -677,8 +651,6 @@ static int i2c_pnx_probe(struct platform_device *pdev) if (IS_ERR(alg_data->clk)) return PTR_ERR(alg_data->clk); - timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0); - snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name), "%s", pdev->name); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 9f2eb02481d348c60512c69c1b58e375c698f36c..d7329177b0ea90d8e4366e431f466309755d03a0 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -590,18 +590,19 @@ static int geni_i2c_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. 
fifo depth:%d\n", tx_depth); - ret = i2c_add_adapter(&gi2c->adap); - if (ret) { - dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); - return ret; - } - gi2c->suspended = 1; pm_runtime_set_suspended(gi2c->se.dev); pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY); pm_runtime_use_autosuspend(gi2c->se.dev); pm_runtime_enable(gi2c->se.dev); + ret = i2c_add_adapter(&gi2c->adap); + if (ret) { + dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret); + pm_runtime_disable(gi2c->se.dev); + return ret; + } + return 0; } @@ -609,8 +610,8 @@ static int geni_i2c_remove(struct platform_device *pdev) { struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev); - pm_runtime_disable(gi2c->se.dev); i2c_del_adapter(&gi2c->adap); + pm_runtime_disable(gi2c->se.dev); return 0; } diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index c86c3ae1318f200696f909f7563c8a114ee30995..e09cd0775ae91c60e052b22a748d870dfe037f7a 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1088,11 +1088,6 @@ static int qup_i2c_xfer(struct i2c_adapter *adap, writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG); for (idx = 0; idx < num; idx++) { - if (msgs[idx].len == 0) { - ret = -EINVAL; - goto out; - } - if (qup_i2c_poll_state_i2c_master(qup)) { ret = -EIO; goto out; @@ -1520,9 +1515,6 @@ qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup, /* All i2c_msgs should be transferred using either dma or cpu */ for (idx = 0; idx < num; idx++) { - if (msgs[idx].len == 0) - return -EINVAL; - if (msgs[idx].flags & I2C_M_RD) max_rx_len = max_t(unsigned int, max_rx_len, msgs[idx].len); @@ -1636,9 +1628,14 @@ static const struct i2c_algorithm qup_i2c_algo_v2 = { * which limits the possible read to 256 (QUP_READ_LIMIT) bytes. 
*/ static const struct i2c_adapter_quirks qup_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, .max_read_len = QUP_READ_LIMIT, }; +static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup) { clk_prepare_enable(qup->clk); @@ -1701,6 +1698,7 @@ static int qup_i2c_probe(struct platform_device *pdev) is_qup_v1 = true; } else { qup->adap.algo = &qup_i2c_algo_v2; + qup->adap.quirks = &qup_i2c_quirks_v2; is_qup_v1 = false; if (acpi_match_device(qup_i2c_acpi_match, qup->dev)) goto nodma; diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 4aa7dde876f3f23dd38e2799270b1340aca3af2c..2c29f901d30908b3f0b4f9b5c29b6639ea46b398 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -139,6 +139,7 @@ struct rcar_i2c_priv { enum dma_data_direction dma_direction; struct reset_control *rstc; + int irq; }; #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) @@ -779,6 +780,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, pm_runtime_get_sync(dev); + /* Check bus state before init otherwise bus busy info will be lost */ + ret = rcar_i2c_bus_barrier(priv); + if (ret < 0) + goto out; + /* Gen3 needs a reset before allowing RXDMA once */ if (priv->devtype == I2C_RCAR_GEN3) { priv->flags |= ID_P_NO_RXDMA; @@ -791,10 +797,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, rcar_i2c_init(priv); - ret = rcar_i2c_bus_barrier(priv); - if (ret < 0) - goto out; - for (i = 0; i < num; i++) rcar_i2c_request_dma(priv, msgs + i); @@ -858,9 +860,11 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); + /* disable irqs and ensure none is running before clearing ptr */ rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSCR, 0); + synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); @@ -915,7 +919,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) struct i2c_adapter *adap; struct device *dev = &pdev->dev; struct i2c_timings i2c_t; - int irq, ret; + int ret; priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL); if (!priv) @@ -978,10 +982,10 @@ static int rcar_i2c_probe(struct platform_device *pdev) pm_runtime_put(dev); - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv); + priv->irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv); if (ret < 0) { - dev_err(dev, "cannot get irq %d\n", irq); + dev_err(dev, "cannot get irq %d\n", priv->irq); goto out_pm_disable; } diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index b75ff144b5704293e0946c4919e747d91ede0b85..e6f351c92c02df434586ea8540176825211535a0 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -203,6 +203,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data) if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) { /* We got a NACKIE */ readb(riic->base + RIIC_ICDRR); /* dummy read */ + riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2); riic->err = -ENXIO; } else if (riic->bytes_left) { return IRQ_NONE; diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index 7e9a2bbf5ddcb967459367778a834c8314ff6f2b..ff3f4553648f3c29a8c576172fc4c342cef6a94b 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) { struct 
acpi_smbus_cmi *smbus_cmi; const struct acpi_device_id *id; + int ret; smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL); if (!smbus_cmi) @@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1, acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL); - if (smbus_cmi->cap_info == 0) + if (smbus_cmi->cap_info == 0) { + ret = -ENODEV; goto err; + } snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), "SMBus CMI adapter %s", @@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; smbus_cmi->adapter.dev.parent = &device->dev; - if (i2c_add_adapter(&smbus_cmi->adapter)) { + ret = i2c_add_adapter(&smbus_cmi->adapter); + if (ret) { dev_err(&device->dev, "Couldn't register adapter!\n"); goto err; } @@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device) err: kfree(smbus_cmi); device->driver_data = NULL; - return -EIO; + return ret; } static int acpi_smbus_cmi_remove(struct acpi_device *device) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 818cab14e87c5ea47e5c6daaa7b76c818465df52..ddcfb6d349d11f733cc98700efd5d4990270ee9d 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -800,6 +800,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = { static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config }, + { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config }, @@ -808,6 +809,7 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config }, { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config }, + { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config }, { .compatible = "renesas,rmobile-iic", .data = &default_dt_config }, {}, diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index a94e724f51dcf5787b808881486acb1e44b689b9..2f82ec2c8ce5e5e1922fef9702913d971534df4a 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -287,7 +287,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, if (i2c_dev->is_suspended) return -EBUSY; - ret = pm_runtime_get_sync(i2c_dev->dev); + ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; @@ -573,7 +573,7 @@ static int sprd_i2c_remove(struct platform_device *pdev) struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_get_sync(i2c_dev->dev); + ret = pm_runtime_resume_and_get(i2c_dev->dev); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 62d023e737d9c2f60a26ea4174e78154dc10e877..f4e3613f9361b1e3b53d80ecd7012b63323fc863 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ 
-298,7 +297,7 @@ struct stm32f7_i2c_dev { bool use_dma; }; -/** +/* * All these values are coming from I2C Specification, Version 6.0, 4th of * April 2014. * @@ -424,7 +423,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); dnf_delay = setup->dnf * i2cclk; - sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - + sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time - af_delay_min - (setup->dnf + 3) * i2cclk; sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - @@ -1178,6 +1177,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev) STM32F7_I2C_CR1_TXIE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); + /* Write 1st data byte */ + writel_relaxed(value, base + STM32F7_I2C_TXDR); } else { /* Notify i2c slave that new write transfer is starting */ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); @@ -1487,7 +1488,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; struct stm32_i2c_dma *dma = i2c_dev->dma; - u32 mask, status; + u32 status; status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); @@ -1512,12 +1513,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) f7_msg->result = -EINVAL; } - /* Disable interrupts */ - if (stm32f7_i2c_is_slave_registered(i2c_dev)) - mask = STM32F7_I2C_XFER_IRQ_MASK; - else - mask = STM32F7_I2C_ALL_IRQ_MASK; - stm32f7_i2c_disable_irq(i2c_dev, mask); + if (!i2c_dev->slave_running) { + u32 mask; + /* Disable interrupts */ + if (stm32f7_i2c_is_slave_registered(i2c_dev)) + mask = STM32F7_I2C_XFER_IRQ_MASK; + else + mask = STM32F7_I2C_ALL_IRQ_MASK; + stm32f7_i2c_disable_irq(i2c_dev, mask); + } /* Disable dma */ if (i2c_dev->use_dma) { @@ -1782,15 +1786,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = { static int stm32f7_i2c_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct stm32f7_i2c_dev *i2c_dev; const struct stm32f7_i2c_setup *setup; struct resource *res; - u32 irq_error, irq_event, clk_rate, rise_time, fall_time; + u32 clk_rate, rise_time, fall_time; struct i2c_adapter *adap; struct reset_control *rst; dma_addr_t phy_addr; - int ret; + int irq_error, irq_event, ret; i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) @@ -1802,16 +1805,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c_dev->base); phy_addr = (dma_addr_t)res->start; - irq_event = irq_of_parse_and_map(np, 0); - if (!irq_event) { - dev_err(&pdev->dev, "IRQ event missing or invalid\n"); - return -EINVAL; + irq_event = platform_get_irq(pdev, 0); + if (irq_event <= 0) { + if (irq_event != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get IRQ event: %d\n", + irq_event); + return irq_event ? : -ENOENT; } - irq_error = irq_of_parse_and_map(np, 1); - if (!irq_error) { - dev_err(&pdev->dev, "IRQ error missing or invalid\n"); - return -EINVAL; + irq_error = platform_get_irq(pdev, 1); + if (irq_error <= 0) { + if (irq_error != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get IRQ error: %d\n", + irq_error); + return irq_error ? 
: -ENOENT; } i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 915f5edbab3319212c1e22ae3274ce63de468d18..e47380b96b1d78fe98af8a16c5fe24f0699789c5 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -356,7 +356,7 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c, /* wait 2 clock periods to ensure the stop has been through the bus */ udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz)); - return 0; + return ret; } static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id) @@ -602,6 +602,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev) i2c->adapter = synquacer_i2c_ops; i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.dev.parent = &pdev->dev; + i2c->adapter.dev.of_node = pdev->dev.of_node; + ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev)); i2c->adapter.nr = pdev->id; init_completion(&i2c->completion); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 60c8561fbe65e62d3bec217d8eb7157cd377609e..47d196c026ba6296120ce9b4f1bc7164c76fdc57 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -684,9 +684,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, tegra_i2c_flush_fifos(i2c_dev); - if (msg->len == 0) - return -EINVAL; - i2c_dev->msg_buf = msg->buf; i2c_dev->msg_buf_remaining = msg->len; i2c_dev->msg_err = I2C_ERR_NONE; @@ -831,8 +828,9 @@ static const struct i2c_algorithm tegra_i2c_algo = { /* payload size is only 12 bit */ static const struct i2c_adapter_quirks tegra_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, .max_read_len = 4096, - .max_write_len = 4096, + .max_write_len = 4096 - 12, }; static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index a403e8579b652b6e2486f746b91b9357c3ab7381..dd0687e36a47ba4d37313ac749614eca305b429a 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -98,6 +98,7 @@ struct uniphier_fi2c_priv { unsigned int flags; unsigned int busy_cnt; unsigned int clk_cycle; + spinlock_t lock; /* IRQ synchronization */ }; static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv, @@ -142,9 +143,10 @@ static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv) writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE); } -static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv) +static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv, + u32 mask) { - writel(-1, priv->membase + UNIPHIER_FI2C_IC); + writel(mask, priv->membase + UNIPHIER_FI2C_IC); } static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv) @@ -162,7 +164,10 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) struct uniphier_fi2c_priv *priv = dev_id; u32 irq_status; + spin_lock(&priv->lock); + irq_status = readl(priv->membase + UNIPHIER_FI2C_INT); + irq_status &= priv->enabled_irqs; dev_dbg(&priv->adap.dev, "interrupt: enabled_irqs=%04x, irq_status=%04x\n", @@ -207,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { uniphier_fi2c_drain_rxfifo(priv); - if (!priv->len) + /* + * If the number of bytes to read is multiple of the FIFO size + * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little + * earlier than INT_RB. 
We wait for INT_RB to confirm the + * completion of the current message. + */ + if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB)) goto data_done; if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { @@ -230,6 +241,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) goto handled; } + spin_unlock(&priv->lock); + return IRQ_NONE; data_done: @@ -244,7 +257,14 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) } handled: - uniphier_fi2c_clear_irqs(priv); + /* + * This controller makes a pause while any bit of the IRQ status is + * asserted. Clear the asserted bit to kick the controller just before + * exiting the handler. + */ + uniphier_fi2c_clear_irqs(priv, irq_status); + + spin_unlock(&priv->lock); return IRQ_HANDLED; } @@ -252,6 +272,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr) { priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; + uniphier_fi2c_set_irqs(priv); + /* do not use TX byte counter */ writel(0, priv->membase + UNIPHIER_FI2C_TBC); /* set slave address */ @@ -284,6 +306,8 @@ static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF; } + uniphier_fi2c_set_irqs(priv); + /* set slave address with RD bit */ writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1, priv->membase + UNIPHIER_FI2C_DTTX); @@ -307,14 +331,16 @@ static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv) } static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, - struct i2c_msg *msg, bool stop) + struct i2c_msg *msg, bool repeat, + bool stop) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); bool is_read = msg->flags & I2C_M_RD; - unsigned long time_left; + unsigned long time_left, flags; - dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n", - is_read ? "receive" : "transmit", msg->addr, msg->len, stop); + dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, repeat=%d, stop=%d\n", + is_read ? "receive" : "transmit", msg->addr, msg->len, + repeat, stop); priv->len = msg->len; priv->buf = msg->buf; @@ -326,22 +352,36 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, priv->flags |= UNIPHIER_FI2C_STOP; reinit_completion(&priv->comp); - uniphier_fi2c_clear_irqs(priv); + uniphier_fi2c_clear_irqs(priv, U32_MAX); writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST, priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */ + spin_lock_irqsave(&priv->lock, flags); + if (is_read) uniphier_fi2c_rx_init(priv, msg->addr); else uniphier_fi2c_tx_init(priv, msg->addr); - uniphier_fi2c_set_irqs(priv); - dev_dbg(&adap->dev, "start condition\n"); - writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, - priv->membase + UNIPHIER_FI2C_CR); + /* + * For a repeated START condition, writing a slave address to the FIFO + * kicks the controller. So, the UNIPHIER_FI2C_CR register should be + * written only for a non-repeated START condition. 
+ */ + if (!repeat) + writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, + priv->membase + UNIPHIER_FI2C_CR); + + spin_unlock_irqrestore(&priv->lock, flags); time_left = wait_for_completion_timeout(&priv->comp, adap->timeout); + + spin_lock_irqsave(&priv->lock, flags); + priv->enabled_irqs = 0; + uniphier_fi2c_set_irqs(priv); + spin_unlock_irqrestore(&priv->lock, flags); + if (!time_left) { dev_err(&adap->dev, "transaction timeout.\n"); uniphier_fi2c_recover(priv); @@ -394,6 +434,7 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *msg, *emsg = msgs + num; + bool repeat = false; int ret; ret = uniphier_fi2c_check_bus_busy(adap); @@ -404,9 +445,11 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, /* Emit STOP if it is the last message or I2C_M_STOP is set. */ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); - ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); + ret = uniphier_fi2c_master_xfer_one(adap, msg, repeat, stop); if (ret) return ret; + + repeat = !stop; } return num; @@ -470,9 +513,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv) uniphier_fi2c_reset(priv); + /* + * Standard-mode: tLOW + tHIGH = 10 us + * Fast-mode: tLOW + tHIGH = 2.5 us + */ writel(cyc, priv->membase + UNIPHIER_FI2C_CYC); - writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL); + /* + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us + * "tLow/tHIGH = 5/4" meets both. + */ + writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL); + /* + * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us + * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us + */ writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT); + /* + * Standard-mode: tSU;DAT = 250 ns + * Fast-mode: tSU;DAT = 100 ns + */ writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT); uniphier_fi2c_prepare_operation(priv); @@ -529,6 +589,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); + spin_lock_init(&priv->lock); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_fi2c_algo; priv->adap.dev.parent = dev; diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index 454f914ae66dbd49931575122bb7c7dea662b11b..c488e558aef709ee5097f05436624807df26ac4e 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv) uniphier_i2c_reset(priv, true); - writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); + /* + * Bit30-16: clock cycles of tLOW. + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us + * "tLow/tHIGH = 5/4" meets both. 
+ */ + writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK); uniphier_i2c_reset(priv, false); } diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index a7ac746018ad0c098d51eb56c1d5d8981d34c78c..7a746f41353514a7586626c913d77510452de35e 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -321,6 +321,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip, u32 msg[3]; int rc; + if (writelen > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + memcpy(ctx->dma_buffer, data, writelen); paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen, DMA_TO_DEVICE); diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 0c51c0ffdda9d99d03f28c5503ef1cffaa5316c2..f686a45801e695e19c7e3ae5082757b749c6886d 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -678,7 +678,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)); - err = pm_runtime_get_sync(i2c->dev); + err = pm_runtime_resume_and_get(i2c->dev); if (err < 0) return err; @@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_algorithm = { .functionality = xiic_func, }; +static const struct i2c_adapter_quirks xiic_quirks = { + .max_read_len = 255, +}; + static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, + .quirks = &xiic_quirks, }; diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..29ae4b89d3f06d341a6f621cfeb2025f0df17473 --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * i2c-zhaoxin-i2c.c - Zhaoxin I2C controller driver + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.3.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ZX_I2C_NAME "Zhaoxin-I2C" + +/* + * registers + */ +/* I2C MMIO Address Constants */ +#define IICCR_L 0x00 +#define MST_RST BIT(7) +#define MST_RST_PATCH BIT(6) +#define CPU_RDY BIT(3) +#define TX_END BIT(2) +#define RX_ACK BIT(1) +#define IICCR_H 0x01 +#define FIFO_EN BIT(6) +#define IICSLVADDR 0x02 +#define IICTCR 0x03 +#define FAST_SEL BIT(7) +#define MASTER_RECV BIT(6) +#define HS_SEL BIT(5) +#define IICSR 0x04 +#define SDA_I BIT(3) +#define SCL_I BIT(2) +#define READY BIT(1) +#define RCV_NACK BIT(0) +#define IICISR 0x06 +#define IRQ_STS_BYTENACK BIT(5) +#define IRQ_STS_FIFONACK BIT(4) +#define IRQ_STS_FIFOEND BIT(3) +#define IRQ_SCL_TIMEOUT BIT(2) +#define IRQ_STS_BYTEEND BIT(1) +#define IRQ_STS_ADDRNACK BIT(0) +#define IRQ_STS_MASK (IRQ_STS_FIFOEND | IRQ_SCL_TIMEOUT | \ + IRQ_STS_BYTEEND | IRQ_STS_ADDRNACK) +#define IICIMR 0x08 +#define IRQ_EN_FIFOEND BIT(3) +#define IRQ_EN_TIMEOUT BIT(2) +#define IRQ_EN_BYTEEND BIT(1) +#define IRQ_EN_ADDRNACK BIT(0) +#define IICDATA2IIC 0x0A +#define IICDATA2CPU 0x0B +#define IICTR_FSTP 0x0C +#define IICTR_SCLTP 0x0D +#define IICMCR 0x0E +#define DYCLK_EN BIT(0) +#define IIC_MST_CODE 0x0F +#define IICCS 0x10 +#define CLKSEL_50M BIT(0) +#define IICREV 0x11 +#define IICHCR 0x12 +#define FIFO_RST (BIT(1) | BIT(0)) +#define IICHTDR 0x13 +#define IICHRDR 0x14 +#define IICHTLR 0x15 +#define IICHRLR 0x16 +#define IICHWCNTR 0x18 +#define IICHRCNTR 0x19 + +enum { + STANDARD_MODE_50M = 1, + STANDARD_MODE_27M, + FAST_MODE_50M, + FAST_MODE_27M, + FAST_PLUSE_MODE_50M, + FAST_PLUSE_MODE_27M, + HIGH_SPEED_MODE, + SPEED_MODE_CNT = HIGH_SPEED_MODE +}; + +#define FIFO_SIZE 32 +#define RETRY_TIME 3 + +struct zxi2c { + /* controller resources information */ + struct device *dev; + struct pci_dev *pci; + struct i2c_adapter adap; + void __iomem *regs; + u8 irq; + const char *bus_uid; + u8 hrv; /* Hardware Revision */ + u8 speed_mode; + unsigned long speed; + u8 fstp; /* freq control */ + + /* process control information */ + u8 event; + u16 timeout; + u16 byte_left; + wait_queue_head_t waitq; + u8 retry; + bool busy; + + /* current msg information */ + u8 addr; + u16 len; + bool is_read; + bool is_last_msg; + bool dynamic; +}; + +#define set_byte(r, d) iowrite8(d, r+IICDATA2IIC) +#define get_byte(r) ioread8(r+IICDATA2CPU) +#define is_ready(r) (ioread8(r+IICSR)&READY) +#define is_nack(r) (ioread8(r+IICSR)&RCV_NACK) +#define get_irq_status(r) ioread8(r+IICISR) +#define get_reversion(r) ioread8(r+IICREV) +#define clear_irq_status(r) iowrite8(IRQ_STS_MASK, r+IICISR) +#define set_fifo_byte(r, d) iowrite8(d, r+IICHTDR) +#define get_fifo_byte(r) ioread8(r+IICHRDR) +#define set_fifo_wr_len(r, d) iowrite8(d, r+IICHTLR) +#define set_fifo_rd_len(r, d) iowrite8(d, r+IICHRLR) +#define get_fifo_wr_cnt(r) ioread8(r+IICHWCNTR) +#define get_fifo_rd_cnt(r) ioread8(r+IICHRCNTR) +#define master_regs_reset(r) iowrite8(MST_RST|0x41, r+IICCR_L) +#define set_dynamic_clock(r, d) iowrite8(d, r+IICMCR) +#define get_dynamic_clock(r) (ioread8(r+IICMCR) & DYCLK_EN) +#define stop_write_byte(r) iowrite8(TX_END|0x41, r+IICCR_L) +#define get_fstp_value(r) ioread8(r+IICTR_FSTP) + +static inline void zxi2c_prepare_next_read(void __iomem *regs, u16 left) +{ + u8 tmp = ioread8(regs + IICCR_L); + + if (left > 1) + tmp &= ~RX_ACK; + else + tmp |= RX_ACK; + + iowrite8(tmp, regs + IICCR_L); +} + +static inline void zxi2c_enable_irq(void __iomem *regs, u8 type, int mode) +{ + if 
(mode == true) + iowrite8(IRQ_EN_ADDRNACK | type, + regs + IICIMR); + else + iowrite8(0, regs + IICIMR); +} + +static inline void zxi2c_continue(struct zxi2c *i2c) +{ + u8 tmp; + + i2c->event = 0; + tmp = ioread8(i2c->regs + IICCR_L); + iowrite8(tmp |= CPU_RDY, i2c->regs + IICCR_L); +} + +static void zxi2c_enable_fifo(void __iomem *regs, int mode) +{ + if (mode == true) + iowrite8(FIFO_EN, regs + IICCR_H); + else + iowrite8(0, regs + IICCR_H); +} + +static void zxi2c_reset_fifo(void __iomem *regs) +{ + u8 tmp; + u8 count; + + tmp = ioread8(regs + IICHCR); + iowrite8(tmp | FIFO_RST, regs + IICHCR); + for (count = 0; count < 50; count++) + if (!(ioread8(regs + IICHCR) & FIFO_RST)) + break; + if (count >= 50) + pr_err("%s failed\n", __func__); +} + +static void zxi2c_set_wr(void __iomem *regs, bool is_read) +{ + u8 tmp; + + tmp = ioread8(regs + IICTCR); + if (is_read) + tmp |= MASTER_RECV; + else + tmp &= ~MASTER_RECV; + iowrite8(tmp, regs + IICTCR); +} + +static void zxi2c_start(struct zxi2c *i2c) +{ + i2c->event = 0; + iowrite8(i2c->addr & 0x7f, i2c->regs + IICSLVADDR); +} + +static const u8 speed_params_table[SPEED_MODE_CNT][5] = { + /* speed_mode, IICTCR, IICTR_FSTP, IICCS, IICTR_SCLTP */ + { STANDARD_MODE_27M, 0, 0x83, 0, 0x80 }, + { FAST_MODE_27M, FAST_SEL, 0x1e, 0, 0x80 }, + { FAST_PLUSE_MODE_27M, FAST_SEL, 10, 0, 0x80 }, + { STANDARD_MODE_50M, 0, 0xF3, CLKSEL_50M, 0xff }, + { FAST_MODE_50M, FAST_SEL, 0x38, CLKSEL_50M, 0xff }, + { FAST_PLUSE_MODE_50M, FAST_SEL, 19, CLKSEL_50M, 0xff }, + { HIGH_SPEED_MODE, HS_SEL, 0x37, CLKSEL_50M, 0xff } + +}; + +static void zxi2c_set_bus_speed(struct zxi2c *i2c) +{ + u8 i; + const u8 *params = NULL; + + for (i = 0; i < SPEED_MODE_CNT; i++) { + if (speed_params_table[i][0] == i2c->speed_mode) { + params = speed_params_table[i]; + break; + } + } + iowrite8(params[1], i2c->regs + IICTCR); + if (abs(i2c->fstp - params[2]) > 0x10) { + /* if BIOS setting value far from golden value, + * use golden value and warn user */ + dev_warn(i2c->dev, + "speed:%ld, fstp:0x%x, golden:0x%x\n", + i2c->speed, i2c->fstp, params[2]); + iowrite8(params[2], i2c->regs + IICTR_FSTP); + } else + iowrite8(i2c->fstp, i2c->regs + IICTR_FSTP); + iowrite8(params[3], i2c->regs + IICCS); + iowrite8(params[4], i2c->regs + IICTR_SCLTP); + + /* for Hs-mode, use 0000 1000 as master code */ + if (i2c->speed_mode == HIGH_SPEED_MODE) + iowrite8(0x08, i2c->regs + IIC_MST_CODE); +} + +static void zxi2c_module_reset(struct zxi2c *i2c) +{ + unsigned long uid; + u8 tmp; + u8 bit; + + bit = kstrtoul(i2c->bus_uid, 10, &uid) ? 
0 : (1 << (4 + uid)); + + pci_read_config_byte(i2c->pci, 0x4F, &tmp); + usleep_range(3000, 5000); + pci_write_config_byte(i2c->pci, 0x4F, tmp & ~bit); + usleep_range(3000, 5000); + pci_write_config_byte(i2c->pci, 0x4F, tmp | bit); + usleep_range(3000, 5000); + + set_dynamic_clock(i2c->regs, i2c->dynamic); +} + +static irqreturn_t zxi2c_irq_handle(int irq, void *dev_id) +{ + struct zxi2c *i2c = (struct zxi2c *)dev_id; + void __iomem *regs = i2c->regs; + u8 status = get_irq_status(regs); + + if ((status & IRQ_STS_MASK) == 0) + return IRQ_NONE; + + if (status & IRQ_SCL_TIMEOUT) + dev_warn(i2c->dev, "timeout(HW), ID: 0x%X\n", i2c->addr); + + if (status & IRQ_STS_ADDRNACK) + dev_err(i2c->dev, "addr NACK, ID: 0x%X\n", i2c->addr); + else if (status & IRQ_STS_BYTEEND) { + i2c->byte_left--; + if (!i2c->is_read) { + if (is_nack(regs)) { + status = IRQ_STS_BYTENACK; + i2c->byte_left++; + dev_err(i2c->dev, "data NACK, ID: 0x%X\n", i2c->addr); + } else if (i2c->byte_left == 0 && i2c->is_last_msg) + stop_write_byte(regs); + } + } + + i2c->event = status; + clear_irq_status(regs); + wake_up(&i2c->waitq); + + return IRQ_HANDLED; +} + +static int zxi2c_wait_event(struct zxi2c *i2c, u8 event) +{ + int timeout; + + timeout = wait_event_interruptible_timeout(i2c->waitq, i2c->event != 0, + msecs_to_jiffies(i2c->timeout)); + + if (timeout == 0) { + dev_err(i2c->dev, "timeout(SW), ID: 0x%X\n", i2c->addr); + /* Clock streching timeout, do recovery */ + if (!is_nack(i2c->regs)) + dev_err(i2c->dev, "device hang? pls reset, ID: 0x%X\n", i2c->addr); + + master_regs_reset(i2c->regs); + zxi2c_set_bus_speed(i2c); + return -ENODEV; + } else if ((i2c->event & event) == 0) { + /* device NACK and so on, already print in interrupt */ + return -ENODEV; + } + return 0; +} + +static int zxi2c_byte_xfer(struct zxi2c *i2c, struct i2c_msg *msgs, int num) +{ + u16 i, finished; + int error; + u8 index, ret = 0; + struct i2c_msg *msg; + void __iomem *regs = i2c->regs; + + clear_irq_status(regs); + zxi2c_enable_fifo(regs, false); + zxi2c_enable_irq(regs, IRQ_EN_BYTEEND, true); + + for (index = 0; index < num; index++) { + msg = msgs + index; + + i2c->addr = msg->addr; + i2c->is_read = !!(msg->flags & I2C_M_RD); + i2c->byte_left = i2c->len = msg->len; + + zxi2c_set_wr(regs, i2c->is_read); + if (i2c->is_read) { + zxi2c_prepare_next_read(regs, i2c->byte_left); + zxi2c_start(i2c); + /* create restart for non-first msg*/ + if (index) + zxi2c_continue(i2c); + + for (i = 1; i <= msg->len; i++) { + error = zxi2c_wait_event(i2c, IRQ_STS_BYTEEND); + if (error) + break; + + msg->buf[i - 1] = get_byte(regs); + if (i2c->byte_left == 0) + break; + + zxi2c_prepare_next_read(regs, i2c->byte_left); + zxi2c_continue(i2c); + } + } else { + set_byte(regs, msg->buf[0]); + /* mark whether this is the last msg */ + i2c->is_last_msg = index == !!(num - 1); + zxi2c_start(i2c); + /* create restart for non-first msg */ + if (index) + zxi2c_continue(i2c); + + for (i = 1; i <= msg->len; i++) { + error = zxi2c_wait_event(i2c, IRQ_STS_BYTEEND); + if (error) + break; + + if (i2c->byte_left == 0) + break; + set_byte(regs, msg->buf[i]); + zxi2c_continue(i2c); + } + } + + if (error) { + finished = msg->len - i2c->byte_left; + + /* check if NACK during transmitting */ + if (finished) + dev_err(i2c->dev, + "%s: %s finished %d bytes: %*ph\n", + __func__, i2c->is_read ? 
"read" : "write", + finished, finished, msg->buf); + return error; + } + ret++; + } + + zxi2c_enable_irq(regs, IRQ_EN_BYTEEND, false); + return ret; +} + +static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msgs) +{ + void __iomem *regs = i2c->regs; + struct i2c_msg *msg = msgs; + int i; + u8 finished; + + i2c->addr = msg->addr; + i2c->is_read = !!(msg->flags & I2C_M_RD); + i2c->len = msg->len; + + zxi2c_reset_fifo(regs); + zxi2c_enable_fifo(regs, true); + + clear_irq_status(regs); + zxi2c_enable_irq(regs, IRQ_EN_FIFOEND, true); + + zxi2c_set_wr(regs, i2c->is_read); + if (i2c->is_read) + set_fifo_rd_len(regs, msg->len - 1); + else { + set_fifo_wr_len(regs, msg->len - 1); + for (i = 0; i < msg->len; i++) + set_fifo_byte(regs, msg->buf[i]); + } + + zxi2c_start(i2c); + if (zxi2c_wait_event(i2c, IRQ_STS_FIFOEND)) + return -ENODEV; + + if (i2c->is_read) { + finished = get_fifo_rd_cnt(regs); + for (i = 0; i < finished; i++) + msg->buf[i] = get_fifo_byte(regs); + } else + finished = get_fifo_wr_cnt(regs); + + /* check if NACK during transmitting */ + if (finished != msg->len) { + if (finished) + dev_err(i2c->dev, + "%s: %s only finished %d/%d bytes: %*ph\n", + __func__, i2c->is_read ? "read" : "write", + finished, msg->len, finished, msg->buf); + return -EAGAIN; + } + + zxi2c_enable_irq(regs, IRQ_EN_FIFOEND, false); + return 1; +} + +static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct zxi2c *i2c; + int ret; + + i2c = (struct zxi2c *)i2c_get_adapdata(adap); + if (!is_ready(i2c->regs)) { + if (i2c->busy == false) { + zxi2c_module_reset(i2c); + zxi2c_set_bus_speed(i2c); + dev_dbg(i2c->dev, "not ready, reset and retry\n"); + } + if (i2c->retry >= RETRY_TIME) { + dev_err(i2c->dev, "retried %d times, dropped\n", i2c->retry); + i2c->retry = 0; + } else + i2c->retry++; + return -EAGAIN; + } + i2c->retry = 0; + i2c->busy = true; + i2c->timeout = 1000; + + /* Freedom mode */ + if (num == 1 && msgs->len <= FIFO_SIZE && msgs->len >= 3) + ret = zxi2c_fifo_xfer(i2c, msgs); + else + ret = zxi2c_byte_xfer(i2c, msgs, num); + + i2c->busy = false; + return ret; +} + +static u32 zxi2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm zxi2c_algorithm = { + .master_xfer = zxi2c_master_xfer, + .functionality = zxi2c_func, +}; + +static const struct i2c_adapter_quirks zxi2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static void zxi2c_get_speed_mode(struct zxi2c *i2c) +{ + u32 speed = 400000; + + speed = i2c_acpi_find_bus_speed(i2c->dev); + if (speed >= 3400000) + i2c->speed_mode = HIGH_SPEED_MODE; + else if (speed >= 1000000) + i2c->speed_mode = FAST_PLUSE_MODE_50M; + else if (speed >= 400000) + i2c->speed_mode = FAST_MODE_50M; + else if (speed >= 100000) + i2c->speed_mode = STANDARD_MODE_50M; + else + i2c->speed_mode = FAST_MODE_50M; + i2c->speed = speed; +} + +static int zxi2c_parse_resources(struct zxi2c *i2c) +{ + struct resource *res; + struct platform_device *pdev = to_platform_device(i2c->dev); + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + + /* get IO resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (IS_ERR(res)) { + dev_err(&pdev->dev, "IORESOURCE_MEM failed\n"); + return -ENODEV; + } + i2c->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->regs)) { + dev_err(&pdev->dev, "devm ioremap failed\n"); + return -ENOMEM; + } + + /* get irq */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if 
(IS_ERR(res)) { + dev_err(&pdev->dev, "get IORESOURCE_IRQ failed\n"); + return -ENODEV; + } + i2c->irq = res->start; + + /* get _UID */ + i2c->bus_uid = adev->pnp.unique_id; + if (!i2c->bus_uid) { + dev_err(&pdev->dev, "missing/incorrect UID/bus id!\n"); + return -ENODEV; + } + + /* get speed */ + zxi2c_get_speed_mode(i2c); + + return 0; +} + +static int zxi2c_probe(struct platform_device *pdev) +{ + int error; + struct zxi2c *i2c; + struct pci_dev *pci; + struct device *dev; + + dev = pdev->dev.parent; + if (dev && dev_is_pci(dev)) { + pci = to_pci_dev(dev); + if (pci->vendor != 0x1d17 || pci->device != 0x1001) + return -ENODEV; + } else + return -ENODEV; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (IS_ERR(i2c)) { + dev_err(&pdev->dev, "devm_kzalloc FAILED\n"); + return -ENOMEM; + } + + i2c->dev = &pdev->dev; + error = zxi2c_parse_resources(i2c); + if (error) + return error; + + i2c->pci = pci; + i2c->hrv = get_reversion(i2c->regs); + + platform_set_drvdata(pdev, (void *)i2c); + + if (devm_request_irq(&pdev->dev, i2c->irq, zxi2c_irq_handle, IRQF_SHARED, + pdev->name, i2c)) { + dev_err(i2c->dev, "i2c IRQ%d allocate failed.\n", i2c->irq); + return -ENODEV; + } + + init_waitqueue_head(&i2c->waitq); + i2c->retry = 0; + i2c->busy = false; + + i2c->adap.owner = THIS_MODULE; + i2c->adap.algo = &zxi2c_algorithm; + i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; + i2c->adap.retries = RETRY_TIME + 1; + i2c->adap.quirks = &zxi2c_quirks; + + i2c->adap.owner = THIS_MODULE; + i2c->adap.dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev)); + snprintf(i2c->adap.name, sizeof(i2c->adap.name), "%s.%s", ZX_I2C_NAME, i2c->bus_uid); + i2c_set_adapdata(&i2c->adap, i2c); + + i2c->dynamic = get_dynamic_clock(i2c->regs); + set_dynamic_clock(i2c->regs, i2c->dynamic); + i2c->fstp = get_fstp_value(i2c->regs); + zxi2c_set_bus_speed(i2c); + + error = i2c_add_adapter(&i2c->adap); + if (unlikely(error)) { + dev_err(i2c->dev, "failed to register i2c, err: %d\n", error); + return error; + } + + dev_info(i2c->dev, "Adapter %s registered at /dev/i2c-%d\n", i2c->adap.name, i2c->adap.nr); + + return 0; +} + +static int zxi2c_remove(struct platform_device *pdev) +{ + struct zxi2c *i2c = platform_get_drvdata(pdev); + + zxi2c_module_reset(i2c); + master_regs_reset(i2c->regs); + + devm_free_irq(&pdev->dev, i2c->irq, i2c); + + i2c_del_adapter(&i2c->adap); + + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, i2c); + + dev_info(&pdev->dev, "i2c adapter unregistered.\n"); + + return 0; +} + +static int zxi2c_suspend(struct device *dev) +{ + return 0; +} + +static int zxi2c_resume(struct device *dev) +{ + struct zxi2c *i2c = dev_get_drvdata(dev); + + zxi2c_module_reset(i2c); + zxi2c_set_bus_speed(i2c); + + return 0; +} + +const struct dev_pm_ops zxi2c_pm = { + SET_SYSTEM_SLEEP_PM_OPS(zxi2c_suspend, zxi2c_resume) +}; + +static const struct acpi_device_id zxi2c_acpi_match[] = { + {"IIC1D17", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); + +static struct platform_driver zxi2c_driver = { + .probe = zxi2c_probe, + .remove = zxi2c_remove, + .driver = { + .name = ZX_I2C_NAME, + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(zxi2c_acpi_match), + .pm = &zxi2c_pm, + }, +}; + +module_platform_driver(zxi2c_driver); + +MODULE_AUTHOR("HansHu@zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c index 
48281c1b30c6d56892cac6ad7388386aad586476..b8f9e020d80e6a1049ff0328788073b628f6777c 100644 --- a/drivers/i2c/busses/i2c-zx2967.c +++ b/drivers/i2c/busses/i2c-zx2967.c @@ -281,9 +281,6 @@ static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c, int ret; int i; - if (msg->len == 0) - return -EINVAL; - zx2967_i2c_flush_fifos(i2c); i2c->cur_trans = msg->buf; @@ -498,6 +495,10 @@ static const struct i2c_algorithm zx2967_i2c_algo = { .functionality = zx2967_i2c_func, }; +static const struct i2c_adapter_quirks zx2967_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + static const struct of_device_id zx2967_i2c_of_match[] = { { .compatible = "zte,zx296718-i2c", }, { }, @@ -568,6 +569,7 @@ static int zx2967_i2c_probe(struct platform_device *pdev) strlcpy(i2c->adap.name, "zx2967 i2c adapter", sizeof(i2c->adap.name)); i2c->adap.algo = &zx2967_i2c_algo; + i2c->adap.quirks = &zx2967_i2c_quirks; i2c->adap.nr = pdev->id; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 32affd3fa8bd1ffe462c5f66abd6b9e40ba6a36f..2db4c49bee7789972934f23e0988c3c7149b5da2 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -43,6 +43,7 @@ struct i2c_acpi_lookup { int index; u32 speed; u32 min_speed; + u32 force_speed; }; static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) @@ -240,6 +241,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches, return acpi_match_device(matches, &client->dev); } +static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { + /* + * These Silead touchscreen controllers only work at 400KHz, for + * some reason they do not work at 100KHz. On some devices the ACPI + * tables list another device at their bus as only being capable + * of 100KHz, testing has shown that these other devices work fine + * at 400KHz (as can be expected of any recent i2c hw) so we force + * the speed of the bus to 400 KHz if a Silead device is present. + */ + { "MSSL1680", 0 }, + {} +}; + static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -258,6 +272,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, if (lookup->speed <= lookup->min_speed) lookup->min_speed = lookup->speed; + if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) + lookup->force_speed = 400000; + return AE_OK; } @@ -295,7 +312,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) return 0; } - return lookup.min_speed != UINT_MAX ? 
lookup.min_speed : 0; + if (lookup.force_speed) { + if (lookup.force_speed != lookup.min_speed) + dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n", + lookup.min_speed, lookup.force_speed); + return lookup.force_speed; + } else if (lookup.min_speed != UINT_MAX) { + return lookup.min_speed; + } else { + return 0; + } } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); @@ -351,6 +377,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, break; i2c_acpi_register_device(adapter, adev, &info); + put_device(&adapter->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9200e349f29e411d53d2dc4126ea2f7a450a9288..366bd86285d64e12bcfaddea9f05fa1081d1a8e1 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,27 @@ void i2c_transfer_trace_unreg(void) static_branch_dec(&i2c_trace_msg_key); } +const char *i2c_freq_mode_string(u32 bus_freq_hz) +{ + switch (bus_freq_hz) { + case I2C_MAX_STANDARD_MODE_FREQ: + return "Standard Mode (100 kHz)"; + case I2C_MAX_FAST_MODE_FREQ: + return "Fast Mode (400 kHz)"; + case I2C_MAX_FAST_MODE_PLUS_FREQ: + return "Fast Mode Plus (1.0 MHz)"; + case I2C_MAX_TURBO_MODE_FREQ: + return "Turbo Mode (1.4 MHz)"; + case I2C_MAX_HIGH_SPEED_MODE_FREQ: + return "High Speed Mode (3.4 MHz)"; + case I2C_MAX_ULTRA_FAST_MODE_FREQ: + return "Ultra Fast Mode (5.0 MHz)"; + default: + return "Unknown Mode"; + } +} +EXPORT_SYMBOL_GPL(i2c_freq_mode_string); + const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { @@ -185,7 +207,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - int i = 0, scl = 1, ret; + int i = 0, scl = 1, ret = 0; if (bri->prepare_recovery) bri->prepare_recovery(adap); @@ -306,10 +328,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) if (client->flags & I2C_CLIENT_TEN) return -EINVAL; - irq = irq_find_mapping(adap->host_notify_domain, client->addr); - if (!irq) - irq = irq_create_mapping(adap->host_notify_domain, - client->addr); + irq = irq_create_mapping(adap->host_notify_domain, client->addr); return irq > 0 ? 
irq : -ENXIO; } @@ -330,6 +349,8 @@ static int i2c_device_probe(struct device *dev) if (client->flags & I2C_CLIENT_HOST_NOTIFY) { dev_dbg(dev, "Using Host Notify IRQ\n"); + /* Keep adapter active when Host Notify is required */ + pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); @@ -433,6 +454,10 @@ static int i2c_device_remove(struct device *dev) dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); + client->irq = client->init_irq; + if (client->flags & I2C_CLIENT_HOST_NOTIFY) + pm_runtime_put(&client->adapter->dev); + return status; } @@ -446,6 +471,8 @@ static void i2c_device_shutdown(struct device *dev) driver = to_i2c_driver(dev->driver); if (driver->shutdown) driver->shutdown(client); + else if (client->irq > 0) + disable_irq(client->irq); } static void i2c_client_dev_release(struct device *dev) @@ -742,10 +769,11 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->flags = info->flags; client->addr = info->addr; - client->irq = info->irq; - if (!client->irq) - client->irq = i2c_dev_irq_from_resources(info->resources, + client->init_irq = info->irq; + if (!client->init_irq) + client->init_irq = i2c_dev_irq_from_resources(info->resources, info->num_resources); + client->irq = client->init_irq; strlcpy(client->name, info->type, sizeof(client->name)); @@ -1518,6 +1546,32 @@ void i2c_del_adapter(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_del_adapter); +static void devm_i2c_del_adapter(void *adapter) +{ + i2c_del_adapter(adapter); +} + +/** + * devm_i2c_add_adapter - device-managed variant of i2c_add_adapter() + * @dev: managing device for adding this I2C adapter + * @adapter: the adapter to add + * Context: can sleep + * + * Add adapter with dynamic bus number, same with i2c_add_adapter() + * but the adapter will be auto deleted on driver detach. + */ +int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter) +{ + int ret; + + ret = i2c_add_adapter(adapter); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_i2c_del_adapter, adapter); +} +EXPORT_SYMBOL_GPL(devm_i2c_add_adapter); + /** * i2c_parse_fw_timings - get I2C related timing parameters from firmware * @dev: The device to scan for I2C timing properties @@ -1857,13 +1911,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs, * Returns negative errno, else the number of messages executed. * * Adapter lock must be held when calling this function. No debug logging - * takes place. adap->algo->master_xfer existence isn't checked. + * takes place. 
*/ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { unsigned long orig_jiffies; int ret, try; + if (!adap->algo->master_xfer) { + dev_dbg(&adap->dev, "I2C level transfers not supported\n"); + return -EOPNOTSUPP; + } + if (WARN_ON(!msgs || num < 1)) return -EINVAL; @@ -2258,8 +2317,9 @@ void i2c_put_adapter(struct i2c_adapter *adap) if (!adap) return; - put_device(&adap->dev); module_put(adap->owner); + /* Should be last, otherwise we risk use-after-free with 'adap' */ + put_device(&adap->dev); } EXPORT_SYMBOL(i2c_put_adapter); diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index 6cb7ad608bcd53d9c966752a342562208aa7617e..14d4884996968bbae11d68c6d3e6f7f2535e63be 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -121,6 +121,17 @@ static int of_dev_node_match(struct device *dev, void *data) return dev->of_node == data; } +static int of_dev_or_parent_node_match(struct device *dev, void *data) +{ + if (dev->of_node == data) + return 1; + + if (dev->parent) + return dev->parent->of_node == data; + + return 0; +} + /* must call put_device() when done with returned i2c_client device */ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { @@ -145,7 +156,8 @@ struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) struct device *dev; struct i2c_adapter *adapter; - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, + of_dev_or_parent_node_match); if (!dev) return NULL; @@ -241,14 +253,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, } client = of_i2c_register_device(adap, rd->dn); - put_device(&adap->dev); - if (IS_ERR(client)) { dev_err(&adap->dev, "failed to create client for '%pOF'\n", rd->dn); + put_device(&adap->dev); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } + put_device(&adap->dev); break; case OF_RECONFIG_CHANGE_REMOVE: /* already depopulated? */ diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 1aca742fde4aefdf13069324abe7d4a93a0a3006..71ed4a99c7ee53193da4a15ea5706705d6805537 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { + i++; res = -EINVAL; break; } @@ -470,9 +471,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) data_arg.data); } case I2C_RETRIES: + if (arg > INT_MAX) + return -EINVAL; + client->adapter->retries = arg; break; case I2C_TIMEOUT: + if (arg > INT_MAX) + return -EINVAL; + /* For historical reasons, user-space sets the timeout * value in units of 10 ms. 
*/ @@ -529,6 +536,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo sizeof(rdwr_arg))) return -EFAULT; + if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) + return -EINVAL; + if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 45c9974303328b29af5a32d2ed5b47155aaa8e32..0e51803de0e7afdc212dc620511f5e9871343f42 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) drive->proc = proc_mkdir(drive->name, parent); if (drive->proc) { ide_add_proc_entries(drive->proc, generic_drive_entries, drive); - proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, + proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, drive->proc, &ide_settings_proc_fops, drive); } diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index c5b902b86b444773519edc1a52fe4d8782992f37..203ed4adc04ae6680de39910985b05996c227528 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) struct device_node *root = of_find_node_by_path("/"); const char *model = of_get_property(root, "model", NULL); + of_node_put(root); /* Get cable type from device-tree. */ if (cable && !strncmp(cable, "80-", 3)) { /* Some drives fail to detect 80c cable in PowerBook */ diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b2ccce5fb0718303971dec46581482ff1d2e7e76..b86f7071a445ab6ce273eecbef3ffbc3002361af 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -53,16 +53,19 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -91,6 +94,7 @@ struct idle_cpu { unsigned long auto_demotion_disable_flags; bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; + bool use_acpi; }; static const struct idle_cpu *icpu; @@ -101,6 +105,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static struct cpuidle_state *cpuidle_state_table; +/* + * Enable this state by default even if the ACPI _CST does not list it. + */ +#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) + /* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. @@ -109,6 +118,12 @@ static struct cpuidle_state *cpuidle_state_table; */ #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 +/* + * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE + * above. + */ +#define CPUIDLE_FLAG_IBRS BIT(16) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -119,6 +134,24 @@ static struct cpuidle_state *cpuidle_state_table; #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) +static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + bool smt_active = sched_smt_active(); + u64 spec_ctrl = spec_ctrl_current(); + int ret; + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + + ret = intel_idle(dev, drv, index); + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); + + return ret; +} + /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. 
@@ -136,7 +169,7 @@ static struct cpuidle_state nehalem_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -173,7 +206,7 @@ static struct cpuidle_state snb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -308,7 +341,7 @@ static struct cpuidle_state ivb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -353,7 +386,7 @@ static struct cpuidle_state ivt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, @@ -390,7 +423,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, @@ -427,7 +460,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, @@ -464,7 +497,7 @@ static struct cpuidle_state hsw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -532,7 +565,7 @@ static struct cpuidle_state bdw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -601,7 +634,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -617,7 +650,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, @@ -625,7 +658,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C7s", .desc = "MWAIT 0x33", - .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 124, .target_residency = 800, .enter = &intel_idle, @@ -633,7 +666,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C8", .desc = "MWAIT 0x40", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, @@ -641,7 +674,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C9", .desc = "MWAIT 0x50", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 480, .target_residency = 5000, .enter = &intel_idle, @@ -649,7 +682,7 @@ static 
struct cpuidle_state skl_cstates[] = { { .name = "C10", .desc = "MWAIT 0x60", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 890, .target_residency = 5000, .enter = &intel_idle, @@ -670,7 +703,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -678,7 +711,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 133, .target_residency = 600, .enter = &intel_idle, @@ -687,6 +720,37 @@ static struct cpuidle_state skx_cstates[] = { .enter = NULL } }; + +static struct cpuidle_state icx_cstates[] = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 128, + .target_residency = 384, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + + static struct cpuidle_state atom_cstates[] = { { .name = "C1E", @@ -820,7 +884,7 @@ static struct cpuidle_state bxt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -881,7 +945,7 @@ static struct cpuidle_state dnv_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -956,6 +1020,22 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, mwait_idle_with_hints(eax, ecx); } +static bool intel_idle_verify_cstate(unsigned int mwait_hint) +{ + unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; + unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & + MWAIT_SUBSTATE_MASK; + + /* Ignore the C-state if there are NO sub-states in CPUID for it. 
*/ + if (num_substates == 0) + return false; + + if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle states deeper than C2"); + + return true; +} + static void __setup_broadcast_timer(bool on) { if (on) @@ -987,6 +1067,13 @@ static const struct idle_cpu idle_cpu_nehalem = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_nhx = { + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; @@ -1005,6 +1092,12 @@ static const struct idle_cpu idle_cpu_snb = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_snx = { + .state_table = snb_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, @@ -1025,6 +1118,7 @@ static const struct idle_cpu idle_cpu_ivb = { static const struct idle_cpu idle_cpu_ivt = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_hsw = { @@ -1032,11 +1126,23 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_hsx = { + .state_table = hsw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_bdw = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdx = { + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_skl = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, @@ -1045,15 +1151,26 @@ static const struct idle_cpu idle_cpu_skl = { static const struct idle_cpu idle_cpu_skx = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; + +static const struct idle_cpu idle_cpu_icx = { + .state_table = icx_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + + static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_knl = { .state_table = knl_cstates, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_bxt = { @@ -1064,6 +1181,7 @@ static const struct idle_cpu idle_cpu_bxt = { static const struct idle_cpu idle_cpu_dnv = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; #define ICPU(model, cpu) \ @@ -1071,44 +1189,200 @@ static const struct idle_cpu idle_cpu_dnv = { static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), - ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem), + ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nhx), ICPU(INTEL_FAM6_NEHALEM_G, idle_cpu_nehalem), ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem), - ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem), - ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem), - ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom), - ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft), - ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem), + ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nhx), + ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nhx), + ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom), + ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft), + ICPU(INTEL_FAM6_WESTMERE_EX, 
idle_cpu_nhx), ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb), - ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb), - ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom), - ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt), - ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier), + ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snx), + ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom), + ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier), ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht), ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb), ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt), ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw), - ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsx), ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw), ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw), - ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn), ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw), ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw), - ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw), - ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdx), + ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdx), ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl), ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl), ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl), ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl), ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx), + ICPU(INTEL_FAM6_ICELAKE_X, idle_cpu_icx), ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl), ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl), ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), - ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt), - ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv), {} }; +#define INTEL_CPU_FAM6_MWAIT \ + { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } + +static const struct x86_cpu_id intel_mwait_ids[] __initconst = { + INTEL_CPU_FAM6_MWAIT, + {} +}; + +static bool intel_idle_max_cstate_reached(int cstate) +{ + if (cstate + 1 > max_cstate) { + pr_info("max_cstate %d reached\n", max_cstate); + return true; + } + return false; +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +#include + +static bool no_acpi __read_mostly; +module_param(no_acpi, bool, 0444); +MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); + +static struct acpi_processor_power acpi_state_table; + +/** + * intel_idle_cst_usable - Check if the _CST information can be used. + * + * Check if all of the C-states listed by _CST in the max_cstate range are + * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT. 
+ */ +static bool intel_idle_cst_usable(void) +{ + int cstate, limit; + + limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), + acpi_state_table.count); + + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; + + if (cx->entry_method != ACPI_CSTATE_FFH) + return false; + } + + return true; +} + +static bool intel_idle_acpi_cst_extract(void) +{ + unsigned int cpu; + + if (no_acpi) { + pr_debug("Not allowed to use ACPI _CST\n"); + return false; + } + + for_each_possible_cpu(cpu) { + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (!pr) + continue; + + if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) + continue; + + acpi_state_table.count++; + + if (!intel_idle_cst_usable()) + continue; + + if (!acpi_processor_claim_cst_control()) + break; + + return true; + } + + acpi_state_table.count = 0; + pr_debug("ACPI _CST not found or not usable\n"); + return false; +} + +static void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) +{ + int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + + if (intel_idle_max_cstate_reached(cstate - 1)) + break; + + cx = &acpi_state_table.states[cstate]; + + state = &drv->states[drv->state_count++]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + /* + * For C1-type C-states use the same number for both the exit + * latency and target residency, because that is the case for + * C1 in the majority of the static C-states tables above. + * For the other types of C-states, however, set the target + * residency to 3 times the exit latency which should lead to + * a reasonable balance between energy-efficiency and + * performance in the majority of interesting cases. + */ + state->target_residency = cx->latency; + if (cx->type > ACPI_STATE_C1) + state->target_residency *= 3; + + state->flags = MWAIT2flg(cx->address); + if (cx->type > ACPI_STATE_C2) + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + + state->enter = intel_idle; + state->enter_s2idle = intel_idle_s2idle; + } +} + +static bool intel_idle_off_by_default(u32 mwait_hint) +{ + int cstate, limit; + + /* + * If there are no _CST C-states, do not disable any C-states by + * default. + */ + if (!acpi_state_table.count) + return false; + + limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. 
+ */ + for (cstate = 1; cstate < limit; cstate++) { + if (acpi_state_table.states[cstate].address == mwait_hint) + return false; + } + return true; +} +#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static inline bool intel_idle_acpi_cst_extract(void) { return false; } +static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } +static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ + /* * intel_idle_probe() */ @@ -1123,17 +1397,15 @@ static int __init intel_idle_probe(void) } id = x86_match_cpu(intel_idle_ids); - if (!id) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) - pr_debug("does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; - } - - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { - pr_debug("Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; + if (id) { + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + } else { + id = x86_match_cpu(intel_mwait_ids); + if (!id) + return -ENODEV; } if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) @@ -1149,7 +1421,13 @@ static int __init intel_idle_probe(void) pr_debug("MWAIT substates: 0x%x\n", mwait_substates); icpu = (const struct idle_cpu *)id->driver_data; - cpuidle_state_table = icpu->state_table; + if (icpu) { + cpuidle_state_table = icpu->state_table; + if (icpu->use_acpi) + intel_idle_acpi_cst_extract(); + } else if (!intel_idle_acpi_cst_extract()) { + return -ENODEV; + } pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); @@ -1322,7 +1600,7 @@ static void intel_idle_state_table_update(void) ivt_idle_state_table_update(); break; case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_ATOM_GEMINI_LAKE: + case INTEL_FAM6_ATOM_GOLDMONT_PLUS: bxt_idle_state_table_update(); break; case INTEL_FAM6_SKYLAKE_DESKTOP: @@ -1331,60 +1609,44 @@ static void intel_idle_state_table_update(void) } } -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states - */ -static void __init intel_idle_cpuidle_driver_init(void) +static void intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) { int cstate; - struct cpuidle_driver *drv = &intel_idle_driver; - - intel_idle_state_table_update(); - - cpuidle_poll_state_init(drv); - drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { - int num_substates, mwait_hint, mwait_cstate; + unsigned int mwait_hint; - if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_s2idle == NULL)) + if (intel_idle_max_cstate_reached(cstate)) break; - if (cstate + 1 > max_cstate) { - pr_info("max_cstate %d reached\n", max_cstate); + if (!cpuidle_state_table[cstate].enter && + !cpuidle_state_table[cstate].enter_s2idle) break; - } - - mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); - - /* number of sub-states for this state in CPUID.MWAIT */ - num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) - & MWAIT_SUBSTATE_MASK; - /* if NO sub-states for this state in CPUID, skip it */ - if (num_substates == 0) - continue; - - /* if state marked as disabled, skip it */ + /* If marked as unusable, skip this state. 
*/ if (cpuidle_state_table[cstate].disabled != 0) { pr_debug("state %s is disabled\n", cpuidle_state_table[cstate].name); continue; } + mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + if (!intel_idle_verify_cstate(mwait_hint)) + continue; + + /* Structure copy. */ + drv->states[drv->state_count] = cpuidle_state_table[cstate]; - if (((mwait_cstate + 1) > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && + cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) { + drv->states[drv->state_count].enter = intel_idle_ibrs; + } - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; - drv->state_count += 1; + drv->state_count++; } if (icpu->byt_auto_demotion_disable_flag) { @@ -1393,6 +1655,24 @@ static void __init intel_idle_cpuidle_driver_init(void) } } +/* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static void __init intel_idle_cpuidle_driver_init(void) +{ + struct cpuidle_driver *drv = &intel_idle_driver; + + intel_idle_state_table_update(); + + cpuidle_poll_state_init(drv); + drv->state_count = 1; + + if (icpu) + intel_idle_init_cstates_icpu(drv); + else + intel_idle_init_cstates_acpi(drv); +} /* * intel_idle_cpu_init() @@ -1411,6 +1691,9 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } + if (!icpu) + return 0; + if (icpu->auto_demotion_disable_flags) auto_demotion_disable(); diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 383c802eb5b86a3d4865d28cecde8fba5319fdff..cb8c98a440109d12154bc56e5fff8e03b8d07998 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -125,7 +125,7 @@ #define BMC150_ACCEL_SLEEP_1_SEC 0x0F #define BMC150_ACCEL_REG_TEMP 0x08 -#define BMC150_ACCEL_TEMP_CENTER_VAL 24 +#define BMC150_ACCEL_TEMP_CENTER_VAL 23 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2)) #define BMC150_AUTO_SUSPEND_DELAY_MS 2000 diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c index 063e89eff791a7de06ec2abfa5f814d9ab0193ee..c776a3509a7173720435d70e3611f287c550fad2 100644 --- a/drivers/iio/accel/cros_ec_accel_legacy.c +++ b/drivers/iio/accel/cros_ec_accel_legacy.c @@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = { .modified = 1, \ .info_mask_separate = \ BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \ .ext_info = cros_ec_accel_legacy_ext_info, \ diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index 41d97faf50138905d24cb3dea7da5cbb185062c7..38ff374a3ca451071a5fba6a09483d43abce3139 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; struct hid_sensor_hub_device *hsdev = accel_state->common_attributes.hsdev; @@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&accel_state->common_attributes, true); 
report_id = accel_state->accel[chan->scan_index].report_id; + min = accel_state->accel[chan->scan_index].logical_minimum; address = accel_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( accel_state->common_attributes.hsdev, hsdev->usage, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; hid_sensor_power_state(&accel_state->common_attributes, diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index af53a1084ee537642906fc35e19986b0d5bc508b..249f39b91ec1486a059eac8c03a28326b7d4f3cd 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1378,8 +1378,7 @@ static int kxcjk1013_probe(struct i2c_client *client, return 0; err_buffer_cleanup: - if (data->dready_trig) - iio_triggered_buffer_cleanup(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: if (data->dready_trig) iio_trigger_unregister(data->dready_trig); @@ -1402,8 +1401,8 @@ static int kxcjk1013_remove(struct i2c_client *client) pm_runtime_set_suspended(&client->dev); pm_runtime_put_noidle(&client->dev); + iio_triggered_buffer_cleanup(indio_dev); if (data->dready_trig) { - iio_triggered_buffer_cleanup(indio_dev); iio_trigger_unregister(data->dready_trig); iio_trigger_unregister(data->motion_trig); } @@ -1437,6 +1436,8 @@ static int kxcjk1013_resume(struct device *dev) mutex_lock(&data->mutex); ret = kxcjk1013_set_mode(data, OPERATION); + if (ret == 0) + ret = kxcjk1013_set_range(data, data->range); mutex_unlock(&data->mutex); return ret; @@ -1490,6 +1491,7 @@ static const struct acpi_device_id kx_acpi_match[] = { {"KXCJ1008", KXCJ91008}, {"KXCJ9000", KXCJ91008}, {"KIOX000A", KXCJ91008}, + {"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */ {"KXTJ1009", KXTJ21009}, {"SMO8500", KXCJ91008}, { }, diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 421a0a8a1379b74dd5847245533b99b9f5a892fa..db815bab79b4f3c95a78a076b14e8d8aae5a4789 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1465,7 +1465,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev) if (ret) return ret; - indio_dev->trig = trig; + indio_dev->trig = iio_trigger_get(trig); return 0; } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 4a754921fb6f9de7d488a2c7dc59bba0d3d11ddc..6789cd12bc0e0e0f360ba1830e8ba19e323d7a98 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -696,6 +696,7 @@ config STM32_DFSDM_ADC depends on (ARCH_STM32 && OF) || COMPILE_TEST select STM32_DFSDM_CORE select REGMAP_MMIO + select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER help Select this option to support ADCSigma delta modulator for @@ -941,4 +942,16 @@ config XILINX_XADC The driver can also be build as a module. If so, the module will be called xilinx-xadc. +config PHYTIUM_ADC + tristate "Phytium ADC driver" + depends on ARCH_PHYTIUM || COMPILE_TEST + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Phytium analog to digital + converters (ADC). + + To compile this driver as a module, choose M here: the module + will be called phytium-adc. 
+ endmenu diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 03db7b578f9c077c4a5790f79f8ba48beb8d56dd..7d58314fb0cfbff7309d5030d51b88d081011146 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -86,3 +86,4 @@ obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o +obj-$(CONFIG_PHYTIUM_ADC) += phytium-adc.o diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index e1da67d5ee2204b6ad1e97281653cb3e89ffbc86..9e61720db7eaf17a8a97fe539d97e738449846cf 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -814,10 +814,10 @@ static int ad799x_probe(struct i2c_client *client, ret = ad799x_write_config(st, st->chip_config->default_config); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; ret = ad799x_read_config(st); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; st->config = ret; ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index fc9510716ac771633213ad1d6c960b4c8023c7cf..25af4c76b57fe16d7cbb5e44401acf85c698e0f8 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, struct spi_transfer t = { .tx_buf = data, .len = size + 1, - .cs_change = sigma_delta->bus_locked, + .cs_change = sigma_delta->keep_cs_asserted, }; struct spi_message m; int ret; @@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, if (sigma_delta->info->has_registers) { data[0] = reg << sigma_delta->info->addr_shift; data[0] |= sigma_delta->info->read_mask; + data[0] |= sigma_delta->comm; spi_message_add_tail(&t[0], &m); } spi_message_add_tail(&t[1], &m); @@ -217,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; reinit_completion(&sigma_delta->completion); ret = ad_sigma_delta_set_mode(sigma_delta, mode); @@ -234,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, ret = 0; } out: + sigma_delta->keep_cs_asserted = false; + ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; spi_bus_unlock(sigma_delta->spi->master); - ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); return ret; } @@ -288,6 +291,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; reinit_completion(&sigma_delta->completion); ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE); @@ -297,9 +301,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, ret = wait_for_completion_interruptible_timeout( &sigma_delta->completion, HZ); - sigma_delta->bus_locked = false; - spi_bus_unlock(sigma_delta->spi->master); - if (ret == 0) ret = -EIO; if (ret < 0) @@ -315,7 +316,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, sigma_delta->irq_dis = true; } + sigma_delta->keep_cs_asserted = false; ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); + sigma_delta->bus_locked = false; + spi_bus_unlock(sigma_delta->spi->master); mutex_unlock(&indio_dev->mlock); if (ret) @@ -352,6 +356,8 @@ static int ad_sd_buffer_postenable(struct iio_dev 
*indio_dev) spi_bus_lock(sigma_delta->spi->master); sigma_delta->bus_locked = true; + sigma_delta->keep_cs_asserted = true; + ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS); if (ret) goto err_unlock; @@ -380,6 +386,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev) sigma_delta->irq_dis = true; } + sigma_delta->keep_cs_asserted = false; ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE); sigma_delta->bus_locked = false; diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 44b516863c9d4d220f3323ca580d98fbb6f8e772..596841a3c4db77f59f5fc7c3c3f0f1fddc21aab7 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -248,12 +248,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *idev = pf->indio_dev; struct at91_adc_state *st = iio_priv(idev); + struct iio_chan_spec const *chan; int i, j = 0; for (i = 0; i < idev->masklength; i++) { if (!test_bit(i, idev->active_scan_mask)) continue; - st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i)); + chan = idev->channels + i; + st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel)); j++; } @@ -279,6 +281,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev) iio_trigger_poll(idev->trig); } else { st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb)); + /* Needed to ACK the DRDY interruption */ + at91_adc_readl(st, AT91_ADC_LCDR); st->done = true; wake_up_interruptible(&st->wq_data_avail); } @@ -700,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev, ret = wait_event_interruptible_timeout(st->wq_data_avail, st->done, msecs_to_jiffies(1000)); - if (ret == 0) - ret = -ETIMEDOUT; - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - - *val = st->last_value; + /* Disable interrupts, regardless if adc conversion was + * successful or not + */ at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); - st->last_value = 0; - st->done = false; + if (ret > 0) { + /* a valid conversion took place */ + *val = st->last_value; + st->last_value = 0; + st->done = false; + ret = IIO_VAL_INT; + } else if (ret == 0) { + /* conversion timeout */ + dev_err(&idev->dev, "ADC Channel %d timeout.\n", + chan->channel); + ret = -ETIMEDOUT; + } + mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 031d568b4972fa59e6426b3dfe013b0c3b94ad9e..e6ce25bcc01cac6946119cfb83698bf21bd43b37 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -16,6 +16,7 @@ * */ +#include #include #include #include @@ -27,9 +28,23 @@ #include #include -#define AXP288_ADC_EN_MASK 0xF1 -#define AXP288_ADC_TS_PIN_GPADC 0xF2 -#define AXP288_ADC_TS_PIN_ON 0xF3 +/* + * This mask enables all ADCs except for the battery temp-sensor (TS), that is + * left as-is to avoid breaking charging on devices without a temp-sensor. 
+ */ +#define AXP288_ADC_EN_MASK 0xF0 +#define AXP288_ADC_TS_ENABLE 0x01 + +#define AXP288_ADC_TS_BIAS_MASK GENMASK(5, 4) +#define AXP288_ADC_TS_BIAS_20UA (0 << 4) +#define AXP288_ADC_TS_BIAS_40UA (1 << 4) +#define AXP288_ADC_TS_BIAS_60UA (2 << 4) +#define AXP288_ADC_TS_BIAS_80UA (3 << 4) +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) enum axp288_adc_id { AXP288_ADC_TS, @@ -44,6 +59,7 @@ enum axp288_adc_id { struct axp288_adc_info { int irq; struct regmap *regmap; + bool ts_enabled; }; static const struct iio_chan_spec axp288_adc_channels[] = { @@ -115,21 +131,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } -static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, - unsigned long address) +/* + * The current-source used for the battery temp-sensor (TS) is shared + * with the GPADC. For proper fuel-gauge and charger operation the TS + * current-source needs to be permanently on. But to read the GPADC we + * need to temporary switch the TS current-source to ondemand, so that + * the GPADC can use it, otherwise we will always read an all 0 value. + */ +static int axp288_adc_set_ts(struct axp288_adc_info *info, + unsigned int mode, unsigned long address) { int ret; - /* channels other than GPADC do not need to switch TS pin */ + /* No need to switch the current-source if the TS pin is disabled */ + if (!info->ts_enabled) + return 0; + + /* Channels other than GPADC do not need the current source */ if (address != AXP288_GP_ADC_H) return 0; - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); if (ret) return ret; /* When switching to the GPADC pin give things some time to settle */ - if (mode == AXP288_ADC_TS_PIN_GPADC) + if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) usleep_range(6000, 10000); return 0; @@ -145,14 +173,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, chan->address)) { dev_err(&indio_dev->dev, "GPADC mode\n"); ret = -EINVAL; break; } ret = axp288_adc_read_channel(val, chan->address, info->regmap); - if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, chan->address)) dev_err(&indio_dev->dev, "TS pin restore\n"); break; @@ -164,13 +192,61 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } -static int axp288_adc_set_state(struct regmap *regmap) +/* + * We rely on the machine's firmware to correctly setup the TS pin bias current + * at boot. This lists systems with broken fw where we need to set it ourselves. 
+ */ +static const struct dmi_system_id axp288_adc_ts_bias_override[] = { + { + /* Lenovo Ideapad 100S (11 inch) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"), + }, + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, + }, + {} +}; + +static int axp288_adc_initialize(struct axp288_adc_info *info) { - /* ADC should be always enabled for internal FG to function */ - if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) - return -EIO; + const struct dmi_system_id *bias_override; + int ret, adc_enable_val; + + bias_override = dmi_first_match(axp288_adc_ts_bias_override); + if (bias_override) { + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_BIAS_MASK, + (uintptr_t)bias_override->driver_data); + if (ret) + return ret; + } + + /* + * Determine if the TS pin is enabled and set the TS current-source + * accordingly. + */ + ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); + if (ret) + return ret; + + if (adc_enable_val & AXP288_ADC_TS_ENABLE) { + info->ts_enabled = true; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_ON); + } else { + info->ts_enabled = false; + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, + AXP288_ADC_TS_CURRENT_OFF); + } + if (ret) + return ret; - return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + /* Turn on the ADC for all channels except TS, leave TS as is */ + return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, + AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); } static const struct iio_info axp288_adc_iio_info = { @@ -200,7 +276,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. 
*/ - ret = axp288_adc_set_state(axp20x->regmap); + ret = axp288_adc_initialize(info); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index c64c6675cae6005a8634a1346ccb74d2cbed2ded..4ab052d76d9f521570de6f58eca99ae0bbfb684d 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -527,6 +527,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) u16 conflict; unsigned int trigger_chan; + ret = iio_triggered_buffer_postenable(indio_dev); + if (ret) + return ret; + mutex_lock(&dln2->mutex); /* Enable ADC */ @@ -540,6 +544,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) (int)conflict); ret = -EBUSY; } + iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -553,6 +558,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); if (ret < 0) { dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + iio_triggered_buffer_predisable(indio_dev); return ret; } } else { @@ -560,12 +566,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); } - return iio_triggered_buffer_postenable(indio_dev); + return 0; } static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) { - int ret; + int ret, ret2; struct dln2_adc *dln2 = iio_priv(indio_dev); mutex_lock(&dln2->mutex); @@ -580,12 +586,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) ret = dln2_adc_set_port_enabled(dln2, false, NULL); mutex_unlock(&dln2->mutex); - if (ret < 0) { + if (ret < 0) dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - return ret; - } - return iio_triggered_buffer_predisable(indio_dev); + ret2 = iio_triggered_buffer_predisable(indio_dev); + if (ret == 0) + ret = ret2; + + return ret; } static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = { diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index f10443f92e4ce8758a519d67ad95acf227881562..1ca2c4d39f87851dac86f3bf4724c3c2d7be9510 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -115,6 +115,8 @@ #define MAX_ADC_V2_CHANNELS 10 #define MAX_ADC_V1_CHANNELS 8 #define MAX_EXYNOS3250_ADC_CHANNELS 2 +#define MAX_EXYNOS4212_ADC_CHANNELS 4 +#define MAX_S5PV210_ADC_CHANNELS 10 /* Bit definitions common for ADC_V1 and ADC_V2 */ #define ADC_CON_EN_START (1u << 0) @@ -270,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info, writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs)); } +/* Exynos4212 and 4412 is like ADCv1 but with four channels only */ +static const struct exynos_adc_data exynos4212_adc_data = { + .num_channels = MAX_EXYNOS4212_ADC_CHANNELS, + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */ + .needs_adc_phy = true, + .phy_offset = EXYNOS_ADCV1_PHY_OFFSET, + + .init_hw = exynos_adc_v1_init_hw, + .exit_hw = exynos_adc_v1_exit_hw, + .clear_irq = exynos_adc_v1_clear_irq, + .start_conv = exynos_adc_v1_start_conv, +}; + static const struct exynos_adc_data exynos_adc_v1_data = { .num_channels = MAX_ADC_V1_CHANNELS, .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */ @@ -282,6 +297,16 @@ static const struct exynos_adc_data exynos_adc_v1_data = { .start_conv = exynos_adc_v1_start_conv, }; +static const struct exynos_adc_data exynos_adc_s5pv210_data = { + .num_channels = MAX_S5PV210_ADC_CHANNELS, + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */ + + .init_hw = 
exynos_adc_v1_init_hw, + .exit_hw = exynos_adc_v1_exit_hw, + .clear_irq = exynos_adc_v1_clear_irq, + .start_conv = exynos_adc_v1_start_conv, +}; + static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info, unsigned long addr) { @@ -478,6 +503,12 @@ static const struct of_device_id exynos_adc_match[] = { }, { .compatible = "samsung,s3c6410-adc", .data = &exynos_adc_s3c64xx_data, + }, { + .compatible = "samsung,s5pv210-adc", + .data = &exynos_adc_s5pv210_data, + }, { + .compatible = "samsung,exynos4212-adc", + .data = &exynos4212_adc_data, }, { .compatible = "samsung,exynos-adc-v1", .data = &exynos_adc_v1_data, @@ -915,7 +946,7 @@ static int exynos_adc_remove(struct platform_device *pdev) struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct exynos_adc *info = iio_priv(indio_dev); - if (IS_REACHABLE(CONFIG_INPUT)) { + if (IS_REACHABLE(CONFIG_INPUT) && info->input) { free_irq(info->tsirq, info); input_unregister_device(info->input); } diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c index ea264fa9e567a4f5b2e1f3458e361693712e27b5..929c617db3645eca203ad098e92cd177f52b2c30 100644 --- a/drivers/iio/adc/fsl-imx25-gcq.c +++ b/drivers/iio/adc/fsl-imx25-gcq.c @@ -209,12 +209,14 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, ret = of_property_read_u32(child, "reg", ®); if (ret) { dev_err(dev, "Failed to get reg property\n"); + of_node_put(child); return ret; } if (reg >= MX25_NUM_CFGS) { dev_err(dev, "reg value is greater than the number of available configuration registers\n"); + of_node_put(child); return -EINVAL; } @@ -228,6 +230,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, if (IS_ERR(priv->vref[refp])) { dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.", mx25_gcq_refp_names[refp]); + of_node_put(child); return PTR_ERR(priv->vref[refp]); } priv->channel_vref_mv[reg] = @@ -240,6 +243,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, break; default: dev_err(dev, "Invalid positive reference %d\n", refp); + of_node_put(child); return -EINVAL; } @@ -254,10 +258,12 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev, if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) { dev_err(dev, "Invalid fsl,adc-refp property value\n"); + of_node_put(child); return -EINVAL; } if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) { dev_err(dev, "Invalid fsl,adc-refn property value\n"); + of_node_put(child); return -EINVAL; } diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c index 36b59d8957fb850a78638c2cff7dcd2ef222f67a..6c5d81a89aec9c6f7c077e2d78e19d9bfecd08d1 100644 --- a/drivers/iio/adc/hx711.c +++ b/drivers/iio/adc/hx711.c @@ -109,14 +109,14 @@ struct hx711_data { static int hx711_cycle(struct hx711_data *hx711_data) { - int val; + unsigned long flags; /* * if preempted for more then 60us while PD_SCK is high: * hx711 is going in reset * ==> measuring is false */ - preempt_disable(); + local_irq_save(flags); gpiod_set_value(hx711_data->gpiod_pd_sck, 1); /* @@ -126,7 +126,6 @@ static int hx711_cycle(struct hx711_data *hx711_data) */ ndelay(hx711_data->data_ready_delay_ns); - val = gpiod_get_value(hx711_data->gpiod_dout); /* * here we are not waiting for 0.2 us as suggested by the datasheet, * because the oscilloscope showed in a test scenario @@ -134,7 +133,7 @@ static int hx711_cycle(struct hx711_data *hx711_data) * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz */ gpiod_set_value(hx711_data->gpiod_pd_sck, 0); - preempt_enable(); + local_irq_restore(flags); 
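/*
 * Editorial aside, not part of the patch: the hx711 change above swaps
 * preempt_disable()/preempt_enable() for local_irq_save()/local_irq_restore()
 * so that neither preemption nor a local interrupt can hold PD_SCK high for
 * the roughly 60 us that would put the chip into reset mid-measurement.
 * A minimal sketch of that pattern follows; the helper name and the delay
 * parameter are hypothetical, not taken from the driver.
 */
static void hypothetical_pulse_pd_sck(struct gpio_desc *pd_sck, u32 high_ns)
{
	unsigned long flags;

	local_irq_save(flags);		/* bound how long PD_SCK stays high */
	gpiod_set_value(pd_sck, 1);
	ndelay(high_ns);		/* must stay well under 60 us */
	gpiod_set_value(pd_sck, 0);
	local_irq_restore(flags);
}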
/* * make it a square wave for addressing cases with capacitance on @@ -142,7 +141,8 @@ static int hx711_cycle(struct hx711_data *hx711_data) */ ndelay(hx711_data->data_ready_delay_ns); - return val; + /* sample as late as possible */ + return gpiod_get_value(hx711_data->gpiod_dout); } static int hx711_read(struct hx711_data *hx711_data) diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c index 311c1a89c329eb5599421508e43a7e3625cb821f..0939eb0384f1cf0e4a7c0476bcd738e879983975 100644 --- a/drivers/iio/adc/max1027.c +++ b/drivers/iio/adc/max1027.c @@ -460,6 +460,14 @@ static int max1027_probe(struct spi_device *spi) goto fail_dev_register; } + /* Internal reset */ + st->reg = MAX1027_RST_REG; + ret = spi_write(st->spi, &st->reg, 1); + if (ret < 0) { + dev_err(&indio_dev->dev, "Failed to reset the ADC\n"); + return ret; + } + /* Disable averaging */ st->reg = MAX1027_AVG_REG; ret = spi_write(st->spi, &st->reg, 1); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index 0538ff8c4ac1d2f1e242898daf644d3b69345c4d..9f1a5ef0b444c2b61eac95cccfe5fcd7a1f7c570 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -86,12 +86,18 @@ #define MAX9611_TEMP_MAX_POS 0x7f80 #define MAX9611_TEMP_MAX_NEG 0xff80 #define MAX9611_TEMP_MIN_NEG 0xd980 -#define MAX9611_TEMP_MASK GENMASK(7, 15) +#define MAX9611_TEMP_MASK GENMASK(15, 7) #define MAX9611_TEMP_SHIFT 0x07 #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT) #define MAX9611_TEMP_SCALE_NUM 1000000 #define MAX9611_TEMP_SCALE_DIV 2083 +/* + * Conversion time is 2 ms (typically) at Ta=25 degreeC + * No maximum value is known, so play it safe. + */ +#define MAX9611_CONV_TIME_US_RANGE 3000, 3300 + struct max9611_dev { struct device *dev; struct i2c_client *i2c_client; @@ -239,11 +245,9 @@ static int max9611_read_single(struct max9611_dev *max9611, return ret; } - /* - * need a delay here to make register configuration - * stabilize. 1 msec at least, from empirical testing. - */ - usleep_range(1000, 2000); + /* need a delay here to make register configuration stabilize. 
*/ + + usleep_range(MAX9611_CONV_TIME_US_RANGE); ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr); if (ret < 0) { @@ -289,7 +293,7 @@ static int max9611_read_csa_voltage(struct max9611_dev *max9611, return ret; if (*adc_raw > 0) { - *csa_gain = gain_selectors[i]; + *csa_gain = (enum max9611_csa_gain)gain_selectors[i]; return 0; } } @@ -483,7 +487,7 @@ static int max9611_init(struct max9611_dev *max9611) if (ret) return ret; - regval = ret & MAX9611_TEMP_MASK; + regval &= MAX9611_TEMP_MASK; if ((regval > MAX9611_TEMP_MAX_POS && regval < MAX9611_TEMP_MIN_NEG) || @@ -510,7 +514,7 @@ static int max9611_init(struct max9611_dev *max9611) MAX9611_REG_CTRL2, 0); return ret; } - usleep_range(1000, 2000); + usleep_range(MAX9611_CONV_TIME_US_RANGE); return 0; } diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index c80261748d8fdc36229363d29e68837bc044b4ea..49827bb648971c8d635967ba380d5196555ef955 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -106,6 +106,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -132,8 +133,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index da2d16dfa63e3b4eacaca90d8e07c521a85284ea..6e0ef9bb2497f93c42ee6107a23533908ed10f74 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -587,8 +587,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, struct clk_init_data init; const char *clk_parents[1]; - init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div", - indio_dev->dev.of_node); + init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div", + dev_name(indio_dev->dev.parent)); + if (!init.name) + return -ENOMEM; + init.flags = 0; init.ops = &clk_divider_ops; clk_parents[0] = __clk_get_name(priv->clkin); @@ -606,8 +609,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, if (WARN_ON(IS_ERR(priv->adc_div_clk))) return PTR_ERR(priv->adc_div_clk); - init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en", - indio_dev->dev.of_node); + init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en", + dev_name(indio_dev->dev.parent)); + if (!init.name) + return -ENOMEM; + init.flags = CLK_SET_RATE_PARENT; init.ops = &clk_gate_ops; clk_parents[0] = __clk_get_name(priv->adc_div_clk); @@ -1017,6 +1023,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, + priv->data->param->regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) return -EINVAL; @@ -1026,11 +1037,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (ret) return ret; - priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - priv->data->param->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); - priv->clkin = devm_clk_get(&pdev->dev, "clkin"); if (IS_ERR(priv->clkin)) { dev_err(&pdev->dev, "failed to get 
clkin\n"); diff --git a/drivers/iio/adc/phytium-adc.c b/drivers/iio/adc/phytium-adc.c new file mode 100644 index 0000000000000000000000000000000000000000..904855cb37829d54ae348f49010c8d2e59cbd0e6 --- /dev/null +++ b/drivers/iio/adc/phytium-adc.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium ADC device driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* ADC register */ +#define ADC_CTRL_REG 0x00 +#define ADC_CTRL_REG_PD_EN BIT(31) +#define ADC_CTRL_REG_CH_ONLY_S(x) ((x & 0x7) << 16) +#define ADC_CTRL_REG_CLK_DIV(x) ((x) << 12) +#define ADC_CTRL_REG_CHANNEL_EN(x) BIT((x) + 4) +#define ADC_CTRL_REG_CH_ONLY_EN BIT(3) +#define ADC_CTRL_REG_SINGLE_EN BIT(2) +#define ADC_CTRL_REG_SINGLE_SEL BIT(1) +#define ADC_CTRL_REG_SOC_EN BIT(0) +#define ADC_INTER_REG 0x04 +#define ADC_STATE_REG 0x08 +#define ADC_STATE_REG_B_STA(x) ((x) << 8) +#define ADC_STATE_REG_EOC_STA BIT(7) +#define ADC_STATE_REG_S_STA(x) ((x) << 4) +#define ADC_STATE_REG_SOC_STA BIT(3) +#define ADC_STATE_REG_ERR_STA BIT(2) +#define ADC_STATE_REG_COV_FINISH_STA BIT(1) +#define ADC_STATE_REG_ADCCTL_BUSY_STA BIT(0) +#define ADC_ERRCLR_REG 0x0c +#define ADC_LEVEL_REG(x) (0x10 + ((x) << 2)) +#define ADC_LEVEL_REG_HIGH_LEVEL(x) ((x) << 16) +#define ADC_LEVEL_REG_LOW_LEVEL(x) (x) +#define ADC_INTRMASK_REG 0x30 +#define ADC_INTRMASK_REG_ERR_INTR_MASK BIT(24) +#define ADC_INTRMASK_REG_ULIMIT_OFF(x) BIT(9 + ((x) << 1)) +#define ADC_INTRMASK_REG_DLIMIT_MASK(x) BIT(8 + ((x) << 1)) +#define ADC_INTRMASK_REG_COVFIN_MASK(x) BIT((x)) +#define ADC_INTR_REG 0x34 +#define ADC_INTR_REG_ERR BIT(24) +#define ADC_INTR_REG_ULIMIT(x) BIT(9 + ((x) << 1)) +#define ADC_INTR_REG_DLIMIT(x) BIT(8 + ((x) << 1)) +#define ADC_INTR_REG_LIMIT_MASK GENMASK(23, 8) +#define ADC_INTR_REG_COVFIN(x) BIT((x)) +#define ADC_INTR_REG_COVFIN_MASK GENMASK(7, 0) +#define ADC_COV_RESULT_REG(x) (0x38 + ((x) << 2)) +#define ADC_COV_RESULT_REG_MASK GENMASK(9, 0) +#define ADC_FINISH_CNT_REG(x) (0x58 + ((x) << 2)) +#define ADC_HIS_LIMIT_REG(x) (0x78 + ((x) << 2)) + +#define PHYTIUM_MAX_CHANNELS 8 +#define PHYTIUM_ADC_TIMEOUT usecs_to_jiffies(1000 * 1000) + +static const struct iio_event_spec phytium_adc_event[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, +}; + +struct phytium_adc_data { + const struct iio_chan_spec *channels; + u8 num_channels; +}; + +struct phytium_adc { + struct device *dev; + void __iomem *regs; + struct clk *adc_clk; + + u32 interval; + u16 thresh_high[PHYTIUM_MAX_CHANNELS]; + u16 thresh_low[PHYTIUM_MAX_CHANNELS]; + u16 last_val[PHYTIUM_MAX_CHANNELS]; + const struct phytium_adc_data *data; + u16 *scan_data; + + struct completion completion; + struct mutex lock; +}; + +static ssize_t phytium_adc_show_conv_interval(struct iio_dev *indio_dev, + uintptr_t priv, + struct iio_chan_spec const *ch, + char *buf) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + return sprintf(buf, "%u\n", adc->interval); +} + +static ssize_t phytium_adc_store_conv_interval(struct iio_dev *indio_dev, + uintptr_t priv, + struct iio_chan_spec const *ch, + const char *buf, size_t len) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + u32 interval; + int ret; + + ret = kstrtou32(buf, 0, &interval); + 
if (ret < 0) + return ret; + + mutex_lock(&adc->lock); + adc->interval = interval; + mutex_unlock(&adc->lock); + + return len; +} + +static const struct iio_chan_spec_ext_info phytium_adc_ext_info[] = { + { + .name = "conv_interval", + .read = phytium_adc_show_conv_interval, + .write = phytium_adc_store_conv_interval, + }, + { /* sentinel */ } +}; + +static int phytium_adc_parse_properties(struct platform_device *pdev, struct phytium_adc *adc) +{ + struct iio_chan_spec *chan_array; + struct fwnode_handle *fwnode; + struct phytium_adc_data *data; + unsigned int channel; + int num_channels; + int ret, i = 0; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + num_channels = device_get_child_node_count(&pdev->dev); + if (!num_channels) { + dev_err(&pdev->dev, "no channel children\n"); + return -ENODEV; + } + + if (num_channels > PHYTIUM_MAX_CHANNELS) { + dev_err(&pdev->dev, "num of channel children out of range\n"); + return -EINVAL; + } + + chan_array = devm_kcalloc(&pdev->dev, num_channels, sizeof(*chan_array), + GFP_KERNEL); + if (!chan_array) + return -ENOMEM; + + device_for_each_child_node(&pdev->dev, fwnode) { + ret = fwnode_property_read_u32(fwnode, "reg", &channel); + if (ret) + return ret; + + if (channel >= PHYTIUM_MAX_CHANNELS) + return -EINVAL; + + chan_array[i].type = IIO_VOLTAGE; + chan_array[i].indexed = 1; + chan_array[i].channel = channel; + chan_array[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW); + chan_array[i].event_spec = phytium_adc_event; + chan_array[i].num_event_specs = ARRAY_SIZE(phytium_adc_event); + chan_array[i].scan_index = channel; + chan_array[i].scan_type.sign = 'u'; + chan_array[i].scan_type.realbits = 10; + chan_array[i].scan_type.storagebits = 16; + chan_array[i].scan_type.endianness = IIO_LE; + chan_array[i].ext_info = phytium_adc_ext_info; + i++; + } + + data->num_channels = num_channels; + data->channels = chan_array; + adc->data = data; + + return 0; +} + +static void phytium_adc_start_stop(struct phytium_adc *adc, bool start) +{ + u32 ctrl; + + ctrl = readl(adc->regs + ADC_CTRL_REG); + if (start) + ctrl |= ADC_CTRL_REG_SOC_EN | ADC_CTRL_REG_SINGLE_EN; + else + ctrl &= ~ADC_CTRL_REG_SOC_EN; + /* Start conversion */ + writel(ctrl, adc->regs + ADC_CTRL_REG); +} + +static void phytium_adc_power_setup(struct phytium_adc *adc, bool on) +{ + u32 reg; + + reg = readl(adc->regs + ADC_CTRL_REG); + if (on) + reg &= ~ADC_CTRL_REG_PD_EN; + else + reg |= ADC_CTRL_REG_PD_EN; + writel(reg, adc->regs + ADC_CTRL_REG); +} + +static int phytium_adc_hw_init(struct phytium_adc *adc) +{ + int ret; + u32 reg; + + ret = clk_prepare_enable(adc->adc_clk); + if (ret) + return ret; + + /* + * Setup ctrl register: + * - Power up conversion module + * - Set the division by 4 as default + */ + reg = ADC_CTRL_REG_CLK_DIV(4); + writel(reg, adc->regs + ADC_CTRL_REG); + + /* Set all the interrupt mask, unmask them when necessary. 
*/ + writel(0x1ffffff, adc->regs + ADC_INTRMASK_REG); + + /* Set default conversion interval */ + adc->interval = (clk_get_rate(adc->adc_clk) * 1000) / NSEC_PER_SEC; + + phytium_adc_power_setup(adc, true); + + return 0; +} + +static void phytium_adc_intrmask_setup(struct phytium_adc *adc, unsigned long chan_mask, bool on) +{ + u32 reg; + u16 limit_mask = 0; + int ch; + + for_each_set_bit(ch, &chan_mask, PHYTIUM_MAX_CHANNELS) + limit_mask |= BIT(ch << 1) | BIT((ch << 1) + 1); + + reg = readl(adc->regs + ADC_INTRMASK_REG); + if (on) + reg &= ~(ADC_INTRMASK_REG_ERR_INTR_MASK | + (limit_mask << 8) | chan_mask); + else + reg |= (ADC_INTRMASK_REG_ERR_INTR_MASK | + (limit_mask << 8) | chan_mask); + writel(reg, adc->regs + ADC_INTRMASK_REG); +} + +static void phytium_adc_single_conv_setup(struct phytium_adc *adc, u8 ch) +{ + u32 reg; + + /* + * Setup control register: + * - Single conversion mode selection + * - Single conversion enable + * - Fixed channel conversion + * - Target channel + */ + reg = readl(adc->regs + ADC_CTRL_REG); + + /* Clean ch_only_s bits */ + reg &= ~ADC_CTRL_REG_CH_ONLY_S(7); + + /* Clean channel_en bit */ + reg &= 0xFFF00F; + + reg |= ADC_CTRL_REG_SINGLE_SEL | ADC_CTRL_REG_SINGLE_EN | + ADC_CTRL_REG_CH_ONLY_EN | ADC_CTRL_REG_CH_ONLY_S(ch) | ADC_CTRL_REG_CHANNEL_EN(ch); + writel(reg, adc->regs + ADC_CTRL_REG); +} + +static int phytium_adc_single_conv(struct iio_dev *indio_dev, u8 ch) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + mutex_lock(&adc->lock); + + phytium_adc_intrmask_setup(adc, BIT(ch), true); + reinit_completion(&adc->completion); + phytium_adc_single_conv_setup(adc, ch); + phytium_adc_start_stop(adc, true); + + if (!wait_for_completion_timeout(&adc->completion, PHYTIUM_ADC_TIMEOUT)) + ret = -ETIMEDOUT; + + phytium_adc_start_stop(adc, false); + phytium_adc_intrmask_setup(adc, BIT(ch), false); + + mutex_unlock(&adc->lock); + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static int phytium_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (chan->type != IIO_VOLTAGE) + return -EINVAL; + + ret = phytium_adc_single_conv(indio_dev, chan->channel); + if (ret) + return ret; + *val = adc->last_val[chan->channel]; + + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int phytium_read_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info iinfo, int *val, int *val2) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + if (dir == IIO_EV_DIR_FALLING) + *val = adc->thresh_low[chan->channel]; + else + *val = adc->thresh_high[chan->channel]; + + return IIO_VAL_INT; +} + +static int phytium_write_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info einfo, int val, int val2) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + u32 thresh; + + switch (dir) { + case IIO_EV_DIR_FALLING: + adc->thresh_low[chan->channel] = val; + thresh = readl(adc->regs + ADC_LEVEL_REG(chan->channel)) & 0x3ff0000; + thresh |= ADC_LEVEL_REG_LOW_LEVEL(val); + writel(thresh, adc->regs + ADC_LEVEL_REG(chan->channel)); + break; + case IIO_EV_DIR_RISING: + adc->thresh_high[chan->channel] = val; + thresh = 
readl(adc->regs + ADC_LEVEL_REG(chan->channel)) & 0xffff; + thresh |= ADC_LEVEL_REG_HIGH_LEVEL(val); + writel(thresh, adc->regs + ADC_LEVEL_REG(chan->channel)); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int phytium_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *mask) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned int n; + + n = bitmap_weight(mask, indio_dev->masklength); + + kfree(adc->scan_data); + adc->scan_data = kcalloc(n, sizeof(*adc->scan_data), GFP_KERNEL); + if (!adc->scan_data) + return -ENOMEM; + + return 0; +} + +static const u64 phytium_adc_event_codes[] = { + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 4, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 4, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 5, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 5, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 6, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 6, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 7, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 7, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), +}; + +static irqreturn_t phytium_adc_threaded_irq(int irq, void *data) +{ + struct phytium_adc *adc = data; + struct iio_dev *indio_dev = iio_priv_to_dev(adc); + s64 timestamp = iio_get_time_ns(indio_dev); + unsigned long status; + int ch; + u32 intr; + + intr = readl(adc->regs + ADC_INTR_REG); + + if (intr & ADC_INTR_REG_ERR) { + dev_err(adc->dev, "conversion error: ADC_INTR_REG(0x%x)\n", intr); + writel(ADC_INTR_REG_ERR, adc->regs + ADC_INTR_REG); + return IRQ_HANDLED; + } + + status = (intr & ADC_INTR_REG_LIMIT_MASK) >> 8; + if (status) { + for_each_set_bit(ch, &status, PHYTIUM_MAX_CHANNELS * 2) + iio_push_event(indio_dev, phytium_adc_event_codes[ch], timestamp); + } + + status = intr & ADC_INTR_REG_COVFIN_MASK; + if (status) { + for_each_set_bit(ch, &status, PHYTIUM_MAX_CHANNELS) + adc->last_val[ch] = readl(adc->regs + ADC_COV_RESULT_REG(ch)) & + ADC_COV_RESULT_REG_MASK; + + if (iio_buffer_enabled(indio_dev)) + iio_trigger_poll(indio_dev->trig); + else + complete(&adc->completion); + } + + /* Clear all the interrupts */ + writel(status, adc->regs + ADC_INTR_REG); + + return IRQ_HANDLED; +} + +static void phytium_adc_cont_conv_setup(struct phytium_adc *adc, + unsigned long chan_mask, + u32 interval) +{ + u32 reg; + + /* + * Setup control register: + * - Continuous conversion mode + * - Multi-channel rotation mode + * - Channel enablement + */ + reg = readl(adc->regs + ADC_CTRL_REG); + reg &= ~(ADC_CTRL_REG_SINGLE_SEL | ADC_CTRL_REG_SINGLE_EN | + ADC_CTRL_REG_CH_ONLY_EN); + reg |= chan_mask << 4; + writel(reg, adc->regs + ADC_CTRL_REG); + + /* Setup interval between two conversions 
*/ + writel(interval, adc->regs + ADC_INTER_REG); +} + +static int phytium_adc_preenable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned long scan_mask = *indio_dev->active_scan_mask; + + phytium_adc_cont_conv_setup(adc, scan_mask & 0xff, adc->interval); + phytium_adc_intrmask_setup(adc, scan_mask & 0xff, true); + + return 0; +} + +static int phytium_adc_postenable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + iio_triggered_buffer_postenable(indio_dev); + phytium_adc_start_stop(adc, true); + + return 0; +} + +static int phytium_adc_postdisable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned long scan_mask = *indio_dev->active_scan_mask; + + phytium_adc_start_stop(adc, false); + phytium_adc_intrmask_setup(adc, scan_mask & 0xff, false); + + return 0; +} + +static const struct iio_buffer_setup_ops phytium_buffer_setup_ops = { + .preenable = &phytium_adc_preenable, + .postenable = &phytium_adc_postenable, + .predisable = &iio_triggered_buffer_predisable, + .postdisable = &phytium_adc_postdisable, +}; + +static irqreturn_t phytium_adc_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct phytium_adc *adc = iio_priv(indio_dev); + int i, j = 0; + + if (!adc->scan_data) + goto out; + + for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) + adc->scan_data[j++] = adc->last_val[i]; + + iio_push_to_buffers(indio_dev, adc->scan_data); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static const struct iio_info phytium_adc_iio_info = { + .read_raw = &phytium_adc_read_raw, + .read_event_value = &phytium_read_thresh, + .write_event_value = &phytium_write_thresh, + .update_scan_mode = &phytium_update_scan_mode, +}; + +static int phytium_adc_probe(struct platform_device *pdev) +{ + struct phytium_adc *adc; + struct iio_dev *indio_dev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*adc)); + if (!indio_dev) + return -ENOMEM; + platform_set_drvdata(pdev, indio_dev); + + adc = iio_priv(indio_dev); + adc->dev = dev; + + ret = phytium_adc_parse_properties(pdev, adc); + if (ret) + return ret; + + mutex_init(&adc->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + adc->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(adc->regs)) + return PTR_ERR(adc->regs); + + adc->adc_clk = devm_clk_get(dev, NULL); + if (IS_ERR(adc->adc_clk)) + return PTR_ERR(adc->adc_clk); + + init_completion(&adc->completion); + + indio_dev->name = dev_name(dev); + indio_dev->info = &phytium_adc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = adc->data->channels; + indio_dev->num_channels = adc->data->num_channels; + + ret = devm_request_threaded_irq(adc->dev, platform_get_irq(pdev, 0), + NULL, phytium_adc_threaded_irq, IRQF_ONESHOT, + dev_name(dev), adc); + if (ret) + return ret; + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + &iio_pollfunc_store_time, + phytium_adc_trigger_handler, + &phytium_buffer_setup_ops); + if (ret) + return ret; + + ret = phytium_adc_hw_init(adc); + if (ret) { + dev_err(&pdev->dev, "failed to initialize Phytium ADC, %d\n", ret); + return ret; + } + + return devm_iio_device_register(dev, indio_dev); +} + +static int phytium_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct phytium_adc *adc = 
iio_priv(indio_dev); + + phytium_adc_power_setup(adc, false); + iio_device_unregister(indio_dev); + kfree(adc->scan_data); + + return 0; +} + +static const struct of_device_id phytium_of_match[] = { + { .compatible = "phytium,adc", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phytium_of_match); + +#ifdef CONFIG_PM +static int phytium_adc_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct phytium_adc *adc = iio_priv(indio_dev); + + phytium_adc_power_setup(adc, false); + clk_disable_unprepare(adc->adc_clk); + + return 0; +} + +static int phytium_adc_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct phytium_adc *adc = iio_priv(indio_dev); + + clk_prepare_enable(adc->adc_clk); + phytium_adc_power_setup(adc, true); + + return 0; +} +#endif + +SIMPLE_DEV_PM_OPS(phytium_adc_pm_ops, phytium_adc_suspend, phytium_adc_resume); + +static struct platform_driver phytium_adc_driver = { + .driver = { + .name = "phytium_adc", + .of_match_table = phytium_of_match, + .pm = &phytium_adc_pm_ops, + }, + .probe = phytium_adc_probe, + .remove = phytium_adc_remove, +}; +module_platform_driver(phytium_adc_driver); + +MODULE_AUTHOR("Yang Liu "); +MODULE_DESCRIPTION("Phytium ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c index b093ecddf1a8ae4fc53410ed57cbd59a2472d9b8..54db848f0bcdf47c918e7fe28a3a7f4ed75a8553 100644 --- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c +++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c @@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d) static struct pm8xxx_chan_info * pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan) { - struct pm8xxx_chan_info *ch; int i; for (i = 0; i < adc->nchans; i++) { - ch = &adc->chans[i]; + struct pm8xxx_chan_info *ch = &adc->chans[i]; if (ch->hwchan->amux_channel == chan) - break; + return ch; } - if (i == adc->nchans) - return NULL; - - return ch; + return NULL; } static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc, diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c index dcb50172186f49ab62722997cc34a55bdb667a06..f3a966ab35dcb41a614204527bc7b829d7033d92 100644 --- a/drivers/iio/adc/rcar-gyroadc.c +++ b/drivers/iio/adc/rcar-gyroadc.c @@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Only %i channels supported with %s, but reg = <%i>.\n", num_channels, child->name, reg); - return ret; + return -EINVAL; } } @@ -400,7 +400,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev) dev_err(dev, "Channel %i uses different ADC mode than the rest.\n", reg); - return ret; + return -EINVAL; } /* Channel is valid, grab the regulator. 
*/ diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index ca432e7b6ff1dbb61f5c30bdd20105cc37cec753..dc44d4de82cb343ee93ca0a506c0d9d02936c0d3 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -21,45 +21,22 @@ #include "stm32-adc-core.h" -/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */ -#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) -#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04) - -/* STM32F4_ADC_CSR - bit fields */ -#define STM32F4_EOC3 BIT(17) -#define STM32F4_EOC2 BIT(9) -#define STM32F4_EOC1 BIT(1) - -/* STM32F4_ADC_CCR - bit fields */ -#define STM32F4_ADC_ADCPRE_SHIFT 16 -#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16) - -/* STM32H7 - common registers for all ADC instances */ -#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) -#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08) - -/* STM32H7_ADC_CSR - bit fields */ -#define STM32H7_EOC_SLV BIT(18) -#define STM32H7_EOC_MST BIT(2) - -/* STM32H7_ADC_CCR - bit fields */ -#define STM32H7_PRESC_SHIFT 18 -#define STM32H7_PRESC_MASK GENMASK(21, 18) -#define STM32H7_CKMODE_SHIFT 16 -#define STM32H7_CKMODE_MASK GENMASK(17, 16) - /** * stm32_adc_common_regs - stm32 common registers, compatible dependent data * @csr: common status register offset * @eoc1: adc1 end of conversion flag in @csr * @eoc2: adc2 end of conversion flag in @csr * @eoc3: adc3 end of conversion flag in @csr + * @ier: interrupt enable register offset for each adc + * @eocie_msk: end of conversion interrupt enable mask in @ier */ struct stm32_adc_common_regs { u32 csr; u32 eoc1_msk; u32 eoc2_msk; u32 eoc3_msk; + u32 ier; + u32 eocie_msk; }; struct stm32_adc_priv; @@ -268,6 +245,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = { .eoc1_msk = STM32F4_EOC1, .eoc2_msk = STM32F4_EOC2, .eoc3_msk = STM32F4_EOC3, + .ier = STM32F4_ADC_CR1, + .eocie_msk = STM32F4_EOCIE, }; /* STM32H7 common registers definitions */ @@ -275,8 +254,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = { .csr = STM32H7_ADC_CSR, .eoc1_msk = STM32H7_EOC_MST, .eoc2_msk = STM32H7_EOC_SLV, + .ier = STM32H7_ADC_IER, + .eocie_msk = STM32H7_EOCIE, +}; + +static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = { + 0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2, }; +static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv, + unsigned int adc) +{ + u32 ier, offset = stm32_adc_offset[adc]; + + ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier); + + return ier & priv->cfg->regs->eocie_msk; +} + /* ADC common interrupt for all instances */ static void stm32_adc_irq_handler(struct irq_desc *desc) { @@ -287,13 +282,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); status = readl_relaxed(priv->common.base + priv->cfg->regs->csr); - if (status & priv->cfg->regs->eoc1_msk) + /* + * End of conversion may be handled by using IRQ or DMA. There may be a + * race here when two conversions complete at the same time on several + * ADCs. EOC may be read 'set' for several ADCs, with: + * - an ADC configured to use DMA (EOC triggers the DMA request, and + * is then automatically cleared by DR read in hardware) + * - an ADC configured to use IRQs (EOCIE bit is set. The handler must + * be called in this case) + * So both EOC status bit in CSR and EOCIE control bit must be checked + * before invoking the interrupt handler (e.g. call ISR only for + * IRQ-enabled ADCs). 
+ */ + if (status & priv->cfg->regs->eoc1_msk && + stm32_adc_eoc_enabled(priv, 0)) generic_handle_irq(irq_find_mapping(priv->domain, 0)); - if (status & priv->cfg->regs->eoc2_msk) + if (status & priv->cfg->regs->eoc2_msk && + stm32_adc_eoc_enabled(priv, 1)) generic_handle_irq(irq_find_mapping(priv->domain, 1)); - if (status & priv->cfg->regs->eoc3_msk) + if (status & priv->cfg->regs->eoc3_msk && + stm32_adc_eoc_enabled(priv, 2)) generic_handle_irq(irq_find_mapping(priv->domain, 2)); chained_irq_exit(chip, desc); @@ -384,6 +394,8 @@ static int stm32_adc_probe(struct platform_device *pdev) struct stm32_adc_priv *priv; struct device *dev = &pdev->dev; struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id; + struct resource *res; int ret; @@ -394,8 +406,11 @@ static int stm32_adc_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->cfg = (const struct stm32_adc_priv_cfg *) - of_match_device(dev->driver->of_match_table, dev)->data; + of_id = of_match_device(dev->driver->of_match_table, dev); + if (!of_id) + return -ENODEV; + + priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->common.base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h index 8af507b3f32d914d70307bfff7e8819b4284581d..2579d514c2a3448cbd877ec8f3567e8103fc1399 100644 --- a/drivers/iio/adc/stm32-adc-core.h +++ b/drivers/iio/adc/stm32-adc-core.h @@ -25,8 +25,145 @@ * -------------------------------------------------------- */ #define STM32_ADC_MAX_ADCS 3 +#define STM32_ADC_OFFSET 0x100 #define STM32_ADCX_COMN_OFFSET 0x300 +/* STM32F4 - Registers for each ADC instance */ +#define STM32F4_ADC_SR 0x00 +#define STM32F4_ADC_CR1 0x04 +#define STM32F4_ADC_CR2 0x08 +#define STM32F4_ADC_SMPR1 0x0C +#define STM32F4_ADC_SMPR2 0x10 +#define STM32F4_ADC_HTR 0x24 +#define STM32F4_ADC_LTR 0x28 +#define STM32F4_ADC_SQR1 0x2C +#define STM32F4_ADC_SQR2 0x30 +#define STM32F4_ADC_SQR3 0x34 +#define STM32F4_ADC_JSQR 0x38 +#define STM32F4_ADC_JDR1 0x3C +#define STM32F4_ADC_JDR2 0x40 +#define STM32F4_ADC_JDR3 0x44 +#define STM32F4_ADC_JDR4 0x48 +#define STM32F4_ADC_DR 0x4C + +/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */ +#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) +#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04) + +/* STM32F4_ADC_SR - bit fields */ +#define STM32F4_STRT BIT(4) +#define STM32F4_EOC BIT(1) + +/* STM32F4_ADC_CR1 - bit fields */ +#define STM32F4_RES_SHIFT 24 +#define STM32F4_RES_MASK GENMASK(25, 24) +#define STM32F4_SCAN BIT(8) +#define STM32F4_EOCIE BIT(5) + +/* STM32F4_ADC_CR2 - bit fields */ +#define STM32F4_SWSTART BIT(30) +#define STM32F4_EXTEN_SHIFT 28 +#define STM32F4_EXTEN_MASK GENMASK(29, 28) +#define STM32F4_EXTSEL_SHIFT 24 +#define STM32F4_EXTSEL_MASK GENMASK(27, 24) +#define STM32F4_EOCS BIT(10) +#define STM32F4_DDS BIT(9) +#define STM32F4_DMA BIT(8) +#define STM32F4_ADON BIT(0) + +/* STM32F4_ADC_CSR - bit fields */ +#define STM32F4_EOC3 BIT(17) +#define STM32F4_EOC2 BIT(9) +#define STM32F4_EOC1 BIT(1) + +/* STM32F4_ADC_CCR - bit fields */ +#define STM32F4_ADC_ADCPRE_SHIFT 16 +#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16) + +/* STM32H7 - Registers for each ADC instance */ +#define STM32H7_ADC_ISR 0x00 +#define STM32H7_ADC_IER 0x04 +#define STM32H7_ADC_CR 0x08 +#define STM32H7_ADC_CFGR 0x0C +#define STM32H7_ADC_SMPR1 0x14 +#define STM32H7_ADC_SMPR2 0x18 +#define STM32H7_ADC_PCSEL 0x1C +#define 
STM32H7_ADC_SQR1 0x30 +#define STM32H7_ADC_SQR2 0x34 +#define STM32H7_ADC_SQR3 0x38 +#define STM32H7_ADC_SQR4 0x3C +#define STM32H7_ADC_DR 0x40 +#define STM32H7_ADC_DIFSEL 0xC0 +#define STM32H7_ADC_CALFACT 0xC4 +#define STM32H7_ADC_CALFACT2 0xC8 + +/* STM32H7 - common registers for all ADC instances */ +#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) +#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08) + +/* STM32H7_ADC_ISR - bit fields */ +#define STM32MP1_VREGREADY BIT(12) +#define STM32H7_EOC BIT(2) +#define STM32H7_ADRDY BIT(0) + +/* STM32H7_ADC_IER - bit fields */ +#define STM32H7_EOCIE STM32H7_EOC + +/* STM32H7_ADC_CR - bit fields */ +#define STM32H7_ADCAL BIT(31) +#define STM32H7_ADCALDIF BIT(30) +#define STM32H7_DEEPPWD BIT(29) +#define STM32H7_ADVREGEN BIT(28) +#define STM32H7_LINCALRDYW6 BIT(27) +#define STM32H7_LINCALRDYW5 BIT(26) +#define STM32H7_LINCALRDYW4 BIT(25) +#define STM32H7_LINCALRDYW3 BIT(24) +#define STM32H7_LINCALRDYW2 BIT(23) +#define STM32H7_LINCALRDYW1 BIT(22) +#define STM32H7_ADCALLIN BIT(16) +#define STM32H7_BOOST BIT(8) +#define STM32H7_ADSTP BIT(4) +#define STM32H7_ADSTART BIT(2) +#define STM32H7_ADDIS BIT(1) +#define STM32H7_ADEN BIT(0) + +/* STM32H7_ADC_CFGR bit fields */ +#define STM32H7_EXTEN_SHIFT 10 +#define STM32H7_EXTEN_MASK GENMASK(11, 10) +#define STM32H7_EXTSEL_SHIFT 5 +#define STM32H7_EXTSEL_MASK GENMASK(9, 5) +#define STM32H7_RES_SHIFT 2 +#define STM32H7_RES_MASK GENMASK(4, 2) +#define STM32H7_DMNGT_SHIFT 0 +#define STM32H7_DMNGT_MASK GENMASK(1, 0) + +enum stm32h7_adc_dmngt { + STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */ + STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */ + STM32H7_DMNGT_DFSDM, /* DFSDM mode */ + STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */ +}; + +/* STM32H7_ADC_CALFACT - bit fields */ +#define STM32H7_CALFACT_D_SHIFT 16 +#define STM32H7_CALFACT_D_MASK GENMASK(26, 16) +#define STM32H7_CALFACT_S_SHIFT 0 +#define STM32H7_CALFACT_S_MASK GENMASK(10, 0) + +/* STM32H7_ADC_CALFACT2 - bit fields */ +#define STM32H7_LINCALFACT_SHIFT 0 +#define STM32H7_LINCALFACT_MASK GENMASK(29, 0) + +/* STM32H7_ADC_CSR - bit fields */ +#define STM32H7_EOC_SLV BIT(18) +#define STM32H7_EOC_MST BIT(2) + +/* STM32H7_ADC_CCR - bit fields */ +#define STM32H7_PRESC_SHIFT 18 +#define STM32H7_PRESC_MASK GENMASK(21, 18) +#define STM32H7_CKMODE_SHIFT 16 +#define STM32H7_CKMODE_MASK GENMASK(17, 16) + /** * struct stm32_adc_common - stm32 ADC driver common data (for all instances) * @base: control registers base cpu addr diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 378411853d7516b1fc3121da560f9e1679583ed4..0409dcf5b04796643806cb164010171d497dcd42 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -27,115 +27,6 @@ #include "stm32-adc-core.h" -/* STM32F4 - Registers for each ADC instance */ -#define STM32F4_ADC_SR 0x00 -#define STM32F4_ADC_CR1 0x04 -#define STM32F4_ADC_CR2 0x08 -#define STM32F4_ADC_SMPR1 0x0C -#define STM32F4_ADC_SMPR2 0x10 -#define STM32F4_ADC_HTR 0x24 -#define STM32F4_ADC_LTR 0x28 -#define STM32F4_ADC_SQR1 0x2C -#define STM32F4_ADC_SQR2 0x30 -#define STM32F4_ADC_SQR3 0x34 -#define STM32F4_ADC_JSQR 0x38 -#define STM32F4_ADC_JDR1 0x3C -#define STM32F4_ADC_JDR2 0x40 -#define STM32F4_ADC_JDR3 0x44 -#define STM32F4_ADC_JDR4 0x48 -#define STM32F4_ADC_DR 0x4C - -/* STM32F4_ADC_SR - bit fields */ -#define STM32F4_STRT BIT(4) -#define STM32F4_EOC BIT(1) - -/* STM32F4_ADC_CR1 - bit fields */ -#define STM32F4_RES_SHIFT 24 -#define STM32F4_RES_MASK GENMASK(25, 24) -#define 
STM32F4_SCAN BIT(8) -#define STM32F4_EOCIE BIT(5) - -/* STM32F4_ADC_CR2 - bit fields */ -#define STM32F4_SWSTART BIT(30) -#define STM32F4_EXTEN_SHIFT 28 -#define STM32F4_EXTEN_MASK GENMASK(29, 28) -#define STM32F4_EXTSEL_SHIFT 24 -#define STM32F4_EXTSEL_MASK GENMASK(27, 24) -#define STM32F4_EOCS BIT(10) -#define STM32F4_DDS BIT(9) -#define STM32F4_DMA BIT(8) -#define STM32F4_ADON BIT(0) - -/* STM32H7 - Registers for each ADC instance */ -#define STM32H7_ADC_ISR 0x00 -#define STM32H7_ADC_IER 0x04 -#define STM32H7_ADC_CR 0x08 -#define STM32H7_ADC_CFGR 0x0C -#define STM32H7_ADC_SMPR1 0x14 -#define STM32H7_ADC_SMPR2 0x18 -#define STM32H7_ADC_PCSEL 0x1C -#define STM32H7_ADC_SQR1 0x30 -#define STM32H7_ADC_SQR2 0x34 -#define STM32H7_ADC_SQR3 0x38 -#define STM32H7_ADC_SQR4 0x3C -#define STM32H7_ADC_DR 0x40 -#define STM32H7_ADC_DIFSEL 0xC0 -#define STM32H7_ADC_CALFACT 0xC4 -#define STM32H7_ADC_CALFACT2 0xC8 - -/* STM32H7_ADC_ISR - bit fields */ -#define STM32MP1_VREGREADY BIT(12) -#define STM32H7_EOC BIT(2) -#define STM32H7_ADRDY BIT(0) - -/* STM32H7_ADC_IER - bit fields */ -#define STM32H7_EOCIE STM32H7_EOC - -/* STM32H7_ADC_CR - bit fields */ -#define STM32H7_ADCAL BIT(31) -#define STM32H7_ADCALDIF BIT(30) -#define STM32H7_DEEPPWD BIT(29) -#define STM32H7_ADVREGEN BIT(28) -#define STM32H7_LINCALRDYW6 BIT(27) -#define STM32H7_LINCALRDYW5 BIT(26) -#define STM32H7_LINCALRDYW4 BIT(25) -#define STM32H7_LINCALRDYW3 BIT(24) -#define STM32H7_LINCALRDYW2 BIT(23) -#define STM32H7_LINCALRDYW1 BIT(22) -#define STM32H7_ADCALLIN BIT(16) -#define STM32H7_BOOST BIT(8) -#define STM32H7_ADSTP BIT(4) -#define STM32H7_ADSTART BIT(2) -#define STM32H7_ADDIS BIT(1) -#define STM32H7_ADEN BIT(0) - -/* STM32H7_ADC_CFGR bit fields */ -#define STM32H7_EXTEN_SHIFT 10 -#define STM32H7_EXTEN_MASK GENMASK(11, 10) -#define STM32H7_EXTSEL_SHIFT 5 -#define STM32H7_EXTSEL_MASK GENMASK(9, 5) -#define STM32H7_RES_SHIFT 2 -#define STM32H7_RES_MASK GENMASK(4, 2) -#define STM32H7_DMNGT_SHIFT 0 -#define STM32H7_DMNGT_MASK GENMASK(1, 0) - -enum stm32h7_adc_dmngt { - STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */ - STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */ - STM32H7_DMNGT_DFSDM, /* DFSDM mode */ - STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */ -}; - -/* STM32H7_ADC_CALFACT - bit fields */ -#define STM32H7_CALFACT_D_SHIFT 16 -#define STM32H7_CALFACT_D_MASK GENMASK(26, 16) -#define STM32H7_CALFACT_S_SHIFT 0 -#define STM32H7_CALFACT_S_MASK GENMASK(10, 0) - -/* STM32H7_ADC_CALFACT2 - bit fields */ -#define STM32H7_LINCALFACT_SHIFT 0 -#define STM32H7_LINCALFACT_MASK GENMASK(29, 0) - /* Number of linear calibration shadow registers / LINCALRDYW control bits */ #define STM32H7_LINCALFACT_NUM 6 @@ -1449,7 +1340,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev) cookie = dmaengine_submit(desc); ret = dma_submit_error(cookie); if (ret) { - dmaengine_terminate_all(adc->dma_chan); + dmaengine_terminate_sync(adc->dma_chan); return ret; } @@ -1522,7 +1413,7 @@ static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) dev_err(&indio_dev->dev, "predisable failed\n"); if (adc->dma_chan) - dmaengine_terminate_all(adc->dma_chan); + dmaengine_terminate_sync(adc->dma_chan); if (stm32_adc_set_trig(indio_dev, NULL)) dev_err(&indio_dev->dev, "Can't clear trigger\n"); diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index fcd4a1c00ca0574d02326602f68da118a499fca4..f5586dd6414d2c579b0c7c927c8d58e37fea92ca 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ 
-981,11 +981,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO); if (adc->dev_data->type == DFSDM_AUDIO) { - ch->scan_type.sign = 's'; ch->ext_info = dfsdm_adc_audio_ext_info; } else { - ch->scan_type.sign = 'u'; + ch->scan_type.shift = 8; } + ch->scan_type.sign = 's'; ch->scan_type.realbits = 24; ch->scan_type.storagebits = 32; @@ -1144,6 +1144,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev) * So IRQ associated to filter instance 0 is dedicated to the Filter 0. */ irq = platform_get_irq(pdev, 0); + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "Failed to get IRQ: %d\n", irq); + return irq; + } + ret = devm_request_irq(dev, irq, stm32_dfsdm_irq, 0, pdev->name, adc); if (ret < 0) { diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c index bf089f5d622532740885146c0be6c513882d95a4..941630615e88535be61e6bb097b9af77ca8d2ac5 100644 --- a/drivers/iio/adc/stm32-dfsdm-core.c +++ b/drivers/iio/adc/stm32-dfsdm-core.c @@ -213,6 +213,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev, } priv->dfsdm.phys_base = res->start; priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->dfsdm.base)) + return PTR_ERR(priv->dfsdm.base); /* * "dfsdm" clock is mandatory for DFSDM peripheral clocking. @@ -222,8 +224,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev, */ priv->clk = devm_clk_get(&pdev->dev, "dfsdm"); if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n"); - return -EINVAL; + ret = PTR_ERR(priv->clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret); + return ret; } priv->aclk = devm_clk_get(&pdev->dev, "audio"); diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index a5bd5944bc66083d1a866e54e873b7ed46bdd52f..c9cd7e5c1b61481a93850131a563a806fdad1d1c 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -56,6 +56,9 @@ struct ti_ads7950_state { struct spi_message ring_msg; struct spi_message scan_single_msg; + /* Lock to protect the spi xfer buffers */ + struct mutex slock; + struct regulator *reg; unsigned int vref_mv; @@ -277,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p) struct ti_ads7950_state *st = iio_priv(indio_dev); int ret; + mutex_lock(&st->slock); ret = spi_sync(st->spi, &st->ring_msg); if (ret < 0) goto out; @@ -285,6 +289,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p) iio_get_time_ns(indio_dev)); out: + mutex_unlock(&st->slock); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -295,7 +300,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch) struct ti_ads7950_state *st = iio_priv(indio_dev); int ret, cmd; - mutex_lock(&indio_dev->mlock); + mutex_lock(&st->slock); cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings; st->single_tx = cpu_to_be16(cmd); @@ -307,7 +312,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch) ret = be16_to_cpu(st->single_rx); out: - mutex_unlock(&indio_dev->mlock); + mutex_unlock(&st->slock); return ret; } @@ -423,16 +428,19 @@ static int ti_ads7950_probe(struct spi_device *spi) if (ACPI_COMPANION(&spi->dev)) st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT; + mutex_init(&st->slock); + st->reg = devm_regulator_get(&spi->dev, "vref"); if (IS_ERR(st->reg)) { dev_err(&spi->dev, "Failed get get regulator \"vref\"\n"); - 
return PTR_ERR(st->reg); + ret = PTR_ERR(st->reg); + goto error_destroy_mutex; } ret = regulator_enable(st->reg); if (ret) { dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n"); - return ret; + goto error_destroy_mutex; } ret = iio_triggered_buffer_setup(indio_dev, NULL, @@ -454,6 +462,8 @@ static int ti_ads7950_probe(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); error_disable_reg: regulator_disable(st->reg); +error_destroy_mutex: + mutex_destroy(&st->slock); return ret; } @@ -466,6 +476,7 @@ static int ti_ads7950_remove(struct spi_device *spi) iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); + mutex_destroy(&st->slock); return 0; } diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 184d686ebd9958ec316a53b16e1cab667b4518fa..7f16c77b99fb4aea62c902ceb05473012b3b9821 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -41,6 +41,7 @@ #define ADS8688_VREF_MV 4096 #define ADS8688_REALBITS 16 +#define ADS8688_MAX_CHANNELS 8 /* * enum ads8688_range - ADS8688 reference voltage range @@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; - u16 buffer[8]; + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; int i, j = 0; for (i = 0; i < indio_dev->masklength; i++) { @@ -396,7 +397,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, buffer, - pf->timestamp); + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 3f6be5ac049a864fcf792530afce1b909cc05f66..1ae86e7359f73dd8b40fd41d9abffe6ae8e49846 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -1290,6 +1290,7 @@ static int xadc_probe(struct platform_device *pdev) err_free_irq: free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); err_clk_disable_unprepare: clk_disable_unprepare(xadc->clk); err_free_samplerate_trigger: @@ -1319,8 +1320,8 @@ static int xadc_remove(struct platform_device *pdev) iio_triggered_buffer_cleanup(indio_dev); } free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); clk_disable_unprepare(xadc->clk); - cancel_delayed_work(&xadc->zynq_unmask_work); kfree(xadc->data); kfree(indio_dev->channels); diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index a406ad31b096f6755121fede4b7a6e2de3a24342..3a20cb5d9bffc29b6869954a6bbef94ecdcd42b5 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c @@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_TEMP: - *val = 1; /* 0.01 */ - *val2 = 100; - break; + *val = 10; + return IIO_VAL_INT; case IIO_PH: *val = 1; /* 0.001 */ *val2 = 1000; @@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct atlas_data *data = iio_priv(indio_dev); - __be32 reg = cpu_to_be32(val); + __be32 reg = cpu_to_be32(val / 10); if (val2 != 0 || val < 0 || val > 20000) return -EINVAL; diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index e049323f209ae83ba8a8fd35deec4e9934cc146e..71dd635fce2d0690a7454a1364d7ae64e4dc3a67 100644 --- a/drivers/iio/chemical/bme680.h +++ 
b/drivers/iio/chemical/bme680.h @@ -2,11 +2,9 @@ #ifndef BME680_H_ #define BME680_H_ -#define BME680_REG_CHIP_I2C_ID 0xD0 -#define BME680_REG_CHIP_SPI_ID 0x50 +#define BME680_REG_CHIP_ID 0xD0 #define BME680_CHIP_ID_VAL 0x61 -#define BME680_REG_SOFT_RESET_I2C 0xE0 -#define BME680_REG_SOFT_RESET_SPI 0x60 +#define BME680_REG_SOFT_RESET 0xE0 #define BME680_CMD_SOFTRESET 0xB6 #define BME680_REG_STATUS 0x73 #define BME680_SPI_MEM_PAGE_BIT BIT(4) diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 7d9bb62baa3fd3e8620f377143ecaca9dae69ae4..b2c677024521bfc951e957136b5bc821ee720ce4 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -63,9 +63,23 @@ struct bme680_data { s32 t_fine; }; +static const struct regmap_range bme680_volatile_ranges[] = { + regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB), + regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS), + regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG), +}; + +static const struct regmap_access_table bme680_volatile_table = { + .yes_ranges = bme680_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges), +}; + const struct regmap_config bme680_regmap_config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0xef, + .volatile_table = &bme680_volatile_table, + .cache_type = REGCACHE_RBTREE, }; EXPORT_SYMBOL(bme680_regmap_config); @@ -330,10 +344,14 @@ static s16 bme680_compensate_temp(struct bme680_data *data, s64 var1, var2, var3; s16 calc_temp; - var1 = (adc_temp >> 3) - (calib->par_t1 << 1); + /* If the calibration is invalid, attempt to reload it */ + if (!calib->par_t2) + bme680_read_calib(data, calib); + + var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1); var2 = (var1 * calib->par_t2) >> 11; var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; - var3 = (var3 * (calib->par_t3 << 4)) >> 14; + var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14; data->t_fine = var2 + var3; calc_temp = (data->t_fine * 5 + 128) >> 8; @@ -356,9 +374,9 @@ static u32 bme680_compensate_press(struct bme680_data *data, var1 = (data->t_fine >> 1) - 64000; var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2; var2 = var2 + (var1 * calib->par_p5 << 1); - var2 = (var2 >> 2) + (calib->par_p4 << 16); + var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16); var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) * - (calib->par_p3 << 5)) >> 3) + + ((s32)calib->par_p3 << 5)) >> 3) + ((calib->par_p2 * var1) >> 1); var1 = var1 >> 18; var1 = ((32768 + var1) * calib->par_p1) >> 15; @@ -376,7 +394,7 @@ static u32 bme680_compensate_press(struct bme680_data *data, var3 = ((press_comp >> 8) * (press_comp >> 8) * (press_comp >> 8) * calib->par_p10) >> 17; - press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4; + press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4; return press_comp; } @@ -402,7 +420,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data, (((temp_scaled * ((temp_scaled * calib->par_h5) / 100)) >> 6) / 100) + (1 << 14))) >> 10; var3 = var1 * var2; - var4 = calib->par_h6 << 7; + var4 = (s32)calib->par_h6 << 7; var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4; var5 = ((var3 >> 14) * (var3 >> 14)) >> 10; var6 = (var4 * var5) >> 1; @@ -591,8 +609,7 @@ static int bme680_gas_config(struct bme680_data *data) return ret; } -static int bme680_read_temp(struct bme680_data *data, - int *val, int *val2) +static int bme680_read_temp(struct bme680_data *data, int *val) { struct device *dev = regmap_get_device(data->regmap); int ret; @@ -625,10 +642,9 
@@ static int bme680_read_temp(struct bme680_data *data, * compensate_press/compensate_humid to get compensated * pressure/humidity readings. */ - if (val && val2) { - *val = comp_temp; - *val2 = 100; - return IIO_VAL_FRACTIONAL; + if (val) { + *val = comp_temp * 10; /* Centidegrees to millidegrees */ + return IIO_VAL_INT; } return ret; @@ -643,7 +659,7 @@ static int bme680_read_press(struct bme680_data *data, s32 adc_press; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -676,7 +692,7 @@ static int bme680_read_humid(struct bme680_data *data, u32 comp_humidity; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -769,7 +785,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_TEMP: - return bme680_read_temp(data, val, val2); + return bme680_read_temp(data, val); case IIO_PRESSURE: return bme680_read_press(data, val, val2); case IIO_HUMIDITYRELATIVE: @@ -905,8 +921,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, { struct iio_dev *indio_dev; struct bme680_data *data; + unsigned int val; int ret; + ret = regmap_write(regmap, BME680_REG_SOFT_RESET, + BME680_CMD_SOFTRESET); + if (ret < 0) { + dev_err(dev, "Failed to reset chip\n"); + return ret; + } + + ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); + if (ret < 0) { + dev_err(dev, "Error reading chip ID\n"); + return ret; + } + + if (val != BME680_CHIP_ID_VAL) { + dev_err(dev, "Wrong chip ID, got %x expected %x\n", + val, BME680_CHIP_ID_VAL); + return -ENODEV; + } + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c index 06d4be539d2e32092aa21a7d51aaceafcc88e2b8..cfc4449edf1b9ed5e0071b9399f5f98b9be1f634 100644 --- a/drivers/iio/chemical/bme680_i2c.c +++ b/drivers/iio/chemical/bme680_i2c.c @@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client, { struct regmap *regmap; const char *name = NULL; - unsigned int val; - int ret; regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); if (IS_ERR(regmap)) { @@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client, return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&client->dev, "Failed to reset chip\n"); - return ret; - } - - ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val); - if (ret < 0) { - dev_err(&client->dev, "Error reading I2C chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - if (id) name = id->name; diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c index c9fb05e8d0b9a926e3e0d750673192cd9eb772c9..881778e55d38e8e4d092a1178494771d7f122b1f 100644 --- a/drivers/iio/chemical/bme680_spi.c +++ b/drivers/iio/chemical/bme680_spi.c @@ -11,28 +11,93 @@ #include "bme680.h" +struct bme680_spi_bus_context { + struct spi_device *spi; + u8 current_page; +}; + +/* + * In SPI mode there are only 7 address bits, a "page" register determines + * which part of the 8-bit range is active. 
This function looks at the address + * and writes the page selection bit if needed + */ +static int bme680_regmap_spi_select_page( + struct bme680_spi_bus_context *ctx, u8 reg) +{ + struct spi_device *spi = ctx->spi; + int ret; + u8 buf[2]; + u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */ + + if (page == ctx->current_page) + return 0; + + /* + * Data sheet claims we're only allowed to change bit 4, so we must do + * a read-modify-write on each and every page select + */ + buf[0] = BME680_REG_STATUS; + ret = spi_write_then_read(spi, buf, 1, buf + 1, 1); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + buf[0] = BME680_REG_STATUS; + if (page) + buf[1] |= BME680_SPI_MEM_PAGE_BIT; + else + buf[1] &= ~BME680_SPI_MEM_PAGE_BIT; + + ret = spi_write(spi, buf, 2); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + ctx->current_page = page; + + return 0; +} + static int bme680_regmap_spi_write(void *context, const void *data, size_t count) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; u8 buf[2]; memcpy(buf, data, 2); + + ret = bme680_regmap_spi_select_page(ctx, buf[0]); + if (ret) + return ret; + /* * The SPI register address (= full register address without bit 7) * and the write command (bit7 = RW = '0') */ buf[0] &= ~0x80; - return spi_write_then_read(spi, buf, 2, NULL, 0); + return spi_write(spi, buf, 2); } static int bme680_regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; + u8 addr = *(const u8 *)reg; + + ret = bme680_regmap_spi_select_page(ctx, addr); + if (ret) + return ret; - return spi_write_then_read(spi, reg, reg_size, val, val_size); + addr |= 0x80; /* bit7 = RW = '1' */ + + return spi_write_then_read(spi, &addr, 1, val, val_size); } static struct regmap_bus bme680_regmap_bus = { @@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = { static int bme680_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); + struct bme680_spi_bus_context *bus_context; struct regmap *regmap; - unsigned int val; int ret; spi->bits_per_word = 8; @@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi) return ret; } + bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL); + if (!bus_context) + return -ENOMEM; + + bus_context->spi = spi; + bus_context->current_page = 0xff; /* Undefined on warm boot */ + regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, - &spi->dev, &bme680_regmap_config); + bus_context, &bme680_regmap_config); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to register spi regmap %d\n", (int)PTR_ERR(regmap)); return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&spi->dev, "Failed to reset chip\n"); - return ret; - } - - /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */ - ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val); - if (ret < 0) { - dev_err(&spi->dev, "Error reading SPI chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - /* - * select Page 1 of spi_mem_page to enable access to - * to registers from address 
0x00 to 0x7F. - */ - ret = regmap_write_bits(regmap, BME680_REG_STATUS, - BME680_SPI_MEM_PAGE_BIT, - BME680_SPI_MEM_PAGE_1_VAL); - if (ret < 0) { - dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n"); - return ret; - } - return bme680_core_probe(&spi->dev, regmap, id->name); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 89cb0066a6e0839f49fd68fb2395e8425b174c90..8d76afb87d87c58322b3ee8835ea31f5edc5a834 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, * Do not use IIO_DEGREE_TO_RAD to avoid precision * loss. Round to the nearest integer. */ - *val = div_s64(val64 * 314159 + 9000000ULL, 1000); - *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1); - ret = IIO_VAL_FRACTIONAL; + *val = 0; + *val2 = div_s64(val64 * 3141592653ULL, + 180 << (CROS_EC_SENSOR_BITS - 1)); + ret = IIO_VAL_INT_PLUS_NANO; break; case MOTIONSENSE_TYPE_MAG: /* diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c index 645f2e3975db45e7edc1a3640ba600c39b64b1ea..e38f704d88b7e7f9c52481b7eee5fb8115ea1566 100644 --- a/drivers/iio/common/ssp_sensors/ssp_iio.c +++ b/drivers/iio/common/ssp_sensors/ssp_iio.c @@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf, unsigned int len, int64_t timestamp) { __le32 time; - int64_t calculated_time; + int64_t calculated_time = 0; struct ssp_sensor_data *spd = iio_priv(indio_dev); if (indio_dev->scan_bytes == 0) diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 80beb64e9e0c66d843fd1acb6d721d6c7c163994..69f4cfa6494b5bdb596b76f633ceea6a5fbb5a63 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -59,8 +59,8 @@ config AD5446 help Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, - AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612, - AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs + AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611, + AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101. To compile this driver as a module, choose M here: the diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index bf4fc40ec84d93bb5b39edd0f6c510605b8580cd..2f98cb2a3b9645ff08d522a5c7b260db3f986359 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -808,6 +808,40 @@ static int ad5064_set_config(struct ad5064_state *st, unsigned int val) return ad5064_write(st, cmd, 0, val, 0); } +static int ad5064_request_vref(struct ad5064_state *st, struct device *dev) +{ + unsigned int i; + int ret; + + for (i = 0; i < ad5064_num_vref(st); ++i) + st->vref_reg[i].supply = ad5064_vref_name(st, i); + + if (!st->chip_info->internal_vref) + return devm_regulator_bulk_get(dev, ad5064_num_vref(st), + st->vref_reg); + + /* + * This assumes that when the regulator has an internal VREF + * there is only one external VREF connection, which is + * currently the case for all supported devices. 
+ */ + st->vref_reg[0].consumer = devm_regulator_get_optional(dev, "vref"); + if (!IS_ERR(st->vref_reg[0].consumer)) + return 0; + + ret = PTR_ERR(st->vref_reg[0].consumer); + if (ret != -ENODEV) + return ret; + + /* If no external regulator was supplied use the internal VREF */ + st->use_internal_vref = true; + ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE); + if (ret) + dev_err(dev, "Failed to enable internal vref: %d\n", ret); + + return ret; +} + static int ad5064_probe(struct device *dev, enum ad5064_type type, const char *name, ad5064_write_func write) { @@ -828,22 +862,11 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type, st->dev = dev; st->write = write; - for (i = 0; i < ad5064_num_vref(st); ++i) - st->vref_reg[i].supply = ad5064_vref_name(st, i); + ret = ad5064_request_vref(st, dev); + if (ret) + return ret; - ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st), - st->vref_reg); - if (ret) { - if (!st->chip_info->internal_vref) - return ret; - st->use_internal_vref = true; - ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE); - if (ret) { - dev_err(dev, "Failed to enable internal vref: %d\n", - ret); - return ret; - } - } else { + if (!st->use_internal_vref) { ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg); if (ret) return ret; diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index fd26a4272fc5bc98284d1491e25cd86f16d8f80d..d3ce5def4f65970278768f295d11f81e78c28b4a 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -328,6 +328,7 @@ enum ad5446_supported_spi_device_ids { ID_AD5541A, ID_AD5512A, ID_AD5553, + ID_AD5600, ID_AD5601, ID_AD5611, ID_AD5621, @@ -382,6 +383,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = { .channel = AD5446_CHANNEL(14, 16, 0), .write = ad5446_write, }, + [ID_AD5600] = { + .channel = AD5446_CHANNEL(16, 16, 0), + .write = ad5446_write, + }, [ID_AD5601] = { .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6), .write = ad5446_write, @@ -449,6 +454,7 @@ static const struct spi_device_id ad5446_spi_ids[] = { {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */ {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */ {"ad5553", ID_AD5553}, + {"ad5600", ID_AD5600}, {"ad5601", ID_AD5601}, {"ad5611", ID_AD5611}, {"ad5621", ID_AD5621}, diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index 2ddbfc3fdbae7430330466c9dc386ae8d3e26603..cba62ad26cd864fc5bbc8894b22b1d582acf48e3 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c @@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; - *val = ret; + *val = (ret >> chan->scan_type.shift) & + GENMASK(chan->scan_type.realbits - 1, 0); return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c index 883a475620550d4999e72b1067265aca8bb43ba7..714a97f9131997bac0bde2b7af829f449ac80bf3 100644 --- a/drivers/iio/dac/ds4424.c +++ b/drivers/iio/dac/ds4424.c @@ -166,7 +166,7 @@ static int ds4424_verify_chip(struct iio_dev *indio_dev) { int ret, val; - ret = ds4424_get_value(indio_dev, &val, DS4424_DAC_ADDR(0)); + ret = ds4424_get_value(indio_dev, &val, 0); if (ret < 0) dev_err(&indio_dev->dev, "%s failed. 
ret: %d\n", __func__, ret); diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 8b5aad4c32d90effd27cb6a19f3373d5c9060879..30dc2775cbfbd5c1d09e98d0e27930deaa1f78b5 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, inoutbuf[0] = 0x60; /* write EEPROM */ inoutbuf[0] |= data->ref_mode << 3; + inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0; inoutbuf[1] = data->dac_value >> 4; inoutbuf[2] = (data->dac_value & 0xf) << 4; diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c index bf9aa3fc0534e6786c5f5fe605cfd43b9a383690..b5190d1dae8e33a15e148ff78471fb09dd4b90d1 100644 --- a/drivers/iio/dac/mcp4922.c +++ b/drivers/iio/dac/mcp4922.c @@ -94,17 +94,22 @@ static int mcp4922_write_raw(struct iio_dev *indio_dev, long mask) { struct mcp4922_state *state = iio_priv(indio_dev); + int ret; if (val2 != 0) return -EINVAL; switch (mask) { case IIO_CHAN_INFO_RAW: - if (val > GENMASK(chan->scan_type.realbits-1, 0)) + if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0)) return -EINVAL; val <<= chan->scan_type.shift; - state->value[chan->channel] = val; - return mcp4922_spi_write(state, chan->channel, val); + + ret = mcp4922_spi_write(state, chan->channel, val); + if (!ret) + state->value[chan->channel] = val; + return ret; + default: return -EINVAL; } diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 63ca31628a93af8454f5fce9ced15d752f28f16d..92c07ab826eb32c9d4665728159dbd4358cefc80 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: return bmg160_get_filter(data, val); case IIO_CHAN_INFO_SCALE: - *val = 0; switch (chan->type) { case IIO_TEMP: - *val2 = 500000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 500; + return IIO_VAL_INT; case IIO_ANGL_VEL: { int i; @@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { if (bmg160_scale_table[i].dps_range == data->dps_range) { + *val = 0; *val2 = bmg160_scale_table[i].scale; return IIO_VAL_INT_PLUS_MICRO; } diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index 36941e69f95956a71898cb714fd3da49b1a19dbf..88e857c4baf4504370032610b3ae0d12058a622a 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&gyro_state->common_attributes, true); report_id = gyro_state->gyro[chan->scan_index].report_id; + min = gyro_state->gyro[chan->scan_index].logical_minimum; address = gyro_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( gyro_state->common_attributes.hsdev, HID_USAGE_SENSOR_GYRO_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; hid_sensor_power_state(&gyro_state->common_attributes, diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 77fac81a3adce2245fe0bf499b60a382c08af98b..5ddebede31a6f6f3625263893ad27a0f3fb68b88 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ 
b/drivers/iio/gyro/mpu3050-core.c @@ -29,7 +29,8 @@ #include "mpu3050.h" -#define MPU3050_CHIP_ID 0x69 +#define MPU3050_CHIP_ID 0x68 +#define MPU3050_CHIP_ID_MASK 0x7E /* * Register map: anything suffixed *_H is a big-endian high byte and always @@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev, goto err_power_down; } - if (val != MPU3050_CHIP_ID) { - dev_err(dev, "unsupported chip id %02x\n", (u8)val); + if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) { + dev_err(dev, "unsupported chip id %02x\n", + (u8)(val & MPU3050_CHIP_ID_MASK)); ret = -ENODEV; goto err_power_down; } diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index a739fff01c6b140f809cf66e7346079820e0f581..2adbd5c2a9832cb53ddf19df52c390d86103febe 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -250,14 +250,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4403_data *afe = iio_priv(indio_dev); - unsigned int reg = afe4403_channel_values[chan->address]; - unsigned int field = afe4403_channel_leds[chan->address]; + unsigned int reg, field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + reg = afe4403_channel_values[chan->address]; ret = afe4403_read(afe, reg, val); if (ret) return ret; @@ -267,6 +267,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + field = afe4403_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[field], val); if (ret) return ret; diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 11910922e6556499ec2315983075c8b525f1370d..56ff178bd07e59c0de0fd2df26fc367e77ed1615 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -256,20 +256,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int value_reg = afe4404_channel_values[chan->address]; - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int value_reg, led_field, offdac_field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + value_reg = afe4404_channel_values[chan->address]; ret = regmap_read(afe->regmap, value_reg, val); if (ret) return ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; ret = regmap_field_read(afe->fields[offdac_field], val); if (ret) return ret; @@ -279,6 +279,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[led_field], val); if (ret) return ret; @@ -301,19 +302,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int led_field, offdac_field; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; return regmap_field_write(afe->fields[offdac_field], val); } break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; return 
regmap_field_write(afe->fields[led_field], val); } break; diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 066e05f920810c74afdac0b535046af455c67388..ff6666ac5d68b7493564535b7c3139d6db258dd4 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -229,7 +229,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, *val2 = 65536; return IIO_VAL_FRACTIONAL; } else { - *val = 100; + *val = 100000; *val2 = 65536; return IIO_VAL_FRACTIONAL; } diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c index beab6d6fd6e18bb5f0a927b9f7d70ea4b1e08b0d..4bc95f31c730ee99255ef179fde5466980661517 100644 --- a/drivers/iio/humidity/hid-sensor-humidity.c +++ b/drivers/iio/humidity/hid-sensor-humidity.c @@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev, HID_USAGE_SENSOR_HUMIDITY, HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY, humid_st->humidity_attr.report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + humid_st->humidity_attr.logical_minimum < 0); hid_sensor_power_state(&humid_st->common_attributes, false); return IIO_VAL_INT; diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index e70a5339acb192006bf84e732a212d5106b26889..3fc11aec98b953089b9055eed07a1825dbac694d 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -38,8 +38,11 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index a27fe208f3aed80a949f50e98be96dc1f1a8f92e..ea24701beb3ea8f985846effcb79a0a1c46f3153 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -270,8 +270,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2) struct adis16480 *st = iio_priv(indio_dev); unsigned int t; + if (val < 0 || val2 < 0) + return -EINVAL; + t = val * 1000 + val2 / 1000; - if (t <= 0) + if (t == 0) return -EINVAL; t = 2460000 / t; @@ -724,6 +727,7 @@ static const struct iio_info adis16480_info = { .read_raw = &adis16480_read_raw, .write_raw = &adis16480_write_raw, .update_scan_mode = adis_update_scan_mode, + .debugfs_reg_access = adis_debugfs_reg_access, }; static int adis16480_stop_device(struct iio_dev *indio_dev) diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 76643c5571aa87286bef0eeee0a557f04326ac6b..e59d0438de7326be74b3c225eb2247fbf6bdde0f 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } rx = adis->buffer; tx = rx + scan_count; diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index 5483b2ea754dd37c624fbe513b5bf721266cf88b..d2fe9dbddda74e5937cc767649d555458b8d553f 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -13,8 +13,8 @@ config INV_MPU6050_I2C select INV_MPU6050_IIO select REGMAP_I2C help - This driver supports the Invensense MPU6050/6500/9150 and ICM20608 - motion tracking 
devices over I2C. + This driver can be built as a module. The module will be called inv-mpu6050-i2c. @@ -24,7 +24,7 @@ config INV_MPU6050_SPI select INV_MPU6050_IIO select REGMAP_SPI help - This driver supports the Invensense MPU6050/6500/9150 and ICM20608 - motion tracking devices over SPI. + This driver supports the Invensense MPU6050/6500/9150 and + ICM20608/20602 motion tracking devices over SPI. This driver can be built as a module. The module will be called inv-mpu6050-spi. diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index d80ef468508a19763ccbe0a70be5397158440c77..6b560d99f38518d8a7d43e21027a6c5cf4aac718 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -37,6 +37,29 @@ static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724}; */ static const int accel_scale[] = {598, 1196, 2392, 4785}; +static const struct inv_mpu6050_reg_map reg_set_icm20602 = { + .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, + .lpf = INV_MPU6050_REG_CONFIG, + .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, + .user_ctrl = INV_MPU6050_REG_USER_CTRL, + .fifo_en = INV_MPU6050_REG_FIFO_EN, + .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, + .accl_config = INV_MPU6050_REG_ACCEL_CONFIG, + .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H, + .fifo_r_w = INV_MPU6050_REG_FIFO_R_W, + .raw_gyro = INV_MPU6050_REG_RAW_GYRO, + .raw_accl = INV_MPU6050_REG_RAW_ACCEL, + .temperature = INV_MPU6050_REG_TEMPERATURE, + .int_enable = INV_MPU6050_REG_INT_ENABLE, + .int_status = INV_MPU6050_REG_INT_STATUS, + .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1, + .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2, + .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG, + .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET, + .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET, + .i2c_if = INV_ICM20602_REG_I2C_IF, +}; + static const struct inv_mpu6050_reg_map reg_set_6500 = { .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, .lpf = INV_MPU6050_REG_CONFIG, @@ -57,6 +80,7 @@ static const struct inv_mpu6050_reg_map reg_set_6500 = { .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG, .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET, .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET, + .i2c_if = 0, }; static const struct inv_mpu6050_reg_map reg_set_6050 = { @@ -77,6 +101,7 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = { .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG, .accl_offset = INV_MPU6050_REG_ACCEL_OFFSET, .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET, + .i2c_if = 0, }; static const struct inv_mpu6050_chip_config chip_config_6050 = { @@ -96,48 +121,72 @@ static const struct inv_mpu6050_hw hw_info[] = { .name = "MPU6050", .reg = &reg_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU6500_WHOAMI_VALUE, .name = "MPU6500", .reg = &reg_set_6500, .config = &chip_config_6050, + .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6515_WHOAMI_VALUE, .name = "MPU6515", .reg = &reg_set_6500, .config = &chip_config_6050, + .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6000_WHOAMI_VALUE, .name = "MPU6000", .reg = &reg_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9150_WHOAMI_VALUE, .name = "MPU9150", .reg = 
&reg_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9250_WHOAMI_VALUE, .name = "MPU9250", .reg = &reg_set_6500, .config = &chip_config_6050, + .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU9255_WHOAMI_VALUE, .name = "MPU9255", .reg = &reg_set_6500, .config = &chip_config_6050, + .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_ICM20608_WHOAMI_VALUE, .name = "ICM20608", .reg = &reg_set_6500, .config = &chip_config_6050, + .fifo_size = 512, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, + }, + { + .whoami = INV_ICM20602_WHOAMI_VALUE, + .name = "ICM20602", + .reg = &reg_set_icm20602, + .config = &chip_config_6050, + .fifo_size = 1008, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, }; @@ -438,9 +487,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: - *val = 0; - *val2 = INV_MPU6050_TEMP_SCALE; - + *val = st->hw->temp.scale / 1000000; + *val2 = st->hw->temp.scale % 1000000; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; @@ -448,8 +496,7 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_OFFSET: switch (chan->type) { case IIO_TEMP: - *val = INV_MPU6050_TEMP_OFFSET; - + *val = st->hw->temp.offset; return IIO_VAL_INT; default: return -EINVAL; @@ -813,6 +860,73 @@ static const struct iio_chan_spec inv_mpu_channels[] = { INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z), }; +static const unsigned long inv_mpu_scan_masks[] = { + /* 3-axis accel */ + BIT(INV_MPU6050_SCAN_ACCL_X) + | BIT(INV_MPU6050_SCAN_ACCL_Y) + | BIT(INV_MPU6050_SCAN_ACCL_Z), + /* 3-axis gyro */ + BIT(INV_MPU6050_SCAN_GYRO_X) + | BIT(INV_MPU6050_SCAN_GYRO_Y) + | BIT(INV_MPU6050_SCAN_GYRO_Z), + /* 6-axis accel + gyro */ + BIT(INV_MPU6050_SCAN_ACCL_X) + | BIT(INV_MPU6050_SCAN_ACCL_Y) + | BIT(INV_MPU6050_SCAN_ACCL_Z) + | BIT(INV_MPU6050_SCAN_GYRO_X) + | BIT(INV_MPU6050_SCAN_GYRO_Y) + | BIT(INV_MPU6050_SCAN_GYRO_Z), + 0, +}; + +static const struct iio_chan_spec inv_icm20602_channels[] = { + IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP), + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) + | BIT(IIO_CHAN_INFO_OFFSET) + | BIT(IIO_CHAN_INFO_SCALE), + .scan_index = INV_ICM20602_SCAN_TEMP, + .scan_type = { + .sign = 's', + .realbits = 16, + .storagebits = 16, + .shift = 0, + .endianness = IIO_BE, + }, + }, + + INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_ICM20602_SCAN_GYRO_X), + INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_ICM20602_SCAN_GYRO_Y), + INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_ICM20602_SCAN_GYRO_Z), + + INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_ICM20602_SCAN_ACCL_Y), + INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_ICM20602_SCAN_ACCL_X), + INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z), +}; + +static const unsigned long inv_icm20602_scan_masks[] = { + /* 3-axis accel + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_ACCL_X) + | BIT(INV_ICM20602_SCAN_ACCL_Y) + | BIT(INV_ICM20602_SCAN_ACCL_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + /* 3-axis gyro + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_GYRO_X) + | BIT(INV_ICM20602_SCAN_GYRO_Y) + | BIT(INV_ICM20602_SCAN_GYRO_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + /* 6-axis accel + gyro + temp (mandatory) */ + BIT(INV_ICM20602_SCAN_ACCL_X) + | BIT(INV_ICM20602_SCAN_ACCL_Y) + | BIT(INV_ICM20602_SCAN_ACCL_Z) + | BIT(INV_ICM20602_SCAN_GYRO_X) + | 
BIT(INV_ICM20602_SCAN_GYRO_Y) + | BIT(INV_ICM20602_SCAN_GYRO_Z) + | BIT(INV_ICM20602_SCAN_TEMP), + 0, +}; + /* * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the @@ -1013,8 +1127,16 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, indio_dev->name = name; else indio_dev->name = dev_name(dev); - indio_dev->channels = inv_mpu_channels; - indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); + + if (chip_type == INV_ICM20602) { + indio_dev->channels = inv_icm20602_channels; + indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels); + indio_dev->available_scan_masks = inv_icm20602_scan_masks; + } else { + indio_dev->channels = inv_mpu_channels; + indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); + indio_dev->available_scan_masks = inv_mpu_scan_masks; + } indio_dev->info = &mpu_info; indio_dev->modes = INDIO_BUFFER_TRIGGERED; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c index dd758e3d403da9c8a14dc41d4da2b0accc7e1779..e46eb4ddea210bc2265122264989dffccda00678 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -127,6 +127,7 @@ static int inv_mpu_probe(struct i2c_client *client, st = iio_priv(dev_get_drvdata(&client->dev)); switch (st->chip_type) { case INV_ICM20608: + case INV_ICM20602: /* no i2c auxiliary bus on the chip */ break; default: @@ -179,6 +180,7 @@ static const struct i2c_device_id inv_mpu_id[] = { {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20602", INV_ICM20602}, {} }; @@ -213,6 +215,10 @@ static const struct of_device_id inv_of_match[] = { .compatible = "invensense,icm20608", .data = (void *)INV_ICM20608 }, + { + .compatible = "invensense,icm20602", + .data = (void *)INV_ICM20602 + }, { } }; MODULE_DEVICE_TABLE(of, inv_of_match); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index e69a59659dbcf9036c24abe96b8a4ef314a89143..220eba58cfbb6d43d8f85c28bf237caca60dd62a 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -44,6 +44,7 @@ * @int_pin_cfg; Controls interrupt pin configuration. * @accl_offset: Controls the accelerometer calibration offset. * @gyro_offset: Controls the gyroscope calibration offset. + * @i2c_if: Controls the i2c interface */ struct inv_mpu6050_reg_map { u8 sample_rate_div; @@ -65,6 +66,7 @@ struct inv_mpu6050_reg_map { u8 int_pin_cfg; u8 accl_offset; u8 gyro_offset; + u8 i2c_if; }; /*device enum */ @@ -77,6 +79,7 @@ enum inv_devices { INV_MPU9250, INV_MPU9255, INV_ICM20608, + INV_ICM20602, INV_NUM_PARTS }; @@ -105,12 +108,19 @@ struct inv_mpu6050_chip_config { * @name: name of the chip. * @reg: register map of the chip. * @config: configuration of the chip. + * @fifo_size: size of the FIFO in bytes. + * @temp: offset and scale to apply to raw temperature. 
*/ struct inv_mpu6050_hw { u8 whoami; u8 *name; const struct inv_mpu6050_reg_map *reg; const struct inv_mpu6050_chip_config *config; + size_t fifo_size; + struct { + int offset; + int scale; + } temp; }; /* @@ -193,12 +203,19 @@ struct inv_mpu6050_state { #define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38 #define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07 +/* ICM20602 register */ +#define INV_ICM20602_REG_I2C_IF 0x70 +#define INV_ICM20602_BIT_I2C_IF_DIS 0x40 + #define INV_MPU6050_REG_FIFO_COUNT_H 0x72 #define INV_MPU6050_REG_FIFO_R_W 0x74 #define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6 #define INV_MPU6050_FIFO_COUNT_BYTE 2 +/* ICM20602 FIFO samples include temperature readings */ +#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2 + /* mpu6500 registers */ #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 @@ -212,14 +229,20 @@ struct inv_mpu6050_state { #define INV_MPU6050_REG_UP_TIME_MIN 5000 #define INV_MPU6050_REG_UP_TIME_MAX 10000 -#define INV_MPU6050_TEMP_OFFSET 12421 -#define INV_MPU6050_TEMP_SCALE 2941 +#define INV_MPU6050_TEMP_OFFSET 12420 +#define INV_MPU6050_TEMP_SCALE 2941176 #define INV_MPU6050_MAX_GYRO_FS_PARAM 3 #define INV_MPU6050_MAX_ACCL_FS_PARAM 3 #define INV_MPU6050_THREE_AXIS 3 #define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3 #define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3 +#define INV_MPU6500_TEMP_OFFSET 7011 +#define INV_MPU6500_TEMP_SCALE 2995178 + +#define INV_ICM20608_TEMP_OFFSET 8170 +#define INV_ICM20608_TEMP_SCALE 3059976 + /* 6 + 6 round up and plus 8 */ #define INV_MPU6050_OUTPUT_DATA_SIZE 24 @@ -259,8 +282,9 @@ struct inv_mpu6050_state { #define INV_MPU9255_WHOAMI_VALUE 0x73 #define INV_MPU6515_WHOAMI_VALUE 0x74 #define INV_ICM20608_WHOAMI_VALUE 0xAF +#define INV_ICM20602_WHOAMI_VALUE 0x12 -/* scan element definition */ +/* scan element definition for generic MPU6xxx devices */ enum inv_mpu6050_scan { INV_MPU6050_SCAN_ACCL_X, INV_MPU6050_SCAN_ACCL_Y, @@ -271,6 +295,18 @@ enum inv_mpu6050_scan { INV_MPU6050_SCAN_TIMESTAMP, }; +/* scan element definition for ICM20602, which includes temperature */ +enum inv_icm20602_scan { + INV_ICM20602_SCAN_ACCL_X, + INV_ICM20602_SCAN_ACCL_Y, + INV_ICM20602_SCAN_ACCL_Z, + INV_ICM20602_SCAN_TEMP, + INV_ICM20602_SCAN_GYRO_X, + INV_ICM20602_SCAN_GYRO_Y, + INV_ICM20602_SCAN_GYRO_Z, + INV_ICM20602_SCAN_TIMESTAMP, +}; + enum inv_mpu6050_filter_e { INV_MPU6050_FILTER_256HZ_NOLPF2 = 0, INV_MPU6050_FILTER_188HZ, diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 548e042f7b5bde8b508675b23f2070a78bef14a9..0e54f2d54bd70cc75815dbf2f7c2c7a33eaad2a2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -188,9 +188,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) "failed to ack interrupt\n"); goto flush_fifo; } - /* handle fifo overflow by reseting fifo */ - if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT) - goto flush_fifo; if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) { dev_warn(regmap_get_device(st->map), "spurious interrupt with status 0x%x\n", int_status); @@ -207,6 +204,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) if (st->chip_config.gyro_fifo_enable) bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR; + if (st->chip_type == INV_ICM20602) + bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR; + /* * read fifo_count register to know how many bytes are inside the FIFO * right now @@ -216,6 +216,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) if (result) goto end_session; fifo_count 
= get_unaligned_be16(&data[0]); + + /* + * Handle fifo overflow by resetting fifo. + * Reset if there is only 3 data set free remaining to mitigate + * possible delay between reading fifo count and fifo data. + */ + nb = 3 * bytes_per_datum; + if (fifo_count >= st->hw->fifo_size - nb) { + dev_warn(regmap_get_device(st->map), "fifo overflow reset\n"); + goto flush_fifo; + } + /* compute and process all complete datum */ nb = fifo_count / bytes_per_datum; inv_mpu6050_update_period(st, pf->timestamp, nb); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c index 227f50afff22f9c5329aeca129364e8770088dfb..a112c3f45f748fc930714c64bb422fb9743d1d07 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -31,9 +31,14 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev) if (ret) return ret; - st->chip_config.user_ctrl |= INV_MPU6050_BIT_I2C_IF_DIS; - ret = regmap_write(st->map, st->reg->user_ctrl, - st->chip_config.user_ctrl); + if (st->reg->i2c_if) { + ret = regmap_write(st->map, st->reg->i2c_if, + INV_ICM20602_BIT_I2C_IF_DIS); + } else { + st->chip_config.user_ctrl |= INV_MPU6050_BIT_I2C_IF_DIS; + ret = regmap_write(st->map, st->reg->user_ctrl, + st->chip_config.user_ctrl); + } if (ret) { inv_mpu6050_set_power_itg(st, false); return ret; @@ -81,6 +86,7 @@ static const struct spi_device_id inv_mpu_id[] = { {"mpu9250", INV_MPU9250}, {"mpu9255", INV_MPU9255}, {"icm20608", INV_ICM20608}, + {"icm20602", INV_ICM20602}, {} }; diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index cd5bfe39591bb2b2d44b3848cc2d84ef2d4a38f2..dadd921a4a30fdb527faf9e0b8e359ba6fa61bc0 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, const unsigned long *mask; unsigned long *trialmask; - trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength), - sizeof(*trialmask), - GFP_KERNEL); + trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), + sizeof(*trialmask), GFP_KERNEL); if (trialmask == NULL) return -ENOMEM; if (!indio_dev->masklength) { diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index a062cfddc5af76da66a14bcf4178ee8487925099..49d4b4f1a4574771f63e6db6e2f2815d6f9a70f2 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1735,10 +1735,10 @@ EXPORT_SYMBOL(__iio_device_register); **/ void iio_device_unregister(struct iio_dev *indio_dev) { - mutex_lock(&indio_dev->info_exist_lock); - cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); + mutex_lock(&indio_dev->info_exist_lock); + iio_device_unregister_debugfs(indio_dev); iio_disable_all_buffers(indio_dev); diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c index a814828e69f5c3c47de3c3a95a8131d042dbbec5..5f5d54ce882b0c519c87210155186199d480d481 100644 --- a/drivers/iio/light/bh1750.c +++ b/drivers/iio/light/bh1750.c @@ -62,9 +62,9 @@ struct bh1750_chip_info { u16 int_time_low_mask; u16 int_time_high_mask; -} +}; -static const bh1750_chip_info_tbl[] = { +static const struct bh1750_chip_info bh1750_chip_info_tbl[] = { [BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 }, [BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 }, [BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 }, diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index 
406caaee9a3c54b6da43a4b697f302a165be927a..94f33250ba5a671d41ee193d4bcbde069ab98d1d 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev, case CHANNEL_SCAN_INDEX_INTENSITY: case CHANNEL_SCAN_INDEX_ILLUM: report_id = als_state->als_illum.report_id; - address = - HID_USAGE_SENSOR_LIGHT_ILLUM; + min = als_state->als_illum.logical_minimum; + address = HID_USAGE_SENSOR_LIGHT_ILLUM; break; default: report_id = -1; @@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev, als_state->common_attributes.hsdev, HID_USAGE_SENSOR_ALS, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&als_state->common_attributes, false); } else { diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 45107f7537b5d8e0911f381c0ef2289d19b7730e..cf5a0c242609d4f53573a10c269ac3fe6c64a36f 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev, switch (chan->scan_index) { case CHANNEL_SCAN_INDEX_PRESENCE: report_id = prox_state->prox_attr.report_id; - address = - HID_USAGE_SENSOR_HUMAN_PRESENCE; + min = prox_state->prox_attr.logical_minimum; + address = HID_USAGE_SENSOR_HUMAN_PRESENCE; break; default: report_id = -1; @@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev, prox_state->common_attributes.hsdev, HID_USAGE_SENSOR_PROX, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&prox_state->common_attributes, false); } else { diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 54d88b60e30359be56561767c668fbd3a46e2969..f9d13e4ec1083191bc482cf230ae2ebc20d14ee9 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -694,6 +694,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) struct iio_dev *iio = _iio; struct opt3001 *opt = iio_priv(iio); int ret; + bool wake_result_ready_queue = false; if (!opt->ok_to_ignore_lock) mutex_lock(&opt->lock); @@ -728,13 +729,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) } opt->result = ret; opt->result_ready = true; - wake_up(&opt->result_ready_queue); + wake_result_ready_queue = true; } out: if (!opt->ok_to_ignore_lock) mutex_unlock(&opt->lock); + if (wake_result_ready_queue) + wake_up(&opt->result_ready_queue); + return IRQ_HANDLED; } diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index d55c4885211ad6329b9096760a93d3e4d176921f..f3c0d41e5a8c270728789ddda6f6cec61085097a 100644 --- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; switch (mask) { case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&magn_state->magn_flux_attributes, true); - report_id = - magn_state->magn[chan->address].report_id; + report_id = magn_state->magn[chan->address].report_id; + min = magn_state->magn[chan->address].logical_minimum; address = 
magn_3d_addresses[chan->address]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( magn_state->magn_flux_attributes.hsdev, HID_USAGE_SENSOR_COMPASS_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { *val = 0; hid_sensor_power_state( diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c index 3de7f4426ac409d534ec4ec32b587e9a82ba53b4..86abba5827a257befbf8612e22eb5a2e693e17c0 100644 --- a/drivers/iio/magnetometer/hmc5843_i2c.c +++ b/drivers/iio/magnetometer/hmc5843_i2c.c @@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = { static int hmc5843_i2c_probe(struct i2c_client *cli, const struct i2c_device_id *id) { + struct regmap *regmap = devm_regmap_init_i2c(cli, + &hmc5843_i2c_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + return hmc5843_common_probe(&cli->dev, - devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config), + regmap, id->driver_data, id->name); } diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c index 535f03a70d630f7f37b0fda73c3e39da92c28c73..79b2b707f90e702d72c55f3165d42a5f8abe122f 100644 --- a/drivers/iio/magnetometer/hmc5843_spi.c +++ b/drivers/iio/magnetometer/hmc5843_spi.c @@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = { static int hmc5843_spi_probe(struct spi_device *spi) { int ret; + struct regmap *regmap; const struct spi_device_id *id = spi_get_device_id(spi); spi->mode = SPI_MODE_3; @@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi) if (ret) return ret; + regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + return hmc5843_common_probe(&spi->dev, - devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config), + regmap, id->driver_data, id->name); } diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index 0a9e8fadfa9de8a66dd3d1fee8d82f81cc6b10a4..37ab3056646497fd67280df4a7235917e07772f7 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state) return st_sensors_set_dataready_irq(indio_dev, state); } -static int st_magn_buffer_preenable(struct iio_dev *indio_dev) -{ - return st_sensors_set_enable(indio_dev, true); -} - static int st_magn_buffer_postenable(struct iio_dev *indio_dev) { int err; @@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev) if (err < 0) goto st_magn_buffer_postenable_error; - return err; + return st_sensors_set_enable(indio_dev, true); st_magn_buffer_postenable_error: kfree(mdata->buffer_data); @@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev) int err; struct st_sensor_data *mdata = iio_priv(indio_dev); - err = iio_triggered_buffer_predisable(indio_dev); + err = st_sensors_set_enable(indio_dev, false); if (err < 0) goto st_magn_buffer_predisable_error; - err = st_sensors_set_enable(indio_dev, false); + err = iio_triggered_buffer_predisable(indio_dev); st_magn_buffer_predisable_error: kfree(mdata->buffer_data); @@ -75,7 +70,6 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev) } static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { - .preenable = &st_magn_buffer_preenable, .postenable = &st_magn_buffer_postenable, .predisable = &st_magn_buffer_predisable, }; diff --git 
a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c index 1e5451d1ff884b15514c92e624c0beec4aaaa924..bdc5e4554ee484cfc149f5d17a392853d9b754cf 100644 --- a/drivers/iio/orientation/hid-sensor-incl-3d.c +++ b/drivers/iio/orientation/hid-sensor-incl-3d.c @@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; switch (mask) { case IIO_CHAN_INFO_RAW: hid_sensor_power_state(&incl_state->common_attributes, true); - report_id = - incl_state->incl[chan->scan_index].report_id; + report_id = incl_state->incl[chan->scan_index].report_id; + min = incl_state->incl[chan->scan_index].logical_minimum; address = incl_3d_addresses[chan->scan_index]; if (report_id >= 0) *val = sensor_hub_input_attr_get_raw_value( incl_state->common_attributes.hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); else { hid_sensor_power_state(&incl_state->common_attributes, false); diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c index 4c437918f1d282f55661213a098e9d0b6ca9049a..d7b1c00ceb4da30cc4f9dae2848206ecf7411a4d 100644 --- a/drivers/iio/pressure/hid-sensor-press.c +++ b/drivers/iio/pressure/hid-sensor-press.c @@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev, int report_id = -1; u32 address; int ret_type; + s32 min; *val = 0; *val2 = 0; @@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev, switch (chan->scan_index) { case CHANNEL_SCAN_INDEX_PRESSURE: report_id = press_state->press_attr.report_id; - address = - HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; + min = press_state->press_attr.logical_minimum; + address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE; break; default: report_id = -1; @@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev, press_state->common_attributes.hsdev, HID_USAGE_SENSOR_PRESSURE, address, report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + min < 0); hid_sensor_power_state(&press_state->common_attributes, false); } else { diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c index 09c7b9c095b07f4aaf24a15b89a02ab2d29ceb98..0428a0dfcbd4a99ea34710c12f1cb7802a126901 100644 --- a/drivers/iio/proximity/srf04.c +++ b/drivers/iio/proximity/srf04.c @@ -105,7 +105,7 @@ static int srf04_read(struct srf04_data *data) udelay(10); gpiod_set_value(data->gpiod_trig, 0); - /* it cannot take more than 20 ms */ + /* it should not take more than 20 ms until echo is rising */ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50); if (ret < 0) { mutex_unlock(&data->lock); @@ -115,7 +115,8 @@ static int srf04_read(struct srf04_data *data) return -ETIMEDOUT; } - ret = wait_for_completion_killable_timeout(&data->falling, HZ/50); + /* it cannot take more than 50 ms until echo is falling */ + ret = wait_for_completion_killable_timeout(&data->falling, HZ/20); if (ret < 0) { mutex_unlock(&data->lock); return ret; @@ -130,19 +131,19 @@ static int srf04_read(struct srf04_data *data) dt_ns = ktime_to_ns(ktime_dt); /* - * measuring more than 3 meters is beyond the capabilities of - * the sensor + * measuring more than 6,45 meters is beyond the capabilities of + * the supported sensors * ==> filter out invalid results for not measuring echos of * another us sensor * * formula: - * distance 3 m - * time = ---------- = --------- = 9404389 ns - * speed 319 m/s + * distance 6,45 * 2 m + * time = ---------- = ------------ 
= 40438871 ns + * speed 319 m/s * * using a minimum speed at -20 °C of 319 m/s */ - if (dt_ns > 9404389) + if (dt_ns > 40438871) return -EIO; time_ns = dt_ns; @@ -154,20 +155,20 @@ static int srf04_read(struct srf04_data *data) * with Temp in °C * and speed in m/s * - * use 343 m/s as ultrasonic speed at 20 °C here in absence of the + * use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the * temperature * * therefore: - * time 343 - * distance = ------ * ----- - * 10^6 2 + * time 343,5 time * 106 + * distance = ------ * ------- = ------------ + * 10^6 2 617176 * with time in ns * and distance in mm (one way) * - * because we limit to 3 meters the multiplication with 343 just + * because we limit to 6,45 meters the multiplication with 106 just * fits into 32 bit */ - distance_mm = time_ns * 343 / 2000000; + distance_mm = time_ns * 106 / 617176; return distance_mm; } diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c index beaf6fd3e337c6f962bb3dbcd49a482150ca64bf..b592fc4f007e417c0b57437a8f55064cf5d55c7a 100644 --- a/drivers/iio/temperature/hid-sensor-temperature.c +++ b/drivers/iio/temperature/hid-sensor-temperature.c @@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev, HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE, temp_st->temperature_attr.report_id, - SENSOR_HUB_SYNC); + SENSOR_HUB_SYNC, + temp_st->temperature_attr.logical_minimum < 0); hid_sensor_power_state( &temp_st->common_attributes, false); diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c index 9851311aa3fdc95795f4bcd75adb12583ade858d..2d54d9cac61dc0d08c0619662484d10f46c2b4d4 100644 --- a/drivers/iio/temperature/mlx90632.c +++ b/drivers/iio/temperature/mlx90632.c @@ -81,6 +81,8 @@ /* Magic constants */ #define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */ #define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */ +#define MLX90632_DSP_VERSION 5 /* DSP version */ +#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */ #define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */ #define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */ #define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */ @@ -666,10 +668,13 @@ static int mlx90632_probe(struct i2c_client *client, } else if (read == MLX90632_ID_CONSUMER) { dev_dbg(&client->dev, "Detected Consumer EEPROM calibration %x\n", read); + } else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) { + dev_dbg(&client->dev, + "Detected Unknown EEPROM calibration %x\n", read); } else { dev_err(&client->dev, - "EEPROM version mismatch %x (expected %x or %x)\n", - read, MLX90632_ID_CONSUMER, MLX90632_ID_MEDICAL); + "Wrong DSP version %x (expected %x)\n", + read, MLX90632_DSP_VERSION); return -EPROTONOSUPPORT; } diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 46b855a42884c89c5fdbb0988b29e1193f491326..8282d14439a35e5c7d11d35a9284e1ffd81dc051 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -408,16 +408,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct flowi6 fl6; struct dst_entry *dst; struct rt6_info *rt; - int ret; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); - if (ret < 0) - return ret; + dst = 
ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); rt = (struct rt6_info *)dst; if (ipv6_addr_any(&src_in->sin6_addr)) { @@ -716,22 +715,22 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, struct rdma_dev_addr dev_addr; struct resolve_cb_context ctx; union { - struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; int ret; - rdma_gid2ip(&sgid_addr._sockaddr, sgid); - rdma_gid2ip(&dgid_addr._sockaddr, dgid); + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid); memset(&dev_addr, 0, sizeof(dev_addr)); dev_addr.bound_dev_if = ndev->ifindex; dev_addr.net = &init_net; init_completion(&ctx.comp); - ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr, - &dev_addr, 1000, resolve_cb, &ctx); + ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr, + (struct sockaddr *)&dgid_addr, &dev_addr, 1000, + resolve_cb, &ctx); if (ret) return ret; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 3208ad6ad54014776cd333f4ac068c56cc737484..99e321c72e753572099783cd9e2249430aaab7b3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -216,10 +216,6 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry) device->name, port_num, entry->attr.index, entry->attr.gid.raw); - if (rdma_cap_roce_gid_table(device, port_num) && - entry->state != GID_TABLE_ENTRY_INVALID) - device->del_gid(&entry->attr, &entry->context); - write_lock_irq(&table->rwlock); /* @@ -367,6 +363,9 @@ static void del_gid(struct ib_device *ib_dev, u8 port, table->data_vec[ix] = NULL; write_unlock_irq(&table->rwlock); + if (rdma_cap_roce_gid_table(ib_dev, port)) + ib_dev->del_gid(&entry->attr, &entry->context); + put_gid_entry_locked(entry); } diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 6e39c27dca8ec1cf8205d4180609ea994f8d88ed..4c533275d1f206f44bd4daabc8343976aa6bb828 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work) if (ret) goto unlock; - cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, - cm_id_priv); + ret = cm_init_av_by_path(param->alternate_path, NULL, + &cm_id_priv->alt_av, cm_id_priv); + if (ret) + goto unlock; + cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; ret = atomic_inc_and_test(&cm_id_priv->work_count); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index a36c94930c31de8a03652cbf07844c10bcc09877..be3a4a8da9a90e239bcc8cc4fca35288e5a3b020 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1078,18 +1078,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr) return cma_zero_addr(addr) || cma_loopback_addr(addr); } -static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) +static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) { if (src->sa_family != dst->sa_family) return -1; switch (src->sa_family) { case AF_INET: - return ((struct sockaddr_in *) src)->sin_addr.s_addr != - ((struct sockaddr_in *) dst)->sin_addr.s_addr; - case AF_INET6: - return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, - &((struct sockaddr_in6 *) dst)->sin6_addr); + return ((struct sockaddr_in *)src)->sin_addr.s_addr != + ((struct sockaddr_in *)dst)->sin_addr.s_addr; + case AF_INET6: { + struct 
sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; + struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; + bool link_local; + + if (ipv6_addr_cmp(&src_addr6->sin6_addr, + &dst_addr6->sin6_addr)) + return 1; + link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & + IPV6_ADDR_LINKLOCAL; + /* Link local must match their scope_ids */ + return link_local ? (src_addr6->sin6_scope_id != + dst_addr6->sin6_scope_id) : + 0; + } + default: return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, &((struct sockaddr_ib *) dst)->sib_addr); @@ -1630,6 +1643,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv, { switch (state) { case RDMA_CM_ADDR_QUERY: + /* + * We can avoid doing the rdma_addr_cancel() based on state, + * only RDMA_CM_ADDR_QUERY has a work that could still execute. + * Notice that the addr_handler work could still be exiting + * outside this state, however due to the interaction with the + * handler_mutex the work is guaranteed not to touch id_priv + * during exit. + */ rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); break; case RDMA_CM_ROUTE_QUERY: @@ -1710,8 +1731,8 @@ void rdma_destroy_id(struct rdma_cm_id *id) mutex_lock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex); + rdma_restrack_del(&id_priv->res); if (id_priv->cma_dev) { - rdma_restrack_del(&id_priv->res); if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); @@ -2257,9 +2278,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, conn_id->cm_id.iw = NULL; cma_exch(conn_id, RDMA_CM_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + mutex_unlock(&listen_id->handler_mutex); cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); - goto out; + return ret; } mutex_unlock(&conn_id->handler_mutex); @@ -2533,7 +2555,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) cma_init_resolve_route_work(work, id_priv); - route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); + if (!route->path_rec) + route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; @@ -2854,13 +2877,22 @@ static void addr_handler(int status, struct sockaddr *src_addr, { struct rdma_id_private *id_priv = context; struct rdma_cm_event event = {}; + struct sockaddr *addr; + struct sockaddr_storage old_addr; mutex_lock(&id_priv->handler_mutex); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_RESOLVED)) goto out; - memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); + /* + * Store the previous src address, so that if we fail to acquire + * matching rdma device, old address can be restored back, which helps + * to cancel the cma listen operation correctly. 
+ */ + addr = cma_src_addr(id_priv); + memcpy(&old_addr, addr, rdma_addr_size(addr)); + memcpy(addr, src_addr, rdma_addr_size(src_addr)); if (!status && !id_priv->cma_dev) { status = cma_acquire_dev(id_priv, NULL); if (status) @@ -2871,6 +2903,8 @@ static void addr_handler(int status, struct sockaddr *src_addr, } if (status) { + memcpy(addr, &old_addr, + rdma_addr_size((struct sockaddr *)&old_addr)); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ADDR_BOUND)) goto out; @@ -2944,70 +2978,6 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) return ret; } -static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, - const struct sockaddr *dst_addr) -{ - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); -} - -int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, - const struct sockaddr *dst_addr, int timeout_ms) -{ - struct rdma_id_private *id_priv; - int ret; - - id_priv = container_of(id, struct rdma_id_private, id); - if (id_priv->state == RDMA_CM_IDLE) { - ret = cma_bind_addr(id, src_addr, dst_addr); - if (ret) - return ret; - } - - if (cma_family(id_priv) != dst_addr->sa_family) - return -EINVAL; - - if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) - return -EINVAL; - - memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); - atomic_inc(&id_priv->refcount); - if (cma_any_addr(dst_addr)) { - ret = cma_resolve_loopback(id_priv); - } else { - if (dst_addr->sa_family == AF_IB) { - ret = cma_resolve_ib_addr(id_priv); - } else { - ret = rdma_resolve_ip(cma_src_addr(id_priv), - dst_addr, &id->route.addr.dev_addr, - timeout_ms, addr_handler, id_priv); - } - } - if (ret) - goto err; - - return 0; -err: - cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); - cma_deref_id(id_priv); - return ret; -} -EXPORT_SYMBOL(rdma_resolve_addr); - int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) { struct rdma_id_private *id_priv; @@ -3390,27 +3360,26 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) } EXPORT_SYMBOL(rdma_listen); -int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) +static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, + struct sockaddr *addr, const struct sockaddr *daddr) { - struct rdma_id_private *id_priv; + struct sockaddr *id_daddr; int ret; - struct sockaddr *daddr; if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && addr->sa_family != AF_IB) return -EAFNOSUPPORT; - id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) return -EINVAL; - ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); + ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); if (ret) goto err1; memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!cma_any_addr(addr)) { - ret = cma_translate_addr(addr, 
&id->route.addr.dev_addr); + ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); if (ret) goto err1; @@ -3430,8 +3399,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) } #endif } - daddr = cma_dst_addr(id_priv); - daddr->sa_family = addr->sa_family; + id_daddr = cma_dst_addr(id_priv); + if (daddr != id_daddr) + memcpy(id_daddr, daddr, rdma_addr_size(addr)); + id_daddr->sa_family = addr->sa_family; ret = cma_get_port(id_priv); if (ret) @@ -3439,14 +3410,102 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) return 0; err2: - if (id_priv->cma_dev) { - rdma_restrack_del(&id_priv->res); + rdma_restrack_del(&id_priv->res); + if (id_priv->cma_dev) cma_release_dev(id_priv); - } err1: cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); return ret; } + +static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, + const struct sockaddr *dst_addr) +{ + struct rdma_id_private *id_priv = + container_of(id, struct rdma_id_private, id); + + if (!src_addr || !src_addr->sa_family) { + src_addr = (struct sockaddr *) &id->route.addr.src_addr; + src_addr->sa_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && + dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; + struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *) src_addr)->sib_pkey = + ((struct sockaddr_ib *) dst_addr)->sib_pkey; + } + } + return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); +} + +int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, + const struct sockaddr *dst_addr, int timeout_ms) +{ + struct rdma_id_private *id_priv; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + if (id_priv->state == RDMA_CM_IDLE) { + ret = cma_bind_addr(id, src_addr, dst_addr); + if (ret) + return ret; + } + + if (cma_family(id_priv) != dst_addr->sa_family) + return -EINVAL; + + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) + return -EINVAL; + + atomic_inc(&id_priv->refcount); + if (cma_any_addr(dst_addr)) { + ret = cma_resolve_loopback(id_priv); + } else { + if (dst_addr->sa_family == AF_IB) { + ret = cma_resolve_ib_addr(id_priv); + } else { + /* + * The FSM can return back to RDMA_CM_ADDR_BOUND after + * rdma_resolve_ip() is called, eg through the error + * path in addr_handler(). If this happens the existing + * request must be canceled before issuing a new one. + * Since canceling a request is a bit slow and this + * oddball path is rare, keep track once a request has + * been issued. The track turns out to be a permanent + * state since this is the only cancel as it is + * immediately before rdma_resolve_ip(). 
+ */ + if (id_priv->used_resolve_ip) + rdma_addr_cancel(&id->route.addr.dev_addr); + else + id_priv->used_resolve_ip = 1; + ret = rdma_resolve_ip(cma_src_addr(id_priv), + dst_addr, &id->route.addr.dev_addr, + timeout_ms, addr_handler, id_priv); + } + } + if (ret) + goto err; + + return 0; +err: + cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); + cma_deref_id(id_priv); + return ret; +} +EXPORT_SYMBOL(rdma_resolve_addr); + +int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) +{ + struct rdma_id_private *id_priv = + container_of(id, struct rdma_id_private, id); + + return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); +} EXPORT_SYMBOL(rdma_bind_addr); static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) @@ -4634,6 +4693,7 @@ static int __init cma_init(void) err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); + unregister_pernet_subsys(&cma_pernet_operations); err_wq: destroy_workqueue(cma_wq); return ret; diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index 194cfe78c4475d7d3a3c18a08cdeec2cf15bb66a..e2e0bc89d847a1e361e488ee03f47f9f2630e680 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -87,6 +87,7 @@ struct rdma_id_private { bool tos_set; u8 reuseaddr; u8 afonly; + u8 used_resolve_ip; enum ib_gid_type gid_type; /* diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 77c7005c396c53feb243c423ba989c9095001277..22b337a8bcc4943c538b9b325191884d688eb95a 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -295,6 +295,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, #endif struct ib_device *ib_device_get_by_index(u32 ifindex); +void ib_device_put(struct ib_device *device); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index af5ad6a56ae404d1cd2aae64f95e59d7ccded0ac..9271f72900052aa7007727364c4746f7e94675f3 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work) IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) - queue_work(ib_comp_wq, &cq->work); + queue_work(cq->comp_wq, &cq->work); } static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) { - queue_work(ib_comp_wq, &cq->work); + queue_work(cq->comp_wq, &cq->work); } /** @@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); break; case IB_POLL_WORKQUEUE: + case IB_POLL_UNBOUND_WORKQUEUE: cq->comp_handler = ib_cq_completion_workqueue; INIT_WORK(&cq->work, ib_cq_poll_work); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? 
+ ib_comp_wq : ib_comp_unbound_wq; break; default: ret = -EINVAL; @@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq) irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: + case IB_POLL_UNBOUND_WORKQUEUE: cancel_work_sync(&cq->work); break; default: diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index db3b6271f09d5949a88c5f5a3ffab2d46fecb7f5..92ab95383d00f7f1ce0ec5af89b736cccf756d71 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -61,6 +61,7 @@ struct ib_client_data { }; struct workqueue_struct *ib_comp_wq; +struct workqueue_struct *ib_comp_unbound_wq; struct workqueue_struct *ib_wq; EXPORT_SYMBOL_GPL(ib_wq); @@ -143,7 +144,8 @@ static struct ib_device *__ib_device_get_by_index(u32 index) } /* - * Caller is responsible to return refrerence count by calling put_device() + * Caller must perform ib_device_put() to return the device reference count + * when ib_device_get_by_index() returns valid device pointer. */ struct ib_device *ib_device_get_by_index(u32 index) { @@ -151,13 +153,21 @@ struct ib_device *ib_device_get_by_index(u32 index) down_read(&lists_rwsem); device = __ib_device_get_by_index(index); - if (device) - get_device(&device->dev); - + if (device) { + /* Do not return a device if unregistration has started. */ + if (!refcount_inc_not_zero(&device->refcount)) + device = NULL; + } up_read(&lists_rwsem); return device; } +void ib_device_put(struct ib_device *device) +{ + if (refcount_dec_and_test(&device->refcount)) + complete(&device->unreg_completion); +} + static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; @@ -272,6 +282,8 @@ struct ib_device *ib_alloc_device(size_t size) spin_lock_init(&device->client_data_lock); INIT_LIST_HEAD(&device->client_data_list); INIT_LIST_HEAD(&device->port_list); + refcount_set(&device->refcount, 1); + init_completion(&device->unreg_completion); return device; } @@ -515,13 +527,13 @@ int ib_register_device(struct ib_device *device, ret = setup_port_pkey_list(device); if (ret) { pr_warn("Couldn't create per port_pkey_list\n"); - goto out; + goto port_cleanup; } ret = ib_cache_setup_one(device); if (ret) { pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); - goto port_cleanup; + goto pkey_cleanup; } ret = ib_device_register_rdmacg(device); @@ -562,6 +574,8 @@ int ib_register_device(struct ib_device *device, cache_cleanup: ib_cache_cleanup_one(device); ib_cache_release_one(device); +pkey_cleanup: + kfree(device->port_pkey_list); port_cleanup: kfree(device->port_immutable); out: @@ -581,6 +595,13 @@ void ib_unregister_device(struct ib_device *device) struct ib_client_data *context, *tmp; unsigned long flags; + /* + * Wait for all netlink command callers to finish working on the + * device. 
+ */ + ib_device_put(device); + wait_for_completion(&device->unreg_completion); + mutex_lock(&device_mutex); down_write(&lists_rwsem); @@ -598,8 +619,8 @@ void ib_unregister_device(struct ib_device *device) } up_read(&lists_rwsem); - ib_device_unregister_rdmacg(device); ib_device_unregister_sysfs(device); + ib_device_unregister_rdmacg(device); mutex_unlock(&device_mutex); @@ -1166,10 +1187,19 @@ static int __init ib_core_init(void) goto err; } + ib_comp_unbound_wq = + alloc_workqueue("ib-comp-unb-wq", + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); + if (!ib_comp_unbound_wq) { + ret = -ENOMEM; + goto err_comp; + } + ret = class_register(&ib_class); if (ret) { pr_warn("Couldn't create InfiniBand device class\n"); - goto err_comp; + goto err_comp_unbound; } ret = rdma_nl_init(); @@ -1218,6 +1248,8 @@ static int __init ib_core_init(void) rdma_nl_exit(); err_sysfs: class_unregister(&ib_class); +err_comp_unbound: + destroy_workqueue(ib_comp_unbound_wq); err_comp: destroy_workqueue(ib_comp_wq); err: @@ -1236,6 +1268,7 @@ static void __exit ib_core_cleanup(void) addr_cleanup(); rdma_nl_exit(); class_unregister(&ib_class); + destroy_workqueue(ib_comp_unbound_wq); destroy_workqueue(ib_comp_wq); /* Make sure that any pending umem accounting work is done. */ destroy_workqueue(ib_wq); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 5d676cff41f496ce519f4dc000eda17f6fd43999..2573efba18777ed4e18c1874ee4878ffa01953f1 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -367,8 +367,10 @@ EXPORT_SYMBOL(iw_cm_disconnect); * * Clean up all resources associated with the connection and release * the initial reference taken by iw_create_cm_id. + * + * Returns true if and only if the last cm_id_priv reference has been dropped. 
*/ -static void destroy_cm_id(struct iw_cm_id *cm_id) +static bool destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; unsigned long flags; @@ -436,7 +438,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); } - (void)iwcm_deref_id(cm_id_priv); + return iwcm_deref_id(cm_id_priv); } /* @@ -447,7 +449,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) */ void iw_destroy_cm_id(struct iw_cm_id *cm_id) { - destroy_cm_id(cm_id); + if (!destroy_cm_id(cm_id)) + flush_workqueue(iwcm_wq); } EXPORT_SYMBOL(iw_destroy_cm_id); @@ -1020,7 +1023,7 @@ static void cm_work_handler(struct work_struct *_work) if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { ret = process_event(cm_id_priv, &levent); if (ret) - destroy_cm_id(&cm_id_priv->id); + WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id)); } else pr_debug("dropping event %d\n", levent.event); if (iwcm_deref_id(cm_id_priv)) @@ -1174,7 +1177,7 @@ static int __init iw_cm_init(void) pr_err("iw_cm: couldn't init iwpm\n"); else rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table); - iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM); if (!iwcm_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index ef459f2f2eeb859c5a7c7a4b4501e00adedd7ae1..218411282069b2a8056304a226e6a9dcaa46259d 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -223,30 +223,30 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Validate parameters */ qpn = get_spl_qp_index(qp_type); if (qpn == -1) { - dev_notice(&device->dev, - "ib_register_mad_agent: invalid QP Type %d\n", - qp_type); + dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n", + __func__, qp_type); goto error1; } if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { - dev_notice(&device->dev, - "ib_register_mad_agent: invalid RMPP Version %u\n", - rmpp_version); + dev_dbg_ratelimited(&device->dev, + "%s: invalid RMPP Version %u\n", + __func__, rmpp_version); goto error1; } /* Validate MAD registration request if supplied */ if (mad_reg_req) { if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { - dev_notice(&device->dev, - "ib_register_mad_agent: invalid Class Version %u\n", - mad_reg_req->mgmt_class_version); + dev_dbg_ratelimited(&device->dev, + "%s: invalid Class Version %u\n", + __func__, + mad_reg_req->mgmt_class_version); goto error1; } if (!recv_handler) { - dev_notice(&device->dev, - "ib_register_mad_agent: no recv_handler\n"); + dev_dbg_ratelimited(&device->dev, + "%s: no recv_handler\n", __func__); goto error1; } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { @@ -256,9 +256,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, */ if (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - dev_notice(&device->dev, - "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", - mad_reg_req->mgmt_class); + dev_dbg_ratelimited(&device->dev, + "%s: Invalid Mgmt Class 0x%x\n", + __func__, mad_reg_req->mgmt_class); goto error1; } } else if (mad_reg_req->mgmt_class == 0) { @@ -266,8 +266,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */ - dev_notice(&device->dev, - "ib_register_mad_agent: Invalid Mgmt Class 0\n"); + dev_dbg_ratelimited(&device->dev, + "%s: Invalid Mgmt Class 0\n", + __func__); goto error1; } else if 
(is_vendor_class(mad_reg_req->mgmt_class)) { /* @@ -275,18 +276,19 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, * ensure supplied OUI is not zero */ if (!is_vendor_oui(mad_reg_req->oui)) { - dev_notice(&device->dev, - "ib_register_mad_agent: No OUI specified for class 0x%x\n", - mad_reg_req->mgmt_class); + dev_dbg_ratelimited(&device->dev, + "%s: No OUI specified for class 0x%x\n", + __func__, + mad_reg_req->mgmt_class); goto error1; } } /* Make sure class supplied is consistent with RMPP */ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (rmpp_version) { - dev_notice(&device->dev, - "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", - mad_reg_req->mgmt_class); + dev_dbg_ratelimited(&device->dev, + "%s: RMPP version for non-RMPP class 0x%x\n", + __func__, mad_reg_req->mgmt_class); goto error1; } } @@ -297,9 +299,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { - dev_notice(&device->dev, - "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", - mad_reg_req->mgmt_class); + dev_dbg_ratelimited(&device->dev, + "%s: Invalid SM QP type: class 0x%x\n", + __func__, mad_reg_req->mgmt_class); goto error1; } } else { @@ -307,9 +309,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { - dev_notice(&device->dev, - "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", - mad_reg_req->mgmt_class); + dev_dbg_ratelimited(&device->dev, + "%s: Invalid GS QP type: class 0x%x\n", + __func__, mad_reg_req->mgmt_class); goto error1; } } @@ -324,18 +326,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Validate device and port */ port_priv = ib_get_mad_port(device, port_num); if (!port_priv) { - dev_notice(&device->dev, - "ib_register_mad_agent: Invalid port %d\n", - port_num); + dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n", + __func__, port_num); ret = ERR_PTR(-ENODEV); goto error1; } - /* Verify the QP requested is supported. For example, Ethernet devices - * will not have QP0 */ + /* Verify the QP requested is supported. For example, Ethernet devices + * will not have QP0. 
+ */ if (!port_priv->qp_info[qpn].qp) { - dev_notice(&device->dev, - "ib_register_mad_agent: QP %d not supported\n", qpn); + dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n", + __func__, qpn); ret = ERR_PTR(-EPROTONOSUPPORT); goto error1; } @@ -3182,18 +3184,18 @@ static int ib_mad_port_open(struct ib_device *device, if (has_smi) cq_size *= 2; - port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, - IB_POLL_WORKQUEUE); - if (IS_ERR(port_priv->cq)) { - dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); - ret = PTR_ERR(port_priv->cq); - goto error3; - } - port_priv->pd = ib_alloc_pd(device, 0); if (IS_ERR(port_priv->pd)) { dev_err(&device->dev, "Couldn't create ib_mad PD\n"); ret = PTR_ERR(port_priv->pd); + goto error3; + } + + port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, + IB_POLL_UNBOUND_WORKQUEUE); + if (IS_ERR(port_priv->cq)) { + dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); + ret = PTR_ERR(port_priv->cq); goto error4; } @@ -3236,11 +3238,11 @@ static int ib_mad_port_open(struct ib_device *device, error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: - ib_dealloc_pd(port_priv->pd); -error4: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); +error4: + ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); @@ -3270,8 +3272,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); - ib_dealloc_pd(port_priv->pd); ib_free_cq(port_priv->cq); + ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 0385ab4383208b40a692f60cced8429d8486b528..6e190df8ab03c8ef816a50b6c048f7d915135f75 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -579,10 +579,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, - pd->unsafe_global_rkey)) - goto err; if (fill_res_name_pid(msg, res)) goto err; @@ -635,13 +631,13 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - put_device(&device->dev); + ib_device_put(device); return err; } @@ -725,14 +721,14 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, goto err_free; nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - put_device(&device->dev); + ib_device_put(device); return err; } @@ -789,7 +785,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, } out: - put_device(&device->dev); + ib_device_put(device); cb->args[0] = idx; return skb->len; } @@ -828,13 +824,13 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, goto err_free; nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - 
put_device(&device->dev); + ib_device_put(device); return ret; } @@ -1027,7 +1023,7 @@ next: idx++; if (!filled) goto err; - put_device(&device->dev); + ib_device_put(device); return skb->len; res_err: @@ -1038,7 +1034,7 @@ next: idx++; nlmsg_cancel(skb, nlh); err_index: - put_device(&device->dev); + ib_device_put(device); return ret; } diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 3b7fa0ccaa08a228d0d4523cb0a19a2cc96b23c2..279f0ae6591273b8e521e739bed5175039bd039b 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -209,7 +209,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res) struct ib_device *dev; if (!res->valid) - return; + goto out; dev = res_to_dev(res); if (!dev) @@ -222,8 +222,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res) down_write(&dev->res.rwsem); hash_del(&res->node); res->valid = false; - if (res->task) - put_task_struct(res->task); up_write(&dev->res.rwsem); + +out: + if (res->task) { + put_task_struct(res->task); + res->task = NULL; + } } EXPORT_SYMBOL(rdma_restrack_del); diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index ee366199b169caa5a521c31e6a98e8ae6ecccbcf..558de0b9895cb979745b5002a623568bb5b21f7b 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, struct net_device *cookie_ndev = cookie; bool match = false; + if (!rdma_ndev) + return false; + rcu_read_lock(); if (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) @@ -767,8 +770,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event, case NETDEV_CHANGEADDR: cmds[0] = netdev_del_cmd; - cmds[1] = add_default_gid_cmd; - cmds[2] = add_cmd; + if (ndev->reg_state == NETREG_REGISTERED) { + cmds[1] = add_default_gid_cmd; + cmds[2] = add_cmd; + } break; case NETDEV_CHANGEUPPER: diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 7b794a14d6e851fbc4afc82711ecdb6ecf336c61..8be082edf986fd1e4d763c69bbd06ef3b3bd8cdc 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1232,7 +1232,6 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec, { struct rdma_dev_addr dev_addr = {}; union { - struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; @@ -1249,12 +1248,12 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec, */ dev_addr.net = &init_net; - rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid); - rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid); + rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid); /* validate the route */ - ret = rdma_resolve_ip_route(&sgid_addr._sockaddr, - &dgid_addr._sockaddr, &dev_addr); + ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr, + (struct sockaddr *)&dgid_addr, &dev_addr); if (ret) return ret; diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 9b0bea8303e073c8983d2327d5fffc144b50e7ae..b79b61bd6ee4f2f256b4a5304f7c92cdfb4f87a6 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -711,16 +711,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, agent->device->name, agent->port_num); if (ret) - return ret; + goto free_security; 
agent->lsm_nb.notifier_call = ib_mad_agent_security_change; ret = register_lsm_notifier(&agent->lsm_nb); if (ret) - return ret; + goto free_security; agent->smp_allowed = true; agent->lsm_nb_reg = true; return 0; + +free_security: + security_ib_free_security(agent->security); + return ret; } void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) @@ -728,9 +732,10 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) if (!rdma_protocol_ib(agent->device, agent->port_num)) return; - security_ib_free_security(agent->security); if (agent->lsm_nb_reg) unregister_lsm_notifier(&agent->lsm_nb); + + security_ib_free_security(agent->security); } int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 7fd14ead7b378ca4d5048e34e800af114d0088d6..ace40bb98624c6817c0864c8084e6b515d3c1fef 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data, 40 + offset / 8, sizeof(data)); if (ret < 0) - return sprintf(buf, "N/A (no PMA)\n"); + return ret; switch (width) { case 4: @@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num, goto err_put; } - p->pma_table = get_counter_table(device, port_num); - ret = sysfs_create_group(&p->kobj, p->pma_table); - if (ret) - goto err_put_gid_attrs; + if (device->process_mad) { + p->pma_table = get_counter_table(device, port_num); + ret = sysfs_create_group(&p->kobj, p->pma_table); + if (ret) + goto err_put_gid_attrs; + } p->gid_group.name = "gids"; p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len); @@ -1173,7 +1175,8 @@ static int add_port(struct ib_device *device, int port_num, p->gid_group.attrs = NULL; err_remove_pma: - sysfs_remove_group(&p->kobj, p->pma_table); + if (p->pma_table) + sysfs_remove_group(&p->kobj, p->pma_table); err_put_gid_attrs: kobject_put(&p->gid_attr_group->kobj); @@ -1285,7 +1288,9 @@ static void free_port_list_attributes(struct ib_device *device) kfree(port->hw_stats); free_hsag(&port->kobj, port->hw_stats_ag); } - sysfs_remove_group(p, port->pma_table); + + if (port->pma_table) + sysfs_remove_group(p, port->pma_table); sysfs_remove_group(p, &port->pkey_group); sysfs_remove_group(p, &port->gid_group); sysfs_remove_group(&port->gid_attr_group->kobj, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 01d68ed46c1b6c530a717a7efd8866dd62dc6506..74086abd39316f17b5180eea382d5c449b680a9d 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -89,6 +89,7 @@ struct ucma_context { struct ucma_file *file; struct rdma_cm_id *cm_id; + struct mutex mutex; u64 uid; struct list_head list; @@ -215,6 +216,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; + mutex_init(&ctx->mutex); mutex_lock(&mut); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); @@ -586,6 +588,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) list_move_tail(&uevent->list, &list); } list_del(&ctx->list); + events_reported = ctx->events_reported; mutex_unlock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &list, list) { @@ -595,7 +598,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) kfree(uevent); } - events_reported = ctx->events_reported; + 
mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } @@ -665,7 +668,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); + ucma_put_ctx(ctx); return ret; } @@ -688,7 +694,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -712,8 +720,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -738,8 +748,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -759,7 +771,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -848,6 +862,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? @@ -871,6 +886,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: + mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; @@ -1022,6 +1038,7 @@ static ssize_t ucma_query(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); @@ -1036,6 +1053,7 @@ static ssize_t ucma_query(struct ucma_file *file, ret = -ENOSYS; break; } + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1076,7 +1094,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); + mutex_lock(&ctx->mutex); ret = rdma_connect(ctx->cm_id, &conn_param); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1097,7 +1117,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? 
cmd.backlog : max_backlog; + mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, ctx->backlog); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1120,13 +1142,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); + mutex_unlock(&ctx->mutex); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); - } else + } else { + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, NULL, NULL); - + mutex_unlock(&ctx->mutex); + } ucma_put_ctx(ctx); return ret; } @@ -1145,7 +1171,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1164,7 +1192,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1195,7 +1225,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; + mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); + mutex_unlock(&ctx->mutex); if (ret) goto out; @@ -1274,9 +1306,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx, struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); + mutex_unlock(&ctx->mutex); } else { + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); + mutex_unlock(&ctx->mutex); } if (ret) return ret; @@ -1309,7 +1345,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level, switch (level) { case RDMA_OPTION_ID: + mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); + mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); @@ -1369,8 +1407,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1413,8 +1453,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); + mutex_unlock(&ctx->mutex); if (ret) goto err2; @@ -1434,7 +1476,9 @@ static ssize_t ucma_process_join(struct ucma_file *file, return 0; err3: + mutex_lock(&ctx->mutex); rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err2: mutex_lock(&mut); @@ -1518,7 +1562,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, goto out; } + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&mc->ctx->mutex); + mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); @@ -1535,45 +1582,15 @@ static ssize_t 
ucma_leave_multicast(struct ucma_file *file, return ret; } -static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) -{ - /* Acquire mutex's based on pointer comparison to prevent deadlock. */ - if (file1 < file2) { - mutex_lock(&file1->mut); - mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); - } else { - mutex_lock(&file2->mut); - mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); - } -} - -static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2) -{ - if (file1 < file2) { - mutex_unlock(&file2->mut); - mutex_unlock(&file1->mut); - } else { - mutex_unlock(&file1->mut); - mutex_unlock(&file2->mut); - } -} - -static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) -{ - struct ucma_event *uevent, *tmp; - - list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) - if (uevent->ctx == ctx) - list_move_tail(&uevent->list, &file->event_list); -} - static ssize_t ucma_migrate_id(struct ucma_file *new_file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_migrate_id cmd; struct rdma_ucm_migrate_resp resp; + struct ucma_event *uevent, *tmp; struct ucma_context *ctx; + LIST_HEAD(event_list); struct fd f; struct ucma_file *cur_file; int ret = 0; @@ -1589,40 +1606,52 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, ret = -EINVAL; goto file_put; } + cur_file = f.file->private_data; /* Validate current fd and prevent destruction of id. */ - ctx = ucma_get_ctx(f.file->private_data, cmd.id); + ctx = ucma_get_ctx(cur_file, cmd.id); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto file_put; } - cur_file = ctx->file; - if (cur_file == new_file) { - resp.events_reported = ctx->events_reported; - goto response; - } - /* - * Migrate events between fd's, maintaining order, and avoiding new - * events being added before existing events. + * ctx->file can only be changed under the handler & xa_lock. xa_load() + * must be checked again to ensure the ctx hasn't begun destruction + * since the ucma_get_ctx(). */ - ucma_lock_files(cur_file, new_file); mutex_lock(&mut); - list_move_tail(&ctx->list, &new_file->ctx_list); - ucma_move_events(ctx, new_file); + if (_ucma_find_context(cmd.id, cur_file) != ctx) { + mutex_unlock(&mut); + ret = -ENOENT; + goto err_unlock; + } ctx->file = new_file; + mutex_unlock(&mut); + + mutex_lock(&cur_file->mut); + list_del(&ctx->list); + /* + * At this point lock_handler() prevents addition of new uevents for + * this ctx. 
+ */ + list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list) + if (uevent->ctx == ctx) + list_move_tail(&uevent->list, &event_list); resp.events_reported = ctx->events_reported; + mutex_unlock(&cur_file->mut); - mutex_unlock(&mut); - ucma_unlock_files(cur_file, new_file); + mutex_lock(&new_file->mut); + list_add_tail(&ctx->list, &new_file->ctx_list); + list_splice_tail(&event_list, &new_file->event_list); + mutex_unlock(&new_file->mut); -response: if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; +err_unlock: ucma_put_ctx(ctx); file_put: fdput(f); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index a41792dbae1fb41e9a01e2ce0de28bae70658d08..5053a5ce5c07ed0813aaf14065a723f1ce2ca34a 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -85,6 +85,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, struct page **page_list; struct vm_area_struct **vma_list; unsigned long lock_limit; + unsigned long new_pinned; unsigned long cur_base; unsigned long npages; int ret; @@ -148,12 +149,13 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; down_write(&current->mm->mmap_sem); - current->mm->pinned_vm += npages; - if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) { + if (check_add_overflow(current->mm->pinned_vm, npages, &new_pinned) || + (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) { up_write(&current->mm->mmap_sem); ret = -ENOMEM; - goto vma; + goto out; } + current->mm->pinned_vm = new_pinned; up_write(&current->mm->mmap_sem); cur_base = addr & PAGE_MASK; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 6ec748eccff7e87d7691b92a6d50534d89d4625e..26dc31e8ecdba8786f05e9225f3eab50fb885e4c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -191,15 +191,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn, up_read(&context->umem_rwsem); } -static int invalidate_page_trampoline(struct ib_umem *item, u64 start, - u64 end, void *cookie) -{ - ib_umem_notifier_start_account(item); - item->context->invalidate_range(item, start, start + PAGE_SIZE); - ib_umem_notifier_end_account(item); - return 0; -} - static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start, u64 end, void *cookie) { @@ -604,12 +595,13 @@ static int ib_umem_odp_map_dma_single_page( put_page(page); if (remove_existing_mapping && umem->context->invalidate_range) { - invalidate_page_trampoline( + ib_umem_notifier_start_account(umem); + umem->context->invalidate_range( umem, - ib_umem_start(umem) + (page_index >> umem->page_shift), - ib_umem_start(umem) + ((page_index + 1) >> - umem->page_shift), - NULL); + ib_umem_start(umem) + (page_index << umem->page_shift), + ib_umem_start(umem) + + ((page_index + 1) << umem->page_shift)); + ib_umem_notifier_end_account(umem); ret = -EAGAIN; } diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index c34a6852d691f666fb1d8deac965335c4f7c0840..b37aada92a62aa7ef41b0743b2667206ef460b1c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -61,6 +62,8 @@ MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); MODULE_LICENSE("Dual BSD/GPL"); +#define MAX_UMAD_RECV_LIST_SIZE 200000 + enum { IB_UMAD_MAX_PORTS =
RDMA_MAX_PORTS, IB_UMAD_MAX_AGENTS = 32, @@ -112,6 +115,7 @@ struct ib_umad_file { struct mutex mutex; struct ib_umad_port *port; struct list_head recv_list; + atomic_t recv_list_size; struct list_head send_list; struct list_head port_list; spinlock_t send_lock; @@ -167,24 +171,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) return file->agents_dead ? NULL : file->agent[id]; } -static int queue_packet(struct ib_umad_file *file, - struct ib_mad_agent *agent, - struct ib_umad_packet *packet) +static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent, + struct ib_umad_packet *packet, bool is_recv_mad) { int ret = 1; mutex_lock(&file->mutex); + if (is_recv_mad && + atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE) + goto unlock; + for (packet->mad.hdr.id = 0; packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; packet->mad.hdr.id++) if (agent == __get_agent(file, packet->mad.hdr.id)) { list_add_tail(&packet->list, &file->recv_list); + atomic_inc(&file->recv_list_size); wake_up_interruptible(&file->recv_wait); ret = 0; break; } - +unlock: mutex_unlock(&file->mutex); return ret; @@ -211,7 +219,7 @@ static void send_handler(struct ib_mad_agent *agent, if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { packet->length = IB_MGMT_MAD_HDR; packet->mad.hdr.status = ETIMEDOUT; - if (!queue_packet(file, agent, packet)) + if (!queue_packet(file, agent, packet, false)) return; } kfree(packet); @@ -271,7 +279,7 @@ static void recv_handler(struct ib_mad_agent *agent, rdma_destroy_ah_attr(&ah_attr); } - if (queue_packet(file, agent, packet)) + if (queue_packet(file, agent, packet, true)) goto err2; return; @@ -380,6 +388,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); list_del(&packet->list); + atomic_dec(&file->recv_list_size); mutex_unlock(&file->mutex); @@ -392,6 +401,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, /* Requeue packet */ mutex_lock(&file->mutex); list_add(&packet->list, &file->recv_list); + atomic_inc(&file->recv_list_size); mutex_unlock(&file->mutex); } else { if (packet->recv_wc) @@ -868,11 +878,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) if (get_user(id, arg)) return -EFAULT; + if (id >= IB_UMAD_MAX_AGENTS) + return -EINVAL; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); - if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { + id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); + if (!__get_agent(file, id)) { ret = -EINVAL; goto out; } @@ -1237,19 +1250,13 @@ static void ib_umad_kill_port(struct ib_umad_port *port) struct ib_umad_file *file; int id; - dev_set_drvdata(port->dev, NULL); - dev_set_drvdata(port->sm_dev, NULL); - - device_destroy(umad_class, port->cdev.dev); - device_destroy(umad_class, port->sm_cdev.dev); - - cdev_del(&port->cdev); - cdev_del(&port->sm_cdev); - mutex_lock(&port->file_mutex); port->ib_dev = NULL; + /* Mark ib_dev NULL and block ioctl or other file ops to progress + * further. 
+ */ list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); file->agents_dead = 1; @@ -1261,6 +1268,16 @@ static void ib_umad_kill_port(struct ib_umad_port *port) } mutex_unlock(&port->file_mutex); + + dev_set_drvdata(port->dev, NULL); + dev_set_drvdata(port->sm_dev, NULL); + + device_destroy(umad_class, port->cdev.dev); + device_destroy(umad_class, port->sm_cdev.dev); + + cdev_del(&port->cdev); + cdev_del(&port->sm_cdev); + clear_bit(port->dev_num, dev_map); } diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 5df8e548cc146066879d6470a8482412d4f17ab3..4a14de2d8c716476fdf1e3fb10fd471629e4faee 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata, struct ib_uverbs_device { atomic_t refcount; - int num_comp_vectors; + u32 num_comp_vectors; struct completion comp; struct device *dev; struct ib_device __rcu *ib_dev; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e012ca80f9d196ddbb8723691ec4087a55c0d863..f60c1a3e63861a4a632884f3bcfd2ba4961126e8 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3556,6 +3556,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_free_attr; } + if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) { + err = -EINVAL; + goto err_uobj; + } + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); if (!qp) { err = -EINVAL; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 50152c1b100452f7a4c8a9f733739ac25cbe777d..8a4e3d45ecc6486f649a0343b2c2eec0bcfa4c01 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -264,6 +264,9 @@ void ib_uverbs_release_file(struct kref *ref) if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); + if (file->async_file) + kref_put(&file->async_file->ref, + ib_uverbs_release_async_event_file); kobject_put(&file->device->kobj); kfree(file); @@ -701,8 +704,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, if (!hdr->out_words && !ex_hdr->provider_out_words) return -EINVAL; - if (!access_ok(VERIFY_WRITE, - u64_to_user_ptr(ex_hdr->response), + if (!access_ok(u64_to_user_ptr(ex_hdr->response), (hdr->out_words + ex_hdr->provider_out_words) * 8)) return -EFAULT; } else { @@ -915,10 +917,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) } mutex_unlock(&file->device->lists_mutex); - if (file->async_file) - kref_put(&file->async_file->ref, - ib_uverbs_release_async_event_file); - kref_put(&file->ref, ib_uverbs_release_file); return 0; diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index b8d715c68ca44e3c41bf87f1547384b40c82d4a4..11a0806469162640d32c094f55c1ae2d3913b20f 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device, struct rdma_ah_attr *src = ah_attr; struct rdma_ah_attr conv_ah; - memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); + memset(&dst->grh, 0, sizeof(dst->grh)); if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) && (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) && diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 
6ee03d6089ebcda1fbe872a6ae4730c5a207d3d2..47a969a7f6f62537483b48d0020f492b5df0d112 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1087,8 +1087,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, } EXPORT_SYMBOL(ib_open_qp); -static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, - struct ib_qp_init_attr *qp_init_attr) +static struct ib_qp *create_xrc_qp(struct ib_qp *qp, + struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *real_qp = qp; @@ -1103,10 +1103,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, qp_init_attr->qp_context); - if (!IS_ERR(qp)) - __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); - else - real_qp->device->destroy_qp(real_qp); + if (IS_ERR(qp)) + return qp; + + __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); return qp; } @@ -1137,10 +1137,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, return qp; ret = ib_create_qp_security(qp, device); - if (ret) { - ib_destroy_qp(qp); - return ERR_PTR(ret); - } + if (ret) + goto err; qp->real_qp = qp; qp->qp_type = qp_init_attr->qp_type; @@ -1153,8 +1151,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, INIT_LIST_HEAD(&qp->sig_mrs); qp->port = 0; - if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) - return ib_create_xrc_qp(qp, qp_init_attr); + if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { + struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr); + + if (IS_ERR(xrc_qp)) { + ret = PTR_ERR(xrc_qp); + goto err; + } + return xrc_qp; + } qp->event_handler = qp_init_attr->event_handler; qp->qp_context = qp_init_attr->qp_context; @@ -1181,11 +1186,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, if (qp_init_attr->cap.max_rdma_ctxs) { ret = rdma_rw_init_mrs(qp, qp_init_attr); - if (ret) { - pr_err("failed to init MR pool ret= %d\n", ret); - ib_destroy_qp(qp); - return ERR_PTR(ret); - } + if (ret) + goto err; } /* @@ -1198,6 +1200,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, device->attrs.max_sge_rd); return qp; + +err: + ib_destroy_qp(qp); + return ERR_PTR(ret); + } EXPORT_SYMBOL(ib_create_qp); @@ -2621,3 +2628,37 @@ void ib_drain_qp(struct ib_qp *qp) ib_drain_rq(qp); } EXPORT_SYMBOL(ib_drain_qp); + +void __rdma_block_iter_start(struct ib_block_iter *biter, + struct scatterlist *sglist, unsigned int nents, + unsigned long pgsz) +{ + memset(biter, 0, sizeof(struct ib_block_iter)); + biter->__sg = sglist; + biter->__sg_nents = nents; + + /* Driver provides best block size to use */ + biter->__pg_bit = __fls(pgsz); +} +EXPORT_SYMBOL(__rdma_block_iter_start); + +bool __rdma_block_iter_next(struct ib_block_iter *biter) +{ + unsigned int block_offset; + + if (!biter->__sg_nents || !biter->__sg) + return false; + + biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; + block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); + biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset; + + if (biter->__sg_advance >= sg_dma_len(biter->__sg)) { + biter->__sg_advance = 0; + biter->__sg = sg_next(biter->__sg); + biter->__sg_nents--; + } + + return true; +} +EXPORT_SYMBOL(__rdma_block_iter_next); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 96f76896488da6c0414411fea440520e75bf2843..802942adea8e85d60356854cb49cda90d330134e 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -120,6 +120,8 @@ struct bnxt_re_dev { #define BNXT_RE_FLAG_HAVE_L2_REF 3 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 #define 
BNXT_RE_FLAG_QOS_WORK_REG 5 +#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7 +#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 struct net_device *netdev; unsigned int version, major, minor; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index bc2b9e03843903750aba02c0994430cd024cf4e3..a019ff266f5275278768cddf4c43df779da035e1 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1446,7 +1446,7 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd, dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!"); bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); - goto exit; + goto fail; } } if (nq) @@ -2664,6 +2664,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, nq->budget++; atomic_inc(&rdev->cq_count); + spin_lock_init(&cq->cq_lock); if (context) { struct bnxt_re_cq_resp resp; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 85cd1a3593d610132ded3796b5b90384bdb0342c..f1b666c80f368ff43868300515596d9b384a65f5 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); @@ -864,10 +865,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) { int i; - if (rdev->nq[0].hwq.max_elements) { - for (i = 1; i < rdev->num_msix; i++) - bnxt_qplib_disable_nq(&rdev->nq[i - 1]); - } + for (i = 1; i < rdev->num_msix; i++) + bnxt_qplib_disable_nq(&rdev->nq[i - 1]); if (rdev->qplib_res.rcfw) bnxt_qplib_cleanup_res(&rdev->qplib_res); @@ -876,6 +875,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) static int bnxt_re_init_res(struct bnxt_re_dev *rdev) { int rc = 0, i; + int num_vec_enabled = 0; bnxt_qplib_init_res(&rdev->qplib_res); @@ -891,9 +891,13 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) "Failed to enable NQ with rc = 0x%x", rc); goto fail; } + num_vec_enabled++; } return 0; fail: + for (i = num_vec_enabled; i >= 0; i--) + bnxt_qplib_disable_nq(&rdev->nq[i]); + return rc; } @@ -925,6 +929,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev) static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) { int rc = 0, i; + int num_vec_created = 0; /* Configure and allocate resources for qplib */ rdev->qplib_res.rcfw = &rdev->rcfw; @@ -951,7 +956,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) { dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x", i, rc); - goto dealloc_dpi; + goto free_nq; } rc = bnxt_re_net_ring_alloc (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr, @@ -964,14 +969,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) dev_err(rdev_to_dev(rdev), "Failed to allocate NQ fw id with rc = 0x%x", rc); + bnxt_qplib_free_nq(&rdev->nq[i]); goto free_nq; } + num_vec_created++; } return 0; free_nq: - for (i = 0; i < rdev->num_msix - 1; i++) + for (i = num_vec_created; i >= 0; i--) { + bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id); bnxt_qplib_free_nq(&rdev->nq[i]); -dealloc_dpi: + } bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &rdev->qplib_res.dpi_tbl, &rdev->dpi_privileged); @@ -989,12 +997,17 @@ static void 
bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, struct ib_event ib_event; ib_event.device = ibdev; - if (qp) + if (qp) { ib_event.element.qp = qp; - else + ib_event.event = event; + if (qp->event_handler) + qp->event_handler(&ib_event, qp->qp_context); + + } else { ib_event.element.port_num = port_num; - ib_event.event = event; - ib_dispatch_event(&ib_event); + ib_event.event = event; + ib_dispatch_event(&ib_event); + } } #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02 @@ -1201,8 +1214,11 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) cancel_delayed_work(&rdev->worker); - bnxt_re_cleanup_res(rdev); - bnxt_re_free_res(rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, + &rdev->flags)) + bnxt_re_cleanup_res(rdev); + if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags)) + bnxt_re_free_res(rdev); if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); @@ -1252,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) /* Registered a new RoCE device instance to netdev */ rc = bnxt_re_register_netdev(rdev); if (rc) { + rtnl_unlock(); pr_err("Failed to register with netedev: %#x\n", rc); return -EINVAL; } @@ -1331,12 +1348,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) pr_err("Failed to allocate resources: %#x\n", rc); goto fail; } + set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags); rc = bnxt_re_init_res(rdev); if (rc) { pr_err("Failed to initialize resources: %#x\n", rc); goto fail; } + set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags); + if (!rdev->is_virtfn) { rc = bnxt_re_setup_qos(rdev); if (rc) @@ -1461,6 +1481,7 @@ static void bnxt_re_task(struct work_struct *work) "Failed to register with IB: %#x", rc); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); + goto exit; } break; case NETDEV_UP: @@ -1484,6 +1505,7 @@ static void bnxt_re_task(struct work_struct *work) } smp_mb__before_atomic(); atomic_dec(&rdev->sched_count); +exit: kfree(re_work); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 6ad0d46ab879a6a1bf6d231e22b6e766b796efd2..c828c715d3cfe6219a1cb8cca58c257cc3ca59f1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -360,7 +360,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) } /* Make sure the HW is stopped! */ - bnxt_qplib_nq_stop_irq(nq, true); + if (nq->requested) + bnxt_qplib_nq_stop_irq(nq, true); if (nq->bar_reg_iomem) iounmap(nq->bar_reg_iomem); @@ -1969,6 +1970,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) INIT_LIST_HEAD(&cq->sqf_head); INIT_LIST_HEAD(&cq->rqf_head); spin_lock_init(&cq->compl_lock); + spin_lock_init(&cq->flush_lock); bnxt_qplib_arm_cq_enable(cq); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 2852d350ada1657242d37bc521346557fd97be33..8b3b5fdc19bbbbbcbf46b9ecbaf2ed3b2c5309fb 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -309,8 +309,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, rcfw->aeq_handler(rcfw, qp_event, qp); break; default: - /* Command Response */ - spin_lock_irqsave(&cmdq->lock, flags); + /* + * Command Response + * cmdq->lock needs to be acquired to synchronize + * the command send and completion reaping.
This function + * is always called with creq->lock held. Using + * the nested variant of spin_lock. + * + */ + + spin_lock_irqsave_nested(&cmdq->lock, flags, + SINGLE_DEPTH_NESTING); cookie = le16_to_cpu(qp_event->cookie); mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; @@ -605,13 +614,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) bnxt_qplib_rcfw_stop_irq(rcfw, true); - if (rcfw->cmdq_bar_reg_iomem) - iounmap(rcfw->cmdq_bar_reg_iomem); - rcfw->cmdq_bar_reg_iomem = NULL; - - if (rcfw->creq_bar_reg_iomem) - iounmap(rcfw->creq_bar_reg_iomem); - rcfw->creq_bar_reg_iomem = NULL; + iounmap(rcfw->cmdq_bar_reg_iomem); + iounmap(rcfw->creq_bar_reg_iomem); indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size); if (indx != rcfw->bmap_size) @@ -620,6 +624,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) kfree(rcfw->cmdq_bitmap); rcfw->bmap_size = 0; + rcfw->cmdq_bar_reg_iomem = NULL; + rcfw->creq_bar_reg_iomem = NULL; rcfw->aeq_handler = NULL; rcfw->vector = 0; } @@ -705,6 +711,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, dev_err(&rcfw->pdev->dev, "QPLIB: CREQ BAR region %d mapping failed", rcfw->creq_bar_reg); + iounmap(rcfw->cmdq_bar_reg_iomem); + rcfw->cmdq_bar_reg_iomem = NULL; return -ENOMEM; } rcfw->creq_qp_event_processed = 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 4097f3fa25c5f71e1660bb3d4d5074b99d3331ca..09e7d3dd30553bdd038a1fe268e6faba2208ea13 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -775,9 +775,8 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) req.cos0 = cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, - 0); - return 0; + return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); } int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 0f83cbec33f3464e0558fb71871f508264e4d982..4dcc92d11609728b1bc3136068ffb6730ff04693 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -458,6 +458,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) skb_reset_transport_header(skb); } else { skb = alloc_skb(len, gfp); + if (!skb) + return NULL; } t4_set_arp_err_handler(skb, NULL, NULL); return skb; @@ -491,7 +493,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); - kfree_skb(skb); return 0; } @@ -502,7 +503,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); - kfree_skb(skb); return 0; } @@ -1904,8 +1904,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) } mutex_unlock(&ep->com.mutex); - if (release) + if (release) { + close_complete_upcall(ep, -ECONNRESET); release_ep_resources(ep); + } c4iw_put_ep(&ep->com); return 0; } @@ -2376,20 +2378,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; pr_debug("ep %p tid %u\n", ep, ep->hwtid); - - skb_get(skb); - rpl = cplhdr(skb); - if (!is_t4(adapter_type)) { - skb_trim(skb, roundup(sizeof(*rpl5), 16)); - rpl5 = (void *)rpl; - 
INIT_TP_WR(rpl5, ep->hwtid); - } else { - skb_trim(skb, sizeof(*rpl)); - INIT_TP_WR(rpl, ep->hwtid); - } - OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, - ep->hwtid)); - cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, enable_tcp_timestamps && req->tcpopt.tstamp, (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); @@ -2435,6 +2423,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); } + + skb_get(skb); + rpl = cplhdr(skb); + if (!is_t4(adapter_type)) { + skb_trim(skb, roundup(sizeof(*rpl5), 16)); + rpl5 = (void *)rpl; + INIT_TP_WR(rpl5, ep->hwtid); + } else { + skb_trim(skb, sizeof(*rpl)); + INIT_TP_WR(rpl, ep->hwtid); + } + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, + ep->hwtid)); + if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { u32 isn = (prandom_u32() & ~7UL) - 1; opt2 |= T5_OPT_2_VALID_F; @@ -2796,7 +2798,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) break; case MPA_REQ_SENT: (void)stop_ep_timer(ep); - if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) + if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 || + (mpa_rev == 2 && ep->tried_with_mpa_v1)) connect_reply_upcall(ep, -ECONNRESET); else { /* @@ -3608,7 +3611,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) if (close) { if (abrupt) { set_bit(EP_DISC_ABORT, &ep->com.history); - close_complete_upcall(ep, -ECONNRESET); ret = send_abort(ep); } else { set_bit(EP_DISC_CLOSE, &ep->com.history); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 6d304279409427910f03a620dae31080aa49823b..1fd8798d91a737b1b34786ce90fe7fad5f01a148 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -161,7 +161,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, cq->gts = rdev->lldi.gts_reg; cq->rdev = rdev; - cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, + cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS, &cq->bar2_qid, user ? 
&cq->bar2_pa : NULL); if (user && !cq->bar2_pa) { diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 7b76e6f81aeb477181afedc2f44fec990ce3090f..f2fb7318abc104e99e2495f42ace1950ab43bb34 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -274,13 +274,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { int err; - struct fw_ri_tpte tpt; + struct fw_ri_tpte *tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; + tpt = kmalloc(sizeof(*tpt), GFP_KERNEL); + if (!tpt) + return -ENOMEM; + stag_state = stag_state > 0; stag_idx = (*stag) >> 8; @@ -290,6 +294,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, mutex_lock(&rdev->stats.lock); rdev->stats.stag.fail++; mutex_unlock(&rdev->stats.lock); + kfree(tpt); return -ENOMEM; } mutex_lock(&rdev->stats.lock); @@ -304,28 +309,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, /* write TPT entry */ if (reset_tpt_entry) - memset(&tpt, 0, sizeof(tpt)); + memset(tpt, 0, sizeof(*tpt)); else { - tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | + tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) | FW_RI_TPTE_STAGSTATE_V(stag_state) | FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid)); - tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | + tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) | FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| FW_RI_TPTE_PS_V(page_size)); - tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( + tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3)); - tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); - tpt.va_hi = cpu_to_be32((u32)(to >> 32)); - tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); - tpt.dca_mwbcnt_pstag = cpu_to_be32(0); - tpt.len_hi = cpu_to_be32((u32)(len >> 32)); + tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); + tpt->va_hi = cpu_to_be32((u32)(to >> 32)); + tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); + tpt->dca_mwbcnt_pstag = cpu_to_be32(0); + tpt->len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->lldi.vr->stag.start >> 5), - sizeof(tpt), &tpt, skb, wr_waitp); + sizeof(*tpt), tpt, skb, wr_waitp); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); @@ -333,6 +338,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } + kfree(tpt); return err; } diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 347fe18b1a41c0990c753aa29f2efaaea0cd119b..a9e3a11bea54af54f0de1dcdae62be93b581194d 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -279,12 +279,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, wq->db = rdev->lldi.db_reg; - wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, + wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, + CXGB4_BAR2_QTYPE_EGRESS, &wq->sq.bar2_qid, user ? &wq->sq.bar2_pa : NULL); if (need_rq) wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, - T4_BAR2_QTYPE_EGRESS, + CXGB4_BAR2_QTYPE_EGRESS, &wq->rq.bar2_qid, user ? 
&wq->rq.bar2_pa : NULL); @@ -2572,7 +2573,7 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, memset(wq->queue, 0, wq->memsize); pci_unmap_addr_set(wq, mapping, wq->dma_addr); - wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS, + wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS, &wq->bar2_qid, user ? &wq->bar2_pa : NULL); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index e1668bcc2d13d71aba2f35021ec25cd693e79682..b09a4b1cf397bbf37b540786de4090e4d368ef5f 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1074,6 +1074,8 @@ static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); static void log_physical_state(struct hfi1_pportdata *ppd, u32 state); static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); +static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, + int msecs); static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc); static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); static void handle_temp_err(struct hfi1_devdata *dd); @@ -9849,6 +9851,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) /* disable the port */ clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); + cancel_work_sync(&ppd->freeze_work); } static inline int init_cpu_counters(struct hfi1_devdata *dd) @@ -10577,12 +10580,29 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason, } } -/* - * Verify if BCT for data VLs is non-zero. +/** + * data_vls_operational() - Verify if data VL BCT credits and MTU + * are both set. + * @ppd: pointer to hfi1_pportdata structure + * + * Return: true - Ok, false -otherwise. */ static inline bool data_vls_operational(struct hfi1_pportdata *ppd) { - return !!ppd->actual_vls_operational; + int i; + u64 reg; + + if (!ppd->actual_vls_operational) + return false; + + for (i = 0; i < ppd->vls_supported; i++) { + reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); + if ((reg && !ppd->dd->vld[i].mtu) || + (!reg && ppd->dd->vld[i].mtu)) + return false; + } + + return true; } /* @@ -10695,7 +10715,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) if (!data_vls_operational(ppd)) { dd_dev_err(dd, - "%s: data VLs not operational\n", __func__); + "%s: Invalid data VL credits or mtu\n", + __func__); ret = -EINVAL; break; } @@ -10768,13 +10789,15 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) break; ppd->port_error_action = 0; - ppd->host_link_state = HLS_DN_POLL; if (quick_linkup) { /* quick linkup does not go into polling */ ret = do_quick_linkup(dd); } else { ret1 = set_physical_link_state(dd, PLS_POLLING); + if (!ret1) + ret1 = wait_phys_link_out_of_offline(ppd, + 3000); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, "Failed to transition to Polling link state, return 0x%x\n", @@ -10782,6 +10805,14 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ret = -EINVAL; } } + + /* + * Change the host link state after requesting DC8051 to + * change its physical state so that we can ignore any + * interrupt with stale LNI(XX) error, which will not be + * cleared until DC8051 transitions to Polling state. 
+ */ + ppd->host_link_state = HLS_DN_POLL; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* @@ -12485,7 +12516,8 @@ static int init_cntrs(struct hfi1_devdata *dd) } /* allocate space for the counter values */ - dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); + dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), + GFP_KERNEL); if (!dd->cntrs) goto bail; @@ -12912,6 +12944,39 @@ static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, return read_state; } +/* + * wait_phys_link_out_of_offline - wait for any out of offline state + * @ppd: port device + * @msecs: the number of milliseconds to wait + * + * Wait up to msecs milliseconds for any out of offline physical link + * state change to occur. + * Returns the read physical state once the link leaves offline, + * otherwise -ETIMEDOUT. + */ +static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd, + int msecs) +{ + u32 read_state; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(msecs); + while (1) { + read_state = read_physical_state(ppd->dd); + if ((read_state & 0xF0) != PLS_OFFLINE) + break; + if (time_after(jiffies, timeout)) { + dd_dev_err(ppd->dd, + "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n", + read_state, msecs); + return -ETIMEDOUT; + } + usleep_range(1950, 2050); /* sleep 2ms-ish */ + } + + log_state_transition(ppd, read_state); + return read_state; +} + #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) @@ -13387,7 +13452,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) int total_contexts; int ret; unsigned ngroups; - int qos_rmt_count; + int rmt_count; int user_rmt_reduced; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); @@ -13449,10 +13514,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd) n_usr_ctxts = rcv_contexts - total_contexts; } - /* each user context requires an entry in the RMT */ - qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); - if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { - user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; + /* + * The RMT entries are currently allocated as shown below: + * 1. QOS (0 to 128 entries); + * 2. FECN for PSM (num_user_contexts + num_vnic_contexts); + * 3. VNIC (num_vnic_contexts). + * It should be noted that PSM FECN oversubscribes num_vnic_contexts + * entries of RMT because both VNIC and PSM could allocate any receive + * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts, + * and PSM FECN must reserve an RMT entry for each possible PSM receive + * context.
+ */ + rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); + if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { + user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; dd_dev_err(dd, "RMT size is reducing the number of user receive contexts from %u to %d\n", n_usr_ctxts, @@ -14440,9 +14515,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, u64 reg; int i, idx, regoff, regidx; u8 offset; + u32 total_cnt; /* there needs to be enough room in the map table */ - if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) { + total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; + if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); return; } @@ -14496,7 +14573,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, /* add rule 1 */ add_rsm_rule(dd, RSM_INS_FECN, &rrd); - rmt->used += dd->num_user_contexts; + rmt->used += total_cnt; } /* Initialize RSM for VNIC */ @@ -14572,7 +14649,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); } -static void init_rxe(struct hfi1_devdata *dd) +static int init_rxe(struct hfi1_devdata *dd) { struct rsm_map_table *rmt; u64 val; @@ -14581,6 +14658,9 @@ static void init_rxe(struct hfi1_devdata *dd) write_csr(dd, RCV_ERR_MASK, ~0ull); rmt = alloc_rsm_map_table(dd); + if (!rmt) + return -ENOMEM; + /* set up QOS, including the QPN map table */ init_qos(dd, rmt); init_user_fecn_handling(dd, rmt); @@ -14607,6 +14687,7 @@ static void init_rxe(struct hfi1_devdata *dd) val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << RCV_BYPASS_HDR_SIZE_SHIFT); write_csr(dd, RCV_BYPASS, val); + return 0; } static void init_other(struct hfi1_devdata *dd) @@ -15149,7 +15230,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, goto bail_cleanup; /* set initial RXE CSRs */ - init_rxe(dd); + ret = init_rxe(dd); + if (ret) + goto bail_cleanup; + /* set initial TXE CSRs */ init_txe(dd); /* set initial non-RXE, non-TXE CSRs */ diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index e2290f32c8d90c308e40f9f244c26714ae92e836..5bc811b7e6cf95d36b0242b5a969025520b05e33 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -141,18 +141,21 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, if (!data) return -ENOMEM; copy = min(len, datalen - 1); - if (copy_from_user(data, buf, copy)) - return -EFAULT; + if (copy_from_user(data, buf, copy)) { + ret = -EFAULT; + goto free_data; + } ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; ptr = data; token = ptr; for (ptr = data; *ptr; ptr = end + 1, token = ptr) { char *dash; unsigned long range_start, range_end, i; bool remove = false; + unsigned long bound = 1U << BITS_PER_BYTE; end = strchr(ptr, ','); if (end) @@ -178,6 +181,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, BITS_PER_BYTE); break; } + /* Check the inputs */ + if (range_start >= bound || range_end >= bound) + break; + for (i = range_start; i <= range_end; i++) { if (remove) clear_bit(i, fault->opcodes); @@ -190,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, ret = len; debugfs_file_put(file->f_path.dentry); +free_data: kfree(data); return ret; } @@ -209,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, return -ENOMEM; ret = debugfs_file_get(file->f_path.dentry); 
if (unlikely(ret)) - return ret; + goto free_data; bit = find_first_bit(fault->opcodes, bitsize); while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); @@ -227,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, data[size - 1] = '\n'; data[size] = '\0'; ret = simple_read_from_buffer(buf, len, pos, data, size); +free_data: kfree(data); return ret; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 1fc75647e47bf1839c3c743f4ad051785c4d4dc9..499bfae098c24019dca0e92a5c1edb8a133b5e0e 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) vmf = 1; break; case STATUS: - if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { + if (flags & VM_WRITE) { ret = -EPERM; goto done; } @@ -1345,12 +1345,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) - return -EFAULT; + ret = -EFAULT; addr = arg + offsetof(struct hfi1_tid_info, length); - if (copy_to_user((void __user *)addr, &tinfo.length, + if (!ret && copy_to_user((void __user *)addr, &tinfo.length, sizeof(tinfo.length))) ret = -EFAULT; + + if (ret) + hfi1_user_exp_rcv_invalid(fd, &tinfo); } return ret; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index d9470317983f615b387f2597244ce9a4cae269fb..2ea42c04cfd25fefb9c68f1ed9dd07463514cf54 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -154,6 +154,8 @@ struct hfi1_ib_stats { extern struct hfi1_ib_stats hfi1_stats; extern const struct pci_error_handlers hfi1_pci_err_handler; +extern int num_driver_cntrs; + /* * First-cut criterion for "device is active" is * two thousand dwords combined Tx, Rx traffic per @@ -1423,7 +1425,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, struct hfi1_devdata *dd, u8 hw_pidx, u8 port); void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd); int hfi1_rcd_put(struct hfi1_ctxtdata *rcd); -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd); +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd); struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, u16 ctxt); struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt); diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 758d273c32cfe32b6201277cb4796c8fc4450d7a..368f4f08b6866695cc37770ba8cf19a9f307370d 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -213,12 +213,12 @@ static void hfi1_rcd_free(struct kref *kref) struct hfi1_ctxtdata *rcd = container_of(kref, struct hfi1_ctxtdata, kref); - hfi1_free_ctxtdata(rcd->dd, rcd); - spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); rcd->dd->rcd[rcd->ctxt] = NULL; spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); + hfi1_free_ctxtdata(rcd->dd, rcd); + kfree(rcd); } @@ -241,10 +241,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd) * @rcd: pointer to an initialized rcd data structure * * Use this to get a reference after the init. + * + * Return : reflect kref_get_unless_zero(), which returns non-zero on + * increment, otherwise 0. 
*/ -void hfi1_rcd_get(struct hfi1_ctxtdata *rcd) +int hfi1_rcd_get(struct hfi1_ctxtdata *rcd) { - kref_get(&rcd->kref); + return kref_get_unless_zero(&rcd->kref); } /** @@ -324,7 +327,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) spin_lock_irqsave(&dd->uctxt_lock, flags); if (dd->rcd[ctxt]) { rcd = dd->rcd[ctxt]; - hfi1_rcd_get(rcd); + if (!hfi1_rcd_get(rcd)) + rcd = NULL; } spin_unlock_irqrestore(&dd->uctxt_lock, flags); @@ -794,7 +798,8 @@ static int create_workqueues(struct hfi1_devdata *dd) ppd->hfi1_wq = alloc_workqueue( "hfi%d_%d", - WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, + WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES, dd->unit, pidx); if (!ppd->hfi1_wq) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 0307405491e015e325d1ed692c3e873822b991fc..1669548e91dcf5e86a5ed936ca3a5ddf58ffeef4 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015-2017 Intel Corporation. + * Copyright(c) 2015-2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -2326,7 +2326,7 @@ struct opa_port_status_req { __be32 vl_select_mask; }; -#define VL_MASK_ALL 0x000080ff +#define VL_MASK_ALL 0x00000000000080ffUL struct opa_port_status_rsp { __u8 port_num; @@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, } static void a0_portstatus(struct hfi1_pportdata *ppd, - struct opa_port_status_rsp *rsp, u32 vl_select_mask) + struct opa_port_status_rsp *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, (struct opa_port_status_req *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_port_status_rsp *rsp; - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); + unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl; size_t response_data_size; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u8 port_num = req->port_num; - u8 num_vls = hweight32(vl_select_mask); + u8 num_vls = hweight64(vl_select_mask); struct _vls_pctrs *vlinfo; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, hfi1_read_link_quality(dd, &rsp->link_quality_indicator); - rsp->vl_select_mask = cpu_to_be32(vl_select_mask); + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, @@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. 
*/ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); @@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, vfi++; } - a0_portstatus(ppd, rsp, vl_select_mask); + a0_portstatus(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, - u32 vl_select_mask) +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, u64 port_mask; u8 port_num; unsigned long vl; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; u16 link_width; u16 link_speed; @@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = @@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(ppd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct _vls_ectrs *vlinfo; unsigned long vl; u64 port_mask, tmp; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; req = (struct opa_port_error_counters64_msg *)pmp->data; @@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, @@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u64 portn = be64_to_cpu(req->port_select_mask[3]); u32 counter_select = be32_to_cpu(req->counter_select_mask); - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl; if ((nports != 1) || (portn != 1 << port)) { @@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_UNCORRECTABLE_ERRORS) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); @@ 
-4836,7 +4829,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags, int ret; int pkey_idx; int local_mad = 0; - u32 resp_len = 0; + u32 resp_len = in_wc->byte_len - sizeof(*in_grh); struct hfi1_ibport *ibp = to_iport(ibdev, port); pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 6c967dde58e702c228835a84c3b1ea19abc1ea64..a8dd12e525f8126d65d93023d8bbac9f52723efb 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -331,7 +331,9 @@ int pcie_speeds(struct hfi1_devdata *dd) /* * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed */ - if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) { + if (parent && + (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT || + dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) { dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n"); dd->link_gen3_capable = 0; } diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 752057647f091734368f998c0173234d8a1f2dd0..d6d18bb94f7354e2f1cb74257956f9947d474d22 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -2117,7 +2117,7 @@ int init_credit_return(struct hfi1_devdata *dd) "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; - goto done; + goto free_cr_base; } } set_dev_node(&dd->pcidev->dev, dd->node); @@ -2125,6 +2125,10 @@ int init_credit_return(struct hfi1_devdata *dd) ret = 0; done: return ret; + +free_cr_base: + free_credit_return(dd); + goto done; } void free_credit_return(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 9b1e84a6b1ccb67e3ffae2eaf7603ae73d7ac03a..63c5ba66b305f56147a41e8a469830ed77823375 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -784,7 +784,7 @@ void notify_error_qp(struct rvt_qp *qp) write_seqlock(lock); if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) { - qp->s_flags &= ~RVT_S_ANY_WAIT_IO; + qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; rvt_put_qp(qp); diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 9bd63abb2dfec66deb621e7a7e906118e0e3424c..770c78c6573067c61837a462ea8bead2b8178f79 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; + rvt_qp_wqe_unreserve(qp, wqe); s_last = qp->s_last; trace_hfi1_qp_send_completion(qp, wqe, s_last); if (++s_last >= qp->s_size) @@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, u32 s_last; rvt_put_swqe(wqe); + rvt_qp_wqe_unreserve(qp, wqe); s_last = qp->s_last; trace_hfi1_qp_send_completion(qp, wqe, s_last); if (++s_last >= qp->s_size) @@ -2301,7 +2303,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } @@ -2375,7 +2377,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && 
e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 5f56f3c1b4c4e42d9de35f014fb554252fa3f758..62a3832a1ebbc6d926fd0468b423954b1ac5974c 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -278,6 +278,8 @@ static void ruc_loopback(struct rvt_qp *sqp) goto op_err; if (!ret) goto rnr_nak; + if (wqe->length > qp->r_len) + goto inv_err; break; case IB_WR_RDMA_WRITE_WITH_IMM: @@ -445,7 +447,10 @@ static void ruc_loopback(struct rvt_qp *sqp) goto err; inv_err: - send_status = IB_WC_REM_INV_REQ_ERR; + send_status = + sqp->ibqp.qp_type == IB_QPT_RC ? + IB_WC_REM_INV_REQ_ERR : + IB_WC_SUCCESS; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 88e326d6cc494b528f9675e7c5bf45e9fbcee8f8..291c12f588b58b23e9e1b006ea5a0d862cd66df1 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -65,6 +65,7 @@ #define SDMA_DESCQ_CNT 2048 #define SDMA_DESC_INTR 64 #define INVALID_TAIL 0xffff +#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32)) static uint sdma_descq_cnt = SDMA_DESCQ_CNT; module_param(sdma_descq_cnt, uint, S_IRUGO); @@ -410,10 +411,7 @@ static void sdma_flush(struct sdma_engine *sde) sdma_flush_descq(sde); spin_lock_irqsave(&sde->flushlist_lock, flags); /* copy flush list */ - list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) { - list_del_init(&txp->list); - list_add_tail(&txp->list, &flushlist); - } + list_splice_init(&sde->flushlist, &flushlist); spin_unlock_irqrestore(&sde->flushlist_lock, flags); /* flush from flush list */ list_for_each_entry_safe(txp, txp_next, &flushlist, list) @@ -1283,7 +1281,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) struct sdma_engine *sde; if (dd->sdma_pad_dma) { - dma_free_coherent(&dd->pcidev->dev, 4, + dma_free_coherent(&dd->pcidev->dev, SDMA_PAD, (void *)dd->sdma_pad_dma, dd->sdma_pad_phys); dd->sdma_pad_dma = NULL; @@ -1484,7 +1482,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) /* Allocate memory for pad */ dd->sdma_pad_dma = dma_zalloc_coherent( &dd->pcidev->dev, - sizeof(u32), + SDMA_PAD, &dd->sdma_pad_phys, GFP_KERNEL ); @@ -1521,8 +1519,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) } ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); - if (ret < 0) + if (ret < 0) { + kfree(tmp_sdma_rht); goto bail; + } + dd->sdma_rht = tmp_sdma_rht; dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); @@ -2426,7 +2427,7 @@ int sdma_send_txreq(struct sdma_engine *sde, wait->tx_count++; wait->count += tx->num_desc; } - schedule_work(&sde->flush_worker); + queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto unlock; nodesc: @@ -2526,7 +2527,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, } } spin_unlock(&sde->flushlist_lock); - schedule_work(&sde->flush_worker); + queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); ret = -ECOMM; goto update_tail; nodesc: diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 70d39fc450a1e112b2f97b4e499cbf96623d19ad..54eb69564264926ab287c3e99015212f3b1f15f6 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = packet->ohdr->u.ud.imm_data; wc.wc_flags = 
IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index dbe7d14a5c76d18f23ddf13e264f97c56d086d86..3592a9ec155e85b686dfaf68e87d8e006550d8bc 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -232,7 +232,7 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) } /* Verify that access is OK for the user buffer */ - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, + if (!access_ok((void __user *)vaddr, npages * PAGE_SIZE)) { dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n", (void *)vaddr, npages); @@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, u32 *tidlist = NULL; struct tid_user_buf *tidbuf; + if (!PAGE_ALIGNED(tinfo->vaddr)) + return -EINVAL; + tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5c88706121c1cb0faf2da79dae014113f7d0a0d9..684a298e150370af34024410ffdee906073c2b3c 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -132,25 +132,22 @@ static int defer_packet_queue( struct hfi1_user_sdma_pkt_q *pq = container_of(wait, struct hfi1_user_sdma_pkt_q, busy); struct hfi1_ibdev *dev = &pq->dd->verbs_dev; - struct user_sdma_txreq *tx = - container_of(txreq, struct user_sdma_txreq, txreq); - if (sdma_progress(sde, seq, txreq)) { - if (tx->busycount++ < MAX_DEFER_RETRY_COUNT) - goto eagain; - } + write_seqlock(&dev->iowait_lock); + if (sdma_progress(sde, seq, txreq)) + goto eagain; /* * We are assuming that if the list is enqueued somewhere, it * is to the dmawait list since that is the only place where * it is supposed to be enqueued. */ xchg(&pq->state, SDMA_PKT_Q_DEFERRED); - write_seqlock(&dev->iowait_lock); if (list_empty(&pq->busy.list)) iowait_queue(pkts_sent, &pq->busy, &sde->dmawait); write_sequnlock(&dev->iowait_lock); return -EBUSY; eagain: + write_sequnlock(&dev->iowait_lock); return -EAGAIN; } @@ -187,7 +184,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; pq->n_max_reqs = hfi1_sdma_comp_ring_size; - pq->state = SDMA_PKT_Q_INACTIVE; atomic_set(&pq->n_reqs, 0); init_waitqueue_head(&pq->wait); atomic_set(&pq->n_locked, 0); @@ -276,7 +272,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, /* Wait until all requests have been freed. 
*/ wait_event_interruptible( pq->wait, - (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); + !atomic_read(&pq->n_reqs)); kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); @@ -312,6 +308,13 @@ static u8 dlid_to_selector(u16 dlid) return mapping[hash]; } +/** + * hfi1_user_sdma_process_request() - Process and start a user sdma request + * @fd: valid file descriptor + * @iovec: array of io vectors to process + * @dim: overall iovec array size + * @count: number of io vector array entries processed + */ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, struct iovec *iovec, unsigned long dim, unsigned long *count) @@ -328,7 +331,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, u8 opcode, sc, vl; u16 pkey; u32 slid; - int req_queued = 0; u16 dlid; u32 selector; @@ -392,7 +394,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->data_len = 0; req->pq = pq; req->cq = cq; - req->status = -1; req->ahg_idx = -1; req->iov_idx = 0; req->sent = 0; @@ -400,12 +401,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->seqcomp = 0; req->seqsubmitted = 0; req->tids = NULL; - req->done = 0; req->has_error = 0; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); + /* The request is initialized, count it */ + atomic_inc(&pq->n_reqs); + if (req_opcode(info.ctrl) == EXPECTED) { /* expected must have a TID info and at least one data vector */ if (req->data_iovs < 2) { @@ -500,7 +503,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, ret = pin_vector_pages(req, &req->iovs[i]); if (ret) { req->data_iovs = i; - req->status = ret; goto free_req; } req->data_len += req->iovs[i].iov.iov_len; @@ -561,23 +563,11 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->ahg_idx = sdma_ahg_alloc(req->sde); set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); - atomic_inc(&pq->n_reqs); - req_queued = 1; + pq->state = SDMA_PKT_Q_ACTIVE; /* Send the first N packets in the request to buy us some time */ ret = user_sdma_send_pkts(req, pcount); - if (unlikely(ret < 0 && ret != -EBUSY)) { - req->status = ret; + if (unlikely(ret < 0 && ret != -EBUSY)) goto free_req; - } - - /* - * It is possible that the SDMA engine would have processed all the - * submitted packets by the time we get here. Therefore, only set - * packet queue state to ACTIVE if there are still uncompleted - * requests. - */ - if (atomic_read(&pq->n_reqs)) - xchg(&pq->state, SDMA_PKT_Q_ACTIVE); /* * This is a somewhat blocking send implementation. @@ -588,14 +578,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, while (req->seqsubmitted != req->info.npkts) { ret = user_sdma_send_pkts(req, pcount); if (ret < 0) { - if (ret != -EBUSY) { - req->status = ret; - WRITE_ONCE(req->has_error, 1); - if (READ_ONCE(req->seqcomp) == - req->seqsubmitted - 1) - goto free_req; - return ret; - } + if (ret != -EBUSY) + goto free_req; wait_event_interruptible_timeout( pq->busy.wait_dma, (pq->state == SDMA_PKT_Q_ACTIVE), @@ -606,10 +590,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, *count += idx; return 0; free_req: - user_sdma_free_request(req, true); - if (req_queued) + /* + * If the submitted seqsubmitted == npkts, the completion routine + * controls the final state. If sequbmitted < npkts, wait for any + * outstanding packets to finish before cleaning up. 
+ */ + if (req->seqsubmitted < req->info.npkts) { + if (req->seqsubmitted) + wait_event(pq->busy.wait_dma, + (req->seqcomp == req->seqsubmitted - 1)); + user_sdma_free_request(req, true); pq_update(pq); - set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); + set_comp_state(pq, cq, info.comp_idx, ERROR, ret); + } return ret; } @@ -807,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) tx->flags = 0; tx->req = req; - tx->busycount = 0; INIT_LIST_HEAD(&tx->list); /* @@ -864,8 +856,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) changes = set_txreq_header_ahg(req, tx, datalen); - if (changes < 0) + if (changes < 0) { + ret = changes; goto free_tx; + } } } else { ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + @@ -917,7 +911,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count); req->seqsubmitted += count; if (req->seqsubmitted == req->info.npkts) { - WRITE_ONCE(req->done, 1); /* * The txreq has already been submitted to the HW queue * so we can free the AHG entry now. Corruption will not @@ -1365,11 +1358,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, return idx; } -/* - * SDMA tx request completion callback. Called when the SDMA progress - * state machine gets notification that the SDMA descriptors for this - * tx request have been processed by the DMA engine. Called in - * interrupt context. +/** + * user_sdma_txreq_cb() - SDMA tx request completion callback. + * @txreq: valid sdma tx request + * @status: success/failure of request + * + * Called when the SDMA progress state machine gets notification that + * the SDMA descriptors for this tx request have been processed by the + * DMA engine. Called in interrupt context. + * Only do work on completed sequences. */ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) { @@ -1378,7 +1375,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) struct user_sdma_request *req; struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq; - u16 idx; + enum hfi1_sdma_comp_state state = COMPLETE; if (!tx->req) return; @@ -1391,39 +1388,25 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) SDMA_DBG(req, "SDMA completion with error %d", status); WRITE_ONCE(req->has_error, 1); + state = ERROR; } req->seqcomp = tx->seqnum; kmem_cache_free(pq->txreq_cache, tx); - tx = NULL; - - idx = req->info.comp_idx; - if (req->status == -1 && status == SDMA_TXREQ_S_OK) { - if (req->seqcomp == req->info.npkts - 1) { - req->status = 0; - user_sdma_free_request(req, false); - pq_update(pq); - set_comp_state(pq, cq, idx, COMPLETE, 0); - } - } else { - if (status != SDMA_TXREQ_S_OK) - req->status = status; - if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && - (READ_ONCE(req->done) || - READ_ONCE(req->has_error))) { - user_sdma_free_request(req, false); - pq_update(pq); - set_comp_state(pq, cq, idx, ERROR, req->status); - } - } + + /* sequence isn't complete? 
We are done */ + if (req->seqcomp != req->info.npkts - 1) + return; + + user_sdma_free_request(req, false); + set_comp_state(pq, cq, req->info.comp_idx, state, status); + pq_update(pq); } static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) { - if (atomic_dec_and_test(&pq->n_reqs)) { - xchg(&pq->state, SDMA_PKT_Q_INACTIVE); + if (atomic_dec_and_test(&pq->n_reqs)) wake_up(&pq->wait); - } } static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) @@ -1448,6 +1431,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) if (!node) continue; + req->iovs[i].node = NULL; + if (unpin) hfi1_mmu_rb_remove(req->pq->handler, &node->rb); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index d2bc77f75253f3f775002a172ef20ddbd3a05d06..2c056702d9752c43d007a35e70c1052cfa82298b 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h @@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size, #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */ #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ -#define SDMA_PKT_Q_INACTIVE BIT(0) -#define SDMA_PKT_Q_ACTIVE BIT(1) -#define SDMA_PKT_Q_DEFERRED BIT(2) +enum pkt_q_sdma_state { + SDMA_PKT_Q_ACTIVE, + SDMA_PKT_Q_DEFERRED, +}; /* * Maximum retry attempts to submit a TX request @@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q { struct user_sdma_request *reqs; unsigned long *req_in_use; struct iowait busy; - unsigned state; + enum pkt_q_sdma_state state; wait_queue_head_t wait; unsigned long unpinned; struct mmu_rb_handler *handler; @@ -205,8 +206,6 @@ struct user_sdma_request { /* Writeable fields shared with interrupt */ u64 seqcomp ____cacheline_aligned_in_smp; u64 seqsubmitted; - /* status of the last txreq completed */ - int status; /* Send side fields */ struct list_head txps ____cacheline_aligned_in_smp; @@ -228,7 +227,6 @@ struct user_sdma_request { u16 tididx; /* progress index moving along the iovs array */ u8 iov_idx; - u8 done; u8 has_error; struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; @@ -247,7 +245,6 @@ struct user_sdma_txreq { struct list_head list; struct user_sdma_request *req; u16 flags; - unsigned int busycount; u64 seqnum; }; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index a7c586a5589d642524f7e659c6a4dfa512eed4e2..4e7b3c027901bd21fec925ef029cb471a1cc887c 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "hfi.h" #include "common.h" @@ -148,9 +149,6 @@ static int pio_wait(struct rvt_qp *qp, /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 -/* 16B trailing buffer */ -static const u8 trail_buf[MAX_16B_PADDING]; - static uint wss_threshold; module_param(wss_threshold, uint, S_IRUGO); MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); @@ -892,8 +890,8 @@ static int build_verbs_tx_desc( /* add icrc, lt byte, and padding to flit */ if (extra_bytes) - ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, - (void *)trail_buf, extra_bytes); + ret = sdma_txadd_daddr(sde->dd, &tx->txreq, + sde->dd->sdma_pad_phys, extra_bytes); bail_txadd: return ret; @@ -1141,6 +1139,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, if (slen > len) slen = len; + if (slen > ss->sge.sge_length) + slen = 
ss->sge.sge_length; rvt_update_sge(ss, slen, false); seg_pio_copy_mid(pbuf, addr, slen); len -= slen; @@ -1148,7 +1148,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, } /* add icrc, lt byte, and padding to flit */ if (extra_bytes) - seg_pio_copy_mid(pbuf, trail_buf, extra_bytes); + seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma, + extra_bytes); seg_pio_copy_end(pbuf); } @@ -1416,8 +1417,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd) rdi->dparms.props.max_cq = hfi1_max_cqs; rdi->dparms.props.max_ah = hfi1_max_ahs; rdi->dparms.props.max_cqe = hfi1_max_cqes; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_pd = hfi1_max_pds; rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; @@ -1596,6 +1595,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) sl = rdma_ah_get_sl(ah_attr); if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) return -EINVAL; + sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) @@ -1701,7 +1701,7 @@ static const char * const driver_cntr_names[] = { static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ static const char **dev_cntr_names; static const char **port_cntr_names; -static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); +int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); static int num_dev_cntrs; static int num_port_cntrs; static int cntr_names_initialized; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index c4ab2d5b4502ee1e905ef2c193495f56e479eaf9..8f766dd3f61c894e83ee6cdf7885fddab1c16125 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (tx) goto out; priv = qp->priv; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1c19bbc764b2d6f93134fe7775f55569d7b70b84..b1a78985b4ecad193f2f5e145ae2763414206440 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -72,6 +72,7 @@ struct hfi1_ibdev; struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp); +#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN) static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->slock) @@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct verbs_txreq *tx; struct hfi1_qp_priv *priv = qp->priv; - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP); if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c index c3c96c5869ed454680b619caef88a3fd369d5579..718dcdef946eeecd852a903cb89de1a7ae642c6b 100644 --- a/drivers/infiniband/hw/hfi1/vnic_sdma.c +++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c @@ -57,7 +57,6 @@ #define HFI1_VNIC_TXREQ_NAME_LEN 32 #define HFI1_VNIC_SDMA_DESC_WTRMRK 64 -#define HFI1_VNIC_SDMA_RETRY_COUNT 1 /* * 
struct vnic_txreq - VNIC transmit descriptor @@ -67,7 +66,6 @@ * @pad: pad buffer * @plen: pad length * @pbc_val: pbc value - * @retry_count: tx retry count */ struct vnic_txreq { struct sdma_txreq txreq; @@ -77,8 +75,6 @@ struct vnic_txreq { unsigned char pad[HFI1_VNIC_MAX_PAD]; u16 plen; __le64 pbc_val; - - u32 retry_count; }; static void vnic_sdma_complete(struct sdma_txreq *txreq, @@ -196,7 +192,6 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, ret = build_vnic_tx_desc(sde, tx, pbc); if (unlikely(ret)) goto free_desc; - tx->retry_count = 0; ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq, vnic_sdma->pkts_sent); @@ -238,14 +233,14 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde, struct hfi1_vnic_sdma *vnic_sdma = container_of(wait, struct hfi1_vnic_sdma, wait); struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev; - struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq); - if (sdma_progress(sde, seq, txreq)) - if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT) - return -EAGAIN; + write_seqlock(&dev->iowait_lock); + if (sdma_progress(sde, seq, txreq)) { + write_sequnlock(&dev->iowait_lock); + return -EAGAIN; + } vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED; - write_seqlock(&dev->iowait_lock); if (list_empty(&vnic_sdma->wait.list)) iowait_queue(pkts_sent, wait, &sde->dmawait); write_sequnlock(&dev->iowait_lock); diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index fddb5fdf92de86b94afaa63f877698a5cb97ed9e..21daca203947f80da756add0e7f147a56b5f37bf 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_HNS tristate "HNS RoCE Driver" depends on NET_VENDOR_HISILICON + depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS depends on ARM64 || (COMPILE_TEST && 64BIT) ---help--- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine @@ -29,3 +30,22 @@ config INFINIBAND_HNS_HIP08 To compile this driver as a module, choose M here: the module will be called hns-roce-hw-v2. + +config INFINIBAND_HNS_DFX + tristate "Hisilicon Hip08 Family RoCE DFX support" + depends on INFINIBAND_HNS_HIP08 + help + RoCE DFX driver support for Hisilicon RoCE engine in Hisilicon Hip08 + SoC. + + To compile this driver as a module, choose M here: the module + will be called hns-roce-cae. + +config INFINIBAND_HNS_TEST + tristate "Hisilicon Hip08 Family RoCE test support" + depends on INFINIBAND_HNS_HIP08 + help + This option provides the ability to adjust some parameters of roce + driver for debugging. + + To turn this feature on, choose M here. 
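For context on the new DFX options above: the Makefile change that follows passes -DCONFIG_INFINIBAND_HNS_DFX through ccflags-y, so shared headers can compile the instrumentation out when the option is disabled. Below is a minimal sketch of that pattern, under assumptions: only the Kconfig symbol and the rdfx_func_cnt() hook name come from this series; the header shape and the no-op stub are illustrative, not taken from the source.

/*
 * Illustrative sketch only: one common way a compile-time DFX switch such
 * as CONFIG_INFINIBAND_HNS_DFX is consumed from a shared header. The
 * rdfx_func_cnt() hook appears later in this series; the stub variant and
 * the forward declaration below are assumptions for self-containment.
 */
struct hns_roce_dev;

#ifdef CONFIG_INFINIBAND_HNS_DFX
/* Real implementation provided by the hns-roce-cae module objects. */
void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func_id);
#else
/* DFX instrumentation compiled out: counting hooks become no-ops. */
static inline void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func_id)
{
}
#endif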
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index cf03404b9d5865dd08f179442a0ebd97a60652f7..2a499e8ea0079ccf814807f1bacde3483a029819 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -4,11 +4,22 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +PWD = $(srctree)/drivers/infiniband/hw/hns/ +ccflags-y += -I$(PWD) \ + -I$(PWD)/roce-customer/ +ifeq ($(CONFIG_INFINIBAND_HNS_DFX), m) + ccflags-y += -DCONFIG_INFINIBAND_HNS_DFX +endif + obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ - hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o + hns_roce_sysfs.o \ + hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \ + roce-customer/rdfx_intf.o roce-customer/rdfx_entry.o obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o hns-roce-hw-v1-objs := hns_roce_hw_v1.o obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o -hns-roce-hw-v2-objs := hns_roce_hw_v2.o +hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o hns_roce_hw_sysfs_v2.o +obj-$(CONFIG_INFINIBAND_HNS_DFX) += hns-roce-cae.o +hns-roce-cae-$(CONFIG_INFINIBAND_HNS_DFX) := roce-customer/rdfx_sysfs.o roce-customer/rdfx_hw_v2.o roce-customer/rdfx_main.o diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 0d96c5bb38cdf5bc066a1caf8bb0f2e28d1a7892..82e64d193cd679cabb475c8024f35f5e6dbbe9a4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include "roce_k_compat.h" #include #include @@ -43,43 +44,88 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *ah_attr, struct ib_udata *udata) { - struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device); +#ifdef CONFIG_KERNEL_419 const struct ib_gid_attr *gid_attr; +#else + struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device); struct device *dev = hr_dev->dev; + struct ib_gid_attr gid_attr; + union ib_gid sgid; + int ret; +#endif struct hns_roce_ah *ah; u16 vlan_tag = 0xffff; + struct in6_addr in6; const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); + u8 vlan_en = 0; ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); - /* Get mac address */ - memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); - - gid_attr = ah_attr->grh.sgid_attr; - if (is_vlan_dev(gid_attr->ndev)) - vlan_tag = vlan_dev_vlan_id(gid_attr->ndev); - - if (vlan_tag < 0x1000) - vlan_tag |= (rdma_ah_get_sl(ah_attr) & - HNS_ROCE_VLAN_SL_BIT_MASK) << - HNS_ROCE_VLAN_SL_SHIFT; - - ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | - (rdma_ah_get_port_num(ah_attr) << - HNS_ROCE_PORT_NUM_SHIFT)); - ah->av.gid_index = grh->sgid_index; - ah->av.vlan = cpu_to_le16(vlan_tag); - dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, - ah->av.vlan); - - if (rdma_ah_get_static_rate(ah_attr)) - ah->av.stat_rate = IB_RATE_10_GBPS; - - memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); - ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) << - HNS_ROCE_SL_SHIFT); + if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { + /* Get mac address */ + memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw)); + if (rdma_is_multicast_addr(&in6)) { + rdma_get_mcast_mac(&in6, ah->av.mac); + } else { + u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); + + if (!dmac) { + kfree(ah); + return 
ERR_PTR(-EINVAL); + } + memcpy(ah->av.mac, dmac, ETH_ALEN); + } + +#ifdef CONFIG_KERNEL_419 + gid_attr = ah_attr->grh.sgid_attr; + if (is_vlan_dev(gid_attr->ndev)) { + vlan_tag = vlan_dev_vlan_id(gid_attr->ndev); + vlan_en = 1; + } +#else + /* Get source gid */ + ret = ib_get_cached_gid(ibpd->device, + rdma_ah_get_port_num(ah_attr), + grh->sgid_index, &sgid, &gid_attr); + if (ret) { + dev_err(dev, "Get index %u of sgid on port %u failed(%d)!\n", + grh->sgid_index, rdma_ah_get_port_num(ah_attr), + ret); + kfree(ah); + return ERR_PTR(ret); + } + + if (gid_attr.ndev) { + if (is_vlan_dev(gid_attr.ndev)) { + vlan_tag = vlan_dev_vlan_id(gid_attr.ndev); + vlan_en = 1; + } + dev_put(gid_attr.ndev); + } +#endif + + if (vlan_tag < 0x1000) + vlan_tag |= (rdma_ah_get_sl(ah_attr) & + HNS_ROCE_VLAN_SL_BIT_MASK) << + HNS_ROCE_VLAN_SL_SHIFT; + + ah->av.port = (rdma_ah_get_port_num(ah_attr) << + HNS_ROCE_PORT_NUM_SHIFT); + ah->av.gid_index = grh->sgid_index; + ah->av.vlan = vlan_tag; + ah->av.vlan_en = vlan_en; + + if (rdma_ah_get_static_rate(ah_attr)) + ah->av.stat_rate = IB_RATE_10_GBPS; + + memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); + ah->av.flowlabel = grh->flow_label; + ah->av.sl = rdma_ah_get_sl(ah_attr); + ah->av.tclass = grh->traffic_class; + ah->av.hop_limit = grh->hop_limit; + } return &ah->ibah; } @@ -88,19 +134,15 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) { struct hns_roce_ah *ah = to_hr_ah(ibah); + rdfx_func_cnt(to_hr_dev(ibah->device), RDFX_FUNC_QUERY_AH); + memset(ah_attr, 0, sizeof(*ah_attr)); - rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT)); - rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >> - HNS_ROCE_PORT_NUM_SHIFT)); + rdma_ah_set_sl(ah_attr, ah->av.sl); + rdma_ah_set_port_num(ah_attr, ah->av.port); rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate); - rdma_ah_set_grh(ah_attr, NULL, - (le32_to_cpu(ah->av.sl_tclass_flowlabel) & - HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index, - ah->av.hop_limit, - (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_TCLASS_SHIFT)); + rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel, ah->av.gid_index, + ah->av.hop_limit, ah->av.tclass); rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid); return 0; @@ -108,6 +150,8 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) int hns_roce_destroy_ah(struct ib_ah *ah) { + rdfx_func_cnt(to_hr_dev(ah->device), RDFX_FUNC_DESTROY_AH); + kfree(to_hr_ah(ah)); return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 46f65f9f59d0a654a3ff137dea657387c948eecf..595e8e6663e5b78b50149cc73a81dba2ac953729 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -30,10 +30,11 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include "roce_k_compat.h" #include -#include #include "hns_roce_device.h" +#include int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj) { @@ -61,6 +62,7 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj) return ret; } +EXPORT_SYMBOL_GPL(hns_roce_bitmap_alloc); void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj, int rr) @@ -112,16 +114,15 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap, unsigned long obj, int cnt, int rr) { + unsigned long base = obj & (bitmap->max + bitmap->reserved_top - 1); int i; - obj &= bitmap->max + bitmap->reserved_top - 1; - spin_lock(&bitmap->lock); for (i = 0; i < cnt; i++) - clear_bit(obj + i, bitmap->table); + clear_bit(base + i, bitmap->table); if (!rr) - bitmap->last = min(bitmap->last, obj); + bitmap->last = min(bitmap->last, base); bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) & bitmap->mask; spin_unlock(&bitmap->lock); @@ -157,91 +158,156 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap) kfree(bitmap->table); } -void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, - struct hns_roce_buf *buf) +void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) { - int i; - struct device *dev = hr_dev->dev; + struct hns_roce_buf_list *trunks; + u32 i; - if (buf->nbufs == 1) { - dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map); - } else { - for (i = 0; i < buf->nbufs; ++i) - if (buf->page_list[i].buf) - dma_free_coherent(dev, 1 << buf->page_shift, - buf->page_list[i].buf, - buf->page_list[i].map); - kfree(buf->page_list); + if (!buf) + return; + + trunks = buf->trunk_list; + if (trunks) { + buf->trunk_list = NULL; + for (i = 0; i < buf->ntrunks; i++) + dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift, + trunks[i].buf, trunks[i].map); + + kfree(trunks); } + + kfree(buf); } EXPORT_SYMBOL_GPL(hns_roce_buf_free); -int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, - struct hns_roce_buf *buf, u32 page_shift) +/* + * Allocate the dma buffer for storing ROCEE table entries + * + * @size: required size + * @page_shift: the unit size in a continuous dma address range + * @flags: HNS_ROCE_BUF_ flags to control the allocation flow. + */ +struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, + u32 page_shift, u32 flags) { - int i = 0; - dma_addr_t t; - struct device *dev = hr_dev->dev; - u32 page_size = 1 << page_shift; - u32 order; - - /* SQ/RQ buf lease than one page, SQ + RQ = 8K */ - if (size <= max_direct) { - buf->nbufs = 1; - /* Npages calculated by page_size */ - order = get_order(size); - if (order <= page_shift - PAGE_SHIFT) - order = 0; - else - order -= page_shift - PAGE_SHIFT; - buf->npages = 1 << order; - buf->page_shift = page_shift; - /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ - buf->direct.buf = dma_zalloc_coherent(dev, - size, &t, GFP_KERNEL); - if (!buf->direct.buf) - return -ENOMEM; - - buf->direct.map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } + u32 trunk_size, page_size, alloced_size; + struct hns_roce_buf_list *trunks; + struct hns_roce_buf *buf; + gfp_t gfp_flags; + u32 ntrunk, i; + + /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */ + if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT)) + return ERR_PTR(-EINVAL); + + gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? 
GFP_ATOMIC : GFP_KERNEL; + buf = kzalloc(sizeof(*buf), gfp_flags); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->page_shift = page_shift; + page_size = 1 << buf->page_shift; + + /* Calc the trunk size and num by required size and page_shift */ + if (flags & HNS_ROCE_BUF_DIRECT) { + buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE)); + ntrunk = 1; } else { - buf->nbufs = (size + page_size - 1) / page_size; - buf->npages = buf->nbufs; - buf->page_shift = page_shift; - buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - GFP_KERNEL); + buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE)); + ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift); + } + + trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags); + if (!trunks) { + kfree(buf); + return ERR_PTR(-ENOMEM); + } + + trunk_size = 1 << buf->trunk_shift; + alloced_size = 0; + for (i = 0; i < ntrunk; i++) { + trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size, + &trunks[i].map, gfp_flags); + if (!trunks[i].buf) + break; - if (!buf->page_list) - return -ENOMEM; + alloced_size += trunk_size; + } - for (i = 0; i < buf->nbufs; ++i) { - buf->page_list[i].buf = dma_zalloc_coherent(dev, - page_size, &t, - GFP_KERNEL); + buf->ntrunks = i; - if (!buf->page_list[i].buf) - goto err_free; + /* In nofail mode, it's only failed when the alloced size is 0 */ + if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) { + for (i = 0; i < buf->ntrunks; i++) + dma_free_coherent(hr_dev->dev, trunk_size, + trunks[i].buf, trunks[i].map); - buf->page_list[i].map = t; - } + kfree(trunks); + kfree(buf); + return ERR_PTR(-ENOMEM); } - return 0; + buf->npages = DIV_ROUND_UP(alloced_size, page_size); + buf->trunk_list = trunks; + + return buf; +} +EXPORT_SYMBOL_GPL(hns_roce_buf_alloc); + +int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, struct hns_roce_buf *buf, + unsigned int page_shift) +{ + unsigned int offset, max_size; + int total = 0; + int i; + + if (page_shift > buf->trunk_shift) { + dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n", + page_shift, buf->trunk_shift); + return -EINVAL; + } + + offset = 0; + max_size = buf->ntrunks << buf->trunk_shift; + for (i = 0; i < buf_cnt && offset < max_size; i++) { + bufs[total++] = hns_roce_buf_dma_addr(buf, offset); + offset += (1 << page_shift); + } + + return total; +} +EXPORT_SYMBOL_GPL(hns_roce_get_kmem_bufs); + +int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, struct ib_umem *umem, + unsigned int page_shift) +{ + struct ib_block_iter biter; + int total = 0; + + /* convert system page cnt to hw page cnt */ + rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, + 1 << page_shift) { + bufs[total++] = rdma_block_iter_dma_address(&biter); + if (total >= buf_cnt) + goto done; + } -err_free: - hns_roce_buf_free(hr_dev, size, buf); - return -ENOMEM; +done: + return total; } +EXPORT_SYMBOL_GPL(hns_roce_get_umem_bufs); void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) { + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) + hns_roce_cleanup_srq_table(hr_dev); hns_roce_cleanup_qp_table(hr_dev); hns_roce_cleanup_cq_table(hr_dev); hns_roce_cleanup_mr_table(hr_dev); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) + hns_roce_cleanup_xrcd_table(hr_dev); hns_roce_cleanup_pd_table(hr_dev); hns_roce_cleanup_uar_table(hr_dev); } diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 
a0ba19d4a10ece8947a467dd81af9033152e3277..dc2c4ecb6818fc730502fb85e40bf5b20fef7e58 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -32,7 +32,6 @@ #include #include -#include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" @@ -69,7 +68,8 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); if (ret) { - dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n"); + dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed(%d).\n", + ret); return ret; } @@ -138,14 +138,15 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, */ if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { - dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n"); + dev_err(dev, "[cmd_wait]wait_for_completion_timeout timeout.\n"); ret = -EBUSY; goto out; } ret = context->result; if (ret) { - dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret); + dev_err(dev, "[cmd_wait]event mod cmd process error(%d)!\n", + ret); goto out; } @@ -162,7 +163,7 @@ static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, unsigned long in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { - int ret = 0; + int ret; down(&hr_dev->cmd.event_sem); ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, @@ -176,17 +177,33 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, unsigned long in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { - if (hr_dev->is_reset) - return 0; + int ret; + + if (hr_dev->hw->rst_prc_mbox) { + ret = hr_dev->hw->rst_prc_mbox(hr_dev); + if (ret == CMD_RST_PRC_SUCCESS) + return 0; + else if (ret == CMD_RST_PRC_EBUSY) + return -EBUSY; + } if (hr_dev->cmd.use_events) - return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, + in_modifier, op_modifier, op, + timeout); else - return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, + in_modifier, op_modifier, op, + timeout); + + if (ret == CMD_RST_PRC_EBUSY) + return -EBUSY; + + if (ret && (hr_dev->hw->rst_prc_mbox && + hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS)) + return 0; + + return ret; } EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox); @@ -238,23 +255,17 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) hr_cmd->token_mask = CMD_TOKEN_MASK; hr_cmd->use_events = 1; - down(&hr_cmd->poll_sem); - return 0; } void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) { struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; - int i; - hr_cmd->use_events = 0; - - for (i = 0; i < hr_cmd->max_cmds; ++i) - down(&hr_cmd->event_sem); - - kfree(hr_cmd->context); - up(&hr_cmd->poll_sem); + if (hr_cmd->use_events) { + kfree(hr_cmd->context); + hr_cmd->use_events = 0; + } } struct hns_roce_cmd_mailbox diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 9549ae51a0dd5dc009b4aa2119b444d158c0af9b..45c91301ff39458bc4bed5d9ebf00cd9aaa2ce4e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -53,6 +53,7 @@ enum { HNS_ROCE_CMD_QUERY_QPC = 0x42, HNS_ROCE_CMD_MODIFY_CQC = 0x52, + HNS_ROCE_CMD_QUERY_CQC = 0x53, /* CQC BT commands */ HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10, 
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11, @@ -89,6 +90,18 @@ enum { HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39, HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a, + /* CTX BT commands */ + HNS_ROCE_CMD_READ_SCC_CTX_BT0 = 0xa4, + HNS_ROCE_CMD_WRITE_SCC_CTX_BT0 = 0xa5, + + /* QPC TIMER commands */ + HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33, + HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37, + + /* CQC TIMER commands */ + HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23, + HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27, + /* EQC commands */ HNS_ROCE_CMD_CREATE_AEQC = 0x80, HNS_ROCE_CMD_MODIFY_AEQC = 0x81, @@ -102,12 +115,12 @@ enum { enum { /* TPT commands */ - HNS_ROCE_CMD_SW2HW_MPT = 0xd, - HNS_ROCE_CMD_HW2SW_MPT = 0xf, + HNS_ROCE_CMD_CREATE_MPT = 0xd, + HNS_ROCE_CMD_DESTROY_MPT = 0xf, /* CQ commands */ - HNS_ROCE_CMD_SW2HW_CQ = 0x16, - HNS_ROCE_CMD_HW2SW_CQ = 0x17, + HNS_ROCE_CMD_CREATE_CQ = 0x16, + HNS_ROCE_CMD_DESTROY_CQ = 0x17, /* QP/EE commands */ HNS_ROCE_CMD_RST2INIT_QP = 0x19, @@ -120,6 +133,10 @@ enum { HNS_ROCE_CMD_SQD2RTS_QP = 0x20, HNS_ROCE_CMD_2RST_QP = 0x21, HNS_ROCE_CMD_QUERY_QP = 0x22, + HNS_ROCE_CMD_CREATE_SRQ = 0x70, + HNS_ROCE_CMD_MODIFY_SRQC = 0x72, + HNS_ROCE_CMD_QUERY_SRQC = 0x73, + HNS_ROCE_CMD_DESTROY_SRQ = 0x74, }; int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h index 93d4b4ec002d201f78cfc1b2477864c265785746..8eb4b8efcff2b683aa550c87a84e43557a26875c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_common.h +++ b/drivers/infiniband/hw/hns/hns_roce_common.h @@ -33,6 +33,8 @@ #ifndef _HNS_ROCE_COMMON_H #define _HNS_ROCE_COMMON_H +#include "roce_k_compat.h" + #ifndef assert #define assert(cond) #endif @@ -57,32 +59,6 @@ #define roce_set_bit(origin, shift, val) \ roce_set_field((origin), (1ul << (shift)), (shift), (val)) -/* - * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon - * SOC, check if a is less than b. - * @a: hardware index value - * @b: hardware index value - * @bits: the number of bits of a and b, range: 0~31. - * - * Hardware index increases continuously till max value, and then restart - * from zero, again and again. Because the bits of reg field is often - * limited, the reg field can only hold the low bits of the hardware index - * in hisilicon SOC. - * In some scenes we need to compare two values(a,b) getted from two reg - * fields in this driver, for example: - * If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has - * incresed from 0xffff to 0x1 and a is less than b. - * If a equals 0xfffe, b equals 0x0xf001 and bits equals 16, we think a - * is bigger than b. - * - * Return true on a less than b, otherwise false. 
- */ -#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1) -#define roce_hw_index_shift(bits) (32 - (bits)) -#define roce_hw_index_cmp_lt(a, b, bits) \ - ((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \ - roce_hw_index_shift(bits)) < 0) - #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 @@ -271,8 +247,6 @@ #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -#define ROCEE_SDB_PTR_CMP_BITS 28 - #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) @@ -353,13 +327,8 @@ #define ROCEE_CAEP_AE_MASK_REG 0x6C8 #define ROCEE_CAEP_AE_ST_REG 0x6CC -#define ROCEE_SDB_ISSUE_PTR_REG 0x758 -#define ROCEE_SDB_SEND_PTR_REG 0x75C #define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850 #define ROCEE_SCAEP_WR_CQE_CNT 0x8D0 -#define ROCEE_SDB_INV_CNT_REG 0x9A4 -#define ROCEE_SDB_RETRY_CNT_REG 0x9AC -#define ROCEE_TSP_BP_ST_REG 0x9EC #define ROCEE_ECC_UCERR_ALM0_REG 0xB34 #define ROCEE_ECC_CERR_ALM0_REG 0xB40 @@ -376,9 +345,6 @@ #define ROCEE_RX_CMQ_TAIL_REG 0x07024 #define ROCEE_RX_CMQ_HEAD_REG 0x07028 -#define ROCEE_VF_MB_CFG0_REG 0x40 -#define ROCEE_VF_MB_STATUS_REG 0x58 - #define ROCEE_VF_EQ_DB_CFG0_REG 0x238 #define ROCEE_VF_EQ_DB_CFG1_REG 0x23C diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 3a485f50fede1d59d30bd73240c3517c50dd9450..5420bec32da881bf1b1b947f6b42917478f37821 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include "roce_k_compat.h" #include #include @@ -59,7 +60,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq, event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { dev_err(hr_dev->dev, - "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n", + "hns_roce_ib: Unexpected event type 0x%x on CQ 0x%lx\n", event_type, hr_cq->cqn); return; } @@ -72,12 +73,12 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq, } } -static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long cq_num) +static int hns_roce_hw_create_cq(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long cq_num) { return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0, - HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS); + HNS_ROCE_CMD_CREATE_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS); } static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, @@ -104,26 +105,27 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, mtts = hns_roce_table_find(hr_dev, mtt_table, hr_mtt->first_seg, &dma_handle); if (!mtts) { - dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n"); + dev_err(dev, "Failed to find mtt for cq buf.\n"); return -EINVAL; } if (vector >= hr_dev->caps.num_comp_vectors) { - dev_err(dev, "CQ alloc.Invalid vector.\n"); + dev_err(dev, "Invalid vector(0x%x) for CQ alloc.\n", vector); return -EINVAL; } hr_cq->vector = vector; ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn); if (ret == -1) { - dev_err(dev, "CQ alloc.Failed to alloc index.\n"); + dev_err(dev, "Num of cq out of range.\n"); return -ENOMEM; } /* Get CQC memory HEM(Hardware Entry Memory) table */ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn); if (ret) { - dev_err(dev, "CQ alloc.Failed to get context mem.\n"); + 
dev_err(dev, "Get context mem failed(%d) when CQ(0x%lx) alloc.\n", + ret, hr_cq->cqn); goto err_out; } @@ -133,7 +135,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq); spin_unlock_irq(&cq_table->lock); if (ret) { - dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n"); + dev_err(dev, + "Failed to xa_store for cqn(0x%lx).\n", hr_cq->cqn); goto err_put; } @@ -148,10 +151,11 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, nent, vector); /* Send mailbox to hw */ - ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn); + ret = hns_roce_hw_create_cq(hr_dev, mailbox, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { - dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n"); + dev_err(dev, "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n", + ret, hr_cq->cqn); goto err_radix; } @@ -177,12 +181,12 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, return ret; } -static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long cq_num) +static int hns_roce_hw_destroy_cq(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long cq_num) { return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num, - mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ, + mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS); } @@ -192,9 +196,9 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct device *dev = hr_dev->dev; int ret; - ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn); + ret = hns_roce_hw_destroy_cq(hr_dev, NULL, hr_cq->cqn); if (ret) - dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret, + dev_err(dev, "DESTROY_CQ failed(%d) for CQN 0x%0lx\n", ret, hr_cq->cqn); /* Waiting interrupt process procedure carried out */ @@ -245,12 +249,18 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, (*umem)->page_shift, &buf->hr_mtt); } - if (ret) + if (ret) { + dev_err(hr_dev->dev, "hns_roce_mtt_init error(%d) for create cq.\n", + ret); goto err_buf; + } ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem); - if (ret) + if (ret) { + dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error(%d) for create cq.\n", + ret); goto err_mtt; + } return 0; @@ -265,28 +275,37 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq_buf *buf, u32 nent) { - int ret; u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz; + struct hns_roce_buf *kbuf; + int ret; - ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz, - (1 << page_shift) * 2, &buf->hr_buf, - page_shift); - if (ret) + kbuf = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz, + page_shift, 0); + if (IS_ERR(kbuf)) { + ret = -ENOMEM; goto out; + } + buf->hr_buf = kbuf; if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) buf->hr_mtt.mtt_type = MTT_TYPE_CQE; else buf->hr_mtt.mtt_type = MTT_TYPE_WQE; - ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages, - buf->hr_buf.page_shift, &buf->hr_mtt); - if (ret) + ret = hns_roce_mtt_init(hr_dev, kbuf->npages, kbuf->page_shift, + &buf->hr_mtt); + if (ret) { + dev_err(hr_dev->dev, "hns_roce_mtt_init error(%d) for kernel create cq.\n", + ret); goto err_buf; + } - ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf); - if (ret) + ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, buf->hr_buf); + if (ret) { + 
dev_err(hr_dev->dev, "hns_roce_ib_umem_write_mtt error(%d) for kernel create cq.\n", + ret); goto err_mtt; + } return 0; @@ -294,8 +313,7 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt); err_buf: - hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz, - &buf->hr_buf); + hns_roce_buf_free(hr_dev, buf->hr_buf); out: return ret; } @@ -303,8 +321,122 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq_buf *buf, int cqe) { - hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz, - &buf->hr_buf); + hns_roce_buf_free(hr_dev, buf->hr_buf); +} + +static int create_user_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + struct ib_ucontext *context, + struct ib_udata *udata, + struct hns_roce_ib_create_cq_resp *resp, + struct hns_roce_uar *uar, + int cq_entries) +{ + struct hns_roce_ib_create_cq ucmd; + struct device *dev = hr_dev->dev; + int ret; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + dev_err(dev, "Copy_from_udata failed.\n"); + return -EFAULT; + } + + /* Get user space address, write it into mtt table */ + ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf, + &hr_cq->umem, ucmd.buf_addr, + cq_entries); + if (ret) { + dev_err(dev, "Get_cq_umem failed(%d).\n", ret); + return ret; + } + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + (udata->outlen >= sizeof(*resp))) { + ret = hns_roce_db_map_user(to_hr_ucontext(context), + ucmd.db_addr, &hr_cq->db); + if (ret) { + dev_err(dev, "cq record doorbell map failed(%d)!\n", + ret); + goto err_mtt; + } + hr_cq->db_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; + } + + /* Get user space parameters */ + uar = &to_hr_ucontext(context)->uar; + + return 0; + +err_mtt: + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + ib_umem_release(hr_cq->umem); + + return ret; +} + +static int create_kernel_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, struct hns_roce_uar *uar, + int cq_entries) +{ + struct device *dev = hr_dev->dev; + int ret; + + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { + ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); + if (ret) { + dev_err(dev, "Alloc db for cq failed(%d).\n", ret); + return ret; + } + + hr_cq->set_ci_db = hr_cq->db.db_record; + *hr_cq->set_ci_db = 0; + hr_cq->db_en = 1; + } + + /* Init mmt table and write buff address to mtt table */ + ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries); + if (ret) { + dev_err(dev, "Alloc cq buf failed(%d).\n", ret); + goto err_db; + } + + uar = &hr_dev->priv_uar; + hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + + DB_REG_OFFSET * uar->index; + + return 0; + +err_db: + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) + hns_roce_free_db(hr_dev, &hr_cq->db); + + return ret; +} + +static void destroy_user_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + struct ib_ucontext *context, + struct ib_udata *udata, + struct hns_roce_ib_create_cq_resp *resp) +{ + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && + (udata->outlen >= sizeof(*resp))) + hns_roce_db_unmap_user(to_hr_ucontext(context), + &hr_cq->db); + + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + ib_umem_release(hr_cq->umem); +} + +static void destroy_kernel_cq(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq) +{ + hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); + hns_roce_ib_free_cq_buf(hr_dev, 
&hr_cq->hr_buf, hr_cq->ib_cq.cqe); + + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) + hns_roce_free_db(hr_dev, &hr_cq->db); } struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, @@ -314,7 +446,6 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, { struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct device *dev = hr_dev->dev; - struct hns_roce_ib_create_cq ucmd; struct hns_roce_ib_create_cq_resp resp = {}; struct hns_roce_cq *hr_cq = NULL; struct hns_roce_uar *uar = NULL; @@ -322,8 +453,10 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, int cq_entries = attr->cqe; int ret; + rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_CQ); + if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { - dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", + dev_err(dev, "Create CQ failed. entries is %d, max cqe is %d\n", cq_entries, hr_dev->caps.max_cqes); return ERR_PTR(-EINVAL); } @@ -338,66 +471,31 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, cq_entries = roundup_pow_of_two((unsigned int)cq_entries); hr_cq->ib_cq.cqe = cq_entries - 1; spin_lock_init(&hr_cq->lock); + INIT_LIST_HEAD(&hr_cq->sq_list); + INIT_LIST_HEAD(&hr_cq->rq_list); if (context) { - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { - dev_err(dev, "Failed to copy_from_udata.\n"); - ret = -EFAULT; - goto err_cq; - } - - /* Get user space address, write it into mtt table */ - ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf, - &hr_cq->umem, ucmd.buf_addr, - cq_entries); + ret = create_user_cq(hr_dev, hr_cq, context, udata, &resp, uar, + cq_entries); if (ret) { - dev_err(dev, "Failed to get_cq_umem.\n"); + dev_err(dev, "Create cq for user mode failed(%d)!\n", + ret); goto err_cq; } - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp))) { - ret = hns_roce_db_map_user(to_hr_ucontext(context), - ucmd.db_addr, &hr_cq->db); - if (ret) { - dev_err(dev, "cq record doorbell map failed!\n"); - goto err_mtt; - } - hr_cq->db_en = 1; - resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB; - } - - /* Get user space parameters */ - uar = &to_hr_ucontext(context)->uar; } else { - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { - ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1); - if (ret) - goto err_cq; - - hr_cq->set_ci_db = hr_cq->db.db_record; - *hr_cq->set_ci_db = 0; - hr_cq->db_en = 1; - } - - /* Init mmt table and write buff address to mtt table */ - ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, - cq_entries); + ret = create_kernel_cq(hr_dev, hr_cq, uar, cq_entries); if (ret) { - dev_err(dev, "Failed to alloc_cq_buf.\n"); - goto err_db; + dev_err(dev, "Create cq for kernel mode failed(%d)!\n", + ret); + goto err_cq; } - - uar = &hr_dev->priv_uar; - hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset + - DB_REG_OFFSET * uar->index; } /* Allocate cq index, fill cq_context */ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, hr_cq, vector); if (ret) { - dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); + dev_err(dev, "Cq alloc failed(%d).\n", ret); goto err_dbmap; } @@ -417,33 +515,24 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, if (context) { resp.cqn = hr_cq->cqn; - ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); + ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (ret) goto err_cqc; } + rdfx_alloc_cq_buf(hr_dev, hr_cq); + + hns_roce_inc_rdma_hw_stats(ib_dev, HW_STATS_CQ_ALLOC); return &hr_cq->ib_cq; err_cqc: hns_roce_free_cq(hr_dev, hr_cq); 
err_dbmap: - if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp))) - hns_roce_db_unmap_user(to_hr_ucontext(context), - &hr_cq->db); - -err_mtt: - hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); if (context) - ib_umem_release(hr_cq->umem); + destroy_user_cq(hr_dev, hr_cq, context, udata, &resp); else - hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, - hr_cq->ib_cq.cqe); - -err_db: - if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) - hns_roce_free_db(hr_dev, &hr_cq->db); + destroy_kernel_cq(hr_dev, hr_cq); err_cq: kfree(hr_cq); @@ -457,6 +546,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); int ret = 0; + rdfx_func_cnt(hr_dev, RDFX_FUNC_DESTROY_CQ); + rdfx_inc_dealloc_cq_cnt(hr_dev); + rdfx_free_cq_buff(hr_dev, hr_cq); + hns_roce_inc_rdma_hw_stats(ib_cq->device, HW_STATS_CQ_DEALLOC); + if (hr_dev->hw->destroy_cq) { ret = hr_dev->hw->destroy_cq(ib_cq); } else { @@ -514,7 +608,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) atomic_inc(&cq->refcount); if (!cq) { - dev_warn(dev, "Async event for bogus CQ %08x\n", cqn); + dev_warn(dev, "Async event for bogus CQ 0x%08x\n", cqn); return; } diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index e2f93c1ce86a3a8732b253d41af6c40351c5fee1..ed540894c67488642a7882da5f4e1fb3288151fb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -6,18 +6,21 @@ #include #include +#include "roce_k_compat.h" #include "hns_roce_device.h" int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt, struct hns_roce_db *db) { + unsigned long page_addr = virt & PAGE_MASK; struct hns_roce_user_db_page *page; + unsigned int offset; int ret = 0; mutex_lock(&context->page_mutex); list_for_each_entry(page, &context->page_list, list) - if (page->user_virt == (virt & PAGE_MASK)) + if (page->user_virt == page_addr) goto found; page = kmalloc(sizeof(*page), GFP_KERNEL); @@ -27,11 +30,14 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt, } refcount_set(&page->refcount, 1); - page->user_virt = (virt & PAGE_MASK); - page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, - PAGE_SIZE, 0, 0); - if (IS_ERR(page->umem)) { - ret = PTR_ERR(page->umem); + page->user_virt = page_addr; + page->umem = ib_umem_get(&context->ibucontext, page_addr, PAGE_SIZE, + 0, 0); + if (IS_ERR_OR_NULL(page->umem)) { + if (!page->umem) + ret = -EINVAL; + else + ret = PTR_ERR(page->umem); kfree(page); goto out; } @@ -39,10 +45,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt, list_add(&page->list, &context->page_list); found: - db->dma = sg_dma_address(page->umem->sg_head.sgl) + - (virt & ~PAGE_MASK); - page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; - db->virt_addr = sg_virt(page->umem->sg_head.sgl); + offset = virt - page_addr; + db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; + db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; db->u.user_page = page; refcount_inc(&page->refcount); @@ -78,7 +83,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir( if (!pgdir) return NULL; - bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2); + bitmap_fill(pgdir->order1, + HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT); pgdir->bits[0] = pgdir->order0; pgdir->bits[1] = pgdir->order1; pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE, @@ -116,7 
+122,7 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir, db->u.pgdir = pgdir; db->index = i; db->db_record = pgdir->page + db->index; - db->dma = pgdir->db_dma + db->index * 4; + db->dma = pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE; db->order = order; return 0; @@ -170,7 +176,8 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db) i >>= o; set_bit(i, db->u.pgdir->bits[o]); - if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) { + if (bitmap_full(db->u.pgdir->order1, + HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) { dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page, db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 9a24fd0ee3e78c5b7574b8ec7cc99b56ea32a729..d11b6066a5308c3a5c79fd623933fa06fb7d22e1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -37,17 +37,24 @@ #define DRV_NAME "hns_roce" +/* hip08 is a pci device, it includes two version according pci version id */ +#define PCI_REVISION_ID_HIP08_A 0x20 +#define PCI_REVISION_ID_HIP08_B 0x21 + #define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6') -#define MAC_ADDR_OCTET_NUM 6 #define HNS_ROCE_MAX_MSG_LEN 0x80000000 -#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b)) +#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b)) #define HNS_ROCE_IB_MIN_SQ_STRIDE 6 #define HNS_ROCE_BA_SIZE (32 * 4096) +#define BA_BYTE_LEN 8 + +#define BITS_PER_BYTE 8 + /* Hardware specification only for v1 engine */ #define HNS_ROCE_MIN_CQE_NUM 0x40 #define HNS_ROCE_MIN_WQE_NUM 0x20 @@ -55,6 +62,8 @@ /* Hardware specification only for v1 engine */ #define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7 #define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000 +#define HNS_ROCE_MAX_SGE_NUM 2 + #define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20 #define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \ @@ -62,8 +71,15 @@ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 #define HNS_ROCE_MIN_CQE_CNT 16 +#define HNS_ROCE_WORKQ_NAME_LEN 32 + +#define HNS_ROCE_RESERVED_SGE 1 + #define HNS_ROCE_MAX_IRQ_NUM 128 +#define HNS_ROCE_SGE_IN_WQE 2 +#define HNS_ROCE_SGE_SHIFT 4 + #define EQ_ENABLE 1 #define EQ_DISABLE 0 @@ -73,7 +89,6 @@ #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4 #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10 -/* 4G/4K = 1M */ #define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff @@ -82,24 +97,33 @@ #define HNS_ROCE_MAX_GID_NUM 16 #define HNS_ROCE_GID_SIZE 16 +#define HNS_ROCE_SGE_SIZE 16 + #define HNS_ROCE_HOP_NUM_0 0xff #define BITMAP_NO_RR 0 #define BITMAP_RR 1 #define MR_TYPE_MR 0x00 +#define MR_TYPE_FRMR 0x01 #define MR_TYPE_DMA 0x03 +#define MR_TYPE_UMM 0x04 #define PKEY_ID 0xffff #define GUID_LEN 8 #define NODE_DESC_SIZE 64 #define DB_REG_OFFSET 0x1000 +#define HNS_ROCE_CEQ_MAX_BURST_NUM 0xffff +#define HNS_ROCE_CEQ_MAX_INTERVAL 0xffff +#define HNS_ROCE_EQ_MAXCNT_MASK 1 +#define HNS_ROCE_EQ_PERIOD_MASK 2 + #define SERV_TYPE_RC 0 -#define SERV_TYPE_RD 1 -#define SERV_TYPE_UC 2 +#define SERV_TYPE_RD 2 +#define SERV_TYPE_UC 1 #define SERV_TYPE_UD 3 - +#define SERV_TYPE_XRC 5 /* Configure to HW for PAGE_SIZE larger than 4KB */ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) @@ -108,6 +132,19 @@ #define PAGES_SHIFT_24 24 #define PAGES_SHIFT_32 32 +#define HNS_ROCE_PCI_BAR_NR 2 + +#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 + +#define HNS_ROCE_FRMR_MAX_PA 512 + +#define SRQ_DB_REG 0x230 + +/* The chip implementation of the 
consumer index is calculated + * according to twice the actual EQ depth + */ +#define EQ_DEPTH_COEFF 2 + enum { HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0, HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1, @@ -193,17 +230,87 @@ enum { HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2), HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3), HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4), + HNS_ROCE_CAP_FLAG_XRC = BIT(6), + HNS_ROCE_CAP_FLAG_SRQ = BIT(5), + HNS_ROCE_CAP_FLAG_MW = BIT(7), + HNS_ROCE_CAP_FLAG_FRMR = BIT(8), + HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), + HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), }; enum hns_roce_mtt_type { MTT_TYPE_WQE, MTT_TYPE_CQE, + MTT_TYPE_SRQWQE, + MTT_TYPE_IDX }; +#define HNS_ROCE_DB_TYPE_COUNT 2 +#define HNS_ROCE_DB_UNIT_SIZE 4 + enum { HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4 }; +enum hns_roce_reset_stage { + HNS_ROCE_STATE_NON_RST, + HNS_ROCE_STATE_RST_BEF_DOWN, + HNS_ROCE_STATE_RST_DOWN, + HNS_ROCE_STATE_RST_UNINIT, + HNS_ROCE_STATE_RST_INIT, + HNS_ROCE_STATE_RST_INITED, +}; + +enum hns_roce_instance_state { + HNS_ROCE_STATE_NON_INIT, + HNS_ROCE_STATE_INIT, + HNS_ROCE_STATE_INITED, + HNS_ROCE_STATE_UNINIT, +}; + +enum { + HNS_ROCE_RST_DIRECT_RETURN = 0, +}; + +enum { + CMD_RST_PRC_OTHERS, + CMD_RST_PRC_SUCCESS, + CMD_RST_PRC_EBUSY, +}; + +enum hns_roce_hw_stats { + HW_STATS_PD_ALLOC, + HW_STATS_PD_DEALLOC, + HW_STATS_PD_ACTIVE_MAX, + HW_STATS_MR_ALLOC, + HW_STATS_MR_DEALLOC, + HW_STATS_MR_ACTIVE_MAX, + HW_STATS_CQ_ALLOC, + HW_STATS_CQ_DEALLOC, + HW_STATS_QP_ALLOC, + HW_STATS_QP_DEALLOC, + HW_STATS_PD_ACTIVE, + HW_STATS_MR_ACTIVE, + HW_STATS_CQ_ACTIVE, + HW_STATS_CQ_ACTIVE_MAX, + HW_STATS_QP_ACTIVE, + HW_STATS_QP_ACTIVE_MAX, + HW_STATS_SRQ_ACTIVE, + HW_STATS_SRQ_ACTIVE_MAX, + HW_STATS_UAR_ACTIVE, + HW_STATS_UAR_ACTIVE_MAX, + HW_STATS_MR_REREG, + HW_STATS_AEQE, + HW_STATS_CEQE, + HW_STATS_TOTAL +}; + +static inline void hns_roce_inc_rdma_hw_stats(struct ib_device *dev, int stats) +{ + if (dev->hw_stats) + dev->hw_stats->value[stats]++; +} + #define HNS_ROCE_CMD_SUCCESS 1 #define HNS_ROCE_PORT_DOWN 0 @@ -211,8 +318,14 @@ enum { #define HNS_ROCE_MTT_ENTRY_PER_SEG 8 +/* The minimum page size is 4K for hardware */ +#define HNS_HW_PAGE_SHIFT 12 +#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT) + #define PAGE_ADDR_SHIFT 12 +#define HNS_ROCE_IS_RESETTING 1 + struct hns_roce_uar { u64 pfn; unsigned long index; @@ -232,6 +345,7 @@ struct hns_roce_ucontext { struct mutex page_mutex; struct list_head vma_list; struct mutex vma_list_mutex; + struct kref uctx_ref; }; struct hns_roce_pd { @@ -239,6 +353,13 @@ struct hns_roce_pd { unsigned long pdn; }; +struct hns_roce_xrcd { + struct ib_xrcd ibxrcd; + unsigned long xrcdn; + struct ib_pd *pd; + struct ib_cq *cq; +}; + struct hns_roce_bitmap { /* Bitmap Traversal last a bit which is 1 */ unsigned long last; @@ -274,7 +395,7 @@ struct hns_roce_hem_table { unsigned long num_hem; /* HEM entry record obj total num */ unsigned long num_obj; - /*Single obj size */ + /* Single obj size */ unsigned long obj_size; unsigned long table_chunk_size; int lowmem; @@ -293,6 +414,62 @@ struct hns_roce_mtt { enum hns_roce_mtt_type mtt_type; }; +struct hns_roce_buf_region { + u32 offset; /* page offset */ + u32 count; /* page count */ + int hopnum; /* addressing hop num */ +}; + +#define HNS_ROCE_MAX_BT_REGION 3 +#define HNS_ROCE_MAX_BT_LEVEL 3 +struct hns_roce_hem_list { + struct list_head root_bt; + /* link all bt dma mem by hop config */ + struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL]; + struct list_head btm_bt; /* link all bottom bt in @mid_bt */ + dma_addr_t root_ba; 
/* pointer to the root ba table */ +}; + +struct hns_roce_buf_attr { + struct { + size_t size; /* region size */ + int hopnum; /* multi-hop addressing hop num */ + } region[HNS_ROCE_MAX_BT_REGION]; + unsigned int region_count; /* valid region count */ + unsigned int page_shift; /* buffer page shift */ + unsigned int user_access; /* umem access flag */ + int user_dmasync; /* umem dma sync flag */ + bool mtt_only; /* only alloc buffer-required MTT memory */ +}; + +struct hns_roce_hem_cfg { + dma_addr_t root_ba; /* root BA table's address */ + bool is_direct; /* addressing without BA table */ + unsigned int ba_pg_shift; /* BA table page shift */ + unsigned int buf_pg_shift; /* buffer page shift */ + unsigned int buf_pg_count; /* buffer page count */ + struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION]; + unsigned int region_count; +}; + +/* memory translate region */ +struct hns_roce_mtr { + struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */ + struct ib_umem *umem; /* user space buffer */ + struct hns_roce_buf *kmem; /* kernel space buffer */ + struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */ +}; + +struct hns_roce_mw { + struct ib_mw ibmw; + u32 pdn; + u32 rkey; + int enabled; /* MW's active status */ + u32 pbl_buf_pg_sz; + u32 pbl_ba_pg_sz; + u32 pbl_hop_num; +}; + /* Only support 4K page size for mr register */ #define MR_SIZE_4K 0 @@ -303,24 +480,25 @@ struct hns_roce_mr { u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access;/* Access permission of MR */ + u32 access; /* Access permission of MR */ + u32 npages; int enabled; /* MR's active status */ int type; /* MR's register type */ - u64 *pbl_buf;/* MR's PBL space */ + u64 *pbl_buf; /* MR's PBL space */ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ - u32 pbl_size;/* PA number in the PBL */ - u64 pbl_ba;/* page table address */ - u32 l0_chunk_last_num;/* L0 last number */ - u32 l1_chunk_last_num;/* L1 last number */ - u64 **pbl_bt_l2;/* PBL BT L2 */ - u64 **pbl_bt_l1;/* PBL BT L1 */ - u64 *pbl_bt_l0;/* PBL BT L0 */ - dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */ - dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */ - dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */ - u32 pbl_ba_pg_sz;/* BT chunk page size */ - u32 pbl_buf_pg_sz;/* buf chunk page size */ - u32 pbl_hop_num;/* multi-hop number */ + u32 pbl_size; /* PA number in the PBL */ + u64 pbl_ba; /* page table address */ + u32 l0_chunk_last_num; /* L0 last number */ + u32 l1_chunk_last_num; /* L1 last number */ + u64 **pbl_bt_l2; /* PBL BT L2 */ + u64 **pbl_bt_l1; /* PBL BT L1 */ + u64 *pbl_bt_l0; /* PBL BT L0 */ + dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */ + dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */ + dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */ + u32 pbl_ba_pg_sz; /* BT chunk page size */ + u32 pbl_buf_pg_sz; /* buf chunk page size */ + u32 pbl_hop_num; /* multi-hop number */ }; struct hns_roce_mr_table { @@ -330,16 +508,20 @@ struct hns_roce_mr_table { struct hns_roce_hem_table mtpt_table; struct hns_roce_buddy mtt_cqe_buddy; struct hns_roce_hem_table mtt_cqe_table; + struct hns_roce_buddy mtt_srqwqe_buddy; + struct hns_roce_hem_table mtt_srqwqe_table; + struct hns_roce_buddy mtt_idx_buddy; + struct hns_roce_hem_table mtt_idx_table; }; struct hns_roce_wq { u64 *wrid; /* Work request ID */ spinlock_t lock; int wqe_cnt; /* WQE num */ - u32 max_post; int max_gs; + u32 rsv_sge; int offset; - int wqe_shift;/* WQE size */ + int wqe_shift; /* WQE size 
*/ u32 head; u32 tail; void __iomem *db_reg_l; @@ -348,7 +530,7 @@ struct hns_roce_wq { struct hns_roce_sge { int sge_cnt; /* SGE num */ int offset; - int sge_shift;/* SGE size */ + int sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -356,19 +538,34 @@ struct hns_roce_buf_list { dma_addr_t map; }; +/* + * %HNS_ROCE_BUF_DIRECT indicates that the all memory must be in a continuous + * dma address range. + * + * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep. + * + * %HNS_ROCE_BUF_NOFAIL allocation only failed when allocated size is zero, even + * the allocated size is smaller than the required size. + */ +enum { + HNS_ROCE_BUF_DIRECT = BIT(0), + HNS_ROCE_BUF_NOSLEEP = BIT(1), + HNS_ROCE_BUF_NOFAIL = BIT(2), +}; + struct hns_roce_buf { - struct hns_roce_buf_list direct; - struct hns_roce_buf_list *page_list; - int nbufs; + struct hns_roce_buf_list *trunk_list; + u32 ntrunks; u32 npages; - int page_shift; + unsigned int trunk_shift; + unsigned int page_shift; }; struct hns_roce_db_pgdir { struct list_head list; DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE); - DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2); - unsigned long *bits[2]; + DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT); + unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT]; u32 *page; dma_addr_t db_dma; }; @@ -393,10 +590,16 @@ struct hns_roce_db { }; struct hns_roce_cq_buf { - struct hns_roce_buf hr_buf; + struct hns_roce_buf *hr_buf; struct hns_roce_mtt hr_mtt; }; +enum hns_roce_cq_dfx_cnt { + HNS_ROCE_SQ_CQE, + HNS_ROCE_RQ_CQE, + HNS_ROCE_CQ_DFX_TOTAL +}; + struct hns_roce_cq { struct ib_cq ib_cq; struct hns_roce_cq_buf hr_buf; @@ -418,11 +621,45 @@ struct hns_roce_cq { u32 vector; atomic_t refcount; struct completion free; + struct list_head sq_list; /* list of all send cqs */ + struct list_head rq_list; /* list of all recv cqs */ + int comp_state; + struct list_head list; /* all armed cps are on a list */ + u32 dfx_cnt[HNS_ROCE_CQ_DFX_TOTAL]; +}; + +struct hns_roce_idx_que { + struct hns_roce_buf *idx_buf; + int entry_sz; + u32 buf_size; + struct ib_umem *umem; + struct hns_roce_mtt mtt; + unsigned long *bitmap; + u32 head; + u32 tail; }; struct hns_roce_srq { struct ib_srq ibsrq; - int srqn; + void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event); + unsigned long srqn; + int max; + int max_gs; + u32 rsv_sge; + int wqe_shift; + void __iomem *db_reg_l; + + refcount_t refcount; + struct completion free; + + struct hns_roce_buf *buf; + u64 *wrid; + struct ib_umem *umem; + struct hns_roce_mtt mtt; + struct hns_roce_idx_que idx_que; + spinlock_t lock; + u16 wqe_ctr; + struct mutex mutex; }; struct hns_roce_uar_table { @@ -435,6 +672,14 @@ struct hns_roce_qp_table { struct hns_roce_hem_table qp_table; struct hns_roce_hem_table irrl_table; struct hns_roce_hem_table trrl_table; + struct hns_roce_hem_table scc_ctx_table; +}; + +struct hns_roce_qpc_timer_table { + struct hns_roce_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct hns_roce_hem_table table; }; struct hns_roce_cq_table { @@ -444,19 +689,37 @@ struct hns_roce_cq_table { struct hns_roce_hem_table table; }; +struct hns_roce_cqc_timer_table { + struct hns_roce_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct hns_roce_hem_table table; +}; + +struct hns_roce_srq_table { + struct hns_roce_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct hns_roce_hem_table table; +}; + struct hns_roce_raq_table { struct hns_roce_buf_list *e_raq_buf; }; struct hns_roce_av { 
- __le32 port_pd; - u8 gid_index; - u8 stat_rate; - u8 hop_limit; - __le32 sl_tclass_flowlabel; - u8 dgid[HNS_ROCE_GID_SIZE]; - u8 mac[6]; - __le16 vlan; + u32 port; + u8 gid_index; + u8 stat_rate; + u8 hop_limit; + u32 flowlabel; + u16 udp_sport; + u8 sl; + u8 tclass; + u8 dgid[HNS_ROCE_GID_SIZE]; + u8 mac[ETH_ALEN]; + u16 vlan; + u8 vlan_en; }; struct hns_roce_ah { @@ -498,6 +761,7 @@ struct hns_roce_cmdq { */ u8 use_events; u8 toggle; + int state; }; struct hns_roce_cmd_mailbox { @@ -522,9 +786,17 @@ struct hns_roce_rinl_buf { u32 wqe_cnt; }; +enum hns_roce_qp_dfx_cnt { + HNS_ROCE_QP_DFX_SIGNAL_WQE, + HNS_ROCE_QP_DFX_INLINE_WQE, + HNS_ROCE_QP_DFX_POST_SEND, + HNS_ROCE_QP_DFX_POST_RECV, + HNS_ROCE_QP_DFX_TOTAL +}; + + struct hns_roce_qp { struct ib_qp ibqp; - struct hns_roce_buf hr_buf; struct hns_roce_wq rq; struct hns_roce_db rdb; struct hns_roce_db sdb; @@ -533,19 +805,21 @@ struct hns_roce_qp { u32 doorbell_qpn; __le32 sq_signal_bits; u32 sq_next_wqe; - int sq_max_wqes_per_wr; - int sq_spare_wqes; struct hns_roce_wq sq; - struct ib_umem *umem; - struct hns_roce_mtt mtt; + struct hns_roce_mtr mtr; u32 buff_size; + struct mutex mutex; + u16 xrcdn; u8 port; u8 phy_port; u8 sl; u8 resp_depth; u8 state; + u8 flush_en; + u8 next_state; /* record for flush cqe */ + int attr_mask; /* record for flush cqe */ u32 access_flags; u32 atomic_rd_en; u32 pkey_index; @@ -561,10 +835,10 @@ struct hns_roce_qp { u32 next_sge; struct hns_roce_rinl_buf rq_inl_buf; -}; - -struct hns_roce_sqp { - struct hns_roce_qp hr_qp; + struct list_head list; /* all qps are on a list */ + struct list_head recv_list; /* all recv cqs are on a list */ + struct list_head send_list; /* all send cqs are on a list */ + u32 dfx_cnt[HNS_ROCE_QP_DFX_TOTAL]; }; struct hns_roce_ib_iboe { @@ -572,15 +846,16 @@ struct hns_roce_ib_iboe { struct net_device *netdevs[HNS_ROCE_MAX_PORTS]; struct notifier_block nb; u8 phy_port[HNS_ROCE_MAX_PORTS]; + u8 last_port_state[HNS_ROCE_MAX_PORTS]; }; enum { HNS_ROCE_EQ_STAT_INVALID = 0, - HNS_ROCE_EQ_STAT_VALID = 2, + HNS_ROCE_EQ_STAT_VALID = 1, }; struct hns_roce_ceqe { - u32 comp; + __le32 comp; }; struct hns_roce_aeqe { @@ -592,6 +867,12 @@ struct hns_roce_aeqe { u32 rsv1; } qp_event; + struct { + __le32 srq; + u32 rsv0; + u32 rsv1; + } srq_event; + struct { __le32 cq; u32 rsv0; @@ -656,19 +937,27 @@ struct hns_roce_eq_table { }; struct hns_roce_caps { + u64 fw_ver; u8 num_ports; int gid_table_len[HNS_ROCE_MAX_PORTS]; int pkey_table_len[HNS_ROCE_MAX_PORTS]; int local_ca_ack_delay; int num_uars; u32 phy_num_uars; - u32 max_sq_sg; /* 2 */ - u32 max_sq_inline; /* 32 */ - u32 max_rq_sg; /* 2 */ - int num_qps; /* 256k */ - u32 max_wqes; /* 16k */ - u32 max_sq_desc_sz; /* 64 */ - u32 max_rq_desc_sz; /* 64 */ + u32 max_sq_sg; + u32 max_sq_inline; + u32 max_rq_sg; + u32 max_extend_sg; + int num_qps; + int reserved_qps; + int num_qpc_timer; + int num_cqc_timer; + int num_srqs; + u32 max_wqes; + u32 max_srq_wrs; + u32 max_srq_sges; + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; u32 max_srq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; @@ -677,16 +966,21 @@ struct hns_roce_caps { int min_cqes; u32 min_wqes; int reserved_cqs; - int num_aeq_vectors; /* 1 */ + int reserved_srqs; + int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; int num_mtpts; u32 num_mtt_segs; u32 num_cqe_segs; + u32 num_srqwqe_segs; + u32 num_idx_segs; int reserved_mrws; int reserved_uars; int num_pds; int reserved_pds; + int num_xrcds; + int reserved_xrcds; u32 mtt_entry_sz; u32 cq_entry_sz; u32 page_size_cap; @@ 
-696,6 +990,11 @@ struct hns_roce_caps { int irrl_entry_sz; int trrl_entry_sz; int cqc_entry_sz; + int srqc_entry_sz; + int idx_entry_sz; + int scc_ctx_entry_sz; + int qpc_timer_entry_sz; + int cqc_timer_entry_sz; u32 pbl_ba_pg_sz; u32 pbl_buf_pg_sz; u32 pbl_hop_num; @@ -703,9 +1002,12 @@ struct hns_roce_caps { int ceqe_depth; enum ib_mtu max_mtu; u32 qpc_bt_num; + u32 qpc_timer_bt_num; u32 srqc_bt_num; u32 cqc_bt_num; + u32 cqc_timer_bt_num; u32 mpt_bt_num; + u32 scc_ctx_bt_num; u32 qpc_ba_pg_sz; u32 qpc_buf_pg_sz; u32 qpc_hop_num; @@ -721,27 +1023,106 @@ struct hns_roce_caps { u32 mtt_ba_pg_sz; u32 mtt_buf_pg_sz; u32 mtt_hop_num; - u32 cqe_ba_pg_sz; + u32 wqe_sq_hop_num; + u32 wqe_sge_hop_num; + u32 wqe_rq_hop_num; + u32 scc_ctx_ba_pg_sz; + u32 scc_ctx_buf_pg_sz; + u32 scc_ctx_hop_num; + u32 qpc_timer_ba_pg_sz; + u32 qpc_timer_buf_pg_sz; + u32 qpc_timer_hop_num; + u32 cqc_timer_ba_pg_sz; + u32 cqc_timer_buf_pg_sz; + u32 cqc_timer_hop_num; + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; + u32 srqwqe_ba_pg_sz; + u32 srqwqe_buf_pg_sz; + u32 srqwqe_hop_num; + u32 idx_ba_pg_sz; + u32 idx_buf_pg_sz; + u32 idx_hop_num; u32 eqe_ba_pg_sz; u32 eqe_buf_pg_sz; u32 eqe_hop_num; u32 sl_num; u32 tsq_buf_pg_sz; u32 tpq_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode*/ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; + u16 default_ceq_max_cnt; + u16 default_ceq_period; + u16 default_aeq_max_cnt; + u16 default_aeq_period; + u16 default_aeq_arm_st; + u16 default_ceq_arm_st; }; struct hns_roce_work { struct hns_roce_dev *hr_dev; struct work_struct work; u32 qpn; + u32 cqn; int event_type; int sub_type; }; +struct hns_roce_flush_work { + struct hns_roce_dev *hr_dev; + struct work_struct work; + struct hns_roce_qp *hr_qp; +}; + +struct hns_roce_stat { + u32 cqn; + u32 srqn; + u32 ceqn; + u32 qpn; + u32 aeqn; + u32 key; +}; + +struct hns_roce_dfx_hw { + int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn, + int *buffer); + int (*query_qpc_info)(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer); + int (*query_mpt_info)(struct hns_roce_dev *hr_dev, u32 key, + int *buffer); + int (*query_cqc_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_cmd_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_qpc_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_aeqc_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_srqc_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_pkt_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_mpt_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*query_ceqc_stat)(struct hns_roce_dev *hr_dev, + char *buf, int *desc); + int (*modify_eq)(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, + u16 eq_count, u16 eq_period, u16 type); + +}; + +enum { + HNS_ROCE_CMDQ_NORMAL, + HNS_ROCE_CMDQ_TIMEOUT, +}; + +enum hns_roce_device_state { + HNS_ROCE_DEVICE_STATE_INITED, + HNS_ROCE_DEVICE_STATE_RST_DOWN, + HNS_ROCE_DEVICE_STATE_UNINIT, +}; + struct hns_roce_hw { int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*cmq_init)(struct hns_roce_dev *hr_dev); @@ -753,8 +1134,14 @@ struct hns_roce_hw { u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event); int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout); + int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev); +#ifdef CONFIG_KERNEL_419 int (*set_gid)(struct hns_roce_dev *hr_dev, 
u8 port, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); +#else + int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, + union ib_gid *gid, const struct ib_gid_attr *attr); +#endif int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, enum ib_mtu mtu); @@ -764,12 +1151,14 @@ struct hns_roce_hw { struct hns_roce_mr *mr, int flags, u32 pdn, int mr_access_flags, u64 iova, u64 size, void *mb_buf); + int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr); + int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle, int nent, u32 vector); int (*set_hem)(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, int step_idx); - int (*clear_hem)(struct hns_roce_dev *hr_dev, + void (*clear_hem)(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, int step_idx); int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, @@ -778,10 +1167,19 @@ struct hns_roce_hw { int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state); int (*destroy_qp)(struct ib_qp *ibqp); + int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp); +#ifdef CONFIG_KERNEL_419 int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr); int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); +#else + int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); + int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr); +#endif int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr); @@ -789,6 +1187,31 @@ struct hns_roce_hw { int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); int (*init_eq)(struct hns_roce_dev *hr_dev); void (*cleanup_eq)(struct hns_roce_dev *hr_dev); + int (*create_workq)(struct hns_roce_dev *hr_dev); + void (*destroy_workq)(struct hns_roce_dev *hr_dev); + void (*write_srqc)(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn, + void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx, + dma_addr_t dma_handle_wqe, + dma_addr_t dma_handle_idx); + int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata); + int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr); +#ifdef CONFIG_KERNEL_419 + int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr); +#else + int (*post_srq_recv)(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +#endif +}; + +/* HW STATS cannot support EQ interrupt event,so add counter of dev for CI */ +enum hns_roce_dev_dfx_cnt { + HNS_ROCE_DFX_AEQE, + HNS_ROCE_DFX_CEQE, + HNS_ROCE_DFX_TOTAL }; struct hns_roce_dev { @@ -802,7 +1225,12 @@ struct hns_roce_dev { spinlock_t bt_cmd_lock; bool active; bool is_reset; + bool dis_db; + unsigned long reset_cnt; struct hns_roce_ib_iboe iboe; + enum hns_roce_device_state state; + struct list_head qp_list; /* list of all qps on this dev */ + spinlock_t qp_lock; /* protect qp_list */ struct list_head pgdir_list; struct mutex pgdir_mutex; @@ 
-811,7 +1239,7 @@ struct hns_roce_dev { struct hns_roce_caps caps; struct radix_tree_root qp_table_tree; - unsigned char dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM]; + unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN]; u64 sys_image_guid; u32 vendor_id; u32 vendor_part_id; @@ -820,21 +1248,33 @@ struct hns_roce_dev { struct hns_roce_cmdq cmd; struct hns_roce_bitmap pd_bitmap; + struct hns_roce_bitmap xrcd_bitmap; struct hns_roce_uar_table uar_table; struct hns_roce_mr_table mr_table; struct hns_roce_cq_table cq_table; + struct hns_roce_srq_table srq_table; struct hns_roce_qp_table qp_table; struct hns_roce_eq_table eq_table; + struct hns_roce_qpc_timer_table qpc_timer_table; + struct hns_roce_cqc_timer_table cqc_timer_table; int cmd_mod; int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /*only for hw v1*/ - u32 tptr_size; /*only for hw v1*/ + dma_addr_t tptr_dma_addr; /* only for hw v1 */ + u32 tptr_size; /* only for hw v1 */ + void *reset_page; /* store reset state for hw v2 */ const struct hns_roce_hw *hw; + const struct hns_roce_dfx_hw *dfx; void *priv; + void *dfx_priv; struct workqueue_struct *irq_workq; + struct workqueue_struct *flush_workq; + struct hns_roce_stat hr_stat; + u32 func_num; + u32 mac_id; + u64 dfx_cnt[HNS_ROCE_DFX_TOTAL]; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -853,6 +1293,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd) return container_of(ibpd, struct hns_roce_pd, ibpd); } +static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd); +} + static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah) { return container_of(ibah, struct hns_roce_ah, ibah); @@ -863,6 +1308,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr) return container_of(ibmr, struct hns_roce_mr, ibmr); } +static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct hns_roce_mw, ibmw); +} + static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp) { return container_of(ibqp, struct hns_roce_qp, ibqp); @@ -878,11 +1328,6 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq) return container_of(ibsrq, struct hns_roce_srq, ibsrq); } -static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp) -{ - return container_of(hr_qp, struct hns_roce_sqp, hr_qp); -} - static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest) { __raw_writeq(*(u64 *) val, dest); @@ -895,15 +1340,73 @@ static inline struct hns_roce_qp qpn & (hr_dev->caps.num_qps - 1)); } -static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset) +static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, + unsigned int offset) +{ + return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) + + (offset & ((1 << buf->trunk_shift) - 1)); +} + +static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf, + unsigned int offset) +{ + return buf->trunk_list[offset >> buf->trunk_shift].map + + (offset & ((1 << buf->trunk_shift) - 1)); +} + +static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx) +{ + return hns_roce_buf_dma_addr(buf, idx << buf->page_shift); +} + +#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT) + +static inline u64 to_hr_hw_page_addr(u64 addr) +{ + return addr >> HNS_HW_PAGE_SHIFT; +} + +static inline u32 to_hr_hw_page_shift(u32 page_shift) +{ + return page_shift - HNS_HW_PAGE_SHIFT; +} + 
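A minimal, standalone sketch (not part of the patch) of how the new 4K hardware-page helpers defined just above behave, assuming only what this hunk shows (HNS_HW_PAGE_SHIFT == 12, hr_hw_page_align() rounding up to 4K) plus hypothetical example values for the entry size and count:

    #include <stdio.h>
    #include <stdint.h>

    #define HNS_HW_PAGE_SHIFT 12                        /* hardware minimum page is 4K */
    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define hr_hw_page_align(x) ALIGN((x), 1ULL << HNS_HW_PAGE_SHIFT)

    /* mirror of to_hr_hw_page_addr(): byte address -> 4K hardware page number */
    static uint64_t to_hr_hw_page_addr(uint64_t addr)      { return addr >> HNS_HW_PAGE_SHIFT; }
    /* mirror of to_hr_hw_page_shift(): CPU page shift -> shift relative to 4K */
    static uint32_t to_hr_hw_page_shift(uint32_t pg_shift) { return pg_shift - HNS_HW_PAGE_SHIFT; }

    int main(void)
    {
        uint32_t buf_shift = 6;   /* hypothetical 64-byte entries */
        uint32_t count = 3;       /* hypothetical entry count */
        uint64_t bytes = hr_hw_page_align((uint64_t)count << buf_shift);

        /* 3 * 64 = 192 bytes rounds up to a single 4K hardware page */
        printf("aligned size: %llu bytes (%llu hw pages)\n",
               (unsigned long long)bytes,
               (unsigned long long)to_hr_hw_page_addr(bytes));
        /* a CPU PAGE_SHIFT of 16 (64K pages) is reported to hardware as shift 4 */
        printf("hw page shift for 64K CPU pages: %u\n", to_hr_hw_page_shift(16));
        return 0;
    }

This only illustrates the unit conversions the driver performs before programming buffer addresses; the real helpers operate on dma_addr_t values inside the kernel rather than on the sample numbers used here.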
+static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count) +{ + if (count > 0) + return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum; + + return 0; +} + +static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift) +{ + return hr_hw_page_align(count << buf_shift); +} + +static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift) { - u32 page_size = 1 << buf->page_shift; + return hr_hw_page_align(count << buf_shift) >> buf_shift; +} + +static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift) +{ + if (!count) + return 0; - if (buf->nbufs == 1) - return (char *)(buf->direct.buf) + offset; - else - return (char *)(buf->page_list[offset >> buf->page_shift].buf) + - (offset & (page_size - 1)); + return ilog2(buf_shift ? + to_hr_hem_entries_count(count, buf_shift) : count); +} + +static inline u8 to_rdma_port_num(u8 phy_port_num) +{ + return phy_port_num + 1; +} + +static inline enum ib_port_state get_port_state(struct net_device *net_dev) +{ + return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? + IB_PORT_ACTIVE : IB_PORT_DOWN; } int hns_roce_init_uar_table(struct hns_roce_dev *dev); @@ -925,17 +1428,35 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, struct hns_roce_buf *buf); +/* hns roce hw need current block and next block addr from mtt */ +#define MTT_MIN_COUNT 2 +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); +int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, + unsigned int ba_page_shift, + struct ib_ucontext *ucontext, + unsigned long user_addr); +void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr); +int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + dma_addr_t *pages, unsigned int page_cnt); + int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev); +int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev); int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev); int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev); int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev); +int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev); +void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev); +void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev); int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj); void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj, @@ -961,6 +1482,11 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, struct ib_udata *udata); int hns_roce_dealloc_pd(struct ib_pd *pd); +struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev, + struct ib_ucontext *context, + struct ib_udata *udata); +int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd); + struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, @@ -968,25 +1494,48 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int 
hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata); +struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr); -int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index); +int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long mpt_index); unsigned long key_to_hw_index(u32 key); -void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, - struct hns_roce_buf *buf); -int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, - struct hns_roce_buf *buf, u32 page_shift); +struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type, + struct ib_udata *udata); +int hns_roce_dealloc_mw(struct ib_mw *ibmw); + +void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf); +struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, + u32 page_shift, u32 flags); int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, struct ib_umem *umem); +int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, struct hns_roce_buf *buf, + unsigned int page_shift); +int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, + int buf_cnt, struct ib_umem *umem, + unsigned int page_shift); + +struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); +int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata); +int hns_roce_destroy_srq(struct ib_srq *ibsrq); + struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n); @@ -998,10 +1547,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, - int cnt); -__be32 send_ieth(const struct ib_send_wr *wr); +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); +__be32 send_ieth(struct ib_send_wr *wr); int to_hr_qp_type(int qp_type); struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, @@ -1023,8 +1570,167 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db); void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); +void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type); int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 
port, int gid_index); +void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev); +int hns_roce_fill_res_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res); + +int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev); +void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev); + +u32 hw_index_to_key(unsigned long ind); +int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, + struct hns_roce_mr *mr); +void hns_roce_mr_free(struct hns_roce_dev *hr_dev, + struct hns_roce_mr *mr); +int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, + struct hns_roce_mr *mr, + struct ib_umem *umem); +int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, + u64 size, u32 access, int npages, + struct hns_roce_mr *mr); +enum hns_phy_state { + HNS_ROCE_PHY_SLEEP = 1, + HNS_ROCE_PHY_POLLING = 2, + HNS_ROCE_PHY_DISABLED = 3, + HNS_ROCE_PHY_TRAINING = 4, + HNS_ROCE_PHY_LINKUP = 5, + HNS_ROCE_PHY_LINKERR = 6, + HNS_ROCE_PHY_TEST = 7 +}; + +#ifdef CONFIG_INFINIBAND_HNS_DFX +enum { + RDFX_FUNC_MODIFY_DEVICE, + RDFX_FUNC_QUERY_DEVICE, + RDFX_FUNC_QUERY_PORT, + RDFX_FUNC_MODIFY_PORT, + RDFX_FUNC_GET_LINK_LAYER, + RDFX_FUNC_GET_NETDEV, + RDFX_FUNC_QUERY_GID, + RDFX_FUNC_ADD_GID, + RDFX_FUNC_DEL_GID, + RDFX_FUNC_QUERY_PKEY, + RDFX_FUNC_ALLOC_UCONTEXT, + RDFX_FUNC_DEALLOC_UCONTEXT, + RDFX_FUNC_MMAP, + RDFX_FUNC_ALLOC_PD, + RDFX_FUNC_DEALLOC_PD, + RDFX_FUNC_CREATE_AH, + RDFX_FUNC_QUERY_AH, + RDFX_FUNC_DESTROY_AH, + RDFX_FUNC_CREATE_QP, + RDFX_FUNC_MODIFY_QP, + RDFX_FUNC_QUERY_QP, + RDFX_FUNC_DESTROY_QP, + RDFX_FUNC_POST_SEND, + RDFX_FUNC_POST_RECV, + RDFX_FUNC_CREATE_CQ, + RDFX_FUNC_MODIFY_CQ, + RDFX_FUNC_DESTROY_CQ, + RDFX_FUNC_REQ_NOTIFY_CQ, + RDFX_FUNC_POLL_CQ, + RDFX_FUNC_RESIZE_CQ, + RDFX_FUNC_GET_DMA_MR, + RDFX_FUNC_REG_USER_MR, + RDFX_FUNC_REREG_USER_MR, + RDFX_FUNC_DEREG_MR, + RDFX_FUNC_PORT_IMMUTABLE, + RDFX_FUNC_REG_UMM_MR, + RDFX_FUNC_DEREG_UMM_MR, +}; +int alloc_rdfx_info(struct hns_roce_dev *hr_dev); +void rdfx_set_dev_name(struct hns_roce_dev *hr_dev); +void free_rdfx_info(struct hns_roce_dev *hr_dev); +void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func); +void rdfx_inc_dealloc_qp_cnt(struct hns_roce_dev *hr_dev); +void rdfx_inc_arm_cq_cnt(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + enum ib_cq_notify_flags flags); +void rdfx_inc_dereg_mr_cnt(struct hns_roce_dev *hr_dev); +void rdfx_inc_sq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn); +void rdfx_inc_rq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn); +void rdfx_inc_ceqe_cnt(struct hns_roce_dev *hr_dev, int ceqn); +void rdfx_inc_dealloc_cq_cnt(struct hns_roce_dev *hr_dev); +struct rdfx_qp_info *rdfx_get_rdfx_qp(struct hns_roce_dev *hr_dev, + unsigned long qpn); +void rdfx_put_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn); +#ifndef CONFIG_INFINIBAND_HNS_DFX_ENHANCE +void rdfx_release_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn); +#else +#define rdfx_release_rdfx_qp(hr_dev, qpn) +#endif +struct rdfx_cq_info *rdfx_get_rdfx_cq(struct hns_roce_dev *hr_dev, + unsigned long cqn); +void rdfx_put_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn); +void rdfx_release_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn); +struct rdfx_ceq_info *rdfx_get_rdfx_ceq(struct hns_roce_dev *hr_dev, + unsigned long ceqn); +void rdfx_put_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn); +void rdfx_release_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn); +void rdfx_alloc_rdfx_ceq(struct 
hns_roce_dev *hr_dev, unsigned long ceqn, + unsigned int eq_cmd); +void rdfx_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq); +void rdfx_free_cq_buff(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq); +void rdfx_alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); +void rdfx_set_qp_attr(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state new_state); +void rdfx_alloc_rdfx_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr); +void rdfx_release_rdfx_mr(struct hns_roce_dev *hr_dev, unsigned long key); +void rdfx_alloc_rdfx_pd(struct hns_roce_dev *hr_dev, struct hns_roce_pd *pd); +void rdfx_release_rdfx_pd(struct hns_roce_dev *hr_dev, unsigned long pdn); + +#ifdef CONFIG_KERNEL_419 +void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, int ind, void *wqe, + const struct ib_recv_wr *wr); + +#else +void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, int ind, void *wqe, + struct ib_recv_wr *wr); +#endif +void rdfx_cp_cqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + void *cqe); +void rdfx_set_rdfx_cq_ci(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq); +#else +#define alloc_rdfx_info(hr_dev) (0) +#define rdfx_set_dev_name(hr_dev) +#define free_rdfx_info(hr_dev) +#define rdfx_func_cnt(hr_dev, func) +#define rdfx_inc_dealloc_qp_cnt(hr_dev) +#define rdfx_inc_arm_cq_cnt(hr_dev, hr_cq, flags) +#define rdfx_inc_dereg_mr_cnt(hr_dev) +#define rdfx_inc_sq_db_cnt(hr_dev, qpn) +#define rdfx_inc_rq_db_cnt(hr_dev, qpn) +#define rdfx_inc_ceqe_cnt(hr_dev, ceqn) +#define rdfx_inc_dealloc_cq_cnt(hr_dev) +#define rdfx_get_rdfx_qp(hr_dev, qpn) +#define rdfx_put_rdfx_qp(hr_dev, qpn) +#define rdfx_release_rdfx_qp(hr_dev, qpn) +#define rdfx_get_rdfx_cq(hr_dev, cqn) +#define rdfx_put_rdfx_cq(hr_dev, cqn) +#define rdfx_release_rdfx_cq(hr_dev, cqn) +#define rdfx_get_rdfx_ceq(hr_dev, ceqn) +#define rdfx_put_rdfx_ceq(hr_dev, ceqn) +#define rdfx_release_rdfx_ceq(hr_dev, ceqn) +#define rdfx_alloc_rdfx_ceq(hr_dev, ceqn, eq_cmd) +#define rdfx_alloc_cq_buf(hr_dev, hr_cq) +#define rdfx_free_cq_buff(hr_dev, hr_cq) +#define rdfx_alloc_qp_buf(hr_dev, hr_qp) +#define rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state) +#define rdfx_alloc_rdfx_mr(hr_dev, mr) +#define rdfx_release_rdfx_mr(hr_dev, key) +#define rdfx_alloc_rdfx_pd(hr_dev, pd) +#define rdfx_release_rdfx_pd(hr_dev, pdn) +#define rdfx_cp_rq_wqe_buf(hr_dev, hr_qp, ind, wqe, wr) +#define rdfx_cp_cqe_buf(hr_dev, hr_cq, cqe) +#define rdfx_set_rdfx_cq_ci(hr_dev, hr_cq) +#endif + #endif /* _HNS_ROCE_DEVICE_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f6faefed96e8bedfab852f27540a62fe90c1e3dd..a810fde3e356d406b9babbd064fd5548f3c61a84 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -30,6 +30,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include "roce_k_compat.h" #include #include "hns_roce_device.h" @@ -41,25 +42,58 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) { - if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) || - (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) || - (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) || - (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) || - (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) || - (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT)) - return true; - - return false; + int hop_num = 0; + + switch (type) { + case HEM_TYPE_QPC: + hop_num = hr_dev->caps.qpc_hop_num; + break; + case HEM_TYPE_MTPT: + hop_num = hr_dev->caps.mpt_hop_num; + break; + case HEM_TYPE_CQC: + hop_num = hr_dev->caps.cqc_hop_num; + break; + case HEM_TYPE_SRQC: + hop_num = hr_dev->caps.srqc_hop_num; + break; + case HEM_TYPE_SCC_CTX: + hop_num = hr_dev->caps.scc_ctx_hop_num; + break; + case HEM_TYPE_QPC_TIMER: + hop_num = hr_dev->caps.qpc_timer_hop_num; + break; + case HEM_TYPE_CQC_TIMER: + hop_num = hr_dev->caps.cqc_timer_hop_num; + break; + case HEM_TYPE_CQE: + hop_num = hr_dev->caps.cqe_hop_num; + break; + case HEM_TYPE_MTT: + hop_num = hr_dev->caps.mtt_hop_num; + break; + case HEM_TYPE_SRQWQE: + hop_num = hr_dev->caps.srqwqe_hop_num; + break; + case HEM_TYPE_IDX: + hop_num = hr_dev->caps.idx_hop_num; + break; + default: + return false; + } + + return hop_num ? true : false; } EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop); static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, - u32 bt_chunk_num) + u32 bt_chunk_num, u64 hem_max_num) { - int i; + u64 check_max_num = start_idx + bt_chunk_num; + u64 i; - for (i = 0; i < bt_chunk_num; i++) - if (hem[start_idx + i]) + for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++) + if (hem[i]) return false; return true; @@ -88,17 +122,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num) return 0; } -int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, unsigned long *obj, - struct hns_roce_hem_mhop *mhop) +static int get_hem_table_config(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_mhop *mhop, + u32 type) { struct device *dev = hr_dev->dev; - u32 chunk_ba_num; - u32 table_idx; - u32 bt_num; - u32 chunk_size; - switch (table->type) { + switch (type) { case HEM_TYPE_QPC: mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz + PAGE_SHIFT); @@ -123,6 +153,30 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, mhop->ba_l0_num = hr_dev->caps.cqc_bt_num; mhop->hop_num = hr_dev->caps.cqc_hop_num; break; + case HEM_TYPE_SCC_CTX: + mhop->buf_chunk_size = 1 << (hr_dev->caps.scc_ctx_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.scc_ctx_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.scc_ctx_bt_num; + mhop->hop_num = hr_dev->caps.scc_ctx_hop_num; + break; + case HEM_TYPE_QPC_TIMER: + mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num; + mhop->hop_num = hr_dev->caps.qpc_timer_hop_num; + break; + case HEM_TYPE_CQC_TIMER: + mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num; + mhop->hop_num = hr_dev->caps.cqc_timer_hop_num; + break; case HEM_TYPE_SRQC: mhop->buf_chunk_size = 1 << 
(hr_dev->caps.srqc_buf_pg_sz + PAGE_SHIFT); @@ -136,7 +190,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, + PAGE_SHIFT); mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT); - mhop->ba_l0_num = mhop->bt_chunk_size / 8; + mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN; mhop->hop_num = hr_dev->caps.mtt_hop_num; break; case HEM_TYPE_CQE: @@ -144,24 +198,56 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, + PAGE_SHIFT); mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT); - mhop->ba_l0_num = mhop->bt_chunk_size / 8; + mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN; mhop->hop_num = hr_dev->caps.cqe_hop_num; break; + case HEM_TYPE_SRQWQE: + mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN; + mhop->hop_num = hr_dev->caps.srqwqe_hop_num; + break; + case HEM_TYPE_IDX: + mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN; + mhop->hop_num = hr_dev->caps.idx_hop_num; + break; default: dev_err(dev, "Table %d not support multi-hop addressing!\n", - table->type); + type); return -EINVAL; } + return 0; +} + +int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_table *table, unsigned long *obj, + struct hns_roce_hem_mhop *mhop) +{ + struct device *dev = hr_dev->dev; + u32 chunk_ba_num; + u32 table_idx; + u32 bt_num; + u32 chunk_size; + + if (get_hem_table_config(hr_dev, mhop, table->type)) + return -EINVAL; + if (!obj) return 0; /* - * QPC/MTPT/CQC/SRQC alloc hem for buffer pages. + * QPC/MTPT/CQC/SRQC/SCC_CTX alloc hem for buffer pages. * MTT/CQE alloc hem for bt pages. */ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); - chunk_ba_num = mhop->bt_chunk_size / 8; + chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; chunk_size = table->type < HEM_TYPE_MTT ? 
mhop->buf_chunk_size : mhop->bt_chunk_size; table_idx = (*obj & (table->num_obj - 1)) / @@ -201,6 +287,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, struct scatterlist *mem; int order; void *buf; + int left; WARN_ON(gfp_mask & __GFP_HIGHMEM); @@ -213,8 +300,8 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, INIT_LIST_HEAD(&hem->chunk_list); order = get_order(hem_alloc_size); - - while (npages > 0) { + left = npages; + while (left > 0) { if (!chunk) { chunk = kmalloc(sizeof(*chunk), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); @@ -228,7 +315,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, list_add_tail(&chunk->list, &hem->chunk_list); } - while (1 << order > npages) + while (1 << order > left) --order; /* @@ -246,7 +333,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, ++chunk->npages; ++chunk->nsg; - npages -= 1 << order; + left -= 1 << order; } return hem; @@ -285,9 +372,9 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, unsigned long flags; struct hns_roce_hem_iter iter; void __iomem *bt_cmd; - u32 bt_cmd_h_val = 0; - u32 bt_cmd_val[2]; - u32 bt_cmd_l = 0; + __le32 bt_cmd_h_val = 0; + __le32 bt_cmd_val[2]; + __le32 bt_cmd_l = 0; u64 bt_ba = 0; int ret = 0; @@ -331,21 +418,22 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(lock, flags); - return -EBUSY; - } - } else { + end = HW_SYNC_TIMEOUT_MSECS; + while (end > 0) { + if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) break; - } + mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); + end -= HW_SYNC_SLEEP_TIME_INTERVAL; + } + + if (end <= 0) { + dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); + spin_unlock_irqrestore(lock, flags); + return -EBUSY; } - bt_cmd_l = (u32)bt_ba; + bt_cmd_l = cpu_to_le32(bt_ba); roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> BT_BA_SHIFT); @@ -390,7 +478,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, buf_chunk_size = mhop.buf_chunk_size; bt_chunk_size = mhop.bt_chunk_size; hop_num = mhop.hop_num; - chunk_ba_num = bt_chunk_size / 8; + chunk_ba_num = bt_chunk_size / BA_BYTE_LEN; bt_num = hns_roce_get_bt_num(table->type, hop_num); switch (bt_num) { @@ -413,8 +501,19 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, return -EINVAL; } + if (unlikely(hem_idx >= table->num_hem)) { + dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n", + table->type, hem_idx, table->num_hem); + return -EINVAL; + } + mutex_lock(&table->mutex); + if (!table->hem) { + ret = -ENODEV; + goto out; + } + if (table->hem[hem_idx]) { ++table->hem[hem_idx]->refcount; goto out; @@ -436,9 +535,11 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, /* set base address to hardware */ if (table->type < HEM_TYPE_MTT) { step_idx = 0; - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { + ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); + if (ret) { + dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n", + ret, table->type); ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); goto err_dma_alloc_l1; } } @@ -460,15 +561,17 @@ static int hns_roce_table_mhop_get(struct 
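/*
 * Illustrative sketch (not part of the patch): the bounded-polling pattern
 * the hns_roce_set_hem() hunk above switches to. Instead of comparing
 * against jiffies, the loop spends a fixed millisecond budget and reports
 * -EBUSY once it is exhausted. poll_reg() and sleep_ms() are hypothetical
 * stand-ins for readl()/mdelay(), and the constants mirror (but are not)
 * HW_SYNC_TIMEOUT_MSECS / HW_SYNC_SLEEP_TIME_INTERVAL.
 */
#include <stdint.h>
#include <errno.h>

#define SYNC_TIMEOUT_MS	20	/* assumed overall budget */
#define SYNC_STEP_MS	1	/* assumed delay per poll */
#define SYNC_BUSY_BIT	(1u << 31)

extern uint32_t poll_reg(void);		/* hypothetical register read */
extern void sleep_ms(unsigned int ms);	/* hypothetical delay helper */

static int wait_for_hw_sync(void)
{
	int budget = SYNC_TIMEOUT_MS;

	while (budget > 0) {
		if (!(poll_reg() & SYNC_BUSY_BIT))
			return 0;	/* hardware cleared the busy flag */
		sleep_ms(SYNC_STEP_MS);
		budget -= SYNC_STEP_MS;
	}

	return -EBUSY;			/* budget exhausted, still busy */
}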
hns_roce_dev *hr_dev, /* set base address to hardware */ step_idx = 1; - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { + ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); + if (ret) { + dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n", + ret, table->type); ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); goto err_alloc_hem_buf; } } /* - * alloc buffer space chunk for QPC/MTPT/CQC/SRQC. + * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX. * alloc bt space chunk for MTT/CQE. */ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size; @@ -500,9 +603,11 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, } /* set HEM base address to hardware */ - if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) { + ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); + if (ret) { + dev_err(dev, "set HEM base address to HW failed(%d), type = %d\n", + ret, table->type); ret = -ENODEV; - dev_err(dev, "set HEM base address to HW failed!\n"); goto err_alloc_hem_buf; } } else if (hop_num == 2) { @@ -546,6 +651,11 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev, mutex_lock(&table->mutex); + if (!table->hem) { + ret = -ENODEV; + goto out; + } + if (table->hem[i]) { ++table->hem[i]->refcount; goto out; @@ -575,6 +685,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev, mutex_unlock(&table->mutex); return ret; } +EXPORT_SYMBOL_GPL(hns_roce_table_get); static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, @@ -599,7 +710,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, bt_chunk_size = mhop.bt_chunk_size; hop_num = mhop.hop_num; - chunk_ba_num = bt_chunk_size / 8; + chunk_ba_num = bt_chunk_size / BA_BYTE_LEN; bt_num = hns_roce_get_bt_num(table->type, hop_num); switch (bt_num) { @@ -622,25 +733,25 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, mutex_lock(&table->mutex); - if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) { + if (!table->hem) { mutex_unlock(&table->mutex); return; } - if (table->type < HEM_TYPE_MTT && hop_num == 1) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) - dev_warn(dev, "Clear HEM base address failed.\n"); - } else if (table->type < HEM_TYPE_MTT && hop_num == 2) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2)) - dev_warn(dev, "Clear HEM base address failed.\n"); - } else if (table->type < HEM_TYPE_MTT && - hop_num == HNS_ROCE_HOP_NUM_0) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); + if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) { + mutex_unlock(&table->mutex); + return; } + if (table->type < HEM_TYPE_MTT && hop_num == 1) + hr_dev->hw->clear_hem(hr_dev, table, obj, 1); + else if (table->type < HEM_TYPE_MTT && hop_num == 2) + hr_dev->hw->clear_hem(hr_dev, table, obj, 2); + else if (table->type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) + hr_dev->hw->clear_hem(hr_dev, table, obj, 0); + /* - * free buffer space chunk for QPC/MTPT/CQC/SRQC. + * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCC_CTX. * free bt space chunk for MTT/CQE. 
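/*
 * Illustrative sketch (not part of the patch): the bounds-checked scan that
 * hns_roce_check_hem_null() performs after gaining its hem_max_num argument.
 * The extra limit keeps the walk from running past the end of the table when
 * a chunk straddles the last base-address page. Types are simplified plain C.
 */
#include <stdbool.h>
#include <stdint.h>

static bool range_is_empty(void * const *slots, uint64_t start,
			   uint32_t chunk, uint64_t max)
{
	uint64_t end = start + chunk;
	uint64_t i;

	for (i = start; i < end && i < max; i++)
		if (slots[i])
			return false;	/* an entry is still allocated */

	return true;			/* safe to free the parent table */
}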
*/ hns_roce_free_hem(hr_dev, table->hem[hem_idx]); @@ -649,10 +760,9 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, if (check_whether_bt_num_2(table->type, hop_num)) { start_idx = mhop.l0_idx * chunk_ba_num; if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num)) { - if (table->type < HEM_TYPE_MTT && - hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); + chunk_ba_num, table->num_hem)) { + if (table->type < HEM_TYPE_MTT) + hr_dev->hw->clear_hem(hr_dev, table, obj, 0); dma_free_coherent(dev, bt_chunk_size, table->bt_l0[mhop.l0_idx], @@ -663,9 +773,8 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + mhop.l1_idx * chunk_ba_num; if (hns_roce_check_hem_null(table->hem, start_idx, - chunk_ba_num)) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) - dev_warn(dev, "Clear HEM base address failed.\n"); + chunk_ba_num, table->num_hem)) { + hr_dev->hw->clear_hem(hr_dev, table, obj, 1); dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx], @@ -675,9 +784,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, start_idx = mhop.l0_idx * chunk_ba_num; if (hns_roce_check_bt_null(table->bt_l1, start_idx, chunk_ba_num)) { - if (hr_dev->hw->clear_hem(hr_dev, table, obj, - 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); + hr_dev->hw->clear_hem(hr_dev, table, obj, 0); dma_free_coherent(dev, bt_chunk_size, table->bt_l0[mhop.l0_idx], @@ -693,7 +800,6 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, void hns_roce_table_put(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj) { - struct device *dev = hr_dev->dev; unsigned long i; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { @@ -706,10 +812,14 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, mutex_lock(&table->mutex); + if (!table->hem) { + mutex_unlock(&table->mutex); + return; + } + if (--table->hem[i]->refcount == 0) { /* Clear HEM base address */ - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - dev_warn(dev, "Clear HEM base address failed.\n"); + hr_dev->hw->clear_hem(hr_dev, table, obj, 0); hns_roce_free_hem(hr_dev, table->hem[i]); table->hem[i] = NULL; @@ -717,6 +827,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, mutex_unlock(&table->mutex); } +EXPORT_SYMBOL_GPL(hns_roce_table_put); void *hns_roce_table_find(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, @@ -739,25 +850,34 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, mutex_lock(&table->mutex); + if (!table->hem) { + mutex_unlock(&table->mutex); + return NULL; + } + if (!hns_roce_check_whether_mhop(hr_dev, table->type)) { obj_per_chunk = table->table_chunk_size / table->obj_size; hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk]; idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; dma_offset = offset = idx_offset * table->obj_size; } else { - hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); + u32 seg_size; + + if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop)) + goto out; /* mtt mhop */ i = mhop.l0_idx; j = mhop.l1_idx; if (mhop.hop_num == 2) - hem_idx = i * (mhop.bt_chunk_size / 8) + j; + hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j; else if (mhop.hop_num == 1 || mhop.hop_num == HNS_ROCE_HOP_NUM_0) hem_idx = i; hem = table->hem[hem_idx]; - dma_offset = offset = (obj & (table->num_obj - 1)) * - table->obj_size % mhop.bt_chunk_size; + seg_size = HNS_ROCE_MTT_ENTRY_PER_SEG 
* BA_BYTE_LEN; + dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % + mhop.bt_chunk_size; if (mhop.hop_num == 2) dma_offset = offset = 0; } @@ -799,7 +919,9 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev, int ret; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + ret = get_hem_table_config(hr_dev, &mhop, table->type); + if (ret) + return ret; inc = mhop.bt_chunk_size / table->obj_size; } @@ -829,7 +951,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev, unsigned long i; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); + if (get_hem_table_config(hr_dev, &mhop, table->type)) + return; inc = mhop.bt_chunk_size / table->obj_size; } @@ -842,7 +965,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, unsigned long obj_size, unsigned long nobj, int use_lowmem) { - struct device *dev = hr_dev->dev; unsigned long obj_per_chunk; unsigned long num_hem; @@ -855,66 +977,24 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, if (!table->hem) return -ENOMEM; } else { + struct hns_roce_hem_mhop mhop = {}; unsigned long buf_chunk_size; unsigned long bt_chunk_size; unsigned long bt_chunk_num; unsigned long num_bt_l0 = 0; u32 hop_num; - switch (type) { - case HEM_TYPE_QPC: - buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.qpc_bt_num; - hop_num = hr_dev->caps.qpc_hop_num; - break; - case HEM_TYPE_MTPT: - buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.mpt_bt_num; - hop_num = hr_dev->caps.mpt_hop_num; - break; - case HEM_TYPE_CQC: - buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.cqc_bt_num; - hop_num = hr_dev->caps.cqc_hop_num; - break; - case HEM_TYPE_SRQC: - buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz - + PAGE_SHIFT); - bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz - + PAGE_SHIFT); - num_bt_l0 = hr_dev->caps.srqc_bt_num; - hop_num = hr_dev->caps.srqc_hop_num; - break; - case HEM_TYPE_MTT: - buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.mtt_hop_num; - break; - case HEM_TYPE_CQE: - buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz - + PAGE_SHIFT); - bt_chunk_size = buf_chunk_size; - hop_num = hr_dev->caps.cqe_hop_num; - break; - default: - dev_err(dev, - "Table %d not support to init hem table here!\n", - type); + if (get_hem_table_config(hr_dev, &mhop, type)) return -EINVAL; - } + + buf_chunk_size = mhop.buf_chunk_size; + bt_chunk_size = mhop.bt_chunk_size; + num_bt_l0 = mhop.ba_l0_num; + hop_num = mhop.hop_num; + obj_per_chunk = buf_chunk_size / obj_size; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; - bt_chunk_num = bt_chunk_size / 8; + bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; if (type >= HEM_TYPE_MTT) num_bt_l0 = bt_chunk_num; @@ -991,19 +1071,23 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev, { struct hns_roce_hem_mhop mhop; u32 buf_chunk_size; - int i; + int ret; u64 obj; + int i; - hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); - buf_chunk_size = table->type < HEM_TYPE_MTT ? 
mhop.buf_chunk_size : - mhop.bt_chunk_size; + ret = get_hem_table_config(hr_dev, &mhop, table->type); + if (!ret) { + buf_chunk_size = table->type < HEM_TYPE_MTT ? + mhop.buf_chunk_size : mhop.bt_chunk_size; - for (i = 0; i < table->num_hem; ++i) { - obj = i * buf_chunk_size / table->obj_size; - if (table->hem[i]) - hns_roce_table_mhop_put(hr_dev, table, obj, 0); + for (i = 0; i < table->num_hem; ++i) { + obj = i * buf_chunk_size / table->obj_size; + if (table->hem[i]) + hns_roce_table_mhop_put(hr_dev, table, obj, 0); + } } + mutex_lock(&table->mutex); kfree(table->hem); table->hem = NULL; kfree(table->bt_l1); @@ -1014,12 +1098,12 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev, table->bt_l0 = NULL; kfree(table->bt_l0_dma_addr); table->bt_l0_dma_addr = NULL; + mutex_unlock(&table->mutex); } void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table) { - struct device *dev = hr_dev->dev; unsigned long i; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { @@ -1029,9 +1113,8 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, for (i = 0; i < table->num_hem; ++i) if (table->hem[i]) { - if (hr_dev->hw->clear_hem(hr_dev, table, - i * table->table_chunk_size / table->obj_size, 0)) - dev_err(dev, "Clear HEM base address failed.\n"); + hr_dev->hw->clear_hem(hr_dev, table, + i * table->table_chunk_size / table->obj_size, 0); hns_roce_free_hem(hr_dev, table->hem[i]); } @@ -1041,7 +1124,25 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) { + if ((hr_dev->caps.num_idx_segs)) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->mr_table.mtt_idx_table); + if (hr_dev->caps.num_srqwqe_segs) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->mr_table.mtt_srqwqe_table); + if (hr_dev->caps.srqc_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->srq_table.table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); + if (hr_dev->caps.qpc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qpc_timer_table.table); + if (hr_dev->caps.cqc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->cqc_timer_table.table); + if (hr_dev->caps.scc_ctx_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.scc_ctx_table); if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.trrl_table); @@ -1053,3 +1154,469 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) &hr_dev->mr_table.mtt_cqe_table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table); } + +struct roce_hem_item { + struct list_head list; /* link all hems in the same bt level */ + struct list_head sibling; /* link all hems in last hop for mtt */ + void *addr; + dma_addr_t dma_addr; + size_t count; /* max ba numbers */ + int start; /* start buf offset in this hem */ + int end; /* end buf offset in this hem */ +}; + +static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev, + int start, int end, + int count, bool exist_bt, + int bt_level) +{ + struct roce_hem_item *hem; + + hem = kzalloc(sizeof(*hem), GFP_KERNEL); + if (!hem) + return NULL; + + if (exist_bt) { + hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN, + &hem->dma_addr, GFP_KERNEL); + if (!hem->addr) { + kfree(hem); + return NULL; + } + } + + hem->count = count; + hem->start = start; + hem->end = end; + INIT_LIST_HEAD(&hem->list); + INIT_LIST_HEAD(&hem->sibling); + + return hem; +} + +static void hem_list_free_item(struct hns_roce_dev 
*hr_dev, + struct roce_hem_item *hem, bool exist_bt) +{ + if (exist_bt) + dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN, + hem->addr, hem->dma_addr); + kfree(hem); +} + +static void hem_list_free_all(struct hns_roce_dev *hr_dev, + struct list_head *head, bool exist_bt) +{ + struct roce_hem_item *hem, *temp_hem; + + list_for_each_entry_safe(hem, temp_hem, head, list) { + list_del(&hem->list); + hem_list_free_item(hr_dev, hem, exist_bt); + } +} + +static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr, + u64 table_addr) +{ + *(u64 *)(base_addr) = table_addr; +} + +/* assign L0 table address to hem from root bt */ +static void hem_list_assign_bt(struct hns_roce_dev *hr_dev, + struct roce_hem_item *hem, void *cpu_addr, + u64 phy_addr) +{ + hem->addr = cpu_addr; + hem->dma_addr = (dma_addr_t)phy_addr; +} + +static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem, + int offset) +{ + return (hem->start <= offset && offset <= hem->end); +} + +static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list, + int page_offset) +{ + struct roce_hem_item *hem, *temp_hem; + struct roce_hem_item *found = NULL; + + list_for_each_entry_safe(hem, temp_hem, ba_list, list) { + if (hem_list_page_is_in_range(hem, page_offset)) { + found = hem; + break; + } + } + + return found; +} + +static bool hem_list_is_bottom_bt(int hopnum, int bt_level) +{ + /* + * hopnum base address table levels + * 0 L0(buf) + * 1 L0 -> buf + * 2 L0 -> L1 -> buf + * 3 L0 -> L1 -> L2 -> buf + */ + return bt_level >= (hopnum ? hopnum - 1 : hopnum); +} + +/** + * calc base address entries num + * @hopnum: num of mutihop addressing + * @bt_level: base address table level + * @unit: ba entries per bt page + */ +static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) +{ + u32 step; + int max; + int i; + + if (hopnum <= bt_level) + return 0; + /* + * hopnum bt_level range + * 1 0 unit + * ------------ + * 2 0 unit * unit + * 2 1 unit + * ------------ + * 3 0 unit * unit * unit + * 3 1 unit * unit + * 3 2 unit + */ + step = 1; + max = hopnum - bt_level; + for (i = 0; i < max; i++) + step = step * unit; + + return step; +} + +/** + * calc the root ba entries which could cover all regions + * @regions: buf region array + * @region_cnt: array size of @regions + * @unit: ba entries per bt page + */ +int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, + int region_cnt, int unit) +{ + struct hns_roce_buf_region *r; + int total = 0; + int step; + int i; + + for (i = 0; i < region_cnt; i++) { + r = (struct hns_roce_buf_region *)®ions[i]; + if (r->hopnum > 1) { + step = hem_list_calc_ba_range(r->hopnum, 1, unit); + if (step > 0) + total += (r->count + step - 1) / step; + } else { + total += r->count; + } + } + + return total; +} + +static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, + const struct hns_roce_buf_region *r, int unit, + int offset, struct list_head *mid_bt, + struct list_head *btm_bt) +{ + struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL }; + struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL]; + struct roce_hem_item *cur, *pre; + const int hopnum = r->hopnum; + int start_aligned; + int distance; + int ret = 0; + int max_ofs; + int level; + u32 step; + int end; + + if (hopnum <= 1) + return 0; + + if (hopnum > HNS_ROCE_MAX_BT_LEVEL) { + dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum); + return -EINVAL; + } + + if (offset < r->offset) { + dev_err(hr_dev->dev, "invalid offset %d,min %u!\n", + offset, r->offset); + 
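/*
 * Illustrative sketch (not part of the patch): the range computation done by
 * hem_list_calc_ba_range() above. A base-address page at level "bt_level" of
 * a "hopnum"-level table covers unit^(hopnum - bt_level) buffer pages, where
 * "unit" is the number of 8-byte addresses that fit in one BT page.
 */
#include <stdint.h>

static uint32_t ba_range(int hopnum, int bt_level, int unit)
{
	uint32_t step = 1;
	int i;

	if (hopnum <= bt_level)
		return 0;		/* no such level for this hop count */

	for (i = 0; i < hopnum - bt_level; i++)
		step *= unit;

	return step;
}

/*
 * With unit = 512 (a 4 KB BT page holding 512 eight-byte addresses):
 *   ba_range(3, 0, 512) == 512 * 512 * 512  (one L0 entry of a 3-hop table)
 *   ba_range(3, 2, 512) == 512              (one bottom-level BT entry)
 */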
return -EINVAL; + } + + distance = offset - r->offset; + max_ofs = r->offset + r->count - 1; + for (level = 0; level < hopnum; level++) + INIT_LIST_HEAD(&temp_list[level]); + + /* config L1 bt to last bt and link them to corresponding parent */ + for (level = 1; level < hopnum; level++) { + cur = hem_list_search_item(&mid_bt[level], offset); + if (cur) { + hem_ptrs[level] = cur; + continue; + } + + step = hem_list_calc_ba_range(hopnum, level, unit); + if (step < 1) { + ret = -EINVAL; + goto err_exit; + } + + start_aligned = (distance / step) * step + r->offset; + end = min_t(int, start_aligned + step - 1, max_ofs); + cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, + true, level); + if (!cur) { + ret = -ENOMEM; + goto err_exit; + } + hem_ptrs[level] = cur; + list_add(&cur->list, &temp_list[level]); + if (hem_list_is_bottom_bt(hopnum, level)) + list_add(&cur->sibling, &temp_list[0]); + + /* link bt to parent bt */ + if (level > 1) { + pre = hem_ptrs[level - 1]; + step = (cur->start - pre->start) / step * BA_BYTE_LEN; + hem_list_link_bt(hr_dev, pre->addr + step, + cur->dma_addr); + } + } + + list_splice(&temp_list[0], btm_bt); + for (level = 1; level < hopnum; level++) + list_splice(&temp_list[level], &mid_bt[level]); + + return 0; + +err_exit: + for (level = 1; level < hopnum; level++) + hem_list_free_all(hr_dev, &temp_list[level], true); + + return ret; +} + +static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, int unit, + const struct hns_roce_buf_region *regions, + int region_cnt) +{ + struct list_head temp_list[HNS_ROCE_MAX_BT_REGION]; + struct roce_hem_item *hem, *temp_hem, *root_hem; + const struct hns_roce_buf_region *r; + struct list_head temp_root; + struct list_head temp_btm; + void *cpu_base; + u64 phy_base; + int ret = 0; + int ba_num; + int offset; + int total; + int step; + int i; + + r = ®ions[0]; + root_hem = hem_list_search_item(&hem_list->root_bt, r->offset); + if (root_hem) + return 0; + + ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit); + if (ba_num < 1) + return -ENOMEM; + + if (ba_num > unit) + return -ENOBUFS; + + ba_num = min_t(int, ba_num, unit); + INIT_LIST_HEAD(&temp_root); + offset = r->offset; + /* indicate to last region */ + r = ®ions[region_cnt - 1]; + root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1, + ba_num, true, 0); + if (!root_hem) + return -ENOMEM; + list_add(&root_hem->list, &temp_root); + + hem_list->root_ba = root_hem->dma_addr; + + INIT_LIST_HEAD(&temp_btm); + for (i = 0; i < region_cnt; i++) + INIT_LIST_HEAD(&temp_list[i]); + + total = 0; + for (i = 0; i < region_cnt && total < ba_num; i++) { + r = ®ions[i]; + if (!r->count) + continue; + + /* all regions's mid[x][0] shared the root_bt's trunk */ + cpu_base = root_hem->addr + total * BA_BYTE_LEN; + phy_base = root_hem->dma_addr + total * BA_BYTE_LEN; + + /* if hopnum is 0 or 1, cut a new fake hem from the root bt + * which's address share to all regions. 
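/*
 * Illustrative sketch (not part of the patch): how a child base-address page
 * is hooked into its parent, as hem_list_link_bt() does in the hunks above.
 * Each parent slot is simply an 8-byte DMA address; the slot index is derived
 * from how far the child's first buffer page lies from the start of the
 * parent's range. Names and types are simplified for illustration.
 */
#include <stdint.h>

static void link_child_bt(void *parent_page, int slot, uint64_t child_dma)
{
	/* store the child's bus address in the chosen parent slot */
	((uint64_t *)parent_page)[slot] = child_dma;
}

static int slot_for_child(int child_start, int parent_start, int step)
{
	/* each parent slot covers "step" buffer pages */
	return (child_start - parent_start) / step;
}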
+ */ + if (hem_list_is_bottom_bt(r->hopnum, 0)) { + hem = hem_list_alloc_item(hr_dev, r->offset, + r->offset + r->count - 1, + r->count, false, 0); + if (!hem) { + ret = -ENOMEM; + goto err_exit; + } + hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base); + list_add(&hem->list, &temp_list[i]); + list_add(&hem->sibling, &temp_btm); + total += r->count; + } else { + step = hem_list_calc_ba_range(r->hopnum, 1, unit); + if (step < 1) { + ret = -EINVAL; + goto err_exit; + } + /* if exist mid bt, link L1 to L0 */ + list_for_each_entry_safe(hem, temp_hem, + &hem_list->mid_bt[i][1], list) { + offset = (hem->start - r->offset) / step * + BA_BYTE_LEN; + hem_list_link_bt(hr_dev, cpu_base + offset, + hem->dma_addr); + total++; + } + } + } + + list_splice(&temp_btm, &hem_list->btm_bt); + list_splice(&temp_root, &hem_list->root_bt); + for (i = 0; i < region_cnt; i++) + list_splice(&temp_list[i], &hem_list->mid_bt[i][0]); + + return 0; + +err_exit: + for (i = 0; i < region_cnt; i++) + hem_list_free_all(hr_dev, &temp_list[i], false); + + hem_list_free_all(hr_dev, &temp_root, true); + + return ret; +} + +/* construct the base address table and link them by address hop config */ +int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + const struct hns_roce_buf_region *regions, + int region_cnt, unsigned int bt_pg_shift) +{ + const struct hns_roce_buf_region *r; + int ofs, end; + int unit; + int ret; + int i; + + if (region_cnt > HNS_ROCE_MAX_BT_REGION) { + dev_err(hr_dev->dev, "invalid region region_cnt %d!\n", + region_cnt); + return -EINVAL; + } + + unit = (1 << bt_pg_shift) / BA_BYTE_LEN; + for (i = 0; i < region_cnt; i++) { + r = ®ions[i]; + if (!r->count) + continue; + + end = r->offset + r->count; + for (ofs = r->offset; ofs < end; ofs += unit) { + ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, + hem_list->mid_bt[i], + &hem_list->btm_bt); + if (ret) { + dev_err(hr_dev->dev, + "alloc hem trunk fail ret=%d!\n", ret); + goto err_alloc; + } + } + } + + ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions, + region_cnt); + if (ret) + dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret); + else + return 0; + +err_alloc: + hns_roce_hem_list_release(hr_dev, hem_list); + + return ret; +} + +void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list) +{ + int i, j; + + for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) + for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) + hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j], + j != 0); + + hem_list_free_all(hr_dev, &hem_list->root_bt, true); + INIT_LIST_HEAD(&hem_list->btm_bt); + hem_list->root_ba = 0; +} + +void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list) +{ + int i, j; + + INIT_LIST_HEAD(&hem_list->root_bt); + INIT_LIST_HEAD(&hem_list->btm_bt); + for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) + for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) + INIT_LIST_HEAD(&hem_list->mid_bt[i][j]); +} + +void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + int offset, int *mtt_cnt, u64 *phy_addr) +{ + struct list_head *head = &hem_list->btm_bt; + struct roce_hem_item *hem, *temp_hem; + void *cpu_base = NULL; + u64 phy_base = 0; + int nr = 0; + + list_for_each_entry_safe(hem, temp_hem, head, sibling) { + if (hem_list_page_is_in_range(hem, offset)) { + nr = offset - hem->start; + cpu_base = hem->addr + nr * BA_BYTE_LEN; + phy_base = hem->dma_addr + nr * BA_BYTE_LEN; + nr = hem->end + 1 - offset; + break; + } + } + + if (mtt_cnt) + 
*mtt_cnt = nr; + + if (phy_addr) + *phy_addr = phy_base; + + return cpu_base; +} diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index e8850d59e7804caa45dd5e2cd77b140c7bfd7047..8033c123175b2801ed852ac1b944937ed44ccbe7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -44,29 +44,34 @@ enum { HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_SRQC, + HEM_TYPE_SCC_CTX, + HEM_TYPE_QPC_TIMER, + HEM_TYPE_CQC_TIMER, /* UNMAP HEM */ HEM_TYPE_MTT, HEM_TYPE_CQE, + HEM_TYPE_SRQWQE, + HEM_TYPE_IDX, HEM_TYPE_IRRL, HEM_TYPE_TRRL, }; #define HNS_ROCE_HEM_CHUNK_LEN \ ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \ - (sizeof(struct scatterlist))) + (sizeof(struct scatterlist) + sizeof(void *))) #define check_whether_bt_num_3(type, hop_num) \ - (type < HEM_TYPE_MTT && hop_num == 2) + ((type) < HEM_TYPE_MTT && (hop_num) == 2) #define check_whether_bt_num_2(type, hop_num) \ - ((type < HEM_TYPE_MTT && hop_num == 1) || \ - (type >= HEM_TYPE_MTT && hop_num == 2)) + (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == 2)) #define check_whether_bt_num_1(type, hop_num) \ - ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \ - (type >= HEM_TYPE_MTT && hop_num == 1) || \ - (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0)) + (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0)) enum { HNS_ROCE_HEM_PAGE_SHIFT = 12, @@ -97,9 +102,9 @@ struct hns_roce_hem_mhop { u32 buf_chunk_size; u32 bt_chunk_size; u32 ba_l0_num; - u32 l0_idx;/* level 0 base address table index */ - u32 l1_idx;/* level 1 base address table index */ - u32 l2_idx;/* level 2 base address table index */ + u32 l0_idx; /* level 0 base address table index */ + u32 l1_idx; /* level 1 base address table index */ + u32 l2_idx; /* level 2 base address table index */ }; void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); @@ -128,6 +133,19 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop); bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type); +void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list); +int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, + int region_cnt, int unit); +int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + const struct hns_roce_buf_region *regions, + int region_cnt, unsigned int bt_pg_shift); +void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list); +void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_hem_list *hem_list, + int offset, int *mtt_cnt, u64 *phy_addr); + static inline void hns_roce_hem_first(struct hns_roce_hem *hem, struct hns_roce_hem_iter *iter) { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..c6106379d24337a923b9611256ebcc361595400d --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This software is available to you under a choice of one of two + * licenses. 
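/*
 * Illustrative sketch (not part of the patch): why the hns_roce_hem.h hunk
 * above wraps the check_whether_bt_num_*() macro parameters in parentheses.
 * When an argument is itself an expression, operator precedence can change
 * the test; with the parameter parenthesized the macro behaves like a
 * function call. The macros and values below are simplified stand-ins.
 */
#include <assert.h>

#define BT_NUM_2_UNSAFE(type, hop_num)	((type) < 5 && hop_num == 2)
#define BT_NUM_2_SAFE(type, hop_num)	((type) < 5 && (hop_num) == 2)

int main(void)
{
	int caps = 2;

	/*
	 * Passing "caps & 2" as hop_num: '==' binds tighter than '&', so the
	 * unparenthesized form computes caps & (2 == 2), i.e. 2 & 1 == 0,
	 * while the parenthesized form computes (caps & 2) == 2, which is the
	 * intended check and evaluates to 1.
	 */
	assert(BT_NUM_2_SAFE(0, caps & 2) == 1);
	assert(BT_NUM_2_UNSAFE(0, caps & 2) == 0);
	return 0;
}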
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns_roce_common.h" +#include "hns_roce_device.h" +#include "hns_roce_cmd.h" +#include "hns_roce_hem.h" +#include "hns_roce_hw_v2.h" + +int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_v2_mpt_entry *mpt_ctx; + struct hns_roce_cmd_mailbox *mailbox; + u32 key = hr_dev->hr_stat.key; + int cur_len = 0; + char *out = buf; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int *mpt; + int ret; + int i; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0, + HNS_ROCE_CMD_READ_MPT_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else + goto err_cmd; + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0, + HNS_ROCE_CMD_READ_MPT_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else + goto err_cmd; + + mpt_ctx = kzalloc(sizeof(*mpt_ctx), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(mpt_ctx)) { + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key_to_hw_index(key), + 0, HNS_ROCE_CMD_QUERY_MPT, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(mpt_ctx, mailbox->buf, sizeof(*mpt_ctx)); + else + goto err_mailbox; + + hns_roce_v2_sysfs_print(out, cur_len, + "MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba); + hns_roce_v2_sysfs_print(out, cur_len, + "MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba); + mpt = (int *)mpt_ctx; + for (i = 0; i < (sizeof(*mpt_ctx) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "MPT(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + key, *mpt, *(mpt + 1), *(mpt + 2), + *(mpt + 3), *(mpt + 4), *(mpt + 5), + *(mpt + 6), *(mpt + 7)); + mpt += 8; + } + *desc += cur_len; + +err_mailbox: + kfree(mpt_ctx); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} +int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_srq_context *srq_context; + struct hns_roce_cmd_mailbox *mailbox; + u32 srqn = hr_dev->hr_stat.srqn; + int cur_len = 0; + char *out = buf; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int 
*srqc; + int i = 0; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0, + HNS_ROCE_CMD_READ_SRQC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else + goto err_cmd; + + srq_context = kzalloc(sizeof(*srq_context), GFP_KERNEL); + if (!srq_context) { + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0, + HNS_ROCE_CMD_QUERY_SRQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(srq_context, mailbox->buf, sizeof(*srq_context)); + else + goto err_mailbox; + + hns_roce_v2_sysfs_print(out, cur_len, + "SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba); + hns_roce_v2_sysfs_print(out, cur_len, + "SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba); + srqc = (int *)srq_context; + for (i = 0; i < (sizeof(*srq_context) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "SRQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + srqn, *srqc, *(srqc + 1), *(srqc + 2), + *(srqc + 3), *(srqc + 4), *(srqc + 5), + *(srqc + 6), *(srqc + 7)); + srqc += 8; + } + *desc += cur_len; + +err_mailbox: + kfree(srq_context); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} +int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_v2_qp_context *qp_context; + u32 qpn = hr_dev->hr_stat.qpn; + int cur_len = 0; + char *out = buf; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int *qpc; + int i = 0; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_READ_QPC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else + goto err_cmd; + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_READ_QPC_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else + goto err_cmd; + + qp_context = kzalloc(sizeof(*qp_context), GFP_KERNEL); + if (!qp_context) { + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_QUERY_QPC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(qp_context, mailbox->buf, sizeof(*qp_context)); + else + goto err_mailbox; + + hns_roce_v2_sysfs_print(out, cur_len, + "QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba); + hns_roce_v2_sysfs_print(out, cur_len, + "QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba); + qpc = (int *)qp_context; + for (i = 0; i < (sizeof(*qp_context) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "QPC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + qpn, *qpc, *(qpc + 1), *(qpc + 2), + *(qpc + 3), *(qpc + 4), *(qpc + 5), + *(qpc + 6), *(qpc + 7)); + qpc += 8; + } + *desc += cur_len; + +err_mailbox: + kfree(qp_context); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_eq_context *eq_context; + struct hns_roce_cmd_mailbox *mailbox; + u32 aeqn = hr_dev->hr_stat.aeqn; + int cur_len = 0; + char *out = buf; + int i = 0; + int *aeqc; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(eq_context)) { + ret = -ENOMEM; + goto 
err_context; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, aeqn, 0, + HNS_ROCE_CMD_QUERY_AEQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(eq_context, mailbox->buf, sizeof(*eq_context)); + else + goto err_mailbox; + + aeqc = (int *)eq_context; + for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "AEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + aeqn, *aeqc, *(aeqc + 1), *(aeqc + 2), + *(aeqc + 3), *(aeqc + 4), *(aeqc + 5), + *(aeqc + 6), *(aeqc + 7)); + aeqc += 8; + } + *desc += cur_len; + +err_mailbox: + kfree(eq_context); +err_context: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} +#define CMD_NUM_QUERY_PKT_CNT (8) +int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev, + char *buf, int *buff_size) +{ + struct hns_roce_cmq_desc desc[CMD_NUM_QUERY_PKT_CNT] = { {0} }; + struct rdfx_query_pkt_cnt *resp_query[CMD_NUM_QUERY_PKT_CNT]; + struct hns_roce_cmq_desc desc_cqe = {0}; + struct rdfx_query_cqe_cnt *resp_cqe = + (struct rdfx_query_cqe_cnt *)desc_cqe.data; + struct hns_roce_cmq_desc desc_cnp_rx = {0}; + struct rdfx_query_cnp_rx_cnt *resp_cnp_rx = + (struct rdfx_query_cnp_rx_cnt *)desc_cnp_rx.data; + struct hns_roce_cmq_desc desc_cnp_tx = {0}; + struct rdfx_query_cnp_tx_cnt *resp_cnp_tx = + (struct rdfx_query_cnp_tx_cnt *)desc_cnp_tx.data; + int cur_len = 0; + char *out = buf; + int status; + int i; + + for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUEYR_PKT_CNT, true); + + if (i < (CMD_NUM_QUERY_PKT_CNT - 1)) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + resp_query[i] = (struct rdfx_query_pkt_cnt *)desc[i].data; + } + + status = hns_roce_cmq_send(hr_dev, desc, CMD_NUM_QUERY_PKT_CNT); + if (status) + return status; + + hns_roce_cmq_setup_basic_desc(&desc_cqe, + HNS_ROCE_OPC_QUEYR_CQE_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cqe, 1); + if (status) + return status; + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { + hns_roce_cmq_setup_basic_desc(&desc_cnp_rx, + HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1); + if (status) + return status; + + hns_roce_cmq_setup_basic_desc(&desc_cnp_tx, + HNS_ROCE_OPC_QUEYR_CNP_TX_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cnp_tx, 1); + if (status) + return status; + } + + hns_roce_v2_sysfs_print(out, cur_len, + "RX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->rc_pkt_num, resp_query[1]->rc_pkt_num, + resp_query[2]->rc_pkt_num, resp_query[3]->rc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "RX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->uc_pkt_num, resp_query[1]->uc_pkt_num, + resp_query[2]->uc_pkt_num, resp_query[3]->uc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "RX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->ud_pkt_num, resp_query[1]->ud_pkt_num, + resp_query[2]->ud_pkt_num, resp_query[3]->ud_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "RX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->xrc_pkt_num, resp_query[1]->xrc_pkt_num, + resp_query[2]->xrc_pkt_num, resp_query[3]->xrc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "RX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->total_pkt_num, resp_query[1]->total_pkt_num, + resp_query[2]->total_pkt_num, resp_query[3]->total_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "RX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", 
+ resp_query[0]->error_pkt_num, resp_query[1]->error_pkt_num, + resp_query[2]->error_pkt_num, resp_query[3]->error_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->rc_pkt_num, resp_query[5]->rc_pkt_num, + resp_query[6]->rc_pkt_num, resp_query[7]->rc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->uc_pkt_num, resp_query[5]->uc_pkt_num, + resp_query[6]->uc_pkt_num, resp_query[7]->uc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->ud_pkt_num, resp_query[5]->ud_pkt_num, + resp_query[6]->ud_pkt_num, resp_query[7]->ud_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->xrc_pkt_num, resp_query[5]->xrc_pkt_num, + resp_query[6]->xrc_pkt_num, resp_query[7]->xrc_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->total_pkt_num, resp_query[5]->total_pkt_num, + resp_query[6]->total_pkt_num, resp_query[7]->total_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "TX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->error_pkt_num, resp_query[5]->error_pkt_num, + resp_query[6]->error_pkt_num, resp_query[7]->error_pkt_num); + hns_roce_v2_sysfs_print(out, cur_len, + "CQE : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cqe->port0_cqe, resp_cqe->port1_cqe, + resp_cqe->port2_cqe, resp_cqe->port3_cqe); + hns_roce_v2_sysfs_print(out, cur_len, + "CNP RX : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cnp_rx->port0_cnp_rx, resp_cnp_rx->port1_cnp_rx, + resp_cnp_rx->port2_cnp_rx, resp_cnp_rx->port3_cnp_rx); + hns_roce_v2_sysfs_print(out, cur_len, + "CNP TX : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cnp_tx->port0_cnp_tx, resp_cnp_tx->port1_cnp_tx, + resp_cnp_tx->port2_cnp_tx, resp_cnp_tx->port3_cnp_tx); + + *buff_size += cur_len; + return status; +} + +int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_eq_context *eq_context; + u32 ceqn = hr_dev->hr_stat.ceqn; + int cur_len = 0; + char *out = buf; + int *ceqc; + int i = 0; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(eq_context)) { + ret = -ENOMEM; + goto err_context; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, ceqn, 0, + HNS_ROCE_CMD_QUERY_CEQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(eq_context, mailbox->buf, sizeof(*eq_context)); + else + goto err_mailbox; + ceqc = (int *)eq_context; + for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "CEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + ceqn, *ceqc, *(ceqc + 1), *(ceqc + 2), + *(ceqc + 3), *(ceqc + 4), *(ceqc + 5), + *(ceqc + 6), *(ceqc + 7)); + ceqc += 8; + } + *desc += cur_len; +err_mailbox: + kfree(eq_context); +err_context: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_cmq_desc desc_cnt; + struct hns_roce_query_mbdb_cnt *resp_cnt = + (struct hns_roce_query_mbdb_cnt *)desc_cnt.data; + struct hns_roce_cmq_desc desc_dfx; + int cur_len = 0; + char *out = buf; + int status; + + hns_roce_cmq_setup_basic_desc(&desc_cnt, + HNS_ROCE_OPC_QUEYR_MBDB_CNT, true); + status = 
hns_roce_cmq_send(hr_dev, &desc_cnt, 1); + if (status) + return status; + + hns_roce_cmq_setup_basic_desc(&desc_dfx, + HNS_ROCE_OPC_QUEYR_MDB_DFX, true); + status = hns_roce_cmq_send(hr_dev, &desc_dfx, 1); + if (status) + return status; + + hns_roce_v2_sysfs_print(out, cur_len, "MB ISSUE CNT : 0x%08x\n", + resp_cnt->mailbox_issue_cnt); + hns_roce_v2_sysfs_print(out, cur_len, "MB EXEC CNT : 0x%08x\n", + resp_cnt->mailbox_exe_cnt); + hns_roce_v2_sysfs_print(out, cur_len, "DB ISSUE CNT : 0x%08x\n", + resp_cnt->doorbell_issue_cnt); + hns_roce_v2_sysfs_print(out, cur_len, "DB EXEC CNT : 0x%08x\n", + resp_cnt->doorbell_exe_cnt); + hns_roce_v2_sysfs_print(out, cur_len, "EQDB ISSUE CNT : 0x%08x\n", + resp_cnt->eq_doorbell_issue_cnt); + hns_roce_v2_sysfs_print(out, cur_len, "EQDB EXEC CNT : 0x%08x\n", + resp_cnt->eq_doorbell_exe_cnt); + *desc += cur_len; + return status; +} + +static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, + u64 *bt0_ba, u64 *bt1_ba, u32 cqn, + struct hns_roce_v2_cq_context *cq_context) +{ + + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_READ_CQC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(bt0_ba, mailbox->buf, sizeof(*bt0_ba)); + else { + pr_err("Query CQ bt0 cmd process error(%d).\n", ret); + goto out; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_READ_CQC_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(bt1_ba, mailbox->buf, sizeof(*bt1_ba)); + else { + pr_err("Query CQ bt1 cmd process error(%d).\n", ret); + goto out; + } + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_QUERY_CQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + + memcpy(cq_context, mailbox->buf, sizeof(*cq_context)); + +out: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc) +{ + struct hns_roce_v2_cq_context *cq_context; + u32 cqn = hr_dev->hr_stat.cqn; + int cur_len = 0; + char *out = buf; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int *cqc; + int i, ret; + + cq_context = kzalloc(sizeof(*cq_context), GFP_KERNEL); + if (!cq_context) + return -ENOMEM; + + ret = hns_roce_v2_query_cqc(hr_dev, &bt0_ba, &bt1_ba, cqn, cq_context); + if (ret) + goto out; + + hns_roce_v2_sysfs_print(out, cur_len, + "CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba); + hns_roce_v2_sysfs_print(out, cur_len, + "CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba); + + cqc = (int *)cq_context; + for (i = 0; i < (sizeof(*cq_context) >> 2); i += 8) { + hns_roce_v2_sysfs_print(out, cur_len, + "CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + cqn, *cqc, *(cqc + 1), *(cqc + 2), + *(cqc + 3), *(cqc + 4), *(cqc + 5), + *(cqc + 6), *(cqc + 7)); + cqc += 8; + } + *desc += cur_len; +out: + kfree(cq_context); + return ret; +} + +int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, + u16 eq_count, u16 eq_period, u16 type) +{ + struct hns_roce_eq_context *eqc; + struct hns_roce_eq_context *eqc_mask; + struct hns_roce_cmd_mailbox *mailbox; + unsigned int eq_cmd; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + eqc = mailbox->buf; + eqc_mask = (struct hns_roce_eq_context *)mailbox->buf + 1; + + memset(eqc_mask, 0xff, sizeof(*eqc_mask)); + + if (type == HNS_ROCE_EQ_MAXCNT_MASK) { + roce_set_field(eqc->byte_12, + 
HNS_ROCE_EQC_MAX_CNT_M, + HNS_ROCE_EQC_MAX_CNT_S, eq_count); + roce_set_field(eqc_mask->byte_12, + HNS_ROCE_EQC_MAX_CNT_M, + HNS_ROCE_EQC_MAX_CNT_S, 0); + } else if (type == HNS_ROCE_EQ_PERIOD_MASK) { + roce_set_field(eqc->byte_12, + HNS_ROCE_EQC_PERIOD_M, + HNS_ROCE_EQC_PERIOD_S, eq_period); + roce_set_field(eqc_mask->byte_12, + HNS_ROCE_EQC_PERIOD_M, + HNS_ROCE_EQC_PERIOD_S, 0); + } + eq_cmd = HNS_ROCE_CMD_MODIFY_CEQC; + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 1, + eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + if (ret) + dev_err(hr_dev->dev, "Modify EQ Failed(%d) for cmd mailbox.\n", + ret); + + return ret; +} diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 081aa91fc162da6035a26881200323b4283e463d..fcf12eaacd6a5142252e9633da34a2d5172b8ac2 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -43,6 +43,8 @@ #include "hns_roce_hem.h" #include "hns_roce_hw_v1.h" +static int loopback; + static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->lkey = cpu_to_le32(sg->lkey); @@ -58,9 +60,14 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, rseg->len = 0; } +#ifdef CONFIG_KERNEL_419 static int hns_roce_v1_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) +#else +static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +#endif { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); @@ -175,13 +182,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_FLOW_LABEL_M, UD_SEND_WQE_U32_36_FLOW_LABEL_S, - ah->av.sl_tclass_flowlabel & - HNS_ROCE_FLOW_LABEL_MASK); + ah->av.flowlabel); roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_PRIORITY_M, UD_SEND_WQE_U32_36_PRIORITY_S, - le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT); + ah->av.sl); roce_set_field(ud_sq_wqe->u32_36, UD_SEND_WQE_U32_36_SGID_INDEX_M, UD_SEND_WQE_U32_36_SGID_INDEX_S, @@ -195,8 +200,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->u32_40, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, - ah->av.sl_tclass_flowlabel >> - HNS_ROCE_TCLASS_SHIFT); + ah->av.tclass); memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); @@ -347,9 +351,14 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, return ret; } +#ifdef CONFIG_KERNEL_419 static int hns_roce_v1_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) +#else +static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +#endif { int ret = 0; int nreq = 0; @@ -771,7 +780,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); rdma_ah_set_static_rate(&attr.ah_attr, 3); - subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + subnet_prefix = 0xfe80000000000000LL; for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : (i % HNS_ROCE_MAX_PORTS); @@ -813,7 +822,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) attr.dest_qp_num = hr_qp->qpn; memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), hr_dev->dev_addr[port], - MAC_ADDR_OCTET_NUM); + ETH_ALEN); memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); @@ -999,8 +1008,12 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) { struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); struct device *dev = &hr_dev->pdev->dev; +#ifdef CONFIG_KERNEL_419 struct ib_send_wr send_wr; const struct ib_send_wr *bad_wr; +#else + struct ib_send_wr send_wr, *bad_wr; +#endif int ret; memset(&send_wr, 0, sizeof(send_wr)); @@ -1087,7 +1100,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) free_work: if (mr_work->comp_flag) complete(mr_work->comp); - kfree(mr_work); } static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, @@ -1108,9 +1120,10 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, free_mr = &priv->free_mr; if (mr->enabled) { - if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key) - & (hr_dev->caps.num_mtpts - 1))) - dev_warn(dev, "HW2SW_MPT failed!\n"); + if (hns_roce_hw_destroy_mpt(hr_dev, NULL, + key_to_hw_index(mr->key) & + (hr_dev->caps.num_mtpts - 1))) + dev_warn(dev, "DESTROY_MPT failed!\n"); } mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); @@ -1131,17 +1144,19 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, while (time_before_eq(jiffies, end)) { if (try_wait_for_completion(&comp)) - goto free_mr; + goto err; msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); } mr_work->comp_flag = 0; if (try_wait_for_completion(&comp)) - goto free_mr; + goto err; dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); ret = -ETIMEDOUT; +err: + kfree(mr_work); free_mr: dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); @@ -1480,15 +1495,22 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) } fwnode = &dsaf_node->fwnode; } else if (is_acpi_device_node(dev->fwnode)) { +#ifdef CONFIG_KERNEL_419 struct fwnode_reference_args args; - +#else + struct acpi_reference_args args; +#endif ret = acpi_node_get_property_reference(dev->fwnode, "dsaf-handle", 0, &args); if (ret) { dev_err(dev, "could not find dsaf-handle\n"); return ret; } +#ifdef CONFIG_KERNEL_419 fwnode = args.fwnode; +#else + fwnode = acpi_fwnode_handle(args.adev); +#endif } else { dev_err(dev, "cannot read data from DT or ACPI\n"); return -ENXIO; @@ -1506,38 +1528,6 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) return ret; } -static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev) -{ - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_v1_priv *priv; - struct hns_roce_des_qp *des_qp; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - des_qp = &priv->des_qp; - - des_qp->requeue_flag = 1; - des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp"); - if (!des_qp->qp_wq) { - dev_err(dev, "Create destroy qp workqueue failed!\n"); - return -ENOMEM; - } - - return 0; -} - -static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv; - struct hns_roce_des_qp *des_qp; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - des_qp = &priv->des_qp; - - des_qp->requeue_flag = 0; - flush_workqueue(des_qp->qp_wq); - destroy_workqueue(des_qp->qp_wq); -} - static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) { int i = 
0; @@ -1583,6 +1573,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) caps->reserved_mrws = 1; caps->reserved_uars = 0; caps->reserved_cqs = 0; + caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */ caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; for (i = 0; i < caps->num_ports; i++) @@ -1656,12 +1647,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) goto error_failed_tptr_init; } - ret = hns_roce_des_qp_init(hr_dev); - if (ret) { - dev_err(dev, "des qp init failed!\n"); - goto error_failed_des_qp_init; - } - ret = hns_roce_free_mr_init(hr_dev); if (ret) { dev_err(dev, "free mr init failed!\n"); @@ -1673,9 +1658,6 @@ static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) return 0; error_failed_free_mr_init: - hns_roce_des_qp_free(hr_dev); - -error_failed_des_qp_init: hns_roce_tptr_free(hr_dev); error_failed_tptr_init: @@ -1693,7 +1675,6 @@ static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) { hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); hns_roce_free_mr_free(hr_dev); - hns_roce_des_qp_free(hr_dev); hns_roce_tptr_free(hr_dev); hns_roce_bt_free(hr_dev); hns_roce_raq_free(hr_dev); @@ -1776,15 +1757,24 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, return 0; } +#ifdef CONFIG_KERNEL_419 static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr) +#else +static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, + int gid_index, union ib_gid *gid, + const struct ib_gid_attr *attr) +#endif { u32 *p = NULL; u8 gid_idx = 0; + unsigned long flags; gid_idx = hns_get_gid_index(hr_dev, port, gid_index); + spin_lock_irqsave(&hr_dev->iboe.lock, flags); + p = (u32 *)&gid->raw[0]; roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + (HNS_ROCE_V1_GID_NUM * gid_idx)); @@ -1801,6 +1791,8 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + (HNS_ROCE_V1_GID_NUM * gid_idx)); + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + return 0; } @@ -2012,7 +2004,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, static void *get_cqe(struct hns_roce_cq *hr_cq, int n) { - return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf, + return hns_roce_buf_offset(hr_cq->hr_buf.hr_buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE); } @@ -2453,7 +2445,7 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return ret; } -static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, +static void hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, int step_idx) { @@ -2484,9 +2476,9 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, break; case HEM_TYPE_SRQC: dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); - return -EINVAL; + return; default: - return 0; + return; } roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); @@ -2497,33 +2489,31 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { + end = HW_SYNC_TIMEOUT_MSECS; + while (end > 0) { + if (!(readl(bt_cmd) >> 
BT_CMD_SYNC_SHIFT)) break; - } - msleep(HW_SYNC_SLEEP_TIME_INTERVAL); + + mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); + end -= HW_SYNC_SLEEP_TIME_INTERVAL; + } + if (end <= 0) { + dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); + spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); + return; } - bt_cmd_val[0] = (__le32)bt_ba; + bt_cmd_val[0] = cpu_to_le32(bt_ba); roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - return 0; + return; } static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, - struct hns_roce_mtt *mtt, enum hns_roce_qp_state cur_state, enum hns_roce_qp_state new_state, struct hns_roce_qp_context *context, @@ -2570,7 +2560,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, struct hns_roce_cmd_mailbox *mailbox; struct device *dev = &hr_dev->pdev->dev; - int ret = 0; + int ret; if (cur_state >= HNS_ROCE_QP_NUM_STATE || new_state >= HNS_ROCE_QP_NUM_STATE || @@ -2604,6 +2594,27 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, return ret; } +static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba) +{ + int count; + + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba); + if (count < 1) { + dev_err(hr_dev->dev, "Failed to find SQ ba\n"); + return -ENOBUFS; + } + + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba, + 1, NULL); + if (!count) { + dev_err(hr_dev->dev, "Failed to find RQ ba\n"); + return -ENOBUFS; + } + + return 0; +} + static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) @@ -2611,25 +2622,20 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_sqp_context *context; - struct device *dev = &hr_dev->pdev->dev; dma_addr_t dma_handle = 0; u32 __iomem *addr; - int rq_pa_start; + u64 sq_ba = 0; + u64 rq_ba = 0; __le32 tmp; u32 reg_val; - u64 *mtts; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return -ENOMEM; /* Search QP buf's MTTs */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (!mtts) { - dev_err(dev, "qp buf pa find failed\n"); + if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) goto out; - } if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { roce_set_field(context->qp1c_bytes_4, @@ -2643,11 +2649,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); - context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle)); + context->sq_rq_bt_l = cpu_to_le32(dma_handle); roce_set_field(context->qp1c_bytes_12, QP1C_BYTES_12_SQ_RQ_BT_H_M, QP1C_BYTES_12_SQ_RQ_BT_H_S, - ((u32)(dma_handle >> 32))); + upper_32_bits(dma_handle)); roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); @@ -2655,7 +2661,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SIGNALING_TYPE_S, - le32_to_cpu(hr_qp->sq_signal_bits)); + 
hr_qp->sq_signal_bits); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, 1); roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, @@ -2668,14 +2674,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); - rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE; - context->cur_rq_wqe_ba_l = - cpu_to_le32((u32)(mtts[rq_pa_start])); + context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); roce_set_field(context->qp1c_bytes_28, QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, - (mtts[rq_pa_start]) >> 32); + upper_32_bits(rq_ba)); roce_set_field(context->qp1c_bytes_28, QP1C_BYTES_28_RQ_CUR_IDX_M, QP1C_BYTES_28_RQ_CUR_IDX_S, 0); @@ -2689,12 +2693,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP1C_BYTES_32_TX_CQ_NUM_S, to_hr_cq(ibqp->send_cq)->cqn); - context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]); + context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); roce_set_field(context->qp1c_bytes_40, QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, - (mtts[0]) >> 32); + upper_32_bits(sq_ba)); roce_set_field(context->qp1c_bytes_40, QP1C_BYTES_40_SQ_CUR_IDX_M, QP1C_BYTES_40_SQ_CUR_IDX_S, 0); @@ -2761,10 +2765,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, dma_addr_t dma_handle_2 = 0; dma_addr_t dma_handle = 0; __le32 doorbell[2] = {0}; - int rq_pa_start = 0; u64 *mtts_2 = NULL; int ret = -EINVAL; - u64 *mtts = NULL; + u64 sq_ba = 0; + u64 rq_ba = 0; int port; u8 port_num; u8 *dmac; @@ -2775,12 +2779,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, return -ENOMEM; /* Search qp buf's mtts */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (mtts == NULL) { - dev_err(dev, "qp buf pa find failed\n"); + if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) goto out; - } /* Search IRRL's mtts */ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, @@ -2853,7 +2853,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, roce_set_field(context->qpc_bytes_16, QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { roce_set_field(context->qpc_bytes_4, QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, @@ -2935,11 +2934,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, dmac = (u8 *)attr->ah_attr.roce.dmac; - context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle)); + context->sq_rq_bt_l = cpu_to_le32(dma_handle); roce_set_field(context->qpc_bytes_24, QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, - ((u32)(dma_handle >> 32))); + upper_32_bits(dma_handle)); roce_set_bit(context->qpc_bytes_24, QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, 1); @@ -2961,7 +2960,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 1); roce_set_bit(context->qpc_bytes_32, QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, - le32_to_cpu(hr_qp->sq_signal_bits)); + hr_qp->sq_signal_bits); port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : hr_qp->port; @@ -3038,14 +3037,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); - rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE; - context->cur_rq_wqe_ba_l = - cpu_to_le32((u32)(mtts[rq_pa_start])); + context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); roce_set_field(context->qpc_bytes_76, QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, - mtts[rq_pa_start] >> 32); + upper_32_bits(rq_ba)); roce_set_field(context->qpc_bytes_76, QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); @@ -3107,8 +3104,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_156_SL_S, rdma_ah_get_sl(&attr->ah_attr)); hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - } else if (cur_state == IB_QPS_RTR && - new_state == IB_QPS_RTS) { + } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { /* If exist optional param, return error */ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) || @@ -3120,12 +3116,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, goto out; } - context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0])); + context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); roce_set_field(context->qpc_bytes_120, QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, - (mtts[0]) >> 32); + upper_32_bits(sq_ba)); roce_set_field(context->qpc_bytes_124, QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, @@ -3268,12 +3264,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); - context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0])); + context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); roce_set_field(context->qpc_bytes_188, QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, - (mtts[0]) >> 32); + upper_32_bits(sq_ba)); roce_set_bit(context->qpc_bytes_188, QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); roce_set_field(context->qpc_bytes_188, @@ -3298,8 +3294,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); /* SW pass context to HW */ - ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt, - to_hns_roce_state(cur_state), + ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state), to_hns_roce_state(new_state), context, hr_qp); if (ret) { @@ -3426,6 +3421,9 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, struct hns_roce_sqp_context context; u32 addr; + memset(qp_attr, 0, sizeof(*qp_attr)); + memset(qp_init_attr, 0, sizeof(*qp_init_attr)); + mutex_lock(&hr_qp->mutex); if (hr_qp->state == IB_QPS_RESET) { @@ -3606,7 +3604,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); - qp_attr->rnr_retry = (u8)context->rnr_retry; + qp_attr->rnr_retry = le32_to_cpu(context->rnr_retry); done: qp_attr->cur_qp_state = qp_attr->qp_state; @@ -3640,356 +3638,41 @@ static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); } -static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev, - u32 
*old_send, u32 *old_retry, - u32 *tsp_st, u32 *success_flags) +static void get_cqs(struct ib_qp *ibqp, struct hns_roce_cq **send_cq, + struct hns_roce_cq **recv_cq) { - __le32 *old_send_tmp, *old_retry_tmp; - u32 sdb_retry_cnt; - u32 sdb_send_ptr; - u32 cur_cnt, old_cnt; - __le32 tmp, tmp1; - u32 send_ptr; - - sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG); - tmp = cpu_to_le32(sdb_send_ptr); - tmp1 = cpu_to_le32(sdb_retry_cnt); - cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - - old_send_tmp = (__le32 *)old_send; - old_retry_tmp = (__le32 *)old_retry; - if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) { - old_cnt = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(*old_retry_tmp, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) - *success_flags = 1; - } else { - old_cnt = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) { - *success_flags = 1; - } else { - send_ptr = roce_get_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + - roce_get_field(tmp1, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M, - ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S); - roce_set_field(*old_send_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S, - send_ptr); - } - } -} - -static int check_qp_db_process_status(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - u32 sdb_issue_ptr, - u32 *sdb_inv_cnt, - u32 *wait_stage) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_send_ptr, old_send; - __le32 sdb_issue_ptr_tmp; - __le32 sdb_send_ptr_tmp; - u32 success_flags = 0; - unsigned long end; - u32 old_retry; - u32 inv_cnt; - u32 tsp_st; - __le32 tmp; - - if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 || - *wait_stage < HNS_ROCE_V1_DB_STAGE1) { - dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n", - hr_qp->qpn, *wait_stage); - return -EINVAL; - } - - /* Calculate the total timeout for the entire verification process */ - end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies; - - if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) { - /* Query db process status, until hw process completely */ - sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr, - ROCEE_SDB_PTR_CMP_BITS)) { - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. 
issue 0x%x send 0x%x.\n", - hr_qp->qpn, sdb_issue_ptr, - sdb_send_ptr); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - sdb_send_ptr = roce_read(hr_dev, - ROCEE_SDB_SEND_PTR_REG); - } - - sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr); - sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr); - if (roce_get_field(sdb_issue_ptr_tmp, - ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M, - ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) == - roce_get_field(sdb_send_ptr_tmp, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, - ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) { - old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG); - old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG); - - do { - tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG); - tmp = cpu_to_le32(tsp_st); - if (roce_get_bit(tmp, - ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) { - *wait_stage = HNS_ROCE_V1_DB_WAIT_OK; - return 0; - } - - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n" - "issue 0x%x send 0x%x.\n", - hr_qp->qpn, - le32_to_cpu(sdb_issue_ptr_tmp), - le32_to_cpu(sdb_send_ptr_tmp)); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - - hns_roce_check_sdb_status(hr_dev, &old_send, - &old_retry, &tsp_st, - &success_flags); - } while (!success_flags); - } - - *wait_stage = HNS_ROCE_V1_DB_STAGE2; - - /* Get list pointer */ - *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n", - hr_qp->qpn, *sdb_inv_cnt); - } - - if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) { - /* Query db's list status, until hw reversal */ - inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - while (roce_hw_index_cmp_lt(inv_cnt, - *sdb_inv_cnt + SDB_INV_CNT_OFFSET, - ROCEE_SDB_CNT_CMP_BITS)) { - if (!time_before(jiffies, end)) { - dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. 
inv cnt 0x%x.\n", - hr_qp->qpn, inv_cnt); - return 0; - } - - msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS); - inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG); - } - - *wait_stage = HNS_ROCE_V1_DB_WAIT_OK; - } - - return 0; -} - -static int check_qp_reset_state(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_qp_work *qp_work_entry, - int *is_timeout) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_issue_ptr; - int ret; - - if (hr_qp->state != IB_QPS_RESET) { - /* Set qp to ERR, waiting for hw complete processing all dbs */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_ERR); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n", - hr_qp->qpn); - return ret; - } - - /* Record issued doorbell */ - sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG); - qp_work_entry->sdb_issue_ptr = sdb_issue_ptr; - qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1; - - /* Query db process status, until hw process completely */ - ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr, - &qp_work_entry->sdb_inv_cnt, - &qp_work_entry->db_wait_stage); - if (ret) { - dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - hr_qp->qpn); - return ret; - } - - if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) { - qp_work_entry->sche_cnt = 0; - *is_timeout = 1; - return 0; - } - - /* Modify qp to reset before destroying qp */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_RESET); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", - hr_qp->qpn); - return ret; - } - } - - return 0; -} - -static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) -{ - struct hns_roce_qp_work *qp_work_entry; - struct hns_roce_v1_priv *priv; - struct hns_roce_dev *hr_dev; - struct hns_roce_qp *hr_qp; - struct device *dev; - unsigned long qpn; - int ret; - - qp_work_entry = container_of(work, struct hns_roce_qp_work, work); - hr_dev = to_hr_dev(qp_work_entry->ib_dev); - dev = &hr_dev->pdev->dev; - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - hr_qp = qp_work_entry->qp; - qpn = hr_qp->qpn; - - dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn); - - qp_work_entry->sche_cnt++; - - /* Query db process status, until hw process completely */ - ret = check_qp_db_process_status(hr_dev, hr_qp, - qp_work_entry->sdb_issue_ptr, - &qp_work_entry->sdb_inv_cnt, - &qp_work_entry->db_wait_stage); - if (ret) { - dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - qpn); - return; - } - - if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK && - priv->des_qp.requeue_flag) { - queue_work(priv->des_qp.qp_wq, work); - return; - } - - /* Modify qp to reset before destroying qp */ - ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, - IB_QPS_RESET); - if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn); - return; - } - - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); - - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { - /* RC QP, release QPN */ - hns_roce_release_range_qp(hr_dev, qpn, 1); - kfree(hr_qp); - } else - kfree(hr_to_hr_sqp(hr_qp)); - - kfree(qp_work_entry); - - dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); + *send_cq = ibqp->send_cq ? to_hr_cq(ibqp->send_cq) : NULL; + *recv_cq = ibqp->recv_cq ? 
to_hr_cq(ibqp->recv_cq) : NULL; } int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_work qp_work_entry; - struct hns_roce_qp_work *qp_work; - struct hns_roce_v1_priv *priv; struct hns_roce_cq *send_cq, *recv_cq; - int is_user = !!ibqp->pd->uobject; - int is_timeout = 0; + bool is_user = ibqp->uobject; int ret; - ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout); - if (ret) { - dev_err(dev, "QP reset state check failed(%d)!\n", ret); + ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); + if (ret) return ret; - } - send_cq = to_hr_cq(hr_qp->ibqp.send_cq); - recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq); + get_cqs(&hr_qp->ibqp, &send_cq, &recv_cq); hns_roce_lock_cqs(send_cq, recv_cq); if (!is_user) { - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : NULL); - if (send_cq != recv_cq) + if (recv_cq) + __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, + (hr_qp->ibqp.srq ? + to_hr_srq(hr_qp->ibqp.srq) : + NULL)); + if (send_cq && send_cq != recv_cq) __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); } - hns_roce_unlock_cqs(send_cq, recv_cq); - - if (!is_timeout) { - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); - - /* RC QP, release QPN */ - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - } - - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); - if (is_user) - ib_umem_release(hr_qp->umem); - else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); - - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - } + hns_roce_qp_remove(hr_dev, hr_qp); - if (!is_timeout) { - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - kfree(hr_qp); - else - kfree(hr_to_hr_sqp(hr_qp)); - } else { - qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL); - if (!qp_work) - return -ENOMEM; - - INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn); - qp_work->ib_dev = &hr_dev->ib_dev; - qp_work->qp = hr_qp; - qp_work->db_wait_stage = qp_work_entry.db_wait_stage; - qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr; - qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt; - qp_work->sche_cnt = qp_work_entry.sche_cnt; - - priv = (struct hns_roce_v1_priv *)hr_dev->priv; - queue_work(priv->des_qp.qp_wq, &qp_work->work); - dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn); - } + hns_roce_qp_destroy(hr_dev, hr_qp); return 0; } @@ -4001,7 +3684,6 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq) struct device *dev = &hr_dev->pdev->dev; u32 cqe_cnt_ori; u32 cqe_cnt_cur; - u32 cq_buf_size; int wait_time = 0; int ret = 0; @@ -4035,11 +3717,8 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq) if (ibcq->uobject) ib_umem_release(hr_cq->umem); - else { - /* Free the buff of stored cq */ - cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz; - hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf); - } + else + hns_roce_buf_free(hr_dev, hr_cq->hr_buf.hr_buf); kfree(hr_cq); @@ -4247,7 +3926,8 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe, + dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", + aeqe, roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); @@ -4310,10 +3990,9 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, eq->cons_index++; aeqes_found = 1; - if 
(eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) { - dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > + EQ_DEPTH_COEFF * hr_dev->caps.aeqe_depth - 1) eq->cons_index = 0; - } } set_eq_cons_index_v1(eq, 0); @@ -4362,11 +4041,9 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, ++eq->cons_index; ceqes_found = 1; - if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) { - dev_warn(&eq->hr_dev->pdev->dev, - "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > + EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) eq->cons_index = 0; - } } set_eq_cons_index_v1(eq, 0); @@ -4917,14 +4594,24 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) continue; pdev = of_find_device_by_node(net_node); } else if (is_acpi_device_node(dev->fwnode)) { +#ifdef CONFIG_KERNEL_419 struct fwnode_reference_args args; +#else + struct acpi_reference_args args; + struct fwnode_handle *fwnode; +#endif ret = acpi_node_get_property_reference(dev->fwnode, "eth-handle", i, &args); if (ret) continue; +#ifdef CONFIG_KERNEL_419 pdev = hns_roce_find_pdev(args.fwnode); +#else + fwnode = acpi_fwnode_handle(args.adev); + pdev = hns_roce_find_pdev(fwnode); +#endif } else { dev_err(dev, "cannot read data from DT or ACPI\n"); return -ENXIO; @@ -4954,7 +4641,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) /* cmd issue mode: 0 is poll, 1 is event */ hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; + hr_dev->loop_idc = loopback; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; @@ -5063,7 +4750,7 @@ static struct platform_driver hns_roce_driver = { module_platform_driver(hns_roce_driver); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu "); -MODULE_AUTHOR("Nenglong Zhao "); -MODULE_AUTHOR("Lijun Ou "); +MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); +module_param(loopback, int, 0444); +MODULE_PARM_DESC(loopback, "default: 0"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 66440147d9eb210604354b8c91b5576be8cd6db9..7edd2eb8214e873fc87cb8bc8c7b865ba55d7e4b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h @@ -110,11 +110,6 @@ #define HNS_ROCE_V1_EXT_ODB_ALFUL \ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_DB_WAIT_OK 0 -#define HNS_ROCE_V1_DB_STAGE1 1 -#define HNS_ROCE_V1_DB_STAGE2 2 -#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000 -#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20 #define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 #define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 #define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 @@ -162,7 +157,6 @@ #define SQ_PSN_SHIFT 8 #define QKEY_VAL 0x80010000 #define SDB_INV_CNT_OFFSET 8 -#define SDB_ST_CMP_VAL 8 #define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 @@ -1068,11 +1062,6 @@ struct hns_roce_qp_work { u32 sche_cnt; }; -struct hns_roce_des_qp { - struct workqueue_struct *qp_wq; - int requeue_flag; -}; - struct hns_roce_mr_free_work { struct work_struct work; struct ib_device *ib_dev; @@ -1100,7 +1089,6 @@ struct hns_roce_v1_priv { struct hns_roce_raq_table raq_table; struct hns_roce_bt_table bt_table; struct hns_roce_tptr_table tptr_table; - struct hns_roce_des_qp des_qp; struct hns_roce_free_mr free_mr; }; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 0218c0f8c2a7ddb7bc08e60645fecb8a19aa157c..b579402d3c0b69ea226a16b0a124725276a1c0f1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "hnae3.h" @@ -46,6 +47,37 @@ #include "hns_roce_hem.h" #include "hns_roce_hw_v2.h" +static int loopback; + +static bool qp_lock = true; +static bool cq_lock = true; + +static inline void v2_spin_lock_irqsave(bool has_lock, spinlock_t *lock, + unsigned long *flags) +{ + if (likely(has_lock)) + spin_lock_irqsave(lock, *flags); +} + +static inline void v2_spin_unlock_irqrestore(bool has_lock, spinlock_t *lock, + unsigned long *flags) +{ + if (likely(has_lock)) + spin_unlock_irqrestore(lock, *flags); +} + +static inline void v2_spin_lock_irq(bool has_lock, spinlock_t *lock) +{ + if (likely(has_lock)) + spin_lock_irq(lock); +} + +static inline void v2_spin_unlock_irq(bool has_lock, spinlock_t *lock) +{ + if (likely(has_lock)) + spin_unlock_irq(lock); +} + static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, struct ib_sge *sg) { @@ -54,8 +86,55 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, dseg->len = cpu_to_le32(sg->length); } +#ifdef CONFIG_KERNEL_419 +static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + struct hns_roce_wqe_frmr_seg *fseg, + const struct ib_reg_wr *wr) +#else +static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + struct hns_roce_wqe_frmr_seg *fseg, + struct ib_reg_wr *wr) +#endif +{ + struct hns_roce_mr *mr = to_hr_mr(wr->mr); + + /* use ib_access_flags */ + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S, + wr->access & IB_ACCESS_MW_BIND ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S, + wr->access & IB_ACCESS_REMOTE_ATOMIC ? 
1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S, + wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S, + wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S, + wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0); + + /* Data structure reuse may lead to confusion */ + rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff); + rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32); + + rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff); + rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32); + rc_sq_wqe->rkey = cpu_to_le32(wr->key); + rc_sq_wqe->va = cpu_to_le64(wr->mr->iova); + + fseg->pbl_size = cpu_to_le32(mr->pbl_size); + roce_set_field(fseg->mode_buf_pg_sz, + V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M, + V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + roce_set_bit(fseg->mode_buf_pg_sz, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, + 0); +} + +#ifdef CONFIG_KERNEL_419 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, - unsigned int *sge_ind) + unsigned int *sge_ind, int valid_num_sge) +#else +static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr, + unsigned int *sge_ind, int valid_num_sge) +#endif { struct hns_roce_v2_wqe_data_seg *dseg; struct ib_sge *sg; @@ -64,13 +143,12 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, int fi_sge_num; int se_sge_num; int shift; - int i; if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; - extend_sge_num = wr->num_sge - num_in_wqe; + extend_sge_num = valid_num_sge - num_in_wqe; sg = wr->sg_list + num_in_wqe; - shift = qp->hr_buf.page_shift; + shift = qp->mtr.kmem->page_shift; /* * Check whether wr->num_sge sges are in the same page. If not, we @@ -83,45 +161,135 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr, sizeof(struct hns_roce_v2_wqe_data_seg); if (extend_sge_num > fi_sge_num) { se_sge_num = extend_sge_num - fi_sge_num; - for (i = 0; i < fi_sge_num; i++) { - set_data_seg_v2(dseg++, sg + i); - (*sge_ind)++; + while (fi_sge_num > 0) { + if (likely(sg->length)) { + set_data_seg_v2(dseg++, sg); + (*sge_ind)++; + fi_sge_num--; + } + sg++; } dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1)); - for (i = 0; i < se_sge_num; i++) { - set_data_seg_v2(dseg++, sg + fi_sge_num + i); - (*sge_ind)++; + while (se_sge_num > 0) { + if (likely(sg->length)) { + set_data_seg_v2(dseg++, sg + fi_sge_num); + (*sge_ind)++; + se_sge_num--; + } + sg++; } } else { - for (i = 0; i < extend_sge_num; i++) { - set_data_seg_v2(dseg++, sg + i); - (*sge_ind)++; + while (extend_sge_num > 0) { + if (likely(sg->length)) { + set_data_seg_v2(dseg++, sg); + (*sge_ind)++; + extend_sge_num--; + } + sg++; } } } +static void set_extend_atomic_seg(struct hns_roce_qp *qp, u32 sge_num, + unsigned int *sge_ind, u64 data) +{ + u64 *ext_seg; + int i; + + for (i = 0; i < sge_num; i += 2, (*sge_ind)++) { + ext_seg = get_send_extend_sge(qp, + (*sge_ind) & (qp->sge.sge_cnt - 1)); + *ext_seg = data ? cpu_to_le64(*(uint64_t *)(uintptr_t) + (data + i * 8)) : 0; + *(ext_seg + 1) = data ? 
cpu_to_le64(*(uint64_t *)(uintptr_t) + (data + (i + 1) * 8)) : 0; + } +} + +#ifdef CONFIG_KERNEL_419 +static int set_atomic_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + void *wqe, unsigned int *sge_ind) +#else +static int set_atomic_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + void *wqe, unsigned int *sge_ind) +#endif +{ + struct hns_roce_qp *qp = to_hr_qp(ibqp); + struct hns_roce_wqe_atomic_seg *aseg; + u32 sge_num = le32_to_cpu(rc_sq_wqe->msg_len) >> 3; + + wqe += sizeof(struct hns_roce_v2_wqe_data_seg); + aseg = wqe; + + if ((sge_num == 2) || (sge_num == 4) || (sge_num == 8)) { + aseg->fetchadd_swap_data = 0; + aseg->cmp_data = 0; + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + set_extend_atomic_seg(qp, sge_num, sge_ind, + atomic_wr(wr)->swap); + set_extend_atomic_seg(qp, sge_num, sge_ind, + atomic_wr(wr)->compare_add); + } else { + set_extend_atomic_seg(qp, sge_num, sge_ind, + atomic_wr(wr)->compare_add); + set_extend_atomic_seg(qp, sge_num, sge_ind, 0); + } + return 0; + } else if (sge_num == 1) { + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->fetchadd_swap_data = + cpu_to_le64(atomic_wr(wr)->swap); + aseg->cmp_data = + cpu_to_le64(atomic_wr(wr)->compare_add); + } else { + aseg->fetchadd_swap_data = + cpu_to_le64(atomic_wr(wr)->compare_add); + aseg->cmp_data = 0; + } + return 0; + } else { + return -EINVAL; + } + +} +#ifdef CONFIG_KERNEL_419 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, void *wqe, unsigned int *sge_ind, + int valid_num_sge, const struct ib_send_wr **bad_wr) +#else +static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + void *wqe, unsigned int *sge_ind, + int valid_num_sge, + struct ib_send_wr **bad_wr) +#endif { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_v2_wqe_data_seg *dseg = wqe; struct hns_roce_qp *qp = to_hr_qp(ibqp); + int j = 0; int i; - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, + !!(wr->send_flags & IB_SEND_INLINE)); + + if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) { if (le32_to_cpu(rc_sq_wqe->msg_len) > hr_dev->caps.max_sq_inline) { *bad_wr = wr; - dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal", + dev_err(hr_dev->dev, "Inline len(0x%x)illegal, max is 0x%x.\n", rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline); return -EINVAL; } if (wr->opcode == IB_WR_RDMA_READ) { - dev_err(hr_dev->dev, "Not support inline data!\n"); + *bad_wr = wr; + dev_err(hr_dev->dev, "Not support inline data in rdma read!\n"); return -EINVAL; } @@ -130,11 +298,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, wr->sg_list[i].length); wqe += wr->sg_list[i].length; } - - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, - 1); } else { - if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) { + if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) { for (i = 0; i < wr->num_sge; i++) { if (likely(wr->sg_list[i].length)) { set_data_seg_v2(dseg, wr->sg_list + i); @@ -142,75 +307,94 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, } } } else { - roce_set_field(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - (*sge_ind) & (qp->sge.sge_cnt - 1)); - - for (i = 0; i < 
HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) { + for (i = 0; i < wr->num_sge && + j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) { if (likely(wr->sg_list[i].length)) { set_data_seg_v2(dseg, wr->sg_list + i); dseg++; + j++; } } - set_extend_sge(qp, wr, sge_ind); + set_extend_sge(qp, wr, sge_ind, valid_num_sge); } roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge); + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); } return 0; } -static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state); +static void hns_roce_dfx_record_post_send_wqe(struct hns_roce_qp *qp, + const struct ib_send_wr *wr) +{ + if (wr->send_flags & IB_SEND_INLINE) + qp->dfx_cnt[HNS_ROCE_QP_DFX_INLINE_WQE]++; + + if (wr->send_flags & IB_SEND_SIGNALED) + qp->dfx_cnt[HNS_ROCE_QP_DFX_SIGNAL_WQE]++; +} +#ifdef CONFIG_KERNEL_419 static int hns_roce_v2_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) +#else +static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +#endif { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); struct hns_roce_v2_ud_send_wqe *ud_sq_wqe; struct hns_roce_v2_rc_send_wqe *rc_sq_wqe; struct hns_roce_qp *qp = to_hr_qp(ibqp); + struct hns_roce_v2_wqe_data_seg *dseg; + struct hns_roce_wqe_frmr_seg *fseg; struct device *dev = hr_dev->dev; + struct hns_roce_ah *ah = NULL; struct hns_roce_v2_db sq_db; - struct ib_qp_attr attr; - unsigned int sge_ind = 0; + unsigned int sge_ind; unsigned int owner_bit; - unsigned long flags; + unsigned long flags = 0; + int valid_num_sge; unsigned int ind; void *wqe = NULL; - bool loopback; - int attr_mask; u32 tmp_len; int ret = 0; - u8 *smac; + u32 hr_op; int nreq; int i; if (unlikely(ibqp->qp_type != IB_QPT_RC && - ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type != IB_QPT_UD)) { - dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); + ibqp->qp_type != IB_QPT_UC && + ibqp->qp_type != IB_QPT_GSI) && + (ibqp->qp_type != IB_QPT_XRC_INI) && + (ibqp->qp_type != IB_QPT_XRC_TGT)) { + dev_err(dev, "Not supported QP type, type-0x%x, qpn-0x%x!\n", + ibqp->qp_type, ibqp->qp_num); *bad_wr = wr; return -EOPNOTSUPP; } if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT || qp->state == IB_QPS_RTR)) { - dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state); + dev_err(dev, "Post WQE fail, QP(0x%x) state %d err!\n", + ibqp->qp_num, qp->state); *bad_wr = wr; return -EINVAL; } - spin_lock_irqsave(&qp->sq.lock, flags); + v2_spin_lock_irqsave(qp_lock, &qp->sq.lock, &flags); + + if (hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN) { + *bad_wr = wr; + ret = -EIO; + nreq = 0; + goto out; + } + ind = qp->sq_next_wqe; sge_ind = qp->next_sge; @@ -218,6 +402,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { ret = -ENOMEM; *bad_wr = wr; + dev_err(dev, "qp(0x%x): wq overflow, nreq=0x%x\n", + ibqp->qp_num, nreq); goto out; } @@ -235,10 +421,19 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, owner_bit = ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); + valid_num_sge = 0; tmp_len = 0; + for (i = 0; i < wr->num_sge; i++) { + if (likely(wr->sg_list[i].length)) { + tmp_len += wr->sg_list[i].length; + valid_num_sge++; + } + } + /* Corresponding to the QP type, wqe process separately */ if 
(ibqp->qp_type == IB_QPT_GSI) { + ah = to_hr_ah(ud_wr(wr)->ah); ud_sq_wqe = wqe; memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe)); @@ -259,36 +454,33 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]); - /* MAC loopback */ - smac = (u8 *)hr_dev->dev_addr[qp->port]; - loopback = ether_addr_equal_unaligned(ah->av.mac, - smac) ? 1 : 0; - + /* When lbi is set, the roce port support loopback */ roce_set_bit(ud_sq_wqe->byte_40, - V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback); - - roce_set_field(ud_sq_wqe->byte_4, - V2_UD_SEND_WQE_BYTE_4_OPCODE_M, - V2_UD_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND); - - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; + V2_UD_SEND_WQE_BYTE_40_LBI_S, + hr_dev->loop_idc); ud_sq_wqe->msg_len = cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len); switch (wr->opcode) { + case IB_WR_SEND: + hr_op = HNS_ROCE_V2_WQE_OP_SEND; + ud_sq_wqe->immtdata = 0; + break; case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: + hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM; ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); break; default: - ud_sq_wqe->immtdata = 0; + hr_op = HNS_ROCE_V2_WQE_OP_MASK; break; } + roce_set_field(ud_sq_wqe->byte_4, + V2_UD_SEND_WQE_BYTE_4_OPCODE_M, + V2_UD_SEND_WQE_BYTE_4_OPCODE_S, hr_op); + /* Set sig attr */ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S, @@ -310,7 +502,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, - wr->num_sge); + valid_num_sge); roce_set_field(ud_sq_wqe->byte_20, V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, @@ -331,7 +523,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, V2_UD_SEND_WQE_BYTE_36_VLAN_S, - le16_to_cpu(ah->av.vlan)); + ah->av.vlan); roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, @@ -339,39 +531,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, V2_UD_SEND_WQE_BYTE_36_TCLASS_S, - ah->av.sl_tclass_flowlabel >> - HNS_ROCE_TCLASS_SHIFT); + ah->av.tclass); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, - ah->av.sl_tclass_flowlabel & - HNS_ROCE_FLOW_LABEL_MASK); + ah->av.flowlabel); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, V2_UD_SEND_WQE_BYTE_40_SL_S, - le32_to_cpu(ah->av.sl_tclass_flowlabel) >> - HNS_ROCE_SL_SHIFT); + ah->av.sl); roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M, V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port); + roce_set_bit(ud_sq_wqe->byte_40, + V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, + ah->av.vlan_en ? 
1 : 0); roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, - hns_get_gid_index(hr_dev, qp->phy_port, - ah->av.gid_index)); + ah->av.gid_index); memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2); - set_extend_sge(qp, wr, &sge_ind); + set_extend_sge(qp, wr, &sge_ind, valid_num_sge); ind++; - } else if (ibqp->qp_type == IB_QPT_RC) { + } else if (ibqp->qp_type == IB_QPT_RC || + ibqp->qp_type == IB_QPT_UC) { rc_sq_wqe = wqe; memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe)); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; rc_sq_wqe->msg_len = cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len); @@ -406,103 +596,114 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit); + wqe += sizeof(struct hns_roce_v2_rc_send_wqe); switch (wr->opcode) { case IB_WR_RDMA_READ: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_READ); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_WRITE); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_RDMA_WRITE_WITH_IMM: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM); + hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM; rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); break; case IB_WR_SEND: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND); + hr_op = HNS_ROCE_V2_WQE_OP_SEND; break; case IB_WR_SEND_WITH_INV: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND_WITH_INV); + hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV; break; case IB_WR_SEND_WITH_IMM: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM); + hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM; break; case IB_WR_LOCAL_INV: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_LOCAL_INV); + hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV; + roce_set_bit(rc_sq_wqe->byte_4, + V2_RC_SEND_WQE_BYTE_4_SO_S, 1); + rc_sq_wqe->inv_key = + cpu_to_le32(wr->ex.invalidate_rkey); + break; + case IB_WR_REG_MR: + hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR; + fseg = wqe; + set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr)); break; case IB_WR_ATOMIC_CMP_AND_SWP: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP); + hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP; + rc_sq_wqe->rkey = + cpu_to_le32(atomic_wr(wr)->rkey); + rc_sq_wqe->va = + cpu_to_le64(atomic_wr(wr)->remote_addr); break; case IB_WR_ATOMIC_FETCH_AND_ADD: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD); + hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD; + 
rc_sq_wqe->rkey = + cpu_to_le32(atomic_wr(wr)->rkey); + rc_sq_wqe->va = + cpu_to_le64(atomic_wr(wr)->remote_addr); break; case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP); + hr_op = + HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP; break; case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD); + hr_op = + HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD; break; default: - roce_set_field(rc_sq_wqe->byte_4, - V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, - HNS_ROCE_V2_WQE_OP_MASK); + hr_op = HNS_ROCE_V2_WQE_OP_MASK; break; } - wqe += sizeof(struct hns_roce_v2_rc_send_wqe); + roce_set_field(rc_sq_wqe->byte_4, + V2_RC_SEND_WQE_BYTE_4_OPCODE_M, + V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op); + + roce_set_field(rc_sq_wqe->byte_20, + V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, + V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, + sge_ind & (qp->sge.sge_cnt - 1)); + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + dseg = wqe; + set_data_seg_v2(dseg, wr->sg_list); + ret = set_atomic_seg(ibqp, wr, rc_sq_wqe, wqe, + &sge_ind); + if (ret) { + *bad_wr = wr; + goto out; + } + roce_set_field(rc_sq_wqe->byte_16, + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, + valid_num_sge); + } else if (wr->opcode != IB_WR_REG_MR) { + ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, + wqe, &sge_ind, + valid_num_sge, bad_wr); + if (ret) + goto out; + } + hns_roce_dfx_record_post_send_wqe(qp, wr); - ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe, - &sge_ind, bad_wr); - if (ret) - goto out; ind++; } else { - dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type); - spin_unlock_irqrestore(&qp->sq.lock, flags); + dev_err(dev, "Post send failed for illegal qp(0x%x) type:0x%x\n", + ibqp->qp_num, ibqp->qp_type); + v2_spin_unlock_irqrestore(qp_lock, &qp->sq.lock, + &flags); *bad_wr = wr; return -EOPNOTSUPP; } @@ -527,57 +728,67 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, V2_DB_PARAMETER_SL_S, qp->sl); - hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l); + /* when qp is err, stop to write db */ + if (qp->state == IB_QPS_ERR) { + if (qp_lock) + init_flush_work(hr_dev, qp); + } else { + hns_roce_write64(hr_dev, (__le32 *)&sq_db, + qp->sq.db_reg_l); + } qp->sq_next_wqe = ind; qp->next_sge = sge_ind; - - if (qp->state == IB_QPS_ERR) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - - ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask, - qp->state, IB_QPS_ERR); - if (ret) { - spin_unlock_irqrestore(&qp->sq.lock, flags); - *bad_wr = wr; - return ret; - } - } } + qp->dfx_cnt[HNS_ROCE_QP_DFX_POST_SEND]++; - spin_unlock_irqrestore(&qp->sq.lock, flags); + v2_spin_unlock_irqrestore(qp_lock, &qp->sq.lock, &flags); return ret; } +#ifdef CONFIG_KERNEL_419 static int hns_roce_v2_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) +#else +static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +#endif { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_rinl_sge *sge_list; struct device *dev = hr_dev->dev; - struct ib_qp_attr attr; - unsigned 
long flags; + unsigned long flags = 0; void *wqe = NULL; - int attr_mask; + u32 max_sge; int ret = 0; int nreq; int ind; int i; - spin_lock_irqsave(&hr_qp->rq.lock, flags); + v2_spin_lock_irqsave(qp_lock, &hr_qp->rq.lock, &flags); + + if (hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN) { + *bad_wr = wr; + ret = -EIO; + nreq = 0; + goto out; + } + ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); - if (hr_qp->state == IB_QPS_RESET) { - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); + if (hr_qp->state == IB_QPS_RESET || hr_qp->ibqp.srq) { + v2_spin_unlock_irqrestore(qp_lock, &hr_qp->rq.lock, &flags); *bad_wr = wr; + dev_err(dev, "Post recv failed: QP state is RESET, qp num is 0x%lx.\n", + hr_qp->qpn); return -EINVAL; } + max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (hns_roce_wq_overflow(&hr_qp->rq, nreq, hr_qp->ibqp.recv_cq)) { @@ -586,9 +797,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, goto out; } - if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, hr_qp->rq.max_gs); + if (unlikely(wr->num_sge > max_sge)) { + dev_err(dev, "RQ: sge num(%d) is larger than max sge num(%d)\n", + wr->num_sge, max_sge); ret = -EINVAL; *bad_wr = wr; goto out; @@ -603,9 +814,10 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, dseg++; } - if (i < hr_qp->rq.max_gs) { + if (hr_qp->rq.rsv_sge) { dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); dseg->addr = 0; + dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); } /* rq support inline data */ @@ -621,7 +833,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, } hr_qp->rq.wrid[ind] = wr->wr_id; - ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1); } @@ -631,27 +842,147 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, /* Memory barrier */ wmb(); - *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; - + /* when qp is err, stop to write record db */ if (hr_qp->state == IB_QPS_ERR) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - - ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, - attr_mask, hr_qp->state, - IB_QPS_ERR); - if (ret) { - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); - *bad_wr = wr; - return ret; - } + if (qp_lock) + init_flush_work(hr_dev, hr_qp); + } else { + *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; } } - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); + + hr_qp->dfx_cnt[HNS_ROCE_QP_DFX_POST_RECV]++; + + v2_spin_unlock_irqrestore(qp_lock, &hr_qp->rq.lock, &flags); return ret; } +static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev, + unsigned long instance_stage, + unsigned long reset_stage) +{ + /* When hardware reset has been completed once or more, we should stop + * sending mailbox&cmq&doorbell to hardware. If now in .init_instance() + * function, we should exit with error. If now at HNAE3_INIT_CLIENT + * stage of soft reset process, we should exit with error, and then + * HNAE3_INIT_CLIENT related process can rollback the operation like + * notifing hardware to free resources, HNAE3_INIT_CLIENT related + * process will exit with error to notify NIC driver to reschedule soft + * reset process once again. 
+ */
+	hr_dev->is_reset = true;
+	hr_dev->dis_db = true;
+	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+	    instance_stage == HNS_ROCE_STATE_INIT)
+		return CMD_RST_PRC_EBUSY;
+
+	return CMD_RST_PRC_SUCCESS;
+}
+
+static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+					unsigned long instance_stage,
+					unsigned long reset_stage)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hnae3_handle *handle = priv->handle;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+	unsigned long end;
+
+	/* When hardware reset is detected, we should stop sending mailbox&cmq&
+	 * doorbell to hardware, and wait until hardware reset finished. If now
+	 * in .init_instance() function, we should exit with error. If now at
+	 * HNAE3_INIT_CLIENT stage of soft reset process, we should exit with
+	 * error, and then HNAE3_INIT_CLIENT related process can rollback the
+	 * operation like notifying hardware to free resources, HNAE3_INIT_CLIENT
+	 * related process will exit with error to notify NIC driver to
+	 * reschedule soft reset process once again.
+	 */
+	hr_dev->dis_db = true;
+	end = HNS_ROCE_V2_HW_RST_TIMEOUT * 1000;
+	while (ops->get_hw_reset_stat(handle) && end) {
+		udelay(1);
+		end -= 1;
+	}
+
+	if (!ops->get_hw_reset_stat(handle))
+		hr_dev->is_reset = true;
+	else
+		dev_warn(hr_dev->dev, "hw_resetting!\n");
+
+	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
+	    instance_stage == HNS_ROCE_STATE_INIT)
+		return CMD_RST_PRC_EBUSY;
+
+	return CMD_RST_PRC_SUCCESS;
+}
+
+static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hnae3_handle *handle = priv->handle;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+	unsigned long end;
+
+	/* When software reset is detected at .init_instance() function, we
+	 * should stop sending mailbox&cmq&doorbell to hardware, and
+	 * wait until hardware reset finished, we should exit with error.
+	 */
+	hr_dev->dis_db = true;
+	end = HNS_ROCE_V2_HW_RST_TIMEOUT * 1000;
+	while (ops->ae_dev_reset_cnt(handle) == hr_dev->reset_cnt &&
+	       end) {
+		udelay(1);
+		end -= 1;
+	}
+
+	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+		hr_dev->is_reset = true;
+	else
+		dev_warn(hr_dev->dev, "reset_cnt no change!\n");
+
+	return CMD_RST_PRC_EBUSY;
+}
+
+static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+	struct hnae3_handle *handle = priv->handle;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+	unsigned long instance_stage;	/* the current instance stage */
+	unsigned long reset_stage;	/* the current reset stage */
+	unsigned long reset_cnt;
+	bool sw_resetting;
+	bool hw_resetting;
+
+	if (hr_dev->is_reset)
+		return CMD_RST_PRC_SUCCESS;
+
+	/* Get information about reset from NIC driver or RoCE driver itself,
+	 * the meaning of the following variables from NIC driver are described
+	 * as below:
+	 * reset_cnt -- The count value of completed hardware reset.
+	 * hw_resetting -- Whether hardware device is resetting now.
+	 * sw_resetting -- Whether NIC's software reset process is running now.
+ */ + instance_stage = handle->rinfo.instance_state; + reset_stage = handle->rinfo.reset_state; + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_cmdq_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); + + if (reset_cnt != hr_dev->reset_cnt) + return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage, + reset_stage); + else if (hw_resetting) + return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage, + reset_stage); + else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) + return hns_roce_v2_cmd_sw_resetting(hr_dev); + + return 0; +} + static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring) { int ntu = ring->next_to_use; @@ -718,17 +1049,17 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type) roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma)); roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, - (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | - HNS_ROCE_CMQ_ENABLE); - roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); + ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); + + /* Make sure to write tail first and then head */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); + roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); } else { roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma); roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG, upper_32_bits(dma)); roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG, - (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) | - HNS_ROCE_CMQ_ENABLE); + ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0); roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0); } @@ -753,14 +1084,14 @@ static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev) /* Init CSQ */ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ); if (ret) { - dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret); + dev_err(hr_dev->dev, "Init CSQ error(%d).\n", ret); return ret; } /* Init CRQ */ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ); if (ret) { - dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret); + dev_err(hr_dev->dev, "Init CRQ error(%d).\n", ret); goto err_crq; } @@ -786,7 +1117,7 @@ static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev) hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq); } -static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, +void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, enum hns_roce_opcode_type opcode, bool is_read) { @@ -799,6 +1130,7 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, else desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); } +EXPORT_SYMBOL(hns_roce_cmq_setup_basic_desc); static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev) { @@ -832,8 +1164,8 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev) return clean; } -static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, - struct hns_roce_cmq_desc *desc, int num) +static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) { struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; @@ -845,12 +1177,11 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int ret = 0; int ntc; - if (hr_dev->is_reset) - return 0; - spin_lock_bh(&csq->lock); if (num > hns_roce_cmq_space(csq)) { + dev_err(hr_dev->dev, "cmq num(%d) is out of space %pK\n", + num, csq); spin_unlock_bh(&csq->lock); return -EBUSY; } @@ -871,6 +1202,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, handle++; } + /* Memory barrier */ + 
wmb(); + /* Write to hardware */ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use); @@ -878,7 +1212,7 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, * If the command is sync, wait for the firmware to write back, * if multi descriptors to be sent, use the first one to check */ - if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) { + if ((desc->flag) & cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR)) { do { if (hns_roce_cmq_csq_done(hr_dev)) break; @@ -895,12 +1229,15 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, desc_to_use = &csq->desc[ntc]; desc[handle] = *desc_to_use; dev_dbg(hr_dev->dev, "Get cmq desc:\n"); - desc_ret = desc[handle].retval; + desc_ret = le16_to_cpu(desc[handle].retval); if (desc_ret == CMD_EXEC_SUCCESS) ret = 0; - else + else if (desc_ret == CMD_EXEC_TIMEOUT) { + priv->cmq.last_status = desc_ret; + ret = -ETIME; + } else ret = -EIO; - priv->cmq.last_status = desc_ret; + ntc++; handle++; if (ntc == csq->desc_num) @@ -922,6 +1259,31 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, return ret; } +int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) +{ + int retval; + int ret; + + ret = hns_roce_v2_rst_process_cmd(hr_dev); + if (ret == CMD_RST_PRC_SUCCESS) + return 0; + if (ret == CMD_RST_PRC_EBUSY) + return -EBUSY; + + ret = __hns_roce_cmq_send(hr_dev, desc, num); + if (ret) { + retval = hns_roce_v2_rst_process_cmd(hr_dev); + if (retval == CMD_RST_PRC_SUCCESS) + return 0; + else if (retval == CMD_RST_PRC_EBUSY) + return -EBUSY; + } + + return ret; +} +EXPORT_SYMBOL(hns_roce_cmq_send); + static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) { struct hns_roce_query_version *resp; @@ -930,93 +1292,369 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true); ret = hns_roce_cmq_send(hr_dev, &desc, 1); - if (ret) + if (ret) { + dev_warn(hr_dev->dev, "query hw version failed(%d)\n", ret); return ret; + } resp = (struct hns_roce_query_version *)desc.data; - hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version); - hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id); + hr_dev->hw_rev = (u32)le16_to_cpu(resp->rocee_hw_version); + hr_dev->vendor_id = hr_dev->pci_dev->vendor; return 0; } -static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev) +static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev) { - struct hns_roce_cfg_global_param *req; - struct hns_roce_cmq_desc desc; + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long reset_cnt; + bool sw_resetting; + bool hw_resetting; - hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM, - false); + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_hw_reset_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); - req = (struct hns_roce_cfg_global_param *)desc.data; - memset(req, 0, sizeof(*req)); - roce_set_field(req->time_cfg_udp_port, - CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M, - CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8); - roce_set_field(req->time_cfg_udp_port, - CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M, - CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7); + if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting) + return true; - return hns_roce_cmq_send(hr_dev, &desc, 1); + return false; } -static int hns_roce_query_pf_resource(struct 
hns_roce_dev *hr_dev) +static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval, + int flag) { - struct hns_roce_cmq_desc desc[2]; - struct hns_roce_pf_res_a *req_a; - struct hns_roce_pf_res_b *req_b; - int ret; - int i; + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long instance_stage; + unsigned long reset_cnt; + unsigned long end; + bool sw_resetting; + bool hw_resetting; + + instance_stage = handle->rinfo.instance_state; + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_hw_reset_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); + + if (reset_cnt != hr_dev->reset_cnt) { + hr_dev->dis_db = true; + hr_dev->is_reset = true; + dev_info(hr_dev->dev, "Func clear success after reset.\n"); + } else if (hw_resetting) { + hr_dev->dis_db = true; + + dev_warn(hr_dev->dev, + "Func clear is pending, device in resetting state.\n"); + end = HNS_ROCE_V2_HW_RST_TIMEOUT; + while (end) { + if (!ops->get_hw_reset_stat(handle)) { + hr_dev->is_reset = true; + dev_info(hr_dev->dev, + "Func clear success after reset.\n"); + return; + } + msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); + end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; + } - for (i = 0; i < 2; i++) { - hns_roce_cmq_setup_basic_desc(&desc[i], - HNS_ROCE_OPC_QUERY_PF_RES, true); + dev_warn(hr_dev->dev, "Func clear failed.\n"); + } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) { + hr_dev->dis_db = true; + + dev_warn(hr_dev->dev, + "Func clear is pending, device in resetting state.\n"); + end = HNS_ROCE_V2_HW_RST_TIMEOUT; + while (end) { + if (ops->ae_dev_reset_cnt(handle) != + hr_dev->reset_cnt) { + hr_dev->is_reset = true; + dev_info(hr_dev->dev, + "Func clear success after sw reset\n"); + return; + } + msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT); + end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; + } - if (i == 0) - desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - else - desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - } + dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n"); + } else { + if (retval && !flag) + dev_warn(hr_dev->dev, + "Func clear read failed, ret = %d.\n", retval); - ret = hns_roce_cmq_send(hr_dev, desc, 2); - if (ret) - return ret; + dev_warn(hr_dev->dev, "Func clear failed.\n"); + } +} - req_a = (struct hns_roce_pf_res_a *)desc[0].data; - req_b = (struct hns_roce_pf_res_b *)desc[1].data; +static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_pf_func_info *resp; + int ret = 0; - hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num, - PF_RES_DATA_1_PF_QPC_BT_NUM_M, - PF_RES_DATA_1_PF_QPC_BT_NUM_S); - hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num, - PF_RES_DATA_2_PF_SRQC_BT_NUM_M, - PF_RES_DATA_2_PF_SRQC_BT_NUM_S); - hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num, - PF_RES_DATA_3_PF_CQC_BT_NUM_M, - PF_RES_DATA_3_PF_CQC_BT_NUM_S); - hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num, - PF_RES_DATA_4_PF_MPT_BT_NUM_M, - PF_RES_DATA_4_PF_MPT_BT_NUM_S); + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO, + true); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, "Query function info failed(%d).\n", + ret); + return ret; + } - hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num, - PF_RES_DATA_3_PF_SL_NUM_M, - 
PF_RES_DATA_3_PF_SL_NUM_S); + resp = (struct hns_roce_pf_func_info *)desc.data; + hr_dev->func_num = le32_to_cpu(resp->pf_own_func_num); + hr_dev->mac_id = le32_to_cpu(resp->pf_own_mac_id); - return 0; + return ret; } -static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) +static void hns_roce_clear_func(struct hns_roce_dev *hr_dev, int vf_id) { - struct hns_roce_cmq_desc desc[2]; - struct hns_roce_vf_res_a *req_a; - struct hns_roce_vf_res_b *req_b; - int i; + struct hns_roce_func_clear *resp; + struct hns_roce_cmq_desc desc; + unsigned long end; + bool fclr_write_fail_flag = false; + int ret = 0; + + if (hns_roce_func_clr_chk_rst(hr_dev)) + goto out; + + resp = (struct hns_roce_func_clear *)desc.data; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false); + resp->rst_funcid_en = cpu_to_le32(vf_id); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + fclr_write_fail_flag = true; + dev_err(hr_dev->dev, "Func clear write failed(%d).\n", ret); + goto out; + } + + msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL); + end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS; + while (end) { + if (hns_roce_func_clr_chk_rst(hr_dev)) + goto out; + msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT); + end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, + true); + resp->rst_funcid_en = cpu_to_le32(vf_id); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + continue; + + if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) { + if (vf_id == 0) + hr_dev->is_reset = true; + return; + } + } + +out: + (void)hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag); +} + +static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) +{ + int vf_num = 0; /* should be (hr_dev->func_num - 1) when RoCE VF is enabled */ + + /* Clear vf first, then clear pf */ + for (; vf_num >= 0; vf_num--) + hns_roce_clear_func(hr_dev, vf_num); +} + +static void hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc; + int ret; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO, + false); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + dev_warn(hr_dev->dev, "clear extended doorbell memory failed, ret = %d.\n", + ret); +} + +static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_query_fw_info *resp; + struct hns_roce_cmq_desc desc; + int ret; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, "Query fw version failed(%d)!\n", ret); + return ret; + } + + resp = (struct hns_roce_query_fw_info *)desc.data; + hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver)); + + return 0; +} + +static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cfg_global_param *req; + struct hns_roce_cmq_desc desc; + u32 clock_cycles_of_1us; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM, + false); + + req = (struct hns_roce_cfg_global_param *)desc.data; + memset(req, 0, sizeof(*req)); + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) + clock_cycles_of_1us = HNS_ROCE_1NS_CFG; + else + clock_cycles_of_1us = HNS_ROCE_1US_CFG; + + roce_set_field(req->time_cfg_udp_port, + CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M, + CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, + clock_cycles_of_1us); + roce_set_field(req->time_cfg_udp_port, + CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M, + 
CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7); + + return hns_roce_cmq_send(hr_dev, &desc, 1); +} + +static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc[QUERY_PF_RES_CMDQ_DESC_NUM]; + struct hns_roce_pf_res_a *req_a; + struct hns_roce_pf_res_b *req_b; + int ret; + int i; + + for (i = 0; i < QUERY_PF_RES_CMDQ_DESC_NUM; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUERY_PF_RES, true); + + if (i == 0) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + } + + ret = hns_roce_cmq_send(hr_dev, desc, QUERY_PF_RES_CMDQ_DESC_NUM); + if (ret) + return ret; + + req_a = (struct hns_roce_pf_res_a *)desc[0].data; + req_b = (struct hns_roce_pf_res_b *)desc[1].data; + + hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num, + PF_RES_DATA_1_PF_QPC_BT_NUM_M, + PF_RES_DATA_1_PF_QPC_BT_NUM_S); + hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num, + PF_RES_DATA_2_PF_SRQC_BT_NUM_M, + PF_RES_DATA_2_PF_SRQC_BT_NUM_S); + hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num, + PF_RES_DATA_3_PF_CQC_BT_NUM_M, + PF_RES_DATA_3_PF_CQC_BT_NUM_S); + hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num, + PF_RES_DATA_4_PF_MPT_BT_NUM_M, + PF_RES_DATA_4_PF_MPT_BT_NUM_S); + + hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num, + PF_RES_DATA_3_PF_SL_NUM_M, + PF_RES_DATA_3_PF_SL_NUM_S); + hr_dev->caps.scc_ctx_bt_num = roce_get_field(req_b->scc_ctx_bt_idx_num, + PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_M, + PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_S); + + return 0; +} + +static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc[QUERY_PF_TIMER_RES_CMDQ_DESC_NUM]; + struct hns_roce_pf_timer_res_a *req_a; + int ret; + int i; + + for (i = 0; i < QUERY_PF_TIMER_RES_CMDQ_DESC_NUM; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUERY_PF_TIMER_RES, + true); + + if (i == 0) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + } + + ret = hns_roce_cmq_send(hr_dev, desc, QUERY_PF_TIMER_RES_CMDQ_DESC_NUM); + if (ret) + return ret; + + req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data; + + hr_dev->caps.qpc_timer_bt_num = + roce_get_field(req_a->qpc_timer_bt_idx_num, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); + hr_dev->caps.cqc_timer_bt_num = + roce_get_field(req_a->cqc_timer_bt_idx_num, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); + + return 0; +} + +static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, + int vf_id) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_vf_switch *swt; + int ret; + + swt = (struct hns_roce_vf_switch *)desc.data; + hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); + swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); + roce_set_field(swt->fun_id, + VF_SWITCH_DATA_FUN_ID_VF_ID_M, + VF_SWITCH_DATA_FUN_ID_VF_ID_S, + vf_id); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + desc.flag = + cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN); + desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); + roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); + roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); + roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); + + return hns_roce_cmq_send(hr_dev, &desc, 1); +} + +static int 
hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc[ALLOC_VF_RES_CMDQ_DESC_NUM]; + struct hns_roce_vf_res_a *req_a; + struct hns_roce_vf_res_b *req_b; + int i; req_a = (struct hns_roce_vf_res_a *)desc[0].data; req_b = (struct hns_roce_vf_res_b *)desc[1].data; memset(req_a, 0, sizeof(*req_a)); memset(req_b, 0, sizeof(*req_b)); - for (i = 0; i < 2; i++) { + for (i = 0; i < ALLOC_VF_RES_CMDQ_DESC_NUM; i++) { hns_roce_cmq_setup_basic_desc(&desc[i], HNS_ROCE_OPC_ALLOC_VF_RES, false); @@ -1089,10 +1727,18 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) VF_RES_B_DATA_3_VF_SL_NUM_M, VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM); + + roce_set_field(req_b->vf_sccc_idx_num, + VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M, + VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0); + roce_set_field(req_b->vf_sccc_idx_num, + VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M, + VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S, + HNS_ROCE_VF_SCCC_BT_NUM); } } - return hns_roce_cmq_send(hr_dev, desc, 2); + return hns_roce_cmq_send(hr_dev, desc, ALLOC_VF_RES_CMDQ_DESC_NUM); } static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) @@ -1101,6 +1747,7 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) u8 qpc_hop_num = hr_dev->caps.qpc_hop_num; u8 cqc_hop_num = hr_dev->caps.cqc_hop_num; u8 mpt_hop_num = hr_dev->caps.mpt_hop_num; + u8 scc_ctx_hop_num = hr_dev->caps.scc_ctx_hop_num; struct hns_roce_cfg_bt_attr *req; struct hns_roce_cmq_desc desc; @@ -1148,61 +1795,47 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S, mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num); + roce_set_field(req->vf_scc_ctx_cfg, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_M, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_S, + hr_dev->caps.scc_ctx_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(req->vf_scc_ctx_cfg, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_M, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_S, + hr_dev->caps.scc_ctx_buf_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(req->vf_scc_ctx_cfg, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_M, + CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_S, + scc_ctx_hop_num == + HNS_ROCE_HOP_NUM_0 ? 
0 : scc_ctx_hop_num); + return hns_roce_cmq_send(hr_dev, &desc, 1); } -static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) +static void set_default_caps(struct hns_roce_dev *hr_dev) { struct hns_roce_caps *caps = &hr_dev->caps; - int ret; - - ret = hns_roce_cmq_query_hw_info(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n", - ret); - return ret; - } - - ret = hns_roce_config_global_param(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n", - ret); - return ret; - } - - /* Get pf resource owned by every pf */ - ret = hns_roce_query_pf_resource(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n", - ret); - return ret; - } - - ret = hns_roce_alloc_vf_resource(hr_dev); - if (ret) { - dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", - ret); - return ret; - } - - hr_dev->vendor_part_id = 0; - hr_dev->sys_image_guid = 0; caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM; caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; + caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM; + caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; + caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM; caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM; caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; caps->num_uars = HNS_ROCE_V2_UAR_NUM; caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM; caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM; caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM; - caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM; + caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM; caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS; caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS; + caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS; + caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS; caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; @@ -1213,37 +1846,45 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ; caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ; caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ; + caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ; caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ; caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; + caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ; caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE; caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; + caps->reserved_qps = HNS_ROCE_V2_RSV_QPS; caps->qpc_ba_pg_sz = 0; caps->qpc_buf_pg_sz = 0; caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->srqc_ba_pg_sz = 0; caps->srqc_buf_pg_sz = 0; - caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0; + caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->cqc_ba_pg_sz = 0; caps->cqc_buf_pg_sz = 0; caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->mpt_ba_pg_sz = 0; caps->mpt_buf_pg_sz = 0; caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->pbl_ba_pg_sz = 0; + caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K; caps->pbl_buf_pg_sz = 0; caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; - caps->mtt_ba_pg_sz = 0; + caps->mtt_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K; caps->mtt_buf_pg_sz = 0; caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM; - caps->cqe_ba_pg_sz = 0; + 
caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM; + caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM; + caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM; + caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K; caps->cqe_buf_pg_sz = 0; caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM; + caps->srqwqe_ba_pg_sz = 0; + caps->srqwqe_buf_pg_sz = 0; + caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM; + caps->idx_ba_pg_sz = 0; + caps->idx_buf_pg_sz = 0; + caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM; caps->eqe_ba_pg_sz = 0; caps->eqe_buf_pg_sz = 0; caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM; @@ -1252,101 +1893,469 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR | HNS_ROCE_CAP_FLAG_ROCE_V1_V2 | - HNS_ROCE_CAP_FLAG_RQ_INLINE | HNS_ROCE_CAP_FLAG_RECORD_DB | HNS_ROCE_CAP_FLAG_SQ_RECORD_DB; + caps->pkey_table_len[0] = 1; - caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; + caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM; caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM; caps->local_ca_ack_delay = 0; caps->max_mtu = IB_MTU_4096; - ret = hns_roce_v2_set_bt(hr_dev); - if (ret) - dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n", - ret); - - return ret; + caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; + caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { + caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW | + HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR | + HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL; + + caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; + caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ; + caps->qpc_timer_ba_pg_sz = 0; + caps->qpc_timer_buf_pg_sz = 0; + caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM; + caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; + caps->cqc_timer_ba_pg_sz = 0; + caps->cqc_timer_buf_pg_sz = 0; + caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + + caps->scc_ctx_entry_sz = HNS_ROCE_V2_SCC_CTX_ENTRY_SZ; + caps->scc_ctx_ba_pg_sz = 0; + caps->scc_ctx_buf_pg_sz = 0; + caps->scc_ctx_hop_num = HNS_ROCE_SCC_CTX_HOP_NUM; + } } -static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, - enum hns_roce_link_table_type type) +static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num, + int *buf_page_size, int *bt_page_size, u32 hem_type) { - struct hns_roce_cmq_desc desc[2]; - struct hns_roce_cfg_llm_a *req_a = - (struct hns_roce_cfg_llm_a *)desc[0].data; - struct hns_roce_cfg_llm_b *req_b = - (struct hns_roce_cfg_llm_b *)desc[1].data; - struct hns_roce_v2_priv *priv = hr_dev->priv; - struct hns_roce_link_table *link_tbl; - struct hns_roce_link_table_entry *entry; - enum hns_roce_opcode_type opcode; - u32 page_num; - int i; - - switch (type) { - case TSQ_LINK_TABLE: - link_tbl = &priv->tsq; - opcode = HNS_ROCE_OPC_CFG_EXT_LLM; + u64 obj_per_chunk; + u64 bt_chunk_sz = 1 << PAGE_SHIFT; + u64 obj_chunk_sz = 1 << PAGE_SHIFT; + + *buf_page_size = 0; + *bt_page_size = 0; + + switch (hop_num) { + case 3: + obj_per_chunk = ctx_bt_num * (bt_chunk_sz / BA_BYTE_LEN) * + (bt_chunk_sz / BA_BYTE_LEN) * + (bt_chunk_sz / BA_BYTE_LEN) * + (obj_chunk_sz / obj_size); break; - case TPQ_LINK_TABLE: - link_tbl = &priv->tpq; - opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM; + case 2: + obj_per_chunk = ctx_bt_num * (bt_chunk_sz / BA_BYTE_LEN) * + (bt_chunk_sz / BA_BYTE_LEN) * + (obj_chunk_sz / obj_size); + break; + case 1: + obj_per_chunk = ctx_bt_num * (bt_chunk_sz 
/ BA_BYTE_LEN) * + (obj_chunk_sz / obj_size); + break; + case HNS_ROCE_HOP_NUM_0: + obj_per_chunk = ctx_bt_num * (obj_chunk_sz / obj_size); break; default: - return -EINVAL; + pr_err("Table %d not support hop_num = %d!\n", hem_type, + hop_num); + return; } - page_num = link_tbl->npages; - entry = link_tbl->table.buf; - memset(req_a, 0, sizeof(*req_a)); - memset(req_b, 0, sizeof(*req_b)); + if (hem_type >= HEM_TYPE_MTT) + *bt_page_size = ilog2((obj_num + obj_per_chunk - 1) / + obj_per_chunk); + else + *buf_page_size = ilog2((obj_num + obj_per_chunk - 1) / + obj_per_chunk); +} - for (i = 0; i < 2; i++) { - hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false); +static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM]; + struct hns_roce_caps *caps = &hr_dev->caps; + struct hns_roce_query_pf_caps_a *resp_a; + struct hns_roce_query_pf_caps_b *resp_b; + struct hns_roce_query_pf_caps_c *resp_c; + struct hns_roce_query_pf_caps_d *resp_d; + struct hns_roce_query_pf_caps_e *resp_e; + int ctx_hop_num; + int pbl_hop_num; + int ret; + int i; - if (i == 0) + for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUERY_PF_CAPS_NUM, + true); + if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1)) desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); else desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - - if (i == 0) { - req_a->base_addr_l = link_tbl->table.map & 0xffffffff; - req_a->base_addr_h = (link_tbl->table.map >> 32) & - 0xffffffff; - roce_set_field(req_a->depth_pgsz_init_en, - CFG_LLM_QUE_DEPTH_M, - CFG_LLM_QUE_DEPTH_S, - link_tbl->npages); - roce_set_field(req_a->depth_pgsz_init_en, - CFG_LLM_QUE_PGSZ_M, - CFG_LLM_QUE_PGSZ_S, - link_tbl->pg_sz); - req_a->head_ba_l = entry[0].blk_ba0; - req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr; - roce_set_field(req_a->head_ptr, - CFG_LLM_HEAD_PTR_M, - CFG_LLM_HEAD_PTR_S, 0); - } else { - req_b->tail_ba_l = entry[page_num - 1].blk_ba0; - roce_set_field(req_b->tail_ba_h, - CFG_LLM_TAIL_BA_H_M, - CFG_LLM_TAIL_BA_H_S, - entry[page_num - 1].blk_ba1_nxt_ptr & - HNS_ROCE_LINK_TABLE_BA1_M); - roce_set_field(req_b->tail_ptr, - CFG_LLM_TAIL_PTR_M, - CFG_LLM_TAIL_PTR_S, - (entry[page_num - 2].blk_ba1_nxt_ptr & - HNS_ROCE_LINK_TABLE_NXT_PTR_M) >> - HNS_ROCE_LINK_TABLE_NXT_PTR_S); - } } - roce_set_field(req_a->depth_pgsz_init_en, - CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1); - return hns_roce_cmq_send(hr_dev, desc, 2); + ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM); + if (ret) + return ret; + + resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data; + resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data; + resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data; + resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data; + resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data; + + caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; + caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); + caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); + caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); + caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); + caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); + caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); + caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer); + caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); + caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); + caps->num_aeq_vectors = resp_a->num_aeq_vectors; + 
caps->num_other_vectors = resp_a->num_other_vectors; + caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; + caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; + caps->max_srq_desc_sz = resp_a->max_srq_desc_sz; + caps->cq_entry_sz = resp_a->cq_entry_sz; + + caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; + caps->irrl_entry_sz = resp_b->irrl_entry_sz; + caps->trrl_entry_sz = resp_b->trrl_entry_sz; + caps->cqc_entry_sz = resp_b->cqc_entry_sz; + caps->srqc_entry_sz = resp_b->srqc_entry_sz; + caps->idx_entry_sz = resp_b->idx_entry_sz; + caps->scc_ctx_entry_sz = resp_b->scc_ctx_entry_sz; + caps->max_mtu = resp_b->max_mtu; + caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz); + caps->min_cqes = resp_b->min_cqes; + caps->min_wqes = resp_b->min_wqes; + caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); + caps->pkey_table_len[0] = resp_b->pkey_table_len; + caps->phy_num_uars = resp_b->phy_num_uars; + ctx_hop_num = resp_b->ctx_hop_num; + pbl_hop_num = resp_b->pbl_hop_num; + + caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds, + V2_QUERY_PF_CAPS_C_NUM_PDS_M, + V2_QUERY_PF_CAPS_C_NUM_PDS_S); + caps->flags = roce_get_field(resp_c->cap_flags_num_pds, + V2_QUERY_PF_CAPS_C_CAP_FLAGS_M, + V2_QUERY_PF_CAPS_C_CAP_FLAGS_S); + caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs, + V2_QUERY_PF_CAPS_C_NUM_CQS_M, + V2_QUERY_PF_CAPS_C_NUM_CQS_S); + caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs, + V2_QUERY_PF_CAPS_C_MAX_GID_M, + V2_QUERY_PF_CAPS_C_MAX_GID_S); + caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth, + V2_QUERY_PF_CAPS_C_CQ_DEPTH_M, + V2_QUERY_PF_CAPS_C_CQ_DEPTH_S); + caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws, + V2_QUERY_PF_CAPS_C_NUM_MRWS_M, + V2_QUERY_PF_CAPS_C_NUM_MRWS_S); + caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps, + V2_QUERY_PF_CAPS_C_NUM_QPS_M, + V2_QUERY_PF_CAPS_C_NUM_QPS_S); + caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps, + V2_QUERY_PF_CAPS_C_MAX_ORD_M, + V2_QUERY_PF_CAPS_C_MAX_ORD_S); + caps->max_qp_dest_rdma = caps->max_qp_init_rdma; + caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); + caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs, + V2_QUERY_PF_CAPS_D_NUM_SRQS_M, + V2_QUERY_PF_CAPS_D_NUM_SRQS_S); + caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); + caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth, + V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M, + V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S); + caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth, + V2_QUERY_PF_CAPS_D_NUM_CEQS_M, + V2_QUERY_PF_CAPS_D_NUM_CEQS_S); + caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth, + V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M, + V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S); + caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, + V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M, + V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S); + caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, + V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M, + V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S); + caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds, + V2_QUERY_PF_CAPS_D_RSV_PDS_M, + V2_QUERY_PF_CAPS_D_RSV_PDS_S); + caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds, + V2_QUERY_PF_CAPS_D_NUM_UARS_M, + V2_QUERY_PF_CAPS_D_NUM_UARS_S); + caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps, + V2_QUERY_PF_CAPS_D_RSV_QPS_M, + V2_QUERY_PF_CAPS_D_RSV_QPS_S); + caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps, + V2_QUERY_PF_CAPS_D_RSV_UARS_M, + V2_QUERY_PF_CAPS_D_RSV_UARS_S); + caps->reserved_mrws = 
roce_get_field(resp_e->chunk_size_shift_rsv_mrws, + V2_QUERY_PF_CAPS_E_RSV_MRWS_M, + V2_QUERY_PF_CAPS_E_RSV_MRWS_S); + caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws, + V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M, + V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S); + caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs, + V2_QUERY_PF_CAPS_E_RSV_CQS_M, + V2_QUERY_PF_CAPS_E_RSV_CQS_S); + caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs, + V2_QUERY_PF_CAPS_E_RSV_SRQS_M, + V2_QUERY_PF_CAPS_E_RSV_SRQS_S); + caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey, + V2_QUERY_PF_CAPS_E_RSV_LKEYS_M, + V2_QUERY_PF_CAPS_E_RSV_LKEYS_S); + caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); + caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); + caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); + caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); + + caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ; + caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; + caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; + caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS; + caps->mtt_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K; + caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS; + caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS; + caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS; + + caps->qpc_hop_num = ctx_hop_num; + caps->srqc_hop_num = ctx_hop_num; + caps->cqc_hop_num = ctx_hop_num; + caps->mpt_hop_num = ctx_hop_num; + caps->mtt_hop_num = pbl_hop_num; + caps->cqe_hop_num = pbl_hop_num; + caps->srqwqe_hop_num = pbl_hop_num; + caps->idx_hop_num = pbl_hop_num; + caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M, + V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S); + caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M, + V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S); + caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, + V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); + + calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num, + caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz, + HEM_TYPE_QPC); + calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num, + caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz, + HEM_TYPE_MTPT); + calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num, + caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz, + HEM_TYPE_CQC); + calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num, + caps->srqc_bt_num, &caps->srqc_buf_pg_sz, + &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC); + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { + caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + caps->scc_ctx_hop_num = ctx_hop_num; + + calc_pg_sz(caps->num_qps, caps->scc_ctx_entry_sz, + caps->scc_ctx_hop_num, caps->scc_ctx_bt_num, + &caps->scc_ctx_buf_pg_sz, + &caps->scc_ctx_ba_pg_sz, + HEM_TYPE_SCC_CTX); + } + + calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num, + 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE); + calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz, + caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz, + &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE); + calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num, + 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX); + + return 0; +} + +static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) +{ + struct 
hns_roce_caps *caps = &hr_dev->caps; + int ret; + + ret = hns_roce_cmq_query_hw_info(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Query hardware version failed(%d).\n", + ret); + return ret; + } + + ret = hns_roce_query_fw_ver(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Query firmware version failed(%d).\n", + ret); + return ret; + } + + ret = hns_roce_config_global_param(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Configure global param failed(%d).\n", + ret); + return ret; + } + + /* Get pf resource owned by every pf */ + ret = hns_roce_query_pf_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Query pf resource failed(%d).\n", ret); + return ret; + } + + ret = hns_roce_query_func_info(hr_dev); + if (ret) + return ret; + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { + ret = hns_roce_query_pf_timer_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, + "Query pf timer resource failed(%d).\n", ret); + return ret; + } + } + + ret = hns_roce_alloc_vf_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, "Allocate vf resource failed(%d).\n", + ret); + return ret; + } + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) { + ret = hns_roce_set_vf_switch_param(hr_dev, 0); + if (ret) { + dev_err(hr_dev->dev, + "Set function switch param failed(%d).\n", + ret); + return ret; + } + } + + hr_dev->vendor_part_id = hr_dev->pci_dev->device; + hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); + + caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM; + caps->reserved_xrcds = 0; + caps->eqe_ba_pg_sz = 0; + caps->eqe_buf_pg_sz = 0; + caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM; + caps->tsq_buf_pg_sz = 0; + + caps->tpq_buf_pg_sz = 0; + caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K; + caps->pbl_buf_pg_sz = 0; + caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; + + ret = hns_roce_query_pf_caps(hr_dev); + if (ret) { + set_default_caps(hr_dev); + ret = 0; + } + + ret = hns_roce_v2_set_bt(hr_dev); + if (ret) + dev_err(hr_dev->dev, "Configure bt attribute failed(%d).\n", + ret); + + return ret; +} + +static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev, + enum hns_roce_link_table_type type) +{ + struct hns_roce_cmq_desc desc[CONFIG_LLM_CMDQ_DESC_NUM]; + struct hns_roce_cfg_llm_a *req_a = + (struct hns_roce_cfg_llm_a *)desc[0].data; + struct hns_roce_cfg_llm_b *req_b = + (struct hns_roce_cfg_llm_b *)desc[1].data; + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_link_table *link_tbl; + struct hns_roce_link_table_entry *entry; + enum hns_roce_opcode_type opcode; + u32 page_num; + int i; + + switch (type) { + case TSQ_LINK_TABLE: + link_tbl = &priv->tsq; + opcode = HNS_ROCE_OPC_CFG_EXT_LLM; + break; + case TPQ_LINK_TABLE: + link_tbl = &priv->tpq; + opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM; + break; + default: + dev_err(hr_dev->dev, "Not supported link table type: 0x%x!\n", + type); + return -EINVAL; + } + + page_num = link_tbl->npages; + entry = link_tbl->table.buf; + memset(req_a, 0, sizeof(*req_a)); + memset(req_b, 0, sizeof(*req_b)); + + for (i = 0; i < CONFIG_LLM_CMDQ_DESC_NUM; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false); + + if (i == 0) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + + if (i == 0) { + req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & + 0xffffffff); + req_a->base_addr_h = cpu_to_le32((link_tbl->table.map >> + 32) & + 0xffffffff); + roce_set_field(req_a->depth_pgsz_init_en, + CFG_LLM_QUE_DEPTH_M, + CFG_LLM_QUE_DEPTH_S, + link_tbl->npages); + 
roce_set_field(req_a->depth_pgsz_init_en, + CFG_LLM_QUE_PGSZ_M, + CFG_LLM_QUE_PGSZ_S, + link_tbl->pg_sz); + req_a->head_ba_l = entry[0].blk_ba0; + req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr; + roce_set_field(req_a->head_ptr, + CFG_LLM_HEAD_PTR_M, + CFG_LLM_HEAD_PTR_S, 0); + } else { + req_b->tail_ba_l = entry[page_num - 1].blk_ba0; + roce_set_field(req_b->tail_ba_h, + CFG_LLM_TAIL_BA_H_M, + CFG_LLM_TAIL_BA_H_S, + le32_to_cpu( + entry[page_num - 1].blk_ba1_nxt_ptr) & + HNS_ROCE_LINK_TABLE_BA1_M); + /* (page_num - 2) indicates the second to last page */ + roce_set_field(req_b->tail_ptr, + CFG_LLM_TAIL_PTR_M, + CFG_LLM_TAIL_PTR_S, + (le32_to_cpu( + entry[page_num - 2].blk_ba1_nxt_ptr) & + HNS_ROCE_LINK_TABLE_NXT_PTR_M) >> + HNS_ROCE_LINK_TABLE_NXT_PTR_S); + } + } + roce_set_field(req_a->depth_pgsz_init_en, + CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1); + + return hns_roce_cmq_send(hr_dev, desc, CONFIG_LLM_CMDQ_DESC_NUM); } static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, @@ -1369,16 +2378,23 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, case TSQ_LINK_TABLE: link_tbl = &priv->tsq; buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT); - pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz; + pg_num_a = hr_dev->caps.num_qps * QP_EX_DB_SIZE / buf_chk_sz; + /* + * Every transport service queue (TSQ) needs 2 pages plus 1 reserved + * page; it includes a tx queue and an rx queue. + */ + pg_num_b = hr_dev->caps.sl_num * 4 + 2; break; case TPQ_LINK_TABLE: link_tbl = &priv->tpq; buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT); - pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz; - pg_num_b = 2 * 4 * func_num + 2; + pg_num_a = hr_dev->caps.num_cqs * CQ_EX_DB_SIZE / buf_chk_sz; + /* every function needs 2 pages plus 2 reserved pages */ + pg_num_b = 2 * TIMEOUT_POLL_QUEUE_NUM * func_num + 2; break; default: + dev_err(hr_dev->dev, "Not supported link table type: 0x%x\n", + type); return -EINVAL; } @@ -1391,6 +2407,8 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, if (!link_tbl->table.buf) goto out; + memset(link_tbl->table.buf, 0, size); + link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list), GFP_KERNEL); if (!link_tbl->pg_list) @@ -1406,7 +2424,7 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev, link_tbl->pg_list[i].map = t; memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz); - entry[i].blk_ba0 = (t >> 12) & 0xffffffff; + entry[i].blk_ba0 = cpu_to_le32((t >> 12) & 0xffffffff); roce_set_field(entry[i].blk_ba1_nxt_ptr, HNS_ROCE_LINK_TABLE_BA1_M, HNS_ROCE_LINK_TABLE_BA1_S, @@ -1458,16 +2476,35 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev, link_tbl->table.map); } +static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev) +{ + hr_dev->reset_page = (void *)get_zeroed_page(GFP_KERNEL); + if (!hr_dev->reset_page) + return -ENOMEM; + + return 0; +} + static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; + int qpc_count; + int cqc_count; int ret; + int i; + + ret = hns_roce_v2_get_reset_page(hr_dev); + if (ret) { + dev_err(hr_dev->dev, + "reset state init failed, ret = %d.\n", ret); + return ret; + } /* TSQ includes SQ doorbell and ack doorbell */ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE); if (ret) { dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret); - return ret; + goto err_tsq_init_failed; } ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE); @@ -1476,11 +2513,48 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) 
goto err_tpq_init_failed; } + /* Alloc memory for QPC Timer buffer space chunk */ + for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; + qpc_count++) { + ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table.table, + qpc_count); + if (ret) { + dev_err(hr_dev->dev, "QPC Timer get failed\n"); + goto err_qpc_timer_failed; + } + } + + /* Alloc memory for CQC Timer buffer space chunk */ + for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; + cqc_count++) { + ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table.table, + cqc_count); + if (ret) { + dev_err(hr_dev->dev, "CQC Timer get failed\n"); + goto err_cqc_timer_failed; + } + } + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) + hns_roce_clear_extdb_list_info(hr_dev); + return 0; +err_cqc_timer_failed: + for (i = 0; i < cqc_count; i++) + hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table.table, i); + +err_qpc_timer_failed: + for (i = 0; i < qpc_count; i++) + hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table.table, i); + + hns_roce_free_link_table(hr_dev, &priv->tpq); + err_tpq_init_failed: hns_roce_free_link_table(hr_dev, &priv->tsq); +err_tsq_init_failed: + free_page((unsigned long)hr_dev->reset_page); + return ret; } @@ -1488,34 +2562,71 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) + hns_roce_function_clear(hr_dev); + hns_roce_free_link_table(hr_dev, &priv->tpq); hns_roce_free_link_table(hr_dev, &priv->tsq); + free_page((unsigned long)hr_dev->reset_page); +} + +static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_mbox_status *mb_st = + (struct hns_roce_mbox_status *)desc.data; + enum hns_roce_cmd_return_status status; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true); + + status = hns_roce_cmq_send(hr_dev, &desc, 1); + if (status) + return status; + + return le32_to_cpu(mb_st->mb_status_hw_run); } static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev) { - u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG); + u32 status = hns_roce_query_mbox_status(hr_dev); return status >> HNS_ROCE_HW_RUN_BIT_SHIFT; } static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev) { - u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG); + u32 status = hns_roce_query_mbox_status(hr_dev); return status & HNS_ROCE_HW_MB_STATUS_MASK; } +static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, + u64 out_param, u32 in_modifier, u8 op_modifier, + u16 op, u16 token, int event) +{ + struct hns_roce_cmq_desc desc; + struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; + + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); + + mb->in_param_l = cpu_to_le32(in_param); + mb->in_param_h = cpu_to_le32(in_param >> 32); + mb->out_param_l = cpu_to_le32(out_param); + mb->out_param_h = cpu_to_le32(out_param >> 32); + mb->cmd_tag = cpu_to_le32(in_modifier << HNS_ROCE_MB_TAG_S | op); + mb->token_event_en = cpu_to_le32(token); + mb->token_event_en |= cpu_to_le32(event << HNS_ROCE_MB_EVENT_EN_S); + + return hns_roce_cmq_send(hr_dev, &desc, 1); +} + static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event) { struct device *dev = hr_dev->dev; - u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + - ROCEE_VF_MB_CFG0_REG); unsigned long end; - u32 val0 = 
0; - u32 val1 = 0; + int ret; end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies; while (hns_roce_v2_cmd_pending(hr_dev)) { @@ -1527,34 +2638,19 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, cond_resched(); } - roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK, - HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier); - roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK, - HNS_ROCE_VF_MB4_CMD_SHIFT, op); - roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK, - HNS_ROCE_VF_MB5_EVENT_SHIFT, event); - roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK, - HNS_ROCE_VF_MB5_TOKEN_SHIFT, token); - - writeq(in_param, hcr + 0); - writeq(out_param, hcr + 2); - - /* Memory barrier */ - wmb(); - - writel(val0, hcr + 4); - writel(val1, hcr + 5); - - mmiowb(); + ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier, + op_modifier, op, token, event); + if (ret) + dev_err(dev, "Post mailbox fail(%d)\n", ret); - return 0; + return ret; } static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev, unsigned long timeout) { struct device *dev = hr_dev->dev; - unsigned long end = 0; + unsigned long end; u32 status; end = msecs_to_jiffies(timeout) + jiffies; @@ -1567,7 +2663,10 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev, } status = hns_roce_v2_cmd_complete(hr_dev); - if (status != 0x1) { + if (unlikely(status != HNS_ROCE_CMD_SUCCESS)) { + if (status == CMD_RST_PRC_EBUSY) + return status; + dev_err(dev, "mailbox status 0x%x!\n", status); return -EBUSY; } @@ -1602,17 +2701,23 @@ static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev, p = (u32 *)&gid->raw[8]; sgid_tb->vf_sgid_mh = cpu_to_le32(*p); - p = (u32 *)&gid->raw[0xc]; + p = (u32 *)&gid->raw[12]; sgid_tb->vf_sgid_h = cpu_to_le32(*p); return hns_roce_cmq_send(hr_dev, &desc, 1); } +#ifdef CONFIG_KERNEL_419 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr) +#else +static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, + int gid_index, union ib_gid *gid, + const struct ib_gid_attr *attr) +#endif { - enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; + enum hns_roce_sgid_type sgid_type; int ret; if (!gid || !attr) @@ -1630,7 +2735,8 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type); if (ret) - dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret); + dev_err(hr_dev->dev, "Configure sgid table failed(%d), gid index is %d, sgid type is %d!\n", + ret, gid_index, sgid_type); return ret; } @@ -1656,21 +2762,75 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M, CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); - smac_tb->vf_smac_l = reg_smac_l; + smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); return hns_roce_cmq_send(hr_dev, &desc, 1); } -static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx) +static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, + struct hns_roce_mr *mr) { - struct hns_roce_v2_mpt_entry *mpt_entry; struct scatterlist *sg; u64 page_addr; u64 *pages; - int i, j; - int len; int entry; + int len; + int i; + int j; + + mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); + mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); + roce_set_field(mpt_entry->byte_48_mode_ba, + V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S, + upper_32_bits(mr->pbl_ba 
>> 3)); + + pages = (u64 *)__get_free_page(GFP_KERNEL); + if (!pages) + return -ENOMEM; + + i = 0; + for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { + len = sg_dma_len(sg) >> mr->umem->page_shift; + for (j = 0; j < len; ++j) { + page_addr = sg_dma_address(sg) + + (j << mr->umem->page_shift); + pages[i] = page_addr >> 6; + /* Record the first 2 entry directly to MTPT table */ + if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1) + goto found; + i++; + } + } +found: + mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); + roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, + V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); + + mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); + roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, + V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); + + if (mr->type != MR_TYPE_UMM) + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + else + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + mr->umem->page_shift - 12); + + free_page((unsigned long)pages); + + return 0; +} + +static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, + unsigned long mtpt_idx) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + int ret; mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); @@ -1686,27 +2846,25 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, V2_MPT_BYTE_4_PD_S, mr->pd); - mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S, (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, + mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); - mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, - mr->type == MR_TYPE_MR ? 0 : 1); + (mr->type == MR_TYPE_MR || mr->type == MR_TYPE_UMM) ? 
0 : 1); roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S, 1); - mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa); mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); @@ -1717,53 +2875,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, if (mr->type == MR_TYPE_DMA) return 0; - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); + ret = set_mtpt_pbl(mpt_entry, mr); - mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(mr->pbl_ba >> 3)); - mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba); - - pages = (u64 *)__get_free_page(GFP_KERNEL); - if (!pages) - return -ENOMEM; - - i = 0; - for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) { - len = sg_dma_len(sg) >> PAGE_SHIFT; - for (j = 0; j < len; ++j) { - page_addr = sg_dma_address(sg) + - (j << mr->umem->page_shift); - pages[i] = page_addr >> 6; - - /* Record the first 2 entry directly to MTPT table */ - if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1) - goto found; - i++; - } - } - -found: - mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, - V2_MPT_BYTE_56_PA0_H_S, - upper_32_bits(pages[0])); - mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h); - - mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); - - free_page((unsigned long)pages); - - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); - mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1); - - return 0; + return ret; } static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, @@ -1772,6 +2886,10 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, u64 size, void *mb_buf) { struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; + int ret = 0; + + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID); if (flags & IB_MR_REREG_PD) { roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, @@ -1784,14 +2902,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, V2_MPT_BYTE_8_BIND_EN_S, (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_ATOMIC_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0)); + V2_MPT_BYTE_8_ATOMIC_EN_S, + mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0)); + mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, - (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); + mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, - (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); + mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 
1 : 0); } if (flags & IB_MR_REREG_TRANS) { @@ -1800,26 +2918,99 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, mpt_entry->len_l = cpu_to_le32(lower_32_bits(size)); mpt_entry->len_h = cpu_to_le32(upper_32_bits(size)); - mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); - mpt_entry->pbl_ba_l = - cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, - V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(mr->pbl_ba >> 3)); - mpt_entry->byte_48_mode_ba = - cpu_to_le32(mpt_entry->byte_48_mode_ba); - mr->iova = iova; mr->size = size; + + ret = set_mtpt_pbl(mpt_entry, mr); } + return ret; +} + +static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + + mpt_entry = mb_buf; + memset(mpt_entry, 0, sizeof(*mpt_entry)); + + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, + V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1); + roce_set_field(mpt_entry->byte_4_pd_hop_st, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, + mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, + V2_MPT_BYTE_4_PD_S, mr->pd); + mpt_entry->byte_4_pd_hop_st = mpt_entry->byte_4_pd_hop_st; + + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + + mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size); + + mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3)); + roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, + V2_MPT_BYTE_48_PBL_BA_H_S, + upper_32_bits(mr->pbl_ba >> 3)); + + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + + return 0; +} + +static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) +{ + struct hns_roce_v2_mpt_entry *mpt_entry; + + mpt_entry = mb_buf; + memset(mpt_entry, 0, sizeof(*mpt_entry)); + + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, + V2_MPT_BYTE_4_PD_S, mw->pdn); + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, + V2_MPT_BYTE_4_PBL_HOP_NUM_S, mw->pbl_hop_num == + HNS_ROCE_HOP_NUM_0 ? 0 : mw->pbl_hop_num); + roce_set_field(mpt_entry->byte_4_pd_hop_st, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, + V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, + mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S, + mw->ibmw.type == IB_MW_TYPE_1 ? 
0 : 1); + + roce_set_field(mpt_entry->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, + mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + + mpt_entry->lkey = cpu_to_le32(mw->rkey); + return 0; } static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) { - return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf, + return hns_roce_buf_offset(hr_cq->hr_buf.hr_buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE); } @@ -1837,6 +3028,22 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq) return get_sw_cqe_v2(hr_cq, hr_cq->cons_index); } +static void *get_srq_wqe(struct hns_roce_srq *srq, int n) +{ + return hns_roce_buf_offset(srq->buf, n << srq->wqe_shift); +} + +static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) +{ + /* always called with interrupts disabled. */ + spin_lock(&srq->lock); + + bitmap_clear(srq->idx_que.bitmap, wqe_index, 1); + srq->idx_que.tail++; + + spin_unlock(&srq->lock); +} + static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) { *hr_cq->set_ci_db = cons_index & 0xffffff; @@ -1848,11 +3055,12 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_v2_cqe *cqe, *dest; u32 prod_index; int nfreed = 0; + int wqe_index; u8 owner_bit; for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); ++prod_index) { - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) + if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) break; } @@ -1865,7 +3073,13 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M, V2_CQE_BYTE_16_LCL_QPN_S) & HNS_ROCE_V2_CQE_QPN_MASK) == qpn) { - /* In v1 engine, not support SRQ */ + if (srq && + roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) { + wqe_index = roce_get_field(cqe->byte_4, + V2_CQE_BYTE_4_WQE_INDX_M, + V2_CQE_BYTE_4_WQE_INDX_S); + hns_roce_free_srq_wqe(srq, wqe_index); + } ++nfreed; } else if (nfreed) { dest = get_cqe_v2(hr_cq, (prod_index + nfreed) & @@ -1892,9 +3106,9 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq) { - spin_lock_irq(&hr_cq->lock); + v2_spin_lock_irq(cq_lock, &hr_cq->lock); __hns_roce_v2_cq_clean(hr_cq, qpn, srq); - spin_unlock_irq(&hr_cq->lock); + v2_spin_unlock_irq(cq_lock, &hr_cq->lock); } static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, @@ -1903,6 +3117,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, u32 vector) { struct hns_roce_v2_cq_context *cq_context; + unsigned int cq_period = HNS_ROCE_V2_CQ_DEFAULT_INTERVAL; + unsigned int cq_max_cnt = HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM; cq_context = mb_buf; memset(cq_context, 0, sizeof(*cq_context)); @@ -1910,34 +3126,31 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M, V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID); roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M, - V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE); + V2_CQC_BYTE_4_ARM_ST_S, NO_ARMED); roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M, V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent)); roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M, V2_CQC_BYTE_4_CEQN_S, vector); - cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn); roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, V2_CQC_BYTE_8_CQN_S, hr_cq->cqn); - 
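/*
 * Illustrative sketch (not part of this patch): the CQC programming that
 * follows repeatedly splits a 64-bit MTT/DMA address into a 32-bit low
 * word written to its own register plus a high part packed into a
 * bit-field (cqe_cur_blk_addr with byte_16, cqe_nxt_blk_addr with
 * byte_24, cqe_ba with byte_40).  The helper below shows that pattern in
 * isolation; the function name is an editorial stand-in, not driver API.
 */
#include <stdint.h>

static void example_split_hw_addr(uint64_t dma, unsigned int shift,
				  uint32_t *lo, uint32_t *hi)
{
	uint64_t v = dma >> shift;	/* e.g. shift == PAGE_ADDR_SHIFT above */

	*lo = (uint32_t)v;		/* written as e.g. cqe_cur_blk_addr */
	*hi = (uint32_t)(v >> 32);	/* packed into the matching bit-field */
}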
cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); - cq_context->cqe_cur_blk_addr = - cpu_to_le32(cq_context->cqe_cur_blk_addr); + cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); roce_set_field(cq_context->byte_16_hop_addr, V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M, V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S, - cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT))); + (mtts[0]) >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(cq_context->byte_16_hop_addr, V2_CQC_BYTE_16_CQE_HOP_NUM_M, V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num); - cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT); + cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); roce_set_field(cq_context->byte_24_pgsz_addr, V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M, V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S, - cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT))); + (mtts[1]) >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(cq_context->byte_24_pgsz_addr, V2_CQC_BYTE_24_CQE_BA_PG_SZ_M, V2_CQC_BYTE_24_CQE_BA_PG_SZ_S, @@ -1947,7 +3160,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S, hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET); - cq_context->cqe_ba = (u32)(dma_handle >> 3); + cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3); roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M, V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3))); @@ -1960,24 +3173,30 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, V2_CQC_BYTE_44_DB_RECORD_ADDR_M, V2_CQC_BYTE_44_DB_RECORD_ADDR_S, ((u32)hr_cq->db.dma) >> 1); - cq_context->db_record_addr = hr_cq->db.dma >> 32; + cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32); + + if (cq_period * HNS_ROCE_CLOCK_ADJUST > 0xFFFF) { + dev_info(hr_dev->dev, "Config cq_period param(0x%x) out of range for write_cqc, adjusted to 65.\n", + cq_period); + cq_period = HNS_ROCE_MAX_CQ_PERIOD; + } roce_set_field(cq_context->byte_56_cqe_period_maxcnt, V2_CQC_BYTE_56_CQ_MAX_CNT_M, - V2_CQC_BYTE_56_CQ_MAX_CNT_S, - HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM); + V2_CQC_BYTE_56_CQ_MAX_CNT_S, cq_max_cnt); roce_set_field(cq_context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_PERIOD_M, + V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S, - HNS_ROCE_V2_CQ_DEFAULT_INTERVAL); + cq_period * HNS_ROCE_CLOCK_ADJUST); } static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); u32 notification_flag; - u32 doorbell[2]; + __le32 doorbell[2]; doorbell[0] = 0; doorbell[1] = 0; @@ -1985,8 +3204,8 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL; /* - * flags = 0; Notification Flag = 1, next - * flags = 1; Notification Flag = 0, solocited + * flags is 0; Notification Flag is 1, next + * flags is 1; Notification Flag is 0, solocited */ roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S, hr_cq->cqn); @@ -2000,7 +3219,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S, notification_flag); - hns_roce_write64_k(doorbell, hr_cq->cq_db_l); + hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l); return 0; } @@ -2010,9 +3229,13 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, struct ib_wc *wc) { struct hns_roce_rinl_sge *sge_list; - u32 wr_num, wr_cnt, sge_num; - u32 sge_cnt, data_len, size; void *wqe_buf; + u32 data_len; + u32 sge_num; + u32 sge_cnt; + u32 wr_num; + u32 wr_cnt; + u32 size; wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M, V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff; @@ -2039,15 +3262,59 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, return 0; } +static void sw_comp(struct hns_roce_qp *hr_qp, int num_entries, + struct ib_wc *wc, int *npolled, struct hns_roce_wq *wq) +{ + unsigned int left; + int np; + int i; + + left = wq->head - wq->tail; + np = *npolled; + + if (left == 0) + return; + + for (i = 0; i < left && np < num_entries; i++) { + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + wc->status = IB_WC_WR_FLUSH_ERR; + wc->vendor_err = 0; + wc->qp = &hr_qp->ibqp; + wq->tail++; + np++; + wc++; + } + *npolled = np; +} + +static void hns_roce_v2_poll_sw_cq(struct hns_roce_cq *hr_cq, int num_entries, + struct ib_wc *wc, int *npolled) +{ + struct hns_roce_qp *hr_qp; + + *npolled = 0; + + list_for_each_entry(hr_qp, &hr_cq->sq_list, send_list) { + sw_comp(hr_qp, num_entries, wc + *npolled, npolled, &hr_qp->sq); + if (*npolled >= num_entries) + return; + } + + list_for_each_entry(hr_qp, &hr_cq->rq_list, recv_list) { + sw_comp(hr_qp, num_entries, wc + *npolled, npolled, &hr_qp->rq); + if (*npolled >= num_entries) + return; + } +} + static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, struct hns_roce_qp **cur_qp, struct ib_wc *wc) { - struct hns_roce_dev *hr_dev; + struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); + struct hns_roce_srq *srq = NULL; struct hns_roce_v2_cqe *cqe; struct hns_roce_qp *hr_qp; struct hns_roce_wq *wq; - struct ib_qp_attr attr; - int attr_mask; int is_send; u16 wqe_ctr; u32 opcode; @@ -2071,10 +3338,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, V2_CQE_BYTE_16_LCL_QPN_S); if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) { - hr_dev = to_hr_dev(hr_cq->ib_cq.device); hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); if (unlikely(!hr_qp)) { - dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n", + dev_err(hr_dev->dev, "CQ 0x%06lx with entry for unknown QPN 0x%06x\n", hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK)); return -EINVAL; } @@ -2084,6 +3350,37 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, wc->qp = &(*cur_qp)->ibqp; wc->vendor_err = 0; + if (is_send) { + wq = &(*cur_qp)->sq; + if ((*cur_qp)->sq_signal_bits) { + /* + * If sg_signal_bit is 1, + * firstly tail pointer updated to wqe + * which current cqe correspond to + */ + wqe_ctr = (u16)roce_get_field(cqe->byte_4, + V2_CQE_BYTE_4_WQE_INDX_M, + V2_CQE_BYTE_4_WQE_INDX_S); + wq->tail += (wqe_ctr - (u16)wq->tail) & + (wq->wqe_cnt - 1); + } + + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; 
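/*
 * Illustrative sketch (not from this patch): sw_comp() above drains a
 * work queue by synthesizing IB_WC_WR_FLUSH_ERR completions for every
 * WQE that is posted but not yet completed (head - tail entries),
 * bounded by the poller's budget.  The types and names below are
 * simplified editorial stand-ins, not the driver's structures.
 */
#include <stdint.h>

struct example_wq {
	uint32_t head;		/* producer index */
	uint32_t tail;		/* consumer index */
	uint32_t wqe_cnt;	/* power of two, as in the driver's rings */
	uint64_t *wrid;
};

/* Report up to 'budget' flushed wr_ids; returns how many were produced. */
static int example_sw_flush(struct example_wq *wq, uint64_t *wr_ids,
			    int budget)
{
	uint32_t left = wq->head - wq->tail;	/* unsigned wrap is fine */
	int n = 0;

	while (left && n < budget) {
		wr_ids[n++] = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wq->tail++;
		left--;
	}
	return n;
}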
+ } else if ((*cur_qp)->ibqp.srq) { + srq = to_hr_srq((*cur_qp)->ibqp.srq); + wqe_ctr = roce_get_field(cqe->byte_4, + V2_CQE_BYTE_4_WQE_INDX_M, + V2_CQE_BYTE_4_WQE_INDX_S); + wc->wr_id = srq->wrid[wqe_ctr & (srq->max - 1)]; + hns_roce_free_srq_wqe(srq, wqe_ctr); + } else { + /* Update tail pointer, record wr_id */ + wq = &(*cur_qp)->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; + } + status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M, V2_CQE_BYTE_4_STATUS_S); switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) { @@ -2135,13 +3432,18 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, } /* flush cqe if wc status is error, excluding flush error */ - if ((wc->status != IB_WC_SUCCESS) && - (wc->status != IB_WC_WR_FLUSH_ERR)) { - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp, - &attr, attr_mask, - (*cur_qp)->state, IB_QPS_ERR); + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) { + dev_err(hr_dev->dev, "error cqe status is: 0x%x\n", + status & HNS_ROCE_V2_CQE_STATUS_MASK); + dev_err(hr_dev->dev, + "dump cqe:%08x %08x %08x %08x %08x %08x %08x\n", + cqe->byte_4, cqe->rkey, cqe->byte_12, cqe->byte_16, + cqe->byte_cnt, cqe->byte_28, cqe->byte_32); + if (qp_lock) + init_flush_work(hr_dev, *cur_qp); + + return 0; } if (wc->status == IB_WC_WR_FLUSH_ERR) @@ -2179,19 +3481,19 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, break; case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP: wc->opcode = IB_WC_COMP_SWAP; - wc->byte_len = 8; + wc->byte_len = le32_to_cpu(cqe->byte_cnt); break; case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD: wc->opcode = IB_WC_FETCH_ADD; - wc->byte_len = 8; + wc->byte_len = le32_to_cpu(cqe->byte_cnt); break; case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP: wc->opcode = IB_WC_MASKED_COMP_SWAP; - wc->byte_len = 8; + wc->byte_len = le32_to_cpu(cqe->byte_cnt); break; case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD: wc->opcode = IB_WC_MASKED_FETCH_ADD; - wc->byte_len = 8; + wc->byte_len = le32_to_cpu(cqe->byte_cnt); break; case HNS_ROCE_SQ_OPCODE_FAST_REG_WR: wc->opcode = IB_WC_REG_MR; @@ -2204,22 +3506,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, break; } - wq = &(*cur_qp)->sq; - if ((*cur_qp)->sq_signal_bits) { - /* - * If sg_signal_bit is 1, - * firstly tail pointer updated to wqe - * which current cqe correspond to - */ - wqe_ctr = (u16)roce_get_field(cqe->byte_4, - V2_CQE_BYTE_4_WQE_INDX_M, - V2_CQE_BYTE_4_WQE_INDX_S); - wq->tail += (wqe_ctr - (u16)wq->tail) & - (wq->wqe_cnt - 1); - } - - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; + hr_cq->dfx_cnt[HNS_ROCE_SQ_CQE]++; } else { /* RQ correspond to CQE */ wc->byte_len = le32_to_cpu(cqe->byte_cnt); @@ -2264,34 +3551,40 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, return -EAGAIN; } - /* Update tail pointer, record wr_id */ - wq = &(*cur_qp)->rq; - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M, V2_CQE_BYTE_32_SL_S); wc->src_qp = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M, V2_CQE_BYTE_32_RMT_QPN_S); + wc->slid = 0; wc->wc_flags |= (roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ? 
IB_WC_GRH : 0); wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S); wc->pkey_index = 0; - memcpy(wc->smac, cqe->smac, 4); + memcpy(wc->smac, cqe->smac, sizeof(u32)); wc->smac[4] = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_SMAC_4_M, V2_CQE_BYTE_28_SMAC_4_S); wc->smac[5] = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_SMAC_5_M, V2_CQE_BYTE_28_SMAC_5_S); - wc->vlan_id = 0xffff; - wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); + wc->wc_flags |= IB_WC_WITH_SMAC; + if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) { + wc->vlan_id = (u16)roce_get_field(cqe->byte_28, + V2_CQE_BYTE_28_VID_M, + V2_CQE_BYTE_28_VID_S); + wc->wc_flags |= IB_WC_WITH_VLAN; + } else { + wc->vlan_id = 0xffff; + } + wc->network_hdr_type = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_PORT_TYPE_M, V2_CQE_BYTE_28_PORT_TYPE_S); + + hr_cq->dfx_cnt[HNS_ROCE_RQ_CQE]++; } return 0; @@ -2300,12 +3593,18 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); struct hns_roce_qp *cur_qp = NULL; - unsigned long flags; + unsigned long flags = 0; int npolled; - spin_lock_irqsave(&hr_cq->lock, flags); + v2_spin_lock_irqsave(cq_lock, &hr_cq->lock, &flags); + + if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) { + hns_roce_v2_poll_sw_cq(hr_cq, num_entries, wc, &npolled); + goto out; + } for (npolled = 0; npolled < num_entries; ++npolled) { if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled)) @@ -2318,29 +3617,71 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index); } - spin_unlock_irqrestore(&hr_cq->lock, flags); +out: + v2_spin_unlock_irqrestore(cq_lock, &hr_cq->lock, &flags); return npolled; } +static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, + int step_idx) +{ + int op; + + switch (type) { + case HEM_TYPE_QPC: + op = HNS_ROCE_CMD_WRITE_QPC_BT0; + break; + case HEM_TYPE_MTPT: + op = HNS_ROCE_CMD_WRITE_MPT_BT0; + break; + case HEM_TYPE_CQC: + op = HNS_ROCE_CMD_WRITE_CQC_BT0; + break; + case HEM_TYPE_SRQC: + op = HNS_ROCE_CMD_WRITE_SRQC_BT0; + break; + case HEM_TYPE_SCC_CTX: + if (step_idx) { + /* No need to notify Hardware when step_idx is 1 or 2 */ + return -EINVAL; + } + op = HNS_ROCE_CMD_WRITE_SCC_CTX_BT0; + break; + case HEM_TYPE_QPC_TIMER: + op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; + break; + case HEM_TYPE_CQC_TIMER: + op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; + break; + default: + dev_warn(hr_dev->dev, + "Table %d not to be written by mailbox!\n", type); + return -EINVAL; + } + + return op + step_idx; +} + static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, int step_idx) { - struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; struct hns_roce_hem_iter iter; struct hns_roce_hem_mhop mhop; struct hns_roce_hem *hem; unsigned long mhop_obj = obj; - int i, j, k; int ret = 0; u64 hem_idx = 0; u64 l1_idx = 0; u64 bt_ba = 0; u32 chunk_ba_num; u32 hop_num; - u16 op = 0xff; + int op; + int i; + int j; + int k; if (!hns_roce_check_whether_mhop(hr_dev, table->type)) return 0; @@ -2350,7 +3691,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, j = mhop.l1_idx; k = mhop.l2_idx; hop_num = mhop.hop_num; - chunk_ba_num = mhop.bt_chunk_size / 8; + chunk_ba_num = mhop.bt_chunk_size / BA_BYTE_LEN; if (hop_num == 2) { hem_idx = i * chunk_ba_num 
* chunk_ba_num + j * chunk_ba_num + @@ -2362,30 +3703,17 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, hem_idx = i; } - switch (table->type) { - case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_WRITE_QPC_BT0; - break; - case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_WRITE_MPT_BT0; - break; - case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_WRITE_CQC_BT0; - break; - case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_WRITE_SRQC_BT0; - break; - default: - dev_warn(dev, "Table %d not to be written by mailbox!\n", - table->type); + op = get_op_for_set_hem(hr_dev, table->type, step_idx); + if (op == -EINVAL) return 0; - } - op += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (table->type == HEM_TYPE_SCC_CTX) + obj = mhop.l0_idx; + if (check_whether_last_step(hop_num, step_idx)) { hem = table->hem[hem_idx]; for (hns_roce_hem_first(hem, &iter); @@ -2412,17 +3740,16 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, +static void hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, int step_idx) { struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; - int ret = 0; u16 op = 0xff; if (!hns_roce_check_whether_mhop(hr_dev, table->type)) - return 0; + return; switch (table->type) { case HEM_TYPE_QPC: @@ -2434,32 +3761,35 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, case HEM_TYPE_CQC: op = HNS_ROCE_CMD_DESTROY_CQC_BT0; break; + case HEM_TYPE_SCC_CTX: + case HEM_TYPE_QPC_TIMER: + case HEM_TYPE_CQC_TIMER: + /* there is no need to destroy these ctx */ + return; case HEM_TYPE_SRQC: op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; break; default: dev_warn(dev, "Table %d not to be destroyed by mailbox!\n", table->type); - return 0; + return; } op += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); + return; /* configure the tag and op */ - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + if (hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, + HNS_ROCE_CMD_TIMEOUT_MSECS)) + dev_warn(dev, "Failed to clear HEM.\n"); hns_roce_free_cmd_mailbox(hr_dev, mailbox); - return ret; + } static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, - struct hns_roce_mtt *mtt, - enum ib_qp_state cur_state, - enum ib_qp_state new_state, struct hns_roce_v2_qp_context *context, struct hns_roce_qp *hr_qp) { @@ -2470,6 +3800,10 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + /* + * The context include qp context and qp mask context, + * it needs to be guaranteed to be continuous + */ memcpy(mailbox->buf, context, sizeof(*context) * 2); ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, @@ -2509,17 +3843,101 @@ static void set_access_flags(struct hns_roce_qp *hr_qp, roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); + + roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, + !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0); } -static void modify_qp_reset_to_init(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, - int attr_mask, - struct hns_roce_v2_qp_context *context, - struct hns_roce_v2_qp_context *qpc_mask) +static inline enum ib_qp_state 
to_ib_qp_st(enum hns_roce_v2_qp_state state) +{ + switch (state) { + case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET; + case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT; + case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR; + case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS; + case HNS_ROCE_QP_ST_SQ_DRAINING: + case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD; + case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE; + case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum hns_roce_v2_qp_state to_hns_roce_qp_st( + enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return HNS_ROCE_QP_ST_RST; + case IB_QPS_INIT: return HNS_ROCE_QP_ST_INIT; + case IB_QPS_RTR: return HNS_ROCE_QP_ST_RTR; + case IB_QPS_RTS: return HNS_ROCE_QP_ST_RTS; + case IB_QPS_SQD: return HNS_ROCE_QP_ST_SQD; + case IB_QPS_SQE: return HNS_ROCE_QP_ST_SQER; + case IB_QPS_ERR: return HNS_ROCE_QP_ST_ERR; + default: return -1; + } +} + +static void hns_roce_get_cqs(struct ib_qp *ibqp, struct hns_roce_cq **send_cq, + struct hns_roce_cq **recv_cq) +{ + switch (ibqp->qp_type) { + case IB_QPT_XRC_TGT: + *send_cq = to_hr_cq(to_hr_xrcd(ibqp->xrcd)->cq); + *recv_cq = *send_cq; + break; + case IB_QPT_XRC_INI: + *send_cq = ibqp->send_cq ? to_hr_cq(ibqp->send_cq) : NULL; + *recv_cq = *send_cq; + break; + default: + *send_cq = ibqp->send_cq ? to_hr_cq(ibqp->send_cq) : NULL; + *recv_cq = ibqp->recv_cq ? to_hr_cq(ibqp->recv_cq) : NULL; + break; + } +} + +static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, + V2_QPC_BYTE_4_SGE_SHIFT_S, + to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, + hr_qp->sge.sge_shift)); + + roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, + V2_QPC_BYTE_4_SGE_SHIFT_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, + ilog2((unsigned int)hr_qp->sq.wqe_cnt)); + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, + (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || + hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || + hr_qp->ibqp.srq) ? 0 : + ilog2((unsigned int)hr_qp->rq.wqe_cnt)); + + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); +} + +static void modify_qp_reset_to_init(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct hns_roce_cq *send_cq, *recv_cq; + hns_roce_get_cqs(ibqp, &send_cq, &recv_cq); /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. If software need modify some fields in context, @@ -2531,53 +3949,42 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, 0); - if (ibqp->qp_type == IB_QPT_GSI) - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - ilog2((unsigned int)hr_qp->sge.sge_cnt)); - else - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - hr_qp->sq.max_gs > 2 ? 
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); - - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, 0); - roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, 0); roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, - V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); + V2_QPC_BYTE_16_PD_S, + (hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) ? + to_hr_pd(to_hr_xrcd(ibqp->xrcd)->pd)->pdn : + to_hr_pd(ibqp->pd)->pdn); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, - V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs)); + V2_QPC_BYTE_20_RQWS_S, + (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || + hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 : + ilog2(hr_qp->rq.max_gs)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M, V2_QPC_BYTE_20_RQWS_S, 0); - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); - - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); + set_qpc_wqe_cnt(hr_qp, context, qpc_mask); /* No VLAN need to set 0xFFF */ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, 0xfff); roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, V2_QPC_BYTE_24_VLAN_ID_S, 0); + roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S, + 0); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQ_VLAN_EN_S, + 0); + roce_set_bit(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S, + 0); + roce_set_bit(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_VLAN_EN_S, + 0); /* * Set some fields in context to zero, Because the default values @@ -2589,28 +3996,24 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0); roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0); - roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M, - V2_QPC_BYTE_60_MAPID_S, 0); + roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M, + V2_QPC_BYTE_60_TEMPID_S, hr_dev->mac_id); + roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M, + V2_QPC_BYTE_60_TEMPID_S, 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, - V2_QPC_BYTE_60_INNER_MAP_IND_S, 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S, - 0); - roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S, - 0); + roce_set_field(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S, + 0); + roce_set_bit(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_SQ_DB_DOING_S, 0); + roce_set_bit(qpc_mask->byte_60_qpst_tempid, + V2_QPC_BYTE_60_RQ_DB_DOING_S, 0); 
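/*
 * Illustrative sketch (not from this patch): as the comments in these
 * hunks note, QPC updates are issued as a (context, mask) pair - a field
 * only takes effect when its bits are cleared in the mask, while mask
 * bits left at 1 tell hardware to keep the old value.  Minimal
 * stand-alone model of that convention; the names are editorial, not the
 * driver's roce_set_field()/roce_set_bit() helpers.
 */
#include <stdint.h>
#include <string.h>

struct example_qpc {
	uint32_t word[64];
};

static void example_begin_modify(struct example_qpc *ctx,
				 struct example_qpc *mask)
{
	memset(ctx, 0, sizeof(*ctx));
	memset(mask, 0xff, sizeof(*mask));	/* default: keep every field */
}

static void example_apply_field(struct example_qpc *ctx,
				struct example_qpc *mask, int word,
				uint32_t field_mask, unsigned int shift,
				uint32_t val)
{
	ctx->word[word] = (ctx->word[word] & ~field_mask) |
			  ((val << shift) & field_mask);
	mask->word[word] &= ~field_mask;	/* 0 means "apply this field" */
}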
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0); - if (attr_mask & IB_QP_QKEY) { - context->qkey_xrcd = attr->qkey; + if (to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) { + context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn); qpc_mask->qkey_xrcd = 0; - hr_qp->qkey = attr->qkey; } if (hr_qp->rdb_en) { @@ -2620,6 +4023,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0); } + /* + * 63 bits of the record db address are needed for hardware: + * first, right shift 1 bit and fill the low 31 bits into the low + * positions of rq_db_record_address field; then right shift 32 bits + * and fill the high 32 bits into the high positions of + * rq_db_record_address + */ roce_set_field(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, @@ -2627,15 +4037,16 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_68_rq_db, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M, V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0); - context->rq_db_record_addr = hr_qp->rdb.dma >> 32; + context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32); qpc_mask->rq_db_record_addr = 0; - roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0); + if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI) + roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, + !!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0); roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, - V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn); + V2_QPC_BYTE_80_RX_CQN_S, recv_cq->cqn); roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, 0); if (ibqp->srq) { @@ -2685,7 +4096,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); - roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0); + roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S, + 0); roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0); roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M, @@ -2694,8 +4106,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M, V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0); - roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S, - 0); roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M, V2_QPC_BYTE_144_RAQ_CREDIT_S, 0); roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0); @@ -2721,14 +4131,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M, V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0); - roce_set_field(context->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0); - + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0); + 
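/*
 * Illustrative sketch (not from this patch): per the comment above, the
 * 64-bit RQ doorbell-record DMA address is handed to hardware as 63
 * bits - bits [31:1] go into the RQ_DB_RECORD_ADDR field of
 * byte_68_rq_db, bits [63:32] into the rq_db_record_addr word.
 * Stand-alone model of that split; the function name is an editorial
 * stand-in.
 */
#include <stdint.h>

static void example_split_db_record(uint64_t dma, uint32_t *low31,
				    uint32_t *high32)
{
	*low31  = (uint32_t)(dma >> 1) & 0x7fffffff;	/* bits 31..1 */
	*high32 = (uint32_t)(dma >> 32);		/* bits 63..32 */
}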
roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0); roce_set_bit(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0); roce_set_bit(qpc_mask->byte_168_irrl_idx, @@ -2737,8 +4145,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_168_IRRL_IDX_LSB_M, V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0); - roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M, - V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4); roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M, V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0); @@ -2746,6 +4152,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S, 0); + roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1); + roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0); + roce_set_field(qpc_mask->byte_176_msg_pktn, V2_QPC_BYTE_176_MSG_USE_PKTN_M, V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0); @@ -2790,6 +4199,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_232_IRRL_SGE_IDX_M, V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S, + 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, + V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0); + roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S, + 0); + qpc_mask->irrl_cur_sge_offset = 0; roce_set_field(qpc_mask->byte_240_irrl_tail, @@ -2802,6 +4218,10 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_240_RX_ACK_MSN_M, V2_QPC_BYTE_240_RX_ACK_MSN_S, 0); + roce_set_bit(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_LCL_OP_FLG_S, + 0); + roce_set_bit(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_IRRL_RD_FLG_S, 0); roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M, V2_QPC_BYTE_248_IRRL_PSN_S, 0); roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S, @@ -2817,9 +4237,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, 0); hr_qp->access_flags = attr->qp_access_flags; - hr_qp->pkey_index = attr->pkey_index; roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, - V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); + V2_QPC_BYTE_252_TX_CQN_S, send_cq->cqn); roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, 0); @@ -2840,7 +4259,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct hns_roce_cq *send_cq, *recv_cq; + hns_roce_get_cqs(ibqp, &send_cq, &recv_cq); /* * In v2 engine, software pass context and context mask to hardware * when modifying qp. If software need modify some fields in context, @@ -2852,20 +4273,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M, V2_QPC_BYTE_4_TST_S, 0); - if (ibqp->qp_type == IB_QPT_GSI) - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, - ilog2((unsigned int)hr_qp->sge.sge_cnt)); - else - roce_set_field(context->byte_4_sqpn_tst, - V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ? 
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0); - - roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M, - V2_QPC_BYTE_4_SGE_SHIFT_S, 0); - if (attr_mask & IB_QP_ACCESS_FLAGS) { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); @@ -2883,6 +4290,13 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); + + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_EXT_ATE_S, + !!(attr->qp_access_flags & + IB_ACCESS_REMOTE_ATOMIC)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_EXT_ATE_S, 0); } else { roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ)); @@ -2898,32 +4312,30 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC)); roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0); - } - - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0); - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0); + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_EXT_ATE_S, + !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_EXT_ATE_S, + 0); + } roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, - V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn); + V2_QPC_BYTE_16_PD_S, + (hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) ? 
+ to_hr_pd(to_hr_xrcd(ibqp->xrcd)->pd)->pdn : + to_hr_pd(ibqp->pd)->pdn); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M, V2_QPC_BYTE_16_PD_S, 0); roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, - V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn); + V2_QPC_BYTE_80_RX_CQN_S, recv_cq->cqn); roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, V2_QPC_BYTE_80_RX_CQN_S, 0); roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, - V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); + V2_QPC_BYTE_252_TX_CQN_S, send_cq->cqn); roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, 0); @@ -2939,11 +4351,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0); } - if (attr_mask & IB_QP_QKEY) { - context->qkey_xrcd = attr->qkey; - qpc_mask->qkey_xrcd = 0; - } - roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, @@ -2955,13 +4362,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0); } - roce_set_field(context->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(qpc_mask->byte_168_irrl_idx, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_M, - V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0); } static int modify_qp_init_to_rtr(struct ib_qp *ibqp, @@ -2973,23 +4373,25 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = hr_dev->dev; + u64 mtts[MTT_MIN_COUNT] = { 0 }; dma_addr_t dma_handle_3; dma_addr_t dma_handle_2; - dma_addr_t dma_handle; - u32 page_size; + u64 wqe_sge_ba; + u8 lp_pktn_ini; + enum ib_mtu mtu; u8 port_num; u64 *mtts_3; u64 *mtts_2; - u64 *mtts; + int count; u8 *dmac; - u8 *smac; int port; /* Search qp buf's mtts */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (!mtts) { - dev_err(dev, "qp buf pa find failed\n"); + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, + mtts, ARRAY_SIZE(mtts), &wqe_sge_ba); + if (hr_qp->rq.wqe_cnt && count < 1) { + dev_err(dev, "failed to find RQ WQE, QPN = 0x%lx.\n", + hr_qp->qpn); return -EINVAL; } @@ -2997,7 +4399,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, hr_qp->qpn, &dma_handle_2); if (!mtts_2) { - dev_err(dev, "qp irrl_table find failed\n"); + dev_err(dev, "qp(0x%lx) irrl_table find failed\n", hr_qp->qpn); return -EINVAL; } @@ -3005,7 +4407,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, hr_qp->qpn, &dma_handle_3); if (!mtts_3) { - dev_err(dev, "qp trrl_table find failed\n"); + dev_err(dev, "qp(0x%lx) trrl_table find failed\n", hr_qp->qpn); return -EINVAL; } @@ -3015,7 +4417,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, } dmac = (u8 *)attr->ah_attr.roce.dmac; - context->wqe_sge_ba = (u32)(dma_handle >> 3); + context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; /* @@ -3025,22 +4427,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, * 0 at the same time, else set them to 0x1. 
*/ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, - V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3)); + V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3)); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M, V2_QPC_BYTE_12_WQE_SGE_BA_S, 0); roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, - hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ? - 0 : hr_dev->caps.mtt_hop_num); + to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num, + hr_qp->sq.wqe_cnt)); roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M, V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0); roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, - ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ? - hr_dev->caps.mtt_hop_num : 0); + to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num, + hr_qp->sge.sge_cnt)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGE_HOP_NUM_M, V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0); @@ -3048,8 +4450,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, - hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ? - 0 : hr_dev->caps.mtt_hop_num); + to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num, + hr_qp->rq.wqe_cnt)); roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQ_HOP_NUM_M, V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0); @@ -3057,7 +4459,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, - hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET); + to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0); @@ -3065,57 +4467,38 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, - hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET); + to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); - roce_set_field(context->byte_80_rnr_rx_cqn, - V2_QPC_BYTE_80_MIN_RNR_TIME_M, - V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer); - roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, - V2_QPC_BYTE_80_MIN_RNR_TIME_M, - V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0); - - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); - context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size] - >> PAGE_ADDR_SHIFT); + context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT); qpc_mask->rq_cur_blk_addr = 0; roce_set_field(context->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, - mtts[hr_qp->rq.offset / page_size] - >> (32 + PAGE_ADDR_SHIFT)); + mtts[0] >> (32 + PAGE_ADDR_SHIFT)); roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0); - context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1] - >> PAGE_ADDR_SHIFT); + context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT); qpc_mask->rq_nxt_blk_addr = 0; roce_set_field(context->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, - mtts[hr_qp->rq.offset / page_size + 1] - >> (32 + PAGE_ADDR_SHIFT)); + mtts[1] >> (32 + 
PAGE_ADDR_SHIFT)); roce_set_field(qpc_mask->byte_104_rq_sge, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0); - roce_set_field(context->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_EPSN_M, - V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn); - roce_set_field(qpc_mask->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_EPSN_M, - V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0); - roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, V2_QPC_BYTE_132_TRRL_BA_S, 0); - context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4)); + context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4)); qpc_mask->trrl_ba = 0; roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, @@ -3123,7 +4506,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M, V2_QPC_BYTE_140_TRRL_BA_S, 0); - context->irrl_ba = (u32)(dma_handle_2 >> 6); + context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6); qpc_mask->irrl_ba = 0; roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M, V2_QPC_BYTE_208_IRRL_BA_S, @@ -3135,29 +4518,19 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0); roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S, - hr_qp->sq_signal_bits); + le32_to_cpu(hr_qp->sq_signal_bits)); roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S, 0); port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port; - smac = (u8 *)hr_dev->dev_addr[port]; - /* when dmac equals smac or loop_idc is 1, it should loopback */ - if (ether_addr_equal_unaligned(dmac, smac) || - hr_dev->loop_idc == 0x1) { - roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1); + /* when loop_idc is 1, it should loopback */ + if (ibqp->qp_type == IB_QPT_RC) { + roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, + hr_dev->loop_idc); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0); } - if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && - attr->max_dest_rd_atomic) { - roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, - V2_QPC_BYTE_140_RR_MAX_S, - fls(attr->max_dest_rd_atomic - 1)); - roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, - V2_QPC_BYTE_140_RR_MAX_S, 0); - } - if (attr_mask & IB_QP_DEST_QPN) { roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); @@ -3175,27 +4548,32 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); - memcpy(&(context->dmac), dmac, 4); + memcpy(&(context->dmac), dmac, sizeof(u32)); roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4]))); qpc_mask->dmac = 0; roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, 0); - roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, - V2_QPC_BYTE_56_LP_PKTN_INI_S, 4); - roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, - V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) + mtu = IB_MTU_4096; + else + mtu = attr->path_mtu; + + if (attr_mask & IB_QP_PATH_MTU) { roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); - else 
if (attr_mask & IB_QP_PATH_MTU) - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, attr->path_mtu); + V2_QPC_BYTE_24_MTU_S, mtu); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S, 0); + } - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, 0); +#define MAX_LP_MSG_LEN 65536 + /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */ + lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)); + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, + V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini); + roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, + V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); roce_set_field(context->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, @@ -3207,6 +4585,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M, V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0); + /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */ + roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M, + V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini); + roce_set_field(qpc_mask->byte_172_sq_psn, + V2_QPC_BYTE_172_ACK_REQ_FREQ_M, + V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0); roce_set_bit(qpc_mask->byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0); roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M, @@ -3218,16 +4602,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, context->rq_rnr_timer = 0; qpc_mask->rq_rnr_timer = 0; - roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, - V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1); - roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, - V2_QPC_BYTE_152_RAQ_PSN_S, 0); - roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M, V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0); + /* rocee send 2^lp_sgen_ini segs every time */ roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_LP_SGEN_INI_M, V2_QPC_BYTE_168_LP_SGEN_INI_S, 3); @@ -3246,18 +4626,29 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct device *dev = hr_dev->dev; - dma_addr_t dma_handle; - u32 page_size; - u64 *mtts; - - /* Search qp buf's mtts */ - mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, - hr_qp->mtt.first_seg, &dma_handle); - if (!mtts) { - dev_err(dev, "qp buf pa find failed\n"); + u64 sge_cur_blk = 0; + u64 sq_cur_blk = 0; + int count; + + /* search qp buf's mtts */ + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset, + &sq_cur_blk, 1, NULL); + if (count < 1) { + dev_err(dev, "failed to find QP(0x%lx) SQ buf.\n", hr_qp->qpn); return -EINVAL; } + if (hr_qp->sge.sge_cnt > 0) { + count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + hr_qp->sge.offset, &sge_cur_blk, + 1, NULL); + if (count < 1) { + dev_err(dev, "failed to find QP(0x%lx) SGE buf.\n", + hr_qp->qpn); + return -EINVAL; + } + } + /* Not support alternate path and path migration */ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_PATH_MIG_STATE)) { @@ -3271,44 +4662,38 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, * we should set all bits of the relevant fields in context mask to * 0 at the same time, else set them to 0x1. 
*/ - roce_set_field(context->byte_60_qpst_mapid, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_60_qpst_mapid, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M, - V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0); - - context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); + context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT); roce_set_field(context->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, - mtts[0] >> (32 + PAGE_ADDR_SHIFT)); + sq_cur_blk >> (32 + PAGE_ADDR_SHIFT)); qpc_mask->sq_cur_blk_addr = 0; roce_set_field(qpc_mask->byte_168_irrl_idx, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0); - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); - context->sq_cur_sge_blk_addr = - ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ? - ((u32)(mtts[hr_qp->sge.offset / page_size] - >> PAGE_ADDR_SHIFT)) : 0; + context->sq_cur_sge_blk_addr = (ibqp->qp_type == IB_QPT_GSI || + hr_qp->sq.max_gs > + HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? + (cpu_to_le32(sge_cur_blk >> + PAGE_ADDR_SHIFT)) : 0; roce_set_field(context->byte_184_irrl_idx, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, - ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ? - (mtts[hr_qp->sge.offset / page_size] >> - (32 + PAGE_ADDR_SHIFT)) : 0); + (ibqp->qp_type == IB_QPT_GSI || + hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ? + (sge_cur_blk >> (32 + PAGE_ADDR_SHIFT)) : 0); qpc_mask->sq_cur_sge_blk_addr = 0; roce_set_field(qpc_mask->byte_184_irrl_idx, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M, V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0); - context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT); + context->rx_sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> + PAGE_ADDR_SHIFT); roce_set_field(context->byte_232_irrl_sge, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, - mtts[0] >> (32 + PAGE_ADDR_SHIFT)); + sq_cur_blk >> (32 + PAGE_ADDR_SHIFT)); qpc_mask->rx_sq_cur_blk_addr = 0; roce_set_field(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M, @@ -3327,13 +4712,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, V2_QPC_BYTE_240_RX_ACK_MSN_M, V2_QPC_BYTE_240_RX_ACK_MSN_S, 0); - roce_set_field(context->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RX_ACK_EPSN_M, - V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RX_ACK_EPSN_M, - V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0); - roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M, V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0); @@ -3347,27 +4725,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, V2_QPC_BYTE_240_IRRL_TAIL_REAL_M, V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0); - roce_set_field(context->byte_220_retry_psn_msn, - V2_QPC_BYTE_220_RETRY_MSG_PSN_M, - V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_220_retry_psn_msn, - V2_QPC_BYTE_220_RETRY_MSG_PSN_M, - V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0); - - roce_set_field(context->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16); - roce_set_field(qpc_mask->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0); - - roce_set_field(context->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_224_retry_msg, - 
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0); - roce_set_field(qpc_mask->byte_220_retry_psn_msn, V2_QPC_BYTE_220_RETRY_MSG_MSN_M, V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0); @@ -3378,88 +4735,187 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M, V2_QPC_BYTE_212_CHECK_FLG_S, 0); - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S, 0); + roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, + V2_QPC_BYTE_212_LSN_S, 0); + roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, + V2_QPC_BYTE_212_LSN_S, 0); - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M, - V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M, - V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0); + roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M, + V2_QPC_BYTE_196_IRRL_HEAD_S, 0); - roce_set_field(context->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RNR_NUM_INIT_M, - V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry); - roce_set_field(qpc_mask->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RNR_NUM_INIT_M, - V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0); + return 0; +} - roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M, - V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry); - roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M, - V2_QPC_BYTE_244_RNR_CNT_S, 0); +static int hns_roce_v2_set_path(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, - V2_QPC_BYTE_212_LSN_S, 0x100); - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, - V2_QPC_BYTE_212_LSN_S, 0); +#ifdef CONFIG_KERNEL_419 + const struct ib_gid_attr *gid_attr = attr->ah_attr.grh.sgid_attr; +#else + struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_ROCE}; + union ib_gid zgid = { {0} }; + union ib_gid gid; + int status = 0; +#endif + int is_roce_protocol; + u16 vlan = 0xffff; + u8 ib_port; + u8 hr_port; + + ib_port = (attr_mask & IB_QP_PORT) ? 
attr->port_num : hr_qp->port + 1; + hr_port = ib_port - 1; + is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && + rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; + +#ifdef CONFIG_KERNEL_419 + if (is_roce_protocol) { + vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev); + + if (is_vlan_dev(gid_attr->ndev)) { + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); + roce_set_bit(context->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); + } + } +#else + if (is_roce_protocol) { + int index = grh->sgid_index; + + status = ib_get_cached_gid(ibqp->device, ib_port, index, &gid, + &gid_attr); + if (!status && !memcmp(&gid, &zgid, sizeof(gid))) + status = -ENOENT; + if (!status && gid_attr.ndev) { + vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev); + + if (is_vlan_dev(gid_attr.ndev)) { + roce_set_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0); + roce_set_bit(context->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1); + roce_set_bit(qpc_mask->byte_168_irrl_idx, + V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0); + } - if (attr_mask & IB_QP_TIMEOUT) { - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M, - V2_QPC_BYTE_28_AT_S, attr->timeout); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M, - V2_QPC_BYTE_28_AT_S, 0); + dev_put(gid_attr.ndev); + } } - roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, - V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, - V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0); + if (status) { + dev_err(hr_dev->dev, + "get gid during modifing QP(0x%x) failed, status %d\n", + ibqp->qp_num, status); + return -EAGAIN; + } +#endif - roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M, - V2_QPC_BYTE_196_IRRL_HEAD_S, 0); - roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M, - V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M, - V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0); + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S, vlan); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S, 0); - if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { - roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, - V2_QPC_BYTE_208_SR_MAX_S, - fls(attr->max_rd_atomic - 1)); - roce_set_field(qpc_mask->byte_208_irrl, - V2_QPC_BYTE_208_SR_MAX_M, - V2_QPC_BYTE_208_SR_MAX_S, 0); + if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { + dev_err(hr_dev->dev, + "sgid_index(%u) too large. max is %d\n", + grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); + return -EINVAL; + } + + if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { + dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); + return -EINVAL; + } + +#ifdef CONFIG_KERNEL_419 + roce_set_field(context->byte_52_udpspn_dmac, + V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, + (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ? + 0 : 0x12b7); +#else + roce_set_field(context->byte_52_udpspn_dmac, + V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, + (gid_attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
+ 0 : 0x12b7); +#endif + + roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M, + V2_QPC_BYTE_52_UDPSPN_S, 0); + + roce_set_field(context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, + grh->sgid_index); + + roce_set_field(qpc_mask->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0); + + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, + V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M, + V2_QPC_BYTE_24_HOP_LIMIT_S, 0); + +#ifdef CONFIG_KERNEL_419 + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && + gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) +#else + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && + gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) +#endif + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2); + else + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, grh->traffic_class); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, + V2_QPC_BYTE_24_TC_S, 0); + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, + V2_QPC_BYTE_28_FL_S, grh->flow_label); + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, + V2_QPC_BYTE_28_FL_S, 0); + memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); + memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); + + hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { + dev_err(hr_dev->dev, + "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + hr_qp->sl, MAX_SERVICE_LEVEL); + return -EINVAL; } + roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, + V2_QPC_BYTE_28_SL_S, hr_qp->sl); + roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, + V2_QPC_BYTE_28_SL_S, 0); + return 0; } -static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) +static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + enum ib_qp_state cur_state, + enum ib_qp_state new_state, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_v2_qp_context *context; - struct hns_roce_v2_qp_context *qpc_mask; - struct device *dev = hr_dev->dev; - int ret = -EINVAL; - - context = kcalloc(2, sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; + int ret = 0; - qpc_mask = context + 1; - /* - * In v2 engine, software pass context and context mask to hardware - * when modifying qp. If software need modify some fields in context, - * we should set all bits of the relevant fields in context mask to - * 0 at the same time, else set them to 0x1. 
- */ - memset(qpc_mask, 0xff, sizeof(*qpc_mask)); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { + memset(qpc_mask, 0, sizeof(*qpc_mask)); modify_qp_reset_to_init(ibqp, attr, attr_mask, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { @@ -3475,151 +4931,214 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, qpc_mask); if (ret) goto out; - } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) || - (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) || - (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) || - (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) || - (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) || - (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || - (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) || - (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) { + } else if (V2_QP_SUPPORT_STATE(cur_state, new_state)) { /* Nothing */ ; } else { - dev_err(dev, "Illegal state for QP!\n"); - ret = -EINVAL; + dev_err(hr_dev->dev, "Illegal state for QP(0x%x),cur state-%d, new_state-%d!\n", + ibqp->qp_num, cur_state, new_state); + ret = -EAGAIN; goto out; } - /* When QP state is err, SQ and RQ WQE should be flushed */ - if (new_state == IB_QPS_ERR) { - roce_set_field(context->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, - hr_qp->sq.head); - roce_set_field(qpc_mask->byte_160_sq_ci_pi, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, - V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); - roce_set_field(context->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, - hr_qp->rq.head); - roce_set_field(qpc_mask->byte_84_rq_ci_pi, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, - V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); - } +out: + return ret; +} - if (attr_mask & IB_QP_AV) { - const struct ib_global_route *grh = - rdma_ah_read_grh(&attr->ah_attr); - const struct ib_gid_attr *gid_attr = NULL; - u8 src_mac[ETH_ALEN]; - int is_roce_protocol; - u16 vlan = 0xffff; - u8 ib_port; - u8 hr_port; - - ib_port = (attr_mask & IB_QP_PORT) ? 
attr->port_num : - hr_qp->port + 1; - hr_port = ib_port - 1; - is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && - rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; - - if (is_roce_protocol) { - gid_attr = attr->ah_attr.grh.sgid_attr; - vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev); - memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN); +static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) +{ +#define QP_TIMEOUT_MAX_HIP08 20 +#define QP_TIMEOUT_MAX 31 + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (*timeout > QP_TIMEOUT_MAX_HIP08) { + dev_warn(hr_dev->dev, + "Local ACK timeout shall be 0 to 20.\n"); + return false; + } + *timeout += HNS_ROCE_QP_TIMEOUT_OFFSET; + } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08_B) { + if (*timeout > QP_TIMEOUT_MAX) { + dev_warn(hr_dev->dev, + "Local ACK timeout shall be 0 to 31.\n"); + return false; } + } - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_VLAN_ID_M, - V2_QPC_BYTE_24_VLAN_ID_S, vlan); - roce_set_field(qpc_mask->byte_24_mtu_tc, - V2_QPC_BYTE_24_VLAN_ID_M, - V2_QPC_BYTE_24_VLAN_ID_S, 0); + return true; +} - if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { - dev_err(hr_dev->dev, - "sgid_index(%u) too large. max is %d\n", - grh->sgid_index, - hr_dev->caps.gid_table_len[hr_port]); - ret = -EINVAL; - goto out; - } +static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, + struct hns_roce_v2_qp_context *context, + struct hns_roce_v2_qp_context *qpc_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + int ret = 0; + u8 timeout; - if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { - dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); - ret = -EINVAL; - goto out; - } + /* The AV component shall be modified for RoCEv2 */ + if (attr_mask & IB_QP_AV) { + ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, + qpc_mask); + if (ret) + return ret; + } - roce_set_field(context->byte_52_udpspn_dmac, - V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S, - (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
- 0 : 0x12b7); + if (attr_mask & IB_QP_TIMEOUT) { + timeout = attr->timeout; + if (check_qp_timeout_cfg_range(hr_dev, &timeout)) { + roce_set_field(context->byte_28_at_fl, + V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, + timeout); + roce_set_field(qpc_mask->byte_28_at_fl, + V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, + 0); + } + } - roce_set_field(qpc_mask->byte_52_udpspn_dmac, - V2_QPC_BYTE_52_UDPSPN_M, - V2_QPC_BYTE_52_UDPSPN_S, 0); + if (attr_mask & IB_QP_RETRY_CNT) { + roce_set_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S, + attr->retry_cnt); + roce_set_field(qpc_mask->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0); + + roce_set_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S, + attr->retry_cnt); + roce_set_field(qpc_mask->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S, 0); + } - roce_set_field(context->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SGID_IDX_M, - V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index); + if (attr_mask & IB_QP_RNR_RETRY) { + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0); + + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_CNT_M, + V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_CNT_M, + V2_QPC_BYTE_244_RNR_CNT_S, 0); + } - roce_set_field(qpc_mask->byte_20_smac_sgid_idx, - V2_QPC_BYTE_20_SGID_IDX_M, - V2_QPC_BYTE_20_SGID_IDX_S, 0); + /* RC&UC&UD required attr */ + if (attr_mask & IB_QP_SQ_PSN) { + roce_set_field(context->byte_172_sq_psn, + V2_QPC_BYTE_172_SQ_CUR_PSN_M, + V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_172_sq_psn, + V2_QPC_BYTE_172_SQ_CUR_PSN_M, + V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0); + + roce_set_field(context->byte_196_sq_psn, + V2_QPC_BYTE_196_SQ_MAX_PSN_M, + V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_196_sq_psn, + V2_QPC_BYTE_196_SQ_MAX_PSN_M, + V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0); + + roce_set_field(context->byte_220_retry_psn_msn, + V2_QPC_BYTE_220_RETRY_MSG_PSN_M, + V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_220_retry_psn_msn, + V2_QPC_BYTE_220_RETRY_MSG_PSN_M, + V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0); + + roce_set_field(context->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_PSN_S, + attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S); + roce_set_field(qpc_mask->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0); + + roce_set_field(context->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, + attr->sq_psn); + roce_set_field(qpc_mask->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0); + + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RX_ACK_EPSN_M, + V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RX_ACK_EPSN_M, + V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0); + } - roce_set_field(context->byte_24_mtu_tc, - V2_QPC_BYTE_24_HOP_LIMIT_M, - V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit); - roce_set_field(qpc_mask->byte_24_mtu_tc, - V2_QPC_BYTE_24_HOP_LIMIT_M, - V2_QPC_BYTE_24_HOP_LIMIT_S, 
0); + if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && + attr->max_dest_rd_atomic) { + roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, + V2_QPC_BYTE_140_RR_MAX_S, + fls(attr->max_dest_rd_atomic - 1)); + roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, + V2_QPC_BYTE_140_RR_MAX_S, 0); + } - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, - V2_QPC_BYTE_24_TC_S, grh->traffic_class); - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M, - V2_QPC_BYTE_24_TC_S, 0); - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, - V2_QPC_BYTE_28_FL_S, grh->flow_label); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M, - V2_QPC_BYTE_28_FL_S, 0); - memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); - memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, - V2_QPC_BYTE_28_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M, - V2_QPC_BYTE_28_SL_S, 0); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { + roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, + V2_QPC_BYTE_208_SR_MAX_S, + fls(attr->max_rd_atomic - 1)); + roce_set_field(qpc_mask->byte_208_irrl, + V2_QPC_BYTE_208_SR_MAX_M, + V2_QPC_BYTE_208_SR_MAX_S, 0); } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); - /* Every status migrate must change state */ - roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M, - V2_QPC_BYTE_60_QP_ST_S, new_state); - roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M, - V2_QPC_BYTE_60_QP_ST_S, 0); + roce_set_field(context->byte_80_rnr_rx_cqn, + V2_QPC_BYTE_80_MIN_RNR_TIME_M, + V2_QPC_BYTE_80_MIN_RNR_TIME_S, 1); + roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, + V2_QPC_BYTE_80_MIN_RNR_TIME_M, + V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0); - /* SW pass context to HW */ - ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state, - context, hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); - goto out; + /* RC&UC required attr */ + if (attr_mask & IB_QP_RQ_PSN) { + roce_set_field(context->byte_108_rx_reqepsn, + V2_QPC_BYTE_108_RX_REQ_EPSN_M, + V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn); + roce_set_field(qpc_mask->byte_108_rx_reqepsn, + V2_QPC_BYTE_108_RX_REQ_EPSN_M, + V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0); + + roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, + V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1); + roce_set_field(qpc_mask->byte_152_raq, + V2_QPC_BYTE_152_RAQ_PSN_M, + V2_QPC_BYTE_152_RAQ_PSN_S, 0); } - hr_qp->state = new_state; + if (attr_mask & IB_QP_QKEY) { + context->qkey_xrcd = cpu_to_le32(attr->qkey); + qpc_mask->qkey_xrcd = 0; + hr_qp->qkey = attr->qkey; + } + + return ret; +} + +static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); if (attr_mask & IB_QP_ACCESS_FLAGS) hr_qp->atomic_rd_en = attr->qp_access_flags; @@ -3630,13 +5149,119 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, hr_qp->port = attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; } +} + +static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, + int attr_mask, enum ib_qp_state cur_state, + enum ib_qp_state new_state) +{ 
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct hns_roce_v2_qp_context cmd_qpc[2]; + struct hns_roce_v2_qp_context *context = &cmd_qpc[0]; + struct hns_roce_v2_qp_context *qpc_mask = &cmd_qpc[1]; + struct device *dev = hr_dev->dev; + unsigned long sq_flags = 0; + unsigned long rq_flags = 0; + int ret; + + /* + * In v2 engine, software pass context and context mask to hardware + * when modifying qp. If software need modify some fields in context, + * we should set all bits of the relevant fields in context mask to + * 0 at the same time, else set them to 0x1. + */ + memset(context, 0, sizeof(*context)); + memset(qpc_mask, 0xff, sizeof(*qpc_mask)); + + /* Configure the mandatory fields */ + ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, + new_state, context, qpc_mask); + if (ret) { + dev_err(dev, "set fields for modify qp(0x%x) from state %d to state %d failed, ret = %d\n", + ibqp->qp_num, to_hns_roce_qp_st(cur_state), + to_hns_roce_qp_st(new_state), ret); + goto out; + } + + /* Configure the optional fields */ + ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context, + qpc_mask); + if (ret) + goto out; + + /* When locks are used for post verbs, flush cqe should be enabled */ + if (qp_lock) + hr_qp->flush_en = 1; + + /* Ensure that the value of flush_en can be read correctly later */ + rmb(); + + /* When QP state is err, SQ and RQ WQE should be flushed */ + if (new_state == IB_QPS_ERR) { + v2_spin_lock_irqsave(qp_lock, &hr_qp->sq.lock, &sq_flags); + roce_set_field(context->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, + hr_qp->sq.head); + if (hr_qp->flush_en == 1) + roce_set_field(qpc_mask->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); + + hr_qp->state = IB_QPS_ERR; + v2_spin_unlock_irqrestore(qp_lock, &hr_qp->sq.lock, &sq_flags); + if (!ibqp->srq) { + v2_spin_lock_irqsave(qp_lock, &hr_qp->rq.lock, + &rq_flags); + roce_set_field(context->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, + hr_qp->rq.head); + if (hr_qp->flush_en == 1) + roce_set_field(qpc_mask->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); + v2_spin_unlock_irqrestore(qp_lock, &hr_qp->rq.lock, + &rq_flags); + } + } + + roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, + ((ibqp->srq || + (to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC)) ? + 1 : 0)); + roce_set_bit(qpc_mask->byte_108_rx_reqepsn, + V2_QPC_BYTE_108_INV_CREDIT_S, 0); + + /* Every status migrate must change state */ + roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, + V2_QPC_BYTE_60_QP_ST_S, to_hns_roce_qp_st(new_state)); + roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, + V2_QPC_BYTE_60_QP_ST_S, 0); + + /* SW pass context to HW */ + ret = hns_roce_v2_qp_modify(hr_dev, context, hr_qp); + if (ret) { + dev_err(dev, "modify qp(0x%x) from state %d to state %d failed, ret = %d\n", + ibqp->qp_num, to_hns_roce_qp_st(cur_state), + to_hns_roce_qp_st(new_state), ret); + goto out; + } + + hr_qp->state = new_state; + + hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask); if (new_state == IB_QPS_RESET && !ibqp->uobject) { - hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, + struct hns_roce_cq *send_cq, *recv_cq; + + hns_roce_get_cqs(ibqp, &send_cq, &recv_cq); + hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); + if (send_cq != recv_cq) + hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); hr_qp->rq.head = 0; hr_qp->rq.tail = 0; @@ -3648,26 +5273,12 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, *hr_qp->rdb.db_record = 0; } + rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state); + out: - kfree(context); return ret; } -static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state) -{ - switch (state) { - case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET; - case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT; - case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR; - case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS; - case HNS_ROCE_QP_ST_SQ_DRAINING: - case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD; - case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE; - case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR; - default: return -1; - } -} - static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct hns_roce_v2_qp_context *hr_context) @@ -3683,7 +5294,7 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, HNS_ROCE_CMD_QUERY_QPC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(hr_dev->dev, "QUERY QP cmd process error\n"); + dev_err(hr_dev->dev, "QUERY QP cmd process error(%d).\n", ret); goto out; } @@ -3706,6 +5317,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int state; int ret; + rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_QP); + context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return -ENOMEM; @@ -3723,12 +5336,13 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context); if (ret) { - dev_err(dev, "query qpc error\n"); + dev_err(dev, "query qpc(0x%x) error, ret = %d\n", + ibqp->qp_num, ret); ret = -EINVAL; goto out; } - state = roce_get_field(context->byte_60_qpst_mapid, + state = roce_get_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { @@ -3756,11 +5370,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S); qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en, - V2_QPC_BYTE_76_RRE_S)) << 2) | - ((roce_get_bit(context->byte_76_srqn_op_en, - V2_QPC_BYTE_76_RWE_S)) << 1) | - ((roce_get_bit(context->byte_76_srqn_op_en, - V2_QPC_BYTE_76_ATE_S)) << 3); + V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) | + ((roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) | + ((roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S); + if (hr_qp->ibqp.qp_type == IB_QPT_RC || hr_qp->ibqp.qp_type == IB_QPT_UC) { struct ib_global_route *grh = @@ -3787,7 +5402,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, } qp_attr->port_num = hr_qp->port + 1; - qp_attr->sq_draining = 0; + qp_attr->sq_draining = (state == HNS_ROCE_QP_ST_SQ_DRAINING); qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, V2_QPC_BYTE_208_SR_MAX_S); @@ -3801,14 +5416,17 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S); qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn, - V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S); - qp_attr->rnr_retry = 
context->rq_rnr_timer; + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S); + qp_attr->rnr_retry = roce_get_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S); done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; + qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; + qp_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; if (!ibqp->uobject) { qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; @@ -3832,90 +5450,120 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, { struct hns_roce_cq *send_cq, *recv_cq; struct device *dev = hr_dev->dev; - int ret; + unsigned long flags; + int ret = 0; if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { /* Modify qp to reset before destroying qp */ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); - if (ret) { - dev_err(dev, "modify QP %06lx to ERR failed.\n", - hr_qp->qpn); - return ret; - } + if (ret) + dev_err(dev, + "Modify QP 0x%06lx to Reset failed(%d).\n", + hr_qp->qpn, ret); } - send_cq = to_hr_cq(hr_qp->ibqp.send_cq); - recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq); + hns_roce_get_cqs(&hr_qp->ibqp, &send_cq, &recv_cq); - hns_roce_lock_cqs(send_cq, recv_cq); + spin_lock_irqsave(&hr_dev->qp_lock, flags); + if (cq_lock) + hns_roce_lock_cqs(send_cq, recv_cq); + list_del(&hr_qp->list); + list_del(&hr_qp->send_list); + list_del(&hr_qp->recv_list); if (!is_user) { - __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : NULL); - if (send_cq != recv_cq) + if (recv_cq) + __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, + (hr_qp->ibqp.srq ? + to_hr_srq(hr_qp->ibqp.srq) : + NULL)); + if (send_cq && send_cq != recv_cq) __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); } hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_unlock_cqs(send_cq, recv_cq); - - hns_roce_qp_free(hr_dev, hr_qp); - - /* Not special_QP, free their QPN */ - if ((hr_qp->ibqp.qp_type == IB_QPT_RC) || - (hr_qp->ibqp.qp_type == IB_QPT_UC) || - (hr_qp->ibqp.qp_type == IB_QPT_UD)) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + if (cq_lock) + hns_roce_unlock_cqs(send_cq, recv_cq); + spin_unlock_irqrestore(&hr_dev->qp_lock, flags); - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); + return ret; +} - if (is_user) { - if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) - hns_roce_db_unmap_user( - to_hr_ucontext(hr_qp->ibqp.uobject->context), - &hr_qp->sdb); +static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + int ret; - if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) - hns_roce_db_unmap_user( - to_hr_ucontext(hr_qp->ibqp.uobject->context), - &hr_qp->rdb); - ib_umem_release(hr_qp->umem); - } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - if (hr_qp->rq.wqe_cnt) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - } + rdfx_inc_dealloc_qp_cnt(hr_dev); + rdfx_func_cnt(hr_dev, RDFX_FUNC_DESTROY_QP); + rdfx_release_rdfx_qp(hr_dev, ibqp->qp_num); + hns_roce_inc_rdma_hw_stats(ibqp->device, HW_STATS_QP_DEALLOC); - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - kfree(hr_qp->rq_inl_buf.wqe_list); - } + ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject); + if (ret) + dev_err(hr_dev->dev, "Destroy qp 
0x%06lx failed(%d)\n", + hr_qp->qpn, ret); + hns_roce_qp_destroy(hr_dev, hr_qp); return 0; } -static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) +static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) { - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct hns_roce_scc_ctx_clr *scc_cxt_clr; + struct hns_roce_scc_ctx_clr_done *resp; + struct hns_roce_scc_ctx_clr_done *rst; + struct hns_roce_cmq_desc desc; int ret; + int i; - ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject); - if (ret) { - dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); + /* set scc ctx clear done flag */ + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCC_CTX, + false); + + rst = (struct hns_roce_scc_ctx_clr_done *)desc.data; + memset(rst, 0, sizeof(*rst)); + roce_set_bit(rst->rocee_scc_ctx_clr_done, + HNS_ROCE_V2_SCC_CTX_DONE_S, + 0); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) return ret; - } - if (hr_qp->ibqp.qp_type == IB_QPT_GSI) - kfree(hr_to_hr_sqp(hr_qp)); - else - kfree(hr_qp); + /* clear scc context */ + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_SCC_CTX_CLR, + false); - return 0; + scc_cxt_clr = (struct hns_roce_scc_ctx_clr *)desc.data; + memset(scc_cxt_clr, 0, sizeof(*scc_cxt_clr)); + scc_cxt_clr->rocee_scc_ctx_clr_qpn = cpu_to_le32(hr_qp->qpn); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + + /* query scc context clear is done or not */ + for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) { + hns_roce_cmq_setup_basic_desc(&desc, + HNS_ROCE_OPC_QUERY_SCC_CTX, true); + resp = (struct hns_roce_scc_ctx_clr_done *)desc.data; + memset(resp, 0, sizeof(*resp)); + + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) + return ret; + + if (resp->rocee_scc_ctx_clr_done) + return 0; + } + + dev_err(hr_dev->dev, "Clear scc ctx failure!"); + return -EINVAL; } static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) @@ -3942,6 +5590,15 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt, V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S, 0); + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { + dev_info(hr_dev->dev, "cq_period(%d) reached the upper limit, adjusted to 65.\n", + cq_period); + cq_period = HNS_ROCE_MAX_CQ_PERIOD; + } + cq_period *= HNS_ROCE_CLOCK_ADJUST; + } roce_set_field(cq_context->byte_56_cqe_period_maxcnt, V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S, cq_period); @@ -3954,54 +5611,65 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) HNS_ROCE_CMD_TIMEOUT_MSECS); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) - dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n"); + dev_err(hr_dev->dev, "Modify CQ(0x%lx) cmd process error(%d).\n", + hr_cq->cqn, ret); return ret; } -static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn) -{ - struct hns_roce_qp *hr_qp; - struct ib_qp_attr attr; - int attr_mask; - int ret; - - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (!hr_qp) { - dev_warn(hr_dev->dev, "no hr_qp can be found!\n"); - return; - } - - if (hr_qp->ibqp.uobject) { - if (hr_qp->sdb_en == 1) { - hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); - } else { - dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n"); 
- return; - } - } - - attr_mask = IB_QP_STATE; - attr.qp_state = IB_QPS_ERR; - ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask, - hr_qp->state, IB_QPS_ERR); - if (ret) - dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n", - qpn); -} - static void hns_roce_irq_work_handle(struct work_struct *work) { struct hns_roce_work *irq_work = container_of(work, struct hns_roce_work, work); + struct device *dev = irq_work->hr_dev->dev; u32 qpn = irq_work->qpn; + u32 cqn = irq_work->cqn; switch (irq_work->event_type) { + case HNS_ROCE_EVENT_TYPE_PATH_MIG: + dev_info(dev, "Path migrated succeeded.\n"); + break; + case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: + dev_warn(dev, "Path migration failed.\n"); + break; + case HNS_ROCE_EVENT_TYPE_COMM_EST: + dev_info(dev, "Communication established.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: + dev_warn(dev, "Send queue drained.\n"); + break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + dev_err(dev, "Local work queue 0x%x catast error, sub_event type is: %d\n", + qpn, irq_work->sub_type); + break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + dev_err(dev, "Invalid request local work queue 0x%x error.\n", + qpn); + break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); + dev_err(dev, "Local access violation work queue 0x%x error, sub_event type is: %d\n", + qpn, irq_work->sub_type); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: + dev_warn(dev, "SRQ limit reach.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: + dev_warn(dev, "SRQ last wqe reach.\n"); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: + dev_err(dev, "SRQ catas error.\n"); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + dev_err(dev, "CQ 0x%x access err.\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + dev_warn(dev, "CQ 0x%x overflow\n", cqn); + break; + case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: + dev_warn(dev, "DB overflow.\n"); + break; + case HNS_ROCE_EVENT_TYPE_FLR: + dev_warn(dev, "Function level reset.\n"); break; default: break; @@ -4011,7 +5679,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work) } static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq, u32 qpn) + struct hns_roce_eq *eq, + u32 qpn, u32 cqn) { struct hns_roce_work *irq_work; @@ -4022,6 +5691,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle); irq_work->hr_dev = hr_dev; irq_work->qpn = qpn; + irq_work->cqn = cqn; irq_work->event_type = eq->event_type; irq_work->sub_type = eq->sub_type; queue_work(hr_dev->irq_workq, &(irq_work->work)); @@ -4029,7 +5699,8 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, static void set_eq_cons_index_v2(struct hns_roce_eq *eq) { - u32 doorbell[2]; + struct hns_roce_dev *hr_dev = eq->hr_dev; + __le32 doorbell[2]; doorbell[0] = 0; doorbell[1] = 0; @@ -4055,131 +5726,13 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq) HNS_ROCE_V2_EQ_DB_PARA_S, (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M)); - hns_roce_write64_k(doorbell, eq->doorbell); + hns_roce_write64(hr_dev, doorbell, eq->doorbell); } -static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - u32 qpn) +static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) { - struct device *dev = hr_dev->dev; - int sub_type; - - dev_warn(dev, "Local work queue catastrophic error.\n"); - sub_type = roce_get_field(aeqe->asyn, 
HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - switch (sub_type) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_warn(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_warn(dev, "QP %d, MTU error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_warn(dev, "QP %d, WQE shift error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", sub_type); - break; - } -} - -static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, u32 qpn) -{ - struct device *dev = hr_dev->dev; - int sub_type; - - dev_warn(dev, "Local access violation work queue error.\n"); - sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - switch (sub_type) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_warn(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_warn(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_warn(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_warn(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_warn(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_warn(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", sub_type); - break; - } -} - -static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type, u32 qpn) -{ - struct device *dev = hr_dev->dev; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_COMM_EST: - dev_warn(dev, "Communication established.\n"); - break; - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "Send queue drained.\n"); - break; - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn); - break; - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "Invalid request local work queue error.\n"); - break; - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn); - break; - default: - break; - } - - hns_roce_qp_event(hr_dev, qpn, event_type); -} - -static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type, u32 cqn) -{ - struct device *dev = hr_dev->dev; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%x access err.\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); - break; - default: - break; - } - - hns_roce_cq_event(hr_dev, cqn, event_type); -} - -static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) -{ - u32 buf_chk_sz; - unsigned long off; + u32 buf_chk_sz; + unsigned long off; buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE; @@ -4190,19 +5743,19 @@ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry) { - u32 buf_chk_sz; + u32 chk_sz; unsigned long off; - buf_chk_sz = 1 << 
(eq->eqe_buf_pg_sz + PAGE_SHIFT); + chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT); off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE; if (eq->hop_num == HNS_ROCE_HOP_NUM_0) return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) + - off % buf_chk_sz); + off % chk_sz); else - return (struct hns_roce_aeqe *)((u8 *) - (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz); + return (struct hns_roce_aeqe *)((u8 *)(eq->buf[off / chk_sz]) + + off % chk_sz); } static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) @@ -4226,6 +5779,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, int aeqe_found = 0; int event_type; int sub_type; + u32 srqn; u32 qpn; u32 cqn; @@ -4248,34 +5802,30 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, cqn = roce_get_field(aeqe->event.cq_event.cq, HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M, HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S); + srqn = roce_get_field(aeqe->event.srq_event.srq, + HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M, + HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S); switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_warn(dev, "Path migrated succeeded.\n"); - break; case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "Path migration failed.\n"); - break; case HNS_ROCE_EVENT_TYPE_COMM_EST: case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type, - qpn); + hns_roce_qp_event(hr_dev, qpn, event_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - dev_warn(dev, "SRQ not support.\n"); + hns_roce_srq_event(hr_dev, srqn, event_type); break; case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type, - cqn); + hns_roce_cq_event(hr_dev, cqn, event_type); break; case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - dev_warn(dev, "DB overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_MB: hns_roce_cmd_event(hr_dev, @@ -4284,10 +5834,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, le64_to_cpu(aeqe->event.cmd.out_param)); break; case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW: - dev_warn(dev, "CEQ overflow.\n"); break; case HNS_ROCE_EVENT_TYPE_FLR: - dev_warn(dev, "Function level reset.\n"); break; default: dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n", @@ -4298,13 +5846,13 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, eq->event_type = event_type; eq->sub_type = sub_type; ++eq->cons_index; + hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE]++; aeqe_found = 1; - if (eq->cons_index > (2 * eq->entries - 1)) { - dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > EQ_DEPTH_COEFF * eq->entries - 1) eq->cons_index = 0; - } - hns_roce_v2_init_irq_work(hr_dev, eq, qpn); + + hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn); } set_eq_cons_index_v2(eq); @@ -4356,7 +5904,6 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - struct device *dev = hr_dev->dev; struct hns_roce_ceqe *ceqe; int ceqe_found = 0; u32 cqn; @@ -4375,12 +5922,11 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, hns_roce_cq_completion(hr_dev, cqn); ++eq->cons_index; + hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE]++; ceqe_found = 1; - if (eq->cons_index > (2 * eq->entries - 1)) { 
- dev_warn(dev, "cons_index overflow, set back to 0.\n"); + if (eq->cons_index > EQ_DEPTH_COEFF * eq->entries - 1) eq->cons_index = 0; - } } set_eq_cons_index_v2(eq); @@ -4409,41 +5955,58 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) struct hns_roce_dev *hr_dev = dev_id; struct device *dev = hr_dev->dev; int int_work = 0; - u32 int_st; - u32 int_en; + __le32 int_st; + __le32 int_en; /* Abnormal interrupt */ - int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG); - int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); + int_st = cpu_to_le32(roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG)); + int_en = cpu_to_le32(roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG)); if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { + struct pci_dev *pdev = hr_dev->pci_dev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops = ae_dev->ops; + dev_err(dev, "AEQ overflow!\n"); roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + le32_to_cpu(int_st)); + + /* Set reset level for the following reset_event() call */ + if (ops->set_default_reset_request) + ops->set_default_reset_request(ae_dev, HNAE3_FUNC_RESET); + + if (ops->reset_event) + ops->reset_event(pdev, NULL); roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); + roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, + le32_to_cpu(int_en)); int_work = 1; } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) { dev_err(dev, "BUS ERR!\n"); roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + le32_to_cpu(int_st)); roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); + roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, + le32_to_cpu(int_en)); int_work = 1; } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) { dev_err(dev, "OTHER ERR!\n"); roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + le32_to_cpu(int_st)); roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); - roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); + roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, + le32_to_cpu(int_en)); int_work = 1; } else @@ -4485,16 +6048,20 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) struct device *dev = hr_dev->dev; int ret; - if (eqn < hr_dev->caps.num_comp_vectors) + if (eqn < hr_dev->caps.num_comp_vectors) { ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, 0, HNS_ROCE_CMD_DESTROY_CEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); - else + + rdfx_release_rdfx_ceq(hr_dev, eqn); + } else { ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, 0, HNS_ROCE_CMD_DESTROY_AEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); + } if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(0x%x) failed(%d).\n", + eqn, ret); } static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, @@ -4514,14 +6081,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); - /* hop_num = 0 */ if (mhop_num == HNS_ROCE_HOP_NUM_0) { dma_free_coherent(dev, (unsigned int)(eq->entries 
* eq->eqe_size), eq->bt_l0, eq->l0_dma); return; } - /* hop_num = 1 or hop = 2 */ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma); if (mhop_num == 1) { for (i = 0; i < eq->l0_last_num; i++) { @@ -4540,8 +6105,8 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], eq->l1_dma[i]); - for (j = 0; j < bt_chk_sz / 8; j++) { - idx = i * (bt_chk_sz / 8) + j; + for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { + idx = i * (bt_chk_sz / BA_BYTE_LEN) + j; if ((i == eq->l0_last_num - 1) && j == eq->l1_last_num - 1) { eqe_alloc = (buf_chk_sz / eq->eqe_size) @@ -4580,30 +6145,44 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev, return; } - if (eq->buf_list) - dma_free_coherent(hr_dev->dev, buf_chk_sz, - eq->buf_list->buf, eq->buf_list->map); + dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf, + eq->buf_list->map); + kfree(eq->buf_list); } static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, - void *mb_buf) + struct hns_roce_eq_context *mb_buf) { struct hns_roce_eq_context *eqc; + unsigned int eq_period = HNS_ROCE_V2_EQ_DEFAULT_INTERVAL; + unsigned int eq_max_cnt = HNS_ROCE_V2_EQ_DEFAULT_BURST_NUM; + unsigned int eq_arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED; eqc = mb_buf; memset(eqc, 0, sizeof(struct hns_roce_eq_context)); + eq->eq_period = eq_period; + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) { + if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { + dev_info(hr_dev->dev, "eq_period(%d) reached the upper limit, adjusted to 65.\n", + eq->eq_period); + eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD; + } + eq->eq_period *= HNS_ROCE_CLOCK_ADJUST; + } + /* init eqc */ eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG; eq->hop_num = hr_dev->caps.eqe_hop_num; eq->cons_index = 0; eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0; eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0; - eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED; eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz; eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz; eq->shift = ilog2((unsigned int)eq->entries); + eq->eq_max_cnt = eq_max_cnt; + eq->arm_st = eq_arm_st; if (!eq->hop_num) eq->eqe_ba = eq->buf_list->map; @@ -4757,15 +6336,14 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT); bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT); - ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) - / buf_chk_sz; - bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8); + ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size), + buf_chk_sz); + bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN); - /* hop_num = 0 */ if (mhop_num == HNS_ROCE_HOP_NUM_0) { if (eq->entries > buf_chk_sz / eq->eqe_size) { - dev_err(dev, "eq entries %d is larger than buf_pg_sz!", - eq->entries); + dev_err(dev, "eq entries %d is larger than buf_pg_sz %d!", + eq->entries, buf_chk_sz / eq->eqe_size); return -EINVAL; } eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size, @@ -4803,13 +6381,15 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, if (!eq->bt_l0) goto err_dma_alloc_l0; + memset(eq->bt_l0, 0, bt_chk_sz); + if (mhop_num == 1) { - if (ba_num > (bt_chk_sz / 8)) + if (ba_num > (bt_chk_sz / BA_BYTE_LEN)) dev_err(dev, "ba_num %d is too large for 1 hop\n", ba_num); /* alloc buf */ - for (i = 0; i < bt_chk_sz / 8; i++) { + for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) { if (eq_buf_cnt + 1 < ba_num) { size = buf_chk_sz; } else { @@ 
-4830,20 +6410,24 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, break; } eq->cur_eqe_ba = eq->buf_dma[0]; - eq->nxt_eqe_ba = eq->buf_dma[1]; + if (ba_num > 1) + eq->nxt_eqe_ba = eq->buf_dma[1]; } else if (mhop_num == 2) { /* alloc L1 BT and buf */ - for (i = 0; i < bt_chk_sz / 8; i++) { + for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) { eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz, &(eq->l1_dma[i]), GFP_KERNEL); if (!eq->bt_l1[i]) goto err_dma_alloc_l1; + + memset(eq->bt_l1[i], 0, bt_chk_sz); + *(eq->bt_l0 + i) = eq->l1_dma[i]; - for (j = 0; j < bt_chk_sz / 8; j++) { - idx = i * bt_chk_sz / 8 + j; + for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { + idx = i * (bt_chk_sz / BA_BYTE_LEN) + j; if (eq_buf_cnt + 1 < ba_num) { size = buf_chk_sz; } else { @@ -4872,7 +6456,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, break; } eq->cur_eqe_ba = eq->buf_dma[0]; - eq->nxt_eqe_ba = eq->buf_dma[1]; + if (ba_num > 1) + eq->nxt_eqe_ba = eq->buf_dma[1]; } eq->l0_last_num = i + 1; @@ -4889,8 +6474,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], eq->l1_dma[i]); - for (j = 0; j < bt_chk_sz / 8; j++) { - idx = i * bt_chk_sz / 8 + j; + for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { + idx = i * (bt_chk_sz / BA_BYTE_LEN) + j; dma_free_coherent(dev, buf_chk_sz, eq->buf[idx], eq->buf_dma[idx]); } @@ -4913,11 +6498,11 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i], eq->l1_dma[i]); - for (j = 0; j < bt_chk_sz / 8; j++) { + for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) { if (i == record_i && j >= record_j) break; - idx = i * bt_chk_sz / 8 + j; + idx = i * (bt_chk_sz / BA_BYTE_LEN) + j; dma_free_coherent(dev, buf_chk_sz, eq->buf[idx], eq->buf_dma[idx]); @@ -4985,12 +6570,16 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, } } - hns_roce_config_eqc(hr_dev, eq, mailbox->buf); + rdfx_alloc_rdfx_ceq(hr_dev, eq->eqn, eq_cmd); + + hns_roce_config_eqc(hr_dev, eq, + (struct hns_roce_eq_context *)mailbox->buf); ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) { - dev_err(dev, "[mailbox cmd] create eqc failed.\n"); + dev_err(dev, "[mailbox cmd] create eqc(0x%x) failed(%d).\n", + eq->eqn, ret); goto err_cmd_mbox; } @@ -5016,18 +6605,108 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, return ret; } +static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, + int comp_num, int aeq_num, int other_num) +{ + struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; + int i, j; + int ret; + + for (i = 0; i < irq_num; i++) { + hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, + GFP_KERNEL); + if (!hr_dev->irq_names[i]) { + ret = -ENOMEM; + goto err_kzalloc_failed; + } + } + + /* irq contains: abnormal + AEQ + CEQ */ + for (j = 0; j < irq_num; j++) + if (j < other_num) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j); + else if (j < (other_num + aeq_num)) + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", + j - other_num); + else + snprintf((char *)hr_dev->irq_names[j], + HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", + j - other_num - aeq_num); + + for (j = 0; j < irq_num; j++) { + if (j < other_num) + ret = request_irq(hr_dev->irq[j], + hns_roce_v2_msix_interrupt_abn, + 0, hr_dev->irq_names[j], hr_dev); + + else if (j < (other_num + comp_num)) + ret = request_irq(eq_table->eq[j - other_num].irq, + 
hns_roce_v2_msix_interrupt_eq, + 0, hr_dev->irq_names[j + aeq_num], + &eq_table->eq[j - other_num]); + else + ret = request_irq(eq_table->eq[j - other_num].irq, + hns_roce_v2_msix_interrupt_eq, + 0, hr_dev->irq_names[j - comp_num], + &eq_table->eq[j - other_num]); + if (ret) { + dev_err(hr_dev->dev, "Request irq error(%d)\n", ret); + goto err_request_failed; + } + } + + return 0; + +err_request_failed: + for (j -= 1; j >= 0; j--) + if (j < other_num) + free_irq(hr_dev->irq[j], hr_dev); + else + free_irq(eq_table->eq[j - other_num].irq, + &eq_table->eq[j - other_num]); + +err_kzalloc_failed: + for (i -= 1; i >= 0; i--) + kfree(hr_dev->irq_names[i]); + + return ret; +} + +static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) +{ + int irq_num; + int eq_num; + int i; + + eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; + irq_num = eq_num + hr_dev->caps.num_other_vectors; + + for (i = 0; i < hr_dev->caps.num_other_vectors; i++) + free_irq(hr_dev->irq[i], hr_dev); + + for (i = 0; i < eq_num; i++) + free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); + + for (i = 0; i < irq_num; i++) + kfree(hr_dev->irq_names[i]); +} + static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct device *dev = hr_dev->dev; struct hns_roce_eq *eq; + static int dev_id = 0; + char queue_name[30]; unsigned int eq_cmd; int irq_num; int eq_num; int other_num; int comp_num; int aeq_num; - int i, j, k; + int i; int ret; other_num = hr_dev->caps.num_other_vectors; @@ -5041,27 +6720,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) if (!eq_table->eq) return -ENOMEM; - for (i = 0; i < irq_num; i++) { - hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, - GFP_KERNEL); - if (!hr_dev->irq_names[i]) { - ret = -ENOMEM; - goto err_failed_kzalloc; - } - } - /* create eq */ - for (j = 0; j < eq_num; j++) { - eq = &eq_table->eq[j]; + for (i = 0; i < eq_num; i++) { + eq = &eq_table->eq[i]; eq->hr_dev = hr_dev; - eq->eqn = j; - if (j < comp_num) { + eq->eqn = i; + if (i < comp_num) { /* CEQ */ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; eq->type_flag = HNS_ROCE_CEQ; eq->entries = hr_dev->caps.ceqe_depth; eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; - eq->irq = hr_dev->irq[j + other_num + aeq_num]; + eq->irq = hr_dev->irq[i + other_num + aeq_num]; eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; } else { @@ -5070,14 +6740,15 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) eq->type_flag = HNS_ROCE_AEQ; eq->entries = hr_dev->caps.aeqe_depth; eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; - eq->irq = hr_dev->irq[j - comp_num + other_num]; + eq->irq = hr_dev->irq[i - comp_num + other_num]; eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; } ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd); if (ret) { - dev_err(dev, "eq create failed.\n"); + dev_err(dev, "eq(0x%x) create failed(%d).\n", eq->eqn, + ret); goto err_create_eq_fail; } } @@ -5085,66 +6756,34 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) /* enable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); - /* irq contains: abnormal + AEQ + CEQ*/ - for (k = 0; k < irq_num; k++) - if (k < other_num) - snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k); - else if (k < (other_num + aeq_num)) - snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d", - k - other_num); - else - 
snprintf((char *)hr_dev->irq_names[k], - HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d", - k - other_num - aeq_num); - - for (k = 0; k < irq_num; k++) { - if (k < other_num) - ret = request_irq(hr_dev->irq[k], - hns_roce_v2_msix_interrupt_abn, - 0, hr_dev->irq_names[k], hr_dev); - - else if (k < (other_num + comp_num)) - ret = request_irq(eq_table->eq[k - other_num].irq, - hns_roce_v2_msix_interrupt_eq, - 0, hr_dev->irq_names[k + aeq_num], - &eq_table->eq[k - other_num]); - else - ret = request_irq(eq_table->eq[k - other_num].irq, - hns_roce_v2_msix_interrupt_eq, - 0, hr_dev->irq_names[k - comp_num], - &eq_table->eq[k - other_num]); - if (ret) { - dev_err(dev, "Request irq error!\n"); - goto err_request_irq_fail; - } + ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, + aeq_num, other_num); + if (ret) { + dev_err(dev, "Request irq failed(%d).\n", ret); + goto err_request_irq_fail; } + snprintf(queue_name, 29, "hns_roce_%d_irq_wq", dev_id); + dev_id++; hr_dev->irq_workq = - create_singlethread_workqueue("hns_roce_irq_workqueue"); + create_singlethread_workqueue(queue_name); if (!hr_dev->irq_workq) { dev_err(dev, "Create irq workqueue failed!\n"); - goto err_request_irq_fail; + ret = -ENOMEM; + goto err_create_wq_fail; } return 0; +err_create_wq_fail: + __hns_roce_free_irq(hr_dev); + err_request_irq_fail: - for (k -= 1; k >= 0; k--) - if (k < other_num) - free_irq(hr_dev->irq[k], hr_dev); - else - free_irq(eq_table->eq[k - other_num].irq, - &eq_table->eq[k - other_num]); + hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); err_create_eq_fail: - for (j -= 1; j >= 0; j--) - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]); - -err_failed_kzalloc: for (i -= 1; i >= 0; i--) - kfree(hr_dev->irq_names[i]); + hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); return ret; @@ -5163,26 +6802,374 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) /* Disable irq */ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); - for (i = 0; i < hr_dev->caps.num_other_vectors; i++) - free_irq(hr_dev->irq[i], hr_dev); + __hns_roce_free_irq(hr_dev); for (i = 0; i < eq_num; i++) { hns_roce_v2_destroy_eqc(hr_dev, i); - free_irq(eq_table->eq[i].irq, &eq_table->eq[i]); - hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]); } - for (i = 0; i < irq_num; i++) - kfree(hr_dev->irq_names[i]); - kfree(eq_table->eq); flush_workqueue(hr_dev->irq_workq); destroy_workqueue(hr_dev->irq_workq); } +static int hns_roce_v2_create_workq(struct hns_roce_dev *hr_dev) +{ + char workq_name[HNS_ROCE_WORKQ_NAME_LEN]; + struct device *dev = hr_dev->dev; + + snprintf(workq_name, HNS_ROCE_WORKQ_NAME_LEN - 1, "%s_flush_wq", + hr_dev->ib_dev.name); + + hr_dev->flush_workq = create_singlethread_workqueue(workq_name); + if (!hr_dev->flush_workq) { + dev_err(dev, "Failed to create flush workqueue!\n"); + return -ENOMEM; + } + + return 0; +} + +static void hns_roce_v2_destroy_workq(struct hns_roce_dev *hr_dev) +{ + flush_workqueue(hr_dev->flush_workq); + destroy_workqueue(hr_dev->flush_workq); +} + +static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq, u32 pdn, u16 xrcd, + u32 cqn, void *mb_buf, u64 *mtts_wqe, + u64 *mtts_idx, dma_addr_t dma_handle_wqe, + dma_addr_t dma_handle_idx) +{ + struct hns_roce_srq_context *srq_context; + + srq_context = mb_buf; + memset(srq_context, 0, sizeof(*srq_context)); + + roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M, + SRQC_BYTE_4_SRQ_ST_S, 1); + + roce_set_field(srq_context->byte_4_srqn_srqst, + 
SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M, + SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S, + (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : + hr_dev->caps.srqwqe_hop_num)); + roce_set_field(srq_context->byte_4_srqn_srqst, + SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S, + ilog2(srq->max)); + + roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M, + SRQC_BYTE_4_SRQN_S, srq->srqn); + + roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M, + SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0); + + roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M, + SRQC_BYTE_12_SRQ_XRCD_S, xrcd); + + srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3)); + + roce_set_field(srq_context->byte_24_wqe_bt_ba, + SRQC_BYTE_24_SRQ_WQE_BT_BA_M, + SRQC_BYTE_24_SRQ_WQE_BT_BA_S, + dma_handle_wqe >> 35); + + roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M, + SRQC_BYTE_28_PD_S, pdn); + roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M, + SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 : + fls(srq->max_gs - 1)); + + srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3); + roce_set_field(srq_context->rsv_idx_bt_ba, + SRQC_BYTE_36_SRQ_IDX_BT_BA_M, + SRQC_BYTE_36_SRQ_IDX_BT_BA_S, + dma_handle_idx >> 35); + + srq_context->idx_cur_blk_addr = cpu_to_le32(mtts_idx[0] >> + PAGE_ADDR_SHIFT); + roce_set_field(srq_context->byte_44_idxbufpgsz_addr, + SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M, + SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S, + (mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)); + roce_set_field(srq_context->byte_44_idxbufpgsz_addr, + SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M, + SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S, + hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : + hr_dev->caps.idx_hop_num); + + roce_set_field(srq_context->byte_44_idxbufpgsz_addr, + SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M, + SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S, + hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(srq_context->byte_44_idxbufpgsz_addr, + SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M, + SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, + hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET); + + srq_context->idx_nxt_blk_addr = cpu_to_le32(mtts_idx[1] >> + PAGE_ADDR_SHIFT); + roce_set_field(srq_context->rsv_idxnxtblkaddr, + SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M, + SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S, + (mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)); + roce_set_field(srq_context->byte_56_xrc_cqn, + SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S, + cqn); + roce_set_field(srq_context->byte_56_xrc_cqn, + SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M, + SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S, + hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(srq_context->byte_56_xrc_cqn, + SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M, + SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S, + hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET); + + roce_set_bit(srq_context->db_record_addr_record_en, + SRQC_BYTE_60_SRQ_RECORD_EN_S, 0); +} + +static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); + struct hns_roce_srq *srq = to_hr_srq(ibsrq); + struct hns_roce_srq_context *srq_context; + struct hns_roce_srq_context *srqc_mask; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + /* Resizing SRQs is not supported yet */ + if (srq_attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (srq_attr_mask & IB_SRQ_LIMIT) { + if (srq_attr->srq_limit >= srq->max) { + dev_err(hr_dev->dev, + "Modify SRQ failed: limit(%d) larger than max wr num(%d).\n", + srq_attr->srq_limit, 
srq->max); + return -EINVAL; + } + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + srq_context = mailbox->buf; + srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1; + + memset(srqc_mask, 0xff, sizeof(*srqc_mask)); + + roce_set_field(srq_context->byte_8_limit_wl, + SRQC_BYTE_8_SRQ_LIMIT_WL_M, + SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit); + roce_set_field(srqc_mask->byte_8_limit_wl, + SRQC_BYTE_8_SRQ_LIMIT_WL_M, + SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0); + + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0, + HNS_ROCE_CMD_MODIFY_SRQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + if (ret) { + dev_err(hr_dev->dev, + "MODIFY SRQ(0x%lx) cmd process error(%d).\n", + srq->srqn, ret); + return ret; + } + } + + return 0; +} + +int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); + struct hns_roce_srq *srq = to_hr_srq(ibsrq); + struct hns_roce_srq_context *srq_context; + struct hns_roce_cmd_mailbox *mailbox; + int limit_wl; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + srq_context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0, + HNS_ROCE_CMD_QUERY_SRQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (ret) { + dev_err(hr_dev->dev, "QUERY SRQ(0x%lx) cmd process error(%d).\n", + srq->srqn, ret); + goto out; + } + + limit_wl = roce_get_field(srq_context->byte_8_limit_wl, + SRQC_BYTE_8_SRQ_LIMIT_WL_M, + SRQC_BYTE_8_SRQ_LIMIT_WL_S); + + attr->srq_limit = limit_wl; + attr->max_wr = srq->max - 1; + attr->max_sge = srq->max_gs - srq->rsv_sge; + + memcpy(srq_context, mailbox->buf, sizeof(*srq_context)); + +out: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +int hns_roce_srqwq_overflow(struct hns_roce_srq *srq, int nreq) +{ + struct hns_roce_idx_que *idx_que = &srq->idx_que; + unsigned int cur; + + cur = idx_que->head - idx_que->tail; + return cur + nreq >= srq->max - 1; +} + +static int find_empty_entry(struct hns_roce_idx_que *idx_que, + unsigned long size) +{ + int wqe_idx; + + if (unlikely(bitmap_full(idx_que->bitmap, size))) + return -ENOSPC; + + wqe_idx = find_first_zero_bit(idx_que->bitmap, size); + + bitmap_set(idx_que->bitmap, wqe_idx, 1); + + return wqe_idx; +} + +static void fill_idx_queue(struct hns_roce_idx_que *idx_que, + int cur_idx, int wqe_idx) +{ + unsigned int *addr; + + addr = (unsigned int *)hns_roce_buf_offset(idx_que->idx_buf, + cur_idx * idx_que->entry_sz); + *addr = wqe_idx; +} + +#ifdef CONFIG_KERNEL_419 +static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, + const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr) +#else +static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, + struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +#endif +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); + struct hns_roce_srq *srq = to_hr_srq(ibsrq); + struct hns_roce_v2_wqe_data_seg *dseg; + struct hns_roce_v2_db srq_db; + unsigned long flags; + int ret = 0; + int wqe_idx; + u32 max_sge; + void *wqe; + int nreq; + int ind; + int i; + + spin_lock_irqsave(&srq->lock, flags); + + ind = srq->idx_que.head & (srq->max - 1); + max_sge = srq->max_gs - srq->rsv_sge; + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(wr->num_sge > max_sge)) { + dev_err(hr_dev->dev, + "srq(0x%lx) wr sge num(%d) exceed the max num %d.\n", + srq->srqn, wr->num_sge, max_sge); + ret = -EINVAL; + 
*bad_wr = wr; + break; + } + + if (unlikely(hns_roce_srqwq_overflow(srq, nreq))) { + dev_err(hr_dev->dev, "srq(0x%lx) head equals tail\n", + srq->srqn); + ret = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe_idx = find_empty_entry(&srq->idx_que, srq->max); + if (wqe_idx < 0) { + ret = -ENOMEM; + *bad_wr = wr; + break; + } + + fill_idx_queue(&srq->idx_que, ind, wqe_idx); + wqe = get_srq_wqe(srq, wqe_idx); + dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; + + for (i = 0; i < wr->num_sge; ++i) { + dseg[i].len = cpu_to_le32(wr->sg_list[i].length); + dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey); + dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr); + } + + if (srq->rsv_sge) { + dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); + dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); + dseg[i].addr = 0; + } + + srq->wrid[wqe_idx] = wr->wr_id; + ind = (ind + 1) & (srq->max - 1); + } + + if (likely(nreq)) { + srq->idx_que.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + srq_db.byte_4 = cpu_to_le32(HNS_ROCE_V2_SRQ_DB << + V2_DB_BYTE_4_CMD_S | + (srq->srqn & V2_DB_BYTE_4_TAG_M)); + srq_db.parameter = cpu_to_le32(srq->idx_que.head); + + hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); + + } + + spin_unlock_irqrestore(&srq->lock, flags); + + return ret; +} + +static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = { + .query_cqc_info = hns_roce_v2_query_cqc_info, + .query_qpc_info = hns_roce_v2_query_qpc_info, + .query_mpt_info = hns_roce_v2_query_mpt_info, + .query_cqc_stat = hns_roce_v2_query_cqc_stat, + .query_cmd_stat = hns_roce_v2_query_cmd_stat, + .query_ceqc_stat = hns_roce_v2_query_ceqc_stat, + .query_aeqc_stat = hns_roce_v2_query_aeqc_stat, + .query_qpc_stat = hns_roce_v2_query_qpc_stat, + .query_srqc_stat = hns_roce_v2_query_srqc_stat, + .query_mpt_stat = hns_roce_v2_query_mpt_stat, + .query_pkt_stat = hns_roce_v2_query_pkt_stat, + .modify_eq = hns_roce_v2_modify_eq, + +}; + static const struct hns_roce_hw hns_roce_hw_v2 = { .cmq_init = hns_roce_v2_cmq_init, .cmq_exit = hns_roce_v2_cmq_exit, @@ -5191,16 +7178,20 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .hw_exit = hns_roce_v2_exit, .post_mbox = hns_roce_v2_post_mbox, .chk_mbox = hns_roce_v2_chk_mbox, + .rst_prc_mbox = hns_roce_v2_rst_process_cmd, .set_gid = hns_roce_v2_set_gid, .set_mac = hns_roce_v2_set_mac, .write_mtpt = hns_roce_v2_write_mtpt, .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt, + .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt, + .mw_write_mtpt = hns_roce_v2_mw_write_mtpt, .write_cqc = hns_roce_v2_write_cqc, .set_hem = hns_roce_v2_set_hem, .clear_hem = hns_roce_v2_clear_hem, .modify_qp = hns_roce_v2_modify_qp, .query_qp = hns_roce_v2_query_qp, .destroy_qp = hns_roce_v2_destroy_qp, + .qp_flow_control_init = hns_roce_v2_qp_flow_control_init, .modify_cq = hns_roce_v2_modify_cq, .post_send = hns_roce_v2_post_send, .post_recv = hns_roce_v2_post_recv, @@ -5208,6 +7199,12 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .poll_cq = hns_roce_v2_poll_cq, .init_eq = hns_roce_v2_init_eq_table, .cleanup_eq = hns_roce_v2_cleanup_eq_table, + .create_workq = hns_roce_v2_create_workq, + .destroy_workq = hns_roce_v2_destroy_workq, + .write_srqc = hns_roce_v2_write_srqc, + .modify_srq = hns_roce_v2_modify_srq, + .query_srq = hns_roce_v2_query_srq, + .post_srq_recv = hns_roce_v2_post_srq_recv, }; static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = { @@ -5225,16 +7222,11 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); static int 
hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, struct hnae3_handle *handle) { - const struct pci_device_id *id; + struct hns_roce_v2_priv *priv = hr_dev->priv; int i; - id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev); - if (!id) { - dev_err(hr_dev->dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = &hns_roce_hw_v2; + hr_dev->dfx = &hns_roce_dfx_hw_v2; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->odb_offset = hr_dev->sdb_offset; @@ -5252,13 +7244,16 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, i + handle->rinfo.base_vector); /* cmd issue mode: 0 is poll, 1 is event */ - hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; + hr_dev->cmd_mod = 0; + hr_dev->loop_idc = loopback; + + hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle); + priv->handle = handle; return 0; } -static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) +static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev; int ret; @@ -5275,20 +7270,21 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) hr_dev->pci_dev = handle->pdev; hr_dev->dev = &handle->pdev->dev; - handle->priv = hr_dev; ret = hns_roce_hw_v2_get_cfg(hr_dev, handle); if (ret) { - dev_err(hr_dev->dev, "Get Configuration failed!\n"); + dev_err(hr_dev->dev, "Get Configuration failed(%d)!\n", ret); goto error_failed_get_cfg; } ret = hns_roce_init(hr_dev); if (ret) { - dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); + dev_err(hr_dev->dev, "RoCE Engine init failed(%d)!\n", ret); goto error_failed_get_cfg; } + handle->priv = hr_dev; + return 0; error_failed_get_cfg: @@ -5300,7 +7296,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) return ret; } -static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, +static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, bool reset) { struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; @@ -5308,29 +7304,167 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, if (!hr_dev) return; + handle->priv = NULL; + + hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; + hns_roce_handle_device_err(hr_dev); + hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); } +static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + const struct pci_device_id *id; + struct hns_roce_dev *hr_dev; + unsigned long end; + int ret; + + handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; + + if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; + goto head_chk_err; + } + + id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev); + if (!id) + return 0; + + ret = __hns_roce_hw_v2_init_instance(handle); + if (ret) { + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; + dev_err(&handle->pdev->dev, + "RoCE instance init failed! 
ret = %d\n", ret); + if (ops->ae_dev_resetting(handle) || + ops->get_hw_reset_stat(handle)) + goto head_chk_err; + else + return ret; + } + + handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; + + hr_dev = (struct hns_roce_dev *)handle->priv; + if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle) || + hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle)) { + handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; + goto tail_chk_err; + } + + return 0; + +tail_chk_err: + /* Wait until software reset process finished, in order to ensure that + * reset process and this function will not call + * __hns_roce_hw_v2_uninit_instance at the same time. + * If a timeout occurs, it indicates that the network subsystem has + * encountered a serious error and cannot be recovered from the reset + * processing. + */ + end = HNS_ROCE_V2_RST_PRC_MAX_TIME; + while (ops->ae_dev_resetting(handle) && end) { + msleep(20); + end -= 20; + } + + if (!ops->ae_dev_resetting(handle)) + dev_info(&handle->pdev->dev, "Device completed reset.\n"); + else { + dev_warn(&handle->pdev->dev, + "Device is still resetting! timeout!\n"); + WARN_ON(1); + } + + __hns_roce_hw_v2_uninit_instance(handle, false); + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; + +head_chk_err: + dev_err(&handle->pdev->dev, "Device is busy in resetting state.\n" + "please retry later.\n"); + + return -EBUSY; +} + +static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, + bool reset) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long end; + + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) + return; + + handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; + + /* Check the status of the current software reset process, if in + * software reset process, wait until software reset process finished, + * in order to ensure that reset process and this function will not call + * __hns_roce_hw_v2_uninit_instance at the same time. + * If a timeout occurs, it indicates that the network subsystem has + * encountered a serious error and cannot be recovered from the reset + * processing. + */ + if (ops->ae_dev_resetting(handle)) { + dev_warn(&handle->pdev->dev, + "Device is busy in resetting state. waiting.\n"); + end = HNS_ROCE_V2_RST_PRC_MAX_TIME; + while (ops->ae_dev_resetting(handle) && + end) { + msleep(20); + end -= 20; + } + + if (!ops->ae_dev_resetting(handle)) + dev_info(&handle->pdev->dev, + "Device completed reset.\n"); + else { + dev_warn(&handle->pdev->dev, + "Device is still resetting! 
timeout!\n"); + WARN_ON(1); + } + } + + __hns_roce_hw_v2_uninit_instance(handle, reset); + + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; +} + +static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_reset_state *state; + + state = (struct hns_roce_v2_reset_state *)hr_dev->reset_page; + + state->reset_state = HNS_ROCE_IS_RESETTING; + + /* Ensure reset state was flushed in memory */ + wmb(); +} + static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; - struct ib_event event; - if (!hr_dev) { - dev_err(&handle->pdev->dev, - "Input parameter handle->priv is NULL!\n"); - return -EINVAL; + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { + set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); + return 0; } - hr_dev->active = false; + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; + clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); + + if (!hr_dev) + return 0; + hr_dev->is_reset = true; + hr_dev->active = false; + hr_dev->dis_db = true; + hns_roce_v2_reset_notify_user(hr_dev); - event.event = IB_EVENT_DEVICE_FATAL; - event.device = &hr_dev->ib_dev; - event.element.port_num = 1; - ib_dispatch_event(&event); + hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; return 0; } @@ -5339,7 +7473,16 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) { int ret; - ret = hns_roce_hw_v2_init_instance(handle); + if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) { + clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; + return 0; + } + + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; + + dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); + ret = __hns_roce_hw_v2_init_instance(handle); if (ret) { /* when reset notify type is HNAE3_INIT_CLIENT In reset notify * callback function, RoCE Engine reinitialize. 
If RoCE reinit @@ -5348,6 +7491,10 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) handle->priv = NULL; dev_err(&handle->pdev->dev, "In reset process RoCE reinit failed %d.\n", ret); + } else { + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; + dev_info(&handle->pdev->dev, + "Reset done, RoCE client reinit finished.\n"); } return ret; @@ -5355,8 +7502,13 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) { + if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) + return 0; + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; + msleep(100); - hns_roce_hw_v2_uninit_instance(handle, false); + __hns_roce_hw_v2_uninit_instance(handle, false); + return 0; } @@ -5382,9 +7534,43 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle, return ret; } +static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle, + bool linkup) +{ + struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; + struct net_device *netdev = handle->rinfo.netdev; + struct ib_event event; + unsigned long flags; + u8 phy_port; + + if (linkup || !hr_dev) + return; + + for (phy_port = 0; phy_port < hr_dev->caps.num_ports; phy_port++) + if (netdev == hr_dev->iboe.netdevs[phy_port]) + break; + + if (phy_port == hr_dev->caps.num_ports) + return; + + spin_lock_irqsave(&hr_dev->iboe.lock, flags); + if (hr_dev->iboe.last_port_state[phy_port] == IB_PORT_DOWN) { + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + return; + } + hr_dev->iboe.last_port_state[phy_port] = IB_PORT_DOWN; + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + + event.device = &hr_dev->ib_dev; + event.element.port_num = to_rdma_port_num(phy_port); + event.event = IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + static const struct hnae3_client_ops hns_roce_hw_v2_ops = { .init_instance = hns_roce_hw_v2_init_instance, .uninit_instance = hns_roce_hw_v2_uninit_instance, + .link_status_change = hns_roce_hw_v2_link_status_change, .reset_notify = hns_roce_hw_v2_reset_notify, }; @@ -5408,7 +7594,11 @@ module_init(hns_roce_hw_v2_init); module_exit(hns_roce_hw_v2_exit); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu "); -MODULE_AUTHOR("Lijun Ou "); -MODULE_AUTHOR("Shaobo Xu "); +MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver"); +module_param(loopback, int, 0444); +MODULE_PARM_DESC(loopback, "default: 0"); +module_param(qp_lock, bool, 0444); +MODULE_PARM_DESC(qp_lock, "default: true"); +module_param(cq_lock, bool, 0444); +MODULE_PARM_DESC(cq_lock, "default: true"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 14aa308befef913d6c62d6c21c345f7c45ccbc87..fe030f20ef39438ffb1e1a2340244128a55912f8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,7 +35,16 @@ #include +#define HNS_ROCE_CLOCK_ADJUST 1000 +#define HNS_ROCE_MAX_CQ_PERIOD 65 +#define HNS_ROCE_MAX_EQ_PERIOD 65 +#define HNS_ROCE_QP_TIMEOUT_OFFSET 10 + +#define HNS_ROCE_1US_CFG 999 +#define HNS_ROCE_1NS_CFG 0 + #define HNS_ROCE_VF_QPC_BT_NUM 256 +#define HNS_ROCE_VF_SCCC_BT_NUM 64 #define HNS_ROCE_VF_SRQC_BT_NUM 64 #define HNS_ROCE_VF_CQC_BT_NUM 64 #define HNS_ROCE_VF_MPT_BT_NUM 64 @@ -44,12 +53,22 @@ #define HNS_ROCE_VF_SGID_NUM 32 #define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_V2_MAX_QP_NUM 0x2000 +#define HNS_ROCE_V2_MAX_QP_NUM 0x100000 +#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000 -#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000 +#define HNS_ROCE_V2_MAX_SRQ 0x100000 +#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 +#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100 +#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 +#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 +#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 +#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 +#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 +/* reserve one sge to circumvent a hardware issue */ #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff +#define HNS_ROCE_V2_MAX_SRQ_SGE_NUM 0x100 +#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 @@ -57,10 +76,13 @@ #define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 -#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000 +#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 +#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 +#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 +#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000 #define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128 #define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128 #define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64 @@ -69,25 +91,55 @@ #define HNS_ROCE_V2_QPC_ENTRY_SZ 256 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 #define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 +#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 +#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 +#define HNS_ROCE_V2_IDX_ENTRY_SZ 4 #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 +#define HNS_ROCE_V2_SCC_CTX_ENTRY_SZ 32 +#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE +#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 -#define HNS_ROCE_INVALID_LKEY 0x100 +#define HNS_ROCE_INVALID_LKEY 0x0 +#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 +#define HNS_ROCE_V2_RSV_QPS 8 + +/* Time out for hardware to complete reset */ +#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 
+ +#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20 + +/* The longest time for software reset process in NIC subsystem, if a timeout + * occurs, it indicates that the network subsystem has encountered a serious + * error and cannot be recovered from the reset processing. + */ +#define HNS_ROCE_V2_RST_PRC_MAX_TIME 300000 #define HNS_ROCE_CONTEXT_HOP_NUM 1 +#define HNS_ROCE_SCC_CTX_HOP_NUM 1 #define HNS_ROCE_MTT_HOP_NUM 1 #define HNS_ROCE_CQE_HOP_NUM 1 +#define HNS_ROCE_SRQWQE_HOP_NUM 1 +#define HNS_ROCE_SQWQE_HOP_NUM 2 +#define HNS_ROCE_EXT_SGE_HOP_NUM 1 +#define HNS_ROCE_RQWQE_HOP_NUM 2 + +#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6 +#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2 + #define HNS_ROCE_PBL_HOP_NUM 2 #define HNS_ROCE_EQE_HOP_NUM 2 +#define HNS_ROCE_IDX_HOP_NUM 1 +#define HNS_ROCE_MEM_PAGE_SUPPORT_8K 2 -#define HNS_ROCE_V2_GID_INDEX_NUM 256 +#define HNS_ROCE_V2_GID_INDEX_NUM 32 -#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18) +#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18) #define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT 0 #define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT 1 @@ -104,16 +156,38 @@ #define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT) #define HNS_ROCE_CMQ_DESC_NUM_S 3 -#define HNS_ROCE_CMQ_EN_B 16 -#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B) + +#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 100 #define check_whether_last_step(hop_num, step_idx) \ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \ (step_idx == 1 && hop_num == 1) || \ (step_idx == 2 && hop_num == 2)) -#define CMD_CSQ_DESC_NUM 1024 -#define CMD_CRQ_DESC_NUM 1024 +#define V2_QP_SUPPORT_STATE(cur_state, new_state) \ + ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) || \ + (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) || \ + (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) || \ + (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) || \ + (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) || \ + (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_SQD && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_SQE && new_state == IB_QPS_RESET) || \ + (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || \ + (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || \ + (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || \ + (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) || \ + (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) || \ + (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) + +#define HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT 0 +#define HNS_ICL_SWITCH_CMD_ROCEE_SEL BIT(HNS_ICL_SWITCH_CMD_ROCEE_SEL_SHIFT) + +#define CMD_CSQ_DESC_NUM 1024 +#define CMD_CRQ_DESC_NUM 1024 enum { NO_ARMED = 0x0, @@ -208,9 +282,30 @@ enum hns_roce_opcode_type { HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401, HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403, HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404, + HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406, + HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407, + HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408, HNS_ROCE_OPC_CFG_SGID_TB = 0x8500, HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501, + HNS_ROCE_OPC_POST_MB = 0x8504, + HNS_ROCE_OPC_QUERY_MB_ST = 0x8505, HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506, + HNS_ROCE_OPC_FUNC_CLEAR = 0x8508, + HNS_ROCE_OPC_SCC_CTX_CLR = 0x8509, + HNS_ROCE_OPC_QUERY_SCC_CTX = 0x850a, + HNS_ROCE_OPC_RESET_SCC_CTX = 0x850b, + HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO = 0x850d, + HNS_QUERY_FW_VER = 
0x0001, + HNS_SWITCH_PARAMETER_CFG = 0x1033, + + /* DFx command */ + HNS_ROCE_OPC_CNT_SNAP = 0x8006, + HNS_ROCE_OPC_QUEYR_PKT_CNT = 0x8200, + HNS_ROCE_OPC_QUEYR_CQE_CNT = 0x8201, + HNS_ROCE_OPC_QUEYR_MBDB_CNT = 0x8202, + HNS_ROCE_OPC_QUEYR_CNP_RX_CNT = 0x8203, + HNS_ROCE_OPC_QUEYR_CNP_TX_CNT = 0x8204, + HNS_ROCE_OPC_QUEYR_MDB_DFX = 0x8300, }; enum { @@ -223,6 +318,7 @@ enum hns_roce_cmd_return_status { CMD_NO_AUTH = 1, CMD_NOT_EXEC = 2, CMD_QUEUE_FULL = 3, + CMD_EXEC_TIMEOUT = 8, }; enum hns_roce_sgid_type { @@ -322,8 +418,93 @@ struct hns_roce_v2_cq_context { #define V2_CQC_BYTE_64_SE_CQE_IDX_S 0 #define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0) -enum{ +struct hns_roce_srq_context { + __le32 byte_4_srqn_srqst; + __le32 byte_8_limit_wl; + __le32 byte_12_xrcd; + __le32 byte_16_pi_ci; + __le32 wqe_bt_ba; /* Aligned with 8B, so store [:3] */ + __le32 byte_24_wqe_bt_ba; + __le32 byte_28_rqws_pd; + __le32 idx_bt_ba; /* Aligned with 8B, so store [:3] */ + __le32 rsv_idx_bt_ba; + __le32 idx_cur_blk_addr; + __le32 byte_44_idxbufpgsz_addr; + __le32 idx_nxt_blk_addr; + __le32 rsv_idxnxtblkaddr; + __le32 byte_56_xrc_cqn; + __le32 db_record_addr_record_en; + __le32 db_record_addr; +}; + +#define SRQC_BYTE_4_SRQ_ST_S 0 +#define SRQC_BYTE_4_SRQ_ST_M GENMASK(1, 0) + +#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S 2 +#define SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M GENMASK(3, 2) + +#define SRQC_BYTE_4_SRQ_SHIFT_S 4 +#define SRQC_BYTE_4_SRQ_SHIFT_M GENMASK(7, 4) + +#define SRQC_BYTE_4_SRQN_S 8 +#define SRQC_BYTE_4_SRQN_M GENMASK(31, 8) + +#define SRQC_BYTE_8_SRQ_LIMIT_WL_S 0 +#define SRQC_BYTE_8_SRQ_LIMIT_WL_M GENMASK(15, 0) + +#define SRQC_BYTE_12_SRQ_XRCD_S 0 +#define SRQC_BYTE_12_SRQ_XRCD_M GENMASK(23, 0) + +#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_S 0 +#define SRQC_BYTE_16_SRQ_PRODUCER_IDX_M GENMASK(15, 0) + +#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_S 0 +#define SRQC_BYTE_16_SRQ_CONSUMER_IDX_M GENMASK(31, 16) + +#define SRQC_BYTE_24_SRQ_WQE_BT_BA_S 0 +#define SRQC_BYTE_24_SRQ_WQE_BT_BA_M GENMASK(28, 0) + +#define SRQC_BYTE_28_PD_S 0 +#define SRQC_BYTE_28_PD_M GENMASK(23, 0) + +#define SRQC_BYTE_28_RQWS_S 24 +#define SRQC_BYTE_28_RQWS_M GENMASK(27, 24) + +#define SRQC_BYTE_36_SRQ_IDX_BT_BA_S 0 +#define SRQC_BYTE_36_SRQ_IDX_BT_BA_M GENMASK(28, 0) + +#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S 0 +#define SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M GENMASK(19, 0) + +#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S 22 +#define SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M GENMASK(23, 22) + +#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S 24 +#define SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M GENMASK(27, 24) + +#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S 28 +#define SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M GENMASK(31, 28) + +#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S 0 +#define SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M GENMASK(19, 0) + +#define SRQC_BYTE_56_SRQ_XRC_CQN_S 0 +#define SRQC_BYTE_56_SRQ_XRC_CQN_M GENMASK(23, 0) + +#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S 24 +#define SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M GENMASK(27, 24) + +#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S 28 +#define SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M GENMASK(31, 28) + +#define SRQC_BYTE_60_SRQ_RECORD_EN_S 0 + +#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_S 1 +#define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_M GENMASK(31, 1) + +enum { V2_MPT_ST_VALID = 0x1, + V2_MPT_ST_FREE = 0x2, }; enum hns_roce_v2_qp_state { @@ -331,8 +512,8 @@ enum hns_roce_v2_qp_state { HNS_ROCE_QP_ST_INIT, HNS_ROCE_QP_ST_RTR, HNS_ROCE_QP_ST_RTS, - HNS_ROCE_QP_ST_SQER, HNS_ROCE_QP_ST_SQD, + HNS_ROCE_QP_ST_SQER, HNS_ROCE_QP_ST_ERR, HNS_ROCE_QP_ST_SQ_DRAINING, HNS_ROCE_QP_NUM_ST @@ 
-340,7 +521,7 @@ enum hns_roce_v2_qp_state { struct hns_roce_v2_qp_context { __le32 byte_4_sqpn_tst; - __le32 wqe_sge_ba; + __le32 wqe_sge_ba; /* Aligned with 8B, so store [:3] */ __le32 byte_12_sq_hop; __le32 byte_16_buf_ba_pg_sz; __le32 byte_20_smac_sgid_idx; @@ -350,7 +531,7 @@ struct hns_roce_v2_qp_context { __le32 dmac; __le32 byte_52_udpspn_dmac; __le32 byte_56_dqpn_err; - __le32 byte_60_qpst_mapid; + __le32 byte_60_qpst_tempid; __le32 qkey_xrcd; __le32 byte_68_rq_db; __le32 rq_db_record_addr; @@ -368,7 +549,7 @@ struct hns_roce_v2_qp_context { __le32 rx_rkey_pkt_info; __le64 rx_va; __le32 byte_132_trrl; - __le32 trrl_ba; + __le32 trrl_ba; /* Aligned with 64B, but store [:4] */ __le32 byte_140_raq; __le32 byte_144_raq; __le32 byte_148_raq; @@ -385,7 +566,7 @@ struct hns_roce_v2_qp_context { __le32 byte_192_ext_sge; __le32 byte_196_sq_psn; __le32 byte_200_sq_max; - __le32 irrl_ba; + __le32 irrl_ba; /* Aligned with 64B, so store [:6] */ __le32 byte_208_irrl; __le32 byte_212_lsn; __le32 sq_timer; @@ -492,26 +673,15 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_56_LP_PKTN_INI_S 28 #define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28) -#define V2_QPC_BYTE_60_MAPID_S 0 -#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0) - -#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13 - -#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14 - -#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15 +#define V2_QPC_BYTE_60_TEMPID_S 0 +#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0) -#define V2_QPC_BYTE_60_TEMPID_S 16 -#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16) +#define V2_QPC_BYTE_60_SCC_TOKEN_S 8 +#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8) -#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23 +#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27 -#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24 -#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24) - -#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27 - -#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28 +#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28 #define V2_QPC_BYTE_60_QP_ST_S 29 #define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29) @@ -534,6 +704,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_76_RQIE_S 28 +#define V2_QPC_BYTE_76_EXT_ATE_S 29 + +#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30 +#define V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S 31 #define V2_QPC_BYTE_80_RX_CQN_S 0 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0) @@ -588,7 +762,7 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_140_RR_MAX_S 12 #define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12) -#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15 +#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15 #define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16 #define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16) @@ -599,8 +773,6 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0 #define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0) -#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24 - #define V2_QPC_BYTE_144_RAQ_CREDIT_S 25 #define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25) @@ -612,8 +784,8 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24 #define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24) -#define V2_QPC_BYTE_152_RAQ_PSN_S 8 -#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8) +#define V2_QPC_BYTE_152_RAQ_PSN_S 0 +#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0) #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24 #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24) @@ -637,9 +809,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_168_LP_SGEN_INI_S 22 #define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22) -#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24 
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24) - +#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24 +#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25 +#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26 +#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27 #define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28 #define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28) @@ -725,6 +898,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20 #define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20) +#define V2_QPC_BYTE_232_SO_LP_VLD_S 29 +#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30 +#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31 + #define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0 #define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0) @@ -743,6 +920,9 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_244_RNR_CNT_S 27 #define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27) +#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30 +#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31 + #define V2_QPC_BYTE_248_IRRL_PSN_S 0 #define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0) @@ -771,6 +951,10 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16 #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16) +#define V2_QP_RWE_S 1 /* rdma write enable */ +#define V2_QP_RRE_S 2 /* rdma read enable */ +#define V2_QP_ATE_S 3 /* rdma atomic enable */ + struct hns_roce_v2_cqe { __le32 byte_4; union { @@ -818,6 +1002,11 @@ struct hns_roce_v2_cqe { #define V2_CQE_BYTE_28_PORT_TYPE_S 16 #define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16) +#define V2_CQE_BYTE_28_VID_S 18 +#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18) + +#define V2_CQE_BYTE_28_VID_VLD_S 30 + #define V2_CQE_BYTE_32_RMT_QPN_S 0 #define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0) @@ -842,7 +1031,7 @@ struct hns_roce_v2_mpt_entry { __le32 va_l; __le32 va_h; __le32 pbl_size; - __le32 pbl_ba_l; + __le32 pbl_ba_l; /* Aligned with 8B, so store [:3] */ __le32 byte_48_mode_ba; __le32 pa0_l; __le32 byte_56_pa0_h; @@ -878,8 +1067,19 @@ struct hns_roce_v2_mpt_entry { #define V2_MPT_BYTE_8_LW_EN_S 7 +#define V2_MPT_BYTE_8_MW_CNT_S 8 +#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8) + +#define V2_MPT_BYTE_12_FRE_S 0 + #define V2_MPT_BYTE_12_PA_S 1 +#define V2_MPT_BYTE_12_MR_MW_S 4 + +#define V2_MPT_BYTE_12_BPD_S 5 + +#define V2_MPT_BYTE_12_BQP_S 6 + #define V2_MPT_BYTE_12_INNER_PA_VLD_S 7 #define V2_MPT_BYTE_12_MW_BIND_QPN_S 8 @@ -988,6 +1188,8 @@ struct hns_roce_v2_ud_send_wqe { #define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24 #define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24) +#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30 + #define V2_UD_SEND_WQE_BYTE_40_LBI_S 31 #define V2_UD_SEND_WQE_DMAC_0_S 0 @@ -1042,6 +1244,16 @@ struct hns_roce_v2_rc_send_wqe { #define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12 +#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19 + +#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20 + +#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21 + +#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22 + +#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23 + #define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0 #define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0) @@ -1051,6 +1263,16 @@ struct hns_roce_v2_rc_send_wqe { #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 #define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) +struct hns_roce_wqe_frmr_seg { + __le32 pbl_size; + __le32 mode_buf_pg_sz; +}; + +#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4 +#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4) + +#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8 + struct hns_roce_v2_wqe_data_seg { __le32 len; __le32 lkey; @@ -1068,6 +1290,192 @@ struct 
hns_roce_query_version { __le32 rsv[5]; }; +struct hns_roce_query_fw_info { + __le32 fw_ver; + __le32 rsv[5]; +}; + +struct hns_roce_func_clear { + __le32 rst_funcid_en; + __le32 func_done; + __le32 rsv[4]; +}; + +struct hns_roce_pf_func_info { + __le32 pf_own_func_num; + __le32 pf_own_mac_id; + __le32 rsv[4]; +}; + +#define FUNC_CLEAR_RST_FUN_EN_S 8 + +#define FUNC_CLEAR_RST_FUN_DONE_S 0 + +#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (512 * 100 - 40) + +#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5 +struct hns_roce_query_pf_caps_a { + u8 number_ports; + u8 local_ca_ack_delay; + __le16 max_sq_sg; + __le16 max_sq_inline; + __le16 max_rq_sg; + __le32 max_extend_sg; + __le16 num_qpc_timer; + __le16 num_cqc_timer; + __le16 max_srq_sges; + u8 num_aeq_vectors; + u8 num_other_vectors; + u8 max_sq_desc_sz; + u8 max_rq_desc_sz; + u8 max_srq_desc_sz; + u8 cq_entry_sz; +}; + +struct hns_roce_query_pf_caps_b { + u8 mtpt_entry_sz; + u8 irrl_entry_sz; + u8 trrl_entry_sz; + u8 cqc_entry_sz; + u8 srqc_entry_sz; + u8 idx_entry_sz; + u8 scc_ctx_entry_sz; + u8 max_mtu; + __le16 qpc_entry_sz; + __le16 qpc_timer_entry_sz; + __le16 cqc_timer_entry_sz; + u8 min_cqes; + u8 min_wqes; + __le32 page_size_cap; + u8 pkey_table_len; + u8 phy_num_uars; + u8 ctx_hop_num; + u8 pbl_hop_num; +}; + +struct hns_roce_query_pf_caps_c { + __le32 cap_flags_num_pds; + __le32 max_gid_num_cqs; + __le32 cq_depth; + __le32 num_mrws; + __le32 ord_num_qps; + __le16 sq_depth; + __le16 rq_depth; +}; + +#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0 +#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20 +#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20) + +#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0 +#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20 +#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20) + +#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0 +#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0) + +#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0 +#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0 +#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_C_MAX_ORD_S 20 +#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20) + +struct hns_roce_query_pf_caps_d { + __le32 wq_hop_num_max_srqs; + __le16 srq_depth; + __le16 rsv; + __le32 num_ceqs_ceq_depth; + __le32 arm_st_aeq_depth; + __le32 num_uars_rsv_pds; + __le32 rsv_uars_rsv_qps; +}; + +#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0 +#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20 +#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20) + +#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22 +#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22) + +#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24 +#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24) + + +#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0 +#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0) + +#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22 +#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 22) + +#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0 +#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0) + +#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22 +#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22) + +#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24 +#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24) + +#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0 +#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20 +#define 
V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20) + +#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0 +#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20 +#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20) + +struct hns_roce_query_pf_caps_e { + __le32 chunk_size_shift_rsv_mrws; + __le32 rsv_cqs; + __le32 rsv_srqs; + __le32 rsv_lkey; + __le16 ceq_max_cnt; + __le16 ceq_period; + __le16 aeq_max_cnt; + __le16 aeq_period; +}; + +#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0 +#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20 +#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20) + +#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0 +#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0 +#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0) + +#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0 +#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0) + +#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40 +#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20 + +#define QUERY_PF_RES_CMDQ_DESC_NUM 2 +#define QUERY_PF_TIMER_RES_CMDQ_DESC_NUM 2 +#define ALLOC_VF_RES_CMDQ_DESC_NUM 2 +#define CONFIG_LLM_CMDQ_DESC_NUM 2 + +/* TSQ and RAQ each account for 4B */ +#define QP_EX_DB_SIZE 8 +#define CQ_EX_DB_SIZE 4 +#define TIMEOUT_POLL_QUEUE_NUM 4 + struct hns_roce_cfg_llm_a { __le32 base_addr_l; __le32 base_addr_h; @@ -1157,7 +1565,8 @@ struct hns_roce_pf_res_b { __le32 smac_idx_num; __le32 sgid_idx_num; __le32 qid_idx_sl_num; - __le32 rsv[2]; + __le32 scc_ctx_bt_idx_num; + __le32 rsv; }; #define PF_RES_DATA_1_PF_SMAC_IDX_S 0 @@ -1178,6 +1587,31 @@ struct hns_roce_pf_res_b { #define PF_RES_DATA_3_PF_SL_NUM_S 16 #define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16) +#define PF_RES_DATA_4_PF_SCC_CTX_BT_IDX_S 0 +#define PF_RES_DATA_4_PF_SCC_CTX_BT_IDX_M GENMASK(8, 0) + +#define PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_S 9 +#define PF_RES_DATA_4_PF_SCC_CTX_BT_NUM_M GENMASK(17, 9) + +struct hns_roce_pf_timer_res_a { + __le32 rsv0; + __le32 qpc_timer_bt_idx_num; + __le32 cqc_timer_bt_idx_num; + __le32 rsv[3]; +}; + +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0 +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0) + +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16 +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16) + +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0 +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0) + +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16 +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16) + struct hns_roce_vf_res_a { __le32 vf_id; __le32 vf_qpc_bt_idx_num; @@ -1222,7 +1656,8 @@ struct hns_roce_vf_res_b { __le32 vf_smac_idx_num; __le32 vf_sgid_idx_num; __le32 vf_qid_idx_sl_num; - __le32 rsv[2]; + __le32 vf_sccc_idx_num; + __le32 rsv1; }; #define VF_RES_B_DATA_0_VF_ID_S 0 @@ -1246,12 +1681,49 @@ struct hns_roce_vf_res_b { #define VF_RES_B_DATA_3_VF_SL_NUM_S 16 #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16) +#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0 +#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0) + +#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9 +#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9) + +struct hns_roce_vf_switch { + __le32 rocee_sel; + __le32 fun_id; + __le32 cfg; + __le32 resv1; + __le32 resv2; + __le32 resv3; +}; + +#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3 +#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3) + +#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1 +#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2 +#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3 
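/*
 * Illustrative sketch, not part of the patch: how the *_S (shift) and *_M
 * (GENMASK) pairs defined throughout this header are consumed. In the
 * driver, roce_set_field()/roce_get_field() perform the same mask-and-shift
 * packing on little-endian context words; the helpers below are simplified
 * stand-ins that operate on a plain host-endian u32 and can be compiled and
 * run as an ordinary C program.
 */
#include <assert.h>
#include <stdint.h>

#define GENMASK32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

/* Same layout as VF_SWITCH_DATA_FUN_ID_VF_ID_S/_M: the VF id occupies bits 10:3. */
#define FUN_ID_VF_ID_S	3
#define FUN_ID_VF_ID_M	GENMASK32(10, 3)

static void set_field(uint32_t *word, uint32_t mask, int shift, uint32_t val)
{
	/* Clear the field, then merge in the new value, bounded by the mask. */
	*word = (*word & ~mask) | ((val << shift) & mask);
}

static uint32_t get_field(uint32_t word, uint32_t mask, int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t fun_id = 0;

	set_field(&fun_id, FUN_ID_VF_ID_M, FUN_ID_VF_ID_S, 5);
	assert(get_field(fun_id, FUN_ID_VF_ID_M, FUN_ID_VF_ID_S) == 5);

	return 0;
}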
+ +struct hns_roce_post_mbox { + __le32 in_param_l; + __le32 in_param_h; + __le32 out_param_l; + __le32 out_param_h; + __le32 cmd_tag; + __le32 token_event_en; +}; + +struct hns_roce_mbox_status { + __le32 mb_status_hw_run; + __le32 rsv[5]; +}; + struct hns_roce_cfg_bt_attr { __le32 vf_qpc_cfg; __le32 vf_srqc_cfg; __le32 vf_cqc_cfg; __le32 vf_mpt_cfg; - __le32 rsv[2]; + __le32 vf_scc_ctx_cfg; + __le32 rsv; }; #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0 @@ -1290,6 +1762,15 @@ struct hns_roce_cfg_bt_attr { #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8 #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8) +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_S 0 +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BA_PGSZ_M GENMASK(3, 0) + +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_S 4 +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_BUF_PGSZ_M GENMASK(7, 4) + +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_S 8 +#define CFG_BT_ATTR_DATA_4_VF_SCC_CTX_HOPNUM_M GENMASK(9, 8) + struct hns_roce_cfg_sgid_tb { __le32 table_idx_rsv; __le32 vf_sgid_l; @@ -1329,17 +1810,8 @@ struct hns_roce_cmq_desc { #define HNS_ROCE_HW_RUN_BIT_SHIFT 31 #define HNS_ROCE_HW_MB_STATUS_MASK 0xFF -#define HNS_ROCE_VF_MB4_TAG_MASK 0xFFFFFF00 -#define HNS_ROCE_VF_MB4_TAG_SHIFT 8 - -#define HNS_ROCE_VF_MB4_CMD_MASK 0xFF -#define HNS_ROCE_VF_MB4_CMD_SHIFT 0 - -#define HNS_ROCE_VF_MB5_EVENT_MASK 0x10000 -#define HNS_ROCE_VF_MB5_EVENT_SHIFT 16 - -#define HNS_ROCE_VF_MB5_TOKEN_MASK 0xFFFF -#define HNS_ROCE_VF_MB5_TOKEN_SHIFT 0 +#define HNS_ROCE_MB_TAG_S 8 +#define HNS_ROCE_MB_EVENT_EN_S 16 struct hns_roce_v2_cmq_ring { dma_addr_t desc_dma_addr; @@ -1375,8 +1847,8 @@ struct hns_roce_link_table { }; struct hns_roce_link_table_entry { - u32 blk_ba0; - u32 blk_ba1_nxt_ptr; + __le32 blk_ba0; /* Aligned with 4KB regardless of kernel page size */ + __le32 blk_ba1_nxt_ptr; }; #define HNS_ROCE_LINK_TABLE_BA1_S 0 #define HNS_ROCE_LINK_TABLE_BA1_M GENMASK(19, 0) @@ -1384,10 +1856,18 @@ struct hns_roce_link_table_entry { #define HNS_ROCE_LINK_TABLE_NXT_PTR_S 20 #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20) +#define HNS_ROCE_V2_UAR_BUF_SIZE 4096 + +struct hns_roce_v2_reset_state { + u32 reset_state; /* stored to use in user space */ +}; + struct hns_roce_v2_priv { + struct hnae3_handle *handle; struct hns_roce_v2_cmq cmq; struct hns_roce_link_table tsq; struct hns_roce_link_table tpq; + struct hns_roce_buf_list uar; }; struct hns_roce_eq_context { @@ -1425,6 +1905,9 @@ struct hns_roce_eq_context { #define HNS_ROCE_V2_EQ_ARMED 1 #define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3 +#define HNS_ROCE_V2_EQ_DEFAULT_INTERVAL 0x0 +#define HNS_ROCE_V2_EQ_DEFAULT_BURST_NUM 0x0 + #define HNS_ROCE_EQ_INIT_EQE_CNT 0 #define HNS_ROCE_EQ_INIT_PROD_IDX 0 #define HNS_ROCE_EQ_INIT_REPORT_TIMER 0 @@ -1435,8 +1918,8 @@ struct hns_roce_eq_context { #define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31 #define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31 -#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000 -#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000 +#define HNS_ROCE_V2_COMP_EQE_NUM 0x2000 +#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x2000 #define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0 #define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1 @@ -1564,4 +2047,143 @@ struct hns_roce_eq_context { #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0 #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0) +#define MAX_SERVICE_LEVEL 0x7 + +struct hns_roce_wqe_atomic_seg { + __le64 fetchadd_swap_data; + __le64 cmp_data; +}; + +struct hns_roce_query_mbdb_cnt { + __le32 mailbox_issue_cnt; + __le32 mailbox_exe_cnt; + __le32 doorbell_issue_cnt; + __le32 doorbell_exe_cnt; 
+ __le32 eq_doorbell_issue_cnt; + __le32 eq_doorbell_exe_cnt; +}; + +struct rdfx_cnt_snap { + __le32 data_0; + __le32 rsv[5]; +}; + +struct rdfx_query_pkt_cnt { + __le32 rc_pkt_num; + __le32 uc_pkt_num; + __le32 ud_pkt_num; + __le32 xrc_pkt_num; + __le32 total_pkt_num; + __le32 error_pkt_num; +}; + +struct rdfx_query_cqe_cnt { + __le32 port0_cqe; + __le32 port1_cqe; + __le32 port2_cqe; + __le32 port3_cqe; + __le32 rsv[2]; +}; + +struct rdfx_query_cnp_rx_cnt { + __le32 port0_cnp_rx; + __le32 port1_cnp_rx; + __le32 port2_cnp_rx; + __le32 port3_cnp_rx; + __le32 rsv[2]; +}; + +struct rdfx_query_cnp_tx_cnt { + __le32 port0_cnp_tx; + __le32 port1_cnp_tx; + __le32 port2_cnp_tx; + __le32 port3_cnp_tx; + __le32 rsv[2]; +}; + +#define HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE 1024 +#define hns_roce_v2_sysfs_print(out, cur, fmt, ...) do {\ + if (cur < HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE) { \ + cur += snprintf(out + cur, \ + HNS_ROCE_V2_SYSFS_BUF_MAX_SIZE - cur,\ + fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_srqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_qpc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_pkt_stat(struct hns_roce_dev *hr_dev, + char *buf, int *buff_size); +int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_cmd_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_query_cqc_stat(struct hns_roce_dev *hr_dev, + char *buf, int *desc); +int hns_roce_v2_modify_eq(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, + u16 eq_count, u16 eq_period, u16 type); + +int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, + int *buffer); +int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer); +int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key, + int *buffer); +void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc, + enum hns_roce_opcode_type opcode, + bool is_read); +int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num); + +#ifdef CONFIG_INFINIBAND_HNS_DFX +#ifdef CONFIG_KERNEL_419 +void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, + unsigned int ind, void *wqe, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + const struct ib_send_wr *wr); + + +#else +void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, + unsigned int ind, void *wqe, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + struct ib_send_wr *wr); +#endif + +void rdfx_set_cqe_info(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + struct hns_roce_v2_cqe *cqe); +#else +#define rdfx_set_cqe_info(hr_dev, hr_cq, cqe) +#define rdfx_cp_sq_wqe_buf(hr_dev, qp, ind, wqe, rc_sq_wqe, wr) +#endif + +#define HNS_ROCE_V2_SCC_CTX_DONE_S 0 + +struct hns_roce_scc_ctx_clr { + __le32 rocee_scc_ctx_clr_qpn; + __le32 rsv[5]; +}; + +struct hns_roce_scc_ctx_clr_done { + __le32 rocee_scc_ctx_clr_done; + __le32 rsv[5]; +}; + +static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2], + void __iomem *dest) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle)) + 
hns_roce_write64_k(val, dest); +} + #endif diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c new file mode 100644 index 0000000000000000000000000000000000000000..5946e5e148639137dbc04cd142ba9f5559141ac5 --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +// Copyright (c) 2018 Hisilicon Limited. + +#include "hnae3.h" +#include "hns_roce_device.h" +#include "hns_roce_cmd.h" +#include "hns_roce_hw_v2.h" + +int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, + int *buffer) +{ + struct hns_roce_v2_cq_context *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_QUERY_CQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (ret) { + dev_err(hr_dev->dev, "QUERY cqc cmd process error\n"); + goto err_mailbox; + } + + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + + +int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer) +{ + struct hns_roce_v2_qp_context *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_QUERY_QPC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (ret) { + dev_err(hr_dev->dev, "QUERY qpc cmd process error\n"); + goto err_mailbox; + } + + context = mailbox->buf; + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key, + int *buffer) +{ + struct hns_roce_v2_mpt_entry *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key_to_hw_index(key), + 0, HNS_ROCE_CMD_QUERY_MPT, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (ret) { + dev_err(hr_dev->dev, "QUERY mpt cmd process error\n"); + goto err_mailbox; + } + + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index c5cae9a38c0443e41e4275577e5392bfc1b6f968..f3112683b9b248c3141981db0f9343d74fc84c49 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -30,9 +30,22 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include "roce_k_compat.h" + #include #include #include +#include +#ifdef HAVE_LINUX_MM_H +#include +#else +#include +#endif +#ifdef HAVE_LINUX_SCHED_H +#include +#else +#include +#endif #include #include #include @@ -64,31 +77,45 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr) u8 phy_port; u32 i = 0; - if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM)) + if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN)) return 0; - for (i = 0; i < MAC_ADDR_OCTET_NUM; i++) + for (i = 0; i < ETH_ALEN; i++) hr_dev->dev_addr[port][i] = addr[i]; phy_port = hr_dev->iboe.phy_port[port]; return hr_dev->hw->set_mac(hr_dev, phy_port, addr); } +#ifdef CONFIG_NEW_KERNEL +#ifdef CONFIG_KERNEL_419 static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) +#else +static int hns_roce_add_gid(const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +#endif { struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); u8 port = attr->port_num - 1; - unsigned long flags; int ret; - if (port >= hr_dev->caps.num_ports) + if (port >= hr_dev->caps.num_ports || + attr->index > hr_dev->caps.gid_table_len[port]) { + dev_err(hr_dev->dev, "add gid failed. port - %u, index - %u\n", + port, attr->index); return -EINVAL; + } - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - +#ifdef CONFIG_KERNEL_419 ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); +#else + ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, + (union ib_gid *)gid, attr); +#endif - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + if (ret) + dev_err(hr_dev->dev, "set gid failed(%d), index = %u", ret, + attr->index); return ret; } @@ -98,48 +125,128 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); struct ib_gid_attr zattr = { }; u8 port = attr->port_num - 1; - unsigned long flags; int ret; - if (port >= hr_dev->caps.num_ports) + if (port >= hr_dev->caps.num_ports) { + dev_err(hr_dev->dev, + "Port num %u id large than max port num %u.\n", + port, hr_dev->caps.num_ports); return -EINVAL; - - spin_lock_irqsave(&hr_dev->iboe.lock, flags); + } ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr); + if (ret) + dev_warn(hr_dev->dev, "del gid failed(%d), index = %u", ret, + attr->index); - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + return ret; +} +#else +static int hns_roce_add_gid(struct ib_device *device, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr, void **context) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(device); + u8 port = port_num - 1; + int ret; + + rdfx_func_cnt(hr_dev, RDFX_FUNC_ADD_GID); + + if (port >= hr_dev->caps.num_ports || + index > hr_dev->caps.gid_table_len[port]) { + dev_err(hr_dev->dev, "add gid failed. 
port - %u, index - %u\n", + port, index); + return -EINVAL; + } + + ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid, + attr); + if (ret) + dev_err(hr_dev->dev, "set gid failed(%d), index = %u", + ret, index); return ret; } +static int hns_roce_del_gid(struct ib_device *device, u8 port_num, + unsigned int index, void **context) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(device); + struct ib_gid_attr zattr = { }; + union ib_gid zgid = { {0} }; + u8 port = port_num - 1; + int ret; + + rdfx_func_cnt(hr_dev, RDFX_FUNC_DEL_GID); + + if (port >= hr_dev->caps.num_ports) { + dev_err(hr_dev->dev, + "Port num %u is larger than max port num %u.\n", + port, hr_dev->caps.num_ports); + return -EINVAL; + } + + ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, &zattr); + if (ret) + dev_warn(hr_dev->dev, "del gid failed(%d), index = %u", ret, + index); + + return ret; +} +#endif + static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, - unsigned long event) + unsigned long dev_event) { struct device *dev = hr_dev->dev; + enum ib_port_state port_state; struct net_device *netdev; + struct ib_event event; + unsigned long flags; int ret = 0; netdev = hr_dev->iboe.netdevs[port]; if (!netdev) { - dev_err(dev, "port(%d) can't find netdev\n", port); + dev_err(dev, "port(%u) can't find netdev\n", port); return -ENODEV; } - switch (event) { - case NETDEV_UP: - case NETDEV_CHANGE: + switch (dev_event) { case NETDEV_REGISTER: case NETDEV_CHANGEADDR: ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr); + if (ret) + dev_err(dev, "set mac failed(%d), event = 0x%x\n", ret, + (u32)dev_event); break; + case NETDEV_UP: + case NETDEV_CHANGE: + ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr); + if (ret) + dev_err(dev, "set mac failed(%d), event = 0x%x\n", ret, + (u32)dev_event); + /* fallthrough */ case NETDEV_DOWN: - /* - * In v1 engine, only support all ports closed together. - */ + port_state = get_port_state(netdev); + + spin_lock_irqsave(&hr_dev->iboe.lock, flags); + if (hr_dev->iboe.last_port_state[port] == port_state) { + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + return NOTIFY_DONE; + } + hr_dev->iboe.last_port_state[port] = port_state; + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + + event.device = &hr_dev->ib_dev; + event.event = (port_state == IB_PORT_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + event.element.port_num = to_rdma_port_num(port); + ib_dispatch_event(&event); + break; + case NETDEV_UNREGISTER: break; default: - dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event)); + dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(dev_event)); break; } @@ -150,10 +257,10 @@ static int hns_roce_netdev_event(struct notifier_block *self, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct hns_roce_ib_iboe *iboe = NULL; - struct hns_roce_dev *hr_dev = NULL; - u8 port = 0; - int ret = 0; + struct hns_roce_ib_iboe *iboe; + struct hns_roce_dev *hr_dev; + u8 port; + int ret; hr_dev = container_of(self, struct hns_roce_dev, iboe.nb); iboe = &hr_dev->iboe; @@ -170,19 +277,22 @@ static int hns_roce_netdev_event(struct notifier_block *self, return NOTIFY_DONE; } -static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev) +static int hns_roce_setup_mtu_mac_state(struct hns_roce_dev *hr_dev) { int ret; u8 i; for (i = 0; i < hr_dev->caps.num_ports; i++) { + hr_dev->iboe.last_port_state[i] = IB_PORT_DOWN; if (hr_dev->hw->set_mtu) hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i], hr_dev->caps.max_mtu); ret = hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr); - if (ret) + if (ret) { + dev_err(hr_dev->dev, "set mac failed(%d)\n", ret); return ret; + } } return 0; @@ -194,8 +304,11 @@ static int hns_roce_query_device(struct ib_device *ib_dev, { struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); + rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_DEVICE); + memset(props, 0, sizeof(*props)); + props->fw_ver = hr_dev->caps.fw_ver; props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid); props->max_mr_size = (u64)(~(0ULL)); props->page_size_cap = hr_dev->caps.page_size_cap; @@ -206,8 +319,14 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_qp_wr = hr_dev->caps.max_wqes; props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_RC_RNR_NAK_GEN; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; +#ifdef CONFIG_KERNEL_419 props->max_send_sge = hr_dev->caps.max_sq_sg; props->max_recv_sge = hr_dev->caps.max_rq_sg; +#else + props->max_sge = min(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg); +#endif props->max_sge_rd = 1; props->max_cq = hr_dev->caps.num_cqs; props->max_cqe = hr_dev->caps.max_cqes; @@ -215,10 +334,28 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_pd = hr_dev->caps.num_pds; props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma; props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma; - props->atomic_cap = IB_ATOMIC_NONE; + props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ? 
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = 1; props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { + props->max_srq = hr_dev->caps.num_srqs; + props->max_srq_wr = hr_dev->caps.max_srq_wrs; + props->max_srq_sge = hr_dev->caps.max_srq_sges; + } + + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) { + props->max_mw = hr_dev->caps.num_mtpts; + props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | + IB_DEVICE_MEM_WINDOW_TYPE_2B; + } + + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA; + } + return 0; } @@ -228,6 +365,8 @@ static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct net_device *ndev; + rdfx_func_cnt(hr_dev, RDFX_FUNC_GET_NETDEV); + if (port_num < 1 || port_num > hr_dev->caps.num_ports) return NULL; @@ -251,7 +390,13 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, enum ib_mtu mtu; u8 port; - assert(port_num > 0); + rdfx_func_cnt(hr_dev, RDFX_FUNC_QUERY_PORT); + + if (port_num < 1) { + dev_err(dev, "invalid port num!\n"); + return -EINVAL; + } + port = port_num - 1; /* props being zeroed by the caller, avoid zeroing it here */ @@ -271,15 +416,15 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, net_dev = hr_dev->iboe.netdevs[port]; if (!net_dev) { spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - dev_err(dev, "find netdev %d failed!\r\n", port); + dev_err(dev, "find netdev %u failed!\r\n", port); return -EINVAL; } mtu = iboe_get_mtu(net_dev->mtu); props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256; - props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? - IB_PORT_ACTIVE : IB_PORT_DOWN; - props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3; + props->state = get_port_state(net_dev); + props->phys_state = (props->state == IB_PORT_ACTIVE) ? 
+ HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED; spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); @@ -289,14 +434,26 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, u8 port_num) { + rdfx_func_cnt(to_hr_dev(device), RDFX_FUNC_GET_LINK_LAYER); + return IB_LINK_LAYER_ETHERNET; } +static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index, + union ib_gid *gid) +{ + rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_GID); + + return 0; +} + static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, u16 *pkey) { *pkey = PKEY_ID; + rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_QUERY_PKEY); + return 0; } @@ -305,6 +462,8 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, { unsigned long flags; + rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_DEVICE); + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; @@ -320,23 +479,30 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask, struct ib_port_modify *props) { + rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_MODIFY_PORT); + return 0; } static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, struct ib_udata *udata) { - int ret = 0; + int ret; struct hns_roce_ucontext *context; struct hns_roce_ib_alloc_ucontext_resp resp = {}; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); - if (!hr_dev->active) + if (!hr_dev->active) { + dev_err(hr_dev->dev, + "alloc ucontext failed, hr_dev is not active\n"); return ERR_PTR(-EAGAIN); + } + + rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_UCONTEXT); resp.qp_tab_size = hr_dev->caps.num_qps; - context = kmalloc(sizeof(*context), GFP_KERNEL); + context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); @@ -351,10 +517,12 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, mutex_init(&context->page_mutex); } - ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); + ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (ret) goto error_fail_copy_to_udata; + kref_init(&context->uctx_ref); + return &context->ibucontext; error_fail_copy_to_udata: @@ -366,12 +534,24 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev, return ERR_PTR(ret); } +static inline void release_ucontext(struct kref *kref) +{ + struct hns_roce_ucontext *context = + container_of(kref, struct hns_roce_ucontext, uctx_ref); + + kfree(context); +} + static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); + rdfx_func_cnt(to_hr_dev(ibcontext->device), + RDFX_FUNC_DEALLOC_UCONTEXT); + hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar); - kfree(context); + + kref_put(&context->uctx_ref, release_ucontext); return 0; } @@ -384,13 +564,19 @@ static void hns_roce_vma_open(struct vm_area_struct *vma) static void hns_roce_vma_close(struct vm_area_struct *vma) { struct hns_roce_vma_data *vma_data; + struct hns_roce_ucontext *context; vma_data = (struct hns_roce_vma_data *)vma->vm_private_data; + context = container_of(vma_data->vma_list_mutex, + struct hns_roce_ucontext, vma_list_mutex); + vma_data->vma = NULL; mutex_lock(vma_data->vma_list_mutex); list_del(&vma_data->list); mutex_unlock(vma_data->vma_list_mutex); kfree(vma_data); + + kref_put(&context->uctx_ref, release_ucontext); } static const struct
vm_operations_struct hns_roce_vm_ops = { @@ -404,6 +590,8 @@ static int hns_roce_set_vma_data(struct vm_area_struct *vma, struct list_head *vma_head = &context->vma_list; struct hns_roce_vma_data *vma_data; + kref_get(&context->uctx_ref); + vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL); if (!vma_data) return -ENOMEM; @@ -425,8 +613,12 @@ static int hns_roce_mmap(struct ib_ucontext *context, { struct hns_roce_dev *hr_dev = to_hr_dev(context->device); - if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) + rdfx_func_cnt(hr_dev, RDFX_FUNC_MMAP); + + if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { + dev_err(hr_dev->dev, "mmap failed, unexpected vm area size.\n"); return -EINVAL; + } if (vma->vm_pgoff == 0) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -434,16 +626,30 @@ static int hns_roce_mmap(struct ib_ucontext *context, to_hr_ucontext(context)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; - } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr && - hr_dev->tptr_size) { - /* vm_pgoff: 1 -- TPTR */ - if (io_remap_pfn_range(vma, vma->vm_start, - hr_dev->tptr_dma_addr >> PAGE_SHIFT, - hr_dev->tptr_size, - vma->vm_page_prot)) - return -EAGAIN; - } else + } else if (vma->vm_pgoff == 1) { + /* vm_pgoff: 1 -- TPTR(hw v1), reset_page(hw v2) */ + if (hr_dev->tptr_dma_addr && hr_dev->tptr_size) { + if (io_remap_pfn_range(vma, vma->vm_start, + hr_dev->tptr_dma_addr >> PAGE_SHIFT, + hr_dev->tptr_size, vma->vm_page_prot)){ + dev_err(hr_dev->dev, + "mmap tptr page failed.\n"); + return -EAGAIN; + } + } + + if (hr_dev->reset_page) + if (remap_pfn_range(vma, vma->vm_start, + page_to_pfn(virt_to_page(hr_dev->reset_page)), + PAGE_SIZE, vma->vm_page_prot)) { + dev_err(hr_dev->dev, + "mmap reset page failed.\n"); + return -EAGAIN; + } + } else { + dev_err(hr_dev->dev, "mmap failed, vm_pgoff is unsupported.\n"); return -EINVAL; + } return hns_roce_set_vma_data(vma, to_hr_ucontext(context)); } @@ -454,9 +660,14 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, struct ib_port_attr attr; int ret; + rdfx_func_cnt(to_hr_dev(ib_dev), RDFX_FUNC_PORT_IMMUTABLE); + ret = ib_query_port(ib_dev, port_num, &attr); - if (ret) + if (ret) { + dev_err(to_hr_dev(ib_dev)->dev, "ib_query_port failed(%d)!\n", + ret); return ret; + } immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; @@ -484,10 +695,175 @@ static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext) vma->vm_ops = NULL; list_del(&vma_data->list); kfree(vma_data); + + kref_put(&context->uctx_ref, release_ucontext); } mutex_unlock(&context->vma_list_mutex); } +static void hns_roce_get_fw_ver(struct ib_device *device, char *str) +{ + u64 fw_ver = to_hr_dev(device)->caps.fw_ver; + unsigned int major, minor, sub_minor; + + major = upper_32_bits(fw_ver); + minor = high_16_bits(lower_32_bits(fw_ver)); + sub_minor = low_16_bits(fw_ver); + + snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor, + sub_minor); +} + +static const char * const hns_roce_hw_stats_name[] = { + "pd_alloc", + "pd_dealloc", + "pd_active_max", + "mr_alloc", + "mr_dealloc", + "mr_active_max", + "cq_alloc", + "cq_dealloc", + "qp_alloc", + "qp_dealloc", + "pd_active", + "mr_active", + "cq_active", + "cq_active_max", + "qp_active", + "qp_active_max", + "srq_active", + "srq_active_max", + "uar_active", + "uar_active_max", + "mr_rereg", + "aeqe", + "ceqe", +}; + +/** + *port 0:/sys/devices/../infiniband/hnsethX/hw_counters + *port 
1:/sys/devices/../infiniband/hnsethX/ports/1/hw_counters + */ +static struct rdma_hw_stats *hns_roce_alloc_hw_stats(struct ib_device *device, + u8 port_num) +{ + BUILD_BUG_ON(ARRAY_SIZE(hns_roce_hw_stats_name) != HW_STATS_TOTAL); + + if (port_num != 0) + return NULL; /* nothing to do for port */ + + return rdma_alloc_hw_stats_struct(hns_roce_hw_stats_name, + ARRAY_SIZE(hns_roce_hw_stats_name), + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} +static int hns_roce_get_hw_stats_for_armci(struct ib_device *device, + struct rdma_hw_stats *stats, + u8 port, int index) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(device); + unsigned long *table; + unsigned long max; + + switch (index) { + case HW_STATS_PD_ACTIVE: + table = hr_dev->pd_bitmap.table; + max = hr_dev->pd_bitmap.max; + stats->value[index] = bitmap_weight(table, max) - + hr_dev->caps.reserved_pds; + break; + case HW_STATS_MR_ACTIVE: + table = hr_dev->mr_table.mtpt_bitmap.table; + max = hr_dev->mr_table.mtpt_bitmap.max; + stats->value[index] = bitmap_weight(table, max) - + hr_dev->caps.reserved_mrws; + break; + case HW_STATS_CQ_ACTIVE: + table = hr_dev->cq_table.bitmap.table; + max = hr_dev->cq_table.bitmap.max; + stats->value[index] = bitmap_weight(table, max) - + hr_dev->caps.reserved_cqs; + break; + case HW_STATS_CQ_ACTIVE_MAX: + table = hr_dev->cq_table.bitmap.table; + max = hr_dev->cq_table.bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + case HW_STATS_QP_ACTIVE: + table = hr_dev->qp_table.bitmap.table; + max = hr_dev->qp_table.bitmap.max; + stats->value[index] = bitmap_weight(table, max) - + hr_dev->caps.reserved_qps; + break; + case HW_STATS_QP_ACTIVE_MAX: + table = hr_dev->qp_table.bitmap.table; + max = hr_dev->qp_table.bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + case HW_STATS_SRQ_ACTIVE: + table = hr_dev->srq_table.bitmap.table; + max = hr_dev->srq_table.bitmap.max; + stats->value[index] = bitmap_weight(table, max) - + hr_dev->caps.reserved_srqs; + break; + case HW_STATS_SRQ_ACTIVE_MAX: + table = hr_dev->srq_table.bitmap.table; + max = hr_dev->srq_table.bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + case HW_STATS_UAR_ACTIVE: + table = hr_dev->uar_table.bitmap.table; + max = hr_dev->uar_table.bitmap.max; + stats->value[index] = bitmap_weight(table, max); + break; + case HW_STATS_UAR_ACTIVE_MAX: + table = hr_dev->uar_table.bitmap.table; + max = hr_dev->uar_table.bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + case HW_STATS_AEQE: + stats->value[index] = hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE]; + break; + case HW_STATS_CEQE: + stats->value[index] = hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE]; + break; + default: + break; + } + + return index; +} + +static int hns_roce_get_hw_stats(struct ib_device *device, + struct rdma_hw_stats *stats, + u8 port, int index) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(device); + unsigned long *table; + unsigned long max; + + if (port != 0) + return 0; /* nothing to do for port */ + + switch (index) { + case HW_STATS_PD_ACTIVE_MAX: + table = hr_dev->pd_bitmap.table; + max = hr_dev->pd_bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + case HW_STATS_MR_ACTIVE_MAX: + table = hr_dev->mr_table.mtpt_bitmap.table; + max = hr_dev->mr_table.mtpt_bitmap.max; + stats->value[index] = find_last_bit(table, max); + break; + default: + hns_roce_get_hw_stats_for_armci(device, stats, port, index); + break; + } + + return index; +} + + static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev) { struct 
hns_roce_ib_iboe *iboe = &hr_dev->iboe; @@ -499,16 +875,17 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev) static int hns_roce_register_device(struct hns_roce_dev *hr_dev) { - int ret; - struct hns_roce_ib_iboe *iboe = NULL; - struct ib_device *ib_dev = NULL; struct device *dev = hr_dev->dev; + struct hns_roce_ib_iboe *iboe; + struct ib_device *ib_dev; + int ret; iboe = &hr_dev->iboe; spin_lock_init(&iboe->lock); ib_dev = &hr_dev->ib_dev; - strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX); + if (!strlen(ib_dev->name)) + strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX); ib_dev->owner = THIS_MODULE; ib_dev->node_type = RDMA_NODE_IB_CA; @@ -532,11 +909,18 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) (1ULL << IB_USER_VERBS_CMD_CREATE_QP) | (1ULL << IB_USER_VERBS_CMD_MODIFY_QP) | (1ULL << IB_USER_VERBS_CMD_QUERY_QP) | - (1ULL << IB_USER_VERBS_CMD_DESTROY_QP); - + (1ULL << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV) | + (1ULL << IB_USER_VERBS_CMD_CREATE_XSRQ); + +#ifdef MODIFY_CQ_MASK ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ); - +#endif /* HCA||device||port */ ib_dev->modify_device = hns_roce_modify_device; ib_dev->query_device = hns_roce_query_device; @@ -544,6 +928,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->modify_port = hns_roce_modify_port; ib_dev->get_link_layer = hns_roce_get_link_layer; ib_dev->get_netdev = hns_roce_get_netdev; + ib_dev->query_gid = hns_roce_query_gid; ib_dev->add_gid = hns_roce_add_gid; ib_dev->del_gid = hns_roce_del_gid; ib_dev->query_pkey = hns_roce_query_pkey; @@ -551,6 +936,9 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext; ib_dev->mmap = hns_roce_mmap; + /* FW */ + ib_dev->get_dev_fw_str = hns_roce_get_fw_ver; + /* PD */ ib_dev->alloc_pd = hns_roce_alloc_pd; ib_dev->dealloc_pd = hns_roce_dealloc_pd; @@ -559,6 +947,12 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->create_ah = hns_roce_create_ah; ib_dev->query_ah = hns_roce_query_ah; ib_dev->destroy_ah = hns_roce_destroy_ah; + /* SRQ */ + ib_dev->create_srq = hns_roce_create_srq; + ib_dev->modify_srq = hr_dev->hw->modify_srq; + ib_dev->query_srq = hr_dev->hw->query_srq; + ib_dev->destroy_srq = hns_roce_destroy_srq; + ib_dev->post_srq_recv = hr_dev->hw->post_srq_recv; /* QP */ ib_dev->create_qp = hns_roce_create_qp; @@ -584,34 +978,63 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR); } + /* MW */ + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) { + ib_dev->alloc_mw = hns_roce_alloc_mw; + ib_dev->dealloc_mw = hns_roce_dealloc_mw; + ib_dev->uverbs_cmd_mask |= + (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) | + (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW); + } + + /* FRMR */ + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { + ib_dev->alloc_mr = hns_roce_alloc_mr; + ib_dev->map_mr_sg = hns_roce_map_mr_sg; + } + /* OTHERS */ ib_dev->get_port_immutable = hns_roce_port_immutable; ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext; + ib_dev->res.fill_res_entry = hns_roce_fill_res_entry; + ib_dev->alloc_hw_stats = hns_roce_alloc_hw_stats; + ib_dev->get_hw_stats = hns_roce_get_hw_stats; + + if (hr_dev->caps.flags & 
HNS_ROCE_CAP_FLAG_XRC) { + ib_dev->alloc_xrcd = hns_roce_ib_alloc_xrcd; + ib_dev->dealloc_xrcd = hns_roce_ib_dealloc_xrcd; + ib_dev->uverbs_cmd_mask |= + (1ULL << IB_USER_VERBS_CMD_OPEN_XRCD) | + (1ULL << IB_USER_VERBS_CMD_CLOSE_XRCD); + } +#ifdef CONFIG_NEW_KERNEL ib_dev->driver_id = RDMA_DRIVER_HNS; +#endif ret = ib_register_device(ib_dev, NULL); if (ret) { - dev_err(dev, "ib_register_device failed!\n"); + dev_err(dev, "ib_register_device failed(%d)!\n", ret); return ret; } - ret = hns_roce_setup_mtu_mac(hr_dev); + ret = hns_roce_setup_mtu_mac_state(hr_dev); if (ret) { - dev_err(dev, "setup_mtu_mac failed!\n"); - goto error_failed_setup_mtu_mac; + dev_err(dev, "setup_mtu_mac_state failed, ret = %d\n", ret); + goto error_failed_setup_mtu_mac_state; } iboe->nb.notifier_call = hns_roce_netdev_event; ret = register_netdevice_notifier(&iboe->nb); if (ret) { - dev_err(dev, "register_netdevice_notifier failed!\n"); - goto error_failed_setup_mtu_mac; + iboe->nb.notifier_call = NULL; + dev_err(dev, "register_netdevice_notifier failed(%d)!\n", ret); + goto error_failed_setup_mtu_mac_state; } hr_dev->active = true; return 0; -error_failed_setup_mtu_mac: +error_failed_setup_mtu_mac_state: ib_unregister_device(ib_dev); return ret; @@ -626,7 +1049,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz, hr_dev->caps.num_mtt_segs, 1); if (ret) { - dev_err(dev, "Failed to init MTT context memory, aborting.\n"); + dev_err(dev, "Init MTT context memory failed(%d).\n", ret); return ret; } @@ -636,7 +1059,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz, hr_dev->caps.num_cqe_segs, 1); if (ret) { - dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n"); + dev_err(dev, "Init MTT CQE context memory failed(%d).\n", + ret); goto err_unmap_cqe; } } @@ -645,7 +1069,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz, hr_dev->caps.num_mtpts, 1); if (ret) { - dev_err(dev, "Failed to init MTPT context memory, aborting.\n"); + dev_err(dev, "Init MTPT context memory failed(%d).\n", ret); goto err_unmap_mtt; } @@ -653,7 +1077,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz, hr_dev->caps.num_qps, 1); if (ret) { - dev_err(dev, "Failed to init QP context memory, aborting.\n"); + dev_err(dev, "Init QP context memory failed(%d).\n", ret); goto err_unmap_dmpt; } @@ -663,7 +1087,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) hr_dev->caps.max_qp_init_rdma, hr_dev->caps.num_qps, 1); if (ret) { - dev_err(dev, "Failed to init irrl_table memory, aborting.\n"); + dev_err(dev, "Init irrl_table memory failed(%d).\n", ret); goto err_unmap_qp; } @@ -675,8 +1099,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) hr_dev->caps.max_qp_dest_rdma, hr_dev->caps.num_qps, 1); if (ret) { - dev_err(dev, - "Failed to init trrl_table memory, aborting.\n"); + dev_err(dev, "Init trrl_table memory failed(%d).\n", + ret); goto err_unmap_irrl; } } @@ -685,12 +1109,114 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz, hr_dev->caps.num_cqs, 1); if (ret) { - dev_err(dev, "Failed to init CQ context memory, aborting.\n"); + dev_err(dev, "Init CQ context memory failed(%d).\n", ret); goto err_unmap_trrl; } + if (hr_dev->caps.scc_ctx_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->qp_table.scc_ctx_table, + HEM_TYPE_SCC_CTX, + 
hr_dev->caps.scc_ctx_entry_sz, + hr_dev->caps.num_qps, 1); + if (ret) { + dev_err(dev, "Init SCC context memory failed(%d).\n", + ret); + goto err_unmap_cq; + } + } + + if (hr_dev->caps.qpc_timer_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->qpc_timer_table.table, + HEM_TYPE_QPC_TIMER, + hr_dev->caps.qpc_timer_entry_sz, + hr_dev->caps.num_qpc_timer, 1); + if (ret) { + dev_err(dev, "Init QPC timer memory failed(%d).\n", + ret); + goto err_unmap_ctx; + } + } + + if (hr_dev->caps.cqc_timer_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->cqc_timer_table.table, + HEM_TYPE_CQC_TIMER, + hr_dev->caps.cqc_timer_entry_sz, + hr_dev->caps.num_cqc_timer, 1); + if (ret) { + dev_err(dev, "Init CQC timer memory failed(%d).\n", + ret); + goto err_unmap_qpc_timer; + } + } + + if (hr_dev->caps.srqc_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, + HEM_TYPE_SRQC, + hr_dev->caps.srqc_entry_sz, + hr_dev->caps.num_srqs, 1); + if (ret) { + dev_err(dev, "Init SRQ context memory failed(%d).\n", + ret); + goto err_unmap_cqc_timer; + } + } + + if (hr_dev->caps.num_srqwqe_segs) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->mr_table.mtt_srqwqe_table, + HEM_TYPE_SRQWQE, + hr_dev->caps.mtt_entry_sz, + hr_dev->caps.num_srqwqe_segs, 1); + if (ret) { + dev_err(dev, "Init MTT srqwqe memory failed(%d).\n", + ret); + goto err_unmap_srq; + } + } + + if (hr_dev->caps.num_idx_segs) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->mr_table.mtt_idx_table, + HEM_TYPE_IDX, + hr_dev->caps.idx_entry_sz, + hr_dev->caps.num_idx_segs, 1); + if (ret) { + dev_err(dev, "Init MTT idx memory failed(%d).\n", ret); + goto err_unmap_srqwqe; + } + } + return 0; +err_unmap_srqwqe: + if (hr_dev->caps.num_srqwqe_segs) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->mr_table.mtt_srqwqe_table); + +err_unmap_srq: + if (hr_dev->caps.srqc_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table); + +err_unmap_cqc_timer: + if (hr_dev->caps.cqc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->cqc_timer_table.table); +err_unmap_qpc_timer: + if (hr_dev->caps.qpc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qpc_timer_table.table); + +err_unmap_ctx: + if (hr_dev->caps.scc_ctx_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.scc_ctx_table); + +err_unmap_cq: + hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); + err_unmap_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, @@ -736,48 +1262,72 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ret = hns_roce_init_uar_table(hr_dev); if (ret) { - dev_err(dev, "Failed to initialize uar table. aborting\n"); + dev_err(dev, "Failed to init uar table(%d). 
aborting\n", ret); return ret; } ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar); if (ret) { - dev_err(dev, "Failed to allocate priv_uar.\n"); + dev_err(dev, "Failed to allocate priv_uar(%d).\n", ret); goto err_uar_table_free; } ret = hns_roce_init_pd_table(hr_dev); if (ret) { - dev_err(dev, "Failed to init protected domain table.\n"); + dev_err(dev, "Failed to init pd table(%d).\n", ret); goto err_uar_alloc_free; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { + ret = hns_roce_init_xrcd_table(hr_dev); + if (ret) { + dev_err(dev, "Failed to init xrcd table(%d).\n", ret); + goto err_pd_table_free; + } + } + ret = hns_roce_init_mr_table(hr_dev); if (ret) { - dev_err(dev, "Failed to init memory region table.\n"); - goto err_pd_table_free; + dev_err(dev, "Failed to init mr table(%d).\n", ret); + goto err_xrcd_table_free; } ret = hns_roce_init_cq_table(hr_dev); if (ret) { - dev_err(dev, "Failed to init completion queue table.\n"); + dev_err(dev, "Failed to init cq table(%d).\n", ret); goto err_mr_table_free; } ret = hns_roce_init_qp_table(hr_dev); if (ret) { - dev_err(dev, "Failed to init queue pair table.\n"); + dev_err(dev, "Failed to init qp table(%d).\n", ret); goto err_cq_table_free; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { + ret = hns_roce_init_srq_table(hr_dev); + if (ret) { + dev_err(dev, + "Failed to init srq table(%d).\n", ret); + goto err_qp_table_free; + } + } + return 0; +err_qp_table_free: + hns_roce_cleanup_qp_table(hr_dev); + err_cq_table_free: hns_roce_cleanup_cq_table(hr_dev); err_mr_table_free: hns_roce_cleanup_mr_table(hr_dev); +err_xrcd_table_free: + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) + hns_roce_cleanup_xrcd_table(hr_dev); + err_pd_table_free: hns_roce_cleanup_pd_table(hr_dev); @@ -789,80 +1339,161 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) return ret; } -int hns_roce_init(struct hns_roce_dev *hr_dev) +static int hns_roce_reset(struct hns_roce_dev *hr_dev) { int ret; - struct device *dev = hr_dev->dev; if (hr_dev->hw->reset) { ret = hr_dev->hw->reset(hr_dev, true); - if (ret) { - dev_err(dev, "Reset RoCE engine failed!\n"); + if (ret) return ret; - } } hr_dev->is_reset = false; + return 0; +} + +static void hns_roce_find_armed_cq(struct list_head *cq_list, struct ib_cq *cq) +{ + struct hns_roce_cq *hr_cq = to_hr_cq(cq); + unsigned long flags; + + spin_lock_irqsave(&hr_cq->lock, flags); + if (hr_cq->comp && cq->comp_handler) { + if (!hr_cq->comp_state) { + hr_cq->comp_state = 1; + list_add_tail(&hr_cq->list, cq_list); + } + } + spin_unlock_irqrestore(&hr_cq->lock, flags); +} + +/* + * We need set device state before handle device err. So, sq/rq lock will be + * effect to return error or involve cq. 
+ */ +void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_qp *hr_qp; + struct hns_roce_cq *hr_cq; + struct list_head cq_list; + unsigned long flags_qp; + unsigned long flags; + + INIT_LIST_HEAD(&cq_list); + + spin_lock_irqsave(&hr_dev->qp_lock, flags); + list_for_each_entry(hr_qp, &hr_dev->qp_list, list) { + spin_lock_irqsave(&hr_qp->sq.lock, flags_qp); + if (hr_qp->sq.tail != hr_qp->sq.head) + hns_roce_find_armed_cq(&cq_list, hr_qp->ibqp.send_cq); + spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp); + + spin_lock_irqsave(&hr_qp->rq.lock, flags_qp); + if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head)) + hns_roce_find_armed_cq(&cq_list, hr_qp->ibqp.recv_cq); + spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp); + } + + list_for_each_entry(hr_cq, &cq_list, list) + hr_cq->comp(hr_cq); + + spin_unlock_irqrestore(&hr_dev->qp_lock, flags); +} +EXPORT_SYMBOL_GPL(hns_roce_handle_device_err); + +int hns_roce_init(struct hns_roce_dev *hr_dev) +{ + int ret; + struct device *dev = hr_dev->dev; + + ret = alloc_rdfx_info(hr_dev); + if (ret) { + dev_err(dev, "Alloc RoCE DFX failed(%d)!\n", ret); + return ret; + } + ret = hns_roce_reset(hr_dev); + if (ret) { + free_rdfx_info(hr_dev); + dev_err(dev, "Reset RoCE engine failed(%d)!\n", ret); + return ret; + } + if (hr_dev->hw->cmq_init) { ret = hr_dev->hw->cmq_init(hr_dev); if (ret) { - dev_err(dev, "Init RoCE Command Queue failed!\n"); + dev_err(dev, "Init RoCE cmq failed(%d)!\n", ret); goto error_failed_cmq_init; } } ret = hr_dev->hw->hw_profile(hr_dev); if (ret) { - dev_err(dev, "Get RoCE engine profile failed!\n"); + dev_err(dev, "Get RoCE engine profile failed(%d)!\n", ret); goto error_failed_cmd_init; } ret = hns_roce_cmd_init(hr_dev); if (ret) { - dev_err(dev, "cmd init failed!\n"); + dev_err(dev, "Cmd init failed(%d)!\n", ret); goto error_failed_cmd_init; } ret = hr_dev->hw->init_eq(hr_dev); if (ret) { - dev_err(dev, "eq init failed!\n"); + dev_err(dev, "Eq init failed(%d)!\n", ret); goto error_failed_eq_table; } if (hr_dev->cmd_mod) { ret = hns_roce_cmd_use_events(hr_dev); if (ret) { - dev_err(dev, "Switch to event-driven cmd failed!\n"); - goto error_failed_use_event; + dev_warn(dev, + "Cmd event mode failed(%d), set back to poll!\n", + ret); + hns_roce_cmd_use_polling(hr_dev); } } ret = hns_roce_init_hem(hr_dev); if (ret) { - dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n"); + dev_err(dev, "Init HEM(Hardware Entry Memory) failed(%d)!\n", + ret); goto error_failed_init_hem; } ret = hns_roce_setup_hca(hr_dev); if (ret) { - dev_err(dev, "setup hca failed!\n"); + dev_err(dev, "Setup hca failed(%d)!\n", ret); goto error_failed_setup_hca; } - if (hr_dev->hw->hw_init) { - ret = hr_dev->hw->hw_init(hr_dev); - if (ret) { - dev_err(dev, "hw_init failed!\n"); - goto error_failed_engine_init; - } + ret = hr_dev->hw->hw_init(hr_dev); + if (ret) { + dev_err(dev, "Hw_init failed(%d)!\n", ret); + goto error_failed_engine_init; } + INIT_LIST_HEAD(&hr_dev->qp_list); + spin_lock_init(&hr_dev->qp_lock); + ret = hns_roce_register_device(hr_dev); if (ret) goto error_failed_register_device; + if (hr_dev->hw->create_workq) { + ret = hr_dev->hw->create_workq(hr_dev); + if (ret) + goto error_failed_create_workq; + } + + (void)hns_roce_register_sysfs(hr_dev); + rdfx_set_dev_name(hr_dev); return 0; +error_failed_create_workq: + hns_roce_unregister_device(hr_dev); + error_failed_register_device: if (hr_dev->hw->hw_exit) hr_dev->hw->hw_exit(hr_dev); @@ -876,8 +1507,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) 
error_failed_init_hem: if (hr_dev->cmd_mod) hns_roce_cmd_use_polling(hr_dev); - -error_failed_use_event: hr_dev->hw->cleanup_eq(hr_dev); error_failed_eq_table: @@ -893,6 +1522,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev) dev_err(dev, "Dereset RoCE engine failed!\n"); } + free_rdfx_info(hr_dev); + return ret; } EXPORT_SYMBOL_GPL(hns_roce_init); @@ -901,6 +1532,9 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) { hns_roce_unregister_device(hr_dev); + if (hr_dev->hw->destroy_workq) + hr_dev->hw->destroy_workq(hr_dev); + if (hr_dev->hw->hw_exit) hr_dev->hw->hw_exit(hr_dev); hns_roce_cleanup_bitmap(hr_dev); @@ -915,11 +1549,11 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev) hr_dev->hw->cmq_exit(hr_dev); if (hr_dev->hw->reset) hr_dev->hw->reset(hr_dev, false); + + free_rdfx_info(hr_dev); } EXPORT_SYMBOL_GPL(hns_roce_exit); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu "); -MODULE_AUTHOR("Nenglong Zhao "); -MODULE_AUTHOR("Lijun Ou "); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_DESCRIPTION("HNS RoCE Driver"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index eb26a5f6fc58c21c2cc3545b7047da0889db8e0a..f81fb0359afd621dfc4b09d46464349815fa853e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -30,6 +30,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include "roce_k_compat.h" #include #include @@ -38,10 +39,11 @@ #include "hns_roce_cmd.h" #include "hns_roce_hem.h" -static u32 hw_index_to_key(unsigned long ind) +u32 hw_index_to_key(unsigned long ind) { return (u32)(ind >> 24) | (ind << 8); } +EXPORT_SYMBOL_GPL(hw_index_to_key); unsigned long key_to_hw_index(u32 key) { @@ -49,24 +51,24 @@ unsigned long key_to_hw_index(u32 key) } EXPORT_SYMBOL_GPL(key_to_hw_index); -static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) +static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long mpt_index) { return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0, - HNS_ROCE_CMD_SW2HW_MPT, + HNS_ROCE_CMD_CREATE_MPT, HNS_ROCE_CMD_TIMEOUT_MSECS); } -int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) +int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long mpt_index) { return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? 
mailbox->dma : 0, - mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT, + mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT, HNS_ROCE_CMD_TIMEOUT_MSECS); } -EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt); +EXPORT_SYMBOL_GPL(hns_roce_hw_destroy_mpt); static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order, unsigned long *seg) @@ -184,12 +186,27 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order, struct hns_roce_buddy *buddy; int ret; - if (mtt_type == MTT_TYPE_WQE) { + switch (mtt_type) { + case MTT_TYPE_WQE: buddy = &mr_table->mtt_buddy; table = &mr_table->mtt_table; - } else { + break; + case MTT_TYPE_CQE: buddy = &mr_table->mtt_cqe_buddy; table = &mr_table->mtt_cqe_table; + break; + case MTT_TYPE_SRQWQE: + buddy = &mr_table->mtt_srqwqe_buddy; + table = &mr_table->mtt_srqwqe_table; + break; + case MTT_TYPE_IDX: + buddy = &mr_table->mtt_idx_buddy; + table = &mr_table->mtt_idx_table; + break; + default: + dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n", + mtt_type); + return -EINVAL; } ret = hns_roce_buddy_alloc(buddy, order, seg); @@ -229,7 +246,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, /* Allocate MTT entry */ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg, mtt->mtt_type); - if (ret == -1) + if (ret != 0) return -ENOMEM; return 0; @@ -242,18 +259,40 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt) if (mtt->order < 0) return; - if (mtt->mtt_type == MTT_TYPE_WQE) { + switch (mtt->mtt_type) { + case MTT_TYPE_WQE: hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg, mtt->first_seg + (1 << mtt->order) - 1); - } else { + break; + case MTT_TYPE_CQE: hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg, mtt->order); hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table, mtt->first_seg, mtt->first_seg + (1 << mtt->order) - 1); + break; + case MTT_TYPE_SRQWQE: + hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg, + mtt->order); + hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table, + mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); + break; + case MTT_TYPE_IDX: + hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg, + mtt->order); + hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table, + mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); + break; + default: + dev_err(hr_dev->dev, + "Unsupport mtt type %d, clean mtt failed\n", + mtt->mtt_type); + break; } } EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup); @@ -277,11 +316,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], mr->pbl_l1_dma_addr[i]); - for (j = 0; j < pbl_bt_sz / 8; j++) { + for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) { if (i == loop_i && j >= loop_j) break; - bt_idx = i * pbl_bt_sz / 8 + j; + bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j; dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l2[bt_idx], mr->pbl_l2_dma_addr[bt_idx]); @@ -292,8 +331,8 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], mr->pbl_l1_dma_addr[i]); - for (j = 0; j < pbl_bt_sz / 8; j++) { - bt_idx = i * pbl_bt_sz / 8 + j; + for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) { + bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j; dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l2[bt_idx], mr->pbl_l2_dma_addr[bt_idx]); @@ -313,155 +352,205 @@ static void hns_roce_loop_free(struct hns_roce_dev 
*hr_dev, mr->pbl_bt_l0 = NULL; mr->pbl_l0_dma_addr = 0; } +static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) +{ + struct device *dev = hr_dev->dev; -/* PBL multi hop addressing */ -static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, - struct hns_roce_mr *mr) + if (npages > pbl_bt_sz / BA_BYTE_LEN) { + dev_err(dev, "Npages %d is larger than buf_pg_sz!", npages); + return -EINVAL; + } + mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN, + &(mr->pbl_dma_addr), + GFP_KERNEL); + if (!mr->pbl_buf) + return -ENOMEM; + + mr->pbl_size = npages; + mr->pbl_ba = mr->pbl_dma_addr; + mr->pbl_hop_num = 1; + mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; + mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; + return 0; + +} + + +static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) { struct device *dev = hr_dev->dev; - int mr_alloc_done = 0; - int npages_allocated; - int i = 0, j = 0; - u32 pbl_bt_sz; - u32 mhop_num; + int npages_alloced; u64 pbl_last_bt_num; u64 pbl_bt_cnt = 0; - u64 bt_idx; u64 size; + int i; - mhop_num = hr_dev->caps.pbl_hop_num; - pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); - pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); - - if (mhop_num == HNS_ROCE_HOP_NUM_0) - return 0; + pbl_last_bt_num = DIV_ROUND_UP(npages, pbl_bt_sz / BA_BYTE_LEN); - /* hop_num = 1 */ - if (mhop_num == 1) { - if (npages > pbl_bt_sz / 8) { - dev_err(dev, "npages %d is larger than buf_pg_sz!", - npages); - return -EINVAL; + /* alloc L1 BT */ + for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) { + if (pbl_bt_cnt + 1 < pbl_last_bt_num) { + size = pbl_bt_sz; + } else { + npages_alloced = i * (pbl_bt_sz / BA_BYTE_LEN); + size = (npages - npages_alloced) * BA_BYTE_LEN; } - mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, - &(mr->pbl_dma_addr), - GFP_KERNEL); - if (!mr->pbl_buf) + mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, + &(mr->pbl_l1_dma_addr[i]), + GFP_KERNEL); + if (!mr->pbl_bt_l1[i]) { + hns_roce_loop_free(hr_dev, mr, 1, i, 0); return -ENOMEM; + } - mr->pbl_size = npages; - mr->pbl_ba = mr->pbl_dma_addr; - mr->pbl_hop_num = hr_dev->caps.pbl_hop_num; - mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; - mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; - return 0; + *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; + + pbl_bt_cnt++; + if (pbl_bt_cnt >= pbl_last_bt_num) + break; } - mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, - sizeof(*mr->pbl_l1_dma_addr), + mr->l0_chunk_last_num = i + 1; + + return 0; +} + +static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr, u32 pbl_bt_sz) +{ + struct device *dev = hr_dev->dev; + int mr_alloc_done = 0; + int npages_alloced; + u64 pbl_last_bt_num; + u64 pbl_bt_cnt = 0; + u64 bt_idx; + u64 size; + int i; + int j = 0; + + pbl_last_bt_num = DIV_ROUND_UP(npages, pbl_bt_sz / BA_BYTE_LEN); + + mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, + sizeof(*mr->pbl_l2_dma_addr), GFP_KERNEL); - if (!mr->pbl_l1_dma_addr) + if (!mr->pbl_l2_dma_addr) return -ENOMEM; - mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), + mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, + sizeof(*mr->pbl_bt_l2), GFP_KERNEL); - if (!mr->pbl_bt_l1) - goto err_kcalloc_bt_l1; - - if (mhop_num == 3) { - mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, - sizeof(*mr->pbl_l2_dma_addr), - GFP_KERNEL); - if (!mr->pbl_l2_dma_addr) - goto err_kcalloc_l2_dma; + if (!mr->pbl_bt_l2) + goto err_kcalloc_bt_l2; + + /* alloc L1, 
L2 BT */ + for (i = 0; i < pbl_bt_sz / BA_BYTE_LEN; i++) { + mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, + &(mr->pbl_l1_dma_addr[i]), + GFP_KERNEL); + if (!mr->pbl_bt_l1[i]) { + hns_roce_loop_free(hr_dev, mr, 1, i, 0); + goto err_dma_alloc_l0; + } - mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, - sizeof(*mr->pbl_bt_l2), - GFP_KERNEL); - if (!mr->pbl_bt_l2) - goto err_kcalloc_bt_l2; - } + *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; - /* alloc L0 BT */ - mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, - &(mr->pbl_l0_dma_addr), - GFP_KERNEL); - if (!mr->pbl_bt_l0) - goto err_dma_alloc_l0; + for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) { + bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j; - if (mhop_num == 2) { - /* alloc L1 BT */ - for (i = 0; i < pbl_bt_sz / 8; i++) { if (pbl_bt_cnt + 1 < pbl_last_bt_num) { size = pbl_bt_sz; } else { - npages_allocated = i * (pbl_bt_sz / 8); - size = (npages - npages_allocated) * 8; + npages_alloced = bt_idx * + (pbl_bt_sz / BA_BYTE_LEN); + size = (npages - npages_alloced) * BA_BYTE_LEN; } - mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, - &(mr->pbl_l1_dma_addr[i]), - GFP_KERNEL); - if (!mr->pbl_bt_l1[i]) { - hns_roce_loop_free(hr_dev, mr, 1, i, 0); + mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( + dev, size, + &(mr->pbl_l2_dma_addr[bt_idx]), + GFP_KERNEL); + if (!mr->pbl_bt_l2[bt_idx]) { + hns_roce_loop_free(hr_dev, mr, 2, i, j); goto err_dma_alloc_l0; } - *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; + *(mr->pbl_bt_l1[i] + j) = + mr->pbl_l2_dma_addr[bt_idx]; pbl_bt_cnt++; - if (pbl_bt_cnt >= pbl_last_bt_num) + if (pbl_bt_cnt >= pbl_last_bt_num) { + mr_alloc_done = 1; break; - } - } else if (mhop_num == 3) { - /* alloc L1, L2 BT */ - for (i = 0; i < pbl_bt_sz / 8; i++) { - mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, - &(mr->pbl_l1_dma_addr[i]), - GFP_KERNEL); - if (!mr->pbl_bt_l1[i]) { - hns_roce_loop_free(hr_dev, mr, 1, i, 0); - goto err_dma_alloc_l0; } + } + + if (mr_alloc_done) + break; + } - *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; + mr->l0_chunk_last_num = i + 1; + mr->l1_chunk_last_num = j + 1; - for (j = 0; j < pbl_bt_sz / 8; j++) { - bt_idx = i * pbl_bt_sz / 8 + j; - if (pbl_bt_cnt + 1 < pbl_last_bt_num) { - size = pbl_bt_sz; - } else { - npages_allocated = bt_idx * - (pbl_bt_sz / 8); - size = (npages - npages_allocated) * 8; - } - mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( - dev, size, - &(mr->pbl_l2_dma_addr[bt_idx]), - GFP_KERNEL); - if (!mr->pbl_bt_l2[bt_idx]) { - hns_roce_loop_free(hr_dev, mr, 2, i, j); - goto err_dma_alloc_l0; - } + return 0; - *(mr->pbl_bt_l1[i] + j) = - mr->pbl_l2_dma_addr[bt_idx]; +err_dma_alloc_l0: + kfree(mr->pbl_bt_l2); + mr->pbl_bt_l2 = NULL; - pbl_bt_cnt++; - if (pbl_bt_cnt >= pbl_last_bt_num) { - mr_alloc_done = 1; - break; - } - } +err_kcalloc_bt_l2: + kfree(mr->pbl_l2_dma_addr); + mr->pbl_l2_dma_addr = NULL; - if (mr_alloc_done) - break; - } + return -ENOMEM; +} + + +/* PBL multi hop addressing */ +static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, + struct hns_roce_mr *mr) +{ + struct device *dev = hr_dev->dev; + u32 pbl_bt_sz; + u32 mhop_num; + + mhop_num = (mr->type == MR_TYPE_FRMR ? 
1 : hr_dev->caps.pbl_hop_num); + pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); + + if (mhop_num == HNS_ROCE_HOP_NUM_0) + return 0; + + if (mhop_num == 1) + return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz); + + mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / BA_BYTE_LEN, + sizeof(*mr->pbl_l1_dma_addr), + GFP_KERNEL); + if (!mr->pbl_l1_dma_addr) + return -ENOMEM; + + mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / BA_BYTE_LEN, sizeof(*mr->pbl_bt_l1), + GFP_KERNEL); + if (!mr->pbl_bt_l1) + goto err_kcalloc_bt_l1; + + /* alloc L0 BT */ + mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, + &(mr->pbl_l0_dma_addr), + GFP_KERNEL); + if (!mr->pbl_bt_l0) + goto err_kcalloc_l2_dma; + + if (mhop_num == 2) { + if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) + goto err_kcalloc_l2_dma; } - mr->l0_chunk_last_num = i + 1; - if (mhop_num == 3) - mr->l1_chunk_last_num = j + 1; + if (mhop_num == 3) { + if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) + goto err_kcalloc_l2_dma; + } mr->pbl_size = npages; mr->pbl_ba = mr->pbl_l0_dma_addr; @@ -471,14 +560,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, return 0; -err_dma_alloc_l0: - kfree(mr->pbl_bt_l2); - mr->pbl_bt_l2 = NULL; - -err_kcalloc_bt_l2: - kfree(mr->pbl_l2_dma_addr); - mr->pbl_l2_dma_addr = NULL; - err_kcalloc_l2_dma: kfree(mr->pbl_bt_l1); mr->pbl_bt_l1 = NULL; @@ -490,13 +571,13 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, return -ENOMEM; } -static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, +int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, u64 size, u32 access, int npages, struct hns_roce_mr *mr) { struct device *dev = hr_dev->dev; unsigned long index = 0; - int ret = 0; + int ret; /* Allocate a key for mr from mr_table */ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); @@ -511,7 +592,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, mr->key = hw_index_to_key(index); /* MR key */ if (size == ~0ull) { - mr->type = MR_TYPE_DMA; mr->pbl_buf = NULL; mr->pbl_dma_addr = 0; /* PBL multi-hop addressing parameters */ @@ -522,9 +602,9 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, mr->pbl_l1_dma_addr = NULL; mr->pbl_l0_dma_addr = 0; } else { - mr->type = MR_TYPE_MR; if (!hr_dev->caps.pbl_hop_num) { - mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, + mr->pbl_buf = dma_alloc_coherent(dev, + npages * BA_BYTE_LEN, &(mr->pbl_dma_addr), GFP_KERNEL); if (!mr->pbl_buf) @@ -536,28 +616,28 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, return ret; } +EXPORT_SYMBOL_GPL(hns_roce_mr_alloc); static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct device *dev = hr_dev->dev; - int npages_allocated; + int npages_alloced; int npages; int i, j; u32 pbl_bt_sz; u32 mhop_num; u64 bt_idx; - npages = ib_umem_page_count(mr->umem); + npages = mr->pbl_size; pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); - mhop_num = hr_dev->caps.pbl_hop_num; + mhop_num = (mr->type == MR_TYPE_FRMR) ? 
1 : hr_dev->caps.pbl_hop_num; if (mhop_num == HNS_ROCE_HOP_NUM_0) return; - /* hop_num = 1 */ if (mhop_num == 1) { - dma_free_coherent(dev, (unsigned int)(npages * 8), + dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN), mr->pbl_buf, mr->pbl_dma_addr); return; } @@ -568,12 +648,11 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, if (mhop_num == 2) { for (i = 0; i < mr->l0_chunk_last_num; i++) { if (i == mr->l0_chunk_last_num - 1) { - npages_allocated = i * (pbl_bt_sz / 8); + npages_alloced = i * (pbl_bt_sz / BA_BYTE_LEN); dma_free_coherent(dev, - (npages - npages_allocated) * 8, - mr->pbl_bt_l1[i], - mr->pbl_l1_dma_addr[i]); + (npages - npages_alloced) * BA_BYTE_LEN, + mr->pbl_bt_l1[i], mr->pbl_l1_dma_addr[i]); break; } @@ -586,16 +665,16 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], mr->pbl_l1_dma_addr[i]); - for (j = 0; j < pbl_bt_sz / 8; j++) { - bt_idx = i * (pbl_bt_sz / 8) + j; + for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) { + bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j; if ((i == mr->l0_chunk_last_num - 1) && j == mr->l1_chunk_last_num - 1) { - npages_allocated = bt_idx * - (pbl_bt_sz / 8); + npages_alloced = bt_idx * + (pbl_bt_sz / BA_BYTE_LEN); dma_free_coherent(dev, - (npages - npages_allocated) * 8, + (npages - npages_alloced) * BA_BYTE_LEN, mr->pbl_bt_l2[bt_idx], mr->pbl_l2_dma_addr[bt_idx]); @@ -621,7 +700,7 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, } } -static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, +void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct device *dev = hr_dev->dev; @@ -629,17 +708,20 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, int ret; if (mr->enabled) { - ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key) - & (hr_dev->caps.num_mtpts - 1)); + ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + key_to_hw_index(mr->key) & + (hr_dev->caps.num_mtpts - 1)); if (ret) - dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret); + dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret); } if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); + if (mr->type == MR_TYPE_MR) + npages = ib_umem_page_count(mr->umem); if (!hr_dev->caps.pbl_hop_num) - dma_free_coherent(dev, (unsigned int)(npages * 8), + dma_free_coherent(dev, + (unsigned int)(npages * BA_BYTE_LEN), mr->pbl_buf, mr->pbl_dma_addr); else hns_roce_mhop_free(hr_dev, mr); @@ -652,8 +734,9 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, key_to_hw_index(mr->key), BITMAP_NO_RR); } +EXPORT_SYMBOL_GPL(hns_roce_mr_free); -static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, +int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { int ret; @@ -664,9 +747,11 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, /* Prepare HEM entry memory */ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx); - if (ret) + if (ret) { + dev_err(dev, "Get mtpt table(0x%lx) failed(%d).", + mtpt_idx, ret); return ret; - + } /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) { @@ -674,16 +759,20 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, goto err_table; } - ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + if (mr->type != MR_TYPE_FRMR) + ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + else + ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); if (ret) { - dev_err(dev, "Write 
mtpt fail!\n"); + dev_err(dev, "Write mtpt fail(%d)!\n", ret); goto err_page; } - ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, - mtpt_idx & (hr_dev->caps.num_mtpts - 1)); + ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { - dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); + dev_err(dev, "CREATE_MPT(0x%lx) failed(%d) for mr_enable.\n", + mtpt_idx, ret); goto err_page; } @@ -699,6 +788,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx); return ret; } +EXPORT_SYMBOL_GPL(hns_roce_mr_enable); static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt, u32 start_index, @@ -707,14 +797,32 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table; dma_addr_t dma_handle; __le64 *mtts; - u32 s = start_index * sizeof(u64); u32 bt_page_size; u32 i; - if (mtt->mtt_type == MTT_TYPE_WQE) + switch (mtt->mtt_type) { + case MTT_TYPE_WQE: + table = &hr_dev->mr_table.mtt_table; bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT); - else + break; + case MTT_TYPE_CQE: + table = &hr_dev->mr_table.mtt_cqe_table; bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT); + break; + case MTT_TYPE_SRQWQE: + table = &hr_dev->mr_table.mtt_srqwqe_table; + bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT); + break; + case MTT_TYPE_IDX: + table = &hr_dev->mr_table.mtt_idx_table; + bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT); + break; + default: + dev_err(hr_dev->dev, + "Unsupport mtt type %d, write mtt chunk failed\n", + mtt->mtt_type); + return -EINVAL; + } /* All MTTs must fit in the same page */ if (start_index / (bt_page_size / sizeof(u64)) != @@ -724,14 +832,10 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1)) return -EINVAL; - if (mtt->mtt_type == MTT_TYPE_WQE) - table = &hr_dev->mr_table.mtt_table; - else - table = &hr_dev->mr_table.mtt_cqe_table; - mtts = hns_roce_table_find(hr_dev, table, - mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, - &dma_handle); + mtt->first_seg + + start_index / HNS_ROCE_MTT_ENTRY_PER_SEG, + &dma_handle); if (!mtts) return -ENOMEM; @@ -757,10 +861,25 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev, if (mtt->order < 0) return -EINVAL; - if (mtt->mtt_type == MTT_TYPE_WQE) + switch (mtt->mtt_type) { + case MTT_TYPE_WQE: bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT); - else + break; + case MTT_TYPE_CQE: bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT); + break; + case MTT_TYPE_SRQWQE: + bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT); + break; + case MTT_TYPE_IDX: + bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT); + break; + default: + dev_err(hr_dev->dev, + "Unsupport mtt type %d, write mtt failed\n", + mtt->mtt_type); + return -EINVAL; + } while (npages > 0) { chunk = min_t(int, bt_page_size / sizeof(u64), npages); @@ -789,13 +908,9 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev, if (!page_list) return -ENOMEM; - for (i = 0; i < buf->npages; ++i) { - if (buf->nbufs == 1) - page_list[i] = buf->direct.map + (i << buf->page_shift); - else - page_list[i] = buf->page_list[i].map; + for (i = 0; i < buf->npages; ++i) + page_list[i] = hns_roce_buf_page(buf, i); - } ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list); kfree(page_list); @@ -812,9 +927,11 @@ int hns_roce_init_mr_table(struct hns_roce_dev 
*hr_dev) hr_dev->caps.num_mtpts, hr_dev->caps.num_mtpts - 1, hr_dev->caps.reserved_mrws, 0); - if (ret) + if (ret) { + dev_err(hr_dev->dev, + "mtpt bitmap init failed, ret = %d\n", ret); return ret; - + } ret = hns_roce_buddy_init(&mr_table->mtt_buddy, ilog2(hr_dev->caps.num_mtt_segs)); if (ret) @@ -826,8 +943,26 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev) if (ret) goto err_buddy_cqe; } + + ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy, + ilog2(hr_dev->caps.num_srqwqe_segs)); + if (ret) + goto err_buddy_srqwqe; + + ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy, + ilog2(hr_dev->caps.num_idx_segs)); + if (ret) + goto err_buddy_idx; + return 0; +err_buddy_idx: + hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy); + +err_buddy_srqwqe: + if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) + hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy); + err_buddy_cqe: hns_roce_buddy_cleanup(&mr_table->mtt_buddy); @@ -840,6 +975,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev) { struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; + hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy); + hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy); hns_roce_buddy_cleanup(&mr_table->mtt_buddy); if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy); @@ -851,15 +988,21 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) struct hns_roce_mr *mr; int ret; - mr = kmalloc(sizeof(*mr), GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (mr == NULL) return ERR_PTR(-ENOMEM); + mr->type = MR_TYPE_DMA; + /* Allocate memory region key */ ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0, ~0ULL, acc, 0, mr); - if (ret) + if (ret) { + dev_err(to_hr_dev(pd->device)->dev, + "alloc mr failed(%d), pd is 0x%lx , access is 0x%x.\n", + ret, to_hr_pd(pd)->pdn, acc); goto err_free; + } ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr); if (ret) @@ -868,6 +1011,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->umem = NULL; + rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_GET_DMA_MR); + rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr); + return &mr->ibmr; err_mr: @@ -893,8 +1039,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, u32 bt_page_size; u32 n; - order = mtt->mtt_type == MTT_TYPE_WQE ? 
hr_dev->caps.mtt_ba_pg_sz : - hr_dev->caps.cqe_ba_pg_sz; + switch (mtt->mtt_type) { + case MTT_TYPE_WQE: + order = hr_dev->caps.mtt_ba_pg_sz; + break; + case MTT_TYPE_CQE: + order = hr_dev->caps.cqe_ba_pg_sz; + break; + case MTT_TYPE_SRQWQE: + order = hr_dev->caps.srqwqe_ba_pg_sz; + break; + case MTT_TYPE_IDX: + order = hr_dev->caps.idx_ba_pg_sz; + break; + default: + dev_err(dev, "Unsupport mtt type %d, umem write mtt failed\n", + mtt->mtt_type); + return -EINVAL; + } + bt_page_size = 1 << (order + PAGE_SHIFT); pages = (u64 *) __get_free_pages(GFP_KERNEL, order); @@ -937,7 +1100,7 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, +int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, struct ib_umem *umem) { @@ -953,12 +1116,13 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - len = sg_dma_len(sg) >> PAGE_SHIFT; + len = sg_dma_len(sg) >> umem->page_shift; for (k = 0; k < len; ++k) { page_addr = sg_dma_address(sg) + (k << umem->page_shift); if (!hr_dev->caps.pbl_hop_num) { + /* for hip06, page addr is aligned to 4K */ mr->pbl_buf[i++] = page_addr >> 12; } else if (hr_dev->caps.pbl_hop_num == 1) { mr->pbl_buf[i++] = page_addr; @@ -969,7 +1133,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, mr->pbl_bt_l2[i][j] = page_addr; j++; - if (j >= (pbl_bt_sz / 8)) { + if (j >= (pbl_bt_sz / BA_BYTE_LEN)) { i++; j = 0; } @@ -982,6 +1146,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, return 0; } +EXPORT_SYMBOL_GPL(hns_roce_ib_umem_write_mr); struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, @@ -995,7 +1160,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int n; int i; - mr = kmalloc(sizeof(*mr), GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -1003,6 +1168,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, access_flags, 0); if (IS_ERR(mr->umem)) { ret = PTR_ERR(mr->umem); + dev_err(dev, " ib_umem_get failed, ret = %d\n", ret); goto err_free; } @@ -1017,20 +1183,23 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_umem; } } else { - int pbl_size = 1; + u64 pbl_size = 1; - bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8; + bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / + BA_BYTE_LEN; for (i = 0; i < hr_dev->caps.pbl_hop_num; i++) pbl_size *= bt_size; if (n > pbl_size) { dev_err(dev, - " MR len %lld err. MR page num is limited to %d!\n", + " MR len %lld err. 
MR page num is limited to %lld!\n", length, pbl_size); ret = -EINVAL; goto err_umem; } } + mr->type = MR_TYPE_MR; + ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length, access_flags, n, mr); if (ret) @@ -1046,6 +1215,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->ibmr.rkey = mr->ibmr.lkey = mr->key; + rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_REG_USER_MR); + rdfx_alloc_rdfx_mr(to_hr_dev(pd->device), mr); + hns_roce_inc_rdma_hw_stats(pd->device, HW_STATS_MR_ALLOC); + return &mr->ibmr; err_mr: @@ -1059,6 +1232,80 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ERR_PTR(ret); } +static int rereg_mr_trans(struct ib_mr *ibmr, int flags, + u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct hns_roce_cmd_mailbox *mailbox, + u32 pdn) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); + struct hns_roce_mr *mr = to_hr_mr(ibmr); + struct device *dev = hr_dev->dev; + int npages; + int ret; + + if (mr->size != ~0ULL) { + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) + hns_roce_mhop_free(hr_dev, mr); + else + dma_free_coherent(dev, npages * BA_BYTE_LEN, + mr->pbl_buf, mr->pbl_dma_addr); + } + ib_umem_release(mr->umem); + + mr->umem = ib_umem_get(ibmr->uobject->context, start, length, + mr_access_flags, 0); + if (IS_ERR(mr->umem)) { + ret = PTR_ERR(mr->umem); + mr->umem = NULL; + return -ENOMEM; + } + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) { + ret = hns_roce_mhop_alloc(hr_dev, npages, mr); + if (ret) + goto release_umem; + } else { + mr->pbl_buf = dma_alloc_coherent(dev, npages * BA_BYTE_LEN, + &(mr->pbl_dma_addr), + GFP_KERNEL); + if (!mr->pbl_buf) { + ret = -ENOMEM; + goto release_umem; + } + } + + ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, + mr_access_flags, virt_addr, + length, mailbox->buf); + if (ret) + goto release_umem; + + ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); + if (ret) { + if (mr->size != ~0ULL) { + npages = ib_umem_page_count(mr->umem); + + if (hr_dev->caps.pbl_hop_num) + hns_roce_mhop_free(hr_dev, mr); + else + dma_free_coherent(dev, npages * BA_BYTE_LEN, + mr->pbl_buf, + mr->pbl_dma_addr); + } + + goto release_umem; + } + + return 0; +release_umem: + ib_umem_release(mr->umem); + return ret; +} + int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata) @@ -1069,9 +1316,11 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, struct device *dev = hr_dev->dev; unsigned long mtpt_idx; u32 pdn = 0; - int npages; int ret; + rdfx_func_cnt(hr_dev, RDFX_FUNC_REREG_USER_MR); + hns_roce_inc_rdma_hw_stats(ibmr->device, HW_STATS_MR_REREG); + if (!mr->enabled) return -EINVAL; @@ -1086,9 +1335,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, if (ret) goto free_cmd_mbox; - ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx); + ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx); if (ret) - dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret); + dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret); mr->enabled = 0; @@ -1096,73 +1345,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, pdn = to_hr_pd(pd)->pdn; if (flags & IB_MR_REREG_TRANS) { - if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) - hns_roce_mhop_free(hr_dev, mr); - else - dma_free_coherent(dev, 
npages * 8, mr->pbl_buf, - mr->pbl_dma_addr); - } - ib_umem_release(mr->umem); - - mr->umem = ib_umem_get(ibmr->uobject->context, start, length, - mr_access_flags, 0); - if (IS_ERR(mr->umem)) { - ret = PTR_ERR(mr->umem); - mr->umem = NULL; + ret = rereg_mr_trans(ibmr, flags, + start, length, + virt_addr, mr_access_flags, + mailbox, pdn); + if (ret) goto free_cmd_mbox; - } - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) { - ret = hns_roce_mhop_alloc(hr_dev, npages, mr); - if (ret) - goto release_umem; - } else { - mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, - &(mr->pbl_dma_addr), - GFP_KERNEL); - if (!mr->pbl_buf) { - ret = -ENOMEM; - goto release_umem; - } - } - } - - ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, - mr_access_flags, virt_addr, - length, mailbox->buf); - if (ret) { - if (flags & IB_MR_REREG_TRANS) - goto release_umem; - else + } else { + ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, + mr_access_flags, virt_addr, + length, mailbox->buf); + if (ret) goto free_cmd_mbox; } - if (flags & IB_MR_REREG_TRANS) { - ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); - if (ret) { - if (mr->size != ~0ULL) { - npages = ib_umem_page_count(mr->umem); - - if (hr_dev->caps.pbl_hop_num) - hns_roce_mhop_free(hr_dev, mr); - else - dma_free_coherent(dev, npages * 8, - mr->pbl_buf, - mr->pbl_dma_addr); - } - - goto release_umem; - } - } - - ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); + ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx); if (ret) { - dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); - goto release_umem; + dev_err(dev, "CREATE_MPT failed(%d) for rereg_usr_mr\n", ret); + ib_umem_release(mr->umem); + goto free_cmd_mbox; } mr->enabled = 1; @@ -1173,9 +1374,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, return 0; -release_umem: - ib_umem_release(mr->umem); - free_cmd_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -1188,6 +1386,11 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) struct hns_roce_mr *mr = to_hr_mr(ibmr); int ret = 0; + rdfx_func_cnt(hr_dev, RDFX_FUNC_DEREG_MR); + rdfx_inc_dereg_mr_cnt(hr_dev); + rdfx_release_rdfx_mr(hr_dev, mr->key); + hns_roce_inc_rdma_hw_stats(ibmr->device, HW_STATS_MR_DEALLOC); + if (hr_dev->hw->dereg_mr) { ret = hr_dev->hw->dereg_mr(hr_dev, mr); } else { @@ -1201,3 +1404,672 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr) return ret; } + +struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct device *dev = hr_dev->dev; + struct hns_roce_mr *mr; + u64 length; + u32 page_size; + int ret; + + page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT); + length = max_num_sg * page_size; + + if (mr_type != IB_MR_TYPE_MEM_REG) + return ERR_PTR(-EINVAL); + + if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) { + dev_err(dev, "max_num_sg larger than %d\n", + HNS_ROCE_FRMR_MAX_PA); + return ERR_PTR(-EINVAL); + } + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->type = MR_TYPE_FRMR; + + /* Allocate memory region key */ + ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length, + 0, max_num_sg, mr); + if (ret) + goto err_free; + + ret = hns_roce_mr_enable(hr_dev, mr); + if (ret) + goto err_free_mr; + + mr->ibmr.rkey = mr->ibmr.lkey = mr->key; + mr->umem = NULL; + + rdfx_func_cnt(hr_dev, RDFX_FUNC_REG_USER_MR); + rdfx_alloc_rdfx_mr(hr_dev, mr); + hns_roce_inc_rdma_hw_stats(pd->device, HW_STATS_MR_ALLOC); + + return 
&mr->ibmr; + +err_free_mr: + hns_roce_mr_free(to_hr_dev(pd->device), mr); + +err_free: + kfree(mr); + return ERR_PTR(ret); +} + +static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct hns_roce_mr *mr = to_hr_mr(ibmr); + + mr->pbl_buf[mr->npages++] = addr; + + return 0; +} + +int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset) +{ + struct hns_roce_mr *mr = to_hr_mr(ibmr); + + mr->npages = 0; + + return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); +} + +static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, + struct hns_roce_mw *mw) +{ + struct device *dev = hr_dev->dev; + int ret; + + if (mw->enabled) { + ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + key_to_hw_index(mw->rkey) & + (hr_dev->caps.num_mtpts - 1)); + if (ret) + dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret); + + hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, + key_to_hw_index(mw->rkey)); + } + + hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, + key_to_hw_index(mw->rkey), BITMAP_NO_RR); +} + +static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, + struct hns_roce_mw *mw) +{ + unsigned long mtpt_idx = key_to_hw_index(mw->rkey); + struct device *dev = hr_dev->dev; + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; + int ret; + + /* prepare HEM entry memory */ + ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx); + if (ret) + return ret; + + /* allocate mailbox memory */ + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ret = PTR_ERR(mailbox); + goto err_table; + } + + ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); + if (ret) { + dev_err(dev, "MW write mtpt failed(%d)!\n", ret); + goto err_page; + } + + ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + mtpt_idx & (hr_dev->caps.num_mtpts - 1)); + if (ret) { + dev_err(dev, "MW CREATE_MPT failed (%d).\n", ret); + goto err_page; + } + + mw->enabled = 1; + + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return 0; + +err_page: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + +err_table: + hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx); + + return ret; +} + +struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device); + struct hns_roce_mw *mw; + unsigned long index = 0; + int ret; + + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) + return ERR_PTR(-ENOMEM); + + /* Allocate a key for mw from bitmap */ + ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); + if (ret) + goto err_bitmap; + + mw->rkey = hw_index_to_key(index); + + mw->ibmw.rkey = mw->rkey; + mw->ibmw.type = type; + mw->pdn = to_hr_pd(ib_pd)->pdn; + mw->pbl_hop_num = hr_dev->caps.pbl_hop_num; + mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; + mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; + + ret = hns_roce_mw_enable(hr_dev, mw); + if (ret) + goto err_mw; + + return &mw->ibmw; + +err_mw: + hns_roce_mw_free(hr_dev, mw); + +err_bitmap: + kfree(mw); + + return ERR_PTR(ret); +} + +int hns_roce_dealloc_mw(struct ib_mw *ibmw) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device); + struct hns_roce_mw *mw = to_hr_mw(ibmw); + + hns_roce_mw_free(hr_dev, mw); + kfree(mw); + + return 0; +} + +static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_region *region, dma_addr_t *pages, + int max_count) +{ + int count, npage; + int offset, end; + __le64 
*mtts; + u64 addr; + int i; + + offset = region->offset; + end = offset + region->count; + npage = 0; + while (offset < end && npage < max_count) { + count = 0; + mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, + offset, &count, NULL); + if (!mtts) + return -ENOBUFS; + + for (i = 0; i < count && npage < max_count; i++) { + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) + addr = to_hr_hw_page_addr(pages[npage]); + else + addr = pages[npage]; + + mtts[i] = cpu_to_le64(addr); + npage++; + } + offset += count; + } + + return npage; +} + +static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr) +{ + int i; + + for (i = 0; i < attr->region_count; i++) + if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 && + attr->region[i].hopnum > 0) + return true; + + /* because the mtr only one root base address, when hopnum is 0 means + * root base address equals the first buffer address, thus all alloced + * memory must in a continuous space accessed by direct mode. + */ + return false; +} + +static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr) +{ + size_t size = 0; + int i; + + for (i = 0; i < attr->region_count; i++) + size += attr->region[i].size; + + return size; +} + +/* + * check the given pages in continuous address space + * Returns 0 on success, or the error page num. + */ +static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count, + unsigned int page_shift) +{ + size_t page_size = 1 << page_shift; + int i; + + for (i = 1; i < page_count; i++) + if (pages[i] - pages[i - 1] != page_size) + return i; + + return 0; +} + +static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) +{ + /* release user buffers */ + if (mtr->umem) { + ib_umem_release(mtr->umem); + mtr->umem = NULL; + } + + /* release kernel buffers */ + if (mtr->kmem) { + hns_roce_buf_free(hr_dev, mtr->kmem); + mtr->kmem = NULL; + } +} + +static struct ib_umem * +mtr_get_umem(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, size_t buf_size, + struct ib_ucontext *ucontext, unsigned long user_addr) +{ + return ib_umem_get(ucontext, user_addr, buf_size, + buf_attr->user_access, + buf_attr->user_dmasync); +} + +static struct hns_roce_buf * +mtr_get_kmem(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, int pg_shift, size_t buf_size, + bool is_direct) +{ + struct device *dev = hr_dev->dev; + struct hns_roce_buf *hr_buf; + + hr_buf = hns_roce_buf_alloc(hr_dev, buf_size, pg_shift, + is_direct ? 
HNS_ROCE_BUF_DIRECT : 0); + if (IS_ERR_OR_NULL(hr_buf)) { + dev_err(dev, "Failed to alloc kmem, ret %ld\n", + PTR_ERR(hr_buf)); + return NULL; + } + + return hr_buf; +} + +static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, + struct ib_ucontext *ucontext, unsigned long user_addr) +{ + struct device *dev = hr_dev->dev; + size_t total_size; + + total_size = mtr_bufs_size(buf_attr); + if (ucontext) { + mtr->kmem = NULL; + mtr->umem = mtr_get_umem(hr_dev, mtr, buf_attr, total_size, + ucontext, user_addr); + if (IS_ERR_OR_NULL(mtr->umem)) { + dev_err(dev, "Failed to get umem, ret %ld\n", + PTR_ERR(mtr->umem)); + return -ENOMEM; + } + } else { + mtr->umem = NULL; + mtr->kmem = mtr_get_kmem(hr_dev, mtr, buf_attr, + buf_attr->page_shift, total_size, + mtr->hem_cfg.is_direct); + if (!mtr->kmem) { + dev_err(dev, "Failed to alloc kmem\n"); + return -ENOMEM; + } + } + + return 0; +} + +static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + int page_count, unsigned int page_shift) +{ + struct device *dev = hr_dev->dev; + dma_addr_t *pages; + int npage; + int ret; + + /* alloc a tmp array to store buffer's dma address */ + pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + if (mtr->umem) + npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, + mtr->umem, page_shift); + else + npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, + mtr->kmem, page_shift); + + if (npage != page_count) { + dev_err(dev, "failed to get mtr page %d != %d.\n", npage, + page_count); + ret = -ENOBUFS; + goto err_alloc_list; + } + + if (mtr->hem_cfg.is_direct && npage > 1) { + ret = mtr_check_direct_pages(pages, npage, page_shift); + if (ret) { + dev_err(dev, "failed to check %s page: %d / %d.\n", + mtr->umem ? "umtr" : "kmtr", ret, npage); + ret = -ENOBUFS; + goto err_alloc_list; + } + } + + ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count); + if (ret) + dev_err(dev, "failed to map mtr pages, ret = %d.\n", ret); + +err_alloc_list: + /* drop tmp array */ + kvfree(pages); + + return ret; +} + +int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + dma_addr_t *pages, unsigned int page_cnt) +{ + struct device *dev = hr_dev->dev; + struct hns_roce_buf_region *r; + unsigned int i, mapped_cnt; + int ret; + + /* + * Only use the first page address as root ba when hopnum is 0, this + * is because the addresses of all pages are consecutive in this case. 
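+	 * As an illustration (hypothetical values): a 16 KB direct buffer whose
+	 * first 4 KB page sits at DMA address A spans A, A + 4K, A + 8K and
+	 * A + 12K, so recording A alone as root_ba is enough to reach every
+	 * page without any MTT lookup.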
+ */ + if (mtr->hem_cfg.is_direct) { + mtr->hem_cfg.root_ba = pages[0]; + return 0; + } + + for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count && + mapped_cnt < page_cnt; i++) { + r = &mtr->hem_cfg.region[i]; + /* if hopnum is 0, no need to map pages in this region */ + if (!r->hopnum) { + mapped_cnt += r->count; + continue; + } + + if (r->offset + r->count > page_cnt) { + ret = -EINVAL; + dev_err(dev, + "failed to check mtr%u count %u + %u > %u\n", + i, r->offset, r->count, page_cnt); + return ret; + } + + ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset], + page_cnt - mapped_cnt); + if (ret < 0) { + dev_err(dev, "failed to map mtr%u offset %u, ret = %d.\n", + i, r->offset, ret); + return ret; + } + mapped_cnt += ret; + ret = 0; + } + + if (mapped_cnt < page_cnt) { + ret = -ENOBUFS; + dev_err(dev, "failed to map mtr pages count: %u < %u.\n", + mapped_cnt, page_cnt); + } + + return ret; +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_map); + +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) +{ + struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; + int mtt_count, left; + int start_index; + int total = 0; + __le64 *mtts; + u32 npage; + u64 addr; + + if (!mtt_buf || mtt_max < 1) + goto done; + + /* no mtt memory in direct mode, so just return the buffer address */ + if (cfg->is_direct) { + start_index = offset >> HNS_HW_PAGE_SHIFT; + for (mtt_count = 0; mtt_count < cfg->region_count && + total < mtt_max; mtt_count++) { + npage = cfg->region[mtt_count].offset; + if (npage < start_index) + continue; + + addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) + mtt_buf[total] = to_hr_hw_page_addr(addr); + else + mtt_buf[total] = addr; + + total++; + } + + goto done; + } + + start_index = offset >> cfg->buf_pg_shift; + left = mtt_max; + while (left > 0) { + mtt_count = 0; + mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, + start_index + total, + &mtt_count, NULL); + if (!mtts || !mtt_count) + goto done; + + npage = min(mtt_count, left); + left -= npage; + for (mtt_count = 0; mtt_count < npage; mtt_count++) + mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]); + } + +done: + if (base_addr) + *base_addr = cfg->root_ba; + + return total; +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_find); + +static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, + struct hns_roce_buf_attr *attr, + struct hns_roce_hem_cfg *cfg, + unsigned int *buf_page_shift, int unalinged_size) +{ + struct hns_roce_buf_region *r; + int first_region_padding; + int page_cnt, region_cnt; + unsigned int page_shift; + size_t buf_size; + + /* if disable mtt, all pages must in a continuous address range */ + cfg->is_direct = !mtr_has_mtt(attr); + buf_size = mtr_bufs_size(attr); + if (cfg->is_direct) { + /* When HEM buffer use level-0 addressing, the page size is + * equal the whole buffer size, and we split whole buffer as + * small pages which is used to check whether the adjacent units + * are in the continuous space and the size is fixed as 4K for + * the hns ROCEE required. + */ + page_shift = HNS_HW_PAGE_SHIFT; + /* The ROCEE requires the page size is 4K * 2^N. 
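+	 * Worked example (assuming HNS_HW_PAGE_SHIFT is 12, i.e. 4 KB): for a
+	 * 24 KB buffer, DIV_ROUND_UP(24K, 4K) = 6 and order_base_2(6) = 3, so
+	 * buf_pg_shift below becomes 12 + 3 = 15, describing one 32 KB "page"
+	 * that covers the whole buffer.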
*/ + cfg->buf_pg_count = 1; + cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + + order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE)); + first_region_padding = 0; + } else { + page_shift = attr->page_shift; + cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size, + 1 << page_shift); + cfg->buf_pg_shift = page_shift; + first_region_padding = unalinged_size; + } + + /* Convert buffer size to page index and page count for each region and + * the buffer's offset need append to the first region. + */ + for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count && + region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) { + r = &cfg->region[region_cnt]; + r->offset = page_cnt; + buf_size = hr_hw_page_align(attr->region[region_cnt].size + + first_region_padding); + r->count = DIV_ROUND_UP(buf_size, 1 << page_shift); + first_region_padding = 0; + page_cnt += r->count; + r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum, + r->count); + } + + cfg->region_count = region_cnt; + *buf_page_shift = page_shift; + + return page_cnt; +} + +static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + unsigned int ba_page_shift) +{ + struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; + int ret; + + hns_roce_hem_list_init(&mtr->hem_list); + if (!cfg->is_direct) { + ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, + cfg->region, cfg->region_count, + ba_page_shift); + if (ret) + return ret; + cfg->root_ba = mtr->hem_list.root_ba; + cfg->ba_pg_shift = ba_page_shift; + } else { + cfg->ba_pg_shift = cfg->buf_pg_shift; + } + + return 0; +} + +static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) +{ + hns_roce_hem_list_release(hr_dev, &mtr->hem_list); +} + +/** + * hns_roce_mtr_create - Create hns memory translate region. + * + * @mtr: memory translate region + * @buf_attr: buffer attribute for creating mtr + * @ba_page_shift: page shift for multi-hop base address table + * @ucontext: user space context, if it's NULL, means kernel space + * @user_addr: userspace virtual address to start at + */ +int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, + unsigned int ba_page_shift, + struct ib_ucontext *ucontext, unsigned long user_addr) +{ + struct device *dev = hr_dev->dev; + unsigned int buf_page_shift = 0; + int buf_page_cnt; + int ret; + + buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg, + &buf_page_shift, + ucontext ? user_addr & ~PAGE_MASK : 0); + if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) { + dev_err(dev, "failed to init mtr cfg, count %d shift %u.\n", + buf_page_cnt, buf_page_shift); + return -EINVAL; + } + + ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift); + if (ret) { + dev_err(dev, "failed to alloc mtr mtt, ret = %d.\n", ret); + return ret; + } + + /* The caller has its own buffer list and invokes the hns_roce_mtr_map() + * to finish the MTT configure. 
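+	 * Only the BA/MTT tables are prepared in that case; if the caller later
+	 * passes fewer page addresses to hns_roce_mtr_map() than the configured
+	 * regions describe, the map fails with -ENOBUFS.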
+ */ + if (buf_attr->mtt_only) { + mtr->umem = NULL; + mtr->kmem = NULL; + return 0; + } + + ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, ucontext, user_addr); + if (ret) { + dev_err(dev, "failed to alloc mtr bufs, ret = %d.\n", ret); + goto err_alloc_mtt; + } + + /* Write buffer's dma address to MTT */ + ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift); + if (ret) + dev_err(dev, "failed to map mtr bufs, ret = %d.\n", ret); + else + return 0; + + mtr_free_bufs(hr_dev, mtr); +err_alloc_mtt: + mtr_free_mtt(hr_dev, mtr); + return ret; +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_create); + +void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) +{ + /* release multi-hop addressing resource */ + hns_roce_hem_list_release(hr_dev, &mtr->hem_list); + + /* free buffers */ + mtr_free_bufs(hr_dev, mtr); +} +EXPORT_SYMBOL_GPL(hns_roce_mtr_destroy); diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index e11c149da04d55f29df784e7aa90ac44f418ee9a..b5c14db838e2b9bbed289ffd4969fcbbaf2210ab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include "roce_k_compat.h" #include #include @@ -37,7 +38,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) { - return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0; + return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn); } static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) @@ -45,6 +46,18 @@ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn) hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR); } +static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, + unsigned long *xrcdn) +{ + return hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, xrcdn); +} + +static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev, + unsigned long xrcdn) +{ + hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR); +} + int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev) { return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds, @@ -57,6 +70,19 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); } +int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev) +{ + return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap, + hr_dev->caps.num_xrcds, + hr_dev->caps.num_xrcds - 1, + hr_dev->caps.reserved_xrcds, 0); +} + +void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev) +{ + hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap); +} + struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, struct ib_ucontext *context, struct ib_udata *udata) @@ -66,34 +92,58 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, struct hns_roce_pd *pd; int ret; - pd = kmalloc(sizeof(*pd), GFP_KERNEL); + pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); if (ret) { kfree(pd); - dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n"); + dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed(%d)!\n", ret); return ERR_PTR(ret); } +#ifdef CONFIG_NEW_KERNEL if (context) { struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn}; if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); - dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n"); + dev_err(dev, "[alloc_pd]ib_copy_to_udata failed, pd - 0x%lx!\n", + 
pd->pdn); kfree(pd); return ERR_PTR(-EFAULT); } } +#else + if (context) { + if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) { + hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); + dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!, pd -0x%lx\n", + pd->pdn); + kfree(pd); + return ERR_PTR(-EFAULT); + } + } + +#endif + + rdfx_func_cnt(hr_dev, RDFX_FUNC_ALLOC_PD); + rdfx_alloc_rdfx_pd(hr_dev, pd); + hns_roce_inc_rdma_hw_stats(ib_dev, HW_STATS_PD_ALLOC); + return &pd->ibpd; } EXPORT_SYMBOL_GPL(hns_roce_alloc_pd); int hns_roce_dealloc_pd(struct ib_pd *pd) { + + rdfx_func_cnt(to_hr_dev(pd->device), RDFX_FUNC_DEALLOC_PD); + rdfx_release_rdfx_pd(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); + hns_roce_inc_rdma_hw_stats(pd->device, HW_STATS_PD_DEALLOC); + hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); kfree(to_hr_pd(pd)); @@ -101,10 +151,69 @@ int hns_roce_dealloc_pd(struct ib_pd *pd) } EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd); +struct ib_xrcd *hns_roce_ib_alloc_xrcd(struct ib_device *ib_dev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); + struct ib_cq_init_attr cq_attr = {}; + struct hns_roce_xrcd *xrcd; + int ret; + + if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) + return ERR_PTR(-EINVAL); + + xrcd = kzalloc(sizeof(*xrcd), GFP_KERNEL); + if (!xrcd) + return ERR_PTR(-ENOMEM); + + ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn); + if (ret) { + kfree(xrcd); + dev_err(hr_dev->dev, + "[alloc_xrcd]hns_roce_xrcd_alloc failed(%d)!\n", ret); + return ERR_PTR(ret); + } + + xrcd->pd = ib_alloc_pd(ib_dev, 0); + if (IS_ERR_OR_NULL(xrcd->pd)) { + ret = PTR_ERR(xrcd->pd); + goto err_dealloc_xrcd; + } + + cq_attr.cqe = 1; + xrcd->cq = ib_create_cq(ib_dev, NULL, NULL, xrcd, &cq_attr); + if (IS_ERR_OR_NULL(xrcd->cq)) { + ret = PTR_ERR(xrcd->cq); + goto err_dealloc_pd; + } + + return &xrcd->ibxrcd; + +err_dealloc_pd: + ib_dealloc_pd(xrcd->pd); + +err_dealloc_xrcd: + hns_roce_xrcd_free(hr_dev, xrcd->xrcdn); + + kfree(xrcd); + return ERR_PTR(ret); +} + +int hns_roce_ib_dealloc_xrcd(struct ib_xrcd *xrcd) +{ + ib_destroy_cq(to_hr_xrcd(xrcd)->cq); + ib_dealloc_pd(to_hr_xrcd(xrcd)->pd); + hns_roce_xrcd_free(to_hr_dev(xrcd->device), to_hr_xrcd(xrcd)->xrcdn); + kfree(xrcd); + + return 0; +} + int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) { struct resource *res; - int ret = 0; + int ret; /* Using bitmap to manager UAR index */ ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx); @@ -125,8 +234,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) } uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index; } else { - uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) - >> PAGE_SHIFT); + uar->pfn = ((pci_resource_start(hr_dev->pci_dev, + HNS_ROCE_PCI_BAR_NR)) >> PAGE_SHIFT); } return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index efb7e961ca6510eabaf73711b29bd0c4f37defbe..df14a6909fd7846338494ab04c3f5ce4e9f81f63 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -31,6 +31,7 @@ * SOFTWARE. 
*/ +#include #include #include #include @@ -41,6 +42,46 @@ #define SQP_NUM (2 * HNS_ROCE_MAX_PORTS) +static void flush_work_handle(struct work_struct *work) +{ + struct hns_roce_flush_work *flush_work = container_of(work, + struct hns_roce_flush_work, work); + struct hns_roce_qp *hr_qp = flush_work->hr_qp; + struct device *dev = flush_work->hr_dev->dev; + struct ib_qp_attr attr; + int attr_mask; + int ret; + + attr_mask = IB_QP_STATE; + attr.qp_state = IB_QPS_ERR; + hr_qp->flush_en = 1; + + ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); + if (ret) + dev_err(dev, "Modify qp to err for flush cqe fail(%d)\n", ret); + + kfree(flush_work); + if (atomic_dec_and_test(&hr_qp->refcount)) + complete(&hr_qp->free); +} + +void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + struct hns_roce_flush_work *flush_work; + + flush_work = kzalloc(sizeof(struct hns_roce_flush_work), GFP_ATOMIC); + if (ZERO_OR_NULL_PTR(flush_work)) { + dev_err(hr_dev->dev, "Init flush work queue fail!\n"); + return; + } + flush_work->hr_dev = hr_dev; + flush_work->hr_qp = hr_qp; + INIT_WORK(&flush_work->work, flush_work_handle); + atomic_inc(&hr_qp->refcount); + queue_work(hr_dev->flush_workq, &flush_work->work); +} +EXPORT_SYMBOL_GPL(init_flush_work); + void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; @@ -56,10 +97,17 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) spin_unlock(&qp_table->lock); if (!qp) { - dev_warn(dev, "Async event for bogus QP %08x\n", qpn); + dev_warn(dev, "Async event for bogus QP 0x%08x\n", qpn); return; } + if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || + event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR) { + qp->state = IB_QPS_ERR; + init_flush_work(hr_dev, qp); + } + qp->event(qp, (enum hns_roce_event)event_type); if (atomic_dec_and_test(&qp->refcount)) @@ -102,7 +150,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, event.event = IB_EVENT_QP_ACCESS_ERR; break; default: - dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", + dev_dbg(ibqp->device->dev.parent, + "roce_ib:Unexpected eventtype %d on QP%06lx\n", type, hr_qp->qpn); return; } @@ -115,10 +164,7 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, - base) ? 
- -ENOMEM : - 0; + return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base); } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) @@ -158,7 +204,8 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); spin_unlock_irq(&qp_table->lock); if (ret) { - dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n"); + dev_err(hr_dev->dev, "GSI QPC radix insert failed(%d), qpn is 0x%lx\n", + ret, hr_qp->qpn); goto err_put_irrl; } @@ -187,14 +234,16 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, /* Alloc memory for QPC */ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); if (ret) { - dev_err(dev, "QPC table get failed\n"); + dev_err(dev, "QPC table get failed(%d), qpn 0x%lx\n", ret, + hr_qp->qpn); goto err_out; } /* Alloc memory for IRRL */ ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "IRRL table get failed\n"); + dev_err(dev, "IRRL table get failed(%d), qpn 0x%lx\n", ret, + hr_qp->qpn); goto err_put_qp; } @@ -203,18 +252,31 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, hr_qp->qpn); if (ret) { - dev_err(dev, "TRRL table get failed\n"); + dev_err(dev, "TRRL table get failed(%d), qpn 0x%lx\n", + ret, hr_qp->qpn); goto err_put_irrl; } } + if (hr_dev->caps.scc_ctx_entry_sz) { + /* Alloc memory for SCC CTX */ + ret = hns_roce_table_get(hr_dev, &qp_table->scc_ctx_table, + hr_qp->qpn); + if (ret) { + dev_err(dev, "SCC CTX table get failed(%d), qpn 0x%lx\n", + ret, hr_qp->qpn); + goto err_put_trrl; + } + } + spin_lock_irq(&qp_table->lock); ret = radix_tree_insert(&hr_dev->qp_table_tree, hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); spin_unlock_irq(&qp_table->lock); if (ret) { - dev_err(dev, "QPC radix_tree_insert failed\n"); - goto err_put_trrl; + dev_err(dev, "QPC radix_tree_insert failed(%d), qpn - 0x%lx\n", + ret, hr_qp->qpn); + goto err_put_scc_ctx; } atomic_set(&hr_qp->refcount, 1); @@ -222,6 +284,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, return 0; +err_put_scc_ctx: + if (hr_dev->caps.scc_ctx_entry_sz) + hns_roce_table_put(hr_dev, &qp_table->scc_ctx_table, + hr_qp->qpn); + err_put_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); @@ -261,7 +328,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); - hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); } } EXPORT_SYMBOL_GPL(hns_roce_qp_free); @@ -271,219 +337,252 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (base_qpn < SQP_NUM) + if (base_qpn < hr_dev->caps.reserved_qps) return; hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR); } EXPORT_SYMBOL_GPL(hns_roce_release_range_qp); -static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, int is_user, int has_srq, - struct hns_roce_qp *hr_qp) +static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp, + int user) +{ + u32 max_sge = dev->caps.max_rq_sg; + + if (dev->pci_dev->revision > PCI_REVISION_ID_HIP08_B) + return max_sge; + + /* Reserve SGEs only for HIP08 in kernel; The userspace driver will + * 
calculate number of max_sge with reserved SGEs when allocating wqe + * buf, so there is no need to do this again in kernel. But the number + * may exceed the capacity of SGEs recorded in the firmware, so the + * kernel driver should just adapt the value accordingly. + */ + if (user) + max_sge = roundup_pow_of_two(max_sge + 1); + else + hr_qp->rq.rsv_sge = 1; + + return max_sge; +} + +static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, + struct hns_roce_qp *hr_qp, int has_rq, bool user) { + u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user); struct device *dev = hr_dev->dev; - u32 max_cnt; + u32 cnt; + + /* If srq exist, set zero for relative number of rq */ + if (!has_rq) { + hr_qp->rq.wqe_cnt = 0; + hr_qp->rq.max_gs = 0; + hr_qp->rq_inl_buf.wqe_cnt = 0; + cap->max_recv_wr = 0; + cap->max_recv_sge = 0; + + return 0; + } /* Check the validity of QP support capacity */ - if (cap->max_recv_wr > hr_dev->caps.max_wqes || - cap->max_recv_sge > hr_dev->caps.max_rq_sg) { - dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n", + if (!cap->max_recv_sge || cap->max_recv_wr > hr_dev->caps.max_wqes || + cap->max_recv_sge > max_sge) { + dev_err(dev, "RQ config error, depth=%u, sge=%u\n", cap->max_recv_wr, cap->max_recv_sge); return -EINVAL; } - /* If srq exit, set zero for relative number of rq */ - if (has_srq) { - if (cap->max_recv_wr) { - dev_dbg(dev, "srq no need config max_recv_wr\n"); - return -EINVAL; - } + cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); + hr_qp->rq.max_gs = + roundup_pow_of_two(cap->max_recv_sge + hr_qp->rq.rsv_sge); - hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0; - } else { - if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) { - dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n"); - return -EINVAL; - } + if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); + else + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * + hr_qp->rq.max_gs); + + hr_qp->rq.wqe_cnt = cnt; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE && + hr_qp->ibqp.qp_type != IB_QPT_UD && + hr_qp->ibqp.qp_type != IB_QPT_GSI) + hr_qp->rq_inl_buf.wqe_cnt = cnt; + else + hr_qp->rq_inl_buf.wqe_cnt = 0; - if (hr_dev->caps.min_wqes) - max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes); - else - max_cnt = cap->max_recv_wr; + cap->max_recv_wr = cnt; + cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; - hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt); + return 0; +} - if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) { - dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n"); - return -EINVAL; - } +static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp) +{ + /* GSI/UD QP only has extended sge */ + if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) + return qp->sq.max_gs; - max_cnt = max(1U, cap->max_recv_sge); - hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt); - if (hr_dev->caps.max_rq_sg <= 2) - hr_qp->rq.wqe_shift = - ilog2(hr_dev->caps.max_rq_desc_sz); - else - hr_qp->rq.wqe_shift = - ilog2(hr_dev->caps.max_rq_desc_sz - * hr_qp->rq.max_gs); + if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) + return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE; + + return 0; +} + +static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, + struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) +{ + u32 total_sge_cnt; + u32 wqe_sge_cnt; + + hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; + + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { + hr_qp->sq.max_gs = 
HNS_ROCE_SGE_IN_WQE; + return; } - cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt; - cap->max_recv_sge = hr_qp->rq.max_gs; + hr_qp->sq.max_gs = max(1U, cap->max_send_sge); - return 0; + wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp); + + /* If the number of extended sge is not zero, they MUST use the + * space of HNS_HW_PAGE_SIZE at least. + */ + if (wqe_sge_cnt) { + total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt); + hr_qp->sge.sge_cnt = max(total_sge_cnt, + (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE); + } } -static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp, - struct hns_roce_ib_create_qp *ucmd) +static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, + struct hns_roce_ib_create_qp *ucmd) { u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); u8 max_sq_stride = ilog2(roundup_sq_stride); - u32 page_size; - u32 max_cnt; /* Sanity check SQ size before proceeding */ - if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || - ucmd->log_sq_stride > max_sq_stride || - ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { - dev_err(hr_dev->dev, "check SQ size error!\n"); + if (ucmd->log_sq_stride > max_sq_stride || + ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { + dev_err(hr_dev->dev, "failed to check SQ stride size\n"); return -EINVAL; } if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { - dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n", + dev_err(hr_dev->dev, "failed to check SQ SGE size %u\n", cap->max_send_sge); return -EINVAL; } - hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; - hr_qp->sq.wqe_shift = ucmd->log_sq_stride; - - max_cnt = max(1U, cap->max_send_sge); - if (hr_dev->caps.max_sq_sg <= 2) - hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); - else - hr_qp->sq.max_gs = max_cnt; - - if (hr_qp->sq.max_gs > 2) - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * - (hr_qp->sq.max_gs - 2)); - hr_qp->sge.sge_shift = 4; - - /* Get buf size, SQ and RQ are aligned to page_szie */ - if (hr_dev->caps.max_sq_sg <= 2) { - hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << - hr_qp->rq.wqe_shift), PAGE_SIZE) + - HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << - hr_qp->sq.wqe_shift), PAGE_SIZE); - - hr_qp->sq.offset = 0; - hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << - hr_qp->sq.wqe_shift), PAGE_SIZE); - } else { - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); - hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << - hr_qp->rq.wqe_shift), page_size) + - HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << - hr_qp->sge.sge_shift), page_size) + - HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << - hr_qp->sq.wqe_shift), page_size); - - hr_qp->sq.offset = 0; - if (hr_qp->sge.sge_cnt) { - hr_qp->sge.offset = HNS_ROCE_ALOGN_UP( - (hr_qp->sq.wqe_cnt << - hr_qp->sq.wqe_shift), - page_size); - hr_qp->rq.offset = hr_qp->sge.offset + - HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt << - hr_qp->sge.sge_shift), - page_size); - } else { - hr_qp->rq.offset = HNS_ROCE_ALOGN_UP( - (hr_qp->sq.wqe_cnt << - hr_qp->sq.wqe_shift), - page_size); - } - } - return 0; } -static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp) +static int set_user_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) { struct device *dev = hr_dev->dev; - u32 page_size; - u32 max_cnt; - int size; - - if (cap->max_send_wr > 
hr_dev->caps.max_wqes || - cap->max_send_sge > hr_dev->caps.max_sq_sg || - cap->max_inline_data > hr_dev->caps.max_sq_inline) { - dev_err(dev, "SQ WR or sge or inline data error!\n"); + u32 cnt = 0; + int ret; + + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || + cnt > hr_dev->caps.max_wqes) return -EINVAL; + + ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); + if (ret) { + dev_err(dev, "failed to check user SQ size\n"); + return ret; } - hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); - hr_qp->sq_max_wqes_per_wr = 1; - hr_qp->sq_spare_wqes = 0; + set_ext_sge_param(hr_dev, cnt, hr_qp, cap); - if (hr_dev->caps.min_wqes) - max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes); - else - max_cnt = cap->max_send_wr; + hr_qp->sq.wqe_shift = ucmd->log_sq_stride; + hr_qp->sq.wqe_cnt = cnt; - hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt); - if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) { - dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n"); - return -EINVAL; - } + return 0; +} - /* Get data_seg numbers */ - max_cnt = max(1U, cap->max_send_sge); - if (hr_dev->caps.max_sq_sg <= 2) - hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); - else - hr_qp->sq.max_gs = max_cnt; +static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct hns_roce_buf_attr *buf_attr) +{ + int buf_size; + int idx = 0; - if (hr_qp->sq.max_gs > 2) { - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * - (hr_qp->sq.max_gs - 2)); - hr_qp->sge.sge_shift = 4; + hr_qp->buff_size = 0; + + /* SQ WQE */ + hr_qp->sq.offset = 0; + buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, + hr_qp->sq.wqe_shift); + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { + buf_attr->region[idx].size = buf_size; + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; + idx++; + hr_qp->buff_size += buf_size; } - /* ud sqwqe's sge use extend sge */ - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) { - hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * - hr_qp->sq.max_gs); - hr_qp->sge.sge_shift = 4; + /* extend SGE WQE in SQ */ + hr_qp->sge.offset = hr_qp->buff_size; + buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, + hr_qp->sge.sge_shift); + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { + buf_attr->region[idx].size = buf_size; + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; + idx++; + hr_qp->buff_size += buf_size; } - /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); - hr_qp->sq.offset = 0; - size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, - page_size); + /* RQ WQE */ + hr_qp->rq.offset = hr_qp->buff_size; + buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, + hr_qp->rq.wqe_shift); + if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { + buf_attr->region[idx].size = buf_size; + buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; + idx++; + hr_qp->buff_size += buf_size; + } + + if (hr_qp->buff_size < 1) + return -EINVAL; - if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) { - hr_qp->sge.offset = size; - size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt << - hr_qp->sge.sge_shift, page_size); + buf_attr->region_count = idx; + + buf_attr->mtt_only = false; + buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; + + return 0; +} + +static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) +{ + struct device *dev = 
hr_dev->dev; + u32 cnt; + + if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || + cap->max_send_sge > hr_dev->caps.max_sq_sg) { + dev_err(dev, "failed to check SQ WR or SGE num, ret = %d.\n", + -EINVAL); + return -EINVAL; } - hr_qp->rq.offset = size; - size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), - page_size); - hr_qp->buff_size = size; + cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); + if (cnt > hr_dev->caps.max_wqes) { + dev_err(dev, "failed to check WQE num %u\n", cnt); + return -EINVAL; + } + + hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); + hr_qp->sq.wqe_cnt = cnt; + + set_ext_sge_param(hr_dev, cnt, hr_qp, cap); - /* Get wr and sge number which send */ - cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt; + /* sync the parameters of kernel QP to user's configuration */ + cap->max_send_wr = cnt; cap->max_send_sge = hr_qp->sq.max_gs; /* We don't support inline sends for kernel QPs (yet) */ @@ -494,7 +593,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) { - if (attr->qp_type == IB_QPT_XRC_TGT) + if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) return 0; return 1; @@ -503,127 +602,256 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || - attr->qp_type == IB_QPT_XRC_TGT || attr->srq) + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + !attr->cap.max_recv_wr) return 0; return 1; } -static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, - struct ib_pd *ib_pd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata, unsigned long sqpn, - struct hns_roce_qp *hr_qp) +static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) { - struct device *dev = hr_dev->dev; - struct hns_roce_ib_create_qp ucmd; - struct hns_roce_ib_create_qp_resp resp = {}; - unsigned long qpn = 0; - int ret = 0; - u32 page_shift; - u32 npages; + int ret; int i; - mutex_init(&hr_qp->mutex); - spin_lock_init(&hr_qp->sq.lock); - spin_lock_init(&hr_qp->rq.lock); + /* allocate recv inline buf */ + hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, + sizeof(struct hns_roce_rinl_wqe), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list) { + ret = -ENOMEM; + goto err; + } - hr_qp->state = IB_QPS_RESET; + hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; + + /* Firstly, allocate a list of sge space buffer */ + hr_qp->rq_inl_buf.wqe_list[0].sg_list = + kcalloc(hr_qp->rq_inl_buf.wqe_cnt, + init_attr->cap.max_recv_sge * + sizeof(struct hns_roce_rinl_sge), + GFP_KERNEL); + if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { + ret = -ENOMEM; + goto err_wqe_list; + } + + for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) + /* Secondly, reallocate the buffer */ + hr_qp->rq_inl_buf.wqe_list[i].sg_list = + &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * + init_attr->cap.max_recv_sge]; + + return 0; + +err_wqe_list: + kfree(hr_qp->rq_inl_buf.wqe_list); + +err: + return ret; +} + +static void hns_roce_free_recv_inline_buffer(struct hns_roce_qp *hr_qp) +{ + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); + kfree(hr_qp->rq_inl_buf.wqe_list); +} + +static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct hns_roce_buf_attr *buf_attr, + struct ib_uobject *uobject, unsigned long addr) +{ + struct device *dev = hr_dev->dev; + int ret; + + ret = hns_roce_mtr_create(hr_dev, 
&hr_qp->mtr, buf_attr, + PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, + uobject ? uobject->context : NULL, addr); + if (ret) + dev_err(dev, "failed to create WQE mtr, ret = %d.\n", ret); + + return ret; +} + +static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_uobject *uobject) +{ + hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); +} + +static void hns_roce_add_cq_to_qp(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_cq *send_cq, struct ib_cq *recv_cq) +{ + struct hns_roce_cq *hr_send_cq, *hr_recv_cq; + unsigned long flags; + + if (hr_dev->hw_rev != HNS_ROCE_HW_VER1) { + hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; + hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; + + spin_lock_irqsave(&hr_dev->qp_lock, flags); + hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); + + list_add_tail(&hr_qp->list, &hr_dev->qp_list); + if (hr_send_cq) + list_add_tail(&hr_qp->send_list, &hr_send_cq->sq_list); + if (hr_recv_cq) + list_add_tail(&hr_qp->recv_list, &hr_recv_cq->rq_list); + + hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); + spin_unlock_irqrestore(&hr_dev->qp_lock, flags); + } +} + +static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_uobject *uobject, unsigned long addr) +{ + struct hns_roce_buf_attr buf_attr = {}; + struct device *dev = hr_dev->dev; + bool is_rq_buf_inline; + int ret; + + is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hns_roce_qp_has_rq(init_attr); + if (is_rq_buf_inline) { + ret = hns_roce_alloc_recv_inline_buffer(hr_qp, init_attr); + if (ret) { + dev_err(hr_dev->dev, "Failed to alloc inline RQ buffer\n"); + return ret; + } + } + + ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); + if (ret) { + dev_err(dev, "failed to set WQE attr, ret = %d.\n", ret); + goto err_inline; + } + + ret = alloc_wqe_buf(hr_dev, hr_qp, &buf_attr, uobject, addr); + if (ret) { + dev_err(dev, "failed to alloc WQE buf, ret = %d.\n", ret); + goto err_inline; + } + + return 0; + +err_inline: + if (is_rq_buf_inline) + hns_roce_free_recv_inline_buffer(hr_qp); + + return ret; +} + +static void free_qp_buf(struct hns_roce_qp *hr_qp, struct ib_pd *ib_pd) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); + + free_wqe_buf(hr_dev, hr_qp, ib_pd->uobject); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hr_qp->rq.wqe_cnt) + hns_roce_free_recv_inline_buffer(hr_qp); +} + +static int set_max_inline_data(struct hns_roce_dev *hr_dev, + struct ib_qp_init_attr *init_attr) +{ + if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) + return -EINVAL; + + if (init_attr->qp_type == IB_QPT_UD) + init_attr->cap.max_inline_data = 0; + + if (init_attr->cap.max_inline_data) + init_attr->cap.max_inline_data = roundup_pow_of_two( + init_attr->cap.max_inline_data); + + return 0; +} + +static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp *ucmd) +{ + int ret; + + ret = set_max_inline_data(hr_dev, init_attr); + if (ret != 0) + return -EINVAL; hr_qp->ibqp.qp_type = init_attr->qp_type; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); + hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; else - hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); + hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; - ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject, - !!init_attr->srq, hr_qp); 
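+	/* Illustrative sizing example (assumed values: kernel QP, one SGE
+	 * reserved by proc_rq_sge(), caps.min_wqes <= 100): a request for 100
+	 * recv WRs with 3 SGEs is rounded by set_rq_size() to 128 WQEs with
+	 * max_gs = roundup_pow_of_two(3 + 1) = 4, while cap->max_recv_sge
+	 * handed back to the caller remains max_gs - rsv_sge = 3.
+	 */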
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, + hns_roce_qp_has_rq(init_attr), !!udata); if (ret) { - dev_err(dev, "hns_roce_set_rq_size failed\n"); - goto err_out; + dev_err(hr_dev->dev, "Failed to set user RQ size\n"); + return ret; } - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { - /* allocate recv inline buf */ - hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, - sizeof(struct hns_roce_rinl_wqe), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list) { - ret = -ENOMEM; - goto err_out; + if (udata) { + if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) { + dev_err(hr_dev->dev, "Failed to copy QP ucmd\n"); + return -EFAULT; } - hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt; + ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); + if (ret) + dev_err(hr_dev->dev, "Failed to set user SQ size\n"); + } else { + if (init_attr->create_flags & + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { + dev_err(hr_dev->dev, "Failed to check multicast loopback\n"); + return -EINVAL; + } - /* Firstly, allocate a list of sge space buffer */ - hr_qp->rq_inl_buf.wqe_list[0].sg_list = - kcalloc(hr_qp->rq_inl_buf.wqe_cnt, - init_attr->cap.max_recv_sge * - sizeof(struct hns_roce_rinl_sge), - GFP_KERNEL); - if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) { - ret = -ENOMEM; - goto err_wqe_list; + if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { + dev_err(hr_dev->dev, "Failed to check ipoib ud lso\n"); + return -EINVAL; } - for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++) - /* Secondly, reallocate the buffer */ - hr_qp->rq_inl_buf.wqe_list[i].sg_list = - &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i * - init_attr->cap.max_recv_sge]; + ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); + if (ret) + dev_err(hr_dev->dev, "Failed to set kernel SQ size\n"); } - if (ib_pd->uobject) { - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { - dev_err(dev, "ib_copy_from_udata error for create qp\n"); - ret = -EFAULT; - goto err_rq_sge_list; - } + return ret; +} - ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, - &ucmd); - if (ret) { - dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_rq_sge_list; - } +static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, + struct ib_pd *ib_pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, unsigned long sqpn, + struct hns_roce_qp *hr_qp) +{ + struct device *dev = hr_dev->dev; + struct hns_roce_ib_create_qp ucmd; + struct hns_roce_ib_create_qp_resp resp = {}; + unsigned long qpn = 0; + int ret; - hr_qp->umem = ib_umem_get(ib_pd->uobject->context, - ucmd.buf_addr, hr_qp->buff_size, 0, - 0); - if (IS_ERR(hr_qp->umem)) { - dev_err(dev, "ib_umem_get error for create qp\n"); - ret = PTR_ERR(hr_qp->umem); - goto err_rq_sge_list; - } + mutex_init(&hr_qp->mutex); + spin_lock_init(&hr_qp->sq.lock); + spin_lock_init(&hr_qp->rq.lock); - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; - if (hr_dev->caps.mtt_buf_pg_sz) { - npages = (ib_umem_page_count(hr_qp->umem) + - (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) / - (1 << hr_dev->caps.mtt_buf_pg_sz); - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; - ret = hns_roce_mtt_init(hr_dev, npages, - page_shift, - &hr_qp->mtt); - } else { - ret = hns_roce_mtt_init(hr_dev, - ib_umem_page_count(hr_qp->umem), - hr_qp->umem->page_shift, - &hr_qp->mtt); - } - if (ret) { - dev_err(dev, "hns_roce_mtt_init error for create qp\n"); - goto err_buf; - } + hr_qp->state = IB_QPS_RESET; + hr_qp->next_state = IB_QPS_RESET; - ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt, - 
hr_qp->umem); - if (ret) { - dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n"); - goto err_mtt; - } + ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); + if (ret) { + dev_err(dev, "hns_roce_set_rq_size failed(%d).\n", ret); + return ret; + } + if (ib_pd->uobject) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && (udata->inlen >= sizeof(ucmd)) && (udata->outlen >= sizeof(resp)) && @@ -632,8 +860,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, to_hr_ucontext(ib_pd->uobject->context), ucmd.sdb_addr, &hr_qp->sdb); if (ret) { - dev_err(dev, "sq record doorbell map failed!\n"); - goto err_mtt; + dev_err(dev, "SQ record doorbell map failed(%d)!\n", + ret); + goto err_out; } /* indicate kernel supports sq record db */ @@ -648,32 +877,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, to_hr_ucontext(ib_pd->uobject->context), ucmd.db_addr, &hr_qp->rdb); if (ret) { - dev_err(dev, "rq record doorbell map failed!\n"); + dev_err(dev, "RQ record doorbell map failed(%d)!\n", + ret); goto err_sq_dbmap; } - } - } else { - if (init_attr->create_flags & - IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_rq_sge_list; - } - - if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_rq_sge_list; - } - /* Set SQ size */ - ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_rq_sge_list; + /* indicate kernel supports rq record db */ + resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; + hr_qp->rdb_en = 1; } - + } else { /* QP doorbell register address */ hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + DB_REG_OFFSET * hr_dev->priv_uar.index; @@ -684,46 +897,28 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_qp_has_rq(init_attr)) { ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); if (ret) { - dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_rq_sge_list; + dev_err(dev, "RQ record doorbell alloc failed(%d)!\n", + ret); + goto err_out; } *hr_qp->rdb.db_record = 0; hr_qp->rdb_en = 1; } - /* Allocate QP buf */ - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; - if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, - (1 << page_shift) * 2, - &hr_qp->hr_buf, page_shift)) { - dev_err(dev, "hns_roce_buf_alloc error!\n"); + hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { ret = -ENOMEM; goto err_db; } - hr_qp->mtt.mtt_type = MTT_TYPE_WQE; - /* Write MTT */ - ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages, - hr_qp->hr_buf.page_shift, &hr_qp->mtt); - if (ret) { - dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n"); - goto err_buf; - } - - ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt, - &hr_qp->hr_buf); - if (ret) { - dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n"); - goto err_mtt; - } - - hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) { - ret = -ENOMEM; - goto err_wrid; + if (hr_qp->rq.wqe_cnt) { + hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) { + ret = -ENOMEM; + goto err_sq_wrid; + } } } @@ -734,22 +929,29 @@ static int 
hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn); if (ret) { dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n"); - goto err_wrid; + goto err_buf; } } + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, ib_pd->uobject, + ucmd.buf_addr); + if (ret) { + dev_err(hr_dev->dev, "Failed to alloc QP buffer\n"); + goto err_db; + } + if (init_attr->qp_type == IB_QPT_GSI && hr_dev->hw_rev == HNS_ROCE_HW_VER1) { /* In v1 engine, GSI QP context in RoCE engine's register */ ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); + dev_err(dev, "Alloc GSI QP failed(%d)!\n", ret); goto err_qpn; } } else { ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp); if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); + dev_err(dev, "Failed to alloc qp!\n"); goto err_qpn; } } @@ -757,20 +959,26 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (sqpn) hr_qp->doorbell_qpn = 1; else - hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); + hr_qp->doorbell_qpn = (u32)(hr_qp->qpn); - if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) && - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) { - - /* indicate kernel supports rq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; - ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (ib_pd->uobject) { + ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (ret) goto err_qp; + } - hr_qp->rdb_en = 1; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { + ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); + if (ret) { + dev_err(hr_dev->dev, "QP flow control init failure(%d)!", + ret); + goto err_qp; + } } + hr_qp->event = hns_roce_ib_qp_event; + hns_roce_add_cq_to_qp(hr_dev, hr_qp, init_attr->send_cq, + init_attr->recv_cq); return 0; @@ -785,7 +993,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (!sqpn) hns_roce_release_range_qp(hr_dev, qpn, 1); -err_wrid: +err_buf: + free_qp_buf(hr_qp, ib_pd); + if (ib_pd->uobject) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp)) && @@ -794,8 +1004,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, to_hr_ucontext(ib_pd->uobject->context), &hr_qp->rdb); } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); + if (hr_qp->rq.wqe_cnt) + kfree(hr_qp->rq.wrid); } err_sq_dbmap: @@ -807,44 +1017,72 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hns_roce_db_unmap_user( to_hr_ucontext(ib_pd->uobject->context), &hr_qp->sdb); - -err_mtt: - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); - -err_buf: - if (ib_pd->uobject) - ib_umem_release(hr_qp->umem); - else - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); +err_sq_wrid: + if (!ib_pd->uobject) + kfree(hr_qp->sq.wrid); err_db: if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); -err_rq_sge_list: - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - -err_wqe_list: - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) - kfree(hr_qp->rq_inl_buf.wqe_list); - err_out: return ret; } +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + hns_roce_qp_free(hr_dev, hr_qp); + + /* Not special_QP, free their QPN */ + if (hr_qp->ibqp.qp_type != IB_QPT_GSI) + hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + + free_qp_buf(hr_qp, 
hr_qp->ibqp.pd); + + if (hr_qp->ibqp.pd->uobject) { + struct hns_roce_ucontext *context = + to_hr_ucontext(hr_qp->ibqp.pd->uobject->context); + + if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->sdb); + + if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->rdb); + } else { + kfree(hr_qp->sq.wrid); + kfree(hr_qp->rq.wrid); + if (hr_qp->rq.wqe_cnt) + hns_roce_free_db(hr_dev, &hr_qp->rdb); + } + + kfree(hr_qp); +} +EXPORT_SYMBOL_GPL(hns_roce_qp_destroy); + struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { - struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_dev *hr_dev = pd ? to_hr_dev(pd->device) : + to_hr_dev(init_attr->xrcd->device); struct device *dev = hr_dev->dev; - struct hns_roce_sqp *hr_sqp; struct hns_roce_qp *hr_qp; + u16 xrcdn = 0; int ret; switch (init_attr->qp_type) { + case IB_QPT_XRC_TGT: + if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) + return ERR_PTR(-EINVAL); + pd = to_hr_xrcd(init_attr->xrcd)->pd; + xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; + init_attr->send_cq = to_hr_xrcd(init_attr->xrcd)->cq; + case IB_QPT_XRC_INI: + if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) + return ERR_PTR(-EINVAL); + init_attr->recv_cq = init_attr->send_cq; + case IB_QPT_UC: case IB_QPT_RC: { hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); if (!hr_qp) @@ -853,13 +1091,14 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, hr_qp); if (ret) { - dev_err(dev, "Create RC QP failed\n"); + dev_err(dev, "Create RC QP 0x%06lx failed(%d)\n", + hr_qp->qpn, ret); kfree(hr_qp); return ERR_PTR(ret); } hr_qp->ibqp.qp_num = hr_qp->qpn; - + hr_qp->xrcdn = xrcdn; break; } case IB_QPT_GSI: { @@ -869,16 +1108,15 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, return ERR_PTR(-EINVAL); } - hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL); - if (!hr_sqp) + hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); + if (!hr_qp) return ERR_PTR(-ENOMEM); - hr_qp = &hr_sqp->hr_qp; hr_qp->port = init_attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->caps.max_sq_sg <= 2) + if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM) hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + hr_dev->iboe.phy_port[hr_qp->port]; else @@ -887,8 +1125,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp->ibqp.qp_num, hr_qp); if (ret) { - dev_err(dev, "Create GSI QP failed!\n"); - kfree(hr_sqp); + dev_err(dev, "Create GSI QP failed(%d)!\n", ret); + kfree(hr_qp); return ERR_PTR(ret); } @@ -900,6 +1138,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, } } + rdfx_func_cnt(hr_dev, RDFX_FUNC_CREATE_QP); + rdfx_alloc_qp_buf(hr_dev, hr_qp); + + hns_roce_inc_rdma_hw_stats(pd->device, HW_STATS_QP_ALLOC); return &hr_qp->ibqp; } EXPORT_SYMBOL_GPL(hns_roce_create_qp); @@ -916,6 +1158,8 @@ int to_hr_qp_type(int qp_type) transport_type = SERV_TYPE_UD; else if (qp_type == IB_QPT_GSI) transport_type = SERV_TYPE_UD; + else if (qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT) + transport_type = SERV_TYPE_XRC; else transport_type = -1; @@ -923,94 +1167,133 @@ int to_hr_qp_type(int qp_type) } EXPORT_SYMBOL_GPL(to_hr_qp_type); -int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata) +static int check_mtu_validate(struct hns_roce_dev 
*hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_qp_attr *attr, int attr_mask) { - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - enum ib_qp_state cur_state, new_state; struct device *dev = hr_dev->dev; - int ret = -EINVAL; - int p; enum ib_mtu active_mtu; + int p; - mutex_lock(&hr_qp->mutex); - - cur_state = attr_mask & IB_QP_CUR_STATE ? - attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; - new_state = attr_mask & IB_QP_STATE ? - attr->qp_state : cur_state; + p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; + active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); - if (ibqp->uobject && - (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { - if (hr_qp->sdb_en == 1) { - hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); - } else { - dev_warn(dev, "flush cqe is not supported in userspace!\n"); - goto out; - } + if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && + attr->path_mtu > hr_dev->caps.max_mtu) || + attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { + dev_err(dev, "attr path_mtu(%d)invalid while modify qp(0x%lx)", + attr->path_mtu, hr_qp->qpn); + return -EINVAL; } - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, - IB_LINK_LAYER_ETHERNET)) { - dev_err(dev, "ib_modify_qp_is_ok failed\n"); - goto out; - } + return 0; +} + +static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + struct device *dev = hr_dev->dev; + int ret = 0; + int p; if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { dev_err(dev, "attr port_num invalid.attr->port_num=%d\n", attr->port_num); - goto out; + return -EINVAL; } if (attr_mask & IB_QP_PKEY_INDEX) { p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { - dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n", - attr->pkey_index); - goto out; + dev_err(dev, + "Attr pkey_index(%d) invalid.Max index is %d.\n", + attr->pkey_index, + hr_dev->caps.pkey_table_len[p]); + return -EINVAL; } } if (attr_mask & IB_QP_PATH_MTU) { - p = attr_mask & IB_QP_PORT ? 
(attr->port_num - 1) : hr_qp->port; - active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); - - if ((hr_dev->caps.max_mtu == IB_MTU_4096 && - attr->path_mtu > IB_MTU_4096) || - (hr_dev->caps.max_mtu == IB_MTU_2048 && - attr->path_mtu > IB_MTU_2048) || - attr->path_mtu < IB_MTU_256 || - attr->path_mtu > active_mtu) { - dev_err(dev, "attr path_mtu(%d)invalid while modify qp", - attr->path_mtu); - goto out; - } + ret = check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); + if (ret) + return ret; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { - dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", - attr->max_rd_atomic); - goto out; + dev_err(dev, "Attr max_rd_atomic(%d) invalid, max is %d.\n", + attr->max_rd_atomic, hr_dev->caps.max_qp_init_rdma); + return -EINVAL; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { - dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", - attr->max_dest_rd_atomic); + dev_err(dev, + "Attr max_dest_rd_atomic(%d) invalid, max is %d.\n", + attr->max_dest_rd_atomic, + hr_dev->caps.max_qp_dest_rdma); + return -EINVAL; + } + + return ret; +} + +int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); + enum ib_qp_state cur_state, new_state; + struct device *dev = hr_dev->dev; + int ret = 0; + + rdfx_func_cnt(hr_dev, RDFX_FUNC_MODIFY_QP); + + mutex_lock(&hr_qp->mutex); + + if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) + goto out; + + cur_state = hr_qp->state; + new_state = attr_mask & IB_QP_STATE ? + attr->qp_state : cur_state; + + hr_qp->next_state = new_state; + hr_qp->attr_mask = attr_mask; + + if (ibqp->pd->uobject && + (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { + if (hr_qp->sdb_en == 1) { + hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); + + if (hr_qp->rdb_en == 1) + hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); + } else { + dev_warn(dev, "flush cqe is not supported in userspace!\n"); + ret = -EINVAL; + goto out; + } + } + + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + dev_err(dev, "ib_modify_qp_is_ok failed. 
type: %d, cur_state: %d, new_state: %d, mask: 0x%x.\n", + ibqp->qp_type, cur_state, new_state, attr_mask); + ret = -EINVAL; goto out; } + ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); + if (ret) + goto out; + if (cur_state == new_state && cur_state == IB_QPS_RESET) { - if (hr_dev->caps.min_wqes) { + if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { ret = -EPERM; - dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, - new_state); - } else { - ret = 0; + dev_err(dev, "RST2RST state is not supported\n"); } goto out; @@ -1018,6 +1301,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, new_state); + if (ret) + dev_err(dev, "Modify QP(%08x) failed(%d).\n", ibqp->qp_num, + ret); out: mutex_unlock(&hr_qp->mutex); @@ -1028,7 +1314,16 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { - if (send_cq == recv_cq) { + if (unlikely(!send_cq && !recv_cq)) { + __acquire(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(send_cq && !recv_cq)) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(!send_cq && recv_cq)) { + spin_lock_irq(&recv_cq->lock); + __acquire(&send_cq->lock); + } else if (send_cq == recv_cq) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { @@ -1045,7 +1340,16 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { - if (send_cq == recv_cq) { + if (unlikely(!send_cq && !recv_cq)) { + __release(&recv_cq->lock); + __release(&send_cq->lock); + } else if (unlikely(send_cq && !recv_cq)) { + spin_unlock(&send_cq->lock); + __release(&recv_cq->lock); + } else if (unlikely(!send_cq && recv_cq)) { + spin_unlock(&recv_cq->lock); + __release(&send_cq->lock); + } else if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { @@ -1060,8 +1364,7 @@ EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs); static void *get_wqe(struct hns_roce_qp *hr_qp, int offset) { - - return hns_roce_buf_offset(&hr_qp->hr_buf, offset); + return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); } void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) @@ -1078,8 +1381,7 @@ EXPORT_SYMBOL_GPL(get_send_wqe); void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n) { - return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset + - (n << hr_qp->sge.sge_shift)); + return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); } EXPORT_SYMBOL_GPL(get_send_extend_sge); @@ -1090,7 +1392,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, u32 cur; cur = hr_wq->head - hr_wq->tail; - if (likely(cur + nreq < hr_wq->max_post)) + if (likely(cur + nreq < hr_wq->wqe_cnt)) return false; hr_cq = to_hr_cq(ib_cq); @@ -1098,7 +1400,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, cur = hr_wq->head - hr_wq->tail; spin_unlock(&hr_cq->lock); - return cur + nreq >= hr_wq->max_post; + return cur + nreq >= hr_wq->wqe_cnt; } EXPORT_SYMBOL_GPL(hns_roce_wq_overflow); @@ -1106,14 +1408,16 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; int reserved_from_top = 0; + int reserved_from_bot; int ret; spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&hr_dev->qp_table_tree, 
GFP_ATOMIC); - /* A port include two SQP, six port total 12 */ + reserved_from_bot = hr_dev->caps.reserved_qps; + ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, - hr_dev->caps.num_qps - 1, SQP_NUM, + hr_dev->caps.num_qps - 1, reserved_from_bot, reserved_from_top); if (ret) { dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n", diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c new file mode 100644 index 0000000000000000000000000000000000000000..4bfab0a278619da4fdffb8f917ccfa915b833864 --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +// Copyright (c) 2018 Hisilicon Limited. + +#include +#include +#include +#include "hnae3.h" +#include "hns_roce_common.h" +#include "hns_roce_device.h" +#include "hns_roce_hw_v2.h" + +static int hns_roce_fill_cq_dev_count(struct hns_roce_cq *hr_cq, + struct sk_buff *msg, + struct hns_roce_v2_cq_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "send_cqe", + hr_cq->dfx_cnt[HNS_ROCE_SQ_CQE])) + goto err; + + if (rdma_nl_put_driver_u32(msg, "recv_cqe", + hr_cq->dfx_cnt[HNS_ROCE_RQ_CQE])) + goto err; + + if (rdma_nl_put_driver_u32(msg, "arm", hr_cq->arm_sn)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_cq(struct hns_roce_cq *hr_cq, + struct sk_buff *msg, + struct hns_roce_v2_cq_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "state", + roce_get_field(context->byte_4_pg_ceqn, + V2_CQC_BYTE_4_ARM_ST_M, + V2_CQC_BYTE_4_ARM_ST_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ceqn", + roce_get_field(context->byte_4_pg_ceqn, + V2_CQC_BYTE_4_CEQN_M, + V2_CQC_BYTE_4_CEQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "poll", + roce_get_bit(context->byte_4_pg_ceqn, + V2_CQC_BYTE_4_POLL_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "shift", + roce_get_field(context->byte_4_pg_ceqn, + V2_CQC_BYTE_4_SHIFT_M, + V2_CQC_BYTE_4_SHIFT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "cmd_sn", + roce_get_field(context->byte_4_pg_ceqn, + V2_CQC_BYTE_4_CMD_SN_M, + V2_CQC_BYTE_4_CMD_SN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "cqn", + roce_get_field(context->byte_8_cqn, + V2_CQC_BYTE_8_CQN_M, + V2_CQC_BYTE_8_CQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "hopnum", + roce_get_field(context->byte_16_hop_addr, + V2_CQC_BYTE_16_CQE_HOP_NUM_M, + V2_CQC_BYTE_16_CQE_HOP_NUM_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "pi", + roce_get_field(context->byte_28_cq_pi, + V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M, + V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ci", + roce_get_field(context->byte_32_cq_ci, + V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M, + V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rdb_en", + roce_get_field(context->byte_44_db_record, + V2_CQC_BYTE_44_DB_RECORD_ADDR_M, + V2_CQC_BYTE_44_DB_RECORD_ADDR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "coalesce", + roce_get_field( + context->byte_56_cqe_period_maxcnt, + V2_CQC_BYTE_56_CQ_MAX_CNT_M, + V2_CQC_BYTE_56_CQ_MAX_CNT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "period", + roce_get_field( + context->byte_56_cqe_period_maxcnt, + V2_CQC_BYTE_56_CQ_PERIOD_M, + V2_CQC_BYTE_56_CQ_PERIOD_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "cnt", + roce_get_field(context->byte_52_cqe_cnt, + V2_CQC_BYTE_52_CQE_CNT_M, + V2_CQC_BYTE_52_CQE_CNT_S))) + goto err; + + if 
(rdma_nl_put_driver_u32(msg, "se_idx", + roce_get_field(context->byte_64_se_cqe_idx, + V2_CQC_BYTE_64_SE_CQE_IDX_M, + V2_CQC_BYTE_64_SE_CQE_IDX_S))) + goto err; + + return hns_roce_fill_cq_dev_count(hr_cq, msg, context); + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_cq_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_cq *ib_cq = container_of(res, struct ib_cq, res); + struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); + struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); + struct hns_roce_v2_cq_context context; + struct nlattr *table_attr; + int ret; + + if (!hr_dev->dfx->query_cqc_info) + return -EINVAL; + + ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)&context); + if (ret) + goto err; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_cq(hr_cq, msg, &context)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + return -EMSGSIZE; +} + +static int hns_roce_qp_fill_rq(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "rq_pi", + roce_get_field(context->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, + V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_ci", + roce_get_field(context->byte_84_rq_ci_pi, + V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M, + V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_shift", + roce_get_field( + context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_RQ_SHIFT_M, + V2_QPC_BYTE_20_RQ_SHIFT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_cqeidx", + roce_get_field( + context->byte_256_sqflush_rqcqe, + V2_QPC_BYTE_256_RQ_CQE_IDX_M, + V2_QPC_BYTE_256_RQ_CQE_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_rx_err", + roce_get_bit(context->byte_56_dqpn_err, + V2_QPC_BYTE_56_RQ_RX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_tx_err", + roce_get_bit(context->byte_56_dqpn_err, + V2_QPC_BYTE_56_RQ_TX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_rty_tx_err", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rq_db_doing", + roce_get_bit(context->byte_60_qpst_tempid, + V2_QPC_BYTE_60_RQ_DB_DOING_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rx_cqn", + roce_get_field(context->byte_80_rnr_rx_cqn, + V2_QPC_BYTE_80_RX_CQN_M, + V2_QPC_BYTE_80_RX_CQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "head_wqe", hr_qp->rq.head)) + goto err; + + if (rdma_nl_put_driver_u32(msg, "tail_wqe", hr_qp->rq.tail)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_qp_fill_srq(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "srqn", + roce_get_field(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_SRQN_M, + V2_QPC_BYTE_76_SRQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "srq_en", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_SRQ_EN_S))) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_qp_fill_sq(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "sq_pi", + roce_get_field(context->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, + V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S))) + goto 
err; + + if (rdma_nl_put_driver_u32(msg, "sq_ci", + roce_get_field(context->byte_160_sq_ci_pi, + V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M, + V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_shift", + roce_get_field( + context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SQ_SHIFT_M, + V2_QPC_BYTE_20_SQ_SHIFT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_maxidx", + roce_get_field(context->byte_200_sq_max, + V2_QPC_BYTE_200_SQ_MAX_IDX_M, + V2_QPC_BYTE_200_SQ_MAX_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_rx_err", + roce_get_bit(context->byte_56_dqpn_err, + V2_QPC_BYTE_56_SQ_RX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_tx_err", + roce_get_bit(context->byte_56_dqpn_err, + V2_QPC_BYTE_56_SQ_TX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_db_doing", + roce_get_bit(context->byte_60_qpst_tempid, + V2_QPC_BYTE_60_SQ_DB_DOING_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sq_tx_err", + roce_get_bit(context->byte_56_dqpn_err, + V2_QPC_BYTE_56_SQ_TX_ERR_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "tx_cqn", + roce_get_field(context->byte_252_err_txcqn, + V2_QPC_BYTE_252_TX_CQN_M, + V2_QPC_BYTE_252_TX_CQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "head_wqe", hr_qp->sq.head)) + goto err; + + if (rdma_nl_put_driver_u32(msg, "tail_wqe", hr_qp->sq.tail)) + goto err; + + if (rdma_nl_put_driver_u32(msg, "signal_wqe", + hr_qp->dfx_cnt[HNS_ROCE_QP_DFX_SIGNAL_WQE])) + goto err; + + if (rdma_nl_put_driver_u32(msg, "inline_wqe", + hr_qp->dfx_cnt[HNS_ROCE_QP_DFX_INLINE_WQE])) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_qp_fill_cnt(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "post_send", + hr_qp->dfx_cnt[HNS_ROCE_QP_DFX_POST_SEND])) + goto err; + + if (rdma_nl_put_driver_u32(msg, "post_recv", + hr_qp->dfx_cnt[HNS_ROCE_QP_DFX_POST_RECV])) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_qp_fill_ext(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "st", + roce_get_field(context->byte_60_qpst_tempid, + V2_QPC_BYTE_60_QP_ST_M, + V2_QPC_BYTE_60_QP_ST_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "pdn", + roce_get_field(context->byte_16_buf_ba_pg_sz, + V2_QPC_BYTE_16_PD_M, + V2_QPC_BYTE_16_PD_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rre", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RRE_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rwe", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_RWE_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ate", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_ATE_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "max_ird", + roce_get_field(context->byte_208_irrl, + V2_QPC_BYTE_208_SR_MAX_M, + V2_QPC_BYTE_208_SR_MAX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "max_ord", + roce_get_field(context->byte_140_raq, + V2_QPC_BYTE_140_RR_MAX_M, + V2_QPC_BYTE_140_RR_MAX_S))) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_qp(struct hns_roce_qp *hr_qp, struct sk_buff *msg, + struct hns_roce_v2_qp_context *context) +{ + if (rdma_nl_put_driver_u32(msg, "smac_idx", + roce_get_field( + context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SMAC_IDX_M, + V2_QPC_BYTE_20_SMAC_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "vid", + 
roce_get_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "mtu", + roce_get_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sgid_idx", + roce_get_field( + context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, + V2_QPC_BYTE_20_SGID_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "chk_flg", + roce_get_field(context->byte_212_lsn, + V2_QPC_BYTE_212_CHECK_FLG_M, + V2_QPC_BYTE_212_CHECK_FLG_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "retry_cnt", + roce_get_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "err_type", + roce_get_field(context->byte_252_err_txcqn, + V2_QPC_BYTE_252_ERR_TYPE_M, + V2_QPC_BYTE_252_ERR_TYPE_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "flush_idx", + roce_get_field( + context->byte_256_sqflush_rqcqe, + V2_QPC_BYTE_256_SQ_FLUSH_IDX_M, + V2_QPC_BYTE_256_SQ_FLUSH_IDX_S))) + goto err; + + if (hns_roce_qp_fill_ext(hr_qp, msg, context)) + goto err; + + if (hns_roce_qp_fill_rq(hr_qp, msg, context)) + goto err; + + if (hns_roce_qp_fill_srq(hr_qp, msg, context)) + goto err; + + if (hns_roce_qp_fill_sq(hr_qp, msg, context)) + goto err; + + if (hns_roce_qp_fill_cnt(hr_qp, msg, context)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_qp_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_qp *ib_qp = container_of(res, struct ib_qp, res); + struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp); + struct hns_roce_v2_qp_context context; + struct nlattr *table_attr; + int ret; + + if (!hr_dev->dfx->query_qpc_info) + return -EINVAL; + + ret = hr_dev->dfx->query_qpc_info(hr_dev, hr_qp->qpn, (int *)&context); + if (ret) + goto err; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_qp(hr_qp, msg, &context)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + return -EMSGSIZE; +} + + +static int hns_roce_fill_mr(struct sk_buff *msg, + struct hns_roce_v2_mpt_entry *context) +{ + u64 val_h32; + + if (rdma_nl_put_driver_u32(msg, "status", + roce_get_field(context->byte_4_pd_hop_st, + V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "lkey", le32_to_cpu(context->lkey))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "size", le32_to_cpu(context->pbl_size))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ra", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RA_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ri", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_R_INV_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "li", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_L_INV_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "atomic_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_ATOMIC_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rr_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RR_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rw_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RW_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "lw_en", + 
roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_LW_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "pbl_buf_pgsz", + roce_get_field(context->byte_64_buf_pa1, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S))) + goto err; + + val_h32 = le32_to_cpu(context->len_h); + if (rdma_nl_put_driver_u64(msg, "len", + val_h32 << 32 | le32_to_cpu(context->len_l))) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_mr_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_mr *ib_mr = container_of(res, struct ib_mr, res); + struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device); + struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr); + struct hns_roce_v2_mpt_entry context; + int key = hr_mr->key; + struct nlattr *table_attr; + int ret; + + if (!hr_dev->dfx->query_mpt_info) + return -EINVAL; + + ret = hr_dev->dfx->query_mpt_info(hr_dev, key, (int *)&context); + if (ret) + goto err; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_mr(msg, &context)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_pd(struct sk_buff *msg, + struct hns_roce_pd *hr_pd) +{ + if (rdma_nl_put_driver_u32(msg, "pdn", hr_pd->pdn)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_pd_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_pd *ib_pd = container_of(res, struct ib_pd, res); + struct hns_roce_pd *hr_pd = to_hr_pd(ib_pd); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_pd(msg, hr_pd)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + return -EMSGSIZE; +} + + +int hns_roce_fill_res_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + if (res->type == RDMA_RESTRACK_PD) + return hns_roce_fill_res_pd_entry(msg, res); + + if (res->type == RDMA_RESTRACK_CQ) + return hns_roce_fill_res_cq_entry(msg, res); + + if (res->type == RDMA_RESTRACK_QP) + return hns_roce_fill_res_qp_entry(msg, res); + + if (res->type == RDMA_RESTRACK_MR) + return hns_roce_fill_res_mr_entry(msg, res); + + return 0; +} diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c new file mode 100644 index 0000000000000000000000000000000000000000..52a9b23c13e0bc18c9228a6af37c6081f2848bc8 --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -0,0 +1,589 @@ +/* + * Copyright (c) 2018 Hisilicon Limited. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "roce_k_compat.h" +#include +#include +#include +#include "hns_roce_device.h" +#include "hns_roce_cmd.h" +#include "hns_roce_hem.h" + +void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + struct hns_roce_srq *srq; + + rcu_read_lock(); + srq = radix_tree_lookup(&srq_table->tree, + srqn & (hr_dev->caps.num_srqs - 1)); + rcu_read_unlock(); + if (srq) { + refcount_inc(&srq->refcount); + } else { + dev_warn(hr_dev->dev, "Async event for bogus SRQ 0x%08x\n", + srqn); + return; + } + + srq->event(srq, event_type); + + if (refcount_dec_and_test(&srq->refcount)) + complete(&srq->free); +} +EXPORT_SYMBOL_GPL(hns_roce_srq_event); + +static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, + enum hns_roce_event event_type) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); + struct ib_srq *ibsrq = &srq->ibsrq; + struct ib_event event; + + if (ibsrq->event_handler) { + event.device = ibsrq->device; + event.element.srq = ibsrq; + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: + event.event = IB_EVENT_SRQ_LIMIT_REACHED; + break; + case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: + event.event = IB_EVENT_SRQ_ERR; + break; + default: + dev_err(hr_dev->dev, + "hns_roce:Unexpected event type 0x%x on SRQ 0x%06lx\n", + event_type, srq->srqn); + return; + } + + ibsrq->event_handler(&event, ibsrq->srq_context); + } +} + +static int hns_roce_hw_create_srq(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long srq_num) +{ + return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0, + HNS_ROCE_CMD_CREATE_SRQ, + HNS_ROCE_CMD_TIMEOUT_MSECS); +} + +static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + unsigned long srq_num) +{ + return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num, + mailbox ? 
0 : 1, HNS_ROCE_CMD_DESTROY_SRQ, + HNS_ROCE_CMD_TIMEOUT_MSECS); +} + +static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, + u16 xrcd, struct hns_roce_mtt *hr_mtt, + u64 db_rec_addr, struct hns_roce_srq *srq) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + struct hns_roce_cmd_mailbox *mailbox; + dma_addr_t dma_handle_wqe; + dma_addr_t dma_handle_idx; + u64 *mtts_wqe; + u64 *mtts_idx; + int ret; + + /* Get the physical address of srq buf */ + mtts_wqe = hns_roce_table_find(hr_dev, + &hr_dev->mr_table.mtt_srqwqe_table, + srq->mtt.first_seg, + &dma_handle_wqe); + if (!mtts_wqe) { + dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n"); + return -EINVAL; + } + + /* Get physical address of idx que buf */ + mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table, + srq->idx_que.mtt.first_seg, + &dma_handle_idx); + if (!mtts_idx) { + dev_err(hr_dev->dev, + "Failed to find mtt for srq idx queue buf.\n"); + return -EINVAL; + } + + ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn); + if (ret == -1) { + dev_err(hr_dev->dev, + "Failed to alloc a bit from srq bitmap.\n"); + return -ENOMEM; + } + + ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); + if (ret) { + dev_err(hr_dev->dev, "Get table failed(%d) for SRQ(0x%lx) alloc.\n", + ret, srq->srqn); + goto err_out; + } + + spin_lock_irq(&srq_table->lock); + ret = radix_tree_insert(&srq_table->tree, srq->srqn, srq); + spin_unlock_irq(&srq_table->lock); + if (ret) + goto err_put; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ret = PTR_ERR(mailbox); + goto err_radix; + } + + hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf, + mtts_wqe, mtts_idx, dma_handle_wqe, + dma_handle_idx); + + ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn); + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + if (ret) { + dev_err(hr_dev->dev, "CREATE_SRQ(0x%lx) failed(%d).\n", + srq->srqn, ret); + goto err_radix; + } + + refcount_set(&srq->refcount, 1); + init_completion(&srq->free); + return ret; + +err_radix: + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + +err_put: + hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); + +err_out: + hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); + return ret; +} + +static void hns_roce_srq_free(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + int ret; + + ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn); + if (ret) + dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN 0x%06lx.\n", + ret, srq->srqn); + + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + + if (refcount_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); + hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); +} + +static int create_user_srq(struct ib_pd *pd, struct hns_roce_srq *srq, + struct ib_udata *udata, int srq_buf_size) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_ib_create_srq ucmd; + struct hns_roce_buf *buf; + int ret; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + srq_buf_size, 0, 0); + if (IS_ERR(srq->umem)) + return PTR_ERR(srq->umem); + + if 
(hr_dev->caps.srqwqe_buf_pg_sz) { + buf = srq->buf; + buf->npages = (ib_umem_page_count(srq->umem) + + (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) / + (1 << hr_dev->caps.srqwqe_buf_pg_sz); + buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, + &srq->mtt); + } else + ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem), + srq->umem->page_shift, &srq->mtt); + if (ret) { + dev_err(hr_dev->dev, "Mtt init error(%d) when create srq.\n", + ret); + goto err_user_buf; + } + + ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem); + if (ret) + goto err_user_srq_mtt; + + /* config index queue BA */ + srq->idx_que.umem = ib_umem_get(pd->uobject->context, ucmd.que_addr, + srq->idx_que.buf_size, 0, 0); + if (IS_ERR(srq->idx_que.umem)) { + dev_err(hr_dev->dev, "umem get error for idx que\n"); + goto err_user_srq_mtt; + } + + if (hr_dev->caps.idx_buf_pg_sz) { + buf = srq->idx_que.idx_buf; + buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem), + 1 << hr_dev->caps.idx_buf_pg_sz); + buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; + ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, + &srq->idx_que.mtt); + } else { + ret = hns_roce_mtt_init(hr_dev, + ib_umem_page_count(srq->idx_que.umem), + srq->idx_que.umem->page_shift, + &srq->idx_que.mtt); + } + + if (ret) { + dev_err(hr_dev->dev, "User mtt init error for idx que\n"); + goto err_user_idx_mtt; + } + + ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt, + srq->idx_que.umem); + if (ret) { + dev_err(hr_dev->dev, + "Write mtt error(%d) for idx que\n", ret); + goto err_user_idx_buf; + } + + return 0; + +err_user_idx_buf: + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + +err_user_idx_mtt: + ib_umem_release(srq->idx_que.umem); + +err_user_srq_mtt: + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + +err_user_buf: + ib_umem_release(srq->umem); + + return ret; +} + +static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, + u32 page_shift) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_idx_que *idx_que = &srq->idx_que; + struct hns_roce_buf *kbuf; + + idx_que->entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ; + + idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL); + if (!idx_que->bitmap) + return -ENOMEM; + + idx_que->buf_size = srq->max * idx_que->entry_sz; + + kbuf = hns_roce_buf_alloc(hr_dev, idx_que->buf_size, page_shift, 0); + if (IS_ERR(kbuf)) { + bitmap_free(idx_que->bitmap); + return -ENOMEM; + } + + idx_que->idx_buf = kbuf; + + idx_que->head = 0; + idx_que->tail = 0; + + return 0; +} + +static int create_kernel_srq(struct ib_pd *pd, struct hns_roce_srq *srq, + int srq_buf_size) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; + struct hns_roce_buf *kbuf; + int ret; + + kbuf = hns_roce_buf_alloc(hr_dev, srq_buf_size, page_shift, 0); + if (IS_ERR(kbuf)) + return -ENOMEM; + + srq->buf = kbuf; + srq->wqe_ctr = 0; + + ret = hns_roce_mtt_init(hr_dev, kbuf->npages, kbuf->page_shift, + &srq->mtt); + if (ret) { + dev_err(hr_dev->dev, "Mtt init error(%d) when create srq.\n", + ret); + goto err_kernel_buf; + } + + ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, srq->buf); + if (ret) + goto err_kernel_srq_mtt; + + page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz; + ret = hns_roce_create_idx_que(pd, srq, page_shift); + if (ret) { + dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret); + goto err_kernel_srq_mtt; + } + + 
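
Both the user and kernel SRQ paths above recompute the MTT page count when the hardware buffer page size is larger than the system page size. A standalone sketch of that arithmetic follows; the page counts and the pg_sz shift are made-up inputs, not values taken from the driver.

```c
#include <stdio.h>

/* One hardware "page" covers (1 << pg_sz_shift) system pages, so the umem
 * page count is rounded up to whole hardware pages, as in create_user_srq(). */
static unsigned long hw_npages(unsigned long umem_pages, unsigned int pg_sz_shift)
{
	unsigned long granule = 1UL << pg_sz_shift;

	return (umem_pages + granule - 1) / granule;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* Hypothetical: 10 system pages, one hardware page = 4 system pages. */
	printf("npages = %lu\n", hw_npages(10, 2));	/* prints 3 */
	/* The matching page_shift becomes PAGE_SHIFT + pg_sz_shift, e.g. 12 + 2 = 14. */
	return 0;
}
```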
/* Init mtt table for idx_que */ + ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf->npages, + srq->idx_que.idx_buf->page_shift, + &srq->idx_que.mtt); + if (ret) { + dev_err(hr_dev->dev, "Kernel mtt init error(%d) for idx que.\n", + ret); + goto err_kernel_create_idx; + } + /* Write buffer address into the mtt table */ + ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt, + srq->idx_que.idx_buf); + if (ret) { + dev_err(hr_dev->dev, "Write mtt error(%d) for idx que.\n", ret); + goto err_kernel_idx_buf; + } + srq->wrid = kcalloc(srq->max, sizeof(u64), GFP_KERNEL); + if (!srq->wrid) { + ret = -ENOMEM; + goto err_kernel_idx_buf; + } + + return 0; + +err_kernel_idx_buf: + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + +err_kernel_create_idx: + hns_roce_buf_free(hr_dev, srq->idx_que.idx_buf); + bitmap_free(srq->idx_que.bitmap); + +err_kernel_srq_mtt: + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + +err_kernel_buf: + hns_roce_buf_free(hr_dev, srq->buf); + + return ret; +} + +static void destroy_user_srq(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + ib_umem_release(srq->idx_que.umem); + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + ib_umem_release(srq->umem); +} + +static void destroy_kernel_srq(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq, int srq_buf_size) +{ + kfree(srq->wrid); + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + hns_roce_buf_free(hr_dev, srq->idx_que.idx_buf); + kfree(srq->idx_que.bitmap); + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + hns_roce_buf_free(hr_dev, srq->buf); +} + +static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq, + bool user) +{ + u32 max_sge = dev->caps.max_srq_sges; + + if (dev->pci_dev->revision > PCI_REVISION_ID_HIP08_B) + return max_sge; + /* Reserve SGEs only for HIP08 in kernel; The userspace driver will + * calculate number of max_sge with reserved SGEs when allocating wqe + * buf, so there is no need to do this again in kernel. But the number + * may exceed the capacity of SGEs recorded in the firmware, so the + * kernel driver should just adapt the value accordingly. 
+ */ + if (user) + max_sge = roundup_pow_of_two(max_sge + 1); + else + hr_srq->rsv_sge = 1; + + return max_sge; +} + + +struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); + struct hns_roce_srq *srq; + int srq_desc_size; + int srq_buf_size; + u32 max_sge; + int ret; + u32 cqn; + + srq = kzalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + max_sge = proc_srq_sge(hr_dev, srq, !!udata); + + if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs || + srq_init_attr->attr.max_sge > max_sge) + return ERR_PTR(-EINVAL); + + mutex_init(&srq->mutex); + spin_lock_init(&srq->lock); + + srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); + srq->max_gs = + roundup_pow_of_two(srq_init_attr->attr.max_sge + srq->rsv_sge); + + srq_desc_size = max(HNS_ROCE_SGE_SIZE, HNS_ROCE_SGE_SIZE * srq->max_gs); + srq_desc_size = roundup_pow_of_two(srq_desc_size); + + srq->wqe_shift = ilog2(srq_desc_size); + + srq_buf_size = srq->max * srq_desc_size; + + srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ; + srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz; + srq->mtt.mtt_type = MTT_TYPE_SRQWQE; + srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; + + if (pd->uobject) { + ret = create_user_srq(pd, srq, udata, srq_buf_size); + if (ret) { + dev_err(hr_dev->dev, "Create user srq fail\n"); + goto err_srq; + } + } else { + ret = create_kernel_srq(pd, srq, srq_buf_size); + if (ret) { + dev_err(hr_dev->dev, "Create kernel srq fail\n"); + goto err_srq; + } + } + + cqn = ib_srq_has_cq(srq_init_attr->srq_type) ? + to_hr_cq(srq_init_attr->ext.cq)->cqn : 0; + + srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG; + + ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0, &srq->mtt, + 0, srq); + if (ret) { + dev_err(hr_dev->dev, + "Alloc srq failed(%d), cqn is 0x%x, pdn is 0x%lx.\n", + ret, cqn, to_hr_pd(pd)->pdn); + goto err_wrid; + } + + srq->event = hns_roce_ib_srq_event; + srq->ibsrq.ext.xrc.srq_num = srq->srqn; + srq_init_attr->attr.max_wr = srq->max; + srq_init_attr->attr.max_sge = srq->max_gs - srq->rsv_sge; + srq_init_attr->attr.srq_limit = 0; + + if (pd->uobject) { + if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { + ret = -EFAULT; + goto err_srqc_alloc; + } + } + + return &srq->ibsrq; + +err_srqc_alloc: + hns_roce_srq_free(hr_dev, srq); + +err_wrid: + if (pd->uobject) + destroy_user_srq(hr_dev, srq); + else + destroy_kernel_srq(hr_dev, srq, srq_buf_size); + +err_srq: + kfree(srq); + return ERR_PTR(ret); +} + +int hns_roce_destroy_srq(struct ib_srq *ibsrq) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); + struct hns_roce_srq *srq = to_hr_srq(ibsrq); + + hns_roce_srq_free(hr_dev, srq); + hns_roce_mtt_cleanup(hr_dev, &srq->mtt); + + if (ibsrq->uobject) { + hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); + ib_umem_release(srq->idx_que.umem); + ib_umem_release(srq->umem); + } else { + kfree(srq->wrid); + hns_roce_buf_free(hr_dev, srq->buf); + } + + kfree(srq); + + return 0; +} + +int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + + spin_lock_init(&srq_table->lock); + INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); + + return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs, + hr_dev->caps.num_srqs - 1, + hr_dev->caps.reserved_srqs, 0); +} + +void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev) +{ + hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap); +} 
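
hns_roce_create_srq() above derives every buffer size from the caller's max_wr/max_sge before any allocation happens. The sketch below reproduces that sizing in user space; HNS_ROCE_SGE_SIZE and HNS_ROCE_IDX_QUE_ENTRY_SZ are replaced by placeholder constants, since their real values live in the driver headers.

```c
#include <stdio.h>
#include <stdint.h>

#define SGE_SIZE	16	/* placeholder for HNS_ROCE_SGE_SIZE */
#define IDX_ENTRY_SIZE	4	/* placeholder for HNS_ROCE_IDX_QUE_ENTRY_SZ */

static uint32_t roundup_pow2(uint32_t v)
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int n = 0;

	while (v >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t max_wr = 100, max_sge = 3, rsv_sge = 1;

	/* Depth and SGE count are rounded to powers of two, with one WR slot and
	 * one SGE reserved, mirroring hns_roce_create_srq(). */
	uint32_t srq_max   = roundup_pow2(max_wr + 1);		/* 128 */
	uint32_t max_gs    = roundup_pow2(max_sge + rsv_sge);	/* 4 */
	uint32_t desc_size = roundup_pow2(SGE_SIZE * max_gs);	/* 64 */

	printf("wqe_shift=%u srq_buf=%u idx_buf=%u\n",
	       ilog2_u32(desc_size),		/* 6 */
	       srq_max * desc_size,		/* 8192 bytes of SRQ WQEs */
	       srq_max * IDX_ENTRY_SIZE);	/* 512-byte index queue */
	return 0;
}
```

Keeping the queue depth and descriptor size as powers of two lets the driver store wqe_shift and index entries with shifts rather than divides, which is presumably why the reported max_wr/max_sge returned to the caller are the rounded-up values minus the reserved entries.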
diff --git a/drivers/infiniband/hw/hns/hns_roce_sysfs.c b/drivers/infiniband/hw/hns/hns_roce_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..47a0e31d85b2d9e1d561df9efe0bc1acb2f7bb44 --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_sysfs.c @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns_roce_common.h" +#include "hns_roce_device.h" +#include "hns_roce_cmd.h" +#include "hns_roce_hem.h" +#include "hns_roce_hw_v2.h" + + + +static ssize_t cqc_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.cqn); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t cqc_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_cqc_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "CQC query failed(%d).", ret); + return -EBUSY; + } + + return count; +} + +static ssize_t cmd_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_cmd_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "Cmd query failed(%d).", ret); + return -EBUSY; + } + + return count; +} + +static ssize_t pkt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_pkt_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "Pkt query failed(%d).", ret); + return -EBUSY; + } + + return count; +} + +static ssize_t ceqc_store(struct device *dev, + struct device_attribute *attr, + 
const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.ceqn); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t ceqc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_ceqc_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "CEQC query failed"); + return -EBUSY; + } + + return count; +} + +static ssize_t aeqc_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.aeqn); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t aeqc_show(struct device *dev, struct device_attribute *attr, + char *buf) + +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_aeqc_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "aeqc query failed"); + return -EBUSY; + } + + return count; +} + +static ssize_t qpc_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.qpn); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t qpc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_qpc_stat(hr_dev, + buf, &count); + if (ret) { + dev_err(dev, "QPC query failed"); + return -EBUSY; + } + + return count; +} + +static ssize_t srqc_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.srqn); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t srqc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_srqc_stat(hr_dev, buf, &count); + if (ret) { + dev_err(dev, "SRQC query failed"); + return -EBUSY; + } + + return count; +} + +static ssize_t mpt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + + ret = kstrtou32(buf, 10, &hr_dev->hr_stat.key); + if (ret) { + dev_err(dev, "Input params format unmatch\n"); + return -EINVAL; + } + + return strnlen(buf, count); +} + +static ssize_t mpt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns_roce_dev *hr_dev = + container_of(dev, struct hns_roce_dev, ib_dev.dev); + int ret; + int count = 0; + + ret = hr_dev->dfx->query_mpt_stat(hr_dev, buf, 
&count); + if (ret) { + dev_err(dev, "mpt query failed"); + return -EBUSY; + } + + return count; +} + +static ssize_t coalesce_maxcnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev, + ib_dev.dev); + struct hns_roce_eq *eq = hr_dev->eq_table.eq; + + return scnprintf(buf, PAGE_SIZE, "%d\n", eq->eq_max_cnt); +} + +static ssize_t coalesce_maxcnt_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev, + ib_dev.dev); + struct hns_roce_eq *eq; + u32 int_maxcnt; + int ceq_num; + int i; + int ret; + + ceq_num = hr_dev->caps.num_comp_vectors; + + ret = kstrtou32(buf, 10, &int_maxcnt); + if (ret) { + dev_err(dev, + "Input params of irq coalesce maxcnt format unmatch\n"); + return -EINVAL; + } + + if (int_maxcnt > HNS_ROCE_CEQ_MAX_BURST_NUM) { + dev_err(dev, "int_maxcnt(%d) must be less than 2^16!\n", + int_maxcnt); + return -EINVAL; + } + + for (i = 0; i < ceq_num; i++) { + eq = &hr_dev->eq_table.eq[i]; + eq->eq_max_cnt = int_maxcnt; + ret = hr_dev->dfx->modify_eq(hr_dev, eq, eq->eq_max_cnt, 0, + HNS_ROCE_EQ_MAXCNT_MASK); + if (ret) { + dev_err(dev, "EQC(%d) modify failed(%d).\n", eq->eqn, + ret); + return -EBUSY; + } + } + + return count; +} + +static ssize_t coalesce_period_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev, + ib_dev.dev); + struct hns_roce_eq *eq = hr_dev->eq_table.eq; + + return scnprintf(buf, PAGE_SIZE, "%d\n", eq->eq_period); +} + +static ssize_t coalesce_period_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hns_roce_dev *hr_dev = container_of(dev, struct hns_roce_dev, + ib_dev.dev); + struct hns_roce_eq *eq; + u32 int_period; + int ceq_num; + int i; + int ret; + + ceq_num = hr_dev->caps.num_comp_vectors; + + ret = kstrtou32(buf, 10, &int_period); + if (ret) { + dev_err(dev, + "Input params of irq coalesce period format unmatch\n"); + return -EINVAL; + } + + if (int_period > HNS_ROCE_CEQ_MAX_INTERVAL) { + dev_err(dev, "int_period(%d) must be less than 2^16!\n", + int_period); + return -EINVAL; + } + + for (i = 0; i < ceq_num; i++) { + eq = &hr_dev->eq_table.eq[i]; + eq->eq_period = int_period; + ret = hr_dev->dfx->modify_eq(hr_dev, eq, 0, eq->eq_period, + HNS_ROCE_EQ_PERIOD_MASK); + if (ret) { + dev_err(dev, "EQC(%d) modify failed(%d).\n", eq->eqn, + ret); + return -EBUSY; + } + } + + return count; +} + +static DEVICE_ATTR_RW(aeqc); +static DEVICE_ATTR_RW(qpc); +static DEVICE_ATTR_RW(srqc); +static DEVICE_ATTR_RW(mpt); +static DEVICE_ATTR_RW(ceqc); +static DEVICE_ATTR_RO(pkt); +static DEVICE_ATTR_RO(cmd); +static DEVICE_ATTR_RW(cqc); +static DEVICE_ATTR_RW(coalesce_maxcnt); +static DEVICE_ATTR_RW(coalesce_period); + +static struct device_attribute *hns_roce_hw_attrs_list[] = { + &dev_attr_cmd, + &dev_attr_cqc, + &dev_attr_aeqc, + &dev_attr_qpc, + &dev_attr_mpt, + &dev_attr_pkt, + &dev_attr_ceqc, + &dev_attr_srqc, + &dev_attr_coalesce_maxcnt, + &dev_attr_coalesce_period, +}; + +int hns_roce_register_sysfs(struct hns_roce_dev *hr_dev) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(hns_roce_hw_attrs_list); i++) { + ret = device_create_file(&hr_dev->ib_dev.dev, + hns_roce_hw_attrs_list[i]); + if (ret) { + dev_err(hr_dev->dev, "register_sysfs failed!\n"); + return ret; + } + } + + return 0; +} + +void 
hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns_roce_hw_attrs_list); i++) + device_remove_file(&hr_dev->ib_dev.dev, + hns_roce_hw_attrs_list[i]); +} diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_common.h b/drivers/infiniband/hw/hns/roce-customer/rdfx_common.h new file mode 100644 index 0000000000000000000000000000000000000000..589888ec47c4861d2d2f2f4adac29d8f9f256c10 --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_common.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __RDFX_COMMON__ +#define __RDFX_COMMON__ + +#include +#include "hns_roce_device.h" +#include "hns_roce_common.h" +#include "hnae3.h" +#include "hns_roce_hw_v2.h" + +#define WQE_TYPE_MAX (IB_WR_REG_SIG_MR + 1) +#define CQ_ST_MAX (IB_WC_GENERAL_ERR + 1) +#define RCQE_TYPE_MAX (RECV_SEND_WITH_INV + 1) +#define RDFX_FUNC_MAX (37) + +extern const char *rdfx_func_name[RDFX_FUNC_MAX]; +struct rdfx_info; + +/**************** kobject attribute ****************/ +struct rdfx_hw_sys_attr { + struct attribute attr; + int (*pub_show)(struct rdfx_info *rdfx); + int (*pub_store)(const char *buf, struct rdfx_info *rdfx); +}; + +#define rdfx_hw_file_attr_def(file_name, func_show, func_store) \ +static struct rdfx_hw_sys_attr g_sysfs_roce_##file_name##_attr = {\ + {\ + .name = #file_name,\ + .mode = 0644,\ + },\ + .pub_show = func_show,\ + .pub_store = func_store,\ +} + +#define HW_ATTRS_LIST_MEMBER(file_name)(&g_sysfs_roce_##file_name##_attr.attr) + +enum { + RECV_RDMA_WITH_IMM, + RECV_SEND, + RECV_SEND_WITH_IMM, + RECV_SEND_WITH_INV, +}; + +#define MAX_CHAR_NUM_DEV_NAME (12) +struct rdfx_dev_info { + atomic_t fc[RDFX_FUNC_MAX]; + char dev_name[MAX_CHAR_NUM_DEV_NAME]; +}; + +struct rdfx_pd_info { + struct ib_pd *pd; + unsigned long pdn; + struct list_head list; + void *priv; + struct kref cnt; +}; + +struct rdfx_pd_top_info { + atomic_t alloc_pd_cnt; + atomic_t dealloc_pd_cnt; + atomic_t top_pd_index; + struct list_head list; + spinlock_t pd_lock; +}; + +struct rdfx_sq_info { + atomic_t sq_wqe_cnt[WQE_TYPE_MAX]; + u32 sq_wqe_size; + u32 sq_sge_size; + u32 offset; + u32 sq_depth; + atomic_t sig_wqe_cnt; + atomic_t db_cnt; + atomic_t inline_cnt; + atomic_t head; + atomic_t tail; + atomic_t last_comp; + u64 head_addr; + u64 tail_addr; +}; + +struct rdfx_rq_info { + atomic_t rq_wqe_cnt; + u32 rq_wqe_size; + u32 offset; + u32 rq_depth; + atomic_t db_cnt; + atomic_t inline_cnt; + atomic_t head; + atomic_t tail; + u64 head_addr; + u64 tail_addr; +}; + +struct rdfx_qp_attr { + atomic_t state; + u32 read_en; + u32 write_en; + u32 fast_reg_en; + u32 atomic_en; + u8 max_ord; + u8 max_ird; + u32 pd_id; + u32 err_code; + u32 max_sge[2]; +}; + +struct rdfx_qp_info { + struct ib_qp *qp; + unsigned long qpn; + struct rdfx_sq_info sq; + struct rdfx_rq_info rq; + struct rdfx_qp_attr attr; + struct list_head list; + void *priv; + void *buf; + u32 buf_size; + struct kref cnt; +}; + +struct rdfx_qp_top_info { + atomic_t alloc_qp_cnt; + atomic_t dealloc_qp_cnt; + atomic_t top_qp_index; + atomic_t dwqe_cnt; + struct list_head list; + spinlock_t qp_lock; +}; + +struct rdfx_cq_info { + struct ib_cq *cq; + unsigned long cqn; + atomic_t scqe_cnt[WQE_TYPE_MAX]; + atomic_t rcqe_cnt[RCQE_TYPE_MAX]; + atomic_t arm_cnt[2]; + atomic_t st_cnt[CQ_ST_MAX]; + atomic_t ci; + u32 cqe_size; + u32 cq_depth; + struct list_head list; + void *priv; + void *buf; + struct kref cnt; +}; + +struct rdfx_cq_top_info { + atomic_t alloc_cq_cnt; + atomic_t dealloc_cq_cnt; + atomic_t 
top_cq_index; + struct list_head list; + spinlock_t cq_lock; +}; + +struct rdfx_mr_info { + struct ib_mr *mr; + u32 pd; + struct list_head list; + struct kref cnt; +}; + +struct rdfx_mr_top_info { + atomic_t alloc_mr_cnt; + atomic_t dealloc_mr_cnt; + atomic_t top_mr_index; + struct list_head list; + spinlock_t mr_lock; +}; + +struct rdfx_ceq_info { + struct list_head list; + unsigned long ceqn; + atomic_t ceqe_cnt; + void *priv; + struct kref cnt; +}; + +struct rdfx_aeq_info { + struct list_head list; + atomic_t aeqe_cnt; + void *priv; + struct kref cnt; +}; + +struct rdfx_eq_top_info { + struct list_head ceq_list; + struct list_head aeq_list; + spinlock_t eq_lock; +}; + +struct rdfx_ops { + int (*add_sysfs)(struct rdfx_info *info); + void (*del_sysfs)(struct rdfx_info *info); + void (*print_sq_wqe)(void *wqe); + void (*print_rq_wqe)(void *wqe); + void *(*get_dfx)(struct ib_device *ib_dev); +}; + +struct rdfx_info { + struct rdfx_dev_info dev; + struct rdfx_pd_top_info pd; + struct rdfx_qp_top_info qp; + struct rdfx_cq_top_info cq; + struct rdfx_mr_top_info mr; + struct rdfx_eq_top_info eq; + struct rdfx_ops *ops; + + struct device *drv_dev; + struct kobject kobj; + + void *priv; +}; + +struct rdfx_dev_id { + char name[20]; + struct rdfx_ops *ops; +}; + +struct rdfx_top_info { + struct ib_device *dev; + struct rdfx_info *rdfx; +}; + +void qp_release(struct kref *ref); +void cq_release(struct kref *ref); + +#endif diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c new file mode 100644 index 0000000000000000000000000000000000000000..160e0dfd71551b743825fa146d0ab456eb25445d --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+#ifdef CONFIG_INFINIBAND_HNS_DFX +#include "roce_k_compat.h" +#include "rdfx_common.h" +#include "rdfx_intf.h" + +#ifdef CONFIG_KERNEL_419 +void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, int ind, void *wqe, + const struct ib_recv_wr *wr) + +#else +void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, int ind, void *wqe, + struct ib_recv_wr *wr) +#endif +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp = NULL; + struct rdfx_rq_info *rq = NULL; + void *dfx_qp_buf = NULL; + + spin_lock(&(rdfx->qp.qp_lock)); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, hr_qp->qpn); + if (ZERO_OR_NULL_PTR(rdfx_qp)) { + dev_err(hr_dev->dev, "find qp 0x%lx failed while cp sq wqe buf\n", + hr_qp->qpn); + spin_unlock(&(rdfx->qp.qp_lock)); + return; + } + + spin_unlock(&(rdfx->qp.qp_lock)); + + rq = &rdfx_qp->rq; + atomic_inc(&rq->rq_wqe_cnt); + + /*copy wqe buf*/ + dfx_qp_buf = hns_roce_buf_offset(rdfx_qp->buf, + hr_qp->rq.offset + (ind << hr_qp->rq.wqe_shift)); + memcpy(dfx_qp_buf, (void *)wqe, + (wr->num_sge) * sizeof(struct hns_roce_v2_wqe_data_seg)); + + atomic_set(&rq->head, (int)hr_qp->rq.head); + rq->head_addr = + (u64)get_recv_wqe(hr_qp, hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1)); + atomic_set(&rq->tail, (int)hr_qp->rq.tail); + rq->tail_addr = + (u64)get_recv_wqe(hr_qp, hr_qp->rq.tail & (hr_qp->rq.wqe_cnt - 1)); +} +EXPORT_SYMBOL_GPL(rdfx_cp_rq_wqe_buf); + + +#ifdef CONFIG_KERNEL_419 +static void rdfx_change_sq_buf(const struct ib_send_wr *wr, int atomic_en, + void *dfx_qp_buf, const void *dfx_hns_wqe_sge, + struct rdfx_sq_info *sq, + struct hns_roce_dev *hr_dev, + struct hns_roce_qp *qp) + +#else + +static void rdfx_change_sq_buf(struct ib_send_wr *wr, int atomic_en, + void *dfx_qp_buf, void *dfx_hns_wqe_sge, + struct rdfx_sq_info *sq, + struct hns_roce_dev *hr_dev, + struct hns_roce_qp *qp) +#endif +{ + if (atomic_en && wr->num_sge >= 2) + memcpy(dfx_qp_buf, dfx_hns_wqe_sge, wr->num_sge * + sizeof(struct hns_roce_v2_wqe_data_seg)); + else if (wr->num_sge > 2) + memcpy(dfx_qp_buf, dfx_hns_wqe_sge, (wr->num_sge - 2) * + sizeof(struct hns_roce_v2_wqe_data_seg)); + + if (wr->opcode <= IB_WR_REG_SIG_MR) + atomic_inc(&(sq->sq_wqe_cnt[wr->opcode])); + else + dev_err(hr_dev->dev, "Error opcode - %d while post send.\n", + wr->opcode); + + if (wr->send_flags & IB_SEND_SIGNALED) + atomic_inc(&sq->sig_wqe_cnt); + if (wr->send_flags & IB_SEND_INLINE) + atomic_inc(&sq->inline_cnt); + + atomic_set(&sq->head, (int)qp->sq.head); + sq->head_addr = + (u64)get_send_wqe(qp, qp->sq.head & + (unsigned int)(qp->sq.wqe_cnt - 1)); + atomic_set(&sq->tail, (int)qp->sq.tail); + sq->tail_addr = + (u64)get_send_wqe(qp, qp->sq.tail & + (unsigned int)(qp->sq.wqe_cnt - 1)); +} + +#ifdef CONFIG_KERNEL_419 +void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, + unsigned int ind, void *wqe, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + const struct ib_send_wr *wr) + +#else + +void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, + unsigned int ind, void *wqe, + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, + struct ib_send_wr *wr) +#endif +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp = NULL; + struct rdfx_sq_info *sq = NULL; + void *dfx_hns_wqe_sge = NULL; + void *dfx_qp_buf = NULL; + int atomic_en = 0; + + spin_lock(&(rdfx->qp.qp_lock)); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qp->qpn); + if (ZERO_OR_NULL_PTR(rdfx_qp)) { + dev_err(hr_dev->dev, 
"find qp 0x%lx failed while cp sq wqe buf\n", + qp->qpn); + spin_unlock(&(rdfx->qp.qp_lock)); + return; + } + + spin_unlock(&(rdfx->qp.qp_lock)); + + sq = &rdfx_qp->sq; + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) + atomic_en = 1; + //copy wqe buf + dfx_qp_buf = hns_roce_buf_offset(rdfx_qp->buf, qp->sq.offset + + ((ind & (qp->sq.wqe_cnt - 1)) << qp->sq.wqe_shift)); + memcpy(dfx_qp_buf, (void *)rc_sq_wqe, + sizeof(struct hns_roce_v2_rc_send_wqe)); + + dfx_qp_buf += sizeof(struct hns_roce_v2_rc_send_wqe); + dfx_hns_wqe_sge = wqe; + memcpy(dfx_qp_buf, dfx_hns_wqe_sge, + 2 * sizeof(struct hns_roce_v2_wqe_data_seg)); + dfx_qp_buf = hns_roce_buf_offset(rdfx_qp->buf, qp->sge.offset); + dfx_hns_wqe_sge = hns_roce_buf_offset(qp->mtr.kmem, qp->sge.offset); + rdfx_change_sq_buf(wr, atomic_en, dfx_qp_buf, + dfx_hns_wqe_sge, sq, hr_dev, qp); +} +EXPORT_SYMBOL_GPL(rdfx_cp_sq_wqe_buf); + +void rdfx_cp_cqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + void *cqe) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq = NULL; + void *dfx_cq_buf = NULL; + u32 ci; + + spin_lock(&rdfx->cq.cq_lock); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, hr_cq->cqn); + if (ZERO_OR_NULL_PTR(rdfx_cq)) { + dev_err(hr_dev->dev, "find cq 0x%lx failed while cp cqe buf\n", + hr_cq->cqn); + spin_unlock(&rdfx->cq.cq_lock); + return; + } + + spin_unlock(&rdfx->cq.cq_lock); + + ci = hr_cq->cons_index & (hr_cq->ib_cq.cqe); + dfx_cq_buf = hns_roce_buf_offset(rdfx_cq->buf, + ci * HNS_ROCE_V2_CQE_ENTRY_SIZE); + if (cqe) + memcpy(dfx_cq_buf, cqe, sizeof(struct hns_roce_v2_cqe)); +} +EXPORT_SYMBOL_GPL(rdfx_cp_cqe_buf); + +void rdfx_set_err_cqe_info(u32 status, struct rdfx_cq_info *rdfx_cq) +{ + switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) { + case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_LOC_LEN_ERR])); + break; + case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_LOC_QP_OP_ERR])); + break; + case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_LOC_PROT_ERR])); + break; + case HNS_ROCE_CQE_V2_WR_FLUSH_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_WR_FLUSH_ERR])); + break; + case HNS_ROCE_CQE_V2_MW_BIND_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_MW_BIND_ERR])); + break; + case HNS_ROCE_CQE_V2_BAD_RESP_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_BAD_RESP_ERR])); + break; + case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_LOC_ACCESS_ERR])); + break; + case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_REM_INV_REQ_ERR])); + break; + case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_REM_ACCESS_ERR])); + break; + case HNS_ROCE_CQE_V2_REMOTE_OP_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_REM_OP_ERR])); + break; + case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_RETRY_EXC_ERR])); + break; + case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_RNR_RETRY_EXC_ERR])); + break; + case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_REM_ABORT_ERR])); + break; + default: + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_GENERAL_ERR])); + break; + } +} + +void rdfx_set_send_cqe_info(u32 opcode, struct rdfx_cq_info *rdfx_cq) +{ + switch (opcode) { + case HNS_ROCE_SQ_OPCODE_SEND: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_SEND])); + break; + case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_SEND_WITH_INV])); + break; + 
case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_SEND_WITH_IMM])); + break; + case HNS_ROCE_SQ_OPCODE_RDMA_READ: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_RDMA_READ])); + break; + case HNS_ROCE_SQ_OPCODE_RDMA_WRITE: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_RDMA_WRITE])); + break; + case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_RDMA_WRITE_WITH_IMM])); + break; + case HNS_ROCE_SQ_OPCODE_LOCAL_INV: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_LOCAL_INV])); + break; + case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_ATOMIC_CMP_AND_SWP])); + break; + case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD: + atomic_inc(&(rdfx_cq->scqe_cnt[IB_WR_ATOMIC_FETCH_AND_ADD])); + break; + case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP: + atomic_inc( + &(rdfx_cq->scqe_cnt[IB_WR_MASKED_ATOMIC_CMP_AND_SWP])); + break; + case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD: + atomic_inc( + &(rdfx_cq->scqe_cnt[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD])); + break; + default: + break; + } +} + +void rdfx_set_other_cqe_info(u32 opcode, struct rdfx_cq_info *rdfx_cq) +{ + switch (opcode) { + case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM: + atomic_inc(&(rdfx_cq->rcqe_cnt[RECV_RDMA_WITH_IMM])); + break; + case HNS_ROCE_V2_OPCODE_SEND: + atomic_inc(&(rdfx_cq->rcqe_cnt[RECV_SEND])); + break; + case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM: + atomic_inc(&(rdfx_cq->rcqe_cnt[RECV_SEND_WITH_IMM])); + break; + case HNS_ROCE_V2_OPCODE_SEND_WITH_INV: + atomic_inc(&(rdfx_cq->rcqe_cnt[RECV_SEND_WITH_INV])); + break; + default: + break; + } +} + +void rdfx_set_cqe_info(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + struct hns_roce_v2_cqe *cqe) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq; + u32 status; + u32 opcode; + int is_send; + + spin_lock(&rdfx->cq.cq_lock); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, hr_cq->cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "find cq 0x%lx failed while set cqe info\n", + hr_cq->cqn); + spin_unlock(&rdfx->cq.cq_lock); + return; + } + + spin_unlock(&rdfx->cq.cq_lock); + + status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M, + V2_CQE_BYTE_4_STATUS_S); + if (status) + rdfx_set_err_cqe_info(status, rdfx_cq); + else { + atomic_inc(&(rdfx_cq->st_cnt[IB_WC_SUCCESS])); + + opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M, + V2_CQE_BYTE_4_OPCODE_S) & 0x1f; + is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S); + if (is_send) + rdfx_set_send_cqe_info(opcode, rdfx_cq); + else + rdfx_set_other_cqe_info(opcode, rdfx_cq); + } +} +EXPORT_SYMBOL_GPL(rdfx_set_cqe_info); +#endif diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..08f5d237d2ca67a3288efc8c4c8681b36521a42d --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c @@ -0,0 +1,975 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hns_roce_common.h" +#include "hns_roce_device.h" +#include "hns_roce_cmd.h" +#include "hnae3.h" +#include "hns_roce_hw_v2.h" + +#include "rdfx_main.h" +#include "rdfx_common.h" +#include "rdfx_hw_v2.h" + +static ssize_t rdfx_v2_show(struct kobject *kobj, struct attribute *attr, + char *buf); +static ssize_t rdfx_v2_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count); + +static void rdfx_v2_print_sq_wqe(void *wqe) +{ + pr_info("Print sq wqe\n"); +} + +static void rdfx_v2_print_rq_wqe(void *wqe) +{ + pr_info("Print rq wqe\n"); +} + +static void *rdfx_v2_get_dfx(struct ib_device *ib_dev) +{ + struct hns_roce_dev *hr_dev = + container_of(ib_dev, struct hns_roce_dev, ib_dev); + + return hr_dev->dfx_priv; +} + +static int rdfx_v2_ooo_show(struct rdfx_info *rdfx) +{ + pr_info("************** OOO INFO ***************\n"); + + return 0; +} + +static int rdfx_v2_err_show(struct rdfx_info *rdfx) +{ + pr_info("************** ERR INFO ***************\n"); + + return 0; +} + +enum { + DIS_READ_CLEAR, + EN_READ_CLEAR, +}; +#define CMD_NUM_QUERY_PKT_CNT 8 + +void rdfx_v2_pkt_stroe_query_pkt_read_pkt_cnt(struct hns_roce_cmq_desc *desc) +{ + int i; + + for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) { + (void)hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUEYR_PKT_CNT, true); + + if (i < (CMD_NUM_QUERY_PKT_CNT - 1)) + desc[i].flag |= + cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= + ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + } +} + +static int rdfx_v2_pkt_stroe_query_pkt(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc) +{ + struct hns_roce_cmq_desc desc_cnp_rx = {0}; + struct hns_roce_cmq_desc desc_cnp_tx = {0}; + struct hns_roce_cmq_desc desc_cqe = {0}; + struct rdfx_cnt_snap *resp; + int status; + + /* config read clear enable */ + resp = (struct rdfx_cnt_snap *)desc[0].data; + (void)hns_roce_cmq_setup_basic_desc(&desc[0], + HNS_ROCE_OPC_CNT_SNAP, false); + roce_set_bit(resp->data_0, CNT_SNAP_PARAM_DATA_0_CNT_CLR_CE_S, + EN_READ_CLEAR); + status = hns_roce_cmq_send(hr_dev, desc, 1); + if (status) + return status; + + rdfx_v2_pkt_stroe_query_pkt_read_pkt_cnt(desc); + + status = hns_roce_cmq_send(hr_dev, desc, CMD_NUM_QUERY_PKT_CNT); + if (status) + return status; + + (void)hns_roce_cmq_setup_basic_desc(&desc_cqe, + HNS_ROCE_OPC_QUEYR_CQE_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cqe, 1); + if (status) + return status; + + if (hr_dev->pci_dev->revision == 0x21) { + (void)hns_roce_cmq_setup_basic_desc(&desc_cnp_rx, + HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1); + if (status) + return status; + + (void)hns_roce_cmq_setup_basic_desc(&desc_cnp_tx, + HNS_ROCE_OPC_QUEYR_CNP_TX_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cnp_tx, 1); + if (status) + return status; + } + + /* config read clear disable */ + resp = (struct rdfx_cnt_snap *)desc[0].data; + (void)hns_roce_cmq_setup_basic_desc(&desc[0], + HNS_ROCE_OPC_CNT_SNAP, false); + roce_set_bit(resp->data_0, CNT_SNAP_PARAM_DATA_0_CNT_CLR_CE_S, + DIS_READ_CLEAR); + status = hns_roce_cmq_send(hr_dev, desc, 1); + if (status) + return status; + + return 0; +} + +void rdfx_v2_pkt_store_print(struct rdfx_query_pkt_cnt **resp_query, + struct rdfx_query_cqe_cnt *resp_cqe, + struct rdfx_query_cnp_rx_cnt *resp_cnp_rx, + struct rdfx_query_cnp_tx_cnt *resp_cnp_tx) +{ + 
pr_info("**************** PKT INFO ********************************\n"); + pr_info(" port0 port1 port2 port3\n"); + pr_info("RX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->rc_pkt_num, resp_query[1]->rc_pkt_num, + resp_query[2]->rc_pkt_num, resp_query[3]->rc_pkt_num); + pr_info("RX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->uc_pkt_num, resp_query[1]->uc_pkt_num, + resp_query[2]->uc_pkt_num, resp_query[3]->uc_pkt_num); + pr_info("RX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->ud_pkt_num, resp_query[1]->ud_pkt_num, + resp_query[2]->ud_pkt_num, resp_query[3]->ud_pkt_num); + pr_info("RX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->xrc_pkt_num, resp_query[1]->xrc_pkt_num, + resp_query[2]->xrc_pkt_num, resp_query[3]->xrc_pkt_num); + pr_info("RX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->total_pkt_num, resp_query[1]->total_pkt_num, + resp_query[2]->total_pkt_num, resp_query[3]->total_pkt_num); + pr_info("RX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[0]->error_pkt_num, resp_query[1]->error_pkt_num, + resp_query[2]->error_pkt_num, resp_query[3]->error_pkt_num); + pr_info("TX RC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->rc_pkt_num, resp_query[5]->rc_pkt_num, + resp_query[6]->rc_pkt_num, resp_query[7]->rc_pkt_num); + pr_info("TX UC PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->uc_pkt_num, resp_query[5]->uc_pkt_num, + resp_query[6]->uc_pkt_num, resp_query[7]->uc_pkt_num); + pr_info("TX UD PKT : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->ud_pkt_num, resp_query[5]->ud_pkt_num, + resp_query[6]->ud_pkt_num, resp_query[7]->ud_pkt_num); + pr_info("TX XRC PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->xrc_pkt_num, resp_query[5]->xrc_pkt_num, + resp_query[6]->xrc_pkt_num, resp_query[7]->xrc_pkt_num); + pr_info("TX ALL PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->total_pkt_num, resp_query[5]->total_pkt_num, + resp_query[6]->total_pkt_num, resp_query[7]->total_pkt_num); + pr_info("TX ERR PKT: 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_query[4]->error_pkt_num, resp_query[5]->error_pkt_num, + resp_query[6]->error_pkt_num, resp_query[7]->error_pkt_num); + pr_info("CQE : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cqe->port0_cqe, resp_cqe->port1_cqe, + resp_cqe->port2_cqe, resp_cqe->port3_cqe); + pr_info("CNP RX : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cnp_rx->port0_cnp_rx, resp_cnp_rx->port1_cnp_rx, + resp_cnp_rx->port2_cnp_rx, resp_cnp_rx->port3_cnp_rx); + pr_info("CNP TX : 0x%08x 0x%08x 0x%08x 0x%08x\n", + resp_cnp_tx->port0_cnp_tx, resp_cnp_tx->port1_cnp_tx, + resp_cnp_tx->port2_cnp_tx, resp_cnp_tx->port3_cnp_tx); + pr_info("**********************************************************\n"); +} + +static int rdfx_v2_pkt_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx->priv; + struct hns_roce_cmq_desc desc[CMD_NUM_QUERY_PKT_CNT] = { {0} }; + struct rdfx_query_pkt_cnt *resp_query[CMD_NUM_QUERY_PKT_CNT]; + struct hns_roce_cmq_desc desc_cqe = {0}; + struct rdfx_query_cqe_cnt *resp_cqe = + (struct rdfx_query_cqe_cnt *)desc_cqe.data; + struct hns_roce_cmq_desc desc_cnp_tx = {0}; + struct rdfx_query_cnp_tx_cnt *resp_cnp_tx = + (struct rdfx_query_cnp_tx_cnt *)desc_cnp_tx.data; + struct hns_roce_cmq_desc desc_cnp_rx = {0}; + struct rdfx_query_cnp_rx_cnt *resp_cnp_rx = + (struct rdfx_query_cnp_rx_cnt *)desc_cnp_rx.data; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + int ret; + int i; + + if (!parg_getopt(buf, "c", str)) + return 
rdfx_v2_pkt_stroe_query_pkt(hr_dev, desc); + + for (i = 0; i < CMD_NUM_QUERY_PKT_CNT; i++) { + (void)hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUEYR_PKT_CNT, true); + if (i < (CMD_NUM_QUERY_PKT_CNT - 1)) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + resp_query[i] = (struct rdfx_query_pkt_cnt *)desc[i].data; + } + ret = hns_roce_cmq_send(hr_dev, desc, CMD_NUM_QUERY_PKT_CNT); + if (ret) + return ret; + + (void)hns_roce_cmq_setup_basic_desc(&desc_cqe, + HNS_ROCE_OPC_QUEYR_CQE_CNT, true); + ret = hns_roce_cmq_send(hr_dev, &desc_cqe, 1); + if (ret) + return ret; + + if (hr_dev->pci_dev->revision == 0x21) { + (void)hns_roce_cmq_setup_basic_desc(&desc_cnp_rx, + HNS_ROCE_OPC_QUEYR_CNP_RX_CNT, true); + ret = hns_roce_cmq_send(hr_dev, &desc_cnp_rx, 1); + if (ret) + return ret; + + (void)hns_roce_cmq_setup_basic_desc(&desc_cnp_tx, + HNS_ROCE_OPC_QUEYR_CNP_TX_CNT, true); + ret = hns_roce_cmq_send(hr_dev, &desc_cnp_tx, 1); + if (ret) + return ret; + } + + rdfx_v2_pkt_store_print(resp_query, resp_cqe, resp_cnp_rx, resp_cnp_tx); + + return 0; +} + +static int rdfx_v2_cmd_show(struct rdfx_info *rdfx) +{ + struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx->priv; + struct hns_roce_cmq_desc desc_cnt; + struct rdfx_query_mbdb_cnt *resp_cnt = + (struct rdfx_query_mbdb_cnt *)desc_cnt.data; + struct hns_roce_cmq_desc desc_dfx; + int status; + + (void)hns_roce_cmq_setup_basic_desc(&desc_cnt, + HNS_ROCE_OPC_QUEYR_MBDB_CNT, true); + status = hns_roce_cmq_send(hr_dev, &desc_cnt, 1); + if (status) + return status; + + (void)hns_roce_cmq_setup_basic_desc(&desc_dfx, + HNS_ROCE_OPC_QUEYR_MDB_DFX, true); + status = hns_roce_cmq_send(hr_dev, &desc_dfx, 1); + if (status) + return status; + + pr_info("*************** cmd INFO **************\n"); + pr_info("MB ISSUE CNT : 0x%08x\n", + resp_cnt->mailbox_issue_cnt); + pr_info("MB EXEC CNT : 0x%08x\n", + resp_cnt->mailbox_exe_cnt); + pr_info("DB ISSUE CNT : 0x%08x\n", + resp_cnt->doorbell_issue_cnt); + pr_info("DB EXEC CNT : 0x%08x\n", + resp_cnt->doorbell_exe_cnt); + pr_info("EQDB ISSUE CNT : 0x%08x\n", + resp_cnt->eq_doorbell_issue_cnt); + pr_info("EQDB EXEC CNT : 0x%08x\n", + resp_cnt->eq_doorbell_exe_cnt); + pr_info(" EMPTY FULL ERR"); + pr_info("***************************************\n"); + + return 0; +} + +void rdfx_v2_ceqc_store_print(struct hns_roce_eq_context *eq_context, u32 ceqn) +{ + int i; + int *eqc; + eqc = (int *)eq_context; + pr_info("************** CEQC INFO ***************\n"); + for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) { + pr_info("CEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + ceqn, *eqc, *(eqc + 1), *(eqc + 2), + *(eqc + 3), *(eqc + 4), *(eqc + 5), + *(eqc + 6), *(eqc + 7)); + eqc += 8; + } + pr_info("****************************************\n"); +} + +int rdfx_ceqc_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, u32 ceqn) +{ + int ret; + struct hns_roce_eq_context *eq_context; + + eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); + + if (ZERO_OR_NULL_PTR(eq_context)) { + pr_info("alloc mailbox mem for ceqc failed\n"); + ret = -ENOMEM; + goto err_context; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, ceqn, 0, + HNS_ROCE_CMD_QUERY_CEQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(eq_context, mailbox->buf, sizeof(*eq_context)); + else { + dev_err(hr_dev->dev, "QUERY CEQ cmd process error\n"); + goto err_mailbox; + } + rdfx_v2_ceqc_store_print(eq_context, ceqn); + +err_mailbox: + 
kfree(eq_context); +err_context: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_ceqc_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u32 ceqn = 0; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + ceqn = (u32)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = rdfx_ceqc_store_mbox_check(hr_dev, mailbox, ceqn); + + return ret; +} + +void rdfx_v2_aeqc_store_print(u32 aeqn, struct hns_roce_eq_context *eq_context) +{ + int i; + int *eqc; + + eqc = (int *)eq_context; + pr_info("************** AEQC(0x%x) INFO ***************\n", aeqn); + for (i = 0; i < (sizeof(*eq_context) >> 2); i += 8) { + pr_info("AEQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + aeqn, *eqc, *(eqc + 1), *(eqc + 2), + *(eqc + 3), *(eqc + 4), *(eqc + 5), + *(eqc + 6), *(eqc + 7)); + eqc += 8; + } + pr_info("***************************************\n"); +} + +int rdfx_aeqc_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, u32 aeqn) +{ + int ret; + struct hns_roce_eq_context *eq_context; + + eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(eq_context)) { + pr_info("alloc mailbox mem for aeqc failed\n"); + ret = -ENOMEM; + goto err_context; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, aeqn, 0, + HNS_ROCE_CMD_QUERY_AEQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(eq_context, mailbox->buf, sizeof(*eq_context)); + else { + dev_err(hr_dev->dev, "QUERY CEQ cmd process error\n"); + goto err_mailbox; + } + + rdfx_v2_aeqc_store_print(aeqn, eq_context); + +err_mailbox: + kfree(eq_context); +err_context: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_aeqc_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u32 aeqn = 0; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + aeqn = (u32)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = rdfx_aeqc_store_mbox_check(hr_dev, mailbox, aeqn); + + return ret; +} + +void rdfx_v2_qpc_store_print(u32 qpn, u64 bt0_ba, u64 bt1_ba, + struct hns_roce_v2_qp_context *qp_context) +{ + int i; + int *qpc = (int *)qp_context; + + pr_info("************** QPC INFO ***************\n"); + pr_info("QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba); + pr_info("QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba); + for (i = 0; i < (sizeof(*qp_context) >> 2); i += 8) { + pr_info("QPC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + qpn, *qpc, *(qpc + 1), *(qpc + 2), + *(qpc + 3), *(qpc + 4), *(qpc + 5), + *(qpc + 6), *(qpc + 7)); + qpc += 8; + } + pr_info("***************************************\n"); +} + +int rdfx_qpc_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + u32 qpn, u64 bt0_ba, u64 bt1_ba) +{ + int ret; + struct hns_roce_v2_qp_context *qp_context; + + ret = hns_roce_cmd_mbox(hr_dev, 0, 
mailbox->dma, qpn, 0, + HNS_ROCE_CMD_READ_QPC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else { + dev_err(hr_dev->dev, "QUERY QP bt0 cmd process error\n"); + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_READ_QPC_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else { + dev_err(hr_dev->dev, "QUERY QP bt1 cmd process error\n"); + goto err_cmd; + } + + qp_context = kzalloc(sizeof(*qp_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(qp_context)) { + pr_info("alloc mailbox mem for qpc failed\n"); + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_QUERY_QPC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(qp_context, mailbox->buf, sizeof(*qp_context)); + else { + dev_err(hr_dev->dev, "QUERY QP cmd process error\n"); + goto err_mailbox; + } + + rdfx_v2_qpc_store_print(qpn, bt0_ba, bt1_ba, qp_context); + +err_mailbox: + kfree(qp_context); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_qpc_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u32 qpn = 0; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + qpn = (u32)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = rdfx_qpc_store_mbox_check(hr_dev, mailbox, qpn, bt0_ba, bt1_ba); + + return ret; +} + +void rdfx_v2_cqc_store_print(u32 cqn, u64 bt0_ba, u64 bt1_ba, + struct hns_roce_v2_cq_context *cq_context) +{ + int i; + int *cqc; + + cqc = (int *)cq_context; + pr_info("************** CQC INFO ***************\n"); + pr_info("CQC(0x%x) BT0: 0x%llx\n", cqn, bt0_ba); + pr_info("CQC(0x%x) BT1: 0x%llx\n", cqn, bt1_ba); + for (i = 0; i < (sizeof(*cq_context) >> 2); i += 8) { + pr_info("CQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + cqn, *cqc, *(cqc + 1), *(cqc + 2), + *(cqc + 3), *(cqc + 4), *(cqc + 5), + *(cqc + 6), *(cqc + 7)); + cqc += 8; + } +} + +int rdfx_cqc_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + u32 cqn, u64 bt0_ba, u64 bt1_ba) +{ + int ret; + struct hns_roce_v2_cq_context *cq_context; + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_READ_CQC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else { + dev_err(hr_dev->dev, "QUERY CQ bt0 cmd process error\n"); + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_READ_CQC_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else { + dev_err(hr_dev->dev, "QUERY CQ bt1 cmd process error\n"); + goto err_cmd; + } + + cq_context = kzalloc(sizeof(*cq_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(cq_context)) { + pr_info("alloc mailbox mem for cqc failed\n"); + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, + HNS_ROCE_CMD_QUERY_CQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(cq_context, mailbox->buf, sizeof(*cq_context)); + else { + dev_err(hr_dev->dev, "QUERY CQ cmd 
process error\n"); + goto err_mailbox; + } + + rdfx_v2_cqc_store_print(cqn, bt0_ba, bt1_ba, cq_context); + +err_mailbox: + kfree(cq_context); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_cqc_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + u32 cqn = 0; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + cqn = (u32)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = rdfx_cqc_store_mbox_check(hr_dev, mailbox, cqn, bt0_ba, bt1_ba); + + return ret; +} + +void rdfx_v2_srqc_store_print(u32 srqn, u64 bt0_ba, u64 bt1_ba, + struct hns_roce_srq_context *srq_context) +{ + int i; + int *srqc = (int *)srq_context; + + pr_info("************** SRQC INFO ***************\n"); + pr_info("SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba); + pr_info("SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba); + for (i = 0; i < (sizeof(*srq_context) >> 2); i += 8) { + pr_info("SRQC(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + srqn, *srqc, *(srqc + 1), *(srqc + 2), + *(srqc + 3), *(srqc + 4), *(srqc + 5), + *(srqc + 6), *(srqc + 7)); + srqc += 8; + } + pr_info("***************************************\n"); +} + +int rdfx_srqc_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + u32 srqn, u64 bt0_ba, u64 bt1_ba) +{ + int ret; + struct hns_roce_srq_context *srq_context; + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0, + HNS_ROCE_CMD_READ_SRQC_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else { + dev_err(hr_dev->dev, "QUERY SRQ bt0 cmd process error\n"); + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0, + HNS_ROCE_CMD_READ_SRQC_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else { + dev_err(hr_dev->dev, "QUERY SRQ bt1 cmd process error\n"); + goto err_cmd; + } + + srq_context = kzalloc(sizeof(*srq_context), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(srq_context)) { + pr_info("alloc mailbox mem for srqc failed\n"); + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srqn, 0, + HNS_ROCE_CMD_QUERY_SRQC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(srq_context, mailbox->buf, sizeof(*srq_context)); + else { + dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n"); + goto err_mailbox; + } + + rdfx_v2_srqc_store_print(srqn, bt0_ba, bt1_ba, srq_context); + +err_mailbox: + kfree(srq_context); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_srqc_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + u32 srqn = 0; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + srqn = (u32)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret 
= rdfx_srqc_store_mbox_check(hr_dev, mailbox, srqn, bt0_ba, bt1_ba); + + return ret; +} + +void rdfx_v2_mpt_store_print(int key, u64 bt0_ba, u64 bt1_ba, + struct hns_roce_v2_mpt_entry *mpt_ctx) +{ + int i; + int *mpt = (int *)mpt_ctx; + + pr_info("************** MPT INFO ***************\n"); + pr_info("MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba); + pr_info("MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba); + for (i = 0; i < (sizeof(*mpt_ctx) >> 2); i += 8) { + pr_info("MPT(0x%x): %08x %08x %08x %08x %08x %08x %08x %08x\n", + key, *mpt, *(mpt + 1), *(mpt + 2), + *(mpt + 3), *(mpt + 4), *(mpt + 5), + *(mpt + 6), *(mpt + 7)); + mpt += 8; + } + pr_info("***************************************\n"); +} + +int rdfx_mpt_store_mbox_check(struct hns_roce_dev *hr_dev, + struct hns_roce_cmd_mailbox *mailbox, + int key, u64 bt0_ba, u64 bt1_ba) +{ + struct hns_roce_v2_mpt_entry *mpt_ctx; + int ret; + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0, + HNS_ROCE_CMD_READ_MPT_BT0, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt0_ba, mailbox->buf, sizeof(bt0_ba)); + else { + dev_err(hr_dev->dev, "QUERY MPT bt0 cmd process error\n"); + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0, + HNS_ROCE_CMD_READ_MPT_BT1, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(&bt1_ba, mailbox->buf, sizeof(bt1_ba)); + else { + dev_err(hr_dev->dev, "QUERY MPT bt1 cmd process error\n"); + goto err_cmd; + } + + mpt_ctx = kzalloc(sizeof(*mpt_ctx), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(mpt_ctx)) { + pr_info("alloc mailbox mem for mpt failed\n"); + ret = -ENOMEM; + goto err_cmd; + } + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, key, 0, + HNS_ROCE_CMD_QUERY_MPT, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (!ret) + memcpy(mpt_ctx, mailbox->buf, sizeof(*mpt_ctx)); + else { + dev_err(hr_dev->dev, "QUERY mtpt cmd process error\n"); + goto err_mailbox; + } + + rdfx_v2_mpt_store_print(key, bt0_ba, bt1_ba, mpt_ctx); + +err_mailbox: + kfree(mpt_ctx); +err_cmd: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +static int rdfx_v2_mpt_store(const char *p_buf, struct rdfx_info *rdfx) +{ + struct hns_roce_cmd_mailbox *mailbox; + struct hns_roce_dev *hr_dev; + long long convert_val; + char *buf = (char *)p_buf; + char str[DEF_OPT_STR_LEN] = {0}; + u64 bt0_ba = 0; + u64 bt1_ba = 0; + int key; + int ret; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + key = (int)convert_val; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = rdfx_mpt_store_mbox_check(hr_dev, mailbox, key, bt0_ba, bt1_ba); + + return ret; +} + +rdfx_hw_file_attr_def(ooo, rdfx_v2_ooo_show, NULL); +rdfx_hw_file_attr_def(err, rdfx_v2_err_show, NULL); +rdfx_hw_file_attr_def(cmd, rdfx_v2_cmd_show, NULL); + +rdfx_hw_file_attr_def(pkt, NULL, rdfx_v2_pkt_store); +rdfx_hw_file_attr_def(ceqc, NULL, rdfx_v2_ceqc_store); +rdfx_hw_file_attr_def(aeqc, NULL, rdfx_v2_aeqc_store); +rdfx_hw_file_attr_def(qpc, NULL, rdfx_v2_qpc_store); +rdfx_hw_file_attr_def(cqc, NULL, rdfx_v2_cqc_store); +rdfx_hw_file_attr_def(mpt, NULL, rdfx_v2_mpt_store); +rdfx_hw_file_attr_def(srqc, NULL, rdfx_v2_srqc_store); + +static struct attribute *rdfx_hw_v2_attrs_list[] = { + HW_ATTRS_LIST_MEMBER(ooo), + HW_ATTRS_LIST_MEMBER(err), + HW_ATTRS_LIST_MEMBER(pkt), + HW_ATTRS_LIST_MEMBER(cmd), + HW_ATTRS_LIST_MEMBER(ceqc), + HW_ATTRS_LIST_MEMBER(aeqc), + HW_ATTRS_LIST_MEMBER(qpc), + 
HW_ATTRS_LIST_MEMBER(cqc), + HW_ATTRS_LIST_MEMBER(mpt), + HW_ATTRS_LIST_MEMBER(srqc), + NULL +}; + +static const struct sysfs_ops rdfx_hw_v2_file_ops = { + .show = rdfx_v2_show, + .store = rdfx_v2_store, +}; + +static struct kobj_type rdfx_hw_v2_kobj_ktype = { + .release = NULL, + .sysfs_ops = &rdfx_hw_v2_file_ops, + .default_attrs = rdfx_hw_v2_attrs_list, +}; + +static ssize_t rdfx_v2_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct rdfx_hw_sys_attr *p_roce_sys_attr = + container_of(attr, struct rdfx_hw_sys_attr, attr); + struct rdfx_info *rdfx = container_of(kobj, struct rdfx_info, kobj); + int ret = 0; + + memset(buf, 0, SYSFS_PAGE_SIZE); + if (p_roce_sys_attr->pub_show) { + ret = p_roce_sys_attr->pub_show(rdfx); + if (ret) + return ret; + else + return strlen(buf); + } + + return -EPERM; +} + +static ssize_t rdfx_v2_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct rdfx_hw_sys_attr *p_roce_sys_attr = + container_of(attr, struct rdfx_hw_sys_attr, attr); + struct rdfx_info *rdfx = container_of(kobj, struct rdfx_info, kobj); + int ret = 0; + + if (p_roce_sys_attr->pub_store) { + ret = p_roce_sys_attr->pub_store((char *)buf, rdfx); + if (ret) + return ret; + else + return count; + } + + return -EPERM; +} + +static int rdfx_v2_add_sysfs(struct rdfx_info *rdfx) +{ + struct device *dev = rdfx->drv_dev; + int ret = 0; + + ret = kobject_init_and_add(&(rdfx->kobj), + &rdfx_hw_v2_kobj_ktype, + &(dev->kobj), + "%s", rdfx->dev.dev_name); + if (ret) { + pr_info("kobject_init_and_add failed!\r\n"); + return ret; + } + + return ret; +} + +static void rdfx_v2_del_sysfs(struct rdfx_info *rdfx) +{ + kobject_del(&(rdfx->kobj)); +} + +struct rdfx_ops rdfx_ops_hw_v2 = { + .add_sysfs = rdfx_v2_add_sysfs, + .del_sysfs = rdfx_v2_del_sysfs, + .print_sq_wqe = rdfx_v2_print_sq_wqe, + .print_rq_wqe = rdfx_v2_print_rq_wqe, + .get_dfx = rdfx_v2_get_dfx, +}; + diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.h b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..16950eae842f6a43ac8d8b5b2b11c22a5d8da7f2 --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __RDFX_HW_V2__ +#define __RDFX_HW_V2__ + +#define CNT_SNAP_PARAM_DATA_0_CNT_CLR_CE_S 0 +#define CNT_SNAP_PARAM_DATA_0_SNAP_EN_S 1 + +struct rdfx_query_mbdb_cnt { + __le32 mailbox_issue_cnt; + __le32 mailbox_exe_cnt; + __le32 doorbell_issue_cnt; + __le32 doorbell_exe_cnt; + __le32 eq_doorbell_issue_cnt; + __le32 eq_doorbell_exe_cnt; +}; + +struct rdfx_query_mdb_dfx { + __le32 empty_info; + __le32 data_1; + __le32 rsv[2]; +}; + +#define QUERY_MDB_DFX_EMPTY_INFO_EQDB_EMPTY_S 0 + +#define QUERY_MDB_DFX_EMPTY_INFO_MB_EMPTY_S 1 +#define QUERY_MDB_DFX_EMPTY_INFO_MB_EMPTY_M \ + (((1UL << 6) - 1) << QUERY_MDB_DFX_EMPTY_INFO_MB_EMPTY_S) + +#define QUERY_MDB_DFX_EMPTY_INFO_DB_EMPTY_S 7 +#define QUERY_MDB_DFX_EMPTY_INFO_DB_EMPTY_M \ + (((1UL << 4) - 1) << QUERY_MDB_DFX_EMPTY_INFO_DB_EMPTY_S) + +#define QUERY_MDB_DFX_DATA_1_EQDB_FULL_S 0 + +#define QUERY_MDB_DFX_DATA_1_MB_FULL_S 1 +#define QUERY_MDB_DFX_DATA_1_MB_FULL_M \ + (((1UL << 6) - 1) << QUERY_MDB_DFX_DATA_1_MB_FULL_S) + +#define QUERY_MDB_DFX_DATA_1_DB_FULL_S 7 +#define QUERY_MDB_DFX_DATA_1_DB_FULL_M \ + (((1UL << 4) - 1) << QUERY_MDB_DFX_DATA_1_DB_FULL_S) + +#define QUERY_MDB_DFX_DATA_1_DB_CMD_ERR_S 11 +#define QUERY_MDB_DFX_DATA_1_MB_CMD_ERR_S 12 + +#endif diff --git 
a/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.c new file mode 100644 index 0000000000000000000000000000000000000000..d658621d93a2284cdfc74990c0f45944710e6a17 --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.c @@ -0,0 +1,927 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. +#ifdef CONFIG_INFINIBAND_HNS_DFX +#include "roce_k_compat.h" +#include "hns_roce_device.h" +#include "hns_roce_common.h" +#include "hns_roce_cmd.h" +#include "hnae3.h" +#include "hns_roce_hw_v2.h" +#include "rdfx_common.h" + +unsigned int hr_fc_print; +module_param(hr_fc_print, uint, 0644); +MODULE_PARM_DESC(hr_fc_print, "enable function call print"); + +struct rdfx_qp_info *rdfx_find_rdfx_qp(struct rdfx_info *rdfx, + unsigned long qpn); +static struct rdfx_ceq_info *rdfx_find_rdfx_ceq(struct rdfx_info *rdfx, + int ceqn); +struct rdfx_cq_info *rdfx_find_rdfx_cq(struct rdfx_info *rdfx, + unsigned long cqn); + +static void rdfx_v2_free_cqe_dma_buf(struct rdfx_cq_info *rdfx_cq) +{ + struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx_cq->priv; + + hns_roce_buf_free(hr_dev, (struct hns_roce_buf *)rdfx_cq->buf); + rdfx_cq->buf = NULL; +} + +static void rdfx_v2_free_wqe_dma_buf(struct rdfx_qp_info *rdfx_qp) +{ + struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx_qp->priv; + + hns_roce_buf_free(hr_dev, (struct hns_roce_buf *)rdfx_qp->buf); + rdfx_qp->buf = NULL; +} + +void qp_release(struct kref *ref) +{ + struct rdfx_qp_info *rdfx_qp = + container_of(ref, struct rdfx_qp_info, cnt); + rdfx_v2_free_wqe_dma_buf(rdfx_qp); + kfree(rdfx_qp); +} +EXPORT_SYMBOL_GPL(qp_release); + +void cq_release(struct kref *ref) +{ + struct rdfx_cq_info *rdfx_cq = + container_of(ref, struct rdfx_cq_info, cnt); + rdfx_v2_free_cqe_dma_buf(rdfx_cq); + kfree(rdfx_cq); +} +EXPORT_SYMBOL_GPL(cq_release); + +static void ceq_release(struct kref *ref) +{ + struct rdfx_ceq_info *rdfx_ceq = + container_of(ref, struct rdfx_ceq_info, cnt); + + kfree(rdfx_ceq); +} + +static void pd_release(struct kref *ref) +{ + struct rdfx_pd_info *rdfx_pd = + container_of(ref, struct rdfx_pd_info, cnt); + + kfree(rdfx_pd); +} + +static void mr_release(struct kref *ref) +{ + struct rdfx_mr_info *rdfx_mr = + container_of(ref, struct rdfx_mr_info, cnt); + + kfree(rdfx_mr); +} + +int alloc_rdfx_info(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx; + + rdfx = kzalloc(sizeof(*rdfx), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx)) + return -ENOMEM; + + hr_dev->dfx_priv = rdfx; + rdfx->priv = hr_dev; + + INIT_LIST_HEAD(&rdfx->pd.list); + INIT_LIST_HEAD(&rdfx->qp.list); + INIT_LIST_HEAD(&rdfx->cq.list); + INIT_LIST_HEAD(&rdfx->mr.list); + INIT_LIST_HEAD(&rdfx->eq.ceq_list); + INIT_LIST_HEAD(&rdfx->eq.aeq_list); + + spin_lock_init(&rdfx->pd.pd_lock); + spin_lock_init(&rdfx->qp.qp_lock); + spin_lock_init(&rdfx->cq.cq_lock); + spin_lock_init(&rdfx->mr.mr_lock); + spin_lock_init(&rdfx->eq.eq_lock); + + return 0; +} + +void rdfx_set_dev_name(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + if (!hr_dev->is_reset) { + strlcpy(rdfx->dev.dev_name, hr_dev->ib_dev.name, + IB_DEVICE_NAME_MAX); + dev_info(hr_dev->dev, "config dfx dev name - %s\n", + rdfx->dev.dev_name); + } +} + +/*unregister struct rdfx_info*/ +static void rdfx_clean_list(struct rdfx_info *rdfx) +{ + struct rdfx_qp_info *rdfx_qp; + struct rdfx_cq_info *rdfx_cq; + struct rdfx_mr_info *rdfx_mr; + struct rdfx_pd_info *rdfx_pd; + struct rdfx_ceq_info 
*rdfx_ceq; + struct list_head *pos; + struct list_head *q; + + if (!list_empty(&rdfx->qp.list)) { + list_for_each_safe(pos, q, &rdfx->qp.list) { + rdfx_qp = list_entry(pos, struct rdfx_qp_info, list); + list_del(pos); + kref_put(&(rdfx_qp->cnt), qp_release); + } + } + + if (!list_empty(&rdfx->cq.list)) { + list_for_each_safe(pos, q, &rdfx->cq.list) { + rdfx_cq = list_entry(pos, struct rdfx_cq_info, list); + list_del(pos); + kref_put(&(rdfx_cq->cnt), cq_release); + } + } + + if (!list_empty(&rdfx->mr.list)) { + list_for_each_safe(pos, q, &rdfx->mr.list) { + rdfx_mr = list_entry(pos, struct rdfx_mr_info, list); + list_del(pos); + kref_put(&(rdfx_mr->cnt), mr_release); + } + } + + if (!list_empty(&rdfx->pd.list)) { + list_for_each_safe(pos, q, &rdfx->pd.list) { + rdfx_pd = list_entry(pos, struct rdfx_pd_info, list); + list_del(pos); + kref_put(&(rdfx_pd->cnt), pd_release); + } + } + if (!list_empty(&rdfx->eq.ceq_list)) { + list_for_each_safe(pos, q, &rdfx->eq.ceq_list) { + rdfx_ceq = list_entry(pos, struct rdfx_ceq_info, list); + list_del(pos); + kref_put(&(rdfx_ceq->cnt), ceq_release); + } + } +} + +void free_rdfx_info(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + if (!rdfx) + return; + rdfx_clean_list(rdfx); + kfree(rdfx); + hr_dev->dfx_priv = NULL; +} + +const char *rdfx_func_name[RDFX_FUNC_MAX] = { + "modify_dev", + "query_dev", + "query_port", + "modify_port", + "get_link_lyr", + "get_netdev", + "query_gid", + "add_gid", + "del_gid", + "query_pkey", + "alloc_uctx", + "dealloc_uctx", + "hr_mmap", + "alloc_pd", + "dealloc_pd", + "create_ah", + "query_ah", + "destroy_ah", + "create_qp", + "modify_qp", + "query_qp", + "destroy_qp", + "post_send", + "post_recv", + "create_cq", + "modify_cq", + "destroy_cq", + "notify_cq", + "poll_cq", + "resize_cq", + "get_dma_mr", + "reg_user_mr", + "rereg_mr", + "dereg_mr", + "port_immutbl", + "reg_umm_mr", + "dereg_umm_mr", +}; +EXPORT_SYMBOL(rdfx_func_name); + +void rdfx_func_cnt(struct hns_roce_dev *hr_dev, int func) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + atomic_inc(&rdfx->dev.fc[func]); + + if (hr_fc_print && rdfx_func_name[func]) + pr_info("%s has been called %d times!", + rdfx_func_name[func], atomic_read(&rdfx->dev.fc[func])); +} +EXPORT_SYMBOL_GPL(rdfx_func_cnt); + +inline void rdfx_inc_dealloc_qp_cnt(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + atomic_inc(&rdfx->qp.dealloc_qp_cnt); +} +EXPORT_SYMBOL_GPL(rdfx_inc_dealloc_qp_cnt); + +struct rdfx_cq_info *rdfx_get_rdfx_cq(struct hns_roce_dev *hr_dev, + unsigned long cqn); + +void rdfx_inc_arm_cq_cnt(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + enum ib_cq_notify_flags flags) +{ + struct rdfx_cq_info *rdfx_cq; + + rdfx_cq = rdfx_get_rdfx_cq(hr_dev, hr_cq->cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "get cq 0x%lx failed while inc arm cq cnt\n", + hr_cq->cqn); + return; + } + + if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) + atomic_inc(&(rdfx_cq->arm_cnt[0])); + else + atomic_inc(&(rdfx_cq->arm_cnt[1])); + + kref_put(&(rdfx_cq->cnt), cq_release); +} +EXPORT_SYMBOL_GPL(rdfx_inc_arm_cq_cnt); + +inline void rdfx_inc_dereg_mr_cnt(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + atomic_inc(&rdfx->mr.dealloc_mr_cnt); +} + +inline void rdfx_inc_dealloc_cq_cnt(struct hns_roce_dev *hr_dev) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + + atomic_inc(&rdfx->cq.dealloc_cq_cnt); 
+} + +void rdfx_inc_sq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp; + + spin_lock(&(rdfx->qp.qp_lock)); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qpn); + if (!rdfx_qp) { + dev_err(hr_dev->dev, "find qp 0x%x failed while inc sq db cnt!\n", + qpn); + spin_unlock(&(rdfx->qp.qp_lock)); + return; + } + atomic_inc(&rdfx_qp->sq.db_cnt); + + spin_unlock(&(rdfx->qp.qp_lock)); +} +EXPORT_SYMBOL_GPL(rdfx_inc_sq_db_cnt); + +void rdfx_inc_rq_db_cnt(struct hns_roce_dev *hr_dev, u32 qpn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp; + + spin_lock(&(rdfx->qp.qp_lock)); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qpn); + if (!rdfx_qp) { + dev_err(hr_dev->dev, "find qp 0x%x failed while inc rq db cnt!\n", + qpn); + spin_unlock(&(rdfx->qp.qp_lock)); + return; + } + atomic_inc(&rdfx_qp->rq.db_cnt); + + spin_unlock(&(rdfx->qp.qp_lock)); +} +EXPORT_SYMBOL_GPL(rdfx_inc_rq_db_cnt); + +void rdfx_inc_ceqe_cnt(struct hns_roce_dev *hr_dev, int ceqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_ceq_info *rdfx_ceq; + unsigned long flags; + + spin_lock_irqsave(&rdfx->eq.eq_lock, flags); + + rdfx_ceq = rdfx_find_rdfx_ceq(rdfx, ceqn); + if (!rdfx_ceq) { + dev_err(hr_dev->dev, "find ceq 0x%x failed while inc ceqe cnt!\n", + ceqn); + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + return; + } + atomic_inc(&rdfx_ceq->ceqe_cnt); + + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); +} +EXPORT_SYMBOL_GPL(rdfx_inc_ceqe_cnt); + +/* This function should be called while get rdfx->qp.qp_lock */ +struct rdfx_qp_info *rdfx_find_rdfx_qp(struct rdfx_info *rdfx, + unsigned long qpn) +{ + struct rdfx_qp_info *rdfx_qp; + struct list_head *pos; + struct list_head *q; + u32 is_existed = 0; + + list_for_each_safe(pos, q, &(rdfx->qp.list)) { + rdfx_qp = list_entry(pos, struct rdfx_qp_info, list); + if (qpn == rdfx_qp->qpn) { + is_existed = 1; + break; + } + } + + if (!is_existed) + return NULL; + + return rdfx_qp; +} + +struct rdfx_qp_info *rdfx_get_rdfx_qp(struct hns_roce_dev *hr_dev, + unsigned long qpn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp; + + spin_lock(&(rdfx->qp.qp_lock)); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qpn); + if (!rdfx_qp) { + dev_err(hr_dev->dev, + "find qp 0x%lx failed while get rdfx qp!\n", qpn); + spin_unlock(&(rdfx->qp.qp_lock)); + return NULL; + } + + kref_get(&(rdfx_qp->cnt)); + + spin_unlock(&(rdfx->qp.qp_lock)); + + return rdfx_qp; +} +EXPORT_SYMBOL_GPL(rdfx_get_rdfx_qp); + +void rdfx_put_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp; + unsigned long flags; + + spin_lock_irqsave(&rdfx->qp.qp_lock, flags); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qpn); + if (!rdfx_qp) { + dev_err(hr_dev->dev, "find qp 0x%lx failed while put rdfx qp!\n", + qpn); + spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); + return; + } + + spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); + + kref_put(&(rdfx_qp->cnt), qp_release); +} +EXPORT_SYMBOL_GPL(rdfx_put_rdfx_qp); + +#ifndef CONFIG_INFINIBAND_HNS_DFX_ENHANCE +void rdfx_release_rdfx_qp(struct hns_roce_dev *hr_dev, unsigned long qpn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_qp_info *rdfx_qp; + unsigned long flags; + + spin_lock_irqsave(&rdfx->qp.qp_lock, flags); + + rdfx_qp = rdfx_find_rdfx_qp(rdfx, qpn); 
+ if (!rdfx_qp) { + dev_err(hr_dev->dev, "find qp 0x%lx failed while release rdfx qp!\n", + qpn); + spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); + return; + } + list_del(&(rdfx_qp->list)); + + spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); + + kref_put(&(rdfx_qp->cnt), qp_release); +} +EXPORT_SYMBOL_GPL(rdfx_release_rdfx_qp); +#endif + +/* This function should be called while get rdfx->cq.cq_lock */ +struct rdfx_cq_info *rdfx_find_rdfx_cq(struct rdfx_info *rdfx, + unsigned long cqn) +{ + struct rdfx_cq_info *rdfx_cq; + struct list_head *pos; + struct list_head *q; + u32 is_existed = 0; + + list_for_each_safe(pos, q, &(rdfx->cq.list)) { + rdfx_cq = list_entry(pos, struct rdfx_cq_info, list); + if (cqn == rdfx_cq->cqn) { + is_existed = 1; + break; + } + } + + if (!is_existed) + return NULL; + + return rdfx_cq; +} + +struct rdfx_cq_info *rdfx_get_rdfx_cq(struct hns_roce_dev *hr_dev, + unsigned long cqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq; + + spin_lock(&rdfx->cq.cq_lock); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "find cqn %lu failed while get rdfx cq!\n", + cqn); + spin_unlock(&rdfx->cq.cq_lock); + return NULL; + } + kref_get(&(rdfx_cq->cnt)); + + spin_unlock(&rdfx->cq.cq_lock); + + return rdfx_cq; +} +EXPORT_SYMBOL_GPL(rdfx_get_rdfx_cq); + +void rdfx_put_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq; + unsigned long flags; + + spin_lock_irqsave(&rdfx->cq.cq_lock, flags); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "find cq %lu failed while get rdfx cq!\n", + cqn); + spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); + return; + } + + spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); + + kref_put(&(rdfx_cq->cnt), cq_release); +} +EXPORT_SYMBOL_GPL(rdfx_put_rdfx_cq); + +void rdfx_release_rdfx_cq(struct hns_roce_dev *hr_dev, unsigned long cqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq; + unsigned long flags; + + spin_lock_irqsave(&rdfx->cq.cq_lock, flags); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "find cqn %lu failed while get rdfx cq!\n", + cqn); + spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); + return; + } + + list_del(&(rdfx_cq->list)); + + spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); + + kref_put(&(rdfx_cq->cnt), cq_release); +} + +/* This function should be called while get rdfx->cq.cq_lock */ +static struct rdfx_ceq_info *rdfx_find_rdfx_ceq(struct rdfx_info *rdfx, + int ceqn) +{ + struct rdfx_ceq_info *rdfx_ceq; + struct list_head *pos; + struct list_head *q; + u32 is_existed = 0; + + list_for_each_safe(pos, q, &(rdfx->eq.ceq_list)) { + rdfx_ceq = list_entry(pos, struct rdfx_ceq_info, list); + if (ceqn == rdfx_ceq->ceqn) { + is_existed = 1; + break; + } + } + + if (!is_existed) + return NULL; + + return rdfx_ceq; +} + +struct rdfx_ceq_info *rdfx_get_rdfx_ceq(struct hns_roce_dev *hr_dev, + unsigned long ceqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_ceq_info *rdfx_ceq; + + spin_lock(&rdfx->eq.eq_lock); + + rdfx_ceq = rdfx_find_rdfx_ceq(rdfx, ceqn); + if (!rdfx_ceq) { + dev_err(hr_dev->dev, "find ceqn %lu failed while get rdfx ceq!\n", + ceqn); + spin_unlock(&rdfx->eq.eq_lock); + return NULL; + } + kref_get(&(rdfx_ceq->cnt)); + + spin_unlock(&rdfx->eq.eq_lock); + + return rdfx_ceq; 
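The get/put pairs follow the usual kref lifetime pattern: a caller pins a tracked object by number with the get helper, uses it outside the lock, and drops the reference with the matching put helper; the release callback only runs on the final kref_put(), even if the release path removes the entry from the list in between. An illustrative sketch for a CQ (the statistic chosen here is only an example):

```c
/*
 * Illustrative usage of the get/put pair: pin the tracked CQ while a
 * statistic is updated.  cq_release() (the kref release callback) cannot
 * run until the last reference is dropped, so the entry stays valid here
 * even if the CQ is being destroyed concurrently.
 */
static void example_count_cq_arm(struct hns_roce_dev *hr_dev,
				 unsigned long cqn)
{
	struct rdfx_cq_info *rdfx_cq = rdfx_get_rdfx_cq(hr_dev, cqn);

	if (!rdfx_cq)
		return;

	atomic_inc(&rdfx_cq->arm_cnt[0]);	/* e.g. count solicited arms */

	rdfx_put_rdfx_cq(hr_dev, cqn);		/* looks the entry up again by cqn */
}
```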
+} +EXPORT_SYMBOL_GPL(rdfx_get_rdfx_ceq); + +void rdfx_put_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_ceq_info *rdfx_ceq; + unsigned long flags; + + spin_lock_irqsave(&rdfx->eq.eq_lock, flags); + + rdfx_ceq = rdfx_find_rdfx_ceq(rdfx, ceqn); + if (!rdfx_ceq) { + dev_err(hr_dev->dev, "find ceq %lu failed while get rdfx ceq!\n", + ceqn); + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + return; + } + + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + + kref_put(&(rdfx_ceq->cnt), ceq_release); +} +EXPORT_SYMBOL_GPL(rdfx_put_rdfx_ceq); + +void rdfx_release_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_ceq_info *rdfx_ceq; + unsigned long flags; + + spin_lock_irqsave(&rdfx->eq.eq_lock, flags); + + rdfx_ceq = rdfx_find_rdfx_ceq(rdfx, ceqn); + if (!rdfx_ceq) { + dev_err(hr_dev->dev, "find ceq %lu failed while release rdfx ceq!\n", + ceqn); + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + return; + } + list_del(&(rdfx_ceq->list)); + + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + + kref_put(&(rdfx_ceq->cnt), ceq_release); +} +EXPORT_SYMBOL_GPL(rdfx_release_rdfx_ceq); + +void rdfx_alloc_rdfx_ceq(struct hns_roce_dev *hr_dev, unsigned long ceqn, + unsigned int eq_cmd) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_ceq_info *rdfx_ceq; + unsigned long flags; + + if (eq_cmd == HNS_ROCE_CMD_CREATE_CEQC) { + rdfx_ceq = kzalloc(sizeof(struct rdfx_ceq_info), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx_ceq)) + return; + + rdfx_ceq->ceqn = ceqn; + kref_init(&(rdfx_ceq->cnt)); + + spin_lock_irqsave(&rdfx->eq.eq_lock, flags); + list_add_tail(&rdfx_ceq->list, &rdfx->eq.ceq_list); + spin_unlock_irqrestore(&rdfx->eq.eq_lock, flags); + } +} +EXPORT_SYMBOL_GPL(rdfx_alloc_rdfx_ceq); + +void rdfx_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct hns_roce_buf *dfx_cq_buf; + struct rdfx_cq_info *rdfx_cq; + unsigned long flags; + u32 page_shift; + int cq_entries; + + cq_entries = hr_cq->cq_depth; + + page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz; + dfx_cq_buf = hns_roce_buf_alloc(hr_dev, + cq_entries * hr_dev->caps.cq_entry_sz, + page_shift, 0); + if (IS_ERR(dfx_cq_buf)) { + dev_err(hr_dev->dev, "hns_roce_dfx_buf_alloc error!\n"); + return; + } + +#ifdef CONFIG_INFINIBAND_HNS_DFX_ENHANCE + rdfx_put_rdfx_cq(hr_dev, hr_cq->cqn); +#endif + + rdfx_cq = kzalloc(sizeof(*rdfx_cq), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx_cq)) + goto err_buf; + + rdfx_cq->buf = dfx_cq_buf; + rdfx_cq->cq_depth = cq_entries; + rdfx_cq->cqn = hr_cq->cqn; + rdfx_cq->cq = &hr_cq->ib_cq; + rdfx_cq->priv = hr_dev; + + atomic_inc(&rdfx->cq.alloc_cq_cnt); + if (hr_cq->cqn > rdfx->cq.top_cq_index.counter) + atomic_set(&rdfx->cq.top_cq_index, (int)hr_cq->cqn); + kref_init(&(rdfx_cq->cnt)); + + spin_lock_irqsave(&rdfx->cq.cq_lock, flags); + list_add_tail(&rdfx_cq->list, &rdfx->cq.list); + spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); + + return; + +err_buf: + hns_roce_buf_free(hr_dev, dfx_cq_buf); +} + +void rdfx_free_cq_buff(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) +{ +#ifndef CONFIG_INFINIBAND_HNS_DFX_ENHANCE + rdfx_release_rdfx_cq(hr_dev, hr_cq->cqn); +#endif +} + +void rdfx_alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + struct rdfx_info *rdfx = (struct rdfx_info 
*)hr_dev->dfx_priv; + struct hns_roce_buf *dfx_qp_buf; + struct rdfx_qp_info *rdfx_qp; + u32 page_shift = 0; + unsigned long flags; + + page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; + dfx_qp_buf = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, page_shift, + 0); + if (IS_ERR(dfx_qp_buf)) { + dev_err(hr_dev->dev, "alloc dfx qp 0x%lx buff failed!\n", + hr_qp->qpn); + return; + } + + rdfx_qp = kzalloc(sizeof(*rdfx_qp), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx_qp)) { + hns_roce_buf_free(hr_dev, dfx_qp_buf); + return; + } + + rdfx_qp->buf = dfx_qp_buf; + rdfx_qp->priv = hr_dev; + rdfx_qp->buf_size = hr_qp->buff_size; + rdfx_qp->sq.sq_depth = hr_qp->sq.wqe_cnt; + rdfx_qp->rq.rq_depth = hr_qp->rq.wqe_cnt; + rdfx_qp->sq.offset = hr_qp->sq.offset; + rdfx_qp->rq.offset = hr_qp->rq.offset; + rdfx_qp->sq.sq_wqe_size = hr_qp->sq.wqe_shift; + rdfx_qp->rq.rq_wqe_size = hr_qp->rq.wqe_shift; + rdfx_qp->qp = &hr_qp->ibqp; + rdfx_qp->qpn = hr_qp->ibqp.qp_num; + + kref_init(&(rdfx_qp->cnt)); + atomic_inc(&rdfx->qp.alloc_qp_cnt); + if (hr_qp->ibqp.qp_num > rdfx->qp.top_qp_index.counter) + atomic_set(&rdfx->qp.top_qp_index, (int)hr_qp->ibqp.qp_num); + + spin_lock_irqsave(&rdfx->qp.qp_lock, flags); + list_add_tail(&rdfx_qp->list, &rdfx->qp.list); + spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); +} + +void rdfx_set_qp_attr(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state new_state) +{ + struct rdfx_qp_info *rdfx_qp; + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + rdfx_qp = rdfx_get_rdfx_qp(hr_dev, hr_qp->qpn); + if (!rdfx_qp) { + dev_err(hr_dev->dev, "get rdfx qp 0x%lx failed while set qp!\n", + hr_qp->qpn); + return; + } + + rdfx_qp->attr.read_en = + attr->qp_access_flags & IB_ACCESS_REMOTE_READ; + rdfx_qp->attr.write_en = + attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE; + rdfx_qp->attr.atomic_en = + attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC; + atomic_set(&rdfx_qp->attr.state, (int)new_state); + rdfx_qp->attr.max_ird = attr->max_rd_atomic; + rdfx_qp->attr.max_ord = attr->max_dest_rd_atomic; + rdfx_qp->attr.max_sge[0] = attr->cap.max_send_sge; + rdfx_qp->attr.max_sge[1] = attr->cap.max_recv_sge; + rdfx_qp->attr.pd_id = to_hr_pd(hr_qp->ibqp.pd)->pdn; + + kref_put(&(rdfx_qp->cnt), qp_release); + } +} +EXPORT_SYMBOL_GPL(rdfx_set_qp_attr); + +void rdfx_alloc_rdfx_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_mr_info *rdfx_mr; + unsigned long flags; + + rdfx_mr = kzalloc(sizeof(*rdfx_mr), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx_mr)) + return; + + rdfx_mr->pd = mr->pd; + rdfx_mr->mr = &mr->ibmr; + kref_init(&(rdfx_mr->cnt)); + atomic_inc(&rdfx->mr.alloc_mr_cnt); + + spin_lock_irqsave(&rdfx->mr.mr_lock, flags); + list_add_tail(&rdfx_mr->list, &rdfx->mr.list); + spin_unlock_irqrestore(&rdfx->mr.mr_lock, flags); +} + +/* This function should be called while get rdfx->mr.mr_lock */ +static struct rdfx_mr_info *rdfx_find_rdfx_mr(struct rdfx_info *rdfx, + unsigned long key) +{ + struct rdfx_mr_info *rdfx_mr; + struct list_head *pos; + struct list_head *q; + u32 is_existed = 0; + + list_for_each_safe(pos, q, &(rdfx->mr.list)) { + rdfx_mr = list_entry(pos, struct rdfx_mr_info, list); + if (key == rdfx_mr->mr->lkey) { + is_existed = 1; + break; + } + } + + if (!is_existed) + return NULL; + + return rdfx_mr; +} + +void rdfx_release_rdfx_mr(struct hns_roce_dev *hr_dev, unsigned long key) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; 
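rdfx_set_qp_attr() only records anything when IB_QP_ACCESS_FLAGS is present in attr_mask, so the caller can invoke it unconditionally after a successful modify-QP and let it filter. A hedged sketch of the assumed call site (the real modify path lives in the hw-specific driver and is not part of this file):

```c
/*
 * Assumed call-site sketch: record the new attributes in the DFX view only
 * after the hardware has accepted the modify-QP command, so the debug state
 * never runs ahead of the real QP state.  rdfx_set_qp_attr() itself ignores
 * calls whose attr_mask does not carry IB_QP_ACCESS_FLAGS.
 */
static int example_modify_qp_done(struct hns_roce_dev *hr_dev,
				  struct hns_roce_qp *hr_qp,
				  const struct ib_qp_attr *attr, int attr_mask,
				  enum ib_qp_state new_state, int hw_ret)
{
	if (hw_ret)
		return hw_ret;

	rdfx_set_qp_attr(hr_dev, hr_qp, attr, attr_mask, new_state);
	return 0;
}
```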
+ struct rdfx_mr_info *rdfx_mr; + unsigned long flags; + + spin_lock_irqsave(&rdfx->mr.mr_lock, flags); + + rdfx_mr = rdfx_find_rdfx_mr(rdfx, key); + if (!rdfx_mr) { + dev_err(hr_dev->dev, "find mr 0x%lx failed while release rdfx mr!\n", + key); + spin_unlock_irqrestore(&rdfx->mr.mr_lock, flags); + return; + } + list_del(&(rdfx_mr->list)); + + spin_unlock_irqrestore(&rdfx->mr.mr_lock, flags); + + kref_put(&(rdfx_mr->cnt), mr_release); +} + +void rdfx_alloc_rdfx_pd(struct hns_roce_dev *hr_dev, struct hns_roce_pd *pd) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_pd_info *rdfx_pd; + unsigned long flags; + + rdfx_pd = kzalloc(sizeof(struct rdfx_pd_info), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rdfx_pd)) + return; + + rdfx_pd->pdn = pd->pdn; + rdfx_pd->pd = &pd->ibpd; + kref_init(&(rdfx_pd->cnt)); + atomic_inc(&rdfx->pd.alloc_pd_cnt); + + if (pd->pdn > rdfx->pd.top_pd_index.counter) + atomic_set(&rdfx->pd.top_pd_index, (int)pd->pdn); + + spin_lock_irqsave(&rdfx->pd.pd_lock, flags); + list_add_tail(&rdfx_pd->list, &rdfx->pd.list); + spin_unlock_irqrestore(&rdfx->pd.pd_lock, flags); +} + +static struct rdfx_pd_info *rdfx_find_rdfx_pd(struct hns_roce_dev *hr_dev, + unsigned long pdn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_pd_info *rdfx_pd; + struct list_head *pos; + struct list_head *q; + u32 is_existed = 0; + + list_for_each_safe(pos, q, &(rdfx->pd.list)) { + rdfx_pd = list_entry(pos, struct rdfx_pd_info, list); + if (pdn == rdfx_pd->pdn) { + is_existed = 1; + break; + } + } + + if (!is_existed) + return NULL; + + return rdfx_pd; +} + +void rdfx_release_rdfx_pd(struct hns_roce_dev *hr_dev, unsigned long pdn) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_pd_info *rdfx_pd; + unsigned long flags; + + spin_lock_irqsave(&rdfx->pd.pd_lock, flags); + + rdfx_pd = rdfx_find_rdfx_pd(hr_dev, pdn); + if (!rdfx_pd) { + dev_err(hr_dev->dev, "find pd 0x%lx failed while release rdfx pd!\n", + pdn); + spin_unlock_irqrestore(&rdfx->pd.pd_lock, flags); + return; + } + list_del(&(rdfx_pd->list)); + + spin_unlock_irqrestore(&rdfx->pd.pd_lock, flags); + + kref_put(&(rdfx_pd->cnt), pd_release); +} + +void rdfx_set_rdfx_cq_ci(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq) +{ + struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; + struct rdfx_cq_info *rdfx_cq; + + spin_lock(&rdfx->cq.cq_lock); + + rdfx_cq = rdfx_find_rdfx_cq(rdfx, hr_cq->cqn); + if (!rdfx_cq) { + dev_err(hr_dev->dev, "find cq 0x%lx failed while set cq ci\n", + hr_cq->cqn); + spin_unlock(&rdfx->cq.cq_lock); + return; + } + + atomic_set(&rdfx_cq->ci, (int)(hr_cq->cons_index & 0xffffff)); + + spin_unlock(&rdfx->cq.cq_lock); +} +EXPORT_SYMBOL_GPL(rdfx_set_rdfx_cq_ci); +#endif diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.h b/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.h new file mode 100644 index 0000000000000000000000000000000000000000..c52acec3387d735485a68a92217738e1e5cf94d7 --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_intf.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _HNS_ROCE_INTF_H +#define _HNS_ROCE_INTF_H + +struct rdfx_qp_info *rdfx_find_rdfx_qp(struct rdfx_info *rdfx, + unsigned long qpn); +struct rdfx_cq_info *rdfx_find_rdfx_cq(struct rdfx_info *rdfx, + unsigned long cqn); + +#endif diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c new file mode 100644 index 
0000000000000000000000000000000000000000..3d7d8635110d96ab628ce236eb21457ec16583aa --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rdfx_main.h" +#include "rdfx_common.h" + +struct class *drv_class; +struct device *drv_device; +static int major; + +struct rdfx_top_info rdfx_top_info_list[MAX_IB_DEV]; + +const struct file_operations chr_ops = { + .owner = THIS_MODULE, +}; + +static const struct rdfx_dev_id rdfx_dev_tbl[] = { + {.name = "hisi_", .ops = NULL}, + {.name = "hns", .ops = &rdfx_ops_hw_v2}, +}; + +/* + * if can not find optstring, return -EINVAL; + * if find optstring, return 0 + * if there's input value, parg will be set with input + * if there's no input value, parg will be set to '\0' + */ +int parg_getopt(char *input, char *optstring, char *parg) +{ + char *_input; + char *p; + int cnt = 0; + char _optstring[3]; + + if (input == NULL || optstring == NULL) + return -EINVAL; + _input = kmalloc(strlen(input) + 1, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(_input)) + return -ENOMEM; + strcpy(_input, input); + _optstring[0] = '-'; + _optstring[1] = optstring[0]; + _optstring[2] = '\0'; + p = strstr(_input, _optstring); + if (!p) { + kfree(_input); + return -EINVAL; + } + if (optstring[1] == '\0' || parg == NULL) { + kfree(_input); + return 0; + } + p += 2; + while (*p == ' ') + p++; + while (*p != ' ' && *p != '\0') { + p++; + cnt++; + } + if (cnt >= DEF_OPT_STR_LEN) { + kfree(_input); + return -EINVAL; + } + *p = '\0'; + p -= cnt; + strcpy(parg, p); + kfree(_input); + + return 0; +} + +static char *strtok_r(char *s, const char *delim, char **save_ptr) +{ + char *token; + + if (s == NULL) + s = *save_ptr; + + /* Scan to move after delimiters */ + s += strspn(s, delim); + if (*s == (char)0) + return NULL; + + /* Find the end of the token. */ + token = s; + s = strpbrk(token, delim); + if (s == NULL)/* This token finishes the string. */ + *save_ptr = strchr(token, 0); + else {/* Terminate the token and make *SAVE_PTR point past it. 
*/ + *s = 0; + *save_ptr = s + 1; + } + + return token; +} + +char *strtok(char *s, const char *delim) +{ + static char *last; + + return strtok_r(s, delim, &last); +} + +int str_to_ll(char *p_buf, unsigned long long *pll_val, unsigned int *num) +{ + unsigned long long lng = 0; + long long convert_val; + char *p = NULL; + char delim[] = ","; + unsigned int idx = 0; + unsigned int i = 0; + unsigned long long *arr = NULL; + int ret = 0; + + arr = kzalloc(sizeof(unsigned long long) * + SYSFS_MAX_PARA, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(arr)) + return -ENOMEM; + + p = strtok(p_buf, delim); + while (p) { + if (kstrtoll(p, 0, &convert_val)) { + pr_info("convert str failed\n"); + ret = -EINVAL; + goto out; + } + lng = (unsigned long long)convert_val; + arr[idx] = lng; + pr_info("arr[%u] = 0x%llx\n", idx, arr[idx]); + idx++; + if (idx >= SYSFS_MAX_PARA) { + pr_err("sub string num should not bigger than 16\n"); + ret = -EINVAL; + goto out; + } + p = strtok(NULL, delim); + } + + *num = idx; + for (i = 0; i < idx; i++) + *(pll_val + i) = arr[i]; + +out: + kfree(arr); + + return ret; +} + +int str_match(char *s, const char *delim) +{ + int num = strspn(s, delim); + + if (num != 0) + return 1; + else + return 0; +} + +int check_input(char *buf, unsigned long long *a_val, unsigned int max, + unsigned int min, unsigned int *param_num) +{ + int ret; + + ret = str_to_ll(buf, a_val, param_num); + if (ret) { + pr_info("parse input string err.\r\n"); + return ret; + } + + if ((*param_num > max) || (*param_num < min)) { + pr_info("para num(0x%x) is invalid.\n", *param_num); + return -EINVAL; + } + + return 0; +} + +struct rdfx_info *rdfx_find_rdfx_info(char *dev_name) +{ + int i; + + if (!strlen(dev_name)) + return NULL; + + for (i = 0; i < MAX_IB_DEV; i++) { + if (!rdfx_top_info_list[i].dev) + continue; + if (!memcmp(dev_name, + (rdfx_top_info_list[i].rdfx)->dev.dev_name, + strlen((rdfx_top_info_list[i].rdfx)->dev.dev_name))) { + pr_info("get rdfx info, name:%s, num: %d", + dev_name, i); + return rdfx_top_info_list[i].rdfx; + } + } + + return NULL; +} + +void *rdfx_buf_offset(struct dfx_buf *buf, int offset) +{ + u32 bits_per_long_val = BITS_PER_LONG; + u32 page_size = 1 << buf->page_shift; + + if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) || + buf->nbufs == 1) + return buf->direct.buf ? + (void *)((char *)(buf->direct.buf) + offset) : NULL; + else + return (buf->page_list && + buf->page_list[offset >> buf->page_shift].buf) ? 
+ (void *)((char *) + (buf->page_list[offset >> buf->page_shift].buf) + + (offset & (page_size - 1))) : NULL; +} + +static int rdfx_info_init(struct ib_device *ib_dev, int i) +{ + int j; + + for (j = 0; j < sizeof(rdfx_dev_tbl)/sizeof(struct rdfx_dev_id); j++) { + if (!memcmp(rdfx_dev_tbl[j].name, ib_dev->name, + strlen(rdfx_dev_tbl[j].name)) && rdfx_dev_tbl[j].ops) { + rdfx_top_info_list[i].rdfx = + rdfx_dev_tbl[j].ops->get_dfx(ib_dev); + (rdfx_top_info_list[i].rdfx)->ops = rdfx_dev_tbl[j].ops; + (rdfx_top_info_list[i].rdfx)->drv_dev = drv_device; + + memset(&((rdfx_top_info_list[i].rdfx)->kobj), 0, + sizeof(struct kobject)); + strlcpy((rdfx_top_info_list[i].rdfx)->dev.dev_name, + ib_dev->name, IB_DEVICE_NAME_MAX); + pr_info("init dev %s success\n", ib_dev->name); + break; + } + } + + if (!rdfx_top_info_list[i].rdfx) { + pr_err("dev(%s) not support\n", ib_dev->name); + return -EINVAL; + } + return 0; +} + +static void rdfx_add_device(struct ib_device *ib_dev) +{ + int i = 0; + int ret = 0; + struct rdfx_ops *ops = NULL; + + for (i = 0; i < MAX_IB_DEV; i++) + if (!rdfx_top_info_list[i].dev) + break; + if (i == MAX_IB_DEV) { + pr_err("rdfx add device failed, rdfx top info list is full\n."); + return; + } + + rdfx_top_info_list[i].dev = ib_dev; + pr_info("rdfx add ib device(%pK), idx - %d, name - %s\n", + ib_dev, i, ib_dev->name); + ret = rdfx_info_init(ib_dev, i); + if (ret) { + pr_err("rdfx info init failed\n"); + rdfx_top_info_list[i].dev = NULL; + return; + } + + ops = (rdfx_top_info_list[i].rdfx)->ops; + ret = ops->add_sysfs(rdfx_top_info_list[i].rdfx); + if (ret) { + rdfx_top_info_list[i].rdfx = NULL; + rdfx_top_info_list[i].dev = NULL; + pr_err("rdfx add hw sysfs failed\n"); + } +} + +static void rdfx_remove_device(struct ib_device *ib_dev, void *client_data) +{ + int i = 0; + struct rdfx_ops *ops = NULL; + + for (i = 0; i < MAX_IB_DEV; i++) { + if (rdfx_top_info_list[i].dev && + (rdfx_top_info_list[i].dev == ib_dev)) { + pr_info("rdfx rm ib device(%pK), idx - %d, name - %s\n", + ib_dev, i, ib_dev->name); + ops = (rdfx_top_info_list[i].rdfx)->ops; + ops->del_sysfs(rdfx_top_info_list[i].rdfx); + memset(&rdfx_top_info_list[i], 0, + sizeof(struct rdfx_top_info)); + } + } +} + +struct ib_client rdfx_client = { + .name = "rdfx_client", + .add = rdfx_add_device, + .remove = rdfx_remove_device, +}; + +static int __init rdfx_init(void) +{ + int ret = 0; + + major = register_chrdev(0, DFX_DEVICE_NAME, &chr_ops); + if (major < 0) { + pr_err("Sorry, register the character device failed\n "); + return major; + } + + /*default content:/sys/class */ + drv_class = class_create(THIS_MODULE, DFX_DEVICE_NAME); + if (IS_ERR(drv_class)) { + pr_err("rdfx register client failed\n"); + goto class_create_failed; + } + drv_device = device_create(drv_class, NULL, MKDEV(major, 0), + NULL, DFX_DEVICE_NAME); + if (IS_ERR(drv_device)) { + pr_err("rdfx register device failed\n"); + goto device_regist_failed; + } + + memset(rdfx_top_info_list, 0, sizeof(rdfx_top_info_list)); + + ret = ib_register_client(&rdfx_client); + if (ret) { + pr_err("rdfx register client failed, ret = %d\n", ret); + goto register_client_failed; + } + + /*init and add kobjects*/ + ret = rdfx_add_common_sysfs(drv_device); + if (ret) { + pr_err("rdfx add common sysfs failed, ret = %d\n", ret); + goto add_common_sysfs_failed; + } + + return 0; + +add_common_sysfs_failed: + ib_unregister_client(&rdfx_client); +register_client_failed: + device_unregister(drv_device); +device_regist_failed: + class_destroy(drv_class); +class_create_failed: + 
unregister_chrdev(major, DFX_DEVICE_NAME); + + return ret; +} + +void __exit rdfx_exit(void) +{ + pr_info("rmmod rdfx module\n"); + + rdfx_del_common_sysfs(); + ib_unregister_client(&rdfx_client); + + device_destroy(drv_class, MKDEV(major, 0)); + class_destroy(drv_class); + unregister_chrdev(major, DFX_DEVICE_NAME); +} + +module_init(rdfx_init); +module_exit(rdfx_exit); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE DFx Driver"); +MODULE_LICENSE("Dual BSD/GPL"); + diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_main.h b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.h new file mode 100644 index 0000000000000000000000000000000000000000..e4865c0130a941ca025089a71971964879b9281a --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __HI1610_ROCE_KTEST_H__ +#define __HI1610_ROCE_KTEST_H__ + +#define DFX_DEVICE_NAME "rdma_dfx" + +#ifndef SYSFS_PAGE_SIZE +#define SYSFS_PAGE_SIZE (4096) /* sysfs�ļ��Ĵ�С */ +#endif +#define SYSFS_MAX_PARA (16) +#define MAX_IB_DEV (12) +#define CQE_SIZE (32) + +#define DEF_OPT_STR_LEN (10) + +int rdfx_add_common_sysfs(struct device *p_dev); +void rdfx_del_common_sysfs(void); + +int parg_getopt(char *input, char *optstring, char *parg); +char *strtok(char *s, const char *delim); +int str_to_ll(char *p_buf, unsigned long long *pll_val, unsigned int *num); +int str_match(char *s, const char *delim); +int check_input(char *buf, unsigned long long *a_val, unsigned int max, + unsigned int min, unsigned int *param_num); +struct rdfx_info *rdfx_find_rdfx_info(char *dev_name); + +struct dfx_buf_list { + void *buf; + dma_addr_t map; +}; + +struct dfx_buf { + struct dfx_buf_list direct; + struct dfx_buf_list *page_list; + int nbufs; + u32 npages; + unsigned int page_shift; +}; + +extern struct rdfx_ops rdfx_ops_hw_v2; + +void *rdfx_buf_offset(struct dfx_buf *buf, int offset); + +#endif diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..25f73967e4fd2534d24b78fe2fda6511e4192cc0 --- /dev/null +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2016-2017 Hisilicon Limited. 
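The store handlers in this file all parse their input the same way: parg_getopt() pulls individual "-x value" options out of the written string and kstrtoll() converts numeric arguments, so a single write can carry the device name and the object index together. A condensed sketch of that parsing pattern, with a made-up device name used purely for illustration:

```c
/*
 * Condensed parsing sketch, mirroring the handlers below.  A write such as
 * "-d hns_0 -v 0x2" (the device name is just an example) selects the device
 * with "-d" and the object number (pdn/qpn/cqn/key/ceqn) with "-v".
 */
static int example_parse_store_input(const char *p_buf)
{
	char dev_name[DEF_OPT_STR_LEN] = {0};
	char str[DEF_OPT_STR_LEN] = {0};
	long long val;

	if (parg_getopt((char *)p_buf, "d:", dev_name))
		return -EINVAL;		/* "-d <ibdev name>" is mandatory */

	if (parg_getopt((char *)p_buf, "v:", str) ||
	    kstrtoll(str, 0, &val))	/* base 0: accepts decimal or 0x... */
		return -EINVAL;

	pr_info("dev %s, object 0x%llx\n", dev_name, val);
	return 0;
}
```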
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rdfx_common.h" +#include "rdfx_main.h" + +static struct kobject rdfx_common_kobj; + +static ssize_t rdfx_common_show(struct kobject *kobj, struct attribute *attr, + char *buf); +static ssize_t rdfx_common_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count); + +static int show_pd_detail(struct rdfx_pd_info *rdfx_pd) +{ + pr_info("***************** PD INFO *****************\n"); + + return 0; +} + +static int rdfx_pd_store(const char *p_buf) +{ + struct rdfx_pd_info *rdfx_pd; + char *buf = (char *)p_buf; + struct rdfx_info *rdfx; + long long convert_val; + char dev_name[DEF_OPT_STR_LEN] = {0}; + char str[DEF_OPT_STR_LEN] = {0}; + u32 pdn = 0; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("pd: can't find device of %s\n", dev_name); + return -EINVAL; + } + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + pdn = (u32)convert_val; + + pr_info("***************** PD(0x%x) INFO *****************\n", + pdn); + pr_info("alloc_pd_cnt : 0x%x\n", + atomic_read(&(rdfx->pd.alloc_pd_cnt))); + pr_info("dealloc_pd_cnt : 0x%x\n", + atomic_read(&(rdfx->pd.dealloc_pd_cnt))); + pr_info("top_pd_index : 0x%x\n", + atomic_read(&(rdfx->pd.top_pd_index))); + + list_for_each_entry(rdfx_pd, &(rdfx->pd.list), list) { + if (pdn == rdfx_pd->pdn) + return show_pd_detail(rdfx_pd); + } + + pr_err("pd index(0x%x) is invalid\n", pdn); + return -EINVAL; +} + +static int show_qp_detail(struct rdfx_qp_info *rdfx_qp) +{ + pr_info("***************** SQ INFO *****************\n"); + pr_info("sq_wqe_cnt:\n"); + + pr_info("IB_WR_RDMA_WRITE IB_WR_RDMA_WRITE_WITH_IMM\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_RDMA_WRITE]), + atomic_read( + &rdfx_qp->sq.sq_wqe_cnt[IB_WR_RDMA_WRITE_WITH_IMM])); + + pr_info("IB_WR_SEND IB_WR_RDMA_READ\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_SEND]), + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_RDMA_READ])); + + pr_info("IB_WR_SEND_WITH_INV IB_WR_SEND_WITH_IMM\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_SEND_WITH_INV]), + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_SEND_WITH_IMM])); + + pr_info("IB_WR_ATOMIC_FETCH_AND_ADD IB_WR_ATOMIC_CMP_AND_SWP\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read( + &rdfx_qp->sq.sq_wqe_cnt[IB_WR_ATOMIC_FETCH_AND_ADD]), + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_ATOMIC_CMP_AND_SWP])); + + pr_info("IB_WR_MASKED_ATOMIC_FETCH_AND_ADD IB_WR_MASKED_ATOMIC_CMP_AND_SWP\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read( + &rdfx_qp->sq.sq_wqe_cnt[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]), + atomic_read( + &rdfx_qp->sq.sq_wqe_cnt[IB_WR_MASKED_ATOMIC_CMP_AND_SWP])); + + pr_info("IB_WR_REG_MR IB_WR_LOCAL_INV\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_REG_MR]), + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_LOCAL_INV])); + + pr_info("IB_WR_RDMA_READ_WITH_INV IB_WR_LSO\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_RDMA_READ_WITH_INV]), + atomic_read(&rdfx_qp->sq.sq_wqe_cnt[IB_WR_LSO])); + + + pr_info("\n"); + pr_info("sig_wqe_cnt db_cnt inline_cnt\n"); + pr_info(" 0x%x 0x%x 0x%x\n", + atomic_read(&rdfx_qp->sq.sig_wqe_cnt), + atomic_read(&rdfx_qp->sq.db_cnt), + atomic_read(&rdfx_qp->sq.inline_cnt)); + pr_info("\n"); + 
pr_info("***************** RQ INFO *****************\n"); + pr_info("rq_wqe_cnt db_cnt inline_cnt\n"); + pr_info(" 0x%x 0x%x 0x%x\n", + atomic_read(&rdfx_qp->rq.rq_wqe_cnt), + atomic_read(&rdfx_qp->rq.db_cnt), + atomic_read(&rdfx_qp->rq.inline_cnt)); + pr_info("\n"); + pr_info("***************** QP ATTR *****************\n"); + pr_info("state : 0x%x\n", + atomic_read(&rdfx_qp->attr.state)); + pr_info("read_en : 0x%x\n", rdfx_qp->attr.read_en); + pr_info("write_en : 0x%x\n", rdfx_qp->attr.write_en); + pr_info("fast_reg_en : 0x%x\n", rdfx_qp->attr.fast_reg_en); + pr_info("atomic_en : 0x%x\n", rdfx_qp->attr.atomic_en); + pr_info("max_ord : 0x%x\n", rdfx_qp->attr.max_ord); + pr_info("max_ird : 0x%x\n", rdfx_qp->attr.max_ird); + pr_info("pd_id : 0x%x\n", rdfx_qp->attr.pd_id); + pr_info("err_rode : 0x%x\n", rdfx_qp->attr.err_code); + pr_info("max_send_sge : 0x%x\n", rdfx_qp->attr.max_sge[0]); + pr_info("max_recv_sge : 0x%x\n", rdfx_qp->attr.max_sge[1]); + + return 0; +} + +static void show_valid_qpn(struct list_head *head) +{ + struct rdfx_qp_info *rdfx_qp; + int line_len = 0; + + pr_info("current valid qpn:\n"); + list_for_each_entry(rdfx_qp, head, list) { + if (rdfx_qp->qp != NULL) { + pr_info("0x%lx ", rdfx_qp->qpn); + line_len++; + if (line_len == 10) { + pr_info("\n"); + line_len = 0; + } + + } + } + pr_info("\n"); +} + +static int show_wqe(struct rdfx_qp_info *rdfx_qp, u32 sq_or_rq, int wqe_index) +{ + u32 *wqe; + + if (sq_or_rq == 1) { + wqe_index = wqe_index & (rdfx_qp->sq.sq_depth - 1); + wqe = rdfx_buf_offset(rdfx_qp->buf, rdfx_qp->sq.offset + + (wqe_index << rdfx_qp->sq.sq_wqe_size)); + } else if (sq_or_rq == 2) { + wqe_index = wqe_index & (rdfx_qp->sq.sq_depth - 1); + wqe = rdfx_buf_offset(rdfx_qp->buf, rdfx_qp->rq.offset + + (wqe_index << rdfx_qp->rq.rq_wqe_size)); + } + if (wqe) + pr_info("%08x %08x %08x %08x %08x %08x %08x %08x\n", + *wqe, *(wqe + 1), *(wqe + 2), *(wqe + 3), + *(wqe + 4), *(wqe + 5), *(wqe + 6), *(wqe + 7)); + else + pr_info("wqe buf was not alloced\n"); + + return 0; +} + +static int show_cqe(struct rdfx_cq_info *rdfx_cq, int cqe_index) +{ + u32 *cqe; + + rdfx_cq->cqe_size = CQE_SIZE; + cqe_index = cqe_index & (rdfx_cq->cq_depth - 1); + cqe = rdfx_buf_offset(rdfx_cq->buf, (cqe_index * rdfx_cq->cqe_size)); + if (cqe) + pr_info("%08x %08x %08x %08x %08x %08x %08x %08x\n", + *cqe, *(cqe + 1), *(cqe + 2), *(cqe + 3), + *(cqe + 4), *(cqe + 5), *(cqe + 6), *(cqe + 7)); + else + pr_info("cqe buf was not alloced\n"); + + return 0; +} + +static inline int rdfx_convert_str(const char *str, u32 *val) +{ + long long convert_val; + + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + *val = (u32)convert_val; + + return 0; +} + +static inline int rdfx_show_qp_wqe(const char *sq_rq, void *buf, u32 qpn, + struct rdfx_info *rdfx) +{ + struct rdfx_qp_info *rdfx_qp; + char str[DEF_OPT_STR_LEN] = {0}; + u32 sq_or_rq = 0; + u32 wqe_index = 0; + + if (!memcmp(sq_rq, "sq", strlen("sq"))) { + sq_or_rq = 1; + parg_getopt(buf, "i:", str); + if (rdfx_convert_str(str, &wqe_index)) + return -EINVAL; + + pr_info("show sq(0x%x) wqe(0x%x) info:\n", qpn, wqe_index); + } + + if (!memcmp(sq_rq, "rq", strlen("rq"))) { + sq_or_rq = 2; + parg_getopt(buf, "i:", str); + if (rdfx_convert_str(str, &wqe_index)) + return -EINVAL; + + pr_info("show rq(0x%x) wqe(0x%x) info:\n", qpn, wqe_index); + } + + if (sq_or_rq) { + list_for_each_entry(rdfx_qp, &(rdfx->qp.list), list) { + if (qpn == rdfx_qp->qpn) + return show_wqe(rdfx_qp, sq_or_rq, wqe_index); + } + 
pr_err("QPN %u is not in dfx list!\n", qpn); + } + + return -EINVAL; +} + +static int rdfx_qp_store(const char *p_buf) +{ + struct rdfx_qp_info *rdfx_qp; + struct rdfx_info *rdfx; + char *buf = (char *)p_buf; + char dev_name[DEF_OPT_STR_LEN] = {0}; + char str[DEF_OPT_STR_LEN] = {0}; + char sq_rq[DEF_OPT_STR_LEN] = {0}; + u32 qpn; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("cann't find dev of %s\n", dev_name); + return -EINVAL; + } + + if (!parg_getopt(buf, "a", NULL)) { + show_valid_qpn(&(rdfx->qp.list)); + return 0; + } + + parg_getopt(buf, "v:", str); + if (rdfx_convert_str(str, &qpn)) + return -EINVAL; + + if (!parg_getopt(buf, "s:", sq_rq)) + return rdfx_show_qp_wqe(sq_rq, buf, qpn, rdfx); + + pr_info("***************** QP(0x%x) INFO *****************\n", qpn); + pr_info("alloc_qp_cnt : 0x%x\n", + atomic_read(&rdfx->qp.alloc_qp_cnt)); + pr_info("dealloc_qp_cnt : 0x%x\n", + atomic_read(&rdfx->qp.dealloc_qp_cnt)); + pr_info("top_qp_index : 0x%x\n", + atomic_read(&rdfx->qp.top_qp_index)); + + list_for_each_entry(rdfx_qp, &(rdfx->qp.list), list) { + if (qpn == rdfx_qp->qpn) + return show_qp_detail(rdfx_qp); + } + pr_err("qp index(0x%x) is invalid\n", qpn); + + return -EINVAL; +} + +static int show_cq_detail(struct rdfx_cq_info *rdfx_cq) +{ + + pr_info("***************** CQ INFO *****************\n"); + pr_info("scqe_cnt:\n"); + pr_info("RDMA_READ RDMA_WRITE\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_RDMA_READ]), + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_RDMA_WRITE])); + pr_info("RDMA_WRITE_WITH_IMM SEND\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_RDMA_WRITE_WITH_IMM]), + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_SEND])); + pr_info("SEND_WITH_INV SEND_WITH_IMM\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_SEND_WITH_INV]), + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_SEND_WITH_IMM])); + pr_info("LOCAL_INV ATOMIC_CMP_AND_SWP\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_LOCAL_INV]), + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_ATOMIC_CMP_AND_SWP])); + pr_info("ATOMIC_FETCH_AND_ADD MASKED_ATOMIC_CMP_AND_SWP\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->scqe_cnt[IB_WR_ATOMIC_FETCH_AND_ADD]), + atomic_read( + &rdfx_cq->scqe_cnt[IB_WR_MASKED_ATOMIC_CMP_AND_SWP])); + pr_info("MASKED_ATOMIC_FETCH_AND_ADD : 0x%x\n", + atomic_read( + &rdfx_cq->scqe_cnt[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD])); + pr_info("\n"); + + pr_info("st_cnt:\n"); + pr_info("IB_WC_LOC_LEN_ERR IB_WC_LOC_QP_OP_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_LOC_LEN_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_LOC_QP_OP_ERR])); + pr_info("IB_WC_LOC_PROT_ERR IB_WC_WR_FLUSH_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_LOC_PROT_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_WR_FLUSH_ERR])); + pr_info("IB_WC_MW_BIND_ERR IB_WC_BAD_RESP_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_MW_BIND_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_BAD_RESP_ERR])); + pr_info("IB_WC_LOC_ACCESS_ERR IB_WC_REM_INV_REQ_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_LOC_ACCESS_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_REM_INV_REQ_ERR])); + pr_info("IB_WC_REM_ACCESS_ERR IB_WC_REM_OP_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_REM_ACCESS_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_REM_OP_ERR])); + pr_info("IB_WC_RETRY_EXC_ERR IB_WC_RNR_RETRY_EXC_ERR\n"); + pr_info(" 0x%x 0x%x\n", + 
atomic_read(&rdfx_cq->st_cnt[IB_WC_RETRY_EXC_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_RNR_RETRY_EXC_ERR])); + pr_info("IB_WC_REM_ABORT_ERR IB_WC_GENERAL_ERR\n"); + pr_info(" 0x%x 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_REM_ABORT_ERR]), + atomic_read(&rdfx_cq->st_cnt[IB_WC_GENERAL_ERR])); + pr_info("IB_WC_SUCCESS : 0x%x\n", + atomic_read(&rdfx_cq->st_cnt[IB_WC_SUCCESS])); + pr_info("\n"); + pr_info("arm_cnt:\n"); + pr_info("IB_CQ_SOLICITED : 0x%x\n", + atomic_read(&rdfx_cq->arm_cnt[0])); + pr_info("IB_CQ_NEXT_COMP : 0x%x\n", + atomic_read(&rdfx_cq->arm_cnt[1])); + pr_info("CQ_CI : 0x%x\n", atomic_read(&rdfx_cq->ci)); + + return 0; +} + +static void show_valid_cqn(struct list_head *head) +{ + struct rdfx_cq_info *rdfx_cq; + int line_len = 0; + + pr_info("current valid cqn:\n"); + list_for_each_entry(rdfx_cq, head, list) { + pr_info("0x%lx ", rdfx_cq->cqn); + line_len++; + if (line_len == 10) { + pr_info("\n"); + line_len = 0; + } + } + pr_info("\n"); +} + +static inline int rdfx_show_cq_detail(u32 cqn, struct rdfx_info *rdfx) +{ + struct rdfx_cq_info *rdfx_cq = NULL; + struct hns_roce_dev *hr_dev; + struct hns_roce_cq *cq = NULL; + + hr_dev = (struct hns_roce_dev *)rdfx->priv; + + pr_info("***************** CQ(0x%x) INFO *****************\n", cqn); + pr_info("alloc_cq_cnt : 0x%x\n", + atomic_read(&rdfx->cq.alloc_cq_cnt)); + pr_info("dealloc_cq_cnt : 0x%x\n", + atomic_read(&rdfx->cq.dealloc_cq_cnt)); + pr_info("top_cq_index : 0x%x\n", + atomic_read(&rdfx->cq.top_cq_index)); + + cq = radix_tree_lookup(&hr_dev->cq_table.tree, + cqn & (u32)(hr_dev->caps.num_cqs - 1)); + if (cq) + pr_info("arm_sn_cnt : 0x%x\n", cq->arm_sn); + + list_for_each_entry(rdfx_cq, &rdfx->cq.list, list) { + if (cqn == rdfx_cq->cqn) + return show_cq_detail(rdfx_cq); + } + + pr_info("cq index(0x%x) is invalid\n", cqn); + return -EINVAL; +} + +static int rdfx_cq_store(const char *p_buf) +{ + struct rdfx_cq_info *rdfx_cq = NULL; + struct rdfx_info *rdfx; + char *buf = (char *)p_buf; + char dev_name[DEF_OPT_STR_LEN] = {0}; + char str[DEF_OPT_STR_LEN] = {0}; + u32 cqe_index = 0; + u32 cqn = 0; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("cq: can't find device of %s\n", dev_name); + return -EINVAL; + } + + if (!parg_getopt(buf, "a", NULL)) { + show_valid_cqn(&(rdfx->cq.list)); + return 0; + } + + parg_getopt(buf, "v:", str); + if (rdfx_convert_str(str, &cqn)) + return -EINVAL; + + if (!parg_getopt(buf, "i:", str)) { + if (rdfx_convert_str(str, &cqe_index)) + return -EINVAL; + pr_info("show cq(0x%x) cqe(0x%x) info:\n", cqn, cqe_index); + list_for_each_entry(rdfx_cq, &rdfx->cq.list, list) { + if (cqn == rdfx_cq->cqn) + return show_cqe(rdfx_cq, cqe_index); + } + pr_err("CQN %u is not in dfx list!\n", cqn); + return -EINVAL; + } + + return rdfx_show_cq_detail(cqn, rdfx); +} + +static int show_mr_detail(struct rdfx_mr_info *rdfx_mr) +{ + + pr_info("***************** MR INFO *****************\n"); + + return 0; +} + +static int rdfx_mr_store(const char *p_buf) +{ + struct rdfx_mr_info *rdfx_mr; + char *buf = (char *)p_buf; + struct rdfx_info *rdfx; + long long convert_val; + char dev_name[DEF_OPT_STR_LEN] = {0}; + char str[DEF_OPT_STR_LEN] = {0}; + u32 key; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("mr: can't find device of %s\n", dev_name); + return -EINVAL; + } + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + key = 
(u32)convert_val; + + pr_info("**************** MR(0x%x) INFO ****************\n", key); + pr_info("alloc_mr_cnt : 0x%x\n", + atomic_read(&rdfx->mr.alloc_mr_cnt)); + pr_info("dealloc_mr_cnt : 0x%x\n", + atomic_read(&rdfx->mr.dealloc_mr_cnt)); + pr_info("top_mr_index : 0x%x\n", + atomic_read(&rdfx->mr.top_mr_index)); + + list_for_each_entry(rdfx_mr, &rdfx->mr.list, list) { + if (key == rdfx_mr->mr->lkey) + return show_mr_detail(rdfx_mr); + } + + pr_info("mr index(0x%x) is invalid.\n", key); + + return -EINVAL; +} + +static int show_ceq_detail(struct rdfx_ceq_info *rdfx_ceq) +{ + pr_info("\n"); + pr_info("***************** CEQ INFO *****************\n"); + pr_info("*ceqn: %lu\n", rdfx_ceq->ceqn); + pr_info("*ceqe_cnt: %d\n", rdfx_ceq->ceqe_cnt.counter); + + return 0; +} + +static int rdfx_eq_store(const char *p_buf) +{ + struct rdfx_ceq_info *rdfx_ceq; + long long convert_val; + char *buf = (char *)p_buf; + struct rdfx_info *rdfx; + char dev_name[DEF_OPT_STR_LEN] = {0}; + char str[DEF_OPT_STR_LEN] = {0}; + u32 ceqn; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("eq: can't find device of %s\n", dev_name); + return -EINVAL; + } + + parg_getopt(buf, "v:", str); + if (kstrtoll(str, 0, &convert_val)) { + pr_info("convert str failed\n"); + return -EINVAL; + } + ceqn = (u32)convert_val; + + list_for_each_entry(rdfx_ceq, &rdfx->eq.ceq_list, list) { + if (ceqn == rdfx_ceq->ceqn) + return show_ceq_detail(rdfx_ceq); + } + + pr_info("ceq index(0x%x) is invalid.\n", ceqn); + + return -EINVAL; +} + +static int roce_dev_store(const char *p_buf) +{ + char *buf = (char *)p_buf; + struct rdfx_info *rdfx; + char dev_name[DEF_OPT_STR_LEN] = {0}; + int i; + + parg_getopt(buf, "d:", dev_name); + rdfx = rdfx_find_rdfx_info(dev_name); + if (!rdfx) { + pr_err("cann't find dev of %s\n", dev_name); + return -EINVAL; + } + + pr_info("***************** DEV INFO ******************\n"); + for (i = 0; i < RDFX_FUNC_MAX; i++) + pr_info("intf_cnt[%s]: 0x%x\n", + rdfx_func_name[i], atomic_read(&rdfx->dev.fc[i])); + + return 0; +} + +/**************** kobject attribute ****************/ +struct rdfx_common_sys_attr { + struct attribute attr; + int (*pub_show)(void); + int (*pub_store)(const char *buf); +}; + +#define rdfx_common_file_attr_def(file_name, func_show, func_store) \ + static struct rdfx_common_sys_attr g_rdfx_common_##file_name##_attr = {\ + {\ + .name = #file_name,\ + .mode = 0640,\ + },\ + .pub_show = func_show,\ + .pub_store = func_store,\ + } + +rdfx_common_file_attr_def(pd, NULL, rdfx_pd_store); +rdfx_common_file_attr_def(qp, NULL, rdfx_qp_store); +rdfx_common_file_attr_def(cq, NULL, rdfx_cq_store); +rdfx_common_file_attr_def(mr, NULL, rdfx_mr_store); +rdfx_common_file_attr_def(eq, NULL, rdfx_eq_store); +rdfx_common_file_attr_def(dev, NULL, roce_dev_store); + +#define COMM_ATTRS_LIST_MEMBER(file_name) \ + (&g_rdfx_common_##file_name##_attr.attr) +static struct attribute *rdfx_common_attrs_list[] = { + COMM_ATTRS_LIST_MEMBER(pd), + COMM_ATTRS_LIST_MEMBER(qp), + COMM_ATTRS_LIST_MEMBER(cq), + COMM_ATTRS_LIST_MEMBER(mr), + COMM_ATTRS_LIST_MEMBER(eq), + COMM_ATTRS_LIST_MEMBER(dev), + NULL +}; + +static const struct sysfs_ops rdfx_common_file_ops = { + .show = rdfx_common_show, + .store = rdfx_common_store, +}; + +static struct kobj_type rdfx_common_kobj_ktype = { + .release = NULL, + .sysfs_ops = &rdfx_common_file_ops, + .default_attrs = rdfx_common_attrs_list, +}; + +static ssize_t rdfx_common_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ 
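New debug nodes are wired up by declaring an attribute with rdfx_common_file_attr_def() and adding it to rdfx_common_attrs_list[]; the sysfs store dispatcher then routes writes to the attribute's pub_store callback. A sketch of what one additional node would look like (the "srq" name and its handler are hypothetical):

```c
/*
 * Hypothetical example of adding one more debug node: define a store
 * handler, declare the attribute with the macro above, and list it in
 * rdfx_common_attrs_list[] so it appears as /sys/.../common/srq.
 */
static int rdfx_srq_store(const char *p_buf)
{
	pr_info("srq debug node written: %s\n", p_buf);
	return 0;
}

rdfx_common_file_attr_def(srq, NULL, rdfx_srq_store);
```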
+ struct rdfx_common_sys_attr *p_roce_sys_attr = + container_of(attr, struct rdfx_common_sys_attr, attr); + int ret = 0; + + memset(buf, 0, SYSFS_PAGE_SIZE); + if (p_roce_sys_attr->pub_show) { + ret = p_roce_sys_attr->pub_show(); + if (ret) + return ret; + else + return strlen(buf); + } + + return -EPERM; +} + +static ssize_t rdfx_common_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct rdfx_common_sys_attr *p_roce_sys_attr = + container_of(attr, struct rdfx_common_sys_attr, attr); + int ret = 0; + + if (p_roce_sys_attr->pub_store) { + ret = p_roce_sys_attr->pub_store((char *)buf); + return ret ? ret : count; + } + + return -EPERM; +} + +int rdfx_add_common_sysfs(struct device *p_dev) +{ + int ret = 0; + + ret = kobject_init_and_add(&rdfx_common_kobj, + &rdfx_common_kobj_ktype, + &(p_dev->kobj), "common"); + if (ret) { + pr_info("kobject_init_and_add failed!\r\n"); + return ret; + } + + return ret; +} + +void rdfx_del_common_sysfs(void) +{ + kobject_del(&rdfx_common_kobj); +} + diff --git a/drivers/infiniband/hw/hns/roce_k_compat.h b/drivers/infiniband/hw/hns/roce_k_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..cc997ffc59a5ec8da8a47dbc660c9278fba79b5d --- /dev/null +++ b/drivers/infiniband/hw/hns/roce_k_compat.h @@ -0,0 +1,324 @@ +#ifndef _ROCE_K_COMPAT_H +#define _ROCE_K_COMPAT_H + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifndef PCI_VENDOR_ID_HUAWEI +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + +/** + * OFED didn't provide a version code + * !!!!! This is a TEMPORARILY solution !!!!! + */ + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) +typedef unsigned long long __u64; + +#if defined(__GNUC__) +typedef __u64 uint64_t; +#endif + +typedef uint64_t u64; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#undef pci_irq_vector +#define pci_irq_vector _kc_pci_irq_vector +#ifdef CONFIG_PCI_MSI +#include +#include +/** + * pci_irq_vector - return Linux IRQ number of a device vector + * @dev: PCI device to operate on + * @nr: device-relative interrupt vector index (0-based). + */ +static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (dev->msix_enabled) { + struct msi_desc *entry; + int i = 0; + + for_each_pci_msi_entry(entry, dev) { + if (i == nr) + return entry->irq; + i++; + } + WARN_ON_ONCE(1); + return -EINVAL; + } + + if (dev->msi_enabled) { + struct msi_desc *entry = first_pci_msi_entry(dev); + + if (WARN_ON_ONCE(nr >= entry->nvec_used)) + return -EINVAL; + } else { + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + } + + return dev->irq + nr; +} +#else +static inline int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + return dev->irq; +} +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) + +#ifndef HAVE_LINUX_MM_H +#define HAVE_LINUX_MM_H +#endif + +#ifndef HAVE_LINUX_SCHED_H +#define HAVE_LINUX_SCHED_H +#endif +/** + * struct refcount_t - variant of atomic_t specialized for reference counts + * @refs: atomic_t counter field + * + * The counter saturates at UINT_MAX and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free bugs. 
+ */ +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +/** + * refcount_set - set a refcount's value + * @r: the refcount + * @n: value to which the refcount will be set + */ +#undef refcount_set +#define refcount_set _kc_refcount_set +static inline void _kc_refcount_set(refcount_t *r, unsigned int n) +{ + atomic_set(&r->refs, n); +} + +#undef refcount_dec_and_test +#define refcount_dec_and_test _kc_refcount_dec_and_test +static inline __must_check bool _kc_refcount_dec_and_test(refcount_t *r) +{ + return atomic_dec_and_test(&r->refs); +} + +/* + * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See the comment on top. + */ +static inline bool refcount_inc_not_zero(refcount_t *r) +{ + unsigned int old, new, val = atomic_read(&r->refs); + + for (;;) { + new = val + 1; + + if (!val) + return false; + + if (unlikely(!new)) + return true; + + old = atomic_cmpxchg_relaxed(&r->refs, val, new); + if (old == val) + break; + + val = old; + } + + WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); + + return true; +} + +/* + * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. + * + * Provides no memory ordering, it is assumed the caller already has a + * reference on the object, will WARN when this is not so. + */ +static inline void refcount_inc(refcount_t *r) +{ + WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); +} + +/* + * Similar to atomic_dec(), it will WARN on underflow and fail to decrement + * when saturated at UINT_MAX. + * + * Provides release memory ordering, such that prior loads and stores are done + * before. + */ + +static inline void refcount_dec(refcount_t *r) +{ + WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); +} + +/* + * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the + * success thereof. + * + * Like all decrement operations, it provides release memory order and provides + * a control dependency. + * + * It can be used like a try-delete operator; this explicit case is provided + * and not cmpxchg in generic, because that would allow implementing unsafe + * operations. 
+ */ +static inline bool refcount_dec_if_one(refcount_t *r) +{ + return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; +} + +/** + * Here we call kmalloc_array for mem allocate + * Kernel optimize from 4.11 + */ +#undef kvmalloc_array +#define kvmalloc_array _kc_kvmalloc_array +static inline void *_kc_kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + return kmalloc_array(n, size, flags); +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) +#undef addrconf_addr_eui48_base +#define addrconf_addr_eui48_base _kc_addrconf_addr_eui48_base +static inline void _kc_addrconf_addr_eui48_base(u8 *eui, + const char *const addr) +{ + memcpy(eui, addr, 3); + eui[3] = 0xFF; + eui[4] = 0xFE; + memcpy(eui + 5, addr + 3, 3); +} + +#undef addrconf_addr_eui48 +#define addrconf_addr_eui48 _kc_addrconf_addr_eui48 +static inline void _kc_addrconf_addr_eui48(u8 *eui, const char *const addr) +{ + addrconf_addr_eui48_base(eui, addr); + eui[0] ^= 2; +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)) +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)) +#define __must_check __attribute__((warn_unused_result)) + +typedef unsigned long __kernel_ulong_t; +typedef __kernel_ulong_t __kernel_size_t; +typedef __kernel_size_t size_t; + +#define SIZE_MAX (~(size_t)0) +#endif + +/** + * array_size() - Calculate size of 2-dimensional array. + * + * @a: dimension one + * @b: dimension two + * + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. 
+ */ +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 16, 0)) +#define CONFIG_NEW_KERNEL +#define MODIFY_CQ_MASK +#else +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 18, 0)) +#define CONFIG_KERNEL_419 +#endif + +#endif /*_ROCE_K_COMPAT_H*/ diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 423818a7d3330d2bd8598c40e672312e81a69f3e..771eb6bd0785482beb9b9ae031de94c6cb1b93ea 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1689,7 +1689,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev, unsigned long flags; rtnl_lock(); - for_each_netdev_rcu(&init_net, ip_dev) { + for_each_netdev(&init_net, ip_dev) { if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) && (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 4d841a3c68f33dfa7454d002ccbd3d239484ac1a..026557aa23077456b11f55a28ef96844be3723d7 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -2945,6 +2945,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag( u64 header; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; cqp = dev->cqp; wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); @@ -3003,6 +3006,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared( u8 addr_type; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? 
I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index adc8d2ec523deaf6714262b594f09560d2773966..5c4e2f206105e00dea7921060fbb2bfbc0be6747 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -779,6 +779,7 @@ struct i40iw_allocate_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_reg_ns_stag_info { @@ -797,6 +798,7 @@ struct i40iw_reg_ns_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_fast_reg_stag_info { diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index a9ea966877f21d30010256264153e2d355fbc3c1..dda8e79d4b27e6448a08b11d1842824fb712a143 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, rcu_read_lock(); in = __in_dev_get_rcu(upper_dev); - local_ipaddr = ntohl(in->ifa_list->ifa_address); + + if (!in->ifa_list) + local_ipaddr = 0; + else + local_ipaddr = ntohl(in->ifa_list->ifa_address); + rcu_read_unlock(); } else { local_ipaddr = ntohl(ifa->ifa_address); @@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, case NETDEV_UP: /* Fall through */ case NETDEV_CHANGEADDR: + + /* Just skip if no need to handle ARP cache */ + if (!local_ipaddr) + break; + i40iw_manage_arp_cache(iwdev, netdev->dev_addr, &local_ipaddr, diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index e2e6c74a74522598e2fc02a7c5d78bb68939d746..9cf8bf2c87e7a940e1d08a8dae22f6f1356d82c6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -806,6 +806,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp, struct i40iw_qp *iwqp = to_iwqp(ibqp); struct i40iw_sc_qp *qp = &iwqp->sc_qp; + attr->qp_state = iwqp->ibqp_state; + attr->cur_qp_state = attr->qp_state; attr->qp_access_flags = 0; attr->cap.max_send_wr = qp->qp_uk.sq_size; attr->cap.max_recv_wr = qp->qp_uk.rq_size; @@ -1601,7 +1603,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev, static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr) { struct i40iw_allocate_stag_info *info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); enum i40iw_status_code status; int err = 0; struct i40iw_cqp_request *cqp_request; @@ -1618,6 +1621,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; info->pd_id = iwpd->sc_pd.pd_id; info->total_len = iwmr->length; + info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; info->remote_access = true; cqp_info->cqp_cmd = OP_ALLOC_STAG; cqp_info->post_sq = 1; @@ -1671,6 +1675,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, iwmr->type = IW_MEMREG_TYPE_MEM; palloc = &iwpbl->pble_alloc; iwmr->page_cnt = max_num_sg; + /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */ + iwmr->length = max_num_sg * PAGE_SIZE; mutex_lock(&iwdev->pbl_mutex); status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); mutex_unlock(&iwdev->pbl_mutex); @@ -1767,7 +1773,8 @@ 
static int i40iw_hwreg_mr(struct i40iw_device *iwdev, { struct i40iw_pbl *iwpbl = &iwmr->iwpbl; struct i40iw_reg_ns_stag_info *stag_info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; enum i40iw_status_code status; int err = 0; @@ -1787,6 +1794,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev, stag_info->total_len = iwmr->length; stag_info->access_rights = access; stag_info->pd_id = iwpd->sc_pd.pd_id; + stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; stag_info->page_size = iwmr->page_size; diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig index db4aa13ebae0c693138bdb51832961e2a1d492ba..d1de3285fd8856ddf255e44e82f6c556c67aad84 100644 --- a/drivers/infiniband/hw/mlx4/Kconfig +++ b/drivers/infiniband/hw/mlx4/Kconfig @@ -1,6 +1,7 @@ config MLX4_INFINIBAND tristate "Mellanox ConnectX HCA support" depends on NETDEVICES && ETHERNET && PCI && INET + depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS depends on MAY_USE_DEVLINK select NET_VENDOR_MELLANOX select MLX4_CORE diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 155b4dfc0ae837392d0db8d2d07a9b551634d777..baab9afa9174b60989ed95443a7ca8602dfc501d 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) unsigned long flags; for (i = 0 ; i < dev->num_ports; i++) { - cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work); det = &sriov->alias_guid.ports_guid[i]; + cancel_delayed_work_sync(&det->alias_guid_work); spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); while (!list_empty(&det->cb_list)) { cb_ctx = list_entry(det->cb_list.next, diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index fedaf82601054a38c10c67cc81cf92b0c0612953..8c79a480f2b7665779c710a7e778f0f3b166c432 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -39,7 +39,7 @@ #include "mlx4_ib.h" -#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ) +#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ) struct id_map_entry { struct rb_node node; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index e5466d786bb1e9ed2ed8b600228a8e9e361a12f7..670b51a92abc442a19ec75a1f52206c0fc9390e7 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1409,7 +1409,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); if (sqp->tx_ring[wire_tx_ix].ah) - rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah); + mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah); sqp->tx_ring[wire_tx_ix].ah = ah; ib_dma_sync_single_for_cpu(&dev->ib_dev, sqp->tx_ring[wire_tx_ix].buf.map, @@ -1668,8 +1668,6 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); } - kfree(tun_qp->tx_ring); - tun_qp->tx_ring = NULL; i = MLX4_NUM_TUNNEL_BUFS; err: while (i > 0) { @@ -1678,6 +1676,8 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } + kfree(tun_qp->tx_ring); + tun_qp->tx_ring = NULL; kfree(tun_qp->ring); tun_qp->ring = NULL; return 
-ENOMEM; @@ -1900,7 +1900,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) if (wc.status == IB_WC_SUCCESS) { switch (wc.opcode) { case IB_WC_SEND: - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; @@ -1929,7 +1929,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) " status = %d, wrid = 0x%llx\n", ctx->slave, wc.status, wc.wr_id); if (!MLX4_TUN_IS_RECV(wc.wr_id)) { - rdma_destroy_ah(sqp->tx_ring[wc.wr_id & + mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah); sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah = NULL; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0bbeaaae47e07d2e99529925bb6236ab98be377b..ed6130cba39d0c45930a31bdac8370941b3bb8a3 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1813,9 +1813,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct mlx4_dev *dev = (to_mdev(qp->device))->dev; int is_bonded = mlx4_is_bonded(dev); - if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt) - return ERR_PTR(-EINVAL); - if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP) return ERR_PTR(-EOPNOTSUPP); @@ -3069,16 +3066,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->ib_active = false; flush_workqueue(wq); - mlx4_ib_close_sriov(ibdev); - mlx4_ib_mad_cleanup(ibdev); - ib_unregister_device(&ibdev->ib_dev); - mlx4_ib_diag_cleanup(ibdev); if (ibdev->iboe.nb.notifier_call) { if (unregister_netdevice_notifier(&ibdev->iboe.nb)) pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } + mlx4_ib_close_sriov(ibdev); + mlx4_ib_mad_cleanup(ibdev); + ib_unregister_device(&ibdev->ib_dev); + mlx4_ib_diag_cleanup(ibdev); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); kfree(ibdev->ib_uc_qpns_bitmap); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index e219093d2764536a58c7c362a08167361ed0e93a..d2da28d613f2cc41397de8f2a9d7af99510aaa54 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -353,16 +353,12 @@ static int add_port_entries(struct mlx4_ib_dev *device, int port_num) static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max) { - char base_name[9]; - - /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ - strlcpy(name, pci_name(dev->dev->persist->pdev), max); - strncpy(base_name, name, 8); /*till xxxx:yy:*/ - base_name[8] = '\0'; - /* with no ARI only 3 last bits are used so when the fn is higher than 8 + /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n + * with no ARI only 3 last bits are used so when the fn is higher than 8 * need to add it to the dev num, so count in the last number will be * modulo 8 */ - sprintf(name, "%s%.2d.%d", base_name, (i/8), (i%8)); + snprintf(name, max, "%.8s%.2d.%d", pci_name(dev->dev->persist->pdev), + i / 8, i % 8); } struct mlx4_port { diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 088205d7f1a1955231ebe410d51f6837147e8d39..872985e4eebec98546d91f7e103f221aaa58de7a 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -896,15 +896,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) ib_umem_release(cq->buf.umem); } -static void init_cq_frag_buf(struct mlx5_ib_cq *cq, - struct 
mlx5_ib_cq_buf *buf) +static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf) { int i; void *cqe; struct mlx5_cqe64 *cqe64; for (i = 0; i < buf->nent; i++) { - cqe = get_cqe(cq, i); + cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i); cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; cqe64->op_own = MLX5_CQE_INVALID << 4; } @@ -930,7 +929,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (err) goto err_db; - init_cq_frag_buf(cq, &cq->buf); + init_cq_frag_buf(&cq->buf); *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * @@ -1253,7 +1252,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (err) goto ex; - init_cq_frag_buf(cq, cq->resize_buf); + init_cq_frag_buf(cq->resize_buf); return 0; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index f2f11e652dcd2a751d10397c8c65d6be8a53b53e..02f36ab72ad42afa8122b9db4e0f569d6e4781a1 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -857,7 +857,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, err = uverbs_get_flags32(&access, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, - IB_ACCESS_SUPPORTED); + IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); if (err) return err; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 32a9e9228b13554c2d1d5db057a5a484efc4f2bf..cdf6e26ebc87da8ca5e4747f9835ea959d8885db 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -197,19 +197,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt, vl_15_dropped); } -static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num, +static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, const struct ib_mad *in_mad, struct ib_mad *out_mad) { - int err; + struct mlx5_core_dev *mdev; + bool native_port = true; + u8 mdev_port_num; void *out_cnt; + int err; + mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); + if (!mdev) { + /* Fail to get the native port, likely due to 2nd port is still + * unaffiliated. In such case default to 1st port and attached + * PF device. 
+ */ + native_port = false; + mdev = dev->mdev; + mdev_port_num = 1; + } /* Declaring support of extended counters */ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { struct ib_class_port_info cpi = {}; cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; memcpy((out_mad->data + 40), &cpi, sizeof(cpi)); - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + goto done; } if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) { @@ -218,11 +232,13 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num, int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); out_cnt = kvzalloc(sz, GFP_KERNEL); - if (!out_cnt) - return IB_MAD_RESULT_FAILURE; + if (!out_cnt) { + err = IB_MAD_RESULT_FAILURE; + goto done; + } err = mlx5_core_query_vport_counter(mdev, 0, 0, - port_num, out_cnt, sz); + mdev_port_num, out_cnt, sz); if (!err) pma_cnt_ext_assign(pma_cnt_ext, out_cnt); } else { @@ -231,20 +247,23 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num, int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); out_cnt = kvzalloc(sz, GFP_KERNEL); - if (!out_cnt) - return IB_MAD_RESULT_FAILURE; + if (!out_cnt) { + err = IB_MAD_RESULT_FAILURE; + goto done; + } - err = mlx5_core_query_ib_ppcnt(mdev, port_num, + err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz); if (!err) pma_cnt_assign(pma_cnt, out_cnt); - } - + } kvfree(out_cnt); - if (err) - return IB_MAD_RESULT_FAILURE; - - return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + err = err ? IB_MAD_RESULT_FAILURE : + IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +done: + if (native_port) + mlx5_ib_put_native_port_mdev(dev, port_num); + return err; } int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, @@ -256,8 +275,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct mlx5_ib_dev *dev = to_mdev(ibdev); const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - struct mlx5_core_dev *mdev; - u8 mdev_port_num; int ret; if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || @@ -266,19 +283,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, memset(out_mad->data, 0, sizeof(out_mad->data)); - mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); - if (!mdev) - return IB_MAD_RESULT_FAILURE; - - if (MLX5_CAP_GEN(mdev, vport_counters) && + if (MLX5_CAP_GEN(dev->mdev, vport_counters) && in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) { - ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad); + ret = process_pma_cmd(dev, port_num, in_mad, out_mad); } else { ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in_mad, out_mad); } - mlx5_ib_put_native_port_mdev(dev, port_num); return ret; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c414f3809e5c24a5d112d982c98badfe2a473dd8..4f340d6db582072bed41ae56b3d1566ed6dd766a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -939,15 +939,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (MLX5_CAP_GEN(mdev, tag_matching)) { - props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; props->tm_caps.max_num_tags = (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; - props->tm_caps.flags = IB_TM_CAP_RC; props->tm_caps.max_ops = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); props->tm_caps.max_sge = MLX5_TM_MAX_SGE; } + if 
(MLX5_CAP_GEN(mdev, tag_matching) && + MLX5_CAP_GEN(mdev, rndv_offload_rc)) { + props->tm_caps.flags = IB_TM_CAP_RNDV_RC; + props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; + } + if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) { props->cq_caps.max_cq_moderation_count = MLX5_MAX_CQ_COUNT; @@ -1066,12 +1070,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_GRE) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_UDP) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; } @@ -1094,31 +1096,26 @@ enum mlx5_ib_width { MLX5_IB_WIDTH_12X = 1 << 4 }; -static int translate_active_width(struct ib_device *ibdev, u8 active_width, +static void translate_active_width(struct ib_device *ibdev, u8 active_width, u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - int err = 0; - if (active_width & MLX5_IB_WIDTH_1X) { + if (active_width & MLX5_IB_WIDTH_1X) *ib_width = IB_WIDTH_1X; - } else if (active_width & MLX5_IB_WIDTH_2X) { - mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", - (int)active_width); - err = -EINVAL; - } else if (active_width & MLX5_IB_WIDTH_4X) { + else if (active_width & MLX5_IB_WIDTH_4X) *ib_width = IB_WIDTH_4X; - } else if (active_width & MLX5_IB_WIDTH_8X) { + else if (active_width & MLX5_IB_WIDTH_8X) *ib_width = IB_WIDTH_8X; - } else if (active_width & MLX5_IB_WIDTH_12X) { + else if (active_width & MLX5_IB_WIDTH_12X) *ib_width = IB_WIDTH_12X; - } else { - mlx5_ib_dbg(dev, "Invalid active_width %d\n", + else { + mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n", (int)active_width); - err = -EINVAL; + *ib_width = IB_WIDTH_4X; } - return err; + return; } static int mlx5_mtu_to_ib_mtu(int mtu) @@ -1225,10 +1222,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, if (err) goto out; - err = translate_active_width(ibdev, ib_link_width_oper, - &props->active_width); - if (err) - goto out; + translate_active_width(ibdev, ib_link_width_oper, &props->active_width); + err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); if (err) goto out; @@ -1826,6 +1821,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, context->lib_caps = req.lib_caps; print_lib_caps(dev, context->lib_caps); + if (mlx5_lag_is_active(dev->mdev)) { + u8 port = mlx5_core_native_port_num(dev->mdev); + + atomic_set(&context->tx_port_affinity, + atomic_add_return( + 1, &dev->roce[port].tx_port_affinity)); + } + return &context->ibucontext; out_mdev: @@ -2021,6 +2024,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, if (vma->vm_flags & VM_WRITE) return -EPERM; + vma->vm_flags &= ~VM_MAYWRITE; if (!dev->mdev->clock_info_page) return -EOPNOTSUPP; @@ -2204,6 +2208,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm if (vma->vm_flags & VM_WRITE) return -EPERM; + vma->vm_flags &= ~VM_MAYWRITE; /* Don't expose to user-space information it shouldn't have */ if (PAGE_SIZE > 4096) @@ -2383,10 +2388,29 @@ static u8 get_match_criteria_enable(u32 *match_criteria) return match_criteria_enable; } -static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 
+static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) { - MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); - MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); + u8 entry_mask; + u8 entry_val; + int err = 0; + + if (!mask) + goto out; + + entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, + ip_protocol); + entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, + ip_protocol); + if (!entry_mask) { + MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); + MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); + goto out; + } + /* Don't override existing ip protocol */ + if (mask != entry_mask || val != entry_val) + err = -EINVAL; +out: + return err; } static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, @@ -2590,8 +2614,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, set_tos(headers_c, headers_v, ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); - set_proto(headers_c, headers_v, - ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); + if (set_proto(headers_c, headers_v, + ib_spec->ipv4.mask.proto, + ib_spec->ipv4.val.proto)) + return -EINVAL; break; case IB_FLOW_SPEC_IPV6: if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) @@ -2630,9 +2656,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, ib_spec->ipv6.mask.traffic_class, ib_spec->ipv6.val.traffic_class); - set_proto(headers_c, headers_v, - ib_spec->ipv6.mask.next_hdr, - ib_spec->ipv6.val.next_hdr); + if (set_proto(headers_c, headers_v, + ib_spec->ipv6.mask.next_hdr, + ib_spec->ipv6.val.next_hdr)) + return -EINVAL; set_flow_label(misc_params_c, misc_params_v, ntohl(ib_spec->ipv6.mask.flow_label), @@ -2653,10 +2680,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, LAST_TCP_UDP_FIELD)) return -EOPNOTSUPP; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - 0xff); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - IPPROTO_TCP); + if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) + return -EINVAL; MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, ntohs(ib_spec->tcp_udp.mask.src_port)); @@ -2673,10 +2698,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, LAST_TCP_UDP_FIELD)) return -EOPNOTSUPP; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - 0xff); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - IPPROTO_UDP); + if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) + return -EINVAL; MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(ib_spec->tcp_udp.mask.src_port)); @@ -2692,6 +2715,9 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, if (ib_spec->gre.mask.c_ks_res0_ver) return -EOPNOTSUPP; + if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) + return -EINVAL; + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, @@ -3279,10 +3305,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } INIT_LIST_HEAD(&handler->list); - if (dst) { - memcpy(&dest_arr[0], dst, sizeof(*dst)); - dest_num++; - } for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { err = parse_flow_attr(dev->mdev, spec->match_criteria, @@ -3296,6 +3318,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, ib_flow += ((union ib_flow_spec *)ib_flow)->size; } + if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { + memcpy(&dest_arr[0], dst, sizeof(*dst)); + dest_num++; + 
} + if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn); @@ -3333,10 +3360,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { - if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { + if (!dest_num) rule_dst = NULL; - dest_num = 0; - } } else { if (is_egress) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; @@ -3566,7 +3591,6 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, } if (domain != IB_FLOW_DOMAIN_USER || - flow_attr->port > dev->num_ports || (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) { err = -EINVAL; @@ -5303,14 +5327,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); } -static const struct cpumask * -mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) -{ - struct mlx5_ib_dev *dev = to_mdev(ibdev); - - return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); -} - /* The mlx5_ib_multiport_mutex should be held when calling this function */ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) @@ -5823,7 +5839,6 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; @@ -6371,6 +6386,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); + kfree(mpi); return; } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 320d4dfe8c2f419cfdb9f53ddcdcc122eed294f1..6a060c84598feb61a75d45b7d50e9e20c7fd351e 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -139,6 +139,8 @@ struct mlx5_ib_ucontext { u64 lib_caps; DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES); u16 devx_uid; + /* For RoCE LAG TX affinity */ + atomic_t tx_port_affinity; }; static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) @@ -467,6 +469,7 @@ struct mlx5_umr_wr { u64 length; int access_flags; u32 mkey; + u8 ignore_free_state:1; }; static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) @@ -699,7 +702,7 @@ struct mlx5_roce { rwlock_t netdev_lock; struct net_device *netdev; struct notifier_block nb; - atomic_t next_port; + atomic_t tx_port_affinity; enum ib_port_state last_port_state; struct mlx5_ib_dev *dev; u8 native_port_num; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index e223148376458fdd3c8dba767137abfbbfd0345a..bd1fdadf7ba01907078b9cae895be3a14fa4151e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) -{ - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); -} static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { 
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); } -static bool use_umr(struct mlx5_ib_dev *dev, int order) -{ - return order <= mr_cache_max_order(dev) && - umr_can_modify_entity_size(dev); -} - static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); @@ -548,13 +538,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return; c = order2idx(dev, mr->order); - if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { - mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); - return; - } + WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); - if (unreg_umr(dev, mr)) + if (unreg_umr(dev, mr)) { + mr->allocated_from_cache = false; + destroy_mkey(dev, mr); + ent = &cache->ent[c]; + if (ent->cur < ent->limit) + queue_work(cache->wq, &ent->work); return; + } ent = &cache->ent[c]; spin_lock_irq(&ent->lock); @@ -691,7 +684,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) init_completion(&ent->compl); INIT_WORK(&ent->work, cache_work_func); INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); - queue_work(cache->wq, &ent->work); if (i > MR_CACHE_LAST_STD_ENTRY) { mlx5_odp_init_mr_cache_entry(ent); @@ -711,6 +703,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) ent->limit = dev->mdev->profile->mr_cache[i].limit; else ent->limit = 0; + queue_work(cache->wq, &ent->work); } err = mlx5_mr_cache_debugfs_init(dev); @@ -1302,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; - bool populate_mtts = false; + bool use_umr; struct ib_umem *umem; int page_shift; int npages; @@ -1335,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(dev, order)) { + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && + (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || + !MLX5_CAP_GEN(dev->mdev, atomic)); + + if (order <= mr_cache_max_order(dev) && use_umr) { mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d\n", order); mr = NULL; } - populate_mtts = false; } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { if (access_flags & IB_ACCESS_ON_DEMAND) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); goto error; } - populate_mtts = true; + use_umr = false; } if (!mr) { - if (!umr_can_modify_entity_size(dev)) - populate_mtts = true; mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags, populate_mtts); + page_shift, access_flags, !use_umr); mutex_unlock(&dev->slow_path_mutex); } @@ -1375,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, update_odp_mr(mr); #endif - if (!populate_mtts) { + if (use_umr) { int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; if (access_flags & IB_ACCESS_ON_DEMAND) @@ -1408,9 +1402,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return 0; umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | - MLX5_IB_SEND_UMR_FAIL_IF_FREE; + MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; umrwr.wr.opcode = MLX5_IB_WR_UMR; + umrwr.pd = dev->umrc.pd; umrwr.mkey = mr->mmkey.key; + umrwr.ignore_free_state = 1; return mlx5_ib_post_send_wait(dev, &umrwr); } @@ -1615,10 +1611,10 @@ static void 
clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) mr->sig = NULL; } - mlx5_free_priv_descs(mr); - - if (!allocated_from_cache) + if (!allocated_from_cache) { destroy_mkey(dev, mr); + mlx5_free_priv_descs(mr); + } } static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index d216e0d2921dafc28b59e9a56dfbd82c660cc0d8..ade39ab49c3c267b7e75f11144c6e68a8263f0a6 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -497,13 +497,12 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, u32 *bytes_mapped) { - u64 access_mask = ODP_READ_ALLOWED_BIT; - int npages = 0, page_shift, np; + int npages = 0, current_seq, page_shift, ret, np; + bool implicit = false; + u64 access_mask; u64 start_idx, page_mask; struct ib_umem_odp *odp; - int current_seq; size_t size; - int ret; if (!mr->umem->odp_data->page_list) { odp = implicit_mr_get_data(mr, io_virt, bcnt); @@ -511,6 +510,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, if (IS_ERR(odp)) return PTR_ERR(odp); mr = odp->private; + implicit = true; } else { odp = mr->umem->odp_data; @@ -522,6 +522,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, page_shift = mr->umem->page_shift; page_mask = ~(BIT(page_shift) - 1); start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; + access_mask = ODP_READ_ALLOWED_BIT; if (mr->umem->writable) access_mask |= ODP_WRITE_ALLOWED_BIT; @@ -589,7 +590,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, out: if (ret == -EAGAIN) { - if (mr->parent || !odp->dying) { + if (implicit || !odp->dying) { unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); @@ -663,6 +664,15 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, goto srcu_unlock; } + if (!mr->umem->odp_data) { + mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n", + key); + if (bytes_mapped) + *bytes_mapped += bcnt; + ret = 0; + goto srcu_unlock; + } + ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped); if (ret < 0) goto srcu_unlock; @@ -724,6 +734,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, head = frame; bcnt -= frame->bcnt; + offset = 0; } break; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6cba2a02d11bad71c555b236b9beaae8c29b9cfa..ef0f710587ad849e134f2a48e63d943f1937753a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1501,7 +1501,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); memcpy(rss_key, ucmd.rx_hash_key, len); break; } @@ -2829,10 +2828,12 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, } /* Only remove the old rate after new rate was set */ - if ((old_rl.rate && - !mlx5_rl_are_equal(&old_rl, &new_rl)) || - (new_state != MLX5_SQC_STATE_RDY)) + if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) || + (new_state != MLX5_SQC_STATE_RDY)) { mlx5_rl_remove_rate(dev, &old_rl); + if (new_state != MLX5_SQC_STATE_RDY) + memset(&new_rl, 0, sizeof(new_rl)); + } ibqp->rl = new_rl; sq->state = new_state; @@ -2909,6 +2910,37 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, return 0; } +static 
unsigned int get_tx_affinity(struct mlx5_ib_dev *dev, + struct mlx5_ib_pd *pd, + struct mlx5_ib_qp_base *qp_base, + u8 port_num) +{ + struct mlx5_ib_ucontext *ucontext = NULL; + unsigned int tx_port_affinity; + + if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context) + ucontext = to_mucontext(pd->ibpd.uobject->context); + + if (ucontext) { + tx_port_affinity = (unsigned int)atomic_add_return( + 1, &ucontext->tx_port_affinity) % + MLX5_MAX_PORTS + + 1; + mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n", + tx_port_affinity, qp_base->mqp.qpn, ucontext); + } else { + tx_port_affinity = + (unsigned int)atomic_add_return( + 1, &dev->roce[port_num].tx_port_affinity) % + MLX5_MAX_PORTS + + 1; + mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n", + tx_port_affinity, qp_base->mqp.qpn); + } + + return tx_port_affinity; +} + static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state, @@ -2974,6 +3006,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (!context) return -ENOMEM; + pd = get_pd(qp); context->flags = cpu_to_be32(mlx5_st << 16); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { @@ -3002,9 +3035,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, (ibqp->qp_type == IB_QPT_XRC_TGT)) { if (mlx5_lag_is_active(dev->mdev)) { u8 p = mlx5_core_native_port_num(dev->mdev); - tx_affinity = (unsigned int)atomic_add_return(1, - &dev->roce[p].next_port) % - MLX5_MAX_PORTS + 1; + tx_affinity = get_tx_affinity(dev, pd, base, p); context->flags |= cpu_to_be32(tx_affinity << 24); } } @@ -3062,7 +3093,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, goto out; } - pd = get_pd(qp); get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, &recv_cq); @@ -3243,7 +3273,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new int req = IB_QP_STATE; int opt = 0; - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { + if (new_state == IB_QPS_RESET) { + return is_valid_mask(attr_mask, req, opt); + } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { req |= IB_QP_PKEY_INDEX | IB_QP_PORT; return is_valid_mask(attr_mask, req, opt); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { @@ -3715,10 +3747,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, memset(umr, 0, sizeof(*umr)); - if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) - umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ - else - umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ + if (!umrwr->ignore_free_state) { + if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) + /* fail if free */ + umr->flags = MLX5_UMR_CHECK_FREE; + else + /* fail if not free */ + umr->flags = MLX5_UMR_CHECK_NOT_FREE; + } umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { @@ -4371,6 +4407,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, u8 next_fence = 0; u8 fence; + if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && + !drain)) { + *bad_wr = wr; + return -EIO; + } + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); @@ -4380,13 +4422,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, spin_lock_irqsave(&qp->sq.lock, flags); - if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) { - err = -EIO; - *bad_wr = wr; - nreq = 0; - goto out; - } - for (nreq = 0; wr; 
nreq++, wr = wr->next) { if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { mlx5_ib_warn(dev, "\n"); @@ -4411,17 +4446,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, goto out; } - if (wr->opcode == IB_WR_LOCAL_INV || - wr->opcode == IB_WR_REG_MR) { + if (wr->opcode == IB_WR_REG_MR) { fence = dev->umr_fence; next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; - } else if (wr->send_flags & IB_SEND_FENCE) { - if (qp->next_fence) - fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; - else - fence = MLX5_FENCE_MODE_FENCE; - } else { - fence = qp->next_fence; + } else { + if (wr->send_flags & IB_SEND_FENCE) { + if (qp->next_fence) + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + fence = MLX5_FENCE_MODE_FENCE; + } else { + fence = qp->next_fence; + } } switch (ibqp->qp_type) { @@ -4700,18 +4736,17 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, int ind; int i; + if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && + !drain)) { + *bad_wr = wr; + return -EIO; + } + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); spin_lock_irqsave(&qp->rq.lock, flags); - if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) { - err = -EIO; - *bad_wr = wr; - nreq = 0; - goto out; - } - ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; nreq++, wr = wr->next) { diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index f3e80dec13344b03ac8b4d93e6486976f060d814..af7f2083d4d1a3b554b66273449751227522a8f2 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) goto err_free_dev; } - if (mthca_cmd_init(mdev)) { + err = mthca_cmd_init(mdev); + if (err) { mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 0d3473b4596e166646273cfeabceaabaa54c52e1..21f4239022c7aa5a380b7058807734580d04ad54 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, { struct mthca_ucontext *context; - qp = kmalloc(sizeof *qp, GFP_KERNEL); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); @@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, if (pd->uobject) return ERR_PTR(-EINVAL); - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 58188fe5aed25d852a18f34a309ef0808d92fe95..32aaa4ef481cadcdee5e2c87bc34bae0b264b617 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -83,7 +83,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, struct iphdr ipv4; const struct ib_global_route *ib_grh; union { - struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; @@ -133,9 +132,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, ipv4.tot_len = htons(0); ipv4.ttl = ib_grh->hop_limit; ipv4.protocol = nxthdr; - rdma_gid2ip(&sgid_addr._sockaddr, sgid); + rdma_gid2ip((struct sockaddr 
*)&sgid_addr, sgid); ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr; - rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid); + rdma_gid2ip((struct sockaddr*)&dgid_addr, &ib_grh->dgid); ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr; memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr)); } else { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index e578281471af79b45b31891e058712cd5288ecad..28181f01734cdc401d550b140b3248f701ae1d59 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -2499,7 +2499,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, u32 vlan_id = 0xFFFF; u8 mac_addr[6], hdr_type; union { - struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; @@ -2541,8 +2540,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, hdr_type = rdma_gid_attr_network_type(sgid_attr); if (hdr_type == RDMA_NETWORK_IPV4) { - rdma_gid2ip(&sgid_addr._sockaddr, &sgid_attr->gid); - rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid); + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid); memcpy(&cmd->params.dgid[0], &dgid_addr._sockaddr_in.sin_addr.s_addr, 4); memcpy(&cmd->params.sgid[0], diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index a0af6d424aeda58b54c58146f9761e782713e89b..d1680d3b5825076ddd47ab2df24329cbe5e5662d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -77,7 +77,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str) struct qedr_dev *qedr = get_qedr_dev(ibdev); u32 fw_ver = (u32)qedr->attr.fw_ver; - snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. 
%d", + snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); } diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 505fa36487629ce003b1ac78a2414efbe18e4195..93b16237b76774c986580792b7160cd7a9c85c3e 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -492,6 +492,8 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int i; qp = idr_find(&dev->qpidr.idr, conn_param->qpn); + if (unlikely(!qp)) + return -EINVAL; laddr = (struct sockaddr_in *)&cm_id->m_local_addr; raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 8cc3df24e04e4a481f93e088a6bcdce8acabd6e9..830d94e3313491bb6e8c1c91181735fdd1936aa8 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1701,6 +1701,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp) if (qp->urq.umem) ib_umem_release(qp->urq.umem); qp->urq.umem = NULL; + + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); + qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + kfree(qp->urq.pbl_tbl); + } } static int qedr_create_user_qp(struct qedr_dev *dev, @@ -1736,8 +1744,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* RQ - read access only (0), dma sync not required (0) */ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, 0, 0, alloc_and_init); - if (rc) + if (rc) { + ib_umem_release(qp->usq.umem); + qp->usq.umem = NULL; + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, + qp->usq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + } return rc; + } } memset(&in_params, 0, sizeof(in_params)); @@ -2809,8 +2826,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr) dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); - if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR)) - qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); + if (mr->type != QEDR_MR_DMA) + free_mr_info(dev, &mr->info); /* it could be user registered memory. */ if (mr->umem) diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 98e1ce14fa2ab901a4d8fe53f50e3cde5bc2daf5..78fa634de98a3db4a16e345007ee28bee0cf9c42 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -343,7 +343,7 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, /* virtual address of first page in transfer */ vaddr = ti->tidvaddr; - if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, + if (!access_ok((void __user *) vaddr, cnt * PAGE_SIZE)) { ret = -EFAULT; goto done; diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index f8a7de795beb6cec734860018e375d19a4bf2bd7..563f71e6d1d3d27b9d2b0b566c1a612f04291b0d 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -274,6 +274,8 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) goto op_err; if (!ret) goto rnr_nak; + if (wqe->length > qp->r_len) + goto inv_err; break; case IB_WR_RDMA_WRITE_WITH_IMM: @@ -434,7 +436,10 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) goto err; inv_err: - send_status = IB_WC_REM_INV_REQ_ERR; + send_status = + sqp->ibqp.qp_type == IB_QPT_RC ? 
+ IB_WC_REM_INV_REQ_ERR : + IB_WC_SUCCESS; wc.status = IB_WC_LOC_QP_OP_ERR; goto err; diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index d0723d4aef5c980e69d2a1218cd7ac0e8062f9b3..7424e88b0d9184d14fa5cd3ac9b0f53bb07fcf16 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -576,8 +576,10 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd, dw = (len + 3) >> 2; addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, dw << 2, DMA_TO_DEVICE); - if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) + if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) { + ret = -ENOMEM; goto unmap; + } sdmadesc[0] = 0; make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset); /* SDmaUseLargeBuf has to be set in every descriptor */ diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index ca2638d8f35ef182d9feb4bb8e6b4362d17e37bd..d831f3e61ae8ff6b15f5c734098a702746f1f34f 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj, struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); + if (!pattr->show) + return -EIO; + return pattr->show(ppd, buf); } @@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj, struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); + if (!pattr->store) + return -EIO; + return pattr->store(ppd, buf, len); } diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index f8d029a2390ff59c288538e949a4b7de9532e4fb..bce2b5cd3c7b7a926c8c06cc668d2e5b1cd4c6e6 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 926f3c8eba69fa6e07f86a3777fb0d89fc92d823..b3b49d2290b3a54944246c22a526133cc0390091 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -606,7 +606,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd, /* * How many pages in this iovec element? 
*/ -static int qib_user_sdma_num_pages(const struct iovec *iov) +static size_t qib_user_sdma_num_pages(const struct iovec *iov) { const unsigned long addr = (unsigned long) iov->iov_base; const unsigned long len = iov->iov_len; @@ -662,7 +662,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev, static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, - unsigned long addr, int tlen, int npages) + unsigned long addr, int tlen, size_t npages) { struct page *pages[8]; int i, j; @@ -726,7 +726,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd, unsigned long idx; for (idx = 0; idx < niov; idx++) { - const int npages = qib_user_sdma_num_pages(iov + idx); + const size_t npages = qib_user_sdma_num_pages(iov + idx); const unsigned long addr = (unsigned long) iov[idx].iov_base; ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, @@ -828,8 +828,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, unsigned pktnw; unsigned pktnwc; int nfrags = 0; - int npages = 0; - int bytes_togo = 0; + size_t npages = 0; + size_t bytes_togo = 0; int tiddma = 0; int cfur; @@ -889,7 +889,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, npages += qib_user_sdma_num_pages(&iov[idx]); - bytes_togo += slen; + if (check_add_overflow(bytes_togo, slen, &bytes_togo) || + bytes_togo > type_max(typeof(pkt->bytes_togo))) { + ret = -EINVAL; + goto free_pbc; + } pktnwc += slen >> 2; idx++; nfrags++; @@ -908,7 +912,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, } if (frag_size) { - int pktsize, tidsmsize, n; + size_t tidsmsize, n, pktsize, sz, addrlimit; n = npages*((2*PAGE_SIZE/frag_size)+1); pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n; @@ -926,14 +930,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, else tidsmsize = 0; - pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL); + if (check_add_overflow(pktsize, tidsmsize, &sz)) { + ret = -EINVAL; + goto free_pbc; + } + pkt = kmalloc(sz, GFP_KERNEL); if (!pkt) { ret = -ENOMEM; goto free_pbc; } pkt->largepkt = 1; pkt->frag_size = frag_size; - pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); + if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), + &addrlimit) || + addrlimit > type_max(typeof(pkt->addrlimit))) { + ret = -EINVAL; + goto free_pkt; + } + pkt->addrlimit = addrlimit; if (tiddma) { char *tidsm = (char *)pkt + pktsize; diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 41babbc0db583c96e5def53fafe9596ee3ae01ee..803c3544c75b5fbfbd3d8a35c96dcc3504e42ca8 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1495,8 +1495,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd) rdi->dparms.props.max_cq = ib_qib_max_cqs; rdi->dparms.props.max_cqe = ib_qib_max_cqes; rdi->dparms.props.max_ah = ib_qib_max_ahs; - rdi->dparms.props.max_mr = rdi->lkey_table.max; - rdi->dparms.props.max_fmr = rdi->lkey_table.max; rdi->dparms.props.max_map_per_fmr = 32767; rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; rdi->dparms.props.max_qp_init_rd_atom = 255; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 9973ac893635c098800d0885997dae99ec1099d4..e611f133aa97b2e947c962edd8a7ad743bf1555c 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -334,13 +334,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, 
usnic_dbg("\n"); - mutex_lock(&us_ibdev->usdev_lock); if (ib_get_eth_speed(ibdev, port, &props->active_speed, - &props->active_width)) { - mutex_unlock(&us_ibdev->usdev_lock); + &props->active_width)) return -EINVAL; - } + /* + * usdev_lock is acquired after (and not before) ib_get_eth_speed call + * because acquiring rtnl_lock in ib_get_eth_speed, while holding + * usdev_lock could lead to a deadlock. + */ + mutex_lock(&us_ibdev->usdev_lock); /* props being zeroed by the caller, avoid zeroing it here */ props->lid = 0; @@ -444,7 +447,7 @@ struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num) int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { - if (index > 1) + if (index > 0) return -EINVAL; *pkey = 0xffff; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 9dd39daa602bd68d671309e62129c7e740db9de2..9b7c6b4ed661f3b4c8747a038a191bde92ed357f 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -60,7 +60,7 @@ static void usnic_uiom_reg_account(struct work_struct *work) struct usnic_uiom_reg, work); down_write(&umem->mm->mmap_sem); - umem->mm->locked_vm -= umem->diff; + atomic_long_sub(umem->diff, &umem->mm->locked_vm); up_write(&umem->mm->mmap_sem); mmput(umem->mm); kfree(umem); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997eb9b5dd59397e094ec84e5bce1bcb..3c633ab58052847e4ba05da464f420a937750c2f 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) { - return (enum pvrdma_wr_opcode)op; + switch (op) { + case IB_WR_RDMA_WRITE: + return PVRDMA_WR_RDMA_WRITE; + case IB_WR_RDMA_WRITE_WITH_IMM: + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; + case IB_WR_SEND: + return PVRDMA_WR_SEND; + case IB_WR_SEND_WITH_IMM: + return PVRDMA_WR_SEND_WITH_IMM; + case IB_WR_RDMA_READ: + return PVRDMA_WR_RDMA_READ; + case IB_WR_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_ATOMIC_CMP_AND_SWP; + case IB_WR_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; + case IB_WR_LSO: + return PVRDMA_WR_LSO; + case IB_WR_SEND_WITH_INV: + return PVRDMA_WR_SEND_WITH_INV; + case IB_WR_RDMA_READ_WITH_INV: + return PVRDMA_WR_RDMA_READ_WITH_INV; + case IB_WR_LOCAL_INV: + return PVRDMA_WR_LOCAL_INV; + case IB_WR_REG_MR: + return PVRDMA_WR_FAST_REG_MR; + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; + case IB_WR_REG_SIG_MR: + return PVRDMA_WR_REG_SIG_MR; + default: + return PVRDMA_WR_ERROR; + } } static inline enum ib_wc_status pvrdma_wc_status_to_ib( diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index a5719899f49ad6ed91f216c3e1c3ea588d934c1a..ed99f0a08dc4e942fdb9c3a10a1a8d734bdac366 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1123,6 +1123,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); pvrdma_page_dir_cleanup(dev, &dev->async_pdir); pvrdma_free_slots(dev); + dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, + dev->dsrbase); iounmap(dev->regs); kfree(dev->sgid_tbl); 
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 60083c0363a5751167d1a36dfd7912763a33f1ac..9aeb330932794d19661028e0f8afd861b5b71a14 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) wqe_hdr->ex.imm_data = wr->ex.imm_data; + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + switch (qp->ibqp.qp_type) { case IB_QPT_GSI: case IB_QPT_UD: diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index b65d10b0a87591e8d1049d95806b2208cc2120fd..f4cb5cf26006f00316e76299ce60fe452760216b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -555,7 +555,7 @@ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah)) return ERR_PTR(-ENOMEM); - ah = kzalloc(sizeof(*ah), GFP_KERNEL); + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) { atomic_dec(&dev->num_ahs); return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c index 89ec0f64abfc35b64ed5058c85a054e13f3dab26..084bb4baebb50a1fe061bd3fa2be0e2deed2ae89 100644 --- a/drivers/infiniband/sw/rdmavt/ah.c +++ b/drivers/infiniband/sw/rdmavt/ah.c @@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah); * rvt_create_ah - create an address handle * @pd: the protection domain * @ah_attr: the attributes of the AH + * @udata: pointer to user's input output buffer information. * * This may be called from interrupt context. 
* * Return: newly allocated ah */ struct ib_ah *rvt_create_ah(struct ib_pd *pd, - struct rdma_ah_attr *ah_attr) + struct rdma_ah_attr *ah_attr, + struct ib_udata *udata) { struct rvt_ah *ah; struct rvt_dev_info *dev = ib_to_rvt(pd->device); diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h index 16105af9918908b4d6d417513560c273080465fc..25271b48a6830c5b6b962095c8fee2b33503036c 100644 --- a/drivers/infiniband/sw/rdmavt/ah.h +++ b/drivers/infiniband/sw/rdmavt/ah.h @@ -51,7 +51,8 @@ #include <rdma/rdma_vt.h> struct ib_ah *rvt_create_ah(struct ib_pd *pd, - struct rdma_ah_attr *ah_attr); + struct rdma_ah_attr *ah_attr, + struct ib_udata *udata); int rvt_destroy_ah(struct ib_ah *ibah); int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 49c9541050d4fc13b14b3753eed03fe33593097b..39d101df229d158cd137e2d1b4803ec5d15351aa 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi) for (i = 0; i < rdi->lkey_table.max; i++) RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL); + rdi->dparms.props.max_mr = rdi->lkey_table.max; + rdi->dparms.props.max_fmr = rdi->lkey_table.max; return 0; } @@ -611,11 +613,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) if (unlikely(mapped_segs == mr->mr.max_segs)) return -ENOMEM; - if (mr->mr.length == 0) { - mr->mr.user_base = addr; - mr->mr.iova = addr; - } - m = mapped_segs / RVT_SEGSZ; n = mapped_segs % RVT_SEGSZ; mr->mr.map[m]->segs[n].vaddr = (void *)addr; @@ -633,17 +630,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) * @sg_nents: number of entries in sg * @sg_offset: offset in bytes into sg * + * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ * * Return: number of sg elements mapped to the memory region */ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rvt_mr *mr = to_imr(ibmr); + int ret; mr->mr.length = 0; mr->mr.page_shift = PAGE_SHIFT; - return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, - rvt_set_page); + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page); + mr->mr.user_base = ibmr->iova; + mr->mr.iova = ibmr->iova; + mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; + mr->mr.length = (size_t)ibmr->length; + return ret; } /** @@ -674,6 +678,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, ibmr->rkey = key; mr->mr.lkey = key; mr->mr.access_flags = access; + mr->mr.iova = ibmr->iova; atomic_set(&mr->mr.lkey_invalid, 0); return 0; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5ce403c6cddba43e00aa123575abf38d79a51a59..7d03680afd9145c535f1a8dc45845cb50fbffb35 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -412,7 +412,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, offset = qpt->incr | ((offset & 1) ^ 1); } /* there can be no set bits in low-order QoS bits */ - WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); + WARN_ON(rdi->dparms.qos_shift > 1 && + offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1)); qpn = mk_qpn(qpt, map, offset); } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 83311dd07019b81bb65fbd4aa9810dbc6f352f8e..5c2bc22bc9d6419fb4c0c07ae6375830bf73051d 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -191,6 +191,7 @@ static inline void reset_retry_counters(struct rxe_qp *qp) { qp->comp.retry_cnt = qp->attr.retry_cnt; qp->comp.rnr_retry = qp->attr.rnr_retry; + qp->comp.started_retry = 0; } static inline enum comp_state check_psn(struct rxe_qp *qp, @@ -253,6 +254,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE && pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) { + /* read retries of partial data may restart from + * read response first or response only. 
+ */ + if ((pkt->psn == wqe->first_psn && + pkt->opcode == + IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) || + (wqe->first_psn == wqe->last_psn && + pkt->opcode == + IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY)) + break; + return COMPST_ERROR; } break; @@ -361,13 +373,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp, ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &wqe->dma, payload_addr(pkt), payload_size(pkt), to_mem_obj, NULL); - if (ret) + if (ret) { + wqe->status = IB_WC_LOC_PROT_ERR; return COMPST_ERROR; + } if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK)) return COMPST_COMP_ACK; - else - return COMPST_UPDATE_COMP; + + return COMPST_UPDATE_COMP; } static inline enum comp_state do_atomic(struct rxe_qp *qp, @@ -381,10 +395,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp, ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &wqe->dma, &atomic_orig, sizeof(u64), to_mem_obj, NULL); - if (ret) + if (ret) { + wqe->status = IB_WC_LOC_PROT_ERR; return COMPST_ERROR; - else - return COMPST_COMP_ACK; + } + + return COMPST_COMP_ACK; } static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, @@ -499,11 +515,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - qp->comp.opcode = -1; - - if (pkt) { - if (psn_compare(pkt->psn, qp->comp.psn) >= 0) - qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + if (pkt && wqe->state == wqe_state_pending) { + if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { + qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; + qp->comp.opcode = -1; + } if (qp->req.wait_psn) { qp->req.wait_psn = 0; @@ -676,6 +692,20 @@ int rxe_completer(void *arg) goto exit; } + /* if we've started a retry, don't start another + * retry sequence, unless this is a timeout. 
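+ * (started_retry is set below when a retry is actually launched and is
+ * cleared again in reset_retry_counters(), which runs when the responder
+ * is seen making forward progress.)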
+ */ + if (qp->comp.started_retry && + !qp->comp.timeout_retry) { + if (pkt) { + rxe_drop_ref(pkt->qp); + kfree_skb(skb); + skb = NULL; + } + + goto done; + } + if (qp->comp.retry_cnt > 0) { if (qp->comp.retry_cnt != 7) qp->comp.retry_cnt--; @@ -692,6 +722,7 @@ int rxe_completer(void *arg) rxe_counter_inc(rxe, RXE_CNT_COMP_RETRY); qp->req.need_retry = 1; + qp->comp.started_retry = 1; rxe_run_task(&qp->req.task, 1); } @@ -701,7 +732,7 @@ int rxe_completer(void *arg) skb = NULL; } - goto exit; + goto done; } else { rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED); diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c index 6aeb7a165e46919c1c3651d72776fd05f03173b1..ea4542a9d69e68523558455c138412f9b6341684 100644 --- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c +++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c @@ -59,7 +59,7 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev, return -EINVAL; for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_name); cnt++) - stats->value[cnt] = dev->stats_counters[cnt]; + stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]); return ARRAY_SIZE(rxe_counter_name); } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 8094cbaa54a9e2684bd41ae6774a886919461c46..95cf4fd69c55724901b1fb38f95fb5e4d5abdad3 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), - recv_sockets.sk6->sk, &ndst, &fl6))) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), + recv_sockets.sk6->sk, &fl6, + NULL); + if (unlikely(IS_ERR(ndst))) { pr_err_ratelimited("no route to %pI6\n", daddr); - goto put; + return NULL; } if (unlikely(ndst->error)) { diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index c58452daffc7449875f362269b7cd51bb9f8803e..4c0caca6da509a706cf3aa085aa7ca4755d105d1 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -249,6 +249,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, if (err) { kvfree(qp->sq.queue->buf); kfree(qp->sq.queue); + qp->sq.queue = NULL; return err; } @@ -302,6 +303,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, if (err) { kvfree(qp->rq.queue->buf); kfree(qp->rq.queue); + qp->rq.queue = NULL; return err; } } @@ -362,6 +364,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, err2: rxe_queue_cleanup(qp->sq.queue); err1: + qp->pd = NULL; + qp->rcq = NULL; + qp->scq = NULL; + qp->srq = NULL; + if (srq) rxe_drop_ref(srq); rxe_drop_ref(scq); diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index d30dbac24583a33d4e6d5f8b41c3017a330bbdb5..695a607e2d14c8b315f2782f22cf11b50b624676 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -391,7 +391,7 @@ void rxe_rcv(struct sk_buff *skb) calc_icrc = rxe_icrc_hdr(pkt, skb); calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt), - payload_size(pkt)); + payload_size(pkt) + bth_pad(pkt)); calc_icrc = (__force u32)cpu_to_be32(~calc_icrc); if (unlikely(calc_icrc != pack_icrc)) { if (skb->protocol == htons(ETH_P_IPV6)) diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 
8be27238a86e4ee1f160b4058e9517ef58708d26..1c1eae0ef8c28239fcfb544927fd56dc95d9757a 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -73,9 +73,6 @@ static void req_retry(struct rxe_qp *qp) int npsn; int first = 1; - wqe = queue_head(qp->sq.queue); - npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK; - qp->req.wqe_index = consumer_index(qp->sq.queue); qp->req.psn = qp->comp.psn; qp->req.opcode = -1; @@ -107,11 +104,17 @@ static void req_retry(struct rxe_qp *qp) if (first) { first = 0; - if (mask & WR_WRITE_OR_SEND_MASK) + if (mask & WR_WRITE_OR_SEND_MASK) { + npsn = (qp->comp.psn - wqe->first_psn) & + BTH_PSN_MASK; retry_first_write_send(qp, wqe, mask, npsn); + } - if (mask & WR_READ_MASK) + if (mask & WR_READ_MASK) { + npsn = (wqe->dma.length - wqe->dma.resid) / + qp->mtu; wqe->iova += npsn * qp->mtu; + } } wqe->state = wqe_state_posted; @@ -435,7 +438,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (pkt->mask & RXE_RETH_MASK) { reth_set_rkey(pkt, ibwr->wr.rdma.rkey); reth_set_va(pkt, wqe->iova); - reth_set_len(pkt, wqe->dma.length); + reth_set_len(pkt, wqe->dma.resid); } if (pkt->mask & RXE_IMMDT_MASK) @@ -497,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (err) return err; } + if (bth_pad(pkt)) { + u8 *pad = payload_addr(pkt) + paylen; + + memset(pad, 0, bth_pad(pkt)); + crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt)); + } } p = payload_addr(pkt) + paylen + bth_pad(pkt); @@ -640,6 +649,7 @@ int rxe_requester(void *arg) rmr->access = wqe->wr.wr.reg.access; rmr->lkey = wqe->wr.wr.reg.key; rmr->rkey = wqe->wr.wr.reg.key; + rmr->iova = wqe->wr.wr.reg.mr->iova; wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; } else { diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index aa5833318372b001e107621745b903a60d739093..9078cfd3b8bdd99ac14f17170e3a47032af8d45f 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -435,6 +435,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp, qp->resp.va = reth_va(pkt); qp->resp.rkey = reth_rkey(pkt); qp->resp.resid = reth_len(pkt); + qp->resp.length = reth_len(pkt); } access = (pkt->mask & RXE_READ_MASK) ? 
IB_ACCESS_REMOTE_READ : IB_ACCESS_REMOTE_WRITE; @@ -682,6 +683,7 @@ static enum resp_states read_reply(struct rxe_qp *qp, rxe_advance_resp_resource(qp); res->type = RXE_READ_MASK; + res->replay = 0; res->read.va = qp->resp.va; res->read.va_org = qp->resp.va; @@ -735,6 +737,13 @@ static enum resp_states read_reply(struct rxe_qp *qp, if (err) pr_err("Failed copying memory\n"); + if (bth_pad(&ack_pkt)) { + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + u8 *pad = payload_addr(&ack_pkt) + payload; + + memset(pad, 0, bth_pad(&ack_pkt)); + icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt)); + } p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt); *p = ~icrc; @@ -752,7 +761,8 @@ static enum resp_states read_reply(struct rxe_qp *qp, state = RESPST_DONE; } else { qp->resp.res = NULL; - qp->resp.opcode = -1; + if (!res->replay) + qp->resp.opcode = -1; if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) qp->resp.psn = res->cur_psn; state = RESPST_CLEANUP; @@ -814,6 +824,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) /* next expected psn, read handles this separately */ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + qp->resp.ack_psn = qp->resp.psn; qp->resp.opcode = pkt->opcode; qp->resp.status = IB_WC_SUCCESS; @@ -841,17 +852,24 @@ static enum resp_states do_complete(struct rxe_qp *qp, memset(&cqe, 0, sizeof(cqe)); - wc->wr_id = wqe->wr_id; - wc->status = qp->resp.status; - wc->qp = &qp->ibqp; + if (qp->rcq->is_user) { + uwc->status = qp->resp.status; + uwc->qp_num = qp->ibqp.qp_num; + uwc->wr_id = wqe->wr_id; + } else { + wc->status = qp->resp.status; + wc->qp = &qp->ibqp; + wc->wr_id = wqe->wr_id; + } - /* fields after status are not required for errors */ if (wc->status == IB_WC_SUCCESS) { wc->opcode = (pkt->mask & RXE_IMMDT_MASK && pkt->mask & RXE_WRITE_MASK) ? IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV; wc->vendor_err = 0; - wc->byte_len = wqe->dma.length - wqe->dma.resid; + wc->byte_len = (pkt->mask & RXE_IMMDT_MASK && + pkt->mask & RXE_WRITE_MASK) ? + qp->resp.length : wqe->dma.length - wqe->dma.resid; /* fields after byte_len are different between kernel and user * space @@ -1065,7 +1083,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, struct rxe_pkt_info *pkt) { enum resp_states rc; - u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK; + u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; if (pkt->mask & RXE_SEND_MASK || pkt->mask & RXE_WRITE_MASK) { @@ -1108,6 +1126,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, res->state = (pkt->psn == res->first_psn) ? rdatm_res_state_new : rdatm_res_state_replay; + res->replay = 1; /* Reset the resource, except length. */ res->read.va_org = iova; diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index 0d6c04ba7fc36c922e3f6733d4093e9a8cd9753c..c41a5fee81f711e183a43630150bff2a60da4708 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include #include "rxe.h" #include "rxe_loc.h" #include "rxe_queue.h" @@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, context, q->buf, q->buf_size, &q->ip); - if (err) + if (err) { + vfree(q->buf); + kfree(q); return err; + } if (uresp) { if (copy_to_user(&uresp->srq_num, &srq->srq_num, - sizeof(uresp->srq_num))) + sizeof(uresp->srq_num))) { + rxe_queue_cleanup(q); return -EFAULT; + } } return 0; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index af1470d293912e99ddaff4113b51d634b8c5743f..6a75f96b90962db0806613b7e31eba6d66d5a986 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -158,6 +158,7 @@ struct rxe_comp_info { int opcode; int timeout; int timeout_retry; + int started_retry; u32 retry_cnt; u32 rnr_retry; struct rxe_task task; @@ -171,6 +172,7 @@ enum rdatm_res_state { struct resp_res { int type; + int replay; u32 first_psn; u32 last_psn; u32 cur_psn; @@ -195,6 +197,7 @@ struct rxe_resp_info { enum rxe_qp_state state; u32 msn; u32 psn; + u32 ack_psn; int opcode; int drop_msg; int goto_error; @@ -210,6 +213,7 @@ struct rxe_resp_info { struct rxe_mem *mr; u32 resid; u32 rkey; + u32 length; u64 atomic_orig; /* SRQ only */ @@ -405,16 +409,16 @@ struct rxe_dev { spinlock_t mmap_offset_lock; /* guard mmap_offset */ int mmap_offset; - u64 stats_counters[RXE_NUM_OF_COUNTERS]; + atomic64_t stats_counters[RXE_NUM_OF_COUNTERS]; struct rxe_port port; struct list_head list; struct crypto_shash *tfm; }; -static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters cnt) +static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index) { - rxe->stats_counters[cnt]++; + atomic64_inc(&rxe->stats_counters[index]); } static inline struct rxe_dev *to_rdev(struct ib_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 1abe3c62f106d8d1d478e2d36f670b10790ee3fe..b22d02c9de90fd604f1e7acc5fa9a1ecb76e17e2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -248,7 +248,6 @@ struct ipoib_cm_tx { struct list_head list; struct net_device *dev; struct ipoib_neigh *neigh; - struct ipoib_path *path; struct ipoib_tx_buf *tx_ring; unsigned int tx_head; unsigned int tx_tail; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 3d5424f335cb06e1dcaad16a4f4c3c7910d3295e..aa9dcfc36cd35b81b9ad961f13c5e7303467ad5e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path neigh->cm = tx; tx->neigh = neigh; - tx->path = path; tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); @@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) neigh->daddr + QPN_AND_OPTIONS_OFFSET); goto free_neigh; } - memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); + memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); @@ -1438,11 +1437,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work) spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); - if (skb->protocol == htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_IP)) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + } #if IS_ENABLED(CONFIG_IPV6) - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (skb->protocol == htons(ETH_P_IPV6)) { + memset(IP6CB(skb), 0, 
sizeof(*IP6CB(skb))); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + } #endif dev_kfree_skb_any(skb); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index e3d28f9ad9c0bfa1363723ffc1b95a174c7dbac3..d8cb5bbe6eb5859a9205fd7b05610024706bf9d8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -243,7 +243,8 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu) return 0; } - if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) + if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) || + new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) return -EINVAL; priv->admin_mtu = new_mtu; @@ -1880,6 +1881,8 @@ static int ipoib_parent_init(struct net_device *ndev) sizeof(union ib_gid)); SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent); + priv->dev->dev_port = priv->port - 1; + /* Let's set this one too for backwards compatibility. */ priv->dev->dev_id = priv->port - 1; return 0; @@ -1890,12 +1893,6 @@ static void ipoib_child_init(struct net_device *ndev) struct ipoib_dev_priv *priv = ipoib_priv(ndev); struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); - dev_hold(priv->parent); - - down_write(&ppriv->vlan_rwsem); - list_add_tail(&priv->list, &ppriv->child_intfs); - up_write(&ppriv->vlan_rwsem); - priv->max_ib_mtu = ppriv->max_ib_mtu; set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); @@ -1938,6 +1935,17 @@ static int ipoib_ndo_init(struct net_device *ndev) if (rc) { pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n", priv->ca->name, priv->dev->name, priv->port, rc); + return rc; + } + + if (priv->parent) { + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + dev_hold(priv->parent); + + down_write(&ppriv->vlan_rwsem); + list_add_tail(&priv->list, &ppriv->child_intfs); + up_write(&ppriv->vlan_rwsem); } return 0; @@ -1955,6 +1963,14 @@ static void ipoib_ndo_uninit(struct net_device *dev) */ WARN_ON(!list_empty(&priv->child_intfs)); + if (priv->parent) { + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + down_write(&ppriv->vlan_rwsem); + list_del(&priv->list); + up_write(&ppriv->vlan_rwsem); + } + ipoib_neigh_hash_uninit(dev); ipoib_ib_dev_cleanup(dev); @@ -1966,15 +1982,8 @@ static void ipoib_ndo_uninit(struct net_device *dev) priv->wq = NULL; } - if (priv->parent) { - struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); - - down_write(&ppriv->vlan_rwsem); - list_del(&priv->list); - up_write(&ppriv->vlan_rwsem); - + if (priv->parent) dev_put(priv->parent); - } } static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state) @@ -1995,6 +2004,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf, return err; ivf->vf = vf; + memcpy(ivf->mac, dev->dev_addr, dev->addr_len); return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index b9e9562f503471c19a63ae44e186058d2e1c749f..de82fb0cb1d5bd1f4dc3b24de836d59b517b3953 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -543,21 +543,18 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) /* SM supports sendonly-fullmember, otherwise fallback to full-member */ rec.join_state = SENDONLY_FULLMEMBER_JOIN; } - spin_unlock_irq(&priv->lock); multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, - &rec, comp_mask, GFP_KERNEL, + &rec, comp_mask, GFP_ATOMIC, 
ipoib_mcast_join_complete, mcast); - spin_lock_irq(&priv->lock); if (IS_ERR(multicast)) { ret = PTR_ERR(multicast); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); /* Requeue this join task with a backoff delay */ __ipoib_mcast_schedule_join_thread(priv, mcast, 1); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - spin_unlock_irq(&priv->lock); complete(&mcast->done); - spin_lock_irq(&priv->lock); + return ret; } return 0; } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 3fecd87c9f2bdc2ce23f88343471153f9cf3a698..b4e0ae0245754d77ac7e51ea79abb3f2179e82b5 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -646,6 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, if (ib_conn->pi_support) { u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap; + shost->sg_prot_tablesize = shost->sg_tablesize; scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 120b408295603d31cccceeffaeea9b7ee4778540..a7aeaa0c6fbc9281cde32678dfc8f087ba97ddfd 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -197,7 +197,7 @@ struct iser_data_buf { struct scatterlist *sg; int size; unsigned long data_len; - unsigned int dma_nents; + int dma_nents; }; /* fwd declarations */ diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 2f6388596f886c262222ec8feefefca23b83b1db..96af06cfe0afd9386235007a762eff92197b42a4 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -589,13 +589,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) ib_conn->post_recv_buf_count--; } -static inline void +static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) { - if (likely(rkey == desc->rsc.mr->rkey)) + if (likely(rkey == desc->rsc.mr->rkey)) { desc->rsc.mr_valid = 0; - else if (likely(rkey == desc->pi_ctx->sig_mr->rkey)) + } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) { desc->pi_ctx->sig_mr_valid = 0; + } else { + iser_err("Bogus remote invalidation for rkey %#x\n", rkey); + return -EINVAL; + } + + return 0; } static int @@ -623,12 +629,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn, if (iser_task->dir[ISER_DIR_IN]) { desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h; - iser_inv_desc(desc, rkey); + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; } if (iser_task->dir[ISER_DIR_OUT]) { desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h; - iser_inv_desc(desc, rkey); + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; } } else { iser_err("failed to get task for itt=%d\n", hdr->itt); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 009be8889d71d05c7eeeb7d6684276ae54c76911..e49361cda964cd920eb3e20fc494ecf29684320d 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -240,8 +240,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, page_vec->npages = 0; page_vec->fake_mr.page_size = SIZE_4K; plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg, - mem->size, NULL, iser_set_page); - if (unlikely(plen < mem->size)) { + mem->dma_nents, NULL, iser_set_page); + if (unlikely(plen < mem->dma_nents)) { 
iser_err("page vec too short to hold this SG\n"); iser_data_buf_dump(mem, device->ib_device); iser_dump_page_vec(page_vec); @@ -451,10 +451,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); - n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K); - if (unlikely(n != mem->size)) { + n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K); + if (unlikely(n != mem->dma_nents)) { iser_err("failed to map sg (%d/%d)\n", - n, mem->size); + n, mem->dma_nents); return n < 0 ? n : -EINVAL; } @@ -526,14 +526,14 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, int err; use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) && - scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL; + scsi_prot_op_normal(task->sc); if (!use_dma_key) { desc = device->reg_ops->reg_desc_get(ib_conn); reg->mem_h = desc; } - if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) + if (scsi_prot_op_normal(task->sc)) data_reg = reg; else data_reg = &task->desc.data_reg; @@ -542,7 +542,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, if (unlikely(err)) goto err_reg; - if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + if (!scsi_prot_op_normal(task->sc)) { struct iser_mem_reg *prot_reg = &task->desc.prot_reg; if (scsi_prot_sg_count(task->sc)) { diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b686a4aaffe8663b8ca6b78078a04860d047234d..bee8c0b1d6a518c926c7bb382abac78d78494d31 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1123,7 +1123,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { pr_err("ib_check_mr_status failed, ret %d\n", ret); - goto err; + /* Not a lot we can do, return ambiguous guard error */ + *sector = 0; + return 0x1; } if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { @@ -1151,9 +1153,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, } return 0; -err: - /* Not alot we can do here, return ambiguous guard error */ - return 0x1; } void iser_err_comp(struct ib_wc *wc, const char *type) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index 267da8215e08fe71b9d4920e1accf851d7723538..31cd361416ac9e1fc21d2d103c0743369eb34251 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter, if (unlikely(!dlid)) v_warn("Null dlid in MAC address\n"); } else if (def_port != OPA_VNIC_INVALID_PORT) { - dlid = info->vesw.u_ucast_dlid[def_port]; + if (def_port < OPA_VESW_MAX_NUM_DEF_PORT) + dlid = info->vesw.u_ucast_dlid[def_port]; } } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 0b34e909505f5fa4c6a1404227a137ae94e54aa9..ec54cd687ef126addee11a6388725fd813578765 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2357,6 +2357,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) if (srp_post_send(ch, iu, len)) { shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); + scmnd->result = DID_ERROR << 16; goto err_unmap; } @@ -2916,7 +2917,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) u32 tag; u16 ch_idx; struct srp_rdma_ch *ch; - int ret; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); @@ 
-2932,26 +2932,20 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "Sending SRP abort for tag %#x\n", tag); if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, - SRP_TSK_ABORT_TASK, NULL) == 0) - ret = SUCCESS; - else if (target->rport->state == SRP_RPORT_LOST) - ret = FAST_IO_FAIL; - else - ret = FAILED; - if (ret == SUCCESS) { + SRP_TSK_ABORT_TASK, NULL) == 0) { srp_free_req(ch, req, scmnd, 0); - scmnd->result = DID_ABORT << 16; - scmnd->scsi_done(scmnd); + return SUCCESS; } + if (target->rport->state == SRP_RPORT_LOST) + return FAST_IO_FAIL; - return ret; + return FAILED; } static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; - int i, j; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); @@ -2963,15 +2957,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) if (status) return FAILED; - for (i = 0; i < target->ch_count; i++) { - ch = &target->ch[i]; - for (j = 0; j < target->req_ring_size; ++j) { - struct srp_request *req = &ch->req_ring[j]; - - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); - } - } - return SUCCESS; } @@ -3411,13 +3396,17 @@ static const match_table_t srp_opt_tokens = { /** * srp_parse_in - parse an IP address and port number combination + * @net: [in] Network namespace. + * @sa: [out] Address family, IP address and port number. + * @addr_port_str: [in] IP address and port number. + * @has_port: [out] Whether or not @addr_port_str includes a port number. * * Parse the following address formats: * - IPv4: :, e.g. 1.2.3.4:5. * - IPv6: \[\]:, e.g. [1::2:3%4]:5. */ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa, - const char *addr_port_str) + const char *addr_port_str, bool *has_port) { char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL); char *port_str; @@ -3426,9 +3415,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa, if (!addr) return -ENOMEM; port_str = strrchr(addr, ':'); - if (!port_str) - return -EINVAL; - *port_str++ = '\0'; + if (port_str && strchr(port_str, ']')) + port_str = NULL; + if (port_str) + *port_str++ = '\0'; + if (has_port) + *has_port = port_str != NULL; ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa); if (ret && addr[0]) { addr_end = addr + strlen(addr) - 1; @@ -3450,6 +3442,7 @@ static int srp_parse_options(struct net *net, const char *buf, char *p; substring_t args[MAX_OPT_ARGS]; unsigned long long ull; + bool has_port; int opt_mask = 0; int token; int ret = -EINVAL; @@ -3548,7 +3541,8 @@ static int srp_parse_options(struct net *net, const char *buf, ret = -ENOMEM; goto out; } - ret = srp_parse_in(net, &target->rdma_cm.src.ss, p); + ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, + NULL); if (ret < 0) { pr_warn("bad source parameter '%s'\n", p); kfree(p); @@ -3564,7 +3558,10 @@ static int srp_parse_options(struct net *net, const char *buf, ret = -ENOMEM; goto out; } - ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p); + ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, + &has_port); + if (!has_port) + ret = -EINVAL; if (ret < 0) { pr_warn("bad dest parameter '%s'\n", p); kfree(p); @@ -4151,9 +4148,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). 
The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index f37cbad022a24e8ee054c51ee7e3069bd763ecd6..4948d5b446b6d578dfc56a9ed8ecd813e983f686 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -82,12 +82,16 @@ module_param(srpt_srq_size, int, 0444); MODULE_PARM_DESC(srpt_srq_size, "Shared receive queue (SRQ) size."); +static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp) +{ + return kstrtou64(buffer, 16, (u64 *)kp->arg); +} static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); } -module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, - 0444); +module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x, + &srpt_service_guid, 0444); MODULE_PARM_DESC(srpt_service_guid, "Using this value for ioc_guid, id_ext, and cm_listen_id" " instead of using the node_guid of the first HCA."); @@ -2009,6 +2013,14 @@ static void srpt_free_ch(struct kref *kref) kfree_rcu(ch, rcu); } +/* + * Shut down the SCSI target session, tell the connection manager to + * disconnect the associated RDMA channel, transition the QP to the error + * state and remove the channel from the channel list. This function is + * typically called from inside srpt_zerolength_write_done(). Concurrent + * srpt_zerolength_write() calls from inside srpt_close_ch() are possible + * as long as the channel is on sport->nexus_list. + */ static void srpt_release_channel_work(struct work_struct *w) { struct srpt_rdma_ch *ch; @@ -2036,6 +2048,11 @@ static void srpt_release_channel_work(struct work_struct *w) else ib_destroy_cm_id(ch->ib_cm.cm_id); + sport = ch->sport; + mutex_lock(&sport->mutex); + list_del_rcu(&ch->list); + mutex_unlock(&sport->mutex); + srpt_destroy_ch_ib(ch); srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, @@ -2046,11 +2063,6 @@ static void srpt_release_channel_work(struct work_struct *w) sdev, ch->rq_size, srp_max_req_size, DMA_FROM_DEVICE); - sport = ch->sport; - mutex_lock(&sport->mutex); - list_del_rcu(&ch->list); - mutex_unlock(&sport->mutex); - wake_up(&sport->ch_releaseQ); kref_put(&ch->kref, srpt_free_ch); @@ -2785,8 +2797,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd) srpt_queue_response(cmd); } +/* + * This function is called for aborted commands if no response is sent to the + * initiator. Make sure that the credits freed by aborting a command are + * returned to the initiator the next time a response is sent by incrementing + * ch->req_lim_delta. 
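+ * (The accumulated delta is copied into the REQUEST LIMIT DELTA field of
+ * the next SRP_RSP that is built, keeping the initiator's credit count
+ * consistent.)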
+ */ static void srpt_aborted_task(struct se_cmd *cmd) { + struct srpt_send_ioctx *ioctx = container_of(cmd, + struct srpt_send_ioctx, cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + + atomic_inc(&ch->req_lim_delta); } static int srpt_queue_status(struct se_cmd *cmd) @@ -3035,7 +3058,6 @@ static void srpt_add_one(struct ib_device *device) INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, srpt_event_handler); - ib_register_event_handler(&sdev->event_handler); for (i = 1; i <= sdev->device->phys_port_cnt; i++) { sport = &sdev->port[i - 1]; @@ -3053,10 +3075,11 @@ static void srpt_add_one(struct ib_device *device) if (srpt_refresh_port(sport)) { pr_err("MAD registration failed for %s-%d.\n", sdev->device->name, i); - goto err_event; + goto err_cm; } } + ib_register_event_handler(&sdev->event_handler); spin_lock(&srpt_dev_lock); list_add_tail(&sdev->list, &srpt_dev_list); spin_unlock(&srpt_dev_lock); @@ -3066,8 +3089,6 @@ static void srpt_add_one(struct ib_device *device) pr_debug("added %s.\n", device->name); return; -err_event: - ib_unregister_event_handler(&sdev->event_handler); err_cm: if (sdev->cm_id) ib_destroy_cm_id(sdev->cm_id); diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c index 2743ed4656e4a25387da3b3b40ec1f0b862d990a..1cd23bf3236c2e577317b68c69cc86b2ab832cc2 100644 --- a/drivers/input/ff-memless.c +++ b/drivers/input/ff-memless.c @@ -501,6 +501,15 @@ static void ml_ff_destroy(struct ff_device *ff) { struct ml_device *ml = ff->private; + /* + * Even though we stop all playing effects when tearing down + * an input device (via input_device_flush() that calls into + * input_ff_flush() that stops and erases all effects), we + * do not actually stop the timer, and therefore we should + * do it here. + */ + del_timer_sync(&ml->timer); + kfree(ml->private); } diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index 6c7326c93721c495c4e61a73cac2dfaf9a5bc8fc..fd01c9306e66ce9d77c74748bffcae1546cb5298 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -48,6 +48,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, return 0; if (mt) return mt->num_slots != num_slots ? -EINVAL : 0; + /* Arbitrary limit for avoiding too large memory allocation. 
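+ * Real multi-touch hardware reports at most a few dozen contacts, so this
+ * bound only rejects bogus device-reported slot counts.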
*/ + if (num_slots > 1024) + return -EINVAL; mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL); if (!mt) diff --git a/drivers/input/input.c b/drivers/input/input.c index 3304aaaffe87df323c758c0dc645b1f34a3d3fd4..dcbf53b5b2bc19c24c93dd95e897482318250b8f 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -50,6 +50,17 @@ static DEFINE_MUTEX(input_mutex); static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; +static const unsigned int input_max_code[EV_CNT] = { + [EV_KEY] = KEY_MAX, + [EV_REL] = REL_MAX, + [EV_ABS] = ABS_MAX, + [EV_MSC] = MSC_MAX, + [EV_SW] = SW_MAX, + [EV_LED] = LED_MAX, + [EV_SND] = SND_MAX, + [EV_FF] = FF_MAX, +}; + static inline int is_event_supported(unsigned int code, unsigned long *bm, unsigned int max) { @@ -858,16 +869,18 @@ static int input_default_setkeycode(struct input_dev *dev, } } - __clear_bit(*old_keycode, dev->keybit); - __set_bit(ke->keycode, dev->keybit); - - for (i = 0; i < dev->keycodemax; i++) { - if (input_fetch_keycode(dev, i) == *old_keycode) { - __set_bit(*old_keycode, dev->keybit); - break; /* Setting the bit twice is useless, so break */ + if (*old_keycode <= KEY_MAX) { + __clear_bit(*old_keycode, dev->keybit); + for (i = 0; i < dev->keycodemax; i++) { + if (input_fetch_keycode(dev, i) == *old_keycode) { + __set_bit(*old_keycode, dev->keybit); + /* Setting the bit twice is useless, so break */ + break; + } } } + __set_bit(ke->keycode, dev->keybit); return 0; } @@ -923,9 +936,13 @@ int input_set_keycode(struct input_dev *dev, * Simulate keyup event if keycode is not present * in the keymap anymore */ - if (test_bit(EV_KEY, dev->evbit) && - !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && - __test_and_clear_bit(old_keycode, dev->key)) { + if (old_keycode > KEY_MAX) { + dev_warn(dev->dev.parent ?: &dev->dev, + "%s: got too big old keycode %#x\n", + __func__, old_keycode); + } else if (test_bit(EV_KEY, dev->evbit) && + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && + __test_and_clear_bit(old_keycode, dev->key)) { struct input_value vals[] = { { EV_KEY, old_keycode, 0 }, input_value_sync @@ -1909,6 +1926,14 @@ EXPORT_SYMBOL(input_free_device); */ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) { + if (type < EV_CNT && input_max_code[type] && + code > input_max_code[type]) { + pr_err("%s: invalid code %u for type %u\n", __func__, code, + type); + dump_stack(); + return; + } + switch (type) { case EV_KEY: __set_bit(code, dev->keybit); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 4c1e427dfabb984b04af577c749299336cd510d2..9123799365e63cf8e1a237e514fac3b0833ed5c5 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -460,7 +460,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, if (IS_ERR(abspam)) return PTR_ERR(abspam); - for (i = 0; i < joydev->nabs; i++) { + for (i = 0; i < len && i < joydev->nabs; i++) { if (abspam[i] > ABS_MAX) { retval = -EINVAL; goto out; @@ -484,6 +484,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, int i; int retval = 0; + if (len % sizeof(*keypam)) + return -EINVAL; + len = min(len, sizeof(joydev->keypam)); /* Validate the map. 
*/ @@ -491,7 +494,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, if (IS_ERR(keypam)) return PTR_ERR(keypam); - for (i = 0; i < joydev->nkey; i++) { + for (i = 0; i < (len / 2) && i < joydev->nkey; i++) { if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) { retval = -EINVAL; goto out; @@ -501,7 +504,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, memcpy(joydev->keypam, keypam, len); for (i = 0; i < joydev->nkey; i++) - joydev->keymap[keypam[i] - BTN_MISC] = i; + joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; out: kfree(keypam); diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index 78073259c9a1ad3d49575379e675239566ea295c..c431df7401b44bc8767bec6fa88b88209e7ac17d 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf, return -ENODEV; epirq = &interface->endpoint[0].desc; + if (!usb_endpoint_is_int_in(epirq)) + return -ENODEV; + epout = &interface->endpoint[1].desc; + if (!usb_endpoint_is_int_out(epout)) + return -ENODEV; if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL))) goto fail; diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c index 28b473f6cbb631e53d3268467a118ffabf6a9b6f..092096ee06b96c0cf6abe64e0fbc6d0fc624e906 100644 --- a/drivers/input/joystick/psxpad-spi.c +++ b/drivers/input/joystick/psxpad-spi.c @@ -292,7 +292,7 @@ static int psxpad_spi_probe(struct spi_device *spi) if (!pad) return -ENOMEM; - pdev = input_allocate_polled_device(); + pdev = devm_input_allocate_polled_device(&spi->dev); if (!pdev) { dev_err(&spi->dev, "failed to allocate input device\n"); return -ENOMEM; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d4b9db487b16fa3f9a87e4f5fd6732a8b4d9c9b4..aa4e431cbcd3543ebd617eb4c7a880868eb3f723 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -252,6 +252,8 @@ static const struct xpad_device { { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ @@ -480,18 +483,18 @@ static const u8 xboxone_hori_init[] = { }; /* - * This packet is required for some of the PDP pads to start + * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), - * (0x0e6f:0x02a4). + * (0x0e6f:0x02a4), (0x0e6f:0x02a6). 
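+ * (In xboxone_init_packets below these PDP entries now use product id
+ * 0x0000, which acts as a wildcard, so the two init packets are sent to
+ * every 0x0e6f pad instead of to an explicit per-product list.)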
*/ static const u8 xboxone_pdp_init1[] = { 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 }; /* - * This packet is required for some of the PDP pads to start + * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), - * (0x0e6f:0x02a4). + * (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ static const u8 xboxone_pdp_init2[] = { 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 @@ -527,12 +530,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), - XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbba11e942a01c2b2138854d648637f..d134c5fd5767f52bbc16d9367a2c9288244fcf04 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -756,4 +756,15 @@ config KEYBOARD_MTK_PMIC To compile this driver as a module, choose M here: the module will be called pmic-keys. +config KEYBOARD_PHYTIUM + tristate "Phytium keypad support" + depends on ARCH_PHYTIUM + select INPUT_MATRIXKMAP + help + Say Y here if you want to enable support for Phytium keypad + port. + + To compile this driver as a module, choose M here: the + module will be called phytium_keypad. + endif diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 182e92985dbf69f18c783db7b04523fbc5ae56a3..e410f6e9c97c2416f783bd27e9ab7775ea776c02 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -67,3 +67,4 @@ obj-$(CONFIG_KEYBOARD_TM2_TOUCHKEY) += tm2-touchkey.o obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o +obj-$(CONFIG_KEYBOARD_PHYTIUM) += phytium-keypad.o diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597ad5dc6949296edc7b42cd459f9b8..73686c2460ce2e94e9fe757a5ad6e49852fd5170 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -75,9 +75,7 @@ struct cap11xx_led { struct cap11xx_priv *priv; struct led_classdev cdev; - struct work_struct work; u32 reg; - enum led_brightness new_brightness; }; #endif @@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) } #ifdef CONFIG_LEDS_CLASS -static void cap11xx_led_work(struct work_struct *work) +static int cap11xx_led_set(struct led_classdev *cdev, + enum led_brightness value) { - struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); + struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); struct cap11xx_priv *priv = led->priv; - int value = led->new_brightness; /* - * All LEDs share the same duty cycle as this is a HW limitation. - * Brightness levels per LED are either 0 (OFF) and 1 (ON). 
+ * All LEDs share the same duty cycle as this is a HW + * limitation. Brightness levels per LED are either + * 0 (OFF) and 1 (ON). */ - regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, - BIT(led->reg), value ? BIT(led->reg) : 0); -} - -static void cap11xx_led_set(struct led_classdev *cdev, - enum led_brightness value) -{ - struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); - - if (led->new_brightness == value) - return; - - led->new_brightness = value; - schedule_work(&led->work); + return regmap_update_bits(priv->regmap, + CAP11XX_REG_LED_OUTPUT_CONTROL, + BIT(led->reg), + value ? BIT(led->reg) : 0); } static int cap11xx_init_leds(struct device *dev, @@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, led->cdev.default_trigger = of_get_property(child, "linux,default-trigger", NULL); led->cdev.flags = 0; - led->cdev.brightness_set = cap11xx_led_set; + led->cdev.brightness_set_blocking = cap11xx_led_set; led->cdev.max_brightness = 1; led->cdev.brightness = LED_OFF; @@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, led->reg = reg; led->priv = priv; - INIT_WORK(&led->work, cap11xx_led_work); - error = devm_led_classdev_register(dev, &led->cdev); if (error) { of_node_put(child); diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 81be6f781f0b60207088114e0b72b6e0e12d1b14..d5600118159835321c3c55b1d10e1cf4468cc22e 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) { const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; - if (buttons & BIT(map->bit)) + if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) || + (map->ev_type == EV_SW && (switches & BIT(map->bit)))) input_set_capability(idev, map->ev_type, map->code); } diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c index 539cb670de41d6955e6ea5353749dac27d96b798..ae9c51cc85f997b92a6581a74a0ba65f6a673cb1 100644 --- a/drivers/input/keyboard/imx_keypad.c +++ b/drivers/input/keyboard/imx_keypad.c @@ -526,11 +526,12 @@ static int imx_keypad_probe(struct platform_device *pdev) return 0; } -static int __maybe_unused imx_kbd_suspend(struct device *dev) +static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx_keypad *kbd = platform_get_drvdata(pdev); struct input_dev *input_dev = kbd->input_dev; + unsigned short reg_val = readw(kbd->mmio_base + KPSR); /* imx kbd can wake up system even clock is disabled */ mutex_lock(&input_dev->mutex); @@ -540,13 +541,20 @@ static int __maybe_unused imx_kbd_suspend(struct device *dev) mutex_unlock(&input_dev->mutex); - if (device_may_wakeup(&pdev->dev)) + if (device_may_wakeup(&pdev->dev)) { + if (reg_val & KBD_STAT_KPKD) + reg_val |= KBD_STAT_KRIE; + if (reg_val & KBD_STAT_KPKR) + reg_val |= KBD_STAT_KDIE; + writew(reg_val, kbd->mmio_base + KPSR); + enable_irq_wake(kbd->irq); + } return 0; } -static int __maybe_unused imx_kbd_resume(struct device *dev) +static int __maybe_unused imx_kbd_noirq_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx_keypad *kbd = platform_get_drvdata(pdev); @@ -570,7 +578,9 @@ static int __maybe_unused imx_kbd_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume); +static 
const struct dev_pm_ops imx_kbd_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_kbd_noirq_suspend, imx_kbd_noirq_resume) +}; static struct platform_driver imx_keypad_driver = { .driver = { diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index f51ae09596ef25942ff6bab030be9697c3a0eca7..3d1cb7bf5e35feba1b355cef7e81c657db97be07 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) keypad->stopped = true; spin_unlock_irq(&keypad->lock); - flush_work(&keypad->work.work); + flush_delayed_work(&keypad->work); /* * matrix_keypad_scan() will leave IRQs enabled; * we should disable them now. @@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev) struct matrix_keypad_platform_data *pdata; struct device_node *np = dev->of_node; unsigned int *gpios; - int i, nrow, ncol; + int ret, i, nrow, ncol; if (!np) { dev_err(dev, "device lacks DT data\n"); @@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev) return ERR_PTR(-ENOMEM); } - for (i = 0; i < pdata->num_row_gpios; i++) - gpios[i] = of_get_named_gpio(np, "row-gpios", i); + for (i = 0; i < nrow; i++) { + ret = of_get_named_gpio(np, "row-gpios", i); + if (ret < 0) + return ERR_PTR(ret); + gpios[i] = ret; + } - for (i = 0; i < pdata->num_col_gpios; i++) - gpios[pdata->num_row_gpios + i] = - of_get_named_gpio(np, "col-gpios", i); + for (i = 0; i < ncol; i++) { + ret = of_get_named_gpio(np, "col-gpios", i); + if (ret < 0) + return ERR_PTR(ret); + gpios[nrow + i] = ret; + } pdata->row_gpios = gpios; pdata->col_gpios = &gpios[pdata->num_row_gpios]; @@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); if (!pdata) { pdata = matrix_keypad_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) { - dev_err(&pdev->dev, "no platform data defined\n"); + if (IS_ERR(pdata)) return PTR_ERR(pdata); - } } else if (!pdata->keymap_data) { dev_err(&pdev->dev, "no keymap data defined\n"); return -EINVAL; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index 46406345742b97c06595ab7e3b785ee2a6daca1b..840e53732753f556a6f89e7afde3b2c1bf65edcc 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -60,8 +60,18 @@ /* OMAP4 values */ #define OMAP4_VAL_IRQDISABLE 0x0 -#define OMAP4_VAL_DEBOUNCINGTIME 0x7 -#define OMAP4_VAL_PVT 0x7 + +/* + * Errata i689: If a key is released for a time shorter than debounce time, + * the keyboard will idle and never detect the key release. The workaround + * is to use at least a 12ms debounce time. See omap5432 TRM chapter + * "26.4.6.2 Keyboard Controller Timer" for more information. 
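+ * With the values below (ptv = 6, i.e. a divide-by-128 prescaler, and
+ * dbms = 16) the macro evaluates, in integer arithmetic, to
+ * ((16 * 1000) / (128 * 30)) - 1 = 3, i.e. about (3 + 1) * 128 / 32768 s,
+ * roughly 15.6 ms of debounce, comfortably above the 12 ms minimum
+ * required by the erratum.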
+ */ +#define OMAP4_KEYPAD_PTV_DIV_128 0x6 +#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \ + ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1) +#define OMAP4_VAL_DEBOUNCINGTIME_16MS \ + OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128) enum { KBD_REVISION_OMAP4 = 0, @@ -116,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id) { struct omap4_keypad *keypad_data = dev_id; - if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) { - /* Disable interrupts */ - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, - OMAP4_VAL_IRQDISABLE); + if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) return IRQ_WAKE_THREAD; - } return IRQ_NONE; } @@ -163,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id) kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); - /* enable interrupts */ - kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, - OMAP4_DEF_IRQENABLE_EVENTEN | - OMAP4_DEF_IRQENABLE_LONGKEY); - return IRQ_HANDLED; } @@ -181,9 +182,9 @@ static int omap4_keypad_open(struct input_dev *input) kbd_writel(keypad_data, OMAP4_KBD_CTRL, OMAP4_DEF_CTRL_NOSOFTMODE | - (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT)); + (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT)); kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME, - OMAP4_VAL_DEBOUNCINGTIME); + OMAP4_VAL_DEBOUNCINGTIME_16MS); /* clear pending interrupts */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)); @@ -204,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input) disable_irq(keypad_data->irq); - /* Disable interrupts */ + /* Disable interrupts and wake-up events */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE, OMAP4_VAL_IRQDISABLE); + kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0); /* clear pending interrupts */ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS, @@ -355,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) } error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler, - omap4_keypad_irq_thread_fn, 0, + omap4_keypad_irq_thread_fn, IRQF_ONESHOT, "omap4-keypad", keypad_data); if (error) { dev_err(&pdev->dev, "failed to register interrupt\n"); diff --git a/drivers/input/keyboard/phytium-keypad.c b/drivers/input/keyboard/phytium-keypad.c new file mode 100644 index 0000000000000000000000000000000000000000..9fe330e320cf471197b7fcbb8b624a0cf803efa6 --- /dev/null +++ b/drivers/input/keyboard/phytium-keypad.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Phytium keypad port. + * + * Copyright (C) 2020-2021, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Keypad Controller registers + */ +#define KPCR 0x00 /* Keypad Control Register */ + +#define KPSR 0x04 /* Keypad Status Register */ +#define KBD_STAT_KPKD (0x1 << 0) /* Key Press Interrupt Status bit (w1c) */ +#define KBD_STAT_KPKR (0x1 << 1) /* Key Release Interrupt Status bit (w1c) */ +#define KBD_STAT_KDSC (0x1 << 2) /* Key Depress Synch Chain Status bit (w1c)*/ +#define KBD_STAT_KRSS (0x1 << 3) /* Key Release Synch Status bit (w1c)*/ +#define KBD_STAT_KDIE (0x1 << 8) /* Key Depress Interrupt Enable Status bit */ +#define KBD_STAT_KRIE (0x1 << 9) /* Key Release Interrupt Enable */ + +#define KDDR 0x08 /* Keypad Data Direction Register */ +#define KPDR 0x0C /* Keypad Data Register */ + +#define MAX_MATRIX_KEY_ROWS 8 +#define MAX_MATRIX_KEY_COLS 8 + +#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS) + +struct phytium_keypad { + struct input_dev *input_dev; + void __iomem *mmio_base; + + int irq; + struct timer_list check_matrix_timer; + + /* + * The matrix is stable only if no changes are detected after + * PHYTIUM_KEYPAD_SCANS_FOR_STABILITY scans + */ +#define PHYTIUM_KEYPAD_SCANS_FOR_STABILITY 3 + + int stable_count; + + bool enabled; + + unsigned int n_rows; + unsigned int n_cols; + int row_shift; + + /* Masks for enabled rows/cols */ + unsigned short rows_en_mask; + unsigned short cols_en_mask; + + unsigned short keycodes[MAX_MATRIX_KEY_NUM]; + + /* + * Matrix states: + * -stable: achieved after a complete debounce process. + * -unstable: used in the debouncing process. + */ + unsigned short matrix_stable_state[MAX_MATRIX_KEY_COLS]; + unsigned short matrix_unstable_state[MAX_MATRIX_KEY_COLS]; +}; + +static u32 phytium_read(struct phytium_keypad *keypad, int reg) +{ + return readl(keypad->mmio_base + reg); +} + +static void phytium_write(struct phytium_keypad *keypad, u32 value, int reg) +{ + writel(value, keypad->mmio_base + reg); +} + +/* Scan the matrix and return the new state in *matrix_volatile_state. */ +static void phytium_keypad_scan_matrix(struct phytium_keypad *keypad, + unsigned short *matrix_volatile_state) +{ + int col; + u32 reg_val; + + for (col = 0; col < keypad->n_cols; col++) { + if ((keypad->cols_en_mask & (1 << col)) == 0) + continue; + /* + * Discharge keypad capacitance: + * 2. write 0s on KDDR[KCD], configure columns as input. + */ + reg_val = phytium_read(keypad, KDDR); + reg_val = 0x00000000; + phytium_write(keypad, reg_val, KDDR); + + /* + * 3. Write a single column to 0, others to 1. + * 4. Sample row inputs and save data. + * 5. Repeat steps 3 - 4 for remaining columns. + */ + reg_val = 0; + reg_val |= (1 << (16 + col)); + phytium_write(keypad, reg_val, KDDR); + reg_val = phytium_read(keypad, KPDR); + reg_val = 0x00000000; + phytium_write(keypad, reg_val, KPDR); + + /* + * Delay added to avoid propagating the 0 from column to row + * when scanning. + */ + udelay(5); + + /* + * 1s in matrix_volatile_state[col] means key pressures + * throw data from non enabled rows. + */ + reg_val = phytium_read(keypad, KPDR); + matrix_volatile_state[col] = (~reg_val) & keypad->rows_en_mask; + } + + /* + * Return in standby mode: + * 6. 
write 0s to columns + */ + /* Configure columns as output, output 0 */ + reg_val = 0; + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; + phytium_write(keypad, reg_val, KDDR); + phytium_write(keypad, 0x00000000, KPDR); +} + +/* + * Compare the new matrix state (volatile) with the stable one stored in + * keypad->matrix_stable_state and fire events if changes are detected. + */ +static void phytium_keypad_fire_events(struct phytium_keypad *keypad, + unsigned short *matrix_volatile_state) +{ + struct input_dev *input_dev = keypad->input_dev; + int row, col; + + for (col = 0; col < keypad->n_cols; col++) { + unsigned short bits_changed; + int code; + + if ((keypad->cols_en_mask & (1 << col)) == 0) + continue; /* Column is not enabled */ + + bits_changed = keypad->matrix_stable_state[col] ^ + matrix_volatile_state[col]; + + if (bits_changed == 0) + continue; /* Column does not contain changes */ + + for (row = 0; row < keypad->n_rows; row++) { + if ((keypad->rows_en_mask & (1 << row)) == 0) + continue; /* Row is not enabled */ + if ((bits_changed & (1 << row)) == 0) + continue; /* Row does not contain changes */ + + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + input_event(input_dev, EV_MSC, MSC_SCAN, code); + input_report_key(input_dev, keypad->keycodes[code], + matrix_volatile_state[col] & (1 << row)); + dev_dbg(&input_dev->dev, "Event code: %d, val: %d", + keypad->keycodes[code], + matrix_volatile_state[col] & (1 << row)); + } + } + input_sync(input_dev); +} + +/* + * phytium_keypad_check_for_events is the timer handler. + */ +static void phytium_keypad_check_for_events(struct timer_list *t) +{ + struct phytium_keypad *keypad = from_timer(keypad, + t, check_matrix_timer); + unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS]; + u32 reg_val; + bool state_changed, is_zero_matrix; + int i; + + memset(matrix_volatile_state, 0, sizeof(matrix_volatile_state)); + + phytium_keypad_scan_matrix(keypad, matrix_volatile_state); + + state_changed = false; + for (i = 0; i < keypad->n_cols; i++) { + if ((keypad->cols_en_mask & (1 << i)) == 0) + continue; + + if (keypad->matrix_unstable_state[i] ^ + matrix_volatile_state[i]) { + state_changed = true; + break; + } + } + + /* + * If the matrix state is changed from the previous scan + * (Re)Begin the debouncing process, saving the new state in + * keypad->matrix_unstable_state. + * else + * Increase the count of number of scans with a stable state. + */ + if (state_changed) { + memcpy(keypad->matrix_unstable_state, matrix_volatile_state, + sizeof(matrix_volatile_state)); + keypad->stable_count = 0; + } else { + keypad->stable_count++; + } + + /* + * If the matrix is not as stable as we want reschedule scan + * in the near future. + */ + if (keypad->stable_count < PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) { + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(10)); + return; + } + + /* + * If the matrix state is stable, fire the events and save the new + * stable state. Note, if the matrix is kept stable for longer + * (keypad->stable_count > PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) all + * events have already been generated. 
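+ * With the 10 ms rescan interval used above this means a key change is
+ * reported roughly 30 ms (three identical scans) after the matrix stops
+ * bouncing.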
+ */ + if (keypad->stable_count == PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) { + phytium_keypad_fire_events(keypad, matrix_volatile_state); + memcpy(keypad->matrix_stable_state, matrix_volatile_state, + sizeof(matrix_volatile_state)); + } + + is_zero_matrix = true; + for (i = 0; i < keypad->n_cols; i++) { + if (matrix_volatile_state[i] != 0) { + is_zero_matrix = false; + break; + } + } + + if (is_zero_matrix) { + /* + * All keys have been released. Enable only the KDI + * interrupt for future key presses (clear the KDI + * status bit and its sync chain before that). + */ + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKD | KBD_STAT_KDSC; + phytium_write(keypad, reg_val, KPSR); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KDIE; + reg_val &= ~KBD_STAT_KRIE; + phytium_write(keypad, reg_val, KPSR); + } else { + /* + * Some keys are still pressed. Schedule a rescan in + * attempt to detect multiple key presses and enable + * the KRI interrupt to react quickly to key release + * event. + */ + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(60)); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KRSS; + phytium_write(keypad, reg_val, KPSR); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KRIE; + reg_val &= ~KBD_STAT_KDIE; + phytium_write(keypad, reg_val, KPSR); + } +} + +static irqreturn_t phytium_keypad_irq_handler(int irq, void *dev_id) +{ + struct phytium_keypad *keypad = dev_id; + u32 reg_val; + + reg_val = phytium_read(keypad, KPSR); + /* Disable both interrupt types */ + reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + /* Clear interrupts status bits */ + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; + phytium_write(keypad, reg_val, KPSR); + + if (keypad->enabled) { + /* The matrix is supposed to be changed */ + keypad->stable_count = 0; + + /* Schedule the scanning procedure near in the future */ + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(2)); + } + + return IRQ_HANDLED; +} + +static void phytium_keypad_config(struct phytium_keypad *keypad) +{ + u32 reg_val; + + /* + * Include enabled rows in interrupt generation (KPCR[15:0]) + * Configure keypad columns as open-drain (KPCR[31:16]) + */ + reg_val = phytium_read(keypad, KPCR); + reg_val |= keypad->rows_en_mask & 0xffff; /* rows */ + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; /* cols */ + phytium_write(keypad, reg_val, KPCR); + + /* Configure columns as output, output 0 */ + reg_val = 0; + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; + phytium_write(keypad, reg_val, KDDR); + phytium_write(keypad, 0x00000000, KPDR); + + /* + * Clear Key Depress and Key Release status bit. + * Clear both synchronizer chain. + */ + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD | + KBD_STAT_KDSC | KBD_STAT_KRSS; + phytium_write(keypad, reg_val, KPSR); + + /* Enable KDI and disable KRI (avoid false release events). */ + reg_val |= KBD_STAT_KDIE; + reg_val &= ~KBD_STAT_KRIE; + phytium_write(keypad, reg_val, KPSR); +} + +static void phytium_keypad_inhibit(struct phytium_keypad *keypad) +{ + unsigned short reg_val; + + /* Inhibit KDI and KRI interrupts. 
*/ + reg_val = phytium_read(keypad, KPSR); + reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; + phytium_write(keypad, reg_val, KPSR); +} + +static void phytium_keypad_close(struct input_dev *dev) +{ + struct phytium_keypad *keypad = input_get_drvdata(dev); + + dev_dbg(&dev->dev, ">%s\n", __func__); + + /* Mark keypad as being inactive */ + keypad->enabled = false; + synchronize_irq(keypad->irq); + del_timer_sync(&keypad->check_matrix_timer); + + phytium_keypad_inhibit(keypad); +} + +static int phytium_keypad_open(struct input_dev *dev) +{ + struct phytium_keypad *keypad = input_get_drvdata(dev); + + dev_dbg(&dev->dev, ">%s\n", __func__); + + /* We became active from now */ + keypad->enabled = true; + + phytium_keypad_config(keypad); + + /* Sanity control, not all the rows must be activated now. */ + if ((phytium_read(keypad, KPDR) & keypad->rows_en_mask) == 0) { + dev_err(&dev->dev, + "too many keys pressed, control pins initialisation\n"); + goto open_err; + } + + return 0; + +open_err: + phytium_keypad_close(dev); + return -EIO; +} + +#ifdef CONFIG_OF +static const struct of_device_id phytium_keypad_of_match[] = { + { .compatible = "phytium,keypad", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phytium_keypad_of_match); +#endif + +static int phytium_keypad_probe(struct platform_device *pdev) +{ + const struct matrix_keymap_data *keymap_data = + dev_get_platdata(&pdev->dev); + struct phytium_keypad *keypad; + struct input_dev *input_dev; + struct resource *res; + int irq, error, i, row, col; + + if (!keymap_data && !pdev->dev.of_node) { + dev_err(&pdev->dev, "no keymap defined\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "no irq defined in platform data\n"); + return irq; + } + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) { + dev_err(&pdev->dev, "failed to allocate the input device\n"); + return -ENOMEM; + } + + keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL); + if (!keypad) + return -ENOMEM; + + keypad->input_dev = input_dev; + keypad->irq = irq; + keypad->stable_count = 0; + + timer_setup(&keypad->check_matrix_timer, + phytium_keypad_check_for_events, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(keypad->mmio_base)) + return PTR_ERR(keypad->mmio_base); + + /* Init the Input device */ + input_dev->name = pdev->name; + input_dev->id.bustype = BUS_HOST; + input_dev->dev.parent = &pdev->dev; + input_dev->open = phytium_keypad_open; + input_dev->close = phytium_keypad_close; + + error = matrix_keypad_parse_properties(&pdev->dev, + &keypad->n_rows, &keypad->n_cols); + if (error) { + dev_err(&pdev->dev, "failed to parse phytium kp params\n"); + return error; + } + + error = matrix_keypad_build_keymap(keymap_data, NULL, + keypad->n_rows, + keypad->n_cols, + keypad->keycodes, input_dev); + if (error) { + dev_err(&pdev->dev, "failed to build keymap\n"); + return error; + } + + keypad->row_shift = get_count_order(keypad->n_cols); + + /* Search for rows and cols enabled */ + for (row = 0; row < keypad->n_rows; row++) { + for (col = 0; col < keypad->n_cols; col++) { + i = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + if (keypad->keycodes[i] != KEY_RESERVED) { + keypad->rows_en_mask |= 1 << row; + keypad->cols_en_mask |= 1 << col; + } + } + } + + __set_bit(EV_REP, input_dev->evbit); + input_set_capability(input_dev, EV_MSC, MSC_SCAN); + 
input_set_drvdata(input_dev, keypad); + + phytium_keypad_inhibit(keypad); + + error = devm_request_irq(&pdev->dev, irq, phytium_keypad_irq_handler, 0, + pdev->name, keypad); + if (error) { + dev_err(&pdev->dev, "failed to request IRQ\n"); + return error; + } + + /* Register the input device */ + error = input_register_device(input_dev); + if (error) { + dev_err(&pdev->dev, "failed to register input device\n"); + return error; + } + + platform_set_drvdata(pdev, keypad); + device_init_wakeup(&pdev->dev, 1); + + return 0; +} + +static int phytium_keypad_remove(struct platform_device *pdev) +{ + struct phytium_keypad *keypad = platform_get_drvdata(pdev); + + input_unregister_device(keypad->input_dev); + devm_kfree(&pdev->dev, keypad); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_keypad_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_keypad *keypad = platform_get_drvdata(pdev); + struct input_dev *input_dev = keypad->input_dev; + + mutex_lock(&input_dev->mutex); + + if (input_dev->users) + phytium_keypad_inhibit(keypad); + + mutex_unlock(&input_dev->mutex); + + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(keypad->irq); + + return 0; +} + +static int phytium_keypad_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_keypad *keypad = platform_get_drvdata(pdev); + struct input_dev *input_dev = keypad->input_dev; + int ret = 0; + + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(keypad->irq); + + mutex_lock(&input_dev->mutex); + + if (input_dev->users) + phytium_keypad_config(keypad); + + mutex_unlock(&input_dev->mutex); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_keypad_pm_ops, + phytium_keypad_suspend, phytium_keypad_resume); + +static struct platform_driver phytium_keypad_driver = { + .driver = { + .name = "phytium-keypad", + .pm = &phytium_keypad_pm_ops, + .of_match_table = of_match_ptr(phytium_keypad_of_match), + }, + .probe = phytium_keypad_probe, + .remove = phytium_keypad_remove, +}; +module_platform_driver(phytium_keypad_driver); + +MODULE_AUTHOR("Song Wenting "); +MODULE_DESCRIPTION("PHYTIUM Keypad Port Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:phytium-keypad"); diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index effb63205d3d7783e8e4e598407332892ef6aae2..4c67cf30a5d9ab14bff5f5c53d289ba347d241f4 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } + pdata->input = input; + platform_set_drvdata(pdev, pdata); + error = devm_request_irq(&pdev->dev, pdata->irq, imx_snvs_pwrkey_interrupt, 0, pdev->name, pdev); @@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } - pdata->input = input; - platform_set_drvdata(pdev, pdata); - device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f0060c854ee524582b3ec436e8978..3b85631fde9182931a1a5f3a43cf2ec64d52dbb9 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) input_dev->id.bustype = BUS_HOST; + keypad_data->input_dev = input_dev; + error = keypad_matrix_key_parse_dt(keypad_data); if (error) return error; @@ -168,8 +170,6 @@ static int 
keyscan_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad_data); - keypad_data->input_dev = input_dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); keypad_data->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(keypad_data->base)) diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index ad5d7f94f95a68d281df9e3bbd2ed321a798b70b..1c7aa86c92ab1714c5b205f6be1c7585c9c70fd8 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c @@ -111,7 +111,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio, switch (data) { case SUNKBD_RET_RESET: - schedule_work(&sunkbd->tq); + if (sunkbd->enabled) + schedule_work(&sunkbd->tq); sunkbd->reset = -1; break; @@ -212,16 +213,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) } /* - * sunkbd_reinit() sets leds and beeps to a state the computer remembers they - * were in. + * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers + * they were in. */ -static void sunkbd_reinit(struct work_struct *work) +static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd) { - struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); - - wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); - serio_write(sunkbd->serio, SUNKBD_CMD_SETLED); serio_write(sunkbd->serio, (!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) | @@ -234,11 +231,39 @@ static void sunkbd_reinit(struct work_struct *work) SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd)); } + +/* + * sunkbd_reinit() wait for the keyboard reset to complete and restores state + * of leds and beeps. + */ + +static void sunkbd_reinit(struct work_struct *work) +{ + struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); + + /* + * It is OK that we check sunkbd->enabled without pausing serio, + * as we only want to catch true->false transition that will + * happen once and we will be woken up for it. 
+ */ + wait_event_interruptible_timeout(sunkbd->wait, + sunkbd->reset >= 0 || !sunkbd->enabled, + HZ); + + if (sunkbd->reset >= 0 && sunkbd->enabled) + sunkbd_set_leds_beeps(sunkbd); +} + static void sunkbd_enable(struct sunkbd *sunkbd, bool enable) { serio_pause_rx(sunkbd->serio); sunkbd->enabled = enable; serio_continue_rx(sunkbd->serio); + + if (!enable) { + wake_up_interruptible(&sunkbd->wait); + cancel_work_sync(&sunkbd->tq); + } } /* diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a84c89ab0e1a8d5729046473b79ed7..dd9dd4e4082718f43b923ff8ad984b148a15ee29 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c @@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) idev->close = bma150_irq_close; input_set_drvdata(idev, bma150); + bma150->input = idev; + error = input_register_device(idev); if (error) { input_free_device(idev); return error; } - bma150->input = idev; return 0; } @@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) bma150_init_input_device(bma150, ipoll_dev->input); + bma150->input_polled = ipoll_dev; + bma150->input = ipoll_dev->input; + error = input_register_polled_device(ipoll_dev); if (error) { input_free_polled_device(ipoll_dev); return error; } - bma150->input_polled = ipoll_dev; - bma150->input = ipoll_dev->input; - return 0; } diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c index 3e9c353d82effcd4a6e29396470bde27e5f19000..a01b25facf464a8ae90126218a02fca5744edc71 100644 --- a/drivers/input/misc/da9063_onkey.c +++ b/drivers/input/misc/da9063_onkey.c @@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev) onkey->input->phys = onkey->phys; onkey->input->dev.parent = &pdev->dev; - if (onkey->key_power) - input_set_capability(onkey->input, EV_KEY, KEY_POWER); - - input_set_capability(onkey->input, EV_KEY, KEY_SLEEP); + input_set_capability(onkey->input, EV_KEY, KEY_POWER); INIT_DELAYED_WORK(&onkey->work, da9063_poll_on); diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c index e8de3aaf9f6330a276c99387fab80573a5bf6dd6..14f48e10f589e81b8d5f38314cb0d6f6708589bb 100644 --- a/drivers/input/misc/powermate.c +++ b/drivers/input/misc/powermate.c @@ -424,6 +424,7 @@ static void powermate_disconnect(struct usb_interface *intf) pm->requires_update = 0; usb_kill_urb(pm->irq); input_unregister_device(pm->input); + usb_kill_urb(pm->config); usb_free_urb(pm->irq); usb_free_urb(pm->config); powermate_free_buffers(interface_to_usbdev(intf), pm); diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550743edf6505a01cc3af53404910a9..dbb6d9e1b9471380b50d66af222c89c75d472ac5 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c @@ -34,6 +34,7 @@ struct pwm_vibrator { struct work_struct play_work; u16 level; u32 direction_duty_cycle; + bool vcc_on; }; static int pwm_vibrator_start(struct pwm_vibrator *vibrator) @@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) struct pwm_state state; int err; - err = regulator_enable(vibrator->vcc); - if (err) { - dev_err(pdev, "failed to enable regulator: %d", err); - return err; + if (!vibrator->vcc_on) { + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + vibrator->vcc_on = true; } pwm_get_state(vibrator->pwm, &state); @@ -76,11 +80,14 @@ static int 
pwm_vibrator_start(struct pwm_vibrator *vibrator) static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) { - regulator_disable(vibrator->vcc); - if (vibrator->pwm_dir) pwm_disable(vibrator->pwm_dir); pwm_disable(vibrator->pwm); + + if (vibrator->vcc_on) { + regulator_disable(vibrator->vcc); + vibrator->vcc_on = false; + } } static void pwm_vibrator_play_work(struct work_struct *work) diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 23520df7650f5bc9261c3b58cac22a8dfc27423f..55cd6e0b409c31469ba15a1e40b52873ddf2fcd8 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -373,7 +373,7 @@ static struct soc_button_info soc_button_PNP0C40[] = { { "home", 1, EV_KEY, KEY_LEFTMETA, false, true }, { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false }, { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false }, - { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false }, + { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false }, { } }; diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 8ec483e8688be194078d07f3b47fa40d7f75e9ac..845e3632f8aa80cf27366f636bf324ffbf13a747 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include "../input-compat.h" @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, const struct input_absinfo *abs) { - int min, max; + int min, max, range; min = abs->minimum; max = abs->maximum; @@ -417,13 +418,27 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, return -EINVAL; } - if (abs->flat > max - min) { + if (!check_sub_overflow(max, min, &range) && abs->flat > range) { printk(KERN_DEBUG "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", UINPUT_NAME, code, abs->flat, min, max); return -EINVAL; } + /* + * Limit number of contacts to a reasonable value (100). This + * ensures that we need less than 2 pages for struct input_mt + * (we are not using in-kernel slot assignment so not going to + * allocate memory for the "red" table), and we should have no + * trouble getting this much memory. + */ + if (code == ABS_MT_SLOT && max > 99) { + printk(KERN_DEBUG + "%s: unreasonably large number of slots requested: %d\n", + UINPUT_NAME, max); + return -EINVAL; + } + return 0; } @@ -1050,13 +1065,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT -#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +/* + * These IOCTLs change their size and thus their numbers between + * 32 and 64 bits. 
+ */ +#define UI_SET_PHYS_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) +#define UI_BEGIN_FF_UPLOAD_COMPAT \ + _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat) +#define UI_END_FF_UPLOAD_COMPAT \ + _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat) static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - if (cmd == UI_SET_PHYS_COMPAT) + switch (cmd) { + case UI_SET_PHYS_COMPAT: cmd = UI_SET_PHYS; + break; + case UI_BEGIN_FF_UPLOAD_COMPAT: + cmd = UI_BEGIN_FF_UPLOAD; + break; + case UI_END_FF_UPLOAD_COMPAT: + cmd = UI_END_FF_UPLOAD; + break; + } return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 0a6f7ca883e7fe816a82d0dae915ab120e6681e9..dd80ff6cc4273079fa8f75806d89df97e914fa01 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -24,6 +24,7 @@ #include "psmouse.h" #include "alps.h" +#include "trackpoint.h" /* * Definitions for ALPS version 3 and 4 command mode protocol @@ -2864,6 +2865,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7, return NULL; } +static bool alps_is_cs19_trackpoint(struct psmouse *psmouse) +{ + u8 param[2] = { 0 }; + + if (ps2_command(&psmouse->ps2dev, + param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) + return false; + + /* + * param[0] contains the trackpoint device variant_id while + * param[1] contains the firmware_id. So far all alps + * trackpoint-only devices have their variant_ids equal + * TP_VARIANT_ALPS and their firmware_ids are in 0x20~0x2f range. + */ + return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20); +} + static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) { const struct alps_protocol_info *protocol; @@ -3164,6 +3182,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) if (error) return error; + /* + * ALPS cs19 is a trackpoint-only device, and uses different + * protocol than DualPoint ones, so we return -EINVAL here and let + * trackpoint.c drive this device. If the trackpoint driver is not + * enabled, the device will fall back to a bare PS/2 mouse. + * If ps2_command() fails here, we depend on the immediately + * followed psmouse_reset() to reset the device to normal state. 
+ */ + if (alps_is_cs19_trackpoint(psmouse)) { + psmouse_dbg(psmouse, + "ALPS CS19 trackpoint-only device detected, ignoring\n"); + return -EINVAL; + } + /* * Reset the device to make sure it is fully operational: * on some laptops, like certain Dell Latitudes, we may diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index f1e66e257cff7843d93c3f1f10dcf2311b805b3b..e305a4f2b0f049de084169f9164998ed145c0f5e 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -930,6 +930,8 @@ static int atp_probe(struct usb_interface *iface, set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); set_bit(BTN_LEFT, input_dev->keybit); + INIT_WORK(&dev->work, atp_reinit); + error = input_register_device(dev->input); if (error) goto err_free_buffer; @@ -937,8 +939,6 @@ static int atp_probe(struct usb_interface *iface, /* save our data pointer in this interface device */ usb_set_intfdata(iface, dev); - INIT_WORK(&dev->work, atp_reinit); - return 0; err_free_buffer: diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index b0f9d19b3410ae1867e1c134b30f8ccb8a1e5bd3..ae012639ae1d52bf30ee59f03884a3fea52eeeac 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1337,18 +1337,48 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, { "ELAN0600", 0 }, + { "ELAN0601", 0 }, { "ELAN0602", 0 }, + { "ELAN0603", 0 }, + { "ELAN0604", 0 }, { "ELAN0605", 0 }, + { "ELAN0606", 0 }, + { "ELAN0607", 0 }, { "ELAN0608", 0 }, { "ELAN0609", 0 }, { "ELAN060B", 0 }, { "ELAN060C", 0 }, + { "ELAN060F", 0 }, + { "ELAN0610", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0615", 0 }, + { "ELAN0616", 0 }, + { "ELAN0617", 0 }, { "ELAN0618", 0 }, + { "ELAN0619", 0 }, + { "ELAN061A", 0 }, +/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */ { "ELAN061C", 0 }, { "ELAN061D", 0 }, + { "ELAN061E", 0 }, + { "ELAN061F", 0 }, + { "ELAN0620", 0 }, + { "ELAN0621", 0 }, { "ELAN0622", 0 }, + { "ELAN0623", 0 }, + { "ELAN0624", 0 }, + { "ELAN0625", 0 }, + { "ELAN0626", 0 }, + { "ELAN0627", 0 }, + { "ELAN0628", 0 }, + { "ELAN0629", 0 }, + { "ELAN062A", 0 }, + { "ELAN062B", 0 }, + { "ELAN062C", 0 }, + { "ELAN062D", 0 }, + { "ELAN0631", 0 }, + { "ELAN0632", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d95e8d93cc761aefb102217473d940faf1e4d02..530142b5a115457588a7330131f462ca4b7830c6 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? 
clickpad + * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) + * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons @@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), }, }, + { + /* Fujitsu H780 also has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), + }, + }, #endif { } }; @@ -1180,6 +1189,8 @@ static const char * const middle_button_pnp_ids[] = { "LEN2132", /* ThinkPad P52 */ "LEN2133", /* ThinkPad P72 w/ NFC */ "LEN2134", /* ThinkPad P72 */ + "LEN0407", + "LEN0408", NULL }; @@ -1767,6 +1778,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ? module_param_named(elantech_smbus, elantech_smbus, int, 0644); MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device."); +static const char * const i2c_blacklist_pnp_ids[] = { + /* + * These are known to not be working properly as bits are missing + * in elan_i2c. + */ + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + "LEN2133", /* ThinkPad P72 w/ NFC */ + "LEN2134", /* ThinkPad P72 */ + NULL +}; + static int elantech_create_smbus(struct psmouse *psmouse, struct elantech_device_info *info, bool leave_breadcrumbs) @@ -1802,10 +1825,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse, if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) { /* - * New ICs are enabled by default. + * New ICs are enabled by default, unless mentioned in + * i2c_blacklist_pnp_ids. * Old ICs are up to the user to decide. 
*/ - if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) + if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) || + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids)) return -ENXIO; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 55d33500d55ec4ffb71ebc31c9a3e604cf00ea06..e8d1134943c4fb155e3246d2c06d236fdabe7a26 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -99,9 +99,7 @@ static int synaptics_mode_cmd(struct psmouse *psmouse, u8 mode) int synaptics_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; - u8 param[4]; - - param[0] = 0; + u8 param[4] = { 0 }; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES); @@ -172,13 +170,23 @@ static const char * const smbus_pnp_ids[] = { "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN004a", /* W541 */ + "LEN005b", /* P50 */ + "LEN005e", /* T560 */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ + "LEN0091", /* X1 Carbon 6 */ "LEN0092", /* X1 Carbon 6 */ + "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN009b", /* T580 */ "LEN200f", /* T450s */ + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ + "SYN3052", /* HP EliteBook 840 G4 */ + "SYN3221", /* HP 15-ay000 */ + "SYN323d", /* HP Spectre X360 13-w013dx */ NULL }; diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 10a0391482343d4ba482336367b204cf64f6e47c..538986e5ac5bcf4f5c38267db393a30fedb0dee6 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -161,7 +161,8 @@ struct trackpoint_data { #ifdef CONFIG_MOUSE_PS2_TRACKPOINT int trackpoint_detect(struct psmouse *psmouse, bool set_properties); #else -inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties) +static inline int trackpoint_detect(struct psmouse *psmouse, + bool set_properties) { return -ENOSYS; } diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index bd0d5ff01b08f9c88920b03f56dbb4a3eed21af3..02408487b442364e1e69dc3bb543f6a0a8d40e66 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -279,11 +279,11 @@ void rmi_unregister_function(struct rmi_function *fn) device_del(&fn->dev); of_node_put(fn->dev.of_node); - put_device(&fn->dev); for (i = 0; i < fn->num_of_irqs; i++) irq_dispose_mapping(fn->irq[i]); + put_device(&fn->dev); } /** diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index fc3ab93b7aea454475ee324eecee91470c4a9dc3..162526a0d463eba4e465e211d63e449348d0a3f2 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -149,7 +149,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) } mutex_lock(&data->irq_mutex); - bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask, + bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits, data->irq_count); /* * At this point, irq_status has all bits that are set in the @@ -388,6 +388,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev, bitmap_copy(data->current_irq_mask, data->new_irq_mask, data->num_of_irq_regs); + bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count); + error_unlock: mutex_unlock(&data->irq_mutex); return error; @@ -401,6 +403,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev, struct device *dev = 
&rmi_dev->dev; mutex_lock(&data->irq_mutex); + bitmap_andnot(data->fn_irq_bits, + data->fn_irq_bits, mask, data->irq_count); bitmap_andnot(data->new_irq_mask, data->current_irq_mask, mask, data->irq_count); @@ -860,7 +864,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev, error = rmi_register_function(fn); if (error) - goto err_put_fn; + return error; if (pdt->function_number == 0x01) data->f01_container = fn; @@ -870,10 +874,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev, list_add_tail(&fn->node, &data->function_list); return RMI_SCAN_CONTINUE; - -err_put_fn: - put_device(&fn->dev); - return error; } void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56..c8e07ea2422b264253d233b4d91486f0c5300043 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c @@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn) } rc = f11_write_control_regs(fn, &f11->sens_query, - &f11->dev_controls, fn->fd.query_base_addr); + &f11->dev_controls, fn->fd.control_base_addr); if (rc) dev_warn(&fn->dev, "Failed to write control registers\n"); @@ -1287,8 +1287,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx) valid_bytes = f11->sensor.attn_size; memcpy(f11->sensor.data_pkt, drvdata->attn_data.data, valid_bytes); - drvdata->attn_data.data += f11->sensor.attn_size; - drvdata->attn_data.size -= f11->sensor.attn_size; + drvdata->attn_data.data += valid_bytes; + drvdata->attn_data.size -= valid_bytes; } else { error = rmi_read_block(rmi_dev, data_base_addr, f11->sensor.data_pkt, diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c index 5c7f489157792bf32da34e982b715824ec17eaff..9066f2b70ff0e49c9be1ae9568c6120f099046b0 100644 --- a/drivers/input/rmi4/rmi_f12.c +++ b/drivers/input/rmi4/rmi_f12.c @@ -58,6 +58,9 @@ struct f12_data { const struct rmi_register_desc_item *data15; u16 data15_offset; + + unsigned long *abs_mask; + unsigned long *rel_mask; }; static int rmi_f12_read_sensor_tuning(struct f12_data *f12) @@ -214,8 +217,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx) valid_bytes = sensor->attn_size; memcpy(sensor->data_pkt, drvdata->attn_data.data, valid_bytes); - drvdata->attn_data.data += sensor->attn_size; - drvdata->attn_data.size -= sensor->attn_size; + drvdata->attn_data.data += valid_bytes; + drvdata->attn_data.size -= valid_bytes; } else { retval = rmi_read_block(rmi_dev, f12->data_addr, sensor->data_pkt, sensor->pkt_size); @@ -296,9 +299,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn) static int rmi_f12_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; + struct f12_data *f12 = dev_get_drvdata(&fn->dev); + struct rmi_2d_sensor *sensor; int ret; - drv->set_irq_bits(fn->rmi_dev, fn->irq_mask); + sensor = &f12->sensor; + + if (!sensor->report_abs) + drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask); + else + drv->set_irq_bits(fn->rmi_dev, f12->abs_mask); + + drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask); ret = rmi_f12_write_control_regs(fn); if (ret) @@ -320,9 +332,12 @@ static int rmi_f12_probe(struct rmi_function *fn) struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); u16 data_offset = 0; + int mask_size; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__); + mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long); + ret 
= rmi_read(fn->rmi_dev, query_addr, &buf); if (ret < 0) { dev_err(&fn->dev, "Failed to read general info register: %d\n", @@ -337,10 +352,19 @@ static int rmi_f12_probe(struct rmi_function *fn) return -ENODEV; } - f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL); + f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2, + GFP_KERNEL); if (!f12) return -ENOMEM; + f12->abs_mask = (unsigned long *)((char *)f12 + + sizeof(struct f12_data)); + f12->rel_mask = (unsigned long *)((char *)f12 + + sizeof(struct f12_data) + mask_size); + + set_bit(fn->irq_pos, f12->abs_mask); + set_bit(fn->irq_pos + 1, f12->rel_mask); + f12->has_dribble = !!(buf & BIT(3)); if (fn->dev.of_node) { diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c index 3991d2943660c56d189fba623efbe014ee0d586b..099dde68e332dc2a1448232543bdecd6de1b38e2 100644 --- a/drivers/input/rmi4/rmi_f34v7.c +++ b/drivers/input/rmi4/rmi_f34v7.c @@ -1192,6 +1192,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw) { int ret; + f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev, + f34->fn->irq_mask); + rmi_f34v7_read_queries_bl_version(f34); f34->v7.image = fw->data; diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index a6f515bcab2228a8783f10dbf10fae30462fd852..539a47425fcd964367466179997b0e6faa93671b 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -362,7 +362,7 @@ static const struct vb2_ops rmi_f54_queue_ops = { static const struct vb2_queue rmi_f54_queue = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ, - .buf_struct_size = sizeof(struct vb2_buffer), + .buf_struct_size = sizeof(struct vb2_v4l2_buffer), .ops = &rmi_f54_queue_ops, .mem_ops = &vb2_vmalloc_memops, .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, @@ -614,7 +614,7 @@ static int rmi_f54_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; - drv->set_irq_bits(fn->rmi_dev, fn->irq_mask); + drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask); return 0; } @@ -742,6 +742,7 @@ static void rmi_f54_remove(struct rmi_function *fn) video_unregister_device(&f54->vdev); v4l2_device_unregister(&f54->v4l2); + destroy_workqueue(f54->workqueue); } struct rmi_function_handler rmi_f54_handler = { diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index b6ccf39c6a7bb46bc67bf086952a799e27f4f930..4b2466cf2fb1c9181ce6038750a9586f10ee983a 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -166,7 +166,6 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to write next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; - rmiaddr += SMB_MAX_COUNT; } exit: mutex_unlock(&rmi_smb->page_mutex); @@ -218,7 +217,6 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to read next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; - rmiaddr += SMB_MAX_COUNT; } retval = 0; diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 49d8d53e50b7bd105ceeca5150dda23f2f3771d7..96f9b5397367fca2388b3186296611ae7287dec9 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -381,9 +381,9 @@ static int __init gscps2_probe(struct parisc_device *dev) goto fail; #endif - printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n", + pr_info("serio: %s port at 0x%08lx irq %d @ %s\n", ps2port->port->name, - 
ps2port->addr, + hpa, ps2port->padev->irq, ps2port->port->phys); diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 0b8a25c58d02e65865aabfceca26444c93b7aa4a..654252361653df1cb61d1ffd105d5ed4aa7fca71 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -884,8 +884,8 @@ static int __init hp_sdc_init(void) "HP SDC NMI", &hp_sdc)) goto err2; - printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n", - (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi); + pr_info(PREFIX "HP SDC at 0x%08lx, IRQ %d (NMI IRQ %d)\n", + hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi); hp_sdc_status_in8(); hp_sdc_data_in8(); diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 47a0e81a2989c93389e2affd20414d17057cde79..7d0a5ccf5775122f7240f32061372d72de77f089 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, * state because the Enter-UP can trigger a wakeup at once. */ if (!(info & IS_BREAK)) - pm_wakeup_event(&hv_dev->device, 0); + pm_wakeup_hard_event(&hv_dev->device); break; @@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev, static void hv_kbd_on_channel_callback(void *context) { + struct vmpacket_descriptor *desc; struct hv_device *hv_dev = context; - void *buffer; - int bufferlen = 0x100; /* Start with sensible size */ u32 bytes_recvd; u64 req_id; - int error; - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - while (1) { - error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, - &bytes_recvd, &req_id); - switch (error) { - case 0: - if (bytes_recvd == 0) { - kfree(buffer); - return; - } - - hv_kbd_handle_received_packet(hv_dev, buffer, - bytes_recvd, req_id); - break; + foreach_vmbus_pkt(desc, hv_dev->channel) { + bytes_recvd = desc->len8 * 8; + req_id = desc->trans_id; - case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (!buffer) - return; - break; - } + hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd, + req_id); } } diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb15bc9ff172a51757ea88673226bde..5e8d8384aa2a5da4508f789f96a23ddd3cbde425 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c @@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) { struct ps2_gpio_data *drvdata = serio->port_data; + flush_delayed_work(&drvdata->tx_work); disable_irq(drvdata->irq); } diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index c82cd5079d0e6207cc22a6ed5f2155f2e6d156e2..8f418d984a2d6ed49302bbe3a9916e4d501656fb 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1814,15 +1814,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); - /* Verify that a device really has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + err = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &endpoint, NULL); + if (err) { dev_err(&intf->dev, - "interface has %d endpoints, but must have minimum 1\n", - intf->altsetting[0].desc.bNumEndpoints); - err = -EINVAL; + "interface has no int in endpoints, but must have 
minimum 1\n"); goto fail3; } - endpoint = &intf->altsetting[0].endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 4b8b9d7aa75e2785991fc5838bf55728c715e6fe..35031228a6d076cf0dd91dab411f049ec679df6b 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com /* Max size of a single report */ #define REPORT_MAX_SIZE 10 +#define MAX_COLLECTION_LEVELS 10 /* Bitmask whether pen is in range */ @@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, char maintype = 'x'; char globtype[12]; int indent = 0; - char indentstr[10] = ""; - + char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 }; dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n"); @@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, case TAG_MAIN_COL_START: maintype = 'S'; + if (indent == MAX_COLLECTION_LEVELS) { + dev_err(ddev, "Collection level %d would exceed limit of %d\n", + indent + 1, + MAX_COLLECTION_LEVELS); + break; + } + if (data == 0) { dev_dbg(ddev, "======>>>>>> Physical\n"); strcpy(globtype, "Physical"); @@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, break; case TAG_MAIN_COL_END: - dev_dbg(ddev, "<<<<<<======\n"); maintype = 'E'; + + if (indent == 0) { + dev_err(ddev, "Collection level already at zero\n"); + break; + } + + dev_dbg(ddev, "<<<<<<======\n"); + indent--; for (x = 0; x < indent; x++) indentstr[x] = '-'; diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 75b500651e4e4051f1ec403e02febed6d59cd72f..b1cf0c9712740dc9b552907160d11b37fedecfb2 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; + endpoint = &intf->cur_altsetting->endpoint[0].desc; + if (!usb_endpoint_is_int_in(endpoint)) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) @@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); - endpoint = &intf->cur_altsetting->endpoint[0].desc; - usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c index 38bfaca48eab188cd50404428976a2e1661a172b..150f9eecaca706b887cbddbe122e71fe274a350a 100644 --- a/drivers/input/tablet/wacom_serial4.c +++ b/drivers/input/tablet/wacom_serial4.c @@ -187,6 +187,7 @@ enum { MODEL_DIGITIZER_II = 0x5544, /* UD */ MODEL_GRAPHIRE = 0x4554, /* ET */ MODEL_PENPARTNER = 0x4354, /* CT */ + MODEL_ARTPAD_II = 0x4B54, /* KT */ }; static void wacom_handle_model_response(struct wacom *wacom) @@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom) wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL; break; + case MODEL_ARTPAD_II: case MODEL_DIGITIZER_II: wacom->dev->name = "Wacom Digitizer II"; wacom->dev->id.version = MODEL_DIGITIZER_II; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 
3232af5dcf89460db7c06fa531d7f58b4b6f8132..e8f98de60df3acaff89740688e14e073b5e8eb0e 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -1586,10 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw) /* T7 config may have changed */ mxt_init_t7_power_cfg(data); -release_raw: - kfree(cfg.raw); release_mem: kfree(cfg.mem); +release_raw: + kfree(cfg.raw); return ret; } @@ -3162,6 +3162,8 @@ static int __maybe_unused mxt_suspend(struct device *dev) mutex_unlock(&input_dev->mutex); + disable_irq(data->irq); + return 0; } @@ -3174,6 +3176,8 @@ static int __maybe_unused mxt_resume(struct device *dev) if (!input_dev) return 0; + enable_irq(data->irq); + mutex_lock(&input_dev->mutex); if (input_dev->users) diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c index 727c3232517cdbded6488e353e30b203c2110650..c84ee739a8d50cd4e811dac9b5c4aae9db728172 100644 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ b/drivers/input/touchscreen/cyttsp4_core.c @@ -2000,11 +2000,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd) /* get sysinfo */ md->si = &cd->sysinfo; - if (!md->si) { - dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n", - __func__, md->si); - goto error_get_sysinfo; - } rc = cyttsp4_setup_input_device(cd); if (rc) @@ -2014,8 +2009,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd) error_init_input: input_free_device(md->input); -error_get_sysinfo: - input_set_drvdata(md->input, NULL); error_alloc_failed: dev_err(dev, "%s failed.\n", __func__); return rc; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index f2d9c2c4188558a38cc8458e4561d4ead7d7a5f3..b20ba65992735253bf71ebce5a10ab2ea1b14d40 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -127,6 +127,15 @@ static const unsigned long goodix_irq_flags[] = { */ static const struct dmi_system_id rotated_screen[] = { #if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Teclast X89", + .matches = { + /* tPAD is too generic, also match on bios date */ + DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), + DMI_MATCH(DMI_BOARD_NAME, "tPAD"), + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"), + }, + }, { .ident = "WinBook TW100", .matches = { diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index d196ac3d8b8cda8e1cf405101ed5603473db821d..06f0eb04a8fd4b2d331d084b885516de9e3bc30d 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -558,20 +558,33 @@ static int __maybe_unused silead_ts_suspend(struct device *dev) static int __maybe_unused silead_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); + bool second_try = false; int error, status; silead_ts_set_power(client, SILEAD_POWER_ON); + retry: error = silead_ts_reset(client); if (error) return error; + if (second_try) { + error = silead_ts_load_fw(client); + if (error) + return error; + } + error = silead_ts_startup(client); if (error) return error; status = silead_ts_get_status(client); if (status != SILEAD_STATUS_OK) { + if (!second_try) { + second_try = true; + dev_dbg(dev, "Reloading firmware after unsuccessful resume\n"); + goto retry; + } dev_err(dev, "Resume error, status: 0x%02x\n", status); return -ENODEV; } @@ -604,6 +617,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = { { "MSSL1680", 0 }, { "MSSL0001", 0 }, { "MSSL0002", 0 }, + { "MSSL0017", 0 }, { } }; 
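For context, the MSSL0017 change above works purely through the driver's ACPI ID table: once the ID is listed, the I2C/ACPI core handles binding and module autoloading. Below is a minimal sketch of that usual pattern, under hypothetical example_ts_* names (it is not part of the patch itself); probe/remove callbacks are omitted.

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

static const struct acpi_device_id example_ts_acpi_match[] = {
        { "MSSL1680", 0 },
        { "MSSL0017", 0 },      /* newly supported hardware ID */
        { }
};
MODULE_DEVICE_TABLE(acpi, example_ts_acpi_match);

static struct i2c_driver example_ts_driver = {
        .driver = {
                .name             = "example_ts",
                .acpi_match_table = example_ts_acpi_match,
        },
        /* .probe / .remove omitted in this sketch */
};
module_i2c_driver(example_ts_driver);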
MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match); diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index d5dfa4053bbf2359c94bef8748cab1cda367aaf4..b71673911aac303bbc61e34c1375582c6e7dbddf 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -195,6 +195,7 @@ static int st1232_ts_probe(struct i2c_client *client, input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); __set_bit(EV_SYN, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __set_bit(EV_ABS, input_dev->evbit); diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 704e9904691642750143a92af565afb9a06e8203..b6f95f20f92442678c4de31a6d7576dea0cb7056 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -106,27 +106,29 @@ struct stmfts_data { bool running; }; -static void stmfts_brightness_set(struct led_classdev *led_cdev, +static int stmfts_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct stmfts_data *sdata = container_of(led_cdev, struct stmfts_data, led_cdev); int err; - if (value == sdata->led_status || !sdata->ledvdd) - return; - - if (!value) { - regulator_disable(sdata->ledvdd); - } else { - err = regulator_enable(sdata->ledvdd); - if (err) - dev_warn(&sdata->client->dev, - "failed to disable ledvdd regulator: %d\n", - err); + if (value != sdata->led_status && sdata->ledvdd) { + if (!value) { + regulator_disable(sdata->ledvdd); + } else { + err = regulator_enable(sdata->ledvdd); + if (err) { + dev_warn(&sdata->client->dev, + "failed to disable ledvdd regulator: %d\n", + err); + return err; + } + } + sdata->led_status = value; } - sdata->led_status = value; + return 0; } static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev) @@ -608,7 +610,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata) sdata->led_cdev.name = STMFTS_DEV_NAME; sdata->led_cdev.max_brightness = LED_ON; sdata->led_cdev.brightness = LED_OFF; - sdata->led_cdev.brightness_set = stmfts_brightness_set; + sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set; sdata->led_cdev.brightness_get = stmfts_brightness_get; err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev); diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index d61570d64ee76bd8ac161daa6a9dc55b23a5ee40..48304e26f988b15f0947d1ecccfa4e6e5878b995 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf, if (!usbtouch || !input_dev) goto out_free; + mutex_init(&usbtouch->pm_mutex); + type = &usbtouch_dev_info[id->driver_info]; usbtouch->type = type; if (!type->process_pkt) diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 2566b4d8b3428286adb1cc6c4988ffe09d271f34..73856c2a8ac0f8dba61e72853e5b4f2e839c8487 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -929,7 +929,8 @@ static int __init wm97xx_init(void) static void __exit wm97xx_exit(void) { - driver_unregister(&wm97xx_driver); + if (IS_BUILTIN(CONFIG_AC97_BUS)) + driver_unregister(&wm97xx_driver); platform_driver_unregister(&wm97xx_mfd_driver); } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 
c60395b7470f640cc229c53bbd825235d61194c1..53dd814adeb57f94400b165f60d925188db887f2 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -60,6 +60,23 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST endmenu +menu "Generic PASID table support" + +config IOMMU_PASID_TABLE + bool + +config ARM_SMMU_V3_CONTEXT + bool "ARM SMMU v3 Context Descriptor tables" + select IOMMU_PASID_TABLE + depends on ARM64 + help + Enable support for ARM SMMU v3 Context Descriptor tables, used for + DMA and PASID support. + + If unsure, say N here. + +endmenu + config IOMMU_DEBUGFS bool "Export IOMMU internals in DebugFS" depends on DEBUG_FS @@ -95,6 +112,15 @@ config IOMMU_DMA select IOMMU_IOVA select NEED_SG_DMA_LENGTH +config IOMMU_SVA + bool + select IOMMU_API + select MMU_NOTIFIER + +config IOMMU_PAGE_FAULT + bool + select IOMMU_API + config FSL_PAMU bool "Freescale IOMMU support" depends on PCI @@ -135,7 +161,7 @@ config AMD_IOMMU select PCI_PASID select IOMMU_API select IOMMU_IOVA - depends on X86_64 && PCI && ACPI + depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE ---help--- With this option you can enable support for AMD IOMMU hardware in your system. An IOMMU is a hardware component which provides @@ -348,7 +374,10 @@ config ARM_SMMU_V3 bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support" depends on ARM64 select IOMMU_API + select IOMMU_SVA + select IOMMU_PAGE_FAULT select IOMMU_IO_PGTABLE_LPAE + select ARM_SMMU_V3_CONTEXT select GENERIC_MSI_IRQ_DOMAIN help Support for implementations of the ARM System MMU architecture @@ -414,4 +443,18 @@ config QCOM_IOMMU help Support for IOMMU on certain Qualcomm SoCs. +config SMMU_BYPASS_DEV + bool "SMMU bypass streams for some specific devices or CPU SoCs" + help + Using the smmu.bypassdev cmdline, to collect the devices that SMMU + performs attribute transformation only, with no address translation. + This function will be replaced by IORT RMR node, which will be + upstreamed in mainline. + Also extended this function to support SMMU bypass for some CPU SoCs + which the SMMU for some platform is not functional well. Since mainline + kernel already has the .def_domain_type hook for iommu_ops, so if we + update the kernel in the future, we can add the SMMU bypass code for + some CPU SoCs in the .def_domain_type hook, for now we just reuse the + dev bypass. 
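The help text above points at the mainline .def_domain_type hook in struct iommu_ops as the longer-term home for this bypass logic. A minimal sketch of what such a hook could look like follows (illustrative only, not part of the patch); the smmu_dev_is_bypassed() helper is hypothetical and stands in for whatever per-device bypass list the platform maintains.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical helper: true if this device should skip translation. */
static bool smmu_dev_is_bypassed(struct device *dev);

static int example_smmu_def_domain_type(struct device *dev)
{
        /*
         * Request an identity (bypass) default domain for listed devices;
         * returning 0 lets the IOMMU core pick the default domain type.
         */
        if (smmu_dev_is_bypassed(dev))
                return IOMMU_DOMAIN_IDENTITY;

        return 0;
}
/* Wired up as the .def_domain_type member of the driver's iommu_ops. */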
+ endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index ab5eba6edf82b308f81a7c5acf9f992e8e0e2dde..a6f94cc89f92444a1e72efb7013c426409fe06b7 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -4,13 +4,17 @@ obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o +obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o +obj-$(CONFIG_IOMMU_PAGE_FAULT) += io-pgfault.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o +obj-$(CONFIG_IOMMU_PASID_TABLE) += iommu-pasid-table.o +obj-$(CONFIG_ARM_SMMU_V3_CONTEXT) += arm-smmu-v3-context.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index bee0dfb7b93b1b219b393405d32870cab9964cf1..9db6a8af9617e813a0858b9dd974007b489f3e96 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -139,10 +139,14 @@ static struct lock_class_key reserved_rbtree_key; static inline int match_hid_uid(struct device *dev, struct acpihid_map_entry *entry) { + struct acpi_device *adev = ACPI_COMPANION(dev); const char *hid, *uid; - hid = acpi_device_hid(ACPI_COMPANION(dev)); - uid = acpi_device_uid(ACPI_COMPANION(dev)); + if (!adev) + return -ENODEV; + + hid = acpi_device_hid(adev); + uid = acpi_device_uid(adev); if (!hid || !(*hid)) return -ENODEV; @@ -438,7 +442,14 @@ static int iommu_init_device(struct device *dev) dev_data->alias = get_alias(dev); - if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { + /* + * By default we use passthrough mode for IOMMUv2 capable device. + * But if amd_iommu=force_isolation is set (e.g. to debug DMA to + * invalid address), we ignore the capability for the device so + * it'll be forced to go into translation mode. 
+ */ + if ((iommu_pass_through || !amd_iommu_force_isolation) && + dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { struct amd_iommu *iommu; iommu = amd_iommu_rlookup_table[dev_data->devid]; @@ -538,7 +549,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, dev_data = get_dev_data(&pdev->dev); if (dev_data && __ratelimit(&dev_data->rs)) { - dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n", + dev_err(&pdev->dev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n", domain_id, address, flags); } else if (printk_ratelimit()) { pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n", @@ -561,7 +572,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) retry: type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; - pasid = PPR_PASID(*(u64 *)&event[0]); + pasid = (event[0] & EVENT_DOMID_MASK_HI) | + (event[1] & EVENT_DOMID_MASK_LO); flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; address = (u64)(((u64)event[3]) << 32) | event[2]; @@ -578,43 +590,41 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) if (type == EVENT_TYPE_IO_FAULT) { amd_iommu_report_page_fault(devid, pasid, address, flags); return; - } else { - dev_err(dev, "AMD-Vi: Event logged ["); } switch (type) { case EVENT_TYPE_ILL_DEV: - dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", + dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); dump_dte_entry(devid); break; case EVENT_TYPE_DEV_TAB_ERR: - dev_err(dev, "DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " + dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " "address=0x%016llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), address, flags); break; case EVENT_TYPE_PAGE_TAB_ERR: - dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n", + dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%016llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; case EVENT_TYPE_ILL_CMD: - dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); + dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); dump_command(address); break; case EVENT_TYPE_CMD_HARD_ERR: - dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n", + dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n", address, flags); break; case EVENT_TYPE_IOTLB_INV_TO: - dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n", + dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), address); break; case EVENT_TYPE_INV_DEV_REQ: - dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", + dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; @@ -622,12 +632,12 @@ static void 
iommu_print_event(struct amd_iommu *iommu, void *__evt) pasid = ((event[0] >> 16) & 0xFFFF) | ((event[1] << 6) & 0xF0000); tag = event[1] & 0x03FF; - dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", + dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; default: - dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n", + dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n", event[0], event[1], event[2], event[3]); } @@ -1146,6 +1156,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } +static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) +{ + struct iommu_cmd cmd; + + build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + dom_id, 1); + iommu_queue_command(iommu, &cmd); + + iommu_completion_wait(iommu); +} + static void amd_iommu_flush_all(struct amd_iommu *iommu) { struct iommu_cmd cmd; @@ -1322,18 +1343,21 @@ static void domain_flush_devices(struct protection_domain *domain) * another level increases the size of the address space by 9 bits to a size up * to 64 bits. */ -static bool increase_address_space(struct protection_domain *domain, +static void increase_address_space(struct protection_domain *domain, gfp_t gfp) { + unsigned long flags; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) /* address space already 64 bit large */ - return false; + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, iommu_virt_to_phys(domain->pt_root)); @@ -1341,7 +1365,10 @@ static bool increase_address_space(struct protection_domain *domain, domain->mode += 1; domain->updated = true; - return true; +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return; } static u64 *alloc_pte(struct protection_domain *domain, @@ -1831,6 +1858,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, { u64 pte_root = 0; u64 flags = 0; + u32 old_domid; if (domain->mode != PAGE_MODE_NONE) pte_root = iommu_virt_to_phys(domain->pt_root); @@ -1880,8 +1908,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, flags &= ~DEV_DOMID_MASK; flags |= domain->id; + old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; amd_iommu_dev_table[devid].data[1] = flags; amd_iommu_dev_table[devid].data[0] = pte_root; + + /* + * A kdump kernel might be replacing a domain ID that was copied from + * the previous kernel--if so, it needs to flush the translation cache + * entries for the old domain ID that is being overwritten + */ + if (old_domid) { + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + amd_iommu_flush_tlb_domid(iommu, old_domid); + } } static void clear_dte_entry(u16 devid) @@ -1922,16 +1962,13 @@ static void do_attach(struct iommu_dev_data *dev_data, static void do_detach(struct iommu_dev_data *dev_data) { + struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu; u16 alias; iommu = amd_iommu_rlookup_table[dev_data->devid]; alias = dev_data->alias; - /* decrease reference counters */ - dev_data->domain->dev_iommu[iommu->index] -= 1; - dev_data->domain->dev_cnt -= 1; - /* Update data structures */ dev_data->domain = 
NULL; list_del(&dev_data->list); @@ -1941,6 +1978,16 @@ static void do_detach(struct iommu_dev_data *dev_data) /* Flush the DTE entry */ device_flush_dte(dev_data); + + /* Flush IOTLB */ + domain_flush_tlb_pde(domain); + + /* Wait for the flushes to finish */ + domain_flush_complete(domain); + + /* decrease reference counters - needs to happen after the flushes */ + domain->dev_iommu[iommu->index] -= 1; + domain->dev_cnt -= 1; } /* @@ -2042,23 +2089,6 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev) return ret; } -/* FIXME: Move this to PCI code */ -#define PCI_PRI_TLP_OFF (1 << 15) - -static bool pci_pri_tlp_required(struct pci_dev *pdev) -{ - u16 status; - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); - if (!pos) - return false; - - pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); - - return (status & PCI_PRI_TLP_OFF) ? true : false; -} - /* * If a device is not yet associated with a domain, this function makes the * device visible in the domain @@ -2087,7 +2117,8 @@ static int attach_device(struct device *dev, dev_data->ats.enabled = true; dev_data->ats.qdep = pci_ats_queue_depth(pdev); - dev_data->pri_tlp = pci_pri_tlp_required(pdev); + dev_data->pri_tlp = + pci_prg_resp_requires_prefix(pdev); } } else if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) { @@ -2519,7 +2550,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, bus_addr = address + s->dma_address + (j << PAGE_SHIFT); phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); + ret = iommu_map_page(domain, bus_addr, phys_addr, + PAGE_SIZE, prot, + GFP_ATOMIC | __GFP_NOWARN); if (ret) goto out_unmap; @@ -2529,7 +2562,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, /* Everything is mapped - write the right values into s->dma_address */ for_each_sg(sglist, s, nelems, i) { - s->dma_address += address + s->offset; + /* + * Add in the remaining piece of the scatter-gather offset that + * was masked out when we were determining the physical address + * via (sg_phys(s) & PAGE_MASK) earlier. 
+ */ + s->dma_address += address + (s->offset & ~PAGE_MASK); s->dma_length = s->length; } @@ -2548,13 +2586,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, bus_addr = address + s->dma_address + (j << PAGE_SHIFT); iommu_unmap_page(domain, bus_addr, PAGE_SIZE); - if (--mapped_pages) + if (--mapped_pages == 0) goto out_free_iova; } } out_free_iova: - free_iova_fast(&dma_dom->iovad, address, npages); + free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); out_err: return 0; @@ -3100,21 +3138,24 @@ static void amd_iommu_get_resv_regions(struct device *dev, return; list_for_each_entry(entry, &amd_iommu_unity_map, list) { + int type, prot = 0; size_t length; - int prot = 0; if (devid < entry->devid_start || devid > entry->devid_end) continue; + type = IOMMU_RESV_DIRECT; length = entry->address_end - entry->address_start; if (entry->prot & IOMMU_PROT_IR) prot |= IOMMU_READ; if (entry->prot & IOMMU_PROT_IW) prot |= IOMMU_WRITE; + if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) + /* Exclusion range */ + type = IOMMU_RESV_RESERVED; region = iommu_alloc_resv_region(entry->address_start, - length, prot, - IOMMU_RESV_DIRECT); + length, prot, type); if (!region) { pr_err("Out of memory allocating dm-regions for %s\n", dev_name(dev)); @@ -3766,6 +3807,7 @@ static int alloc_irq_index(u16 devid, int count, bool align) static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, struct amd_ir_data *data) { + bool ret; struct irq_remap_table *table; struct amd_iommu *iommu; unsigned long flags; @@ -3783,10 +3825,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, entry = (struct irte_ga *)table->table; entry = &entry[index]; - entry->lo.fields_remap.valid = 0; - entry->hi.val = irte->hi.val; - entry->lo.val = irte->lo.val; - entry->lo.fields_remap.valid = 1; + + ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, + entry->lo.val, entry->hi.val, + irte->lo.val, irte->hi.val); + /* + * We use cmpxchg16 to atomically update the 128-bit IRTE, + * and it cannot be updated by the hardware or other processors + * behind us, so the return value of cmpxchg16 should be the + * same as the old value. + */ + WARN_ON(!ret); + if (data) data->ref = entry; @@ -4187,8 +4237,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, for (i = 0; i < nr_irqs; i++) { irq_data = irq_domain_get_irq_data(domain, virq + i); - cfg = irqd_cfg(irq_data); - if (!irq_data || !cfg) { + cfg = irq_data ? 
irqd_cfg(irq_data) : NULL; + if (!cfg) { ret = -EINVAL; goto out_free_data; } @@ -4338,10 +4388,12 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) } else { /* Un-Setting */ struct irq_cfg *cfg = irqd_cfg(data); + u64 valid = irte->lo.fields_remap.valid; irte->hi.val = 0; irte->lo.val = 0; irte->hi.fields.vector = cfg->vector; + irte->lo.fields_remap.valid = valid; irte->lo.fields_remap.guest_mode = 0; irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..12d540d9b59b0a8e1941934d695b1bc7312a7b0f --- /dev/null +++ b/drivers/iommu/amd_iommu.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef AMD_IOMMU_H +#define AMD_IOMMU_H + +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); + +#ifdef CONFIG_DMI +void amd_iommu_apply_ivrs_quirks(void); +#else +static void amd_iommu_apply_ivrs_quirks(void) { } +#endif + +#endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 84b3e4445d46d0a6064437dc57f1ff0ecbdd9f36..6c365277de3e84d462f98b4859037dd9dcf4d5f1 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,7 @@ #include #include +#include "amd_iommu.h" #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" @@ -356,7 +358,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) static void iommu_set_exclusion_range(struct amd_iommu *iommu) { u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; + u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; u64 entry; if (!iommu->exclusion_start) @@ -768,6 +770,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & (MMIO_STATUS_GALOG_RUN_MASK)) break; + udelay(10); } if (i >= LOOP_TIMEOUT) @@ -797,7 +800,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, &entry, sizeof(entry)); - entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; + entry = (iommu_virt_to_phys(iommu->ga_log_tail) & + (BIT_ULL(52)-1)) & ~7ULL; memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, &entry, sizeof(entry)); writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); @@ -1001,7 +1005,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; @@ -1152,6 +1156,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, if (ret) return ret; + amd_iommu_apply_ivrs_quirks(); + /* * First save the recommended feature enable bits from ACPI */ @@ -1519,7 +1525,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. + * GAM also requires GA mode.
Therefore, we need to + * check cmpxchg16b support before enabling it. + */ + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0)) amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; @@ -1530,7 +1543,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. + * XT and GAM also require GA mode. Therefore, we need to + * check cmpxchg16b support before enabling them. + */ + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; @@ -1709,7 +1729,7 @@ static const struct attribute_group *amd_iommu_groups[] = { NULL, }; -static int iommu_init_pci(struct amd_iommu *iommu) +static int __init iommu_init_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; u32 range, misc, low, high; @@ -2000,6 +2020,9 @@ static int __init init_unity_map_range(struct ivmd_header *m) if (e == NULL) return -ENOMEM; + if (m->flags & IVMD_FLAG_EXCL_RANGE) + init_exclusion_range(m); + switch (m->type) { default: kfree(e); @@ -2046,9 +2069,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) while (p < end) { m = (struct ivmd_header *)p; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - else if (m->flags & IVMD_FLAG_UNITY_MAP) + if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) init_unity_map_range(m); p += m->length; @@ -2343,6 +2364,9 @@ static void __init free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -2368,7 +2392,12 @@ static bool __init check_ioapic_information(void) pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c new file mode 100644 index 0000000000000000000000000000000000000000..5120ce4fdce326b6b59185d66e7ba8cb146bf4d5 --- /dev/null +++ b/drivers/iommu/amd_iommu_quirks.c @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Quirks for AMD IOMMU + * + * Copyright (C) 2019 Kai-Heng Feng + */ + +#ifdef CONFIG_DMI +#include + +#include "amd_iommu.h" + +#define IVHD_SPECIAL_IOAPIC 1 + +struct ivrs_quirk_entry { + u8 id; + u16 devid; +}; + +enum { + DELL_INSPIRON_7375 = 0, + DELL_LATITUDE_5495, + LENOVO_IDEAPAD_330S_15ARR, +}; + +static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = { + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */ + [DELL_INSPIRON_7375] = { + { .id = 4, .devid = 0xa0 }, + { .id = 5, .devid =
0x2 }, + {} + }, + /* ivrs_ioapic[4]=00:14.0 */ + [DELL_LATITUDE_5495] = { + { .id = 4, .devid = 0xa0 }, + {} + }, + /* ivrs_ioapic[32]=00:14.0 */ + [LENOVO_IDEAPAD_330S_15ARR] = { + { .id = 32, .devid = 0xa0 }, + {} + }, + {} +}; + +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d) +{ + const struct ivrs_quirk_entry *i; + + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++) + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0); + + return 0; +} + +static const struct dmi_system_id ivrs_quirks[] __initconst = { + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Inspiron 7375", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Latitude 5495", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + /* + * Acer Aspire A315-41 requires the very same workaround as + * Dell Latitude 5495 + */ + .callback = ivrs_ioapic_quirk_cb, + .ident = "Acer Aspire A315-41", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Lenovo ideapad 330S-15ARR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR], + }, + {} +}; + +void __init amd_iommu_apply_ivrs_quirks(void) +{ + dmi_check_system(ivrs_quirks); +} +#endif diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index e2b342e65a7b86ea75f8c1b8bb8826ac0e7b3018..52a97189eb81d91fcf0adf5edd9a96684d14a927 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -136,8 +136,8 @@ #define EVENT_TYPE_INV_PPR_REQ 0x9 #define EVENT_DEVID_MASK 0xffff #define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 +#define EVENT_DOMID_MASK_LO 0xffff +#define EVENT_DOMID_MASK_HI 0xf0000 #define EVENT_FLAGS_MASK 0xfff #define EVENT_FLAGS_SHIFT 0x10 @@ -373,6 +373,8 @@ #define IOMMU_PROT_IR 0x01 #define IOMMU_PROT_IW 0x02 +#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2) + /* IOMMU capabilities */ #define IOMMU_CAP_IOTLB 24 #define IOMMU_CAP_NPCACHE 26 diff --git a/drivers/iommu/arm-smmu-v3-context.c b/drivers/iommu/arm-smmu-v3-context.c new file mode 100644 index 0000000000000000000000000000000000000000..d87d6f72e864296e28d0f08b71196bdb3f33daab --- /dev/null +++ b/drivers/iommu/arm-smmu-v3-context.c @@ -0,0 +1,744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Context descriptor table driver for SMMUv3 + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "io-pgtable-arm.h" +#include "iommu-pasid-table.h" + +/* + * Linear: when less than 1024 SSIDs are supported + * 2lvl: at most 1024 L1 entries, + * 1024 lazy entries per table.
+ */ +#define CTXDESC_SPLIT 10 +#define CTXDESC_NUM_L2_ENTRIES (1 << CTXDESC_SPLIT) + +#define CTXDESC_L1_DESC_DWORD 1 +#define CTXDESC_L1_DESC_VALID 1 +#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12) + +#define CTXDESC_CD_DWORDS 8 +#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) +#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0) +#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6) +#define ARM64_TCR_TG0 GENMASK_ULL(15, 14) +#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) +#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8) +#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10) +#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10) +#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12) +#define ARM64_TCR_SH0 GENMASK_ULL(13, 12) +#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14) +#define ARM64_TCR_EPD0 (1ULL << 7) +#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30) +#define ARM64_TCR_EPD1 (1ULL << 23) + +#define CTXDESC_CD_0_ENDI (1UL << 15) +#define CTXDESC_CD_0_V (1UL << 31) + +#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32) +#define ARM64_TCR_IPS GENMASK_ULL(34, 32) +#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38) +#define ARM64_TCR_TBI0 (1ULL << 37) + +#define CTXDESC_CD_0_TCR_HA (1UL << 43) +#define ARM64_TCR_HA (1ULL << 39) +#define CTXDESC_CD_0_TCR_HD (1UL << 42) +#define ARM64_TCR_HD (1ULL << 40) + +#define CTXDESC_CD_0_AA64 (1UL << 41) +#define CTXDESC_CD_0_S (1UL << 44) +#define CTXDESC_CD_0_R (1UL << 45) +#define CTXDESC_CD_0_A (1UL << 46) +#define CTXDESC_CD_0_ASET (1UL << 47) +#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48) + +#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4) + +#define CTXDESC_CD_5_PARTID_MASK GENMASK_ULL(47, 32) +#define CTXDESC_CD_5_PMG_MASK GENMASK_ULL(55, 48) + +/* Convert between AArch64 (CPU) TCR format and SMMU CD format */ +#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \ + FIELD_GET(ARM64_TCR_##fld, tcr)) + +#define ARM_SMMU_NO_PASID (-1) + +struct arm_smmu_cd { + struct iommu_pasid_entry entry; + + u64 ttbr; + u64 tcr; + u64 mair; + + int pasid; + + /* 'refs' tracks alloc/free */ + refcount_t refs; + /* 'users' tracks attach/detach, and is only used for sanity checking */ + unsigned int users; + struct mm_struct *mm; + struct arm_smmu_cd_tables *tbl; +}; + +#define pasid_entry_to_cd(entry) \ + container_of((entry), struct arm_smmu_cd, entry) + +struct arm_smmu_cd_table { + __le64 *ptr; + dma_addr_t ptr_dma; +}; + +struct arm_smmu_cd_tables { + struct iommu_pasid_table pasid; + bool linear; + union { + struct arm_smmu_cd_table table; + struct { + __le64 *ptr; + dma_addr_t ptr_dma; + size_t num_entries; + + struct arm_smmu_cd_table *tables; + } l1; + }; +}; + +#define pasid_to_cd_tables(pasid_table) \ + container_of((pasid_table), struct arm_smmu_cd_tables, pasid) + +#define pasid_ops_to_tables(ops) \ + pasid_to_cd_tables(iommu_pasid_table_ops_to_table(ops)) + +static DEFINE_SPINLOCK(contexts_lock); +static DEFINE_SPINLOCK(asid_lock); +static DEFINE_IDR(asid_idr); + +static int arm_smmu_alloc_cd_leaf_table(struct device *dev, + struct arm_smmu_cd_table *desc, + size_t num_entries) +{ + size_t size = num_entries * (CTXDESC_CD_DWORDS << 3); + + desc->ptr = dmam_alloc_coherent(dev, size, &desc->ptr_dma, + GFP_ATOMIC | __GFP_ZERO); + if (!desc->ptr) { + dev_warn(dev, "failed to allocate context descriptor table\n"); + return -ENOMEM; + } + + return 0; +} + +static void arm_smmu_free_cd_leaf_table(struct device *dev, + struct arm_smmu_cd_table *desc, + size_t num_entries) +{ + size_t size = num_entries * (CTXDESC_CD_DWORDS << 3); + + dmam_free_coherent(dev, size, desc->ptr, desc->ptr_dma); 
+} + +static void arm_smmu_write_cd_l1_desc(__le64 *dst, + struct arm_smmu_cd_table *desc) +{ + u64 val = (desc->ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | + CTXDESC_L1_DESC_VALID; + + *dst = cpu_to_le64(val); +} + +static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_cd_tables *tbl, u32 ssid) +{ + unsigned long idx; + struct arm_smmu_cd_table *l1_desc; + struct iommu_pasid_table_cfg *cfg = &tbl->pasid.cfg; + + if (tbl->linear) + return tbl->table.ptr + ssid * CTXDESC_CD_DWORDS; + + idx = ssid >> CTXDESC_SPLIT; + if (idx >= tbl->l1.num_entries) + return NULL; + + l1_desc = &tbl->l1.tables[idx]; + if (!l1_desc->ptr) { + __le64 *l1ptr = tbl->l1.ptr + idx * CTXDESC_L1_DESC_DWORD; + + if (arm_smmu_alloc_cd_leaf_table(cfg->iommu_dev, l1_desc, + CTXDESC_NUM_L2_ENTRIES)) + return NULL; + + arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); + /* An invalid L1 entry is allowed to be cached */ + iommu_pasid_flush(&tbl->pasid, idx << CTXDESC_SPLIT, false); + } + + idx = ssid & (CTXDESC_NUM_L2_ENTRIES - 1); + + return l1_desc->ptr + idx * CTXDESC_CD_DWORDS; +} + +static u64 arm_smmu_cpu_tcr_to_cd(struct arm_smmu_context_cfg *cfg, u64 tcr) +{ + u64 val = 0; + + /* Repack the TCR. Just care about TTBR0 for now */ + val |= ARM_SMMU_TCR2CD(tcr, T0SZ); + val |= ARM_SMMU_TCR2CD(tcr, TG0); + val |= ARM_SMMU_TCR2CD(tcr, IRGN0); + val |= ARM_SMMU_TCR2CD(tcr, ORGN0); + val |= ARM_SMMU_TCR2CD(tcr, SH0); + val |= ARM_SMMU_TCR2CD(tcr, EPD0); + val |= ARM_SMMU_TCR2CD(tcr, EPD1); + val |= ARM_SMMU_TCR2CD(tcr, IPS); + + if (cfg->hw_access) + val |= ARM_SMMU_TCR2CD(tcr, HA); + + if (cfg->hw_dirty) + val |= ARM_SMMU_TCR2CD(tcr, HD); + + return val; +} + +static int __arm_smmu_write_ctx_desc(struct arm_smmu_cd_tables *tbl, int ssid, + struct arm_smmu_cd *cd) +{ + u64 val; + bool cd_live; + __le64 *cdptr = arm_smmu_get_cd_ptr(tbl, ssid); + struct arm_smmu_context_cfg *cfg = &tbl->pasid.cfg.arm_smmu; + + /* + * This function handles the following cases: + * + * (1) Install primary CD, for normal DMA traffic (SSID = 0). + * (2) Install a secondary CD, for SID+SSID traffic, followed by an + * invalidation. + * (3) Update ASID of primary CD. This is allowed by atomically writing + * the first 64 bits of the CD, followed by invalidation of the old + * entry and mappings. + * (4) Remove a secondary CD and invalidate it. + */ + + if (!cdptr) + return -ENOMEM; + + val = le64_to_cpu(cdptr[0]); + cd_live = !!(val & CTXDESC_CD_0_V); + + if (!cd) { /* (4) */ + cdptr[0] = 0; + } else if (cd_live) { /* (3) */ + val &= ~CTXDESC_CD_0_ASID; + val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->entry.tag); + + cdptr[0] = cpu_to_le64(val); + /* + * Until CD+TLB invalidation, both ASIDs may be used for tagging + * this substream's traffic + */ + } else { /* (1) and (2) */ + cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); + cdptr[2] = 0; + cdptr[3] = cpu_to_le64(cd->mair); + + /* + * STE is live, and the SMMU might fetch this CD at any + * time. Ensure it observes the rest of the CD before we + * enable it. + */ + iommu_pasid_flush(&tbl->pasid, ssid, true); + + + val = arm_smmu_cpu_tcr_to_cd(cfg, cd->tcr) | +#ifdef __BIG_ENDIAN + CTXDESC_CD_0_ENDI | +#endif + CTXDESC_CD_0_R | CTXDESC_CD_0_A | + (cd->mm ? 
0 : CTXDESC_CD_0_ASET) | + CTXDESC_CD_0_AA64 | + FIELD_PREP(CTXDESC_CD_0_ASID, cd->entry.tag) | + CTXDESC_CD_0_V; + + if (cfg->stall) + val |= CTXDESC_CD_0_S; + + cdptr[0] = cpu_to_le64(val); + } + + iommu_pasid_flush(&tbl->pasid, ssid, true); + + return 0; +} + +static int arm_smmu_write_ctx_desc(struct arm_smmu_cd_tables *tbl, int ssid, + struct arm_smmu_cd *cd) +{ + int ret; + + spin_lock(&contexts_lock); + ret = __arm_smmu_write_ctx_desc(tbl, ssid, cd); + spin_unlock(&contexts_lock); + + return ret; +} + +static bool arm_smmu_free_asid(struct arm_smmu_cd *cd) +{ + bool free; + struct arm_smmu_cd *old_cd; + + spin_lock(&asid_lock); + free = refcount_dec_and_test(&cd->refs); + if (free) { + old_cd = idr_remove(&asid_idr, (u16)cd->entry.tag); + WARN_ON(old_cd != cd); + } + spin_unlock(&asid_lock); + + return free; +} + +static void arm_smmu_free_cd(struct iommu_pasid_entry *entry) +{ + struct arm_smmu_cd *cd = pasid_entry_to_cd(entry); + + if (!arm_smmu_free_asid(cd)) + return; + + if (cd->mm) { + /* Unpin ASID */ + mm_context_put(cd->mm); + } + + kfree(cd); +} + +static struct arm_smmu_cd *arm_smmu_alloc_cd(struct arm_smmu_cd_tables *tbl) +{ + struct arm_smmu_cd *cd; + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return NULL; + + cd->pasid = ARM_SMMU_NO_PASID; + cd->tbl = tbl; + cd->entry.release = arm_smmu_free_cd; + refcount_set(&cd->refs, 1); + + return cd; +} + +/* + * Try to reserve this ASID in the SMMU. If it is in use, try to steal it from + * the private entry. Careful here, we may be modifying the context tables of + * another SMMU! + */ +static struct arm_smmu_cd *arm_smmu_share_asid(u16 asid) +{ + int ret; + struct arm_smmu_cd *cd; + struct arm_smmu_cd_tables *tbl; + struct arm_smmu_context_cfg *cfg; + struct iommu_pasid_entry old_entry; + + cd = idr_find(&asid_idr, asid); + if (!cd) + return NULL; + + if (cd->mm) { + /* + * It's pretty common to find a stale CD when doing unbind-bind, + * given that the release happens after a RCU grace period. + * Simply reuse it, but check that it isn't active, because it's + * going to be assigned a different PASID. + */ + if (WARN_ON(cd->users)) + return ERR_PTR(-EINVAL); + + refcount_inc(&cd->refs); + return cd; + } + + tbl = cd->tbl; + cfg = &tbl->pasid.cfg.arm_smmu; + + ret = idr_alloc_cyclic(&asid_idr, cd, 0, 1 << cfg->asid_bits, + GFP_ATOMIC); + if (ret < 0) + return ERR_PTR(-ENOSPC); + + /* Save the previous ASID */ + old_entry = cd->entry; + + /* + * Race with unmap; TLB invalidations will start targeting the new ASID, + * which isn't assigned yet. We'll do an invalidate-all on the old ASID + * later, so it doesn't matter. + */ + cd->entry.tag = ret; + + /* + * Update ASID and invalidate CD in all associated masters. There will + * be some overlap between use of both ASIDs, until we invalidate the + * TLB. 
+ */ + arm_smmu_write_ctx_desc(tbl, cd->pasid, cd); + + /* Invalidate TLB entries previously associated with that context */ + iommu_pasid_flush_tlbs(&tbl->pasid, cd->pasid, &old_entry); + + idr_remove(&asid_idr, asid); + + return NULL; +} + +static struct iommu_pasid_entry * +arm_smmu_alloc_shared_cd(struct iommu_pasid_table_ops *ops, + struct mm_struct *mm) +{ + u16 asid; + u64 tcr, par, reg; + int ret = -ENOMEM; + struct arm_smmu_cd *cd; + struct arm_smmu_cd *old_cd = NULL; + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + + asid = mm_context_get(mm); + if (!asid) + return ERR_PTR(-ESRCH); + + cd = arm_smmu_alloc_cd(tbl); + if (!cd) + goto err_put_context; + + idr_preload(GFP_KERNEL); + spin_lock(&asid_lock); + old_cd = arm_smmu_share_asid(asid); + if (!old_cd) + ret = idr_alloc(&asid_idr, cd, asid, asid + 1, GFP_ATOMIC); + spin_unlock(&asid_lock); + idr_preload_end(); + + if (!IS_ERR_OR_NULL(old_cd)) { + if (WARN_ON(old_cd->mm != mm)) { + ret = -EINVAL; + goto err_free_cd; + } + kfree(cd); + mm_context_put(mm); + return &old_cd->entry; + } else if (old_cd) { + ret = PTR_ERR(old_cd); + goto err_free_cd; + } + + tcr = TCR_T0SZ(VA_BITS) | TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | + TCR_SH0_INNER | ARM_LPAE_TCR_EPD1; + + switch (PAGE_SIZE) { + case SZ_4K: + tcr |= TCR_TG0_4K; + break; + case SZ_16K: + tcr |= TCR_TG0_16K; + break; + case SZ_64K: + tcr |= TCR_TG0_64K; + break; + default: + WARN_ON(1); + ret = -EINVAL; + goto err_free_asid; + } + + reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + par = cpuid_feature_extract_unsigned_field(reg, + ID_AA64MMFR0_PARANGE_SHIFT); + tcr |= par << ARM_LPAE_TCR_IPS_SHIFT; + tcr |= TCR_HA | TCR_HD; + + cd->ttbr = virt_to_phys(mm->pgd); + cd->tcr = tcr; + /* + * MAIR value is pretty much constant and global, so we can just get it + * from the current CPU register + */ + cd->mair = read_sysreg(mair_el1); + + cd->mm = mm; + cd->entry.tag = asid; + + return &cd->entry; + +err_free_asid: + arm_smmu_free_asid(cd); + +err_free_cd: + kfree(cd); + +err_put_context: + mm_context_put(mm); + + return ERR_PTR(ret); +} + +static struct iommu_pasid_entry * +arm_smmu_alloc_priv_cd(struct iommu_pasid_table_ops *ops, + enum io_pgtable_fmt fmt, + struct io_pgtable_cfg *cfg) +{ + int ret; + int asid; + struct arm_smmu_cd *cd; + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + struct arm_smmu_context_cfg *ctx_cfg = &tbl->pasid.cfg.arm_smmu; + + cd = arm_smmu_alloc_cd(tbl); + if (!cd) + return ERR_PTR(-ENOMEM); + + idr_preload(GFP_KERNEL); + spin_lock(&asid_lock); + asid = idr_alloc_cyclic(&asid_idr, cd, 0, 1 << ctx_cfg->asid_bits, + GFP_ATOMIC); + cd->entry.tag = asid; + spin_unlock(&asid_lock); + idr_preload_end(); + + if (asid < 0) { + kfree(cd); + return ERR_PTR(asid); + } + + switch (fmt) { + case ARM_64_LPAE_S1: + cd->ttbr = cfg->arm_lpae_s1_cfg.ttbr[0]; + cd->tcr = cfg->arm_lpae_s1_cfg.tcr; + cd->mair = cfg->arm_lpae_s1_cfg.mair[0]; + break; + default: + pr_err("Unsupported pgtable format 0x%x\n", fmt); + ret = -EINVAL; + goto err_free_cd; + } + + return &cd->entry; + +err_free_cd: + arm_smmu_free_cd(&cd->entry); + + return ERR_PTR(ret); +} + +static int arm_smmu_set_cd(struct iommu_pasid_table_ops *ops, int pasid, + struct iommu_pasid_entry *entry) +{ + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + struct arm_smmu_cd *cd = pasid_entry_to_cd(entry); + + if (WARN_ON(pasid > (1 << tbl->pasid.cfg.order))) + return -EINVAL; + + if (WARN_ON(cd->pasid != ARM_SMMU_NO_PASID && cd->pasid != pasid)) + return -EEXIST; + + /* + * There is a 
single cd structure for each address space, multiple + * devices may use the same in different tables. + */ + cd->users++; + cd->pasid = pasid; + return arm_smmu_write_ctx_desc(tbl, pasid, cd); +} + +int arm_smmu_set_cd_mpam(struct iommu_pasid_table_ops *ops, + int ssid, int partid, int pmg) +{ + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + u64 val; + __le64 *cdptr = arm_smmu_get_cd_ptr(tbl, ssid); + + if (!cdptr) + return -ENOMEM; + + val = le64_to_cpu(cdptr[5]); + val &= ~CTXDESC_CD_5_PARTID_MASK; + val |= FIELD_PREP(CTXDESC_CD_5_PARTID_MASK, partid); + val &= ~CTXDESC_CD_5_PMG_MASK; + val |= FIELD_PREP(CTXDESC_CD_5_PMG_MASK, pmg); + WRITE_ONCE(cdptr[5], cpu_to_le64(val)); + + iommu_pasid_flush(&tbl->pasid, ssid, true); + + return 0; +} + +int arm_smmu_get_cd_mpam(struct iommu_pasid_table_ops *ops, + int ssid, int *partid, int *pmg) +{ + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + u64 val; + __le64 *cdptr = arm_smmu_get_cd_ptr(tbl, ssid); + + if (!cdptr) + return -ENOMEM; + + val = le64_to_cpu(cdptr[5]); + *partid = FIELD_GET(CTXDESC_CD_5_PARTID_MASK, val); + *pmg = FIELD_GET(CTXDESC_CD_5_PMG_MASK, val); + + return 0; +} + +static void arm_smmu_clear_cd(struct iommu_pasid_table_ops *ops, int pasid, + struct iommu_pasid_entry *entry) +{ + struct arm_smmu_cd_tables *tbl = pasid_ops_to_tables(ops); + struct arm_smmu_cd *cd = pasid_entry_to_cd(entry); + + if (WARN_ON(pasid > (1 << tbl->pasid.cfg.order))) + return; + + WARN_ON(cd->pasid != pasid); + + if (!(--cd->users)) + cd->pasid = ARM_SMMU_NO_PASID; + + arm_smmu_write_ctx_desc(tbl, pasid, NULL); + + /* + * The ASID allocator won't broadcast the final TLB invalidations for + * this ASID, so we need to do it manually. For private contexts, + * freeing io-pgtable ops performs the invalidation. 
+ */ + if (cd->mm) + iommu_pasid_flush_tlbs(&tbl->pasid, pasid, entry); +} + +static struct iommu_pasid_table * +arm_smmu_alloc_cd_tables(struct iommu_pasid_table_cfg *cfg, void *cookie) +{ + int ret; + size_t size = 0; + struct arm_smmu_cd_tables *tbl; + struct device *dev = cfg->iommu_dev; + struct arm_smmu_cd_table *leaf_table; + size_t num_contexts, num_leaf_entries; + + tbl = devm_kzalloc(dev, sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + num_contexts = 1 << cfg->order; + if (num_contexts <= CTXDESC_NUM_L2_ENTRIES) { + /* Fits in a single table */ + tbl->linear = true; + num_leaf_entries = num_contexts; + leaf_table = &tbl->table; + } else { + /* + * SSID[S1CDmax-1:10] indexes 1st-level table, SSID[9:0] indexes + * 2nd-level + */ + tbl->l1.num_entries = num_contexts / CTXDESC_NUM_L2_ENTRIES; + + tbl->l1.tables = devm_kzalloc(dev, + sizeof(struct arm_smmu_cd_table) * + tbl->l1.num_entries, GFP_KERNEL); + if (!tbl->l1.tables) + goto err_free_tbl; + + size = tbl->l1.num_entries * (CTXDESC_L1_DESC_DWORD << 3); + tbl->l1.ptr = dmam_alloc_coherent(dev, size, &tbl->l1.ptr_dma, + GFP_KERNEL | __GFP_ZERO); + if (!tbl->l1.ptr) { + dev_warn(dev, "failed to allocate L1 context table\n"); + devm_kfree(dev, tbl->l1.tables); + goto err_free_tbl; + } + + num_leaf_entries = CTXDESC_NUM_L2_ENTRIES; + leaf_table = tbl->l1.tables; + } + + ret = arm_smmu_alloc_cd_leaf_table(dev, leaf_table, num_leaf_entries); + if (ret) + goto err_free_l1; + + tbl->pasid.ops = (struct iommu_pasid_table_ops) { + .alloc_priv_entry = arm_smmu_alloc_priv_cd, + .alloc_shared_entry = arm_smmu_alloc_shared_cd, + .set_entry = arm_smmu_set_cd, + .clear_entry = arm_smmu_clear_cd, + }; + + if (tbl->linear) { + cfg->base = leaf_table->ptr_dma; + cfg->arm_smmu.s1fmt = ARM_SMMU_S1FMT_LINEAR; + } else { + cfg->base = tbl->l1.ptr_dma; + cfg->arm_smmu.s1fmt = ARM_SMMU_S1FMT_64K_L2; + arm_smmu_write_cd_l1_desc(tbl->l1.ptr, leaf_table); + } + + return &tbl->pasid; + +err_free_l1: + if (!tbl->linear) { + dmam_free_coherent(dev, size, tbl->l1.ptr, tbl->l1.ptr_dma); + devm_kfree(dev, tbl->l1.tables); + } +err_free_tbl: + devm_kfree(dev, tbl); + + return NULL; +} + +static void arm_smmu_free_cd_tables(struct iommu_pasid_table *pasid_table) +{ + struct iommu_pasid_table_cfg *cfg = &pasid_table->cfg; + struct device *dev = cfg->iommu_dev; + struct arm_smmu_cd_tables *tbl = pasid_to_cd_tables(pasid_table); + + if (tbl->linear) { + arm_smmu_free_cd_leaf_table(dev, &tbl->table, 1 << cfg->order); + } else { + size_t i, size; + + for (i = 0; i < tbl->l1.num_entries; i++) { + struct arm_smmu_cd_table *table = &tbl->l1.tables[i]; + + if (!table->ptr) + continue; + + arm_smmu_free_cd_leaf_table(dev, table, + CTXDESC_NUM_L2_ENTRIES); + } + + size = tbl->l1.num_entries * (CTXDESC_L1_DESC_DWORD << 3); + dmam_free_coherent(dev, size, tbl->l1.ptr, tbl->l1.ptr_dma); + devm_kfree(dev, tbl->l1.tables); + } + + devm_kfree(dev, tbl); +} + +struct iommu_pasid_init_fns arm_smmu_v3_pasid_init_fns = { + .alloc = arm_smmu_alloc_cd_tables, + .free = arm_smmu_free_cd_tables, +}; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5059d09f32020a43edaebc8420b5e4ef8969b888..05cb92da6836fa5c7b50427ef22a21ce406d948c 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -24,13 +24,16 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include +#include #include #include #include @@ -39,10 +42,12 @@ #include #include #include +#include +#include #include -#include 
"io-pgtable.h" +#include "iommu-pasid-table.h" /* MMIO registers */ #define ARM_SMMU_IDR0 0x0 @@ -63,6 +68,9 @@ #define IDR0_ASID16 (1 << 12) #define IDR0_ATS (1 << 10) #define IDR0_HYP (1 << 9) +#define IDR0_HD (1 << 7) +#define IDR0_HA (1 << 6) +#define IDR0_BTM (1 << 5) #define IDR0_COHACC (1 << 4) #define IDR0_TTF GENMASK(3, 2) #define IDR0_TTF_AARCH64 2 @@ -80,6 +88,10 @@ #define IDR1_SSIDSIZE GENMASK(10, 6) #define IDR1_SIDSIZE GENMASK(5, 0) +#define ARM_SMMU_IDR3 0xc +#define IDR3_MPAM (1 << 7) +#define ARM_SMMU_IDR3_CFG 0x140C + #define ARM_SMMU_IDR5 0x14 #define IDR5_STALL_MAX GENMASK(31, 16) #define IDR5_GRAN64K (1 << 6) @@ -141,7 +153,7 @@ #define GERROR_PRIQ_ABT_ERR (1 << 3) #define GERROR_EVTQ_ABT_ERR (1 << 2) #define GERROR_CMDQ_ERR (1 << 0) -#define GERROR_ERR_MASK 0xfd +#define GERROR_ERR_MASK 0x1fd #define ARM_SMMU_GERRORN 0x64 @@ -178,6 +190,13 @@ #define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8 #define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc +#define ARM_SMMU_MPAMIDR 0x130 +#define MPAMIDR_PMG_MAX GENMASK(23, 16) +#define MPAMIDR_PARTID_MAX GENMASK(15, 0) + +#define ARM_SMMU_USER_CFG0 0xe00 +#define ARM_SMMU_USER_MPAM_EN (1UL << 30) + /* Common MSI config fields */ #define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) #define MSI_CFG2_SH GENMASK(5, 4) @@ -190,16 +209,18 @@ #define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 #define ARM_SMMU_MEMATTR_OIWB 0xf -#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) -#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) -#define Q_OVERFLOW_FLAG (1 << 31) -#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG) +#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1)) +#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift)) +#define Q_OVERFLOW_FLAG (1U << 31) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ - Q_IDX(q, p) * (q)->ent_dwords) + Q_IDX(&((q)->llq), p) * \ + (q)->ent_dwords) #define Q_BASE_RWA (1UL << 62) #define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) #define Q_BASE_LOG2SIZE GENMASK(4, 0) +#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT) /* * Stream table. 
@@ -224,10 +245,14 @@ #define STRTAB_STE_0_CFG_S2_TRANS 6 #define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4) -#define STRTAB_STE_0_S1FMT_LINEAR 0 #define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6) #define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59) +#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0) +#define STRTAB_STE_1_S1DSS_TERMINATE 0x0 +#define STRTAB_STE_1_S1DSS_BYPASS 0x1 +#define STRTAB_STE_1_S1DSS_SSID0 0x2 + #define STRTAB_STE_1_S1C_CACHE_NC 0UL #define STRTAB_STE_1_S1C_CACHE_WBRA 1UL #define STRTAB_STE_1_S1C_CACHE_WT 2UL @@ -236,6 +261,7 @@ #define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4) #define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6) +#define STRTAB_STE_1_S1MPAM (1UL << 26) #define STRTAB_STE_1_S1STALLD (1UL << 27) #define STRTAB_STE_1_EATS GENMASK_ULL(29, 28) @@ -259,53 +285,23 @@ #define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4) -/* Context descriptor (stage-1 only) */ -#define CTXDESC_CD_DWORDS 8 -#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0) -#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0) -#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6) -#define ARM64_TCR_TG0 GENMASK_ULL(15, 14) -#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8) -#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8) -#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10) -#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10) -#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12) -#define ARM64_TCR_SH0 GENMASK_ULL(13, 12) -#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14) -#define ARM64_TCR_EPD0 (1ULL << 7) -#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30) -#define ARM64_TCR_EPD1 (1ULL << 23) - -#define CTXDESC_CD_0_ENDI (1UL << 15) -#define CTXDESC_CD_0_V (1UL << 31) - -#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32) -#define ARM64_TCR_IPS GENMASK_ULL(34, 32) -#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38) -#define ARM64_TCR_TBI0 (1ULL << 37) - -#define CTXDESC_CD_0_AA64 (1UL << 41) -#define CTXDESC_CD_0_S (1UL << 44) -#define CTXDESC_CD_0_R (1UL << 45) -#define CTXDESC_CD_0_A (1UL << 46) -#define CTXDESC_CD_0_ASET (1UL << 47) -#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48) - -#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4) - -/* Convert between AArch64 (CPU) TCR format and SMMU CD format */ -#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \ - FIELD_GET(ARM64_TCR_##fld, tcr)) +#define STRTAB_STE_4_PARTID_MASK GENMASK_ULL(31, 16) + +#define STRTAB_STE_5_MPAM_NS (1UL << 8) +#define STRTAB_STE_5_PMG_MASK GENMASK_ULL(7, 0) /* Command queue */ -#define CMDQ_ENT_DWORDS 2 -#define CMDQ_MAX_SZ_SHIFT 8 +#define CMDQ_ENT_SZ_SHIFT 4 +#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3) +#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT) #define CMDQ_CONS_ERR GENMASK(30, 24) #define CMDQ_ERR_CERROR_NONE_IDX 0 #define CMDQ_ERR_CERROR_ILL_IDX 1 #define CMDQ_ERR_CERROR_ABT_IDX 2 +#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG + #define CMDQ_0_OP GENMASK_ULL(7, 0) #define CMDQ_0_SSV (1UL << 11) @@ -313,6 +309,7 @@ #define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0) #define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12) +#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12) #define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32) #define CMDQ_CFGI_1_LEAF (1UL << 0) #define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0) @@ -328,6 +325,11 @@ #define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0) #define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12) +#define CMDQ_RESUME_0_SID GENMASK_ULL(63, 32) +#define CMDQ_RESUME_0_ACTION_RETRY (1UL << 12) +#define CMDQ_RESUME_0_ACTION_ABORT (1UL << 13) +#define CMDQ_RESUME_1_STAG GENMASK_ULL(15, 0) + #define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12) #define CMDQ_SYNC_0_CS_NONE 0 #define 
CMDQ_SYNC_0_CS_IRQ 1 @@ -338,14 +340,34 @@ #define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2) /* Event queue */ -#define EVTQ_ENT_DWORDS 4 -#define EVTQ_MAX_SZ_SHIFT 7 +#define EVTQ_ENT_SZ_SHIFT 5 +#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3) +#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT) #define EVTQ_0_ID GENMASK_ULL(7, 0) +#define EVT_ID_TRANSLATION_FAULT 0x10 +#define EVT_ID_ADDR_SIZE_FAULT 0x11 +#define EVT_ID_ACCESS_FAULT 0x12 +#define EVT_ID_PERMISSION_FAULT 0x13 + +#define EVTQ_0_SSV GENMASK_ULL(11, 11) +#define EVTQ_0_SSID GENMASK_ULL(31, 12) +#define EVTQ_0_SID GENMASK_ULL(63, 32) +#define EVTQ_1_STAG GENMASK_ULL(15, 0) +#define EVTQ_1_STALL (1UL << 31) +#define EVTQ_1_PRIV (1UL << 33) +#define EVTQ_1_EXEC (1UL << 34) +#define EVTQ_1_READ (1UL << 35) +#define EVTQ_1_S2 (1UL << 39) +#define EVTQ_1_CLASS GENMASK_ULL(41, 40) +#define EVTQ_1_TT_READ (1UL << 44) +#define EVTQ_2_ADDR GENMASK_ULL(63, 0) +#define EVTQ_3_IPA GENMASK_ULL(51, 12) /* PRI queue */ -#define PRIQ_ENT_DWORDS 2 -#define PRIQ_MAX_SZ_SHIFT 8 +#define PRIQ_ENT_SZ_SHIFT 4 +#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3) +#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT) #define PRIQ_0_SID GENMASK_ULL(31, 0) #define PRIQ_0_SSID GENMASK_ULL(51, 32) @@ -360,9 +382,8 @@ #define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12) /* High-level queue structures */ -#define ARM_SMMU_POLL_TIMEOUT_US 100 -#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */ -#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10 +#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */ +#define ARM_SMMU_POLL_SPIN_COUNT 10 #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -372,6 +393,42 @@ module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); +#ifdef CONFIG_SMMU_BYPASS_DEV +struct smmu_bypass_device { + unsigned short vendor; + unsigned short device; +}; +#define MAX_CMDLINE_SMMU_BYPASS_DEV 16 + +static struct smmu_bypass_device smmu_bypass_devices[MAX_CMDLINE_SMMU_BYPASS_DEV]; +static int smmu_bypass_devices_num; + +static int __init arm_smmu_bypass_dev_setup(char *str) +{ + unsigned short vendor; + unsigned short device; + int ret; + + if (!str) + return -EINVAL; + + ret = sscanf(str, "%hx:%hx", &vendor, &device); + if (ret != 2) + return -EINVAL; + + if (smmu_bypass_devices_num >= MAX_CMDLINE_SMMU_BYPASS_DEV) + return -ERANGE; + + smmu_bypass_devices[smmu_bypass_devices_num].vendor = vendor; + smmu_bypass_devices[smmu_bypass_devices_num].device = device; + smmu_bypass_devices_num++; + + return 0; +} + +__setup("smmu.bypassdev=", arm_smmu_bypass_dev_setup); +#endif + enum pri_resp { PRI_RESP_DENY = 0, PRI_RESP_FAIL = 1, @@ -419,8 +476,11 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_CFGI_STE 0x3 #define CMDQ_OP_CFGI_ALL 0x4 + #define CMDQ_OP_CFGI_CD 0x5 + #define CMDQ_OP_CFGI_CD_ALL 0x6 struct { u32 sid; + u32 ssid; union { bool leaf; u8 span; @@ -430,6 +490,8 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_TLBI_NH_ASID 0x11 #define CMDQ_OP_TLBI_NH_VA 0x12 #define CMDQ_OP_TLBI_EL2_ALL 0x20 + #define CMDQ_OP_TLBI_EL2_ASID 0x21 + #define CMDQ_OP_TLBI_EL2_VA 0x22 #define CMDQ_OP_TLBI_S12_VMALL 0x28 #define CMDQ_OP_TLBI_S2_IPA 0x2a #define CMDQ_OP_TLBI_NSNH_ALL 0x30 @@ -448,15 +510,38 @@ struct arm_smmu_cmdq_ent { enum pri_resp resp; } pri; + #define CMDQ_OP_RESUME 
0x44 + struct { + u32 sid; + u16 stag; + enum page_response_code resp; + } resume; + #define CMDQ_OP_CMD_SYNC 0x46 struct { - u32 msidata; u64 msiaddr; } sync; }; }; +struct arm_smmu_ll_queue { + union { + u64 val; + struct { + u32 prod; + u32 cons; + }; + struct { + atomic_t prod; + atomic_t cons; + } atomic; + u8 __pad[SMP_CACHE_BYTES]; + } ____cacheline_aligned_in_smp; + u32 max_n_shift; +}; + struct arm_smmu_queue { + struct arm_smmu_ll_queue llq; int irq; /* Wired interrupt */ __le64 *base; @@ -464,17 +549,27 @@ struct arm_smmu_queue { u64 q_base; size_t ent_dwords; - u32 max_n_shift; - u32 prod; - u32 cons; u32 __iomem *prod_reg; u32 __iomem *cons_reg; + + /* Event and PRI */ + u64 batch; + wait_queue_head_t wq; +}; + +struct arm_smmu_queue_poll { + ktime_t timeout; + unsigned int delay; + unsigned int spin_cnt; + bool wfe; }; struct arm_smmu_cmdq { struct arm_smmu_queue q; - spinlock_t lock; + atomic_long_t *valid_map; + atomic_t owner_prod; + atomic_t lock; }; struct arm_smmu_evtq { @@ -495,15 +590,9 @@ struct arm_smmu_strtab_l1_desc { }; struct arm_smmu_s1_cfg { - __le64 *cdptr; - dma_addr_t cdptr_dma; - - struct arm_smmu_ctx_desc { - u16 asid; - u64 ttbr; - u64 tcr; - u64 mair; - } cd; + struct iommu_pasid_table_cfg tables; + struct iommu_pasid_table_ops *ops; + struct iommu_pasid_entry *cd0; /* Default context */ }; struct arm_smmu_s2_cfg { @@ -523,6 +612,8 @@ struct arm_smmu_strtab_ent { bool assigned; struct arm_smmu_s1_cfg *s1_cfg; struct arm_smmu_s2_cfg *s2_cfg; + + bool can_stall; }; struct arm_smmu_strtab_cfg { @@ -555,27 +646,33 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HYP (1 << 12) #define ARM_SMMU_FEAT_STALL_FORCE (1 << 13) #define ARM_SMMU_FEAT_VAX (1 << 14) +#define ARM_SMMU_FEAT_E2H (1 << 15) +#define ARM_SMMU_FEAT_BTM (1 << 16) +#define ARM_SMMU_FEAT_SVA (1 << 17) +#define ARM_SMMU_FEAT_HA (1 << 18) +#define ARM_SMMU_FEAT_HD (1 << 19) +#define ARM_SMMU_FEAT_MPAM (1 << 20) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) +#define ARM_SMMU_OPT_MESSAGE_BASED_SPI (1 << 2) u32 options; + u64 spi_base; + struct arm_smmu_cmdq cmdq; struct arm_smmu_evtq evtq; struct arm_smmu_priq priq; int gerr_irq; int combined_irq; - atomic_t sync_nr; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ unsigned long pgsize_bitmap; -#define ARM_SMMU_MAX_ASIDS (1 << 16) unsigned int asid_bits; - DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS); #define ARM_SMMU_MAX_VMIDS (1 << 16) unsigned int vmid_bits; @@ -586,16 +683,37 @@ struct arm_smmu_device { struct arm_smmu_strtab_cfg strtab_cfg; - u32 sync_count; - /* IOMMU core code handle */ struct iommu_device iommu; + + struct rb_root streams; + struct mutex streams_mutex; + + struct iopf_queue *iopf_queue; + + unsigned int mpam_partid_max; + unsigned int mpam_pmg_max; + bool bypass; +}; + +struct arm_smmu_stream { + u32 id; + struct arm_smmu_master_data *master; + struct rb_node node; }; /* SMMU private data for each master */ struct arm_smmu_master_data { struct arm_smmu_device *smmu; struct arm_smmu_strtab_ent ste; + + struct arm_smmu_domain *domain; + struct list_head list; /* domain->devices */ + struct arm_smmu_stream *streams; + + struct device *dev; + size_t ssid_bits; + bool can_fault; }; /* SMMU private data for an IOMMU domain */ @@ -611,6 +729,7 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; + bool non_strict; enum arm_smmu_domain_stage stage; union { @@ -619,6 +738,14 @@ struct arm_smmu_domain { }; 
struct iommu_domain domain; + + struct list_head devices; + spinlock_t devices_lock; +}; + +struct arm_smmu_mm { + struct io_mm io_mm; + struct iommu_pasid_entry *cd; }; struct arm_smmu_option_prop { @@ -629,6 +756,7 @@ struct arm_smmu_option_prop { static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, + { ARM_SMMU_OPT_MESSAGE_BASED_SPI, "hisilicon,message-based-spi"}, { 0, NULL}, }; @@ -647,6 +775,11 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) return container_of(dom, struct arm_smmu_domain, domain); } +static struct arm_smmu_mm *to_smmu_mm(struct io_mm *io_mm) +{ + return container_of(io_mm, struct arm_smmu_mm, io_mm); +} + static void parse_driver_options(struct arm_smmu_device *smmu) { int i = 0; @@ -662,79 +795,97 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } /* Low-level queue manipulation functions */ -static bool queue_full(struct arm_smmu_queue *q) +static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) +{ + u32 space, prod, cons; + + prod = Q_IDX(q, q->prod); + cons = Q_IDX(q, q->cons); + + if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) + space = (1 << q->max_n_shift) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + +static bool queue_full(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) != Q_WRP(q, q->cons); } -static bool queue_empty(struct arm_smmu_queue *q) +static bool queue_empty(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } -static void queue_sync_cons(struct arm_smmu_queue *q) +static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod) { - q->cons = readl_relaxed(q->cons_reg); + return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || + ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); } -static void queue_inc_cons(struct arm_smmu_queue *q) +static void queue_sync_cons_out(struct arm_smmu_queue *q) { - u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + /* + * Ensure that all CPU accesses (reads and writes) to the queue + * are complete before we update the cons pointer. + */ + mb(); + writel_relaxed(q->llq.cons, q->cons_reg); +} - q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); - writel(q->cons, q->cons_reg); +static void queue_inc_cons(struct arm_smmu_ll_queue *q) +{ + u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } -static int queue_sync_prod(struct arm_smmu_queue *q) +static int queue_sync_prod_in(struct arm_smmu_queue *q) { int ret = 0; u32 prod = readl_relaxed(q->prod_reg); - if (Q_OVF(q, prod) != Q_OVF(q, q->prod)) + if (Q_OVF(prod) != Q_OVF(q->llq.prod)) ret = -EOVERFLOW; - q->prod = prod; + q->llq.prod = prod; return ret; } -static void queue_inc_prod(struct arm_smmu_queue *q) +static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) { - u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; - - q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); - writel(q->prod, q->prod_reg); + u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; + return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } -/* - * Wait for the SMMU to consume items. If drain is true, wait until the queue - * is empty. 
Otherwise, wait until there is at least one free slot. - */ -static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) +static void queue_poll_init(struct arm_smmu_device *smmu, + struct arm_smmu_queue_poll *qp) { - ktime_t timeout; - unsigned int delay = 1, spin_cnt = 0; - - /* Wait longer if it's a CMD_SYNC */ - timeout = ktime_add_us(ktime_get(), sync ? - ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : - ARM_SMMU_POLL_TIMEOUT_US); + qp->delay = 1; + qp->spin_cnt = 0; + qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); +} - while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) { - if (ktime_compare(ktime_get(), timeout) > 0) - return -ETIMEDOUT; +static int queue_poll(struct arm_smmu_queue_poll *qp) +{ + if (ktime_compare(ktime_get(), qp->timeout) > 0) + return -ETIMEDOUT; - if (wfe) { - wfe(); - } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) { - cpu_relax(); - continue; - } else { - udelay(delay); - delay *= 2; - spin_cnt = 0; - } + if (qp->wfe) { + wfe(); + } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { + cpu_relax(); + } else { + udelay(qp->delay); + qp->delay *= 2; + qp->spin_cnt = 0; } return 0; @@ -748,16 +899,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords) *dst++ = cpu_to_le64(*src++); } -static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) -{ - if (queue_full(q)) - return -ENOSPC; - - queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords); - queue_inc_prod(q); - return 0; -} - static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) { int i; @@ -768,18 +909,19 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) { - if (queue_empty(q)) + if (queue_empty(&q->llq)) return -EAGAIN; - queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords); - queue_inc_cons(q); + queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); + queue_inc_cons(&q->llq); + queue_sync_cons_out(q); return 0; } /* High-level queue accessors */ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) { - memset(cmd, 0, CMDQ_ENT_DWORDS << 3); + memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT); cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); switch (ent->opcode) { @@ -791,15 +933,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size); cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK; break; + case CMDQ_OP_CFGI_CD: + cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); + /* Fallthrough */ case CMDQ_OP_CFGI_STE: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); break; + case CMDQ_OP_CFGI_CD_ALL: + cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); + break; case CMDQ_OP_CFGI_ALL: /* Cover the entire SID range */ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31); break; case CMDQ_OP_TLBI_NH_VA: + case CMDQ_OP_TLBI_EL2_VA: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; @@ -815,6 +964,9 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) case CMDQ_OP_TLBI_S12_VMALL: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); break; + case CMDQ_OP_TLBI_EL2_ASID: + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); + break; case CMDQ_OP_PRI_RESP: cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); cmd[0] |= 
FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid); @@ -830,15 +982,30 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) } cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); break; + case CMDQ_OP_RESUME: + cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid); + cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag); + switch (ent->resume.resp) { + case IOMMU_PAGE_RESP_INVALID: + case IOMMU_PAGE_RESP_FAILURE: + cmd[0] |= CMDQ_RESUME_0_ACTION_ABORT; + break; + case IOMMU_PAGE_RESP_SUCCESS: + cmd[0] |= CMDQ_RESUME_0_ACTION_RETRY; + break; + default: + return -EINVAL; + } + break; case CMDQ_OP_CMD_SYNC: - if (ent->sync.msiaddr) + if (ent->sync.msiaddr) { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ); - else + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; + } else { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); + } cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata); - cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; break; default: return -ENOENT; @@ -847,6 +1014,28 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) return 0; } +static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, + u32 prod) +{ + struct arm_smmu_queue *q = &smmu->cmdq.q; + struct arm_smmu_cmdq_ent ent = { + .opcode = CMDQ_OP_CMD_SYNC, + }; + + /* + * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI + * payload, so the write will zero the entire command on that platform. + */ + if (!(smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) && + smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) { + ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * + q->ent_dwords * 8; + } + + arm_smmu_cmdq_build_cmd(cmd, &ent); +} + static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) { static const char *cerror_str[] = { @@ -896,206 +1085,527 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); } -static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) +/* + * Command queue locking. + * This is a form of bastardised rwlock with the following major changes: + * + * - The only LOCK routines are exclusive_trylock() and shared_lock(). + * Neither have barrier semantics, and instead provide only a control + * dependency. + * + * - The UNLOCK routines are supplemented with shared_tryunlock(), which + * fails if the caller appears to be the last lock holder (yes, this is + * racy). All successful UNLOCK routines have RELEASE semantics. + */ +static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) { - struct arm_smmu_queue *q = &smmu->cmdq.q; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + int val; - while (queue_insert_raw(q, cmd) == -ENOSPC) { - if (queue_poll_cons(q, false, wfe)) - dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); - } + /* + * We can try to avoid the cmpxchg() loop by simply incrementing the + * lock counter. When held in exclusive state, the lock counter is set + * to INT_MIN so these increments won't hurt as the value will remain + * negative. 
+ */ + if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) + return; + + do { + val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); + } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); } -static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, - struct arm_smmu_cmdq_ent *ent) +static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) { - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; + (void)atomic_dec_return_release(&cmdq->lock); +} - if (arm_smmu_cmdq_build_cmd(cmd, ent)) { - dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", - ent->opcode); - return; - } +static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) +{ + if (atomic_read(&cmdq->lock) == 1) + return false; - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + arm_smmu_cmdq_shared_unlock(cmdq); + return true; } -/* - * The difference between val and sync_idx is bounded by the maximum size of - * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic. - */ -static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx) -{ - ktime_t timeout; - u32 val; +#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ +({ \ + bool __ret; \ + local_irq_save(flags); \ + __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) - timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US); - val = smp_cond_load_acquire(&smmu->sync_count, - (int)(VAL - sync_idx) >= 0 || - !ktime_before(ktime_get(), timeout)); +#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ +({ \ + atomic_set_release(&cmdq->lock, 0); \ + local_irq_restore(flags); \ +}) - return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0; -} -static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) +/* + * Command queue insertion. + * This is made fiddly by our attempts to achieve some sort of scalability + * since there is one queue shared amongst all of the CPUs in the system. If + * you like mixed-size concurrency, dependency ordering and relaxed atomics, + * then you'll *love* this monstrosity. + * + * The basic idea is to split the queue up into ranges of commands that are + * owned by a given CPU; the owner may not have written all of the commands + * itself, but is responsible for advancing the hardware prod pointer when + * the time comes. The algorithm is roughly: + * + * 1. Allocate some space in the queue. At this point we also discover + * whether the head of the queue is currently owned by another CPU, + * or whether we are the owner. + * + * 2. Write our commands into our allocated slots in the queue. + * + * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map. + * + * 4. If we are an owner: + * a. Wait for the previous owner to finish. + * b. Mark the queue head as unowned, which tells us the range + * that we are responsible for publishing. + * c. Wait for all commands in our owned range to become valid. + * d. Advance the hardware prod pointer. + * e. Tell the next owner we've finished. + * + * 5. If we are inserting a CMD_SYNC (we may or may not have been an + * owner), then we need to stick around until it has completed: + * a. If we have MSIs, the SMMU can write back into the CMD_SYNC + * to clear the first 4 bytes. + * b. Otherwise, we spin waiting for the hardware cons pointer to + * advance past our command. 
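For reference, the shared/exclusive counter above can be modelled with C11 atomics. The helper names, the use of <stdatomic.h> and the bare busy-wait loop are assumptions for illustration only; the driver itself relies on the kernel atomic API and masks interrupts around the exclusive section. For example::

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int cmdq_lock;	/* > 0: shared holders, INT_MIN: exclusive */

	static void shared_lock(void)
	{
		int val;

		/* Optimistic increment: harmless while the lock is held
		 * exclusively, since the value stays negative until the
		 * holder stores 0. */
		if (atomic_fetch_add_explicit(&cmdq_lock, 1,
					      memory_order_relaxed) >= 0)
			return;

		do {
			do {
				val = atomic_load_explicit(&cmdq_lock,
							   memory_order_relaxed);
			} while (val < 0);
		} while (!atomic_compare_exchange_weak_explicit(&cmdq_lock,
								&val, val + 1,
								memory_order_relaxed,
								memory_order_relaxed));
	}

	static void shared_unlock(void)
	{
		atomic_fetch_sub_explicit(&cmdq_lock, 1, memory_order_release);
	}

	static bool shared_tryunlock(void)
	{
		/* Racy by design: refuse if we appear to be the last holder. */
		if (atomic_load_explicit(&cmdq_lock, memory_order_relaxed) == 1)
			return false;

		shared_unlock();
		return true;
	}

	static bool exclusive_trylock(void)
	{
		int expected = 0;

		return atomic_compare_exchange_strong_explicit(&cmdq_lock,
							       &expected, INT_MIN,
							       memory_order_relaxed,
							       memory_order_relaxed);
	}

	static void exclusive_unlock(void)
	{
		/* Storing 0 also discards any optimistic increments that
		 * waiting readers made while the lock was held exclusively. */
		atomic_store_explicit(&cmdq_lock, 0, memory_order_release);
	}

A failed shared_tryunlock() tells the caller that it looks like the last holder, so it may update the shared cons snapshot before performing the real unlock, which is how the CMD_SYNC completion path below uses it.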
+ * + * The devil is in the details, particularly the use of locking for handling + * SYNC completion and freeing up space in the queue before we think that it is + * full. + */ +static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod, bool set) { - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; - struct arm_smmu_cmdq_ent ent = { - .opcode = CMDQ_OP_CMD_SYNC, - .sync = { - .msidata = atomic_inc_return_relaxed(&smmu->sync_nr), - .msiaddr = virt_to_phys(&smmu->sync_count), - }, + u32 swidx, sbidx, ewidx, ebidx; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = sprod, }; - arm_smmu_cmdq_build_cmd(cmd, &ent); + ewidx = BIT_WORD(Q_IDX(&llq, eprod)); + ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + while (llq.prod != eprod) { + unsigned long mask; + atomic_long_t *ptr; + u32 limit = BITS_PER_LONG; - return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata); -} + swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); + sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; -static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) -{ - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); - struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC }; - int ret; + ptr = &cmdq->valid_map[swidx]; - arm_smmu_cmdq_build_cmd(cmd, &ent); + if ((swidx == ewidx) && (sbidx < ebidx)) + limit = ebidx; - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - ret = queue_poll_cons(&smmu->cmdq.q, true, wfe); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + mask = GENMASK(limit - 1, sbidx); - return ret; + /* + * The valid bit is the inverse of the wrap bit. This means + * that a zero-initialised queue is invalid and, after marking + * all entries as valid, they become invalid again when we + * wrap. + */ + if (set) { + atomic_long_xor(mask, ptr); + } else { /* Poll */ + unsigned long valid; + + valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; + atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid); + } + + llq.prod = queue_inc_prod_n(&llq, limit - sbidx); + } } -static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +/* Mark all entries in the range [sprod, eprod) as valid */ +static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) { - int ret; - bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) && - (smmu->features & ARM_SMMU_FEAT_COHERENCY); + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); +} - ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu) - : __arm_smmu_cmdq_issue_sync(smmu); - if (ret) - dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); +/* Wait for all entries in the range [sprod, eprod) to become valid */ +static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); } -/* Context descriptor manipulation functions */ -static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr) +/* Wait for the command queue to become non-full */ +static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 val = 0; + unsigned long flags; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + int ret = 0; + + /* + * Try to update our copy of cons by grabbing exclusive cmdq access. 
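The valid-bitmap trick is easier to picture on a single 64-entry ring tracked by one bitmap word. The sketch below is a standalone illustration: the helper names and the single-word simplification are assumptions, whereas the code above walks as many bitmap words as the range covers and builds a per-word mask. For example::

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_ulong valid_map;	/* one bit per slot, 64 slots */

	/* Producer: toggle the bits for slots [start, start + n), n < 64. */
	static void mark_valid(unsigned int start, unsigned int n)
	{
		unsigned long mask = ((1UL << n) - 1) << start;

		atomic_fetch_xor_explicit(&valid_map, mask, memory_order_relaxed);
	}

	/* Observer: a slot is valid when its bit is the inverse of the wrap
	 * bit for the pass of the ring it belongs to. */
	static bool slot_valid(unsigned int idx, bool wrap)
	{
		unsigned long bit;

		bit = (atomic_load_explicit(&valid_map,
					    memory_order_relaxed) >> idx) & 1;
		return bit == (unsigned long)!wrap;
	}

	int main(void)
	{
		mark_valid(0, 3);			/* first pass, wrap == 0 */
		printf("%d\n", slot_valid(2, false));	/* bit toggled 0 -> 1 */

		mark_valid(0, 3);			/* second pass, wrap == 1 */
		printf("%d\n", slot_valid(2, true));	/* bit toggled 1 -> 0 */
		return 0;
	}

Because producers only ever XOR, they never need to know which pass they are on; only a consumer polling the map has to factor in the wrap bit, as the polling branch above does.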
If + * that fails, spin until somebody else updates it for us. + */ + if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { + WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); + arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); + llq->val = READ_ONCE(cmdq->q.llq.val); + return 0; + } - /* Repack the TCR. Just care about TTBR0 for now */ - val |= ARM_SMMU_TCR2CD(tcr, T0SZ); - val |= ARM_SMMU_TCR2CD(tcr, TG0); - val |= ARM_SMMU_TCR2CD(tcr, IRGN0); - val |= ARM_SMMU_TCR2CD(tcr, ORGN0); - val |= ARM_SMMU_TCR2CD(tcr, SH0); - val |= ARM_SMMU_TCR2CD(tcr, EPD0); - val |= ARM_SMMU_TCR2CD(tcr, EPD1); - val |= ARM_SMMU_TCR2CD(tcr, IPS); - val |= ARM_SMMU_TCR2CD(tcr, TBI0); + queue_poll_init(smmu, &qp); + do { + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + if (!queue_full(llq)) + break; - return val; + ret = queue_poll(&qp); + } while (!ret); + + return ret; } -static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu, - struct arm_smmu_s1_cfg *cfg) +/* + * Wait until the SMMU signals a CMD_SYNC completion MSI. + * Must be called with the cmdq lock held in some capacity. + */ +static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 val; + int ret = 0; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); + + queue_poll_init(smmu, &qp); /* - * We don't need to issue any invalidation here, as we'll invalidate - * the STE when installing the new entry anyway. + * The MSI won't generate an event, since it's being written back + * into the command queue. */ - val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) | -#ifdef __BIG_ENDIAN - CTXDESC_CD_0_ENDI | -#endif - CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET | - CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) | - CTXDESC_CD_0_V; + qp.wfe = false; + smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp))); + llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); + return ret; +} + +/* + * Wait until the SMMU cons index passes llq->prod. + * Must be called with the cmdq lock held in some capacity. + */ +static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) +{ + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 prod = llq->prod; + int ret = 0; - /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */ - if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE) - val |= CTXDESC_CD_0_S; + queue_poll_init(smmu, &qp); + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + do { + if (queue_consumed(llq, prod)) + break; - cfg->cdptr[0] = cpu_to_le64(val); + ret = queue_poll(&qp); - val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK; - cfg->cdptr[1] = cpu_to_le64(val); + /* + * This needs to be a readl() so that our subsequent call + * to arm_smmu_cmdq_shared_tryunlock() can fail accurately. + * + * Specifically, we need to ensure that we observe all + * shared_lock()s by other CMD_SYNCs that share our owner, + * so that a failing call to tryunlock() means that we're + * the last one out and therefore we can safely advance + * cmdq->q.llq.cons. Roughly speaking: + * + * CPU 0 CPU1 CPU2 (us) + * + * if (sync) + * shared_lock(); + * + * dma_wmb(); + * set_valid_map(); + * + * if (owner) { + * poll_valid_map(); + * + * writel(prod_reg); + * + * readl(cons_reg); + * tryunlock(); + * + * Requires us to see CPU 0's shared_lock() acquisition. 
+ */ + llq->cons = readl(cmdq->q.cons_reg); + } while (!ret); - cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair); + return ret; } -/* Stream table manipulation functions */ -static void -arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) +static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 val = 0; - - val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); - val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; + if (!(smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) && + smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) + return __arm_smmu_cmdq_poll_until_msi(smmu, llq); - *dst = cpu_to_le64(val); + return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); } -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, + u32 prod, int n) { - struct arm_smmu_cmdq_ent cmd = { - .opcode = CMDQ_OP_CFGI_STE, - .cfgi = { - .sid = sid, - .leaf = true, - }, + int i; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = prod, }; - arm_smmu_cmdq_issue_cmd(smmu, &cmd); - arm_smmu_cmdq_issue_sync(smmu); + for (i = 0; i < n; ++i) { + u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; + + prod = queue_inc_prod_n(&llq, i); + queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); + } } -static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, - __le64 *dst, struct arm_smmu_strtab_ent *ste) +static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + u64 *cmds, int n, bool sync) { + u64 cmd_sync[CMDQ_ENT_DWORDS]; + u32 prod; + unsigned long flags; + bool owner; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + }, head = llq; + int ret = 0; + + /* 1. Allocate some space in the queue */ + local_irq_save(flags); + llq.val = READ_ONCE(cmdq->q.llq.val); + do { + u64 old; + + while (!queue_has_space(&llq, n + sync)) { + local_irq_restore(flags); + if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) + dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + local_irq_save(flags); + } + + head.cons = llq.cons; + head.prod = queue_inc_prod_n(&llq, n + sync) | + CMDQ_PROD_OWNED_FLAG; + + old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); + if (old == llq.val) + break; + + llq.val = old; + } while (1); + owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); + head.prod &= ~CMDQ_PROD_OWNED_FLAG; + llq.prod &= ~CMDQ_PROD_OWNED_FLAG; + /* - * This is hideously complicated, but we only really care about - * three cases at the moment: - * - * 1. Invalid (all zero) -> bypass/fault (init) - * 2. Bypass/fault -> translation/bypass (attach) - * 3. Translation/bypass -> bypass/fault (detach) - * - * Given that we can't update the STE atomically and the SMMU - * doesn't read the thing in a defined order, that leaves us - * with the following maintenance requirements: - * - * 1. Update Config, return (init time STEs aren't live) - * 2. Write everything apart from dword 0, sync, write dword 0, sync - * 3. Update Config, sync + * 2. Write our commands into the queue + * Dependency ordering from the cmpxchg() loop above. 
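Step (1) of the algorithm reduces to a cmpxchg loop on a combined prod/cons word, with batch ownership carried by a flag bit on prod. The compact userspace model below is a sketch only: the 32+32 bit packing, the helper names and the omission of the space and wrap checks are simplifications rather than the driver's actual layout. For example::

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define PROD_OWNED	(1U << 31)

	/* prod lives in the high 32 bits, cons in the low 32 bits. */
	static _Atomic uint64_t ring_state;

	/* Reserve n slots; *owner says whether we publish the batch later. */
	static uint32_t reserve_slots(uint32_t n, bool *owner)
	{
		uint64_t old, new;
		uint32_t prod, cons;

		old = atomic_load_explicit(&ring_state, memory_order_relaxed);
		do {
			prod = (uint32_t)(old >> 32);
			cons = (uint32_t)old;
			/* A real implementation checks prod against cons here
			 * and waits for free space before retrying. */
			new = ((uint64_t)(((prod & ~PROD_OWNED) + n) |
					  PROD_OWNED) << 32) | cons;
		} while (!atomic_compare_exchange_weak_explicit(&ring_state,
								&old, new,
								memory_order_relaxed,
								memory_order_relaxed));

		/* Whoever saw the flag clear in the old value owns the batch. */
		*owner = !(prod & PROD_OWNED);
		return prod & ~PROD_OWNED;	/* first slot of our reservation */
	}

	/* Owner: stop gathering by clearing the flag; the old prod marks the
	 * end of the range this owner must publish to the hardware. */
	static uint32_t close_batch(void)
	{
		uint64_t old;

		old = atomic_fetch_and_explicit(&ring_state,
						~((uint64_t)PROD_OWNED << 32),
						memory_order_relaxed);
		return (uint32_t)(old >> 32) & ~PROD_OWNED;
	}

Owner or not, every CPU then writes its commands into its reserved slots and flips the matching valid bits, which is what steps (2) and (3) do.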
*/ - u64 val = le64_to_cpu(dst[0]); - bool ste_live = false; - struct arm_smmu_cmdq_ent prefetch_cmd = { - .opcode = CMDQ_OP_PREFETCH_CFG, - .prefetch = { - .sid = sid, - }, - }; + arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); + if (sync) { + prod = queue_inc_prod_n(&llq, n); + arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); + queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); - if (val & STRTAB_STE_0_V) { - switch (FIELD_GET(STRTAB_STE_0_CFG, val)) { + /* + * In order to determine completion of our CMD_SYNC, we must + * ensure that the queue can't wrap twice without us noticing. + * We achieve that by taking the cmdq lock as shared before + * marking our slot as valid. + */ + arm_smmu_cmdq_shared_lock(cmdq); + } + + /* 3. Mark our slots as valid, ensuring commands are visible first */ + dma_wmb(); + arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); + + /* 4. If we are the owner, take control of the SMMU hardware */ + if (owner) { + /* a. Wait for previous owner to finish */ + atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); + + /* b. Stop gathering work by clearing the owned flag */ + prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG, + &cmdq->q.llq.atomic.prod); + prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * c. Wait for any gathered work to be written to the queue. + * Note that we read our own entries so that we have the control + * dependency required by (d). + */ + arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); + + /* + * d. Advance the hardware prod pointer + * Control dependency ordering from the entries becoming valid. + */ + writel_relaxed(prod, cmdq->q.prod_reg); + + /* + * e. Tell the next owner we're done + * Make sure we've updated the hardware first, so that we don't + * race to update prod and potentially move it backwards. + */ + atomic_set_release(&cmdq->owner_prod, prod); + } + + /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */ + if (sync) { + llq.prod = queue_inc_prod_n(&llq, n); + ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); + if (ret) { + dev_err_ratelimited(smmu->dev, + "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", + llq.prod, + readl_relaxed(cmdq->q.prod_reg), + readl_relaxed(cmdq->q.cons_reg)); + } + + /* + * Try to unlock the cmq lock. 
This will fail if we're the last + * reader, in which case we can safely update cmdq->q.llq.cons + */ + if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { + WRITE_ONCE(cmdq->q.llq.cons, llq.cons); + arm_smmu_cmdq_shared_unlock(cmdq); + } + } + + local_irq_restore(flags); + return ret; +} + +static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) +{ + u64 cmd[CMDQ_ENT_DWORDS]; + + if (arm_smmu_cmdq_build_cmd(cmd, ent)) { + dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", + ent->opcode); + return -EINVAL; + } + + return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); +} + +static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +{ + return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); +} + +static int arm_smmu_page_response(struct device *dev, + struct page_response_msg *resp) +{ + int sid = dev->iommu_fwspec->ids[0]; + struct arm_smmu_cmdq_ent cmd = {0}; + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + + if (master->ste.can_stall) { + cmd.opcode = CMDQ_OP_RESUME; + cmd.resume.sid = sid; + cmd.resume.stag = resp->page_req_group_id; + cmd.resume.resp = resp->resp_code; + } else { + /* TODO: put PRI response here */ + return -ENODEV; + } + + arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); + /* + * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP. + * RESUME consumption guarantees that the stalled transaction will be + * terminated... at some point in the future. PRI_RESP is fire and + * forget. + */ + + return 0; +} + +/* Stream table manipulation functions */ +static void +arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) +{ + u64 val = 0; + + val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); + val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; + + *dst = cpu_to_le64(val); +} + +static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +{ + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_CFGI_STE, + .cfgi = { + .sid = sid, + .leaf = true, + }, + }; + + arm_smmu_cmdq_issue_cmd(smmu, &cmd); + arm_smmu_cmdq_issue_sync(smmu); +} + +static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, + __le64 *dst, struct arm_smmu_strtab_ent *ste) +{ + /* + * This is hideously complicated, but we only really care about + * three cases at the moment: + * + * 1. Invalid (all zero) -> bypass/fault (init) + * 2. Bypass/fault -> translation/bypass (attach) + * 3. Translation/bypass -> bypass/fault (detach) + * + * Given that we can't update the STE atomically and the SMMU + * doesn't read the thing in a defined order, that leaves us + * with the following maintenance requirements: + * + * 1. Update Config, return (init time STEs aren't live) + * 2. Write everything apart from dword 0, sync, write dword 0, sync + * 3. 
Update Config, sync + */ + u64 val = le64_to_cpu(dst[0]); + bool ste_live = false; + struct arm_smmu_cmdq_ent prefetch_cmd = { + .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + }, + }; + + if (val & STRTAB_STE_0_V) { + switch (FIELD_GET(STRTAB_STE_0_CFG, val)) { case STRTAB_STE_0_CFG_BYPASS: break; case STRTAB_STE_0_CFG_S1_TRANS: @@ -1103,8 +1613,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, ste_live = true; break; case STRTAB_STE_0_CFG_ABORT: - if (disable_bypass) - break; + BUG_ON(!disable_bypass); + break; default: BUG(); /* STE corruption */ } @@ -1134,22 +1644,30 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, } if (ste->s1_cfg) { + struct iommu_pasid_table_cfg *cfg = &ste->s1_cfg->tables; + int strw = smmu->features & ARM_SMMU_FEAT_E2H ? + STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; + BUG_ON(ste_live); dst[1] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | #ifdef CONFIG_PCI_ATS FIELD_PREP(STRTAB_STE_1_EATS, STRTAB_STE_1_EATS_TRANS) | #endif - FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1)); + FIELD_PREP(STRTAB_STE_1_STRW, strw)); if (smmu->features & ARM_SMMU_FEAT_STALLS && - !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) + !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE) && + !ste->can_stall) dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); - val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) | - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS); + val |= (ste->s1_cfg->tables.base & STRTAB_STE_0_S1CTXPTR_MASK) | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | + FIELD_PREP(STRTAB_STE_0_S1CDMAX, cfg->order) | + FIELD_PREP(STRTAB_STE_0_S1FMT, cfg->arm_smmu.s1fmt); } if (ste->s2_cfg) { @@ -1169,7 +1687,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, } arm_smmu_sync_ste_for_sid(smmu, sid); - dst[0] = cpu_to_le64(val); + /* See comment in arm_smmu_write_ctx_desc() */ + WRITE_ONCE(dst[0], cpu_to_le64(val)); arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ @@ -1216,18 +1735,142 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return 0; } +static struct arm_smmu_master_data * +arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid) +{ + struct rb_node *node; + struct arm_smmu_stream *stream; + struct arm_smmu_master_data *master = NULL; + + lockdep_assert_held(&smmu->streams_mutex); + + node = smmu->streams.rb_node; + while (node) { + stream = rb_entry(node, struct arm_smmu_stream, node); + if (stream->id < sid) { + node = node->rb_right; + } else if (stream->id > sid) { + node = node->rb_left; + } else { + master = stream->master; + break; + } + } + + return master; +} + +static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) +{ + int ret; + struct arm_smmu_master_data *master; + u8 type = FIELD_GET(EVTQ_0_ID, evt[0]); + u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]); + + struct iommu_fault_event fault = { + .page_req_group_id = FIELD_GET(EVTQ_1_STAG, evt[1]), + .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]), + .last_req = true, + }; + + switch (type) { + case EVT_ID_TRANSLATION_FAULT: + case EVT_ID_ADDR_SIZE_FAULT: + case EVT_ID_ACCESS_FAULT: + fault.reason = IOMMU_FAULT_REASON_PTE_FETCH; + break; + case EVT_ID_PERMISSION_FAULT: + fault.reason = 
IOMMU_FAULT_REASON_PERMISSION; + break; + default: + /* TODO: report other unrecoverable faults. */ + return -EFAULT; + } + + /* Stage-2 is always pinned at the moment */ + if (evt[1] & EVTQ_1_S2) + return -EFAULT; + + mutex_lock(&smmu->streams_mutex); + master = arm_smmu_find_master(smmu, sid); + if (!master) { + ret = -EINVAL; + goto out_unlock; + } + + /* + * The domain is valid until the fault returns, because detach() flushes + * the fault queue. + */ + if (evt[1] & EVTQ_1_STALL) + fault.type = IOMMU_FAULT_PAGE_REQ; + else + fault.type = IOMMU_FAULT_DMA_UNRECOV; + + if (evt[1] & EVTQ_1_READ) + fault.prot |= IOMMU_FAULT_READ; + else + fault.prot |= IOMMU_FAULT_WRITE; + + if (evt[1] & EVTQ_1_EXEC) + fault.prot |= IOMMU_FAULT_EXEC; + + if (evt[1] & EVTQ_1_PRIV) + fault.prot |= IOMMU_FAULT_PRIV; + + if (evt[0] & EVTQ_0_SSV) { + fault.pasid_valid = true; + fault.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]); + } + + ret = iommu_report_device_fault(master->dev, &fault); + if (ret && fault.type == IOMMU_FAULT_PAGE_REQ) { + /* Nobody cared, abort the access */ + struct page_response_msg resp = { + .addr = fault.addr, + .pasid = fault.pasid, + .pasid_present = fault.pasid_valid, + .page_req_group_id = fault.page_req_group_id, + .resp_code = IOMMU_PAGE_RESP_FAILURE, + }; + arm_smmu_page_response(master->dev, &resp); + } + +out_unlock: + mutex_unlock(&smmu->streams_mutex); + return ret; +} + /* IRQ and event handlers */ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) { - int i; + int i, ret; + int num_handled = 0; struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->evtq.q; + struct arm_smmu_ll_queue *llq = &q->llq; + size_t queue_size = 1 << q->llq.max_n_shift; u64 evt[EVTQ_ENT_DWORDS]; + spin_lock(&q->wq.lock); do { while (!queue_remove_raw(q, evt)) { u8 id = FIELD_GET(EVTQ_0_ID, evt[0]); + spin_unlock(&q->wq.lock); + cond_resched(); + ret = arm_smmu_handle_evt(smmu, evt); + spin_lock(&q->wq.lock); + + if (++num_handled == queue_size) { + q->batch++; + wake_up_all_locked(&q->wq); + num_handled = 0; + } + + if (!ret) + continue; + dev_info(smmu->dev, "event 0x%02x received:\n", id); for (i = 0; i < ARRAY_SIZE(evt); ++i) dev_info(smmu->dev, "\t0x%016llx\n", @@ -1239,12 +1882,18 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) * Not much we can do on overflow, so scream and pretend we're * trying harder. 
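The event decoding above is mask-based field extraction throughout. The kernel's FIELD_GET() boils down to masking the register and shifting the result down by the position of the mask's lowest set bit; the standalone equivalent below uses made-up example masks rather than the real EVTQ layout. For example::

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's FIELD_GET(). */
	#define EXAMPLE_FIELD_GET(mask, reg) \
		(((reg) & (mask)) / ((mask) & -(mask)))

	/* Hypothetical event record layout, for illustration only. */
	#define EX_EVT_ID	0x00000000000000ffULL	/* bits [7:0]   */
	#define EX_EVT_SSID	0x00000000000fff00ULL	/* bits [19:8]  */
	#define EX_EVT_SID	0xffffffff00000000ULL	/* bits [63:32] */

	int main(void)
	{
		uint64_t evt0 = 0x0000002a00001910ULL;

		printf("id=0x%llx ssid=0x%llx sid=0x%llx\n",
		       (unsigned long long)EXAMPLE_FIELD_GET(EX_EVT_ID, evt0),
		       (unsigned long long)EXAMPLE_FIELD_GET(EX_EVT_SSID, evt0),
		       (unsigned long long)EXAMPLE_FIELD_GET(EX_EVT_SID, evt0));
		return 0;
	}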
*/ - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); + + q->batch++; + wake_up_all_locked(&q->wq); + spin_unlock(&q->wq.lock); + return IRQ_HANDLED; } @@ -1288,24 +1937,91 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) { + int num_handled = 0; struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->priq.q; + struct arm_smmu_ll_queue *llq = &q->llq; + size_t queue_size = 1 << q->llq.max_n_shift; u64 evt[PRIQ_ENT_DWORDS]; + spin_lock(&q->wq.lock); do { - while (!queue_remove_raw(q, evt)) + while (!queue_remove_raw(q, evt)) { + spin_unlock(&q->wq.lock); arm_smmu_handle_ppr(smmu, evt); + spin_lock(&q->wq.lock); + if (++num_handled == queue_size) { + q->batch++; + wake_up_all_locked(&q->wq); + num_handled = 0; + } + } - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); - writel(q->cons, q->cons_reg); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); + queue_sync_cons_out(q); + + q->batch++; + wake_up_all_locked(&q->wq); + spin_unlock(&q->wq.lock); + return IRQ_HANDLED; } +/* + * arm_smmu_flush_queue - wait until all events/PPRs currently in the queue have + * been consumed. + * + * Wait until the queue thread finished a batch, or until the queue is empty. + * Note that we don't handle overflows on q->batch. If it occurs, just wait for + * the queue to be empty. + */ +static int arm_smmu_flush_queue(struct arm_smmu_device *smmu, + struct arm_smmu_queue *q, const char *name) +{ + int ret; + u64 batch; + + spin_lock(&q->wq.lock); + if (queue_sync_prod_in(q) == -EOVERFLOW) + dev_err(smmu->dev, "%s overflow detected -- requests lost\n", + name); + + batch = q->batch; + ret = wait_event_interruptible_locked(q->wq, queue_empty(&q->llq) || + q->batch >= batch + 2); + spin_unlock(&q->wq.lock); + + return ret; +} + +static int arm_smmu_flush_queues(void *cookie, struct device *dev) +{ + struct arm_smmu_master_data *master; + struct arm_smmu_device *smmu = cookie; + + if (dev) { + master = dev->iommu_fwspec->iommu_priv; + if (master->ste.can_stall) + arm_smmu_flush_queue(smmu, &smmu->evtq.q, "evtq"); + /* TODO: add support for PRI */ + return 0; + } + + /* No target device, flush all queues. 
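The flush above needs only the per-queue batch counter and wait queue: waiting until the queue drains, or until two batch increments have happened, guarantees that at least one full pass over the queue started after the flush began. A hypothetical userspace equivalent with POSIX threads (the names and the pthread primitives are assumptions; the driver uses wait_event_interruptible_locked()), for example::

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
	static uint64_t batch;
	static bool queue_is_empty = true;

	/* Consumer: called after draining the queue, or after handling a
	 * full queue's worth of entries in one pass. */
	static void consumer_pass_done(bool empty)
	{
		pthread_mutex_lock(&lock);
		batch++;
		queue_is_empty = empty;
		pthread_cond_broadcast(&wq);
		pthread_mutex_unlock(&lock);
	}

	/* Flusher: everything that was queued when we started has been
	 * consumed once the queue is empty or two batches have completed
	 * (the first may have been a pass already in flight). */
	static void flush_queue(void)
	{
		uint64_t start;

		pthread_mutex_lock(&lock);
		start = batch;
		while (!queue_is_empty && batch < start + 2)
			pthread_cond_wait(&wq, &lock);
		pthread_mutex_unlock(&lock);
	}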
*/ + if (smmu->features & ARM_SMMU_FEAT_STALLS) + arm_smmu_flush_queue(smmu, &smmu->evtq.q, "evtq"); + if (smmu->features & ARM_SMMU_FEAT_PRI) + arm_smmu_flush_queue(smmu, &smmu->priq.q, "priq"); + + return 0; +} + static int arm_smmu_device_disable(struct arm_smmu_device *smmu); static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) @@ -1390,14 +2106,24 @@ static void arm_smmu_tlb_inv_context(void *cookie) struct arm_smmu_cmdq_ent cmd; if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - cmd.opcode = CMDQ_OP_TLBI_NH_ASID; - cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; + if (unlikely(!smmu_domain->s1_cfg.cd0)) + return; + cmd.opcode = smmu->features & ARM_SMMU_FEAT_E2H ? + CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID; + cmd.tlbi.asid = smmu_domain->s1_cfg.cd0->tag; cmd.tlbi.vmid = 0; } else { cmd.opcode = CMDQ_OP_TLBI_S12_VMALL; cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } + /* + * NOTE: when io-pgtable is in non-strict mode, we may get here with + * PTEs previously cleared by unmaps on the current CPU not yet visible + * to the SMMU. We are relying on the dma_wmb() implicit during cmd + * insertion to guarantee those are observed before the TLBI. Do be + * careful, 007. + */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); __arm_smmu_tlb_sync(smmu); } @@ -1415,8 +2141,11 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, }; if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - cmd.opcode = CMDQ_OP_TLBI_NH_VA; - cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid; + if (unlikely(!smmu_domain->s1_cfg.cd0)) + return; + cmd.opcode = smmu->features & ARM_SMMU_FEAT_E2H ? + CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA; + cmd.tlbi.asid = smmu_domain->s1_cfg.cd0->tag; } else { cmd.opcode = CMDQ_OP_TLBI_S2_IPA; cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; @@ -1428,12 +2157,78 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } -static const struct iommu_gather_ops arm_smmu_gather_ops = { +static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync, }; +/* PASID TABLE API */ +static void __arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_cmdq_ent *cmd) +{ + size_t i; + unsigned long flags; + struct arm_smmu_master_data *master; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master, &smmu_domain->devices, list) { + struct iommu_fwspec *fwspec = master->dev->iommu_fwspec; + + for (i = 0; i < fwspec->num_ids; i++) { + cmd->cfgi.sid = fwspec->ids[i]; + arm_smmu_cmdq_issue_cmd(smmu, cmd); + } + } + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + + __arm_smmu_tlb_sync(smmu); +} + +static void arm_smmu_sync_cd(void *cookie, int ssid, bool leaf) +{ + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_CFGI_CD_ALL, + .cfgi = { + .ssid = ssid, + .leaf = leaf, + }, + }; + + __arm_smmu_sync_cd(cookie, &cmd); +} + +static void arm_smmu_sync_cd_all(void *cookie) +{ + struct arm_smmu_cmdq_ent cmd = { + .opcode = CMDQ_OP_CFGI_CD_ALL, + }; + + __arm_smmu_sync_cd(cookie, &cmd); +} + +static void arm_smmu_tlb_inv_ssid(void *cookie, int ssid, + struct iommu_pasid_entry *entry) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cmdq_ent cmd = { + .opcode = smmu->features & ARM_SMMU_FEAT_E2H ? 
+ CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID, + .tlbi.asid = entry->tag, + }; + + arm_smmu_cmdq_issue_cmd(smmu, &cmd); + __arm_smmu_tlb_sync(smmu); +} + +static struct iommu_pasid_sync_ops arm_smmu_ctx_sync = { + .cfg_flush = arm_smmu_sync_cd, + .cfg_flush_all = arm_smmu_sync_cd_all, + .tlb_flush = arm_smmu_tlb_inv_ssid, +}; + /* IOMMU API */ static bool arm_smmu_capable(enum iommu_cap cap) { @@ -1472,6 +2267,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) } mutex_init(&smmu_domain->init_mutex); + INIT_LIST_HEAD(&smmu_domain->devices); + spin_lock_init(&smmu_domain->devices_lock); + return &smmu_domain->domain; } @@ -1503,15 +2301,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) /* Free the CD and ASID, if we allocated them */ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; + struct iommu_pasid_table_ops *ops = smmu_domain->s1_cfg.ops; - if (cfg->cdptr) { - dmam_free_coherent(smmu_domain->smmu->dev, - CTXDESC_CD_DWORDS << 3, - cfg->cdptr, - cfg->cdptr_dma); - - arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid); + if (ops) { + iommu_free_pasid_entry(smmu_domain->s1_cfg.cd0); + iommu_free_pasid_ops(ops); } } else { struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; @@ -1523,38 +2317,56 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) } static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master_data *master, struct io_pgtable_cfg *pgtbl_cfg) { int ret; - int asid; + struct iommu_pasid_entry *entry; + struct iommu_pasid_table_ops *ops; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; + struct iommu_pasid_table_cfg pasid_cfg = { + .iommu_dev = smmu->dev, + .order = master->ssid_bits, + .sync = &arm_smmu_ctx_sync, + .arm_smmu = { + .stall = !!(smmu->features & + ARM_SMMU_FEAT_STALL_FORCE) || + master->ste.can_stall, + .asid_bits = smmu->asid_bits, + .hw_access = !!(smmu->features & ARM_SMMU_FEAT_HA), + .hw_dirty = !!(smmu->features & ARM_SMMU_FEAT_HD), + }, + }; - asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); - if (asid < 0) - return asid; + ops = iommu_alloc_pasid_ops(PASID_TABLE_ARM_SMMU_V3, &pasid_cfg, + smmu_domain); + if (!ops) + return -ENOMEM; - cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, - &cfg->cdptr_dma, - GFP_KERNEL | __GFP_ZERO); - if (!cfg->cdptr) { - dev_warn(smmu->dev, "failed to allocate context descriptor\n"); - ret = -ENOMEM; - goto out_free_asid; + /* Create default entry */ + entry = ops->alloc_priv_entry(ops, ARM_64_LPAE_S1, pgtbl_cfg); + if (IS_ERR(entry)) { + iommu_free_pasid_ops(ops); + return PTR_ERR(entry); } - cfg->cd.asid = (u16)asid; - cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; - cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr; - cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - return 0; + ret = ops->set_entry(ops, 0, entry); + if (ret) { + iommu_free_pasid_entry(entry); + iommu_free_pasid_ops(ops); + return ret; + } + + cfg->tables = pasid_cfg; + cfg->ops = ops; + cfg->cd0 = entry; -out_free_asid: - arm_smmu_bitmap_free(smmu->asid_map, asid); return ret; } static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master_data *master, struct io_pgtable_cfg *pgtbl_cfg) { int vmid; @@ -1571,7 +2383,8 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, return 0; } -static int arm_smmu_domain_finalise(struct iommu_domain *domain) +static int 
arm_smmu_domain_finalise(struct iommu_domain *domain, + struct arm_smmu_master_data *master) { int ret; unsigned long ias, oas; @@ -1579,6 +2392,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; int (*finalise_stage_fn)(struct arm_smmu_domain *, + struct arm_smmu_master_data *, struct io_pgtable_cfg *); struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -1617,12 +2431,13 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .pgsize_bitmap = smmu->pgsize_bitmap, .ias = ias, .oas = oas, - .tlb = &arm_smmu_gather_ops, + .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, + .tlb = &arm_smmu_flush_ops, .iommu_dev = smmu->dev, }; - if (smmu->features & ARM_SMMU_FEAT_COHERENCY) - pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + if (smmu_domain->non_strict) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) @@ -1632,7 +2447,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; domain->geometry.force_aperture = true; - ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); + ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg); if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); return ret; @@ -1687,7 +2502,19 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) static void arm_smmu_detach_dev(struct device *dev) { + unsigned long flags; struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + struct arm_smmu_domain *smmu_domain = master->domain; + + if (smmu_domain) { + __iommu_sva_unbind_dev_all(dev); + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_del(&master->list); + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + + master->domain = NULL; + } master->ste.assigned = false; arm_smmu_install_ste_for_dev(dev->iommu_fwspec); @@ -1696,6 +2523,7 @@ static void arm_smmu_detach_dev(struct device *dev) static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; + unsigned long flags; struct arm_smmu_device *smmu; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_master_data *master; @@ -1716,7 +2544,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) if (!smmu_domain->smmu) { smmu_domain->smmu = smmu; - ret = arm_smmu_domain_finalise(domain); + ret = arm_smmu_domain_finalise(domain, master); if (ret) { smmu_domain->smmu = NULL; goto out_unlock; @@ -1731,6 +2559,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) } ste->assigned = true; + master->domain = smmu_domain; + + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_add(&master->list, &smmu_domain->devices); + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) { ste->s1_cfg = NULL; @@ -1738,7 +2571,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { ste->s1_cfg = &smmu_domain->s1_cfg; ste->s2_cfg = NULL; - arm_smmu_write_ctx_desc(smmu, ste->s1_cfg); } else { ste->s1_cfg = NULL; ste->s2_cfg = &smmu_domain->s2_cfg; @@ -1772,6 +2604,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) return ops->unmap(ops, iova, size); } +static void arm_smmu_flush_iotlb_all(struct 
iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->smmu) + arm_smmu_tlb_inv_context(smmu_domain); +} + static void arm_smmu_iotlb_sync(struct iommu_domain *domain) { struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; @@ -1794,6 +2634,121 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) return ops->iova_to_phys(ops, iova); } +static int arm_smmu_sva_init(struct device *dev, struct iommu_sva_param *param) +{ + int ret; + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + + /* SSID support is mandatory for the moment */ + if (!master->ssid_bits) + return -EINVAL; + + if (param->features & ~IOMMU_SVA_FEAT_IOPF) + return -EINVAL; + + if (param->features & IOMMU_SVA_FEAT_IOPF) { + if (!master->can_fault) + return -EINVAL; + ret = iopf_queue_add_device(master->smmu->iopf_queue, dev); + if (ret) + return ret; + } + + if (!param->max_pasid) + param->max_pasid = 0xfffffU; + + /* SSID support in the SMMU requires at least one SSID bit */ + param->min_pasid = max(param->min_pasid, 1U); + param->max_pasid = min(param->max_pasid, (1U << master->ssid_bits) - 1); + + return 0; +} + +static void arm_smmu_sva_shutdown(struct device *dev, + struct iommu_sva_param *param) +{ + iopf_queue_remove_device(dev); +} + +static struct io_mm *arm_smmu_mm_alloc(struct iommu_domain *domain, + struct mm_struct *mm, + unsigned long flags) +{ + struct arm_smmu_mm *smmu_mm; + struct iommu_pasid_entry *cd; + struct iommu_pasid_table_ops *ops; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + return NULL; + + smmu_mm = kzalloc(sizeof(*smmu_mm), GFP_KERNEL); + if (!smmu_mm) + return NULL; + + ops = smmu_domain->s1_cfg.ops; + cd = ops->alloc_shared_entry(ops, mm); + if (IS_ERR(cd)) { + kfree(smmu_mm); + return ERR_CAST(cd); + } + + smmu_mm->cd = cd; + return &smmu_mm->io_mm; +} + +static void arm_smmu_mm_free(struct io_mm *io_mm) +{ + struct arm_smmu_mm *smmu_mm = to_smmu_mm(io_mm); + + iommu_free_pasid_entry(smmu_mm->cd); + kfree(smmu_mm); +} + +static int arm_smmu_mm_attach(struct iommu_domain *domain, struct device *dev, + struct io_mm *io_mm, bool attach_domain) +{ + struct arm_smmu_mm *smmu_mm = to_smmu_mm(io_mm); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct iommu_pasid_table_ops *ops = smmu_domain->s1_cfg.ops; + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + + if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + return -EINVAL; + + if (!(master->smmu->features & ARM_SMMU_FEAT_SVA)) + return -ENODEV; + + if (!attach_domain) + return 0; + + return ops->set_entry(ops, io_mm->pasid, smmu_mm->cd); +} + +static void arm_smmu_mm_detach(struct iommu_domain *domain, struct device *dev, + struct io_mm *io_mm, bool detach_domain) +{ + struct arm_smmu_mm *smmu_mm = to_smmu_mm(io_mm); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct iommu_pasid_table_ops *ops = smmu_domain->s1_cfg.ops; + + if (detach_domain) + ops->clear_entry(ops, io_mm->pasid, smmu_mm->cd); + + /* TODO: Invalidate ATC. */ + /* TODO: Invalidate all mappings if last and not DVM. */ +} + +static void arm_smmu_mm_invalidate(struct iommu_domain *domain, + struct device *dev, struct io_mm *io_mm, + unsigned long iova, size_t size) +{ + /* + * TODO: Invalidate ATC. 
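The PASID range negotiation in arm_smmu_sva_init() above amounts to a pair of clamps: one against the 20-bit PASID ceiling and one against the master's SSID width, with PASID 0 kept back for the default context. A tiny standalone sketch, with made-up structure and helper names, for example::

	#include <stdio.h>

	struct sva_param_example {
		unsigned int min_pasid;
		unsigned int max_pasid;
	};

	/* Assumed to follow the same clamping as arm_smmu_sva_init() above. */
	static void clamp_pasid_range(struct sva_param_example *p,
				      unsigned int ssid_bits)
	{
		if (!p->max_pasid)
			p->max_pasid = 0xfffffU;	/* 20-bit PASID ceiling */

		if (p->min_pasid < 1)
			p->min_pasid = 1;	/* PASID 0 is the default context */

		if (p->max_pasid > (1U << ssid_bits) - 1)
			p->max_pasid = (1U << ssid_bits) - 1;
	}

	int main(void)
	{
		struct sva_param_example p = { 0, 0 };

		clamp_pasid_range(&p, 16);
		printf("usable PASIDs: %u..%u\n", p.min_pasid, p.max_pasid);
		return 0;
	}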
+ * TODO: Invalidate mapping if not DVM + */ +} + static struct platform_driver arm_smmu_driver; static int arm_smmu_match_node(struct device *dev, void *data) @@ -1820,6 +2775,71 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) return sid < limit; } +static int arm_smmu_insert_master(struct arm_smmu_device *smmu, + struct arm_smmu_master_data *master) +{ + int i; + int ret = 0; + struct arm_smmu_stream *new_stream, *cur_stream; + struct rb_node **new_node, *parent_node = NULL; + struct iommu_fwspec *fwspec = master->dev->iommu_fwspec; + + master->streams = kcalloc(fwspec->num_ids, + sizeof(struct arm_smmu_stream), GFP_KERNEL); + if (!master->streams) + return -ENOMEM; + + mutex_lock(&smmu->streams_mutex); + for (i = 0; i < fwspec->num_ids && !ret; i++) { + new_stream = &master->streams[i]; + new_stream->id = fwspec->ids[i]; + new_stream->master = master; + + new_node = &(smmu->streams.rb_node); + while (*new_node) { + cur_stream = rb_entry(*new_node, struct arm_smmu_stream, + node); + parent_node = *new_node; + if (cur_stream->id > new_stream->id) { + new_node = &((*new_node)->rb_left); + } else if (cur_stream->id < new_stream->id) { + new_node = &((*new_node)->rb_right); + } else { + dev_warn(master->dev, + "stream %u already in tree\n", + cur_stream->id); + ret = -EINVAL; + break; + } + } + + if (!ret) { + rb_link_node(&new_stream->node, parent_node, new_node); + rb_insert_color(&new_stream->node, &smmu->streams); + } + } + mutex_unlock(&smmu->streams_mutex); + + return ret; +} + +static void arm_smmu_remove_master(struct arm_smmu_device *smmu, + struct arm_smmu_master_data *master) +{ + int i; + struct iommu_fwspec *fwspec = master->dev->iommu_fwspec; + + if (!master->streams) + return; + + mutex_lock(&smmu->streams_mutex); + for (i = 0; i < fwspec->num_ids; i++) + rb_erase(&master->streams[i].node, &smmu->streams); + mutex_unlock(&smmu->streams_mutex); + + kfree(master->streams); +} + static struct iommu_ops arm_smmu_ops; static int arm_smmu_add_device(struct device *dev) @@ -1849,6 +2869,7 @@ static int arm_smmu_add_device(struct device *dev) return -ENOMEM; master->smmu = smmu; + master->dev = dev; fwspec->iommu_priv = master; } @@ -1856,24 +2877,50 @@ static int arm_smmu_add_device(struct device *dev) for (i = 0; i < fwspec->num_ids; i++) { u32 sid = fwspec->ids[i]; - if (!arm_smmu_sid_in_range(smmu, sid)) - return -ERANGE; + if (!arm_smmu_sid_in_range(smmu, sid)) { + ret = -ERANGE; + goto err_free_master; + } /* Ensure l2 strtab is initialised */ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { ret = arm_smmu_init_l2_strtab(smmu, sid); if (ret) - return ret; + goto err_free_master; } } + master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits); + + if (fwspec->can_stall && smmu->features & ARM_SMMU_FEAT_STALLS) { + master->can_fault = true; + master->ste.can_stall = true; + } + + ret = iommu_device_link(&smmu->iommu, dev); + if (ret) + goto err_free_master; + group = iommu_group_get_for_dev(dev); - if (!IS_ERR(group)) { - iommu_group_put(group); - iommu_device_link(&smmu->iommu, dev); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto err_remove_master; } - return PTR_ERR_OR_ZERO(group); + arm_smmu_insert_master(smmu, master); + iommu_group_put(group); + + return 0; + +err_remove_master: + arm_smmu_remove_master(smmu, master); + iommu_device_unlink(&smmu->iommu, dev); + +err_free_master: + kfree(master); + fwspec->iommu_priv = NULL; + + return ret; } static void arm_smmu_remove_device(struct device *dev) @@ -1886,9 +2933,14 @@ static void 
arm_smmu_remove_device(struct device *dev) return; master = fwspec->iommu_priv; + if (!master) + return; + smmu = master->smmu; - if (master && master->ste.assigned) + iopf_queue_remove_device(dev); + if (master->ste.assigned) arm_smmu_detach_dev(dev); + arm_smmu_remove_master(smmu, master); iommu_group_remove_device(dev); iommu_device_unlink(&smmu->iommu, dev); kfree(master); @@ -1917,15 +2969,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = smmu_domain->non_strict; + return 0; + default: + return -ENODEV; + } + break; default: - return -ENODEV; + return -EINVAL; } } @@ -1935,26 +2999,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - mutex_lock(&smmu_domain->init_mutex); - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + break; + default: + ret = -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch(attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + smmu_domain->non_strict = *(int *)data; + break; + default: + ret = -ENODEV; } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; default: - ret = -ENODEV; + ret = -EINVAL; } out_unlock: @@ -1992,14 +3067,46 @@ static void arm_smmu_put_resv_regions(struct device *dev, kfree(entry); } +#ifdef CONFIG_SMMU_BYPASS_DEV +static int arm_smmu_device_domain_type(struct device *dev, unsigned int *type) +{ + int i; + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return -ERANGE; + + pdev = to_pci_dev(dev); + for (i = 0; i < smmu_bypass_devices_num; i++) { + if ((smmu_bypass_devices[i].vendor == pdev->vendor) + && (smmu_bypass_devices[i].device == pdev->device)) { + dev_info(dev, "device 0x%hx:0x%hx uses identity mapping.", + pdev->vendor, pdev->device); + *type = IOMMU_DOMAIN_IDENTITY; + return 0; + } + } + + return -ERANGE; +} +#endif + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, .domain_free = arm_smmu_domain_free, .attach_dev = arm_smmu_attach_dev, + .sva_device_init = arm_smmu_sva_init, + .sva_device_shutdown = arm_smmu_sva_shutdown, + .mm_alloc = arm_smmu_mm_alloc, + .mm_free = arm_smmu_mm_free, + .mm_attach = arm_smmu_mm_attach, + .mm_detach = arm_smmu_mm_detach, + .mm_invalidate = arm_smmu_mm_invalidate, + .page_response = arm_smmu_page_response, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .flush_iotlb_all = arm_smmu_iotlb_sync, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, 
.iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, @@ -2011,6 +3118,9 @@ static struct iommu_ops arm_smmu_ops = { .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = arm_smmu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ +#ifdef CONFIG_SMMU_BYPASS_DEV + .device_domain_type = arm_smmu_device_domain_type, +#endif }; /* Probing and initialisation functions */ @@ -2018,27 +3128,74 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, struct arm_smmu_queue *q, unsigned long prod_off, unsigned long cons_off, - size_t dwords) + size_t dwords, const char *name) { - size_t qsz = ((1 << q->max_n_shift) * dwords) << 3; + size_t qsz; + + do { + qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; + q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, + GFP_KERNEL); + if (q->base || qsz < PAGE_SIZE) + break; + + q->llq.max_n_shift--; + } while (1); - q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (!q->base) { - dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n", - qsz); + dev_err(smmu->dev, + "failed to allocate queue (0x%zx bytes) for %s\n", + qsz, name); return -ENOMEM; } + if (!WARN_ON(q->base_dma & (qsz - 1))) { + dev_info(smmu->dev, "allocated %u entries for %s\n", + 1 << q->llq.max_n_shift, name); + } + q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); q->ent_dwords = dwords; - q->q_base = Q_BASE_RWA; - q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; - q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift); + q->q_base = Q_BASE_RWA; + q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; + q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); + + q->llq.prod = q->llq.cons = 0; + + init_waitqueue_head(&q->wq); + q->batch = 0; + + return 0; +} + +static void arm_smmu_cmdq_free_bitmap(void *data) +{ + unsigned long *bitmap = data; + bitmap_free(bitmap); +} + +static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) +{ + int ret = 0; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + unsigned int nents = 1 << cmdq->q.llq.max_n_shift; + atomic_long_t *bitmap; + + atomic_set(&cmdq->owner_prod, 0); + atomic_set(&cmdq->lock, 0); - q->prod = q->cons = 0; - return 0; + bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); + if (!bitmap) { + dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); + ret = -ENOMEM; + } else { + cmdq->valid_map = bitmap; + devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); + } + + return ret; } static int arm_smmu_init_queues(struct arm_smmu_device *smmu) @@ -2046,15 +3203,20 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) int ret; /* cmdq */ - spin_lock_init(&smmu->cmdq.lock); ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, - ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS); + ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, + "cmdq"); + if (ret) + return ret; + + ret = arm_smmu_cmdq_init(smmu); if (ret) return ret; /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, - ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS); + ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, + "evtq"); if (ret) return ret; @@ -2063,7 +3225,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) return 0; return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, - ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS); + ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS, + "priq"); } static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) @@ -2087,12 +3250,60 @@ static int 
arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) return 0; } +#ifdef CONFIG_SMMU_BYPASS_DEV +static void arm_smmu_install_bypass_ste_for_dev(struct arm_smmu_device *smmu, + u32 sid) +{ + u64 val; + __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + + if (!step) + return; + + val = STRTAB_STE_0_V; + val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); + step[0] = cpu_to_le64(val); + step[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, + STRTAB_STE_1_SHCFG_INCOMING)); + step[2] = 0; + +} + +static int arm_smmu_prepare_init_l2_strtab(struct device *dev, void *data) +{ + u32 sid; + int ret; + unsigned int type; + struct pci_dev *pdev; + struct arm_smmu_device *smmu = (struct arm_smmu_device *)data; + + if (arm_smmu_device_domain_type(dev, &type)) + return 0; + + pdev = to_pci_dev(dev); + sid = PCI_DEVID(pdev->bus->number, pdev->devfn); + if (!arm_smmu_sid_in_range(smmu, sid)) + return -ERANGE; + + ret = arm_smmu_init_l2_strtab(smmu, sid); + if (ret) + return ret; + + arm_smmu_install_bypass_ste_for_dev(smmu, sid); + + return 0; +} +#endif + static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; u64 reg; u32 size, l1size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; +#ifdef CONFIG_SMMU_BYPASS_DEV + int ret; +#endif /* Calculate the L1 size, capped to the SIDSIZE. */ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); @@ -2122,7 +3333,19 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); cfg->strtab_base_cfg = reg; +#ifdef CONFIG_SMMU_BYPASS_DEV + ret = arm_smmu_init_l1_strtab(smmu); + if (ret) + return ret; + + if (smmu_bypass_devices_num) { + ret = bus_for_each_dev(&pci_bus_type, NULL, (void *)smmu, + arm_smmu_prepare_init_l2_strtab); + } + return ret; +#else return arm_smmu_init_l1_strtab(smmu); +#endif } static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) @@ -2180,7 +3403,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu) { int ret; - atomic_set(&smmu->sync_nr, 0); + mutex_init(&smmu->streams_mutex); + smmu->streams = RB_ROOT; + ret = arm_smmu_init_queues(smmu); if (ret) return ret; @@ -2236,6 +3461,13 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; doorbell &= MSI_CFG0_ADDR_MASK; +#ifdef CONFIG_PM_SLEEP + /* Saves the msg (base addr of msi irq) and restores it during resume */ + desc->msg.address_lo = msg->address_lo; + desc->msg.address_hi = msg->address_hi; + desc->msg.data = msg->data; +#endif + writeq_relaxed(doorbell, smmu->base + cfg[0]); writel_relaxed(msg->data, smmu->base + cfg[1]); writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); @@ -2291,11 +3523,82 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action(dev, arm_smmu_free_msis, dev); } -static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) +#ifdef CONFIG_PM_SLEEP +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ + struct msi_desc *desc; + struct device *dev = smmu->dev; + + for_each_msi_entry(desc, dev) { + switch (desc->platform.msi_index) { + case EVTQ_MSI_INDEX: + case GERROR_MSI_INDEX: + case PRIQ_MSI_INDEX: { + phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; + struct msi_msg *msg = &desc->msg; + phys_addr_t doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; + + doorbell &= MSI_CFG0_ADDR_MASK; + writeq_relaxed(doorbell, smmu->base + cfg[0]); + 
writel_relaxed(msg->data, smmu->base + cfg[1]); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + cfg[2]); + break; + } + default: + continue; + + } + } +} +#else +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ +} +#endif + +static void arm_smmu_setup_message_based_spi(struct arm_smmu_device *smmu) +{ + struct irq_desc *desc; + u32 event_hwirq, gerror_hwirq, pri_hwirq; + + desc = irq_to_desc(smmu->gerr_irq); + gerror_hwirq = desc->irq_data.hwirq; + writeq_relaxed(smmu->spi_base, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); + writel_relaxed(gerror_hwirq, smmu->base + ARM_SMMU_GERROR_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_GERROR_IRQ_CFG2); + + desc = irq_to_desc(smmu->evtq.q.irq); + event_hwirq = desc->irq_data.hwirq; + writeq_relaxed(smmu->spi_base, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); + writel_relaxed(event_hwirq, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_EVTQ_IRQ_CFG2); + + if (smmu->features & ARM_SMMU_FEAT_PRI) { + desc = irq_to_desc(smmu->priq.q.irq); + pri_hwirq = desc->irq_data.hwirq; + + writeq_relaxed(smmu->spi_base, + smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); + writel_relaxed(pri_hwirq, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_PRIQ_IRQ_CFG2); + } +} + +static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu, bool resume) { int irq, ret; - arm_smmu_setup_msis(smmu); + if (!resume) + arm_smmu_setup_msis(smmu); + else { + /* The irq doesn't need to be re-requested during resume */ + arm_smmu_resume_msis(smmu); + return; + } /* Request interrupt lines */ irq = smmu->evtq.q.irq; @@ -2337,7 +3640,7 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) } } -static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) { int ret, irq; u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; @@ -2353,8 +3656,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) irq = smmu->combined_irq; if (irq) { /* - * Cavium ThunderX2 implementation doesn't not support unique - * irq lines. Use single irq line for all the SMMUv3 interrupts. + * Cavium ThunderX2 implementation doesn't support unique irq + * lines. Use a single irq line for all the SMMUv3 interrupts. 
*/ ret = devm_request_threaded_irq(smmu->dev, irq, arm_smmu_combined_irq_handler, @@ -2364,11 +3667,14 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (ret < 0) dev_warn(smmu->dev, "failed to enable combined irq\n"); } else - arm_smmu_setup_unique_irqs(smmu); + arm_smmu_setup_unique_irqs(smmu, resume); if (smmu->features & ARM_SMMU_FEAT_PRI) irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; + if (smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) + arm_smmu_setup_message_based_spi(smmu); + /* Enable interrupt generation on the SMMU */ ret = arm_smmu_write_reg_sync(smmu, irqen_flags, ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK); @@ -2389,7 +3695,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) return ret; } -static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) +static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) { int ret; u32 reg, enables; @@ -2398,13 +3704,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Clear CR0 and sync (disables SMMU and queue processing) */ reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); if (reg & CR0_SMMUEN) { - if (is_kdump_kernel()) { - arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); - arm_smmu_device_disable(smmu); - return -EBUSY; - } - dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n"); + WARN_ON(is_kdump_kernel() && !disable_bypass); + arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0); } ret = arm_smmu_device_disable(smmu); @@ -2421,7 +3723,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) writel_relaxed(reg, smmu->base + ARM_SMMU_CR1); /* CR2 (random crap) */ - reg = CR2_PTM | CR2_RECINVSID | CR2_E2H; + reg = CR2_RECINVSID; + + if (smmu->features & ARM_SMMU_FEAT_E2H) + reg |= CR2_E2H; + + if (!(smmu->features & ARM_SMMU_FEAT_BTM)) + reg |= CR2_PTM; + writel_relaxed(reg, smmu->base + ARM_SMMU_CR2); /* Stream table */ @@ -2432,8 +3741,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Command queue */ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); - writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD); - writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS); + writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); + writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); enables = CR0_CMDQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2460,9 +3769,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, + writel_relaxed(smmu->evtq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); - writel_relaxed(smmu->evtq.q.cons, + writel_relaxed(smmu->evtq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; @@ -2477,9 +3786,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) if (smmu->features & ARM_SMMU_FEAT_PRI) { writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); - writel_relaxed(smmu->priq.q.prod, + writel_relaxed(smmu->priq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); - writel_relaxed(smmu->priq.q.cons, + writel_relaxed(smmu->priq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; @@ -2491,15 +3800,17 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) } } - ret = arm_smmu_setup_irqs(smmu); + ret = 
arm_smmu_setup_irqs(smmu, resume); if (ret) { dev_err(smmu->dev, "failed to setup irqs\n"); return ret; } + if (is_kdump_kernel()) + enables &= ~(CR0_EVTQEN | CR0_PRIQEN); /* Enable the SMMU interface, or ensure bypass */ - if (!bypass || disable_bypass) { + if (!smmu->bypass || disable_bypass) { enables |= CR0_SMMUEN; } else { ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); @@ -2516,10 +3827,81 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) return 0; } +static bool arm_smmu_supports_sva(struct arm_smmu_device *smmu) +{ + unsigned long reg, fld; + unsigned long oas; + unsigned long asid_bits; + + u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY; + + if ((smmu->features & feat_mask) != feat_mask) + return false; + + if (!(smmu->pgsize_bitmap & PAGE_SIZE)) + return false; + + /* + * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're + * not even pretending to support AArch32 here. + */ + reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + fld = cpuid_feature_extract_unsigned_field(reg, + ID_AA64MMFR0_PARANGE_SHIFT); + switch (fld) { + case 0x0: + oas = 32; + break; + case 0x1: + oas = 36; + break; + case 0x2: + oas = 40; + break; + case 0x3: + oas = 42; + break; + case 0x4: + oas = 44; + break; + case 0x5: + oas = 48; + break; + case 0x6: + oas = 52; + break; + default: + return false; + } + + /* abort if MMU outputs addresses greater than what we support. */ + if (smmu->oas < oas) + return false; + + /* We can support bigger ASIDs than the CPU, but not smaller */ + fld = cpuid_feature_extract_unsigned_field(reg, + ID_AA64MMFR0_ASID_SHIFT); + asid_bits = fld ? 16 : 8; + if (smmu->asid_bits < asid_bits) + return false; + + /* + * See max_pinned_asids in arch/arm64/mm/context.c. The following is + * generally the maximum number of bindable processes. + */ + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) + asid_bits--; + dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) - + num_possible_cpus() - 2); + + return true; +} + static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) { u32 reg; bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY; + bool vhe = cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN); /* IDR0 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0); @@ -2567,8 +3949,26 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (reg & IDR0_MSI) smmu->features |= ARM_SMMU_FEAT_MSI; - if (reg & IDR0_HYP) + if (reg & IDR0_HYP) { smmu->features |= ARM_SMMU_FEAT_HYP; + if (vhe) + smmu->features |= ARM_SMMU_FEAT_E2H; + } + + if (reg & (IDR0_HA | IDR0_HD)) { + smmu->features |= ARM_SMMU_FEAT_HA; + if (reg & IDR0_HD) + smmu->features |= ARM_SMMU_FEAT_HD; + } + + /* + * If the CPU is using VHE, but the SMMU doesn't support it, the SMMU + * will create TLB entries for NH-EL1 world and will miss the + * broadcasted TLB invalidations that target EL2-E2H world. Don't enable + * BTM in that case. 
+ */ + if (reg & IDR0_BTM && (!vhe || reg & IDR0_HYP)) + smmu->features |= ARM_SMMU_FEAT_BTM; /* * The coherency feature as set by FW is used in preference to the ID @@ -2620,19 +4020,25 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) return -ENXIO; } - /* Queue sizes, capped at 4k */ - smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_CMDQS, reg)); - if (!smmu->cmdq.q.max_n_shift) { - /* Odd alignment restrictions on the base, so ignore for now */ - dev_err(smmu->dev, "unit-length command queue not supported\n"); + /* Queue sizes, capped to ensure natural alignment */ + smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_CMDQS, reg)); + if (smmu->cmdq.q.llq.max_n_shift < ilog2(BITS_PER_LONG)) { + /* + * The cmdq valid_map relies on the total number of entries + * being a multiple of BITS_PER_LONG. There's also no way + * we can handle the weird alignment restrictions on the + * base pointer for a unit-length queue. + */ + dev_err(smmu->dev, "command queue size < %d entries not supported\n", + BITS_PER_LONG); return -ENXIO; } - smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_EVTQS, reg)); - smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_PRIQS, reg)); + smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_EVTQS, reg)); + smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_PRIQS, reg)); /* SID/SSID sizes */ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); @@ -2645,6 +4051,16 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (smmu->sid_bits <= STRTAB_SPLIT) smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; + /* IDR3 */ + reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); + if (reg & IDR3_MPAM) { + reg = readl_relaxed(smmu->base + ARM_SMMU_MPAMIDR); + smmu->mpam_partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, reg); + smmu->mpam_pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, reg); + if (smmu->mpam_partid_max || smmu->mpam_pmg_max) + smmu->features |= ARM_SMMU_FEAT_MPAM; + } + /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); @@ -2704,6 +4120,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->ias = max(smmu->ias, smmu->oas); + if (arm_smmu_supports_sva(smmu)) + smmu->features |= ARM_SMMU_FEAT_SVA; + dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", smmu->ias, smmu->oas, smmu->features); return 0; @@ -2767,6 +4186,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, parse_driver_options(smmu); + if (smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) { + if (of_property_read_u64(dev->of_node, "iommu-spi-base", + &smmu->spi_base)) { + dev_err(dev, "missing irq base address\n"); + ret = -EINVAL; + } + } + if (of_dma_is_coherent(dev->of_node)) smmu->features |= ARM_SMMU_FEAT_COHERENCY; @@ -2781,6 +4208,245 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) return SZ_128K; } +static int arm_smmu_set_ste_mpam(struct arm_smmu_device *smmu, + int sid, int partid, int pmg, int s1mpam) +{ + u64 val; + __le64 *ste; + + if (!arm_smmu_sid_in_range(smmu, sid)) + return -ERANGE; + + /* get ste ptr */ + ste = arm_smmu_get_step_for_sid(smmu, sid); + + /* write s1mpam to ste */ + val = le64_to_cpu(ste[1]); + val &= ~STRTAB_STE_1_S1MPAM; + val |= FIELD_PREP(STRTAB_STE_1_S1MPAM, s1mpam); + WRITE_ONCE(ste[1], cpu_to_le64(val)); + + val = le64_to_cpu(ste[4]); + val &= ~STRTAB_STE_4_PARTID_MASK; + val |= FIELD_PREP(STRTAB_STE_4_PARTID_MASK, partid); + 
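+ /* dwords 4 and 5 of the STE hold the PARTID and PMG applied when s1mpam is clear */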
WRITE_ONCE(ste[4], cpu_to_le64(val)); + + val = le64_to_cpu(ste[5]); + val &= ~STRTAB_STE_5_PMG_MASK; + val |= FIELD_PREP(STRTAB_STE_5_PMG_MASK, pmg); + WRITE_ONCE(ste[5], cpu_to_le64(val)); + + arm_smmu_sync_ste_for_sid(smmu, sid); + + return 0; +} + +static int arm_smmu_get_ste_mpam(struct arm_smmu_device *smmu, + int sid, int *partid, int *pmg, int *s1mpam) +{ + u64 val; + __le64 *ste; + + if (!arm_smmu_sid_in_range(smmu, sid)) + return -ERANGE; + + /* get ste ptr */ + ste = arm_smmu_get_step_for_sid(smmu, sid); + + val = le64_to_cpu(ste[1]); + *s1mpam = FIELD_GET(STRTAB_STE_1_S1MPAM, val); + if (*s1mpam) + return 0; + + val = le64_to_cpu(ste[4]); + *partid = FIELD_GET(STRTAB_STE_4_PARTID_MASK, val); + + val = le64_to_cpu(ste[5]); + *pmg = FIELD_GET(STRTAB_STE_5_PMG_MASK, val); + + return 0; +} + +int arm_smmu_set_cd_mpam(struct iommu_pasid_table_ops *ops, + int ssid, int partid, int pmg); + +int arm_smmu_get_cd_mpam(struct iommu_pasid_table_ops *ops, + int ssid, int *partid, int *pmg); + +static int arm_smmu_set_mpam(struct arm_smmu_device *smmu, + int sid, int ssid, int partid, int pmg, int s1mpam) +{ + struct arm_smmu_master_data *master = arm_smmu_find_master(smmu, sid); + struct arm_smmu_s1_cfg *cfg = master ? master->ste.s1_cfg : NULL; + struct arm_smmu_domain *domain = master ? master->domain : NULL; + int ret; + + struct arm_smmu_cmdq_ent prefetch_cmd = { + .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + }, + }; + + if (!(smmu->features & ARM_SMMU_FEAT_MPAM)) + return -ENODEV; + + if (WARN_ON(!domain)) + return -EINVAL; + + if (WARN_ON(!cfg)) + return -EINVAL; + + if (WARN_ON(ssid >= (1 << master->ssid_bits))) + return -E2BIG; + + if (partid > smmu->mpam_partid_max || pmg > smmu->mpam_pmg_max) { + dev_err(smmu->dev, + "mpam rmid out of range: partid[0, %d] pmg[0, %d]\n", + smmu->mpam_partid_max, smmu->mpam_pmg_max); + return -ERANGE; + } + + ret = arm_smmu_set_ste_mpam(smmu, sid, partid, pmg, s1mpam); + if (ret < 0) { + dev_err(smmu->dev, "set ste mpam configuration error %d\n", + ret); + return ret; + } + + /* do not modify the CD table, which is owned by the guest */ + if (domain->stage == ARM_SMMU_DOMAIN_NESTED) { + dev_err(smmu->dev, + "mpam: smmu cd is owned by guest, not modified\n"); + return 0; + } + + ret = arm_smmu_set_cd_mpam(cfg->ops, ssid, partid, pmg); + if (s1mpam && ret < 0) { + dev_err(smmu->dev, "set cd mpam configuration error %d\n", + ret); + return ret; + } + + /* It's likely that we'll want to use the new STE soon */ + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + + dev_info(smmu->dev, "partid %d, pmg %d\n", partid, pmg); + + return 0; +} + +/** + * arm_smmu_set_dev_mpam() - Set mpam configuration to SMMU STE/CD + */ +int arm_smmu_set_dev_mpam(struct device *dev, int ssid, int partid, int pmg, + int s1mpam) +{ + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + struct arm_smmu_device *smmu = master->domain->smmu; + int sid = master->streams->id; + + return arm_smmu_set_mpam(smmu, sid, ssid, partid, pmg, s1mpam); +} +EXPORT_SYMBOL(arm_smmu_set_dev_mpam); + +static int arm_smmu_get_mpam(struct arm_smmu_device *smmu, + int sid, int ssid, int *partid, int *pmg, int *s1mpam) +{ + struct arm_smmu_master_data *master = arm_smmu_find_master(smmu, sid); + struct arm_smmu_s1_cfg *cfg = master ?
master->ste.s1_cfg : NULL; + int ret; + + if (!(smmu->features & ARM_SMMU_FEAT_MPAM)) + return -ENODEV; + + ret = arm_smmu_get_ste_mpam(smmu, sid, partid, pmg, s1mpam); + if (ret) + return ret; + + /* return STE mpam configuration when s1mpam == 0 */ + if (!(*s1mpam)) + return 0; + + if (WARN_ON(!cfg)) + return -EINVAL; + + if (WARN_ON(ssid >= (1 << master->ssid_bits))) + return -E2BIG; + + return arm_smmu_get_cd_mpam(cfg->ops, ssid, partid, pmg); +} + +/** + * arm_smmu_get_dev_mpam() - get mpam configuration + * @dev: the device + */ +int arm_smmu_get_dev_mpam(struct device *dev, int ssid, int *partid, int *pmg, + int *s1mpam) +{ + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + struct arm_smmu_device *smmu = master->domain->smmu; + int sid = master->streams->id; + + return arm_smmu_get_mpam(smmu, sid, ssid, partid, pmg, s1mpam); +} +EXPORT_SYMBOL(arm_smmu_get_dev_mpam); + +/** + * arm_smmu_set_dev_user_mpam_en() - set user_mpam_en to smmu user cfg0 + */ +int arm_smmu_set_dev_user_mpam_en(struct device *dev, int user_mpam_en) +{ + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + struct arm_smmu_device *smmu = master->domain->smmu; + u32 reg, __iomem *cfg = smmu->base + ARM_SMMU_USER_CFG0; + + reg = readl_relaxed(cfg); + reg &= ~ARM_SMMU_USER_MPAM_EN; + reg |= FIELD_PREP(ARM_SMMU_USER_MPAM_EN, user_mpam_en); + writel_relaxed(reg, cfg); + + return 0; +} +EXPORT_SYMBOL(arm_smmu_set_dev_user_mpam_en); + +/** + * arm_smmu_get_dev_user_mpam_en() - get user_mpam_en from smmu user cfg0 + */ +int arm_smmu_get_dev_user_mpam_en(struct device *dev, int *user_mpam_en) +{ + struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv; + struct arm_smmu_device *smmu = master->domain->smmu; + u32 reg, __iomem *cfg = smmu->base + ARM_SMMU_USER_CFG0; + + reg = readl_relaxed(cfg); + *user_mpam_en = FIELD_GET(ARM_SMMU_USER_MPAM_EN, reg); + + return 0; +} +EXPORT_SYMBOL(arm_smmu_get_dev_user_mpam_en); + +#ifdef CONFIG_PM_SLEEP +static int arm_smmu_suspend(struct device *dev) +{ + /* + * The smmu is powered off and related registers are automatically + * cleared during suspend. No need to do anything. + */ + return 0; +} + +static int arm_smmu_resume(struct device *dev) +{ + struct arm_smmu_device *smmu = dev_get_drvdata(dev); + + arm_smmu_device_reset(smmu, true); + + return 0; +} +#endif + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -2788,7 +4454,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev) resource_size_t ioaddr; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; - bool bypass; smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@ -2806,7 +4471,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } /* Set bypass mode according to firmware probing result */ - bypass = !!ret; + smmu->bypass = !!ret; /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2852,10 +4517,18 @@ static int arm_smmu_device_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu); /* Reset the device */ - ret = arm_smmu_device_reset(smmu, bypass); + ret = arm_smmu_device_reset(smmu, false); if (ret) return ret; + if (smmu->features & (ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_PRI)) { + smmu->iopf_queue = iopf_queue_alloc(dev_name(dev), + arm_smmu_flush_queues, + smmu); + if (!smmu->iopf_queue) + return -ENOMEM; + } + /* And we're up. Go go go!
*/ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "smmu3.%pa", &ioaddr); @@ -2898,6 +4571,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev) { struct arm_smmu_device *smmu = platform_get_drvdata(pdev); + if (smmu->iopf_queue) + iopf_queue_free(smmu->iopf_queue); + arm_smmu_device_disable(smmu); return 0; @@ -2914,10 +4590,22 @@ static const struct of_device_id arm_smmu_of_match[] = { }; MODULE_DEVICE_TABLE(of, arm_smmu_of_match); +#ifdef CONFIG_PM_SLEEP +static const struct dev_pm_ops arm_smmu_pm_ops = { + .suspend = arm_smmu_suspend, + .resume = arm_smmu_resume, +}; +#define ARM_SMMU_PM_OPS (&arm_smmu_pm_ops) +#else +#define ARM_SMMU_PM_OPS NULL +#endif + static struct platform_driver arm_smmu_driver = { .driver = { - .name = "arm-smmu-v3", - .of_match_table = of_match_ptr(arm_smmu_of_match), + .name = "arm-smmu-v3", + .of_match_table = of_match_ptr(arm_smmu_of_match), + .suppress_bind_attrs = true, + .pm = ARM_SMMU_PM_OPS, }, .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fd1b80ef9490d2f4044f0704e9ebc5843d21b2bf..d1c00b1dfd2efac78dc89288dc8dcee0eb6e5e94 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -53,9 +54,21 @@ #include -#include "io-pgtable.h" #include "arm-smmu-regs.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + +/* + * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU + * global register space are still, in fact, using a hypervisor to mediate it + * by trapping and emulating register accesses. Sadly, some deployed versions + * of said trapping code have bugs wherein they go horribly wrong for stores + * using r31 (i.e. XZR/WZR) as the source register. + */ +#define QCOM_DUMMY_VAL -1 + #define ARM_MMU500_ACTLR_CPRE (1 << 1) #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26) @@ -118,6 +131,7 @@ enum arm_smmu_implementation { GENERIC_SMMU, ARM_MMU500, CAVIUM_SMMUV2, + QCOM_SMMUV2, }; struct arm_smmu_s2cr { @@ -243,9 +257,10 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct iommu_gather_ops *tlb_ops; + const struct iommu_flush_ops *tlb_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; + bool non_strict; struct mutex init_mutex; /* Protects smmu pointer */ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ struct iommu_domain domain; @@ -397,7 +412,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, { unsigned int spin_cnt, delay; - writel_relaxed(0, sync); + writel_relaxed(QCOM_DUMMY_VAL, sync); for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE)) @@ -447,7 +462,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie) struct arm_smmu_cfg *cfg = &smmu_domain->cfg; void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); - writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); + /* + * NOTE: this is not a relaxed write; it needs to guarantee that PTEs + * cleared by the current CPU are visible to the SMMU before the TLBI. 
+ */ + writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID); arm_smmu_tlb_sync_context(cookie); } @@ -457,7 +476,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie) struct arm_smmu_device *smmu = smmu_domain->smmu; void __iomem *base = ARM_SMMU_GR0(smmu); - writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); + /* NOTE: see above */ + writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); arm_smmu_tlb_sync_global(smmu); } @@ -469,6 +489,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + if (stage1) { reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; @@ -510,22 +533,25 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, struct arm_smmu_domain *smmu_domain = cookie; void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + wmb(); + writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } -static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { +static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, @@ -856,12 +882,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .pgsize_bitmap = smmu->pgsize_bitmap, .ias = ias, .oas = oas, + .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, .tlb = smmu_domain->tlb_ops, .iommu_dev = smmu->dev, }; - if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) - pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + if (smmu_domain->non_strict) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); @@ -1252,6 +1279,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return ops->unmap(ops, iova, size); } +static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->tlb_ops) + smmu_domain->tlb_ops->tlb_flush_all(smmu_domain); +} + static void arm_smmu_iotlb_sync(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -1376,6 +1411,20 @@ static int arm_smmu_add_device(struct device *dev) return -ENODEV; } +#ifdef CONFIG_ARCH_PHYTIUM + /* ft2000+ */ + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + + for (i = 0; i < num; i++) { +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + u32 fwid = FWID_READ(fwspec->ids[i]); + + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = fwspec->ids[i]; @@ -1450,6 +1499,12 @@ static struct iommu_group *arm_smmu_device_group(struct device 
*dev) if (group && smmu->s2crs[idx].group && group != smmu->s2crs[idx].group) return ERR_PTR(-EINVAL); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_s2500()) + break; + if (typeof_ft2000plus() && !smmu->s2crs[idx].group) + continue; +#endif group = smmu->s2crs[idx].group; } @@ -1470,15 +1525,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - - switch (attr) { - case DOMAIN_ATTR_NESTING: - *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); - return 0; + switch(domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = smmu_domain->non_strict; + return 0; + default: + return -ENODEV; + } + break; default: - return -ENODEV; + return -EINVAL; } } @@ -1488,28 +1555,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, int ret = 0; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - if (domain->type != IOMMU_DOMAIN_UNMANAGED) - return -EINVAL; - mutex_lock(&smmu_domain->init_mutex); - switch (attr) { - case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) { - ret = -EPERM; - goto out_unlock; + switch(domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + break; + default: + ret = -ENODEV; + } + break; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + smmu_domain->non_strict = *(int *)data; + break; + default: + ret = -ENODEV; } - - if (*(int *)data) - smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; - else - smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - break; default: - ret = -ENODEV; + ret = -EINVAL; } - out_unlock: mutex_unlock(&smmu_domain->init_mutex); return ret; @@ -1555,6 +1632,27 @@ static void arm_smmu_put_resv_regions(struct device *dev, kfree(entry); } +#ifdef CONFIG_SMMU_BYPASS_DEV + +#ifdef CONFIG_ARCH_PHYTIUM +static int phytium_smmu_def_domain_type(struct device *dev, unsigned int *type) +{ + if (typeof_ft2000plus() || typeof_s2500()) { + *type = IOMMU_DOMAIN_IDENTITY; + return 0; + } + + return -EINVAL; +} +#else +static inline int phytium_smmu_def_domain_type(struct device *dev, unsigned int *type) +{ + return -EINVAL; +} +#endif + +#endif + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1562,7 +1660,7 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, - .flush_iotlb_all = arm_smmu_iotlb_sync, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, @@ -1574,6 +1672,9 @@ static struct iommu_ops arm_smmu_ops = { .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = arm_smmu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ +#ifdef CONFIG_SMMU_BYPASS_DEV + .device_domain_type = phytium_smmu_def_domain_type, +#endif }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) @@ -1630,8 +1731,8 @@ static void 
arm_smmu_device_reset(struct arm_smmu_device *smmu) } /* Invalidate the TLB, just in case */ - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); @@ -1906,6 +2007,7 @@ ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU); ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU); ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500); ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2); +ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2); static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 }, @@ -1914,6 +2016,7 @@ static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,mmu-401", .data = &arm_mmu401 }, { .compatible = "arm,mmu-500", .data = &arm_mmu500 }, { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 }, + { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 }, { }, }; MODULE_DEVICE_TABLE(of, arm_smmu_of_match); @@ -2204,9 +2307,10 @@ static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume); static struct platform_driver arm_smmu_driver = { .driver = { - .name = "arm-smmu", - .of_match_table = of_match_ptr(arm_smmu_of_match), - .pm = &arm_smmu_pm_ops, + .name = "arm-smmu", + .of_match_table = of_match_ptr(arm_smmu_of_match), + .pm = &arm_smmu_pm_ops, + .suppress_bind_attrs = true, }, .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 511ff9a1d6d94087bffaaac226055bf892e08d91..b68d9fd27bba149b2e9295012f217528826e3907 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -55,6 +55,9 @@ struct iommu_dma_cookie { }; struct list_head msi_page_list; spinlock_t msi_lock; + + /* Domain for flush queue callback; NULL if flush queue not in use */ + struct iommu_domain *fq_domain; }; static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) @@ -190,15 +193,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, start -= iova_offset(iovad, start); num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); - msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); - if (!msi_page) - return -ENOMEM; - for (i = 0; i < num_pages; i++) { - msi_page[i].phys = start; - msi_page[i].iova = start; - INIT_LIST_HEAD(&msi_page[i].list); - list_add(&msi_page[i].list, &cookie->msi_page_list); + msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL); + if (!msi_page) + return -ENOMEM; + + msi_page->phys = start; + msi_page->iova = start; + INIT_LIST_HEAD(&msi_page->list); + list_add(&msi_page->list, &cookie->msi_page_list); start += iovad->granule; } @@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev, return ret; } +static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad) +{ + struct iommu_dma_cookie *cookie; + struct iommu_domain *domain; + + cookie = container_of(iovad, struct iommu_dma_cookie, iovad); + domain = cookie->fq_domain; + /* + * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE + * implies that ops->flush_iotlb_all must be non-NULL. 
+ */ + domain->ops->flush_iotlb_all(domain); +} + /** * iommu_dma_init_domain - Initialise a DMA mapping domain * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() @@ -273,12 +290,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn, end_pfn; + struct iova_domain *iovad; + int attr; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; + iovad = &cookie->iovad; + /* Use the smallest supported page size for IOVA granularity */ order = __ffs(domain->pgsize_bitmap); base_pfn = max_t(unsigned long, 1, base >> order); @@ -308,6 +328,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } init_iova_domain(iovad, 1UL << order, base_pfn); + + if (!cookie->fq_domain && !iommu_domain_get_attr(domain, + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { + cookie->fq_domain = domain; + init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL); + } + if (!dev) return 0; @@ -393,6 +420,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, /* The MSI case is only ever cleaning up its most recent allocation */ if (cookie->type == IOMMU_DMA_MSI_COOKIE) cookie->msi_iova -= size; + else if (cookie->fq_domain) /* non-strict mode */ + queue_iova(iovad, iova_pfn(iovad, iova), + size >> iova_shift(iovad), 0); else free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad)); @@ -408,7 +438,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, dma_addr -= iova_off; size = iova_align(iovad, size + iova_off); - WARN_ON(iommu_unmap(domain, dma_addr, size) != size); + WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size); + if (!cookie->fq_domain) + iommu_tlb_sync(domain); iommu_dma_free_iova(cookie, dma_addr, size); } @@ -419,20 +451,17 @@ static void __iommu_dma_free_pages(struct page **pages, int count) kvfree(pages); } -static struct page **__iommu_dma_alloc_pages(unsigned int count, - unsigned long order_mask, gfp_t gfp) +static struct page **__iommu_dma_alloc_pages(struct device *dev, + unsigned int count, unsigned long order_mask, gfp_t gfp) { struct page **pages; - unsigned int i = 0, array_size = count * sizeof(*pages); + unsigned int i = 0, nid = dev_to_node(dev); order_mask &= (2U << MAX_ORDER) - 1; if (!order_mask) return NULL; - if (array_size <= PAGE_SIZE) - pages = kzalloc(array_size, GFP_KERNEL); - else - pages = vzalloc(array_size); + pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; @@ -451,10 +480,12 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, for (order_mask &= (2U << __fls(count)) - 1; order_mask; order_mask &= ~order_size) { unsigned int order = __fls(order_mask); + gfp_t alloc_flags = gfp; order_size = 1U << order; - page = alloc_pages((order_mask - order_size) ? 
- gfp | __GFP_NORETRY : gfp, order); + if (order_mask > order_size) + alloc_flags |= __GFP_NORETRY; + page = alloc_pages_node(nid, alloc_flags, order); if (!page) continue; if (!order) @@ -491,7 +522,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); + __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size); __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); *handle = IOMMU_MAPPING_ERROR; } @@ -518,7 +549,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; struct page **pages; @@ -539,7 +570,8 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, alloc_sizes = min_size; count = PAGE_ALIGN(size) >> PAGE_SHIFT; - pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp); + pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT, + gfp); if (!pages) return NULL; @@ -606,9 +638,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot) + size_t size, int prot, struct iommu_domain *domain) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; size_t iova_off = 0; dma_addr_t iova; @@ -632,13 +663,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, int prot) { - return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot); + return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot, + iommu_get_dma_domain(dev)); } void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); + __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size); } /* @@ -675,7 +707,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, * - and wouldn't make the resulting output segment too long */ if (cur_len && !s_iova_off && (dma_addr & seg_mask) && - (cur_len + s_length <= max_len)) { + (max_len - cur_len >= s_length)) { /* ...then concatenate it with the previous one */ cur_len += s_length; } else { @@ -726,7 +758,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int prot) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; struct scatterlist *s, *prev = NULL; @@ -811,20 +843,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, sg = tmp; } end = sg_dma_address(sg) + sg_dma_len(sg); - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start); + __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start); } dma_addr_t 
iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __iommu_dma_map(dev, phys, size, - dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); + dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, + iommu_get_dma_domain(dev)); } void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); + __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size); } int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -850,7 +883,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (!msi_page) return NULL; - iova = __iommu_dma_map(dev, msi_addr, size, prot); + iova = __iommu_dma_map(dev, msi_addr, size, prot, domain); if (iommu_dma_mapping_error(dev, iova)) goto out_free_page; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index d9c748b6f9e452bf521626d4c1174af8694558bf..6d608f71867ce02a1e2c3f107cb8241862b0f3bb 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) for (tmp = dev; tmp; tmp = tmp->bus->self) level++; - size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); + size = sizeof(*info) + level * sizeof(info->path[0]); if (size <= sizeof(dmar_pci_notify_info_buf)) { info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; } else { @@ -218,7 +218,7 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, } /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ -int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, +int dmar_pci_insert_dev_scope(struct dmar_pci_notify_info *info, void *start, void*end, u16 segment, struct dmar_dev_scope *devices, int devices_cnt) @@ -307,7 +307,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) drhd = container_of(dmaru->hdr, struct acpi_dmar_hardware_unit, header); - ret = dmar_insert_dev_scope(info, (void *)(drhd + 1), + ret = dmar_pci_insert_dev_scope(info, (void *)(drhd + 1), ((void *)drhd) + drhd->header.length, dmaru->segment, dmaru->devices, dmaru->devices_cnt); @@ -698,47 +698,58 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev) return dmaru; } -static void __init dmar_acpi_insert_dev_scope(u8 device_number, - struct acpi_device *adev) +/* Return: > 0 if match found, 0 if no match found */ +bool dmar_acpi_insert_dev_scope(u8 device_number, + struct acpi_device *adev, + void *start, void *end, + struct dmar_dev_scope *devices, + int devices_cnt) { - struct dmar_drhd_unit *dmaru; - struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_device_scope *scope; struct device *tmp; int i; struct acpi_dmar_pci_path *path; + for (; start < end; start += scope->length) { + scope = start; + if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) + continue; + if (scope->enumeration_id != device_number) + continue; + path = (void *)(scope + 1); + for_each_dev_scope(devices, devices_cnt, i, tmp) + if (tmp == NULL) { + devices[i].bus = scope->bus; + devices[i].devfn = PCI_DEVFN(path->device, path->function); + rcu_assign_pointer(devices[i].dev, + get_device(&adev->dev)); + return true; + } + WARN_ON(i >= devices_cnt); + } + return false; +} + +static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev) +{ + struct dmar_drhd_unit *dmaru; + struct acpi_dmar_hardware_unit *drhd; + int ret; + 
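+ /* walk every DRHD's device scope looking for this ACPI namespace device */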
for_each_drhd_unit(dmaru) { drhd = container_of(dmaru->hdr, struct acpi_dmar_hardware_unit, header); - for (scope = (void *)(drhd + 1); - (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; - scope = ((void *)scope) + scope->length) { - if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) - continue; - if (scope->enumeration_id != device_number) - continue; - - path = (void *)(scope + 1); - pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n", - dev_name(&adev->dev), dmaru->reg_base_addr, - scope->bus, path->device, path->function); - for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) - if (tmp == NULL) { - dmaru->devices[i].bus = scope->bus; - dmaru->devices[i].devfn = PCI_DEVFN(path->device, - path->function); - rcu_assign_pointer(dmaru->devices[i].dev, - get_device(&adev->dev)); - return; - } - BUG_ON(i >= dmaru->devices_cnt); - } + ret = dmar_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1), + ((void *)drhd)+drhd->header.length, + dmaru->devices, dmaru->devices_cnt); + if (ret) + break; } - pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", - device_number, dev_name(&adev->dev)); + if (ret > 0) + ret = dmar_rmrr_add_acpi_dev(device_number, adev); + return ret; } static int __init dmar_acpi_dev_scope_init(void) @@ -767,7 +778,7 @@ static int __init dmar_acpi_dev_scope_init(void) andd->device_name); continue; } - dmar_acpi_insert_dev_scope(andd->device_number, adev); + dmar_acpi_bus_add_dev(andd->device_number, adev); } } return 0; @@ -795,6 +806,7 @@ int __init dmar_dev_scope_init(void) info = dmar_alloc_pci_notify_info(dev, BUS_NOTIFY_ADD_DEVICE); if (!info) { + pci_dev_put(dev); return dmar_dev_scope_status; } else { dmar_pci_bus_add_dev(info); @@ -1094,13 +1106,15 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) err = iommu_device_register(&iommu->iommu); if (err) - goto err_unmap; + goto err_sysfs; } drhd->iommu = iommu; return 0; +err_sysfs: + iommu_device_sysfs_remove(&iommu->iommu); err_unmap: unmap_iommu(iommu); error_free_seq_id: diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index bedc801b06a0bf2c6745511acbab08e769a54eb2..fb63abcf6592784e63924d069e14b1e56bb07f63 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -387,7 +387,6 @@ struct dmar_rmrr_unit { u64 end_address; /* reserved end address */ struct dmar_dev_scope *devices; /* target devices */ int devices_cnt; /* target device count */ - struct iommu_resv_region *resv; /* reserved region handle */ }; struct dmar_atsr_unit { @@ -827,12 +826,39 @@ static int iommu_dummy(struct device *dev) return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; } +/** + * is_downstream_to_pci_bridge - test if a device belongs to the PCI + * sub-hierarchy of a candidate PCI-PCI bridge + * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy + * @bridge: the candidate PCI-PCI bridge + * + * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false. 
+ */ +static bool +is_downstream_to_pci_bridge(struct device *dev, struct device *bridge) +{ + struct pci_dev *pdev, *pbridge; + + if (!dev_is_pci(dev) || !dev_is_pci(bridge)) + return false; + + pdev = to_pci_dev(dev); + pbridge = to_pci_dev(bridge); + + if (pbridge->subordinate && + pbridge->subordinate->number <= pdev->bus->number && + pbridge->subordinate->busn_res.end >= pdev->bus->number) + return true; + + return false; +} + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; struct intel_iommu *iommu; struct device *tmp; - struct pci_dev *ptmp, *pdev = NULL; + struct pci_dev *pdev = NULL; u16 segment = 0; int i; @@ -878,13 +904,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf goto out; } - if (!pdev || !dev_is_pci(tmp)) - continue; - - ptmp = to_pci_dev(tmp); - if (ptmp->subordinate && - ptmp->subordinate->number <= pdev->bus->number && - ptmp->subordinate->busn_res.end >= pdev->bus->number) + if (is_downstream_to_pci_bridge(dev, tmp)) goto got_pdev; } @@ -1624,6 +1644,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) u32 pmen; unsigned long flags; + if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flags); pmen = readl(iommu->reg + DMAR_PMEN_REG); pmen &= ~DMA_PMEN_EPM; @@ -1926,7 +1949,7 @@ static inline int guestwidth_to_adjustwidth(int gaw) static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, int guest_width) { - int adjust_width, agaw; + int adjust_width, agaw, cap_width; unsigned long sagaw; int err; @@ -1940,8 +1963,9 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, domain_reserve_special_ranges(domain); /* calculate AGAW */ - if (guest_width > cap_mgaw(iommu->cap)) - guest_width = cap_mgaw(iommu->cap); + cap_width = min_t(int, cap_mgaw(iommu->cap), agaw_to_width(iommu->agaw)); + if (guest_width > cap_width) + guest_width = cap_width; domain->gaw = guest_width; adjust_width = guestwidth_to_adjustwidth(guest_width); agaw = width_to_agaw(adjust_width); @@ -2069,7 +2093,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, * than default. Unnecessary for PT mode. 
*/ if (translation != CONTEXT_TT_PASS_THROUGH) { - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { ret = -ENOMEM; pgd = phys_to_virt(dma_pte_addr(pgd)); if (!dma_pte_present(pgd)) @@ -2083,7 +2107,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, translation = CONTEXT_TT_MULTI_LEVEL; context_set_address_root(context, virt_to_phys(pgd)); - context_set_address_width(context, iommu->agaw); + context_set_address_width(context, agaw); } else { /* * In pass through mode, AW must be programmed to @@ -2733,6 +2757,27 @@ static int domain_prepare_identity_map(struct device *dev, return iommu_domain_identity_map(domain, start, end); } +static struct device *acpi_dev_find_pci_dev(struct device *dev) +{ + struct acpi_device_physical_node *pn; + struct acpi_device *adev; + + if (dev->bus == &acpi_bus_type) { + adev = to_acpi_device(dev); + + mutex_lock(&adev->physical_node_lock); + list_for_each_entry(pn, &adev->physical_node_list, node) { + if (dev_is_pci(pn->dev)) { + mutex_unlock(&adev->physical_node_lock); + return pn->dev; + } + } + mutex_unlock(&adev->physical_node_lock); + } + + return dev; +} + static int iommu_prepare_identity_map(struct device *dev, unsigned long long start, unsigned long long end) @@ -2740,6 +2785,8 @@ static int iommu_prepare_identity_map(struct device *dev, struct dmar_domain *domain; int ret; + dev = acpi_dev_find_pci_dev(dev); + domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) return -ENOMEM; @@ -2865,7 +2912,8 @@ static bool device_has_rmrr(struct device *dev) */ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, tmp) - if (tmp == dev) { + if (tmp == dev || + is_downstream_to_pci_bridge(dev, tmp)) { rcu_read_unlock(); return true; } @@ -3100,7 +3148,7 @@ static int copy_context_table(struct intel_iommu *iommu, } if (old_ce) - iounmap(old_ce); + memunmap(old_ce); ret = 0; if (devfn < 0x80) @@ -3384,9 +3432,12 @@ static int __init init_dmars(void) iommu_identity_mapping |= IDENTMAP_ALL; #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA - iommu_identity_mapping |= IDENTMAP_GFX; + dmar_map_gfx = 0; #endif + if (!dmar_map_gfx) + iommu_identity_mapping |= IDENTMAP_GFX; + check_tylersburg_isoch(); if (iommu_identity_mapping) { @@ -3468,7 +3519,13 @@ static int __init init_dmars(void) #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) { + /* + * Call dmar_alloc_hwirq() with dmar_global_lock held, + * could cause possible lock race condition. + */ + up_write(&dmar_global_lock); ret = intel_svm_enable_prq(iommu); + down_write(&dmar_global_lock); if (ret) goto free_iommu; } @@ -3718,7 +3775,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) freelist = domain_unmap(domain, start_pfn, last_pfn); - if (intel_iommu_strict) { + if (intel_iommu_strict || !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0); /* free iova */ @@ -4030,9 +4087,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. 
Either bypass it or set the gfx_mapped flag, as appropriate */ - if (dmar_map_gfx) { - intel_iommu_gfx_mapped = 1; - } else { + if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) @@ -4181,7 +4236,6 @@ static inline void init_iommu_pm_ops(void) {} int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; - int prot = DMA_PTE_READ|DMA_PTE_WRITE; struct dmar_rmrr_unit *rmrru; size_t length; @@ -4195,22 +4249,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) rmrru->end_address = rmrr->end_address; length = rmrr->end_address - rmrr->base_address + 1; - rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, - IOMMU_RESV_DIRECT); - if (!rmrru->resv) - goto free_rmrru; rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), ((void *)rmrr) + rmrr->header.length, &rmrru->devices_cnt); if (rmrru->devices_cnt && rmrru->devices == NULL) - goto free_all; + goto free_rmrru; list_add(&rmrru->list, &dmar_rmrr_units); return 0; -free_all: - kfree(rmrru->resv); free_rmrru: kfree(rmrru); out: @@ -4428,7 +4476,6 @@ static void intel_iommu_free_dmars(void) list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { list_del(&rmrru->list); dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); - kfree(rmrru->resv); kfree(rmrru); } @@ -4482,6 +4529,26 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) return ret; } +int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + int ret; + struct dmar_rmrr_unit *rmrru; + struct acpi_dmar_reserved_memory *rmrr; + + list_for_each_entry(rmrru, &dmar_rmrr_units, list) { + rmrr = container_of(rmrru->hdr, + struct acpi_dmar_reserved_memory, + header); + ret = dmar_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1), + ((void *)rmrr) + rmrr->header.length, + rmrru->devices, rmrru->devices_cnt); + if (ret) + break; + } + pr_info("Add acpi_dev:%s to rmrru->devices\n", dev_name(&adev->dev)); + return 0; +} + int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { int ret = 0; @@ -4497,7 +4564,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header); if (info->event == BUS_NOTIFY_ADD_DEVICE) { - ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), + ret = dmar_pci_insert_dev_scope(info, (void *)(rmrr + 1), ((void *)rmrr) + rmrr->header.length, rmrr->segment, rmrru->devices, rmrru->devices_cnt); @@ -4515,7 +4582,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); if (info->event == BUS_NOTIFY_ADD_DEVICE) { - ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), + ret = dmar_pci_insert_dev_scope(info, (void *)(atsr + 1), (void *)atsr + atsr->header.length, atsr->segment, atsru->devices, atsru->devices_cnt); @@ -4548,16 +4615,19 @@ static int device_notifier(struct notifier_block *nb, if (iommu_dummy(dev)) return 0; - if (action != BUS_NOTIFY_REMOVED_DEVICE) - return 0; - - domain = find_domain(dev); - if (!domain) - return 0; + if (action == BUS_NOTIFY_REMOVED_DEVICE) { + domain = find_domain(dev); + if (!domain) + return 0; - dmar_remove_one_dev_info(domain, dev); - if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) - domain_exit(domain); + dmar_remove_one_dev_info(domain, dev); + if (!domain_type_is_vm_or_si(domain) && + list_empty(&domain->devices)) + domain_exit(domain); 
+ } else if (action == BUS_NOTIFY_ADD_DEVICE) { + if (iommu_should_identity_map(dev, 1)) + domain_add_dev_info(si_domain, dev); + } return 0; } @@ -4828,6 +4898,9 @@ int __init intel_iommu_init(void) goto out_free_reserved_range; } + if (dmar_map_gfx) + intel_iommu_gfx_mapped = 1; + init_no_remapping_devices(); ret = init_dmars(); @@ -5166,6 +5239,7 @@ static int intel_iommu_add_device(struct device *dev) struct intel_iommu *iommu; struct iommu_group *group; u8 bus, devfn; + int ret; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) @@ -5175,11 +5249,17 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5199,22 +5279,34 @@ static void intel_iommu_remove_device(struct device *dev) static void intel_iommu_get_resv_regions(struct device *device, struct list_head *head) { + int prot = DMA_PTE_READ | DMA_PTE_WRITE; struct iommu_resv_region *reg; struct dmar_rmrr_unit *rmrr; struct device *i_dev; int i; - rcu_read_lock(); + down_read(&dmar_global_lock); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { - if (i_dev != device) + struct iommu_resv_region *resv; + size_t length; + + if (i_dev != device && + !is_downstream_to_pci_bridge(device, i_dev)) continue; - list_add_tail(&rmrr->resv->list, head); + length = rmrr->end_address - rmrr->base_address + 1; + resv = iommu_alloc_resv_region(rmrr->base_address, + length, prot, + IOMMU_RESV_DIRECT); + if (!resv) + break; + + list_add_tail(&resv->list, head); } } - rcu_read_unlock(); + up_read(&dmar_global_lock); reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, @@ -5229,10 +5321,8 @@ static void intel_iommu_put_resv_regions(struct device *dev, { struct iommu_resv_region *entry, *next; - list_for_each_entry_safe(entry, next, head, list) { - if (entry->type == IOMMU_RESV_RESERVED) - kfree(entry); - } + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); } #ifdef CONFIG_INTEL_IOMMU_SVM diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c index fe95c9bd4d338396ef9a6b2a3cc547253c04260b..1a82ee1c6e3d90bced9c86170ffc5159a169e2f4 100644 --- a/drivers/iommu/intel-pasid.c +++ b/drivers/iommu/intel-pasid.c @@ -149,8 +149,10 @@ int intel_pasid_alloc_table(struct device *dev) pages = alloc_pages_node(info->iommu->node, GFP_ATOMIC | __GFP_ZERO, order); - if (!pages) + if (!pages) { + kfree(pasid_table); return -ENOMEM; + } pasid_table->table = page_address(pages); pasid_table->order = order; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 4a03e50909520e5eb40d6245fa3e1affda769780..5d284c00dd27ba394d4fb61f3f0625b63042bcb8 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -293,7 +293,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ int pasid_max; int ret; - if (!iommu) + if (!iommu || dmar_disabled) return -EINVAL; if (dev_is_pci(dev)) { @@ -596,7 +596,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) pr_err("%s: Page request without PASID: %08llx %08llx\n", iommu->name, ((unsigned long long *)req)[0], ((unsigned long long *)req)[1]); - goto bad_req; + goto no_pasid; } if (!svm || svm->pasid != req->pasid) { @@ -620,14 +620,15 @@ 
static irqreturn_t prq_event_thread(int irq, void *d) * any faults on kernel addresses. */ if (!svm->mm) goto bad_req; - /* If the mm is already defunct, don't handle faults. */ - if (!mmget_not_zero(svm->mm)) - goto bad_req; /* If address is not canonical, return invalid response */ if (!is_canonical_address(address)) goto bad_req; + /* If the mm is already defunct, don't handle faults. */ + if (!mmget_not_zero(svm->mm)) + goto bad_req; + down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); if (!vma || address < vma->vm_start) diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c new file mode 100644 index 0000000000000000000000000000000000000000..1029a22441bb0e70631fb75fc18f9a451c907e0e --- /dev/null +++ b/drivers/iommu/io-pgfault.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handle device page faults + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include +#include + +/** + * struct iopf_queue - IO Page Fault queue + * @wq: the fault workqueue + * @flush: low-level flush callback + * @flush_arg: flush() argument + * @refs: references to this structure taken by producers + */ +struct iopf_queue { + struct workqueue_struct *wq; + iopf_queue_flush_t flush; + void *flush_arg; + refcount_t refs; +}; + +/** + * struct iopf_device_param - IO Page Fault data attached to a device + * @queue: IOPF queue + * @partial: faults that are part of a Page Request Group for which the last + * request hasn't been submitted yet. + */ +struct iopf_device_param { + struct iopf_queue *queue; + struct list_head partial; +}; + +struct iopf_context { + struct device *dev; + struct iommu_fault_event evt; + struct list_head head; +}; + +struct iopf_group { + struct iopf_context last_fault; + struct list_head faults; + struct work_struct work; +}; + +int enable_iopf_hipri __read_mostly; + +static int iopf_complete(struct device *dev, struct iommu_fault_event *evt, + enum page_response_code status) +{ + struct page_response_msg resp = { + .addr = evt->addr, + .pasid = evt->pasid, + .pasid_present = evt->pasid_valid, + .page_req_group_id = evt->page_req_group_id, + .private_data = evt->iommu_private, + .resp_code = status, + }; + + return iommu_page_response(dev, &resp); +} + +static enum page_response_code +iopf_handle_single(struct iopf_context *fault) +{ + int ret; + struct mm_struct *mm; + struct vm_area_struct *vma; + unsigned int access_flags = 0; + unsigned int fault_flags = FAULT_FLAG_REMOTE; + struct iommu_fault_event *evt = &fault->evt; + enum page_response_code status = IOMMU_PAGE_RESP_INVALID; + + if (!evt->pasid_valid) + return status; + + mm = iommu_sva_find(evt->pasid); + if (!mm) + return status; + + down_read(&mm->mmap_sem); + + vma = find_extend_vma(mm, evt->addr); + if (!vma) + /* Unmapped area */ + goto out_put_mm; + + if (evt->prot & IOMMU_FAULT_READ) + access_flags |= VM_READ; + + if (evt->prot & IOMMU_FAULT_WRITE) { + access_flags |= VM_WRITE; + fault_flags |= FAULT_FLAG_WRITE; + } + + if (evt->prot & IOMMU_FAULT_EXEC) { + access_flags |= VM_EXEC; + fault_flags |= FAULT_FLAG_INSTRUCTION; + } + + if (!(evt->prot & IOMMU_FAULT_PRIV)) + fault_flags |= FAULT_FLAG_USER; + + if (access_flags & ~vma->vm_flags) + /* Access fault */ + goto out_put_mm; + + ret = handle_mm_fault(vma, evt->addr, fault_flags); + status = ret & VM_FAULT_ERROR ? 
IOMMU_PAGE_RESP_INVALID : + IOMMU_PAGE_RESP_SUCCESS; + +out_put_mm: + up_read(&mm->mmap_sem); + + /* + * If the process exits while we're handling the fault on its mm, we + * can't do mmput(). exit_mmap() would release the MMU notifier, calling + * iommu_notifier_release(), which has to flush the fault queue that + * we're executing on... So mmput_async() moves the release of the mm to + * another thread, if we're the last user. + */ + mmput_async(mm); + + return status; +} + +static void iopf_handle_group(struct work_struct *work) +{ + struct iopf_group *group; + struct iopf_context *fault, *next; + enum page_response_code status = IOMMU_PAGE_RESP_SUCCESS; + + group = container_of(work, struct iopf_group, work); + + list_for_each_entry_safe(fault, next, &group->faults, head) { + struct iommu_fault_event *evt = &fault->evt; + /* + * Errors are sticky: don't handle subsequent faults in the + * group if there is an error. + */ + if (status == IOMMU_PAGE_RESP_SUCCESS) + status = iopf_handle_single(fault); + + if (!evt->last_req) + kfree(fault); + } + + iopf_complete(group->last_fault.dev, &group->last_fault.evt, status); + kfree(group); +} + +/** + * iommu_queue_iopf - IO Page Fault handler + * @evt: fault event + * @cookie: struct device, passed to iommu_register_device_fault_handler. + * + * Add a fault to the device workqueue, to be handled by mm. + * + * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard + * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't + * expect a response. It may be generated when disabling a PASID (issuing a + * PASID stop request) by some PCI devices. + * + * The PASID stop request is triggered by the mm_exit() callback. When the + * callback returns from the device driver, no page request is generated for + * this PASID anymore and outstanding ones have been pushed to the IOMMU (as per + * PCIe 4.0r1.0 - 6.20.1 and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some + * PCI devices will wait for all outstanding page requests to come back with a + * response before completing the PASID stop request. Others do not wait for + * page responses, and instead issue this Stop Marker that tells us when the + * PASID can be reallocated. + * + * It is safe to discard the Stop Marker because it is an optimization. + * a. Page requests, which are posted requests, have been flushed to the IOMMU + * when mm_exit() returns, + * b. We flush all fault queues after mm_exit() returns and before freeing the + * PASID. + * + * So even though the Stop Marker might be issued by the device *after* the stop + * request completes, outstanding faults will have been dealt with by the time + * we free the PASID. + */ +int iommu_queue_iopf(struct iommu_fault_event *evt, void *cookie) +{ + struct iopf_group *group; + struct iopf_context *fault, *next; + struct iopf_device_param *iopf_param; + + struct device *dev = cookie; + struct iommu_param *param = dev->iommu_param; + + if (WARN_ON(!mutex_is_locked(¶m->lock))) + return -EINVAL; + + if (evt->type != IOMMU_FAULT_PAGE_REQ) + /* Not a recoverable page fault */ + return IOMMU_PAGE_RESP_CONTINUE; + + /* + * As long as we're holding param->lock, the queue can't be unlinked + * from the device and therefore cannot disappear. + */ + iopf_param = param->iopf_param; + if (!iopf_param) + return -ENODEV; + + if (!evt->last_req) { + fault = kzalloc(sizeof(*fault), GFP_KERNEL); + if (!fault) + return -ENOMEM; + + fault->evt = *evt; + fault->dev = dev; + + /* Non-last request of a group. 
Postpone until the last one */ + list_add(&fault->head, &iopf_param->partial); + + return IOMMU_PAGE_RESP_HANDLED; + } + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return -ENOMEM; + + group->last_fault.evt = *evt; + group->last_fault.dev = dev; + INIT_LIST_HEAD(&group->faults); + list_add(&group->last_fault.head, &group->faults); + INIT_WORK(&group->work, iopf_handle_group); + + /* See if we have partial faults for this group */ + list_for_each_entry_safe(fault, next, &iopf_param->partial, head) { + if (fault->evt.page_req_group_id == evt->page_req_group_id) + /* Insert *before* the last fault */ + list_move(&fault->head, &group->faults); + } + + queue_work(iopf_param->queue->wq, &group->work); + + /* Postpone the fault completion */ + return IOMMU_PAGE_RESP_HANDLED; +} +EXPORT_SYMBOL_GPL(iommu_queue_iopf); + +/** + * iopf_queue_flush_dev - Ensure that all queued faults have been processed + * @dev: the endpoint whose faults need to be flushed. + * + * Users must call this function when releasing a PASID, to ensure that all + * pending faults for this PASID have been handled, and won't hit the address + * space of the next process that uses this PASID. + * + * Return 0 on success. + */ +int iopf_queue_flush_dev(struct device *dev) +{ + int ret = 0; + struct iopf_queue *queue; + struct iommu_param *param = dev->iommu_param; + + if (!param) + return -ENODEV; + + /* + * It is incredibly easy to find ourselves in a deadlock situation if + * we're not careful, because we're taking the opposite path as + * iommu_queue_iopf: + * + * iopf_queue_flush_dev() | PRI queue handler + * lock(mutex) | iommu_queue_iopf() + * queue->flush() | lock(mutex) + * wait PRI queue empty | + * + * So we can't hold the device param lock while flushing. We don't have + * to, because the queue or the device won't disappear until all flush + * are finished. + */ + mutex_lock(¶m->lock); + if (param->iopf_param) + queue = param->iopf_param->queue; + else + ret = -ENODEV; + mutex_unlock(¶m->lock); + if (ret) + return ret; + + queue->flush(queue->flush_arg, dev); + + /* + * No need to clear the partial list. All PRGs containing the PASID that + * needs to be decommissioned are whole (the device driver made sure of + * it before this function was called). They have been submitted to the + * queue by the above flush(). + */ + flush_workqueue(queue->wq); + + return 0; +} +EXPORT_SYMBOL_GPL(iopf_queue_flush_dev); + +/** + * iopf_queue_add_device - Add producer to the fault queue + * @queue: IOPF queue + * @dev: device to add + */ +int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) +{ + int ret = -EINVAL; + struct iopf_device_param *iopf_param; + struct iommu_param *param = dev->iommu_param; + + if (!param) + return -ENODEV; + + iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL); + if (!iopf_param) + return -ENOMEM; + + INIT_LIST_HEAD(&iopf_param->partial); + iopf_param->queue = queue; + + mutex_lock(¶m->lock); + if (!param->iopf_param) { + refcount_inc(&queue->refs); + param->iopf_param = iopf_param; + ret = 0; + } + mutex_unlock(¶m->lock); + + if (ret) + kfree(iopf_param); + + return ret; +} +EXPORT_SYMBOL_GPL(iopf_queue_add_device); + +/** + * iopf_queue_remove_device - Remove producer from fault queue + * @dev: device to remove + * + * Caller makes sure that no more fault is reported for this device, and no more + * flush is scheduled for this device. + * + * Note: safe to call unconditionally on a cleanup path, even if the device + * isn't registered to any IOPF queue. 
+ * + * Return 0 if the device was attached to the IOPF queue + */ +int iopf_queue_remove_device(struct device *dev) +{ + struct iopf_context *fault, *next; + struct iopf_device_param *iopf_param; + struct iommu_param *param = dev->iommu_param; + + if (!param) + return -EINVAL; + + mutex_lock(¶m->lock); + iopf_param = param->iopf_param; + if (iopf_param) { + refcount_dec(&iopf_param->queue->refs); + param->iopf_param = NULL; + } + mutex_unlock(¶m->lock); + if (!iopf_param) + return -EINVAL; + + list_for_each_entry_safe(fault, next, &iopf_param->partial, head) + kfree(fault); + + /* + * No more flush is scheduled, and the caller removed all bonds from + * this device. unbind() waited until any concurrent mm_exit() finished, + * therefore there is no flush() running anymore and we can free the + * param. + */ + kfree(iopf_param); + + return 0; +} +EXPORT_SYMBOL_GPL(iopf_queue_remove_device); + +/** + * iopf_queue_alloc - Allocate and initialize a fault queue + * @name: a unique string identifying the queue (for workqueue) + * @flush: a callback that flushes the low-level queue + * @cookie: driver-private data passed to the flush callback + * + * The callback is called before the workqueue is flushed. The IOMMU driver must + * commit all faults that are pending in its low-level queues at the time of the + * call, into the IOPF queue (with iommu_report_device_fault). The callback + * takes a device pointer as argument, hinting what endpoint is causing the + * flush. When the device is NULL, all faults should be committed. + */ +struct iopf_queue * +iopf_queue_alloc(const char *name, iopf_queue_flush_t flush, void *cookie) +{ + struct iopf_queue *queue; + unsigned int type = WQ_UNBOUND; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return NULL; + + /* + * The WQ is unordered because the low-level handler enqueues faults by + * group. PRI requests within a group have to be ordered, but once + * that's dealt with, the high-level function can handle groups out of + * order. + */ + if (enable_iopf_hipri) + type = WQ_HIGHPRI; + + queue->wq = alloc_workqueue("iopf_queue/%s", type, 0, name); + if (!queue->wq) { + kfree(queue); + return NULL; + } + + queue->flush = flush; + queue->flush_arg = cookie; + refcount_set(&queue->refs, 1); + + return queue; +} +EXPORT_SYMBOL_GPL(iopf_queue_alloc); + +/** + * iopf_queue_free - Free IOPF queue + * @queue: queue to free + * + * Counterpart to iopf_queue_alloc(). Caller must make sure that all producers + * have been removed. 
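/*
 * Illustrative sketch (not part of this patch): producer-side setup for the
 * queue API above. The IOMMU driver allocates one IOPF queue alongside its
 * low-level PRI/stall queue and attaches faulting endpoints to it;
 * iommu_queue_iopf() itself is installed as the device fault handler by the
 * SVA core (see iommu_sva_device_init()). struct example_iommu, the
 * example_* functions and the assumed iopf_queue_flush_t signature are
 * hypothetical.
 */
struct example_iommu {
	struct device		*dev;
	struct iopf_queue	*iopf_queue;
};

static void example_prq_flush(void *cookie, struct device *dev)
{
	/* Drain the hardware PRI queue for @dev (all devices when NULL) */
}

static int example_iopf_enable(struct example_iommu *iommu, struct device *dev)
{
	if (!iommu->iopf_queue) {
		iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->dev),
						     example_prq_flush, iommu);
		if (!iommu->iopf_queue)
			return -ENOMEM;
	}

	/* Recoverable faults reported for @dev now go to the IOPF workqueue */
	return iopf_queue_add_device(iommu->iopf_queue, dev);
}

static void example_iopf_disable(struct device *dev)
{
	iopf_queue_flush_dev(dev);
	iopf_queue_remove_device(dev);
}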
+ */ +void iopf_queue_free(struct iopf_queue *queue) +{ + + /* Caller should have removed all producers first */ + if (WARN_ON(!refcount_dec_and_test(&queue->refs))) + return; + + destroy_workqueue(queue->wq); + kfree(queue); +} +EXPORT_SYMBOL_GPL(iopf_queue_free); + +#ifdef CONFIG_ASCEND_IOPF_HIPRI + +static int __init ascend_enable_iopf_hipri(char *s) +{ + enable_iopf_hipri = 1; + + pr_info("Ascend enable iopf workqueue highpri\n"); + + return 1; +} +__setup("enable_iopf_hipri", ascend_enable_iopf_hipri); + +#endif diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index b5948ba6b3b369edf0096127302e986cf7a439bc..f1f781498c37e595c91e04592514fcdae0d9e001 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -45,8 +46,6 @@ #include -#include "io-pgtable.h" - /* Struct accessors */ #define io_pgtable_to_data(x) \ container_of((x), struct arm_v7s_io_pgtable, iop) @@ -161,6 +160,14 @@ #define ARM_V7S_TCR_PD1 BIT(5) +#ifdef CONFIG_ZONE_DMA32 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32 +#else +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA +#endif + typedef u32 arm_v7s_iopte; static bool selftest_running; @@ -198,14 +205,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, void *table = NULL; if (lvl == 1) - table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); + table = (void *)__get_free_pages( + __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); else if (lvl == 2) - table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); + table = kmem_cache_zalloc(data->l2_tables, gfp); phys = virt_to_phys(table); - if (phys != (arm_v7s_iopte)phys) + if (phys != (arm_v7s_iopte)phys) { /* Doesn't fit in PTE */ + dev_err(dev, "Page table does not fit in PTE: %pa", &phys); goto out_free; - if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { + } + if (table && !cfg->coherent_walk) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -217,7 +227,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, if (dma != phys) goto out_unmap; } - kmemleak_ignore(table); + if (lvl == 2) + kmemleak_ignore(table); return table; out_unmap: @@ -238,7 +249,7 @@ static void __arm_v7s_free_table(void *table, int lvl, struct device *dev = cfg->iommu_dev; size_t size = ARM_V7S_TABLE_SIZE(lvl); - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (!cfg->coherent_walk) dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, DMA_TO_DEVICE); if (lvl == 1) @@ -250,7 +261,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) + if (cfg->coherent_walk) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), @@ -642,6 +653,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, io_pgtable_tlb_sync(iop); ptep = iopte_deref(pte[i], lvl); __arm_v7s_free_table(ptep, lvl + 1, data); + } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { + /* + * Order the PTE update against queueing the IOVA, to + * guarantee that a flush callback from a different CPU + * has observed it before the TLBIALL can be issued. 
+ */ + smp_wmb(); } else { io_pgtable_tlb_add_flush(iop, iova, blk_size, blk_size, true); @@ -701,10 +719,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, { struct arm_v7s_io_pgtable *data; -#ifdef PHYS_OFFSET - if (upper_32_bits(PHYS_OFFSET)) - return NULL; -#endif if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) return NULL; @@ -712,7 +726,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | IO_PGTABLE_QUIRK_ARM_MTK_4GB | - IO_PGTABLE_QUIRK_NO_DMA)) + IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */ @@ -728,7 +742,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", ARM_V7S_TABLE_SIZE(2), ARM_V7S_TABLE_SIZE(2), - SLAB_CACHE_DMA, NULL); + ARM_V7S_TABLE_SLAB_FLAGS, NULL); if (!data->l2_tables) goto out_free_data; @@ -811,7 +825,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, @@ -830,7 +844,8 @@ static int __init arm_v7s_do_selftests(void) .tlb = &dummy_tlb_ops, .oas = 32, .ias = 32, - .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA, + .coherent_walk = true, + .quirks = IO_PGTABLE_QUIRK_ARM_NS, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, }; unsigned int iova, size, iova_start; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 88641b4560bc8e87117bdbf1d8b5b1d2117f8d30..3f99077b3611a7567d66e6ed42f3ad5ef8821d7e 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -31,7 +32,7 @@ #include -#include "io-pgtable.h" +#include "io-pgtable-arm.h" #define ARM_LPAE_MAX_ADDR_BITS 52 #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 @@ -121,54 +122,6 @@ #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) /* Register bits */ -#define ARM_32_LPAE_TCR_EAE (1 << 31) -#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) - -#define ARM_LPAE_TCR_EPD1 (1 << 23) - -#define ARM_LPAE_TCR_TG0_4K (0 << 14) -#define ARM_LPAE_TCR_TG0_64K (1 << 14) -#define ARM_LPAE_TCR_TG0_16K (2 << 14) - -#define ARM_LPAE_TCR_SH0_SHIFT 12 -#define ARM_LPAE_TCR_SH0_MASK 0x3 -#define ARM_LPAE_TCR_SH_NS 0 -#define ARM_LPAE_TCR_SH_OS 2 -#define ARM_LPAE_TCR_SH_IS 3 - -#define ARM_LPAE_TCR_ORGN0_SHIFT 10 -#define ARM_LPAE_TCR_IRGN0_SHIFT 8 -#define ARM_LPAE_TCR_RGN_MASK 0x3 -#define ARM_LPAE_TCR_RGN_NC 0 -#define ARM_LPAE_TCR_RGN_WBWA 1 -#define ARM_LPAE_TCR_RGN_WT 2 -#define ARM_LPAE_TCR_RGN_WB 3 - -#define ARM_LPAE_TCR_SL0_SHIFT 6 -#define ARM_LPAE_TCR_SL0_MASK 0x3 - -#define ARM_LPAE_TCR_T0SZ_SHIFT 0 -#define ARM_LPAE_TCR_SZ_MASK 0xf - -#define ARM_LPAE_TCR_PS_SHIFT 16 -#define ARM_LPAE_TCR_PS_MASK 0x7 - -#define ARM_LPAE_TCR_IPS_SHIFT 32 -#define ARM_LPAE_TCR_IPS_MASK 0x7 - -#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL -#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL -#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL -#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL -#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL -#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL -#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL - -#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) -#define ARM_LPAE_MAIR_ATTR_MASK 0xff -#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 -#define 
ARM_LPAE_MAIR_ATTR_NC 0x44 -#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff #define ARM_LPAE_MAIR_ATTR_IDX_NC 0 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 #define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 @@ -243,7 +196,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, return NULL; pages = page_address(p); - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { + if (!cfg->coherent_walk) { dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -269,7 +222,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (!cfg->coherent_walk) dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages((unsigned long)pages, get_order(size)); @@ -287,7 +240,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, { *ptep = pte; - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (!cfg->coherent_walk) __arm_lpae_sync_pte(ptep, cfg); } @@ -346,11 +299,12 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, arm_lpae_iopte curr, - struct io_pgtable_cfg *cfg) + struct arm_lpae_io_pgtable *data) { arm_lpae_iopte old, new; + struct io_pgtable_cfg *cfg = &data->iop.cfg; - new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; @@ -363,8 +317,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, old = cmpxchg64_relaxed(ptep, curr, new); - if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) || - (old & ARM_LPAE_PTE_SW_SYNC)) + if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC)) return old; /* Even if it's not ours, there's no point waiting; just kick it */ @@ -402,11 +355,10 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + pte = arm_lpae_install_table(cptep, ptep, 0, data); if (pte) __arm_lpae_free_pages(cptep, tblsz, cfg); - } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) && - !(pte & ARM_LPAE_PTE_SW_SYNC)) { + } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { __arm_lpae_sync_pte(ptep, cfg); } @@ -562,7 +514,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); if (pte != blk_pte) { __arm_lpae_free_pages(tablep, tablesz, cfg); /* @@ -574,13 +526,12 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, return 0; tablep = iopte_deref(pte, data); + } else if (unmap_idx >= 0) { + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + return size; } - if (unmap_idx < 0) - return __arm_lpae_unmap(data, iova, size, lvl, tablep); - - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); - return size; + return __arm_lpae_unmap(data, iova, size, lvl, tablep); } static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, @@ -610,6 +561,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, io_pgtable_tlb_sync(iop); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); + } else if (iop->cfg.quirks & 
IO_PGTABLE_QUIRK_NON_STRICT) { + /* + * Order the PTE update against queueing the IOVA, to + * guarantee that a flush callback from a different CPU + * has observed it before the TLBIALL can be issued. + */ + smp_wmb(); } else { io_pgtable_tlb_add_flush(iop, iova, size, size, true); } @@ -772,7 +730,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) u64 reg; struct arm_lpae_io_pgtable *data; - if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | + IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -864,7 +823,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; /* The NS quirk doesn't apply at stage 2 */ - if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -1028,7 +987,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops __initconst = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, @@ -1168,7 +1127,7 @@ static int __init arm_lpae_do_selftests(void) struct io_pgtable_cfg cfg = { .tlb = &dummy_tlb_ops, .oas = 48, - .quirks = IO_PGTABLE_QUIRK_NO_DMA, + .coherent_walk = true, }; for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { diff --git a/drivers/iommu/io-pgtable-arm.h b/drivers/iommu/io-pgtable-arm.h new file mode 100644 index 0000000000000000000000000000000000000000..e35ba4666214256afa2e4dc6f38741ae2871994c --- /dev/null +++ b/drivers/iommu/io-pgtable-arm.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __IO_PGTABLE_ARM_H +#define __IO_PGTABLE_ARM_H + +#define ARM_32_LPAE_TCR_EAE (1 << 31) +#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) + +#define ARM_LPAE_TCR_EPD1 (1 << 23) + +#define ARM_LPAE_TCR_TG0_4K (0 << 14) +#define ARM_LPAE_TCR_TG0_64K (1 << 14) +#define ARM_LPAE_TCR_TG0_16K (2 << 14) + +#define ARM_LPAE_TCR_SH0_SHIFT 12 +#define ARM_LPAE_TCR_SH0_MASK 0x3 +#define ARM_LPAE_TCR_SH_NS 0 +#define ARM_LPAE_TCR_SH_OS 2 +#define ARM_LPAE_TCR_SH_IS 3 + +#define ARM_LPAE_TCR_ORGN0_SHIFT 10 +#define ARM_LPAE_TCR_IRGN0_SHIFT 8 +#define ARM_LPAE_TCR_RGN_MASK 0x3 +#define ARM_LPAE_TCR_RGN_NC 0 +#define ARM_LPAE_TCR_RGN_WBWA 1 +#define ARM_LPAE_TCR_RGN_WT 2 +#define ARM_LPAE_TCR_RGN_WB 3 + +#define ARM_LPAE_TCR_SL0_SHIFT 6 +#define ARM_LPAE_TCR_SL0_MASK 0x3 + +#define ARM_LPAE_TCR_T0SZ_SHIFT 0 +#define ARM_LPAE_TCR_SZ_MASK 0x3f + +#define ARM_LPAE_TCR_PS_SHIFT 16 +#define ARM_LPAE_TCR_PS_MASK 0x7 + +#define ARM_LPAE_TCR_IPS_SHIFT 32 +#define ARM_LPAE_TCR_IPS_MASK 0x7 + +#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL +#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL +#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL +#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL +#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL +#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL +#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL + +#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) +#define ARM_LPAE_MAIR_ATTR_MASK 0xff +#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 +#define ARM_LPAE_MAIR_ATTR_NC 0x44 +#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff + +#endif /* __IO_PGTABLE_ARM_H */ diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index 127558d83667966b4056fc34ad98de88c87e217b..93f2880be6c67ccede1e19f45025379fdd4859cf 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -19,11 +19,10 @@ 
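/*
 * Illustrative sketch (not part of this patch): alloc_io_pgtable_ops() and
 * free_io_pgtable_ops() are exported by the io-pgtable.c change that follows,
 * so a modular IOMMU driver can build its page tables much like the selftests
 * above. struct example_domain and example_flush_ops (the driver's struct
 * iommu_flush_ops) are hypothetical; <linux/io-pgtable.h> and <linux/sizes.h>
 * are assumed to be included.
 */
static const struct iommu_flush_ops example_flush_ops; /* driver TLB callbacks, elided */

struct example_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	struct io_pgtable_cfg	pgtbl_cfg;
};

static int example_alloc_pgtable(struct example_domain *dom,
				 struct device *iommu_dev,
				 bool coherent, bool lazy_unmap)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= coherent,
		.tlb		= &example_flush_ops,
		.iommu_dev	= iommu_dev,
	};

	if (lazy_unmap)
		cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
	if (!dom->pgtbl_ops)
		return -ENOMEM;

	/* The allocator filled cfg with the TTBR/TCR/MAIR values to program */
	dom->pgtbl_cfg = cfg;
	return 0;
}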
*/ #include +#include #include #include -#include "io-pgtable.h" - static const struct io_pgtable_init_fns * io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE @@ -61,6 +60,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, return &iop->ops; } +EXPORT_SYMBOL_GPL(alloc_io_pgtable_ops); /* * It is the IOMMU driver's responsibility to ensure that the page table @@ -77,3 +77,4 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops) io_pgtable_tlb_flush_all(iop); io_pgtable_init_table[iop->fmt]->free(iop); } +EXPORT_SYMBOL_GPL(free_io_pgtable_ops); diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c index 3b1bf88fd1b0494a819e5ed2eb93bac89f539189..f0354894209648fdd18a36701d3d0bc377ee0230 100644 --- a/drivers/iommu/iommu-debugfs.c +++ b/drivers/iommu/iommu-debugfs.c @@ -12,6 +12,7 @@ #include struct dentry *iommu_debugfs_dir; +EXPORT_SYMBOL_GPL(iommu_debugfs_dir); /** * iommu_debugfs_setup - create the top-level iommu directory in debugfs @@ -23,9 +24,9 @@ struct dentry *iommu_debugfs_dir; * Emit a strong warning at boot time to indicate that this feature is * enabled. * - * This function is called from iommu_init; drivers may then call - * iommu_debugfs_new_driver_dir() to instantiate a vendor-specific - * directory to be used to expose internal data. + * This function is called from iommu_init; drivers may then use + * iommu_debugfs_dir to instantiate a vendor-specific directory to be used + * to expose internal data. */ void iommu_debugfs_setup(void) { @@ -48,19 +49,3 @@ void iommu_debugfs_setup(void) pr_warn("*************************************************************\n"); } } - -/** - * iommu_debugfs_new_driver_dir - create a vendor directory under debugfs/iommu - * @vendor: name of the vendor-specific subdirectory to create - * - * This function is called by an IOMMU driver to create the top-level debugfs - * directory for that driver. - * - * Return: upon success, a pointer to the dentry for the new directory. - * NULL in case of failure. - */ -struct dentry *iommu_debugfs_new_driver_dir(const char *vendor) -{ - return debugfs_create_dir(vendor, iommu_debugfs_dir); -} -EXPORT_SYMBOL_GPL(iommu_debugfs_new_driver_dir); diff --git a/drivers/iommu/iommu-pasid-table.c b/drivers/iommu/iommu-pasid-table.c new file mode 100644 index 0000000000000000000000000000000000000000..2b6a8a5857718a26230a142a0c26bdefe801aa1a --- /dev/null +++ b/drivers/iommu/iommu-pasid-table.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PASID table management for the IOMMU + * + * Copyright (C) 2018 ARM Ltd. 
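/*
 * Illustrative sketch (not part of this patch): with
 * iommu_debugfs_new_driver_dir() removed, a driver creates its vendor
 * directory directly under the exported iommu_debugfs_dir. The directory name
 * and function are placeholders; <linux/debugfs.h> is assumed to be included.
 */
static struct dentry *example_debugfs_dir;

static void example_iommu_debugfs_init(void)
{
	example_debugfs_dir = debugfs_create_dir("example-iommu",
						 iommu_debugfs_dir);
}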
+ */ + +#include + +#include "iommu-pasid-table.h" + +static const struct iommu_pasid_init_fns * +pasid_table_init_fns[PASID_TABLE_NUM_FMTS] = { + [PASID_TABLE_ARM_SMMU_V3] = &arm_smmu_v3_pasid_init_fns, +}; + +struct iommu_pasid_table_ops * +iommu_alloc_pasid_ops(enum iommu_pasid_table_fmt fmt, + struct iommu_pasid_table_cfg *cfg, void *cookie) +{ + struct iommu_pasid_table *table; + const struct iommu_pasid_init_fns *fns; + + if (fmt >= PASID_TABLE_NUM_FMTS) + return NULL; + + fns = pasid_table_init_fns[fmt]; + if (!fns) + return NULL; + + table = fns->alloc(cfg, cookie); + if (!table) + return NULL; + + table->fmt = fmt; + table->cookie = cookie; + table->cfg = *cfg; + + return &table->ops; +} + +void iommu_free_pasid_ops(struct iommu_pasid_table_ops *ops) +{ + struct iommu_pasid_table *table; + + if (!ops) + return; + + table = container_of(ops, struct iommu_pasid_table, ops); + iommu_pasid_flush_all(table); + pasid_table_init_fns[table->fmt]->free(table); +} diff --git a/drivers/iommu/iommu-pasid-table.h b/drivers/iommu/iommu-pasid-table.h new file mode 100644 index 0000000000000000000000000000000000000000..1401e00383183f9bd107d2e666b454180f3f0515 --- /dev/null +++ b/drivers/iommu/iommu-pasid-table.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PASID table management for the IOMMU + * + * Copyright (C) 2018 ARM Ltd. + */ +#ifndef __IOMMU_PASID_TABLE_H +#define __IOMMU_PASID_TABLE_H + +#include +#include +#include "linux/io-pgtable.h" + +struct mm_struct; + +enum iommu_pasid_table_fmt { + PASID_TABLE_ARM_SMMU_V3, + PASID_TABLE_NUM_FMTS, +}; + +/** + * iommu_pasid_entry - Entry of a PASID table + * + * @tag: architecture-specific data needed to uniquely identify the entry. Most + * notably used for TLB invalidation + * @release: function that frees the entry and its content. PASID entries may be + * freed well after the PASID table ops are released, and may be shared between + * different PASID tables, so the release method has to be standalone. + */ +struct iommu_pasid_entry { + u64 tag; + void (*release)(struct iommu_pasid_entry *); +}; + +/** + * iommu_pasid_table_ops - Operations on a PASID table + * + * @alloc_shared_entry: allocate an entry for sharing an mm (SVA). Returns the + * pointer to a new entry or an error. + * @alloc_priv_entry: allocate an entry for map/unmap operations. Returns the + * pointer to a new entry or an error. + * @set_entry: write PASID table entry + * @clear_entry: clear PASID table entry + */ +struct iommu_pasid_table_ops { + struct iommu_pasid_entry * + (*alloc_shared_entry)(struct iommu_pasid_table_ops *ops, + struct mm_struct *mm); + struct iommu_pasid_entry * + (*alloc_priv_entry)(struct iommu_pasid_table_ops *ops, + enum io_pgtable_fmt fmt, + struct io_pgtable_cfg *cfg); + int (*set_entry)(struct iommu_pasid_table_ops *ops, int pasid, + struct iommu_pasid_entry *entry); + void (*clear_entry)(struct iommu_pasid_table_ops *ops, int pasid, + struct iommu_pasid_entry *entry); +}; + +/** + * iommu_pasid_sync_ops - Callbacks into the IOMMU driver + * + * @cfg_flush: flush cached configuration for one entry. For a multi-level PASID + * table, 'leaf' tells whether to only flush cached leaf entries or intermediate + * levels as well. 
+ * @cfg_flush_all: flush cached configuration for all entries of the PASID table + * @tlb_flush: flush TLB entries for one entry + */ +struct iommu_pasid_sync_ops { + void (*cfg_flush)(void *cookie, int pasid, bool leaf); + void (*cfg_flush_all)(void *cookie); + void (*tlb_flush)(void *cookie, int pasid, + struct iommu_pasid_entry *entry); +}; + +/** + * arm_smmu_context_cfg - PASID table configuration for ARM SMMU v3 + * + * SMMU properties: + * @stall: devices attached to the domain are allowed to stall. + * @asid_bits: number of ASID bits supported by the SMMU + * @hw_dirty: hardware may update dirty flag + * @hw_access: hardware may update access flag + * + * @s1fmt: PASID table format, chosen by the allocator. + */ +struct arm_smmu_context_cfg { + u8 stall:1; + u8 asid_bits; + u8 hw_dirty:1; + u8 hw_access:1; + +#define ARM_SMMU_S1FMT_LINEAR 0x0 +#define ARM_SMMU_S1FMT_4K_L2 0x1 +#define ARM_SMMU_S1FMT_64K_L2 0x2 + u8 s1fmt; +}; + +/** + * struct iommu_pasid_table_cfg - Configuration data for a set of PASID tables. + * + * @iommu_dev: device performing the DMA table walks + * @order: number of PASID bits, set by IOMMU driver + * @sync: TLB management callbacks for this set of tables. + * + * @base: DMA address of the allocated table, set by the allocator. + */ +struct iommu_pasid_table_cfg { + struct device *iommu_dev; + size_t order; + const struct iommu_pasid_sync_ops *sync; + dma_addr_t base; + + /* Low-level data specific to the IOMMU */ + union { + struct arm_smmu_context_cfg arm_smmu; + }; +}; + +struct iommu_pasid_table_ops * +iommu_alloc_pasid_ops(enum iommu_pasid_table_fmt fmt, + struct iommu_pasid_table_cfg *cfg, + void *cookie); +void iommu_free_pasid_ops(struct iommu_pasid_table_ops *ops); + +static inline void iommu_free_pasid_entry(struct iommu_pasid_entry *entry) +{ + if (WARN_ON(!entry->release)) + return; + entry->release(entry); +} + +/** + * struct iommu_pasid_table - describes a set of PASID tables + * + * @fmt: The PASID table format. + * @cookie: An opaque token provided by the IOMMU driver and passed back to any + * callback routine. + * @cfg: A copy of the PASID table configuration. + * @ops: The PASID table operations in use for this set of page tables.
+ */ +struct iommu_pasid_table { + enum iommu_pasid_table_fmt fmt; + void *cookie; + struct iommu_pasid_table_cfg cfg; + struct iommu_pasid_table_ops ops; +}; + +#define iommu_pasid_table_ops_to_table(ops) \ + container_of((ops), struct iommu_pasid_table, ops) + +struct iommu_pasid_init_fns { + struct iommu_pasid_table *(*alloc)(struct iommu_pasid_table_cfg *cfg, + void *cookie); + void (*free)(struct iommu_pasid_table *table); +}; + +static inline void iommu_pasid_flush_all(struct iommu_pasid_table *table) +{ + table->cfg.sync->cfg_flush_all(table->cookie); +} + +static inline void iommu_pasid_flush(struct iommu_pasid_table *table, + int pasid, bool leaf) +{ + table->cfg.sync->cfg_flush(table->cookie, pasid, leaf); +} + +static inline void iommu_pasid_flush_tlbs(struct iommu_pasid_table *table, + int pasid, + struct iommu_pasid_entry *entry) +{ + table->cfg.sync->tlb_flush(table->cookie, pasid, entry); +} + +extern struct iommu_pasid_init_fns arm_smmu_v3_pasid_init_fns; + +#endif /* __IOMMU_PASID_TABLE_H */ diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c new file mode 100644 index 0000000000000000000000000000000000000000..b666fae6f55c12a284c30d8b010517078c1b39d5 --- /dev/null +++ b/drivers/iommu/iommu-sva.c @@ -0,0 +1,794 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Manage PASIDs and bind process address spaces to devices. + * + * Copyright (C) 2018 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include + +/** + * DOC: io_mm model + * + * The io_mm keeps track of process address spaces shared between CPU and IOMMU. + * The following example illustrates the relation between structures + * iommu_domain, io_mm and iommu_bond. An iommu_bond is a link between io_mm and + * device. A device can have multiple io_mm and an io_mm may be bound to + * multiple devices. + * ___________________________ + * | IOMMU domain A | + * | ________________ | + * | | IOMMU group | +------- io_pgtables + * | | | | + * | | dev 00:00.0 ----+------- bond --- io_mm X + * | |________________| \ | + * | '----- bond ---. + * |___________________________| \ + * ___________________________ \ + * | IOMMU domain B | io_mm Y + * | ________________ | / / + * | | IOMMU group | | / / + * | | | | / / + * | | dev 00:01.0 ------------ bond -' / + * | | dev 00:01.1 ------------ bond --' + * | |________________| | + * | +------- io_pgtables + * |___________________________| + * + * In this example, device 00:00.0 is in domain A, devices 00:01.* are in domain + * B. All devices within the same domain access the same address spaces. Device + * 00:00.0 accesses address spaces X and Y, each corresponding to an mm_struct. + * Devices 00:01.* only access address space Y. In addition each + * IOMMU_DOMAIN_DMA domain has a private address space, io_pgtable, that is + * managed with iommu_map()/iommu_unmap(), and isn't shared with the CPU MMU. + * + * To obtain the above configuration, users would for instance issue the + * following calls: + * + * iommu_sva_bind_device(dev 00:00.0, mm X, ...) -> PASID 1 + * iommu_sva_bind_device(dev 00:00.0, mm Y, ...) -> PASID 2 + * iommu_sva_bind_device(dev 00:01.0, mm Y, ...) -> PASID 2 + * iommu_sva_bind_device(dev 00:01.1, mm Y, ...) -> PASID 2 + * + * A single Process Address Space ID (PASID) is allocated for each mm. In the + * example, devices use PASID 1 to read/write into address space X and PASID 2 + * to read/write into address space Y. 
+ * + * Hardware tables describing this configuration in the IOMMU would typically + * look like this: + * + * PASID tables + * of domain A + * .->+--------+ + * / 0 | |-------> io_pgtable + * / +--------+ + * Device tables / 1 | |-------> pgd X + * +--------+ / +--------+ + * 00:00.0 | A |-' 2 | |--. + * +--------+ +--------+ \ + * : : 3 | | \ + * +--------+ +--------+ --> pgd Y + * 00:01.0 | B |--. / + * +--------+ \ | + * 00:01.1 | B |----+ PASID tables | + * +--------+ \ of domain B | + * '->+--------+ | + * 0 | |-- | --> io_pgtable + * +--------+ | + * 1 | | | + * +--------+ | + * 2 | |---' + * +--------+ + * 3 | | + * +--------+ + * + * With this model, a single call binds all devices in a given domain to an + * address space. Other devices in the domain will get the same bond implicitly. + * However, users must issue one bind() for each device, because IOMMUs may + * implement SVA differently. Furthermore, mandating one bind() per device + * allows the driver to perform sanity-checks on device capabilities. + * + * On Arm and AMD IOMMUs, entry 0 of the PASID table can be used to hold + * non-PASID translations. In this case PASID 0 is reserved and entry 0 points + * to the io_pgtable base. On Intel IOMMU, the io_pgtable base would be held in + * the device table and PASID 0 would be available to the allocator. + */ + +struct iommu_bond { + struct io_mm *io_mm; + struct device *dev; + struct iommu_domain *domain; + + struct list_head mm_head; + struct list_head dev_head; + struct list_head domain_head; + refcount_t refs; + struct wait_queue_head mm_exit_wq; + bool mm_exit_active; + + void *drvdata; +}; + +/* + * Because we're using an IDR, PASIDs are limited to 31 bits (the sign bit is + * used for returning errors). In practice implementations will use at most 20 + * bits, which is the PCI limit. + */ +static DEFINE_IDR(iommu_pasid_idr); + +/* + * For the moment this is an all-purpose lock. It serializes + * access/modifications to bonds, access/modifications to the PASID IDR, and + * changes to io_mm refcount as well. + */ +static DEFINE_SPINLOCK(iommu_sva_lock); + +static struct mmu_notifier_ops iommu_mmu_notifier; + +static struct io_mm * +io_mm_alloc(struct iommu_domain *domain, struct device *dev, + struct mm_struct *mm, unsigned long flags) +{ + int ret; + int pasid; + struct io_mm *io_mm; + struct iommu_sva_param *param = dev->iommu_param->sva_param; + + if (!domain->ops->mm_alloc || !domain->ops->mm_free) + return ERR_PTR(-ENODEV); + + io_mm = domain->ops->mm_alloc(domain, mm, flags); + if (IS_ERR(io_mm)) + return io_mm; + if (!io_mm) + return ERR_PTR(-ENOMEM); + + /* + * The mm must not be freed until after the driver frees the io_mm + * (which may involve unpinning the CPU ASID for instance, requiring a + * valid mm struct.) + */ + mmgrab(mm); + + io_mm->flags = flags; + io_mm->mm = mm; + io_mm->notifier.ops = &iommu_mmu_notifier; + io_mm->release = domain->ops->mm_free; + INIT_LIST_HEAD(&io_mm->devices); + + idr_preload(GFP_KERNEL); + spin_lock(&iommu_sva_lock); + pasid = idr_alloc(&iommu_pasid_idr, io_mm, param->min_pasid, + param->max_pasid + 1, GFP_ATOMIC); + io_mm->pasid = pasid; + spin_unlock(&iommu_sva_lock); + idr_preload_end(); + + if (pasid < 0) { + ret = pasid; + goto err_free_mm; + } + + ret = mmu_notifier_register(&io_mm->notifier, mm); + if (ret) + goto err_free_pasid; + + /* + * Now that the MMU notifier is valid, we can allow users to grab this + * io_mm by setting a valid refcount. Before that it was accessible in + * the IDR but invalid. 
+ * + * The following barrier ensures that users, who obtain the io_mm with + * kref_get_unless_zero, don't read uninitialized fields in the + * structure. + */ + smp_wmb(); + kref_init(&io_mm->kref); + + return io_mm; + +err_free_pasid: + /* + * Even if the io_mm is accessible from the IDR at this point, kref is + * 0 so no user could get a reference to it. Free it manually. + */ + spin_lock(&iommu_sva_lock); + idr_remove(&iommu_pasid_idr, io_mm->pasid); + spin_unlock(&iommu_sva_lock); + +err_free_mm: + domain->ops->mm_free(io_mm); + mmdrop(mm); + + return ERR_PTR(ret); +} + +static void io_mm_free(struct rcu_head *rcu) +{ + struct io_mm *io_mm; + struct mm_struct *mm; + + io_mm = container_of(rcu, struct io_mm, rcu); + mm = io_mm->mm; + + io_mm->release(io_mm); + mmdrop(mm); +} + +static void io_mm_release(struct kref *kref) +{ + struct io_mm *io_mm; + + io_mm = container_of(kref, struct io_mm, kref); + WARN_ON(!list_empty(&io_mm->devices)); + + idr_remove(&iommu_pasid_idr, io_mm->pasid); + + /* + * If we're being released from mm exit, the notifier callback ->release + * has already been called. Otherwise we don't need ->release, the io_mm + * isn't attached to anything anymore. Hence no_release. + */ + mmu_notifier_unregister_no_release(&io_mm->notifier, io_mm->mm); + + /* + * We can't free the structure here, because if mm exits during + * unbind(), then ->release might be attempting to grab the io_mm + * concurrently. And in the other case, if ->release is calling + * io_mm_release, then __mmu_notifier_release expects to still have a + * valid mn when returning. So free the structure when it's safe, after + * the RCU grace period elapsed. + */ + mmu_notifier_call_srcu(&io_mm->rcu, io_mm_free); +} + +/* + * Returns non-zero if a reference to the io_mm was successfully taken. + * Returns zero if the io_mm is being freed and should not be used. + */ +static int io_mm_get_locked(struct io_mm *io_mm) +{ + if (io_mm && kref_get_unless_zero(&io_mm->kref)) { + /* + * kref_get_unless_zero doesn't provide ordering for reads. This + * barrier pairs with the one in io_mm_alloc. + */ + smp_rmb(); + return 1; + } + + return 0; +} + +static void io_mm_put_locked(struct io_mm *io_mm) +{ + kref_put(&io_mm->kref, io_mm_release); +} + +static void io_mm_put(struct io_mm *io_mm) +{ + spin_lock(&iommu_sva_lock); + io_mm_put_locked(io_mm); + spin_unlock(&iommu_sva_lock); +} + +static int io_mm_attach(struct iommu_domain *domain, struct device *dev, + struct io_mm *io_mm, void *drvdata) +{ + int ret; + bool attach_domain = true; + int pasid = io_mm->pasid; + struct iommu_bond *bond, *tmp; + struct iommu_sva_param *param = dev->iommu_param->sva_param; + + if (!domain->ops->mm_attach || !domain->ops->mm_detach || + !domain->ops->mm_invalidate) + return -ENODEV; + + if (pasid > param->max_pasid || pasid < param->min_pasid) + return -ERANGE; + + bond = kzalloc(sizeof(*bond), GFP_KERNEL); + if (!bond) + return -ENOMEM; + + bond->domain = domain; + bond->io_mm = io_mm; + bond->dev = dev; + bond->drvdata = drvdata; + refcount_set(&bond->refs, 1); + init_waitqueue_head(&bond->mm_exit_wq); + + spin_lock(&iommu_sva_lock); + /* + * Check if this io_mm is already bound to the domain. In which case the + * IOMMU driver doesn't have to install the PASID table entry. 
+ */ + list_for_each_entry(tmp, &domain->mm_list, domain_head) { + if (tmp->io_mm == io_mm) { + attach_domain = false; + break; + } + } + + ret = domain->ops->mm_attach(domain, dev, io_mm, attach_domain); + if (ret) { + kfree(bond); + spin_unlock(&iommu_sva_lock); + return ret; + } + + list_add(&bond->mm_head, &io_mm->devices); + list_add(&bond->domain_head, &domain->mm_list); + list_add(&bond->dev_head, &param->mm_list); + spin_unlock(&iommu_sva_lock); + + return 0; +} + +static void io_mm_detach_locked(struct iommu_bond *bond, bool wait) +{ + struct iommu_bond *tmp; + bool detach_domain = true; + struct iommu_domain *domain = bond->domain; + + if (wait) { + bool do_detach = true; + /* + * If we're unbind() then we're deleting the bond no matter + * what. Tell the mm_exit thread that we're cleaning up, and + * wait until it finishes using the bond. + * + * refs is guaranteed to be one or more, otherwise it would + * already have been removed from the list. Check if someone is + * already waiting, in which case we wait but do not free. + */ + if (refcount_read(&bond->refs) > 1) + do_detach = false; + + refcount_inc(&bond->refs); + wait_event_lock_irq(bond->mm_exit_wq, !bond->mm_exit_active, + iommu_sva_lock); + if (!do_detach) + return; + + } else if (!refcount_dec_and_test(&bond->refs)) { + /* unbind() is waiting to free the bond */ + return; + } + + list_for_each_entry(tmp, &domain->mm_list, domain_head) { + if (tmp->io_mm == bond->io_mm && tmp->dev != bond->dev) { + detach_domain = false; + break; + } + } + + domain->ops->mm_detach(domain, bond->dev, bond->io_mm, detach_domain); + + list_del(&bond->mm_head); + list_del(&bond->domain_head); + list_del(&bond->dev_head); + io_mm_put_locked(bond->io_mm); + + kfree(bond); +} + +static int iommu_signal_mm_exit(struct iommu_bond *bond) +{ + struct device *dev = bond->dev; + struct io_mm *io_mm = bond->io_mm; + struct iommu_sva_param *param = dev->iommu_param->sva_param; + + /* + * We can't hold the device's param_lock. If we did and the device + * driver used a global lock around io_mm, we would risk getting the + * following deadlock: + * + * exit_mm() | Shutdown SVA + * mutex_lock(param->lock) | mutex_lock(glob lock) + * param->mm_exit() | sva_device_shutdown() + * mutex_lock(glob lock) | mutex_lock(param->lock) + * + * Fortunately unbind() waits for us to finish, and sva_device_shutdown + * requires that any bond is removed, so we can safely access mm_exit + * and drvdata without taking any lock. + */ + if (!param || !param->mm_exit) + return 0; + + return param->mm_exit(dev, io_mm->pasid, bond->drvdata); +} + +/* Called when the mm exits. Can race with unbind(). */ +static void iommu_notifier_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct iommu_bond *bond, *next; + struct io_mm *io_mm = container_of(mn, struct io_mm, notifier); + + /* + * If the mm is exiting then devices are still bound to the io_mm. + * A few things need to be done before it is safe to release: + * + * - As the mmu notifier doesn't hold any reference to the io_mm when + * calling ->release(), try to take a reference. + * - Tell the device driver to stop using this PASID. + * - Clear the PASID table and invalidate TLBs. + * - Drop all references to this io_mm by freeing the bonds. + */ + spin_lock(&iommu_sva_lock); + if (!io_mm_get_locked(io_mm)) { + /* Someone's already taking care of it.
*/ + spin_unlock(&iommu_sva_lock); + return; + } + + list_for_each_entry_safe(bond, next, &io_mm->devices, mm_head) { + /* + * Release the lock to let the handler sleep. We need to be + * careful about concurrent modifications to the list and to the + * bond. Tell unbind() not to free the bond until we're done. + */ + bond->mm_exit_active = true; + spin_unlock(&iommu_sva_lock); + + if (iommu_signal_mm_exit(bond)) + dev_WARN(bond->dev, "possible leak of PASID %u", + io_mm->pasid); + + iopf_queue_flush_dev(bond->dev); + + spin_lock(&iommu_sva_lock); + next = list_next_entry(bond, mm_head); + + /* If someone is waiting, let them delete the bond now */ + bond->mm_exit_active = false; + wake_up_all(&bond->mm_exit_wq); + + /* Otherwise, do it ourselves */ + io_mm_detach_locked(bond, false); + } + spin_unlock(&iommu_sva_lock); + + /* + * We're now reasonably certain that no more fault is being handled for + * this io_mm, since we just flushed them all out of the fault queue. + * Release the last reference to free the io_mm. + */ + io_mm_put(io_mm); +} + +static void iommu_notifier_invalidate_range(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct iommu_bond *bond; + struct io_mm *io_mm = container_of(mn, struct io_mm, notifier); + + spin_lock(&iommu_sva_lock); + list_for_each_entry(bond, &io_mm->devices, mm_head) { + struct iommu_domain *domain = bond->domain; + + domain->ops->mm_invalidate(domain, bond->dev, io_mm, start, + end - start); + } + spin_unlock(&iommu_sva_lock); +} + +static int iommu_notifier_clear_flush_young(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + iommu_notifier_invalidate_range(mn, mm, start, end); + return 0; +} + +static void iommu_notifier_change_pte(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, pte_t pte) +{ + iommu_notifier_invalidate_range(mn, mm, address, address + PAGE_SIZE); +} + +static struct mmu_notifier_ops iommu_mmu_notifier = { + .release = iommu_notifier_release, + .clear_flush_young = iommu_notifier_clear_flush_young, + .change_pte = iommu_notifier_change_pte, + .invalidate_range = iommu_notifier_invalidate_range, +}; + +/** + * iommu_sva_device_init() - Initialize Shared Virtual Addressing for a device + * @dev: the device + * @features: bitmask of features that need to be initialized + * @max_pasid: max PASID value supported by the device + * @mm_exit: callback to notify the device driver of an mm exiting + * + * Users of the bind()/unbind() API must call this function to initialize all + * features required for SVA. + * + * The device must support multiple address spaces (e.g. PCI PASID). By default + * the PASID allocated during bind() is limited by the IOMMU capacity, and by + * the device PASID width defined in the PCI capability or in the firmware + * description. Setting @max_pasid to a non-zero value smaller than this limit + * overrides it. + * + * If the device should support recoverable I/O Page Faults (e.g. PCI PRI), the + * IOMMU_SVA_FEAT_IOPF feature must be requested. + * + * If the driver intends to share process address spaces, it should pass a valid + * @mm_exit handler. Otherwise @mm_exit can be NULL. After @mm_exit returns, the + * device must not issue any more transaction with the PASID given as argument. + * The handler gets an opaque pointer corresponding to the drvdata passed as + * argument of bind(). + * + * The @mm_exit handler is allowed to sleep. 
Be careful about the locks taken in + * @mm_exit, because they might lead to deadlocks if they are also held when + * dropping references to the mm. Consider the following call chain: + * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A) + * Using mmput_async() prevents this scenario. + * + * The device should not be performing any DMA while this function is running, + * otherwise the behavior is undefined. + * + * Return 0 if initialization succeeded, or an error. + */ +int iommu_sva_device_init(struct device *dev, unsigned long features, + unsigned int max_pasid, + iommu_mm_exit_handler_t mm_exit) +{ + int ret; + struct iommu_sva_param *param; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + + if (!domain || !domain->ops->sva_device_init) + return -ENODEV; + + if (features & ~IOMMU_SVA_FEAT_IOPF) + return -EINVAL; + + if (features & IOMMU_SVA_FEAT_IOPF) { + ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, + dev); + if (ret) + return ret; + } + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto err_remove_handler; + } + + param->features = features; + param->max_pasid = max_pasid; + param->mm_exit = mm_exit; + INIT_LIST_HEAD(¶m->mm_list); + + /* + * IOMMU driver updates the limits depending on the IOMMU and device + * capabilities. + */ + ret = domain->ops->sva_device_init(dev, param); + if (ret) + goto err_free_param; + + mutex_lock(&dev->iommu_param->lock); + if (dev->iommu_param->sva_param) + ret = -EEXIST; + else + dev->iommu_param->sva_param = param; + mutex_unlock(&dev->iommu_param->lock); + if (ret) + goto err_device_shutdown; + + return 0; + +err_device_shutdown: + if (domain->ops->sva_device_shutdown) + domain->ops->sva_device_shutdown(dev, param); + +err_free_param: + kfree(param); + +err_remove_handler: + iommu_unregister_device_fault_handler(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_device_init); + +/** + * iommu_sva_device_shutdown() - Shutdown Shared Virtual Addressing for a device + * @dev: the device + * + * Disable SVA. Device driver should ensure that the device isn't performing any + * DMA while this function is running. In addition all faults should have been + * flushed to the IOMMU. + */ +int iommu_sva_device_shutdown(struct device *dev) +{ + struct iommu_sva_param *param; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + + if (!domain) + return -ENODEV; + + __iommu_sva_unbind_dev_all(dev); + + mutex_lock(&dev->iommu_param->lock); + param = dev->iommu_param->sva_param; + dev->iommu_param->sva_param = NULL; + mutex_unlock(&dev->iommu_param->lock); + if (!param) + return -ENODEV; + + if (domain->ops->sva_device_shutdown) + domain->ops->sva_device_shutdown(dev, param); + + kfree(param); + + iommu_unregister_device_fault_handler(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_sva_device_shutdown); + +int __iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, + int *pasid, unsigned long flags, void *drvdata) +{ + int i, ret = 0; + struct io_mm *io_mm = NULL; + struct iommu_domain *domain; + struct iommu_bond *bond = NULL, *tmp; + struct iommu_sva_param *param = dev->iommu_param->sva_param; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return -EINVAL; + + /* + * The device driver does not call sva_device_init/shutdown and + * bind/unbind concurrently, so no need to take the param lock. 
+ */ + if (WARN_ON_ONCE(!param) || (flags & ~param->features)) + return -EINVAL; + + /* If an io_mm already exists, use it */ + spin_lock(&iommu_sva_lock); + idr_for_each_entry(&iommu_pasid_idr, io_mm, i) { + if (io_mm->mm == mm && io_mm_get_locked(io_mm)) { + /* ... Unless it's already bound to this device */ + list_for_each_entry(tmp, &io_mm->devices, mm_head) { + if (tmp->dev == dev) { + bond = tmp; + io_mm_put_locked(io_mm); + break; + } + } + break; + } + } + spin_unlock(&iommu_sva_lock); + + if (bond) { + *pasid = bond->io_mm->pasid; + return ret; + } + /* Require identical features within an io_mm for now */ + if (io_mm && (flags != io_mm->flags)) { + io_mm_put(io_mm); + return -EDOM; + } + + if (!io_mm) { + io_mm = io_mm_alloc(domain, dev, mm, flags); + if (IS_ERR(io_mm)) + return PTR_ERR(io_mm); + } + + ret = io_mm_attach(domain, dev, io_mm, drvdata); + if (ret) + io_mm_put(io_mm); + else + *pasid = io_mm->pasid; + + return ret; +} +EXPORT_SYMBOL_GPL(__iommu_sva_bind_device); + +int __iommu_sva_unbind_device(struct device *dev, int pasid) +{ + int ret = -ESRCH; + struct iommu_domain *domain; + struct iommu_bond *bond = NULL; + struct iommu_sva_param *param = dev->iommu_param->sva_param; + + domain = iommu_get_domain_for_dev(dev); + if (!param || WARN_ON(!domain)) + return -EINVAL; + + /* + * Caller stopped the device from issuing PASIDs, now make sure they are + * out of the fault queue. + */ + iopf_queue_flush_dev(dev); + + /* spin_lock_irq matches the one in wait_event_lock_irq */ + spin_lock_irq(&iommu_sva_lock); + list_for_each_entry(bond, ¶m->mm_list, dev_head) { + if (bond->io_mm->pasid == pasid) { + io_mm_detach_locked(bond, true); + ret = 0; + break; + } + } + spin_unlock_irq(&iommu_sva_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(__iommu_sva_unbind_device); + +/** + * __iommu_sva_unbind_dev_all() - Detach all address spaces from this device + * @dev: the device + * + * When detaching @device from a domain, IOMMU drivers should use this helper. + * This function may sleep while waiting for bonds to be released. + */ +void __iommu_sva_unbind_dev_all(struct device *dev) +{ + struct iommu_sva_param *param; + struct iommu_bond *bond, *next; + + iopf_queue_flush_dev(dev); + + /* + * io_mm_detach_locked might wait, so we shouldn't call it with the dev + * param lock held. It's fine to read sva_param outside the lock because + * it can only be freed by iommu_sva_device_shutdown when there are no + * more bonds in the list. + */ + param = dev->iommu_param->sva_param; + if (param) { + spin_lock_irq(&iommu_sva_lock); + list_for_each_entry_safe(bond, next, ¶m->mm_list, dev_head) + io_mm_detach_locked(bond, true); + spin_unlock_irq(&iommu_sva_lock); + } +} +EXPORT_SYMBOL_GPL(__iommu_sva_unbind_dev_all); + +/** + * iommu_sva_find() - Find mm associated to the given PASID + * @pasid: Process Address Space ID assigned to the mm + * + * Returns the mm corresponding to this PASID, or NULL if not found. A reference + * to the mm is taken, and must be released with mmput(). 
+ */ +struct mm_struct *iommu_sva_find(int pasid) +{ + struct io_mm *io_mm; + struct mm_struct *mm = NULL; + + spin_lock(&iommu_sva_lock); + io_mm = idr_find(&iommu_pasid_idr, pasid); + if (io_mm && io_mm_get_locked(io_mm)) { + if (mmget_not_zero(io_mm->mm)) + mm = io_mm->mm; + + io_mm_put_locked(io_mm); + } + spin_unlock(&iommu_sva_lock); + + return mm; +} +EXPORT_SYMBOL_GPL(iommu_sva_find); diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index 36d1a7ce7fc4cc922cade9642b5d64c05e5d8e19..05e430644b6672c51eea2b601e0d8127cc9fa1d8 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -90,6 +90,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu, put_device(iommu->dev); return ret; } +EXPORT_SYMBOL_GPL(iommu_device_sysfs_add); void iommu_device_sysfs_remove(struct iommu_device *iommu) { @@ -97,6 +98,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu) device_unregister(iommu->dev); iommu->dev = NULL; } +EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove); + /* * IOMMU drivers can indicate a device is managed by a given IOMMU using * this interface. A link to the device will be created in the "devices" @@ -122,6 +125,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) return ret; } +EXPORT_SYMBOL_GPL(iommu_device_link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link) { @@ -131,3 +135,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link) sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); } +EXPORT_SYMBOL_GPL(iommu_device_unlink); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8c15c598029934484e26520aa4aa7aa703a04ca3..1c39d1b8a80a8d0d10fc34b911fafb90bbfdcdca 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -41,6 +41,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; #else static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA; #endif +static bool iommu_dma_strict __read_mostly; struct iommu_callback_data { const struct iommu_ops *ops; @@ -58,6 +59,9 @@ struct iommu_group { int id; struct iommu_domain *default_domain; struct iommu_domain *domain; + atomic_t domain_shared_ref; /* Number of user of current domain. 
+ * The domain cannot be modified if ref > 0 + */ }; struct group_device { @@ -100,6 +104,7 @@ int iommu_device_register(struct iommu_device *iommu) return 0; } +EXPORT_SYMBOL_GPL(iommu_device_register); void iommu_device_unregister(struct iommu_device *iommu) { @@ -107,6 +112,7 @@ void iommu_device_unregister(struct iommu_device *iommu) list_del(&iommu->list); spin_unlock(&iommu_device_lock); } +EXPORT_SYMBOL_GPL(iommu_device_unregister); static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, unsigned type); @@ -131,6 +137,12 @@ static int __init iommu_set_def_domain_type(char *str) } early_param("iommu.passthrough", iommu_set_def_domain_type); +static int __init iommu_dma_setup(char *str) +{ + return kstrtobool(str, &iommu_dma_strict); +} +early_param("iommu.strict", iommu_dma_setup); + static ssize_t iommu_group_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) { @@ -211,18 +223,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new, pos = pos->next; } else if ((start >= a) && (end <= b)) { if (new->type == type) - goto done; + return 0; else pos = pos->next; } else { if (new->type == type) { phys_addr_t new_start = min(a, start); phys_addr_t new_end = max(b, end); + int ret; list_del(&entry->list); entry->start = new_start; entry->length = new_end - new_start + 1; - iommu_insert_resv_region(entry, regions); + ret = iommu_insert_resv_region(entry, regions); + kfree(entry); + return ret; } else { pos = pos->next; } @@ -235,7 +250,6 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new, return -ENOMEM; list_add_tail(®ion->list, pos); -done: return 0; } @@ -315,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group, type = "unmanaged\n"; break; case IOMMU_DOMAIN_DMA: - type = "DMA"; + type = "DMA\n"; break; } } @@ -385,12 +399,13 @@ struct iommu_group *iommu_group_alloc(void) return ERR_PTR(ret); } group->id = ret; + atomic_set(&group->domain_shared_ref, 0); ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } @@ -518,6 +533,26 @@ int iommu_group_set_name(struct iommu_group *group, const char *name) } EXPORT_SYMBOL_GPL(iommu_group_set_name); +struct iommu_domain *iommu_group_share_domain(struct iommu_group *group) +{ + /* the domain can be shared only when the default domain is used */ + /* todo: more shareable check */ + if (group->domain != group->default_domain) + return ERR_PTR(-EINVAL); + + atomic_inc(&group->domain_shared_ref); + return group->domain; +} +EXPORT_SYMBOL_GPL(iommu_group_share_domain); + +struct iommu_domain *iommu_group_unshare_domain(struct iommu_group *group) +{ + atomic_dec(&group->domain_shared_ref); + WARN_ON(atomic_read(&group->domain_shared_ref) < 0); + return group->domain; +} +EXPORT_SYMBOL_GPL(iommu_group_unshare_domain); + static int iommu_group_create_direct_mappings(struct iommu_group *group, struct device *dev) { @@ -618,6 +653,13 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) goto err_free_name; } + dev->iommu_param = kzalloc(sizeof(*dev->iommu_param), GFP_KERNEL); + if (!dev->iommu_param) { + ret = -ENOMEM; + goto err_free_name; + } + mutex_init(&dev->iommu_param->lock); + kobject_get(group->devices_kobj); dev->iommu_group = group; @@ -648,6 +690,8 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) mutex_unlock(&group->mutex); dev->iommu_group = NULL; 
kobject_put(group->devices_kobj); + kfree(dev->iommu_param); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: @@ -694,7 +738,7 @@ void iommu_group_remove_device(struct device *dev) sysfs_remove_link(&dev->kobj, "iommu_group"); trace_remove_device_from_group(group->id, dev); - + kfree(dev->iommu_param); kfree(device->name); kfree(device); dev->iommu_group = NULL; @@ -783,6 +827,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group) kobject_get(group->devices_kobj); return group; } +EXPORT_SYMBOL_GPL(iommu_group_ref_get); /** * iommu_group_put - Decrement group reference @@ -828,6 +873,195 @@ int iommu_group_unregister_notifier(struct iommu_group *group, } EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier); +/* Max time to wait for a pending page request */ +#define IOMMU_PAGE_RESPONSE_MAXTIME (HZ * 10) +static void iommu_dev_fault_timer_fn(struct timer_list *t) +{ + struct iommu_fault_param *fparam = from_timer(fparam, t, timer); + struct iommu_fault_event *evt, *iter; + + u64 now; + + now = get_jiffies_64(); + + /* The goal is to ensure that the driver or guest page fault handler + * (via vfio) sends the page response on time. Otherwise, limited queue + * resources may be occupied by some unresponsive guests or drivers. + * When the per-device pending fault list is not empty, + * we periodically check + * whether any anticipated page response time has expired. + * + * TODO: + * We could do the following if the response time expires: + * 1. send page response code FAILURE to all pending PRQ + * 2. inform device driver or vfio + * 3. drain in-flight page requests and responses for this device + * 4. clear pending fault list such that driver can unregister fault + * handler (otherwise blocked when pending faults are present). + */ + list_for_each_entry_safe(evt, iter, &fparam->faults, list) { + if (time_after64(now, evt->expire)) + pr_err("Page response time expired!, pasid %d gid %d exp %llu now %llu\n", + evt->pasid, evt->page_req_group_id, + evt->expire, now); + } + mod_timer(t, now + IOMMU_PAGE_RESPONSE_MAXTIME); +} + +/** + * iommu_register_device_fault_handler() - Register a device fault handler + * @dev: the device + * @handler: the fault handler + * @data: private data passed as argument to the handler + * + * When an IOMMU fault event is received, call this handler with the fault event + * and data as arguments. The handler should return 0 on success. If the fault is + * recoverable (IOMMU_FAULT_PAGE_REQ), the handler can also complete + * the fault by calling iommu_page_response() with one of the following + * response codes: + * - IOMMU_PAGE_RESP_SUCCESS: retry the translation + * - IOMMU_PAGE_RESP_INVALID: terminate the fault + * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting + * page faults if possible. + * + * Return 0 if the fault handler was installed successfully, or an error. + */ +int iommu_register_device_fault_handler(struct device *dev, + iommu_dev_fault_handler_t handler, + void *data) +{ + struct iommu_param *param = dev->iommu_param; + + /* + * Device iommu_param should have been allocated when device is + * added to its iommu_group.
+ */ + if (!param) + return -EINVAL; + + /* Only allow one fault handler registered for each device */ + if (param->fault_param) + return -EBUSY; + + mutex_lock(&param->lock); + get_device(dev); + param->fault_param = + kzalloc(sizeof(struct iommu_fault_param), GFP_ATOMIC); + if (!param->fault_param) { + put_device(dev); + mutex_unlock(&param->lock); + return -ENOMEM; + } + mutex_init(&param->fault_param->lock); + param->fault_param->handler = handler; + param->fault_param->data = data; + INIT_LIST_HEAD(&param->fault_param->faults); + + timer_setup(&param->fault_param->timer, iommu_dev_fault_timer_fn, + TIMER_DEFERRABLE); + + mutex_unlock(&param->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); + +/** + * iommu_unregister_device_fault_handler() - Unregister the device fault handler + * @dev: the device + * + * Remove the device fault handler installed with + * iommu_register_device_fault_handler(). + * + * Return 0 on success, or an error. + */ +int iommu_unregister_device_fault_handler(struct device *dev) +{ + struct iommu_param *param = dev->iommu_param; + int ret = 0; + + if (!param) + return -EINVAL; + + mutex_lock(&param->lock); + /* we cannot unregister handler if there are pending faults */ + if (!list_empty(&param->fault_param->faults)) { + ret = -EBUSY; + goto unlock; + } + + list_del(&param->fault_param->faults); + kfree(param->fault_param); + param->fault_param = NULL; + put_device(dev); + +unlock: + mutex_unlock(&param->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); + + +/** + * iommu_report_device_fault() - Report fault event to device + * @dev: the device + * @evt: fault event data + * + * Called by IOMMU model specific drivers when fault is detected, typically + * in a threaded IRQ handler. + * + * Return 0 on success, or an error.
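+ *
+ * As an illustrative sketch only, a driver that has decoded a page request
+ * into a local struct iommu_fault_event could forward it as:
+ *
+ *	evt.type = IOMMU_FAULT_PAGE_REQ;
+ *	evt.pasid = pasid;
+ *	evt.last_req = last;
+ *	ret = iommu_report_device_fault(dev, &evt);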
+ */ +int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) +{ + int ret = 0; + struct iommu_fault_event *evt_pending; + struct timer_list *tmr; + u64 exp; + struct iommu_fault_param *fparam; + + /* iommu_param is allocated when device is added to group */ + if (!dev->iommu_param | !evt) + return -EINVAL; + /* we only report device fault if there is a handler registered */ + mutex_lock(&dev->iommu_param->lock); + if (!dev->iommu_param->fault_param || + !dev->iommu_param->fault_param->handler) { + ret = -EINVAL; + goto done_unlock; + } + fparam = dev->iommu_param->fault_param; + if (evt->type == IOMMU_FAULT_PAGE_REQ && evt->last_req) { + evt_pending = kzalloc(sizeof(*evt_pending), GFP_ATOMIC); + if (!evt_pending) { + ret = -ENOMEM; + goto done_unlock; + } + memcpy(evt_pending, evt, sizeof(struct iommu_fault_event)); + /* Keep track of response expiration time */ + exp = get_jiffies_64() + IOMMU_PAGE_RESPONSE_MAXTIME; + evt_pending->expire = exp; + + if (list_empty(&fparam->faults)) { + /* First pending event, start timer */ + tmr = &dev->iommu_param->fault_param->timer; + WARN_ON(timer_pending(tmr)); + mod_timer(tmr, exp); + } + + mutex_lock(&fparam->lock); + list_add_tail(&evt_pending->list, &fparam->faults); + mutex_unlock(&fparam->lock); + } + ret = fparam->handler(evt, fparam->data); + trace_dev_fault(dev, evt); +done_unlock: + mutex_unlock(&dev->iommu_param->lock); + return ret; +} +EXPORT_SYMBOL_GPL(iommu_report_device_fault); + /** * iommu_group_id - Return ID for a group * @group: the group to ID @@ -956,6 +1190,7 @@ struct iommu_group *generic_device_group(struct device *dev) { return iommu_group_alloc(); } +EXPORT_SYMBOL_GPL(generic_device_group); /* * Use standard PCI bus topology, isolation features, and DMA alias quirks @@ -1023,6 +1258,7 @@ struct iommu_group *pci_device_group(struct device *dev) /* No shared group found, allocate new */ return iommu_group_alloc(); } +EXPORT_SYMBOL_GPL(pci_device_group); /** * iommu_group_get_for_dev - Find or create the IOMMU group for a device @@ -1039,6 +1275,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_group *group; int ret; + unsigned int type = iommu_def_domain_type; group = iommu_group_get(dev); if (group) @@ -1061,17 +1298,33 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (!group->default_domain) { struct iommu_domain *dom; - dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); - if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { - dev_warn(dev, - "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", - iommu_def_domain_type); +#ifdef CONFIG_SMMU_BYPASS_DEV + /* direct allocate required default domain type for some specific devices. 
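+	 * If the IOMMU driver implements ops->device_domain_type(), the type it
+	 * reports for this device is used instead of the command-line default;
+	 * when the callback fails we fall back to iommu_def_domain_type below.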
*/ + if (ops->device_domain_type) { + if (ops->device_domain_type(dev, &type)) + type = iommu_def_domain_type; + } +#endif + dom = __iommu_domain_alloc(dev->bus, type); + if (!dom && type != IOMMU_DOMAIN_DMA) { dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); + if (dom) { + dev_warn(dev, + "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA", + iommu_def_domain_type); + } } group->default_domain = dom; if (!group->domain) group->domain = dom; + + if (dom && !iommu_dma_strict) { + int attr = 1; + iommu_domain_set_attr(dom, + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, + &attr); + } } ret = iommu_group_add_device(group, dev); @@ -1082,6 +1335,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) return group; } +EXPORT_SYMBOL_GPL(iommu_group_get_for_dev); struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) { @@ -1233,6 +1487,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) { int err; + if (ops == NULL) { + bus->iommu_ops = NULL; + return 0; + } + if (bus->iommu_ops != NULL) return -EBUSY; @@ -1301,6 +1560,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, domain->type = type; /* Assume all sizes by default; the driver may override this later */ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + INIT_LIST_HEAD(&domain->mm_list); return domain; } @@ -1362,6 +1622,93 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev) } EXPORT_SYMBOL_GPL(iommu_attach_device); +int iommu_bind_pasid_table(struct iommu_domain *domain, struct device *dev, + struct pasid_table_config *pasidt_binfo) +{ + if (unlikely(!domain->ops->bind_pasid_table)) + return -ENODEV; + + return domain->ops->bind_pasid_table(domain, dev, pasidt_binfo); +} +EXPORT_SYMBOL_GPL(iommu_bind_pasid_table); + +void iommu_unbind_pasid_table(struct iommu_domain *domain, struct device *dev) +{ + if (unlikely(!domain->ops->unbind_pasid_table)) + return; + + domain->ops->unbind_pasid_table(domain, dev); +} +EXPORT_SYMBOL_GPL(iommu_unbind_pasid_table); + +int iommu_sva_invalidate(struct iommu_domain *domain, + struct device *dev, struct tlb_invalidate_info *inv_info) +{ + int ret = 0; + + if (unlikely(!domain->ops->sva_invalidate)) + return -ENODEV; + + ret = domain->ops->sva_invalidate(domain, dev, inv_info); + trace_sva_invalidate(dev, inv_info); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_invalidate); + +int iommu_page_response(struct device *dev, + struct page_response_msg *msg) +{ + struct iommu_param *param = dev->iommu_param; + int ret = -EINVAL; + struct iommu_fault_event *evt, *iter; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + + if (!domain || !domain->ops->page_response) + return -ENODEV; + + /* + * Device iommu_param should have been allocated when device is + * added to its iommu_group. 
+ */ + if (!param || !param->fault_param) + return -EINVAL; + + /* Only send response if there is a fault report pending */ + mutex_lock(&param->fault_param->lock); + if (list_empty(&param->fault_param->faults)) { + pr_warn("no pending PRQ, drop response\n"); + goto done_unlock; + } + /* + * Check if we have a matching page request pending to respond, + * otherwise return -EINVAL + */ + list_for_each_entry_safe(evt, iter, &param->fault_param->faults, list) { + if (evt->pasid == msg->pasid && + msg->page_req_group_id == evt->page_req_group_id) { + msg->private_data = evt->iommu_private; + trace_dev_page_response(dev, msg); + ret = domain->ops->page_response(dev, msg); + list_del(&evt->list); + kfree(evt); + break; + } + } + + /* stop response timer if no more pending request */ + if (list_empty(&param->fault_param->faults) && + timer_pending(&param->fault_param->timer)) { + pr_debug("no pending PRQ, stop timer\n"); + del_timer(&param->fault_param->timer); + } + +done_unlock: + mutex_unlock(&param->fault_param->lock); + return ret; +} +EXPORT_SYMBOL_GPL(iommu_page_response); + static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { @@ -1415,6 +1762,15 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) } EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); +/* + * For IOMMU_DOMAIN_DMA implementations which already provide their own + * guarantees that the group and its default domain are valid and correct. + */ +struct iommu_domain *iommu_get_dma_domain(struct device *dev) +{ + return dev->iommu_group->default_domain; +} + /* * IOMMU groups are really the natrual working unit of the IOMMU, but * the IOMMU API works on domains and devices. Bridge that gap by @@ -1437,7 +1793,8 @@ static int __iommu_attach_group(struct iommu_domain *domain, { int ret; - if (group->default_domain && group->domain != group->default_domain) + if ((group->default_domain && group->domain != group->default_domain) || + atomic_read(&group->domain_shared_ref) > 0) return -EBUSY; ret = __iommu_group_for_each_dev(group, domain, @@ -1474,6 +1831,8 @@ static void __iommu_detach_group(struct iommu_domain *domain, { int ret; + WARN_ON(atomic_read(&group->domain_shared_ref) > 0); + if (!group->default_domain) { __iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device); @@ -1888,6 +2247,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, region->type = type; return region; } +EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); /* Request that a device is direct mapped by the IOMMU */ int iommu_request_dm_for_dev(struct device *dev) @@ -1897,9 +2257,9 @@ int iommu_request_dm_for_dev(struct device *dev) int ret; /* Device must already be in a group before calling this function */ - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + group = iommu_group_get(dev); + if (!group) + return -EINVAL; mutex_lock(&group->mutex); @@ -1941,6 +2301,7 @@ int iommu_request_dm_for_dev(struct device *dev) return ret; } +EXPORT_SYMBOL_GPL(iommu_request_dm_for_dev); const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) { @@ -2014,3 +2375,86 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); + +/** + * iommu_sva_bind_device() - Bind a process address space to a device + * @dev: the device + * @mm: the mm to bind, caller must hold a reference to it + * @pasid: valid address where the PASID will be stored + * @flags: bond properties + * @drvdata: private data passed to the mm exit
handler + * + * Create a bond between device and task, allowing the device to access the mm + * using the returned PASID. If unbind() isn't called first, a subsequent bind() + * for the same device and mm fails with -EEXIST. + * + * iommu_sva_device_init() must be called first, to initialize the required SVA + * features. @flags is a subset of these features. + * + * If IOMMU_SVA_FEAT_IOPF isn't requested, the caller must pin down using + * get_user_pages*() all mappings shared with the device. mlock() isn't + * sufficient, as it doesn't prevent minor page faults (e.g. copy-on-write). + * + * On success, 0 is returned and @pasid contains a valid ID. Otherwise, an error + * is returned. + */ +int iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, int *pasid, + unsigned long flags, void *drvdata) +{ + int ret = -EINVAL; + struct iommu_group *group; + + if (!pasid) + return -EINVAL; + + group = iommu_group_get(dev); + if (!group) + return -ENODEV; + + /* Ensure device count and domain don't change while we're binding */ + mutex_lock(&group->mutex); + if (iommu_group_device_count(group) != 1) + goto out_unlock; + + ret = __iommu_sva_bind_device(dev, mm, pasid, flags, drvdata); + +out_unlock: + mutex_unlock(&group->mutex); + iommu_group_put(group); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_bind_device); + +/** + * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device + * @dev: the device + * @pasid: the pasid returned by bind() + * + * Remove bond between device and address space identified by @pasid. Users + * should not call unbind() if the corresponding mm exited (as the PASID might + * have been reallocated for another process). + * + * The device must not be issuing any more transaction for this PASID. All + * outstanding page requests for this PASID must have been flushed to the IOMMU. 
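+ *
+ * As a minimal sketch, assuming iommu_sva_device_init() has already been
+ * called for @dev, a driver typically pairs the two calls:
+ *
+ *	ret = iommu_sva_bind_device(dev, current->mm, &pasid, 0, drvdata);
+ *	...
+ *	iommu_sva_unbind_device(dev, pasid);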
+ * + * Returns 0 on success, or an error value + */ +int iommu_sva_unbind_device(struct device *dev, int pasid) +{ + int ret = -EINVAL; + struct iommu_group *group; + + group = iommu_group_get(dev); + if (!group) + return -ENODEV; + + mutex_lock(&group->mutex); + ret = __iommu_sva_unbind_device(dev, pasid); + mutex_unlock(&group->mutex); + + iommu_group_put(group); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 83fe2621effe72bc1cbeecd80df4030235d87328..7f18dbeb7b7c29e9bf098335aacd9dac8d44a717 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->granule = granule; iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad)); + iovad->max32_alloc_size = iovad->dma_32bit_pfn; iovad->flush_cb = NULL; iovad->fq = NULL; iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; @@ -65,14 +66,19 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, } EXPORT_SYMBOL_GPL(init_iova_domain); +bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return !!iovad->fq; +} + static void free_iova_flush_queue(struct iova_domain *iovad) { - if (!iovad->fq) + if (!has_iova_flush_queue(iovad)) return; - if (timer_pending(&iovad->fq_timer)) - del_timer(&iovad->fq_timer); + del_timer_sync(&iovad->fq_timer); + flush_work(&iovad->free_iova_work); fq_destroy_all_entries(iovad); free_percpu(iovad->fq); @@ -82,16 +88,35 @@ static void free_iova_flush_queue(struct iova_domain *iovad) iovad->entry_dtor = NULL; } +static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq); +static void free_iova_work_func(struct work_struct *work) +{ + struct iova_domain *iovad; + int cpu; + + iovad = container_of(work, struct iova_domain, free_iova_work); + for_each_possible_cpu(cpu) { + unsigned long flags; + struct iova_fq *fq; + + fq = per_cpu_ptr(iovad->fq, cpu); + spin_lock_irqsave(&fq->lock, flags); + fq_ring_free(iovad, fq); + spin_unlock_irqrestore(&fq->lock, flags); + } +} + int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) { + struct iova_fq __percpu *queue; int cpu; atomic64_set(&iovad->fq_flush_start_cnt, 0); atomic64_set(&iovad->fq_flush_finish_cnt, 0); - iovad->fq = alloc_percpu(struct iova_fq); - if (!iovad->fq) + queue = alloc_percpu(struct iova_fq); + if (!queue) return -ENOMEM; iovad->flush_cb = flush_cb; @@ -100,13 +125,18 @@ int init_iova_flush_queue(struct iova_domain *iovad, for_each_possible_cpu(cpu) { struct iova_fq *fq; - fq = per_cpu_ptr(iovad->fq, cpu); + fq = per_cpu_ptr(queue, cpu); fq->head = 0; fq->tail = 0; spin_lock_init(&fq->lock); } + smp_wmb(); + + iovad->fq = queue; + + INIT_WORK(&iovad->free_iova_work, free_iova_work_func); timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); atomic_set(&iovad->fq_timer_on, 0); @@ -138,9 +168,12 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) struct iova *cached_iova; cached_iova = rb_entry(iovad->cached32_node, struct iova, node); - if (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo) + if (free == cached_iova || + (free->pfn_hi < iovad->dma_32bit_pfn && + free->pfn_lo >= cached_iova->pfn_lo)) { iovad->cached32_node = rb_next(&free->node); + iovad->max32_alloc_size = iovad->dma_32bit_pfn; + } cached_iova = rb_entry(iovad->cached_node, struct iova, node); if (free->pfn_lo >= cached_iova->pfn_lo) @@ -190,6 +223,10 @@ 
static int __alloc_and_insert_iova_range(struct iova_domain *iovad, /* Walk the tree backwards */ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); + if (limit_pfn <= iovad->dma_32bit_pfn && + size >= iovad->max32_alloc_size) + goto iova32_full; + curr = __get_cached_rbnode(iovad, limit_pfn); curr_iova = rb_entry(curr, struct iova, node); do { @@ -201,8 +238,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, } while (curr && new_pfn <= curr_iova->pfn_hi); if (limit_pfn < size || new_pfn < iovad->start_pfn) { - spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - return -ENOMEM; + iovad->max32_alloc_size = size; + goto iova32_full; } /* pfn_lo will point to size aligned address if size_aligned is set */ @@ -214,9 +251,11 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, __cached_rbnode_insert_update(iovad, new); spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - - return 0; + +iova32_full: + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + return -ENOMEM; } static struct kmem_cache *iova_cache; @@ -225,7 +264,7 @@ static DEFINE_MUTEX(iova_cache_mutex); struct iova *alloc_iova_mem(void) { - return kmem_cache_alloc(iova_cache, GFP_ATOMIC); + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC); } EXPORT_SYMBOL(alloc_iova_mem); @@ -522,20 +561,11 @@ static void fq_destroy_all_entries(struct iova_domain *iovad) static void fq_flush_timeout(struct timer_list *t) { struct iova_domain *iovad = from_timer(iovad, t, fq_timer); - int cpu; atomic_set(&iovad->fq_timer_on, 0); iova_domain_flush(iovad); - for_each_possible_cpu(cpu) { - unsigned long flags; - struct iova_fq *fq; - - fq = per_cpu_ptr(iovad->fq, cpu); - spin_lock_irqsave(&fq->lock, flags); - fq_ring_free(iovad, fq); - spin_unlock_irqrestore(&fq->lock, flags); - } + schedule_work(&iovad->free_iova_work); } void queue_iova(struct iova_domain *iovad, @@ -569,7 +599,9 @@ void queue_iova(struct iova_domain *iovad, spin_unlock_irqrestore(&fq->lock, flags); - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) + /* Avoid false sharing as much as possible. */ + if (!atomic_read(&iovad->fq_timer_on) && + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1)) mod_timer(&iovad->fq_timer, jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); } @@ -801,7 +833,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) for (i = 0 ; i < mag->size; ++i) { struct iova *iova = private_find_iova(iovad, mag->pfns[i]); - BUG_ON(!iova); + if (WARN_ON(!iova)) + continue; + private_free_iova(iovad, iova); } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 22b94f8a9a04fb34e1d288b3adb2690d4b070f1a..8e1601e05fedaad54b769b358d3e999ccfd6f59e 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -37,8 +38,6 @@ #define arm_iommu_detach_device(...) do {} while (0) #endif -#include "io-pgtable.h" - #define IPMMU_CTX_MAX 8 struct ipmmu_features { @@ -361,7 +360,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, /* The hardware doesn't support selective TLB flush. 
*/ } -static const struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, @@ -425,13 +424,14 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; + domain->cfg.tlb = &ipmmu_flush_ops; domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.force_aperture = true; /* * TODO: Add support for coherent walk through CCI with DVM and remove * cache handling. For now, delegate it to the io-pgtable code. */ + domain->cfg.coherent_walk = false; domain->cfg.iommu_dev = domain->mmu->root->dev; /* @@ -501,6 +501,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { + if (!domain->mmu) + return; + /* * Disable the context. Flush the TLB as required when modifying the * context registers. diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index fc5f0b53adaf3c8ec4c072ed7ada204ca14aa06f..15bd2a35472fda856d59dff841037348a2cc9c09 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -35,7 +36,6 @@ #include "msm_iommu_hw-8xxx.h" #include "msm_iommu.h" -#include "io-pgtable.h" #define MRC(reg, processor, op1, crn, crm, op2) \ __asm__ __volatile__ ( \ @@ -189,7 +189,7 @@ static void __flush_iotlb_sync(void *cookie) */ } -static const struct iommu_gather_ops msm_iommu_gather_ops = { +static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_add_flush = __flush_iotlb_range, .tlb_sync = __flush_iotlb_sync, @@ -356,7 +356,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv) .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &msm_iommu_gather_ops, + .tlb = &msm_iommu_flush_ops, .iommu_dev = priv->dev, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index f9f69f7111a91e99c1552e16533afac7d0d5f275..998aa6f3c768bb4503f793167829d982e88a4222 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -196,7 +196,7 @@ static void mtk_iommu_tlb_sync(void *cookie) } } -static const struct iommu_gather_ops mtk_iommu_gather_ops = { +static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_sync = mtk_iommu_tlb_sync, @@ -275,7 +275,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &mtk_iommu_gather_ops, + .tlb = &mtk_iommu_flush_ops, .iommu_dev = data->dev, }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 778498b8633fc63d4383ee0975741a8acffb3b5f..62c2c3e8c5dfb1148e933aa1fdd91f2033158f3b 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -19,13 +19,12 @@ #include #include #include +#include #include #include #include #include -#include "io-pgtable.h" - struct mtk_iommu_suspend_reg { u32 standard_axi_mode; u32 dcm_dis; diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index f7787e757244d474875260af17f063c655a4d351..2a34fa23978be372d3fa57071c6e9998e61b3771 100644 --- a/drivers/iommu/of_iommu.c +++ 
b/drivers/iommu/of_iommu.c @@ -187,6 +187,19 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, if (err) break; } + + fwspec = dev->iommu_fwspec; + if (!err && fwspec) { + const __be32 *prop; + + if (of_get_property(master_np, "dma-can-stall", NULL)) + fwspec->can_stall = true; + + prop = of_get_property(master_np, + "pasid-num-bits", NULL); + if (prop) + fwspec->num_pasid_bits = be32_to_cpu(*prop); + } } /* diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index ee70e9921cf167dc80a5aac17e3b5139ec5f5c06..bd44dbaae895eee5269290b2b19fb4645a4d11ef 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,6 @@ #include #include -#include "io-pgtable.h" #include "arm-smmu-regs.h" #define SMMU_INTR_SEL_NS 0x2000 @@ -175,7 +175,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } -static const struct iommu_gather_ops qcom_gather_ops = { +static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_sync = qcom_iommu_tlb_sync, @@ -226,7 +226,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 40, - .tlb = &qcom_gather_ops, + .tlb = &qcom_flush_ops, .iommu_dev = qcom_iommu->dev, }; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index ad3e2b97469ed6de44f3524cafd677825d7afca5..140b287e886c843289e90ba211ae115148d7b49f 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -977,13 +977,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) if (!dma_dev) return NULL; - rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL); + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); if (!rk_domain) return NULL; if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&rk_domain->domain)) - return NULL; + goto err_free_domain; /* * rk32xx iommus use a 2 level pagetable. 
@@ -1018,6 +1018,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) err_put_cookie: if (type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); +err_free_domain: + kfree(rk_domain); return NULL; } @@ -1046,6 +1048,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) if (domain->type == IOMMU_DOMAIN_DMA) iommu_put_dma_cookie(&rk_domain->domain); + kfree(rk_domain); } static int rk_iommu_add_device(struct device *dev) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 0d03341317c466be5f71d6dd7cae5d53939eef26..fa0ecb5e6380998691ad8d3f06f2c2cd898043b8 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) -#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24) #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ SMMU_TLB_FLUSH_VA_MATCH_SECTION) #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ @@ -165,9 +164,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) return (addr & smmu->pfn_mask) == addr; } -static dma_addr_t smmu_pde_to_dma(u32 pde) +static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde) { - return pde << 12; + return (dma_addr_t)(pde & smmu->pfn_mask) << 12; } static void smmu_flush_ptc_all(struct tegra_smmu *smmu) @@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_MATCH_ALL; + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL; smmu_writel(smmu, value, SMMU_TLB_FLUSH); } @@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_SECTION(iova); + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova); smmu_writel(smmu, value, SMMU_TLB_FLUSH); } @@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, { u32 value; - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | - SMMU_TLB_FLUSH_VA_GROUP(iova); + if (smmu->soc->num_asids == 4) + value = (asid & 0x3) << 29; + else + value = (asid & 0x7f) << 24; + + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova); smmu_writel(smmu, value, SMMU_TLB_FLUSH); } @@ -540,6 +551,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, dma_addr_t *dmap) { unsigned int pd_index = iova_pd_index(iova); + struct tegra_smmu *smmu = as->smmu; struct page *pt_page; u32 *pd; @@ -548,7 +560,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, return NULL; pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pd_index]); + *dmap = smmu_pde_to_dma(smmu, pd[pd_index]); return tegra_smmu_pte_offset(pt_page, iova); } @@ -590,7 +602,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, } else { u32 *pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(pd[pde]); + *dmap = smmu_pde_to_dma(smmu, pd[pde]); } return tegra_smmu_pte_offset(as->pts[pde], 
iova); @@ -615,7 +627,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) if (--as->count[pde] == 0) { struct tegra_smmu *smmu = as->smmu; u32 *pd = page_address(as->pd); - dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]); + dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]); tegra_smmu_set_pde(as, iova, 0); diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index 75dd15d66df6fa415066e5a87f9786f46d066275..dc39a84086c7ca3bc146c31814c279cac0e9e9a9 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c @@ -86,22 +86,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) return 0; } -static int ipoctal_open(struct tty_struct *tty, struct file *file) +static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty) { struct ipoctal_channel *channel = dev_get_drvdata(tty->dev); struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); - int err; - - tty->driver_data = channel; + int res; if (!ipack_get_carrier(ipoctal->dev)) return -EBUSY; - err = tty_port_open(&channel->tty_port, tty, file); - if (err) - ipack_put_carrier(ipoctal->dev); + res = tty_standard_install(driver, tty); + if (res) + goto err_put_carrier; + + tty->driver_data = channel; + + return 0; + +err_put_carrier: + ipack_put_carrier(ipoctal->dev); + + return res; +} - return err; +static int ipoctal_open(struct tty_struct *tty, struct file *file) +{ + struct ipoctal_channel *channel = tty->driver_data; + + return tty_port_open(&channel->tty_port, tty, file); } static void ipoctal_reset_stats(struct ipoctal_stats *stats) @@ -269,7 +281,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, int res; int i; struct tty_driver *tty; - char name[20]; struct ipoctal_channel *channel; struct ipack_region *region; void __iomem *addr; @@ -360,8 +371,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, /* Fill struct tty_driver with ipoctal data */ tty->owner = THIS_MODULE; tty->driver_name = KBUILD_MODNAME; - sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); - tty->name = name; + tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); + if (!tty->name) { + res = -ENOMEM; + goto err_put_driver; + } tty->major = 0; tty->minor_start = 0; @@ -377,8 +391,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, res = tty_register_driver(tty); if (res) { dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n"); - put_tty_driver(tty); - return res; + goto err_free_name; } /* Save struct tty_driver for use it when uninstalling the device */ @@ -415,6 +428,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, ipoctal_irq_handler, ipoctal); return 0; + +err_free_name: + kfree(tty->name); +err_put_driver: + put_tty_driver(tty); + + return res; } static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel, @@ -655,6 +675,7 @@ static void ipoctal_cleanup(struct tty_struct *tty) static const struct tty_operations ipoctal_fops = { .ioctl = NULL, + .install = ipoctal_install, .open = ipoctal_open, .close = ipoctal_close, .write = ipoctal_write_tty, @@ -703,6 +724,7 @@ static void __ipoctal_remove(struct ipoctal *ipoctal) } tty_unregister_driver(ipoctal->tty_drv); + kfree(ipoctal->tty_drv->name); put_tty_driver(ipoctal->tty_drv); kfree(ipoctal); } diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 
383e7b70221d21fd1197ffd1191828f3f76d204c..e0a844c1b8e549a5c817eb5269edf9ce7f34e0b8 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -57,6 +57,15 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select GENERIC_IRQ_MULTI_HANDLER + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN @@ -145,6 +154,16 @@ config HISILICON_IRQ_MBIGEN select ARM_GIC_V3 select ARM_GIC_V3_ITS +if ASCEND_FEATURES + +config ASCEND_INIT_ALL_GICR + bool "Enable init all GICR for Ascend" + depends on ARM_GIC_V3 + depends on ARM_GIC_V3_ITS + default n + +endif + config IMGPDC_IRQ bool select GENERIC_IRQ_CHIP diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index fbd1ec8070efa89e5439321f410af09579e76b1e..7ee4db82d983942b5add3a4eadd7bea7f68ec05a 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index 23a3b877f7f1dfe350ef70f26c78efeefdae0060..fc958d1acdcee17083ced7b4e47fb335b76d560c 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, return 0; err_sgi: - while (--i >= 0) - irq_domain_free_irqs_parent(domain, virq, i); + irq_domain_free_irqs_parent(domain, virq, i); alpine_msix_free_sgi(priv, sgi, nr_irqs); return err; } diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index c9bdc5221b82f9bf404c84d2930b4455cc3f4fe9..3eebcd90749ad8d519fb0e9056a10143bc4582fc 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -350,6 +350,10 @@ static struct irq_chip armada_370_xp_irq_chip = { static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { + /* IRQs 0 and 1 cannot be mapped, they are handled internally */ + if (hw <= 1) + return -EINVAL; + armada_370_xp_irq_mask(irq_get_irq_data(virq)); if (!is_percpu_irq(hw)) writel(hw, per_cpu_int_base + diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 0f6e30e9009da533c6a4bc6c3cf5e79fe2ae748c..f53dfc5aa7c56ad5332c1b26bef8652ccc177af2 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -284,6 +284,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn, pr_err("failed to map parent interrupt %d\n", parent_irq); return -EINVAL; } + + if (of_property_read_bool(dn, "brcm,irq-can-wake")) + enable_irq_wake(parent_irq); + irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, intc); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 0e65f609352ecee0519e2b207391b8ccace57869..83364fedbf0ab57962a7b325663ab910d6173a0a 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ 
b/drivers/irqchip/irq-brcmstb-l2.c @@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; + unsigned long flags; - irq_gc_lock(gc); + irq_gc_lock_irqsave(gc, flags); /* Save the current mask */ b->saved_mask = irq_reg_readl(gc, ct->regs.mask); @@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d) irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); irq_reg_writel(gc, gc->wake_active, ct->regs.enable); } - irq_gc_unlock(gc); + irq_gc_unlock_irqrestore(gc, flags); } static void brcmstb_l2_intc_resume(struct irq_data *d) @@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; + unsigned long flags; - irq_gc_lock(gc); + irq_gc_lock_irqsave(gc, flags); if (ct->chip.irq_ack) { /* Clear unmasked non-wakeup interrupts */ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, @@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) /* Restore the saved mask */ irq_reg_writel(gc, b->saved_mask, ct->regs.disable); irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); - irq_gc_unlock(gc); + irq_gc_unlock_irqrestore(gc, flags); } static int __init brcmstb_l2_intc_of_init(struct device_node *np, diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 0000000000000000000000000000000000000000..dd24af3793ca5f0a744f5559f28c0a00aa863468 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,4110 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng + * Chen Baozi + * Mao Honngbo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) + +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 
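+ *
+ * For example, with lpi_id_bits == 16 this works out to a 64kB property
+ * table (2^16 one-byte entries) and an 8kB pending bitmap (2^16 bits),
+ * the latter rounded up to 64kB by the ALIGN() below.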
+ */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. + */ +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void __iomem *base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 cbaser_save; + u32 ctlr_save; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + u32 ite_size; + u32 device_ids; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + bool is_v4; + int vlpi_redist_offset; +}; + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If it some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. 
+ */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (vm->vlpi_count[its->list_nr]) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. + */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. 
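+ *
+ * Each block is four 64-bit words, i.e. 32 bytes, so the 64kB command
+ * queue (ITS_CMD_QUEUE_SZ) holds 64K / 32 = 2048 entries.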
+ */ +struct its_cmd_block { + u64 raw_cmd[4]; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); +} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + 
its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static 
struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr; + u64 target; + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovp_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. 
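+ * The queue counts as full when advancing the write pointer by one
+ * entry would make it catch up with the read pointer reported in
+ * GITS_CREADR.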
*/ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! */ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. + */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... 
*/ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, 
its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (!vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + return d->hwirq - its_dev->event_map.lpi_base; +} + +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + va = page_address(its_dev->event_map.vm->vprop_page); + map = &its_dev->event_map.vlpi_maps[event]; + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. 
Single. Byte. + * Humpf... + */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + its_send_inv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) + return; + + its_dev->event_map.vlpi_maps[event].db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issueing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. + */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +#define MAX_MARS3_SKT_COUNT 8 + +static int its_cpumask_select(struct its_device *its_dev, + const struct cpumask *mask_val, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (0 != skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) { + return cpu; + } + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) { + return i; + } + } else if (0xff != skt) { + pr_err("socket address: %d is out of range.", skt); + } + } + } + + return cpus; +} + + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + /* lpi cannot be routed to a redistributor that is on a foreign node */ + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + if (its_dev->its->numa_node >= 0) { + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + if (!cpumask_intersects(mask_val, cpu_mask)) + return -EINVAL; + } + } + + cpu = its_cpumask_select(its_dev, mask_val, cpu_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + /* don't set the 
affinity when the target cpu is same as current one */ + if (cpu != its_dev->event_map.col_map[id]) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + + return IRQ_SET_MASK_OK_DONE; +} + +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + + return 0; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. 
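+ * From here on, lpi_write_config() pokes the guest's
+ * vprop_page rather than the host LPI property table.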
+ */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || + !its_dev->event_map.vlpi_maps[event].vm) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = its_dev->event_map.vlpi_maps[event]; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. + */ + if (!--its_dev->event_map.nr_vlpis) { + its_dev->event_map.vm = NULL; + kfree(its_dev->event_map.vlpi_maps); + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) + lpi_update_config(d, 0xff, info->config); + else + lpi_write_config(d, 0xff, info->config); + its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); + + return 0; +} + +static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + /* Need a v4 ITS */ + if (!its_dev->its->is_v4) + return -EINVAL; + + /* Unmap request? 
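+ * A NULL info pointer means the caller wants the vLPI mapping torn down.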
*/ + if (!info) + return its_vlpi_unmap(d); + + switch (info->cmd_type) { + case MAP_VLPI: + return its_vlpi_map(d, info); + + case GET_VLPI: + return its_vlpi_get(d, info); + + case PROP_UPDATE_VLPI: + case PROP_UPDATE_AND_INV_VLPI: + return its_vlpi_prop_update(d, info); + + default: + return -EINVAL; + } +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_irq_chip = { + .name = "ITS", + .irq_mask = its_mask_irq, + .irq_unmask = its_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_set_affinity, + .irq_compose_msi_msg = its_irq_compose_msi_msg, + .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, + .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, +}; + + +/* + * How we allocate LPIs: + * + * lpi_range_list contains ranges of LPIs that are to available to + * allocate from. To allocate LPIs, just pick the first range that + * fits the required allocation, and reduce it by the required + * amount. Once empty, remove the range from the list. + * + * To free a range of LPIs, add a free range to the list, sort it and + * merge the result if the new range happens to be adjacent to an + * already free block. + * + * The consequence of the above is that allocation is cost is low, but + * freeing is expensive. We assumes that freeing rarely occurs. + */ +#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ + +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +static struct lpi_range *mk_lpi_range(u32 base, u32 span) +{ + struct lpi_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (range) { + INIT_LIST_HEAD(&range->entry); + range->base_id = base; + range->span = span; + } + + return range; +} + +static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct lpi_range *ra, *rb; + + ra = container_of(a, struct lpi_range, entry); + rb = container_of(b, struct lpi_range, entry); + + return ra->base_id - rb->base_id; +} + +static void merge_lpi_ranges(void) +{ + struct lpi_range *range, *tmp; + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (!list_is_last(&range->entry, &lpi_range_list) && + (tmp->base_id == (range->base_id + range->span))) { + tmp->base_id = range->base_id; + tmp->span += range->span; + list_del(&range->entry); + kfree(range); + } + } +} + +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; +} + +static int free_lpi_range(u32 base, u32 nr_lpis) +{ + struct lpi_range *new; + int err = 0; + + mutex_lock(&lpi_range_lock); + + new = mk_lpi_range(base, nr_lpis); + if (!new) { + err = -ENOMEM; + goto out; + } + + list_add(&new->entry, &lpi_range_list); + list_sort(NULL, &lpi_range_list, lpi_range_cmp); + merge_lpi_ranges(); +out: + mutex_unlock(&lpi_range_lock); + return err; +} + +static int __init its_lpi_init(u32 id_bits) +{ + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; + 
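+ /* LPI INTIDs start at 8192, hence the '(1UL << id_bits) - 8192' above and the free_lpi_range(8192, lpis) call below. */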
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); + } + + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs. + */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + kfree(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char *its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct 
its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + base = (void *)page_address(alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, order)); + if (!base) + return -ENOMEM; + + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | + cache | + shr | + GITS_BASER_VALID); + + val |= indirect ? GITS_BASER_INDIRECT : 0x0; + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } + + its_write_baser(its, baser, val); + tmp = baser->val; + + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + /* + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. If that's zero, make it + * non-cacheable as well. + */ + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + if (!shr) { + cache = GITS_BASER_nC; + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + } + goto retry_baser; + } + + if (val != tmp) { + pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", + &its->phys_base, its_base_type_string[type], + val, tmp); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + baser->order = order; + baser->base = base; + baser->psz = psz; + tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; + + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + indirect ? "indirect" : "flat", (int)esz, + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); + + return 0; +} + +static bool its_parse_indirect_baser(struct its_node *its, + struct its_baser *baser, + u32 *order, u32 ids) +{ + u64 tmp = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(tmp); + u64 esz = GITS_BASER_ENTRY_SIZE(tmp); + u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; + u32 new_order = *order; + u32 psz = baser->psz; + bool indirect = false; + + /* No need to enable Indirection if memory requirement < (psz*2)bytes */ + if ((esz << ids) > (psz * 2)) { + /* + * Find out whether hw supports a single or two-level table by + * table by reading bit at offset '62' after writing '1' to it. 
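+ * If the ITS only supports a flat table, GITS_BASER.Indirect does not
+ * stick and reads back as zero.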
+ */ + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); + indirect = !!(baser->val & GITS_BASER_INDIRECT); + + if (indirect) { + /* + * The size of the lvl2 table is equal to ITS page size + * which is 'psz'. For computing lvl1 table size, + * subtract ID bits that sparse lvl2 table from 'ids' + * which is reported by ITS hardware times lvl1 table + * entry size. + */ + ids -= ilog2(psz / (int)esz); + esz = GITS_LVL1_ENTRY_SIZE; + } + } + + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory if two-level device table + * feature is not supported by hardware. + */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order >= MAX_ORDER) { + new_order = MAX_ORDER - 1; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", + &its->phys_base, its_base_type_string[type], + its->device_ids, ids); + } + + *order = new_order; + + return indirect; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + bool indirect = false; + u32 order; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, &order, + its->device_ids); + break; + + case GITS_BASER_TYPE_VCPU: + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if 
(!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. + */ +static bool enabled_lpis_allowed(void) +{ + /* Allow a kdump kernel */ + if (is_kdump_kernel()) + return true; + + return false; +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->lpi_enabled) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
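+ * Configuration updates are then flushed to the PoC by software
+ * instead; see RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING below.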
+ */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + if (gic_rdists->has_vlpis) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * sheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base); + WARN_ON(val & GICR_VPENDBASER_Dirty); + } + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->lpi_enabled = true; + pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->pend_page ? "allocated" : "reserved", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + unsigned long mpid, skt_id; + phys_addr_t its_phys_base; + u64 target; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. 
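+ * GITS_TYPER.PTA is clear, so MAPC takes the processor number
+ * from GICR_TYPER, shifted into the RDbase field, rather than a
+ * physical address.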
*/ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < its->device_ids); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). 
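+ * So simply walk every v4 ITS and make sure vpe_id has a backing
+ * entry in its VCPU table.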
+ */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!its->is_v4) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * its->ite_size; + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + kfree(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + set_bit(idx, dev->event_map.lpi_map); + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entierely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. 
Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} +static int its_cpumask_first(struct its_device *its_dev, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (0xff != skt) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (0 != skt_id) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_first(cpu_mask); + if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) + cpus = cpu; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) { + return cpu; + } + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) { + return i; + } + } else if (0xff != skt) { + pr_err("socket address: %d is out of range.", skt); + } + } + } + + return cpus; +} + +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data 
*d, bool reserve) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + + /* Bind the LPI to the first possible CPU */ + cpu = its_cpumask_first(its_dev, cpu_mask); + + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Map the GIC IRQ and event to the device */ + its_send_mapti(its_dev, d->hwirq, event); + return 0; +} + +static void its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; + int i; + + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(data); + } + + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditionned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + kfree(its_dev->event_map.col_map); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + mutex_unlock(&its->dev_alloc_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, + .activate = its_irq_domain_activate, + .deactivate = its_irq_domain_deactivate, +}; + +/* + * This is insane. + * + * If a GICv4 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. 
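+ * Pointing next_victim at the slot we are freeing at least lets the
+ * next map reuse it without evicting another vPE.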
+ */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int cpu = cpumask_first(mask_val); + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + */ + if (vpe->col_idx != cpu) { + int from = vpe->col_idx; + + vpe->col_idx = cpu; + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + } + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? 
GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base); + + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + vpe->idai = false; + vpe->pending_last = true; + } else { + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + } +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. + */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + } else { + its_vpe_send_cmd(vpe, its_send_inv); + } +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to unmask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing, let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... 
*/ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) + cpu_relax(); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page 
= vprop_page; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_vpe_irq_chip, vm->vpes[i]); + set_bit(i, bitmap); + } + + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i - 1); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* If we use the list map, we issue VMAPP on demand... */ + if (its_list_map) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we unmap the VPE once no VLPIs are + * associated with the VM. + */ + if (its_list_map) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, false); + } +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER or + * GITS_CBASER to not have UNPREDICTABLE results. 
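+	 *
+	 * The poll below gives the ITS roughly one second (1,000,000
+	 * iterations of udelay(1)) to report quiescence before we give
+	 * up and return -EBUSY.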
+ */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size */ + its->device_ids = 0x14; /* 20 bits, 8MB */ + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->ite_size = 16; + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (its->device_ids > ids) + its->device_ids = ids; + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... + */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. 
+ */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. 
+ */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + inner_domain->parent = its_parent; + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + inner_domain->flags |= its->msi_domain_flags; + info->ops = &its_msi_domain_ops; + info->data = its; + inner_domain->host_data = info; + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) { + pr_err("ITS: Can't allocate GICv4 proxy device array\n"); + return -ENOMEM; + } + + /* Use the last possible DevID */ + devid = GENMASK(its->device_ids - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init its_compute_its_list_map(struct resource *res, + void __iomem *its_base) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. 
+ */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &res->start); + return -EINVAL; + } + + ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its_base + GITS_CTLR); + ctlr = readl_relaxed(its_base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &res->start, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + struct its_node *its; + void __iomem *its_base; + u32 val, ctlr; + u64 baser, tmp, typer; + int err; + + its_base = ioremap(res->start, resource_size(res)); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + return -ENOMEM; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + err = -ENODEV; + goto out_unmap; + } + + err = its_force_quiescent(its_base); + if (err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) { + err = -ENOMEM; + goto out_unmap; + } + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); + its->device_ids = GITS_TYPER_DEVBITS(typer); + its->is_v4 = !!(typer & GITS_TYPER_VLPIS); + if (its->is_v4) { + if (!(typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(res, its_base); + if (err < 0) + goto out_free_its; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &res->start, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + } + } + + its->numa_node = numa_node; + + its->cmd_base = (void *)page_address(alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ))); + if (!its->cmd_base) { + err = -ENOMEM; + goto out_free_its; + } + its->cmd_write = its->cmd_base; + its->fwnode_handle = handle; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; + + its_enable_quirks(its); + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (its->is_v4) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(handle, its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_free_its: + kfree(its); +out_unmap: + iounmap(its_base); + pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-phytium-2500-its", }, + {}, +}; + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + node = acpi_map_pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) { + pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); + return; + } + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + 
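+/*
+ * Probe ITSes described by ACPI: for each MADT GIC translator entry we
+ * allocate a fwnode handle, register it as the IORT domain token for
+ * that translation_id, and probe the ITS at the base address reported
+ * by the firmware, using the NUMA node resolved from the SRAT parsing
+ * above (NUMA_NO_NODE if no ITS affinity entry was provided).
+ */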
+static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-Phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-Phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + err = its_probe_one(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!err) + return 0; + + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + int err; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + gic_rdists = rdists; + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) + has_v4 |= its->is_v4; + + if (has_v4 & rdists->has_vlpis) { + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 0000000000000000000000000000000000000000..f5a6260b3a2266191dbcbdb1fb0751f4951d4d71 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2135 @@ +/* + * Copyright (C) 2020 Phytium Corporation. + * Author: Wang Yinfeng + * Chen Baozi + * Mao hongbo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#define pr_fmt(fmt) "GIC-2500: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "irq-gic-common.h" + +#define MAX_MARS3_SOC_COUNT 8 +#define MARS3_ADDR_SKTID_SHIFT 41 + +struct gic_dist_desc { + void __iomem *dist_base; + phys_addr_t phys_base; + unsigned long size; +}; + +#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) + +struct redist_region { + void __iomem *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly; + +static unsigned int mars3_sockets_bitmap = 0x1; + +#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8) + +struct gic_chip_data { + struct fwnode_handle *fwnode; + void __iomem *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + bool has_rss; + unsigned int irq_nr; + struct partition_desc *ppi_descs[16]; +}; + +static struct gic_chip_data gic_data __read_mostly; +static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); + +/* + * The behaviours of RPR and PMR registers differ depending on the value of + * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the + * distributor and redistributors depends on whether security is enabled in the + * GIC. + * + * When security is enabled, non-secure priority values from the (re)distributor + * are presented to the GIC CPUIF as follow: + * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; + * + * If SCR_EL3.FIQ == 1, the values writen to/read from PMR and RPR at non-secure + * EL1 are subject to a similar operation thus matching the priorities presented + * from the (re)distributor when security is enabled. + * + * see GICv3/GICv4 Architecture Specification (IHI0069D): + * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt + * priorities. + * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 + * interrupt. + * + * For now, we only support pseudo-NMIs if we have non-secure view of + * priorities. + */ +static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); + + +/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ +static refcount_t ppi_nmi_refs[16]; + +static struct gic_kvm_info gic_v3_kvm_info; +static DEFINE_PER_CPU(bool, has_rss1); + +#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) + +/* Our default, arbitrary priority value. Linux only uses one anyway. */ +#define DEFAULT_PMR_VALUE 0xf0 + +static inline unsigned int gic_irq(struct irq_data *d) +{ + return d->hwirq; +} + +static inline int gic_irq_in_rdist(struct irq_data *d) +{ + return gic_irq(d) < 32; +} + +static inline void __iomem *gic_dist_base(struct irq_data *d) +{ + if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ + return gic_data_rdist_sgi_base(); + + if (d->hwirq <= 1023) /* SPI -> dist_base */ + return gic_data.dist_base; + + return NULL; +} + +static void gic_do_wait_for_rwp(void __iomem *base) +{ + u32 count = 1000000; /* 1s! 
*/ + + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + count--; + if (!count) { + pr_err_ratelimited("RWP timeout, gone fishing\n"); + return; + } + cpu_relax(); + udelay(1); + }; +} + +/* Wait for completion of a distributor change */ +static void gic_dist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data.dist_base); +} + +/* Wait for completion of a redistributor change */ +static void gic_redist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data_rdist_rd_base()); +} + +#ifdef CONFIG_ARM64 + +static u64 __maybe_unused gic_read_iar(void) +{ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); +} +#endif + +static void gic_enable_redist(bool enable) +{ + void __iomem *rbase; + u32 count = 1000000; /* 1s! */ + u32 val; + unsigned long mpidr; + int i; + + rbase = gic_data_rdist_rd_base(); + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? "wakeup" : "sleep"); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + /* Either Aff0 or Aff1 is not zero */ + if (mpidr & 0xffff) + return; + + /* Skip 64 Redistributors */ + rbase = rbase + 64 * SZ_128K; + + for (i = 0; i < 4; i++) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; + } + + count = 1000000; /* 1s! */ + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + + if (!count) + pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", + mpidr, 64 + i, enable ? 
"wakeup" : "sleep"); + + rbase = rbase + SZ_128K; + } +} + +/* + * Routines to disable, enable, EOI and route interrupts + */ +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + unsigned int skt; + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + } + + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); +} + +static void gic_poke_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + unsigned long mpidr; + void __iomem *rbase; + int i; + unsigned int skt; + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = base + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + writel_relaxed(mask, rbase + offset + (gic_irq(d) / 32) * 4); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); + gic_do_wait_for_rwp(base); + } + +} + +static void gic_mask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ICENABLER); +} + +static void gic_eoimode1_mask_irq(struct irq_data *d) +{ + gic_mask_irq(d); + /* + * When masking a forwarded interrupt, make sure it is + * deactivated as well. + * + * This ensures that an interrupt that is getting + * disabled/masked will not get "stuck", because there is + * noone to deactivate it (guest is being terminated). + */ + if (irqd_is_forwarded_to_vcpu(d)) + gic_poke_irq(d, GICD_ICACTIVER); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ISENABLER); +} + +static inline bool gic_supports_nmi(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + static_branch_likely(&supports_pseudo_nmis); +} + + +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GICD_ISPENDR : GICD_ICPENDR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + reg = val ? 
GICD_ICENABLER : GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + + writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d)); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq(d) < 32) { + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) { + refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, GICD_INT_NMI_PRI); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq(d) < 32) { + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + +static void gic_eoi_irq(struct irq_data *d) +{ + gic_write_eoir(gic_irq(d)); +} + +static void gic_eoimode1_eoi_irq(struct irq_data *d) +{ + /* + * No need to deactivate an LPI, or an interrupt that + * is is getting forwarded to a vcpu. 
+ */ + if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + return; + gic_write_dir(gic_irq(d)); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + unsigned int irq = gic_irq(d); + unsigned long mpidr; + int i; + void __iomem *base; + void __iomem *rbase; + unsigned int skt; + int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (irq < 16) + return -EINVAL; + + /* SPIs have restrictions on the supported types */ + if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && + type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + ret = gic_configure_irq(irq, type, base, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = base + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + ret = gic_configure_irq(irq, type, rbase, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + ret = gic_configure_irq(irq, type, base, NULL); + gic_do_wait_for_rwp(base); + } + + return ret; +} + +static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) +{ + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); + return 0; +} + +static u64 gic_mpidr_to_affinity(unsigned long mpidr) +{ + u64 aff; + + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + return aff; +} + +static void gic_deactivate_unhandled(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + gic_write_eoir(irqnr); + } +} + +static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + bool irqs_enabled = interrupts_enabled(regs); + int err; + + if (unlikely(irqnr < 16)) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); +#ifdef CONFIG_SMP + handle_IPI(irqnr, regs); +#endif + return; + } + + if (irqs_enabled) + nmi_enter(); + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + /* + * Leave the PSR.I bit set to prevent other NMIs to be + * received while handling this one. + * PSR.I will be restored when we ERET to the + * interrupted context. 
+ */ + err = handle_domain_nmi(gic_data.domain, irqnr, regs); + if (err) + gic_deactivate_unhandled(irqnr); + + if (irqs_enabled) + nmi_exit(); +} + +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + u32 irqnr; + + irqnr = gic_read_iar(); + + /* Check for special IDs first */ + if ((irqnr >= 1020 && irqnr <= 1023)) + return; + + if (gic_supports_nmi() && + unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) { + gic_handle_nmi(irqnr, regs); + return; + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { + int err; + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + else + isb(); + + err = handle_domain_irq(gic_data.domain, irqnr, regs); + if (err) { + WARN_ONCE(true, "Unexpected interrupt received!\n"); + gic_deactivate_unhandled(irqnr); + } + return; + } + if (irqnr < 16) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); +#ifdef CONFIG_SMP + /* + * Unlike GICv2, we don't need an smp_rmb() here. + * The control dependency from gic_read_iar to + * the ISB in gic_write_eoir is enough to ensure + * that any shared data read by handle_IPI will + * be read after the ACK. + */ + handle_IPI(irqnr, regs); +#else + WARN_ONCE(true, "Unexpected SGI received!\n"); +#endif + } +} + +static u32 gic_get_pribits(void) +{ + u32 pribits; + + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * looses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we're don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; +} + +static void __init gic_dist_init(void) +{ + unsigned int i; + u64 affinity; + void __iomem *base; + unsigned int skt; + + for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + base = mars3_gic_dists[skt].dist_base; + + /* Disable the distributor */ + writel_relaxed(0, base + GICD_CTLR); + gic_do_wait_for_rwp(base); + + /* + * Configure SPIs as non-secure Group-1. This will only matter + * if the GIC only has a single security state. This will not + * do the right thing if the kernel is running in secure mode, + * but that's not the intended use case anyway. + */ + for (i = 32; i < gic_data.irq_nr; i += 32) + writel_relaxed(~0, base + GICD_IGROUPR + i / 8); + + gic_dist_config(base, gic_data.irq_nr, NULL); + gic_do_wait_for_rwp(base); + + /* Enable distributor with ARE, Group1 */ + writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, + base + GICD_CTLR); + + /* + * Set all global interrupts to the boot CPU only. ARE must be + * enabled. 
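+	 *
+	 * Individual SPIs are re-routed later by gic_set_affinity(),
+	 * which writes the target CPU's affinity into the GICD_IROUTER
+	 * register of the socket that owns the interrupt.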
+ */ + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); + for (i = 32; i < gic_data.irq_nr; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr = cpu_logical_map(smp_processor_id()); + u64 typer; + u32 aff, aff2_skt, rdist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + rdist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != rdist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... */ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_vlpi_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + return 1; +} + +static void gic_update_vlpi_properties(void) +{ + gic_iterate_rdists(__gic_update_vlpi_properties); + pr_info("%sVLPI support, %sdirect LPI support\n", + !gic_data.rdists.has_vlpis ? "no " : "", + !gic_data.rdists.has_direct_lpi ? "no " : ""); +} + +/* Check whether it's single security state view */ +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = cpu_logical_map(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 pribits; + + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. 
+ * + * Kindly inform the luser. + */ + if (!gic_enable_sre()) + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); + + pribits = gic_get_pribits(); + + group0 = gic_has_group0(); + + /* Set priority mask register */ + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } else { + /* + * Mismatch configuration with boot CPU, the system is likely + * to die as interrupt masking will not work properly on all + * CPUs + */ + WARN_ON(gic_supports_nmi() && group0 && + !gic_dist_security_disabled()); + } + + /* + * Some firmwares hand over to the kernel with the BPR changed from + * its reset value (and with a value large enough to prevent + * any pre-emptive interrupts from working at all). Writing a zero + * to BPR restores is reset value. + */ + gic_write_bpr1(0); + + if (static_branch_likely(&supports_deactivate_key)) { + /* EOI drops priority only (mode 1) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); + } else { + /* EOI deactivates interrupt too (mode 0) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); + } + + /* Always whack Group0 before Group1 */ + if (group0) { + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP0R3_EL1); + write_gicreg(0, ICC_AP0R2_EL1); + case 6: + write_gicreg(0, ICC_AP0R1_EL1); + case 5: + case 4: + write_gicreg(0, ICC_AP0R0_EL1); + } + + isb(); + } + + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP1R3_EL1); + write_gicreg(0, ICC_AP1R2_EL1); + case 6: + write_gicreg(0, ICC_AP1R1_EL1); + case 5: + case 4: + write_gicreg(0, ICC_AP1R0_EL1); + } + + isb(); + + /* ... and let's hit the road... */ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss1, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss1, i) && per_cpu(has_rss1, cpu); + + need_rss |= MPIDR_RS(cpu_logical_map(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)cpu_logical_map(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. 
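+ *
+ * Either way, SGIs targeting CPUs whose Aff0 is 16 or above may not
+ * reach their intended destination, hence the critical warning below.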
+ */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return strtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + unsigned long mpidr; + int i; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { + rbase = rbase + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + + rbase = rbase + SZ_128K; + } + } + + /* initialise system registers */ + gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if (gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr = cpu_logical_map(cpu); + u16 tlist = 0; + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = cpu_logical_map(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + + if (WARN_ON(irq >= 16)) + return; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. 
+ */ + wmb(); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, irq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void gic_smp_init(void) +{ + set_smp_cross_call(gic_raise_softirq); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gic_phytium_2500:starting", + gic_starting_cpu, NULL); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + if (0 != irq_skt) { + for (i = 0; i < irq_skt; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (irq_skt == skt) { + return cpu; + } + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { + if (irq_skt == skt) { + return i; + } + } else if (0xff != skt) { + pr_err("socket address: %d is out of range.", skt); + } + } + } + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu, skt; + void __iomem *reg; + int enabled; + u64 val; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = mars3_gic_dists[skt].dist_base + GICD_IROUTER + (gic_irq(d) * 8); + val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); + + gic_write_irouter(val, reg); + + /* + * If the interrupt was enabled, enabled it again. Otherwise, + * just wait for the distributor to have digested our changes. 
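gic_cpumask_select() above derives a socket number from MPIDR Aff2 (bits [23:16]) and steers the interrupt towards a CPU on the socket that owns the distributor instance. A small standalone sketch of that per-socket bookkeeping, using the same 0xff "invalid socket" convention; the sample MPIDR values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define MAX_SOCKETS 8

static inline unsigned int socket_of(uint64_t mpidr)
{
        return (mpidr >> 16) & 0xff;    /* Aff2 carries the socket id */
}

/* Count CPUs per socket, as the skt_cpu_cnt[] loop does above. */
static void count_per_socket(const uint64_t *mpidr, int nr,
                             unsigned int counts[MAX_SOCKETS])
{
        for (int i = 0; i < nr; i++) {
                unsigned int skt = socket_of(mpidr[i]);

                if (skt < MAX_SOCKETS)
                        counts[skt]++;
        }
}

int main(void)
{
        /* Two CPUs on socket 0, two on socket 1. */
        uint64_t map[] = { 0x000000, 0x000100, 0x010000, 0x010100 };
        unsigned int counts[MAX_SOCKETS] = { 0 };

        count_per_socket(map, 4, counts);
        printf("socket0=%u socket1=%u\n", counts[0], counts[1]);
        return 0;
}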
+ */ + if (enabled) + gic_unmask_irq(d); + else + gic_dist_wait_for_rwp(); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#else +#define gic_set_affinity NULL +#define gic_smp_init() do { } while (0) +#endif + +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + +#ifdef CONFIG_CPU_PM +static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + if (cmd == CPU_PM_EXIT) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); + } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { + gic_write_grpen1(0); + gic_enable_redist(false); + } + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_pm_notifier_block = { + .notifier_call = gic_cpu_pm_notifier, +}; + +static void gic_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); +} + +#else +static inline void gic_cpu_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +static struct irq_chip gic_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static struct irq_chip gic_eoimode1_chip = { + .name = "GIC-Phytium-2500", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; + + /* SGIs are private to the core kernel */ + if (hw < 16) + return -EPERM; + /* Nothing here */ + if (hw >= gic_data.irq_nr && hw < 8192) + return -EPERM; + /* Off limits */ + if (hw >= GIC_ID_NR) + return -EPERM; + + /* PPIs */ + if (hw < 32) { + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); + irq_set_status_flags(irq, IRQ_NOAUTOEN); + } + /* SPIs */ + if (hw >= 32 && hw < gic_data.irq_nr) { + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + } + /* LPIs */ + if (hw >= 8192 && hw < GIC_ID_NR) { + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_edge_irq, NULL, NULL); + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +#define 
GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1] + 16; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitionned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. + */ + if (fwspec->param_count >= 4 && + fwspec->param[0] == 1 && fwspec->param[3] != 0) + return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); + + return d == gic_data.domain; +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct device_node *np; + int ret; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + for (i = 0; i < 16; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + /* + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). 
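gic_irq_domain_translate() above maps the three-cell devicetree interrupt specifier onto the GIC's flat hwirq space: cell 0 selects SPI (offset 32), PPI (offset 16) or LPI, cell 1 is the index, and cell 2 the trigger type. A standalone sketch of just that offset arithmetic; the constants are the architectural INTID layout, not values taken from this file:

#include <stdint.h>
#include <stdio.h>

enum { DT_SPI = 0, DT_PPI = 1 };

/* Return the hwirq for a (type, index) pair, or -1 for an unknown type. */
static long dt_cells_to_hwirq(uint32_t type, uint32_t index)
{
        switch (type) {
        case DT_SPI:
                return 32 + index;      /* SPIs start at INTID 32 */
        case DT_PPI:
                return 16 + index;      /* PPIs start at INTID 16 */
        default:
                return -1;
        }
}

int main(void)
{
        printf("SPI 10 -> hwirq %ld\n", dt_cells_to_hwirq(DT_SPI, 10)); /* 42 */
        printf("PPI 9  -> hwirq %ld\n", dt_cells_to_hwirq(DT_PPI, 9));  /* 25 */
        return 0;
}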
+ */ + + static_branch_enable(&supports_pseudo_nmis); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int gic_irqs; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + gic_irqs = GICD_TYPER_IRQS(typer); + if (gic_irqs > 1020) + gic_irqs = 1020; + gic_data.irq_nr = gic_irqs; + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + pr_info("Distributor has %sRange Selector support\n", + gic_data.has_rss ? "" : "no "); + + + set_handle_irq(gic_handle_irq); + + gic_update_vlpi_properties(); + + /* + * NMI backtrace DFX need check nmi support, this should be + * called before enable NMI backtrace DFX. 
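gic_init_bases() above derives the number of wired interrupts from GICD_TYPER.ITLinesNumber and clamps it at 1020, the architectural ceiling for SGIs + PPIs + SPIs. A sketch of that computation, assuming the usual GICD_TYPER_IRQS() expansion of 32 * (ITLinesNumber + 1):

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the kernel macro: 32 * (ITLinesNumber + 1). */
static unsigned int gicd_typer_irqs(uint32_t typer)
{
        return ((typer & 0x1f) + 1) * 32;
}

int main(void)
{
        uint32_t typer = 0x1f;                  /* ITLinesNumber at its maximum */
        unsigned int nr = gicd_typer_irqs(typer);

        if (nr > 1020)                          /* clamp as gic_init_bases() does */
                nr = 1020;
        printf("irq_nr = %u\n", nr);            /* 1020, not 1024 */
        return 0;
}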
+ */ + if (gic_prio_masking_enabled()) { + if (!gic_has_group0() || gic_dist_security_disabled()) + gic_enable_nmi_support(); + else + pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); + } + + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + } + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %s[%d] { ", + child_part->name, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + pr_cont("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + } + + pr_cont("}\n"); + part_idx++; + } + + for (i = 0; i < 16; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + void __iomem *dist_base; + struct redist_region *rdist_regs; + u64 redist_stride; + u32 nr_redist_regions; 
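The pseudo-NMI decision at the top of this hunk only switches NMI support on when priority masking is active and the priority view is actually usable, that is when either Group 0 is not visible to the kernel or GIC security is globally disabled; otherwise the SCR_EL3.FIQ warning fires. A compact truth-table sketch of that condition, with hypothetical parameter names:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition guarding gic_enable_nmi_support() above. */
static bool can_enable_pseudo_nmi(bool prio_masking, bool has_group0,
                                  bool dist_security_disabled)
{
        if (!prio_masking)
                return false;
        return !has_group0 || dist_security_disabled;
}

int main(void)
{
        /* Group 0 present and security enabled: pseudo-NMIs stay off. */
        printf("%d\n", can_enable_pseudo_nmi(true, true, false));  /* 0 */
        printf("%d\n", can_enable_pseudo_nmi(true, false, false)); /* 1 */
        return 0;
}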
+ int err, i, skt; + struct resource res; + + dist_base = of_iomap(node, 0); + if (!dist_base) { + pr_err("%pOF: unable to map gic dist registers\n", node); + return -ENXIO; + } + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3-soc-bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + struct resource res; + int ret; + + ret = of_address_to_resource(node, 1 + i, &res); + rdist_regs[i].redist_base = of_iomap(node, 1 + i); + if (ret || !rdist_regs[i].redist_base) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, + redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_phyt_2500, "arm,gic-phytium-2500", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) { + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + } + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + 
struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem *redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (acpi_data.single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + + /* + * If GICC is enabled and has valid gicr base address, then it means + * GICR base is presented via GICC + */ + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; + return 0; + } + + /* + * It's perfectly valid firmware can pass disabled GICC entry, driver + * should not treat as errors, skip the entry instead of probe fail. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + return -ENODEV; +} + +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. It is not allowed + * to mix redistributor description, GICR and GICC subtables have to be + * mutually exclusive. 
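When redistributors are described per CPU through GICC entries, gic_acpi_parse_madt_gicc() above has to infer the region size from the distributor's architecture revision: two 64K frames (RD_base and SGI_base) for GICv3, four once the GICv4 VLPI frames are present. A small sketch of that size selection, with SZ_64K spelled out so it compiles standalone:

#include <stddef.h>
#include <stdio.h>

#define SZ_64K (64 * 1024)

enum gic_arch { GIC_ARCH_V3, GIC_ARCH_V4 };

/* RD_base + SGI_base frames, plus the VLPI/reserved frames on GICv4. */
static size_t gicr_frame_size(enum gic_arch arch)
{
        return (arch == GIC_ARCH_V4) ? 4 * SZ_64K : 2 * SZ_64K;
}

int main(void)
{
        printf("v3: %zu bytes, v4: %zu bytes\n",
               gicr_frame_size(GIC_ARCH_V3), gicr_frame_size(GIC_ARCH_V4));
        return 0;
}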
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_set_kvm_info(&gic_v3_kvm_info); +} + +static int __init +gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + struct fwnode_handle *domain_handle; + size_t size; + int i, err, skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + 
mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (is_kdump_kernel()) { + mars3_sockets_bitmap = 0x3; + } + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if ((((unsigned int)1 << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); + if (!domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, + acpi_data.nr_redist_regions, 0, domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255453937d11204f879a71d48068abc..38fab02ffe9d05e2a89cb6ca7159ce9b685efad9 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index f5fe0100f9ffd043d251d96ce473775bfdafd3b4..de14e06fd9ec86c48d340a9a1da78f5d33af5efa 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -446,7 +446,7 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) } static int __init -acpi_parse_madt_msi(struct acpi_subtable_header *header, +acpi_parse_madt_msi(union acpi_subtable_headers *header, const unsigned long end) { int ret; diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 8d6d009d1d586f9271c508138bdbc55c064c8f9e..c81d5b81da56d0f54ef73b0415470ea6270e4a61 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -159,7 +159,7 @@ static int __init its_pci_of_msi_init(void) #ifdef CONFIG_ACPI static int __init 
-its_pci_msi_parse_madt(struct acpi_subtable_header *header, +its_pci_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 7b8e87b493fe5defd6ebd5968f92c856c8fdcc8c..9cdcda5bb3bd50b8a7212589fcb4481a3874cd02 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -117,7 +117,7 @@ static int __init its_pmsi_init_one(struct fwnode_handle *fwnode, #ifdef CONFIG_ACPI static int __init -its_pmsi_parse_madt(struct acpi_subtable_header *header, +its_pmsi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c2df341ff6fafd83df6463669c9ad6a1ceac56c9..65306a1ed3dccde312b112708c0475596136721c 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -17,15 +17,19 @@ #include #include +#include #include #include +#include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -35,6 +39,7 @@ #include #include #include +#include #include #include @@ -46,12 +51,17 @@ #include "irq-gic-common.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) -#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) +#define ITS_FLAGS_WORKAROUND_HISILICON_162100801 (1ULL << 3) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) static u32 lpi_id_bits; @@ -64,7 +74,7 @@ static u32 lpi_id_bits; #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) -#define LPI_PROP_DEFAULT_PRIO 0xa0 +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI /* * Collection structure - just an ID, and a redistributor address to @@ -93,9 +103,14 @@ struct its_device; * The ITS structure - contains most of the infrastructure, with the * top-level MSI domain, the command queue, the collections, and the * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
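Among the hunks above, LPI_PROP_DEFAULT_PRIO changes from a hard-coded 0xa0 to GICD_INT_DEF_PRI. Each LPI is configured by one byte in the property table: priority in the upper bits, Group-1 and Enable in the two low bits, which is what gic_reset_prop_table() and lpi_write_config() manipulate later in this file. A sketch of how that byte is composed; the constant values are assumed to match the kernel headers (GICD_INT_DEF_PRI is 0xa0):

#include <stdint.h>
#include <stdio.h>

#define LPI_PROP_DEFAULT_PRIO   0xa0    /* GICD_INT_DEF_PRI */
#define LPI_PROP_GROUP1         (1 << 1)
#define LPI_PROP_ENABLED        (1 << 0)

/* One byte per LPI in the property table: priority + group + enable. */
static uint8_t lpi_config(uint8_t prio, int enabled)
{
        return (prio & 0xfc) | LPI_PROP_GROUP1 | (enabled ? LPI_PROP_ENABLED : 0);
}

int main(void)
{
        printf("reset value: %#x\n",
               (unsigned int)lpi_config(LPI_PROP_DEFAULT_PRIO, 0)); /* 0xa2 */
        printf("enabled:     %#x\n",
               (unsigned int)lpi_config(LPI_PROP_DEFAULT_PRIO, 1)); /* 0xa3 */
        return 0;
}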
*/ struct its_node { raw_spinlock_t lock; + struct mutex dev_alloc_lock; struct list_head entry; void __iomem *base; phys_addr_t phys_base; @@ -105,6 +120,7 @@ struct its_node { struct its_collection *collections; struct fwnode_handle *fwnode_handle; u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; u64 cbaser_save; u32 ctlr_save; struct list_head its_device_list; @@ -115,10 +131,11 @@ struct its_node { int numa_node; unsigned int msi_domain_flags; u32 pre_its_base; /* for Socionext Synquacer */ - bool is_v4; int vlpi_redist_offset; }; +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) #define ITS_ITT_ALIGN SZ_256 /* The maximum number of VPEID bits supported by VLPI commands */ @@ -133,7 +150,7 @@ struct event_lpi_map { u16 *col_map; irq_hw_number_t lpi_base; int nr_lpis; - struct mutex vlpi_lock; + raw_spinlock_t vlpi_lock; struct its_vm *vm; struct its_vlpi_map *vlpi_maps; int nr_vlpis; @@ -152,6 +169,7 @@ struct its_device { void *itt; u32 nr_ites; u32 device_id; + bool shared; }; static struct { @@ -161,6 +179,13 @@ static struct { int next_victim; } vpe_proxy; +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + static LIST_HEAD(its_nodes); static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; @@ -172,10 +197,35 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +static bool init_all_gicr; +static int nr_gicr; +#else +#define init_all_gicr false +#define nr_gicr 0 +#endif + #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (vm->vlpi_count[its->list_nr]) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + static struct its_collection *dev_event_to_col(struct its_device *its_dev, u32 event) { @@ -186,7 +236,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev, static struct its_collection *valid_col(struct its_collection *col) { - if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) return NULL; return col; @@ -556,11 +606,11 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_INVALL); - its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); its_fixup_cmd(cmd); - return NULL; + return desc->its_invall_cmd.col; } static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, @@ -734,32 +784,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) } static int its_wait_for_range_completion(struct its_node *its, - struct its_cmd_block *from, + u64 prev_idx, struct its_cmd_block *to) { - u64 rd_idx, from_idx, to_idx; + u64 rd_idx, to_idx, linear_idx; u32 count = 1000000; /* 1s! 
*/ - from_idx = its_cmd_ptr_to_offset(its, from); + /* Linearize to_idx if the command set has wrapped around */ to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; while (1) { + s64 delta; + rd_idx = readl_relaxed(its->base + GITS_CREADR); - /* Direct case */ - if (from_idx < to_idx && rd_idx >= to_idx) - break; + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. + */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; - /* Wrapped case */ - if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) + linear_idx += delta; + if (linear_idx >= to_idx) break; count--; if (!count) { - pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", - from_idx, to_idx, rd_idx); + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); return -1; } + prev_idx = rd_idx; cpu_relax(); udelay(1); } @@ -776,6 +837,7 @@ void name(struct its_node *its, \ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ synctype *sync_obj; \ unsigned long flags; \ + u64 rd_idx; \ \ raw_spin_lock_irqsave(&its->lock, flags); \ \ @@ -797,10 +859,11 @@ void name(struct its_node *its, \ } \ \ post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ next_cmd = its_post_commands(its); \ raw_spin_unlock_irqrestore(&its->lock, flags); \ \ - if (its_wait_for_range_completion(its, cmd, next_cmd)) \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ } @@ -962,19 +1025,25 @@ static void its_send_vmapp(struct its_node *its, its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); } -static void its_send_vmovp(struct its_vpe *vpe) +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) { struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; struct its_node *its; unsigned long flags; int col_id = vpe->col_idx; desc.its_vmovp_cmd.vpe = vpe; - desc.its_vmovp_cmd.its_list = (u16)its_list_map; if (!its_list_map) { its = list_first_entry(&its_nodes, struct its_node, entry); - desc.its_vmovp_cmd.seq_num = 0; desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); return; @@ -991,10 +1060,11 @@ static void its_send_vmovp(struct its_vpe *vpe) raw_spin_lock_irqsave(&vmovp_lock, flags); desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); /* Emit VMOVPs */ list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; if (!vpe->its_vm->vlpi_count[its->list_nr]) @@ -1002,19 +1072,14 @@ static void its_send_vmovp(struct its_vpe *vpe) desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + if (is_v4_1(its) && (its->flags & + ITS_FLAGS_WORKAROUND_HISILICON_162100801)) + its_send_vinvall(its, vpe); } raw_spin_unlock_irqrestore(&vmovp_lock, flags); } -static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) -{ - struct its_cmd_desc desc; - - desc.its_vinvall_cmd.vpe = vpe; - its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); -} - /* * irqchip functions - assumes MSI, mostly. 
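The its_wait_for_range_completion() rework above stops comparing raw queue offsets and instead accumulates read-pointer progress, so one wrap of the circular command queue cannot be mistaken for completion. A standalone sketch of that linearization step; ITS_CMD_QUEUE_SZ here is a stand-in for the real queue size:

#include <stdint.h>
#include <stdio.h>

#define ITS_CMD_QUEUE_SZ (64 * 1024)    /* stand-in for the real queue size */

/*
 * Advance a linearized position by the progress the hardware read
 * pointer made since prev_idx, accounting for a single wrap-around.
 */
static uint64_t linearize(uint64_t linear, uint64_t prev_idx, uint64_t rd_idx)
{
        int64_t delta = (int64_t)rd_idx - (int64_t)prev_idx;

        if (rd_idx < prev_idx)          /* the read pointer wrapped */
                delta += ITS_CMD_QUEUE_SZ;
        return linear + delta;
}

int main(void)
{
        /* Started at offset 0xff80, hardware now reports 0x40: it wrapped. */
        uint64_t linear = linearize(0xff80, 0xff80, 0x40);

        printf("progress: %llu bytes\n",
               (unsigned long long)(linear - 0xff80));       /* 192 */
        return 0;
}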
*/ @@ -1028,7 +1093,7 @@ static inline u32 its_get_event_id(struct irq_data *d) static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { irq_hw_number_t hwirq; - struct page *prop_page; + void *va; u8 *cfg; if (irqd_is_forwarded_to_vcpu(d)) { @@ -1036,7 +1101,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) u32 event = its_get_event_id(d); struct its_vlpi_map *map; - prop_page = its_dev->event_map.vm->vprop_page; + va = page_address(its_dev->event_map.vm->vprop_page); map = &its_dev->event_map.vlpi_maps[event]; hwirq = map->vintid; @@ -1044,11 +1109,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) map->properties &= ~clr; map->properties |= set | LPI_PROP_GROUP1; } else { - prop_page = gic_rdists->prop_page; + va = gic_rdists->prop_table_va; hwirq = d->hwirq; } - cfg = page_address(prop_page) + hwirq - 8192; + cfg = va + hwirq - 8192; *cfg &= ~clr; *cfg |= set | LPI_PROP_GROUP1; @@ -1110,42 +1175,167 @@ static void its_unmask_irq(struct irq_data *d) lpi_update_config(d, 0, LPI_PROP_ENABLED); } +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpumask_var_t tmpmask; + int cpu, node; + + if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) + return -ENOMEM; + + node = its_dev->its->numa_node; + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. 
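its_select_cpu() above ultimately relies on cpumask_pick_least_loaded(), which picks the candidate with the smallest per-CPU LPI count so that managed and unmanaged interrupts spread out instead of piling onto CPU0. A sketch of that selection over a plain array; the sample counts are made up:

#include <limits.h>
#include <stdio.h>

/* Pick the index with the smallest count; ties go to the first candidate. */
static int pick_least_loaded(const int *lpi_count, int nr)
{
        int best = -1, best_count = INT_MAX;

        for (int i = 0; i < nr; i++) {
                if (lpi_count[i] < best_count) {
                        best = i;
                        best_count = lpi_count[i];
                }
        }
        return best;
}

int main(void)
{
        int lpis_per_cpu[] = { 12, 3, 3, 7 };   /* hypothetical per-CPU LPI counts */

        printf("target cpu: %d\n", pick_least_loaded(lpis_per_cpu, 4)); /* 1 */
        return 0;
}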
+ */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_copy(tmpmask, aff_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + free_cpumask_var(tmpmask); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu; - const struct cpumask *cpu_mask = cpu_online_mask; + unsigned int max_cpu; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + int cpu, prev_cpu; /* A forwarded interrupt should use irq_set_vcpu_affinity */ if (irqd_is_forwarded_to_vcpu(d)) return -EINVAL; - /* lpi cannot be routed to a redistributor that is on a foreign node */ - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { - if (its_dev->its->numa_node >= 0) { - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - if (!cpumask_intersects(mask_val, cpu_mask)) - return -EINVAL; - } - } + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); - cpu = cpumask_any_and(mask_val, cpu_mask); + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); - if (cpu >= nr_cpu_ids) - return -EINVAL; + max_cpu = nr_cpu_ids; + + if (init_all_gicr) { + max_cpu = nr_gicr; + } + + if (cpu < 0 || cpu >= max_cpu) + goto err; /* don't set the affinity when the target cpu is same as current one */ - if (cpu != its_dev->event_map.col_map[id]) { + if (cpu != prev_cpu) { target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); its_dev->event_map.col_map[id] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); } + its_inc_lpi_count(d, cpu); + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; + } static u64 its_irq_get_msi_base(struct its_device *its_dev) @@ -1168,6 +1358,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_map_msi_msg(d->irq, msg); } @@ -1252,13 +1447,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) if (!info->map) return -EINVAL; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), - GFP_KERNEL); + GFP_ATOMIC); if (!maps) { ret = -ENOMEM; goto out; @@ -1301,7 +1496,7 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) } out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); 
return ret; } @@ -1311,7 +1506,7 @@ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) u32 event = its_get_event_id(d); int ret = 0; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); if (!its_dev->event_map.vm || !its_dev->event_map.vlpi_maps[event].vm) { @@ -1323,7 +1518,7 @@ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) *info->map = its_dev->event_map.vlpi_maps[event]; out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); return ret; } @@ -1333,7 +1528,7 @@ static int its_vlpi_unmap(struct irq_data *d) u32 event = its_get_event_id(d); int ret = 0; - mutex_lock(&its_dev->event_map.vlpi_lock); + raw_spin_lock(&its_dev->event_map.vlpi_lock); if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { ret = -EINVAL; @@ -1363,7 +1558,7 @@ static int its_vlpi_unmap(struct irq_data *d) } out: - mutex_unlock(&its_dev->event_map.vlpi_lock); + raw_spin_unlock(&its_dev->event_map.vlpi_lock); return ret; } @@ -1389,7 +1584,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) struct its_cmd_info *info = vcpu_info; /* Need a v4 ITS */ - if (!its_dev->its->is_v4) + if (!is_v4(its_dev->its)) return -EINVAL; /* Unmap request? */ @@ -1412,6 +1607,11 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) } } +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_irq_chip = { .name = "ITS", .irq_mask = its_mask_irq, @@ -1420,6 +1620,7 @@ static struct irq_chip its_irq_chip = { .irq_set_affinity = its_set_affinity, .irq_compose_msi_msg = its_irq_compose_msi_msg, .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; @@ -1471,7 +1672,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) ra = container_of(a, struct lpi_range, entry); rb = container_of(b, struct lpi_range, entry); - return rb->base_id - ra->base_id; + return ra->base_id - rb->base_id; } static void merge_lpi_ranges(void) @@ -1575,6 +1776,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) nr_irqs /= 2; } while (nr_irqs > 0); + if (!nr_irqs) + err = -ENOSPC; + if (err) goto out; @@ -1597,6 +1801,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) kfree(bitmap); } +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page; @@ -1605,13 +1818,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) if (!prop_page) return NULL; - /* Priority 0xa0, Group-1, disabled */ - memset(page_address(prop_page), - LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, - LPI_PROPBASE_SZ); - - /* Make sure the GIC will observe the written configuration */ - gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); + gic_reset_prop_table(page_address(prop_page)); return prop_page; } @@ -1622,20 +1829,74 @@ static void its_free_prop_table(struct page *prop_page) get_order(LPI_PROPBASE_SZ)); } -static int __init its_alloc_lpi_tables(void) +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) 
{ - phys_addr_t paddr; + phys_addr_t start, end, addr_end; + u64 i; - lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), - ITS_MAX_LPI_NRBITS); - gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); - if (!gic_rdists->prop_page) { - pr_err("Failed to allocate PROPBASE\n"); - return -ENOMEM; + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_region(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; } - paddr = page_to_phys(gic_rdists->prop_page); - pr_info("GIC: using LPI property table @%pa\n", &paddr); + /* Not found, not a good sign... */ + pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GICv3: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); return its_lpi_init(lpi_id_bits); } @@ -1667,17 +1928,16 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser, } static int its_setup_baser(struct its_node *its, struct its_baser *baser, - u64 cache, u64 shr, u32 psz, u32 order, - bool indirect) + u64 cache, u64 shr, u32 order, bool indirect) { u64 val = its_read_baser(its, baser); u64 esz = GITS_BASER_ENTRY_SIZE(val); u64 type = GITS_BASER_TYPE(val); u64 baser_phys, tmp; - u32 alloc_pages; + u32 alloc_pages, psz; void *base; -retry_alloc_baser: + psz = baser->psz; alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); if (alloc_pages > GITS_BASER_PAGES_MAX) { pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", @@ -1687,7 +1947,8 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, order = get_order(GITS_BASER_PAGES_MAX * psz); } - base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + base = (void *)page_address(alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, order)); if (!base) return -ENOMEM; @@ -1749,25 +2010,6 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, goto retry_baser; } - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { - /* - * Page size didn't stick. Let's try a smaller - * size and retry. If we reach 4K, then - * something is horribly wrong... 
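its_setup_lpi_prop_table() above can inherit a property table that firmware or a previous (crashed) kernel already programmed: the physical address lives in GICR_PROPBASER bits [51:12] and the ID width in the low bits. A sketch of pulling those fields out of a raw register value; GENMASK_ULL is redefined locally so the snippet compiles standalone, the mask values are assumed to match the kernel headers, and the register value itself is made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define GICR_PROPBASER_IDBITS_MASK      0x1f

int main(void)
{
        uint64_t propbaser = 0x80f10000ULL | 0x0f;      /* hypothetical value */
        uint64_t pa = propbaser & GENMASK_ULL(51, 12);
        unsigned int id_bits = (propbaser & GICR_PROPBASER_IDBITS_MASK) + 1;

        printf("prop table @%#" PRIx64 ", %u ID bits\n", pa, id_bits);
        return 0;
}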
- */ - free_pages((unsigned long)base, order); - baser->base = NULL; - - switch (psz) { - case SZ_16K: - psz = SZ_4K; - goto retry_alloc_baser; - case SZ_64K: - psz = SZ_16K; - goto retry_alloc_baser; - } - } - if (val != tmp) { pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", &its->phys_base, its_base_type_string[type], @@ -1793,13 +2035,14 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, static bool its_parse_indirect_baser(struct its_node *its, struct its_baser *baser, - u32 psz, u32 *order, u32 ids) + u32 *order, u32 ids) { u64 tmp = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(tmp); u64 esz = GITS_BASER_ENTRY_SIZE(tmp); u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; u32 new_order = *order; + u32 psz = baser->psz; bool indirect = false; /* No need to enable Indirection if memory requirement < (psz*2)bytes */ @@ -1858,11 +2101,58 @@ static void its_free_tables(struct its_node *its) } } +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + static int its_alloc_tables(struct its_node *its) { u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_RaWaWb; - u32 psz = SZ_64K; int err, i; if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) @@ -1873,32 +2163,42 @@ static int its_alloc_tables(struct its_node *its) struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); u64 type = GITS_BASER_TYPE(val); - u32 order = get_order(psz); bool indirect = false; + u32 order; - switch (type) { - case GITS_BASER_TYPE_NONE: + if (type == GITS_BASER_TYPE_NONE) continue; + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { case GITS_BASER_TYPE_DEVICE: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, its->device_ids); + break; + case GITS_BASER_TYPE_VCPU: - indirect = its_parse_indirect_baser(its, baser, - psz, &order, + indirect = its_parse_indirect_baser(its, baser, &order, ITS_MAX_VPEID_BITS); break; + case GITS_BASER_TYPE_COLLECTION: + indirect = its_parse_indirect_baser(its, baser, &order, + order_base_2(num_possible_cpus())); + break; } - err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + err = its_setup_baser(its, baser, cache, shr, order, indirect); if (err < 0) { its_free_tables(its); return err; } /* Update settings which will be used for next BASERn */ - psz = baser->psz; cache = baser->val & GITS_BASER_CACHEABILITY_MASK; shr = baser->val & GITS_BASER_SHAREABILITY_MASK; } @@ -1909,8 +2209,12 @@ static int its_alloc_tables(struct its_node *its) static int its_alloc_collections(struct its_node *its) { int i; + int cpu_nr = nr_cpu_ids; - its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), 
+ if (init_all_gicr) + cpu_nr = nr_gicr; + + its->collections = kcalloc(cpu_nr, sizeof(*its->collections), GFP_KERNEL); if (!its->collections) return -ENOMEM; @@ -1924,12 +2228,9 @@ static int its_alloc_collections(struct its_node *its) static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page; - /* - * The pending pages have to be at least 64kB aligned, - * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. - */ + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL; @@ -1941,36 +2242,126 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) static void its_free_pending_table(struct page *pt) { - free_pages((unsigned long)page_address(pt), - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GICv3: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +{ + u32 count = 1000000; /* 1s! 
*/ + bool clean; + u64 val; + + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + return val; } static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); struct page *pend_page; + phys_addr_t paddr; u64 val, tmp; - /* If we didn't allocate the pending table yet, do it now */ - pend_page = gic_data_rdist()->pend_page; - if (!pend_page) { - phys_addr_t paddr; + if (gic_data_rdist()->lpi_enabled) + return; - pend_page = its_allocate_pending_table(GFP_NOWAIT); - if (!pend_page) { - pr_err("Failed to allocate PENDBASE for CPU%d\n", - smp_processor_id()); - return; - } + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); - paddr = page_to_phys(pend_page); - pr_info("CPU%d: using LPI pending table @%pa\n", - smp_processor_id(), &paddr); - gic_data_rdist()->pend_page = pend_page; + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; } + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + /* set PROPBASE */ - val = (page_to_phys(gic_rdists->prop_page) | + val = (gic_rdists->prop_table_pa | GICR_PROPBASER_InnerShareable | GICR_PROPBASER_RaWaWb | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); @@ -2018,8 +2409,38 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); + if (gic_rdists->has_vlpis) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * sheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base); + WARN_ON(val & GICR_VPENDBASER_Dirty); + } + /* Make sure the GIC has seen the above */ dsb(sy); +out: + gic_data_rdist()->lpi_enabled = true; + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->pend_page ? 
"allocated" : "reserved", + &paddr); } static void its_cpu_init_collection(struct its_node *its) @@ -2073,6 +2494,195 @@ static void its_cpu_init_collections(void) raw_spin_unlock(&its_lock); } +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +void its_set_gicr_nr(int nr) +{ + nr_gicr = nr; +} + +int its_gicr_nr(void) +{ + return nr_gicr; +} + +void its_enable_init_all_gicr(void) +{ + init_all_gicr = true; +} + +bool its_init_all_gicr(void) +{ + return init_all_gicr; +} + +static void its_cpu_init_lpis_others(void __iomem *rbase, int cpu) +{ + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (!init_all_gicr) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; + } + + /* If we didn't allocate the pending table yet, do it now */ + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for GICR:%p\n", rbase); + return; + } + + paddr = page_to_phys(pend_page); + pr_info("GICR:%p using LPI pending table @%pa\n", + rbase, &paddr); + + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + + /* Disable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* + * Make sure any change to the table is observable by the GIC. + */ + dsb(sy); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + cpu, pend_page ? 
"allocated" : "reserved", &paddr); +} + +static void its_cpu_init_collection_others(void __iomem *rbase, + phys_addr_t phys_base, int cpu) +{ + struct its_node *its; + + if (!init_all_gicr) + return; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) { + u64 target; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = phys_base; + } else { + /* + * This ITS wants a linear CPU number. + */ + target = gic_read_typer(rbase + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); + } + + raw_spin_unlock(&its_lock); +} + +int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int cpu) +{ + if (!list_empty(&its_nodes)) { + if (!(gic_read_typer(base + GICR_TYPER) & GICR_TYPER_PLPIS)) { + pr_err("GICR:%p: LPIs not supported\n", base); + return -ENXIO; + } + + its_cpu_init_lpis_others(base, cpu); + its_cpu_init_collection_others(base, phys_base, cpu); + } + + return 0; +} +#endif + static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; @@ -2104,7 +2714,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type) return NULL; } -static bool its_alloc_table_entry(struct its_baser *baser, u32 id) +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) { struct page *page; u32 esz, idx; @@ -2124,7 +2735,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id) /* Allocate memory for 2nd level table */ if (!table[idx]) { - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); if (!page) return false; @@ -2155,7 +2767,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id) if (!baser) return (ilog2(dev_id) < its->device_ids); - return its_alloc_table_entry(baser, dev_id); + return its_alloc_table_entry(its, baser, dev_id); } static bool its_alloc_vpe_table(u32 vpe_id) @@ -2172,14 +2784,14 @@ static bool its_alloc_vpe_table(u32 vpe_id) list_for_each_entry(its, &its_nodes, entry) { struct its_baser *baser; - if (!its->is_v4) + if (!is_v4(its)) continue; baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); if (!baser) return false; - if (!its_alloc_table_entry(baser, vpe_id)) + if (!its_alloc_table_entry(its, baser, vpe_id)) return false; } @@ -2213,7 +2825,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, nr_ites = max(2, nvecs); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kzalloc(sz, GFP_KERNEL); + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); if (alloc_lpis) { lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) @@ -2242,7 +2854,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->event_map.col_map = col_map; dev->event_map.lpi_base = lpi_base; dev->event_map.nr_lpis = nr_lpis; - mutex_init(&dev->event_map.vlpi_lock); + raw_spin_lock_init(&dev->event_map.vlpi_lock); dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); @@ -2267,13 +2879,14 @@ static void its_free_device(struct 
its_device *its_dev) kfree(its_dev); } -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->event_map.lpi_map, - dev->event_map.nr_lpis); - if (idx == dev->event_map.nr_lpis) + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) return -ENOSPC; *hwirq = dev->event_map.lpi_base + idx; @@ -2289,6 +2902,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, struct its_device *its_dev; struct msi_domain_info *msi_info; u32 dev_id; + int err = 0; /* * We ignore "dev" entirely, and rely on the dev_id that has @@ -2311,6 +2925,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, return -EINVAL; } + mutex_lock(&its->dev_alloc_lock); its_dev = its_find_device(its, dev_id); if (its_dev) { /* @@ -2318,18 +2933,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, * another alias (PCI bridge of some sort). No need to * create the device. */ + its_dev->shared = true; pr_debug("Reusing ITT for devID %x\n", dev_id); goto out; } its_dev = its_create_device(its, dev_id, nvec, true); - if (!its_dev) - return -ENOMEM; + if (!its_dev) { + err = -ENOMEM; + goto out; + } pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); out: + mutex_unlock(&its->dev_alloc_lock); info->scratchpad[0].ptr = its_dev; - return 0; + return err; } static struct msi_domain_ops its_msi_domain_ops = { @@ -2365,25 +2984,28 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, { msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; - for (i = 0; i < nr_irqs; i++) { - err = its_alloc_device_irq(its_dev, &hwirq); - if (err) - return err; + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) return err; irq_domain_set_hwirq_and_chip(domain, virq + i, - hwirq, &its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->event_map.lpi_base), - (int) hwirq, virq + i); + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); } return 0; @@ -2394,22 +3016,13 @@ static int its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - const struct cpumask *cpu_mask = cpu_online_mask; int cpu; - /* get the cpu_mask of local node */ - if (its_dev->its->numa_node >= 0) - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - - /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first_and(cpu_mask, cpu_online_mask); - if (cpu >= nr_cpu_ids) { - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) - return -EINVAL; - - cpu = cpumask_first(cpu_online_mask); - } + cpu = its_select_cpu(d, cpu_online_mask); + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + its_inc_lpi_count(d, cpu); its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, 
cpumask_of(cpu)); @@ -2424,6 +3037,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); /* Stop the delivery of interrupts */ its_send_discard(its_dev, event); } @@ -2433,22 +3047,28 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; int i; + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + for (i = 0; i < nr_irqs; i++) { struct irq_data *data = irq_domain_get_irq_data(domain, virq + i); - u32 event = its_get_event_id(data); - - /* Mark interrupt index as unused */ - clear_bit(event, its_dev->event_map.lpi_map); - /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } - /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->event_map.lpi_map, + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditioned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, its_dev->event_map.nr_lpis)) { its_lpi_free(its_dev->event_map.lpi_map, its_dev->event_map.lpi_base, @@ -2460,6 +3080,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, its_free_device(its_dev); } + mutex_unlock(&its->dev_alloc_lock); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); } @@ -2603,7 +3225,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16); val |= GICR_VPENDBASER_RaWaWb; - val |= GICR_VPENDBASER_NonShareable; + val |= GICR_VPENDBASER_InnerShareable; /* * There is no good way of finding out if the pending table is * empty as we can race against the doorbell interrupt very @@ -2622,26 +3244,11 @@ static void its_vpe_deschedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - u32 count = 1000000; /* 1s! 
*/ - bool clean; u64 val; - /* We're being scheduled out */ - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + val = its_clear_vpend_valid(vlpi_base); - do { - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - clean = !(val & GICR_VPENDBASER_Dirty); - if (!clean) { - count--; - cpu_relax(); - udelay(1); - } - } while (!clean && count); - - if (unlikely(!clean && !count)) { + if (unlikely(val & GICR_VPENDBASER_Dirty)) { pr_err_ratelimited("ITS virtual pending table not cleaning\n"); vpe->idai = false; vpe->pending_last = true; @@ -2656,7 +3263,7 @@ static void its_vpe_invall(struct its_vpe *vpe) struct its_node *its; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) @@ -2772,12 +3379,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; @@ -2811,7 +3424,7 @@ static int its_vpe_init(struct its_vpe *vpe) if (!its_alloc_vpe_table(vpe_id)) { its_vpe_id_free(vpe_id); - its_free_pending_table(vpe->vpt_page); + its_free_pending_table(vpt_page); return -ENOMEM; } @@ -2864,8 +3477,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct page *vprop_page; int base, nr_ids, i, err = 0; - BUG_ON(!vm); - bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; @@ -2900,13 +3511,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq set_bit(i, bitmap); } - if (err) { - if (i > 0) - its_vpe_irq_domain_free(domain, virq, i - 1); - - its_lpi_free(bitmap, base, nr_ids); - its_free_prop_table(vprop_page); - } + if (err) + its_vpe_irq_domain_free(domain, virq, i); return err; } @@ -2925,7 +3531,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, vpe->col_idx = cpumask_first(cpu_online_mask); list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; its_send_vmapp(its, vpe, true); @@ -2951,7 +3557,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, return; list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) + if (!is_v4(its)) continue; its_send_vmapp(its, vpe, false); @@ -3079,6 +3685,14 @@ static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) return true; } +static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801; + return true; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -3124,6 +3738,14 @@ static const struct gic_quirk its_quirks[] = { .mask = 0xffffffff, .init = its_enable_quirk_hip07_161600802, }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_162100801 + { + .desc = "ITS: Hip09 erratum 162100801", + .iidr = 0x00051736, + .mask = 0xffffffff, + .init = its_enable_quirk_hip09_162100801, + }, #endif { } @@ -3145,9 +3767,6 @@ static int 
its_save_disable(void) list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; its->ctlr_save = readl_relaxed(base + GITS_CTLR); err = its_force_quiescent(base); @@ -3166,9 +3785,6 @@ static int its_save_disable(void) list_for_each_entry_continue_reverse(its, &its_nodes, entry) { void __iomem *base; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; writel_relaxed(its->ctlr_save, base + GITS_CTLR); } @@ -3188,9 +3804,6 @@ static void its_restore_enable(void) void __iomem *base; int i; - if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) - continue; - base = its->base; /* @@ -3198,7 +3811,10 @@ static void its_restore_enable(void) * don't restore it since writing to CBASER or BASER * registers is undefined according to the GIC v3 ITS * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); ret = its_force_quiescent(base); if (ret) { pr_err("ITS@%pa: failed to quiesce on resume: %d\n", @@ -3384,15 +4000,16 @@ static int __init its_probe_one(struct resource *res, } raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); INIT_LIST_HEAD(&its->entry); INIT_LIST_HEAD(&its->its_device_list); typer = gic_read_typer(its_base + GITS_TYPER); + its->typer = typer; its->base = its_base; its->phys_base = res->start; its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); its->device_ids = GITS_TYPER_DEVBITS(typer); - its->is_v4 = !!(typer & GITS_TYPER_VLPIS); - if (its->is_v4) { + if (is_v4(its)) { if (!(typer & GITS_TYPER_VMOVP)) { err = its_compute_its_list_map(res, its_base); if (err < 0) @@ -3409,8 +4026,9 @@ static int __init its_probe_one(struct resource *res, its->numa_node = numa_node; - its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(ITS_CMD_QUEUE_SZ)); + its->cmd_base = (void *)page_address(alloc_pages_node(its->numa_node, + GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ))); if (!its->cmd_base) { err = -ENOMEM; goto out_free_its; @@ -3458,13 +4076,10 @@ static int __init its_probe_one(struct resource *res, gits_write_cwriter(0, its->base + GITS_CWRITER); ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr |= GITS_CTLR_ENABLE; - if (its->is_v4) + if (is_v4(its)) ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - if (GITS_TYPER_HCC(typer)) - its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; - err = its_init_domain(handle, its); if (err) goto out_free_tables; @@ -3498,16 +4113,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; - /* - * If coming via a CPU hotplug event, we don't need to disable - * LPIs before trying to re-enable them. They are already - * configured and all is well in the world. Detect this case - * by checking the allocation of the pending table for the - * current CPU. - */ - if (gic_data_rdist()->pend_page) - return 0; - if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; @@ -3517,7 +4122,21 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; - pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. 
+ */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); @@ -3628,13 +4247,13 @@ static int __init acpi_get_its_numa_node(u32 its_id) return NUMA_NO_NODE; } -static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, const unsigned long end) { return 0; } -static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, const unsigned long end) { int node; @@ -3701,7 +4320,7 @@ static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } static void __init acpi_its_srat_maps_free(void) { } #endif -static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_translator *its_entry; @@ -3773,12 +4392,13 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, } gic_rdists = rdists; - err = its_alloc_lpi_tables(); + + err = allocate_lpi_tables(); if (err) return err; list_for_each_entry(its, &its_nodes, entry) - has_v4 |= its->is_v4; + has_v4 |= is_v4(its); if (has_v4 & rdists->has_vlpis) { if (its_init_vpe_domain() || diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index ad70e7c416e3014ca95d76e7e058cdec986c17ab..9d011281d4b5c98b3b72aa41fec30aa3b2a0d5de 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -24,7 +24,7 @@ struct mbi_range { unsigned long *bm; }; -static struct mutex mbi_lock; +static DEFINE_MUTEX(mbi_lock); static phys_addr_t mbi_phys_base; static struct mbi_range *mbi_ranges; static unsigned int mbi_range_nr; @@ -297,7 +297,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) reg = of_get_property(np, "mbi-alias", NULL); if (reg) { mbi_phys_base = of_translate_address(np, reg); - if (mbi_phys_base == OF_BAD_ADDR) { + if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) { ret = -ENXIO; goto err_free_mbi; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index d5912f1ec8848d19ff93161102d6690eb7bb43d2..2ddc3674387588adaba7852eac9c3c1011f1576b 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -41,6 +42,8 @@ #include "irq-gic-common.h" +#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) + struct redist_region { void __iomem *redist_base; phys_addr_t phys_base; @@ -63,6 +66,43 @@ struct gic_chip_data { static struct gic_chip_data gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); +/* + * The behaviours of RPR and PMR registers differ depending on the value of + * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the + * distributor and redistributors depends on whether security is enabled in the + * GIC. 
+ * + * When security is enabled, non-secure priority values from the (re)distributor + * are presented to the GIC CPUIF as follows: + * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; + * + * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure + * EL1 are subject to a similar operation thus matching the priorities presented + * from the (re)distributor when security is enabled. + * + * see GICv3/GICv4 Architecture Specification (IHI0069D): + * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt + * priorities. + * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 + * interrupt. + * + * For now, we only support pseudo-NMIs if we have non-secure view of + * priorities. + */ +static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); + +/* + * Global static key controlling whether an update to PMR allowing more + * interrupts requires to be propagated to the redistributor (DSB SY). + * And this needs to be exported for modules to be able to enable + * interrupts... + */ +DEFINE_STATIC_KEY_FALSE(gic_pmr_sync); +EXPORT_SYMBOL(gic_pmr_sync); + +/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ +static refcount_t ppi_nmi_refs[16]; + static struct gic_kvm_info gic_v3_kvm_info; static DEFINE_PER_CPU(bool, has_rss); @@ -95,11 +135,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) return NULL; } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { u32 count = 1000000; /* 1s! */ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & bit) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -113,13 +153,13 @@ static void gic_do_wait_for_rwp(void __iomem *base) /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } #ifdef CONFIG_ARM64 @@ -226,6 +266,17 @@ static void gic_unmask_irq(struct irq_data *d) gic_poke_irq(d, GICD_ISENABLER); } +static inline bool gic_supports_nmi(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + static_branch_likely(&supports_pseudo_nmis); +} + +bool gic_supports_pseudo_nmis(void) +{ + return gic_supports_nmi(); +} + static int gic_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool val) { @@ -252,6 +303,13 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, } gic_poke_irq(d, reg); + + /* + * Force read-back to guarantee that the active state has taken + * effect, and won't race with a guest-driven deactivation. 
+ */ + if (reg == GICD_ISACTIVER) + gic_peek_irq(d, reg); return 0; } @@ -281,6 +339,79 @@ static int gic_irq_get_irqchip_state(struct irq_data *d, return 0; } +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + + writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d)); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq(d) < 32) { + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) { + refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, GICD_INT_NMI_PRI); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq(d) < 32) { + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + static void gic_eoi_irq(struct irq_data *d) { gic_write_eoir(gic_irq(d)); @@ -344,52 +475,144 @@ static u64 gic_mpidr_to_affinity(unsigned long mpidr) return aff; } +static void gic_deactivate_unhandled(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + gic_write_eoir(irqnr); + } +} + +static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + bool irqs_enabled = interrupts_enabled(regs); + int err; + + if (unlikely(irqnr < 16)) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); +#ifdef CONFIG_SMP + handle_IPI(irqnr, regs); +#endif + return; + } + + if (irqs_enabled) + nmi_enter(); + + if (static_branch_likely(&supports_deactivate_key)) + gic_write_eoir(irqnr); + else + isb(); + + /* + * Leave the PSR.I bit set to prevent other NMIs to be + * received while handling this one. + * PSR.I will be restored when we ERET to the + * interrupted context. 
+ */ + err = handle_domain_nmi(gic_data.domain, irqnr, regs); + if (err) + gic_deactivate_unhandled(irqnr); + + if (irqs_enabled) + nmi_exit(); +} + static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqnr; - do { - irqnr = gic_read_iar(); - - if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { - int err; - - if (static_branch_likely(&supports_deactivate_key)) - gic_write_eoir(irqnr); - else - isb(); - - err = handle_domain_irq(gic_data.domain, irqnr, regs); - if (err) { - WARN_ONCE(true, "Unexpected interrupt received!\n"); - if (static_branch_likely(&supports_deactivate_key)) { - if (irqnr < 8192) - gic_write_dir(irqnr); - } else { - gic_write_eoir(irqnr); - } - } - continue; - } - if (irqnr < 16) { + irqnr = gic_read_iar(); + + /* Check for special IDs first */ + if ((irqnr >= 1020 && irqnr <= 1023)) + return; + + if (gic_supports_nmi() && + unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) { + gic_handle_nmi(irqnr, regs); + return; + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { + int err; + + if (static_branch_likely(&supports_deactivate_key)) gic_write_eoir(irqnr); - if (static_branch_likely(&supports_deactivate_key)) - gic_write_dir(irqnr); + else + isb(); + + err = handle_domain_irq(gic_data.domain, irqnr, regs); + if (err) { + WARN_ONCE(true, "Unexpected interrupt received!\n"); + gic_deactivate_unhandled(irqnr); + } + return; + } + if (irqnr < 16) { + gic_write_eoir(irqnr); + if (static_branch_likely(&supports_deactivate_key)) + gic_write_dir(irqnr); #ifdef CONFIG_SMP - /* - * Unlike GICv2, we don't need an smp_rmb() here. - * The control dependency from gic_read_iar to - * the ISB in gic_write_eoir is enough to ensure - * that any shared data read by handle_IPI will - * be read after the ACK. - */ - handle_IPI(irqnr, regs); + /* + * Unlike GICv2, we don't need an smp_rmb() here. + * The control dependency from gic_read_iar to + * the ISB in gic_write_eoir is enough to ensure + * that any shared data read by handle_IPI will + * be read after the ACK. + */ + handle_IPI(irqnr, regs); #else - WARN_ONCE(true, "Unexpected SGI received!\n"); + WARN_ONCE(true, "Unexpected SGI received!\n"); #endif - continue; - } - } while (irqnr != ICC_IAR1_EL1_SPURIOUS); + } +} + +static u32 gic_get_pribits(void) +{ + u32 pribits; + + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * looses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we're don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; } static void __init gic_dist_init(void) @@ -527,13 +750,19 @@ static void gic_update_vlpi_properties(void) !gic_data.rdists.has_direct_lpi ? 
"no " : ""); } +/* Check whether it's single security state view */ +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + static void gic_cpu_sys_reg_init(void) { int i, cpu = smp_processor_id(); u64 mpidr = cpu_logical_map(cpu); u64 need_rss = MPIDR_RS(mpidr); bool group0; - u32 val, pribits; + u32 pribits; /* * Need to check that the SRE bit has actually been set. If @@ -545,28 +774,22 @@ static void gic_cpu_sys_reg_init(void) if (!gic_enable_sre()) pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); - pribits = gic_read_ctlr(); - pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; - pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; - pribits++; + pribits = gic_get_pribits(); - /* - * Let's find out if Group0 is under control of EL3 or not by - * setting the highest possible, non-zero priority in PMR. - * - * If SCR_EL3.FIQ is set, the priority gets shifted down in - * order for the CPU interface to set bit 7, and keep the - * actual priority in the non-secure range. In the process, it - * looses the least significant bit and the actual priority - * becomes 0x80. Reading it back returns 0, indicating that - * we're don't have access to Group0. - */ - write_gicreg(BIT(8 - pribits), ICC_PMR_EL1); - val = read_gicreg(ICC_PMR_EL1); - group0 = val != 0; + group0 = gic_has_group0(); /* Set priority mask register */ - write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } else { + /* + * Mismatch configuration with boot CPU, the system is likely + * to die as interrupt masking will not work properly on all + * CPUs + */ + WARN_ON(gic_supports_nmi() && group0 && + !gic_dist_security_disabled()); + } /* * Some firmwares hand over to the kernel with the BPR changed from @@ -653,7 +876,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); static int gic_dist_supports_lpis(void) { - return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); } static void gic_cpu_init(void) @@ -673,14 +898,225 @@ static void gic_cpu_init(void) gic_cpu_config(rbase, gic_redist_wait_for_rwp); - /* Give LPIs a spin */ - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_cpu_init(); +#ifdef CONFIG_ARM64_PSEUDO_NMI + if (gic_supports_nmi()) + ipi_set_nmi_prio(rbase, GICD_INT_NMI_PRI); +#endif /* initialise system registers */ gic_cpu_sys_reg_init(); } +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +struct workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static struct workaround_oem_info gicr_wkrd_info[] = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x300, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x301, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x400, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x401, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x402, + } +}; + +static void gic_check_hisi_workaround(struct fwnode_handle *handle) +{ + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + struct device_node *node = to_of_node(handle); + int i; + + if ((node != NULL) && of_property_read_bool(node, "enable-init-all-gicr")) { + 
its_enable_init_all_gicr(); + return; + } + + status = acpi_get_table(ACPI_SIG_MADT, 0, &tbl); + if (ACPI_FAILURE(status) || !tbl) + return; + + for (i = 0; i < ARRAY_SIZE(gicr_wkrd_info); i++) { + if (!memcmp(gicr_wkrd_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(gicr_wkrd_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + gicr_wkrd_info[i].oem_revision == tbl->oem_revision) { + its_enable_init_all_gicr(); + break; + } + } + + acpi_put_table(tbl); +} + +static void gic_compute_nr_gicr(void) +{ + int i; + int sum = 0; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + u64 typer; + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + sum++; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + /* Skip VLPI_base + reserved page */ + ptr += SZ_64K * 2; + } + } while (!(typer & GICR_TYPER_LAST)); + } + + its_set_gicr_nr(sum); +} + +static void gic_enable_redist_others(void __iomem *rbase, bool enable) +{ + u32 count = 1000000; /* 1s! */ + u32 val; + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? "wakeup" : "sleep"); +} + +static int gic_rdist_cpu(void __iomem *ptr, unsigned int cpu) +{ + unsigned long mpidr = cpu_logical_map(cpu); + u64 typer; + u32 aff; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. 
+ */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) + return 0; + + return 1; +} + +static int gic_rdist_cpus(void __iomem *ptr) +{ + unsigned int i; + + for (i = 0; i < nr_cpu_ids; i++) { + if (gic_rdist_cpu(ptr, i) == 0) + return 0; + } + + return 1; +} + +static void gic_cpu_init_others(void) +{ + int i, cpu = nr_cpu_ids; + int gicr_nr = its_gicr_nr(); + + if (!its_init_all_gicr()) + return; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + u64 typer; + void __iomem *redist_base = + gic_data.redist_regions[i].redist_base; + phys_addr_t phys_base = gic_data.redist_regions[i].phys_base; + + do { + typer = gic_read_typer(redist_base + GICR_TYPER); + + if (gic_rdist_cpus(redist_base) == 1) { + if (cpu >= gicr_nr) { + pr_err("CPU over GICR number.\n"); + break; + } + gic_enable_redist_others(redist_base, true); + + if (gic_dist_supports_lpis()) + its_cpu_init_others(redist_base, phys_base, cpu); + cpu++; + } + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + redist_base += gic_data.redist_stride; + phys_base += gic_data.redist_stride; + } else { + /* Skip RD_base + SGI_base */ + redist_base += SZ_64K * 2; + phys_base += SZ_64K * 2; + if (typer & GICR_TYPER_VLPIS) { + /* Skip VLPI_base + reserved page */ + redist_base += SZ_64K * 2; + phys_base += SZ_64K * 2; + } + } + } while (!(typer & GICR_TYPER_LAST)); + } +} +#else +#define gic_check_hisi_workaround(x) + +#define gic_compute_nr_gicr() + +#define gic_cpu_init_others() +#endif + #ifdef CONFIG_SMP #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) @@ -689,6 +1125,10 @@ static void gic_cpu_init(void) static int gic_starting_cpu(unsigned int cpu) { gic_cpu_init(); + + if (gic_dist_supports_lpis()) + its_cpu_init(); + return 0; } @@ -818,13 +1258,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, #define gic_smp_init() do { } while(0) #endif -#ifdef CONFIG_CPU_PM -/* Check whether it's single security state view */ -static bool gic_dist_security_disabled(void) +static int gic_retrigger(struct irq_data *data) { - return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); } +#ifdef CONFIG_CPU_PM static int gic_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { @@ -859,8 +1298,11 @@ static struct irq_chip gic_chip = { .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, @@ -873,9 +1315,12 @@ static struct irq_chip gic_eoimode1_chip = { .irq_eoi = gic_eoimode1_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, @@ -887,6 +1332,7 @@ static int 
gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (static_branch_likely(&supports_deactivate_key)) chip = &gic_eoimode1_chip; @@ -913,16 +1359,18 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } /* LPIs */ if (hw >= 8192 && hw < GIC_ID_NR) { if (!gic_dist_supports_lpis()) return -EPERM; irq_domain_set_info(d, irq, hw, chip, d->host_data, - handle_fasteoi_irq, NULL, NULL); + handle_fasteoi_edge_irq, NULL, NULL); } + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } @@ -1068,6 +1516,32 @@ static const struct irq_domain_ops partition_domain_ops = { .select = gic_irq_domain_select, }; +static void gic_enable_nmi_support(void) +{ + int i; + + for (i = 0; i < 16; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + /* + * Linux itself doesn't use 1:N distribution, so has no need to + * set PMHE. The only reason to have it set is if EL3 requires it + * (and we can't change it). + */ + if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) + static_branch_enable(&gic_pmr_sync); + + pr_info("%s ICC_PMR_EL1 synchronisation\n", + static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing"); + + static_branch_enable(&supports_pseudo_nmis); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + static int __init gic_init_bases(void __iomem *dist_base, struct redist_region *rdist_regs, u32 nr_redist_regions, @@ -1107,6 +1581,8 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; + gic_check_hisi_workaround(handle); + gic_compute_nr_gicr(); if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { err = -ENOMEM; @@ -1127,14 +1603,29 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_update_vlpi_properties(); - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_init(handle, &gic_data.rdists, gic_data.domain); + /* + * NMI backtrace DFX need check nmi support, this should be + * called before enable NMI backtrace DFX. 
+ */ + if (gic_prio_masking_enabled()) { + if (!gic_has_group0() || gic_dist_security_disabled()) + gic_enable_nmi_support(); + else + pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); + } gic_smp_init(); gic_dist_init(); gic_cpu_init(); gic_cpu_pm_init(); + if (gic_dist_supports_lpis()) { + its_init(handle, &gic_data.rdists, gic_data.domain); + its_cpu_init(); + } + + gic_cpu_init_others(); + return 0; out_free: @@ -1205,12 +1696,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); @@ -1347,6 +1841,7 @@ static struct struct redist_region *redist_regs; u32 nr_redist_regions; bool single_redist; + int enabled_rdists; u32 maint_irq; int maint_irq_mode; phys_addr_t vcpu_base; @@ -1364,7 +1859,7 @@ gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) } static int __init -gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_redistributor *redist = @@ -1382,7 +1877,7 @@ gic_acpi_parse_madt_redist(struct acpi_subtable_header *header, } static int __init -gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, +gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = @@ -1424,14 +1919,14 @@ static int __init gic_acpi_collect_gicr_base(void) return -ENODEV; } -static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header, +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, const unsigned long end) { /* Subtable presence means that redist exists, that's it */ return 0; } -static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = @@ -1441,8 +1936,10 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, * If GICC is enabled and has valid gicr base address, then it means * GICR base is presented via GICC */ - if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; return 0; + } /* * It's perfectly valid firmware can pass disabled GICC entry, driver @@ -1472,8 +1969,10 @@ static int __init gic_acpi_count_gicr_regions(void) count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_match_gicc, 0); - if (count > 0) + if (count > 0) { acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } return count; } @@ -1497,7 +1996,7 @@ static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, return true; } -static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header, +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *gicc = diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index ced10c44b68a88d2024fe052300021ac7c5486e8..71e20e9f31501a16f3b4565fbba04588bbe4cd44 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -350,6 +350,11 @@ static int 
gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; @@ -420,6 +425,7 @@ static const struct irq_chip gic_chip = { .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | @@ -972,6 +978,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct gic_chip_data *gic = d->host_data; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (hw < 32) { irq_set_percpu_devid(irq); @@ -982,8 +989,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } @@ -1508,7 +1518,7 @@ static struct } acpi_data __initdata; static int __init -gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, +gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_interrupt *processor; @@ -1540,7 +1550,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, } /* The things you have to do to just *count* something... */ -static int __init acpi_dummy_func(struct acpi_subtable_header *header, +static int __init acpi_dummy_func(union acpi_subtable_headers *header, const unsigned long end) { return 0; diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 4760307ab43fc33404b6b2ec07b2c3b49a6f6405..cef8f5e2e8fce9a1bb5e9609ec500fa1a2d9d149 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 2ff08986b536195ca97eab893d31a594abc2490b..be6923abf9a4db34f9c7f401d43c493e72ebd079 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -117,6 +117,14 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } + domain = irq_domain_add_legacy(node, num_chips * 32, + JZ4740_IRQ_BASE, 0, + &irq_domain_simple_ops, NULL); + if (!domain) { + err = -ENOMEM; + goto out_unmap_base; + } + for (i = 0; i < num_chips; i++) { /* Mask all irqs */ writel(0xffffffff, intc->base + (i * CHIP_SIZE) + @@ -143,14 +151,11 @@ static int __init ingenic_intc_of_init(struct device_node *node, IRQ_NOPROBE | IRQ_LEVEL); } - domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0, - &irq_domain_simple_ops, NULL); - if (!domain) - pr_warn("unable to register IRQ domain\n"); - setup_irq(parent_irq, &intc_cascade_action); return 0; +out_unmap_base: + iounmap(intc->base); out_unmap_irq: irq_dispose_mapping(parent_irq); out_free: diff --git 
a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 567b29c476081056232f13eed15a27c8d8d4fa64..6ccce7edf7b9523a36a66e8df189e9f2edf7f178 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -75,6 +75,20 @@ struct mbigen_device { void __iomem *base; }; +static inline unsigned int get_mbigen_node_offset(unsigned int nid) +{ + unsigned int offset = nid * MBIGEN_NODE_OFFSET; + + /* + * To avoid touched clear register in unexpected way, we need to directly + * skip clear register when access to more than 10 mbigen nodes. + */ + if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)) + offset += MBIGEN_NODE_OFFSET; + + return offset; +} + static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) { unsigned int nid, pin; @@ -83,8 +97,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; pin = hwirq % IRQS_PER_MBIGEN_NODE; - return pin * 4 + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_VEC_OFFSET; + return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET; } static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, @@ -99,8 +112,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, *mask = 1 << (irq_ofst % 32); ofst = irq_ofst / 32 * 4; - *addr = ofst + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_TYPE_OFFSET; + *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET; } static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, @@ -161,6 +173,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg) void __iomem *base = d->chip_data; u32 val; + if (!msg->address_lo && !msg->address_hi) + return; + base += get_mbigen_vec_reg(d->hwirq); val = readl_relaxed(base); @@ -228,10 +243,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, return 0; } +static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + platform_msi_domain_free(domain, virq, nr_irqs); +} + static const struct irq_domain_ops mbigen_domain_ops = { .translate = mbigen_domain_translate, .alloc = mbigen_irq_domain_alloc, - .free = irq_domain_free_irqs_common, + .free = mbigen_irq_domain_free, }; static int mbigen_of_create_domain(struct platform_device *pdev, @@ -249,12 +270,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev, parent = platform_bus_type.dev_root; child = of_platform_device_create(np, NULL, parent); - if (!child) + if (!child) { + of_node_put(np); return -ENOMEM; + } if (of_property_read_u32(child->dev.of_node, "num-pins", &num_pins) < 0) { dev_err(&pdev->dev, "No num-pins property\n"); + of_node_put(np); return -EINVAL; } @@ -262,8 +286,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev, mbigen_write_msg, &mbigen_domain_ops, mgn_chip); - if (!domain) + if (!domain) { + of_node_put(np); return -ENOMEM; + } } return 0; @@ -378,6 +404,7 @@ static struct platform_driver mbigen_platform_driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + .suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 7b531fd075b885396c378ef84bf0aaa110dc3c0f..7599b10ecf09d153e8b2b563f2ce515f19eb3358 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -73,6 +73,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = 
&gxbb_params }, { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, + { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, { } }; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index d32268cc1174c75b8cc0942c15637916a5a333ff..f3985469c2211c7fea4b40469a47c1e27711a510 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d) intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); cd = irq_data_get_irq_chip_data(d); - write_gic_vl_map(intr, cd->map); + write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); if (cd->mask) write_gic_vl_smask(BIT(intr)); } @@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); - write_gic_vo_map(intr, map); + write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25f32e1d77647d0be5d8a59d3d214127ba05b9ac..3496b61a312aef87cc9668189fd9047a844e8ca3 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -34,6 +34,9 @@ #define SEL_INT_PENDING (1 << 6) #define SEL_INT_NUM_MASK 0x3f +#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) +#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) + struct icu_chip_data { int nr_irqs; unsigned int virq_base; @@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = { static const struct mmp_intc_conf mmp2_conf = { .conf_enable = 0x20, .conf_disable = 0x0, - .conf_mask = 0x7f, + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | + MMP2_ICU_INT_ROUTE_PJ4_FIQ, }; static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c index 13063339b416dac51bf84e77662aa28d70080be7..a2a3acd744911014cfe46389151b68d03b54504c 100644 --- a/drivers/irqchip/irq-mvebu-icu.c +++ b/drivers/irqchip/irq-mvebu-icu.c @@ -105,7 +105,7 @@ static int mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { - struct mvebu_icu *icu = d->host_data; + struct mvebu_icu *icu = platform_msi_get_host_data(d); unsigned int icu_group; /* Check the count of the parameters in dt */ diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 0a2088e12d96fdf5525b48613ac818e5061064c9..97b27f338c305898f7b1c829214ab00ecc377912 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -650,11 +650,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, */ writel_relaxed(0, base + stm32_bank->imr_ofst); writel_relaxed(0, base + stm32_bank->emr_ofst); - writel_relaxed(0, base + stm32_bank->rtsr_ofst); - writel_relaxed(0, base + stm32_bank->ftsr_ofst); - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst); - if (stm32_bank->fpr_ofst != UNDEF_REG) - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst); pr_info("%s: bank%d, External IRQs available:%#x\n", node->full_name, bank_idx, irqs_mask); diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index b1b47a40a278686a7830e29e1c7c5c15b6c53b87..faa7d61b9d6c421ceaa4078d09e391bdec13e7f0 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, 
unsigned int type) break; case IRQ_TYPE_EDGE_BOTH: pdc_type = PDC_EDGE_DUAL; + type = IRQ_TYPE_EDGE_RISING; break; case IRQ_TYPE_LEVEL_HIGH: pdc_type = PDC_LEVEL_HIGH; diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index ef5560b848ab3a66adc29d119d5608bf726642e0..c67fd2fb333c13677796c5efa35e2cfd43a396d6 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos if (!cdev->ap.applid) return -ENODEV; + if (count < CAPIMSG_BASELEN) + return -EINVAL; + skb = alloc_skb(count, GFP_USER); if (!skb) return -ENOMEM; @@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos } mlen = CAPIMSG_LEN(skb->data); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { + if (count < CAPI_DATA_B3_REQ_LEN || + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { kfree_skb(skb); return -EINVAL; } @@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) { + if (count < CAPI_DISCONNECT_B3_RESP_LEN) { + kfree_skb(skb); + return -EINVAL; + } mutex_lock(&cdev->lock); capincci_free(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->lock); @@ -736,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait) poll_wait(file, &(cdev->recvwait), wait); mask = EPOLLOUT | EPOLLWRNORM; - if (!skb_queue_empty(&cdev->recvqueue)) + if (!skb_queue_empty_lockless(&cdev->recvqueue)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 0ff517d3c98f98e91aaabafc8051440a0486ce74..ed9ee2bbf232e978e9ef7e67f5113ddbf66bbd29 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -565,6 +565,11 @@ int detach_capi_ctr(struct capi_ctr *ctr) ctr_down(ctr, CAPI_CTR_DETACHED); + if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) { + err = -EINVAL; + goto unlock_out; + } + if (capi_controller[ctr->cnr - 1] != ctr) { err = -EINVAL; goto unlock_out; @@ -852,7 +857,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf) u16 ret; if (contr == 0) { - strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); + strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); return CAPI_NOERROR; } @@ -860,7 +865,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf) ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { - strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN); + strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index ecdeb89645d00454381ef4020986fdf5e38f2eec..149b1aca52a23e4cc28897064861abec8ce3326c 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -958,6 +958,7 @@ static void write_iso_callback(struct urb *urb) */ static int starturbs(struct bc_state *bcs) { + struct usb_device *udev = bcs->cs->hw.bas->udev; struct bas_bc_state *ubc = bcs->hw.bas; struct urb *urb; int j, k; @@ -975,8 +976,8 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } - usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, - usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel), + usb_fill_int_urb(urb, udev, + usb_rcvisocpipe(udev, 3 + 2 * bcs->channel), ubc->isoinbuf + k * BAS_INBUFSIZE, BAS_INBUFSIZE, read_iso_callback, bcs, 
BAS_FRAMETIME); @@ -1006,8 +1007,8 @@ static int starturbs(struct bc_state *bcs) rc = -EFAULT; goto error; } - usb_fill_int_urb(urb, bcs->cs->hw.bas->udev, - usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel), + usb_fill_int_urb(urb, udev, + usb_sndisocpipe(udev, 4 + 2 * bcs->channel), ubc->isooutbuf->data, sizeof(ubc->isooutbuf->data), write_iso_callback, &ubc->isoouturbs[k], diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index eade36dafa3408f192b618cd1c52323235ed2d34..4c239f18240db8bc2944ab80eae983508de3b902 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -574,8 +574,7 @@ static int gigaset_initcshw(struct cardstate *cs) { struct usb_cardstate *ucs; - cs->hw.usb = ucs = - kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); + cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); return -ENOMEM; @@ -587,9 +586,6 @@ static int gigaset_initcshw(struct cardstate *cs) ucs->bchars[3] = 0; ucs->bchars[4] = 0x11; ucs->bchars[5] = 0x13; - ucs->bulk_out_buffer = NULL; - ucs->bulk_out_urb = NULL; - ucs->read_urb = NULL; tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); @@ -688,6 +684,11 @@ static int gigaset_probe(struct usb_interface *interface, return -ENODEV; } + if (hostif->desc.bNumEndpoints < 2) { + dev_err(&interface->dev, "missing endpoints\n"); + return -ENODEV; + } + dev_info(&udev->dev, "%s: Device matched ... !\n", __func__); /* allocate memory for our device state and initialize it */ @@ -707,6 +708,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[0].desc; + if (!usb_endpoint_is_bulk_out(endpoint)) { + dev_err(&interface->dev, "missing bulk-out endpoint\n"); + retval = -ENODEV; + goto error; + } + buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; ucs->bulk_out_epnum = usb_endpoint_num(endpoint); @@ -726,6 +733,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[1].desc; + if (!usb_endpoint_is_int_in(endpoint)) { + dev_err(&interface->dev, "missing int-in endpoint\n"); + retval = -ENODEV; + goto error; + } + ucs->busy = 0; ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4ac378e489023f19b42eaf95eff35d6a8ef1913d..40ca1e8fa09fc7884e664b473274df8cb6101a86 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) int i, j; for (j = 0; j < AVM_MAXVERSION; j++) - cinfo->version[j] = "\0\0" + 1; + cinfo->version[j] = ""; for (i = 0, j = 0; j < AVM_MAXVERSION && i < cinfo->versionlen; j++, i += cinfo->versionbuf[i] + 1) diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 4d85645c87f78721a83fcef94be1feb3bce8c094..c03ded7ffb0b5dcc92aad44fb28e1054fa840945 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -1945,7 +1945,7 @@ hfcmulti_dtmf(struct hfc_multi *hc) static void hfcmulti_tx(struct hfc_multi *hc, int ch) { - int i, ii, temp, len = 0; + int i, ii, temp, tmp_len, len = 0; int Zspace, z1, z2; /* must be int for calculation */ int Fspace, f1, f2; u_char *d; @@ -2166,14 +2166,15 @@ hfcmulti_tx(struct hfc_multi *hc, int ch) HFC_wait_nodebug(hc); } + tmp_len = (*sp)->len; dev_kfree_skb(*sp); /* check for next frame */ if (bch && get_next_bframe(bch)) { - len = 
(*sp)->len; + len = tmp_len; goto next_frame; } if (dch && get_next_dframe(dch)) { - len = (*sp)->len; + len = tmp_len; goto next_frame; } @@ -4365,7 +4366,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev, if (m->clock2) test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); - if (ent->device == 0xB410) { + if (ent->vendor == PCI_VENDOR_ID_DIGIUM && + ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) { test_and_set_bit(HFC_CHIP_B410P, &hc->chip); test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index ebb3fa2e1d00f1627d42363c3478999b1b0ee234..737a66a3dfde43194e378e59a08c54d743081f98 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2347,8 +2347,7 @@ HFC_init(void) static void __exit HFC_cleanup(void) { - if (timer_pending(&hfc_tl)) - del_timer(&hfc_tl); + del_timer_sync(&hfc_tl); pci_unregister_driver(&hfc_driver); } diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 6d05946b445eb039aeb6c9c755e94dbe8b8f1dac..c952002c6301d8c219c51243e22d38c5bee8b2ec 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb, printk(KERN_DEBUG "%s: %s: alloc urb for fifo %i failed", hw->name, __func__, fifo->fifonum); + continue; } fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; fifo->iso[i].indx = i; @@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel) static int setup_hfcsusb(struct hfcsusb *hw) { + void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL); u_char b; + int ret; if (debug & DBG_HFC_CALL_TRACE) printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); + if (!dmabuf) + return -ENOMEM; + + ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf); + + memcpy(&b, dmabuf, sizeof(u_char)); + kfree(dmabuf); + /* check the chip id */ - if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) { + if (ret != 1) { printk(KERN_DEBUG "%s: %s: cannot read chip id\n", hw->name, __func__); return 1; @@ -1967,6 +1978,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) /* get endpoint base */ idx = ((ep_addr & 0x7f) - 1) * 2; + if (idx > 15) + return -EIO; + if (ep_addr & 0x80) idx++; attr = ep->desc.bmAttributes; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 2b317cb63d068c00938fefed7e22cc8cfb27156f..4a342daac98dc51d3f3d45925174fc0ef7c9ebdc 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -963,8 +963,8 @@ nj_release(struct tiger_hw *card) nj_disable_hwirq(card); mode_tiger(&card->bc[0], ISDN_P_NONE); mode_tiger(&card->bc[1], ISDN_P_NONE); - card->isac.release(&card->isac); spin_unlock_irqrestore(&card->lock, flags); + card->isac.release(&card->isac); release_region(card->base, card->base_s); card->base_s = 0; } @@ -1114,7 +1114,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) card->typ = NETJET_S_TJ300; card->base = pci_resource_start(pdev, 0); - card->irq = pdev->irq; pci_set_drvdata(pdev, card); err = setup_instance(card); if (err) diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 8e5b03161b2ff035e7d9092a6f56f5b861e0a2a1..64a63711fd953e4f0afaa0c220ac0161a2f8cfe7 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -1170,11 +1170,13 @@ HFCPCI_l1hw(struct PStack *st, int 
pr, void *arg) if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif + spin_lock_irqsave(&cs->lock, flags); if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); + spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index b730037a0e2d383b2f6037561ccd396c558077a8..9cff667b2d2455c0b455d3f60714b582ba3f2f93 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { modem_info *info = (modem_info *) tty->driver_data; + mutex_lock(&modem_info_mutex); if (!old_termios) isdn_tty_change_speed(info); else { if (tty->termios.c_cflag == old_termios->c_cflag && tty->termios.c_ispeed == old_termios->c_ispeed && - tty->termios.c_ospeed == old_termios->c_ospeed) + tty->termios.c_ospeed == old_termios->c_ospeed) { + mutex_unlock(&modem_info_mutex); return; + } isdn_tty_change_speed(info); } + mutex_unlock(&modem_info_mutex); } /* diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index e72b4e73cd615f1fa0410b97661d7f07ef9bbaca..fb8153f1aff73f9bf3e8d6183527c880d73f60a7 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -236,7 +236,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { int incomplete = 0, found = 0; - char *dup, *tok, *name, *args; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -247,10 +247,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h index 7ea10db20e3a6567d91b4d6d8b63daa99eadaa98..48133d022812071543fb4e0fee48af6968619f2d 100644 --- a/drivers/isdn/mISDN/l1oip.h +++ b/drivers/isdn/mISDN/l1oip.h @@ -59,6 +59,7 @@ struct l1oip { int bundle; /* bundle channels in one frm */ int codec; /* codec to use for transmis. 
*/ int limit; /* limit number of bchannels */ + bool shutdown; /* if card is released */ /* timer */ struct timer_list keep_tl; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b05022f94f18c22ff2c72fa71cf11984077152b7..2f4a01ab25e8de09c087ea6d7d49f80ee0b1b29b 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -289,7 +289,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, p = frame; /* restart timer */ - if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) + if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; @@ -615,7 +615,9 @@ l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len) goto multiframe; /* restart timer */ - if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { + if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || + !hc->timeout_on) && + !hc->shutdown) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ @@ -1247,11 +1249,10 @@ release_card(struct l1oip *hc) { int ch; - if (timer_pending(&hc->keep_tl)) - del_timer(&hc->keep_tl); + hc->shutdown = true; - if (timer_pending(&hc->timeout_tl)) - del_timer(&hc->timeout_tl); + del_timer_sync(&hc->keep_tl); + del_timer_sync(&hc->timeout_tl); cancel_work_sync(&hc->workq); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 18c0a1281914fa3218761bd20b2a2e0c85e8aae6..db588a79a9f043f2ef4a63329ac1842bf9391ae6 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; - strcpy(di.name, dev_name(&dev->dev)); + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else @@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = -EFAULT; break; } + dn.name[sizeof(dn.name) - 1] = '\0'; dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); @@ -711,10 +712,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) struct sock *sk = sock->sk; int err = 0; - if (!maddr || maddr->family != AF_ISDN) + if (addr_len < sizeof(struct sockaddr_mISDN)) return -EINVAL; - if (addr_len < sizeof(struct sockaddr_mISDN)) + if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); @@ -763,6 +764,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; + if (!capable(CAP_NET_RAW)) + return -EPERM; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 12d9e5f4beb1f81c5aa5e5af81bc9aca61c21668..58635b5f296f0cd10284ea01ba94b9c71c613082 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -1180,8 
+1180,7 @@ static int ctrl_teimanager(struct manager *mgr, void *arg) { /* currently we only have one option */ - int *val = (int *)arg; - int ret = 0; + unsigned int *val = (unsigned int *)arg; switch (val[0]) { case IMCLEAR_L2: @@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg) test_and_clear_bit(OPTION_L1_HOLD, &mgr->options); break; default: - ret = -EINVAL; + return -EINVAL; } - return ret; + return 0; } /* This function does create a L2 for fixed TEI in NT Mode */ diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10e1be817884a7724f3f37687afb93..57897871188706339dc9dfd140a9b55426b478af 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) spin_lock_irqsave(&timer->dev->lock, flags); if (timer->id >= 0) list_move_tail(&timer->list, &timer->dev->expired); - spin_unlock_irqrestore(&timer->dev->lock, flags); wake_up_interruptible(&timer->dev->wait); + spin_unlock_irqrestore(&timer->dev->lock, flags); } static int diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 17d73db1456ebb4009c9dfa396820aa2e2485acd..896087487e382b2dc425de2151bb6e4c07e11b36 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -125,9 +125,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) flags); cancel_work_sync(&led_cdev->set_brightness_work); led_stop_software_blink(led_cdev); + device_remove_groups(led_cdev->dev, led_cdev->trigger->groups); if (led_cdev->trigger->deactivate) led_cdev->trigger->deactivate(led_cdev); - device_remove_groups(led_cdev->dev, led_cdev->trigger->groups); led_cdev->trigger = NULL; led_cdev->trigger_data = NULL; led_cdev->activated = false; @@ -177,6 +177,7 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) list_del(&led_cdev->trig_list); write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags); led_set_brightness(led_cdev, LED_OFF); + kfree(event); return ret; } diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c index 4f413a7c5f05669c3f57275e37f0e5e200002c27..d79a66a73169fa3f7a031b8ad5049093c2dd4d29 100644 --- a/drivers/leds/leds-lm3692x.c +++ b/drivers/leds/leds-lm3692x.c @@ -337,9 +337,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led) return ret; } - led->regulator = devm_regulator_get(&led->client->dev, "vled"); - if (IS_ERR(led->regulator)) + led->regulator = devm_regulator_get_optional(&led->client->dev, "vled"); + if (IS_ERR(led->regulator)) { + ret = PTR_ERR(led->regulator); + if (ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + dev_err(&led->client->dev, + "Failed to get vled regulator: %d\n", + ret); + return ret; + } led->regulator = NULL; + } child = device_get_next_child_node(&led->client->dev, child); if (!child) { diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index 2a9009fe5545d059d625075eb04a0ed53dbb432e..18edc8bdc9f775d52867f1453d0976f770d82dd6 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip) { const struct firmware *fw = chip->fw; - if (fw->size > LP5562_PROGRAM_LENGTH) { + /* + * the firmware is encoded in ascii hex character, with 2 chars + * per byte + */ + if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) { dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n", fw->size); return; diff --git a/drivers/leds/leds-lp55xx-common.c 
b/drivers/leds/leds-lp55xx-common.c index 3d79a63807615a33bb40577195973ee05125ba16..723f2f17497a8f902d479620458b208e1fcca65c 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context) if (!fw) { dev_err(dev, "firmware request failed\n"); - goto out; + return; } /* handling firmware data is chip dependent */ @@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context) mutex_unlock(&chip->lock); -out: /* firmware should be released for other channel use */ release_firmware(chip->fw); + chip->fw = NULL; } static int lp55xx_request_firmware(struct lp55xx_chip *chip) diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 7fea18b0c15d115178c874163232fa6c2e3706ce..7cb4d685a1f107f335c7cf475d06aae6688ab6bc 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client, const struct i2c_device_id *id) { int devid; + const struct of_device_id *of_id; struct pca9532_data *data = i2c_get_clientdata(client); struct pca9532_platform_data *pca9532_pdata = dev_get_platdata(&client->dev); @@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client, dev_err(&client->dev, "no platform data\n"); return -EINVAL; } - devid = (int)(uintptr_t)of_match_device( - of_pca9532_leds_match, &client->dev)->data; + of_id = of_match_device(of_pca9532_leds_match, + &client->dev); + if (unlikely(!of_id)) + return -EINVAL; + devid = (int)(uintptr_t) of_id->data; } else { devid = id->driver_data; } diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index df80c89ebe7fac93fce0b323aa101b6b56d2e5d9..5d3faae51d59e6c3428ec17988a91a5458603ba0 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -100,8 +100,9 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, led_data->pwm = devm_pwm_get(dev, led->name); if (IS_ERR(led_data->pwm)) { ret = PTR_ERR(led_data->pwm); - dev_err(dev, "unable to request PWM for %s: %d\n", - led->name, ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "unable to request PWM for %s: %d\n", + led->name, ret); return ret; } diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 3dd3ed46d473b673fd916085044bd3c3b38b0759..d5e774d8302158179110d40d5b6aa10ddc68f804 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev, trigger_data->net_dev = NULL; } - strncpy(trigger_data->device_name, buf, size); + memcpy(trigger_data->device_name, buf, size); + trigger_data->device_name[size] = 0; if (size > 0 && trigger_data->device_name[size - 1] == '\n') trigger_data->device_name[size - 1] = 0; @@ -305,7 +306,9 @@ static int netdev_trig_notify(struct notifier_block *nb, && evt != NETDEV_CHANGENAME) return NOTIFY_DONE; - if (strcmp(dev->name, trigger_data->device_name)) + if (!(dev == trigger_data->net_dev || + (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) || + (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name)))) return NOTIFY_DONE; cancel_delayed_work_sync(&trigger_data->work); @@ -314,18 +317,16 @@ static int netdev_trig_notify(struct notifier_block *nb, clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode); switch (evt) { + case NETDEV_CHANGENAME: case NETDEV_REGISTER: if (trigger_data->net_dev) 
dev_put(trigger_data->net_dev); dev_hold(dev); trigger_data->net_dev = dev; break; - case NETDEV_CHANGENAME: case NETDEV_UNREGISTER: - if (trigger_data->net_dev) { - dev_put(trigger_data->net_dev); - trigger_data->net_dev = NULL; - } + dev_put(trigger_data->net_dev); + trigger_data->net_dev = NULL; break; case NETDEV_UP: case NETDEV_CHANGE: diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 439bf90d084dde47fd7013f4b7474886d3f706e6..1db33cb288595c42d8f395b3772521b3a511f5fe 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -4,7 +4,7 @@ menuconfig NVM bool "Open-Channel SSD target support" - depends on BLOCK && PCI + depends on BLOCK && PCI && BROKEN select BLK_DEV_NVME help Say Y here to get to enable Open-channel SSDs. diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 00984b486fea72a953652385c6133468f6f96810..8dce31dbf2cbcf0c037d0688aa239652c37c5753 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -288,14 +288,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, int nr_pages) { - struct bio_vec bv; - int i; - - WARN_ON(off + nr_pages != bio->bi_vcnt); + struct bio_vec *bv; + struct page *page; + int i, e, nbv = 0; - for (i = off; i < nr_pages + off; i++) { - bv = bio->bi_io_vec[i]; - mempool_free(bv.bv_page, &pblk->page_bio_pool); + for (i = 0; i < bio->bi_vcnt; i++) { + bv = &bio->bi_io_vec[i]; + page = bv->bv_page; + for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++) + if (nbv >= off) + mempool_free(page++, &pblk->page_bio_pool); } } @@ -891,10 +893,8 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd, static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa) { - struct nvm_rq rqd; - int ret = 0; - - memset(&rqd, 0, sizeof(struct nvm_rq)); + struct nvm_rq rqd = {NULL}; + int ret; pblk_setup_e_rq(pblk, &rqd, ppa); @@ -902,19 +902,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa) * with writes. Thus, there is no need to take the LUN semaphore. 
*/ ret = pblk_submit_io_sync(pblk, &rqd); - if (ret) { - struct nvm_tgt_dev *dev = pblk->dev; - struct nvm_geo *geo = &dev->geo; - - pblk_err(pblk, "could not sync erase line:%d,blk:%d\n", - pblk_ppa_to_line(ppa), - pblk_ppa_to_pos(geo, ppa)); - - rqd.error = ret; - goto out; - } - -out: rqd.private = pblk; __pblk_end_io_erase(pblk, &rqd); @@ -1252,15 +1239,22 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line) ret = pblk_line_alloc_bitmaps(pblk, line); if (ret) - return ret; + goto fail; if (!pblk_line_init_bb(pblk, line, 0)) { - list_add(&line->list, &l_mg->free_list); - return -EINTR; + ret = -EINTR; + goto fail; } pblk_rl_free_lines_dec(&pblk->rl, line, true); return 0; + +fail: + spin_lock(&l_mg->free_lock); + list_add(&line->list, &l_mg->free_list); + spin_unlock(&l_mg->free_lock); + + return ret; } void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line) @@ -1539,13 +1533,14 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk) struct pblk_line *cur, *new = NULL; unsigned int left_seblks; - cur = l_mg->data_line; new = l_mg->data_next; if (!new) goto out; - l_mg->data_line = new; spin_lock(&l_mg->free_lock); + cur = l_mg->data_line; + l_mg->data_line = new; + pblk_line_setup_metadata(new, l_mg, &pblk->lm); spin_unlock(&l_mg->free_lock); @@ -1778,6 +1773,17 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line) wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa)); wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa)); + if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) { + emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC); + memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16); + emeta_buf->header.id = cpu_to_le32(line->id); + emeta_buf->header.type = cpu_to_le16(line->type); + emeta_buf->header.version_major = EMETA_VERSION_MAJOR; + emeta_buf->header.version_minor = EMETA_VERSION_MINOR; + emeta_buf->header.crc = cpu_to_le32( + pblk_calc_meta_header_crc(pblk, &emeta_buf->header)); + } + emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas); emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf)); @@ -1795,8 +1801,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line) spin_unlock(&l_mg->close_lock); pblk_line_should_sync_meta(pblk); - - } static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 537e98f2b24a2d67b4b23b8c4b9a135672d27848..88b632787abd65dda14ea979ad914640bcc2c451 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -181,7 +181,8 @@ static int pblk_rwb_init(struct pblk *pblk) unsigned int power_size, power_seg_sz; int pgs_in_buffer; - pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns; + pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt) + * geo->all_luns; if (write_buffer_size && (write_buffer_size > pgs_in_buffer)) buffer_size = write_buffer_size; @@ -371,9 +372,11 @@ static int pblk_core_init(struct pblk *pblk) atomic64_set(&pblk->nr_flush, 0); pblk->nr_flush_rst = 0; - pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE); + pblk->min_write_pgs = geo->ws_opt; max_write_ppas = pblk->min_write_pgs * geo->all_luns; pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); + pblk->max_write_pgs = min_t(int, pblk->max_write_pgs, + queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT)); pblk_set_sec_per_write(pblk, pblk->min_write_pgs); if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) { @@ -1083,7 
+1086,8 @@ static int pblk_lines_init(struct pblk *pblk) if (!nr_free_chks) { pblk_err(pblk, "too many bad blocks prevent for sane instance\n"); - return -EINTR; + ret = -EINTR; + goto fail_free_lines; } pblk_set_provision(pblk, nr_free_chks); diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index e232e47e13532ea0d27afc87b59b5fb5d011fc76..df75d9caec45e6ccf2bb4cfbf6043c3d8a68c260 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -956,12 +956,14 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) } } - spin_lock(&l_mg->free_lock); if (!open_lines) { + spin_lock(&l_mg->free_lock); WARN_ON_ONCE(!test_and_clear_bit(meta_line, &l_mg->meta_bitmap)); + spin_unlock(&l_mg->free_lock); pblk_line_replace_data(pblk); } else { + spin_lock(&l_mg->free_lock); /* Allocate next line for preparation */ l_mg->data_next = pblk_line_get(pblk); if (l_mg->data_next) { @@ -969,8 +971,8 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) l_mg->data_next->type = PBLK_LINETYPE_DATA; is_next = 1; } + spin_unlock(&l_mg->free_lock); } - spin_unlock(&l_mg->free_lock); if (is_next) pblk_line_erase(pblk, l_mg->data_next); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 9fc3dfa168b4bb40f3b43043c02200c4aa6a1b9d..bdc86ee4c77935fa4f68be4bca2dbd567ed6a961 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -262,8 +262,14 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page) sec_in_line = l_mg->data_line->sec_in_line; meta_weight = bitmap_weight(&l_mg->meta_bitmap, PBLK_DATA_LINES); - map_weight = bitmap_weight(l_mg->data_line->map_bitmap, + + spin_lock(&l_mg->data_line->lock); + if (l_mg->data_line->map_bitmap) + map_weight = bitmap_weight(l_mg->data_line->map_bitmap, lm->sec_per_line); + else + map_weight = 0; + spin_unlock(&l_mg->data_line->lock); } spin_unlock(&l_mg->free_lock); @@ -337,7 +343,6 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad, { int sz; - sz = snprintf(page, PAGE_SIZE, "user:%lld gc:%lld pad:%lld WA:", user, gc, pad); @@ -349,7 +354,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad, u32 wa_frac; wa_int = (user + gc + pad) * 100000; - wa_int = div_u64(wa_int, user); + wa_int = div64_u64(wa_int, user); wa_int = div_u64_rem(wa_int, 100000, &wa_frac); sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n", diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index ee774a86cf1e6ee017f942c3c3a1ba5cdf602e50..c3e038d4b22e17fe0d09a319a0e6864c7d92836c 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -158,9 +158,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry, w_ctx = &entry->w_ctx; /* Check if the lba has been overwritten */ - ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba); - if (!pblk_ppa_comp(ppa_l2p, entry->cacheline)) - w_ctx->lba = ADDR_EMPTY; + if (w_ctx->lba != ADDR_EMPTY) { + ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba); + if (!pblk_ppa_comp(ppa_l2p, entry->cacheline)) + w_ctx->lba = ADDR_EMPTY; + } /* Mark up the entry as submittable again */ flags = READ_ONCE(w_ctx->flags); @@ -417,12 +419,11 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); } + spin_lock(&l_mg->close_lock); emeta->mem += rq_len; - if (emeta->mem >= lm->emeta_len[0]) { - spin_lock(&l_mg->close_lock); + if (emeta->mem >= lm->emeta_len[0]) list_del(&meta_line->list); - spin_unlock(&l_mg->close_lock); - 
} + spin_unlock(&l_mg->close_lock); pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); @@ -491,14 +492,15 @@ static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line; spin_lock(&l_mg->close_lock); -retry: if (list_empty(&l_mg->emeta_list)) { spin_unlock(&l_mg->close_lock); return NULL; } meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list); - if (meta_line->emeta->mem >= lm->emeta_len[0]) - goto retry; + if (meta_line->emeta->mem >= lm->emeta_len[0]) { + spin_unlock(&l_mg->close_lock); + return NULL; + } spin_unlock(&l_mg->close_lock); if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd)) diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index c8e078b911c743e048fa9588c63a042aa49ffd0c..f76e8618ead4817614b53168be7d653c0279d27d 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -64,7 +64,7 @@ anslcd_write( struct file * file, const char __user * buf, printk(KERN_DEBUG "LCD: write\n"); #endif - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; mutex_lock(&anslcd_mutex); diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index cf6f7d52d6beef08a7ea9c15eff259f3578a17ed..e1773524f4f1cfac93ca1ab04376723650889a01 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -151,22 +151,19 @@ static int macii_probe(void) /* Initialize the driver */ int macii_init(void) { - unsigned long flags; int err; - local_irq_save(flags); - err = macii_init_via(); - if (err) goto out; + if (err) + return err; err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB", macii_interrupt); - if (err) goto out; + if (err) + return err; macii_state = idle; -out: - local_irq_restore(flags); - return err; + return 0; } /* initialize the hardware */ diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index d72c450aebe5c3aa79bf8322d93507aef8ad990c..d832fb3f342b5d85f351d2b21d533c9aa16f3721 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2155,7 +2155,7 @@ pmu_read(struct file *file, char __user *buf, if (count < 1 || !pp) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; spin_lock_irqsave(&pp->lock, flags); diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index da7f4fc1a51d17b0824c661d4cb473cf5f4165c4..a0f61eb853c55155e6f4f2df37f7bb2c53a1381c 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -22,14 +22,6 @@ #define VERSION "1.0" -#define DEBUG - -#ifdef DEBUG -#define DBG(args...) printk(args) -#else -#define DBG(args...) 
do { } while(0) -#endif - /* If the cache is older than 800ms we'll refetch it */ #define MAX_AGE msecs_to_jiffies(800) @@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id, buf[i+2] = data[3]; buf[i+3] = data[2]; } -#ifdef DEBUG - DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id); - for (i = 0; i < len; ++i) - DBG(" %x", buf[i]); - DBG("\n"); -#endif + printk(KERN_DEBUG "sat %d partition %x:", sat_id, id); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); if (size) *size = len; return (struct smu_sdbp_header *) buf; @@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat) if (err < 0) return err; sat->last_read = jiffies; + #ifdef LOTSA_DEBUG { int i; - DBG(KERN_DEBUG "wf_sat_get: data is"); - for (i = 0; i < 16; ++i) - DBG(" %.2x", sat->cache[i]); - DBG("\n"); + printk(KERN_DEBUG "wf_sat_get: data is"); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, + 16, 1, sat->cache, 16, false); } #endif return 0; diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 841c005d8ebb2f720047a984ff25c0d974a37dff..461e2397dc88387b8c1b0e5aa36835ed7f1d96d8 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -21,6 +21,12 @@ config IMX_MBOX help Mailbox implementation for i.MX Messaging Unit (MU). +config PHYTIUM_MBOX + tristate "Phytium SoC Mailbox Support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides the support for the Phytium mailbox controller. + config PLATFORM_MHU tristate "Platform MHU Mailbox" depends on OF diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index c818b5d011aef28ae2e4632c3e93ff05f446590d..de3cbe3ffa44e7969a82815273fff688ab147c3b 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o +obj-$(CONFIG_PHYTIUM_MBOX) += phytium_mailbox.o + obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index 8ab077ff58f4a8a58cebf7a8d3aa1a93417b088e..96bcabfebc23937f477acaa3a9ce835a0b0ce36f 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ - writel_relaxed(0x0, ring + RING_CONTROL); + writel_relaxed(0x0, ring->regs + RING_CONTROL); do { - if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)) break; mdelay(1); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 363d35d5e49dcb22bec03bf748536dee93b301d8..2f47023cab2b226adaf0526529efa0f01d9ae367 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -214,8 +214,10 @@ static void imx_mu_shutdown(struct mbox_chan *chan) struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox); struct imx_mu_con_priv *cp = chan->con_priv; - if (cp->type == IMX_MU_TYPE_TXDB) + if (cp->type == IMX_MU_TYPE_TXDB) { tasklet_kill(&cp->txdb_tasklet); + return; + } imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx)); diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 58bfafc34bc46978121d81c63662677392499f20..129b3656c453a68eb295b859baac6a26eac86a65 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -363,22 +363,24 
@@ static int mbox_test_probe(struct platform_device *pdev) /* It's okay for MMIO to be NULL */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - size = resource_size(res); tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res); - if (PTR_ERR(tdev->tx_mmio) == -EBUSY) + if (PTR_ERR(tdev->tx_mmio) == -EBUSY) { /* if reserved area in SRAM, try just ioremap */ + size = resource_size(res); tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); - else if (IS_ERR(tdev->tx_mmio)) + } else if (IS_ERR(tdev->tx_mmio)) { tdev->tx_mmio = NULL; + } /* If specified, second reg entry is Rx MMIO */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - size = resource_size(res); tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res); - if (PTR_ERR(tdev->rx_mmio) == -EBUSY) + if (PTR_ERR(tdev->rx_mmio) == -EBUSY) { + size = resource_size(res); tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); - else if (IS_ERR(tdev->rx_mmio)) + } else if (IS_ERR(tdev->rx_mmio)) { tdev->rx_mmio = tdev->tx_mmio; + } tdev->tx_channel = mbox_test_request_channel(pdev, "tx"); tdev->rx_channel = mbox_test_request_channel(pdev, "rx"); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 674b35f402f5e939833bdbde4a39399689d653f9..055c90b8253cbe37749cb94338ef45b01574056b 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -391,11 +391,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, of_property_for_each_string(np, "mbox-names", prop, mbox_name) { if (!strncmp(name, mbox_name, strlen(name))) - break; + return mbox_request_channel(cl, index); index++; } - return mbox_request_channel(cl, index); + dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", + __func__, name); + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(mbox_request_channel_byname); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 311e91b1a14f3f24ac2f861d882d38a026281b7b..08a0a3517138e6c78943f7eac9454178726ea9b5 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -382,7 +382,7 @@ static const struct mbox_chan_ops pcc_chan_ops = { * * This gets called for each entry in the PCC table. */ -static int parse_pcc_subspace(struct acpi_subtable_header *header, +static int parse_pcc_subspace(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header; @@ -461,8 +461,11 @@ static int __init acpi_pcc_probe(void) count = acpi_table_parse_entries_array(ACPI_SIG_PCCT, sizeof(struct acpi_table_pcct), proc, ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES); - if (count == 0 || count > MAX_PCC_SUBSPACES) { - pr_warn("Invalid PCCT: %d PCC subspaces\n", count); + if (count <= 0 || count > MAX_PCC_SUBSPACES) { + if (count < 0) + pr_warn("Error parsing PCC subspaces from PCCT\n"); + else + pr_warn("Invalid PCCT: %d PCC subspaces\n", count); return -EINVAL; } diff --git a/drivers/mailbox/phytium_mailbox.c b/drivers/mailbox/phytium_mailbox.c new file mode 100644 index 0000000000000000000000000000000000000000..b6dd69582f279afeda02ac8024cf02fd338d8a02 --- /dev/null +++ b/drivers/mailbox/phytium_mailbox.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC mailbox driver + * + * Copyright (c) 2020 Phytium Corporation. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INTR_STAT 0x0 +#define INTR_SET 0x8 +#define INTR_CLR 0x10 + +#define TX_REG 0x100 + +#define NR_CHANS 1 + +struct phytium_mbox_link { + unsigned int irq; + void __iomem *tx_reg; + void __iomem *rx_reg; +}; + +struct phytium_mbox { + void __iomem *base; + struct phytium_mbox_link mlink; + struct mbox_chan chan; + struct mbox_controller mbox; +}; + +static irqreturn_t phytium_mbox_rx_irq(int irq, void *ch) +{ + struct mbox_chan *chan = ch; + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + + val = readl_relaxed(mlink->rx_reg + INTR_STAT); + if (!val) + return IRQ_NONE; + + mbox_chan_received_data(chan, (void *)&val); + + writel_relaxed(val, mlink->rx_reg + INTR_CLR); + + return IRQ_HANDLED; +} + +static int phytium_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 *arg = data; + + writel_relaxed(*arg, mlink->tx_reg + INTR_SET); + + return 0; +} + +static int phytium_mbox_startup(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + int ret; + + val = readl_relaxed(mlink->tx_reg + INTR_STAT); + writel_relaxed(val, mlink->tx_reg + INTR_CLR); + + ret = request_irq(mlink->irq, phytium_mbox_rx_irq, + IRQF_SHARED, "phytium_mbox_link", chan); + if (ret) { + dev_err(chan->mbox->dev, + "Unable to acquire IRQ %d\n", mlink->irq); + } + + return ret; +} + +static void phytium_mbox_shutdown(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + + free_irq(mlink->irq, chan); +} + +static bool phytium_mbox_last_tx_done(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT); + + return (val == (u32)(1U << 31)); +} + +static const struct mbox_chan_ops phytium_mbox_ops = { + .send_data = phytium_mbox_send_data, + .startup = phytium_mbox_startup, + .shutdown = phytium_mbox_shutdown, + .last_tx_done = phytium_mbox_last_tx_done, +}; + +static const struct acpi_device_id phytium_mbox_acpi_match[] = { + { "PHYT0009", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_mbox_acpi_match); + +static const struct of_device_id phytium_mbox_of_match[] = { + { .compatible = "phytium,mbox", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phytium_mbox_of_match); + +static int phytium_mbox_probe(struct platform_device *pdev) +{ + struct phytium_mbox *mbox; + struct resource *res; + int err, irq; + + /* Allocate memory for device */ + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mbox->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mbox->base)) { + dev_err(&pdev->dev, "ioremap base failed\n"); + return PTR_ERR(mbox->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "cannot obtain irq\n"); + return irq; + } + + mbox->chan.con_priv = &mbox->mlink; + mbox->mlink.irq = irq; + mbox->mlink.rx_reg = mbox->base; + mbox->mlink.tx_reg = mbox->mlink.rx_reg + TX_REG; + + mbox->mbox.dev = &pdev->dev; + mbox->mbox.chans = &mbox->chan; + mbox->mbox.num_chans = NR_CHANS; + mbox->mbox.ops = &phytium_mbox_ops; + mbox->mbox.txdone_irq 
= false; + mbox->mbox.txdone_poll = true; + mbox->mbox.txpoll_period = 1; + + platform_set_drvdata(pdev, mbox); + + err = mbox_controller_register(&mbox->mbox); + if (err) { + dev_err(&pdev->dev, "Failed to register mailboxes %d\n", err); + goto fail; + } + + dev_info(&pdev->dev, "Phytium SoC Mailbox registered\n"); +fail: + return err; +} + +static int phytium_mbox_remove(struct platform_device *pdev) +{ + struct phytium_mbox *mbox = platform_get_drvdata(pdev); + + mbox_controller_unregister(&mbox->mbox); + + return 0; +} + +static struct platform_driver phytium_mbox_driver = { + .probe = phytium_mbox_probe, + .remove = phytium_mbox_remove, + .driver = { + .name = "phytium-mbox", + .of_match_table = of_match_ptr(phytium_mbox_of_match), + .acpi_match_table = ACPI_PTR(phytium_mbox_acpi_match), + }, +}; + +module_platform_driver(phytium_mbox_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium SoC Mailbox Driver"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_AUTHOR("Chen Ziqiang "); diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c index 333ed4a9d4b8fc62d14fdc9abc8db0242a80026c..5255dcb551a788a3ca4802e3c9cfc13fe60f83c6 100644 --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c @@ -55,7 +55,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = { static int qcom_apcs_ipc_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct qcom_apcs_ipc *apcs; struct regmap *regmap; struct resource *res; @@ -63,6 +62,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) void __iomem *base; unsigned long i; int ret; + const struct of_device_id apcs_clk_match_table[] = { + { .compatible = "qcom,msm8916-apcs-kpss-global", }, + { .compatible = "qcom,qcs404-apcs-apps-global", }, + {} + }; apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL); if (!apcs) @@ -97,7 +101,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) return ret; } - if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) { + if (of_match_device(apcs_clk_match_table, &pdev->dev)) { apcs->clk = platform_device_register_data(&pdev->dev, "qcom-apcs-msm8916-clk", -1, NULL, 0); diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c index 533b0da5235d43b52e1f75ab68537ff5f7ab1e35..e31322225e93934d6382abdba527c62093c9e032 100644 --- a/drivers/mailbox/stm32-ipcc.c +++ b/drivers/mailbox/stm32-ipcc.c @@ -8,9 +8,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -50,6 +50,7 @@ struct stm32_ipcc { void __iomem *reg_base; void __iomem *reg_proc; struct clk *clk; + spinlock_t lock; /* protect access to IPCC registers */ int irqs[IPCC_IRQ_NUM]; int wkp; u32 proc_id; @@ -58,14 +59,24 @@ struct stm32_ipcc { u32 xmr; }; -static inline void stm32_ipcc_set_bits(void __iomem *reg, u32 mask) +static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg, + u32 mask) { + unsigned long flags; + + spin_lock_irqsave(lock, flags); writel_relaxed(readl_relaxed(reg) | mask, reg); + spin_unlock_irqrestore(lock, flags); } -static inline void stm32_ipcc_clr_bits(void __iomem *reg, u32 mask) +static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg, + u32 mask) { + unsigned long flags; + + spin_lock_irqsave(lock, flags); writel_relaxed(readl_relaxed(reg) & ~mask, reg); + spin_unlock_irqrestore(lock, flags); } static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data) @@ -92,7 +103,7 @@ static irqreturn_t 
stm32_ipcc_rx_irq(int irq, void *data) mbox_chan_received_data(&ipcc->controller.chans[chan], NULL); - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR, RX_BIT_CHAN(chan)); ret = IRQ_HANDLED; @@ -121,7 +132,7 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data) dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan); /* mask 'tx channel free' interrupt */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan)); mbox_chan_txdone(&ipcc->controller.chans[chan], 0); @@ -141,10 +152,12 @@ static int stm32_ipcc_send_data(struct mbox_chan *link, void *data) dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan); /* set channel n occupied */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, TX_BIT_CHAN(chan)); + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR, + TX_BIT_CHAN(chan)); /* unmask 'tx channel free' interrupt */ - stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan)); + stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, + TX_BIT_CHAN(chan)); return 0; } @@ -163,7 +176,8 @@ static int stm32_ipcc_startup(struct mbox_chan *link) } /* unmask 'rx channel occupied' interrupt */ - stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan)); + stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, + RX_BIT_CHAN(chan)); return 0; } @@ -175,7 +189,7 @@ static void stm32_ipcc_shutdown(struct mbox_chan *link) controller); /* mask rx/tx interrupt */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan)); clk_disable_unprepare(ipcc->clk); @@ -208,6 +222,8 @@ static int stm32_ipcc_probe(struct platform_device *pdev) if (!ipcc) return -ENOMEM; + spin_lock_init(&ipcc->lock); + /* proc_id */ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) { dev_err(dev, "Missing st,proc-id\n"); @@ -240,9 +256,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev) /* irq */ for (i = 0; i < IPCC_IRQ_NUM; i++) { - ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]); + ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]); if (ipcc->irqs[i] < 0) { - dev_err(dev, "no IRQ specified %s\n", irq_name[i]); + if (ipcc->irqs[i] != -EPROBE_DEFER) + dev_err(dev, "no IRQ specified %s\n", + irq_name[i]); ret = ipcc->irqs[i]; goto err_clk; } @@ -257,15 +275,17 @@ static int stm32_ipcc_probe(struct platform_device *pdev) } /* mask and enable rx/tx irq */ - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR, + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR, RX_BIT_MASK | TX_BIT_MASK); - stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE); + stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR, + XCR_RXOIE | XCR_TXOIE); /* wakeup */ if (of_property_read_bool(np, "wakeup-source")) { - ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup"); + ipcc->wkp = platform_get_irq_byname(pdev, "wakeup"); if (ipcc->wkp < 0) { - dev_err(dev, "could not get wakeup IRQ\n"); + if (ipcc->wkp != -EPROBE_DEFER) + dev_err(dev, "could not get wakeup IRQ\n"); ret = ipcc->wkp; goto err_clk; } diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index bb5c5692dedc5af698e1c6b0ba3bea9c38efffab..4a2002ba2e23f41dbae127043c33ba0799319ee7 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -280,8 +280,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) bus_nr = ida_simple_get(&mcb_ida, 0, 0, 
GFP_KERNEL); if (bus_nr < 0) { - rc = bus_nr; - goto err_free; + kfree(bus); + return ERR_PTR(bus_nr); } bus->bus_nr = bus_nr; @@ -296,12 +296,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) dev_set_name(&bus->dev, "mcb:%d", bus_nr); rc = device_add(&bus->dev); if (rc) - goto err_free; + goto err_put; return bus; -err_free: - put_device(carrier); - kfree(bus); + +err_put: + put_device(&bus->dev); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(mcb_alloc_bus); @@ -390,17 +390,13 @@ EXPORT_SYMBOL_GPL(mcb_free_dev); static int __mcb_bus_add_devices(struct device *dev, void *data) { - struct mcb_device *mdev = to_mcb_device(dev); int retval; - if (mdev->is_added) - return 0; - retval = device_attach(dev); - if (retval < 0) + if (retval < 0) { dev_err(dev, "Error adding device (%d)\n", retval); - - mdev->is_added = true; + return retval; + } return 0; } diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 7369bda3442fbc10197a00b35553cbac3c365adf..d5bfeef602d8dbdac679efcdbea4037299840012 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -98,8 +98,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, mdev->mem.end = mdev->mem.start + size - 1; mdev->mem.flags = IORESOURCE_MEM; - mdev->is_added = false; - ret = mcb_device_register(bus, mdev); if (ret < 0) goto err; diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 8b8c123cae66fb5d6c83d709ced6d9cca1d4d157..6d052c11d66d3164bf0a807dfdb5966ee9d402fe 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -479,6 +479,18 @@ config DM_VERITY If unsure, say N. +config DM_VERITY_VERIFY_ROOTHASH_SIG + def_bool n + bool "Verity data device root hash signature verification support" + depends on DM_VERITY + select SYSTEM_DATA_VERIFICATION + help + Add ability for dm-verity device to be validated if the + pre-generated tree of cryptographic checksums passed has a pkcs#7 + signature file that can validate the roothash of the tree. + + If unsure, say N. 
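A rough userspace sketch of how a signed root hash is typically supplied (illustrative only, not part of this patch): the pkcs#7 signature must be visible to the kernel as a keyring key before the verity table is loaded, and the table then refers to that key by description — in mainline this is the optional root_hash_sig_key_desc table argument, if memory serves. The key description "dm_verity_sig" and file name "roothash.p7s" below are made-up examples; build with -lkeyutils.

/*
 * Hypothetical helper: load a pkcs#7 signature blob of the verity root
 * hash onto the user keyring as a "user" key, under a description that a
 * verity table could later name (assumption: "root_hash_sig_key_desc
 * dm_verity_sig"). Error handling is minimal.
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	static char sig[64 * 1024];	/* buffer for the signature blob */
	size_t len;
	key_serial_t key;
	FILE *f = fopen("roothash.p7s", "rb");	/* made-up file name */

	if (!f)
		return 1;
	len = fread(sig, 1, sizeof(sig), f);
	fclose(f);

	/* Place the signature on the user keyring under a known description. */
	key = add_key("user", "dm_verity_sig", sig, len, KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("signature loaded as key %d\n", (int)key);
	return 0;
}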
+ config DM_VERITY_FEC bool "Verity forward error correction support" depends on DM_VERITY diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 822f4e8753bc4b197b93a90df936bd0eeeff464a..32d6d6e4a756e7a5d59b19e8633761b3c7f5f63f 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -76,3 +76,7 @@ endif ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif + +ifeq ($(CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG),y) +dm-verity-objs += dm-verity-verify-sig.o +endif diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile index d26b35195825223ff7d45d9cabbcb4ab88761f62..41e323f7e1815ca5d954cf3ed1ab7b1995ff9eb4 100644 --- a/drivers/md/bcache/Makefile +++ b/drivers/md/bcache/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_BCACHE) += bcache.o bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\ - io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\ + io.o journal.o movinggc.o request.o stats.o acache.o super.o sysfs.o trace.o\ util.o writeback.o CFLAGS_request.o += -Iblock diff --git a/drivers/md/bcache/acache.c b/drivers/md/bcache/acache.c new file mode 100644 index 0000000000000000000000000000000000000000..e87c53d4d6091c284ddf6756134543ba47909061 --- /dev/null +++ b/drivers/md/bcache/acache.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "acache.h" +#include "request.h" + +#include + +#define DEV_NAME "acache" + +int acache_dev_size = (1024 * 4096 + 4096); + +module_param_named(acache_size, acache_dev_size, int, 0444); +MODULE_PARM_DESC(acache_size, "size of ring buffer for size in byte"); + +int acache_prefetch_workers = 1000; + +module_param_named(prefetch_workers, acache_prefetch_workers, int, 0444); +MODULE_PARM_DESC(prefetch_workers, "num of workers for processing prefetch requests"); + +struct inflight_list_head { + struct list_head entry; + spinlock_t io_lock; + bool initialized; +}; + +struct prefetch_worker { + struct acache_info s; + struct work_struct work; + struct list_head list; +}; + +struct acache_device { + bool initialized; + + dev_t devno; + struct cdev cdev; + struct class *class; + struct mem_reg *mem_regionp; + + struct acache_info *readbuf; + struct acache_info *writebuf; + + struct acache_circ *acache_info_circ; + + struct inflight_list_head inflight_list; + + struct workqueue_struct *wq; + struct prefetch_worker *prefetch_workers; + struct list_head prefetch_workers_free; + spinlock_t prefetch_workers_free_list_lock; +} adev; + +#define MAX_TRANSFER_SIZE (1024 * 1024) + +static atomic_t acache_opened_dev = ATOMIC_INIT(0); +static struct acache_metadata metadata; + + +int acache_open(struct inode *inode, struct file *filp) +{ + struct mem_reg *dev; + + int minor = MINOR(inode->i_rdev); + + if (minor >= ACACHE_NR_DEVS) + return -ENODEV; + if (atomic_xchg(&acache_opened_dev, 1)) + return -EPERM; + + dev = &adev.mem_regionp[minor]; + + filp->private_data = dev; + + return 0; +} + +int acache_release(struct inode *inode, struct file *filp) +{ + atomic_dec(&acache_opened_dev); + return 0; +} + +ssize_t read_circ_slice(struct acache_circ *circ, struct acache_info *buf, + size_t size) +{ + unsigned long first, todo, flags; + + spin_lock_irqsave(&circ->lock, flags); + + todo = CIRC_CNT(circ->head, circ->tail, circ->size); + if (todo == 0) { + spin_unlock_irqrestore(&circ->lock, flags); + return 0; + } + if (todo > size / sizeof(struct acache_info)) + todo = size / 
sizeof(struct acache_info); + + first = CIRC_CNT_TO_END(circ->head, circ->tail, circ->size); + if (first > todo) + first = todo; + + memcpy(buf, circ->data + circ->tail, first * sizeof(struct acache_info)); + if (first < todo) + memcpy(buf + first, circ->data, + (todo - first) * sizeof(struct acache_info)); + circ->tail = (circ->tail + todo) & (circ->size - 1); + + spin_unlock_irqrestore(&circ->lock, flags); + return todo * sizeof(struct acache_info); +} + +static ssize_t acache_read(struct file *filp, char __user *buf, + size_t size, loff_t *ppos) +{ + long ret, cut; + + if (metadata.conntype != ACACHE_READWRITE_CONN) + return -EINVAL; + + if (size > MAX_TRANSFER_SIZE) + size = MAX_TRANSFER_SIZE; + + ret = read_circ_slice(adev.acache_info_circ, adev.readbuf, size); + if (ret <= 0) + return ret; + + cut = copy_to_user(buf, adev.readbuf, size); + return ret - cut; +} + +int process_one_request(struct acache_info *item); +static void prefetch_worker_func(struct work_struct *work) +{ + struct prefetch_worker *sw = + container_of(work, struct prefetch_worker, work); + + process_one_request(&sw->s); + spin_lock(&adev.prefetch_workers_free_list_lock); + list_add_tail(&sw->list, &adev.prefetch_workers_free); + spin_unlock(&adev.prefetch_workers_free_list_lock); +} + +static int queue_prefetch_item(struct acache_info *s) +{ + struct prefetch_worker *sw; + + spin_lock(&adev.prefetch_workers_free_list_lock); + sw = list_first_entry_or_null(&adev.prefetch_workers_free, + struct prefetch_worker, list); + if (!sw) { + spin_unlock(&adev.prefetch_workers_free_list_lock); + return -1; + } + list_del_init(&sw->list); + spin_unlock(&adev.prefetch_workers_free_list_lock); + + memcpy(&sw->s, s, sizeof(struct acache_info)); + INIT_WORK(&sw->work, prefetch_worker_func); + queue_work(adev.wq, &sw->work); + return 0; +} + +static ssize_t acache_write(struct file *filp, const char __user *buf, + size_t size, loff_t *ppos) +{ + long cut; + int i; + + if (metadata.conntype != ACACHE_READWRITE_CONN) + return -EINVAL; + + if (size > MAX_TRANSFER_SIZE) + size = MAX_TRANSFER_SIZE; + + cut = copy_from_user(adev.writebuf, buf, size); + for (i = 0; i < (size - cut) / sizeof(struct acache_info); i++) { + if (queue_prefetch_item(adev.writebuf + i)) + break; + } + return i * sizeof(struct acache_info); +} + +static long acache_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case ACACHE_GET_METADATA: + return copy_to_user((struct acache_metadata __user *)arg, + &metadata, sizeof(struct acache_metadata)); + default: + return -EINVAL; + } +} + +static const struct file_operations acache_fops = { + .owner = THIS_MODULE, + .read = acache_read, + .write = acache_write, + .open = acache_open, + .release = acache_release, + .unlocked_ioctl = acache_ioctl, +}; + +void save_circ_item(struct acache_info *data) +{ + unsigned long flags; + struct acache_circ *circ = adev.acache_info_circ; + + spin_lock_irqsave(&circ->lock, flags); + if (CIRC_SPACE(circ->head, circ->tail, circ->size) >= 1) { + memcpy(&circ->data[circ->head], data, sizeof(struct acache_info)); + circ->head = (circ->head + 1) & (circ->size - 1); + } else { + pr_debug("ringbuffer is full; discard new request."); + } + spin_unlock_irqrestore(&circ->lock, flags); +} + +void init_acache_circ(struct acache_circ **circ, void *startaddr) +{ + *circ = (struct acache_circ *)startaddr; + (*circ)->head = 0; + (*circ)->tail = 0; + (*circ)->size = ACACHE_CIRC_SIZE; + spin_lock_init(&(*circ)->lock); +} + +static void acache_free_mem(void) +{ + int i; + 
+ for (i = 0; i < ACACHE_NR_DEVS; i++) + vfree(adev.mem_regionp[i].data); + + if (adev.readbuf) { + vfree(adev.readbuf); + adev.readbuf = NULL; + } + if (adev.writebuf) { + vfree(adev.writebuf); + adev.writebuf = NULL; + } + + kfree(adev.prefetch_workers); + adev.prefetch_workers = NULL; +} + +int acache_prefetch_init(struct acache_device *adev) +{ + int i; + + if (acache_prefetch_workers <= 0) { + pr_err("acache_prefetch_workers should not be less than zero"); + return -1; + } + adev->prefetch_workers = kmalloc_array(acache_prefetch_workers, + sizeof(struct prefetch_worker), + GFP_KERNEL); + if (!adev->prefetch_workers) + goto fail_prefetch_workers_alloc; + + INIT_LIST_HEAD(&adev->prefetch_workers_free); + spin_lock_init(&adev->prefetch_workers_free_list_lock); + for (i = 0; i < acache_prefetch_workers; i++) { + spin_lock(&adev->prefetch_workers_free_list_lock); + list_add_tail(&adev->prefetch_workers[i].list, + &adev->prefetch_workers_free); + spin_unlock(&adev->prefetch_workers_free_list_lock); + } + + adev->wq = alloc_workqueue("acache_prefetch", WQ_MEM_RECLAIM, 0); + if (!adev->wq) + goto fail_workqueue_alloc; + + return 0; + +fail_workqueue_alloc: + kfree(adev->prefetch_workers); + adev->prefetch_workers = NULL; +fail_prefetch_workers_alloc: + if (adev->wq) + destroy_workqueue(adev->wq); + return -1; +} + +int acache_dev_init(void) +{ + int ret; + int i; + int major; + struct device *dev; + + inflight_list_ops.init(); + major = alloc_chrdev_region(&adev.devno, 0, ACACHE_NR_DEVS, DEV_NAME); + if (major < 0) { + pr_err("failed to allocate chrdev region: %d", major); + ret = major; + goto fail_allocdev; + } + + adev.class = class_create(THIS_MODULE, DEV_NAME); + if (IS_ERR(adev.class)) { + pr_err("failed to create acache class"); + ret = -1; + goto fail_class; + } + + if (acache_dev_size < PAGE_SIZE) { + pr_err("acache_dev_size should not be less than PAGE_SIZE"); + ret = -1; + goto fail_dev_add; + } + metadata.devsize = acache_dev_size; + metadata.magic = ACACHE_MAGIC; + metadata.conntype = ACACHE_READWRITE_CONN; + cdev_init(&adev.cdev, &acache_fops); + adev.cdev.owner = THIS_MODULE; + + ret = cdev_add(&adev.cdev, adev.devno, ACACHE_NR_DEVS); + if (ret < 0) { + pr_err("failed to add cdev"); + goto fail_dev_add; + } + + dev = device_create(adev.class, NULL, adev.devno, NULL, DEV_NAME); + if (IS_ERR(dev)) { + pr_err("Could not create device"); + ret = -1; + goto fail_device; + } + + adev.readbuf = vmalloc(MAX_TRANSFER_SIZE); + adev.writebuf = vmalloc(MAX_TRANSFER_SIZE); + if (!adev.readbuf || !adev.writebuf) { + ret = -ENOMEM; + goto fail_malloc; + } + + adev.initialized = true; + adev.mem_regionp = + kmalloc_array(ACACHE_NR_DEVS, sizeof(struct mem_reg), GFP_KERNEL); + if (!adev.mem_regionp) { + ret = -ENOMEM; + goto fail_malloc; + } + memset(adev.mem_regionp, 0, sizeof(struct mem_reg) * ACACHE_NR_DEVS); + + for (i = 0; i < ACACHE_NR_DEVS; i++) { + adev.mem_regionp[i].size = ACACHE_DEV_SIZE; + adev.mem_regionp[i].data = vmalloc(ACACHE_DEV_SIZE); + if (!adev.mem_regionp[i].data) { + ret = -ENOMEM; + goto fail_memregion_data_malloc; + } + memset(adev.mem_regionp[i].data, 0, ACACHE_DEV_SIZE); + } + + init_acache_circ(&adev.acache_info_circ, adev.mem_regionp[0].data); + ret = acache_prefetch_init(&adev); + if (ret) + goto fail_prefetch_init; + + return 0; + +fail_prefetch_init: +fail_memregion_data_malloc: + acache_free_mem(); +fail_malloc: + device_destroy(adev.class, adev.devno); +fail_device: + cdev_del(&adev.cdev); +fail_dev_add: + class_destroy(adev.class); +fail_class: + 
unregister_chrdev_region(adev.devno, ACACHE_NR_DEVS); +fail_allocdev: + inflight_list_ops.exit(); + return ret; +} + +void acache_dev_exit(void) +{ + if (!adev.initialized) + return; + + if (adev.wq) { + flush_workqueue(adev.wq); + destroy_workqueue(adev.wq); + } + device_destroy(adev.class, adev.devno); + cdev_del(&adev.cdev); + acache_free_mem(); + kfree(adev.mem_regionp); + unregister_chrdev_region(adev.devno, ACACHE_NR_DEVS); + class_destroy(adev.class); + inflight_list_ops.exit(); + kfree(adev.prefetch_workers); +} + +static struct search *__inflight_list_lookup_locked(struct search *s) +{ + struct search *iter; + struct bio *bio, *sbio; + + if (!adev.inflight_list.initialized) + return NULL; + sbio = &s->bio.bio; + list_for_each_entry(iter, &adev.inflight_list.entry, list_node) { + bio = &iter->bio.bio; + if (sbio->bi_disk == bio->bi_disk && + sbio->bi_iter.bi_sector < bio_end_sector(bio) && + bio_end_sector(sbio) > bio->bi_iter.bi_sector) { + return iter; + } + } + return NULL; +} + +static void inflight_list_init(void) +{ + INIT_LIST_HEAD(&adev.inflight_list.entry); + spin_lock_init(&adev.inflight_list.io_lock); + adev.inflight_list.initialized = true; +} + +static void inflight_list_exit(void) +{ + if (!list_empty(&adev.inflight_list.entry)) + pr_err("exiting with inflight list not empty"); +} + +static int inflight_list_insert(struct search *s) +{ + if (!adev.inflight_list.initialized) + return -1; + + init_waitqueue_head(&s->wqh); + spin_lock(&adev.inflight_list.io_lock); + list_add_tail(&s->list_node, &adev.inflight_list.entry); + spin_unlock(&adev.inflight_list.io_lock); + + trace_bcache_inflight_list_insert(s->d, s->orig_bio); + return 0; +} + +static int inflight_list_remove(struct search *s) +{ + if (!adev.inflight_list.initialized) + return -1; + + spin_lock(&adev.inflight_list.io_lock); + list_del_init(&s->list_node); + spin_unlock(&adev.inflight_list.io_lock); + + wake_up_interruptible_all(&s->wqh); + + trace_bcache_inflight_list_remove(s->d, s->orig_bio); + return 0; +} + +static bool inflight_list_wait(struct search *s) +{ + struct search *pfs = NULL; + struct cached_dev *dc; + DEFINE_WAIT(wqe); + + if (!adev.inflight_list.initialized) + return false; + + spin_lock(&adev.inflight_list.io_lock); + pfs = __inflight_list_lookup_locked(s); + if (pfs == NULL) { + spin_unlock(&adev.inflight_list.io_lock); + return false; + } + + dc = container_of(pfs->d, struct cached_dev, disk); + if (!dc->inflight_block_enable) { + spin_unlock(&adev.inflight_list.io_lock); + return true; + } + + prepare_to_wait(&pfs->wqh, &wqe, TASK_INTERRUPTIBLE); + + /* unlock here to ensure pfs not changed. 
*/ + spin_unlock(&adev.inflight_list.io_lock); + schedule(); + + finish_wait(&pfs->wqh, &wqe); + + return true; +} + +const struct inflight_queue_ops inflight_list_ops = { + .init = inflight_list_init, + .exit = inflight_list_exit, + .insert = inflight_list_insert, + .remove = inflight_list_remove, + .wait = inflight_list_wait, +}; + +struct cached_dev *get_cached_device_by_dev(dev_t dev) +{ + struct cache_set *c, *tc; + struct cached_dev *dc, *t; + + list_for_each_entry_safe(c, tc, &bch_cache_sets, list) + list_for_each_entry_safe(dc, t, &c->cached_devs, list) + if (dc->bdev->bd_dev == dev && cached_dev_get(dc)) + return dc; + + return NULL; +} + +struct bio *get_bio_by_item(struct cached_dev *dc, struct acache_info *item) +{ + struct bio *bio; + uint64_t offset = item->offset + dc->sb.data_offset; + + if (get_capacity(dc->bdev->bd_disk) < offset + (item->length >> 9)) { + pr_err("prefetch area exceeds the capacity of disk(%d:%d), end: %llx, capacity: %lx", + MAJOR(dc->bdev->bd_dev), MINOR(dc->bdev->bd_dev), + offset + (item->length >> 9), + get_capacity(dc->bdev->bd_disk)); + return NULL; + } + + bio = bio_alloc_bioset(GFP_NOWAIT, DIV_ROUND_UP(item->length >> 9, PAGE_SECTORS), &dc->disk.bio_split); + if (!bio) { + bio = bio_alloc_bioset(GFP_NOWAIT, DIV_ROUND_UP(item->length >> 9, PAGE_SECTORS), NULL); + if (!bio) + return NULL; + } + + bio_set_dev(bio, dc->bdev); + bio->bi_iter.bi_sector = item->offset + dc->sb.data_offset; + bio->bi_iter.bi_size = (item->length >> 9) << 9; + + bch_bio_map(bio, NULL); + if (bch_bio_alloc_pages(bio, __GFP_NOWARN | GFP_NOIO)) + goto out_put; + + return bio; +out_put: + bio_put(bio); + return NULL; +} + +int process_one_request(struct acache_info *item) +{ + struct cached_dev *dc; + struct bio *cache_bio; + struct search *s; + + dc = get_cached_device_by_dev(item->dev); + if (dc == NULL) + return -1; + cache_bio = get_bio_by_item(dc, item); + if (cache_bio == NULL) { + pr_err("acache: failed to alloc bio for prefetch"); + goto put_dev; + } + + s = search_alloc(cache_bio, &dc->disk, true); + + trace_bcache_prefetch_request(&dc->disk, cache_bio); + generic_start_io_acct(cache_bio->bi_disk->queue, + bio_op(cache_bio), + bio_sectors(cache_bio), + &s->d->disk->part0); + + cached_dev_read(dc, s); + return 0; + +put_dev: + cached_dev_put(dc); + return -1; +} + diff --git a/drivers/md/bcache/acache.h b/drivers/md/bcache/acache.h new file mode 100644 index 0000000000000000000000000000000000000000..3c6453d0c4da9fab6dd16f8221a1ef4ec3bd9ef2 --- /dev/null +++ b/drivers/md/bcache/acache.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _ACHACHE_INTERFACE_H_ +#define _ACHACHE_INTERFACE_H_ + +#define ACACHE_NR_DEVS 1 + +#define RING_SIZE + +#include "bcache.h" + +struct mem_reg { + char *data; + unsigned long size; +}; + +struct acache_info { + uint64_t length; + uint64_t offset; + uint64_t start_time; + dev_t dev; + int type; +}; + +enum acache_info_type { + ACACHE_INFO_READ = 0, + ACACHE_INFO_WRITE, + ACACHE_INFO_CACHE_INSERT, + ACACHE_INFO_LATENCY, +}; + +struct acache_circ { + spinlock_t lock; + int tail; + int head; + int size; + int item_size; + struct acache_info data[0]; +}; + +struct acache_metadata { + uint32_t magic; + uint32_t conntype; + uint32_t devsize; +}; + +#define ACACHE_DEV_SIZE acache_dev_size +#define ACACHE_MAGIC 2 + +enum acache_conn_types { + ACACHE_NO_CONN = 0, + ACACHE_READWRITE_CONN = 2, +}; + +#define ACACHE_CIRC_SIZE \ + ({int i = (ACACHE_DEV_SIZE - sizeof(struct acache_circ))/sizeof(struct acache_info); \ + int bits = 0; \ + 
while (i > 0) {i >>= 1; bits++; } \ + 1 << (bits - 1); }) + + +#define ACACHE_GET_METADATA _IOR('a', 1, struct acache_metadata) + +int acache_dev_init(void); +void acache_dev_exit(void); +struct acache_info *fetch_circ_item(struct acache_circ *circ); +void save_circ_item(struct acache_info *data); + +struct inflight_queue_ops { + void (*init)(void); + void (*exit)(void); + + int (*insert)(struct search *s); + int (*remove)(struct search *s); + bool (*wait)(struct search *s); +}; +extern const struct inflight_queue_ops inflight_list_ops; + +#endif diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 7a28232d868bd1b2c20081149d1ef6f3afec786a..a6ce0636f3237ebe69d4e5d38de9eae32b13fb38 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg) * possibly issue discards to them, then we add the bucket to * the free list: */ - while (!fifo_empty(&ca->free_inc)) { + while (1) { long bucket; - fifo_pop(&ca->free_inc, bucket); + if (!fifo_pop(&ca->free_inc, bucket)) + break; if (ca->discard) { mutex_unlock(&ca->set->bucket_lock); @@ -376,7 +377,10 @@ static int bch_allocator_thread(void *arg) if (!fifo_full(&ca->free_inc)) goto retry_invalidate; - bch_prio_write(ca); + if (bch_prio_write(ca, false) < 0) { + ca->invalidate_needs_gc = 1; + wake_up_gc(ca->set); + } } } out: @@ -392,6 +396,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait) struct bucket *b; long r; + + /* No allocation if CACHE_SET_IO_DISABLE bit is set */ + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags))) + return -1; + /* fastpath */ if (fifo_pop(&ca->free[RESERVE_NONE], r) || fifo_pop(&ca->free[reserve], r)) @@ -462,6 +471,7 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b) { SET_GC_MARK(b, 0); SET_GC_SECTORS_USED(b, 0); + SET_GC_DIRTY_SECTORS(b, 0); if (ca->set->avail_nbuckets < ca->set->nbuckets) { ca->set->avail_nbuckets++; @@ -483,6 +493,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, { int i; + /* No allocation if CACHE_SET_IO_DISABLE bit is set */ + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) + return -1; + lockdep_assert_held(&c->bucket_lock); BUG_ON(!n || n > c->caches_loaded || n > 8); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 954dad29e6e8fca910b0ebd24171591f2acd0831..76d5026c924a4afc86c7effafd455f7f24ccfd8d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -176,8 +176,11 @@ * - updates to non leaf nodes just happen synchronously (see btree_split()). */ -#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#ifdef pr_fmt +#undef pr_fmt +#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#endif #include #include #include @@ -199,7 +202,7 @@ struct bucket { uint16_t prio; uint8_t gen; uint8_t last_gc; /* Most out of date gen in the btree */ - uint16_t gc_mark; /* Bitfield used by GC. See below for field */ + uint32_t gc_mark; /* Bitfield used by GC. 
See below for field */ }; /* @@ -215,6 +218,7 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); #define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); +BITMASK(GC_DIRTY_SECTORS, struct bucket, gc_mark, 16, GC_SECTORS_USED_SIZE); #include "journal.h" #include "stats.h" @@ -329,6 +333,9 @@ struct cached_dev { */ atomic_t has_dirty; +#define BCH_CACHE_READA_ALL 0 +#define BCH_CACHE_READA_META_ONLY 1 + unsigned int cache_readahead_policy; struct bch_ratelimit writeback_rate; struct delayed_work writeback_rate_update; @@ -372,6 +379,9 @@ struct cached_dev { unsigned char writeback_percent; unsigned int writeback_delay; + unsigned int inflight_block_enable; + unsigned int read_bypass; + uint64_t writeback_rate_target; int64_t writeback_rate_proportional; int64_t writeback_rate_integral; @@ -390,6 +400,28 @@ struct cached_dev { unsigned int offline_seconds; char backing_dev_name[BDEVNAME_SIZE]; + + /* Count the front and writeback io bandwidth per second */ + atomic_t writeback_sector_size; + atomic_t writeback_io_num; + atomic_t front_io_num; + unsigned int writeback_sector_size_per_sec; + unsigned int writeback_io_num_per_sec; + unsigned int front_io_num_per_sec; + struct timer_list io_stat_timer; + + unsigned int writeback_state; +#define WRITEBACK_DEFAULT 0 +#define WRITEBACK_QUICK 1 +#define WRITEBACK_SLOW 2 + + /* realize for token bucket */ + spinlock_t token_lock; + unsigned int max_sector_size; + unsigned int max_io_num; + unsigned int write_token_sector_size; + unsigned int write_token_io_num; + struct timer_list token_assign_timer; }; enum alloc_reserve { @@ -705,11 +737,14 @@ struct cache_set { unsigned int gc_always_rewrite:1; unsigned int shrinker_disabled:1; unsigned int copy_gc_enabled:1; + unsigned int gc_only_dirty_data:1; #define BUCKET_HASH_BITS 12 struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; - - DECLARE_HEAP(struct btree *, flush_btree); + unsigned int cutoff_writeback_sync; + bool traffic_policy_start; + bool force_write_through; + unsigned int gc_sectors; }; struct bbio { @@ -725,6 +760,29 @@ struct bbio { struct bio bio; }; +struct get_bcache_status { + unsigned int writeback_sector_size_per_sec; + unsigned int writeback_io_num_per_sec; + unsigned int front_io_num_per_sec; + uint64_t dirty_rate; + unsigned int available; +}; + +struct set_bcache_status { + unsigned int write_token_sector_size; + unsigned int write_token_io_num; + bool traffic_policy_start; + bool force_write_through; + bool copy_gc_enabled; + bool trigger_gc; + unsigned int writeback_state; + unsigned int gc_sectors; + unsigned int cutoff_writeback_sync; +}; +#define BCACHE_MAJOR 'B' +#define BCACHE_GET_WRITE_STATUS _IOR(BCACHE_MAJOR, 0x0, struct get_bcache_status) +#define BCACHE_SET_WRITE_STATUS _IOW(BCACHE_MAJOR, 0x1, struct set_bcache_status) + #define BTREE_PRIO USHRT_MAX #define INITIAL_PRIO 32768U @@ -961,7 +1019,7 @@ bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...); -void bch_prio_write(struct cache *ca); +int bch_prio_write(struct cache *ca, bool wait); void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8f07fa6e17394b5c0c54f7250f960f7361fd2111..268f1b6850840ad70711bca42276da11e6d5c18a 100644 --- a/drivers/md/bcache/bset.c +++ 
b/drivers/md/bcache/bset.c @@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; struct btree_iter iter; + struct bkey preceding_key_on_stack = ZERO_KEY; + struct bkey *preceding_key_p = &preceding_key_on_stack; BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); - m = bch_btree_iter_init(b, &iter, b->ops->is_extents - ? PRECEDING_KEY(&START_KEY(k)) - : PRECEDING_KEY(k)); + /* + * If k has preceding key, preceding_key_p will be set to address + * of k's preceding key; otherwise preceding_key_p will be set + * to NULL inside preceding_key(). + */ + if (b->ops->is_extents) + preceding_key(&START_KEY(k), &preceding_key_p); + else + preceding_key(k, &preceding_key_p); + + m = bch_btree_iter_init(b, &iter, preceding_key_p); if (b->ops->insert_fixup(b, k, &iter, replace_key)) return status; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index bac76aabca6d7977abfecfaee4d884719f3699f4..c71365e7c1fac7fc87b051b10e8697edbe29f8c4 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k) return __bch_cut_back(where, k); } -#define PRECEDING_KEY(_k) \ -({ \ - struct bkey *_ret = NULL; \ - \ - if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \ - _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \ - \ - if (!_ret->low) \ - _ret->high--; \ - _ret->low--; \ - } \ - \ - _ret; \ -}) +/* + * Pointer '*preceding_key_p' points to a memory object to store preceding + * key of k. If the preceding key does not exist, set '*preceding_key_p' to + * NULL. So the caller of preceding_key() needs to take care of memory + * which '*preceding_key_p' pointed to before calling preceding_key(). + * Currently the only caller of preceding_key() is bch_btree_insert_key(), + * and it points to an on-stack variable, so the memory release is handled + * by stackframe itself. + */ +static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p) +{ + if (KEY_INODE(k) || KEY_OFFSET(k)) { + (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0); + if (!(*preceding_key_p)->low) + (*preceding_key_p)->high--; + (*preceding_key_p)->low--; + } else { + (*preceding_key_p) = NULL; + } +} static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k) { diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index e7d4817681f223f456330f9c63eb6e958f2b9a76..24e70ee342f0df8f9566f1758a44b4e2b6ae81bd 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -35,7 +35,7 @@ #include #include #include - +#include #include /* @@ -101,65 +101,6 @@ #define insert_lock(s, b) ((b)->level <= (s)->lock) -/* - * These macros are for recursing down the btree - they handle the details of - * locking and looking up nodes in the cache for you. They're best treated as - * mere syntax when reading code that uses them. - * - * op->lock determines whether we take a read or a write lock at a given depth. - * If you've got a read lock and find that you need a write lock (i.e. you're - * going to have to split), set op->lock and return -EINTR; btree_root() will - * call you again and you'll have the correct lock. - */ - -/** - * btree - recurse down the btree on a specified key - * @fn: function to call, which will be passed the child node - * @key: key to recurse on - * @b: parent btree node - * @op: pointer to struct btree_op - */ -#define btree(fn, key, b, op, ...) 
\ -({ \ - int _r, l = (b)->level - 1; \ - bool _w = l <= (op)->lock; \ - struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ - _w, b); \ - if (!IS_ERR(_child)) { \ - _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ - rw_unlock(_w, _child); \ - } else \ - _r = PTR_ERR(_child); \ - _r; \ -}) - -/** - * btree_root - call a function on the root of the btree - * @fn: function to call, which will be passed the child node - * @c: cache set - * @op: pointer to struct btree_op - */ -#define btree_root(fn, c, op, ...) \ -({ \ - int _r = -EINTR; \ - do { \ - struct btree *_b = (c)->root; \ - bool _w = insert_lock(op, _b); \ - rw_lock(_w, _b, _b->level); \ - if (_b == (c)->root && \ - _w == insert_lock(op, _b)) { \ - _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ - } \ - rw_unlock(_w, _b); \ - bch_cannibalize_unlock(c); \ - if (_r == -EINTR) \ - schedule(); \ - } while (_r == -EINTR); \ - \ - finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ - _r; \ -}) - static inline struct bset *write_block(struct btree *b) { return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); @@ -649,7 +590,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush) up(&b->io_mutex); } +retry: + /* + * BTREE_NODE_dirty might be cleared in btree_flush_btree() by + * __bch_btree_node_write(). To avoid an extra flush, acquire + * b->write_lock before checking BTREE_NODE_dirty bit. + */ mutex_lock(&b->write_lock); + /* + * If this btree node is selected in btree_flush_write() by journal + * code, delay and retry until the node is flushed by journal code + * and BTREE_NODE_journal_flush bit cleared by btree_flush_write(). + */ + if (btree_node_journal_flush(b)) { + pr_debug("bnode %p is flushing by journal, retry", b); + mutex_unlock(&b->write_lock); + udelay(1); + goto retry; + } + if (btree_node_dirty(b)) __bch_btree_node_write(b, &cl); mutex_unlock(&b->write_lock); @@ -695,6 +654,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, * IO can always make forward progress: */ nr /= c->btree_pages; + if (nr == 0) + nr = 1; nr = min_t(unsigned long, nr, mca_can_free(c)); i = 0; @@ -772,10 +733,15 @@ void bch_btree_cache_free(struct cache_set *c) while (!list_empty(&c->btree_cache)) { b = list_first_entry(&c->btree_cache, struct btree, list); - if (btree_node_dirty(b)) + /* + * This function is called by cache_set_free(), no I/O + * request on cache now, it is unnecessary to acquire + * b->write_lock before clearing BTREE_NODE_dirty anymore. + */ + if (btree_node_dirty(b)) { btree_complete_write(b, btree_current_write(b)); - clear_bit(BTREE_NODE_dirty, &b->flags); - + clear_bit(BTREE_NODE_dirty, &b->flags); + } mca_data_free(b); } @@ -1061,11 +1027,25 @@ static void btree_node_free(struct btree *b) BUG_ON(b == b->c->root); +retry: mutex_lock(&b->write_lock); + /* + * If the btree node is selected and flushing in btree_flush_write(), + * delay and retry until the BTREE_NODE_journal_flush bit cleared, + * then it is safe to free the btree node here. Otherwise this btree + * node will be in race condition. 
+ */ + if (btree_node_journal_flush(b)) { + mutex_unlock(&b->write_lock); + pr_debug("bnode %p journal_flush set, retry", b); + udelay(1); + goto retry; + } - if (btree_node_dirty(b)) + if (btree_node_dirty(b)) { btree_complete_write(b, btree_current_write(b)); - clear_bit(BTREE_NODE_dirty, &b->flags); + clear_bit(BTREE_NODE_dirty, &b->flags); + } mutex_unlock(&b->write_lock); @@ -1218,12 +1198,16 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, c, "inconsistent ptrs: mark = %llu, level = %i", GC_MARK(g), level); - if (level) + if (level) { SET_GC_MARK(g, GC_MARK_METADATA); - else if (KEY_DIRTY(k)) + } else if (KEY_DIRTY(k)) { SET_GC_MARK(g, GC_MARK_DIRTY); - else if (!GC_MARK(g)) + SET_GC_DIRTY_SECTORS(g, min_t(unsigned int, + GC_DIRTY_SECTORS(g) + KEY_SIZE(k), + MAX_GC_SECTORS_USED)); + } else if (!GC_MARK(g)) { SET_GC_MARK(g, GC_MARK_RECLAIMABLE); + } /* guard against overflow */ SET_GC_SECTORS_USED(g, min_t(unsigned int, @@ -1393,7 +1377,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__set_blocks(n1, n1->keys + n2->keys, block_bytes(b->c)) > btree_blocks(new_nodes[i])) - goto out_nocoalesce; + goto out_unlock_nocoalesce; keys = n2->keys; /* Take the key of the node we're getting rid of */ @@ -1422,7 +1406,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) - goto out_nocoalesce; + goto out_unlock_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); bch_keylist_add(&keylist, &new_nodes[i]->key); @@ -1468,6 +1452,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, /* Invalidated our iterator */ return -EINTR; +out_unlock_nocoalesce: + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); + out_nocoalesce: closure_sync(&cl); bch_keylist_free(&keylist); @@ -1703,6 +1691,7 @@ static void btree_gc_start(struct cache_set *c) if (!atomic_read(&b->pin)) { SET_GC_MARK(b, 0); SET_GC_SECTORS_USED(b, 0); + SET_GC_DIRTY_SECTORS(b, 0); } } @@ -1817,7 +1806,7 @@ static void bch_btree_gc(struct cache_set *c) trace_bcache_gc_end(c); - bch_moving_gc(c); + bch_moving_gc(c, c->gc_only_dirty_data); } static bool gc_should_run(struct cache_set *c) @@ -1901,13 +1890,170 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) return ret; } +static int bch_btree_check_thread(void *arg) +{ + int ret; + struct btree_check_info *info = arg; + struct btree_check_state *check_state = info->state; + struct cache_set *c = check_state->c; + struct btree_iter iter; + struct bkey *k, *p; + int cur_idx, prev_idx, skip_nr; + + k = p = NULL; + cur_idx = prev_idx = 0; + ret = 0; + + /* root node keys are checked before thread created */ + bch_btree_iter_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + WARN_ON(!k); + + p = k; + while (k) { + /* + * Fetch a root node key index, skip the keys which + * should be fetched by other threads, then check the + * sub-tree indexed by the fetched key. + */ + spin_lock(&check_state->idx_lock); + cur_idx = check_state->key_idx; + check_state->key_idx++; + spin_unlock(&check_state->idx_lock); + + skip_nr = cur_idx - prev_idx; + + while (skip_nr) { + k = bch_btree_iter_next_filter(&iter, + &c->root->keys, + bch_ptr_bad); + if (k) + p = k; + else { + /* + * No more keys to check in root node, + * current checking threads are enough, + * stop creating more. 
+ */ + atomic_set(&check_state->enough, 1); + /* Update check_state->enough earlier */ + smp_mb(); + goto out; + } + skip_nr--; + cond_resched(); + } + + if (p) { + struct btree_op op; + + btree_node_prefetch(c->root, p); + c->gc_stats.nodes++; + bch_btree_op_init(&op, 0); + ret = btree(check_recurse, p, c->root, &op); + if (ret) + goto out; + } + p = NULL; + prev_idx = cur_idx; + cond_resched(); + } + +out: + info->result = ret; + /* update check_state->started among all CPUs */ + smp_mb(); + if (atomic_dec_and_test(&check_state->started)) + wake_up(&check_state->wait); + + return ret; +} + +static int bch_btree_chkthread_nr(void) +{ + int n = num_online_cpus() / 2; + + if (n == 0) + n = 1; + else if (n > BCH_BTR_CHKTHREAD_MAX) + n = BCH_BTR_CHKTHREAD_MAX; + + return n; +} + int bch_btree_check(struct cache_set *c) { - struct btree_op op; + int ret = 0; + int i; + struct bkey *k = NULL; + struct btree_iter iter; + struct btree_check_state *check_state; + char name[32]; - bch_btree_op_init(&op, SHRT_MAX); + /* check and mark root node keys */ + for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) + bch_initial_mark_key(c, c->root->level, k); + + bch_initial_mark_key(c, c->root->level + 1, &c->root->key); + + if (c->root->level == 0) + return 0; + + check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL); + if (!check_state) + return -ENOMEM; + + check_state->c = c; + check_state->total_threads = bch_btree_chkthread_nr(); + check_state->key_idx = 0; + spin_lock_init(&check_state->idx_lock); + atomic_set(&check_state->started, 0); + atomic_set(&check_state->enough, 0); + init_waitqueue_head(&check_state->wait); + + /* + * Run multiple threads to check btree nodes in parallel, + * if check_state->enough is non-zero, it means current + * running check threads are enough, unncessary to create + * more. + */ + for (i = 0; i < check_state->total_threads; i++) { + /* fetch latest check_state->enough earlier */ + smp_mb(); + if (atomic_read(&check_state->enough)) + break; - return btree_root(check_recurse, c, &op); + check_state->infos[i].result = 0; + check_state->infos[i].state = check_state; + snprintf(name, sizeof(name), "bch_btrchk[%u]", i); + atomic_inc(&check_state->started); + + check_state->infos[i].thread = + kthread_run(bch_btree_check_thread, + &check_state->infos[i], + name); + if (IS_ERR(check_state->infos[i].thread)) { + pr_err("fails to run thread bch_btrchk[%d]\n", i); + for (--i; i >= 0; i--) + kthread_stop(check_state->infos[i].thread); + ret = -ENOMEM; + goto out; + } + } + + wait_event_interruptible(check_state->wait, + atomic_read(&check_state->started) == 0); + + for (i = 0; i < check_state->total_threads; i++) { + if (check_state->infos[i].result) { + ret = check_state->infos[i].result; + goto out; + } + } + +out: + kfree(check_state); + return ret; } void bch_initial_gc_finish(struct cache_set *c) @@ -1959,12 +2105,12 @@ static bool btree_insert_key(struct btree *b, struct bkey *k, BUG_ON(bkey_cmp(k, &b->key) > 0); status = bch_btree_insert_key(&b->keys, k, replace_key); + trace_bcache_btree_insert_key(b, k, replace_key != NULL, + status); if (status != BTREE_INSERT_STATUS_NO_INSERT) { bch_check_keys(&b->keys, "%u for %s", status, replace_key ? 
"replace" : "insert"); - trace_bcache_btree_insert_key(b, k, replace_key != NULL, - status); return true; } else return false; @@ -2368,7 +2514,7 @@ int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, return btree_root(map_nodes_recurse, c, op, from, fn, flags); } -static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, +int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, struct bkey *from, btree_map_keys_fn *fn, int flags) { @@ -2434,7 +2580,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b, struct keybuf *buf = refill->buf; int ret = MAP_CONTINUE; - if (bkey_cmp(k, refill->end) >= 0) { + if (bkey_cmp(k, refill->end) > 0) { ret = MAP_DONE; goto out; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index a68d6c55783bd97eaf49d5744c9422f8af24be07..42954927abd09150bbbe3703206787be0d69c443 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -158,11 +158,13 @@ enum btree_flags { BTREE_NODE_io_error, BTREE_NODE_dirty, BTREE_NODE_write_idx, + BTREE_NODE_journal_flush, }; BTREE_FLAG(io_error); BTREE_FLAG(dirty); BTREE_FLAG(write_idx); +BTREE_FLAG(journal_flush); static inline struct btree_write *btree_current_write(struct btree *b) { @@ -191,7 +193,11 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i) static inline void set_gc_sectors(struct cache_set *c) { - atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16); + if (c->gc_sectors == 0) + atomic_set(&c->sectors_to_gc, + c->sb.bucket_size * c->nbuckets / 16); + else + atomic_set(&c->sectors_to_gc, c->gc_sectors); } void bkey_put(struct cache_set *c, struct bkey *k); @@ -216,6 +222,25 @@ struct btree_op { unsigned int insert_collision:1; }; +struct btree_check_state; +struct btree_check_info { + struct btree_check_state *state; + struct task_struct *thread; + int result; +}; + +#define BCH_BTR_CHKTHREAD_MAX 64 +struct btree_check_state { + struct cache_set *c; + int total_threads; + int key_idx; + spinlock_t idx_lock; + atomic_t started; + atomic_t enough; + wait_queue_head_t wait; + struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX]; +}; + static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) { memset(op, 0, sizeof(struct btree_op)); @@ -257,15 +282,81 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys, int bch_gc_thread_start(struct cache_set *c); void bch_initial_gc_finish(struct cache_set *c); -void bch_moving_gc(struct cache_set *c); +void bch_moving_gc(struct cache_set *c, bool only_move_dirty); int bch_btree_check(struct cache_set *c); void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); +typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b, + struct bkey *k); +int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, + struct bkey *from, btree_map_keys_fn *fn, int flags); +int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, + struct bkey *from, btree_map_keys_fn *fn, + int flags); static inline void wake_up_gc(struct cache_set *c) { wake_up(&c->gc_wait); } +/* + * These macros are for recursing down the btree - they handle the details of + * locking and looking up nodes in the cache for you. They're best treated as + * mere syntax when reading code that uses them. + * + * op->lock determines whether we take a read or a write lock at a given depth. + * If you've got a read lock and find that you need a write lock (i.e. 
you're + * going to have to split), set op->lock and return -EINTR; btree_root() will + * call you again and you'll have the correct lock. + */ + +/** + * btree - recurse down the btree on a specified key + * @fn: function to call, which will be passed the child node + * @key: key to recurse on + * @b: parent btree node + * @op: pointer to struct btree_op + */ +#define btree(fn, key, b, op, ...) \ +({ \ + int _r, l = (b)->level - 1; \ + bool _w = l <= (op)->lock; \ + struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ + _w, b); \ + if (!IS_ERR(_child)) { \ + _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ + rw_unlock(_w, _child); \ + } else \ + _r = PTR_ERR(_child); \ + _r; \ +}) + +/** + * btree_root - call a function on the root of the btree + * @fn: function to call, which will be passed the child node + * @c: cache set + * @op: pointer to struct btree_op + */ +#define btree_root(fn, c, op, ...) \ +({ \ + int _r = -EINTR; \ + do { \ + struct btree *_b = (c)->root; \ + bool _w = insert_lock(op, _b); \ + rw_lock(_w, _b, _b->level); \ + if (_b == (c)->root && \ + _w == insert_lock(op, _b)) { \ + _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ + } \ + rw_unlock(_w, _b); \ + bch_cannibalize_unlock(c); \ + if (_r == -EINTR) \ + schedule(); \ + } while (_r == -EINTR); \ + \ + finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ + _r; \ +}) + #define MAP_DONE 0 #define MAP_CONTINUE 1 diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 73f5319295bc9c1c8a3caf47ac7b7ab4ee5cfcdd..c12cd809ab1938c3aedf64e86c30dfeb0655b354 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -105,8 +105,14 @@ struct closure_syncer { static void closure_sync_fn(struct closure *cl) { - cl->s->done = 1; - wake_up_process(cl->s->task); + struct closure_syncer *s = cl->s; + struct task_struct *p; + + rcu_read_lock(); + p = READ_ONCE(s->task); + s->done = 1; + wake_up_process(p); + rcu_read_unlock(); } void __sched __closure_sync(struct closure *cl) diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 06da66b2488ae8f5371fcfeb92c7b336d24e78ca..8c53d874ada4aa92526d328dd93203c400564254 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -249,8 +249,7 @@ void bch_debug_init_cache_set(struct cache_set *c) void bch_debug_exit(void) { - if (!IS_ERR_OR_NULL(bcache_debug)) - debugfs_remove_recursive(bcache_debug); + debugfs_remove_recursive(bcache_debug); } void __init bch_debug_init(struct kobject *kobj) diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index c809724e6571e4be1d61ed0198a471ec8889c044..886710043025f21fe4f55fb4cb9fc1e6196a081e 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); unsigned int i, stale; + char buf[80]; if (!KEY_PTRS(k) || bch_extent_invalid(bk, k)) @@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) if (!ptr_available(b->c, k, i)) return true; - if (!expensive_debug_checks(b->c) && KEY_DIRTY(k)) - return false; - for (i = 0; i < KEY_PTRS(k); i++) { stale = ptr_stale(b->c, k, i); - btree_bug_on(stale > 96, b, + if (stale && KEY_DIRTY(k)) { + bch_extent_to_text(buf, sizeof(buf), k); + pr_info("stale dirty pointer, stale %u, key: %s", + stale, buf); + } + + btree_bug_on(stale > BUCKET_GC_GEN_MAX, b, "key too stale: %i, need_gc %u", stale, b->c->need_gc); - 
btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), - b, "stale dirty pointer"); - if (stale) return true; diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index c250979683194c69f8ea1b9585e13907c877eb3a..4d93f07f63e515c23e0792331d87f3c2cea1c485 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); + /* + * Read-ahead requests on a degrading and recovering md raid + * (e.g. raid6) device might be failured immediately by md + * raid code, which is not a real hardware media failure. So + * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors. + */ + if (bio->bi_opf & REQ_RAHEAD) { + pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore", + dc->backing_dev_name); + return; + } + errors = atomic_add_return(1, &dc->io_errors); if (errors < dc->error_limit) pr_err("%s: IO error on backing device, unrecoverable", diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 522c7426f3a05cee10df0e984521472849b9b707..7bb15cddca5ecb6dbd25d3f21b2ca201e9d106f8 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) } } +bool is_discard_enabled(struct cache_set *s) +{ + struct cache *ca; + unsigned int i; + + for_each_cache(ca, s, i) + if (ca->discard) + return true; + + return false; +} + int bch_journal_replay(struct cache_set *s, struct list_head *list) { int ret = 0, keys = 0, entries = 0; @@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) list_for_each_entry(i, list, list) { BUG_ON(i->pin && atomic_read(i->pin) != 1); - cache_set_err_on(n != i->j.seq, s, -"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)", - n, i->j.seq - 1, start, end); + if (n != i->j.seq) { + if (n == start && is_discard_enabled(s)) + pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + else { + pr_err("bcache: journal entries %llu-%llu missing! 
(replaying %llu-%llu)", + n, i->j.seq - 1, start, end); + ret = -EIO; + goto err; + } + } for (k = i->j.start; k < bset_bkey_last(&i->j); @@ -370,12 +390,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) } /* Journalling */ -#define journal_max_cmp(l, r) \ - (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \ - fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal)) -#define journal_min_cmp(l, r) \ - (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \ - fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal)) static void btree_flush_write(struct cache_set *c) { @@ -383,38 +397,34 @@ static void btree_flush_write(struct cache_set *c) * Try to find the btree node with that references the oldest journal * entry, best is our current candidate and is locked if non NULL: */ - struct btree *b; - int i; + struct btree *b, *best; + unsigned int i; atomic_long_inc(&c->flush_write); - retry: - spin_lock(&c->journal.lock); - if (heap_empty(&c->flush_btree)) { - for_each_cached_btree(b, c, i) - if (btree_current_write(b)->journal) { - if (!heap_full(&c->flush_btree)) - heap_add(&c->flush_btree, b, - journal_max_cmp); - else if (journal_max_cmp(b, - heap_peek(&c->flush_btree))) { - c->flush_btree.data[0] = b; - heap_sift(&c->flush_btree, 0, - journal_max_cmp); - } + best = NULL; + + mutex_lock(&c->bucket_lock); + for_each_cached_btree(b, c, i) + if (btree_current_write(b)->journal) { + if (!best) + best = b; + else if (journal_pin_cmp(c, + btree_current_write(best)->journal, + btree_current_write(b)->journal)) { + best = b; } + } - for (i = c->flush_btree.used / 2 - 1; i >= 0; --i) - heap_sift(&c->flush_btree, i, journal_min_cmp); - } - - b = NULL; - heap_pop(&c->flush_btree, b, journal_min_cmp); - spin_unlock(&c->journal.lock); + b = best; + if (b) + set_btree_node_journal_flush(b); + mutex_unlock(&c->bucket_lock); if (b) { mutex_lock(&b->write_lock); if (!btree_current_write(b)->journal) { + clear_bit(BTREE_NODE_journal_flush, &b->flags); mutex_unlock(&b->write_lock); /* We raced */ atomic_long_inc(&c->retry_flush_write); @@ -422,6 +432,7 @@ static void btree_flush_write(struct cache_set *c) } __bch_btree_node_write(b, NULL); + clear_bit(BTREE_NODE_journal_flush, &b->flags); mutex_unlock(&b->write_lock); } } @@ -540,11 +551,11 @@ static void journal_reclaim(struct cache_set *c) ca->sb.nr_this_dev); } - bkey_init(k); - SET_KEY_PTRS(k, n); - - if (n) + if (n) { + bkey_init(k); + SET_KEY_PTRS(k, n); c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; + } out: if (!journal_full(&c->journal)) __closure_wake_up(&c->journal.wait); @@ -671,6 +682,9 @@ static void journal_write_unlocked(struct closure *cl) ca->journal.seq[ca->journal.cur_idx] = w->data->seq; } + /* If KEY_PTRS(k) == 0, this jset gets lost in air */ + BUG_ON(i == 0); + atomic_dec_bug(&fifo_back(&c->journal.pin)); bch_journal_next(&c->journal); journal_reclaim(c); @@ -787,6 +801,10 @@ atomic_t *bch_journal(struct cache_set *c, struct journal_write *w; atomic_t *ret; + /* No journaling if CACHE_SET_IO_DISABLE set already */ + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) + return NULL; + if (!CACHE_SYNC(&c->sb)) return NULL; @@ -831,7 +849,6 @@ void bch_journal_free(struct cache_set *c) free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); free_fifo(&c->journal.pin); - free_heap(&c->flush_btree); } int bch_journal_alloc(struct cache_set *c) @@ -846,8 +863,7 @@ int bch_journal_alloc(struct cache_set *c) 
j->w[0].c = c; j->w[1].c = c; - if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) || - !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || + if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) return -ENOMEM; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7891fb512736dc82b780311e4d4df33351aad473..749422b927394617719149f0e45001d99deebe2e 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -123,7 +123,7 @@ static void read_moving_submit(struct closure *cl) continue_at(cl, write_moving, io->op.wq); } -static void read_moving(struct cache_set *c) +static void read_moving(struct cache_set *c, bool only_move_dirty) { struct keybuf_key *w; struct moving_io *io; @@ -140,7 +140,8 @@ static void read_moving(struct cache_set *c) if (!w) break; - if (ptr_stale(c, &w->key, 0)) { + if (ptr_stale(c, &w->key, 0) || + (only_move_dirty && (!KEY_DIRTY(&w->key)))) { bch_keybuf_del(&c->moving_gc_keys, w); continue; } @@ -187,22 +188,43 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) return GC_SECTORS_USED(l) < GC_SECTORS_USED(r); } -static unsigned int bucket_heap_top(struct cache *ca) +static bool bucket_cmp_dirty(struct bucket *l, struct bucket *r) +{ + return GC_DIRTY_SECTORS(l) < GC_DIRTY_SECTORS(r); +} + +static unsigned int bucket_heap_top(struct cache *ca, bool only_dirty) { struct bucket *b; + if (only_dirty) + return (b = heap_peek(&ca->heap)) ? GC_DIRTY_SECTORS(b) : 0; + else + return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; +} - return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; +static unsigned bucket_sectors(struct bucket *b, bool only_dirty) +{ + if (only_dirty) + return GC_DIRTY_SECTORS(b); + else + return GC_SECTORS_USED(b); } -void bch_moving_gc(struct cache_set *c) +void bch_moving_gc(struct cache_set *c, bool only_move_dirty) { struct cache *ca; struct bucket *b; unsigned int i; + bool (*cmp)(struct bucket*, struct bucket*); if (!c->copy_gc_enabled) return; + if (only_move_dirty) + cmp = &bucket_cmp_dirty; + else + cmp = &bucket_cmp; + mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) { @@ -214,29 +236,30 @@ void bch_moving_gc(struct cache_set *c) for_each_bucket(b, ca) { if (GC_MARK(b) == GC_MARK_METADATA || - !GC_SECTORS_USED(b) || - GC_SECTORS_USED(b) == ca->sb.bucket_size || + !bucket_sectors(b, only_move_dirty) || + ((!only_move_dirty) && + (GC_SECTORS_USED(b) == ca->sb.bucket_size)) || atomic_read(&b->pin)) continue; if (!heap_full(&ca->heap)) { - sectors_to_move += GC_SECTORS_USED(b); - heap_add(&ca->heap, b, bucket_cmp); - } else if (bucket_cmp(b, heap_peek(&ca->heap))) { - sectors_to_move -= bucket_heap_top(ca); - sectors_to_move += GC_SECTORS_USED(b); + sectors_to_move += bucket_sectors(b, only_move_dirty); + heap_add(&ca->heap, b, (*cmp)); + } else if ((*cmp)(b, heap_peek(&ca->heap))) { + sectors_to_move -= bucket_heap_top(ca, only_move_dirty); + sectors_to_move += bucket_sectors(b, only_move_dirty); ca->heap.data[0] = b; - heap_sift(&ca->heap, 0, bucket_cmp); + heap_sift(&ca->heap, 0, (*cmp)); } } while (sectors_to_move > reserve_sectors) { - heap_pop(&ca->heap, b, bucket_cmp); - sectors_to_move -= GC_SECTORS_USED(b); + heap_pop(&ca->heap, b, (*cmp)); + sectors_to_move -= bucket_sectors(b, only_move_dirty); } - while (heap_pop(&ca->heap, b, bucket_cmp)) + while (heap_pop(&ca->heap, b, (*cmp))) SET_GC_MOVE(b, 1); } @@ -244,7 +267,7 @@ void 
bch_moving_gc(struct cache_set *c) c->moving_gc_keys.last_scanned = ZERO_KEY; - read_moving(c); + read_moving(c, only_move_dirty); } void bch_moving_init_cache_set(struct cache_set *c) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 51be355a3309fe993e4e14c0c5ac8d3e9955b45c..cd1184537af6b05096d21f4abe4cf2272b1b0a2a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -8,11 +8,13 @@ */ #include "bcache.h" +#include "acache.h" #include "btree.h" #include "debug.h" #include "request.h" #include "writeback.h" +#include #include #include #include @@ -26,6 +28,7 @@ struct kmem_cache *bch_search_cache; static void bch_data_insert_start(struct closure *cl); +static void alloc_token(struct cached_dev *dc, unsigned int sectors); static unsigned int cache_mode(struct cached_dev *dc) { @@ -320,10 +323,20 @@ static void bch_data_insert_start(struct closure *cl) void bch_data_insert(struct closure *cl) { struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + struct acache_info msg; trace_bcache_write(op->c, op->inode, op->bio, op->writeback, op->bypass); + if (op->bio->bi_disk) { + msg.offset = op->bio->bi_iter.bi_sector; + msg.length = op->bio->bi_iter.bi_size; + msg.type = ACACHE_INFO_CACHE_INSERT; + msg.dev = bio_dev(op->bio); + msg.start_time = ktime_get_ns(); + save_circ_item(&msg); + } + bch_keylist_init(&op->insert_keys); bio_get(op->bio); bch_data_insert_start(cl); @@ -359,6 +372,7 @@ unsigned int bch_get_congested(struct cache_set *c) return i > 0 ? i : 1; } +#if IS_ENABLED(CONFIG_BCACHE) static void add_sequential(struct task_struct *t) { ewma_add(t->sequential_io_avg, @@ -366,6 +380,7 @@ static void add_sequential(struct task_struct *t) t->sequential_io = 0; } +#endif static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) { @@ -377,7 +392,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) struct cache_set *c = dc->disk.c; unsigned int mode = cache_mode(dc); unsigned int sectors, congested = bch_get_congested(c); +#if IS_ENABLED(CONFIG_BCACHE) struct task_struct *task = current; +#endif struct io *i; if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || @@ -386,17 +403,26 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) goto skip; if (mode == CACHE_MODE_NONE || - (mode == CACHE_MODE_WRITEAROUND && + ((mode == CACHE_MODE_WRITEAROUND || + c->force_write_through == true) && op_is_write(bio_op(bio)))) goto skip; /* - * Flag for bypass if the IO is for read-ahead or background, - * unless the read-ahead request is for metadata (eg, for gfs2). + * If the bio is for read-ahead or background IO, bypass it or + * not depends on the following situations, + * - If the IO is for meta data, always cache it and no bypass + * - If the IO is not meta data, check dc->cache_reada_policy, + * BCH_CACHE_READA_ALL: cache it and not bypass + * BCH_CACHE_READA_META_ONLY: not cache it and bypass + * That is, read-ahead request for metadata always get cached + * (eg, for gfs2 or xfs). 
*/ - if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) && - !(bio->bi_opf & REQ_META)) - goto skip; + if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) { + if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) && + (dc->cache_readahead_policy != BCH_CACHE_READA_ALL)) + goto skip; + } if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { @@ -423,7 +449,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) i = list_first_entry(&dc->io_lru, struct io, lru); +#if IS_ENABLED(CONFIG_BCACHE) add_sequential(task); +#endif i->sequential = 0; found: if (i->sequential + bio->bi_iter.bi_size > i->sequential) @@ -431,7 +459,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) i->last = bio_end_sector(bio); i->jiffies = jiffies + msecs_to_jiffies(5000); +#if IS_ENABLED(CONFIG_BCACHE) task->sequential_io = i->sequential; +#endif hlist_del(&i->hash); hlist_add_head(&i->hash, iohash(dc, i->last)); @@ -439,8 +469,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) spin_unlock(&dc->io_lock); +#if IS_ENABLED(CONFIG_BCACHE) sectors = max(task->sequential_io, task->sequential_io_avg) >> 9; +#else + sectors = i->sequential >> 9; +#endif if (dc->sequential_cutoff && sectors >= dc->sequential_cutoff >> 9) { @@ -463,27 +497,6 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) /* Cache lookup */ -struct search { - /* Stack frame for bio_complete */ - struct closure cl; - - struct bbio bio; - struct bio *orig_bio; - struct bio *cache_miss; - struct bcache_device *d; - - unsigned int insert_bio_sectors; - unsigned int recoverable:1; - unsigned int write:1; - unsigned int read_dirty_data:1; - unsigned int cache_missed:1; - - unsigned long start_time; - - struct btree_op op; - struct data_insert_op iop; -}; - static void bch_cache_read_endio(struct bio *bio) { struct bbio *b = container_of(bio, struct bbio, bio); @@ -542,6 +555,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) return MAP_CONTINUE; /* XXX: figure out best pointer - for multiple cache devices */ + ptr = 0; PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; @@ -559,6 +573,7 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); + if (!s->prefetch) { n->bi_end_io = bch_cache_read_endio; n->bi_private = &s->cl; @@ -574,6 +589,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) */ __bch_submit_bbio(n, b->c); + } else { + bio_put(n); + } return n == bio ? 
MAP_DONE : MAP_CONTINUE; } @@ -676,7 +694,12 @@ static void bio_complete(struct search *s) trace_bcache_request_end(s->d, s->orig_bio); s->orig_bio->bi_status = s->iop.status; - bio_endio(s->orig_bio); + if (s->prefetch) { + bio_free_pages(s->orig_bio); + bio_put(s->orig_bio); + } else { + bio_endio(s->orig_bio); + } s->orig_bio = NULL; } } @@ -701,7 +724,7 @@ static void do_bio_hook(struct search *s, bio_cnt_set(bio, 3); } -static void search_free(struct closure *cl) +void search_free(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); @@ -710,13 +733,16 @@ static void search_free(struct closure *cl) if (s->iop.bio) bio_put(s->iop.bio); + if (s->prefetch) + inflight_list_ops.remove(s); + bio_complete(s); closure_debug_destroy(cl); mempool_free(s, &s->d->c->search); } -static inline struct search *search_alloc(struct bio *bio, - struct bcache_device *d) +struct search *search_alloc(struct bio *bio, + struct bcache_device *d, bool prefetch) { struct search *s; @@ -734,6 +760,8 @@ static inline struct search *search_alloc(struct bio *bio, s->write = op_is_write(bio_op(bio)); s->read_dirty_data = 0; s->start_time = jiffies; + s->prefetch = prefetch; + s->write_inval_data_putoff = false; s->iop.c = d->c; s->iop.bio = NULL; @@ -755,6 +783,10 @@ static void cached_dev_bio_complete(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); + /* ensure this lock is released after data_insert */ + if (s->write_inval_data_putoff) + up_read_non_owner(&dc->writeback_lock); + search_free(cl); cached_dev_put(dc); } @@ -818,25 +850,34 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); s->iop.bio->bi_iter.bi_sector = - s->cache_miss->bi_iter.bi_sector; + s->cache_miss->bi_iter.bi_sector; bio_copy_dev(s->iop.bio, s->cache_miss); s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); - bio_copy_data(s->cache_miss, s->iop.bio); + if (!s->prefetch) + bio_copy_data(s->cache_miss, s->iop.bio); + else + trace_bcache_prefetch_cache_miss(s->iop.bio); bio_put(s->cache_miss); s->cache_miss = NULL; + } if (verify(dc) && s->recoverable && !s->read_dirty_data) bch_data_verify(dc, s->orig_bio); - bio_complete(s); + if (!s->prefetch) + bio_complete(s); - if (s->iop.bio && + if (s->iop.bio && (!dc->read_bypass || s->prefetch) && !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) { BUG_ON(!s->iop.replace); + if ((dc->disk.c->traffic_policy_start == true) && + (dc->disk.c->force_write_through != true)) { + alloc_token(dc, bio_sectors(s->iop.bio)); + } closure_call(&s->iop.cl, bch_data_insert, NULL, cl); } @@ -848,9 +889,18 @@ static void cached_dev_read_done_bh(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - bch_mark_cache_accounting(s->iop.c, s->d, + if (s->prefetch) + pr_debug("prefetch request; do not count cache_missed"); + else + bch_mark_cache_accounting(s->iop.c, s->d, !s->cache_missed, s->iop.bypass); - trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); + trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass); + + if (!s->prefetch && !s->iop.status) { + s->smp.type = ACACHE_INFO_LATENCY; + s->smp.start_time = ktime_get_ns() - s->smp.start_time; + save_circ_item(&s->smp); + } if (s->iop.status) continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); @@ -867,22 +917,29 @@ static int
cached_dev_cache_miss(struct btree *b, struct search *s, unsigned int reada = 0; struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + unsigned int size_limit; s->cache_missed = 1; - if (s->cache_miss || s->iop.bypass) { + if (s->cache_miss || s->iop.bypass || + (dc->read_bypass && !s->prefetch)) { miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); ret = miss == bio ? MAP_DONE : MAP_CONTINUE; goto out_submit; } + /* if called from do_readahead, no need to do this */ if (!(bio->bi_opf & REQ_RAHEAD) && !(bio->bi_opf & REQ_META) && - s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) + s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA && + !s->prefetch) reada = min_t(sector_t, dc->readahead >> 9, get_capacity(bio->bi_disk) - bio_end_sector(bio)); - s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); + /* Limitation for valid replace key size and cache_bio bvecs number */ + size_limit = min_t(unsigned int, BIO_MAX_PAGES * PAGE_SECTORS, + (1 << KEY_SIZE_BITS) - 1); + s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio)); s->iop.replace_key = KEY(s->iop.inode, bio->bi_iter.bi_sector + s->insert_bio_sectors, @@ -894,7 +951,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->iop.replace = true; - miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); + miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO, + &s->d->bio_split); /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; @@ -919,8 +977,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, if (reada) bch_mark_cache_readahead(s->iop.c, s->d); - s->cache_miss = miss; - s->iop.bio = cache_bio; + s->cache_miss = miss; + s->iop.bio = cache_bio; bio_get(cache_bio); /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, cache_bio, &s->cl); @@ -929,17 +987,26 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, out_put: bio_put(cache_bio); out_submit: - miss->bi_end_io = backing_request_endio; - miss->bi_private = &s->cl; - /* I/O request sent to backing device */ - closure_bio_submit(s->iop.c, miss, &s->cl); + if (!s->prefetch) { + miss->bi_end_io = backing_request_endio; + miss->bi_private = &s->cl; + /* I/O request sent to backing device */ + closure_bio_submit(s->iop.c, miss, &s->cl); + } else { + bio_put(miss); + } return ret; } -static void cached_dev_read(struct cached_dev *dc, struct search *s) +void cached_dev_read(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; + if (s->prefetch) + inflight_list_ops.insert(s); + else if (inflight_list_ops.wait(s)) + bch_mark_cache_prefetch_fake_hit(s->iop.c, s->d); + closure_call(&s->iop.cl, cache_lookup, NULL, cl); continue_at(cl, cached_dev_read_done_bh, NULL); } @@ -951,8 +1018,40 @@ static void cached_dev_write_complete(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - up_read_non_owner(&dc->writeback_lock); - cached_dev_bio_complete(cl); + if (s->write_inval_data_putoff) + closure_call(&s->iop.cl, bch_data_insert, NULL, cl); + else + up_read_non_owner(&dc->writeback_lock); + continue_at(cl, cached_dev_bio_complete, NULL); +} + +static void alloc_token(struct cached_dev *dc, unsigned int sectors) +{ + int count = 0; + + spin_lock_bh(&dc->token_lock); + + while ((dc->write_token_sector_size < sectors) && + (dc->write_token_io_num == 0)) { +
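+ /* + * No budget left: drop the lock so the token_assign() timer can refill + * it (the timer runs every HZ/8), sleep 10ms and retry. Stop throttling + * after ~100 retries (about a second), or as soon as the traffic policy + * is switched off or the cache mode is no longer writeback. + */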
spin_unlock_bh(&dc->token_lock); + schedule_timeout_interruptible(msecs_to_jiffies(10)); + count++; + if ((dc->disk.c->traffic_policy_start != true) || + (cache_mode(dc) != CACHE_MODE_WRITEBACK) || + (count > 100)) + return; + spin_lock_bh(&dc->token_lock); + } + + if (dc->write_token_sector_size >= sectors) + dc->write_token_sector_size -= sectors; + else + dc->write_token_sector_size = 0; + + if (dc->write_token_io_num > 0) + dc->write_token_io_num--; + + spin_unlock_bh(&dc->token_lock); } static void cached_dev_write(struct cached_dev *dc, struct search *s) @@ -1003,6 +1102,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); + s->write_inval_data_putoff = true; } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; @@ -1035,7 +1135,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) } insert_data: - closure_call(&s->iop.cl, bch_data_insert, NULL, cl); + if (!s->write_inval_data_putoff) + closure_call(&s->iop.cl, bch_data_insert, NULL, cl); continue_at(cl, cached_dev_write_complete, NULL); } @@ -1183,11 +1284,12 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, bio_sectors(bio), &d->disk->part0); + bio_set_dev(bio, dc->bdev); bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { - s = search_alloc(bio, d); + s = search_alloc(bio, d, false); trace_bcache_request_start(s->d, bio); if (!bio->bi_iter.bi_size) { @@ -1199,12 +1301,28 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, cached_dev_nodata, bcache_wq); } else { + atomic_inc(&dc->front_io_num); s->iop.bypass = check_should_bypass(dc, bio); - if (rw) + s->smp.offset = bio->bi_iter.bi_sector - dc->sb.data_offset; + s->smp.length = bio->bi_iter.bi_size; + s->smp.type = rw; + s->smp.dev = dc->bdev->bd_dev; + s->smp.start_time = ktime_get_ns(); + if (!s->iop.bypass && bio->bi_iter.bi_size && !rw) + save_circ_item(&s->smp); + + if (rw) { + if ((s->iop.bypass == false) && + (dc->disk.c->traffic_policy_start == true) && + (cache_mode(dc) == CACHE_MODE_WRITEBACK) && + (bio_op(bio) != REQ_OP_DISCARD)) { + alloc_token(dc, bio_sectors(bio)); + } cached_dev_write(dc, s); - else + } else { cached_dev_read(dc, s); + } } } else /* I/O request sent to backing device */ @@ -1213,12 +1331,81 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, return BLK_QC_T_NONE; } +static int bcache_get_write_status(struct cached_dev *dc, unsigned long arg) +{ + struct get_bcache_status a; + uint64_t cache_sectors; + struct cache_set *c = dc->disk.c; + + if (c == NULL) + return -ENODEV; + + a.writeback_sector_size_per_sec = dc->writeback_sector_size_per_sec; + a.writeback_io_num_per_sec = dc->writeback_io_num_per_sec; + a.front_io_num_per_sec = dc->front_io_num_per_sec; + cache_sectors = c->nbuckets * c->sb.bucket_size - + atomic_long_read(&c->flash_dev_dirty_sectors); + a.dirty_rate = div64_u64(bcache_dev_sectors_dirty(&dc->disk) * 100, + cache_sectors); + a.available = 100 - c->gc_stats.in_use; + if (copy_to_user((struct get_bcache_status *)arg, &a, + sizeof(struct get_bcache_status))) + return -EFAULT; + return 0; +} + +static int bcache_set_write_status(struct cached_dev *dc, unsigned long arg) +{ + struct set_bcache_status a; + struct cache_set *c = dc->disk.c; + + if (c == NULL) + return -ENODEV; + if (copy_from_user(&a, (struct set_bcache_status *)arg, + sizeof(struct set_bcache_status))) + return -EFAULT; + + if (c->traffic_policy_start != 
a.traffic_policy_start) + pr_info("%s traffic policy %s", dc->disk.disk->disk_name, + (a.traffic_policy_start == true) ? "enable" : "disable"); + if (c->force_write_through != a.force_write_through) + pr_info("%s force write through %s", dc->disk.disk->disk_name, + (a.force_write_through == true) ? "enable" : "disable"); + if (a.trigger_gc) { + pr_info("trigger %s gc", dc->disk.disk->disk_name); + atomic_set(&c->sectors_to_gc, -1); + wake_up_gc(c); + } + if ((a.cutoff_writeback_sync >= MIN_CUTOFF_WRITEBACK_SYNC) && + (a.cutoff_writeback_sync <= MAX_CUTOFF_WRITEBACK_SYNC)) { + c->cutoff_writeback_sync = a.cutoff_writeback_sync; + } + + dc->max_sector_size = a.write_token_sector_size; + dc->max_io_num = a.write_token_io_num; + c->traffic_policy_start = a.traffic_policy_start; + c->force_write_through = a.force_write_through; + c->gc_sectors = a.gc_sectors; + dc->writeback_state = a.writeback_state; + return 0; +} + static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, unsigned int cmd, unsigned long arg) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); - return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); + if (dc->io_disable) + return -EIO; + + switch (cmd) { + case BCACHE_GET_WRITE_STATUS: + return bcache_get_write_status(dc, arg); + case BCACHE_SET_WRITE_STATUS: + return bcache_set_write_status(dc, arg); + default: + return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); + } } static int cached_dev_congested(void *data, int bits) @@ -1300,7 +1487,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0); - s = search_alloc(bio, d); + s = search_alloc(bio, d, false); cl = &s->cl; bio = &s->bio.bio; @@ -1378,3 +1565,29 @@ int __init bch_request_init(void) return 0; } + +static void token_assign(struct timer_list *t) +{ + struct cached_dev *dc = from_timer(dc, t, token_assign_timer); + + dc->token_assign_timer.expires = jiffies + HZ / 8; + add_timer(&dc->token_assign_timer); + + spin_lock(&dc->token_lock); + dc->write_token_sector_size = dc->max_sector_size / 8; + dc->write_token_io_num = dc->max_io_num / 8; + dc->write_token_io_num = + (dc->write_token_io_num == 0) ? 
1 : dc->write_token_io_num; + spin_unlock(&dc->token_lock); +} + +void bch_traffic_policy_init(struct cached_dev *dc) +{ + spin_lock_init(&dc->token_lock); + dc->write_token_sector_size = 0; + dc->write_token_io_num = 0; + + timer_setup(&dc->token_assign_timer, token_assign, 0); + dc->token_assign_timer.expires = jiffies + HZ / 8; + add_timer(&dc->token_assign_timer); +} diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index aa055cfeb0998cbc0b1c65203376bd5b9d4bbfc3..f677ba87049401873a6f58aadd3b54028ff15136 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHE_REQUEST_H_ #define _BCACHE_REQUEST_H_ +#include "btree.h" +#include "acache.h" struct data_insert_op { struct closure cl; @@ -39,6 +41,49 @@ void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); void bch_flash_dev_request_init(struct bcache_device *d); +void bch_traffic_policy_init(struct cached_dev *dc); + extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache; +struct search { + /* Stack frame for bio_complete */ + struct closure cl; + + struct bbio bio; + struct bio *orig_bio; + struct bio *cache_miss; + struct bcache_device *d; + + unsigned int insert_bio_sectors; + unsigned int recoverable:1; + unsigned int write:1; + unsigned int read_dirty_data:1; + unsigned int cache_missed:1; + + unsigned long start_time; + /* for prefetch, we do not need to copy data to the bio */ + bool prefetch; + /* + * bch_data_insert() is invoked asynchronously with respect to the bio + * submitted to the backing block device, so a read request may be + * submitted after bch_data_insert() has completed, yet finish before + * the backing bio ends. Such a read fetches data from the backing + * block device and inserts dirty data into the cache device; in + * writearound mode bcache will not invalidate that data again, so + * later reads would get dirty data from the cache, causing data + * corruption. + * So we should put off this invalidation until the backing bio + * completes (see cached_dev_write_complete()); that deferral is what 
This switch is for + */ + bool write_inval_data_putoff; + struct list_head list_node; + wait_queue_head_t wqh; + struct acache_info smp; + + struct btree_op op; + struct data_insert_op iop; +}; + +void search_free(struct closure *cl); +struct search *search_alloc(struct bio *bio, struct bcache_device *d, bool prefetch); +void cached_dev_read(struct cached_dev *dc, struct search *s); #endif /* _BCACHE_REQUEST_H_ */ diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 894410f3f829c8fdfbbca244e5c97e91b423f13d..f49d7d5ce9efa03c6e978381ad5ed9e1a7cbfa2e 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -48,6 +48,7 @@ read_attribute(cache_bypass_misses); read_attribute(cache_hit_ratio); read_attribute(cache_readaheads); read_attribute(cache_miss_collisions); +read_attribute(cache_prefetch_fake_hits); read_attribute(bypassed); SHOW(bch_stats) @@ -66,6 +67,7 @@ SHOW(bch_stats) var_print(cache_readaheads); var_print(cache_miss_collisions); + var_print(cache_prefetch_fake_hits); sysfs_hprint(bypassed, var(sectors_bypassed) << 9); #undef var return 0; @@ -88,6 +90,7 @@ static struct attribute *bch_stats_files[] = { &sysfs_cache_hit_ratio, &sysfs_cache_readaheads, &sysfs_cache_miss_collisions, + &sysfs_cache_prefetch_fake_hits, &sysfs_bypassed, NULL }; @@ -143,6 +146,7 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at) scale_stat(&stats->cache_bypass_misses); scale_stat(&stats->cache_readaheads); scale_stat(&stats->cache_miss_collisions); + scale_stat(&stats->cache_prefetch_fake_hits); scale_stat(&stats->sectors_bypassed); } } @@ -166,6 +170,7 @@ static void scale_accounting(struct timer_list *t) move_stat(cache_bypass_misses); move_stat(cache_readaheads); move_stat(cache_miss_collisions); + move_stat(cache_prefetch_fake_hits); move_stat(sectors_bypassed); scale_stats(&acc->total, 0); @@ -221,6 +226,14 @@ void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) atomic_inc(&c->accounting.collector.cache_miss_collisions); } +void bch_mark_cache_prefetch_fake_hit(struct cache_set *c, struct bcache_device *d) +{ + struct cached_dev *dc = container_of(d, struct cached_dev, disk); + + atomic_inc(&dc->accounting.collector.cache_prefetch_fake_hits); + atomic_inc(&c->accounting.collector.cache_prefetch_fake_hits); +} + void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, int sectors) { diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index abfaabf7e7fcf8fee33c8370074551bf5fc91768..302b76e982b46105ac3c0f367b1acdeec8cca975 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -9,6 +9,7 @@ struct cache_stat_collector { atomic_t cache_bypass_misses; atomic_t cache_readaheads; atomic_t cache_miss_collisions; + atomic_t cache_prefetch_fake_hits; atomic_t sectors_bypassed; }; @@ -21,6 +22,7 @@ struct cache_stats { unsigned long cache_bypass_misses; unsigned long cache_readaheads; unsigned long cache_miss_collisions; + unsigned long cache_prefetch_fake_hits; unsigned long sectors_bypassed; unsigned int rescale; @@ -58,6 +60,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d); void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d); +void bch_mark_cache_prefetch_fake_hit(struct cache_set *c, struct bcache_device *d); void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, int sectors); diff --git 
a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 30ba9aeb5ee8345ac192e34e51e67beae2127950..3f858de9e9602fcc0f175a84316b0d96ff5c7f41 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -8,6 +8,7 @@ */ #include "bcache.h" +#include "acache.h" #include "btree.h" #include "debug.h" #include "extents.h" @@ -418,6 +419,7 @@ static int __uuid_write(struct cache_set *c) { BKEY_PADDED(key) k; struct closure cl; + struct cache *ca; closure_init_stack(&cl); lockdep_assert_held(&bch_register_lock); @@ -429,6 +431,10 @@ static int __uuid_write(struct cache_set *c) uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); closure_sync(&cl); + /* Only one bucket used for uuid write */ + ca = PTR_CACHE(c, &k.key, 0); + atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); + bkey_copy(&c->uuid_bucket, &k.key); bkey_put(c, &k.key); return 0; @@ -520,12 +526,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, closure_sync(cl); } -void bch_prio_write(struct cache *ca) +int bch_prio_write(struct cache *ca, bool wait) { int i; struct bucket *b; struct closure cl; + pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu", + fifo_used(&ca->free[RESERVE_PRIO]), + fifo_used(&ca->free[RESERVE_NONE]), + fifo_used(&ca->free_inc)); + + /* + * Pre-check if there are enough free buckets. In the non-blocking + * scenario it's better to fail early rather than starting to allocate + * buckets and do a cleanup later in case of failure. + */ + if (!wait) { + size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + + fifo_used(&ca->free[RESERVE_NONE]); + if (prio_buckets(ca) > avail) + return -ENOMEM; + } + closure_init_stack(&cl); lockdep_assert_held(&ca->set->bucket_lock); @@ -535,9 +558,6 @@ void bch_prio_write(struct cache *ca) atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), &ca->meta_sectors_written); - //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free), - // fifo_used(&ca->free_inc), fifo_used(&ca->unused)); - for (i = prio_buckets(ca) - 1; i >= 0; --i) { long bucket; struct prio_set *p = ca->disk_buckets; @@ -555,7 +575,7 @@ void bch_prio_write(struct cache *ca) p->magic = pset_magic(&ca->sb); p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); - bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); + bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); @@ -584,6 +604,7 @@ void bch_prio_write(struct cache *ca) ca->prio_last_buckets[i] = ca->prio_buckets[i]; } + return 0; } static void prio_read(struct cache *ca, uint64_t bucket) @@ -643,10 +664,6 @@ static int ioctl_dev(struct block_device *b, fmode_t mode, unsigned int cmd, unsigned long arg) { struct bcache_device *d = b->bd_disk->private_data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - - if (dc->io_disable) - return -EIO; return d->ioctl(d, mode, cmd, arg); } @@ -746,20 +763,28 @@ static inline int idx_to_first_minor(int idx) static void bcache_device_free(struct bcache_device *d) { + struct gendisk *disk = d->disk; + lockdep_assert_held(&bch_register_lock); - pr_info("%s stopped", d->disk->disk_name); + if (disk) + pr_info("%s stopped", disk->disk_name); + else + pr_err("bcache device (NULL gendisk) stopped"); if (d->c) bcache_device_detach(d); - if (d->disk && d->disk->flags & GENHD_FL_UP) - del_gendisk(d->disk); - if (d->disk && d->disk->queue) - blk_cleanup_queue(d->disk->queue); - if (d->disk) { + + if (disk) { + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + + if (disk->queue) + 
blk_cleanup_queue(disk->queue); + ida_simple_remove(&bcache_device_idx, - first_minor_to_idx(d->disk->first_minor)); - put_disk(d->disk); + first_minor_to_idx(disk->first_minor)); + put_disk(disk); } bioset_exit(&d->bio_split); @@ -1008,6 +1033,7 @@ static void cached_dev_detach_finish(struct work_struct *w) bch_write_bdev_super(dc, &cl); closure_sync(&cl); + calc_cached_dev_sectors(dc->disk.c); bcache_device_detach(&dc->disk); list_move(&dc->list, &uncached_devices); @@ -1152,11 +1178,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, } if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { - bch_sectors_dirty_init(&dc->disk); atomic_set(&dc->has_dirty, 1); bch_writeback_queue(dc); } + bch_sectors_dirty_init(&dc->disk); + bch_cached_dev_run(dc); bcache_device_link(&dc->disk, c, "bdev"); atomic_inc(&c->attached_dev_nr); @@ -1183,18 +1210,18 @@ static void cached_dev_free(struct closure *cl) { struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); - mutex_lock(&bch_register_lock); - + del_timer_sync(&dc->io_stat_timer); + del_timer_sync(&dc->token_assign_timer); if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) cancel_writeback_rate_update_dwork(dc); if (!IS_ERR_OR_NULL(dc->writeback_thread)) kthread_stop(dc->writeback_thread); - if (dc->writeback_write_wq) - destroy_workqueue(dc->writeback_write_wq); if (!IS_ERR_OR_NULL(dc->status_update_thread)) kthread_stop(dc->status_update_thread); + mutex_lock(&bch_register_lock); + if (atomic_read(&dc->running)) bd_unlink_disk_holder(dc->bdev, dc->disk.disk); bcache_device_free(&dc->disk); @@ -1225,6 +1252,36 @@ static void cached_dev_flush(struct closure *cl) continue_at(cl, cached_dev_free, system_wq); } +static void cached_dev_io_stat(struct timer_list *t) +{ + struct cached_dev *dc = from_timer(dc, t, io_stat_timer); + + dc->io_stat_timer.expires = jiffies + HZ; + add_timer(&dc->io_stat_timer); + + dc->writeback_sector_size_per_sec = + atomic_read(&dc->writeback_sector_size); + dc->writeback_io_num_per_sec = atomic_read(&dc->writeback_io_num); + dc->front_io_num_per_sec = atomic_read(&dc->front_io_num); + atomic_set(&dc->writeback_sector_size, 0); + atomic_set(&dc->writeback_io_num, 0); + atomic_set(&dc->front_io_num, 0); +} + +static void cached_dev_timer_init(struct cached_dev *dc) +{ + dc->writeback_sector_size_per_sec = 0; + dc->writeback_io_num_per_sec = 0; + dc->front_io_num_per_sec = 0; + atomic_set(&dc->writeback_sector_size, 0); + atomic_set(&dc->writeback_io_num, 0); + atomic_set(&dc->front_io_num, 0); + + timer_setup(&dc->io_stat_timer, cached_dev_io_stat, 0); + dc->io_stat_timer.expires = jiffies + HZ; + add_timer(&dc->io_stat_timer); +} + static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) { int ret; @@ -1241,8 +1298,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) INIT_LIST_HEAD(&dc->io_lru); spin_lock_init(&dc->io_lock); bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); + cached_dev_timer_init(dc); + bch_traffic_policy_init(dc); dc->sequential_cutoff = 4 << 20; + dc->inflight_block_enable = 1; + dc->read_bypass = 0; for (io = dc->io; io < dc->io + RECENT_IO; io++) { list_add(&io->lru, &dc->io_lru); @@ -1428,8 +1489,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) bool bch_cached_dev_error(struct cached_dev *dc) { - struct cache_set *c; - if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) return false; @@ -1440,21 +1499,6 @@ bool bch_cached_dev_error(struct cached_dev *dc) pr_err("stop %s: too many IO 
errors on backing device %s\n", dc->disk.disk->disk_name, dc->backing_dev_name); - /* - * If the cached device is still attached to a cache set, - * even dc->io_disable is true and no more I/O requests - * accepted, cache device internal I/O (writeback scan or - * garbage collection) may still prevent bcache device from - * being stopped. So here CACHE_SET_IO_DISABLE should be - * set to c->flags too, to make the internal I/O to cache - * device rejected and stopped immediately. - * If c is NULL, that means the bcache device is not attached - * to any cache set, then no CACHE_SET_IO_DISABLE bit to set. - */ - c = dc->disk.c; - if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) - pr_info("CACHE_SET_IO_DISABLE already set"); - bcache_device_stop(&dc->disk); return true; } @@ -1507,13 +1551,13 @@ static void cache_set_free(struct closure *cl) struct cache *ca; unsigned int i; - if (!IS_ERR_OR_NULL(c->debug)) - debugfs_remove(c->debug); + debugfs_remove(c->debug); bch_open_buckets_free(c); bch_btree_cache_free(c); bch_journal_free(c); + mutex_lock(&bch_register_lock); for_each_cache(ca, c, i) if (ca) { ca->set = NULL; @@ -1532,7 +1576,6 @@ static void cache_set_free(struct closure *cl) mempool_exit(&c->search); kfree(c->devices); - mutex_lock(&bch_register_lock); list_del(&c->list); mutex_unlock(&bch_register_lock); @@ -1555,7 +1598,7 @@ static void cache_set_flush(struct closure *cl) kobject_put(&c->internal); kobject_del(&c->kobj); - if (c->gc_thread) + if (!IS_ERR_OR_NULL(c->gc_thread)) kthread_stop(c->gc_thread); if (!IS_ERR_OR_NULL(c->root)) @@ -1765,6 +1808,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->congested_read_threshold_us = 2000; c->congested_write_threshold_us = 20000; c->error_limit = DEFAULT_IO_ERROR_LIMIT; + c->cutoff_writeback_sync = MIN_CUTOFF_WRITEBACK_SYNC; WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); return c; @@ -1773,13 +1817,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) return NULL; } -static void run_cache_set(struct cache_set *c) +static int run_cache_set(struct cache_set *c) { const char *err = "cannot allocate memory"; struct cached_dev *dc, *t; struct cache *ca; struct closure cl; unsigned int i; + LIST_HEAD(journal); + struct journal_replay *l; closure_init_stack(&cl); @@ -1788,7 +1834,6 @@ static void run_cache_set(struct cache_set *c) set_gc_sectors(c); if (CACHE_SYNC(&c->sb)) { - LIST_HEAD(journal); struct bkey *k; struct jset *j; @@ -1867,7 +1912,9 @@ static void run_cache_set(struct cache_set *c) if (j->version < BCACHE_JSET_VERSION_UUID) __uuid_write(c); - bch_journal_replay(c, &journal); + err = "bcache: replay journal failed"; + if (bch_journal_replay(c, &journal)) + goto err; } else { pr_notice("invalidating existing data"); @@ -1890,7 +1937,7 @@ static void run_cache_set(struct cache_set *c) mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) - bch_prio_write(ca); + bch_prio_write(ca, true); mutex_unlock(&c->bucket_lock); err = "cannot allocate new UUID bucket"; @@ -1935,11 +1982,19 @@ static void run_cache_set(struct cache_set *c) flash_devs_run(c); set_bit(CACHE_SET_RUNNING, &c->flags); - return; + return 0; err: + while (!list_empty(&journal)) { + l = list_first_entry(&journal, struct journal_replay, list); + list_del(&l->list); + kfree(l); + } + closure_sync(&cl); /* XXX: test this, it's broken */ bch_cache_set_error(c, "%s", err); + + return -EIO; } static bool can_attach_cache(struct cache *ca, struct cache_set *c) @@ -2003,8 +2058,11 @@ static const char 
*register_cache_set(struct cache *ca) ca->set->cache[ca->sb.nr_this_dev] = ca; c->cache_by_alloc[c->caches_loaded++] = ca; - if (c->caches_loaded == c->sb.nr_in_set) - run_cache_set(c); + if (c->caches_loaded == c->sb.nr_in_set) { + err = "failed to run cache set"; + if (run_cache_set(c) < 0) + goto err; + } return NULL; err: @@ -2347,6 +2405,7 @@ static void bcache_exit(void) if (bcache_major) unregister_blkdev(bcache_major, "bcache"); + acache_dev_exit(); unregister_reboot_notifier(&reboot); mutex_destroy(&bch_register_lock); } @@ -2388,6 +2447,8 @@ static int __init bcache_init(void) bch_debug_init(bcache_kobj); closure_debug_init(); + if (acache_dev_init()) + goto err; return 0; err: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 150cf4f4cf749d8725de356f4b56357b2f8a4198..178a66455481ec9558f58d88f11e4e590426c973 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -26,6 +26,12 @@ static const char * const bch_cache_modes[] = { }; /* Default is -1; we skip past it for stop_when_cache_set_failed */ +static const char * const bch_reada_cache_policies[] = { + "all", + "meta-only", + NULL +}; + static const char * const bch_stop_on_failure_modes[] = { "auto", "always", @@ -45,6 +51,13 @@ static const char * const error_actions[] = { NULL }; +static const char * const writeback_state[] = { + "default", + "quick", + "slow", + NULL +}; + write_attribute(attach); write_attribute(detach); write_attribute(unregister); @@ -90,10 +103,16 @@ read_attribute(io_errors); read_attribute(congested); rw_attribute(congested_read_threshold_us); rw_attribute(congested_write_threshold_us); +rw_attribute(gc_sectors); +rw_attribute(traffic_policy_start); +rw_attribute(force_write_through); rw_attribute(sequential_cutoff); +rw_attribute(read_bypass); +rw_attribute(inflight_block_enable); rw_attribute(data_csum); rw_attribute(cache_mode); +rw_attribute(readahead_cache_policy); rw_attribute(stop_when_cache_set_failed); rw_attribute(writeback_metadata); rw_attribute(writeback_running); @@ -105,7 +124,13 @@ rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_i_term_inverse); rw_attribute(writeback_rate_p_term_inverse); rw_attribute(writeback_rate_minimum); +rw_attribute(writeback_state); +read_attribute(writeback_sector_size_per_sec); +read_attribute(writeback_io_num_per_sec); +read_attribute(front_io_num_per_sec); read_attribute(writeback_rate_debug); +read_attribute(write_token_sector_size); +read_attribute(write_token_io_num); read_attribute(stripe_size); read_attribute(partial_stripes_expensive); @@ -128,6 +153,7 @@ rw_attribute(expensive_debug_checks); rw_attribute(cache_replacement_policy); rw_attribute(btree_shrinker_disabled); rw_attribute(copy_gc_enabled); +rw_attribute(gc_only_dirty_data); rw_attribute(size); static ssize_t bch_snprint_string_list(char *buf, @@ -160,6 +186,16 @@ SHOW(__bch_cached_dev) bch_cache_modes, BDEV_CACHE_MODE(&dc->sb)); + if (attr == &sysfs_writeback_state) + return bch_snprint_string_list(buf, PAGE_SIZE, + writeback_state, + dc->writeback_state); + + if (attr == &sysfs_readahead_cache_policy) + return bch_snprint_string_list(buf, PAGE_SIZE, + bch_reada_cache_policies, + dc->cache_readahead_policy); + if (attr == &sysfs_stop_when_cache_set_failed) return bch_snprint_string_list(buf, PAGE_SIZE, bch_stop_on_failure_modes, @@ -172,10 +208,13 @@ SHOW(__bch_cached_dev) var_printf(writeback_metadata, "%i"); var_printf(writeback_running, "%i"); var_print(writeback_delay); + var_print(writeback_sector_size_per_sec); + 
var_print(writeback_io_num_per_sec); + var_print(front_io_num_per_sec); var_print(writeback_percent); sysfs_hprint(writeback_rate, wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0); - sysfs_hprint(io_errors, atomic_read(&dc->io_errors)); + sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors)); sysfs_printf(io_error_limit, "%i", dc->error_limit); sysfs_printf(io_disable, "%i", dc->io_disable); var_print(writeback_rate_update_seconds); @@ -228,10 +267,14 @@ SHOW(__bch_cached_dev) var_printf(partial_stripes_expensive, "%u"); var_hprint(sequential_cutoff); + var_print(inflight_block_enable); + var_print(read_bypass); var_hprint(readahead); sysfs_print(running, atomic_read(&dc->running)); sysfs_print(state, states[BDEV_STATE(&dc->sb)]); + var_print(write_token_sector_size); + var_print(write_token_io_num); if (attr == &sysfs_label) { memcpy(buf, dc->sb.label, SB_LABEL_SIZE); @@ -283,8 +326,15 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_rate_update_seconds, dc->writeback_rate_update_seconds, 1, WRITEBACK_RATE_UPDATE_SECS_MAX); - d_strtoul(writeback_rate_i_term_inverse); - d_strtoul_nonzero(writeback_rate_p_term_inverse); + sysfs_strtoul_clamp(writeback_rate_i_term_inverse, + dc->writeback_rate_i_term_inverse, + 1, UINT_MAX); + sysfs_strtoul_clamp(writeback_rate_p_term_inverse, + dc->writeback_rate_p_term_inverse, + 1, UINT_MAX); + sysfs_strtoul_clamp(writeback_rate_minimum, + dc->writeback_rate_minimum, + 1, UINT_MAX); sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); @@ -294,7 +344,15 @@ STORE(__cached_dev) dc->io_disable = v ? 1 : 0; } - d_strtoi_h(sequential_cutoff); + sysfs_strtoul_clamp(sequential_cutoff, + dc->sequential_cutoff, + 0, UINT_MAX); + sysfs_strtoul_clamp(read_bypass, + dc->read_bypass, + 0, 1); + sysfs_strtoul_clamp(inflight_block_enable, + dc->inflight_block_enable, + 0, 1); d_strtoi_h(readahead); if (attr == &sysfs_clear_stats) @@ -315,6 +373,24 @@ STORE(__cached_dev) } } + if (attr == &sysfs_writeback_state) { + v = __sysfs_match_string(writeback_state, -1, buf); + + if (v < 0) + return v; + + dc->writeback_state = v; + } + + if (attr == &sysfs_readahead_cache_policy) { + v = __sysfs_match_string(bch_reada_cache_policies, -1, buf); + if (v < 0) + return v; + + if ((unsigned int) v != dc->cache_readahead_policy) + dc->cache_readahead_policy = v; + } + if (attr == &sysfs_stop_when_cache_set_failed) { v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); if (v < 0) @@ -386,8 +462,13 @@ STORE(bch_cached_dev) if (attr == &sysfs_writeback_running) bch_writeback_queue(dc); + /* + * Only set BCACHE_DEV_WB_RUNNING when cached device attached to + * a cache set, otherwise it doesn't make sense. 
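+ * dc->disk.c stays NULL until the device is attached, so it must be + * checked before scheduling the writeback rate update worker.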
+ */ if (attr == &sysfs_writeback_percent) - if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + if ((dc->disk.c != NULL) && + (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); @@ -403,23 +484,33 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_data_csum, #endif &sysfs_cache_mode, + &sysfs_writeback_state, + &sysfs_readahead_cache_policy, &sysfs_stop_when_cache_set_failed, &sysfs_writeback_metadata, &sysfs_writeback_running, &sysfs_writeback_delay, + &sysfs_writeback_sector_size_per_sec, + &sysfs_writeback_io_num_per_sec, &sysfs_writeback_percent, &sysfs_writeback_rate, &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, + &sysfs_writeback_rate_minimum, &sysfs_writeback_rate_debug, - &sysfs_errors, + &sysfs_write_token_sector_size, + &sysfs_write_token_io_num, + &sysfs_front_io_num_per_sec, + &sysfs_io_errors, &sysfs_io_error_limit, &sysfs_io_disable, &sysfs_dirty_data, &sysfs_stripe_size, &sysfs_partial_stripes_expensive, &sysfs_sequential_cutoff, + &sysfs_read_bypass, + &sysfs_inflight_block_enable, &sysfs_clear_stats, &sysfs_running, &sysfs_state, @@ -665,6 +756,12 @@ SHOW(__bch_cache_set) c->congested_read_threshold_us); sysfs_print(congested_write_threshold_us, c->congested_write_threshold_us); + sysfs_print(gc_sectors, + c->gc_sectors); + sysfs_print(traffic_policy_start, + c->traffic_policy_start); + sysfs_print(force_write_through, + c->force_write_through); sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); sysfs_printf(verify, "%i", c->verify); @@ -674,6 +771,7 @@ SHOW(__bch_cache_set) sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); + sysfs_printf(gc_only_dirty_data, "%i", c->gc_only_dirty_data); sysfs_printf(io_disable, "%i", test_bit(CACHE_SET_IO_DISABLE, &c->flags)); @@ -751,6 +849,12 @@ STORE(__bch_cache_set) c->congested_read_threshold_us); sysfs_strtoul(congested_write_threshold_us, c->congested_write_threshold_us); + sysfs_strtoul(gc_sectors, + c->gc_sectors); + sysfs_strtoul(traffic_policy_start, + c->traffic_policy_start); + sysfs_strtoul(force_write_through, + c->force_write_through); if (attr == &sysfs_errors) { v = __sysfs_match_string(error_actions, -1, buf); @@ -764,8 +868,17 @@ STORE(__bch_cache_set) c->error_limit = strtoul_or_return(buf); /* See count_io_errors() for why 88 */ - if (attr == &sysfs_io_error_halflife) - c->error_decay = strtoul_or_return(buf) / 88; + if (attr == &sysfs_io_error_halflife) { + unsigned long v = 0; + ssize_t ret; + + ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX); + if (!ret) { + c->error_decay = v / 88; + return size; + } + return ret; + } if (attr == &sysfs_io_disable) { v = strtoul_or_return(buf); @@ -787,6 +900,7 @@ STORE(__bch_cache_set) sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); + sysfs_strtoul(gc_only_dirty_data, c->gc_only_dirty_data); return size; } @@ -867,7 +981,11 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_gc_always_rewrite, &sysfs_btree_shrinker_disabled, &sysfs_copy_gc_enabled, + &sysfs_gc_only_dirty_data, &sysfs_io_disable, + &sysfs_gc_sectors, + &sysfs_traffic_policy_start, + &sysfs_force_write_through, NULL }; 
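/*
 * The gc_sectors, traffic_policy_start and force_write_through entries added
 * above are plain per-cache-set integers. As a rough userspace sketch (not
 * part of this patch, and assuming the usual /sys/fs/bcache/<set-uuid>/internal/
 * layout used for bch_cache_set_internal attributes), they could be toggled
 * like this:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_internal_knob(const char *set_uuid,
 *				     const char *knob, const char *val)
 *	{
 *		char path[256];
 *		int fd;
 *		ssize_t w;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/fs/bcache/%s/internal/%s", set_uuid, knob);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		w = write(fd, val, strlen(val));
 *		close(fd);
 *		return w < 0 ? -1 : 0;
 *	}
 *
 * For example, set_internal_knob(uuid, "traffic_policy_start", "1") turns on
 * the token-based write throttling path added in request.c above.
 */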
KTYPE(bch_cache_set_internal); diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h index 3fe82425859c233761189483dd11bfb4ee81bb41..0ad2715a884e871de949f76af75dacb4c6d6b79e 100644 --- a/drivers/md/bcache/sysfs.h +++ b/drivers/md/bcache/sysfs.h @@ -81,9 +81,16 @@ do { \ #define sysfs_strtoul_clamp(file, var, min, max) \ do { \ - if (attr == &sysfs_ ## file) \ - return strtoul_safe_clamp(buf, var, min, max) \ - ?: (ssize_t) size; \ + if (attr == &sysfs_ ## file) { \ + unsigned long v = 0; \ + ssize_t ret; \ + ret = strtoul_safe_clamp(buf, v, min, max); \ + if (!ret) { \ + var = v; \ + return size; \ + } \ + return ret; \ + } \ } while (0) #define strtoul_or_return(cp) \ diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 00aab6abcfe4fd3bb4e625780de0badee109dbf0..b1f5b7aea8724e33fa9eb28d51983f6885e369a7 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -113,8 +113,6 @@ do { \ #define heap_full(h) ((h)->used == (h)->size) -#define heap_empty(h) ((h)->used == 0) - #define DECLARE_FIFO(type, name) \ struct { \ size_t front, back, size, mask; \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 08c3a9f9676c9e89649b56f765fb378973149a62..901ad8bae761425235926b4049031914f5f911a4 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -222,7 +222,13 @@ static unsigned int writeback_delay(struct cached_dev *dc, !dc->writeback_percent) return 0; - return bch_next_delay(&dc->writeback_rate, sectors); + if (dc->writeback_state == WRITEBACK_DEFAULT) { + return bch_next_delay(&dc->writeback_rate, sectors); + } else if (dc->writeback_state == WRITEBACK_QUICK) { + return 0; + } else { + return msecs_to_jiffies(1000); + } } struct dirty_io { @@ -287,6 +293,9 @@ static void write_dirty_finish(struct closure *cl) : &dc->disk.c->writeback_keys_done); } + atomic_add(KEY_SIZE(&w->key), &dc->writeback_sector_size); + atomic_inc(&dc->writeback_io_num); + bch_keybuf_del(&dc->writeback_keys, w); up(&dc->in_flight); @@ -708,6 +717,10 @@ static int bch_writeback_thread(void *arg) } } + if (dc->writeback_write_wq) { + flush_workqueue(dc->writeback_write_wq); + destroy_workqueue(dc->writeback_write_wq); + } cached_dev_put(dc); wait_for_kthread_stop(); @@ -777,7 +790,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) bch_keybuf_init(&dc->writeback_keys); dc->writeback_metadata = true; - dc->writeback_running = true; + dc->writeback_running = false; dc->writeback_percent = 10; dc->writeback_delay = 30; atomic_long_set(&dc->writeback_rate.rate, 1024); @@ -803,8 +816,10 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc) "bcache_writeback"); if (IS_ERR(dc->writeback_thread)) { cached_dev_put(dc); + destroy_workqueue(dc->writeback_write_wq); return PTR_ERR(dc->writeback_thread); } + dc->writeback_running = true; WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); schedule_delayed_work(&dc->writeback_rate_update, diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index d2b9fdbc8994905e48dba5f2126cbea983ae602a..a3151c0e966095576687e47ea1e51d3d5a90dcaf 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -3,7 +3,8 @@ #define _BCACHE_WRITEBACK_H #define CUTOFF_WRITEBACK 40 -#define CUTOFF_WRITEBACK_SYNC 70 +#define MIN_CUTOFF_WRITEBACK_SYNC 70 +#define MAX_CUTOFF_WRITEBACK_SYNC 90 #define MAX_WRITEBACKS_IN_PASS 5 #define MAX_WRITESIZE_IN_PASS 5000 /* *512b */ @@ -57,10 +58,14 @@ static inline bool should_writeback(struct cached_dev 
*dc, struct bio *bio, unsigned int cache_mode, bool would_skip) { unsigned int in_use = dc->disk.c->gc_stats.in_use; + unsigned int cutoff = dc->disk.c->cutoff_writeback_sync; if (cache_mode != CACHE_MODE_WRITEBACK || test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || - in_use > CUTOFF_WRITEBACK_SYNC) + in_use > cutoff) + return false; + + if (bio_op(bio) == REQ_OP_DISCARD) return false; if (dc->partial_stripes_expensive && diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index dc385b70e4c336fc2322c9538f86538b000a8556..0b41e1db4de99b69acb226b39827c82caa3d6259 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1463,6 +1463,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) { sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT; + if (s >= c->start) + s -= c->start; + else + s = 0; if (likely(c->sectors_per_block_bits >= 0)) s >>= c->sectors_per_block_bits; else @@ -1471,6 +1475,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) } EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) +{ + return c->dm_io; +} +EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); + sector_t dm_bufio_get_block_number(struct dm_buffer *b) { return b->block; @@ -1790,6 +1800,12 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c) } EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); +void dm_bufio_client_reset(struct dm_bufio_client *c) +{ + drop_buffers(c); +} +EXPORT_SYMBOL_GPL(dm_bufio_client_reset); + void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) { c->start = start; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 5936de71883fb7f637b0192bf3144910d6e193d5..af6d4f898e4c1de8d9b72f053beb454d5617a1e8 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } @@ -930,6 +934,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, bool dirty_flag; *result = true; + if (from_cblock(cmd->cache_blocks) == 0) + /* Nothing to do */ + return 0; + r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); if (r) { @@ -1163,11 +1171,18 @@ static int __load_discards(struct dm_cache_metadata *cmd, if (r) return r; - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { + for (b = 0; ; b++) { r = fn(context, cmd->discard_block_size, to_dblock(b), dm_bitset_cursor_get_value(&c)); if (r) break; + + if (b >= (from_dblock(cmd->discard_nr_blocks) - 1)) + break; + + r = dm_bitset_cursor_next(&c); + if (r) + break; } dm_bitset_cursor_end(&c); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index b29a8327eed15641df9000e019c82ad5c1cffedc..67dba1a2c47e25bdfc5dadb36dad61d953125a19 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache) static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache) { - return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT); + 
return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); } static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell) @@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache) { struct dm_cache_migration *mg; - mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT); - if (!mg) - return NULL; + mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); memset(mg, 0, sizeof(*mg)); @@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi struct dm_bio_prison_cell_v2 *cell_prealloc, *cell; cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */ - if (!cell_prealloc) { - defer_bio(cache, bio); - return false; - } build_key(oblock, end, &key); r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); @@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg) struct dm_bio_prison_cell_v2 *prealloc; prealloc = alloc_prison_cell(cache); - if (!prealloc) { - DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache)); - mg_complete(mg, false); - return -ENOMEM; - } /* * Prevent writes to the block, but allow reads to continue. @@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio } mg = alloc_migration(cache); - if (!mg) { - policy_complete_background_work(cache->policy, op, false); - background_work_end(cache); - return -ENOMEM; - } mg->op = op; mg->overwrite_bio = bio; @@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg) struct dm_bio_prison_cell_v2 *prealloc; prealloc = alloc_prison_cell(cache); - if (!prealloc) { - invalidate_complete(mg, false); - return -ENOMEM; - } build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key); r = dm_cell_lock_v2(cache->prison, &key, @@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock, return -EPERM; mg = alloc_migration(cache); - if (!mg) { - background_work_end(cache); - return -ENOMEM; - } mg->overwrite_bio = bio; mg->invalidate_cblock = cblock; @@ -2883,8 +2859,8 @@ static void cache_postsuspend(struct dm_target *ti) prevent_background_work(cache); BUG_ON(atomic_read(&cache->nr_io_migrations)); - cancel_delayed_work(&cache->waker); - flush_workqueue(cache->wq); + cancel_delayed_work_sync(&cache->waker); + drain_workqueue(cache->wq); WARN_ON(cache->tracker.in_flight); /* @@ -3010,19 +2986,19 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache) static bool can_resize(struct cache *cache, dm_cblock_t new_size) { if (from_cblock(new_size) > from_cblock(cache->cache_size)) { - if (cache->sized) { - DMERR("%s: unable to extend cache due to missing cache table reload", - cache_device_name(cache)); - return false; - } + DMERR("%s: unable to extend cache due to missing cache table reload", + cache_device_name(cache)); + return false; } /* * We can't drop a dirty block when shrinking the cache. 
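* With the mappings loaded, the shrink check scans the dirty bitset from * the proposed new size upward; any set bit in that range is a dirty block * that would be dropped, so the resize is refused.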
*/ - while (from_cblock(new_size) < from_cblock(cache->cache_size)) { - new_size = to_cblock(from_cblock(new_size) + 1); - if (is_dirty(cache, new_size)) { + if (cache->loaded_mappings) { + new_size = to_cblock(find_next_bit(cache->dirty_bitset, + from_cblock(cache->cache_size), + from_cblock(new_size))); + if (new_size != cache->cache_size) { DMERR("%s: unable to shrink cache; cache block %llu is dirty", cache_device_name(cache), (unsigned long long) from_cblock(new_size)); @@ -3058,20 +3034,15 @@ static int cache_preresume(struct dm_target *ti) /* * Check to see if the cache has resized. */ - if (!cache->sized) { - r = resize_cache_dev(cache, csize); - if (r) - return r; - - cache->sized = true; - - } else if (csize != cache->cache_size) { + if (!cache->sized || csize != cache->cache_size) { if (!can_resize(cache, csize)) return -EINVAL; r = resize_cache_dev(cache, csize); if (r) return r; + + cache->sized = true; } if (!cache->loaded_mappings) { diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 7d480c930eaf0a1f4fe90238d830ea4ee61daebb..8cda3f7ddbae88ac62c275eabd8fd1aacdadca8f 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -110,6 +110,10 @@ struct mapped_device { /* zero-length flush that will be cloned and submitted to targets */ struct bio flush_bio; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + struct dm_stats stats; struct kthread_worker kworker; @@ -130,6 +134,7 @@ struct mapped_device { }; int md_in_flight(struct mapped_device *md); +void disable_discard(struct mapped_device *md); void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0481223b1deb826af43fa8f0da0b81e5b6654009..fee23d3e2eba9100d0fe5b5dc540f04baaba05cf 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -49,7 +49,7 @@ struct convert_context { struct bio *bio_out; struct bvec_iter iter_in; struct bvec_iter iter_out; - sector_t cc_sector; + u64 cc_sector; atomic_t cc_pending; union { struct skcipher_request *req; @@ -81,7 +81,7 @@ struct dm_crypt_request { struct convert_context *ctx; struct scatterlist sg_in[4]; struct scatterlist sg_out[4]; - sector_t iv_sector; + u64 iv_sector; }; struct crypt_config; @@ -160,7 +160,7 @@ struct crypt_config { struct iv_lmk_private lmk; struct iv_tcw_private tcw; } iv_gen_private; - sector_t iv_offset; + u64 iv_offset; unsigned int iv_size; unsigned short int sector_size; unsigned char sector_shift; @@ -482,8 +482,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); - int log = ilog2(bs); + unsigned bs; + int log; + + if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) + bs = crypto_aead_blocksize(any_tfm_aead(cc)); + else + bs = crypto_skcipher_blocksize(any_tfm(cc)); + log = ilog2(bs); /* we need to calculate how far we must shift the sector count * to get the cipher block count, we use this shift in _gen */ @@ -932,7 +938,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) if (IS_ERR(bip)) return PTR_ERR(bip); - tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); + tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); bip->bip_iter.bi_size = tag_len; bip->bip_iter.bi_sector = io->cc->start + io->sector; @@ -949,6 +955,7 @@ static int 
crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) { #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); + struct mapped_device *md = dm_table_get_md(ti->table); /* From now we require underlying device with our integrity profile */ if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { @@ -968,7 +975,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) if (crypt_integrity_aead(cc)) { cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; - DMINFO("Integrity AEAD, tag size %u, IV size %u.", + DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md), cc->integrity_tag_size, cc->integrity_iv_size); if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) { @@ -976,7 +983,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) return -EINVAL; } } else if (cc->integrity_iv_size) - DMINFO("Additional per-sector space %u bytes for IV.", + DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md), cc->integrity_iv_size); if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { @@ -1726,6 +1733,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.bio_out = clone; io->ctx.iter_out = clone->bi_iter; + if (crypt_integrity_aead(cc)) { + bio_copy_data(clone, io->base_bio); + io->ctx.bio_in = clone; + io->ctx.iter_in = clone->bi_iter; + } + sector += bio_sectors(clone); crypt_inc_pending(io); @@ -2405,9 +2418,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key * capi:cipher_api_spec-iv:ivopts */ tmp = &cipher_in[strlen("capi:")]; - cipher_api = strsep(&tmp, "-"); - *ivmode = strsep(&tmp, ":"); - *ivopts = tmp; + + /* Separate IV options if present, it can contain another '-' in hash name */ + *ivopts = strrchr(tmp, ':'); + if (*ivopts) { + **ivopts = '\0'; + (*ivopts)++; + } + /* Parse IV mode */ + *ivmode = strrchr(tmp, '-'); + if (*ivmode) { + **ivmode = '\0'; + (*ivmode)++; + } + /* The rest is crypto API spec */ + cipher_api = tmp; if (*ivmode && !strcmp(*ivmode, "lmk")) cc->tfms_count = 64; @@ -2477,11 +2502,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key goto bad_mem; chainmode = strsep(&tmp, "-"); - *ivopts = strsep(&tmp, "-"); - *ivmode = strsep(&*ivopts, ":"); - - if (tmp) - DMWARN("Ignoring unexpected additional cipher options"); + *ivmode = strsep(&tmp, ":"); + *ivopts = tmp; /* * For compatibility with the original dm-crypt mapping format, if @@ -2780,7 +2802,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -EINVAL; - if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; goto bad; } @@ -2806,17 +2828,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); + cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) - cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); + cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); else - cc->crypt_queue = alloc_workqueue("kcryptd", - WQ_HIGHPRI 
| WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, + cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus()); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; @@ -2836,6 +2857,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) wake_up_process(cc->write_thread); ti->num_flush_bios = 1; + ti->limit_swap_bios = true; return 0; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 2fb7bb4304ad7f0ade6eb10ccd02d07f36271acf..f496213f8b6753b8760901848140c3f9901e7e06 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; - destroy_workqueue(dc->kdelayd_wq); + if (dc->kdelayd_wq) + destroy_workqueue(dc->kdelayd_wq); if (dc->read.dev) dm_put_device(ti, dc->read.dev); @@ -141,7 +142,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a unsigned long long tmpll; char dummy; - if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; return -EINVAL; } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 32aabe27b37ce94d06d8df4e855ae6790d200dfd..2fcf62fb2844f467326c653ae20f59caf4b9d8b8 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) devname = dm_shift_arg(&as); r = -EINVAL; - if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) { + if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { ti->error = "Invalid device sector"; goto bad; } @@ -287,20 +287,31 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) { - unsigned bio_bytes = bio_cur_bytes(bio); - char *data = bio_data(bio); + unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1; + + struct bvec_iter iter; + struct bio_vec bvec; + + if (!bio_has_data(bio)) + return; /* - * Overwrite the Nth byte of the data returned. + * Overwrite the Nth byte of the bio's data, on whichever page + * it falls. */ - if (data && bio_bytes >= fc->corrupt_bio_byte) { - data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; - - DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " - "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n", - bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, - (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf, - (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); + bio_for_each_segment(bvec, bio, iter) { + if (bio_iter_len(bio, iter) > corrupt_bio_byte) { + char *segment = (page_address(bio_iter_page(bio, iter)) + + bio_iter_offset(bio, iter)); + segment[corrupt_bio_byte] = fc->corrupt_bio_value; + DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " + "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n", + bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, + (bio_data_dir(bio) == WRITE) ? 
'w' : 'r', bio->bi_opf, + (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size); + break; + } + corrupt_bio_byte -= bio_iter_len(bio, iter); } } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index e1fa6baf4e8e39ad79d39254c04be23333992f65..66016ce253ceae2460f5a711aab39246c683b5aa 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -186,12 +186,13 @@ struct dm_integrity_c { __u8 sectors_per_block; unsigned char mode; - int suspending; int failed; struct crypto_shash *internal_hash; + struct dm_target *ti; + /* these variables are locked with endio_wait.lock */ struct rb_root in_progress; struct list_head wait_list; @@ -236,6 +237,7 @@ struct dm_integrity_c { bool journal_uptodate; bool just_formatted; + bool legacy_recalculate; struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; @@ -345,6 +347,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic) return READ_ONCE(ic->failed); } +static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) +{ + if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) && + !ic->legacy_recalculate) + return true; + return false; +} + static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, unsigned j, unsigned char seq) { @@ -908,7 +918,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) { return range1->logical_sector < range2->logical_sector + range2->n_sectors && - range2->logical_sector + range2->n_sectors > range2->logical_sector; + range1->logical_sector + range1->n_sectors > range2->logical_sector; } static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) @@ -954,8 +964,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity struct dm_integrity_range *last_range = list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); struct task_struct *last_range_task; - if (!ranges_overlap(range, last_range)) - break; last_range_task = last_range->task; list_del(&last_range->wait_entry); if (!add_new_range(ic, last_range, false)) { @@ -1155,12 +1163,52 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se return 0; } -static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) +struct flush_request { + struct dm_io_request io_req; + struct dm_io_region io_reg; + struct dm_integrity_c *ic; + struct completion comp; +}; + +static void flush_notify(unsigned long error, void *fr_) +{ + struct flush_request *fr = fr_; + if (unlikely(error != 0)) + dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO); + complete(&fr->comp); +} + +static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) { int r; + + struct flush_request fr; + + if (!ic->meta_dev) + flush_data = false; + if (flush_data) { + fr.io_req.bi_op = REQ_OP_WRITE, + fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + fr.io_req.mem.type = DM_IO_KMEM, + fr.io_req.mem.ptr.addr = NULL, + fr.io_req.notify.fn = flush_notify, + fr.io_req.notify.context = &fr; + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), + fr.io_reg.bdev = ic->dev->bdev, + fr.io_reg.sector = 0, + fr.io_reg.count = 0, + fr.ic = ic; + init_completion(&fr.comp); + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); + BUG_ON(r); + } + r = dm_bufio_write_dirty_buffers(ic->bufio); if (unlikely(r)) dm_integrity_io_error(ic, "writing tags", r); + + 
if (flush_data) + wait_for_completion(&fr.comp); } static void sleep_on_endio_wait(struct dm_integrity_c *ic) @@ -1357,8 +1405,8 @@ static void integrity_metadata(struct work_struct *w) checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE); if (unlikely(r)) { if (r > 0) { - DMERR("Checksum failed at sector 0x%llx", - (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); + DMERR_LIMIT("Checksum failed at sector 0x%llx", + (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); r = -EILSEQ; atomic64_inc(&ic->number_of_mismatches); } @@ -1550,8 +1598,8 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { - DMERR("Checksum failed when reading from journal, at sector 0x%llx", - (unsigned long long)logical_sector); + DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", + (unsigned long long)logical_sector); } } #endif @@ -1751,7 +1799,22 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map queue_work(ic->wait_wq, &dio->work); return; } + if (journal_read_pos != NOT_FOUND) + dio->range.n_sectors = ic->sectors_per_block; wait_and_add_new_range(ic, &dio->range); + /* + * wait_and_add_new_range drops the spinlock, so the journal + * may have been changed arbitrarily. We need to recheck. + * To simplify the code, we restrict I/O size to just one block. + */ + if (journal_read_pos != NOT_FOUND) { + sector_t next_sector; + unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); + if (unlikely(new_pos != journal_read_pos)) { + remove_range_unlocked(ic, &dio->range); + goto retry; + } + } } spin_unlock_irq(&ic->endio_wait.lock); @@ -1845,7 +1908,7 @@ static void integrity_commit(struct work_struct *w) flushes = bio_list_get(&ic->flush_bio_list); if (unlikely(ic->mode != 'J')) { spin_unlock_irq(&ic->endio_wait.lock); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); goto release_flush_bios; } @@ -2056,7 +2119,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, complete_journal_op(&comp); wait_for_completion_io(&comp.comp); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } static void integrity_writer(struct work_struct *w) @@ -2067,7 +2130,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (READ_ONCE(ic->suspending) && !ic->meta_dev) + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2098,7 +2161,7 @@ static void recalc_write_super(struct dm_integrity_c *ic) { int r; - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); if (dm_integrity_failed(ic)) return; @@ -2126,7 +2189,7 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(READ_ONCE(ic->suspending))) + if (unlikely(dm_post_suspending(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); @@ -2398,8 +2461,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti) del_timer_sync(&ic->autocommit_timer); - WRITE_ONCE(ic->suspending, 1); - if (ic->recalc_wq) drain_workqueue(ic->recalc_wq); @@ -2410,11 +2471,9 @@ static void dm_integrity_postsuspend(struct dm_target *ti) if (ic->meta_dev) 
queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } - WRITE_ONCE(ic->suspending, 0); - BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); ic->journal_uptodate = true; @@ -2466,6 +2525,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, arg_count += !!ic->internal_hash_alg.alg_string; arg_count += !!ic->journal_crypt_alg.alg_string; arg_count += !!ic->journal_mac_alg.alg_string; + arg_count += ic->legacy_recalculate; DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, ic->tag_size, ic->mode, arg_count); if (ic->meta_dev) @@ -2479,6 +2539,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage); DMEMIT(" commit_time:%u", ic->autocommit_msec); + if (ic->legacy_recalculate) + DMEMIT(" legacy_recalculate"); #define EMIT_ALG(a, n) \ do { \ @@ -2559,7 +2621,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic) if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) return -EINVAL; } else { - __u64 meta_size = ic->provided_data_sectors * ic->tag_size; + __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) >> (ic->log2_buffer_sectors + SECTOR_SHIFT); meta_size <<= ic->log2_buffer_sectors; @@ -3081,7 +3143,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned extra_args; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 9, "Invalid number of feature args"}, + {0, 12, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; bool recalculate; @@ -3103,6 +3165,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = ic; ti->per_io_data_size = sizeof(struct dm_integrity_io); + ic->ti = ti; ic->in_progress = RB_ROOT; INIT_LIST_HEAD(&ic->wait_list); @@ -3174,7 +3237,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) journal_watermark = val; else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) sync_msec = val; - else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { + else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { if (ic->meta_dev) { dm_put_device(ti, ic->meta_dev); ic->meta_dev = NULL; @@ -3193,23 +3256,25 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } ic->sectors_per_block = val >> SECTOR_SHIFT; - } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { + } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, "Invalid internal_hash argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { + } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, "Invalid journal_crypt argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { + } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, "Invalid 
journal_mac argument"); if (r) goto bad; } else if (!strcmp(opt_string, "recalculate")) { recalculate = true; + } else if (!strcmp(opt_string, "legacy_recalculate")) { + ic->legacy_recalculate = true; } else { r = -EINVAL; ti->error = "Invalid argument"; @@ -3430,7 +3495,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); DEBUG_print(" journal_entries %u\n", ic->journal_entries); DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); - DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors); + DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors); DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); @@ -3469,6 +3534,20 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -ENOMEM; goto bad; } + } else { + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { + ti->error = "Recalculate can only be specified with internal_hash"; + r = -EINVAL; + goto bad; + } + } + + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && + dm_integrity_disable_recalculate(ic)) { + ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; + r = -EOPNOTSUPP; + goto bad; } ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index b810ea77e6b162df29b0f3f57605317000461285..c8c27d23bb456ec7d0dc33b8b334abcbd61bdc7d 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -529,7 +530,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ * Grab our output buffer. 
*/ nl = orig_nl = get_result_buffer(param, param_size, &len); - if (len < needed) { + if (len < needed || len < sizeof(nl->dev)) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } @@ -1409,11 +1410,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s hc->new_map = NULL; } - param->flags &= ~DM_INACTIVE_PRESENT_FLAG; - - __dev_status(hc->md, param); md = hc->md; up_write(&_hash_lock); + + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + __dev_status(md, param); + if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); @@ -1669,6 +1671,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; + cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls)); *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } @@ -1720,8 +1723,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, - int ioctl_flags, - struct dm_ioctl **param, int *param_flags) + int ioctl_flags, struct dm_ioctl **param, int *param_flags) { struct dm_ioctl *dmi; int secure_data; @@ -1762,18 +1764,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern *param_flags |= DM_PARAMS_MALLOC; - if (copy_from_user(dmi, user, param_kernel->data_size)) - goto bad; + /* Copy from param_kernel (which was already copied from user) */ + memcpy(dmi, param_kernel, minimum_data_size); -data_copied: - /* - * Abort if something changed the ioctl data while it was being copied. - */ - if (dmi->data_size != param_kernel->data_size) { - DMERR("rejecting ioctl: data size modified while processing parameters"); + if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size, + param_kernel->data_size - minimum_data_size)) goto bad; - } - +data_copied: /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, param_kernel->data_size)) goto bad; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 2fc4213e02b5fa396c4fc380d0b6d886750e1d17..3f694d9061ec5fd53e2449f601cee75d3ec6023e 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -56,15 +56,17 @@ struct dm_kcopyd_client { atomic_t nr_jobs; /* - * We maintain three lists of jobs: + * We maintain four lists of jobs: * * i) jobs waiting for pages * ii) jobs that have pages, and are waiting for the io to be issued. - * iii) jobs that have completed. + * iii) jobs that don't need to do any IO and just run a callback + * iv) jobs that have completed. * - * All three of these are protected by job_lock. + * All four of these are protected by job_lock. */ spinlock_t job_lock; + struct list_head callback_jobs; struct list_head complete_jobs; struct list_head io_jobs; struct list_head pages_jobs; @@ -546,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job) * no point in continuing. 
*/ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && - job->master_job->write_err) + job->master_job->write_err) { + job->write_err = job->master_job->write_err; return -EIO; + } io_job_start(job->kc->throttle); @@ -599,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, else job->read_err = 1; push(&kc->complete_jobs, job); + wake(kc); break; } @@ -625,6 +630,7 @@ static void do_work(struct work_struct *work) struct dm_kcopyd_client *kc = container_of(work, struct dm_kcopyd_client, kcopyd_work); struct blk_plug plug; + unsigned long flags; /* * The order that these are called is *very* important. @@ -633,6 +639,10 @@ static void do_work(struct work_struct *work) * list. io jobs call wake when they complete and it all * starts again. */ + spin_lock_irqsave(&kc->job_lock, flags); + list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); + blk_start_plug(&plug); process_jobs(&kc->complete_jobs, kc, run_complete_job); process_jobs(&kc->pages_jobs, kc, run_pages_job); @@ -650,7 +660,7 @@ static void dispatch_job(struct kcopyd_job *job) struct dm_kcopyd_client *kc = job->kc; atomic_inc(&kc->nr_jobs); if (unlikely(!job->source.count)) - push(&kc->complete_jobs, job); + push(&kc->callback_jobs, job); else if (job->pages == &zero_page_list) push(&kc->io_jobs, job); else @@ -858,7 +868,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err) job->read_err = read_err; job->write_err = write_err; - push(&kc->complete_jobs, job); + push(&kc->callback_jobs, job); wake(kc); } EXPORT_SYMBOL(dm_kcopyd_do_callback); @@ -888,6 +898,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro return ERR_PTR(-ENOMEM); spin_lock_init(&kc->job_lock); + INIT_LIST_HEAD(&kc->callback_jobs); INIT_LIST_HEAD(&kc->complete_jobs); INIT_LIST_HEAD(&kc->io_jobs); INIT_LIST_HEAD(&kc->pages_jobs); @@ -939,6 +950,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc) /* Wait for completion of all jobs submitted by this client. */ wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); + BUG_ON(!list_empty(&kc->callback_jobs)); BUG_ON(!list_empty(&kc->complete_jobs)); BUG_ON(!list_empty(&kc->io_jobs)); BUG_ON(!list_empty(&kc->pages_jobs)); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 2f7c44a006c417c0c8c98eb585d97e4682e8112b..caa08c4b84cd4984b81850eab6ce638f1d172899 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -EINVAL; - if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { ti->error = "Invalid device sector"; goto bad; } diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 9ea2b0291f20d781b355cca994192a595c6a1b39..e549392e0ea517a55ec672a95a909a8082d19414 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -60,6 +60,7 @@ #define WRITE_LOG_VERSION 1ULL #define WRITE_LOG_MAGIC 0x6a736677736872ULL +#define WRITE_LOG_SUPER_SECTOR 0 /* * The disk format for this is braindead simple. 
@@ -115,6 +116,7 @@ struct log_writes_c { struct list_head logging_blocks; wait_queue_head_t wait; struct task_struct *log_kthread; + struct completion super_done; }; struct pending_block { @@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio) bio_put(bio); } +static void log_end_super(struct bio *bio) +{ + struct log_writes_c *lc = bio->bi_private; + + complete(&lc->super_done); + log_end_io(bio); +} + /* * Meant to be called if there is an error, it will free all the pages * associated with the block. @@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry, bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; bio_set_dev(bio, lc->logdev->bdev); - bio->bi_end_io = log_end_io; + bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? + log_end_super : log_end_io; bio->bi_private = lc; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc) super.nr_entries = cpu_to_le64(lc->logged_entries); super.sectorsize = cpu_to_le32(lc->sectorsize); - if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) { + if (write_metadata(lc, &super, sizeof(super), NULL, 0, + WRITE_LOG_SUPER_SECTOR)) { DMERR("Couldn't write super"); return -1; } + /* + * Super sector should be writen in-order, otherwise the + * nr_entries could be rewritten incorrectly by an old bio. + */ + wait_for_completion_io(&lc->super_done); + return 0; } @@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_LIST_HEAD(&lc->unflushed_blocks); INIT_LIST_HEAD(&lc->logging_blocks); init_waitqueue_head(&lc->wait); + init_completion(&lc->super_done); atomic_set(&lc->io_blocks, 0); atomic_set(&lc->pending_blocks, 0); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 419362c2d8aca1b95e745633f937ecd2a3325aed..6c43c09689c7f8afdc2552da91dc0e0b383408cf 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -554,8 +554,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, return DM_MAPIO_REMAPPED; } -static void multipath_release_clone(struct request *clone) +static void multipath_release_clone(struct request *clone, + union map_info *map_context) { + if (unlikely(map_context)) { + /* + * non-NULL map_context means caller is still map + * method; must undo multipath_clone_and_map() + */ + struct dm_mpath_io *mpio = get_mpio(map_context); + struct pgpath *pgpath = mpio->pgpath; + + if (pgpath && pgpath->pg->ps.type->end_io) + pgpath->pg->ps.type->end_io(&pgpath->pg->ps, + &pgpath->path, + mpio->nr_bytes); + } + blk_put_request(clone); } @@ -571,10 +586,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) /* Do we need to select a new pgpath? */ pgpath = READ_ONCE(m->current_pgpath); - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); - if (!pgpath || !queue_io) + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, bio->bi_iter.bi_size); + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */ + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); + if ((pgpath && queue_io) || (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { /* Queue for the daemon to resubmit */ @@ -594,45 +611,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) return pgpath; } -static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) -{ - struct pgpath *pgpath; - unsigned long flags; - - /* Do we need to select a new pgpath? 
*/ - /* - * FIXME: currently only switching path if no path (due to failure, etc) - * - which negates the point of using a path selector - */ - pgpath = READ_ONCE(m->current_pgpath); - if (!pgpath) - pgpath = choose_pgpath(m, bio->bi_iter.bi_size); - - if (!pgpath) { - if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { - /* Queue for the daemon to resubmit */ - spin_lock_irqsave(&m->lock, flags); - bio_list_add(&m->queued_bios, bio); - spin_unlock_irqrestore(&m->lock, flags); - queue_work(kmultipathd, &m->process_queued_bios); - - return ERR_PTR(-EAGAIN); - } - return NULL; - } - - return pgpath; -} - static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio) { - struct pgpath *pgpath; - - if (!m->hw_handler_name) - pgpath = __map_bio_fast(m, bio); - else - pgpath = __map_bio(m, bio); + struct pgpath *pgpath = __map_bio(m, bio); if (IS_ERR(pgpath)) return DM_MAPIO_SUBMITTED; @@ -892,6 +874,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps if (attached_handler_name || m->hw_handler_name) { INIT_DELAYED_WORK(&p->activate_path, activate_path_work); r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); + kfree(attached_handler_name); if (r) { dm_put_device(ti, p->path.dev); goto bad; @@ -906,7 +889,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps return p; bad: - kfree(attached_handler_name); free_pgpath(p); return ERR_PTR(r); } @@ -1862,7 +1844,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv, goto out; } - r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); + r = __dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev, false); if (r) { DMWARN("message: error getting device %s", argv[1]); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c44925e4e4813d246d0d208fef5587019e4afb17..cb34dbf8bf315927ca5c81f1875946e915aa58ff 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1892,6 +1892,14 @@ static bool rs_takeover_requested(struct raid_set *rs) return rs->md.new_level != rs->md.level; } +/* True if layout is set to reshape. */ +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev) +{ + return (use_mddev ? rs->md.delta_disks : rs->delta_disks) || + rs->md.new_layout != rs->md.layout || + rs->md.new_chunk_sectors != rs->md.chunk_sectors; +} + /* True if @rs is requested to reshape by ctr */ static bool rs_reshape_requested(struct raid_set *rs) { @@ -1904,9 +1912,7 @@ static bool rs_reshape_requested(struct raid_set *rs) if (rs_is_raid0(rs)) return false; - change = mddev->new_layout != mddev->layout || - mddev->new_chunk_sectors != mddev->chunk_sectors || - rs->delta_disks; + change = rs_is_layout_change(rs, false); /* Historical case to support raid1 reshape without delta disks */ if (rs_is_raid1(rs)) { @@ -2475,7 +2481,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) } /* Enable bitmap creation for RAID levels != 0 */ - mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); + mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 
0 : to_sector(4096); mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; if (!test_and_clear_bit(FirstUse, &rdev->flags)) { @@ -2843,7 +2849,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs) } /* - * + * Reshape: * - change raid layout * - change chunk size * - add disks @@ -2952,6 +2958,20 @@ static int rs_setup_reshape(struct raid_set *rs) return r; } +/* + * If the md resync thread has updated superblock with max reshape position + * at the end of a reshape but not (yet) reset the layout configuration + * changes -> reset the latter. + */ +static void rs_reset_inconclusive_reshape(struct raid_set *rs) +{ + if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) { + rs_set_cur(rs); + rs->md.delta_disks = 0; + rs->md.reshape_backwards = 0; + } +} + /* * Enable/disable discard support on RAID set depending on * RAID level and discard properties of underlying RAID members. @@ -3199,7 +3219,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) */ r = rs_prepare_reshape(rs); if (r) - return r; + goto bad; /* Reshaping ain't recovery, so disable recovery */ rs_setup_recovery(rs, MaxSector); @@ -3221,11 +3241,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; + /* Catch any inconclusive reshape superblock content. */ + rs_reset_inconclusive_reshape(rs); + /* Start raid set read-only and assumed clean to change in raid_resume() */ rs->md.ro = 1; rs->md.in_sync = 1; - /* Keep array frozen */ + /* Keep array frozen until resume. */ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); /* Has to be held on running the array */ @@ -3239,7 +3262,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) } r = md_start(&rs->md); - if (r) { ti->error = "Failed to start raid array"; mddev_unlock(&rs->md); @@ -3681,17 +3703,13 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, return -EINVAL; if (!strcasecmp(argv[0], "frozen")) - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + frozen_sync_thread(mddev); + else if (!strcasecmp(argv[0], "idle")) + idle_sync_thread(mddev); else clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev); - } - } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + if (decipher_sync_action(mddev, mddev->recovery) != st_idle) return -EBUSY; else if (!strcasecmp(argv[0], "resync")) ; /* MD_RECOVERY_NEEDED set below */ diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 79eab1071ec22ba73458cc99c5841de30f2105b0..5a51151f680d6b0d552a80f549dfce0ede0d6b24 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, char dummy; int ret; - if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) { + if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 || + offset != (sector_t)offset) { ti->error = "Invalid offset"; return -EINVAL; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6e547b8dd2982425ed51dff7b5c3a027ccb6cab7..288064e94e52611b32965a8643d111de5949eeb4 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -219,7 +219,7 @@ static void dm_end_request(struct request *clone, blk_status_t error) struct request *rq = tio->orig; blk_rq_unprep_clone(clone); - tio->ti->type->release_clone_rq(clone); + 
tio->ti->type->release_clone_rq(clone, NULL); rq_end_stats(md, rq); if (!rq->q->mq_ops) @@ -270,7 +270,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ rq_end_stats(md, rq); if (tio->clone) { blk_rq_unprep_clone(tio->clone); - tio->ti->type->release_clone_rq(tio->clone); + tio->ti->type->release_clone_rq(tio->clone, NULL); } if (!rq->q->mq_ops) @@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) } if (unlikely(error == BLK_STS_TARGET)) { - if (req_op(clone) == REQ_OP_WRITE_SAME && - !clone->q->limits.max_write_same_sectors) + if (req_op(clone) == REQ_OP_DISCARD && + !clone->q->limits.max_discard_sectors) + disable_discard(tio->md); + else if (req_op(clone) == REQ_OP_WRITE_SAME && + !clone->q->limits.max_write_same_sectors) disable_write_same(tio->md); - if (req_op(clone) == REQ_OP_WRITE_ZEROES && - !clone->q->limits.max_write_zeroes_sectors) + else if (req_op(clone) == REQ_OP_WRITE_ZEROES && + !clone->q->limits.max_write_zeroes_sectors) disable_write_zeroes(tio->md); } @@ -407,7 +410,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ clone->rq_flags |= RQF_IO_STAT; clone->start_time_ns = ktime_get_ns(); - r = blk_insert_cloned_request(clone->q, clone); + r = __blk_insert_cloned_request(clone->q, clone, true); if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE) /* must complete clone in terms of original request */ dm_complete_request(rq, r); @@ -492,7 +495,7 @@ static int map_request(struct dm_rq_target_io *tio) case DM_MAPIO_REMAPPED: if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { /* -ENOMEM */ - ti->type->release_clone_rq(clone); + ti->type->release_clone_rq(clone, &tio->info); return DM_MAPIO_REQUEUE; } @@ -502,7 +505,8 @@ static int map_request(struct dm_rq_target_io *tio) ret = dm_dispatch_clone_request(clone, rq); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { blk_rq_unprep_clone(clone); - tio->ti->type->release_clone_rq(clone); + blk_mq_cleanup_rq(clone); + tio->ti->type->release_clone_rq(clone, &tio->info); tio->clone = NULL; if (!rq->q->mq_ops) r = DM_MAPIO_DELAY_REQUEUE; @@ -748,8 +752,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!ti)) { int srcu_idx; - struct dm_table *map = dm_get_live_table(md, &srcu_idx); + struct dm_table *map; + map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + dm_put_live_table(md, srcu_idx); + return BLK_STS_RESOURCE; + } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); } @@ -830,6 +839,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) blk_mq_free_tag_set(md->tag_set); out_kfree_tag_set: kfree(md->tag_set); + md->tag_set = NULL; return err; } @@ -839,6 +849,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md) if (md->tag_set) { blk_mq_free_tag_set(md->tag_set); kfree(md->tag_set); + md->tag_set = NULL; } } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index ae4b33d109246e305c73f8231d122ce5b5cfdb41..ad65b4a8464d71dca989efd37869427b98d1e93b 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -105,6 +105,9 @@ struct dm_snapshot { /* The on disk metadata handler */ struct dm_exception_store *store; + unsigned in_progress; + struct wait_queue_head in_progress_wait; + struct dm_kcopyd_client *kcopyd_client; /* Wait for events based on state_bits */ @@ -134,6 +137,11 @@ struct dm_snapshot { * for them to be committed. 
*/ struct bio_list bios_queued_during_merge; + + /* + * Flush data after merge. + */ + struct bio flush_bio; }; /* @@ -145,6 +153,19 @@ struct dm_snapshot { #define RUNNING_MERGE 0 #define SHUTDOWN_MERGE 1 +/* + * Maximum number of chunks being copied on write. + * + * The value was decided experimentally as a trade-off between memory + * consumption, stalling the kernel's workqueues and maintaining a high enough + * throughput. + */ +#define DEFAULT_COW_THRESHOLD 2048 + +static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; +module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); +MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, "A percentage of time allocated for copy on write"); @@ -629,8 +650,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et, for (i = 0; i < size; i++) { slot = et->table + i; - list_for_each_entry_safe (ex, next, slot, hash_list) + list_for_each_entry_safe (ex, next, slot, hash_list) { kmem_cache_free(mem, ex); + cond_resched(); + } } vfree(et->table); @@ -773,7 +796,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) static uint32_t __minimum_chunk_size(struct origin *o) { struct dm_snapshot *snap; - unsigned chunk_size = 0; + unsigned chunk_size = rounddown_pow_of_two(UINT_MAX); if (o) list_for_each_entry(snap, &o->snapshots, list) @@ -1045,6 +1068,17 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s) static void error_bios(struct bio *bio); +static int flush_data(struct dm_snapshot *s) +{ + struct bio *flush_bio = &s->flush_bio; + + bio_reset(flush_bio); + bio_set_dev(flush_bio, s->origin->bdev); + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + return submit_bio_wait(flush_bio); +} + static void merge_callback(int read_err, unsigned long write_err, void *context) { struct dm_snapshot *s = context; @@ -1058,6 +1092,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context) goto shut; } + if (flush_data(s) < 0) { + DMERR("Flush after merge failed: shutting down merge"); + goto shut; + } + if (s->store->type->commit_merge(s->store, s->num_merging_chunks) < 0) { DMERR("Write error in exception store: shutting down merge"); @@ -1182,6 +1221,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->first_merging_chunk = 0; s->num_merging_chunks = 0; bio_list_init(&s->bios_queued_during_merge); + bio_init(&s->flush_bio, NULL, 0); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -1190,6 +1230,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_hash_tables; } + init_waitqueue_head(&s->in_progress_wait); + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(s->kcopyd_client)) { r = PTR_ERR(s->kcopyd_client); @@ -1246,6 +1288,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (!s->store->chunk_size) { ti->error = "Chunk size not set"; + r = -EINVAL; goto bad_read_metadata; } @@ -1373,13 +1416,62 @@ static void snapshot_dtr(struct dm_target *ti) mutex_destroy(&s->lock); + bio_uninit(&s->flush_bio); + dm_put_device(ti, s->cow); dm_put_device(ti, s->origin); + WARN_ON(s->in_progress); + kfree(s); } +static void account_start_copy(struct dm_snapshot *s) +{ + spin_lock(&s->in_progress_wait.lock); + s->in_progress++; + spin_unlock(&s->in_progress_wait.lock); +} + +static void account_end_copy(struct dm_snapshot *s) +{ + 
spin_lock(&s->in_progress_wait.lock); + BUG_ON(!s->in_progress); + s->in_progress--; + if (likely(s->in_progress <= cow_threshold) && + unlikely(waitqueue_active(&s->in_progress_wait))) + wake_up_locked(&s->in_progress_wait); + spin_unlock(&s->in_progress_wait.lock); +} + +static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) +{ + if (unlikely(s->in_progress > cow_threshold)) { + spin_lock(&s->in_progress_wait.lock); + if (likely(s->in_progress > cow_threshold)) { + /* + * NOTE: this throttle doesn't account for whether + * the caller is servicing an IO that will trigger a COW + * so excess throttling may result for chunks not required + * to be COW'd. But if cow_threshold was reached, extra + * throttling is unlikely to negatively impact performance. + */ + DECLARE_WAITQUEUE(wait, current); + __add_wait_queue(&s->in_progress_wait, &wait); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&s->in_progress_wait.lock); + if (unlock_origins) + up_read(&_origins_lock); + io_schedule(); + remove_wait_queue(&s->in_progress_wait, &wait); + return false; + } + spin_unlock(&s->in_progress_wait.lock); + } + return true; +} + /* * Flush a list of buffers. */ @@ -1395,7 +1487,7 @@ static void flush_bios(struct bio *bio) } } -static int do_origin(struct dm_dev *origin, struct bio *bio); +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); /* * Flush a list of buffers. @@ -1408,7 +1500,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - r = do_origin(s->origin, bio); + r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) generic_make_request(bio); bio = n; @@ -1575,6 +1667,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) rb_link_node(&pe->out_of_order_node, parent, p); rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); } + account_end_copy(s); } /* @@ -1598,6 +1691,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ + account_start_copy(s); dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } @@ -1617,6 +1711,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; + account_start_copy(s); callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); @@ -1707,6 +1802,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (!s->valid) return DM_MAPIO_KILL; + if (bio_data_dir(bio) == WRITE) { + while (unlikely(!wait_for_in_progress(s, false))) + ; /* wait_for_in_progress() has slept */ + } + mutex_lock(&s->lock); if (!s->valid || (unlikely(s->snapshot_overflowed) && @@ -1855,7 +1955,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) if (bio_data_dir(bio) == WRITE) { mutex_unlock(&s->lock); - return do_origin(s->origin, bio); + return do_origin(s->origin, bio, false); } out_unlock: @@ -2192,15 +2292,24 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, /* * Called on a write from the origin driver. 
*/ -static int do_origin(struct dm_dev *origin, struct bio *bio) +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) { struct origin *o; int r = DM_MAPIO_REMAPPED; +again: down_read(&_origins_lock); o = __lookup_origin(origin->bdev); - if (o) + if (o) { + if (limit) { + struct dm_snapshot *s; + list_for_each_entry(s, &o->snapshots, list) + if (unlikely(!wait_for_in_progress(s, true))) + goto again; + } + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); + } up_read(&_origins_lock); return r; @@ -2313,7 +2422,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, available_sectors); /* Only tell snapshots if this is a write */ - return do_origin(o->dev, bio); + return do_origin(o->dev, bio, true); } static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 21de30b4e2a16051490ce5a2548c3711a3f0284e..0eb48e739f7ea234e601e8f2f7260bbaa794c192 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared) atomic_read(&shared->in_flight[WRITE]); } -void dm_stats_init(struct dm_stats *stats) +int dm_stats_init(struct dm_stats *stats) { int cpu; struct dm_stats_last_position *last; @@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats) mutex_init(&stats->mutex); INIT_LIST_HEAD(&stats->list); stats->last = alloc_percpu(struct dm_stats_last_position); + if (!stats->last) + return -ENOMEM; + for_each_possible_cpu(cpu) { last = per_cpu_ptr(stats->last, cpu); last->last_sector = (sector_t)ULLONG_MAX; last->last_rw = UINT_MAX; } + + return 0; } void dm_stats_cleanup(struct dm_stats *stats) @@ -224,6 +229,7 @@ void dm_stats_cleanup(struct dm_stats *stats) atomic_read(&shared->in_flight[READ]), atomic_read(&shared->in_flight[WRITE])); } + cond_resched(); } dm_stat_free(&s->rcu_head); } @@ -313,6 +319,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { atomic_set(&s->stat_shared[ni].in_flight[READ], 0); atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); + cond_resched(); } if (s->n_histogram_entries) { @@ -325,6 +332,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { s->stat_shared[ni].tmp.histogram = hi; hi += s->n_histogram_entries + 1; + cond_resched(); } } @@ -345,6 +353,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { p[ni].histogram = hi; hi += s->n_histogram_entries + 1; + cond_resched(); } } } @@ -474,6 +483,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program, } DMEMIT("\n"); } + cond_resched(); } mutex_unlock(&stats->mutex); @@ -750,6 +760,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, local_irq_enable(); } } + cond_resched(); } } @@ -865,6 +876,8 @@ static int dm_stats_print(struct dm_stats *stats, int id, if (unlikely(sz + 1 >= maxlen)) goto buffer_overflow; + + cond_resched(); } if (clear) diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h index 2ddfae678f320f84776c772f1107da8eab485385..dcac11fce03bba613da83b35c22db732b19ed8af 100644 --- a/drivers/md/dm-stats.h +++ b/drivers/md/dm-stats.h @@ -22,7 +22,7 @@ struct dm_stats_aux { unsigned long long duration_ns; }; -void dm_stats_init(struct dm_stats *st); +int dm_stats_init(struct dm_stats *st); void dm_stats_cleanup(struct 
dm_stats *st); struct mapped_device; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3d0e2c198f0614dbaf22db657a2bfc9336f89ebd..b00f767af0e3a456772cfc70d0682e9be6281bdf 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -422,39 +422,46 @@ dev_t dm_get_dev_t(const char *path) } EXPORT_SYMBOL_GPL(dm_get_dev_t); -/* - * Add a device to the list, or just increment the usage count if - * it's already present. - */ -int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, - struct dm_dev **result) +int __dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result, bool create_dd) { int r; dev_t dev; + unsigned int major, minor; + char dummy; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; + if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EOVERFLOW; + } else { + dev = dm_get_dev_t(path); + if (!dev) + return -ENODEV; + } dd = find_device(&t->devices, dev); if (!dd) { - dd = kmalloc(sizeof(*dd), GFP_KERNEL); - if (!dd) - return -ENOMEM; - - if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { - kfree(dd); - return r; - } + if (create_dd) { + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; - refcount_set(&dd->count, 1); - list_add(&dd->list, &t->devices); - goto out; + if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { + kfree(dd); + return r; + } + refcount_set(&dd->count, 1); + list_add(&dd->list, &t->devices); + goto out; + } else + return -ENODEV; } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { r = upgrade_mode(dd, mode, t->md); if (r) @@ -465,6 +472,17 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, *result = dd->dm_dev; return 0; } +EXPORT_SYMBOL(__dm_get_device); + +/* + * Add a device to the list, or just increment the usage count if + * it's already present. 
+ */ +int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result) +{ + return __dm_get_device(ti, path, mode, result, true); +} EXPORT_SYMBOL(dm_get_device); static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, @@ -562,7 +580,7 @@ static char **realloc_argv(unsigned *size, char **old_argv) gfp = GFP_NOIO; } argv = kmalloc_array(new_size, sizeof(*argv), gfp); - if (argv) { + if (argv && old_argv) { memcpy(argv, old_argv, *size * sizeof(*argv)); *size = new_size; } @@ -882,10 +900,10 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) } EXPORT_SYMBOL_GPL(dm_table_set_type); -static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - return bdev_dax_supported(dev->bdev, PAGE_SIZE); + return !bdev_dax_supported(dev->bdev, PAGE_SIZE); } static bool dm_table_supports_dax(struct dm_table *t) @@ -901,7 +919,7 @@ static bool dm_table_supports_dax(struct dm_table *t) return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_supports_dax, NULL)) + ti->type->iterate_devices(ti, device_not_dax_capable, NULL)) return false; } @@ -1349,7 +1367,7 @@ void dm_table_event(struct dm_table *t) } EXPORT_SYMBOL(dm_table_event); -sector_t dm_table_get_size(struct dm_table *t) +inline sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } @@ -1374,6 +1392,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) unsigned int l, n = 0, k = 0; sector_t *node; + if (unlikely(sector >= dm_table_get_size(t))) + return &t->targets[t->num_targets]; + for (l = 0; l < t->depth; l++) { n = get_child(n, k); node = get_node(t, l, n); @@ -1386,6 +1407,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +/* + * type->iterate_devices() should be called when the sanity check needs to + * iterate and check all underlying data devices. iterate_devices() will + * iterate all underlying data devices until it encounters a non-zero return + * code, returned by whether the input iterate_devices_callout_fn, or + * iterate_devices() itself internally. + * + * For some target type (e.g. dm-stripe), one call of iterate_devices() may + * iterate multiple underlying devices internally, in which case a non-zero + * return code returned by iterate_devices_callout_fn will stop the iteration + * in advance. + * + * Cases requiring _any_ underlying device supporting some kind of attribute, + * should use the iteration structure like dm_table_any_dev_attr(), or call + * it directly. @func should handle semantics of positive examples, e.g. + * capable of something. + * + * Cases requiring _all_ underlying devices supporting some kind of attribute, + * should use the iteration structure like dm_table_supports_nowait() or + * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that + * uses an @anti_func that handle semantics of counter examples, e.g. not + * capable of something. 
So: return !dm_table_any_dev_attr(t, anti_func, data); + */ +static bool dm_table_any_dev_attr(struct dm_table *t, + iterate_devices_callout_fn func, void *data) +{ + struct dm_target *ti; + unsigned int i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, func, data)) + return true; + } + + return false; +} + static int count_device(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1422,13 +1483,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table) return true; } -static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); enum blk_zoned_model *zoned_model = data; - return q && blk_queue_zoned_model(q) == *zoned_model; + return !q || blk_queue_zoned_model(q) != *zoned_model; } static bool dm_table_supports_zoned_model(struct dm_table *t, @@ -1445,37 +1506,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model)) + ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) return false; } return true; } -static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; - return q && blk_queue_zone_sectors(q) == *zone_sectors; -} - -static bool dm_table_matches_zone_sectors(struct dm_table *t, - unsigned int zone_sectors) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors)) - return false; - } - - return true; + return !q || blk_queue_zone_sectors(q) != *zone_sectors; } static int validate_hardware_zoned_model(struct dm_table *table, @@ -1495,7 +1539,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, if (!zone_sectors || !is_power_of_2(zone_sectors)) return -EINVAL; - if (!dm_table_matches_zone_sectors(table, zone_sectors)) { + if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { DMERR("%s: zone sectors is not consistent across all devices", dm_device_name(table->md)); return -EINVAL; @@ -1685,29 +1729,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false; } -static int dm_table_supports_dax_write_cache(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, - device_dax_write_cache_enabled, NULL)) - return true; - } - - return false; -} - -static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_nonrot(q); + return 
q && !blk_queue_nonrot(q); } static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, @@ -1718,43 +1745,26 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return q && !blk_queue_add_random(q); } -static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); } -static bool dm_table_all_devices_attribute(struct dm_table *t, - iterate_devices_callout_fn func) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, func, NULL)) - return false; - } - - return true; -} - -static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, +static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { char b[BDEVNAME_SIZE]; /* For now, NVMe devices are the only devices of this class */ - return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); + return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0); } static bool dm_table_does_not_support_partial_completion(struct dm_table *t) { - return dm_table_all_devices_attribute(t, device_no_partial_completion); + return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL); } static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1872,6 +1882,15 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) return true; } +static int device_requires_stable_pages(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && bdi_cap_stable_pages_required(q->backing_dev_info); +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1908,35 +1927,51 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else blk_queue_flag_clear(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax_write_cache(t)) + if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); /* Ensure that all underlying devices are non-rotational. */ - if (dm_table_all_devices_attribute(t, device_is_nonrot)) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else + if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + else + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; if (!dm_table_supports_write_zeroes(t)) q->limits.max_write_zeroes_sectors = 0; - if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) - blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q); - else + if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL)) blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q); + else + blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q); dm_table_verify_integrity(t); + /* + * Some devices don't use blk_integrity but still want stable pages + * because they do their own checksumming. + * If any underlying device requires stable pages, a table must require + * them as well. 
Only targets that support iterate_devices are considered: + * don't want error, zero, etc to require stable pages. + */ + if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + else + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; + /* * Determine whether or not this queue's I/O timings contribute * to the entropy pool, Only request-based targets use this. * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not * have it set. */ - if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) + if (blk_queue_add_random(q) && + dm_table_any_dev_attr(t, device_is_not_random, NULL)) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); + + /* Allow reads to exceed readahead limits */ + q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9); } unsigned int dm_table_get_num_targets(struct dm_table *t) diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 314d17ca64668a70ea1f6445111ca19b2024141e..64dd0b34fcf490cee3779e179e9b8c7c543b7e53 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, return DM_MAPIO_KILL; } -static void io_err_release_clone_rq(struct request *clone) +static void io_err_release_clone_rq(struct request *clone, + union map_info *map_context) { } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 20b0776e39ef3307aa5c418016afff4758850a68..bd9466dc94009cc39ebde88ae1b3278914d29caa 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -533,6 +533,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd) r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, &pmd->tm, &pmd->metadata_sm); if (r < 0) { + pmd->tm = NULL; + pmd->metadata_sm = NULL; DMERR("tm_create_with_sm failed"); return r; } @@ -541,6 +543,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd) if (IS_ERR(pmd->data_sm)) { DMERR("sm_disk_create failed"); r = PTR_ERR(pmd->data_sm); + pmd->data_sm = NULL; goto bad_cleanup_tm; } @@ -571,11 +574,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd) bad_cleanup_nb_tm: dm_tm_destroy(pmd->nb_tm); + pmd->nb_tm = NULL; bad_cleanup_data_sm: dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; bad_cleanup_tm: dm_tm_destroy(pmd->tm); + pmd->tm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; return r; } @@ -641,6 +648,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd) sizeof(disk_super->metadata_space_map_root), &pmd->tm, &pmd->metadata_sm); if (r < 0) { + pmd->tm = NULL; + pmd->metadata_sm = NULL; DMERR("tm_open_with_sm failed"); goto bad_unlock_sblock; } @@ -650,6 +659,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd) if (IS_ERR(pmd->data_sm)) { DMERR("sm_disk_open failed"); r = PTR_ERR(pmd->data_sm); + pmd->data_sm = NULL; goto bad_cleanup_tm; } @@ -660,6 +670,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) goto bad_cleanup_data_sm; } + /* + * For pool metadata opening process, root setting is redundant + * because it will be set again in __begin_transaction(). But dm + * pool aborting process really needs to get last transaction's + * root to avoid accessing broken btree. 
+ */ + pmd->root = le64_to_cpu(disk_super->data_mapping_root); + pmd->details_root = le64_to_cpu(disk_super->device_details_root); + __setup_btree_details(pmd); dm_bm_unlock(sblock); @@ -667,9 +686,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd) bad_cleanup_data_sm: dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; bad_cleanup_tm: dm_tm_destroy(pmd->tm); + pmd->tm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; bad_unlock_sblock: dm_bm_unlock(sblock); @@ -698,23 +720,33 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } -static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) +static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd, + bool destroy_bm) { dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; dm_tm_destroy(pmd->nb_tm); + pmd->nb_tm = NULL; dm_tm_destroy(pmd->tm); - dm_block_manager_destroy(pmd->bm); + pmd->tm = NULL; + if (destroy_bm) + dm_block_manager_destroy(pmd->bm); } static int __begin_transaction(struct dm_pool_metadata *pmd) @@ -897,15 +929,14 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) return -EBUSY; } - if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { + if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", __func__, r); } - if (!pmd->fail_io) - __destroy_persistent_data_objects(pmd); + __destroy_persistent_data_objects(pmd, true); kfree(pmd); return 0; @@ -1678,15 +1709,17 @@ int dm_thin_remove_range(struct dm_thin_device *td, return r; } -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) { - int r; + int r = -EINVAL; uint32_t ref_count; down_read(&pmd->root_lock); - r = dm_sm_get_count(pmd->data_sm, b, &ref_count); - if (!r) - *result = (ref_count != 0); + if (!pmd->fail_io) { + r = dm_sm_get_count(pmd->data_sm, b, &ref_count); + if (!r) + *result = (ref_count > 1); + } up_read(&pmd->root_lock); return r; @@ -1697,10 +1730,14 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_ int r = 0; down_write(&pmd->root_lock); - for (; b != e; b++) { - r = dm_sm_inc_block(pmd->data_sm, b); - if (r) - break; + if (!pmd->fail_io) { + for (; b != e; b++) { + r = dm_sm_inc_block(pmd->data_sm, b); + if (r) + break; + } + } else { + r = -EINVAL; } up_write(&pmd->root_lock); @@ -1712,10 +1749,14 @@ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_ int r = 0; down_write(&pmd->root_lock); - for (; b != e; b++) { - r = dm_sm_dec_block(pmd->data_sm, b); - if (r) - break; + if (!pmd->fail_io) { + for (; b != e; b++) { + r = dm_sm_dec_block(pmd->data_sm, b); + if (r) + break; + } + } else { + r = -EINVAL; } up_write(&pmd->root_lock); @@ -1806,19 +1847,29 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) { int r = -EINVAL; + /* fail_io is double-checked with pmd->root_lock held below */ + if (unlikely(pmd->fail_io)) + return r; + down_write(&pmd->root_lock); - if (pmd->fail_io) - goto out; + if (pmd->fail_io) { 
+ up_write(&pmd->root_lock); + return r; + } __set_abort_with_changes_flags(pmd); - __destroy_persistent_data_objects(pmd); - r = __create_persistent_data_objects(pmd, false); + + /* destroy data_sm/metadata_sm/nb_tm/tm */ + __destroy_persistent_data_objects(pmd, false); + + /* reset bm */ + dm_block_manager_reset(pmd->bm); + + /* rebuild data_sm/metadata_sm/nb_tm/tm */ + r = __open_or_format_metadata(pmd, false); if (r) pmd->fail_io = true; - -out: up_write(&pmd->root_lock); - return r; } @@ -1993,24 +2044,33 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, int r; down_write(&pmd->root_lock); + if (pmd->fail_io) { + r = -EINVAL; + goto out; + } + r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); - up_write(&pmd->root_lock); +out: + up_write(&pmd->root_lock); return r; } int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) { - int r; + int r = -EINVAL; struct dm_block *sblock; struct thin_disk_superblock *disk_super; down_write(&pmd->root_lock); + if (pmd->fail_io) + goto out; + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; r = superblock_lock(pmd, &sblock); if (r) { - DMERR("couldn't read superblock"); + DMERR("couldn't lock superblock"); goto out; } diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 35e954ea20a9b5923ffc5b4d1a7a2cb5cdb3c314..f6be0d733c20267f569b72ab14314d0565425e80 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index aaf1ad481ee88e59b90273133b7a7bd90c69d3fb..d884bb9cef943252f3023c8ecc1d09d200efd559 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t) struct dm_thin_new_mapping; /* - * The pool runs in 4 modes. Ordered in degraded order for comparisons. + * The pool runs in various modes. Ordered in degraded order for comparisons. 
*/ enum pool_mode { PM_WRITE, /* metadata may be changed */ @@ -257,6 +257,7 @@ struct pool { spinlock_t lock; struct bio_list deferred_flush_bios; + struct bio_list deferred_flush_completions; struct list_head prepared_mappings; struct list_head prepared_discards; struct list_head prepared_discards_pt2; @@ -282,9 +283,38 @@ struct pool { mempool_t mapping_pool; }; -static enum pool_mode get_pool_mode(struct pool *pool); static void metadata_operation_failed(struct pool *pool, const char *op, int r); +static enum pool_mode get_pool_mode(struct pool *pool) +{ + return pool->pf.mode; +} + +static void notify_of_pool_mode_change(struct pool *pool) +{ + const char *descs[] = { + "write", + "out-of-data-space", + "read-only", + "read-only", + "fail" + }; + const char *extra_desc = NULL; + enum pool_mode mode = get_pool_mode(pool); + + if (mode == PM_OUT_OF_DATA_SPACE) { + if (!pool->pf.error_if_no_space) + extra_desc = " (queue IO)"; + else + extra_desc = " (error IO)"; + } + + dm_table_event(pool->ti->table); + DMINFO("%s: switching pool to %s%s mode", + dm_device_name(pool->pool_md), + descs[(int)mode], extra_desc ? : ""); +} + /* * Target context for a pool. */ @@ -325,7 +355,7 @@ struct thin_c { * Ensures the thin is not destroyed until the worker has finished * iterating the active_thins list. */ - atomic_t refcount; + refcount_t refcount; struct completion can_destroy; }; @@ -927,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) mempool_free(m, &m->tc->pool->mapping_pool); } +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) +{ + struct pool *pool = tc->pool; + unsigned long flags; + + /* + * If the bio has the REQ_FUA flag set we must commit the metadata + * before signaling its completion. + */ + if (!bio_triggers_commit(tc, bio)) { + bio_endio(bio); + return; + } + + /* + * Complete bio with an error if earlier I/O caused changes to the + * metadata that can't be committed, e.g, due to I/O errors on the + * metadata device. + */ + if (dm_thin_aborted_changes(tc->td)) { + bio_io_error(bio); + return; + } + + /* + * Batch together any bios that trigger commits and then issue a + * single commit for them in process_deferred_bios(). + */ + spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_flush_completions, bio); + spin_unlock_irqrestore(&pool->lock, flags); +} + static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; @@ -959,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ if (bio) { inc_remap_and_issue_cell(tc, m->cell, m->data_block); - bio_endio(bio); + complete_overwrite_bio(tc, bio); } else { inc_all_io_entry(tc->pool, m->cell->holder); remap_and_issue(tc, m->cell->holder, m->data_block); @@ -1019,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m * passdown we have to check that these blocks are now unused. 
*/ int r = 0; - bool used = true; + bool shared = true; struct thin_c *tc = m->tc; struct pool *pool = tc->pool; dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; @@ -1029,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m while (b != end) { /* find start of unmapped run */ for (; b < end; b++) { - r = dm_pool_block_is_used(pool->pmd, b, &used); + r = dm_pool_block_is_shared(pool->pmd, b, &shared); if (r) goto out; - if (!used) + if (!shared) break; } @@ -1042,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m /* find end of run */ for (e = b + 1; e != end; e++) { - r = dm_pool_block_is_used(pool->pmd, e, &used); + r = dm_pool_block_is_shared(pool->pmd, e, &shared); if (r) goto out; - if (used) + if (shared) break; } @@ -2288,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) { unsigned long flags; struct bio *bio; - struct bio_list bios; + struct bio_list bios, bio_completions; struct thin_c *tc; tc = get_first_thin(pool); @@ -2299,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) } /* - * If there are any deferred flush bios, we must commit - * the metadata before issuing them. + * If there are any deferred flush bios, we must commit the metadata + * before issuing them or signaling their completion. */ bio_list_init(&bios); + bio_list_init(&bio_completions); + spin_lock_irqsave(&pool->lock, flags); bio_list_merge(&bios, &pool->deferred_flush_bios); bio_list_init(&pool->deferred_flush_bios); + + bio_list_merge(&bio_completions, &pool->deferred_flush_completions); + bio_list_init(&pool->deferred_flush_completions); spin_unlock_irqrestore(&pool->lock, flags); - if (bio_list_empty(&bios) && + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) return; if (commit(pool)) { + bio_list_merge(&bios, &bio_completions); + while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; } pool->last_commit_jiffies = jiffies; + while ((bio = bio_list_pop(&bio_completions))) + bio_endio(bio); + while ((bio = bio_list_pop(&bios))) generic_make_request(bio); } @@ -2351,8 +2424,6 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } -static void notify_of_pool_mode_change_to_oods(struct pool *pool); - /* * We're holding onto IO to allow userland time to react. 
After the * timeout either the pool will have been resized (and thus back in @@ -2365,7 +2436,7 @@ static void do_no_space_timeout(struct work_struct *ws) if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { pool->pf.error_if_no_space = true; - notify_of_pool_mode_change_to_oods(pool); + notify_of_pool_mode_change(pool); error_retry_list_with_code(pool, BLK_STS_NOSPC); } } @@ -2433,26 +2504,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) /*----------------------------------------------------------------*/ -static enum pool_mode get_pool_mode(struct pool *pool) -{ - return pool->pf.mode; -} - -static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) -{ - dm_table_event(pool->ti->table); - DMINFO("%s: switching pool to %s mode", - dm_device_name(pool->pool_md), new_mode); -} - -static void notify_of_pool_mode_change_to_oods(struct pool *pool) -{ - if (!pool->pf.error_if_no_space) - notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); - else - notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); -} - static bool passdown_enabled(struct pool_c *pt) { return pt->adjusted_pf.discard_passdown; @@ -2501,8 +2552,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) switch (new_mode) { case PM_FAIL: - if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "failure"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; @@ -2516,8 +2565,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) case PM_OUT_OF_METADATA_SPACE: case PM_READ_ONLY: - if (!is_read_only_pool_mode(old_mode)) - notify_of_pool_mode_change(pool, "read-only"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_read_only; pool->process_discard = process_bio_success; @@ -2538,8 +2585,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * alarming rate. Adjust your low water mark if you're * frequently seeing this mode. */ - if (old_mode != new_mode) - notify_of_pool_mode_change_to_oods(pool); pool->out_of_data_space = true; pool->process_bio = process_bio_read_only; pool->process_discard = process_discard_bio; @@ -2552,8 +2597,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) break; case PM_WRITE: - if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "write"); if (old_mode == PM_OUT_OF_DATA_SPACE) cancel_delayed_work_sync(&pool->no_space_timeout); pool->out_of_data_space = false; @@ -2573,6 +2616,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * doesn't cause an unexpected mode transition on resume. 
*/ pt->adjusted_pf.mode = new_mode; + + if (old_mode != new_mode) + notify_of_pool_mode_change(pool); } static void abort_transaction(struct pool *pool) @@ -2952,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); + bio_list_init(&pool->deferred_flush_completions); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); INIT_LIST_HEAD(&pool->prepared_discards_pt2); @@ -3236,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) as.argc = argc; as.argv = argv; + /* make sure metadata and data are different devices */ + if (!strcmp(argv[0], argv[1])) { + ti->error = "Error setting metadata or data device"; + r = -EINVAL; + goto out_unlock; + } + /* * Set default pool features. */ @@ -3307,6 +3361,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) pt->low_water_blocks = low_water_blocks; pt->adjusted_pf = pt->requested_pf = pf; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; /* * Only need to enable discards if the pool should pass @@ -3329,8 +3384,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) calc_metadata_threshold(pt), metadata_low_callback, pool); - if (r) + if (r) { + ti->error = "Error registering metadata threshold"; goto out_flags_changed; + } pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -3493,20 +3550,29 @@ static int pool_preresume(struct dm_target *ti) */ r = bind_control_target(pool, ti); if (r) - return r; + goto out; r = maybe_resize_data_dev(ti, &need_commit1); if (r) - return r; + goto out; r = maybe_resize_metadata_dev(ti, &need_commit2); if (r) - return r; + goto out; if (need_commit1 || need_commit2) (void) commit(pool); - return 0; +out: + /* + * When thinpool is PM_FAIL, it cannot be rebuilt if + * bio is in deferred list. Therefore need to return 0 and + * call pool_resume() to flush IO. 
+ */ + if (r && get_pool_mode(pool) == PM_FAIL) + r = 0; + + return r; } static void pool_suspend_active_thins(struct pool *pool) @@ -4044,12 +4110,12 @@ static struct target_type pool_target = { *--------------------------------------------------------------*/ static void thin_get(struct thin_c *tc) { - atomic_inc(&tc->refcount); + refcount_inc(&tc->refcount); } static void thin_put(struct thin_c *tc) { - if (atomic_dec_and_test(&tc->refcount)) + if (refcount_dec_and_test(&tc->refcount)) complete(&tc->can_destroy); } @@ -4120,6 +4186,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) tc->sort_bio_list = RB_ROOT; if (argc == 3) { + if (!strcmp(argv[0], argv[2])) { + ti->error = "Error setting origin device"; + r = -EINVAL; + goto bad_origin_dev; + } + r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); if (r) { ti->error = "Error opening origin device"; @@ -4173,6 +4245,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; ti->flush_supported = true; ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); @@ -4193,7 +4266,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto bad; } - atomic_set(&tc->refcount, 1); + refcount_set(&tc->refcount, 1); init_completion(&tc->can_destroy); list_add_tail_rcu(&tc->list, &tc->pool->active_thins); spin_unlock_irqrestore(&tc->pool->lock, flags); diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c index 954b7ab4e684d3fdec832abc557373a8ca19e756..e673dacf6418191854689e4a4337f40927b89e5a 100644 --- a/drivers/md/dm-unstripe.c +++ b/drivers/md/dm-unstripe.c @@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto err; } - if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) { + if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { ti->error = "Invalid striped device offset"; goto err; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 684af08d07478cb4652ad6642fc99d9bc3be189f..bb8327999705ee683760f4153ab9d039e1958f97 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -436,7 +436,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, fio->level++; if (type == DM_VERITY_BLOCK_TYPE_METADATA) - block += v->data_blocks; + block = block - v->hash_start + v->data_blocks; /* * For RS(M, N), the continuous FEC data is divided into blocks of N @@ -552,6 +552,7 @@ void verity_fec_dtr(struct dm_verity *v) mempool_exit(&f->rs_pool); mempool_exit(&f->prealloc_pool); mempool_exit(&f->extra_pool); + mempool_exit(&f->output_pool); kmem_cache_destroy(f->cache); if (f->data_bufio) diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index fc65f0dedf7f702b31d6adf21b8a2b26238c8b43..e6dd19d14cdee89717a64165c9fca8629cc7de9d 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -16,7 +16,7 @@ #include "dm-verity.h" #include "dm-verity-fec.h" - +#include "dm-verity-verify-sig.h" #include <linux/module.h> #include <linux/reboot.h> @@ -34,7 +34,8 @@ #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" -#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC) +#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \ + DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE; @@ -236,8 +237,8 @@ static int verity_handle_err(struct dm_verity *v, enum 
verity_block_type type, BUG(); } - DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str, - block); + DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name, + type_str, block); if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) DMERR("%s: reached maximum errors", v->data_dev->name); @@ -533,6 +534,15 @@ static int verity_verify_io(struct dm_verity_io *io) return 0; } +/* + * Skip verity work in response to I/O error when system is shutting down. + */ +static inline bool verity_is_system_shutting_down(void) +{ + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF + || system_state == SYSTEM_RESTART; +} + /* * End one "io" structure with a given error. */ @@ -560,7 +570,8 @@ static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (bio->bi_status && !verity_fec_is_enabled(io->v)) { + if (bio->bi_status && + (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) { verity_finish_io(io, bio->bi_status); return; } @@ -611,8 +622,22 @@ static void verity_prefetch_io(struct work_struct *work) static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) { + sector_t block = io->block; + unsigned int n_blocks = io->n_blocks; struct dm_verity_prefetch_work *pw; + if (v->validated_blocks) { + while (n_blocks && test_bit(block, v->validated_blocks)) { + block++; + n_blocks--; + } + while (n_blocks && test_bit(block + n_blocks - 1, + v->validated_blocks)) + n_blocks--; + if (!n_blocks) + return; + } + pw = kmalloc(sizeof(struct dm_verity_prefetch_work), GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); @@ -621,8 +646,8 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) INIT_WORK(&pw->work, verity_prefetch_io); pw->v = v; - pw->block = io->block; - pw->n_blocks = io->n_blocks; + pw->block = block; + pw->n_blocks = n_blocks; queue_work(v->verify_wq, &pw->work); } @@ -714,6 +739,8 @@ static void verity_status(struct dm_target *ti, status_type_t type, args++; if (v->validated_blocks) args++; + if (v->signature_key_desc) + args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS; if (!args) return; DMEMIT(" %u", args); @@ -735,6 +762,9 @@ static void verity_status(struct dm_target *ti, status_type_t type, if (v->validated_blocks) DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE); sz = verity_fec_status_table(v, sz, result, maxlen); + if (v->signature_key_desc) + DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY + " %s", v->signature_key_desc); break; } } @@ -800,6 +830,8 @@ static void verity_dtr(struct dm_target *ti) verity_fec_dtr(v); + kfree(v->signature_key_desc); + kfree(v); } @@ -855,7 +887,27 @@ static int verity_alloc_zero_digest(struct dm_verity *v) return r; } -static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) +static inline bool verity_is_verity_mode(const char *arg_name) +{ + return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) || + !strcasecmp(arg_name, DM_VERITY_OPT_RESTART)); +} + +static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name) +{ + if (v->mode) + return -EINVAL; + + if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) + v->mode = DM_VERITY_MODE_LOGGING; + else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) + v->mode = DM_VERITY_MODE_RESTART; + + return 0; +} + +static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, + struct dm_verity_sig_opts *verify_args) { int r; unsigned argc; @@ -877,12 +929,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct 
dm_verity *v) arg_name = dm_shift_arg(as); argc--; - if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) { - v->mode = DM_VERITY_MODE_LOGGING; - continue; - - } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) { - v->mode = DM_VERITY_MODE_RESTART; + if (verity_is_verity_mode(arg_name)) { + r = verity_parse_verity_mode(v, arg_name); + if (r) { + ti->error = "Conflicting error handling parameters"; + return r; + } continue; } else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) { @@ -904,6 +956,14 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) if (r) return r; continue; + } else if (verity_verify_is_sig_opt_arg(arg_name)) { + r = verity_verify_sig_parse_opt_args(as, v, + verify_args, + &argc, arg_name); + if (r) + return r; + continue; + } ti->error = "Unrecognized verity feature request"; @@ -930,6 +990,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; + struct dm_verity_sig_opts verify_args = {0}; struct dm_arg_set as; unsigned int num; unsigned long long num_ll; @@ -937,6 +998,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) int i; sector_t hash_position; char dummy; + char *root_hash_digest_to_validate; v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL); if (!v) { @@ -1061,6 +1123,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto bad; } + root_hash_digest_to_validate = argv[8]; if (strcmp(argv[9], "-")) { v->salt_size = strlen(argv[9]) / 2; @@ -1086,11 +1149,20 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) as.argc = argc; as.argv = argv; - r = verity_parse_opt_args(&as, v); + r = verity_parse_opt_args(&as, v, &verify_args); if (r < 0) goto bad; } + /* Root hash signature is an optional parameter */ + r = verity_verify_root_hash(root_hash_digest_to_validate, + strlen(root_hash_digest_to_validate), + verify_args.sig, + verify_args.sig_size); + if (r < 0) { + ti->error = "Root hash verification failed"; + goto bad; + } v->hash_per_block_bits = __fls((1 << v->hash_dev_block_bits) / v->digest_size); @@ -1156,9 +1228,13 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->per_io_data_size = roundup(ti->per_io_data_size, __alignof__(struct dm_verity_io)); + verity_verify_sig_opts_cleanup(&verify_args); + return 0; bad: + + verity_verify_sig_opts_cleanup(&verify_args); verity_dtr(ti); return r; @@ -1166,7 +1242,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) static struct target_type verity_target = { .name = "verity", - .version = {1, 4, 0}, + .features = DM_TARGET_IMMUTABLE, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c new file mode 100644 index 0000000000000000000000000000000000000000..614e43db93aa835245324b6c88a1092cda10f6a7 --- /dev/null +++ b/drivers/md/dm-verity-verify-sig.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Microsoft Corporation. 
+ * + * Author: Jaskaran Singh Khurana + * + */ +#include <linux/device-mapper.h> +#include <linux/verification.h> +#include <keys/user-type.h> +#include <linux/module.h> +#include "dm-verity.h" +#include "dm-verity-verify-sig.h" + +#define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s + +static bool require_signatures; +module_param(require_signatures, bool, false); +MODULE_PARM_DESC(require_signatures, + "Verify the roothash of dm-verity hash tree"); + +#define DM_VERITY_IS_SIG_FORCE_ENABLED() \ + (require_signatures != false) + +bool verity_verify_is_sig_opt_arg(const char *arg_name) +{ + return (!strcasecmp(arg_name, + DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY)); +} + +static int verity_verify_get_sig_from_key(const char *key_desc, + struct dm_verity_sig_opts *sig_opts) +{ + struct key *key; + const struct user_key_payload *ukp; + int ret = 0; + + key = request_key(&key_type_user, + key_desc, NULL); + if (IS_ERR(key)) + return PTR_ERR(key); + + down_read(&key->sem); + + ukp = user_key_payload_locked(key); + if (!ukp) { + ret = -EKEYREVOKED; + goto end; + } + + sig_opts->sig = kmalloc(ukp->datalen, GFP_KERNEL); + if (!sig_opts->sig) { + ret = -ENOMEM; + goto end; + } + sig_opts->sig_size = ukp->datalen; + + memcpy(sig_opts->sig, ukp->data, sig_opts->sig_size); + +end: + up_read(&key->sem); + key_put(key); + + return ret; +} + +int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, + struct dm_verity *v, + struct dm_verity_sig_opts *sig_opts, + unsigned int *argc, + const char *arg_name) +{ + struct dm_target *ti = v->ti; + int ret = 0; + const char *sig_key = NULL; + + if (!*argc) { + ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified"); + return -EINVAL; + } + + sig_key = dm_shift_arg(as); + (*argc)--; + + ret = verity_verify_get_sig_from_key(sig_key, sig_opts); + if (ret < 0) + ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified"); + + v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL); + if (!v->signature_key_desc) + return -ENOMEM; + + return ret; +} + +/* + * verity_verify_root_hash - Verify the root hash of the verity hash device + * using builtin trusted keys. + * + * @root_hash: For verity, the roothash/data to be verified. + * @root_hash_len: Size of the roothash/data to be verified. + * @sig_data: The trusted signature that verifies the roothash/data. + * @sig_len: Size of the signature. + * + */ +int verity_verify_root_hash(const void *root_hash, size_t root_hash_len, + const void *sig_data, size_t sig_len) +{ + int ret; + + if (!root_hash || root_hash_len == 0) + return -EINVAL; + + if (!sig_data || sig_len == 0) { + if (DM_VERITY_IS_SIG_FORCE_ENABLED()) + return -ENOKEY; + else + return 0; + } + + ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data, + sig_len, NULL, VERIFYING_UNSPECIFIED_SIGNATURE, + NULL, NULL); + + return ret; +} + +void verity_verify_sig_opts_cleanup(struct dm_verity_sig_opts *sig_opts) +{ + kfree(sig_opts->sig); + sig_opts->sig = NULL; + sig_opts->sig_size = 0; +} diff --git a/drivers/md/dm-verity-verify-sig.h b/drivers/md/dm-verity-verify-sig.h new file mode 100644 index 0000000000000000000000000000000000000000..3987c7141f79af8189b60d4ed0ad8359a1ae8abd --- /dev/null +++ b/drivers/md/dm-verity-verify-sig.h @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Microsoft Corporation. 
+ * + * Author: Jaskaran Singh Khurana + * + */ +#ifndef DM_VERITY_SIG_VERIFICATION_H +#define DM_VERITY_SIG_VERIFICATION_H + +#define DM_VERITY_ROOT_HASH_VERIFICATION "DM Verity Sig Verification" +#define DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY "root_hash_sig_key_desc" + +struct dm_verity_sig_opts { + unsigned int sig_size; + u8 *sig; +}; + +#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG + +#define DM_VERITY_ROOT_HASH_VERIFICATION_OPTS 2 + +int verity_verify_root_hash(const void *data, size_t data_len, + const void *sig_data, size_t sig_len); +bool verity_verify_is_sig_opt_arg(const char *arg_name); + +int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, + struct dm_verity_sig_opts *sig_opts, + unsigned int *argc, const char *arg_name); + +void verity_verify_sig_opts_cleanup(struct dm_verity_sig_opts *sig_opts); + +#else + +#define DM_VERITY_ROOT_HASH_VERIFICATION_OPTS 0 + +static inline int verity_verify_root_hash(const void *data, size_t data_len, + const void *sig_data, size_t sig_len) +{ + return 0; +} + +static inline bool verity_verify_is_sig_opt_arg(const char *arg_name) +{ + return false; +} + +static inline int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, + struct dm_verity *v, struct dm_verity_sig_opts *sig_opts, + unsigned int *argc, const char *arg_name) +{ + return -EINVAL; +} + +static inline void verity_verify_sig_opts_cleanup(struct dm_verity_sig_opts *sig_opts) +{ +} + +#endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */ +#endif /* DM_VERITY_SIG_VERIFICATION_H */ diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 3441c10b840ca912e752c18360c738878d9ebc40..42183a3903aebc5199fcb70d16411e9b79b99142 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -64,6 +64,8 @@ struct dm_verity { struct dm_verity_fec *fec; /* forward error correction */ unsigned long *validated_blocks; /* bitset blocks validated */ + + char *signature_key_desc; /* signature keyring reference */ }; struct dm_verity_io { diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 5f1f80d424dd370f306e15033e12bb77de72f3b7..6c6c23634bcfa5cf1b67b724fb86e0f7683eb919 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -142,6 +142,7 @@ struct dm_writecache { size_t metadata_sectors; size_t n_blocks; uint64_t seq_count; + sector_t data_device_sectors; void *block_start; struct wc_entry *entries; unsigned block_size; @@ -153,6 +154,7 @@ struct dm_writecache { bool overwrote_committed:1; bool memory_vmapped:1; + bool start_sector_set:1; bool high_wm_percent_set:1; bool low_wm_percent_set:1; bool max_writeback_jobs_set:1; @@ -161,6 +163,10 @@ struct dm_writecache { bool writeback_fua_set:1; bool flush_on_suspend:1; + unsigned high_wm_percent_value; + unsigned low_wm_percent_value; + unsigned autocommit_time_value; + unsigned writeback_all; struct workqueue_struct *writeback_wq; struct work_struct writeback_work; @@ -226,6 +232,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -244,9 +251,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { 
wc->memory_map = NULL; r = da; @@ -268,7 +282,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? daa : -EINVAL; @@ -281,6 +295,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) while (daa-- && i < p) { pages[i++] = pfn_t_to_page(pfn); pfn.val++; + if (!(i & 15)) + cond_resched(); } } while (i < p); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); @@ -447,7 +463,13 @@ static void writecache_notify_io(unsigned long error, void *context) complete(&endio->c); } -static void ssd_commit_flushed(struct dm_writecache *wc) +static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) +{ + wait_event(wc->bio_in_progress_wait[direction], + !atomic_read(&wc->bio_in_progress[direction])); +} + +static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { struct dm_io_region region; struct dm_io_request req; @@ -493,17 +515,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc) writecache_notify_io(0, &endio); wait_for_completion_io(&endio.c); + if (wait_for_ios) + writecache_wait_for_ios(wc, WRITE); + writecache_disk_flush(wc, wc->ssd_dev); memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size); } -static void writecache_commit_flushed(struct dm_writecache *wc) +static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { if (WC_MODE_PMEM(wc)) wmb(); else - ssd_commit_flushed(wc); + ssd_commit_flushed(wc, wait_for_ios); } static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) @@ -527,12 +552,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) writecache_error(wc, r, "error flushing metadata: %d", r); } -static void writecache_wait_for_ios(struct dm_writecache *wc, int direction) -{ - wait_event(wc->bio_in_progress_wait[direction], - !atomic_read(&wc->bio_in_progress[direction])); -} - #define WFE_RETURN_FOLLOWING 1 #define WFE_LOWEST_SEQ 2 @@ -628,6 +647,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry wc->freelist_size++; } +static inline void writecache_verify_watermark(struct dm_writecache *wc) +{ + if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) + queue_work(wc->writeback_wq, &wc->writeback_work); +} + static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc) { struct wc_entry *e; @@ -649,8 +674,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc) list_del(&e->lru); } wc->freelist_size--; - if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark)) - queue_work(wc->writeback_wq, &wc->writeback_work); + + writecache_verify_watermark(wc); return e; } @@ -730,14 +755,12 @@ static void writecache_flush(struct dm_writecache *wc) e = e2; cond_resched(); } - writecache_commit_flushed(wc); - - writecache_wait_for_ios(wc, WRITE); + writecache_commit_flushed(wc, true); wc->seq_count++; pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count)); writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); wc->overwrote_committed = false; @@ -761,7 +784,7 @@ static void writecache_flush(struct dm_writecache *wc) } if (need_flush_after_free) - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } static void 
writecache_flush_work(struct work_struct *work) @@ -804,6 +827,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ writecache_wait_for_ios(wc, WRITE); discarded_something = true; } + if (!writecache_entry_is_committed(wc, e)) + wc->uncommitted_blocks--; writecache_free_entry(wc, e); } @@ -814,7 +839,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ } if (discarded_something) - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } static bool writecache_wait_for_writeback(struct dm_writecache *wc) @@ -843,7 +868,7 @@ static void writecache_suspend(struct dm_target *ti) } wc_unlock(wc); - flush_workqueue(wc->writeback_wq); + drain_workqueue(wc->writeback_wq); wc_lock(wc); if (flush_on_suspend) @@ -871,11 +896,30 @@ static int writecache_alloc_entries(struct dm_writecache *wc) struct wc_entry *e = &wc->entries[b]; e->index = b; e->write_in_progress = false; + cond_resched(); } return 0; } +static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) +{ + struct dm_io_region region; + struct dm_io_request req; + + region.bdev = wc->ssd_dev->bdev; + region.sector = wc->start_sector; + region.count = n_sectors; + req.bi_op = REQ_OP_READ; + req.bi_op_flags = REQ_SYNC; + req.mem.type = DM_IO_VMA; + req.mem.ptr.vma = (char *)wc->memory_map; + req.client = wc->dm_io; + req.notify.fn = NULL; + + return dm_io(&req, 1, &region, NULL); +} + static void writecache_resume(struct dm_target *ti) { struct dm_writecache *wc = ti->private; @@ -886,8 +930,20 @@ static void writecache_resume(struct dm_target *ti) wc_lock(wc); - if (WC_MODE_PMEM(wc)) + wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT; + + if (WC_MODE_PMEM(wc)) { persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); + } else { + r = writecache_read_metadata(wc, wc->metadata_sectors); + if (r) { + size_t sb_entries_offset; + writecache_error(wc, r, "unable to read metadata: %d", r); + sb_entries_offset = offsetof(struct wc_memory_superblock, entries); + memset((char *)wc->memory_map + sb_entries_offset, -1, + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); + } + } wc->tree = RB_ROOT; INIT_LIST_HEAD(&wc->lru); @@ -925,6 +981,7 @@ static void writecache_resume(struct dm_target *ti) e->original_sector = le64_to_cpu(wme.original_sector); e->seq_count = le64_to_cpu(wme.seq_count); } + cond_resched(); } #endif for (b = 0; b < wc->n_blocks; b++) { @@ -963,9 +1020,11 @@ static void writecache_resume(struct dm_target *ti) if (need_flush) { writecache_flush_all_metadata(wc); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); } + writecache_verify_watermark(wc); + wc_unlock(wc); } @@ -1223,7 +1282,8 @@ static int writecache_map(struct dm_target *ti, struct bio *bio) } } while (bio->bi_iter.bi_size); - if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) + if (unlikely(bio->bi_opf & REQ_FUA || + wc->uncommitted_blocks >= wc->autocommit_blocks)) writecache_flush(wc); else writecache_schedule_autocommit(wc); @@ -1346,7 +1406,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head * wc->writeback_size--; n_walked++; if (unlikely(n_walked >= ENDIO_LATENCY)) { - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); wc_unlock(wc); wc_lock(wc); n_walked = 0; @@ -1427,7 +1487,7 @@ static int writecache_endio_thread(void *data) writecache_wait_for_ios(wc, READ); } - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, 
false); wc_unlock(wc); } @@ -1442,6 +1502,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t void *address = memory_data(wc, e); persistent_memory_flush_cache(address, block_size); + + if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) + return true; + return bio_add_page(&wb->bio, persistent_memory_page(address), block_size, persistent_memory_page_offset(address)) != 0; } @@ -1514,6 +1578,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba if (writecache_has_error(wc)) { bio->bi_status = BLK_STS_IOERR; bio_endio(&wb->bio); + } else if (unlikely(!bio_sectors(&wb->bio))) { + bio->bi_status = BLK_STS_OK; + bio_endio(&wb->bio); } else { submit_bio(&wb->bio); } @@ -1557,6 +1624,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac e = f; } + if (unlikely(to.sector + to.count > wc->data_device_sectors)) { + if (to.sector >= wc->data_device_sectors) { + writecache_copy_endio(0, 0, c); + continue; + } + from.count = to.count = wc->data_device_sectors - to.sector; + } + dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); __writeback_throttle(wc, wbl); @@ -1754,14 +1829,16 @@ static int init_memory(struct dm_writecache *wc) pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); - for (b = 0; b < wc->n_blocks; b++) + for (b = 0; b < wc->n_blocks; b++) { write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); + cond_resched(); + } writecache_flush_all_metadata(wc); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC)); writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic); - writecache_commit_flushed(wc); + writecache_commit_flushed(wc, false); return 0; } @@ -1964,6 +2041,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Invalid block size"; goto bad; } + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { + r = -EINVAL; + ti->error = "Block size is smaller than device logical block size"; + goto bad; + } wc->block_size_bits = __ffs(wc->block_size); wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; @@ -1985,6 +2068,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1) goto invalid_optional; wc->start_sector = start_sector; + wc->start_sector_set = true; if (wc->start_sector != start_sector || wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) goto invalid_optional; @@ -1994,6 +2078,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto invalid_optional; if (high_wm_percent < 0 || high_wm_percent > 100) goto invalid_optional; + wc->high_wm_percent_value = high_wm_percent; wc->high_wm_percent_set = true; } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) { string = dm_shift_arg(&as), opt_params--; @@ -2001,6 +2086,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto invalid_optional; if (low_wm_percent < 0 || low_wm_percent > 100) goto invalid_optional; + wc->low_wm_percent_value = low_wm_percent; wc->low_wm_percent_set = true; } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) { string = dm_shift_arg(&as), opt_params--; @@ -2020,6 +2106,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char 
**argv) if (autocommit_msecs > 3600000) goto invalid_optional; wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs); + wc->autocommit_time_value = autocommit_msecs; wc->autocommit_time_set = true; } else if (!strcasecmp(string, "fua")) { if (WC_MODE_PMEM(wc)) { @@ -2052,8 +2139,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } } else { - struct dm_io_region region; - struct dm_io_request req; size_t n_blocks, n_metadata_blocks; uint64_t n_bitmap_bits; @@ -2110,19 +2195,9 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - region.bdev = wc->ssd_dev->bdev; - region.sector = wc->start_sector; - region.count = wc->metadata_sectors; - req.bi_op = REQ_OP_READ; - req.bi_op_flags = REQ_SYNC; - req.mem.type = DM_IO_VMA; - req.mem.ptr.vma = (char *)wc->memory_map; - req.client = wc->dm_io; - req.notify.fn = NULL; - - r = dm_io(&req, 1, &region, NULL); + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); if (r) { - ti->error = "Unable to read metadata"; + ti->error = "Unable to read first block of metadata"; goto bad; } } @@ -2227,7 +2302,6 @@ static void writecache_status(struct dm_target *ti, status_type_t type, struct dm_writecache *wc = ti->private; unsigned extra_args; unsigned sz = 0; - uint64_t x; switch (type) { case STATUSTYPE_INFO: @@ -2239,7 +2313,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type, DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's', wc->dev->name, wc->ssd_dev->name, wc->block_size); extra_args = 0; - if (wc->start_sector) + if (wc->start_sector_set) extra_args += 2; if (wc->high_wm_percent_set) extra_args += 2; @@ -2255,26 +2329,18 @@ static void writecache_status(struct dm_target *ti, status_type_t type, extra_args++; DMEMIT("%u", extra_args); - if (wc->start_sector) + if (wc->start_sector_set) DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector); - if (wc->high_wm_percent_set) { - x = (uint64_t)wc->freelist_high_watermark * 100; - x += wc->n_blocks / 2; - do_div(x, (size_t)wc->n_blocks); - DMEMIT(" high_watermark %u", 100 - (unsigned)x); - } - if (wc->low_wm_percent_set) { - x = (uint64_t)wc->freelist_low_watermark * 100; - x += wc->n_blocks / 2; - do_div(x, (size_t)wc->n_blocks); - DMEMIT(" low_watermark %u", 100 - (unsigned)x); - } + if (wc->high_wm_percent_set) + DMEMIT(" high_watermark %u", wc->high_wm_percent_value); + if (wc->low_wm_percent_set) + DMEMIT(" low_watermark %u", wc->low_wm_percent_value); if (wc->max_writeback_jobs_set) DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs); if (wc->autocommit_blocks_set) DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks); if (wc->autocommit_time_set) - DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies)); + DMEMIT(" autocommit_time %u", wc->autocommit_time_value); if (wc->writeback_fua_set) DMEMIT(" %sfua", wc->writeback_fua ? 
"" : "no"); break; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 969954915566f2cf1a15b684eb5b3d33dca58160..5c2bbdf67f25d7bb9d2f16e92e6ec9abe439df53 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -99,7 +99,7 @@ struct dmz_mblock { struct rb_node node; struct list_head link; sector_t no; - atomic_t ref; + unsigned int ref; unsigned long state; struct page *page; void *data; @@ -132,6 +132,7 @@ struct dmz_metadata { sector_t zone_bitmap_size; unsigned int zone_nr_bitmap_blocks; + unsigned int zone_bits_per_mblk; unsigned int nr_bitmap_blocks; unsigned int nr_map_blocks; @@ -296,7 +297,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, RB_CLEAR_NODE(&mblk->node); INIT_LIST_HEAD(&mblk->link); - atomic_set(&mblk->ref, 0); + mblk->ref = 0; mblk->state = 0; mblk->no = mblk_no; mblk->data = page_address(mblk->page); @@ -339,10 +340,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) } /* - * Lookup a metadata block in the rbtree. + * Lookup a metadata block in the rbtree. If the block is found, increment + * its reference count. */ -static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, - sector_t mblk_no) +static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd, + sector_t mblk_no) { struct rb_root *root = &zmd->mblk_rbtree; struct rb_node *node = root->rb_node; @@ -350,8 +352,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd, while (node) { mblk = container_of(node, struct dmz_mblock, node); - if (mblk->no == mblk_no) + if (mblk->no == mblk_no) { + /* + * If this is the first reference to the block, + * remove it from the LRU list. + */ + mblk->ref++; + if (mblk->ref == 1 && + !test_bit(DMZ_META_DIRTY, &mblk->state)) + list_del_init(&mblk->link); return mblk; + } node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; } @@ -382,32 +393,50 @@ static void dmz_mblock_bio_end_io(struct bio *bio) } /* - * Read a metadata block from disk. + * Read an uncached metadata block from disk and add it to the cache. */ -static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, - sector_t mblk_no) +static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, + sector_t mblk_no) { - struct dmz_mblock *mblk; + struct dmz_mblock *mblk, *m; sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; struct bio *bio; - /* Get block and insert it */ + if (dmz_bdev_is_dying(zmd->dev)) + return ERR_PTR(-EIO); + + /* Get a new block and a BIO to read it */ mblk = dmz_alloc_mblock(zmd, mblk_no); if (!mblk) - return NULL; - - spin_lock(&zmd->mblk_lock); - atomic_inc(&mblk->ref); - set_bit(DMZ_META_READING, &mblk->state); - dmz_insert_mblock(zmd, mblk); - spin_unlock(&zmd->mblk_lock); + return ERR_PTR(-ENOMEM); bio = bio_alloc(GFP_NOIO, 1); if (!bio) { dmz_free_mblock(zmd, mblk); - return NULL; + return ERR_PTR(-ENOMEM); } + spin_lock(&zmd->mblk_lock); + + /* + * Make sure that another context did not start reading + * the block already. 
+ */ + m = dmz_get_mblock_fast(zmd, mblk_no); + if (m) { + spin_unlock(&zmd->mblk_lock); + dmz_free_mblock(zmd, mblk); + bio_put(bio); + return m; + } + + mblk->ref++; + set_bit(DMZ_META_READING, &mblk->state); + dmz_insert_mblock(zmd, mblk); + + spin_unlock(&zmd->mblk_lock); + + /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); bio_set_dev(bio, zmd->dev->bdev); bio->bi_private = mblk; @@ -484,7 +513,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd, spin_lock(&zmd->mblk_lock); - if (atomic_dec_and_test(&mblk->ref)) { + mblk->ref--; + if (mblk->ref == 0) { if (test_bit(DMZ_META_ERROR, &mblk->state)) { rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); @@ -508,20 +538,14 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, /* Check rbtree */ spin_lock(&zmd->mblk_lock); - mblk = dmz_lookup_mblock(zmd, mblk_no); - if (mblk) { - /* Cache hit: remove block from LRU list */ - if (atomic_inc_return(&mblk->ref) == 1 && - !test_bit(DMZ_META_DIRTY, &mblk->state)) - list_del_init(&mblk->link); - } + mblk = dmz_get_mblock_fast(zmd, mblk_no); spin_unlock(&zmd->mblk_lock); if (!mblk) { /* Cache miss: read the block from disk */ - mblk = dmz_fetch_mblock(zmd, mblk_no); - if (!mblk) - return ERR_PTR(-ENOMEM); + mblk = dmz_get_mblock_slow(zmd, mblk_no); + if (IS_ERR(mblk)) + return mblk; } /* Wait for on-going read I/O and check for error */ @@ -529,6 +553,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { dmz_release_mblock(zmd, mblk); + dmz_check_bdev(zmd->dev); return ERR_PTR(-EIO); } @@ -549,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) /* * Issue a metadata block write BIO. 
*/ -static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, - unsigned int set) +static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, + unsigned int set) { sector_t block = zmd->sb[set].block + mblk->no; struct bio *bio; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) { set_bit(DMZ_META_ERROR, &mblk->state); - return; + return -ENOMEM; } set_bit(DMZ_META_WRITING, &mblk->state); @@ -570,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); + + return 0; } /* @@ -581,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, struct bio *bio; int ret; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) return -ENOMEM; @@ -592,6 +625,8 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, ret = submit_bio_wait(bio); bio_put(bio); + if (ret) + dmz_check_bdev(zmd->dev); return ret; } @@ -638,22 +673,30 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, { struct dmz_mblock *mblk; struct blk_plug plug; - int ret = 0; + int ret = 0, nr_mblks_submitted = 0; /* Issue writes */ blk_start_plug(&plug); - list_for_each_entry(mblk, write_list, link) - dmz_write_mblock(zmd, mblk, set); + list_for_each_entry(mblk, write_list, link) { + ret = dmz_write_mblock(zmd, mblk, set); + if (ret) + break; + nr_mblks_submitted++; + } blk_finish_plug(&plug); /* Wait for completion */ list_for_each_entry(mblk, write_list, link) { + if (!nr_mblks_submitted) + break; wait_on_bit_io(&mblk->state, DMZ_META_WRITING, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { clear_bit(DMZ_META_ERROR, &mblk->state); + dmz_check_bdev(zmd->dev); ret = -EIO; } + nr_mblks_submitted--; } /* Flush drive cache (this will also sync data) */ @@ -715,6 +758,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ dmz_lock_flush(zmd); + if (dmz_bdev_is_dying(zmd->dev)) { + ret = -EIO; + goto out; + } + /* Get dirty blocks */ spin_lock(&zmd->mblk_lock); list_splice_init(&zmd->mblk_dirty_list, &write_list); @@ -723,7 +771,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); - goto out; + goto err; } /* @@ -733,7 +781,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ ret = dmz_log_dirty_mblocks(zmd, &write_list); if (ret) - goto out; + goto err; /* * The log is on disk. 
It is now safe to update in place @@ -741,11 +789,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary); if (ret) - goto out; + goto err; ret = dmz_write_sb(zmd, zmd->mblk_primary); if (ret) - goto out; + goto err; while (!list_empty(&write_list)) { mblk = list_first_entry(&write_list, struct dmz_mblock, link); @@ -753,23 +801,27 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) spin_lock(&zmd->mblk_lock); clear_bit(DMZ_META_DIRTY, &mblk->state); - if (atomic_read(&mblk->ref) == 0) + if (mblk->ref == 0) list_add_tail(&mblk->link, &zmd->mblk_lru_list); spin_unlock(&zmd->mblk_lock); } zmd->sb_gen++; out: - if (ret && !list_empty(&write_list)) { - spin_lock(&zmd->mblk_lock); - list_splice(&write_list, &zmd->mblk_dirty_list); - spin_unlock(&zmd->mblk_lock); - } - dmz_unlock_flush(zmd); up_write(&zmd->mblk_sem); return ret; + +err: + if (!list_empty(&write_list)) { + spin_lock(&zmd->mblk_lock); + list_splice(&write_list, &zmd->mblk_dirty_list); + spin_unlock(&zmd->mblk_lock); + } + if (!dmz_check_bdev(zmd->dev)) + ret = -EIO; + goto out; } /* @@ -1053,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) { set_bit(DMZ_RND, &zone->flags); - zmd->nr_rnd_zones++; } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ || blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) { set_bit(DMZ_SEQ, &zone->flags); @@ -1114,7 +1165,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd) /* Init */ zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3; - zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT; + zmd->zone_nr_bitmap_blocks = + max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT); + zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks, + DMZ_BLOCK_SIZE_BITS); /* Allocate zone array */ zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL); @@ -1149,6 +1203,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd) goto out; } + if (!nr_blkz) + break; + /* Process report */ for (i = 0; i < nr_blkz; i++) { ret = dmz_init_zone(zmd, zone, &blkz[i]); @@ -1184,9 +1241,12 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) /* Get zone information from disk */ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), &blkz, &nr_blkz, GFP_NOIO); + if (!nr_blkz) + ret = -EIO; if (ret) { dmz_dev_err(zmd->dev, "Get zone %u report failed", dmz_id(zmd, zone)); + dmz_check_bdev(zmd->dev); return ret; } @@ -1509,7 +1569,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_rnd_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_rnd_list, link) { if (dmz_is_buf(zone)) @@ -1531,7 +1591,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_seq_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_seq_list, link) { if (!zone->bzone) @@ -1568,30 +1628,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } -/* - * Activate a zone (increment its reference count). - */ -void dmz_activate_zone(struct dm_zone *zone) -{ - set_bit(DMZ_ACTIVE, &zone->flags); - atomic_inc(&zone->refcount); -} - -/* - * Deactivate a zone. 
This decrement the zone reference counter - * and clears the active state of the zone once the count reaches 0, - * indicating that all BIOs to the zone have completed. Returns - * true if the zone was deactivated. - */ -void dmz_deactivate_zone(struct dm_zone *zone) -{ - if (atomic_dec_and_test(&zone->refcount)) { - WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags)); - clear_bit_unlock(DMZ_ACTIVE, &zone->flags); - smp_mb__after_atomic(); - } -} - /* * Get the zone mapping a chunk, if the chunk is mapped already. * If no mapping exist and the operation is WRITE, a zone is @@ -1622,6 +1658,10 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu /* Alloate a random zone */ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!dzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + dzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } @@ -1719,6 +1759,10 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd, /* Alloate a random zone */ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!bzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + bzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } @@ -1941,7 +1985,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone, dmz_release_mblock(zmd, to_mblk); dmz_release_mblock(zmd, from_mblk); - chunk_block += DMZ_BLOCK_SIZE_BITS; + chunk_block += zmd->zone_bits_per_mblk; } to_zone->weight = from_zone->weight; @@ -2002,7 +2046,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, /* Set bits */ bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits); if (count) { @@ -2081,7 +2125,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, /* Clear bits */ bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); count = dmz_clear_bits((unsigned long *)mblk->data, bit, nr_bits); @@ -2141,6 +2185,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, { struct dmz_mblock *mblk; unsigned int bit, set_bit, nr_bits; + unsigned int zone_bits = zmd->zone_bits_per_mblk; unsigned long *bitmap; int n = 0; @@ -2155,15 +2200,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, /* Get offset */ bitmap = (unsigned long *) mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zone_bits - bit); if (set) - set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit); + set_bit = find_next_bit(bitmap, zone_bits, bit); else - set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit); + set_bit = find_next_zero_bit(bitmap, zone_bits, bit); dmz_release_mblock(zmd, mblk); n += set_bit - bit; - if (set_bit < DMZ_BLOCK_SIZE_BITS) + if (set_bit < zone_bits) break; nr_blocks -= nr_bits; @@ -2266,7 +2311,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone) /* Count bits in this block */ bitmap = mblk->data; bit = chunk_block & DMZ_BLOCK_MASK_BITS; - nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit); + nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit); n += dmz_count_bits(bitmap, bit, nr_bits); dmz_release_mblock(zmd, mblk); @@ -2308,7 +2353,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata 
*zmd) mblk = list_first_entry(&zmd->mblk_dirty_list, struct dmz_mblock, link); dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", - (u64)mblk->no, atomic_read(&mblk->ref)); + (u64)mblk->no, mblk->ref); list_del_init(&mblk->link); rb_erase(&mblk->node, &zmd->mblk_rbtree); dmz_free_mblock(zmd, mblk); @@ -2326,8 +2371,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) root = &zmd->mblk_rbtree; rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", - (u64)mblk->no, atomic_read(&mblk->ref)); - atomic_set(&mblk->ref, 0); + (u64)mblk->no, mblk->ref); + mblk->ref = 0; dmz_free_mblock(zmd, mblk); } diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index edf4b95eb0750dc6485513d49c240b2982017114..879848aad97a7f56f0b85834085ad5d9473c9163 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -37,7 +37,7 @@ enum { /* * Number of seconds of target BIO inactivity to consider the target idle. */ -#define DMZ_IDLE_PERIOD (10UL * HZ) +#define DMZ_IDLE_PERIOD (10UL * HZ) /* * Percentage of unmapped (free) random zones below which reclaim starts @@ -81,6 +81,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", dmz_id(zmd, zone), (unsigned long long)wp_block, (unsigned long long)block, nr_blocks, ret); + dmz_check_bdev(zrc->dev); return ret; } @@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc, set_bit(DM_KCOPYD_WRITE_SEQ, &flags); while (block < end_block) { + if (dev->flags & DMZ_BDEV_DYING) + return -EIO; + /* Get a valid region from the source zone */ ret = dmz_first_valid_block(zmd, src_zone, &block); if (ret <= 0) @@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone) /* * Find a candidate zone for reclaim and process it. 
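/*
 * The zone-selection helpers above now hand back ERR_PTR(-EBUSY) instead of
 * NULL, letting callers propagate an error code rather than guessing why no
 * zone came back. A rough userspace rendition of the kernel's
 * ERR_PTR/IS_ERR/PTR_ERR idea (the real helpers live in include/linux/err.h;
 * these macros are simplified stand-ins, and pick_zone() is invented).
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* Illustrative stand-in for a zone selection routine. */
static void *pick_zone(int have_candidate)
{
	static int zone = 42;

	if (!have_candidate)
		return ERR_PTR(-EBUSY);		/* nothing reclaimable right now */
	return &zone;
}

int main(void)
{
	void *z = pick_zone(0);

	if (IS_ERR(z))
		printf("reclaim skipped, err %ld\n", PTR_ERR(z));
	return 0;
}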
*/ -static void dmz_reclaim(struct dmz_reclaim *zrc) +static int dmz_do_reclaim(struct dmz_reclaim *zrc) { struct dmz_metadata *zmd = zrc->metadata; struct dm_zone *dzone; @@ -345,7 +349,7 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); if (!dzone) - return; + return -EBUSY; start = jiffies; @@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) out: if (ret) { dmz_unlock_zone_reclaim(dzone); - return; + return ret; } - (void) dmz_flush_metadata(zrc->metadata); + ret = dmz_flush_metadata(zrc->metadata); + if (ret) { + dmz_dev_debug(zrc->dev, + "Metadata flush for zone %u failed, err %d\n", + dmz_id(zmd, rzone), ret); + return ret; + } dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); + return 0; } /* @@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work) struct dmz_metadata *zmd = zrc->metadata; unsigned int nr_rnd, nr_unmap_rnd; unsigned int p_unmap_rnd; + int ret; + + if (dmz_bdev_is_dying(zrc->dev)) + return; if (!dmz_should_reclaim(zrc)) { mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); @@ -471,7 +486,12 @@ static void dmz_reclaim_work(struct work_struct *work) (dmz_target_idle(zrc) ? "Idle" : "Busy"), p_unmap_rnd, nr_unmap_rnd, nr_rnd); - dmz_reclaim(zrc); + ret = dmz_do_reclaim(zrc); + if (ret) { + dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret); + if (!dmz_check_bdev(zrc->dev)) + return; + } dmz_schedule_reclaim(zrc); } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a44183ff4be0a3bd4219a7bf5854622aeca79db2..caee8460a0b05e93771de18692a78f59a6f261bd 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -20,7 +20,6 @@ struct dmz_bioctx { struct dm_zone *zone; struct bio *bio; atomic_t ref; - blk_status_t status; }; /* @@ -78,65 +77,68 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status) { struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK) - bioctx->status = status; - bio_endio(bio); + if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK) + bio->bi_status = status; + if (bio->bi_status != BLK_STS_OK) + bioctx->target->dev->flags |= DMZ_CHECK_BDEV; + + if (atomic_dec_and_test(&bioctx->ref)) { + struct dm_zone *zone = bioctx->zone; + + if (zone) { + if (bio->bi_status != BLK_STS_OK && + bio_op(bio) == REQ_OP_WRITE && + dmz_is_seq(zone)) + set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); + dmz_deactivate_zone(zone); + } + bio_endio(bio); + } } /* - * Partial clone read BIO completion callback. This terminates the + * Completion callback for an internally cloned target BIO. This terminates the * target BIO when there are no more references to its context. */ -static void dmz_read_bio_end_io(struct bio *bio) +static void dmz_clone_endio(struct bio *clone) { - struct dmz_bioctx *bioctx = bio->bi_private; - blk_status_t status = bio->bi_status; + struct dmz_bioctx *bioctx = clone->bi_private; + blk_status_t status = clone->bi_status; - bio_put(bio); + bio_put(clone); dmz_bio_endio(bioctx->bio, status); } /* - * Issue a BIO to a zone. The BIO may only partially process the + * Issue a clone of a target BIO. The clone may only partially process the * original target BIO. 
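/*
 * dmz_clone_endio()/dmz_bio_endio() above end the original BIO only once
 * every clone submitted for it has dropped its reference on the shared
 * bioctx. A minimal single-threaded userspace model of that refcounted
 * completion; the struct and function names are invented for the sketch.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req_ctx {
	atomic_int ref;		/* one ref per in-flight piece, plus the submitter's */
	int status;		/* first error reported is kept */
};

static void ctx_put(struct req_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->ref, 1) == 1) {
		/* Last reference dropped: the whole request is finished. */
		printf("request done, status %d\n", ctx->status);
		free(ctx);
	}
}

static void piece_done(struct req_ctx *ctx, int err)
{
	if (err && !ctx->status)
		ctx->status = err;
	ctx_put(ctx);
}

int main(void)
{
	struct req_ctx *ctx = calloc(1, sizeof(*ctx));
	int i;

	if (!ctx)
		return 1;
	atomic_init(&ctx->ref, 1);		/* submitter's reference */
	for (i = 0; i < 3; i++)
		atomic_fetch_add(&ctx->ref, 1);	/* one per submitted piece */
	for (i = 0; i < 3; i++)
		piece_done(ctx, 0);		/* piece completions */
	ctx_put(ctx);				/* submitter drops its ref last */
	return 0;
}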
*/ -static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone, - struct bio *bio, sector_t chunk_block, - unsigned int nr_blocks) +static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, + struct bio *bio, sector_t chunk_block, + unsigned int nr_blocks) { struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - sector_t sector; struct bio *clone; - /* BIO remap sector */ - sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); - - /* If the read is not partial, there is no need to clone the BIO */ - if (nr_blocks == dmz_bio_blocks(bio)) { - /* Setup and submit the BIO */ - bio->bi_iter.bi_sector = sector; - atomic_inc(&bioctx->ref); - generic_make_request(bio); - return 0; - } - - /* Partial BIO: we need to clone the BIO */ clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); if (!clone) return -ENOMEM; - /* Setup the clone */ - clone->bi_iter.bi_sector = sector; + bio_set_dev(clone, dmz->dev->bdev); + clone->bi_iter.bi_sector = + dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; - clone->bi_end_io = dmz_read_bio_end_io; + clone->bi_end_io = dmz_clone_endio; clone->bi_private = bioctx; bio_advance(bio, clone->bi_iter.bi_size); - /* Submit the clone */ atomic_inc(&bioctx->ref); generic_make_request(clone); + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) + zone->wp_block += nr_blocks; + return 0; } @@ -214,7 +216,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, if (nr_blocks) { /* Valid blocks found: read them */ nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block); - ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks); if (ret) return ret; chunk_block += nr_blocks; @@ -228,25 +230,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, return 0; } -/* - * Issue a write BIO to a zone. - */ -static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone, - struct bio *bio, sector_t chunk_block, - unsigned int nr_blocks) -{ - struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - - /* Setup and submit the BIO */ - bio_set_dev(bio, dmz->dev->bdev); - bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); - atomic_inc(&bioctx->ref); - generic_make_request(bio); - - if (dmz_is_seq(zone)) - zone->wp_block += nr_blocks; -} - /* * Write blocks directly in a data zone, at the write pointer. * If a buffer zone is assigned, invalidate the blocks written @@ -265,7 +248,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz, return -EROFS; /* Submit write */ - dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks); + if (ret) + return ret; /* * Validate the blocks in the data zone and invalidate @@ -294,14 +279,16 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, /* Get the buffer zone. 
One will be allocated if needed */ bzone = dmz_get_chunk_buffer(zmd, zone); - if (!bzone) - return -ENOSPC; + if (IS_ERR(bzone)) + return PTR_ERR(bzone); if (dmz_is_readonly(bzone)) return -EROFS; /* Submit write */ - dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks); + ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks); + if (ret) + return ret; /* * Validate the blocks in the buffer zone @@ -404,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, dmz_lock_metadata(zmd); + if (dmz->dev->flags & DMZ_BDEV_DYING) { + ret = -EIO; + goto out; + } + /* * Get the data zone mapping the chunk. There may be no * mapping for read and discard. If a mapping is obtained, @@ -508,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work) /* Flush dirty metadata blocks */ ret = dmz_flush_metadata(dmz->metadata); + if (ret) + dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret); /* Process queued flush requests */ while (1) { @@ -528,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work) * Get a chunk work and start it to process a new BIO. * If the BIO chunk has no work yet, create one. */ -static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) +static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) { unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); struct dm_chunk_work *cw; + int ret = 0; mutex_lock(&dmz->chunk_lock); /* Get the BIO chunk work. If one is not active yet, create one */ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); if (!cw) { - int ret; /* Create a new chunk work */ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); - if (!cw) + if (unlikely(!cw)) { + ret = -ENOMEM; goto out; + } INIT_WORK(&cw->work, dmz_chunk_work); atomic_set(&cw->refcount, 0); @@ -554,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); if (unlikely(ret)) { kfree(cw); - cw = NULL; goto out; } } @@ -562,10 +557,58 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) bio_list_add(&cw->bio_list, bio); dmz_get_chunk_work(cw); + dmz_reclaim_bio_acc(dmz->reclaim); if (queue_work(dmz->chunk_wq, &cw->work)) dmz_get_chunk_work(cw); out: mutex_unlock(&dmz->chunk_lock); + return ret; +} + +/* + * Check if the backing device is being removed. If it's on the way out, + * start failing I/O. Reclaim and metadata components also call this + * function to cleanly abort operation in the event of such failure. + */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev) +{ + if (dmz_dev->flags & DMZ_BDEV_DYING) + return true; + + if (dmz_dev->flags & DMZ_CHECK_BDEV) + return !dmz_check_bdev(dmz_dev); + + if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { + dmz_dev_warn(dmz_dev, "Backing device queue dying"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } + + return dmz_dev->flags & DMZ_BDEV_DYING; +} + +/* + * Check the backing device availability. This detects such events as + * backing device going offline due to errors, media removals, etc. + * This check is less efficient than dmz_bdev_is_dying() and should + * only be performed as a part of error handling. 
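/*
 * dmz_bdev_is_dying() above is meant to stay cheap: a flag test, falling back
 * to the full dmz_check_bdev() probe below only when an earlier I/O error set
 * DMZ_CHECK_BDEV. A small userspace sketch of that "cheap flag, deferred
 * expensive check" split; device_offline() and every other name here is a
 * placeholder, not the driver's API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { DEV_DYING = 1 << 0, DEV_NEEDS_CHECK = 1 << 1 };

static atomic_uint dev_flags;

static bool device_offline(void)
{
	return false;			/* stand-in for a slow media/queue probe */
}

/* Slow check: run from error paths, or lazily when DEV_NEEDS_CHECK is set. */
static bool dev_check(void)
{
	atomic_fetch_and(&dev_flags, ~DEV_NEEDS_CHECK);
	if (device_offline())
		atomic_fetch_or(&dev_flags, DEV_DYING);
	return !(atomic_load(&dev_flags) & DEV_DYING);
}

/* Fast check: just flags, unless an earlier error asked for a full probe. */
static bool dev_is_dying(void)
{
	unsigned int flags = atomic_load(&dev_flags);

	if (flags & DEV_DYING)
		return true;
	if (flags & DEV_NEEDS_CHECK)
		return !dev_check();
	return false;
}

/* Completion path: remember that the device should be re-probed. */
static void note_io_error(void)
{
	atomic_fetch_or(&dev_flags, DEV_NEEDS_CHECK);
}

int main(void)
{
	note_io_error();
	printf("dying: %d\n", dev_is_dying());
	return 0;
}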
+ */ +bool dmz_check_bdev(struct dmz_dev *dmz_dev) +{ + struct gendisk *disk; + + dmz_dev->flags &= ~DMZ_CHECK_BDEV; + + if (dmz_bdev_is_dying(dmz_dev)) + return false; + + disk = dmz_dev->bdev->bd_disk; + if (disk->fops->check_events && + disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) { + dmz_dev_warn(dmz_dev, "Backing device offline"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } + + return !(dmz_dev->flags & DMZ_BDEV_DYING); } /* @@ -579,6 +622,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) sector_t sector = bio->bi_iter.bi_sector; unsigned int nr_sectors = bio_sectors(bio); sector_t chunk_sector; + int ret; + + if (dmz_bdev_is_dying(dmz->dev)) + return DM_MAPIO_KILL; dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", bio_op(bio), (unsigned long long)sector, nr_sectors, @@ -600,7 +647,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) bioctx->zone = NULL; bioctx->bio = bio; atomic_set(&bioctx->ref, 1); - bioctx->status = BLK_STS_OK; /* Set the BIO pending in the flush list */ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { @@ -617,39 +663,16 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); /* Now ready to handle this BIO */ - dmz_reclaim_bio_acc(dmz->reclaim); - dmz_queue_chunk_work(dmz, bio); - - return DM_MAPIO_SUBMITTED; -} - -/* - * Completed target BIO processing. - */ -static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) -{ - struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); - - if (bioctx->status == BLK_STS_OK && *error) - bioctx->status = *error; - - if (!atomic_dec_and_test(&bioctx->ref)) - return DM_ENDIO_INCOMPLETE; - - /* Done */ - bio->bi_status = bioctx->status; - - if (bioctx->zone) { - struct dm_zone *zone = bioctx->zone; - - if (*error && bio_op(bio) == REQ_OP_WRITE) { - if (dmz_is_seq(zone)) - set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); - } - dmz_deactivate_zone(zone); + ret = dmz_queue_chunk_work(dmz, bio); + if (ret) { + dmz_dev_debug(dmz->dev, + "BIO op %d, can't process chunk %llu, err %i\n", + bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio), + ret); + return DM_MAPIO_REQUEUE; } - return DM_ENDIO_DONE; + return DM_MAPIO_SUBMITTED; } /* @@ -688,7 +711,8 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) q = bdev_get_queue(dev->bdev); dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; - aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1); + aligned_capacity = dev->capacity & + ~((sector_t)blk_queue_zone_sectors(q) - 1); if (ti->begin || ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { ti->error = "Partial mapping not supported"; @@ -766,7 +790,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dev->zone_nr_sectors << 9; + ti->max_io_len = dev->zone_nr_sectors; ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; @@ -901,6 +925,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct dmz_target *dmz = ti->private; + if (!dmz_check_bdev(dmz->dev)) + return -EIO; + *bdev = dmz->dev->bdev; return 0; @@ -947,7 +974,6 @@ static struct target_type dmz_type = { .ctr = dmz_ctr, .dtr = dmz_dtr, .map = dmz_map, - .end_io = dmz_end_io, .io_hints = dmz_io_hints, .prepare_ioctl = dmz_prepare_ioctl, .postsuspend = dmz_suspend, diff --git a/drivers/md/dm-zoned.h 
b/drivers/md/dm-zoned.h index 12419f0bfe78ba14fd8e0c29249f26a22a99095b..2662746ba8b9dadf29e6b1e8dcdafbe84f42b1d8 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -56,6 +56,8 @@ struct dmz_dev { unsigned int nr_zones; + unsigned int flags; + sector_t zone_nr_sectors; unsigned int zone_nr_sectors_shift; @@ -67,6 +69,10 @@ struct dmz_dev { (dev)->zone_nr_sectors_shift) #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) +/* Device flags. */ +#define DMZ_BDEV_DYING (1 << 0) +#define DMZ_CHECK_BDEV (2 << 0) + /* * Zone descriptor. */ @@ -115,7 +121,6 @@ enum { DMZ_BUF, /* Zone internal state */ - DMZ_ACTIVE, DMZ_RECLAIM, DMZ_SEQ_WRITE_ERR, }; @@ -128,7 +133,6 @@ enum { #define dmz_is_empty(z) ((z)->wp_block == 0) #define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags) #define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags) -#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags) #define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags) #define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags) @@ -188,8 +192,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone); unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd); unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd); -void dmz_activate_zone(struct dm_zone *zone); -void dmz_deactivate_zone(struct dm_zone *zone); +/* + * Activate a zone (increment its reference count). + */ +static inline void dmz_activate_zone(struct dm_zone *zone) +{ + atomic_inc(&zone->refcount); +} + +/* + * Deactivate a zone. This decrement the zone reference counter + * indicating that all BIOs to the zone have completed when the count is 0. + */ +static inline void dmz_deactivate_zone(struct dm_zone *zone) +{ + atomic_dec(&zone->refcount); +} + +/* + * Test if a zone is active, that is, has a refcount > 0. + */ +static inline bool dmz_is_active(struct dm_zone *zone) +{ + return atomic_read(&zone->refcount); +} int dmz_lock_zone_reclaim(struct dm_zone *zone); void dmz_unlock_zone_reclaim(struct dm_zone *zone); @@ -225,4 +251,10 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc); void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); void dmz_schedule_reclaim(struct dmz_reclaim *zrc); +/* + * Functions defined in dm-zoned-target.c + */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev); +bool dmz_check_bdev(struct dmz_dev *dmz_dev); + #endif /* DM_ZONED_H */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 45abb54037fc6427106f383bc1a3ba22bc8d3152..469aefecfe0ba9a1ce4a743e1aaf9d79da7973d7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -140,10 +141,21 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 +#define DMF_POST_SUSPENDING 8 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) +static int swap_bios = DEFAULT_SWAP_BIOS; +static int get_swap_bios(void) +{ + int latch = READ_ONCE(swap_bios); + if (unlikely(latch <= 0)) + latch = DEFAULT_SWAP_BIOS; + return latch; +} + /* * For mempools pre-allocation at the table loading time. 
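/*
 * get_swap_bios() above latches the swap_bios module parameter once per call
 * and falls back to DEFAULT_SWAP_BIOS when it holds a nonsense value; the
 * latched value later sizes md->swap_bios_semaphore, which caps how many swap
 * BIOs are in flight (see the down()/up() calls further on). A compact
 * userspace sketch of the same throttle using a POSIX semaphore; the names
 * below are illustrative only.
 */
#include <semaphore.h>
#include <stdio.h>

#define DEFAULT_LIMIT	64

static int limit_param;			/* imagine a runtime-writable tunable */
static sem_t inflight;

static int get_limit(void)
{
	int latch = limit_param;	/* read once; the tunable may change later */

	if (latch <= 0)
		latch = DEFAULT_LIMIT;	/* never trust a zero or negative setting */
	return latch;
}

static void issue_swap_io(void)
{
	sem_wait(&inflight);	/* blocks once get_limit() requests are in flight */
	/* ... submit the I/O; in a driver the post would run from its completion ... */
	sem_post(&inflight);
}

int main(void)
{
	sem_init(&inflight, 0, get_limit());
	issue_swap_io();
	printf("swap I/O limit: %d\n", get_limit());
	sem_destroy(&inflight);
	return 0;
}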
*/ @@ -609,8 +621,8 @@ static void start_io_acct(struct dm_io *io) io->start_time = jiffies; - generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio), - &dm_disk(md)->part0); + generic_start_precise_io_acct(md->queue, bio_op(bio), + &dm_disk(md)->part0); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); @@ -621,21 +633,20 @@ static void start_io_acct(struct dm_io *io) false, 0, &io->stats_aux); } -static void end_io_acct(struct dm_io *io) +static void end_io_acct(struct mapped_device *md, struct bio *bio, + unsigned long start_time, struct dm_stats_aux *stats_aux) { - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - unsigned long duration = jiffies - io->start_time; + unsigned long duration = jiffies - start_time; int pending; int rw = bio_data_dir(bio); - generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, - io->start_time); + generic_end_precise_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, + start_time, bio_sectors(bio)); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, &io->stats_aux); + true, duration, stats_aux); /* * After this is decremented the bio must not be touched if it is @@ -862,6 +873,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) blk_status_t io_error; struct bio *bio; struct mapped_device *md = io->md; + unsigned long start_time = 0; + struct dm_stats_aux stats_aux; /* Push-back supersedes any I/O errors */ if (unlikely(error)) { @@ -888,8 +901,10 @@ static void dec_pending(struct dm_io *io, blk_status_t error) io_error = io->status; bio = io->orig_bio; - end_io_acct(io); + start_time = io->start_time; + stats_aux = io->stats_aux; free_io(md, io); + end_io_acct(md, bio, start_time, &stats_aux); if (io_error == BLK_STS_DM_REQUEUE) return; @@ -910,6 +925,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) } } +void disable_discard(struct mapped_device *md) +{ + struct queue_limits *limits = dm_get_queue_limits(md); + + /* device doesn't really support DISCARD, disable it */ + limits->max_discard_sectors = 0; + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); +} + void disable_write_same(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); @@ -926,6 +950,11 @@ void disable_write_zeroes(struct mapped_device *md) limits->max_write_zeroes_sectors = 0; } +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) +{ + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); +} + static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; @@ -935,11 +964,14 @@ static void clone_endio(struct bio *bio) dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { - if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) + if (bio_op(bio) == REQ_OP_DISCARD && + !bio->bi_disk->queue->limits.max_discard_sectors) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_SAME && + !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); - if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } @@ -960,6 +992,11 @@ static void clone_endio(struct bio *bio) } } + if 
(unlikely(swap_bios_limit(tio->ti, bio))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } + free_tio(tio); dec_pending(io, error); } @@ -1007,15 +1044,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) return -EINVAL; } - /* - * BIO based queue uses its own splitting. When multipage bvecs - * is switched on, size of the incoming bio may be too big to - * be handled in some targets, such as crypt. - * - * When these targets are ready for the big bio, we can remove - * the limit. - */ - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); + ti->max_io_len = (uint32_t) len; return 0; } @@ -1246,6 +1275,22 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) } EXPORT_SYMBOL_GPL(dm_remap_zone_report); +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) +{ + mutex_lock(&md->swap_bios_lock); + while (latch < md->swap_bios) { + cond_resched(); + down(&md->swap_bios_semaphore); + md->swap_bios--; + } + while (latch > md->swap_bios) { + cond_resched(); + up(&md->swap_bios_semaphore); + md->swap_bios++; + } + mutex_unlock(&md->swap_bios_lock); +} + static blk_qc_t __map_bio(struct dm_target_io *tio) { int r; @@ -1266,6 +1311,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) atomic_inc(&io->io_count); sector = clone->bi_iter.bi_sector; + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + int latch = get_swap_bios(); + if (unlikely(latch != md->swap_bios)) + __set_swap_bios_limit(md, latch); + down(&md->swap_bios_semaphore); + } + r = ti->type->map(ti, clone); switch (r) { case DM_MAPIO_SUBMITTED: @@ -1280,10 +1333,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) ret = generic_make_request(clone); break; case DM_MAPIO_KILL: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_IOERR); break; case DM_MAPIO_REQUEUE: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_DM_REQUEUE); break; @@ -1592,6 +1653,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, return ret; } + blk_queue_split(md->queue, &bio); + init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { @@ -1623,6 +1686,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, GFP_NOIO, &md->queue->bio_split); ci.io->orig_bio = b; + bio_chain(b, bio); ret = generic_make_request(bio); break; @@ -1685,10 +1749,16 @@ static blk_qc_t __process_bio(struct mapped_device *md, return ret; } -typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *); +static blk_qc_t dm_process_bio(struct mapped_device *md, + struct dm_table *map, struct bio *bio) +{ + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) + return __process_bio(md, map, bio); + else + return __split_and_process_bio(md, map, bio); +} -static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, - process_bio_fn process_bio) +static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) { struct mapped_device *md = q->queuedata; blk_qc_t ret = BLK_QC_T_NONE; @@ -1697,8 +1767,9 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, map = dm_get_live_table(md, &srcu_idx); - /* if we're suspended, we have to queue this io for later */ 
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { + /* If suspended, or map not yet available, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || + unlikely(!map)) { dm_put_live_table(md, srcu_idx); if (!(bio->bi_opf & REQ_RAHEAD)) @@ -1708,26 +1779,12 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio, return ret; } - ret = process_bio(md, map, bio); + ret = dm_process_bio(md, map, bio); dm_put_live_table(md, srcu_idx); return ret; } -/* - * The request function that remaps the bio to one target and - * splits off any remainder. - */ -static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) -{ - return __dm_make_request(q, bio, __split_and_process_bio); -} - -static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio) -{ - return __dm_make_request(q, bio, __process_bio); -} - static int dm_any_congested(void *congested_data, int bdi_bits) { int r = bdi_bits; @@ -1813,6 +1870,7 @@ static void dm_init_normal_md_queue(struct mapped_device *md) /* * Initialize aspects of queue that aren't relevant for blk-mq */ + md->queue->backing_dev_info->congested_data = md; md->queue->backing_dev_info->congested_fn = dm_any_congested; } @@ -1852,6 +1910,7 @@ static void cleanup_mapped_device(struct mapped_device *md) mutex_destroy(&md->suspend_lock); mutex_destroy(&md->type_lock); mutex_destroy(&md->table_devices_lock); + mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); } @@ -1907,7 +1966,12 @@ static struct mapped_device *alloc_dev(int minor) if (!md->queue) goto bad; md->queue->queuedata = md; - md->queue->backing_dev_info->congested_data = md; + /* + * default to bio-based required ->make_request_fn until DM + * table is loaded and md->type established. If request-based + * table is loaded: blk-mq will override accordingly. 
+ */ + blk_queue_make_request(md->queue, dm_make_request); md->disk = alloc_disk_node(1, md->numa_node_id); if (!md->disk) @@ -1921,6 +1985,10 @@ static struct mapped_device *alloc_dev(int minor) init_completion(&md->kobj_holder.completion); md->kworker_task = NULL; + md->swap_bios = get_swap_bios(); + sema_init(&md->swap_bios_semaphore, md->swap_bios); + mutex_init(&md->swap_bios_lock); + md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; @@ -1928,6 +1996,8 @@ static struct mapped_device *alloc_dev(int minor) md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); + add_disk_no_queue_reg(md->disk); + if (IS_ENABLED(CONFIG_DAX_DRIVER)) { dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops); if (!dax_dev) @@ -1935,7 +2005,6 @@ static struct mapped_device *alloc_dev(int minor) } md->dax_dev = dax_dev; - add_disk_no_queue_reg(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); @@ -1950,7 +2019,9 @@ static struct mapped_device *alloc_dev(int minor) bio_set_dev(&md->flush_bio, md->bdev); md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - dm_stats_init(&md->stats); + r = dm_stats_init(&md->stats); + if (r < 0) + goto bad; /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); @@ -2235,12 +2306,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - dm_init_normal_md_queue(md); - blk_queue_make_request(md->queue, dm_make_request); - break; case DM_TYPE_NVME_BIO_BASED: dm_init_normal_md_queue(md); - blk_queue_make_request(md->queue, dm_make_request_nvme); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2334,6 +2401,19 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (dm_request_based(md) && md->kworker_task) kthread_flush_worker(&md->kworker); + /* + * Rare, but there may be I/O requests still going to complete, + * for example. Wait for all references to disappear. + * No one should increment the reference count of the mapped_device, + * after the mapped_device state becomes DMF_FREEING. + */ + if (wait) + while (atomic_read(&md->holders)) + msleep(1); + else if (atomic_read(&md->holders)) + DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", + dm_device_name(md), atomic_read(&md->holders)); + /* * Take suspend_lock so that presuspend and postsuspend methods * do not race with internal suspend. @@ -2342,25 +2422,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); + set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); mutex_unlock(&md->suspend_lock); - /* - * Rare, but there may be I/O requests still going to complete, - * for example. Wait for all references to disappear. - * No one should increment the reference count of the mapped_device, - * after the mapped_device state becomes DMF_FREEING. - */ - if (wait) - while (atomic_read(&md->holders)) - msleep(1); - else if (atomic_read(&md->holders)) - DMWARN("%s: Forcibly removing mapped_device still in use! 
(%d users)", - dm_device_name(md), atomic_read(&md->holders)); - dm_sysfs_exit(md); dm_table_destroy(__unbind(md)); free_dev(md); @@ -2402,6 +2471,8 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) } finish_wait(&md->wait, &wait); + smp_rmb(); /* paired with atomic_dec_return in end_io_acct */ + return r; } @@ -2427,9 +2498,9 @@ static void dm_wq_work(struct work_struct *work) break; if (dm_request_based(md)) - generic_make_request(c); + (void) generic_make_request(c); else - __split_and_process_bio(md, map, c); + (void) dm_process_bio(md, map, c); } dm_put_live_table(md, srcu_idx); @@ -2662,12 +2733,18 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + if (!map) { + /* avoid deadlock with fs/namespace.c:do_mount() */ + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + } r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2764,11 +2841,16 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static void __dm_internal_resume(struct mapped_device *md) { + int r; + struct dm_table *map; + BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) @@ -2777,12 +2859,23 @@ static void __dm_internal_resume(struct mapped_device *md) if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ - /* - * NOTE: existing callers don't need to call dm_table_resume_targets - * (which may fail -- so best to avoid it for now by passing NULL map) - */ - (void) __dm_resume(md, NULL); - + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + r = __dm_resume(md, map); + if (r) { + /* + * If a preresume method of some target failed, we are in a + * tricky situation. We can't return an error to the caller. We + * can't fake success because then the "resume" and + * "postsuspend" methods would not be paired correctly, and it + * would break various targets, for example it would cause list + * corruption in the "origin" target. + * + * So, we fake normal suspend here, to make sure that the + * "resume" and "postsuspend" methods will be paired correctly. 
+ */ + DMERR("Preresume method failed: %d", r); + set_bit(DMF_SUSPENDED, &md->flags); + } done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); @@ -2841,17 +2934,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, + action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) @@ -2917,6 +3018,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -2933,6 +3039,12 @@ int dm_suspended(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(dm_table_get_md(ti->table)); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); @@ -3024,6 +3136,11 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, goto out; ti = dm_table_get_target(table, 0); + if (dm_suspended_md(md)) { + ret = -EAGAIN; + goto out; + } + ret = -EINVAL; if (!ti->type->iterate_devices) goto out; @@ -3192,6 +3309,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); +module_param(swap_bios, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 2fc8c113977fbec7877220e034dd34561399741b..f05417673bf2ab0f92cac2e30cfc8dbf70fab78e 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1635,7 +1635,7 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) s += blocks; } bitmap->last_end_sync = jiffies; - sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed); } EXPORT_SYMBOL(md_bitmap_cond_end_sync); @@ -1725,6 +1725,8 @@ void md_bitmap_flush(struct mddev *mddev) md_bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; md_bitmap_daemon_work(mddev); + if (mddev->bitmap_info.external) + md_super_wait(mddev); md_bitmap_update_sb(bitmap); } @@ -1947,6 +1949,7 @@ int md_bitmap_load(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_bitmap_load); +/* caller need to free returned bitmap with md_bitmap_free() */ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) { int rv = 0; @@ -2010,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, md_bitmap_unplug(mddev->bitmap); *low = lo; *high = hi; + 
md_bitmap_free(bitmap); return rv; } @@ -2132,6 +2136,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), sizeof(bitmap_super_t)); + spin_lock_irq(&bitmap->counts.lock); md_bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; @@ -2147,7 +2152,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, blocks = min(old_counts.chunks << old_counts.chunkshift, chunks << chunkshift); - spin_lock_irq(&bitmap->counts.lock); /* For cluster raid, need to pre-allocate bitmap */ if (mddev_is_clustered(bitmap->mddev)) { unsigned long page; @@ -2288,9 +2292,9 @@ location_store(struct mddev *mddev, const char *buf, size_t len) goto out; } if (mddev->pers) { - mddev->pers->quiesce(mddev, 1); + mddev_suspend(mddev); md_bitmap_destroy(mddev); - mddev->pers->quiesce(mddev, 0); + mddev_resume(mddev); } mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { @@ -2327,8 +2331,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len) mddev->bitmap_info.offset = offset; if (mddev->pers) { struct bitmap *bitmap; - mddev->pers->quiesce(mddev, 1); bitmap = md_bitmap_create(mddev, -1); + mddev_suspend(mddev); if (IS_ERR(bitmap)) rv = PTR_ERR(bitmap); else { @@ -2337,11 +2341,12 @@ location_store(struct mddev *mddev, const char *buf, size_t len) if (rv) mddev->bitmap_info.offset = 0; } - mddev->pers->quiesce(mddev, 0); if (rv) { md_bitmap_destroy(mddev); + mddev_resume(mddev); goto out; } + mddev_resume(mddev); } } } @@ -2598,4 +2603,3 @@ struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, }; - diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 0b2af6e74fc375ed163824fa9cdaf84d1b9ffd95..9a640863cd8c1241051051ee2c3bda7954104ee8 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -669,9 +669,27 @@ static void recv_daemon(struct md_thread *thread) * Takes the lock on the TOKEN lock resource so no other * node can communicate while the operation is underway. */ -static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) +static int lock_token(struct md_cluster_info *cinfo) { - int error, set_bit = 0; + int error; + + error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); + if (error) { + pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", + __func__, __LINE__, error); + } else { + /* Lock the receive sequence */ + mutex_lock(&cinfo->recv_mutex); + } + return error; +} + +/* lock_comm() + * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. 
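/*
 * lock_comm() below serializes senders with wait_event() plus
 * test_and_set_bit(MD_CLUSTER_SEND_LOCK, ...), then takes the DLM token and,
 * with this change, reports a failure to its callers instead of pressing on.
 * A loose userspace approximation with a condition variable standing in for
 * the waitqueue; acquire_token() is a stub, and unlike the driver this demo
 * also releases the send channel when the token cannot be taken, just to
 * keep the example self-contained.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t chan_cv = PTHREAD_COND_INITIALIZER;
static bool send_locked;

static int acquire_token(void)
{
	return 0;			/* stand-in for dlm_lock_sync(); may fail */
}

static int lock_channel(void)
{
	int ret;

	pthread_mutex_lock(&chan_lock);
	while (send_locked)		/* wait_event(..., !test_and_set_bit(...)) */
		pthread_cond_wait(&chan_cv, &chan_lock);
	send_locked = true;
	pthread_mutex_unlock(&chan_lock);

	ret = acquire_token();		/* only now talk to the rest of the cluster */
	if (ret) {
		pthread_mutex_lock(&chan_lock);
		send_locked = false;
		pthread_cond_signal(&chan_cv);
		pthread_mutex_unlock(&chan_lock);
	}
	return ret;
}

static void unlock_channel(void)
{
	pthread_mutex_lock(&chan_lock);
	send_locked = false;
	pthread_cond_signal(&chan_cv);
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	if (!lock_channel()) {
		/* ... send the message ... */
		unlock_channel();
	}
	return 0;
}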
+ */ +static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) +{ + int rv, set_bit = 0; struct mddev *mddev = cinfo->mddev; /* @@ -682,34 +700,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked) */ if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state)) { - error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, + rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); - WARN_ON_ONCE(error); + WARN_ON_ONCE(rv); md_wakeup_thread(mddev->thread); set_bit = 1; } - error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); - if (set_bit) - clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); - if (error) - pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", - __func__, __LINE__, error); - - /* Lock the receive sequence */ - mutex_lock(&cinfo->recv_mutex); - return error; -} - -/* lock_comm() - * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel. - */ -static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked) -{ wait_event(cinfo->wait, !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state)); - - return lock_token(cinfo, mddev_locked); + rv = lock_token(cinfo); + if (set_bit) + clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); + return rv; } static void unlock_comm(struct md_cluster_info *cinfo) @@ -789,9 +792,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg, { int ret; - lock_comm(cinfo, mddev_locked); - ret = __sendmsg(cinfo, cmsg); - unlock_comm(cinfo); + ret = lock_comm(cinfo, mddev_locked); + if (!ret) { + ret = __sendmsg(cinfo, cmsg); + unlock_comm(cinfo); + } return ret; } @@ -1063,7 +1068,7 @@ static int metadata_update_start(struct mddev *mddev) return 0; } - ret = lock_token(cinfo, 1); + ret = lock_token(cinfo); clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); return ret; } @@ -1181,7 +1186,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors) int raid_slot = -1; md_update_sb(mddev, 1); - lock_comm(cinfo, 1); + if (lock_comm(cinfo, 1)) { + pr_err("%s: lock_comm failed\n", __func__); + return; + } memset(&cmsg, 0, sizeof(cmsg)); cmsg.type = cpu_to_le32(METADATA_UPDATED); @@ -1330,7 +1338,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) cmsg.type = cpu_to_le32(NEWDISK); memcpy(cmsg.uuid, uuid, 16); cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); - lock_comm(cinfo, 1); + if (lock_comm(cinfo, 1)) + return -EAGAIN; ret = __sendmsg(cinfo, &cmsg); if (ret) { unlock_comm(cinfo); diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index d45c697c0ebe7cede53a25618e30a4398390202f..9a60231b844be161299ec8f53150519c645c2b75 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -252,10 +252,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) sector_t start_sector, end_sector, data_offset; sector_t bio_sector = bio->bi_iter.bi_sector; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } tmp_dev = which_dev(mddev, bio_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 881487de1e25af5e994c8770e255b74b3864f513..80c35bfd11b8dd83440fcec1ef4846423b3ef6ca 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -112,10 +112,9 @@ static bool multipath_make_request(struct mddev *mddev, 
struct bio * bio) struct multipath_bh * mp_bh; struct multipath_info *multipath; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } mp_bh = mempool_alloc(&conf->pool, GFP_NOIO); diff --git a/drivers/md/md.c b/drivers/md/md.c index 63ceabb4e020f656313fabff5e25e3ba593a8786..80f7b58c202ed4ab6c2ff5296067fce74add0efb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -94,6 +94,7 @@ EXPORT_SYMBOL(md_cluster_mod); static DECLARE_WAIT_QUEUE_HEAD(resync_wait); static struct workqueue_struct *md_wq; static struct workqueue_struct *md_misc_wq; +static struct workqueue_struct *md_rdev_misc_wq; static int remove_and_add_spares(struct mddev *mddev, struct md_rdev *this); @@ -132,24 +133,6 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } -static void * flush_info_alloc(gfp_t gfp_flags, void *data) -{ - return kzalloc(sizeof(struct flush_info), gfp_flags); -} -static void flush_info_free(void *flush_info, void *data) -{ - kfree(flush_info); -} - -static void * flush_bio_alloc(gfp_t gfp_flags, void *data) -{ - return kzalloc(sizeof(struct flush_bio), gfp_flags); -} -static void flush_bio_free(void *flush_bio, void *data) -{ - kfree(flush_bio); -} - static struct ctl_table_header *raid_table_header; static struct ctl_table raid_table[] = { @@ -429,53 +412,32 @@ static int md_congested(void *data, int bits) /* * Generic flush handling for md */ -static void submit_flushes(struct work_struct *ws) -{ - struct flush_info *fi = container_of(ws, struct flush_info, flush_work); - struct mddev *mddev = fi->mddev; - struct bio *bio = fi->bio; - - bio->bi_opf &= ~REQ_PREFLUSH; - md_handle_request(mddev, bio); - mempool_free(fi, mddev->flush_pool); -} - -static void md_end_flush(struct bio *fbio) +static void md_end_flush(struct bio *bio) { - struct flush_bio *fb = fbio->bi_private; - struct md_rdev *rdev = fb->rdev; - struct flush_info *fi = fb->fi; - struct bio *bio = fi->bio; - struct mddev *mddev = fi->mddev; + struct md_rdev *rdev = bio->bi_private; + struct mddev *mddev = rdev->mddev; + + bio_put(bio); rdev_dec_pending(rdev, mddev); - if (atomic_dec_and_test(&fi->flush_pending)) { - if (bio->bi_iter.bi_size == 0) - /* an empty barrier - all done */ - bio_endio(bio); - else { - INIT_WORK(&fi->flush_work, submit_flushes); - queue_work(md_wq, &fi->flush_work); - } + if (atomic_dec_and_test(&mddev->flush_pending)) { + /* The pre-request flush has finished */ + queue_work(md_wq, &mddev->flush_work); } - - mempool_free(fb, mddev->flush_bio_pool); - bio_put(fbio); } -void md_flush_request(struct mddev *mddev, struct bio *bio) +static void md_submit_flush_data(struct work_struct *ws); + +static void submit_flushes(struct work_struct *ws) { + struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct md_rdev *rdev; - struct flush_info *fi; - - fi = mempool_alloc(mddev->flush_pool, GFP_NOIO); - - fi->bio = bio; - fi->mddev = mddev; - atomic_set(&fi->flush_pending, 1); + mddev->start_flush = ktime_get_boottime(); + INIT_WORK(&mddev->flush_work, md_submit_flush_data); + atomic_set(&mddev->flush_pending, 1); rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && @@ -485,38 +447,85 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) * we reclaim rcu_read_lock */ struct bio *bi; - struct flush_bio *fb; atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - - fb = 
mempool_alloc(mddev->flush_bio_pool, GFP_NOIO); - fb->fi = fi; - fb->rdev = rdev; - bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); - bio_set_dev(bi, rdev->bdev); bi->bi_end_io = md_end_flush; - bi->bi_private = fb; + bi->bi_private = rdev; + bio_set_dev(bi, rdev->bdev); bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - - atomic_inc(&fi->flush_pending); + atomic_inc(&mddev->flush_pending); submit_bio(bi); - rcu_read_lock(); rdev_dec_pending(rdev, mddev); } rcu_read_unlock(); + if (atomic_dec_and_test(&mddev->flush_pending)) + queue_work(md_wq, &mddev->flush_work); +} + +static void md_submit_flush_data(struct work_struct *ws) +{ + struct mddev *mddev = container_of(ws, struct mddev, flush_work); + struct bio *bio = mddev->flush_bio; + + /* + * must reset flush_bio before calling into md_handle_request to avoid a + * deadlock, because other bios passed md_handle_request suspend check + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ + spin_lock_irq(&mddev->lock); + mddev->last_flush = mddev->start_flush; + mddev->flush_bio = NULL; + spin_unlock_irq(&mddev->lock); + wake_up(&mddev->sb_wait); + + if (bio->bi_iter.bi_size == 0) { + /* an empty barrier - all done */ + bio_endio(bio); + } else { + bio->bi_opf &= ~REQ_PREFLUSH; + md_handle_request(mddev, bio); + } +} - if (atomic_dec_and_test(&fi->flush_pending)) { +/* + * Manages consolidation of flushes and submitting any flushes needed for + * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is + * being finished in another context. Returns false if the flushing is + * complete but still needs the I/O portion of the bio to be processed. + */ +bool md_flush_request(struct mddev *mddev, struct bio *bio) +{ + ktime_t start = ktime_get_boottime(); + spin_lock_irq(&mddev->lock); + wait_event_lock_irq(mddev->sb_wait, + !mddev->flush_bio || + ktime_after(mddev->last_flush, start), + mddev->lock); + if (!ktime_after(mddev->last_flush, start)) { + WARN_ON(mddev->flush_bio); + mddev->flush_bio = bio; + bio = NULL; + } + spin_unlock_irq(&mddev->lock); + + if (!bio) { + INIT_WORK(&mddev->flush_work, submit_flushes); + queue_work(md_wq, &mddev->flush_work); + } else { + /* flush was performed for some other bio while we waited. 
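/*
 * md_flush_request() here coalesces REQ_PREFLUSH handling: each caller notes
 * when it arrived, and if a flush both started and completed after that point
 * it can skip issuing its own. A small userspace model of that scheme, using
 * a sequence counter instead of ktime_get_boottime() and a condition variable
 * instead of sb_wait; do_flush() and the other names are invented for the
 * sketch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static unsigned long clock_seq;		/* stand-in for a boot-time clock */
static unsigned long last_flush;	/* "time" the last completed flush started */
static bool flush_in_progress;

static void do_flush(void)
{
	/* submit the real cache flush here */
}

/* Returns true if this caller had to issue a flush itself. */
static bool flush_request(void)
{
	unsigned long start, flush_start;
	bool issued = false;

	pthread_mutex_lock(&lk);
	start = ++clock_seq;
	/* Wait while someone else's flush is in flight and does not cover us. */
	while (flush_in_progress && last_flush <= start)
		pthread_cond_wait(&cv, &lk);
	if (last_flush <= start) {
		/* No flush started after we arrived: issue one ourselves. */
		flush_in_progress = true;
		flush_start = ++clock_seq;
		pthread_mutex_unlock(&lk);

		do_flush();
		issued = true;

		pthread_mutex_lock(&lk);
		last_flush = flush_start;	/* covers everyone older than flush_start */
		flush_in_progress = false;
		pthread_cond_broadcast(&cv);
	}
	pthread_mutex_unlock(&lk);
	return issued;
}

int main(void)
{
	printf("issued our own flush: %d\n", flush_request());
	return 0;
}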
*/ if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio); else { - INIT_WORK(&fi->flush_work, submit_flushes); - queue_work(md_wq, &fi->flush_work); + bio->bi_opf &= ~REQ_PREFLUSH; + return false; } } + return true; } EXPORT_SYMBOL(md_flush_request); @@ -557,13 +566,16 @@ void mddev_init(struct mddev *mddev) mutex_init(&mddev->open_mutex); mutex_init(&mddev->reconfig_mutex); mutex_init(&mddev->bitmap_info.mutex); + mutex_init(&mddev->sync_mutex); INIT_LIST_HEAD(&mddev->disks); INIT_LIST_HEAD(&mddev->all_mddevs); timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); + atomic_set(&mddev->sync_seq, 0); spin_lock_init(&mddev->lock); + atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; @@ -575,7 +587,34 @@ void mddev_init(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_init); +static struct mddev *mddev_find_locked(dev_t unit) +{ + struct mddev *mddev; + + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == unit) + return mddev; + + return NULL; +} + static struct mddev *mddev_find(dev_t unit) +{ + struct mddev *mddev; + + if (MAJOR(unit) != MD_MAJOR) + unit &= ~((1 << MdpMinorShift) - 1); + + spin_lock(&all_mddevs_lock); + mddev = mddev_find_locked(unit); + if (mddev) + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + + return mddev; +} + +static struct mddev *mddev_find_or_alloc(dev_t unit) { struct mddev *mddev, *new = NULL; @@ -586,13 +625,13 @@ static struct mddev *mddev_find(dev_t unit) spin_lock(&all_mddevs_lock); if (unit) { - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == unit) { - mddev_get(mddev); - spin_unlock(&all_mddevs_lock); - kfree(new); - return mddev; - } + mddev = mddev_find_locked(unit); + if (mddev) { + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + kfree(new); + return mddev; + } if (new) { list_add(&new->all_mddevs, &all_mddevs); @@ -618,12 +657,7 @@ static struct mddev *mddev_find(dev_t unit) return NULL; } - is_free = 1; - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == dev) { - is_free = 0; - break; - } + is_free = !mddev_find_locked(dev); } new->unit = dev; new->md_minor = MINOR(dev); @@ -679,7 +713,13 @@ void mddev_unlock(struct mddev *mddev) sysfs_remove_group(&mddev->kobj, &md_redundancy_group); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); + if (mddev->sysfs_completed) + sysfs_put(mddev->sysfs_completed); + if (mddev->sysfs_degraded) + sysfs_put(mddev->sysfs_degraded); mddev->sysfs_action = NULL; + mddev->sysfs_completed = NULL; + mddev->sysfs_degraded = NULL; } } mddev->sysfs_active = 0; @@ -791,10 +831,12 @@ static void super_written(struct bio *bio) } else clear_bit(LastDev, &rdev->flags); + bio_put(bio); + + rdev_dec_pending(rdev, mddev); + if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); - rdev_dec_pending(rdev, mddev); - bio_put(bio); } void md_super_write(struct mddev *mddev, struct md_rdev *rdev, @@ -1001,6 +1043,7 @@ struct super_type { struct md_rdev *refdev, int minor_version); int (*validate_super)(struct mddev *mddev, + struct md_rdev *freshest, struct md_rdev *rdev); void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); @@ -1036,6 +1079,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; mdp_super_t *sb; int ret; + 
bool spare_disk = true; /* * Calculate the position of the superblock (512byte sectors), @@ -1086,8 +1130,19 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor else rdev->desc_nr = sb->this_disk.number; + /* not spare disk, or LEVEL_MULTIPATH */ + if (sb->level == LEVEL_MULTIPATH || + (rdev->desc_nr >= 0 && + rdev->desc_nr < MD_SB_DISKS && + sb->disks[rdev->desc_nr].state & + ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) + spare_disk = false; + if (!refdev) { - ret = 1; + if (!spare_disk) + ret = 1; + else + ret = 0; } else { __u64 ev1, ev2; mdp_super_t *refsb = page_address(refdev->sb_page); @@ -1103,7 +1158,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor } ev1 = md_event(sb); ev2 = md_event(refsb); - if (ev1 > ev2) + + if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; @@ -1127,8 +1183,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor /* * validate_super for 0.90.0 + * note: we are not using "freshest" for 0.9 superblock */ -static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) +static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) { mdp_disk_t *desc; mdp_super_t *sb = page_address(rdev->sb_page); @@ -1176,6 +1233,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0) + mddev->layout = -1; if (sb->state & (1<<MD_SB_CLEAN)) mddev->recovery_cp = MaxSector; @@ -1463,6 +1522,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ sector_t sectors; char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; int bmask; + bool spare_disk = true; /* * Calculate the position of the superblock in 512byte sectors. @@ -1592,8 +1652,23 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; } + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && + sb->level != 0) + return -EINVAL; + + /* not spare disk, or LEVEL_MULTIPATH */ + if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) || + (rdev->desc_nr >= 0 && + rdev->desc_nr < le32_to_cpu(sb->max_dev) && + (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || + le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))) + spare_disk = false; + if (!refdev) { - ret = 1; + if (!spare_disk) + ret = 1; + else + ret = 0; } else { __u64 ev1, ev2; struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); @@ -1610,7 +1685,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ ev1 = le64_to_cpu(sb->events); ev2 = le64_to_cpu(refsb->events); - if (ev1 > ev2) + if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; @@ -1626,7 +1701,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ return ret; } -static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) +static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) { struct mdp_superblock_1 *sb = page_address(rdev->sb_page); __u64 ev1 = le64_to_cpu(sb->events); @@ -1702,6 +1777,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0 && + !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) + mddev->layout = -1; + if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) set_bit(MD_HAS_JOURNAL, &mddev->flags); @@ -1718,13 +1797,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) } } else if (mddev->pers == NULL) { /* Insist of good event counter while assembling, except for - * spares
(which don't need an event count) */ - ++ev1; + * spares (which don't need an event count). + * Similar to mdadm, we allow event counter difference of 1 + * from the freshest device. + */ if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) - if (ev1 < mddev->events) + if (ev1 + 1 < mddev->events) return -EINVAL; } else if (mddev->bitmap) { /* If adding to array with a bitmap, then we can accept an @@ -1745,8 +1826,38 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { role = MD_DISK_ROLE_SPARE; rdev->desc_nr = -1; - } else + } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) { + /* + * If we are assembling, and our event counter is smaller than the + * highest event counter, we cannot trust our superblock about the role. + * It could happen that our rdev was marked as Faulty, and all other + * superblocks were updated with +1 event counter. + * Then, before the next superblock update, which typically happens when + * remove_and_add_spares() removes the device from the array, there was + * a crash or reboot. + * If we allow current rdev without consulting the freshest superblock, + * we could cause data corruption. + * Note that in this case our event counter is smaller by 1 than the + * highest, otherwise, this rdev would not be allowed into array; + * both kernel and mdadm allow event counter difference of 1. + */ + struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page); + u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev); + + if (rdev->desc_nr >= freshest_max_dev) { + /* this is unexpected, better not proceed */ + pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n", + mdname(mddev), rdev->bdev, rdev->desc_nr, + freshest->bdev, freshest_max_dev); + return -EUCLEAN; + } + + role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]); + pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n", + mdname(mddev), rdev->bdev, role, role, freshest->bdev); + } else { role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); + } switch(role) { case MD_DISK_ROLE_SPARE: /* spare */ break; @@ -1771,8 +1882,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_BITMAP)) rdev->saved_raid_disk = -1; - } else - set_bit(In_sync, &rdev->flags); + } else { + /* + * If the array is FROZEN, then the device can't + * be in_sync with rest of array. 
+ */ + if (!test_bit(MD_RECOVERY_FROZEN, + &mddev->recovery)) + set_bit(In_sync, &rdev->flags); + } rdev->raid_disk = role; break; } @@ -2171,8 +2289,9 @@ EXPORT_SYMBOL(md_integrity_add_rdev); static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) { - char b[BDEVNAME_SIZE]; + char b[BDEVNAME_SIZE + 4]; struct kobject *ko; + struct kernfs_node *sysfs_rdev; int err; /* prevent duplicates */ @@ -2223,19 +2342,32 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) mdname(mddev), mddev->max_disks); return -EBUSY; } - bdevname(rdev->bdev,b); + memcpy(b, "dev-", 4); + bdevname(rdev->bdev, b + 4); strreplace(b, '/', '!'); rdev->mddev = mddev; pr_debug("md: bind<%s>\n", b); - if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) + sysfs_rdev = sysfs_get_dirent_safe(mddev->kobj.sd, b); + if (sysfs_rdev) { + sysfs_put(sysfs_rdev); + err = -EBUSY; + goto fail; + } + + err = kobject_add(&rdev->kobj, &mddev->kobj, b); + if (err) goto fail; ko = &part_to_dev(rdev->bdev->bd_part)->kobj; if (sysfs_create_link(&rdev->kobj, ko, "block")) /* failure here is OK */; rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); + rdev->sysfs_unack_badblocks = + sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); + rdev->sysfs_badblocks = + sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); list_add_rcu(&rdev->same_set, &mddev->disks); bd_link_disk_holder(rdev->bdev, mddev->gendisk); @@ -2246,12 +2378,12 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) return 0; fail: - pr_warn("md: failed to register dev-%s for %s\n", + pr_warn("md: failed to register %s for %s\n", b, mdname(mddev)); return err; } -static void md_delayed_delete(struct work_struct *ws) +static void rdev_delayed_delete(struct work_struct *ws) { struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); kobject_del(&rdev->kobj); @@ -2268,16 +2400,20 @@ static void unbind_rdev_from_array(struct md_rdev *rdev) rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); + sysfs_put(rdev->sysfs_unack_badblocks); + sysfs_put(rdev->sysfs_badblocks); rdev->sysfs_state = NULL; + rdev->sysfs_unack_badblocks = NULL; + rdev->sysfs_badblocks = NULL; rdev->badblocks.count = 0; /* We need to delay this, otherwise we can deadlock when * writing to 'remove' to "dev/state". We also need * to delay it due to rcu usage. */ synchronize_rcu(); - INIT_WORK(&rdev->del_work, md_delayed_delete); + INIT_WORK(&rdev->del_work, rdev_delayed_delete); kobject_get(&rdev->kobj); - queue_work(md_misc_wq, &rdev->del_work); + queue_work(md_rdev_misc_wq, &rdev->del_work); } /* @@ -2396,14 +2532,16 @@ static void sync_sbs(struct mddev *mddev, int nospares) static bool does_sb_need_changing(struct mddev *mddev) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; struct mdp_superblock_1 *sb; int role; /* Find a good rdev */ - rdev_for_each(rdev, mddev) - if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) + rdev_for_each(iter, mddev) + if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { + rdev = iter; break; + } /* No good device found. 
*/ if (!rdev) @@ -2613,7 +2751,7 @@ void md_update_sb(struct mddev *mddev, int force_change) goto repeat; wake_up(&mddev->sb_wait); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); rdev_for_each(rdev, mddev) { if (test_and_clear_bit(FaultRecorded, &rdev->flags)) @@ -2639,7 +2777,7 @@ static int add_bound_rdev(struct md_rdev *rdev) * and should be added immediately. */ super_types[mddev->major_version]. - validate_super(mddev, rdev); + validate_super(mddev, NULL/*freshest*/, rdev); if (add_journal) mddev_suspend(mddev); err = mddev->pers->hot_add_disk(mddev, rdev); @@ -2861,8 +2999,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = 0; } } else if (cmd_match(buf, "re-add")) { - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && - rdev->saved_raid_disk >= 0) { + if (!rdev->mddev->pers) + err = -EINVAL; + else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && + rdev->saved_raid_disk >= 0) { /* clear_bit is performed _after_ all the devices * have their local Faulty bit cleared. If any writes * happen in the meantime in the local node, they @@ -3390,10 +3530,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - rv = mddev ? mddev_lock(mddev): -EBUSY; + rv = mddev ? mddev_lock(mddev) : -ENODEV; if (!rv) { if (rdev->mddev == NULL) - rv = -EBUSY; + rv = -ENODEV; else rv = entry->store(rdev, page, length); mddev_unlock(mddev); @@ -3514,7 +3654,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe * Check a full RAID array for plausibility */ -static void analyze_sbs(struct mddev *mddev) +static int analyze_sbs(struct mddev *mddev) { int i; struct md_rdev *rdev, *freshest, *tmp; @@ -3535,8 +3675,14 @@ static void analyze_sbs(struct mddev *mddev) md_kick_rdev_from_array(rdev); } + /* Cannot find a valid fresh disk */ + if (!freshest) { + pr_warn("md: cannot find a valid disk\n"); + return -EINVAL; + } + super_types[mddev->major_version]. - validate_super(mddev, freshest); + validate_super(mddev, NULL/*freshest*/, freshest); i = 0; rdev_for_each_safe(rdev, tmp, mddev) { @@ -3551,7 +3697,7 @@ static void analyze_sbs(struct mddev *mddev) } if (rdev != freshest) { if (super_types[mddev->major_version]. - validate_super(mddev, rdev)) { + validate_super(mddev, freshest, rdev)) { pr_warn("md: kicking non-fresh %s from array!\n", bdevname(rdev->bdev,b)); md_kick_rdev_from_array(rdev); @@ -3569,6 +3715,8 @@ static void analyze_sbs(struct mddev *mddev) clear_bit(In_sync, &rdev->flags); } } + + return 0; } /* Read a fixed-point number. 
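[Review note] The validate_super() callbacks now take a "freshest" rdev so that, while assembling, a member whose event count lags the array by exactly one can take its role from the freshest superblock instead of its own, possibly stale, copy; analyze_sbs() also fails the run when no valid freshest device can be found. A condensed sketch of the acceptance and role-selection rule as implemented by these hunks (helper names are hypothetical and for illustration only, not part of the patch):

	/* Illustration only -- not part of the patch. */
	static bool ev_count_acceptable(u64 ev_rdev, u64 ev_array)
	{
		/* like mdadm, tolerate a device lagging by one event */
		return ev_rdev + 1 >= ev_array;
	}

	static u16 pick_role(struct mdp_superblock_1 *own,
			     struct mdp_superblock_1 *freshest,
			     int desc_nr, u64 ev_rdev, u64 ev_array)
	{
		/* a lagging superblock may record a stale role */
		if (freshest && ev_rdev < ev_array)
			return le16_to_cpu(freshest->dev_roles[desc_nr]);
		return le16_to_cpu(own->dev_roles[desc_nr]);
	}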
@@ -3812,6 +3960,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) pr_warn("md: cannot register extra attributes for %s\n", mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); + mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); + mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); } if (oldpers->sync_request != NULL && pers->sync_request == NULL) { @@ -3859,7 +4009,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev_resume(mddev); if (!mddev->thread) md_update_sb(mddev, 1); - sysfs_notify(&mddev->kobj, NULL, "level"); + sysfs_notify_dirent_safe(mddev->sysfs_level); md_new_event(mddev); rv = len; out_unlock: @@ -4115,7 +4265,7 @@ array_state_show(struct mddev *mddev, char *page) { enum array_state st = inactive; - if (mddev->pers) + if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) switch(mddev->ro) { case 1: st = readonly; @@ -4320,8 +4470,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len) minor != MINOR(dev)) return -EOVERFLOW; - flush_workqueue(md_misc_wq); - + flush_workqueue(md_rdev_misc_wq); err = mddev_lock(mddev); if (err) return err; @@ -4545,6 +4694,70 @@ action_show(struct mddev *mddev, char *page) return sprintf(page, "%s\n", type); } +static void stop_sync_thread(struct mddev *mddev) +{ + if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + return; + + if (mddev_lock(mddev)) + return; + + /* + * Check again in case MD_RECOVERY_RUNNING is cleared before lock is + * held. + */ + if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { + mddev_unlock(mddev); + return; + } + + if (work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); + + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* + * Thread might be blocked waiting for metadata update which will now + * never happen. 
+ */ + if (mddev->sync_thread) + wake_up_process(mddev->sync_thread->tsk); + + mddev_unlock(mddev); +} + +void idle_sync_thread(struct mddev *mddev) +{ + int sync_seq = atomic_read(&mddev->sync_seq); + + if (mutex_lock_interruptible(&mddev->sync_mutex)) + return; + + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + stop_sync_thread(mddev); + + wait_event_interruptible(resync_wait, + sync_seq != atomic_read(&mddev->sync_seq) || + !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + + mutex_unlock(&mddev->sync_mutex); +} +EXPORT_SYMBOL_GPL(idle_sync_thread); + +void frozen_sync_thread(struct mddev *mddev) +{ + if (mutex_lock_interruptible(&mddev->sync_mutex)) + return; + + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + stop_sync_thread(mddev); + + wait_event_interruptible(resync_wait, mddev->sync_thread == NULL && + !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + + mutex_unlock(&mddev->sync_mutex); +} +EXPORT_SYMBOL_GPL(frozen_sync_thread); + static ssize_t action_store(struct mddev *mddev, const char *page, size_t len) { @@ -4552,21 +4765,11 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; - if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { - if (cmd_match(page, "frozen")) - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - else - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && - mddev_lock(mddev) == 0) { - flush_workqueue(md_misc_wq); - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev); - } - mddev_unlock(mddev); - } - } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + if (cmd_match(page, "idle")) + idle_sync_thread(mddev); + else if (cmd_match(page, "frozen")) + frozen_sync_thread(mddev); + else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; else if (cmd_match(page, "resync")) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -4589,7 +4792,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) } if (err) return err; - sysfs_notify(&mddev->kobj, NULL, "degraded"); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); } else { if (cmd_match(page, "check")) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); @@ -4732,7 +4935,7 @@ static ssize_t sync_speed_show(struct mddev *mddev, char *page) { unsigned long resync, dt, db; - if (mddev->curr_resync == 0) + if (mddev->curr_resync == MD_RESYNC_NONE) return sprintf(page, "none\n"); resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); dt = (jiffies - mddev->resync_mark) / HZ; @@ -4751,8 +4954,8 @@ sync_completed_show(struct mddev *mddev, char *page) if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return sprintf(page, "none\n"); - if (mddev->curr_resync == 1 || - mddev->curr_resync == 2) + if (mddev->curr_resync == MD_RESYNC_YIELDED || + mddev->curr_resync == MD_RESYNC_DELAYED) return sprintf(page, "delayed\n"); if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || @@ -5214,6 +5417,8 @@ static void md_free(struct kobject *ko) if (mddev->sysfs_state) sysfs_put(mddev->sysfs_state); + if (mddev->sysfs_level) + sysfs_put(mddev->sysfs_level); if (mddev->gendisk) del_gendisk(mddev->gendisk); @@ -5275,7 +5480,7 @@ static int md_alloc(dev_t dev, char *name) * writing to /sys/module/md_mod/parameters/new_array. 
*/ static DEFINE_MUTEX(disks_mutex); - struct mddev *mddev = mddev_find(dev); + struct mddev *mddev = mddev_find_or_alloc(dev); struct gendisk *disk; int partitioned; int shift; @@ -5293,6 +5498,7 @@ static int md_alloc(dev_t dev, char *name) * completely removed (mddev_delayed_delete). */ flush_workqueue(md_misc_wq); + flush_workqueue(md_rdev_misc_wq); mutex_lock(&disks_mutex); error = -EEXIST; @@ -5352,10 +5558,6 @@ static int md_alloc(dev_t dev, char *name) */ disk->flags |= GENHD_FL_EXT_DEVT; mddev->gendisk = disk; - /* As soon as we call add_disk(), another thread could get - * through to md_open, so make sure it doesn't get too far - */ - mutex_lock(&mddev->open_mutex); add_disk(disk); error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); @@ -5370,13 +5572,13 @@ static int md_alloc(dev_t dev, char *name) if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_bitmap_group)) pr_debug("pointless warning\n"); - mutex_unlock(&mddev->open_mutex); abort: - mutex_unlock(&disks_mutex); if (!error && mddev->kobj.sd) { kobject_uevent(&mddev->kobj, KOBJ_ADD); mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); + mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); } + mutex_unlock(&disks_mutex); mddev_put(mddev); return error; } @@ -5452,7 +5654,9 @@ int md_run(struct mddev *mddev) if (!mddev->raid_disks) { if (!mddev->persistent) return -EINVAL; - analyze_sbs(mddev); + err = analyze_sbs(mddev); + if (err) + return -EINVAL; } if (mddev->level != LEVEL_NONE) @@ -5517,22 +5721,6 @@ int md_run(struct mddev *mddev) if (err) return err; } - if (mddev->flush_pool == NULL) { - mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc, - flush_info_free, mddev); - if (!mddev->flush_pool) { - err = -ENOMEM; - goto abort; - } - } - if (mddev->flush_bio_pool == NULL) { - mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc, - flush_bio_free, mddev); - if (!mddev->flush_bio_pool) { - err = -ENOMEM; - goto abort; - } - } spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -5655,6 +5843,8 @@ int md_run(struct mddev *mddev) pr_warn("md: cannot register extra attributes for %s\n", mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); + mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); + mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); } else if (mddev->ro == 2) /* auto-readonly not meaningful */ mddev->ro = 0; @@ -5685,22 +5875,14 @@ int md_run(struct mddev *mddev) if (mddev->sb_flags) md_update_sb(mddev, 0); + if (mddev->queue && pers->level == 10) + queue_init_dispatch_async_cpus(mddev->queue, 1); md_new_event(mddev); - sysfs_notify_dirent_safe(mddev->sysfs_state); - sysfs_notify_dirent_safe(mddev->sysfs_action); - sysfs_notify(&mddev->kobj, NULL, "degraded"); return 0; abort: - if (mddev->flush_bio_pool) { - mempool_destroy(mddev->flush_bio_pool); - mddev->flush_bio_pool = NULL; - } - if (mddev->flush_pool){ - mempool_destroy(mddev->flush_pool); - mddev->flush_pool = NULL; - } - + bioset_exit(&mddev->bio_set); + bioset_exit(&mddev->sync_set); return err; } EXPORT_SYMBOL_GPL(md_run); @@ -5709,6 +5891,7 @@ static int do_md_run(struct mddev *mddev) { int err; + set_bit(MD_NOT_READY, &mddev->flags); err = md_run(mddev); if (err) goto out; @@ -5729,9 +5912,14 @@ static int do_md_run(struct mddev *mddev) set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + 
clear_bit(MD_NOT_READY, &mddev->flags); mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); + sysfs_notify_dirent_safe(mddev->sysfs_state); + sysfs_notify_dirent_safe(mddev->sysfs_action); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); out: + clear_bit(MD_NOT_READY, &mddev->flags); return err; } @@ -5844,7 +6032,8 @@ static void md_clean(struct mddev *mddev) static void __md_stop_writes(struct mddev *mddev) { set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - flush_workqueue(md_misc_wq); + if (work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); @@ -5879,7 +6068,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { md_bitmap_wait_behind_writes(mddev); - if (mddev->pers && mddev->pers->quiesce) { + if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } @@ -5894,11 +6083,13 @@ static void __md_stop(struct mddev *mddev) md_bitmap_destroy(mddev); mddev_detach(mddev); /* Ensure ->event_work is done */ - flush_workqueue(md_misc_wq); + if (mddev->event_work.func) + flush_workqueue(md_misc_wq); spin_lock(&mddev->lock); mddev->pers = NULL; spin_unlock(&mddev->lock); - pers->free(mddev, mddev->private); + if (mddev->private) + pers->free(mddev, mddev->private); mddev->private = NULL; if (pers->sync_request && mddev->to_remove == NULL) mddev->to_remove = &md_redundancy_group; @@ -5912,14 +6103,6 @@ void md_stop(struct mddev *mddev) * This is called from dm-raid */ __md_stop(mddev); - if (mddev->flush_bio_pool) { - mempool_destroy(mddev->flush_bio_pool); - mddev->flush_bio_pool = NULL; - } - if (mddev->flush_pool) { - mempool_destroy(mddev->flush_pool); - mddev->flush_pool = NULL; - } bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); } @@ -6156,11 +6339,9 @@ static void autorun_devices(int part) md_probe(dev, NULL, NULL); mddev = mddev_find(dev); - if (!mddev || !mddev->gendisk) { - if (mddev) - mddev_put(mddev); + if (!mddev) break; - } + if (mddev_lock(mddev)) pr_warn("md: %s locked, cannot run\n", mdname(mddev)); else if (mddev->raid_disks || mddev->major_version @@ -6417,7 +6598,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) rdev->saved_raid_disk = rdev->raid_disk; } else super_types[mddev->major_version]. 
- validate_super(mddev, rdev); + validate_super(mddev, NULL/*freshest*/, rdev); if ((info->state & (1<<MD_DISK_SYNC)) && rdev->raid_disk != info->raid_disk) { /* This was a hot-add request, but events doesn't @@ -6567,8 +6748,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) goto busy; kick_rdev: - if (mddev_is_clustered(mddev)) - md_cluster_ops->remove_disk(mddev, rdev); + if (mddev_is_clustered(mddev)) { + if (md_cluster_ops->remove_disk(mddev, rdev)) + goto busy; + } md_kick_rdev_from_array(rdev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); @@ -6797,6 +6980,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->external = 0; mddev->layout = info->layout; + if (mddev->level == 0) + /* Cannot trust RAID0 layout info here */ + mddev->layout = -1; mddev->chunk_sectors = info->chunk_size >> 9; if (mddev->persistent) { @@ -6895,6 +7081,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks) return -EINVAL; if (mddev->sync_thread || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || + test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || mddev->reshape_position != MaxSector) return -EBUSY; @@ -7194,9 +7381,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, } - if (cmd == ADD_NEW_DISK) - /* need to ensure md_delayed_delete() has completed */ - flush_workqueue(md_misc_wq); + if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) + flush_workqueue(md_rdev_misc_wq); if (cmd == HOT_REMOVE_DISK) /* need to ensure recovery thread has run */ @@ -7214,8 +7400,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, err = -EBUSY; goto out; } - WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); - set_bit(MD_CLOSING, &mddev->flags); + if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { + mutex_unlock(&mddev->open_mutex); + err = -EBUSY; + goto out; + } did_set_md_closing = true; mutex_unlock(&mddev->open_mutex); sync_blockdev(bdev); @@ -7451,9 +7640,9 @@ static int md_open(struct block_device *bdev, fmode_t mode) */ mddev_put(mddev); /* Wait until bdev->bd_disk is definitely gone */ - flush_workqueue(md_misc_wq); - /* Then retry the open from the top */ - return -ERESTARTSYS; + if (work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); + return -EBUSY; } BUG_ON(mddev != bdev->bd_disk->private_data); @@ -7595,17 +7784,22 @@ EXPORT_SYMBOL(md_register_thread); void md_unregister_thread(struct md_thread **threadp) { - struct md_thread *thread = *threadp; - if (!thread) - return; - pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); - /* Locking ensures that mddev_unlock does not wake_up a + struct md_thread *thread; + + /* + * Locking ensures that mddev_unlock does not wake_up a * non-existent thread */ spin_lock(&pers_lock); + thread = *threadp; + if (!thread) { + spin_unlock(&pers_lock); + return; + } *threadp = NULL; spin_unlock(&pers_lock); + pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); kthread_stop(thread->tsk); kfree(thread); } @@ -7655,9 +7849,9 @@ static void status_unused(struct seq_file *seq) static int status_resync(struct seq_file *seq, struct mddev *mddev) { sector_t max_sectors, resync, res; - unsigned long dt, db; - sector_t rt; - int scale; + unsigned long dt, db = 0; + sector_t rt, curr_mark_cnt, resync_mark_cnt; + int scale, recovery_active; unsigned int per_milli; if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || @@ -7667,16 +7861,26 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev) max_sectors = mddev->dev_sectors; resync = mddev->curr_resync; - if (resync <= 3) { +
if (resync < MD_RESYNC_ACTIVE) { if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) /* Still cleaning up */ resync = max_sectors; - } else if (resync > max_sectors) + } else if (resync > max_sectors) { resync = max_sectors; - else - resync -= atomic_read(&mddev->recovery_active); + } else { + res = atomic_read(&mddev->recovery_active); + /* + * Resync has started, but the subtraction has overflowed or + * yielded one of the special values. Force it to active to + * ensure the status reports an active resync. + */ + if (resync < res || resync - res < MD_RESYNC_ACTIVE) + resync = MD_RESYNC_ACTIVE; + else + resync -= res; + } - if (resync == 0) { + if (resync == MD_RESYNC_NONE) { if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { struct md_rdev *rdev; @@ -7700,7 +7904,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev) } return 0; } - if (resync < 3) { + if (resync < MD_RESYNC_ACTIVE) { seq_printf(seq, "\tresync=DELAYED"); return 1; } @@ -7746,22 +7950,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev) * db: blocks written from mark until now * rt: remaining time * - * rt is a sector_t, so could be 32bit or 64bit. - * So we divide before multiply in case it is 32bit and close - * to the limit. - * We scale the divisor (db) by 32 to avoid losing precision - * near the end of resync when the number of remaining sectors - * is close to 'db'. - * We then divide rt by 32 after multiplying by db to compensate. - * The '+1' avoids division by zero if db is very small. + * rt is a sector_t, which is always 64bit now. We are keeping + * the original algorithm, but it is not really necessary. + * + * Original algorithm: + * So we divide before multiply in case it is 32bit and close + * to the limit. + * We scale the divisor (db) by 32 to avoid losing precision + * near the end of resync when the number of remaining sectors + * is close to 'db'. + * We then divide rt by 32 after multiplying by db to compensate. + * The '+1' avoids division by zero if db is very small. */ dt = ((jiffies - mddev->resync_mark) / HZ); if (!dt) dt++; - db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) - - mddev->resync_mark_cnt; + + curr_mark_cnt = mddev->curr_mark_cnt; + recovery_active = atomic_read(&mddev->recovery_active); + resync_mark_cnt = mddev->resync_mark_cnt; + + if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) + db = curr_mark_cnt - (recovery_active + resync_mark_cnt); rt = max_sectors - resync; /* number of remaining sectors */ - sector_div(rt, db/32+1); + rt = div64_u64(rt, db/32+1); rt *= dt; rt >>= 5; @@ -7778,7 +7990,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos) loff_t l = *pos; struct mddev *mddev; - if (l >= 0x10000) + if (l == 0x10000) { + ++*pos; + return (void *)2; + } + if (l > 0x10000) return NULL; if (!l--) /* header */ @@ -8054,14 +8270,15 @@ static int is_mddev_idle(struct mddev *mddev, int init) { struct md_rdev *rdev; int idle; - int curr_events; + long long curr_events; idle = 1; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; - curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - - atomic_read(&disk->sync_io); + curr_events = + (long long)part_stat_read_accum(&disk->part0, sectors) - + atomic64_read(&disk->sync_io_sectors); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. 
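[Review note] The status_resync()/md_do_sync() hunks above replace the overloaded magic values of mddev->curr_resync (0/1/2/3) with named constants; the definitions are introduced in the md.h hunk further down. For quick reference while reading these hunks, the mapping is:

	enum {
		MD_RESYNC_NONE    = 0,	/* no resync in progress */
		MD_RESYNC_YIELDED = 1,	/* yielded to a conflicting resync */
		MD_RESYNC_DELAYED = 2,	/* checking for a conflicting resync */
		MD_RESYNC_ACTIVE  = 3,	/* >= this: active, value is the current sector */
	};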
@@ -8292,13 +8509,7 @@ void md_do_sync(struct md_thread *thread) mddev->last_sync_action = action ?: desc; - /* we overload curr_resync somewhat here. - * 0 == not engaged in resync at all - * 2 == checking that there is no conflict with another sync - * 1 == like 2, but have yielded to allow conflicting resync to - * commense - * other == active in resync - this many blocks - * + /* * Before starting a resync we must have set curr_resync to * 2, and then checked that every "conflicting" array has curr_resync * less than ours. When we find one that is the same or higher @@ -8310,7 +8521,7 @@ void md_do_sync(struct md_thread *thread) do { int mddev2_minor = -1; - mddev->curr_resync = 2; + mddev->curr_resync = MD_RESYNC_DELAYED; try_again: if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) @@ -8322,12 +8533,14 @@ void md_do_sync(struct md_thread *thread) && mddev2->curr_resync && match_mddev_units(mddev, mddev2)) { DEFINE_WAIT(wq); - if (mddev < mddev2 && mddev->curr_resync == 2) { + if (mddev < mddev2 && + mddev->curr_resync == MD_RESYNC_DELAYED) { /* arbitrarily yield */ - mddev->curr_resync = 1; + mddev->curr_resync = MD_RESYNC_YIELDED; wake_up(&resync_wait); } - if (mddev > mddev2 && mddev->curr_resync == 1) + if (mddev > mddev2 && + mddev->curr_resync == MD_RESYNC_YIELDED) /* no need to wait here, we can wait the next * time 'round when curr_resync == 2 */ @@ -8355,7 +8568,7 @@ void md_do_sync(struct md_thread *thread) finish_wait(&resync_wait, &wq); } } - } while (mddev->curr_resync < 2); + } while (mddev->curr_resync < MD_RESYNC_DELAYED); j = 0; if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { @@ -8431,9 +8644,9 @@ void md_do_sync(struct md_thread *thread) desc, mdname(mddev)); mddev->curr_resync = j; } else - mddev->curr_resync = 3; /* no longer delayed */ + mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ mddev->curr_resync_completed = j; - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); md_new_event(mddev); update_time = jiffies; @@ -8461,7 +8674,7 @@ void md_do_sync(struct md_thread *thread) mddev->recovery_cp = j; update_time = jiffies; set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); } while (j >= mddev->resync_max && @@ -8566,14 +8779,14 @@ void md_do_sync(struct md_thread *thread) if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && - mddev->curr_resync > 3) { + mddev->curr_resync >= MD_RESYNC_ACTIVE) { mddev->curr_resync_completed = mddev->curr_resync; - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); } mddev->pers->sync_request(mddev, max_sectors, &skipped); if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && - mddev->curr_resync > 3) { + mddev->curr_resync > MD_RESYNC_ACTIVE) { if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { if (mddev->curr_resync >= mddev->recovery_cp) { @@ -8636,7 +8849,7 @@ void md_do_sync(struct md_thread *thread) } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev->resync_min = mddev->curr_resync_completed; set_bit(MD_RECOVERY_DONE, &mddev->recovery); - mddev->curr_resync = 0; + mddev->curr_resync = MD_RESYNC_NONE; spin_unlock(&mddev->lock); wake_up(&resync_wait); @@ -8696,7 +8909,7 @@ static int remove_and_add_spares(struct mddev *mddev, } if (removed && mddev->kobj.sd) - sysfs_notify(&mddev->kobj, 
NULL, "degraded"); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); if (this && removed) goto no_add; @@ -8742,11 +8955,10 @@ static int remove_and_add_spares(struct mddev *mddev, static void md_start_sync(struct work_struct *ws) { struct mddev *mddev = container_of(ws, struct mddev, del_work); + struct md_thread *sync_thread; - mddev->sync_thread = md_register_thread(md_do_sync, - mddev, - "resync"); - if (!mddev->sync_thread) { + sync_thread = md_register_thread(md_do_sync, mddev, "resync"); + if (!sync_thread) { pr_warn("%s: could not start resync thread...\n", mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ @@ -8760,8 +8972,12 @@ static void md_start_sync(struct work_struct *ws) &mddev->recovery)) if (mddev->sysfs_action) sysfs_notify_dirent_safe(mddev->sysfs_action); - } else + } else { + spin_lock(&pers_lock); + mddev->sync_thread = sync_thread; md_wakeup_thread(mddev->sync_thread); + spin_unlock(&pers_lock); + } sysfs_notify_dirent_safe(mddev->sysfs_action); md_new_event(mddev); } @@ -8790,6 +9006,18 @@ static void md_start_sync(struct work_struct *ws) */ void md_check_recovery(struct mddev *mddev) { + if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { + /* Write superblock - thread that called mddev_suspend() + * holds reconfig_mutex for us. + */ + set_bit(MD_UPDATING_SB, &mddev->flags); + smp_mb__after_atomic(); + if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) + md_update_sb(mddev, 0); + clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); + wake_up(&mddev->sb_wait); + } + if (mddev->suspended) return; @@ -8819,6 +9047,7 @@ void md_check_recovery(struct mddev *mddev) if (mddev_trylock(mddev)) { int spares = 0; + bool try_set_sync = mddev->safemode != 0; if (!mddev->external && mddev->safemode == 1) mddev->safemode = 0; @@ -8853,18 +9082,18 @@ void md_check_recovery(struct mddev *mddev) } if (mddev_is_clustered(mddev)) { - struct md_rdev *rdev; + struct md_rdev *rdev, *tmp; /* kick the device if another node issued a * remove disk. */ - rdev_for_each(rdev, mddev) { + rdev_for_each_safe(rdev, tmp, mddev) { if (test_and_clear_bit(ClusterRemove, &rdev->flags) && rdev->raid_disk < 0) md_kick_rdev_from_array(rdev); } } - if (!mddev->external && !mddev->in_sync) { + if (try_set_sync && !mddev->external && !mddev->in_sync) { spin_lock(&mddev->lock); set_in_sync(mddev); spin_unlock(&mddev->lock); @@ -8949,16 +9178,6 @@ void md_check_recovery(struct mddev *mddev) unlock: wake_up(&mddev->sb_wait); mddev_unlock(mddev); - } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { - /* Write superblock - thread that called mddev_suspend() - * holds reconfig_mutex for us. 
- */ - set_bit(MD_UPDATING_SB, &mddev->flags); - smp_mb__after_atomic(); - if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) - md_update_sb(mddev, 0); - clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); - wake_up(&mddev->sb_wait); } } EXPORT_SYMBOL(md_check_recovery); @@ -8969,13 +9188,15 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); + atomic_inc(&mddev->sync_seq); + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && + mddev->degraded != mddev->raid_disks) { /* success...*/ /* activate any spares */ if (mddev->pers->spare_active(mddev)) { - sysfs_notify(&mddev->kobj, NULL, - "degraded"); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } } @@ -9002,13 +9223,13 @@ void md_reap_sync_thread(struct mddev *mddev) clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); - wake_up(&resync_wait); /* flag recovery needed just to double check */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_action); md_new_event(mddev); if (mddev->event_work.func) queue_work(md_misc_wq, &mddev->event_work); + wake_up(&resync_wait); } EXPORT_SYMBOL(md_reap_sync_thread); @@ -9054,8 +9275,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, if (rv == 0) { /* Make sure they get written out promptly */ if (test_bit(ExternalBbl, &rdev->flags)) - sysfs_notify(&rdev->kobj, NULL, - "unacknowledged_bad_blocks"); + sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); sysfs_notify_dirent_safe(rdev->sysfs_state); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); @@ -9076,7 +9296,7 @@ int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, s += rdev->data_offset; rv = badblocks_clear(&rdev->badblocks, s, sectors); if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) - sysfs_notify(&rdev->kobj, NULL, "bad_blocks"); + sysfs_notify_dirent_safe(rdev->sysfs_badblocks); return rv; } EXPORT_SYMBOL_GPL(rdev_clear_badblocks); @@ -9105,7 +9325,7 @@ static int md_notify_reboot(struct notifier_block *this, * driver, we do want to have a safe RAID driver ... 
*/ if (need_delay) - mdelay(1000*1); + msleep(1000); return NOTIFY_DONE; } @@ -9135,6 +9355,10 @@ static int __init md_init(void) if (!md_misc_wq) goto err_misc_wq; + md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0); + if (!md_rdev_misc_wq) + goto err_rdev_misc_wq; + if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) goto err_md; @@ -9156,6 +9380,8 @@ static int __init md_init(void) err_mdp: unregister_blkdev(MD_MAJOR, "md"); err_md: + destroy_workqueue(md_rdev_misc_wq); +err_rdev_misc_wq: destroy_workqueue(md_misc_wq); err_misc_wq: destroy_workqueue(md_wq); @@ -9166,7 +9392,7 @@ static int __init md_init(void) static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) { struct mdp_superblock_1 *sb = page_address(rdev->sb_page); - struct md_rdev *rdev2; + struct md_rdev *rdev2, *tmp; int role, ret; char b[BDEVNAME_SIZE]; @@ -9183,7 +9409,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) } /* Check for change of roles in the active devices */ - rdev_for_each(rdev2, mddev) { + rdev_for_each_safe(rdev2, tmp, mddev) { if (test_bit(Faulty, &rdev2->flags)) continue; @@ -9225,8 +9451,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) } } - if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) - update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); + if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { + ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); + if (ret) + pr_warn("md: updating array disks failed. %d\n", ret); + } /* Finally set the event to be up to date */ mddev->events = le64_to_cpu(sb->events); @@ -9273,7 +9502,7 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) if (rdev->recovery_offset == MaxSector && !test_bit(In_sync, &rdev->flags) && mddev->pers->spare_active(mddev)) - sysfs_notify(&mddev->kobj, NULL, "degraded"); + sysfs_notify_dirent_safe(mddev->sysfs_degraded); put_page(swapout); return 0; @@ -9281,16 +9510,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) void md_reload_sb(struct mddev *mddev, int nr) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; int err; /* Find the rdev */ - rdev_for_each_rcu(rdev, mddev) { - if (rdev->desc_nr == nr) + rdev_for_each_rcu(iter, mddev) { + if (iter->desc_nr == nr) { + rdev = iter; break; + } } - if (!rdev || rdev->desc_nr != nr) { + if (!rdev) { pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); return; } @@ -9415,6 +9646,7 @@ static __exit void md_exit(void) * destroy_workqueue() below will wait for that to complete. */ } + destroy_workqueue(md_rdev_misc_wq); destroy_workqueue(md_misc_wq); destroy_workqueue(md_wq); } diff --git a/drivers/md/md.h b/drivers/md/md.h index 8afd6bfdbfb9b5934097b3d0b3378dbd0d53f778..84fbf9062053a3c0fe0f2a85b6ca92869a40ca3a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -47,8 +47,16 @@ struct md_rdev { sector_t sectors; /* Device size (in 512bytes sectors) */ struct mddev *mddev; /* RAID array if running */ - int last_events; /* IO event timestamp */ + /* + * IO event timestamp, this pos has 64 bit space, + * can enlarge it solving its small size. 
+ */ +#ifndef __GENKSYMS__ + long long last_events; +#else + int last_events; +#endif /* * If meta_bdev is non-NULL, it means that a separate device is * being used to store the metadata (superblock/bitmap) which @@ -120,7 +128,10 @@ struct md_rdev { struct kernfs_node *sysfs_state; /* handle for 'state' * sysfs entry */ - + /* handle for 'unacknowledged_bad_blocks' sysfs dentry */ + struct kernfs_node *sysfs_unack_badblocks; + /* handle for 'bad_blocks' sysfs dentry */ + struct kernfs_node *sysfs_badblocks; struct badblocks badblocks; struct { @@ -200,6 +211,10 @@ enum flag_bits { * it didn't fail, so don't use FailFast * any more for metadata */ + WantRemove, /* Before set conf->mirrors[i] as NULL, + * we set the bit first, avoiding access the + * conf->mirrors[i] after it set NULL. + */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, @@ -243,6 +258,9 @@ enum mddev_flags { MD_UPDATING_SB, /* md_check_recovery is updating the metadata * without explicitly holding reconfig_mutex. */ + MD_NOT_READY, /* do_md_run() is active, so 'array_state' + * must not report that array is ready yet + */ }; enum mddev_sb_flags { @@ -252,17 +270,19 @@ enum mddev_sb_flags { MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define NR_FLUSH_INFOS 8 -#define NR_FLUSH_BIOS 64 -struct flush_info { - struct bio *bio; - struct mddev *mddev; - struct work_struct flush_work; - atomic_t flush_pending; -}; -struct flush_bio { - struct flush_info *fi; - struct md_rdev *rdev; +/* + * mddev->curr_resync stores the current sector of the resync but + * also has some overloaded values. + */ +enum { + /* No resync in progress */ + MD_RESYNC_NONE = 0, + /* Yielded to allow another conflicting resync to commence */ + MD_RESYNC_YIELDED = 1, + /* Delayed to check that there is no conflict with another sync */ + MD_RESYNC_DELAYED = 2, + /* Any value greater than or equal to this is in an active resync */ + MD_RESYNC_ACTIVE = 3, }; struct mddev { @@ -408,6 +428,9 @@ struct mddev { * file in sysfs. */ struct kernfs_node *sysfs_action; /* handle for 'sync_action' */ + struct kernfs_node *sysfs_completed; /*handle for 'sync_completed' */ + struct kernfs_node *sysfs_degraded; /*handle for 'degraded' */ + struct kernfs_node *sysfs_level; /*handle for 'level' */ struct work_struct del_work; /* used for delayed sysfs removal */ @@ -470,14 +493,29 @@ struct mddev { * metadata and bitmap writes */ - mempool_t *flush_pool; - mempool_t *flush_bio_pool; + /* Generic flush handling. + * The last to finish preflush schedules a worker to submit + * the rest of the request (without the REQ_PREFLUSH flag). + */ + struct bio *flush_bio; + atomic_t flush_pending; + ktime_t start_flush, last_flush; /* last_flush is when the last completed + * flush was started. 
+ */ + struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ bool has_superblocks:1; + +#ifndef __GENKSYMS__ + /* Used to synchronize idle and frozen for action_store() */ + struct mutex sync_mutex; + /* The sequence number for sync thread */ + atomic_t sync_seq; +#endif }; enum recovery_flags { @@ -520,12 +558,12 @@ extern void mddev_unlock(struct mddev *mddev); static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + atomic64_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io_sectors); } static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bio->bi_disk->sync_io); + atomic64_add(nr_sectors, &bio->bi_disk->sync_io_sectors); } struct md_personality @@ -534,7 +572,7 @@ struct md_personality int level; struct list_head list; struct module *owner; - bool (*make_request)(struct mddev *mddev, struct bio *bio); + bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio); /* * start up works that do NOT require md_thread. tasks that * requires md_thread should go into start() @@ -686,7 +724,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); extern int mddev_congested(struct mddev *mddev, int bits); -extern void md_flush_request(struct mddev *mddev, struct bio *bio); +extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); extern int md_super_wait(struct mddev *mddev); @@ -714,6 +752,8 @@ extern void md_rdev_clear(struct md_rdev *rdev); extern void md_handle_request(struct mddev *mddev, struct bio *bio); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); +extern void idle_sync_thread(struct mddev *mddev); +extern void frozen_sync_thread(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 492a3f8ac1199b841215758119e4167b46d9b71e..11dd247cf85da73f1816acbcf217331bc5417703 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm) } EXPORT_SYMBOL_GPL(dm_block_manager_destroy); +void dm_block_manager_reset(struct dm_block_manager *bm) +{ + dm_bufio_client_reset(bm->bufio); +} +EXPORT_SYMBOL_GPL(dm_block_manager_reset); + unsigned dm_bm_block_size(struct dm_block_manager *bm) { return dm_bufio_get_block_size(bm->bufio); @@ -494,7 +500,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); @@ -563,7 +569,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); @@ -603,7 +609,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock); int dm_bm_flush(struct dm_block_manager *bm) { - if 
(bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; return dm_bufio_write_dirty_buffers(bm->bufio); @@ -617,19 +623,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) bool dm_bm_is_read_only(struct dm_block_manager *bm) { - return bm->read_only; + return (bm ? bm->read_only : true); } EXPORT_SYMBOL_GPL(dm_bm_is_read_only); void dm_bm_set_read_only(struct dm_block_manager *bm) { - bm->read_only = true; + if (bm) + bm->read_only = true; } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); void dm_bm_set_read_write(struct dm_block_manager *bm) { - bm->read_only = false; + if (bm) + bm->read_only = false; } EXPORT_SYMBOL_GPL(dm_bm_set_read_write); diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index e728937f376a365fd9c61d1b8790b7166d561225..644cd7015e2894b1ff377ff19ab54fbb15b61deb 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create( struct block_device *bdev, unsigned block_size, unsigned max_held_per_thread); void dm_block_manager_destroy(struct dm_block_manager *bm); +void dm_block_manager_reset(struct dm_block_manager *bm); unsigned dm_bm_block_size(struct dm_block_manager *bm); dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm); diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index a240990a7f3339047e45a4b45087a8a3d0ca30fe..5673f8eb5f88f8f3a67ba5ab80029fe721f03e35 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -34,12 +34,12 @@ struct node_header { __le32 max_entries; __le32 value_size; __le32 padding; -} __packed; +} __attribute__((packed, aligned(8))); struct btree_node { struct node_header header; __le64 keys[0]; -} __packed; +} __attribute__((packed, aligned(8))); /* diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 21ea537bd55e9984f7cfe5b908a3d6bcad9038e9..63f2baed3c8a6e8d5f4d85abf2dd39e5951ea70a 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, struct btree_node *right = r->n; uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_right = le32_to_cpu(right->header.nr_entries); - unsigned threshold = 2 * merge_threshold(left) + 1; + /* + * Ensure the number of entries in each child will be greater + * than or equal to (max_entries / 3 + 1), so no matter which + * child is used for removal, the number will still be not + * less than (max_entries / 3). 
+ */ + unsigned int threshold = 2 * (merge_threshold(left) + 1); if (nr_left + nr_right < threshold) { /* @@ -417,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s, memcpy(n, dm_block_data(child), dm_bm_block_size(dm_tm_get_bm(info->tm))); - dm_tm_unlock(info->tm, child); dm_tm_dec(info->tm, dm_block_location(child)); + dm_tm_unlock(info->tm, child); return 0; } @@ -543,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, delete_at(n, index); } - *new_root = shadow_root(&spine); + if (!r) + *new_root = shadow_root(&spine); exit_shadow_spine(&spine); return r; diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index b27b8091a1ca4f649240e074a7bee564285dd30d..1b90d1ed03579bce411f70a34d1c55d99b8367cc 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -30,8 +30,8 @@ static void node_prepare_for_write(struct dm_block_validator *v, h->csum = cpu_to_le32(dm_bm_checksum(&h->flags, block_size - sizeof(__le32), BTREE_CSUM_XOR)); - - BUG_ON(node_check(v, b, 4096)); + if (node_check(v, b, 4096)) + DMWARN_LIMIT("%s node_check failed", __func__); } static int node_check(struct dm_block_validator *v, diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 58b319757b1e5a1c274bd98681d6ce229bbbabbe..6383afb88f319b813d8df26082aafa9df29dd93c 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -83,14 +83,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, } static int insert_at(size_t value_size, struct btree_node *node, unsigned index, - uint64_t key, void *value) - __dm_written_to_disk(value) + uint64_t key, void *value) + __dm_written_to_disk(value) { uint32_t nr_entries = le32_to_cpu(node->header.nr_entries); + uint32_t max_entries = le32_to_cpu(node->header.max_entries); __le64 key_le = cpu_to_le64(key); if (index > nr_entries || - index >= le32_to_cpu(node->header.max_entries)) { + index >= max_entries || + nr_entries >= max_entries) { DMERR("too many entries in btree node for insert"); __dm_unbless_for_disk(value); return -ENOMEM; @@ -628,39 +630,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) new_parent = shadow_current(s); + pn = dm_block_data(new_parent); + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? 
+ sizeof(__le64) : s->info->value_type.size; + + /* create & init the left block */ r = new_block(s->info, &left); if (r < 0) return r; + ln = dm_block_data(left); + nr_left = le32_to_cpu(pn->header.nr_entries) / 2; + + ln->header.flags = pn->header.flags; + ln->header.nr_entries = cpu_to_le32(nr_left); + ln->header.max_entries = pn->header.max_entries; + ln->header.value_size = pn->header.value_size; + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); + memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); + + /* create & init the right block */ r = new_block(s->info, &right); if (r < 0) { unlock_block(s->info, left); return r; } - pn = dm_block_data(new_parent); - ln = dm_block_data(left); rn = dm_block_data(right); - - nr_left = le32_to_cpu(pn->header.nr_entries) / 2; nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; - ln->header.flags = pn->header.flags; - ln->header.nr_entries = cpu_to_le32(nr_left); - ln->header.max_entries = pn->header.max_entries; - ln->header.value_size = pn->header.value_size; - rn->header.flags = pn->header.flags; rn->header.nr_entries = cpu_to_le32(nr_right); rn->header.max_entries = pn->header.max_entries; rn->header.value_size = pn->header.value_size; - - memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); - - size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? - sizeof(__le64) : s->info->value_type.size; - memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), nr_right * size); diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 0a3b8ae4a29c6789b400e91a912f90b8ced1a806..5115a27196038dbb618444b1c82565b34fc79a4c 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -279,6 +279,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) struct disk_index_entry ie_disk; struct dm_block *blk; + if (b >= ll->nr_blocks) { + DMERR_LIMIT("metadata block out of bounds"); + return -EINVAL; + } + b = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) @@ -337,6 +342,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, */ begin = do_div(index_begin, ll->entries_per_block); end = do_div(end, ll->entries_per_block); + if (end == 0) + end = ll->entries_per_block; for (i = index_begin; i < index_end; i++, begin = 0) { struct dm_block *blk; @@ -382,6 +389,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, return -ENOSPC; } +int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, + dm_block_t begin, dm_block_t end, dm_block_t *b) +{ + int r; + uint32_t count; + + do { + r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b); + if (r) + break; + + /* double check this block wasn't used in the old transaction */ + if (*b >= old_ll->nr_blocks) + count = 0; + else { + r = sm_ll_lookup(old_ll, *b, &count); + if (r) + break; + + if (count) + begin = *b + 1; + } + } while (count); + + return r; +} + static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, int (*mutator)(void *context, uint32_t old, uint32_t *new), void *context, enum allocation_event *ev) diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index b3078d5eda0c335c2986b84128d3e5bde01e8b8e..87e17909ef5210729ff13b5594d0be82137deb36 100644 --- 
a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -33,7 +33,7 @@ struct disk_index_entry { __le64 blocknr; __le32 nr_free; __le32 none_free_before; -} __packed; +} __attribute__ ((packed, aligned(8))); #define MAX_METADATA_BITMAPS 255 @@ -43,7 +43,7 @@ struct disk_metadata_index { __le64 blocknr; struct disk_index_entry index[MAX_METADATA_BITMAPS]; -} __packed; +} __attribute__ ((packed, aligned(8))); struct ll_disk; @@ -86,7 +86,7 @@ struct disk_sm_root { __le64 nr_allocated; __le64 bitmap_root; __le64 ref_count_root; -} __packed; +} __attribute__ ((packed, aligned(8))); #define ENTRIES_PER_BYTE 4 @@ -94,7 +94,7 @@ struct disk_bitmap_header { __le32 csum; __le32 not_used; __le64 blocknr; -} __packed; +} __attribute__ ((packed, aligned(8))); enum allocation_event { SM_NONE, @@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result); int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result); int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, dm_block_t end, dm_block_t *result); +int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, + dm_block_t begin, dm_block_t end, dm_block_t *result); int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev); int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev); diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index 32adf6b4a9c7097e12796f8b396f80bfc8867c0a..bf4c5e2ccb6ffc8d8e0fa5ba049429bdb6dc18d0 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) enum allocation_event ev; struct sm_disk *smd = container_of(sm, struct sm_disk, sm); - /* FIXME: we should loop round a couple of times */ - r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b); + /* + * Any block we allocate has to be free in both the old and current ll. + */ + r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); if (r) return r; diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index aec44924396622729f64e84abda381d24606e14a..9e3c64ec2026fa66b3b1a02adcd5c3b0085f1318 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm) } if (smm->recursion_count == 1) - apply_bops(smm); + r = apply_bops(smm); smm->recursion_count--; @@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) enum allocation_event ev; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); - r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b); + /* + * Any block we allocate has to be free in both the old and current ll. 
+ */ + r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); if (r) return r; diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index 3e6d1153b7c4b898b5a1355ad8bcc43cac2de4f4..f300aece842a3cd93721ba928649e8f52953bd9a 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -76,7 +76,8 @@ struct dm_space_map { static inline void dm_sm_destroy(struct dm_space_map *sm) { - sm->destroy(sm); + if (sm) + sm->destroy(sm); } static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks) diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index abe2c5dd0993b6083f2e9cebe54ebb1d8de541ac..73f57977b2e34b8410e87ce6e6cb5abf89349906 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone); void dm_tm_destroy(struct dm_transaction_manager *tm) { + if (!tm) + return; + if (!tm->is_clone) wipe_shadow_table(tm); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ac1cffd2a09b05f5f5217e579c9e87ea80efce84..aec1b9a223263d0b28d15088dd4b6d290be7c931 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -26,6 +26,9 @@ #include "raid0.h" #include "raid5.h" +static int default_layout = 0; +module_param(default_layout, int, 0644); + #define UNSUPPORTED_MDDEV_FLAGS \ ((1L << MD_HAS_JOURNAL) | \ (1L << MD_JOURNAL_CLEAN) | \ @@ -146,6 +149,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) } pr_debug("md/raid0:%s: FINAL %d zones\n", mdname(mddev), conf->nr_strip_zones); + + if (conf->nr_strip_zones == 1) { + conf->layout = RAID0_ORIG_LAYOUT; + } else if (mddev->layout == RAID0_ORIG_LAYOUT || + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = mddev->layout; + } else if (default_layout == RAID0_ORIG_LAYOUT || + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = default_layout; + } else { + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", + mdname(mddev)); + pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); + err = -ENOTSUPP; + goto abort; + } /* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size @@ -547,6 +566,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), bio->bi_iter.bi_sector); + bio_clear_flag(bio, BIO_QUEUE_ENTERED); generic_make_request(discard_bio); } bio_endio(bio); @@ -554,17 +574,18 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) static bool raid0_make_request(struct mddev *mddev, struct bio *bio) { + struct r0conf *conf = mddev->private; struct strip_zone *zone; struct md_rdev *tmp_dev; sector_t bio_sector; sector_t sector; + sector_t orig_sector; unsigned chunk_sects; unsigned sectors; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { raid0_handle_discard(mddev, bio); @@ -591,8 +612,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) bio = split; } + orig_sector = sector; zone = find_zone(mddev->private, §or); - tmp_dev = map_sector(mddev, 
zone, sector, §or); + switch (conf->layout) { + case RAID0_ORIG_LAYOUT: + tmp_dev = map_sector(mddev, zone, orig_sector, §or); + break; + case RAID0_ALT_MULTIZONE_LAYOUT: + tmp_dev = map_sector(mddev, zone, sector, §or); + break; + default: + WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev)); + bio_io_error(bio); + return true; + } + bio_set_dev(bio, tmp_dev->bdev); bio->bi_iter.bi_sector = sector + zone->dev_start + tmp_dev->data_offset; @@ -602,6 +636,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) disk_devt(mddev->gendisk), bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); + bio_clear_flag(bio, BIO_QUEUE_ENTERED); generic_make_request(bio); return true; } diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 540e65d92642d8427fbe3af4d5960834cc967a74..3816e5477db1e743afeaf707c1971f1dbdce1901 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h @@ -8,11 +8,25 @@ struct strip_zone { int nb_dev; /* # of devices attached to the zone */ }; +/* Linux 3.14 (20d0189b101) made an unintended change to + * the RAID0 layout for multi-zone arrays (where devices aren't all + * the same size. + * RAID0_ORIG_LAYOUT restores the original layout + * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout + * The layouts are identical when there is only one zone (all + * devices the same size). + */ + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; struct r0conf { struct strip_zone *strip_zone; struct md_rdev **devlist; /* lists of rdevs, pointed to * by strip_zone->dev */ int nr_strip_zones; + enum r0layout layout; }; #endif diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4e990246225eada686e4d77f657c515a13283a12..ee3826db193dafa91027b0636988db7f877a1b67 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -434,19 +434,23 @@ static void raid1_end_write_request(struct bio *bio) /* We never try FailFast to WriteMostly devices */ !test_bit(WriteMostly, &rdev->flags)) { md_error(r1_bio->mddev, rdev); - if (!test_bit(Faulty, &rdev->flags)) - /* This is the only remaining device, - * We need to retry the write without - * FailFast - */ - set_bit(R1BIO_WriteError, &r1_bio->state); - else { - /* Finished with this branch */ - r1_bio->bios[mirror] = NULL; - to_put = bio; - } - } else + } + + /* + * When the device is faulty, it is not necessary to + * handle write error. + * For failfast, this is the only remaining device, + * We need to retry the write without FailFast. 
+ */ + if (!test_bit(Faulty, &rdev->flags)) set_bit(R1BIO_WriteError, &r1_bio->state); + else { + /* Fail the request */ + set_bit(R1BIO_Degraded, &r1_bio->state); + /* Finished with this branch */ + r1_bio->bios[mirror] = NULL; + to_put = bio; + } } else { /* * Set R1BIO_Uptodate in our master bio, so that we @@ -603,7 +607,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED || rdev == NULL - || test_bit(Faulty, &rdev->flags)) + || test_bit(Faulty, &rdev->flags) + || test_bit(WantRemove, &rdev->flags)) continue; if (!test_bit(In_sync, &rdev->flags) && rdev->recovery_offset < this_sector + sectors) @@ -732,7 +737,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if (best_disk >= 0) { rdev = rcu_dereference(conf->mirrors[best_disk].rdev); - if (!rdev) + if (!rdev || test_bit(Faulty, &rdev->flags) + || test_bit(WantRemove, &rdev->flags)) goto retry; atomic_inc(&rdev->nr_pending); sectors = best_good_sectors; @@ -1312,6 +1318,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct raid1_plug_cb *plug = NULL; int first_clone; int max_sectors; + bool write_behind = false; if (mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, @@ -1364,13 +1371,23 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, max_sectors = r1_bio->sectors; for (i = 0; i < disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); + + /* + * The write-behind io is only attempted on drives marked as + * write-mostly, which means we could allocate write behind + * bio later. + */ + if (rdev && test_bit(WriteMostly, &rdev->flags)) + write_behind = true; + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { atomic_inc(&rdev->nr_pending); blocked_rdev = rdev; break; } r1_bio->bios[i] = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags)) { + if (!rdev || test_bit(Faulty, &rdev->flags) + || test_bit(WantRemove, &rdev->flags)) { if (i < conf->raid_disks) set_bit(R1BIO_Degraded, &r1_bio->state); continue; @@ -1437,6 +1454,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, goto retry_write; } + /* + * When using a bitmap, we may call alloc_behind_master_bio below. + * alloc_behind_master_bio allocates a copy of the data payload a page + * at a time and thus needs a new bio that can fit the whole payload + * this bio in page sized chunks. 
+ */ + if (write_behind && bitmap) + max_sectors = min_t(int, max_sectors, + BIO_MAX_PAGES * (PAGE_SIZE >> 9)); if (max_sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &conf->bio_split); @@ -1535,10 +1561,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio) { sector_t sectors; - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } /* * There is a limit to the maximum size, but @@ -1734,6 +1759,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && + rdev->saved_raid_disk < conf->raid_disks && conf->mirrors[rdev->saved_raid_disk].rdev == NULL) first = last = rdev->saved_raid_disk; @@ -1747,6 +1773,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) p->head_position = 0; rdev->raid_disk = mirror; + clear_bit(WantRemove, &rdev->flags); err = 0; /* As all devices are equivalent, we don't need a full recovery * if this was recently any drive of the array @@ -1761,6 +1788,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) /* Add this device as a replacement */ clear_bit(In_sync, &rdev->flags); set_bit(Replacement, &rdev->flags); + clear_bit(WantRemove, &rdev->flags); rdev->raid_disk = mirror; err = 0; conf->fullsync = 1; @@ -1781,6 +1809,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int number = rdev->raid_disk; struct raid1_info *p = conf->mirrors + number; + if (unlikely(number >= conf->raid_disks)) + goto abort; + if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; @@ -1800,16 +1831,26 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) err = -EBUSY; goto abort; } - p->rdev = NULL; + + /* + * Before set p->rdev = NULL, we set WantRemove bit avoiding + * race between rdev remove and issue bio, which can cause + * NULL pointer deference of rdev by conf->mirrors[i].rdev. + */ + set_bit(WantRemove, &rdev->flags); + if (!test_bit(RemoveSynchronized, &rdev->flags)) { synchronize_rcu(); if (atomic_read(&rdev->nr_pending)) { /* lost the race, try later */ err = -EBUSY; - p->rdev = rdev; + clear_bit(WantRemove, &rdev->flags); goto abort; } } + + p->rdev = NULL; + if (conf->mirrors[conf->raid_disks + number].rdev) { /* We just removed a device that is being replaced. * Move down the replacement. We drain all IO before @@ -1862,6 +1903,20 @@ static void end_sync_read(struct bio *bio) reschedule_retry(r1_bio); } +static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) +{ + sector_t sync_blocks = 0; + sector_t s = r1_bio->sector; + long sectors_to_go = r1_bio->sectors; + + /* make sure these bits don't get cleared. */ + do { + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); + s += sync_blocks; + sectors_to_go -= sync_blocks; + } while (sectors_to_go > 0); +} + static void end_sync_write(struct bio *bio) { int uptodate = !bio->bi_status; @@ -1873,15 +1928,7 @@ static void end_sync_write(struct bio *bio) struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { - sector_t sync_blocks = 0; - sector_t s = r1_bio->sector; - long sectors_to_go = r1_bio->sectors; - /* make sure these bits doesn't get cleared. 
*/ - do { - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); - s += sync_blocks; - sectors_to_go -= sync_blocks; - } while (sectors_to_go > 0); + abort_sync_write(mddev, r1_bio); set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & @@ -2171,8 +2218,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) (i == r1_bio->read_disk || !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + abort_sync_write(mddev, r1_bio); continue; + } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) @@ -2698,7 +2747,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev == NULL || - test_bit(Faulty, &rdev->flags)) { + test_bit(Faulty, &rdev->flags) || + test_bit(WantRemove, &rdev->flags)) { if (i < conf->raid_disks) still_degraded = 1; } else if (!test_bit(In_sync, &rdev->flags)) { @@ -2746,7 +2796,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, write_targets++; } } - if (bio->bi_end_io) { + if (rdev && bio->bi_end_io) { atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio_set_dev(bio, rdev->bdev); @@ -3040,7 +3090,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) return ERR_PTR(err); } -static void raid1_free(struct mddev *mddev, void *priv); static int raid1_run(struct mddev *mddev) { struct r1conf *conf; @@ -3094,6 +3143,13 @@ static int raid1_run(struct mddev *mddev) !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || test_bit(Faulty, &conf->mirrors[i].rdev->flags)) mddev->degraded++; + /* + * RAID1 needs at least one disk in active + */ + if (conf->raid_disks - mddev->degraded < 1) { + md_unregister_thread(&conf->thread); + return -EINVAL; + } if (conf->raid_disks - mddev->degraded == 1) mddev->recovery_cp = MaxSector; @@ -3125,10 +3181,8 @@ static int raid1_run(struct mddev *mddev) } ret = md_integrity_register(mddev); - if (ret) { + if (ret) md_unregister_thread(&mddev->thread); - raid1_free(mddev, conf); - } return ret; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d6f7978b4449e92aba522035941ff51137e2ce38..f8fea9593955e58522a1e70155afdf5b27bf74a6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -112,6 +112,21 @@ static void end_reshape(struct r10conf *conf); #include "raid1-10.c" +#define NULL_CMD +#define cmd_before(conf, cmd) \ + do { \ + write_sequnlock_irq(&(conf)->resync_lock); \ + cmd; \ + } while (0) +#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock) + +#define wait_event_barrier_cmd(conf, cond, cmd) \ + wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \ + cmd_after(conf)) + +#define wait_event_barrier(conf, cond) \ + wait_event_barrier_cmd(conf, cond, NULL_CMD) + /* * for resync bio, r10bio pointer can be retrieved from the per-bio * 'struct resync_pages'. 
@@ -229,7 +244,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) out_free_pages: while (--j >= 0) - resync_free_pages(&rps[j * 2]); + resync_free_pages(&rps[j]); j = 0; out_free_bio: @@ -305,6 +320,12 @@ static void put_buf(struct r10bio *r10_bio) lower_barrier(conf); } +static void wake_up_barrier(struct r10conf *conf) +{ + if (wq_has_sleeper(&conf->wait_barrier)) + wake_up(&conf->wait_barrier); +} + static void reschedule_retry(struct r10bio *r10_bio) { unsigned long flags; @@ -965,35 +986,54 @@ static void flush_pending_writes(struct r10conf *conf) static void raise_barrier(struct r10conf *conf, int force) { BUG_ON(force && !conf->barrier); - spin_lock_irq(&conf->resync_lock); + write_seqlock_irq(&conf->resync_lock); /* Wait until no block IO is waiting (unless 'force') */ - wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, - conf->resync_lock); + wait_event_barrier(conf, force || !conf->nr_waiting); /* block any new IO from starting */ - conf->barrier++; + WRITE_ONCE(conf->barrier, conf->barrier + 1); /* Now wait for all pending IO to complete */ - wait_event_lock_irq(conf->wait_barrier, - !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH, - conf->resync_lock); + wait_event_barrier(conf, !atomic_read(&conf->nr_pending) && + conf->barrier < RESYNC_DEPTH); - spin_unlock_irq(&conf->resync_lock); + write_sequnlock_irq(&conf->resync_lock); } static void lower_barrier(struct r10conf *conf) { unsigned long flags; - spin_lock_irqsave(&conf->resync_lock, flags); - conf->barrier--; - spin_unlock_irqrestore(&conf->resync_lock, flags); + + write_seqlock_irqsave(&conf->resync_lock, flags); + WRITE_ONCE(conf->barrier, conf->barrier - 1); + write_sequnlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } +static bool wait_barrier_nolock(struct r10conf *conf) +{ + unsigned int seq = read_seqbegin(&conf->resync_lock); + + if (READ_ONCE(conf->barrier)) + return false; + + atomic_inc(&conf->nr_pending); + if (!read_seqretry(&conf->resync_lock, seq)) + return true; + + if (atomic_dec_and_test(&conf->nr_pending)) + wake_up_barrier(conf); + + return false; +} + static void wait_barrier(struct r10conf *conf) { - spin_lock_irq(&conf->resync_lock); + if (wait_barrier_nolock(conf)) + return; + + write_seqlock_irq(&conf->resync_lock); if (conf->barrier) { conf->nr_waiting++; /* Wait for the barrier to drop. @@ -1006,26 +1046,25 @@ static void wait_barrier(struct r10conf *conf) * count down. */ raid10_log(conf->mddev, "wait barrier"); - wait_event_lock_irq(conf->wait_barrier, + wait_event_barrier(conf, !conf->barrier || (atomic_read(&conf->nr_pending) && current->bio_list && (!bio_list_empty(¤t->bio_list[0]) || - !bio_list_empty(¤t->bio_list[1]))), - conf->resync_lock); + !bio_list_empty(¤t->bio_list[1])))); conf->nr_waiting--; if (!conf->nr_waiting) wake_up(&conf->wait_barrier); } atomic_inc(&conf->nr_pending); - spin_unlock_irq(&conf->resync_lock); + write_sequnlock_irq(&conf->resync_lock); } static void allow_barrier(struct r10conf *conf) { if ((atomic_dec_and_test(&conf->nr_pending)) || (conf->array_freeze_pending)) - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); } static void freeze_array(struct r10conf *conf, int extra) @@ -1042,27 +1081,26 @@ static void freeze_array(struct r10conf *conf, int extra) * must match the number of pending IOs (nr_pending) before * we continue. 
*/ - spin_lock_irq(&conf->resync_lock); + write_seqlock_irq(&conf->resync_lock); conf->array_freeze_pending++; - conf->barrier++; + WRITE_ONCE(conf->barrier, conf->barrier + 1); conf->nr_waiting++; - wait_event_lock_irq_cmd(conf->wait_barrier, - atomic_read(&conf->nr_pending) == conf->nr_queued+extra, - conf->resync_lock, - flush_pending_writes(conf)); + wait_event_barrier_cmd(conf, + atomic_read(&conf->nr_pending) == conf->nr_queued+extra, + flush_pending_writes(conf)); conf->array_freeze_pending--; - spin_unlock_irq(&conf->resync_lock); + write_sequnlock_irq(&conf->resync_lock); } static void unfreeze_array(struct r10conf *conf) { /* reverse the effect of the freeze */ - spin_lock_irq(&conf->resync_lock); - conf->barrier--; + write_seqlock_irq(&conf->resync_lock); + WRITE_ONCE(conf->barrier, conf->barrier - 1); conf->nr_waiting--; wake_up(&conf->wait_barrier); - spin_unlock_irq(&conf->resync_lock); + write_sequnlock_irq(&conf->resync_lock); } static sector_t choose_data_offset(struct r10bio *r10_bio, @@ -1208,7 +1246,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); bio_chain(split, bio); + allow_barrier(conf); generic_make_request(bio); + wait_barrier(conf); bio = split; r10_bio->master_bio = bio; r10_bio->sectors = max_sectors; @@ -1513,7 +1553,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, r10_bio->sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, bio); + allow_barrier(conf); generic_make_request(bio); + wait_barrier(conf); bio = split; r10_bio->master_bio = bio; } @@ -1558,10 +1600,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) int chunk_sects = chunk_mask + 1; int sectors = bio_sectors(bio); - if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { - md_flush_request(mddev, bio); + if (unlikely(bio->bi_opf & REQ_PREFLUSH) + && md_flush_request(mddev, bio)) return true; - } if (!md_write_start(mddev, bio)) return false; @@ -1581,7 +1622,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) __make_request(mddev, bio, sectors); /* In case raid10d snuck in to freeze_array */ - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); return true; } @@ -1808,6 +1849,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) first = last = rdev->raid_disk; if (rdev->saved_raid_disk >= first && + rdev->saved_raid_disk < conf->geo.raid_disks && conf->mirrors[rdev->saved_raid_disk].rdev == NULL) mirror = rdev->saved_raid_disk; else @@ -3079,6 +3121,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; + int need_recover = 0; + int need_replace = 0; struct raid10_info *mirror = &conf->mirrors[i]; struct md_rdev *mrdev, *mreplace; @@ -3086,11 +3130,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, mrdev = rcu_dereference(mirror->rdev); mreplace = rcu_dereference(mirror->replacement); - if ((mrdev == NULL || - test_bit(Faulty, &mrdev->flags) || - test_bit(In_sync, &mrdev->flags)) && - (mreplace == NULL || - test_bit(Faulty, &mreplace->flags))) { + if (mrdev != NULL && + !test_bit(Faulty, &mrdev->flags) && + !test_bit(In_sync, &mrdev->flags)) + need_recover = 1; + if (mreplace != NULL && + !test_bit(Faulty, &mreplace->flags)) + need_replace = 1; + + if (!need_recover && !need_replace) { rcu_read_unlock(); continue; } @@ -3213,7 +3261,7 @@ static sector_t 
raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->devs[1].devnum = i; r10_bio->devs[1].addr = to_addr; - if (!test_bit(In_sync, &mrdev->flags)) { + if (need_recover) { bio = r10_bio->devs[1].bio; bio->bi_next = biolist; biolist = bio; @@ -3230,16 +3278,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r10_bio->devs[1].repl_bio; if (bio) bio->bi_end_io = NULL; - /* Note: if mreplace != NULL, then bio + /* Note: if need_replace, then bio * cannot be NULL as r10buf_pool_alloc will * have allocated it. - * So the second test here is pointless. - * But it keeps semantic-checkers happy, and - * this comment keeps human reviewers - * happy. */ - if (mreplace == NULL || bio == NULL || - test_bit(Faulty, &mreplace->flags)) + if (!need_replace) break; bio->bi_next = biolist; biolist = bio; @@ -3729,7 +3772,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) INIT_LIST_HEAD(&conf->retry_list); INIT_LIST_HEAD(&conf->bio_end_io_list); - spin_lock_init(&conf->resync_lock); + seqlock_init(&conf->resync_lock); init_waitqueue_head(&conf->wait_barrier); atomic_set(&conf->nr_pending, 0); @@ -3954,6 +3997,8 @@ static int raid10_run(struct mddev *mddev) set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, "reshape"); + if (!mddev->sync_thread) + goto out_free_conf; } return 0; @@ -4067,7 +4112,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) rdev->new_raid_disk = rdev->raid_disk * 2; rdev->sectors = size; } - conf->barrier = 1; + WRITE_ONCE(conf->barrier, 1); } return conf; @@ -4450,7 +4495,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, sector_nr = conf->reshape_progress; if (sector_nr) { mddev->curr_resync_completed = sector_nr; - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); *skipped = 1; return sector_nr; } @@ -4625,7 +4670,6 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; generic_make_request(read_bio); - sector_nr += nr_sectors; sectors_done += nr_sectors; if (sector_nr <= last) goto read_more; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index d3eaaf3eb1bcf3004878cc5c396c355faebb3c65..e368a92f37fd5c66effef754886be3db533400d1 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -77,7 +77,7 @@ struct r10conf { struct bio_list pending_bio_list; int pending_count; - spinlock_t resync_lock; + seqlock_t resync_lock; atomic_t nr_pending; int nr_waiting; int nr_queued; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index e6e925add7005786861ef1cb7f22d83d2d89df65..6518b012756fed3e878edfc6fdcb738f4ae595f2 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1935,12 +1935,14 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf, } static struct stripe_head * -r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect) +r5c_recovery_alloc_stripe( + struct r5conf *conf, + sector_t stripe_sect, + int noblock) { struct stripe_head *sh; - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); + sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); if (!sh) return NULL; /* no more stripe available */ @@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); /* * 
cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect); + conf, stripe_sect, 1); } if (!sh) { + int new_size = conf->min_nr_stripes * 2; pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", mdname(mddev), - conf->min_nr_stripes * 2); - raid5_set_cache_size(mddev, - conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe(conf, - stripe_sect); + new_size); + ret = raid5_set_cache_size(mddev, new_size); + if (conf->min_nr_stripes <= new_size / 2) { + pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", + mdname(mddev), + ret, + new_size, + conf->min_nr_stripes, + conf->max_nr_stripes); + return -ENOMEM; + } + sh = r5c_recovery_alloc_stripe( + conf, stripe_sect, 0); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", - mdname(mddev)); + mdname(mddev)); return -ENOMEM; } list_add_tail(&sh->lru, cached_stripe_list); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e4e98f47865def0449979058c6c7e51228b9b42b..b2b35cdabac58ae849702c4bb9e5a6a09237c5c9 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2192,7 +2192,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) atomic_inc(&conf->active_stripes); raid5_release_stripe(sh); - conf->max_nr_stripes++; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); return 1; } @@ -2458,7 +2458,7 @@ static int drop_one_stripe(struct r5conf *conf) shrink_buffers(sh); free_stripe(conf->slab_cache, sh); atomic_dec(&conf->active_stripes); - conf->max_nr_stripes--; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); return 1; } @@ -2540,7 +2540,8 @@ static void raid5_end_read_request(struct bio * bi) int set_bad = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - atomic_inc(&rdev->read_errors); + if (!(bi->bi_status == BLK_STS_PROTECTION)) + atomic_inc(&rdev->read_errors); if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) pr_warn_ratelimited( "md/raid:%s: read error on replacement device (sector %llu on %s).\n", @@ -2572,7 +2573,9 @@ static void raid5_end_read_request(struct bio * bi) && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) retry = 1; if (retry) - if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + if (sh->qd_idx >= 0 && sh->pd_idx == i) + set_bit(R5_ReadError, &sh->dev[i].flags); + else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); } else @@ -4185,7 +4188,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, /* now write out any block on a failed drive, * or P or Q if they were recomputed */ - BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ + dev = NULL; if (s->failed == 2) { dev = &sh->dev[s->failed_num[1]]; s->locked++; @@ -4210,6 +4213,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantwrite, &dev->flags); } + if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags), + "%s: disk%td not up to date\n", + mdname(conf->mddev), + dev - (struct r5dev *) &sh->dev)) { + clear_bit(R5_LOCKED, &dev->flags); + clear_bit(R5_Wantwrite, &dev->flags); + s->locked--; + } clear_bit(STRIPE_DEGRADED, &sh->state); set_bit(STRIPE_INSYNC, &sh->state); @@ -5579,8 +5590,8 
@@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) if (ret == 0) return true; if (ret == -ENODEV) { - md_flush_request(mddev, bi); - return true; + if (md_flush_request(mddev, bi)) + return true; } /* ret == -EAGAIN, fallback */ /* @@ -5713,7 +5724,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = false; } - set_bit(STRIPE_HANDLE, &sh->state); + if (!sh->batch_head || sh == sh->batch_head) + set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((!sh->batch_head || sh == sh->batch_head) && (bi->bi_opf & REQ_SYNC) && @@ -5778,7 +5790,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk sector_div(sector_nr, new_data_disks); if (sector_nr) { mddev->curr_resync_completed = sector_nr; - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); *skipped = 1; retn = sector_nr; goto finish; @@ -5805,7 +5817,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk safepos = conf->reshape_safe; sector_div(safepos, data_disks); if (mddev->reshape_backwards) { - BUG_ON(writepos < reshape_sectors); + if (WARN_ON(writepos < reshape_sectors)) + return MaxSector; + writepos -= reshape_sectors; readpos += reshape_sectors; safepos += reshape_sectors; @@ -5823,14 +5837,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk * to set 'stripe_addr' which is where we will write to. */ if (mddev->reshape_backwards) { - BUG_ON(conf->reshape_progress == 0); + if (WARN_ON(conf->reshape_progress == 0)) + return MaxSector; + stripe_addr = writepos; - BUG_ON((mddev->dev_sectors & - ~((sector_t)reshape_sectors - 1)) - - reshape_sectors - stripe_addr - != sector_nr); + if (WARN_ON((mddev->dev_sectors & + ~((sector_t)reshape_sectors - 1)) - + reshape_sectors - stripe_addr != sector_nr)) + return MaxSector; } else { - BUG_ON(writepos != sector_nr + reshape_sectors); + if (WARN_ON(writepos != sector_nr + reshape_sectors)) + return MaxSector; + stripe_addr = sector_nr; } @@ -5892,7 +5910,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); } INIT_LIST_HEAD(&stripes); @@ -5999,7 +6017,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); + sysfs_notify_dirent_safe(mddev->sysfs_completed); } ret: return retn; @@ -6357,12 +6375,13 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page) int raid5_set_cache_size(struct mddev *mddev, int size) { + int result = 0; struct r5conf *conf = mddev->private; if (size <= 16 || size > 32768) return -EINVAL; - conf->min_nr_stripes = size; + WRITE_ONCE(conf->min_nr_stripes, size); mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) @@ -6373,11 +6392,14 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) - if (!grow_one_stripe(conf, GFP_KERNEL)) + if (!grow_one_stripe(conf, GFP_KERNEL)) { + WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); + result = -ENOMEM; break; + } 
mutex_unlock(&conf->cache_size_mutex); - return 0; + return result; } EXPORT_SYMBOL(raid5_set_cache_size); @@ -6845,11 +6867,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); + int max_stripes = READ_ONCE(conf->max_nr_stripes); + int min_stripes = READ_ONCE(conf->min_nr_stripes); - if (conf->max_nr_stripes < conf->min_nr_stripes) + if (max_stripes < min_stripes) /* unlikely, but not impossible */ return 0; - return conf->max_nr_stripes - conf->min_nr_stripes; + return max_stripes - min_stripes; } static struct r5conf *setup_conf(struct mddev *mddev) @@ -7386,6 +7410,8 @@ static int raid5_run(struct mddev *mddev) set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, "reshape"); + if (!mddev->sync_thread) + goto abort; } /* Ok, everything is just fine now */ @@ -7656,7 +7682,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) { struct r5conf *conf = mddev->private; - int err = -EEXIST; + int ret, err = -EEXIST; int disk; struct disk_info *p; int first = 0; @@ -7671,7 +7697,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) * The array is in readonly mode if journal is missing, so no * write requests running. We should be safe */ - log_init(conf, rdev, false); + ret = log_init(conf, rdev, false); + if (ret) + return ret; + + ret = r5l_start(conf->log); + if (ret) + return ret; + return 0; } if (mddev->recovery_disabled == conf->recovery_disabled) diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile index 29a2ab9e77c5dffd8fadc034450f83bdbe9e2aad..ad8677d8c89679bb6b0eda8357a136dfb920fbf3 100644 --- a/drivers/media/cec/Makefile +++ b/drivers/media/cec/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o +cec-objs := cec-core.o cec-adap.o cec-api.o ifeq ($(CONFIG_CEC_NOTIFIER),y) cec-objs += cec-notifier.o diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 030b2602faf0c322ad0483cd5f0f29e565b06497..a9b8c6636e50ce1432c5333cf9da0c7239c44c21 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -62,6 +62,19 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr return adap->log_addrs.primary_device_type[i < 0 ? 0 : i]; } +u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr); + /* * Queue a new event for this filehandle. If ts == 0, then set it * to the current time. @@ -341,7 +354,7 @@ static void cec_data_completed(struct cec_data *data) * * This function is called with adap->lock held. 
*/ -static void cec_data_cancel(struct cec_data *data) +static void cec_data_cancel(struct cec_data *data, u8 tx_status) { /* * It's either the current transmit, or it is a pending @@ -352,17 +365,16 @@ static void cec_data_cancel(struct cec_data *data) } else { list_del_init(&data->list); if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) - data->adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + data->adap->transmit_queue_sz--; } if (data->msg.tx_status & CEC_TX_STATUS_OK) { - /* Mark the canceled RX as a timeout */ data->msg.rx_ts = ktime_get_ns(); - data->msg.rx_status = CEC_RX_STATUS_TIMEOUT; + data->msg.rx_status = CEC_RX_STATUS_ABORTED; } else { - /* Mark the canceled TX as an error */ data->msg.tx_ts = ktime_get_ns(); - data->msg.tx_status |= CEC_TX_STATUS_ERROR | + data->msg.tx_status |= tx_status | CEC_TX_STATUS_MAX_RETRIES; data->msg.tx_error_cnt++; data->attempts = 0; @@ -390,15 +402,15 @@ static void cec_flush(struct cec_adapter *adap) while (!list_empty(&adap->transmit_queue)) { data = list_first_entry(&adap->transmit_queue, struct cec_data, list); - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); } if (adap->transmitting) - cec_data_cancel(adap->transmitting); + cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED); /* Cancel the pending timeout work. */ list_for_each_entry_safe(data, n, &adap->wait_queue, list) { if (cancel_delayed_work(&data->work)) - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_OK); /* * If cancel_delayed_work returned false, then * the cec_wait_timeout function is running, @@ -406,6 +418,14 @@ static void cec_flush(struct cec_adapter *adap) * need to do anything special in that case. */ } + /* + * If something went wrong and this counter isn't what it should + * be, then this will reset it back to 0. Warn if it is not 0, + * since it indicates a bug, either in this framework or in a + * CEC driver. + */ + if (WARN_ON(adap->transmit_queue_sz)) + adap->transmit_queue_sz = 0; } /* @@ -430,7 +450,7 @@ int cec_thread_func(void *_adap) bool timeout = false; u8 attempts; - if (adap->transmitting) { + if (adap->transmit_in_progress) { int err; /* @@ -444,7 +464,7 @@ int cec_thread_func(void *_adap) (adap->needs_hpd && (!adap->is_configured && !adap->is_configuring)) || kthread_should_stop() || - (!adap->transmitting && + (!adap->transmit_in_progress && !list_empty(&adap->transmit_queue)), msecs_to_jiffies(CEC_XFER_TIMEOUT_MS)); timeout = err == 0; @@ -452,7 +472,7 @@ int cec_thread_func(void *_adap) /* Otherwise we just wait for something to happen. */ wait_event_interruptible(adap->kthread_waitq, kthread_should_stop() || - (!adap->transmitting && + (!adap->transmit_in_progress && !list_empty(&adap->transmit_queue))); } @@ -465,7 +485,7 @@ int cec_thread_func(void *_adap) goto unlock; } - if (adap->transmitting && timeout) { + if (adap->transmit_in_progress && timeout) { /* * If we timeout, then log that. Normally this does * not happen and it is an indication of a faulty CEC @@ -474,12 +494,18 @@ int cec_thread_func(void *_adap) * so much traffic on the bus that the adapter was * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - dprintk(1, "%s: message %*ph timed out\n", __func__, - adap->transmitting->msg.len, - adap->transmitting->msg.msg); + if (adap->transmitting) { + pr_warn("cec-%s: message %*ph timed out\n", adap->name, + adap->transmitting->msg.len, + adap->transmitting->msg.msg); + /* Just give up on this. 
*/ + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT); + } else { + pr_warn("cec-%s: transmit timed out\n", adap->name); + } + adap->transmit_in_progress = false; adap->tx_timeouts++; - /* Just give up on this. */ - cec_data_cancel(adap->transmitting); goto unlock; } @@ -487,14 +513,15 @@ int cec_thread_func(void *_adap) * If we are still transmitting, or there is nothing new to * transmit, then just continue waiting. */ - if (adap->transmitting || list_empty(&adap->transmit_queue)) + if (adap->transmit_in_progress || list_empty(&adap->transmit_queue)) goto unlock; /* Get a new message to transmit */ data = list_first_entry(&adap->transmit_queue, struct cec_data, list); list_del_init(&data->list); - adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + adap->transmit_queue_sz--; /* Make this the current transmitting message */ adap->transmitting = data; @@ -514,9 +541,11 @@ int cec_thread_func(void *_adap) if (data->attempts) { /* should be >= 3 data bit periods for a retry */ signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY; - } else if (data->new_initiator) { + } else if (adap->last_initiator != + cec_msg_initiator(&data->msg)) { /* should be >= 5 data bit periods for new initiator */ signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + adap->last_initiator = cec_msg_initiator(&data->msg); } else { /* * should be >= 7 data bit periods for sending another @@ -530,7 +559,9 @@ int cec_thread_func(void *_adap) /* Tell the adapter to transmit, cancel on error */ if (adap->ops->adap_transmit(adap, data->attempts, signal_free_time, &data->msg)) - cec_data_cancel(data); + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); + else + adap->transmit_in_progress = true; unlock: mutex_unlock(&adap->lock); @@ -561,14 +592,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, data = adap->transmitting; if (!data) { /* - * This can happen if a transmit was issued and the cable is + * This might happen if a transmit was issued and the cable is * unplugged while the transmit is ongoing. Ignore this * transmit in that case. */ - dprintk(1, "%s was called without an ongoing transmit!\n", - __func__); - goto unlock; + if (!adap->transmit_in_progress) + dprintk(1, "%s was called without an ongoing transmit!\n", + __func__); + adap->transmit_in_progress = false; + goto wake_thread; } + adap->transmit_in_progress = false; msg = &data->msg; @@ -634,7 +668,6 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, * for transmitting or to retry the current message. */ wake_up_interruptible(&adap->kthread_waitq); -unlock: mutex_unlock(&adap->lock); } EXPORT_SYMBOL_GPL(cec_transmit_done_ts); @@ -701,9 +734,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, struct cec_fh *fh, bool block) { struct cec_data *data; - u8 last_initiator = 0xff; - unsigned int timeout; - int res = 0; msg->rx_ts = 0; msg->tx_ts = 0; @@ -813,23 +843,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, data->adap = adap; data->blocking = block; - /* - * Determine if this message follows a message from the same - * initiator. Needed to determine the free signal time later on. 
- */ - if (msg->len > 1) { - if (!(list_empty(&adap->transmit_queue))) { - const struct cec_data *last; - - last = list_last_entry(&adap->transmit_queue, - const struct cec_data, list); - last_initiator = cec_msg_initiator(&last->msg); - } else if (adap->transmitting) { - last_initiator = - cec_msg_initiator(&adap->transmitting->msg); - } - } - data->new_initiator = last_initiator != cec_msg_initiator(msg); init_completion(&data->c); INIT_DELAYED_WORK(&data->work, cec_wait_timeout); @@ -845,48 +858,22 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, if (!block) return 0; - /* - * If we don't get a completion before this time something is really - * wrong and we time out. - */ - timeout = CEC_XFER_TIMEOUT_MS; - /* Add the requested timeout if we have to wait for a reply as well */ - if (msg->timeout) - timeout += msg->timeout; - /* * Release the lock and wait, retake the lock afterwards. */ mutex_unlock(&adap->lock); - res = wait_for_completion_killable_timeout(&data->c, - msecs_to_jiffies(timeout)); + wait_for_completion_killable(&data->c); + cancel_delayed_work_sync(&data->work); mutex_lock(&adap->lock); - if (data->completed) { - /* The transmit completed (possibly with an error) */ - *msg = data->msg; - kfree(data); - return 0; - } - /* - * The wait for completion timed out or was interrupted, so mark this - * as non-blocking and disconnect from the filehandle since it is - * still 'in flight'. When it finally completes it will just drop the - * result silently. - */ - data->blocking = false; - if (data->fh) - list_del(&data->xfer_list); - data->fh = NULL; + /* Cancel the transmit if it was interrupted */ + if (!data->completed) + cec_data_cancel(data, CEC_TX_STATUS_ABORTED); - if (res == 0) { /* timed out */ - /* Check if the reply or the transmit failed */ - if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK)) - msg->rx_status = CEC_RX_STATUS_TIMEOUT; - else - msg->tx_status = CEC_TX_STATUS_MAX_RETRIES; - } - return res > 0 ? 0 : res; + /* The transmit completed (possibly with an error) */ + *msg = data->msg; + kfree(data); + return 0; } /* Helper function to be used by drivers and this framework. */ @@ -1044,6 +1031,8 @@ void cec_received_msg_ts(struct cec_adapter *adap, mutex_lock(&adap->lock); dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); + adap->last_initiator = 0xff; + /* Check if this message was for us (directed or broadcast). 
*/ if (!cec_msg_is_broadcast(msg)) valid_la = cec_has_log_addr(adap, msg_dest); @@ -1062,11 +1051,11 @@ void cec_received_msg_ts(struct cec_adapter *adap, valid_la = false; else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) valid_la = false; - else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) valid_la = false; else if (cec_msg_is_broadcast(msg) && - adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && - !(dir_fl & BCAST2_0)) + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && + !(dir_fl & BCAST1_4)) valid_la = false; } if (valid_la && min_len) { @@ -1209,6 +1198,8 @@ static int cec_config_log_addr(struct cec_adapter *adap, { struct cec_log_addrs *las = &adap->log_addrs; struct cec_msg msg = { }; + const unsigned int max_retries = 2; + unsigned int i; int err; if (cec_has_log_addr(adap, log_addr)) @@ -1217,19 +1208,44 @@ static int cec_config_log_addr(struct cec_adapter *adap, /* Send poll message */ msg.len = 1; msg.msg[0] = (log_addr << 4) | log_addr; - err = cec_transmit_msg_fh(adap, &msg, NULL, true); - /* - * While trying to poll the physical address was reset - * and the adapter was unconfigured, so bail out. - */ - if (!adap->is_configuring) - return -EINTR; + for (i = 0; i < max_retries; i++) { + err = cec_transmit_msg_fh(adap, &msg, NULL, true); - if (err) - return err; + /* + * While trying to poll the physical address was reset + * and the adapter was unconfigured, so bail out. + */ + if (!adap->is_configuring) + return -EINTR; + + if (err) + return err; + + /* + * The message was aborted due to a disconnect or + * unconfigure, just bail out. + */ + if (msg.tx_status & CEC_TX_STATUS_ABORTED) + return -EINTR; + if (msg.tx_status & CEC_TX_STATUS_OK) + return 0; + if (msg.tx_status & CEC_TX_STATUS_NACK) + break; + /* + * Retry up to max_retries times if the message was neither + * OKed or NACKed. This can happen due to e.g. a Lost + * Arbitration condition. + */ + } - if (msg.tx_status & CEC_TX_STATUS_OK) + /* + * If we are unable to get an OK or a NACK after max_retries attempts + * (and note that each attempt already consists of four polls), then + * then we assume that something is really weird and that it is not a + * good idea to try and claim this logical address. 
+ */ + if (i == max_retries) return 0; /* @@ -1434,6 +1450,13 @@ static int cec_config_thread_func(void *arg) las->log_addr[i], cec_phys_addr_exp(adap->phys_addr)); cec_transmit_msg_fh(adap, &msg, NULL, false); + + /* Report Vendor ID */ + if (adap->log_addrs.vendor_id != CEC_VENDOR_ID_NONE) { + cec_msg_device_vendor_id(&msg, + adap->log_addrs.vendor_id); + cec_transmit_msg_fh(adap, &msg, NULL, false); + } } adap->kthread_config = NULL; complete(&adap->config_completion); @@ -1498,14 +1521,20 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) if (adap->monitor_all_cnt) WARN_ON(call_op(adap, adap_monitor_all_enable, false)); mutex_lock(&adap->devnode.lock); - if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) + if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) { WARN_ON(adap->ops->adap_enable(adap, false)); + adap->transmit_in_progress = false; + wake_up_interruptible(&adap->kthread_waitq); + } mutex_unlock(&adap->devnode.lock); if (phys_addr == CEC_PHYS_ADDR_INVALID) return; } mutex_lock(&adap->devnode.lock); + adap->last_initiator = 0xff; + adap->transmit_in_progress = false; + if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) && adap->ops->adap_enable(adap, true)) { mutex_unlock(&adap->devnode.lock); diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index b6536bbad530c6b2bed6af16a9a4b1a41b32726a..d4c848c2f376485049e4dd95708bf5cfdfd25f86 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -101,6 +101,23 @@ static long cec_adap_g_phys_addr(struct cec_adapter *adap, return 0; } +static int cec_validate_phys_addr(u16 phys_addr) +{ + int i; + + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} + static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, bool block, __u16 __user *parg) { @@ -112,7 +129,7 @@ static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, if (copy_from_user(&phys_addr, parg, sizeof(phys_addr))) return -EFAULT; - err = cec_phys_addr_validate(phys_addr, NULL, NULL); + err = cec_validate_phys_addr(phys_addr); if (err) return err; mutex_lock(&adap->lock); @@ -130,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); - log_addrs = adap->log_addrs; + /* + * We use memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. 
+ */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); @@ -637,6 +660,8 @@ static int cec_release(struct inode *inode, struct file *filp) list_del(&data->xfer_list); } mutex_unlock(&adap->lock); + + mutex_lock(&fh->lock); while (!list_empty(&fh->msgs)) { struct cec_msg_entry *entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); @@ -654,6 +679,7 @@ static int cec_release(struct inode *inode, struct file *filp) kfree(entry); } } + mutex_unlock(&fh->lock); kfree(fh); cec_put_device(devnode); diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c deleted file mode 100644 index ec72ac1c0b915ef70de9b1bf86707d5cf0fe2585..0000000000000000000000000000000000000000 --- a/drivers/media/cec/cec-edid.c +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions - * - * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - */ - -#include -#include -#include -#include - -/* - * This EDID is expected to be a CEA-861 compliant, which means that there are - * at least two blocks and one or more of the extensions blocks are CEA-861 - * blocks. - * - * The returned location is guaranteed to be < size - 1. - */ -static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size) -{ - unsigned int blocks = size / 128; - unsigned int block; - u8 d; - - /* Sanity check: at least 2 blocks and a multiple of the block size */ - if (blocks < 2 || size % 128) - return 0; - - /* - * If there are fewer extension blocks than the size, then update - * 'blocks'. It is allowed to have more extension blocks than the size, - * since some hardware can only read e.g. 256 bytes of the EDID, even - * though more blocks are present. The first CEA-861 extension block - * should normally be in block 1 anyway. 
- */ - if (edid[0x7e] + 1 < blocks) - blocks = edid[0x7e] + 1; - - for (block = 1; block < blocks; block++) { - unsigned int offset = block * 128; - - /* Skip any non-CEA-861 extension blocks */ - if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) - continue; - - /* search Vendor Specific Data Block (tag 3) */ - d = edid[offset + 2] & 0x7f; - /* Check if there are Data Blocks */ - if (d <= 4) - continue; - if (d > 4) { - unsigned int i = offset + 4; - unsigned int end = offset + d; - - /* Note: 'end' is always < 'size' */ - do { - u8 tag = edid[i] >> 5; - u8 len = edid[i] & 0x1f; - - if (tag == 3 && len >= 5 && i + len <= end && - edid[i + 1] == 0x03 && - edid[i + 2] == 0x0c && - edid[i + 3] == 0x00) - return i + 4; - i += len + 1; - } while (i < end); - } - } - return 0; -} - -u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, - unsigned int *offset) -{ - unsigned int loc = cec_get_edid_spa_location(edid, size); - - if (offset) - *offset = loc; - if (loc == 0) - return CEC_PHYS_ADDR_INVALID; - return (edid[loc] << 8) | edid[loc + 1]; -} -EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr); - -void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr) -{ - unsigned int loc = cec_get_edid_spa_location(edid, size); - u8 sum = 0; - unsigned int i; - - if (loc == 0) - return; - edid[loc] = phys_addr >> 8; - edid[loc + 1] = phys_addr & 0xff; - loc &= ~0x7f; - - /* update the checksum */ - for (i = loc; i < loc + 127; i++) - sum += edid[i]; - edid[i] = 256 - sum; -} -EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr); - -u16 cec_phys_addr_for_input(u16 phys_addr, u8 input) -{ - /* Check if input is sane */ - if (WARN_ON(input == 0 || input > 0xf)) - return CEC_PHYS_ADDR_INVALID; - - if (phys_addr == 0) - return input << 12; - - if ((phys_addr & 0x0fff) == 0) - return phys_addr | (input << 8); - - if ((phys_addr & 0x00ff) == 0) - return phys_addr | (input << 4); - - if ((phys_addr & 0x000f) == 0) - return phys_addr | input; - - /* - * All nibbles are used so no valid physical addresses can be assigned - * to the input. - */ - return CEC_PHYS_ADDR_INVALID; -} -EXPORT_SYMBOL_GPL(cec_phys_addr_for_input); - -int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port) -{ - int i; - - if (parent) - *parent = phys_addr; - if (port) - *port = 0; - if (phys_addr == CEC_PHYS_ADDR_INVALID) - return 0; - for (i = 0; i < 16; i += 4) - if (phys_addr & (0xf << i)) - break; - if (i == 16) - return 0; - if (parent) - *parent = phys_addr & (0xfff0 << i); - if (port) - *port = (phys_addr >> i) & 0xf; - for (i += 4; i < 16; i += 4) - if ((phys_addr & (0xf << i)) == 0) - return -EINVAL; - return 0; -} -EXPORT_SYMBOL_GPL(cec_phys_addr_validate); diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index dd2078b27a419e6a4cd5afeb9cd9e48220747b63..2424680f71c3db3abb62065251928a567237fa12 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c @@ -123,6 +123,8 @@ void cec_notifier_unregister(struct cec_notifier *n) { mutex_lock(&n->lock); n->callback = NULL; + n->cec_adap->notifier = NULL; + n->cec_adap = NULL; mutex_unlock(&n->lock); cec_notifier_put(n); } diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c index 6e311424f0dc5030ab33571bdb9a4d4958093cdd..8f987bc0dd883096c29851602d03ca2c62411a05 100644 --- a/drivers/media/cec/cec-pin.c +++ b/drivers/media/cec/cec-pin.c @@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts) break; /* Was the message ACKed? */ ack = cec_msg_is_broadcast(&pin->tx_msg) ? 
v : !v; - if (!ack && !pin->tx_ignore_nack_until_eom && - pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) { + if (!ack && (!pin->tx_ignore_nack_until_eom || + pin->tx_bit / 10 == pin->tx_msg.len - 1) && + !pin->tx_post_eom) { /* * Note: the CEC spec is ambiguous regarding * what action to take when a NACK appears @@ -935,6 +936,17 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer) /* Start bit, switch to receive state */ pin->ts = ts; pin->state = CEC_ST_RX_START_BIT_LOW; + /* + * If a transmit is pending, then that transmit should + * use a signal free time of no more than + * CEC_SIGNAL_FREE_TIME_NEW_INITIATOR since it will + * have a new initiator due to the receive that is now + * starting. + */ + if (pin->tx_msg.len && pin->tx_signal_free_time > + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR) + pin->tx_signal_free_time = + CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; break; } if (ktime_to_ns(pin->ts) == 0) @@ -1157,6 +1169,15 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts, { struct cec_pin *pin = adap->pin; + /* + * If a receive is in progress, then this transmit should use + * a signal free time of max CEC_SIGNAL_FREE_TIME_NEW_INITIATOR + * since when it starts transmitting it will have a new initiator. + */ + if (pin->state != CEC_ST_IDLE && + signal_free_time > CEC_SIGNAL_FREE_TIME_NEW_INITIATOR) + signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; + pin->tx_signal_free_time = signal_free_time; pin->tx_extra_bytes = 0; pin->tx_msg = *msg; diff --git a/drivers/media/common/saa7146/Kconfig b/drivers/media/common/saa7146/Kconfig index 769c6f8142d2cd750b9e520ecaaed6a24a525bfa..2fa2b2b8e9119999f7257d20f34cf64987177a47 100644 --- a/drivers/media/common/saa7146/Kconfig +++ b/drivers/media/common/saa7146/Kconfig @@ -5,5 +5,6 @@ config VIDEO_SAA7146 config VIDEO_SAA7146_VV tristate depends on VIDEO_V4L2 + depends on BROKEN select VIDEOBUF_DMA_SG select VIDEO_SAA7146 diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c index 3a3dc23c560c86862d0215d19269db855da64fe0..a4341205c197d9fdf90a1f80de12f4843a688167 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c @@ -602,14 +602,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 }, - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + 
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, @@ -658,14 +658,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 }, - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, @@ -714,14 +714,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 }, - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + 
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 }, @@ -770,14 +770,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 }, - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][1] = { 2989, 3120, 1180 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][2] = { 1913, 3011, 3009 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][3] = { 1836, 3099, 1105 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][4] = { 2627, 413, 2966 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][5] = { 2576, 943, 951 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][6] = { 1026, 0, 2942 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 }, [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 }, @@ -826,14 +826,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 776 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][2] = { 1068, 3033, 3033 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][3] = { 1068, 3033, 776 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][4] = { 2977, 851, 3048 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][5] = { 2977, 851, 851 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3048 }, - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + 
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][2] = { 1068, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][3] = { 1068, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][4] = { 2977, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][5] = { 2977, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 }, [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 }, @@ -882,14 +882,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 }, - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 }, @@ -922,62 +922,62 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, - 
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 1063 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 1828, 3033, 3033 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 1828, 3033, 1063 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 2633, 851, 2979 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 2633, 851, 851 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 2979 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, - 
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][2] = { 1828, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][3] = { 1828, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][4] = { 2633, 851, 2979 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][5] = { 2633, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 2979 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 
1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 }, @@ -994,14 +994,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][1] = { 2976, 3018, 1315 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][2] = { 2024, 2942, 3011 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][3] = { 1930, 2926, 1256 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][4] = { 2563, 1227, 2916 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][5] = { 2494, 1183, 943 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][6] = { 1073, 916, 2894 }, - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][1] = { 2976, 3018, 1315 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][2] = { 2024, 2942, 3011 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][3] = { 1930, 2926, 1256 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][4] = { 2563, 1227, 2916 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][5] = { 2494, 1183, 943 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][6] = { 1073, 916, 2894 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 }, [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 }, @@ -1050,14 +1050,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 
2860, 1050, 2939 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 }, - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][1] = { 3029, 3028, 1255 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][2] = { 1406, 2988, 3011 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][3] = { 1398, 2983, 1190 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][4] = { 2860, 1050, 2939 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][5] = { 2857, 1033, 945 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][6] = { 866, 873, 2916 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 }, [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 }, @@ -1128,7 +1128,7 @@ static const double rec709_to_240m[3][3] = { { 0.0016327, 0.0044133, 0.9939540 }, }; -static const double rec709_to_adobergb[3][3] = { +static const double rec709_to_oprgb[3][3] = { { 0.7151627, 0.2848373, -0.0000000 }, { 0.0000000, 1.0000000, 0.0000000 }, { -0.0000000, 0.0411705, 0.9588295 }, @@ -1195,7 +1195,7 @@ static double transfer_rec709_to_rgb(double v) return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45); } -static double transfer_rgb_to_adobergb(double v) +static double transfer_rgb_to_oprgb(double v) { return pow(v, 1.0 / 2.19921875); } @@ -1251,8 +1251,8 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func, case V4L2_COLORSPACE_470_SYSTEM_M: mult_matrix(r, g, b, rec709_to_ntsc1953); break; - case V4L2_COLORSPACE_ADOBERGB: - mult_matrix(r, g, b, rec709_to_adobergb); + case V4L2_COLORSPACE_OPRGB: + mult_matrix(r, g, b, rec709_to_oprgb); break; case V4L2_COLORSPACE_BT2020: mult_matrix(r, g, b, rec709_to_bt2020); @@ -1284,10 +1284,10 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func, *g = transfer_rgb_to_srgb(*g); *b = transfer_rgb_to_srgb(*b); break; - case V4L2_XFER_FUNC_ADOBERGB: - *r = transfer_rgb_to_adobergb(*r); - *g = transfer_rgb_to_adobergb(*g); - *b = transfer_rgb_to_adobergb(*b); + case V4L2_XFER_FUNC_OPRGB: + *r = transfer_rgb_to_oprgb(*r); + *g = transfer_rgb_to_oprgb(*g); + *b = transfer_rgb_to_oprgb(*b); break; case V4L2_XFER_FUNC_DCI_P3: *r = transfer_rgb_to_dcip3(*r); @@ -1321,7 +1321,7 @@ int main(int argc, char **argv) V4L2_COLORSPACE_470_SYSTEM_BG, 0, V4L2_COLORSPACE_SRGB, - V4L2_COLORSPACE_ADOBERGB, + V4L2_COLORSPACE_OPRGB, V4L2_COLORSPACE_BT2020, 0, V4L2_COLORSPACE_DCI_P3, @@ -1336,7 +1336,7 @@ int main(int argc, char **argv) "V4L2_COLORSPACE_470_SYSTEM_BG", "", "V4L2_COLORSPACE_SRGB", - "V4L2_COLORSPACE_ADOBERGB", + "V4L2_COLORSPACE_OPRGB", "V4L2_COLORSPACE_BT2020", "", "V4L2_COLORSPACE_DCI_P3", @@ -1345,7 +1345,7 @@ int main(int argc, char **argv) "", "V4L2_XFER_FUNC_709", "V4L2_XFER_FUNC_SRGB", - "V4L2_XFER_FUNC_ADOBERGB", + "V4L2_XFER_FUNC_OPRGB", "V4L2_XFER_FUNC_SMPTE240M", "V4L2_XFER_FUNC_NONE", "V4L2_XFER_FUNC_DCI_P3", diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index abd4c788dffdea26b73acb0bb5191ff78c1a3c5d..84f8ca9bf0282f081fda41ad2696ecd918671524 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ 
b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) { unsigned pat; unsigned plane; + int ret = 0; tpg->max_line_width = max_w; for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) { @@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) tpg->lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->lines[pat][plane]) - return -ENOMEM; + if (!tpg->lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } if (plane == 0) continue; tpg->downsampled_lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->downsampled_lines[pat][plane]) - return -ENOMEM; + if (!tpg->downsampled_lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } } } for (plane = 0; plane < TPG_MAX_PLANES; plane++) { @@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) tpg->contrast_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->contrast_line[plane]) - return -ENOMEM; + if (!tpg->contrast_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->black_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->black_line[plane]) - return -ENOMEM; + if (!tpg->black_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->random_line[plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->random_line[plane]) - return -ENOMEM; + if (!tpg->random_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } } return 0; + +free_contrast_line: + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->contrast_line[plane]); + vfree(tpg->black_line[plane]); + vfree(tpg->random_line[plane]); + tpg->contrast_line[plane] = NULL; + tpg->black_line[plane] = NULL; + tpg->random_line[plane] = NULL; + } +free_lines: + for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->lines[pat][plane]); + tpg->lines[pat][plane] = NULL; + if (plane == 0) + continue; + vfree(tpg->downsampled_lines[pat][plane]); + tpg->downsampled_lines[pat][plane] = NULL; + } + return ret; } EXPORT_SYMBOL_GPL(tpg_alloc); @@ -1607,6 +1639,9 @@ static void tpg_precalculate_line(struct tpg_data *tpg) unsigned p; unsigned x; + if (WARN_ON_ONCE(!tpg->src_width || !tpg->scaled_width)) + return; + switch (tpg->pattern) { case TPG_PAT_GREEN: contrast = TPG_COLOR_100_RED; @@ -1738,7 +1773,7 @@ typedef struct { u16 __; u8 _; } __packed x24; unsigned s; \ \ for (s = 0; s < len; s++) { \ - u8 chr = font8x16[text[s] * 16 + line]; \ + u8 chr = font8x16[(u8)text[s] * 16 + line]; \ \ if (hdiv == 2 && tpg->hflip) { \ pos[3] = (chr & (0x01 << 6) ? fg : bg); \ @@ -1770,7 +1805,7 @@ typedef struct { u16 __; u8 _; } __packed x24; pos[7] = (chr & (0x01 << 0) ? fg : bg); \ } \ \ - pos += (tpg->hflip ? -8 : 8) / hdiv; \ + pos += (tpg->hflip ? -8 : 8) / (int)hdiv; \ } \ } \ } while (0) diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 5653e8eebe2b1cd564332100be923012c0d9b1bc..93d250db0b6f06616f0654f37a49c2c6b2e8741e 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -207,6 +207,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) for (plane = 0; plane < vb->num_planes; ++plane) { unsigned long size = PAGE_ALIGN(vb->planes[plane].length); + /* Did it wrap around? */ + if (size < vb->planes[plane].length) + goto free; + mem_priv = call_ptr_memop(vb, alloc, q->alloc_devs[plane] ? 
: q->dev, q->dma_attrs, size, q->dma_dir, q->gfp_flags); @@ -668,6 +672,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, return -EBUSY; } + if (q->waiting_in_dqbuf && *count) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + if (*count == 0 || q->num_buffers != 0 || (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) { /* @@ -797,9 +806,16 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } if (!q->num_buffers) { + if (q->waiting_in_dqbuf && *count) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; q->waiting_for_buffers = !q->is_output; + } else if (q->memory != memory) { + dprintk(1, "memory model mismatch\n"); + return -EINVAL; } num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); @@ -1463,6 +1479,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) for (;;) { int ret; + if (q->waiting_in_dqbuf) { + dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + if (!q->streaming) { dprintk(1, "streaming off, will not wait for buffers\n"); return -EINVAL; @@ -1490,6 +1511,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) return -EAGAIN; } + q->waiting_in_dqbuf = 1; /* * We are streaming and blocking, wait for another buffer to * become ready or for streamoff. Driver's lock is released to @@ -1510,6 +1532,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) * the locks or return an error if one occurred. */ call_void_qop(q, wait_finish, q); + q->waiting_in_dqbuf = 0; if (ret) { dprintk(1, "sleep was interrupted\n"); return ret; @@ -1755,10 +1778,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type) if (ret) return ret; ret = vb2_start_streaming(q); - if (ret) { - __vb2_queue_cancel(q); + if (ret) return ret; - } } q->streaming = 1; @@ -1932,9 +1953,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) return -EINVAL; } } + + mutex_lock(&q->mmap_lock); + if (vb2_fileio_is_active(q)) { dprintk(1, "mmap: file io in progress\n"); - return -EBUSY; + ret = -EBUSY; + goto unlock; } /* @@ -1942,7 +1967,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) - return ret; + goto unlock; vb = q->bufs[buffer]; @@ -1955,11 +1980,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) if (length < (vma->vm_end - vma->vm_start)) { dprintk(1, "MMAP invalid, as it would overflow buffer length\n"); - return -EINVAL; + ret = -EINVAL; + goto unlock; } - mutex_lock(&q->mmap_lock); ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); + +unlock: mutex_unlock(&q->mmap_lock); if (ret) return ret; @@ -2354,6 +2381,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ if (!data) return -EINVAL; + if (q->waiting_in_dqbuf) { + dprintk(3, "another dup()ped fd is %s\n", + read ? "reading" : "writing"); + return -EBUSY; + } + /* * Initialize emulator on first call. 
*/ diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 015e737095cdd6644b4e0332120aa3ee993eb1d0..e9bfea986cc47e4bba3064d9de1c53e6f59741ee 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, gfp_t gfp_flags) { unsigned int last_page = 0; - int size = buf->size; + unsigned long size = buf->size; while (size > 0) { struct page *pages; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 886a2d8d5c6c424790ecc13874e9ba4949c1161a..9d4a81bb0e5970e97fb56258cc8ed8dccd8fad4d 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -145,7 +145,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) return; check_once = true; - WARN_ON(1); pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); if (vb->vb2_queue->allow_zero_bytesused) diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index d548f98c7a67d48052c2a77f2c00e9ebb23f3eb1..6f0121aa546e457e0cfe47839b50a0f5007a2d35 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file) if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; + if (dmxdev->exit) { + mutex_unlock(&dmxdev->mutex); + return -ENODEV; + } + for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) break; @@ -1445,7 +1450,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init); void dvb_dmxdev_release(struct dmxdev *dmxdev) { + mutex_lock(&dmxdev->mutex); dmxdev->exit = 1; + mutex_unlock(&dmxdev->mutex); + if (dmxdev->dvbdev->users > 1) { wait_event(dmxdev->dvbdev->wait_queue, dmxdev->dvbdev->users == 1); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 4d371cea0d5df818fe64919b6c50aa2f52358505..fe152007c963e0d87711ef1eb64612d1a935eab1 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -162,6 +162,12 @@ struct dvb_ca_private { /* mutex serializing ioctls */ struct mutex ioctl_mutex; + + /* A mutex used when a device is disconnected */ + struct mutex remove_mutex; + + /* Whether the device is disconnected */ + int exit; }; static void dvb_ca_private_free(struct dvb_ca_private *ca) @@ -1717,12 +1723,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dprintk("%s\n", __func__); - if (!try_module_get(ca->pub->owner)) + mutex_lock(&ca->remove_mutex); + + if (ca->exit) { + mutex_unlock(&ca->remove_mutex); + return -ENODEV; + } + + if (!try_module_get(ca->pub->owner)) { + mutex_unlock(&ca->remove_mutex); return -EIO; + } err = dvb_generic_open(inode, file); if (err < 0) { module_put(ca->pub->owner); + mutex_unlock(&ca->remove_mutex); return err; } @@ -1747,6 +1763,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dvb_ca_private_get(ca); + mutex_unlock(&ca->remove_mutex); return 0; } @@ -1766,6 +1783,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + /* mark the CA device as closed */ ca->open = 0; dvb_ca_en50221_thread_update_delay(ca); @@ -1776,6 +1795,13 @@ static int dvb_ca_en50221_io_release(struct 
inode *inode, struct file *file) dvb_ca_private_put(ca); + if (dvbdev->users == 1 && ca->exit == 1) { + mutex_unlock(&ca->remove_mutex); + wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&ca->remove_mutex); + } + return err; } @@ -1900,6 +1926,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, } mutex_init(&ca->ioctl_mutex); + mutex_init(&ca->remove_mutex); if (signal_pending(current)) { ret = -EINTR; @@ -1942,6 +1969,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + ca->exit = 1; + mutex_unlock(&ca->remove_mutex); + + if (ca->dvbdev->users < 1) + wait_event(ca->dvbdev->wait_queue, + ca->dvbdev->users == 1); + /* shutdown the thread if there was one */ kthread_stop(ca->thread); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index c4e7ebfe4d2955c9d9eb8b0cd55e1302269d7f4b..3a0c794f7b80e984e9257081d0d5d10b35873c3c 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -164,6 +164,9 @@ static void dvb_frontend_free(struct kref *ref) static void dvb_frontend_put(struct dvb_frontend *fe) { + /* call detach before dropping the reference count */ + if (fe->ops.detach) + fe->ops.detach(fe); /* * Check if the frontend was registered, as otherwise * kref was not initialized yet. @@ -301,14 +304,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, } if (events->eventw == events->eventr) { - int ret; + struct wait_queue_entry wait; + int ret = 0; if (flags & O_NONBLOCK) return -EWOULDBLOCK; - ret = wait_event_interruptible(events->wait_queue, - dvb_frontend_test_event(fepriv, events)); - + init_waitqueue_entry(&wait, current); + add_wait_queue(&events->wait_queue, &wait); + while (!dvb_frontend_test_event(fepriv, events)) { + wait_woken(&wait, TASK_INTERRUPTIBLE, 0); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + remove_wait_queue(&events->wait_queue, &wait); if (ret < 0) return ret; } @@ -3035,7 +3046,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe) dvb_frontend_invoke_release(fe, fe->ops.release_sec); dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release); dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release); - dvb_frontend_invoke_release(fe, fe->ops.detach); dvb_frontend_put(fe); } EXPORT_SYMBOL(dvb_frontend_detach); diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 10f78109bb3fc905c6e730e6a5326ea937954be4..d8bdca2bf56631d8af107aac61aad8215ba5f135 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -1562,15 +1562,43 @@ static long dvb_net_ioctl(struct file *file, return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); } +static int locked_dvb_net_open(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_net *dvbnet = dvbdev->priv; + int ret; + + if (mutex_lock_interruptible(&dvbnet->remove_mutex)) + return -ERESTARTSYS; + + if (dvbnet->exit) { + mutex_unlock(&dvbnet->remove_mutex); + return -ENODEV; + } + + ret = dvb_generic_open(inode, file); + + mutex_unlock(&dvbnet->remove_mutex); + + return ret; +} + static int dvb_net_close(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_net *dvbnet = dvbdev->priv; + mutex_lock(&dvbnet->remove_mutex); + dvb_generic_release(inode, file); - if(dvbdev->users == 1 && dvbnet->exit == 1) + if (dvbdev->users == 1 && dvbnet->exit == 1) { + 
mutex_unlock(&dvbnet->remove_mutex); wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&dvbnet->remove_mutex); + } + return 0; } @@ -1578,7 +1606,7 @@ static int dvb_net_close(struct inode *inode, struct file *file) static const struct file_operations dvb_net_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_net_ioctl, - .open = dvb_generic_open, + .open = locked_dvb_net_open, .release = dvb_net_close, .llseek = noop_llseek, }; @@ -1597,10 +1625,13 @@ void dvb_net_release (struct dvb_net *dvbnet) { int i; + mutex_lock(&dvbnet->remove_mutex); dvbnet->exit = 1; + mutex_unlock(&dvbnet->remove_mutex); + if (dvbnet->dvbdev->users < 1) wait_event(dvbnet->dvbdev->wait_queue, - dvbnet->dvbdev->users==1); + dvbnet->dvbdev->users == 1); dvb_unregister_device(dvbnet->dvbdev); @@ -1619,6 +1650,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet, int i; mutex_init(&dvbnet->ioctl_mutex); + mutex_init(&dvbnet->remove_mutex); dvbnet->demux = dmx; for (i=0; i static DEFINE_MUTEX(dvbdev_mutex); +static LIST_HEAD(dvbdevfops_list); static int dvbdev_debug; module_param(dvbdev_debug, int, 0644); @@ -241,6 +242,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev) if (dvbdev->adapter->conn) { media_device_unregister_entity(dvbdev->adapter->conn); + kfree(dvbdev->adapter->conn); dvbdev->adapter->conn = NULL; kfree(dvbdev->adapter->conn_pads); dvbdev->adapter->conn_pads = NULL; @@ -339,8 +341,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, if (npads) { dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads), GFP_KERNEL); - if (!dvbdev->pads) + if (!dvbdev->pads) { + kfree(dvbdev->entity); return -ENOMEM; + } } switch (type) { @@ -455,14 +459,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, enum dvb_device_type type, int demux_sink_pads) { struct dvb_device *dvbdev; - struct file_operations *dvbdevfops; + struct file_operations *dvbdevfops = NULL; + struct dvbdevfops_node *node = NULL, *new_node = NULL; struct device *clsdev; int minor; int id, ret; mutex_lock(&dvbdev_register_lock); - if ((id = dvbdev_get_free_id (adap, type)) < 0){ + if ((id = dvbdev_get_free_id (adap, type)) < 0) { mutex_unlock(&dvbdev_register_lock); *pdvbdev = NULL; pr_err("%s: couldn't find free device id\n", __func__); @@ -470,18 +475,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } *pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL); - if (!dvbdev){ mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } - dvbdevfops = kzalloc(sizeof(struct file_operations), GFP_KERNEL); + /* + * When a device of the same type is probe()d more than once, + * the first allocated fops are used. This prevents memory leaks + * that can occur when the same device is probe()d repeatedly. 
+ */ + list_for_each_entry(node, &dvbdevfops_list, list_head) { + if (node->fops->owner == adap->module && + node->type == type && + node->template == template) { + dvbdevfops = node->fops; + break; + } + } - if (!dvbdevfops){ - kfree (dvbdev); - mutex_unlock(&dvbdev_register_lock); - return -ENOMEM; + if (dvbdevfops == NULL) { + dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); + if (!dvbdevfops) { + kfree(dvbdev); + *pdvbdev = NULL; + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL); + if (!new_node) { + kfree(dvbdevfops); + kfree(dvbdev); + *pdvbdev = NULL; + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node->fops = dvbdevfops; + new_node->type = type; + new_node->template = template; + list_add_tail (&new_node->list_head, &dvbdevfops_list); } memcpy(dvbdev, template, sizeof(struct dvb_device)); @@ -491,21 +525,21 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, dvbdev->priv = priv; dvbdev->fops = dvbdevfops; init_waitqueue_head (&dvbdev->wait_queue); - - memcpy(dvbdevfops, template->fops, sizeof(struct file_operations)); dvbdevfops->owner = adap->module; - list_add_tail (&dvbdev->list_head, &adap->device_list); - down_write(&minor_rwsem); #ifdef CONFIG_DVB_DYNAMIC_MINORS for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; - if (minor == MAX_DVB_MINORS) { - kfree(dvbdevfops); + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } kfree(dvbdev); + *pdvbdev = NULL; up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); return -EINVAL; @@ -513,37 +547,48 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, #else minor = nums2minor(adap->num, type, id); #endif - dvbdev->minor = minor; dvb_minors[minor] = dvbdev; up_write(&minor_rwsem); - ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); if (ret) { pr_err("%s: dvb_register_media_device failed to create the mediagraph\n", __func__); - + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } dvb_media_device_free(dvbdev); - kfree(dvbdevfops); kfree(dvbdev); + *pdvbdev = NULL; up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); return ret; } - mutex_unlock(&dvbdev_register_lock); - clsdev = device_create(dvb_class, adap->device, MKDEV(DVB_MAJOR, minor), dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id); if (IS_ERR(clsdev)) { pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n", __func__, adap->num, dnames[type], id, PTR_ERR(clsdev)); + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } + dvb_media_device_free(dvbdev); + kfree(dvbdev); + *pdvbdev = NULL; + mutex_unlock(&dvbdev_register_lock); return PTR_ERR(clsdev); } + dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n", adap->num, dnames[type], id, minor, minor); + mutex_unlock(&dvbdev_register_lock); return 0; } EXPORT_SYMBOL(dvb_register_device); @@ -572,7 +617,6 @@ void dvb_free_device(struct dvb_device *dvbdev) if (!dvbdev) return; - kfree (dvbdev->fops); kfree (dvbdev); } EXPORT_SYMBOL(dvb_free_device); @@ -1054,9 +1098,17 @@ static int __init init_dvbdev(void) static void __exit exit_dvbdev(void) { + struct dvbdevfops_node *node, *next; + class_destroy(dvb_class); cdev_del(&dvb_device_cdev); unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS); + + list_for_each_entry_safe(node, next, 
&dvbdevfops_list, list_head) { + list_del (&node->list_head); + kfree(node->fops); + kfree(node); + } } subsys_initcall(init_dvbdev); diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 9628d4067fe19c8ea64bd0f616ddd410c9e47e41..4dae309fdb0061381cc1b2193c4959c67ad9978e 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -12287,7 +12287,8 @@ struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c) if (state == NULL) goto error; - demod = kmalloc(sizeof(struct drx_demod_instance), GFP_KERNEL); + demod = kmemdup(&drxj_default_demod_g, + sizeof(struct drx_demod_instance), GFP_KERNEL); if (demod == NULL) goto error; @@ -12311,8 +12312,6 @@ struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c) state->demod = demod; /* setup the demod data */ - memcpy(demod, &drxj_default_demod_g, sizeof(struct drx_demod_instance)); - demod->my_i2c_dev_addr = demod_addr; demod->my_common_attr = demod_comm_attr; demod->my_i2c_dev_addr->user_data = state; diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 6d4b2eec67b4fdde3fb78f7c8da0d2bb9531b22b..ee830c76e4b30b69361b5fbdc4d2708cc71463e3 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -43,8 +44,7 @@ struct dvb_pll_priv { }; #define DVB_PLL_MAX 64 - -static unsigned int dvb_pll_devcount; +static DEFINE_IDA(pll_ida); static int debug; module_param(debug, int, 0644); @@ -80,8 +80,8 @@ struct dvb_pll_desc { static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { .name = "Thomson dtt7579", - .min = 177000000, - .max = 858000000, + .min = 177 * MHz, + .max = 858 * MHz, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, .count = 4, @@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = { .name = "Thomson dtt759x", - .min = 177000000, - .max = 896000000, + .min = 177 * MHz, + .max = 896 * MHz, .set = thomson_dtt759x_bw, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0x84, 0x03 }, @@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { .name = "Thomson dtt7520x", - .min = 185000000, - .max = 900000000, + .min = 185 * MHz, + .max = 900 * MHz, .set = thomson_dtt7520x_bw, .iffreq = 36166667, .count = 7, @@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { static const struct dvb_pll_desc dvb_pll_lg_z201 = { .name = "LG z201", - .min = 174000000, - .max = 862000000, + .min = 174 * MHz, + .max = 862 * MHz, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, .count = 5, @@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = { static const struct dvb_pll_desc dvb_pll_unknown_1 = { .name = "unknown 1", /* used by dntv live dvb-t */ - .min = 174000000, - .max = 862000000, + .min = 174 * MHz, + .max = 862 * MHz, .iffreq= 36166667, .count = 9, .entries = { @@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = { */ static const struct dvb_pll_desc dvb_pll_tua6010xs = { .name = "Infineon TUA6010XS", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36125000, .count = 3, .entries = { @@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = { /* Panasonic env57h1xd5 (some Philips PLL ?) 
*/ static const struct dvb_pll_desc dvb_pll_env57h1xd5 = { .name = "Panasonic ENV57H1XD5", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36125000, .count = 4, .entries = { @@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tda665x = { .name = "Philips TDA6650/TDA6651", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .set = tda665x_bw, .iffreq= 36166667, .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, @@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tua6034 = { .name = "Infineon TUA6034", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq= 36166667, .count = 3, .set = tua6034_bw, @@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_tded4 = { .name = "ALPS TDED4", - .min = 47000000, - .max = 863000000, + .min = 47 * MHz, + .max = 863 * MHz, .iffreq= 36166667, .set = tded4_bw, .count = 4, @@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = { */ static const struct dvb_pll_desc dvb_pll_tdhu2 = { .name = "ALPS TDHU2", - .min = 54000000, - .max = 864000000, + .min = 54 * MHz, + .max = 864 * MHz, .iffreq= 44000000, .count = 4, .entries = { @@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = { */ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { .name = "Samsung TBMV30111IN / TBMV30712IN1", - .min = 54000000, - .max = 860000000, + .min = 54 * MHz, + .max = 860 * MHz, .iffreq= 44000000, .count = 6, .entries = { @@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = { */ static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { .name = "Philips SD1878", - .min = 950000, - .max = 2150000, + .min = 950 * MHz, + .max = 2150 * MHz, .iffreq= 249, /* zero-IF, offset 249 is to round up */ .count = 4, .entries = { @@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf) static const struct dvb_pll_desc dvb_pll_opera1 = { .name = "Opera Tuner", - .min = 900000, - .max = 2250000, + .min = 900 * MHz, + .max = 2250 * MHz, .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, .iffreq= 0, @@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { .name = "Samsung DTOS403IH102A", - .min = 44250000, - .max = 858000000, + .min = 44250 * kHz, + .max = 858 * MHz, .iffreq = 36125000, .count = 8, .set = samsung_dtos403ih102a_set, @@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { .name = "Samsung TDTC9251DH0", - .min = 48000000, - .max = 863000000, + .min = 48 * MHz, + .max = 863 * MHz, .iffreq = 36166667, .count = 3, .entries = { @@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { .name = "Samsung TBDU18132", - .min = 950000, - .max = 2150000, /* guesses */ + .min = 950 * MHz, + .max = 2150 * MHz, /* guesses */ .iffreq = 0, .count = 2, .entries = { @@ -500,8 +500,8 @@ static const struct 
dvb_pll_desc dvb_pll_samsung_tbdu18132 = { /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { .name = "Samsung TBMU24112", - .min = 950000, - .max = 2150000, /* guesses */ + .min = 950 * MHz, + .max = 2150 * MHz, /* guesses */ .iffreq = 0, .count = 2, .entries = { @@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { .name = "ALPS TDEE4", - .min = 47000000, - .max = 862000000, + .min = 47 * MHz, + .max = 862 * MHz, .iffreq = 36125000, .count = 4, .entries = { @@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = { /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */ static const struct dvb_pll_desc dvb_pll_tua6034_friio = { .name = "Infineon TUA6034 ISDB-T (Friio)", - .min = 90000000, - .max = 770000000, + .min = 90 * MHz, + .max = 770 * MHz, .iffreq = 57000000, .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 }, .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b }, @@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = { /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */ static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = { .name = "Philips TDA6651 ISDB-T (EarthSoft PT1)", - .min = 90000000, - .max = 770000000, + .min = 90 * MHz, + .max = 770 * MHz, .iffreq = 57000000, .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 }, .count = 10, @@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf, u32 div; int i; - if (frequency && (frequency < desc->min || frequency > desc->max)) - return -EINVAL; - for (i = 0; i < desc->count; i++) { if (frequency > desc->entries[i].limit) continue; @@ -799,7 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct dvb_pll_priv *priv = NULL; int ret; const struct dvb_pll_desc *desc; - struct dtv_frontend_properties *c = &fe->dtv_property_cache; + int nr; b1 = kmalloc(1, GFP_KERNEL); if (!b1) @@ -808,9 +805,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, b1[0] = 0; msg.buf = b1; - if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && - (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) - pll_desc_id = id[dvb_pll_devcount]; + nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL); + if (nr < 0) { + kfree(b1); + return NULL; + } + + if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list)) + pll_desc_id = id[nr]; BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list)); @@ -821,42 +823,32 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer (i2c, &msg, 1); - if (ret != 1) { - kfree(b1); - return NULL; - } + if (ret != 1) + goto out; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); - if (!priv) { - kfree(b1); - return NULL; - } + if (!priv) + goto out; priv->pll_i2c_address = pll_addr; priv->i2c = i2c; priv->pll_desc = desc; - priv->nr = dvb_pll_devcount++; + priv->nr = nr; memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, sizeof(struct dvb_tuner_ops)); strncpy(fe->ops.tuner_ops.info.name, desc->name, sizeof(fe->ops.tuner_ops.info.name)); - switch (c->delivery_system) { - case SYS_DVBS: - case SYS_DVBS2: - case SYS_TURBO: - case SYS_ISDBS: - fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz; - fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz; - break; - default: - 
fe->ops.tuner_ops.info.frequency_min_hz = desc->min; - fe->ops.tuner_ops.info.frequency_max_hz = desc->max; - } + + fe->ops.tuner_ops.info.frequency_min_hz = desc->min; + fe->ops.tuner_ops.info.frequency_max_hz = desc->max; + + dprintk("%s tuner, frequency range: %u...%u\n", + desc->name, desc->min, desc->max); if (!desc->initdata) fe->ops.tuner_ops.init = NULL; @@ -877,6 +869,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, kfree(b1); return fe; +out: + kfree(b1); + ida_simple_remove(&pll_ida, nr); + + return NULL; } EXPORT_SYMBOL(dvb_pll_attach); @@ -913,9 +910,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id) static int dvb_pll_remove(struct i2c_client *client) { - struct dvb_frontend *fe; + struct dvb_frontend *fe = i2c_get_clientdata(client); + struct dvb_pll_priv *priv = fe->tuner_priv; - fe = i2c_get_clientdata(client); + ida_simple_remove(&pll_ida, priv->nr); dvb_pll_release(fe); return 0; } diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index 0e1f5daaf20cdfc39e4203ae3661718790655827..4607e255e9f8793a7642164c81f6ab8a846a0ed8 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -2205,6 +2205,11 @@ static int lgdt3306a_probe(struct i2c_client *client, struct dvb_frontend *fe; int ret; + if (!client->dev.platform_data) { + dev_err(&client->dev, "platform data is mandatory\n"); + return -EINVAL; + } + config = kzalloc(sizeof(struct lgdt3306a_config), GFP_KERNEL); if (config == NULL) { ret = -ENOMEM; diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c index 10d584ce538d71bf84729a3b2527edc27418de12..9ee1c1360ab8435a3c5a6044eee612d3730482f3 100644 --- a/drivers/media/dvb-frontends/lgdt330x.c +++ b/drivers/media/dvb-frontends/lgdt330x.c @@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe, if ((buf[0] & 0x02) == 0x00) *status |= FE_HAS_SYNC; - if ((buf[0] & 0xfd) == 0x01) + if ((buf[0] & 0x01) == 0x01) *status |= FE_HAS_VITERBI | FE_HAS_LOCK; break; default: diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index dffd2d4bf1c8b96b0735707d449f46a6bb125033..c25c9279740890a6b08760dfd0d541f9f74cf336 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u16 u16tmp; u32 tuner_frequency_khz, target_mclk; s32 s32tmp; + static const struct reg_sequence reset_buf[] = { + {0x07, 0x80}, {0x07, 0x00} + }; dev_dbg(&client->dev, "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", @@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) } /* reset */ - ret = regmap_write(dev->regmap, 0x07, 0x80); - if (ret) - goto err; - - ret = regmap_write(dev->regmap, 0x07, 0x00); + ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2); if (ret) goto err; diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index c0659568471b8d3663b3cde025199f842eb4cbb0..8cda25902d63a08cb764fd161ff1b1d26a2da213 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -619,7 +619,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on index, pid, onoff); /* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0; if (onoff) 
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 2f1f5cbaf03c46bb312d9962d6a792d9b13acd76..a7a6c4e5d1cc53f06b491bc318717dc33f98e8f2 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -995,7 +995,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, index, pid, onoff, dev->slave_ts); /* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0; if (onoff) diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c index feacd8da421dafc21daa4a9c9b3dfefce0132b16..d55d8f169dca64b5ed38301606a5e6ffdcac088c 100644 --- a/drivers/media/dvb-frontends/si2165.c +++ b/drivers/media/dvb-frontends/si2165.c @@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state) static int si2165_wait_init_done(struct si2165_state *state) { - int ret = -EINVAL; + int ret; u8 val = 0; int i; for (i = 0; i < 3; ++i) { - si2165_readreg8(state, REG_INIT_DONE, &val); + ret = si2165_readreg8(state, REG_INIT_DONE, &val); + if (ret < 0) + return ret; if (val == 0x01) return 0; usleep_range(1000, 50000); } dev_err(&state->client->dev, "init_done was not set\n"); - return ret; + return -EINVAL; } static int si2165_upload_firmware_block(struct si2165_state *state, diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c index 9a9915f7148353de94821f007799d41155c8373d..3ef31a3a27ffb6863196211bbf219ebdceef6508 100644 --- a/drivers/media/dvb-frontends/stv0297.c +++ b/drivers/media/dvb-frontends/stv0297.c @@ -694,7 +694,7 @@ static const struct dvb_frontend_ops stv0297_ops = { .delsys = { SYS_DVBC_ANNEX_A }, .info = { .name = "ST STV0297 DVB-C", - .frequency_min_hz = 470 * MHz, + .frequency_min_hz = 47 * MHz, .frequency_max_hz = 862 * MHz, .frequency_stepsize_hz = 62500, .symbol_rate_min = 870000, diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 5b91e740e13581b05c08fc092da920f5043e3022..a6f2924c137f59aab96b486c2568a3be92afde15 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -128,50 +128,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S } }; -static -int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) +static noinline_for_stack +int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - u8 buf[MAX_XFER_SIZE]; + u8 buf[3] = { MSB(reg), LSB(reg), data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, - .len = len + 2 + .len = 3, }; int ret; - if (2 + len > sizeof(buf)) { - printk(KERN_WARNING - "%s: i2c wr reg=%04x: len=%d is too big!\n", - KBUILD_MODNAME, reg, len); - return -EINVAL; - } - - - buf[0] = MSB(reg); - buf[1] = LSB(reg); - memcpy(buf + 2, data, len); - if (i2cdebug) printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__, - state->config->demod_address, reg, buf[2]); + state->config->demod_address, reg, data); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n", - __func__, state->config->demod_address, reg, buf[2]); + __func__, state->config->demod_address, reg, data); return (ret != 1) ? 
-EREMOTEIO : 0; } -static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) -{ - u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ - - return stv0367_writeregs(state, reg, &tmp, 1); -} - -static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) +static noinline_for_stack +u8 stv0367_readreg(struct stv0367_state *state, u16 reg) { u8 b0[] = { 0, 0 }; u8 b1[] = { 0 }; diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c index c01d60a88af295d72cbd44f094311e465b76e000..9e6a20fcac7698ceee15ff9355638c5108a86b21 100644 --- a/drivers/media/dvb-frontends/tda10048.c +++ b/drivers/media/dvb-frontends/tda10048.c @@ -422,6 +422,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw) struct tda10048_config *config = &state->config; int i; u32 if_freq_khz; + u64 sample_freq; dprintk(1, "%s(bw = %d)\n", __func__, bw); @@ -463,9 +464,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw) dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor); /* Calculate the sample frequency */ - state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45); - state->sample_freq /= (state->pll_nfactor + 1); - state->sample_freq /= (state->pll_pfactor + 4); + sample_freq = state->xtal_hz; + sample_freq *= state->pll_mfactor + 45; + do_div(sample_freq, state->pll_nfactor + 1); + do_div(sample_freq, state->pll_pfactor + 4); + state->sample_freq = sample_freq; dprintk(1, "- sample_freq = %d\n", state->sample_freq); /* Update the I/F */ diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c index b233b7be0b84aaee1eb621660c02a22614e988fa..e6aaf4973aef47bdf8e84628bf5aa471734b2b96 100644 --- a/drivers/media/dvb-frontends/tua6100.c +++ b/drivers/media/dvb-frontends/tua6100.c @@ -75,8 +75,8 @@ static int tua6100_set_params(struct dvb_frontend *fe) struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 }; struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 }; -#define _R 4 -#define _P 32 +#define _R_VAL 4 +#define _P_VAL 32 #define _ri 4000000 // setup register 0 @@ -91,14 +91,14 @@ static int tua6100_set_params(struct dvb_frontend *fe) else reg1[1] = 0x0c; - if (_P == 64) + if (_P_VAL == 64) reg1[1] |= 0x40; if (c->frequency >= 1525000) reg1[1] |= 0x80; // register 2 - reg2[1] = (_R >> 8) & 0x03; - reg2[2] = _R; + reg2[1] = (_R_VAL >> 8) & 0x03; + reg2[2] = _R_VAL; if (c->frequency < 1455000) reg2[1] |= 0x1c; else if (c->frequency < 1630000) @@ -110,18 +110,18 @@ static int tua6100_set_params(struct dvb_frontend *fe) * The N divisor ratio (note: c->frequency is in kHz, but we * need it in Hz) */ - prediv = (c->frequency * _R) / (_ri / 1000); - div = prediv / _P; + prediv = (c->frequency * _R_VAL) / (_ri / 1000); + div = prediv / _P_VAL; reg1[1] |= (div >> 9) & 0x03; reg1[2] = div >> 1; reg1[3] = (div << 7); - priv->frequency = ((div * _P) * (_ri / 1000)) / _R; + priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL; // Finally, calculate and store the value for A - reg1[3] |= (prediv - (div*_P)) & 0x7f; + reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f; -#undef _R -#undef _P +#undef _R_VAL +#undef _P_VAL #undef _ri if (fe->ops.i2c_gate_ctrl) diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c index 1c933b2cf7603cdc045c49671997d5f13a4e9a37..8c31cf90c590719333bbfe2e8a4ac9c80a6d8730 100644 --- a/drivers/media/firewire/firedtv-avc.c +++ b/drivers/media/firewire/firedtv-avc.c @@ -968,7 +968,8 @@ 
static int get_ca_object_length(struct avc_response_frame *r) return r->operand[7]; } -int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len) +int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len) { struct avc_command_frame *c = (void *)fdtv->avc_data; struct avc_response_frame *r = (void *)fdtv->avc_data; @@ -1009,7 +1010,8 @@ int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len) return ret; } -int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len) +int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len) { struct avc_command_frame *c = (void *)fdtv->avc_data; struct avc_response_frame *r = (void *)fdtv->avc_data; @@ -1167,7 +1169,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length) read_pos += program_info_length; write_pos += program_info_length; } - while (read_pos < length) { + while (read_pos + 4 < length) { + if (write_pos + 4 >= sizeof(c->operand) - 4) { + ret = -EINVAL; + goto out; + } c->operand[write_pos++] = msg[read_pos++]; c->operand[write_pos++] = msg[read_pos++]; c->operand[write_pos++] = msg[read_pos++]; @@ -1179,13 +1185,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length) c->operand[write_pos++] = es_info_length >> 8; c->operand[write_pos++] = es_info_length & 0xff; if (es_info_length > 0) { + if (read_pos >= length) { + ret = -EINVAL; + goto out; + } pmt_cmd_id = msg[read_pos++]; if (pmt_cmd_id != 1 && pmt_cmd_id != 4) dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n", pmt_cmd_id); - if (es_info_length > sizeof(c->operand) - 4 - - write_pos) { + if (es_info_length > sizeof(c->operand) - 4 - write_pos || + es_info_length > length - read_pos) { ret = -EINVAL; goto out; } diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c index 8dc5a7495abeec407d8a1e3772fdcf06f819c1c1..14f779812d25026d1a05541ca324ee3416850c93 100644 --- a/drivers/media/firewire/firedtv-ci.c +++ b/drivers/media/firewire/firedtv-ci.c @@ -138,6 +138,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg) } else { data_length = msg->msg[3]; } + if (data_length > sizeof(msg->msg) - data_pos) + return -EINVAL; return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length); } diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h index 876cdec8329be30cc7f981c661535a83e44b8fc6..009905a199472ed3adc4db2c7764a8d4b291d08e 100644 --- a/drivers/media/firewire/firedtv.h +++ b/drivers/media/firewire/firedtv.h @@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst, struct dvb_diseqc_master_cmd *diseqcmd); void avc_remote_ctrl_work(struct work_struct *work); int avc_register_remote_control(struct firedtv *fdtv); -int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len); -int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len); +int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len); +int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info, + unsigned int *len); int avc_ca_reset(struct firedtv *fdtv); int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length); int avc_ca_get_time_date(struct firedtv *fdtv, int *interval); diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 82af97430e5b36c6bb81f05703dd8c3c9c6fc547..8b1ae1d6680b7ca5f97cb8dc9b4cf4bd05f4529c 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -60,7 +60,9 @@ config VIDEO_TDA1997X 
tristate "NXP TDA1997x HDMI receiver" depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API depends on SND_SOC + select HDMI select SND_PCM + select V4L2_FWNODE ---help--- V4L2 subdevice driver for the NXP TDA1997x HDMI receivers. @@ -610,6 +612,7 @@ config VIDEO_IMX274 tristate "Sony IMX274 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on MEDIA_CAMERA_SUPPORT + select REGMAP_I2C ---help--- This is a V4L2 sensor driver for the Sony IMX274 CMOS image sensor. diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index a94eb03d10d4e12d278207a1484686b6131df8f6..520b3c3bf48c2aaac4380e976addc4333c48fdbd 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/ obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o -obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o +obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o obj-$(CONFIG_VIDEO_VS6624) += vs6624.o obj-$(CONFIG_VIDEO_BT819) += bt819.o diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c index 5b008b0002c02c185bad782c2e3fe4f8e26a23b5..aa8b04cfed0f6c3c298bba33f48345710b0d8fe5 100644 --- a/drivers/media/i2c/ad9389b.c +++ b/drivers/media/i2c/ad9389b.c @@ -578,7 +578,7 @@ static const struct v4l2_dv_timings_cap ad9389b_timings_cap = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c index 6ca88daa0ecd79dad07c16bf7686c78089f15e13..65c3024c5f76f1588f6ab5f4108ca06e8109afc7 100644 --- a/drivers/media/i2c/adv748x/adv748x-core.c +++ b/drivers/media/i2c/adv748x/adv748x-core.c @@ -569,7 +569,8 @@ static int adv748x_parse_dt(struct adv748x_state *state) { struct device_node *ep_np = NULL; struct of_endpoint ep; - bool found = false; + bool out_found = false; + bool in_found = false; for_each_endpoint_of_node(state->dev->of_node, ep_np) { of_graph_parse_endpoint(ep_np, &ep); @@ -592,10 +593,17 @@ static int adv748x_parse_dt(struct adv748x_state *state) of_node_get(ep_np); state->endpoints[ep.port] = ep_np; - found = true; + /* + * At least one input endpoint and one output endpoint shall + * be defined. + */ + if (ep.port < ADV748X_PORT_TXA) + in_found = true; + else + out_found = true; } - return found ? 0 : -ENODEV; + return in_found && out_found ? 0 : -ENODEV; } static void adv748x_dt_cleanup(struct adv748x_state *state) @@ -627,6 +635,17 @@ static int adv748x_probe(struct i2c_client *client, state->i2c_clients[ADV748X_PAGE_IO] = client; i2c_set_clientdata(client, state); + /* + * We can not use container_of to get back to the state with two TXs; + * Initialize the TXs's fields unconditionally on the endpoint + * presence to access them later. 
+ */ + state->txa.state = state->txb.state = state; + state->txa.page = ADV748X_PAGE_TXA; + state->txb.page = ADV748X_PAGE_TXB; + state->txa.port = ADV748X_PORT_TXA; + state->txb.port = ADV748X_PORT_TXB; + /* Discover and process ports declared by the Device tree endpoints */ ret = adv748x_parse_dt(state); if (ret) { diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c index 469be87a3761feb5b775e4cf26aeac2600987f83..556e13c911a62842724096afcfd410c340f4d344 100644 --- a/drivers/media/i2c/adv748x/adv748x-csi2.c +++ b/drivers/media/i2c/adv748x/adv748x-csi2.c @@ -266,19 +266,10 @@ static int adv748x_csi2_init_controls(struct adv748x_csi2 *tx) int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx) { - struct device_node *ep; int ret; - /* We can not use container_of to get back to the state with two TXs */ - tx->state = state; - tx->page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; - - ep = state->endpoints[is_txa(tx) ? ADV748X_PORT_TXA : ADV748X_PORT_TXB]; - if (!ep) { - adv_err(state, "No endpoint found for %s\n", - is_txa(tx) ? "txa" : "txb"); - return -ENODEV; - } + if (!is_tx_enabled(tx)) + return 0; /* Initialise the virtual channel */ adv748x_csi2_set_virtual_channel(tx, 0); @@ -288,7 +279,7 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx) is_txa(tx) ? "txa" : "txb"); /* Ensure that matching is based upon the endpoint fwnodes */ - tx->sd.fwnode = of_fwnode_handle(ep); + tx->sd.fwnode = of_fwnode_handle(state->endpoints[tx->port]); /* Register internal ops for incremental subdev registration */ tx->sd.internal_ops = &adv748x_csi2_internal_ops; @@ -321,6 +312,9 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx) void adv748x_csi2_cleanup(struct adv748x_csi2 *tx) { + if (!is_tx_enabled(tx)) + return; + v4l2_async_unregister_subdev(&tx->sd); media_entity_cleanup(&tx->sd.entity); v4l2_ctrl_handler_free(&tx->ctrl_hdl); diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h index 65f83741277e1365cdf723a1221df693562da55e..1cf46c401664d5dfade36847ebe5910589c9ad0e 100644 --- a/drivers/media/i2c/adv748x/adv748x.h +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -82,6 +82,7 @@ struct adv748x_csi2 { struct adv748x_state *state; struct v4l2_mbus_framefmt format; unsigned int page; + unsigned int port; struct media_pad pads[ADV748X_CSI2_NR_PADS]; struct v4l2_ctrl_handler ctrl_hdl; @@ -91,6 +92,7 @@ struct adv748x_csi2 { #define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier) #define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd) +#define is_tx_enabled(_tx) ((_tx)->state->endpoints[(_tx)->port] != NULL) enum adv748x_hdmi_pads { ADV748X_HDMI_SINK, diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511-v4l2.c similarity index 99% rename from drivers/media/i2c/adv7511.c rename to drivers/media/i2c/adv7511-v4l2.c index 55c2ea0720d9e13dc846d5665602d21cbd5c9fdd..6869bb593a68275d36062268bcfc5e0a7af83859 100644 --- a/drivers/media/i2c/adv7511.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -5,6 +5,11 @@ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ +/* + * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog + * Device ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511). 
+ */ + #include #include @@ -130,7 +135,7 @@ static const struct v4l2_dv_timings_cap adv7511_timings_cap = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT, + V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT, ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, @@ -1355,10 +1360,10 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd, state->xfer_func = format->format.xfer_func; switch (format->format.colorspace) { - case V4L2_COLORSPACE_ADOBERGB: + case V4L2_COLORSPACE_OPRGB: c = HDMI_COLORIMETRY_EXTENDED; - ec = y ? HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 : - HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB; + ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 : + HDMI_EXTENDED_COLORIMETRY_OPRGB; break; case V4L2_COLORSPACE_SMPTE170M: c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 668be2bca57aa53ace00b92960aaa3dc3a5a1554..a4b0a89c7e7e632e703d2ea2724cecf56b2f8ab2 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -766,7 +766,7 @@ static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | @@ -777,7 +777,7 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | @@ -2284,8 +2284,10 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) state->aspect_ratio.numerator = 16; state->aspect_ratio.denominator = 9; - if (!state->edid.present) + if (!state->edid.present) { state->edid.blocks = 0; + cec_phys_addr_invalidate(state->cec_adap); + } v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n", __func__, edid->pad, state->edid.present); @@ -2295,8 +2297,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) edid->blocks = 2; return -E2BIG; } - pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc); - err = cec_phys_addr_validate(pa, &pa, NULL); + pa = v4l2_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc); + err = v4l2_phys_addr_validate(pa, &pa, NULL); if (err) return err; @@ -2474,7 +2476,7 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) "YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)", "xvYCC Bt.601", "xvYCC Bt.709", "YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)", - "sYCC", "Adobe YCC 601", "AdobeRGB", "invalid", "invalid", + "sYCC", "opYCC 601", "opRGB", "invalid", "invalid", "invalid", "invalid", "invalid" }; static const char * const rgb_quantization_range_txt[] = { diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 
4f8fbdd00e35eb01d5f3ec3d4fb92820249e5f18..58662ba92d4f8f79a27fbb5126c5d730da87ed7a 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -663,7 +663,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | @@ -674,7 +674,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | @@ -786,11 +786,13 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port) /* Disable I2C access to internal EDID ram from HDMI DDC ports */ rep_write_and_or(sd, 0x77, 0xf3, 0x00); - if (!state->hdmi_edid.present) + if (!state->hdmi_edid.present) { + cec_phys_addr_invalidate(state->cec_adap); return 0; + } - pa = cec_get_edid_phys_addr(edid, 256, &spa_loc); - err = cec_phys_addr_validate(pa, &pa, NULL); + pa = v4l2_get_edid_phys_addr(edid, 256, &spa_loc); + err = v4l2_phys_addr_validate(pa, &pa, NULL); if (err) return err; diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c index 91fae01d052bf5568cd646d6bf521062182a43ca..3dc2100470a1cdf5ecb5c2c1115c921d10be09b8 100644 --- a/drivers/media/i2c/dw9714.c +++ b/drivers/media/i2c/dw9714.c @@ -169,7 +169,8 @@ static int dw9714_probe(struct i2c_client *client) return 0; err_cleanup: - dw9714_subdev_cleanup(dw9714_dev); + v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm); + media_entity_cleanup(&dw9714_dev->sd.entity); dev_err(&client->dev, "Probe failed: %d\n", rval); return rval; } diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c index 8ba3920b6e2f470e2a3b514fa2e1ea2ccd3e7ff6..5477ba326d68108afba9a5741180c194d0ecc5dc 100644 --- a/drivers/media/i2c/dw9807-vcm.c +++ b/drivers/media/i2c/dw9807-vcm.c @@ -218,7 +218,8 @@ static int dw9807_probe(struct i2c_client *client) return 0; err_cleanup: - dw9807_subdev_cleanup(dw9807_dev); + v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm); + media_entity_cleanup(&dw9807_dev->sd.entity); return rval; } diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index f8c70f1a34feb09f2fddfba53471e8f9bd0b8409..8cc3bdb7f608c855f55e9f6c877ed07d7d2b3bd9 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -636,16 +636,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[]) static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val) { + unsigned int uint_val; int err; - err = regmap_read(priv->regmap, addr, (unsigned int *)val); + err = regmap_read(priv->regmap, addr, &uint_val); if (err) dev_err(&priv->client->dev, "%s : i2c read failed, addr = %x\n", __func__, addr); else dev_dbg(&priv->client->dev, "%s : addr 0x%x, val=0x%x\n", __func__, - addr, *val); + addr, uint_val); + + *val = uint_val; return err; } diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c index 
efda1aa95ca023d0fde2cb6ac7585444170f9659..7a7d3969af20e8d7d6f2e04a247e96a353f7d0d0 100644 --- a/drivers/media/i2c/mt9m111.c +++ b/drivers/media/i2c/mt9m111.c @@ -1014,6 +1014,8 @@ static int mt9m111_probe(struct i2c_client *client, mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; mt9m111->rect.width = MT9M111_MAX_WIDTH; mt9m111->rect.height = MT9M111_MAX_HEIGHT; + mt9m111->width = mt9m111->rect.width; + mt9m111->height = mt9m111->rect.height; mt9m111->fmt = &mt9m111_colour_fmts[0]; mt9m111->lastpage = -1; mutex_init(&mt9m111->power_lock); diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c index a66f6201f53c71e73306ff3d1942d47ba08d6fef..afd66d243403b31adfa75c6e1123f99bce0ffad2 100644 --- a/drivers/media/i2c/ov13858.c +++ b/drivers/media/i2c/ov13858.c @@ -1230,7 +1230,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl) * Applying V4L2 control value only happens * when power is up for streaming */ - if (pm_runtime_get_if_in_use(&client->dev) <= 0) + if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = 0; @@ -1612,7 +1612,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858) OV13858_NUM_OF_LINK_FREQS - 1, 0, link_freq_menu_items); - ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + if (ov13858->link_freq) + ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]); pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]); @@ -1635,7 +1636,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858) ov13858->hblank = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK, hblank, hblank, 1, hblank); - ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + if (ov13858->hblank) + ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; exposure_max = mode->vts_def - 8; ov13858->exposure = v4l2_ctrl_new_std( diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c index 4715edc8ca33e2ef2d73fbf8a6eb32ba5099ae5b..4b6be3b0fd5283723cac3b2424fdfb5f92acb2b7 100644 --- a/drivers/media/i2c/ov2659.c +++ b/drivers/media/i2c/ov2659.c @@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = { { REG_TIMING_YINC, 0x11 }, { REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, + { 0x370a, 0x12 }, { 0x3a03, 0xe8 }, { 0x3a09, 0x6f }, { 0x3a0b, 0x5d }, { 0x3a15, 0x9a }, + { REG_VFIFO_READ_START_H, 0x00 }, + { REG_VFIFO_READ_START_L, 0x80 }, + { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; @@ -1117,8 +1121,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd, if (ov2659_formats[index].code == mf->code) break; - if (index < 0) - return -EINVAL; + if (index < 0) { + index = 0; + mf->code = ov2659_formats[index].code; + } mf->colorspace = V4L2_COLORSPACE_SRGB; mf->field = V4L2_FIELD_NONE; @@ -1201,11 +1207,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on) goto unlock; } - ov2659_set_pixel_clock(ov2659); - ov2659_set_frame_size(ov2659); - ov2659_set_format(ov2659); - ov2659_set_streaming(ov2659, 1); - ov2659->streaming = on; + ret = ov2659_set_pixel_clock(ov2659); + if (!ret) + ret = ov2659_set_frame_size(ov2659); + if (!ret) + ret = ov2659_set_format(ov2659); + if (!ret) { + ov2659_set_streaming(ov2659, 1); + ov2659->streaming = on; + } unlock: mutex_unlock(&ov2659->lock); diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c index f753a1c333ef9b7f10963fca1f1f7b7817900997..d8798fb714ba8a6d3cbf44769ecd61c440aff2a0 100644 --- a/drivers/media/i2c/ov2680.c +++ b/drivers/media/i2c/ov2680.c @@ -568,10 +568,6 @@ static int 
ov2680_power_on(struct ov2680_dev *sensor) if (ret < 0) return ret; - ret = ov2680_mode_restore(sensor); - if (ret < 0) - goto disable; - sensor->is_enabled = true; /* Set clock lane into LP-11 state */ @@ -580,12 +576,6 @@ static int ov2680_power_on(struct ov2680_dev *sensor) ov2680_stream_disable(sensor); return 0; - -disable: - dev_err(dev, "failed to enable sensor: %d\n", ret); - ov2680_power_off(sensor); - - return ret; } static int ov2680_s_power(struct v4l2_subdev *sd, int on) @@ -606,6 +596,8 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on) ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); if (ret < 0) return ret; + + ret = ov2680_mode_restore(sensor); } return ret; @@ -1088,26 +1080,20 @@ static int ov2680_probe(struct i2c_client *client) mutex_init(&sensor->lock); - ret = ov2680_v4l2_init(sensor); + ret = ov2680_check_id(sensor); if (ret < 0) goto lock_destroy; - ret = ov2680_check_id(sensor); + ret = ov2680_v4l2_init(sensor); if (ret < 0) - goto error_cleanup; + goto lock_destroy; dev_info(dev, "ov2680 init correctly\n"); return 0; -error_cleanup: - dev_err(dev, "ov2680 init fail: %d\n", ret); - - media_entity_cleanup(&sensor->sd.entity); - v4l2_async_unregister_subdev(&sensor->sd); - v4l2_ctrl_handler_free(&sensor->ctrls.handler); - lock_destroy: + dev_err(dev, "ov2680 init fail: %d\n", ret); mutex_destroy(&sensor->lock); return ret; diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c index 385c1886a9470a1ad4153b61493501803be5ee5c..98a1f2e312b58249069de19f7b3c78dc4bf4e85d 100644 --- a/drivers/media/i2c/ov2685.c +++ b/drivers/media/i2c/ov2685.c @@ -549,7 +549,7 @@ static int ov2685_set_ctrl(struct v4l2_ctrl *ctrl) break; } - if (pm_runtime_get_if_in_use(&client->dev) <= 0) + if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 071f4bc240ca72a1162b354170bdcf956a4d4f89..2023df14f8282e00fff07e7f4cc30581bca73edf 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -223,8 +223,10 @@ struct ov5640_dev { int power_count; struct v4l2_mbus_framefmt fmt; + bool pending_fmt_change; const struct ov5640_mode_info *current_mode; + const struct ov5640_mode_info *last_mode; enum ov5640_frame_rate current_fr; struct v4l2_fract frame_interval; @@ -255,7 +257,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) * should be identified and removed to speed register load time * over i2c. 
*/ - +/* YUV422 UYVY VGA@30fps */ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0}, {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0}, @@ -286,10 +288,10 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0}, - {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, + {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, - {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0}, + {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0}, {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0}, @@ -908,6 +910,26 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, } /* download ov5640 settings to sensor through i2c */ +static int ov5640_set_timings(struct ov5640_dev *sensor, + const struct ov5640_mode_info *mode) +{ + int ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); + if (ret < 0) + return ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); + if (ret < 0) + return ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); + if (ret < 0) + return ret; + + return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); +} + static int ov5640_load_regs(struct ov5640_dev *sensor, const struct ov5640_mode_info *mode) { @@ -935,7 +957,13 @@ static int ov5640_load_regs(struct ov5640_dev *sensor, usleep_range(1000 * delay_ms, 1000 * delay_ms + 100); } - return ret; + return ov5640_set_timings(sensor, mode); +} + +static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on) +{ + return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, + BIT(0), on ? 0 : BIT(0)); } /* read exposure, in number of line periods */ @@ -994,6 +1022,18 @@ static int ov5640_get_gain(struct ov5640_dev *sensor) return gain & 0x3ff; } +static int ov5640_set_gain(struct ov5640_dev *sensor, int gain) +{ + return ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, + (u16)gain & 0x3ff); +} + +static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on) +{ + return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, + BIT(1), on ? 0 : BIT(1)); +} + static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on) { int ret; @@ -1102,12 +1142,25 @@ static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on) { int ret; - ret = ov5640_mod_reg(sensor, OV5640_REG_MIPI_CTRL00, BIT(5), - on ? 0 : BIT(5)); - if (ret) - return ret; - ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, - on ? 0x00 : 0x70); + /* + * Enable/disable the MIPI interface + * + * 0x300e = on ? 0x45 : 0x40 + * + * FIXME: the sensor manual (version 2.03) reports + * [7:5] = 000 : 1 data lane mode + * [7:5] = 001 : 2 data lanes mode + * But this settings do not work, while the following ones + * have been validated for 2 data lanes mode. 
+ * + * [7:5] = 010 : 2 data lanes mode + * [4] = 0 : Power up MIPI HS Tx + * [3] = 0 : Power up MIPI LS Rx + * [2] = 1/0 : MIPI interface enable/disable + * [1:0] = 01/00: FIXME: 'debug' + */ + ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, + on ? 0x45 : 0x40); if (ret) return ret; @@ -1331,7 +1384,7 @@ static int ov5640_set_ae_target(struct ov5640_dev *sensor, int target) return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1F, fast_low); } -static int ov5640_binning_on(struct ov5640_dev *sensor) +static int ov5640_get_binning(struct ov5640_dev *sensor) { u8 temp; int ret; @@ -1339,8 +1392,8 @@ static int ov5640_binning_on(struct ov5640_dev *sensor) ret = ov5640_read_reg(sensor, OV5640_REG_TIMING_TC_REG21, &temp); if (ret) return ret; - temp &= 0xfe; - return temp ? 1 : 0; + + return temp & BIT(0); } static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable) @@ -1385,30 +1438,6 @@ static int ov5640_set_virtual_channel(struct ov5640_dev *sensor) return ov5640_write_reg(sensor, OV5640_REG_DEBUG_MODE, temp); } -static int ov5640_set_timings(struct ov5640_dev *sensor, - const struct ov5640_mode_info *mode) -{ - int ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot); - if (ret < 0) - return ret; - - ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot); - if (ret < 0) - return ret; - - return 0; -} - static const struct ov5640_mode_info * ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr, int width, int height, bool nearest) @@ -1450,7 +1479,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, if (ret < 0) return ret; prev_shutter = ret; - ret = ov5640_binning_on(sensor); + ret = ov5640_get_binning(sensor); if (ret < 0) return ret; if (ret && mode->id != OV5640_MODE_720P_1280_720 && @@ -1571,7 +1600,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, } /* set capture gain */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.gain, cap_gain16); + ret = ov5640_set_gain(sensor, cap_gain16); if (ret) return ret; @@ -1584,7 +1613,7 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, } /* set exposure */ - return __v4l2_ctrl_s_ctrl(sensor->ctrls.exposure, cap_shutter); + return ov5640_set_exposure(sensor, cap_shutter); } /* @@ -1592,53 +1621,45 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor, * change mode directly */ static int ov5640_set_mode_direct(struct ov5640_dev *sensor, - const struct ov5640_mode_info *mode, - s32 exposure) + const struct ov5640_mode_info *mode) { - int ret; - if (!mode->reg_data) return -EINVAL; /* Write capture setting */ - ret = ov5640_load_regs(sensor, mode); - if (ret < 0) - return ret; - - /* turn auto gain/exposure back on for direct mode */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 1); - if (ret) - return ret; - - return __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, exposure); + return ov5640_load_regs(sensor, mode); } -static int ov5640_set_mode(struct ov5640_dev *sensor, - const struct ov5640_mode_info *orig_mode) +static int ov5640_set_mode(struct ov5640_dev *sensor) { const struct ov5640_mode_info *mode = sensor->current_mode; + const struct ov5640_mode_info *orig_mode = sensor->last_mode; enum ov5640_downsize_mode dn_mode, orig_dn_mode; - s32 exposure; + bool auto_gain = sensor->ctrls.auto_gain->val 
== 1; + bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO; int ret; dn_mode = mode->dn_mode; orig_dn_mode = orig_mode->dn_mode; /* auto gain and exposure must be turned off when changing modes */ - ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 0); - if (ret) - return ret; + if (auto_gain) { + ret = ov5640_set_autogain(sensor, false); + if (ret) + return ret; + } - exposure = sensor->ctrls.auto_exp->val; - ret = ov5640_set_exposure(sensor, V4L2_EXPOSURE_MANUAL); - if (ret) - return ret; + if (auto_exp) { + ret = ov5640_set_autoexposure(sensor, false); + if (ret) + goto restore_auto_gain; + } if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) || (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) { /* * change between subsampling and scaling - * go through exposure calucation + * go through exposure calculation */ ret = ov5640_set_mode_exposure_calc(sensor, mode); } else { @@ -1646,15 +1667,16 @@ static int ov5640_set_mode(struct ov5640_dev *sensor, * change inside subsampling or scaling * download firmware directly */ - ret = ov5640_set_mode_direct(sensor, mode, exposure); + ret = ov5640_set_mode_direct(sensor, mode); } - if (ret < 0) - return ret; + goto restore_auto_exp_gain; - ret = ov5640_set_timings(sensor, mode); - if (ret < 0) - return ret; + /* restore auto gain and exposure */ + if (auto_gain) + ov5640_set_autogain(sensor, true); + if (auto_exp) + ov5640_set_autoexposure(sensor, true); ret = ov5640_set_binning(sensor, dn_mode != SCALING); if (ret < 0) @@ -1673,8 +1695,18 @@ static int ov5640_set_mode(struct ov5640_dev *sensor, return ret; sensor->pending_mode_change = false; + sensor->last_mode = mode; return 0; + +restore_auto_exp_gain: + if (auto_exp) + ov5640_set_autoexposure(sensor, true); +restore_auto_gain: + if (auto_gain) + ov5640_set_autogain(sensor, true); + + return ret; } static int ov5640_set_framefmt(struct ov5640_dev *sensor, @@ -1689,6 +1721,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) ret = ov5640_load_regs(sensor, &ov5640_mode_init_data); if (ret < 0) return ret; + sensor->last_mode = &ov5640_mode_init_data; ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f, (ilog2(OV5640_SCLK2X_ROOT_DIVIDER_DEFAULT) << 2) | @@ -1697,7 +1730,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) return ret; /* now restore the last capture mode */ - ret = ov5640_set_mode(sensor, &ov5640_mode_init_data); + ret = ov5640_set_mode(sensor); if (ret < 0) return ret; @@ -1726,7 +1759,7 @@ static void ov5640_reset(struct ov5640_dev *sensor) usleep_range(1000, 2000); gpiod_set_value_cansleep(sensor->reset_gpio, 0); - usleep_range(5000, 10000); + usleep_range(20000, 25000); } static int ov5640_set_power_on(struct ov5640_dev *sensor) @@ -1786,23 +1819,69 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on) if (ret) goto power_off; + /* We're done here for DVP bus, while CSI-2 needs setup. 
*/ + if (sensor->ep.bus_type != V4L2_MBUS_CSI2) + return 0; + + /* + * Power up MIPI HS Tx and LS Rx; 2 data lanes mode + * + * 0x300e = 0x40 + * [7:5] = 010 : 2 data lanes mode (see FIXME note in + * "ov5640_set_stream_mipi()") + * [4] = 0 : Power up MIPI HS Tx + * [3] = 0 : Power up MIPI LS Rx + * [2] = 0 : MIPI interface disabled + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_IO_MIPI_CTRL00, 0x40); + if (ret) + goto power_off; + + /* + * Gate clock and set LP11 in 'no packets mode' (idle) + * + * 0x4800 = 0x24 + * [5] = 1 : Gate clock when 'no packets' + * [2] = 1 : MIPI bus in LP11 when 'no packets' + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_MIPI_CTRL00, 0x24); + if (ret) + goto power_off; + + /* + * Set data lanes and clock in LP11 when 'sleeping' + * + * 0x3019 = 0x70 + * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping' + * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping' + * [4] = 1 : MIPI clock lane in LP11 when 'sleeping' + */ + ret = ov5640_write_reg(sensor, + OV5640_REG_PAD_OUTPUT00, 0x70); + if (ret) + goto power_off; + + /* Give lanes some time to coax into LP11 state. */ + usleep_range(500, 1000); + + } else { if (sensor->ep.bus_type == V4L2_MBUS_CSI2) { - /* - * start streaming briefly followed by stream off in - * order to coax the clock lane into LP-11 state. - */ - ret = ov5640_set_stream_mipi(sensor, true); - if (ret) - goto power_off; - usleep_range(1000, 2000); - ret = ov5640_set_stream_mipi(sensor, false); - if (ret) - goto power_off; + /* Reset MIPI bus settings to their default values. */ + ov5640_write_reg(sensor, + OV5640_REG_IO_MIPI_CTRL00, 0x58); + ov5640_write_reg(sensor, + OV5640_REG_MIPI_CTRL00, 0x04); + ov5640_write_reg(sensor, + OV5640_REG_PAD_OUTPUT00, 0x00); } - return 0; + ov5640_set_power_off(sensor); } + return 0; + power_off: ov5640_set_power_off(sensor); return ret; @@ -1941,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, struct ov5640_dev *sensor = to_ov5640_dev(sd); const struct ov5640_mode_info *new_mode; struct v4l2_mbus_framefmt *mbus_fmt = &format->format; + struct v4l2_mbus_framefmt *fmt; int ret; if (format->pad != 0) @@ -1958,19 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, if (ret) goto out; - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - struct v4l2_mbus_framefmt *fmt = - v4l2_subdev_get_try_format(sd, cfg, 0); + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + fmt = v4l2_subdev_get_try_format(sd, cfg, 0); + else + fmt = &sensor->fmt; - *fmt = *mbus_fmt; - goto out; - } + *fmt = *mbus_fmt; if (new_mode != sensor->current_mode) { sensor->current_mode = new_mode; - sensor->fmt = *mbus_fmt; sensor->pending_mode_change = true; } + if (mbus_fmt->code != sensor->fmt.code) + sensor->pending_fmt_change = true; + out: mutex_unlock(&sensor->lock); return ret; @@ -2137,20 +2218,20 @@ static int ov5640_set_ctrl_white_balance(struct ov5640_dev *sensor, int awb) return ret; } -static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) +static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, + enum v4l2_exposure_auto_type auto_exposure) { struct ov5640_ctrls *ctrls = &sensor->ctrls; - bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO); + bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO); int ret = 0; if (ctrls->auto_exp->is_new) { - ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, - BIT(0), auto_exposure ? 
0 : BIT(0)); + ret = ov5640_set_autoexposure(sensor, auto_exp); if (ret) return ret; } - if (!auto_exposure && ctrls->exposure->is_new) { + if (!auto_exp && ctrls->exposure->is_new) { u16 max_exp; ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_VTS, @@ -2170,25 +2251,19 @@ static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp) return ret; } -static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, int auto_gain) +static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain) { struct ov5640_ctrls *ctrls = &sensor->ctrls; int ret = 0; if (ctrls->auto_gain->is_new) { - ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL, - BIT(1), - ctrls->auto_gain->val ? 0 : BIT(1)); + ret = ov5640_set_autogain(sensor, auto_gain); if (ret) return ret; } - if (!auto_gain && ctrls->gain->is_new) { - u16 gain = (u16)ctrls->gain->val; - - ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, - gain & 0x3ff); - } + if (!auto_gain && ctrls->gain->is_new) + ret = ov5640_set_gain(sensor, ctrls->gain->val); return ret; } @@ -2261,16 +2336,12 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) switch (ctrl->id) { case V4L2_CID_AUTOGAIN: - if (!ctrl->val) - return 0; val = ov5640_get_gain(sensor); if (val < 0) return val; sensor->ctrls.gain->val = val; break; case V4L2_CID_EXPOSURE_AUTO: - if (ctrl->val == V4L2_EXPOSURE_MANUAL) - return 0; val = ov5640_get_exposure(sensor); if (val < 0) return val; @@ -2501,8 +2572,6 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd, if (frame_rate < 0) frame_rate = OV5640_15_FPS; - sensor->current_fr = frame_rate; - sensor->frame_interval = fi->interval; mode = ov5640_find_mode(sensor, frame_rate, mode->hact, mode->vact, true); if (!mode) { @@ -2510,7 +2579,10 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd, goto out; } - if (mode != sensor->current_mode) { + if (mode != sensor->current_mode || + frame_rate != sensor->current_fr) { + sensor->current_fr = frame_rate; + sensor->frame_interval = fi->interval; sensor->current_mode = mode; sensor->pending_mode_change = true; } @@ -2541,13 +2613,16 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) if (sensor->streaming == !enable) { if (enable && sensor->pending_mode_change) { - ret = ov5640_set_mode(sensor, sensor->current_mode); + ret = ov5640_set_mode(sensor); if (ret) goto out; + } + if (enable && sensor->pending_fmt_change) { ret = ov5640_set_framefmt(sensor, &sensor->fmt); if (ret) goto out; + sensor->pending_fmt_change = false; } if (sensor->ep.bus_type == V4L2_MBUS_CSI2) @@ -2642,9 +2717,14 @@ static int ov5640_probe(struct i2c_client *client, return -ENOMEM; sensor->i2c_client = client; + + /* + * default init sequence initialize sensor to + * YUV422 UYVY VGA@30fps + */ fmt = &sensor->fmt; - fmt->code = ov5640_formats[0].code; - fmt->colorspace = ov5640_formats[0].colorspace; + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + fmt->colorspace = V4L2_COLORSPACE_SRGB; fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace); fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace); @@ -2656,7 +2736,7 @@ static int ov5640_probe(struct i2c_client *client, sensor->current_fr = OV5640_30_FPS; sensor->current_mode = &ov5640_mode_data[OV5640_30_FPS][OV5640_MODE_VGA_640_480]; - sensor->pending_mode_change = true; + sensor->last_mode = sensor->current_mode; sensor->ae_target = 52; @@ -2708,9 +2788,14 @@ static int ov5640_probe(struct i2c_client *client, /* request optional power down pin */ 
sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); + if (IS_ERR(sensor->pwdn_gpio)) + return PTR_ERR(sensor->pwdn_gpio); + /* request optional reset pin */ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(sensor->reset_gpio)) + return PTR_ERR(sensor->reset_gpio); v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops); diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index 1722cdab0daf2ae1b4a7c444ddb29cc48be5d3f0..34343bc10007850e502844bdeded13c2ee64a3a6 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -53,6 +53,8 @@ #define OV5645_CHIP_ID_HIGH_BYTE 0x56 #define OV5645_CHIP_ID_LOW 0x300b #define OV5645_CHIP_ID_LOW_BYTE 0x45 +#define OV5645_IO_MIPI_CTRL00 0x300e +#define OV5645_PAD_OUTPUT00 0x3019 #define OV5645_AWB_MANUAL_CONTROL 0x3406 #define OV5645_AWB_MANUAL_ENABLE BIT(0) #define OV5645_AEC_PK_MANUAL 0x3503 @@ -63,6 +65,7 @@ #define OV5645_ISP_VFLIP BIT(2) #define OV5645_TIMING_TC_REG21 0x3821 #define OV5645_SENSOR_MIRROR BIT(1) +#define OV5645_MIPI_CTRL00 0x4800 #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d #define OV5645_TEST_PATTERN_MASK 0x3 #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK) @@ -129,7 +132,6 @@ static const struct reg_value ov5645_global_init_setting[] = { { 0x3503, 0x07 }, { 0x3002, 0x1c }, { 0x3006, 0xc3 }, - { 0x300e, 0x45 }, { 0x3017, 0x00 }, { 0x3018, 0x00 }, { 0x302e, 0x0b }, @@ -358,7 +360,10 @@ static const struct reg_value ov5645_global_init_setting[] = { { 0x3a1f, 0x14 }, { 0x0601, 0x02 }, { 0x3008, 0x42 }, - { 0x3008, 0x02 } + { 0x3008, 0x02 }, + { OV5645_IO_MIPI_CTRL00, 0x40 }, + { OV5645_MIPI_CTRL00, 0x24 }, + { OV5645_PAD_OUTPUT00, 0x70 } }; static const struct reg_value ov5645_setting_sxga[] = { @@ -745,13 +750,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on) goto exit; } - ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, - OV5645_SYSTEM_CTRL0_STOP); - if (ret < 0) { - ov5645_set_power_off(ov5645); - goto exit; - } + usleep_range(500, 1000); } else { + ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58); ov5645_set_power_off(ov5645); } } @@ -1057,11 +1058,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable) dev_err(ov5645->dev, "could not sync v4l2 controls\n"); return ret; } + + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45); + if (ret < 0) + return ret; + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, OV5645_SYSTEM_CTRL0_START); if (ret < 0) return ret; } else { + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40); + if (ret < 0) + return ret; + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, OV5645_SYSTEM_CTRL0_STOP); if (ret < 0) diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c index 7b7c74d7737075c3e2dd5402342afd08edf40cc7..53dd30d96e691308288515fffe7f6913bbb1042d 100644 --- a/drivers/media/i2c/ov5670.c +++ b/drivers/media/i2c/ov5670.c @@ -2016,7 +2016,7 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl) } /* V4L2 controls values will be applied only when power is already up */ - if (pm_runtime_get_if_in_use(&client->dev) <= 0) + if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c index 9a80decd93d3c0ed5e011734dc8ea5c4211085a2..5d107c53364d64249374431d7c1a236ead06e4b4 100644 --- a/drivers/media/i2c/ov5695.c +++ b/drivers/media/i2c/ov5695.c @@ -1110,7 +1110,7 @@ static int ov5695_set_ctrl(struct v4l2_ctrl *ctrl) break; } - if 
(pm_runtime_get_if_in_use(&client->dev) <= 0) + if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index 17a34b4a819d32187589cbd045853ec3999995aa..c5aadd8dd23f104a6d66ae4a1e665838bae56d26 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -469,38 +469,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd, { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov6650 *priv = to_ov6650(client); - struct v4l2_rect rect = sel->r; int ret; if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; - v4l_bound_align_image(&rect.width, 2, W_CIF, 1, - &rect.height, 2, H_CIF, 1, 0); - v4l_bound_align_image(&rect.left, DEF_HSTRT << 1, - (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1, - &rect.top, DEF_VSTRT << 1, - (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1, - 0); + v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1, + &sel->r.height, 2, H_CIF, 1, 0); + v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1, + (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1, + &sel->r.top, DEF_VSTRT << 1, + (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height, + 1, 0); - ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1); + ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1); if (!ret) { - priv->rect.left = rect.left; + priv->rect.width += priv->rect.left - sel->r.left; + priv->rect.left = sel->r.left; ret = ov6650_reg_write(client, REG_HSTOP, - (rect.left + rect.width) >> 1); + (sel->r.left + sel->r.width) >> 1); } if (!ret) { - priv->rect.width = rect.width; - ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1); + priv->rect.width = sel->r.width; + ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1); } if (!ret) { - priv->rect.top = rect.top; + priv->rect.height += priv->rect.top - sel->r.top; + priv->rect.top = sel->r.top; ret = ov6650_reg_write(client, REG_VSTOP, - (rect.top + rect.height) >> 1); + (sel->r.top + sel->r.height) >> 1); } if (!ret) - priv->rect.height = rect.height; + priv->rect.height = sel->r.height; return ret; } @@ -614,7 +615,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); return -EINVAL; } - priv->code = code; if (code == MEDIA_BUS_FMT_Y8_1X8 || code == MEDIA_BUS_FMT_SBGGR8_1X8) { @@ -640,7 +640,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) dev_dbg(&client->dev, "max resolution: CIF\n"); coma_mask |= COMA_QCIF; } - priv->half_scale = half_scale; clkrc = CLKRC_12MHz; mclk = 12000000; @@ -658,8 +657,13 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask); if (!ret) ret = ov6650_reg_write(client, REG_CLKRC, clkrc); - if (!ret) + if (!ret) { + priv->half_scale = half_scale; + ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask); + } + if (!ret) + priv->code = code; if (!ret) { mf->colorspace = priv->colorspace; @@ -811,9 +815,18 @@ static int ov6650_video_probe(struct i2c_client *client) u8 pidh, pidl, midh, midl; int ret; + priv->clk = v4l2_clk_get(&client->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + dev_err(&client->dev, "v4l2_clk request err: %d\n", ret); + return ret; + } + ret = ov6650_s_power(&priv->subdev, 1); if (ret < 0) - return ret; + goto eclkput; + + msleep(20); /* * check and show product ID and manufacturer ID @@ -848,6 +861,11 @@ 
static int ov6650_video_probe(struct i2c_client *client) done: ov6650_s_power(&priv->subdev, 0); + if (!ret) + return 0; +eclkput: + v4l2_clk_put(priv->clk); + return ret; } @@ -990,18 +1008,9 @@ static int ov6650_probe(struct i2c_client *client, priv->code = MEDIA_BUS_FMT_YUYV8_2X8; priv->colorspace = V4L2_COLORSPACE_JPEG; - priv->clk = v4l2_clk_get(&client->dev, NULL); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto eclkget; - } - ret = ov6650_video_probe(client); - if (ret) { - v4l2_clk_put(priv->clk); -eclkget: + if (ret) v4l2_ctrl_handler_free(&priv->hdl); - } return ret; } diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index 31bf577b0bd3024b900cfdfc9dbe4b7860b5113c..1f71c14c8aab48fde55b1dbe1d1524608f253f73 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -159,10 +159,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define REG_GFIX 0x69 /* Fix gain control */ #define REG_DBLV 0x6b /* PLL control an debugging */ -#define DBLV_BYPASS 0x00 /* Bypass PLL */ -#define DBLV_X4 0x01 /* clock x4 */ -#define DBLV_X6 0x10 /* clock x6 */ -#define DBLV_X8 0x11 /* clock x8 */ +#define DBLV_BYPASS 0x0a /* Bypass PLL */ +#define DBLV_X4 0x4a /* clock x4 */ +#define DBLV_X6 0x8a /* clock x6 */ +#define DBLV_X8 0xca /* clock x8 */ #define REG_SCALING_XSC 0x70 /* Test pattern and horizontal scale factor */ #define TEST_PATTTERN_0 0x80 @@ -862,7 +862,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd, if (ret < 0) return ret; - return ov7670_write(sd, REG_DBLV, DBLV_X4); + return 0; } static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd, @@ -1797,28 +1797,31 @@ static int ov7670_probe(struct i2c_client *client, if (config->clock_speed) info->clock_speed = config->clock_speed; - /* - * It should be allowed for ov7670 too when it is migrated to - * the new frame rate formula. 
- */ - if (config->pll_bypass && id->driver_data != MODEL_OV7670) + if (config->pll_bypass) info->pll_bypass = true; if (config->pclk_hb_disable) info->pclk_hb_disable = true; } - info->clk = devm_clk_get(&client->dev, "xclk"); - if (IS_ERR(info->clk)) - return PTR_ERR(info->clk); - ret = clk_prepare_enable(info->clk); - if (ret) - return ret; + info->clk = devm_clk_get(&client->dev, "xclk"); /* optional */ + if (IS_ERR(info->clk)) { + ret = PTR_ERR(info->clk); + if (ret == -ENOENT) + info->clk = NULL; + else + return ret; + } + if (info->clk) { + ret = clk_prepare_enable(info->clk); + if (ret) + return ret; - info->clock_speed = clk_get_rate(info->clk) / 1000000; - if (info->clock_speed < 10 || info->clock_speed > 48) { - ret = -EINVAL; - goto clk_disable; + info->clock_speed = clk_get_rate(info->clk) / 1000000; + if (info->clock_speed < 10 || info->clock_speed > 48) { + ret = -EINVAL; + goto clk_disable; + } } ret = ov7670_init_gpio(client, info); diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c index 7158c31d8403be27483c6e1afa011ba8dc1912b2..4eae5f2f7d3183d22cca5c42377b935d8aaf4a78 100644 --- a/drivers/media/i2c/ov772x.c +++ b/drivers/media/i2c/ov772x.c @@ -896,6 +896,7 @@ static int ov772x_power_on(struct ov772x_priv *priv) GPIOD_OUT_LOW); if (IS_ERR(priv->rstb_gpio)) { dev_info(&client->dev, "Unable to get GPIO \"reset\""); + clk_disable_unprepare(priv->clk); return PTR_ERR(priv->rstb_gpio); } diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c index 605f3e25ad82b2cdd1d11978d508043a99798a53..7804013934ab5f314623779109362c012a777a6d 100644 --- a/drivers/media/i2c/ov7740.c +++ b/drivers/media/i2c/ov7740.c @@ -510,7 +510,7 @@ static int ov7740_set_ctrl(struct v4l2_ctrl *ctrl) int ret; u8 val = 0; - if (pm_runtime_get_if_in_use(&client->dev) <= 0) + if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { @@ -761,7 +761,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd, fsize++; } - + if (i >= ARRAY_SIZE(ov7740_framesizes)) { + fsize = &ov7740_framesizes[0]; + fmt->width = fsize->width; + fmt->height = fsize->height; + } if (ret_frmsize != NULL) *ret_frmsize = fsize; @@ -1101,6 +1105,9 @@ static int ov7740_probe(struct i2c_client *client, if (ret) return ret; + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + ret = ov7740_detect(ov7740); if (ret) goto error_detect; @@ -1123,8 +1130,6 @@ static int ov7740_probe(struct i2c_client *client, if (ret) goto error_async_register; - pm_runtime_set_active(&client->dev); - pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; @@ -1134,6 +1139,8 @@ static int ov7740_probe(struct i2c_client *client, error_init_controls: ov7740_free_controls(ov7740); error_detect: + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); ov7740_set_power(ov7740, 0); media_entity_cleanup(&ov7740->subdev.entity); diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 5bea31cd41aa1e63b3e024e0ce9d55624d633b7e..33a21d585dc9c2b9eaee1dbd564ea7ef6b8994f7 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -716,6 +716,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain) for (m = 6; m >= 0; m--) if (gain >= (1 << m) * 16) break; + + /* Sanity check: don't adjust the gain with a negative value */ + if (m < 0) + return -EINVAL; + rgain = (gain - ((1 << m) * 16)) / (1 << m); rgain |= (((1 << m) - 1) << 4); diff --git a/drivers/media/i2c/smiapp/smiapp-core.c 
b/drivers/media/i2c/smiapp/smiapp-core.c index 1236683da8f75556c52aa738178822f0bf0c2e1c..4731e1c72f9601d63597d9f1c3cfd96b92178643 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -3108,19 +3108,23 @@ static int smiapp_probe(struct i2c_client *client, if (rval < 0) goto out_media_entity_cleanup; - rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd); - if (rval < 0) - goto out_media_entity_cleanup; - pm_runtime_set_active(&client->dev); pm_runtime_get_noresume(&client->dev); pm_runtime_enable(&client->dev); + + rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd); + if (rval < 0) + goto out_disable_runtime_pm; + pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); pm_runtime_put_autosuspend(&client->dev); return 0; +out_disable_runtime_pm: + pm_runtime_disable(&client->dev); + out_media_entity_cleanup: media_entity_cleanup(&sensor->src->sd.entity); diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 44c41933415ab89aea8c847687615a3f0813341d..1b88388b7d0240b8036cdbc1edb93ade41d71337 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -59,7 +59,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = { /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, /* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */ - V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, V4L2_DV_BT_CAP_PROGRESSIVE | @@ -1243,9 +1243,9 @@ static int tc358743_log_status(struct v4l2_subdev *sd) u8 vi_status3 = i2c_rd8(sd, VI_STATUS3); const int deep_color_mode[4] = { 8, 10, 12, 16 }; static const char * const input_color_space[] = { - "RGB", "YCbCr 601", "Adobe RGB", "YCbCr 709", "NA (4)", + "RGB", "YCbCr 601", "opRGB", "YCbCr 709", "NA (4)", "xvYCC 601", "NA(6)", "xvYCC 709", "NA(8)", "sYCC601", - "NA(10)", "NA(11)", "NA(12)", "Adobe YCC 601"}; + "NA(10)", "NA(11)", "NA(12)", "opYCC 601"}; v4l2_info(sd, "-----Chip status-----\n"); v4l2_info(sd, "Chip ID: 0x%02x\n", @@ -1789,7 +1789,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd, return -E2BIG; } pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); - err = cec_phys_addr_validate(pa, &pa, NULL); + err = v4l2_phys_addr_validate(pa, &pa, NULL); if (err) return err; @@ -2104,9 +2104,6 @@ static int tc358743_probe(struct i2c_client *client, state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24; sd->dev = &client->dev; - err = v4l2_async_register_subdev(sd); - if (err < 0) - goto err_hdl; mutex_init(&state->confctl_mutex); @@ -2164,6 +2161,10 @@ static int tc358743_probe(struct i2c_client *client, if (err) goto err_work_queues; + err = v4l2_async_register_subdev(sd); + if (err < 0) + goto err_work_queues; + v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c index 498ad2368cbcfa56d0d84ecf26cf62e1f74375b5..f5ee28058ea2af35ae7dd137ab4f27ea3115529e 100644 --- a/drivers/media/i2c/ths8200.c +++ b/drivers/media/i2c/ths8200.c @@ -49,7 +49,7 @@ static const struct v4l2_dv_timings_cap ths8200_timings_cap = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 
148500000, + V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000, V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE) }; diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 76e6bed5a1da22c24e7289fc9e40010b24e7ac0b..15a5e98b3d4585f2bb80005a72f943b114383c1c 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -828,7 +828,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl) return 0; case V4L2_CID_HUE: tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val); - break; + return 0; case V4L2_CID_TEST_PATTERN: decoder->enable = ctrl->val ? false : true; tvp5150_selmux(sd); @@ -901,9 +901,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, /* tvp5150 has some special limits */ rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT); - rect.width = clamp_t(unsigned int, rect.width, - TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, - TVP5150_H_MAX - rect.left); rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP); /* Calculate height based on current standard */ @@ -917,9 +914,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, else hmax = TVP5150_V_MAX_OTHERS; - rect.height = clamp_t(unsigned int, rect.height, + /* + * alignments: + * - width = 2 due to UYVY colorspace + * - height, image = no special alignment + */ + v4l_bound_align_image(&rect.width, + TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, + TVP5150_H_MAX - rect.left, 1, &rect.height, hmax - TVP5150_MAX_CROP_TOP - rect.top, - hmax - rect.top); + hmax - rect.top, 0, 0); tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top); tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, @@ -1534,7 +1538,7 @@ static int tvp5150_probe(struct i2c_client *c, 27000000, 1, 27000000); v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops, V4L2_CID_TEST_PATTERN, - ARRAY_SIZE(tvp5150_test_patterns), + ARRAY_SIZE(tvp5150_test_patterns) - 1, 0, 0, tvp5150_test_patterns); sd->ctrl_handler = &core->hdl; if (core->hdl.error) { diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index 06d29d8f6be8ed8e7cb7c7418c58643e6a9eda58..f27d294dcbef5faf543c8c04147431df2b8e9121 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -510,7 +510,12 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = { static void video_i2c_release(struct video_device *vdev) { - kfree(video_get_drvdata(vdev)); + struct video_i2c_data *data = video_get_drvdata(vdev); + + v4l2_device_unregister(&data->v4l2_dev); + mutex_destroy(&data->lock); + mutex_destroy(&data->queue_lock); + kfree(data); } static int video_i2c_probe(struct i2c_client *client, @@ -608,10 +613,6 @@ static int video_i2c_remove(struct i2c_client *client) struct video_i2c_data *data = i2c_get_clientdata(client); video_unregister_device(&data->vdev); - v4l2_device_unregister(&data->v4l2_dev); - - mutex_destroy(&data->lock); - mutex_destroy(&data->queue_lock); return 0; } diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 3bae24b15eaa4cbd5e25485121e50ddcb5991af2..ed518b1f82e4a941799ee2c0061b5dcbf5fbabbf 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -487,6 +487,7 @@ static long media_device_enum_links32(struct media_device *mdev, { struct media_links_enum links; compat_uptr_t pads_ptr, links_ptr; + int ret; memset(&links, 0, sizeof(links)); @@ -498,7 +499,14 @@ static long media_device_enum_links32(struct media_device *mdev, links.pads = compat_ptr(pads_ptr); links.links = compat_ptr(links_ptr); - return media_device_enum_links(mdev, 
&links); + ret = media_device_enum_links(mdev, &links); + if (ret) + return ret; + + if (copy_to_user(ulinks->reserved, links.reserved, + sizeof(ulinks->reserved))) + return -EFAULT; + return 0; } #define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32) diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig index bc89e37608cdd7199a8ce8f20427eef1b97ebc1a..07dda09caf3c24f99330f5f07a8dc1cb3e3984c3 100644 --- a/drivers/media/pci/bt8xx/Kconfig +++ b/drivers/media/pci/bt8xx/Kconfig @@ -3,6 +3,7 @@ config VIDEO_BT848 depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2 select I2C_ALGOBIT select VIDEOBUF_DMA_SG + depends on BROKEN depends on RC_CORE depends on MEDIA_RADIO_SUPPORT select VIDEO_TUNER diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index cf05e11da01b06c71bccbd17e080d806709cbd06..00400f58d9c49c1b291ef31be9f5fa4c92fc980d 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4293,6 +4293,7 @@ static void bttv_remove(struct pci_dev *pci_dev) /* free resources */ free_irq(btv->c.pci->irq,btv); + del_timer_sync(&btv->timeout); iounmap(btv->bt848_mmio); release_mem_region(pci_resource_start(btv->c.pci,0), pci_resource_len(btv->c.pci,0)); diff --git a/drivers/media/pci/cx18/Kconfig b/drivers/media/pci/cx18/Kconfig index c675b83c43a96d5e3207c468cc0aba735e5baa44..dd5c456c6c84b9a3ab4088f26c2cf37961d0083e 100644 --- a/drivers/media/pci/cx18/Kconfig +++ b/drivers/media/pci/cx18/Kconfig @@ -4,6 +4,7 @@ config VIDEO_CX18 select I2C_ALGOBIT select VIDEOBUF_VMALLOC depends on RC_CORE + depends on BROKEN select VIDEO_TUNER select VIDEO_TVEEPROM select VIDEO_CX2341X diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 0c389a3fb4e5f0fb9bdaaa75128e0a177898ba10..e64f9093cd6d394aaa9d455eb526f66e696a6504 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -1252,7 +1252,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx) { int i; for (i = 0; i < CX18_MAX_STREAMS; i++) - if (&cx->streams[i].video_dev) + if (cx->streams[i].video_dev.v4l2_dev) cancel_work_sync(&cx->streams[i].out_work_order); } diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c index 62bc8049b320122d397ff18eeb06562311c724fa..198c05e83f5c0b1a106564e3c94f18e06ce6214f 100644 --- a/drivers/media/pci/cx23885/altera-ci.c +++ b/drivers/media/pci/cx23885/altera-ci.c @@ -665,6 +665,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) } temp_int = append_internal(inter); + if (!temp_int) { + ret = -ENOMEM; + goto err; + } inter->filts_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; @@ -699,6 +703,7 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) __func__, ret); kfree(pid_filt); + kfree(inter); return ret; } @@ -733,6 +738,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr) } temp_int = append_internal(inter); + if (!temp_int) { + ret = -ENOMEM; + goto err; + } inter->cis_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; @@ -801,6 +810,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr) ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); + kfree(inter); return ret; } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 
39804d830305c152822995d0cbb9b803a08c741e..fd5c52b21436b5fb035a9642ab5d934800a26579 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth "); MODULE_LICENSE("GPL"); MODULE_VERSION(CX23885_VERSION); +/* + * Some platforms have been found to require periodic resetting of the DMA + * engine. Ryzen and XEON platforms are known to be affected. The symptom + * encountered is "mpeg risc op code error". Only Ryzen platforms employ + * this workaround if the option equals 1. The workaround can be explicitly + * disabled for all platforms by setting to 0, the workaround can be forced + * on for any platform by setting to 2. + */ +static unsigned int dma_reset_workaround = 1; +module_param(dma_reset_workaround, int, 0644); +MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable"); + static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); @@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port, static void cx23885_clear_bridge_error(struct cx23885_dev *dev) { - uint32_t reg1_val = cx_read(TC_REQ); /* read-only */ - uint32_t reg2_val = cx_read(TC_REQ_SET); + uint32_t reg1_val, reg2_val; + + if (!dev->need_dma_reset) + return; + + reg1_val = cx_read(TC_REQ); /* read-only */ + reg2_val = cx_read(TC_REQ_SET); if (reg1_val && reg2_val) { cx_write(TC_REQ, reg1_val); @@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput) /* TODO: 23-19 */ } +static struct { + int vendor, dev; +} const broken_dev_id[] = { + /* According with + * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci, + * 0x1451 is PCI ID for the IOMMU found on Ryzen + */ + { PCI_VENDOR_ID_AMD, 0x1451 }, +}; + +static bool cx23885_does_need_dma_reset(void) +{ + int i; + struct pci_dev *pdev = NULL; + + if (dma_reset_workaround == 0) + return false; + else if (dma_reset_workaround == 2) + return true; + + for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) { + pdev = pci_get_device(broken_dev_id[i].vendor, + broken_dev_id[i].dev, NULL); + if (pdev) { + pci_dev_put(pdev); + return true; + } + } + return false; +} + static int cx23885_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { @@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev, if (NULL == dev) return -ENOMEM; + dev->need_dma_reset = cx23885_does_need_dma_reset(); + err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); if (err < 0) goto fail_free; diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index f8a3deadc77a19e2b1fdd0d404dfb3948f52a0f6..b9f3ec89ede3d5ff01252f3d2ae3c4b0267064c3 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -1296,6 +1296,10 @@ int cx23885_video_register(struct cx23885_dev *dev) /* register Video device */ dev->video_dev = cx23885_vdev_init(dev, dev->pci, &cx23885_video_template, "video"); + if (!dev->video_dev) { + err = -ENOMEM; + goto fail_unreg; + } dev->video_dev->queue = &dev->vb2_vidq; err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER, video_nr[dev->nr]); @@ -1310,6 +1314,10 @@ int cx23885_video_register(struct cx23885_dev *dev) /* register VBI device */ dev->vbi_dev = cx23885_vdev_init(dev, dev->pci, 
&cx23885_vbi_template, "vbi"); + if (!dev->vbi_dev) { + err = -ENOMEM; + goto fail_unreg; + } dev->vbi_dev->queue = &dev->vb2_vbiq; err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, vbi_nr[dev->nr]); diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index d54c7ee1ab218f003178b37240a80ba1ee74e277..cf965efabe66678f8c475f675ffbf818ec102ad1 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -451,6 +451,8 @@ struct cx23885_dev { /* Analog raw audio */ struct cx23885_audio_dev *audio_dev; + /* Does the system require periodic DMA resets? */ + unsigned int need_dma_reset:1; }; static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev) diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index 00329f668b590260713d3dab844179398f55b465..5177479d13d388efa1a2cbd13c87d051afdc0b00 100644 --- a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -1178,8 +1178,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) return -ENOMEM; spin_lock_init(&state->rx_kfifo_lock); - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, + GFP_KERNEL)) { + kfree(state); return -ENOMEM; + } state->dev = dev; sd = &state->sd; diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 7b113bad70d23b2857dfcf26dd3648d465abffa3..248fb3b6833cc6ee4611a220dbe44e0ea19b8f98 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1312,7 +1312,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, core = cx88_core_get(dev->pci); if (!core) { err = -EINVAL; - goto fail_free; + goto fail_disable; } dev->core = core; @@ -1358,7 +1358,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, cc->step, cc->default_value); if (!vc) { err = core->audio_hdl.error; - goto fail_core; + goto fail_irq; } vc->priv = (void *)cc; } @@ -1372,7 +1372,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, cc->step, cc->default_value); if (!vc) { err = core->video_hdl.error; - goto fail_core; + goto fail_irq; } vc->priv = (void *)cc; if (vc->id == V4L2_CID_CHROMA_AGC) @@ -1535,11 +1535,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev, fail_unreg: cx8800_unregister_video(dev); - free_irq(pci_dev->irq, dev); mutex_unlock(&core->lock); +fail_irq: + free_irq(pci_dev->irq, dev); fail_core: core->v4ldev = NULL; cx88_core_put(core, dev->pci); +fail_disable: + pci_disable_device(pci_dev); fail_free: kfree(dev); return err; diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index 1ddb0576fb7b1af37d8c681d925df6260bc2ff4a..dc3fc69e448062d09844bec5ced2e8b8d053f3f4 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -1188,6 +1188,7 @@ static void dm1105_remove(struct pci_dev *pdev) struct dvb_demux *dvbdemux = &dev->demux; struct dmx_demux *dmx = &dvbdemux->dmx; + cancel_work_sync(&dev->ir.work); dm1105_ir_exit(dev); dmx->close(dmx); dvb_net_release(&dev->dvbnet); diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 29027159eced88a57b9a39feba4197e45c1a2d00..ca1a4d8e972ec0f1c3a1ea8b57c5b83fd69437ea 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1846,12 +1846,12 @@ static void cio2_pci_remove(struct pci_dev *pci_dev) struct cio2_device *cio2 = 
pci_get_drvdata(pci_dev); unsigned int i; + media_device_unregister(&cio2->media_dev); cio2_notifier_exit(cio2); - cio2_fbpt_exit_dummy(cio2); for (i = 0; i < CIO2_QUEUES; i++) cio2_queue_exit(cio2, &cio2->queue[i]); + cio2_fbpt_exit_dummy(cio2); v4l2_device_unregister(&cio2->v4l2_dev); - media_device_unregister(&cio2->media_dev); media_device_cleanup(&cio2->media_dev); mutex_destroy(&cio2->lock); } diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 44936d6d7c3967e28c9d1dcbaa722cf7e380c39e..1380474519f2bf045cae322ee0fa84414289397d 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -935,7 +935,7 @@ static void ivtv_yuv_init(struct ivtv *itv) } /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */ - yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL|__GFP_NOWARN); + yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN); if (yi->blanking_ptr) { yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE); } else { diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 5ddaa8ed11a566c8fcc405ce60c9617c84f91e41..c7605cf887bdd933570854d87e14712c45a7dc16 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -356,7 +356,7 @@ static int ivtvfb_prep_frame(struct ivtv *itv, int cmd, void __user *source, IVTVFB_WARN("ivtvfb_prep_frame: Count not a multiple of 4 (%d)\n", count); /* Check Source */ - if (!access_ok(VERIFY_READ, source + dest_offset, count)) { + if (!access_ok(source + dest_offset, count)) { IVTVFB_WARN("Invalid userspace pointer %p\n", source); IVTVFB_DEBUG_WARN("access_ok() failed for offset 0x%08lx source %p count %d\n", diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 8001d3e9134e440836fdf39b5b3624fd75b5ebe7..db2a7ad1e523195b97c07c0c473a5cd279c80f2c 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1460,7 +1460,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma) unsigned long page, pos; mutex_lock(&meye.lock); - if (size > gbuffers * gbufsize) { + if (size > gbuffers * gbufsize || offset > gbuffers * gbufsize - size) { mutex_unlock(&meye.lock); return -EINVAL; } diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 25f16833a47521eb71cfb23b7a9560a3ee6db329..aa60559bbbb497a05c5e15ae54408f2ca8b3c72e 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -398,7 +398,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config) com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; com.cmd.hdr.Length = 6; - memcpy(&com.cmd.ConfigureBuffers.config, config, 6); + memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); com.in_len = 6; com.out_len = 0; diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 01d9f1b58fcbd43d305e4d94a443d6605902ad56..9728bce1ea1cdf19b63ebdc54eca549041f60509 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -419,12 +419,14 @@ enum _BUFFER_CONFIGS { struct FW_CONFIGURE_FREE_BUFFERS { struct FW_HEADER hdr; - u8 UVI1_BufferLength; - u8 UVI2_BufferLength; - u8 TVO_BufferLength; - u8 AUD1_BufferLength; - u8 AUD2_BufferLength; - u8 TVA_BufferLength; + struct { + u8 UVI1_BufferLength; + u8 UVI2_BufferLength; + u8 TVO_BufferLength; + u8 AUD1_BufferLength; + u8 AUD2_BufferLength; + u8 TVA_BufferLength; + } __packed config; } 
__attribute__ ((__packed__)); struct FW_CONFIGURE_UART { diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index cf1e526de56ac0fadd1c75b05b964481d8ec3e09..8a1128c60680b928835da0601fdc51d06383b593 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -351,7 +351,11 @@ static const struct i2c_client saa7134_client_template = { /* ----------------------------------------------------------- */ -/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ +/* + * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T + * demod i2c gate closed due to an address clash between this EEPROM + * and the demod one. + */ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) { u8 subaddr = 0x7, dmdregval; @@ -368,14 +372,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); if ((ret == 2) && (dmdregval & 0x2)) { - pr_debug("%s: DVB-T demod i2c gate was left closed\n", + pr_debug("%s: DVB-T demod i2c gate was left open\n", dev->name); data[0] = subaddr; data[1] = (dmdregval & ~0x2); if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) - pr_err("%s: EEPROM i2c gate open failure\n", - dev->name); + pr_err("%s: EEPROM i2c gate close failure\n", + dev->name); } } diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c index 2be703617e29434cc06d0210287ef503fd4903f8..e7adcd4f99623d79648340a29567263541f2a9db 100644 --- a/drivers/media/pci/saa7134/saa7134-ts.c +++ b/drivers/media/pci/saa7134/saa7134-ts.c @@ -309,6 +309,7 @@ int saa7134_ts_start(struct saa7134_dev *dev) int saa7134_ts_fini(struct saa7134_dev *dev) { + del_timer_sync(&dev->ts_q.timeout); saa7134_pgtable_free(dev->pci, &dev->ts_q.pt); return 0; } diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c index 57bea543c39ba38ba1c722099e226e8366ce0d0b..559db500b19ce6581b8365e9e8819716715713cb 100644 --- a/drivers/media/pci/saa7134/saa7134-vbi.c +++ b/drivers/media/pci/saa7134/saa7134-vbi.c @@ -194,6 +194,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev) int saa7134_vbi_fini(struct saa7134_dev *dev) { /* nothing */ + del_timer_sync(&dev->vbi_q.timeout); return 0; } diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c index 1a50ec9d084f3d6e78fb50ae818b9330071e4ba2..103cbc8c1345a0c503d0efea0686fafed5488e03 100644 --- a/drivers/media/pci/saa7134/saa7134-video.c +++ b/drivers/media/pci/saa7134/saa7134-video.c @@ -2213,6 +2213,7 @@ int saa7134_video_init1(struct saa7134_dev *dev) void saa7134_video_fini(struct saa7134_dev *dev) { + del_timer_sync(&dev->video_q.timeout); /* free stuff */ vb2_queue_release(&dev->video_vbq); saa7134_pgtable_free(dev->pci, &dev->video_q.pt); diff --git a/drivers/media/pci/saa7146/Kconfig b/drivers/media/pci/saa7146/Kconfig index da88b77a916caa7a66b88b7451c4f324b18a1218..77918241d9b3d6de2b317edb538520cf897d4528 100644 --- a/drivers/media/pci/saa7146/Kconfig +++ b/drivers/media/pci/saa7146/Kconfig @@ -1,6 +1,7 @@ config VIDEO_HEXIUM_GEMINI tristate "Hexium Gemini frame grabber" depends on PCI && VIDEO_V4L2 && I2C + depends on BROKEN select VIDEO_SAA7146_VV ---help--- This is a video4linux driver for the Hexium Gemini frame @@ -13,6 +14,7 @@ config VIDEO_HEXIUM_GEMINI config VIDEO_HEXIUM_ORION tristate "Hexium HV-PCI6 and Orion frame grabber" depends on PCI && VIDEO_V4L2 && I2C + depends on BROKEN select 
VIDEO_SAA7146_VV ---help--- This is a video4linux driver for the Hexium HV-PCI6 and @@ -24,6 +26,7 @@ config VIDEO_HEXIUM_ORION config VIDEO_MXB tristate "Siemens-Nixdorf 'Multimedia eXtension Board'" depends on PCI && VIDEO_V4L2 && I2C + depends on BROKEN select VIDEO_SAA7146_VV select VIDEO_TUNER select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index 5817d9cde4d0c0f448679b48ec418cfea02fc2ec..8c56d4c37a525a643daafff463b0cf794931a702 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d /* enable i2c-port pins */ saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26)); - hexium->i2c_adapter = (struct i2c_adapter) { - .name = "hexium gemini", - }; + strscpy(hexium->i2c_adapter.name, "hexium gemini", + sizeof(hexium->i2c_adapter.name)); saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if (i2c_add_adapter(&hexium->i2c_adapter) < 0) { DEB_S("cannot register i2c-device. skipping.\n"); @@ -305,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER); if (ret < 0) { pr_err("cannot register capture v4l2 device. skipping.\n"); + saa7146_vv_release(dev); + i2c_del_adapter(&hexium->i2c_adapter); + kfree(hexium); return ret; } diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 0a05176c18ab6e55fab01c3e859d738f02d8cf85..a794f9e5f99087b1a917b59041d9c0ed1861d2d3 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev) saa7146_write(dev, DD1_STREAM_B, 0x00000000); saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26)); - hexium->i2c_adapter = (struct i2c_adapter) { - .name = "hexium orion", - }; + strscpy(hexium->i2c_adapter.name, "hexium orion", + sizeof(hexium->i2c_adapter.name)); saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480); if (i2c_add_adapter(&hexium->i2c_adapter) < 0) { DEB_S("cannot register i2c-device. 
skipping.\n"); diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index d697e1ad929c2d8e765feb15f136563b103bef05..5102519df108328cccf15c061b4038dd673d9963 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1122,16 +1122,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v) return 0; } +static struct proc_dir_entry *saa7164_pe; + static int saa7164_proc_create(void) { - struct proc_dir_entry *pe; - - pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show); - if (!pe) + saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show); + if (!saa7164_pe) return -ENOMEM; return 0; } + +static void saa7164_proc_destroy(void) +{ + if (saa7164_pe) + remove_proc_entry("saa7164", NULL); +} +#else +static int saa7164_proc_create(void) { return 0; } +static void saa7164_proc_destroy(void) {} #endif static int saa7164_thread_function(void *data) @@ -1503,19 +1512,21 @@ static struct pci_driver saa7164_pci_driver = { static int __init saa7164_init(void) { - printk(KERN_INFO "saa7164 driver loaded\n"); + int ret = pci_register_driver(&saa7164_pci_driver); + + if (ret) + return ret; -#ifdef CONFIG_PROC_FS saa7164_proc_create(); -#endif - return pci_register_driver(&saa7164_pci_driver); + + pr_info("saa7164 driver loaded\n"); + + return 0; } static void __exit saa7164_fini(void) { -#ifdef CONFIG_PROC_FS - remove_proc_entry("saa7164", NULL); -#endif + saa7164_proc_destroy(); pci_unregister_driver(&saa7164_pci_driver); } diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig index dfba74dd65212e31cbc7d7f7e6c5873582e62489..3c047fc0a2bd7d9b862e7d9e7fc5564fcd037080 100644 --- a/drivers/media/pci/ttpci/Kconfig +++ b/drivers/media/pci/ttpci/Kconfig @@ -6,6 +6,7 @@ config DVB_AV7110 depends on DVB_CORE && PCI && I2C select TTPCI_EEPROM select VIDEO_SAA7146_VV + depends on BROKEN select DVB_AV7110_IR if INPUT_EVDEV=y || INPUT_EVDEV=DVB_AV7110 depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT @@ -116,6 +117,7 @@ config DVB_BUDGET_AV tristate "Budget cards with analog video inputs" depends on DVB_BUDGET_CORE && I2C select VIDEO_SAA7146_VV + depends on BROKEN depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index abc98f1ad26c3f30435fca60a69ee40b161cc7dd..fdb704552318d0cbc7d79127d67c2c9a1a337704 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -1476,7 +1476,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio budget_av->has_saa7113 = 1; err = saa7146_vv_init(dev, &vv_data); if (err != 0) { - /* fixme: proper cleanup here */ + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); ERR("cannot init vv subsystem\n"); return err; } @@ -1485,9 +1486,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio vv_data.vid_ops.vidioc_s_input = vidioc_s_input; if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) { - /* fixme: proper cleanup here */ - ERR("cannot register capture v4l2 device\n"); saa7146_vv_release(dev); + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); + ERR("cannot register capture v4l2 device\n"); return err; } diff --git a/drivers/media/pci/tw5864/tw5864-video.c 
b/drivers/media/pci/tw5864/tw5864-video.c index ff2b7da90c088694a83f7c908536aebfd5c8a8a1..6c40e60ac99391bcad47038701e13013baccc765 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c @@ -1395,13 +1395,13 @@ static void tw5864_handle_frame(struct tw5864_h264_frame *frame) input->vb = NULL; spin_unlock_irqrestore(&input->slock, flags); - v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf); - if (!vb) { /* Gone because of disabling */ dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n"); return; } + v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf); + /* * Check for space. * Mind the overhead of startcode emulation prevention. diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 54fe90acb5b2962432140949a17e1630361e08ca..abb11711b3f3cf432de742edb21bc849b4271e5a 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -16,6 +16,7 @@ source "drivers/media/platform/marvell-ccic/Kconfig" config VIDEO_VIA_CAMERA tristate "VIAFB camera controller support" depends on FB_VIA + depends on BROKEN select VIDEOBUF_DMA_SG select VIDEO_OV7670 help @@ -45,6 +46,7 @@ config VIDEO_VIU tristate "Freescale VIU Video Driver" depends on VIDEO_V4L2 && (PPC_MPC512x || COMPILE_TEST) && I2C select VIDEOBUF_DMA_CONTIG + depends on BROKEN default y ---help--- Support for Freescale VIU video driver. This device captures @@ -151,6 +153,17 @@ config VIDEO_TI_CAL In TI Technical Reference Manual this module is referred as Camera Interface Subsystem (CAMSS). +config VIDEO_PHYTIUM_JPEG + tristate "Phytium JPEG Encoder driver" + depends on VIDEO_V4L2 + depends on ARCH_PHYTIUM + select VIDEOBUF2_DMA_CONTIG + help + Support for the Phytium JPEG Encoder Engine embedded + in the Phytium SOCs. + The engine can capture and compress video data from + digital or analog sources. 
+ endif # V4L_PLATFORM_DRIVERS menuconfig V4L_MEM2MEM_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 41322ab658027f1d7aa0c571d01ea34444319570..f89634e3239ad5a995a909c33d88a4fe3d714d50 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -93,6 +93,8 @@ obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/ obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ +obj-$(CONFIG_VIDEO_PHYTIUM_JPEG) += phytium-jpeg/ + obj-y += meson/ obj-y += cros-ec-cec/ diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c index b05738a95e55e1dc77449e0780fa6a30b019009f..809320decdeb283d77cadda59fa9efc3b0c8565a 100644 --- a/drivers/media/platform/am437x/am437x-vpfe.c +++ b/drivers/media/platform/am437x/am437x-vpfe.c @@ -1848,6 +1848,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) return -ENODATA; + /* if trying to set the same std then nothing to do */ + if (vpfe_standards[vpfe->std_index].std_id == std_id) + return 0; + /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c index d89e14524d427faa94a04f2ae9402d621788270f..1fd0782576705b08e4078ffc3e47d2b246f78b4f 100644 --- a/drivers/media/platform/atmel/atmel-isc.c +++ b/drivers/media/platform/atmel/atmel-isc.c @@ -1895,6 +1895,8 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier) struct vb2_queue *q = &isc->vb2_vidq; int ret; + INIT_WORK(&isc->awb_work, isc_awb_work); + ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev); if (ret < 0) { v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n"); @@ -1948,8 +1950,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier) return ret; } - INIT_WORK(&isc->awb_work, isc_awb_work); - /* Register video device */ strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name)); vdev->release = video_device_release_empty; @@ -2062,8 +2062,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc) break; } - subdev_entity->asd = devm_kzalloc(dev, - sizeof(*subdev_entity->asd), GFP_KERNEL); + /* asd will be freed by the subsystem once it's added to the + * notifier list + */ + subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd), + GFP_KERNEL); if (!subdev_entity->asd) { of_node_put(rem); ret = -ENOMEM; @@ -2209,6 +2212,7 @@ static int atmel_isc_probe(struct platform_device *pdev) &subdev_entity->notifier); if (ret) { dev_err(dev, "fail to register async notifier\n"); + kfree(subdev_entity->asd); goto cleanup_subdev; } diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index d26c2d85a009463a708208a342b9d86e4e50edfc..c3eaddced72143a5c1969c56925d6ffd5de8b728 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -991,16 +991,15 @@ static int coda_start_encoding(struct coda_ctx *ctx) else coda_write(dev, CODA_STD_H264, CODA_CMD_ENC_SEQ_COD_STD); - if (ctx->params.h264_deblk_enabled) { - value = ((ctx->params.h264_deblk_alpha & - CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) << - CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) | - ((ctx->params.h264_deblk_beta & - CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) << - CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET); - } else { - value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET; - } + value = 
((ctx->params.h264_disable_deblocking_filter_idc & + CODA_264PARAM_DISABLEDEBLK_MASK) << + CODA_264PARAM_DISABLEDEBLK_OFFSET) | + ((ctx->params.h264_slice_alpha_c0_offset_div2 & + CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) << + CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) | + ((ctx->params.h264_slice_beta_offset_div2 & + CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) << + CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET); coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA); break; case V4L2_PIX_FMT_JPEG: @@ -1729,6 +1728,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx) v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n"); return ret; } + ctx->sequence_offset = ~0U; ctx->initialized = 1; /* Update kfifo out pointer from coda bitstream read pointer */ @@ -2000,6 +2000,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx) /* Clear decode success flag */ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS); + /* Clear error return value */ + coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB); + trace_coda_dec_pic_run(ctx, meta); coda_command_async(ctx, CODA_COMMAND_PIC_RUN); @@ -2140,12 +2143,17 @@ static void coda_finish_decode(struct coda_ctx *ctx) else if (ctx->display_idx < 0) ctx->hold = true; } else if (decoded_idx == -2) { + if (ctx->display_idx >= 0 && + ctx->display_idx < ctx->num_internal_frames) + ctx->sequence_offset++; /* no frame was decoded, we still return remaining buffers */ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) { v4l2_err(&dev->v4l2_dev, "decoded frame index out of range: %d\n", decoded_idx); } else { - val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1; + val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM); + if (ctx->sequence_offset == -1) + ctx->sequence_offset = val; val -= ctx->sequence_offset; spin_lock_irqsave(&ctx->buffer_meta_lock, flags); if (!list_empty(&ctx->buffer_meta_list)) { @@ -2301,7 +2309,6 @@ irqreturn_t coda_irq_handler(int irq, void *data) if (ctx == NULL) { v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n"); - mutex_unlock(&dev->coda_mutex); return IRQ_HANDLED; } diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 726b3b93a4863184bb84ee792784d69cdcc39333..fccc771d23a51797f8b6158d6993d6c29ebead4d 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -997,6 +998,8 @@ static int coda_encoder_cmd(struct file *file, void *fh, /* Set the stream-end flag on this context */ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; + flush_work(&ctx->pic_run_work); + /* If there is no buffer in flight, wake up */ if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) { dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, @@ -1793,18 +1796,18 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) ctx->params.h264_max_qp = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: - ctx->params.h264_deblk_alpha = ctrl->val; + ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: - ctx->params.h264_deblk_beta = ctrl->val; + ctx->params.h264_slice_beta_offset_div2 = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: - ctx->params.h264_deblk_enabled = (ctrl->val == - V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); + ctx->params.h264_disable_deblocking_filter_idc = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: /* TODO: switch between baseline and constrained 
baseline */ - ctx->params.h264_profile_idc = 66; + if (ctx->inst_type == CODA_INST_ENCODER) + ctx->params.h264_profile_idc = 66; break; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: /* nothing to do, this is set by the encoder */ @@ -1881,13 +1884,13 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, - V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0); + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0); v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, - V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0); + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, - V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0, - V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, + 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, @@ -2099,17 +2102,6 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq, return coda_queue_init(priv, dst_vq); } -static int coda_next_free_instance(struct coda_dev *dev) -{ - int idx = ffz(dev->instance_mask); - - if ((idx < 0) || - (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES)) - return -EBUSY; - - return idx; -} - /* * File operations */ @@ -2118,7 +2110,8 @@ static int coda_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct coda_dev *dev = video_get_drvdata(vdev); - struct coda_ctx *ctx = NULL; + struct coda_ctx *ctx; + unsigned int max = ~0; char *name; int ret; int idx; @@ -2127,12 +2120,13 @@ static int coda_open(struct file *file) if (!ctx) return -ENOMEM; - idx = coda_next_free_instance(dev); + if (dev->devtype->product == CODA_DX6) + max = CODADX6_MAX_INSTANCES - 1; + idx = ida_alloc_max(&dev->ida, max, GFP_KERNEL); if (idx < 0) { ret = idx; goto err_coda_max; } - set_bit(idx, &dev->instance_mask); name = kasprintf(GFP_KERNEL, "context%d", idx); if (!name) { @@ -2241,8 +2235,8 @@ static int coda_open(struct file *file) err_pm_get: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); - clear_bit(ctx->idx, &dev->instance_mask); err_coda_name_init: + ida_free(&dev->ida, ctx->idx); err_coda_max: kfree(ctx); return ret; @@ -2284,7 +2278,7 @@ static int coda_release(struct file *file) pm_runtime_put_sync(&dev->plat_dev->dev); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); - clear_bit(ctx->idx, &dev->instance_mask); + ida_free(&dev->ida, ctx->idx); if (ctx->ops->release) ctx->ops->release(ctx); debugfs_remove_recursive(ctx->debugfs_entry); @@ -2745,6 +2739,7 @@ static int coda_probe(struct platform_device *pdev) mutex_init(&dev->dev_mutex); mutex_init(&dev->coda_mutex); + ida_init(&dev->ida); dev->debugfs_root = debugfs_create_dir("coda", NULL); if (!dev->debugfs_root) @@ -2832,6 +2827,7 @@ static int coda_remove(struct platform_device *pdev) coda_free_aux_buf(dev, &dev->tempbuf); coda_free_aux_buf(dev, &dev->workbuf); debugfs_remove_recursive(dev->debugfs_root); + ida_destroy(&dev->ida); return 0; } diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 19ac0b9dc6ebad27d01a7e2fabe25f55fcade73d..8df02c32781ee2ba0c836474cf749c2236990d2a 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ -16,6 
+16,7 @@ #define __CODA_H__ #include +#include #include #include #include @@ -95,7 +96,7 @@ struct coda_dev { struct workqueue_struct *workqueue; struct v4l2_m2m_dev *m2m_dev; struct list_head instances; - unsigned long instance_mask; + struct ida ida; struct dentry *debugfs_root; }; @@ -115,9 +116,9 @@ struct coda_params { u8 h264_inter_qp; u8 h264_min_qp; u8 h264_max_qp; - u8 h264_deblk_enabled; - u8 h264_deblk_alpha; - u8 h264_deblk_beta; + u8 h264_disable_deblocking_filter_idc; + s8 h264_slice_alpha_c0_offset_div2; + s8 h264_slice_beta_offset_div2; u8 h264_profile_idc; u8 h264_level_idc; u8 mpeg4_intra_qp; diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h index 5e7b00a97671ef396426d0e2b8cf9c93c00afdb6..e675e38f3475e79737f3ac22e2e12911cede7003 100644 --- a/drivers/media/platform/coda/coda_regs.h +++ b/drivers/media/platform/coda/coda_regs.h @@ -292,7 +292,7 @@ #define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8 #define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f #define CODA_264PARAM_DISABLEDEBLK_OFFSET 6 -#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01 +#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03 #define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5 #define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01 #define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0 diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig index 06b5e581f25f2416c05f4a6796a94e13afdddcdd..9fbcb79c07635ae673cdf3f302a58c758bf6dd68 100644 --- a/drivers/media/platform/davinci/Kconfig +++ b/drivers/media/platform/davinci/Kconfig @@ -34,6 +34,7 @@ config VIDEO_DM6446_CCDC depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on I2C + depends on BROKEN select VIDEOBUF_DMA_CONTIG help Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces @@ -50,6 +51,7 @@ config VIDEO_DM355_CCDC depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on I2C + depends on BROKEN select VIDEOBUF_DMA_CONTIG help Enables DM355 CCD hw module. DM355 CCDC hw interfaces @@ -66,6 +68,7 @@ config VIDEO_DM365_ISIF depends on VIDEO_V4L2 depends on ARCH_DAVINCI || COMPILE_TEST depends on I2C + depends on BROKEN select VIDEOBUF_DMA_CONTIG help Enables ISIF hw module. 
This is the hardware module for diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index f924e76e2fbf869d859c6cea46c6f8945b368a02..340f8218f54d307cd582ecf645ca2228dc0cf067 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -1100,7 +1100,8 @@ static int isif_probe(struct platform_device *pdev) while (i >= 0) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); - release_mem_region(res->start, resource_size(res)); + if (res) + release_mem_region(res->start, resource_size(res)); i--; } vpfe_unregister_ccdc_device(&isif_hw_dev); diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c index 18c035ef84cfad4374978330e772600b6b959e6f..df1ae6b5c85458fd2153ce948e6ad0bd8173f434 100644 --- a/drivers/media/platform/davinci/vpbe.c +++ b/drivers/media/platform/davinci/vpbe.c @@ -740,7 +740,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev) if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s", def_output); - return ret; + goto fail_kfree_amp; } printk(KERN_NOTICE "Setting default mode to %s\n", def_mode); @@ -748,12 +748,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev) if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s", def_mode); - return ret; + goto fail_kfree_amp; } vpbe_dev->initialized = 1; /* TBD handling of bootargs for default output and mode */ return 0; +fail_kfree_amp: + mutex_lock(&vpbe_dev->lock); + kfree(vpbe_dev->amp); fail_kfree_encoders: kfree(vpbe_dev->encoders); fail_dev_unregister: diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index b0eb3d899eb44bfcca33eac2b50194eeb2b70838..6f82693524331e13a5aec76ca4003590c0fb7a4f 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -521,7 +521,7 @@ vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev, else if (v_scale == 4) layer_info->v_zoom = ZOOM_X4; if (v_exp) - layer_info->h_exp = V_EXP_6_OVER_5; + layer_info->v_exp = V_EXP_6_OVER_5; } else { /* no scaling, only cropping. 
Set display area to crop area */ cfg->ysize = expected_ysize; diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c index 19cf6853411e223905e183585753ba2ded590e11..89a86c19579b8ab9d2cbee42a4cc25913dab9b96 100644 --- a/drivers/media/platform/davinci/vpss.c +++ b/drivers/media/platform/davinci/vpss.c @@ -518,6 +518,11 @@ static int __init vpss_init(void) return -EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); + if (unlikely(!oper_cfg.vpss_regs_base2)) { + release_mem_region(VPSS_CLK_CTRL, 4); + return -ENOMEM; + } + writel(VPSS_CLK_CTRL_VENCCLKEN | VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 5ddb2321e9e48f74d41c803453cf2e112fcba22a..0fe9be93fabe2fd2feb0537ddae6092a9f0eee51 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev) return -ENODEV; is->pmu_regs = of_iomap(node, 0); + of_node_put(node); if (!is->pmu_regs) return -ENOMEM; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index deb499f76412a33f139aaedc467e038476f905aa..b5993532831da6b3aada06390b8338f3b32f66d4 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -498,6 +498,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) continue; ret = fimc_md_parse_port_node(fmd, port, index); + of_node_put(port); if (ret < 0) { of_node_put(node); goto rpm_put; @@ -531,6 +532,7 @@ static int __of_get_csis_id(struct device_node *np) if (!np) return -EINVAL; of_property_read_u32(np, "reg", ®); + of_node_put(np); return reg - FIMC_INPUT_MIPI_CSI2_0; } diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index 0273302aa7412f7c83a5b1cfe30ae3cd6050614e..83086eea145006bd8bcf6c3c055f4c3f18a7c13d 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -37,7 +37,7 @@ #define VIU_VERSION "0.5.1" /* Allow building this driver with COMPILE_TEST */ -#ifndef CONFIG_PPC +#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE) #define out_be32(v, a) iowrite32be(a, (void __iomem *)v) #define in_be32(a) ioread32be((void __iomem *)a) #endif diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index dfdbd4354b74cf700305c0b2c70a396938cd039a..eeee15ff007d878a3028d85e0f8a3eac2bcc4743 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -200,7 +200,6 @@ struct mcam_vb_buffer { struct list_head queue; struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */ dma_addr_t dma_desc_pa; /* Descriptor physical address */ - int dma_desc_nent; /* Number of mapped descriptors */ }; static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb) @@ -608,9 +607,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame) static void mcam_sg_next_buffer(struct mcam_camera *cam) { struct mcam_vb_buffer *buf; + struct sg_table *sg_table; buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue); list_del_init(&buf->queue); + sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0); /* * Very Bad Not Good Things happen if you don't clear * C1_DESC_ENA before making any descriptor changes. 
@@ -618,7 +619,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam) mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA); mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa); mcam_reg_write(cam, REG_DESC_LEN_Y, - buf->dma_desc_nent*sizeof(struct mcam_dma_desc)); + sg_table->nents * sizeof(struct mcam_dma_desc)); mcam_reg_write(cam, REG_DESC_LEN_U, 0); mcam_reg_write(cam, REG_DESC_LEN_V, 0); mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA); diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index 4f24da8afecc4db93bbb482fd331daae084dff5b..11429633b2fbc72ec3ea32fc5e4c912faa148154 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -702,7 +702,7 @@ static void mtk_jpeg_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb)); } -static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, +static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, enum v4l2_buf_type type) { if (V4L2_TYPE_IS_OUTPUT(type)) @@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; int ret = 0; ret = pm_runtime_get_sync(ctx->jpeg->dev); @@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) return 0; err: while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED); return ret; } static void mtk_jpeg_stop_streaming(struct vb2_queue *q) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; /* * STREAMOFF is an acknowledgment for source change event. 
@@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) struct mtk_jpeg_src_buf *src_buf; vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - src_buf = mtk_jpeg_vb2_to_srcbuf(vb); + src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf); mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param); ctx->state = MTK_JPEG_RUNNING; } else if (V4L2_TYPE_IS_OUTPUT(q->type)) { @@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) } while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR); pm_runtime_put_sync(ctx->jpeg->dev); } @@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv) { struct mtk_jpeg_ctx *ctx = priv; struct mtk_jpeg_dev *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; unsigned long flags; struct mtk_jpeg_src_buf *jpeg_src_buf; @@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv) src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) { - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, 0); + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0); buf_state = VB2_BUF_STATE_DONE; goto dec_end; } @@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv) return; } - mtk_jpeg_set_dec_src(ctx, src_buf, &bs); - if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb)) + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs); + if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb)) goto dec_end; spin_lock_irqsave(&jpeg->hw_lock, flags); @@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv) dec_end: v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); } @@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) { struct mtk_jpeg_dev *jpeg = priv; struct mtk_jpeg_ctx *ctx; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_jpeg_src_buf *jpeg_src_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; u32 dec_irq_ret; @@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW) mtk_jpeg_dec_reset(jpeg->dec_reg_base); @@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) goto dec_end; } - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, jpeg_src_buf->dec_param.comp_size[i]); buf_state = VB2_BUF_STATE_DONE; dec_end: - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + 
v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); return IRQ_HANDLED; } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c index bbb24fb95b9519b156428ee01846d3554cfada67..3deb0549b1a131855a4476e334164ed5e6d29409 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c @@ -118,7 +118,9 @@ static int mtk_mdp_probe(struct platform_device *pdev) mutex_init(&mdp->vpulock); /* Old dts had the components as child nodes */ - if (of_get_next_child(dev->of_node, NULL)) { + node = of_get_next_child(dev->of_node, NULL); + if (node) { + of_node_put(node); parent = dev->of_node; dev_warn(dev, "device tree is out of date\n"); } else { diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c index 3e73e9db781f426f5682cda940c8ab0499803319..7c025045ea904ff12279c98acc05eca5b55198bf 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c @@ -41,25 +41,27 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) node = of_parse_phandle(dev->of_node, "mediatek,larb", 0); if (!node) { mtk_v4l2_err("no mediatek,larb found"); - return -1; + return -ENODEV; } pdev = of_find_device_by_node(node); + of_node_put(node); if (!pdev) { mtk_v4l2_err("no mediatek,larb device found"); - return -1; + return -ENODEV; } pm->larbvenc = &pdev->dev; node = of_parse_phandle(dev->of_node, "mediatek,larb", 1); if (!node) { mtk_v4l2_err("no mediatek,larb found"); - return -1; + return -ENODEV; } pdev = of_find_device_by_node(node); + of_node_put(node); if (!pdev) { mtk_v4l2_err("no mediatek,larb device found"); - return -1; + return -ENODEV; } pm->larbvenclt = &pdev->dev; diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 64195c4ddeaf5d3b2f4c682cc4acc5dc3b744014..419e1cb10dc66ef719a73cdf3c8f9bdd912fb0a2 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv) { struct emmaprp_ctx *ctx = priv; struct emmaprp_q_data *s_q_data, *d_q_data; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct emmaprp_dev *pcdev = ctx->dev; unsigned int s_width, s_height; unsigned int d_width, d_height; @@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv) d_height = d_q_data->height; d_size = d_width * d_height; - p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0); - p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); if (!p_in || !p_out) { v4l2_err(&pcdev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig index 4b5e55d41ad4346c7b35f516ffdeecb77f47491e..2f8ace8700a4acc3d1becaef0d60c74eb5531f99 100644 --- a/drivers/media/platform/omap/Kconfig +++ b/drivers/media/platform/omap/Kconfig @@ -9,7 +9,7 @@ config VIDEO_OMAP2_VOUT depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n) depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST depends on VIDEO_V4L2 - select VIDEOBUF_GEN + depends on BROKEN select VIDEOBUF_DMA_CONTIG select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 select FRAME_VECTOR diff --git 
a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 29e3f5da59c1ff61137f66d93b75b865521ed5e1..11ec048929e80109e6702249aea0b345e79378d3 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c @@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout, */ pixsize = vout->bpp * vout->vrfb_bpp; - dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - - (vout->pix.width * vout->bpp)) + 1; + dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp; xt->src_start = vout->buf_phy_addr[vb->i]; xt->dst_start = vout->vrfb_context[vb->i].paddr[0]; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 842e2235047d9c6327a65e1ab4fd9ea468fc015a..addd03b51748136843eedb8c89f1eee0bc9b2d01 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -722,6 +722,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe, s_stream, mode); pipe->do_propagation = true; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return 0; @@ -836,6 +840,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) &subdev->entity); failure = -ETIMEDOUT; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return failure; @@ -1587,6 +1595,8 @@ static void isp_pm_complete(struct device *dev) static void isp_unregister_entities(struct isp_device *isp) { + media_device_unregister(&isp->media_dev); + omap3isp_csi2_unregister_entities(&isp->isp_csi2a); omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); @@ -1597,7 +1607,6 @@ static void isp_unregister_entities(struct isp_device *isp) omap3isp_stat_unregister_entities(&isp->isp_hist); v4l2_device_unregister(&isp->v4l2_dev); - media_device_unregister(&isp->media_dev); media_device_cleanup(&isp->media_dev); } diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 77b73e27a2746bf086d1bf86c7cb252582c94316..412438dce2854a353d257e1267b8cc797edddd8b 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -2605,6 +2605,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc, int ret; /* Register the subdev and video node. */ + ccdc->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccdc->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c index e062939d0d054386b533ab8d3ab13afb3bdc1fbf..47b0d3fe87d8076dee4625dcb35a6452620f1868 100644 --- a/drivers/media/platform/omap3isp/ispccp2.c +++ b/drivers/media/platform/omap3isp/ispccp2.c @@ -1034,6 +1034,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2, int ret; /* Register the subdev and video nodes. */ + ccp2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccp2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index a4d3d030e81e20568b063d0544d18b64dd3a4d87..e45292a1bf6c5a35f3f370bd205c08b8fda4dd18 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -1201,6 +1201,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2, int ret; /* Register the subdev and video nodes. 
*/ + csi2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &csi2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 3195f7c8b8b7e43343af0908ad4a6ac8d0075a27..591c6de498f8913b15b43771696d1c1bba089100 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev, int ret; /* Register the subdev and video nodes. */ + prev->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &prev->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c index 0b6a87508584f4eb5201f3f2b0bf284c1b7380f7..2035e3c6a9deeeb5660d25ae690b4719ac9e4ebb 100644 --- a/drivers/media/platform/omap3isp/ispresizer.c +++ b/drivers/media/platform/omap3isp/ispresizer.c @@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res, int ret; /* Register the subdev and video nodes. */ + res->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &res->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c index 47353fee26c3297234502d6d9ce197ff5098eb0c..bfa2d05046466a40baae760eecf11aa2e8e71936 100644 --- a/drivers/media/platform/omap3isp/ispstat.c +++ b/drivers/media/platform/omap3isp/ispstat.c @@ -1029,6 +1029,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat) int omap3isp_stat_register_entities(struct ispstat *stat, struct v4l2_device *vdev) { + stat->subdev.dev = vdev->mdev->dev; + return v4l2_device_register_subdev(vdev, &stat->subdev); } diff --git a/drivers/media/platform/phytium-jpeg/Makefile b/drivers/media/platform/phytium-jpeg/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d9f50a1aaba3546017f4f69f80be616ee21b4aff --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +phytium_jpeg-objs := phytium_jpeg_core.o +obj-$(CONFIG_VIDEO_PHYTIUM_JPEG) += phytium_jpeg.o diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c new file mode 100644 index 0000000000000000000000000000000000000000..10eeeb44bc1ac29846e8194a07926b7fd63e05fd --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c @@ -0,0 +1,1373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium JPEG Encoder Engine + * + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include "phytium_jpeg_reg.h" +#include "phytium_jpeg_core.h" +#include + +static u32 phytium_jpeg_header[PHYTIUM_JPEG_HEADER_SIZE] = { + 0xe0ffd8ff, 0x464a0100, 0x01004649, 0x01000001, + 0x00000100, 0x4300dbff, 0x0c0b1000, 0x100a0c0e, + 0x120e0d0e, 0x18131011, 0x16181a28, 0x23311816, + 0x3a281d25, 0x393c3d33, 0x40373833, 0x404e5c48, + 0x37455744, 0x516d5038, 0x67625f57, 0x4d3e6768, + 0x64707971, 0x67655c78, 0x00dbff63, 0x12110143, + 0x18151812, 0x2f1a1a2f, 0x42384263, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0xc0ff6363, + /* h_index(40) indicates high 8 bits of the height + * w_index(41) contains the low 8 bits of the height, + * and the width. 
For example, height 480(0x01e0) + * locates at 0x<01> 081100 and 0x038002 . + * width 640 (0x0280) locates at 0x03 <80> <02> e0. + */ + + /* 0x0200 <11> 01 is a field marks YUV mode */ + 0x01081100, 0x038002e0, 0x02001101, 0x11030111, + 0x00c4ff01, 0x0100001f, 0x01010105, 0x00010101, + 0x00000000, 0x01000000, 0x05040302, 0x09080706, + 0xc4ff0b0a, 0x00011f00, 0x01010103, 0x01010101, + 0x00000101, 0x00000000, 0x04030201, 0x08070605, + 0xff0b0a09, 0x10b500c4, 0x03010200, 0x03040203, + 0x04040505, 0x7d010000, 0x00030201, 0x12051104, + 0x06413121, 0x07615113, 0x32147122, 0x08a19181, + 0xc1b14223, 0xf0d15215, 0x72623324, 0x160a0982, + 0x1a191817, 0x28272625, 0x35342a29, 0x39383736, + 0x4544433a, 0x49484746, 0x5554534a, 0x59585756, + 0x6564635a, 0x69686766, 0x7574736a, 0x79787776, + 0x8584837a, 0x89888786, 0x9493928a, 0x98979695, + 0xa3a29a99, 0xa7a6a5a4, 0xb2aaa9a8, 0xb6b5b4b3, + 0xbab9b8b7, 0xc5c4c3c2, 0xc9c8c7c6, 0xd4d3d2ca, + 0xd8d7d6d5, 0xe2e1dad9, 0xe6e5e4e3, 0xeae9e8e7, + 0xf4f3f2f1, 0xf8f7f6f5, 0xc4fffaf9, 0x0011b500, + 0x04020102, 0x07040304, 0x00040405, 0x00770201, + 0x11030201, 0x31210504, 0x51411206, 0x13716107, + 0x08813222, 0xa1914214, 0x2309c1b1, 0x15f05233, + 0x0ad17262, 0xe1342416, 0x1817f125, 0x27261a19, + 0x352a2928, 0x39383736, 0x4544433a, 0x49484746, + 0x5554534a, 0x59585756, 0x6564635a, 0x69686766, + 0x7574736a, 0x79787776, 0x8483827a, 0x88878685, + 0x93928a89, 0x97969594, 0xa29a9998, 0xa6a5a4a3, + 0xaaa9a8a7, 0xb5b4b3b2, 0xb9b8b7b6, 0xc4c3c2ba, + 0xc8c7c6c5, 0xd3d2cac9, 0xd7d6d5d4, 0xe2dad9d8, + 0xe6e5e4e3, 0xeae9e8e7, 0xf5f4f3f2, 0xf9f8f7f6, + 0x00fefffa, 0x0000008f, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xdaff0000, 0x01030c00, 0x03110200, 0x003f0011 +}; + +static char yuv_mode_str[YUV_MODE_STR_LEN] = { "yuv444" }; + +module_param_string(yuv_mode, yuv_mode_str, sizeof(yuv_mode_str), 0444); +MODULE_PARM_DESC(yuv_mode, "Users select one mode from such modes as 'yuv444', or 'yuv422', or 'yuv420'. 
If no mode is set, the driver adopts the default mode 'yuv444'."); + +static u32 phytium_jpeg_read(struct phytium_jpeg_dev *jpeg_dev, u32 reg) +{ + u32 reg_val = readl(jpeg_dev->base_addr + reg); + + dev_dbg(jpeg_dev->dev, "read 0x%p + 0x%x -->val[0x%x]\n", + jpeg_dev->base_addr, reg, reg_val); + + return reg_val; +} + +static void phytium_jpeg_write(struct phytium_jpeg_dev *jpeg_dev, + u32 reg, u32 val) +{ + writel(val, jpeg_dev->base_addr + reg); + dev_dbg(jpeg_dev->dev, "write 0x%x to addr 0x%p + 0x%x\n", + val, jpeg_dev->base_addr, reg); +} + +static void phytium_jpeg_update(struct phytium_jpeg_dev *jpeg_dev, u32 reg, + u32 clear, u32 bits) +{ + u32 reg_val = readl(jpeg_dev->base_addr + reg); + u32 tmp = reg_val; + + reg_val &= ~clear; + reg_val |= bits; + writel(reg_val, jpeg_dev->base_addr + reg); + + dev_dbg(jpeg_dev->dev, "the val of addr 0x%p + 0x%x, from 0x%x to 0x%x\n", + jpeg_dev->base_addr, reg, tmp, readl(jpeg_dev->base_addr + reg)); +} + +static void phytium_jpeg_init_regs(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 transform_info = 0; + u32 disable_all_interrupt = 0; + u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW | + INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; + u32 rate_to_reg = 0; + + /* First, disable the JPEG engine, set bit0 = 0 */ + phytium_jpeg_write(jpeg_dev, TRANSFORM_INFO_REG, transform_info); + + /* Second, set VGA video_source_information. bit1 = 0 marks VGA */ + transform_info |= 0; + + /* Third, set AXI burst length bit[16:22] = 0xf, the default value */ + transform_info |= (0xF << TRANS_AXI_LEN_SHIFT) & TRANSINFO_AXI_LEN; + + /* Fourth, the default sampling format is YUV422, set bit13 to 0 */ + /* ignore setting sampling interval */ + phytium_jpeg_write(jpeg_dev, TRANSFORM_INFO_REG, transform_info); + udelay(5); + + /* Fifth, set the frame rate. + * The Linux kernel prohibits floating-point operations, so use the + * formula: reg_val = (1 second * 10^8 / frame_rate / 134 * 100), + * write reg_val to the register,
then enable Highest bit31 = 1 + */ + if (jpeg_dev->frame_rate) { + rate_to_reg = 100000000 / jpeg_dev->frame_rate / 134 * 100; + rate_to_reg |= FRAME_SAMPLE_CTRL_EN; + phytium_jpeg_write(jpeg_dev, FRAME_SAMPLE_CTRL, rate_to_reg); + } + /* Sixth, HUFF_MODE, driver needn't to configure, ignore */ + + /* disable all interrupts and then clear all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, + disable_all_interrupt); + udelay(5); + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + + /* Seventh, Sample_mode, hardware default is yuv444 */ + jpeg_dev->yuv420 = false; +} + +/* Turn on the clock of the jpeg engine */ +static void phytium_jpeg_on(struct phytium_jpeg_dev *jpeg_dev) +{ + if (test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) + return; + + /* Turn on the relevant clocks */ + set_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status); +} + +/* Disable the jpeg engine */ +static void phytium_jpeg_off(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 disable_all_interrupt = 0; + u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW | + INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; + + if (!test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) { + dev_info(jpeg_dev->dev, "JPEG Engine is already off.\n"); + return; + } + + /* disable all interrupt */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, disable_all_interrupt); + /* clear all interrupt */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + /* disable JPEG engine */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 0); + + clear_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status); + /* wait 50 ms */ + mdelay(50); + /* C08 bit7 1:busy */ +} + +static inline void phytium_jpeg_enable_source_detecting(struct phytium_jpeg_dev *jpeg_dev) +{ + /* + * Enable the dectection to discovery + * the source resolution is changed + */ + //phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, DETECT_RESOLUTION_CHANGE_EN); + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, 0, TRANSINFO_SRC_SELECT); +} + +#define res_check(val) \ + test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(val)->status) + +static void phytium_jpeg_get_resolution(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 source_info; + u32 width; + u32 height; + struct v4l2_bt_timings *detected_timings = &jpeg_dev->detected_timings; + + /* Before get a new resolution, maybe need to wait 10 us */ + detected_timings->width = MIN_WIDTH; + detected_timings->height = MIN_HEIGHT; + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + + + phytium_jpeg_enable_source_detecting(jpeg_dev); + source_info = phytium_jpeg_read(jpeg_dev, SRC_VGA_INFO_REG); + width = (source_info & SRC_HOR_PIXELS) >> SRC_WIDTH_SHIFT; + height = (source_info & SRC_VER_PIXELS) >> SRC_HEIGHT_SHIFT; + detected_timings->width = width; + detected_timings->height = height; + jpeg_dev->v4l2_input_status = 0; + + /* + * Resolution is changed will trigger an interrupt, resolution detecting + * also is disable during process interrupt. So re-enable. + */ + phytium_jpeg_enable_source_detecting(jpeg_dev); + dev_info(jpeg_dev->dev, "Change resolution: %uX%u\n", width, height); +} + +static void phytium_jpeg_set_resolution(struct phytium_jpeg_dev *jpeg_dev) +{ + struct v4l2_bt_timings *active_timings = &jpeg_dev->active_timings; + int i; + int src_addrs[OCM_BUF_NUM]; + /* + * The OCM address space is 0x30C0_0000 ~ 0x30C7_FFFF, JPEG Engine uses the + * high-bottom address. 
src_0 uses 0x30C4_0000 ~ 0x30c6_0000 (total capacity is + * 128KB, greater than the requirements of the largest resolution). src_1 uses + * 0x30C6_0000 ~ 0x30C7_FFFF. + */ + + /* The OCM address should shift right 8 bits */ + for (i = 0; i < OCM_BUF_NUM; i++) + src_addrs[i] = jpeg_dev->src_addrs[i].dma_addr >> OCM_BUF_SHIFT; + + phytium_jpeg_write(jpeg_dev, OCM_BUF0_ADDR, src_addrs[0]); + phytium_jpeg_write(jpeg_dev, OCM_BUF1_ADDR, src_addrs[1]); + + /* + * In the worst case, the size of one image will be compressed to 25% the + * raw image's size. When a pixel is 4-byte, no need to divide 4. + */ + jpeg_dev->max_compressed_size = active_timings->width * active_timings->height; +} + +/* The below functions is implemented for various v4l2 ioctl operations */ +static int phytium_jpeg_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + strscpy(cap->driver, PHYTIUM_JPEG_NAME, sizeof(cap->driver)); + strscpy(cap->card, "Phytium JPEG Engine", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(jpeg_dev->dev)); + + return 0; +} + +static int phytium_jpeg_enum_format(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (f->index) { + dev_err(jpeg_dev->dev, "Failed to enum format\n"); + return -EINVAL; + } + + f->pixelformat = V4L2_PIX_FMT_JPEG; + + return 0; +} + +static int phytium_jpeg_get_format(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + f->fmt.pix = jpeg_dev->pix_fmt; + + return 0; +} + +static int phytium_jpeg_enum_input(struct file *file, void *priv, + struct v4l2_input *input) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (input->index) { + dev_err(jpeg_dev->dev, "failed to enum input\n"); + return -EINVAL; + } + + strscpy(input->name, "Host DC Capture", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + input->capabilities = V4L2_IN_CAP_DV_TIMINGS; + input->status = jpeg_dev->v4l2_input_status; + + return 0; +} + +static int phytium_jpeg_get_input(struct file *file, void *priv, + unsigned int *i) +{ + *i = 0; + return 0; +} + +static int phytium_jpeg_set_input(struct file *file, void *priv, + unsigned int i) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (i != 0) { + dev_err(jpeg_dev->dev, "Failed to set input\n"); + return -EINVAL; + } + + return 0; +} + +static int phytium_jpeg_get_parm(struct file *file, void *priv, + struct v4l2_streamparm *stream) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + /* Readbuffers num is 3 */ + stream->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + stream->parm.capture.readbuffers = CAPTURE_BUF_NUMBER; + stream->parm.capture.timeperframe.denominator = 1; + + if (jpeg_dev->frame_rate == 0) + stream->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; + else + stream->parm.capture.timeperframe.denominator = jpeg_dev->frame_rate; + + return 0; +} + +static int phytium_jpeg_set_parm(struct file *file, void *priv, + struct v4l2_streamparm *stream) +{ + unsigned int frame_rate = 0; + u32 rate_to_reg = 0; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + /* Readbuffers num is 3 */ + stream->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + stream->parm.capture.readbuffers = CAPTURE_BUF_NUMBER; + + if (stream->parm.capture.timeperframe.numerator) + frame_rate = 
stream->parm.capture.timeperframe.denominator / + stream->parm.capture.timeperframe.numerator; + + if (frame_rate == 0 || frame_rate > MAX_FRAME_RATE) { + frame_rate = MAX_FRAME_RATE; + stream->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; + stream->parm.capture.timeperframe.numerator = 1; + } + /* + * reg_val = (1 second * 10^9 / frame_rate / 13.4) + * Linux driver prohibit float point operations. So use the + * format: reg_val = (1 second * 10^8 / frame_rate / 134 *100) + * write reg_val to register. then enable Highest bit31 = 1 + */ + if (jpeg_dev->frame_rate != frame_rate) { + jpeg_dev->frame_rate = frame_rate; + rate_to_reg = 100000000 / jpeg_dev->frame_rate / 134 * 100; + rate_to_reg |= FRAME_SAMPLE_CTRL_EN; + phytium_jpeg_write(jpeg_dev, FRAME_SAMPLE_CTRL, rate_to_reg); + } + + return 0; +} + +static int phytium_jpeg_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) + +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (fsize->index != 0) { + dev_err(jpeg_dev->dev, "Failed to enum framesize.\n"); + return -EINVAL; + } + + if (fsize->pixel_format != V4L2_PIX_FMT_JPEG) { + dev_err(jpeg_dev->dev, "enum framesize pixel_format is not JPEG"); + return -EINVAL; + } + + fsize->discrete.width = jpeg_dev->pix_fmt.width; + fsize->discrete.height = jpeg_dev->pix_fmt.height; + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + + return 0; + +} + +static int phytium_jpeg_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fival) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (fival->index != 0) { + dev_err(jpeg_dev->dev, "enum frame intervals failed\n"); + return -EINVAL; + } + + if (fival->width != jpeg_dev->detected_timings.width || + fival->height != jpeg_dev->detected_timings.height) { + dev_err(jpeg_dev->dev, "interval isn't same with the detected_timings.\n"); + return -EINVAL; + } + + if (fival->pixel_format != V4L2_PIX_FMT_JPEG) { + dev_err(jpeg_dev->dev, "enum frame interval pixel fomat is incorrect.\n"); + return -EINVAL; + } + + fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; + fival->stepwise.min.denominator = MAX_FRAME_RATE; + fival->stepwise.min.numerator = 1; + fival->stepwise.max.denominator = 1; + fival->stepwise.max.numerator = 1; + fival->stepwise.step = fival->stepwise.max; + + return 0; +} + +static int phytium_jpeg_set_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + /* the params are passed from user space are same with hardware's params */ + if (timings->bt.width == jpeg_dev->active_timings.width && + timings->bt.height == jpeg_dev->active_timings.height) + return 0; + + if (vb2_is_busy(&jpeg_dev->queue)) { + dev_err(jpeg_dev->dev, "queue is busy during setting dv timings.\n"); + return -EBUSY; + } + + jpeg_dev->active_timings = timings->bt; + phytium_jpeg_set_resolution(jpeg_dev); + jpeg_dev->pix_fmt.width = timings->bt.width; + jpeg_dev->pix_fmt.height = timings->bt.height; + jpeg_dev->pix_fmt.sizeimage = jpeg_dev->max_compressed_size; + timings->type = V4L2_DV_BT_656_1120; + + return 0; +} + +static int phytium_jpeg_get_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + timings->type = V4L2_DV_BT_656_1120; + timings->bt = jpeg_dev->active_timings; + + return 0; +} + +static int phytium_jpeg_query_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + int ret; + 
struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + /* + * This blocks only if the driver is currently in the process of + * detecting a new resolution; in the event of no signal or timeout + * this function is woken up. + */ + if ((file->f_flags & O_NONBLOCK) && + test_bit(VIDEO_RES_CHANGE, &jpeg_dev->status)) + return -EAGAIN; + + ret = wait_event_interruptible(jpeg_dev->wait, !test_bit(VIDEO_RES_CHANGE, + &jpeg_dev->status)); + if (ret) { + dev_err(jpeg_dev->dev, "Failed to query dv timing\n"); + return -EINTR; + } + + timings->type = V4L2_DV_BT_656_1120; + timings->bt = jpeg_dev->detected_timings; + + return jpeg_dev->v4l2_input_status ? -ENOLINK : 0; +} + +static const struct v4l2_dv_timings_cap phytium_jpeg_timings_cap = { + .type = V4L2_DV_BT_656_1120, + .bt = { + .min_width = MIN_WIDTH, + .max_width = MAX_WIDTH, + .min_height = MIN_HEIGHT, + .max_height = MAX_HEIGHT, + .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */ + .max_pixelclock = 1244160000, /* 1920 x 1080 x 60Hz */ + .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | + V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, + .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | + V4L2_DV_BT_CAP_REDUCED_BLANKING | + V4L2_DV_BT_CAP_CUSTOM, + }, +}; + +static int phytium_jpeg_enum_dv_timings(struct file *file, void *priv, + struct v4l2_enum_dv_timings *timings) +{ + return v4l2_enum_dv_timings_cap(timings, &phytium_jpeg_timings_cap, + NULL, NULL); +} + +static int phytium_jpeg_dv_timings_cap(struct file *file, void *priv, + struct v4l2_dv_timings_cap *cap) +{ + *cap = phytium_jpeg_timings_cap; + + return 0; +} + +/* The function is used to notify DV that video resolution is altered */ +static int phytium_jpeg_sub_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_src_change_event_subscribe(fh, sub); + default: + break; + } + + return v4l2_ctrl_subscribe_event(fh, sub); +} + +static const struct v4l2_ioctl_ops phytium_jpeg_ioctl_ops = { + .vidioc_querycap = phytium_jpeg_querycap, + .vidioc_enum_fmt_vid_cap = phytium_jpeg_enum_format, + .vidioc_g_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_s_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_try_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_input = phytium_jpeg_enum_input, + .vidioc_g_input = phytium_jpeg_get_input, + .vidioc_s_input = phytium_jpeg_set_input, + .vidioc_g_parm = phytium_jpeg_get_parm, + .vidioc_s_parm = phytium_jpeg_set_parm, + .vidioc_enum_framesizes = phytium_jpeg_enum_framesizes, + .vidioc_enum_frameintervals = phytium_jpeg_enum_frameintervals, + .vidioc_s_dv_timings = phytium_jpeg_set_dv_timings, + .vidioc_g_dv_timings = phytium_jpeg_get_dv_timings, + .vidioc_query_dv_timings = phytium_jpeg_query_dv_timings, + .vidioc_enum_dv_timings = phytium_jpeg_enum_dv_timings, + .vidioc_dv_timings_cap = phytium_jpeg_dv_timings_cap, + .vidioc_subscribe_event = phytium_jpeg_sub_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static void phytium_jpeg_init_jpeg_quant(struct phytium_jpeg_dev *jpeg_dev) +{ + const u32 y_quant_table[QUANT_REG_NUM] = { + 0x08000000, 0x0ba2e8ba, 0x0aaaaaab, 0x09249249, 
0x0aaaaaab, + 0x0ccccccc, 0x08000000, 0x09249249, 0x09d89d8a, 0x09249249, + 0x071c71c7, 0x07878788, 0x08000000, 0x06bca1af, 0x05555555, + 0x03333333, 0x04ec4ec5, 0x05555555, 0x05d1745d, 0x05d1745d, + 0x05555555, 0x029cbc15, 0x03a83a84, 0x03759f23, 0x0469ee58, + 0x03333333, 0x0234f72c, 0x02828283, 0x02192e2a, 0x02222222, + 0x023ee090, 0x02828283, 0x02492492, 0x0253c825, 0x02000000, + 0x01c71c72, 0x01642c86, 0x01a41a42, 0x02000000, 0x01e1e1e2, + 0x0178a4c8, 0x01dae607, 0x0253c825, 0x02492492, 0x0199999a, + 0x012c9fb5, 0x01948b10, 0x0178a4c8, 0x0158ed23, 0x014e5e0a, + 0x013e22cc, 0x013b13b1, 0x013e22cc, 0x02108421, 0x01a98ef6, + 0x0121fb78, 0x010ecf57, 0x01249249, 0x013e22cc, 0x01111111, + 0x01642c86, 0x01446f86, 0x013e22cc, 0x014afd6a + }; + + const u32 c_quant_table[QUANT_REG_NUM] = { + 0x07878788, 0x071c71c7, 0x071c71c7, 0x05555555, 0x06186186, + 0x05555555, 0x02b93105, 0x04ec4ec5, 0x04ec4ec5, 0x02b93105, + 0x014afd6a, 0x01f07c1f, 0x02492492, 0x01f07c1f, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a + }; + int i; + + for (i = 0; i < QUANT_REG_NUM; i++) { + phytium_jpeg_write(jpeg_dev, Y_QUANT_INDEX_ADDR_REG(i), y_quant_table[i]); + phytium_jpeg_write(jpeg_dev, C_QUANT_INDEX_ADDR_REG(i), c_quant_table[i]); + } + +} + +static void phytium_jpeg_start(struct phytium_jpeg_dev *jpeg_dev) +{ + phytium_jpeg_on(jpeg_dev); + phytium_jpeg_init_regs(jpeg_dev); + + /* Resolution set to 640x480 if no signal is found */ + phytium_jpeg_get_resolution(jpeg_dev); + + /* Set timings since the device is being opened for the first tiime */ + jpeg_dev->active_timings = jpeg_dev->detected_timings; + phytium_jpeg_set_resolution(jpeg_dev); + + jpeg_dev->pix_fmt.width = jpeg_dev->active_timings.width; + jpeg_dev->pix_fmt.height = jpeg_dev->active_timings.height; + jpeg_dev->pix_fmt.sizeimage = jpeg_dev->max_compressed_size; +} + +static void phytium_jpeg_stop(struct phytium_jpeg_dev *jpeg_dev) +{ + set_bit(VIDEO_STOPPED, &jpeg_dev->status); + cancel_delayed_work_sync(&jpeg_dev->res_work); + + phytium_jpeg_off(jpeg_dev); + + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + jpeg_dev->status = 0; +} + +static int phytium_jpeg_open(struct file *file) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + mutex_lock(&jpeg_dev->video_lock); + + ret = v4l2_fh_open(file); + if (ret != 0) { + mutex_unlock(&jpeg_dev->video_lock); + dev_err(jpeg_dev->dev, "Failed to open the phytium jpeg device.\n"); + return ret; + } + + if (v4l2_fh_is_singular_file(file)) + phytium_jpeg_start(jpeg_dev); + + mutex_unlock(&jpeg_dev->video_lock); + + return 0; +} + +static int phytium_jpeg_release(struct file *file) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + mutex_lock(&jpeg_dev->video_lock); + + if (v4l2_fh_is_singular_file(file)) + phytium_jpeg_stop(jpeg_dev); + + ret = _vb2_fop_release(file, NULL); + mutex_unlock(&jpeg_dev->video_lock); + + return ret; +} + + +static const struct v4l2_file_operations phytium_jpeg_fops 
= { + .owner = THIS_MODULE, + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = phytium_jpeg_open, + .release = phytium_jpeg_release, +}; + +static void phytium_jpeg_update_jpeg_header(u32 width, u32 height) +{ + const int h_index = PHYTIUM_JPEG_HEADER_H_INDEX; + const int w_index = PHYTIUM_JPEG_HEADER_W_INDEX; + + /* the high 8 bits of the height locates at bit24~bit31 */ + phytium_jpeg_header[h_index] = phytium_jpeg_header[h_index] & 0x00FFFFFF; + phytium_jpeg_header[h_index] |= ((height >> 8) & 0xFF) << 24; + + /* the low 8 bits of the height locates at bit0~bit7 */ + phytium_jpeg_header[w_index] = phytium_jpeg_header[w_index] & 0xFF000000; + phytium_jpeg_header[w_index] |= height & 0xFF; + + /* the high 8 bits of the width locates at bit8~bit15 */ + phytium_jpeg_header[w_index] |= ((width >> 8) & 0xFF) << 8; + /* the low 8 bits of the width locates at bit16~bit24 */ + phytium_jpeg_header[w_index] |= (width & 0xFF) << 16; +} + +static void phytium_jpeg_fill_header(struct phytium_jpeg_dev *jpeg_dev, + struct phytium_jpeg_buffer *jpeg_buf) +{ + void *vbuf = vb2_plane_vaddr(&jpeg_buf->vb.vb2_buf, 0); + u32 width = jpeg_dev->active_timings.width; + u32 height = jpeg_dev->active_timings.height; + + /* update the contents of the phytium jpeg header according to the resolution */ + phytium_jpeg_update_jpeg_header(width, height); + + /* replenish the contents of the JPEG header */ + memcpy(vbuf, phytium_jpeg_header, PHYTIUM_JPEG_HEADER_LEN); +} + +static int phytium_jpeg_start_frame(struct phytium_jpeg_dev *jpeg_dev) +{ + dma_addr_t dst_addr; + unsigned long status; + struct phytium_jpeg_buffer *jpeg_buf; + + if (jpeg_dev->v4l2_input_status) { + dev_err(jpeg_dev->dev, "No signal; needn't start frame\n"); + return 0; + } + + spin_lock_irqsave(&jpeg_dev->hw_lock, status); + jpeg_buf = list_first_entry_or_null(&jpeg_dev->buffers, + struct phytium_jpeg_buffer, link); + if (jpeg_buf == NULL) { + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + dev_err(jpeg_dev->dev, "No buffers; doesn't start frame\n"); + return -EPROTO; + } + + set_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + dst_addr = vb2_dma_contig_plane_dma_addr(&jpeg_buf->vb.vb2_buf, 0); + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + + /* + * Because the JPEG Engine is unable to add a JPEG header, the phytium + * jpeg driver is required to fill the contents of a JPEG header before + * the jpeg engine write datas to the dma address. + */ + phytium_jpeg_fill_header(jpeg_dev, jpeg_buf); + dst_addr += PHYTIUM_JPEG_HEADER_LEN; + /* + * The ikvm application only using the last frame, so the driver replenish + * one output register with a dma address. 
+ */ + dst_addr >>= JPEG_DST_ADDR_SHIFT; + phytium_jpeg_write(jpeg_dev, BUF_LIST_INDEX_ADDR(VB_BUF_NO), dst_addr); + /* Enable the validilty of the buffer marked with index */ + phytium_jpeg_write(jpeg_dev, BUF_LIST_INDEX_CTRL_STS_ADDR(VB_BUF_NO), + STS_JPEG_BUF_HIGH_LEVEL_VALID); + /* Enable the interruption which is used to identify an image was compressed */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, STS_VE_JPEG_CODE_COMP_EN); + /* Enable JPEG, start to capture and compress */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 1); + + return 0; +} + +static void phytium_jpeg_resolution_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct phytium_jpeg_dev *jpeg_dev = container_of(dwork, + struct phytium_jpeg_dev, res_work); + u32 input_status = jpeg_dev->v4l2_input_status; + + phytium_jpeg_on(jpeg_dev); + + /* Exit early in the case no clients remain */ + if (test_bit(VIDEO_STOPPED, &jpeg_dev->status)) + goto done; + + phytium_jpeg_init_regs(jpeg_dev); + phytium_jpeg_get_resolution(jpeg_dev); + + /* if source's resolution is changed, the event should be enqueued */ + if (jpeg_dev->detected_timings.width != jpeg_dev->active_timings.width || + jpeg_dev->detected_timings.height != jpeg_dev->active_timings.height || + input_status != jpeg_dev->v4l2_input_status) { + static const struct v4l2_event event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + v4l2_event_queue(&jpeg_dev->vdev, &event); + clear_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "event notify changing resolution\n"); + } else if (test_bit(VIDEO_STREAMING, &jpeg_dev->status)) { + /* No resolution change so just restart streaming */ + phytium_jpeg_start_frame(jpeg_dev); + } + +done: + clear_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); + wake_up_interruptible_all(&jpeg_dev->wait); +} + +static int phytium_jpeg_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + if (*num_planes) { + if (sizes[0] < jpeg_dev->max_compressed_size) { + v4l2_err(&jpeg_dev->v4l2_dev, "queue v4l2_buf's size is invalid\n"); + return -EINVAL; + } + } + + *num_planes = 1; + sizes[0] = jpeg_dev->max_compressed_size; + return 0; +} + +static int phytium_jpeg_buf_prepare(struct vb2_buffer *vb) +{ + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(vb->vb2_queue); + + if (vb2_plane_size(vb, 0) < jpeg_dev->max_compressed_size) { + v4l2_err(&jpeg_dev->v4l2_dev, "failed to prepare buffer\n"); + return -EINVAL; + } + + return 0; +} + +static inline struct phytium_jpeg_buffer * +phytium_vb2buf_to_dstbuf(struct vb2_v4l2_buffer *buf) +{ + return container_of(buf, struct phytium_jpeg_buffer, vb); +} + +static void phytium_jpeg_buf_queue(struct vb2_buffer *vb) +{ + bool empty; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct phytium_jpeg_buffer *jpeg_buf = phytium_vb2buf_to_dstbuf(vbuf); + unsigned long status; + + spin_lock_irqsave(&jpeg_dev->hw_lock, status); + empty = list_empty(&jpeg_dev->buffers); + list_add_tail(&jpeg_buf->link, &jpeg_dev->buffers); + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + + /* the empty ensures the address of the first node's vb2_v4l2_buf + * in the list is written to output register + */ + if (test_bit(VIDEO_STREAMING, 
&jpeg_dev->status) && + (!test_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status)) && + empty) + phytium_jpeg_start_frame(jpeg_dev); +} + +static void phytium_jpeg_bufs_done(struct phytium_jpeg_dev *jpeg_dev, + enum vb2_buffer_state state) +{ + unsigned long flags; + struct phytium_jpeg_buffer *buf; + + spin_lock_irqsave(&jpeg_dev->hw_lock, flags); + + list_for_each_entry(buf, &jpeg_dev->buffers, link) + vb2_buffer_done(&buf->vb.vb2_buf, state); + + INIT_LIST_HEAD(&jpeg_dev->buffers); + + spin_unlock_irqrestore(&jpeg_dev->hw_lock, flags); +} + +static void phytium_jpeg_irq_res_change(struct phytium_jpeg_dev *jpeg_dev, + ulong delay) +{ + dev_info(jpeg_dev->dev, "Source resolution is changed, resetting\n"); + set_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); + + phytium_jpeg_off(jpeg_dev); + phytium_jpeg_bufs_done(jpeg_dev, VB2_BUF_STATE_ERROR); + + schedule_delayed_work(&jpeg_dev->res_work, delay); +} + +static irqreturn_t phytium_jpeg_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + u32 status; + struct phytium_jpeg_buffer *buf; + u32 frame_size; + + if (test_bit(VIDEO_POWEROFF, &jpeg_dev->status)) { + dev_info(jpeg_dev->dev, "jpeg engine is requested to poweroff\n"); + return IRQ_HANDLED; + } + + status = phytium_jpeg_read(jpeg_dev, INT_STATUS_CTRL_REG); + + if (status & INT_VIDEO_FORMAT_CHANGE) { + dev_info(jpeg_dev->dev, "receive resolution changed interrupt\n"); + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, + DETECT_RESOLUTION_CHANGE_EN, 0); + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, INT_VIDEO_FORMAT_CHANGE); + phytium_jpeg_irq_res_change(jpeg_dev, RESOLUTION_CHANGE_DELAY); + return IRQ_HANDLED; + } + + /* + * JPEG engine finish compressing a image JPEG encoding to trigger + * a interruption. the status identifies the buffer number. Currently, + * the driver uses one buffer. + * + * Note: Because the JPEG doesn't support adding a JPEG header, and + * driver is also unable to add a JPEG header to vb2_buffers. One + * solution is that a JPEG header is added by an application. 
+ */ + if (status & INT_JPEG_COMP_BUF_LIST_NO) { + frame_size = phytium_jpeg_read(jpeg_dev, jpeg_dev->comp_size_read); + frame_size &= JPEG_BUF_CAPACITY_SIZE; + frame_size >>= JPEG_BUF_CAPACITY_SIZE_SHIFT; + spin_lock(&jpeg_dev->hw_lock); + clear_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + /* Delete first node from the queue */ + buf = list_first_entry_or_null(&jpeg_dev->buffers, + struct phytium_jpeg_buffer, link); + if (buf != NULL) { + frame_size += PHYTIUM_JPEG_HEADER_LEN; + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size); + if (!list_is_last(&buf->link, &jpeg_dev->buffers)) { + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + buf->vb.sequence = jpeg_dev->sequence++; + buf->vb.field = V4L2_FIELD_NONE; + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + list_del(&buf->link); + } + } + + spin_unlock(&jpeg_dev->hw_lock); + /* Disable JPEG engine */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 0); + /* Disable interruption */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + /* clear all interruption of the hardware's buffers */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, INT_JPEG_ENCODE_COMPLETE, 1); + + status &= ~INT_JPEG_COMP_BUF_LIST_NO; + if (test_bit(VIDEO_STREAMING, &jpeg_dev->status) && buf) + phytium_jpeg_start_frame(jpeg_dev); + } + + return IRQ_HANDLED; +} + +/* VIDIOC_STREAMON, all vb2_v4l2_buf' states are queue */ +static int phytium_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + jpeg_dev->sequence = 0; + ret = phytium_jpeg_start_frame(jpeg_dev); + if (ret != 0) { + phytium_jpeg_bufs_done(jpeg_dev, VB2_BUF_STATE_QUEUED); + return ret; + } + + /* set the states of the jpeg engine */ + set_bit(VIDEO_STREAMING, &jpeg_dev->status); + return ret; +} + +static void phytium_jpeg_stop_streaming(struct vb2_queue *q) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + clear_bit(VIDEO_STREAMING, &jpeg_dev->status); + ret = wait_event_timeout(jpeg_dev->wait, + !test_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status), + STOP_TIMEOUT); + + /* time out */ + if (ret == 0) { + dev_err(jpeg_dev->dev, "Timed out when stopping streaming.\n"); + /* + * Need to force stop any DMA and try and get HW into a good states + * for future calls to start streaming again. + */ + phytium_jpeg_off(jpeg_dev); + phytium_jpeg_on(jpeg_dev); + phytium_jpeg_init_regs(jpeg_dev); + phytium_jpeg_get_resolution(jpeg_dev); + } + /* first stop jpeg, wait, the free buffer */ + phytium_jpeg_bufs_done(jpeg_dev, VB2_BUF_STATE_ERROR); +} + +static const struct vb2_ops phytium_jpeg_vb2_ops = { + .queue_setup = phytium_jpeg_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_prepare = phytium_jpeg_buf_prepare, + .buf_queue = phytium_jpeg_buf_queue, + .start_streaming = phytium_jpeg_start_streaming, + .stop_streaming = phytium_jpeg_stop_streaming, +}; + +static void phytium_jpeg_set_yuv_mode(struct phytium_jpeg_dev *jpeg_dev) +{ + const char *mode = yuv_mode_str; + enum jpeg_yuv_mode yuv_mode; + + if (strstr(mode, "yuv422") != NULL) + yuv_mode = YUV422; + else if (strstr(mode, "yuv420") != NULL) + yuv_mode = YUV420; + else + yuv_mode = YUV444; + + /* set the yuv mode register */ + phytium_jpeg_write(jpeg_dev, SAMPLE_MODE_REG, yuv_mode); + + /* update the field which indicates YUV mode locates in the JPEG header. 
*/ + phytium_jpeg_header[YUVID] &= 0xFFFF00FF; + if (yuv_mode == YUV422) + phytium_jpeg_header[YUVID] |= 0x2100; + else if (yuv_mode == YUV420) + phytium_jpeg_header[YUVID] |= 0x2200; + else + phytium_jpeg_header[YUVID] |= 0x1100; + +} + +static irqreturn_t phytium_jpeg_timer31_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + + /* disable timer interrupt */ + writel(0, jpeg_dev->timer31_addr); + + /* clear timer interrupt status */ + writel(0x8, jpeg_dev->timer31_addr + 0x2c); + + /* clear JPEG Engine's poweroff status */ + clear_bit(VIDEO_POWEROFF, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "timer31 set jpeg status 0x%lx\n", jpeg_dev->status); + + /* JPEG Engine is poweron, reconfig quntization table and YUV mode */ + phytium_jpeg_init_jpeg_quant(jpeg_dev); + phytium_jpeg_set_yuv_mode(jpeg_dev); + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, DETECT_RESOLUTION_CHANGE_EN); + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, 0, TRANSINFO_SRC_SELECT); + + dev_info(jpeg_dev->dev, "reconfigure quant table and yuv mode\n"); + + return IRQ_HANDLED; +} + +static int phytium_jpeg_parser_timer31_irq(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + + irq = irq_of_parse_and_map(dev->of_node, 2); + if (!irq) { + dev_err(dev, "Failed to get timer31 IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer31_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) + dev_err(dev, "Failed to request timer31 IRQ %d\n", irq); + + return ret; +} + +static irqreturn_t phytium_jpeg_timer30_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + struct arm_smccc_res res; + + /* disable timer interrupt */ + writel(0, jpeg_dev->timer30_addr); + /* clear timer interrupt status */ + writel(0x8, jpeg_dev->timer30_addr + 0x2c); + + /* Disable interruption */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + + /* call SE to poweroff JPEG Engine */ + arm_smccc_smc(0xc300fff4, 0x9, 0x2, 0x80000020, 0, 0, 0, 0, &res); + + /* set JPEG Engine's status is poweroff */ + set_bit(VIDEO_POWEROFF, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "timer30 set jpeg status 0x%lx\n", jpeg_dev->status); + + return IRQ_HANDLED; +} + +static int phytium_jpeg_parser_timer30_irq(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + + irq = irq_of_parse_and_map(dev->of_node, 1); + if (!irq) { + dev_err(dev, "Failed to get timer30 IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer30_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) + dev_err(dev, "Failed to request timer30 IRQ %d\n", irq); + + return ret; +} + +static int phytium_jpeg_init(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + u32 ocm_buf_addr[OCM_BUF_NUM]; + int i; + + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + dev_err(dev, "Failed to get IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) { + dev_err(dev, "Failed to request IRQ %d\n", irq); + return ret; + } + + ret = phytium_jpeg_parser_timer30_irq(jpeg_dev); + if (ret < 0) + return ret; + + ret = phytium_jpeg_parser_timer31_irq(jpeg_dev); + if (ret < 0) + return ret; + + ret = of_property_read_u32_array(dev->of_node, "phytium,ocm-buf-addr", + ocm_buf_addr, 
OCM_BUF_NUM); + if (ret != 0) { + dev_err(dev, "Failed to get the OCM address from device tree node.\n"); + return ret; + } + + for (i = 0; i < OCM_BUF_NUM; i++) + jpeg_dev->src_addrs[i].dma_addr = ocm_buf_addr[i]; + + /* CMA memory for JPEG device */ + of_reserved_mem_device_init(dev); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(dev, "Failed to set DMA mask\n"); + return ret; + } + + /* Initializing JPEG Y and CbCr quantization table */ + phytium_jpeg_init_jpeg_quant(jpeg_dev); + + /* Select YUV mode */ + phytium_jpeg_set_yuv_mode(jpeg_dev); + dev_info(dev, "successfully initialize jpeg engine\n"); + return 0; + +} + + +static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) +{ + struct v4l2_device *v4l2_dev = &jpeg_dev->v4l2_dev; + struct vb2_queue *dst_vq = &jpeg_dev->queue; + struct video_device *vdev = &jpeg_dev->vdev; + int ret; + + jpeg_dev->pix_fmt.pixelformat = V4L2_PIX_FMT_JPEG; + jpeg_dev->pix_fmt.field = V4L2_FIELD_NONE; + jpeg_dev->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB; /* maybe ARGB */ + jpeg_dev->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE; + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + + ret = v4l2_device_register(jpeg_dev->dev, v4l2_dev); + if (ret != 0) { + dev_err(jpeg_dev->dev, "Failed to register v4l2 device\n"); + return ret; + } + + /* Register how many v4l2 controls to a handler */ + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + dst_vq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; + dst_vq->dev = v4l2_dev->dev; + dst_vq->lock = &jpeg_dev->video_lock; + dst_vq->ops = &phytium_jpeg_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->drv_priv = jpeg_dev; + dst_vq->buf_struct_size = sizeof(struct phytium_jpeg_buffer); + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + dst_vq->min_buffers_needed = CAPTURE_BUF_NUMBER; + ret = vb2_queue_init(dst_vq); + if (ret) { + dev_err(jpeg_dev->dev, "Failed to init vb2 queue\n"); + goto err_v4l2_register; + } + + vdev->queue = dst_vq; + vdev->fops = &phytium_jpeg_fops; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING; + vdev->v4l2_dev = v4l2_dev; + strscpy(vdev->name, PHYTIUM_JPEG_NAME, sizeof(vdev->name)); + vdev->vfl_type = VFL_TYPE_GRABBER; /* The newest kernel using VFL_TYPE_VIDEO */ + vdev->vfl_dir = VFL_DIR_RX; + vdev->release = video_device_release_empty; + vdev->ioctl_ops = &phytium_jpeg_ioctl_ops; + vdev->lock = &jpeg_dev->video_lock; + + video_set_drvdata(vdev, jpeg_dev); + ret = video_register_device(vdev, VFL_TYPE_GRABBER, 0); + if (ret != 0) { + dev_err(jpeg_dev->dev, "Failed to register video device\n"); + goto err_video_register; + } + + v4l2_info(v4l2_dev, "phytium JPEG registered as /dev/video%d (%d, %d)\n", + jpeg_dev->vdev.num, VIDEO_MAJOR, jpeg_dev->vdev.minor); + return ret; + +err_video_register: + vb2_queue_release(dst_vq); + +err_v4l2_register: + v4l2_device_unregister(v4l2_dev); + return ret; +} + +static const struct phytium_jpeg_config phytium_jpeg_config = { + .comp_size_read = BUF_LIST_INDEX_CTRL_STS_ADDR(VB_BUF_NO), +}; + +static const struct of_device_id phytium_jpeg_match[] = { + { + .compatible = "phytium,jpeg", + .data = &phytium_jpeg_config, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, phytium_jpeg_match); + +static int phytium_jpeg_probe(struct platform_device *pdev) +{ + struct phytium_jpeg_dev *jpeg_dev; + const struct of_device_id *match; + const struct phytium_jpeg_config *config; + struct resource *res; + int ret; + + jpeg_dev = devm_kzalloc(&pdev->dev, 
sizeof(*jpeg_dev), GFP_KERNEL); + if (jpeg_dev == NULL) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + jpeg_dev->base_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(jpeg_dev->base_addr)) { + dev_err(&pdev->dev, "Failed to ioremap.\n"); + return PTR_ERR(jpeg_dev->base_addr); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + jpeg_dev->timer30_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(jpeg_dev->timer30_addr)) { + dev_err(&pdev->dev, "Failed to ioremap timer30.\n"); + return PTR_ERR(jpeg_dev->timer30_addr); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + jpeg_dev->timer31_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(jpeg_dev->timer31_addr)) { + dev_err(&pdev->dev, "Failed to ioremap timer31.\n"); + return PTR_ERR(jpeg_dev->timer31_addr); + } + match = of_match_node(phytium_jpeg_match, pdev->dev.of_node); + if (match == NULL) { + dev_err(&pdev->dev, "Failed to match.\n"); + return -EINVAL; + } + + config = match->data; + jpeg_dev->comp_size_read = config->comp_size_read; + + jpeg_dev->frame_rate = 30; + jpeg_dev->dev = &pdev->dev; + spin_lock_init(&jpeg_dev->hw_lock); + mutex_init(&jpeg_dev->video_lock); + init_waitqueue_head(&jpeg_dev->wait); + INIT_DELAYED_WORK(&jpeg_dev->res_work, phytium_jpeg_resolution_work); + INIT_LIST_HEAD(&jpeg_dev->buffers); + + ret = phytium_jpeg_init(jpeg_dev); + if (ret != 0) { + dev_err(jpeg_dev->dev, "Failed to initialize the JPEG engine.\n"); + return ret; + } + + ret = phytium_jpeg_setup_video(jpeg_dev); + + return ret; +} + +#define to_phytium_jpeg(x) container_of((x), struct phytium_jpeg_dev, v4l2_dev) +static int phytium_jpeg_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); + struct phytium_jpeg_dev *jpeg_dev = to_phytium_jpeg(v4l2_dev); + + phytium_jpeg_off(jpeg_dev); + + video_unregister_device(&jpeg_dev->vdev); + + vb2_queue_release(&jpeg_dev->queue); + + v4l2_device_unregister(v4l2_dev); + + of_reserved_mem_device_release(dev); + + return 0; +} + +static struct platform_driver phytium_jpeg_driver = { + .probe = phytium_jpeg_probe, + .remove = phytium_jpeg_remove, + .driver = { + .name = PHYTIUM_JPEG_NAME, + .of_match_table = phytium_jpeg_match, + }, +}; + +module_platform_driver(phytium_jpeg_driver); + +MODULE_DESCRIPTION("Phytium JPEG Encoder driver"); +MODULE_AUTHOR("Wang Min "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h new file mode 100644 index 0000000000000000000000000000000000000000..5cb98e08469389527e327f7b8bccfc37d9bb51a7 --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#ifndef _PHYTIUM_JPEG_CORE_H +#define _PHYTIUM_JPEG_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHYTIUM_JPEG_NAME "phytium-jpeg" +#define MAX_FRAME_RATE 60 +#define MAX_HEIGHT 1080 +#define MAX_WIDTH 1920 +#define MIN_HEIGHT 480 +#define MIN_WIDTH 640 +#define MIN_PIXEL_CLOCK (640 * 480 * 60) /* 640 x 480 x 60Hz */ +#define MAX_PIXEL_CLOCK (1920 * 1080 * 60) /* 1920 x 1080 x 60Hz */ + +#define SOURCE_RESOLUTION_DETECT_TIMEOUT msecs_to_jiffies(500) +#define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(0) +#define INVALID_RESOLUTION_DELAY msecs_to_jiffies(250) +#define STOP_TIMEOUT msecs_to_jiffies(1000) + +#define INVALID_RESOLUTION_RETRIES 2 +#define CAPTURE_BUF_NUMBER 3 /* number of capture buffers to use */ +#define VB_BUF_NO 0 /* which of the 16 buffers in the list to use */ + +/* The macros below describe the JPEG header produced by the Phytium JPEG engine */ +#define PHYTIUM_JPEG_HEADER_LEN (256 * 3) +#define PHYTIUM_JPEG_HEADER_SIZE (PHYTIUM_JPEG_HEADER_LEN / sizeof(u32)) +#define PHYTIUM_JPEG_HEADER_H_INDEX 40 +#define PHYTIUM_JPEG_HEADER_W_INDEX 41 + +/* There are two OCM buffers used to store the incoming video data */ +#define OCM_BUF_NUM 2 + +enum phytium_jpeg_status { + VIDEO_MODE_DETECT_DONE, + VIDEO_RES_CHANGE, + VIDEO_RES_DETECT, + VIDEO_STREAMING, + VIDEO_FRAME_INPRG, + VIDEO_STOPPED, + VIDEO_CLOCKS_ON, + VIDEO_POWEROFF, +}; + +struct phytium_jpeg_addr { + unsigned int size; + dma_addr_t dma_addr; + void *virt_addr; +}; + +struct phytium_jpeg_buffer { + struct vb2_v4l2_buffer vb; + struct list_head link; +}; + +/** + * struct phytium_jpeg_dev - Phytium JPEG engine abstraction + * @base_addr: JPEG engine register mapping + * @dev: JPEG device + * @v4l2_dev: top-level v4l2 device + * @pix_fmt: current capture pixel format + * @active_timings: timings of the currently configured input + * @detected_timings: timings detected on the input signal + * @v4l2_input_status: current input signal status + * @queue: videobuf2 capture queue + * @vdev: video device node + * @video_lock: v4l2 and videobuf2 lock, protects this structure + * @jpeg_mode: selected JPEG sampling mode + * @comp_size_read: register offset used to read the compressed frame size + * @wait: wait queue for stop/completion events + * @hw_lock: spinlock protecting the buffer list and hw resource + * @res_work: delayed work handling resolution changes + * @buffers: list of queued capture buffers + * @status: device state bits (enum phytium_jpeg_status) + * @sequence: capture frame sequence counter + * @max_compressed_size: maximum size of a compressed frame + * @src_addrs: OCM source buffers + * @dst_addrs: destination buffers in the buffer list + * @yuv420: true if the source is subsampled as YUV420 + * @frame_rate: configured frame rate + * @timer30_addr: timer30 register mapping + * @timer31_addr: timer31 register mapping + */ +struct phytium_jpeg_dev { + void __iomem *base_addr; + struct device *dev; + struct v4l2_device v4l2_dev; + struct v4l2_pix_format pix_fmt; + struct v4l2_bt_timings active_timings; + struct v4l2_bt_timings detected_timings; + u32 v4l2_input_status; + struct vb2_queue queue; + struct video_device vdev; + /* v4l2 and videobuf2 lock, protects this structure */ + struct mutex video_lock; + u32 jpeg_mode; + u32 comp_size_read; + wait_queue_head_t wait; + /* buffer list lock, protecting the hw device resource */ + spinlock_t hw_lock; + struct delayed_work res_work; + struct list_head buffers; + unsigned long status; + unsigned int sequence; + unsigned int max_compressed_size; + struct phytium_jpeg_addr src_addrs[OCM_BUF_NUM]; + struct phytium_jpeg_addr dst_addrs[16]; + + bool yuv420; + unsigned int frame_rate; + void __iomem *timer30_addr; + void __iomem *timer31_addr; +}; + +struct phytium_jpeg_config { + u32 jpeg_mode; + u32 comp_size_read; +}; + +#define YUV_MODE_STR_LEN 8 +#define YUVID 42 + +enum jpeg_yuv_mode { + YUV444 = 0x0, + YUV422 = 0x1, + YUV420 = 0x2 +}; + +#endif /* _PHYTIUM_JPEG_CORE_H */ diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h
b/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..3cdd623022b3210830f1dd08565853283035a540 --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_JPEG_REG_H +#define _PHYTIUM_JPEG_REG_H + +#include +/* Definitions of the control registers in the JPEG SoC block */ + +/* This register sets the information of video that comes from main memory */ +#define SRC_DDR_INFO_REG 0x00000800 + +/* This register reports the information of video that comes from the external VGA input */ +#define SRC_VGA_INFO_REG 0x00000894 + +#define SRC_FORMAT BIT(0) /* 0:RGB888, 1:RGB565 */ +#define SRC_DE_POLARITY BIT(1) /* 0: active low, otherwise active high */ +#define SRC_HS_POLARITY BIT(2) /* 0: active low, otherwise active high */ +#define SRC_VS_POLARITY BIT(3) /* 0: active low, otherwise active high */ +#define SRC_HOR_PIXELS GENMASK(15, 4) /* the number of the horizontal pixels */ +#define SRC_WIDTH_SHIFT 4 /* shift right to get width */ +#define SRC_VER_PIXELS GENMASK(26, 16) /* the number of the vertical pixels */ +#define SRC_HEIGHT_SHIFT 16 /* shift right to get height */ +/* The bit fields below are only used when the image comes from main memory */ +#define SRC_COMP_DDR_IMG_EN BIT(27) /* 0: JPEG compression disabled, otherwise enabled */ + +/* marks which OCM buffer is occupied by a stored image */ +#define SRC_DDR_IMG2OCM_VALID GENMASK(29, 28) + +/* This register controls starting the JPEG engine */ +#define TRANSFORM_INFO_REG 0x00000804 +#define TRANSINFO_ENABLE_ENGINE BIT(0) /* 1: enable the JPEG engine */ +/* 1: video comes from external VGA, 0: video comes from DDR */ +#define TRANSINFO_SRC_SELECT BIT(1) +/* 0: video comes from external VGA is cached to OCM, 1: DDR */ +#define TRANSINFO_IMAGE_STORE BIT(2) +#define TRANSINFO_FRAME_RATE GENMASK(9, 4) /* frame rate value */ +#define TRANSINFO_BLOCK_SIZE BIT(12) /* 0: 8x8, 1: 16x16 */ +#define TRANSINFO_ENABLE_YUV422 BIT(13) /* 1: JPEG blocks are populated as YUV422 */ +/* supported AXI burst lengths: 1, 2, 4, 8, 16, 32, 64;
the default value is 0xf */ +#define TRANSINFO_AXI_LEN GENMASK(22, 16) +#define TRANS_AXI_LEN_SHIFT 16 + +/* The interrupt and status register */ +#define INT_STATUS_CTRL_REG 0x00000808 +#define INT_FIFO_OVERFLOW BIT(0) /* video fifo overflow, write 1 to clear */ +#define INT_OCM_BUF_OVERFLOW BIT(1) /* ocm buffer overflow, write 1 to clear */ +/* JPEG engine complete compression, write 1 to clear */ +#define INT_JPEG_ENCODE_COMPLETE BIT(2) +/* in VGA mode, video's format is changed */ +#define INT_VIDEO_FORMAT_CHANGE BIT(3) +/* enable the interrupt for video FIFO overflow and source resolution change */ +#define DETECT_RESOLUTION_CHANGE_EN BIT(4) +/* enable the interrupt of the ocm buffer overflow */ +#define STS_VE_OCM_BUF_OVERFLOW_EN BIT(5) +/* enable the interrupt of the JPEG complete compression */ +#define STS_VE_JPEG_CODE_COMP_EN BIT(6) +/* in VGA mode, this bit indicates the OCM buffer is busy */ +#define STS_VE_OCM_BUF_BUSY BIT(7) +/* in VGA mode, these bits give the sequence number of the current frame */ +#define STS_VE_CUR_FRAME_NUMBER GENMASK(9, 8) +/* in VGA mode, these bits give the sequence number of the cached frame */ +#define STS_VE_BUF_CACHE_NUMBER GENMASK(11, 10) +/* in VGA mode, the buffer number in buffer list */ +#define STS_JPEG_COMP_BUF_NO GENMASK(15, 12) +#define INT_JPEG_COMP_BUF_LIST_NO GENMASK(31, 16) /* the interrupt number of the buffer */ + +#define OCM_BUF0_ADDR 0x0000080C +#define OCM_BUF1_ADDR 0x00000810 +#define OCM_BUF_SHIFT 8 + +#define BUF_LIST_BASE_ADDR 0x00000814 + +#define PHYTIUM_BUF_LIST_ACTRL_AND_STS_BASE_ADDR_REG 0x00000818 +#define STS_JPEG_BUF_HIGH_LEVEL_VALID BIT(0) /* a high level means the buffer is valid */ +#define JPEG_BUF_CAPACITY_SIZE GENMASK(29, 8) /* the capacity of the buffer */ +#define JPEG_BUF_CAPACITY_SIZE_SHIFT 8 + +/* There are 16 buffers in the buffer list; their addresses are 8 bytes apart */ +#define BUF_LIST_ADDR_OFFSET 0x8 +#define BUF_LIST_CTRL_AND_STS_OFFSET 0x8 + +/* Get the address register of the buffer at the given index */ +#define BUF_LIST_INDEX_ADDR(index) \ + (BUF_LIST_BASE_ADDR + (index) * BUF_LIST_ADDR_OFFSET) + +#define JPEG_DST_ADDR_SHIFT 8 + +#define BUF_LIST_INDEX_CTRL_STS_ADDR(index) \ + (PHYTIUM_BUF_LIST_ACTRL_AND_STS_BASE_ADDR_REG + (index) * BUF_LIST_CTRL_AND_STS_OFFSET) + +#define FRAME_SAMPLE_CTRL 0x00000898 +#define FRAME_SAMPLE_CTRL_EN BIT(31) +#define FRAME_SAMPLE_INTERVAL GENMASK(30, 0) + +/* The registers below are all related to quantization */ +#define HUFF_MODE_REG 0x300 +#define SAMPLE_MODE_REG 0x304 + +#define Y_QUANT_BASE_ADDR_REG 0x400 +#define C_QUANT_BASE_ADDR_REG 0x500 + +#define QUANT_REG_NUM 64 + +#define Y_QUANT_INDEX_ADDR_REG(index) \ + (Y_QUANT_BASE_ADDR_REG + 4 * (index)) + +#define C_QUANT_INDEX_ADDR_REG(index) \ + (C_QUANT_BASE_ADDR_REG + 4 * (index)) + +#endif /* _PHYTIUM_JPEG_REG_H */ diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index b6e9e93bde7a8ad635a32eab35fc4658610d571a..406ac673ad84cb9e46cd715065fb3f6a8d6c7625 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -2397,7 +2397,7 @@ static int pxa_camera_probe(struct platform_device *pdev) pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; - if (&pdev->dev.of_node && !pcdev->pdata) { + if (pdev->dev.of_node && !pcdev->pdata) { err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); } else { pcdev->platform_flags = pcdev->pdata->flags; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index
bb6add9d340e29cc6218d3b2ff167a723bbebfff..46dc50efc80af23f659fccade8de2aa67777f605 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev) if (ret) return ret; + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), + GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + INIT_LIST_HEAD(&core->instances); mutex_init(&core->lock); INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); @@ -337,6 +345,7 @@ static int venus_remove(struct platform_device *pdev) struct device *dev = core->dev; int ret; + cancel_delayed_work_sync(&core->work); ret = pm_runtime_get_sync(dev); WARN_ON(ret < 0); @@ -422,10 +431,11 @@ static const struct venus_resources msm8916_res = { }; static const struct freq_tbl msm8996_freq_table[] = { - { 1944000, 490000000 }, /* 4k UHD @ 60 */ - { 972000, 320000000 }, /* 4k UHD @ 30 */ - { 489600, 150000000 }, /* 1080p @ 60 */ - { 244800, 75000000 }, /* 1080p @ 30 */ + { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */ + { 972000, 520000000 }, /* 4k UHD @ 30 */ + { 489600, 346666667 }, /* 1080p @ 60 */ + { 244800, 150000000 }, /* 1080p @ 30 */ + { 108000, 75000000 }, /* 720p @ 30 */ }; static const struct reg_val msm8996_reg_preset[] = { diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 124085556b94bb63a20901de301ea94550f25ab5..fbcc67c10993fc25b69847437e6b37684cf6e94f 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -1484,6 +1484,7 @@ static int venus_suspend_3xx(struct venus_core *core) { struct venus_hfi_device *hdev = to_hfi_priv(core); struct device *dev = core->dev; + u32 ctrl_status; bool val; int ret; @@ -1499,6 +1500,10 @@ static int venus_suspend_3xx(struct venus_core *core) return -EINVAL; } + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) + goto power_off; + /* * Power collapse sequence for Venus 3xx and 4xx versions: * 1. Check for ARM9 and video core to be idle by checking WFI bit @@ -1523,6 +1528,7 @@ static int venus_suspend_3xx(struct venus_core *core) if (ret) return ret; +power_off: mutex_lock(&hdev->lock); ret = venus_power_off(hdev); diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index dfbbbf0f746f93e41d9bf39c69ddc066b8f7a4a1..297a0e84b8dfd0d6a71e5b02c88cb121e4296291 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -888,8 +888,7 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, unsigned int opb_sz = venus_helper_get_opb_size(inst); vb = &vbuf->vb2_buf; - vb->planes[0].bytesused = - max_t(unsigned int, opb_sz, bytesused); + vb2_set_plane_payload(vb, 0, bytesused ? 
: opb_sz); vb->planes[0].data_offset = data_offset; vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; @@ -1095,6 +1094,7 @@ static int vdec_close(struct file *file) { struct venus_inst *inst = to_inst(file); + cancel_work_sync(&inst->delayed_process_work); v4l2_m2m_ctx_release(inst->m2m_ctx); v4l2_m2m_release(inst->m2m_dev); vdec_ctrl_deinit(inst); @@ -1116,9 +1116,6 @@ static const struct v4l2_file_operations vdec_fops = { .unlocked_ioctl = video_ioctl2, .poll = v4l2_m2m_fop_poll, .mmap = v4l2_m2m_fop_mmap, -#ifdef CONFIG_COMPAT - .compat_ioctl32 = v4l2_compat_ioctl32, -#endif }; static int vdec_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 41249d1443fa18dc1b1d4667ef81f36c3509fa86..420897241248c8455a2acf9e2c120a8e9bf8df22 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -1220,9 +1220,6 @@ static const struct v4l2_file_operations venc_fops = { .unlocked_ioctl = video_ioctl2, .poll = v4l2_m2m_fop_poll, .mmap = v4l2_m2m_fop_mmap, -#ifdef CONFIG_COMPAT - .compat_ioctl32 = v4l2_compat_ioctl32, -#endif }; static int venc_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index ce09799976efe1b506598123f9cf82997fb041ca..485fa3fa8b49a7852bb5fd81631137e846eba56f 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ b/drivers/media/platform/rcar-vin/rcar-core.c @@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags, !is_media_entity_v4l2_video_device(link->sink->entity)) return 0; - /* If any entity is in use don't allow link changes. */ + /* + * Don't allow link changes if any entity in the graph is + * streaming, modifying the CHSEL register fields can disrupt + * running streams. 
+ */ media_device_for_each_entity(entity, &group->mdev) - if (entity->use_count) + if (entity->stream_count) return -EBUSY; mutex_lock(&group->lock); @@ -170,7 +174,6 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags, if (csi_id == -ENODEV) { struct v4l2_subdev *sd; - unsigned int i; /* * Make sure the source entity subdevice is registered as diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index 2a15b7cca338fe6445ae11a8ec275c9e40ac1245..5a30f1d84fe177021c050149d953f07a4c3196e9 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info"); #define FD1_IP_H3_ES1 0x02010101 #define FD1_IP_M3W 0x02010202 #define FD1_IP_H3 0x02010203 +#define FD1_IP_M3N 0x02010204 +#define FD1_IP_E3 0x02010205 /* LUTs */ #define FD1_LUT_DIF_ADJ 0x1000 @@ -2304,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev) fdp1->fcp = rcar_fcp_get(fcp_node); of_node_put(fcp_node); if (IS_ERR(fdp1->fcp)) { - dev_err(&pdev->dev, "FCP not found (%ld)\n", + dev_dbg(&pdev->dev, "FCP not found (%ld)\n", PTR_ERR(fdp1->fcp)); return PTR_ERR(fdp1->fcp); } @@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev) case FD1_IP_H3: dprintk(fdp1, "FDP1 Version R-Car H3\n"); break; + case FD1_IP_M3N: + dprintk(fdp1, "FDP1 Version R-Car M3N\n"); + break; + case FD1_IP_E3: + dprintk(fdp1, "FDP1 Version R-Car E3\n"); + break; default: dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n", hw_version); diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index ab5a6f95044a2d0ab5a1b77a7a4e69708945ba73..86a76f35a9a1a4ab2f8b2102ed9b40cf115d311d 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -43,7 +43,7 @@ static void device_run(void *prv) { struct rga_ctx *ctx = prv; struct rockchip_rga *rga = ctx->rga; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; spin_lock_irqsave(&rga->ctrl_lock, flags); @@ -53,8 +53,8 @@ static void device_run(void *prv) src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - rga_buf_map(src); - rga_buf_map(dst); + rga_buf_map(&src->vb2_buf); + rga_buf_map(&dst->vb2_buf); rga_hw_start(rga); diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c index 8837e2678bdeb3507e4ebce501e5ee58216334ad..b8f125e45e384366793708bcfe1e21e410df9a21 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.c +++ b/drivers/media/platform/s5p-cec/s5p_cec.c @@ -116,6 +116,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); cec->rx = STATE_BUSY; cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; cec->msg.rx_status = CEC_RX_STATUS_OK; s5p_cec_get_rx_buf(cec, cec->msg.len, cec->msg.msg); diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index e901201b6fcc57849f544b1e1f89069935b5f918..1f58574d0b965bc0d7c62cf5655a48a4d24c42aa 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -487,7 +487,7 @@ static void device_run(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; u32 cmd = 0; @@ -502,10 +502,10 @@ static void 
device_run(void *prv) spin_lock_irqsave(&dev->ctrl_lock, flags); g2d_set_src_size(dev, &ctx->in); - g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0)); + g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0)); g2d_set_dst_size(dev, &ctx->out); - g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0)); + g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0)); g2d_set_rop4(dev, ctx->rop); g2d_set_flip(dev, ctx->flip); diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 04fd2e0493c0f9bc1da65603629d3f4645480e0f..1cb5b622821491745e2688eec0b0849881c27336 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -793,21 +793,24 @@ static void skip(struct s5p_jpeg_buffer *buf, long len); static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, x, components; jpeg_buffer.size = 2; /* Ls */ jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2; jpeg_buffer.curr = 0; - word = 0; - if (get_word_be(&jpeg_buffer, &word)) return; - jpeg_buffer.size = (long)word - 2; + + if (word < 2) + jpeg_buffer.size = 0; + else + jpeg_buffer.size = (long)word - 2; + jpeg_buffer.data += 2; jpeg_buffer.curr = 0; @@ -830,14 +833,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, n, j; for (j = 0; j < ctx->out_q.dht.n; ++j) { jpeg_buffer.size = ctx->out_q.dht.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dht.marker[j]; jpeg_buffer.curr = 0; @@ -889,13 +892,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; int c, x, components; jpeg_buffer.size = ctx->out_q.sof_len; jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof; jpeg_buffer.curr = 0; skip(&jpeg_buffer, 5); /* P, Y, X */ @@ -920,14 +923,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, j; for (j = 0; j < ctx->out_q.dqt.n; ++j) { jpeg_buffer.size = ctx->out_q.dqt.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dqt.marker[j]; 
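/*
 * Illustrative aside (not part of the patch): the hunks above switch helpers
 * such as v4l2_m2m_next_src_buf() over to struct vb2_v4l2_buffer, which embeds
 * the plain struct vb2_buffer as a member, and the call sites then pass
 * &vb->vb2_buf to the vb2 helpers.  The self-contained sketch below uses
 * made-up struct names to show this embedded-struct idiom and how
 * container_of() recovers the wrapper from a pointer to the embedded member.
 */
#include <stdio.h>
#include <stddef.h>

struct base_buf {              /* stand-in for struct vb2_buffer */
	int index;
};

struct wrapped_buf {           /* stand-in for struct vb2_v4l2_buffer */
	struct base_buf buf;       /* embedded base object */
	unsigned int sequence;     /* wrapper-only state */
};

/* same idea as the kernel's container_of(): member pointer -> wrapper pointer */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void helper_takes_base(struct base_buf *b)
{
	struct wrapped_buf *w = container_of_sketch(b, struct wrapped_buf, buf);

	printf("index=%d sequence=%u\n", b->index, w->sequence);
}

int main(void)
{
	struct wrapped_buf w = { .buf = { .index = 3 }, .sequence = 42 };

	/* mirrors passing &vb->vb2_buf into a vb2 helper */
	helper_takes_base(&w.buf);
	return 0;
}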
jpeg_buffer.curr = 0; @@ -1086,6 +1089,7 @@ static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word) if (byte == -1) return -1; *word = (unsigned int)byte | temp; + return 0; } @@ -1173,7 +1177,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; sof = jpeg_buffer.curr; /* after 0xffc0 */ sof_len = length; @@ -1204,7 +1208,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dqt >= S5P_JPEG_MAX_MARKER) return false; @@ -1217,7 +1221,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dht >= S5P_JPEG_MAX_MARKER) return false; @@ -1242,6 +1246,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; + /* No need to check underflows as skip() does it */ skip(&jpeg_buffer, length); break; } @@ -1293,13 +1298,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv, return 0; } -static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n, +static int enum_fmt(struct s5p_jpeg_ctx *ctx, + struct s5p_jpeg_fmt *sjpeg_formats, int n, struct v4l2_fmtdesc *f, u32 type) { int i, num = 0; + unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag; for (i = 0; i < n; ++i) { - if (sjpeg_formats[i].flags & type) { + if (sjpeg_formats[i].flags & type && + sjpeg_formats[i].flags & fmt_ver_flag) { /* index-th format of type type found ? */ if (num == f->index) break; @@ -1326,11 +1334,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_CAPTURE); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_CAPTURE); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_CAPTURE); } static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, @@ -1339,11 +1347,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_OUTPUT); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_OUTPUT); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_OUTPUT); } static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx, @@ -2072,15 +2080,15 @@ static void s5p_jpeg_device_run(void *priv) { struct s5p_jpeg_ctx *ctx = priv; struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned long src_addr, dst_addr, flags; spin_lock_irqsave(&ctx->jpeg->slock, flags); src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); - dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + dst_addr = 
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); s5p_jpeg_reset(jpeg->regs); s5p_jpeg_poweron(jpeg->regs); @@ -2153,7 +2161,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size, padding_bytes = 0; @@ -2172,7 +2180,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes; @@ -2190,7 +2198,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2198,7 +2206,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (jpeg->variant->version == SJPEG_EXYNOS5433 && ctx->mode == S5P_JPEG_DECODE) jpeg_addr += ctx->out_q.sos; @@ -2314,7 +2322,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size; @@ -2328,7 +2336,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) fmt = ctx->cap_q.fmt; } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size; @@ -2346,7 +2354,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2354,7 +2362,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr); } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 927a1235408de47ca854ce895b7f64211414ae28..4b8516c35bc204700bdc1de88d6faf7503b71249 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -527,7 +527,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, dev); ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, dev); - ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops, + if (FW_HAS_E_MIN_SCRATCH_BUF(dev)) + ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops, get_min_scratch_buf_size, dev); if (ctx->img_width == 0 || ctx->img_height == 0) ctx->state = MFCINST_ERROR; @@ -1089,7 +1090,6 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev, device_initialize(child); dev_set_name(child, "%s:%s", dev_name(dev), name); child->parent = dev; - child->bus = dev->bus; child->coherent_dma_mask = dev->coherent_dma_mask; child->dma_mask = dev->dma_mask; 
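/*
 * Illustrative aside (not part of the patch): the lines above copy the
 * parent's coherent_dma_mask and dma_mask to the newly created child memdev,
 * so the child advertises the same DMA addressing capability.  In the kernel,
 * DMA_BIT_MASK(n) is a mask with the low n bits set; DMA_BIT_MASK(32), as used
 * by the drivers in this series, limits DMA addresses to the low 4 GiB.  A
 * minimal standalone sketch of that mask (restricted to n < 64 here to avoid
 * the 64-bit shift special case the kernel macro handles):
 */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK_SKETCH(n) ((1ULL << (n)) - 1)   /* valid for n < 64 */

int main(void)
{
	uint64_t mask = DMA_BIT_MASK_SKETCH(32);

	/* prints 0xffffffff: every bus address must fit in 32 bits */
	printf("DMA_BIT_MASK(32) = 0x%llx\n", (unsigned long long)mask);
	return 0;
}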
child->release = s5p_mfc_memdev_release; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c index eb85cedc5ef34a66ba1889c416ef7e401921c804..5e080f32b0e8247324ea513151cb97c362edd44a 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c @@ -38,6 +38,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) for (i = 0; i < pm->num_clocks; i++) { pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]); if (IS_ERR(pm->clocks[i])) { + /* additional clocks are optional */ + if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) { + pm->clocks[i] = NULL; + continue; + } mfc_err("Failed to get clock: %s\n", pm->clk_names[i]); return PTR_ERR(pm->clocks[i]); diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 1d274c64de09e54ff01b0201b87bc3c791269f04..03ee9839a03e0c72bca9e91feff78ea792ef9da2 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu, static void sh_veu_device_run(void *priv) { struct sh_veu_dev *veu = priv; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx); if (src_buf && dst_buf) - sh_veu_process(veu, src_buf, dst_buf); + sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf); } /* ========== video ioctls ========== */ diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 66b64096f5de0e8cc33306af98692d9b4e7b2691..40c4eef71c34c554d0f7dfac4cb601515dc22395 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c @@ -651,8 +651,7 @@ static int bdisp_release(struct file *file) dev_dbg(bdisp->dev, "%s\n", __func__); - if (mutex_lock_interruptible(&bdisp->lock)) - return -ERESTARTSYS; + mutex_lock(&bdisp->lock); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 721564176d8c07585d1803b35435f7ef8c52afcd..18d0b5641789436536b07ce838890bf968201a94 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -164,6 +164,9 @@ struct stm32_dcmi { int errors_count; int overrun_count; int buffers_count; + + /* Ensure DMA operations atomicity */ + struct mutex dma_lock; }; static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n) @@ -314,6 +317,13 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi, return ret; } + /* + * Avoid call of dmaengine_terminate_all() between + * dmaengine_prep_slave_single() and dmaengine_submit() + * by locking the whole DMA submission sequence + */ + mutex_lock(&dcmi->dma_lock); + /* Prepare a DMA transaction */ desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr, buf->size, @@ -322,6 +332,7 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi, if (!desc) { dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n", __func__, &buf->paddr, buf->size); + mutex_unlock(&dcmi->dma_lock); return -EINVAL; } @@ -333,9 +344,12 @@ static int dcmi_start_dma(struct stm32_dcmi *dcmi, dcmi->dma_cookie = dmaengine_submit(desc); if (dma_submit_error(dcmi->dma_cookie)) { dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__); + mutex_unlock(&dcmi->dma_lock); return -ENXIO; } + mutex_unlock(&dcmi->dma_lock); + 
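/*
 * Illustrative aside (not part of the patch): the dma_lock introduced above is
 * held across dmaengine_prep_slave_single() and dmaengine_submit(), and the
 * stop path takes the same lock around dmaengine_terminate_all(), so a
 * terminate can never interleave with a half-built submission.  A
 * self-contained pthread sketch of that pattern, with made-up names standing
 * in for the dmaengine calls:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dma_lock = PTHREAD_MUTEX_INITIALIZER;
static int descriptor_live;    /* stands in for the prepared descriptor */

static int start_transfer(void)
{
	/* hold the lock across prep + submit, as dcmi_start_dma() now does */
	pthread_mutex_lock(&dma_lock);
	descriptor_live = 1;                 /* "dmaengine_prep_slave_single()" */
	printf("submit descriptor\n");       /* "dmaengine_submit()" */
	pthread_mutex_unlock(&dma_lock);
	return 0;
}

static void stop_streaming(void)
{
	/* same lock serializes the terminate path against submission */
	pthread_mutex_lock(&dma_lock);
	descriptor_live = 0;                 /* "dmaengine_terminate_all()" */
	printf("terminated, descriptor_live=%d\n", descriptor_live);
	pthread_mutex_unlock(&dma_lock);
}

int main(void)
{
	start_transfer();
	stop_streaming();
	return 0;
}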
dma_async_issue_pending(dcmi->dma_chan); return 0; @@ -570,9 +584,9 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) int ret; ret = pm_runtime_get_sync(dcmi->dev); - if (ret) { - dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n", - __func__); + if (ret < 0) { + dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n", + __func__, ret); goto err_release_buffers; } @@ -717,7 +731,9 @@ static void dcmi_stop_streaming(struct vb2_queue *vq) spin_unlock_irq(&dcmi->irqlock); /* Stop all pending DMA operations */ + mutex_lock(&dcmi->dma_lock); dmaengine_terminate_all(dcmi->dma_chan); + mutex_unlock(&dcmi->dma_lock); pm_runtime_put(dcmi->dev); @@ -808,6 +824,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f, sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); if (!sd_fmt) { + if (!dcmi->num_of_sd_formats) + return -ENODATA; + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; pix->pixelformat = sd_fmt->fourcc; } @@ -986,6 +1005,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi, sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat); if (!sd_fmt) { + if (!dcmi->num_of_sd_formats) + return -ENODATA; + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1]; pix->pixelformat = sd_fmt->fourcc; } @@ -1645,7 +1667,7 @@ static int dcmi_probe(struct platform_device *pdev) dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(dcmi->rstc)) { dev_err(&pdev->dev, "Could not get reset control\n"); - return -ENODEV; + return PTR_ERR(dcmi->rstc); } /* Get bus characteristics from devicetree */ @@ -1660,7 +1682,7 @@ static int dcmi_probe(struct platform_device *pdev) of_node_put(np); if (ret) { dev_err(&pdev->dev, "Could not parse the endpoint\n"); - return -ENODEV; + return ret; } if (ep.bus_type == V4L2_MBUS_CSI2) { @@ -1673,8 +1695,9 @@ static int dcmi_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(&pdev->dev, "Could not get irq\n"); - return -ENODEV; + if (irq != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get irq\n"); + return irq ? 
irq : -ENXIO; } dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1694,12 +1717,13 @@ static int dcmi_probe(struct platform_device *pdev) dev_name(&pdev->dev), dcmi); if (ret) { dev_err(&pdev->dev, "Unable to request irq %d\n", irq); - return -ENODEV; + return ret; } mclk = devm_clk_get(&pdev->dev, "mclk"); if (IS_ERR(mclk)) { - dev_err(&pdev->dev, "Unable to get mclk\n"); + if (PTR_ERR(mclk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get mclk\n"); return PTR_ERR(mclk); } @@ -1711,6 +1735,7 @@ static int dcmi_probe(struct platform_device *pdev) spin_lock_init(&dcmi->irqlock); mutex_init(&dcmi->lock); + mutex_init(&dcmi->dma_lock); init_completion(&dcmi->complete); INIT_LIST_HEAD(&dcmi->buffers); diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h index 7e611501c291696a45b6283d064d2ac8fccd0405..f29074c849155144fd1e4acdf40440f963996f0c 100644 --- a/drivers/media/platform/ti-vpe/vpdma.h +++ b/drivers/media/platform/ti-vpe/vpdma.h @@ -60,6 +60,7 @@ struct vpdma_data_format { * line stride of source and dest * buffers should be 16 byte aligned */ +#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */ #define VPDMA_DTD_DESC_SIZE 32 /* 8 words */ #define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */ diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index d70871d0ad2d9654fbf02f7b1977aa2a4e8a7aa3..a285b9db7ee864ee7bfb5f4912bccfc05fc42d5c 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -352,20 +352,25 @@ enum { }; /* find our format description corresponding to the passed v4l2_format */ -static struct vpe_fmt *find_format(struct v4l2_format *f) +static struct vpe_fmt *__find_format(u32 fourcc) { struct vpe_fmt *fmt; unsigned int k; for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { fmt = &vpe_formats[k]; - if (fmt->fourcc == f->fmt.pix.pixelformat) + if (fmt->fourcc == fourcc) return fmt; } return NULL; } +static struct vpe_fmt *find_format(struct v4l2_format *f) +{ + return __find_format(f->fmt.pix.pixelformat); +} + /* * there is one vpe_dev structure in the driver, it is shared by * all instances. @@ -1027,11 +1032,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; + u32 stride; if (port == VPE_PORT_MV_OUT) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; q_data = &ctx->q_data[Q_DATA_SRC]; + stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? 
p_data->vb_part : 0; @@ -1058,6 +1066,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) } /* Apply the offset */ dma_addr += offset; + stride = q_data->bytesperline[VPE_LUMA]; } if (q_data->flags & Q_DATA_FRAME_1D) @@ -1069,7 +1078,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port) MAX_W, MAX_H); vpdma_add_out_dtd(&ctx->desc_list, q_data->width, - q_data->bytesperline[VPE_LUMA], &q_data->c_rect, + stride, &q_data->c_rect, vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1, MAX_OUT_HEIGHT_REG1, p_data->channel, flags); } @@ -1088,10 +1097,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; + u32 stride; if (port == VPE_PORT_MV_IN) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; + stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3, + VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? p_data->vb_part : 0; @@ -1118,6 +1130,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) } /* Apply the offset */ dma_addr += offset; + stride = q_data->bytesperline[VPE_LUMA]; if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) { /* @@ -1153,10 +1166,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port) if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12) frame_height /= 2; - vpdma_add_in_dtd(&ctx->desc_list, q_data->width, - q_data->bytesperline[VPE_LUMA], &q_data->c_rect, - vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width, - frame_height, 0, 0); + vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride, + &q_data->c_rect, vpdma_fmt, dma_addr, + p_data->channel, field, flags, frame_width, + frame_height, 0, 0); } /* @@ -1405,9 +1418,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) /* the previous dst mv buffer becomes the next src mv buffer */ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; - if (ctx->aborting) - goto finished; - s_vb = ctx->src_vbs[0]; d_vb = ctx->dst_vb; @@ -1418,6 +1428,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) d_vb->timecode = s_vb->timecode; d_vb->sequence = ctx->sequence; + s_vb->sequence = ctx->sequence; d_q_data = &ctx->q_data[Q_DATA_DST]; if (d_q_data->flags & Q_IS_INTERLACED) { @@ -1471,6 +1482,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data) ctx->src_vbs[0] = NULL; ctx->dst_vb = NULL; + if (ctx->aborting) + goto finished; + ctx->bufs_completed++; if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) { device_run(ctx); @@ -1583,9 +1597,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, unsigned int stride = 0; if (!fmt || !(fmt->types & type)) { - vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n", + vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n", pix->pixelformat); - return -EINVAL; + fmt = __find_format(V4L2_PIX_FMT_YUYV); } if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE @@ -1632,7 +1646,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, &pix->height, MIN_H, MAX_H, H_ALIGN, S_ALIGN); - if (!pix->num_planes) + if (!pix->num_planes || pix->num_planes > 2) pix->num_planes = fmt->coplanar ? 
2 : 1; else if (pix->num_planes > 1 && !fmt->coplanar) pix->num_planes = 1; @@ -1671,6 +1685,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, if (stride > plane_fmt->bytesperline) plane_fmt->bytesperline = stride; + plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline, + stride, + VPDMA_MAX_STRIDE); + plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline, VPDMA_STRIDE_ALIGN); @@ -2291,7 +2309,7 @@ static int vpe_open(struct file *file) v4l2_ctrl_handler_setup(hdl); s_q_data = &ctx->q_data[Q_DATA_SRC]; - s_q_data->fmt = &vpe_formats[2]; + s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV); s_q_data->width = 1920; s_q_data->height = 1080; s_q_data->nplanes = 1; @@ -2369,6 +2387,12 @@ static int vpe_release(struct file *file) mutex_lock(&dev->dev_mutex); free_mv_buffers(ctx); + + vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h); + vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v); + vpdma_free_desc_list(&ctx->desc_list); vpdma_free_desc_buf(&ctx->mmr_adb); diff --git a/drivers/media/platform/vicodec/vicodec-codec.c b/drivers/media/platform/vicodec/vicodec-codec.c index 2d047646f6147e74f04f9cf6ebcd877ac40e554a..d854b2344f12b1c68f299c25df095a6c95d067ac 100644 --- a/drivers/media/platform/vicodec/vicodec-codec.c +++ b/drivers/media/platform/vicodec/vicodec-codec.c @@ -588,8 +588,14 @@ static void fill_decoder_block(u8 *dst, const s16 *input, int stride) int i, j; for (i = 0; i < 8; i++) { - for (j = 0; j < 8; j++) - *dst++ = *input++; + for (j = 0; j < 8; j++, input++, dst++) { + if (*input < 0) + *dst = 0; + else if (*input > 255) + *dst = 255; + else + *dst = *input; + } dst += stride - 8; } } diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 408cd55d35801676265754c501ae4a2bd4b14dd5..7a33a52eaccaa99c159f23058f7c1636f01d5b9d 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info"); #define MAX_WIDTH 4096U #define MIN_WIDTH 640U #define MAX_HEIGHT 2160U -#define MIN_HEIGHT 480U +#define MIN_HEIGHT 360U #define dprintk(dev, fmt, arg...) \ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) @@ -438,7 +438,8 @@ static int job_ready(void *priv) for (; p < p_out + sz; p++) { u32 copy; - p = memchr(p, magic[ctx->comp_magic_cnt], sz); + p = memchr(p, magic[ctx->comp_magic_cnt], + p_out + sz - p); if (!p) { ctx->comp_magic_cnt = 0; break; diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c index c01e1592ad0a8f7d52614bb81d4bc768b5459f2e..c8ffe7bff77f16b6aea80efaedf4b14bf133b058 100644 --- a/drivers/media/platform/video-mux.c +++ b/drivers/media/platform/video-mux.c @@ -365,9 +365,14 @@ static int video_mux_probe(struct platform_device *pdev) vmux->active = -1; vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads), GFP_KERNEL); + if (!vmux->pads) + return -ENOMEM; + vmux->format_mbus = devm_kcalloc(dev, num_pads, sizeof(*vmux->format_mbus), GFP_KERNEL); + if (!vmux->format_mbus) + return -ENOMEM; for (i = 0; i < num_pads; i++) { vmux->pads[i].flags = (i < num_pads - 1) ? 
MEDIA_PAD_FL_SINK diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 462099a141e4aaae2da52bedad20d94b5197d554..7b8cf661f2386c22c3b95e92794dfadd8975820f 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -3,7 +3,8 @@ * * This is a virtual device driver for testing mem-to-mem videobuf framework. * It simulates a device that uses memory buffers for both source and - * destination, processes the data and issues an "irq" (simulated by a timer). + * destination, processes the data and issues an "irq" (simulated by a delayed + * workqueue). * The device is capable of multi-instance, multi-buffer-per-transaction * operation (via the mem2mem framework). * @@ -19,7 +20,6 @@ #include #include #include -#include #include #include @@ -148,7 +148,7 @@ struct vim2m_dev { struct mutex dev_mutex; spinlock_t irqlock; - struct timer_list timer; + struct delayed_work work_run; struct v4l2_m2m_dev *m2m_dev; }; @@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx, return 0; } -static void schedule_irq(struct vim2m_dev *dev, int msec_timeout) -{ - dprintk(dev, "Scheduling a simulated irq\n"); - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); -} - /* * mem2mem callbacks */ @@ -387,13 +381,14 @@ static void device_run(void *priv) device_process(ctx, src_buf, dst_buf); - /* Run a timer, which simulates a hardware irq */ - schedule_irq(dev, ctx->transtime); + /* Run delayed work, which simulates a hardware irq */ + schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime)); } -static void device_isr(struct timer_list *t) +static void device_work(struct work_struct *w) { - struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer); + struct vim2m_dev *vim2m_dev = + container_of(w, struct vim2m_dev, work_run.work); struct vim2m_ctx *curr_ctx; struct vb2_v4l2_buffer *src_vb, *dst_vb; unsigned long flags; @@ -802,9 +797,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) static void vim2m_stop_streaming(struct vb2_queue *q) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); + struct vim2m_dev *dev = ctx->dev; struct vb2_v4l2_buffer *vbuf; unsigned long flags; + if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) + cancel_delayed_work_sync(&dev->work_run); + for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); @@ -1015,6 +1014,7 @@ static int vim2m_probe(struct platform_device *pdev) vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; + INIT_DELAYED_WORK(&dev->work_run, device_work); ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { @@ -1026,7 +1026,6 @@ static int vim2m_probe(struct platform_device *pdev) v4l2_info(&dev->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); - timer_setup(&dev->timer, device_isr, 0); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); @@ -1083,7 +1082,6 @@ static int vim2m_remove(struct platform_device *pdev) media_device_cleanup(&dev->mdev); #endif v4l2_m2m_release(dev->m2m_dev); - del_timer_sync(&dev->timer); video_unregister_device(&dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile index 4b2e3de7856e3261a6fee61418f4145b7444ee39..c4fc8e7d365a4a1c38c60f77844e233f1b02d1b5 100644 --- a/drivers/media/platform/vimc/Makefile +++ b/drivers/media/platform/vimc/Makefile @@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o vimc_debayer-objs := vimc-debayer.o 
vimc_scaler-objs := vimc-scaler.o vimc_sensor-objs := vimc-sensor.o +vimc_streamer-objs := vimc-streamer.o obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \ - vimc_scaler.o vimc_sensor.o + vimc_scaler.o vimc_sensor.o vimc_streamer.o diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c index ec68feaac3784242aacae61852fdb0cf0c19cf17..8e014cc485f002ef8555794a4fa182f1f4e634e0 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c @@ -24,6 +24,7 @@ #include #include "vimc-common.h" +#include "vimc-streamer.h" #define VIMC_CAP_DRV_NAME "vimc-capture" @@ -44,7 +45,7 @@ struct vimc_cap_device { spinlock_t qlock; struct mutex lock; u32 sequence; - struct media_pipeline pipe; + struct vimc_stream stream; }; static const struct v4l2_pix_format fmt_default = { @@ -131,12 +132,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vimc_cap_device *vcap = video_drvdata(file); + int ret; /* Do not change the format while stream is on */ if (vb2_is_busy(&vcap->queue)) return -EBUSY; - vimc_cap_try_fmt_vid_cap(file, priv, f); + ret = vimc_cap_try_fmt_vid_cap(file, priv, f); + if (ret) + return ret; dev_dbg(vcap->dev, "%s: format update: " "old:%dx%d (0x%x, %d, %d, %d, %d) " @@ -248,14 +252,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count) vcap->sequence = 0; /* Start the media pipeline */ - ret = media_pipeline_start(entity, &vcap->pipe); + ret = media_pipeline_start(entity, &vcap->stream.pipe); if (ret) { vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); return ret; } - /* Enable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1); + ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1); if (ret) { media_pipeline_stop(entity); vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); @@ -273,8 +276,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq) { struct vimc_cap_device *vcap = vb2_get_drv_priv(vq); - /* Disable streaming from the pipe */ - vimc_pipeline_s_stream(&vcap->vdev.entity, 0); + vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0); /* Stop the media pipeline */ media_pipeline_stop(&vcap->vdev.entity); @@ -355,8 +357,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master, kfree(vcap); } -static void vimc_cap_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame) +static void *vimc_cap_process_frame(struct vimc_ent_device *ved, + const void *frame) { struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device, ved); @@ -370,7 +372,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, typeof(*vimc_buf), list); if (!vimc_buf) { spin_unlock(&vcap->qlock); - return; + return ERR_PTR(-EAGAIN); } /* Remove this entry from the list */ @@ -391,6 +393,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0, vcap->format.sizeimage); vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE); + return NULL; } static int vimc_cap_comp_bind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c index 617415c224fe4943fc1308d9bc3d0be55997f45e..fa8435ac2b1ae549654b77c112ea7d0c5e86f501 100644 --- a/drivers/media/platform/vimc/vimc-common.c +++ b/drivers/media/platform/vimc/vimc-common.c @@ -207,41 +207,6 @@ const struct 
vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat) } EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat); -int vimc_propagate_frame(struct media_pad *src, const void *frame) -{ - struct media_link *link; - - if (!(src->flags & MEDIA_PAD_FL_SOURCE)) - return -EINVAL; - - /* Send this frame to all sink pads that are direct linked */ - list_for_each_entry(link, &src->entity->links, list) { - if (link->source == src && - (link->flags & MEDIA_LNK_FL_ENABLED)) { - struct vimc_ent_device *ved = NULL; - struct media_entity *entity = link->sink->entity; - - if (is_media_entity_v4l2_subdev(entity)) { - struct v4l2_subdev *sd = - container_of(entity, struct v4l2_subdev, - entity); - ved = v4l2_get_subdevdata(sd); - } else if (is_media_entity_v4l2_video_device(entity)) { - struct video_device *vdev = - container_of(entity, - struct video_device, - entity); - ved = video_get_drvdata(vdev); - } - if (ved && ved->process_frame) - ved->process_frame(ved, link->sink, frame); - } - } - - return 0; -} -EXPORT_SYMBOL_GPL(vimc_propagate_frame); - /* Helper function to allocate and initialize pads */ struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag) { @@ -276,6 +241,8 @@ int vimc_pipeline_s_stream(struct media_entity *ent, int enable) /* Start the stream in the subdevice direct connected */ pad = media_entity_remote_pad(&ent->pads[i]); + if (!pad) + continue; if (!is_media_entity_v4l2_subdev(pad->entity)) return -EINVAL; diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h index 2e9981b18166969e33e0bf362f6e2b21651e0499..6ed969d9efbba835b3b92b1e5044e611f3f8cb86 100644 --- a/drivers/media/platform/vimc/vimc-common.h +++ b/drivers/media/platform/vimc/vimc-common.h @@ -113,23 +113,12 @@ struct vimc_pix_map { struct vimc_ent_device { struct media_entity *ent; struct media_pad *pads; - void (*process_frame)(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame); + void * (*process_frame)(struct vimc_ent_device *ved, + const void *frame); void (*vdev_get_format)(struct vimc_ent_device *ved, struct v4l2_pix_format *fmt); }; -/** - * vimc_propagate_frame - propagate a frame through the topology - * - * @src: the source pad where the frame is being originated - * @frame: the frame to be propagated - * - * This function will call the process_frame callback from the vimc_ent_device - * struct of the nodes directly connected to the @src pad - */ -int vimc_propagate_frame(struct media_pad *src, const void *frame); - /** * vimc_pads_init - initialize pads * diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c index 9246f265de31b920f2181efe1f305c8b85c66cf4..8548fa93bcf65c7766f2af4457b87e938e516091 100644 --- a/drivers/media/platform/vimc/vimc-core.c +++ b/drivers/media/platform/vimc/vimc-core.c @@ -243,10 +243,7 @@ static void vimc_comp_unbind(struct device *master) static int vimc_comp_compare(struct device *comp, void *data) { - const struct platform_device *pdev = to_platform_device(comp); - const char *name = data; - - return !strcmp(pdev->dev.platform_data, name); + return comp == data; } static struct component_match *vimc_add_subdevs(struct vimc_device *vimc) @@ -276,7 +273,7 @@ static struct component_match *vimc_add_subdevs(struct vimc_device *vimc) } component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare, - (void *)vimc->pipe_cfg->ents[i].name); + &vimc->subdevs[i]->dev); } return match; @@ -303,6 +300,8 @@ static int vimc_probe(struct platform_device *pdev) 
dev_dbg(&pdev->dev, "probe"); + memset(&vimc->mdev, 0, sizeof(vimc->mdev)); + /* Create platform_device for each entity in the topology*/ vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents, sizeof(*vimc->subdevs), GFP_KERNEL); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 77887f66f3236a9802913422a8d7fd9fda4ae28d..7d77c63b99d26f78320d6da4d0e17823230bc6ff 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb, static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) if (!vdeb->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1); - if (ret) { - vfree(vdeb->src_frame); - vdeb->src_frame = NULL; - return ret; - } } else { if (!vdeb->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0); - if (ret) - return ret; - vfree(vdeb->src_frame); vdeb->src_frame = NULL; } @@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb, } } -static void vimc_deb_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) +static void *vimc_deb_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device, ved); @@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, /* If the stream in this node is not active, just return */ if (!vdeb->src_frame) - return; + return ERR_PTR(-EINVAL); for (i = 0; i < vdeb->sink_fmt.height; i++) for (j = 0; j < vdeb->sink_fmt.width; j++) { @@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, vdeb->set_rgb_src(vdeb, i, j, rgb); } - /* Propagate the frame through all source pads */ - for (i = 1; i < vdeb->sd.entity.num_pads; i++) { - struct media_pad *pad = &vdeb->sd.entity.pads[i]; + return vdeb->src_frame; - vimc_propagate_frame(pad, vdeb->src_frame); - } } static void vimc_deb_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index b0952ee86296604a36ab3bdaf83950fe48d1bae9..39b2a73dfcc15d915e68751545474857887ed329 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = { static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) if (!vsca->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1); - if (ret) { - vfree(vsca->src_frame); - vsca->src_frame = NULL; - return ret; - } } else { if (!vsca->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0); - if (ret) - return ret; - vfree(vsca->src_frame); vsca->src_frame = NULL; } @@ 
-346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca, vimc_sca_scale_pix(vsca, i, j, sink_frame); } -static void vimc_sca_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) +static void *vimc_sca_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device, ved); - unsigned int i; /* If the stream in this node is not active, just return */ if (!vsca->src_frame) - return; + return ERR_PTR(-EINVAL); vimc_sca_fill_src_frame(vsca, sink_frame); - /* Propagate the frame through all source pads */ - for (i = 1; i < vsca->sd.entity.num_pads; i++) { - struct media_pad *pad = &vsca->sd.entity.pads[i]; - - vimc_propagate_frame(pad, vsca->src_frame); - } + return vsca->src_frame; }; static void vimc_sca_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index b2b89315e7ba566fecf71f0811e1e30f0b3c6b51..3f0ffd4915cd2156bcdc234aecd3912317d57088 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -16,8 +16,6 @@ */ #include -#include -#include #include #include #include @@ -201,38 +199,20 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = { .set_fmt = vimc_sen_set_fmt, }; -static int vimc_sen_tpg_thread(void *data) +static void *vimc_sen_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { - struct vimc_sen_device *vsen = data; - unsigned int i; - - set_freezable(); - set_current_state(TASK_UNINTERRUPTIBLE); - - for (;;) { - try_to_freeze(); - if (kthread_should_stop()) - break; + struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device, + ved); - tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); - - /* Send the frame to all source pads */ - for (i = 0; i < vsen->sd.entity.num_pads; i++) - vimc_propagate_frame(&vsen->sd.entity.pads[i], - vsen->frame); - - /* 60 frames per second */ - schedule_timeout(HZ/60); - } - - return 0; + tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); + return vsen->frame; } static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sen_device *vsen = container_of(sd, struct vimc_sen_device, sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -258,26 +238,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) /* configure the test pattern generator */ vimc_sen_tpg_s_format(vsen); - /* Initialize the image generator thread */ - vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen, - "%s-sen", vsen->sd.v4l2_dev->name); - if (IS_ERR(vsen->kthread_sen)) { - dev_err(vsen->dev, "%s: kernel_thread() failed\n", - vsen->sd.name); - vfree(vsen->frame); - vsen->frame = NULL; - return PTR_ERR(vsen->kthread_sen); - } } else { - if (!vsen->kthread_sen) - return 0; - - /* Stop image generator */ - ret = kthread_stop(vsen->kthread_sen); - if (ret) - return ret; - vsen->kthread_sen = NULL; vfree(vsen->frame); vsen->frame = NULL; return 0; @@ -393,6 +355,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master, if (ret) goto err_free_hdl; + vsen->ved.process_frame = vimc_sen_process_frame; dev_set_drvdata(comp, &vsen->ved); vsen->dev = comp; diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c new file mode 100644 index 0000000000000000000000000000000000000000..392754c18046cb0ac8b4285ae653f59e23aff5a3 --- 
/dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * vimc-streamer.c Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. Magalhães + * + */ + +#include +#include +#include +#include + +#include "vimc-streamer.h" + +/** + * vimc_get_source_entity - get the entity connected with the first sink pad + * + * @ent: reference media_entity + * + * Helper function that returns the media entity containing the source pad + * linked with the first sink pad from the given media entity pad list. + */ +static struct media_entity *vimc_get_source_entity(struct media_entity *ent) +{ + struct media_pad *pad; + int i; + + for (i = 0; i < ent->num_pads; i++) { + if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE) + continue; + pad = media_entity_remote_pad(&ent->pads[i]); + return pad ? pad->entity : NULL; + } + return NULL; +} + +/* + * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream + * + * @stream: the pointer to the stream structure with the pipeline to be + * disabled. + * + * Calls s_stream to disable the stream in each entity of the pipeline + * + */ +static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) +{ + struct media_entity *entity; + struct v4l2_subdev *sd; + + while (stream->pipe_size) { + stream->pipe_size--; + entity = stream->ved_pipeline[stream->pipe_size]->ent; + entity = vimc_get_source_entity(entity); + stream->ved_pipeline[stream->pipe_size] = NULL; + + if (!is_media_entity_v4l2_subdev(entity)) + continue; + + sd = media_entity_to_v4l2_subdev(entity); + v4l2_subdev_call(sd, video, s_stream, 0); + } +} + +/* + * vimc_streamer_pipeline_init - initializes the stream structure + * + * @stream: the pointer to the stream structure to be initialized + * @ved: the pointer to the vimc entity initializing the stream + * + * Initializes the stream structure. Walks through the entity graph to + * construct the pipeline used later on the streamer thread. + * Calls s_stream to enable stream in all entities of the pipeline. 
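+ *
+ * The entities are stored in stream->ved_pipeline[] starting from @ved and
+ * walking towards the source of the graph, so the last entry is the entity
+ * with no linked sink pad (the stream source). The streamer thread later
+ * walks this array in reverse order, passing each process_frame() result
+ * on to the next entity until @ved is reached.
+ *
+ * Return: 0 if success, an error code otherwise.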
+ */ +static int vimc_streamer_pipeline_init(struct vimc_stream *stream, + struct vimc_ent_device *ved) +{ + struct media_entity *entity; + struct video_device *vdev; + struct v4l2_subdev *sd; + int ret = 0; + + stream->pipe_size = 0; + while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) { + if (!ved) { + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; + } + stream->ved_pipeline[stream->pipe_size++] = ved; + + entity = vimc_get_source_entity(ved->ent); + /* Check if the end of the pipeline was reached*/ + if (!entity) + return 0; + + if (is_media_entity_v4l2_subdev(entity)) { + sd = media_entity_to_v4l2_subdev(entity); + ret = v4l2_subdev_call(sd, video, s_stream, 1); + if (ret && ret != -ENOIOCTLCMD) { + vimc_streamer_pipeline_terminate(stream); + return ret; + } + ved = v4l2_get_subdevdata(sd); + } else { + vdev = container_of(entity, + struct video_device, + entity); + ved = video_get_drvdata(vdev); + } + } + + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; +} + +static int vimc_streamer_thread(void *data) +{ + struct vimc_stream *stream = data; + int i; + + set_freezable(); + + for (;;) { + try_to_freeze(); + if (kthread_should_stop()) + break; + + for (i = stream->pipe_size - 1; i >= 0; i--) { + stream->frame = stream->ved_pipeline[i]->process_frame( + stream->ved_pipeline[i], + stream->frame); + if (!stream->frame) + break; + if (IS_ERR(stream->frame)) + break; + } + //wait for 60hz + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ / 60); + } + + return 0; +} + +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable) +{ + int ret; + + if (!stream || !ved) + return -EINVAL; + + if (enable) { + if (stream->kthread) + return 0; + + ret = vimc_streamer_pipeline_init(stream, ved); + if (ret) + return ret; + + stream->kthread = kthread_run(vimc_streamer_thread, stream, + "vimc-streamer thread"); + + if (IS_ERR(stream->kthread)) + return PTR_ERR(stream->kthread); + + } else { + if (!stream->kthread) + return 0; + + ret = kthread_stop(stream->kthread); + if (ret) + return ret; + + stream->kthread = NULL; + + vimc_streamer_pipeline_terminate(stream); + } + + return 0; +} +EXPORT_SYMBOL_GPL(vimc_streamer_s_stream); + +MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer"); +MODULE_AUTHOR("Lucas A. M. Magalhães "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h new file mode 100644 index 0000000000000000000000000000000000000000..752af2e2d5a21ff9abda60c4376786eee6075d08 --- /dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vimc-streamer.h Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. 
Magalhães + + */ + +#ifndef _VIMC_STREAMER_H_ +#define _VIMC_STREAMER_H_ + +#include + +#include "vimc-common.h" + +#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16 + +struct vimc_stream { + struct media_pipeline pipe; + struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE]; + unsigned int pipe_size; + u8 *frame; + struct task_struct *kthread; +}; + +/** + * vimc_streamer_s_stream - start/stop the stream + * + * @stream: the pointer to the stream to start or stop + * @ved: The last entity of the streamer pipeline + * @enable: any non-zero number starts the stream, zero stops it + * + */ +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable); + +#endif //_VIMC_STREAMER_H_ 
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h index 477c80a4d44c00ab105bd45ade343b2565d9e9f4..cd4c8230563c7a348744c8d65550cca2218b8010 100644 --- a/drivers/media/platform/vivid/vivid-core.h +++ b/drivers/media/platform/vivid/vivid-core.h @@ -111,7 +111,7 @@ enum vivid_colorspace { VIVID_CS_170M, VIVID_CS_709, VIVID_CS_SRGB, - VIVID_CS_ADOBERGB, + VIVID_CS_OPRGB, VIVID_CS_2020, VIVID_CS_DCI_P3, VIVID_CS_240M, 
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 5429193fbb91d09dfa158d7d69537a1bb02b24b2..999aa101b15018afd6f3988ff6f04daea72f0520 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -348,7 +348,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709, V4L2_COLORSPACE_SRGB, - V4L2_COLORSPACE_ADOBERGB, + V4L2_COLORSPACE_OPRGB, V4L2_COLORSPACE_BT2020, V4L2_COLORSPACE_DCI_P3, V4L2_COLORSPACE_SMPTE240M, @@ -729,7 +729,7 @@ static const char * const vivid_ctrl_colorspace_strings[] = { "SMPTE 170M", "Rec. 709", "sRGB", - "AdobeRGB", + "opRGB", "BT.2020", "DCI-P3", "SMPTE 240M", @@ -752,7 +752,7 @@ static const char * const vivid_ctrl_xfer_func_strings[] = { "Default", "Rec. 
709", "sRGB", - "AdobeRGB", + "opRGB", "SMPTE 240M", "None", "DCI-P3", diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index f06003bb8e4239282404a4559eebf77afa93067a..ac17883a054fef861a74188edff68728bf10b18e 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -765,7 +765,11 @@ static int vivid_thread_vid_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->cap_seq_resync) { dev->jiffies_vid_cap = cur_jiffies; @@ -865,8 +869,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) "%s-vid-cap", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_cap)) { + int err = PTR_ERR(dev->kthread_vid_cap); + + dev->kthread_vid_cap = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); - return PTR_ERR(dev->kthread_vid_cap); + return err; } *pstreaming = true; vivid_grab_controls(dev, true); @@ -915,8 +922,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_cap); dev->kthread_vid_cap = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index 9981e7548019b3ba726e02b330d247929592a44e..c5f466a7331291d8145e3c60e310382e1fc30c12 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -135,7 +135,11 @@ static int vivid_thread_vid_out(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->out_seq_resync) { dev->jiffies_vid_out = cur_jiffies; @@ -236,8 +240,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) "%s-vid-out", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_out)) { + int err = PTR_ERR(dev->kthread_vid_out); + + dev->kthread_vid_out = NULL; v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); - return PTR_ERR(dev->kthread_vid_out); + return err; } *pstreaming = true; vivid_grab_controls(dev, true); @@ -286,8 +293,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_out); dev->kthread_vid_out = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index cfb7cb4d37a875b0715d1b4020a662c0338625eb..e1794f8689d473349084403c00a72a80d025dce3 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c @@ -137,7 +137,11 @@ static int vivid_thread_sdr_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->sdr_cap_seq_resync) { dev->jiffies_sdr_cap = cur_jiffies; @@ -297,10 +301,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) } /* shutdown control thread */ - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_sdr_cap); dev->kthread_sdr_cap = NULL; - mutex_lock(&dev->mutex); } const 
struct vb2_ops vivid_sdr_cap_qops = { diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index 1599159f2574e5c1f9dbc9b075bdc05134bb2d5a..9eaac497ec34801791c4fca6d0195ef812d5222b 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -222,9 +222,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_out_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_cap) - return 0; - dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); for (i = 0; i < VIDEO_MAX_FRAME; i++) @@ -438,6 +435,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); break; } + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; vivid_update_quality(dev); tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); dev->crop_cap = dev->src_rect; @@ -916,6 +915,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection if (dev->has_compose_cap) { v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); + v4l2_rect_map_inside(compose, &fmt); } dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); @@ -990,7 +990,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); if (dev->bitmap_cap && (compose->width != s->r.width || compose->height != s->r.height)) { - kfree(dev->bitmap_cap); + vfree(dev->bitmap_cap); dev->bitmap_cap = NULL; } *compose = s->r; @@ -1722,7 +1722,7 @@ int vidioc_s_edid(struct file *file, void *_fh, return -E2BIG; } phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); - ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL); + ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL); if (ret) return ret; @@ -1738,7 +1738,7 @@ int vidioc_s_edid(struct file *file, void *_fh, for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) cec_s_phys_addr(dev->cec_tx_adap[i], - cec_phys_addr_for_input(phys_addr, i + 1), + v4l2_phys_addr_for_input(phys_addr, i + 1), false); return 0; } diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c index be531caa2cdf9e5f4d9f1213e7f8e4e524848b80..e108e9befb77fc53d1f7fd33f4a68a049224d2d2 100644 --- a/drivers/media/platform/vivid/vivid-vid-common.c +++ b/drivers/media/platform/vivid/vivid-vid-common.c @@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = { .type = V4L2_DV_BT_656_1120, /* keep this initialization for compatibility with GCC < 4.4.6 */ .reserved = { 0 }, - V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000, + V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000, V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED) @@ -863,7 +863,7 @@ int vidioc_g_edid(struct file *file, void *_fh, if (edid->blocks > dev->edid_blocks - edid->start_block) edid->blocks = dev->edid_blocks - edid->start_block; if (adap) - cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr); + v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr); memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128); return 0; } diff --git 
a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index 51fec66d8d455673139f12a5b5c826c988bf838b..0f909500a0b801767dc1e1856aa12694ccd42772 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -146,9 +146,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_cap_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_out) - return 0; - dev->vid_out_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); if (dev->start_streaming_error) { @@ -413,7 +410,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv, mp->colorspace = V4L2_COLORSPACE_SMPTE170M; } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M && mp->colorspace != V4L2_COLORSPACE_REC709 && - mp->colorspace != V4L2_COLORSPACE_ADOBERGB && + mp->colorspace != V4L2_COLORSPACE_OPRGB && mp->colorspace != V4L2_COLORSPACE_BT2020 && mp->colorspace != V4L2_COLORSPACE_SRGB) { mp->colorspace = V4L2_COLORSPACE_REC709; diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index 26289adaf658c1f4a2adda8e02d3618323ea1f1b..a5634ca85a3165c3aa51ca9f829abf1390c9f700 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm) /* Get a default body for our list. */ dl->body0 = vsp1_dl_body_get(dlm->pool); - if (!dl->body0) + if (!dl->body0) { + kfree(dl); return NULL; + } header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index b9c0f695d002bdedbd6dbcc3a2df71367697f575..8d86f618ec776bb893c6b298e1d232b0cca076af 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -770,6 +770,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, struct vsp1_device *vsp1 = dev_get_drvdata(dev); struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; const struct vsp1_format_info *fmtinfo; + unsigned int chroma_hsub; struct vsp1_rwpf *rpf; if (rpf_index >= vsp1->info->rpf_count) @@ -810,10 +811,18 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, return -EINVAL; } + /* + * Only formats with three planes can affect the chroma planes pitch. + * All formats with two planes have a horizontal subsampling value of 2, + * but combine U and V in a single chroma plane, which thus results in + * the luma plane and chroma plane having the same pitch. + */ + chroma_hsub = (fmtinfo->planes == 3) ? 
fmtinfo->hsub : 1; + rpf->fmtinfo = fmtinfo; rpf->format.num_planes = fmtinfo->planes; rpf->format.plane_fmt[0].bytesperline = cfg->pitch; - rpf->format.plane_fmt[1].bytesperline = cfg->pitch; + rpf->format.plane_fmt[1].bytesperline = cfg->pitch / chroma_hsub; rpf->alpha = cfg->alpha; rpf->mem.addr[0] = cfg->mem[0]; diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h index 3738ff2f7b850052565d3b131d559fad048103d7..f6e4157095cc0081d013e20b238698c0c3183716 100644 --- a/drivers/media/platform/vsp1/vsp1_regs.h +++ b/drivers/media/platform/vsp1/vsp1_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * vsp1_regs.h -- R-Car VSP1 Registers Definitions * diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c index 9a5079d64c4ab1fa96af7f0a22b0ebdb4793701b..729600c4a056b824b4fdfcc7e831ae939e951aa2 100644 --- a/drivers/media/radio/radio-raremono.c +++ b/drivers/media/radio/radio-raremono.c @@ -271,6 +271,14 @@ static int vidioc_g_frequency(struct file *file, void *priv, return 0; } +static void raremono_device_release(struct v4l2_device *v4l2_dev) +{ + struct raremono_device *radio = to_raremono_dev(v4l2_dev); + + kfree(radio->buffer); + kfree(radio); +} + /* File system interface */ static const struct v4l2_file_operations usb_raremono_fops = { .owner = THIS_MODULE, @@ -295,12 +303,14 @@ static int usb_raremono_probe(struct usb_interface *intf, struct raremono_device *radio; int retval = 0; - radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL); - if (radio) - radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL); - - if (!radio || !radio->buffer) + radio = kzalloc(sizeof(*radio), GFP_KERNEL); + if (!radio) + return -ENOMEM; + radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); + if (!radio->buffer) { + kfree(radio); return -ENOMEM; + } radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; @@ -324,7 +334,8 @@ static int usb_raremono_probe(struct usb_interface *intf, if (retval != 3 || (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) { dev_info(&intf->dev, "this is not Thanko's Raremono.\n"); - return -ENODEV; + retval = -ENODEV; + goto free_mem; } dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n", @@ -333,7 +344,7 @@ static int usb_raremono_probe(struct usb_interface *intf, retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); - return retval; + goto free_mem; } mutex_init(&radio->lock); @@ -345,6 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf, radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; + radio->v4l2_dev.release = raremono_device_release; usb_set_intfdata(intf, &radio->v4l2_dev); @@ -360,6 +372,10 @@ static int usb_raremono_probe(struct usb_interface *intf, } dev_err(&intf->dev, "could not register video device\n"); v4l2_device_unregister(&radio->v4l2_dev); + +free_mem: + kfree(radio->buffer); + kfree(radio); return retval; } diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index 11aa94f189cb0f2fd2776e3dc23220942c549f9f..500026974dee0528eb87a67511fc15b822035267 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1156,8 +1156,7 @@ static int wl1273_fm_fops_release(struct file *file) if (radio->rds_users > 0) { radio->rds_users--; if 
(radio->rds_users == 0) { - if (mutex_lock_interruptible(&core->lock)) - return -EINTR; + mutex_lock(&core->lock); radio->irq_flags &= ~WL1273_RDS_EVENT; diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index e3b3ecd14a4dd38803cec253a5eefc18ef723af4..ae7540b765e1dd81659bbaf99e4b5612b284d2aa 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -485,6 +485,8 @@ static int si470x_i2c_remove(struct i2c_client *client) video_unregister_device(&radio->videodev); kfree(radio); + v4l2_ctrl_handler_free(&radio->hdl); + v4l2_device_unregister(&radio->v4l2_dev); return 0; } diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 313a95f195a27235bf2f1545e7bf2e25cde521e1..19e381dd58089130664471855922db71f3fd5243 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -743,7 +743,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, /* start radio */ retval = si470x_start_usb(radio); if (retval < 0) - goto err_all; + goto err_buf; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ @@ -758,6 +758,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, return 0; err_all: + usb_kill_urb(radio->int_in_urb); +err_buf: kfree(radio->buffer); err_ctrl: v4l2_ctrl_handler_free(&radio->hdl); @@ -831,6 +833,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf) mutex_lock(&radio->lock); v4l2_device_disconnect(&radio->v4l2_dev); video_unregister_device(&radio->videodev); + usb_kill_urb(radio->int_in_urb); usb_set_intfdata(intf, NULL); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 800d69c3f80b8677fb3e609198beb1a3ba251cee..8b8453dab660b76d7dd1403e619f7626adfd8d75 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -472,11 +472,12 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload, jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000); return -ETIMEDOUT; } + spin_lock_irqsave(&fmdev->resp_skb_lock, flags); if (!fmdev->resp_skb) { + spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags); fmerr("Response SKB is missing\n"); return -EFAULT; } - spin_lock_irqsave(&fmdev->resp_skb_lock, flags); skb = fmdev->resp_skb; fmdev->resp_skb = NULL; spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags); @@ -489,7 +490,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload, return -EIO; } /* Send response data to caller */ - if (response != NULL && response_len != NULL && evt_hdr->dlen) { + if (response != NULL && response_len != NULL && evt_hdr->dlen && + evt_hdr->dlen <= payload_len) { /* Skip header info and copy only response data */ skb_pull(skb, sizeof(struct fm_event_msg_hdr)); memcpy(response, skb->data, evt_hdr->dlen); @@ -583,6 +585,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev) return; fm_evt_hdr = (void *)skb->data; + if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag)) + return; /* Skip header info and copy only response data */ skb_pull(skb, sizeof(struct fm_event_msg_hdr)); @@ -1308,7 +1312,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev) static int fm_power_up(struct fmdev *fmdev, u8 mode) { u16 payload; - __be16 asic_id, asic_ver; + __be16 asic_id = 
0, asic_ver = 0; int resp_len, ret; u8 fw_name[50]; diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index dccdf6558e6ab7ce4e1e613453acdd5558badd3a..33abc8616ecb8122bd24a6117680e1f22aaf3ef7 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -549,6 +549,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) /* Register with V4L2 subsystem as RADIO device */ if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) { + v4l2_device_unregister(&fmdev->v4l2_dev); fmerr("Could not register video device\n"); return -ENOMEM; } @@ -562,6 +563,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) if (ret < 0) { fmerr("(fmdev): Can't init ctrl handler\n"); v4l2_ctrl_handler_free(&fmdev->ctrl_handler); + video_unregister_device(fmdev->radio_dev); + v4l2_device_unregister(&fmdev->v4l2_dev); return -EBUSY; } diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 8b97fd1f0ceaf8dbbe9086591c492eaa34f18cd3..7a83dc5fed006281b95279bd53ce062532f7061d 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -218,7 +218,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (attr->attach_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); @@ -243,7 +243,7 @@ int lirc_prog_detach(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog); - rcdev = rc_dev_get_from_fd(attr->target_fd); + rcdev = rc_dev_get_from_fd(attr->target_fd, true); if (IS_ERR(rcdev)) { bpf_prog_put(prog); return PTR_ERR(rcdev); @@ -268,7 +268,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) if (attr->query.query_flags) return -EINVAL; - rcdev = rc_dev_get_from_fd(attr->query.target_fd); + rcdev = rc_dev_get_from_fd(attr->query.target_fd, false); if (IS_ERR(rcdev)) return PTR_ERR(rcdev); diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c index 71b8c9bbf6c409b954471d6e4cf20884fdea35ee..8cf2a5c0575ab09f33f4601f4ba53af3a0cfaea2 100644 --- a/drivers/media/rc/ene_ir.c +++ b/drivers/media/rc/ene_ir.c @@ -1116,6 +1116,8 @@ static void ene_remove(struct pnp_dev *pnp_dev) struct ene_device *dev = pnp_get_drvdata(pnp_dev); unsigned long flags; + rc_unregister_device(dev->rdev); + del_timer_sync(&dev->tx_sim_timer); spin_lock_irqsave(&dev->hw_lock, flags); ene_rx_disable(dev); ene_rx_restore_hw_buffer(dev); @@ -1123,7 +1125,6 @@ static void ene_remove(struct pnp_dev *pnp_dev) free_irq(dev->irq, dev); release_region(dev->hw_io, ENE_IO_SIZE); - rc_unregister_device(dev->rdev); kfree(dev); } diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index 7daac8bab83b02d2db1f52f394ac701971cc6125..6f3030b2054d07ca0731df6552e108e1c1d6e54f 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -424,6 +424,10 @@ static int iguanair_probe(struct usb_interface *intf, int ret, pipein, pipeout; struct usb_host_interface *idesc; + idesc = intf->altsetting; + if (idesc->desc.bNumEndpoints < 2) + return -ENODEV; + ir = kzalloc(sizeof(*ir), GFP_KERNEL); rc = rc_allocate_device(RC_DRIVER_IR_RAW); if (!ir || !rc) { @@ -438,18 +442,13 @@ static int iguanair_probe(struct usb_interface *intf, ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ir->urb_out = usb_alloc_urb(0, GFP_KERNEL); - if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) { + if (!ir->buf_in || 
!ir->packet || !ir->urb_in || !ir->urb_out || + !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) || + !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) { ret = -ENOMEM; goto out; } - idesc = intf->altsetting; - - if (idesc->desc.bNumEndpoints < 2) { - ret = -ENODEV; - goto out; - } - ir->rc = rc; ir->dev = &intf->dev; ir->udev = udev; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 1041c056854d5035d2d6c6072ad37537334c2927..4dfaa791888cab7f1860637ad92e17aaace60d61 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1607,8 +1607,7 @@ static void imon_incoming_packet(struct imon_context *ictx, spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ - if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && - buf[7] == 0x86) { + if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; @@ -1835,12 +1834,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx) break; /* iMON VFD, MCE IR */ case 0x46: - case 0x7e: case 0x9e: dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; + /* iMON VFD, iMON or MCE IR */ + case 0x7e: + dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR"); + detected_display_type = IMON_DISPLAY_TYPE_VFD; + allowed_protos |= RC_PROTO_BIT_RC6_MCE; + break; /* iMON LCD, MCE IR */ case 0x9f: dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR"); @@ -2390,6 +2394,12 @@ static int imon_probe(struct usb_interface *interface, goto fail; } + if (first_if->dev.driver != interface->dev.driver) { + dev_err(&interface->dev, "inconsistent driver matching\n"); + ret = -EINVAL; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index 68487ce9f79b618e213aaba9dcb0a5ae069baf15..d96aed1343e4208afef093e04383e549265657ab 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -40,6 +40,7 @@ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */ #define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */ +#define RC6_6A_KATHREIN_CC 0x80460000 /* Kathrein RCU-676 customer code */ #ifndef CHAR_BIT #define CHAR_BIT 8 /* Normally in */ #endif @@ -242,13 +243,17 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev) toggle = 0; break; case 32: - if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { + switch (scancode & RC6_6A_LCC_MASK) { + case RC6_6A_MCE_CC: + case RC6_6A_KATHREIN_CC: protocol = RC_PROTO_RC6_MCE; toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK); scancode &= ~RC6_6A_MCE_TOGGLE_MASK; - } else { + break; + default: protocol = RC_PROTO_RC6_6A_32; toggle = 0; + break; } break; default: diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c index 66334e8d63baa8c41fb4e4afe742aeb994704148..c58f2d38a4582ec3ab126bd471a1861b7e42f215 100644 --- a/drivers/media/rc/ir-spi.c +++ b/drivers/media/rc/ir-spi.c @@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = { { .compatible = "ir-spi-led" }, {}, }; +MODULE_DEVICE_TABLE(of, ir_spi_of_match); static struct spi_driver ir_spi_driver = { .probe = ir_spi_probe, diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index f862f1b7f99657c6cff3a816a68bc853d2d01033..71a7c7cdb1b83e569ab541e7d4bae27cb6001b9e 100644 --- a/drivers/media/rc/lirc_dev.c +++ 
b/drivers/media/rc/lirc_dev.c @@ -848,7 +848,7 @@ void __exit lirc_dev_exit(void) unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX); } -struct rc_dev *rc_dev_get_from_fd(int fd) +struct rc_dev *rc_dev_get_from_fd(int fd, bool write) { struct fd f = fdget(fd); struct lirc_fh *fh; @@ -862,6 +862,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd) return ERR_PTR(-EINVAL); } + if (write && !(f.file->f_mode & FMODE_WRITE)) + return ERR_PTR(-EPERM); + fh = f.file->private_data; dev = fh->rc; diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 4c0c8008872aed48b4dac05063eb4c8831f2283b..520138f268cbe34f927d502504a720e3a98c808a 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -42,21 +42,22 @@ #include #include -#define DRIVER_VERSION "1.94" +#define DRIVER_VERSION "1.95" #define DRIVER_AUTHOR "Jarod Wilson " #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \ "device driver" #define DRIVER_NAME "mceusb" +#define USB_TX_TIMEOUT 1000 /* in milliseconds */ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ /* MCE constants */ -#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ +#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */ -#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */ -#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */ -#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */ +#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */ +#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1) + /* Actual format is 0x80 + num_bytes */ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */ @@ -609,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, if (len <= skip) return; - dev_dbg(dev, "%cx data: %*ph (length=%d)", - (out ? 't' : 'r'), - min(len, buf_len - offset), buf + offset, len); + dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", + (out ? 't' : 'r'), offset, + min(len, buf_len - offset), buf + offset, len, buf_len); inout = out ? "Request" : "Got"; @@ -733,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, case MCE_RSP_CMD_ILLEGAL: dev_dbg(dev, "Illegal PORT_IR command"); break; + case MCE_RSP_TX_TIMEOUT: + dev_dbg(dev, "IR TX timeout (TX buffer underrun)"); + break; default: dev_dbg(dev, "Unknown command 0x%02x 0x%02x", cmd, subcmd); @@ -747,13 +751,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, dev_dbg(dev, "End of raw IR data"); else if ((cmd != MCE_CMD_PORT_IR) && ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA)) - dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem); + dev_dbg(dev, "Raw IR data, %d pulse/space samples", + cmd & MCE_PACKET_LENGTH_MASK); #endif } /* * Schedule work that can't be done in interrupt handlers - * (mceusb_dev_recv() and mce_async_callback()) nor tasklets. + * (mceusb_dev_recv() and mce_write_callback()) nor tasklets. * Invokes mceusb_deferred_kevent() for recovering from * error events specified by the kevent bit field. 
*/ @@ -766,23 +771,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent) dev_dbg(ir->dev, "kevent %d scheduled", kevent); } -static void mce_async_callback(struct urb *urb) +static void mce_write_callback(struct urb *urb) { - struct mceusb_dev *ir; - int len; - if (!urb) return; - ir = urb->context; + complete(urb->context); +} + +/* + * Write (TX/send) data to MCE device USB endpoint out. + * Used for IR blaster TX and MCE device commands. + * + * Return: The number of bytes written (> 0) or errno (< 0). + */ +static int mce_write(struct mceusb_dev *ir, u8 *data, int size) +{ + int ret; + struct urb *urb; + struct device *dev = ir->dev; + unsigned char *buf_out; + struct completion tx_done; + unsigned long expire; + unsigned long ret_wait; + + mceusb_dev_printdata(ir, data, size, 0, size, true); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (unlikely(!urb)) { + dev_err(dev, "Error: mce write couldn't allocate urb"); + return -ENOMEM; + } + + buf_out = kmalloc(size, GFP_KERNEL); + if (!buf_out) { + usb_free_urb(urb); + return -ENOMEM; + } + + init_completion(&tx_done); + + /* outbound data */ + if (usb_endpoint_xfer_int(ir->usb_ep_out)) + usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out, + buf_out, size, mce_write_callback, &tx_done, + ir->usb_ep_out->bInterval); + else + usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out, + buf_out, size, mce_write_callback, &tx_done); + memcpy(buf_out, data, size); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + dev_err(dev, "Error: mce write submit urb error = %d", ret); + kfree(buf_out); + usb_free_urb(urb); + return ret; + } + + expire = msecs_to_jiffies(USB_TX_TIMEOUT); + ret_wait = wait_for_completion_timeout(&tx_done, expire); + if (!ret_wait) { + dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))", + expire, USB_TX_TIMEOUT); + usb_kill_urb(urb); + ret = (urb->status == -ENOENT ? 
-ETIMEDOUT : urb->status); + } else { + ret = urb->status; + } + if (ret >= 0) + ret = urb->actual_length; /* bytes written */ switch (urb->status) { /* success */ case 0: - len = urb->actual_length; - - mceusb_dev_printdata(ir, urb->transfer_buffer, len, - 0, len, true); break; case -ECONNRESET: @@ -792,140 +854,135 @@ static void mce_async_callback(struct urb *urb) break; case -EPIPE: - dev_err(ir->dev, "Error: request urb status = %d (TX HALT)", + dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)", urb->status); mceusb_defer_kevent(ir, EVENT_TX_HALT); break; default: - dev_err(ir->dev, "Error: request urb status = %d", urb->status); + dev_err(ir->dev, "Error: mce write urb status = %d", + urb->status); break; } - /* the transfer buffer and urb were allocated in mce_request_packet */ - kfree(urb->transfer_buffer); - usb_free_urb(urb); -} + dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)", + ret, ret_wait, expire, USB_TX_TIMEOUT, + urb->actual_length, urb->status); -/* request outgoing (send) usb packet - used to initialize remote */ -static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data, - int size) -{ - int res; - struct urb *async_urb; - struct device *dev = ir->dev; - unsigned char *async_buf; - - async_urb = usb_alloc_urb(0, GFP_KERNEL); - if (unlikely(!async_urb)) { - dev_err(dev, "Error, couldn't allocate urb!"); - return; - } - - async_buf = kmalloc(size, GFP_KERNEL); - if (!async_buf) { - usb_free_urb(async_urb); - return; - } - - /* outbound data */ - if (usb_endpoint_xfer_int(ir->usb_ep_out)) - usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out, - async_buf, size, mce_async_callback, ir, - ir->usb_ep_out->bInterval); - else - usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out, - async_buf, size, mce_async_callback, ir); - - memcpy(async_buf, data, size); - - dev_dbg(dev, "send request called (size=%#x)", size); + kfree(buf_out); + usb_free_urb(urb); - res = usb_submit_urb(async_urb, GFP_ATOMIC); - if (res) { - dev_err(dev, "send request FAILED! (res=%d)", res); - kfree(async_buf); - usb_free_urb(async_urb); - return; - } - dev_dbg(dev, "send request complete (res=%d)", res); + return ret; } -static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size) +static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size) { int rsize = sizeof(DEVICE_RESUME); if (ir->need_reset) { ir->need_reset = false; - mce_request_packet(ir, DEVICE_RESUME, rsize); + mce_write(ir, DEVICE_RESUME, rsize); msleep(10); } - mce_request_packet(ir, data, size); + mce_write(ir, data, size); msleep(10); } -/* Send data out the IR blaster port(s) */ +/* + * Transmit IR out the MCE device IR blaster port(s). + * + * Convert IR pulse/space sequence from LIRC to MCE format. + * Break up a long IR sequence into multiple parts (MCE IR data packets). + * + * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec. + * Pulses and spaces are implicit by their position. + * The first IR sample, txbuf[0], is always a pulse. + * + * u8 irbuf[] consists of multiple IR data packets for the MCE device. + * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples. + * An IR sample is 1-bit pulse/space flag with 7-bit time + * in MCE time units (50usec). + * + * Return: The number of IR samples sent (> 0) or errno (< 0). 
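+ *
+ * As an illustration (with MCE_PULSE_BIT being the 1-bit pulse/space flag
+ * of a sample, as described above): a 100us pulse followed by a 9000us
+ * space is 2 and 180 MCE time units. The pulse fits in one sample
+ * (MCE_PULSE_BIT | 2 = 0x82); the space is split because a sample holds at
+ * most MCE_MAX_PULSE_LENGTH (127) units, giving 0x7f and 0x35 (127 + 53).
+ * The resulting short transmission is
+ *
+ *   0x83 0x82 0x7f 0x35 0x80
+ *
+ * i.e. a header of 0x80 + 3 samples, the three samples, and the
+ * MCE_IRDATA_TRAILER byte.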
+ */ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count) { struct mceusb_dev *ir = dev->priv; - int i, length, ret = 0; - int cmdcount = 0; - unsigned char cmdbuf[MCE_CMDBUF_SIZE]; - - /* MCE tx init header */ - cmdbuf[cmdcount++] = MCE_CMD_PORT_IR; - cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS; - cmdbuf[cmdcount++] = ir->tx_mask; + u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 }; + u8 irbuf[MCE_IRBUF_SIZE]; + int ircount = 0; + unsigned int irsample; + int i, length, ret; /* Send the set TX ports command */ - mce_async_out(ir, cmdbuf, cmdcount); - cmdcount = 0; - - /* Generate mce packet data */ - for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) { - txbuf[i] = txbuf[i] / MCE_TIME_UNIT; - - do { /* loop to support long pulses/spaces > 127*50us=6.35ms */ - - /* Insert mce packet header every 4th entry */ - if ((cmdcount < MCE_CMDBUF_SIZE) && - (cmdcount % MCE_CODE_LENGTH) == 0) - cmdbuf[cmdcount++] = MCE_IRDATA_HEADER; - - /* Insert mce packet data */ - if (cmdcount < MCE_CMDBUF_SIZE) - cmdbuf[cmdcount++] = - (txbuf[i] < MCE_PULSE_BIT ? - txbuf[i] : MCE_MAX_PULSE_LENGTH) | - (i & 1 ? 0x00 : MCE_PULSE_BIT); - else { - ret = -EINVAL; - goto out; + cmdbuf[2] = ir->tx_mask; + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); + + /* Generate mce IR data packet */ + for (i = 0; i < count; i++) { + irsample = txbuf[i] / MCE_TIME_UNIT; + + /* loop to support long pulses/spaces > 6350us (127*50us) */ + while (irsample > 0) { + /* Insert IR header every 30th entry */ + if (ircount % MCE_PACKET_SIZE == 0) { + /* Room for IR header and one IR sample? */ + if (ircount >= MCE_IRBUF_SIZE - 1) { + /* Send near full buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; + ircount = 0; + } + irbuf[ircount++] = MCE_IRDATA_HEADER; } - } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) && - (txbuf[i] -= MCE_MAX_PULSE_LENGTH)); - } - - /* Check if we have room for the empty packet at the end */ - if (cmdcount >= MCE_CMDBUF_SIZE) { - ret = -EINVAL; - goto out; - } + /* Insert IR sample */ + if (irsample <= MCE_MAX_PULSE_LENGTH) { + irbuf[ircount] = irsample; + irsample = 0; + } else { + irbuf[ircount] = MCE_MAX_PULSE_LENGTH; + irsample -= MCE_MAX_PULSE_LENGTH; + } + /* + * Even i = IR pulse + * Odd i = IR space + */ + irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT); + ircount++; + + /* IR buffer full? */ + if (ircount >= MCE_IRBUF_SIZE) { + /* Fix packet length in last header */ + length = ircount % MCE_PACKET_SIZE; + if (length > 0) + irbuf[ircount - length] -= + MCE_PACKET_SIZE - length; + /* Send full buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; + ircount = 0; + } + } + } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */ /* Fix packet length in last header */ - length = cmdcount % MCE_CODE_LENGTH; - cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length; + length = ircount % MCE_PACKET_SIZE; + if (length > 0) + irbuf[ircount - length] -= MCE_PACKET_SIZE - length; - /* All mce commands end with an empty packet (0x80) */ - cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER; + /* Append IR trailer (0x80) to final partial (or empty) IR buffer */ + irbuf[ircount++] = MCE_IRDATA_TRAILER; - /* Transmit the command to the mce device */ - mce_async_out(ir, cmdbuf, cmdcount); + /* Send final buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; -out: - return ret ? 
ret : count; + return count; } /* Sets active IR outputs -- mce devices typically have two */ @@ -965,7 +1022,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier) cmdbuf[2] = MCE_CMD_SIG_END; cmdbuf[3] = MCE_IRDATA_TRAILER; dev_dbg(ir->dev, "disabling carrier modulation"); - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } @@ -979,7 +1036,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier) carrier); /* Transmit new carrier to mce device */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } } @@ -1002,10 +1059,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) cmdbuf[2] = units >> 8; cmdbuf[3] = units; - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* get receiver timeout value */ - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); return 0; } @@ -1030,7 +1087,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable) ir->wideband_rx_enabled = false; cmdbuf[2] = 1; /* port 1 is long range receiver */ } - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* response from device sets ir->learning_active */ return 0; @@ -1053,7 +1110,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) ir->carrier_report_enabled = true; if (!ir->learning_active) { cmdbuf[2] = 2; /* port 2 is short range receiver */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } else { ir->carrier_report_enabled = false; @@ -1064,7 +1121,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) */ if (ir->learning_active && !ir->wideband_rx_enabled) { cmdbuf[2] = 1; /* port 1 is long range receiver */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } @@ -1143,6 +1200,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) } break; case MCE_RSP_CMD_ILLEGAL: + case MCE_RSP_TX_TIMEOUT: ir->need_reset = true; break; default: @@ -1280,70 +1338,63 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir) { /* If we get no reply or an illegal command reply, its ver 1, says MS */ ir->emver = 1; - mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER)); + mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER)); } static void mceusb_gen1_init(struct mceusb_dev *ir) { int ret; struct device *dev = ir->dev; - char *data; - - data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL); - if (!data) { - dev_err(dev, "%s: memory allocation failed!", __func__); - return; - } + char data[USB_CTRL_MSG_SZ]; /* * This is a strange one. 
Windows issues a set address to the device * on the receive control pipe and expect a certain value pair back */ - ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), - USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, - data, USB_CTRL_MSG_SZ, HZ * 3); + ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS, + USB_DIR_IN | USB_TYPE_VENDOR, + 0, 0, data, USB_CTRL_MSG_SZ, 3000, + GFP_KERNEL); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); /* set feature: bit rate 38400 bps */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, - 0xc04e, 0x0000, NULL, 0, HZ * 3); + ret = usb_control_msg_send(ir->usbdev, 0, + USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, + 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - 4, USB_TYPE_VENDOR, - 0x0808, 0x0000, NULL, 0, HZ * 3); + ret = usb_control_msg_send(ir->usbdev, 0, + 4, USB_TYPE_VENDOR, + 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - 2, USB_TYPE_VENDOR, - 0x0000, 0x0100, NULL, 0, HZ * 3); + ret = usb_control_msg_send(ir->usbdev, 0, + 2, USB_TYPE_VENDOR, + 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get hw/sw revision? */ - mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION)); - - kfree(data); + mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION)); } static void mceusb_gen2_init(struct mceusb_dev *ir) { /* device resume */ - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get wake version (protocol, key, address) */ - mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION)); + mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION)); /* unknown what this one actually returns... 
*/ - mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2)); + mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2)); } static void mceusb_get_parameters(struct mceusb_dev *ir) @@ -1357,24 +1408,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir) ir->num_rxports = 2; /* get number of tx and rx ports */ - mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS)); + mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS)); /* get the carrier and frequency */ - mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); + mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); if (ir->num_txports && !ir->flags.no_tx) /* get the transmitter bitmask */ - mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); + mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); /* get receiver timeout value */ - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); /* get receiver sensor setting */ - mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); + mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); for (i = 0; i < ir->num_txports; i++) { cmdbuf[2] = i; - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } @@ -1383,7 +1434,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir) if (ir->emver < 2) return; - mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED)); + mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED)); } /* diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c index e42efd9d382ec4290413e606b6f6040c28805cfe..d37b85d2bc750c0a3a5283b63c310c0663e33b02 100644 --- a/drivers/media/rc/mtk-cir.c +++ b/drivers/media/rc/mtk-cir.c @@ -44,6 +44,11 @@ /* Fields containing pulse width data */ #define MTK_WIDTH_MASK (GENMASK(7, 0)) +/* IR threshold */ +#define MTK_IRTHD 0x14 +#define MTK_DG_CNT_MASK (GENMASK(12, 8)) +#define MTK_DG_CNT(x) ((x) << 8) + /* Bit to enable interrupt */ #define MTK_IRINT_EN BIT(0) @@ -409,6 +414,9 @@ static int mtk_ir_probe(struct platform_device *pdev) mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask, ir->data->fields[MTK_HW_PERIOD].reg); + /* Set de-glitch counter */ + mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD); + /* Enable IR and PWM */ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG); val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN; diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index e847bdad5c51ca3e65e8d7961f9a298a4c30f49c..8056be86f55f0fe28325d51dc211429c88838c2d 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -303,7 +303,7 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev); void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc); int ir_lirc_register(struct rc_dev *dev); void ir_lirc_unregister(struct rc_dev *dev); -struct rc_dev *rc_dev_get_from_fd(int fd); +struct rc_dev *rc_dev_get_from_fd(int fd, bool write); #else static inline int lirc_dev_init(void) { return 0; } static inline void lirc_dev_exit(void) {} diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index ca68e1d2b2f989cf366bdecc999db5e388d50e11..0f218afdadaa51dca3fa7a9b5edc99a54ae8d1f4 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -707,7 +707,8 @@ void rc_repeat(struct rc_dev *dev) (dev->last_toggle ? 
LIRC_SCANCODE_FLAG_TOGGLE : 0) }; - ir_lirc_scancode_event(dev, &sc); + if (dev->allowed_protocols != RC_PROTO_BIT_CEC) + ir_lirc_scancode_event(dev, &sc); spin_lock_irqsave(&dev->keylock, flags); @@ -747,7 +748,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, .keycode = keycode }; - ir_lirc_scancode_event(dev, &sc); + if (dev->allowed_protocols != RC_PROTO_BIT_CEC) + ir_lirc_scancode_event(dev, &sc); if (new_event && dev->keypressed) ir_do_keyup(dev, false); @@ -1954,6 +1956,8 @@ void rc_unregister_device(struct rc_dev *dev) rc_free_rx_device(dev); mutex_lock(&dev->lock); + if (dev->users && dev->close) + dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 8bf5637b3a69aca4a2fbf9ba4fe4f74c449fd233..e613c0175591ec4e175f692748927eba71e48329 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -773,8 +773,6 @@ static void serial_ir_exit(void) static int __init serial_ir_init_module(void) { - int result; - switch (type) { case IR_HOMEBREW: case IR_IRDEO: @@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void) if (sense != -1) sense = !!sense; - result = serial_ir_init(); - if (!result) - return 0; - - serial_ir_exit(); - return result; + return serial_ir_init(); } static void __exit serial_ir_exit_module(void) diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c index 11ce5101e19f6b71ed703ea9cd8010e7ae1b6967..c43730977f53c0bd7caa28fa58a1ce67018ce082 100644 --- a/drivers/media/spi/cxd2880-spi.c +++ b/drivers/media/spi/cxd2880-spi.c @@ -536,6 +536,7 @@ cxd2880_spi_probe(struct spi_device *spi) if (!dvb_attach(cxd2880_attach, &dvb_spi->dvb_fe, &config)) { pr_err("cxd2880_attach failed\n"); + ret = -ENODEV; goto fail_attach; } diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index aa6861dcd3fd78fd0df75c358b1149564564371a..eb74cb8f259a9d2617de216761f125a7e2f09611 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -1362,9 +1362,16 @@ static void load_firmware_cb(const struct firmware *fw, void *context) { struct dvb_frontend *fe = context; - struct xc2028_data *priv = fe->tuner_priv; + struct xc2028_data *priv; int rc; + if (!fe) { + pr_warn("xc2028: No frontend in %s\n", __func__); + return; + } + + priv = fe->tuner_priv; + tuner_dbg("request_firmware_nowait(): %s\n", fw ? 
"OK" : "error"); if (!fw) { tuner_err("Could not load firmware %s.\n", priv->fname); diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index eb6d65dae74868cce98e12de25c8315f1dcdaf24..d32845be94e50e8d814e6af0d95f89163e0aa00b 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -1527,10 +1527,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; + mutex_lock(&priv->lock); *freq = priv->freq_hz + priv->freq_offset; if (debug) { - mutex_lock(&priv->lock); if ((priv->cur_fw.type & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) { u16 snr = 0; @@ -1541,8 +1541,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) return 0; } } - mutex_unlock(&priv->lock); } + mutex_unlock(&priv->lock); dprintk(1, "%s()\n", __func__); diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index cd363a2100d453c6b5be15b35430907acd44cdde..07e3322bb1827cd75568cd2f0e6dd9088e03298e 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -623,31 +623,30 @@ static int au0828_usb_probe(struct usb_interface *interface, /* Setup */ au0828_card_setup(dev); + /* + * Store the pointer to the au0828_dev so it can be accessed in + * au0828_usb_disconnect + */ + usb_set_intfdata(interface, dev); + /* Analog TV */ retval = au0828_analog_register(dev, interface); if (retval) { - pr_err("%s() au0282_dev_register failed to register on V4L2\n", + pr_err("%s() au0828_analog_register failed to register on V4L2\n", __func__); mutex_unlock(&dev->lock); - kfree(dev); goto done; } /* Digital TV */ retval = au0828_dvb_register(dev); if (retval) - pr_err("%s() au0282_dev_register failed\n", + pr_err("%s() au0828_dvb_register failed\n", __func__); /* Remote controller */ au0828_rc_register(dev); - /* - * Store the pointer to the au0828_dev so it can be accessed in - * au0828_usb_disconnect - */ - usb_set_intfdata(interface, dev); - pr_info("Registered device AU0828 [%s]\n", dev->board.name == NULL ? 
"Unset" : dev->board.name); diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 62b45062b1e685ebdc3dd271c439c57d78a90812..3e111f7f56dfeecee01350866c325ba5bcf55874 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d) dprintk(1, "au0828_analog_stream_enable called\n"); + if (test_bit(DEV_DISCONNECTED, &d->dev_state)) + return -ENODEV; + iface = usb_ifnum_to_if(d->usbdev, 0); if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) { dprintk(1, "Changing intf#0 to alt 5\n"); @@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count) return rc; } + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1); + if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { - v4l2_device_call_all(&dev->v4l2_dev, 0, video, - s_stream, 1); dev->vid_timeout_running = 1; mod_timer(&dev->vid_timeout, jiffies + (HZ / 10)); } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) { @@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq) dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users); - if (dev->streaming_users-- == 1) + if (dev->streaming_users-- == 1) { au0828_uninit_isoc(dev); + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); + } - v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); dev->vid_timeout_running = 0; del_timer_sync(&dev->vid_timeout); @@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq) dprintk(1, "au0828_stop_vbi_streaming called %d\n", dev->streaming_users); - if (dev->streaming_users-- == 1) + if (dev->streaming_users-- == 1) { au0828_uninit_isoc(dev); + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); + } spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.vbi_buf != NULL) { diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index a8f3169e30b31b278a45665805c565627134cdeb..427cda457af68e49e09cfaff4a927ddc80ab5292 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -294,7 +294,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, mutex_unlock(&fc_usb->data_mutex); - return 0; + return ret; } /* actual bus specific access functions, @@ -503,7 +503,13 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb) static int flexcop_usb_init(struct flexcop_usb *fc_usb) { /* use the alternate setting with the larges buffer */ - usb_set_interface(fc_usb->udev,0,1); + int ret = usb_set_interface(fc_usb->udev, 0, 1); + + if (ret) { + err("set interface failed."); + return ret; + } + switch (fc_usb->udev->speed) { case USB_SPEED_LOW: err("cannot handle USB speed because it is too slow."); @@ -537,6 +543,9 @@ static int flexcop_usb_probe(struct usb_interface *intf, struct flexcop_device *fc = NULL; int ret; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { err("out of memory\n"); return -ENOMEM; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index a771e0a52610c84e492f1bafccfebbfe9f73e7c5..4c191fcd3a7f55be1c7306bdfbb8b87d97e1ca36 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -685,6 +685,10 @@ static int submit_urbs(struct camera_data *cam) if (!urb) { for (j = 0; j < i; j++) usb_free_urb(cam->sbuf[j].urb); + for (j = 0; 
j < NUM_SBUF; j++) { + kfree(cam->sbuf[j].data); + cam->sbuf[j].data = NULL; + } return -ENOMEM; } @@ -902,7 +906,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf) cpia2_unregister_camera(cam); v4l2_device_disconnect(&cam->v4l2_dev); mutex_unlock(&cam->v4l2_lock); - v4l2_device_put(&cam->v4l2_dev); if(cam->buffers) { DBG("Wakeup waiting processes\n"); @@ -911,6 +914,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf) wake_up_interruptible(&cam->wq_stream); } + v4l2_device_put(&cam->v4l2_dev); + LOG("CPiA2 camera disconnected.\n"); } diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 99f106b13280f1f6d16685f69b8b8c7d1d316d11..d47318958fe566e0fcafb268350146c225e189f4 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -1244,8 +1244,7 @@ static int __init cpia2_init(void) LOG("%s v%s\n", ABOUT, CPIA_VERSION); check_parameters(); - cpia2_usb_init(); - return 0; + return cpia2_usb_init(); } diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig index 9e5b3e7c3ef524b989ec84601c80c37b306464f9..dc3322afb4c52f628281da8d188241c6dfc38421 100644 --- a/drivers/media/usb/cx231xx/Kconfig +++ b/drivers/media/usb/cx231xx/Kconfig @@ -1,6 +1,7 @@ config VIDEO_CX231XX tristate "Conexant cx231xx USB video capture support" depends on VIDEO_DEV && I2C && I2C_MUX + depends on BROKEN select VIDEO_TUNER select VIDEO_TVEEPROM select VIDEOBUF_VMALLOC diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index f7fcd733a2ca8de24360a30fc5e08a6d80902589..963739fa86718f296d5dba4afacc73593fa5b441 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -1389,7 +1389,7 @@ int cx231xx_g_register(struct file *file, void *priv, ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, (u16)reg->reg, value, 4); reg->val = value[0] | value[1] << 8 | - value[2] << 16 | value[3] << 24; + value[2] << 16 | (u32)value[3] << 24; reg->size = 4; break; case 1: /* AFE - read byte */ diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 1f6c1eefe38920c9f1e08d1f99158db7e715c838..2ed29a99fee1e962b0f293e5b38a34066445d2ce 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -284,6 +284,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, struct dvb_usb_device *d = i2c_get_adapdata(adap); struct state *state = d_to_priv(d); int ret; + u32 reg; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; @@ -336,8 +337,12 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) @@ -395,17 +400,18 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if 
(msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; - ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg, - &msg[0].buf[3], - msg[0].len - 3) - : -EOPNOTSUPP; + ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3); } else { /* I2C write */ u8 buf[MAX_XFER_SIZE]; @@ -472,6 +478,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } +unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c index 1aa88d94e57f5e54c96c097c86d1f9c91e82608b..ae0814dd202a6f344b31acc209b80b348ba4b80d 100644 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c @@ -31,6 +31,7 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct dvbsky_state { + struct mutex stream_mutex; u8 ibuf[DVBSKY_BUF_LEN]; u8 obuf[DVBSKY_BUF_LEN]; u8 last_lock; @@ -67,17 +68,18 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d, static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff) { + struct dvbsky_state *state = d_to_priv(d); int ret; - static u8 obuf_pre[3] = { 0x37, 0, 0 }; - static u8 obuf_post[3] = { 0x36, 3, 0 }; + u8 obuf_pre[3] = { 0x37, 0, 0 }; + u8 obuf_post[3] = { 0x36, 3, 0 }; - mutex_lock(&d->usb_mutex); - ret = dvb_usbv2_generic_rw_locked(d, obuf_pre, 3, NULL, 0); + mutex_lock(&state->stream_mutex); + ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0); if (!ret && onoff) { msleep(20); - ret = dvb_usbv2_generic_rw_locked(d, obuf_post, 3, NULL, 0); + ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0); } - mutex_unlock(&d->usb_mutex); + mutex_unlock(&state->stream_mutex); return ret; } @@ -606,21 +608,25 @@ static int dvbsky_init(struct dvb_usb_device *d) if (ret) return ret; */ + mutex_init(&state->stream_mutex); + state->last_lock = 0; return 0; } -static void dvbsky_exit(struct dvb_usb_device *d) +static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap) { + struct dvb_usb_device *d = adap_to_d(adap); struct dvbsky_state *state = d_to_priv(d); - struct dvb_usb_adapter *adap = &d->adapter[0]; + + dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id); dvb_module_release(state->i2c_client_tuner); dvb_module_release(state->i2c_client_demod); dvb_module_release(state->i2c_client_ci); - adap->fe[0] = NULL; + return 0; } /* DVB USB Driver stuff */ @@ -636,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_s960_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -663,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_s960c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -690,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_t680c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, 
.identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -717,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_t330_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .read_mac_address = dvbsky_read_mac_addr, .num_adapters = 1, @@ -744,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = { .i2c_algo = &dvbsky_i2c_algo, .frontend_attach = dvbsky_mygica_t230c_attach, + .frontend_detach = dvbsky_frontend_detach, .init = dvbsky_init, .get_rc_config = dvbsky_get_rc_config, .streaming_ctrl = dvbsky_streaming_ctrl, .identify_state = dvbsky_identify_state, - .exit = dvbsky_exit, .num_adapters = 1, .adapter = { diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 4713ba65e1c228b71c1a4c51ecc015cb58a610ad..da2f282b1b3c34bff0057eb9745692b030bbdf7b 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -932,8 +932,6 @@ static int mxl111sf_init(struct dvb_usb_device *d) .len = sizeof(eeprom), .buf = eeprom }, }; - mutex_init(&state->msg_lock); - ret = get_chip_info(state); if (mxl_fail(ret)) pr_err("failed to get chip info during probe"); @@ -1075,6 +1073,14 @@ static int mxl111sf_get_stream_config_dvbt(struct dvb_frontend *fe, return 0; } +static int mxl111sf_probe(struct dvb_usb_device *dev) +{ + struct mxl111sf_state *state = d_to_priv(dev); + + mutex_init(&state->msg_lock); + return 0; +} + static struct dvb_usb_device_properties mxl111sf_props_dvbt = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, @@ -1084,6 +1090,7 @@ static struct dvb_usb_device_properties mxl111sf_props_dvbt = { .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_dvbt, .tuner_attach = mxl111sf_attach_tuner, @@ -1125,6 +1132,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc = { .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_atsc, .tuner_attach = mxl111sf_attach_tuner, @@ -1166,6 +1174,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mh = { .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_mh, .tuner_attach = mxl111sf_attach_tuner, @@ -1234,6 +1243,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc_mh = { .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_atsc_mh, .tuner_attach = mxl111sf_attach_tuner, @@ -1312,6 +1322,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury = { .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_mercury, .tuner_attach = mxl111sf_attach_tuner, @@ -1382,6 +1393,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury_mh = { 
.generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, + .probe = mxl111sf_probe, .i2c_algo = &mxl111sf_i2c_algo, .frontend_attach = mxl111sf_frontend_attach_mercury_mh, .tuner_attach = mxl111sf_attach_tuner, diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c index 024c751eb16595e5616f345b3301d59142155dcc..2ad2ddeaff513f36b43c2574ba93c0abcf799173 100644 --- a/drivers/media/usb/dvb-usb-v2/usb_urb.c +++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c @@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream) stream->props.u.bulk.buffersize, usb_urb_complete, stream); - stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER; stream->urbs_initialized++; } return 0; @@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream) urb->complete = usb_urb_complete; urb->pipe = usb_rcvisocpipe(stream->udev, stream->props.endpoint); - urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER; + urb->transfer_flags = URB_ISO_ASAP; urb->interval = stream->props.u.isoc.interval; urb->number_of_packets = stream->props.u.isoc.framesperurb; urb->transfer_buffer_length = stream->props.u.isoc.framesize * @@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream) if (stream->state & USB_STATE_URB_BUF) { while (stream->buf_num) { stream->buf_num--; - stream->buf_list[stream->buf_num] = NULL; + kfree(stream->buf_list[stream->buf_num]); } } diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index 16e946e01d2caab9eaf68546067b3fb92f18731d..25871979123f4ac101b6d4cc0d5bd784b4d43767 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -985,8 +985,9 @@ static int af9005_identify_state(struct usb_device *udev, else if (reply == 0x02) *cold = 0; else - return -EIO; - deb_info("Identify state cold = %d\n", *cold); + ret = -EIO; + if (!ret) + deb_info("Identify state cold = %d\n", *cold); err: kfree(buf); diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index 6321b8e302612080d2e93352975198e6a1430ad4..555c8ac44881ece62f92e0a09072a71b8350c2ef 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -977,6 +977,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n if (msg[i].addr == 0x99) { req = 0xBE; index = 0; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } value = msg[i].buf[0] & 0x00ff; length = 1; az6027_usb_out_op(d, req, value, index, data, length); diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 5b51ed7d6243fe8572b126e45fba6cee83587f3e..5400ec99986fc8ad240e44101dc1576c43c59883 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -457,7 +457,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d) { u8 ircode[4]; - cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); + if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) + return 0; if (ircode[2] || ircode[3]) rc_keydown(d->rc_dev, RC_PROTO_NEC, diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 091389fdf89ee62f6e15f8c02f025cfd8ffe6c66..c8d79502827b7ecab4f7757624e30086de013bb1 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -2442,9 +2442,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap) 8, 0x0486, }; + if 
(!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); @@ -2520,10 +2524,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap) 0, 0x00ef, 8, 0x0406, }; + if (!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0) return -ENODEV; diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 40ca4eafb137412edd1bb58f24868da5069e017c..39ac22486bcd91b6823ba0055fc58444afccba59 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -287,12 +287,15 @@ EXPORT_SYMBOL(dvb_usb_device_init); void dvb_usb_device_exit(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); - const char *name = "generic DVB-USB module"; + const char *default_name = "generic DVB-USB module"; + char name[40]; usb_set_intfdata(intf, NULL); if (d != NULL && d->desc != NULL) { - name = d->desc->name; + strscpy(name, d->desc->name, sizeof(name)); dvb_usb_exit(d); + } else { + strscpy(name, default_name, sizeof(name)); } info("%s successfully deinitialized and disconnected.", name); diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index 0af74383083d25ece0606a4826e5d9053b29d5ed..ae793dac49648a104db367f66f8e7efc7b8203de 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c @@ -913,14 +913,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a) &a->dev->i2c_adap); if (!a->fe_adap[0].fe) return -ENODEV; - - /* - * dvb_frontend will call dvb_detach for both stb0899_detach - * and stb0899_release but we only do dvb_attach(stb0899_attach). - * Increment the module refcount instead. 
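A side note on the dvb_usb_device_exit() change above: the descriptor name is copied into a stack buffer before dvb_usb_exit() tears the device down, so the closing log line no longer reads memory that may already have been freed. Read in isolation (a plain-C sketch; the sketch_* names are placeholders, not the driver's symbols):

#include <stdio.h>
#include <stdlib.h>

struct sketch_dev {
        char name[40];
};

static void sketch_disconnect(struct sketch_dev *dev)
{
        char name[40];

        snprintf(name, sizeof(name), "%s", dev->name);  /* copy while still valid */
        free(dev);                                      /* dev->name dies here */
        printf("%s successfully deinitialized and disconnected.\n", name);
}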
- */ - symbol_get(stb0899_attach); - if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe, &a->dev->i2c_adap)) == NULL) err("Cannot attach lnbp22\n"); diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 18d0f8f5283fa6cb96af7702229a3dc196950a24..8d8e9f56a8be5c7d5fa04384157a1eee783137f3 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -607,10 +607,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) static int technisat_usb2_get_ir(struct dvb_usb_device *d) { struct technisat_usb2_state *state = d->priv; - u8 *buf = state->buf; - u8 *b; - int ret; struct ir_raw_event ev; + u8 *buf = state->buf; + int i, ret; buf[0] = GET_IR_DATA_VENDOR_REQUEST; buf[1] = 0x08; @@ -646,26 +645,25 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d) return 0; /* no key pressed */ /* decoding */ - b = buf+1; #if 0 deb_rc("RC: %d ", ret); - debug_dump(b, ret, deb_rc); + debug_dump(buf + 1, ret, deb_rc); #endif ev.pulse = 0; - while (1) { - ev.pulse = !ev.pulse; - ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; - ir_raw_event_store(d->rc_dev, &ev); - - b++; - if (*b == 0xff) { + for (i = 1; i < ARRAY_SIZE(state->buf); i++) { + if (buf[i] == 0xff) { ev.pulse = 0; ev.duration = 888888*2; ir_raw_event_store(d->rc_dev, &ev); break; } + + ev.pulse = !ev.pulse; + ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR * + FIRMWARE_CLOCK_TICK) / 1000; + ir_raw_event_store(d->rc_dev, &ev); } ir_raw_event_handle(d->rc_dev); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 71c829f31d3bb3b86f0e9469ab0135966d54e71d..00a3b5768b9a85f675fa073ed5f5c4567796e690 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -2141,13 +2141,13 @@ const struct em28xx_board em28xx_boards[] = { .input = { { .type = EM28XX_VMUX_COMPOSITE, .vmux = TVP5150_COMPOSITE1, - .amux = EM28XX_AUDIO_SRC_LINE, + .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = TVP5150_SVIDEO, - .amux = EM28XX_AUDIO_SRC_LINE, + .amux = EM28XX_AMUX_LINE_IN, .gpio = terratec_av350_unmute_gpio, } }, }, @@ -3039,6 +3039,9 @@ static int em28xx_hint_board(struct em28xx *dev) static void em28xx_card_setup(struct em28xx *dev) { + int i, j, idx; + bool duplicate_entry; + /* * If the device can be a webcam, seek for a sensor. * If sensor is not found, then it isn't a webcam. @@ -3195,6 +3198,32 @@ static void em28xx_card_setup(struct em28xx *dev) /* Allow override tuner type by a module parameter */ if (tuner >= 0) dev->tuner_type = tuner; + + /* + * Dynamically generate a list of valid audio inputs for this + * specific board, mapping them via enum em28xx_amux. 
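The hunk that follows builds the per-board audio-input map; as a rough plain-C illustration of that de-duplication, plus the audioset bitmask derived from it later in this patch (sketch_* names and sizes are placeholders, not the driver's symbols):

#include <stdbool.h>

#define SKETCH_MAX_INPUT   4
#define SKETCH_AMUX_UNUSED (-1)

static void sketch_build_amux_map(const int amux[SKETCH_MAX_INPUT],
                                  int map[SKETCH_MAX_INPUT])
{
        int idx = 0, i, j;

        for (i = 0; i < SKETCH_MAX_INPUT; i++) {
                bool dup = false;

                for (j = 0; j < idx; j++) {
                        if (amux[i] == map[j]) {
                                dup = true;
                                break;
                        }
                }
                if (!dup)
                        map[idx++] = amux[i];           /* keep first occurrence only */
        }
        for (; idx < SKETCH_MAX_INPUT; idx++)
                map[idx] = SKETCH_AMUX_UNUSED;          /* pad the rest as unused */
}

static unsigned int sketch_audioset(const int map[SKETCH_MAX_INPUT])
{
        unsigned int set = 0;
        int j;

        for (j = 0; j < SKETCH_MAX_INPUT; j++)
                if (map[j] != SKETCH_AMUX_UNUSED)
                        set |= 1u << j;                 /* one bit per usable input */
        return set;
}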
+ */ + + idx = 0; + for (i = 0; i < MAX_EM28XX_INPUT; i++) { + if (!INPUT(i)->type) + continue; + + /* Skip already mapped audio inputs */ + duplicate_entry = false; + for (j = 0; j < idx; j++) { + if (INPUT(i)->amux == dev->amux_map[j]) { + duplicate_entry = true; + break; + } + } + if (duplicate_entry) + continue; + + dev->amux_map[idx++] = INPUT(i)->amux; + } + for (; idx < MAX_EM28XX_INPUT; idx++) + dev->amux_map[idx] = EM28XX_AMUX_UNUSED; } void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl) @@ -3787,6 +3816,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, goto err_free; } + kref_init(&dev->ref); + dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; @@ -3887,6 +3918,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { + kref_init(&dev->dev_next->ref); + dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && @@ -3941,12 +3974,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } - - kref_init(&dev->dev_next->ref); } - kref_init(&dev->ref); - request_modules(dev); /* @@ -3991,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf) dev->dev_next->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->dev_next->name); - flush_request_modules(dev->dev_next); } dev->disconnected = 1; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 68571bf36d28623cad6be5908f4f6075dd947df5..3bf98ac897ec382e786d46bb1b74b66466ec0f43 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -1093,6 +1093,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count) em28xx_videodbg("%s\n", __func__); + dev->v4l2->field_count = 0; + /* * Make sure streaming is not already in progress for this type * of filehandle (e.g. video, vbi) @@ -1471,9 +1473,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) { - em28xx_videodbg("Fourcc format (%08x) invalid.\n", - f->fmt.pix.pixelformat); - return -EINVAL; + fmt = &format[0]; + em28xx_videodbg("Fourcc format (%08x) invalid. Using default (%08x).\n", + f->fmt.pix.pixelformat, fmt->fourcc); } if (dev->board.is_em2800) { @@ -1666,6 +1668,7 @@ static int vidioc_enum_input(struct file *file, void *priv, { struct em28xx *dev = video_drvdata(file); unsigned int n; + int j; n = i->index; if (n >= MAX_EM28XX_INPUT) @@ -1685,6 +1688,12 @@ static int vidioc_enum_input(struct file *file, void *priv, if (dev->is_webcam) i->capabilities = 0; + /* Dynamically generates an audioset bitmask */ + i->audioset = 0; + for (j = 0; j < MAX_EM28XX_INPUT; j++) + if (dev->amux_map[j] != EM28XX_AMUX_UNUSED) + i->audioset |= 1 << j; + return 0; } @@ -1710,11 +1719,24 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i) return 0; } -static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) +static int em28xx_fill_audio_input(struct em28xx *dev, + const char *s, + struct v4l2_audio *a, + unsigned int index) { - struct em28xx *dev = video_drvdata(file); + unsigned int idx = dev->amux_map[index]; + + /* + * With msp3400, almost all mappings use the default (amux = 0). + * The only one may use a different value is WinTV USB2, where it + * can also be SCART1 input. 
+ * As it is very doubtful that we would see new boards with msp3400, + * let's just reuse the existing switch. + */ + if (dev->has_msp34xx && idx != EM28XX_AMUX_UNUSED) + idx = EM28XX_AMUX_LINE_IN; - switch (a->index) { + switch (idx) { case EM28XX_AMUX_VIDEO: strcpy(a->name, "Television"); break; @@ -1739,32 +1761,79 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) case EM28XX_AMUX_PCM_OUT: strcpy(a->name, "PCM"); break; + case EM28XX_AMUX_UNUSED: default: return -EINVAL; } - - a->index = dev->ctl_ainput; + a->index = index; a->capability = V4L2_AUDCAP_STEREO; + em28xx_videodbg("%s: audio input index %d is '%s'\n", + s, a->index, a->name); + return 0; } +static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a) +{ + struct em28xx *dev = video_drvdata(file); + + if (a->index >= MAX_EM28XX_INPUT) + return -EINVAL; + + return em28xx_fill_audio_input(dev, __func__, a, a->index); +} + +static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) +{ + struct em28xx *dev = video_drvdata(file); + int i; + + for (i = 0; i < MAX_EM28XX_INPUT; i++) + if (dev->ctl_ainput == dev->amux_map[i]) + return em28xx_fill_audio_input(dev, __func__, a, i); + + /* Should never happen! */ + return -EINVAL; +} + static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *a) { struct em28xx *dev = video_drvdata(file); + int idx, i; if (a->index >= MAX_EM28XX_INPUT) return -EINVAL; - if (!INPUT(a->index)->type) + + idx = dev->amux_map[a->index]; + + if (idx == EM28XX_AMUX_UNUSED) return -EINVAL; - dev->ctl_ainput = INPUT(a->index)->amux; - dev->ctl_aoutput = INPUT(a->index)->aout; + dev->ctl_ainput = idx; + + /* + * FIXME: This is wrong, as different inputs at em28xx_cards + * may have different audio outputs. So, the right thing + * to do is to implement VIDIOC_G_AUDOUT/VIDIOC_S_AUDOUT. + * With the current board definitions, this would work fine, + * as, currently, all boards fit. + */ + for (i = 0; i < MAX_EM28XX_INPUT; i++) + if (idx == dev->amux_map[i]) + break; + if (i == MAX_EM28XX_INPUT) + return -EINVAL; + + dev->ctl_aoutput = INPUT(i)->aout; if (!dev->ctl_aoutput) dev->ctl_aoutput = EM28XX_AOUT_MASTER; + em28xx_videodbg("%s: set audio input to %d\n", __func__, + dev->ctl_ainput); + return 0; } @@ -2302,6 +2371,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, .vidioc_enum_framesizes = vidioc_enum_framesizes, + .vidioc_enumaudio = vidioc_enumaudio, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 953caac025f22a4acc2fc8b15beb85ba456fd58d..a551072e62ed1fb9a03293b389b8d4c6e27d9b30 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -335,6 +335,9 @@ enum em28xx_usb_audio_type { /** * em28xx_amux - describes the type of audio input used by em28xx * + * @EM28XX_AMUX_UNUSED: + * Used only on em28xx dev->map field, in order to mark an entry + * as unused. * @EM28XX_AMUX_VIDEO: * On devices without AC97, this is the only value that it is currently * allowed. @@ -369,7 +372,8 @@ enum em28xx_usb_audio_type { * same time, via the alsa mux. 
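A short aside on the enum change just below: prepending a negative sentinel does not disturb the existing enumerators, and spelling out the 0 on the first real entry only documents that zero-initialised fields keep their old meaning (in C an enumerator without an initialiser is simply previous + 1). Illustrative only, with placeholder names:

enum sketch_amux {
        SKETCH_AMUX_UNUSED  = -1,       /* marks an empty amux_map slot */
        SKETCH_AMUX_VIDEO   = 0,        /* explicit 0: unchanged historical value */
        SKETCH_AMUX_LINE_IN,            /* still 1, exactly as before */
};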
*/ enum em28xx_amux { - EM28XX_AMUX_VIDEO, + EM28XX_AMUX_UNUSED = -1, + EM28XX_AMUX_VIDEO = 0, EM28XX_AMUX_LINE_IN, /* Some less-common mixer setups */ @@ -692,6 +696,8 @@ struct em28xx { unsigned int ctl_input; // selected input unsigned int ctl_ainput;// selected audio input unsigned int ctl_aoutput;// selected audio output + enum em28xx_amux amux_map[MAX_EM28XX_INPUT]; + int mute; int volume; diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c index 62aeebcdd7f71bede5f47a654d9163e33df3e82d..89eb65624054bb6786a1fdf0483d4d4307ebae11 100644 --- a/drivers/media/usb/go7007/go7007-driver.c +++ b/drivers/media/usb/go7007/go7007-driver.c @@ -88,7 +88,7 @@ static int go7007_load_encoder(struct go7007 *go) const struct firmware *fw_entry; char fw_name[] = "go7007/go7007fw.bin"; void *bounce; - int fw_len, rv = 0; + int fw_len; u16 intr_val, intr_data; if (go->boot_fw == NULL) { @@ -117,9 +117,11 @@ static int go7007_load_encoder(struct go7007 *go) go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x5a5a) { v4l2_err(go, "error transferring firmware\n"); - rv = -1; + kfree(go->boot_fw); + go->boot_fw = NULL; + return -1; } - return rv; + return 0; } MODULE_FIRMWARE("go7007/go7007fw.bin"); diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c index 24f5b615dc7af78d65b3b41e94bec96633d93145..dfa9f899d0c2552c505979662521a0f1a9c66f44 100644 --- a/drivers/media/usb/go7007/go7007-fw.c +++ b/drivers/media/usb/go7007/go7007-fw.c @@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space) return cnt; } -static int do_special(struct go7007 *go, u16 type, __le16 *code, int space, - int *framelen) +static noinline_for_stack int do_special(struct go7007 *go, u16 type, + __le16 *code, int space, int *framelen) { switch (type) { case SPECIAL_FRM_HEAD: diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c index 137fc253b1228495e230b8466a8531230da1e6e5..96c37a131deb55530aa2d23ffaef99a1a391a679 100644 --- a/drivers/media/usb/go7007/snd-go7007.c +++ b/drivers/media/usb/go7007/snd-go7007.c @@ -244,22 +244,18 @@ int go7007_snd_init(struct go7007 *go) gosnd->capturing = 0; ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, &gosnd->card); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_snd; + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, &go7007_snd_device_ops); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); strlcpy(gosnd->card->longname, gosnd->card->shortname, @@ -270,11 +266,8 @@ int go7007_snd_init(struct go7007 *go) &go7007_snd_capture_ops); ret = snd_card_register(gosnd->card); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; gosnd->substream = NULL; go->snd_context = gosnd; @@ -282,6 +275,12 @@ int go7007_snd_init(struct go7007 *go) ++dev; return 0; + +free_card: + snd_card_free(gosnd->card); +free_snd: + kfree(gosnd); + return ret; } EXPORT_SYMBOL(go7007_snd_init); diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 
2b09af8865f40f5fc91cc943205e42cf921f9462..5e785343528cc03f2b6f71264b830aae56f1e158 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -28,6 +28,7 @@ #include #include +#include #include "gspca.h" @@ -1033,6 +1034,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply) sd->params.exposure.expMode = 2; sd->exposure_status = EXPOSURE_NORMAL; } + if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp)) + return -EINVAL; currentexp = currentexp << sd->params.exposure.gain; sd->params.exposure.gain = 0; /* round down current exposure to nearest value */ diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 57aa521e16b159005bea6206dee791873f4cf402..b12356c533a656f2d8318330004722a99fe07737 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev, /* check the packet status and length */ st = urb->iso_frame_desc[i].status; if (st) { - pr_err("ISOC data error: [%d] len=%d, status=%d\n", + gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n", i, len, st); gspca_dev->last_packet_type = DISCARD_PACKET; continue; @@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev, } resubmit: + if (!gspca_dev->streaming) + return; /* resubmit the URB */ st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) @@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb) struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; gspca_dbg(gspca_dev, D_PACK, "isoc irq\n"); - if (!vb2_start_streaming_called(&gspca_dev->queue)) + if (!gspca_dev->streaming) return; fill_frame(gspca_dev, urb); } @@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb) int st; gspca_dbg(gspca_dev, D_PACK, "bulk irq\n"); - if (!vb2_start_streaming_called(&gspca_dev->queue)) + if (!gspca_dev->streaming) return; switch (urb->status) { case 0: @@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb) urb->actual_length); resubmit: + if (!gspca_dev->streaming) + return; /* resubmit the URB */ if (gspca_dev->cam.bulk_nurbs != 0) { st = usb_submit_urb(urb, GFP_ATOMIC); @@ -426,10 +430,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev, /* append the packet to the frame buffer */ if (len > 0) { - if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) { + if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) { gspca_err(gspca_dev, "frame overflow %d > %d\n", gspca_dev->image_len + len, - gspca_dev->pixfmt.sizeimage); + PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)); packet_type = DISCARD_PACKET; } else { /* !! image is NULL only when last pkt is LAST or DISCARD @@ -1297,18 +1301,19 @@ static int gspca_queue_setup(struct vb2_queue *vq, unsigned int sizes[], struct device *alloc_devs[]) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq); + unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (*nplanes) - return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0; + return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; - sizes[0] = gspca_dev->pixfmt.sizeimage; + sizes[0] = size; return 0; } static int gspca_buffer_prepare(struct vb2_buffer *vb) { struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size = gspca_dev->pixfmt.sizeimage; + unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage); if (vb2_plane_size(vb, 0) < size) { gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n", @@ -1629,6 +1634,8 @@ void gspca_disconnect(struct usb_interface *intf) mutex_lock(&gspca_dev->usb_lock); gspca_dev->present = false; + destroy_urbs(gspca_dev); + gspca_input_destroy_urb(gspca_dev); vb2_queue_error(&gspca_dev->queue); diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c index 989ae997f66de8ccb174af33016556583294ef45..89b9293b31bef597b528814d0a95b4132050a49c 100644 --- a/drivers/media/usb/gspca/konica.c +++ b/drivers/media/usb/gspca/konica.c @@ -123,6 +123,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, 2); } } diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c index bedc04a72e97e01f540c83283aaae4e786445339..bde4441f935e79ff1414bc474f47af0bd1d88d15 100644 --- a/drivers/media/usb/gspca/nw80x.c +++ b/drivers/media/usb/gspca/nw80x.c @@ -1581,6 +1581,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); return; } if (len == 1) diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index 10fcbe9e8614b2f0d1d064a72573ac806309a43f..1e9835dc24d47b87643d5acbf256a609e7138e84 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -2083,6 +2083,11 @@ static int reg_r(struct sd *sd, u16 index) } else { gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return ret; @@ -2111,6 +2116,11 @@ static int reg_r8(struct sd *sd, } else { gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
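The same guard recurs in the gspca sub-drivers throughout this series; its shape in isolation, as a plain-C sketch (the transfer callback and sketch_* names are hypothetical):

#include <string.h>

/* read into a shared buffer; on failure, wipe it so callers never see stale bytes */
static int sketch_read_reg(int (*xfer)(unsigned char *dst, int len),
                           unsigned char *buf, int len)
{
        int ret = xfer(buf, len);

        if (ret < 0)
                memset(buf, 0, len);
        return ret;
}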
+ */ + memset(gspca_dev->usb_buf, 0, 8); } return ret; @@ -3477,6 +3487,11 @@ static void ov511_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5); @@ -3603,6 +3618,11 @@ static void ov518_mode_init_regs(struct sd *sd) return; } + if (alt->desc.bNumEndpoints < 1) { + sd->gspca_dev.usb_err = -ENODEV; + return; + } + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2); diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index d06dc0755b9a5586f93434a47440be353ee1e660..9e3326b66c7922fe067a9f6f2d7c4e01d3125b10 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -642,6 +642,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("read failed %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c index 3d1364d2f83e628d0b80adca5f683cf4d05b0003..4d4ae22e96406c53ee59a25871c46fbc294b4a62 100644 --- a/drivers/media/usb/gspca/ov534_9.c +++ b/drivers/media/usb/gspca/ov534_9.c @@ -1154,6 +1154,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + return 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c index 477da0664b7daf1a77537c6314c22bcb7778550d..40b87717bb5c5ab76e13970aead7d04b2520d361 100644 --- a/drivers/media/usb/gspca/se401.c +++ b/drivers/media/usb/gspca/se401.c @@ -111,6 +111,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent) pr_err("read req failed req %#04x error %d\n", req, err); gspca_dev->usb_err = err; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE); } } diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index cfa2a04d9f3f6a90cb4c34fd32f32c6f15ba3583..efca54ee0f35230f060d24930dcd771a7b245137 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -132,6 +132,13 @@ static const struct dmi_system_id flip_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0341") } }, + { + .ident = "MSI MS-1039", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"), + } + }, { .ident = "MSI MS-1632", .matches = { @@ -918,6 +925,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) if (unlikely(result < 0 || result != length)) { pr_err("Read register %02x failed %d\n", reg, result); gspca_dev->usb_err = result; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 5f3f2979540a64b7a868dd2573db3c329a6e08da..22de65d840dd3d980b5725b45762c02e9a1e6714 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -462,6 +462,11 @@ static void reg_r(struct gspca_dev *gspca_dev, dev_err(gspca_dev->v4l2_dev.dev, "Error reading register %02x: %d\n", value, res); gspca_dev->usb_err = res; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } } diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index df8d8482b79599974b5465b1fd4a995e02bcfb90..fa108ce000ad6e77143c97ed8769f87e958f7978 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -1171,6 +1171,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c index d25924e430f37b8a3f2c9af0fb208db0079491be..a20eb8580db2ea944100ecfd32ecafc9825f49eb 100644 --- a/drivers/media/usb/gspca/spca1528.c +++ b/drivers/media/usb/gspca/spca1528.c @@ -80,6 +80,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c index d7cbcf2b394794ac8d51529da921c2c51eb14065..3521f5ff428e9e28f545835497f5334cd0bd03a6 100644 --- a/drivers/media/usb/gspca/sq930x.c +++ b/drivers/media/usb/gspca/sq930x.c @@ -434,6 +434,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r %04x failed %d\n", value, ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 6080a35310ca03635a628b4cd4b52e5fdf5a46fb..b7ea4f982964805ea6234c5e477ddb2719d5e669 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -291,6 +291,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) @@ -315,11 +318,21 @@ static int stv06xx_start(struct gspca_dev *gspca_dev) static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); @@ -332,6 +345,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; + /* + * Existence of altsetting and endpoint was verified in + * stv06xx_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c index 7374aeb0a67a9aa8753c2c1dc88bb99b5f40f6cc..6f1ced4654722459cc7b57fb8561b6f2a970e362 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c @@ -194,6 +194,10 @@ static int pb0100_start(struct sd *sd) alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) return -ENODEV; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); /* If we don't have enough bandwidth use a lower framerate */ diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index 437a3367ab97488734b88685341a1eb8085aa98f..26eae69a2562f565a51f8f41895cbd44a2bb12f4 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -264,6 +264,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c index 52d0716596343dbd44a93b444f6c48abdc16dee9..6e32264d3825a5e744c8ed5de9c35f317c41131a 100644 --- a/drivers/media/usb/gspca/vc032x.c +++ b/drivers/media/usb/gspca/vc032x.c @@ -2915,6 +2915,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
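Several hunks in this area add the same defensive check before dereferencing endpoint[0]; the guard, reduced to plain C (struct layout and names are illustrative stand-ins only):

struct sketch_altsetting {
        int num_endpoints;                      /* how many endpoint descriptors follow */
        unsigned short max_packet_size[8];      /* stand-in for endpoint[i].desc.wMaxPacketSize */
};

/* refuse to touch endpoint 0 unless the altsetting actually provides one */
static int sketch_first_packet_size(const struct sketch_altsetting *alt)
{
        if (!alt || alt->num_endpoints < 1)
                return -1;
        return alt->max_packet_size[0];
}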
+ */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } static void reg_r(struct gspca_dev *gspca_dev, diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index abfab3de18662b1ed60499538e3fbe0f5c88664c..ef0a839f9b8aeccf5e61a5fe6d9fbe9bf1e8ed20 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -143,6 +143,11 @@ static int w9968cf_read_sb(struct sd *sd) } else { pr_err("Read SB reg [01] failed\n"); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(sd->gspca_dev.usb_buf, 0, 2); } udelay(W9968CF_I2C_BUS_DELAY); diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c index 58deb0c388267b07fed4c80e13c38db1728595fb..fa65c3eaa7259e12c7b4376144a3d19e5d98a03a 100644 --- a/drivers/media/usb/gspca/xirlink_cit.c +++ b/drivers/media/usb/gspca/xirlink_cit.c @@ -1452,6 +1452,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev) return -EIO; } + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); } @@ -2636,6 +2639,7 @@ static int sd_start(struct gspca_dev *gspca_dev) static int sd_isoc_init(struct gspca_dev *gspca_dev) { + struct usb_interface_cache *intfc; struct usb_host_interface *alt; int max_packet_size; @@ -2651,8 +2655,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev) break; } + intfc = gspca_dev->dev->actconfig->intf_cache[0]; + + if (intfc->num_altsetting < 2) + return -ENODEV; + + alt = &intfc->altsetting[1]; + + if (alt->desc.bNumEndpoints < 1) + return -ENODEV; + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size); return 0; @@ -2675,6 +2688,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev) break; } + /* + * Existence of altsetting and endpoint was verified in sd_isoc_init() + */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); if (packet_size <= min_packet_size) diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 29ac7fc5b039fe1a4f51c8becfe492cbc63e5a39..3316a17c141be265bbc5077896ca28daf0aeaa3c 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -141,6 +141,7 @@ static int device_authorization(struct hdpvr_device *dev) dev->fw_ver = dev->usbc_buf[1]; + dev->usbc_buf[46] = '\0'; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", dev->fw_ver, &dev->usbc_buf[2]); @@ -275,6 +276,7 @@ static int hdpvr_probe(struct usb_interface *interface, #endif size_t buffer_size; int i; + int dev_num; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ @@ -372,8 +374,17 @@ static int hdpvr_probe(struct usb_interface *interface, } #endif + dev_num = atomic_inc_return(&dev_nr); + if (dev_num >= HDPVR_MAX) { + v4l2_err(&dev->v4l2_dev, + "max device number reached, device register failed\n"); + atomic_dec(&dev_nr); + retval = -ENODEV; + goto reg_fail; + } + retval = hdpvr_register_videodev(dev, &interface->dev, - video_nr[atomic_inc_return(&dev_nr)]); + video_nr[dev_num]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto reg_fail; diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 
1b89c77bad6673d03600f2961f4c8fc96667b5dc..0615996572e41a618626795d36822ee466d324d2 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -439,7 +439,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count, /* wait for the first buffer */ if (!(file->f_flags & O_NONBLOCK)) { if (wait_event_interruptible(dev->wait_data, - hdpvr_get_next_buffer(dev))) + !list_empty_careful(&dev->rec_buff_list))) return -ERESTARTSYS; } @@ -465,10 +465,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count, goto err; } if (!err) { - v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, - "timeout: restart streaming\n"); + v4l2_info(&dev->v4l2_dev, + "timeout: restart streaming\n"); + mutex_lock(&dev->io_mutex); hdpvr_stop_streaming(dev); - msecs_to_jiffies(4000); + mutex_unlock(&dev->io_mutex); + /* + * The FW needs about 4 seconds after streaming + * stopped before it is ready to restart + * streaming. + */ + msleep(4000); err = hdpvr_start_streaming(dev); if (err) { ret = err; @@ -1133,9 +1140,7 @@ static void hdpvr_device_release(struct video_device *vdev) struct hdpvr_device *dev = video_get_drvdata(vdev); hdpvr_delete(dev); - mutex_lock(&dev->io_mutex); flush_work(&dev->worker); - mutex_unlock(&dev->io_mutex); v4l2_device_unregister(&dev->v4l2_dev); v4l2_ctrl_handler_free(&dev->hdl); diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index 350635826aaed39898519ba4844327ab9d4695eb..f5899321e5a317bcc4f271bf173ca1a25db26dc0 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -121,6 +121,7 @@ struct pulse8 { unsigned int vers; struct completion cmd_done; struct work_struct work; + u8 work_result; struct delayed_work ping_eeprom_work; struct cec_msg rx_msg; u8 data[DATA_SIZE]; @@ -142,8 +143,10 @@ static void pulse8_irq_work_handler(struct work_struct *work) { struct pulse8 *pulse8 = container_of(work, struct pulse8, work); + u8 result = pulse8->work_result; - switch (pulse8->data[0] & 0x3f) { + pulse8->work_result = 0; + switch (result & 0x3f) { case MSGCODE_FRAME_DATA: cec_received_msg(pulse8->adap, &pulse8->rx_msg); break; @@ -177,12 +180,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, pulse8->escape = false; } else if (data == MSGEND) { struct cec_msg *msg = &pulse8->rx_msg; + u8 msgcode = pulse8->buf[0]; if (debug) dev_info(pulse8->dev, "received: %*ph\n", pulse8->idx, pulse8->buf); - pulse8->data[0] = pulse8->buf[0]; - switch (pulse8->buf[0] & 0x3f) { + switch (msgcode & 0x3f) { case MSGCODE_FRAME_START: msg->len = 1; msg->msg[0] = pulse8->buf[1]; @@ -191,14 +194,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, if (msg->len == CEC_MAX_MSG_SIZE) break; msg->msg[msg->len++] = pulse8->buf[1]; - if (pulse8->buf[0] & MSGCODE_FRAME_EOM) + if (msgcode & MSGCODE_FRAME_EOM) { + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); + break; + } break; case MSGCODE_TRANSMIT_SUCCEEDED: case MSGCODE_TRANSMIT_FAILED_LINE: case MSGCODE_TRANSMIT_FAILED_ACK: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); break; case MSGCODE_HIGH_ERROR: @@ -585,7 +594,7 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) else pulse8->config_pending = true; 
mutex_unlock(&pulse8->config_lock); - return err; + return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err; } static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c index d9e8481e9e286e5f2d6dc7584155e995d7541cc2..89897cb7d29bddea2977e8c28f3a57c3d3da7508 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -276,8 +276,9 @@ static void pvr2_context_exit(struct pvr2_context *mp) void pvr2_context_disconnect(struct pvr2_context *mp) { pvr2_hdw_disconnect(mp->hdw); + if (!pvr2_context_shutok()) + pvr2_context_notify(mp); mp->disconnect_flag = !0; - pvr2_context_notify(mp); } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index a8519da0020bf82e9e15cfd5295cd9b6c02b5405..fcb201a40920e0281f15d05f55fb8550de245b0e 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp) static int ctrl_check_input(struct pvr2_ctrl *cptr,int v) { + if (v < 0 || v > PVR2_CVAL_INPUT_MAX) + return 0; return ((1 << v) & cptr->hdw->input_allowed_mask) != 0; } @@ -1678,7 +1680,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl) } if (!hdw->flag_decoder_missed) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, - "WARNING: No decoder present"); + "***WARNING*** No decoder present"); hdw->flag_decoder_missed = !0; trace_stbit("flag_decoder_missed", hdw->flag_decoder_missed); @@ -2364,7 +2366,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, if (hdw_desc->flag_is_experimental) { pvr2_trace(PVR2_TRACE_INFO, "**********"); pvr2_trace(PVR2_TRACE_INFO, - "WARNING: Support for this device (%s) is experimental.", + "***WARNING*** Support for this device (%s) is experimental.", hdw_desc->description); pvr2_trace(PVR2_TRACE_INFO, "Important functionality might not be entirely working."); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h index 25648add77e58c841ebf03672433cfb2e35da34d..bd2b7a67b7322dd3955d3dc49677625d31d4b969 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h @@ -50,6 +50,7 @@ #define PVR2_CVAL_INPUT_COMPOSITE 2 #define PVR2_CVAL_INPUT_SVIDEO 3 #define PVR2_CVAL_INPUT_RADIO 4 +#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO enum pvr2_config { pvr2_config_empty, /* No configuration */ diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c index f3003ca05f4ba370d3ccbc647b269ca5bd817c9c..922c06279663519e4e4240b229784cefb7702b1c 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c @@ -343,11 +343,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw, if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, - "WARNING: Detected a wedged cx25840 chip; the device will not work."); + "***WARNING*** Detected a wedged cx25840 chip; the device will not work."); pvr2_trace(PVR2_TRACE_ERROR_LEGS, - "WARNING: Try power cycling the pvrusb2 device."); + "***WARNING*** Try power cycling the pvrusb2 device."); pvr2_trace(PVR2_TRACE_ERROR_LEGS, - "WARNING: Disabling further access to the device to prevent other foul-ups."); + "***WARNING*** Disabling further access to the device to prevent other foul-ups."); // This blocks all further communication 
with the part. hdw->i2c_func[0x44] = NULL; pvr2_hdw_render_useless(hdw); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c index 6b651f8b54df0f7a713d34959b08c7bcac0cec39..37dc299a1ca2682e41913388ff2d11a87880dd58 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-std.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c @@ -353,7 +353,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr, bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk); pvr2_trace( PVR2_TRACE_ERROR_LEGS, - "WARNING: Failed to classify the following standard(s): %.*s", + "***WARNING*** Failed to classify the following standard(s): %.*s", bcnt,buf); } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index e53a80b589a15b3e5f7c2dd05aed728b65c2ffe5..04d334152eae2b64f60f4b26f236099d4a68518a 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -916,8 +916,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp) pvr2_v4l2_dev_disassociate_parent(vp->dev_video); pvr2_v4l2_dev_disassociate_parent(vp->dev_radio); if (!list_empty(&vp->dev_video->devbase.fh_list) || - !list_empty(&vp->dev_radio->devbase.fh_list)) + (vp->dev_radio && + !list_empty(&vp->dev_radio->devbase.fh_list))) { + pvr2_trace(PVR2_TRACE_STRUCT, + "pvr2_v4l2 internal_check exit-empty id=%p", vp); return; + } pvr2_v4l2_destroy_no_lock(vp); } @@ -953,7 +957,8 @@ static int pvr2_v4l2_release(struct file *file) kfree(fhp); if (vp->channel.mc_head->disconnect_flag && list_empty(&vp->dev_video->devbase.fh_list) && - list_empty(&vp->dev_radio->devbase.fh_list)) { + (!vp->dev_radio || + list_empty(&vp->dev_radio->devbase.fh_list))) { pvr2_v4l2_destroy_no_lock(vp); } return 0; diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index be3634407f1f361845d12f770dbbef272478c1e4..cd706874899c3c8e225557605110032125de384b 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -190,6 +190,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev) for (i = 0; i < MAX_URBS; i++) { usb_kill_urb(&dev->surbs[i].urb); + if (dev->surbs[i].wq.func) + cancel_work_sync(&dev->surbs[i].wq); if (dev->surbs[i].cb) { smscore_putbuffer(dev->coredev, dev->surbs[i].cb); @@ -401,6 +403,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) struct smsusb_device_t *dev; void *mdev; int i, rc; + int align = 0; /* create device object */ dev = kzalloc(sizeof(struct smsusb_device_t), GFP_KERNEL); @@ -412,6 +415,24 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) dev->udev = interface_to_usbdev(intf); dev->state = SMSUSB_DISCONNECTED; + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + struct usb_endpoint_descriptor *desc = + &intf->cur_altsetting->endpoint[i].desc; + + if (desc->bEndpointAddress & USB_DIR_IN) { + dev->in_ep = desc->bEndpointAddress; + align = usb_endpoint_maxp(desc) - sizeof(struct sms_msg_hdr); + } else { + dev->out_ep = desc->bEndpointAddress; + } + } + + pr_debug("in_ep = %02x, out_ep = %02x\n", dev->in_ep, dev->out_ep); + if (!dev->in_ep || !dev->out_ep || align < 0) { /* Missing endpoints? 
*/ + smsusb_term_device(intf); + return -ENODEV; + } + params.device_type = sms_get_board(board_id)->type; switch (params.device_type) { @@ -426,24 +447,12 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id) /* fall-thru */ default: dev->buffer_size = USB2_BUFFER_SIZE; - dev->response_alignment = - le16_to_cpu(dev->udev->ep_in[1]->desc.wMaxPacketSize) - - sizeof(struct sms_msg_hdr); + dev->response_alignment = align; params.flags |= SMS_DEVICE_FAMILY2; break; } - for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { - if (intf->cur_altsetting->endpoint[i].desc. bEndpointAddress & USB_DIR_IN) - dev->in_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress; - else - dev->out_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress; - } - - pr_debug("in_ep = %02x, out_ep = %02x\n", - dev->in_ep, dev->out_ep); - params.device = &dev->udev->dev; params.usb_device = dev->udev; params.buffer_size = dev->buffer_size; diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c index 2811f612820fc1b2c1f6db64fa744daf165f2083..c7c83980c2a8269dba48efefdff13ad29e27aa07 100644 --- a/drivers/media/usb/stk1160/stk1160-video.c +++ b/drivers/media/usb/stk1160/stk1160-video.c @@ -109,7 +109,7 @@ void stk1160_buffer_done(struct stk1160 *dev) static inline void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) { - int linesdone, lineoff, lencopy; + int linesdone, lineoff, lencopy, offset; int bytesperline = dev->width * 2; struct stk1160_buffer *buf = dev->isoc_ctl.buf; u8 *dst = buf->mem; @@ -150,8 +150,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) * Check if we have enough space left in the buffer. * In that case, we force loop exit after copy. */ - if (lencopy > buf->bytesused - buf->length) { - lencopy = buf->bytesused - buf->length; + offset = dst - (u8 *)buf->mem; + if (offset > buf->length) { + dev_warn_ratelimited(dev->dev, "out of bounds offset\n"); + return; + } + if (lencopy > buf->length - offset) { + lencopy = buf->length - offset; remain = lencopy; } @@ -193,8 +198,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) * Check if we have enough space left in the buffer. * In that case, we force loop exit after copy. 
*/ - if (lencopy > buf->bytesused - buf->length) { - lencopy = buf->bytesused - buf->length; + offset = dst - (u8 *)buf->mem; + if (offset > buf->length) { + dev_warn_ratelimited(dev->dev, "offset out of bounds\n"); + return; + } + if (lencopy > buf->length - offset) { + lencopy = buf->length - offset; remain = lencopy; } diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 5accb52410720196b24181e18ee0620e68dbc223..e33fa78ef98dda3b36c77640e3e8f6fd8a156d94 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -164,7 +164,11 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value) *value = *buf; kfree(buf); - return ret; + + if (ret < 0) + return ret; + else + return 0; } static int stk_start_stream(struct stk_camera *dev) @@ -641,8 +645,7 @@ static int v4l_stk_release(struct file *fp) dev->owner = NULL; } - if (is_present(dev)) - usb_autopm_put_interface(dev->interface); + usb_autopm_put_interface(dev->interface); mutex_unlock(&dev->lock); return v4l2_fh_release(fp); } diff --git a/drivers/media/usb/tm6000/Kconfig b/drivers/media/usb/tm6000/Kconfig index a43b77abd931996199ec83c49734e8a3afefa652..ef2403e4414dc2c7c755c2d775f3be9c1766198b 100644 --- a/drivers/media/usb/tm6000/Kconfig +++ b/drivers/media/usb/tm6000/Kconfig @@ -1,6 +1,7 @@ config VIDEO_TM6000 tristate "TV Master TM5600/6000/6010 driver" depends on VIDEO_DEV && I2C && INPUT && RC_CORE && USB + depends on BROKEN select VIDEO_TUNER select MEDIA_TUNER_XC2028 select MEDIA_TUNER_XC5000 diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 3a4e545c603745126c4a4e8f6bfdbc58a54f1092..3db2fd7f5d7c49a2ec768d26a73374921fd7e82c 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -105,6 +105,7 @@ static void tm6000_urb_received(struct urb *urb) printk(KERN_ERR "tm6000: error %s\n", __func__); kfree(urb->transfer_buffer); usb_free_urb(urb); + dev->dvb->bulk_urb = NULL; } } } @@ -135,6 +136,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL); if (!dvb->bulk_urb->transfer_buffer) { usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; return -ENOMEM; } @@ -161,6 +163,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) kfree(dvb->bulk_urb->transfer_buffer); usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; return ret; } diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 44ca66cb9b8f141e15d7ca0ccc0b7ae5c2abee60..c915e555897ba336748e04656daef1516878da58 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -329,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command, dprintk("%s\n", __func__); - b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); + b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); if (!b) return -ENOMEM; @@ -1561,8 +1561,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec) dvb_dmx_release(&dec->demux); if (dec->fe) { dvb_unregister_frontend(dec->fe); - if (dec->fe->ops.release) - dec->fe->ops.release(dec->fe); + dvb_frontend_detach(dec->fe); } dvb_unregister_adapter(&dec->adapter); } diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 36a9a401718574fc7c8809934e035cc30b4eb546..096fca028d1ff4b1669ce2994e55a6a71b6b1813 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ 
b/drivers/media/usb/usbtv/usbtv-video.c @@ -963,15 +963,8 @@ int usbtv_video_init(struct usbtv *usbtv) void usbtv_video_free(struct usbtv *usbtv) { - mutex_lock(&usbtv->vb2q_lock); - mutex_lock(&usbtv->v4l2_lock); - - usbtv_stop(usbtv); video_unregister_device(&usbtv->vdev); v4l2_device_disconnect(&usbtv->v4l2_dev); - mutex_unlock(&usbtv->v4l2_lock); - mutex_unlock(&usbtv->vb2q_lock); - v4l2_device_put(&usbtv->v4l2_dev); } diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index f29d1bef029367f241dbbb1125199ee2d99f2525..cce29b604f4ac70568a7c306db53f4c1e6afa50e 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -327,6 +327,10 @@ static int usbvision_v4l2_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto unlock; + } if (usbvision->user) { err_code = -EBUSY; } else { @@ -390,6 +394,7 @@ static int usbvision_v4l2_open(struct file *file) static int usbvision_v4l2_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, "close"); @@ -404,9 +409,10 @@ static int usbvision_v4l2_close(struct file *file) usbvision_scratch_free(usbvision); usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); return 0; @@ -1090,6 +1096,11 @@ static int usbvision_radio_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto out; + } err_code = v4l2_fh_open(file); if (err_code) goto out; @@ -1122,6 +1133,7 @@ static int usbvision_radio_open(struct file *file) static int usbvision_radio_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, ""); @@ -1134,9 +1146,10 @@ static int usbvision_radio_close(struct file *file) usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); v4l2_fh_release(file); usbvision_release(usbvision); @@ -1562,6 +1575,7 @@ static int usbvision_probe(struct usb_interface *intf, static void usbvision_disconnect(struct usb_interface *intf) { struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); + int u; PDEBUG(DBG_PROBE, ""); @@ -1578,13 +1592,14 @@ static void usbvision_disconnect(struct usb_interface *intf) v4l2_device_disconnect(&usbvision->v4l2_dev); usbvision_i2c_unregister(usbvision); usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ + u = usbvision->user; usb_put_dev(usbvision->dev); usbvision->dev = NULL; /* USB device is no more */ mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->user) { + if (u) { printk(KERN_INFO "%s: In use, disconnect pending\n", __func__); wake_up_interruptible(&usbvision->wait_frame); diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index c2ad102bd693d219846c400bf09b328ee92825e5..f2854337cdcac8073bd59854bfd58eb22d2480fc 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain, __uvc_query_v4l2_ctrl(chain, 
ctrl, mapping, &v4l2_ctrl); - memset(ev->reserved, 0, sizeof(ev->reserved)); + memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = v4l2_ctrl.id; ev->u.ctrl.value = value; @@ -2350,7 +2350,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev) struct uvc_entity *entity; unsigned int i; - cancel_work_sync(&dev->async_ctrl.work); + /* Can be uninitialized if we are aborting on probe error. */ + if (dev->async_ctrl.work.func) + cancel_work_sync(&dev->async_ctrl.work); /* Free controls and control mappings for all entities. */ list_for_each_entry(entity, &dev->entities, list) { diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index d46dc432456c4fc4a1fe72bab319b9bc3eb1fdd8..24e05b375137116ff1116178d097a56ccfb82415 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -390,6 +390,39 @@ static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) return NULL; } +/* ------------------------------------------------------------------------ + * Streaming Object Management + */ + +static void uvc_stream_delete(struct uvc_streaming *stream) +{ + mutex_destroy(&stream->mutex); + + usb_put_intf(stream->intf); + + kfree(stream->format); + kfree(stream->header.bmaControls); + kfree(stream); +} + +static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev, + struct usb_interface *intf) +{ + struct uvc_streaming *stream; + + stream = kzalloc(sizeof(*stream), GFP_KERNEL); + if (stream == NULL) + return NULL; + + mutex_init(&stream->mutex); + + stream->dev = dev; + stream->intf = usb_get_intf(intf); + stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; + + return stream; +} + /* ------------------------------------------------------------------------ * Descriptors parsing */ @@ -542,7 +575,7 @@ static int uvc_parse_format(struct uvc_device *dev, /* Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ - while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && + while (ftype && buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE && buffer[2] == ftype) { frame = &format->frame[format->nframes]; if (ftype != UVC_VS_FRAME_FRAME_BASED) @@ -682,17 +715,12 @@ static int uvc_parse_streaming(struct uvc_device *dev, return -EINVAL; } - streaming = kzalloc(sizeof(*streaming), GFP_KERNEL); + streaming = uvc_stream_new(dev, intf); if (streaming == NULL) { usb_driver_release_interface(&uvc_driver.driver, intf); - return -EINVAL; + return -ENOMEM; } - mutex_init(&streaming->mutex); - streaming->dev = dev; - streaming->intf = usb_get_intf(intf); - streaming->intfnum = intf->cur_altsetting->desc.bInterfaceNumber; - /* The Pico iMage webcam has its class-specific interface descriptors * after the endpoint descriptors. */ @@ -899,10 +927,7 @@ static int uvc_parse_streaming(struct uvc_device *dev, error: usb_driver_release_interface(&uvc_driver.driver, intf); - usb_put_intf(intf); - kfree(streaming->format); - kfree(streaming->header.bmaControls); - kfree(streaming); + uvc_stream_delete(streaming); return ret; } @@ -914,7 +939,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, unsigned int size; unsigned int i; - extra_size = ALIGN(extra_size, sizeof(*entity->pads)); + extra_size = roundup(extra_size, sizeof(*entity->pads)); num_inputs = (type & UVC_TERM_OUTPUT) ? 
num_pads : num_pads - 1; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; @@ -1065,11 +1090,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - /* Make sure the terminal type MSB is not null, otherwise it - * could be confused with a unit. + /* + * Reject invalid terminal types that would cause issues: + * + * - The high byte must be non-zero, otherwise it would be + * confused with a unit. + * + * - Bit 15 must be 0, as we use it internally as a terminal + * direction flag. + * + * Other unknown types are accepted. */ type = get_unaligned_le16(&buffer[4]); - if ((type & 0xff00) == 0) { + if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " "interface %d INPUT_TERMINAL %d has invalid " "type 0x%04x, skipping\n", udev->devnum, @@ -1449,6 +1482,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain, break; if (forward == prev) continue; + if (forward->chain.next || forward->chain.prev) { + uvc_trace(UVC_TRACE_DESCR, "Found reference to " + "entity %d already in chain.\n", forward->id); + return -EINVAL; + } switch (UVC_ENTITY_TYPE(forward)) { case UVC_VC_EXTENSION_UNIT: @@ -1530,6 +1568,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain, return -1; } + if (term->chain.next || term->chain.prev) { + uvc_trace(UVC_TRACE_DESCR, "Found reference to " + "entity %d already in chain.\n", + term->id); + return -EINVAL; + } + if (uvc_trace_param & UVC_TRACE_PROBE) printk(KERN_CONT " %d", term->id); @@ -1810,7 +1855,7 @@ static int uvc_scan_device(struct uvc_device *dev) * is released. * * As this function is called after or during disconnect(), all URBs have - * already been canceled by the USB core. There is no need to kill the + * already been cancelled by the USB core. There is no need to kill the * interrupt URB manually. */ static void uvc_delete(struct kref *kref) @@ -1824,11 +1869,7 @@ static void uvc_delete(struct kref *kref) usb_put_intf(dev->intf); usb_put_dev(dev->udev); - if (dev->vdev.dev) - v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER - if (media_devnode_is_registered(dev->mdev.devnode)) - media_device_unregister(&dev->mdev); media_device_cleanup(&dev->mdev); #endif @@ -1852,10 +1893,7 @@ static void uvc_delete(struct kref *kref) streaming = list_entry(p, struct uvc_streaming, list); usb_driver_release_interface(&uvc_driver.driver, streaming->intf); - usb_put_intf(streaming->intf); - kfree(streaming->format); - kfree(streaming->header.bmaControls); - kfree(streaming); + uvc_stream_delete(streaming); } kfree(dev); @@ -1885,6 +1923,15 @@ static void uvc_unregister_video(struct uvc_device *dev) uvc_debugfs_cleanup_stream(stream); } + + uvc_status_unregister(dev); + + if (dev->vdev.dev) + v4l2_device_unregister(&dev->vdev); +#ifdef CONFIG_MEDIA_CONTROLLER + if (media_devnode_is_registered(dev->mdev.devnode)) + media_device_unregister(&dev->mdev); +#endif } int uvc_register_video_device(struct uvc_device *dev, @@ -2111,6 +2158,20 @@ static int uvc_probe(struct usb_interface *intf, sizeof(dev->name) - len); } + /* Initialize the media device. 
*/ +#ifdef CONFIG_MEDIA_CONTROLLER + dev->mdev.dev = &intf->dev; + strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); + if (udev->serial) + strscpy(dev->mdev.serial, udev->serial, + sizeof(dev->mdev.serial)); + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); + dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); + media_device_init(&dev->mdev); + + dev->vdev.mdev = &dev->mdev; +#endif + /* Parse the Video Class control descriptor. */ if (uvc_parse_control(dev) < 0) { uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC " @@ -2131,19 +2192,7 @@ static int uvc_probe(struct usb_interface *intf, "linux-uvc-devel mailing list.\n"); } - /* Initialize the media device and register the V4L2 device. */ -#ifdef CONFIG_MEDIA_CONTROLLER - dev->mdev.dev = &intf->dev; - strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); - if (udev->serial) - strlcpy(dev->mdev.serial, udev->serial, - sizeof(dev->mdev.serial)); - strcpy(dev->mdev.bus_info, udev->devpath); - dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - media_device_init(&dev->mdev); - - dev->vdev.mdev = &dev->mdev; -#endif + /* Register the V4L2 device. */ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) goto error; diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c index 0722dc684378fd718e30cb7c15a8c9138a25a8ae..883e4cab45e79b1d3e1d0636963acfac73aa420b 100644 --- a/drivers/media/usb/uvc/uvc_status.c +++ b/drivers/media/usb/uvc/uvc_status.c @@ -54,7 +54,7 @@ static int uvc_input_init(struct uvc_device *dev) return ret; } -static void uvc_input_cleanup(struct uvc_device *dev) +static void uvc_input_unregister(struct uvc_device *dev) { if (dev->input) input_unregister_device(dev->input); @@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code, #else #define uvc_input_init(dev) -#define uvc_input_cleanup(dev) +#define uvc_input_unregister(dev) #define uvc_input_report_key(dev, code, value) #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */ @@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev) return 0; } -void uvc_status_cleanup(struct uvc_device *dev) +void uvc_status_unregister(struct uvc_device *dev) { usb_kill_urb(dev->int_urb); + uvc_input_unregister(dev); +} + +void uvc_status_cleanup(struct uvc_device *dev) +{ usb_free_urb(dev->int_urb); kfree(dev->status); - uvc_input_cleanup(dev); } int uvc_status_start(struct uvc_device *dev, gfp_t flags) diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 86a99f461fd8a7b92aee45641c47ccdc05fe85fb..ffffb66d51a00415b16dfc8e6094b25ff5da3868 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream, if (!uvc_hw_timestamps_param) return; + /* + * We will get called from __vb2_queue_cancel() if there are buffers + * done but not dequeued by the user, but the sample array has already + * been released at that time. Just bail out in that case. 
+ */ + if (!clock->samples) + return; + spin_lock_irqsave(&clock->lock, flags); if (clock->count < clock->size) diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index e5f5d84f1d1d5821e8ce5a8b1d30e69d9d730735..a738486fd9d649d94a2807de6c5560b23dec5e17 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -750,6 +750,7 @@ int uvc_register_video_device(struct uvc_device *dev, /* Status */ int uvc_status_init(struct uvc_device *dev); +void uvc_status_unregister(struct uvc_device *dev); void uvc_status_cleanup(struct uvc_device *dev); int uvc_status_start(struct uvc_device *dev, gfp_t flags); void uvc_status_stop(struct uvc_device *dev); diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/media/usb/zr364xx/Kconfig index ac429bca70e8a671476224c1575b70b097629ed0..60c466e907c7a701884d4f10b8d5878a7b3ed371 100644 --- a/drivers/media/usb/zr364xx/Kconfig +++ b/drivers/media/usb/zr364xx/Kconfig @@ -1,7 +1,7 @@ config USB_ZR364XX tristate "USB ZR364XX Camera support" depends on VIDEO_V4L2 - select VIDEOBUF_GEN + depends on BROKEN select VIDEOBUF_VMALLOC ---help--- Say Y here if you want to connect this type of camera to your diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c index b8886102c5ed6325b6ef36eddc34bca80e1a5859..f92d7688b07ad1ec78e598781b5c1eb9e320a582 100644 --- a/drivers/media/usb/zr364xx/zr364xx.c +++ b/drivers/media/usb/zr364xx/zr364xx.c @@ -703,7 +703,8 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv, struct zr364xx_camera *cam = video_drvdata(file); strlcpy(cap->driver, DRIVER_DESC, sizeof(cap->driver)); - strlcpy(cap->card, cam->udev->product, sizeof(cap->card)); + if (cam->udev->product) + strlcpy(cap->card, cam->udev->product, sizeof(cap->card)); strlcpy(cap->bus_info, dev_name(&cam->udev->dev), sizeof(cap->bus_info)); cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | @@ -1057,6 +1058,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam) DBG("submitting URB %p\n", pipe_info->stream_urb); retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL); if (retval) { + usb_free_urb(pipe_info->stream_urb); printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n"); return retval; } diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index b97090e859968c9962261780681efff870708fd3..9e8d714a043ef932ef4d13a4415fc541e110bf13 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -60,17 +60,14 @@ config V4L2_FWNODE tristate # Used by drivers that need Videobuf modules -config VIDEOBUF_GEN - tristate - config VIDEOBUF_DMA_SG tristate - select VIDEOBUF_GEN + depends on BROKEN config VIDEOBUF_VMALLOC tristate - select VIDEOBUF_GEN + depends on BROKEN config VIDEOBUF_DMA_CONTIG tristate - select VIDEOBUF_GEN + depends on BROKEN diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 9ee57e1efefed177d04c0f5857dfa39812ae2d01..a4bf5a62d0eae1fa0056098aa59ba86898d7c006 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -27,7 +27,6 @@ obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o -obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c 
b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 6481212fda772c734e0c2222dde8611932cbdaf1..cd47a12bbf1279af543f1facedc82177a40dce2b 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -158,7 +158,7 @@ static int get_v4l2_window32(struct v4l2_window __user *p64, compat_caddr_t p; u32 clipcount; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || copy_in_user(&p64->w, &p32->w, sizeof(p32->w)) || assign_in_user(&p64->field, &p32->field) || assign_in_user(&p64->chromakey, &p32->chromakey) || @@ -281,7 +281,7 @@ static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) static int bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) { - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) + if (!access_ok(p32, sizeof(*p32))) return -EFAULT; return __bufsize_v4l2_format(p32, size); } @@ -332,7 +332,7 @@ static int get_v4l2_format32(struct v4l2_format __user *p64, struct v4l2_format32 __user *p32, void __user *aux_buf, u32 aux_space) { - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) + if (!access_ok(p32, sizeof(*p32))) return -EFAULT; return __get_v4l2_format32(p64, p32, aux_buf, aux_space); } @@ -340,7 +340,7 @@ static int get_v4l2_format32(struct v4l2_format __user *p64, static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *p32, u32 *size) { - if (!access_ok(VERIFY_READ, p32, sizeof(*p32))) + if (!access_ok(p32, sizeof(*p32))) return -EFAULT; return __bufsize_v4l2_format(&p32->format, size); } @@ -349,7 +349,7 @@ static int get_v4l2_create32(struct v4l2_create_buffers __user *p64, struct v4l2_create_buffers32 __user *p32, void __user *aux_buf, u32 aux_space) { - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || copy_in_user(p64, p32, offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; @@ -400,7 +400,7 @@ static int __put_v4l2_format32(struct v4l2_format __user *p64, static int put_v4l2_format32(struct v4l2_format __user *p64, struct v4l2_format32 __user *p32) { - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32))) + if (!access_ok(p32, sizeof(*p32))) return -EFAULT; return __put_v4l2_format32(p64, p32); } @@ -408,7 +408,7 @@ static int put_v4l2_format32(struct v4l2_format __user *p64, static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, struct v4l2_create_buffers32 __user *p32) { - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || copy_in_user(p32, p64, offsetof(struct v4l2_create_buffers32, format)) || copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) @@ -429,7 +429,7 @@ static int get_v4l2_standard32(struct v4l2_standard __user *p64, struct v4l2_standard32 __user *p32) { /* other fields are not set by the user, nor used by the driver */ - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p64->index, &p32->index)) return -EFAULT; return 0; @@ -438,7 +438,7 @@ static int get_v4l2_standard32(struct v4l2_standard __user *p64, static int put_v4l2_standard32(struct v4l2_standard __user *p64, struct v4l2_standard32 __user *p32) { - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p32->index, &p64->index) || assign_in_user(&p32->id, &p64->id) || copy_in_user(p32->name, p64->name, sizeof(p32->name)) || @@ -555,7 +555,7 @@ static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *p32, u32 *size) u32 type; u32 length; - if 
(!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || get_user(type, &p32->type) || get_user(length, &p32->length)) return -EFAULT; @@ -587,7 +587,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, compat_caddr_t p; int ret; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p64->index, &p32->index) || get_user(type, &p32->type) || put_user(type, &p64->type) || @@ -624,7 +624,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, return -EFAULT; uplane32 = compat_ptr(p); - if (!access_ok(VERIFY_READ, uplane32, + if (!access_ok(uplane32, num_planes * sizeof(*uplane32))) return -EFAULT; @@ -683,7 +683,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64, compat_caddr_t p; int ret; - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p32->index, &p64->index) || get_user(type, &p64->type) || put_user(type, &p32->type) || @@ -773,7 +773,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *p64, { compat_caddr_t tmp; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || get_user(tmp, &p32->base) || put_user_force(compat_ptr(tmp), &p64->base) || assign_in_user(&p64->capability, &p32->capability) || @@ -788,7 +788,7 @@ static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *p64, { void *base; - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || get_user(base, &p64->base) || put_user(ptr_to_compat((void __user *)base), &p32->base) || assign_in_user(&p32->capability, &p64->capability) || @@ -884,7 +884,7 @@ static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *p32, { u32 count; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || get_user(count, &p32->count)) return -EFAULT; if (count > V4L2_CID_MAX_CTRLS) @@ -904,7 +904,7 @@ static int get_v4l2_ext_controls32(struct file *file, u32 n; compat_caddr_t p; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p64->which, &p32->which) || get_user(count, &p32->count) || put_user(count, &p64->count) || @@ -919,7 +919,7 @@ static int get_v4l2_ext_controls32(struct file *file, if (get_user(p, &p32->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) + if (!access_ok(ucontrols, count * sizeof(*ucontrols))) return -EFAULT; if (aux_space < count * sizeof(*kcontrols)) return -EFAULT; @@ -969,7 +969,7 @@ static int put_v4l2_ext_controls32(struct file *file, * with __user causes smatch warnings, so instead declare it * without __user and cast it as a userspace pointer where needed. 
*/ - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p32->which, &p64->which) || get_user(count, &p64->count) || put_user(count, &p32->count) || @@ -983,7 +983,7 @@ static int put_v4l2_ext_controls32(struct file *file, if (get_user(p, &p32->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) + if (!access_ok(ucontrols, count * sizeof(*ucontrols))) return -EFAULT; for (n = 0; n < count; n++) { @@ -1032,7 +1032,7 @@ struct v4l2_event32 { static int put_v4l2_event32(struct v4l2_event __user *p64, struct v4l2_event32 __user *p32) { - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p32->type, &p64->type) || copy_in_user(&p32->u, &p64->u, sizeof(p64->u)) || assign_in_user(&p32->pending, &p64->pending) || @@ -1058,7 +1058,7 @@ static int get_v4l2_edid32(struct v4l2_edid __user *p64, { compat_uptr_t tmp; - if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p64->pad, &p32->pad) || assign_in_user(&p64->start_block, &p32->start_block) || assign_in_user_cast(&p64->blocks, &p32->blocks) || @@ -1074,7 +1074,7 @@ static int put_v4l2_edid32(struct v4l2_edid __user *p64, { void *edid; - if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || + if (!access_ok(p32, sizeof(*p32)) || assign_in_user(&p32->pad, &p64->pad) || assign_in_user(&p32->start_block, &p64->start_block) || assign_in_user(&p32->blocks, &p64->blocks) || diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 599c1cbff3b9e9e4cc584b3a4b395d11094ec995..f4ebff347d7a3de5784d4e0ad4076a0fde6fdee2 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1145,6 +1145,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_FLASH_STROBE_STOP: case V4L2_CID_AUTO_FOCUS_START: case V4L2_CID_AUTO_FOCUS_STOP: + case V4L2_CID_DO_WHITE_BALANCE: *type = V4L2_CTRL_TYPE_BUTTON; *flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; @@ -1379,7 +1380,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl) static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { - memset(ev->reserved, 0, sizeof(ev->reserved)); + memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = ctrl->id; ev->u.ctrl.changes = changes; @@ -2249,16 +2250,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step, &def, &flags); - is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU || - cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU); + is_menu = (type == V4L2_CTRL_TYPE_MENU || + type == V4L2_CTRL_TYPE_INTEGER_MENU); if (is_menu) WARN_ON(step); else WARN_ON(cfg->menu_skip_mask); - if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) + if (type == V4L2_CTRL_TYPE_MENU && !qmenu) { qmenu = v4l2_ctrl_get_menu(cfg->id); - else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU && - qmenu_int == NULL) { + } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) { handler_set_err(hdl, -EINVAL); return NULL; } diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index c81faea96fbabf51f519648211fd636313fe28f0..a24b40dfec97a3b7b75b077791c28df9ff527af5 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -15,6 +15,7 @@ #include #include #include 
+#include MODULE_AUTHOR("Hans Verkuil"); MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions"); @@ -837,9 +838,9 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, switch (avi->colorimetry) { case HDMI_COLORIMETRY_EXTENDED: switch (avi->extended_colorimetry) { - case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB: - c.colorspace = V4L2_COLORSPACE_ADOBERGB; - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB; + case HDMI_EXTENDED_COLORIMETRY_OPRGB: + c.colorspace = V4L2_COLORSPACE_OPRGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; break; case HDMI_EXTENDED_COLORIMETRY_BT2020: c.colorspace = V4L2_COLORSPACE_BT2020; @@ -908,10 +909,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, c.ycbcr_enc = V4L2_YCBCR_ENC_601; c.xfer_func = V4L2_XFER_FUNC_SRGB; break; - case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601: - c.colorspace = V4L2_COLORSPACE_ADOBERGB; + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601: + c.colorspace = V4L2_COLORSPACE_OPRGB; c.ycbcr_enc = V4L2_YCBCR_ENC_601; - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB; + c.xfer_func = V4L2_XFER_FUNC_OPRGB; break; case HDMI_EXTENDED_COLORIMETRY_BT2020: c.colorspace = V4L2_COLORSPACE_BT2020; @@ -942,3 +943,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi, return c; } EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry); + +/** + * v4l2_get_edid_phys_addr() - find and return the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @offset: If not %NULL then the location of the physical address + * bytes in the EDID will be returned here. This is set to 0 + * if there is no physical address found. + * + * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. + */ +u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size, + unsigned int *offset) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + + if (offset) + *offset = loc; + if (loc == 0) + return CEC_PHYS_ADDR_INVALID; + return (edid[loc] << 8) | edid[loc + 1]; +} +EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr); + +/** + * v4l2_set_edid_phys_addr() - find and set the physical address + * + * @edid: pointer to the EDID data + * @size: size in bytes of the EDID data + * @phys_addr: the new physical address + * + * This function finds the location of the physical address in the EDID + * and fills in the given physical address and updates the checksum + * at the end of the EDID block. It does nothing if the EDID doesn't + * contain a physical address. + */ +void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr) +{ + unsigned int loc = cec_get_edid_spa_location(edid, size); + u8 sum = 0; + unsigned int i; + + if (loc == 0) + return; + edid[loc] = phys_addr >> 8; + edid[loc + 1] = phys_addr & 0xff; + loc &= ~0x7f; + + /* update the checksum */ + for (i = loc; i < loc + 127; i++) + sum += edid[i]; + edid[i] = 256 - sum; +} +EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr); + +/** + * v4l2_phys_addr_for_input() - calculate the PA for an input + * + * @phys_addr: the physical address of the parent + * @input: the number of the input port, must be between 1 and 15 + * + * This function calculates a new physical address based on the input + * port number. For example: + * + * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0 + * + * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0 + * + * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5 + * + * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth. + * + * Return: the new physical address or CEC_PHYS_ADDR_INVALID. 
+ */ +u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input) +{ + /* Check if input is sane */ + if (WARN_ON(input == 0 || input > 0xf)) + return CEC_PHYS_ADDR_INVALID; + + if (phys_addr == 0) + return input << 12; + + if ((phys_addr & 0x0fff) == 0) + return phys_addr | (input << 8); + + if ((phys_addr & 0x00ff) == 0) + return phys_addr | (input << 4); + + if ((phys_addr & 0x000f) == 0) + return phys_addr | input; + + /* + * All nibbles are used so no valid physical addresses can be assigned + * to the input. + */ + return CEC_PHYS_ADDR_INVALID; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input); + +/** + * v4l2_phys_addr_validate() - validate a physical address from an EDID + * + * @phys_addr: the physical address to validate + * @parent: if not %NULL, then this is filled with the parent's PA. + * @port: if not %NULL, then this is filled with the input port. + * + * This validates a physical address as read from an EDID. If the + * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end), + * then it will return -EINVAL. + * + * The parent PA is returned in %parent and the input port is returned in + * %port. For example: + * + * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0. + * + * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1. + * + * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2. + * + * PA = f.f.f.f: has parent f.f.f.f and input port 0. + * + * Return: 0 if the PA is valid, -EINVAL if not. + */ +int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port) +{ + int i; + + if (parent) + *parent = phys_addr; + if (port) + *port = 0; + if (phys_addr == CEC_PHYS_ADDR_INVALID) + return 0; + for (i = 0; i < 16; i += 4) + if (phys_addr & (0xf << i)) + break; + if (i == 16) + return 0; + if (parent) + *parent = phys_addr & (0xfff0 << i); + if (port) + *port = (phys_addr >> i) & 0xf; + for (i += 4; i < 16; i += 4) + if ((phys_addr & (0xf << i)) == 0) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate); diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56..481e3c65cf97a63202e1223b106c59b5e670651b 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c @@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh) } EXPORT_SYMBOL_GPL(v4l2_event_pending); +static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) +{ + struct v4l2_fh *fh = sev->fh; + unsigned int i; + + lockdep_assert_held(&fh->subscribe_lock); + assert_spin_locked(&fh->vdev->fh_lock); + + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } + list_del(&sev->list); +} + int v4l2_event_subscribe(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub, unsigned elems, const struct v4l2_subscribed_event_ops *ops) @@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, spin_lock_irqsave(&fh->vdev->fh_lock, flags); found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); + if (!found_ev) + list_add(&sev->list, &fh->subscribed); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); if (found_ev) { /* Already listening */ kvfree(sev); - goto out_unlock; - } - - if (sev->ops && sev->ops->add) { + } else if (sev->ops && sev->ops->add) { ret = sev->ops->add(sev, elems); if (ret) { + spin_lock_irqsave(&fh->vdev->fh_lock, flags); + __v4l2_event_unsubscribe(sev); + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); kvfree(sev); - goto
out_unlock; } } - spin_lock_irqsave(&fh->vdev->fh_lock, flags); - list_add(&sev->list, &fh->subscribed); - spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); - -out_unlock: mutex_unlock(&fh->subscribe_lock); return ret; @@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, { struct v4l2_subscribed_event *sev; unsigned long flags; - int i; if (sub->type == V4L2_EVENT_ALL) { v4l2_event_unsubscribe_all(fh); @@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, spin_lock_irqsave(&fh->vdev->fh_lock, flags); sev = v4l2_event_subscribed(fh, sub->type, sub->id); - if (sev != NULL) { - /* Remove any pending events for this subscription */ - for (i = 0; i < sev->in_use; i++) { - list_del(&sev->events[sev_pos(sev, i)].list); - fh->navailable--; - } - list_del(&sev->list); - } + if (sev != NULL) + __v4l2_event_unsubscribe(sev); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 54afc9c7ee6ea162b96a83f44389ae98dc9293d1..b01e5f0c5c0ca9a8316458f32037f9c84b85aa30 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_window *win; const struct v4l2_sdr_format *sdr; const struct v4l2_meta_format *meta; + u32 planes; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only) prt_names(mp->field, v4l2_field_names), mp->colorspace, mp->num_planes, mp->flags, mp->ycbcr_enc, mp->quantization, mp->xfer_func); - for (i = 0; i < mp->num_planes; i++) + planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); + for (i = 0; i < planes; i++) printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, mp->plane_fmt[i].bytesperline, mp->plane_fmt[i].sizeimage); @@ -1413,10 +1415,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, return ret; } +static void v4l_pix_format_touch(struct v4l2_pix_format *p) +{ + /* + * The v4l2_pix_format structure contains fields that make no sense for + * touch. Set them to default values in this case. + */ + + p->field = V4L2_FIELD_NONE; + p->colorspace = V4L2_COLORSPACE_RAW; + p->flags = 0; + p->ycbcr_enc = 0; + p->quantization = 0; + p->xfer_func = 0; +} + static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; + struct video_device *vfd = video_devdata(file); int ret = check_fmt(file, p->type); if (ret) @@ -1454,6 +1472,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; + if (vfd->vfl_type == VFL_TYPE_TOUCH) + v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); @@ -1489,21 +1509,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, return -EINVAL; } -static void v4l_pix_format_touch(struct v4l2_pix_format *p) -{ - /* - * The v4l2_pix_format structure contains fields that make no sense for - * touch. Set them to default values in this case. 
- */ - - p->field = V4L2_FIELD_NONE; - p->colorspace = V4L2_COLORSPACE_RAW; - p->flags = 0; - p->ycbcr_enc = 0; - p->quantization = 0; - p->xfer_func = 0; -} - static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { @@ -2934,7 +2939,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; - void *mbuf = NULL; + void *mbuf = NULL, *array_buf = NULL; void *parg = (void *)arg; long err = -EINVAL; bool has_array_args; @@ -2993,20 +2998,14 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, has_array_args = err; if (has_array_args) { - /* - * When adding new types of array args, make sure that the - * parent argument to ioctl (which contains the pointer to the - * array) fits into sbuf (so that mbuf will still remain - * unused up to here). - */ - mbuf = kvmalloc(array_size, GFP_KERNEL); + array_buf = kvmalloc(array_size, GFP_KERNEL); err = -ENOMEM; - if (NULL == mbuf) + if (array_buf == NULL) goto out_array_args; err = -EFAULT; - if (copy_from_user(mbuf, user_ptr, array_size)) + if (copy_from_user(array_buf, user_ptr, array_size)) goto out_array_args; - *kernel_ptr = mbuf; + *kernel_ptr = array_buf; } /* Handles IOCTL */ @@ -3025,7 +3024,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; - if (copy_to_user(user_ptr, mbuf, array_size)) + if (copy_to_user(user_ptr, array_buf, array_size)) err = -EFAULT; goto out_array_args; } @@ -3047,6 +3046,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, } out: + kvfree(array_buf); kvfree(mbuf); return err; } diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index ce9bd1b912102aeb1b89840872f2ea5f1add6a1a..0a9be4c5f1940a73e346fab94317a38df847af30 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -445,19 +445,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); -int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_buffer *buf) +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) { - struct vb2_queue *vq; - int ret = 0; - unsigned int i; - - vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_querybuf(vq, buf); - /* Adjust MMAP memory offsets for the CAPTURE queue */ if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; @@ -465,8 +460,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, buf->m.offset += DST_QUEUE_OFF_BASE; } } +} - return ret; +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); @@ -478,8 +488,12 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); ret = vb2_qbuf(vq, buf); - if (!ret) - v4l2_m2m_try_schedule(m2m_ctx); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + 
v4l2_m2m_adjust_mem_offset(vq, buf); + v4l2_m2m_try_schedule(m2m_ctx); return ret; } @@ -489,9 +503,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); @@ -503,10 +525,14 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); ret = vb2_prepare_buf(vq, buf); - if (!ret) - v4l2_m2m_try_schedule(m2m_ctx); + if (ret) + return ret; - return ret; + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + v4l2_m2m_try_schedule(m2m_ctx); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); @@ -747,11 +773,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev, entity->function = function; ret = media_entity_pads_init(entity, num_pads, pads); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } ret = media_device_register_entity(mdev, entity); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } return 0; } diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c deleted file mode 100644 index 7491b337002ce586b598fcad9b0aabc7e6c2d877..0000000000000000000000000000000000000000 --- a/drivers/media/v4l2-core/videobuf-core.c +++ /dev/null @@ -1,1199 +0,0 @@ -/* - * generic helper functions for handling video4linux capture buffers - * - * (c) 2007 Mauro Carvalho Chehab, - * - * Highly based on video-buf written originally by: - * (c) 2001,02 Gerd Knorr - * (c) 2006 Mauro Carvalho Chehab, - * (c) 2006 Ted Walther and John Sokol - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#define MAGIC_BUFFER 0x20070728 -#define MAGIC_CHECK(is, should) \ - do { \ - if (unlikely((is) != (should))) { \ - printk(KERN_ERR \ - "magic mismatch: %x (expected %x)\n", \ - is, should); \ - BUG(); \ - } \ - } while (0) - -static int debug; -module_param(debug, int, 0644); - -MODULE_DESCRIPTION("helper module to manage video4linux buffers"); -MODULE_AUTHOR("Mauro Carvalho Chehab "); -MODULE_LICENSE("GPL"); - -#define dprintk(level, fmt, arg...) \ - do { \ - if (debug >= level) \ - printk(KERN_DEBUG "vbuf: " fmt, ## arg); \ - } while (0) - -/* --------------------------------------------------------------------- */ - -#define CALL(q, f, arg...) \ - ((q->int_ops->f) ? q->int_ops->f(arg) : 0) -#define CALLPTR(q, f, arg...) \ - ((q->int_ops->f) ? 
q->int_ops->f(arg) : NULL) - -struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) -{ - struct videobuf_buffer *vb; - - BUG_ON(q->msize < sizeof(*vb)); - - if (!q->int_ops || !q->int_ops->alloc_vb) { - printk(KERN_ERR "No specific ops defined!\n"); - BUG(); - } - - vb = q->int_ops->alloc_vb(q->msize); - if (NULL != vb) { - init_waitqueue_head(&vb->done); - vb->magic = MAGIC_BUFFER; - } - - return vb; -} -EXPORT_SYMBOL_GPL(videobuf_alloc_vb); - -static int state_neither_active_nor_queued(struct videobuf_queue *q, - struct videobuf_buffer *vb) -{ - unsigned long flags; - bool rc; - - spin_lock_irqsave(q->irqlock, flags); - rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED; - spin_unlock_irqrestore(q->irqlock, flags); - return rc; -}; - -int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb, - int non_blocking, int intr) -{ - bool is_ext_locked; - int ret = 0; - - MAGIC_CHECK(vb->magic, MAGIC_BUFFER); - - if (non_blocking) { - if (state_neither_active_nor_queued(q, vb)) - return 0; - return -EAGAIN; - } - - is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock); - - /* Release vdev lock to prevent this wait from blocking outside access to - the device. */ - if (is_ext_locked) - mutex_unlock(q->ext_lock); - if (intr) - ret = wait_event_interruptible(vb->done, - state_neither_active_nor_queued(q, vb)); - else - wait_event(vb->done, state_neither_active_nor_queued(q, vb)); - /* Relock */ - if (is_ext_locked) - mutex_lock(q->ext_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(videobuf_waiton); - -int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, - struct v4l2_framebuffer *fbuf) -{ - MAGIC_CHECK(vb->magic, MAGIC_BUFFER); - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - return CALL(q, iolock, q, vb, fbuf); -} -EXPORT_SYMBOL_GPL(videobuf_iolock); - -void *videobuf_queue_to_vaddr(struct videobuf_queue *q, - struct videobuf_buffer *buf) -{ - if (q->int_ops->vaddr) - return q->int_ops->vaddr(buf); - return NULL; -} -EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr); - -/* --------------------------------------------------------------------- */ - - -void videobuf_queue_core_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct videobuf_qtype_ops *int_ops, - struct mutex *ext_lock) -{ - BUG_ON(!q); - memset(q, 0, sizeof(*q)); - q->irqlock = irqlock; - q->ext_lock = ext_lock; - q->dev = dev; - q->type = type; - q->field = field; - q->msize = msize; - q->ops = ops; - q->priv_data = priv; - q->int_ops = int_ops; - - /* All buffer operations are mandatory */ - BUG_ON(!q->ops->buf_setup); - BUG_ON(!q->ops->buf_prepare); - BUG_ON(!q->ops->buf_queue); - BUG_ON(!q->ops->buf_release); - - /* Lock is mandatory for queue_cancel to work */ - BUG_ON(!irqlock); - - /* Having implementations for abstract methods are mandatory */ - BUG_ON(!q->int_ops); - - mutex_init(&q->vb_lock); - init_waitqueue_head(&q->wait); - INIT_LIST_HEAD(&q->stream); -} -EXPORT_SYMBOL_GPL(videobuf_queue_core_init); - -/* Locking: Only usage in bttv unsafe find way to remove */ -int videobuf_queue_is_busy(struct videobuf_queue *q) -{ - int i; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - if (q->streaming) { - dprintk(1, "busy: streaming active\n"); - return 1; - } - if (q->reading) { - dprintk(1, "busy: pending read #1\n"); - return 1; - } - if (q->read_buf) { - dprintk(1, "busy: pending read #2\n"); - 
return 1; - } - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - if (NULL == q->bufs[i]) - continue; - if (q->bufs[i]->map) { - dprintk(1, "busy: buffer #%d mapped\n", i); - return 1; - } - if (q->bufs[i]->state == VIDEOBUF_QUEUED) { - dprintk(1, "busy: buffer #%d queued\n", i); - return 1; - } - if (q->bufs[i]->state == VIDEOBUF_ACTIVE) { - dprintk(1, "busy: buffer #%d avtive\n", i); - return 1; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(videobuf_queue_is_busy); - -/* - * __videobuf_free() - free all the buffers and their control structures - * - * This function can only be called if streaming/reading is off, i.e. no buffers - * are under control of the driver. - */ -/* Locking: Caller holds q->vb_lock */ -static int __videobuf_free(struct videobuf_queue *q) -{ - int i; - - dprintk(1, "%s\n", __func__); - if (!q) - return 0; - - if (q->streaming || q->reading) { - dprintk(1, "Cannot free buffers when streaming or reading\n"); - return -EBUSY; - } - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - for (i = 0; i < VIDEO_MAX_FRAME; i++) - if (q->bufs[i] && q->bufs[i]->map) { - dprintk(1, "Cannot free mmapped buffers\n"); - return -EBUSY; - } - - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - if (NULL == q->bufs[i]) - continue; - q->ops->buf_release(q, q->bufs[i]); - kfree(q->bufs[i]); - q->bufs[i] = NULL; - } - - return 0; -} - -/* Locking: Caller holds q->vb_lock */ -void videobuf_queue_cancel(struct videobuf_queue *q) -{ - unsigned long flags = 0; - int i; - - q->streaming = 0; - q->reading = 0; - wake_up_interruptible_sync(&q->wait); - - /* remove queued buffers from list */ - spin_lock_irqsave(q->irqlock, flags); - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - if (NULL == q->bufs[i]) - continue; - if (q->bufs[i]->state == VIDEOBUF_QUEUED) { - list_del(&q->bufs[i]->queue); - q->bufs[i]->state = VIDEOBUF_ERROR; - wake_up_all(&q->bufs[i]->done); - } - } - spin_unlock_irqrestore(q->irqlock, flags); - - /* free all buffers + clear queue */ - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - if (NULL == q->bufs[i]) - continue; - q->ops->buf_release(q, q->bufs[i]); - } - INIT_LIST_HEAD(&q->stream); -} -EXPORT_SYMBOL_GPL(videobuf_queue_cancel); - -/* --------------------------------------------------------------------- */ - -/* Locking: Caller holds q->vb_lock */ -enum v4l2_field videobuf_next_field(struct videobuf_queue *q) -{ - enum v4l2_field field = q->field; - - BUG_ON(V4L2_FIELD_ANY == field); - - if (V4L2_FIELD_ALTERNATE == field) { - if (V4L2_FIELD_TOP == q->last) { - field = V4L2_FIELD_BOTTOM; - q->last = V4L2_FIELD_BOTTOM; - } else { - field = V4L2_FIELD_TOP; - q->last = V4L2_FIELD_TOP; - } - } - return field; -} -EXPORT_SYMBOL_GPL(videobuf_next_field); - -/* Locking: Caller holds q->vb_lock */ -static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b, - struct videobuf_buffer *vb, enum v4l2_buf_type type) -{ - MAGIC_CHECK(vb->magic, MAGIC_BUFFER); - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - b->index = vb->i; - b->type = type; - - b->memory = vb->memory; - switch (b->memory) { - case V4L2_MEMORY_MMAP: - b->m.offset = vb->boff; - b->length = vb->bsize; - break; - case V4L2_MEMORY_USERPTR: - b->m.userptr = vb->baddr; - b->length = vb->bsize; - break; - case V4L2_MEMORY_OVERLAY: - b->m.offset = vb->boff; - break; - case V4L2_MEMORY_DMABUF: - /* DMABUF is not handled in videobuf framework */ - break; - } - - b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - if (vb->map) - b->flags |= V4L2_BUF_FLAG_MAPPED; - - switch (vb->state) { - case VIDEOBUF_PREPARED: - case VIDEOBUF_QUEUED: - 
case VIDEOBUF_ACTIVE: - b->flags |= V4L2_BUF_FLAG_QUEUED; - break; - case VIDEOBUF_ERROR: - b->flags |= V4L2_BUF_FLAG_ERROR; - /* fall through */ - case VIDEOBUF_DONE: - b->flags |= V4L2_BUF_FLAG_DONE; - break; - case VIDEOBUF_NEEDS_INIT: - case VIDEOBUF_IDLE: - /* nothing */ - break; - } - - b->field = vb->field; - b->timestamp = vb->ts; - b->bytesused = vb->size; - b->sequence = vb->field_count >> 1; -} - -int videobuf_mmap_free(struct videobuf_queue *q) -{ - int ret; - videobuf_queue_lock(q); - ret = __videobuf_free(q); - videobuf_queue_unlock(q); - return ret; -} -EXPORT_SYMBOL_GPL(videobuf_mmap_free); - -/* Locking: Caller holds q->vb_lock */ -int __videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory) -{ - unsigned int i; - int err; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - err = __videobuf_free(q); - if (0 != err) - return err; - - /* Allocate and initialize buffers */ - for (i = 0; i < bcount; i++) { - q->bufs[i] = videobuf_alloc_vb(q); - - if (NULL == q->bufs[i]) - break; - - q->bufs[i]->i = i; - q->bufs[i]->memory = memory; - q->bufs[i]->bsize = bsize; - switch (memory) { - case V4L2_MEMORY_MMAP: - q->bufs[i]->boff = PAGE_ALIGN(bsize) * i; - break; - case V4L2_MEMORY_USERPTR: - case V4L2_MEMORY_OVERLAY: - case V4L2_MEMORY_DMABUF: - /* nothing */ - break; - } - } - - if (!i) - return -ENOMEM; - - dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize); - - return i; -} -EXPORT_SYMBOL_GPL(__videobuf_mmap_setup); - -int videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory) -{ - int ret; - videobuf_queue_lock(q); - ret = __videobuf_mmap_setup(q, bcount, bsize, memory); - videobuf_queue_unlock(q); - return ret; -} -EXPORT_SYMBOL_GPL(videobuf_mmap_setup); - -int videobuf_reqbufs(struct videobuf_queue *q, - struct v4l2_requestbuffers *req) -{ - unsigned int size, count; - int retval; - - if (req->memory != V4L2_MEMORY_MMAP && - req->memory != V4L2_MEMORY_USERPTR && - req->memory != V4L2_MEMORY_OVERLAY) { - dprintk(1, "reqbufs: memory type invalid\n"); - return -EINVAL; - } - - videobuf_queue_lock(q); - if (req->type != q->type) { - dprintk(1, "reqbufs: queue type invalid\n"); - retval = -EINVAL; - goto done; - } - - if (q->streaming) { - dprintk(1, "reqbufs: streaming already exists\n"); - retval = -EBUSY; - goto done; - } - if (!list_empty(&q->stream)) { - dprintk(1, "reqbufs: stream running\n"); - retval = -EBUSY; - goto done; - } - - if (req->count == 0) { - dprintk(1, "reqbufs: count invalid (%d)\n", req->count); - retval = __videobuf_free(q); - goto done; - } - - count = req->count; - if (count > VIDEO_MAX_FRAME) - count = VIDEO_MAX_FRAME; - size = 0; - q->ops->buf_setup(q, &count, &size); - dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n", - count, size, - (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT)); - - retval = __videobuf_mmap_setup(q, count, size, req->memory); - if (retval < 0) { - dprintk(1, "reqbufs: mmap setup returned %d\n", retval); - goto done; - } - - req->count = retval; - retval = 0; - - done: - videobuf_queue_unlock(q); - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_reqbufs); - -int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b) -{ - int ret = -EINVAL; - - videobuf_queue_lock(q); - if (unlikely(b->type != q->type)) { - dprintk(1, "querybuf: Wrong type.\n"); - goto done; - } - if (unlikely(b->index >= VIDEO_MAX_FRAME)) { - dprintk(1, "querybuf: index out of range.\n"); - goto 
done; - } - if (unlikely(NULL == q->bufs[b->index])) { - dprintk(1, "querybuf: buffer is null.\n"); - goto done; - } - - videobuf_status(q, b, q->bufs[b->index], q->type); - - ret = 0; -done: - videobuf_queue_unlock(q); - return ret; -} -EXPORT_SYMBOL_GPL(videobuf_querybuf); - -int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b) -{ - struct videobuf_buffer *buf; - enum v4l2_field field; - unsigned long flags = 0; - int retval; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - if (b->memory == V4L2_MEMORY_MMAP) - down_read(&current->mm->mmap_sem); - - videobuf_queue_lock(q); - retval = -EBUSY; - if (q->reading) { - dprintk(1, "qbuf: Reading running...\n"); - goto done; - } - retval = -EINVAL; - if (b->type != q->type) { - dprintk(1, "qbuf: Wrong type.\n"); - goto done; - } - if (b->index >= VIDEO_MAX_FRAME) { - dprintk(1, "qbuf: index out of range.\n"); - goto done; - } - buf = q->bufs[b->index]; - if (NULL == buf) { - dprintk(1, "qbuf: buffer is null.\n"); - goto done; - } - MAGIC_CHECK(buf->magic, MAGIC_BUFFER); - if (buf->memory != b->memory) { - dprintk(1, "qbuf: memory type is wrong.\n"); - goto done; - } - if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) { - dprintk(1, "qbuf: buffer is already queued or active.\n"); - goto done; - } - - switch (b->memory) { - case V4L2_MEMORY_MMAP: - if (0 == buf->baddr) { - dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n"); - goto done; - } - if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT - || q->type == V4L2_BUF_TYPE_VBI_OUTPUT - || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT - || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) { - buf->size = b->bytesused; - buf->field = b->field; - buf->ts = b->timestamp; - } - break; - case V4L2_MEMORY_USERPTR: - if (b->length < buf->bsize) { - dprintk(1, "qbuf: buffer length is not enough\n"); - goto done; - } - if (VIDEOBUF_NEEDS_INIT != buf->state && - buf->baddr != b->m.userptr) - q->ops->buf_release(q, buf); - buf->baddr = b->m.userptr; - break; - case V4L2_MEMORY_OVERLAY: - buf->boff = b->m.offset; - break; - default: - dprintk(1, "qbuf: wrong memory type\n"); - goto done; - } - - dprintk(1, "qbuf: requesting next field\n"); - field = videobuf_next_field(q); - retval = q->ops->buf_prepare(q, buf, field); - if (0 != retval) { - dprintk(1, "qbuf: buffer_prepare returned %d\n", retval); - goto done; - } - - list_add_tail(&buf->stream, &q->stream); - if (q->streaming) { - spin_lock_irqsave(q->irqlock, flags); - q->ops->buf_queue(q, buf); - spin_unlock_irqrestore(q->irqlock, flags); - } - dprintk(1, "qbuf: succeeded\n"); - retval = 0; - wake_up_interruptible_sync(&q->wait); - -done: - videobuf_queue_unlock(q); - - if (b->memory == V4L2_MEMORY_MMAP) - up_read(&current->mm->mmap_sem); - - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_qbuf); - -/* Locking: Caller holds q->vb_lock */ -static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock) -{ - int retval; - -checks: - if (!q->streaming) { - dprintk(1, "next_buffer: Not streaming\n"); - retval = -EINVAL; - goto done; - } - - if (list_empty(&q->stream)) { - if (noblock) { - retval = -EAGAIN; - dprintk(2, "next_buffer: no buffers to dequeue\n"); - goto done; - } else { - dprintk(2, "next_buffer: waiting on buffer\n"); - - /* Drop lock to avoid deadlock with qbuf */ - videobuf_queue_unlock(q); - - /* Checking list_empty and streaming is safe without - * locks because we goto checks to validate while - * holding locks before proceeding */ - retval = wait_event_interruptible(q->wait, - !list_empty(&q->stream) ||
!q->streaming); - videobuf_queue_lock(q); - - if (retval) - goto done; - - goto checks; - } - } - - retval = 0; - -done: - return retval; -} - -/* Locking: Caller holds q->vb_lock */ -static int stream_next_buffer(struct videobuf_queue *q, - struct videobuf_buffer **vb, int nonblocking) -{ - int retval; - struct videobuf_buffer *buf = NULL; - - retval = stream_next_buffer_check_queue(q, nonblocking); - if (retval) - goto done; - - buf = list_entry(q->stream.next, struct videobuf_buffer, stream); - retval = videobuf_waiton(q, buf, nonblocking, 1); - if (retval < 0) - goto done; - - *vb = buf; -done: - return retval; -} - -int videobuf_dqbuf(struct videobuf_queue *q, - struct v4l2_buffer *b, int nonblocking) -{ - struct videobuf_buffer *buf = NULL; - int retval; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - memset(b, 0, sizeof(*b)); - videobuf_queue_lock(q); - - retval = stream_next_buffer(q, &buf, nonblocking); - if (retval < 0) { - dprintk(1, "dqbuf: next_buffer error: %i\n", retval); - goto done; - } - - switch (buf->state) { - case VIDEOBUF_ERROR: - dprintk(1, "dqbuf: state is error\n"); - break; - case VIDEOBUF_DONE: - dprintk(1, "dqbuf: state is done\n"); - break; - default: - dprintk(1, "dqbuf: state invalid\n"); - retval = -EINVAL; - goto done; - } - CALL(q, sync, q, buf); - videobuf_status(q, b, buf, q->type); - list_del(&buf->stream); - buf->state = VIDEOBUF_IDLE; - b->flags &= ~V4L2_BUF_FLAG_DONE; -done: - videobuf_queue_unlock(q); - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_dqbuf); - -int videobuf_streamon(struct videobuf_queue *q) -{ - struct videobuf_buffer *buf; - unsigned long flags = 0; - int retval; - - videobuf_queue_lock(q); - retval = -EBUSY; - if (q->reading) - goto done; - retval = 0; - if (q->streaming) - goto done; - q->streaming = 1; - spin_lock_irqsave(q->irqlock, flags); - list_for_each_entry(buf, &q->stream, stream) - if (buf->state == VIDEOBUF_PREPARED) - q->ops->buf_queue(q, buf); - spin_unlock_irqrestore(q->irqlock, flags); - - wake_up_interruptible_sync(&q->wait); -done: - videobuf_queue_unlock(q); - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_streamon); - -/* Locking: Caller holds q->vb_lock */ -static int __videobuf_streamoff(struct videobuf_queue *q) -{ - if (!q->streaming) - return -EINVAL; - - videobuf_queue_cancel(q); - - return 0; -} - -int videobuf_streamoff(struct videobuf_queue *q) -{ - int retval; - - videobuf_queue_lock(q); - retval = __videobuf_streamoff(q); - videobuf_queue_unlock(q); - - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_streamoff); - -/* Locking: Caller holds q->vb_lock */ -static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q, - char __user *data, - size_t count, loff_t *ppos) -{ - enum v4l2_field field; - unsigned long flags = 0; - int retval; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - /* setup stuff */ - q->read_buf = videobuf_alloc_vb(q); - if (NULL == q->read_buf) - return -ENOMEM; - - q->read_buf->memory = V4L2_MEMORY_USERPTR; - q->read_buf->baddr = (unsigned long)data; - q->read_buf->bsize = count; - - field = videobuf_next_field(q); - retval = q->ops->buf_prepare(q, q->read_buf, field); - if (0 != retval) - goto done; - - /* start capture & wait */ - spin_lock_irqsave(q->irqlock, flags); - q->ops->buf_queue(q, q->read_buf); - spin_unlock_irqrestore(q->irqlock, flags); - retval = videobuf_waiton(q, q->read_buf, 0, 0); - if (0 == retval) { - CALL(q, sync, q, q->read_buf); - if (VIDEOBUF_ERROR == q->read_buf->state) - retval = -EIO; - else - retval = q->read_buf->size; - } - -done: - /* 
cleanup */ - q->ops->buf_release(q, q->read_buf); - kfree(q->read_buf); - q->read_buf = NULL; - return retval; -} - -static int __videobuf_copy_to_user(struct videobuf_queue *q, - struct videobuf_buffer *buf, - char __user *data, size_t count, - int nonblocking) -{ - void *vaddr = CALLPTR(q, vaddr, buf); - - /* copy to userspace */ - if (count > buf->size - q->read_off) - count = buf->size - q->read_off; - - if (copy_to_user(data, vaddr + q->read_off, count)) - return -EFAULT; - - return count; -} - -static int __videobuf_copy_stream(struct videobuf_queue *q, - struct videobuf_buffer *buf, - char __user *data, size_t count, size_t pos, - int vbihack, int nonblocking) -{ - unsigned int *fc = CALLPTR(q, vaddr, buf); - - if (vbihack) { - /* dirty, undocumented hack -- pass the frame counter - * within the last four bytes of each vbi data block. - * We need that one to maintain backward compatibility - * to all vbi decoding software out there ... */ - fc += (buf->size >> 2) - 1; - *fc = buf->field_count >> 1; - dprintk(1, "vbihack: %d\n", *fc); - } - - /* copy stuff using the common method */ - count = __videobuf_copy_to_user(q, buf, data, count, nonblocking); - - if ((count == -EFAULT) && (pos == 0)) - return -EFAULT; - - return count; -} - -ssize_t videobuf_read_one(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int nonblocking) -{ - enum v4l2_field field; - unsigned long flags = 0; - unsigned size = 0, nbufs = 1; - int retval; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - videobuf_queue_lock(q); - - q->ops->buf_setup(q, &nbufs, &size); - - if (NULL == q->read_buf && - count >= size && - !nonblocking) { - retval = videobuf_read_zerocopy(q, data, count, ppos); - if (retval >= 0 || retval == -EIO) - /* ok, all done */ - goto done; - /* fallback to kernel bounce buffer on failures */ - } - - if (NULL == q->read_buf) { - /* need to capture a new frame */ - retval = -ENOMEM; - q->read_buf = videobuf_alloc_vb(q); - - dprintk(1, "video alloc=0x%p\n", q->read_buf); - if (NULL == q->read_buf) - goto done; - q->read_buf->memory = V4L2_MEMORY_USERPTR; - q->read_buf->bsize = count; /* preferred size */ - field = videobuf_next_field(q); - retval = q->ops->buf_prepare(q, q->read_buf, field); - - if (0 != retval) { - kfree(q->read_buf); - q->read_buf = NULL; - goto done; - } - - spin_lock_irqsave(q->irqlock, flags); - q->ops->buf_queue(q, q->read_buf); - spin_unlock_irqrestore(q->irqlock, flags); - - q->read_off = 0; - } - - /* wait until capture is done */ - retval = videobuf_waiton(q, q->read_buf, nonblocking, 1); - if (0 != retval) - goto done; - - CALL(q, sync, q, q->read_buf); - - if (VIDEOBUF_ERROR == q->read_buf->state) { - /* catch I/O errors */ - q->ops->buf_release(q, q->read_buf); - kfree(q->read_buf); - q->read_buf = NULL; - retval = -EIO; - goto done; - } - - /* Copy to userspace */ - retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking); - if (retval < 0) - goto done; - - q->read_off += retval; - if (q->read_off == q->read_buf->size) { - /* all data copied, cleanup */ - q->ops->buf_release(q, q->read_buf); - kfree(q->read_buf); - q->read_buf = NULL; - } - -done: - videobuf_queue_unlock(q); - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_read_one); - -/* Locking: Caller holds q->vb_lock */ -static int __videobuf_read_start(struct videobuf_queue *q) -{ - enum v4l2_field field; - unsigned long flags = 0; - unsigned int count = 0, size = 0; - int err, i; - - q->ops->buf_setup(q, &count, &size); - if (count < 2) - count = 2; - if (count 
> VIDEO_MAX_FRAME) - count = VIDEO_MAX_FRAME; - size = PAGE_ALIGN(size); - - err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR); - if (err < 0) - return err; - - count = err; - - for (i = 0; i < count; i++) { - field = videobuf_next_field(q); - err = q->ops->buf_prepare(q, q->bufs[i], field); - if (err) - return err; - list_add_tail(&q->bufs[i]->stream, &q->stream); - } - spin_lock_irqsave(q->irqlock, flags); - for (i = 0; i < count; i++) - q->ops->buf_queue(q, q->bufs[i]); - spin_unlock_irqrestore(q->irqlock, flags); - q->reading = 1; - return 0; -} - -static void __videobuf_read_stop(struct videobuf_queue *q) -{ - int i; - - videobuf_queue_cancel(q); - __videobuf_free(q); - INIT_LIST_HEAD(&q->stream); - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - if (NULL == q->bufs[i]) - continue; - kfree(q->bufs[i]); - q->bufs[i] = NULL; - } - q->read_buf = NULL; -} - -int videobuf_read_start(struct videobuf_queue *q) -{ - int rc; - - videobuf_queue_lock(q); - rc = __videobuf_read_start(q); - videobuf_queue_unlock(q); - - return rc; -} -EXPORT_SYMBOL_GPL(videobuf_read_start); - -void videobuf_read_stop(struct videobuf_queue *q) -{ - videobuf_queue_lock(q); - __videobuf_read_stop(q); - videobuf_queue_unlock(q); -} -EXPORT_SYMBOL_GPL(videobuf_read_stop); - -void videobuf_stop(struct videobuf_queue *q) -{ - videobuf_queue_lock(q); - - if (q->streaming) - __videobuf_streamoff(q); - - if (q->reading) - __videobuf_read_stop(q); - - videobuf_queue_unlock(q); -} -EXPORT_SYMBOL_GPL(videobuf_stop); - -ssize_t videobuf_read_stream(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int vbihack, int nonblocking) -{ - int rc, retval; - unsigned long flags = 0; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - dprintk(2, "%s\n", __func__); - videobuf_queue_lock(q); - retval = -EBUSY; - if (q->streaming) - goto done; - if (!q->reading) { - retval = __videobuf_read_start(q); - if (retval < 0) - goto done; - } - - retval = 0; - while (count > 0) { - /* get / wait for data */ - if (NULL == q->read_buf) { - q->read_buf = list_entry(q->stream.next, - struct videobuf_buffer, - stream); - list_del(&q->read_buf->stream); - q->read_off = 0; - } - rc = videobuf_waiton(q, q->read_buf, nonblocking, 1); - if (rc < 0) { - if (0 == retval) - retval = rc; - break; - } - - if (q->read_buf->state == VIDEOBUF_DONE) { - rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count, - retval, vbihack, nonblocking); - if (rc < 0) { - retval = rc; - break; - } - retval += rc; - count -= rc; - q->read_off += rc; - } else { - /* some error */ - q->read_off = q->read_buf->size; - if (0 == retval) - retval = -EIO; - } - - /* requeue buffer when done with copying */ - if (q->read_off == q->read_buf->size) { - list_add_tail(&q->read_buf->stream, - &q->stream); - spin_lock_irqsave(q->irqlock, flags); - q->ops->buf_queue(q, q->read_buf); - spin_unlock_irqrestore(q->irqlock, flags); - q->read_buf = NULL; - } - if (retval < 0) - break; - } - -done: - videobuf_queue_unlock(q); - return retval; -} -EXPORT_SYMBOL_GPL(videobuf_read_stream); - -__poll_t videobuf_poll_stream(struct file *file, - struct videobuf_queue *q, - poll_table *wait) -{ - __poll_t req_events = poll_requested_events(wait); - struct videobuf_buffer *buf = NULL; - __poll_t rc = 0; - - videobuf_queue_lock(q); - if (q->streaming) { - if (!list_empty(&q->stream)) - buf = list_entry(q->stream.next, - struct videobuf_buffer, stream); - } else if (req_events & (EPOLLIN | EPOLLRDNORM)) { - if (!q->reading) - __videobuf_read_start(q); - if 
(!q->reading) { - rc = EPOLLERR; - } else if (NULL == q->read_buf) { - q->read_buf = list_entry(q->stream.next, - struct videobuf_buffer, - stream); - list_del(&q->read_buf->stream); - q->read_off = 0; - } - buf = q->read_buf; - } - if (!buf) - rc = EPOLLERR; - - if (0 == rc) { - poll_wait(file, &buf->done, wait); - if (buf->state == VIDEOBUF_DONE || - buf->state == VIDEOBUF_ERROR) { - switch (q->type) { - case V4L2_BUF_TYPE_VIDEO_OUTPUT: - case V4L2_BUF_TYPE_VBI_OUTPUT: - case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - case V4L2_BUF_TYPE_SDR_OUTPUT: - rc = EPOLLOUT | EPOLLWRNORM; - break; - default: - rc = EPOLLIN | EPOLLRDNORM; - break; - } - } - } - videobuf_queue_unlock(q); - return rc; -} -EXPORT_SYMBOL_GPL(videobuf_poll_stream); - -int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma) -{ - int rc = -EINVAL; - int i; - - MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); - - if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) { - dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n"); - return -EINVAL; - } - - videobuf_queue_lock(q); - for (i = 0; i < VIDEO_MAX_FRAME; i++) { - struct videobuf_buffer *buf = q->bufs[i]; - - if (buf && buf->memory == V4L2_MEMORY_MMAP && - buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) { - rc = CALL(q, mmap_mapper, q, buf, vma); - break; - } - } - videobuf_queue_unlock(q); - - return rc; -} -EXPORT_SYMBOL_GPL(videobuf_mmap_mapper); diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 1b182b117f9cf3fbcacfcd9a3e343a3d66e47ab2..38b945eb410f31a85db44e7de3afb05bec9931e7 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -109,7 +109,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) iounmap(ctrl->gregs); dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); return 0; } @@ -221,7 +220,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); + fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev), + GFP_KERNEL); if (!fsl_ifc_ctrl_dev) return -ENOMEM; @@ -231,8 +231,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); if (!fsl_ifc_ctrl_dev->gregs) { dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; + return -ENODEV; } if (of_property_read_bool(dev->dev.of_node, "little-endian")) { @@ -308,6 +307,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); err: + iounmap(fsl_ifc_ctrl_dev->gregs); return ret; } diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index c215287e80cf3bbff8d452f53bf097fba39148dd..1c6a7c16e0c17d25a34f37c920fa189e51f5ffe3 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -21,6 +21,7 @@ #include #include #include +#include /* GPIO descriptor enum */ #include #include #include diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index bd25faf6d13d319db73d399b98f7163332dbd895..c8f16666256cc3c23f5d68cfe3d2370f045e8c39 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc) u32 value; /* compute the number of MC clock cycles per tick */ - tick = mc->tick * clk_get_rate(mc->clk); + tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk); do_div(tick, 
NSEC_PER_SEC); value = readl(mc->regs + MC_EMEM_ARB_CFG); diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 76382c858c35435b98e061a7dda49d5dacad9585..b1564cacd19e1b86dc26fabe945b4c892e5f4e40 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -18,6 +18,7 @@ #include #include #include +#include #define DRIVER_NAME "memstick" @@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work) struct memstick_dev *card; dev_dbg(&host->dev, "memstick_check started\n"); + pm_runtime_get_noresume(host->dev.parent); mutex_lock(&host->lock); if (!host->card) { if (memstick_power_on(host)) @@ -479,6 +481,7 @@ static void memstick_check(struct work_struct *work) host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); mutex_unlock(&host->lock); + pm_runtime_put(host->dev.parent); dev_dbg(&host->dev, "memstick_check finished\n"); } @@ -626,13 +629,18 @@ static int __init memstick_init(void) return -ENOMEM; rc = bus_register(&memstick_bus_type); - if (!rc) - rc = class_register(&memstick_host_class); + if (rc) + goto error_destroy_workqueue; - if (!rc) - return 0; + rc = class_register(&memstick_host_class); + if (rc) + goto error_bus_unregister; + + return 0; +error_bus_unregister: bus_unregister(&memstick_bus_type); +error_destroy_workqueue: destroy_workqueue(workqueue); return rc; diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index bcdca9fbef51cc802d34b74a4fab19abd308b1bc..29f5021d21ea62dc83344c2c5d3ea9f6630f85e0 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -949,7 +949,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev, if (!cnt) { rc = -ENODEV; pci_dev_busy = 1; - goto err_out; + goto err_out_int; } jm = kzalloc(sizeof(struct jmb38x_ms) diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 627d6e62fe3133319f0da646a0b9f84fc12445d1..61b9c97cf0e7a9ae24f67c714e09ca43c797f7bf 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -827,7 +827,7 @@ static void r592_remove(struct pci_dev *pdev) /* Stop the processing thread. That ensures that we won't take any more requests */ kthread_stop(dev->io_thread); - + del_timer_sync(&dev->detect_timer); r592_enable_device(dev, false); while (!error && dev->req) { diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 4f64563df7ded29f387cc0b32d2e147eb27e6848..aee62f0618c70c37a9df6423f2f15a0866fc7ce3 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -798,9 +798,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) mutex_unlock(&host->host_mutex); wait_for_completion(&host->detect_ms_exit); - memstick_remove_host(msh); - memstick_free_host(msh); - /* Balance possible unbalanced usage count * e.g. 
unconditional module removal */ @@ -808,10 +805,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) pm_runtime_put(ms_dev(host)); pm_runtime_disable(&pdev->dev); - platform_set_drvdata(pdev, NULL); - + memstick_remove_host(msh); dev_dbg(&(pdev->dev), ": Realtek USB Memstick controller has been removed\n"); + memstick_free_host(msh); + platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 8d22d6134a89d1abb7d55c59663d8de51ae8b550..a0a42bdc3028b891d1e24bb9038ccb48f25b8829 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -100,19 +100,19 @@ struct buflist { * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. */ -static int mptctl_fw_download(unsigned long arg); -static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); -static int mptctl_gettargetinfo(unsigned long arg); -static int mptctl_readtest(unsigned long arg); -static int mptctl_mpt_command(unsigned long arg); -static int mptctl_eventquery(unsigned long arg); -static int mptctl_eventenable(unsigned long arg); -static int mptctl_eventreport(unsigned long arg); -static int mptctl_replace_fw(unsigned long arg); - -static int mptctl_do_reset(unsigned long arg); -static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); -static int mptctl_hp_targetinfo(unsigned long arg); +static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg); + +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); @@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); /* * Private function calls. */ -static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); -static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, @@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * by TM and FW reloads. 
*/ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { - return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { - return mptctl_gettargetinfo(arg); + return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { - return mptctl_readtest(arg); + return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { - return mptctl_eventquery(arg); + return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { - return mptctl_eventenable(arg); + return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { - return mptctl_eventreport(arg); + return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); + return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or @@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); + ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) - ret = mptctl_mpt_command(arg); + ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) - ret = mptctl_do_reset(arg); + ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); + ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; @@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } -static int mptctl_do_reset(unsigned long arg) +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; - MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " @@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg) return -EFAULT; } - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ - } - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); @@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_fw_download(unsigned long arg) +mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; @@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg) return -EFAULT; } - return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; @@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) pFWDownloadReply_t ReplyMsg = NULL; unsigned long 
timeleft; - if (mpt_verify_adapter(ioc, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", - ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); @@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", - iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; @@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE * -ENODEV if no such device/adapter */ static int -mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; - int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; @@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) return PTR_ERR(karg); } - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); - return -ENODEV; - } - /* Verify the data transfer size is correct. 
*/ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " @@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) * -ENODEV if no such device/adapter */ static int -mptctl_gettargetinfo (unsigned long arg) +mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; - MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; - int iocnum; int numDevices = 0; int lun; int maxWordsLeft; @@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes @@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_readtest (unsigned long arg) +mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_test __user *uarg = (void __user *) arg; struct mpt_ioctl_test karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " @@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", ioc->name)); /* Fill in the data and return the structure to the calling @@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_eventquery (unsigned long arg) +mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " @@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; @@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventenable (unsigned long arg) +mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " @@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - 
__FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { @@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventreport (unsigned long arg) +mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; - MPT_ADAPTER *ioc; - int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { @@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); @@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_replace_fw (unsigned long arg) +mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; struct mpt_ioctl_replace_fw karg; - MPT_ADAPTER *ioc; - int iocnum; int newFwSize; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { @@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", ioc->name)); /* If caching FW, Free the old FW image @@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg) * -ENOMEM if memory allocation error */ static int -mptctl_mpt_command (unsigned long arg) +mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; - MPT_ADAPTER *ioc; - int iocnum; int rc; @@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - - rc = mptctl_do_mpt_command (karg, &uarg->MF); + rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; } @@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg) * -EPERM if SCSI I/O and target is untagged */ static int -mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; @@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ - int iocnum, flagsLength; + int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; @@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, 
&ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); @@ -2418,17 +2321,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) * -ENOMEM if memory allocation error */ static int -mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; - int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; @@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); @@ -2659,15 +2554,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * -ENOMEM if memory allocation error */ static int -mptctl_hp_targetinfo(unsigned long arg) +mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) { hp_target_info_t __user *uarg = (void __user *) arg; SCSIDevicePage0_t *pg0_alloc; SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; MPT_SCSI_HOST *hd = NULL; hp_target_info_t karg; - int iocnum; int data_sz; dma_addr_t page_dma; CONFIGPARMS cfg; @@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } if (karg.hdr.id >= MPT_MAX_FC_DEVICES) return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", @@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); - ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); @@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, /* Pass new structure to do_mpt_command */ - ret = mptctl_do_mpt_command (karg, &uarg->MF); + ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 11841f4b7b2ba4c9f1fe845fdf1c8daaf4e539ae..dd938a5d04094e5e2476e33972fa8cafc8078309 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -509,10 +509,10 @@ config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK depends on X86 || COMPILE_TEST + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - select I2C_DESIGNWARE_PLATFORM help Select this option to enable support for Crystal Cove PMIC on some Intel SoC systems. 
The PMIC provides ADC, GPIO, @@ -538,10 +538,10 @@ config INTEL_SOC_PMIC_CHTWC bool "Support for Intel Cherry Trail Whiskey Cove PMIC" depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK depends on X86 || COMPILE_TEST + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - select I2C_DESIGNWARE_PLATFORM help Select this option to enable support for the Intel Cherry Trail Whiskey Cove PMIC found on some Intel Cherry Trail systems. @@ -1403,9 +1403,9 @@ config MFD_TPS65217 config MFD_TPS68470 bool "TI TPS68470 Power Management / LED chips" depends on ACPI && I2C=y + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C - select I2C_DESIGNWARE_PLATFORM help If you say yes here you get support for the TPS68470 series of Power Management / LED chips. diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 30d09d17717179005ecd37202d20ce2f2ea93790..11ab17f64c6491953341c8055d7d23a2a8689c3a 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank, mutex_unlock(&ab8500->lock); dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); - return ret; + return (ret < 0) ? ret : 0; } static int ab8500_get_register(struct device *dev, u8 bank, diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 5f1e37d23943a3986bcb416b42f4170fa8a478c5..a4403a57ddc89631ba934058b2c95688a7e47aff 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona) if (ret != 0) goto err_ref; ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); - if (ret != 0) - goto err_pm; + if (ret != 0) { + pm_runtime_put_sync(arizona->dev); + goto err_ref; + } break; case ARIZONA_32KZ_MCLK2: ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]); @@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona) ARIZONA_CLK_32K_ENA); } -err_pm: - pm_runtime_put_sync(arizona->dev); err_ref: if (ret != 0) arizona->clk32k_ref--; @@ -996,7 +996,7 @@ int arizona_dev_init(struct arizona *arizona) unsigned int reg, val; int (*apply_patch)(struct arizona *) = NULL; const struct mfd_cell *subdevs = NULL; - int n_subdevs, ret, i; + int n_subdevs = 0, ret, i; dev_set_drvdata(arizona->dev, arizona); mutex_init(&arizona->clk_lock); diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 0be511dd93d012b82ff33afae6c779982041895b..f8e0fa97bb31e9ad097ea66039aa6d7cd37bd5a6 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = { static const struct mfd_cell axp223_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp22x_pek_resources), - .resources = axp22x_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp22x_pek_resources), + .resources = axp22x_pek_resources, }, { .name = "axp22x-adc", .of_compatible = "x-powers,axp221-adc", @@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = { .name = "axp20x-battery-power-supply", .of_compatible = "x-powers,axp221-battery-power-supply", }, { - .name = "axp20x-regulator", + .name = "axp20x-regulator", }, { .name = "axp20x-ac-power-supply", .of_compatible = "x-powers,axp221-ac-power-supply", @@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = { static const struct mfd_cell axp152_cells[] = { { - .name = "axp20x-pek", - .num_resources = ARRAY_SIZE(axp152_pek_resources), - .resources = 
axp152_pek_resources, + .name = "axp20x-pek", + .num_resources = ARRAY_SIZE(axp152_pek_resources), + .resources = axp152_pek_resources, }, }; @@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = { static const struct mfd_cell axp288_cells[] = { { - .name = "axp288_adc", - .num_resources = ARRAY_SIZE(axp288_adc_resources), - .resources = axp288_adc_resources, - }, - { - .name = "axp288_extcon", - .num_resources = ARRAY_SIZE(axp288_extcon_resources), - .resources = axp288_extcon_resources, - }, - { - .name = "axp288_charger", - .num_resources = ARRAY_SIZE(axp288_charger_resources), - .resources = axp288_charger_resources, - }, - { - .name = "axp288_fuel_gauge", - .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), - .resources = axp288_fuel_gauge_resources, - }, - { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp288_power_button_resources), - .resources = axp288_power_button_resources, - }, - { - .name = "axp288_pmic_acpi", + .name = "axp288_adc", + .num_resources = ARRAY_SIZE(axp288_adc_resources), + .resources = axp288_adc_resources, + }, { + .name = "axp288_extcon", + .num_resources = ARRAY_SIZE(axp288_extcon_resources), + .resources = axp288_extcon_resources, + }, { + .name = "axp288_charger", + .num_resources = ARRAY_SIZE(axp288_charger_resources), + .resources = axp288_charger_resources, + }, { + .name = "axp288_fuel_gauge", + .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), + .resources = axp288_fuel_gauge_resources, + }, { + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp288_power_button_resources), + .resources = axp288_power_button_resources, + }, { + .name = "axp288_pmic_acpi", }, }; static const struct mfd_cell axp803_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp803_pek_resources), - .resources = axp803_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp803_pek_resources), + .resources = axp803_pek_resources, + }, { + .name = "axp20x-gpio", + .of_compatible = "x-powers,axp813-gpio", + }, { + .name = "axp813-adc", + .of_compatible = "x-powers,axp813-adc", + }, { + .name = "axp20x-battery-power-supply", + .of_compatible = "x-powers,axp813-battery-power-supply", + }, { + .name = "axp20x-ac-power-supply", + .of_compatible = "x-powers,axp813-ac-power-supply", + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), + .resources = axp20x_ac_power_supply_resources, }, - { .name = "axp20x-regulator" }, + { .name = "axp20x-regulator" }, }; static const struct mfd_cell axp806_self_working_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp806_pek_resources), - .resources = axp806_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp806_pek_resources), + .resources = axp806_pek_resources, }, - { .name = "axp20x-regulator" }, + { .name = "axp20x-regulator" }, }; static const struct mfd_cell axp806_cells[] = { { - .id = 2, - .name = "axp20x-regulator", + .id = 2, + .name = "axp20x-regulator", }, }; static const struct mfd_cell axp809_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp809_pek_resources), - .resources = axp809_pek_resources, + .name = "axp221-pek", + .num_resources = ARRAY_SIZE(axp809_pek_resources), + .resources = axp809_pek_resources, }, { - .id = 1, - .name = "axp20x-regulator", + .id = 1, + .name = "axp20x-regulator", }, }; static const struct mfd_cell axp813_cells[] = { { - .name = "axp221-pek", - .num_resources = ARRAY_SIZE(axp803_pek_resources), - .resources = axp803_pek_resources, + .name = 
"axp221-pek", + .num_resources = ARRAY_SIZE(axp803_pek_resources), + .resources = axp803_pek_resources, }, { - .name = "axp20x-regulator", + .name = "axp20x-regulator", }, { - .name = "axp20x-gpio", - .of_compatible = "x-powers,axp813-gpio", + .name = "axp20x-gpio", + .of_compatible = "x-powers,axp813-gpio", }, { - .name = "axp813-adc", - .of_compatible = "x-powers,axp813-adc", + .name = "axp813-adc", + .of_compatible = "x-powers,axp813-adc", }, { .name = "axp20x-battery-power-supply", .of_compatible = "x-powers,axp813-battery-power-supply", + }, { + .name = "axp20x-ac-power-supply", + .of_compatible = "x-powers,axp813-ac-power-supply", + .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), + .resources = axp20x_ac_power_supply_resources, }, }; diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index 503979c81dae11bbd3203f5482e398f0455a2597..fab3cdc27ed64539b7089da302f41b037dad3b1e 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c @@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = { }; static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { + regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC), regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 999dac752bccfd7d6da0c1af1a5a834bd440c2e6..bccde3eac92ca6434815c373369e87087f31a85a 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -263,6 +263,11 @@ static const struct file_operations fops = { #endif }; +static void cros_ec_class_release(struct device *dev) +{ + kfree(to_cros_ec_dev(dev)); +} + static void cros_ec_sensors_register(struct cros_ec_dev *ec) { /* @@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev) int retval = -ENOMEM; struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); - struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); + struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); if (!ec) return retval; @@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev) ec->class_dev.devt = MKDEV(ec_major, pdev->id); ec->class_dev.class = &cros_class; ec->class_dev.parent = dev; + ec->class_dev.release = cros_ec_class_release; retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name); if (retval) { @@ -493,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev) cros_ec_debugfs_remove(ec); + mfd_remove_devices(ec->dev); cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5970b8def5487ea3b7f40296d7da61824905df0e..aec20e1c7d3d517072739daa11d192bea8fdf071 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = { .irq_unmask = prcmu_irq_unmask, }; -static __init char *fw_project_name(u32 project) +static char *fw_project_name(u32 project) { switch (project) { case PRCMU_FW_PROJECT_U8500: @@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size) INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); } -static void __init init_prcm_registers(void) +static void init_prcm_registers(void) { u32 val; diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c index 
96c07fa1802adcce7d48f971410096b4b2a4f479..6693f74aa6ab9a41307b1838ce21011e0fc0fc68 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -112,6 +112,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev) pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base, &hi655x_regmap_config); + if (IS_ERR(pmic->regmap)) + return PTR_ERR(pmic->regmap); regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver); if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) { diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 0e5282fc1467568aa3663be8872b1216d26a77da..c37c8bb860685d5f6e75d70c126018b32d77da8a 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev, info->mem = &pdev->resource[0]; info->irq = pdev->irq; + pdev->d3cold_delay = 0; + /* Probably it is enough to set this for iDMA capable devices only */ pci_set_master(pdev); pci_try_set_mwi(pdev); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 50bffc3382d77fb008eaa994591cb338fa0b5d01..ff3fba16e7359c1fe17c95bf3b0d1994c90049d8 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) { u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; + /* Set the device in reset state */ + writel(0, lpss->priv + LPSS_PRIV_RESETS); + intel_lpss_deassert_reset(lpss); intel_lpss_set_remap_addr(lpss); diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index 15bc052704a6d8ace919278db045c4ba189df156..9ca1f8c015de9466d955a446f7c1cf39e8998bcc 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c @@ -31,8 +31,8 @@ /* Interrupt Status Registers */ #define BXTWC_IRQLVL1 0x4E02 -#define BXTWC_PWRBTNIRQ 0x4E03 +#define BXTWC_PWRBTNIRQ 0x4E03 #define BXTWC_THRM0IRQ 0x4E04 #define BXTWC_THRM1IRQ 0x4E05 #define BXTWC_THRM2IRQ 0x4E06 @@ -47,10 +47,9 @@ /* Interrupt MASK Registers */ #define BXTWC_MIRQLVL1 0x4E0E -#define BXTWC_MPWRTNIRQ 0x4E0F - #define BXTWC_MIRQLVL1_MCHGR BIT(5) +#define BXTWC_MPWRBTNIRQ 0x4E0F #define BXTWC_MTHRM0IRQ 0x4E12 #define BXTWC_MTHRM1IRQ 0x4E13 #define BXTWC_MTHRM2IRQ 0x4E14 @@ -66,9 +65,7 @@ /* Whiskey Cove PMIC share same ACPI ID between different platforms */ #define BROXTON_PMIC_WC_HRV 4 -/* Manage in two IRQ chips since mask registers are not consecutive */ enum bxtwc_irqs { - /* Level 1 */ BXTWC_PWRBTN_LVL1_IRQ = 0, BXTWC_TMU_LVL1_IRQ, BXTWC_THRM_LVL1_IRQ, @@ -77,9 +74,11 @@ enum bxtwc_irqs { BXTWC_CHGR_LVL1_IRQ, BXTWC_GPIO_LVL1_IRQ, BXTWC_CRIT_LVL1_IRQ, +}; - /* Level 2 */ - BXTWC_PWRBTN_IRQ, +enum bxtwc_irqs_pwrbtn { + BXTWC_PWRBTN_IRQ = 0, + BXTWC_UIBTN_IRQ, }; enum bxtwc_irqs_bcu { @@ -113,7 +112,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = { REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)), REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)), REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)), - REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03), +}; + +static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = { + REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01), }; static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = { @@ -125,7 +127,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = { }; static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = { - REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)), + REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20), REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f), 
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f), }; @@ -144,7 +146,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = { .mask_base = BXTWC_MIRQLVL1, .irqs = bxtwc_regmap_irqs, .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs), - .num_regs = 2, + .num_regs = 1, +}; + +static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = { + .name = "bxtwc_irq_chip_pwrbtn", + .status_base = BXTWC_PWRBTNIRQ, + .mask_base = BXTWC_MPWRBTNIRQ, + .irqs = bxtwc_regmap_irqs_pwrbtn, + .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn), + .num_regs = 1, }; static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = { @@ -472,6 +483,16 @@ static int bxtwc_probe(struct platform_device *pdev) return ret; } + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, + BXTWC_PWRBTN_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_pwrbtn, + &pmic->irq_chip_data_pwrbtn); + if (ret) { + dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n"); + return ret; + } + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, BXTWC_TMU_LVL1_IRQ, IRQF_ONESHOT, diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c index 8cfea969b060277dd53633a114b534e3eefdb2e0..45c7d8b9734938625c58890191ab28c806621e4f 100644 --- a/drivers/mfd/madera-core.c +++ b/drivers/mfd/madera-core.c @@ -278,6 +278,7 @@ const struct of_device_id madera_of_match[] = { { .compatible = "cirrus,wm1840", .data = (void *)WM1840 }, {} }; +MODULE_DEVICE_TABLE(of, madera_of_match); EXPORT_SYMBOL_GPL(madera_of_match); static int madera_get_reset_gpio(struct madera *madera) diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 3f554c44752183a917f6229a4bc26ec81a042e00..d1495d76bf2c3d5983903d91d458f217e45f1b95 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -153,12 +153,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata( pd->ono = irq_of_parse_and_map(dev->of_node, 1); - /* - * ToDo: the 'wakeup' member in the platform data is more of a linux - * specfic information. Hence, there is no binding for that yet and - * not parsed here. - */ - return pd; } @@ -246,7 +240,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c, */ /* MAX8997 has a power button input. 
*/ - device_init_wakeup(max8997->dev, pdata->wakeup); + device_init_wakeup(max8997->dev, true); return ret; diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index c63e331738c175b8b6520f8f1e7d0e9779162f3f..d0bf50e3568d7e770835092430e33175a4475601 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -274,9 +274,12 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, mc13xxx->adcflags |= MC13XXX_ADC_WORKING; - mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); + ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); + if (ret) + goto out; - adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2; + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | + MC13XXX_ADC0_CHRGRAWDIV; adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC; /* diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c index 29b7164a823bd7d1f81542ae8ec8518ebfd4aad9..d28ebe7ecd211d87c209f6891468cfc55236d046 100644 --- a/drivers/mfd/menelaus.c +++ b/drivers/mfd/menelaus.c @@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m) static inline void menelaus_rtc_init(struct menelaus_chip *m) { int alarm = (m->client->irq > 0); + int err; /* assume 32KDETEN pin is pulled high */ if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) { @@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m) return; } + m->rtc = devm_rtc_allocate_device(&m->client->dev); + if (IS_ERR(m->rtc)) + return; + + m->rtc->ops = &menelaus_rtc_ops; + /* support RTC alarm; it can issue wakeups */ if (alarm) { if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ, @@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m) menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control); } - m->rtc = rtc_device_register(DRIVER_NAME, - &m->client->dev, - &menelaus_rtc_ops, THIS_MODULE); - if (IS_ERR(m->rtc)) { + err = rtc_register_device(m->rtc); + if (err) { if (alarm) { menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ); device_init_wakeup(&m->client->dev, 0); diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 94e3f32ce935717e97f2e938b433f3673c38f22d..182973df1aed4b7c5f0ce6766f5fb94a2e675ab1 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -179,6 +179,7 @@ static int mfd_add_device(struct device *parent, int id, for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { pdev->dev.of_node = np; + pdev->dev.fwnode = &np->fwnode; break; } } diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index 77b64bd64df36aab2dd805a5dd2e056003c3c6e7..ab24e176ef4487a0f2101dec3496aaa440c1b8c1 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c @@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "unsupported chip: %d\n", id); - ret = -ENODEV; - break; + return -ENODEV; } if (ret) { diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 446713dbee27aa2852079bf3370562a8a92fcc51..93177d85e7a95aad12ac0df2cf7bc0c5020458ff 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -459,7 +459,6 @@ EXPORT_SYMBOL_GPL(omap_tll_disable); MODULE_AUTHOR("Keshava Munegowda "); MODULE_AUTHOR("Roger Quadros "); -MODULE_ALIAS("platform:" USBHS_DRIVER_NAME); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("usb tll driver for TI OMAP EHCI and OHCI controllers"); diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c index 
52fafea06067e17cfd7ea14c0fc033764fc561c9..8d420c37b2a613c87f45970c8abe06390c234c04 100644 --- a/drivers/mfd/qcom_rpm.c +++ b/drivers/mfd/qcom_rpm.c @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev) return -EFAULT; } + writel(fw_version[0], RPM_CTRL_REG(rpm, 0)); + writel(fw_version[1], RPM_CTRL_REG(rpm, 1)); + writel(fw_version[2], RPM_CTRL_REG(rpm, 2)); + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], fw_version[1], fw_version[2]); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index a530972c5a7e939a6b0be066c590a2ad7dccd9fc..e0173bf4b0dc7e15c39fffb278dc100e29eb03b6 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1145,6 +1145,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm, lookup = devm_kzalloc(&pdev->dev, sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), GFP_KERNEL); + if (!lookup) + return -ENOMEM; + lookup->dev_id = "i2c-gpio"; if (iic->pin_sda < 32) lookup->table[0].chip_label = "SM501-LOW"; diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index 7a30546880a4280a1a6e75e1066e89171c63ebce..4660ad90ef556e52b57fd28bda959f2104937ba4 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev) cell->pdata_size = sizeof(tscadc); } - err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, - tscadc->used_cells, NULL, 0, NULL); + err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + tscadc->cells, tscadc->used_cells, NULL, + 0, NULL); if (err < 0) goto err_disable_clk; @@ -294,11 +295,24 @@ static int ti_tscadc_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data) +{ + return device_may_wakeup(dev); +} + static int __maybe_unused tscadc_suspend(struct device *dev) { struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev); regmap_write(tscadc->regmap, REG_SE, 0x00); + if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) { + u32 ctrl; + + regmap_read(tscadc->regmap, REG_CTRL, &ctrl); + ctrl &= ~(CNTRLREG_POWERDOWN); + ctrl |= CNTRLREG_TSCSSENB; + regmap_write(tscadc->regmap, REG_CTRL, ctrl); + } pm_runtime_put_sync(dev); return 0; diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 910f569ff77c1b7313c4c03c6d3a57b92f6c73b4..8bcdecf494d050b5fc69290af8c908369fa9ed79 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client, mutex_init(&tps->tps_lock); - ret = regmap_add_irq_chip(tps->regmap, tps->irq, - IRQF_ONESHOT, 0, &tps65218_irq_chip, - &tps->irq_data); + ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq, + IRQF_ONESHOT, 0, &tps65218_irq_chip, + &tps->irq_data); if (ret < 0) return ret; @@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client, ARRAY_SIZE(tps65218_cells), NULL, 0, regmap_irq_get_domain(tps->irq_data)); - if (ret < 0) - goto err_irq; - - return 0; - -err_irq: - regmap_del_irq_chip(tps->irq, tps->irq_data); - return ret; } -static int tps65218_remove(struct i2c_client *client) -{ - struct tps65218 *tps = i2c_get_clientdata(client); - - regmap_del_irq_chip(tps->irq, tps->irq_data); - - return 0; -} - static const struct i2c_device_id tps65218_id_table[] = { { "tps65218", TPS65218 }, { }, @@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = { .of_match_table = of_tps65218_match_table, }, .probe = tps65218_probe, - .remove = tps65218_remove, .id_table = 
tps65218_id_table, }; diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b893797827410c25450abd76fa47581c5c09d4df..9c7925ca13cf008f6580bad59554eeb86a1b70dd 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client) return 0; } +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + disable_irq(tps6586x->client->irq); + + return 0; +} + +static int __maybe_unused tps6586x_i2c_resume(struct device *dev) +{ + struct tps6586x *tps6586x = dev_get_drvdata(dev); + + if (tps6586x->client->irq) + enable_irq(tps6586x->client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, + tps6586x_i2c_resume); + static const struct i2c_device_id tps6586x_id_table[] = { { "tps6586x", 0 }, { }, @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = { .driver = { .name = "tps6586x", .of_match_table = of_match_ptr(tps6586x_of_match), + .pm = &tps6586x_pm_ops, }, .probe = tps6586x_i2c_probe, .remove = tps6586x_i2c_remove, diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index 3bd75061f7776882065697de527e52938ad350e6..f78be039e4637636b4a119547e9c2aa0d0decc51 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = { { .compatible = "ti,tps65912", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table); static int tps65912_spi_probe(struct spi_device *spi) { diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 4be3d239da9ec8a2db40eea62f608f6559cff5a3..104477b512a296b56e7549f001b63cd1ad9a43ac 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base, * letting it generate the right frequencies for USB, MADC, and * other purposes. 
*/ -static inline int __init protect_pm_master(void) +static inline int protect_pm_master(void) { int e = 0; @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void) return e; } -static inline int __init unprotect_pm_master(void) +static inline int unprotect_pm_master(void) { int e = 0; @@ -1245,6 +1245,28 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) return status; } +static int __maybe_unused twl_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + disable_irq(client->irq); + + return 0; +} + +static int __maybe_unused twl_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); + static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, + .driver.pm = &twl_dev_pm_ops, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove, diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index dd19f17a1b637543965dd94e64d0d44b9178f64c..2b8c479dbfa6e1edb5db688bc0f7fe2112ea1738 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on) } } + /* + * Register access can produce errors after power-up unless we + * wait at least 8ms based on measurements on duovero. + */ + usleep_range(10000, 12000); + /* Sync with the HW */ - regcache_sync(twl6040->regmap); + ret = regcache_sync(twl6040->regmap); + if (ret) { + dev_err(twl6040->dev, "Failed to sync with the HW: %i\n", + ret); + goto out; + } /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 1ee68bd440fbc279874ceaf7ccefc37c692e9f5c..16c6e2accfaa5e49b9ddb53edbf8c234092e110f 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ + { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_ASRC_ENABLE: case ARIZONA_ASRC_STATUS: case ARIZONA_ASRC_RATE1: + case ARIZONA_ASRC_RATE2: case ARIZONA_ISRC_1_CTRL_1: case ARIZONA_ISRC_1_CTRL_2: case ARIZONA_ISRC_1_CTRL_3: diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 3726eacdf65de2470ba83c6a91dc3351f69ea713..74f7c79d509978e543aa9ce0c589cddd7c027f4c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -527,4 +527,5 @@ source "drivers/misc/echo/Kconfig" source "drivers/misc/cxl/Kconfig" source "drivers/misc/ocxl/Kconfig" source "drivers/misc/cardreader/Kconfig" +source "drivers/misc/uacce/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index af22bbc3d00cbcd248c4e10247b87bbb46ecab3f..ebc68dd8a4ed56e5c1bda54d8d20362ebae110ee 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ 
-58,3 +58,4 @@ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ obj-$(CONFIG_MISC_RTSX) += cardreader/ +obj-$(CONFIG_UACCE) += uacce/ diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index ef83a9078646fb5177f2a1da4b28eb03fdae4327..d2ed3b9728b7cbf96562200e489daf6745a3df4e 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2176,8 +2176,7 @@ static int altera_get_note(u8 *p, s32 program_size, key_ptr = &p[note_strings + get_unaligned_be32( &p[note_table + (8 * i)])]; - if ((strncasecmp(key, key_ptr, strlen(key_ptr)) == 0) && - (key != NULL)) { + if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) { status = 0; value_ptr = &p[note_strings + diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/misc/aspeed-lpc-ctrl.c index a024f8042259ae8a778788f9b88ee0fa0bcb66d6..870ab0dfcde0649cdacd45a1fbad2d3963925994 100644 --- a/drivers/misc/aspeed-lpc-ctrl.c +++ b/drivers/misc/aspeed-lpc-ctrl.c @@ -50,7 +50,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma) unsigned long vsize = vma->vm_end - vma->vm_start; pgprot_t prot = vma->vm_page_prot; - if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size) + if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT) return -EINVAL; /* ast2400/2500 AHB accesses are not cache coherent */ diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index b2a0340f277e268739c288e6bb73f62321475c30..d8e3cc2dc7470d8deaa3b89a5855b22b034725db 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = { MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); #endif -static inline const struct atmel_ssc_platform_data * __init +static inline const struct atmel_ssc_platform_data * atmel_ssc_get_driver_data(struct platform_device *pdev) { if (pdev->dev.of_node) { diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 3bc0c15d4d85b7f16e8ad10fa00847c17ee5dc7e..08f4a512afad204b4798b8b6b7ee8ccff5167db1 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter) int i, rc; pr_devel("Adapter reset request\n"); + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { if ((afu = adapter->afu[i])) { pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, @@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter) pci_error_handlers(afu, CXL_RESUME_EVENT, 0); } } + spin_unlock(&adapter->afu_list_lock); return rc; } @@ -1018,8 +1020,6 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n void cxl_guest_remove_afu(struct cxl_afu *afu) { - pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); - if (!afu) return; diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index b66d832d3233b65369a10c843843600d7563529c..787a69a2a726fabc3f9ec83614660723c28057a7 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1807,7 +1807,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, /* There should only be one entry, but go through the list * anyway */ - if (afu->phb == NULL) + if (afu == NULL || afu->phb == NULL) return result; list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { @@ -1834,7 +1834,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, { struct cxl *adapter = pci_get_drvdata(pdev); 
struct cxl_afu *afu; - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result; + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; int i; /* At this point, we could still have an interrupt pending. @@ -1845,6 +1846,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, /* If we're permanently dead, give up. */ if (state == pci_channel_io_perm_failure) { + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; /* @@ -1853,6 +1855,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, */ cxl_vphb_error_detected(afu, state); } + spin_unlock(&adapter->afu_list_lock); return PCI_ERS_RESULT_DISCONNECT; } @@ -1934,11 +1937,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, * * In slot_reset, free the old resources and allocate new ones. * * In resume, clear the flag to allow things to start. */ + + /* Make sure no one else changes the afu list */ + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - afu_result = cxl_vphb_error_detected(afu, state); + if (afu == NULL) + continue; + afu_result = cxl_vphb_error_detected(afu, state); cxl_context_detach_all(afu); cxl_ops->afu_deactivate_mode(afu, afu->current_mode); pci_deconfigure_afu(afu); @@ -1950,6 +1959,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, (result == PCI_ERS_RESULT_NEED_RESET)) result = PCI_ERS_RESULT_NONE; } + spin_unlock(&adapter->afu_list_lock); /* should take the context lock here */ if (cxl_adapter_context_lock(adapter) != 0) @@ -1982,14 +1992,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) */ cxl_adapter_context_unlock(adapter); + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + if (afu == NULL) + continue; + if (pci_configure_afu(afu, adapter, pdev)) - goto err; + goto err_unlock; if (cxl_afu_select_best_mode(afu)) - goto err; + goto err_unlock; if (afu->phb == NULL) continue; @@ -2001,16 +2015,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) ctx = cxl_get_context(afu_dev); if (ctx && cxl_release_context(ctx)) - goto err; + goto err_unlock; ctx = cxl_dev_context_init(afu_dev); if (IS_ERR(ctx)) - goto err; + goto err_unlock; afu_dev->dev.archdata.cxl_ctx = ctx; if (cxl_ops->afu_check_and_enable(afu)) - goto err; + goto err_unlock; afu_dev->error_state = pci_channel_io_normal; @@ -2031,8 +2045,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } } + + spin_unlock(&adapter->afu_list_lock); return result; +err_unlock: + spin_unlock(&adapter->afu_list_lock); + err: /* All the bits that happen in both error_detected and cxl_remove * should be idempotent, so we don't need to worry about leaving a mix @@ -2053,10 +2072,11 @@ static void cxl_pci_resume(struct pci_dev *pdev) * This is not the place to be checking if everything came back up * properly, because there's no return value: do that in slot_reset. 
*/ + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - if (afu->phb == NULL) + if (afu == NULL || afu->phb == NULL) continue; list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { @@ -2065,6 +2085,7 @@ static void cxl_pci_resume(struct pci_dev *pdev) afu_dev->driver->err_handler->resume(afu_dev); } } + spin_unlock(&adapter->afu_list_lock); } static const struct pci_error_handlers cxl_err_handler = { diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 68a1ac92991712102ad14de75bb6c35370354256..d382b13c27dd934b5a8d1cd8799df3ce4fb73e2f 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -13,7 +13,7 @@ config EEPROM_AT24 ones like at24c64, 24lc02 or fm24c04: 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08, - 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024 + 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048 Unless you like data loss puzzles, always be sure that any chip you configure as a 24c32 (32 kbit) or larger is NOT really a diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 7e50e1d6f58c22b914f39ae7f5e419a53d689b37..dc3537651b807a7d4e1d1ee22a0a81acfccf8c0a 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -106,23 +106,6 @@ static unsigned int at24_write_timeout = 25; module_param_named(write_timeout, at24_write_timeout, uint, 0); MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)"); -/* - * Both reads and writes fail if the previous write didn't complete yet. This - * macro loops a few times waiting at least long enough for one entire page - * write to work while making sure that at least one iteration is run before - * checking the break condition. - * - * It takes two parameters: a variable in which the future timeout in jiffies - * will be stored and a temporary variable holding the time of the last - * iteration of processing the request. Both should be unsigned integers - * holding at least 32 bits. - */ -#define at24_loop_until_timeout(tout, op_time) \ - for (tout = jiffies + msecs_to_jiffies(at24_write_timeout), \ - op_time = 0; \ - op_time ? time_before(op_time, tout) : true; \ - usleep_range(1000, 1500), op_time = jiffies) - struct at24_chip_data { /* * these fields mirror their equivalents in @@ -173,6 +156,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16); AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16); +AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16); /* identical to 24c08 ? 
*/ AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0); @@ -199,6 +183,7 @@ static const struct i2c_device_id at24_ids[] = { { "24c256", (kernel_ulong_t)&at24_data_24c256 }, { "24c512", (kernel_ulong_t)&at24_data_24c512 }, { "24c1024", (kernel_ulong_t)&at24_data_24c1024 }, + { "24c2048", (kernel_ulong_t)&at24_data_24c2048 }, { "at24", 0 }, { /* END OF LIST */ } }; @@ -227,6 +212,7 @@ static const struct of_device_id at24_of_match[] = { { .compatible = "atmel,24c256", .data = &at24_data_24c256 }, { .compatible = "atmel,24c512", .data = &at24_data_24c512 }, { .compatible = "atmel,24c1024", .data = &at24_data_24c1024 }, + { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 }, { /* END OF LIST */ }, }; MODULE_DEVICE_TABLE(of, at24_of_match); @@ -308,13 +294,22 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf, /* adjust offset for mac and serial read ops */ offset += at24->offset_adj; - at24_loop_until_timeout(timeout, read_time) { + timeout = jiffies + msecs_to_jiffies(at24_write_timeout); + do { + /* + * The timestamp shall be taken before the actual operation + * to avoid a premature timeout in case of high CPU load. + */ + read_time = jiffies; + ret = regmap_bulk_read(regmap, offset, buf, count); dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", count, offset, ret, jiffies); if (!ret) return count; - } + + usleep_range(1000, 1500); + } while (time_before(read_time, timeout)); return -ETIMEDOUT; } @@ -358,14 +353,23 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf, regmap = at24_client->regmap; client = at24_client->client; count = at24_adjust_write_count(at24, offset, count); + timeout = jiffies + msecs_to_jiffies(at24_write_timeout); + + do { + /* + * The timestamp shall be taken before the actual operation + * to avoid a premature timeout in case of high CPU load. 
+ */ + write_time = jiffies; - at24_loop_until_timeout(timeout, write_time) { ret = regmap_bulk_write(regmap, offset, buf, count); dev_dbg(&client->dev, "write %zu@%d --> %d (%ld)\n", count, offset, ret, jiffies); if (!ret) return count; - } + + usleep_range(1000, 1500); + } while (time_before(write_time, timeout)); return -ETIMEDOUT; } @@ -720,7 +724,7 @@ static int at24_probe(struct i2c_client *client) nvmem_config.name = dev_name(dev); nvmem_config.dev = dev; nvmem_config.read_only = !writable; - nvmem_config.root_only = true; + nvmem_config.root_only = !(pdata.flags & AT24_FLAG_IRUGO); nvmem_config.owner = THIS_MODULE; nvmem_config.compat = true; nvmem_config.base_dev = dev; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 5a17bfeb80d398c023278ff9f0ed691d6785ef53..471263ffd9c92d1afaaad170f06e0e29f8d96f78 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -419,10 +419,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) cdev = &edev->component[i]; if (cdev->dev == dev) { enclosure_remove_links(cdev); - device_del(&cdev->cdev); put_device(dev); cdev->dev = NULL; - return device_add(&cdev->cdev); + return 0; } } return -ENODEV; diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 120738d6e58b244626879346091807502177af19..77ed3967c5b002113db515769c11b8b6a3c79bf8 100644 --- a/drivers/misc/genwqe/card_base.h +++ b/drivers/misc/genwqe/card_base.h @@ -408,7 +408,7 @@ struct genwqe_file { struct file *filp; struct fasync_struct *async_queue; - struct task_struct *owner; + struct pid *opener; struct list_head list; /* entry in list of open files */ spinlock_t map_lock; /* lock for dma_mappings */ diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index f453ab82f0d7be6a2269593e0564d4e0427373f8..d2098b4d29451e759d1e9a6a1ccd215a36c25c03 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile) { unsigned long flags; - cfile->owner = current; + cfile->opener = get_pid(task_tgid(current)); spin_lock_irqsave(&cd->file_lock, flags); list_add(&cfile->list, &cd->file_list); spin_unlock_irqrestore(&cd->file_lock, flags); @@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile) spin_lock_irqsave(&cd->file_lock, flags); list_del(&cfile->list); spin_unlock_irqrestore(&cd->file_lock, flags); + put_pid(cfile->opener); return 0; } @@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig) return files; } -static int genwqe_force_sig(struct genwqe_dev *cd, int sig) +static int genwqe_terminate(struct genwqe_dev *cd) { unsigned int files = 0; unsigned long flags; @@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig) spin_lock_irqsave(&cd->file_lock, flags); list_for_each_entry(cfile, &cd->file_list, list) { - force_sig(sig, cfile->owner); + kill_pid(cfile->opener, SIGKILL, 1); files++; } spin_unlock_irqrestore(&cd->file_lock, flags); @@ -779,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) if ((m->addr == 0x0) || (m->size == 0)) return -EINVAL; + if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK)) + return -EINVAL; map_addr = (m->addr & PAGE_MASK); map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); @@ -1352,7 +1355,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) dev_warn(&pci_dev->dev, "[%s] send 
SIGKILL and wait ...\n", __func__); - rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ + rc = genwqe_terminate(cd); if (rc) { /* Give kill_timout more seconds to end processes */ for (i = 0; (i < GENWQE_KILL_TIMEOUT) && diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 8679e0bd8ec281eb34e457d565f6c2f17f3ef032..22301bba8c495b8dbf34d87d8ccbeee59b6d8123 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init) void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, dma_addr_t *dma_handle) { - if (get_order(size) > MAX_ORDER) + if (get_order(size) >= MAX_ORDER) return NULL; return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, @@ -298,7 +298,7 @@ static int genwqe_sgl_size(int num_pages) int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, void __user *user_addr, size_t user_size, int write) { - int rc; + int ret = -ENOMEM; struct pci_dev *pci_dev = cd->pci_dev; sgl->fpage_offs = offset_in_page((unsigned long)user_addr); @@ -318,7 +318,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (get_order(sgl->sgl_size) > MAX_ORDER) { dev_err(&pci_dev->dev, "[%s] err: too much memory requested!\n", __func__); - return -ENOMEM; + return ret; } sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, @@ -326,7 +326,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (sgl->sgl == NULL) { dev_err(&pci_dev->dev, "[%s] err: no memory available!\n", __func__); - return -ENOMEM; + return ret; } /* Only use buffering on incomplete pages */ @@ -339,7 +339,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, /* Sync with user memory */ if (copy_from_user(sgl->fpage + sgl->fpage_offs, user_addr, sgl->fpage_size)) { - rc = -EFAULT; + ret = -EFAULT; goto err_out; } } @@ -352,7 +352,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, /* Sync with user memory */ if (copy_from_user(sgl->lpage, user_addr + user_size - sgl->lpage_size, sgl->lpage_size)) { - rc = -EFAULT; + ret = -EFAULT; goto err_out2; } } @@ -374,7 +374,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, sgl->sgl = NULL; sgl->sgl_dma_addr = 0; sgl->sgl_size = 0; - return -ENOMEM; + + return ret; } int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, @@ -587,6 +588,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, /* determine space needed for page_list. 
*/ data = (unsigned long)uaddr; offs = offset_in_page(data); + if (size > ULONG_MAX - PAGE_SIZE - offs) { + m->size = 0; /* mark unused and not added */ + return -EINVAL; + } m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE); m->page_list = kcalloc(m->nr_pages, diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index e914b8c80943316a270ac3ba9e64bcc1addbc979..9f8344169845ced9421fec3f64e21aaccee454ac 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c @@ -125,7 +125,7 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) result = ibmasm_init_remote_input_dev(sp); if (result) { dev_err(sp->dev, "Failed to initialize remote queue\n"); - goto error_send_message; + goto error_init_remote; } result = ibmasm_send_driver_vpd(sp); @@ -145,8 +145,9 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error_send_message: - disable_sp_interrupts(sp->base_address); ibmasm_free_remote_input_dev(sp); +error_init_remote: + disable_sp_interrupts(sp->base_address); free_irq(sp->irq, (void *)sp); error_request_irq: iounmap(sp->base_address); diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index b8aaa684c397b0b8be8fe0c5ae00a37b087b6997..2ed23c99f59fdbebab6cc49b032e04fa5f0c020c 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, * * Return: * 0 - Success + * Non-zero - Failure */ static int ibmvmc_open(struct inode *inode, struct file *file) { struct ibmvmc_file_session *session; - int rc = 0; pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, (unsigned long)inode, (unsigned long)file, ibmvmc.state); session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + session->file = file; file->private_data = session; - return rc; + return 0; } /** diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 6193270e7b3dcabf9c4f2befac666678b5cdb00d..8b01257783dd861569beb62db90a38878eed03ee 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -985,6 +985,12 @@ static void kgdbts_run_tests(void) int nmi_sleep = 0; int i; + verbose = 0; + if (strstr(config, "V1")) + verbose = 1; + if (strstr(config, "V2")) + verbose = 2; + ptr = strchr(config, 'F'); if (ptr) fork_test = simple_strtol(ptr + 1, NULL, 10); @@ -1068,13 +1074,6 @@ static int kgdbts_option_setup(char *opt) return -ENOSPC; } strcpy(config, opt); - - verbose = 0; - if (strstr(config, "V1")) - verbose = 1; - if (strstr(config, "V2")) - verbose = 2; - return 0; } @@ -1086,9 +1085,6 @@ static int configure_kgdbts(void) if (!strlen(config) || isspace(config[0])) goto noconfig; - err = kgdbts_option_setup(config); - if (err) - goto noconfig; final_ack = 0; run_plant_and_detach_test(1); @@ -1139,7 +1135,7 @@ static void kgdbts_put_char(u8 chr) static int param_set_kgdbts_var(const char *kmessage, const struct kernel_param *kp) { - int len = strlen(kmessage); + size_t len = strlen(kmessage); if (len >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); @@ -1159,7 +1155,7 @@ static int param_set_kgdbts_var(const char *kmessage, strcpy(config, kmessage); /* Chop out \n char as a result of echo */ - if (config[len - 1] == '\n') + if (len && config[len - 1] == '\n') config[len - 1] = '\0'; /* Go and configure with the new params. 
*/ diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 3370a4138e942621a008573b1fa1ad6912b1c75c..cce47a15a79f288f3ba5b28951da482cdf89b8a7 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -13,8 +13,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --set-section-flags .text=alloc,readonly \ - --rename-section .text=.rodata + --rename-section .text=.rodata,alloc,readonly,load targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 2154d1bfd18b610b60f0fc8409cf7bf8919f2aad..07caaa2cfe1e4a40399ed08082c53b2fe18b7c7d 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(EXEC_VMALLOC), CRASHTYPE(EXEC_RODATA), CRASHTYPE(EXEC_USERSPACE), + CRASHTYPE(EXEC_NULL), CRASHTYPE(ACCESS_USERSPACE), + CRASHTYPE(ACCESS_NULL), CRASHTYPE(WRITE_RO), CRASHTYPE(WRITE_RO_AFTER_INIT), CRASHTYPE(WRITE_KERN), diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 9e513dcfd8093613dd767765829158aa3addc810..8c3f2e6af256cf3c24fabd4df4dbf706d3b2d716 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void); void lkdtm_EXEC_VMALLOC(void); void lkdtm_EXEC_RODATA(void); void lkdtm_EXEC_USERSPACE(void); +void lkdtm_EXEC_NULL(void); void lkdtm_ACCESS_USERSPACE(void); +void lkdtm_ACCESS_NULL(void); /* lkdtm_refcount.c */ void lkdtm_REFCOUNT_INC_OVERFLOW(void); diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index 53b85c9d16b89247d0fa66d0dce5192d71f9c63e..62f76d506f0405443eeb1804b220e803339d9378 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write) { void (*func)(void) = dst; - pr_info("attempting ok execution at %p\n", do_nothing); + pr_info("attempting ok execution at %px\n", do_nothing); do_nothing(); if (write == CODE_WRITE) { @@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write) flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); } - pr_info("attempting bad execution at %p\n", func); + pr_info("attempting bad execution at %px\n", func); func(); } @@ -66,14 +66,14 @@ static void execute_user_location(void *dst) /* Intentionally crossing kernel/user memory boundary. */ void (*func)(void) = dst; - pr_info("attempting ok execution at %p\n", do_nothing); + pr_info("attempting ok execution at %px\n", do_nothing); do_nothing(); copied = access_process_vm(current, (unsigned long)dst, do_nothing, EXEC_SIZE, FOLL_WRITE); if (copied < EXEC_SIZE) return; - pr_info("attempting bad execution at %p\n", func); + pr_info("attempting bad execution at %px\n", func); func(); } @@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void) /* Explicitly cast away "const" for the test. 
*/ unsigned long *ptr = (unsigned long *)&rodata; - pr_info("attempting bad rodata write at %p\n", ptr); + pr_info("attempting bad rodata write at %px\n", ptr); *ptr ^= 0xabcd1234; } @@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void) return; } - pr_info("attempting bad ro_after_init write at %p\n", ptr); + pr_info("attempting bad ro_after_init write at %px\n", ptr); *ptr ^= 0xabcd1234; } @@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void) size = (unsigned long)do_overwritten - (unsigned long)do_nothing; ptr = (unsigned char *)do_overwritten; - pr_info("attempting bad %zu byte write at %p\n", size, ptr); + pr_info("attempting bad %zu byte write at %px\n", size, ptr); memcpy(ptr, (unsigned char *)do_nothing, size); flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); @@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void) vm_munmap(user_addr, PAGE_SIZE); } +void lkdtm_EXEC_NULL(void) +{ + execute_location(NULL, CODE_AS_IS); +} + void lkdtm_ACCESS_USERSPACE(void) { unsigned long user_addr, tmp = 0; @@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void) ptr = (unsigned long *)user_addr; - pr_info("attempting bad read at %p\n", ptr); + pr_info("attempting bad read at %px\n", ptr); tmp = *ptr; tmp += 0xc0dec0de; - pr_info("attempting bad write at %p\n", ptr); + pr_info("attempting bad write at %px\n", ptr); *ptr = tmp; vm_munmap(user_addr, PAGE_SIZE); } +void lkdtm_ACCESS_NULL(void) +{ + unsigned long tmp; + unsigned long *ptr = (unsigned long *)NULL; + + pr_info("attempting bad read at %px\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + + pr_info("attempting bad write at %px\n", ptr); + *ptr = tmp; +} + void __init lkdtm_perms_init(void) { /* Make sure we can write to __ro_after_init values during __init */ diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index a6f41f96f2a163a911acbba5e346b5b55e6d0145..198e030e5b3d543ab2d225776b41adce3017b791 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -214,13 +214,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; + /* No need to enable the client if nothing is needed from it */ + if (!cldev->bus->fw_f_fw_ver_supported && + !cldev->bus->hbm_f_os_supported) + return; + ret = mei_cldev_enable(cldev); if (ret) return; - ret = mei_fwver(cldev); - if (ret < 0) - dev_err(&cldev->dev, "FW version command failed %d\n", ret); + if (cldev->bus->fw_f_fw_ver_supported) { + ret = mei_fwver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "FW version command failed %d\n", + ret); + } if (cldev->bus->hbm_f_os_supported) { ret = mei_osver(cldev); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index fc3872fe7b2510e1fa862ea508be53ad377a4bfd..bb2e1387b119788f752710ce697a4077f29caa23 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -541,17 +541,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev) goto out; } - if (!mei_cl_bus_module_get(cldev)) { - dev_err(&cldev->dev, "get hw module failed"); - ret = -ENODEV; - goto out; - } - ret = mei_cl_connect(cl, cldev->me_cl, NULL); - if (ret < 0) { + if (ret < 0) dev_err(&cldev->dev, "cannot connect\n"); - mei_cl_bus_module_put(cldev); - } out: mutex_unlock(&bus->device_lock); @@ -614,7 +606,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev) if (err < 0) dev_err(bus->dev, "Could not disconnect from the ME client\n"); - mei_cl_bus_module_put(cldev); out: /* Flush queues and remove any pending read */ mei_cl_flush_queues(cl, NULL); @@ -725,9 +716,16 @@ static int mei_cl_device_probe(struct device *dev) if (!id) 
return -ENODEV; + if (!mei_cl_bus_module_get(cldev)) { + dev_err(&cldev->dev, "get hw module failed"); + return -ENODEV; + } + ret = cldrv->probe(cldev, id); - if (ret) + if (ret) { + mei_cl_bus_module_put(cldev); return ret; + } __module_get(THIS_MODULE); return 0; @@ -755,6 +753,7 @@ static int mei_cl_device_remove(struct device *dev) mei_cldev_unregister_callbacks(cldev); + mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); dev->driver = NULL; return ret; @@ -884,15 +883,16 @@ static const struct device_type mei_cl_device_type = { /** * mei_cl_bus_set_name - set device name for me client device + * - + * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb * * @cldev: me client device */ static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) { - dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X", - cldev->name, - mei_me_cl_uuid(cldev->me_cl), - mei_me_cl_ver(cldev->me_cl)); + dev_set_name(&cldev->dev, "%s-%pUl", + dev_name(cldev->bus->dev), + mei_me_cl_uuid(cldev->me_cl)); } /** diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e56f3e72d57a06cc4a9c2e8358bd3359113524c8..d39cc2909474228b0e47be168e2e8e33663f8c4f 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -986,29 +986,36 @@ static void mei_hbm_config_features(struct mei_device *dev) dev->version.minor_version >= HBM_MINOR_VERSION_PGI) dev->hbm_f_pg_supported = 1; + dev->hbm_f_dc_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) dev->hbm_f_dc_supported = 1; + dev->hbm_f_ie_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) dev->hbm_f_ie_supported = 1; /* disconnect on connect timeout instead of link reset */ + dev->hbm_f_dot_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) dev->hbm_f_dot_supported = 1; /* Notification Event Support */ + dev->hbm_f_ev_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) dev->hbm_f_ev_supported = 1; /* Fixed Address Client Support */ + dev->hbm_f_fa_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) dev->hbm_f_fa_supported = 1; /* OS ver message Support */ + dev->hbm_f_os_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) dev->hbm_f_os_supported = 1; /* DMA Ring Support */ + dev->hbm_f_dr_supported = 0; if (dev->version.major_version > HBM_MAJOR_VERSION_DR || (dev->version.major_version == HBM_MAJOR_VERSION_DR && dev->version.minor_version >= HBM_MINOR_VERSION_DR)) diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index e4b10b2d1a0838af03f16f29922b86c653cf99ba..9c40424200224225592227b9d7fb1d4aae8a084c 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -127,6 +127,8 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ + #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ @@ -137,6 +139,17 @@ #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ +#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */ +#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */ +#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */ + +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ + +#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ + +#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ +#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ + /* 
* MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 0759c3a668de7a7fc38968be27c619672c8f6ece..60c8c84181a97acb0b2fba6ac00a7b41e9b583e8 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1368,6 +1368,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) #define MEI_CFG_FW_SPS \ .quirk_probe = mei_me_fw_type_sps +#define MEI_CFG_FW_VER_SUPP \ + .fw_ver_supported = 1 #define MEI_CFG_ICH_HFS \ .fw_status.count = 0 @@ -1405,31 +1407,41 @@ static const struct mei_cfg mei_me_ich10_cfg = { MEI_CFG_ICH10_HFS, }; -/* PCH devices */ -static const struct mei_cfg mei_me_pch_cfg = { +/* PCH6 devices */ +static const struct mei_cfg mei_me_pch6_cfg = { MEI_CFG_PCH_HFS, }; +/* PCH7 devices */ +static const struct mei_cfg mei_me_pch7_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, +}; + /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_NM, }; /* PCH8 Lynx Point and newer devices */ static const struct mei_cfg mei_me_pch8_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ static const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_SPS, }; /* Cannon Lake and newer devices */ static const struct mei_cfg mei_me_pch12_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_DMA_128, }; @@ -1441,7 +1453,8 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_UNDEF_CFG] = NULL, [MEI_ME_ICH_CFG] = &mei_me_ich_cfg, [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg, - [MEI_ME_PCH_CFG] = &mei_me_pch_cfg, + [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg, + [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, @@ -1480,6 +1493,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; + return dev; } diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index bbcc5fc106cdf46418acd2dcd4adfe6f0c1f5383..77597133978952cc805368dc25713236884d17ab 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -32,11 +32,13 @@ * @fw_status: FW status * @quirk_probe: device exclusion quirk * @dma_size: device DMA buffers size + * @fw_ver_supported: is fw version retrievable from FW */ struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(struct pci_dev *pdev); size_t dma_size[DMA_DSCR_NUM]; + u32 fw_ver_supported:1; }; @@ -74,7 +76,8 @@ struct mei_me_hw { * @MEI_ME_UNDEF_CFG: Lower sentinel. * @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices. * @MEI_ME_ICH10_CFG: I/O Controller Hub platforms Gen10 - * @MEI_ME_PCH_CFG: Platform Controller Hub platforms (Up to Gen8). + * @MEI_ME_PCH6_CFG: Platform Controller Hub platforms (Gen6). + * @MEI_ME_PCH7_CFG: Platform Controller Hub platforms (Gen7). * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations * with quirk for Node Manager exclusion. 
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer @@ -89,7 +92,8 @@ enum mei_cfg_idx { MEI_ME_UNDEF_CFG, MEI_ME_ICH_CFG, MEI_ME_ICH10_CFG, - MEI_ME_PCH_CFG, + MEI_ME_PCH6_CFG, + MEI_ME_PCH7_CFG, MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 377397e1b5a5bfd08f1a02a39ceeef3ffe3b6d03..fc7a5e3fbfcd16349c5ef58edd274ef8b22e401b 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -422,6 +422,8 @@ struct mei_fw_version { * * @fw_ver : FW versions * + * @fw_f_fw_ver_supported : fw feature: fw version supported + * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients * @me_clients_map : FW clients bit map @@ -500,6 +502,8 @@ struct mei_device { struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + unsigned int fw_f_fw_ver_supported:1; + struct rw_semaphore me_clients_rwsem; struct list_head me_clients; DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index ea4e152270a3b0270b1dc23edd8c3620af0ffd26..41a10e392839ec713ce13f1bb9e93b973d05a81b 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -70,13 +70,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)}, @@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, @@ -103,6 +105,17 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mic/scif/scif_fence.c 
b/drivers/misc/mic/scif/scif_fence.c index cac3bcc308a7ebfc99bc40522389587060b251f7..7bb929f05d85227b1f6b16f5d037ac58ad3e2200 100644 --- a/drivers/misc/mic/scif/scif_fence.c +++ b/drivers/misc/mic/scif/scif_fence.c @@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val) dma_fail: if (!x100) dma_pool_free(ep->remote_dev->signal_pool, status, - status->src_dma_addr); + src - offsetof(struct scif_status, val)); alloc_fail: return err; } diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index c824329f7012adfc765e4b1872185f5b148fd445..0e4193cb08cf1ac9d3f095810c082e5c3298775b 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev, if (err) goto error_window; err = scif_map_page(&window->num_pages_lookup.lookup[j], - vmalloc_dma_phys ? + vmalloc_num_pages ? vmalloc_to_page(&window->num_pages[i]) : virt_to_page(&window->num_pages[i]), remote_dev); diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 3633202e18f4f19c3bd9074c8e968fdbb8f20b32..de7f035a176df85edccf14e9e3e9c47b9a9ffde6 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -563,6 +563,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, int ret = -1; if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { + struct device *dev = get_device(&vdev->vdev.dev); + dev_dbg(&vpdev->dev, "%s %d config_change %d type %d vdev %p\n", __func__, __LINE__, @@ -574,7 +576,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, iowrite8(-1, &dc->h2c_vdev_db); if (status & VIRTIO_CONFIG_S_DRIVER_OK) wait_for_completion(&vdev->reset_done); - put_device(&vdev->vdev.dev); + put_device(dev); iowrite8(1, &dc->guest_ack); dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", __func__, __LINE__, ioread8(&dc->guest_ack)); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 2e30de9c694acf86fb8cbdcfe7da1f43e6981170..8f2c5d8bd2eee84302c0120a5f1ba06d5e5a2c5b 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev, u32 val; int rc, templ_major, templ_minor, len; - pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx); + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu_idx); rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val); if (rc) return rc; @@ -316,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn, if (rc) return rc; ptr = (u32 *) &afu->name[i]; - *ptr = val; + *ptr = le32_to_cpu((__force __le32) val); } afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */ return 0; diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 31695a078485abaf1b41703938b1bce072720de9..646d16450066f5bf3aece9a853326dcc00c02482 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) mutex_lock(&spa->spa_lock); - pe->tid = tid; + pe->tid = cpu_to_be32(tid); /* * The barrier makes sure the PE is updated diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 896e2df9400fd2f3b9fa6c238cd70e68f69edf0a..fd33a3b9c66f6248ae831972b6111ed6e769f310 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -662,6 +662,7 @@ static int 
pci_endpoint_test_probe(struct pci_dev *pdev, data = (struct pci_endpoint_test_data *)ent->driver_data; if (data) { test_reg_bar = data->test_reg_bar; + test->test_reg_bar = test_reg_bar; test->alignment = data->alignment; irq_type = data->irq_type; } diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 93be82fc338ad8b2c3e454dad1e1491f20d47c1c..16df731e63c5691be19850a32295759474122c88 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -661,6 +661,7 @@ int gru_handle_user_call_os(unsigned long cb) if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB) return -EINVAL; +again: gts = gru_find_lock_gts(cb); if (!gts) return -EINVAL; @@ -669,7 +670,11 @@ int gru_handle_user_call_os(unsigned long cb) if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + goto again; + } /* * CCH may contain stale data if ts_force_cch_reload is set. @@ -887,7 +892,11 @@ int gru_set_context_option(unsigned long arg) } else { gts->ts_user_blade_id = req.val1; gts->ts_user_chiplet_id = req.val0; - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + gru_unlock_gts(gts); + gru_unload_context(gts, 1); + return ret; + } } break; case sco_gseg_owner: diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 313da31502626897a61a65606aa117d0d67dc83b..1540a7785e14743ae1b035aeb21d391af8516050 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c @@ -27,6 +27,9 @@ #include #include #include + +#include + #include "gru.h" #include "grutables.h" #include "gruhandles.h" @@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg) /* Currently, only dump by gid is implemented */ if (req.gid >= gru_max_gids) return -EINVAL; + req.gid = array_index_nospec(req.gid, gru_max_gids); gru = GID_TO_GRU(req.gid); ubuf = req.buf; diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index ab174f28e3befe9e19efcb3aa5b3f60e3e386e6e..8c3e0317c115b8d70de9dfb12e77486ec7608e7f 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -729,9 +729,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru, * chiplet. Misassignment can occur if the process migrates to a different * blade or if the user changes the selected blade/chiplet. */ -void gru_check_context_placement(struct gru_thread_state *gts) +int gru_check_context_placement(struct gru_thread_state *gts) { struct gru_state *gru; + int ret = 0; /* * If the current task is the context owner, verify that the @@ -739,15 +740,23 @@ void gru_check_context_placement(struct gru_thread_state *gts) * references. Pthread apps use non-owner references to the CBRs. */ gru = gts->ts_gru; + /* + * If gru or gts->ts_tgid_owner isn't initialized properly, return + * success to indicate that the caller does not need to unload the + * gru context.The caller is responsible for their inspection and + * reinitialization if needed. 
+ */ if (!gru || gts->ts_tgid_owner != current->tgid) - return; + return ret; if (!gru_check_chiplet_assignment(gru, gts)) { STAT(check_context_unload); - gru_unload_context(gts, 1); + ret = -EINVAL; } else if (gru_retarget_intr(gts)) { STAT(check_context_retarget_intr); } + + return ret; } @@ -947,7 +956,12 @@ vm_fault_t gru_fault(struct vm_fault *vmf) mutex_lock(>s->ts_ctxlock); preempt_disable(); - gru_check_context_placement(gts); + if (gru_check_context_placement(gts)) { + preempt_enable(); + mutex_unlock(>s->ts_ctxlock); + gru_unload_context(gts, 1); + return VM_FAULT_NOPAGE; + } if (!gts->ts_gru) { STAT(load_user_context); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 3e041b6f7a6833b0546acaad20b1af8801317223..2becf4c3f7cabb2384eea0333d46586e62fa1188 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -652,7 +652,7 @@ extern int gru_user_flush_tlb(unsigned long arg); extern int gru_user_unload_context(unsigned long arg); extern int gru_get_exception_detail(unsigned long arg); extern int gru_set_context_option(unsigned long address); -extern void gru_check_context_placement(struct gru_thread_state *gts); +extern int gru_check_context_placement(struct gru_thread_state *gts); extern int gru_cpu_fault_map_id(void); extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); extern void gru_flush_all_tlb(struct gru_state *gru); diff --git a/drivers/misc/uacce/Kconfig b/drivers/misc/uacce/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..f0d02be5a057a7ef539f6fa103f632662b939988 --- /dev/null +++ b/drivers/misc/uacce/Kconfig @@ -0,0 +1,14 @@ +menuconfig UACCE + tristate "Accelerator Framework for User Land" + depends on IOMMU_API + select ANON_INODES + help + UACCE provides interface for the user process to access the hardware + without interaction with the kernel space in data path. + + The user-space interface is described in + include/uapi/misc/uacce/uacce.h + + See Documentation/misc-devices/uacce.rst for more details. + + If you don't know what to do here, say N. diff --git a/drivers/misc/uacce/Makefile b/drivers/misc/uacce/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5b4374e8b5f213e2ef7441c969b4982a51fb89ee --- /dev/null +++ b/drivers/misc/uacce/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +obj-$(CONFIG_UACCE) += uacce.o diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c new file mode 100644 index 0000000000000000000000000000000000000000..dc3aaae307527c398474678ae5867e8405723621 --- /dev/null +++ b/drivers/misc/uacce/uacce.c @@ -0,0 +1,1360 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018 HiSilicon Limited. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct class *uacce_class; +static DEFINE_IDR(uacce_idr); +static dev_t uacce_devt; + +static DECLARE_RWSEM(uacce_qs_lock); +#define uacce_qs_rlock() down_read(&uacce_qs_lock) +#define uacce_qs_runlock() up_read(&uacce_qs_lock) +#define uacce_qs_wlock() down_write(&uacce_qs_lock) +#define uacce_qs_wunlock() up_write(&uacce_qs_lock) + +#define UACCE_RESET_DELAY_MS 10 +#define UACCE_FROM_CDEV_ATTR(dev) container_of(dev, struct uacce, dev) + +static const struct file_operations uacce_fops; +static void uacce_put_queue(struct uacce_queue *q); + +/* match with enum uacce_qfrt */ +static const char *const qfrt_str[] = { + "mmio", + "dus", + "ss", + "invalid" +}; + +static int cdev_get(struct device *dev, void *data) +{ + struct uacce *uacce; + struct device **t_dev = data; + + uacce = UACCE_FROM_CDEV_ATTR(dev); + if (uacce->pdev == *t_dev) { + *t_dev = dev; + return 1; + } + + return 0; +} + +/** + * dev_to_uacce - Get structure uacce from its device + * @dev the device + */ +struct uacce *dev_to_uacce(struct device *dev) +{ + struct device **tdev = &dev; + int ret; + + ret = class_for_each_device(uacce_class, NULL, tdev, cdev_get); + if (ret) { + dev = *tdev; + return UACCE_FROM_CDEV_ATTR(dev); + } + return NULL; +} +EXPORT_SYMBOL_GPL(dev_to_uacce); + +/** + * uacce_hw_err_isolate - Try to isolate the uacce device with its VFs + * according to user's configuration of isolation strategy. Warning: this + * API should be called while there is no user on the device, or the users + * on this device are suspended by slot resetting preparation of PCI AER. + * @uacce the uacce device + */ +int uacce_hw_err_isolate(struct uacce *uacce) +{ + struct uacce_hw_err *err, *tmp, *hw_err; + struct uacce_err_isolate *isolate; + u32 count = 0; + + if (!uacce) + return -EINVAL; + isolate = uacce->isolate; + +#define SECONDS_PER_HOUR 3600 + + /* all the hw errs are processed by PF driver */ + if (uacce->is_vf || atomic_read(&isolate->is_isolate) || + !isolate->hw_err_isolate_hz) + return 0; + + hw_err = kzalloc(sizeof(*hw_err), GFP_ATOMIC); + if (!hw_err) + return -ENOMEM; + hw_err->tick_stamp = jiffies; + list_for_each_entry_safe(err, tmp, &isolate->hw_errs, list) { + if ((hw_err->tick_stamp - err->tick_stamp) / HZ > + SECONDS_PER_HOUR) { + list_del(&err->list); + kfree(err); + } else { + count++; + } + } + list_add(&hw_err->list, &isolate->hw_errs); + + if (count >= isolate->hw_err_isolate_hz) + atomic_set(&isolate->is_isolate, 1); + + return 0; +} +EXPORT_SYMBOL_GPL(uacce_hw_err_isolate); + +static void uacce_hw_err_destroy(struct uacce *uacce) +{ + struct uacce_hw_err *err, *tmp; + + list_for_each_entry_safe(err, tmp, &uacce->isolate_data.hw_errs, list) { + list_del(&err->list); + kfree(err); + } +} + +static const char *uacce_qfrt_str(struct uacce_qfile_region *qfr) +{ + return qfrt_str[qfr->type]; +} + +/** + * uacce_wake_up - Wake up the process who is waiting this queue + * @q the accelerator queue to wake up + */ +void uacce_wake_up(struct uacce_queue *q) +{ + if (!q) + return; + + wake_up_interruptible(&q->wait); +} +EXPORT_SYMBOL_GPL(uacce_wake_up); + +static bool uacce_q_avail_ioctl(struct uacce_queue *q, unsigned int cmd) +{ + enum uacce_q_state state = q->state; + bool avail = false; + + switch (state) { + case UACCE_Q_INIT: + switch (cmd) { + case UACCE_CMD_SHARE_SVAS: + case UACCE_CMD_GET_SS_DMA: + case UACCE_CMD_PUT_Q: + avail = true; + break; + case 
UACCE_CMD_START: + if (q->qfrs[UACCE_QFRT_MMIO] && + q->qfrs[UACCE_QFRT_DUS]) + avail = true; + break; + /* acc specific ioctl */ + default: + avail = true; + } + break; + case UACCE_Q_STARTED: + switch (cmd) { + case UACCE_CMD_SHARE_SVAS: + case UACCE_CMD_GET_SS_DMA: + case UACCE_CMD_PUT_Q: + avail = true; + break; + case UACCE_CMD_START: + break; + default: + avail = true; + } + break; + case UACCE_Q_ZOMBIE: + break; + default: + break; + } + + return avail; +} + +static bool uacce_q_avail_mmap(struct uacce_queue *q, unsigned int type) +{ + enum uacce_q_state state = q->state; + bool avail = false; + + switch (state) { + case UACCE_Q_INIT: + avail = true; + break; + case UACCE_Q_STARTED: + switch (type) { + /* fix me: ss map should be done before start queue */ + case UACCE_QFRT_SS: + avail = true; + break; + case UACCE_QFRT_MMIO: + case UACCE_QFRT_DUS: + default: + break; + } + break; + case UACCE_Q_ZOMBIE: + break; + default: + break; + } + + return avail; +} + +static void uacce_free_dma_buffers(struct uacce_queue *q) +{ + struct uacce_qfile_region *qfr = q->qfrs[UACCE_QFRT_SS]; + struct uacce *uacce = q->uacce; + int i = 0; + + if (!qfr->dma_list) + return; + while (i < qfr->dma_list[0].total_num) { + WARN_ON(!qfr->dma_list[i].size || !qfr->dma_list[i].dma); + dev_dbg(uacce->pdev, "free dma qfr %s (index = %d)\n", + uacce_qfrt_str(qfr), i); + dma_free_coherent(uacce->pdev, qfr->dma_list[i].size, + qfr->dma_list[i].kaddr, + qfr->dma_list[i].dma); + i++; + } + kfree(qfr->dma_list); + qfr->dma_list = NULL; +} + +static int get_sort_base(struct uacce_dma_slice *list, int low, int high, + struct uacce_dma_slice *tmp) +{ + tmp->kaddr = list[low].kaddr; + tmp->size = list[low].size; + tmp->dma = list[low].dma; + + if (low >= high) + return -EINVAL; + while (low < high) { + while (low < high && list[high].dma > tmp->dma) + high--; + list[low].kaddr = list[high].kaddr; + list[low].dma = list[high].dma; + list[low].size = list[high].size; + while (low < high && list[low].dma < tmp->dma) + low++; + list[high].kaddr = list[low].kaddr; + list[high].dma = list[low].dma; + list[high].size = list[low].size; + } + list[low].kaddr = tmp->kaddr; + list[low].dma = tmp->dma; + list[low].size = tmp->size; + + return low; +} + +static void uacce_sort_dma_buffers(struct uacce_dma_slice *list, int low, + int high, struct uacce_dma_slice *tmp) +{ + int pilot, top = 0; + int *idx_list; + + idx_list = kcalloc(list[0].total_num, sizeof(int), + GFP_KERNEL | __GFP_ZERO); + if (!idx_list) + return; + + pilot = get_sort_base(list, low, high, tmp); + if (pilot < 0) { + kfree(idx_list); + return; + } + if (pilot > low + 1) { + idx_list[top++] = low; + idx_list[top++] = pilot - 1; + } + if (pilot < high - 1) { + idx_list[top++] = pilot + 1; + idx_list[top++] = high; + } + while (top > 0) { + high = idx_list[--top]; + low = idx_list[--top]; + pilot = get_sort_base(list, low, high, tmp); + if (pilot > low + 1) { + idx_list[top++] = low; + idx_list[top++] = pilot - 1; + } + if (pilot < high - 1) { + idx_list[top++] = pilot + 1; + idx_list[top++] = high; + } + } + + kfree(idx_list); +} + +static int uacce_alloc_dma_buffers(struct uacce_queue *q, + struct vm_area_struct *vma) +{ + struct uacce_qfile_region *qfr = q->qfrs[UACCE_QFRT_SS]; + unsigned long size = vma->vm_end - vma->vm_start; + unsigned long max_size = PAGE_SIZE << (MAX_ORDER - 1); + unsigned long start = vma->vm_start; + struct uacce *uacce = q->uacce; + struct uacce_dma_slice *slice; + unsigned long ss_num; + int i; + + /* Set maximum slice size is 128MB */ + 
if (max_size > UACCE_GRAN_NUM_MASK << UACCE_GRAN_SHIFT) + max_size = (UACCE_GRAN_NUM_MASK + 1) << (UACCE_GRAN_SHIFT - 1); + + ss_num = size / max_size + (size % max_size ? 1 : 0); + slice = kcalloc(ss_num + 1, sizeof(*slice), GFP_KERNEL | __GFP_ZERO); + if (!slice) + return -ENOMEM; + + qfr->dma_list = slice; + for (i = 0; i < ss_num; i++) { + if (start + max_size > vma->vm_end) + size = vma->vm_end - start; + else + size = max_size; + dev_dbg(uacce->pdev, "allocate dma %ld pages\n", + (size + PAGE_SIZE - 1) >> PAGE_SHIFT); + slice[i].kaddr = dma_alloc_coherent(uacce->pdev, (size + + PAGE_SIZE - 1) & PAGE_MASK, + &slice[i].dma, GFP_KERNEL); + if (!slice[i].kaddr) { + dev_err(uacce->pdev, "alloc dma slice(sz=%ld) fail!\n", + size); + slice[0].total_num = i; + return -ENOMEM; + } + slice[i].size = (size + PAGE_SIZE - 1) & PAGE_MASK; + slice[i].total_num = ss_num; + start += size; + } + + uacce_sort_dma_buffers(slice, 0, slice[0].total_num - 1, + &slice[ss_num]); + + return 0; +} + +static int uacce_mmap_dma_buffers(struct uacce_queue *q, + struct vm_area_struct *vma) +{ + struct uacce_qfile_region *qfr = q->qfrs[UACCE_QFRT_SS]; + struct uacce_dma_slice *slice = qfr->dma_list; + struct uacce *uacce = q->uacce; + unsigned long vm_pgoff; + int ret = 0; + int i = 0; + + /* + * dma_mmap_coherent() requires vm_pgoff as 0 + * restore vm_pfoff to initial value for mmap() + */ + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = 0; + while (i < slice[0].total_num && slice[i].size) { + vma->vm_end = vma->vm_start + slice[i].size; + ret = dma_mmap_coherent(uacce->pdev, vma, slice[i].kaddr, + slice[i].dma, + slice[i].size); + if (ret) { + dev_err(uacce->pdev, + "mmap dma buf fail(dma index=%d,size=0x%x)!\n", + i, slice[i].size); + goto DMA_MMAP_FAIL; + } + + i++; + vma->vm_start = vma->vm_end; + } + + /* System unmap_region will clean the results, we need do nothing */ +DMA_MMAP_FAIL: + vma->vm_pgoff = vm_pgoff; + vma->vm_start = qfr->iova; + vma->vm_end = vma->vm_start + (qfr->nr_pages << PAGE_SHIFT); + + return ret; +} + +static int uacce_mmap_region(u32 flags, struct uacce_queue *q, + struct vm_area_struct *vma, + struct uacce_qfile_region *qfr) +{ + struct uacce *uacce = q->uacce; + int ret = 0; + + if (flags & UACCE_QFRF_SELFMT) + return uacce->ops->mmap(q, vma, qfr); + + /* mmap to user space */ + if (flags & UACCE_QFRF_MMAP) { + if (flags & UACCE_QFRF_DMA) + ret = uacce_mmap_dma_buffers(q, vma); + } + + return ret; +} + +static struct +uacce_qfile_region *uacce_create_region(struct uacce_queue *q, + struct vm_area_struct *vma, + enum uacce_qfrt type, u32 flags) +{ + struct uacce_qfile_region *qfr; + struct uacce *uacce = q->uacce; + int ret = -ENOMEM; + + qfr = kzalloc(sizeof(*qfr), GFP_ATOMIC); + if (!qfr) + return ERR_PTR(ret); + + qfr->type = type; + qfr->flags = flags; + qfr->iova = vma->vm_start; + qfr->nr_pages = vma_pages(vma); + q->qfrs[type] = qfr; + + if (vma->vm_flags & VM_READ) + qfr->prot |= IOMMU_READ; + + if (vma->vm_flags & VM_WRITE) + qfr->prot |= IOMMU_WRITE; + + /* allocate memory */ + if (flags & UACCE_QFRF_DMA) { + ret = uacce_alloc_dma_buffers(q, vma); + if (ret) { + uacce_free_dma_buffers(q); + goto err_with_qfr; + } + } + ret = uacce_mmap_region(flags, q, vma, qfr); + if (ret) { + dev_err(uacce->pdev, "uacce mmap region fail!\n"); + goto err_with_pages; + } + + return qfr; + +err_with_pages: + if (flags & UACCE_QFRF_DMA) + uacce_free_dma_buffers(q); +err_with_qfr: + kfree(qfr); + q->qfrs[type] = NULL; + return ERR_PTR(ret); +} + +static struct uacce_qfile_region 
noiommu_ss_default_qfr = { + .type = UACCE_QFRT_SS, + .flags = UACCE_QFRF_DMA, +}; + +/* we assume you have uacce_queue_unmap_qfr(q, qfr) from all related queues */ +static void uacce_destroy_region(struct uacce_queue *q, + struct uacce_qfile_region *qfr) +{ + if (qfr->flags & UACCE_QFRF_DMA) + uacce_free_dma_buffers(q); + + if (qfr != &noiommu_ss_default_qfr) + kfree(qfr); +} + +static long uacce_cmd_share_qfr(struct uacce_queue *src, int fd) +{ + struct device *dev = &src->uacce->dev; + struct file *filep = fget(fd); + struct uacce_queue *tgt; + int ret = -EINVAL; + + if (!filep) { + dev_err(dev, "filep is NULL!\n"); + return ret; + } + + if (filep->f_op != &uacce_fops) { + dev_err(dev, "file ops mismatch!\n"); + goto out_with_fd; + } + + tgt = filep->private_data; + if (!tgt) { + dev_err(dev, "target queue is not exist!\n"); + goto out_with_fd; + } + + /* no SVA is needed if the dev can do fault-from-dev */ + if (tgt->uacce->flags & UACCE_DEV_FAULT_FROM_DEV) { + dev_err(dev, "No need to share in SVA device\n"); + goto out_with_fd; + } + + dev_dbg(&src->uacce->dev, "share ss with %s\n", + dev_name(&tgt->uacce->dev)); + + if (!src->qfrs[UACCE_QFRT_SS] || tgt->qfrs[UACCE_QFRT_SS]) { + dev_err(dev, "src q's SS not exists or target q's SS exists!\n"); + goto out_with_fd; + } + + /* In No-IOMMU mode, taget queue uses default SS qfr */ + if (src->qfrs[UACCE_QFRT_SS]->flags & UACCE_QFRF_DMA) { + tgt->qfrs[UACCE_QFRT_SS] = &noiommu_ss_default_qfr; + } else { + tgt->qfrs[UACCE_QFRT_SS] = src->qfrs[UACCE_QFRT_SS]; + list_add(&tgt->list, &src->qfrs[UACCE_QFRT_SS]->qs); + } + ret = 0; + +out_with_fd: + fput(filep); + return ret; +} + +static int uacce_start_queue(struct uacce_queue *q) +{ + struct device *dev = &q->uacce->dev; + int ret; + + ret = q->uacce->ops->start_queue(q); + if (ret < 0) { + dev_err(dev, "uacce fails to start queue!\n"); + return ret; + } + + dev_dbg(&q->uacce->dev, "uacce queue state switch to STARTED\n"); + q->state = UACCE_Q_STARTED; + + return 0; +} + +static long uacce_get_ss_dma(struct uacce_queue *q, void __user *arg) +{ + struct uacce *uacce = q->uacce; + struct uacce_dma_slice *slice; + unsigned long slice_idx = 0; + unsigned long dma, size; + long ret = -EFAULT; + + if (!(uacce->flags & UACCE_DEV_NOIOMMU) || !q->qfrs[UACCE_QFRT_SS]) { + dev_err(&uacce->dev, "no ss dma region!\n"); + return -EINVAL; + } + + slice = q->qfrs[UACCE_QFRT_SS]->dma_list; + if (copy_from_user(&slice_idx, arg, sizeof(unsigned long))) + return ret; + + if (slice[0].total_num - 1 < slice_idx) { + dev_err(&uacce->dev, "no ss slice idx %ld err!\n", slice_idx); + return -EINVAL; + } + dma = slice[slice_idx].dma; + size = slice[slice_idx].size; + if (!dma || !size) { + dev_err(&uacce->dev, "%ldth ss region no exist!\n", slice_idx); + return -ENODEV; + } + dma = dma | (size >> UACCE_GRAN_SHIFT); + if (copy_to_user(arg, &dma, sizeof(unsigned long))) + return ret; + + return (long)(slice[0].total_num - 1 - slice_idx); +} + +static long uacce_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct uacce_queue *q; + struct uacce *uacce; + long ret = 0; + + uacce_qs_wlock(); + + if (unlikely(!filep->private_data)) { + uacce_qs_wunlock(); + return -EBADF; + } + q = filep->private_data; + uacce = q->uacce; + + if (!uacce_q_avail_ioctl(q, cmd)) { + uacce_qs_wunlock(); + return -EINVAL; + } + + switch (cmd) { + case UACCE_CMD_SHARE_SVAS: + ret = uacce_cmd_share_qfr(q, (int)arg); + break; + case UACCE_CMD_START: + ret = uacce_start_queue(q); + break; + case UACCE_CMD_GET_SS_DMA: + 
uacce_qs_wunlock(); + ret = uacce_get_ss_dma(q, (void __user *)arg); + return ret; + case UACCE_CMD_PUT_Q: + uacce_put_queue(q); + break; + default: + uacce_qs_wunlock(); + if (uacce->ops->ioctl) + /* This is not protected by uacce_qs_lock */ + return uacce->ops->ioctl(q, cmd, arg); + + dev_err(&uacce->dev, "ioctl cmd (%d) is not supported!\n", cmd); + return -EINVAL; + } + + uacce_qs_wunlock(); + + return ret; +} + +#ifdef CONFIG_COMPAT +static long uacce_fops_compat_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + arg = (unsigned long)compat_ptr(arg); + return uacce_fops_unl_ioctl(filep, cmd, arg); +} +#endif + +static int uacce_dev_open_check(struct uacce *uacce) +{ + if (uacce->flags & UACCE_DEV_NOIOMMU) + return 0; + + /* + * The device can be opened once if it dose not support multiple page + * table. The better way to check this is counting it per iommu_domain, + * this is just a temporary solution + */ + if (uacce->flags & (UACCE_DEV_PASID | UACCE_DEV_NOIOMMU)) + return 0; + + if (!atomic_read(&uacce->ref)) + return 0; + + dev_info(&uacce->dev, "this device can be openned only once\n"); + return -EBUSY; +} + +static void uacce_queue_drain(struct uacce_queue *q) +{ + struct uacce_qfile_region *qfr; + bool is_to_free_region; + struct uacce *uacce; + int state; + int i; + + uacce = q->uacce; + + state = (q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) ? 1 : + 0; + if (state && uacce->ops->stop_queue) + uacce->ops->stop_queue(q); + + for (i = 0; i < UACCE_QFRT_MAX; i++) { + qfr = q->qfrs[i]; + if (!qfr) + continue; + + is_to_free_region = false; + + if (i == UACCE_QFRT_SS && !(qfr->flags & UACCE_QFRF_DMA)) { + list_del(&q->list); + if (list_empty(&qfr->qs)) + is_to_free_region = true; + } else + is_to_free_region = true; + + if (is_to_free_region) + uacce_destroy_region(q, qfr); + } + + if (state && uacce->ops->put_queue) + uacce->ops->put_queue(q); + + /* + * Put_queue above just put hardware queue, but not free uacce_q. + * + * Put_queue(and stop_queue) is used to support UACCE_PUT_QUEUE + * ioctl, UACCE_PUT_QUEUE is defined only to put low level hardware + * queue, after UACCE_PUT_QUEUE ioctl, uacce_queue enters into zombie + * state. So uacce_queue can only be freed here. + */ + kfree(q); + atomic_dec(&uacce->ref); +} + +/* + * While user space releases a queue, all the relatives on the queue + * should be released imediately by this putting. + */ +static void uacce_put_queue(struct uacce_queue *q) +{ + struct uacce *uacce = q->uacce; + + /* + * To do: we should vm_munmap mmio and dus regions, currently we munmap + * mmio and dus region before put queue. 
+ */ + if (uacce->ops->stop_queue) + uacce->ops->stop_queue(q); + + if (uacce->ops->put_queue) + uacce->ops->put_queue(q); + + q->state = UACCE_Q_ZOMBIE; + q->filep->private_data = NULL; + uacce_queue_drain(q); + + if (!uacce->pdev->driver) { + dev_warn(uacce->pdev, "the parent device is hot plugged!\n"); + return; + } + + if (module_refcount(uacce->pdev->driver->owner) > 0) + module_put(uacce->pdev->driver->owner); +} + +static int uacce_get_queue(struct uacce *uacce, struct file *filep) +{ + struct uacce_queue *q; + int ret; + int pasid = 0; + + uacce_qs_wlock(); + + ret = uacce->ops->get_queue(uacce, pasid, &q); + if (ret < 0) { + uacce_qs_wunlock(); + goto err_unbind; + } + q->pasid = pasid; + q->uacce = uacce; + q->mm = current->mm; + q->filep = filep; + memset(q->qfrs, 0, sizeof(q->qfrs)); + INIT_LIST_HEAD(&q->list); + init_waitqueue_head(&q->wait); + q->state = UACCE_Q_INIT; + filep->private_data = q; + atomic_inc(&uacce->ref); + + uacce_qs_wunlock(); + + return 0; + +err_unbind: + + return ret; +} + +static int uacce_fops_open(struct inode *inode, struct file *filep) +{ + struct uacce *uacce; + int ret; + + uacce = idr_find(&uacce_idr, iminor(inode)); + if (!uacce) { + pr_err("fail to find uacce device!\n"); + return -ENODEV; + } + + if (!uacce->ops->get_queue) { + dev_err(uacce->pdev, "uacce driver get_queue is NULL!\n"); + return -EINVAL; + } + + ret = uacce_dev_open_check(uacce); + if (ret) + return ret; + + ret = uacce_get_queue(uacce, filep); + if (ret) { + dev_err(uacce->pdev, "uacce get queue fail!\n"); + return ret; + } + + if (!try_module_get(uacce->pdev->driver->owner)) { + uacce_put_queue(filep->private_data); + dev_err(uacce->pdev, "uacce try to get module(%s) fail!\n", + uacce->pdev->driver->name); + return -ENODEV; + } + + return 0; +} + +static int uacce_fops_release(struct inode *inode, struct file *filep) +{ + struct uacce_queue *q; + struct uacce *uacce; + + uacce_qs_wlock(); + + q = filep->private_data; + if (q) { + uacce = q->uacce; + /* + * As user space exception(without release queue), it will + * fall into this logic as the task exits to prevent hardware + * resources leaking. + */ + uacce_queue_drain(q); + filep->private_data = NULL; + } + + uacce_qs_wunlock(); + + if (q) + module_put(uacce->pdev->driver->owner); + + return 0; +} + +static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce, + struct vm_area_struct *vma) +{ + enum uacce_qfrt type = UACCE_QFRT_MAX; + size_t next_start = UACCE_QFR_NA; + int i; + + for (i = UACCE_QFRT_MAX - 1; i >= 0; i--) { + if (vma->vm_pgoff >= uacce->qf_pg_start[i]) { + type = i; + break; + } + } + + switch (type) { + case UACCE_QFRT_MMIO: + if (!uacce->ops->mmap) { + dev_err(&uacce->dev, "no driver mmap!\n"); + return UACCE_QFRT_INVALID; + } + break; + + case UACCE_QFRT_DUS: + break; + + case UACCE_QFRT_SS: + + /* todo: this can be valid to protect the process space */ + if (uacce->flags & UACCE_DEV_FAULT_FROM_DEV) { + dev_err(&uacce->dev, "no SS in SVA mode!\n"); + return UACCE_QFRT_INVALID; + } + break; + + default: + dev_err(&uacce->dev, "uacce invalid type(%d)!\n", type); + return UACCE_QFRT_INVALID; + } + + /* make sure the mapping size is exactly the same as the region */ + if (type < UACCE_QFRT_SS) { + for (i = type + 1; i < UACCE_QFRT_MAX; i++) + if (uacce->qf_pg_start[i] != UACCE_QFR_NA) { + next_start = uacce->qf_pg_start[i]; + break; + } + + if (next_start == UACCE_QFR_NA) { + dev_err(&uacce->dev, "uacce config error. 
make sure setting SS offset properly\n"); + return UACCE_QFRT_INVALID; + } + + if (vma_pages(vma) != + next_start - uacce->qf_pg_start[type]) { + dev_err(&uacce->dev, "invalid mmap size, (%ld vs %ld pages) for region %s.\n", + vma_pages(vma), + next_start - uacce->qf_pg_start[type], + qfrt_str[type]); + return UACCE_QFRT_INVALID; + } + } + + return type; +} + +static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct uacce_qfile_region *qfr; + struct uacce_queue *q; + struct uacce *uacce; + enum uacce_qfrt type; + unsigned int flags = 0; + int ret; + + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + + uacce_qs_wlock(); + + if (unlikely(!filep->private_data)) { + uacce_qs_wunlock(); + return -EBADF; + } + q = filep->private_data; + uacce = q->uacce; + type = uacce_get_region_type(uacce, vma); + + dev_dbg(&uacce->dev, "mmap q file(t=%s, off=%lx, start=%pK, end=%pK)\n", + qfrt_str[type], vma->vm_pgoff, + (void *)vma->vm_start, (void *)vma->vm_end); + + if (type == UACCE_QFRT_INVALID) { + ret = -EINVAL; + goto out_with_lock; + } + + /* this type of qfr has mapped already */ + if (q->qfrs[type]) { + dev_err(uacce->pdev, "%d type qfr is existing!\n", type); + ret = -EEXIST; + goto out_with_lock; + } + + if (!uacce_q_avail_mmap(q, type)) { + ret = -EINVAL; + goto out_with_lock; + } + + switch (type) { + case UACCE_QFRT_MMIO: + case UACCE_QFRT_DUS: + flags = UACCE_QFRF_SELFMT; + break; + case UACCE_QFRT_SS: + flags = UACCE_QFRF_MMAP | UACCE_QFRF_DMA; + break; + default: + WARN_ON(&uacce->dev); + break; + } + + qfr = uacce_create_region(q, vma, type, flags); + if (IS_ERR(qfr)) { + ret = PTR_ERR(qfr); + goto out_with_lock; + } + + uacce_qs_wunlock(); + + return 0; + +out_with_lock: + uacce_qs_wunlock(); + return ret; +} + +static __poll_t uacce_fops_poll(struct file *file, poll_table *wait) +{ + struct uacce_queue *q; + struct uacce *uacce; + __poll_t ret = 0; + + uacce_qs_wlock(); + + if (unlikely(!file->private_data)) { + uacce_qs_wunlock(); + ret = EPOLLERR; + return ret; + } + q = file->private_data; + uacce = q->uacce; + + poll_wait(file, &q->wait, wait); + if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q)) + ret = EPOLLIN | EPOLLRDNORM; + + uacce_qs_wunlock(); + + return ret; +} + +static const struct file_operations uacce_fops = { + .owner = THIS_MODULE, + .open = uacce_fops_open, + .release = uacce_fops_release, + .unlocked_ioctl = uacce_fops_unl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = uacce_fops_compat_ioctl, +#endif + .mmap = uacce_fops_mmap, + .poll = uacce_fops_poll, +}; + +static ssize_t id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%d\n", uacce->dev_id); +} +static DEVICE_ATTR_RO(id); + +static ssize_t api_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%s\n", uacce->api_ver); +} +static DEVICE_ATTR_RO(api); + +static ssize_t numa_distance_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + int distance = 0; + +#ifdef CONFIG_NUMA + distance = node_distance(uacce->pdev->numa_node, + cpu_to_node(smp_processor_id())); +#endif + return sprintf(buf, "%d\n", distance); +} +static DEVICE_ATTR_RO(numa_distance); + +static ssize_t node_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + int node_id = 
-1; + +#ifdef CONFIG_NUMA + node_id = uacce->pdev->numa_node; +#endif + return sprintf(buf, "%d\n", node_id); +} +static DEVICE_ATTR_RO(node_id); + +static ssize_t flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%d\n", uacce->flags); +} +static DEVICE_ATTR_RO(flags); + +static ssize_t available_instances_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%d\n", uacce->ops->get_available_instances(uacce)); +} +static DEVICE_ATTR_RO(available_instances); + +static ssize_t algorithms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%s", uacce->algs); +} +static DEVICE_ATTR_RO(algorithms); + +static ssize_t qfrs_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + int i, ret; + unsigned long offset; + + for (i = 0, ret = 0; i < UACCE_QFRT_MAX; i++) { + offset = uacce->qf_pg_start[i]; + if (offset != UACCE_QFR_NA) + offset = offset << PAGE_SHIFT; + if (i == UACCE_QFRT_SS) + break; + ret += sprintf(buf + ret, "%lu\t", offset); + } + ret += sprintf(buf + ret, "%lu\n", offset); + + return ret; +} + +static DEVICE_ATTR_RO(qfrs_offset); + +static ssize_t isolate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%d\n", atomic_read(&uacce->isolate->is_isolate)); +} +static DEVICE_ATTR_RO(isolate); + +static ssize_t isolate_strategy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%u\n", uacce->isolate->hw_err_isolate_hz); +} + +static ssize_t isolate_strategy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + unsigned long val = 0; + +#define MAX_ISOLATE_STRATEGY 65535 + + /* must be set by PF */ + if (uacce->is_vf) + return -EINVAL; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val > MAX_ISOLATE_STRATEGY) + return -EINVAL; + + if (atomic_read(&uacce->ref)) + return -EBUSY; + + uacce->isolate->hw_err_isolate_hz = val; + + return count; +} +static DEVICE_ATTR_RW(isolate_strategy); + +static ssize_t dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev); + + return sprintf(buf, "%d\n", uacce->ops->get_dev_state(uacce)); +} +static DEVICE_ATTR_RO(dev_state); + +static struct attribute *uacce_dev_attrs[] = { + &dev_attr_id.attr, + &dev_attr_api.attr, + &dev_attr_node_id.attr, + &dev_attr_numa_distance.attr, + &dev_attr_flags.attr, + &dev_attr_available_instances.attr, + &dev_attr_algorithms.attr, + &dev_attr_qfrs_offset.attr, + &dev_attr_isolate.attr, + &dev_attr_isolate_strategy.attr, + &dev_attr_dev_state.attr, + NULL, +}; + +static const struct attribute_group uacce_dev_attr_group = { + .name = UACCE_DEV_ATTRS, + .attrs = uacce_dev_attrs, +}; + +static const struct attribute_group *uacce_dev_attr_groups[] = { + &uacce_dev_attr_group, + NULL +}; + +static void uacce_dev_release(struct device *dev) {} + +static int uacce_create_chrdev(struct uacce *uacce) +{ + int ret; + + ret = idr_alloc(&uacce_idr, uacce, 0, 0, GFP_KERNEL); + if (ret < 0) + return 
ret; + + cdev_init(&uacce->cdev, &uacce_fops); + uacce->dev_id = ret; + uacce->cdev.owner = THIS_MODULE; + device_initialize(&uacce->dev); + uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id); + uacce->dev.class = uacce_class; + uacce->dev.groups = uacce_dev_attr_groups; + uacce->dev.parent = uacce->pdev; + uacce->dev.release = uacce_dev_release; + dev_set_name(&uacce->dev, "%s-%d", uacce->drv_name, uacce->dev_id); + ret = cdev_device_add(&uacce->cdev, &uacce->dev); + if (ret) + goto err_with_idr; + + dev_dbg(&uacce->dev, "create uacce minior=%d\n", uacce->dev_id); + return 0; + +err_with_idr: + idr_remove(&uacce_idr, uacce->dev_id); + return ret; +} + +static void uacce_destroy_chrdev(struct uacce *uacce) +{ + cdev_device_del(&uacce->cdev, &uacce->dev); + put_device(&uacce->dev); + memset(&uacce->dev, 0, sizeof(struct device)); + idr_remove(&uacce_idr, uacce->dev_id); +} + +static int uacce_default_get_available_instances(struct uacce *uacce) +{ + return -1; +} + +static int uacce_default_start_queue(struct uacce_queue *q) +{ + dev_dbg(&q->uacce->dev, "fake start queue\n"); + return 0; +} + +/** + * uacce_register - register an accelerator + * @uacce: the accelerator structure + */ +int uacce_register(struct uacce *uacce) +{ + struct device *dev; + int ret; + + if (!uacce) + return -ENODEV; + + if (!uacce->pdev) { + pr_err("uacce parent device not set\n"); + return -ENODEV; + } + dev = uacce->pdev; + + if (uacce->flags & UACCE_DEV_NOIOMMU) { + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + dev_warn(dev, "register to noiommu mode, it's not safe for kernel\n"); + } + + /* if dev support fault-from-dev, it should support pasid */ + if ((uacce->flags & UACCE_DEV_FAULT_FROM_DEV) && + !(uacce->flags & UACCE_DEV_PASID)) { + dev_err(dev, "SVM/SVA device should support PASID\n"); + return -EINVAL; + } + + if (!uacce->ops) { + dev_err(dev, "uacce ops is null\n"); + return -EINVAL; + } + + if (!uacce->ops->start_queue) + uacce->ops->start_queue = uacce_default_start_queue; + + if (!uacce->ops->get_available_instances) + uacce->ops->get_available_instances = + uacce_default_get_available_instances; + + ret = uacce_create_chrdev(uacce); + if (ret) { + dev_err(dev, "uacce creates cdev fail!\n"); + return ret; + } + + if (uacce->flags & UACCE_DEV_PASID) + uacce->flags &= ~(UACCE_DEV_FAULT_FROM_DEV | UACCE_DEV_PASID); + + dev_dbg(&uacce->dev, "register to uacce!\n"); + atomic_set(&uacce->ref, 0); + INIT_LIST_HEAD(&uacce->isolate_data.hw_errs); + + return 0; +} +EXPORT_SYMBOL_GPL(uacce_register); + +/** + * uacce_unregister - unregisters a uacce + * @uacce: the accelerator to unregister + * + * Unregister an accelerator that wat previously successully registered with + * uacce_register(). 
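Once uacce_register() above has created the character device, the intended data path runs entirely from user space: map the MMIO and DUS regions, start the queue, then drive the hardware through the mappings. A rough user-space sketch under stated assumptions; the device node name, the installed header path, and the region sizes are placeholders (real sizes come from the qfrs_offset attribute), while the mmap-before-UACCE_CMD_START ordering and UACCE_CMD_PUT_Q follow uacce_q_avail_ioctl() and uacce_fops_unl_ioctl() above:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <misc/uacce/uacce.h>	/* assumed install path of include/uapi/misc/uacce/uacce.h */

int main(void)
{
	int fd = open("/dev/hisi_zip-0", O_RDWR);	/* hypothetical "%s-%d" node */
	size_t mmio_sz = 4096, dus_sz = 4096;		/* placeholders; read qfrs_offset for real sizes */
	void *mmio, *dus;

	if (fd < 0)
		return 1;

	/* MMIO and DUS must both be mapped before UACCE_CMD_START is accepted */
	mmio = mmap(NULL, mmio_sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	dus = mmap(NULL, dus_sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmio_sz);
	if (mmio == MAP_FAILED || dus == MAP_FAILED)
		goto out;

	if (ioctl(fd, UACCE_CMD_START) < 0)		/* switch queue to STARTED */
		goto out;

	/* data path: talk to the device through mmio/dus, no further syscalls */

	ioctl(fd, UACCE_CMD_PUT_Q);			/* release the hardware queue */
out:
	close(fd);
	return 0;
}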
+ */ +int uacce_unregister(struct uacce *uacce) +{ + if (!uacce) + return -ENODEV; + + if (atomic_read(&uacce->ref) > 0) { + printk_ratelimited("Fail to unregister uacce, please close all uacce queues!\n"); + return -EAGAIN; + } + + uacce_hw_err_destroy(uacce); + uacce_destroy_chrdev(uacce); + + return 0; +} +EXPORT_SYMBOL_GPL(uacce_unregister); + +static int __init uacce_init(void) +{ + int ret; + + uacce_class = class_create(THIS_MODULE, UACCE_CLASS_NAME); + if (IS_ERR(uacce_class)) { + ret = PTR_ERR(uacce_class); + goto err; + } + + ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, "uacce"); + if (ret) + goto err_with_class; + + pr_info("uacce init with major number:%d\n", MAJOR(uacce_devt)); + pr_debug("uacce debug enabled\n"); + + return 0; + +err_with_class: + class_destroy(uacce_class); +err: + return ret; +} + +static __exit void uacce_exit(void) +{ + unregister_chrdev_region(uacce_devt, MINORMASK); + class_destroy(uacce_class); + idr_destroy(&uacce_idr); +} + +subsys_initcall(uacce_init); +module_exit(uacce_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("HiSilicon Tech. Co., Ltd."); +MODULE_DESCRIPTION("Accelerator interface for Userland applications"); diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c index 6c3591cdf8555ff00e576c33c55a2bdbce3ea61e..a3c6c773d9dc80c9e583d4db53cffb23c429d473 100644 --- a/drivers/misc/vexpress-syscfg.c +++ b/drivers/misc/vexpress-syscfg.c @@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, int tries; long timeout; - if (WARN_ON(index > func->num_templates)) + if (WARN_ON(index >= func->num_templates)) return -EINVAL; command = readl(syscfg->base + SYS_CFGCTRL); diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 21d0fa592145c0b77fc0cad6f95d310ef8b456b7..bc089e634a751c853dcad45facc6ac7d57dd4b41 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -29,6 +29,9 @@ #include "vmci_driver.h" #include "vmci_event.h" +/* Use a wide upper bound for the maximum contexts. */ +#define VMCI_MAX_CONTEXTS 2000 + /* * List of current VMCI contexts. Contexts can be added by * vmci_ctx_create() and removed via vmci_ctx_destroy(). @@ -125,19 +128,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags, /* Initialize host-specific VMCI context. */ init_waitqueue_head(&context->host_context.wait_queue); - context->queue_pair_array = vmci_handle_arr_create(0); + context->queue_pair_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT); if (!context->queue_pair_array) { error = -ENOMEM; goto err_free_ctx; } - context->doorbell_array = vmci_handle_arr_create(0); + context->doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); if (!context->doorbell_array) { error = -ENOMEM; goto err_free_qp_array; } - context->pending_doorbell_array = vmci_handle_arr_create(0); + context->pending_doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); if (!context->pending_doorbell_array) { error = -ENOMEM; goto err_free_db_array; @@ -212,7 +218,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags) * We create an array to hold the subscribers we find when * scanning through all contexts. 
*/ - subscriber_array = vmci_handle_arr_create(0); + subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS); if (subscriber_array == NULL) return VMCI_ERROR_NO_MEM; @@ -631,20 +637,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid) spin_lock(&context->lock); - list_for_each_entry(n, &context->notifier_list, node) { - if (vmci_handle_is_equal(n->handle, notifier->handle)) { - exists = true; - break; + if (context->n_notifiers < VMCI_MAX_CONTEXTS) { + list_for_each_entry(n, &context->notifier_list, node) { + if (vmci_handle_is_equal(n->handle, notifier->handle)) { + exists = true; + break; + } } - } - if (exists) { - kfree(notifier); - result = VMCI_ERROR_ALREADY_EXISTS; + if (exists) { + kfree(notifier); + result = VMCI_ERROR_ALREADY_EXISTS; + } else { + list_add_tail_rcu(¬ifier->node, + &context->notifier_list); + context->n_notifiers++; + result = VMCI_SUCCESS; + } } else { - list_add_tail_rcu(¬ifier->node, &context->notifier_list); - context->n_notifiers++; - result = VMCI_SUCCESS; + kfree(notifier); + result = VMCI_ERROR_NO_MEM; } spin_unlock(&context->lock); @@ -729,8 +741,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, u32 *buf_size, void **pbuf) { struct dbell_cpt_state *dbells; - size_t n_doorbells; - int i; + u32 i, n_doorbells; n_doorbells = vmci_handle_arr_get_size(context->doorbell_array); if (n_doorbells > 0) { @@ -868,7 +879,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id, spin_lock(&context->lock); *db_handle_array = context->pending_doorbell_array; - context->pending_doorbell_array = vmci_handle_arr_create(0); + context->pending_doorbell_array = + vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); if (!context->pending_doorbell_array) { context->pending_doorbell_array = *db_handle_array; *db_handle_array = NULL; @@ -950,12 +962,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); - if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) { - vmci_handle_arr_append_entry(&context->doorbell_array, handle); - result = VMCI_SUCCESS; - } else { + if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) + result = vmci_handle_arr_append_entry(&context->doorbell_array, + handle); + else result = VMCI_ERROR_DUPLICATE_ENTRY; - } spin_unlock(&context->lock); vmci_ctx_put(context); @@ -1091,15 +1102,16 @@ int vmci_ctx_notify_dbell(u32 src_cid, if (!vmci_handle_arr_has_entry( dst_context->pending_doorbell_array, handle)) { - vmci_handle_arr_append_entry( + result = vmci_handle_arr_append_entry( &dst_context->pending_doorbell_array, handle); - - ctx_signal_notify(dst_context); - wake_up(&dst_context->host_context.wait_queue); - + if (result == VMCI_SUCCESS) { + ctx_signal_notify(dst_context); + wake_up(&dst_context->host_context.wait_queue); + } + } else { + result = VMCI_SUCCESS; } - result = VMCI_SUCCESS; } spin_unlock(&dst_context->lock); } @@ -1126,13 +1138,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) if (context == NULL || vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; - if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) { - vmci_handle_arr_append_entry(&context->queue_pair_array, - handle); - result = VMCI_SUCCESS; - } else { + if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) + result = vmci_handle_arr_append_entry( + &context->queue_pair_array, handle); + else result = VMCI_ERROR_DUPLICATE_ENTRY; - } return result; } diff --git 
a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index b3fa738ae0050b48ba3f07ef3a02460d72898ea4..f005206d9033b53a83bf39955cc9d12fe732e0c3 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) entry = container_of(resource, struct dbell_entry, resource); if (entry->run_delayed) { - schedule_work(&entry->work); + if (!schedule_work(&entry->work)) + vmci_resource_put(resource); } else { entry->notify_cb(entry->client_data); vmci_resource_put(resource); @@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx) atomic_read(&dbell->active) == 1) { if (dbell->run_delayed) { vmci_resource_get(&dbell->resource); - schedule_work(&dbell->work); + if (!schedule_work(&dbell->work)) + vmci_resource_put(&dbell->resource); } else { dbell->notify_cb(dbell->client_data); } diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index d7eaf1eb11e7f3e67646dd7da053213a78fd35bd..003bfba407588cdcec306822050eb18b9a86e40b 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -113,5 +113,5 @@ module_exit(vmci_drv_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); -MODULE_VERSION("1.1.5.0-k"); +MODULE_VERSION("1.1.6.0-k"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 84258a48029d41a2bc8ed9751deef566f3aa4a43..da24cb341a7f59ce34da077def99fc551c010ae7 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -94,9 +95,12 @@ static void event_deliver(struct vmci_event_msg *event_msg) { struct vmci_subscription *cur; struct list_head *subscriber_list; + u32 sanitized_event, max_vmci_event; rcu_read_lock(); - subscriber_list = &subscriber_array[event_msg->event_data.event]; + max_vmci_event = ARRAY_SIZE(subscriber_array); + sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event); + subscriber_list = &subscriber_array[sanitized_event]; list_for_each_entry_rcu(cur, subscriber_list, node) { cur->callback(cur->id, &event_msg->event_data, cur->callback_data); diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c index 344973a0fb0acaac1185b065d16d6e9144600d71..917e18a8af9523231095a82e7d647ab3c32ba73f 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.c +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -16,24 +16,29 @@ #include #include "vmci_handle_array.h" -static size_t handle_arr_calc_size(size_t capacity) +static size_t handle_arr_calc_size(u32 capacity) { - return sizeof(struct vmci_handle_arr) + + return VMCI_HANDLE_ARRAY_HEADER_SIZE + capacity * sizeof(struct vmci_handle); } -struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity) +struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) { struct vmci_handle_arr *array; + if (max_capacity == 0 || capacity > max_capacity) + return NULL; + if (capacity == 0) - capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE; + capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY, + max_capacity); array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); if (!array) return NULL; array->capacity = capacity; + array->max_capacity = max_capacity; array->size = 0; return array; @@ -44,27 +49,34 @@ void 
vmci_handle_arr_destroy(struct vmci_handle_arr *array) kfree(array); } -void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, - struct vmci_handle handle) +int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle) { struct vmci_handle_arr *array = *array_ptr; if (unlikely(array->size >= array->capacity)) { /* reallocate. */ struct vmci_handle_arr *new_array; - size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT; - size_t new_size = handle_arr_calc_size(new_capacity); + u32 capacity_bump = min(array->max_capacity - array->capacity, + array->capacity); + size_t new_size = handle_arr_calc_size(array->capacity + + capacity_bump); + + if (array->size >= array->max_capacity) + return VMCI_ERROR_NO_MEM; new_array = krealloc(array, new_size, GFP_ATOMIC); if (!new_array) - return; + return VMCI_ERROR_NO_MEM; - new_array->capacity = new_capacity; + new_array->capacity += capacity_bump; *array_ptr = array = new_array; } array->entries[array->size] = handle; array->size++; + + return VMCI_SUCCESS; } /* @@ -74,7 +86,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, struct vmci_handle entry_handle) { struct vmci_handle handle = VMCI_INVALID_HANDLE; - size_t i; + u32 i; for (i = 0; i < array->size; i++) { if (vmci_handle_is_equal(array->entries[i], entry_handle)) { @@ -109,7 +121,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array) * Handle at given index, VMCI_INVALID_HANDLE if invalid index. */ struct vmci_handle -vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index) +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index) { if (unlikely(index >= array->size)) return VMCI_INVALID_HANDLE; @@ -120,7 +132,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index) bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, struct vmci_handle entry_handle) { - size_t i; + u32 i; for (i = 0; i < array->size; i++) if (vmci_handle_is_equal(array->entries[i], entry_handle)) diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h index b5f3a7f98cf1f8de28845e033d48babe5fd28835..0fc58597820e24da137c55bf463b856e9cfba5e3 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.h +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -17,32 +17,41 @@ #define _VMCI_HANDLE_ARRAY_H_ #include +#include #include -#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4 -#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */ - struct vmci_handle_arr { - size_t capacity; - size_t size; + u32 capacity; + u32 max_capacity; + u32 size; + u32 pad; struct vmci_handle entries[]; }; -struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity); +#define VMCI_HANDLE_ARRAY_HEADER_SIZE \ + offsetof(struct vmci_handle_arr, entries) +/* Select a default capacity that results in a 64 byte sized array */ +#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6 +/* Make sure that the max array size can be expressed by a u32 */ +#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \ + ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \ + sizeof(struct vmci_handle)) + +struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity); void vmci_handle_arr_destroy(struct vmci_handle_arr *array); -void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, - struct vmci_handle handle); +int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, + struct vmci_handle handle); struct vmci_handle 
vmci_handle_arr_remove_entry(struct vmci_handle_arr *array, struct vmci_handle entry_handle); struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array); struct vmci_handle -vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index); +vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index); bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array, struct vmci_handle entry_handle); struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array); -static inline size_t vmci_handle_arr_get_size( +static inline u32 vmci_handle_arr_get_size( const struct vmci_handle_arr *array) { return array->size; diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index 83e0c95d20a47e4cfc7ad792882ceb66df481a7e..cdd326cc9cb92414aebc569dd58c2a0b1e494fab 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -237,7 +237,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context, * about the size. */ BUILD_BUG_ON(sizeof(bool) != sizeof(u8)); - if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8))) + if (!access_ok((void __user *)uva, sizeof(u8))) return VMCI_ERROR_GENERIC; /* diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c index 1ab6e8737a5f0953cae0fb3422adceacf4bb6d7a..2779704e128aa4462512500b0a2a860741550cce 100644 --- a/drivers/misc/vmw_vmci/vmci_resource.c +++ b/drivers/misc/vmw_vmci/vmci_resource.c @@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle, if (r->type == type && rid == handle.resource && - (cid == handle.context || cid == VMCI_INVALID_ID)) { + (cid == handle.context || cid == VMCI_INVALID_ID || + handle.context == VMCI_INVALID_ID)) { resource = r; break; } @@ -151,7 +152,8 @@ void vmci_resource_remove(struct vmci_resource *resource) spin_lock(&vmci_resource_table.lock); hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { - if (vmci_handle_is_equal(r->handle, resource->handle)) { + if (vmci_handle_is_equal(r->handle, resource->handle) && + resource->type == r->type) { hlist_del_init_rcu(&r->node); break; } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e201ccb3fda4d74dab326e0f2eb983aa7bacc434..890a21bc3e28bbe56568532e210277e47e6411ad 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -409,38 +409,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) -{ - int err; - u32 retry_count = 0; - - if (!status || !retries_max) - return -EINVAL; - - do { - err = __mmc_send_status(card, status, 5); - if (err) - break; - - if (!R1_STATUS(*status) && - (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) - break; /* RPMB programming operation complete */ - - /* - * Rechedule to give the MMC device a chance to continue - * processing the previous command without being polled too - * frequently. - */ - usleep_range(1000, 5000); - } while (++retry_count < retries_max); - - if (retry_count == retries_max) - err = -EPERM; - - return err; -} - static int ioctl_do_sanitize(struct mmc_card *card) { int err; @@ -469,16 +437,67 @@ static int ioctl_do_sanitize(struct mmc_card *card) return err; } +static inline bool mmc_blk_in_tran_state(u32 status) +{ + /* + * Some cards mishandle the status bits, so make sure to check both the + * busy indication and the card state. 
+ */ + return status & R1_READY_FOR_DATA && + (R1_CURRENT_STATE(status) == R1_STATE_TRAN); +} + +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, + u32 *resp_errs) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + int err = 0; + u32 status; + + do { + bool done = time_after(jiffies, timeout); + + err = __mmc_send_status(card, &status, 5); + if (err) { + dev_err(mmc_dev(card->host), + "error %d requesting status\n", err); + return err; + } + + /* Accumulate any response error bits seen */ + if (resp_errs) + *resp_errs |= status; + + /* + * Timeout if the device never becomes ready for data and never + * leaves the program state. + */ + if (done) { + dev_err(mmc_dev(card->host), + "Card stuck in wrong state! %s status: %#x\n", + __func__, status); + return -ETIMEDOUT; + } + + /* + * Some cards mishandle the status bits, + * so make sure to check both the busy + * indication and the card state. + */ + } while (!mmc_blk_in_tran_state(status)); + + return err; +} + static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_blk_ioc_data *idata) { - struct mmc_command cmd = {}; + struct mmc_command cmd = {}, sbc = {}; struct mmc_data data = {}; struct mmc_request mrq = {}; struct scatterlist sg; int err; unsigned int target_part; - u32 status = 0; if (!card || !md || !idata) return -EINVAL; @@ -550,10 +569,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, } if (idata->rpmb) { - err = mmc_set_blockcount(card, data.blocks, - idata->ic.write_flag & (1 << 31)); - if (err) - return err; + sbc.opcode = MMC_SET_BLOCK_COUNT; + /* + * We don't do any blockcount validation because the max size + * may be increased by a future standard. We just copy the + * 'Reliable Write' bit here. + */ + sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); + sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + mrq.sbc = &sbc; } if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && @@ -607,16 +631,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb) { + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) { /* - * Ensure RPMB command has completed by polling CMD13 + * Ensure RPMB/R1B command has completed by polling CMD13 * "Send Status". */ - err = ioctl_rpmb_card_status_poll(card, &status, 5); - if (err) - dev_err(mmc_dev(card->host), - "%s: Card Status=0x%08X, error %d\n", - __func__, status, err); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL); } return err; @@ -966,58 +986,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, return ms; } -static inline bool mmc_blk_in_tran_state(u32 status) -{ - /* - * Some cards mishandle the status bits, so make sure to check both the - * busy indication and the card state. 
- */ - return status & R1_READY_FOR_DATA && - (R1_CURRENT_STATE(status) == R1_STATE_TRAN); -} - -static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, - struct request *req, u32 *resp_errs) -{ - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - int err = 0; - u32 status; - - do { - bool done = time_after(jiffies, timeout); - - err = __mmc_send_status(card, &status, 5); - if (err) { - pr_err("%s: error %d requesting status\n", - req->rq_disk->disk_name, err); - return err; - } - - /* Accumulate any response error bits seen */ - if (resp_errs) - *resp_errs |= status; - - /* - * Timeout if the device never becomes ready for data and never - * leaves the program state. - */ - if (done) { - pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n", - mmc_hostname(card->host), - req->rq_disk->disk_name, __func__, status); - return -ETIMEDOUT; - } - - /* - * Some cards mishandle the status bits, - * so make sure to check both the busy - * indication and the card state. - */ - } while (!mmc_blk_in_tran_state(status)); - - return err; -} - static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { @@ -1673,7 +1641,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) mmc_blk_send_stop(card, timeout); - err = card_busy_detect(card, timeout, req, NULL); + err = card_busy_detect(card, timeout, NULL); mmc_retune_release(card->host); @@ -1690,31 +1658,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - - mmc_wait_for_req(host, mrq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - err = mmc_send_status(card, &status); - if (err) - goto error_exit; + mmc_wait_for_req(host, mrq); - if (!mmc_host_is_spi(host) && - !mmc_blk_in_tran_state(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_blk_in_tran_state(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || @@ -1897,7 +1865,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) return 0; - err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status); /* * Do not assume data transferred correctly if there are any error bits @@ -2109,7 +2077,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) if (waiting) wake_up(&mq->wait); else - kblockd_schedule_work(&mq->complete_work); + queue_work(mq->card->complete_wq, &mq->complete_work); return; } @@ -2379,12 +2347,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", card->host->index, subname ? 
subname : ""); - if (mmc_card_mmc(card)) - blk_queue_logical_block_size(md->queue.queue, - card->ext_csd.data_sector_size); - else - blk_queue_logical_block_size(md->queue.queue, 512); - set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { @@ -2923,6 +2885,13 @@ static int mmc_blk_probe(struct mmc_card *card) mmc_fixup_device(card, mmc_blk_fixups); + card->complete_wq = alloc_workqueue("mmc_complete", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (unlikely(!card->complete_wq)) { + pr_err("Failed to create mmc completion workqueue"); + return -ENOMEM; + } + md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); @@ -2986,6 +2955,7 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); + destroy_workqueue(card->complete_wq); } static int _mmc_blk_suspend(struct mmc_card *card) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 50a5c340307b8c33b9ed6ad72eafeb8afa227174..0a74785e575ba93ada09fc47bcb7cd7b55d56abc 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host, if (!data) return; - if (cmd->error || data->error || + if ((cmd && cmd->error) || data->error || !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; @@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && - cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + if (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 && + !host->retune_crc_disable && (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || (mrq->stop && mrq->stop->error == -EILSEQ))) @@ -2378,9 +2379,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card) return card->pref_erase; max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); - if (max_discard && mmc_can_trim(card)) { + if (mmc_can_trim(card)) { max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); - if (max_trim < max_discard) + if (max_trim < max_discard || max_discard == 0) max_discard = max_trim; } else if (max_discard < card->erase_size) { max_discard = 0; diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index bc1bd2c256132c2f3def2844a7d5ccccb05ef523..f1fe446eee666a3d8a8d3b31ca6bb4ddf4a1b088 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -30,6 +30,7 @@ #include "pwrseq.h" #define DEFAULT_CMD6_TIMEOUT_MS 500 +#define MIN_CACHE_EN_TIMEOUT_MS 1600 static const unsigned int tran_exp[] = { 10000, 100000, 1000000, 10000000, @@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->cid.year += 16; /* check whether the eMMC card supports BKOPS */ - if (!mmc_card_broken_hpi(card) && - ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { + if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { card->ext_csd.bkops = 1; card->ext_csd.man_bkops_en = (ext_csd[EXT_CSD_BKOPS_EN] & @@ -1209,13 +1209,13 @@ static int mmc_select_hs400(struct mmc_card *card) mmc_set_timing(host, MMC_TIMING_MMC_HS400); mmc_set_bus_speed(card); + if (host->ops->hs400_complete) + host->ops->hs400_complete(host); + err = mmc_switch_status(card); if (err) goto out_err; - if (host->ops->hs400_complete) - host->ops->hs400_complete(host); - return 0; out_err: @@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if 
(err) { pr_warn("%s: Enabling HPI failed\n", mmc_hostname(card->host)); + card->ext_csd.hpi_en = 0; err = 0; - } else + } else { card->ext_csd.hpi_en = 1; + } } /* - * If cache size is higher than 0, this indicates - * the existence of cache and it can be turned on. + * If cache size is higher than 0, this indicates the existence of cache + * and it can be turned on. Note that some eMMCs from Micron have been + * reported to need ~800 ms timeout, while enabling the cache after + * sudden power failure tests. Let's extend the timeout to a minimum of + * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards. */ - if (!mmc_card_broken_hpi(card) && - card->ext_csd.cache_size > 0) { + if (card->ext_csd.cache_size > 0) { + unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS; + + timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms); err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_CACHE_CTRL, 1, - card->ext_csd.generic_cmd6_time); + EXT_CSD_CACHE_CTRL, 1, timeout_ms); if (err && err != -EBADMSG) goto free_card; diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index ef18daeaa4cc6f0a54f2814762fd7d615fcf4c9f..164b4e43050e7016abf2adb39edae95e87b6fbd2 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -3101,13 +3101,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); #ifdef CONFIG_HIGHMEM test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); + if (!test->highmem) { + count = -ENOMEM; + goto free_test_buffer; + } #endif -#ifdef CONFIG_HIGHMEM - if (test->buffer && test->highmem) { -#else if (test->buffer) { -#endif mutex_lock(&mmc_test_lock); mmc_test_run(test, testcase); mutex_unlock(&mmc_test_lock); @@ -3115,6 +3115,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, #ifdef CONFIG_HIGHMEM __free_pages(test->highmem, BUFFER_ORDER); +free_test_buffer: #endif kfree(test->buffer); kfree(test); diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c index efb8a7965dd4a98c2e9d3e651e742a5488620f80..154f4204d58cbd414400dd205826842c2c8774a4 100644 --- a/drivers/mmc/core/pwrseq_emmc.c +++ b/drivers/mmc/core/pwrseq_emmc.c @@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc { #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq) -static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq) -{ - gpiod_set_value(pwrseq->reset_gpio, 1); - udelay(1); - gpiod_set_value(pwrseq->reset_gpio, 0); - udelay(200); -} - static void mmc_pwrseq_emmc_reset(struct mmc_host *host) { struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq); - __mmc_pwrseq_emmc_reset(pwrseq); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); + udelay(1); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); + udelay(200); } static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this, @@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this, { struct mmc_pwrseq_emmc *pwrseq = container_of(this, struct mmc_pwrseq_emmc, reset_nb); + gpiod_set_value(pwrseq->reset_gpio, 1); + udelay(1); + gpiod_set_value(pwrseq->reset_gpio, 0); + udelay(200); - __mmc_pwrseq_emmc_reset(pwrseq); return NOTIFY_DONE; } @@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev) if (IS_ERR(pwrseq->reset_gpio)) return PTR_ERR(pwrseq->reset_gpio); - /* - * register reset handler to ensure emmc reset also from - * emergency_reboot(), priority 255 is the highest priority - * so it will be executed before any
system reboot handler. - */ - pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb; - pwrseq->reset_nb.priority = 255; - register_restart_handler(&pwrseq->reset_nb); + if (!gpiod_cansleep(pwrseq->reset_gpio)) { + /* + * register reset handler to ensure emmc reset also from + * emergency_reboot(), priority 255 is the highest priority + * so it will be executed before any system reboot handler. + */ + pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb; + pwrseq->reset_nb.priority = 255; + register_restart_handler(&pwrseq->reset_nb); + } else { + dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n"); + } pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops; pwrseq->pwrseq.dev = dev; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 6edffeed99534935f320b5b3d6e941dad15a968d..becc6594a8a47cf05dc5bbb7582d27324091b7b6 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; + unsigned block_size = 512; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; @@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + if (mmc_card_mmc(card)) + block_size = card->ext_csd.data_sector_size; + + blk_queue_logical_block_size(mq->queue, block_size); + blk_queue_max_segment_size(mq->queue, + round_down(host->max_seg_size, block_size)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); @@ -494,6 +501,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq) blk_mq_unquiesce_queue(q); blk_cleanup_queue(q); + blk_mq_free_tag_set(&mq->tag_set); /* * A request can be completed before the next request, potentially diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d0d9f90e7cdfb7453adc31f6cb5cd458989e8ceb..04738359ec0292797136344778879999212d8651 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -216,6 +216,14 @@ static int mmc_decode_scr(struct mmc_card *card) if (scr->sda_spec3) scr->cmds = UNSTUFF_BITS(resp, 32, 2); + + /* SD Spec says: any SD Card shall set at least bits 0 and 2 */ + if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) || + !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) { + pr_err("%s: invalid bus width\n", mmc_hostname(card->host)); + return -EINVAL; + } + return 0; } @@ -1269,6 +1277,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } + /* + * Some SD cards claims an out of spec VDD voltage range. Let's treat + * these bits as being in-valid and especially also bit7. + */ + ocr &= ~0x7FFF; + rocr = mmc_select_voltage(host, ocr); /* diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d8e17ea6126de8339beddd776702e5aa3c0691d5..0aa99694b9379899bfe4ee86a7ca2c9b0e44cc72 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -934,6 +934,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host) */ static int mmc_sdio_suspend(struct mmc_host *host) { + /* Prevent processing of SDIO IRQs in suspended state. 
*/ + mmc_card_set_suspended(host->card); + cancel_delayed_work_sync(&host->sdio_irq_work); + mmc_claim_host(host); if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) @@ -982,13 +986,20 @@ static int mmc_sdio_resume(struct mmc_host *host) err = sdio_enable_4bit_bus(host->card); } - if (!err && host->sdio_irqs) { + if (err) + goto out; + + /* Allow SDIO IRQs to be processed again. */ + mmc_card_clr_suspended(host->card); + + if (host->sdio_irqs) { if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) wake_up_process(host->sdio_irq_thread); else if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); } +out: mmc_release_host(host); host->pm_flags &= ~MMC_PM_KEEP_POWER; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index b6d8203e46ebe177de629694f2a15406a6b262eb..2ee4822c92853baed1ba54afd48bcbc46d484d2a 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -266,6 +266,12 @@ static void sdio_release_func(struct device *dev) sdio_free_func_cis(func); + /* + * We have now removed the link to the tuples in the + * card structure, so remove the reference. + */ + put_device(&func->card->dev); + kfree(func->info); kfree(func->tmpbuf); kfree(func); @@ -296,6 +302,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card) device_initialize(&func->dev); + /* + * We may link to tuples in the card structure, + * we need make sure we have a reference to it. + */ + get_device(&func->card->dev); + func->dev.parent = &card->dev; func->dev.bus = &sdio_bus_type; func->dev.release = sdio_release_func; @@ -349,10 +361,9 @@ int sdio_add_func(struct sdio_func *func) */ void sdio_remove_func(struct sdio_func *func) { - if (!sdio_func_present(func)) - return; + if (sdio_func_present(func)) + device_del(&func->dev); - device_del(&func->dev); of_node_put(func->dev.of_node); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index f8c372839d24491f607c8a919480e19112e26ff1..cbb66528e70f2636f1102260837e98c3db4e766c 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -378,12 +378,6 @@ int sdio_read_func_cis(struct sdio_func *func) if (ret) return ret; - /* - * Since we've linked to tuples in the card structure, - * we must make sure we have a reference to it. - */ - get_device(&func->card->dev); - /* * Vendor/device id is optional for function CIS, so * copy it from the card structure as needed. @@ -409,11 +403,5 @@ void sdio_free_func_cis(struct sdio_func *func) } func->tuples = NULL; - - /* - * We have now removed the link to the tuples in the - * card structure, so remove the reference. 
- */ - put_device(&func->card->dev); } diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9a95e7a36bd8efb980ac9a78c2f1e..ed2d8c48ea178006950dde152b33b5104b9f6a88 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -18,6 +18,7 @@ #include "sdio_ops.h" #include "core.h" #include "card.h" +#include "host.h" /** * sdio_claim_host - exclusively claim a bus for a certain SDIO function @@ -725,3 +726,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags) return 0; } EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags); + +/** + * sdio_retune_crc_disable - temporarily disable retuning on CRC errors + * @func: SDIO function attached to host + * + * If the SDIO card is known to be in a state where it might produce + * CRC errors on the bus in response to commands (like if we know it is + * transitioning between power states), an SDIO function driver can + * call this function to temporarily disable the SD/MMC core behavior of + * triggering an automatic retuning. + * + * This function should be called while the host is claimed and the host + * should remain claimed until sdio_retune_crc_enable() is called. + * Specifically, the expected sequence of calls is: + * - sdio_claim_host() + * - sdio_retune_crc_disable() + * - some number of calls like sdio_writeb() and sdio_readb() + * - sdio_retune_crc_enable() + * - sdio_release_host() + */ +void sdio_retune_crc_disable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = true; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_disable); + +/** + * sdio_retune_crc_enable - re-enable retuning on CRC errors + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_crc_disable(). + */ +void sdio_retune_crc_enable(struct sdio_func *func) +{ + func->card->host->retune_crc_disable = false; +} +EXPORT_SYMBOL_GPL(sdio_retune_crc_enable); + +/** + * sdio_retune_hold_now - start deferring retuning requests till release + * @func: SDIO function attached to host + * + * This function can be called if it's currently a bad time to do + * a retune of the SDIO card. Retune requests made during this time + * will be held and we'll actually do the retune sometime after the + * release. + * + * This function could be useful if an SDIO card is in a power state + * where it can respond to a small subset of commands that doesn't + * include the retuning command. Care should be taken when using + * this function since (presumably) the retuning request we might be + * deferring was made for a good reason. + * + * This function should be called while the host is claimed. + */ +void sdio_retune_hold_now(struct sdio_func *func) +{ + mmc_retune_hold_now(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_hold_now); + +/** + * sdio_retune_release - signal that it's OK to retune now + * @func: SDIO function attached to host + * + * This is the complement to sdio_retune_hold_now(). Calling this + * function won't make a retune happen right away but will allow + * them to be scheduled normally. + * + * This function should be called while the host is claimed.
+ */ +void sdio_retune_release(struct sdio_func *func) +{ + mmc_retune_release(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_release); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 7ca7b99413f0d98bc5cb9f48bb68dc4a77b78f6f..d206f2de80d2331130a106155c3fbc7862ab95c6 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -35,16 +35,24 @@ static int process_sdio_pending_irqs(struct mmc_host *host) { struct mmc_card *card = host->card; int i, ret, count; + bool sdio_irq_pending = host->sdio_irq_pending; unsigned char pending; struct sdio_func *func; + /* Don't process SDIO IRQs if the card is suspended. */ + if (mmc_card_suspended(card)) + return 0; + + /* Clear the flag to indicate that we have processed the IRQ. */ + host->sdio_irq_pending = false; + /* * Optimization, if there is only 1 function interrupt registered * and we know an IRQ was signaled then call irq handler directly. * Otherwise do the full probe. */ func = card->sdio_single_irq; - if (func && host->sdio_irq_pending) { + if (func && sdio_irq_pending) { func->irq_handler(func); return 1; } @@ -96,7 +104,6 @@ void sdio_run_irqs(struct mmc_host *host) { mmc_claim_host(host); if (host->sdio_irqs) { - host->sdio_irq_pending = true; process_sdio_pending_irqs(host); if (host->ops->ack_sdio_irq) host->ops->ack_sdio_irq(host); @@ -115,6 +122,7 @@ void sdio_irq_work(struct work_struct *work) void sdio_signal_irq(struct mmc_host *host) { + host->sdio_irq_pending = true; queue_delayed_work(system_wq, &host->sdio_irq_work, 0); } EXPORT_SYMBOL_GPL(sdio_signal_irq); @@ -160,7 +168,6 @@ static int sdio_irq_thread(void *_host) if (ret) break; ret = process_sdio_pending_irqs(host); - host->sdio_irq_pending = false; mmc_release_host(host); /* diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 694d0828215d22a41138dbe16fae3e38c2bbe956..c8ba58bd8086ed614a2d0c2633ed429102d776b4 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -943,3 +943,29 @@ config MMC_SDHCI_OMAP If you have a controller with this interface, say Y or M here. If unsure, say N. + +config MMC_PHYTIUM_SDCI + tristate "Phytium SD Host Controller support" + depends on ARM64 + help + This selects support for the Phytium SD Host Controller + +config MMC_PHYTIUM_MCI_PCI + tristate "Phytium PCI MultiMedia Card Interface support" + depends on ARCH_PHYTIUM + help + This selects support for the PCI MultiMedia Card Interface on Phytium + Px210 chipset. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config MMC_PHYTIUM_MCI_PLTFM + tristate "Phytium MultiMedia Card Interface support" + depends on ARCH_PHYTIUM && OF + help + This selects support for the MultiMedia Card Interface on Phytium SoCs. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
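The sdio_retune_crc_disable()/sdio_retune_crc_enable() and sdio_retune_hold_now()/sdio_retune_release() helpers added to sdio_io.c above are only safe to use while the host is claimed, in the order spelled out in their kernel-doc. A minimal sketch of how an SDIO function driver might bracket a power-state transition with them is shown below; the driver function, the WLAN_PWR_CTRL_REG offset and the assumption that the declarations are exported through linux/mmc/sdio_func.h are illustrative only, not part of this patch.

#include <linux/mmc/sdio_func.h>

#define WLAN_PWR_CTRL_REG	0x10	/* hypothetical function register */

static int wlan_enter_low_power(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	/* Defer any pending retune until the card can accept the tuning command again. */
	sdio_retune_hold_now(func);
	/* CRC errors are expected during the transition and must not trigger a retune. */
	sdio_retune_crc_disable(func);

	sdio_writeb(func, 0x1, WLAN_PWR_CTRL_REG, &ret);

	sdio_retune_crc_enable(func);
	sdio_retune_release(func);
	sdio_release_host(func);

	return ret;
}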
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index ce8398e6f2c0e49395c4a818c95ecb1e69d6b87e..323e61240bba254eca1bcaaee609b20b9aeeab19 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -69,6 +69,9 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o obj-$(CONFIG_MMC_BCM2835) += bcm2835.o +obj-$(CONFIG_MMC_PHYTIUM_SDCI) += phytium-sdci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PLTFM) += phytium-mci-plat.o phytium-mci.o obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index be53044086c76f7291224c33ed10c734ece7e897..fbc56ee9968277634b7d8ede472070d89d486380 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv) } atmci_request_end(host, host->mrq); - state = STATE_IDLE; + goto unlock; /* atmci_request_end() sets host->state */ break; } } while (state != prev_state); host->state = state; +unlock: spin_unlock(&host->lock); } diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 768972af8b853cf882b1b3311ed8fa1b0a64a49e..5301302fb53108641c16a5c7691f96efaa1b28f1 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -286,6 +286,7 @@ static void bcm2835_reset(struct mmc_host *mmc) if (host->dma_chan) dmaengine_terminate_sync(host->dma_chan); + host->dma_chan = NULL; bcm2835_reset_internal(host); } @@ -772,6 +773,8 @@ static void bcm2835_finish_command(struct bcm2835_host *host) if (!(sdhsts & SDHSTS_CRC7_ERROR) || (host->cmd->opcode != MMC_SEND_OP_COND)) { + u32 edm, fsm; + if (sdhsts & SDHSTS_CMD_TIME_OUT) { host->cmd->error = -ETIMEDOUT; } else { @@ -780,6 +783,13 @@ static void bcm2835_finish_command(struct bcm2835_host *host) bcm2835_dumpregs(host); host->cmd->error = -EILSEQ; } + edm = readl(host->ioaddr + SDEDM); + fsm = edm & SDEDM_FSM_MASK; + if (fsm == SDEDM_FSM_READWAIT || + fsm == SDEDM_FSM_WRITESTART1) + /* Kick the FSM out of its wait */ + writel(edm | SDEDM_FORCE_DATA_MODE, + host->ioaddr + SDEDM); bcm2835_finish_request(host); return; } @@ -837,6 +847,8 @@ static void bcm2835_timeout(struct work_struct *work) dev_err(dev, "timeout waiting for hardware interrupt.\n"); bcm2835_dumpregs(host); + bcm2835_reset(host->mmc); + if (host->data) { host->data->error = -ETIMEDOUT; bcm2835_finish_data(host); @@ -1427,6 +1439,8 @@ static int bcm2835_probe(struct platform_device *pdev) err: dev_dbg(dev, "%s -> err %d\n", __func__, ret); + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); mmc_free_host(mmc); return ret; diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index ed5cefb8376838b401aba0ac394c6a69e37dd818..89deb451e0ac6225c9481dc21ca21373f46c2d7c 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) { data->bytes_xfered = data->blocks * data->blksz; data->error = 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); return 1; } @@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) mmc->max_segs = 1; /* DMA size field can address up to 8 MB */ - mmc->max_seg_size = 8 * 1024 * 1024; + mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024, + 
dma_get_max_seg_size(host->dev)); mmc->max_req_size = mmc->max_seg_size; /* External DMA is in 512 byte blocks */ mmc->max_blk_size = 512; diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 159270e947cf62965a932edf9eb1d3a478ac89db..28f5aaca505acb668468b7ac34cd812e90219f36 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * - (cq_host->num_slots - 1); + cq_host->mmc->cqe_qdepth; pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, @@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size, &cq_host->desc_dma_base, GFP_KERNEL); + if (!cq_host->desc_base) + return -ENOMEM; + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), cq_host->data_size, &cq_host->trans_desc_dma_base, GFP_KERNEL); - if (!cq_host->desc_base || !cq_host->trans_desc_base) + if (!cq_host->trans_desc_base) { + dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + cq_host->desc_base = NULL; + cq_host->desc_dma_base = 0; return -ENOMEM; + } pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, @@ -608,7 +617,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) cq_host->slot[tag].flags = 0; cq_host->qcnt += 1; - + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR); if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) pr_debug("%s: cqhci: doorbell not set for tag %d\n", diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 9e68c3645e2270272f3bed4a5035e0dd8cdd2247..591cef502b59fe6976a19f667579c124ac4f94cc 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { } #endif -static void __init init_mmcsd_host(struct mmc_davinci_host *host) +static void init_mmcsd_host(struct mmc_davinci_host *host) { mmc_davinci_reset_ctrl(host, 1); @@ -1361,7 +1361,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev) return ret; } -static int __exit davinci_mmcsd_remove(struct platform_device *pdev) +static int davinci_mmcsd_remove(struct platform_device *pdev) { struct mmc_davinci_host *host = platform_get_drvdata(pdev); @@ -1413,7 +1413,7 @@ static struct platform_driver davinci_mmcsd_driver = { .of_match_table = davinci_mmc_dt_ids, }, .probe = davinci_mmcsd_probe, - .remove = __exit_p(davinci_mmcsd_remove), + .remove = davinci_mmcsd_remove, .id_table = davinci_mmc_devtype, }; diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c index 54c3fbb4a39181ea728fb5af34dca80827d2889e..db56d4f58aaab37d6763486f8e2daf8c1318ece2 100644 --- a/drivers/mmc/host/dw_mmc-bluefield.c +++ b/drivers/mmc/host/dw_mmc-bluefield.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Mellanox Technologies. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 80dc2fd6576cf3f88afd695ad1f36ec1b4f52b41..22c454c7aaca6a6bdaa906d8d17a2a3394ba7dcb 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2038,8 +2038,7 @@ static void dw_mci_tasklet_func(unsigned long priv) * delayed. Allowing the transfer to take place * avoids races and keeps things simple. */ - if ((err != -ETIMEDOUT) && - (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { + if (err != -ETIMEDOUT) { state = STATE_SENDING_DATA; continue; } @@ -3487,6 +3486,10 @@ int dw_mci_runtime_resume(struct device *dev) /* Force setup bus to guarantee available clock output */ dw_mci_setup_bus(host->slot, true); + /* Re-enable SDIO interrupts. */ + if (sdio_irq_claimed(host->slot->mmc)) + __dw_mci_enable_sdio_irq(host->slot, 1); + /* Now that slots are all setup, we can enable card detect */ dw_mci_enable_cd(host); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 993386c9ea500f2006c73b66c84876084f502c6f..864338e308e2b04ef026af950461959b20e37a9d 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -983,17 +983,17 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc, if (!pdata->read_only_active_low) mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (gpio_is_valid(pdata->gpio_card_detect)) { - ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0); - if (ret) - return ret; - } + /* + * Get optional card detect and write protect GPIOs, + * only back out on probe deferral. 
+ */ + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + if (ret == -EPROBE_DEFER) + return ret; - if (gpio_is_valid(pdata->gpio_read_only)) { - ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only); - if (ret) - return ret; - } + ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + if (ret == -EPROBE_DEFER) + return ret; return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, "MMC read only", true, pdata->power_active_low); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c201c378537e4f1a8a601e8d857481a7e6af9f0c..72f34a58928ca6bd27fec902384d4a091e6b9ecf 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -90,9 +91,11 @@ #define CFG_CLK_ALWAYS_ON BIT(18) #define CFG_CHK_DS BIT(20) #define CFG_AUTO_CLK BIT(23) +#define CFG_ERR_ABORT BIT(27) #define SD_EMMC_STATUS 0x48 #define STATUS_BUSY BIT(31) +#define STATUS_DESC_BUSY BIT(30) #define STATUS_DATI GENMASK(23, 16) #define SD_EMMC_IRQ_EN 0x4c @@ -174,6 +177,8 @@ struct meson_host { struct sd_emmc_desc *descs; dma_addr_t descs_dma_addr; + int irq; + bool vqmmc_enabled; }; @@ -928,6 +933,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ + cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */ meson_mmc_set_response_bits(cmd, &cmd_cfg); @@ -1022,6 +1028,17 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) u32 irq_en, status, raw_status; irqreturn_t ret = IRQ_NONE; + irq_en = readl(host->regs + SD_EMMC_IRQ_EN); + raw_status = readl(host->regs + SD_EMMC_STATUS); + status = raw_status & irq_en; + + if (!status) { + dev_dbg(host->dev, + "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n", + irq_en, raw_status); + return IRQ_NONE; + } + if (WARN_ON(!host) || WARN_ON(!host->cmd)) return IRQ_NONE; @@ -1029,22 +1046,18 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) cmd = host->cmd; data = cmd->data; - irq_en = readl(host->regs + SD_EMMC_IRQ_EN); - raw_status = readl(host->regs + SD_EMMC_STATUS); - status = raw_status & irq_en; - cmd->error = 0; if (status & IRQ_CRC_ERR) { dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); cmd->error = -EILSEQ; - ret = IRQ_HANDLED; + ret = IRQ_WAKE_THREAD; goto out; } if (status & IRQ_TIMEOUTS) { dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); cmd->error = -ETIMEDOUT; - ret = IRQ_HANDLED; + ret = IRQ_WAKE_THREAD; goto out; } @@ -1069,17 +1082,49 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) /* ack all enabled interrupts */ writel(irq_en, host->regs + SD_EMMC_STATUS); + if (cmd->error) { + /* Stop desc in case of errors */ + u32 start = readl(host->regs + SD_EMMC_START); + + start &= ~START_DESC_BUSY; + writel(start, host->regs + SD_EMMC_START); + } + if (ret == IRQ_HANDLED) meson_mmc_request_done(host->mmc, cmd->mrq); - else if (ret == IRQ_NONE) - dev_warn(host->dev, - "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n", - raw_status, irq_en); spin_unlock(&host->lock); return ret; } +static int meson_mmc_wait_desc_stop(struct meson_host *host) +{ + int loop; + u32 status; + + /* + * It may sometimes take a while for it to actually halt. Here, we + * are giving it 5ms to comply + * + * If we don't confirm the descriptor is stopped, it might raise new + * IRQs after we have called mmc_request_done() which is bad. 
+ */ + for (loop = 50; loop; loop--) { + status = readl(host->regs + SD_EMMC_STATUS); + if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) + udelay(100); + else + break; + } + + if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) { + dev_err(host->dev, "Timed out waiting for host to stop\n"); + return -ETIMEDOUT; + } + + return 0; +} + static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) { struct meson_host *host = dev_id; @@ -1090,6 +1135,13 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) if (WARN_ON(!cmd)) return IRQ_NONE; + if (cmd->error) { + meson_mmc_wait_desc_stop(host); + meson_mmc_request_done(host->mmc, cmd->mrq); + + return IRQ_HANDLED; + } + data = cmd->data; if (meson_mmc_bounce_buf_read(data)) { xfer_bytes = data->blksz * data->blocks; @@ -1130,6 +1182,9 @@ static void meson_mmc_cfg_init(struct meson_host *host) cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); + /* abort chain on R/W errors */ + cfg |= CFG_ERR_ABORT; + writel(cfg, host->regs + SD_EMMC_CFG); } @@ -1181,7 +1236,7 @@ static int meson_mmc_probe(struct platform_device *pdev) struct resource *res; struct meson_host *host; struct mmc_host *mmc; - int ret, irq; + int ret; mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); if (!mmc) @@ -1228,8 +1283,8 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } - irq = platform_get_irq(pdev, 0); - if (irq <= 0) { + host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto free_host; @@ -1283,9 +1338,9 @@ static int meson_mmc_probe(struct platform_device *pdev) writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, host->regs + SD_EMMC_IRQ_EN); - ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, - NULL, host); + ret = request_threaded_irq(host->irq, meson_mmc_irq, + meson_mmc_irq_thread, IRQF_SHARED, + dev_name(&pdev->dev), host); if (ret) goto err_init_clk; @@ -1303,7 +1358,7 @@ static int meson_mmc_probe(struct platform_device *pdev) if (host->bounce_buf == NULL) { dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); ret = -ENOMEM; - goto err_init_clk; + goto err_free_irq; } host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, @@ -1322,6 +1377,8 @@ static int meson_mmc_probe(struct platform_device *pdev) err_bounce_buf: dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); +err_free_irq: + free_irq(host->irq, host); err_init_clk: clk_disable_unprepare(host->mmc_clk); err_core_clk: @@ -1339,6 +1396,7 @@ static int meson_mmc_remove(struct platform_device *pdev) /* disable interrupts */ writel(0, host->regs + SD_EMMC_IRQ_EN); + free_irq(host->irq, host); dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, host->descs, host->descs_dma_addr); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2cfec33178c1fa20532270c04a1f634414a4ec19..f6c76be2be0d3fa0d5b93bdd1d79acee793f7a9d 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -76,7 +76,7 @@ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) - #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) #define 
MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) @@ -596,6 +596,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host) init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, "%s#fixed_factor", dev_name(host->controller_dev)); + if (!init.name) + return -ENOMEM; + init.ops = &clk_fixed_factor_ops; init.flags = 0; init.parent_names = &clk_fixed_factor_parent; @@ -612,6 +615,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host) clk_div_parent = __clk_get_name(host->fixed_factor_clk); init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, "%s#div", dev_name(host->controller_dev)); + if (!init.name) + return -ENOMEM; + init.ops = &clk_divider_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = &clk_div_parent; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 476e53d301283d0efa06486312dfc06d40373a16..4fb61e87b9e3e8c59ba09adc3bdad296ce6191f6 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, } status = spi_sync_locked(spi, &host->m); + if (status < 0) { + dev_dbg(&spi->dev, "read error %d\n", status); + return status; + } if (host->dma_dev) { dma_sync_single_for_cpu(host->dma_dev, @@ -1432,13 +1436,13 @@ static int mmc_spi_probe(struct spi_device *spi) status = mmc_add_host(mmc); if (status != 0) - goto fail_add_host; + goto fail_glue_init; if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) { status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio, host->pdata->cd_debounce); if (status != 0) - goto fail_add_host; + goto fail_gpiod_request; /* The platform has a CD GPIO signal that may support * interrupts, so let mmc_gpiod_request_cd_irq() decide @@ -1447,12 +1451,13 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->caps &= ~MMC_CAP_NEEDS_POLL; mmc_gpiod_request_cd_irq(mmc); } + mmc_detect_change(mmc, 0); if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) { has_ro = true; status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio); if (status != 0) - goto fail_add_host; + goto fail_gpiod_request; } dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", @@ -1465,7 +1470,7 @@ static int mmc_spi_probe(struct spi_device *spi) ? 
", cd polling" : ""); return 0; -fail_add_host: +fail_gpiod_request: mmc_remove_host (mmc); fail_glue_init: if (host->dma_dev) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 1841d250e9e2c67690d06e10f382e45857b6f6d9..fa6268c0f12321f8a6f0c5f22074b31af93c70c3 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -895,14 +895,18 @@ static void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { + unsigned int status_err; + /* Make sure we have data to handle */ if (!data) return; /* First check for errors */ - if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | - host->variant->start_err | - MCI_TXUNDERRUN | MCI_RXOVERRUN)) { + status_err = status & (host->variant->start_err | + MCI_DATACRCFAIL | MCI_DATATIMEOUT | + MCI_TXUNDERRUN | MCI_RXOVERRUN); + + if (status_err) { u32 remain, success; /* Terminate the DMA transfer */ @@ -922,18 +926,18 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, success = data->blksz * data->blocks - remain; dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", - status, success); - if (status & MCI_DATACRCFAIL) { + status_err, success); + if (status_err & MCI_DATACRCFAIL) { /* Last block was not successful */ success -= 1; data->error = -EILSEQ; - } else if (status & MCI_DATATIMEOUT) { + } else if (status_err & MCI_DATATIMEOUT) { data->error = -ETIMEDOUT; - } else if (status & MCI_STARTBITERR) { + } else if (status_err & MCI_STARTBITERR) { data->error = -ECOMM; - } else if (status & MCI_TXUNDERRUN) { + } else if (status_err & MCI_TXUNDERRUN) { data->error = -EIO; - } else if (status & MCI_RXOVERRUN) { + } else if (status_err & MCI_RXOVERRUN) { if (success > host->variant->fifosize) success -= host->variant->fifosize; else @@ -1295,9 +1299,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) } /* - * Don't poll for busy completion in irq context. + * Busy detection has been handled by mmci_cmd_irq() above. + * Clear the status bit to prevent polling in IRQ context. 
*/ - if (host->variant->busy_detect && host->busy_status) + if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; ret = 1; @@ -1789,7 +1794,7 @@ static int mmci_probe(struct amba_device *dev, goto clk_disable; } - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0); amba_set_drvdata(dev, mmc); @@ -1876,7 +1881,8 @@ static void mmci_restore(struct mmci_host *host) writel(host->datactrl_reg, host->base + MMCIDATACTRL); writel(host->pwr_reg, host->base + MMCIPOWER); } - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); mmci_reg_delay(host); spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 517591d219e933fc233c636adcbff268f4bb621e..613d37ab08d20c84c2b37cd08ed6683e6311d0aa 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -181,9 +181,9 @@ #define MMCIFIFO 0x080 /* to 0x0bc */ #define MCI_IRQENABLE \ - (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ - MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ - MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK) + (MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \ + MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK | \ + MCI_CMDRESPENDMASK | MCI_CMDSENTMASK) /* These interrupts are directed to IRQ1 when two IRQ lines are available */ #define MCI_IRQ1MASK \ diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index a0670e9cd0127b180a8208880418643ea43680aa..f483211c5cc6623a850960bda78f898e41570bf9 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -695,12 +695,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); - mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); + mmc_free_host(mmc); } return 0; } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 04841386b65da4b474e8b30afe9872a727eac902..9ecf86ba4bb0b60d24c72116c0e9eafa18c554f2 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -228,6 +228,7 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ @@ -390,7 +391,6 @@ struct msdc_host { struct clk *src_clk_cg; /* msdc source clock control gate */ u32 mclk; /* mmc subsystem clock frequency */ u32 src_clk_freq; /* source clock frequency */ - u32 sclk; /* SD/MS bus clock frequency */ unsigned char timing; bool vqmmc_enabled; u32 latch_ck; @@ -635,10 +635,10 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) host->timeout_ns = ns; host->timeout_clks = clks; - if (host->sclk == 0) { + if (host->mmc->actual_clock == 0) { timeout = 0; } else { - clk_ns = 1000000000UL / host->sclk; + clk_ns = 1000000000UL / host->mmc->actual_clock; timeout = (ns + clk_ns - 1) / clk_ns + clks; /* in 1048576 sclk cycle unit */ timeout = (timeout + (0x1 << 20) - 1) >> 20; @@ -683,6 +683,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (!hz) { dev_dbg(host->dev, "set mclk to 0\n"); 
host->mclk = 0; + host->mmc->actual_clock = 0; sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); return; } @@ -761,7 +762,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); - host->sclk = sclk; + host->mmc->actual_clock = sclk; host->mclk = hz; host->timing = timing; /* need because clk changed. */ @@ -772,7 +773,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) * mmc_select_hs400() will drop to 50Mhz and High speed mode, * tune result of hs200/200Mhz is not suitable for 50Mhz */ - if (host->sclk <= 52000000) { + if (host->mmc->actual_clock <= 52000000) { writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); writel(host->def_tune_para.pad_tune, host->base + tune_reg); } else { @@ -784,10 +785,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && host->dev_comp->hs400_tune) - sdr_set_field(host->base + PAD_CMD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); - dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing); + dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, + timing); } static inline u32 msdc_cmd_find_resp(struct msdc_host *host, @@ -1055,6 +1057,7 @@ static void msdc_start_command(struct msdc_host *host, WARN_ON(host->cmd); host->cmd = cmd; + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); if (!msdc_cmd_is_ready(host, mrq, cmd)) return; @@ -1066,7 +1069,6 @@ static void msdc_start_command(struct msdc_host *host, cmd->error = 0; rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); - mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); writel(cmd->arg, host->base + SDC_ARG); @@ -1672,6 +1674,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) /* select EMMC50 PAD CMD tune */ sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); + sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index de4e6e5bf304468123d657d8792a797690d666e6..2f604b312767d01b4d713bd46fd17b5ce83aa180 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -292,11 +292,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data) struct scatterlist *sg; int i; - for_each_sg(data->sg, sg, data->sg_len, i) { - void *buf = kmap_atomic(sg_page(sg) + sg->offset); - buffer_swap32(buf, sg->length); - kunmap_atomic(buf); - } + for_each_sg(data->sg, sg, data->sg_len, i) + buffer_swap32(sg_virt(sg), sg->length); } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} @@ -613,7 +610,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) { struct mmc_data *data = host->req->data; struct scatterlist *sg; - void *buf; int stat, i; host->data = data; @@ -621,18 +617,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) if (data->flags & MMC_DATA_READ) { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_pull(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_pull(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; } } else { for_each_sg(data->sg, sg, data->sg_len, i) { - buf = 
kmap_atomic(sg_page(sg) + sg->offset); - stat = mxcmci_push(host, buf, sg->length); - kunmap(buf); + stat = mxcmci_push(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index adf32682f27a3c8f96c2c244af96cae2bafbd6a8..b2873a2432b69fd374daa049729348c5af042ed6 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -104,6 +104,7 @@ struct mmc_omap_slot { unsigned int vdd; u16 saved_con; u16 bus_mode; + u16 power_mode; unsigned int fclk_freq; struct tasklet_struct cover_tasklet; @@ -919,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques reg &= ~(1 << 5); OMAP_MMC_WRITE(host, SDIO, reg); /* Set maximum timeout */ - OMAP_MMC_WRITE(host, CTO, 0xff); + OMAP_MMC_WRITE(host, CTO, 0xfd); } static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) @@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct mmc_omap_slot *slot = mmc_priv(mmc); struct mmc_omap_host *host = slot->host; int i, dsor; - int clk_enabled; + int clk_enabled, init_stream; mmc_omap_select_slot(slot, 0); @@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) slot->vdd = ios->vdd; clk_enabled = 0; + init_stream = 0; switch (ios->power_mode) { case MMC_POWER_OFF: mmc_omap_set_power(slot, 0, ios->vdd); @@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_UP: /* Cannot touch dsor yet, just power up MMC */ mmc_omap_set_power(slot, 1, ios->vdd); + slot->power_mode = ios->power_mode; goto exit; case MMC_POWER_ON: mmc_omap_fclk_enable(host, 1); clk_enabled = 1; dsor |= 1 << 11; + if (slot->power_mode != MMC_POWER_ON) + init_stream = 1; break; } + slot->power_mode = ios->power_mode; if (slot->bus_mode != ios->bus_mode) { if (slot->pdata->set_bus_mode != NULL) @@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) for (i = 0; i < 2; i++) OMAP_MMC_WRITE(host, CON, dsor); slot->saved_con = dsor; - if (ios->power_mode == MMC_POWER_ON) { + if (init_stream) { /* worst case at 400kHz, 80 cycles makes 200 microsecs */ int usecs = 250; @@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) slot->host = host; slot->mmc = mmc; slot->id = id; + slot->power_mode = MMC_POWER_UNDEFINED; slot->pdata = &host->pdata->slots[id]; host->slots[id] = slot; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 68760d4a5d3da1a53c655d1b33f1c2237273d136..0135693afa158420587976796dde940f0872fd65 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1661,6 +1661,36 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) if (mmc_pdata(host)->init_card) mmc_pdata(host)->init_card(card); + else if (card->type == MMC_TYPE_SDIO || + card->type == MMC_TYPE_SD_COMBO) { + struct device_node *np = mmc_dev(mmc)->of_node; + + /* + * REVISIT: should be moved to sdio core and made more + * general e.g. by expanding the DT bindings of child nodes + * to provide a mechanism to provide this information: + * Documentation/devicetree/bindings/mmc/mmc-card.txt + */ + + np = of_get_compatible_child(np, "ti,wl1251"); + if (np) { + /* + * We have TI wl1251 attached to MMC3. Pass this + * information to the SDIO core because it can't be + * probed by normal methods. 
+ */ + + dev_info(host->dev, "found wl1251\n"); + card->quirks |= MMC_QUIRK_NONSTD_SDIO; + card->cccr.wide_bus = 1; + card->cis.vendor = 0x104c; + card->cis.device = 0x9066; + card->cis.blksize = 512; + card->cis.max_dtr = 24000000; + card->ocr = 0x80; + of_node_put(np); + } + } } static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -2066,7 +2096,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; - mmc->max_seg_size = mmc->max_req_size; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; @@ -2096,6 +2125,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev) goto err_irq; } + /* + * Limit the maximum segment size to the lower of the request size + * and the DMA engine device segment size limits. In reality, with + * 32-bit transfers, the DMA engine can do longer segments than this + * but there is no way to represent that in the DMA model - if we + * increase this figure here, we get warnings from the DMA API debug. + */ + mmc->max_seg_size = min3(mmc->max_req_size, + dma_get_max_seg_size(host->rx_chan->device->dev), + dma_get_max_seg_size(host->tx_chan->device->dev)); + /* Request IRQ for MMC operations */ ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, mmc_hostname(mmc), host); diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..717a186f838548119a8b572a5feb48f23320b0f5 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2020-2023, Phytium Technology Co., Ltd. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; +static u32 sd_caps2 = MMC_CAP2_NO_MMC; + +static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | + MMC_CAP_CMD23 | MMC_CAP_HW_RESET | + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_NONREMOVABLE; + +static u32 emmc_caps2 = MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + +#define PCI_BAR_NO 0 + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int +phytium_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + int ret; + + ret = pcim_enable_device(pdev); + + if (ret) + return ret; + pci_set_master(pdev); + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + + pci_enable_msi(pdev); + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto host_free; + } + + host->base = pcim_iomap_table(pdev)[PCI_BAR_NO]; + host->is_use_dma = 1; + host->is_device_x100 = 1; + + if (pdev->devfn == 2) { + host->caps = emmc_caps; + host->caps2 = emmc_caps2; + } else { + host->caps = sd_caps; + host->caps2 = sd_caps2; + mmc->f_max = 25000000; /* stable frequency */ + } + + host->mmc = mmc; + host->clk_rate = MCI_CLK; + + dev_info(&pdev->dev, + "%s %d:[bar %d] addr:0x%llx size:0x%llx km:0x%llx devfn:%d\n", + __func__, __LINE__, PCI_BAR_NO, pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), (uint64_t)host->base, pdev->devfn); + + dev_dbg(&pdev->dev, "%s %d:irq:0x%x\n", __func__, __LINE__, host->irq); + + ret = phytium_mci_common_probe(host); + + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + pci_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, + "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + pci_disable_device(pdev); + return ret; +} + +static void phytium_mci_pci_remove(struct pci_dev *pdev) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + + mmc = pci_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, + "%s %d: mmc is null.\n", __func__, __LINE__); + return; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, + "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return; + } + + del_timer(&host->hotplug_timer); + + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id phytium_mci_pci_tbl[] 
= { + { + .vendor = 0x1DB7, + .device = 0xDC28, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x5, + .class_mask = 0, + }, + {} +}; +MODULE_DEVICE_TABLE(pci, phytium_mci_pci_tbl); + +static struct pci_driver phytium_mci_pci_driver = { + .name = "phytium-mci-pci", + .id_table = phytium_mci_pci_tbl, + .probe = phytium_mci_pci_probe, + .remove = phytium_mci_pci_remove, + .driver = { + .pm = &phytium_mci_dev_pm_ops, + } +}; +module_pci_driver(phytium_mci_pci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci-plat.c b/drivers/mmc/host/phytium-mci-plat.c new file mode 100644 index 0000000000000000000000000000000000000000..0715db2ad468af673071d8ea2d4ccb71c15c8eda --- /dev/null +++ b/drivers/mmc/host/phytium-mci-plat.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 mci_caps = MMC_CAP_CMD23 | MMC_CAP_ERASE | MMC_CAP_WAIT_WHILE_BUSY; + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP + +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int phytium_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + struct resource *res; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + int ret; + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_mci_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + host->clk_rate = 1200000000; + } + + host->is_use_dma = 1; + host->is_device_x100 = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 0); + + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + host->irq_flags = IRQF_SHARED; + dev_dbg(&pdev->dev, "%s %d:irq:%d\n", __func__, __LINE__, host->irq); + host->dev = &pdev->dev; + host->caps = mci_caps; + host->mmc = mmc; + ret = phytium_mci_common_probe(host); + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, + "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + return ret; +} + +static int phytium_mci_remove(struct 
platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + + mmc = platform_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, + "%s %d: mmc is null.\n", __func__, __LINE__); + return -1; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, + "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return -1; + } + del_timer(&host->hotplug_timer); + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id phytium_mci_of_ids[] = { + { .compatible = "phytium,mci", }, + {} +}; + +MODULE_DEVICE_TABLE(of, phytium_mci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_mci_acpi_ids[] = { + { .id = "PHYT0017" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_mci_acpi_ids); +#else +#define phytium_mci_acpi_ids NULL +#endif + +static struct platform_driver phytium_mci_driver = { + .probe = phytium_mci_probe, + .remove = phytium_mci_remove, + .driver = { + .name = "phytium-mci-platform", + .of_match_table = phytium_mci_of_ids, + .acpi_match_table = phytium_mci_acpi_ids, + .pm = &phytium_mci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_mci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c new file mode 100644 index 0000000000000000000000000000000000000000..14f5abc636323ad773e8ee0122bf2912648d7a24 --- /dev/null +++ b/drivers/mmc/host/phytium-mci.c @@ -0,0 +1,1600 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | + MCI_INT_MASK_RCRC | MCI_INT_MASK_RTO | + MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; + +static const u32 data_ints_mask = MCI_INT_MASK_DTO | MCI_INT_MASK_DCRC | + MCI_INT_MASK_DRTO | MCI_INT_MASK_SBE_BCI; + +static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | + MCI_INT_MASK_RE | MCI_INT_MASK_DCRC | + MCI_INT_MASK_DRTO | MCI_MASKED_INTS_SBE_BCI; + +static const u32 dmac_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_NIS | MCI_DMAC_INT_ENA_AIS; +static const u32 dmac_err_ints_mask = MCI_DMAC_INT_ENA_FBE | + MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_AIS; + +static void phytium_mci_cmd_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); + +static void phytium_mci_adma_reset(struct phytium_mci_host *host); + +static +void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg); + +static +bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data); + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma); + +static void phytium_mci_init_hw(struct phytium_mci_host *host); +static int phytium_mci_get_cd(struct mmc_host *mmc); + +static int +phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events); + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void phytium_mci_reset_hw(struct phytium_mci_host *host) +{ + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | + MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & + (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + phytium_mci_send_cmd(host, MCI_CMD_UPD_CLK, 0); +} + +static void phytium_mci_update_external_clk(struct phytium_mci_host *host, + u32 uhs_reg_value) +{ + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs_reg_value, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + +} + +static void phytium_mci_prepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MCI_PREPARE_FLAG)) { + data->host_cookie |= MCI_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } +} + +static void phytium_mci_unprepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MCI_ASYNC_FLAG) + return; + + if (data->host_cookie & MCI_PREPARE_FLAG) { + dma_unmap_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + data->host_cookie &= ~MCI_PREPARE_FLAG; + } +} + +static +void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg) +{ + int rc; + u32 data; + + writel(arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & 
MCI_STATUS_CARD_BUSY), + 0, 100 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", + __func__, __LINE__, data); + + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + rc = readl_relaxed_poll_timeout(host->base + MCI_CMD, + data, + !(data & MCI_CMD_START), + 0, 100 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_cmd: 0x%08x\n", + __func__, __LINE__, data); +} + +static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) +{ + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static +void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) +{ + u32 div = 0xff, drv = 0, sample = 0; + unsigned long clk_rate; + u32 mci_cmd_bits = MCI_CMD_UPD_CLK; + u32 cmd_reg; + u32 cur_cmd_index; + u32 first_uhs_div, tmp_ext_reg; + + cmd_reg = readl(host->base + MCI_CMD); + cur_cmd_index = cmd_reg & 0x3F; + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + mci_cmd_bits |= MCI_CMD_VOLT_SWITCH; + if (ios->clock) { + if (host->current_ios_clk == ios->clock) + return; + + dev_dbg(host->dev, "will change clock, "); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + + if (ios->clock >= 25000000) + tmp_ext_reg = 0x202; + else if (ios->clock == 400000) + tmp_ext_reg = 0x502; + else + tmp_ext_reg = 0x302; + + phytium_mci_update_external_clk(host, tmp_ext_reg); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + clk_rate = host->clk_rate; + first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); + div = clk_rate / (2 * first_uhs_div * ios->clock); + if (div > 2) { + sample = div / 2 + 1; + drv = sample - 1; + writel((sample << 16) | (drv << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (div == 2) { + drv = 0; + sample = 1; + writel((drv << 8) | (sample << 16) | (div & 0xff), + host->base + MCI_CLKDIV); + } + + dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", + readl(host->base + MCI_UHS_REG_EXT), + readl(host->base + MCI_CLKDIV)); + + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + host->current_ios_clk = ios->clock; + + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } else { + host->current_ios_clk = 0; + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + +static inline u32 +phytium_mci_cmd_find_resp(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + resp = 0x5; + break; + + case MMC_RSP_R2: + resp = 0x7; + break; + + case MMC_RSP_R3: + resp = 0x1; + break; + + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 opcode = cmd->opcode; + u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); + + if 
(opcode == MMC_GO_INACTIVE_STATE || + (opcode == SD_IO_RW_DIRECT && + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 28); + + if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) + rawcmd |= (0x1 << 15); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + rawcmd |= (0x1 << 9); + + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 10); + } + + return (rawcmd | (0x1 << 29) | (0x1 << 31)); +} + +static inline void +phytium_mci_adma_write_desc(struct phytium_mci_host *host, + struct phytium_adma2_64_desc *desc, + dma_addr_t addr, u32 len, u32 attribute) +{ + desc->attribute = attribute; + desc->len = len; + desc->addr_lo = lower_32_bits(addr); + desc->addr_hi = upper_32_bits(addr); + dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, + __LINE__, desc->addr_lo, desc->addr_hi); + + if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { + desc->desc_lo = 0; + desc->desc_hi = 0; + } +} + +static void +phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host *host, + struct mmc_data *data) +{ + struct phytium_adma2_64_desc *desc; + u32 dma_len, i; + dma_addr_t dma_address; + struct scatterlist *sg; + + phytium_mci_init_adma_table(host, &host->dma); + + desc = host->dma.adma_table; + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + if (i == 0) { + if (sg_is_last(sg) || (data->sg_count == 1 && + dma_len == SD_BLOCK_SIZE)) + phytium_mci_adma_write_desc(host, desc, + dma_address, + dma_len, 0x8000000c); + else + phytium_mci_adma_write_desc(host, desc, + dma_address, + dma_len, 0x8000001a); + } else if (sg_is_last(sg)) { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000004); + } else { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000012); + } + + desc++; + } +} + +static void +phytium_mci_data_sg_write_2_fifo(struct phytium_mci_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg; + u32 dma_len, i, j; + u32 *virt_addr; + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + writel(0x1<<10, host->base + MCI_CMD); + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + } + } +} + +static void phytium_mci_restart_clk(struct phytium_mci_host *host) +{ + u32 clk_div, uhs; + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + clk_div = readl(host->base + MCI_CLKDIV); + uhs = readl(host->base + MCI_UHS_REG_EXT); + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + + writel(clk_div, host->base + MCI_CLKDIV); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + writel(MCI_CMD_START | MCI_CMD_UPD_CLK, host->base + MCI_CMD); + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static int +start_multiple_write(struct phytium_mci_host *host, struct mmc_request *mrq, + u32 cnts, u32 offset) +{ + u32 rawcmd, cmd_status; + struct mmc_command *cmd = mrq->cmd; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) 
+ cpu_relax(); + + writel(0xffffe, host->base + MCI_RAW_INTS); + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(cnts * mrq->data->blksz, host->base + MCI_BYTCNT); + writel(cmd->arg + offset, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + deadline_time = jiffies + msecs_to_jiffies(200); + + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + deadline_time = jiffies + msecs_to_jiffies(1000); + while (!(cmd_status & MCI_MASKED_INTS_DTO)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + if (cmd_status & MCI_MASKED_INTS_DTO) + return 0; + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + return 0; +} + +static int start_sbc_stop_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd, u32 arg) +{ + u32 rawcmd, cmd_status; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + writel(0xffffe, host->base + MCI_RAW_INTS); + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + + deadline_time = jiffies + msecs_to_jiffies(200); + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) + return -ENOMEDIUM; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ETIMEDOUT; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + return 0; +} + +static void +phytium_mci_start_write_multiple_non_dma(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + u32 write_cnts, last_cnts; + u32 i, j, k, send_cnt_one_sg, block_offset; + int ret = 0, dma_len; + struct scatterlist *sg; + u32 *virt_addr = NULL; + + write_cnts = data->blocks / 4; + (data->blocks % 4) ? 
write_cnts++ : write_cnts; + last_cnts = data->blocks % 4; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) { + ret = -ENOMEDIUM; + goto write_err; + } + + dev_dbg(host->dev, "%s: cmd:%d, block counts:%d\n", + __func__, mrq->cmd->opcode, data->blocks); + + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + block_offset = 0; + for_each_sg(data->sg, sg, data->sg_count, i) { + /* Each SG data transfor starts */ + dma_len = sg_dma_len(sg); + send_cnt_one_sg = (dma_len / MCI_MAX_FIFO_CNT) + 1; + virt_addr = sg_virt(sg); + for (k = 0; k < send_cnt_one_sg; k++) { + if (dma_len && dma_len >= MCI_MAX_FIFO_CNT) { + /*first write sbc cmd*/ + ret = start_sbc_stop_cmd(host, mrq, + mrq->sbc, 4); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (MCI_MAX_FIFO_CNT / 4); + j++) { + writel(*virt_addr, + host->base + MCI_DATA); + virt_addr++; + } + + /*second write cmd25 here*/ + ret = start_multiple_write(host, mrq, 4, + block_offset); + if (ret) + goto write_err; + block_offset += 4; + dma_len -= MCI_MAX_FIFO_CNT; + } else if (dma_len > 0) { + /*first write sbc cmd*/ + last_cnts = dma_len / 512; + ret = start_sbc_stop_cmd(host, mrq, + mrq->sbc, + last_cnts); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, + host->base + MCI_DATA); + virt_addr++; + } + /*second write cmd25 here*/ + ret = start_multiple_write(host, mrq, + last_cnts, + block_offset); + if (ret) + goto write_err; + block_offset += last_cnts; + dma_len = 0; + } else { + dev_dbg(host->dev, + "%s: sg %d end\n", __func__, i); + break; + } + } + } + } + +write_err: + host->data = NULL; + host->cmd = NULL; + host->mrq = NULL; + writel(0xffffe, host->base + MCI_RAW_INTS); + if (ret) { + data->bytes_xfered = 0; + if (ret == -ESHUTDOWN) { + sdr_set_bits(host->base + MCI_CNTRL, + MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & + MCI_CNTRL_FIFO_RESET) + cpu_relax(); + + sdr_set_bits(host->base + MCI_CNTRL, + MCI_CNTRL_CONTROLLER_RESET); + while (readl(host->base + MCI_STATUS) & + MCI_STATUS_CARD_BUSY) + sdr_set_bits(host->base + MCI_CNTRL, + MCI_CNTRL_CONTROLLER_RESET); + phytium_mci_restart_clk(host); + start_sbc_stop_cmd(host, mrq, mrq->stop, + mrq->stop->arg); + } + data->error = -ETIMEDOUT; + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + return; + } + data->bytes_xfered = data->blocks * data->blksz; + mmc_request_done(host->mmc, mrq); +} + +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read; + u32 rawcmd; + unsigned long flags; + + + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_err_irq(host, 0, MCI_INT_MASK_RTO); + return; + } + /* clear interrupts */ + writel(0xffffe, host->base + MCI_RAW_INTS); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | + MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | + MCI_CNTRL_DMA_RESET)) + 
cpu_relax(); + + if (host->adtc_type == COMMOM_ADTC) + sdr_clr_bits(host->base + MCI_CNTRL, + MCI_CNTRL_USE_INTERNAL_DMAC); + else + sdr_set_bits(host->base + MCI_CNTRL, + MCI_CNTRL_USE_INTERNAL_DMAC); + wmb(); /* drain writebuffer */ + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) + phytium_mci_data_sg_write_2_admc_table(host, data); + else + phytium_mci_data_sg_write_2_fifo(host, data); + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask | data_ints_mask); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) { + sdr_set_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + /* Enable the IDMAC */ + sdr_set_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + writel((u32)host->dma.adma_addr, + host->base + MCI_DESC_LIST_ADDRL); + writel((u32)(host->dma.adma_addr >> 32), + host->base + MCI_DESC_LIST_ADDRH); + } + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(mrq->data->blocks * mrq->data->blksz, host->base + MCI_BYTCNT); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + writel(cmd->arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void phytium_mci_track_cmd_data(struct phytium_mci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void +phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + if (host->cmd) + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); + + if (mrq->data) + phytium_mci_unprepare_data(host, mrq); + + mmc_request_done(host->mmc, mrq); +} + +static bool +phytium_mci_cmd_done(struct phytium_mci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (!(events & (MCI_RAW_INTS_RCRC | MCI_RAW_INTS_RE | MCI_RAW_INTS_CMD | + MCI_RAW_INTS_RTO | MCI_INT_MASK_HTO))) { + dev_err(host->dev, "No interrupt generation:h%x\n", events); + return done; + } + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + if (done) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + sdr_clr_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + spin_unlock_irqrestore(&host->lock, flags); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + (events & MCI_RAW_INTS_RTO) && + readl(host->base + MCI_CARD_DETECT)) { + cmd->error = -ENOMEDIUM; + rsp[0] = 0; + } else if (events & MCI_RAW_INTS_RTO || + (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { + cmd->error = -ETIMEDOUT; + } else if (events & MCI_RAW_INTS_RCRC) { + 
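+			/* Response CRC error: report it as an illegal sequence. */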
cmd->error = -EILSEQ; + } else { + cmd->error = -ETIMEDOUT; + } + } + phytium_mci_cmd_next(host, mrq, cmd); + return true; +} + +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + writel(0xffffe, host->base + MCI_RAW_INTS); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + spin_unlock_irqrestore(&host->lock, flags); + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && + readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, cmd); + return; + } + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + writel(cmd->arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void +phytium_mci_cmd_next(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if ((cmd->error && !(cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || + (mrq->sbc && mrq->sbc->error)) { + phytium_mci_request_done(host, mrq); + } else if (cmd == mrq->sbc) { + if ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK)) { + dev_dbg(host->dev, "%s %d:sbc done, ", + __func__, __LINE__); + dev_dbg(host->dev, "next cmd :%d length:%d\n", + mrq->cmd->opcode, mrq->data->sg->length); + phytium_mci_prepare_data(host, mrq); + if (host->is_use_dma) + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + } else { + dev_err(host->dev, + "%s %d:ERROR: cmd %d followers the SBC\n", + __func__, __LINE__, cmd->opcode); + } + } else if (!cmd->data) { + phytium_mci_request_done(host, mrq); + } +} + +static void +phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 data; + int rc; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & MCI_STATUS_CARD_BUSY), + 0, 2000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", + __func__, __LINE__, data); + + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, + mrq->cmd->opcode, mrq->cmd->arg); + + if (host->is_device_x100 && mrq->sbc && mrq->cmd->opcode == + MMC_WRITE_MULTIPLE_BLOCK) { + phytium_mci_start_write_multiple_non_dma(mmc, mrq); + return; + } + + if (mrq->sbc) { + phytium_mci_start_command(host, mrq, mrq->sbc); + return; + } + if (mrq->data) { + phytium_mci_prepare_data(host, mrq); + + if ((mrq->data->sg->length >= 512) && host->is_use_dma && + ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK) || + (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) + + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + return; + } + phytium_mci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data 
*data = mrq->data; + + if (!data) + return; + + phytium_mci_prepare_data(host, mrq); + data->host_cookie |= MCI_ASYNC_FLAG; +} + +static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie & MCI_ASYNC_FLAG) { + data->host_cookie &= ~MCI_ASYNC_FLAG; + phytium_mci_unprepare_data(host, mrq); + } +} + +static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, + struct mmc_data *data) +{ + u32 length, i, data_val, dma_len, tmp = 0; + u32 *virt_addr; + unsigned long flags; + struct scatterlist *sg; + + length = data->blocks * data->blksz; + + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + spin_lock_irqsave(&host->lock, flags); + if (data->host_cookie & MCI_ASYNC_FLAG) { + tmp = MCI_ASYNC_FLAG; + phytium_mci_post_req(host->mmc, data->mrq, 0); + } else { + phytium_mci_unprepare_data(host, data->mrq); + } + + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + + for (i = 0; i < (dma_len / 4); i++) { + data_val = readl(host->base + MCI_DATA); + memcpy(virt_addr, &data_val, 4); + ++virt_addr; + } + } + + if (tmp & MCI_ASYNC_FLAG) + phytium_mci_pre_req(host->mmc, data->mrq); + else + phytium_mci_prepare_data(host, data->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + } + data->bytes_xfered = length; +} + +static void phytium_mci_data_xfer_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && + (data->error || !mrq->sbc)) { + while ((readl(host->base + MCI_STATUS) & + (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + phytium_mci_start_command(host, mrq, mrq->stop); + } else { + phytium_mci_request_done(host, mrq); + } +} + +static bool +phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) +{ + unsigned long flags; + bool done; + + unsigned int + check_data = events & (MCI_RAW_INTS_DTO | MCI_RAW_INTS_RCRC | + MCI_RAW_INTS_DCRC | MCI_RAW_INTS_RE | + MCI_RAW_INTS_DRTO | MCI_RAW_INTS_EBE | + MCI_DMAC_STATUS_AIS | MCI_DMAC_STATUS_DU | + MCI_RAW_INTS_SBE_BCI | MCI_INT_MASK_RTO); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + + if (check_data || host->data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + if (check_data) { + spin_lock_irqsave(&host->lock, flags); + sdr_clr_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + sdr_clr_bits(host->base + MCI_INT_MASK, data_ints_mask); + /* Stop the IDMAC running */ + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + dev_dbg(host->dev, "DMA stop\n"); + spin_unlock_irqrestore(&host->lock, flags); + + if (events & MCI_RAW_INTS_DTO) { + if (!host->is_use_dma || + (host->is_use_dma && + host->adtc_type == COMMOM_ADTC && + (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)) + phytium_mci_data_read_without_dma(host, data); + else + data->bytes_xfered = data->blocks * data->blksz; + } else { + data->bytes_xfered = 0; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) + && readl(host->base + MCI_CARD_DETECT) + && (events & cmd_err_ints_mask)) { + data->error = -ENOMEDIUM; + data->mrq->cmd->error = -ENOMEDIUM; + } else if (events & (MCI_RAW_INTS_DCRC | + MCI_RAW_INTS_EBE | + MCI_RAW_INTS_SBE_BCI)) { + data->error = -EILSEQ; + host->cmd = NULL; + } else { + data->error = -ETIMEDOUT; + host->cmd = 
NULL; + } + } + + phytium_mci_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static int phytium_mci_card_busy(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + status = readl(host->base + MCI_STATUS); + + return !!(status & MCI_STATUS_CARD_BUSY); +} + +static void +__phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) +{ + if (enable) + sdr_set_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); + else + sdr_clr_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); +} + +static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + __phytium_mci_enable_sdio_irq(host, enable); +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_mci_host *host; + u32 status; + + host = from_timer(host, t, hotplug_timer); + if (!host) + return; + + status = readl(host->base + MCI_CARD_DETECT); + + if (status & 0x1) { + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + } + } else { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static int +phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events) +{ + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (cmd && (cmd == mrq->sbc)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->sbc); + } else if (cmd && (cmd == mrq->stop)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->stop); + } else if (data) { + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_mci_data_xfer_done(host, events | dmac_events, + mrq, data); + } else if (cmd) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->cmd); + } + + return 0; +} + +static irqreturn_t phytium_mci_irq(int irq, void *dev_id) +{ + struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask, dmac_events, dmac_evt_mask; + + if (!host) + return IRQ_NONE; + writel(0, host->base + 0xfd0); + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MCI_RAW_INTS); + dmac_events = readl(host->base + MCI_DMAC_STATUS); + event_mask = readl(host->base + MCI_INT_MASK); + dmac_evt_mask = readl(host->base + MCI_DMAC_INT_ENA); + if ((!events) && (!(dmac_events&0x1fff))) { + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_NONE; + } + dev_dbg(host->dev, "%s:events:%x,mask:0x%x,dmac_events:%x, ", + __func__, events, event_mask, dmac_events); + dev_dbg(host->dev, "dmac_mask:0x%x,cmd:%d\n", + dmac_evt_mask, host->mrq ? 
host->mrq->cmd->opcode : 255); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (((events & event_mask) & MCI_RAW_INTS_SDIO) && + ((events == 0x10001) || (events == 0x10000) || + (events == 0x10040))) { + writel(events, host->base + MCI_RAW_INTS); + __phytium_mci_enable_sdio_irq(host, 0); + sdio_signal_irq(host->mmc); + spin_unlock_irqrestore(&host->lock, flags); + goto irq_out; + } + + writel(events, host->base + MCI_RAW_INTS); + writel(dmac_events, host->base + MCI_DMAC_STATUS); + spin_unlock_irqrestore(&host->lock, flags); + + if (((events & event_mask) == 0) && + ((dmac_evt_mask & dmac_events) == 0)) + goto irq_out; + + if (((events & event_mask) & MCI_RAW_INTS_CD) && + !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) { + mod_timer(&host->hotplug_timer, + jiffies + usecs_to_jiffies(20000)); + dev_dbg(host->dev, + "sd status changed here ! status:[%d] [%s %d]", + readl(host->base + MCI_CARD_DETECT), + __func__, __LINE__); + + if ((events & event_mask) == MCI_RAW_INTS_CD) + goto irq_out; + } + + if (!mrq) { + if (events & MCI_RAW_INTS_HLE) { + dev_dbg(host->dev, "%s: MRQ=NULL and HW write locked, ", + __func__); + dev_dbg(host->dev, "events=%08x,event_mask=%08x\n", + events, event_mask); + } else { + dev_dbg(host->dev, "%s: MRQ=NULL events:%08X, ", + __func__, events); + dev_dbg(host->dev, "evt_mask=%08X,sd_status:%d\n", + event_mask, + readl(host->base + MCI_CARD_DETECT)); + } + goto irq_out; + } + + if ((dmac_events & dmac_err_ints_mask) || + (events & cmd_err_ints_mask)) { + dev_dbg(host->dev, "ERR:events:%x,mask:0x%x,dmac_evts:%x, ", + events, event_mask, dmac_events); + dev_dbg(host->dev, "dmac_mask:0x%x,cmd:%d\n", + dmac_evt_mask, mrq->cmd->opcode); + phytium_mci_err_irq(host, dmac_events & dmac_err_ints_mask, + events & cmd_err_ints_mask); + goto irq_out; + } + + if ((events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { + phytium_mci_cmd_done(host, events, mrq, cmd); + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), + mrq, data); + } else if (events & MCI_MASKED_INTS_CMD || + ((events & MCI_INT_MASK_HTO) && + (cmd->opcode == SD_SWITCH_VOLTAGE))) { + phytium_mci_cmd_done(host, events, mrq, cmd); + } else if (events & MCI_MASKED_INTS_DTO) { + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), + mrq, data); + } + +irq_out: + return IRQ_HANDLED; +} + +static void phytium_mci_init_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_set_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_set_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + + phytium_mci_reset_hw(host); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdr_set_bits(host->base + MCI_CARD_RESET, + MCI_CARD_RESET_ENABLE); + else + sdr_clr_bits(host->base + MCI_CARD_RESET, + MCI_CARD_RESET_ENABLE); + + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE | + MCI_CNTRL_USE_INTERNAL_DMAC); + + writel(0xFFFFFFFF, host->base + MCI_TMOUT); + dev_info(host->dev, "init hardware done!"); + +} + +void 
phytium_mci_deinit_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_clr_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); +} +EXPORT_SYMBOL_GPL(phytium_mci_deinit_hw); + +static void phytium_mci_adma_reset(struct phytium_mci_host *host) +{ + u32 bmod = readl(host->base + MCI_BUS_MODE); + + bmod |= MCI_BUS_MODE_SWR; + writel(bmod, host->base + MCI_BUS_MODE); +} + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma) +{ + struct phytium_adma2_64_desc *adma_table = dma->adma_table; + dma_addr_t dma_addr; + int i; + + memset(adma_table, 0, + sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); + + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); + adma_table[i].desc_lo = lower_32_bits(dma_addr); + adma_table[i].desc_hi = upper_32_bits(dma_addr); + adma_table[i].attribute = 0; + adma_table[i].NON1 = 0; + adma_table[i].len = 0; + adma_table[i].NON2 = 0; + } + + phytium_mci_adma_reset(host); +} + +static void phytium_mci_set_buswidth(struct phytium_mci_host *host, u32 width) +{ + u32 val; + + switch (width) { + case MMC_BUS_WIDTH_1: + val = MCI_BUS_1BITS; + break; + + case MMC_BUS_WIDTH_4: + val = MCI_BUS_4BITS; + break; + + case MMC_BUS_WIDTH_8: + val = MCI_BUS_8BITS; + break; + default: + val = MCI_BUS_4BITS; + break; + } + writel(val, host->base + MCI_CTYPE); + dev_dbg(host->dev, "Bus Width = %d, set value:0x%x\n", width, val); +} + +static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + else + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + + phytium_mci_set_buswidth(host, ios->bus_width); + + switch (ios->power_mode) { + case MMC_POWER_UP: + set_bit(MCI_CARD_NEED_INIT, &host->flags); + writel(MCI_POWER_ON, host->base + MCI_PWREN); + break; + + case MMC_POWER_ON: + break; + + case MMC_POWER_OFF: + writel(MCI_POWER_OFF, host->base + MCI_PWREN); + break; + + default: + break; + } + phytium_mci_set_clk(host, ios); +} + +static void phytium_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct phytium_mci_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __phytium_mci_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int phytium_mci_get_cd(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + status = readl(host->base + MCI_CARD_DETECT); + + if ((status & 0x1) == 0x1) + return 0; + + return 1; +} + +static int +phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + unsigned int is_voltage_180 = 0; + + is_voltage_180 = readl(host->base + MCI_UHS_REG); + if ((mmc->caps & 
MMC_CAP_NONREMOVABLE) && + (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180)) + return -EINVAL; + + if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) && + (is_voltage_180 & 0x1)) + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) && + (!(is_voltage_180 & 0x1))) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EINVAL; + return 0; +} + +static void phytium_mci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 reset_flag; + + if (host->is_use_dma) { + reset_flag = MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET; + phytium_mci_adma_reset(host); + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } else { + reset_flag = MCI_CNTRL_FIFO_RESET; + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } + + while (readl(host->base + MCI_CNTRL) & reset_flag) + cpu_relax(); + + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + udelay(5); + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + usleep_range(200, 300); +} + +#ifdef CONFIG_PM_SLEEP +int phytium_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_suspend); + +int phytium_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_resume); + +#endif + +#ifdef CONFIG_PM +int phytium_mci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_suspend); + +int phytium_mci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_resume); + +#endif + +static struct mmc_host_ops phytium_mci_ops = { + .post_req = phytium_mci_post_req, + .pre_req = phytium_mci_pre_req, + .request = phytium_mci_ops_request, + .set_ios = phytium_mci_ops_set_ios, + .get_cd = phytium_mci_get_cd, + .enable_sdio_irq = phytium_mci_enable_sdio_irq, + .ack_sdio_irq = phytium_mci_ack_sdio_irq, + .card_busy = phytium_mci_card_busy, + .start_signal_voltage_switch = phytium_mci_ops_switch_volt, + .hw_reset = phytium_mci_hw_reset, +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = host->dev; + int uhs_reg_value = 0x502; + int ret; + + dma_set_mask(dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + mmc->f_min = MCI_F_MIN; + if (!mmc->f_max) + mmc->f_max = MCI_F_MAX; + + mmc->ops = &phytium_mci_ops; + mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_mmc = MMC_VDD_165_195; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps |= host->caps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", + __func__, __LINE__); + } + mmc->caps2 |= host->caps2; + if (host->is_use_dma) { + /* MMC core transfer sizes tunable parameters */ + 
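+		/*
+		 * With MAX_BD_NUM (128) descriptors of at most 4 KiB each,
+		 * a single request is capped at 512 KiB; max_blk_count then
+		 * follows from the 512-byte block size.
+		 */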
mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma.adma_table = dma_zalloc_coherent(host->dev, + MAX_BD_NUM * + sizeof(struct phytium_adma2_64_desc), + &host->dma.adma_addr, GFP_KERNEL); + if (!host->dma.adma_table) + return MCI_REALEASE_MEM; + + host->dma.desc_sz = ADMA2_64_DESC_SZ; + phytium_mci_init_adma_table(host, &host->dma); + } else { + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 4 * 512; + mmc->max_blk_count = mmc->max_req_size / 512; + } + writel(MCI_SET_FIFOTH(0x2, 0x7, 0x100), host->base + MCI_FIFOTH); + writel(0x800001, host->base + MCI_CARD_THRCTL); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + phytium_mci_update_external_clk(host, uhs_reg_value); + + spin_lock_init(&host->lock); + + phytium_mci_init_hw(host); + ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, + host->irq_flags, "phytium-mci", host); + + if (ret) + return ret; + + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(host->dev, "%s %d: mmc add host!\n", + __func__, __LINE__); + return ret; + } + return 0; +} +EXPORT_SYMBOL(phytium_mci_common_probe); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h new file mode 100644 index 0000000000000000000000000000000000000000..f94e628fd1d9263813224d37c8e7b048cb3b0dae --- /dev/null +++ b/drivers/mmc/host/phytium-mci.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
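+ * Register offsets, bit-field masks and the host/DMA descriptor
+ * structures used by the Phytium MCI core and its front-end drivers.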
+ */ + +#ifndef __PHYTIUM_MCI_H +#define __PHYTIUM_MCI_H + +#include +#include +#include +#include +#include +#include +#include + +/*------------------------------------------------------*/ +/* Common Definition */ +/*------------------------------------------------------*/ +#define MAX_BD_NUM 128 +#define SD_BLOCK_SIZE 512 + +#define MCI_BUS_1BITS 0x0 +#define MCI_BUS_4BITS 0x1 +#define MCI_BUS_8BITS (0x1 << 16) + +#define MCI_SD_DRV_VALUE 0 +#define MCI_SD_SAMP_VALUE_MAX 0 +#define MCI_SD_SAMP_VALUE_MIN 0 + +#define MCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define MCI_POWER_ON 1 +#define MCI_POWER_OFF 0 + +#define MCI_PREPARE_FLAG (0x1 << 0) +#define MCI_ASYNC_FLAG (0x1 << 1) +#define MCI_MMAP_FLAG (0x1 << 2) + +#define MCI_CMD_TIMEOUT (HZ/10 * 50) /* 100ms x5 */ +#define MCI_DATA_TIMEOUT (HZ * 10) /* 1000ms x5 */ + +#define MCI_CMD_TYPE_ADTC 0x2 + +#define MCI_F_MIN 400000 +#define MCI_F_MAX 50000000 + +#define MCI_CLK 1200000000 +#define MCI_REALEASE_MEM 0x1 +#define MCI_MAX_FIFO_CNT 0x800 + +/* FIFOTH register defines */ +#define MCI_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | ((t) & 0xFFF)) +/* Card read threshold */ +#define MCI_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define MCI_CARD_WR_THR_EN BIT(2) +#define MCI_CARD_RD_THR_EN BIT(0) + +/*----------------------------------------------------------------------*/ +/* Register Offset */ +/*----------------------------------------------------------------------*/ +#define MCI_CNTRL 0x00 /* the controller config reg */ +#define MCI_PWREN 0x04 /* the power enable reg */ +#define MCI_CLKDIV 0x08 /* the clock divider reg */ +#define MCI_CLKENA 0x10 /* the clock enable reg */ +#define MCI_TMOUT 0x14 /* the timeout reg */ +#define MCI_CTYPE 0x18 /* the card type reg */ +#define MCI_BLKSIZ 0x1C /* the block size reg */ +#define MCI_BYTCNT 0x20 /* the byte count reg */ +#define MCI_INT_MASK 0x24 /* the interrupt mask reg */ +#define MCI_CMDARG 0x28 /* the command argument reg */ +#define MCI_CMD 0x2C /* the command reg */ +#define MCI_RESP0 0x30 /* the response reg0 */ +#define MCI_RESP1 0x34 /* the response reg1 */ +#define MCI_RESP2 0x38 /* the response reg2 */ +#define MCI_RESP3 0X3C /* the response reg3 */ +#define MCI_MASKED_INTS 0x40 /* the masked interrupt status reg */ +#define MCI_RAW_INTS 0x44 /* the raw interrupt status reg */ +#define MCI_STATUS 0x48 /* the status reg */ +#define MCI_FIFOTH 0x4C /* the FIFO threshold watermark reg */ +#define MCI_CARD_DETECT 0x50 /* the card detect reg */ +#define MCI_CARD_WRTPRT 0x54 /* the card write protect reg */ +#define MCI_CCLK_RDY 0x58 /* div is ready? 
1:ready,0:not ready*/ +#define MCI_TRAN_CARD_CNT 0x5C /* transferred CIU card byte count reg */ +#define MCI_TRAN_FIFO_CNT 0x60 /* transferred host FIFO byte count reg*/ +#define MCI_DEBNCE 0x64 /* the debounce count reg */ +#define MCI_UID 0x68 /* the user ID reg */ +#define MCI_VID 0x6C /* the controller version ID reg */ +#define MCI_HWCONF 0x70 /* the hardware configuration reg */ +#define MCI_UHS_REG 0x74 /* the UHS-I reg */ +#define MCI_CARD_RESET 0x78 /* the card reset reg */ +#define MCI_BUS_MODE 0x80 /* the bus mode reg */ +#define MCI_DESC_LIST_ADDRL 0x88 /* descriptor list low base address reg */ +#define MCI_DESC_LIST_ADDRH 0x8C /* descriptor list high base address reg */ +#define MCI_DMAC_STATUS 0x90 /* the internal DMAC status reg */ +#define MCI_DMAC_INT_ENA 0x94 /* internal DMAC interrupt enable reg */ +#define MCI_CUR_DESC_ADDRL 0x98 /* current descriptor low address reg */ +#define MCI_CUR_DESC_ADDRH 0x9C /* current descriptor high address reg */ +#define MCI_CUR_BUF_ADDRL 0xA0 /* the current buffer low address reg */ +#define MCI_CUR_BUF_ADDRH 0xA4 /* the current buffer high address reg */ +#define MCI_CARD_THRCTL 0x100 /* the card threshold control reg */ +#define MCI_UHS_REG_EXT 0x108 /* the UHS register extension */ +#define MCI_EMMC_DDR_REG 0x10C /* the EMMC DDR reg */ +#define MCI_ENABLE_SHIFT 0x110 /* the enable phase shift reg */ +#define MCI_DATA 0x200 /* the data FIFO access */ + +/* Command register defines */ +#define MCI_CMD_START BIT(31) +#define MCI_CMD_USE_HOLD_REG BIT(29) +#define MCI_CMD_VOLT_SWITCH BIT(28) +#define MCI_CMD_CCS_EXP BIT(23) +#define MCI_CMD_CEATA_RD BIT(22) +#define MCI_CMD_UPD_CLK BIT(21) +#define MCI_CMD_INIT BIT(15) +#define MCI_CMD_STOP BIT(14) +#define MCI_CMD_PRV_DAT_WAIT BIT(13) +#define MCI_CMD_SEND_STOP BIT(12) +#define MCI_CMD_STRM_MODE BIT(11) +#define MCI_CMD_DAT_WR BIT(10) +#define MCI_CMD_DAT_EXP BIT(9) +#define MCI_CMD_RESP_CRC BIT(8) +#define MCI_CMD_RESP_LONG BIT(7) +#define MCI_CMD_RESP_EXP BIT(6) +#define MCI_CMD_INDX(n) ((n) & 0x1F) + +/*------------------------------------------------------*/ +/* Register Mask */ +/*------------------------------------------------------*/ +/* MCI_CNTRL mask */ +#define MCI_CNTRL_CONTROLLER_RESET (0x1 << 0) /* RW */ +#define MCI_CNTRL_FIFO_RESET (0x1 << 1) /* RW */ +#define MCI_CNTRL_DMA_RESET (0x1 << 2) /* RW */ +#define MCI_CNTRL_RES (0x1 << 3) /* */ +#define MCI_CNTRL_INT_ENABLE (0x1 << 4) /* RW */ +#define MCI_CNTRL_DMA_ENABLE (0x1 << 5) /* RW */ +#define MCI_CNTRL_READ_WAIT (0x1 << 6) /* RW */ +#define MCI_CNTRL_SEND_IRQ_RESPONSE (0x1 << 7) /* RW */ +#define MCI_CNTRL_ABORT_READ_DATA (0x1 << 8) /* RW */ +#define MCI_CNTRL_ENDIAN (0x1 << 11) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_A (0xF << 16) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_B (0xF << 20) /* RW */ +#define MCI_CNTRL_ENABLE_OD_PULLUP (0x1 << 24) /* RW */ +#define MCI_CNTRL_USE_INTERNAL_DMAC (0x1 << 25) /* RW */ + +/* MCI_PWREN mask */ +#define MCI_PWREN_ENABLE (0x1 << 0) /* RW */ + +/* MCI_CLKENA mask */ +#define MCI_CLKENA_CCLK_ENABLE (0x1 << 0) /* RW */ +#define MCI_CLKENA_CCLK_LOW_POWER (0x1 << 16) /* RW */ +#define MCI_EXT_CLK_ENABLE (0x1 << 1) + +/* MCI_INT_MASK mask */ +#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ +#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ +#define MCI_INT_MASK_CMD (0x1 << 2) /* RW */ +#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ +#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ +#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ +#define MCI_INT_MASK_RCRC (0x1 << 6) /* RW */ +#define MCI_INT_MASK_DCRC 
(0x1 << 7) /* RW */ +#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ +#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ +#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ +#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ +#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ +#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ +#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ +#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ +#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ + +/* MCI_MASKED_INTS mask */ +#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ +#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ +#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ +#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ +#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ +#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ +#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ +#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ +#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ +#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ +#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ +#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ +#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ +#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ +#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ +#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ +#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ + +/* MCI_RAW_INTS mask */ +#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ +#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ +#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ +#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ +#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C */ +#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ +#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ +#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ +#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ +#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ +#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ +#define MCI_RAW_INTS_FRUN (0x1 << 11) /* W1C */ +#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ +#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ +#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ +#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ +#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ + +/* MCI_STATUS mask */ +#define MCI_STATUS_FIFO_RX (0x1 << 0) /* RO */ +#define MCI_STATUS_FIFO_TX (0x1 << 1) /* RO */ +#define MCI_STATUS_FIFO_EMPTY (0x1 << 2) /* RO */ +#define MCI_STATUS_FIFO_FULL (0x1 << 3) /* RO */ +#define MCI_STATUS_CARD_STATUS (0x1 << 8) /* RO */ +#define MCI_STATUS_CARD_BUSY (0x1 << 9) /* RO */ +#define MCI_STATUS_DATA_BUSY (0x1 << 10) /* RO */ +#define MCI_STATUS_DMA_ACK (0x1 << 31) /* RO */ +#define MCI_STATUS_DMA_REQ (0x1 << 32) /* RO */ + +/* MCI_UHS_REG mask */ +#define MCI_UHS_REG_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_DDR (0x1 << 16) /* RW */ + +/* MCI_CARD_RESET mask */ +#define MCI_CARD_RESET_ENABLE (0x1 << 0) /* RW */ + +/* MCI_BUS_MODE mask */ +#define MCI_BUS_MODE_SWR (0x1 << 0) /* RW */ +#define MCI_BUS_MODE_FB (0x1 << 1) /* RW */ +#define MCI_BUS_MODE_DE (0x1 << 7) /* RW */ + +/* MCI_DMAC_STATUS mask */ +#define MCI_DMAC_STATUS_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_STATUS_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_STATUS_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_STATUS_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_STATUS_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_STATUS_AIS (0x1 << 9) /* RW */ + +/* MCI_DMAC_INT_ENA mask */ +#define MCI_DMAC_INT_ENA_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_INT_ENA_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_INT_ENA_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_INT_ENA_DU (0x1 << 4) /* RW */ 
+#define MCI_DMAC_INT_ENA_CES (0x1 << 5) /* RW */ +#define MCI_DMAC_INT_ENA_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_INT_ENA_AIS (0x1 << 9) /* RW */ + +/* MCI_CARD_THRCTL mask */ +#define MCI_CARD_THRCTL_CARDRD (0x1 << 0) /* RW */ +#define MCI_CARD_THRCTL_BUSY_CLR (0x1 << 1) /* RW */ +#define MCI_CARD_THRCTL_CARDWR (0x1 << 2) /* RW */ + +/* MCI_UHS_REG_EXT mask */ +#define MCI_UHS_REG_EXT_MMC_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_EXT_CLK_ENA (0x1 << 1) /* RW */ + +/* MCI_EMMC_DDR_REG mask */ +#define MCI_EMMC_DDR_CYCLE (0x1 << 0) /* RW */ + +/*--------------------------------------*/ +/* Structure Type */ +/*--------------------------------------*/ +/* Maximum segments assuming a 512KiB maximum requisition */ +/* size and a minimum4KiB page size. */ +#define MCI_MAX_SEGS 128 +/* ADMA2 64-bit DMA descriptor size */ +#define ADMA2_64_DESC_SZ 32 + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +/*ADMA2 64-bit descriptor.*/ +struct phytium_adma2_64_desc { + u32 attribute; +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + u32 NON1; + u32 len; + u32 NON2; + u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1*/ + u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1*/ + u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ + u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ +} __packed __aligned(4); + +struct phytium_mci_dma { + struct scatterlist *sg; /* I/O scatter list */ + /* ADMA descriptor table, pointer to adma_table array */ + struct phytium_adma2_64_desc *adma_table; + /* Mapped ADMA descr. table, the physical address of adma_table array */ + dma_addr_t adma_addr; + unsigned int desc_sz; /* ADMA descriptor size */ +}; + +enum adtc_t { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_mci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + u32 caps2; + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + void __iomem *base; /* host base address */ + void *adma_table1; + dma_addr_t adma_addr1; + struct phytium_mci_dma dma_rx; /* dma channel */ + struct phytium_mci_dma dma_tx; /* dma channel */ + struct phytium_mci_dma dma; /* dma channel */ + u64 dma_mask; + bool vqmmc_enabled; + u32 *sg_virt_addr; + enum adtc_t adtc_type; /* 0:common adtc cmd; 1:block r/w adtc cmd;*/ + struct timer_list hotplug_timer; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + u32 current_rca; /*the current rca value*/ + u32 current_ios_clk; + u32 is_use_dma; + u32 is_device_x100; + struct clk *src_clk; /* phytium_mci source clock */ + unsigned long clk_rate; + unsigned long clk_div; + unsigned long irq_flags; + unsigned long flags; +#define MCI_CARD_NEED_INIT 1 + +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host); +void phytium_mci_deinit_hw(struct phytium_mci_host *host); +int phytium_mci_runtime_suspend(struct device *dev); +int phytium_mci_runtime_resume(struct device *dev); +int phytium_mci_resume(struct device *dev); +int phytium_mci_suspend(struct device *dev); + +#endif /* __PHYTIUM_MCI_HW_H */ diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c new file mode 100644 index 0000000000000000000000000000000000000000..f2fdb8ecc2d13a60978b1e16c91361f33fac84e6 --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.c @@ -0,0 +1,1492 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SDCI dirver + * + * Copyright (C) 2019-2023, Phytium Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "phytium-sdci.h" + +static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | + SDCI_SDCI_NORMAL_ISER_EEI_EN; +static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; +static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | + SDCI_ERROR_ISR_CCRCE_EN | + SDCI_ERROR_ISR_CIR_EN | + SDCI_ERROR_ISR_CNR_EN; +static const u32 caps = MMC_CAP_ERASE; + +static void hotplug_timer_func(struct timer_list *t); +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type, u32 arg); +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd); +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data); +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms); + +static int phytium_sd_error(struct phytium_sdci_host *host) +{ + int temp; + + temp = readl(host->base + SDCI_NORMAL_ISR); + dev_err(host->dev, + "[%s %d]SDCI_NORMAL_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_BD_ISR); + temp = readl(host->base + SDCI_ERROR_ISR); + dev_err(host->dev, + "[%s %d]SDCI_ERROR_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_BD_ISR); + dev_err(host->dev, + "[%s %d]SDCI_BD_ISR:%x\n", __func__, __LINE__, temp); + temp = readl(host->base + SDCI_RESP0); + dev_err(host->dev, "[%s %d]SDCI_RESP0:%x\n", __func__, __LINE__, temp); + + return 0; +} + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val |= bs; + + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val; + + val = readl(reg); + val &= ~bs; + + writel(val, reg); +} + +static void phytium_sdci_reset_hw(struct phytium_sdci_host *host) +{ + sdr_set_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, + SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static void phytium_sdci_prepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + bool read; + + read = (data->flags & MMC_DATA_READ) != 0; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_unprepare_data(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + bool read; + struct mmc_data *data = mrq->data; + + read = (data->flags & MMC_DATA_READ) != 0; + dma_unmap_sg(host->dev, data->sg, data->sg_len, + read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); +} + +static void phytium_sdci_set_clk(struct phytium_sdci_host *host, + struct mmc_ios *ios) +{ + unsigned long clk_rate; + u32 div = 0xffffffff, div_reg; + + if (ios->clock) { + clk_rate = host->clk_rate; + div = ((clk_rate / (2 * ios->clock)) - 1); + div_reg = readl(host->base + SDCI_CLOCK_D); + if (div_reg == div) + return; + writel(div, host->base + SDCI_CLOCK_D); + writel(0, host->base + SDCI_SD_DRV); + writel(5, host->base + SDCI_SD_SAMP); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + + +static inline u32 phytium_sdci_cmd_find_resp(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + resp = 0x2; + break; + case MMC_RSP_R1B: + resp = 0x2; + break; + case MMC_RSP_R2: + resp = 0x1; + break; + case MMC_RSP_R3: + resp = 0x3; + break; + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_sdci_cmd_prepare_raw_cmd(struct phytium_sdci_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* + * rawcmd : + * trty << 14 | opcode << 8 | cmdw << 6 | cice << 4 | crce << 3 | resp + */ + u32 resp, rawcmd; + u32 opcode = cmd->opcode; + + resp = phytium_sdci_cmd_find_resp(host, mrq, cmd); + rawcmd = ((opcode << 8) | resp); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) + rawcmd = (rawcmd | (SDCI_CMD_TYPE_ADTC << 14)); + + return rawcmd; +} + +static void +phytsd_unexpected_error_handler(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + int err_type) +{ + unsigned long flags; + int error; + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (err_type & ERR_CARD_ABSENT) { + host->mmc->detect_change = 1; + dev_dbg(host->dev, + "SD is absent when send cmd:%d\n", mrq->cmd->opcode); + } + + switch (err_type) { + case ERR_CARD_ABSENT: + error = -ENOMEDIUM; + break; + case ERR_TIMEOUT: + error = -ETIMEDOUT; + break; + case ERR_CMD_RESPONED: + error = -EIO; + break; + default: + error = -ETIMEDOUT; + break; + } + + if (data) { + data->error = error; + phytium_sdci_unprepare_data(host, mrq); + + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, + mrq, data); + } else { + mrq->cmd->error = error; + } + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_sdci_start_data(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read, res; + u32 sg_dma_addrh, sg_dma_addrl; + u32 sd_block_addrh, sd_block_addrl; + u32 temp, timeout, sd_status; + u32 block_cnt = 0; + u32 sd_block_addr = cmd->arg; + u32 private_cmd, resp_type, arg; + u32 j, dma_len; + unsigned long deadline_time; + dma_addr_t dma_address; + struct scatterlist *sg; + int ret; + + WARN_ON(host->cmd); + host->cmd = cmd; + + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + for_each_sg(data->sg, sg, data->sg_count, j) { + writel(0, host->base + SDCI_COMMAND); + + dma_address = sg_dma_address(sg); + sg_dma_addrh = (u32) (dma_address >> 
32); + sg_dma_addrl = (u32) dma_address; + + dma_len = sg_dma_len(sg); + block_cnt = (dma_len / SD_BLOCK_SIZE); + + sd_block_addrh = 0; + sd_block_addrl = sd_block_addr; + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_BDRST); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(sg_dma_addrl, host->base + SDCI_BD_RX); + writel(sg_dma_addrh, host->base + SDCI_BD_RX); + writel(sd_block_addrl, host->base + SDCI_BD_RX); + writel(sd_block_addrh, host->base + SDCI_BD_RX); + timeout = 100 * block_cnt; + } else { + timeout = 250 * block_cnt; + ret = phytium_sdci_cmd13_process(host, mrq, data, + timeout, 1); + if (ret != SDCI_CMD13_OK) + return false; + + writel(sg_dma_addrl, host->base + SDCI_BD_TX); + writel(sg_dma_addrh, host->base + SDCI_BD_TX); + writel(sd_block_addrl, host->base + SDCI_BD_TX); + writel(sd_block_addrh, host->base + SDCI_BD_TX); + } + + deadline_time = jiffies + msecs_to_jiffies(timeout); + + temp = readl(host->base + SDCI_BD_ISR); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + while ((temp & SDCI_BD_ISR_TRS_R) != + SDCI_BD_ISR_TRS_R) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_CARD_ABSENT); + if (temp & SDCI_BD_ISR_DAIS) + writel(1, host->base + + SDCI_BD_ISR); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, "Read Data timeout"); + dev_err(host->dev, "jiffies:0x%lx\n", + jiffies); + dev_err(host->dev, "dt_jiffies:0x%lx\n", + jiffies - deadline_time); + dev_err(host->dev, "BD_isr_reg:0x%x\n", + temp); + dev_err(host->dev, + "cmd:%d, REG_D0:0x%x\n", + cmd->opcode, readl(host->base + + SDCI_STATUS)); + + return false; + } + } + } else { + while ((temp & SDCI_BD_ISR_TRS_W) != + SDCI_BD_ISR_TRS_W) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, + "[%s][%d]: Card absent !\n", + __func__, __LINE__); + dev_err(host->dev, "cmd(%d)\n", + mrq->cmd->opcode); + return false; + } + + temp = readl(host->base + SDCI_BD_ISR); + if (time_after(jiffies, deadline_time)) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_TIMEOUT); + dev_err(host->dev, + "Write Date timeout\n"); + dev_err(host->dev, + "jiffies:0x%lx\n", jiffies); + dev_err(host->dev, "dt_jiffies:0x%lx\n", + jiffies - deadline_time); + dev_err(host->dev, "BD_isr_reg:0x%x\n", + temp); + return false; + } + } + } + writel(1, host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_NORMAL_ISR); + sd_block_addr = sd_block_addr + block_cnt; + + if (j < (data->sg_count - 1) && 1 < block_cnt) { + private_cmd = MMC_STOP_TRANSMISSION; + resp_type = 0x2; + arg = 0; + res = phytium_sdci_private_send_cmd(host, private_cmd, + resp_type, arg); + if (!res) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_CARD_ABSENT); + writel(1, host->base + SDCI_BD_ISR); + dev_err(host->dev, + "[%s][%d]:Card absent !\n", + __func__, __LINE__); + dev_err(host->dev, "private_cmd(%d)\n", + private_cmd); + } else { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_CMD_RESPONED); + dev_err(host->dev, + "[%s][%d] cmd(%d) 
errored\n", + __func__, __LINE__, + mrq->cmd->opcode); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_NORMAL_ISR); + return false; + } + writel(1, host->base + SDCI_NORMAL_ISR); + } + } + + host->is_multi_rw_only_one_blkcnt = false; + + if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK && block_cnt == 1) || + (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK && block_cnt == 1)) + host->is_multi_rw_only_one_blkcnt = true; + + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_CC, mrq, cmd); + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_R, + mrq, data); + else + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_TRS_W, + mrq, data); + + return true; +} + +static int phytium_sdci_auto_cmd_done(struct phytium_sdci_host *host, + int events, struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + + rsp[0] = readl(host->base + SDCI_RESP0); + + if (events & SDCI_NORMAL_ISR_CC) + cmd->error = 0; + else { + phytium_sdci_reset_hw(host); + dev_err(host->dev, + "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); + } + + return cmd->error; +} + +static void phytium_sdci_track_cmd_data(struct phytium_sdci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_sdci_request_done(struct phytium_sdci_host *host, + struct mmc_request *mrq) +{ + unsigned long flags; + + dev_dbg(host->dev, + "%s_%d:mrq->cmd->opcode:%d, ", + __func__, __LINE__, mrq->cmd->opcode); + dev_dbg(host->dev, "mrq->cmd->arg:0x%x resp 0x%x 0x%x 0x%x 0x%x\n", + mrq->cmd->arg, mrq->cmd->resp[0], mrq->cmd->resp[1], + mrq->cmd->resp[2], mrq->cmd->resp[3]); + + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + phytium_sdci_track_cmd_data(host, mrq->cmd, mrq->data); + if (mrq->data) + phytium_sdci_unprepare_data(host, mrq); + mmc_request_done(host->mmc, mrq); +} + +static bool +phytium_sdci_auto_command_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + rsp[0] = 0x900; + phytium_sdci_request_done(host, mrq); + return true; +} + +/* returns true if command is fully handled; returns false otherwise */ +static bool phytium_sdci_cmd_done(struct phytium_sdci_host *host, int events, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + bool done = false; + bool sbc_error; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & SDCI_NORMAL_ISR_CC)) + phytium_sdci_auto_cmd_done(host, events, mrq->sbc); + + sbc_error = mrq->sbc && mrq->sbc->error; + + if (!sbc_error && !(events & (SDCI_NORMAL_ISR_CC | + SDCI_NORMAL_ISR_CR | + SDCI_NORMAL_ISR_TIMEOUT))) + return done; + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[0] = readl(host->base + SDCI_RESP0); + rsp[1] = readl(host->base + SDCI_RESP1); + rsp[2] = readl(host->base + SDCI_RESP2); + rsp[3] = 
readl(host->base + SDCI_RESP3); + } else { + rsp[0] = readl(host->base + SDCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + + if (!sbc_error && + !(events & SDCI_NORMAL_ISR_CC) && + (events & SDCI_NORMAL_ISR_TIMEOUT)) + cmd->error = -ETIMEDOUT; + + if (cmd->error) + dev_dbg(host->dev, + "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], + cmd->error); + + phytium_sdci_cmd_next(host, mrq, cmd); + + return true; +} + +static bool set_databus_width(struct phytium_sdci_host *host) +{ + bool res; + u32 cmd, resp_type, arg; + + cmd = SD_APP_SET_BUS_WIDTH; + resp_type = 0x2; + arg = 0x2; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + cmd = MMC_APP_CMD; + resp_type = 0x2; + arg = host->current_rca; + res = phytium_sdci_private_send_cmd(host, cmd, resp_type, arg); + if (!res) + return false; + + return true; +} + + +static void phytium_sdci_start_command(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + struct mmc_data *data = mrq->data; + dma_addr_t dma_adtc_buf; + u32 dma_bufh, dma_bufl; + u32 block_cnt = 0; + + WARN_ON(host->cmd); + host->cmd = cmd; + + cmd->error = 0; + rawcmd = phytium_sdci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_SEND_STATUS) + writel(1, host->base + SDCI_ERROR_ISR); + sdr_set_bits(host->base + SDCI_NORMAL_ISER, cmd_ints_mask); + writel(rawcmd, host->base + SDCI_COMMAND); + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + WARN_ON(host->data); + host->data = data; + + dma_adtc_buf = host->dma_rx.bd_addr; + dma_bufh = (u32) (dma_adtc_buf >> 32); + dma_bufl = (u32) dma_adtc_buf; + block_cnt = mrq->data->blocks; + sdr_set_bits(host->base + SDCI_BD_ISER, data_ints_mask); + writel(block_cnt, host->base + SDCI_BLK_CNT); + + if ((mrq->data->flags & MMC_DATA_READ) == MMC_DATA_READ) { + writel(dma_bufl, host->base + SDCI_BD_RX); + writel(dma_bufh, host->base + SDCI_BD_RX); + writel(cmd->arg, host->base + SDCI_BD_RX); + writel(0, host->base + SDCI_BD_RX); + } else { + writel(dma_bufl, host->base + SDCI_BD_TX); + writel(dma_bufh, host->base + SDCI_BD_TX); + writel(cmd->arg, host->base + SDCI_BD_TX); + writel(0, host->base + SDCI_BD_TX); + } + } else { + writel(cmd->arg, host->base + SDCI_ARGUMENT); + } +} + +static void phytium_sdci_cmd_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if (cmd->error || (mrq->sbc && mrq->sbc->error)) + phytium_sdci_request_done(host, mrq); + else if (cmd == mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->cmd); + else if (!cmd->data) + phytium_sdci_request_done(host, mrq); +} + +static int phytium_sdci_cmd13_process(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data, + u32 wait_timeout_ms, + u32 send_once_time_ms) +{ + u32 private_cmd, resp_type, arg, temp, sd_status; + unsigned long deadline_time; + bool res; + + deadline_time = jiffies + msecs_to_jiffies(wait_timeout_ms); + + do { + private_cmd = MMC_SEND_STATUS; + resp_type = 0x2; + arg = host->current_rca; + + res = phytium_sdci_private_send_cmd(host, private_cmd, + resp_type, arg); + if (!res) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, + mrq, data, + ERR_CARD_ABSENT); + dev_err(host->dev, + "[%s][%d] Card absent! 
", + __func__, __LINE__); + dev_err(host->dev, "private_cmd(%d)\n", + private_cmd); + } else { + phytsd_unexpected_error_handler(host, mrq, + data, + ERR_CMD_RESPONED); + + dev_err(host->dev, + "[%s][%d] private_cmd(%d) errored\n", + __func__, __LINE__, private_cmd); + phytium_sd_error(host); + } + writel(1, host->base + SDCI_BD_ISR); + return SDCI_CMD13_FAILED; + } + + temp = readl(host->base + SDCI_RESP0); + + if (time_after(jiffies, deadline_time)) { + + if (mrq->cmd->opcode == MMC_SEND_STATUS) + return SDCI_CMD13_OK; + + dev_err(host->dev, "SD card is not in transfer mode, "); + dev_err(host->dev, "timeout:%d,rsp[0]:%x\n", + wait_timeout_ms, temp); + + phytsd_unexpected_error_handler(host, mrq, data, + ERR_TIMEOUT); + phytium_sd_error(host); + return SDCI_CMD13_FAILED; + } + + writel(1, host->base + SDCI_NORMAL_ISR); + + if (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE) && + send_once_time_ms) + mdelay(send_once_time_ms); + + } while (CARD_TRAN_STATE != (temp & CARD_CURRENT_STATE)); + + return SDCI_CMD13_OK; +} + +static void phytium_sdci_ops_request(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + unsigned long flags; + bool res; + u32 status_sd; + int res_cmd13; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + dev_dbg(host->dev, "%s: mrq->cmd->opcode:%d, mrq->cmd->arg:0x%x\n", + __func__, mrq->cmd->opcode, mrq->cmd->arg); + + if (mrq->cmd->opcode == MMC_SEND_STATUS && + (mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_ADTC) { + u32 status = readl(host->base + SDCI_STATUS); + + if (status & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } + + res_cmd13 = phytium_sdci_cmd13_process(host, mrq, NULL, 400, 5); + if (res_cmd13 == SDCI_CMD13_FAILED) + return; + } else if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION) { + status_sd = readl(host->base + SDCI_STATUS); + if (status_sd & SDCI_STATUS_CDSL) { + phytsd_unexpected_error_handler(host, mrq, NULL, + ERR_CARD_ABSENT); + return; + } + } + + if (mrq->data) { + phytium_sdci_prepare_data(host, mrq); + if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + mrq->cmd->opcode == MMC_WRITE_BLOCK) { + host->adtc_type = BLOCK_RW_ADTC; + phytium_sdci_start_data(host, mrq, + mrq->cmd, mrq->data); + return; + } + host->adtc_type = COMMOM_ADTC; + } + + if (mrq->cmd->opcode == SD_IO_RW_DIRECT || + mrq->cmd->opcode == SD_IO_SEND_OP_COND) { + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EINVAL; + mmc_request_done(host->mmc, mrq); + + return; + } + + if (mrq->cmd->opcode == SD_APP_SEND_SCR) { + res = set_databus_width(host); + if (!res) { + phytsd_unexpected_error_handler(host, mrq, + NULL, ERR_CMD_RESPONED); + return; + } + } + + /* if SBC is required, we have HW option and SW option. 
+ * if HW option is enabled, and SBC does not have "special" flags, + * use HW option, otherwise use SW option + */ + if (mrq->sbc && + (!mmc_card_mmc(mmc->card) || (mrq->sbc->arg & 0xFFFF0000))) + phytium_sdci_start_command(host, mrq, mrq->sbc); + else + phytium_sdci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_sdci_data_xfer_next(struct phytium_sdci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc && host->is_multi_rw_only_one_blkcnt) { + host->is_multi_rw_only_one_blkcnt = false; + phytium_sdci_auto_command_done(host, SDCI_NORMAL_ISR_CC, mrq, + mrq->stop); + } else if (mmc_op_multi(mrq->cmd->opcode) && + mrq->stop && !mrq->stop->error && + !mrq->sbc) + phytium_sdci_start_command(host, mrq, mrq->stop); + else + phytium_sdci_request_done(host, mrq); +} + +static inline void get_data_buffer(struct mmc_data *data, + u32 *bytes, u32 **pointer) +{ + struct scatterlist *sg; + + sg = &data->sg[0]; + *bytes = sg->length; + *pointer = sg_virt(sg); +} + +static bool phytium_sdci_data_xfer_done(struct phytium_sdci_host *host, + u32 events, struct mmc_request *mrq, + struct mmc_data *data) +{ + struct mmc_command *stop = data->stop; + unsigned long flags; + bool done; + unsigned int check_data; + u32 sg_length, i; + u32 *sg_virt_addr; + + check_data = events & (SDCI_BD_ISR_TRS_R | SDCI_BD_ISR_TRS_W | + SDCI_BD_ISR_EDTE); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + if (check_data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + if (check_data || (stop && stop->error)) { + sdr_clr_bits(host->base + SDCI_BD_ISER, data_ints_mask); + dev_dbg(host->dev, "DMA stop\n"); + + if (((events & SDCI_BD_ISR_TRS_R) || + (events & SDCI_BD_ISR_TRS_W)) && + (!stop || !stop->error)) { + if ((mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC && + (host->adtc_type == COMMOM_ADTC)) { + get_data_buffer(data, &sg_length, + &host->sg_virt_addr); + sg_virt_addr = host->sg_virt_addr; + + for (i = 0; i < (sg_length/4); i++) { + *sg_virt_addr = host->dma_rx.buf[i]; + sg_virt_addr++; + } + } + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_dbg(host->dev, "interrupt events: %x\n", events); + phytium_sdci_reset_hw(host); + data->bytes_xfered = 0; + dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", + __func__, mrq->cmd->opcode, data->blocks); + dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); + } + + phytium_sdci_data_xfer_next(host, mrq, data); + done = true; + } + + return done; +} + + +static int phytium_sdci_card_busy(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status; + + /* check if any pin between dat[0:3] is low */ + status = readl(host->base + SDCI_STATUS); + if (((status >> 20) & 0xf) != 0xf) + return 1; + + return 0; +} + +static void phytium_sdci_request_timeout(struct work_struct *work) +{ + struct phytium_sdci_host *host; + + host = container_of(work, struct phytium_sdci_host, req_timeout.work); + dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); + if (host->mrq) { + dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "%s: aborting cmd=%d\n", + __func__, host->cmd->opcode); + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, + host->mrq, host->cmd); + } else if (host->data) { + dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", + 
__func__, host->mrq->cmd->opcode, + host->data->blocks); + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE, + host->mrq, host->data); + } + } +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_sdci_host *host; + u32 status; + + host = from_timer(host, t, hotplug_timer); + if (!host) + dev_err(host->dev, "%s: Not find host!\n", __func__); + status = readl(host->base + SDCI_STATUS); + + if (status & SDCI_STATUS_CDSL) { /* card absent */ + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, + msecs_to_jiffies(100)); + } + } else { /* card insert */ + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static irqreturn_t phytium_sdci_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_NORMAL_ISR); + /* clear interrupts */ + writel(1, host->base + SDCI_NORMAL_ISR); + + mrq = host->mrq; + cmd = host->cmd; + spin_unlock_irqrestore(&host->lock, flags); + + if (events & (SDCI_NORMAL_ISR_CR | SDCI_NORMAL_ISR_CI)) { + mod_timer(&host->hotplug_timer, + jiffies + usecs_to_jiffies(30000)); + goto irq_out; + } + + if (!(events & cmd_ints_mask)) + goto irq_out; + + if (!mrq) { + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", + __func__, events); + WARN_ON(1); + goto irq_out; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (cmd) + phytium_sdci_cmd_done(host, events, mrq, cmd); + +irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_dma_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_BD_ISR); + writel(1, host->base + SDCI_BD_ISR); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events & data_ints_mask)) + goto dma_irq_out; + + if (!mrq) { + dev_err(host->dev, + "%s: MRQ=NULL; events=%08X\n", + __func__, events); + goto dma_irq_out; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (data) + phytium_sdci_data_xfer_done(host, events, mrq, data); + +dma_irq_out: + return IRQ_HANDLED; +} + +static irqreturn_t phytium_sdci_err_irq(int irq, void *dev_id) +{ + struct phytium_sdci_host *host = (struct phytium_sdci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events; + + if (!host) + return IRQ_NONE; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + SDCI_ERROR_ISR); + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events&err_ints_mask)) + goto err_irq_out; + + if (!mrq) { + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + writel(1, host->base + SDCI_ERROR_ISR); + dev_err(host->dev, "%s: MRQ=NULL; events=%08X\n", + __func__, events); + goto err_irq_out; + } + sdr_clr_bits(host->base + SDCI_NORMAL_ISER, SDCI_NORMAL_ISR_EI); + if (data) { + dev_err(host->dev, "[%s][%d]: cmd(%d); %d read blocks", + __func__, __LINE__, mrq->cmd->opcode, data->blocks); + dev_err(host->dev, 
"status:%x,flag:%x\n", + events, data->flags); + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_sdci_data_xfer_done(host, SDCI_BD_ISR_EDTE | + SDCI_BD_ISR_TRS_R, mrq, data); + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + } else if (cmd) { + phytium_sdci_cmd_done(host, SDCI_NORMAL_ISR_TIMEOUT, mrq, cmd); + } + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); +err_irq_out: + return IRQ_HANDLED; +} + +static void phytium_sdci_init_hw(struct phytium_sdci_host *host) +{ + u32 val; + + /* Reset */ + phytium_sdci_reset_hw(host); + + val = SDCI_SEN_CREFR_VAL | SDCI_SEN_DEBNCE_VAL; + writel(val, host->base + SDCI_SD_SEN); + + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + writel(1, host->base + SDCI_BD_ISR); + + sdr_set_bits(host->base + SDCI_NORMAL_ISER, + SDCI_SDCI_NORMAL_ISER_ECI|SDCI_SDCI_NORMAL_ISER_ECR); + /* Configure default cmd timeout to 0.1(s)s = val/25M */ + val = SDCI_F_MAX / 10; + writel(val, host->base + SDCI_TIMEOUT_CMD); + writel(SDCI_TIMEOUT_DATA_VALUE, host->base + SDCI_TIMEOUT_DATA); + + val = 0x0F00; + writel(val, host->base + SDCI_CONTROLLER); + + dev_dbg(host->dev, "init hardware done!"); +} + +static void phytium_sdci_deinit_hw(struct phytium_sdci_host *host) +{ + /* Disable and clear all interrupts */ + writel(0, host->base + SDCI_NORMAL_ISER); + writel(0, host->base + SDCI_ERROR_ISER); + writel(0, host->base + SDCI_BD_ISER); + + writel(0, host->base + SDCI_NORMAL_ISR); + writel(0, host->base + SDCI_ERROR_ISR); + writel(0, host->base + SDCI_BD_ISR); +} + +static void phytium_sdci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + if (ios->bus_width == MMC_BUS_WIDTH_4) + mmc->caps = mmc->caps & (~MMC_CAP_4_BIT_DATA); + + /* Suspend/Resume will do power off/on */ + switch (ios->power_mode) { + case MMC_POWER_UP: + writel(SDCI_POWER_ON, host->base + SDCI_POWER); + break; + case MMC_POWER_ON: + phytium_sdci_set_clk(host, ios); + break; + case MMC_POWER_OFF: + writel(SDCI_POWER_OFF, host->base + SDCI_POWER); + break; + default: + break; + } +} + +static int phytium_sdci_get_cd(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + u32 status = readl(host->base + SDCI_STATUS); + + if (((status >> 19) & 0x1) == 0x1) + return 0; + + return 1; +} + +static void phytium_sdci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_sdci_host *host = mmc_priv(mmc); + + sdr_set_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + sdr_clr_bits(host->base + SDCI_SOFTWARE, SDCI_SOFTWARE_SRST); + while (!(readl(host->base + SDCI_STATUS) & SDCI_STATUS_IDIE)) + cpu_relax(); +} + +static struct mmc_host_ops phytium_sdci_ops = { + .request = phytium_sdci_ops_request, + .set_ios = phytium_sdci_ops_set_ios, + .get_cd = phytium_sdci_get_cd, + .card_busy = phytium_sdci_card_busy, + .hw_reset = phytium_sdci_hw_reset, +}; + +static bool phytium_sdci_private_send_cmd(struct phytium_sdci_host *host, + u32 cmd, u32 resp_type, u32 arg) +{ + u32 temp, sd_cmd, sd_arg, sd_status; + unsigned long deadline_time; + + writel(1, host->base + SDCI_NORMAL_ISR); + writel(1, host->base + SDCI_ERROR_ISR); + + sd_cmd = (cmd << 8) | resp_type; + sd_arg = arg; + writel(sd_cmd, 
host->base + SDCI_COMMAND); + writel(sd_arg, host->base + SDCI_ARGUMENT); + + if (cmd == MMC_STOP_TRANSMISSION) + deadline_time = jiffies + msecs_to_jiffies(1000); + else + deadline_time = jiffies + msecs_to_jiffies(100); + + temp = readl(host->base + SDCI_NORMAL_ISR); + while ((temp & SDCI_NORMAL_ISR_CC) != SDCI_NORMAL_ISR_CC) { + sd_status = readl(host->base + SDCI_STATUS); + if (sd_status & SDCI_STATUS_CDSL) + return false; + + temp = readl(host->base + SDCI_NORMAL_ISR); + if (time_after(jiffies, deadline_time)) + return false; + + if (cmd == MMC_STOP_TRANSMISSION) + mdelay(1); + } + + return true; +} + +static int phytium_sdci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + struct resource *res; + int ret; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + + /* Allocate MMC host for this device */ + mmc = mmc_alloc_host(sizeof(struct phytium_sdci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_sdc_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + if (device_property_read_bool(dev, "no-dma-coherent")) + dev->archdata.dma_coherent = false; + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + + acpi_dma_configure(dev, DEV_DMA_NOT_SUPPORTED); + + host->clk_rate = 600000000; + } else { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + + dma_set_mask(dev, DMA_BIT_MASK(40)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(40)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 1); + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_err = platform_get_irq(pdev, 2); + if (host->irq_err < 0) { + ret = -EINVAL; + goto host_free; + } + + host->irq_bd = platform_get_irq(pdev, 0); + if (host->irq_bd < 0) { + ret = -EINVAL; + goto host_free; + } + + host->caps = caps; + host->dev = &pdev->dev; + host->mmc = mmc; + + if ((4 * SDCI_F_MAX) > host->clk_rate) + host->clk_div = 1; + else + host->clk_div = ((host->clk_rate / (2 * SDCI_F_MAX)) - 1); + + /* Set host parameters to mmc */ + mmc->f_min = SDCI_F_MIN; + mmc->f_max = (host->clk_rate / ((host->clk_div + 1) * 2)); + mmc->ops = &phytium_sdci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->caps |= host->caps; + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 512 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + + host->dma_rx.buf = dma_zalloc_coherent(&pdev->dev, + MAX_BD_NUM, + &host->dma_rx.bd_addr, + GFP_KERNEL); + if (!host->dma_rx.buf) { + ret = -ENOMEM; + goto release_mem; + } + + host->cmd_timeout = msecs_to_jiffies(100); + host->data_timeout = msecs_to_jiffies(250); + + INIT_DELAYED_WORK(&host->req_timeout, phytium_sdci_request_timeout); + spin_lock_init(&host->lock); + + platform_set_drvdata(pdev, mmc); + phytium_sdci_init_hw(host); + + ret = devm_request_irq(&pdev->dev, 
host->irq, phytium_sdci_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_err, phytium_sdci_err_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = devm_request_irq(&pdev->dev, host->irq_bd, phytium_sdci_dma_irq, + IRQF_SHARED, pdev->name, host); + if (ret) + goto release; + + ret = mmc_add_host(mmc); + if (ret) + goto release; + + return 0; + +release: + platform_set_drvdata(pdev, NULL); + phytium_sdci_deinit_hw(host); +release_mem: + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, + host->dma_rx.bd_addr); +host_free: + mmc_free_host(mmc); + + return ret; +} + +static int phytium_sdci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_sdci_host *host; + + mmc = platform_get_drvdata(pdev); + host = mmc_priv(mmc); + + cancel_delayed_work_sync(&host->req_timeout); + platform_set_drvdata(pdev, NULL); + mmc_remove_host(host->mmc); + phytium_sdci_deinit_hw(host); + + if (host->dma_rx.buf) + dma_free_coherent(&pdev->dev, MAX_BD_NUM, + host->dma_rx.buf, host->dma_rx.bd_addr); + + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_sdci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + return 0; +} + +static int phytium_sdci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + mmc->caps = mmc->caps | MMC_CAP_4_BIT_DATA; + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int phytium_sdci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_deinit_hw(host); + + return 0; +} + +static int phytium_sdci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_sdci_host *host = mmc_priv(mmc); + + phytium_sdci_init_hw(host); + + return 0; +} + +static const struct dev_pm_ops phytium_sdci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_sdci_suspend, + phytium_sdci_resume) + SET_RUNTIME_PM_OPS(phytium_sdci_runtime_suspend, + phytium_sdci_runtime_resume, NULL) +}; +#else +#define phytium_sdci_dev_pm_ops NULL +#endif + +static const struct of_device_id phytium_sdci_of_ids[] = { + { .compatible = "phytium,sdci", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sdci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_sdci_acpi_ids[] = { + { .id = "PHYT0005" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_sdci_acpi_ids); +#else +#define phytium_sdci_acpi_ids NULL +#endif + +static struct platform_driver phytium_sdci_driver = { + .probe = phytium_sdci_probe, + .remove = phytium_sdci_remove, + .driver = { + .name = "sdci-phytium", + .of_match_table = phytium_sdci_of_ids, + .acpi_match_table = phytium_sdci_acpi_ids, + .pm = &phytium_sdci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_sdci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SD Card Interface driver"); diff --git a/drivers/mmc/host/phytium-sdci.h b/drivers/mmc/host/phytium-sdci.h new file mode 100644 index 0000000000000000000000000000000000000000..2fc3c19d2a57addc8cb4c72b6090e1233267f4f0 --- /dev/null +++ b/drivers/mmc/host/phytium-sdci.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */
+/*
+ * File Name: phytium_sdci.h - Phytium FT SDCI driver
+ *
+ * Copyright (C) 2019-2023, Phytium Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*---------------------------------------------------------------------------*/
+/* Common Definition */
+/*---------------------------------------------------------------------------*/
+#define MAX_BD_NUM 0x1000
+#define SD_BLOCK_SIZE 512
+
+/*---------------------------------------------------------------------------*/
+/* Register Offset */
+/*---------------------------------------------------------------------------*/
+#define SDCI_CONTROLLER 0x00 /* controller config reg */
+#define SDCI_ARGUMENT 0x04 /* argument reg */
+#define SDCI_COMMAND 0x08 /* command reg */
+#define SDCI_CLOCK_D 0x0C /* clock divide reg */
+#define SDCI_SOFTWARE 0x10 /* controller reset reg */
+#define SDCI_POWER 0x14 /* power control reg */
+#define SDCI_TIMEOUT_CMD 0x18 /* cmd timeout config reg */
+#define SDCI_TIMEOUT_DATA 0x1C /* data timeout reg */
+#define SDCI_NORMAL_ISER 0x20 /* normal ISR config reg */
+#define SDCI_ERROR_ISER 0x24 /* error ISR config reg */
+#define SDCI_BD_ISER 0x28 /* BD ISR config reg */
+#define SDCI_CAPA 0x2C /* capability reg */
+#define SDCI_SD_DRV 0x30 /* SD card driving phase position reg */
+#define SDCI_SD_SAMP 0x34 /* SD card sampling phase position reg */
+#define SDCI_SD_SEN 0x38 /* SD card detection reg */
+#define SDCI_HDS_AXI 0x3C /* AXI boundary config reg */
+#define SDCI_BD_RX 0x40 /* BD rx addr reg */
+#define SDCI_BD_TX 0x60 /* BD tx addr reg */
+#define SDCI_BLK_CNT 0x80 /* r/w block num reg */
+#define SDCI_NORMAL_ISR 0xC0 /* normal ISR status reg */
+#define SDCI_ERROR_ISR 0xC4 /* error ISR status reg */
+#define SDCI_BD_ISR 0xC8 /* BD ISR status reg */
+#define SDCI_BD_STATUS 0xCC /* BD descriptor status reg */
+#define SDCI_STATUS 0xD0 /* status reg */
+#define SDCI_BLOCK 0xD4 /* block len reg */
+#define SDCI_RESP0 0xE0 /* response reg0 */
+#define SDCI_RESP1 0xE4 /* response reg1 */
+#define SDCI_RESP2 0xE8 /* response reg2 */
+#define SDCI_RESP3 0xEC /* response reg3 */
+
+/*---------------------------------------------------------------------------*/
+/* Register Mask */
+/*---------------------------------------------------------------------------*/
+/* SDCI_CONTROLLER mask */
+#define SDCI_CONTROLLER_ECRCWR (0x1 << 0) /* RW */
+#define SDCI_CONTROLLER_ECRCRD (0x1 << 1) /* RW */
+#define SDCI_CONTROLLER_RESEDE (0x1 << 2) /* RW */
+#define SDCI_CONTROLLER_PERMDR (0x3 << 8) /* RW */
+#define SDCI_CONTROLLER_PERMDX (0x3 << 10) /* RW */
+
+/* SDCI_SOFTWARE mask */
+#define SDCI_SOFTWARE_SRST (0x1 << 0) /* RW */
+#define SDCI_SOFTWARE_SCRST (0x1 << 1) /* RW */
+#define SDCI_SOFTWARE_BDRST (0x1 << 2) /* RW */
+#define SDCI_SOFTWARE_CFCLF (0x1 << 3) /* RW */
+#define SDCI_SOFTWARE_SDRST (0x1 << 4) /* RW */
+
+/* SDCI_NORMAL_ISER mask */
+#define SDCI_SDCI_NORMAL_ISER_ECC_EN (0x1 << 0) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_ECR (0x1 << 1) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_ECI (0x1 << 2) /* RW */
+#define SDCI_SDCI_NORMAL_ISER_EEI_EN (0x1 << 15) /* RW */
+
+/* SDCI_NORMAL_ISR mask */
+#define SDCI_NORMAL_ISR_CC (0x1 << 0) /* R */
+#define SDCI_NORMAL_ISR_CR (0x1 << 1) /* R */
+#define SDCI_NORMAL_ISR_CI (0x1 << 2) /* R */
+#define SDCI_NORMAL_ISR_TIMEOUT 
(0x1 << 3) /* R */ +#define SDCI_NORMAL_ISR_EI (0x1 << 15) /* R */ + +/* SDCI_ERROR_ISER mask */ +#define SDCI_ERROR_ISER_ECTE_EN (0x1 << 0) /* RW */ +#define SDCI_ERROR_ISR_CCRCE_EN (0x1 << 1) /* RW */ +#define SDCI_ERROR_ISR_CIR_EN (0x1 << 3) /* RW */ +#define SDCI_ERROR_ISR_CNR_EN (0x1 << 4) /* RW */ +/* SDCI_ERROR_ISR mask */ +#define SDCI_ERROR_ISR_CTE (0x1 << 0) /* R */ +#define SDCI_ERROR_ISR_CCRCE (0x1 << 1) /* R */ +#define SDCI_ERROR_ISR_CIR (0x1 << 3) /* R */ +#define SDCI_ERROR_ISR_CNR (0x1 << 4) /* R */ + +/* SDCI_BD_ISER mask */ +#define SDCI_BD_ISER_ETRS_EN (0x1 << 8) /* RW */ +#define SDCI_BD_ISER_DATFRAX_EN (0x1 << 7) /* RW */ + +/* SDCI_BD_ISR mask */ +#define SDCI_BD_ISR_TRS_W (0x1 << 0) /* R */ +#define SDCI_BD_ISR_TRS_R (0x1 << 8) /* R */ +#define SDCI_BD_ISR_EDTE (0x1 << 3) /* R */ +#define SDCI_BD_ISR_DAIS (0x1 << 15) /* R */ +#define SDCI_BD_ISR_DATFRAX (0x1 << 7) /* R */ + +/* SDCI_HDS_AXI mask */ +#define SDCI_HDS_AXI_AWDOMAIN (0x1 << 0) /* RW */ +#define SDCI_HDS_AXI_ARDOMAIN (0x1 << 12) /* RW */ +#define SDCI_HDS_AXI_AWCACHE (0x6 << 24) /* RW */ +#define SDCI_HDS_AXI_ARCACHE (0xB << 28) /* RW */ + +/* SDCI_STATUS mask */ +#define SDCI_STATUS_CMD_BUSY (0x0 << 0) /* R */ +#define SDCI_STATUS_CMD_READY (0x1 << 0) /* R */ +#define SDCI_STATUS_IDIE (0x1 << 12) /* R */ +#define SDCI_CARD_BUSY_IN_PRG (0x1 << 20) /* R D0 BUSY:0,IDLE:1 */ + +/* SDCI_STATUS */ +#define SDCI_STATUS_CDSL (0x1 << 19) /* R */ + +/*---------------------------------------------------------------------------*/ +/* Register Value */ +/*---------------------------------------------------------------------------*/ +#define SDCI_SD_DRV_VALUE 0 +#define SDCI_SD_SAMP_VALUE_MAX 50 +#define SDCI_SD_SAMP_VALUE_MIN 0 + +#define SDCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define SDCI_TIMEOUT_DATA_VALUE 0xFFFFFFFF +#define SDCI_POWER_ON 1 +#define SDCI_POWER_OFF 0 + +#define SDCI_CMD_TIMEOUT 10 +#define SDCI_DAT_TIMEOUT 5000 + +#define SDCI_CMD_TYPE_ADTC 0x2 + +#define SDCI_F_MIN 400000 +#define SDCI_F_MAX 25000000 + +#define SDCI_SEN_CREFR_VAL (0x1 << 1) +#define SDCI_SEN_DEBNCE_VAL (0xB << 8) + +#define CARD_CURRENT_STATE (0xF << 9) +#define CARD_PRG_STATE (0x7 << 9) +#define CARD_TRAN_STATE (0x4 << 9) + +#define SDCI_CMD13_OK 1 +#define SDCI_CMD13_FAILED 0 + +#define ERR_TIMEOUT (0x1 << 0) +#define ERR_CARD_ABSENT (0x1 << 1) +#define ERR_CMD_RESPONED (0x1 << 2) + +/*---------------------------------------------------------------------------*/ +/* Structure Type */ +/*---------------------------------------------------------------------------*/ +struct phytium_sdci_dma { + struct scatterlist *sg; + u32 *buf; + dma_addr_t bd_addr; + size_t bytes; +}; + +enum adtc_type { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_sdci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + spinlock_t lock; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + + void __iomem *base; + + struct phytium_sdci_dma dma_rx; + struct phytium_sdci_dma dma_tx; + + u32 *sg_virt_addr; + enum adtc_type adtc_type; + + struct timer_list hotplug_timer; + + struct delayed_work req_timeout; + u32 cmd_timeout; + u32 data_timeout; + + int irq; + int irq_err; + int irq_bd; + + struct clk *src_clk; + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + + u32 current_rca; + bool is_multi_rw_only_one_blkcnt; +}; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 
f7ffbf1676b13f9735b234773cc510752afe60b6..00b5465dfb0ce953a7d1f14f60a10c334693e16e 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -164,7 +164,7 @@ static void pxamci_dma_irq(void *param); static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) { struct dma_async_tx_descriptor *tx; - enum dma_data_direction direction; + enum dma_transfer_direction direction; struct dma_slave_config config; struct dma_chan *chan; unsigned int nob = data->blocks; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 777e32b0e410e8501197c0f16de30d000b042ae3..61f0faddfd8897d9684fcdbefa08279c2ca121d7 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -557,6 +557,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, struct renesas_sdhi *priv; struct resource *res; int irq, ret, i; + u16 ver; of_data = of_device_get_match_data(&pdev->dev); @@ -635,6 +636,13 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->ops.card_busy = renesas_sdhi_card_busy; host->ops.start_signal_voltage_switch = renesas_sdhi_start_signal_voltage_switch; + + /* SDR and HS200/400 registers requires HW reset */ + if (of_data && of_data->scc_offset) { + priv->scc_ctl = host->ctl + of_data->scc_offset; + host->mmc->caps |= MMC_CAP_HW_RESET; + host->hw_reset = renesas_sdhi_hw_reset; + } } /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ @@ -671,12 +679,17 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret) goto efree; + ver = sd_ctrl_read16(host, CTL_VERSION); + /* GEN2_SDR104 is first known SDHI to use 32bit block count */ + if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX) + mmc_data->max_blk_count = U16_MAX; + ret = tmio_mmc_host_probe(host); if (ret < 0) goto edisclk; /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ - if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) + if (ver == SDHI_VER_GEN2_SDR50) mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; /* Enable tuning iff we have an SCC and a supported mode */ @@ -687,8 +700,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, const struct renesas_sdhi_scc *taps = of_data->taps; bool hit = false; - host->mmc->caps |= MMC_CAP_HW_RESET; - for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { @@ -701,12 +712,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!hit) dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); - priv->scc_ctl = host->ctl + of_data->scc_offset; host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; host->select_tuning = renesas_sdhi_select_tuning; host->check_scc_error = renesas_sdhi_check_scc_error; - host->hw_reset = renesas_sdhi_hw_reset; host->prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; host->hs400_downgrade = renesas_sdhi_disable_scc; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index ca0b43973769c9f80b4771914b5b74016b54bf84..382172fb3da8f55886a27874dbcffd585e4aaf95 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -298,6 +298,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = { { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, /* generic ones */ + { .soc_id = "r8a774a1" }, { .soc_id = "r8a7795" }, { .soc_id = "r8a7796" }, { 
.soc_id = "r8a77965" }, @@ -309,12 +310,20 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = { static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); + struct device *dev = &pdev->dev; if (!soc) return -ENODEV; global_flags |= (unsigned long)soc->data; + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + + /* value is max of SD_SECCNT. Confirmed by HW engineers */ + dma_set_max_seg_size(dev, 0xffffffff); + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); } diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 5389c48218820166209a7de463c01084366b1fe4..c3d63edb545e391848eb3cb5b60479df87fa29c0 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -68,6 +68,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .scc_offset = 0x0300, .taps = rcar_gen2_scc_taps, .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), + .max_blk_count = 0xffffffff, }; /* Definitions for sampling clocks */ diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 32321bd596d880027358db10e9eb5f5b45957c1d..57c1ec322e42106a89d4df0c3ac486b113a6642c 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -76,6 +76,7 @@ struct sdhci_acpi_slot { size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); + int (*free_slot)(struct platform_device *pdev); int (*setup_host)(struct platform_device *pdev); }; @@ -246,7 +247,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { static bool sdhci_acpi_byt(void) { static const struct x86_cpu_id byt[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, {} }; @@ -756,6 +757,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) err_cleanup: sdhci_cleanup_host(c->host); err_free: + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return err; } @@ -777,6 +781,10 @@ static int sdhci_acpi_remove(struct platform_device *pdev) dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_remove_host(c->host, dead); + + if (c->slot && c->slot->free_slot) + c->slot->free_slot(pdev); + sdhci_free_host(c->host); return 0; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index f44e49014a440e1a4790d1d13b3ea7f62037db5f..629860f7327c9e465331a568e2556b0550d3f192 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -429,7 +429,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) val = readl(host->ioaddr + ESDHC_MIX_CTRL); else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) /* the std tuning bits is in ACMD12_ERR for imx6sl */ - val = readl(host->ioaddr + SDHCI_ACMD12_ERR); + val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); } if (val & ESDHC_MIX_CTRL_EXE_TUNE) @@ -494,7 +494,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) } writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { - u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); + u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); if (val & SDHCI_CTRL_TUNED_CLK) { v |= 
ESDHC_MIX_CTRL_SMPCLK_SEL; @@ -512,7 +512,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) v &= ~ESDHC_MIX_CTRL_EXE_TUNE; } - writel(v, host->ioaddr + SDHCI_ACMD12_ERR); + writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS); writel(m, host->ioaddr + ESDHC_MIX_CTRL); } return; @@ -957,9 +957,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host) writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL); writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { - ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR); + ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS); ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; - writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR); + writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS); } } } @@ -981,6 +981,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) case MMC_TIMING_UHS_SDR25: case MMC_TIMING_UHS_SDR50: case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS: case MMC_TIMING_MMC_HS200: writel(m, host->ioaddr + ESDHC_MIX_CTRL); break; @@ -1097,11 +1098,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) | ESDHC_BURST_LEN_EN_INCR, host->ioaddr + SDHCI_HOST_CONTROL); + /* - * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL - * TO1.1, it's harmless for MX6SL - */ - writel(readl(host->ioaddr + 0x6c) | BIT(7), + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), host->ioaddr + 0x6c); /* disable DLL_CTRL delay line settings */ @@ -1317,7 +1319,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) /* clear tuning bits in case ROM has set it already */ writel(0x0, host->ioaddr + ESDHC_MIX_CTRL); - writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR); + writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS); writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); } diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index d0e83db42ae52614b5ab9f03d211290f7c100937..f903ab96aa21e41863f8fce016d75918dd7442e6 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -185,7 +185,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { }; static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, .ops = &sdhci_iproc_32only_ops, }; @@ -208,7 +209,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = { static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, .ops = &sdhci_iproc_ops, }; @@ -279,7 +281,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev) iproc_host->data = iproc_data; - mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + sdhci_get_of_property(pdev); host->mmc->caps |= iproc_host->data->mmc_caps; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3cc8bfee6c18f9a39da058748daf37d260571b51..594952681818c69f76094fd58abd05f314fc4f5e 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -108,7 +108,7 @@ #define CORE_PWRSAVE_DLL BIT(3) -#define DDR_CONFIG_POR_VAL 
0x80040853 +#define DDR_CONFIG_POR_VAL 0x80040873 #define INVALID_TUNING_PHASE -1 @@ -157,8 +157,9 @@ struct sdhci_msm_offset { u32 core_ddr_200_cfg; u32 core_vendor_spec3; u32 core_dll_config_2; + u32 core_dll_config_3; + u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ u32 core_ddr_config; - u32 core_ddr_config_2; }; static const struct sdhci_msm_offset sdhci_msm_v5_offset = { @@ -186,8 +187,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = { .core_ddr_200_cfg = 0x224, .core_vendor_spec3 = 0x250, .core_dll_config_2 = 0x254, - .core_ddr_config = 0x258, - .core_ddr_config_2 = 0x25c, + .core_dll_config_3 = 0x258, + .core_ddr_config = 0x25c, }; static const struct sdhci_msm_offset sdhci_msm_mci_offset = { @@ -216,8 +217,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = { .core_ddr_200_cfg = 0x184, .core_vendor_spec3 = 0x1b0, .core_dll_config_2 = 0x1b4, - .core_ddr_config = 0x1b8, - .core_ddr_config_2 = 0x1bc, + .core_ddr_config_old = 0x1b8, + .core_ddr_config = 0x1bc, }; struct sdhci_msm_variant_ops { @@ -258,6 +259,9 @@ struct sdhci_msm_host { bool mci_removed; const struct sdhci_msm_variant_ops *var_ops; const struct sdhci_msm_offset *offset; + bool use_cdr; + u32 transfer_mode; + bool updated_ddr_cfg; }; static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) @@ -580,11 +584,14 @@ static int msm_init_cm_dll(struct sdhci_host *host) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); int wait_cnt = 50; - unsigned long flags; + unsigned long flags, xo_clk = 0; u32 config; const struct sdhci_msm_offset *msm_offset = msm_host->offset; + if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk)) + xo_clk = clk_get_rate(msm_host->xo_clk); + spin_lock_irqsave(&host->lock, flags); /* @@ -632,10 +639,10 @@ static int msm_init_cm_dll(struct sdhci_host *host) config &= CORE_FLL_CYCLE_CNT; if (config) mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8), - clk_get_rate(msm_host->xo_clk)); + xo_clk); else mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4), - clk_get_rate(msm_host->xo_clk)); + xo_clk); config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2); @@ -926,8 +933,10 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host) static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) { struct mmc_host *mmc = host->mmc; - u32 dll_status, config; + u32 dll_status, config, ddr_cfg_offset; int ret; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); @@ -940,8 +949,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) * bootloaders. In the future, if this changes, then the desired * values will need to be programmed appropriately. 
*/ - writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + - msm_offset->core_ddr_config); + if (msm_host->updated_ddr_cfg) + ddr_cfg_offset = msm_offset->core_ddr_config; + else + ddr_cfg_offset = msm_offset->core_ddr_config_old; + writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset); if (mmc->ios.enhanced_strobe) { config = readl_relaxed(host->ioaddr + @@ -1025,6 +1037,26 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host) return ret; } +static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) +{ + const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); + u32 config, oldconfig = readl_relaxed(host->ioaddr + + msm_offset->core_dll_config); + + config = oldconfig; + if (enable) { + config |= CORE_CDR_EN; + config &= ~CORE_CDR_EXT_EN; + } else { + config &= ~CORE_CDR_EN; + config |= CORE_CDR_EXT_EN; + } + + if (config != oldconfig) + writel_relaxed(config, host->ioaddr + + msm_offset->core_dll_config); +} + static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); @@ -1042,8 +1074,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) if (host->clock <= CORE_FREQ_100MHZ || !(ios.timing == MMC_TIMING_MMC_HS400 || ios.timing == MMC_TIMING_MMC_HS200 || - ios.timing == MMC_TIMING_UHS_SDR104)) + ios.timing == MMC_TIMING_UHS_SDR104)) { + msm_host->use_cdr = false; + sdhci_msm_set_cdr(host, false); return 0; + } + + /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ + msm_host->use_cdr = true; /* * For HS400 tuning in HS200 timing requires: @@ -1525,6 +1563,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) case SDHCI_POWER_CONTROL: req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; break; + case SDHCI_TRANSFER_MODE: + msm_host->transfer_mode = val; + break; + case SDHCI_COMMAND: + if (!msm_host->use_cdr) + break; + if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 && + SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK) + sdhci_msm_set_cdr(host, true); + else + sdhci_msm_set_cdr(host, false); + break; } if (req_type) { @@ -1818,6 +1869,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_offset->core_vendor_spec_capabilities0); } + if (core_major == 1 && core_minor >= 0x49) + msm_host->updated_ddr_cfg = true; + /* * Power on reset state may trigger power irq if previous status of * PWRCTL was either BUS_ON or IO_HIGH_V. 
So before enabling pwr irq @@ -1916,6 +1970,11 @@ static int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = true; + spin_unlock_irqrestore(&host->lock, flags); clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), msm_host->bulk_clks); @@ -1928,9 +1987,19 @@ static int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + unsigned long flags; + int ret; - return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), msm_host->bulk_clks); + if (ret) + return ret; + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = false; + spin_unlock_irqrestore(&host->lock, flags); + + return ret; } #endif diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index a40bcc27f187891f594ccc79713140c664619eb8..9c77bfe4334f3c67ab61fb1cac75d5adc4ef53bb 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) ret = mmc_of_parse(host->mmc); if (ret) { - dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret); goto unreg_clk; } @@ -814,7 +815,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev) host->mmc_host_ops.start_signal_voltage_switch = sdhci_arasan_voltage_switch; sdhci_arasan->has_cqe = true; - host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + host->mmc->caps2 |= MMC_CAP2_CQE; + + if (!of_property_read_bool(np, "disable-cqe-dcmd")) + host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } ret = sdhci_arasan_add_host(sdhci_arasan); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 682c573e20a727e118282b33eae9d982917e138f..1ebcf0eb781ea6cae8dc9cf4ae12b53cfdf03b96 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -365,6 +365,9 @@ static int sdhci_at91_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, 50); pm_runtime_use_autosuspend(&pdev->dev); + /* HS200 is broken at this moment */ + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 9cb7554a463d732f937bd4e0e9725474a1ff3b68..2c9110fee1cc233e1da75ca7b5b1078d66787450 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -480,7 +480,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); value = sdhci_readl(host, ESDHC_DMA_SYSCTL); - value |= ESDHC_DMA_SNOOP; + + if (of_dma_is_coherent(dev->of_node)) + value |= ESDHC_DMA_SNOOP; + else + value &= ~ESDHC_DMA_SNOOP; + sdhci_writel(host, value, ESDHC_DMA_SYSCTL); return 0; } @@ -526,8 +531,12 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable) /* Wait max 20 ms */ timeout = ktime_add_ms(ktime_get(), 20); val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { + 
while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & val) + break; + if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); break; @@ -591,8 +600,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) /* Wait max 20 ms */ timeout = ktime_add_ms(ktime_get(), 20); - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); return; @@ -909,6 +922,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) if (esdhc->vendor_ver > VENDOR_V_22) host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { + host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + } + if (of_device_is_compatible(np, "fsl,p5040-esdhc") || of_device_is_compatible(np, "fsl,p5020-esdhc") || of_device_is_compatible(np, "fsl,p4080-esdhc") || diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 88347ce78f23feee0b1b2f1f191c0891ce563358..d02f5cf76b3d171d75760ab8debe9bb91edab56b 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -220,8 +220,12 @@ static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host, /* wait 1ms */ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); - while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) { - if (WARN_ON(ktime_after(ktime_get(), timeout))) + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP) + break; + if (WARN_ON(timedout)) return; usleep_range(5, 10); } @@ -288,9 +292,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) struct device *dev = omap_host->dev; struct mmc_ios *ios = &mmc->ios; u32 start_window = 0, max_window = 0; + bool dcrc_was_enabled = false; u8 cur_match, prev_match = 0; u32 length = 0, max_len = 0; - u32 ier = host->ier; u32 phase_delay = 0; int ret = 0; u32 reg; @@ -317,9 +321,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) * during the tuning procedure. So disable it during the * tuning procedure. 
*/ - ier &= ~SDHCI_INT_DATA_CRC; - sdhci_writel(host, ier, SDHCI_INT_ENABLE); - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); + if (host->ier & SDHCI_INT_DATA_CRC) { + host->ier &= ~SDHCI_INT_DATA_CRC; + dcrc_was_enabled = true; + } while (phase_delay <= MAX_PHASE_DELAY) { sdhci_omap_set_dll(omap_host, phase_delay); @@ -366,6 +371,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) ret: sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + /* Reenable forbidden interrupt */ + if (dcrc_was_enabled) + host->ier |= SDHCI_INT_DATA_CRC; sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); return ret; @@ -649,8 +657,12 @@ static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode) /* wait 1ms */ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); - while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) { - if (WARN_ON(ktime_after(ktime_get(), timeout))) + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN) + break; + if (WARN_ON(timedout)) return; usleep_range(5, 10); } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 7bfd366d970dae374bae6acd36624eb23f719506..65985dc3e1a74a588e4ab758e080a5dde6e84cd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -12,6 +12,7 @@ * - JMicron (hardware and technical support) */ +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include "cqhci.h" @@ -462,6 +464,9 @@ struct intel_host { u32 dsm_fns; int drv_strength; bool d3_retune; + bool rpm_retune_ok; + u32 glk_rx_ctrl1; + u32 glk_tun_val; }; static const guid_t intel_dsm_guid = @@ -728,11 +733,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) return 0; } +static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + dmi_match(DMI_BIOS_VENDOR, "LENOVO"); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); - slot->host->mmc->caps2 |= MMC_CAP2_CQE; + if (!glk_broken_cqhci(slot)) + slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, @@ -791,6 +803,77 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot) return ret; } +#ifdef CONFIG_PM +#define GLK_RX_CTRL1 0x834 +#define GLK_TUN_VAL 0x840 +#define GLK_PATH_PLL GENMASK(13, 8) +#define GLK_DLY GENMASK(6, 0) +/* Workaround firmware failing to restore the tuning value */ +static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) +{ + struct sdhci_pci_slot *slot = chip->slots[0]; + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct sdhci_host *host = slot->host; + u32 glk_rx_ctrl1; + u32 glk_tun_val; + u32 dly; + + if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) + return; + + glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); + glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); + + if (susp) { + intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; + intel_host->glk_tun_val = glk_tun_val; + return; + } + + if (!intel_host->glk_tun_val) + return; + + if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { + intel_host->rpm_retune_ok = true; + return; + } + + dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + + (intel_host->glk_tun_val << 1)); + if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) + return; + + 
glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; + sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); + + intel_host->rpm_retune_ok = true; + chip->rpm_retune = true; + mmc_retune_needed(host->mmc); + pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); +} + +static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) +{ + if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + !chip->rpm_retune) + glk_rpm_retune_wa(chip, susp); +} + +static int glk_runtime_suspend(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, true); + + return sdhci_cqhci_runtime_suspend(chip); +} + +static int glk_runtime_resume(struct sdhci_pci_chip *chip) +{ + glk_rpm_retune_chk(chip, false); + + return sdhci_cqhci_runtime_resume(chip); +} +#endif + #ifdef CONFIG_ACPI static int ni_set_max_freq(struct sdhci_pci_slot *slot) { @@ -879,8 +962,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { .resume = sdhci_cqhci_resume, #endif #ifdef CONFIG_PM - .runtime_suspend = sdhci_cqhci_runtime_suspend, - .runtime_resume = sdhci_cqhci_runtime_resume, + .runtime_suspend = glk_runtime_suspend, + .runtime_resume = glk_runtime_resume, #endif .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | @@ -1502,6 +1585,8 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd), SDHCI_PCI_DEVICE(O2, 8120, o2), SDHCI_PCI_DEVICE(O2, 8220, o2), SDHCI_PCI_DEVICE(O2, 8221, o2), @@ -1762,8 +1847,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( device_init_wakeup(&pdev->dev, true); if (slot->cd_idx >= 0) { - ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, + ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, slot->cd_override_level, 0, NULL); + if (ret && ret != -EPROBE_DEFER) + ret = mmc_gpiod_request_cd(host->mmc, NULL, + slot->cd_idx, + slot->cd_override_level, + 0, NULL); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 77e9bc4aaee91b894ff34556083f686269f1c64a..e248d7945c062a561d035a5613c04e0a241721ac 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -117,6 +117,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) */ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { current_bus_width = mmc->ios.bus_width; + mmc->ios.bus_width = MMC_BUS_WIDTH_4; sdhci_set_bus_width(host, MMC_BUS_WIDTH_4); } @@ -128,8 +129,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode) sdhci_end_tuning(host); - if (current_bus_width == MMC_BUS_WIDTH_8) + if (current_bus_width == MMC_BUS_WIDTH_8) { + mmc->ios.bus_width = MMC_BUS_WIDTH_8; sdhci_set_bus_width(host, current_bus_width); + } host->flags &= ~SDHCI_HS400_TUNING; return 0; @@ -287,11 +290,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) { struct sdhci_pci_chip *chip; struct sdhci_host *host; - u32 reg; + u32 reg, caps; int ret; chip = slot->chip; host = slot->host; + + caps = sdhci_readl(host, SDHCI_CAPABILITIES); + + /* + * mmc_select_bus_width() will test the bus to determine the actual bus + * width. 
+ */ + if (caps & SDHCI_CAN_DO_8BIT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + switch (chip->pdev->device) { case PCI_DEVICE_ID_O2_SDS0: case PCI_DEVICE_ID_O2_SEABIRD0: @@ -490,6 +503,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; case PCI_DEVICE_ID_O2_SEABIRD0: + if (chip->pdev->revision == 0x01) + chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; + /* fall through */ case PCI_DEVICE_ID_O2_SEABIRD1: /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 2ef0bdca919743baaf51ae78d4cce8e215c5e65b..6f04a62b2998ecd632c2e505ae8e1df6efb0d08c 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -50,6 +50,8 @@ #define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375 #define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4 #define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8 +#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4 +#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 #define PCI_DEVICE_ID_VIA_95D0 0x95d0 diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index c335052d0c026cee2e7f283d37d15a026387cc5a..caccedc836dcffe1ee082c43c9b3be910975b6e4 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -357,9 +357,13 @@ static int xenon_emmc_phy_enable_dll(struct sdhci_host *host) /* Wait max 32 ms */ timeout = ktime_add_ms(ktime_get(), 32); - while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) & - XENON_DLL_LOCK_STATE)) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) & + XENON_DLL_LOCK_STATE) + break; + if (timedout) { dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n"); return -ETIMEDOUT; } diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 4d0791f6ec2368ee099a80b5a842c9e059770e54..a0b5089b3274821b6e84f16e57ffebd08a184cae 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -34,9 +34,13 @@ static int xenon_enable_internal_clk(struct sdhci_host *host) sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL); /* Wait max 20 ms */ timeout = ktime_add_ms(ktime_get(), 20); - while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) - & SDHCI_CLOCK_INT_STABLE)) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (reg & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n"); return -ETIMEDOUT; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 1b3fbd9bd5c5b57c451acf44e186b0736b6ed13e..369817a29c22a250644d931d2e2f2c3bca04e20f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -82,8 +82,8 @@ void sdhci_dumpregs(struct sdhci_host *host) SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", sdhci_readl(host, SDHCI_INT_ENABLE), sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); - SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n", - sdhci_readw(host, SDHCI_ACMD12_ERR), + SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", + sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", sdhci_readl(host, SDHCI_CAPABILITIES), @@ -193,8 +193,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask) timeout = 
ktime_add_ms(ktime_get(), 100); /* hw clears the bit when it's done */ - while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) + break; + if (timedout) { pr_err("%s: Reset 0x%x never completed.\n", mmc_hostname(host->mmc), (int)mask); sdhci_dumpregs(host); @@ -837,6 +841,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) else host->ier = (host->ier & ~dma_irqs) | pio_irqs; + if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) + host->ier |= SDHCI_INT_AUTO_CMD_ERR; + else + host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } @@ -1074,8 +1083,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) return (!(host->flags & SDHCI_DEVICE_DEAD) && ((mrq->cmd && mrq->cmd->error) || (mrq->sbc && mrq->sbc->error) || - (mrq->data && ((mrq->data->error && !mrq->data->stop) || - (mrq->data->stop && mrq->data->stop->error))) || + (mrq->data && mrq->data->stop && mrq->data->stop->error) || (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); } @@ -1127,6 +1135,16 @@ static void sdhci_finish_data(struct sdhci_host *host) host->data = NULL; host->data_cmd = NULL; + /* + * The controller needs a reset of internal state machines upon error + * conditions. + */ + if (data->error) { + if (!host->cmd || host->cmd == data_cmd) + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); + } + if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) sdhci_adma_table_post(host, data); @@ -1151,17 +1169,6 @@ static void sdhci_finish_data(struct sdhci_host *host) if (data->stop && (data->error || !data->mrq->sbc)) { - - /* - * The controller needs a reset of internal state machines - * upon error conditions. - */ - if (data->error) { - if (!host->cmd || host->cmd == data_cmd) - sdhci_do_reset(host, SDHCI_RESET_CMD); - sdhci_do_reset(host, SDHCI_RESET_DATA); - } - /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. 
It is upper layer's @@ -1495,9 +1502,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) /* Wait max 20 ms */ timeout = ktime_add_ms(ktime_get(), 20); - while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) - & SDHCI_CLOCK_INT_STABLE)) { - if (ktime_after(ktime_get(), timeout)) { + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_INT_STABLE) + break; + if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); sdhci_dumpregs(host); @@ -2233,8 +2244,8 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_send_tuning(host, opcode); if (!host->tuning_done) { - pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", - mmc_hostname(host->mmc)); + pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); sdhci_abort_tuning(host, opcode); return; } @@ -2634,8 +2645,23 @@ static void sdhci_timeout_data_timer(struct timer_list *t) * * \*****************************************************************************/ -static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) +static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) { + /* Handle auto-CMD12 error */ + if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { + struct mmc_request *mrq = host->data_cmd->mrq; + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); + int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? + SDHCI_INT_DATA_TIMEOUT : + SDHCI_INT_DATA_CRC; + + /* Treat auto-CMD12 error the same as data error */ + if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { + *intmask_p |= data_err_bit; + return; + } + } + if (!host->cmd) { /* * SDHCI recovers from errors by resetting the cmd and data @@ -2657,20 +2683,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) else host->cmd->error = -EILSEQ; - /* - * If this command initiates a data phase and a response - * CRC error is signalled, the card can start transferring - * data - the card may have received the command without - * error. We must not terminate the mmc_request early. - * - * If the card did not receive the command or returned an - * error which prevented it sending data, the data phase - * will time out. - */ + /* Treat data command CRC error the same as data CRC error */ if (host->cmd->data && (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == SDHCI_INT_CRC) { host->cmd = NULL; + *intmask_p |= SDHCI_INT_DATA_CRC; return; } @@ -2678,6 +2696,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) return; } + /* Handle auto-CMD23 error */ + if (intmask & SDHCI_INT_AUTO_CMD_ERR) { + struct mmc_request *mrq = host->cmd->mrq; + u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); + int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
+ -ETIMEDOUT : + -EILSEQ; + + if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + mrq->sbc->error = err; + sdhci_finish_mrq(host, mrq); + return; + } + } + if (intmask & SDHCI_INT_RESPONSE) sdhci_finish_command(host); } @@ -2685,6 +2718,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) static void sdhci_adma_show_error(struct sdhci_host *host) { void *desc = host->adma_table; + dma_addr_t dma = host->adma_addr; sdhci_dumpregs(host); @@ -2692,18 +2726,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host) struct sdhci_adma2_64_desc *dma_desc = desc; if (host->flags & SDHCI_USE_64_BIT_DMA) - DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", - desc, le32_to_cpu(dma_desc->addr_hi), + SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", + (unsigned long long)dma, + le32_to_cpu(dma_desc->addr_hi), le32_to_cpu(dma_desc->addr_lo), le16_to_cpu(dma_desc->len), le16_to_cpu(dma_desc->cmd)); else - DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", - desc, le32_to_cpu(dma_desc->addr_lo), + SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", + (unsigned long long)dma, + le32_to_cpu(dma_desc->addr_lo), le16_to_cpu(dma_desc->len), le16_to_cpu(dma_desc->cmd)); desc += host->desc_sz; + dma += host->desc_sz; if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) break; @@ -2779,7 +2816,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) != MMC_BUS_TEST_R) host->data->error = -EILSEQ; else if (intmask & SDHCI_INT_ADMA_ERROR) { - pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); + pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), + intmask); sdhci_adma_show_error(host); host->data->error = -EIO; if (host->ops->adma_workaround) @@ -2898,7 +2936,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) } if (intmask & SDHCI_INT_CMD_MASK) - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); if (intmask & SDHCI_INT_DATA_MASK) sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); @@ -3511,6 +3549,9 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } + if (host->quirks & SDHCI_QUIRK_BROKEN_CQE) + mmc->caps2 &= ~MMC_CAP2_CQE; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f0bd36ce3817fef0eb50cbc6cc50c035c07e7396..c0372e3443fdf2ce2e8f5c5d859f76800ff74cb3 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -144,14 +144,15 @@ #define SDHCI_INT_DATA_CRC 0x00200000 #define SDHCI_INT_DATA_END_BIT 0x00400000 #define SDHCI_INT_BUS_POWER 0x00800000 -#define SDHCI_INT_ACMD12ERR 0x01000000 +#define SDHCI_INT_AUTO_CMD_ERR 0x01000000 #define SDHCI_INT_ADMA_ERROR 0x02000000 #define SDHCI_INT_NORMAL_MASK 0x00007FFF #define SDHCI_INT_ERROR_MASK 0xFFFF8000 #define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \ - SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX) + SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \ + SDHCI_INT_AUTO_CMD_ERR) #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ @@ -166,7 +167,11 @@ #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE) -#define SDHCI_ACMD12_ERR 0x3C +#define SDHCI_AUTO_CMD_STATUS 0x3C +#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002 +#define SDHCI_AUTO_CMD_CRC 0x00000004 +#define SDHCI_AUTO_CMD_END_BIT 0x00000008 
+#define SDHCI_AUTO_CMD_INDEX 0x00000010 #define SDHCI_HOST_CONTROL2 0x3E #define SDHCI_CTRL_UHS_MASK 0x0007 @@ -386,6 +391,8 @@ struct sdhci_host { #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) /* Controller reports inverted write-protect state */ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has unusable command queue engine */ +#define SDHCI_QUIRK_BROKEN_CQE (1<<17) /* Controller does not like fast PIO transfers */ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) /* Controller has to be forced to use block size of 2048 bytes */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 568349e1fbc2d3e2f4ade9cae5eebb9407dfb4ee..c4584184525f90a4893d89e1efe9b2859f14b1e6 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1394,6 +1394,21 @@ static int sunxi_mmc_probe(struct platform_device *pdev) if (ret) goto error_free_dma; + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. + */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + ret = sunxi_mmc_init_host(host); if (ret) goto error_free_dma; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 5d141f79e175b2a3690beab69b626a5416efe76d..7c40a7e1fea1c48757efb3639f7d5cecc7fe60e1 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -279,6 +279,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + iowrite32(val, host->ctl + (addr << host->bus_shift)); +} + static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, const u32 *buf, int count) { diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 261b4d62d2b1061f283cbda336fad8555e3d76ab..33c9ca8f14a97373afc9aec2b35ccd346aca06b7 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -703,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, return false; } -static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) { struct mmc_host *mmc = host->mmc; struct tmio_mmc_data *pdata = host->pdata; @@ -711,7 +712,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) unsigned int sdio_status; if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) - return; + return false; status = sd_ctrl_read16(host, CTL_SDIO_STATUS); ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; @@ -724,6 +725,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) mmc_signal_sdio_irq(mmc); + + return ireg; } irqreturn_t tmio_mmc_irq(int irq, void *devid) @@ -742,9 +745,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) if (__tmio_mmc_sdcard_irq(host, ireg, status)) return IRQ_HANDLED; - __tmio_mmc_sdio_irq(host); + if (__tmio_mmc_sdio_irq(host)) + return IRQ_HANDLED; - return IRQ_HANDLED; + 
return IRQ_NONE; } EXPORT_SYMBOL_GPL(tmio_mmc_irq); @@ -774,7 +778,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, /* Set transfer length / blocksize */ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + if (host->mmc->max_blk_count >= SZ_64K) + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); + else + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); tmio_mmc_start_dma(host, data); @@ -919,8 +926,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) if (mrq->cmd->error || (mrq->data && mrq->data->error)) tmio_mmc_abort_dma(host); - if (host->check_scc_error) - host->check_scc_error(host); + /* SCC error means retune, but executed command was still successful */ + if (host->check_scc_error && host->check_scc_error(host)) + mmc_retune_needed(host->mmc); /* If SET_BLOCK_COUNT, continue with main command */ if (host->mrq && !mrq->cmd->error) { @@ -1259,7 +1267,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) return ret; } - mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; mmc->max_segs = pdata->max_segs ? : 32; mmc->max_blk_size = 512; diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 72428b6bfc474ba6d757b94d79ff62804cc7c8ec..ba44ea6d497ee6c88f1258504a1185db51082e57 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1627,29 +1627,35 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ + /* + * We check "time_after" and "!chip_good" before checking + * "chip_good" to avoid the failure due to scheduling. + */ + if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) { xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); xip_disable(map, chip, adr); + ret = -EIO; break; } - if (chip_ready(map, adr)) + if (chip_good(map, adr, datum)) break; /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); } + /* Did we succeed? */ - if (!chip_good(map, adr, datum)) { + if (ret) { /* reset on all failures. 
*/ map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_RETRIES) + if (++retry_cnt <= MAX_RETRIES) { + ret = 0; goto retry; - - ret = -EIO; + } } xip_enable(map, chip, adr); op_done: diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index e514d57a0419defecb8dcbbc8be4604aea1321da..aa983422aa970f1035201a1a4841b7a09d9acc3f 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers" config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH - select BCH_CONST_PARAMS + select BCH_CONST_PARAMS if !MTD_NAND_BCH select BITREVERSE help This provides an MTD device driver for the M-Systems DiskOnChip diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 270d3c9580c51195ccb6b05e3719e98fb1836031..c4a1d04b8c800d1b090e99dc9975025d4b61009c 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -90,7 +90,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, SPI_MEM_OP_ADDR(nor->addr_width, to, 1), SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(len, buf, 1)); - size_t remaining = len; int ret; /* get transfer protocols. */ @@ -101,22 +100,16 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) op.addr.nbytes = 0; - while (remaining) { - op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX; - ret = spi_mem_adjust_op_size(flash->spimem, &op); - if (ret) - return ret; - - ret = spi_mem_exec_op(flash->spimem, &op); - if (ret) - return ret; + ret = spi_mem_adjust_op_size(flash->spimem, &op); + if (ret) + return ret; + op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes; - op.addr.val += op.data.nbytes; - remaining -= op.data.nbytes; - op.data.buf.out += op.data.nbytes; - } + ret = spi_mem_exec_op(flash->spimem, &op); + if (ret) + return ret; - return len; + return op.data.nbytes; } /* diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 986f81d2f93e3d8cdceed38250dbdc8d48046663..47ad0766affa9b234caafeb32f14905897da0fff 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -592,6 +592,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, return 0; } +/* + * The purpose of this function is to ensure a memcpy_toio() with byte writes + * only. Its structure is inspired from the ARM implementation of _memcpy_toio() + * which also does single byte writes but cannot be used here as this is just an + * implementation detail and not part of the API. Not mentioning the comment + * stating that _memcpy_toio() should be optimized. 
+ */ +static void spear_smi_memcpy_toio_b(volatile void __iomem *dest, + const void *src, size_t len) +{ + const unsigned char *from = src; + + while (len) { + len--; + writeb(*from, dest); + from++; + dest++; + } +} + static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, void __iomem *dest, const void *src, size_t len) { @@ -614,7 +634,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, ctrlreg1 = readl(dev->io_base + SMI_CR1); writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1); - memcpy_toio(dest, src, len); + /* + * In Write Burst mode (WB_MODE), the spec states that writes must be: + * - incremental + * - of the same size + * The ARM implementation of memcpy_toio() will optimize the number of + * I/O by using as many 4-byte writes as possible, surrounded by + * 2-byte/1-byte access if: + * - the destination is not 4-byte aligned + * - the length is not a multiple of 4-byte. + * Avoid this alternation of write access size by using our own 'byte + * access' helper if at least one of the two conditions above is true. + */ + if (IS_ALIGNED(len, sizeof(u32)) && + IS_ALIGNED((uintptr_t)dest, sizeof(u32))) + memcpy_toio(dest, src, len); + else + spear_smi_memcpy_toio_b(dest, src, len); writel(ctrlreg1, dev->io_base + SMI_CR1); diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 55d4a77f3b7f5732b1db91e2b5910d9488228ee1..533096c88ae19f40276e025e6f7ba07dbfd8068e 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -2120,10 +2120,12 @@ static int stfsm_probe(struct platform_device *pdev) (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20), fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10)); - return mtd_device_register(&fsm->mtd, NULL, 0); - + ret = mtd_device_register(&fsm->mtd, NULL, 0); + if (ret) { err_clk_unprepare: - clk_disable_unprepare(fsm->clk); + clk_disable_unprepare(fsm->clk); + } + return ret; } diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 9d972369321755b5560c371626d9c8bf9a8d8271..2e3a8da3ce72c9ab6545d0da540dd0d42c2783d7 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c @@ -238,7 +238,7 @@ static int gpio_flash_probe(struct platform_device *pdev) state->map.copy_to = gf_copy_to; state->map.bankwidth = pdata->width; state->map.size = state->win_size * (1 << state->gpio_count); - state->map.virt = ioremap_nocache(memory->start, state->map.size); + state->map.virt = ioremap_nocache(memory->start, state->win_size); if (!state->map.virt) return -ENOMEM; diff --git a/drivers/mtd/maps/physmap_of_core.c b/drivers/mtd/maps/physmap_of_core.c index 4129535b8e46f34e8891c3d86f0c6fb5160f6095..ece605d78c215670af7334fff835fb095e155bd0 100644 --- a/drivers/mtd/maps/physmap_of_core.c +++ b/drivers/mtd/maps/physmap_of_core.c @@ -31,7 +31,6 @@ struct of_flash_list { struct mtd_info *mtd; struct map_info map; - struct resource *res; }; struct of_flash { @@ -56,18 +55,10 @@ static int of_flash_remove(struct platform_device *dev) mtd_concat_destroy(info->cmtd); } - for (i = 0; i < info->list_size; i++) { + for (i = 0; i < info->list_size; i++) if (info->list[i].mtd) map_destroy(info->list[i].mtd); - if (info->list[i].map.virt) - iounmap(info->list[i].map.virt); - - if (info->list[i].res) { - release_resource(info->list[i].res); - kfree(info->list[i].res); - } - } return 0; } @@ -215,10 +206,11 @@ static int of_flash_probe(struct platform_device *dev) err = -EBUSY; res_size = resource_size(&res); - 
info->list[i].res = request_mem_region(res.start, res_size, - dev_name(&dev->dev)); - if (!info->list[i].res) + info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res); + if (IS_ERR(info->list[i].map.virt)) { + err = PTR_ERR(info->list[i].map.virt); goto err_out; + } err = -ENXIO; width = of_get_property(dp, "bank-width", NULL); @@ -246,15 +238,6 @@ static int of_flash_probe(struct platform_device *dev) if (err) goto err_out; - err = -ENOMEM; - info->list[i].map.virt = ioremap(info->list[i].map.phys, - info->list[i].map.size); - if (!info->list[i].map.virt) { - dev_err(&dev->dev, "Failed to ioremap() flash" - " region\n"); - goto err_out; - } - simple_map_init(&info->list[i].map); /* diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 29c0bfd74e8a19e1ce37d97033d40e959a8b5348..40b2cbd8a0b51fa997b2101ecc8a2451073501b3 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -521,7 +521,7 @@ static void blktrans_notify_add(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; - if (mtd->type == MTD_ABSENT) + if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME) return; list_for_each_entry(tr, &blktrans_majors, list) @@ -564,7 +564,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) - if (mtd->type != MTD_ABSENT) + if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME) tr->add_mtd(tr, mtd); mutex_unlock(&mtd_table_mutex); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 9887bda317cd9daabe4de8515587e5a11803a431..b31c868019adada77b86e028974203f09251739e 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -7,7 +7,7 @@ extern struct mutex mtd_table_mutex; struct mtd_info *__mtd_next_device(int i); -int add_mtd_device(struct mtd_info *mtd); +int __must_check add_mtd_device(struct mtd_info *mtd); int del_mtd_device(struct mtd_info *mtd); int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 99c460facd5e97702896aae893d4df258320c0f2..10c53364aa70c1849b0768ee330293759919c97e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; + + /* Initialize ->erasesize to make add_mtd_device() happy. 
*/ + slave->mtd.erasesize = parent->erasesize; + printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; @@ -608,10 +612,21 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, list_add(&new->list, &mtd_partitions); mutex_unlock(&mtd_partitions_mutex); - add_mtd_device(&new->mtd); + ret = add_mtd_device(&new->mtd); + if (ret) + goto err_remove_part; mtd_add_partition_attrs(new); + return 0; + +err_remove_part: + mutex_lock(&mtd_partitions_mutex); + list_del(&new->list); + mutex_unlock(&mtd_partitions_mutex); + + free_partition(new); + return ret; } EXPORT_SYMBOL_GPL(mtd_add_partition); @@ -702,22 +717,31 @@ int add_mtd_partitions(struct mtd_info *master, { struct mtd_part *slave; uint64_t cur_offset = 0; - int i; + int i, ret; printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); for (i = 0; i < nbparts; i++) { slave = allocate_partition(master, parts + i, i, cur_offset); if (IS_ERR(slave)) { - del_mtd_partitions(master); - return PTR_ERR(slave); + ret = PTR_ERR(slave); + goto err_del_partitions; } mutex_lock(&mtd_partitions_mutex); list_add(&slave->list, &mtd_partitions); mutex_unlock(&mtd_partitions_mutex); - add_mtd_device(&slave->mtd); + ret = add_mtd_device(&slave->mtd); + if (ret) { + mutex_lock(&mtd_partitions_mutex); + list_del(&slave->list); + mutex_unlock(&mtd_partitions_mutex); + + free_partition(slave); + goto err_del_partitions; + } + mtd_add_partition_attrs(slave); /* Look for subpartitions */ parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); @@ -726,6 +750,11 @@ int add_mtd_partitions(struct mtd_info *master, } return 0; + +err_del_partitions: + del_mtd_partitions(master); + + return ret; } static DEFINE_SPINLOCK(part_parser_lock); diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c index 56cde38b92c034e28b1428d7f11ac57c25753fad..64af6898131d656401a42135855c22e183df4109 100644 --- a/drivers/mtd/nand/bbt.c +++ b/drivers/mtd/nand/bbt.c @@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand) unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, BITS_PER_LONG); - nand->bbt.cache = kzalloc(nwords, GFP_KERNEL); + nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache), + GFP_KERNEL); if (!nand->bbt.cache) return -ENOMEM; @@ -122,7 +123,7 @@ int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, unsigned int rbits = bits_per_block + offs - BITS_PER_LONG; pos[1] &= ~GENMASK(rbits - 1, 0); - pos[1] |= val >> rbits; + pos[1] |= val >> (bits_per_block - rbits); } return 0; diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c index acad17ec6581a247f3377b27024cdb839a75a0a1..bfb5cee1b472249ceb7b0baae6e0dbf7e49e31cc 100644 --- a/drivers/mtd/nand/onenand/generic.c +++ b/drivers/mtd/nand/onenand/generic.c @@ -56,7 +56,12 @@ static int generic_onenand_probe(struct platform_device *pdev) } info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : NULL; - info->onenand.irq = platform_get_irq(pdev, 0); + + err = platform_get_irq(pdev, 0); + if (err < 0) + goto out_iounmap; + + info->onenand.irq = err; info->mtd.dev.parent = &pdev->dev; info->mtd.priv = &info->onenand; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index a068b214ebaa7970fac10d1b0ae4b2af0602e9ae..5ee507ae9e2be302625f8d4c7c0327001569f63a 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1826,7 +1826,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc) ret = of_property_read_u32(np, "#size-cells", &val); if (ret) { - dev_err(dev, "missing #address-cells property\n"); + dev_err(dev, "missing #size-cells property\n"); return ret; } @@ -1982,13 +1982,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, nc->mck = of_clk_get(dev->parent->of_node, 0); if (IS_ERR(nc->mck)) { dev_err(dev, "Failed to retrieve MCK clk\n"); - return PTR_ERR(nc->mck); + ret = PTR_ERR(nc->mck); + goto out_release_dma; } np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); if (!np) { dev_err(dev, "Missing or invalid atmel,smc property\n"); - return -EINVAL; + ret = -EINVAL; + goto out_release_dma; } nc->smc = syscon_node_to_regmap(np); @@ -1996,10 +1998,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, if (IS_ERR(nc->smc)) { ret = PTR_ERR(nc->smc); dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret); - return ret; + goto out_release_dma; } return 0; + +out_release_dma: + if (nc->dmac) + dma_release_channel(nc->dmac); + + return ret; } static int @@ -2061,8 +2069,11 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc) int ret; nand_np = dev->of_node; - nfc_np = of_find_compatible_node(dev->of_node, NULL, - "atmel,sama5d3-nfc"); + nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc"); + if (!nfc_np) { + dev_err(dev, "Could not find device node for sama5d3-nfc\n"); + return -ENODEV; + } nc->clk = of_clk_get(nfc_np, 0); if (IS_ERR(nc->clk)) { @@ -2472,15 +2483,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev) } if (caps->legacy_of_bindings) { + struct device_node *nfc_node; u32 ale_offs = 21; /* * If we are parsing legacy DT props and the DT contains a * valid NFC node, forward the request to the sama5 logic. 
*/ - if (of_find_compatible_node(pdev->dev.of_node, NULL, - "atmel,sama5d3-nfc")) + nfc_node = of_get_compatible_child(pdev->dev.of_node, + "atmel,sama5d3-nfc"); + if (nfc_node) { caps = &atmel_sama5_nand_caps; + of_node_put(nfc_node); + } /* * Even if the compatible says we are dealing with an diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 555a74e15269da45f2b4685dcd149cfd88f47438..9d3997840889b2b3139e54ecbd0729cf507f74ae 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -876,23 +876,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev, { struct platform_device *pdev; struct atmel_pmecc *pmecc, **ptr; + int ret; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + pmecc = platform_get_drvdata(pdev); + if (!pmecc) { + ret = -EPROBE_DEFER; + goto err_put_device; + } ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - get_device(&pdev->dev); - pmecc = platform_get_drvdata(pdev); + if (!ptr) { + ret = -ENOMEM; + goto err_put_device; + } *ptr = pmecc; devres_add(userdev, ptr); return pmecc; + +err_put_device: + put_device(&pdev->dev); + return ERR_PTR(ret); } static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 }; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 4b90d5b380c2503fc88426890f719bd018d6d2f7..6fbca4ae1113f5e9c0ea984bf7cd30bcfdf87d4b 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1633,7 +1633,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR) | ((u64)(brcmnand_read_reg(ctrl, diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index b864b93dd289ed6eda8b2591006a6ab57922c1b7..2242e999a76bf0b827f7905f8d3910e994f18c48 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -28,6 +28,7 @@ MODULE_LICENSE("GPL"); #define DENALI_NAND_NAME "denali-nand" +#define DENALI_DEFAULT_OOB_SKIP_BYTES 8 /* for Indexed Addressing */ #define DENALI_INDEXED_CTRL 0x00 @@ -1105,12 +1106,17 @@ static void denali_hw_init(struct denali_nand_info *denali) denali->revision = swab16(ioread32(denali->reg + REVISION)); /* - * tell driver how many bit controller will skip before - * writing ECC code in OOB, this register may be already - * set by firmware. So we read this value out. - * if this value is 0, just let it be. + * Set how many bytes should be skipped before writing data in OOB. + * If a non-zero value has already been set (by firmware or something), + * just use it. Otherwise, set the driver default. 
*/ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); + if (!denali->oob_skip_bytes) { + denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES; + iowrite32(denali->oob_skip_bytes, + denali->reg + SPARE_AREA_SKIP_BYTES); + } + denali_detect_max_banks(denali); iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 24f59d0066afdd77d586f29b944f566e0038d98c..7e7729df782780728826f1bd113bd1199d5a050b 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -30,6 +30,7 @@ #include #include #include +#include #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ @@ -761,7 +762,7 @@ static const struct nand_controller_ops fsl_ifc_controller_ops = { .attach_chip = fsl_ifc_attach_chip, }; -static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) +static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) { struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; @@ -769,6 +770,27 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) uint32_t csor = 0, csor_8k = 0, csor_ext = 0; uint32_t cs = priv->bank; + if (ctrl->version < FSL_IFC_VERSION_1_1_0) + return 0; + + if (ctrl->version > FSL_IFC_VERSION_1_1_0) { + u32 ncfgr, status; + int ret; + + /* Trigger auto initialization */ + ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr); + ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN, &ifc_runtime->ifc_nand.ncfgr); + + /* Wait until done */ + ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr, + status, !(status & IFC_NAND_NCFGR_SRAM_INIT_EN), + 10, IFC_TIMEOUT_MSECS * 1000); + if (ret) + dev_err(priv->dev, "Failed to initialize SRAM!\n"); + + return ret; + } + /* Save CSOR and CSOR_ext */ csor = ifc_in32(&ifc_global->csor_cs[cs].csor); csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); @@ -805,12 +827,16 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, msecs_to_jiffies(IFC_TIMEOUT_MSECS)); - if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) + if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) { pr_err("fsl-ifc: Failed to Initialise SRAM\n"); + return -ETIMEDOUT; + } /* Restore CSOR and CSOR_ext */ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); + + return 0; } static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) @@ -821,6 +847,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) struct nand_chip *chip = &priv->chip; struct mtd_info *mtd = nand_to_mtd(&priv->chip); u32 csor; + int ret; /* Fill in fsl_ifc_mtd structure */ mtd->dev.parent = priv->dev; @@ -914,8 +941,9 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) chip->ecc.algo = NAND_ECC_HAMMING; } - if (ctrl->version >= FSL_IFC_VERSION_1_1_0) - fsl_ifc_sram_init(priv); + ret = fsl_ifc_sram_init(priv); + if (ret) + return ret; /* * As IFC version 2.0.0 has 16KB of internal SRAM as compared to older diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index 88ea2203e263bcdd1e54d3c4abc558fb42a1c895..322a008290e52410eacd2f6fd24c981a4284e772 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) /* * Reset BCH here, too. 
We got failures otherwise :( - * See later BCH reset for explanation of MX23 handling + * See later BCH reset for explanation of MX23 and MX28 handling */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. - * On the other hand, the MX28 needs the reset, because one case has been - * seen where the BCH produced ECC errors constantly after 10000 - * consecutive reboots. The latter case has not been seen on the MX23 - * yet, still we don't know if it could happen there as well. + * and MX28. */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index bc2ef52097834f7c43194835a5177d102e6e2c88..7a84a8f05b46d14abfbc87f4b4bc18127934b5d9 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -444,9 +444,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask) writel_relaxed(reg & ~int_mask, nfc->regs + NDCR); } -static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask) +static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask) { + u32 reg; + + reg = readl_relaxed(nfc->regs + NDSR); writel_relaxed(int_mask, nfc->regs + NDSR); + + return reg & int_mask; } static void marvell_nfc_force_byte_access(struct nand_chip *chip, @@ -613,6 +618,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip) static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); + u32 pending; int ret; /* Timeout is expressed in ms */ @@ -625,8 +631,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) ret = wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(timeout_ms)); marvell_nfc_disable_int(nfc, NDCR_RDYM); - marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1)); - if (!ret) { + pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1)); + + /* + * In case the interrupt was not served in the required time frame, + * check if the ISR was not served or if something went actually wrong. 
+ */ + if (ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } @@ -686,7 +697,7 @@ static irqreturn_t marvell_nfc_isr(int irq, void *dev_id) marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT); - if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ))) + if (st & (NDSR_RDY(0) | NDSR_RDY(1))) complete(&nfc->complete); return IRQ_HANDLED; @@ -2699,24 +2710,23 @@ static int marvell_nfc_init(struct marvell_nfc *nfc) struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(np, "marvell,system-controller"); - u32 reg; if (IS_ERR(sysctrl_base)) return PTR_ERR(sysctrl_base); - reg = GENCONF_SOC_DEVICE_MUX_NFC_EN | - GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST | - GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST | - GENCONF_SOC_DEVICE_MUX_NFC_INT_EN; - regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg); + regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, + GENCONF_SOC_DEVICE_MUX_NFC_EN | + GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST | + GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST | + GENCONF_SOC_DEVICE_MUX_NFC_INT_EN); - regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, ®); - reg |= GENCONF_CLK_GATING_CTRL_ND_GATE; - regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg); + regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, + GENCONF_CLK_GATING_CTRL_ND_GATE, + GENCONF_CLK_GATING_CTRL_ND_GATE); - regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, ®); - reg |= GENCONF_ND_CLK_CTRL_EN; - regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg); + regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL, + GENCONF_ND_CLK_CTRL_EN, + GENCONF_ND_CLK_CTRL_EN); } /* Configure the DMA if appropriate */ diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 6432bd70c3b3913a71442744ad122b727e12bc30..9e4a78a808028739d879ebf2124b4a79973b54e7 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -51,6 +51,7 @@ struct mtk_ecc_caps { u32 err_mask; + u32 err_shift; const u8 *ecc_strength; const u32 *ecc_regs; u8 num_ecc_strength; @@ -84,7 +85,7 @@ static const u8 ecc_strength_mt2712[] = { }; static const u8 ecc_strength_mt7622[] = { - 4, 6, 8, 10, 12, 14, 16 + 4, 6, 8, 10, 12 }; enum mtk_ecc_regs { @@ -229,7 +230,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, for (i = 0; i < sectors; i++) { offset = (i >> 2) << 2; err = readl(ecc->regs + ECC_DECENUM0 + offset); - err = err >> ((i % 4) * 8); + err = err >> ((i % 4) * ecc->caps->err_shift); err &= ecc->caps->err_mask; if (err == ecc->caps->err_mask) { /* uncorrectable errors */ @@ -453,6 +454,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits); static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { .err_mask = 0x3f, + .err_shift = 8, .ecc_strength = ecc_strength_mt2701, .ecc_regs = mt2701_ecc_regs, .num_ecc_strength = 20, @@ -463,6 +465,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { .err_mask = 0x7f, + .err_shift = 8, .ecc_strength = ecc_strength_mt2712, .ecc_regs = mt2712_ecc_regs, .num_ecc_strength = 23, @@ -472,10 +475,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { }; static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { - .err_mask = 0x3f, + .err_mask = 0x1f, + .err_shift = 5, .ecc_strength = ecc_strength_mt7622, .ecc_regs = mt7622_ecc_regs, - .num_ecc_strength = 7, + .num_ecc_strength = 5, .ecc_mode_shift = 4, .parity_bits = 13, .pg_irq_sel = 0, diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 
57b5ed1699e386e51865ba77c61c6561ffc1775f..ab5a8778c4b249ec08b8176dbc9b770898de1b84 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -509,7 +509,8 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline, { struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); const struct nand_sdr_timings *timings; - u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; + u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0; + u32 thold; timings = nand_get_sdr_timings(conf); if (IS_ERR(timings)) @@ -545,11 +546,28 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline, twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; twh &= 0xf; - twst = timings->tWP_min / 1000; + /* Calculate real WE#/RE# hold time in nanosecond */ + thold = (twh + 1) * 1000000 / rate; + /* nanosecond to picosecond */ + thold *= 1000; + + /* + * WE# low level time should be expaned to meet WE# pulse time + * and WE# cycle time at the same time. + */ + if (thold < timings->tWC_min) + twst = timings->tWC_min - thold; + twst = max(timings->tWP_min, twst) / 1000; twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; twst &= 0xf; - trlt = max(timings->tREA_max, timings->tRP_min) / 1000; + /* + * RE# low level time should be expaned to meet RE# pulse time, + * RE# access time and RE# cycle time at the same time. + */ + if (thold < timings->tRC_min) + trlt = timings->tRC_min - thold; + trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000; trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; trlt &= 0xf; @@ -845,19 +863,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page); } -static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start, + u32 sectors) { struct nand_chip *chip = mtd_to_nand(mtd); struct mtk_nfc *nfc = nand_get_controller_data(chip); struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); struct mtk_ecc_stats stats; + u32 reg_size = mtk_nand->fdm.reg_size; int rc, i; rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; if (rc) { memset(buf, 0xff, sectors * chip->ecc.size); for (i = 0; i < sectors; i++) - memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); + memset(oob_ptr(chip, start + i), 0xff, reg_size); return 0; } @@ -877,7 +897,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u32 spare = mtk_nand->spare_per_sector; u32 column, sectors, start, end, reg; dma_addr_t addr; - int bitflips; + int bitflips = 0; size_t len; u8 *buf; int rc; @@ -944,14 +964,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, if (rc < 0) { dev_err(nfc->dev, "subpage done timeout\n"); bitflips = -EIO; - } else { - bitflips = 0; - if (!raw) { - rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); - bitflips = rc < 0 ? -ETIMEDOUT : - mtk_nfc_update_ecc_stats(mtd, buf, sectors); - mtk_nfc_read_fdm(chip, start, sectors); - } + } else if (!raw) { + rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); + bitflips = rc < 0 ? 
-ETIMEDOUT : + mtk_nfc_update_ecc_stats(mtd, buf, start, sectors); + mtk_nfc_read_fdm(chip, start, sectors); } dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index 4ffbb26e76d6dad05be6418c2ac31c091eae1b36..6f595455a8c23cf5b326f71e13cee7ad1a84b630 100644 --- a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -414,7 +414,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip) if (ret) pr_warn("failed to initialize read-retry infrastructure"); - return 0; + return ret; } static void hynix_nand_extract_oobsize(struct nand_chip *chip, diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index f5dc0a7a2456325a92142ce0f4b537a7dcbbe79d..fb401c25732c7abf61119a5830859d5992a4bc0f 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -400,6 +400,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) return MICRON_ON_DIE_UNSUPPORTED; + /* + * It seems that there are devices which do not support ECC officially. + * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports + * enabling the ECC feature but don't reflect that to the READ_ID table. + * So we have to guarantee that we disable the ECC feature directly + * after we did the READ_ID table command. Later we can evaluate the + * ECC_ENABLE support. + */ ret = micron_nand_on_die_ecc_setup(chip, true); if (ret) return MICRON_ON_DIE_UNSUPPORTED; @@ -408,13 +416,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) if (ret) return MICRON_ON_DIE_UNSUPPORTED; - if (!(id[4] & MICRON_ID_ECC_ENABLED)) - return MICRON_ON_DIE_UNSUPPORTED; - ret = micron_nand_on_die_ecc_setup(chip, false); if (ret) return MICRON_ON_DIE_UNSUPPORTED; + if (!(id[4] & MICRON_ID_ECC_ENABLED)) + return MICRON_ON_DIE_UNSUPPORTED; + ret = nand_readid_op(chip, 0, id, sizeof(id)); if (ret) return MICRON_ON_DIE_UNSUPPORTED; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 4546ac0bed4a0eb7435c8806c2323170327f61f3..b1683d7a7e04d72da36d8ca656faaecc1b8e9fcb 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -1938,7 +1938,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) case NAND_OMAP_PREFETCH_DMA: dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - info->dma = dma_request_chan(dev, "rxtx"); + info->dma = dma_request_chan(dev->parent, "rxtx"); if (IS_ERR(info->dma)) { dev_err(dev, "DMA engine request failed\n"); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index d1d470bb32e422ca6597fb6bdcbff3dadca32e07..0e4be6814fc7c61322cb65c3235131fb7f13da71 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -10,7 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - #include #include #include @@ -23,7 +22,6 @@ #include #include #include -#include /* XXX: drivers shall never use this directly! 
*/ /* NANDc reg offsets */ #define NAND_FLASH_CMD 0x00 @@ -151,15 +149,15 @@ #define NAND_VERSION_MINOR_SHIFT 16 /* NAND OP_CMDs */ -#define PAGE_READ 0x2 -#define PAGE_READ_WITH_ECC 0x3 -#define PAGE_READ_WITH_ECC_SPARE 0x4 -#define PROGRAM_PAGE 0x6 -#define PAGE_PROGRAM_WITH_ECC 0x7 -#define PROGRAM_PAGE_SPARE 0x9 -#define BLOCK_ERASE 0xa -#define FETCH_ID 0xb -#define RESET_DEVICE 0xd +#define OP_PAGE_READ 0x2 +#define OP_PAGE_READ_WITH_ECC 0x3 +#define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +#define OP_PROGRAM_PAGE 0x6 +#define OP_PAGE_PROGRAM_WITH_ECC 0x7 +#define OP_PROGRAM_PAGE_SPARE 0x9 +#define OP_BLOCK_ERASE 0xa +#define OP_FETCH_ID 0xb +#define OP_RESET_DEVICE 0xd /* Default Value for NAND_DEV_CMD_VLD */ #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ @@ -692,11 +690,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read) if (read) { if (host->use_ecc) - cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; + cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; else - cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; + cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE; } else { - cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; + cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; } if (host->use_ecc) { @@ -1170,7 +1168,7 @@ static int nandc_param(struct qcom_nand_host *host) * in use. we configure the controller to perform a raw read of 512 * bytes to read onfi params */ - nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE); nandc_set_reg(nandc, NAND_ADDR0, 0); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE @@ -1224,7 +1222,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); nandc_set_reg(nandc, NAND_FLASH_CMD, - BLOCK_ERASE | PAGE_ACC | LAST_PAGE); + OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE); nandc_set_reg(nandc, NAND_ADDR0, page_addr); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, @@ -1255,7 +1253,7 @@ static int read_id(struct qcom_nand_host *host, int column) if (column == -1) return 0; - nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID); nandc_set_reg(nandc, NAND_ADDR0, column); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, @@ -1276,7 +1274,7 @@ static int reset(struct qcom_nand_host *host) struct nand_chip *chip = &host->chip; struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); - nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE); nandc_set_reg(nandc, NAND_EXEC_CMD, 1); write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); @@ -2839,6 +2837,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, if (ret) return ret; + if (nandc->props->is_bam) { + free_bam_transaction(nandc); + nandc->bam_txn = alloc_bam_transaction(nandc); + if (!nandc->bam_txn) { + dev_err(nandc->dev, + "failed to allocate bam transaction\n"); + return -ENOMEM; + } + } + ret = mtd_device_register(mtd, NULL, 0); if (ret) nand_cleanup(chip); @@ -2853,16 +2861,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) struct qcom_nand_host *host; int ret; - if (nandc->props->is_bam) { - free_bam_transaction(nandc); - nandc->bam_txn = alloc_bam_transaction(nandc); - if (!nandc->bam_txn) { - dev_err(nandc->dev, - "failed to allocate bam 
transaction\n"); - return -ENOMEM; - } - } - for_each_available_child_of_node(dn, child) { host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { @@ -2958,10 +2956,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (!nandc->base_dma) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -2970,6 +2964,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -2981,15 +2979,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index bb8866e05ff76cc2be9bb1d24f2f5eaadb3799bc..1ab7d6401303c3e46766bb2723ee74cd632fdba5 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -399,7 +399,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, dma_addr_t dma_addr; dma_cookie_t cookie; uint32_t reg; - int ret; + int ret = 0; + unsigned long time_left; if (dir == DMA_FROM_DEVICE) { chan = flctl->chan_fifo0_rx; @@ -440,13 +441,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, goto out; } - ret = + time_left = wait_for_completion_timeout(&flctl->dma_complete, msecs_to_jiffies(3000)); - if (ret <= 0) { + if (time_left == 0) { dmaengine_terminate_all(chan); dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); + ret = -ETIMEDOUT; } out: @@ -456,7 +458,7 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, dma_unmap_single(chan->device->dev, dma_addr, len, dir); - /* ret > 0 is success */ + /* ret == 0 is success */ return ret; } @@ -480,7 +482,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset) /* initiate DMA transfer */ if (flctl->chan_fifo0_rx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0) + !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE)) goto convert; /* DMA success */ /* do polling transfer */ @@ -539,7 +541,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, /* initiate DMA transfer */ if (flctl->chan_fifo0_tx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0) + !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE)) return; /* DMA success */ /* do polling transfer */ diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 1f0b7ee38df5627acf1d0f675fc5d85abb7aea67..5b5f4d25a3e126e44ab3584d27a46f87e00b0836 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1397,7 +1397,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, sunxi_nfc_randomizer_enable(mtd); writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, - nfc->regs + NFC_REG_RCMD_SET); + nfc->regs + NFC_REG_WCMD_SET); dma_async_issue_pending(nfc->dmac); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 30f83649c48140e63d7f8871851a7f4ee3320ab6..48b3ab26b12492d8b2efbc41e1d8eb4a5eda72d6 100644 --- 
a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, struct nand_device *nand = spinand_to_nand(spinand); struct mtd_info *mtd = nanddev_to_mtd(nand); struct nand_page_io_req adjreq = *req; - unsigned int nbytes = 0; - void *buf = NULL; + void *buf = spinand->databuf; + unsigned int nbytes; u16 column = 0; int ret; - memset(spinand->databuf, 0xff, - nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand)); + /* + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset + * the cache content to 0xFF (depends on vendor implementation), so we + * must fill the page cache entirely even if we only want to program + * the data portion of the page, otherwise we might corrupt the BBM or + * user data previously programmed in OOB area. + */ + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); + memset(spinand->databuf, 0xff, nbytes); + adjreq.dataoffs = 0; + adjreq.datalen = nanddev_page_size(nand); + adjreq.databuf.out = spinand->databuf; + adjreq.ooblen = nanddev_per_page_oobsize(nand); + adjreq.ooboffs = 0; + adjreq.oobbuf.out = spinand->oobbuf; - if (req->datalen) { + if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, req->datalen); - adjreq.dataoffs = 0; - adjreq.datalen = nanddev_page_size(nand); - adjreq.databuf.out = spinand->databuf; - nbytes = adjreq.datalen; - buf = spinand->databuf; - } if (req->ooblen) { if (req->mode == MTD_OPS_AUTO_OOB) @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, else memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, req->ooblen); - - adjreq.ooblen = nanddev_per_page_oobsize(nand); - adjreq.ooboffs = 0; - nbytes += nanddev_per_page_oobsize(nand); - if (!buf) { - buf = spinand->oobbuf; - column = nanddev_page_size(nand); - } } spinand_cache_op_adjust_colum(spinand, &adjreq, &column); @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, /* * We need to use the RANDOM LOAD CACHE operation if there's - * more than one iteration, because the LOAD operation resets - * the cache to 0xff. + * more than one iteration, because the LOAD operation might + * reset the cache to 0xff. 
*/ if (nbytes) { column = op.addr.val; @@ -574,12 +572,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, if (ret == -EBADMSG) { ecc_failed = true; mtd->ecc_stats.failed++; - ret = 0; } else { mtd->ecc_stats.corrected += ret; max_bitflips = max_t(unsigned int, max_bitflips, ret); } + ret = 0; ops->retlen += iter.req.datalen; ops->oobretlen += iter.req.ooblen; } @@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand) for (i = 0; i < nand->memorg.ntargets; i++) { ret = spinand_select_target(spinand, i); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; } ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 98f6b9c4b68403833d3fecd94d70a73216216bbb..d16b57081c95ab376d9c89afc1fa1461d48eb1a4 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -10,6 +10,7 @@ #include #define SPINAND_MFR_MACRONIX 0xC2 +#define MACRONIX_ECCSR_MASK 0x0F static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) SPI_MEM_OP_DUMMY(1, 1), SPI_MEM_OP_DATA_IN(1, eccsr, 1)); - return spi_mem_exec_op(spinand->spimem, &op); + int ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + *eccsr &= MACRONIX_ECCSR_MASK; + return 0; } static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index f3bd86e136033d80e791fc996f0d9516d5e54d48..e57f7ba054bc56fab4286c22d81a1167ae651a9b 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -1091,9 +1091,9 @@ static void sm_release(struct mtd_blktrans_dev *dev) { struct sm_ftl *ftl = dev->priv; - mutex_lock(&ftl->mutex); del_timer_sync(&ftl->timer); cancel_work_sync(&ftl->flush_work); + mutex_lock(&ftl->mutex); sm_cache_flush(ftl); mutex_unlock(&ftl->mutex); } diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 6cc9c929ff571616abb601dc7c494cdfe3d34b61..ff2af58a9a4a9cdeb4af430cefd38cf7b91c911c 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -7,6 +7,15 @@ menuconfig MTD_SPI_NOR if MTD_SPI_NOR +config SPI_PHYTIUM_QUADSPI + tristate "Phytium Quad SPI Controller" + depends on ARCH_PHYTIUM || ARM + depends on OF && HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + This driver does not support generic SPI. The implementation only + supports SPI NOR. + config MTD_MT81xx_NOR tristate "Mediatek MT81xx SPI NOR flash controller" depends on HAS_IOMEM @@ -41,7 +50,7 @@ config SPI_ASPEED_SMC config SPI_ATMEL_QUADSPI tristate "Atmel Quad SPI Controller" - depends on ARCH_AT91 || (ARM && COMPILE_TEST) + depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110) depends on OF && HAS_IOMEM help This enables support for the Quad SPI controller in master mode. 
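The sm_ftl hunk above reorders sm_release() so that del_timer_sync() and cancel_work_sync() run before ftl->mutex is taken, presumably because the deferred flush work acquires that same mutex: cancelling it synchronously while the lock is held can deadlock. Below is a minimal sketch of that ordering only, using hypothetical names (my_dev, my_flush_work, my_release) rather than the sm_ftl code itself.

/*
 * Sketch of the quiesce-before-lock teardown ordering. Hypothetical
 * driver; cancel_work_sync() waits for a running handler, so it must
 * not be called while holding a lock that the handler itself takes.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_dev {
	struct mutex lock;
	struct timer_list timer;
	struct work_struct flush_work;
};

static void my_flush_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, flush_work);

	mutex_lock(&dev->lock);		/* the work handler takes dev->lock */
	/* ... flush cached data to the medium ... */
	mutex_unlock(&dev->lock);
}

static void my_release(struct my_dev *dev)
{
	/* Quiesce the timer and any queued or running work first... */
	del_timer_sync(&dev->timer);
	cancel_work_sync(&dev->flush_work);

	/* ...then it is safe to take the lock for the final flush. */
	mutex_lock(&dev->lock);
	/* final cache flush */
	mutex_unlock(&dev->lock);
}

The general rule the reordering follows: stop anything that can re-acquire a lock before taking that lock for teardown.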
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index f4c61d282abd5c29538d3e208615f799fcaad251..ebc8ce095bd08493749591bfeb70bb7a36d68eb6 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o +obj-$(CONFIG_SPI_PHYTIUM_QUADSPI) += phytium-quadspi.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 8e714fbfa52123d8ae8ff4c9750d134cc648792d..04cedd3a2bf6634c5d1f05ef3d27ba302d12935f 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, ndelay(cqspi->wr_delay); while (remaining > 0) { + size_t write_words, mod_bytes; + write_bytes = remaining > page_size ? page_size : remaining; - iowrite32_rep(cqspi->ahb_base, txbuf, - DIV_ROUND_UP(write_bytes, 4)); + write_words = write_bytes / 4; + mod_bytes = write_bytes % 4; + /* Write 4 bytes at a time then single bytes. */ + if (write_words) { + iowrite32_rep(cqspi->ahb_base, txbuf, write_words); + txbuf += (write_words * 4); + } + if (mod_bytes) { + unsigned int temp = 0xFFFFFFFF; + + memcpy(&temp, txbuf, mod_bytes); + iowrite32(temp, cqspi->ahb_base); + txbuf += mod_bytes; + } if (!wait_for_completion_timeout(&cqspi->transfer_complete, msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { @@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, goto failwr; } - txbuf += write_bytes; remaining -= write_bytes; if (remaining > 0) @@ -959,7 +972,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, return 0; } - dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM); + dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(nor->dev, dma_dst)) { dev_err(nor->dev, "dma mapping failed\n"); return -ENOMEM; @@ -994,9 +1007,9 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, } err_unmap: - dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM); + dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE); - return 0; + return ret; } static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 7d9620c7ff6c58aa2ea74b3eb57dd63799a7b934..1ff3430f82c883bcb6bcb80ed28ecb95374a19f6 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) { switch (cmd) { case SPINOR_OP_READ_1_1_4: + case SPINOR_OP_READ_1_1_4_4B: return SEQID_READ; case SPINOR_OP_WREN: return SEQID_WREN; @@ -543,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) /* trigger the LUT now */ seqid = fsl_qspi_get_seqid(q, cmd); + if (seqid < 0) + return seqid; + qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); @@ -671,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q) * causes the controller to clear the buffer, and use the sequence pointed * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. 
*/ -static void fsl_qspi_init_ahb_read(struct fsl_qspi *q) +static int fsl_qspi_init_ahb_read(struct fsl_qspi *q) { void __iomem *base = q->iobase; int seqid; @@ -696,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struct fsl_qspi *q) /* Set the default lut sequence for AHB Read. */ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); + if (seqid < 0) + return seqid; + qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, q->iobase + QUADSPI_BFGENCR); + + return 0; } /* This function was used to prepare and enable QSPI clock */ @@ -805,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) fsl_qspi_init_lut(q); /* Init for AHB read */ - fsl_qspi_init_ahb_read(q); - - return 0; + return fsl_qspi_init_ahb_read(q); } static const struct of_device_id fsl_qspi_dt_ids[] = { diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index c0976f2e3dd19925b06f155bd093176818465720..872b409226081a5933f527371c36541eafa04970 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev) static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, { }, diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index af0a220195163ceaf9382e78acf12926494258ba..d60cbf23d9aa05e61f51b10c62e5a13ec32fb67e 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, while (len > 0) { block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + /* Read cannot cross 4K boundary */ + block_size = min_t(loff_t, from + block_size, + round_up(from + 1, SZ_4K)) - from; + writel(from, ispi->base + FADDR); val = readl(ispi->base + HSFSTS_CTL); @@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, while (len > 0) { block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + /* Write cannot cross 4K boundary */ + block_size = min_t(loff_t, to + block_size, + round_up(to + 1, SZ_4K)) - to; + writel(to, ispi->base + FADDR); val = readl(ispi->base + HSFSTS_CTL); diff --git a/drivers/mtd/spi-nor/phytium-quadspi.c b/drivers/mtd/spi-nor/phytium-quadspi.c new file mode 100644 index 0000000000000000000000000000000000000000..58c2d78e86a597fc1a9c0efcf62b833adab5b0c6 --- /dev/null +++ b/drivers/mtd/spi-nor/phytium-quadspi.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium QuadSPI driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define QSPI_FLASH_CAP_REG 0x000 +#define QSPI_RD_CFG_REG 0x004 +#define QSPI_WR_CFG_REG 0x008 +#define QSPI_FLUSH_REG 0x00C +#define QSPI_CMD_PORT_REG 0x010 +#define QSPI_ADDR_PORT_REG 0x014 +#define QSPI_HD_PORT_REG 0x018 +#define QSPI_LD_PORT_REG 0x01C +#define QSPI_FUN_SET_REG 0x020 +#define QSPI_WIP_REG 0x024 +#define QSPI_WP_REG 0x028 +#define QSPI_MODE_REG 0x02C + +#define QSPI_FLASH_CAP_NUM_SHIFT 3 +#define QSPI_FLASH_CAP_NUM_MASK (0x3 << QSPI_FLASH_CAP_NUM_SHIFT) +#define QSPI_FLASH_CAP_CAP_SHIFT 0 +#define QSPI_FLASH_CAP_CAP_MASK (0x7 << QSPI_FLASH_CAP_CAP_SHIFT) + +#define QSPI_RD_CFG_RD_CMD_SHIFT 24 +#define QSPI_RD_CFG_RD_CMD_MASK (0xFF << QSPI_RD_CFG_RD_CMD_SHIFT) +#define QSPI_RD_CFG_RD_THROUGH_SHIFT 23 +#define QSPI_RD_CFG_RD_THROUGH_MASK (0x01 << QSPI_RD_CFG_RD_THROUGH_SHIFT) +#define QSPI_RD_CFG_RD_TRANSFER_SHIFT 20 +#define QSPI_RD_CFG_RD_TRANSFER_MASK (0x07 << QSPI_RD_CFG_RD_TRANSFER_SHIFT) +#define QSPI_RD_CFG_RD_ADDR_SEL_SHIFT 19 +#define QSPI_RD_CFG_RD_ADDR_SEL_MASK (0x1 << QSPI_RD_CFG_RD_ADDR_SEL_SHIFT) +#define QSPI_RD_CFG_RD_LATENCY_SHIFT 18 +#define QSPI_RD_CFG_RD_LATENCY_MASK (0x1 << QSPI_RD_CFG_RD_LATENCY_SHIFT) +#define QSPI_RD_CFG_MODE_BYTE_SHIFT 17 +#define QSPI_RD_CFG_MODE_BYTE_MASK (0x1 << QSPI_RD_CFG_MODE_BYTE_SHIFT) +#define QSPI_RD_CFG_CMD_SIGN_SHIFT 9 +#define QSPI_RD_CFG_CMD_SIGN_MASK (0xFF << QSPI_RD_CFG_CMD_SIGN_SHIFT) +#define QSPI_RD_CFG_DUMMY_SHIFT 4 +#define QSPI_RD_CFG_DUMMY_MASK (0x1F << QSPI_RD_CFG_DUMMY_SHIFT) +#define QSPI_RD_CFG_D_BUFFER_SHIFT 3 +#define QSPI_RD_CFG_D_BUFFER_MASK (0x1 << QSPI_RD_CFG_D_BUFFER_SHIFT) +#define QSPI_RD_CFG_RD_SCK_SEL_SHIFT 0 +#define QSPI_RD_CFG_RD_SCK_SEL_MASK (0x3 << QSPI_RD_CFG_RD_SCK_SEL_SHIFT) + +#define QSPI_WR_CFG_WR_CMD_SHIFT 24 +#define QSPI_WR_CFG_WR_CMD_MASK (0xFF << QSPI_WR_CFG_WR_CMD_SHIFT) +#define QSPI_WR_CFG_WR_WAIT_SHIFT 9 +#define QSPI_WR_CFG_WR_WAIT_MASK (0x01 << QSPI_WR_CFG_WR_WAIT_SHIFT) +#define QSPI_WR_CFG_WR_THROUGH_SHIFT 8 +#define QSPI_WR_CFG_WR_THROUGH_MAS (0x01 << QSPI_WR_CFG_WR_THROUGH_SHIFT) +#define QSPI_WR_CFG_WR_TRANSFER_SHIFT 5 +#define QSPI_WR_CFG_WR_TRANSFER_MASK (0X7 << QSPI_WR_CFG_WR_TRANSFER_SHIFT) +#define QSPI_WR_CFG_WR_ADDR_SEL_SHIFT 4 +#define QSPI_WR_CFG_WR_ADDR_SEL_MASK (0x1 << QSPI_WR_CFG_WR_ADDR_SEL_SHIFT) +#define QSPI_WR_CFG_WR_MODE_SHIFT 3 +#define QSPI_WR_CFG_WR_MODE (0x01 << QSPI_WR_CFG_WR_MODE_SHIFT) +#define QSPI_WR_CFG_WR_SCK_SEL_SHIFT 0 +#define QSPI_WR_CFG_WR_SCK_SEL_MASK (0x7 << QSPI_WR_CFG_WR_SCK_SEL_SHIFT) + +#define QSPI_FLUSH_EN (0x1 << 0) + +#define QSPI_CMD_PORT_CMD_SHIFT 24 +#define QSPI_CMD_PORT_CMD_MASK (0xFF << QSPI_CMD_PORT_CMD_SHIFT) +#define QSPI_CMD_PORT_WAIT_SHIFT 22 +#define QSPI_CMD_PORT_WAIT_MASK (0x1 << QSPI_CMD_PORT_WAIT_SHIFT) +#define QSPI_CMD_PORT_THROUGH_SHIFT 21 +#define QSPI_CMD_PORT_THROUGH_MASK (0x1 << QSPI_CMD_PORT_THROUGH_SHIFT) +#define QSPI_CMD_PORT_CS_SHIFT 19 +#define QSPI_CMD_PORT_CS_MASK (0x3 << QSPI_CMD_PORT_CS_SHIFT) +#define QSPI_CMD_PORT_TRANSFER_SHIFT 16 +#define QSPI_CMD_PORT_TRANSFER_MASK (0x7 << QSPI_CMD_PORT_TRANSFER_SHIFT) +#define QSPI_CMD_PORT_CMD_ADDR_SHIFT 15 +#define QSPI_CMD_PORT_CMD_ADDR_MASK (0x1 << QSPI_CMD_PORT_CMD_ADDR_SHIFT) +#define QSPI_CMD_PORT_LATENCY_SHIFT 14 +#define QSPI_CMD_PORT_LATENCY_MASK (0x1 << QSPI_CMD_PORT_LATENCY_SHIFT) +#define QSPI_CMD_PORT_DATA_TRANSFER_SHIFT 13 +#define QSPI_CMD_PORT_DATA_TRANSFER_MASK 
(0x1 << 13) +#define QSPI_CMD_PORT_SEL_SHIFT 12 +#define QSPI_CMD_PORT_SEL_MASK (0x1 << QSPI_CMD_PORT_SEL_SHIFT) +#define QSPI_CMD_PORT_DUMMY_SHIFT 7 +#define QSPI_CMD_PORT_DUMMY_MASK (0x1F << QSPI_CMD_PORT_DUMMY_SHIFT) +#define QSPI_CMD_PORT_P_BUFFER_SHIFT 6 +#define QSPI_CMD_PORT_P_BUFFER_MASK (0x1 << QSPI_CMD_PORT_P_BUFFER_SHIFT) +#define QSPI_CMD_PORT_RW_NUM_SHIFT 3 +#define QSPI_CMD_PORT_RW_NUM_MASK (0x7 << QSPI_CMD_PORT_RW_NUM_SHIFT) +#define QSPI_CMD_PORT_SCK_SEL_SHIFT 0 +#define QSPI_CMD_PORT_SCK_SEL_MASK (0x7 << QSPI_CMD_PORT_SCK_SEL_SHIFT) + +#define QSPI_FUN_SET_HOLD_SHIFT 24 +#define QSPI_FUN_SET_HOLD_MASK (0xFF << QSPI_FUN_SET_HOLD_SHIFT) +#define QSPI_FUN_SET_SETUP_SHIFT 16 +#define QSPI_FUN_SET_SETUP_MASK (0xFF << QSPI_FUN_SET_SETUP_SHIFT) +#define QSPI_FUN_SET_DELAY_SHIFT 0 +#define QSPI_FUN_SET_DELAY_MASK (0xFFFF << QSPI_FUN_SET_DELAY_SHIFT) + +#define QSPI_WIP_W_CMD_SHIFT 24 +#define QSPI_WIP_W_CMD_MASK (0xFF << QSPI_WIP_W_CMD_SHIFT) +#define QSPI_WIP_W_TRANSFER_SHIFT 3 +#define QSPI_WIP_W_TRANSFER_MASK (0x3 << QSPI_WIP_W_TRANSFER_SHIFT) +#define QSPI_WIP_W_SCK_SEL_SHIFT 0 +#define QSPI_WIP_W_SCK_SEL_MASK (0x7 << QSPI_WIP_W_SCK_SEL_SHIFT) + +#define QSPI_WP_EN_SHIFT 17 +#define QSPI_WP_EN_MASK (0x1 << QSPI_WP_EN_SHIFT) +#define QSPI_WP_IO2_SHIFT 16 +#define QSPI_WP_IO2_MASK (0x1 << QSPI_WP_IO2_SHIFT) +#define QSPI_WP_HOLD_SHIFT 8 +#define QSPI_WP_HOLD_MASK (0xFF << QSPI_WP_HOLD_SHIFT) +#define QSPI_WP_SETUP_SHIFT 0 +#define QSPI_WP_SETUP_MASK (0xFF << QSPI_WP_SETUP_SHIFT) + +#define QSPI_MODE_VALID_SHIFT 8 +#define QSPI_MODE_VALID_MASK (0xFF << QSPI_MODE_VALID_SHIFT) +#define QSPI_MODE_SHIFT 0 +#define QSPI_MODE_MASK (0xFF << QSPI_MODE_SHIFT) + +#define FSIZE_VAL(size) (__fls(size) - 1) + +#define PHYTIUM_MAX_MMAP_S SZ_512M +#define PHYTIUM_MAX_NORCHIP 4 + +#define PHYTIUM_QSPI_FIFO_SZ 32 +#define PHYTIUM_QSPI_FIFO_TIMEOUT_US 50000 +#define PHYTIUM_QSPI_BUSY_TIMEOUT_US 100000 + +#define PHYTIUM_SCK_SEL 0x05 +#define PHYTIUM_CMD_SCK_SEL 0x07 + +#define PHYTIUM_FMODE_MM 0x01 +#define PHYTIUM_FMODE_IN 0x02 + +/* + * the codes of the different commands + */ +#define CMD_WRDI 0x04 +#define CMD_RDID 0x9F +#define CMD_RDSR 0x05 +#define CMD_WREN 0x06 +#define CMD_RDAR 0x65 +#define CMD_P4E 0x20 +#define CMD_4P4E 0x21 +#define CMD_BE 0x60 +#define CMD_4BE 0xC7 +#define CMD_READ 0x03 +#define CMD_FAST_READ 0x0B +#define CMD_QOR 0x6B +#define CMD_QIOR 0xEB +#define CMD_DDRFR 0x0D +#define CMD_DDRQIOQ 0xED +#define CMD_PP 0x02 +#define CMD_QPP 0x32 +#define CMD_SE 0xD8 +#define CMD_4FAST_READ 0x0C +#define CMD_4READ 0x13 +#define CMD_4QOR 0x6C +#define CMD_4QIOR 0xEC +#define CMD_4DDRFR 0x0E +#define CMD_4DDRQIOR 0xEE +#define CMD_4PP 0x12 +#define CMD_4QPP 0x34 +#define CMD_4SE 0xDC + +#define PHYTIUM_QSPI_1_1_1 0 +#define PHYTIUM_QSPI_1_1_2 1 +#define PHYTIUM_QSPI_1_1_4 2 +#define PHYTIUM_QSPI_1_2_2 3 +#define PHYTIUM_QSPI_1_4_4 4 +#define PHYTIUM_QSPI_2_2_2 5 +#define PHYTIUM_QSPI_4_4_4 6 + +struct phytium_qspi_flash { + struct spi_nor nor; + struct phytium_qspi *qspi; + u32 cs; + u32 fsize; + u32 presc; + u32 clk_div; + u32 read_mode; + bool registered; + u32 prefetch_limit; + u32 addr_width; + u32 read_cmd; +}; + +struct phytium_qspi { + struct device *dev; + void __iomem *io_base; + void __iomem *mm_base; + resource_size_t mm_size; + u32 nor_num; + struct clk *clk; + u32 clk_rate; + struct phytium_qspi_flash flash[PHYTIUM_MAX_NORCHIP]; + + spinlock_t spinlock; + + /* + * to protect device configuration, could be different between + * 2 flash access (bk1, bk2) + */ + struct 
mutex lock; +}; + +/* Need to enable p_buffer */ +static int memcpy_from_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) +{ + int i; + u32 val = 0; + + if (!qspi || !buf) + return -EINVAL; + + for (i = 0; i < len; i++) { + if (i % 4 == 0) + val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); + + buf[i] = (u_char) (val >> (i % 4) * 8) & 0xFF; + } + + return 0; +} + +/* Not to enable p_buffer */ +static int memcpy_to_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len) +{ + u32 val = 0; + + if (!qspi || !buf || (len >= 8)) + return -EINVAL; + + if (len == 1) { + val = buf[0]; + } else if (len == 2) { + val = buf[1]; + val = (val << 8) + buf[0]; + } else if (len == 3) { + val = buf[2]; + val = (val << 8) + buf[1]; + val = (val << 8) + buf[0]; + } else if (len == 4) { + val = buf[3]; + val = (val << 8) + buf[2]; + val = (val << 8) + buf[1]; + val = (val << 8) + buf[0]; + } + + writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); + + return 0; +} + +static int phytium_qspi_wait_cmd(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + u32 cnt = 0; + + cmd |= CMD_RDSR << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + + cnt = PHYTIUM_QSPI_BUSY_TIMEOUT_US / 10; + while (readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG) & 0x01) { + udelay(10); + cnt--; + if (!cnt) { + dev_err(qspi->dev, "wait command process timeout\n"); + break; + } + } + + return !cnt; +} + +static int phytium_qspi_cmd_enable(struct phytium_qspi *qspi) +{ + u32 val = 0; + + writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG); + + return 0; +} + +static int phytium_qspi_write_enable(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + + cmd = CMD_WREN << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + return 0; +} + +static int phytium_qspi_write_disable(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash) +{ + u32 cmd = 0; + + cmd = CMD_WRDI << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + return 0; +} + +static int phytium_qspi_read_flash_id(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) +{ + u32 cmd = 0; + unsigned long iflags; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + dev_dbg(qspi->dev, "read flash id:%x\n", *(u32 *)buf); + return 0; +} + +static int phytium_qspi_read_flash_sfdp(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, loff_t from, + u8 *buf, int len) +{ + unsigned long iflags; + u32 cmd = 0; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= 
BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + writel_relaxed(from, qspi->io_base + QSPI_ADDR_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + dev_dbg(qspi->dev, "read flash sfdp:0x%llx 0x%llx\n", + *(u64 *)buf, *(u64 *)(buf + 8)); + return 0; +} + +static int phytium_qspi_read_flash_sr1(struct phytium_qspi *qspi, + struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len) +{ + u32 cmd = 0; + u32 val; + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= (len << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK; + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG); + buf[0] = (u8)val; + + return 0; +} + +static int phytium_qspi_read_reg(struct spi_nor *nor, + u8 opcode, u8 *buf, int len) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + unsigned long iflags; + u32 cmd = 0; + + dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len); + + switch (opcode) { + case CMD_RDID: + phytium_qspi_read_flash_id(qspi, flash, opcode, buf, len); + return 0; + case CMD_RDSR: + phytium_qspi_read_flash_sr1(qspi, flash, opcode, buf, len); + return 0; + default: + break; + } + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT); + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + + spin_lock_irqsave(&qspi->spinlock, iflags); + memcpy_from_ftreg(qspi, buf, len); + spin_unlock_irqrestore(&qspi->spinlock, iflags); + + return 0; +} + +static int phytium_qspi_write_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, int len) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = 0; + + dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", + opcode, buf, len); + + switch (opcode) { + case CMD_WREN: + phytium_qspi_write_enable(qspi, flash); + return 0; + case CMD_WRDI: + phytium_qspi_write_disable(qspi, flash); + return 0; + default: + break; + } + + cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + if (len > 8 || !buf) { + dev_err(dev, "data length exceed. 
commad %x, len:%d\n", + opcode, len); + return -EINVAL; + } else if (len > 0) { + cmd |= ((len - 1) << QSPI_CMD_PORT_RW_NUM_SHIFT) + & QSPI_CMD_PORT_RW_NUM_MASK; + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + } + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + memcpy_to_ftreg(qspi, buf, len); + + return 0; +} + +static ssize_t phytium_qspi_read_tmp(struct phytium_qspi *qspi, u32 read_cmd, + loff_t from, size_t len, u_char *buf) +{ + u32 addr = (u32)from; + u64 val = 0; + + if (!qspi) + return -1; + + dev_dbg(qspi->dev, "read cmd:%x, addr:%x len:%zx\n", + read_cmd, addr, len); + writel_relaxed(read_cmd, qspi->io_base + QSPI_RD_CFG_REG); + + memcpy_fromio(buf, qspi->mm_base + addr, len); + + val = *(u64 *)(buf); + dev_dbg(qspi->dev, "read val:%llx\n", val); + + return len; +} + +static ssize_t phytium_qspi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *buf) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->read_opcode; + u32 addr = (u32)from; + + addr = addr + flash->cs * flash->fsize; + dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n", + nor->read_opcode, buf, addr, len); + + cmd = cmd << QSPI_RD_CFG_RD_CMD_SHIFT; + cmd |= BIT(QSPI_RD_CFG_D_BUFFER_SHIFT); + cmd |= flash->clk_div << QSPI_CMD_PORT_SCK_SEL_SHIFT; + + cmd &= ~QSPI_RD_CFG_RD_TRANSFER_MASK; + cmd |= (flash->addr_width << QSPI_RD_CFG_RD_TRANSFER_SHIFT); + + switch (nor->read_opcode) { + case CMD_READ: + case CMD_FAST_READ: + case CMD_QIOR: + case CMD_QOR: + cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; + break; + case CMD_4READ: + case CMD_4FAST_READ: + case CMD_4QOR: + case CMD_4QIOR: + cmd |= BIT(QSPI_RD_CFG_RD_ADDR_SEL_SHIFT); + break; + case 0x5A: + cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK; + phytium_qspi_read_flash_sfdp(qspi, flash, nor->read_opcode, + from, buf, len); + return 0; + default: + break; + } + + if (flash->addr_width == PHYTIUM_QSPI_1_1_4 || + flash->addr_width == PHYTIUM_QSPI_1_4_4) { + cmd |= BIT(QSPI_RD_CFG_RD_LATENCY_SHIFT); + + cmd &= ~QSPI_RD_CFG_DUMMY_MASK; + cmd |= (0x07 << QSPI_RD_CFG_DUMMY_SHIFT); + } + + dev_dbg(qspi->dev, "read(%#.2x): cmd:%#x\n", nor->read_opcode, cmd); + if (cmd != flash->read_cmd) + flash->read_cmd = cmd; + + writel_relaxed(cmd, qspi->io_base + QSPI_RD_CFG_REG); + + memcpy_fromio(buf, qspi->mm_base + addr, len); + + return len; +} + +static ssize_t phytium_qspi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->program_opcode; + u32 addr = (u32)to; + int i; + u_char tmp[8] = {0}; + size_t mask = 0x03; + + addr = addr + flash->cs * flash->fsize; + dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n", + nor->program_opcode, buf, addr, len); + + if (addr & 0x03) { + dev_err(dev, "Addr not four-byte aligned!\n"); + return -EINVAL; + } + + cmd = cmd << QSPI_WR_CFG_WR_CMD_SHIFT; + cmd |= BIT(QSPI_WR_CFG_WR_MODE_SHIFT); + cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + + switch (nor->program_opcode) { + case CMD_PP: + case CMD_QPP: + cmd &= ~QSPI_WR_CFG_WR_ADDR_SEL_MASK; + break; + case CMD_4PP: + case CMD_4QPP: + cmd |= BIT(QSPI_WR_CFG_WR_ADDR_SEL_SHIFT); + break; + default: + dev_err(qspi->dev, "Not support program command:%#x\n", + nor->erase_opcode); + return -EINVAL; + } + + dev_dbg(qspi->dev, "write cmd:%x\n", cmd); + writel_relaxed(cmd, qspi->io_base + QSPI_WR_CFG_REG); + + for (i = 0; i < len/4; i++) + 
writel_relaxed(*(u32 *)(buf + 4*i), qspi->mm_base + addr + 4*i); + + if (len & mask) { + addr = addr + (len & ~mask); + phytium_qspi_read_tmp(qspi, flash->read_cmd, addr, 4, &tmp[0]); + memcpy(tmp, buf + (len & ~mask), len & mask); + writel_relaxed(*(u32 *)(tmp), qspi->mm_base + addr); + } + + writel_relaxed(QSPI_FLUSH_EN, qspi->io_base + QSPI_FLUSH_REG); + + phytium_qspi_wait_cmd(qspi, flash); + + return len; +} + +static int phytium_qspi_erase(struct spi_nor *nor, loff_t offs) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->erase_opcode; + u32 addr = (u32)offs; + + dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs); + + phytium_qspi_write_enable(qspi, flash); + cmd = cmd << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + + /* s25fl256s1 not supoort D8, DC, 20, 21 */ + switch (nor->erase_opcode) { + case CMD_SE: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_4SE: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_P4E: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_4P4E: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + break; + case CMD_BE: + cmd &= ~QSPI_CMD_PORT_SEL_MASK; + break; + case CMD_4BE: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + break; + default: + dev_err(qspi->dev, "Not support erase command:%#x\n", + nor->erase_opcode); + return -EINVAL; + } + + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + phytium_qspi_cmd_enable(qspi); + phytium_qspi_wait_cmd(qspi, flash); + + return 0; +} + +static int phytium_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = flash->qspi; + + mutex_lock(&qspi->lock); + return 0; +} + +static void phytium_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct phytium_qspi *qspi = flash->qspi; + + mutex_unlock(&qspi->lock); +} + +static int phytium_qspi_get_flash_size(struct phytium_qspi *qspi, u32 size) +{ + int ret = 0; + u32 value; + + switch (size) { + case SZ_4M: + value = 0; + break; + case SZ_8M: + value = 1; + break; + case SZ_16M: + value = 2; + break; + case SZ_32M: + value = 3; + break; + case SZ_64M: + value = 4; + break; + case SZ_128M: + value = 5; + break; + case SZ_256M: + value = 6; + break; + case SZ_512M: + value = 7; + break; + default: + value = 0; + + ret = -EINVAL; + return ret; + } + + return value; +} +static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, + struct device_node *np) +{ + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + u32 width, presc; + u32 cs_num = 0; + u32 max_rate = 0; + u32 clk_div = 0; + u32 flash_cap = 0; + u32 addr_width = PHYTIUM_QSPI_1_1_1; + struct phytium_qspi_flash *flash; + struct mtd_info *mtd; + int ret; + + of_property_read_u32(np, "reg", &cs_num); + if (cs_num >= PHYTIUM_MAX_NORCHIP) + return -EINVAL; + + of_property_read_u32(np, "spi-max-frequency", 
&max_rate); + if (!max_rate) + return -EINVAL; + + of_property_read_u32(np, "spi-clk-div", &clk_div); + if (!clk_div) + clk_div = PHYTIUM_SCK_SEL; + + if (clk_div < 4) + return -EINVAL; + + presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1; + + of_property_read_u32(np, "spi-rx-bus-width", &width); + if (!width) + width = 1; + + if (width == 4) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + addr_width = PHYTIUM_QSPI_1_1_4; + } else if (width == 2) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + addr_width = PHYTIUM_QSPI_1_1_2; + } else if (width != 1) + return -EINVAL; + + flash = &qspi->flash[cs_num]; + flash->qspi = qspi; + flash->cs = cs_num; + flash->presc = presc; + flash->clk_div = clk_div; + flash->addr_width = addr_width; + + flash->nor.dev = qspi->dev; + spi_nor_set_flash_node(&flash->nor, np); + flash->nor.priv = flash; + mtd = &flash->nor.mtd; + + flash->nor.read = phytium_qspi_read; + flash->nor.write = phytium_qspi_write; + flash->nor.erase = phytium_qspi_erase; + flash->nor.read_reg = phytium_qspi_read_reg; + flash->nor.write_reg = phytium_qspi_write_reg; + flash->nor.prepare = phytium_qspi_prep; + flash->nor.unprepare = phytium_qspi_unprep; + + ret = spi_nor_scan(&flash->nor, NULL, &hwcaps); + if (ret) { + dev_err(qspi->dev, "device scan failed\n"); + return ret; + } + + flash->fsize = mtd->size; + flash->prefetch_limit = mtd->size - PHYTIUM_QSPI_FIFO_SZ; + + ret = phytium_qspi_get_flash_size(flash->qspi, mtd->size); + if (ret < 1) { + dev_err(qspi->dev, "flash size invalid\n"); + return ret; + } + + flash_cap = cs_num << QSPI_FLASH_CAP_NUM_SHIFT; + flash_cap |= ret; + writel_relaxed(flash_cap, qspi->io_base + QSPI_FLASH_CAP_REG); + + flash->read_mode = PHYTIUM_FMODE_MM; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(qspi->dev, "mtd device parse failed\n"); + return ret; + } + + flash->registered = true; + + dev_dbg(qspi->dev, "read mm:%s %p cs:%d bus:%d clk-div:%d\n", + flash->read_mode == PHYTIUM_FMODE_MM ? 
"yes" : "no", + qspi->mm_base, cs_num, width, clk_div); + + dev_dbg(qspi->dev, "mtd->size:%llx, mtd->erasesize:%x, fsize:%x\n", + mtd->size, mtd->erasesize, flash->fsize); + + return 0; +} + +static void phytium_qspi_mtd_free(struct phytium_qspi *qspi) +{ + int i; + + for (i = 0; i < PHYTIUM_MAX_NORCHIP; i++) + if (qspi->flash[i].registered) + mtd_device_unregister(&qspi->flash[i].nor.mtd); +} + +static ssize_t clk_div_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_qspi *qspi = dev_get_drvdata(dev); + struct phytium_qspi_flash *flash = &qspi->flash[0]; + + return sprintf(buf, "Flash 0 clk-div: %d\n", flash->clk_div); +} + +static ssize_t clk_div_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t size) +{ + struct phytium_qspi *qspi = dev_get_drvdata(dev); + struct phytium_qspi_flash *flash = &qspi->flash[0]; + long value; + char *token; + ssize_t status; + + token = strsep((char **)&buf, " "); + if (!token) + return -EINVAL; + + status = kstrtol(token, 0, &value); + if (status) + return status; + + flash->clk_div = (u8)value; + + return size; +} +static DEVICE_ATTR_RW(clk_div); + +static struct attribute *phytium_qspi_attrs[] = { + &dev_attr_clk_div.attr, + NULL, +}; + +static struct attribute_group phytium_qspi_attr_group = { + .attrs = phytium_qspi_attrs, +}; + +static int phytium_qspi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *flash_np; + struct phytium_qspi *qspi; + struct resource *res; + int ret; + + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); + if (!qspi) + return -ENOMEM; + + qspi->nor_num = of_get_child_count(dev->of_node); + if (!qspi->nor_num || qspi->nor_num > PHYTIUM_MAX_NORCHIP) + return -ENODEV; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); + qspi->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->io_base)) + return PTR_ERR(qspi->io_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); + qspi->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qspi->mm_base)) + return PTR_ERR(qspi->mm_base); + + qspi->mm_size = resource_size(res); + + qspi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); + + qspi->clk_rate = clk_get_rate(qspi->clk); + if (!qspi->clk_rate) + return -EINVAL; + + ret = clk_prepare_enable(qspi->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + qspi->dev = dev; + platform_set_drvdata(pdev, qspi); + mutex_init(&qspi->lock); + spin_lock_init(&qspi->spinlock); + + for_each_available_child_of_node(dev->of_node, flash_np) { + ret = phytium_qspi_flash_setup(qspi, flash_np); + if (ret) { + dev_err(dev, "unable to setup flash chip\n"); + goto err_flash; + } + } + + ret = sysfs_create_group(&qspi->dev->kobj, &phytium_qspi_attr_group); + if (ret) { + dev_err(dev, "unable to create sysfs\n"); + goto err_flash; + } + + return 0; + +err_flash: + mutex_destroy(&qspi->lock); + phytium_qspi_mtd_free(qspi); + + clk_disable_unprepare(qspi->clk); + return ret; +} + +static int phytium_qspi_remove(struct platform_device *pdev) +{ + struct phytium_qspi *qspi = platform_get_drvdata(pdev); + + sysfs_remove_group(&qspi->dev->kobj, &phytium_qspi_attr_group); + + phytium_qspi_mtd_free(qspi); + mutex_destroy(&qspi->lock); + + clk_disable_unprepare(qspi->clk); + return 0; +} + +static const struct of_device_id phytium_qspi_match[] = { + { .compatible = "phytium,qspi" }, + { } +}; +MODULE_DEVICE_TABLE(of, 
phytium_qspi_match); + +static struct platform_driver phytium_qspi_driver = { + .probe = phytium_qspi_probe, + .remove = phytium_qspi_remove, + .driver = { + .name = "phytium-quadspi", + .of_match_table = phytium_qspi_match, + }, +}; + +module_platform_driver(phytium_qspi_driver); + +MODULE_AUTHOR("Mingshuai Zhu "); +MODULE_DESCRIPTION("Phytium QuadSPI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f028277fb1cedb86ac4837e78341705c82f60b1b..2e183425facd855860e0f4d6d182954f6a9850e1 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2459,7 +2459,7 @@ static int spi_nor_init_params(struct spi_nor *nor, memset(params, 0, sizeof(*params)); /* Set SPI NOR sizes. */ - params->size = info->sector_size * info->n_sectors; + params->size = (u64)info->sector_size * info->n_sectors; params->page_size = info->page_size; /* (Fast) Read settings. */ diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d2a726654ff1182e961f22ff365dc461996b7204..93cce95287baa970ed9eb2106b825a26c21d5a10 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -866,6 +866,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, return -EINVAL; } + /* UBI cannot work on flashes with zero erasesize. */ + if (!mtd->erasesize) { + pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n", + mtd->index); + return -EINVAL; + } + if (ubi_num == UBI_DEV_NUM_AUTO) { /* Search for an empty slot in the @ubi_devices array */ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) @@ -1101,10 +1108,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) ubi_wl_close(ubi); ubi_free_internal_volumes(ubi); vfree(ubi->vtbl); - put_mtd_device(ubi->mtd); vfree(ubi->peb_buf); vfree(ubi->fm_buf); ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index); + put_mtd_device(ubi->mtd); put_device(&ubi->dev); return 0; } diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index e9e9ecbcedcc384aee39a7b3e08f9d23cffd39aa..0b8f0c46268dae932896b7438e15fe8205b9016f 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -227,9 +227,9 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) out_free: kfree(desc); out_put_ubi: - ubi_put_device(ubi); ubi_err(ubi, "cannot open device %d, volume %d, error %d", ubi_num, vol_id, err); + ubi_put_device(ubi); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(ubi_open_volume); diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 1bc82154bb18f6b3e03df72d725778d7dea12227..634ec95a1e71a1be835ef1cfb5a273f8ef68a050 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -804,6 +804,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) * The number of supported volumes is limited by the eraseblock size * and by the UBI_MAX_VOLUMES constant. 
*/ + + if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) { + ubi_err(ubi, "LEB size too small for a volume record"); + return -EINVAL; + } + ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; if (ubi->vtbl_slots > UBI_MAX_VOLUMES) ubi->vtbl_slots = UBI_MAX_VOLUMES; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index d03775100f7db789d783a3d3dd9ff9cd5c83c325..0652caad57ec1cb8e8c27c04242fe7e2996b329b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -197,9 +197,9 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" - depends on INET && NET_UDP_TUNNEL + depends on INET depends on IPV6 || !IPV6 - select NET_IP_TUNNEL + select NET_UDP_TUNNEL select GRO_CELLS ---help--- This allows one to create geneve virtual interfaces that provide @@ -213,8 +213,8 @@ config GENEVE config GTP tristate "GPRS Tunneling Protocol datapath (GTP-U)" - depends on INET && NET_UDP_TUNNEL - select NET_IP_TUNNEL + depends on INET + select NET_UDP_TUNNEL ---help--- This allows one to create gtp virtual interfaces that provide the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 21cde7e7862168e501b37217d2c983fafa989879..0d3ba056cda3ca0cb1ec1da6e6a2addef548985b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ -obj-$(CONFIG_NET_DSA) += dsa/ +obj-y += dsa/ obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 8459115d9d4e51faf405335c9886908cee5c1b02..553776cc1d29ddceb9f4855d93e5d97366dd5a8f 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt); static void arcnet_rx(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); - struct archdr pkt; + union { + struct archdr pkt; + char buf[512]; + } rxdata; struct arc_rfc1201 *soft; int length, ofs; - soft = &pkt.soft.rfc1201; + soft = &rxdata.pkt.soft.rfc1201; - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE); - if (pkt.hard.offset[0]) { - ofs = pkt.hard.offset[0]; + lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE); + if (rxdata.pkt.hard.offset[0]) { + ofs = rxdata.pkt.hard.offset[0]; length = 256 - ofs; } else { - ofs = pkt.hard.offset[1]; + ofs = rxdata.pkt.hard.offset[1]; length = 512 - ofs; } /* get the full header, if possible */ - if (sizeof(pkt.soft) <= length) { - lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft)); + if (sizeof(rxdata.pkt.soft) <= length) { + lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft)); } else { - memset(&pkt.soft, 0, sizeof(pkt.soft)); + memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft)); lp->hw.copy_from_card(dev, bufnum, ofs, soft, length); } arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n", - bufnum, pkt.hard.source, pkt.hard.dest, length); + bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length); dev->stats.rx_packets++; dev->stats.rx_bytes += length + ARC_HDR_SIZE; @@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum) if (arc_proto_map[soft->proto]->is_ip) { if (BUGLVL(D_PROTO)) { struct ArcProto - *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]], + *oldp = 
arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]], *newp = arc_proto_map[soft->proto]; if (oldp != newp) { arc_printk(D_PROTO, dev, "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n", - soft->proto, pkt.hard.source, + soft->proto, rxdata.pkt.hard.source, newp->suffix, oldp->suffix); } } @@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum) lp->default_proto[0] = soft->proto; /* in striking contrast, the following isn't a hack. */ - lp->default_proto[pkt.hard.source] = soft->proto; + lp->default_proto[rxdata.pkt.hard.source] = soft->proto; } /* call the protocol-specific receiver. */ - arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length); + arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length); } static void null_rx(struct net_device *dev, int bufnum, diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index eb7f76753c9c0dbdd2ca07985d9c407b433ab506..9f44e2e458df17e08b2ab9238d5dca39c52110bb 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -136,6 +136,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 6f4e80853ed4013644e68d097a14ef4a208a5395..e6cbc8439db9bc9268921c1564943167c64325ee 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_BONDING) += bonding.o -bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_sysfs_slave.o bond_debugfs.o bond_netlink.o bond_options.o +bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_sysfs_slave.o bond_debugfs.o bond_netlink.o bond_options.o bond_sysctl.o proc-$(CONFIG_PROC_FS) += bond_procfs.o bonding-objs += $(proc-y) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index f43fb2f958a54e12c4d29ad91340e237e9d98e5a..a6bb7e915f74f0e562e80c6e484a8a6ad7241da4 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -249,7 +249,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 
1 : 0; } /** @@ -1012,8 +1012,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { - __enable_port(port); + *update_slave_arr = true; } } break; @@ -1760,6 +1760,7 @@ static void ad_agg_selection_logic(struct aggregator *agg, port = port->next_port_in_aggregator) { __enable_port(port); } + *update_slave_arr = true; } } @@ -1964,7 +1965,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -1976,30 +1977,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) */ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) { - /* check that the bond is not initialized yet */ - if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), - bond->dev->dev_addr)) { - - BOND_AD_INFO(bond).aggregator_identifier = 0; - - BOND_AD_INFO(bond).system.sys_priority = - bond->params.ad_actor_sys_prio; - if (is_zero_ether_addr(bond->params.ad_actor_system)) - BOND_AD_INFO(bond).system.sys_mac_addr = - *((struct mac_addr *)bond->dev->dev_addr); - else - BOND_AD_INFO(bond).system.sys_mac_addr = - *((struct mac_addr *)bond->params.ad_actor_system); + BOND_AD_INFO(bond).aggregator_identifier = 0; + BOND_AD_INFO(bond).system.sys_priority = + bond->params.ad_actor_sys_prio; + if (is_zero_ether_addr(bond->params.ad_actor_system)) + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->dev->dev_addr); + else + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->params.ad_actor_system); - /* initialize how many times this module is called in one - * second (should be about every 100ms) - */ - ad_ticks_per_sec = tick_resolution; + /* initialize how many times this module is called in one + * second (should be about every 100ms) + */ + ad_ticks_per_sec = tick_resolution; - bond_3ad_initiate_agg_selection(bond, - AD_AGGREGATOR_SELECTION_TIMER * - ad_ticks_per_sec); - } + bond_3ad_initiate_agg_selection(bond, + AD_AGGREGATOR_SELECTION_TIMER * + ad_ticks_per_sec); } /** @@ -2086,6 +2081,9 @@ void bond_3ad_unbind_slave(struct slave *slave) aggregator->aggregator_identifier); /* Tell the partner that this port is not suitable for aggregation */ + port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; + port->actor_oper_port_state &= ~AD_STATE_COLLECTING; + port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; __update_lacpdu_from_port(port); ad_lacpdu_send(port); @@ -2195,7 +2193,8 @@ void bond_3ad_unbind_slave(struct slave *slave) temp_aggregator->num_of_ports--; if (__agg_active_ports(temp_aggregator) == 0) { select_new_active_agg = temp_aggregator->is_active; - ad_clear_agg(temp_aggregator); + if (temp_aggregator->num_of_ports == 0) + ad_clear_agg(temp_aggregator); if (select_new_active_agg) { netdev_info(bond->dev, "Removing an active aggregator\n"); /* select new active aggregator */ @@ -2245,6 +2244,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) spin_unlock_bh(&bond->mode_lock); } +/** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. 
+ */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + /** * bond_3ad_state_machine_handler - handle state machines timeout * @bond: bonding struct to work on @@ -2280,9 +2301,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index e82108c917a635f036a9e86eeeed5d09c9cbbf53..253e25a294f4b5f6701c18a2b1725fe492eb4565 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -66,11 +66,6 @@ struct arp_pkt { }; #pragma pack() -static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) -{ - return (struct arp_pkt *)skb_network_header(skb); -} - /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], bool strict_match); @@ -568,10 +563,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) spin_unlock(&bond->mode_lock); } -static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) +static struct slave *rlb_choose_channel(struct sk_buff *skb, + struct bonding *bond, + const struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct arp_pkt *arp = arp_pkt(skb); struct slave *assigned_slave, *curr_active_slave; struct rlb_client_info *client_info; u32 hash_index = 0; @@ -668,18 +664,32 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon */ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) { - struct arp_pkt *arp = arp_pkt(skb); struct slave *tx_slave = NULL; + struct net_device *dev; + struct arp_pkt *arp; + + if (!pskb_network_may_pull(skb, sizeof(*arp))) + return NULL; + arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. */ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; + dev = ip_dev_find(dev_net(bond->dev), arp->ip_src); + if (dev) { + if (netif_is_bridge_master(dev)) { + dev_put(dev); + return NULL; + } + dev_put(dev); + } + if (arp->op_code == htons(ARPOP_REPLY)) { /* the arp must be sent on the selected rx channel */ - tx_slave = rlb_choose_channel(skb, bond); + tx_slave = rlb_choose_channel(skb, bond, arp); if (tx_slave) bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr, tx_slave->dev->addr_len); @@ -690,7 +700,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) * When the arp reply is received the entry will be updated * with the correct unicast address of the client. */ - rlb_choose_channel(skb, bond); + rlb_choose_channel(skb, bond, arp); /* The ARP reply packets must be delayed so that * they can cancel out the influence of the ARP request. 
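
bond_agg_timer_advance() above replaces a plain pre-decrement with a compare-and-swap loop, so a concurrent bond_3ad_initiate_agg_selection() re-arming the timer with atomic_set() cannot be lost between the read and the write. The same decrement-unless-zero pattern in portable C11 atomics, as a stand-alone sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *timer unless it is already zero; report when it reaches zero. */
static bool timer_advance(atomic_int *timer)
{
	int val, nval;

	do {
		val = atomic_load(timer);
		if (val == 0)
			return false;
		nval = val - 1;
		/* if a writer got in between, reload and try again */
	} while (!atomic_compare_exchange_weak(timer, &val, nval));

	return nval == 0;
}
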
@@ -984,7 +994,8 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) if (netif_is_macvlan(upper) && !strict_match) { tags = bond_verify_device_path(bond->dev, upper, 0); if (IS_ERR_OR_NULL(tags)) - BUG(); + return -ENOMEM; + alb_send_lp_vid(slave, upper->dev_addr, tags[0].vlan_proto, tags[0].vlan_id); kfree(tags); @@ -1281,6 +1292,27 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) return res; } +/* determine if the packet is NA or NS */ +static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond) +{ + struct ipv6hdr *ip6hdr; + struct icmp6hdr *hdr; + + if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) + return true; + + ip6hdr = ipv6_hdr(skb); + if (ip6hdr->nexthdr != IPPROTO_ICMPV6) + return false; + + if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr))) + return true; + + hdr = icmp6_hdr(skb); + return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT || + hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION; +} + /************************ exported alb funcions ************************/ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) @@ -1292,12 +1324,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) return res; if (rlb_enabled) { - bond->alb_info.rlb_enabled = 1; res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); return res; } + bond->alb_info.rlb_enabled = 1; } else { bond->alb_info.rlb_enabled = 0; } @@ -1363,10 +1395,13 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* Do not TX balance any multicast or broadcast */ if (!is_multicast_ether_addr(eth_data->h_dest)) { switch (skb->protocol) { + case htons(ETH_P_IPV6): + if (alb_determine_nd(skb, bond)) + break; + /* fallthrough */ case htons(ETH_P_IP): case htons(ETH_P_IPX): /* In case of IPX, it will falback to L2 hash */ - case htons(ETH_P_IPV6): hash_index = bond_xmit_hash(bond, skb); if (bond->params.tlb_dynamic_lb) { tx_slave = tlb_choose_channel(bond, @@ -1399,26 +1434,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) bool do_tx_balance = true; u32 hash_index = 0; const u8 *hash_start = NULL; - struct ipv6hdr *ip6hdr; skb_reset_mac_header(skb); eth_data = eth_hdr(skb); switch (ntohs(skb->protocol)) { case ETH_P_IP: { - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; if (is_broadcast_ether_addr(eth_data->h_dest) || - iph->daddr == ip_bcast || - iph->protocol == IPPROTO_IGMP) { + !pskb_network_may_pull(skb, sizeof(*iph))) { + do_tx_balance = false; + break; + } + iph = ip_hdr(skb); + if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; } hash_start = (char *)&(iph->daddr); hash_size = sizeof(iph->daddr); - } break; - case ETH_P_IPV6: + } + case ETH_P_IPV6: { + const struct ipv6hdr *ip6hdr; + /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ @@ -1435,7 +1475,13 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - /* Additianally, DAD probes should not be tx-balanced as that + if (alb_determine_nd(skb, bond)) { + do_tx_balance = false; + break; + } + + /* The IPv6 header is pulled by alb_determine_nd */ + /* Additionally, DAD probes should not be tx-balanced as that * will lead to false positives for duplicate addresses and * prevent address configuration from working. 
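
The bond_alb_xmit()/rlb_arp_xmit() changes above all follow the same rule: call pskb_network_may_pull() for the full header size before dereferencing ip_hdr(), ipv6_hdr() or the IPX header, because the network header of a forwarded skb is not guaranteed to sit in the linear data area. A minimal kernel-style sketch of that pull-before-parse pattern (frame_is_igmp() is a hypothetical helper, not part of the patch):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static bool frame_is_igmp(struct sk_buff *skb)
{
	const struct iphdr *iph;

	/* make sure a complete IPv4 header is present in the linear area */
	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	/* only now is it safe to look at the header fields */
	iph = ip_hdr(skb);
	return iph->protocol == IPPROTO_IGMP;
}
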
*/ @@ -1445,17 +1491,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - hash_start = (char *)&(ipv6_hdr(skb)->daddr); - hash_size = sizeof(ipv6_hdr(skb)->daddr); + hash_start = (char *)&ip6hdr->daddr; + hash_size = sizeof(ip6hdr->daddr); break; - case ETH_P_IPX: - if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { + } + case ETH_P_IPX: { + const struct ipxhdr *ipxhdr; + + if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) { + do_tx_balance = false; + break; + } + ipxhdr = (struct ipxhdr *)skb_network_header(skb); + + if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ do_tx_balance = false; break; } - if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { + if (ipxhdr->ipx_type != IPX_TYPE_NCP) { /* The only protocol worth balancing in * this family since it has an "ARP" like * mechanism @@ -1464,9 +1519,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } + eth_data = eth_hdr(skb); hash_start = (char *)eth_data->h_dest; hash_size = ETH_ALEN; break; + } case ETH_P_ARP: do_tx_balance = false; if (bond_info->rlb_enabled) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ee28ec9e0abaddd13053da9fda5c0cefc722555d..56c16b171e359d5973a3acea3414b56faa68ca77 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -782,14 +783,16 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? slave->dev->name : "NULL"); - if (!slave || !bond->send_peer_notif || + bond->send_peer_notif % + max(1, bond->params.peer_notif_delay) != 0 || !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? 
slave->dev->name : "NULL"); + return true; } @@ -878,15 +881,18 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) if (netif_running(bond->dev)) { bond->send_peer_notif = - bond->params.num_peer_notif; + bond->params.num_peer_notif * + max(1, bond->params.peer_notif_delay); should_notify_peers = bond_should_notify_peers(bond); } call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); - if (should_notify_peers) + if (should_notify_peers) { + bond->send_peer_notif--; call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); + } } } @@ -1066,12 +1072,16 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) +#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_ALL_TSO) + static void bond_compute_features(struct bonding *bond) { unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; netdev_features_t vlan_features = BOND_VLAN_FEATURES; netdev_features_t enc_features = BOND_ENC_FEATURES; + netdev_features_t mpls_features = BOND_MPLS_FEATURES; struct net_device *bond_dev = bond->dev; struct list_head *iter; struct slave *slave; @@ -1082,6 +1092,7 @@ static void bond_compute_features(struct bonding *bond) if (!bond_has_slaves(bond)) goto done; vlan_features &= NETIF_F_ALL_FOR_ALL; + mpls_features &= NETIF_F_ALL_FOR_ALL; bond_for_each_slave(bond, slave, iter) { vlan_features = netdev_increment_features(vlan_features, @@ -1090,6 +1101,11 @@ static void bond_compute_features(struct bonding *bond) enc_features = netdev_increment_features(enc_features, slave->dev->hw_enc_features, BOND_ENC_FEATURES); + + mpls_features = netdev_increment_features(mpls_features, + slave->dev->mpls_features, + BOND_MPLS_FEATURES); + dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; @@ -1102,7 +1118,10 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; + bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -1121,10 +1140,16 @@ static void bond_setup_by_slave(struct net_device *bond_dev, bond_dev->type = slave_dev->type; bond_dev->hard_header_len = slave_dev->hard_header_len; + bond_dev->needed_headroom = slave_dev->needed_headroom; bond_dev->addr_len = slave_dev->addr_len; memcpy(bond_dev->broadcast, slave_dev->broadcast, slave_dev->addr_len); + + if (slave_dev->flags & IFF_POINTOPOINT) { + bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } } /* On bonding slaves other than the currently active slave, suppress @@ -1171,29 +1196,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - /* Link-local multicast packets should be passed to the - * stack on the link they arrive as well as pass them to the - * bond-master device. These packets are mostly usable when - * stack receives it with the link on which they arrive - * (e.g. LLDP) they also must be available on master. 
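
In the hunks above, send_peer_notif is now loaded with num_peer_notif * max(1, peer_notif_delay) and counts down once per miimon interval, while bond_should_notify_peers() only lets a notification through when the counter is a multiple of max(1, peer_notif_delay). A simplified stand-alone model of that counter arithmetic (not the driver's exact control flow; the parameter values are made up):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int num_peer_notif   = 2;	/* hypothetical: notifications per failover            */
	int peer_notif_delay = 3;	/* hypothetical: gap between them, in miimon intervals */
	int send_peer_notif  = num_peer_notif * MAX(1, peer_notif_delay);

	while (send_peer_notif) {	/* one iteration per miimon tick */
		if (send_peer_notif % MAX(1, peer_notif_delay) == 0)
			printf("notify peers (counter=%d)\n", send_peer_notif);
		send_peer_notif--;
	}
	return 0;	/* fires at counter 6 and 3: two notifications, three ticks apart */
}
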
Some of - * the use cases include (but are not limited to): LLDP agents - * that must be able to operate both on enslaved interfaces as - * well as on bonds themselves; linux bridges that must be able - * to process/pass BPDUs from attached bonds when any kind of - * STP version is enabled on the network. + /* + * For packets determined by bond_should_deliver_exact_match() call to + * be suppressed we want to make an exception for link-local packets. + * This is necessary for e.g. LLDP daemons to be able to monitor + * inactive slave links without being forced to bind to them + * explicitly. + * + * At the same time, packets that are passed to the bonding master + * (including link-local ones) can have their originating interface + * determined via PACKET_ORIGDEV socket option. */ - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); - - if (nskb) { - nskb->dev = bond->dev; - nskb->queue_mapping = 0; - netif_rx(nskb); - } - return RX_HANDLER_PASS; - } - if (bond_should_deliver_exact_match(skb, slave, bond)) + if (bond_should_deliver_exact_match(skb, slave, bond)) { + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + return RX_HANDLER_PASS; return RX_HANDLER_EXACT; + } skb->dev = bond->dev; @@ -1272,7 +1290,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) slave->dev->flags &= ~IFF_SLAVE; } -static struct slave *bond_alloc_slave(struct bonding *bond) +static void slave_kobj_release(struct kobject *kobj) +{ + struct slave *slave = to_slave(kobj); + struct bonding *bond = bond_get_bond_by_slave(slave); + + cancel_delayed_work_sync(&slave->notify_work); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + kfree(SLAVE_AD_INFO(slave)); + + kfree(slave); +} + +static struct kobj_type slave_ktype = { + .release = slave_kobj_release, +#ifdef CONFIG_SYSFS + .sysfs_ops = &slave_sysfs_ops, +#endif +}; + +static int bond_kobj_init(struct slave *slave) +{ + int err; + + err = kobject_init_and_add(&slave->kobj, &slave_ktype, + &(slave->dev->dev.kobj), "bonding_slave"); + if (err) + kobject_put(&slave->kobj); + + return err; +} + +static struct slave *bond_alloc_slave(struct bonding *bond, + struct net_device *slave_dev) { struct slave *slave = NULL; @@ -1280,30 +1330,25 @@ static struct slave *bond_alloc_slave(struct bonding *bond) if (!slave) return NULL; + slave->bond = bond; + slave->dev = slave_dev; + INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); + + if (bond_kobj_init(slave)) + return NULL; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), GFP_KERNEL); if (!SLAVE_AD_INFO(slave)) { - kfree(slave); + kobject_put(&slave->kobj); return NULL; } } - INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); return slave; } -static void bond_free_slave(struct slave *slave) -{ - struct bonding *bond = bond_get_bond_by_slave(slave); - - cancel_delayed_work_sync(&slave->notify_work); - if (BOND_MODE(bond) == BOND_MODE_8023AD) - kfree(SLAVE_AD_INFO(slave)); - - kfree(slave); -} - static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) { info->bond_mode = BOND_MODE(bond); @@ -1351,6 +1396,20 @@ void bond_lower_state_changed(struct slave *slave) netdev_lower_state_changed(slave->dev, &info); } +/* The bonding driver uses ether_setup() to convert a master bond device + * to ARPHRD_ETHER, that resets the target netdevice's flags so we always + * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP + * if they were 
set + */ +static void bond_ether_setup(struct net_device *bond_dev) +{ + unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP); + + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER | flags; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; +} + /* enslave device to bond device */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) @@ -1441,10 +1500,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); - else { - ether_setup(bond_dev); - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; - } + else + bond_ether_setup(bond_dev); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, bond_dev); @@ -1491,14 +1548,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev); - new_slave = bond_alloc_slave(bond); + new_slave = bond_alloc_slave(bond, slave_dev); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } - new_slave->bond = bond; - new_slave->dev = slave_dev; /* Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. */ @@ -1803,7 +1858,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, slave_disable_netpoll(new_slave); err_close: - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_restore_mac: @@ -1824,7 +1880,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, dev_set_mtu(slave_dev, new_slave->original_mtu); err_free: - bond_free_slave(new_slave); + kobject_put(&new_slave->kobj); err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ @@ -1834,9 +1890,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, eth_hw_addr_random(bond_dev); if (bond_dev->type != ARPHRD_ETHER) { dev_close(bond_dev); - ether_setup(bond_dev); - bond_dev->flags |= IFF_MASTER; - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + bond_ether_setup(bond_dev); } } @@ -1890,7 +1944,6 @@ static int __bond_release_one(struct net_device *bond_dev, /* recompute stats just before removing the slave */ bond_get_stats(bond->dev, &bond->bond_stats); - bond_upper_dev_unlink(bond, slave); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. 
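
The slave_kobj_release()/bond_kobj_init() changes above move the freeing of struct slave into the kobject release callback, so the memory only goes away once the last sysfs reference is dropped, and every error path now uses kobject_put() instead of kfree(). A generic sketch of that pattern with a hypothetical struct foo (only the kobject calls are real kernel API):

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	/* ... driver state ... */
};

static void foo_release(struct kobject *kobj)
{
	/* called only when the last reference is gone */
	kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};

static struct foo *foo_create(struct kobject *parent)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	if (kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo")) {
		/* after kobject_init(), freeing must go through put(), not kfree() */
		kobject_put(&foo->kobj);
		return NULL;
	}
	return foo;
}
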
*/ @@ -1899,6 +1952,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); + bond_upper_dev_unlink(bond, slave); + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, slave); @@ -1947,6 +2002,9 @@ static int __bond_release_one(struct net_device *bond_dev, if (!bond_has_slaves(bond)) { bond_set_carrier(bond); eth_hw_addr_random(bond_dev); + bond->nest_level = SINGLE_DEPTH_NESTING; + } else { + bond->nest_level = dev_get_nest_level(bond_dev) + 1; } unblock_netpoll_tx(); @@ -2006,9 +2064,10 @@ static int __bond_release_one(struct net_device *bond_dev, else dev_set_mtu(slave_dev, slave->original_mtu); - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; - bond_free_slave(slave); + kobject_put(&slave->kobj); return 0; } @@ -2029,7 +2088,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); @@ -2068,16 +2128,15 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in /* called with rcu_read_lock() */ static int bond_miimon_inspect(struct bonding *bond) { + bool ignore_updelay = false; int link_state, commit = 0; struct list_head *iter; struct slave *slave; - bool ignore_updelay; ignore_updelay = !rcu_dereference(bond->curr_active_slave); bond_for_each_slave_rcu(bond, slave, iter) { - slave->new_link = BOND_LINK_NOCHANGE; - slave->link_new_state = slave->link; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); link_state = bond_check_dev_link(bond, slave->dev, 0); @@ -2113,7 +2172,7 @@ static int bond_miimon_inspect(struct bonding *bond) } if (slave->delay <= 0) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; continue; } @@ -2152,7 +2211,7 @@ static int bond_miimon_inspect(struct bonding *bond) slave->delay = 0; if (slave->delay <= 0) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); commit++; ignore_updelay = false; continue; @@ -2190,8 +2249,17 @@ static void bond_miimon_commit(struct bonding *bond) struct slave *slave, *primary; bond_for_each_slave(bond, slave, iter) { - switch (slave->new_link) { + switch (slave->link_new_state) { case BOND_LINK_NOCHANGE: + /* For 802.3ad mode, check current slave speed and + * duplex again in case its port was disabled after + * invalid speed/duplex reporting but recovered before + * link monitoring could make a decision on the actual + * link status + */ + if (BOND_MODE(bond) == BOND_MODE_8023AD && + slave->link == BOND_LINK_UP) + bond_3ad_adapter_speed_duplex_changed(slave); continue; case BOND_LINK_UP: @@ -2215,9 +2283,6 @@ static void bond_miimon_commit(struct bonding *bond) } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != primary) { - /* prevent it from being the active one */ - bond_set_backup_slave(slave); } netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n", @@ -2256,8 +2321,8 @@ static void bond_miimon_commit(struct bonding *bond) default: netdev_err(bond->dev, "invalid new link %d on slave %s\n", - slave->new_link, 
slave->dev->name); - slave->new_link = BOND_LINK_NOCHANGE; + slave->link_new_state, slave->dev->name); + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); continue; } @@ -2283,6 +2348,7 @@ static void bond_mii_monitor(struct work_struct *work) struct bonding *bond = container_of(work, struct bonding, mii_work.work); bool should_notify_peers = false; + bool commit; unsigned long delay; struct slave *slave; struct list_head *iter; @@ -2293,12 +2359,19 @@ static void bond_mii_monitor(struct work_struct *work) goto re_arm; rcu_read_lock(); - should_notify_peers = bond_should_notify_peers(bond); - - if (bond_miimon_inspect(bond)) { + commit = !!bond_miimon_inspect(bond); + if (bond->send_peer_notif) { rcu_read_unlock(); + if (rtnl_trylock()) { + bond->send_peer_notif--; + rtnl_unlock(); + } + } else { + rcu_read_unlock(); + } + if (commit) { /* Race avoidance with bond_close cancel of workqueue */ if (!rtnl_trylock()) { delay = 1; @@ -2312,8 +2385,7 @@ static void bond_mii_monitor(struct work_struct *work) bond_miimon_commit(bond); rtnl_unlock(); /* might sleep, hold no other locks */ - } else - rcu_read_unlock(); + } re_arm: if (bond->params.miimon) @@ -2657,13 +2729,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) bond_for_each_slave_rcu(bond, slave, iter) { unsigned long trans_start = dev_trans_start(slave->dev); - slave->new_link = BOND_LINK_NOCHANGE; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, trans_start, 1) && bond_time_in_interval(bond, slave->last_rx, 1)) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); slave_state_changed = 1; /* primary_slave has no meaning in round-robin @@ -2690,7 +2762,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, slave->last_rx, 2)) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) @@ -2722,8 +2794,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) goto re_arm; bond_for_each_slave(bond, slave, iter) { - if (slave->new_link != BOND_LINK_NOCHANGE) - slave->link = slave->new_link; + if (slave->link_new_state != BOND_LINK_NOCHANGE) + slave->link = slave->link_new_state; } if (slave_state_changed) { @@ -2746,9 +2818,9 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) } /* Called to inspect slaves for active-backup mode ARP monitor link state - * changes. Sets new_link in slaves to specify what action should take - * place for the slave. Returns 0 if no changes are found, >0 if changes - * to link states must be committed. + * changes. Sets proposed link state in slaves to specify what action + * should take place for the slave. Returns 0 if no changes are found, >0 + * if changes to link states must be committed. * * Called with rcu_read_lock held. 
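
The conversions from slave->new_link to bond_propose_link_state()/slave->link_new_state above funnel every speculative link change through one setter; the helper is assumed to be the usual include/net/bonding.h inline, shown here only for reference:

/* assumed definition, not part of this patch */
static inline void bond_propose_link_state(struct slave *slave, int state)
{
	slave->link_new_state = state;
}
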
*/ @@ -2760,12 +2832,15 @@ static int bond_ab_arp_inspect(struct bonding *bond) int commit = 0; bond_for_each_slave_rcu(bond, slave, iter) { - slave->new_link = BOND_LINK_NOCHANGE; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); last_rx = slave_last_rx(bond, slave); if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, last_rx, 1)) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); + commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); commit++; } continue; @@ -2793,7 +2868,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (!bond_is_active_slave(slave) && !rcu_access_pointer(bond->current_arp_slave) && !bond_time_in_interval(bond, last_rx, 3)) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } @@ -2806,7 +2881,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_is_active_slave(slave) && (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, last_rx, 2))) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } } @@ -2826,7 +2901,7 @@ static void bond_ab_arp_commit(struct bonding *bond) struct slave *slave; bond_for_each_slave(bond, slave, iter) { - switch (slave->new_link) { + switch (slave->link_new_state) { case BOND_LINK_NOCHANGE: continue; @@ -2877,9 +2952,22 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. + */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: netdev_err(bond->dev, "impossible: new_link %d on slave %s\n", - slave->new_link, slave->dev->name); + slave->link_new_state, slave->dev->name); continue; } @@ -2926,8 +3014,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; @@ -3022,9 +3108,11 @@ static void bond_activebackup_arp_mon(struct bonding *bond) if (!rtnl_trylock()) return; - if (should_notify_peers) + if (should_notify_peers) { + bond->send_peer_notif--; call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); + } if (should_notify_rtnl) { bond_slave_state_notify(bond); bond_slave_link_notify(bond); @@ -3072,10 +3160,6 @@ static int bond_master_netdev_event(unsigned long event, case NETDEV_REGISTER: bond_create_proc_entry(event_bond); break; - case NETDEV_NOTIFY_PEERS: - if (event_bond->send_peer_notif) - event_bond->send_peer_notif--; - break; default: break; } @@ -3111,13 +3195,18 @@ static int bond_slave_netdev_event(unsigned long event, case NETDEV_CHANGE: /* For 802.3ad mode only: * Getting invalid Speed/Duplex values here will put slave - * in weird state. So mark it as link-down for the time - * being and let link-monitoring (miimon) set it right when - * correct speeds/duplex are available. + * in weird state. Mark it as link-fail if the link was + * previously up or link-down if it hasn't yet come up, and + * let link-monitoring (miimon) set it right when correct + * speeds/duplex are available. 
*/ if (bond_update_speed_duplex(slave) && - BOND_MODE(bond) == BOND_MODE_8023AD) - slave->link = BOND_LINK_DOWN; + BOND_MODE(bond) == BOND_MODE_8023AD) { + if (slave->last_link_up) + slave->link = BOND_LINK_FAIL; + else + slave->link = BOND_LINK_DOWN; + } if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); @@ -3202,8 +3291,12 @@ static int bond_netdev_event(struct notifier_block *this, return NOTIFY_DONE; if (event_dev->flags & IFF_MASTER) { + int ret; + netdev_dbg(event_dev, "IFF_MASTER\n"); - return bond_master_netdev_event(event, event_dev); + ret = bond_master_netdev_event(event, event_dev); + if (ret != NOTIFY_DONE) + return ret; } if (event_dev->flags & IFF_SLAVE) { @@ -3847,8 +3940,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct iphdr *iph = ip_hdr(skb); struct slave *slave; + int slave_cnt; u32 slave_id; /* Start with the curr_active_slave that joined the bond as the @@ -3857,23 +3950,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, * send the join/membership reports. The curr_active_slave found * will send all of this type of traffic. */ - if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { - slave = rcu_dereference(bond->curr_active_slave); - if (slave) - bond_dev_queue_xmit(bond, skb, slave->dev); - else - bond_xmit_slave_id(bond, skb, 0); - } else { - int slave_cnt = READ_ONCE(bond->slave_cnt); + if (skb->protocol == htons(ETH_P_IP)) { + int noff = skb_network_offset(skb); + struct iphdr *iph; - if (likely(slave_cnt)) { - slave_id = bond_rr_gen_slave_id(bond); - bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); - } else { - bond_tx_drop(bond_dev, skb); + if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) + goto non_igmp; + + iph = ip_hdr(skb); + if (iph->protocol == IPPROTO_IGMP) { + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_dev_queue_xmit(bond, skb, slave->dev); + else + bond_xmit_slave_id(bond, skb, 0); + return NETDEV_TX_OK; } } +non_igmp: + slave_cnt = READ_ONCE(bond->slave_cnt); + if (likely(slave_cnt)) { + slave_id = bond_rr_gen_slave_id(bond); + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); + } else { + bond_tx_drop(bond_dev, skb); + } return NETDEV_TX_OK; } @@ -4008,7 +4110,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) * this to-be-skipped slave to send a packet out. 
*/ old_arr = rtnl_dereference(bond->slave_arr); - for (idx = 0; idx < old_arr->count; idx++) { + for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) { if (skipslave == old_arr->arr[idx]) { old_arr->arr[idx] = old_arr->arr[old_arr->count-1]; @@ -4020,6 +4122,39 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) return ret; } +/* Check whether the skb is arp or nd msg */ +static inline bool skb_is_arp_or_nd(struct sk_buff *skb) +{ + switch (ntohs(skb->protocol)) { + case ETH_P_ARP: + return true; + case ETH_P_IPV6: + if (pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg))) { + struct ipv6hdr *hdr = ipv6_hdr(skb); + u8 nexthdr = hdr->nexthdr; + struct icmp6hdr *icmp6; + + if (nexthdr == IPPROTO_ICMPV6) { + icmp6 = icmp6_hdr(skb); + + if ((icmp6->icmp6_type == + NDISC_NEIGHBOUR_SOLICITATION || + icmp6->icmp6_type == + NDISC_NEIGHBOUR_ADVERTISEMENT) && + icmp6->icmp6_code == 0) { + return true; + } + } + } + } + + return false; +} + +static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, + struct net_device *bond_dev); + /* Use this Xmit function for 3AD as well as XOR modes. The current * usable slave array is formed in the control path. The xmit function * just calculates hash and sends the packet out. @@ -4032,6 +4167,10 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, struct bond_up_slave *slaves; unsigned int count; + /* Broadcast to all slaves. */ + if (sysctl_bond_broadcast_arp_or_nd && skb_is_arp_or_nd(skb)) + return bond_xmit_broadcast(skb, dev); + slaves = rcu_dereference(bond->slave_arr); count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) { @@ -4177,13 +4316,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4195,8 +4344,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; @@ -4302,12 +4456,14 @@ void bond_setup(struct net_device *bond_dev) bond_dev->features |= NETIF_F_NETNS_LOCAL; bond_dev->hw_features = BOND_VLAN_FEATURES | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; bond_dev->features |= bond_dev->hw_features; + bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } /* Destroy a bonding device. 
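
bond_mode_bcast_speed() above makes ethtool report the slowest transmitting slave for broadcast mode instead of the sum of all slave speeds, since every frame is duplicated onto each slave and usable throughput is bounded by the slowest link. A stand-alone model of that accumulation (link speeds are hypothetical; SPEED_UNKNOWN mirrors the ethtool value of -1):

#include <stdio.h>

#define SPEED_UNKNOWN ((unsigned int)-1)

static unsigned int bcast_speed(unsigned int agg, unsigned int slave_speed)
{
	/* first slave sets the value, every further slave can only lower it */
	if (agg == 0 || agg == SPEED_UNKNOWN)
		return slave_speed;
	return agg < slave_speed ? agg : slave_speed;
}

int main(void)
{
	unsigned int slaves[] = { 1000, 100, 1000 };	/* hypothetical Mb/s */
	unsigned int speed = 0;
	unsigned int i;

	for (i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++)
		speed = bcast_speed(speed, slaves[i]);

	printf("broadcast-mode bond speed: %u Mb/s\n", speed);	/* 100 */
	return 0;
}
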
@@ -4689,6 +4845,7 @@ static int bond_check_params(struct bond_params *params) params->arp_all_targets = arp_all_targets_value; params->updelay = updelay; params->downdelay = downdelay; + params->peer_notif_delay = 0; params->use_carrier = use_carrier; params->lacp_fast = lacp_fast; params->primary[0] = 0; @@ -4794,15 +4951,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) @@ -4863,6 +5024,7 @@ static int __init bonding_init(void) goto err_link; bond_create_debugfs(); + bond_create_sysctl(); for (i = 0; i < max_bonds; i++) { res = bond_create(&init_net, NULL); @@ -4875,6 +5037,7 @@ static int __init bonding_init(void) return res; err: bond_destroy_debugfs(); + bond_destroy_sysctl(); bond_netlink_fini(); err_link: unregister_pernet_subsys(&bond_net_ops); @@ -4887,6 +5050,7 @@ static void __exit bonding_exit(void) unregister_netdevice_notifier(&bond_netdev_notifier); bond_destroy_debugfs(); + bond_destroy_sysctl(); bond_netlink_fini(); unregister_pernet_subsys(&bond_net_ops); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 9697977b80f040c3fde59037e2caabe0cdfaa79f..d89aba932b8ea65e08b98f4fafef472d0d824bf0 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -112,6 +112,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY, .len = ETH_ALEN }, [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, + [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -219,6 +220,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], if (err) return err; } + if (data[IFLA_BOND_PEER_NOTIF_DELAY]) { + int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]); + + bond_opt_initval(&newval, delay); + err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval); + if (err) + return err; + } if (data[IFLA_BOND_USE_CARRIER]) { int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]); @@ -451,11 +460,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } @@ -498,6 +506,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ + nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */ 0; } @@ -540,6 +549,10 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.downdelay * bond->params.miimon)) goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BOND_PEER_NOTIF_DELAY, + bond->params.peer_notif_delay * bond->params.miimon)) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier)) goto nla_put_failure; @@ -638,8 +651,7 @@ static int bond_fill_info(struct sk_buff *skb, goto nla_put_failure; if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, - 
sizeof(bond->params.ad_actor_system), - &bond->params.ad_actor_system)) + ETH_ALEN, &bond->params.ad_actor_system)) goto nla_put_failure; } if (!bond_3ad_get_active_agg_info(bond, &info)) { diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 4d5d01cb8141b89b18cf9d7affd8049a42c19e3b..8f44f80a878132de62ae8d8b0710936e18d4944b 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -28,6 +28,8 @@ static int bond_option_updelay_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_downdelay_set(struct bonding *bond, const struct bond_opt_value *newval); +static int bond_option_peer_notif_delay_set(struct bonding *bond, + const struct bond_opt_value *newval); static int bond_option_use_carrier_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_arp_interval_set(struct bonding *bond, @@ -428,6 +430,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .desc = "Number of peer notifications to send on failover event", .values = bond_num_peer_notif_tbl, .set = bond_option_num_peer_notif_set + }, + [BOND_OPT_PEER_NOTIF_DELAY] = { + .id = BOND_OPT_PEER_NOTIF_DELAY, + .name = "peer_notif_delay", + .desc = "Delay between each peer notification on failover event, in milliseconds", + .values = bond_intmax_tbl, + .set = bond_option_peer_notif_delay_set } }; @@ -850,6 +859,9 @@ static int bond_option_miimon_set(struct bonding *bond, if (bond->params.downdelay) netdev_dbg(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n", bond->params.downdelay * bond->params.miimon); + if (bond->params.peer_notif_delay) + netdev_dbg(bond->dev, "Note: Updating peer_notif_delay (to %d) since it is a multiple of the miimon value\n", + bond->params.peer_notif_delay * bond->params.miimon); if (newval->value && bond->params.arp_interval) { netdev_dbg(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n"); bond->params.arp_interval = 0; @@ -873,52 +885,59 @@ static int bond_option_miimon_set(struct bonding *bond, return 0; } -/* Set up and down delays. These must be multiples of the - * MII monitoring value, and are stored internally as the multiplier. - * Thus, we must translate to MS for the real world. +/* Set up, down and peer notification delays. These must be multiples + * of the MII monitoring value, and are stored internally as the + * multiplier. Thus, we must translate to MS for the real world. 
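
_bond_option_delay_set() above keeps all three delays (updelay, downdelay, peer_notif_delay) in the same internal unit: the value is taken in milliseconds, warned about and rounded down if it is not a multiple of miimon, and stored as value / miimon. A small stand-alone model of that conversion (the numbers are made up):

#include <stdio.h>

int main(void)
{
	int miimon = 100;	/* hypothetical monitoring interval, ms */
	int value  = 250;	/* hypothetical requested delay, ms     */
	int stored;

	if (value % miimon)
		printf("%d ms is not a multiple of miimon (%d), rounded to %d ms\n",
		       value, miimon, (value / miimon) * miimon);

	stored = value / miimon;	/* kept internally as a multiplier: 2 */
	printf("stored=%d, reported=%d ms\n", stored, stored * miimon);
	return 0;
}
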
*/ -static int bond_option_updelay_set(struct bonding *bond, - const struct bond_opt_value *newval) +static int _bond_option_delay_set(struct bonding *bond, + const struct bond_opt_value *newval, + const char *name, + int *target) { int value = newval->value; if (!bond->params.miimon) { - netdev_err(bond->dev, "Unable to set up delay as MII monitoring is disabled\n"); + netdev_err(bond->dev, "Unable to set %s as MII monitoring is disabled\n", + name); return -EPERM; } if ((value % bond->params.miimon) != 0) { - netdev_warn(bond->dev, "up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", + netdev_warn(bond->dev, + "%s (%d) is not a multiple of miimon (%d), value rounded to %d ms\n", + name, value, bond->params.miimon, (value / bond->params.miimon) * bond->params.miimon); } - bond->params.updelay = value / bond->params.miimon; - netdev_dbg(bond->dev, "Setting up delay to %d\n", - bond->params.updelay * bond->params.miimon); + *target = value / bond->params.miimon; + netdev_dbg(bond->dev, "Setting %s to %d\n", + name, + *target * bond->params.miimon); return 0; } +static int bond_option_updelay_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + return _bond_option_delay_set(bond, newval, "up delay", + &bond->params.updelay); +} + static int bond_option_downdelay_set(struct bonding *bond, const struct bond_opt_value *newval) { - int value = newval->value; - - if (!bond->params.miimon) { - netdev_err(bond->dev, "Unable to set down delay as MII monitoring is disabled\n"); - return -EPERM; - } - if ((value % bond->params.miimon) != 0) { - netdev_warn(bond->dev, "down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n", - value, bond->params.miimon, - (value / bond->params.miimon) * - bond->params.miimon); - } - bond->params.downdelay = value / bond->params.miimon; - netdev_dbg(bond->dev, "Setting down delay to %d\n", - bond->params.downdelay * bond->params.miimon); + return _bond_option_delay_set(bond, newval, "down delay", + &bond->params.downdelay); +} - return 0; +static int bond_option_peer_notif_delay_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + int ret = _bond_option_delay_set(bond, newval, + "peer notification delay", + &bond->params.peer_notif_delay); + return ret; } static int bond_option_use_carrier_set(struct bonding *bond, @@ -1074,9 +1093,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, __be32 target; if (newval->string) { - if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) { - netdev_err(bond->dev, "invalid ARP target %pI4 specified\n", - &target); + if (strlen(newval->string) < 1 || + !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) { + netdev_err(bond->dev, "invalid ARP target specified\n"); return ret; } if (newval->string[0] == '+') @@ -1098,13 +1117,6 @@ static int bond_option_arp_validate_set(struct bonding *bond, { netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n", newval->string, newval->value); - - if (bond->dev->flags & IFF_UP) { - if (!newval->value) - bond->recv_probe = NULL; - else if (bond->params.arp_interval) - bond->recv_probe = bond_arp_rcv; - } bond->params.arp_validate = newval->value; return 0; @@ -1446,7 +1458,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, mac = (u8 *)&newval->value; } - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) goto err; netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac); diff --git a/drivers/net/bonding/bond_procfs.c 
b/drivers/net/bonding/bond_procfs.c index 9f7d83e827c37361d9856c5124655f157bf7ef49..fd5c9cbe45b1be1af68252ac48aa68bd52004df3 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -104,6 +104,8 @@ static void bond_info_show_master(struct seq_file *seq) bond->params.updelay * bond->params.miimon); seq_printf(seq, "Down Delay (ms): %d\n", bond->params.downdelay * bond->params.miimon); + seq_printf(seq, "Peer Notification Delay (ms): %d\n", + bond->params.peer_notif_delay * bond->params.miimon); /* ARP information */ diff --git a/drivers/net/bonding/bond_sysctl.c b/drivers/net/bonding/bond_sysctl.c new file mode 100644 index 0000000000000000000000000000000000000000..17404d37a2fd9dca0999c360817bce908aef382c --- /dev/null +++ b/drivers/net/bonding/bond_sysctl.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +int sysctl_bond_broadcast_arp_or_nd __read_mostly; +EXPORT_SYMBOL(sysctl_bond_broadcast_arp_or_nd); + +struct ctl_table_header *bond_broadcast_arp_or_nd_table_header; + +static struct ctl_table bond_broadcast_arp_or_nd_table[] = { + { + .procname = "broadcast_arp_or_nd", + .data = &sysctl_bond_broadcast_arp_or_nd, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; + +void bond_create_sysctl(void) +{ + bond_broadcast_arp_or_nd_table_header = + register_net_sysctl(&init_net, "net/bonding", + bond_broadcast_arp_or_nd_table); +} + +void bond_destroy_sysctl(void) +{ + unregister_net_sysctl_table(bond_broadcast_arp_or_nd_table_header); +} diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 35847250da5aa935a0286b1b28396813dfcc0ee2..0ecafe115f32c829fd6f36fe63fd88a2adddf60d 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -343,6 +343,18 @@ static ssize_t bonding_show_updelay(struct device *d, static DEVICE_ATTR(updelay, 0644, bonding_show_updelay, bonding_sysfs_store_option); +static ssize_t bonding_show_peer_notif_delay(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", + bond->params.peer_notif_delay * bond->params.miimon); +} +static DEVICE_ATTR(peer_notif_delay, 0644, + bonding_show_peer_notif_delay, bonding_sysfs_store_option); + /* Show the LACP interval. 
*/ static ssize_t bonding_show_lacp(struct device *d, struct device_attribute *attr, @@ -734,6 +746,7 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_arp_ip_target.attr, &dev_attr_downdelay.attr, &dev_attr_updelay.attr, + &dev_attr_peer_notif_delay.attr, &dev_attr_lacp_rate.attr, &dev_attr_ad_select.attr, &dev_attr_xmit_hash_policy.attr, diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0cfd7d97f6a901f9552fcc58288df..1bc20de8e57be081eb59f39dea9925da31561ba2 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) { - return sprintf(buf, "%pM\n", slave->perm_hwaddr); + return sprintf(buf, "%*phC\n", + slave->dev->addr_len, + slave->perm_hwaddr); } static SLAVE_ATTR_RO(perm_hwaddr); @@ -110,20 +112,19 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf) } static SLAVE_ATTR_RO(ad_partner_oper_port_state); -static const struct slave_attribute *slave_attrs[] = { - &slave_attr_state, - &slave_attr_mii_status, - &slave_attr_link_failure_count, - &slave_attr_perm_hwaddr, - &slave_attr_queue_id, - &slave_attr_ad_aggregator_id, - &slave_attr_ad_actor_oper_port_state, - &slave_attr_ad_partner_oper_port_state, +static const struct attribute *slave_attrs[] = { + &slave_attr_state.attr, + &slave_attr_mii_status.attr, + &slave_attr_link_failure_count.attr, + &slave_attr_perm_hwaddr.attr, + &slave_attr_queue_id.attr, + &slave_attr_ad_aggregator_id.attr, + &slave_attr_ad_actor_oper_port_state.attr, + &slave_attr_ad_partner_oper_port_state.attr, NULL }; #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr) -#define to_slave(obj) container_of(obj, struct slave, kobj) static ssize_t slave_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -134,43 +135,16 @@ static ssize_t slave_show(struct kobject *kobj, return slave_attr->show(slave, buf); } -static const struct sysfs_ops slave_sysfs_ops = { +const struct sysfs_ops slave_sysfs_ops = { .show = slave_show, }; -static struct kobj_type slave_ktype = { -#ifdef CONFIG_SYSFS - .sysfs_ops = &slave_sysfs_ops, -#endif -}; - int bond_sysfs_slave_add(struct slave *slave) { - const struct slave_attribute **a; - int err; - - err = kobject_init_and_add(&slave->kobj, &slave_ktype, - &(slave->dev->dev.kobj), "bonding_slave"); - if (err) - return err; - - for (a = slave_attrs; *a; ++a) { - err = sysfs_create_file(&slave->kobj, &((*a)->attr)); - if (err) { - kobject_put(&slave->kobj); - return err; - } - } - - return 0; + return sysfs_create_files(&slave->kobj, slave_attrs); } void bond_sysfs_slave_del(struct slave *slave) { - const struct slave_attribute **a; - - for (a = slave_attrs; *a; ++a) - sysfs_remove_file(&slave->kobj, &((*a)->attr)); - - kobject_put(&slave->kobj); + sysfs_remove_files(&slave->kobj, slave_attrs); } diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 433a14b9f731bc5a71ac431b2eb71274c31247da..253a1bbe37e8bd48caf8b59ea8de8a6d3797830e 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void) rtnl_lock(); list_for_each_safe(list_node, n, &cfhsi_list) { cfhsi = list_entry(list_node, struct cfhsi, list); - unregister_netdev(cfhsi->ndev); + unregister_netdevice(cfhsi->ndev); } rtnl_unlock(); } diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 
7cdd0cead693dc5d78565f5fd988c6c25c7a2740..ee84f63e6e3138531940c24431dd2eb4fbab97ac 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -158,6 +158,7 @@ source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/peak_canfd/Kconfig" +source "drivers/net/can/phytium/Kconfig" source "drivers/net/can/rcar/Kconfig" source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/softing/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 44922bf29b6a0653ea1eda6ad96c30b3a645fbaa..deea03f1deada75f749d003808cf631043488df7 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o obj-$(CONFIG_CAN_VXCAN) += vxcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o -obj-$(CONFIG_CAN_DEV) += can-dev.o -can-dev-y += dev.o -can-dev-y += rx-offload.o - -can-dev-$(CONFIG_CAN_LEDS) += led.o - +obj-y += dev/ obj-y += rcar/ obj-y += spi/ obj-y += usb/ @@ -33,5 +28,5 @@ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o obj-$(CONFIG_PCH_CAN) += pch_can.o - +obj-$(CONFIG_CAN_PHYTIUM) += phytium/ subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 606b7d8ffe138f880bfc830eff10b84ba6a01cfe..24c6015f6c92bcb4bc3e604d78562010d49dc4fd 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -52,6 +52,7 @@ #define CONTROL_EX_PDR BIT(8) /* control register */ +#define CONTROL_SWR BIT(15) #define CONTROL_TEST BIT(7) #define CONTROL_CCE BIT(6) #define CONTROL_DISABLE_AR BIT(5) @@ -97,6 +98,9 @@ #define BTR_TSEG2_SHIFT 12 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) +/* interrupt register */ +#define INT_STS_PENDING 0x8000 + /* brp extension register */ #define BRP_EXT_BRPE_MASK 0x0f #define BRP_EXT_BRPE_SHIFT 0 @@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev) IF_MCONT_RCV_EOB); } +static int c_can_software_reset(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + int retry = 0; + + if (priv->type != BOSCH_D_CAN) + return 0; + + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT); + while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) { + msleep(20); + if (retry++ > 100) { + netdev_err(dev, "CCTRL: software reset failed\n"); + return -EIO; + } + } + + return 0; +} + /* * Configure C_CAN chip: * - enable/disable auto-retransmission @@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev) static int c_can_chip_config(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + int err; + + err = c_can_software_reset(dev); + if (err) + return err; /* enable automatic retransmission */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); @@ -1029,10 +1058,16 @@ static int c_can_poll(struct napi_struct *napi, int quota) u16 curr, last = priv->last_status; int work_done = 0; - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); - /* Ack status on C_CAN. D_CAN is self clearing */ - if (priv->type != BOSCH_D_CAN) - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + /* Only read the status register if a status interrupt was pending */ + if (atomic_xchg(&priv->sie_pending, 0)) { + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); + /* Ack status on C_CAN. 
D_CAN is self clearing */ + if (priv->type != BOSCH_D_CAN) + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + } else { + /* no change detected ... */ + curr = last; + } /* handle state changes */ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { @@ -1083,10 +1118,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); + int reg_int; - if (!priv->read_reg(priv, C_CAN_INT_REG)) + reg_int = priv->read_reg(priv, C_CAN_INT_REG); + if (!reg_int) return IRQ_NONE; + /* save for later use */ + if (reg_int & INT_STS_PENDING) + atomic_set(&priv->sie_pending, 1); + /* disable all interrupts and schedule the NAPI */ c_can_irq_control(priv, false); napi_schedule(&priv->napi); diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 8acdc7fa4792f24438cb0cd84d73b44f0c46f7d8..d5567a7c1c6d4652a04f8135b381387e3a2e5304 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -198,6 +198,7 @@ struct c_can_priv { struct net_device *dev; struct device *device; atomic_t tx_active; + atomic_t sie_pending; unsigned long tx_dir; int last_status; u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..cba92e6bcf6f58a68006e5115ae74a5402e9e691 --- /dev/null +++ b/drivers/net/can/dev/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_DEV) += can-dev.o +can-dev-y += dev.o +can-dev-y += rx-offload.o + +can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev/dev.c similarity index 95% rename from drivers/net/can/dev.c rename to drivers/net/can/dev/dev.c index 49163570a63afad2e36777993a57319370d6c8b0..5667f1ebd8e7230c422e1aab73179cad62fff407 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev/dev.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -446,7 +447,11 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, { struct can_priv *priv = netdev_priv(dev); - BUG_ON(idx >= priv->echo_skb_max); + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return; + } /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || @@ -477,6 +482,33 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(can_put_echo_skb); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) +{ + struct can_priv *priv = netdev_priv(dev); + + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return NULL; + } + + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. 
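The comment above relies on struct can_frame and struct canfd_frame sharing their leading layout, so the length byte can always be read through the CAN FD view of the echoed skb. A small stand-alone sketch with simplified copies of the two UAPI structs (the real definitions in linux/can.h carry extra padding and alignment attributes, but the field offsets line up the same way):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct can_frame_s {
    uint32_t can_id;
    uint8_t  can_dlc;
    uint8_t  pad, res0, res1;
    uint8_t  data[8];
};

struct canfd_frame_s {
    uint32_t can_id;
    uint8_t  len;
    uint8_t  flags;
    uint8_t  res0, res1;
    uint8_t  data[64];
};

int main(void)
{
    /* __can_get_echo_skb() casts the stored frame to the CAN FD layout
     * and reads ->len; because the offsets match, this is also correct
     * when the echoed skb holds a classic CAN frame.
     */
    printf("can_dlc offset: %zu\n", offsetof(struct can_frame_s, can_dlc));
    printf("len offset:     %zu\n", offsetof(struct canfd_frame_s, len));
    return 0;
}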
+ */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; + + *len_ptr = len; + priv->echo_skb[idx] = NULL; + + return skb; + } + + return NULL; +} + /* * Get the skb from the stack and loop it back locally * @@ -486,22 +518,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); */ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) { - struct can_priv *priv = netdev_priv(dev); - - BUG_ON(idx >= priv->echo_skb_max); - - if (priv->echo_skb[idx]) { - struct sk_buff *skb = priv->echo_skb[idx]; - struct can_frame *cf = (struct can_frame *)skb->data; - u8 dlc = cf->can_dlc; + struct sk_buff *skb; + u8 len; - netif_rx(priv->echo_skb[idx]); - priv->echo_skb[idx] = NULL; + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; - return dlc; - } + netif_rx(skb); - return 0; + return len; } EXPORT_SYMBOL_GPL(can_get_echo_skb); @@ -534,7 +560,8 @@ static void can_restart(struct net_device *dev) struct can_frame *cf; int err; - BUG_ON(netif_carrier_ok(dev)); + if (netif_carrier_ok(dev)) + netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); /* * No synchronization needed because the device is bus-off and @@ -560,11 +587,12 @@ static void can_restart(struct net_device *dev) priv->can_stats.restarts++; /* Now restart the device */ - err = priv->do_set_mode(dev, CAN_MODE_START); - netif_carrier_on(dev); - if (err) + err = priv->do_set_mode(dev, CAN_MODE_START); + if (err) { netdev_err(dev, "Error %d during restart", err); + netif_carrier_off(dev); + } } static void can_restart_work(struct work_struct *work) @@ -708,11 +736,24 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, struct can_priv *priv; int size; + /* We put the driver's priv, the CAN mid layer priv and the + * echo skb into the netdevice's priv. The memory layout for + * the netdev_priv is like this: + * + * +-------------------------+ + * | driver's priv | + * +-------------------------+ + * | struct can_ml_priv | + * +-------------------------+ + * | array of struct sk_buff | + * +-------------------------+ + */ + + size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv); + if (echo_skb_max) - size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) + + size = ALIGN(size, sizeof(struct sk_buff *)) + echo_skb_max * sizeof(struct sk_buff *); - else - size = sizeof_priv; dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, txqs, rxqs); @@ -722,10 +763,12 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, priv = netdev_priv(dev); priv->dev = dev; + dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN); + if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; priv->echo_skb = (void *)priv + - ALIGN(sizeof_priv, sizeof(struct sk_buff *)); + (size - echo_skb_max * sizeof(struct sk_buff *)); } priv->state = CAN_STATE_STOPPED; @@ -832,6 +875,7 @@ void of_can_transceiver(struct net_device *dev) return; ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); + of_node_put(dn); if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) netdev_warn(dev, "Invalid value for transceiver max bitrate. 
Ignoring bitrate limit.\n"); } @@ -1239,6 +1283,8 @@ int register_candev(struct net_device *dev) return -EINVAL; dev->rtnl_link_ops = &can_link_ops; + netif_carrier_off(dev); + return register_netdev(dev); } EXPORT_SYMBOL_GPL(register_candev); diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/dev/rx-offload.c similarity index 66% rename from drivers/net/can/rx-offload.c rename to drivers/net/can/dev/rx-offload.c index d94dae21682093112ce2037d5672f0a5779d470e..5f7e97d54733c1064583703cd5b90c1f2b0f95d5 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -116,37 +116,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) return cb_b->timestamp - cb_a->timestamp; } -static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +/** + * can_rx_offload_offload_one() - Read one CAN frame from HW + * @offload: pointer to rx_offload context + * @n: number of mailbox to read + * + * The task of this function is to read a CAN frame from mailbox @n + * from the device and return the mailbox's content as a struct + * sk_buff. + * + * If the struct can_rx_offload::skb_queue exceeds the maximal queue + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be + * allocated, the mailbox contents is discarded by reading it into an + * overflow buffer. This way the mailbox is marked as free by the + * driver. + * + * Return: A pointer to skb containing the CAN frame on success. + * + * NULL if the mailbox @n is empty. + * + * ERR_PTR() in case of an error + */ +static struct sk_buff * +can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) { - struct sk_buff *skb = NULL; + struct sk_buff *skb = NULL, *skb_error = NULL; struct can_rx_offload_cb *cb; struct can_frame *cf; int ret; - /* If queue is full or skb not available, read to discard mailbox */ - if (likely(skb_queue_len(&offload->skb_queue) <= - offload->skb_queue_len_max)) + if (likely(skb_queue_len(&offload->skb_queue) < + offload->skb_queue_len_max)) { skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) + skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ + } else { + skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ + } - if (!skb) { + /* If queue is full or skb not available, drop by reading into + * overflow buffer. + */ + if (unlikely(skb_error)) { struct can_frame cf_overflow; u32 timestamp; ret = offload->mailbox_read(offload, &cf_overflow, ×tamp, n); - if (ret) - offload->dev->stats.rx_dropped++; - return NULL; + /* Mailbox was empty. */ + if (unlikely(!ret)) + return NULL; + + /* Mailbox has been read and we're dropping it or + * there was a problem reading the mailbox. + * + * Increment error counters in any case. + */ + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + /* There was a problem reading the mailbox, propagate + * error value. + */ + if (unlikely(ret < 0)) + return ERR_PTR(ret); + + return skb_error; } cb = can_rx_offload_get_cb(skb); ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); - if (!ret) { + + /* Mailbox was empty. */ + if (unlikely(!ret)) { kfree_skb(skb); return NULL; } + /* There was a problem reading the mailbox, propagate error value. */ + if (unlikely(ret < 0)) { + kfree_skb(skb); + + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + return ERR_PTR(ret); + } + + /* Mailbox was read. 
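The kerneldoc above defines a three-way contract for can_rx_offload_offload_one(): a valid skb, NULL for an empty mailbox, or an ERR_PTR() for a read problem, and the FIFO and timestamp loops below consume it accordingly. A user-space sketch of that calling pattern, with toy err_ptr()/is_err() helpers standing in for the kernel macros and a fabricated mailbox state instead of real hardware:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO 4095
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

/* Pretend mailbox reader: >0 means a frame, 0 empty, <0 a read error. */
static void *offload_one(int mailbox_state)
{
    static int frame = 42;

    if (mailbox_state > 0)
        return &frame;                  /* skb carrying the CAN frame */
    if (mailbox_state == 0)
        return NULL;                    /* mailbox empty */
    return err_ptr(mailbox_state);      /* e.g. -ENOBUFS, -ENOMEM */
}

int main(void)
{
    int states[] = { 1, -105, 0 };      /* frame, error, empty */

    /* Mirrors the FIFO loop: skip errors, stop on an empty mailbox. */
    for (int i = 0; i < 3; i++) {
        void *skb = offload_one(states[i]);

        if (is_err(skb)) {
            printf("mailbox %d: error, keep going\n", i);
            continue;
        }
        if (!skb) {
            printf("mailbox %d: empty, stop\n", i);
            break;
        }
        printf("mailbox %d: queued frame %d\n", i, *(int *)skb);
    }
    return 0;
}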
*/ return skb; } @@ -166,8 +224,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen continue; skb = can_rx_offload_offload_one(offload, i); - if (!skb) - break; + if (IS_ERR_OR_NULL(skb)) + continue; __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); } @@ -197,7 +255,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) struct sk_buff *skb; int received = 0; - while ((skb = can_rx_offload_offload_one(offload, 0))) { + while (1) { + skb = can_rx_offload_offload_one(offload, 0); + if (IS_ERR(skb)) + continue; + if (!skb) + break; + skb_queue_tail(&offload->skb_queue, skb); received++; } @@ -209,18 +273,69 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); -int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp) +{ + struct can_rx_offload_cb *cb; + unsigned long flags; + + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) { + kfree_skb(skb); + return -ENOBUFS; + } + + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); + +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + u8 len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + err = can_rx_offload_queue_sorted(offload, skb, timestamp); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); + +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) - return -ENOMEM; + offload->skb_queue_len_max) { + kfree_skb(skb); + return -ENOBUFS; + } skb_queue_tail(&offload->skb_queue, skb); can_rx_offload_schedule(offload); return 0; } -EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 8e972ef0863769e88a2c9d6cec37408d66566292..bfe13c6627bed6af9b5b0618b4fa4e468859fd82 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -135,13 +135,12 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 -#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 -#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 -#define FLEXCAN_IFLAG_MB(x) BIT(x) +#define FLEXCAN_TX_MB 63 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1) +#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f) #define 
FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) @@ -166,7 +165,7 @@ #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) -#define FLEXCAN_TIMEOUT_US (50) +#define FLEXCAN_TIMEOUT_US (250) /* FLEXCAN hardware feature flags * @@ -259,9 +258,7 @@ struct flexcan_priv { struct can_rx_offload offload; struct flexcan_regs __iomem *regs; - struct flexcan_mb __iomem *tx_mb; struct flexcan_mb __iomem *tx_mb_reserved; - u8 tx_mb_idx; u32 reg_ctrl_default; u32 reg_imask1_default; u32 reg_imask2_default; @@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 data; @@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - priv->write(data, &priv->tx_mb->data[0]); + priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[0]); } if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); - priv->write(data, &priv->tx_mb->data[1]); + priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[1]); } can_put_echo_skb(skb, dev, 0); - priv->write(can_id, &priv->tx_mb->can_id); - priv->write(ctrl, &priv->tx_mb->can_ctrl); + priv->write(can_id, ®s->mb[FLEXCAN_TX_MB].can_id); + priv->write(ctrl, ®s->mb[FLEXCAN_TX_MB].can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. @@ -563,9 +561,14 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; bool rx_errors = false, tx_errors = false; + u32 timestamp; + int err; + + timestamp = priv->read(®s->timer) << 16; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) @@ -612,17 +615,24 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; + u32 timestamp; + int err; + + timestamp = priv->read(®s->timer) << 16; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { @@ -652,7 +662,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) @@ -720,9 +732,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, priv->write(BIT(n - 32), ®s->iflag2); } else { priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, 
®s->iflag1); - priv->read(®s->timer); } + /* Read the Free Running Timer. It is optional but recommended + * to unlock Mailbox as soon as possible and make it available + * for reception. + */ + priv->read(®s->timer); + return 1; } @@ -732,9 +749,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) struct flexcan_regs __iomem *regs = priv->regs; u32 iflag1, iflag2; - iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default; - iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default & - ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default & + ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); + iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default; return (u64)iflag2 << 32 | iflag1; } @@ -746,11 +763,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; - u32 reg_iflag1, reg_esr; + u32 reg_iflag2, reg_esr; enum can_state last_state = priv->can.state; - reg_iflag1 = priv->read(®s->iflag1); - /* reception interrupt */ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { u64 reg_iflag; @@ -764,6 +779,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) break; } } else { + u32 reg_iflag1; + + reg_iflag1 = priv->read(®s->iflag1); if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { handled = IRQ_HANDLED; can_rx_offload_irq_offload_fifo(&priv->offload); @@ -779,17 +797,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } + reg_iflag2 = priv->read(®s->iflag2); + /* transmission complete interrupt */ - if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) { + u32 reg_ctrl = priv->read(®s->mb[FLEXCAN_TX_MB].can_ctrl); + handled = IRQ_HANDLED; - stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, + 0, reg_ctrl << 16); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); - priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag1); + ®s->mb[FLEXCAN_TX_MB].can_ctrl); + priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), ®s->iflag2); netif_wake_queue(dev); } @@ -931,15 +954,13 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | - FLEXCAN_MCR_IDAM_C; + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) reg_mcr &= ~FLEXCAN_MCR_FEN; - reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); - } else { - reg_mcr |= FLEXCAN_MCR_FEN | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); - } + else + reg_mcr |= FLEXCAN_MCR_FEN; + netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); priv->write(reg_mcr, ®s->mcr); @@ -982,16 +1003,17 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(reg_ctrl2, ®s->ctrl2); } - /* clear and invalidate all mailboxes first */ - for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { - priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, - ®s->mb[i].can_ctrl); - } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) + for (i = priv->offload.mb_first; i <= 
priv->offload.mb_last; i++) { priv->write(FLEXCAN_MB_CODE_RX_EMPTY, ®s->mb[i].can_ctrl); + } + } else { + /* clear and invalidate unused mailboxes first */ + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) { + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, + ®s->mb[i].can_ctrl); + } } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ @@ -1000,7 +1022,7 @@ static int flexcan_chip_start(struct net_device *dev) /* mark TX mailbox as INACTIVE */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); + ®s->mb[FLEXCAN_TX_MB].can_ctrl); /* acceptance mask/acceptance code (accept everything) */ priv->write(0x0, ®s->rxgmask); @@ -1032,6 +1054,7 @@ static int flexcan_chip_start(struct net_device *dev) reg_mecr = priv->read(®s->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; priv->write(reg_mecr, ®s->mecr); + reg_mecr |= FLEXCAN_MECR_ECCDIS; reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); priv->write(reg_mecr, ®s->mecr); @@ -1355,17 +1378,13 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; - } else { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + else priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; - } - priv->tx_mb = ®s->mb[priv->tx_mb_idx]; - priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - priv->reg_imask2_default = 0; + priv->reg_imask1_default = 0; + priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); priv->offload.mailbox_read = flexcan_mailbox_read; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 9b449400376bc536cd0d53ea4abfe13d14515ba6..deb274a19ba003c9061f127e28ec57521c4f877b 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -822,6 +822,27 @@ static int m_can_poll(struct napi_struct *napi, int quota) if (!irqstatus) goto end; + /* Errata workaround for issue "Needless activation of MRAF irq" + * During frame reception while the MCAN is in Error Passive state + * and the Receive Error Counter has the value MCAN_ECR.REC = 127, + * it may happen that MCAN_IR.MRAF is set although there was no + * Message RAM access failure. + * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated + * The Message RAM Access Failure interrupt routine needs to check + * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127. + * In this case, reset MCAN_IR.MRAF. No further action is required. 
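Going back to the FlexCAN hunks above: the TX mailbox moves from index 8/9 to 63, so its interrupt flag now lives in IFLAG2 rather than IFLAG1, and FLEXCAN_IFLAG_MB() masks the mailbox index down to a bit position within a single 32-bit register. A small sketch of that mapping (the macro value is taken from the hunk above; the helper program itself is illustrative):

#include <stdio.h>

#define BIT(n)              (1u << (n))
#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)   /* as in the FlexCAN hunk */

int main(void)
{
    /* Mailboxes 0..31 map to iflag1, 32..63 to iflag2; with the TX
     * mailbox at index 63 its completion flag is iflag2 bit 31.
     */
    unsigned int mbs[] = { 5, 31, 32, 63 };

    for (int i = 0; i < 4; i++) {
        unsigned int mb = mbs[i];

        printf("MB %2u -> iflag%d bit %2u (mask 0x%08x)\n",
               mb, mb < 32 ? 1 : 2, mb & 0x1f, FLEXCAN_IFLAG_MB(mb));
    }
    return 0;
}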
+ */ + if ((priv->version <= 31) && (irqstatus & IR_MRAF) && + (m_can_read(priv, M_CAN_ECR) & ECR_RP)) { + struct can_berr_counter bec; + + __m_can_get_berr_counter(dev, &bec); + if (bec.rxerr == 127) { + m_can_write(priv, M_CAN_IR, IR_MRAF); + irqstatus &= ~IR_MRAF; + } + } + psr = m_can_read(priv, M_CAN_PSR); if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c1317889d3d8d97858604feeb63fa4f7cec8a368..ced11ea8926984a312d876a99dab13a4ea3d328d 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -703,11 +703,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) cf->data[i + 1] = data_reg >> 8; } - netif_receive_skb(skb); rcv_pkts++; stats->rx_packets++; quota--; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); pch_fifo_thresh(priv, obj_num); obj_num++; diff --git a/drivers/net/can/phytium/Kconfig b/drivers/net/can/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a23216d23c22626a97244e0e48f66411ca4ed1b7 --- /dev/null +++ b/drivers/net/can/phytium/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig CAN_PHYTIUM + tristate "Phytium CAN support" + help + Say Y here if you want support for Phytium CAN controller framework. + This is common support for devices that embed the Phytium CAN IP. + + To compile this driver as a module, choose M here: the module will + be called phytium_can. + +if CAN_PHYTIUM + +config CAN_PHYTIUM_PLATFORM + tristate "Phytium CAN support for io-mapped devices" + depends on HAS_IOMEM + help + Say Y here is you want to support for IO Mapped Phytium CAN controller. + This support is for devices that have the Phytium CAN controller IP + embedded into the device and the IP is IO Mapped to the processor. + + To compile this driver as a module, choose M here: the module will + be called phytium_can_platform. + +config CAN_PHYTIUM_PCI + tristate "Phytium CAN support for PCI devices" + depends on PCI + help + Say Y here is you want to support for Phytium CAN controller connected + to the PCI bus. This support is for devices that have the Phytium CAN + controller IP embedded into a PCI device. + + To compile this driver as a module, choose M here: the module will + be called phytium_can_pci. +endif diff --git a/drivers/net/can/phytium/Makefile b/drivers/net/can/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7ef554fca58e4b5ded60b3bcdb6f9c282d5deda3 --- /dev/null +++ b/drivers/net/can/phytium/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Phytium CAN controller drivers. +# +# + +obj-$(CONFIG_CAN_PHYTIUM) += phytium_can.o +obj-$(CONFIG_CAN_PHYTIUM_PLATFORM) += phytium_can_platform.o +obj-$(CONFIG_CAN_PHYTIUM_PCI) += phytium_can_pci.o diff --git a/drivers/net/can/phytium/phytium_can.c b/drivers/net/can/phytium/phytium_can.c new file mode 100644 index 0000000000000000000000000000000000000000..f01938f01d017e4bb19c90fa6d1296db02b25137 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can.c @@ -0,0 +1,1214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include + +#include "phytium_can.h" + +/* register definition */ +enum phytium_can_reg { + CAN_CTRL = 0x00, /* Global control register */ + CAN_INTR = 0x04, /* Interrupt register */ + CAN_ARB_RATE_CTRL = 0x08, /* Arbitration rate control + register */ + CAN_DAT_RATE_CTRL = 0x0c, /* Data rate control register */ + CAN_ACC_ID0 = 0x10, /* Acceptance identifier0 + register */ + CAN_ACC_ID1 = 0x14, /* Acceptance identifier1 + register */ + CAN_ACC_ID2 = 0x18, /* Acceptance identifier2 + register */ + CAN_ACC_ID3 = 0x1c, /* Acceptance identifier3 + register */ + CAN_ACC_ID0_MASK = 0x20, /* Acceptance identifier0 + mask register */ + CAN_ACC_ID1_MASK = 0x24, /* Acceptance identifier1 + mask register */ + CAN_ACC_ID2_MASK = 0x28, /* Acceptance identifier2 + mask register */ + CAN_ACC_ID3_MASK = 0x2c, /* Acceptance identifier3 + mask register */ + CAN_XFER_STS = 0x30, /* Transfer status register */ + CAN_ERR_CNT = 0x34, /* Error counter register */ + CAN_FIFO_CNT = 0x38, /* FIFO counter register */ + CAN_DMA_CTRL = 0x3c, /* DMA request control + register */ + CAN_XFER_EN = 0x40, /* Transfer enable register */ + CAN_INTR1 = 0x44, /* Interrupt register 1 */ + CAN_FRM_INFO = 0x48, /* Frame valid number + register */ + CAN_TIME_OUT = 0x4c, /* Timeout register */ + CAN_TIME_OUT_CNT = 0x50, /* Timeout counter register */ + CAN_INTR2 = 0x54, /* Interrupt register 2 */ + CAN_TX_FIFO = 0x100, /* TX FIFO shadow register */ + CAN_RX_FIFO = 0x200, /* RX FIFO shadow register */ + CAN_RX_INFO_FIFO = 0x300, /* RX information FIFO shadow + register */ + CAN_PIDR4 = 0xfd0, /* Peripheral Identification + Register 4 */ + CAN_PIDR0 = 0xfe0, /* Peripheral Identification + Register 0 */ + CAN_PIDR1 = 0xfe4, /* Peripheral Identification + Register 1 */ + CAN_PIDR2 = 0xfe8, /* Peripheral Identification + Register 2 */ + CAN_PIDR3 = 0xfec, /* Peripheral Identification + Register 3 */ + CAN_CIDR0 = 0xff0, /* Component Identification + Register 0 */ + CAN_CIDR1 = 0xff4, /* Component Identification + Register 1 */ + CAN_CIDR2 = 0xff8, /* Component Identification + Register 2 */ + CAN_CIDR3 = 0xffc, /* Component Identification + Register 3 */ +}; + +/* Global control register (CTRL) */ +#define CTRL_XFER BIT(0) /* Transfer enable */ +#define CTRL_TXREQ BIT(1) /* Transmit request */ +#define CTRL_AIME BIT(2) /* Acceptance identifier mask enable */ +#define CTRL_TTS BIT(3) /* Transmit trigger strategy */ +#define CTRL_RST BIT(7) /* Write 1 to soft reset and self + clear */ +#define CTRL_RFEIDF BIT(8) /* Allow RX frame end interrupt during + ID filtered frame */ +#define CTRL_RFEDT BIT(9) /* Allow RX frame end interrupt during + TX frame */ +#define CTRL_IOF BIT(10) /* Ignore overload flag internally */ +#define CTRL_FDCRC BIT(11) /* CANFD CRC mode */ + +/* Interrupt register (INTR) */ +#define INTR_BOIS BIT(0) /* Bus off interrupt status */ +#define INTR_PWIS BIT(1) /* Passive warning interrupt status */ +#define INTR_PEIS BIT(2) /* Passive error interrupt status */ +#define INTR_RFIS BIT(3) /* RX FIFO full interrupt status */ +#define INTR_TFIS BIT(4) /* TX FIFO empty interrupt status */ +#define INTR_REIS BIT(5) /* RX frame end interrupt status */ +#define INTR_TEIS BIT(6) /* TX frame end interrupt status */ +#define INTR_EIS BIT(7) /* Error interrupt status */ +#define INTR_BOIE BIT(8) /* Bus off interrupt enable */ +#define INTR_PWIE BIT(9) /* Passive warning interrupt enable */ +#define INTR_PEIE BIT(10) /* Passive error interrupt enable */ +#define INTR_RFIE BIT(11) /* RX FIFO full interrupt enable */ +#define 
INTR_TFIE BIT(12) /* TX FIFO empty interrupt enable */ +#define INTR_REIE BIT(13) /* RX frame end interrupt enable */ +#define INTR_TEIE BIT(14) /* TX frame end interrupt enable */ +#define INTR_EIE BIT(15) /* Error interrupt enable */ +#define INTR_BOIC BIT(16) /* Bus off interrupt clear */ +#define INTR_PWIC BIT(17) /* Passive warning interrupt clear */ +#define INTR_PEIC BIT(18) /* Passive error interrupt clear */ +#define INTR_RFIC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR_TFIC BIT(20) /* TX FIFO empty interrupt clear */ +#define INTR_REIC BIT(21) /* RX frame end interrupt clear */ +#define INTR_TEIC BIT(22) /* TX frame end interrupt clear */ +#define INTR_EIC BIT(23) /* Error interrupt clear */ + +#define INTR_STATUS_MASK (INTR_BOIS | INTR_PWIS | INTR_PEIS | INTR_RFIS | \ + INTR_TFIS | INTR_REIS | INTR_TEIS | INTR_EIS) +#define INTR_EN_MASK (INTR_RFIE | INTR_REIE | INTR_TEIE) +#define INTR_CLEAR_MASK (INTR_BOIC | INTR_PWIC | INTR_PEIC | INTR_RFIC | \ + INTR_TFIC | INTR_REIC | INTR_TEIC | INTR_EIC) + +/* Arbitration rate control register (ARB_RATE_CTRL) */ +#define ARB_RATE_CTRL_ARJW GENMASK(1, 0) /* Arbitration field resync + jump width */ +#define ARB_RATE_CTRL_APRS GENMASK(4, 2) /* Arbitration field + propagation segment */ +#define ARB_RATE_CTRL_APH1S GENMASK(7, 5) /* Arbitration field phase1 + segment */ +#define ARB_RATE_CTRL_APH2S GENMASK(10, 8) /* Arbitration field phase2 + segment */ +#define ARB_RATE_CTRL_APD GENMASK(28, 16) /* Arbitration field prescaler + divider */ + +/* Data rate control register (DAT_RATE_CTRL) */ +#define DAT_RATE_CTRL_DRJW GENMASK(1, 0) /* Data field resync jump + width */ +#define DAT_RATE_CTRL_DPRS GENMASK(4, 2) /* Data field propagation + segment */ +#define DAT_RATE_CTRL_DPH1S GENMASK(7, 5) /* Data field phase1 segment */ +#define DAT_RATE_CTRL_DPH2S GENMASK(10, 8) /* Data field phase2 segment */ +#define DAT_RATE_CTRL_DPD GENMASK(28, 16) /* Data field prescaler + divider */ + +/* Acceptance identifierX register (ACC_IDX) */ +#define ACC_IDX_AID_MASK GENMASK(28, 0) /* Acceptance identifier */ + +/* Acceptance identifier0 mask register (ACC_ID0_MASK) */ +#define ACC_IDX_MASK_AID_MASK GENMASK(28, 0) /* Acceptance identifier + mask */ + +/* Transfer status register (XFER_STS) */ +#define XFER_STS_FRAS GENMASK(2, 0) /* Frame status */ +#define XFER_STS_FIES GENMASK(7, 3) /* Field status */ +#define XFER_STS_FIES_IDLE (0x0) /* idle */ +#define XFER_STS_FIES_ARBITRATION (0x1) /* arbitration */ +#define XFER_STS_FIES_TX_CTRL (0x2) /* transmit control */ +#define XFER_STS_FIES_TX_DATA (0x3) /* transmit data */ +#define XFER_STS_FIES_TX_CRC (0x4) /* transmit crc */ +#define XFER_STS_FIES_TX_FRM (0x5) /* transmit frame */ +#define XFER_STS_FIES_RX_CTRL (0x6) /* receive control */ +#define XFER_STS_FIES_RX_DATA (0x7) /* receive data */ +#define XFER_STS_FIES_RX_CRC (0x8) /* receive crc */ +#define XFER_STS_FIES_RX_FRM (0x9) /* receive frame */ +#define XFER_STS_FIES_INTERMISSION (0xa) /* intermission */ +#define XFER_STS_FIES_TX_SUSPD (0xb) /* transmit suspend */ +#define XFER_STS_FIES_BUS_IDLE (0xc) /* bus idle */ +#define XFER_STS_FIES_OVL_FLAG (0xd) /* overload flag */ +#define XFER_STS_FIES_OVL_DLM (0xe) /* overload delimiter */ +#define XFER_STS_FIES_ERR_FLAG (0xf) /* error flag */ +#define XFER_STS_FIES_ERR_DLM (0x10) /* error delimiter */ +#define XFER_STS_FIES_BUS_OFF (0x11) /* bus off */ +#define XFER_STS_TS BIT(8) /* Transmit status */ +#define XFER_STS_RS BIT(9) /* Receive status */ +#define XFER_STS_XFERS BIT(10) /* Transfer status 
*/ + +/* Error counter register (ERR_CNT) */ +#define ERR_CNT_REC GENMASK(8, 0) /* Receive error counter */ +#define ERR_CNT_TEC GENMASK(24, 16) /* Transmit error counter */ + +/* FIFO counter register (FIFO_CNT) */ +#define FIFO_CNT_RFN GENMASK(6, 0) /* Receive FIFO valid data + number */ +#define FIFO_CNT_TFN GENMASK(22, 16) /* Transmit FIFO valid data + number */ + +/* DMA request control register (DMA_CTRL) */ +#define DMA_CTRL_RFTH GENMASK(5, 0) /* Receive FIFO DMA request + threshold */ +#define DMA_CTRL_RFRE BIT(6) /* Receive FIFO DMA request + enable */ +#define DMA_CTRL_TFTH GENMASK(21, 16) /* Transmit FIFO DMA request + threshold */ +#define DMA_CTRL_TFRE BIT(22) /* Transmit FIFO DMA request + enable */ + +/* Transfer enable register (XFER_EN) */ +#define XFER_EN_XFER BIT(0) /* Transfer enable */ + +/* Interrupt register 1 (INTR1) */ +#define INTR1_RF1IS BIT(0) /* RX FIFO 1/4 interrupt status */ +#define INTR1_RF2IS BIT(1) /* RX FIFO 1/2 interrupt status */ +#define INTR1_RF3IS BIT(2) /* RX FIFO 3/4 interrupt status */ +#define INTR1_RF4IS BIT(3) /* RX FIFO full interrupt status */ +#define INTR1_TF1IS BIT(4) /* TX FIFO 1/4 interrupt status */ +#define INTR1_TF2IS BIT(5) /* TX FIFO 1/2 interrupt status */ +#define INTR1_TF3IS BIT(6) /* TX FIFO 3/4 interrupt status */ +#define INTR1_TF4IS BIT(7) /* TX FIFO empty interrupt status */ +#define INTR1_RF1IE BIT(8) /* RX FIFO 1/4 interrupt enable */ +#define INTR1_RF2IE BIT(9) /* RX FIFO 1/2 interrupt enable */ +#define INTR1_RF3IE BIT(10) /* RX FIFO 3/4 interrupt enable */ +#define INTR1_RF4IE BIT(11) /* RX FIFO full interrupt enable */ +#define INTR1_TF1IE BIT(12) /* TX FIFO 1/4 interrupt enable */ +#define INTR1_TF2IE BIT(13) /* TX FIFO 1/2 interrupt enable */ +#define INTR1_TF3IE BIT(14) /* TX FIFO 3/4 interrupt enable */ +#define INTR1_TF4IE BIT(15) /* TX FIFO empty interrupt enable */ +#define INTR1_RF1IC BIT(16) /* RX FIFO 1/4 interrupt clear */ +#define INTR1_RF2IC BIT(17) /* RX FIFO 1/2 interrupt clear */ +#define INTR1_RF3IC BIT(18) /* RX FIFO 3/4 interrupt clear */ +#define INTR1_RF4IC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR1_TF1IC BIT(20) /* TX FIFO 1/4 interrupt clear */ +#define INTR1_TF2IC BIT(21) /* TX FIFO 1/2 interrupt clear */ +#define INTR1_TF3IC BIT(22) /* TX FIFO 3/4 interrupt clear */ +#define INTR1_TF4IC BIT(23) /* TX FIFO empty interrupt clear */ +#define INTR1_RF1RIS BIT(24) /* RX FIFO 1/4 raw interrupt status */ +#define INTR1_RF2RIS BIT(25) /* RX FIFO 1/2 raw interrupt status */ +#define INTR1_RF3RIS BIT(26) /* RX FIFO 3/4 raw interrupt status */ +#define INTR1_RF4RIS BIT(27) /* RX FIFO full raw interrupt status */ +#define INTR1_TF1RIS BIT(28) /* TX FIFO 1/4 raw interrupt status */ +#define INTR1_TF2RIS BIT(29) /* TX FIFO 1/2 raw interrupt status */ +#define INTR1_TF3RIS BIT(30) /* TX FIFO 3/4 raw interrupt status */ +#define INTR1_TF4RIS BIT(31) /* TX FIFO empty raw interrupt status */ + +/* Frame valid number register (FRM_INFO) */ +#define FRM_INFO_RXFC GENMASK(5, 0) /* Valid frame number in RX FIFO */ +#define FRM_INFO_SSPD GENMASK(31, 16) /* Secondary sample point delay */ + +/* Interrupt register 2 (INTR2) */ +#define INTR2_TOIS BIT(0) /* RX FIFO time out interrupt status */ +#define INTR2_TOIM BIT(8) /* RX FIFO time out interrupt mask */ +#define INTR2_TOIC BIT(16) /* RX FIFO time out interrupt clear */ +#define INTR2_TORIS BIT(24) /* RX FIFO time out raw interrupt status */ + +/* RX information FIFO shadow register (RX_INFO_FIFO) */ +#define RX_INFO_FIFO_WNORF GENMASK(4, 0) /* Word 
(4-byte) number of + current receive frame */ +#define RX_INFO_FIFO_RORF BIT(5) /* RTR value of current + receive frame */ +#define RX_INFO_FIFO_FORF BIT(6) /* FDF value of current + receive frame */ +#define RX_INFO_FIFO_IORF BIT(7) /* IDE value of current + receive frame */ + +/* Arbitration Bits */ +#define CAN_ID1_MASK GENMASK(31, 21) /* Base identifier */ +/* Standard Remote Transmission Request */ +#define CAN_ID1_RTR_MASK BIT(20) +/* Extended Substitute remote TXreq */ +#define CAN_ID2_SRR_MASK BIT(20) +#define CAN_IDE_MASK BIT(19) /* Identifier extension flag */ +#define CAN_ID2_MASK GENMASK(18, 1) /* Identifier extension */ +/* Extended frames remote TX request */ +#define CAN_ID2_RTR_MASK BIT(0) +#define CAN_ID1_FDF_MASK BIT(18) +#define CAN_ID1_DLC_MASK GENMASK(17, 14) +#define CANFD_ID1_BRS_MASK BIT(16) +#define CANFD_ID1_ESI_MASK BIT(15) +#define CANFD_ID1_DLC_MASK GENMASK(14, 11) + +#define CAN_ID2_FDF_MASK BIT(31) +#define CAN_ID2_DLC_MASK GENMASK(29, 26) +#define CANFD_ID2_BRS_MASK BIT(29) +#define CANFD_ID2_ESI_MASK BIT(28) +#define CANFD_ID2_DLC_MASK GENMASK(27, 24) + +#define CAN_ID1_DLC_OFF 14 +#define CANFD_ID1_DLC_OFF 11 +#define CAN_ID2_DLC_OFF 26 +#define CANFD_ID2_DLC_OFF 24 + +#define CAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */ +#define CAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ +#define CAN_IDR_SDLC_SHIFT 14 +#define CAN_IDR_EDLC_SHIFT 26 + +/* CANFD Standard msg padding 1 */ +#define CANFD_IDR_PAD_MASK 0x000007FF +#define CAN_IDR_PAD_MASK 0x00003FFF /* Standard msg padding 1 */ + +/** + * phytium_can_set_reg_bits - set a bit value in the device register + * @cdev: Driver private data structure + * @reg: Register offset + * @bs: The bit mask + * + * Set the given bit mask in the selected CAN register. + */ +static void +phytium_can_set_reg_bits(const struct phytium_can_dev *cdev, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(cdev->base + reg); + + val |= bs; + writel(val, cdev->base + reg); +} + +/** + * phytium_can_clr_reg_bits - clear a bit value in the device register + * @cdev: Driver private data structure + * @reg: Register offset + * @bs: The bit mask + * + * Clear the given bit mask in the selected CAN register. + */ +static void +phytium_can_clr_reg_bits(const struct phytium_can_dev *cdev, + enum phytium_can_reg reg, u32 bs) +{ + u32 val = readl(cdev->base + reg); + + val &= ~bs; + writel(val, cdev->base + reg); +} + +static inline u32 phytium_can_read(const struct phytium_can_dev *cdev, enum + phytium_can_reg reg) +{ + return readl(cdev->base + reg); +} + +static inline void phytium_can_write(const struct phytium_can_dev *cdev, enum + phytium_can_reg reg, u32 val) +{ + writel(val, cdev->base + reg); +} + +static inline void phytium_can_enable_all_interrupts(struct phytium_can_dev + *cdev) +{ + phytium_can_write(cdev, CAN_INTR, INTR_EN_MASK); +} + +static inline void phytium_can_disable_all_interrupt(struct phytium_can_dev + *cdev) +{ + phytium_can_write(cdev, CAN_INTR, 0x0); +} + +static int phytium_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + bec->rxerr = phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_REC; + bec->txerr = (phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_TEC) >> 16; + + return 0; +} + +static int phytium_can_read_fifo(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct phytium_can_dev *cdev = netdev_priv(dev); + struct 
canfd_frame *cf; + struct sk_buff *skb; + u32 id, dlc, i; + + /* Read the frame header from FIFO */ + id = phytium_can_read(cdev, CAN_RX_FIFO); + id = be32_to_cpup(&id); + if (id & CAN_IDE_MASK) { + /* Received an extended frame */ + dlc = phytium_can_read(cdev, CAN_RX_FIFO); + dlc = be32_to_cpup(&dlc); + if (dlc & CAN_ID2_FDF_MASK) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + if (dlc & CAN_ID2_FDF_MASK) { + /* CAN FD extended frame */ + if (dlc & CANFD_ID2_BRS_MASK) + cf->flags |= CANFD_BRS; + if (dlc & CANFD_ID2_ESI_MASK) + cf->flags |= CANFD_ESI; + cf->len = can_dlc2len((dlc & CANFD_ID2_DLC_MASK) >> + CANFD_ID2_DLC_OFF); + } else { + /* CAN extended frame */ + cf->len = get_can_dlc((dlc & CAN_ID2_DLC_MASK) >> + CAN_ID2_DLC_OFF); + } + + cf->can_id = (id & CAN_ID1_MASK) >> 3; + cf->can_id |= (id & CAN_ID2_MASK) >> 1; + cf->can_id |= CAN_EFF_FLAG; + + if (id & CAN_ID2_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } else { + /* Received a standard frame */ + if (id & CAN_ID1_FDF_MASK) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + stats->rx_dropped++; + return 0; + } + + if (id & CAN_ID1_FDF_MASK) { + /* CAN FD extended frame */ + if (id & CANFD_ID1_BRS_MASK) + cf->flags |= CANFD_BRS; + if (id & CANFD_ID1_ESI_MASK) + cf->flags |= CANFD_ESI; + cf->len = can_dlc2len((id & CANFD_ID1_DLC_MASK) >> + CANFD_ID1_DLC_OFF); + } else { + /* CAN extended frame */ + cf->len = get_can_dlc((id & CAN_ID1_DLC_MASK) >> + CAN_ID1_DLC_OFF); + } + + cf->can_id = (id & CAN_ID1_MASK) >> 21; + + if (id & CAN_ID1_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + } + + if (!(cf->can_id & CAN_RTR_FLAG)) + /* Receive data frames */ + for (i = 0; i < cf->len; i += 4) + *(__be32 *)(cf->data + i) = phytium_can_read(cdev, + CAN_RX_FIFO); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + netif_receive_skb(skb); + + return 1; +} + +static int phytium_can_do_rx_poll(struct net_device *dev, int quota) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 rxfs, pkts = 0; + int isr; + + isr = cdev->isr; + + rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN; + if (!rxfs) { + netdev_dbg(dev, "no messages in RX FIFO\n"); + return 0; + } + + while ((rxfs != 0) && (quota > 0)) { + if (isr & INTR_REIS) { + pkts += phytium_can_read_fifo(dev); + quota--; + } else { + break; + } + rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN; + netdev_dbg(dev, "Next received %d frame again.\n", rxfs); + } + + if (pkts) + can_led_event(dev, CAN_LED_EVENT_RX); + + return pkts; +} + +static int phytium_can_rx_handler(struct net_device *dev, int quota) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + int work_done = 0; + u32 isr; + + isr = cdev->isr | phytium_can_read(cdev, CAN_INTR); + if (!isr) + goto end; + + /* Handle RX IRQ */ + if (isr & INTR_REIS) { + int rx_work_or_err; + + rx_work_or_err = phytium_can_do_rx_poll(dev, (quota - + work_done)); + if (rx_work_or_err < 0) + return rx_work_or_err; + + work_done += rx_work_or_err; + } + +end: + return 0; +} + +static int phytium_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct phytium_can_dev *cdev = netdev_priv(dev); + int work_done; + + netdev_dbg(dev, "The receive processing is going on !\n"); + + work_done = phytium_can_rx_handler(dev, quota); + + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read 
failure) + */ + if (work_done >= 0 && work_done < quota) { + napi_complete_done(napi, work_done); + phytium_can_enable_all_interrupts(cdev); + } + + return work_done; +} + +static void phytium_can_write_frame(struct phytium_can_dev *cdev) +{ + struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; + struct net_device *dev = cdev->net; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb = cdev->tx_skb; + u32 i, id, dlc = 0, frame_head[2] = {0, 0}; + u32 data_len; + + data_len = can_len2dlc(cf->len); + cdev->tx_skb = NULL; + + phytium_can_clr_reg_bits(cdev, CAN_CTRL, CTRL_XFER); + + /* Watch the bit sequence carefully */ + if (cf->can_id & CAN_EFF_FLAG) { + /* Extended CAN ID format */ + id = ((cf->can_id & CAN_EFF_MASK) << 1) & CAN_ID2_MASK; + id |= (((cf->can_id & CAN_EFF_MASK) >> + (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << + CAN_IDR_ID1_SHIFT) & CAN_ID1_MASK; + + /* The substitute remote TX request bit should be "1" + * for extended frames as in the Phytium CAN datasheet + */ + id |= CAN_IDE_MASK | CAN_ID2_SRR_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Extended frames remote TX request */ + id |= CAN_ID2_RTR_MASK; + if ((cdev->can.ctrlmode & CAN_CTRLMODE_FD) && + can_is_canfd_skb(skb)) + dlc = data_len << CANFD_ID2_DLC_OFF; + else + dlc = data_len << CAN_ID2_DLC_OFF; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + dlc |= CAN_ID2_FDF_MASK; + if (cf->flags & CANFD_BRS) + dlc |= CANFD_ID2_BRS_MASK; + if (cf->flags & CANFD_ESI) + dlc |= CANFD_ID2_ESI_MASK; + } + + frame_head[0] = cpu_to_be32p(&id); + frame_head[1] = cpu_to_be32p(&dlc); + + /* Write the Frame to Phytium CAN TX FIFO */ + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]); + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[1]); + netdev_dbg(dev, "Write arbitration field [0]:0x%x [1]:0x%x\n", + frame_head[0], frame_head[1]); + } else { + /* Standard CAN ID format */ + id = ((cf->can_id & CAN_SFF_MASK) << CAN_IDR_ID1_SHIFT) + & CAN_ID1_MASK; + + if (cf->can_id & CAN_RTR_FLAG) + /* Standard frames remote TX request */ + id |= CAN_ID1_RTR_MASK; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) + dlc = (data_len << CANFD_ID1_DLC_OFF) + | CANFD_IDR_PAD_MASK; + else + dlc = (data_len << CAN_ID1_DLC_OFF) | CAN_IDR_PAD_MASK; + + id |= dlc; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + id |= CAN_ID1_FDF_MASK; + if (cf->flags & CANFD_BRS) + id |= CANFD_ID1_BRS_MASK; + if (cf->flags & CANFD_ESI) + id |= CANFD_ID1_ESI_MASK; + } + + frame_head[0] = cpu_to_be32p(&id); + /* Write the Frame to Phytium CAN TX FIFO */ + phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]); + netdev_dbg(dev, "Write arbitration field [0] 0x%x\n", + frame_head[0]); + } + + if (!(cf->can_id & CAN_RTR_FLAG)) { + netdev_dbg(dev, "Write CAN data frame\n"); + for (i = 0; i < cf->len; i += 4) { + phytium_can_write(cdev, CAN_TX_FIFO, + *(__be32 *)(cf->data + i)); + netdev_dbg(dev, "[%d]:%x\n", i, + *(__be32 *)(cf->data + i)); + } + } + + stats->tx_bytes += cf->len; + can_put_echo_skb(skb, dev, cdev->tx_head % cdev->tx_max); + cdev->tx_head++; + + netif_stop_queue(dev); + /* triggers transmission */ + phytium_can_set_reg_bits(cdev, CAN_CTRL, CTRL_TXREQ | CTRL_XFER); + + netdev_dbg(dev, "Trigger send message!\n"); +} + +static netdev_tx_t phytium_can_tx_handler(struct phytium_can_dev *cdev) +{ + struct net_device *dev = cdev->net; + u32 tx_fifo_used; + + /* Check if the TX buffer is full */ + tx_fifo_used = (phytium_can_read(cdev, CAN_FIFO_CNT) & + FIFO_CNT_TFN) >> 16; + if (tx_fifo_used == cdev->tx_max) { + netif_stop_queue(dev); + 
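For reference, the standard-frame branch of phytium_can_write_frame() above packs the first TX FIFO word as: base ID in bits 31:21, RTR in bit 20, the DLC code at bit 14 and all-ones padding below it, then byte-swaps the result. A stand-alone sketch of that packing, reusing the mask values defined earlier in this file (the example ID, DLC and helper name are made up):

#include <stdint.h>
#include <stdio.h>

/* Field placement copied from the driver's arbitration-bit defines. */
#define CAN_ID1_MASK      0xffe00000u   /* GENMASK(31, 21), base ID   */
#define CAN_ID1_RTR_MASK  (1u << 20)    /* standard remote request    */
#define CAN_ID1_DLC_OFF   14
#define CAN_IDR_PAD_MASK  0x00003fffu   /* standard frame padding     */
#define CAN_SFF_MASK      0x000007ffu

/* Pack the first TX FIFO word for a classic standard frame, the way
 * phytium_can_write_frame() does before byte-swapping it.
 */
static uint32_t pack_std_head(uint32_t can_id, int rtr, uint32_t dlc)
{
    uint32_t id = (can_id << 21) & CAN_ID1_MASK;

    if (rtr)
        id |= CAN_ID1_RTR_MASK;

    return id | (dlc << CAN_ID1_DLC_OFF) | CAN_IDR_PAD_MASK;
}

int main(void)
{
    uint32_t head = pack_std_head(0x123 & CAN_SFF_MASK, 0, 8);

    /* The driver stores this big-endian (cpu_to_be32) into CAN_TX_FIFO. */
    printf("arbitration word: 0x%08x\n", head);
    return 0;
}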
netdev_err(dev, "BUG!, TX FIFO full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + if (cdev->tx_head == cdev->tx_tail) { + cdev->tx_head = 0; + cdev->tx_tail = 0; + } + + phytium_can_write_frame(cdev); + + return NETDEV_TX_OK; +} + +/** + * phytium_can_tx_interrupt - Tx Done Isr + * @ndev: net_device pointer + * @isr: Interrupt status register value + */ +static void phytium_can_tx_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_dev *cdev = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + + while ((cdev->tx_head - cdev->tx_tail > 0) && (isr & INTR_TEIS)) { + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC | INTR_REIC); + can_get_echo_skb(ndev, cdev->tx_tail % cdev->tx_max); + cdev->tx_tail++; + stats->tx_packets++; + isr = (phytium_can_read(cdev, CAN_INTR) & INTR_STATUS_MASK); + } + + netdev_dbg(ndev, "Finish transform packets %lu\n", stats->tx_packets); + netdev_dbg(ndev, "\n-------------------\n"); + can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); +} + +static void phytium_can_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct phytium_can_dev *cdev = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 txerr = 0, rxerr = 0; + + skb = alloc_can_err_skb(ndev, &cf); + + rxerr = phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_REC; + txerr = ((phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_TEC) >> 16); + + if (isr & INTR_BOIS) { + netdev_dbg(ndev, "bus_off %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_BUS_OFF; + cdev->can.can_stats.bus_off++; + /* Leave device in Config Mode in bus-off state */ + phytium_can_write(cdev, CAN_CTRL, CTRL_RST); + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + } else if ((isr & INTR_PEIS) == INTR_PEIS) { + netdev_dbg(ndev, "error_passive %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_ERROR_PASSIVE; + cdev->can.can_stats.error_passive++; + /* Clear interrupt condition */ + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } else if (isr & INTR_PWIS) { + netdev_dbg(ndev, "error_warning %s: txerr :%u rxerr :%u\n", + __func__, txerr, rxerr); + cdev->can.state = CAN_STATE_ERROR_WARNING; + cdev->can.can_stats.error_warning++; + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } + + /* Check for RX FIFO Overflow interrupt */ + if (isr & INTR_RFIS) { + stats->rx_over_errors++; + stats->rx_errors++; + + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/** + * phytium_can_isr - CAN Isr + * @irq: irq number + * @dev_id: device id poniter + * + * This is the phytium CAN Isr. It checks for the type of interrupt + * and invokes the corresponding ISR. 
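phytium_can_err_interrupt() above maps the controller's bus-off, passive and warning interrupt bits onto the corresponding CAN error states and reports the error counters. For orientation, a sketch of the generic CAN fault-confinement thresholds those states correspond to; this is the standard rule used by the CAN core, not Phytium-specific code:

#include <stdio.h>

/* Classic CAN fault-confinement thresholds (ISO 11898-1):
 * warning at 96, error-passive at 128, bus-off when TEC reaches 256.
 */
static const char *can_state(unsigned int tec, unsigned int rec)
{
    if (tec >= 256)
        return "bus-off";
    if (tec >= 128 || rec >= 128)
        return "error-passive";
    if (tec >= 96 || rec >= 96)
        return "error-warning";
    return "error-active";
}

int main(void)
{
    unsigned int samples[][2] = { {0, 0}, {97, 3}, {12, 130}, {256, 0} };

    for (int i = 0; i < 4; i++)
        printf("TEC=%3u REC=%3u -> %s\n",
               samples[i][0], samples[i][1],
               can_state(samples[i][0], samples[i][1]));
    return 0;
}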
+ * + * Return: + * * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise + */ +static irqreturn_t phytium_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 isr; + + /* Get the interrupt status */ + isr = phytium_can_read(cdev, CAN_INTR) & INTR_STATUS_MASK; + if (!isr) + return IRQ_NONE; + + /* Check for FIFO full interrupt and alarm */ + if ((isr & INTR_RFIS)) { + netdev_dbg(dev, "rx_fifo is full!.\n"); + isr &= (~INTR_RFIS); + phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_RFIE); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_RFIC); + napi_schedule(&cdev->napi); + } + + /* Check for the type of error interrupt and Processing it */ + if (isr & (INTR_EIS | INTR_RFIS | INTR_BOIS | INTR_PEIS)) { + phytium_can_clr_reg_bits(cdev, CAN_INTR, (INTR_EIE + | INTR_RFIE | INTR_BOIE | INTR_PEIE)); + phytium_can_err_interrupt(dev, isr); + phytium_can_set_reg_bits(cdev, CAN_INTR, (INTR_EIC + | INTR_RFIC | INTR_BOIC | INTR_PEIC)); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EN_MASK); + return IRQ_HANDLED; + } + + /* Check for Tx interrupt and Processing it */ + if ((isr & INTR_TEIS)) { + isr &= (~INTR_REIS); + phytium_can_tx_interrupt(dev, isr); + } + + /* Check for the type of receive interrupt and Processing it */ + if (isr & (INTR_REIS)) { + cdev->isr = (isr & INTR_REIS); + phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_REIE); + phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_REIC); + napi_schedule(&cdev->napi); + } + + return IRQ_HANDLED; +} + +/** + * phytium_can_set_bittiming - CAN set bit timing routine + * @dev: Pointer to net_device structure + * + * This is the driver set bittiming routine. + * Return: 0 on success and failure value on error + */ +static int phytium_can_set_bittiming(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + const struct can_bittiming *bt = &cdev->can.bittiming; + const struct can_bittiming *dbt = &cdev->can.data_bittiming; + u32 btr, dbtr; + u32 is_config_mode; + + /** + * Check whether Phytium CAN is in configuration mode. + * It cannot set bit timing if Phytium CAN is not in configuration mode. + */ + is_config_mode = phytium_can_read(cdev, CAN_CTRL) & CTRL_XFER; + if (is_config_mode) { + netdev_alert(dev, "BUG! Cannot set bittiming\n"); + return -EPERM; + } + + /* Setting Baud Rate prescalar value in BRPR Register */ + btr = (bt->brp - 1) << 16; + + /* Setting Time Segment 1 in BTR Register */ + btr |= (bt->prop_seg - 1) << 2; + + btr |= (bt->phase_seg1 - 1) << 5; + + /* Setting Time Segment 2 in BTR Register */ + btr |= (bt->phase_seg2 - 1) << 8; + + /* Setting Synchronous jump width in BTR Register */ + btr |= (bt->sjw - 1); + + dbtr = (dbt->brp - 1) << 16; + dbtr |= (dbt->prop_seg - 1) << 2; + dbtr |= (dbt->phase_seg1 - 1) << 5; + dbtr |= (dbt->phase_seg2 - 1) << 8; + dbtr |= (dbt->sjw - 1); + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { + phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr); + phytium_can_write(cdev, CAN_DAT_RATE_CTRL, dbtr); + } else { + phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr); + phytium_can_write(cdev, CAN_DAT_RATE_CTRL, btr); + } + + netdev_dbg(dev, "DAT=0x%08x, ARB=0x%08x\n", + phytium_can_read(cdev, CAN_DAT_RATE_CTRL), + phytium_can_read(cdev, CAN_ARB_RATE_CTRL)); + + return 0; +} + +/** + * phytium_can_start - This the drivers start routine + * @dev: Pointer to net_device structure + * + * This is the drivers start routine. 
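phytium_can_set_bittiming() above packs the negotiated timing values into ARB_RATE_CTRL/DAT_RATE_CTRL with the prescaler at bits 28:16, phase_seg2 at 10:8, phase_seg1 at 7:5, prop_seg at 4:2 and sjw at 1:0, each stored minus one. A stand-alone sketch of that packing; the example numbers are arbitrary and the struct is a simplified stand-in for struct can_bittiming:

#include <stdint.h>
#include <stdio.h>

struct bittiming {
    uint32_t brp, prop_seg, phase_seg1, phase_seg2, sjw;
};

/* Mirror of the shifts used in phytium_can_set_bittiming(). */
static uint32_t pack_rate_ctrl(const struct bittiming *bt)
{
    return ((bt->brp - 1) << 16) |
           ((bt->phase_seg2 - 1) << 8) |
           ((bt->phase_seg1 - 1) << 5) |
           ((bt->prop_seg - 1) << 2) |
           (bt->sjw - 1);
}

int main(void)
{
    /* Example numbers only; real values come from the bit-timing
     * constants negotiated by the CAN core for the target bitrate.
     */
    struct bittiming bt = { .brp = 4, .prop_seg = 4,
                            .phase_seg1 = 6, .phase_seg2 = 5, .sjw = 2 };

    printf("ARB/DAT_RATE_CTRL = 0x%08x\n", pack_rate_ctrl(&bt));
    return 0;
}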
+ * Based on the State of the CAN device it puts + * the CAN device into a proper mode. + * + * Return: 0 on success and failure value on error + */ +static void phytium_can_start(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 ctrl; + + /* Disable transfer */ + ctrl = phytium_can_read(cdev, CAN_CTRL); + ctrl &= ~CTRL_XFER; + phytium_can_write(cdev, CAN_CTRL, ctrl); + + /* XXX: If CANFD, reset the controller */ + phytium_can_write(cdev, CAN_CTRL, (ctrl | CTRL_RST)); + + /* Bittiming setup */ + phytium_can_set_bittiming(dev); + + /* Acceptance identifier mask setup */ + phytium_can_write(cdev, CAN_ACC_ID0_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID1_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID2_MASK, ACC_IDX_MASK_AID_MASK); + phytium_can_write(cdev, CAN_ACC_ID3_MASK, ACC_IDX_MASK_AID_MASK); + ctrl |= CTRL_AIME; + + if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) + ctrl |= CTRL_IOF | CTRL_FDCRC; + + phytium_can_write(cdev, CAN_CTRL, ctrl); + + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + + phytium_can_enable_all_interrupts(cdev); + + if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + ctrl |= CTRL_XFER; + else + ctrl |= CTRL_XFER | CTRL_TXREQ; + + phytium_can_write(cdev, CAN_CTRL, ctrl); +} + +/** + * phytium_can_stop - Driver stop routine + * @dev: Pointer to net_device structure + * + * This is the drivers stop routine. It will disable the + * interrupts and put the device into configuration mode. + */ +static void phytium_can_stop(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 ctrl; + + /* Disable all interrupts */ + phytium_can_disable_all_interrupt(cdev); + + /* Disable transfer and switch to receive-only mode */ + ctrl = phytium_can_read(cdev, CAN_CTRL); + ctrl &= ~(CTRL_XFER | CTRL_TXREQ); + phytium_can_write(cdev, CAN_CTRL, ctrl); + + /* Set the state as STOPPED */ + cdev->can.state = CAN_STATE_STOPPED; +} + +static void phytium_can_clean(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (cdev->tx_skb) { + dev->stats.tx_errors++; + can_free_echo_skb(cdev->net, 0); + cdev->tx_skb = NULL; + } +} + +static int phytium_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + phytium_can_clean(dev); + phytium_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * phytium_can_open - Driver open routine + * @dev: Pointer to net_device structure + * + * This is the driver open routine. 
+ * Return: 0 on success and failure value on error + */ +static int phytium_can_open(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + int ret; + + /* Start clock */ + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + /* Open the CAN device */ + ret = open_candev(dev); + if (ret) { + netdev_err(dev, "failed to open can device\n"); + goto disable_clk; + } + + /* Register interrupt handler */ + ret = request_irq(dev->irq, phytium_can_isr, + IRQF_SHARED, dev->name, dev); + if (ret < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto fail; + } + + /* Start the controller */ + phytium_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + napi_enable(&cdev->napi); + netif_start_queue(dev); + + return 0; + +fail: + close_candev(dev); +disable_clk: + pm_runtime_put_sync(cdev->dev); + return ret; +} + +/** + * phytium_can_close - Driver close routine + * @dev: Pointer to net_device structure + * + * Return: 0 always + */ +static int phytium_can_close(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&cdev->napi); + + phytium_can_stop(dev); + free_irq(dev->irq, dev); + pm_runtime_put_sync(cdev->dev); + + close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + + return 0; +} + +/** + * phytium_can_start_xmit - Starts the transmission + * + * Return: 0 on success. + */ +static netdev_tx_t phytium_can_start_xmit(struct + sk_buff *skb, struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + cdev->tx_skb = skb; + + return phytium_can_tx_handler(cdev); +} + +static const struct net_device_ops phytium_can_netdev_ops = { + .ndo_open = phytium_can_open, + .ndo_stop = phytium_can_close, + .ndo_start_xmit = phytium_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int register_phytium_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; + dev->netdev_ops = &phytium_can_netdev_ops; + + return register_candev(dev); +} + +static int phytium_can_dev_setup(struct phytium_can_dev *cdev) +{ + struct net_device *dev = cdev->net; + + netif_napi_add(dev, &cdev->napi, phytium_can_poll, 64); + + cdev->can.do_set_mode = phytium_can_set_mode; + cdev->can.do_get_berr_counter = phytium_can_get_berr_counter; + + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + cdev->can.bittiming_const = cdev->bit_timing; + + if (cdev->fdmode) { + cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + dev->mtu = CANFD_MTU; + cdev->can.ctrlmode = CAN_CTRLMODE_FD; + cdev->can.data_bittiming_const = cdev->bit_timing; + } + + return 0; +} + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int + sizeof_priv, int tx_fifo_depth) +{ + struct phytium_can_dev *cdev = NULL; + struct net_device *net_dev; + + /* Allocate the can device struct */ + net_dev = alloc_candev(sizeof_priv, tx_fifo_depth); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device.\n"); + goto out; + } + + cdev = netdev_priv(net_dev); + cdev->net = net_dev; + cdev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); + +out: + return cdev; +} +EXPORT_SYMBOL(phytium_can_allocate_dev); + +void phytium_can_free_dev(struct net_device *net) +{ + free_candev(net); +} +EXPORT_SYMBOL(phytium_can_free_dev); + +int phytium_can_register(struct phytium_can_dev *cdev) +{ + int ret; + + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + ret = phytium_can_dev_setup(cdev); + 
if (ret) + goto fail; + + ret = register_phytium_can_dev(cdev->net); + if (ret) { + dev_err(cdev->dev, "registering %s failed (err=%d)\n", + cdev->net->name, ret); + goto fail; + } + + devm_can_led_init(cdev->net); + + dev_info(cdev->dev, "%s device registered (irq=%d)\n", + KBUILD_MODNAME, cdev->net->irq); + + /* Probe finished + * Stop clocks. They will be reactivated once the device is opened. + */ + pm_runtime_put_sync(cdev->dev); + + return 0; + +fail: + pm_runtime_put_sync(cdev->dev); + return ret; +} +EXPORT_SYMBOL(phytium_can_register); + +void phytium_can_unregister(struct phytium_can_dev *cdev) +{ + unregister_candev(cdev->net); +} +EXPORT_SYMBOL(phytium_can_unregister); + +int phytium_can_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + phytium_can_stop(ndev); + pm_runtime_put_sync(cdev->dev); + } + + cdev->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(phytium_can_suspend); + +int phytium_can_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + int ret; + + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + phytium_can_start(ndev); + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(phytium_can_resume); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN bus driver for Phytium CAN controller"); diff --git a/drivers/net/can/phytium/phytium_can.h b/drivers/net/can/phytium/phytium_can.h new file mode 100644 index 0000000000000000000000000000000000000000..ba3260a577cc3430da20b77f0a9ef51cc8705377 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium CAN controller driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
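+ *
+ * Common definitions shared by the platform (phytium_can_plat.c) and
+ * PCI (phytium_can_pci.c) front ends.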
+ */ + +#ifndef _PHYTIUM_CAN_H_ +#define _PHYTIUM_CAN_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum phytium_can_ip_type { + PHYTIUM_CAN = 0, + PHYTIUM_CANFD, +}; + +struct phytium_can_devtype { + enum phytium_can_ip_type cantype; + const struct can_bittiming_const *bittiming_const; +}; + +struct phytium_can_dev { + struct can_priv can; + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_max; + struct napi_struct napi; + struct net_device *net; + struct device *dev; + struct clk *clk; + + struct sk_buff *tx_skb; + + const struct can_bittiming_const *bit_timing; + + int fdmode; + u32 isr; + u32 tx_fifo_depth; + + void __iomem *base; +}; + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int + sizeof_priv, int tx_fifo_depth); +void phytium_can_free_dev(struct net_device *net); + +int phytium_can_register(struct phytium_can_dev *cdev); +void phytium_can_unregister(struct phytium_can_dev *cdev); + +int phytium_can_suspend(struct device *dev); +int phytium_can_resume(struct device *dev); +#endif /* _PHYTIUM_CAN_H_ */ diff --git a/drivers/net/can/phytium/phytium_can_pci.c b/drivers/net/can/phytium/phytium_can_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..63a99b660f6165f09752aead1ad48b0df10c2eaa --- /dev/null +++ b/drivers/net/can/phytium/phytium_can_pci.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform CAN bus driver for Phytium CAN controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include + +#include "phytium_can.h" + +struct phytium_can_pci_config { + const struct phytium_can_devtype *devtype; + unsigned int clock_freq; + unsigned int tx_fifo_depth; +}; + +#define cdev2priv(dev) container_of(dev, struct phytium_can_pci, cdev) + +struct phytium_can_pci { + struct phytium_can_dev cdev; + + void __iomem *base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_8192 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 8192, + .brp_inc = 2, +}; + +static const struct phytium_can_devtype phytium_can_pci = { + .cantype = PHYTIUM_CAN, + .bittiming_const = &phytium_bittiming_const_8192, +}; + +static const struct phytium_can_pci_config phytium_can_pci_data = { + .devtype = &phytium_can_pci, + .clock_freq = 480000000, + .tx_fifo_depth = 64, +}; + +static int phytium_can_pci_probe(struct pci_dev *pdev, const + struct pci_device_id *id) +{ + const struct phytium_can_pci_config *cfg; + struct phytium_can_dev *cdev; + struct phytium_can_pci *priv; + int ret; + + cfg = (const struct phytium_can_pci_config *)id->driver_data; + + ret = pcim_enable_device(pdev); + if (ret) + goto err; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) + goto err; + + cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct + phytium_can_pci), cfg->tx_fifo_depth); + if (!cdev) + return -ENOMEM; + + priv = cdev2priv(cdev); + priv->base = pcim_iomap_table(pdev)[0]; + + cdev->dev = &pdev->dev; + cdev->fdmode = cfg->devtype->cantype; + cdev->bit_timing = cfg->devtype->bittiming_const; + cdev->can.clock.freq = cfg->clock_freq; + cdev->tx_fifo_depth = cfg->tx_fifo_depth; + + cdev->base = priv->base; + cdev->net->irq = pdev->irq; + + 
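/* Stash the net_device so the PM callbacks and remove() can
+	 * retrieve it with pci_get_drvdata()/dev_get_drvdata().
+	 */
+	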
pci_set_drvdata(pdev, cdev->net); + + pm_runtime_enable(cdev->dev); + ret = phytium_can_register(cdev); + if (ret) + goto err; + + return 0; +err: + return ret; +} + +static void phytium_can_pci_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + phytium_can_unregister(cdev); + phytium_can_free_dev(cdev->net); +} + +static __maybe_unused int phytium_can_pci_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_pci_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(phytium_can_pci_pm_ops, + phytium_can_pci_suspend, phytium_can_pci_resume); + +static const struct pci_device_id phytium_can_pci_id_table[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2d), + (kernel_ulong_t)&phytium_can_pci_data, }, + { /* sentinel */ }, +}; + +static struct pci_driver phytium_can_pci_driver = { + .name = KBUILD_MODNAME, + .probe = phytium_can_pci_probe, + .remove = phytium_can_pci_remove, + .id_table = phytium_can_pci_id_table, + .driver = { + .pm = &phytium_can_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_can_pci_driver); + +MODULE_AUTHOR("Cheng Quan +#include +#include + +#include "phytium_can.h" + +#define cdev2priv(dev) container_of(dev, struct phytium_can_plat, cdev) + +struct phytium_can_plat { + struct phytium_can_dev cdev; + struct phytium_can_devtype *devtype; + + int irq; + void __iomem *reg_base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_512 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 512, + .brp_inc = 2, +}; + +static const struct can_bittiming_const phytium_bittiming_const_8192 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 8192, + .brp_inc = 2, +}; + +static const struct phytium_can_devtype phytium_can_data = { + .cantype = PHYTIUM_CAN, + .bittiming_const = &phytium_bittiming_const_512, +}; + +static const struct phytium_can_devtype phytium_canfd_data = { + .cantype = PHYTIUM_CANFD, + .bittiming_const = &phytium_bittiming_const_8192, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_can_acpi_ids[] = { + { "PHYT000A", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_can_acpi_ids); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id phytium_can_of_ids[] = { + { .compatible = "phytium,can", .data = &phytium_can_data }, + { .compatible = "phytium,canfd", .data = &phytium_canfd_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_can_of_ids); +#endif + +static int phytium_can_plat_probe(struct platform_device *pdev) +{ + struct phytium_can_dev *cdev; + struct phytium_can_plat *priv; + struct resource *res; + const struct of_device_id *of_id; + const struct phytium_can_devtype *devtype = &phytium_can_data; + u32 tx_fifo_depth; + int ret; + const char *str = "can"; + + ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), "tx-fifo-depth", + &tx_fifo_depth); + if (ret) + tx_fifo_depth = 64; + + cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct + phytium_can_plat), 
tx_fifo_depth); + if (!cdev) + return -ENOMEM; + + priv = cdev2priv(cdev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + priv->irq = platform_get_irq(pdev, 0); + if (IS_ERR(priv->reg_base) || cdev->net->irq < 0) { + ret = -EINVAL; + goto fail; + } + + if (pdev->dev.of_node) { + cdev->clk = devm_clk_get(&pdev->dev, "can_clk"); + if (IS_ERR(cdev->clk)) { + dev_err(&pdev->dev, "no clock found\n"); + ret = -ENODEV; + goto fail; + } + cdev->can.clock.freq = clk_get_rate(cdev->clk); + + of_id = of_match_device(phytium_can_of_ids, &pdev->dev); + if (of_id && of_id->data) + devtype = of_id->data; + } else if (has_acpi_companion(&pdev->dev)) { + ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), + "clock-frequency", + &cdev->can.clock.freq); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get clock frequency.\n"); + goto fail; + } + + fwnode_property_read_string(dev_fwnode(&pdev->dev), + "mode-select", &str); + if (!(strcmp(str, "canfd"))) + devtype = &phytium_canfd_data; + } + + cdev->tx_fifo_depth = tx_fifo_depth; + cdev->tx_head = 0; + cdev->tx_tail = 0; + cdev->tx_max = tx_fifo_depth; + + if (devtype->cantype == PHYTIUM_CANFD) + cdev->fdmode = 1; + else + cdev->fdmode = 0; + + if (fwnode_property_present(dev_fwnode(&pdev->dev), "extend_brp")) + cdev->bit_timing = &phytium_bittiming_const_8192; + else + cdev->bit_timing = devtype->bittiming_const; + cdev->can.bittiming_const = devtype->bittiming_const; + cdev->base = priv->reg_base; + cdev->net->irq = priv->irq; + + platform_set_drvdata(pdev, cdev->net); + + pm_runtime_enable(cdev->dev); + ret = phytium_can_register(cdev); + if (ret) + goto out_runtime_disable; + + return ret; + +out_runtime_disable: + pm_runtime_disable(cdev->dev); +fail: + phytium_can_free_dev(cdev->net); + return ret; +} + +static __maybe_unused int phytium_can_plat_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_plat_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static int phytium_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + phytium_can_unregister(cdev); + + phytium_can_free_dev(cdev->net); + + return 0; +} + +static int __maybe_unused phytium_can_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + clk_disable_unprepare(cdev->clk); + + return 0; +} + +static int __maybe_unused phytium_can_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + return clk_prepare_enable(cdev->clk); +} + +static const struct dev_pm_ops phytium_can_plat_pm_ops = { + SET_RUNTIME_PM_OPS(phytium_can_runtime_suspend, + phytium_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(phytium_can_suspend, phytium_can_resume) +}; + +static struct platform_driver phytium_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_can_of_ids), + .acpi_match_table = ACPI_PTR(phytium_can_acpi_ids), + .pm = &phytium_can_plat_pm_ops, + }, + .probe = phytium_can_plat_probe, + .remove = phytium_can_plat_remove, +}; + +module_platform_driver(phytium_can_plat_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium CAN driver for IO Mapped controllers"); diff --git 
a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 11662f479e760ba77f613c90bfc8026b005da3ea..771a4608373978c31b7011a45cf5659f543820c1 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -24,6 +24,9 @@ #define RCAR_CAN_DRV_NAME "rcar_can" +#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ + BIT(CLKR_CLKEXT)) + /* Mailbox configuration: * mailbox 60 - 63 - Rx FIFO mailboxes * mailbox 56 - 59 - Tx FIFO mailboxes @@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev) goto fail_clk; } - if (clock_select >= ARRAY_SIZE(clock_names)) { + if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) { err = -EINVAL; dev_err(&pdev->dev, "invalid CAN clock selected\n"); goto fail_clk; diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 602c19e23f052ed50bd691408bc9644bf5049cb5..786d852a70d5844f4531637e07a9fb2e040b90cd 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) /* All packets processed */ if (num_pkts < quota) { - napi_complete_done(napi, num_pkts); - /* Enable Rx FIFO interrupts */ - rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), - RCANFD_RFCC_RFIE); + if (napi_complete_done(napi, num_pkts)) { + /* Enable Rx FIFO interrupts */ + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), + RCANFD_RFCC_RFIE); + } } return num_pkts; } diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index 381de998d2f1655b955bc701add77a30fe44ac21..fef5c59c0f4ca8a95e499413a12d2481bedeb93f 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -243,7 +243,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index a97b81d1d0da9b0b29fc622b34b6b54a06872ae8..e989841b411fa1c4ffe8be3be963a80acced4f6f 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -739,16 +739,15 @@ static void peak_pci_remove(struct pci_dev *pdev) struct net_device *prev_dev = chan->prev_dev; dev_info(&pdev->dev, "removing device %s\n", dev->name); + /* do that only for first channel */ + if (!prev_dev && chan->pciec_card) + peak_pciec_remove(chan->pciec_card); unregister_sja1000dev(dev); free_sja1000dev(dev); dev = prev_dev; - if (!dev) { - /* do that only for first channel */ - if (chan->pciec_card) - peak_pciec_remove(chan->pciec_card); + if (!dev) break; - } priv = netdev_priv(dev); chan = priv->priv; } diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index b8c39ede7cd51445b6ed653585811066ab75d7d4..179bfcd541f2f552253848d20342ab7c23409f58 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card) if (!netdev) continue; - strncpy(name, netdev->name, IFNAMSIZ); + strlcpy(name, netdev->name, IFNAMSIZ); unregister_sja1000dev(netdev); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 
aa97dbc797b6be339e911a9c34d55778040fdad4..c283770e3120e89a1bddbc2760d80ced645c4101 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -55,6 +55,7 @@ #include #include #include +#include MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); @@ -147,7 +148,7 @@ static void slc_bump(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; - cf.can_id = 0; + memset(&cf, 0, sizeof(cf)); switch (*cmd) { case 'r': @@ -186,8 +187,6 @@ static void slc_bump(struct slcan *sl) else return; - *(u64 *) (&cf.data) = 0; /* clear payload */ - /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf.can_dlc; i++) { @@ -343,9 +342,16 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = tty->disc_data; + struct slcan *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } /* Send a can_frame to a TTY queue. */ @@ -514,6 +520,7 @@ static struct slcan *slc_alloc(void) char name[IFNAMSIZ]; struct net_device *dev = NULL; struct slcan *sl; + int size; for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; @@ -527,12 +534,14 @@ static struct slcan *slc_alloc(void) return NULL; sprintf(name, "slcan%d", i); - dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); + size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv); + dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup); if (!dev) return NULL; dev->base_addr = i; sl = netdev_priv(dev); + dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN); /* Initialize channel control data */ sl->magic = SLCAN_MAGIC; @@ -613,6 +622,8 @@ static int slcan_open(struct tty_struct *tty) sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + slc_free_netdev(sl->dev); + free_netdev(sl->dev); err_exit: rtnl_unlock(); @@ -638,10 +649,11 @@ static void slcan_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* Flush network side */ diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index 8f2e0dd7b7565769c768543e1097caff53d555ff..792e9c6c4a2f777ffe292cc703784b458493baa6 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -8,9 +8,10 @@ config CAN_HI311X Driver for the Holt HI311x SPI CAN controllers. config CAN_MCP251X - tristate "Microchip MCP251x SPI CAN controllers" + tristate "Microchip MCP251x and MCP25625 SPI CAN controllers" depends on HAS_DMA ---help--- - Driver for the Microchip MCP251x SPI CAN controllers. + Driver for the Microchip MCP251x and MCP25625 SPI CAN + controllers. 
endmenu diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 53e320c92a8be21e286ab4f7ada738fd223a08fa..ddaf46239e39e92337a4ed54ecb3feb1ab94cc59 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH; int ret; ret = open_candev(net); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index e908176086450be172b5de5004033f817f415cfc..0b0dd3f096dc6ad8807809d1b89ca626544e0170 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1,5 +1,5 @@ /* - * CAN bus driver for Microchip 251x CAN Controller with SPI Interface + * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface * * MCP2510 support and bug fixes by Christian Pellegrin * @@ -41,7 +41,7 @@ * static struct spi_board_info spi_board_info[] = { * { * .modalias = "mcp2510", - * // or "mcp2515" depending on your controller + * // "mcp2515" or "mcp25625" depending on your controller * .platform_data = &mcp251x_info, * .irq = IRQ_EINT13, * .max_speed_hz = 2*1000*1000, @@ -238,6 +238,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = { enum mcp251x_model { CAN_MCP251X_MCP2510 = 0x2510, CAN_MCP251X_MCP2515 = 0x2515, + CAN_MCP251X_MCP25625 = 0x25625, }; struct mcp251x_priv { @@ -280,7 +281,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \ } MCP251X_IS(2510); -MCP251X_IS(2515); static void mcp251x_clean(struct net_device *net) { @@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi) static int mcp251x_hw_reset(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); - u8 reg; + unsigned long timeout; int ret; /* Wait for oscillator startup timer after power up */ @@ -639,11 +639,20 @@ static int mcp251x_hw_reset(struct spi_device *spi) /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); - - reg = mcp251x_read_reg(spi, CANSTAT); - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF) - return -ENODEV; + /* Wait for reset to finish */ + timeout = jiffies + HZ; + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) != + CANCTRL_REQOP_CONF) { + usleep_range(MCP251X_OST_DELAY_MS * 1000, + MCP251X_OST_DELAY_MS * 1000 * 2); + + if (time_after(jiffies, timeout)) { + dev_err(&spi->dev, + "MCP251x didn't enter in conf mode after reset\n"); + return -EBUSY; + } + } return 0; } @@ -678,17 +687,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable) return regulator_disable(reg); } -static void mcp251x_open_clean(struct net_device *net) -{ - struct mcp251x_priv *priv = netdev_priv(net); - struct spi_device *spi = priv->spi; - - free_irq(spi->irq, priv); - mcp251x_hw_sleep(spi); - mcp251x_power_enable(priv->transceiver, 0); - close_candev(net); -} - static int mcp251x_stop(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); @@ -775,6 +773,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws) if (priv->after_suspend) { mcp251x_hw_reset(spi); mcp251x_setup(net, spi); + priv->force_quit = 0; if (priv->after_suspend & AFTER_SUSPEND_RESTART) { mcp251x_set_normal_mode(spi); } else if (priv->after_suspend & AFTER_SUSPEND_UP) { @@ -786,7 +785,6 @@ static void mcp251x_restart_work_handler(struct work_struct 
*ws) mcp251x_hw_sleep(spi); } priv->after_suspend = 0; - priv->force_quit = 0; } if (priv->restart_tx) { @@ -820,9 +818,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); - /* - * Free one buffer ASAP - * (The MCP2515 does this automatically.) + /* Free one buffer ASAP + * (The MCP2515/25625 does this automatically.) */ if (mcp251x_is_2510(spi)) mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); @@ -831,7 +828,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) /* receive buffer 1 */ if (intf & CANINTF_RX1IF) { mcp251x_hw_rx(spi, 1); - /* the MCP2515 does this automatically */ + /* The MCP2515/25625 does this automatically. */ if (mcp251x_is_2510(spi)) clear_intf |= CANINTF_RX1IF; } @@ -955,37 +952,43 @@ static int mcp251x_open(struct net_device *net) flags | IRQF_ONESHOT, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); - mcp251x_power_enable(priv->transceiver, 0); - close_candev(net); - goto open_unlock; + goto out_close; } priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_clean; + } INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); ret = mcp251x_hw_reset(spi); - if (ret) { - mcp251x_open_clean(net); - goto open_unlock; - } + if (ret) + goto out_free_wq; ret = mcp251x_setup(net, spi); - if (ret) { - mcp251x_open_clean(net); - goto open_unlock; - } + if (ret) + goto out_free_wq; ret = mcp251x_set_normal_mode(spi); - if (ret) { - mcp251x_open_clean(net); - goto open_unlock; - } + if (ret) + goto out_free_wq; can_led_event(net, CAN_LED_EVENT_OPEN); netif_wake_queue(net); + mutex_unlock(&priv->mcp_lock); + + return 0; -open_unlock: +out_free_wq: + destroy_workqueue(priv->wq); +out_clean: + free_irq(spi->irq, priv); + mcp251x_hw_sleep(spi); +out_close: + mcp251x_power_enable(priv->transceiver, 0); + close_candev(net); mutex_unlock(&priv->mcp_lock); return ret; } @@ -1006,6 +1009,10 @@ static const struct of_device_id mcp251x_of_match[] = { .compatible = "microchip,mcp2515", .data = (void *)CAN_MCP251X_MCP2515, }, + { + .compatible = "microchip,mcp25625", + .data = (void *)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(of, mcp251x_of_match); @@ -1019,6 +1026,10 @@ static const struct spi_device_id mcp251x_id_table[] = { .name = "mcp2515", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515, }, + { + .name = "mcp25625", + .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625, + }, { } }; MODULE_DEVICE_TABLE(spi, mcp251x_id_table); @@ -1259,5 +1270,5 @@ module_spi_driver(mcp251x_can_driver); MODULE_AUTHOR("Chris Elston , " "Christian Pellegrin "); -MODULE_DESCRIPTION("Microchip 251x CAN driver"); +MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b7dfd4109d24ef3db5c03d78fb078f7f7cf6e3ab..7a92f640c3796b4852945b54aff2aa98a269b228 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -823,7 +823,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); - dev_kfree_skb(skb); atomic_dec(&dev->active_tx_urbs); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 17c21ad3b95eed15fb72342ff7d3ccedb7a2096f..3a39f51a9e2448790a6840b0dda76c339c98e85c 100644 
--- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -631,6 +631,7 @@ static int gs_can_open(struct net_device *netdev) rc); usb_unanchor_urb(urb); + usb_free_urb(urb); break; } diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index b939a4c10b8409f5fe58e700204fe4e5183b23c0..c89c7d4900d75068badc7a7234c36b5b7345f675 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context = &priv->tx_contexts[i]; context->echo_index = i; - can_put_echo_skb(skb, netdev, context->echo_index); ++priv->active_tx_contexts; if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) netif_stop_queue(netdev); @@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, dev_kfree_skb(skb); spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_free_echo_skb(netdev, context->echo_index); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); @@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context->priv = priv; + can_put_echo_skb(skb, netdev, context->echo_index); + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index c084bae5ec0a4d936f6a7f121272d972903a8023..5fc0be564274375f3d5c579521a2d3b89ecd4a88 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, new_state : CAN_STATE_ERROR_ACTIVE; can_change_state(netdev, cf, tx_state, rx_state); + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + cf->can_id |= CAN_ERR_RESTARTED; } if (new_state == CAN_STATE_BUS_OFF) { @@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, can_bus_off(netdev); } - - if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && - new_state < CAN_STATE_BUS_OFF) - cf->can_id |= CAN_ERR_RESTARTED; } if (!skb) { diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07d2f3aa2c026c99e0358266ff781e8d893c75a5..ae4c37e1bb753ecd8e2cc01320c9f7797f7868c9 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, struct kvaser_cmd *cmd; int err; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; @@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 8d8c2086424d09b2c93a377be756dead26dc7979..5c0b6613c28eaa9f6bf4a7ec4afe7588e25cb6ba 100644 --- 
a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -93,6 +93,8 @@ struct mcba_priv { bool can_ka_first_pass; bool can_speed_check; atomic_t free_ctx_cnt; + void *rxbuf[MCBA_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; }; /* CAN frame */ @@ -377,7 +379,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, xmit_failed: can_free_echo_skb(priv->netdev, ctx->ndx); mcba_usb_free_ctx(ctx); - dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; @@ -644,6 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv) for (i = 0; i < MCBA_MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; + dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -653,7 +655,7 @@ static int mcba_usb_start(struct mcba_priv *priv) } buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, - GFP_KERNEL, &urb->transfer_dma); + GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -661,6 +663,8 @@ static int mcba_usb_start(struct mcba_priv *priv) break; } + urb->transfer_dma = buf_dma; + usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), buf, MCBA_USB_RX_BUFF_SIZE, @@ -672,11 +676,14 @@ static int mcba_usb_start(struct mcba_priv *priv) if (err) { usb_unanchor_urb(urb); usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, - buf, urb->transfer_dma); + buf, buf_dma); usb_free_urb(urb); break; } + priv->rxbuf[i] = buf; + priv->rxbuf_dma[i] = buf_dma; + /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -719,7 +726,14 @@ static int mcba_usb_open(struct net_device *netdev) static void mcba_urb_unlink(struct mcba_priv *priv) { + int i; + usb_kill_anchored_urbs(&priv->rx_submitted); + + for (i = 0; i < MCBA_MAX_RX_URBS; ++i) + usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, + priv->rxbuf[i], priv->rxbuf_dma[i]); + usb_kill_anchored_urbs(&priv->tx_submitted); } @@ -887,9 +901,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_candev(priv->netdev); - free_candev(priv->netdev); - mcba_urb_unlink(priv); + free_candev(priv->netdev); } static struct usb_driver mcba_usb_driver = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 13238a72a33862a7c2acfc966c03f93d206d82be..215cd74800df4b6e625cd41ed3627bb178f46267 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -108,7 +108,7 @@ struct pcan_usb_msg_context { u8 *end; u8 rec_cnt; u8 rec_idx; - u8 rec_data_idx; + u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; @@ -444,8 +444,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, } if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; + new_state = CAN_STATE_ERROR_ACTIVE; + break; } break; @@ -468,9 +468,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, } if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { - /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; + /* no error (back to warning state) */ + new_state = CAN_STATE_ERROR_WARNING; + break; } break; @@ -509,6 +509,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, mc->pdev->dev.can.can_stats.error_warning++; break; + case CAN_STATE_ERROR_ACTIVE: + cf->can_id |= 
CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + break; + default: /* CAN_STATE_MAX (trick to handle other errors) */ cf->can_id |= CAN_ERR_CRTL; @@ -555,10 +560,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { - int err = pcan_usb_decode_ts(mc, !mc->rec_idx); + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; + + /* Next packet in the buffer will have a timestamp on a single + * byte + */ + mc->rec_ts_idx++; } switch (f) { @@ -640,10 +650,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) cf->can_dlc = get_can_dlc(rec_len); - /* first data packet timestamp is a word */ - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) + /* Only first packet timestamp is a word */ + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; + /* Next packet in the buffer will have a timestamp on a single byte */ + mc->rec_ts_idx++; + /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { @@ -696,7 +709,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); - mc.rec_data_idx++; } } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 611f9d31be5d0370612fe8d4f9771b88dd9f3d37..afc8d978124ef6fed024c3a7f5e31db1b989810a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev) dev->state &= ~PCAN_USB_STATE_STARTED; netif_stop_queue(netdev); + close_candev(netdev); + + dev->can.state = CAN_STATE_STOPPED; + /* unlink all pending urbs and free used memory */ peak_usb_unlink_all_urbs(dev); if (dev->adapter->dev_stop) dev->adapter->dev_stop(dev); - close_candev(netdev); - - dev->can.state = CAN_STATE_STOPPED; - /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); @@ -758,7 +758,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_free_candev; @@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf) dev_prev_siblings = dev->prev_siblings; dev->state &= ~PCAN_USB_STATE_CONNECTED; - strncpy(name, netdev->name, IFNAMSIZ); + strlcpy(name, netdev->name, IFNAMSIZ); unregister_netdev(netdev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index dd161c5eea8ec7e01aaa7d4c759fe212e5d9b79d..41988358f63c86cdc307fe541e510ef5bcfd1b1a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) goto err_out; /* allocate command buffer once for all for the interface */ - pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE, + pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE, GFP_KERNEL); if (!pdev->cmd_buffer_addr) goto err_out_1; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 
d516def846abec6c661dc185da1d64b73ca22441..b304198f0b3af0d670677bb0526549c49394a037 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) u8 *buffer; int err; - buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); + buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 0678a38b1af4588135f04074c56b37cfe6d09b70..43350f56e223d47d2a6363251b2ecfa0fc441d13 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -796,7 +796,7 @@ static void ucan_read_bulk_callback(struct urb *urb) up); usb_anchor_urb(urb, &up->rx_urbs); - ret = usb_submit_urb(urb, GFP_KERNEL); + ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { netdev_err(up->netdev, @@ -1575,11 +1575,8 @@ static int ucan_probe(struct usb_interface *intf, /* disconnect the device */ static void ucan_disconnect(struct usb_interface *intf) { - struct usb_device *udev; struct ucan_priv *up = usb_get_intfdata(intf); - udev = interface_to_usbdev(intf); - usb_set_intfdata(intf, NULL); if (up) { diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 27861c417c9404c9c3df841daac95015978f3a69..9cd67262a5edee81885dc58290ccf5903267fa31 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -680,9 +680,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) - goto failed; - else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) + if (unlikely(err)) { + can_free_echo_skb(netdev, context->echo_index); + + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %d\n", err); + stats->tx_dropped++; + } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); @@ -701,19 +712,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; -failed: - can_free_echo_skb(netdev, context->echo_index); - - usb_unanchor_urb(urb); - usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); - - atomic_dec(&priv->active_tx_urbs); - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "failed tx_urb %d\n", err); - nomembuf: usb_free_urb(urb); @@ -1007,9 +1005,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); - free_candev(priv->netdev); - unlink_all_urbs(priv); + free_candev(priv->netdev); } } diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index d200a5b0651c2867ed974ac3618e437aa08e5ca6..32efb4b5f22ccedebc557d9f813f7e8c7b344680 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -157,6 +158,7 @@ static void vcan_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; + dev->ml_priv = netdev_priv(dev); /* set flags according to driver capabilities */ if (echo) @@ -167,8 +169,9 @@ static void vcan_setup(struct net_device *dev) } static struct rtnl_link_ops vcan_link_ops __read_mostly = { - .kind = DRV_NAME, - 
.setup = vcan_setup, + .kind = DRV_NAME, + .priv_size = sizeof(struct can_ml_priv), + .setup = vcan_setup, }; static __init int vcan_init_module(void) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index ed6828821fbd3c5f707068101e23adfcadbeffab..a496753ea53b29439ed0c5635436b4e55e02c593 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -157,6 +158,7 @@ static void vxcan_setup(struct net_device *dev) dev->flags = (IFF_NOARP|IFF_ECHO); dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; + dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); } /* forward declaration for rtnl_create_link() */ @@ -292,7 +294,7 @@ static struct net *vxcan_get_link_net(const struct net_device *dev) static struct rtnl_link_ops vxcan_link_ops = { .kind = DRV_NAME, - .priv_size = sizeof(struct vxcan_priv), + .priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv), .setup = vxcan_setup, .newlink = vxcan_newlink, .dellink = vxcan_dellink, diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 045f0845e665e42a0d588cef735d2230c60a7b13..b01c6da4dd814595b46eaad8e024537efc19f14e 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -612,7 +612,7 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full */ -static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; @@ -1424,7 +1424,7 @@ static const struct xcan_devtype_data xcan_canfd_data = { XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, - .bittiming_const = &xcan_bittiming_const, + .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index d3ce1e4cb4d3cdc9126cef772d12df479d6048b8..dbfb6ad80fac8fd7145feebd3f2d432a99c4553f 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -66,6 +66,7 @@ config NET_DSA_REALTEK_SMI config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 + select REGMAP ---help--- This enables support for the SMSC/Microchip LAN9303 3 port ethernet switch chips. 
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index ad534b90ef21b6a269f57472fe5beee70f6fd917..2d3a2cb026d264751a79f1de3960e8427edf2759 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1584,7 +1584,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port, loc = B53_EG_MIR_CTL; b53_read16(dev, B53_MGMT_PAGE, loc, ®); - reg &= ~MIRROR_MASK; reg |= BIT(port); b53_write16(dev, B53_MGMT_PAGE, loc, reg); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index fc8b48adf38b45aa7f0dbef5dc2de248826d1889..02a4187d81bd037e6bcb9abe3e430d406a2a9b62 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -41,22 +41,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) unsigned int i; u32 reg, offset; - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_IMP; - else - offset = CORE_STS_OVERRIDE_IMP2; - /* Enable the port memories */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ - reg = core_readl(priv, CORE_IMP_CTL); - reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); - reg &= ~(RX_DIS | TX_DIS); - core_writel(priv, reg, CORE_IMP_CTL); - /* Enable forwarding */ core_writel(priv, SW_FWDG_EN, CORE_SWMODE); @@ -75,10 +64,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) b53_brcm_hdr_setup(ds, port); - /* Force link status for IMP port */ - reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS); - core_writel(priv, reg, offset); + if (port == 8) { + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; + + /* Force link status for IMP port */ + reg = core_readl(priv, offset); + reg |= (MII_SW_OR | LINK_STS); + core_writel(priv, reg, offset); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + } else { + reg = core_readl(priv, CORE_G_PCTL_PORT(port)); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_G_PCTL_PORT(port)); + } } static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) @@ -303,11 +309,10 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, * send them to our master MDIO bus controller */ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) - bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); + return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); else - mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val); - - return 0; + return mdiobus_write_nested(priv->master_mii_bus, addr, + regnum, val); } static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) @@ -692,7 +697,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) * port, the other ones have already been disabled during * bcm_sf2_sw_setup */ - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) bcm_sf2_port_disable(ds, port, NULL); } @@ -724,10 +729,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; /* Get the parent device WoL 
settings */ - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; @@ -748,9 +754,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; @@ -1091,12 +1098,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, true); + ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, false); + ret = bcm_sf2_cfp_rst(priv); if (ret) { pr_err("failed to reset CFP\n"); diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 47c5f272a084dffe9b8340514449b5aa5c5bcd40..21db1804e85d9370d0331e3dc7091536b705312a 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -742,6 +742,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, fs->m_ext.data[1])) return -EINVAL; + if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) + return -EINVAL; + if (fs->location != RX_CLS_LOC_ANY && test_bit(fs->location, priv->cfp.used)) return -EBUSY; @@ -836,6 +839,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 next_loc = 0; int ret; + if (loc >= CFP_NUM_RULES) + return -EINVAL; + /* Refuse deleting unused rules, and those that are not unique since * that could leave IPv6 rules with one of the chained rule in the * table. diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 54e0ca6ed7308c511ce42bc6ea3dc6e65fb0662b..86b6464b4525c426e09d4d6a9f98bf9a0ee49111 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev) { int i; - mutex_init(&dev->reg_mutex); - mutex_init(&dev->stats_mutex); - mutex_init(&dev->alu_mutex); - mutex_init(&dev->vlan_mutex); - dev->ds->ops = &ksz_switch_ops; for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { @@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev) if (dev->pdata) dev->chip_id = dev->pdata->chip_id; + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + mutex_init(&dev->alu_mutex); + mutex_init(&dev->vlan_mutex); + if (ksz_switch_detect(dev)) return -EINVAL; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 62e486652e622074b1ca1615e02a6cf029cc667b..5c279d9f49921902360a54b7abecae64fcc8d3a5 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -853,14 +853,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) { struct mt7530_priv *priv = ds->priv; - /* The real fabric path would be decided on the membership in the - * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS - * means potential VLAN can be consisting of certain subset of all - * ports. - */ - mt7530_rmw(priv, MT7530_PCR_P(port), - PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS)); - /* Trapped into security mode allows packet forwarding through VLAN * table lookup. 
*/ @@ -887,11 +879,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. - * And the other port's port matrix cannot be broken when the - * other port is still a VLAN-aware port. */ - if (!priv->ports[i].vlan_filtering && - dsa_is_user_port(ds, i) && i != port) { + if (dsa_is_user_port(ds, i) && i != port) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 65f10fec25b397345b03e503526a3b40de514a3a..0b3e51f248c21a2477c9b1736b0f06b80e350e29 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) /* Reset the switch. */ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, GLOBAL_ATU_CONTROL_SWRESET | - GLOBAL_ATU_CONTROL_ATUSIZE_1024 | - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); + GLOBAL_ATU_CONTROL_LEARNDIS); /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; @@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds) */ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); - /* Enable automatic address learning, set the address - * database size to 1024 entries, and set the default aging - * time to 5 minutes. + /* Disable automatic address learning. */ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, - GLOBAL_ATU_CONTROL_ATUSIZE_1024 | - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN); + GLOBAL_ATU_CONTROL_LEARNDIS); return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8da3d39e321890726bd3c0270153bde685842942..93c893731acb8b5c848915235af830f22374f85f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -231,8 +231,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) { struct mv88e6xxx_mdio_bus *mdio_bus; - mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus, - list); + mdio_bus = list_first_entry_or_null(&chip->mdios, + struct mv88e6xxx_mdio_bus, list); if (!mdio_bus) return NULL; @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) unsigned int sub_irq; unsigned int n; u16 reg; + u16 ctl1; int err; mutex_lock(&chip->reg_lock); @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) if (err) goto out; - for (n = 0; n < chip->g1_irq.nirqs; ++n) { - if (reg & (1 << n)) { - sub_irq = irq_find_mapping(chip->g1_irq.domain, n); - handle_nested_irq(sub_irq); - ++nhandled; + do { + for (n = 0; n < chip->g1_irq.nirqs; ++n) { + if (reg & (1 << n)) { + sub_irq = irq_find_mapping(chip->g1_irq.domain, + n); + handle_nested_irq(sub_irq); + ++nhandled; + } } - } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); + if (err) + goto unlock; + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); +unlock: + mutex_unlock(&chip->reg_lock); + if (err) + goto out; + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); + } while (reg & ctl1); + out: return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); } @@ -426,16 +442,26 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) { + static struct lock_class_key lock_key; + static struct lock_class_key request_key; int err; err = mv88e6xxx_g1_irq_setup_common(chip); if (err) return err; + /* These lock classes tells lockdep that global 1 irqs are in + * a different category than their parent GPIO, so it won't + * report false recursion. + */ + irq_set_lockdep_class(chip->irq, &lock_key, &request_key); + + mutex_unlock(&chip->reg_lock); err = request_threaded_irq(chip->irq, NULL, mv88e6xxx_g1_irq_thread_fn, IRQF_ONESHOT, dev_name(chip->dev), chip); + mutex_lock(&chip->reg_lock); if (err) mv88e6xxx_g1_irq_free_common(chip); @@ -853,7 +879,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); if (err) return U64_MAX; - high = reg; + low |= ((u32)reg) << 16; } break; case STATS_TYPE_BANK1: @@ -868,7 +894,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, default: return U64_MAX; } - value = (((u64)high) << 16) | low; + value = (((u64)high) << 32) | low; return value; } @@ -1460,7 +1486,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, int err; if (!vid) - return -EINVAL; + return -EOPNOTSUPP; entry->vid = vid - 1; entry->valid = false; @@ -2391,6 +2417,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +/* The mv88e6390 has some hidden registers used for debug and + * development. The errata also makes use of them. + */ +static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, + int reg, u16 val) +{ + u16 ctrl; + int err; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); + if (err) + return err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); +} + +static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); +} + + +static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, + int reg, u16 *val) +{ + u16 ctrl; + int err; + + ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | + PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | + reg; + + err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, + PORT_RESERVED_1A, ctrl); + if (err) + return err; + + err = mv88e6390_hidden_wait(chip); + if (err) + return err; + + return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, + PORT_RESERVED_1A, val); +} + +/* Check if the errata has already been applied. */ +static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + u16 val; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_read(chip, port, 0, &val); + if (err) { + dev_err(chip->dev, + "Error reading hidden register: %d\n", err); + return false; + } + if (val != 0x01c0) + return false; + } + + return true; +} + +/* The 6390 copper ports have an errata which require poking magic + * values into undocumented hidden registers and then performing a + * software reset. 
+ */ +static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) +{ + int port; + int err; + + if (mv88e6390_setup_errata_applied(chip)) + return 0; + + /* Set the ports into blocking mode */ + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); + if (err) + return err; + } + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); + if (err) + return err; + } + + return mv88e6xxx_software_reset(chip); +} + static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2403,6 +2530,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) mutex_lock(&chip->reg_lock); + if (chip->info->ops->setup_errata) { + err = chip->info->ops->setup_errata(chip); + if (err) + goto unlock; + } + /* Cache the cmode of each port. */ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { if (chip->info->ops->port_get_cmode) { @@ -2512,11 +2645,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) mutex_unlock(&chip->reg_lock); if (reg == MII_PHYSID2) { - /* Some internal PHYS don't have a model number. Use - * the mv88e6390 family model number instead. - */ - if (!(val & 0x3f0)) - val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; + /* Some internal PHYs don't have a model number. */ + if (chip->info->family != MV88E6XXX_FAMILY_6165) + /* Then there is the 6165 family. It gets its + * PHYs correct. But it can also have two + * SERDES interfaces in the PHY address + * space. And these don't have a model + * number. But they are not PHYs, so we don't + * want to give them something a PHY driver + * will recognise. + * + * Use the mv88e6390 family model number + * instead, for anything which really could be + * a PHY. + */ + if (!(val & 0x3f0)) + val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; } return err ?
err : val; @@ -2895,7 +3039,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, - .port_set_speed = mv88e6390_port_set_speed, + .port_set_speed = mv88e6341_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -2947,7 +3091,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, - .stats_snapshot = mv88e6320_g1_stats_snapshot, + .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3201,6 +3345,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { static const struct mv88e6xxx_ops mv88e6190_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3243,6 +3388,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { static const struct mv88e6xxx_ops mv88e6190x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3285,6 +3431,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { static const struct mv88e6xxx_ops mv88e6191_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3374,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3512,7 +3660,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, - .port_set_speed = mv88e6390_port_set_speed, + .port_set_speed = mv88e6341_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3675,6 +3823,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { static const struct mv88e6xxx_ops mv88e6390_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3722,6 +3871,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { static const struct mv88e6xxx_ops mv88e6390x_ops = { /* MV88E6XXX_FAMILY_6390 */ + .setup_errata = mv88e6390_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -4059,7 +4209,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + 
.num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4082,7 +4232,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4105,7 +4255,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6191", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .max_vid = 8191, .port_base_addr = 0x0, .phy_base_addr = 0x0, @@ -4152,7 +4302,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6290", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4314,7 +4464,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4337,7 +4487,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4432,6 +4582,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } +static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip) +{ + int i; + + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) + chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID; +} + static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, int port) { @@ -4468,6 +4626,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (err) goto free; + mv88e6xxx_ports_cmode_init(chip); + mutex_lock(&chip->reg_lock); err = mv88e6xxx_switch_reset(chip); mutex_unlock(&chip->reg_lock); @@ -4669,11 +4829,14 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) err = PTR_ERR(chip->reset); goto out; } + if (chip->reset) + usleep_range(1000, 2000); err = mv88e6xxx_detect(chip); if (err) goto out; + mv88e6xxx_ports_cmode_init(chip); mv88e6xxx_phy_init(chip); if (chip->info->ops->get_eeprom) { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index f9ecb7872d32cd3a6f0807e176a8fc04f63692a7..546651d8c3e1fd5e395d2da1527c7a04f02a00f3 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { }; struct mv88e6xxx_ops { + /* Switch Setup Errata, called early in the switch setup to + * allow any errata actions to be performed + */ + int (*setup_errata)(struct mv88e6xxx_chip *chip); + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); int (*ip_pri_map)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index d721ccf7d8bed8230fa5fbbac2ecfd5068680cac..8298d6743c5771ca9e92bce7a47ef67bf2a4517a 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -371,6 +371,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) { u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; + /* Use the default high priority for management frames sent to + * the CPU. 
+ */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + return mv88e6390_g1_monitor_write(chip, ptr, port); } @@ -567,6 +572,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip) if (err) return err; + /* Keep the histogram mode bits */ + val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX; val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL; err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val); diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index bef01331266f40776db98937c13d4e468158fa72..70b870c7cf1d230cdadf178e670d1dfb81d009fc 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -197,6 +197,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff /* Offset 0x1C: Global Control 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93d19f3a3938f3fabbe74743b1352d..ea243840ee0fe62e28c5ccc6599533d406e22de8 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) { struct mv88e6xxx_chip *chip = dev_id; struct mv88e6xxx_atu_entry entry; + int spid; int err; u16 val; @@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; + spid = entry.state; + if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU age out violation for %pM\n", @@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU member violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_member_violation++; + "ATU member violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_member_violation++; } if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU miss violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_miss_violation++; + "ATU miss violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_miss_violation++; } if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU full violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_full_violation++; + "ATU full violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_full_violation++; } mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 058326924f3e2f955161f77bfbc7a7fbcb6d65f1..7a6667e0b9f9ce6466c9fa70da78b7ae41db887d 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -419,7 +419,7 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, * VTU DBNum[7:4] are located in VTU Operation 11:8 */ op |= entry->fid & 0x000f; - op |= (entry->fid & 0x00f0) << 8; + op |= (entry->fid & 0x00f0) << 4; } return mv88e6xxx_g1_vtu_op(chip, op); diff --git a/drivers/net/dsa/mv88e6xxx/phy.c 
b/drivers/net/dsa/mv88e6xxx/phy.c index 46af8052e535361e7d73ce62f4e6a017c1352bbc..152a65d46e0b85d26326e2c39f8452e5da5a3806 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, err = mv88e6xxx_phy_page_get(chip, phy, page); if (!err) { err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); + if (!err) + err = mv88e6xxx_phy_write(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); } diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 92945841c8e882e0e582372778d5c81603052236..2f16a310c110eb3682edd2e8a2410b1facea77a6 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup) /* normal duplex detection */ break; default: - return -EINVAL; + return -EOPNOTSUPP; } err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); @@ -228,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port, ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000; break; case 2500: - ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | - MV88E6390_PORT_MAC_CTL_ALTSPEED; + if (alt_bit) + ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 | + MV88E6390_PORT_MAC_CTL_ALTSPEED; + else + ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000; break; case 10000: /* all bits set, fall through... */ @@ -291,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) return mv88e6xxx_port_set_speed(chip, port, speed, false, false); } +/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */ +int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) +{ + if (speed == SPEED_MAX) + speed = port < 5 ? 1000 : 2500; + + if (speed > 2500) + return -EOPNOTSUPP; + + if (speed == 200 && port != 0) + return -EOPNOTSUPP; + + if (speed == 2500 && port < 5) + return -EOPNOTSUPP; + + return mv88e6xxx_port_set_speed(chip, port, speed, !port, true); +} + /* Support 10, 100, 200, 1000 Mbps (e.g. 
88E6352 family) */ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) { @@ -374,19 +395,27 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } + /* cmode doesn't change, nothing to do for us */ + if (cmode == chip->ports[port].cmode) + return 0; + lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane < 0) + if (lane < 0 && lane != -ENODEV) return lane; - if (chip->ports[port].serdes_irq) { - err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (lane >= 0) { + if (chip->ports[port].serdes_irq) { + err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (err) + return err; + } + + err = mv88e6390x_serdes_power(chip, port, false); if (err) return err; } - err = mv88e6390_serdes_power(chip, port, false); - if (err) - return err; + chip->ports[port].cmode = 0; if (cmode) { err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); @@ -400,7 +429,13 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6390_serdes_power(chip, port, true); + chip->ports[port].cmode = cmode; + + lane = mv88e6390x_serdes_get_lane(chip, port); + if (lane < 0) + return lane; + + err = mv88e6390x_serdes_power(chip, port, true); if (err) return err; @@ -411,8 +446,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, } } - chip->ports[port].cmode = cmode; - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index f32f56af8e35d3a1b735406c1561e98863051352..cbb64a7683e287d724203cda74a186297e85d6b3 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -52,6 +52,7 @@ #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 +#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff /* Offset 0x01: MAC (or PCS or Physical) Control Register */ #define MV88E6XXX_PORT_MAC_CTL 0x01 @@ -251,6 +252,16 @@ /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 +/* Offset 0x1a: Magic undocumented errata register */ +#define PORT_RESERVED_1A 0x1a +#define PORT_RESERVED_1A_BUSY BIT(15) +#define PORT_RESERVED_1A_WRITE BIT(14) +#define PORT_RESERVED_1A_READ 0 +#define PORT_RESERVED_1A_PORT_SHIFT 5 +#define PORT_RESERVED_1A_BLOCK (0xf << 10) +#define PORT_RESERVED_1A_CTRL_PORT 4 +#define PORT_RESERVED_1A_DATA_PORT 5 + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, u16 *val); int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, @@ -269,6 +280,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup); int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); +int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index cdcde7f8e0b275362b759ba95fae337041d553e0..33232cc9fb04d2a0dcf8713e9f161e971a7a0bd0 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -543,7 +543,7 @@ qca8k_setup(struct dsa_switch *ds) BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); /* Setup connection between CPU port &
user ports */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < QCA8K_NUM_PORTS; i++) { /* CPU port gets connected to all user ports of the switch */ if (dsa_is_cpu_port(ds, i)) { qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), @@ -620,22 +620,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) qca8k_port_set_status(priv, port, 1); } -static int -qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_read(priv->bus, phy, regnum); -} - -static int -qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - return mdiobus_write(priv->bus, phy, regnum, val); -} - static void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { @@ -876,8 +860,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .setup = qca8k_setup, .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, - .phy_read = qca8k_phy_read, - .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, .get_mac_eee = qca8k_get_mac_eee, @@ -915,7 +897,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS); if (!priv->ds) return -ENOMEM; diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index b4b839a1d09521c521b340679592b3f36b5fb33f..ad41ec63cc9f03aae553e43660afbe08dbc85522 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi.c @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) struct device_node *mdio_np; int ret; - mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, - "realtek,smi-mdio"); + mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); if (!mdio_np) { dev_err(smi->dev, "no MDIO bus node\n"); return -ENODEV; } smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); - if (!smi->slave_mii_bus) - return -ENOMEM; + if (!smi->slave_mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } smi->slave_mii_bus->priv = smi; smi->slave_mii_bus->name = "SMI slave MII"; smi->slave_mii_bus->read = realtek_smi_mdio_read; @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) if (ret) { dev_err(smi->dev, "unable to register MDIO bus %s\n", smi->slave_mii_bus->id); - of_node_put(mdio_np); + goto err_put_node; } return 0; + +err_put_node: + of_node_put(mdio_np); + + return ret; } static int realtek_smi_probe(struct platform_device *pdev) @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); dsa_unregister_switch(smi->ds); + if (smi->slave_mii_bus) + of_node_put(smi->slave_mii_bus->dev.of_node); gpiod_set_value(smi->reset, 1); return 0; diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 6dedd43442cc5775980725bbda8c3c7d5ee68443..c281c488a306fb6aed2839b0b0442a2877ad7328 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) struct rtl8366_vlan_4k vlan4k; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) + /* Use VLAN nr port + 1 since VLAN0 is not valid */ + if (!smi->ops->is_vlan_valid(smi, port + 1)) return -EINVAL; dev_info(smi->dev, "%s filtering on port 
%d\n", @@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) * The hardware support filter ID (FID) 0..7, I have no clue how to * support this in the driver when the callback only says on/off. */ - ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); if (ret) return ret; /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port, + ret = rtl8366_set_vlan(smi, port + 1, vlan4k.member, vlan4k.untag, 1); @@ -338,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { struct realtek_smi *smi = ds->priv; + u16 vid; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) - return -EINVAL; + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) + if (!smi->ops->is_vlan_valid(smi, vid)) + return -EINVAL; dev_info(smi->dev, "prepare VLANs %04x..%04x\n", vlan->vid_begin, vlan->vid_end); @@ -369,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, u16 vid; int ret; - if (!smi->ops->is_vlan_valid(smi, port)) - return; + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) + if (!smi->ops->is_vlan_valid(smi, vid)) + return; dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", port, diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index a4d5049df6928281edaddb198457c064989e3657..f4b14b6acd22d9d1683911d72378fa7e5c9cf697 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) irq = of_irq_get(intc, 0); if (irq <= 0) { dev_err(smi->dev, "failed to get parent IRQ\n"); - return irq ? irq : -EINVAL; + ret = irq ? irq : -EINVAL; + goto out_put_node; } /* This clears the IRQ status register */ @@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) &val); if (ret) { dev_err(smi->dev, "can't read interrupt status\n"); - return ret; + goto out_put_node; } /* Fetch IRQ edge information from the descriptor */ @@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) val); if (ret) { dev_err(smi->dev, "could not configure IRQ polarity\n"); - return ret; + goto out_put_node; } ret = devm_request_threaded_irq(smi->dev, irq, NULL, @@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) "RTL8366RB", smi); if (ret) { dev_err(smi->dev, "unable to request irq: %d\n", ret); - return ret; + goto out_put_node; } smi->irqdomain = irq_domain_add_linear(intc, RTL8366RB_NUM_INTERRUPT, @@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) smi); if (!smi->irqdomain) { dev_err(smi->dev, "failed to create IRQ domain\n"); - return -EINVAL; + ret = -EINVAL; + goto out_put_node; } for (i = 0; i < smi->num_ports; i++) irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq); - return 0; +out_put_node: + of_node_put(intc); + return ret; } static int rtl8366rb_set_addr(struct realtek_smi *smi) diff --git a/drivers/net/ethernet/3snic/Kconfig b/drivers/net/ethernet/3snic/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..1e67b76c9147cac9160c2227d893989add0b8977 --- /dev/null +++ b/drivers/net/ethernet/3snic/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config NET_VENDOR_3SNIC + bool "3SNIC smart NIC devices" + depends on PCI + help + If you have a network (Ethernet) card belonging to this class, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about 3SNIC cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_3SNIC + +source "drivers/net/ethernet/3snic/sssnic/Kconfig" + +endif # NET_VENDOR_3SNIC diff --git a/drivers/net/ethernet/3snic/Makefile b/drivers/net/ethernet/3snic/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..eb9a8b8cf105c31ea3499fe233ca41271acefc25 --- /dev/null +++ b/drivers/net/ethernet/3snic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. +# + +obj-$(CONFIG_SSSNIC) += sssnic/ diff --git a/drivers/net/ethernet/3snic/sssnic/Kconfig b/drivers/net/ethernet/3snic/sssnic/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..9a89d62ae5fd5fae3d494db4b1f09618dcf0133b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 3SNIC network device configuration +# + +config SSSNIC + tristate "3SNIC Ethernet Controller SSSNIC Support" + depends on PCI + depends on ARM64 || X86_64 + select SSSNIC_HW + help + This driver supports 3SNIC Ethernet Controller SSSNIC device. + For more information about this product, go to the product + description with smart NIC: + + + + To compile this driver as a module, choose M here. The module + will be called sssnic. + +config SSSNIC_HW + tristate + depends on PCI diff --git a/drivers/net/ethernet/3snic/sssnic/Makefile b/drivers/net/ethernet/3snic/sssnic/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d9a179959e4985e1e0d763a0be00296608c3a25b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the 3SNIC network device drivers. 
+# + +obj-$(CONFIG_SSSNIC_HW) += hw/ +obj-$(CONFIG_SSSNIC) += nic/ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/Makefile b/drivers/net/ethernet/3snic/sssnic/hw/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..989527da46a3fce40a2dea72d1f178189b7c4128 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/Makefile @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 3SNIC +# +SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S) +ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\" + +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/hw/include + +ccflags-y += -Werror +ccflags-y += -Wno-implicit-fallthrough + +obj-$(CONFIG_SSSNIC_HW) += sssdk.o +sssdk-y := sss_hw_main.o \ + sss_pci_probe.o \ + sss_pci_remove.o \ + sss_pci_shutdown.o \ + sss_pci_error.o \ + sss_pci_sriov.o \ + sss_pci_global.o \ + sss_hwdev_api.o \ + sss_hwdev_cap.o \ + sss_hwdev_export.o \ + sss_hwdev_link.o \ + sss_hwdev_init.o \ + sss_hwdev_mgmt_info.o \ + sss_hwdev_mgmt_channel.o \ + sss_hwdev_io_flush.o \ + sss_hwif_ctrlq.o \ + sss_hwif_ctrlq_init.o \ + sss_hwif_ctrlq_export.o \ + sss_hwif_mbx.o \ + sss_hwif_mbx_init.o \ + sss_hwif_mbx_export.o \ + sss_hwif_adm.o \ + sss_hwif_adm_init.o \ + sss_hwif_init.o \ + sss_hwif_api.o \ + sss_hwif_export.o \ + sss_hwif_eq.o \ + sss_hwif_mgmt_init.o \ + sss_hwif_irq.o \ + sss_hwif_aeq.o \ + sss_common.o \ + sss_wq.o \ + sss_hwif_ceq.o \ + sss_adapter_mgmt.o \ + ./tool/sss_tool_main.o \ + ./tool/sss_tool_chip.o \ + ./tool/sss_tool_sdk.o \ + ./tool/sss_tool_sm.o diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h new file mode 100644 index 0000000000000000000000000000000000000000..afc5ff37f4a3cfd887d3a2f101683d5e186bb7ff --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adapter.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_H +#define SSS_ADAPTER_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_sriov_info.h" + +#define SSS_MAX_FUNC 4096 + +struct sss_card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + u8 bus_id; + u8 resvd[7]; + u16 func_num; + atomic_t channel_timeout_cnt; + void *func_handle_array[SSS_MAX_FUNC]; + void *dbgtool_info; +}; + +/* Structure pcidev private */ +struct sss_pci_adapter { + struct pci_dev *pcidev; + void *hwdev; + + struct sss_hal_dev hal_dev; + + /* Record the upper driver object address, + * such as nic_dev and toe_dev, fc_dev + */ + void *uld_dev[SSS_SERVICE_TYPE_MAX]; + + /* Record the upper driver object name */ + char uld_dev_name[SSS_SERVICE_TYPE_MAX][IFNAMSIZ]; + + /* Manage all function device linked by list */ + struct list_head node; + + void __iomem *cfg_reg_bar; + void __iomem *intr_reg_bar; + void __iomem *mgmt_reg_bar; + void __iomem *db_reg_bar; + u64 db_dwqe_len; + u64 db_base_paddr; + + struct sss_card_node *chip_node; + + int init_state; + + struct sss_sriov_info sriov_info; + + atomic_t ref_cnt; + + atomic_t 
uld_ref_cnt[SSS_SERVICE_TYPE_MAX]; + spinlock_t uld_lock; /* protect uld probe and remove */ + + /* set when uld driver processing event */ + unsigned long uld_run_state; + + unsigned long uld_attach_state; + + /* lock for attach/detach uld */ + struct mutex uld_attach_mutex; + + spinlock_t dettach_uld_lock; /* spin lock for uld_attach_state access */ +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h new file mode 100644 index 0000000000000000000000000000000000000000..fbcf0b007194b005a07046b9091cd37f1dce6d40 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_adm_info.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADM_INFO_H +#define SSS_ADM_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" + +enum sss_adm_msg_type { + /* write to mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_TO_MGMT_MODULE = 2, + + /* multi read command with completion notification */ + SSS_ADM_MSG_MULTI_READ = 3, + + /* write command without completion notification */ + SSS_ADM_MSG_POLL_WRITE = 4, + + /* read command without completion notification */ + SSS_ADM_MSG_POLL_READ = 5, + + /* read from mgmt cpu command with completion */ + SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE = 6, + + SSS_ADM_MSG_MAX, +}; + +struct sss_adm_msg_state { + u64 head; + u32 desc_buf; + u32 elem_hi; + u32 elem_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct sss_adm_msg_elem { + u64 control; + + u64 next_elem_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_msg_paddr; + } write; + + struct { + u64 hw_wb_reply_paddr; + u64 hw_msg_paddr; + } read; + }; +}; + +struct sss_adm_msg_reply_fmt { + u64 head; + u64 reply; +}; + +struct sss_adm_msg_elem_ctx { + struct sss_adm_msg_elem *elem_vaddr; + + void *adm_msg_vaddr; + + struct sss_adm_msg_reply_fmt *reply_fmt; + + struct completion done; + int state; + + u32 store_pi; + void *hwdev; +}; + +struct sss_adm_msg { + void *hwdev; + + enum sss_adm_msg_type msg_type; + + u32 elem_num; + + u16 elem_size; + u16 reply_size; + + u32 pi; + u32 ci; + + struct semaphore sem; + spinlock_t async_lock; /* protect adm msg async and sync */ + dma_addr_t wb_state_paddr; + + dma_addr_t head_elem_paddr; + + struct sss_adm_msg_state *wb_state; + + struct sss_adm_msg_elem *head_node; + + struct sss_adm_msg_elem_ctx *elem_ctx; + struct sss_adm_msg_elem *now_node; + + struct sss_dma_addr_align elem_addr; + + u8 *elem_vaddr_base; + u8 *reply_vaddr_base; + u8 *buf_vaddr_base; + + u64 elem_paddr_base; + u64 reply_paddr_base; + u64 buf_paddr_base; + u64 elem_size_align; + u64 reply_size_align; + u64 buf_size_align; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h new file mode 100644 index 0000000000000000000000000000000000000000..bdcec6ae4ad8136e7b09b4b3f6bf92ec7ae42693 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_aeq_info.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_AEQ_INFO_H +#define SSS_AEQ_INFO_H + +#include +#include + +#include "sss_eq_info.h" +#include "sss_hw_aeq.h" + +#define SSS_MAX_AEQ 4 + +typedef void (*sss_aeq_hw_event_handler_t)(void *pri_handle, u8 *data, u8 size); +typedef u8 (*sss_aeq_sw_event_handler_t)(void *pri_handle, u8 event, u8 *data); + 
+struct sss_aeq_info { + void *hwdev; + + sss_aeq_hw_event_handler_t hw_event_handler[SSS_AEQ_EVENT_MAX]; + void *hw_event_data[SSS_AEQ_EVENT_MAX]; + sss_aeq_sw_event_handler_t sw_event_handler[SSS_AEQ_SW_EVENT_MAX]; + void *sw_event_data[SSS_AEQ_SW_EVENT_MAX]; + unsigned long hw_event_handler_state[SSS_AEQ_EVENT_MAX]; + unsigned long sw_event_handler_state[SSS_AEQ_SW_EVENT_MAX]; + + struct sss_eq aeq[SSS_MAX_AEQ]; + u16 num; + u16 rsvd1; + u32 rsvd2; + struct workqueue_struct *workq; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h new file mode 100644 index 0000000000000000000000000000000000000000..749268d67a6bfac8e108d38ae4d9f153c5441ac2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_board_info.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_BOARD_INFO_H +#define SSS_BOARD_INFO_H + +enum sss_board_type_define { + SSS_BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ + SSS_BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ + SSS_BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */ + SSS_BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */ + SSS_BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ + SSS_BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ + SSS_BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */ + SSS_BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ + SSS_BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ + SSS_BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ + SSS_BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */ + SSS_BOARD_MAX_TYPE = 0xFF +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h new file mode 100644 index 0000000000000000000000000000000000000000..e6806f64cadad893a3bceccb568ffb9195d15017 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ceq_info.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CEQ_INFO_H +#define SSS_CEQ_INFO_H + +#include + +#include "sss_hw_ceq.h" +#include "sss_eq_info.h" + +#define SSS_MAX_CEQ 32 + +typedef void (*sss_ceq_event_handler_t)(void *dev, u32 data); + +struct sss_ceq_info { + void *hwdev; + + sss_ceq_event_handler_t event_handler[SSS_CEQ_EVENT_MAX]; + void *event_handler_data[SSS_CEQ_EVENT_MAX]; + void *ceq_data[SSS_CEQ_EVENT_MAX]; + unsigned long event_handler_state[SSS_CEQ_EVENT_MAX]; + + struct sss_eq ceq[SSS_MAX_CEQ]; + u16 num; + 
u16 rsvd1; + u32 rsvd2; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h new file mode 100644 index 0000000000000000000000000000000000000000..08e4389957964d26288a6947b26c560317c5a8ec --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_csr.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CSR_H +#define SSS_CSR_H + +#define SSS_CSR_CFG_FLAG 0x40000000 + +#define SSS_MGMT_FLAG 0xC0000000 + +#define SSS_CSR_FLAG_MASK 0x3FFFFFFF + +#define SSS_VF_CFG_REG_OFFSET 0x2000 + +#define SSS_HOST_CSR_BASE_ADDR (SSS_MGMT_FLAG + 0x6000) +#define SSS_CSR_GLOBAL_BASE_ADDR (SSS_MGMT_FLAG + 0x6400) + +/* HW interface registers */ +#define SSS_CSR_HW_ATTR0_ADDR (SSS_CSR_CFG_FLAG + 0x0) +#define SSS_CSR_HW_ATTR1_ADDR (SSS_CSR_CFG_FLAG + 0x4) +#define SSS_CSR_HW_ATTR2_ADDR (SSS_CSR_CFG_FLAG + 0x8) +#define SSS_CSR_HW_ATTR3_ADDR (SSS_CSR_CFG_FLAG + 0xC) +#define SSS_CSR_HW_ATTR4_ADDR (SSS_CSR_CFG_FLAG + 0x10) +#define SSS_CSR_HW_ATTR5_ADDR (SSS_CSR_CFG_FLAG + 0x14) +#define SSS_CSR_HW_ATTR6_ADDR (SSS_CSR_CFG_FLAG + 0x18) + +#define SSS_HW_CSR_MBX_DATA_OFF 0x80 +#define SSS_HW_CSR_MBX_CTRL_OFF (SSS_CSR_CFG_FLAG + 0x0100) +#define SSS_HW_CSR_MBX_INT_OFFSET_OFF (SSS_CSR_CFG_FLAG + 0x0104) +#define SSS_HW_CSR_MBX_RES_H_OFF (SSS_CSR_CFG_FLAG + 0x0108) +#define SSS_HW_CSR_MBX_RES_L_OFF (SSS_CSR_CFG_FLAG + 0x010C) + +#define SSS_PPF_ELECT_OFF 0x0 +#define SSS_MPF_ELECT_OFF 0x20 + +#define SSS_CSR_PPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_PPF_ELECT_OFF) + +#define SSS_CSR_GLOBAL_MPF_ELECT_ADDR \ + (SSS_HOST_CSR_BASE_ADDR + SSS_MPF_ELECT_OFF) + +#define SSS_CSR_HW_PPF_ELECT_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x60) +#define SSS_CSR_HW_PPF_ELECT_PORT_STRIDE 0x4 + +#define SSS_CSR_FUNC_PPF_ELECT(host_id) \ + (SSS_CSR_HW_PPF_ELECT_BASE_ADDR + \ + (host_id) * SSS_CSR_HW_PPF_ELECT_PORT_STRIDE) + +#define SSS_CSR_DMA_ATTR_TBL_ADDR (SSS_CSR_CFG_FLAG + 0x380) +#define SSS_CSR_DMA_ATTR_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x390) + +/* CLP registers */ +#define SSS_BAR3_CLP_BASE_ADDR (SSS_MGMT_FLAG + 0x0000) + +#define SSS_UCPU_CLP_SIZE_REG (SSS_HOST_CSR_BASE_ADDR + 0x40) +#define SSS_UCPU_CLP_REQBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x44) +#define SSS_UCPU_CLP_RSPBASE_REG (SSS_HOST_CSR_BASE_ADDR + 0x48) +#define SSS_UCPU_CLP_REQ_REG (SSS_HOST_CSR_BASE_ADDR + 0x4c) +#define SSS_UCPU_CLP_RSP_REG (SSS_HOST_CSR_BASE_ADDR + 0x50) +#define SSS_CLP_REG(member) (SSS_UCPU_CLP_##member##_REG) + +#define SSS_CLP_REQ_DATA SSS_BAR3_CLP_BASE_ADDR +#define SSS_CLP_RSP_DATA (SSS_BAR3_CLP_BASE_ADDR + 0x1000) +#define SSS_CLP_DATA(member) (SSS_CLP_##member##_DATA) + +/* MSI-X registers */ +#define SSS_CSR_MSIX_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x310) +#define SSS_CSR_MSIX_CTRL_ADDR (SSS_CSR_CFG_FLAG + 0x300) +#define SSS_CSR_MSIX_CNT_ADDR (SSS_CSR_CFG_FLAG + 0x304) +#define SSS_CSR_FUNC_MSI_CLR_WR_ADDR (SSS_CSR_CFG_FLAG + 0x58) + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_SHIFT 22 + +#define SSS_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define SSS_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define SSS_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U +#define SSS_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U 
+#define SSS_MSI_CLR_INDIR_SIMPLE_INDIR_ID_MASK 0x3FFU + +#define SSS_SET_MSI_CLR_INDIR(val, member) \ + (((val) & SSS_MSI_CLR_INDIR_##member##_MASK) << \ + SSS_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define SSS_AEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x210) +#define SSS_CEQ_INDIR_ID_ADDR (SSS_CSR_CFG_FLAG + 0x290) + +#define SSS_EQ_INDIR_ID_ADDR(type) \ + ((type == SSS_AEQ) ? SSS_AEQ_INDIR_ID_ADDR : SSS_CEQ_INDIR_ID_ADDR) + +#define SSS_AEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x240) +#define SSS_CEQ_MTT_OFF_BASE_ADDR (SSS_CSR_CFG_FLAG + 0x2C0) + +#define SSS_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define SSS_AEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_AEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CEQ_PHY_HI_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE) + +#define SSS_CEQ_PHY_LO_ADDR_REG(pg_num) \ + (SSS_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * SSS_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define SSS_CSR_AEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x200) +#define SSS_CSR_AEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x204) +#define SSS_CSR_AEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x208) +#define SSS_CSR_AEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x20C) +#define SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x50) + +#define SSS_CSR_CEQ_CTRL_0_ADDR (SSS_CSR_CFG_FLAG + 0x280) +#define SSS_CSR_CEQ_CTRL_1_ADDR (SSS_CSR_CFG_FLAG + 0x284) +#define SSS_CSR_CEQ_CI_ADDR (SSS_CSR_CFG_FLAG + 0x288) +#define SSS_CSR_CEQ_PI_ADDR (SSS_CSR_CFG_FLAG + 0x28c) +#define SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SSS_CSR_CFG_FLAG + 0x54) + +/* ADM MSG registers */ +#define SSS_CSR_ADM_MSG_BASE (SSS_MGMT_FLAG + 0x2000) + +#define SSS_CSR_ADM_MSG_STRIDE 0x80 + +#define SSS_CSR_ADM_MSG_HEAD_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x0 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_HEAD_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x4 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_HI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x8 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_LO_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0xC + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x10 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_CTRL_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x14 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_PI_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x1C + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_REQ_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x20 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +#define SSS_CSR_ADM_MSG_STATE_0_ADDR(id) \ + (SSS_CSR_ADM_MSG_BASE + 0x30 + (id) * SSS_CSR_ADM_MSG_STRIDE) + +/* self test register */ +#define SSS_MGMT_HEALTH_STATUS_ADDR (SSS_MGMT_FLAG + 0x983c) + +#define SSS_CHIP_BASE_INFO_ADDR (SSS_MGMT_FLAG + 0xB02C) + +#define SSS_CHIP_ERR_STATUS0_ADDR (SSS_MGMT_FLAG + 0xC0EC) +#define SSS_CHIP_ERR_STATUS1_ADDR (SSS_MGMT_FLAG + 0xC0F0) + +#define SSS_ERR_INFO0_ADDR (SSS_MGMT_FLAG + 0xC0F4) +#define SSS_ERR_INFO1_ADDR (SSS_MGMT_FLAG + 0xC0F8) +#define SSS_ERR_INFO2_ADDR (SSS_MGMT_FLAG + 0xC0FC) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h new file mode 100644 index 0000000000000000000000000000000000000000..02727d453fed460531e6c2b2d41ee56a2b30f1e5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_ctrlq_info.h 
@@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_CTRLQ_INFO_H +#define SSS_CTRLQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hw_wq.h" +#include "sss_hw_ctrlq.h" + +#define SSS_DEFAULT_WQ_PAGE_SIZE 0x100000 +#define SSS_HW_WQ_PAGE_SIZE 0x1000 +#define SSS_MAX_WQ_PAGE_NUM 8 + +/* ctrlq ack type */ +enum sss_ack_type { + SSS_ACK_TYPE_CTRLQ, + SSS_ACK_TYPE_SHARE_CQN, + SSS_ACK_TYPE_APP_CQN, + + SSS_MOD_ACK_MAX = 15, +}; + +enum sss_ctrlq_type { + SSS_CTRLQ_SYNC, + SSS_CTRLQ_ASYNC, + SSS_MAX_CTRLQ_TYPE = 4 +}; + +enum sss_ctrlq_msg_type { + SSS_MSG_TYPE_NONE, + SSS_MSG_TYPE_SET_ARM, + SSS_MSG_TYPE_DIRECT_RESP, + SSS_MSG_TYPE_SGE_RESP, + SSS_MSG_TYPE_ASYNC, + SSS_MSG_TYPE_PSEUDO_TIMEOUT, + SSS_MSG_TYPE_TIMEOUT, + SSS_MSG_TYPE_FORCE_STOP, + SSS_MSG_TYPE_MAX +}; + +struct sss_ctrlq_cmd_info { + enum sss_ctrlq_msg_type msg_type; + u16 channel; + + struct completion *done; + int *err_code; + int *cmpt_code; + u64 *direct_resp; + u64 msg_id; + + struct sss_ctrl_msg_buf *in_buf; + struct sss_ctrl_msg_buf *out_buf; +}; + +struct sss_ctrlq { + struct sss_wq wq; + + enum sss_ctrlq_type ctrlq_type; + int wrapped; + + /* spinlock for send ctrlq commands */ + spinlock_t ctrlq_lock; + + struct sss_ctrlq_ctxt_info ctrlq_ctxt; + + struct sss_ctrlq_cmd_info *cmd_info; + + void *hwdev; +}; + +struct sss_ctrlq_info { + void *hwdev; + + struct pci_pool *msg_buf_pool; + + /* doorbell area */ + u8 __iomem *db_base; + + /* All ctrlq's CLA of a VF occupy a PAGE when ctrlq wq is 1-level CLA */ + void *wq_block_vaddr; + dma_addr_t wq_block_paddr; + struct sss_ctrlq ctrlq[SSS_MAX_CTRLQ_TYPE]; + + u32 state; + u32 disable_flag; + + u8 lock_channel_en; + u8 num; + u8 rsvd[6]; + unsigned long channel_stop; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h new file mode 100644 index 0000000000000000000000000000000000000000..c8a16dabeacc198711eea8511e2d78be037d1674 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_eq_info.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_EQ_INFO_H +#define SSS_EQ_INFO_H + +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" + +#define SSS_EQ_IRQ_NAME_LEN 64 + +enum sss_eq_type { + SSS_AEQ, + SSS_CEQ +}; + +typedef void (*sss_init_desc_handler_t)(void *eq); +typedef u32 (*sss_chip_init_attr_handler_t)(void *eq); + +struct sss_eq { + char *name; + void *hwdev; + enum sss_eq_type type; + u32 page_size; + u32 old_page_size; + u32 len; + + u32 ci; + + u16 wrap; + u16 qid; + + u16 entry_size; + u16 page_num; + + u32 num_entry_per_pg; + + struct sss_irq_desc irq_desc; + char irq_name[SSS_EQ_IRQ_NAME_LEN]; + + struct sss_dma_addr_align *page_array; + + struct work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hw_intr_jiffies; + u64 sw_intr_jiffies; + + sss_init_desc_handler_t init_desc_handler; + sss_chip_init_attr_handler_t init_attr_handler; + irq_handler_t irq_handler; +}; + +struct sss_eq_cfg { + enum sss_service_type type; + int id; + int free; /* 1 - alocated, 0- freed */ +}; + +struct sss_eq_info { + struct sss_eq_cfg *eq; + + u8 ceq_num; + + u8 remain_ceq_num; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h new file mode 100644 index 0000000000000000000000000000000000000000..91353309963194af5c721de6612c9db9c7645d8b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwdev.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_H +#define SSS_HWDEV_H + +#include +#include +#include +#include +#include + +#include "sss_hw_common.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_mbx_msg.h" +#include "sss_hw_statistics.h" +#include "sss_hw_event.h" + +#include "sss_hwif.h" +#include "sss_mgmt_info.h" +#include "sss_ctrlq_info.h" +#include "sss_aeq_info.h" +#include "sss_ceq_info.h" +#include "sss_mbx_info.h" +#include "sss_mgmt_channel.h" + +#define SSSNIC_CHANNEL_DETECT_PERIOD (5 * 1000) + +enum sss_func_mode { + SSS_FUNC_MOD_MIN, + + /* single host */ + SSS_FUNC_MOD_NORMAL_HOST = SSS_FUNC_MOD_MIN, + + /* multi host, bare-metal, sdi side */ + SSS_FUNC_MOD_MULTI_BM_MASTER, + + /* multi host, bare-metal, host side */ + SSS_FUNC_MOD_MULTI_BM_SLAVE, + + /* multi host, vm mode, sdi side */ + SSS_FUNC_MOD_MULTI_VM_MASTER, + + /* multi host, vm mode, host side */ + SSS_FUNC_MOD_MULTI_VM_SLAVE, + + SSS_FUNC_MOD_MAX = SSS_FUNC_MOD_MULTI_VM_SLAVE, +}; + +struct sss_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct sss_mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + + struct sss_page_addr *brm_srch_page_addr; +}; + +struct sss_devlink { + void *hwdev; + u8 active_cfg_id; /* 1 ~ 8 */ + u8 switch_cfg_id; /* 1 ~ 8 */ +}; + +struct sss_heartbeat { + u8 pcie_link_down; + u8 heartbeat_lost; + u16 rsvd; + u32 pcie_link_down_cnt; + struct timer_list heartbeat_timer; + struct work_struct lost_work; +}; + +struct sss_aeq_stat { + u16 busy_cnt; + u16 rsvd; + u64 cur_recv_cnt; + u64 last_recv_cnt; +}; + +struct sss_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct sss_hwdev { + void *adapter_hdl; /* pointer to sss_pci_adapter or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + + /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + void *dev_hdl; + void *chip_node; + + void *service_adapter[SSS_SERVICE_TYPE_MAX]; + + u32 wq_page_size; + int chip_present_flag; + u8 poll; /* use polling mode or int mode */ + u8 rsvd[3]; + struct sss_hwif *hwif; /* include void __iomem *bar */ + struct sss_comm_global_attr glb_attr; + u64 features[SSS_MAX_FEATURE_QWORD]; + + struct sss_mgmt_info *mgmt_info; + + struct sss_ctrlq_info *ctrlq_info; + struct sss_aeq_info *aeq_info; + struct sss_ceq_info *ceq_info; + struct sss_mbx *mbx; // mbx + struct sss_msg_pf_to_mgmt *pf_to_mgmt; // adm + struct sss_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct sss_hw_stats hw_stats; + u8 *chip_fault_stats; + + sss_event_handler_t event_handler; + void *event_handler_data; + + struct sss_board_info board_info; + + struct delayed_work sync_time_task; + struct delayed_work channel_detect_task; + + struct workqueue_struct *workq; + + struct sss_heartbeat heartbeat; + + ulong func_state; + spinlock_t channel_lock; /* protect channel init and deinit */ + + struct sss_devlink *devlink_dev; + + enum sss_func_mode func_mode; + + struct sss_aeq_stat aeq_stat; + + u16 aeq_busy_cnt; +}; + +#define SSS_TO_HWDEV(ptr) ((struct sss_hwdev *)(ptr)->hwdev) +#define SSS_TO_DEV(hwdev) (((struct sss_hwdev *)hwdev)->dev_hdl) 
+#define SSS_TO_HWIF(hwdev) (((struct sss_hwdev *)hwdev)->hwif) +#define SSS_TO_MGMT_INFO(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info) +#define SSS_TO_AEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->aeq_info) +#define SSS_TO_CEQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ceq_info) +#define SSS_TO_CTRLQ_INFO(hwdev) (((struct sss_hwdev *)hwdev)->ctrlq_info) +#define SSS_TO_IRQ_INFO(hwdev) (&((struct sss_hwdev *)hwdev)->mgmt_info->irq_info) +#define SSS_TO_SVC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap)) +#define SSS_TO_NIC_CAP(hwdev) (&(((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap)) +#define SSS_TO_MAX_SQ_NUM(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.nic_cap.max_sq) +#define SSS_TO_PHY_PORT_ID(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_id) +#define SSS_TO_MAX_VF_NUM(hwdev) (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.max_vf) +#define SSS_TO_FUNC_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.cos_valid_bitmap) +#define SSS_TO_PORT_COS_BITMAP(hwdev) \ + (((struct sss_hwdev *)hwdev)->mgmt_info->svc_cap.port_cos_valid_bitmap) + +enum sss_servic_bit_define { + SSS_SERVICE_BIT_NIC = 0, + SSS_SERVICE_BIT_ROCE = 1, + SSS_SERVICE_BIT_VBS = 2, + SSS_SERVICE_BIT_TOE = 3, + SSS_SERVICE_BIT_IPSEC = 4, + SSS_SERVICE_BIT_FC = 5, + SSS_SERVICE_BIT_VIRTIO = 6, + SSS_SERVICE_BIT_OVS = 7, + SSS_SERVICE_BIT_NVME = 8, + SSS_SERVICE_BIT_ROCEAA = 9, + SSS_SERVICE_BIT_CURRENET = 10, + SSS_SERVICE_BIT_PPA = 11, + SSS_SERVICE_BIT_MIGRATE = 12, + SSS_MAX_SERVICE_BIT +}; + +#define SSS_CFG_SERVICE_MASK_NIC (0x1 << SSS_SERVICE_BIT_NIC) +#define SSS_CFG_SERVICE_MASK_ROCE (0x1 << SSS_SERVICE_BIT_ROCE) +#define SSS_CFG_SERVICE_MASK_VBS (0x1 << SSS_SERVICE_BIT_VBS) +#define SSS_CFG_SERVICE_MASK_TOE (0x1 << SSS_SERVICE_BIT_TOE) +#define SSS_CFG_SERVICE_MASK_IPSEC (0x1 << SSS_SERVICE_BIT_IPSEC) +#define SSS_CFG_SERVICE_MASK_FC (0x1 << SSS_SERVICE_BIT_FC) +#define SSS_CFG_SERVICE_MASK_VIRTIO (0x1 << SSS_SERVICE_BIT_VIRTIO) +#define SSS_CFG_SERVICE_MASK_OVS (0x1 << SSS_SERVICE_BIT_OVS) +#define SSS_CFG_SERVICE_MASK_NVME (0x1 << SSS_SERVICE_BIT_NVME) +#define SSS_CFG_SERVICE_MASK_ROCEAA (0x1 << SSS_SERVICE_BIT_ROCEAA) +#define SSS_CFG_SERVICE_MASK_CURRENET (0x1 << SSS_SERVICE_BIT_CURRENET) +#define SSS_CFG_SERVICE_MASK_PPA (0x1 << SSS_SERVICE_BIT_PPA) +#define SSS_CFG_SERVICE_MASK_MIGRATE (0x1 << SSS_SERVICE_BIT_MIGRATE) + +#define SSS_CFG_SERVICE_RDMA_EN SSS_CFG_SERVICE_MASK_ROCE + +#define SSS_IS_NIC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_NIC) +#define SSS_IS_ROCE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_ROCE) +#define SSS_IS_VBS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_VBS) +#define SSS_IS_TOE_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_TOE) +#define SSS_IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_IPSEC) +#define SSS_IS_FC_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_FC) +#define SSS_IS_OVS_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_OVS) +#define SSS_IS_RDMA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_RDMA_EN) +#define SSS_IS_RDMA_ENABLE(dev) \ + ((dev)->mgmt_info->svc_cap.sf_svc_attr.rdma_en) +#define SSS_IS_PPA_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & 
SSS_CFG_SERVICE_MASK_PPA) +#define SSS_IS_MIGR_TYPE(dev) \ + (((u32)(dev)->mgmt_info->svc_cap.chip_svc_type) & SSS_CFG_SERVICE_MASK_MIGRATE) + +#define SSS_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define SSS_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define SSS_MGMT_CPU_NODE_ID(hwdev) \ + ((hwdev)->glb_attr.mgmt_host_node_id) + +#define SSS_GET_FUNC_TYPE(hwdev) ((hwdev)->hwif->attr.func_type) +#define SSS_IS_PF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PF) +#define SSS_IS_VF(dev) (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_VF) +#define SSS_IS_PPF(dev) \ + (SSS_GET_FUNC_TYPE(dev) == SSS_FUNC_TYPE_PPF) + +#define SSS_GET_FUNC_ID(hwdev) ((hwdev)->hwif->attr.func_id) + +#define SSS_IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_MASTER) +#define SSS_IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_BM_SLAVE) +#define SSS_IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_MASTER) +#define SSS_IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == SSS_FUNC_MOD_MULTI_VM_SLAVE) + +#define SSS_IS_MASTER_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_VM_MASTER_HOST(hwdev)) + +#define SSS_IS_SLAVE_HOST(hwdev) \ + (SSS_IS_BMGW_SLAVE_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_IS_MULTI_HOST(hwdev) \ + (SSS_IS_BMGW_MASTER_HOST(hwdev) || SSS_IS_BMGW_SLAVE_HOST(hwdev) || \ + SSS_IS_VM_MASTER_HOST(hwdev) || SSS_IS_VM_SLAVE_HOST(hwdev)) + +#define SSS_SPU_HOST_ID 4 + +#define SSS_SUPPORT_ADM_MSG(hwdev) ((hwdev)->features[0] & SSS_COMM_F_ADM) +#define SSS_SUPPORT_MBX_SEGMENT(hwdev) \ + (SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif) == SSS_SPU_HOST_ID) +#define SSS_SUPPORT_CTRLQ_NUM(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CTRLQ_NUM) +#define SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_VIRTIO_VQ_SIZE) +#define SSS_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) +#define SSS_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +enum { + SSS_CFG_FREE = 0, + SSS_CFG_BUSY = 1 +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e18653e79438840d37236f05397facae789452 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_hwif.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_H +#define SSS_HWIF_H + +#include +#include + +struct sss_db_pool { + unsigned long *bitmap; + u32 bit_size; + + /* spinlock for allocating doorbell area */ + spinlock_t id_lock; +}; + +struct sss_func_attr { + enum sss_func_type func_type; + + u16 func_id; + u8 pf_id; + u8 pci_intf_id; + + u16 global_vf_off; + u8 mpf_id; + u8 ppf_id; + + u16 irq_num; /* max: 2 ^ 15 */ + u8 aeq_num; /* max: 2 ^ 3 */ + u8 ceq_num; /* max: 2 ^ 7 */ + + u16 sq_num; /* max: 2 ^ 8 */ + u8 dma_attr_num; /* max: 2 ^ 6 */ + u8 msix_flex_en; +}; + +struct sss_hwif { + u8 __iomem *cfg_reg_base; + u8 __iomem *mgmt_reg_base; + u64 db_base_paddr; + u64 db_dwqe_len; + u8 __iomem *db_base_vaddr; + + void *pdev; + + struct sss_db_pool db_pool; + + struct sss_func_attr attr; +}; + +#define SSS_GET_HWIF_AEQ_NUM(hwif) ((hwif)->attr.aeq_num) +#define SSS_GET_HWIF_CEQ_NUM(hwif) ((hwif)->attr.ceq_num) +#define SSS_GET_HWIF_IRQ_NUM(hwif) ((hwif)->attr.irq_num) +#define SSS_GET_HWIF_GLOBAL_ID(hwif) ((hwif)->attr.func_id) +#define 
SSS_GET_HWIF_PF_ID(hwif) ((hwif)->attr.pf_id) +#define SSS_GET_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_off) +#define SSS_GET_HWIF_PPF_ID(hwif) ((hwif)->attr.ppf_id) +#define SSS_GET_HWIF_MPF_ID(hwif) ((hwif)->attr.mpf_id) +#define SSS_GET_HWIF_PCI_INTF_ID(hwif) ((hwif)->attr.pci_intf_id) +#define SSS_GET_HWIF_FUNC_TYPE(hwif) ((hwif)->attr.func_type) +#define SSS_GET_HWIF_MSIX_EN(hwif) ((hwif)->attr.msix_flex_en) + +#define SSS_SET_HWIF_AEQ_NUM(hwif, val) \ + ((hwif)->attr.aeq_num = (val)) + +#define SSS_SET_HWIF_CEQ_NUM(hwif, val) \ + ((hwif)->attr.ceq_num = (val)) + +#define SSS_SET_HWIF_IRQ_NUM(hwif, val) \ + ((hwif)->attr.irq_num = (val)) + +#define SSS_SET_HWIF_GLOBAL_ID(hwif, val) \ + ((hwif)->attr.func_id = (val)) + +#define SSS_SET_HWIF_PF_ID(hwif, val) \ + ((hwif)->attr.pf_id = (val)) + +#define SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, val) \ + ((hwif)->attr.global_vf_off = (val)) + +#define SSS_SET_HWIF_PPF_ID(hwif, val) \ + ((hwif)->attr.ppf_id = (val)) + +#define SSS_SET_HWIF_MPF_ID(hwif, val) \ + ((hwif)->attr.mpf_id = (val)) + +#define SSS_SET_HWIF_PCI_INTF_ID(hwif, val) \ + ((hwif)->attr.pci_intf_id = (val)) + +#define SSS_SET_HWIF_FUNC_TYPE(hwif, val) \ + ((hwif)->attr.func_type = (val)) + +#define SSS_SET_HWIF_DMA_ATTR_NUM(hwif, val) \ + ((hwif)->attr.dma_attr_num = (val)) + +#define SSS_SET_HWIF_MSIX_EN(hwif, val) \ + ((hwif)->attr.msix_flex_en = (val)) + +#define SSS_SET_HWIF_SQ_NUM(hwif, val) \ + ((hwif)->attr.sq_num = (val)) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h new file mode 100644 index 0000000000000000000000000000000000000000..dfc2a68680430c5cb83708dabfd9c383fab0a32c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_irq_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_IRQ_INFO_H +#define SSS_IRQ_INFO_H + +#include +#include + +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" + +struct sss_irq { + enum sss_service_type type; + int busy; /* 1 - allocated, 0 - freed */ + struct sss_irq_desc desc; +}; + +struct sss_irq_info { + struct sss_irq *irq; + u16 total_num; + u16 free_num; + u16 max_num; /* device max irq number */ + + struct mutex irq_mutex; /* mutex is used to allocate eq */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h new file mode 100644 index 0000000000000000000000000000000000000000..542fcb20442a39c044aaf0013f52152070499c70 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mbx_info.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MBX_INFO_H +#define SSS_MBX_INFO_H +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" + +enum sss_mbx_event_state { + SSS_EVENT_START = 0, + SSS_EVENT_FAIL, + SSS_EVENT_SUCCESS, + SSS_EVENT_TIMEOUT, + SSS_EVENT_END, +}; + +struct sss_mbx_send { + u8 *data; + + u64 *wb_state; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +struct sss_mbx_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 pi; + u16 ci; +}; + +struct sss_mbx_msg_info { + u8 msg_id; + u8 state; /* can only use 1 bit */ +}; + +struct sss_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct sss_mbx_msg_info msg_info; +}; + +struct sss_msg_buffer 
{ + struct sss_msg_desc resp_msg; + struct sss_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +struct sss_mbx { + void *hwdev; + + u8 lock_channel_en; + u8 rsvd0[3]; + unsigned long channel_stop; + + /* lock for send mbx message and ack message */ + struct mutex mbx_send_lock; + /* lock for send mbx message */ + struct mutex msg_send_lock; + struct sss_mbx_send mbx_send; + + struct sss_mbx_dma_queue sync_msg_queue; + struct sss_mbx_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct sss_msg_buffer mgmt_msg; /* driver and MGMT CPU */ + struct sss_msg_buffer *host_msg; /* PPF message between hosts */ + struct sss_msg_buffer *func_msg; /* PF to VF or VF to PF */ + u16 num_func_msg; + u16 cur_msg_channel; + u8 support_h2h_msg; /* host to host */ + u8 rsvd1[3]; + /* vf receive pf/ppf callback */ + sss_vf_mbx_handler_t vf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *vf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf/ppf receive vf callback */ + sss_pf_mbx_handler_t pf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_mbx_data[SSS_MOD_TYPE_MAX]; + /* ppf receive pf/ppf callback */ + sss_ppf_mbx_handler_t ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *ppf_mbx_data[SSS_MOD_TYPE_MAX]; + /* pf receive ppf callback */ + sss_pf_from_ppf_mbx_handler_t pf_recv_ppf_mbx_cb[SSS_MOD_TYPE_MAX]; + void *pf_recv_ppf_mbx_data[SSS_MOD_TYPE_MAX]; + unsigned long ppf_to_pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long ppf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long pf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + unsigned long vf_mbx_cb_state[SSS_MOD_TYPE_MAX]; + + enum sss_mbx_event_state event_flag; + /* lock for mbx event flag */ + spinlock_t mbx_lock; + + u8 send_msg_id; + u8 rsvd2[3]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..4c0c3c482dde4889d12dbdcf0b3f96aa20a743d8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_channel.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_CHANNEL_H +#define SSS_MGMT_CHANNEL_H + +#include +#include +#include +#include +#include + +#include "sss_hw_mbx.h" +#include "sss_hw_mgmt.h" +#include "sss_adm_info.h" + +/* message header define */ +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_SHIFT 0 +#define SSS_MSG_HEADER_STATUS_SHIFT 13 +#define SSS_MSG_HEADER_SOURCE_SHIFT 15 +#define SSS_MSG_HEADER_AEQ_ID_SHIFT 16 +#define SSS_MSG_HEADER_MSG_ID_SHIFT 18 +#define SSS_MSG_HEADER_CMD_SHIFT 22 + +#define SSS_MSG_HEADER_MSG_LEN_SHIFT 32 +#define SSS_MSG_HEADER_MODULE_SHIFT 43 +#define SSS_MSG_HEADER_SEG_LEN_SHIFT 48 +#define SSS_MSG_HEADER_NO_ACK_SHIFT 54 +#define SSS_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define SSS_MSG_HEADER_SEQID_SHIFT 56 +#define SSS_MSG_HEADER_LAST_SHIFT 62 +#define SSS_MSG_HEADER_DIRECTION_SHIFT 63 + +#define SSS_MSG_HEADER_SRC_GLB_FUNC_ID_MASK 0x1FFF +#define SSS_MSG_HEADER_STATUS_MASK 0x1 +#define SSS_MSG_HEADER_SOURCE_MASK 0x1 +#define SSS_MSG_HEADER_AEQ_ID_MASK 0x3 +#define SSS_MSG_HEADER_MSG_ID_MASK 0xF +#define SSS_MSG_HEADER_CMD_MASK 0x3FF + +#define SSS_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define SSS_MSG_HEADER_MODULE_MASK 0x1F +#define SSS_MSG_HEADER_SEG_LEN_MASK 0x3F +#define SSS_MSG_HEADER_NO_ACK_MASK 0x1 +#define SSS_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define SSS_MSG_HEADER_SEQID_MASK 0x3F +#define SSS_MSG_HEADER_LAST_MASK 0x1 +#define SSS_MSG_HEADER_DIRECTION_MASK 0x1 + +#define SSS_GET_MSG_HEADER(val, field) \ + 
(((val) >> SSS_MSG_HEADER_##field##_SHIFT) & \ + SSS_MSG_HEADER_##field##_MASK) +#define SSS_SET_MSG_HEADER(val, field) \ + ((u64)(((u64)(val)) & SSS_MSG_HEADER_##field##_MASK) << \ + SSS_MSG_HEADER_##field##_SHIFT) + +enum sss_msg_ack_type { + SSS_MSG_ACK, + SSS_MSG_NO_ACK, +}; + +enum sss_data_type { + SSS_INLINE_DATA = 0, + SSS_DMA_DATA = 1, +}; + +enum sss_msg_seg_type { + SSS_NOT_LAST_SEG = 0, + SSS_LAST_SEG = 1, +}; + +enum sss_msg_direction_type { + SSS_DIRECT_SEND_MSG = 0, + SSS_RESP_MSG = 1, +}; + +enum sss_msg_src_type { + SSS_MSG_SRC_MGMT = 0, + SSS_MSG_SRC_MBX = 1, +}; + +enum sss_mgmt_msg_cb_t_state { + SSS_CALLBACK_REG = 0, + SSS_CALLBACK_RUNNING, +}; + +enum sss_pf_to_mgmt_event_state { + SSS_ADM_EVENT_UNINIT = 0, + SSS_ADM_EVENT_START, + SSS_ADM_EVENT_SUCCESS, + SSS_ADM_EVENT_FAIL, + SSS_ADM_EVENT_TIMEOUT, + SSS_ADM_EVENT_END, +}; + +struct sss_recv_msg { + void *buf; + + u16 buf_len; + u16 cmd; + + u16 msg_id; + u8 seq_id; + u8 no_ack; + + enum sss_mod_type mod; + + struct completion done; +}; + +struct sss_msg_pf_to_mgmt { + void *hwdev; + spinlock_t async_msg_lock; /* protect msg async and sync */ + + struct semaphore sync_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_buf; + void *ack_buf; + + struct sss_recv_msg recv_msg; + struct sss_recv_msg recv_resp_msg; + + u16 rsvd; + u16 async_msg_id; + u16 sync_msg_id; + struct sss_adm_msg *adm_msg[SSS_ADM_MSG_MAX]; + + sss_mgmt_msg_handler_t recv_handler[SSS_MOD_TYPE_HW_MAX]; + void *recv_data[SSS_MOD_TYPE_HW_MAX]; + unsigned long recv_handler_state[SSS_MOD_TYPE_HW_MAX]; + void *async_msg_cb_data[SSS_MOD_TYPE_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; /* protect event async and sync */ + enum sss_pf_to_mgmt_event_state event_state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h new file mode 100644 index 0000000000000000000000000000000000000000..f3b50b0d4f1dfecb00121f58d7a97c733654a5af --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_mgmt_info.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_MGMT_INFO_H +#define SSS_MGMT_INFO_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_eq_info.h" +#include "sss_irq_info.h" + +struct sss_dev_sf_svc_attr { + u8 rdma_en; + u8 rsvd[3]; +}; + +enum sss_intr_type { + SSS_INTR_TYPE_MSIX, + SSS_INTR_TYPE_MSI, + SSS_INTR_TYPE_INT, + SSS_INTR_TYPE_NONE, + + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* device service capability */ +struct sss_service_cap { + struct sss_dev_sf_svc_attr sf_svc_attr; + u16 svc_type; /* user input service type */ + u16 chip_svc_type; /* HW supported service type, reference to sss_servic_bit_define */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 host_valid_bitmap; + u8 master_host_id; + u8 srv_multi_host_mode; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + u8 flexq_en; + u8 resvd; + + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u16 max_vf; /* max VF number that PF supported */ + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 
pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + + u16 pseudo_vf_cfg_num; + u16 virtio_vq_size; + + /* DO NOT get interrupt_type from firmware */ + enum sss_intr_type intr_type; + + u8 sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + u8 lb_mode; + u8 smf_pg; + u8 rsvd[3]; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. + * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + struct sss_nic_service_cap nic_cap; /* NIC capability */ + struct sss_rdma_service_cap rdma_cap; /* RDMA capability */ + struct sss_fc_service_cap fc_cap; /* FC capability */ + struct sss_toe_service_cap toe_cap; /* ToE capability */ + struct sss_ovs_service_cap ovs_cap; /* OVS capability */ + struct sss_ipsec_service_cap ipsec_cap; /* IPsec capability */ + struct sss_ppa_service_cap ppa_cap; /* PPA capability */ + struct sss_vbs_service_cap vbs_cap; /* VBS capability */ +}; + +struct sss_svc_cap_info { + u32 func_id; + struct sss_service_cap cap; +}; + +struct sss_mgmt_info { + void *hwdev; + struct sss_service_cap svc_cap; + struct sss_eq_info eq_info; /* CEQ */ + struct sss_irq_info irq_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h new file mode 100644 index 0000000000000000000000000000000000000000..bfb29200db9f5053b7d400edca10bb2e64b12b34 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/include/sss_sriov_info.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_SRIOV_INFO_H +#define SSS_SRIOV_INFO_H + +#include + +enum sss_sriov_state { + SSS_SRIOV_DISABLE, + SSS_SRIOV_ENABLE, + SSS_SRIOV_PRESENT, +}; + +struct sss_sriov_info { + u8 enabled; + u8 rsvd[3]; + unsigned int vf_num; + unsigned long state; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..b9e202e4ee089383385b86a704e45cb638fe83a6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.c @@ -0,0 +1,724 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_pci_sriov.h" +#include "sss_pci_id_tbl.h" +#include "sss_adapter.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_hw_export.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#ifndef SSS_PF_NUM_MAX +#define SSS_PF_NUM_MAX (16) +#endif + +#define 
SSS_ADAPTER_CNT_TIMEOUT 10000 +#define SSS_WAIT_ADAPTER_USLEEP_MIN 9900 +#define SSS_WAIT_ADAPTER_USLEEP_MAX 10000 + +#define SSS_CHIP_NODE_HOLD_TIMEOUT (10 * 60 * 1000) +#define SSS_WAIT_CHIP_NODE_CHANGED (10 * 60 * 1000) +#define SSS_PRINT_TIMEOUT_INTERVAL 10000 +#define SSS_MICRO_SECOND 1000 +#define SSS_CHIP_NODE_USLEEP_MIN 900 +#define SSS_CHIP_NODE_USLEEP_MAX 1000 + +#define SSS_CARD_CNT_MAX 64 + +#define SSS_IS_SPU_DEV(pdev) ((pdev)->device == SSS_DEV_ID_SPU) + +enum sss_node_state { + SSS_NODE_CHANGE = BIT(0), +}; + +struct sss_chip_node_lock { + struct mutex chip_mutex; /* lock for chip list */ + unsigned long state; + atomic_t ref_cnt; +}; + +static struct sss_chip_node_lock g_chip_node_lock; + +static unsigned long g_index_bit_map; + +LIST_HEAD(g_chip_list); + +struct list_head *sss_get_chip_list(void) +{ + return &g_chip_list; +} + +void lld_dev_hold(struct sss_hal_dev *dev) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_inc(&pci_adapter->ref_cnt); +} + +void lld_dev_put(struct sss_hal_dev *dev) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(dev->pdev); + + atomic_dec(&pci_adapter->ref_cnt); +} + +void sss_chip_node_lock(void) +{ + unsigned long end; + bool timeout = true; + u32 loop_cnt; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + loop_cnt = 0; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && test_and_set_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait for adapter change complete timeout when trying to get adapter lock\n"); + + loop_cnt = 0; + timeout = true; + end = jiffies + msecs_to_jiffies(SSS_WAIT_CHIP_NODE_CHANGED); + do { + if (!atomic_read(&g_chip_node_lock.ref_cnt)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for adapter unused for %us, reference count: %d\n", + loop_cnt / SSS_MICRO_SECOND, + atomic_read(&g_chip_node_lock.ref_cnt)); + + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, + SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (timeout && atomic_read(&g_chip_node_lock.ref_cnt)) + pr_warn("Wait for adapter unused timeout\n"); + + mutex_unlock(&g_chip_node_lock.chip_mutex); +} + +void sss_chip_node_unlock(void) +{ + clear_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state); +} + +void sss_hold_chip_node(void) +{ + unsigned long end; + u32 loop_cnt = 0; + + mutex_lock(&g_chip_node_lock.chip_mutex); + + end = jiffies + msecs_to_jiffies(SSS_CHIP_NODE_HOLD_TIMEOUT); + do { + if (!test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + break; + + loop_cnt++; + + if (loop_cnt % SSS_PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait adapter change complete for %us\n", + loop_cnt / SSS_MICRO_SECOND); + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(SSS_CHIP_NODE_USLEEP_MIN, SSS_CHIP_NODE_USLEEP_MAX); + } while (time_before(jiffies, end)); + + if (test_bit(SSS_NODE_CHANGE, &g_chip_node_lock.state)) + pr_warn("Wait adapter change complete timeout when trying to adapter dev\n"); + + atomic_inc(&g_chip_node_lock.ref_cnt); + 
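+	/*
+	 * The reference taken here keeps the chip node list stable for the
+	 * caller; sss_chip_node_lock() waits for this count to drain before
+	 * the list may be modified. Every sss_hold_chip_node() must be paired
+	 * with sss_put_chip_node().
+	 */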
	mutex_unlock(&g_chip_node_lock.chip_mutex);
+}
+
+void sss_put_chip_node(void)
+{
+	atomic_dec(&g_chip_node_lock.ref_cnt);
+}
+
+void sss_pre_init(void)
+{
+	mutex_init(&g_chip_node_lock.chip_mutex);
+	atomic_set(&g_chip_node_lock.ref_cnt, 0);
+	sss_init_uld_lock();
+}
+
+struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev)
+{
+	if (!pdev)
+		return NULL;
+
+	return pci_get_drvdata(pdev);
+}
+
+static bool sss_chip_node_exist(struct sss_pci_adapter *adapter,
+				unsigned char bus_id)
+{
+	struct sss_card_node *chip_node = NULL;
+
+	sss_chip_node_lock();
+	if (bus_id != 0) {
+		list_for_each_entry(chip_node, &g_chip_list, node) {
+			if (chip_node->bus_id == bus_id) {
+				adapter->chip_node = chip_node;
+				sss_chip_node_unlock();
+				return true;
+			}
+		}
+	} else if (SSS_IS_VF_DEV(adapter->pcidev) ||
+		   SSS_IS_SPU_DEV(adapter->pcidev)) {
+		list_for_each_entry(chip_node, &g_chip_list, node) {
+			if (chip_node) {
+				adapter->chip_node = chip_node;
+				sss_chip_node_unlock();
+				return true;
+			}
+		}
+	}
+	sss_chip_node_unlock();
+
+	return false;
+}
+
+static unsigned char sss_get_pci_bus_id(struct sss_pci_adapter *adapter)
+{
+	struct pci_dev *pf_pdev = NULL;
+	unsigned char bus_id = 0;
+
+	if (!pci_is_root_bus(adapter->pcidev->bus))
+		bus_id = adapter->pcidev->bus->number;
+
+	if (bus_id == 0)
+		return bus_id;
+
+	if (adapter->pcidev->is_virtfn) {
+		pf_pdev = adapter->pcidev->physfn;
+		bus_id = pf_pdev->bus->number;
+	}
+
+	return bus_id;
+}
+
+static bool sss_alloc_card_id(u8 *id)
+{
+	unsigned char i;
+
+	sss_chip_node_lock();
+	for (i = 0; i < SSS_CARD_CNT_MAX; i++) {
+		if (test_and_set_bit(i, &g_index_bit_map) == 0) {
+			sss_chip_node_unlock();
+			*id = i;
+			return true;
+		}
+	}
+	sss_chip_node_unlock();
+
+	return false;
+}
+
+static void sss_free_card_id(u8 id)
+{
+	clear_bit(id, &g_index_bit_map);
+}
+
+int sss_alloc_chip_node(struct sss_pci_adapter *adapter)
+{
+	struct sss_card_node *chip_node = NULL;
+	unsigned char card_id;
+	unsigned char bus_id;
+
+	bus_id = sss_get_pci_bus_id(adapter);
+
+	if (sss_chip_node_exist(adapter, bus_id))
+		return 0;
+
+	chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL);
+	if (!chip_node)
+		return -ENOMEM;
+
+	chip_node->bus_id = bus_id;
+
+	if (!sss_alloc_card_id(&card_id)) {
+		kfree(chip_node);
+		sdk_err(&adapter->pcidev->dev, "chip node number exceeds the maximum\n");
+		return -EINVAL;
+	}
+
+	if (snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SSS_CHIP_NAME, card_id) < 0) {
+		sss_free_card_id(card_id);
+		kfree(chip_node);
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&chip_node->func_list);
+	sss_chip_node_lock();
+	list_add_tail(&chip_node->node, &g_chip_list);
+	sss_chip_node_unlock();
+	adapter->chip_node = chip_node;
+	sdk_info(&adapter->pcidev->dev,
+		 "Success to add new chip %s to global list\n", chip_node->chip_name);
+
+	return 0;
+}
+
+void sss_free_chip_node(struct sss_pci_adapter *adapter)
+{
+	struct sss_card_node *chip_node = adapter->chip_node;
+	int id;
+	int ret;
+
+	sss_chip_node_lock();
+	if (list_empty(&chip_node->func_list)) {
+		list_del(&chip_node->node);
+		sdk_info(&adapter->pcidev->dev,
+			 "Success to delete chip %s from global list\n",
+			 chip_node->chip_name);
+		ret = sscanf(chip_node->chip_name, SSS_CHIP_NAME "%d", &id);
+		if (ret < 0)
+			sdk_err(&adapter->pcidev->dev, "Fail to get nic id\n");
+
+		sss_free_card_id(id);
+		kfree(chip_node);
+	}
+	sss_chip_node_unlock();
+}
+
+void sss_add_func_list(struct sss_pci_adapter *adapter)
+{
+	sss_chip_node_lock();
+	list_add_tail(&adapter->node,
&adapter->chip_node->func_list); + sss_chip_node_unlock(); +} + +void sss_del_func_list(struct sss_pci_adapter *adapter) +{ + sss_chip_node_lock(); + list_del(&adapter->node); + sss_chip_node_unlock(); +} + +static struct sss_card_node *sss_get_chip_node_by_hwdev(const void *hwdev) +{ + struct sss_card_node *chip_node = NULL; + struct sss_card_node *node_tmp = NULL; + struct sss_pci_adapter *dev = NULL; + + if (!hwdev) + return NULL; + + sss_hold_chip_node(); + + list_for_each_entry(node_tmp, &g_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + sss_put_chip_node(); + + return chip_node; +} + +static bool sss_is_func_valid(struct sss_pci_adapter *dev) +{ + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + return false; + + return true; +} + +static int sss_get_dynamic_uld_dev_name(struct sss_pci_adapter *dev, enum sss_service_type type, + char *ifname) +{ + u32 out_size = IFNAMSIZ; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + if (!uld_info[type].ioctl) + return -EFAULT; + + return uld_info[type].ioctl(dev->uld_dev[type], SSS_TOOL_GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static bool sss_support_service_type(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return !dev->mgmt_info->svc_cap.chip_svc_type; +} + +void sss_get_card_info(const void *hwdev, void *bufin) +{ + struct sss_card_node *chip_node = NULL; + struct sss_tool_card_info *info = (struct sss_tool_card_info *)bufin; + struct sss_pci_adapter *dev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + + info->pf_num = 0; + + chip_node = sss_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + sss_hold_chip_node(); + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!sss_is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (sss_support_nic(fun_hwdev)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_NIC]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_NIC); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, + info->pf[i].name); + } + } + + if (sss_support_ppa(fun_hwdev, NULL)) { + if (dev->uld_dev[SSS_SERVICE_TYPE_PPA]) { + info->pf[i].pf_type |= (u32)BIT(SSS_SERVICE_TYPE_PPA); + sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_PPA, + info->pf[i].name); + } + } + + if (sss_support_service_type(fun_hwdev)) + strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + + sss_put_chip_node(); +} + +bool sss_is_in_host(void) +{ + struct sss_card_node *node = NULL; + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + list_for_each_entry(adapter, &node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_put_chip_node(); + return true; + } + } + } + sss_put_chip_node(); + + return false; +} + +void sss_get_all_chip_id(void *id_info) +{ + int i = 0; + int id; + int ret; + struct sss_card_id *card_id = (struct sss_card_id *)id_info; + struct sss_card_node *node = NULL; + + sss_hold_chip_node(); + list_for_each_entry(node, &g_chip_list, node) { + ret = sscanf(node->chip_name, SSS_CHIP_NAME "%d", &id); + if (ret < 0) { + pr_err("Fail to get chip id\n"); + continue; + } + card_id->id[i] = (u32)id; + i++; + } + sss_put_chip_node(); + + card_id->num = (u32)i; +} + +void *sss_get_pcidev_hdl(void 
*hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (!hwdev) + return NULL; + + return dev->pcidev_hdl; +} + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev) +{ + struct sss_pci_adapter *adapter = pci_get_drvdata(hal_dev->pdev); + + return adapter->chip_node; +} + +void sss_get_card_func_info(const char *chip_name, struct sss_card_func_info *card_func) +{ + struct sss_card_node *card_node = NULL; + struct sss_pci_adapter *adapter = NULL; + struct sss_func_pdev_info *info = NULL; + + card_func->pf_num = 0; + + sss_hold_chip_node(); + + list_for_each_entry(card_node, &g_chip_list, node) { + if (strncmp(card_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + info = &card_func->pdev_info[card_func->pf_num]; + info->bar1_size = + pci_resource_len(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + info->bar1_pa = + pci_resource_start(adapter->pcidev, SSS_PF_PCI_CFG_REG_BAR); + + info->bar3_size = + pci_resource_len(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + info->bar3_pa = + pci_resource_start(adapter->pcidev, SSS_PCI_MGMT_REG_BAR); + + card_func->pf_num++; + if (card_func->pf_num >= SSS_PF_NUM_MAX) { + sss_put_chip_node(); + return; + } + } + } + + sss_put_chip_node(); +} + +int sss_get_pf_id(struct sss_card_node *card_node, u32 port_id, u32 *pf_id, u32 *valid) +{ + struct sss_pci_adapter *adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(adapter->hwdev) == port_id) { + *pf_id = sss_get_func_id(adapter->hwdev); + *valid = 1; + break; + } + } + sss_put_chip_node(); + + return 0; +} + +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *dev = NULL; + void *uld = NULL; + + if (!hal_dev) + return NULL; + + dev = pci_get_drvdata(hal_dev->pdev); + if (!dev) + return NULL; + + spin_lock_bh(&dev->uld_lock); + if (!dev->uld_dev[type] || !test_bit(type, &dev->uld_attach_state)) { + spin_unlock_bh(&dev->uld_lock); + return NULL; + } + uld = dev->uld_dev[type]; + + atomic_inc(&dev->uld_ref_cnt[type]); + spin_unlock_bh(&dev->uld_lock); + + return uld; +} + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type) +{ + struct sss_pci_adapter *pci_adapter = pci_get_drvdata(hal_dev->pdev); + + atomic_dec(&pci_adapter->uld_ref_cnt[type]); +} + +static bool sss_is_pcidev_match_dev_name(const char *dev_name, struct sss_pci_adapter *dev, + enum sss_service_type type) +{ + enum sss_service_type i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type > SSS_SERVICE_TYPE_MAX) + return false; + + if (type == SSS_SERVICE_TYPE_MAX) { + for (i = SSS_SERVICE_TYPE_OVS; i < SSS_SERVICE_TYPE_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ)) + return true; + } + + err = sss_get_dynamic_uld_dev_name(dev, SSS_SERVICE_TYPE_NIC, (char *)nic_uld_name); + if (err == 0) { + if (!strncmp(nic_uld_name, dev_name, IFNAMSIZ)) + return true; + } + + return false; +} + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + + list_for_each_entry(chip_node, &g_chip_list, node) 
{ + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_is_pcidev_match_dev_name(dev_name, dev, type)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + return &dev->hal_dev; + } + } + } + + sss_put_chip_node(); + + return NULL; +} + +static bool sss_is_pcidev_match_chip_name(const char *ifname, struct sss_pci_adapter *dev, + struct sss_card_node *chip_node, enum sss_func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (sss_get_func_type(dev->hwdev) != type) + return false; + return true; + } + + return false; +} + +static struct sss_hal_dev *sss_get_dst_type_lld_dev_by_chip_name(const char *ifname, + enum sss_func_type type) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_is_pcidev_match_chip_name(ifname, dev, chip_node, type)) + return &dev->hal_dev; + } + } + + return NULL; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name) +{ + struct sss_hal_dev *dev = NULL; + + sss_hold_chip_node(); + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PPF); + if (dev) + goto out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_PF); + if (dev) + goto out; + + dev = sss_get_dst_type_lld_dev_by_chip_name(chip_name, SSS_FUNC_TYPE_VF); +out: + if (dev) + lld_dev_hold(dev); + sss_put_chip_node(); + + return dev; +} + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id) +{ + struct sss_card_node *chip_node = NULL; + struct sss_pci_adapter *dev = NULL; + + sss_hold_chip_node(); + list_for_each_entry(chip_node, &g_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (sss_get_func_type(dev->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (SSS_TO_PHY_PORT_ID(dev->hwdev) == port_id && + !strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) { + lld_dev_hold(&dev->hal_dev); + sss_put_chip_node(); + + return &dev->hal_dev; + } + } + } + sss_put_chip_node(); + + return NULL; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..44805eccff4fdff898c63322bef049a14ea0fc30 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_adapter_mgmt.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_ADAPTER_MGMT_H +#define SSS_ADAPTER_MGMT_H + +#include +#include + +#include "sss_version.h" +#include "sss_adapter.h" + +#define SSS_DRV_VERSION SSS_VERSION_STR + +#define SSS_DRV_NAME "sssdk" +#define SSS_CHIP_NAME "sssnic" + +#define SSS_VF_PCI_CFG_REG_BAR 0 +#define SSS_PF_PCI_CFG_REG_BAR 1 + +#define SSS_PCI_INTR_REG_BAR 2 +#define SSS_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */ +#define SSS_PCI_DB_BAR 4 + +#define SSS_IS_VF_DEV(pdev) ((pdev)->device == SSS_DEV_ID_VF) + +#define SSS_CARD_MAX_SIZE (64) + +struct sss_card_id { + u32 id[SSS_CARD_MAX_SIZE]; + u32 num; +}; + +struct sss_func_pdev_info { + u64 bar0_pa; + u64 bar0_size; + u64 bar1_pa; + u64 bar1_size; + u64 bar3_pa; + u64 bar3_size; + u64 rsvd[4]; +}; + +struct sss_card_func_info { + u32 pf_num; + u32 rsvd; + u64 usr_adm_pa; + struct sss_func_pdev_info pdev_info[SSS_CARD_MAX_SIZE]; +}; + +enum { + SSS_NO_PROBE = 1, + SSS_PROBE_START = 2, + SSS_PROBE_OK = 3, + SSS_IN_REMOVE = 4, +}; + +struct list_head 
*sss_get_chip_list(void); +int sss_alloc_chip_node(struct sss_pci_adapter *adapter); +void sss_free_chip_node(struct sss_pci_adapter *adapter); +void sss_pre_init(void); +struct sss_pci_adapter *sss_get_adapter_by_pcidev(struct pci_dev *pdev); +void sss_add_func_list(struct sss_pci_adapter *adapter); +void sss_del_func_list(struct sss_pci_adapter *adapter); +void sss_hold_chip_node(void); +void sss_put_chip_node(void); + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state); + +void lld_dev_hold(struct sss_hal_dev *dev); +void lld_dev_put(struct sss_hal_dev *dev); + +void sss_chip_node_lock(void); +void sss_chip_node_unlock(void); + +void *sss_get_pcidev_hdl(void *hwdev); +void *sss_get_uld_dev(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +void sss_uld_dev_put(struct sss_hal_dev *hal_dev, enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_dev_name(const char *dev_name, enum sss_service_type type); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_name(const char *chip_name); + +struct sss_hal_dev *sss_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); + +void sss_get_all_chip_id(void *id_info); + +void sss_get_card_func_info + (const char *chip_name, struct sss_card_func_info *card_func); + +void sss_get_card_info(const void *hwdev, void *bufin); + +bool sss_is_in_host(void); + +int sss_get_pf_id(struct sss_card_node *chip_node, u32 port_id, u32 *pf_id, u32 *valid); + +struct sss_card_node *sss_get_card_node(struct sss_hal_dev *hal_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c new file mode 100644 index 0000000000000000000000000000000000000000..452795f7bcb5b38fea71fa5afeb89a75ec32715d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" + +#define SSS_MIN_SLEEP_TIME(us) ((us) - (us) / 10) + +/* Sleep more than 20ms using msleep is accurate */ +#define SSS_HANDLER_SLEEP(usleep_min, wait_once_us) \ +do { \ + if ((wait_once_us) >= 20 * USEC_PER_MSEC) \ + msleep((wait_once_us) / USEC_PER_MSEC); \ + else \ + usleep_range((usleep_min), (wait_once_us)); \ +} while (0) + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, struct sss_dma_addr_align *addr) +{ + dma_addr_t pa; + dma_addr_t pa_align; + void *va = NULL; + void *va_align = NULL; + + va = dma_zalloc_coherent(dev_hdl, size, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + if (pa_align == pa) { + va_align = va; + goto same_addr_after_align; + } + + dma_free_coherent(dev_hdl, size, va, pa); + + va = dma_zalloc_coherent(dev_hdl, size + align, &pa, flag); + if (!va) + return -ENOMEM; + + pa_align = ALIGN(pa, align); + va_align = (void *)((u64)va + (pa_align - pa)); + +same_addr_after_align: + addr->origin_paddr = pa; + addr->align_paddr = pa_align; + addr->origin_vaddr = va; + addr->align_vaddr = va_align; + addr->real_size = (u32)size; + + return 0; +} + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *addr) +{ + dma_free_coherent(dev_hdl, addr->real_size, addr->origin_vaddr, addr->origin_paddr); +} + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum sss_process_ret ret; + unsigned long end; + u32 usleep_min = 
SSS_MIN_SLEEP_TIME(wait_once_us); + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + SSS_HANDLER_SLEEP(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); + + ret = handler(priv_data); + if (ret == SSS_PROCESS_OK) + return 0; + else if (ret == SSS_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h new file mode 100644 index 0000000000000000000000000000000000000000..36988f134d964d56ae66b697f56271d361dac1dd --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_COMMON_H +#define SSS_COMMON_H + +#include + +#include "sss_hw_common.h" + +int sss_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, struct sss_dma_addr_align *mem_align); + +void sss_dma_free_coherent_align(void *dev_hdl, struct sss_dma_addr_align *mem_align); + +int sss_check_handler_timeout(void *priv_data, sss_wait_handler_t handler, + u32 wait_total_ms, u32 wait_once_us); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c new file mode 100644 index 0000000000000000000000000000000000000000..8c30c6c52fe5b2924229aa61876f09a8db68d341 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hw_main.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_version.h" +#include "sss_adapter_mgmt.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_pci_probe.h" +#include "sss_pci_remove.h" +#include "sss_pci_shutdown.h" +#include "sss_pci_error.h" + +#define SSS_DRV_DESC "Intelligent Network Interface Card Driver" + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSS_DRV_VERSION); +MODULE_LICENSE("GPL"); + +static const struct pci_device_id g_pci_table[] = { + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_STANDARD), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_SPN120), 0}, + {PCI_VDEVICE(SSSNIC, SSS_DEV_ID_VF), 0}, + {0, 0} +}; + +MODULE_DEVICE_TABLE(pci, g_pci_table); + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh g_pci_driver_rh = { + .sriov_configure = sss_pci_configure_sriov, +}; +#endif + +static struct pci_error_handlers g_pci_err_handler = { + .error_detected = sss_detect_pci_error, +}; + +static struct pci_driver g_pci_driver = { + .name = SSS_DRV_NAME, + .id_table = g_pci_table, + .probe = sss_pci_probe, + .remove = sss_pci_remove, + .shutdown = sss_pci_shutdown, +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = sss_pci_configure_sriov, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &g_pci_driver_rh, +#endif + .err_handler = &g_pci_err_handler +}; + +static __init int sss_init_pci(void) +{ + int ret; + + pr_info("%s - version %s\n", SSS_DRV_DESC, SSS_DRV_VERSION); + sss_pre_init(); + + ret = pci_register_driver(&g_pci_driver); + if (ret != 0) + return ret; + + return 0; +} + +static __exit void 
sss_exit_pci(void) +{ + pci_unregister_driver(&g_pci_driver); +} + +module_init(sss_init_pci); +module_exit(sss_exit_pci); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c new file mode 100644 index 0000000000000000000000000000000000000000..c825864805f31d848e6589b925ad81321d81ccb2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime) +{ + int ret; + struct sss_cmd_sync_time cmd_time = {0}; + u16 out_len = sizeof(cmd_time); + + cmd_time.mstime = mstime; + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SYNC_TIME, &cmd_time, + sizeof(cmd_time), &cmd_time, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_time)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to sync time, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_time.head.state, out_len); + return -EIO; + } + + return 0; +} + +void sss_chip_disable_mgmt_channel(void *hwdev) +{ + sss_chip_set_pf_status(SSS_TO_HWIF(hwdev), SSS_PF_STATUS_INIT); +} + +int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info) +{ + int ret; + struct sss_cmd_board_info cmd_info = {0}; + u16 out_len = sizeof(cmd_info); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + &cmd_info, sizeof(cmd_info), &cmd_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_info)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get board info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_info.head.state, out_len); + return -EIO; + } + + memcpy(board_info, &cmd_info.info, sizeof(*board_info)); + + return 0; +} + +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num) +{ + int ret; + struct sss_cmd_feature_nego cmd_feature = {0}; + u16 out_len = sizeof(cmd_feature); + + cmd_feature.func_id = sss_get_global_func_id(hwdev); + cmd_feature.opcode = opcode; + if (opcode == SSS_MGMT_MSG_SET_CMD) + memcpy(cmd_feature.feature, feature, (feature_num * sizeof(u64))); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_FEATURE_NEGO, + &cmd_feature, sizeof(cmd_feature), &cmd_feature, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to nego feature, opcode: %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_feature.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) + memcpy(feature, cmd_feature.feature, (feature_num * sizeof(u64))); + + return 0; +} + +int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id) +{ + int ret; + struct sss_cmd_bdf_info cmd_bdf = {0}; + u16 out_len = sizeof(cmd_bdf); + + cmd_bdf.bus = bus_id; + cmd_bdf.device = device_id; + cmd_bdf.function = func_id; + cmd_bdf.function_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + &cmd_bdf, sizeof(cmd_bdf), &cmd_bdf, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bdf)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set bdf info, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_bdf.head.state, out_len); + return 
-EIO; + } + + return 0; +} + +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_channel_detect cmd_detect = {0}; + u16 out_len = sizeof(cmd_detect); + + if (!hwdev) + return -EINVAL; + + cmd_detect.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + &cmd_detect, sizeof(cmd_detect), &cmd_detect, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_detect)) { + sdk_err(hwdev->dev_hdl, + "Fail to send channel detect, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, cmd_detect.head.state, out_len); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d0471e8a9514db4ab2ce9d40f0f4baf2eafeb778 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_api.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_API_H +#define SSS_HWDEV_API_H + +#include + +#include "sss_hw_mbx_msg.h" +#include "sss_hwdev.h" + +int sss_chip_sync_time(void *hwdev, u64 mstime); +int sss_chip_get_board_info(void *hwdev, struct sss_board_info *board_info); +void sss_chip_disable_mgmt_channel(void *hwdev); +int sss_chip_do_nego_feature(void *hwdev, u8 opcode, u64 *feature, u16 feature_num); +int sss_chip_set_pci_bdf_num(void *hwdev, u8 bus_id, u8 device_id, u8 func_id); +int sss_chip_comm_channel_detect(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c new file mode 100644 index 0000000000000000000000000000000000000000..412cc574a563d5c496cc1d25fdeeeb7c746f2c85 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwdev_cap.h" + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* L2NIC */ +#define SSS_CFG_MAX_QP 256 + +/* RDMA */ +#define SSS_RDMA_RSVD_QP 2 +#define SSS_ROCE_MAX_WQE (8 * K_UNIT - 1) + +#define SSS_RDMA_MAX_SQ_SGE 16 + +#define SSS_ROCE_MAX_RQ_SGE 16 + +#define SSS_RDMA_MAX_SQ_DESC_SIZE 256 + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */ +#define SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE 192 + +#define SSS_ROCE_MAX_RQ_DESC_SIZE 256 + +#define SSS_ROCE_QPC_ENTRY_SIZE 512 + +#define SSS_WQEBB_SIZE 64 + +#define SSS_ROCE_RDMARC_ENTRY_SIZE 32 +#define SSS_ROCE_MAX_QP_INIT_RDMA 128 +#define SSS_ROCE_MAX_QP_DEST_RDMA 128 + +#define SSS_ROCE_MAX_SRQ_WQE (16 * K_UNIT - 1) +#define SSS_ROCE_RSVD_SRQ 0 +#define SSS_ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SIZE 64 + +#define SSS_ROCE_MAX_SRQ 0x400 +#define SSS_ROCE_MAX_CQ 0x800 +#define SSS_ROCE_MAX_QP 0x400 +#define SSS_ROCE_MAX_MPT 0x400 +#define SSS_ROCE_MAX_DRC_QP 0x40 + +#define SSS_RDMA_MAX_CQE (8 * M_UNIT - 1) +#define SSS_RDMA_RSVD_CQ 0 + +#define SSS_RDMA_CQC_ENTRY_SIZE 128 + +#define SSS_RDMA_CQE_SIZE 64 +#define SSS_RDMA_RSVD_MRW 128 +#define SSS_RDMA_MPT_ENTRY_SIZE 64 +#define SSS_RDMA_MTT_NUM (1 * G_UNIT) +#define SSS_LOG_MTT_SEG 5 +#define 
SSS_MTT_ENTRY_SIZE 8 +#define SSS_LOG_RDMARC_SEG 3 + +#define SSS_LOCAL_ACK_DELAY 15 +#define SSS_RDMA_PORT_NUM 1 +#define SSS_ROCE_MAX_MSG_SIZE (2 * G_UNIT) + +#define SSS_DB_PAGE_SIZE_K (4 * K_UNIT) +#define SSS_DWQE_SIZE 256 + +#define SSS_PD_NUM (128 * K_UNIT) +#define SSS_RSVD_PD 0 + +#define SSS_MAX_XRCD (64 * K_UNIT) +#define SSS_RSVD_XRCD 0 + +#define SSS_MAX_GID_PER_PORT 128 +#define SSS_GID_ENTRY_SIZE 32 +#define SSS_RSVD_LKEY ((SSS_RDMA_RSVD_MRW - 1) << 8) +#define SSS_PAGE_SIZE_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define SSS_ROCE_MODE 1 + +#define SSS_MAX_FRPL_LEN 511 +#define SSS_MAX_PKEY 1 + +/* ToE */ +#define SSS_TOE_PCTX_SIZE 1024 +#define SSS_TOE_SCQC_SIZE 64 + +/* FC */ +#define SSS_FC_PQPC_SIZE 256 +#define SSS_FC_CQPC_SIZE 256 +#define SSS_FC_SQE_SIZE 128 +#define SSS_FC_SCQC_SIZE 64 +#define SSS_FC_SCQE_SIZE 64 +#define SSS_FC_SRQC_SIZE 64 +#define SSS_FC_SRQE_SIZE 32 + +/* OVS */ +#define SSS_OVS_PCTX_SIZE 512 + +/* PPA */ +#define SSS_PPA_PCTX_SIZE 512 + +/* IPsec */ +#define SSS_IPSEC_SACTX_SIZE 512 + +/* VirtIO */ +#define SSS_VIRTIO_BASE_VQ_SIZE 2048U +#define SSS_VIRTIO_DEFAULT_VQ_SIZE 8192U + +struct sss_cmd_dev_cap_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_type; + u16 max_vf; + u8 flexq_en; + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u8 rsvd_func1; + u32 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u8 lb_mode; + u8 smf_pg; + + u32 max_connect_num; + u16 max_stick2cache_num; + u16 bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + u8 host_sf_en; + u8 master_host_id; + u8 srv_multi_host_mode; + u8 rsvd2_sr; + + u32 rsvd_func3[5]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_def_queue_num; + u16 rsvd_nic1; + u32 rsvd_nic2[2]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 rsvd_roce1[3]; + u32 rsvd_roce2[5]; + + /* IPsec */ + u32 ipsec_max_sactx; + u16 ipsec_max_cq; + u16 rsvd_ipsec1; + u32 rsvd_ipsec2[2]; + + /* OVS */ + u32 ovs_max_qpc; + u32 rsvd_ovs[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 toe_max_cctxt; + u32 rsvd_toe[2]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 rsvd_fc2[5]; + + /* VBS */ + u16 vbs_max_volq; + u16 rsvd_vbs1; + u32 rsvd_vbs2[3]; + + u16 pseudo_vf_start_id; + u16 pseudo_vf_num; + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_bfilter_start_addr; + u16 pseudo_vf_bfilter_len; + u32 rsvd_glb[8]; +}; + +enum { + SSS_SF_SVC_FT_BIT = (1 << 0), + SSS_SF_SVC_RDMA_BIT = (1 << 1), +}; + +enum sss_cfg_cmd { + SSS_CFG_CMD_GET_CAP_CFG = 0, + SSS_CFG_CMD_GET_HOST_TIMER = 1, +}; + +static void sss_print_pubic_cap(void *dev_hdl, const struct sss_service_cap *svc_cap) +{ + sdk_info(dev_hdl, + "Get public capbility: svc_type: 0x%x, chip_svc_type: 0x%x\n", + svc_cap->svc_type, svc_cap->chip_svc_type); + 
sdk_info(dev_hdl, + "host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n", + svc_cap->host_id, svc_cap->ep_id, svc_cap->er_id, svc_cap->port_id); + sdk_info(dev_hdl, + "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + svc_cap->host_total_function, svc_cap->host_oq_id_mask_val, svc_cap->max_vf); + sdk_info(dev_hdl, + "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x\n", + svc_cap->pf_num, svc_cap->pf_id_start, svc_cap->vf_num, svc_cap->vf_id_start); + sdk_info(dev_hdl, + "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", + svc_cap->host_valid_bitmap, svc_cap->master_host_id, svc_cap->srv_multi_host_mode); + sdk_info(dev_hdl, + "cos_valid_bitmap: 0x%x, port_cos_valid_bitmap: 0x%x, flexq_en: 0x%x, virtio_vq_size: 0x%x\n", + svc_cap->cos_valid_bitmap, svc_cap->port_cos_valid_bitmap, svc_cap->flexq_en, + svc_cap->virtio_vq_size); + sdk_info(dev_hdl, + "pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x, pseudo_vf_max_pctx: 0x%x\n", + svc_cap->pseudo_vf_start_id, svc_cap->pseudo_vf_num, svc_cap->pseudo_vf_max_pctx); + sdk_info(dev_hdl, + "pseudo_vf_bfilter_start_addr: 0x%x, pseudo_vf_bfilter_len: 0x%x\n", + svc_cap->pseudo_vf_bfilter_start_addr, svc_cap->pseudo_vf_bfilter_len); +} + +static void sss_parse_qmm_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + struct sss_dev_sf_svc_attr *sf_svc_attr = &svc_cap->sf_svc_attr; + + svc_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_cfg_num = cmd_cap->pseudo_vf_num; + svc_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + svc_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + svc_cap->pseudo_vf_bfilter_start_addr = cmd_cap->pseudo_vf_bfilter_start_addr; + svc_cap->pseudo_vf_bfilter_len = cmd_cap->pseudo_vf_bfilter_len; + + if (SSS_SUPPORT_VIRTIO_VQ_SIZE(hwdev)) + svc_cap->virtio_vq_size = (u16)(SSS_VIRTIO_BASE_VQ_SIZE << svc_cap->virtio_vq_size); + else + svc_cap->virtio_vq_size = SSS_VIRTIO_DEFAULT_VQ_SIZE; + + sf_svc_attr->rdma_en = !!(cmd_cap->sf_svc_attr & SSS_SF_SVC_RDMA_BIT); + + svc_cap->smf_pg = cmd_cap->smf_pg; + svc_cap->lb_mode = cmd_cap->lb_mode; + + svc_cap->timer_en = cmd_cap->timer_en; + svc_cap->bfilter_start_addr = cmd_cap->bfilter_start_addr; + svc_cap->bfilter_len = cmd_cap->bfilter_len; + svc_cap->host_oq_id_mask_val = cmd_cap->host_oq_id_mask_val; + svc_cap->hash_bucket_num = cmd_cap->hash_bucket_num; + svc_cap->max_stick2cache_num = cmd_cap->max_stick2cache_num; + svc_cap->max_connect_num = cmd_cap->max_connect_num; +} + +static void sss_parse_pubic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + svc_cap->svc_type = cmd_cap->svc_type; + svc_cap->chip_svc_type = cmd_cap->svc_type; + + svc_cap->ep_id = cmd_cap->ep_id; + svc_cap->er_id = cmd_cap->er_id; + svc_cap->host_id = cmd_cap->host_id; + svc_cap->port_id = cmd_cap->port_id; + + svc_cap->host_total_function = cmd_cap->host_total_function; + svc_cap->host_valid_bitmap = cmd_cap->host_valid_bitmap; + svc_cap->master_host_id = cmd_cap->master_host_id; + svc_cap->srv_multi_host_mode = cmd_cap->srv_multi_host_mode; + + svc_cap->flexq_en = cmd_cap->flexq_en; + svc_cap->cos_valid_bitmap = cmd_cap->cos_valid_bitmap; + svc_cap->port_cos_valid_bitmap = cmd_cap->port_cos_valid_bitmap; + + if (type != SSS_FUNC_TYPE_VF) { + svc_cap->pf_num = cmd_cap->pf_num; + svc_cap->pf_id_start = cmd_cap->pf_id_start; + svc_cap->vf_num = cmd_cap->vf_num; + 
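+		/* PF/PPF: take the per-host PF/VF numbering reported by firmware;
+		 * a VF does not manage other VFs, so max_vf is forced to 0 below.
+		 */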
svc_cap->vf_id_start = cmd_cap->vf_id_start; + svc_cap->max_vf = cmd_cap->max_vf; + } else { + svc_cap->max_vf = 0; + } + + svc_cap->sf_en = (type == SSS_FUNC_TYPE_PPF) ? + (!!cmd_cap->host_sf_en) : (!!cmd_cap->func_sf_en); + + sss_parse_qmm_cap(hwdev, svc_cap, cmd_cap); + sss_print_pubic_cap(hwdev->dev_hdl, svc_cap); +} + +static void sss_parse_l2nic_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_nic_service_cap *nic_svc_cap = &svc_cap->nic_cap; + + if (!SSS_IS_NIC_TYPE(hwdev)) + return; + + nic_svc_cap->max_rq = cmd_cap->nic_max_rq_id + 1; + nic_svc_cap->max_sq = cmd_cap->nic_max_sq_id + 1; + nic_svc_cap->def_queue_num = cmd_cap->nic_def_queue_num; + + sdk_info(hwdev->dev_hdl, + "Get Nic capbility, max_sq: 0x%x, max_rq: 0x%x, def_queue_num: 0x%x\n", + nic_svc_cap->max_sq, nic_svc_cap->max_rq, nic_svc_cap->def_queue_num); + + /* Check parameters from firmware */ + if (nic_svc_cap->max_sq > SSS_CFG_MAX_QP || + nic_svc_cap->max_rq > SSS_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Exceed limit[1-%d]:sq: %u, rq: %u\n", + SSS_CFG_MAX_QP, nic_svc_cap->max_sq, nic_svc_cap->max_rq); + nic_svc_cap->max_rq = SSS_CFG_MAX_QP; + nic_svc_cap->max_sq = SSS_CFG_MAX_QP; + } +} + +static void sss_parse_fc_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_fc_service_cap *fc_svc_cap = &svc_cap->fc_cap; + struct sss_dev_fc_svc_cap *dev_fc_cap = &fc_svc_cap->dev_fc_cap; + + if (!SSS_IS_FC_TYPE(hwdev)) + return; + + /* FC without virtulization */ + if (type != SSS_FUNC_TYPE_PF && type != SSS_FUNC_TYPE_PPF) + return; + + dev_fc_cap->srq_num = cmd_cap->fc_max_srq; + dev_fc_cap->scq_num = cmd_cap->fc_max_scq; + dev_fc_cap->max_parent_qpc_num = cmd_cap->fc_max_pctx; + dev_fc_cap->max_child_qpc_num = cmd_cap->fc_max_cctx; + dev_fc_cap->child_qpc_id_start = cmd_cap->fc_cctx_id_start; + dev_fc_cap->vp_id_start = cmd_cap->fc_vp_id_start; + dev_fc_cap->vp_id_end = cmd_cap->fc_vp_id_end; + + fc_svc_cap->parent_qpc_size = SSS_FC_PQPC_SIZE; + fc_svc_cap->child_qpc_size = SSS_FC_CQPC_SIZE; + fc_svc_cap->sqe_size = SSS_FC_SQE_SIZE; + + fc_svc_cap->scqc_size = SSS_FC_SCQC_SIZE; + fc_svc_cap->scqe_size = SSS_FC_SCQE_SIZE; + + fc_svc_cap->srqc_size = SSS_FC_SRQC_SIZE; + fc_svc_cap->srqe_size = SSS_FC_SRQE_SIZE; + + sdk_info(hwdev->dev_hdl, "Get FC capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_parent_qpc_num: 0x%x, max_child_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x\n", + dev_fc_cap->max_parent_qpc_num, dev_fc_cap->max_child_qpc_num, + dev_fc_cap->scq_num, dev_fc_cap->srq_num); + sdk_info(hwdev->dev_hdl, "child_qpc_id_start: 0x%x, vp_id_start: 0x%x, vp_id_end: 0x%x\n", + dev_fc_cap->child_qpc_id_start, dev_fc_cap->vp_id_start, dev_fc_cap->vp_id_end); +} + +static void sss_init_rdma_cap_param(struct sss_hwdev *hwdev) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &hwdev->mgmt_info->svc_cap.rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_rdmarc = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->reserved_qp = SSS_RDMA_RSVD_QP; + rdma_svc_cap->max_sq_sg = SSS_RDMA_MAX_SQ_SGE; + + /* RoCE */ + roce_own_cap->qpc_entry_size = SSS_ROCE_QPC_ENTRY_SIZE; + roce_own_cap->max_wqe = SSS_ROCE_MAX_WQE; + roce_own_cap->max_rq_sg = SSS_ROCE_MAX_RQ_SGE; + roce_own_cap->max_sq_inline_data_size = SSS_ROCE_MAX_SQ_INLINE_DATA_SIZE; 
+ roce_own_cap->max_rq_desc_size = SSS_ROCE_MAX_RQ_DESC_SIZE; + roce_own_cap->rdmarc_entry_size = SSS_ROCE_RDMARC_ENTRY_SIZE; + roce_own_cap->max_qp_init_rdma = SSS_ROCE_MAX_QP_INIT_RDMA; + roce_own_cap->max_qp_dest_rdma = SSS_ROCE_MAX_QP_DEST_RDMA; + roce_own_cap->max_srq_wqe = SSS_ROCE_MAX_SRQ_WQE; + roce_own_cap->reserved_srq = SSS_ROCE_RSVD_SRQ; + roce_own_cap->max_srq_sge = SSS_ROCE_MAX_SRQ_SGE; + roce_own_cap->srqc_entry_size = ROCE_SRQC_ENTERY_SIZE; + roce_own_cap->max_msg_size = SSS_ROCE_MAX_MSG_SIZE; + + rdma_svc_cap->max_sq_desc_size = SSS_RDMA_MAX_SQ_DESC_SIZE; + rdma_svc_cap->wqebb_size = SSS_WQEBB_SIZE; + rdma_svc_cap->max_cqe = SSS_RDMA_MAX_CQE; + rdma_svc_cap->reserved_cq = SSS_RDMA_RSVD_CQ; + rdma_svc_cap->cqc_entry_size = SSS_RDMA_CQC_ENTRY_SIZE; + rdma_svc_cap->cqe_size = SSS_RDMA_CQE_SIZE; + rdma_svc_cap->reserved_mrw = SSS_RDMA_RSVD_MRW; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + + rdma_svc_cap->max_fmr_map = 0xff; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->log_rdmarc_seg = SSS_LOG_RDMARC_SEG; + rdma_svc_cap->local_ca_ack_delay = SSS_LOCAL_ACK_DELAY; + rdma_svc_cap->port_num = SSS_RDMA_PORT_NUM; + rdma_svc_cap->db_page_size = SSS_DB_PAGE_SIZE_K; + rdma_svc_cap->direct_wqe_size = SSS_DWQE_SIZE; + rdma_svc_cap->pd_num = SSS_PD_NUM; + rdma_svc_cap->reserved_pd = SSS_RSVD_PD; + rdma_svc_cap->max_xrcd = SSS_MAX_XRCD; + rdma_svc_cap->reserved_xrcd = SSS_RSVD_XRCD; + rdma_svc_cap->max_gid_per_port = SSS_MAX_GID_PER_PORT; + rdma_svc_cap->gid_entry_size = SSS_GID_ENTRY_SIZE; + rdma_svc_cap->reserved_lkey = SSS_RSVD_LKEY; + rdma_svc_cap->comp_vector_num = (u32)hwdev->mgmt_info->eq_info.ceq_num; + rdma_svc_cap->page_size_cap = SSS_PAGE_SIZE_CAP; + rdma_svc_cap->flag = (SSS_RDMA_BMME_FLAG_LOCAL_INV | + SSS_RDMA_BMME_FLAG_REMOTE_INV | + SSS_RDMA_BMME_FLAG_FAST_REG_WR | + SSS_RDMA_DEV_CAP_FLAG_XRC | + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW | + SSS_RDMA_BMME_FLAG_TYPE_2_WIN | + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B | + SSS_RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_svc_cap->max_frpl_len = SSS_MAX_FRPL_LEN; + rdma_svc_cap->max_pkey = SSS_MAX_PKEY; +} + +static void sss_parse_roce_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &svc_cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_ROCE_TYPE(hwdev)) + return; + + roce_own_cap->max_srq = cmd_cap->roce_max_srq; + roce_own_cap->max_cq = cmd_cap->roce_max_cq; + roce_own_cap->max_qp = cmd_cap->roce_max_qp; + roce_own_cap->max_mpt = cmd_cap->roce_max_mpt; + roce_own_cap->max_drc_qp = cmd_cap->roce_max_drc_qp; + + roce_own_cap->wqe_cl_size = cmd_cap->roce_wqe_cl_size; + roce_own_cap->wqe_cl_start = cmd_cap->roce_wqe_cl_start; + roce_own_cap->wqe_cl_end = cmd_cap->roce_wqe_cl_end; + + if (roce_own_cap->max_qp == 0) { + roce_own_cap->max_drc_qp = SSS_ROCE_MAX_DRC_QP; + if (type == SSS_FUNC_TYPE_PF || type == SSS_FUNC_TYPE_PPF) { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT; + } else { + roce_own_cap->max_srq = SSS_ROCE_MAX_SRQ / 2; + roce_own_cap->max_cq = SSS_ROCE_MAX_CQ / 2; + roce_own_cap->max_qp = SSS_ROCE_MAX_QP / 2; + roce_own_cap->max_mpt = SSS_ROCE_MAX_MPT / 2; + } + } + + sss_init_rdma_cap_param(hwdev); + + sdk_info(hwdev->dev_hdl, "Get ROCE capbility, type: 
0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_qps: 0x%x, max_srq: 0x%x, max_cq: 0x%x, max_mpt: 0x%x, max_drct: 0x%x\n", + roce_own_cap->max_qp, roce_own_cap->max_srq, roce_own_cap->max_cq, + roce_own_cap->max_mpt, roce_own_cap->max_drc_qp); + sdk_info(hwdev->dev_hdl, "wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", + roce_own_cap->wqe_cl_start, roce_own_cap->wqe_cl_end, roce_own_cap->wqe_cl_size); +} + +static void sss_parse_rdma_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_rdma_service_cap *rdma_svc_cap = &svc_cap->rdma_cap; + struct sss_dev_roce_svc_own_cap *roce_own_cap = + &rdma_svc_cap->dev_rdma_cap.roce_own_cap; + + if (!SSS_IS_RDMA_ENABLE(hwdev)) + return; + + roce_own_cap->dmtt_cl_start = cmd_cap->roce_dmtt_cl_start; + roce_own_cap->dmtt_cl_end = cmd_cap->roce_dmtt_cl_end; + roce_own_cap->dmtt_cl_size = cmd_cap->roce_dmtt_cl_size; + + roce_own_cap->cmtt_cl_start = cmd_cap->roce_cmtt_cl_start; + roce_own_cap->cmtt_cl_end = cmd_cap->roce_cmtt_cl_end; + roce_own_cap->cmtt_cl_size = cmd_cap->roce_cmtt_cl_size; + + rdma_svc_cap->log_mtt = SSS_LOG_MTT_SEG; + rdma_svc_cap->log_mtt_seg = SSS_LOG_MTT_SEG; + rdma_svc_cap->mtt_entry_size = SSS_MTT_ENTRY_SIZE; + rdma_svc_cap->mpt_entry_size = SSS_RDMA_MPT_ENTRY_SIZE; + rdma_svc_cap->mtt_num = SSS_RDMA_MTT_NUM; + + sdk_info(hwdev->dev_hdl, "Get RDMA capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "cmtt_cl_start: 0x%x, cmtt_cl_end: 0x%x, cmtt_cl_size: 0x%x\n", + roce_own_cap->cmtt_cl_start, roce_own_cap->cmtt_cl_end, + roce_own_cap->cmtt_cl_size); + sdk_info(hwdev->dev_hdl, "dmtt_cl_start: 0x%x, dmtt_cl_end: 0x%x, dmtt_cl_size: 0x%x\n", + roce_own_cap->dmtt_cl_start, roce_own_cap->dmtt_cl_end, + roce_own_cap->dmtt_cl_size); +} + +static void sss_parse_ovs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ovs_service_cap *ovs_cap = &svc_cap->ovs_cap; + struct sss_dev_ovs_svc_cap *dev_ovs_cap = &ovs_cap->dev_ovs_cap; + + if (!SSS_IS_OVS_TYPE(hwdev)) + return; + + dev_ovs_cap->max_pctx = cmd_cap->ovs_max_qpc; + dev_ovs_cap->pseudo_vf_start_id = cmd_cap->pseudo_vf_start_id; + dev_ovs_cap->pseudo_vf_num = cmd_cap->pseudo_vf_num; + dev_ovs_cap->pseudo_vf_max_pctx = cmd_cap->pseudo_vf_max_pctx; + dev_ovs_cap->dynamic_qp_en = cmd_cap->flexq_en; + ovs_cap->pctx_size = SSS_OVS_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get OVS capbility, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_pctxs: 0x%x, pseudo_vf_start_id: 0x%x, pseudo_vf_num: 0x%x\n", + dev_ovs_cap->max_pctx, dev_ovs_cap->pseudo_vf_start_id, + dev_ovs_cap->pseudo_vf_num); + sdk_info(hwdev->dev_hdl, "pseudo_vf_max_pctx: 0x%x, dynamic_qp_en: 0x%x\n", + dev_ovs_cap->pseudo_vf_max_pctx, dev_ovs_cap->dynamic_qp_en); +} + +static void sss_parse_ppa_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ppa_service_cap *ppa_cap = &svc_cap->ppa_cap; + + if (!SSS_IS_PPA_TYPE(hwdev)) + return; + + ppa_cap->qpc_pseudo_vf_start = cmd_cap->pseudo_vf_start_id; + ppa_cap->qpc_pseudo_vf_num = cmd_cap->pseudo_vf_num; + ppa_cap->qpc_pseudo_vf_ctx_num = cmd_cap->pseudo_vf_max_pctx; + ppa_cap->bloomfilter_len = cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->bloomfilter_en = !!cmd_cap->pseudo_vf_bfilter_len; + ppa_cap->pctx_size = SSS_PPA_PCTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get PPA capbility, type: 0x%x\n", 
type); + sdk_info(hwdev->dev_hdl, + "qpc_pseudo_vf_start: 0x%x, qpc_pseudo_vf_num: 0x%x, qpc_pseudo_vf_ctx_num: 0x%x\n", + ppa_cap->qpc_pseudo_vf_start, ppa_cap->qpc_pseudo_vf_num, + ppa_cap->qpc_pseudo_vf_ctx_num); +} + +static void sss_parse_toe_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_toe_service_cap *toe_svc_cap = &svc_cap->toe_cap; + struct sss_dev_toe_svc_cap *dev_toe_cap = &toe_svc_cap->dev_toe_cap; + + if (!SSS_IS_TOE_TYPE(hwdev)) + return; + + dev_toe_cap->max_srq = cmd_cap->toe_max_srq; + dev_toe_cap->max_cq = cmd_cap->toe_max_cq; + dev_toe_cap->srq_id_start = cmd_cap->toe_srq_id_start; + dev_toe_cap->max_pctx = cmd_cap->toe_max_pctx; + dev_toe_cap->max_cctxt = cmd_cap->toe_max_cctxt; + dev_toe_cap->max_mpt = cmd_cap->toe_max_mpt; + + toe_svc_cap->pctx_size = SSS_TOE_PCTX_SIZE; + toe_svc_cap->scqc_size = SSS_TOE_SCQC_SIZE; + + sdk_info(hwdev->dev_hdl, "Get TOE capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, + "max_pctx: 0x%x, max_cq: 0x%x, max_srq: 0x%x, srq_id_start: 0x%x, max_mpt: 0x%x\n", + dev_toe_cap->max_pctx, dev_toe_cap->max_cq, dev_toe_cap->max_srq, + dev_toe_cap->srq_id_start, dev_toe_cap->max_mpt); +} + +static void sss_parse_ipsec_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_ipsec_service_cap *ipsec_cap = &svc_cap->ipsec_cap; + struct sss_dev_ipsec_svc_cap *dev_ipsec_cap = &ipsec_cap->dev_ipsec_cap; + + if (!SSS_IS_IPSEC_TYPE(hwdev)) + return; + + dev_ipsec_cap->max_sactx = cmd_cap->ipsec_max_sactx; + dev_ipsec_cap->max_cq = cmd_cap->ipsec_max_cq; + ipsec_cap->sactx_size = SSS_IPSEC_SACTX_SIZE; + + sdk_info(hwdev->dev_hdl, "Get IPSEC capability, type: 0x%x\n", type); + sdk_info(hwdev->dev_hdl, "max_sactx: 0x%x, max_cq: 0x%x\n", + dev_ipsec_cap->max_sactx, dev_ipsec_cap->max_cq); +} + +static void sss_parse_vbs_cap(struct sss_hwdev *hwdev, + struct sss_service_cap *svc_cap, + struct sss_cmd_dev_cap_cfg *cmd_cap, + enum sss_func_type type) +{ + struct sss_vbs_service_cap *vbs_cap = &svc_cap->vbs_cap; + + if (!SSS_IS_VBS_TYPE(hwdev)) + return; + + vbs_cap->vbs_max_volq = cmd_cap->vbs_max_volq; + + sdk_info(hwdev->dev_hdl, "Get VBS capability, type: 0x%x, vbs_max_volq: 0x%x\n", + type, vbs_cap->vbs_max_volq); +} + +static void sss_parse_dev_cap(struct sss_hwdev *hwdev, + struct sss_cmd_dev_cap_cfg *cmd_cap, enum sss_func_type type) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + + sss_parse_pubic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_l2nic_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_fc_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_toe_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_rdma_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_roce_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ovs_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ipsec_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_ppa_cap(hwdev, svc_cap, cmd_cap, type); + sss_parse_vbs_cap(hwdev, svc_cap, cmd_cap, type); +} + +static int sss_chip_get_cap(struct sss_hwdev *hwdev, struct sss_cmd_dev_cap_cfg *cmd_cap) +{ + int ret; + u16 out_len = sizeof(*cmd_cap); + + cmd_cap->func_id = sss_get_global_func_id(hwdev); + sdk_info(hwdev->dev_hdl, "Get svc_cap, func_id: %u\n", cmd_cap->func_id); + + ret = sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_CFGM, SSS_CFG_CMD_GET_CAP_CFG, + cmd_cap, sizeof(*cmd_cap), cmd_cap, &out_len, 0, + SSS_CHANNEL_COMM); + if 
(SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_cap)) { + sdk_err(hwdev->dev_hdl, + "Fail to get capability, err: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_cap->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_init_capability(struct sss_hwdev *hwdev) +{ + int ret; + enum sss_func_type type = SSS_GET_FUNC_TYPE(hwdev); + struct sss_cmd_dev_cap_cfg cmd_cap = {0}; + + if (type != SSS_FUNC_TYPE_PF && + type != SSS_FUNC_TYPE_VF && + type != SSS_FUNC_TYPE_PPF) { + sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", type); + return -EINVAL; + } + + ret = sss_chip_get_cap(hwdev, &cmd_cap); + if (ret != 0) + return ret; + + sss_parse_dev_cap(hwdev, &cmd_cap, type); + + sdk_info(hwdev->dev_hdl, "Success to init capability\n"); + return 0; +} + +void sss_deinit_capability(struct sss_hwdev *hwdev) +{ + sdk_info(hwdev->dev_hdl, "Success to deinit capability"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h new file mode 100644 index 0000000000000000000000000000000000000000..fa4a8809e1fd580eb5d29f425e4f08530dd5312e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_cap.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_CAP_H +#define SSS_HWDEV_CAP_H + +#include "sss_hwdev.h" + +int sss_init_capability(struct sss_hwdev *dev); +void sss_deinit_capability(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c new file mode 100644 index 0000000000000000000000000000000000000000..0469392468273f72ae4c08b537b0053f5fe94c90 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_export.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hw_svc_cap.h" + +#define SSS_DEFAULT_RX_BUF_SIZE_LEVEL ((u16)0xB) + +enum sss_rx_buf_size { + SSS_RX_BUF_SIZE_32B = 0x20, + SSS_RX_BUF_SIZE_64B = 0x40, + SSS_RX_BUF_SIZE_96B = 0x60, + SSS_RX_BUF_SIZE_128B = 0x80, + SSS_RX_BUF_SIZE_192B = 0xC0, + SSS_RX_BUF_SIZE_256B = 0x100, + SSS_RX_BUF_SIZE_384B = 0x180, + SSS_RX_BUF_SIZE_512B = 0x200, + SSS_RX_BUF_SIZE_768B = 0x300, + SSS_RX_BUF_SIZE_1K = 0x400, + SSS_RX_BUF_SIZE_1_5K = 0x600, + SSS_RX_BUF_SIZE_2K = 0x800, + SSS_RX_BUF_SIZE_3K = 0xC00, + SSS_RX_BUF_SIZE_4K = 0x1000, + SSS_RX_BUF_SIZE_8K = 0x2000, + SSS_RX_BUF_SIZE_16K = 0x4000, +}; + +const int sss_rx_buf_size_level[] = { + SSS_RX_BUF_SIZE_32B, + SSS_RX_BUF_SIZE_64B, + SSS_RX_BUF_SIZE_96B, + SSS_RX_BUF_SIZE_128B, + SSS_RX_BUF_SIZE_192B, + SSS_RX_BUF_SIZE_256B, + SSS_RX_BUF_SIZE_384B, + SSS_RX_BUF_SIZE_512B, + SSS_RX_BUF_SIZE_768B, + SSS_RX_BUF_SIZE_1K, + SSS_RX_BUF_SIZE_1_5K, + SSS_RX_BUF_SIZE_2K, + SSS_RX_BUF_SIZE_3K, + SSS_RX_BUF_SIZE_4K, + SSS_RX_BUF_SIZE_8K, + SSS_RX_BUF_SIZE_16K, +}; + +static u16 sss_get_rx_buf_size_level(int buf_size) +{ + u16 i; + u16 cnt = ARRAY_LEN(sss_rx_buf_size_level); + + for (i = 0; i < cnt; i++) { + if (sss_rx_buf_size_level[i] == buf_size) + return i; + } + + return SSS_DEFAULT_RX_BUF_SIZE_LEVEL; /* default 2K */ +} + +static int sss_chip_get_interrupt_cfg(void *hwdev, + struct sss_irq_cfg *intr_cfg, u16 channel) +{ + int ret; + struct 
sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + cmd_msix.opcode = SSS_MGMT_MSG_GET_CMD; + cmd_msix.func_id = sss_get_global_func_id(hwdev); + cmd_msix.msix_index = intr_cfg->msix_id; + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to get intr config, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_msix.head.state, out_len, channel); + return -EINVAL; + } + + intr_cfg->lli_credit = cmd_msix.lli_credit_cnt; + intr_cfg->lli_timer = cmd_msix.lli_timer_cnt; + intr_cfg->pending = cmd_msix.pending_cnt; + intr_cfg->coalesc_timer = cmd_msix.coalesce_timer_cnt; + intr_cfg->resend_timer = cmd_msix.resend_timer_cnt; + + return 0; +} + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel) +{ + int ret; + struct sss_irq_cfg temp_cfg = {0}; + + if (!hwdev) + return -EINVAL; + + temp_cfg.msix_id = intr_cfg.msix_id; + + ret = sss_chip_get_interrupt_cfg(hwdev, &temp_cfg, channel); + if (ret != 0) + return -EINVAL; + + if (intr_cfg.lli_set == 0) { + intr_cfg.lli_credit = temp_cfg.lli_credit; + intr_cfg.lli_timer = temp_cfg.lli_timer; + } + + if (intr_cfg.coalesc_intr_set == 0) { + intr_cfg.pending = temp_cfg.pending; + intr_cfg.coalesc_timer = temp_cfg.coalesc_timer; + intr_cfg.resend_timer = temp_cfg.resend_timer; + } + + return sss_chip_set_eq_msix_attr(hwdev, &intr_cfg, channel); +} +EXPORT_SYMBOL(sss_chip_set_msix_attr); + +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en) +{ + u32 val; + + if (!hwdev) + return; + + val = SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID) | + SSS_SET_MSI_CLR_INDIR(!!clear_en, RESEND_TIMER_CLR); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_clear_msix_resend_bit); + +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 channel) +{ + int ret = 0; + struct sss_cmd_func_reset cmd_reset = {0}; + u16 out_len = sizeof(cmd_reset); + + if (!hwdev) + return -EINVAL; + + cmd_reset.func_id = func_id; + cmd_reset.reset_flag = flag; + sdk_info(SSS_TO_DEV(hwdev), "Func reset, flag: 0x%llx, channel:0x%x\n", flag, channel); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FUNC_RESET, + &cmd_reset, sizeof(cmd_reset), &cmd_reset, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_reset)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to reset func, flag 0x%llx, ret: %d, status: 0x%x, out_len: 0x%x\n", + flag, ret, cmd_reset.head.state, out_len); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_reset_function); + +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 channel) +{ + int ret; + struct sss_cmd_root_ctxt cmd_root = {0}; + u16 out_len = sizeof(cmd_root); + + if (!hwdev) + return -EINVAL; + + cmd_root.func_id = sss_get_global_func_id(hwdev); + if (rq_depth != 0 || sq_depth != 0 || rx_size != 0) { + cmd_root.rx_buf_sz = sss_get_rx_buf_size_level(rx_size); + cmd_root.rq_depth = (u16)ilog2(rq_depth); + cmd_root.sq_depth = (u16)ilog2(sq_depth); + cmd_root.lro_en = 1; + } + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, + &cmd_root, sizeof(cmd_root), &cmd_root, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_root)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set root ctx, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + 
ret, cmd_root.head.state, out_len, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_root_ctx); + +int sss_chip_clean_root_ctx(void *hwdev, u16 channel) +{ + return sss_chip_set_root_ctx(hwdev, 0, 0, 0, channel); +} +EXPORT_SYMBOL(sss_chip_clean_root_ctx); + +static int sss_get_fw_ver(struct sss_hwdev *hwdev, + enum sss_fw_ver_type fw_type, u8 *buf, u8 buf_size, u16 channel) +{ + int ret; + struct sss_cmd_get_fw_version cmd_version = {0}; + u16 out_len = sizeof(cmd_version); + + if (!hwdev || !buf) + return -EINVAL; + + cmd_version.fw_type = fw_type; + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_GET_FW_VERSION, + &cmd_version, sizeof(cmd_version), &cmd_version, + &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_version)) { + sdk_err(hwdev->dev_hdl, + "Fail to get fw version, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_version.head.state, out_len, channel); + return -EIO; + } + + ret = snprintf(buf, buf_size, "%s", cmd_version.ver); + if (ret < 0) + return -EINVAL; + + return 0; +} + +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel) +{ + return sss_get_fw_ver(hwdev, SSS_FW_VER_TYPE_MPU, buf, + buf_size, channel); +} +EXPORT_SYMBOL(sss_get_mgmt_version); + +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel) +{ + int ret; + struct sss_cmd_func_svc_used_state cmd_state = {0}; + u16 out_len = sizeof(cmd_state); + + if (!hwdev) + return -EINVAL; + + cmd_state.func_id = sss_get_global_func_id(hwdev); + cmd_state.svc_type = service_type; + cmd_state.used_state = !!state; + + ret = sss_sync_send_msg_ch(hwdev, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + &cmd_state, sizeof(cmd_state), &cmd_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_state)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set func used state, ret: %d, status: 0x%x, out_len: 0x%x, channel: 0x%x\n\n", + ret, cmd_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(sss_chip_set_func_used_state); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability) +{ + struct sss_hwdev *dev = hwdev; + + if (!capability || !hwdev) + return false; + + if (SSS_IS_NIC_TYPE(dev)) { + memcpy(capability, SSS_TO_NIC_CAP(hwdev), sizeof(*capability)); + return true; + } else { + return false; + } +} +EXPORT_SYMBOL(sss_get_nic_capability); + +bool sss_support_nic(void *hwdev) +{ + return (hwdev && SSS_IS_NIC_TYPE((struct sss_hwdev *)hwdev)); +} +EXPORT_SYMBOL(sss_support_nic); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!SSS_IS_PPA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->mgmt_info->svc_cap.ppa_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(sss_support_ppa); + +u16 sss_get_max_sq_num(void *hwdev) +{ + if (!hwdev) { + pr_err("Get max sq num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_SQ_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_sq_num); + +u8 sss_get_phy_port_id(void *hwdev) +{ + if (!hwdev) { + pr_err("Get phy port id: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_PHY_PORT_ID(hwdev); +} +EXPORT_SYMBOL(sss_get_phy_port_id); + +u16 sss_get_max_vf_num(void *hwdev) +{ + if (!hwdev) { + pr_err("Get max vf num: hwdev is NULL\n"); + return 0; + } + + return SSS_TO_MAX_VF_NUM(hwdev); +} +EXPORT_SYMBOL(sss_get_max_vf_num); + +u16 sss_nic_intr_num(void *hwdev) +{ + struct sss_hwif *hwif = 
NULL; + + if (!hwdev) + return 0; + + hwif = ((struct sss_hwdev *)hwdev)->hwif; + + return hwif->attr.irq_num; +} +EXPORT_SYMBOL(sss_nic_intr_num); + +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap) +{ + if (!hwdev) { + pr_err("Get cos valid bitmap: hwdev is NULL\n"); + return -EINVAL; + } + + *func_cos_bitmap = SSS_TO_FUNC_COS_BITMAP(hwdev); + *port_cos_bitmap = SSS_TO_PORT_COS_BITMAP(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_cos_valid_bitmap); + +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, u16 alloc_num) +{ + int i; + int j; + u16 need_num = alloc_num; + u16 act_num = 0; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev || !alloc_array) + return 0; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + if (irq_info->free_num == 0) { + sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + return 0; + } + + if (alloc_num > irq_info->free_num) { + sdk_warn(SSS_TO_DEV(hwdev), "Adjust need_num to %u\n", irq_info->free_num); + need_num = irq_info->free_num; + } + + for (i = 0; i < need_num; i++) { + for (j = 0; j < irq_info->total_num; j++) { + if (irq[j].busy != SSS_CFG_FREE) + continue; + + if (irq_info->free_num == 0) { + sdk_err(SSS_TO_DEV(hwdev), "Fail to alloc irq, free_num is zero\n"); + mutex_unlock(&irq_info->irq_mutex); + memset(alloc_array, 0, sizeof(*alloc_array) * alloc_num); + return 0; + } + + irq[j].type = service_type; + irq[j].busy = SSS_CFG_BUSY; + + alloc_array[i].irq_id = irq[j].desc.irq_id; + alloc_array[i].msix_id = irq[j].desc.msix_id; + irq_info->free_num--; + act_num++; + + break; + } + } + + mutex_unlock(&irq_info->irq_mutex); + return act_num; +} +EXPORT_SYMBOL(sss_alloc_irq); + +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id) +{ + int i; + struct sss_irq_info *irq_info = NULL; + struct sss_irq *irq = NULL; + + if (!hwdev) + return; + + irq_info = SSS_TO_IRQ_INFO(hwdev); + irq = irq_info->irq; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < irq_info->total_num; i++) { + if (irq_id != irq[i].desc.irq_id || + service_type != irq[i].type) + continue; + + if (irq[i].busy == SSS_CFG_FREE) + continue; + + irq[i].busy = SSS_CFG_FREE; + irq_info->free_num++; + if (irq_info->free_num > irq_info->total_num) { + sdk_err(SSS_TO_DEV(hwdev), "Free num out of range: [0, %u]\n", + irq_info->total_num); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + + if (i >= irq_info->total_num) + sdk_warn(SSS_TO_DEV(hwdev), "Irq %u does not need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(sss_free_irq); + +void sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Register event: hwdev is NULL\n"); + return; + } + + dev->event_handler = callback; + dev->event_handler_data = data; +} +EXPORT_SYMBOL(sss_register_dev_event); + +void sss_unregister_dev_event(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Unregister event: hwdev is NULL\n"); + return; + } + + dev->event_handler = NULL; + dev->event_handler_data = NULL; +} +EXPORT_SYMBOL(sss_unregister_dev_event); + +int sss_get_dev_present_flag(const void *hwdev) +{ + return hwdev && !!((struct sss_hwdev *)hwdev)->chip_present_flag; +} +EXPORT_SYMBOL(sss_get_dev_present_flag); + +u8 sss_get_max_pf_num(void 
*hwdev) +{ + if (!hwdev) + return 0; + + return SSS_MAX_PF_NUM((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_max_pf_num); + +int sss_get_chip_present_state(void *hwdev, bool *present_state) +{ + if (!hwdev || !present_state) + return -EINVAL; + + *present_state = sss_chip_get_present_state(hwdev); + + return 0; +} +EXPORT_SYMBOL(sss_get_chip_present_state); + +void sss_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(SSS_TO_DEV(hwdev), + "Fault event report, src: %u, level: %u\n", src, level); +} +EXPORT_SYMBOL(sss_fault_event_report); + +int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type, + void *service_adapter) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || service_type >= SSS_SERVICE_TYPE_MAX) + return -EINVAL; + + if (dev->service_adapter[service_type]) + return -EINVAL; + + dev->service_adapter[service_type] = service_adapter; + + return 0; +} +EXPORT_SYMBOL(sss_register_service_adapter); + +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return; + + dev->service_adapter[service_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_service_adapter); + +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev || service_type >= SSS_SERVICE_TYPE_MAX) + return NULL; + + return dev->service_adapter[service_type]; +} +EXPORT_SYMBOL(sss_get_service_adapter); + +void sss_do_event_callback(void *hwdev, struct sss_event_info *event) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Event callback: hwdev is NULL\n"); + return; + } + + if (!dev->event_handler) { + sdk_info(dev->dev_hdl, "Event callback: handler is NULL\n"); + return; + } + + dev->event_handler(dev->event_handler_data, event); +} +EXPORT_SYMBOL(sss_do_event_callback); + +void sss_update_link_stats(void *hwdev, bool link_state) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (link_state) + atomic_inc(&dev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&dev->hw_stats.link_event_stats.link_down_stats); +} +EXPORT_SYMBOL(sss_update_link_stats); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c new file mode 100644 index 0000000000000000000000000000000000000000..50c45a623a90ce8ee30865b06ae040a9588001fe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_adapter.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_mgmt_info.h" +#include "sss_hwdev_mgmt_channel.h" +#include "sss_hwdev_cap.h" +#include "sss_hwdev_link.h" +#include "sss_hwdev_io_flush.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_hwif_mgmt_init.h" + +enum sss_host_mode { + SSS_HOST_MODE_NORMAL = 0, + SSS_HOST_MODE_VM, + SSS_HOST_MODE_BM, + SSS_HOST_MODE_MAX, +}; + +#define SSS_HWDEV_WQ_NAME "sssnic_hardware" +#define SSS_WQ_MAX_REQ 10 + +#define SSS_DETECT_PCIE_LINK_DOWN_RETRY 2 + +#define SSS_CHN_BUSY_TIMEOUT 25 + +#define SSS_HEARTBEAT_TIMER_EXPIRES 5000 +#define 
SSS_HEARTBEAT_PERIOD 1000 + +#define SSS_GET_PCIE_LINK_STATUS(hwdev) \ + ((hwdev)->heartbeat.pcie_link_down ? \ + SSS_EVENT_PCIE_LINK_DOWN : SSS_EVENT_HEART_LOST) + +#define SSS_SET_FUNC_HOST_MODE(hwdev, mode) \ +do { \ + if ((mode) >= SSS_FUNC_MOD_MIN && (mode) <= SSS_FUNC_MOD_MAX) { \ + (hwdev)->func_mode = (mode); \ + } else \ + (hwdev)->func_mode = SSS_FUNC_MOD_NORMAL_HOST; \ +} while (0) + +#define SSS_SYNFW_TIME_PERIOD (60 * 60 * 1000) +#define SSS_CHANNEL_DETECT_PERIOD (5 * 1000) + +#define SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CHANNEL_DETECT) + +typedef void (*sss_set_mode_handler_t)(struct sss_hwdev *hwdev); + +static struct sss_hwdev *sss_alloc_hwdev(void) +{ + struct sss_hwdev *hwdev; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return NULL; + + hwdev->chip_fault_stats = vzalloc(SSS_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) { + kfree(hwdev); + return NULL; + } + + return hwdev; +} + +static void sss_free_hwdev(struct sss_hwdev *hwdev) +{ + vfree(hwdev->chip_fault_stats); + kfree(hwdev); +} + +static void sss_init_hwdev_param(struct sss_hwdev *hwdev, + struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + hwdev->adapter_hdl = adapter; + hwdev->pcidev_hdl = pdev; + hwdev->dev_hdl = &pdev->dev; + hwdev->chip_node = adapter->chip_node; + spin_lock_init(&hwdev->channel_lock); +} + +static void sss_set_chip_present_flag(struct sss_hwdev *hwdev, bool present) +{ + hwdev->chip_present_flag = !!present; +} + +static bool sss_is_chip_abnormal(struct sss_hwdev *hwdev) +{ + u32 pcie_status; + + if (!sss_get_dev_present_flag(hwdev)) + return false; + + pcie_status = sss_chip_get_pcie_link_status(hwdev); + if (pcie_status == SSS_PCIE_LINK_DOWN) { + hwdev->heartbeat.pcie_link_down_cnt++; + sdk_warn(hwdev->dev_hdl, "Pcie link down\n"); + if (hwdev->heartbeat.pcie_link_down_cnt >= SSS_DETECT_PCIE_LINK_DOWN_RETRY) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + hwdev->heartbeat.pcie_link_down = true; + return true; + } + + return false; + } + + if (pcie_status != SSS_PCIE_LINK_UP) { + hwdev->heartbeat.heartbeat_lost = true; + return true; + } + + hwdev->heartbeat.pcie_link_down_cnt = 0; + + return false; +} + +static void sss_update_aeq_stat(struct sss_hwdev *hwdev) +{ + if (hwdev->aeq_stat.last_recv_cnt != hwdev->aeq_stat.cur_recv_cnt) { + hwdev->aeq_stat.last_recv_cnt = hwdev->aeq_stat.cur_recv_cnt; + hwdev->aeq_stat.busy_cnt = 0; + } else { + hwdev->aeq_stat.busy_cnt++; + } +} + +static void sss_update_channel_status(struct sss_hwdev *hwdev) +{ + struct sss_card_node *node = hwdev->chip_node; + + if (!node) + return; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_PPF || + !SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev) || + atomic_read(&node->channel_timeout_cnt)) + return; + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) { + sss_update_aeq_stat(hwdev); + + if (hwdev->aeq_stat.busy_cnt > SSS_CHN_BUSY_TIMEOUT) { + sdk_err(hwdev->dev_hdl, "Detect channel busy\n"); + atomic_inc(&node->channel_timeout_cnt); + } + } +} + +static void sss_heartbeat_timer_handler(struct timer_list *t) +{ + struct sss_hwdev *hwdev = from_timer(hwdev, t, heartbeat.heartbeat_timer); + + if (sss_is_chip_abnormal(hwdev)) { + queue_work(hwdev->workq, &hwdev->heartbeat.lost_work); + } else { + mod_timer(&hwdev->heartbeat.heartbeat_timer, + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_PERIOD)); + } + + sss_update_channel_status(hwdev); +} + +static void sss_heartbeat_lost_handler(struct work_struct 
*work) +{ + u16 fault_level; + u16 pcie_src; + struct sss_event_info event_info = {0}; + struct sss_hwdev *hwdev = container_of(work, struct sss_hwdev, + heartbeat.lost_work); + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_handler) { + event_info.type = SSS_GET_PCIE_LINK_STATUS(hwdev); + event_info.service = SSS_EVENT_SRV_COMM; + hwdev->event_handler(hwdev->event_handler_data, &event_info); + } + + if (hwdev->heartbeat.pcie_link_down) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + fault_level = SSS_FAULT_LEVEL_HOST; + pcie_src = SSS_FAULT_SRC_PCIE_LINK_DOWN; + } else { + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + sss_get_global_func_id(hwdev)); + fault_level = SSS_FAULT_LEVEL_FATAL; + pcie_src = SSS_FAULT_SRC_HOST_HEARTBEAT_LOST; + } + + sss_dump_chip_err_info(hwdev); +} + +static void sss_create_heartbeat_timer(struct sss_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat.heartbeat_timer, sss_heartbeat_timer_handler, 0); + hwdev->heartbeat.heartbeat_timer.expires = + jiffies + msecs_to_jiffies(SSS_HEARTBEAT_TIMER_EXPIRES); + add_timer(&hwdev->heartbeat.heartbeat_timer); + + INIT_WORK(&hwdev->heartbeat.lost_work, sss_heartbeat_lost_handler); +} + +static void sss_destroy_heartbeat_timer(struct sss_hwdev *hwdev) +{ + destroy_work(&hwdev->heartbeat.lost_work); + del_timer_sync(&hwdev->heartbeat.heartbeat_timer); +} + +static void sss_set_bm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_BM_SLAVE); +} + +static void sss_set_vm_host_mode(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + u8 host_id = SSS_GET_HWIF_PCI_INTF_ID(hwdev->hwif); + + if (host_id == svc_cap->master_host_id) + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_MASTER); + else + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_MULTI_VM_SLAVE); +} + +static void sss_set_normal_host_mode(struct sss_hwdev *hwdev) +{ + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); +} + +static int sss_enable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), true); + + return 0; +} + +static int sss_disable_multi_host(struct sss_hwdev *hwdev) +{ + if (!SSS_IS_PPF(hwdev) || !SSS_IS_MULTI_HOST(hwdev)) + return 0; + + if (SSS_IS_SLAVE_HOST(hwdev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); + + return 0; +} + +static int sss_init_host_mode(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + sss_set_mode_handler_t handler[SSS_HOST_MODE_MAX] = { + sss_set_normal_host_mode, + sss_set_vm_host_mode, + sss_set_bm_host_mode + }; + + if (SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + if (svc_cap->srv_multi_host_mode >= SSS_HOST_MODE_MAX) { + SSS_SET_FUNC_HOST_MODE(hwdev, SSS_FUNC_MOD_NORMAL_HOST); + return 0; + } + + handler[svc_cap->srv_multi_host_mode](hwdev); + + ret = sss_enable_multi_host(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init function mode\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_host_mode(struct 
sss_hwdev *hwdev) +{ + sss_disable_multi_host(hwdev); +} + +static u64 sss_get_real_time(void) +{ + struct timeval val = {0}; + + do_gettimeofday(&val); + + return (u64)val.tv_sec * MSEC_PER_SEC + + (u64)val.tv_usec / USEC_PER_MSEC; +} + +static void sss_auto_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, sync_time_task); + int ret; + + ret = sss_chip_sync_time(hwdev, sss_get_real_time()); + if (ret != 0) + sdk_err(hwdev->dev_hdl, + "Fail to sync UTC time to firmware, errno:%d.\n", ret); + + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); +} + +static void sss_auto_channel_detect_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct sss_hwdev *hwdev = container_of(delay, + struct sss_hwdev, channel_detect_task); + struct sss_card_node *chip_node = NULL; + + sss_chip_comm_channel_detect(hwdev); + + chip_node = hwdev->chip_node; + if (!atomic_read(&chip_node->channel_timeout_cnt)) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); +} + +static void sss_hwdev_init_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + INIT_DELAYED_WORK(&hwdev->sync_time_task, sss_auto_sync_time_work); + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(SSS_SYNFW_TIME_PERIOD)); + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + INIT_DELAYED_WORK(&hwdev->channel_detect_task, + sss_auto_channel_detect_work); + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSS_CHANNEL_DETECT_PERIOD)); + } +} + +static void sss_hwdev_deinit_work(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + if (SSS_COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + hwdev->features[0] &= ~(SSS_COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); +} + +int sss_init_hwdev(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev; + int ret; + + hwdev = sss_alloc_hwdev(); + if (!hwdev) + return -ENOMEM; + + sss_init_hwdev_param(hwdev, adapter); + adapter->hwdev = hwdev; + + ret = sss_hwif_init(adapter); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif\n"); + goto init_hwif_err; + } + + sss_set_chip_present_flag(hwdev, true); + + hwdev->workq = alloc_workqueue(SSS_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, SSS_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Fail to alloc hardware workq\n"); + goto alloc_workq_err; + } + + sss_create_heartbeat_timer(hwdev); + + ret = sss_init_mgmt_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt info\n"); + goto init_mgmt_info_err; + } + + ret = sss_init_mgmt_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt channel\n"); + goto init_mgmt_channel_err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + ret = sss_init_devlink(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init devlink\n"); + goto init_devlink_err; + } +#endif + + ret = sss_init_capability(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init capability\n"); + goto init_cap_err; + } + + ret = sss_init_host_mode(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init host mode\n"); + goto init_multi_host_fail; + } + + sss_hwdev_init_work(hwdev); + + 
ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + sss_hwdev_deinit_work(hwdev); + + sss_deinit_host_mode(hwdev); +init_multi_host_fail: + sss_deinit_capability(hwdev); + +init_cap_err: +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(hwdev); + +init_devlink_err: +#endif + sss_deinit_mgmt_channel(hwdev); + +init_mgmt_channel_err: + sss_deinit_mgmt_info(hwdev); + +init_mgmt_info_err: + sss_destroy_heartbeat_timer(hwdev); + destroy_workqueue(hwdev->workq); + +alloc_workq_err: + sss_hwif_deinit(hwdev); + +init_hwif_err: + sss_free_hwdev(hwdev); + adapter->hwdev = NULL; + + return -EFAULT; +} + +void sss_deinit_hwdev(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + u64 drv_features[SSS_MAX_FEATURE_QWORD] = {0}; + + sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_SET_CMD, + drv_features, SSS_MAX_FEATURE_QWORD); + + sss_hwdev_deinit_work(dev); + + if (SSS_IS_MULTI_HOST(dev)) + sss_disable_multi_host(dev); + + sss_hwdev_flush_io(dev, SSS_CHANNEL_COMM); + + sss_deinit_capability(dev); + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + sss_deinit_devlink(dev); +#endif + + sss_deinit_mgmt_channel(dev); + + sss_deinit_mgmt_info(dev); + sss_destroy_heartbeat_timer(hwdev); + destroy_workqueue(dev->workq); + + sss_hwif_deinit(dev); + sss_free_hwdev(dev); +} + +void sss_hwdev_stop(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + sss_set_chip_present_flag(hwdev, false); + sdk_info(dev->dev_hdl, "Set card absent\n"); + sss_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +void sss_hwdev_detach(void *hwdev) +{ + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) { + sss_set_chip_present_flag(hwdev, false); + sss_force_complete_all(hwdev); + } +} + +void sss_hwdev_shutdown(void *hwdev) +{ + struct sss_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (SSS_IS_SLAVE_HOST(dev)) + sss_chip_set_slave_host_status(hwdev, sss_get_pcie_itf_id(hwdev), false); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h new file mode 100644 index 0000000000000000000000000000000000000000..43f35f29588c7fb4afd805ab3dbd3943bcaed2df --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_INIT_H +#define SSS_HWDEV_INIT_H + +#include "sss_adapter.h" + +int sss_init_hwdev(struct sss_pci_adapter *adapter); +void sss_deinit_hwdev(void *hwdev); +void sss_hwdev_detach(void *hwdev); +void sss_hwdev_stop(void *hwdev); +void sss_hwdev_shutdown(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c new file mode 100644 index 0000000000000000000000000000000000000000..aeb2a64d758b46c0174b91ec37edc81daacd8f22 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_hwif_api.h" +#include "sss_hwif_mbx.h" +#include "sss_common.h" + 
+#define SSS_FLR_TIMEOUT 1000 +#define SSS_FLR_TIMEOUT_ONCE 10000 + +static enum sss_process_ret sss_check_flr_finish_handler(void *priv_data) +{ + struct sss_hwif *hwif = priv_data; + enum sss_pf_status status; + + status = sss_chip_get_pf_status(hwif); + if (status == SSS_PF_STATUS_FLR_FINISH_FLAG) { + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_ACTIVE_FLAG); + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_flr_finish(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_check_flr_finish_handler, + SSS_FLR_TIMEOUT, SSS_FLR_TIMEOUT_ONCE); +} + +static int sss_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size, channel); +} + +static int sss_chip_flush_doorbell(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_doorbell clear_db = {0}; + u16 out_len = sizeof(clear_db); + int ret; + + clear_db.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + &clear_db, sizeof(clear_db), + &clear_db, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &clear_db)) { + sdk_warn(hwdev->dev_hdl, + "Fail to flush doorbell, ret: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + ret, clear_db.head.state, out_len, channel); + if (ret == 0) + return -EFAULT; + } + + return ret; +} + +static int sss_chip_flush_resource(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + struct sss_cmd_clear_resource clr_res = {0}; + int ret; + + clr_res.func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + ret = sss_msg_to_mgmt_no_ack(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_START_FLUSH, &clr_res, + sizeof(clr_res), channel); + if (ret != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to notice flush message, ret: %d, channel: 0x%x\n", + ret, channel); + } + + return ret; +} + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel) +{ + struct sss_hwif *hwif = hwdev->hwif; + int err; + int ret = 0; + + if (hwdev->chip_present_flag == 0) + return 0; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + msleep(100); + + err = sss_wait_ctrlq_stop(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to wait ctrlq stop\n"); + ret = err; + } + + sss_chip_disable_doorbell(hwif); + + err = sss_chip_flush_doorbell(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_FLR_START_FLAG); + else + msleep(100); + + err = sss_chip_flush_resource(hwdev, channel); + if (err != 0) + ret = err; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) { + err = sss_wait_for_flr_finish(hwif); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + } + + sss_chip_enable_doorbell(hwif); + + err = sss_reinit_ctrlq_ctx(hwdev); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Fail to reinit ctrlq ctx\n"); + ret = err; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h new file mode 100644 index 0000000000000000000000000000000000000000..4b15cd0d23f6e3d41b1b453f38dd276cf5b0dd0d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_io_flush.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* 
Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_IO_FLUSH_H +#define SSS_HWDEV_IO_FLUSH_H + +#include "sss_hwdev.h" + +int sss_hwdev_flush_io(struct sss_hwdev *hwdev, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c new file mode 100644 index 0000000000000000000000000000000000000000..f86da62a0015af2c1c3979abf9ee5a1679896e51 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_hwdev_link.h" +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#include "sss_hw_common.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" + +#define SSS_FW_MAGIC_NUM 0x5a5a1100 +#define SSS_FW_IMAGE_HEAD_SIZE 4096 +#define SSS_FW_FRAGMENT_MAX_LEN 1536 +#define SSS_FW_CFG_DEFAULT_INDEX 0xFF +#define SSS_FW_UPDATE_MGMT_TIMEOUT 3000000U +#define SSS_FW_TYPE_MAX_NUM 0x40 +#define SSS_FW_CFG_MAX_INDEX 8 +#define SSS_FW_CFG_MIN_INDEX 1 + +enum sss_devlink_param_id { + SSS_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + SSS_DEVLINK_PARAM_ID_SWITCH_CFG, +}; + +enum sss_firmware_type { + SSS_UP_FW_UPDATE_MIN_TYPE1 = 0x0, + SSS_UP_FW_UPDATE_UP_TEXT = 0x0, + SSS_UP_FW_UPDATE_UP_DATA = 0x1, + SSS_UP_FW_UPDATE_UP_DICT = 0x2, + SSS_UP_FW_UPDATE_TILE_PCPTR = 0x3, + SSS_UP_FW_UPDATE_TILE_TEXT = 0x4, + SSS_UP_FW_UPDATE_TILE_DATA = 0x5, + SSS_UP_FW_UPDATE_TILE_DICT = 0x6, + SSS_UP_FW_UPDATE_PPE_STATE = 0x7, + SSS_UP_FW_UPDATE_PPE_BRANCH = 0x8, + SSS_UP_FW_UPDATE_PPE_EXTACT = 0x9, + SSS_UP_FW_UPDATE_MAX_TYPE1 = 0x9, + SSS_UP_FW_UPDATE_CFG0 = 0xa, + SSS_UP_FW_UPDATE_CFG1 = 0xb, + SSS_UP_FW_UPDATE_CFG2 = 0xc, + SSS_UP_FW_UPDATE_CFG3 = 0xd, + SSS_UP_FW_UPDATE_MAX_TYPE1_CFG = 0xd, + + SSS_UP_FW_UPDATE_MIN_TYPE2 = 0x14, + SSS_UP_FW_UPDATE_MAX_TYPE2 = 0x14, + + SSS_UP_FW_UPDATE_MIN_TYPE3 = 0x18, + SSS_UP_FW_UPDATE_PHY = 0x18, + SSS_UP_FW_UPDATE_BIOS = 0x19, + SSS_UP_FW_UPDATE_HLINK_ONE = 0x1a, + SSS_UP_FW_UPDATE_HLINK_TWO = 0x1b, + SSS_UP_FW_UPDATE_HLINK_THR = 0x1c, + SSS_UP_FW_UPDATE_MAX_TYPE3 = 0x1c, + + SSS_UP_FW_UPDATE_MIN_TYPE4 = 0x20, + SSS_UP_FW_UPDATE_L0FW = 0x20, + SSS_UP_FW_UPDATE_L1FW = 0x21, + SSS_UP_FW_UPDATE_BOOT = 0x22, + SSS_UP_FW_UPDATE_SEC_DICT = 0x23, + SSS_UP_FW_UPDATE_HOT_PATCH0 = 0x24, + SSS_UP_FW_UPDATE_HOT_PATCH1 = 0x25, + SSS_UP_FW_UPDATE_HOT_PATCH2 = 0x26, + SSS_UP_FW_UPDATE_HOT_PATCH3 = 0x27, + SSS_UP_FW_UPDATE_HOT_PATCH4 = 0x28, + SSS_UP_FW_UPDATE_HOT_PATCH5 = 0x29, + SSS_UP_FW_UPDATE_HOT_PATCH6 = 0x2a, + SSS_UP_FW_UPDATE_HOT_PATCH7 = 0x2b, + SSS_UP_FW_UPDATE_HOT_PATCH8 = 0x2c, + SSS_UP_FW_UPDATE_HOT_PATCH9 = 0x2d, + SSS_UP_FW_UPDATE_HOT_PATCH10 = 0x2e, + SSS_UP_FW_UPDATE_HOT_PATCH11 = 0x2f, + SSS_UP_FW_UPDATE_HOT_PATCH12 = 0x30, + SSS_UP_FW_UPDATE_HOT_PATCH13 = 0x31, + SSS_UP_FW_UPDATE_HOT_PATCH14 = 0x32, + SSS_UP_FW_UPDATE_HOT_PATCH15 = 0x33, + SSS_UP_FW_UPDATE_HOT_PATCH16 = 0x34, + SSS_UP_FW_UPDATE_HOT_PATCH17 = 0x35, + SSS_UP_FW_UPDATE_HOT_PATCH18 = 0x36, + SSS_UP_FW_UPDATE_HOT_PATCH19 = 0x37, + SSS_UP_FW_UPDATE_MAX_TYPE4 = 0x37, + + SSS_UP_FW_UPDATE_MIN_TYPE5 = 0x3a, + SSS_UP_FW_UPDATE_OPTION_ROM = 0x3a, + SSS_UP_FW_UPDATE_MAX_TYPE5 = 0x3a, + + SSS_UP_FW_UPDATE_MIN_TYPE6 = 0x3e, + SSS_UP_FW_UPDATE_MAX_TYPE6 = 0x3e, + + SSS_UP_FW_UPDATE_MIN_TYPE7 = 0x40, + SSS_UP_FW_UPDATE_MAX_TYPE7 = 0x40, 
+}; + +#define SSS_IMAGE_MPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_UP_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_UP_DICT)) + +#define SSS_IMAGE_NPU_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_TILE_PCPTR) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_TEXT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DATA) | \ + BIT_ULL(SSS_UP_FW_UPDATE_TILE_DICT) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_STATE) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_BRANCH) | \ + BIT_ULL(SSS_UP_FW_UPDATE_PPE_EXTACT)) + +#define SSS_IMAGE_COLD_ALL_IN (SSS_IMAGE_MPU_ALL_IN | SSS_IMAGE_NPU_ALL_IN) + +#define SSS_IMAGE_CFG_ALL_IN (BIT_ULL(SSS_UP_FW_UPDATE_CFG0) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG1) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG2) | \ + BIT_ULL(SSS_UP_FW_UPDATE_CFG3)) + +#define SSS_CHECK_IMAGE_INTEGRATY(mask) \ + (((mask) & SSS_IMAGE_COLD_ALL_IN) == SSS_IMAGE_COLD_ALL_IN && \ + ((mask) & SSS_IMAGE_CFG_ALL_IN) != 0) + +#define SSS_LINK_HWDEV(link) \ + ((struct sss_hwdev *)((struct sss_devlink *)devlink_priv(link))->hwdev) + +struct sss_firmware_section { + u32 section_len; + u32 section_offset; + u32 section_version; + u32 section_type; + u32 section_crc; + u32 section_flag; +}; + +struct sss_firmware_image { + u32 fw_version; + u32 fw_len; + u32 fw_magic; + struct { + u32 section_cnt : 16; + u32 rsvd : 16; + } fw_info; + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + u32 device_id; + u32 rsvd0[101]; + u32 rsvd1[534]; + u32 bin_data; +}; + +struct sss_host_image { + struct sss_firmware_section section_info[SSS_FW_TYPE_MAX_NUM]; + struct { + u32 total_len; + u32 fw_version; + } image_info; + u32 section_cnt; + u32 device_id; +}; + +struct sss_cmd_update_firmware { + struct sss_mgmt_msg_head head; + + struct { + u32 sl : 1; + u32 sf : 1; + u32 flag : 1; + u32 bit_signed : 1; + u32 reserved : 12; + u32 fragment_len : 16; + } ctl_info; + + struct { + u32 section_crc; + u32 section_type; + } section_info; + + u32 total_len; + u32 section_len; + u32 section_version; + u32 section_offset; + u32 data[384]; +}; + +struct sss_cmd_activate_firmware { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +struct sss_cmd_switch_config { + struct sss_mgmt_msg_head head; + u8 index; /* 0 ~ 7 */ + u8 data[7]; +}; + +static bool sss_check_image_valid(struct sss_hwdev *hwdev, + struct sss_firmware_image *image, u32 image_size) +{ + u32 i; + u32 length = 0; + u32 cnt; + + if (image->fw_magic != SSS_FW_MAGIC_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw magic: 0x%x read from file\n", image->fw_magic); + return false; + } + + cnt = image->fw_info.section_cnt; + if (cnt > SSS_FW_TYPE_MAX_NUM) { + sdk_err(hwdev->dev_hdl, "Err fw type num: 0x%x read from file\n", cnt); + return false; + } + + for (i = 0; i < cnt; i++) + length += image->section_info[i].section_len; + + if (length != image->fw_len || + (u32)(image->fw_len + SSS_FW_IMAGE_HEAD_SIZE) != image_size) { + sdk_err(hwdev->dev_hdl, "Err data size: 0x%x read from file\n", length); + return false; + } + + return true; +} + +static void sss_init_host_image(struct sss_host_image *host_image, + struct sss_firmware_image *image) +{ + int i; + + for (i = 0; i < image->fw_info.section_cnt; i++) { + memcpy(&host_image->section_info[i], &image->section_info[i], + sizeof(image->section_info[i])); + } + + host_image->image_info.fw_version = image->fw_version; + host_image->section_cnt = image->fw_info.section_cnt; + host_image->device_id = image->device_id; + host_image->image_info.total_len = image->fw_len; +} + +static bool sss_check_image_integrity(struct sss_hwdev *hwdev, + 
struct sss_host_image *host_image) +{ + u32 i; + u32 section_type; + u64 mask = 0; + + for (i = 0; i < host_image->section_cnt; i++) { + section_type = host_image->section_info[i].section_type; + if (mask & (1ULL << section_type)) { + sdk_err(hwdev->dev_hdl, "Duplicate section type: %u\n", section_type); + return false; + } + mask |= (1ULL << section_type); + } + + if (SSS_CHECK_IMAGE_INTEGRATY(mask)) + return true; + + sdk_err(hwdev->dev_hdl, + "Fail to check file integrity, valid: 0x%llx, current: 0x%llx\n", + (SSS_IMAGE_COLD_ALL_IN | SSS_IMAGE_CFG_ALL_IN), mask); + + return false; +} + +static bool sss_check_image_device_id(struct sss_hwdev *hwdev, u32 dev_id) +{ + struct sss_cmd_board_info info = {0}; + + if (sss_chip_get_board_info(hwdev, &info.info) != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get board info\n"); + return false; + } + + if (dev_id == info.info.board_type) + return true; + + sdk_err(hwdev->dev_hdl, + "The image device type: 0x%x don't match the fw dev id: 0x%x\n", + dev_id, info.info.board_type); + + return false; +} + +static void sss_init_update_cmd_param(struct sss_cmd_update_firmware *cmd_update, + struct sss_firmware_section *info, int remain, + u32 send_offset) +{ + cmd_update->ctl_info.sl = (remain <= SSS_FW_FRAGMENT_MAX_LEN) ? true : false; + cmd_update->ctl_info.sf = (remain == info->section_len) ? true : false; + cmd_update->ctl_info.bit_signed = info->section_flag & 0x1; + cmd_update->ctl_info.fragment_len = min(remain, SSS_FW_FRAGMENT_MAX_LEN); + + cmd_update->section_info.section_crc = info->section_crc; + cmd_update->section_info.section_type = info->section_type; + + cmd_update->section_version = info->section_version; + cmd_update->section_len = info->section_len; + cmd_update->section_offset = send_offset; +} + +static int sss_chip_update_firmware(struct sss_hwdev *hwdev, + struct sss_cmd_update_firmware *cmd_update) +{ + int ret; + u16 out_len = sizeof(*cmd_update); + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, + SSS_COMM_MGMT_CMD_UPDATE_FW, cmd_update, sizeof(*cmd_update), + cmd_update, &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_update)) { + sdk_err(hwdev->dev_hdl, + "Fail to update fw, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_update->head.state, out_len); + return (cmd_update->head.state != 0) ? 
+ cmd_update->head.state : -EIO; + } + + return 0; +} + +static int sss_update_firmware(struct sss_hwdev *hwdev, const u8 *data, + struct sss_host_image *host_image) +{ + int ret; + int remain; + u32 i; + u32 send_offset; + u32 offset; + bool flag = false; + struct sss_cmd_update_firmware *cmd_update = NULL; + + cmd_update = kzalloc(sizeof(*cmd_update), GFP_KERNEL); + if (!cmd_update) + return -ENOMEM; + + for (i = 0; i < host_image->section_cnt; i++) { + offset = host_image->section_info[i].section_offset; + remain = (int)(host_image->section_info[i].section_len); + send_offset = 0; + + while (remain > 0) { + if (flag) { + cmd_update->total_len = 0; + } else { + cmd_update->total_len = host_image->image_info.total_len; + flag = true; + } + + sss_init_update_cmd_param(cmd_update, &host_image->section_info[i], + remain, send_offset); + + memcpy(cmd_update->data, + ((data + SSS_FW_IMAGE_HEAD_SIZE) + offset) + send_offset, + cmd_update->ctl_info.fragment_len); + + ret = sss_chip_update_firmware(hwdev, cmd_update); + if (ret != 0) { + kfree(cmd_update); + return ret; + } + + send_offset += cmd_update->ctl_info.fragment_len; + remain = (int)(host_image->section_info[i].section_len - send_offset); + } + } + + kfree(cmd_update); + + return 0; +} + +static int sss_flash_update_notify(struct devlink *devlink, + const struct firmware *fw, struct sss_host_image *image, + struct netlink_ext_ack *extack) +{ + struct sss_devlink *devlink_dev = devlink_priv(devlink); + struct sss_hwdev *hwdev = devlink_dev->hwdev; + int ret; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_begin_notify(devlink); +#endif + devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0); + sdk_info(hwdev->dev_hdl, "Flash firmware begin\n"); + ret = sss_update_firmware(hwdev, fw->data, image); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to flash firmware, ret: %d\n", ret); + NL_SET_ERR_MSG_MOD(extack, "Fail to flash firmware"); + devlink_flash_update_status_notify(devlink, "Fail to flash firmware", NULL, 0, 0); + } else { + sdk_info(hwdev->dev_hdl, "Flash firmware end\n"); + devlink_flash_update_status_notify(devlink, "Flash firmware end", NULL, 0, 0); + } +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + devlink_flash_update_end_notify(devlink); +#endif + + return ret; +} + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM +static int sss_devlink_flash_update(struct devlink *link, const char *file_name, + const char *component, struct netlink_ext_ack *extack) +#else +static int sss_devlink_flash_update(struct devlink *link, + struct devlink_flash_update_params *param, + struct netlink_ext_ack *extack) +#endif +{ + int ret; + struct sss_host_image *host_image = NULL; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + const struct firmware *fw = NULL; +#else + const struct firmware *fw = param->fw; +#endif + + host_image = kzalloc(sizeof(*host_image), GFP_KERNEL); + if (!host_image) { + ret = -ENOMEM; + goto alloc_host_image_err; + } + +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + ret = request_firmware_direct(&fw, file_name, hwdev->dev_hdl); +#else +#ifdef HAVE_DEVLINK_FW_FILE_NAME_MEMBER + ret = request_firmware_direct(&fw, param->file_name, hwdev->dev_hdl); +#else + ret = 0; +#endif +#endif + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request firmware\n"); + goto request_fw_err; + } + + if (!sss_check_image_valid(hwdev, (struct sss_firmware_image *)fw->data, + (u32)(fw->size))) { + sdk_err(hwdev->dev_hdl, 
"Fail to check image valid\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image valid"); + ret = -EINVAL; + goto check_image_err; + } + + sss_init_host_image(host_image, (struct sss_firmware_image *)fw->data); + + if (!sss_check_image_integrity(hwdev, host_image)) { + sdk_err(hwdev->dev_hdl, "Fail to check image integrity\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image integrity"); + ret = -EINVAL; + goto check_image_err; + } + + if (!sss_check_image_device_id(hwdev, host_image->device_id)) { + sdk_err(hwdev->dev_hdl, "Fail to check image device id\n"); + NL_SET_ERR_MSG_MOD(extack, "Fail to check image device id"); + ret = -EINVAL; + goto check_image_err; + } + + ret = sss_flash_update_notify(link, fw, host_image, extack); + +check_image_err: +#ifdef HAVE_DEVLINK_FW_FILE_NAME_PARAM + release_firmware(fw); +#endif + +request_fw_err: + kfree(host_image); + +alloc_host_image_err: + link_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + link_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX; + + return ret; +} + +static const struct devlink_ops g_devlink_ops = { +#ifdef DEVLINK_HAVE_SUPPORTED_FLASH_UPDATE_PARAMS + .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT, +#endif + .flash_update = sss_devlink_flash_update, +}; + +static int sss_chip_activate_firmware(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_activate_firmware cmd_activate = {0}; + u16 out_len = sizeof(cmd_activate); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF && + SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_activate.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_ACTIVE_FW, + &cmd_activate, sizeof(cmd_activate), &cmd_activate, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_activate)) { + sdk_err(hwdev->dev_hdl, + "Fail to activate firmware, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_activate.head.state, out_len); + return (cmd_activate.head.state != 0) ? 
+ cmd_activate.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->active_cfg_id; + + return 0; +} + +static int sss_devlink_set_activate_fw_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->active_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin activate firmware\n"); + + ret = sss_chip_activate_firmware(hwdev, link_dev->active_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to activate firmware, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End activate firmware\n"); + + return 0; +} + +static int sss_chip_switch_config(struct sss_hwdev *hwdev, u8 cfg_num) +{ + int ret; + struct sss_cmd_switch_config cmd_switch = {0}; + u16 out_len = sizeof(cmd_switch); + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PF) + return -EOPNOTSUPP; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + cmd_switch.index = cfg_num; + + ret = sss_sync_send_adm_msg(hwdev, SSS_MOD_TYPE_COMM, SSS_COMM_MGMT_CMD_SWITCH_CFG, + &cmd_switch, sizeof(cmd_switch), &cmd_switch, + &out_len, SSS_FW_UPDATE_MGMT_TIMEOUT); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_switch)) { + sdk_err(hwdev->dev_hdl, + "Fail to switch cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_switch.head.state, out_len); + return (cmd_switch.head.state != 0) ? + cmd_switch.head.state : -EIO; + } + + return 0; +} + +static int sss_devlink_get_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + struct sss_devlink *link_dev = devlink_priv(link); + + param_ctx->val.vu8 = link_dev->switch_cfg_id; + + return 0; +} + +static int sss_devlink_set_switch_config(struct devlink *link, u32 id, + struct devlink_param_gset_ctx *param_ctx) +{ + int ret; + struct sss_devlink *link_dev = devlink_priv(link); + struct sss_hwdev *hwdev = link_dev->hwdev; + + link_dev->switch_cfg_id = param_ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Begin switch cfg"); + + ret = sss_chip_switch_config(hwdev, link_dev->switch_cfg_id - 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to switch cfg, ret: %d\n", ret); + return ret; + } + + sdk_info(hwdev->dev_hdl, "End Switch cfg\n"); + + return 0; +} + +static int sss_devlink_validate_firmware_config(struct devlink *link, u32 id, + union devlink_param_value param_val, + struct netlink_ext_ack *ext_ack) +{ + struct sss_hwdev *hwdev = SSS_LINK_HWDEV(link); + + if (param_val.vu8 < SSS_FW_CFG_MIN_INDEX || + param_val.vu8 > SSS_FW_CFG_MAX_INDEX) { + sdk_err(hwdev->dev_hdl, "Firmware cfg id out of range [1,8]\n"); + NL_SET_ERR_MSG_MOD(ext_ack, "Firmware cfg id out of range [1,8]\n"); + return -ERANGE; + } + + return 0; +} + +static const struct devlink_param g_devlink_param[] = { + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_ACTIVATE_FW, + "activate_fw", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_activate_fw_config, + sss_devlink_set_activate_fw_config, + sss_devlink_validate_firmware_config), + DEVLINK_PARAM_DRIVER(SSS_DEVLINK_PARAM_ID_SWITCH_CFG, + "switch_cfg", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + sss_devlink_get_switch_config, + sss_devlink_set_switch_config, + sss_devlink_validate_firmware_config), +}; + +int 
sss_init_devlink(struct sss_hwdev *hwdev)
+{
+	int ret;
+	struct devlink *link = NULL;
+	struct pci_dev *pdev = hwdev->pcidev_hdl;
+
+	link = devlink_alloc(&g_devlink_ops, sizeof(struct sss_devlink));
+	if (!link) {
+		sdk_err(hwdev->dev_hdl, "Fail to alloc devlink\n");
+		return -ENOMEM;
+	}
+
+	hwdev->devlink_dev = devlink_priv(link);
+	hwdev->devlink_dev->hwdev = hwdev;
+	hwdev->devlink_dev->switch_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
+	hwdev->devlink_dev->active_cfg_id = SSS_FW_CFG_DEFAULT_INDEX;
+
+#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
+	ret = devlink_params_register(link, g_devlink_param,
+				      ARRAY_SIZE(g_devlink_param));
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
+		goto register_err;
+	}
+#endif
+
+	ret = devlink_register(link, &pdev->dev);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink\n");
+#ifdef REGISTER_DEVLINK_PARAMETER_PREFERRED
+		devlink_params_unregister(link, g_devlink_param,
+					  ARRAY_SIZE(g_devlink_param));
+#endif
+		goto register_err;
+	}
+
+#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
+	ret = devlink_params_register(link, g_devlink_param,
+				      ARRAY_SIZE(g_devlink_param));
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to register devlink param\n");
+		goto register_param_err;
+	}
+#endif
+	devlink_params_publish(link);
+
+	return 0;
+
+#ifndef REGISTER_DEVLINK_PARAMETER_PREFERRED
+register_param_err:
+	devlink_unregister(link);
+#endif
+
+register_err:
+	devlink_free(link);
+
+	return ret;
+}
+
+void sss_deinit_devlink(struct sss_hwdev *hwdev)
+{
+	struct devlink *link = priv_to_devlink(hwdev->devlink_dev);
+
+	devlink_params_unpublish(link);
+	devlink_params_unregister(link, g_devlink_param,
+				  ARRAY_SIZE(g_devlink_param));
+	devlink_unregister(link);
+	devlink_free(link);
+}
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h
new file mode 100644
index 0000000000000000000000000000000000000000..32714685d1612d6ccbd025ed289bd04f9e3bee9b
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_link.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#ifndef SSS_HWDEV_LINK_H
+#define SSS_HWDEV_LINK_H
+
+#include "sss_kernel.h"
+#include "sss_hwdev.h"
+#include "sss_hw_mbx_msg.h"
+
+int sss_init_devlink(struct sss_hwdev *hwdev);
+void sss_deinit_devlink(struct sss_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c
new file mode 100644
index 0000000000000000000000000000000000000000..42f0c1fa15abb01ec8678b3daac7cd01b7676b80
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt
+
+#include
+#include
+
+#include "sss_kernel.h"
+#include "sss_hw.h"
+#include "sss_hwdev.h"
+#include "sss_hwdev_api.h"
+#include "sss_hwdev_mgmt_channel.h"
+#include "sss_hwif_mbx.h"
+#include "sss_hwif_mbx_init.h"
+#include "sss_hwif_aeq.h"
+#include "sss_hwif_export.h"
+#include "sss_hwif_api.h"
+#include "sss_hwif_adm_init.h"
+#include "sss_hwif_mgmt_init.h"
+#include "sss_hwif_ctrlq_init.h"
+#include "sss_csr.h"
+
+#define SSS_DRV_FEATURE_DEF \
+	(SSS_COMM_F_ADM | SSS_COMM_F_CLP | SSS_COMM_F_MBX_SEGMENT | \
+	SSS_COMM_F_CTRLQ_NUM | SSS_COMM_F_VIRTIO_VQ_SIZE)
+
+#define 
SSS_COMM_SUPPORT_CLP(hwdev) \ + ((hwdev)->features[0] & SSS_COMM_F_CLP) + +#define SSS_DMA_ATTR_INDIR_ID_SHIFT 0 +#define SSS_DMA_ATTR_INDIR_ID_MASK 0x3FF + +#define SSS_SET_DMA_ATTR_INDIR_ID(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_INDIR_##member##_MASK) << \ + SSS_DMA_ATTR_INDIR_##member##_SHIFT) + +#define SSS_CLEAR_DMA_ATTR_INDIR_ID(val, member) \ + ((val) & (~(SSS_DMA_ATTR_INDIR_##member##_MASK \ + << SSS_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define SSS_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define SSS_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define SSS_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define SSS_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define SSS_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define SSS_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define SSS_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define SSS_SET_DMA_ATTR_ENTRY(val, member) \ + (((u32)(val) & SSS_DMA_ATTR_ENTRY_##member##_MASK) << \ + SSS_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define SSS_PCIE_ST_DISABLE 0 +#define SSS_PCIE_AT_DISABLE 0 +#define SSS_PCIE_PH_DISABLE 0 + +#define SSS_PCIE_MSIX_ATTR_ENTRY 0 + +#define SSS_PCIE_SNOOP 0 +#define SSS_PCIE_NO_SNOOP 1 + +#define SSS_PCIE_TPH_DISABLE 0 +#define SSS_PCIE_TPH_ENABLE 1 + +#define SSS_FAULT_LEVEL_STR_FATAL "fatal" +#define SSS_FAULT_LEVEL_STR_RESET "reset" +#define SSS_FAULT_LEVEL_STR_HOST "host" +#define SSS_FAULT_LEVEL_STR_FLR "flr" +#define SSS_FAULT_LEVEL_STR_GENERAL "general" +#define SSS_FAULT_LEVEL_STR_SUGGESTION "suggestion" +#define SSS_FAULT_LEVEL_STR_UNKNOWN "Unknown" + +#define SSS_FAULT_TYPE_STR_CHIP "chip" +#define SSS_FAULT_TYPE_STR_NPU "ucode" +#define SSS_FAULT_TYPE_STR_MEM_RD "mem rd timeout" +#define SSS_FAULT_TYPE_STR_MEM_WR "mem wr timeout" +#define SSS_FAULT_TYPE_STR_REG_RD "reg rd timeout" +#define SSS_FAULT_TYPE_STR_REG_WR "reg wr timeout" +#define SSS_FAULT_TYPE_STR_PHY "phy fault" +#define SSS_FAULT_TYPE_STR_TSENSOR "tsensor fault" +#define SSS_FAULT_TYPE_STR_UNKNOWN "Unknown" + +#define SSS_COMM_RESET_TYPE \ + ((1 << SSS_RESET_TYPE_COMM) | (1 << SSS_RESET_TYPE_COMM_CMD_CH) | \ + (1 << SSS_RESET_TYPE_FLUSH_BIT) | (1 << SSS_RESET_TYPE_MQM) | \ + (1 << SSS_RESET_TYPE_SMF) | (1 << SSS_RESET_TYPE_PF_BW_CFG)) + +#define SSS_FOUR_REG_LEN 16 + +#define SSS_X_CSR_INDEX 30 +#define SSS_DUMP_16B_PER_LINE 16 +#define SSS_DUMP_4_VAR_PER_LINE 4 + +typedef void (*sss_print_err_handler_t)(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event); + +typedef void (*sss_mgmt_event_handler_t)(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +struct sss_mgmt_event { + u16 event_type; + sss_mgmt_event_handler_t handler; +}; + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static void sss_show_watchdog_mgmt_register_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u64 *reg = NULL; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n", + watchdog_info->cur_time_h, watchdog_info->cur_time_l, + watchdog_info->task_id, watchdog_info->sp); + + sdk_err(hwdev->dev_hdl, + "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n", + watchdog_info->cur_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", + 
watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far, + watchdog_info->esr, watchdog_info->xzr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + reg = &watchdog_info->x30; + for (i = 0; i <= SSS_X_CSR_INDEX; i++) + sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", + SSS_X_CSR_INDEX - i, reg[i]); +} + +static void sss_show_watchdog_stack_info(struct sss_hwdev *hwdev, + struct sss_watchdog_info *watchdog_info) +{ + u32 i; + u32 j; + u32 tmp; + u32 stack_len; + u32 *dump_addr = NULL; + + if (watchdog_info->stack_actlen <= SSS_STACK_DATA_LEN) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = SSS_STACK_DATA_LEN; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / SSS_DUMP_16B_PER_LINE); i++) { + dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * SSS_DUMP_16B_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3)); + } + + tmp = (stack_len % SSS_DUMP_16B_PER_LINE) / SSS_DUMP_4_VAR_PER_LINE; + for (j = 0; j < tmp; j++) { + dump_addr = (u32 *)(watchdog_info->stack_data + + (u32)(i * SSS_DUMP_16B_PER_LINE + j * SSS_DUMP_4_VAR_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } +} + +static void sss_show_watchdog_timeout_info(struct sss_hwdev *hwdev, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct sss_watchdog_info *watchdog_info = buf_in; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + sss_show_watchdog_mgmt_register_info(hwdev, watchdog_info); + sss_show_watchdog_stack_info(hwdev, watchdog_info); + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->head.state = 0; +} + +static void sss_watchdog_timeout_event_handler(void *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct sss_event_info event_info = {0}; + struct sss_hwdev *dev = hwdev; + + sss_show_watchdog_timeout_info(dev, buf_in, in_size, buf_out, out_size); + + if (dev->event_handler) { + event_info.type = SSS_EVENT_MGMT_WATCHDOG; + dev->event_handler(dev->event_handler_data, &event_info); + } +} + +static void sss_show_exc_info(struct sss_hwdev *hwdev, struct sss_exc_info *exc_info) +{ + u32 i; + + /* key information */ + sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n"); + sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n", + exc_info->cpu_tick.tick_cnt_h, exc_info->cpu_tick.tick_cnt_l); + sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause); + sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver); + sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver); + sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type); + sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id); + sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type); + sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id); + sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order); + sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt); + sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno); + sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", 
exc_info->uw_sp);
+	sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom);
+
+	/* register field */
+	sdk_err(hwdev->dev_hdl, "Register contents when exception occurs.\n");
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0",
+		exc_info->reg_info.ttbr0, "TTBR1", exc_info->reg_info.ttbr1);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR",
+		exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR",
+		exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTEL",
+		exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR",
+		exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR",
+		exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr);
+	sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr);
+
+	for (i = 0; i < SSS_XREGS_NUM - 1; i += 0x2)
+		sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx",
+			i, " ", exc_info->reg_info.xregs[i],
+			(u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]);
+
+	sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", SSS_XREGS_NUM - 1, " ",
+		exc_info->reg_info.xregs[SSS_XREGS_NUM - 1]);
+}
+
+static void sss_lastword_report_event_handler(void *hwdev,
+					      void *buf_in, u16 in_size,
+					      void *buf_out, u16 *out_size)
+{
+	struct sss_lastword_info *lastword_info = buf_in;
+	struct sss_exc_info *exc_info = &lastword_info->stack_info;
+	u32 stack_len = lastword_info->stack_actlen;
+	struct sss_hwdev *dev = hwdev;
+	u32 *curr_reg = NULL;
+	u32 reg_i;
+	u32 cnt;
+
+	if (in_size != sizeof(*lastword_info)) {
+		sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %ld\n",
+			in_size, sizeof(*lastword_info));
+		return;
+	}
+
+	sss_show_exc_info(dev, exc_info);
+
+	/* call stack dump */
+	sdk_err(dev->dev_hdl, "Dump stack when exception occurs, 16 bytes per line.\n");
+
+	cnt = stack_len / SSS_FOUR_REG_LEN;
+	for (reg_i = 0; reg_i < cnt; reg_i++) {
+		curr_reg = (u32 *)(lastword_info->stack_data +
+				   ((u64)(u32)(reg_i * SSS_FOUR_REG_LEN)));
+		sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			*curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3));
+	}
+
+	sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n");
+}
+
+const struct sss_mgmt_event g_mgmt_event_handler[] = {
+	{
+		.event_type = SSS_COMM_MGMT_CMD_FAULT_REPORT,
+		.handler = sss_fault_event_handler,
+	},
+
+	{
+		.event_type = SSS_COMM_MGMT_CMD_WATCHDOG_INFO,
+		.handler = sss_watchdog_timeout_event_handler,
+	},
+
+	{
+		.event_type = SSS_COMM_MGMT_CMD_LASTWORD_GET,
+		.handler = sss_lastword_report_event_handler,
+	},
+};
+
+static void sss_print_chip_fault(struct sss_hwdev *hwdev,
+				 struct sss_fault_event *fault_event)
+{
+	u8 err_level;
+	char *level_str = NULL;
+	char *fault_level[SSS_FAULT_LEVEL_MAX] = {
+		SSS_FAULT_LEVEL_STR_FATAL, SSS_FAULT_LEVEL_STR_RESET,
+		SSS_FAULT_LEVEL_STR_HOST, SSS_FAULT_LEVEL_STR_FLR,
+		SSS_FAULT_LEVEL_STR_GENERAL, SSS_FAULT_LEVEL_STR_SUGGESTION
+	};
+
+	err_level = fault_event->info.chip.err_level;
+	if (err_level < SSS_FAULT_LEVEL_MAX)
+		level_str = fault_level[err_level];
+	else
+		level_str = SSS_FAULT_LEVEL_STR_UNKNOWN;
+
+	if (err_level == SSS_FAULT_LEVEL_SERIOUS_FLR)
+		
sdk_err(hwdev->dev_hdl, "Err_level: %u [%s], func_id: %u\n", + err_level, level_str, fault_event->info.chip.func_id); + + sdk_err(hwdev->dev_hdl, "Node_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + fault_event->info.chip.node_id, fault_event->info.chip.err_type, + err_level, level_str, + fault_event->info.chip.err_csr_addr, fault_event->info.chip.err_csr_value); +} + +static void sss_print_ucode_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + fault_event->info.ucode.cause_id, fault_event->info.ucode.core_id, + fault_event->info.ucode.c_id, fault_event->info.ucode.epc); +} + +static void sss_print_mem_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_id: 0x%08x\n", + fault_event->info.mem_timeout.err_csr_ctrl, + fault_event->info.mem_timeout.err_csr_data, + fault_event->info.mem_timeout.ctrl_tab, fault_event->info.mem_timeout.mem_id); +} + +static void sss_print_reg_rw_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", fault_event->info.reg_timeout.err_csr); +} + +static void sss_print_phy_err(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + fault_event->info.phy_fault.op_type, fault_event->info.phy_fault.port_id, + fault_event->info.phy_fault.dev_ad, fault_event->info.phy_fault.csr_addr, + fault_event->info.phy_fault.op_data); +} + +static void sss_print_fault_info(struct sss_hwdev *hwdev, + struct sss_fault_event *fault_event) +{ + struct sss_fault_event_stats *event_stats = &hwdev->hw_stats.fault_event_stats; + char *type = NULL; + char *fault_type[SSS_FAULT_TYPE_MAX] = { + SSS_FAULT_TYPE_STR_CHIP, SSS_FAULT_TYPE_STR_NPU, + SSS_FAULT_TYPE_STR_MEM_RD, SSS_FAULT_TYPE_STR_MEM_WR, + SSS_FAULT_TYPE_STR_REG_RD, SSS_FAULT_TYPE_STR_REG_WR, + SSS_FAULT_TYPE_STR_PHY, SSS_FAULT_TYPE_STR_TSENSOR + }; + sss_print_err_handler_t print_handler[] = { + sss_print_chip_fault, sss_print_ucode_err, + sss_print_mem_rw_err, sss_print_mem_rw_err, + sss_print_reg_rw_err, sss_print_reg_rw_err, + sss_print_phy_err + }; + + if (fault_event->type < SSS_FAULT_TYPE_MAX) { + type = fault_type[fault_event->type]; + atomic_inc(&event_stats->fault_type_stat[fault_event->type]); + } else { + type = SSS_FAULT_TYPE_STR_UNKNOWN; + } + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + sss_get_global_func_id(hwdev)); + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", fault_event->type, type); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + fault_event->info.val[0x0], fault_event->info.val[0x1], + fault_event->info.val[0x2], fault_event->info.val[0x3]); + + sss_dump_chip_err_info(hwdev); + + if (fault_event->type >= ARRAY_LEN(print_handler)) + return; + + print_handler[fault_event->type](hwdev, fault_event); +} + +static void sss_fault_event_handler(void *data, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_hwdev *hwdev = data; + struct sss_cmd_fault_event *cmd_event = in_buf; + struct sss_event_info info; + struct sss_fault_event *fault_event = (void *)info.event_data; + + if (in_size != sizeof(*cmd_event)) { + sdk_err(hwdev->dev_hdl, "Invalid size: %u.\n", in_size); + 
return; + } + + sss_print_fault_info(hwdev, &cmd_event->fault_event); + + if (hwdev->event_handler) { + info.type = SSS_EVENT_FAULT; + info.service = SSS_EVENT_SRV_COMM; + memcpy(info.event_data, &cmd_event->fault_event, sizeof(cmd_event->fault_event)); + fault_event->fault_level = (cmd_event->fault_event.type == SSS_FAULT_TYPE_CHIP) ? + cmd_event->fault_event.info.chip.err_level : + SSS_FAULT_LEVEL_FATAL; + hwdev->event_handler(hwdev->event_handler_data, &info); + } +} + +static void sss_pf_handle_mgmt_event(void *data, u16 event_type, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + u32 i; + u32 num = ARRAY_LEN(g_mgmt_event_handler); + + for (i = 0; i < num; i++) { + if (event_type == g_mgmt_event_handler[i].event_type && + g_mgmt_event_handler[i].handler) { + g_mgmt_event_handler[i].handler(data, in_buf, in_size, + out_buf, out_size); + return; + } + } + + *out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + sdk_warn(SSS_TO_DEV(data), "Unsupported mgmt event %u.\n", event_type); +} + +static int sss_hwdev_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_mbx(hwdev); + if (ret != 0) + return ret; + + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MBX_FROM_FUNC, sss_recv_mbx_aeq_handler); + sss_aeq_register_hw_cb(hwdev, hwdev, SSS_MSG_FROM_MGMT, sss_mgmt_msg_aeqe_handler); + + set_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + + return 0; +} + +static void sss_hwdev_deinit_mbx(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MBX_FROM_FUNC); + + if (!SSS_IS_VF(hwdev)) { + sss_unregister_pf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + } else { + sss_unregister_vf_mbx_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + } + + sss_hwif_deinit_mbx(hwdev); +} + +static int sss_chip_get_global_attr(struct sss_hwdev *hwdev) +{ + int ret = 0; + struct sss_cmd_get_glb_attr attr_cmd = {0}; + u16 out_len = sizeof(attr_cmd); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &attr_cmd, sizeof(attr_cmd), &attr_cmd, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr_cmd)) { + sdk_err(((struct sss_hwdev *)hwdev)->dev_hdl, + "Fail to get global attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr_cmd.head.state, out_len); + return -EIO; + } + + memcpy(&hwdev->glb_attr, &attr_cmd.attr, sizeof(hwdev->glb_attr)); + + return 0; +} + +static int sss_chip_get_feature(struct sss_hwdev *hwdev) +{ + int i; + int ret; + u64 feature[SSS_MAX_FEATURE_QWORD] = {SSS_DRV_FEATURE_DEF, 0, 0, 0}; + + ret = sss_chip_do_nego_feature(hwdev, SSS_MGMT_MSG_GET_CMD, + hwdev->features, SSS_MAX_FEATURE_QWORD); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to get comm feature\n"); + return ret; + } + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_PPF) + feature[0] |= SSS_COMM_F_CHANNEL_DETECT; + + for (i = 0; i < SSS_MAX_FEATURE_QWORD; i++) + hwdev->features[i] &= feature[i]; + + return 0; +} + +static int sss_get_global_info(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_chip_get_board_info(hwdev, &hwdev->board_info); + if (ret != 0) + return ret; + + ret = sss_chip_get_feature(hwdev); + if (ret != 0) + return ret; + + ret = sss_chip_get_global_attr(hwdev); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_hwdev_deinit_adm(struct sss_hwdev *hwdev) +{ + if (sss_get_func_type(hwdev) == 
SSS_FUNC_TYPE_VF) + return; + + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_unregister_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM); + + sss_aeq_unregister_hw_cb(hwdev, SSS_MSG_FROM_MGMT); + + sss_hwif_deinit_adm(hwdev); +} + +static int sss_hwdev_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = sss_hwif_init_adm(hwdev); + if (ret != 0) + return ret; + + sss_register_mgmt_msg_handler(hwdev, SSS_MOD_TYPE_COMM, hwdev, + sss_pf_handle_mgmt_event); + + set_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state); + + return 0; +} + +static int sss_chip_set_dma_attr_table(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_cmd_dma_attr_config attr = {0}; + u16 out_len = sizeof(attr); + + attr.ph = SSS_PCIE_PH_DISABLE; + attr.at = SSS_PCIE_AT_DISABLE; + attr.st = SSS_PCIE_ST_DISABLE; + attr.no_snooping = SSS_PCIE_SNOOP; + attr.tph_en = SSS_PCIE_TPH_DISABLE; + attr.func_id = sss_get_global_func_id(hwdev); + attr.entry_id = SSS_PCIE_MSIX_ATTR_ENTRY; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_DMA_ATTR, &attr, sizeof(attr), + &attr, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &attr)) { + sdk_err(hwdev->dev_hdl, + "Fail to set dma attr, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, attr.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_chip_init_dma_attr(struct sss_hwdev *hwdev) +{ + u32 set; + u32 get; + u32 dst; + + set = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR); + set = SSS_CLEAR_DMA_ATTR_INDIR_ID(set, ID); + set |= SSS_SET_DMA_ATTR_INDIR_ID(SSS_PCIE_MSIX_ATTR_ENTRY, ID); + + sss_chip_write_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_INDIR_ID_ADDR, set); + + /* make sure reset dma attr */ + wmb(); + + dst = SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_TPH_DISABLE, TPH_EN) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_SNOOP, NO_SNOOPING) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_ST_DISABLE, ST) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_AT_DISABLE, AT) | + SSS_SET_DMA_ATTR_ENTRY(SSS_PCIE_PH_DISABLE, PH); + get = sss_chip_read_reg(hwdev->hwif, SSS_CSR_DMA_ATTR_TBL_ADDR); + + if (get == dst) + return 0; + + return sss_chip_set_dma_attr_table(hwdev); +} + +static void sss_chip_set_pf_state(struct sss_hwdev *hwdev) +{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_ACTIVE_FLAG); +} + +static void sss_chip_reset_pf_state(struct sss_hwdev *hwdev) +{ + sss_chip_set_pf_status(hwdev->hwif, SSS_PF_STATUS_INIT); +} + +static int sss_init_basic_mgmt_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_aeq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init comm aeqs\n"); + return ret; + } + + ret = sss_hwdev_init_mbx(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init mbx\n"); + goto init_mbx_err; + } + + ret = sss_init_aeq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeqs msix attr\n"); + goto init_aeq_msix_attr_err; + } + + return 0; + +init_aeq_msix_attr_err: + sss_hwdev_deinit_mbx(hwdev); + +init_mbx_err: + sss_hwif_deinit_aeq(hwdev); + + return ret; +} + +static void sss_free_base_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_hwdev_deinit_mbx(hwdev); + sss_hwif_deinit_aeq(hwdev); +} + +int sss_init_mgmt_channel(struct sss_hwdev *hwdev) +{ + int ret; + + /* init aeq, mbx */ + ret = sss_init_basic_mgmt_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init basic mgmt channel\n"); + return ret; + } + + ret = 
sss_chip_reset_function(hwdev, sss_get_global_func_id(hwdev), + SSS_COMM_RESET_TYPE, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to reset func\n"); + goto out; + } + + ret = sss_get_global_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev attr\n"); + goto out; + } + + ret = sss_hwdev_init_adm(hwdev); + if (ret != 0) + goto out; + + ret = sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + true, SSS_CHANNEL_COMM); + if (ret != 0) + goto set_use_state_err; + + ret = sss_chip_init_dma_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init dma attr table\n"); + goto init_dma_attr_err; + } + + ret = sss_init_ctrlq_channel(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq channel\n"); + goto init_ctrlq_channel_err; + } + + sss_chip_set_pf_state(hwdev); + + ret = sss_aeq_register_swe_cb(hwdev, hwdev, SSS_STL_EVENT, sss_sw_aeqe_handler); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, + "Fail to register sw aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + return 0; + +register_ucode_aeqe_err: + sss_chip_reset_pf_state(hwdev); + sss_deinit_ctrlq_channel(hwdev); + +init_ctrlq_channel_err: +init_dma_attr_err: + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + +set_use_state_err: + sss_hwdev_deinit_adm(hwdev); + +out: + sss_free_base_mgmt_channel(hwdev); + + return ret; +} + +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev) +{ + sss_aeq_unregister_swe_cb(hwdev, SSS_STL_EVENT); + + sss_chip_reset_pf_state(hwdev); + + sss_deinit_ctrlq_channel(hwdev); + + sss_chip_set_func_used_state(hwdev, SSS_SVC_TYPE_COM, + false, SSS_CHANNEL_COMM); + + sss_hwdev_deinit_adm(hwdev); + + sss_free_base_mgmt_channel(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..f8ab14532b73dc8aacec5126f16624728e6add91 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_channel.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_CHANNEL_H +#define SSS_HWDEV_MGMT_CHANNEL_H + +#include "sss_hwdev.h" + +#define SSS_STACK_DATA_LEN 1024 +#define SSS_XREGS_NUM 31 +#define SSS_MPU_LASTWORD_SIZE 1024 + +struct sss_watchdog_info { + struct sss_mgmt_msg_head head; + + u32 cur_time_h; + u32 cur_time_l; + u32 task_id; + u32 rsvd; + + u64 pc; + u64 elr; + u64 spsr; + u64 far; + u64 esr; + u64 xzr; + u64 x30; + u64 x29; + u64 x28; + u64 x27; + u64 x26; + u64 x25; + u64 x24; + u64 x23; + u64 x22; + u64 x21; + u64 x20; + u64 x19; + u64 x18; + u64 x17; + u64 x16; + u64 x15; + u64 x14; + u64 x13; + u64 x12; + u64 x11; + u64 x10; + u64 x09; + u64 x08; + u64 x07; + u64 x06; + u64 x05; + u64 x04; + u64 x03; + u64 x02; + u64 x01; + u64 x00; + + u64 stack_top; + u64 stack_bottom; + u64 sp; + u32 cur_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 stack_data[SSS_STACK_DATA_LEN]; +}; + +struct sss_cpu_tick { + u32 tick_cnt_h; /* The cycle count higher 32 bits */ + u32 tick_cnt_l; /* The cycle count lower 32 bits */ +}; + +struct sss_ax_exc_reg_info { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 sctlr; + u64 vbar; + u64 current_el; + u64 sp; + u64 elr; + u64 spsr; + u64 far_r; + u64 esr; + u64 xzr; + u64 xregs[SSS_XREGS_NUM]; /* 0~30: x30~x0 */ +}; + +struct sss_exc_info { + char os_ver[48]; /* OS version */ + char 
app_ver[64]; /* Product version */ + u32 exc_cause; /* Cause of exception */ + u32 thread_type; /* The thread type before the exception */ + u32 thread_id; /* Thread PID before exception */ + u16 byte_order; /* Byte order */ + u16 cpu_type; /* CPU type */ + u32 cpu_id; /* CPU ID */ + struct sss_cpu_tick cpu_tick; /* CPU Tick */ + u32 nest_cnt; /* The exception nested count */ + u32 fatal_errno; /* Fatal error code */ + u64 uw_sp; /* The stack pointer before the exception */ + u64 stack_bottom; /* Bottom of the stack before the exception */ + + /* The in-core register context information,*/ + /* 82\57 must be at 152 bytes; if it has changed, */ + /* the OS_EXC_REGINFO_OFFSET macro in sre_platform.eh must be updated */ + struct sss_ax_exc_reg_info reg_info; +}; + +struct sss_lastword_info { + struct sss_mgmt_msg_head head; + struct sss_exc_info stack_info; + + /* Stack details, Actual stack size(<=1024) */ + u32 stack_actlen; + + /* More than 1024, it will be truncated */ + u8 stack_data[SSS_MPU_LASTWORD_SIZE]; +}; + +int sss_init_mgmt_channel(struct sss_hwdev *hwdev); +void sss_deinit_mgmt_channel(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c new file mode 100644 index 0000000000000000000000000000000000000000..9672cce1341b9c516e50d2ea1a9f11938b1037a0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_hw_svc_cap.h" +#include "sss_hwif_irq.h" + +static int sss_init_ceq_info(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + struct sss_eq_cfg *ceq = NULL; + + ceq_info->ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + ceq_info->remain_ceq_num = ceq_info->ceq_num; + mutex_init(&ceq_info->eq_mutex); + + sdk_info(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0x%x, remain_ceq_num = 0x%x\n", + ceq_info->ceq_num, ceq_info->remain_ceq_num); + + if (ceq_info->ceq_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt ceq info: ceq_num = 0\n"); + return -EFAULT; + } + + ceq = kcalloc(ceq_info->ceq_num, sizeof(*ceq), GFP_KERNEL); + if (!ceq) + return -ENOMEM; + + for (i = 0; i < ceq_info->ceq_num; i++) { + ceq[i].id = i + 1; + ceq[i].free = SSS_CFG_FREE; + ceq[i].type = SSS_SERVICE_TYPE_MAX; + } + ceq_info->eq = ceq; + + return 0; +} + +static void sss_deinit_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq_info *ceq_info = &hwdev->mgmt_info->eq_info; + + kfree(ceq_info->eq); +} + +int sss_init_mgmt_info(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mgmt_info *mgmt_info; + + mgmt_info = kzalloc(sizeof(*mgmt_info), GFP_KERNEL); + if (!mgmt_info) + return -ENOMEM; + + mgmt_info->hwdev = hwdev; + hwdev->mgmt_info = mgmt_info; + + ret = sss_init_ceq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq info, ret: %d\n", ret); + goto init_ceq_info_err; + } + + ret = sss_init_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init irq info, ret: %d\n", ret); + goto init_irq_info_err; + } + + return 0; + +init_irq_info_err: + sss_deinit_ceq_info(hwdev); + +init_ceq_info_err: + kfree(mgmt_info); + hwdev->mgmt_info = NULL; + + return ret; +} + +void sss_deinit_mgmt_info(struct sss_hwdev *hwdev) +{ + sss_deinit_irq_info(hwdev); + 
sss_deinit_ceq_info(hwdev); + + kfree(hwdev->mgmt_info); + hwdev->mgmt_info = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h new file mode 100644 index 0000000000000000000000000000000000000000..78beeba092afec758dbe1f16e4f244820454e004 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwdev_mgmt_info.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWDEV_MGMT_INFO_H +#define SSS_HWDEV_MGMT_INFO_H + +#include "sss_hwdev.h" + +int sss_init_mgmt_info(struct sss_hwdev *dev); +void sss_deinit_mgmt_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c new file mode 100644 index 0000000000000000000000000000000000000000..401186d3786a9a6477f4d909d41f3a0db532aa5e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_aeq.h" + +#define SSS_ADM_MSG_ELEM_DESC_SIZE 8 +#define SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE 8 +#define SSS_ADM_MSG_ELEM_WB_ADDR_SIZE 8 + +#define SSS_ADM_MSG_ELEM_ALIGNMENT 8 + +#define SSS_ADM_MSG_STATE_TIMEOUT 10000 + +/* adm_msg_state header */ +#define SSS_ADM_MSG_STATE_HEAD_VALID_SHIFT 0 +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_SHIFT 16 + +#define SSS_ADM_MSG_STATE_HEAD_VALID_MASK 0xFFU +#define SSS_ADM_MSG_STATE_HEAD_MSG_ID_MASK 0xFFU + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define SSS_ADM_MSG_STATE_HEAD_GET(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_HEAD_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_HEAD_##member##_MASK) + +enum sss_adm_msg_data_format { + SSS_SGL_TYPE = 1, +}; + +enum sss_adm_msg_opt { + SSS_ADM_MSG_WRITE = 0, + SSS_ADM_MSG_READ = 1, +}; + +enum sss_adm_msg_bypass { + SSS_NO_BYPASS = 0, + SSS_BYPASS = 1, +}; + +enum sss_adm_msg_reply_aeq { + SSS_NO_TRIGGER = 0, + SSS_TRIGGER = 1, +}; + +enum sss_adm_msg_chn_code { + SSS_ADM_MSG_CHANNEL_0 = 0, +}; + +enum sss_adm_msg_chn_rsvd { + SSS_VALID_MSG_CHANNEL = 0, + SSS_INVALID_MSG_CHANNEL = 1, +}; + +#define SSS_ADM_MSG_DESC_LEN 7 + +struct sss_msg_head { + u8 state; + u8 version; + u8 reply_aeq_num; + u8 rsvd0[5]; +}; + +#define SSS_MGMT_MSG_SIZE_MIN 20 +#define SSS_MGMT_MSG_SIZE_STEP 16 +#define SSS_MGMT_MSG_RSVD_FOR_DEV 8 + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SYNC_MSG_ID_MASK 0x7 +#define SSS_SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) +#define SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt) \ + ((pf_to_mgmt)->sync_msg_id = \ + ((pf_to_mgmt)->sync_msg_id + 1) & SSS_SYNC_MSG_ID_MASK) + +#define SSS_MGMT_MSG_TIMEOUT 20000 /* millisecond */ + +#define SSS_MSG_CB_USLEEP_MIN 900 +#define SSS_MSG_CB_USLEEP_MAX 1000 + +#define SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_len, mod, cmd, msg_id) \ + (SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | \ + SSS_SET_MSG_HEADER(mod, MODULE) | \ + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | \ + SSS_SET_MSG_HEADER(SSS_MSG_ACK, NO_ACK) | \ + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) 
| \ + SSS_SET_MSG_HEADER(0, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER(SSS_DIRECT_SEND_MSG, DIRECTION) | \ + SSS_SET_MSG_HEADER(cmd, CMD) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MGMT, SOURCE) | \ + SSS_SET_MSG_HEADER(func_id, SRC_GLB_FUNC_ID) | \ + SSS_SET_MSG_HEADER(msg_id, MSG_ID)) + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define SSSNIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define SSSNIC_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define SSSNIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define SSSNIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & SSSNIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + SSSNIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define SSSNIC_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_##member##_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + ((u16)(((val) >> SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + SSSNIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) + +static u8 sss_xor_chksum_set(void *data) +{ + int id; + u8 checksum = 0; + u8 *val = data; + + for (id = 0; id < SSS_ADM_MSG_DESC_LEN; id++) + checksum ^= val[id]; + + return checksum; +} + +static void sss_chip_set_pi(struct sss_adm_msg *adm_msg) +{ + enum sss_adm_msg_type msg_type = adm_msg->msg_type; + struct sss_hwif *hwif = SSS_TO_HWDEV(adm_msg)->hwif; + u32 hw_pi_addr = SSS_CSR_ADM_MSG_PI_ADDR(msg_type); + + sss_chip_write_reg(hwif, hw_pi_addr, adm_msg->pi); +} + +static u32 sss_chip_get_ci(struct sss_adm_msg *adm_msg) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + return SSS_GET_ADM_MSG_STATE(val, CI); +} + +static void sss_dump_adm_msg_reg(struct sss_adm_msg *adm_msg) +{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + u32 addr; + u32 val; + u16 pci_cmd = 0; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + sdk_err(dev, "Msg type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + adm_msg->msg_type, SSS_GET_ADM_MSG_STATE(val, CPLD_ERR), + SSS_GET_ADM_MSG_STATE(val, CHKSUM_ERR), + SSS_GET_ADM_MSG_STATE(val, FSM)); + + sdk_err(dev, "Adm msg hw current ci: 0x%x\n", + SSS_GET_ADM_MSG_STATE(val, CI)); + + addr = SSS_CSR_ADM_MSG_PI_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + sdk_err(dev, "Adm msg hw current pi: 0x%x\n", val); + pci_read_config_word(SSS_TO_HWDEV(adm_msg)->pcidev_hdl, PCI_COMMAND, &pci_cmd); + sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd); +} + +static int sss_adm_msg_busy(struct sss_adm_msg *adm_msg) +{ + void *dev = SSS_TO_HWDEV(adm_msg)->dev_hdl; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + u64 resp_header; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_MULTI_READ: + case SSS_ADM_MSG_POLL_READ: + resp_header = be64_to_cpu(ctx->reply_fmt->head); + if 
(ctx->state && !SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctx->state, adm_msg->pi, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + adm_msg->ci = sss_chip_get_ci(adm_msg); + + if (adm_msg->ci == SSS_MASK_ID(adm_msg, adm_msg->pi + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + adm_msg->msg_type, adm_msg->ci, + adm_msg->pi); + sss_dump_adm_msg_reg(adm_msg); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", adm_msg->msg_type); + return -EINVAL; + } + + return 0; +} + +static void sss_prepare_elem_ctrl(u64 *elem_ctrl, enum sss_adm_msg_type msg_type) +{ + u64 control; + u8 chksum; + u16 elem_len = 0; + + switch (msg_type) { + case SSS_ADM_MSG_POLL_READ: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + SSS_ADM_MSG_ELEM_WB_ADDR_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + elem_len = ALIGN(SSS_ADM_MSG_ELEM_DESC_SIZE + + SSS_ADM_MSG_ELEM_DATA_ADDR_SIZE, SSS_ADM_MSG_ELEM_ALIGNMENT); + break; + default: + break; + } + + control = SSS_ADM_MSG_ELEM_CTRL_SET(SSS_SIZE_TO_8B(elem_len), ELEM_LEN) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + SSS_ADM_MSG_ELEM_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = sss_xor_chksum_set(&control); + + control |= SSS_ADM_MSG_ELEM_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *elem_ctrl = cpu_to_be64(control); +} + +static void sss_prepare_elem_desc(struct sss_adm_msg *adm_msg, + u8 node_id, u16 cmd_size) +{ + u32 priv; + struct sss_adm_msg_elem *elem = adm_msg->now_node; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + priv = SSS_READ_ADM_MSG_PRIV_DATA(adm_msg->msg_type, ctx->store_pi); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_READ, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case SSS_ADM_MSG_POLL_WRITE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_NO_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + break; + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + priv = SSS_WRITE_ADM_MSG_PRIV_DATA(adm_msg->msg_type); + elem->desc = SSS_ADM_MSG_DESC_SET(SSS_SGL_TYPE, SGL_TYPE) | + SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_WRITE, RD_WR) | + SSS_ADM_MSG_DESC_SET(SSS_NO_BYPASS, MGMT_BYPASS) | + SSS_ADM_MSG_DESC_SET(SSS_TRIGGER, REPLY_AEQE_EN) | + SSS_ADM_MSG_DESC_SET(priv, PRIV_DATA); + + break; + default: + sdk_err(((struct sss_hwdev *)adm_msg->hwdev)->dev_hdl, "Unknown Chain type: %d\n", + adm_msg->msg_type); + return; + } + + + elem->desc |= SSS_ADM_MSG_DESC_SET(SSS_ADM_MSG_CHANNEL_0, MSG_CHANNEL) | + SSS_ADM_MSG_DESC_SET(SSS_VALID_MSG_CHANNEL, MSG_VALID); + + elem->desc |= SSS_ADM_MSG_DESC_SET(node_id, DEST) | + 
SSS_ADM_MSG_DESC_SET(SSS_SIZE_TO_4B(cmd_size), SIZE); + + elem->desc |= SSS_ADM_MSG_DESC_SET(sss_xor_chksum_set(&elem->desc), XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + elem->desc = cpu_to_be64(elem->desc); +} + +static void sss_prepare_elem_ctx(struct sss_adm_msg *adm_msg, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem_ctx *elem_ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + memcpy(elem_ctx->adm_msg_vaddr, cmd, cmd_size); +} + +static void sss_prepare_elem(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct sss_adm_msg_elem *now_node = adm_msg->now_node; + + sss_prepare_elem_ctrl(&now_node->control, adm_msg->msg_type); + sss_prepare_elem_desc(adm_msg, node_id, cmd_size); + sss_prepare_elem_ctx(adm_msg, cmd, cmd_size); +} + +static inline void sss_adm_msg_increase_pi(struct sss_adm_msg *adm_msg) +{ + adm_msg->pi = SSS_MASK_ID(adm_msg, adm_msg->pi + 1); +} + +static void sss_issue_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_set_pi(adm_msg); +} + +static void sss_update_adm_msg_state(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_state *wb_state; + enum sss_adm_msg_type msg_type; + u64 status_header; + u32 desc_buf; + + wb_state = adm_msg->wb_state; + + desc_buf = be32_to_cpu(wb_state->desc_buf); + if (SSS_GET_ADM_MSG_STATE(desc_buf, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_state->head); + msg_type = SSS_ADM_MSG_STATE_HEAD_GET(status_header, MSG_ID); + if (msg_type >= SSS_ADM_MSG_MAX) + return; + + if (msg_type != adm_msg->msg_type) + return; + + adm_msg->ci = SSS_GET_ADM_MSG_STATE(desc_buf, CI); +} + +static enum sss_process_ret sss_wait_for_state_poll_handler(void *priv_data) +{ + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + sss_update_adm_msg_state(adm_msg); + /* SYNC ADM MSG cmd should start after prev cmd finished */ + if (adm_msg->ci == adm_msg->pi) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret check_cmd_resp_handler(void *priv_data) +{ + struct sss_adm_msg_elem_ctx *ctxt = priv_data; + u64 resp_header; + u8 resp_status; + + if (!SSS_TO_HWDEV(ctxt)->chip_present_flag) { + pr_err("Fail to resp chip present"); + return SSS_PROCESS_ERR; + } + + resp_header = be64_to_cpu(ctxt->reply_fmt->head); + rmb(); /* read the latest header */ + + if (SSSNIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + resp_status = SSSNIC_API_CMD_RESP_HEAD_GET(resp_header, STATUS); + if (resp_status) { + pr_err("Api chain response data err, status: %u\n", + resp_status); + return SSS_PROCESS_ERR; + } + + return SSS_PROCESS_OK; + } + + return SSS_PROCESS_DOING; +} + +static int sss_wait_for_state_poll(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_wait_for_state_poll_handler, + SSS_ADM_MSG_STATE_TIMEOUT, 100); /* wait 100 us once */ +} + +static int wait_for_resp_polling(struct sss_adm_msg_elem_ctx *ctx) +{ + return sss_check_handler_timeout(ctx, check_cmd_resp_handler, + POLLING_COMPLETION_TIMEOUT_DEFAULT, + USEC_PER_MSEC); +} + +static void copy_resp_data(struct sss_adm_msg_elem_ctx *ctx, void *ack, + u16 ack_size) +{ + struct sss_adm_msg_reply_fmt *resp = ctx->reply_fmt; + + memcpy(ack, &resp->reply, ack_size); + ctx->state = 0; +} + +static int sss_wait_for_adm_msg_completion(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem_ctx *ctx, + void *ack, u16 ack_size) +{ + int ret = 0; + + switch (adm_msg->msg_type) { + case SSS_ADM_MSG_POLL_READ: + ret = 
wait_for_resp_polling(ctx); + if (ret == 0) + copy_resp_data(ctx, ack, ack_size); + else + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "API CMD poll response timeout\n"); + break; + case SSS_ADM_MSG_POLL_WRITE: + case SSS_ADM_MSG_WRITE_TO_MGMT_MODULE: + ret = sss_wait_for_state_poll(adm_msg); + break; + case SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE: + /* No need to wait */ + break; + default: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unknown API CMD Chain type: %d\n", + adm_msg->msg_type); + ret = -EINVAL; + } + + if (ret) { + sss_dump_adm_msg_reg(adm_msg); + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Adm msg wait timeout,type :%d\n", + adm_msg->msg_type); + } + + return ret; +} + +static inline void sss_update_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + ctx->state = 1; + ctx->store_pi = adm_msg->pi; + if (ctx->reply_fmt) { + ctx->reply_fmt->head = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +static void sss_adm_msg_lock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_lock(&adm_msg->async_lock); + else + down(&adm_msg->sem); +} + +static void sss_adm_msg_unlock(struct sss_adm_msg *adm_msg) +{ + if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE) + spin_unlock(&adm_msg->async_lock); + else + up(&adm_msg->sem); +} + +static int sss_adm_msg_io(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + + sss_adm_msg_lock(adm_msg); + + ctx = &adm_msg->elem_ctx[adm_msg->pi]; + + if (sss_adm_msg_busy(adm_msg)) { + sss_adm_msg_unlock(adm_msg); + return -EBUSY; + } + + sss_update_adm_msg_ctx(adm_msg); + + sss_prepare_elem(adm_msg, node_id, cmd, cmd_size); + + sss_adm_msg_increase_pi(adm_msg); + + wmb(); /* make sure issue correctly the command */ + + sss_issue_adm_msg(adm_msg); + + adm_msg->now_node = adm_msg->elem_ctx[adm_msg->pi].elem_vaddr; + + sss_adm_msg_unlock(adm_msg); + + return sss_wait_for_adm_msg_completion(adm_msg, ctx, ack, ack_size); +} + +int sss_adm_msg_write(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 cmd_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, cmd_size, NULL, 0); +} + +int sss_adm_msg_read(struct sss_adm_msg *adm_msg, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size) +{ + return sss_adm_msg_io(adm_msg, node_id, cmd, size, ack, ack_size); +} + +static void sss_set_adm_event_flag(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_state = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static u16 sss_align_adm_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(SSS_MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > SSS_MGMT_MSG_SIZE_MIN) + msg_size = SSS_MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - SSS_MGMT_MSG_SIZE_MIN), SSS_MGMT_MSG_SIZE_STEP); + else + msg_size = SSS_MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +static void sss_encapsulate_adm_msg(u8 *adm_msg, u64 *header, + const void *body, int body_len) +{ + u8 *adm_msg_new = adm_msg; + + memset(adm_msg_new, 0, SSS_MGMT_MSG_RSVD_FOR_DEV); + + adm_msg_new += SSS_MGMT_MSG_RSVD_FOR_DEV; + memcpy(adm_msg_new, header, sizeof(*header)); + + adm_msg_new += sizeof(*header); + memcpy(adm_msg_new, body, (size_t)(u32)body_len); +} + +#define SSS_MAX_PF_MGMT_BUF_MAX 2048L + +int 
sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg *adm_mag = NULL; + + if (!hwdev || !cmd || (ack_size && !ack) || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_READ]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_read(adm_mag, dest, cmd, size, ack, ack_size); +} + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct sss_adm_msg *adm_mag = NULL; + + if (!hwdev || !size || !cmd || size > SSS_MAX_PF_MGMT_BUF_MAX) + return -EINVAL; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_POLL_WRITE]; + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return sss_adm_msg_write(adm_mag, dest, cmd, size); +} + +#define SSS_MSG_NO_RESP 0xFFFF + +static int sss_send_adm_msg(struct sss_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg_body, u16 msg_body_len) +{ + struct sss_hwif *hwif = SSS_TO_HWDEV(pf_to_mgmt)->hwif; + void *msg_buf = pf_to_mgmt->sync_buf; + u16 adm_msg_len = sss_align_adm_msg_len(msg_body_len); + u32 func_id = SSS_GET_HWIF_GLOBAL_ID(hwif); + u8 node_id = SSS_MGMT_CPU_NODE_ID(SSS_TO_HWDEV(pf_to_mgmt)); + u64 header; + struct sss_adm_msg *adm_mag; + + if (sss_get_dev_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (adm_msg_len > SSS_MSG_TO_MGMT_LEN_MAX) + return -EFAULT; + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_START); + + header = SSS_ENCAPSULATE_ADM_MSG_HEAD(func_id, msg_body_len, mod, + cmd, SSS_INCREASE_SYNC_MSG_ID(pf_to_mgmt)); + + sss_encapsulate_adm_msg((u8 *)msg_buf, &header, msg_body, msg_body_len); + + adm_mag = pf_to_mgmt->adm_msg[SSS_ADM_MSG_WRITE_TO_MGMT_MODULE]; + + return sss_adm_msg_write(adm_mag, node_id, msg_buf, adm_msg_len); +} + +static inline void sss_check_msg_body(u8 mod, void *buf_in) +{ + struct sss_msg_head *msg_head = NULL; + + /* set aeq fix num to 3, need to ensure response aeq id < 3 */ + if (mod == SSS_MOD_TYPE_COMM || mod == SSS_MOD_TYPE_L2NIC) { + msg_head = buf_in; + + if (msg_head->reply_aeq_num >= SSS_MAX_AEQ) + msg_head->reply_aeq_num = 0; + } +} + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct sss_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + struct sss_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + ulong timeo; + int err; + ulong ret; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) + return -EPERM; + + sss_check_msg_body(mod, buf_in); + + pf_to_mgmt = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_buf */ + down(&pf_to_mgmt->sync_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg; + recv_done = &recv_msg->done; + + init_completion(recv_done); + + err = sss_send_adm_msg(pf_to_mgmt, mod, cmd, buf_in, in_size); + if (err != 0) { + sdk_err(dev, "Fail to send adm msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? 
timeout : SSS_MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (ret == 0) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + sss_dump_aeq_info((struct sss_hwdev *)hwdev); + err = -ETIMEDOUT; + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (pf_to_mgmt->event_state == SSS_ADM_EVENT_TIMEOUT) { + spin_unlock(&pf_to_mgmt->sync_event_lock); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + spin_unlock(&pf_to_mgmt->sync_event_lock); + + sss_set_adm_event_flag(pf_to_mgmt, SSS_ADM_EVENT_END); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) { + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->buf_len) { + sdk_err(dev, + "Invalid resp msg len: %u out of range: %u, mod %d, cmd %u\n", + recv_msg->buf_len, *out_size, mod, cmd); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->buf_len) + memcpy(buf_out, recv_msg->buf, recv_msg->buf_len); + + *out_size = recv_msg->buf_len; + } + +unlock_sync_msg: + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_lock); + + return err; +} + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return -EFAULT; + + mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return -EINVAL; + + mgmt_msg->recv_data[mod_type] = data; + mgmt_msg->recv_handler[mod_type] = handler; + + set_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + return 0; +} +EXPORT_SYMBOL(sss_register_mgmt_msg_handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + + if (!hwdev || mod_type >= SSS_MOD_TYPE_HW_MAX) + return; + + mgmt_msg = ((struct sss_hwdev *)hwdev)->pf_to_mgmt; + if (!mgmt_msg) + return; + + clear_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod_type]); + + while (test_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod_type])) + usleep_range(SSS_MSG_CB_USLEEP_MIN, SSS_MSG_CB_USLEEP_MAX); + + mgmt_msg->recv_data[mod_type] = NULL; + mgmt_msg->recv_handler[mod_type] = NULL; +} +EXPORT_SYMBOL(sss_unregister_mgmt_msg_handler); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h new file mode 100644 index 0000000000000000000000000000000000000000..54cfe231e63130f7857ab8fecf553c4d5d5fdb36 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_H +#define SSS_HWIF_ADM_H + +#include +int sss_adm_msg_read_ack(void *hwdev, u8 dest, const void *cmd, + u16 size, void *ack, u16 ack_size); + +int sss_adm_msg_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int sss_sync_send_adm_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h new file mode 100644 index 0000000000000000000000000000000000000000..fc0d99e326adec9c227137b2fe374af9a311b6ac --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_common.h @@ -0,0 
+1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_COMMON_H +#define SSS_HWIF_ADM_COMMON_H + +#define SSS_ADM_MSG_AEQ_ID 2 + +#define SSS_WRITE_ADM_MSG_PRIV_DATA(id) (((u8)(id)) << 16) +#define SSS_READ_ADM_MSG_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) + +#define SSS_MASK_ID(adm_msg, id) \ + ((id) & ((adm_msg)->elem_num - 1)) + +#define SSS_SIZE_TO_4B(size) \ + (ALIGN((u32)(size), 4U) >> 2) +#define SSS_SIZE_TO_8B(size) \ + (ALIGN((u32)(size), 8U) >> 3) + +/* ADM_STATUS_0 CSR: 0x0030+adm msg id*0x080 */ +#define SSS_ADM_MSG_STATE_CI_MASK 0xFFFFFFU +#define SSS_ADM_MSG_STATE_CI_SHIFT 0 + +#define SSS_ADM_MSG_STATE_FSM_MASK 0xFU +#define SSS_ADM_MSG_STATE_FSM_SHIFT 24 + +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_MASK 0x3U +#define SSS_ADM_MSG_STATE_CHKSUM_ERR_SHIFT 28 + +#define SSS_ADM_MSG_STATE_CPLD_ERR_MASK 0x1U +#define SSS_ADM_MSG_STATE_CPLD_ERR_SHIFT 30 + +#define SSS_GET_ADM_MSG_STATE(val, member) \ + (((val) >> SSS_ADM_MSG_STATE_##member##_SHIFT) & \ + SSS_ADM_MSG_STATE_##member##_MASK) + +/* adm_msg_elem.desc structure */ +#define SSS_ADM_MSG_DESC_SGL_TYPE_SHIFT 0 +#define SSS_ADM_MSG_DESC_RD_WR_SHIFT 1 +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_SHIFT 2 +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_SHIFT 3 +#define SSS_ADM_MSG_DESC_MSG_VALID_SHIFT 4 +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_SHIFT 6 +#define SSS_ADM_MSG_DESC_PRIV_DATA_SHIFT 8 +#define SSS_ADM_MSG_DESC_DEST_SHIFT 32 +#define SSS_ADM_MSG_DESC_SIZE_SHIFT 40 +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_DESC_SGL_TYPE_MASK 0x1U +#define SSS_ADM_MSG_DESC_RD_WR_MASK 0x1U +#define SSS_ADM_MSG_DESC_MGMT_BYPASS_MASK 0x1U +#define SSS_ADM_MSG_DESC_REPLY_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_DESC_MSG_VALID_MASK 0x3U +#define SSS_ADM_MSG_DESC_MSG_CHANNEL_MASK 0x3U +#define SSS_ADM_MSG_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define SSS_ADM_MSG_DESC_DEST_MASK 0x1FU +#define SSS_ADM_MSG_DESC_SIZE_MASK 0x7FFU +#define SSS_ADM_MSG_DESC_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_DESC_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_DESC_##member##_MASK) << \ + SSS_ADM_MSG_DESC_##member##_SHIFT) + +/* adm_msg_elem structure */ +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_SHIFT 0 +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_SHIFT 56 + +#define SSS_ADM_MSG_ELEM_CTRL_ELEM_LEN_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define SSS_ADM_MSG_ELEM_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define SSS_ADM_MSG_ELEM_CTRL_SET(val, member) \ + ((((u64)(val)) & SSS_ADM_MSG_ELEM_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_ELEM_CTRL_##member##_SHIFT) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c new file mode 100644 index 0000000000000000000000000000000000000000..e418b29f4fc04e8c69169f78e1fb37ce490a9383 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_csr.h" +#include "sss_hwif_api.h" +#include 
"sss_hwif_adm_common.h" +#include "sss_hwif_mgmt_common.h" + +/* ADM_MSG_REQ CSR: 0x0020+adm_id*0x080 */ +#define SSS_ADM_MSG_REQ_RESTART_SHIFT 1 +#define SSS_ADM_MSG_REQ_WB_TRIGGER_SHIFT 2 + +#define SSS_ADM_MSG_REQ_RESTART_MASK 0x1U +#define SSS_ADM_MSG_REQ_WB_TRIGGER_MASK 0x1U + +#define SSS_SET_ADM_MSG_REQ(val, member) \ + (((val) & SSS_ADM_MSG_REQ_##member##_MASK) << \ + SSS_ADM_MSG_REQ_##member##_SHIFT) + +#define SSS_GET_ADM_MSG_REQ(val, member) \ + (((val) >> SSS_ADM_MSG_REQ_##member##_SHIFT) & \ + SSS_ADM_MSG_REQ_##member##_MASK) + +#define SSS_CLEAR_ADM_MSG_REQ(val, member) \ + ((val) & (~(SSS_ADM_MSG_REQ_##member##_MASK \ + << SSS_ADM_MSG_REQ_##member##_SHIFT))) + +/* ADM_MSG_CTRL CSR: 0x0014+adm_id*0x080 */ +#define SSS_ADM_MSG_CTRL_RESTART_EN_SHIFT 1 +#define SSS_ADM_MSG_CTRL_XOR_ERR_SHIFT 2 +#define SSS_ADM_MSG_CTRL_AEQE_EN_SHIFT 4 +#define SSS_ADM_MSG_CTRL_AEQ_ID_SHIFT 8 +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_SHIFT 28 +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_SHIFT 30 + +#define SSS_ADM_MSG_CTRL_RESTART_EN_MASK 0x1U +#define SSS_ADM_MSG_CTRL_XOR_ERR_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQE_EN_MASK 0x1U +#define SSS_ADM_MSG_CTRL_AEQ_ID_MASK 0x3U +#define SSS_ADM_MSG_CTRL_XOR_CHK_EN_MASK 0x3U +#define SSS_ADM_MSG_CTRL_ELEM_SIZE_MASK 0x3U + +#define SSS_SET_ADM_MSG_CTRL(val, member) \ + (((val) & SSS_ADM_MSG_CTRL_##member##_MASK) << \ + SSS_ADM_MSG_CTRL_##member##_SHIFT) + +#define SSS_CLEAR_ADM_MSG_CTRL(val, member) \ + ((val) & (~(SSS_ADM_MSG_CTRL_##member##_MASK \ + << SSS_ADM_MSG_CTRL_##member##_SHIFT))) + +#define SSS_ADM_MSG_BUF_SIZE 2048ULL + +#define SSS_ADM_MSG_NODE_ALIGN_SIZE 512ULL +#define SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE 64ULL + +#define SSS_ADM_MSG_REPLY_ALIGNMENT 128ULL + +#define SSS_ADM_MSG_TIMEOUT 10000 + +#define SSS_ADM_MSG_ELEM_SIZE_SHIFT 6U + +#define SSS_ADM_MSG_ELEM_NUM 32 +#define SSS_ADM_MSG_ELEM_SIZE 128 +#define SSS_ADM_MSG_REPLY_DATA_SIZE 128 + +#define SSS_MGMT_WQ_NAME "sssnic_mgmt" + +#define SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_paddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id) \ + ((adm_msg)->elem_vaddr_base + (adm_msg)->elem_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_paddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id) \ + ((adm_msg)->buf_vaddr_base + (adm_msg)->buf_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_paddr_base + (adm_msg)->reply_size_align * (elem_id)) + +#define SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id) \ + ((adm_msg)->reply_vaddr_base + (adm_msg)->reply_size_align * (elem_id)) + +typedef void (*sss_alloc_elem_buf_handler_t)(struct sss_adm_msg *adm_msg, u32 elem_id); + +struct sss_adm_msg_attr { + struct sss_hwdev *hwdev; + enum sss_adm_msg_type msg_type; + + u32 elem_num; + u16 reply_size; + u16 elem_size; +}; + +static enum sss_process_ret sss_adm_msg_reset_handler(void *priv_data) +{ + u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if (!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (!SSS_GET_ADM_MSG_REQ(val, RESTART)) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static enum sss_process_ret sss_adm_msg_ready_handler(void *priv_data) +{ + u32 val; + u32 addr; + struct sss_adm_msg *adm_msg = priv_data; + + if 
(!SSS_TO_HWDEV(adm_msg)->chip_present_flag) + return SSS_PROCESS_ERR; + + addr = SSS_CSR_ADM_MSG_STATE_0_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + if (SSS_GET_ADM_MSG_STATE(val, CI) == adm_msg->ci) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static void sss_chip_clean_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, RESTART_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_ERR) & + SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, XOR_CHK_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_adm_msg_wb_addr(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_STATE_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_STATE_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->wb_state_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_chip_reset_adm_msg(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_REQ_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + + val = SSS_CLEAR_ADM_MSG_REQ(val, RESTART); + val |= SSS_SET_ADM_MSG_REQ(1, RESTART); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + return sss_check_handler_timeout(adm_msg, sss_adm_msg_reset_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_chip_init_elem_size(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + u32 size; + + addr = SSS_CSR_ADM_MSG_CTRL_ADDR(adm_msg->msg_type); + val = sss_chip_read_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr); + val = SSS_CLEAR_ADM_MSG_CTRL(val, AEQE_EN) & + SSS_CLEAR_ADM_MSG_CTRL(val, ELEM_SIZE); + + size = (u32)ilog2(adm_msg->elem_size >> SSS_ADM_MSG_ELEM_SIZE_SHIFT); + val |= SSS_SET_ADM_MSG_CTRL(0, AEQE_EN) | + SSS_SET_ADM_MSG_CTRL(size, ELEM_SIZE); + + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static void sss_chip_set_elem_num(struct sss_adm_msg *adm_msg) +{ + u32 addr; + + addr = SSS_CSR_ADM_MSG_NUM_ELEM_ADDR(adm_msg->msg_type); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, adm_msg->elem_num); +} + +static void sss_chip_init_elem_head(struct sss_adm_msg *adm_msg) +{ + u32 val; + u32 addr; + + addr = SSS_CSR_ADM_MSG_HEAD_HI_ADDR(adm_msg->msg_type); + val = upper_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); + + addr = SSS_CSR_ADM_MSG_HEAD_LO_ADDR(adm_msg->msg_type); + val = lower_32_bits(adm_msg->head_elem_paddr); + sss_chip_write_reg(SSS_TO_HWDEV(adm_msg)->hwif, addr, val); +} + +static int sss_wait_adm_msg_ready(struct sss_adm_msg *adm_msg) +{ + return sss_check_handler_timeout(adm_msg, sss_adm_msg_ready_handler, + SSS_ADM_MSG_TIMEOUT, USEC_PER_MSEC); +} + +static int sss_chip_init_adm_msg(struct sss_adm_msg *adm_msg) +{ + sss_chip_clean_adm_msg(adm_msg); + + sss_chip_set_adm_msg_wb_addr(adm_msg); + + if (sss_chip_reset_adm_msg(adm_msg) != 0) { + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Fail to restart adm cmd\n"); + return -EBUSY; + } + + sss_chip_init_elem_size(adm_msg); + sss_chip_set_elem_num(adm_msg); + sss_chip_init_elem_head(adm_msg); + + return sss_wait_adm_msg_ready(adm_msg); +} + +static void 
sss_init_ctx_buf_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + + ctx->adm_msg_vaddr = vaddr; + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->write.hw_msg_paddr = cpu_to_be64(paddr); +} + +static void sss_init_ctx_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + paddr = SSS_GET_ADM_MSG_REPLY_PADDR(adm_msg, elem_id); + vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + elem->read.hw_wb_reply_paddr = cpu_to_be64(paddr); + ctx->reply_fmt = vaddr; + ctx->adm_msg_vaddr = &elem->read.hw_msg_paddr; +} + +static void sss_init_ctx_buf_reply_addr(struct sss_adm_msg *adm_msg, + u32 elem_id) +{ + u64 buf_paddr; + void *buf_vaddr; + void *rsp_vaddr; + struct sss_adm_msg_elem_ctx *ctx = &adm_msg->elem_ctx[elem_id]; + struct sss_adm_msg_elem *elem = NULL; + + rsp_vaddr = (u8 *)SSS_GET_ADM_MSG_REPLY_VADDR(adm_msg, elem_id); + buf_paddr = SSS_GET_ADM_MSG_BUF_PADDR(adm_msg, elem_id); + buf_vaddr = (u8 *)SSS_GET_ADM_MSG_BUF_VADDR(adm_msg, elem_id); + + elem = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->reply_fmt = rsp_vaddr; + ctx->adm_msg_vaddr = buf_vaddr; + elem->read.hw_msg_paddr = cpu_to_be64(buf_paddr); +} + +static void sss_alloc_reply_buf(struct sss_adm_msg *adm_msg, + struct sss_adm_msg_elem *elem, u32 cell_idx) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * cell_idx); + resp_paddr = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * cell_idx; + + ctx = &adm_msg->elem_ctx[cell_idx]; + + ctx->reply_fmt = resp_vaddr; + elem->read.hw_wb_reply_paddr = cpu_to_be64(resp_paddr); +} + +static int sss_init_elem_ctx(struct sss_adm_msg *adm_msg, u32 elem_id) +{ + struct sss_adm_msg_elem_ctx *ctx = NULL; + struct sss_adm_msg_elem *elem; + sss_alloc_elem_buf_handler_t handler[] = { + NULL, + NULL, + sss_init_ctx_buf_addr, + sss_init_ctx_reply_addr, + sss_init_ctx_buf_addr, + sss_init_ctx_buf_reply_addr, + sss_init_ctx_buf_addr + }; + elem = (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + + if (adm_msg->msg_type == SSS_ADM_MSG_MULTI_READ || + adm_msg->msg_type == SSS_ADM_MSG_POLL_READ) + sss_alloc_reply_buf(adm_msg, elem, elem_id); + + ctx = &adm_msg->elem_ctx[elem_id]; + ctx->elem_vaddr = + (struct sss_adm_msg_elem *)SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, elem_id); + ctx->hwdev = adm_msg->hwdev; + + if (adm_msg->msg_type >= ARRAY_LEN(handler)) + goto out; + + if (!handler[adm_msg->msg_type]) + goto out; + + handler[adm_msg->msg_type](adm_msg, elem_id); + + return 0; + +out: + sdk_err(SSS_TO_HWDEV(adm_msg)->dev_hdl, "Unsupport adm msg type %u\n", adm_msg->msg_type); + return -EINVAL; +} + +static int sss_init_adm_msg_elem(struct sss_adm_msg *adm_msg) +{ + u32 i; + u64 paddr; + void *vaddr; + struct sss_adm_msg_elem *elem = NULL; + struct sss_adm_msg_elem *pre_elt = NULL; + int ret; + + for (i = 0; i < adm_msg->elem_num; i++) { + ret = sss_init_elem_ctx(adm_msg, i); + if (ret != 0) + 
return ret; + + paddr = SSS_GET_ADM_MSG_ELEM_PADDR(adm_msg, i); + vaddr = SSS_GET_ADM_MSG_ELEM_VADDR(adm_msg, i); + + if (!pre_elt) { + adm_msg->head_node = vaddr; + adm_msg->head_elem_paddr = (dma_addr_t)paddr; + } else { + pre_elt->next_elem_paddr = cpu_to_be64(paddr); + } + + elem = vaddr; + elem->next_elem_paddr = 0; + + pre_elt = elem; + } + + elem->next_elem_paddr = cpu_to_be64(adm_msg->head_elem_paddr); + adm_msg->now_node = adm_msg->head_node; + + return 0; +} + +static int sss_alloc_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + size_t ctx_size; + + ctx_size = adm_msg->elem_num * sizeof(*adm_msg->elem_ctx); + + adm_msg->elem_ctx = kzalloc(ctx_size, GFP_KERNEL); + if (!adm_msg->elem_ctx) + return -ENOMEM; + + return 0; +} + +static void sss_free_adm_msg_ctx(struct sss_adm_msg *adm_msg) +{ + kfree(adm_msg->elem_ctx); + adm_msg->elem_ctx = NULL; +} + +static int sss_alloc_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->wb_state = dma_zalloc_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + &adm_msg->wb_state_paddr, GFP_KERNEL); + if (!adm_msg->wb_state) { + sdk_err(dev_hdl, "Fail to alloc dma wb status\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_adm_msg_wb_state(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + dma_free_coherent(dev_hdl, sizeof(*adm_msg->wb_state), + adm_msg->wb_state, adm_msg->wb_state_paddr); +} + +static int sss_alloc_elem_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + size_t buf_size; + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + adm_msg->buf_size_align = ALIGN(SSS_ADM_MSG_BUF_SIZE, + SSS_ADM_MSG_PAYLOAD_ALIGN_SIZE); + adm_msg->elem_size_align = ALIGN((u64)adm_msg->elem_size, + SSS_ADM_MSG_NODE_ALIGN_SIZE); + adm_msg->reply_size_align = ALIGN((u64)adm_msg->reply_size, + SSS_ADM_MSG_REPLY_ALIGNMENT); + buf_size = (adm_msg->buf_size_align + adm_msg->elem_size_align + + adm_msg->reply_size_align) * adm_msg->elem_num; + + ret = sss_dma_zalloc_coherent_align(dev_hdl, buf_size, SSS_ADM_MSG_NODE_ALIGN_SIZE, + GFP_KERNEL, &adm_msg->elem_addr); + if (ret != 0) { + sdk_err(dev_hdl, "Fail to alloc adm msg elem buffer\n"); + return ret; + } + + adm_msg->elem_vaddr_base = adm_msg->elem_addr.align_vaddr; + adm_msg->elem_paddr_base = adm_msg->elem_addr.align_paddr; + + adm_msg->reply_vaddr_base = (u8 *)((u64)adm_msg->elem_vaddr_base + + adm_msg->elem_size_align * adm_msg->elem_num); + adm_msg->reply_paddr_base = adm_msg->elem_paddr_base + + adm_msg->elem_size_align * adm_msg->elem_num; + + adm_msg->buf_vaddr_base = (u8 *)((u64)adm_msg->reply_vaddr_base + + adm_msg->reply_size_align * adm_msg->elem_num); + adm_msg->buf_paddr_base = adm_msg->reply_paddr_base + + adm_msg->reply_size_align * adm_msg->elem_num; + + return 0; +} + +static void sss_free_elem_buf(struct sss_adm_msg *adm_msg) +{ + void *dev_hdl = SSS_TO_HWDEV(adm_msg)->dev_hdl; + + sss_dma_free_coherent_align(dev_hdl, &adm_msg->elem_addr); +} + +static int sss_alloc_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + int ret; + + ret = sss_alloc_adm_msg_ctx(adm_msg); + if (ret != 0) + return ret; + + ret = sss_alloc_adm_msg_wb_state(adm_msg); + if (ret != 0) + goto alloc_wb_err; + + ret = sss_alloc_elem_buf(adm_msg); + if (ret != 0) + goto alloc_elem_buf_err; + + return 0; + +alloc_elem_buf_err: + sss_free_adm_msg_wb_state(adm_msg); + +alloc_wb_err: + sss_free_adm_msg_ctx(adm_msg); + + return ret; +} + +static void sss_free_adm_msg_buf(struct sss_adm_msg *adm_msg) +{ + sss_free_elem_buf(adm_msg); 
+
+	sss_free_adm_msg_wb_state(adm_msg);
+
+	sss_free_adm_msg_ctx(adm_msg);
+}
+
+static void sss_init_adm_msg_param(struct sss_adm_msg *adm_msg,
+				   struct sss_hwdev *hwdev, u8 msg_type)
+{
+	adm_msg->hwdev = hwdev;
+	adm_msg->elem_num = SSS_ADM_MSG_ELEM_NUM;
+	adm_msg->reply_size = SSS_ADM_MSG_REPLY_DATA_SIZE;
+	adm_msg->elem_size = SSS_ADM_MSG_ELEM_SIZE;
+	adm_msg->msg_type = msg_type;
+	adm_msg->pi = 0;
+	adm_msg->ci = 0;
+	if (adm_msg->msg_type == SSS_ADM_MSG_WRITE_ASYNC_TO_MGMT_MODULE)
+		spin_lock_init(&adm_msg->async_lock);
+	else
+		sema_init(&adm_msg->sem, 1);
+}
+
+static int create_adm_msg(struct sss_hwdev *hwdev, struct sss_adm_msg **adm_msg, u8 msg_type)
+{
+	struct sss_adm_msg *msg;
+	int ret;
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	sss_init_adm_msg_param(msg, hwdev, msg_type);
+
+	ret = sss_alloc_adm_msg_buf(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg buf\n");
+		kfree(msg);
+		return ret;
+	}
+
+	ret = sss_init_adm_msg_elem(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg elem\n");
+		sss_free_adm_msg_buf(msg);
+		kfree(msg);
+		return ret;
+	}
+
+	ret = sss_chip_init_adm_msg(msg);
+	if (ret != 0) {
+		sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n");
+		sss_free_adm_msg_buf(msg);
+		kfree(msg);
+		return ret;
+	}
+
+	*adm_msg = msg;
+
+	return 0;
+}
+
+void sss_destroy_adm_msg(struct sss_adm_msg *adm_msg)
+{
+	sss_free_adm_msg_buf(adm_msg);
+	kfree(adm_msg);
+}
+
+static int sss_init_adm_msg(struct sss_hwdev *hwdev,
+			    struct sss_adm_msg **adm_msg)
+{
+	int ret;
+	u8 i;
+	u8 adm_msg_type;
+	void *dev = ((struct sss_hwdev *)hwdev)->dev_hdl;
+
+	if (!SSS_SUPPORT_ADM_MSG(hwdev))
+		return 0;
+
+	for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
+	     adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++) {
+		ret = create_adm_msg(hwdev, &adm_msg[adm_msg_type], adm_msg_type);
+		if (ret) {
+			sdk_err(dev, "Failed to create adm msg %d\n", adm_msg_type);
+			goto create_adm_msg_err;
+		}
+	}
+
+	return 0;
+
+create_adm_msg_err:
+	for (i = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE; i < adm_msg_type; i++)
+		sss_destroy_adm_msg(adm_msg[i]);
+
+	return ret;
+}
+
+static void sss_deinit_adm_msg(const struct sss_hwdev *hwdev,
+			       struct sss_adm_msg **adm_msg)
+{
+	u8 adm_msg_type;
+
+	if (!SSS_SUPPORT_ADM_MSG(hwdev))
+		return;
+
+	for (adm_msg_type = SSS_ADM_MSG_WRITE_TO_MGMT_MODULE;
+	     adm_msg_type < SSS_ADM_MSG_MAX; adm_msg_type++)
+		sss_destroy_adm_msg(adm_msg[adm_msg_type]);
+}
+
+static int sss_alloc_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg)
+{
+	struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg;
+	struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg;
+
+	recv_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;
+	resp_msg->seq_id = SSS_MGMT_SEQ_ID_MAX;
+
+	recv_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
+	if (!recv_msg->buf)
+		return -ENOMEM;
+
+	resp_msg->buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
+	if (!resp_msg->buf)
+		goto alloc_resp_msg_err;
+
+	mgmt_msg->ack_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
+	if (!mgmt_msg->ack_buf)
+		goto alloc_ack_buf_err;
+
+	mgmt_msg->sync_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
+	if (!mgmt_msg->sync_buf)
+		goto alloc_sync_buf_err;
+
+	mgmt_msg->async_msg_buf = kzalloc(SSS_PF_MGMT_BUF_LEN_MAX, GFP_KERNEL);
+	if (!mgmt_msg->async_msg_buf)
+		goto alloc_async_msg_buf_err;
+
+	return 0;
+
+alloc_async_msg_buf_err:
+	kfree(mgmt_msg->sync_buf);
+	mgmt_msg->sync_buf = NULL;
+alloc_sync_buf_err:
+	kfree(mgmt_msg->ack_buf);
+	mgmt_msg->ack_buf = NULL;
+
+alloc_ack_buf_err:
+
kfree(resp_msg->buf); + resp_msg->buf = NULL; + +alloc_resp_msg_err: + kfree(recv_msg->buf); + recv_msg->buf = NULL; + + return -ENOMEM; +} + +static void sss_free_msg_buf(struct sss_msg_pf_to_mgmt *mgmt_msg) +{ + struct sss_recv_msg *recv_msg = &mgmt_msg->recv_msg; + struct sss_recv_msg *resp_msg = &mgmt_msg->recv_resp_msg; + + kfree(mgmt_msg->async_msg_buf); + kfree(mgmt_msg->sync_buf); + kfree(mgmt_msg->ack_buf); + kfree(resp_msg->buf); + kfree(recv_msg->buf); +} + +int sss_hwif_init_adm(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_msg_pf_to_mgmt *mgmt_msg; + + mgmt_msg = kzalloc(sizeof(*mgmt_msg), GFP_KERNEL); + if (!mgmt_msg) + return -ENOMEM; + + spin_lock_init(&mgmt_msg->async_msg_lock); + spin_lock_init(&mgmt_msg->sync_event_lock); + sema_init(&mgmt_msg->sync_lock, 1); + mgmt_msg->hwdev = hwdev; + hwdev->pf_to_mgmt = mgmt_msg; + + mgmt_msg->workq = create_singlethread_workqueue(SSS_MGMT_WQ_NAME); + if (!mgmt_msg->workq) { + sdk_err(hwdev->dev_hdl, "Fail to init mgmt workq\n"); + ret = -ENOMEM; + goto alloc_mgmt_wq_err; + } + + ret = sss_alloc_msg_buf(mgmt_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc msg buffer\n"); + goto alloc_msg_buf_err; + } + + ret = sss_init_adm_msg(hwdev, mgmt_msg->adm_msg); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init adm msg\n"); + goto init_all_adm_err; + } + + return 0; + +init_all_adm_err: + sss_free_msg_buf(mgmt_msg); + +alloc_msg_buf_err: + destroy_workqueue(mgmt_msg->workq); + +alloc_mgmt_wq_err: + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; + + return ret; +} + +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev) +{ + struct sss_msg_pf_to_mgmt *mgmt_msg = hwdev->pf_to_mgmt; + + destroy_workqueue(mgmt_msg->workq); + + sss_deinit_adm_msg(hwdev, mgmt_msg->adm_msg); + + sss_free_msg_buf(mgmt_msg); + + kfree(mgmt_msg); + hwdev->pf_to_mgmt = NULL; +} + +void sss_complete_adm_event(struct sss_hwdev *hwdev) +{ + struct sss_recv_msg *recv_msg = + &hwdev->pf_to_mgmt->recv_resp_msg; + + spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + if (hwdev->pf_to_mgmt->event_state == SSS_ADM_EVENT_START) { + complete(&recv_msg->done); + hwdev->pf_to_mgmt->event_state = SSS_ADM_EVENT_TIMEOUT; + } + spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h new file mode 100644 index 0000000000000000000000000000000000000000..c2c3092fbdc00d2c3413e6555aaba1e6b5e3a972 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_adm_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_ADM_INIT_H +#define SSS_HWIF_ADM_INIT_H + +#include "sss_hwdev.h" + +int sss_hwif_init_adm(struct sss_hwdev *hwdev); +void sss_hwif_deinit_adm(struct sss_hwdev *hwdev); +void sss_complete_adm_event(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c new file mode 100644 index 0000000000000000000000000000000000000000..93bda1133420cfb6f48f841c0f3ca44a246ce564 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include 
"sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hw_export.h" +#include "sss_hwif_aeq.h" +#include "sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" +#include "sss_csr.h" + +#define SSS_DEF_AEQ_DEPTH 0x10000 + +#define SSS_MIN_AEQ_DEPTH 64 +#define SSS_MAX_AEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_AEQE_SIZE) * SSS_AEQ_MAX_PAGE) + +#define SSS_AEQE_DESC_SIZE 4 +#define SSS_AEQE_DATA_SIZE (SSS_AEQE_SIZE - SSS_AEQE_DESC_SIZE) + +struct sss_aeq_elem { + u8 aeqe_data[SSS_AEQE_DATA_SIZE]; + u32 desc; +}; + +#define SSS_GET_AEQ_ELEM(aeq, id) \ + ((struct sss_aeq_elem *)SSS_GET_EQ_ELEM((aeq), (id))) + +#define SSS_GET_CUR_AEQ_ELEM(aeq) SSS_GET_AEQ_ELEM((aeq), (aeq)->ci) + +#define SSS_GET_AEQ_SW_EVENT(type) \ + (((type) >= SSS_ERR_MAX) ? \ + SSS_STF_EVENT : SSS_STL_EVENT) + +#define SSS_AEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_SHIFT 20 +#define SSS_AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_AEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_AEQ_CTRL_0_PCI_INTF_ID_MASK 0x7U +#define SSS_AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_AEQ_CTRL_0(val, member) \ + (((val) & SSS_AEQ_CTRL_0_##member##_MASK) << \ + SSS_AEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_0(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_0_##member##_MASK << \ + SSS_AEQ_CTRL_0_##member##_SHIFT))) + +#define SSS_AEQ_CTRL_1_SIZE_SHIFT 0 +#define SSS_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define SSS_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define SSS_AEQ_CTRL_1_SIZE_MASK 0x1FFFFFU +#define SSS_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define SSS_AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define SSS_SET_AEQ_CTRL_1(val, member) \ + (((val) & SSS_AEQ_CTRL_1_##member##_MASK) << \ + SSS_AEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CLEAR_AEQ_CTRL_1(val, member) \ + ((val) & (~(SSS_AEQ_CTRL_1_##member##_MASK << \ + SSS_AEQ_CTRL_1_##member##_SHIFT))) + +#define SSS_ELEM_SIZE_IN_32B(aeq) (((aeq)->entry_size) >> 5) +#define SSS_SET_EQ_HW_E_SIZE(aeq) ((u32)ilog2(SSS_ELEM_SIZE_IN_32B(aeq))) + +#define SSS_AEQ_WQ_NAME "sss_eqs" + +#define SSS_AEQ_NAME "sss_aeq" + +#define SSS_AEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_aeq_info, aeq[0]) + +#define SSS_AEQ_DMA_ATTR_DEF 0 + +enum sss_aeq_cb_state { + SSS_AEQ_HW_CB_REG = 0, + SSS_AEQ_HW_CB_RUNNING, + SSS_AEQ_SW_CB_REG, + SSS_AEQ_SW_CB_RUNNING, +}; + +static u32 aeq_depth = SSS_DEF_AEQ_DEPTH; +module_param(aeq_depth, uint, 0444); +MODULE_PARM_DESC(aeq_depth, + "aeq depth, valid range is " __stringify(SSS_MIN_AEQ_DEPTH) + " - " __stringify(SSS_MAX_AEQ_DEPTH)); + +static void sss_chip_set_aeq_intr(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = sss_chip_read_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR); + + val = SSS_CLEAR_AEQ_CTRL_0(val, INTR_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, DMA_ATTR) & + SSS_CLEAR_AEQ_CTRL_0(val, PCI_INTF_ID) & + SSS_CLEAR_AEQ_CTRL_0(val, INTR_MODE); + + val |= SSS_SET_AEQ_CTRL_0(SSS_EQ_IRQ_ID(aeq), INTR_ID) | + SSS_SET_AEQ_CTRL_0(SSS_AEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_AEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_AEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_0_ADDR, val); +} + +static void sss_chip_set_aeq_size(struct sss_eq *aeq) +{ + u32 val; + struct sss_hwif *hwif = SSS_TO_HWDEV(aeq)->hwif; + + val = SSS_SET_AEQ_CTRL_1(aeq->len, SIZE) | + SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_E_SIZE(aeq), ELEM_SIZE) | + 
SSS_SET_AEQ_CTRL_1(SSS_SET_EQ_HW_PAGE_SIZE(aeq), PAGE_SIZE); + + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, val); +} + +static u32 sss_chip_init_aeq_attr(void *aeq) +{ + sss_chip_set_aeq_intr(aeq); + sss_chip_set_aeq_size(aeq); + + return 0; +} + +static void sss_init_aeqe_desc(void *data) +{ + u32 i; + u32 init_val; + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(aeq)); + for (i = 0; i < aeq->len; i++) { + aeqe = SSS_GET_AEQ_ELEM(aeq, i); + aeqe->desc = init_val; + } + + /* write all aeq desc */ + wmb(); +} + +static irqreturn_t sss_aeq_intr_handle(int irq, void *data) +{ + struct sss_eq *aeq = (struct sss_eq *)data; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + sss_chip_clear_msix_resend_bit(aeq->hwdev, SSS_EQ_IRQ_ID(aeq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); + + return IRQ_HANDLED; +} + +static void sss_aeq_event_handle(struct sss_eq *aeq, u32 desc) +{ + u32 size; + u32 event; + u8 data[SSS_AEQE_DATA_SIZE]; + enum sss_aeq_hw_event hw_event; + enum sss_aeq_sw_event sw_event; + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + struct sss_aeq_elem *aeqe; + + aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + hw_event = SSS_GET_EQE_DESC(desc, TYPE); + SSS_TO_HWDEV(aeq)->aeq_stat.cur_recv_cnt++; + + if (SSS_GET_EQE_DESC(desc, SRC)) { + event = hw_event; + sw_event = SSS_GET_AEQ_SW_EVENT(event); + + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + set_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + if (aeq_info->sw_event_handler[sw_event] && + test_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[sw_event])) + aeq_info->sw_event_handler[sw_event](aeq_info->sw_event_data[sw_event], + hw_event, data); + + clear_bit(SSS_AEQ_SW_CB_RUNNING, &aeq_info->sw_event_handler_state[sw_event]); + + return; + } + + if (hw_event < SSS_AEQ_EVENT_MAX) { + memcpy(data, aeqe->aeqe_data, SSS_AEQE_DATA_SIZE); + sss_be32_to_cpu(data, SSS_AEQE_DATA_SIZE); + + size = SSS_GET_EQE_DESC(desc, SIZE); + set_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + if (aeq_info->hw_event_handler[hw_event] && + test_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[hw_event])) + aeq_info->hw_event_handler[hw_event](aeq_info->hw_event_data[hw_event], + data, size); + + clear_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[hw_event]); + + return; + } + sdk_warn(SSS_TO_HWDEV(aeq)->dev_hdl, "Unknown aeq event %d\n", hw_event); +} + +static bool sss_aeq_irq_handle(struct sss_eq *aeq) +{ + struct sss_aeq_elem *elem = NULL; + u32 desc; + u32 i; + u32 eqe_cnt = 0; + + for (i = 0; i < SSS_TASK_PROCESS_EQE_LIMIT; i++) { + elem = SSS_GET_CUR_AEQ_ELEM(aeq); + + /* Data in HW is in Big endian Format */ + desc = be32_to_cpu(elem->desc); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(desc, WRAPPED) == aeq->wrap) + return false; + + dma_rmb(); + + sss_aeq_event_handle(aeq, desc); + + sss_increase_eq_ci(aeq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(aeq, SSS_EQ_NOT_ARMED); + } + } + + return true; +} + +static void sss_aeq_irq_work(struct work_struct *work) +{ + bool unfinish; + struct sss_eq *aeq = container_of(work, struct sss_eq, aeq_work); + struct sss_aeq_info *aeq_info = SSS_AEQ_TO_INFO(aeq); + + unfinish = sss_aeq_irq_handle(aeq); + sss_chip_set_eq_ci(aeq, 
SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + queue_work_on(WORK_CPU_UNBOUND, aeq_info->workq, &aeq->aeq_work); +} + +static void sss_init_aeq_para(struct sss_eq *aeq, u16 qid) +{ + aeq->init_desc_handler = sss_init_aeqe_desc; + aeq->init_attr_handler = sss_chip_init_aeq_attr; + aeq->irq_handler = sss_aeq_intr_handle; + aeq->name = SSS_AEQ_NAME; + INIT_WORK(&aeq->aeq_work, sss_aeq_irq_work); + + aeq->qid = qid; + aeq->len = aeq_depth; + aeq->type = SSS_AEQ; + aeq->entry_size = SSS_AEQE_SIZE; +} + +static int sss_init_aeq(struct sss_hwdev *hwdev, + u16 aeq_num, struct sss_irq_desc *irq) +{ + u16 i; + u16 qid; + int ret; + struct sss_aeq_info *aeq_info = NULL; + + aeq_info = kzalloc(sizeof(*aeq_info), GFP_KERNEL); + if (!aeq_info) + return -ENOMEM; + + hwdev->aeq_info = aeq_info; + aeq_info->hwdev = hwdev; + aeq_info->num = aeq_num; + + aeq_info->workq = alloc_workqueue(SSS_AEQ_WQ_NAME, WQ_MEM_RECLAIM, SSS_MAX_AEQ); + if (!aeq_info->workq) { + ret = -ENOMEM; + sdk_err(hwdev->dev_hdl, "Fail to alloc aeq workqueue\n"); + goto alloc_workq_err; + } + + if (aeq_depth < SSS_MIN_AEQ_DEPTH || aeq_depth > SSS_MAX_AEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, "Invalid aeq_depth value %u, adjust to %d\n", + aeq_depth, SSS_DEF_AEQ_DEPTH); + aeq_depth = SSS_DEF_AEQ_DEPTH; + } + + for (qid = 0; qid < aeq_num; qid++) { + sss_init_aeq_para(&aeq_info->aeq[qid], qid); + ret = sss_init_eq(hwdev, &aeq_info->aeq[qid], &irq[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq %u\n", qid); + goto init_aeq_err; + } + } + + for (qid = 0; qid < aeq_num; qid++) + sss_chip_set_msix_state(hwdev, irq[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_aeq_err: + for (i = 0; i < qid; i++) + sss_deinit_eq(&aeq_info->aeq[i]); + + destroy_workqueue(aeq_info->workq); + +alloc_workq_err: + kfree(aeq_info); + hwdev->aeq_info = NULL; + + return ret; +} + +void sss_deinit_aeq(struct sss_hwdev *hwdev) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + enum sss_aeq_hw_event aeq_event; + enum sss_aeq_sw_event sw_aeq_event; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) + sss_deinit_eq(&aeq_info->aeq[qid]); + + for (sw_aeq_event = SSS_STL_EVENT; + sw_aeq_event < SSS_AEQ_SW_EVENT_MAX; sw_aeq_event++) + sss_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (aeq_event = SSS_HW_FROM_INT; + aeq_event < SSS_AEQ_EVENT_MAX; aeq_event++) + sss_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeq_info->workq); + + kfree(aeq_info); + hwdev->aeq_info = NULL; +} + +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num) +{ + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + u16 qid; + + for (qid = 0; qid < aeq_info->num; qid++) { + irq_array[qid].irq_id = aeq_info->aeq[qid].irq_desc.irq_id; + irq_array[qid].msix_id = + aeq_info->aeq[qid].irq_desc.msix_id; + } + + *irq_num = aeq_info->num; +} + +void sss_dump_aeq_info(struct sss_hwdev *hwdev) +{ + struct sss_aeq_elem *aeqe = NULL; + struct sss_eq *aeq = NULL; + u32 addr; + u32 ci; + u32 pi; + u32 ctrl0; + u32 id; + int qid; + + for (qid = 0; qid < hwdev->aeq_info->num; qid++) { + aeq = &hwdev->aeq_info->aeq[qid]; + /* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(aeq)->hwif, + SSS_EQ_INDIR_ID_ADDR(aeq->type), aeq->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_CSR_AEQ_CTRL_0_ADDR; + ctrl0 = sss_chip_read_reg(hwdev->hwif, addr); + id = sss_chip_read_reg(hwdev->hwif, SSS_EQ_INDIR_ID_ADDR(aeq->type)); + + addr = SSS_EQ_CI_REG_ADDR(aeq); + ci = sss_chip_read_reg(hwdev->hwif, 
addr); + addr = SSS_EQ_PI_REG_ADDR(aeq); + pi = sss_chip_read_reg(hwdev->hwif, addr); + aeqe = SSS_GET_CUR_AEQ_ELEM(aeq); + sdk_err(hwdev->dev_hdl, + "Aeq id: %d, id: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n", + qid, id, ctrl0, ci, pi, work_busy(&aeq->aeq_work), + aeq->wrap, be32_to_cpu(aeqe->desc), aeq->ci); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !event_handler || event >= SSS_AEQ_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->hw_event_handler[event] = event_handler; + aeq_info->hw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_HW_CB_REG, &aeq_info->hw_event_handler_state[event]); + while (test_bit(SSS_AEQ_HW_CB_RUNNING, &aeq_info->hw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->hw_event_handler[event] = NULL; +} + +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || !sw_event_handler || event >= SSS_AEQ_SW_EVENT_MAX) + return -EINVAL; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + aeq_info->sw_event_handler[event] = sw_event_handler; + aeq_info->sw_event_data[event] = pri_handle; + set_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]); + + return 0; +} + +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event) +{ + struct sss_aeq_info *aeq_info = NULL; + + if (!hwdev || event >= SSS_AEQ_SW_EVENT_MAX) + return; + + aeq_info = SSS_TO_AEQ_INFO(hwdev); + clear_bit(SSS_AEQ_SW_CB_REG, &aeq_info->sw_event_handler_state[event]); + while (test_bit(SSS_AEQ_SW_CB_RUNNING, + &aeq_info->sw_event_handler_state[event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + aeq_info->sw_event_handler[event] = NULL; +} + +int sss_hwif_init_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 aeq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + aeq_num = SSS_GET_HWIF_AEQ_NUM(hwdev->hwif); + if (aeq_num > SSS_MAX_AEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %d\n", SSS_MAX_AEQ); + aeq_num = SSS_MAX_AEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array, aeq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, aeq_num: %u\n", aeq_num); + return -ENOMEM; + } + + if (act_num < aeq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq_num to %u\n", act_num); + aeq_num = act_num; + } + + ret = sss_init_aeq(hwdev, aeq_num, irq_array); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init aeq\n"); + goto init_aeqs_err; + } + + return 0; + +init_aeqs_err: + for (i = 0; i < aeq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); + + return ret; +} + +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num; + struct sss_irq_desc irq_array[SSS_MAX_AEQ] = {0}; + + sss_get_aeq_irq(hwdev, irq_array, &irq_num); + + sss_deinit_aeq(hwdev); + + for (i = 0; i < irq_num; i++) + 
sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_array[i].irq_id); +} + +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev) +{ + int i; + int ret; + struct sss_aeq_info *aeq_info = hwdev->aeq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = aeq_info->num - 1; i >= 0; i--) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&aeq_info->aeq[i]); + ret = sss_chip_set_eq_msix_attr(hwdev, &intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for aeq %d\n", i); + return -EFAULT; + } + } + + return 0; +} + +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + if (!hwdev) + return 0; + + sdk_err(hwdev->dev_hdl, "Received ucode aeq event, type: 0x%x, data: 0x%llx\n", + aeq_event, *((u64 *)data)); + + if (aeq_event < SSS_ERR_MAX) + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[aeq_event]); + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h new file mode 100644 index 0000000000000000000000000000000000000000..105c8e985723193f8f1b2a39892620f1019402e7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_aeq.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_AEQ_H +#define SSS_HWIF_AEQ_H + +#include "sss_hw_irq.h" +#include "sss_hw_aeq.h" +#include "sss_hwdev.h" +#include "sss_aeq_info.h" + +void sss_deinit_aeq(struct sss_hwdev *hwdev); +void sss_get_aeq_irq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 *irq_num); +void sss_dump_aeq_info(struct sss_hwdev *hwdev); +int sss_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum sss_aeq_hw_event event, sss_aeq_hw_event_handler_t event_handler); +void sss_aeq_unregister_hw_cb(void *hwdev, enum sss_aeq_hw_event event); +int sss_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum sss_aeq_sw_event event, + sss_aeq_sw_event_handler_t sw_event_handler); +void sss_aeq_unregister_swe_cb(void *hwdev, enum sss_aeq_sw_event event); +int sss_hwif_init_aeq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_aeq(struct sss_hwdev *hwdev); +int sss_init_aeq_msix_attr(struct sss_hwdev *hwdev); +u8 sss_sw_aeqe_handler(void *dev, u8 aeq_event, u8 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c new file mode 100644 index 0000000000000000000000000000000000000000..1c7c907dea3136f25e8c5d33d90afafa88d4061c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_GET_REG_FLAG(reg) ((reg) & (~(SSS_CSR_FLAG_MASK))) +#define SSS_GET_REG_ADDR(reg) ((reg) & (SSS_CSR_FLAG_MASK)) + +#define SSS_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +#define SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val) ((val) & (~(1U << (host_id)))) +#define SSS_SET_SLAVE_HOST_STATUS(host_id, enable) (((u8)(enable) & 1U) << (host_id)) + +#define SSS_MULT_HOST_SLAVE_STATUS_ADDR (SSS_MGMT_FLAG + 0xDF30) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg) +{ + if (SSS_GET_REG_FLAG(reg) == 
SSS_MGMT_FLAG) + return be32_to_cpu(readl(hwif->mgmt_reg_base + + SSS_GET_REG_ADDR(reg))); + else + return be32_to_cpu(readl(hwif->cfg_reg_base + + SSS_GET_REG_ADDR(reg))); +} + +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val) +{ + if (SSS_GET_REG_FLAG(reg) == SSS_MGMT_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_reg_base + SSS_GET_REG_ADDR(reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_reg_base + SSS_GET_REG_ADDR(reg)); +} + +bool sss_chip_get_present_state(void *hwdev) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) { + sdk_warn(SSS_TO_DEV(hwdev), "Card is not present\n"); + return false; + } + + return true; +} + +u32 sss_chip_get_pcie_link_status(void *hwdev) +{ + u32 val; + + if (!hwdev) + return SSS_PCIE_LINK_DOWN; + + val = sss_chip_read_reg(SSS_TO_HWIF(hwdev), SSS_CSR_HW_ATTR1_ADDR); + if (val == SSS_PCIE_LINK_DOWN) + return val; + + return !SSS_GET_AF1(val, MGMT_INIT_STATUS); +} + +void sss_chip_set_pf_status(struct sss_hwif *hwif, + enum sss_pf_status status) +{ + u32 val; + + if (SSS_GET_HWIF_FUNC_TYPE(hwif) == SSS_FUNC_TYPE_VF) + return; + + val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + val = SSS_CLEAR_AF6(val, PF_STATUS); + val |= SSS_SET_AF6(status, PF_STATUS); + + sss_chip_write_reg(hwif, SSS_CSR_HW_ATTR6_ADDR, val); +} + +enum sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif) +{ + u32 val = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + return SSS_GET_AF6(val, PF_STATUS); +} + +void sss_chip_enable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_ENABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_chip_disable_doorbell(struct sss_hwif *hwif) +{ + u32 addr; + u32 val; + + addr = SSS_CSR_HW_ATTR4_ADDR; + val = sss_chip_read_reg(hwif, addr); + + val = SSS_CLEAR_AF4(val, DOORBELL_CTRL); + val |= SSS_SET_AF4(DB_DISABLE, DOORBELL_CTRL); + + sss_chip_write_reg(hwif, addr, val); +} + +void sss_free_db_id(struct sss_hwif *hwif, u32 id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + + if (id >= pool->bit_size) + return; + + spin_lock(&pool->id_lock); + clear_bit((int)id, pool->bitmap); + spin_unlock(&pool->id_lock); +} + +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 pg_id; + + spin_lock(&pool->id_lock); + pg_id = (u32)find_first_zero_bit(pool->bitmap, pool->bit_size); + if (pg_id == pool->bit_size) { + spin_unlock(&pool->id_lock); + return -ENOMEM; + } + set_bit(pg_id, pool->bitmap); + spin_unlock(&pool->id_lock); + + *id = pg_id; + + return 0; +} + +void sss_dump_chip_err_info(struct sss_hwdev *hwdev) +{ + u32 value; + + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + return; + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_BASE_INFO_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_MGMT_HEALTH_STATUS_ADDR); + sdk_warn(hwdev->dev_hdl, "Mgmt CPU health status: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_CHIP_ERR_STATUS1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value); + + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip 
exception info0: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value); + value = sss_chip_read_reg(hwdev->hwif, SSS_ERR_INFO2_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value); +} + +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id) +{ + u32 addr; + u32 val; + + if (!hwdev) + return 0; + + addr = SSS_CSR_FUNC_PPF_ELECT(host_id); + val = sss_chip_read_reg(hwdev->hwif, addr); + + return SSS_GET_PPF_ELECT_PORT(val, ID); +} + +static void sss_init_eq_msix_cfg(void *hwdev, + struct sss_cmd_msix_config *cmd_msix, + struct sss_irq_cfg *info) +{ + cmd_msix->opcode = SSS_MGMT_MSG_SET_CMD; + cmd_msix->func_id = sss_get_global_func_id(hwdev); + cmd_msix->msix_index = (u16)info->msix_id; + cmd_msix->lli_credit_cnt = info->lli_credit; + cmd_msix->lli_timer_cnt = info->lli_timer; + cmd_msix->pending_cnt = info->pending; + cmd_msix->coalesce_timer_cnt = info->coalesc_timer; + cmd_msix->resend_timer_cnt = info->resend_timer; +} + +int sss_chip_set_eq_msix_attr(void *hwdev, + struct sss_irq_cfg *intr_info, u16 ch) +{ + int ret; + struct sss_cmd_msix_config cmd_msix = {0}; + u16 out_len = sizeof(cmd_msix); + + sss_init_eq_msix_cfg(hwdev, &cmd_msix, intr_info); + + ret = sss_sync_send_msg_ch(hwdev, SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &cmd_msix, sizeof(cmd_msix), &cmd_msix, &out_len, ch); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_msix)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set eq msix cfg, ret: %d, status: 0x%x, out_len: 0x%x, ch: 0x%x\n", + ret, cmd_msix.head.state, out_len, ch); + return -EINVAL; + } + + return 0; +} + +int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size) +{ + int ret; + struct sss_cmd_wq_page_size cmd_page = {0}; + u16 out_len = sizeof(cmd_page); + + cmd_page.opcode = SSS_MGMT_MSG_SET_CMD; + cmd_page.func_id = func_id; + cmd_page.page_size = SSS_PAGE_SIZE_HW(page_size); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + &cmd_page, sizeof(cmd_page), &cmd_page, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_page)) { + sdk_err(SSS_TO_DEV(hwdev), + "Fail to set wq page size, ret: %d, status: 0x%x, out_len: 0x%0x\n", + ret, cmd_page.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1) +{ + int ret; + struct sss_cmd_ceq_ctrl_reg cmd_ceq = {0}; + u16 out_len = sizeof(cmd_ceq); + + cmd_ceq.func_id = sss_get_global_func_id(hwdev); + cmd_ceq.qid = qid; + cmd_ceq.ctrl0 = attr0; + cmd_ceq.ctrl1 = attr1; + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &cmd_ceq, sizeof(cmd_ceq), &cmd_ceq, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ceq)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ceq %u ctrl, ret: %d status: 0x%x, out_len: 0x%x\n", + qid, ret, cmd_ceq.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +void sss_chip_set_slave_host_status(void *dev, u8 host_id, bool enable) +{ + u32 val; + struct sss_hwdev *hwdev = dev; + + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_PPF) + return; + + val = sss_chip_read_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR); + val = SSS_CLEAR_SLAVE_HOST_STATUS(host_id, val); + val |= SSS_SET_SLAVE_HOST_STATUS(host_id, !!enable); + + sss_chip_write_reg(hwdev->hwif, SSS_MULT_HOST_SLAVE_STATUS_ADDR, val); + + sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, val); +} diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h new file mode 100644 index 0000000000000000000000000000000000000000..f299bf0fa6d90611a08433c7cb05109baf3d13c8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_api.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_API_H +#define SSS_HWIF_API_H + +#include "sss_hwdev.h" + +enum sss_pf_status { + SSS_PF_STATUS_INIT = 0X0, + SSS_PF_STATUS_ACTIVE_FLAG = 0x11, + SSS_PF_STATUS_FLR_START_FLAG = 0x12, + SSS_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +enum sss_doorbell_ctrl { + DB_ENABLE, + DB_DISABLE, +}; + +enum sss_outbound_ctrl { + OUTBOUND_ENABLE, + OUTBOUND_DISABLE, +}; + +#define SSS_PCIE_LINK_DOWN 0xFFFFFFFF +#define SSS_PCIE_LINK_UP 0 + +#define SSS_AF1_PPF_ID_SHIFT 0 +#define SSS_AF1_AEQ_PER_FUNC_SHIFT 8 +#define SSS_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define SSS_AF1_PF_INIT_STATUS_SHIFT 31 + +#define SSS_AF1_PPF_ID_MASK 0x3F +#define SSS_AF1_AEQ_PER_FUNC_MASK 0x3 +#define SSS_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define SSS_AF1_PF_INIT_STATUS_MASK 0x1 + +#define SSS_GET_AF1(val, member) \ + (((val) >> SSS_AF1_##member##_SHIFT) & SSS_AF1_##member##_MASK) + +#define SSS_AF4_DOORBELL_CTRL_SHIFT 0 +#define SSS_AF4_DOORBELL_CTRL_MASK 0x1 + +#define SSS_GET_AF4(val, member) \ + (((val) >> SSS_AF4_##member##_SHIFT) & SSS_AF4_##member##_MASK) + +#define SSS_SET_AF4(val, member) \ + (((val) & SSS_AF4_##member##_MASK) << SSS_AF4_##member##_SHIFT) + +#define SSS_CLEAR_AF4(val, member) \ + ((val) & (~(SSS_AF4_##member##_MASK << SSS_AF4_##member##_SHIFT))) + +#define SSS_AF6_PF_STATUS_SHIFT 0 +#define SSS_AF6_PF_STATUS_MASK 0xFFFF + +#define SSS_AF6_FUNC_MAX_SQ_SHIFT 23 +#define SSS_AF6_FUNC_MAX_SQ_MASK 0x1FF + +#define SSS_AF6_MSIX_FLEX_EN_SHIFT 22 +#define SSS_AF6_MSIX_FLEX_EN_MASK 0x1 + +#define SSS_SET_AF6(val, member) \ + ((((u32)(val)) & SSS_AF6_##member##_MASK) << \ + SSS_AF6_##member##_SHIFT) + +#define SSS_GET_AF6(val, member) \ + (((u32)(val) >> SSS_AF6_##member##_SHIFT) & SSS_AF6_##member##_MASK) + +#define SSS_CLEAR_AF6(val, member) \ + ((u32)(val) & (~(SSS_AF6_##member##_MASK << \ + SSS_AF6_##member##_SHIFT))) + +#define SSS_PPF_ELECT_PORT_ID_SHIFT 0 + +#define SSS_PPF_ELECT_PORT_ID_MASK 0x3F + +#define SSS_GET_PPF_ELECT_PORT(val, member) \ + (((val) >> SSS_PPF_ELECT_PORT_##member##_SHIFT) & \ + SSS_PPF_ELECT_PORT_##member##_MASK) + +#define SSS_PPF_ELECTION_ID_SHIFT 0 + +#define SSS_PPF_ELECTION_ID_MASK 0x3F + +#define SSS_SET_PPF(val, member) \ + (((val) & SSS_PPF_ELECTION_##member##_MASK) << \ + SSS_PPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_PPF(val, member) \ + (((val) >> SSS_PPF_ELECTION_##member##_SHIFT) & \ + SSS_PPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_PPF(val, member) \ + ((val) & (~(SSS_PPF_ELECTION_##member##_MASK << \ + SSS_PPF_ELECTION_##member##_SHIFT))) + +#define SSS_DB_DWQE_SIZE 0x00400000 + +/* db/dwqe page size: 4K */ +#define SSS_DB_PAGE_SIZE 0x00001000ULL +#define SSS_DWQE_OFFSET 0x00000800ULL + +#define SSS_DB_MAX_AREAS (SSS_DB_DWQE_SIZE / SSS_DB_PAGE_SIZE) + +#define SSS_DB_ID(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / SSS_DB_PAGE_SIZE)) + +u32 sss_chip_read_reg(struct sss_hwif *hwif, u32 reg); +void sss_chip_write_reg(struct sss_hwif *hwif, u32 reg, u32 val); +bool sss_chip_get_present_state(void *hwdev); +u32 sss_chip_get_pcie_link_status(void *hwdev); +void sss_chip_set_pf_status(struct sss_hwif *hwif, enum sss_pf_status status); +enum 
sss_pf_status sss_chip_get_pf_status(struct sss_hwif *hwif); +void sss_chip_enable_doorbell(struct sss_hwif *hwif); +void sss_chip_disable_doorbell(struct sss_hwif *hwif); +int sss_alloc_db_id(struct sss_hwif *hwif, u32 *id); +void sss_free_db_id(struct sss_hwif *hwif, u32 id); +void sss_dump_chip_err_info(struct sss_hwdev *hwdev); +u8 sss_chip_get_host_ppf_id(struct sss_hwdev *hwdev, u8 host_id); +int sss_chip_set_eq_msix_attr(void *hwdev, struct sss_irq_cfg *info, u16 channel); +int sss_chip_set_wq_page_size(void *hwdev, u16 func_id, u32 page_size); +int sss_chip_set_ceq_attr(struct sss_hwdev *hwdev, u16 qid, + u32 attr0, u32 attr1); +void sss_chip_set_slave_host_status(void *hwdev, u8 host_id, bool enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c new file mode 100644 index 0000000000000000000000000000000000000000..ffc3d4bdb429250a404e00e7d280c93b3f34ec79 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_eq_info.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_export.h" +#include "sss_hwif_ceq.h" +#include "sss_hw_common.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_export.h" + +#define SSS_DEF_CEQ_DEPTH 8192 + +#define SSS_CEQ_NAME "sss_ceq" + +#define SSS_CEQ_CTRL_0_INTR_ID_SHIFT 0 +#define SSS_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define SSS_CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_SHIFT 24 +#define SSS_CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define SSS_CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define SSS_CEQ_CTRL_0_INTR_ID_MASK 0x3FFU +#define SSS_CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define SSS_CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define SSS_CEQ_CTRL_0_PCI_INTF_ID_MASK 0x3U +#define SSS_CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define SSS_CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define SSS_SET_CEQ_CTRL_0(val, member) \ + (((val) & SSS_CEQ_CTRL_0_##member##_MASK) << \ + SSS_CEQ_CTRL_0_##member##_SHIFT) + +#define SSS_CEQ_CTRL_1_LEN_SHIFT 0 +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define SSS_CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define SSS_CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define SSS_SET_CEQ_CTRL_1(val, member) \ + (((val) & SSS_CEQ_CTRL_1_##member##_MASK) << \ + SSS_CEQ_CTRL_1_##member##_SHIFT) + +#define SSS_CEQ_DMA_ATTR_DEF 0 + +#define SSS_MIN_CEQ_DEPTH 64 +#define SSS_MAX_CEQ_DEPTH \ + ((SSS_MAX_EQ_PAGE_SIZE / SSS_CEQE_SIZE) * SSS_CEQ_MAX_PAGE) + +#define SSS_GET_CEQ_ELEM(ceq, id) ((u32 *)SSS_GET_EQ_ELEM((ceq), (id))) + +#define SSS_GET_CUR_CEQ_ELEM(ceq) SSS_GET_CEQ_ELEM((ceq), (ceq)->ci) + +#define SSS_CEQE_TYPE_SHIFT 23 +#define SSS_CEQE_TYPE_MASK 0x7 + +#define SSS_CEQE_TYPE(type) \ + (((type) >> SSS_CEQE_TYPE_SHIFT) & SSS_CEQE_TYPE_MASK) + +#define SSS_CEQE_DATA_MASK 0x3FFFFFF +#define SSS_CEQE_DATA(data) ((data) & SSS_CEQE_DATA_MASK) + +#define SSS_CEQ_TO_INFO(eq) \ + container_of((eq) - (eq)->qid, struct sss_ceq_info, ceq[0]) + +#define CEQ_LMT_KICK_DEF 0 + +enum sss_ceq_cb_state { + SSS_CEQ_CB_REG = 0, + SSS_CEQ_CB_RUNNING, +}; + +static u32 ceq_depth = SSS_DEF_CEQ_DEPTH; +module_param(ceq_depth, uint, 0444); +MODULE_PARM_DESC(ceq_depth, + "ceq depth, valid range is " __stringify(SSS_MIN_CEQ_DEPTH) + " - " __stringify(SSS_MAX_CEQ_DEPTH)); + 
+static u32 tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; +module_param(tasklet_depth, uint, 0444); +MODULE_PARM_DESC(tasklet_depth, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +void sss_init_ceqe_desc(void *data) +{ + u32 i; + u32 init_val; + u32 *ceqe = NULL; + struct sss_eq *ceq = (struct sss_eq *)data; + + init_val = cpu_to_be32(SSS_EQ_WRAPPED(ceq)); + for (i = 0; i < ceq->len; i++) { + ceqe = SSS_GET_CEQ_ELEM(ceq, i); + *(ceqe) = init_val; + } + + /* write all ceq desc */ + wmb(); +} + +static u32 sss_chip_init_ceq_attr(void *data) +{ + u32 val; + u32 len; + struct sss_eq *ceq = (struct sss_eq *)data; + struct sss_hwif *hwif = SSS_TO_HWDEV(ceq)->hwif; + + val = SSS_SET_CEQ_CTRL_0(SSS_EQ_IRQ_ID(ceq), INTR_ID) | + SSS_SET_CEQ_CTRL_0(SSS_CEQ_DMA_ATTR_DEF, DMA_ATTR) | + SSS_SET_CEQ_CTRL_0(CEQ_LMT_KICK_DEF, LIMIT_KICK) | + SSS_SET_CEQ_CTRL_0(SSS_GET_HWIF_PCI_INTF_ID(hwif), PCI_INTF_ID) | + SSS_SET_CEQ_CTRL_0(SSS_SET_EQ_HW_PAGE_SIZE(ceq), PAGE_SIZE) | + SSS_SET_CEQ_CTRL_0(SSS_INTR_MODE_ARMED, INTR_MODE); + len = SSS_SET_CEQ_CTRL_1(ceq->len, LEN); + + return sss_chip_set_ceq_attr(SSS_TO_HWDEV(ceq), ceq->qid, val, len); +} + +irqreturn_t sss_ceq_intr_handle(int irq, void *data) +{ + struct sss_eq *ceq = (struct sss_eq *)data; + + ceq->hw_intr_jiffies = jiffies; + + sss_chip_clear_msix_resend_bit(ceq->hwdev, SSS_EQ_IRQ_ID(ceq), + SSS_EQ_MSIX_RESEND_TIMER_CLEAR); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static void sss_ceqe_handler(struct sss_eq *ceq, u32 ceqe) +{ + u32 ceqe_data = SSS_CEQE_DATA(ceqe); + enum sss_ceq_event ceq_event = SSS_CEQE_TYPE(ceqe); + struct sss_ceq_info *ceq_info = SSS_CEQ_TO_INFO(ceq); + + if (ceq_event >= SSS_CEQ_EVENT_MAX) { + sdk_err(SSS_TO_HWDEV(ceq)->dev_hdl, "Unknown ceq_event:%d, ceqe_data: 0x%x\n", + ceq_event, ceqe_data); + return; + } + + set_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); + + if (ceq_info->event_handler[ceq_event] && + test_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event])) + ceq_info->event_handler[ceq_event](ceq_info->event_handler_data[ceq_event], + ceqe_data); + + clear_bit(SSS_CEQ_CB_RUNNING, &ceq_info->event_handler_state[ceq_event]); +} + +static bool sss_ceq_irq_handle(struct sss_eq *ceq) +{ + u32 elem; + u32 eqe_cnt = 0; + u32 i; + + for (i = 0; i < tasklet_depth; i++) { + elem = *(SSS_GET_CUR_CEQ_ELEM(ceq)); + elem = be32_to_cpu(elem); + + /* HW updates wrap bit, when it adds eq element event */ + if (SSS_GET_EQE_DESC(elem, WRAPPED) == ceq->wrap) + return false; + + sss_ceqe_handler(ceq, elem); + + sss_increase_eq_ci(ceq); + + if (++eqe_cnt >= SSS_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + sss_chip_set_eq_ci(ceq, SSS_EQ_NOT_ARMED); + } + } + + return true; +} + +static void sss_ceq_tasklet(ulong ceq_data) +{ + bool unfinish; + struct sss_eq *ceq = (struct sss_eq *)ceq_data; + + ceq->sw_intr_jiffies = jiffies; + unfinish = sss_ceq_irq_handle(ceq); + sss_chip_set_eq_ci(ceq, SSS_EQ_ARM_STATE(unfinish)); + + if (unfinish) + tasklet_schedule(&ceq->ceq_tasklet); +} + +static void sss_init_ceq_para(struct sss_eq *ceq, u16 qid) +{ + ceq->init_desc_handler = sss_init_ceqe_desc; + ceq->init_attr_handler = sss_chip_init_ceq_attr; + ceq->irq_handler = sss_ceq_intr_handle; + ceq->name = SSS_CEQ_NAME; + tasklet_init(&ceq->ceq_tasklet, sss_ceq_tasklet, (ulong)ceq); + + ceq->qid = qid; + ceq->len = ceq_depth; + ceq->type = SSS_CEQ; + ceq->entry_size = SSS_CEQE_SIZE; +} + +static int sss_init_ceq(struct sss_hwdev *hwdev, + struct sss_irq_desc *irq_array, u16 
irq_num) +{ + u16 i; + u16 qid; + int ret; + struct sss_ceq_info *ceq_info = NULL; + + ceq_info = kzalloc(sizeof(*ceq_info), GFP_KERNEL); + if (!ceq_info) + return -ENOMEM; + + ceq_info->hwdev = hwdev; + ceq_info->num = irq_num; + hwdev->ceq_info = ceq_info; + + if (tasklet_depth == 0) { + sdk_warn(hwdev->dev_hdl, + "Invalid tasklet_depth can not be zero, adjust to %d\n", + SSS_TASK_PROCESS_EQE_LIMIT); + tasklet_depth = SSS_TASK_PROCESS_EQE_LIMIT; + } + + if (ceq_depth < SSS_MIN_CEQ_DEPTH || ceq_depth > SSS_MAX_CEQ_DEPTH) { + sdk_warn(hwdev->dev_hdl, + "Invalid ceq_depth %u out of range, adjust to %d\n", + ceq_depth, SSS_DEF_CEQ_DEPTH); + ceq_depth = SSS_DEF_CEQ_DEPTH; + } + + for (qid = 0; qid < irq_num; qid++) { + sss_init_ceq_para(&ceq_info->ceq[qid], qid); + ret = sss_init_eq(hwdev, &ceq_info->ceq[qid], &irq_array[qid]); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq %u\n", qid); + goto init_ceq_err; + } + } + + for (qid = 0; qid < irq_num; qid++) + sss_chip_set_msix_state(hwdev, irq_array[qid].msix_id, SSS_MSIX_ENABLE); + + return 0; + +init_ceq_err: + for (i = 0; i < qid; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + kfree(ceq_info); + hwdev->ceq_info = NULL; + + return ret; +} + +static void sss_get_ceq_irq(struct sss_hwdev *hwdev, struct sss_irq_desc *irq, + u16 *irq_num) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + + for (i = 0; i < ceq_info->num; i++) { + irq[i].msix_id = ceq_info->ceq[i].irq_desc.msix_id; + irq[i].irq_id = ceq_info->ceq[i].irq_desc.irq_id; + } + + *irq_num = ceq_info->num; +} + +int sss_hwif_init_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + u16 ceq_num; + u16 act_num = 0; + int ret; + struct sss_irq_desc irq_desc[SSS_MAX_CEQ] = {0}; + + ceq_num = SSS_GET_HWIF_CEQ_NUM(hwdev->hwif); + if (ceq_num > SSS_MAX_CEQ) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", SSS_MAX_CEQ); + ceq_num = SSS_MAX_CEQ; + } + + act_num = sss_alloc_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc, ceq_num); + if (act_num == 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq, ceq_num: %u\n", ceq_num); + return -EINVAL; + } + + if (act_num < ceq_num) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", act_num); + ceq_num = act_num; + } + + ret = sss_init_ceq(hwdev, irq_desc, ceq_num); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq, ret:%d\n", ret); + goto init_ceq_err; + } + + return 0; + +init_ceq_err: + for (i = 0; i < act_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq_desc[i].irq_id); + + return ret; +} + +static void sss_deinit_ceq(struct sss_hwdev *hwdev) +{ + u16 i; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + enum sss_ceq_event event; + + for (i = 0; i < ceq_info->num; i++) + sss_deinit_eq(&ceq_info->ceq[i]); + + for (event = SSS_NIC_CTRLQ; event < SSS_CEQ_EVENT_MAX; event++) + sss_ceq_unregister_cb(hwdev, event); + + kfree(ceq_info); + hwdev->ceq_info = NULL; +} + +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev) +{ + int i; + u16 irq_num = 0; + struct sss_irq_desc irq[SSS_MAX_CEQ] = {0}; + + sss_get_ceq_irq(hwdev, irq, &irq_num); + + sss_deinit_ceq(hwdev); + + for (i = 0; i < irq_num; i++) + sss_free_irq(hwdev, SSS_SERVICE_TYPE_INTF, irq[i].irq_id); +} + +void sss_dump_ceq_info(struct sss_hwdev *hwdev) +{ + struct sss_eq *ceq_info = NULL; + u32 addr; + u32 ci; + u32 pi; + int qid; + + for (qid = 0; qid < hwdev->ceq_info->num; qid++) { + ceq_info = &hwdev->ceq_info->ceq[qid]; + /* Indirect access should set qid first */ + sss_chip_write_reg(SSS_TO_HWDEV(ceq_info)->hwif, + SSS_EQ_INDIR_ID_ADDR(ceq_info->type), 
ceq_info->qid); + wmb(); /* make sure set qid firstly */ + + addr = SSS_EQ_CI_REG_ADDR(ceq_info); + ci = sss_chip_read_reg(hwdev->hwif, addr); + addr = SSS_EQ_PI_REG_ADDR(ceq_info); + pi = sss_chip_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, + "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", + qid, ci, ceq_info->ci, pi, tasklet_state(&ceq_info->ceq_tasklet), + ceq_info->wrap, be32_to_cpu(*(SSS_GET_CUR_CEQ_ELEM(ceq_info)))); + + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->hw_intr_jiffies)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - ceq_info->sw_intr_jiffies)); + } + + sss_dump_chip_err_info(hwdev); +} + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return -EINVAL; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + ceq_info->event_handler_data[ceq_event] = data; + ceq_info->event_handler[ceq_event] = event_handler; + set_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + + return 0; +} + +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event) +{ + struct sss_ceq_info *ceq_info = NULL; + + if (!hwdev || ceq_event >= SSS_CEQ_EVENT_MAX) + return; + + ceq_info = SSS_TO_CEQ_INFO(hwdev); + clear_bit(SSS_CEQ_CB_REG, &ceq_info->event_handler_state[ceq_event]); + while (test_bit(SSS_CEQ_CB_RUNNING, + &ceq_info->event_handler_state[ceq_event])) + usleep_range(SSS_EQ_USLEEP_LOW_LIMIT, SSS_EQ_USLEEP_HIG_LIMIT); + ceq_info->event_handler[ceq_event] = NULL; +} + +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev) +{ + u16 i; + int ret; + struct sss_ceq_info *ceq_info = hwdev->ceq_info; + struct sss_irq_cfg intr_info = {0}; + + sss_init_eq_intr_info(&intr_info); + + for (i = 0; i < ceq_info->num; i++) { + intr_info.msix_id = SSS_EQ_IRQ_ID(&ceq_info->ceq[i]); + ret = sss_chip_set_msix_attr(hwdev, intr_info, SSS_CHANNEL_COMM); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set msix attr for ceq %u\n", i); + return -EFAULT; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h new file mode 100644 index 0000000000000000000000000000000000000000..29e65016b117016cc67c57706d9c3ebc2641d069 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ceq.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CEQ_H +#define SSS_HWIF_CEQ_H + +#include "sss_hw_ceq.h" +#include "sss_ceq_info.h" +#include "sss_hwdev.h" + +int sss_ceq_register_cb(void *hwdev, void *data, + enum sss_ceq_event ceq_event, sss_ceq_event_handler_t event_handler); +void sss_ceq_unregister_cb(void *hwdev, enum sss_ceq_event ceq_event); +int sss_hwif_init_ceq(struct sss_hwdev *hwdev); +void sss_hwif_deinit_ceq(struct sss_hwdev *hwdev); +void sss_dump_ceq_info(struct sss_hwdev *hwdev); +int sss_init_ceq_msix_attr(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c new file mode 100644 index 0000000000000000000000000000000000000000..43386b7984b9da834a16c4e5ccdb330885010eab --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.c @@ -0,0 +1,928 @@ +// SPDX-License-Identifier: GPL-2.0 
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_hwif_aeq.h" +#include "sss_hwif_ceq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_CMD_TIMEOUT 5000 /* millisecond */ + +#define SSS_CTRLQ_WQE_HEAD_LEN 32 + +#define SSS_HI_8_BITS(data) (((data) >> 8) & 0xFF) +#define SSS_LO_8_BITS(data) ((data) & 0xFF) + +#define SSS_CTRLQ_DB_INFO_HI_PI_SHIFT 0 +#define SSS_CTRLQ_DB_INFO_HI_PI_MASK 0xFFU +#define SSS_CTRLQ_DB_INFO_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_INFO_##member##_MASK) << \ + SSS_CTRLQ_DB_INFO_##member##_SHIFT) + +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_SHIFT 24 +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_SHIFT 27 +#define SSS_CTRLQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U +#define SSS_CTRLQ_DB_HEAD_CTRLQ_TYPE_MASK 0x7U +#define SSS_CTRLQ_DB_HEAD_SRC_TYPE_MASK 0x1FU +#define SSS_CTRLQ_DB_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_DB_HEAD_##member##_MASK) << \ + SSS_CTRLQ_DB_HEAD_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_PI_SHIFT 0 +#define SSS_CTRLQ_CTRL_CMD_SHIFT 16 +#define SSS_CTRLQ_CTRL_MOD_SHIFT 24 +#define SSS_CTRLQ_CTRL_ACK_TYPE_SHIFT 29 +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_CTRL_PI_MASK 0xFFFFU +#define SSS_CTRLQ_CTRL_CMD_MASK 0xFFU +#define SSS_CTRLQ_CTRL_MOD_MASK 0x1FU +#define SSS_CTRLQ_CTRL_ACK_TYPE_MASK 0x3U +#define SSS_CTRLQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_CTRL_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_CTRL_##member##_MASK) << \ + SSS_CTRLQ_CTRL_##member##_SHIFT) + +#define SSS_CTRLQ_CTRL_GET(val, member) \ + (((val) >> SSS_CTRLQ_CTRL_##member##_SHIFT) & \ + SSS_CTRLQ_CTRL_##member##_MASK) + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_SHIFT 0 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_SHIFT 15 +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_SHIFT 22 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_SHIFT 23 +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_SHIFT 27 +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_SHIFT 29 +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_SHIFT 31 + +#define SSS_CTRLQ_WQE_HEAD_BD_LEN_MASK 0xFFU +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_DATA_FMT_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_REQ_MASK 0x1U +#define SSS_CTRLQ_WQE_HEAD_COMPLETE_SECT_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_CTRL_LEN_MASK 0x3U +#define SSS_CTRLQ_WQE_HEAD_HW_BUSY_BIT_MASK 0x1U + +#define SSS_CTRLQ_WQE_HEAD_SET(val, member) \ + ((((u32)(val)) & SSS_CTRLQ_WQE_HEAD_##member##_MASK) << \ + SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) + +#define SSS_GET_CTRLQ_WQE_HEAD(val, member) \ + (((val) >> SSS_CTRLQ_WQE_HEAD_##member##_SHIFT) & \ + SSS_CTRLQ_WQE_HEAD_##member##_MASK) + +#define SSS_STORE_DATA_ARM_SHIFT 31 + +#define SSS_STORE_DATA_ARM_MASK 0x1U + +#define SSS_STORE_DATA_SET(val, member) \ + (((val) & SSS_STORE_DATA_##member##_MASK) << \ + SSS_STORE_DATA_##member##_SHIFT) + +#define SSS_STORE_DATA_CLEAR(val, member) \ + ((val) & (~(SSS_STORE_DATA_##member##_MASK << \ + SSS_STORE_DATA_##member##_SHIFT))) + +#define SSS_WQE_ERRCODE_VAL_SHIFT 0 + +#define SSS_WQE_ERRCODE_VAL_MASK 0x7FFFFFFF + +#define SSS_GET_WQE_ERRCODE(val, member) \ + (((val) >> SSS_WQE_ERRCODE_##member##_SHIFT) & \ + SSS_WQE_ERRCODE_##member##_MASK) + +#define SSS_CEQE_CTRLQ_TYPE_SHIFT 0 + 
+#define SSS_CEQE_CTRLQ_TYPE_MASK 0x7 + +#define SSS_GET_CEQE_CTRLQ(val, member) \ + (((val) >> SSS_CEQE_CTRLQ_##member##_SHIFT) & \ + SSS_CEQE_CTRLQ_##member##_MASK) + +#define SSS_WQE_COMPLETE(ctrl_info) SSS_CTRLQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define SSS_WQE_HEAD(wqe) ((struct sss_ctrlq_head *)(wqe)) + +#define SSS_CTRLQ_DB_PI_OFF(pi) (((u16)SSS_LO_8_BITS(pi)) << 3) + +#define SSS_CTRLQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base)) + SSS_CTRLQ_DB_PI_OFF(pi)) + +#define SSS_FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define SSS_WQE_LCMD_SIZE 64 +#define SSS_WQE_SCMD_SIZE 64 + +#define SSS_COMPLETE_LEN 3 + +#define SSS_CTRLQ_WQE_SIZE 64 + +#define SSS_CTRLQ_TO_INFO(ctrlq) \ + container_of((ctrlq) - (ctrlq)->ctrlq_type, struct sss_ctrlq_info, ctrlq[0]) + +#define SSS_CTRLQ_COMPLETE_CODE 11 + +enum SSS_ctrlq_scmd_type { + SSS_CTRLQ_SET_ARM_CMD = 2, +}; + +enum sss_ctrl_sect_len { + SSS_CTRL_SECT_LEN = 1, + SSS_CTRL_DIRECT_SECT_LEN = 2, +}; + +enum sss_bd_len { + SSS_BD_LCMD_LEN = 2, + SSS_BD_SCMD_LEN = 3, +}; + +enum sss_data_fmt { + SSS_DATA_SGE, + SSS_DATA_DIRECT, +}; + +enum sss_completion_fmt { + SSS_COMPLETE_DIRECT, + SSS_COMPLETE_SGE, +}; + +enum sss_completion_request { + SSS_CEQ_SET = 1, +}; + +enum sss_ctrlq_comm_msg_type { + SSS_SYNC_MSG_DIRECT_REPLY, + SSS_SYNC_MSG_SGE_REPLY, + SSS_ASYNC_MSG, +}; + +#define SSS_SCMD_DATA_LEN 16 + +enum sss_db_src_type { + SSS_DB_SRC_CTRLQ_TYPE, + SSS_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum sss_ctrlq_db_type { + SSS_DB_SQ_RQ_TYPE, + SSS_DB_CTRLQ_TYPE, +}; + +struct sss_ctrlq_db { + u32 head; + u32 info; +}; + +/* hardware define: ctrlq wqe */ +struct sss_ctrlq_head { + u32 info; + u32 store_data; +}; + +struct sss_scmd_bd { + u32 data_len; + u32 rsvd; + u8 data[SSS_SCMD_DATA_LEN]; +}; + +struct sss_lcmd_bd { + struct sss_sge sge; + u32 rsvd1; + u64 store_async_buf; + u64 rsvd3; +}; + +struct sss_wqe_state { + u32 info; +}; + +struct sss_wqe_ctrl { + u32 info; +}; + +struct sss_sge_reply { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_ctrlq_completion { + union { + struct sss_sge_reply sge_reply; + u64 direct_reply; + }; +}; + +struct sss_ctrlq_wqe_scmd { + struct sss_ctrlq_head head; + u64 rsvd; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_scmd_bd bd; +}; + +struct sss_ctrlq_wqe_lcmd { + struct sss_ctrlq_head head; + struct sss_wqe_state state; + struct sss_wqe_ctrl ctrl; + struct sss_ctrlq_completion completion; + struct sss_lcmd_bd bd; +}; + +struct sss_ctrlq_inline_wqe { + struct sss_ctrlq_wqe_scmd wqe_scmd; +}; + +struct sss_ctrlq_wqe { + union { + struct sss_ctrlq_inline_wqe inline_wqe; + struct sss_ctrlq_wqe_lcmd wqe_lcmd; + }; +}; + +typedef int (*sss_ctrlq_type_handler_t)(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci); + +void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci) +{ + if (sss_wq_is_empty(wq)) + return NULL; + + return sss_wq_read_one_wqebb(wq, ci); +} + +static void *sss_ctrlq_get_wqe(struct sss_wq *wq, u16 *pi) +{ + if (!sss_wq_free_wqebb(wq)) + return NULL; + + return sss_wq_get_one_wqebb(wq, pi); +} + +static void sss_ctrlq_set_completion(struct sss_ctrlq_completion *complete, + struct sss_ctrl_msg_buf *out_buf) +{ + struct sss_sge_reply *sge_reply = &complete->sge_reply; + + sss_set_sge(&sge_reply->sge, out_buf->dma_addr, SSS_CTRLQ_BUF_LEN); +} + +static void sss_ctrlq_set_lcmd_bufdesc(struct sss_ctrlq_wqe_lcmd *wqe, + struct sss_ctrl_msg_buf *in_buf) +{ + sss_set_sge(&wqe->bd.sge, in_buf->dma_addr, in_buf->size); +} + +static void 
sss_ctrlq_fill_db(struct sss_ctrlq_db *db, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + db->info = SSS_CTRLQ_DB_INFO_SET(SSS_HI_8_BITS(pi), HI_PI); + + db->head = SSS_CTRLQ_DB_HEAD_SET(SSS_DB_CTRLQ_TYPE, QUEUE_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(ctrlq_type, CTRLQ_TYPE) | + SSS_CTRLQ_DB_HEAD_SET(SSS_DB_SRC_CTRLQ_TYPE, SRC_TYPE); +} + +static void sss_ctrlq_set_db(struct sss_ctrlq *ctrlq, + enum sss_ctrlq_type ctrlq_type, u16 pi) +{ + struct sss_ctrlq_db db = {0}; + u8 *db_base = SSS_TO_HWDEV(ctrlq)->ctrlq_info->db_base; + + sss_ctrlq_fill_db(&db, ctrlq_type, pi); + + /* The data that is written to HW should be in Big Endian Format */ + db.info = sss_hw_be32(db.info); + db.head = sss_hw_be32(db.head); + + wmb(); /* make sure write db info to reg */ + writeq(*((u64 *)&db), SSS_CTRLQ_DB_ADDR(db_base, pi)); +} + +static void sss_ctrlq_fill_wqe(void *dst, const void *src) +{ + memcpy((u8 *)dst + SSS_FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + SSS_FIRST_DATA_TO_WRITE_LAST, + SSS_CTRLQ_WQE_SIZE - SSS_FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void sss_ctrlq_prepare_wqe_ctrl(struct sss_ctrlq_wqe *wqe, + int wrapped, u8 mod, u8 cmd, u16 pi, + enum sss_completion_fmt complete_fmt, + enum sss_data_fmt data_fmt, + enum sss_bd_len buf_len) +{ + struct sss_wqe_ctrl *ctrl = NULL; + enum sss_ctrl_sect_len ctrl_len; + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = NULL; + struct sss_ctrlq_wqe_scmd *wqe_scmd = NULL; + u32 saved_data = SSS_WQE_HEAD(wqe)->store_data; + + if (data_fmt == SSS_DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->state.info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = SSS_CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->state.info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = SSS_CTRL_DIRECT_SECT_LEN; + } + + ctrl->info = SSS_CTRLQ_CTRL_SET(pi, PI) | + SSS_CTRLQ_CTRL_SET(cmd, CMD) | + SSS_CTRLQ_CTRL_SET(mod, MOD) | + SSS_CTRLQ_CTRL_SET(SSS_ACK_TYPE_CTRLQ, ACK_TYPE); + + SSS_WQE_HEAD(wqe)->info = + SSS_CTRLQ_WQE_HEAD_SET(buf_len, BD_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(complete_fmt, COMPLETE_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(data_fmt, DATA_FMT) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_CEQ_SET, COMPLETE_REQ) | + SSS_CTRLQ_WQE_HEAD_SET(SSS_COMPLETE_LEN, COMPLETE_SECT_LEN) | + SSS_CTRLQ_WQE_HEAD_SET(ctrl_len, CTRL_LEN) | + SSS_CTRLQ_WQE_HEAD_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == SSS_CTRLQ_SET_ARM_CMD && mod == SSS_MOD_TYPE_COMM) { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data | + SSS_STORE_DATA_SET(1, ARM); + } else { + saved_data &= SSS_STORE_DATA_CLEAR(saved_data, ARM); + SSS_WQE_HEAD(wqe)->store_data = saved_data; + } +} + +static void sss_ctrlq_set_lcmd_wqe(struct sss_ctrlq_wqe *wqe, + enum sss_ctrlq_comm_msg_type cmd_type, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, int wrapped, + u8 mod, u8 cmd, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum sss_completion_fmt complete_fmt = SSS_COMPLETE_DIRECT; + + switch (cmd_type) { + case SSS_SYNC_MSG_DIRECT_REPLY: + wqe_lcmd->completion.direct_reply = 0; + break; + case SSS_SYNC_MSG_SGE_REPLY: + if (out_buf) { + complete_fmt = SSS_COMPLETE_SGE; + sss_ctrlq_set_completion(&wqe_lcmd->completion, out_buf); + } + break; + case SSS_ASYNC_MSG: + wqe_lcmd->completion.direct_reply = 0; + wqe_lcmd->bd.store_async_buf = (u64)(in_buf); + break; + } + + sss_ctrlq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, pi, complete_fmt, + SSS_DATA_SGE, SSS_BD_LCMD_LEN); 
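+	/* Point the WQE buffer descriptor SGE at the caller's DMA input buffer. */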
+ + sss_ctrlq_set_lcmd_bufdesc(wqe_lcmd, in_buf); +} + +static void sss_ctrlq_update_cmd_state(struct sss_ctrlq *ctrlq, u16 pi, + struct sss_ctrlq_wqe *wqe) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[pi]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 state = sss_hw_cpu32(lcmd->state.info); + + if (info->direct_resp) + *info->direct_resp = + sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) + *info->err_code = SSS_GET_WQE_ERRCODE(state, VAL); +} + +static int sss_ctrlq_check_sync_timeout(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 pi) +{ + struct sss_ctrlq_wqe_lcmd *wqe_lcmd; + struct sss_wqe_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = sss_hw_cpu32((ctrl)->info); + if (!SSS_WQE_COMPLETE(ctrl_info)) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe do not complete\n"); + return -EFAULT; + } + + sss_ctrlq_update_cmd_state(ctrlq, pi, wqe); + + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Success to check ctrlq sync cmd\n"); + return 0; +} + +static void sss_reset_cmd_info(struct sss_ctrlq_cmd_info *cmd_info, + const struct sss_ctrlq_cmd_info *store_cmd_info) +{ + if (cmd_info->err_code == store_cmd_info->err_code) + cmd_info->err_code = NULL; + + if (cmd_info->done == store_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == store_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int sss_ctrlq_ceq_handler_state(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, + u32 timeout) +{ + ulong timeo; + int ret; + ulong end = jiffies + msecs_to_jiffies(timeout); + + if (SSS_TO_HWDEV(ctrlq)->poll) { + while (time_before(jiffies, end)) { + sss_ctrlq_ceq_handler(SSS_TO_HWDEV(ctrlq), 0); + if (store_cmd_info->done->done != 0) + return 0; + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies(timeout); + if (wait_for_completion_timeout(store_cmd_info->done, timeo)) + return 0; + } + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if (cmd_info->cmpt_code == store_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*store_cmd_info->cmpt_code == SSS_CTRLQ_COMPLETE_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq direct sync command complete\n"); + spin_unlock_bh(&ctrlq->ctrlq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->msg_id) { + ret = sss_ctrlq_check_sync_timeout(ctrlq, curr_wqe, curr_pi); + if (ret != 0) + cmd_info->msg_type = SSS_MSG_TYPE_TIMEOUT; + else + cmd_info->msg_type = SSS_MSG_TYPE_PSEUDO_TIMEOUT; + } else { + ret = -ETIMEDOUT; + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync command curr_msg_id dismatch with cmd_info msg_id\n"); + } + + sss_reset_cmd_info(cmd_info, store_cmd_info); + + spin_unlock_bh(&ctrlq->ctrlq_lock); + + if (ret == 0) + return 0; + + sss_dump_ceq_info(SSS_TO_HWDEV(ctrlq)); + + return -ETIMEDOUT; +} + +static int sss_wait_ctrlq_sync_cmd_completion(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_cmd_info *cmd_info, + struct sss_ctrlq_cmd_info *store_cmd_info, + u64 curr_msg_id, u16 curr_pi, + struct sss_ctrlq_wqe *curr_wqe, u32 timeout) +{ + return sss_ctrlq_ceq_handler_state(ctrlq, cmd_info, store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, timeout); +} + +static int sss_ctrlq_msg_lock(struct sss_ctrlq *ctrlq, u16 channel) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_CTRLQ_TO_INFO(ctrlq); + + spin_lock_bh(&ctrlq->ctrlq_lock); + + if 
(ctrlq_info->lock_channel_en && test_bit(channel, &ctrlq_info->channel_stop)) { + spin_unlock_bh(&ctrlq->ctrlq_lock); + return -EAGAIN; + } + + return 0; +} + +static void sss_ctrlq_msg_unlock(struct sss_ctrlq *ctrlq) +{ + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_ctrlq_set_cmd_buf(struct sss_ctrlq_cmd_info *cmd_info, + struct sss_hwdev *hwdev, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf) +{ + cmd_info->in_buf = in_buf; + cmd_info->out_buf = out_buf; + + if (in_buf) + atomic_inc(&in_buf->ref_cnt); + + if (out_buf) + atomic_inc(&out_buf->ref_cnt); +} + +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL; + struct sss_ctrlq_wqe wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL; + struct sss_ctrlq_cmd_info store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped; + int errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_DIRECT_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, NULL); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_DIRECT_REPLY, in_buf, NULL, + wrapped, mod, cmd, curr_pi); + + /* CTRLQ WQE is not shadow, therefore wqe will be written to wq */ + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, SSS_CTRLQ_SYNC, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd direct resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? 
ret : errcode; +} + +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + struct sss_wq *wq = &ctrlq->wq; + struct sss_ctrlq_wqe *curr_wqe = NULL, wqe; + struct sss_ctrlq_cmd_info *cmd_info = NULL, store_cmd_info; + struct completion done; + u16 curr_pi, next_pi; + int wrapped, errcode = 0; + int cmpt_code = SSS_CTRLQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int ret; + u32 real_timeout; + + ret = sss_ctrlq_msg_lock(ctrlq, channel); + if (ret != 0) + return ret; + + curr_wqe = sss_ctrlq_get_wqe(wq, &curr_pi); + if (!curr_wqe) { + sss_ctrlq_msg_unlock(ctrlq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = ctrlq->wrapped; + + next_pi = curr_pi + SSS_WQEBB_NUM_FOR_CTRLQ; + if (next_pi >= wq->q_depth) { + ctrlq->wrapped = (ctrlq->wrapped == 0) ? 1 : 0; + next_pi -= (u16)wq->q_depth; + } + + cmd_info = &ctrlq->cmd_info[curr_pi]; + + init_completion(&done); + + cmd_info->msg_type = SSS_MSG_TYPE_SGE_RESP; + cmd_info->done = &done; + cmd_info->err_code = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + cmd_info->channel = channel; + sss_ctrlq_set_cmd_buf(cmd_info, SSS_TO_HWDEV(ctrlq), in_buf, out_buf); + + memcpy(&store_cmd_info, cmd_info, sizeof(*cmd_info)); + + sss_ctrlq_set_lcmd_wqe(&wqe, SSS_SYNC_MSG_SGE_REPLY, in_buf, out_buf, + wrapped, mod, cmd, curr_pi); + + sss_ctrlq_fill_wqe(curr_wqe, &wqe); + + (cmd_info->msg_id)++; + curr_msg_id = cmd_info->msg_id; + + sss_ctrlq_set_db(ctrlq, ctrlq->ctrlq_type, next_pi); + + sss_ctrlq_msg_unlock(ctrlq); + + real_timeout = timeout ? timeout : SSS_CTRLQ_CMD_TIMEOUT; + ret = sss_wait_ctrlq_sync_cmd_completion(ctrlq, cmd_info, &store_cmd_info, + curr_msg_id, curr_pi, curr_wqe, real_timeout); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq)->dev_hdl, + "Ctrlq sync cmd detail resp timeout, mod: %u, cmd: %u, pi: 0x%x\n", + mod, cmd, curr_pi); + ret = -ETIMEDOUT; + } + + if (cmpt_code == SSS_CTRLQ_FORCE_STOP_CMPT_CODE) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Force stop ctrlq cmd, mod: %u, cmd: %u\n", + mod, cmd); + ret = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (ret != 0) ? ret : errcode; +} + +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *info) +{ + if (info->in_buf) + sss_free_ctrlq_msg_buf(hwdev, info->in_buf); + + if (info->out_buf) + sss_free_ctrlq_msg_buf(hwdev, info->out_buf); + + info->out_buf = NULL; + info->in_buf = NULL; +} + +static void sss_erase_wqe_complete_bit(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *wqe_ctrl = NULL; + u32 head = sss_hw_cpu32(SSS_WQE_HEAD(wqe)->info); + enum sss_data_fmt format = SSS_GET_CTRLQ_WQE_HEAD(head, DATA_FMT); + + wqe_ctrl = (format == SSS_DATA_SGE) ? 
&wqe->wqe_lcmd.ctrl : + &wqe->inline_wqe.wqe_scmd.ctrl; + + wqe_ctrl->info = 0; + ctrlq->cmd_info[ci].msg_type = SSS_MSG_TYPE_NONE; + + /* write ctrlq wqe msg type */ + wmb(); + + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); +} + +static void sss_ctrlq_update_cmd_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_ctrlq_cmd_info *info = &ctrlq->cmd_info[ci]; + struct sss_ctrlq_wqe_lcmd *lcmd = &wqe->wqe_lcmd; + u32 status; + + spin_lock(&ctrlq->ctrlq_lock); + + if (info->direct_resp) + *info->direct_resp = + sss_hw_cpu32(lcmd->completion.direct_reply); + + if (info->err_code) { + status = sss_hw_cpu32(lcmd->state.info); + *info->err_code = SSS_GET_WQE_ERRCODE(status, VAL); + } + + if (info->cmpt_code) { + *info->cmpt_code = SSS_CTRLQ_COMPLETE_CODE; + info->cmpt_code = NULL; + } + + /* read all before set info done */ + smp_rmb(); + + if (info->done) { + complete(info->done); + info->done = NULL; + } + + spin_unlock(&ctrlq->ctrlq_lock); +} + +static int sss_ctrlq_arm_ceq_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_default_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + dma_rmb(); + + sss_ctrlq_update_cmd_info(ctrlq, wqe, ci); + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_async_cmd_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + struct sss_wqe_ctrl *ctrl = &wqe->wqe_lcmd.ctrl; + u32 info = sss_hw_cpu32((ctrl)->info); + + if (!SSS_WQE_COMPLETE(info)) + return -EBUSY; + + dma_rmb(); + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_pseudo_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_timeout_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + u32 i; + u32 *data = (u32 *)wqe; + u32 num = SSS_CTRLQ_WQE_HEAD_LEN / sizeof(u32); + + sdk_warn(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq timeout, ci: %u\n", ci); + + for (i = 0; i < num; i += 0x4) { + sdk_info(SSS_TO_HWDEV(ctrlq)->dev_hdl, "Ctrlq wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 0x1), *(data + i + 0x2), + *(data + i + 0x3)); + } + + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[ci]); + sss_erase_wqe_complete_bit(ctrlq, wqe, ci); + + return 0; +} + +static int sss_ctrlq_force_stop_handler(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_wqe *wqe, u16 ci) +{ + return sss_ctrlq_async_cmd_handler(ctrlq, wqe, ci); +} + +void sss_ctrlq_ceq_handler(void *dev, u32 data) +{ + u16 ci; + int ret; + enum sss_ctrlq_type type = SSS_GET_CEQE_CTRLQ(data, TYPE); + struct sss_ctrlq *ctrlq = &SSS_TO_CTRLQ_INFO(dev)->ctrlq[type]; + struct sss_ctrlq_wqe *ctrlq_wqe = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + sss_ctrlq_type_handler_t handler[] = { + NULL, + sss_ctrlq_arm_ceq_handler, + 
sss_ctrlq_default_handler, + sss_ctrlq_default_handler, + sss_ctrlq_async_cmd_handler, + sss_ctrlq_pseudo_timeout_handler, + sss_ctrlq_timeout_handler, + sss_ctrlq_force_stop_handler, + }; + + while ((ctrlq_wqe = sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) != NULL) { + info = &ctrlq->cmd_info[ci]; + + if (info->msg_type < SSS_MSG_TYPE_NONE || + info->msg_type >= SSS_MSG_TYPE_MAX) { + ret = sss_ctrlq_default_handler(ctrlq, ctrlq_wqe, ci); + if (ret) + break; + + continue; + } + + if (!handler[info->msg_type]) + break; + + ret = handler[info->msg_type](ctrlq, ctrlq_wqe, ci); + if (ret) + break; + } +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h new file mode 100644 index 0000000000000000000000000000000000000000..7544e2f6bc84670d477decdc45619ad287f470cd --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_H +#define SSS_HWIF_CTRLQ_H + +#include "sss_hw_wq.h" + +#define SSS_CTRLQ_BUF_LEN 2048U + +#define SSS_CTRLQ_SEND_CMPT_CODE 10 + +#define SSS_CTRLQ_FORCE_STOP_CMPT_CODE 12 + +#define SSS_WQEBB_NUM_FOR_CTRLQ 1 + +enum sss_ctrlq_state { + SSS_CTRLQ_ENABLE = BIT(0), +}; + +void *sss_ctrlq_read_wqe(struct sss_wq *wq, u16 *ci); +void sss_ctrlq_ceq_handler(void *handle, u32 ceqe_data); +void sss_free_ctrlq_cmd_buf(struct sss_hwdev *hwdev, + struct sss_ctrlq_cmd_info *cmd_info); +int sss_ctrlq_sync_cmd_direct_reply(struct sss_ctrlq *ctrlq, u8 mod, + u8 cmd, struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); +int sss_ctrlq_sync_cmd_detail_reply(struct sss_ctrlq *ctrlq, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c new file mode 100644 index 0000000000000000000000000000000000000000..e9a9660f48bb1beaba18836138e8729370b4f728 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_export.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_ENABLE_TIMEOUT 300 + +static int sss_wait_ctrlq_enable(struct sss_ctrlq_info *ctrlq_info) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(SSS_CTRLQ_ENABLE_TIMEOUT); + do { + if (ctrlq_info->state & SSS_CTRLQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && + SSS_TO_HWDEV(ctrlq_info)->chip_present_flag && + !ctrlq_info->disable_flag); + + ctrlq_info->disable_flag = 1; + + return -EBUSY; +} + +static int sss_check_ctrlq_param(const void *hwdev, const struct sss_ctrl_msg_buf *in_buf) +{ + if (!hwdev || !in_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or in_buf: %p\n", hwdev, in_buf); + return -EINVAL; + } + + if (in_buf->size == 0 || in_buf->size > SSS_CTRLQ_BUF_LEN) { + pr_err("Invalid ctrlq buf size: 0x%x\n", in_buf->size); + return -EINVAL; + } + + return 0; +} + +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev) +{ + struct sss_ctrlq_info *ctrlq_info = NULL; + struct sss_ctrl_msg_buf 
*msg_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Alloc ctrlq msg buf: hwdev is NULL\n"); + return NULL; + } + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + dev = ((struct sss_hwdev *)hwdev)->dev_hdl; + + msg_buf = kzalloc(sizeof(*msg_buf), GFP_ATOMIC); + if (!msg_buf) + return NULL; + + msg_buf->buf = pci_pool_alloc(ctrlq_info->msg_buf_pool, GFP_ATOMIC, + &msg_buf->dma_addr); + if (!msg_buf->buf) { + sdk_err(dev, "Fail to allocate ctrlq pci pool\n"); + goto alloc_pci_buf_err; + } + + msg_buf->size = SSS_CTRLQ_BUF_LEN; + atomic_set(&msg_buf->ref_cnt, 1); + + return msg_buf; + +alloc_pci_buf_err: + kfree(msg_buf); + return NULL; +} +EXPORT_SYMBOL(sss_alloc_ctrlq_msg_buf); + +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf) +{ + struct sss_ctrlq_info *ctrlq_info = SSS_TO_CTRLQ_INFO(hwdev); + + if (!hwdev || !msg_buf) { + pr_err("Invalid ctrlq param: hwdev: %p or msg_buf: %p\n", hwdev, msg_buf); + return; + } + + if (atomic_dec_and_test(&msg_buf->ref_cnt) == 0) + return; + + pci_pool_free(ctrlq_info->msg_buf_pool, msg_buf->buf, msg_buf->dma_addr); + kfree(msg_buf); +} +EXPORT_SYMBOL(sss_free_ctrlq_msg_buf); + +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, u64 *out_param, + u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) { + pr_err("Invalid ctrlq parameters\n"); + return ret; + } + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_direct_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_param, timeout, channel); + + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_direct_reply); + +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel) +{ + int ret; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ret = sss_check_ctrlq_param(hwdev, in_buf); + if (ret != 0) + return ret; + + ctrlq_info = ((struct sss_hwdev *)hwdev)->ctrlq_info; + + if (!sss_chip_get_present_state((struct sss_hwdev *)hwdev)) + return -EPERM; + + ret = sss_wait_ctrlq_enable(ctrlq_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq is disable\n"); + return ret; + } + + ret = sss_ctrlq_sync_cmd_detail_reply(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC], + mod, cmd, in_buf, out_buf, + out_param, timeout, channel); + if (!(((struct sss_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return ret; +} +EXPORT_SYMBOL(sss_ctrlq_detail_reply); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c new file mode 100644 index 0000000000000000000000000000000000000000..2919357a5973d3e548955a3710b4259b4c1e60d6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" 
+#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_export.h" +#include "sss_hwif_ceq.h" +#include "sss_hwif_api.h" +#include "sss_hwif_ctrlq.h" +#include "sss_common.h" + +#define SSS_CTRLQ_DEPTH 4096 + +#define SSS_CTRLQ_PFN_SHIFT 12 +#define SSS_CTRLQ_PFN(addr) ((addr) >> SSS_CTRLQ_PFN_SHIFT) + +#define SSS_CTRLQ_CEQ_ID 0 + +#define SSS_CTRLQ_WQ_CLA_SIZE 512 + +#define SSS_CTRLQ_WQEBB_SIZE 64 + +#define SSS_CTRLQ_IDLE_TIMEOUT 5000 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CEQ_ID_SHIFT 53 +#define SSS_CTRLQ_CTX_CEQ_ARM_SHIFT 61 +#define SSS_CTRLQ_CTX_CEQ_EN_SHIFT 62 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_SHIFT 63 + +#define SSS_CTRLQ_CTX_NOW_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CEQ_ID_MASK 0xFF +#define SSS_CTRLQ_CTX_CEQ_ARM_MASK 0x1 +#define SSS_CTRLQ_CTX_CEQ_EN_MASK 0x1 +#define SSS_CTRLQ_CTX_HW_BUSY_BIT_MASK 0x1 + +#define SSS_SET_CTRLQ_CTX_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_SHIFT 0 +#define SSS_CTRLQ_CTX_CI_SHIFT 52 + +#define SSS_CTRLQ_CTX_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define SSS_CTRLQ_CTX_CI_MASK 0xFFF + +#define SSS_SET_CTRLQ_CTX_BLOCK_INFO(val, member) \ + (((u64)(val) & SSS_CTRLQ_CTX_##member##_MASK) \ + << SSS_CTRLQ_CTX_##member##_SHIFT) + +#define SSS_CTRLQ_CLA_WQ_PAGE_NUM (SSS_CTRLQ_WQ_CLA_SIZE / sizeof(u64)) + +#define SSS_GET_WQ_PAGE_SIZE(page_order) (SSS_HW_WQ_PAGE_SIZE * (1U << (page_order))) + +#define SSS_CTRLQ_DMA_POOL_NAME "sss_ctrlq" + +#define SSS_CTRLQ_WRAP_ENABLE 1 + +#define SSS_SET_WQE_PAGE_PFN(pfn) \ + (SSS_SET_CTRLQ_CTX_INFO(1, CEQ_ARM) | \ + SSS_SET_CTRLQ_CTX_INFO(1, CEQ_EN) | \ + SSS_SET_CTRLQ_CTX_INFO((pfn), NOW_WQE_PAGE_PFN) | \ + SSS_SET_CTRLQ_CTX_INFO(SSS_CTRLQ_CEQ_ID, CEQ_ID) | \ + SSS_SET_CTRLQ_CTX_INFO(1, HW_BUSY_BIT)) + +#define SSS_SET_WQ_BLOCK_PFN(wq, pfn) \ + (SSS_SET_CTRLQ_CTX_BLOCK_INFO((pfn), WQ_BLOCK_PFN) | \ + SSS_SET_CTRLQ_CTX_BLOCK_INFO((u16)(wq)->ci, CI)) + +static u32 wq_page_num = SSS_MAX_WQ_PAGE_NUM; +module_param(wq_page_num, uint, 0444); +MODULE_PARM_DESC(wq_page_num, + "Set wq page num, wq page size is 4K * (2 ^ wq_page_num) - default is 8"); + +static int sss_init_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + + if (SSS_WQ_IS_0_LEVEL_CLA(&ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq)) + return 0; + + /* ctrlq wq's CLA table is up to 512B */ + if (ctrlq_info->ctrlq[SSS_CTRLQ_SYNC].wq.page_num > SSS_CTRLQ_CLA_WQ_PAGE_NUM) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Ctrlq wq page out of range: %lu\n", + SSS_CTRLQ_CLA_WQ_PAGE_NUM); + return -EINVAL; + } + + ctrlq_info->wq_block_vaddr = + dma_zalloc_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + &ctrlq_info->wq_block_paddr, GFP_KERNEL); + if (!ctrlq_info->wq_block_vaddr) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to alloc ctrlq wq block\n"); + return -ENOMEM; + } + + for (i = 0; i < ctrlq_info->num; i++) + memcpy((u8 *)ctrlq_info->wq_block_vaddr + SSS_CTRLQ_WQ_CLA_SIZE * i, + ctrlq_info->ctrlq[i].wq.block_vaddr, + ctrlq_info->ctrlq[i].wq.page_num * sizeof(u64)); + + return 0; +} + +static void sss_deinit_ctrq_block(struct sss_ctrlq_info *ctrlq_info) +{ + if (ctrlq_info->wq_block_vaddr) { + dma_free_coherent(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, PAGE_SIZE, + ctrlq_info->wq_block_vaddr, ctrlq_info->wq_block_paddr); + ctrlq_info->wq_block_vaddr = NULL; + } +} + +static int sss_create_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 i; + int ret; + u8 q_type; + + for (q_type = 0; q_type < 
ctrlq_info->num; q_type++) { + ret = sss_create_wq(SSS_TO_HWDEV(ctrlq_info), &ctrlq_info->ctrlq[q_type].wq, + SSS_CTRLQ_DEPTH, SSS_CTRLQ_WQEBB_SIZE); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(ctrlq_info)->dev_hdl, "Fail to create ctrlq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all ctrlq's wq page addr in one wq block */ + ret = sss_init_ctrq_block(ctrlq_info); + if (ret != 0) + goto destroy_wq; + + return 0; + +destroy_wq: + for (i = 0; i < q_type; i++) + sss_destroy_wq(&ctrlq_info->ctrlq[i].wq); + sss_deinit_ctrq_block(ctrlq_info); + + return ret; +} + +static void sss_destroy_ctrlq_wq(struct sss_ctrlq_info *ctrlq_info) +{ + u8 type; + + sss_deinit_ctrq_block(ctrlq_info); + + for (type = 0; type < ctrlq_info->num; type++) + sss_destroy_wq(&ctrlq_info->ctrlq[type].wq); +} + +static int sss_init_ctrlq_info(struct sss_ctrlq *ctrlq, + struct sss_ctrlq_ctxt_info *ctx, + dma_addr_t wq_block_paddr) +{ + struct sss_wq *wq = &ctrlq->wq; + u64 pfn = SSS_CTRLQ_PFN(wq->page[0].align_paddr); + + ctrlq->cmd_info = kcalloc(ctrlq->wq.q_depth, sizeof(*ctrlq->cmd_info), + GFP_KERNEL); + if (!ctrlq->cmd_info) + return -ENOMEM; + + ctrlq->wrapped = SSS_CTRLQ_WRAP_ENABLE; + spin_lock_init(&ctrlq->ctrlq_lock); + + ctx->curr_wqe_page_pfn = SSS_SET_WQE_PAGE_PFN(pfn); + pfn = SSS_WQ_IS_0_LEVEL_CLA(wq) ? pfn : SSS_CTRLQ_PFN(wq_block_paddr); + ctx->wq_block_pfn = SSS_SET_WQ_BLOCK_PFN(wq, pfn); + + return 0; +} + +static void sss_deinit_ctrlq_info(struct sss_ctrlq *ctrlq) +{ + kfree(ctrlq->cmd_info); +} + +static void sss_flush_ctrlq_sync_cmd(struct sss_ctrlq_cmd_info *info) +{ + if (info->msg_type != SSS_MSG_TYPE_DIRECT_RESP && + info->msg_type != SSS_MSG_TYPE_SGE_RESP) + return; + + info->msg_type = SSS_MSG_TYPE_FORCE_STOP; + + if (info->cmpt_code && *info->cmpt_code == SSS_CTRLQ_SEND_CMPT_CODE) + *info->cmpt_code = SSS_CTRLQ_FORCE_STOP_CMPT_CODE; + + if (info->done) { + complete(info->done); + info->cmpt_code = NULL; + info->direct_resp = NULL; + info->err_code = NULL; + info->done = NULL; + } +} + +static void sss_flush_ctrlq_cmd(struct sss_ctrlq *ctrlq) +{ + u16 ci = 0; + + spin_lock_bh(&ctrlq->ctrlq_lock); + while (sss_ctrlq_read_wqe(&ctrlq->wq, &ci)) { + sss_update_wq_ci(&ctrlq->wq, SSS_WQEBB_NUM_FOR_CTRLQ); + sss_flush_ctrlq_sync_cmd(&ctrlq->cmd_info[ci]); + } + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +static void sss_free_all_ctrlq_cmd_buff(struct sss_ctrlq *ctrlq) +{ + u16 i; + + for (i = 0; i < ctrlq->wq.q_depth; i++) + sss_free_ctrlq_cmd_buf(SSS_TO_HWDEV(ctrlq), &ctrlq->cmd_info[i]); +} + +static int sss_chip_set_ctrlq_ctx(struct sss_hwdev *hwdev, u8 qid, + struct sss_ctrlq_ctxt_info *ctxt) +{ + int ret; + struct sss_cmd_ctrlq_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + memcpy(&cmd_ctx.ctxt, ctxt, sizeof(*ctxt)); + cmd_ctx.ctrlq_id = qid; + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT, + &cmd_ctx, sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(hwdev->dev_hdl, + "Fail to set ctrlq ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_init_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 q_type; + int ret; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ret = sss_chip_set_ctrlq_ctx(hwdev, q_type, &ctrlq_info->ctrlq[q_type].ctrlq_ctxt); + if (ret != 0) + return ret; + } + + 
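+	/* All ctrlq contexts have been programmed; mark the control queue channel enabled. */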
ctrlq_info->disable_flag = 0; + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return 0; +} + +int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev) +{ + u8 ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[ctrlq_type]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[ctrlq_type]); + ctrlq_info->ctrlq[ctrlq_type].wrapped = 1; + sss_wq_reset(&ctrlq_info->ctrlq[ctrlq_type].wq); + } + + return sss_init_ctrlq_ctx(hwdev); +} + +static int sss_init_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + u8 q_type; + int ret = -ENOMEM; + struct sss_ctrlq_info *ctrlq_info = NULL; + + ctrlq_info = kzalloc(sizeof(*ctrlq_info), GFP_KERNEL); + if (!ctrlq_info) + return -ENOMEM; + + ctrlq_info->hwdev = hwdev; + hwdev->ctrlq_info = ctrlq_info; + + if (SSS_SUPPORT_CTRLQ_NUM(hwdev)) { + ctrlq_info->num = hwdev->glb_attr.ctrlq_num; + if (hwdev->glb_attr.ctrlq_num > SSS_MAX_CTRLQ_TYPE) { + sdk_warn(hwdev->dev_hdl, "Adjust ctrlq num to %d\n", SSS_MAX_CTRLQ_TYPE); + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + } else { + ctrlq_info->num = SSS_MAX_CTRLQ_TYPE; + } + + ctrlq_info->msg_buf_pool = dma_pool_create(SSS_CTRLQ_DMA_POOL_NAME, hwdev->dev_hdl, + SSS_CTRLQ_BUF_LEN, SSS_CTRLQ_BUF_LEN, 0ULL); + if (!ctrlq_info->msg_buf_pool) { + sdk_err(hwdev->dev_hdl, "Fail to create ctrlq buffer pool\n"); + goto create_pool_err; + } + + ret = sss_create_ctrlq_wq(ctrlq_info); + if (ret != 0) + goto create_wq_err; + + ret = sss_alloc_db_addr(hwdev, (void __iomem *)&ctrlq_info->db_base); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc doorbell addr\n"); + goto init_db_err; + } + + for (q_type = 0; q_type < ctrlq_info->num; q_type++) { + ctrlq_info->ctrlq[q_type].hwdev = hwdev; + ctrlq_info->ctrlq[q_type].ctrlq_type = q_type; + ret = sss_init_ctrlq_info(&ctrlq_info->ctrlq[q_type], + &ctrlq_info->ctrlq[q_type].ctrlq_ctxt, + ctrlq_info->wq_block_paddr); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq i :%d\n", q_type); + goto init_ctrlq_info_err; + } + } + + ret = sss_init_ctrlq_ctx(hwdev); + if (ret != 0) + goto init_ctrlq_info_err; + + return 0; + +init_ctrlq_info_err: + for (i = 0; i < q_type; i++) + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + + sss_free_db_addr(hwdev, ctrlq_info->db_base); +init_db_err: + sss_destroy_ctrlq_wq(ctrlq_info); +create_wq_err: + dma_pool_destroy(ctrlq_info->msg_buf_pool); +create_pool_err: + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; + + return ret; +} + +void sss_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + u8 i; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + for (i = 0; i < ctrlq_info->num; i++) { + sss_flush_ctrlq_cmd(&ctrlq_info->ctrlq[i]); + sss_free_all_ctrlq_cmd_buff(&ctrlq_info->ctrlq[i]); + sss_deinit_ctrlq_info(&ctrlq_info->ctrlq[i]); + } + + sss_free_db_addr(hwdev, ctrlq_info->db_base); + sss_destroy_ctrlq_wq(ctrlq_info); + + dma_pool_destroy(ctrlq_info->msg_buf_pool); + + kfree(ctrlq_info); + hwdev->ctrlq_info = NULL; +} + +static int sss_set_ctrlq_depth(void *hwdev) +{ + int ret; + struct sss_cmd_root_ctxt cmd_ctx = {0}; + u16 out_len = sizeof(cmd_ctx); + + cmd_ctx.set_ctrlq_depth = 1; + cmd_ctx.ctrlq_depth = (u8)ilog2(SSS_CTRLQ_DEPTH); + cmd_ctx.func_id = sss_get_global_func_id(hwdev); + + ret = sss_sync_send_msg(hwdev, SSS_COMM_MGMT_CMD_SET_VAT, &cmd_ctx, + sizeof(cmd_ctx), &cmd_ctx, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ctx)) { + sdk_err(SSS_TO_DEV(hwdev), + 
"Fail to set ctrlq depth, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ctx.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_hwif_init_ctrlq(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ctrlq\n"); + return ret; + } + + sss_ceq_register_cb(hwdev, hwdev, SSS_NIC_CTRLQ, sss_ctrlq_ceq_handler); + + ret = sss_set_ctrlq_depth(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set ctrlq depth\n"); + goto set_depth_err; + } + + set_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + + return 0; + +set_depth_err: + sss_deinit_ctrlq(hwdev); + + return ret; +} + +static void sss_hwif_deinit_ctrlq(struct sss_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + sss_ceq_unregister_cb(hwdev, SSS_NIC_CTRLQ); + sss_deinit_ctrlq(hwdev); +} + +static bool sss_ctrlq_is_idle(struct sss_ctrlq *ctrlq) +{ + return sss_wq_is_empty(&ctrlq->wq); +} + +static enum sss_process_ret sss_check_ctrlq_stop_handler(void *priv_data) +{ + struct sss_hwdev *hwdev = priv_data; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + enum sss_ctrlq_type ctrlq_type; + + /* Stop waiting when card unpresent */ + if (!hwdev->chip_present_flag) + return SSS_PROCESS_OK; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + return SSS_PROCESS_DOING; + } + + return SSS_PROCESS_OK; +} + +static int sss_init_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + int ret; + + if (wq_page_num > SSS_MAX_WQ_PAGE_NUM) { + sdk_info(hwdev->dev_hdl, + "Invalid wq_page_num %u out of range, adjust to %d\n", + wq_page_num, SSS_MAX_WQ_PAGE_NUM); + wq_page_num = SSS_MAX_WQ_PAGE_NUM; + } + + hwdev->wq_page_size = SSS_GET_WQ_PAGE_SIZE(wq_page_num); + ret = sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + hwdev->wq_page_size); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to set wq page size\n"); + return ret; + } + + return 0; +} + +static void sss_deinit_ctrlq_page_size(struct sss_hwdev *hwdev) +{ + if (SSS_GET_FUNC_TYPE(hwdev) != SSS_FUNC_TYPE_VF) + sss_chip_set_wq_page_size(hwdev, sss_get_global_func_id(hwdev), + SSS_HW_WQ_PAGE_SIZE); +} + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_hwif_init_ceq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwdev ceq.\n"); + return ret; + } + + ret = sss_init_ceq_msix_attr(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init ceq msix attr\n"); + goto init_msix_err; + } + + ret = sss_init_ctrlq_page_size(hwdev); + if (ret != 0) + goto init_size_err; + + ret = sss_hwif_init_ctrlq(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init hwif ctrlq\n"); + goto init_ctrlq_err; + } + + return 0; + +init_ctrlq_err: + sss_deinit_ctrlq_page_size(hwdev); +init_size_err: +init_msix_err: + sss_hwif_deinit_ceq(hwdev); + + return ret; +} + +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev) +{ + sss_hwif_deinit_ctrlq(hwdev); + + sss_deinit_ctrlq_page_size(hwdev); + + sss_hwif_deinit_ceq(hwdev); +} + +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev) +{ + u16 cnt; + u16 ci; + u16 i; + u16 id; + struct sss_wq *wq = NULL; + struct sss_ctrlq *ctrlq = NULL; + struct sss_ctrlq_cmd_info *info = NULL; + + ctrlq = &hwdev->ctrlq_info->ctrlq[SSS_CTRLQ_SYNC]; + + spin_lock_bh(&ctrlq->ctrlq_lock); + wq = &ctrlq->wq; + id = wq->pi + 
wq->q_depth - wq->ci; + cnt = (u16)SSS_WQ_MASK_ID(wq, id); + ci = wq->ci; + + for (i = 0; i < cnt; i++) { + info = &ctrlq->cmd_info[SSS_WQ_MASK_ID(wq, ci + i)]; + sss_flush_ctrlq_sync_cmd(info); + } + + spin_unlock_bh(&ctrlq->ctrlq_lock); +} + +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev) +{ + enum sss_ctrlq_type ctrlq_type; + struct sss_ctrlq_info *ctrlq_info = hwdev->ctrlq_info; + int ret; + + if (!(ctrlq_info->state & SSS_CTRLQ_ENABLE)) + return 0; + + ctrlq_info->state &= ~SSS_CTRLQ_ENABLE; + + ret = sss_check_handler_timeout(hwdev, sss_check_ctrlq_stop_handler, + SSS_CTRLQ_IDLE_TIMEOUT, USEC_PER_MSEC); + if (ret == 0) + return 0; + + for (ctrlq_type = 0; ctrlq_type < ctrlq_info->num; ctrlq_type++) { + if (!sss_ctrlq_is_idle(&ctrlq_info->ctrlq[ctrlq_type])) + sdk_err(hwdev->dev_hdl, "Ctrlq %d is busy\n", ctrlq_type); + } + + ctrlq_info->state |= SSS_CTRLQ_ENABLE; + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h new file mode 100644 index 0000000000000000000000000000000000000000..8aa0788c25bece6af17427080237ce816522a229 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_ctrlq_init.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_CTRLQ_INIT_H +#define SSS_HWIF_CTRLQ_INIT_H + +#include "sss_hwdev.h" + +int sss_init_ctrlq_channel(struct sss_hwdev *hwdev); +void sss_deinit_ctrlq_channel(struct sss_hwdev *hwdev); +int sss_reinit_ctrlq_ctx(struct sss_hwdev *hwdev); +int sss_wait_ctrlq_stop(struct sss_hwdev *hwdev); +void sss_ctrlq_flush_sync_cmd(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c new file mode 100644 index 0000000000000000000000000000000000000000..e2629c727e03642b208322dff1d13dc47e107f21 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_csr.h" +#include "sss_hwif_eq.h" + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_SHIFT 30 +#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_SHIFT 24 + +#define SSS_EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define SSS_EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define SSS_EQ_CI_SIMPLE_INDIR_AEQ_ID_MASK 0x3U +#define SSS_EQ_CI_SIMPLE_INDIR_CEQ_ID_MASK 0xFFU + +#define SSS_SET_EQ_CI_SIMPLE_INDIR(val, member) \ + (((val) & SSS_EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + SSS_EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define SSS_EQ_WRAPPED_SHIFT 20 + +#define SSS_EQ_CI(eq) ((eq)->ci | \ + ((u32)(eq)->wrap << SSS_EQ_WRAPPED_SHIFT)) + +#define SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + SSS_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define SSS_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? \ + SSS_AEQ_PHY_HI_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_HI_ADDR_REG(pg_num))) + +#define SSS_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == SSS_AEQ) ? 
\ + SSS_AEQ_PHY_LO_ADDR_REG(pg_num) : \ + SSS_CEQ_PHY_LO_ADDR_REG(pg_num))) + +#define SSS_GET_EQ_PAGES_NUM(eq, size) \ + ((u16)(ALIGN((u32)((eq)->len * (eq)->entry_size), \ + (size)) / (size))) + +#define SSS_GET_EQ_MAX_PAGES(eq) \ + ((eq)->type == SSS_AEQ ? SSS_AEQ_MAX_PAGE : \ + SSS_CEQ_MAX_PAGE) + +#define SSS_GET_EQE_NUM(eq, pg_size) ((pg_size) / (u32)(eq)->entry_size) + +#define SSS_EQE_NUM_IS_ALIGN(eq) ((eq)->num_entry_per_pg & ((eq)->num_entry_per_pg - 1)) + +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state) +{ + u32 val; + + if (eq->qid != 0 && SSS_TO_HWDEV(eq)->poll) + arm_state = SSS_EQ_NOT_ARMED; + + val = SSS_SET_EQ_CI_SIMPLE_INDIR(arm_state, ARMED) | + SSS_SET_EQ_CI_SIMPLE_INDIR(SSS_EQ_CI(eq), CI); + + if (eq->type == SSS_AEQ) + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, AEQ_ID); + else + val |= SSS_SET_EQ_CI_SIMPLE_INDIR(eq->qid, CEQ_ID); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_CI_SIMPLE_INDIR_REG_ADDR(eq), val); +} + +static void sss_chip_set_eq_page_addr(struct sss_eq *eq, + u16 page_id, struct sss_dma_addr_align *dma_addr) +{ + u32 addr; + + addr = SSS_EQ_HI_PHYS_ADDR_REG(eq->type, page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + upper_32_bits(dma_addr->align_paddr)); + + addr = SSS_EQ_LO_PHYS_ADDR_REG(eq->type, page_id); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, addr, + lower_32_bits(dma_addr->align_paddr)); +} + +static int sss_chip_init_eq_attr(struct sss_eq *eq) +{ + u32 i; + int ret; + + for (i = 0; i < eq->page_num; i++) + sss_chip_set_eq_page_addr(eq, i, &eq->page_array[i]); + + ret = eq->init_attr_handler(eq); + if (ret != 0) + return ret; + + sss_chip_set_eq_ci(eq, SSS_EQ_ARMED); + + return 0; +} + +static u32 sss_init_eqe_desc(struct sss_eq *eq) +{ + eq->num_entry_per_pg = SSS_GET_EQE_NUM(eq, eq->page_size); + if (SSS_EQE_NUM_IS_ALIGN(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number element in eq page is not align\n"); + return -EINVAL; + } + + eq->init_desc_handler(eq); + + return 0; +} + +static int sss_alloc_eq_dma_page(struct sss_eq *eq, u16 id) +{ + int ret; + + ret = sss_dma_zalloc_coherent_align(SSS_TO_HWDEV(eq)->dev_hdl, eq->page_size, + SSS_MIN_EQ_PAGE_SIZE, GFP_KERNEL, &eq->page_array[id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Alloc eq page fail, pg index: %hu\n", id); + return ret; + } + + return 0; +} + +static void sss_free_eq_dma_page(struct sss_eq *eq, u16 max_id) +{ + int i; + + for (i = 0; i < max_id; i++) + sss_dma_free_coherent_align(SSS_TO_DEV(eq->hwdev), &eq->page_array[i]); +} + +static int sss_alloc_eq_page(struct sss_eq *eq) +{ + u16 page_id; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq); + + eq->page_array = kcalloc(eq->page_num, sizeof(*eq->page_array), GFP_KERNEL); + if (!eq->page_array) + return -ENOMEM; + + for (page_id = 0; page_id < eq->page_num; page_id++) { + ret = sss_alloc_eq_dma_page(eq, page_id); + if (ret != 0) + goto alloc_dma_err; + } + + ret = sss_init_eqe_desc(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eqe\n"); + goto alloc_dma_err; + } + + return 0; + +alloc_dma_err: + sss_free_eq_dma_page(eq, page_id); + kfree(eq->page_array); + eq->page_array = NULL; + + return ret; +} + +static void sss_free_eq_page(struct sss_eq *eq) +{ + u16 i; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(eq); + + for (i = 0; i < eq->page_num; i++) + sss_dma_free_coherent_align(hwdev->dev_hdl, &eq->page_array[i]); + + kfree(eq->page_array); + eq->page_array = NULL; +} + +static inline u32 sss_get_eq_page_size(const struct sss_eq *eq) +{ + u32 total_size; + u32 
count; + + total_size = ALIGN((eq->len * eq->entry_size), + SSS_MIN_EQ_PAGE_SIZE); + if (total_size <= (SSS_GET_EQ_MAX_PAGES(eq) * SSS_MIN_EQ_PAGE_SIZE)) + return SSS_MIN_EQ_PAGE_SIZE; + + count = (u32)(ALIGN((total_size / SSS_GET_EQ_MAX_PAGES(eq)), + SSS_MIN_EQ_PAGE_SIZE) / SSS_MIN_EQ_PAGE_SIZE); + + /* round up to nearest power of two */ + count = 1U << (u8)fls((int)(count - 1)); + + return ((u32)SSS_MIN_EQ_PAGE_SIZE) * count; +} + +static int sss_request_eq_irq(struct sss_eq *eq, struct sss_irq_desc *entry) +{ + struct pci_dev *pdev = SSS_TO_HWDEV(eq)->pcidev_hdl; + + snprintf(eq->irq_name, sizeof(eq->irq_name), "%s%u@pci:%s", + eq->name, eq->qid, pci_name(pdev)); + + return request_irq(entry->irq_id, eq->irq_handler, 0UL, eq->irq_name, eq); +} + +static void sss_chip_reset_eq(struct sss_eq *eq) +{ + struct sss_hwdev *hwdev = eq->hwdev; + struct sss_hwif *hwif = hwdev->hwif; + + sss_chip_write_reg(hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure set qid firstly*/ + wmb(); + + if (eq->type == SSS_AEQ) + sss_chip_write_reg(hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + else + sss_chip_set_ceq_attr(hwdev, eq->qid, 0, 0); + + /* make sure write ctrl reg secondly */ + wmb(); + + sss_chip_write_reg(hwif, SSS_EQ_PI_REG_ADDR(eq), 0); +} + +static int sss_init_eq_page_size(struct sss_eq *eq) +{ + eq->page_size = sss_get_eq_page_size(eq); + eq->old_page_size = eq->page_size; + eq->page_num = SSS_GET_EQ_PAGES_NUM(eq, eq->page_size); + + if (eq->page_num > SSS_GET_EQ_MAX_PAGES(eq)) { + sdk_err(SSS_TO_HWDEV(eq)->dev_hdl, "Number pages: %u too many pages for eq\n", + eq->page_num); + return -EINVAL; + } + + return 0; +} + +void sss_increase_eq_ci(struct sss_eq *eq) +{ + if (!eq) + return; + + eq->ci++; + + if (eq->ci == eq->len) { + eq->ci = 0; + eq->wrap = !eq->wrap; + } +} + +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry) +{ + int ret = 0; + + eq->hwdev = hwdev; + eq->irq_desc.irq_id = entry->irq_id; + eq->irq_desc.msix_id = entry->msix_id; + + ret = sss_init_eq_page_size(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq params\n"); + return ret; + } + + ret = sss_alloc_eq_page(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc eq page\n"); + return ret; + } + + sss_chip_reset_eq(eq); + + ret = sss_chip_init_eq_attr(eq); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init eq attr\n"); + goto out; + } + + ret = sss_request_eq_irq(eq, entry); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to request eq irq, err: %d\n", ret); + goto out; + } + + sss_chip_set_msix_state(hwdev, SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + return 0; + +out: + sss_free_eq_page(eq); + return ret; +} + +void sss_deinit_eq(struct sss_eq *eq) +{ + struct sss_irq_desc *irq = &eq->irq_desc; + + sss_chip_set_msix_state(SSS_TO_HWDEV(eq), SSS_EQ_IRQ_ID(eq), SSS_MSIX_DISABLE); + + synchronize_irq(irq->irq_id); + + free_irq(irq->irq_id, eq); + + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_INDIR_ID_ADDR(eq->type), eq->qid); + + /* make sure disable msix */ + wmb(); + + if (eq->type == SSS_AEQ) { + cancel_work_sync(&eq->aeq_work); + sss_chip_write_reg(SSS_TO_HWDEV(eq)->hwif, SSS_CSR_AEQ_CTRL_1_ADDR, 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + sss_chip_set_ceq_attr(SSS_TO_HWDEV(eq), eq->qid, 0, 0); + } + + eq->ci = sss_chip_read_reg(SSS_TO_HWDEV(eq)->hwif, SSS_EQ_PI_REG_ADDR(eq)); + sss_chip_set_eq_ci(eq, SSS_EQ_NOT_ARMED); + + sss_free_eq_page(eq); +} + +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info) +{ + 
intr_info->coalesc_intr_set = SSS_EQ_INTR_COALESC; + intr_info->coalesc_timer = SSS_EQ_INTR_COALESC_TIMER_CFG; + intr_info->resend_timer = SSS_EQ_INTR_RESEND_TIMER_CFG; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h new file mode 100644 index 0000000000000000000000000000000000000000..45db82abb497c872d9ff6500d9364afb356ef9a5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_eq.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EQ_H +#define SSS_HWIF_EQ_H + +#include +#include +#include + +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_csr.h" + +#define SSS_EQ_UPDATE_CI_STEP 64 + +#define SSS_TASK_PROCESS_EQE_LIMIT 1024 + +#define SSS_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define SSS_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define SSS_EQ_USLEEP_LOW_LIMIT 900 +#define SSS_EQ_USLEEP_HIG_LIMIT 1000 + +#define SSS_EQ_IRQ_ID(eq) ((eq)->irq_desc.msix_id) + +#define SSS_GET_EQ_ELEM(eq, id) \ + (((u8 *)(eq)->page_array[(id) / (eq)->num_entry_per_pg].align_vaddr) + \ + (u32)(((id) & ((eq)->num_entry_per_pg - 1)) * (eq)->entry_size)) + +#define SSS_EQ_VALID_SHIFT 31 +#define SSS_EQ_WRAPPED(eq) ((u32)(eq)->wrap << SSS_EQ_VALID_SHIFT) + +#define SSS_AEQ_MAX_PAGE 4 +#define SSS_CEQ_MAX_PAGE 8 + +#define SSS_AEQE_SIZE 64 +#define SSS_CEQE_SIZE 4 + +#define SSS_EQ_CI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_CI_ADDR : SSS_CSR_CEQ_CI_ADDR) + +#define SSS_EQ_PI_REG_ADDR(eq) \ + (((eq)->type == SSS_AEQ) ? \ + SSS_CSR_AEQ_PI_ADDR : SSS_CSR_CEQ_PI_ADDR) + +#define SSS_EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define SSS_EQ_ELEM_DESC_TYPE_SHIFT 0 +#define SSS_EQ_ELEM_DESC_SRC_SHIFT 7 +#define SSS_EQ_ELEM_DESC_SIZE_SHIFT 8 +#define SSS_EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define SSS_EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define SSS_EQ_ELEM_DESC_SRC_MASK 0x1U +#define SSS_EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define SSS_EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define SSS_GET_EQE_DESC(val, member) \ + (((val) >> SSS_EQ_ELEM_DESC_##member##_SHIFT) & \ + SSS_EQ_ELEM_DESC_##member##_MASK) + +#define SSS_PAGE_IN_4K(page_size) ((page_size) >> 12) +#define SSS_SET_EQ_HW_PAGE_SIZE(eq) ((u32)ilog2(SSS_PAGE_IN_4K((eq)->page_size))) + +enum sss_eq_intr_mode { + SSS_INTR_MODE_ARMED, + SSS_INTR_MODE_ALWAY, +}; + +enum sss_eq_ci_arm_state { + SSS_EQ_NOT_ARMED, + SSS_EQ_ARMED, +}; + +#define SSS_EQ_ARM_STATE(unfinish) \ + ((unfinish) ? 
SSS_EQ_NOT_ARMED : SSS_EQ_ARMED) + +#define SSS_EQ_INTR_COALESC 1 +#define SSS_EQ_INTR_COALESC_TIMER_CFG 0xFF +#define SSS_EQ_INTR_RESEND_TIMER_CFG 7 + +void sss_increase_eq_ci(struct sss_eq *eq); +int sss_init_eq(struct sss_hwdev *hwdev, struct sss_eq *eq, + struct sss_irq_desc *entry); +void sss_deinit_eq(struct sss_eq *eq); +void sss_chip_set_eq_ci(struct sss_eq *eq, u32 arm_state); +void sss_init_eq_intr_info(struct sss_irq_cfg *intr_info); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c new file mode 100644 index 0000000000000000000000000000000000000000..c4639c18297b9b09fb555ea25e087268f0b7003d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_export.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_irq.h" +#include "sss_csr.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" + +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base) +{ + struct sss_hwif *hwif = NULL; + u32 id = 0; + + int ret; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = SSS_TO_HWIF(hwdev); + + ret = sss_alloc_db_id(hwif, &id); + if (ret != 0) + return -EFAULT; + + *db_base = hwif->db_base_vaddr + id * SSS_DB_PAGE_SIZE; + + return 0; +} +EXPORT_SYMBOL(sss_alloc_db_addr); + +void sss_free_db_addr(void *hwdev, const void __iomem *db_base) +{ + struct sss_hwif *hwif = NULL; + u32 id; + + if (!hwdev || !db_base) + return; + + hwif = SSS_TO_HWIF(hwdev); + id = SSS_DB_ID(db_base, hwif->db_base_vaddr); + + sss_free_db_id(hwif, id); +} +EXPORT_SYMBOL(sss_free_db_addr); + +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_CLR_MSIX_AUTO_MASK) ? + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, AUTO_MSK_SET); + + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_auto_mask); + +void sss_chip_set_msix_state(void *hwdev, u16 msix_id, + enum sss_msix_state flag) +{ + u32 val; + + if (!hwdev) + return; + + val = (flag == SSS_MSIX_ENABLE) ? 
SSS_SET_MSI_CLR_INDIR(1, INT_MSK_CLR) : + SSS_SET_MSI_CLR_INDIR(1, INT_MSK_SET); + val |= SSS_SET_MSI_CLR_INDIR(msix_id, SIMPLE_INDIR_ID); + + sss_chip_write_reg(SSS_TO_HWIF(hwdev), SSS_CSR_FUNC_MSI_CLR_WR_ADDR, val); +} +EXPORT_SYMBOL(sss_chip_set_msix_state); + +u16 sss_get_global_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_global_func_id); + +u8 sss_get_pf_id_of_vf(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pf_id_of_vf); + +u8 sss_get_pcie_itf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_pcie_itf_id); + +enum sss_func_type sss_get_func_type(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_type); + +enum sss_func_type sss_get_func_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_FUNC_ID((struct sss_hwdev *)hwdev); +} +EXPORT_SYMBOL(sss_get_func_id); + +u16 sss_get_glb_pf_vf_offset(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_GLOBAL_VF_OFFSET(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_glb_pf_vf_offset); + +u8 sss_get_ppf_id(void *hwdev) +{ + if (!hwdev) + return 0; + + return SSS_GET_HWIF_PPF_ID(SSS_TO_HWIF(hwdev)); +} +EXPORT_SYMBOL(sss_get_ppf_id); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c new file mode 100644 index 0000000000000000000000000000000000000000..5451b85ffac886a029a8e25e8e21da44fc300e14 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_csr.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_init.h" +#include "sss_hwif_api.h" + +#define SSS_WAIT_CHIP_READY_TIMEOUT 10000 + +#define SSS_WAIT_DB_READY_TIMEOUT 60000 + +#define SSS_MAX_MSIX_ENTRY 2048 + +#define SSS_AF0_FUNC_GLOBAL_ID_SHIFT 0 +#define SSS_AF0_PF_ID_SHIFT 12 +#define SSS_AF0_PCI_INTF_ID_SHIFT 17 +#define SSS_AF0_VF_IN_PF_SHIFT 20 +#define SSS_AF0_FUNC_TYPE_SHIFT 28 + +#define SSS_AF0_FUNC_GLOBAL_ID_MASK 0xFFF +#define SSS_AF0_PF_ID_MASK 0x1F +#define SSS_AF0_PCI_INTF_ID_MASK 0x7 +#define SSS_AF0_VF_IN_PF_MASK 0xFF +#define SSS_AF0_FUNC_TYPE_MASK 0x1 + +#define SSS_GET_AF0(val, member) \ + (((val) >> SSS_AF0_##member##_SHIFT) & SSS_AF0_##member##_MASK) + +#define SSS_AF2_CEQ_PER_FUNC_SHIFT 0 +#define SSS_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define SSS_AF2_IRQ_PER_FUNC_SHIFT 16 + +#define SSS_AF2_CEQ_PER_FUNC_MASK 0x1FF +#define SSS_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define SSS_AF2_IRQ_PER_FUNC_MASK 0x7FF + +#define SSS_GET_AF2(val, member) \ + (((val) >> SSS_AF2_##member##_SHIFT) & SSS_AF2_##member##_MASK) + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define SSS_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define SSS_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define SSS_GET_AF3(val, member) \ + (((val) >> SSS_AF3_##member##_SHIFT) & SSS_AF3_##member##_MASK) + +#define SSS_AF5_OUTBOUND_CTRL_SHIFT 0 +#define SSS_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define SSS_GET_AF5(val, member) \ + (((val) >> SSS_AF5_##member##_SHIFT) & SSS_AF5_##member##_MASK) + 
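+/*
+ * HW_ATTR5 is decoded with the same per-field SHIFT/MASK pattern as the
+ * other attribute registers: SSS_GET_AF5(val, OUTBOUND_CTRL) expands to
+ * ((val) >> 0) & 0x1, which sss_chip_check_db_ready() compares against
+ * OUTBOUND_ENABLE before the doorbell area is used.
+ */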
+#define SSS_SET_AF5(val, member) \ + (((val) & SSS_AF5_##member##_MASK) << SSS_AF5_##member##_SHIFT) + +#define SSS_CLEAR_AF5(val, member) \ + ((val) & (~(SSS_AF5_##member##_MASK << SSS_AF5_##member##_SHIFT))) + +#define SSS_MPF_ELECTION_ID_SHIFT 0 + +#define SSS_MPF_ELECTION_ID_MASK 0x1F + +#define SSS_SET_MPF(val, member) \ + (((val) & SSS_MPF_ELECTION_##member##_MASK) << \ + SSS_MPF_ELECTION_##member##_SHIFT) + +#define SSS_GET_MPF(val, member) \ + (((val) >> SSS_MPF_ELECTION_##member##_SHIFT) & \ + SSS_MPF_ELECTION_##member##_MASK) + +#define SSS_CLEAR_MPF(val, member) \ + ((val) & (~(SSS_MPF_ELECTION_##member##_MASK << \ + SSS_MPF_ELECTION_##member##_SHIFT))) + +static enum sss_process_ret sss_check_pcie_link_handle(void *data) +{ + u32 status; + + status = sss_chip_get_pcie_link_status(data); + if (status == SSS_PCIE_LINK_DOWN) + return SSS_PROCESS_ERR; + else if (status == SSS_PCIE_LINK_UP) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_pcie_link_up(struct sss_hwdev *hwdev) +{ + int ret; + + ret = sss_check_handler_timeout(hwdev, sss_check_pcie_link_handle, + SSS_WAIT_CHIP_READY_TIMEOUT, USEC_PER_MSEC); + if (ret == -ETIMEDOUT) + sdk_err(hwdev->dev_hdl, "Wait for chip ready timeout\n"); + + return ret; +} + +static int sss_chip_get_func_attr0(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR0_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_ID(hwif, SSS_GET_AF0(attr, FUNC_GLOBAL_ID)); + SSS_SET_HWIF_PF_ID(hwif, SSS_GET_AF0(attr, PF_ID)); + SSS_SET_HWIF_PCI_INTF_ID(hwif, SSS_GET_AF0(attr, PCI_INTF_ID)); + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_GET_AF0(attr, FUNC_TYPE)); + + return 0; +} + +static int sss_chip_get_func_attr1(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR1_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_AF1(attr, PPF_ID)); + SSS_SET_HWIF_AEQ_NUM(hwif, BIT(SSS_GET_AF1(attr, AEQ_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr2(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR2_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_CEQ_NUM(hwif, (u8)SSS_GET_AF2(attr, CEQ_PER_FUNC)); + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_GET_AF2(attr, IRQ_PER_FUNC)); + if (SSS_GET_HWIF_IRQ_NUM(hwif) > SSS_MAX_MSIX_ENTRY) + SSS_SET_HWIF_IRQ_NUM(hwif, SSS_MAX_MSIX_ENTRY); + SSS_SET_HWIF_DMA_ATTR_NUM(hwif, BIT(SSS_GET_AF2(attr, DMA_ATTR_PER_FUNC))); + + return 0; +} + +static int sss_chip_get_func_attr3(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR3_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_GLOBAL_VF_OFFSET(hwif, SSS_GET_AF3(attr, GLOBAL_VF_ID_OF_PF)); + + return 0; +} + +static int sss_chip_get_func_attr6(struct sss_hwif *hwif) +{ + u32 attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR6_ADDR); + + if (attr == SSS_PCIE_LINK_DOWN) + return -EFAULT; + + SSS_SET_HWIF_SQ_NUM(hwif, SSS_GET_AF6(attr, FUNC_MAX_SQ)); + SSS_SET_HWIF_MSIX_EN(hwif, SSS_GET_AF6(attr, MSIX_FLEX_EN)); + + return 0; +} + +static int sss_hwif_init_func_attr(struct sss_hwif *hwif) +{ + int ret; + + ret = sss_chip_get_func_attr0(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr1(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr2(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr3(hwif); + if (ret != 0) + return ret; + + ret = sss_chip_get_func_attr6(hwif); + if (ret 
!= 0) + return ret; + + return 0; +} + +static void sss_chip_init_ppf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + val = SSS_CLEAR_PPF(val, ID); + val |= SSS_SET_PPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_PPF_ELECT_ADDR, val); + + /* Check PPF */ + val = sss_chip_read_reg(hwif, SSS_CSR_PPF_ELECT_ADDR); + SSS_SET_HWIF_PPF_ID(hwif, SSS_GET_PPF(val, ID)); + if (SSS_GET_HWIF_PPF_ID(hwif) == SSS_GET_HWIF_GLOBAL_ID(hwif)) + SSS_SET_HWIF_FUNC_TYPE(hwif, SSS_FUNC_TYPE_PPF); +} + +static void sss_chip_get_mpf(struct sss_hwif *hwif) +{ + u32 mpf; + + mpf = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + SSS_SET_HWIF_MPF_ID(hwif, SSS_GET_MPF(mpf, ID)); +} + +static void sss_chip_init_mpf(struct sss_hwif *hwif) +{ + u32 val; + + val = sss_chip_read_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR); + val = SSS_CLEAR_MPF(val, ID); + val |= SSS_SET_MPF(SSS_GET_HWIF_GLOBAL_ID(hwif), ID); + + sss_chip_write_reg(hwif, SSS_CSR_GLOBAL_MPF_ELECT_ADDR, val); +} + +static int sss_hwif_alloc_db_pool(struct sss_hwif *hwif) +{ + struct sss_db_pool *pool = &hwif->db_pool; + u32 bit_size; + + bit_size = (hwif->db_dwqe_len > SSS_DB_DWQE_SIZE) ? SSS_DB_MAX_AREAS : + ((u32)(hwif->db_dwqe_len / SSS_DB_PAGE_SIZE)); + pool->bitmap = bitmap_zalloc(bit_size, GFP_KERNEL); + if (!pool->bitmap) { + pr_err("Fail to allocate db area.\n"); + return -ENOMEM; + } + pool->bit_size = bit_size; + spin_lock_init(&pool->id_lock); + + return 0; +} + +static void sss_hwif_free_db_pool(struct sss_db_pool *pool) +{ + kfree(pool->bitmap); +} + +static void sss_chip_disable_all_msix(struct sss_hwdev *hwdev) +{ + u16 i; + u16 irq_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + + for (i = 0; i < irq_num; i++) + sss_chip_set_msix_state(hwdev, i, SSS_MSIX_DISABLE); +} + +static enum sss_process_ret sss_chip_check_db_ready(void *data) +{ + int outbound_status; + int db_status; + struct sss_hwif *hwif = data; + u32 db_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR4_ADDR); + u32 outband_attr = sss_chip_read_reg(hwif, SSS_CSR_HW_ATTR5_ADDR); + + db_status = SSS_GET_AF4(db_attr, DOORBELL_CTRL); + outbound_status = SSS_GET_AF5(outband_attr, OUTBOUND_CTRL); + + if (db_status == DB_ENABLE && outbound_status == OUTBOUND_ENABLE) + return SSS_PROCESS_OK; + + return SSS_PROCESS_DOING; +} + +static int sss_wait_db_ready(struct sss_hwif *hwif) +{ + return sss_check_handler_timeout(hwif, sss_chip_check_db_ready, + SSS_WAIT_DB_READY_TIMEOUT, USEC_PER_MSEC); +} + +static void sss_hwif_init_bar_base(struct sss_pci_adapter *adapter) +{ + struct sss_hwif *hwif = SSS_TO_HWIF(adapter->hwdev); + + hwif->db_dwqe_len = adapter->db_dwqe_len; + hwif->db_base_vaddr = adapter->db_reg_bar; + hwif->db_base_paddr = adapter->db_base_paddr; + + hwif->mgmt_reg_base = adapter->mgmt_reg_bar; + hwif->cfg_reg_base = (adapter->mgmt_reg_bar) ? 
+ adapter->cfg_reg_bar : + ((u8 *)adapter->cfg_reg_bar + SSS_VF_CFG_REG_OFFSET); +} + +static int sss_hwif_wait_chip_ready(struct sss_hwdev *hwdev) +{ + int ret; + u32 db_attr; + u32 outband_attr; + + ret = sss_wait_pcie_link_up(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Pcie is not link up\n"); + return ret; + } + + ret = sss_wait_db_ready(hwdev->hwif); + if (ret != 0) { + db_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR4_ADDR); + outband_attr = sss_chip_read_reg(hwdev->hwif, SSS_CSR_HW_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell is disabled, db 0x%x outbound 0x%x\n", + db_attr, outband_attr); + return ret; + } + + return 0; +} + +static void sss_hwif_init_pf(struct sss_hwdev *hwdev) +{ + struct sss_hwif *hwif = hwdev->hwif; + + if (!SSS_IS_VF(hwdev)) { + sss_chip_init_ppf(hwif); + + if (SSS_IS_PPF(hwdev)) + sss_chip_init_mpf(hwif); + sss_chip_get_mpf(hwif); + } + + sss_chip_disable_all_msix(hwdev); + + sss_chip_set_pf_status(hwif, SSS_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, + "Global_func_id: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + SSS_GET_HWIF_GLOBAL_ID(hwif), SSS_GET_HWIF_FUNC_TYPE(hwif), + SSS_GET_HWIF_PCI_INTF_ID(hwif), SSS_GET_HWIF_PPF_ID(hwif), + SSS_GET_HWIF_MPF_ID(hwif)); +} + +int sss_hwif_init(struct sss_pci_adapter *adapter) +{ + struct sss_hwdev *hwdev = adapter->hwdev; + struct sss_hwif *hwif = NULL; + int ret; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwif->pdev = hwdev->pcidev_hdl; + hwdev->hwif = hwif; + + sss_hwif_init_bar_base(adapter); + + ret = sss_hwif_alloc_db_pool(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to init db pool.\n"); + goto alloc_db_pool_err; + } + + ret = sss_hwif_wait_chip_ready(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Chip is not ready\n"); + goto wait_chip_ready_err; + } + + ret = sss_hwif_init_func_attr(hwif); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail init hwif attr\n"); + goto wait_chip_ready_err; + } + + sss_hwif_init_pf(hwdev); + + return 0; + +wait_chip_ready_err: + sss_dump_chip_err_info(hwdev); + sss_hwif_free_db_pool(&hwif->db_pool); +alloc_db_pool_err: + kfree(hwif); + hwdev->hwif = NULL; + + return ret; +} + +void sss_hwif_deinit(struct sss_hwdev *hwdev) +{ + sss_hwif_free_db_pool(&hwdev->hwif->db_pool); + kfree(hwdev->hwif); + hwdev->hwif = NULL; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h new file mode 100644 index 0000000000000000000000000000000000000000..ca5e2ce972e554b21bc215e4d6a2e82fea6183f0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_INIT_H +#define SSS_HWIF_INIT_H + +#include "sss_hwdev.h" +#include "sss_adapter.h" + +int sss_hwif_init(struct sss_pci_adapter *adapter); +void sss_hwif_deinit(struct sss_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..574e9aa02515379bde2b77f1e0675a864c275589 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include 
"sss_hw_svc_cap.h" + +#define SSS_GET_NEED_IRQ_NUM(hwif, intr_num) \ + (SSS_GET_HWIF_MSIX_EN(hwif) ? (SSS_GET_HWIF_AEQ_NUM(hwif) + \ + SSS_GET_HWIF_CEQ_NUM(hwif) + (hwif)->attr.sq_num) : (intr_num)) + +#define SSS_MIN_VECTOR 2 + +static int sss_alloc_irq_info(struct sss_hwdev *hwdev) +{ + u16 total_num = SSS_GET_HWIF_IRQ_NUM(hwdev->hwif); + u16 need_num = SSS_GET_NEED_IRQ_NUM(hwdev->hwif, total_num); + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq_info *irq_info = &mgmt_info->irq_info; + + if (total_num == 0) { + sdk_err(hwdev->dev_hdl, "Mgmt irq info: intr total_num = 0, msix_flex_en %d\n", + SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + return -EFAULT; + } + + if (need_num > total_num) { + sdk_warn(hwdev->dev_hdl, "Mgmt irq info: intr total_num %d < need_num %d, msix_flex_en %d\n", + total_num, need_num, SSS_GET_HWIF_MSIX_EN(hwdev->hwif)); + need_num = total_num; + } + + irq_info->irq = kcalloc(total_num, sizeof(*irq_info->irq), GFP_KERNEL); + if (!irq_info->irq) + return -ENOMEM; + + irq_info->max_num = need_num; + + return 0; +} + +static void sss_free_irq_info(struct sss_hwdev *hwdev) +{ + kfree(hwdev->mgmt_info->irq_info.irq); + hwdev->mgmt_info->irq_info.irq = NULL; +} + +int sss_init_irq_info(struct sss_hwdev *hwdev) +{ + u16 i = 0; + u16 irq_num; + int enable_irq_num; + int ret; + struct sss_mgmt_info *mgmt_info = hwdev->mgmt_info; + struct sss_irq *irq = NULL; + struct msix_entry *entry = NULL; + + ret = sss_alloc_irq_info(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc irq info, err: %d\n", ret); + return ret; + } + + irq_num = mgmt_info->irq_info.max_num; + entry = kcalloc(irq_num, sizeof(*entry), GFP_KERNEL); + if (!entry) { + sss_free_irq_info(hwdev); + return -ENOMEM; + } + + for (i = 0; i < irq_num; i++) + entry[i].entry = i; + + enable_irq_num = pci_enable_msix_range(hwdev->pcidev_hdl, entry, + SSS_MIN_VECTOR, irq_num); + if (enable_irq_num < 0) { + kfree(entry); + sss_free_irq_info(hwdev); + sdk_err(hwdev->dev_hdl, "Fail to alloc msix entries with threshold 2. 
enabled_irq: %d\n", + enable_irq_num); + return -ENOMEM; + } + + irq_num = (u16)enable_irq_num; + mgmt_info->irq_info.total_num = irq_num; + mgmt_info->irq_info.free_num = irq_num; + mgmt_info->svc_cap.intr_type = SSS_INTR_TYPE_MSIX; + + irq = mgmt_info->irq_info.irq; + for (i = 0; i < irq_num; i++) { + irq[i].desc.msix_id = entry[i].entry; + irq[i].desc.irq_id = entry[i].vector; + irq[i].type = SSS_SERVICE_TYPE_MAX; + irq[i].busy = SSS_CFG_FREE; + } + + mutex_init(&mgmt_info->irq_info.irq_mutex); + + sdk_info(hwdev->dev_hdl, "Success to request %u msix vector.\n", irq_num); + kfree(entry); + + return 0; +} + +void sss_deinit_irq_info(struct sss_hwdev *hwdev) +{ + struct sss_service_cap *svc_cap = &hwdev->mgmt_info->svc_cap; + struct sss_irq_info *irq_info = &hwdev->mgmt_info->irq_info; + + if (irq_info->free_num != irq_info->total_num) + sdk_err(hwdev->dev_hdl, "Fail to reclaim all irq and eq, please check\n"); + + if (svc_cap->intr_type == SSS_INTR_TYPE_MSIX) + pci_disable_msix(hwdev->pcidev_hdl); + else if (svc_cap->intr_type == SSS_INTR_TYPE_MSI) + pci_disable_msi(hwdev->pcidev_hdl); + + sss_free_irq_info(hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..0918d74ebaa0112ef71ef969908f09d5323c1bf2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_IRQ_H +#define SSS_HWIF_IRQ_H + +#include "sss_hwdev.h" + +int sss_init_irq_info(struct sss_hwdev *dev); +void sss_deinit_irq_info(struct sss_hwdev *dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..4490e4378cbca657475c89b45d50a5a8ff7c1181 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_aeq.h" +#include "sss_csr.h" +#include "sss_common.h" + +#define SSS_MBX_INT_DST_AEQN_SHIFT 10 +#define SSS_MBX_INT_SRC_RESP_AEQN_SHIFT 12 +#define SSS_MBX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define SSS_MBX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define SSS_MBX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define SSS_MBX_INT_WB_EN_SHIFT 28 + +#define SSS_MBX_INT_DST_AEQN_MASK 0x3 +#define SSS_MBX_INT_SRC_RESP_AEQN_MASK 0x3 +#define SSS_MBX_INT_STAT_DMA_MASK 0x3F +#define SSS_MBX_INT_TX_SIZE_MASK 0x1F +#define SSS_MBX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define SSS_MBX_INT_WB_EN_MASK 0x1 + +#define SSS_SET_MBX_INT(val, field) \ + (((val) & SSS_MBX_INT_##field##_MASK) << \ + SSS_MBX_INT_##field##_SHIFT) + +enum sss_mbx_tx_status { + SSS_MBX_TX_NOT_COMPLETE = 1, +}; + +#define SSS_MBX_CTRL_TRIGGER_AEQE_SHIFT 0 + +#define SSS_MBX_CTRL_TX_STATUS_SHIFT 1 +#define SSS_MBX_CTRL_DST_FUNC_SHIFT 16 + +#define SSS_MBX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define SSS_MBX_CTRL_TX_STATUS_MASK 0x1 +#define SSS_MBX_CTRL_DST_FUNC_MASK 0x1FFF + +#define SSS_SET_MBX_CTRL(val, field) \ + 
(((val) & SSS_MBX_CTRL_##field##_MASK) << \ + SSS_MBX_CTRL_##field##_SHIFT) + +#define SSS_MBX_SEGLEN_MASK \ + SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define SSS_MBX_MSG_POLL_TIMEOUT_MS 8000 +#define SSS_MBX_COMPLETE_WAIT_TIME_MS 40000U + +#define SSS_SEQ_ID_START_VAL 0 + +/* mbx write back status is 16B, only first 4B is used */ +#define SSS_MBX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define SSS_MBX_WB_STATUS_MASK 0xFF +#define SSS_MBX_WB_ERRCODE_MASK 0xFF00 +#define SSS_MBX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define SSS_MBX_WB_STATUS_NOT_FINISHED 0x00 + +#define SSS_MBX_STATUS_FINISHED(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) != SSS_MBX_WB_STATUS_NOT_FINISHED) +#define SSS_MBX_STATUS_SUCCESS(wb) \ + (((wb) & SSS_MBX_WB_STATUS_MASK) == SSS_MBX_WB_STATUS_FINISHED_SUCCESS) +#define SSS_MBX_STATUS_ERRCODE(wb) \ + ((wb) & SSS_MBX_WB_ERRCODE_MASK) + +#define SSS_NO_DMA_ATTR 0 + +#define SSS_MBX_MSG_ID_MASK 0xF +#define SSS_MBX_MSG_ID(mbx) ((mbx)->send_msg_id) +#define SSS_INCREASE_MBX_MSG_ID(mbx) \ + ((mbx)->send_msg_id = ((mbx)->send_msg_id + 1) & SSS_MBX_MSG_ID_MASK) + +#define SSS_MBX_MSG_CHN_STOP(mbx) \ + ((((mbx)->lock_channel_en) && \ + test_bit((mbx)->cur_msg_channel, &(mbx)->channel_stop)) ? true : false) + +#define SSS_MBX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define SSS_MBX_XOR_DATA_ALIGN 4 + +#define SSS_MQ_ID_MASK(mq, id) ((id) & ((mq)->depth - 1)) +#define SSS_IS_MSG_QUEUE_FULL(mq) \ + (SSS_MQ_ID_MASK(mq, (mq)->pi + 1) == SSS_MQ_ID_MASK(mq, (mq)->ci)) + +#define SSS_MBX_TRY_LOCK_SLEPP_US 1000 + +#define SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, ack_type, type, direction, cmd) \ + (SSS_SET_MSG_HEADER((msg_len), MSG_LEN) | \ + SSS_SET_MSG_HEADER((mod), MODULE) | \ + SSS_SET_MSG_HEADER(SSS_MBX_SEG_SIZE, SEG_LEN) | \ + SSS_SET_MSG_HEADER((ack_type), NO_ACK) | \ + SSS_SET_MSG_HEADER((type), DATA_TYPE) | \ + SSS_SET_MSG_HEADER(SSS_SEQ_ID_START_VAL, SEQID) | \ + SSS_SET_MSG_HEADER(SSS_NOT_LAST_SEG, LAST) | \ + SSS_SET_MSG_HEADER((direction), DIRECTION) | \ + SSS_SET_MSG_HEADER((cmd), CMD) | \ + SSS_SET_MSG_HEADER((msg_info)->msg_id, MSG_ID) | \ + SSS_SET_MSG_HEADER((((hwdev)->poll || \ + (hwdev)->hwif->attr.aeq_num >= SSS_MGMT_RSP_MSG_AEQ) ? 
\ + SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ), AEQ_ID) | \ + SSS_SET_MSG_HEADER(SSS_MSG_SRC_MBX, SOURCE) | \ + SSS_SET_MSG_HEADER(!!(msg_info)->state, STATUS) | \ + SSS_SET_MSG_HEADER(sss_get_global_func_id(hwdev), SRC_GLB_FUNC_ID)) + +#define SSS_MBX_SEG_LEN_ALIGN 4 + +enum sss_msg_aeq_type { + SSS_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbx message */ + SSS_MBX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response adm message */ + SSS_MGMT_RSP_MSG_AEQ = 2, +}; + +enum sss_mbx_order_type { + SSS_MBX_STRONG_ORDER, +}; + +enum sss_mbx_wb_type { + SSS_MBX_WB = 1, +}; + +enum sss_mbx_aeq_trig_type { + SSS_MBX_NOT_TRIG, +}; + +struct sss_mbx_dma_msg { + u32 xor; + u32 dma_addr_h; + u32 dma_addr_l; + u32 msg_len; + u64 rsvd; +}; + +static struct sss_msg_buffer *sss_get_msg_buffer_from_mgmt(struct sss_mbx *mbx) +{ + return &mbx->mgmt_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_pf(struct sss_mbx *mbx, u64 src_func_id) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (src_func_id != sss_get_pf_id_of_vf(hwdev) || !mbx->func_msg) + return NULL; + + return mbx->func_msg; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_vf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + func_id = (u16)(src_func_id - 1U) - sss_get_glb_pf_vf_offset(hwdev); + if (func_id >= mbx->num_func_msg) + return NULL; + + return &mbx->func_msg[func_id]; +} + +static struct sss_msg_buffer *sss_get_msg_buffer_from_ppf(struct sss_mbx *mbx, u64 src_func_id) +{ + u16 func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (!mbx->support_h2h_msg) + return NULL; + + for (func_id = 0; func_id < SSS_MAX_HOST_NUM(hwdev); func_id++) { + if (src_func_id == sss_chip_get_host_ppf_id(hwdev, (u8)func_id)) + break; + } + + if (func_id == SSS_MAX_HOST_NUM(hwdev) || !mbx->host_msg) + return NULL; + + return &mbx->host_msg[func_id]; +} + +struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_msg_buffer *msg_buffer = NULL; + + if (src_func_id == SSS_MGMT_SRC_ID) + msg_buffer = sss_get_msg_buffer_from_mgmt(mbx); + else if (SSS_IS_VF(hwdev)) + msg_buffer = sss_get_msg_buffer_from_pf(mbx, src_func_id); + else if (src_func_id > sss_get_glb_pf_vf_offset(hwdev)) + msg_buffer = sss_get_msg_buffer_from_vf(mbx, src_func_id); + else + msg_buffer = sss_get_msg_buffer_from_ppf(mbx, src_func_id); + + return (direction == SSS_DIRECT_SEND_MSG) ? 
+ &msg_buffer->recv_msg : &msg_buffer->resp_msg; +} + +static u32 sss_mbx_dma_data_xor(u32 *data, u16 data_len) +{ + u16 i; + u16 cnt = data_len / sizeof(u32); + u32 val = SSS_MBX_DMA_MSG_INIT_XOR_VAL; + + for (i = 0; i < cnt; i++) + val ^= data[i]; + + return val; +} + +static void sss_mbx_fill_dma_msg_buf(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, + void *data, u16 data_len) +{ + u64 pi; + u64 dma_paddr; + void *dma_vaddr; + + pi = queue->pi * SSS_MBX_BUF_SIZE_MAX; + dma_vaddr = (u8 *)queue->dma_buff_vaddr + pi; + dma_paddr = queue->dma_buff_paddr + pi; + memcpy(dma_vaddr, data, data_len); + + dma_msg->dma_addr_h = upper_32_bits(dma_paddr); + dma_msg->dma_addr_l = lower_32_bits(dma_paddr); + dma_msg->msg_len = data_len; + dma_msg->xor = sss_mbx_dma_data_xor(dma_vaddr, + ALIGN(data_len, SSS_MBX_XOR_DATA_ALIGN)); +} + +static struct sss_mbx_dma_queue * +sss_get_mbx_dma_queue(struct sss_mbx *mbx, + enum sss_msg_ack_type ack_type) +{ + u32 val; + struct sss_mbx_dma_queue *queue = NULL; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + if (ack_type == SSS_MSG_ACK) { + queue = &mbx->sync_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, SYNC); + } else { + queue = &mbx->async_msg_queue; + queue->ci = SSS_GET_MBX_MQ_CI(val, ASYNC); + } + + if (SSS_IS_MSG_QUEUE_FULL(queue)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Mbx sync mq is busy, pi: %u, ci: %u\n", + queue->pi, SSS_MQ_ID_MASK(queue, queue->ci)); + return NULL; + } + + return queue; +} + +static void sss_fill_mbx_msg_body(struct sss_mbx_dma_queue *queue, + struct sss_mbx_dma_msg *dma_msg, void *msg_body, u16 body_len) +{ + sss_mbx_fill_dma_msg_buf(queue, dma_msg, msg_body, body_len); + queue->pi = SSS_MQ_ID_MASK(queue, queue->pi + 1); +} + +static void sss_clear_mbx_status(struct sss_mbx_send *send_mbx) +{ + *send_mbx->wb_state = 0; + + /* clear mbx wb state */ + wmb(); +} + +static void sss_chip_send_mbx_msg_header(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, u64 *msg_header) +{ + u32 i; + u32 *header = (u32 *)msg_header; + u32 cnt = SSS_MBX_HEADER_SIZE / sizeof(u32); + + for (i = 0; i < cnt; i++) + __raw_writel(cpu_to_be32(*(header + i)), send_mbx->data + i * sizeof(u32)); +} + +static void sss_chip_send_mbx_msg_body(struct sss_hwdev *hwdev, + struct sss_mbx_send *send_mbx, void *body, u16 body_len) +{ + u32 *msg_data = body; + u32 size = sizeof(u32); + u32 i; + u8 buf[SSS_MBX_SEG_SIZE] = {0}; + u32 cnt = ALIGN(body_len, size) / size; + + if (body_len % size != 0) { + memcpy(buf, body, body_len); + msg_data = (u32 *)buf; + } + + for (i = 0; i < cnt; i++) { + __raw_writel(cpu_to_be32(*(msg_data + i)), + send_mbx->data + SSS_MBX_HEADER_SIZE + i * size); + } +} + +static void sss_chip_write_mbx_msg_attr(struct sss_mbx *mbx, + u16 dest, u16 aeq_num, u16 seg_len) +{ + u16 size; + u16 dest_func_id; + u32 intr; + u32 ctrl; + + size = ALIGN(seg_len + SSS_MBX_HEADER_SIZE, SSS_MBX_SEG_LEN_ALIGN) >> 2; + intr = SSS_SET_MBX_INT(aeq_num, DST_AEQN) | + SSS_SET_MBX_INT(0, SRC_RESP_AEQN) | + SSS_SET_MBX_INT(SSS_NO_DMA_ATTR, STAT_DMA) | + SSS_SET_MBX_INT(size, TX_SIZE) | + SSS_SET_MBX_INT(SSS_MBX_STRONG_ORDER, STAT_DMA_SO_RO) | + SSS_SET_MBX_INT(SSS_MBX_WB, WB_EN); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_INT_OFFSET_OFF, intr); + + /* make sure write mbx intr attr reg */ + wmb(); + + dest_func_id = (SSS_IS_VF(SSS_TO_HWDEV(mbx)) && dest != SSS_MGMT_SRC_ID) ? 
0 : dest; + ctrl = SSS_SET_MBX_CTRL(SSS_MBX_TX_NOT_COMPLETE, TX_STATUS) | + SSS_SET_MBX_CTRL(SSS_MBX_NOT_TRIG, TRIGGER_AEQE) | + SSS_SET_MBX_CTRL(dest_func_id, DST_FUNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, + SSS_HW_CSR_MBX_CTRL_OFF, ctrl); + + /* make sure write mbx ctrl reg */ + wmb(); +} + +static void sss_dump_mbx_reg(struct sss_hwdev *hwdev) +{ + u32 val1; + u32 val2; + + val1 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_CTRL_OFF); + val2 = sss_chip_read_reg(hwdev->hwif, SSS_HW_CSR_MBX_INT_OFFSET_OFF); + + sdk_err(hwdev->dev_hdl, "Mbx ctrl reg:0x%x, intr offset:0x%x\n", val1, val2); +} + +static u16 sss_get_mbx_status(const struct sss_mbx_send *send_mbx) +{ + u64 val = be64_to_cpu(*send_mbx->wb_state); + + /* read wb state before returning it */ + rmb(); + + return (u16)(val & SSS_MBX_WB_STATUS_ERRCODE_MASK); +} + +static enum sss_process_ret sss_check_mbx_wb_status(void *priv_data) +{ + u16 status; + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || !SSS_TO_HWDEV(mbx)->chip_present_flag) + return SSS_PROCESS_ERR; + + status = sss_get_mbx_status(&mbx->mbx_send); + + return SSS_MBX_STATUS_FINISHED(status) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_chip_send_mbx_fragment(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, void *msg_body, u16 body_len) +{ + u16 aeq_type; + u16 status = 0; + u16 err_code; + u16 direction; + int ret; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + aeq_type = (SSS_GET_HWIF_AEQ_NUM(hwdev->hwif) > SSS_MBX_RSP_MSG_AEQ && + direction != SSS_DIRECT_SEND_MSG) ? SSS_MBX_RSP_MSG_AEQ : SSS_ASYNC_MSG_AEQ; + + sss_clear_mbx_status(send_mbx); + + sss_chip_send_mbx_msg_header(hwdev, send_mbx, &msg_header); + + sss_chip_send_mbx_msg_body(hwdev, send_mbx, msg_body, body_len); + + sss_chip_write_mbx_msg_attr(mbx, dest_func_id, aeq_type, body_len); + + ret = sss_check_handler_timeout(mbx, sss_check_mbx_wb_status, + SSS_MBX_MSG_POLL_TIMEOUT_MS, USEC_PER_MSEC); + status = sss_get_mbx_status(send_mbx); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Send mbx seg timeout, wb status: 0x%x\n", status); + sss_dump_mbx_reg(hwdev); + return -ETIMEDOUT; + } + + if (!SSS_MBX_STATUS_SUCCESS(status)) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg to func %u, wb status: 0x%x\n", + dest_func_id, status); + err_code = SSS_MBX_STATUS_ERRCODE(status); + return (err_code != 0) ? 
err_code : -EFAULT; + } + + return 0; +} + +static int sss_send_mbx_to_chip(struct sss_mbx *mbx, u16 dest_func_id, + u64 msg_header, u8 *msg_body, u16 body_len) +{ + int ret; + u16 seg_len = SSS_MBX_SEG_SIZE; + u32 seq_id = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + while (body_len > 0) { + if (body_len <= SSS_MBX_SEG_SIZE) { + msg_header &= ~SSS_MBX_SEGLEN_MASK; + msg_header |= SSS_SET_MSG_HEADER(body_len, SEG_LEN); + msg_header |= SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST); + seg_len = body_len; + } + + ret = sss_chip_send_mbx_fragment(mbx, dest_func_id, msg_header, msg_body, seg_len); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to send mbx seg, seq_id=0x%llx\n", + SSS_GET_MSG_HEADER(msg_header, SEQID)); + return ret; + } + + seq_id++; + msg_body += seg_len; + body_len -= seg_len; + msg_header &= ~(SSS_SET_MSG_HEADER(SSS_MSG_HEADER_SEQID_MASK, SEQID)); + msg_header |= SSS_SET_MSG_HEADER(seq_id, SEQID); + } + + return 0; +} + +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, void *msg, + u16 msg_len, u16 dest_func_id, enum sss_msg_direction_type direction, + enum sss_msg_ack_type ack_type, struct sss_mbx_msg_info *msg_info) +{ + u8 *msg_body = NULL; + u64 msg_header = 0; + int ret = 0; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_mbx_dma_msg msg_dma = {0}; + enum sss_data_type type = SSS_INLINE_DATA; + struct sss_mbx_dma_queue *queue = NULL; + + mutex_lock(&mbx->msg_send_lock); + + if (SSS_IS_DMA_MBX_MSG(dest_func_id) && !SSS_SUPPORT_MBX_SEGMENT(hwdev)) { + queue = sss_get_mbx_dma_queue(mbx, ack_type); + if (!queue) { + ret = -EBUSY; + goto out; + } + + sss_fill_mbx_msg_body(queue, &msg_dma, msg, msg_len); + + type = SSS_DMA_DATA; + msg = &msg_dma; + msg_len = sizeof(msg_dma); + } + + msg_body = (u8 *)msg; + msg_header = SSS_FILL_MSG_HEADER(hwdev, msg_info, msg_len, mod, + ack_type, type, direction, cmd); + + ret = sss_send_mbx_to_chip(mbx, dest_func_id, msg_header, msg_body, msg_len); + +out: + mutex_unlock(&mbx->msg_send_lock); + + return ret; +} + +static void sss_set_mbx_event_flag(struct sss_mbx *mbx, + enum sss_mbx_event_state event_flag) +{ + spin_lock(&mbx->mbx_lock); + mbx->event_flag = event_flag; + spin_unlock(&mbx->mbx_lock); +} + +static enum sss_process_ret check_mbx_msg_finish(void *priv_data) +{ + struct sss_mbx *mbx = priv_data; + + if (SSS_MBX_MSG_CHN_STOP(mbx) || SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return SSS_PROCESS_ERR; + + return (mbx->event_flag == SSS_EVENT_SUCCESS) ? SSS_PROCESS_OK : SSS_PROCESS_DOING; +} + +static int sss_wait_mbx_msg_completion(struct sss_mbx *mbx, u32 timeout) +{ + u32 wait_time; + int ret; + + wait_time = (timeout != 0) ? 
timeout : SSS_MBX_COMPLETE_WAIT_TIME_MS; + ret = sss_check_handler_timeout(mbx, check_mbx_msg_finish, + wait_time, USEC_PER_MSEC); + if (ret != 0) { + sss_set_mbx_event_flag(mbx, SSS_EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + sss_set_mbx_event_flag(mbx, SSS_EVENT_END); + + return 0; +} + +static int sss_send_mbx_msg_lock(struct sss_mbx *mbx, u16 channel) +{ + if (!mbx->lock_channel_en) { + mutex_lock(&mbx->mbx_send_lock); + return 0; + } + + while (test_bit(channel, &mbx->channel_stop) == 0) { + if (mutex_trylock(&mbx->mbx_send_lock) != 0) + return 0; + + usleep_range(SSS_MBX_TRY_LOCK_SLEPP_US - 1, SSS_MBX_TRY_LOCK_SLEPP_US); + } + + return -EAGAIN; +} + +static void sss_send_mbx_msg_unlock(struct sss_mbx *mbx) +{ + mutex_unlock(&mbx->mbx_send_lock); +} + +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_msg_desc *msg_desc = NULL; + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + if (SSS_TO_HWDEV(mbx)->chip_present_flag == 0) + return -EPERM; + + msg_desc = sss_get_mbx_msg_desc(mbx, dest_func_id, SSS_RESP_MSG); + if (!msg_desc) + return -EFAULT; + + ret = sss_send_mbx_msg_lock(mbx, channel); + if (ret != 0) + return ret; + + mbx->cur_msg_channel = channel; + SSS_INCREASE_MBX_MSG_ID(mbx); + sss_set_mbx_event_flag(mbx, SSS_EVENT_START); + + msg_info.msg_id = SSS_MBX_MSG_ID(mbx); + ret = sss_send_mbx_msg(mbx, mod, cmd, buf_in, in_size, dest_func_id, + SSS_DIRECT_SEND_MSG, SSS_MSG_ACK, &msg_info); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to send mbx mod %u, cmd %u, msg_id: %u, err: %d\n", + mod, cmd, msg_info.msg_id, ret); + sss_set_mbx_event_flag(mbx, SSS_EVENT_FAIL); + goto send_err; + } + + if (sss_wait_mbx_msg_completion(mbx, timeout)) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Send mbx msg timeout, msg_id: %u\n", msg_info.msg_id); + sss_dump_aeq_info(SSS_TO_HWDEV(mbx)); + ret = -ETIMEDOUT; + goto send_err; + } + + if (mod != msg_desc->mod || cmd != msg_desc->cmd) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", + msg_desc->mod, msg_desc->cmd, mod, cmd); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_info.state) { + ret = msg_desc->msg_info.state; + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < msg_desc->msg_len) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid response mbx message length: %u for mod %d cmd %u, should less than: %u\n", + msg_desc->msg_len, mod, cmd, *out_size); + ret = -EFAULT; + goto send_err; + } + + if (msg_desc->msg_len) + memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); + + *out_size = msg_desc->msg_len; + } + +send_err: + sss_send_mbx_msg_unlock(mbx); + + return ret; +} + +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) +{ + struct sss_mbx_msg_info msg_info = {0}; + int ret; + + ret = sss_check_mbx_param(hwdev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg_lock(hwdev->mbx, channel); + if (ret != 0) + return ret; + + ret = sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + func_id, SSS_DIRECT_SEND_MSG, SSS_MSG_NO_ACK, &msg_info); + if (ret != 0) + sdk_err(hwdev->dev_hdl, "Fail to send mbx no ack\n"); + + sss_send_mbx_msg_unlock(hwdev->mbx); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h 
b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..f3f253046f8fc496af912de059ebe0ac26c70810 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_H +#define SSS_HWIF_MBX_H + +#include "sss_hw.h" +#include "sss_hwdev.h" + +#define SSS_MGMT_SRC_ID 0x1FFF + +#define SSS_IS_DMA_MBX_MSG(dest_func_id) ((dest_func_id) == SSS_MGMT_SRC_ID) + +#define SSS_MBX_BUF_SIZE_MAX 2048U + +#define SSS_MBX_HEADER_SIZE 8 + +/* MBX size is 64B, 8B for mbx_header, 8B reserved */ +#define SSS_MBX_SEG_SIZE 48 +#define SSS_MBX_DATA_SIZE (SSS_MBX_BUF_SIZE_MAX - SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_MQ_CI_OFF (SSS_CSR_CFG_FLAG + \ + SSS_HW_CSR_MBX_DATA_OFF + SSS_MBX_HEADER_SIZE + SSS_MBX_SEG_SIZE) + +#define SSS_MBX_MQ_SYNC_CI_SHIFT 0 +#define SSS_MBX_MQ_ASYNC_CI_SHIFT 8 + +#define SSS_MBX_MQ_SYNC_CI_MASK 0xFF +#define SSS_MBX_MQ_ASYNC_CI_MASK 0xFF + +#define SSS_GET_MBX_MQ_CI(val, field) \ + (((val) >> SSS_MBX_MQ_##field##_CI_SHIFT) & SSS_MBX_MQ_##field##_CI_MASK) +#define SSS_CLEAR_MBX_MQ_CI(val, field) \ + ((val) & (~(SSS_MBX_MQ_##field##_CI_MASK << SSS_MBX_MQ_##field##_CI_SHIFT))) + +/* Recv func mbx msg */ +struct sss_recv_mbx { + void *buf; + u16 buf_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_id; + enum sss_msg_ack_type ack_type; + void *resp_buf; +}; + +enum sss_mbx_cb_state { + SSS_VF_RECV_HANDLER_REG = 0, + SSS_VF_RECV_HANDLER_RUN, + SSS_PF_RECV_HANDLER_REG, + SSS_PF_RECV_HANDLER_RUN, + SSS_PPF_RECV_HANDLER_REG, + SSS_PPF_RECV_HANDLER_RUN, + SSS_PPF_TO_PF_RECV_HANDLER_REG, + SSS_PPF_TO_PF_RECV_HANDLER_RUN, +}; + +static inline int sss_check_mbx_param(struct sss_mbx *mbx, + void *buf_in, u16 in_size, u16 channel) +{ + if (!buf_in || in_size == 0) + return -EINVAL; + + if (in_size > SSS_MBX_DATA_SIZE) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx msg len %u exceed limit: [1, %u]\n", + in_size, SSS_MBX_DATA_SIZE); + return -EINVAL; + } + + if (channel >= SSS_CHANNEL_MAX) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + return 0; +} + +struct sss_msg_desc *sss_get_mbx_msg_desc(struct sss_mbx *mbx, u64 src_func_id, u64 direction); +int sss_send_mbx_msg(struct sss_mbx *mbx, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dest, enum sss_msg_direction_type direction_type, + enum sss_msg_ack_type type, struct sss_mbx_msg_info *msg_info); +int sss_send_mbx_to_func(struct sss_mbx *mbx, u8 mod, u16 cmd, + u16 dest_func_id, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); +int sss_send_mbx_to_func_no_ack(struct sss_hwdev *hwdev, u16 func_id, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); +#define sss_send_mbx_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel) \ + sss_send_mbx_to_func_no_ack(hwdev, SSS_MGMT_SRC_ID, mod, cmd, \ + buf_in, in_size, channel) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c new file mode 100644 index 0000000000000000000000000000000000000000..88dc7fcac5b8cf98450e3cbbb8734e271783f19c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_export.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include + +#include 
"sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_export.h" + +#define SSS_WAIT_CB_COMPLETE_MIN 900 +#define SSS_WAIT_CB_COMPLETE_MAX 1000 + +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->pf_mbx_cb[mod] = cb; + mbx->pf_mbx_data[mod] = pri_handle; + + set_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(sss_register_pf_mbx_handler); + +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return -EFAULT; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + mbx->vf_mbx_cb[mod] = cb; + mbx->vf_mbx_data[mod] = pri_handle; + + set_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(sss_register_vf_mbx_handler); + +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[mod]); + + while (test_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->pf_mbx_cb[mod] = NULL; + mbx->pf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_pf_mbx_handler); + +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod) +{ + struct sss_mbx *mbx = NULL; + + if (!hwdev || mod >= SSS_MOD_TYPE_MAX) + return; + + mbx = ((struct sss_hwdev *)hwdev)->mbx; + + clear_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[mod]); + + while (test_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[mod]) != 0) + usleep_range(SSS_WAIT_CB_COMPLETE_MIN, SSS_WAIT_CB_COMPLETE_MAX); + + mbx->vf_mbx_cb[mod] = NULL; + mbx->vf_mbx_data[mod] = NULL; +} +EXPORT_SYMBOL(sss_unregister_vf_mbx_handler); + +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (!SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE(dev)); + return -EINVAL; + } + + return sss_send_mbx_to_func(dev->mbx, mod, cmd, + sss_get_pf_id_of_vf(dev), buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_pf); + +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret = 0; + u16 dst_func_id; + + if (!hwdev) + return -EINVAL; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev)); + return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(dev->dev_hdl, "Invalid vf_id: %u\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id; + + return sss_send_mbx_to_func(dev->mbx, 
mod, cmd, + dst_func_id, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_vf); + +int sss_mbx_send_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel) +{ + struct sss_hwdev *dev = hwdev; + int ret = 0; + u16 dst_func_id; + + if (!hwdev) + return -EINVAL; + + ret = sss_check_mbx_param(dev->mbx, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (SSS_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Invalid func_type: %d\n", + SSS_GET_FUNC_TYPE((struct sss_hwdev *)hwdev)); + return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(dev->dev_hdl, "Invalid vf_id: %u\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_id = sss_get_glb_pf_vf_offset(hwdev) + vf_id; + + return sss_send_mbx_to_func_no_ack(dev, dst_func_id, mod, cmd, buf_in, in_size, channel); +} +EXPORT_SYMBOL(sss_mbx_send_to_vf_no_ack); + +static int sss_send_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct sss_mbx *func_to_func = hwdev->mbx; + int ret; + + ret = sss_check_mbx_param(func_to_func, buf_in, in_size, channel); + if (ret != 0) + return ret; + + if (mod == SSS_MOD_TYPE_COMM && cmd == SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP) + return 0; + + return sss_send_mbx_to_func(func_to_func, mod, cmd, SSS_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, channel); +} + +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (sss_get_dev_present_flag(hwdev) == 0) + return -EPERM; + + return sss_send_mbx_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(sss_sync_mbx_send_msg); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c new file mode 100644 index 0000000000000000000000000000000000000000..0725cf2cd4b1e74885c0c93f495a031c22d44176 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" +#include "sss_hwdev.h" +#include "sss_hwif_api.h" +#include "sss_hwif_eq.h" +#include "sss_hwif_mbx.h" +#include "sss_csr.h" +#include "sss_common.h" +#include "sss_adapter_mgmt.h" + +#define SSS_MBX_WB_STATUS_SIZE 16UL + +#define SSS_MBX_DMA_MSG_QUEUE_DEPTH 32 + +#define SSS_MBX_WQ_NAME "sss_mbx" + +#define SSS_MBX_AREA(hwif) \ + ((hwif)->cfg_reg_base + SSS_HW_CSR_MBX_DATA_OFF) + +#define SSS_GET_MBX_BODY(header) ((u8 *)(header) + SSS_MBX_HEADER_SIZE) + +#define SSS_MBX_LAST_SEG_MAX_SIZE \ + (SSS_MBX_BUF_SIZE_MAX - SSS_MAX_SEG_ID * SSS_MBX_SEG_SIZE) + +#define SSS_MSG_PROCESS_CNT_MAX 10 + +#define SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id) \ + ((src_func_id) < SSS_MAX_PF_NUM(hwdev)) + +#define SSS_MBX_MSG_NO_DATA_SIZE 1 + +#define SSS_MBX_PF_SEND_ERR 0x1 + +#define SSS_MAX_SEG_ID 42 + +struct sss_mbx_work { + struct work_struct work; + struct sss_mbx *mbx; + struct sss_recv_mbx *recv_mbx; + struct sss_msg_buffer *msg_buffer; +}; + +static int sss_alloc_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + u32 
size; + + size = mq->depth * SSS_MBX_BUF_SIZE_MAX; + mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Fail to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void sss_free_mbx_mq_dma_buf(struct sss_hwdev *hwdev, struct sss_mbx_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * SSS_MBX_BUF_SIZE_MAX, + mq->dma_buff_vaddr, mq->dma_buff_paddr); + mq->dma_buff_vaddr = NULL; + mq->dma_buff_paddr = 0; +} + +static int sss_mbx_alloc_mq_dma_addr(struct sss_mbx *mbx) +{ + int ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + if (ret != 0) + return ret; + + ret = sss_alloc_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->async_msg_queue); + if (ret != 0) { + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + return ret; + } + + return 0; +} + +static void sss_mbx_free_mq_dma_addr(struct sss_mbx *mbx) +{ + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->sync_msg_queue); + sss_free_mbx_mq_dma_buf(SSS_TO_HWDEV(mbx), &mbx->async_msg_queue); +} + +static int sss_mbx_alloc_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + send_mbx->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + &send_mbx->wb_paddr, GFP_KERNEL); + if (!send_mbx->wb_vaddr) + return -ENOMEM; + + send_mbx->wb_state = send_mbx->wb_vaddr; + + return 0; +} + +static void sss_mbx_free_mq_wb_addr(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + dma_free_coherent(hwdev->dev_hdl, SSS_MBX_WB_STATUS_SIZE, + send_mbx->wb_vaddr, send_mbx->wb_paddr); + + send_mbx->wb_vaddr = NULL; +} + +static int sss_alloc_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + msg_buffer->resp_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->resp_msg.msg) + return -ENOMEM; + + msg_buffer->recv_msg.msg = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!msg_buffer->recv_msg.msg) { + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; + return -ENOMEM; + } + + atomic_set(&msg_buffer->recv_msg_cnt, 0); + msg_buffer->recv_msg.seq_id = SSS_MAX_SEG_ID; + msg_buffer->resp_msg.seq_id = SSS_MAX_SEG_ID; + + return 0; +} + +static void sss_free_mbx_msg_buffer(struct sss_msg_buffer *msg_buffer) +{ + kfree(msg_buffer->recv_msg.msg); + msg_buffer->recv_msg.msg = NULL; + kfree(msg_buffer->resp_msg.msg); + msg_buffer->resp_msg.msg = NULL; +} + +static int sss_mbx_alloc_dma_addr(struct sss_mbx *sss_mbx) +{ + int ret; + + ret = sss_mbx_alloc_mq_dma_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to alloc mbx dma queue\n"); + return -ENOMEM; + } + + ret = sss_mbx_alloc_mq_wb_addr(sss_mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(sss_mbx)->dev_hdl, "Fail to init mbx dma wb addr\n"); + goto alloc_dma_wb_addr_err; + } + + return 0; + +alloc_dma_wb_addr_err: + sss_mbx_free_mq_dma_addr(sss_mbx); + + return -ENOMEM; +} + +static void sss_mbx_free_dma_addr(struct sss_mbx *mbx) +{ + sss_mbx_free_mq_wb_addr(mbx); + sss_mbx_free_mq_dma_addr(mbx); +} + +static int sss_init_mbx_info(struct sss_mbx *mbx) +{ + int ret; + + mutex_init(&mbx->mbx_send_lock); + mutex_init(&mbx->msg_send_lock); + spin_lock_init(&mbx->mbx_lock); + mbx->sync_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + mbx->async_msg_queue.depth = SSS_MBX_DMA_MSG_QUEUE_DEPTH; + + mbx->workq = 
create_singlethread_workqueue(SSS_MBX_WQ_NAME); + if (!mbx->workq) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to create mbx workq\n"); + return -ENOMEM; + } + + ret = sss_alloc_mbx_msg_buffer(&mbx->mgmt_msg); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc mgmt message buffer\n"); + goto alloc_mbx_msg_buffer_err; + } + + ret = sss_mbx_alloc_dma_addr(mbx); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, "Fail to alloc dma addr\n"); + goto mbx_alloc_dma_addr_err; + } + + return 0; + +mbx_alloc_dma_addr_err: + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +alloc_mbx_msg_buffer_err: + destroy_workqueue(mbx->workq); + + return -ENOMEM; +} + +static void sss_deinit_mbx_info(struct sss_mbx *mbx) +{ + if (mbx->workq) { + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + } + + sss_mbx_free_dma_addr(mbx); + sss_free_mbx_msg_buffer(&mbx->mgmt_msg); +} + +static int sss_alloc_func_mbx_msg(struct sss_mbx *mbx, u16 func_num) +{ + if (mbx->func_msg) + return (mbx->num_func_msg == func_num) ? 0 : -EFAULT; + + mbx->func_msg = kcalloc(func_num, sizeof(*mbx->func_msg), GFP_KERNEL); + if (!mbx->func_msg) + return -ENOMEM; + + return 0; +} + +static void sss_free_func_mbx_msg(struct sss_mbx *mbx) +{ + kfree(mbx->func_msg); + mbx->func_msg = NULL; +} + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num) +{ + u16 i; + u16 cnt; + int ret; + struct sss_hwdev *dev = hwdev; + struct sss_mbx *mbx = dev->mbx; + + if (!hwdev || func_num == 0 || func_num > SSS_MAX_FUNC) + return -EINVAL; + + ret = sss_alloc_func_mbx_msg(mbx, func_num); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func msg\n"); + return ret; + } + + for (cnt = 0; cnt < func_num; cnt++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->func_msg[cnt]); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Fail to alloc func %hu msg buf\n", cnt); + goto alloc_mbx_msg_buf_err; + } + } + + mbx->num_func_msg = func_num; + + return 0; + +alloc_mbx_msg_buf_err: + for (i = 0; i < cnt; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); + + return -ENOMEM; +} + +static void sss_deinit_func_mbx_msg(struct sss_mbx *mbx) +{ + u16 i; + + if (!mbx->func_msg) + return; + + for (i = 0; i < mbx->num_func_msg; i++) + sss_free_mbx_msg_buffer(&mbx->func_msg[i]); + + sss_free_func_mbx_msg(mbx); +} + +static void sss_chip_reset_mbx_ci(struct sss_mbx *mbx) +{ + u32 val; + + val = sss_chip_read_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF); + val = SSS_CLEAR_MBX_MQ_CI(val, SYNC); + val = SSS_CLEAR_MBX_MQ_CI(val, ASYNC); + + sss_chip_write_reg(SSS_TO_HWDEV(mbx)->hwif, SSS_MBX_MQ_CI_OFF, val); +} + +static void sss_chip_set_mbx_wb_attr(struct sss_mbx *mbx) +{ + u32 addr_h; + u32 addr_l; + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + addr_h = upper_32_bits(send_mbx->wb_paddr); + addr_l = lower_32_bits(send_mbx->wb_paddr); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, addr_h); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, addr_l); +} + +static void sss_chip_set_mbx_attr(struct sss_mbx *mbx) +{ + sss_chip_reset_mbx_ci(mbx); + sss_chip_set_mbx_wb_attr(mbx); +} + +static void sss_chip_reset_mbx_attr(struct sss_mbx *sss_mbx) +{ + struct sss_hwdev *hwdev = SSS_TO_HWDEV(sss_mbx); + + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_H_OFF, 0); + sss_chip_write_reg(hwdev->hwif, SSS_HW_CSR_MBX_RES_L_OFF, 0); +} + +static void sss_prepare_send_mbx(struct sss_mbx *mbx) +{ + struct sss_mbx_send *send_mbx = &mbx->mbx_send; + + send_mbx->data = 
SSS_MBX_AREA(SSS_TO_HWDEV(mbx)->hwif); +} + +static int sss_alloc_host_msg(struct sss_hwdev *hwdev) +{ + int i; + int ret; + int host_id; + u8 max_host = SSS_MAX_HOST_NUM(hwdev); + struct sss_mbx *mbx = hwdev->mbx; + + if (max_host == 0) + return 0; + + mbx->host_msg = kcalloc(max_host, sizeof(*mbx->host_msg), GFP_KERNEL); + if (!mbx->host_msg) + return -ENOMEM; + + for (host_id = 0; host_id < max_host; host_id++) { + ret = sss_alloc_mbx_msg_buffer(&mbx->host_msg[host_id]); + if (ret != 0) { + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Fail to alloc host %d msg channel\n", host_id); + goto out; + } + } + + mbx->support_h2h_msg = true; + + return 0; + +out: + for (i = 0; i < host_id; i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; + + return -ENOMEM; +} + +static void sss_free_host_msg(struct sss_mbx *mbx) +{ + int i; + + if (!mbx->host_msg) + return; + + for (i = 0; i < SSS_MAX_HOST_NUM(SSS_TO_HWDEV(mbx)); i++) + sss_free_mbx_msg_buffer(&mbx->host_msg[i]); + + kfree(mbx->host_msg); + mbx->host_msg = NULL; +} + +int sss_hwif_init_mbx(struct sss_hwdev *hwdev) +{ + int ret; + struct sss_mbx *mbx; + + mbx = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!mbx) + return -ENOMEM; + + hwdev->mbx = mbx; + mbx->hwdev = hwdev; + + ret = sss_init_mbx_info(mbx); + if (ret != 0) + goto init_mbx_info_err; + + if (SSS_IS_VF(hwdev)) { + ret = sss_init_func_mbx_msg(hwdev, 1); + if (ret != 0) + goto init_func_mbx_msg_err; + } + + sss_chip_set_mbx_attr(mbx); + + sss_prepare_send_mbx(mbx); + + ret = sss_alloc_host_msg(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Fail to alloc host msg\n"); + goto alloc_host_msg_err; + } + + return 0; + +alloc_host_msg_err: + sss_chip_reset_mbx_attr(mbx); + sss_deinit_func_mbx_msg(mbx); + +init_func_mbx_msg_err: + sss_deinit_mbx_info(mbx); + +init_mbx_info_err: + kfree(mbx); + hwdev->mbx = NULL; + + return ret; +} + +void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + destroy_workqueue(mbx->workq); + mbx->workq = NULL; + + sss_chip_reset_mbx_attr(mbx); + + sss_free_host_msg(mbx); + + sss_deinit_func_mbx_msg(mbx); + + sss_deinit_mbx_info(mbx); + + kfree(mbx); + hwdev->mbx = NULL; +} + +static bool sss_check_mbx_msg_header(void *dev_hdl, + struct sss_msg_desc *msg_desc, u64 mbx_header) +{ + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + + if (seq_id > SSS_MAX_SEG_ID) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x\n", seq_id); + return false; + } + + if (seg_len > SSS_MBX_SEG_SIZE) { + sdk_err(dev_hdl, "Current seg info: seg_len = 0x%x\n", seg_len); + return false; + } + + if (seq_id == SSS_MAX_SEG_ID && seg_len > SSS_MBX_LAST_SEG_MAX_SIZE) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, seg_len = 0x%x\n", + seq_id, seg_len); + return false; + } + + if (seq_id == 0) + return true; + + if (seq_id != msg_desc->seq_id + 1) { + sdk_err(dev_hdl, "Current seg info: seq_id = 0x%x, 0x%x\n", + seq_id, msg_desc->seq_id); + return false; + } + + if (msg_id != msg_desc->msg_info.msg_id) { + sdk_err(dev_hdl, "Current seg info: msg_id = 0x%x, 0x%x\n", + msg_id, msg_desc->msg_info.msg_id); + return false; + } + + if (mod != msg_desc->mod) { + sdk_err(dev_hdl, "Current seg info: mod = 0x%x, 0x%x\n", + mod, msg_desc->mod); + return false; + } + + if (cmd != msg_desc->cmd) { + 
sdk_err(dev_hdl, "Current seg info: cmd = 0x%x, 0x%x\n", + cmd, msg_desc->cmd); + return false; + } + + return true; +} + +static void sss_fill_msg_desc(struct sss_msg_desc *msg_desc, u64 *msg_header) +{ + u64 mbx_header = *msg_header; + u8 seq_id = SSS_GET_MSG_HEADER(mbx_header, SEQID); + u8 seg_len = SSS_GET_MSG_HEADER(mbx_header, SEG_LEN); + u8 msg_id = SSS_GET_MSG_HEADER(mbx_header, MSG_ID); + u8 mod = SSS_GET_MSG_HEADER(mbx_header, MODULE); + u16 cmd = SSS_GET_MSG_HEADER(mbx_header, CMD); + u32 offset = seq_id * SSS_MBX_SEG_SIZE; + void *msg_body = SSS_GET_MBX_BODY(((void *)msg_header)); + + msg_desc->seq_id = seq_id; + if (seq_id == 0) { + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } + msg_desc->msg_len = SSS_GET_MSG_HEADER(mbx_header, MSG_LEN); + msg_desc->msg_info.state = SSS_GET_MSG_HEADER(mbx_header, STATUS); + memcpy((u8 *)msg_desc->msg + offset, msg_body, seg_len); +} + +static struct sss_recv_mbx *sss_alloc_recv_mbx(void) +{ + struct sss_recv_mbx *recv_mbx = NULL; + + recv_mbx = kzalloc(sizeof(*recv_mbx), GFP_KERNEL); + if (!recv_mbx) + return NULL; + + recv_mbx->buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->buf) + goto alloc_recv_mbx_buf_err; + + recv_mbx->resp_buf = kzalloc(SSS_MBX_BUF_SIZE_MAX, GFP_KERNEL); + if (!recv_mbx->resp_buf) + goto alloc_recv_mbx_resp_buf_err; + + return recv_mbx; + +alloc_recv_mbx_resp_buf_err: + kfree(recv_mbx->buf); + +alloc_recv_mbx_buf_err: + kfree(recv_mbx); + + return NULL; +} + +static void sss_free_recv_mbx(struct sss_recv_mbx *recv_mbx) +{ + kfree(recv_mbx->resp_buf); + kfree(recv_mbx->buf); + kfree(recv_mbx); +} + +static int sss_recv_vf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_vf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->vf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_VF_RECV_HANDLER_REG, &mbx->vf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->vf_mbx_data[recv_mbx->mod], recv_mbx->cmd, recv_mbx->buf, + recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "VF mbx cb is unregistered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_VF_RECV_HANDLER_RUN, &mbx->vf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_ppf_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, void *resp_buf, u16 *size) +{ + int ret; + sss_pf_from_ppf_mbx_handler_t callback; + enum sss_mod_type mod = recv_mbx->mod; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %d\n", mod); + return -EINVAL; + } + + set_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + callback = mbx->pf_recv_ppf_mbx_cb[mod]; + if (callback && + test_bit(SSS_PPF_TO_PF_RECV_HANDLER_REG, &mbx->ppf_to_pf_mbx_cb_state[mod]) != 0) { + ret = callback(mbx->pf_recv_ppf_mbx_data[mod], recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF recv ppf mbx cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_TO_PF_RECV_HANDLER_RUN, &mbx->ppf_to_pf_mbx_cb_state[mod]); + + return ret; +} + +static int sss_recv_ppf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx 
*recv_mbx, u8 pf_id, + void *resp_buf, u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_ppf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->ppf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PPF_RECV_HANDLER_REG, &mbx->ppf_mbx_cb_state[recv_mbx->mod])) { + ret = callback(mbx->ppf_mbx_data[recv_mbx->mod], pf_id, vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PPF mbx cb is unregistered, mod = %hhu\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PPF_RECV_HANDLER_RUN, &mbx->ppf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static int sss_recv_pf_from_vf_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, + u16 src_func_id, void *resp_buf, + u16 *size) +{ + int ret; + u16 vf_id = 0; + sss_pf_mbx_handler_t callback; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (recv_mbx->mod >= SSS_MOD_TYPE_MAX) { + sdk_warn(hwdev->dev_hdl, "Recv err mbx msg, mod = %hhu\n", recv_mbx->mod); + return -EINVAL; + } + + set_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + callback = mbx->pf_mbx_cb[recv_mbx->mod]; + if (callback && + test_bit(SSS_PF_RECV_HANDLER_REG, &mbx->pf_mbx_cb_state[recv_mbx->mod]) != 0) { + vf_id = src_func_id - sss_get_glb_pf_vf_offset(SSS_TO_HWDEV(mbx)); + ret = callback(mbx->pf_mbx_data[recv_mbx->mod], vf_id, recv_mbx->cmd, + recv_mbx->buf, recv_mbx->buf_len, resp_buf, size); + } else { + sdk_warn(hwdev->dev_hdl, "PF mbx mod(0x%x) cb is unregistered\n", recv_mbx->mod); + ret = -EINVAL; + } + + clear_bit(SSS_PF_RECV_HANDLER_RUN, &mbx->pf_mbx_cb_state[recv_mbx->mod]); + + return ret; +} + +static void sss_send_mbx_response(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx, int ret, u16 size, u16 src_func_id) +{ + u16 data_size; + struct sss_mbx_msg_info msg_info = {0}; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + msg_info.msg_id = recv_mbx->msg_id; + if (ret != 0) + msg_info.state = SSS_MBX_PF_SEND_ERR; + + data_size = (size == 0 || ret != 0) ? 
SSS_MBX_MSG_NO_DATA_SIZE : size; + if (data_size > SSS_MBX_DATA_SIZE) { + sdk_err(hwdev->dev_hdl, "Resp msg len(%d), out of range: %d\n", + data_size, SSS_MBX_DATA_SIZE); + data_size = SSS_MBX_DATA_SIZE; + } + + sss_send_mbx_msg(mbx, recv_mbx->mod, recv_mbx->cmd, recv_mbx->resp_buf, data_size, + src_func_id, SSS_RESP_MSG, SSS_MSG_NO_ACK, &msg_info); +} + +static void sss_recv_mbx_handler(struct sss_mbx *mbx, + struct sss_recv_mbx *recv_mbx) +{ + int ret = 0; + void *resp_buf = recv_mbx->resp_buf; + u16 size = SSS_MBX_DATA_SIZE; + u16 src_func_id = recv_mbx->src_func_id; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + + if (SSS_IS_VF(hwdev)) { + ret = sss_recv_vf_mbx_handler(mbx, recv_mbx, resp_buf, &size); + goto out; + } + + if (SSS_SRC_IS_PF_OR_PPF(hwdev, src_func_id)) { + if (SSS_IS_PPF(hwdev)) + ret = sss_recv_ppf_mbx_handler(mbx, recv_mbx, + (u8)src_func_id, + resp_buf, &size); + else + ret = sss_recv_pf_from_ppf_handler(mbx, recv_mbx, resp_buf, &size); + } else { + ret = sss_recv_pf_from_vf_mbx_handler(mbx, + recv_mbx, src_func_id, + resp_buf, &size); + } + +out: + if (recv_mbx->ack_type == SSS_MSG_ACK) + sss_send_mbx_response(mbx, recv_mbx, ret, size, src_func_id); +} + +static void sss_recv_mbx_work_handler(struct work_struct *work) +{ + struct sss_mbx_work *mbx_work = container_of(work, struct sss_mbx_work, work); + + sss_recv_mbx_handler(mbx_work->mbx, mbx_work->recv_mbx); + + atomic_dec(&mbx_work->msg_buffer->recv_msg_cnt); + + destroy_work(&mbx_work->work); + + sss_free_recv_mbx(mbx_work->recv_mbx); + + kfree(mbx_work); +} + +static void sss_init_recv_mbx_param(struct sss_recv_mbx *recv_mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + recv_mbx->msg_id = msg_desc->msg_info.msg_id; + recv_mbx->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + recv_mbx->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + recv_mbx->ack_type = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + recv_mbx->src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + recv_mbx->buf_len = msg_desc->msg_len; + memcpy(recv_mbx->buf, msg_desc->msg, msg_desc->msg_len); +} + +static int sss_init_mbx_work(struct sss_mbx *mbx, struct sss_recv_mbx *recv_mbx, + struct sss_msg_buffer *msg_buffer) +{ + struct sss_mbx_work *mbx_work = NULL; + + mbx_work = kzalloc(sizeof(*mbx_work), GFP_KERNEL); + if (!mbx_work) + return -ENOMEM; + + atomic_inc(&msg_buffer->recv_msg_cnt); + + mbx_work->msg_buffer = msg_buffer; + mbx_work->recv_mbx = recv_mbx; + mbx_work->mbx = mbx; + + INIT_WORK(&mbx_work->work, sss_recv_mbx_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mbx->workq, &mbx_work->work); + + return 0; +} + +static void sss_recv_mbx_msg_handler(struct sss_mbx *mbx, + struct sss_msg_desc *msg_desc, u64 msg_header) +{ + u32 msg_cnt; + int ret; + struct sss_hwdev *hwdev = SSS_TO_HWDEV(mbx); + struct sss_recv_mbx *recv_mbx = NULL; + struct sss_msg_buffer *msg_buffer = container_of(msg_desc, struct sss_msg_buffer, recv_msg); + + msg_cnt = atomic_read(&msg_buffer->recv_msg_cnt); + if (msg_cnt > SSS_MSG_PROCESS_CNT_MAX) { + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + + sdk_warn(hwdev->dev_hdl, "This func(%llu) have %u msg wait to process\n", + src_func_id, msg_cnt); + return; + } + + recv_mbx = sss_alloc_recv_mbx(); + if (!recv_mbx) { + sdk_err(hwdev->dev_hdl, "Fail to alloc receive recv_mbx message buffer\n"); + return; + } + + sss_init_recv_mbx_param(recv_mbx, msg_desc, msg_header); + + ret = sss_init_mbx_work(mbx, recv_mbx, msg_buffer); + if (ret != 0) + sss_free_recv_mbx(recv_mbx); +} + +static void 
sss_resp_mbx_handler(struct sss_mbx *mbx, + const struct sss_msg_desc *msg_desc) +{ + spin_lock(&mbx->mbx_lock); + if (msg_desc->msg_info.msg_id == mbx->send_msg_id && + mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_SUCCESS; + else + sdk_err(SSS_TO_HWDEV(mbx)->dev_hdl, + "Mbx resp timeout, current send msg_id(0x%x), recv msg_id(0x%x), status(0x%x)\n", + mbx->send_msg_id, msg_desc->msg_info.msg_id, msg_desc->msg_info.state); + spin_unlock(&mbx->mbx_lock); +} + +static void sss_recv_mbx_aeq(struct sss_mbx *mbx, u64 *msg_header, + struct sss_msg_desc *msg_desc) +{ + u64 header = *msg_header; + + if (!sss_check_mbx_msg_header(SSS_TO_HWDEV(mbx)->dev_hdl, msg_desc, header)) { + msg_desc->seq_id = SSS_MAX_SEG_ID; + return; + } + + sss_fill_msg_desc(msg_desc, msg_header); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_DIRECT_SEND_MSG) { + sss_recv_mbx_msg_handler(mbx, msg_desc, header); + return; + } + + sss_resp_mbx_handler(mbx, msg_desc); +} + +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size) +{ + u64 msg_header = *((u64 *)header); + u64 src_func_id = SSS_GET_MSG_HEADER(msg_header, SRC_GLB_FUNC_ID); + u64 direction = SSS_GET_MSG_HEADER(msg_header, DIRECTION); + struct sss_msg_desc *msg_desc = NULL; + struct sss_hwdev *hwdev = (struct sss_hwdev *)handle; + struct sss_mbx *mbx = hwdev->mbx; + + msg_desc = sss_get_mbx_msg_desc(mbx, src_func_id, direction); + if (!msg_desc) { + sdk_err(hwdev->dev_hdl, "Invalid mbx src_func_id: %u\n", (u32)src_func_id); + return; + } + + sss_recv_mbx_aeq(mbx, (u64 *)header, msg_desc); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h new file mode 100644 index 0000000000000000000000000000000000000000..ab440fea3e0a5cd0c774023bb591d543356079f6 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mbx_init.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MBX_INIT_H +#define SSS_HWIF_MBX_INIT_H + +#include "sss_hwdev.h" + +int sss_init_func_mbx_msg(void *hwdev, u16 func_num); +int sss_hwif_init_mbx(struct sss_hwdev *hwdev); +void sss_hwif_deinit_mbx(struct sss_hwdev *hwdev); +void sss_recv_mbx_aeq_handler(void *handle, u8 *header, u8 size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h new file mode 100644 index 0000000000000000000000000000000000000000..c6a085e5444ab177619b9948ab1a1f33ffd40016 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_COMMON_H +#define SSS_HWIF_MGMT_COMMON_H + +#define SSS_ASYNC_MSG_FLAG 0x8 + +#define SSS_PF_MGMT_BUF_LEN_MAX 2048UL + +#define SSS_MSG_TO_MGMT_LEN_MAX 2016 + +#define SSS_SEG_LEN 48 + +#define SSS_MGMT_SEQ_ID_MAX \ + (ALIGN(SSS_MSG_TO_MGMT_LEN_MAX, SSS_SEG_LEN) / SSS_SEG_LEN) + +#define SSS_MGMT_LAST_SEG_LEN_MAX \ + (SSS_PF_MGMT_BUF_LEN_MAX - SSS_SEG_LEN * SSS_MGMT_SEQ_ID_MAX) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c new file mode 100644 index 0000000000000000000000000000000000000000..af2e9d44d2bdb38c476b74f0c7eeb1c3f7c14210 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.c @@ 
-0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq_init.h" +#include "sss_hwif_adm_init.h" + +#define SSS_DEF_OUT_SIZE 1 + +struct sss_mgmt_msg_handle_work { + struct work_struct work; + struct sss_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + u8 no_ack; + u8 resvd; + + enum sss_mod_type mod; + u16 cmd; + u16 msg_id; +}; + +static void sss_send_response_mbx_to_mgmt(struct sss_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct sss_mbx_msg_info info; + + info.msg_id = (u8)msg_id; + info.state = 0; + + sss_send_mbx_msg(hwdev->mbx, mod, cmd, buf_in, in_size, + SSS_MGMT_SRC_ID, SSS_RESP_MSG, SSS_MSG_NO_ACK, &info); +} + +static void sss_mgmt_recv_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 mod, u16 cmd, void *in_buf, + u16 in_size, u16 msg_id, int resp_need) +{ + u16 size; + u16 out_size = 0; + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + void *out_buf = mgmt_msg->ack_buf; + + memset(out_buf, 0, SSS_PF_MGMT_BUF_LEN_MAX); + + if (mod >= SSS_MOD_TYPE_HW_MAX) { + sdk_warn(dev_hdl, "Recv illegal msg from mgmt cpu, mod = %d\n", mod); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + set_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + + if (!mgmt_msg->recv_handler[mod] || + !test_bit(SSS_CALLBACK_REG, &mgmt_msg->recv_handler_state[mod])) { + sdk_warn(dev_hdl, "Recv mgmt cb is null, mod = %d\n", mod); + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + out_size = sizeof(struct sss_mgmt_msg_head); + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + goto out; + } + + mgmt_msg->recv_handler[mod](mgmt_msg->recv_data[mod], + cmd, in_buf, in_size, out_buf, &out_size); + + clear_bit(SSS_CALLBACK_RUNNING, &mgmt_msg->recv_handler_state[mod]); + +out: + if (resp_need != 0) { + size = (out_size == 0) ? 
SSS_DEF_OUT_SIZE : out_size; + sss_send_response_mbx_to_mgmt(SSS_TO_HWDEV(mgmt_msg), mod, cmd, + out_buf, size, msg_id); + } +} + +static void sss_recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct sss_mgmt_msg_handle_work *msg_work = + container_of(work, struct sss_mgmt_msg_handle_work, work); + + sss_mgmt_recv_msg_handler(msg_work->pf_to_mgmt, msg_work->mod, + msg_work->cmd, msg_work->msg, msg_work->msg_len, msg_work->msg_id, + !msg_work->no_ack); + + destroy_work(&msg_work->work); + + kfree(msg_work->msg); + kfree(msg_work); +} + +static void sss_init_mgmt_recv_msg(struct sss_recv_msg *msg_recv, u64 msg_header) +{ + msg_recv->cmd = SSS_GET_MSG_HEADER(msg_header, CMD); + msg_recv->mod = SSS_GET_MSG_HEADER(msg_header, MODULE); + msg_recv->no_ack = SSS_GET_MSG_HEADER(msg_header, NO_ACK); + msg_recv->buf_len = SSS_GET_MSG_HEADER(msg_header, MSG_LEN); + msg_recv->msg_id = SSS_GET_MSG_HEADER(msg_header, MSG_ID); + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; +} + +static bool sss_check_mgmt_head_info(struct sss_recv_msg *msg_recv, u64 header) +{ + u8 seg_len = SSS_GET_MSG_HEADER(header, SEG_LEN); + u8 seg_id = SSS_GET_MSG_HEADER(header, SEQID); + u16 msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + + if (seg_id > SSS_MGMT_SEQ_ID_MAX || seg_len > SSS_SEG_LEN || + (seg_id == SSS_MGMT_SEQ_ID_MAX && seg_len > SSS_MGMT_LAST_SEG_LEN_MAX)) + return false; + + if (seg_id == 0) { + msg_recv->msg_id = msg_id; + msg_recv->seq_id = seg_id; + + return true; + } + + if (seg_id != (msg_recv->seq_id + 1) || msg_id != msg_recv->msg_id) + return false; + + msg_recv->seq_id = seg_id; + + return true; +} + +static void sss_mgmt_resp_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + void *dev_hdl = SSS_TO_HWDEV(mgmt_msg)->dev_hdl; + + if ((msg_recv->msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + spin_lock(&mgmt_msg->sync_event_lock); + if (msg_recv->msg_id == mgmt_msg->sync_msg_id && + mgmt_msg->event_state == SSS_ADM_EVENT_START) { + mgmt_msg->event_state = SSS_ADM_EVENT_SUCCESS; + complete(&msg_recv->done); + spin_unlock(&mgmt_msg->sync_event_lock); + return; + } + + sdk_err(dev_hdl, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + sdk_err(dev_hdl, "Wait timeout, send and recv msg id(0x%x)(0x%x), event state=%d\n", + mgmt_msg->sync_msg_id, msg_recv->msg_id, mgmt_msg->event_state); + spin_unlock(&mgmt_msg->sync_event_lock); +} + +static void sss_init_mgmt_msg_work(struct sss_msg_pf_to_mgmt *mgmt_msg, + struct sss_recv_msg *msg_recv) +{ + struct sss_mgmt_msg_handle_work *msg_work = NULL; + + msg_work = kzalloc(sizeof(*msg_work), GFP_KERNEL); + if (!msg_work) + return; + + if (msg_recv->buf_len != 0) { + msg_work->msg = kzalloc(msg_recv->buf_len, GFP_KERNEL); + if (!msg_work->msg) { + kfree(msg_work); + return; + } + } + + msg_work->pf_to_mgmt = mgmt_msg; + msg_work->msg_len = msg_recv->buf_len; + memcpy(msg_work->msg, msg_recv->buf, msg_recv->buf_len); + msg_work->msg_id = msg_recv->msg_id; + msg_work->mod = msg_recv->mod; + msg_work->cmd = msg_recv->cmd; + msg_work->no_ack = msg_recv->no_ack; + + INIT_WORK(&msg_work->work, sss_recv_mgmt_msg_work_handler); + queue_work_on(WORK_CPU_UNBOUND, mgmt_msg->workq, &msg_work->work); +} + +static void sss_recv_mgmt_msg_handler(struct sss_msg_pf_to_mgmt *mgmt_msg, + u8 *msg_header, struct sss_recv_msg *msg_recv) +{ + u8 seq_id; + u8 seq_len; + u16 msg_id; + u32 msg_offset; + u64 dir; + u64 header = *((u64 *)msg_header); + void *msg_body; + 
struct sss_hwdev *hwdev = SSS_TO_HWDEV(mgmt_msg); + + dir = SSS_GET_MSG_HEADER(header, DIRECTION); + msg_id = SSS_GET_MSG_HEADER(header, MSG_ID); + if (dir == SSS_RESP_MSG && (msg_id & SSS_ASYNC_MSG_FLAG) != 0) + return; + + if (!sss_check_mgmt_head_info(msg_recv, header)) { + msg_recv->seq_id = SSS_MGMT_SEQ_ID_MAX; + sdk_err(hwdev->dev_hdl, "Fail to check Mgmt msg seq id and seq len\n"); + return; + } + + seq_len = SSS_GET_MSG_HEADER(header, SEG_LEN); + seq_id = SSS_GET_MSG_HEADER(header, SEQID); + msg_offset = seq_id * SSS_SEG_LEN; + msg_body = msg_header + sizeof(header); + memcpy((u8 *)msg_recv->buf + msg_offset, msg_body, seq_len); + + if (!SSS_GET_MSG_HEADER(header, LAST)) + return; + + sss_init_mgmt_recv_msg(msg_recv, header); + + if (SSS_GET_MSG_HEADER(header, DIRECTION) == SSS_RESP_MSG) { + sss_mgmt_resp_msg_handler(mgmt_msg, msg_recv); + return; + } + + sss_init_mgmt_msg_work(mgmt_msg, msg_recv); +} + +static void sss_set_mbx_event_timeout(struct sss_hwdev *hwdev) +{ + struct sss_mbx *mbx = hwdev->mbx; + + spin_lock(&mbx->mbx_lock); + if (mbx->event_flag == SSS_EVENT_START) + mbx->event_flag = SSS_EVENT_TIMEOUT; + spin_unlock(&mbx->mbx_lock); +} + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *msg_header, u8 size) +{ + bool msg_dir; + struct sss_recv_msg *msg = NULL; + struct sss_msg_pf_to_mgmt *mgmt_msg = NULL; + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + if (SSS_GET_MSG_HEADER(*(u64 *)msg_header, SOURCE) == SSS_MSG_SRC_MBX) { + sss_recv_mbx_aeq_handler(hwdev, msg_header, size); + return; + } + + mgmt_msg = dev->pf_to_mgmt; + if (!mgmt_msg) + return; + + msg_dir = SSS_GET_MSG_HEADER(*(u64 *)msg_header, DIRECTION) == SSS_DIRECT_SEND_MSG; + + msg = msg_dir ? &mgmt_msg->recv_msg : &mgmt_msg->recv_resp_msg; + + sss_recv_mgmt_msg_handler(mgmt_msg, msg_header, msg); +} + +void sss_force_complete_all(void *dev) +{ + struct sss_hwdev *hwdev = dev; + + spin_lock_bh(&hwdev->channel_lock); + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF && + test_bit(SSS_HW_ADM_INIT_OK, &hwdev->func_state)) + sss_complete_adm_event(hwdev); + + if (test_bit(SSS_HW_MBX_INIT_OK, &hwdev->func_state)) + sss_set_mbx_event_timeout(hwdev); + + if (test_bit(SSS_HW_CTRLQ_INIT_OK, &hwdev->func_state)) + sss_ctrlq_flush_sync_cmd(hwdev); + + spin_unlock_bh(&hwdev->channel_lock); +} + +void sss_flush_mgmt_workq(void *hwdev) +{ + struct sss_hwdev *dev = (struct sss_hwdev *)hwdev; + + flush_workqueue(dev->aeq_info->workq); + + if (sss_get_func_type(dev) != SSS_FUNC_TYPE_VF) + flush_workqueue(dev->pf_to_mgmt->workq); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h new file mode 100644 index 0000000000000000000000000000000000000000..19196c2b6f9bcb6f47eabb3b54a81bd14c554df5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_hwif_mgmt_init.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_MGMT_INIT_H +#define SSS_HWIF_MGMT_INIT_H + +#include "sss_hwdev.h" + +void sss_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size); +void sss_force_complete_all(void *dev); +void sss_flush_mgmt_workq(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c new file mode 100644 index 0000000000000000000000000000000000000000..ead8a09435c67d82eb53a35b49887a2184e09dbb --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.c @@ -0,0 +1,47 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev.h" + +static void sss_record_pcie_error(void *dev) +{ + struct sss_hwdev *hwdev = (struct sss_hwdev *)dev; + + atomic_inc(&hwdev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_err(&pdev->dev, "Pci error, state: 0x%08x\n", state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (adapter) + sss_record_pcie_error(adapter->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h new file mode 100644 index 0000000000000000000000000000000000000000..26e65d77b98e56835fa231fd498fcd69ba47dcfe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_error.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ERROR_H +#define SSS_PCI_ERROR_H + +#include + +pci_ers_result_t sss_detect_pci_error(struct pci_dev *pdev, + pci_channel_state_t state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c new file mode 100644 index 0000000000000000000000000000000000000000..d73b7d2db28972a61d5013e5d5e6a64603e2acfe --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" + +static bool attach_uld = true; +module_param(attach_uld, bool, 0444); +MODULE_PARM_DESC(attach_uld, "enable attach upper driver - default is true"); + +static struct sss_uld_info g_uld_info[SSS_SERVICE_TYPE_MAX]; + +static const char *g_uld_name[SSS_SERVICE_TYPE_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", + "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom" +}; + +/* lock for attach/detach all uld and register/ unregister uld */ +struct mutex g_uld_mutex; + +void sss_init_uld_lock(void) +{ + mutex_init(&g_uld_mutex); +} + +void sss_lock_uld(void) +{ + mutex_lock(&g_uld_mutex); +} + +void sss_unlock_uld(void) +{ + mutex_unlock(&g_uld_mutex); +} + +const char **sss_get_uld_names(void) +{ + return g_uld_name; +} + +struct sss_uld_info *sss_get_uld_info(void) +{ + return g_uld_info; +} + +bool sss_attach_is_enable(void) +{ + return attach_uld; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h new file mode 100644 index 0000000000000000000000000000000000000000..c703eb3ab0d28fa7c03eab6c56a11dda59fac144 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_global.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_GLOBAL_H +#define SSS_PCI_GLOBAL_H + 
+#include + +#include "sss_hw_uld_driver.h" + +struct sss_uld_info *sss_get_uld_info(void); +bool sss_attach_is_enable(void); +const char **sss_get_uld_names(void); +void sss_init_uld_lock(void); +void sss_lock_uld(void); +void sss_unlock_uld(void); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h new file mode 100644 index 0000000000000000000000000000000000000000..bce7e077f210184fa6bc40620d2e3f1fa5fff7ff --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_id_tbl.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_ID_TBL_H +#define SSS_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_SSSNIC 0x1F3F +#define SSS_DEV_ID_STANDARD 0x9020 +#define SSS_DEV_ID_SPN120 0x9021 +#define SSS_DEV_ID_VF 0x9001 +#define SSS_DEV_ID_VF_HV 0x9002 +#define SSS_DEV_ID_SPU 0xAC00 + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c new file mode 100644 index 0000000000000000000000000000000000000000..4f1a865d638a1e3aed44fb6761fe2e03a8ae3a5e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.c @@ -0,0 +1,587 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_pci_remove.h" +#include "sss_pci_global.h" +#include "sss_tool.h" + +#define SSS_SYNC_YEAR_OFFSET 1900 +#define SSS_SYNC_MONTH_OFFSET 1 + +#define SSS_CHECK_EVENT_INFO(event) \ + ((event)->service == SSS_EVENT_SRV_COMM && \ + (event)->type == SSS_EVENT_FAULT) + +#define SSS_CHECK_FAULT_EVENT_INFO(hwdev, fault_event) \ + ((fault_event)->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && \ + (fault_event)->info.chip.func_id < sss_get_max_pf_num(hwdev)) + +#define SSS_GET_CFG_REG_BAR(pdev) (SSS_IS_VF_DEV(pdev) ? \ + SSS_VF_PCI_CFG_REG_BAR : SSS_PF_PCI_CFG_REG_BAR) + +static bool sss_get_vf_load_state(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (pci_is_root_bus(pdev->bus)) + return false; + + dev = pdev->is_virtfn ? 
pdev->physfn : pdev; + adapter = pci_get_drvdata(dev); + + if (!adapter) { + sdk_err(&pdev->dev, "Invalid adapter, is null.\n"); + return false; + } + + return true; +} + +static int sss_init_pci_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable pci device\n"); + goto enable_err; + } + + ret = pci_request_regions(pdev, SSS_DRV_NAME); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to request regions\n"); + goto regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit DMA mask\n"); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set DMA mask\n"); + goto dma_err; + } + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + sdk_warn(&pdev->dev, "Fail to set 64-bit coherent DMA mask\n"); + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set coherent DMA mask\n"); + goto dma_err; + } + } + + return 0; + +dma_err: + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + +regions_err: + pci_disable_device(pdev); + +enable_err: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +void sss_set_adapter_probe_state(struct sss_pci_adapter *adapter, int state) +{ + mutex_lock(&adapter->uld_attach_mutex); + adapter->init_state = state; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static int sss_map_pci_bar(struct pci_dev *pdev, + struct sss_pci_adapter *adapter) +{ + adapter->db_base_paddr = pci_resource_start(pdev, SSS_PCI_DB_BAR); + adapter->db_dwqe_len = pci_resource_len(pdev, SSS_PCI_DB_BAR); + adapter->db_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_DB_BAR); + if (!adapter->db_reg_bar) { + sdk_err(&pdev->dev, "Fail to map db reg bar\n"); + return -ENOMEM; + } + + if (!SSS_IS_VF_DEV(pdev)) { + adapter->mgmt_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_MGMT_REG_BAR); + if (!adapter->mgmt_reg_bar) { + sdk_err(&pdev->dev, "Fail to map mgmt reg bar\n"); + goto mgmt_bar_err; + } + } + + adapter->intr_reg_bar = pci_ioremap_bar(pdev, SSS_PCI_INTR_REG_BAR); + if (!adapter->intr_reg_bar) { + sdk_err(&pdev->dev, "Fail to map intr reg bar\n"); + goto intr_bar_err; + } + + adapter->cfg_reg_bar = pci_ioremap_bar(pdev, SSS_GET_CFG_REG_BAR(pdev)); + if (!adapter->cfg_reg_bar) { + sdk_err(&pdev->dev, "Fail to map config reg bar\n"); + goto cfg_bar_err; + } + + return 0; + +cfg_bar_err: + iounmap(adapter->intr_reg_bar); + +intr_bar_err: + if (!SSS_IS_VF_DEV(pdev)) + iounmap(adapter->mgmt_reg_bar); + +mgmt_bar_err: + iounmap(adapter->db_reg_bar); + + return -ENOMEM; +} + +static void sss_send_event_to_uld(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + enum sss_service_type type; + const char **uld_name = sss_get_uld_names(); + struct sss_uld_info *uld_info = sss_get_uld_info(); + + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (test_and_set_bit(type, &adapter->uld_run_state)) { + sdk_warn(&adapter->pcidev->dev, + "Fail to send event, svc: 0x%x, event type: 0x%x, uld_name: %s\n", + event_info->service, event_info->type, uld_name[type]); + continue; + } + + if (uld_info[type].event) + uld_info[type].event(&adapter->hal_dev, + adapter->uld_dev[type], event_info); + clear_bit(type, &adapter->uld_run_state); + } +} + +static void sss_send_event_to_dst(struct 
sss_pci_adapter *adapter, u16 func_id, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + if (sss_get_global_func_id(dest_adapter->hwdev) == func_id) { + sss_send_event_to_uld(dest_adapter, event_info); + break; + } + } + sss_put_chip_node(); +} + +static void sss_send_event_to_all_pf(struct sss_pci_adapter *adapter, + struct sss_event_info *event_info) +{ + struct sss_pci_adapter *dest_adapter = NULL; + + sss_hold_chip_node(); + list_for_each_entry(dest_adapter, &adapter->chip_node->func_list, node) { + if (adapter->init_state == SSS_IN_REMOVE) + continue; + + if (sss_get_func_type(dest_adapter->hwdev) == SSS_FUNC_TYPE_VF) + continue; + + sss_send_event_to_uld(dest_adapter, event_info); + } + sss_put_chip_node(); +} + +static void sss_process_event(void *data, struct sss_event_info *event_info) +{ + u16 id; + struct sss_pci_adapter *pci_adapter = data; + struct sss_fault_event *fault_event = (void *)event_info->event_data; + + if (SSS_CHECK_EVENT_INFO(event_info) && + SSS_CHECK_FAULT_EVENT_INFO(pci_adapter->hwdev, fault_event)) { + id = fault_event->info.chip.func_id; + return sss_send_event_to_dst(pci_adapter, id, event_info); + } + + if (event_info->type == SSS_EVENT_MGMT_WATCHDOG) + sss_send_event_to_all_pf(pci_adapter, event_info); + else + sss_send_event_to_uld(pci_adapter, event_info); +} + +static void sss_sync_time_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + u64 mstime; + struct timeval val = {0}; + struct rtc_time r_time = {0}; + + do_gettimeofday(&val); + + mstime = (u64)(val.tv_sec * MSEC_PER_SEC + val.tv_usec / USEC_PER_MSEC); + ret = sss_chip_sync_time(adapter->hwdev, mstime); + if (ret != 0) { + sdk_err(&adapter->pcidev->dev, "Fail to sync UTC time to fw, ret:%d.\n", ret); + } else { + rtc_time_to_tm((unsigned long)(val.tv_sec), &r_time); + sdk_info(&adapter->pcidev->dev, + "Success to sync UTC time to fw. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + r_time.tm_year + SSS_SYNC_YEAR_OFFSET, + r_time.tm_mon + SSS_SYNC_MONTH_OFFSET, + r_time.tm_mday, r_time.tm_hour, r_time.tm_min, r_time.tm_sec); + } +} + +int sss_attach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type, const struct sss_uld_info *uld_info) +{ + int ret = 0; + void *uld = NULL; + const char **name = sss_get_uld_names(); + struct pci_dev *pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + + if (adapter->uld_dev[type]) { + sdk_err(&pdev->dev, "Fail to attach pci dev, driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return 0; + } + + ret = uld_info->probe(&adapter->hal_dev, &uld, adapter->uld_dev_name[type]); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to probe for driver %s\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); + return ret; + } + + adapter->uld_dev[type] = uld; + set_bit(type, &adapter->uld_attach_state); + mutex_unlock(&adapter->uld_attach_mutex); + + sdk_info(&pdev->dev, "Success to attach %s driver\n", name[type]); + + return 0; +} + +static bool sss_get_vf_service_load(struct pci_dev *pdev, + enum sss_service_type service_type) +{ + struct sss_pci_adapter *adapter = NULL; + struct pci_dev *dev = NULL; + + if (!pdev) { + pr_err("Invalid pdev, is null.\n"); + return false; + } + + dev = (pdev->is_virtfn != 0) ? 
pdev->physfn : pdev; + + adapter = pci_get_drvdata(dev); + if (!adapter) { + sdk_err(&pdev->dev, "Invalid pci adapter, is null.\n"); + return false; + } + + return true; +} + +static void sss_attach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + enum sss_service_type type; + struct pci_dev *pdev = adapter->pcidev; + struct sss_uld_info *info = sss_get_uld_info(); + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_NIC; type < SSS_SERVICE_TYPE_MAX; type++) { + if (!info[type].probe) + continue; + if (pdev->is_virtfn && + !sss_get_vf_service_load(pdev, type)) { + sdk_info(&pdev->dev, + "VF dev disable service_type = %d load in host\n", type); + continue; + } + sss_attach_uld_driver(adapter, type, &info[type]); + } + sss_unlock_uld(); + sss_put_chip_node(); +} + +static int sss_attach_uld_dev(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + adapter->hal_dev.pdev = pdev; + adapter->hal_dev.hwdev = adapter->hwdev; + + if (!sss_attach_is_enable()) + return 0; + + sss_attach_all_uld_driver(adapter); + + return 0; +} + +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + const char **uld_name = sss_get_uld_names(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to register\n", type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid info of %s driver to register\n", uld_name[type]); + return -EINVAL; + } + + sss_hold_chip_node(); + sss_lock_uld(); + + if (info[type].probe) { + sss_unlock_uld(); + sss_put_chip_node(); + pr_err("Driver %s already register\n", uld_name[type]); + return -EINVAL; + } + + list = sss_get_chip_list(); + memcpy(&info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(card_node, list, node) { + list_for_each_entry(adapter, &card_node->func_list, node) { + if (sss_attach_uld_driver(adapter, type, uld_info) != 0) { + sdk_err(&adapter->pcidev->dev, + "Fail to attach %s driver to pci dev\n", uld_name[type]); + continue; + } + } + } + + sss_unlock_uld(); + sss_put_chip_node(); + + pr_info("Success to register %s driver\n", uld_name[type]); + return 0; +} +EXPORT_SYMBOL(sss_register_uld); + +static int sss_notify_ok_to_chip(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + ret = sss_chip_set_pci_bdf_num(adapter->hwdev, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to set BDF info to chip\n"); + return ret; + } + + return 0; +} + +static int sss_init_function(struct pci_dev *pdev, struct sss_pci_adapter *adapter) +{ + int ret; + + ret = sss_init_hwdev(adapter); + if (ret != 0) { + adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Fail to init hardware device\n"); + return -EFAULT; + } + + sss_register_dev_event(adapter->hwdev, adapter, sss_process_event); + + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + set_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state); + sss_sync_time_to_chip(adapter); + } + + sss_chip_node_lock(); + ret = sss_tool_init(adapter->hwdev, adapter->chip_node); + if (ret) { + sss_chip_node_unlock(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + goto nictool_init_err; + } + sss_chip_node_unlock(); + + 
sss_add_func_list(adapter); + + ret = sss_attach_uld_dev(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to attach uld dev\n"); + goto attach_uld_err; + } + + return 0; + +attach_uld_err: + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); +nictool_init_err: + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); + + return ret; +} + +static int sss_init_adapter(struct sss_pci_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + + if (pdev->is_virtfn != 0 && (!sss_get_vf_load_state(pdev))) { + sdk_info(&pdev->dev, "Vf dev disable load in host\n"); + return 0; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_START); + + ret = sss_map_pci_bar(pdev, adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to map bar\n"); + goto map_bar_fail; + } + + /* if chip information of pcie function exist, add the function into chip */ + ret = sss_alloc_chip_node(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + ret = sss_init_function(pdev, adapter); + if (ret != 0) + goto func_init_err; + + ret = sss_notify_ok_to_chip(adapter); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to notify ok\n"); + goto notify_err; + } + + sss_set_adapter_probe_state(adapter, SSS_PROBE_OK); + + return 0; + +notify_err: + sss_deinit_function(pdev); + +func_init_err: + sss_free_chip_node(adapter); + +alloc_chip_node_fail: + sss_unmap_pci_bar(adapter); + +map_bar_fail: + sdk_err(&pdev->dev, "Fail to init adapter\n"); + return ret; +} + +static void sss_init_adapter_param(struct sss_pci_adapter *adapter, + struct pci_dev *pdev) +{ + adapter->pcidev = pdev; + adapter->init_state = SSS_NO_PROBE; + spin_lock_init(&adapter->dettach_uld_lock); + mutex_init(&adapter->uld_attach_mutex); + pci_set_drvdata(pdev, adapter); +} + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct sss_pci_adapter *adapter = NULL; + + sdk_info(&pdev->dev, "Pci probe begin\n"); + + if (!pdev) + return -EINVAL; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + ret = -ENOMEM; + goto init_pci_err; + } + + sss_init_adapter_param(adapter, pdev); + + ret = sss_init_pci_dev(pdev); + if (ret != 0) { + kfree(adapter); + sdk_err(&pdev->dev, "Fail to init pci device\n"); + goto init_pci_err; + } + + ret = sss_init_adapter(adapter); + if (ret != 0) + goto init_adapter_err; + + sdk_info(&pdev->dev, "Success to probe pci\n"); + return 0; + +init_adapter_err: + sss_deinit_pci_dev(pdev); + +init_pci_err: + sdk_err(&pdev->dev, "Fail to pci probe\n"); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h new file mode 100644 index 0000000000000000000000000000000000000000..64cb4ab6a6e1ceeac032118c9ea29de8d61bb67f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_probe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_PROBE_H +#define SSS_PCI_PROBE_H + +#include + +#include "sss_adapter.h" + +int sss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c new file mode 100644 index 0000000000000000000000000000000000000000..8a9bf5277183553f3bd399f4d957abf7cd66bc14 --- 
/dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_init.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_mgmt_init.h" +#include "sss_pci_global.h" +#include "sss_tool.h" + +#define SSS_WAIT_SRIOV_CFG_TIMEOUT 15000 +#define SSS_EVENT_PROCESS_TIMEOUT 10000 + +#define SSS_SRIOV_MIN_USLEEP 9900 +#define SSS_SRIOV_MAX_USLEEP 10000 + +#define SSS_EVENT_MIN_USLEEP 900 +#define SSS_EVENT_MAX_USLEEP 1000 + +static void sss_set_adapter_remove_state(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + mutex_lock(&adapter->uld_attach_mutex); + if (adapter->init_state != SSS_PROBE_OK) { + sdk_warn(&pdev->dev, "Current function does not need to be removed\n"); + mutex_unlock(&adapter->uld_attach_mutex); + return; + } + adapter->init_state = SSS_IN_REMOVE; + mutex_unlock(&adapter->uld_attach_mutex); +} + +static void sss_wait_sriov_cfg_complete(struct sss_pci_adapter *adapter) +{ + unsigned long end_time; + struct sss_sriov_info *info = &adapter->sriov_info; + + clear_bit(SSS_SRIOV_PRESENT, &info->state); + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + + end_time = jiffies + msecs_to_jiffies(SSS_WAIT_SRIOV_CFG_TIMEOUT); + do { + if (!test_bit(SSS_SRIOV_ENABLE, &info->state) && + !test_bit(SSS_SRIOV_DISABLE, &info->state)) + return; + + usleep_range(SSS_SRIOV_MIN_USLEEP, SSS_SRIOV_MAX_USLEEP); + } while (time_before(jiffies, end_time)); +} + +static bool sss_wait_uld_dev_timeout(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + unsigned long end_time; + + end_time = jiffies + msecs_to_jiffies(SSS_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + usleep_range(SSS_EVENT_MIN_USLEEP, SSS_EVENT_MAX_USLEEP); + } while (time_before(jiffies, end_time)); + + if (!test_and_set_bit(type, &adapter->uld_run_state)) + return false; + + return true; +} + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, + enum sss_service_type type) +{ + bool timeout; + struct sss_uld_info *info = sss_get_uld_info(); + const char **name = sss_get_uld_names(); + + mutex_lock(&adapter->uld_attach_mutex); + if (!adapter->uld_dev[type]) { + mutex_unlock(&adapter->uld_attach_mutex); + return; + } + + timeout = sss_wait_uld_dev_timeout(adapter, type); + + spin_lock_bh(&adapter->dettach_uld_lock); + clear_bit(type, &adapter->uld_attach_state); + spin_unlock_bh(&adapter->dettach_uld_lock); + + info[type].remove(&adapter->hal_dev, adapter->uld_dev[type]); + adapter->uld_dev[type] = NULL; + + if (!timeout) + clear_bit(type, &adapter->uld_run_state); + + sdk_info(&adapter->pcidev->dev, + "Success to detach %s driver from pci device\n", name[type]); + mutex_unlock(&adapter->uld_attach_mutex); +} + +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter) +{ + struct sss_uld_info *info = sss_get_uld_info(); + enum sss_service_type type; + + sss_hold_chip_node(); + sss_lock_uld(); + for (type = SSS_SERVICE_TYPE_MAX - 1; type > SSS_SERVICE_TYPE_NIC; type--) { + if (info[type].probe) + sss_detach_uld_driver(adapter, type); + } + + if (info[SSS_SERVICE_TYPE_NIC].probe) + 
sss_detach_uld_driver(adapter, SSS_SERVICE_TYPE_NIC); + sss_unlock_uld(); + sss_put_chip_node(); +} + +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter) +{ + sss_detach_all_uld_driver(adapter); +} + +void sss_unregister_uld(enum sss_service_type type) +{ + struct sss_pci_adapter *adapter = NULL; + struct sss_card_node *card_node = NULL; + struct list_head *card_list = NULL; + struct sss_uld_info *info = sss_get_uld_info(); + + if (type >= SSS_SERVICE_TYPE_MAX) { + pr_err("Unknown type %d of uld to unregister\n", type); + return; + } + + sss_hold_chip_node(); + sss_lock_uld(); + card_list = sss_get_chip_list(); + list_for_each_entry(card_node, card_list, node) { + /* detach vf first */ + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_VF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PF) + sss_detach_uld_driver(adapter, type); + + list_for_each_entry(adapter, &card_node->func_list, node) + if (sss_get_func_type(adapter->hwdev) == SSS_FUNC_TYPE_PPF) + sss_detach_uld_driver(adapter, type); + } + + memset(&info[type], 0, sizeof(*info)); + sss_unlock_uld(); + sss_put_chip_node(); +} +EXPORT_SYMBOL(sss_unregister_uld); + +void sss_deinit_function(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sss_chip_disable_mgmt_channel(adapter->hwdev); + + sss_flush_mgmt_workq(adapter->hwdev); + + sss_del_func_list(adapter); + + sss_chip_node_lock(); + sss_tool_uninit(adapter->hwdev, adapter->chip_node); + sss_chip_node_unlock(); + + sss_dettach_uld_dev(adapter); + + sss_unregister_dev_event(adapter->hwdev); + + sss_deinit_hwdev(adapter->hwdev); +} + +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter) +{ + iounmap(adapter->cfg_reg_bar); + iounmap(adapter->intr_reg_bar); + + if (!SSS_IS_VF_DEV(adapter->pcidev)) + iounmap(adapter->mgmt_reg_bar); + + iounmap(adapter->db_reg_bar); +} + +int sss_deinit_adapter(struct sss_pci_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pcidev; + + sss_set_adapter_remove_state(adapter); + + sss_hwdev_detach(adapter->hwdev); + + if (sss_get_func_type(adapter->hwdev) != SSS_FUNC_TYPE_VF) { + sss_wait_sriov_cfg_complete(adapter); + sss_pci_disable_sriov(adapter); + } + + sss_deinit_function(pdev); + + sss_free_chip_node(adapter); + + sss_unmap_pci_bar(adapter); + + sss_set_adapter_probe_state(adapter, SSS_NO_PROBE); + + sdk_info(&pdev->dev, "Pcie device removed function\n"); + + return 0; +} + +void sss_deinit_pci_dev(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(adapter); +} + +void sss_pci_remove(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return; + + sdk_info(&pdev->dev, "Begin pcie device remove\n"); + + sss_deinit_adapter(adapter); + + sss_deinit_pci_dev(pdev); + + sdk_info(&pdev->dev, "Success to remove pcie device\n"); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd760ee53dff4632c478ca7b93e425dec9dafd0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_remove.h @@ -0,0 +1,21 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_REMOVE_H +#define SSS_PCI_REMOVE_H + +#include + +#include "sss_hw_svc_cap.h" +#include "sss_adapter.h" + +void sss_detach_uld_driver(struct sss_pci_adapter *adapter, enum sss_service_type type); +void sss_detach_all_uld_driver(struct sss_pci_adapter *adapter); +void sss_dettach_uld_dev(struct sss_pci_adapter *adapter); +void sss_deinit_function(struct pci_dev *pdev); +void sss_unmap_pci_bar(struct sss_pci_adapter *adapter); +int sss_deinit_adapter(struct sss_pci_adapter *adapter); +void sss_deinit_pci_dev(struct pci_dev *pdev); + +void sss_pci_remove(struct pci_dev *pdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c new file mode 100644 index 0000000000000000000000000000000000000000..54337fd447a95820d225693a93f33c39d1e3ee03 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_pci_id_tbl.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwdev_api.h" +#include "sss_hwdev_init.h" + +void sss_pci_shutdown(struct pci_dev *pdev) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (adapter) + sss_hwdev_shutdown(adapter->hwdev); + + pci_disable_device(pdev); + + if (adapter) + sss_hwdev_stop(adapter->hwdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h new file mode 100644 index 0000000000000000000000000000000000000000..7c9e92edda6ecb000c350793617efacf542f06a8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_shutdown.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SHUTDOWN_H +#define SSS_PCI_SHUTDOWN_H + +#include + +void sss_pci_shutdown(struct pci_dev *pdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..d75c5859039c7147ff7c2f602f2c4d95b3df8d3f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_mbx.h" +#include "sss_hwif_mbx_init.h" +#include "sss_pci_sriov.h" +#include "sss_hwdev_api.h" +#include "sss_hwif_api.h" + +static int sss_init_vf_hw(void *hwdev, u16 vf_num) +{ + int ret; + u16 i; + u16 id; + + /* mbx msg channel resources will be freed during remove process */ + ret = sss_init_func_mbx_msg(hwdev, sss_get_max_vf_num(hwdev)); + if (ret != 0) + return ret; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + ret = sss_chip_set_wq_page_size(hwdev, id, SSS_DEFAULT_WQ_PAGE_SIZE); + if (ret != 0) + return 
ret; + } + + return 0; +} + +static void sss_deinit_vf_hw(void *hwdev, u16 vf_num) +{ + u16 i; + u16 id; + + for (i = 1; i <= vf_num; i++) { + id = sss_get_glb_pf_vf_offset(hwdev) + i; + sss_chip_set_wq_page_size(hwdev, id, SSS_HW_WQ_PAGE_SIZE); + } +} + +#ifdef CONFIG_PCI_IOV +static void sss_notify_sriov_state_change(void *hwdev, u16 vf_num) +{ + struct sss_event_info event = {0}; + + event.service = SSS_EVENT_SRV_COMM; + event.type = SSS_EVENT_SRIOV_STATE_CHANGE; + + if (vf_num > 0) { + ((struct sss_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct sss_sriov_state_info *)(void *)event.event_data)->vf_num = vf_num; + } + + sss_do_event_callback(hwdev, &event); +} +#endif + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (!info->enabled) + return 0; + + if (test_and_set_bit(SSS_SRIOV_DISABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV disable in process."); + return -EPERM; + } + + if (pci_vfs_assigned(pdev) != 0) { + clear_bit(SSS_SRIOV_DISABLE, &info->state); + sdk_warn(&pdev->dev, "VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + sss_notify_sriov_state_change(hwdev, 0); + + info->enabled = false; + + pci_disable_sriov(pdev); + + sss_deinit_vf_hw(hwdev, (u16)info->vf_num); + info->vf_num = 0; + + clear_bit(SSS_SRIOV_DISABLE, &info->state); + +#endif + + return 0; +} + +#ifdef CONFIG_PCI_IOV +static int sss_check_existing_vf(struct sss_pci_adapter *adapter, u16 vf_num) +{ + int ret; + struct pci_dev *pdev = adapter->pcidev; + int existing_vf = pci_num_vf(pdev); + struct sss_sriov_info *info = &adapter->sriov_info; + + if (existing_vf != 0 && existing_vf != vf_num) { + ret = sss_pci_disable_sriov(adapter); + if (ret != 0) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + } else if (existing_vf == vf_num) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return vf_num; + } + + return 0; +} +#endif + +static int sss_pci_enable_sriov(struct sss_pci_adapter *adapter, u16 vf_num) +{ +#ifdef CONFIG_PCI_IOV + int ret = 0; + void *hwdev = adapter->hwdev; + struct pci_dev *pdev = adapter->pcidev; + struct sss_sriov_info *info = &adapter->sriov_info; + + if (test_and_set_bit(SSS_SRIOV_ENABLE, &info->state)) { + sdk_err(&pdev->dev, "SR-IOV disable, vf_num %d\n", vf_num); + return -EPERM; + } + + if (vf_num > pci_sriov_get_totalvfs(pdev)) { + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return -ERANGE; + } + + ret = sss_check_existing_vf(adapter, vf_num); + if (ret != 0) + return ret; + + ret = sss_init_vf_hw(hwdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to init vf in hw, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + ret = pci_enable_sriov(pdev, vf_num); + if (ret != 0) { + sdk_err(&pdev->dev, "Fail to enable SR-IOV, ret: %d\n", ret); + clear_bit(SSS_SRIOV_ENABLE, &info->state); + return ret; + } + + info->enabled = true; + info->vf_num = vf_num; + + sss_notify_sriov_state_change(hwdev, vf_num); + + clear_bit(SSS_SRIOV_ENABLE, &info->state); + + return vf_num; +#else + + return 0; +#endif +} + +int sss_pci_configure_sriov(struct pci_dev *pdev, int vf_num) +{ + struct sss_pci_adapter *adapter = sss_get_adapter_by_pcidev(pdev); + + if (!adapter) + return -EFAULT; + + if (!test_bit(SSS_SRIOV_PRESENT, &adapter->sriov_info.state)) + return -EFAULT; + + return (vf_num == 0) ? 
sss_pci_disable_sriov(adapter) : + sss_pci_enable_sriov(adapter, (u16)vf_num); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..3146e8eb9f8f7d8791a5bdf559f9cd5286b2ed6d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_pci_sriov.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_PCI_SRIOV_H +#define SSS_PCI_SRIOV_H + +#include +#include + +#include "sss_sriov_info.h" +#include "sss_adapter.h" + +int sss_pci_disable_sriov(struct sss_pci_adapter *adapter); + +int sss_pci_configure_sriov(struct pci_dev *pdev, int num_vfs); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c new file mode 100644 index 0000000000000000000000000000000000000000..96d57922821b6a1cc649ed68ec625581319e0aaf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/sss_wq.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BASE]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_common.h" +#include "sss_hwdev.h" +#include "sss_hw_wq.h" + +#define SSS_WQ_MIN_DEPTH 64 +#define SSS_WQ_MAX_DEPTH 65536 +#define SSS_WQ_MAX_PAGE_NUM (PAGE_SIZE / sizeof(u64)) + +static int sss_init_wq_block(struct sss_wq *wq) +{ + int i; + + if (SSS_WQ_IS_0_LEVEL_CLA(wq)) { + wq->block_paddr = wq->page[0].align_paddr; + wq->block_vaddr = wq->page[0].align_vaddr; + return 0; + } + + if (wq->page_num > SSS_WQ_MAX_PAGE_NUM) { + sdk_err(wq->dev_hdl, "Wq page num: 0x%x out of range: %lu\n", + wq->page_num, SSS_WQ_MAX_PAGE_NUM); + return -EFAULT; + } + + wq->block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE, + &wq->block_paddr, GFP_KERNEL); + if (!wq->block_vaddr) { + sdk_err(wq->dev_hdl, "Fail to alloc wq block vaddr\n"); + return -ENOMEM; + } + + for (i = 0; i < wq->page_num; i++) + wq->block_vaddr[i] = cpu_to_be64(wq->page[i].align_paddr); + + return 0; +} + +static void sss_deinit_wq_block(struct sss_wq *wq) +{ + if (!SSS_WQ_IS_0_LEVEL_CLA(wq)) + dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->block_vaddr, + wq->block_paddr); +} + +static int sss_alloc_wq_page(struct sss_wq *wq) +{ + int i; + int ret; + int id; + + wq->page = kcalloc(wq->page_num, sizeof(*wq->page), GFP_KERNEL); + if (!wq->page) + return -ENOMEM; + + for (id = 0; id < wq->page_num; id++) { + ret = sss_dma_zalloc_coherent_align(wq->dev_hdl, wq->page_size, + wq->page_size, GFP_KERNEL, &wq->page[id]); + if (ret != 0) { + sdk_err(wq->dev_hdl, "Fail to alloc wq dma page\n"); + goto dma_page_err; + } + } + + ret = sss_init_wq_block(wq); + if (ret != 0) + goto block_err; + + return 0; + +block_err: +dma_page_err: + for (i = 0; i < id; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; + + return -ENOMEM; +} + +static void sss_free_wq_page(struct sss_wq *wq) +{ + int i; + + sss_deinit_wq_block(wq); + + for (i = 0; i < wq->page_num; i++) + sss_dma_free_coherent_align(wq->dev_hdl, &wq->page[i]); + + kfree(wq->page); + wq->page = NULL; +} + +static void sss_init_wq_param(struct sss_hwdev *hwdev, struct sss_wq *wq, + u32 q_depth, u16 block_size) +{ + u32 page_size = ALIGN(hwdev->wq_page_size, PAGE_SIZE); + + wq->ci = 0; + wq->pi = 0; + wq->dev_hdl = hwdev->dev_hdl; + 
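+	/* q_depth is validated as a power of two in sss_create_wq(), so the ring index mask below is simply q_depth - 1 */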
wq->q_depth = q_depth; + wq->id_mask = (u16)(q_depth - 1); + wq->elem_size = block_size; + wq->elem_size_shift = (u16)ilog2(wq->elem_size); + wq->page_size = page_size; + wq->elem_per_page = min(page_size / block_size, q_depth); + wq->elem_per_page_shift = (u16)ilog2(wq->elem_per_page); + wq->elem_per_page_mask = (u16)(wq->elem_per_page - 1); + wq->page_num = + (u16)(ALIGN(((u32)q_depth * block_size), page_size) / page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size) +{ + if (!wq || !hwdev) { + pr_err("Invalid wq or dev_hdl\n"); + return -EINVAL; + } + + if (q_depth < SSS_WQ_MIN_DEPTH || q_depth > SSS_WQ_MAX_DEPTH || + (q_depth & (q_depth - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid q_depth(%u)\n", q_depth); + return -EINVAL; + } + + if (block_size == 0 || (block_size & (block_size - 1)) != 0) { + sdk_err(SSS_TO_DEV(hwdev), "Invalid block_size(%u)\n", block_size); + return -EINVAL; + } + + sss_init_wq_param(hwdev, wq, q_depth, block_size); + + return sss_alloc_wq_page(wq); +} +EXPORT_SYMBOL(sss_create_wq); + +void sss_destroy_wq(struct sss_wq *wq) +{ + if (!wq) + return; + + sss_free_wq_page(wq); +} +EXPORT_SYMBOL(sss_destroy_wq); diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h new file mode 100644 index 0000000000000000000000000000000000000000..92e474e81ba8c5d0a159f17222d798d4885c6351 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSSNIC_NICTOOL_H +#define SSSNIC_NICTOOL_H + +#include "sss_tool_chip.h" +#include "sss_tool_sdk.h" +#include "sss_tool_sm.h" +#include "sss_tool_comm.h" + +#ifndef _LLT_TEST_ +#define SSS_TOOL_PAGE_ORDER (10) +#else +#define SSS_TOOL_PAGE_ORDER (1) +#endif + +#define SSS_TOOL_MEM_MAP_SIZE (PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)) + +#define SSS_TOOL_CARD_MAX (64) + +int sss_tool_init(void *hwdev, void *chip_node); +void sss_tool_uninit(void *hwdev, void *chip_node); + +extern u64 g_card_pa[SSS_TOOL_CARD_MAX]; +extern void *g_card_va[SSS_TOOL_CARD_MAX]; +extern int g_card_id; + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c new file mode 100644 index 0000000000000000000000000000000000000000..21833df254b523f6bf31f0ce15da5596858ccdaf --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hwdev.h" +#include "sss_common.h" +#include "sss_pci_sriov.h" +#include "sss_adapter_mgmt.h" +#include "sss_hwif_adm.h" +#include "sss_hwif_adm_common.h" +#include "sss_hwif_mgmt_common.h" +#include "sss_hwif_ctrlq.h" +#include "sss_hwif_api.h" +#include "sss_hw_common.h" +#include "sss_mgmt_channel.h" +#include "sss_linux_kernel.h" +#include "sss_csr.h" +#include "sss_hw.h" +#include "sss_adapter.h" +#include "sss_tool.h" + +#define SSS_TOOL_DW_WIDTH 4 + +/* completion timeout interval, unit is millisecond */ +#define SSS_TOOL_UPDATE_MSG_TIMEOUT 50000U + +#define SSS_TOOL_CLP_REG_GAP 0x20 +#define SSS_TOOL_CLP_INPUT_BUF_LEN 4096UL +#define SSS_TOOL_CLP_DATA_UNIT 4UL +#define SSS_TOOL_CLP_MAX_DATA_SIZE 
(SSS_TOOL_CLP_INPUT_BUF_LEN / SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REQ_SIZE_OFFSET 0 +#define SSS_TOOL_CLP_RSP_SIZE_OFFSET 16 +#define SSS_TOOL_CLP_BASE_OFFSET 0 +#define SSS_TOOL_CLP_LEN_OFFSET 0 +#define SSS_TOOL_CLP_START_OFFSET 31 +#define SSS_TOOL_CLP_READY_OFFSET 31 +#define SSS_TOOL_CLP_OFFSET(member) (SSS_TOOL_CLP_##member##_OFFSET) + +#define SSS_TOOL_CLP_SIZE_MASK 0x7ffUL +#define SSS_TOOL_CLP_BASE_MASK 0x7ffffffUL +#define SSS_TOOL_CLP_LEN_MASK 0x7ffUL +#define SSS_TOOL_CLP_START_MASK 0x1UL +#define SSS_TOOL_CLP_READY_MASK 0x1UL +#define SSS_TOOL_CLP_MASK(member) (SSS_TOOL_CLP_##member##_MASK) + +#define SSS_TOOL_CLP_DELAY_CNT_MAX 200UL +#define SSS_TOOL_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define SSS_TOOL_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define SSS_TOOL_CLP_LEN_REG_MAX 0x3ff +#define SSS_TOOL_CLP_START_OR_READY_REG_MAX 0x1 + +#define SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header) \ + (((in_size) + (u16)sizeof(header) + \ + (((in_size) % SSS_TOOL_CLP_DATA_UNIT) ? SSS_TOOL_CLP_DATA_UNIT : 0)) / \ + SSS_TOOL_CLP_DATA_UNIT) + +#define SSS_TOOL_CLP_REG_VALUE(value, offset, mask) \ + (((value) >> SSS_TOOL_CLP_OFFSET(offset)) & SSS_TOOL_CLP_MASK(mask)) + +enum sss_tool_clp_data_type { + SSS_TOOL_CLP_REQ = 0, + SSS_TOOL_CLP_RSP = 1 +}; + +enum sss_tool_clp_reg_type { + SSS_TOOL_CLP_BASE = 0, + SSS_TOOL_CLP_SIZE = 1, + SSS_TOOL_CLP_LEN = 2, + SSS_TOOL_CLP_START_REQ = 3, + SSS_TOOL_CLP_READY_RSP = 4 +}; + +enum SSS_TOOL_ADM_CSR_DATA_OPERATION { + SSS_TOOL_ADM_CSR_WRITE = 0x1E, + SSS_TOOL_ADM_CSR_READ = 0x1F +}; + +enum SSS_TOOL_ADM_CSR_NEED_RESP_DATA { + SSS_TOOL_ADM_CSR_NO_RESP_DATA = 0, + SSS_TOOL_ADM_CSR_NEED_RESP_DATA = 1 +}; + +enum SSS_TOOL_ADM_CSR_DATA_SIZE { + SSS_TOOL_ADM_CSR_DATA_SZ_32 = 0, + SSS_TOOL_ADM_CSR_DATA_SZ_64 = 1 +}; + +struct sss_tool_csr_request_adm_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. 
*/ + u32 csr_write_data_l; +}; + +struct sss_tool_csr_read { + u32 rd_len; + u32 addr; +}; + +struct sss_tool_csr_write { + u32 rd_len; + u32 addr; + u8 *data; +}; + +static u32 sss_tool_get_timeout_val(enum sss_mod_type mod, u16 cmd) +{ + if (mod == SSS_MOD_TYPE_COMM && + (cmd == SSS_COMM_MGMT_CMD_UPDATE_FW || + cmd == SSS_COMM_MGMT_CMD_UPDATE_BIOS || + cmd == SSS_COMM_MGMT_CMD_ACTIVE_FW || + cmd == SSS_COMM_MGMT_CMD_SWITCH_CFG || + cmd == SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW)) + return SSS_TOOL_UPDATE_MSG_TIMEOUT; + + return 0; /* use default mbox/adm timeout time */ +} + +static int sss_tool_get_clp_reg(void *hwdev, enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type type, u32 *addr) +{ + switch (type) { + case SSS_TOOL_CLP_BASE: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQBASE) : SSS_CLP_REG(RSPBASE); + break; + + case SSS_TOOL_CLP_SIZE: + *addr = SSS_CLP_REG(SIZE); + break; + + case SSS_TOOL_CLP_LEN: + *addr = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_REG(REQ) : SSS_CLP_REG(RSP); + break; + + case SSS_TOOL_CLP_START_REQ: + *addr = SSS_CLP_REG(REQ); + break; + + case SSS_TOOL_CLP_READY_RSP: + *addr = SSS_CLP_REG(RSP); + break; + + default: + *addr = 0; + break; + } + + return (*addr == 0) ? -EINVAL : 0; +} + +static inline int sss_tool_clp_param_valid(enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type) +{ + if (data_type == SSS_TOOL_CLP_REQ && reg_type == SSS_TOOL_CLP_READY_RSP) + return -EINVAL; + + if (data_type == SSS_TOOL_CLP_RSP && reg_type == SSS_TOOL_CLP_START_REQ) + return -EINVAL; + + return 0; +} + +static u32 sss_tool_get_clp_reg_value(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_BASE: + value = SSS_TOOL_CLP_REG_VALUE(value, BASE, BASE); + break; + + case SSS_TOOL_CLP_SIZE: + if (data_type == SSS_TOOL_CLP_REQ) + value = SSS_TOOL_CLP_REG_VALUE(value, REQ_SIZE, SIZE); + else + value = SSS_TOOL_CLP_REG_VALUE(value, RSP_SIZE, SIZE); + break; + + case SSS_TOOL_CLP_LEN: + value = SSS_TOOL_CLP_REG_VALUE(value, LEN, LEN); + break; + + case SSS_TOOL_CLP_START_REQ: + value = SSS_TOOL_CLP_REG_VALUE(value, START, START); + break; + + case SSS_TOOL_CLP_READY_RSP: + value = SSS_TOOL_CLP_REG_VALUE(value, READY, READY); + break; + + default: + break; + } + + return value; +} + +static int sss_tool_read_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int ret; + + ret = sss_tool_clp_param_valid(data_type, reg_type); + if (ret) + return ret; + + ret = sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (ret) + return ret; + + *read_value = sss_tool_get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int sss_tool_check_reg_value(enum sss_tool_clp_reg_type reg_type, u32 value) +{ + if (reg_type == SSS_TOOL_CLP_BASE && + value > SSS_TOOL_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == SSS_TOOL_CLP_SIZE && + value > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == SSS_TOOL_CLP_LEN && + value > SSS_TOOL_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == SSS_TOOL_CLP_START_REQ || + reg_type == SSS_TOOL_CLP_READY_RSP) && + value > SSS_TOOL_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static int sss_tool_check_clp_init_status(struct sss_hwdev *hwdev) +{ + int 
ret; + u32 reg_value = 0; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read clp reg: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_BASE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp ba value: 0x%x\n", reg_value); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read req size\n"); + return -EINVAL; + } + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_SIZE, ®_value); + if (ret || !reg_value) { + tool_err("Fail to read rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_write_clp_reg(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type, + enum sss_tool_clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (sss_tool_clp_param_valid(data_type, reg_type)) + return; + + if (sss_tool_check_reg_value(reg_type, value)) + return; + + if (sss_tool_get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = sss_chip_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case SSS_TOOL_CLP_LEN: + reg_value &= (~(SSS_TOOL_CLP_MASK(LEN) << SSS_TOOL_CLP_OFFSET(LEN))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(LEN)); + break; + + case SSS_TOOL_CLP_START_REQ: + reg_value &= (~(SSS_TOOL_CLP_MASK(START) << SSS_TOOL_CLP_OFFSET(START))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(START)); + break; + + case SSS_TOOL_CLP_READY_RSP: + reg_value &= (~(SSS_TOOL_CLP_MASK(READY) << SSS_TOOL_CLP_OFFSET(READY))); + reg_value |= (value << SSS_TOOL_CLP_OFFSET(READY)); + break; + + default: + return; + } + + sss_chip_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int sss_tool_read_clp_data(struct sss_hwdev *hwdev, void *buf_out, u16 *out_size) +{ + int err; + u32 reg = SSS_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, &ready); + if (err || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) { + tool_err("Fail to read clp delay rsp, timeout delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_LEN, &temp_out_size); + if (err) + return err; + + if (temp_out_size > SSS_TOOL_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + tool_err("Invalid temp out size: %u\n", temp_out_size); + return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = sss_chip_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, (u32)0x0); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, SSS_TOOL_CLP_LEN, (u32)0x0); + + return 0; +} + +static int sss_tool_write_clp_data(struct sss_hwdev *hwdev, void *buf_in, u16 in_size) +{ + int ret; + u32 reg = SSS_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + u16 size_in = in_size; + + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret != 0) + return ret; + + while 
(start == 1) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + ret = sss_tool_read_clp_reg(hwdev, SSS_TOOL_CLP_REQ, + SSS_TOOL_CLP_START_REQ, &start); + if (ret || delay_cnt > SSS_TOOL_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_LEN, size_in); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_REQ, SSS_TOOL_CLP_START_REQ, (u32)0x1); + + for (; size_in > 0; size_in--) { + sss_chip_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + sizeof(u32); + } + + return 0; +} + +static void sss_tool_clear_clp_data(struct sss_hwdev *hwdev, + enum sss_tool_clp_data_type data_type) +{ + u32 reg = (data_type == SSS_TOOL_CLP_REQ) ? + SSS_CLP_DATA(REQ) : SSS_CLP_DATA(RSP); + u32 count = SSS_TOOL_CLP_MAX_DATA_SIZE; + + for (; count > 0; count--) { + sss_chip_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + sizeof(u32); + } +} + +static void sss_tool_clp_prepare_header(struct sss_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, enum sss_mgmt_cmd cmd) +{ + struct sss_hwif *hwif = hwdev->hwif; + + *header = SSS_SET_MSG_HEADER(msg_len, MSG_LEN) | + SSS_SET_MSG_HEADER(mod, MODULE) | + SSS_SET_MSG_HEADER(msg_len, SEG_LEN) | + SSS_SET_MSG_HEADER(0, NO_ACK) | + SSS_SET_MSG_HEADER(SSS_INLINE_DATA, DATA_TYPE) | + SSS_SET_MSG_HEADER(0, SEQID) | + SSS_SET_MSG_HEADER(SSS_ADM_MSG_AEQ_ID, AEQ_ID) | + SSS_SET_MSG_HEADER(SSS_LAST_SEG, LAST) | + SSS_SET_MSG_HEADER(0, DIRECTION) | + SSS_SET_MSG_HEADER(cmd, CMD) | + SSS_SET_MSG_HEADER(hwif->attr.func_id, SRC_GLB_FUNC_ID) | + SSS_SET_MSG_HEADER(0, MSG_ID); +} + +int sss_tool_send_clp_msg(struct sss_hwdev *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + struct sss_clp_pf_to_mgmt *clp_msg; + u64 header; + u16 size; + u8 *msg_buf; + int ret; + + if (!hwdev || SSS_GET_FUNC_TYPE(hwdev) == SSS_FUNC_TYPE_VF) + return -EINVAL; + + if (!hwdev->chip_present_flag || !SSS_SUPPORT_CLP(hwdev)) + return -EPERM; + + clp_msg = hwdev->clp_pf_to_mgmt; + if (!clp_msg) + return -EPERM; + + msg_buf = clp_msg->clp_msg_buf; + + /* 4 bytes alignment */ + size = SSS_TOOL_CLP_DATA_REAL_SIZE(in_size, header); + + if (size > SSS_TOOL_CLP_MAX_DATA_SIZE) { + tool_err("Invalid data size: %u\n", size); + return -EINVAL; + } + down(&clp_msg->clp_msg_lock); + + ret = sss_tool_check_clp_init_status(hwdev); + if (ret) { + tool_err("Fail to check clp init status\n"); + up(&clp_msg->clp_msg_lock); + return ret; + } + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + sss_tool_write_clp_reg(hwdev, SSS_TOOL_CLP_RSP, + SSS_TOOL_CLP_READY_RSP, 0x0); + + /* Send request */ + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + sss_tool_clp_prepare_header(hwdev, &header, in_size, mod, cmd); + + memcpy(msg_buf, &header, sizeof(header)); + msg_buf += sizeof(header); + memcpy(msg_buf, buf_in, in_size); + + msg_buf = clp_msg->clp_msg_buf; + + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_REQ); + ret = sss_tool_write_clp_data(hwdev, clp_msg->clp_msg_buf, size); + if (ret) { + tool_err("Fail to send clp request\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + msg_buf = clp_msg->clp_msg_buf; + memset(msg_buf, 0x0, SSS_TOOL_CLP_INPUT_BUF_LEN); + ret = sss_tool_read_clp_data(hwdev, msg_buf, &size); + sss_tool_clear_clp_data(hwdev, SSS_TOOL_CLP_RSP); + if (ret) { + tool_err("Fail to read clp response\n"); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + size = (u16)((size * SSS_TOOL_CLP_DATA_UNIT) & 0xffff); + if (size <= sizeof(header) || size > 
SSS_TOOL_CLP_INPUT_BUF_LEN) { + tool_err("Invalid response size: %u", size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + if (size != *out_size + sizeof(header)) { + tool_err("Invalid size:%u, out_size: %u\n", size, *out_size); + up(&clp_msg->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (msg_buf + sizeof(header)), size); + up(&clp_msg->clp_msg_lock); + + return 0; +} + +int sss_tool_adm_csr_rd32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 *val) +{ + int ret; + u32 csr_val = 0; + struct sss_tool_csr_request_adm_data adm_data = {0}; + + if (!hwdev || !val) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw0 = 0; + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_READ; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NEED_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + + ret = sss_adm_msg_read_ack(hwdev, dest, (u8 *)(&adm_data), + sizeof(adm_data), &csr_val, 0x4); + if (ret) { + tool_err("Fail to read 32 bit csr, dest %u addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int sss_tool_adm_csr_wr32(struct sss_hwdev *hwdev, u8 dest, u32 addr, u32 val) +{ + int ret; + struct sss_tool_csr_request_adm_data adm_data = {0}; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG(hwdev)) + return -EPERM; + + adm_data.dw1.bits.operation_id = SSS_TOOL_ADM_CSR_WRITE; + adm_data.dw1.bits.need_response = SSS_TOOL_ADM_CSR_NO_RESP_DATA; + adm_data.dw1.bits.data_size = SSS_TOOL_ADM_CSR_DATA_SZ_32; + adm_data.dw1.val32 = cpu_to_be32(adm_data.dw1.val32); + adm_data.dw2.bits.csr_addr = addr; + adm_data.dw2.val32 = cpu_to_be32(adm_data.dw2.val32); + adm_data.csr_write_data_h = 0xffffffff; + adm_data.csr_write_data_l = val; + + ret = sss_adm_msg_write_nack(hwdev, dest, (u8 *)(&adm_data), sizeof(adm_data)); + if (ret) { + tool_err("Fail to write 32 bit csr! 
dest %u addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + +static int sss_tool_adm_csr_read(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_read *rd_msg = (struct sss_tool_csr_read *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = rd_msg->rd_len; + u32 rd_addr = rd_msg->addr; + + if (!buf_in || !buf_out || in_size != sizeof(*rd_msg) || + *out_size != rd_len || rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_rd32(hwdev, node_id, rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + tool_err("Fail to read csr, err: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + *out_size = rd_len; + + return ret; +} + +static int sss_tool_adm_csr_write(void *hwdev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u32 cnt = 0; + u32 offset = 0; + u32 i; + struct sss_tool_csr_write *wr_msg = (struct sss_tool_csr_write *)buf_in; + u8 node_id = (u8)tool_msg->mpu_cmd.mod; + u32 rd_len = wr_msg->rd_len; + u32 rd_addr = wr_msg->addr; + u8 *data = NULL; + + if (!buf_in || in_size != sizeof(*wr_msg) || + wr_msg->rd_len % SSS_TOOL_DW_WIDTH != 0) + return -EINVAL; + + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) + return -EFAULT; + + if (copy_from_user(data, (void *)wr_msg->data, rd_len)) { + tool_err("Fail to copy information from user\n"); + kfree(data); + return -EFAULT; + } + + cnt = rd_len / SSS_TOOL_DW_WIDTH; + for (i = 0; i < cnt; i++) { + ret = sss_tool_adm_csr_wr32(hwdev, node_id, rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + tool_err("Fail to write csr, ret: %d, node_id: %u, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += SSS_TOOL_DW_WIDTH; + } + + *out_size = 0; + kfree(data); + return ret; +} + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u16 cmd = tool_msg->mpu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->mpu_cmd.mod; + u32 timeout = sss_tool_get_timeout_val(mod, cmd); + void *hwdev = hal_dev->hwdev; + + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX || + tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_CLP) { + if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_MBOX) { + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + } else { + ret = sss_tool_send_clp_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + } + + if (ret) { + tool_err("Fail to send msg to mgmt cpu, mod: %d, cmd: %u\n", mod, cmd); + return ret; + } + + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_BYPASS) { + if (tool_msg->mpu_cmd.cmd == SSS_TOOL_ADM_MSG_WRITE) + return sss_tool_adm_csr_write(hwdev, tool_msg, buf_in, in_size, + buf_out, out_size); + + ret = sss_tool_adm_csr_read(hwdev, tool_msg, buf_in, in_size, buf_out, out_size); + } else if (tool_msg->mpu_cmd.channel == SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU) { + if (SSS_GET_HWIF_PCI_INTF_ID(SSS_TO_HWIF(hwdev)) != SSS_SPU_HOST_ID) + ret = sss_sync_send_adm_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, 
timeout); + else + ret = sss_sync_mbx_send_msg(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, (u16 *)out_size, timeout, + SSS_CHANNEL_DEFAULT); + + if (ret) { + tool_err("Fail to send adm msg to mgmt cpu, mod: %d, cmd: %u\n", + mod, cmd); + return ret; + } + + } else { + tool_err("Invalid channel %d\n", tool_msg->mpu_cmd.channel); + return -EINVAL; + } + + return ret; +} + +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u8 cmd = tool_msg->npu_cmd.cmd; + enum sss_mod_type mod = (enum sss_mod_type)tool_msg->npu_cmd.mod; + + if (tool_msg->npu_cmd.direct_resp) { + ret = sss_ctrlq_direct_reply(hal_dev->hwdev, mod, cmd, buf_in, + buf_out, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send direct ctrlq, ret: %d\n", ret); + } else { + ret = sss_ctrlq_sync_cmd_detail_reply(hal_dev->hwdev, mod, cmd, buf_in, buf_out, + NULL, 0, SSS_CHANNEL_DEFAULT); + if (ret) + tool_err("Fail to send detail ctrlq, ret: %d\n", ret); + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h new file mode 100644 index 0000000000000000000000000000000000000000..4dbaed192f85d0820451d11f2b0e6f466f43ecab --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_chip.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_CHIP_H +#define SSS_TOOL_CHIP_H +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +int sss_tool_msg_to_mpu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); +int sss_tool_msg_to_npu(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..b951026a7c9c5b25e4dcb9bdeb484b967a1c546a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_hw.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_HW_H +#define SSS_TOOL_HW_H + +#define SSS_TOOL_CMD_TYPE (0x18) + +#define SSS_TOOL_PF_DEV_MAX 32 +/* Indicates the maximum number of interrupts that can be recorded. + * Subsequent interrupts are not recorded in FFM. + */ +#define SSS_TOOL_FFM_RECORD_MAX 64 + +#define SSS_TOOL_PF_INFO_MAX (16) +#define SSS_TOOL_BUSINFO_LEN (32) + +#define SSS_TOOL_CHIP_FAULT_SIZE (110 * 1024) +#define SSS_TOOL_DRV_BUF_SIZE_MAX 4096 + +/* dbgtool command type */ +/* You can add commands as required. The dbgtool command can be + * used to invoke all interfaces of the kernel-mode x86 driver. 
+ */ +enum sss_tool_dbg_cmd { + SSS_TOOL_DBG_CMD_API_RD = 0, + SSS_TOOL_DBG_CMD_API_WR, + SSS_TOOL_DBG_CMD_FFM_RD, + SSS_TOOL_DBG_CMD_FFM_CLR, + SSS_TOOL_DBG_CMD_PF_DEV_INFO_GET, + SSS_TOOL_DBG_CMD_MSG_2_UP, + SSS_TOOL_DBG_CMD_FREE_MEM, + SSS_TOOL_DBG_CMD_NUM +}; + +enum module_name { + SSS_TOOL_MSG_TO_NPU = 1, + SSS_TOOL_MSG_TO_MPU, + SSS_TOOL_MSG_TO_SM, + SSS_TOOL_MSG_TO_HW_DRIVER, +#define SSS_TOOL_MSG_TO_SRV_DRV_BASE (SSS_TOOL_MSG_TO_HW_DRIVER + 1) + SSS_TOOL_MSG_TO_NIC_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE, + SSS_TOOL_MSG_TO_OVS_DRIVER, + SSS_TOOL_MSG_TO_ROCE_DRIVER, + SSS_TOOL_MSG_TO_TOE_DRIVER, + SSS_TOOL_MSG_TO_IOE_DRIVER, + SSS_TOOL_MSG_TO_FC_DRIVER, + SSS_TOOL_MSG_TO_VBS_DRIVER, + SSS_TOOL_MSG_TO_IPSEC_DRIVER, + SSS_TOOL_MSG_TO_VIRTIO_DRIVER, + SSS_TOOL_MSG_TO_MIGRATE_DRIVER, + SSS_TOOL_MSG_TO_PPA_DRIVER, + SSS_TOOL_MSG_TO_CUSTOM_DRIVER = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 11, + SSS_TOOL_MSG_TO_DRIVER_MAX = SSS_TOOL_MSG_TO_SRV_DRV_BASE + 15, /* reserved */ +}; + +enum sss_tool_adm_msg_type { + SSS_TOOL_ADM_MSG_READ, + SSS_TOOL_ADM_MSG_WRITE +}; + +enum sss_tool_sm_cmd_type { + SSS_TOOL_SM_CMD_RD16 = 1, + SSS_TOOL_SM_CMD_RD32, + SSS_TOOL_SM_CMD_RD64_PAIR, + SSS_TOOL_SM_CMD_RD64, + SSS_TOOL_SM_CMD_RD32_CLEAR, + SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, + SSS_TOOL_SM_CMD_RD64_CLEAR +}; + +enum sss_tool_channel_type { + SSS_TOOL_CHANNEL_MBOX = 1, + SSS_TOOL_CHANNEL_ADM_MSG_BYPASS, + SSS_TOOL_CHANNEL_ADM_MSG_TO_MPU, + SSS_TOOL_CHANNEL_CLP, +}; + +struct sss_tool_api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct sss_tool_api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct sss_tool_pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +struct sss_tool_ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +struct sss_tool_ffm_intr_tm_info { + struct sss_tool_ffm_intr_info intr_info; + u8 times; + u8 sec; + u8 min; + u8 hour; + u8 mday; + u8 mon; + u16 year; +}; + +struct sss_tool_ffm_record_info { + u32 ffm_num; + u32 last_err_csr_addr; + u32 last_err_csr_value; + struct sss_tool_ffm_intr_tm_info ffm[SSS_TOOL_FFM_RECORD_MAX]; +}; + +struct sss_tool_knl_dbg_info { + struct semaphore dbgtool_sem; + struct sss_tool_ffm_record_info *ffm; +}; + +struct sss_tool_msg_to_up { + u8 pf_id; + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct sss_tool_dbg_param { + union { + struct sss_tool_api_cmd_rd api_rd; + struct sss_tool_api_cmd_wr api_wr; + struct sss_tool_pf_dev_info *dev_info; + struct sss_tool_ffm_record_info *ffm_rd; + struct sss_tool_msg_to_up msg2up; + } param; + char chip_name[16]; +}; + +struct sss_tool_pf { + char name[IFNAMSIZ]; + char bus_info[SSS_TOOL_BUSINFO_LEN]; + u32 pf_type; +}; + +struct sss_tool_card_info { + struct sss_tool_pf pf[SSS_TOOL_PF_INFO_MAX]; + u32 pf_num; +}; + +struct sss_tool_pf_info { + u32 valid; + u32 pf_id; +}; + +struct sss_tool_cmd_chip_fault_stats { + u32 offset; + u8 chip_fault_stats[SSS_TOOL_DRV_BUF_SIZE_MAX]; +}; + +struct sss_tool_npu_msg { + u32 mod : 8; + u32 cmd : 8; + u32 ack_type : 3; + u32 direct_resp : 1; + u32 len : 12; +}; + +struct sss_tool_mpu_msg { + u32 channel : 8; + u32 mod : 8; + u32 cmd : 16; +}; + +struct sss_tool_msg { + char device_name[IFNAMSIZ]; + u32 module; + union { + u32 msg_formate; /* for driver */ + struct sss_tool_npu_msg npu_cmd; + 
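+		/* command descriptor for messages routed to the management CPU (MPU) */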
struct sss_tool_mpu_msg mpu_cmd; + }; + u32 timeout; /* for mpu/npu cmd */ + u32 func_id; + u32 buf_in_size; + u32 buf_out_size; + void *in_buf; + void *out_buf; + int bus_num; + u8 port_id; + u8 rsvd1[3]; + u32 rsvd2[4]; +}; + +#endif /* SSS_TOOL_HW_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c new file mode 100644 index 0000000000000000000000000000000000000000..776725e0418af6f907f6417a7aa5296a48a5f023 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_main.c @@ -0,0 +1,736 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_adapter_mgmt.h" +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_tool.h" + +#define SSS_TOOL_DEV_PATH "/dev/sssnic_nictool_dev" +#define SSS_TOOL_DEV_CLASS "sssnic_nictool_class" +#define SSS_TOOL_DEV_NAME "sssnic_nictool_dev" + +#define SSS_TOOL_CTRLQ_BUF_SIZE_MAX 2048U +#define SSS_TOOL_MSG_IN_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_MSG_OUT_SIZE_MAX (2048 * 1024) +#define SSS_TOOL_BUF_SIZE_MAX (2048 * 1024) + +typedef int (*sss_tool_deal_handler_fun)(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +struct sss_tool_deal_handler { + enum module_name msg_name; + sss_tool_deal_handler_fun func; +}; + +static int g_nictool_ref_cnt; + +static dev_t g_dev_id = {0}; + +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static void *g_card_node_array[SSS_TOOL_CARD_MAX] = {0}; +void *g_card_va[SSS_TOOL_CARD_MAX] = {0}; +u64 g_card_pa[SSS_TOOL_CARD_MAX] = {0}; +int g_card_id; + +static int sss_tool_msg_to_nic(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = -EINVAL; + void *uld_dev = NULL; + enum sss_service_type service_type; + struct sss_uld_info *uld_info = sss_get_uld_info(); + + service_type = tool_msg->module - SSS_TOOL_MSG_TO_SRV_DRV_BASE; + if (service_type >= SSS_SERVICE_TYPE_MAX) { + tool_err("Invalid input module id: %u\n", tool_msg->module); + return -EINVAL; + } + + uld_dev = sss_get_uld_dev(hal_dev, service_type); + if (!uld_dev) { + if (tool_msg->msg_formate == SSS_TOOL_GET_DRV_VERSION) + return 0; + + tool_err("Fail to get uld device\n"); + return -EINVAL; + } + + if (uld_info[service_type].ioctl) + ret = uld_info[service_type].ioctl(uld_dev, tool_msg->msg_formate, + in_buf, in_len, out_buf, out_len); + sss_uld_dev_put(hal_dev, service_type); + + return ret; +} + +void sss_tool_free_in_buf(void *hwdev, const struct sss_tool_msg *tool_msg, void *in_buf) +{ + if (!in_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) + sss_free_ctrlq_msg_buf(hwdev, in_buf); + else + kfree(in_buf); +} + +void sss_tool_free_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *out_buf) +{ + if (!out_buf) + return; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) + sss_free_ctrlq_msg_buf(hwdev, out_buf); + else + kfree(out_buf); +} + +int sss_tool_alloc_in_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 in_len, void **in_buf) +{ + void *msg_buf = NULL; + + if (!in_len) + return 0; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU) { + struct sss_ctrl_msg_buf *cmd_buf = NULL; + + if (in_len > 
SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + tool_err("Invalid ctrlq in len(%u) more than %u\n", + in_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + cmd_buf = sss_alloc_ctrlq_msg_buf(hwdev); + if (!cmd_buf) { + tool_err("Fail to alloc ctrlq msg buf\n"); + return -ENOMEM; + } + *in_buf = (void *)cmd_buf; + cmd_buf->size = (u16)in_len; + } else { + if (in_len > SSS_TOOL_MSG_IN_SIZE_MAX) { + tool_err("Invalid in len(%u) more than %u\n", + in_len, SSS_TOOL_MSG_IN_SIZE_MAX); + return -ENOMEM; + } + msg_buf = kzalloc(in_len, GFP_KERNEL); + *in_buf = msg_buf; + } + + if (!(*in_buf)) { + tool_err("Fail to alloc in buf\n"); + return -ENOMEM; + } + + return 0; +} + +int sss_tool_alloc_out_buf(void *hwdev, struct sss_tool_msg *tool_msg, + u32 out_len, void **out_buf) +{ + if (!out_len) { + tool_info("out len is 0, need not alloc buf\n"); + return 0; + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && + !tool_msg->npu_cmd.direct_resp) { + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (out_len > SSS_TOOL_CTRLQ_BUF_SIZE_MAX) { + tool_err("Invalid ctrlq out len(%u) more than %u\n", + out_len, SSS_TOOL_CTRLQ_BUF_SIZE_MAX); + return -ENOMEM; + } + + msg_buf = sss_alloc_ctrlq_msg_buf(hwdev); + *out_buf = (void *)msg_buf; + } else { + if (out_len > SSS_TOOL_MSG_OUT_SIZE_MAX) { + tool_err("Invalid out len(%u) more than %u\n", + out_len, SSS_TOOL_MSG_OUT_SIZE_MAX); + return -ENOMEM; + } + *out_buf = kzalloc(out_len, GFP_KERNEL); + } + if (!(*out_buf)) { + tool_err("Fail to alloc out buf\n"); + return -ENOMEM; + } + + return 0; +} + +int sss_tool_copy_to_user(struct sss_tool_msg *tool_msg, + u32 out_len, void *out_buf) +{ + void *out_msg = NULL; + + if (tool_msg->module == SSS_TOOL_MSG_TO_NPU && !tool_msg->npu_cmd.direct_resp) { + out_msg = ((struct sss_ctrl_msg_buf *)out_buf)->buf; + if (copy_to_user(tool_msg->out_buf, out_msg, out_len)) + return -EFAULT; + return 0; + } + + if (copy_to_user(tool_msg->out_buf, out_buf, out_len)) + return -EFAULT; + + return 0; +} + +static int sss_tool_alloc_buf(void *hwdev, struct sss_tool_msg *tool_msg, u32 in_len, + void **in_buf, u32 out_len, void **out_buf) +{ + int ret; + + ret = sss_tool_alloc_in_buf(hwdev, tool_msg, in_len, in_buf); + if (ret) { + tool_err("Fail to alloc tool msg in buf\n"); + return ret; + } + + if (copy_from_user(*in_buf, tool_msg->in_buf, in_len)) { + tool_err("Fail to copy tool_msg to in buf\n"); + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + return -EFAULT; + } + + ret = sss_tool_alloc_out_buf(hwdev, tool_msg, out_len, out_buf); + if (ret) { + tool_err("Fail to alloc tool msg out buf\n"); + goto alloc_out_buf_err; + } + + return 0; + +alloc_out_buf_err: + sss_tool_free_in_buf(hwdev, tool_msg, *in_buf); + + return ret; +} + +static void sss_tool_free_buf(void *hwdev, struct sss_tool_msg *tool_msg, + void *in_buf, void *out_buf) +{ + sss_tool_free_out_buf(hwdev, tool_msg, out_buf); + sss_tool_free_in_buf(hwdev, tool_msg, in_buf); +} + +const struct sss_tool_deal_handler g_deal_msg_handle[] = { + {SSS_TOOL_MSG_TO_NPU, sss_tool_msg_to_npu}, + {SSS_TOOL_MSG_TO_MPU, sss_tool_msg_to_mpu}, + {SSS_TOOL_MSG_TO_SM, sss_tool_msg_to_sm}, + {SSS_TOOL_MSG_TO_HW_DRIVER, sss_tool_msg_to_hw}, + {SSS_TOOL_MSG_TO_NIC_DRIVER, sss_tool_msg_to_nic} +}; + +static int sss_tool_deal_cmd(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int ret = 0; + int index; + int msg_num = ARRAY_LEN(g_deal_msg_handle); + + for (index = 0; index < msg_num; index++) { + if (tool_msg->module != 
g_deal_msg_handle[index].msg_name) + continue; + + ret = g_deal_msg_handle[index].func(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + break; + } + + if (index == msg_num) + ret = sss_tool_msg_to_nic(hal_dev, tool_msg, + in_buf, in_len, out_buf, out_len); + + return ret; +} + +static struct sss_hal_dev *sss_tool_get_hal_dev_by_msg(struct sss_tool_msg *tool_msg) +{ + struct sss_hal_dev *hal_dev = NULL; + + if (tool_msg->module >= SSS_TOOL_MSG_TO_SRV_DRV_BASE && + tool_msg->module < SSS_TOOL_MSG_TO_DRIVER_MAX && + tool_msg->msg_formate != SSS_TOOL_GET_DRV_VERSION) { + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + tool_msg->module - + SSS_TOOL_MSG_TO_SRV_DRV_BASE); + } else { + hal_dev = sss_get_lld_dev_by_chip_name(tool_msg->device_name); + if (!hal_dev) + hal_dev = sss_get_lld_dev_by_dev_name(tool_msg->device_name, + SSS_SERVICE_TYPE_MAX); + } + + if (tool_msg->module == SSS_TOOL_MSG_TO_NIC_DRIVER && + (tool_msg->msg_formate == SSS_TOOL_GET_XSFP_INFO || + tool_msg->msg_formate == SSS_TOOL_GET_XSFP_PRESENT)) + hal_dev = sss_get_lld_dev_by_chip_and_port(tool_msg->device_name, + tool_msg->port_id); + + return hal_dev; +} + +static int sss_tool_check_msg_valid(struct sss_tool_msg *tool_msg) +{ + if (tool_msg->buf_out_size > SSS_TOOL_BUF_SIZE_MAX || + tool_msg->buf_in_size > SSS_TOOL_BUF_SIZE_MAX) { + tool_err("Invalid in buf len: %u or out buf len: %u\n", + tool_msg->buf_in_size, tool_msg->buf_out_size); + return -EFAULT; + } + + return 0; +} + +static long sss_tool_msg_ioctl(unsigned long arg) +{ + int ret = 0; + u32 in_len = 0; + u32 expect_out_len = 0; + u32 out_len = 0; + void *in_buf = NULL; + void *out_buf = NULL; + struct sss_hal_dev *hal_dev = NULL; + struct sss_tool_msg tool_msg = {0}; + + if (copy_from_user(&tool_msg, (void *)arg, sizeof(tool_msg))) { + tool_err("Fail to copy msg from user space\n"); + return -EFAULT; + } + + if (sss_tool_check_msg_valid(&tool_msg)) { + tool_err("Fail to check msg valid\n"); + return -EFAULT; + } + + tool_msg.device_name[IFNAMSIZ - 1] = '\0'; + expect_out_len = tool_msg.buf_out_size; + in_len = tool_msg.buf_in_size; + + hal_dev = sss_tool_get_hal_dev_by_msg(&tool_msg); + if (!hal_dev) { + if (tool_msg.msg_formate != SSS_TOOL_DEV_NAME_TEST) + tool_err("Fail to find device %s for module %d\n", + tool_msg.device_name, tool_msg.module); + return -ENODEV; + } + + if (tool_msg.msg_formate == SSS_TOOL_DEV_NAME_TEST) + return 0; + + ret = sss_tool_alloc_buf(hal_dev->hwdev, &tool_msg, + in_len, &in_buf, expect_out_len, &out_buf); + if (ret) { + tool_err("Fail to alloc cmd buf\n"); + goto out_free_lock; + } + + out_len = expect_out_len; + + ret = sss_tool_deal_cmd(hal_dev, &tool_msg, in_buf, in_len, out_buf, &out_len); + if (ret) { + tool_err("Fail to execute cmd, module: %u, ret: %d.\n", tool_msg.module, ret); + goto out_free_buf; + } + + if (out_len > expect_out_len) { + ret = -EFAULT; + tool_err("Fail to execute cmd, expected out len from user: %u, out len: %u\n", + expect_out_len, out_len); + goto out_free_buf; + } + + ret = sss_tool_copy_to_user(&tool_msg, out_len, out_buf); + if (ret) + tool_err("Fail to copy return information to user space\n"); + +out_free_buf: + sss_tool_free_buf(hal_dev->hwdev, &tool_msg, in_buf, out_buf); + +out_free_lock: + lld_dev_put(hal_dev); + return (long)ret; +} + +static long sss_tool_knl_ffm_info_rd(struct sss_tool_dbg_param *dbg_param, + struct sss_tool_knl_dbg_info *dbg_info) +{ + if (copy_to_user(dbg_param->param.ffm_rd, dbg_info->ffm, + (unsigned int)sizeof(*dbg_param->param.ffm_rd))) { + 
tool_err("Fail to copy ffm_info to user space\n"); + return -EFAULT; + } + + return 0; +} + +static struct sss_card_node *sss_tool_find_card_node(char *chip_name) +{ + int i; + struct sss_card_node *card_node = NULL; + + for (i = 0; i < SSS_TOOL_CARD_MAX; i++) { + card_node = (struct sss_card_node *)g_card_node_array[i]; + if (!card_node) + continue; + if (!strncmp(chip_name, card_node->chip_name, IFNAMSIZ)) + break; + } + if (i == SSS_TOOL_CARD_MAX || !card_node) + return NULL; + + g_card_id = i; + + return card_node; +} + +static long sss_tool_dbg_ioctl(unsigned int cmd_type, unsigned long arg) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + struct sss_card_node *card_node = NULL; + struct sss_tool_dbg_param param = {0}; + long ret; + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + tool_err("Fail to copy msg param from user\n"); + return -EFAULT; + } + + sss_hold_chip_node(); + + card_node = sss_tool_find_card_node(param.chip_name); + if (!card_node) { + sss_put_chip_node(); + tool_err("Fail to find card node %s\n", param.chip_name); + return -EFAULT; + } + + dbg_info = (struct sss_tool_knl_dbg_info *)card_node->dbgtool_info; + + down(&dbg_info->dbgtool_sem); + + if (cmd_type == SSS_TOOL_DBG_CMD_FFM_RD) { + ret = sss_tool_knl_ffm_info_rd(¶m, dbg_info); + } else if (cmd_type == SSS_TOOL_DBG_CMD_MSG_2_UP) { + tool_info("cmd(0x%x) not suppose.\n", cmd_type); + ret = 0; + } else { + tool_err("Fail to execute cmd(0x%x) ,it is not support\n", cmd_type); + ret = -EFAULT; + } + + up(&dbg_info->dbgtool_sem); + + sss_put_chip_node(); + + return ret; +} + +static int sss_tool_release(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static int sss_tool_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t sss_tool_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t sss_tool_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static long sss_tool_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + unsigned int cmd_type = _IOC_NR(cmd); + + if (cmd_type == SSS_TOOL_CMD_TYPE) + return sss_tool_msg_ioctl(arg); + + return sss_tool_dbg_ioctl(cmd_type, arg); +} + +static int sss_tool_mem_mmap(struct file *filp, struct vm_area_struct *mem_area) +{ + unsigned long mem_size = mem_area->vm_end - mem_area->vm_start; + phys_addr_t offset = (phys_addr_t)mem_area->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (mem_size > SSS_TOOL_MEM_MAP_SIZE) { + tool_err("Fail to map mem, mem_size :%ld, alloc size: %ld\n", + mem_size, SSS_TOOL_MEM_MAP_SIZE); + return -EAGAIN; + } + + phy_addr = offset ? 
offset : g_card_pa[g_card_id]; + if (!phy_addr) { + tool_err("Fail to map mem, card_id = %d phy_addr is 0\n", g_card_id); + return -EAGAIN; + } + + mem_area->vm_page_prot = pgprot_noncached(mem_area->vm_page_prot); + if (remap_pfn_range(mem_area, mem_area->vm_start, (phy_addr >> PAGE_SHIFT), + mem_size, mem_area->vm_page_prot)) { + tool_err("Fail to remap pfn range.\n"); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations sss_tool_file_ops = { + .owner = THIS_MODULE, + .release = sss_tool_release, + .open = sss_tool_open, + .read = sss_tool_read, + .write = sss_tool_write, + .unlocked_ioctl = sss_tool_unlocked_ioctl, + .mmap = sss_tool_mem_mmap, +}; + +static struct sss_tool_knl_dbg_info *sss_tool_alloc_dbg_info(void *hwdev) +{ + struct sss_tool_knl_dbg_info *dbg_info = NULL; + + dbg_info = (struct sss_tool_knl_dbg_info *) + kzalloc(sizeof(struct sss_tool_knl_dbg_info), GFP_KERNEL); + if (!dbg_info) + return NULL; + + dbg_info->ffm = (struct sss_tool_ffm_record_info *) + kzalloc(sizeof(*dbg_info->ffm), GFP_KERNEL); + if (!dbg_info->ffm) { + tool_err("Fail to alloc ffm_record_info\n"); + kfree(dbg_info); + return NULL; + } + + return dbg_info; +} + +static void sss_tool_free_dbg_info(struct sss_tool_knl_dbg_info *dbg_info) +{ + kfree(dbg_info->ffm); + kfree(dbg_info); +} + +static int sss_tool_get_node_id(struct sss_card_node *card_node, int *node_id) +{ + int ret; + + ret = sscanf(card_node->chip_name, SSS_CHIP_NAME "%d", node_id); + if (ret < 0) { + tool_err("Fail to get card id\n"); + return -ENOMEM; + } + + return 0; +} + +static int sss_tool_add_func_to_card_node(void *hwdev, struct sss_card_node *card_node) +{ + int func_id = sss_get_func_id(hwdev); + struct sss_tool_knl_dbg_info *dbg_info = NULL; + int ret; + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = hwdev; + + if (card_node->func_num++) + return 0; + + dbg_info = sss_tool_alloc_dbg_info(hwdev); + if (!dbg_info) { + ret = -ENOMEM; + tool_err("Fail to alloc dbg_info\n"); + goto alloc_dbg_info_err; + } + card_node->dbgtool_info = dbg_info; + sema_init(&dbg_info->dbgtool_sem, 1); + + ret = sss_tool_get_node_id(card_node, &node_id); + if (ret) { + tool_err("Fail to add node to global array\n"); + goto get_node_id_err; + } + g_card_node_array[node_id] = card_node; + + return 0; + +get_node_id_err: + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + +alloc_dbg_info_err: + card_node->func_num--; + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + return ret; +} + +static void sss_tool_del_func_in_card_node(void *hwdev, struct sss_card_node *card_node) +{ + struct sss_tool_knl_dbg_info *dbg_info = card_node->dbgtool_info; + int func_id = sss_get_func_id(hwdev); + int node_id; + + if (sss_get_func_type(hwdev) != SSS_FUNC_TYPE_VF) + card_node->func_handle_array[func_id] = NULL; + + if (--card_node->func_num) + return; + + sss_tool_get_node_id(card_node, &node_id); + if (node_id < SSS_TOOL_CARD_MAX) + g_card_node_array[node_id] = NULL; + + sss_tool_free_dbg_info(dbg_info); + card_node->dbgtool_info = NULL; + + if (node_id < SSS_TOOL_CARD_MAX) + (void)sss_tool_free_card_mem(node_id); +} + +static int sss_tool_create_dev(void) +{ + int ret; + struct device *pdevice = NULL; + + ret = alloc_chrdev_region(&g_dev_id, 0, 1, SSS_TOOL_DEV_NAME); + if (ret) { + tool_err("Fail to alloc sssnic_nictool_dev region(0x%x)\n", ret); + return ret; + } + + g_nictool_class = class_create(THIS_MODULE, 
SSS_TOOL_DEV_CLASS); + if (IS_ERR(g_nictool_class)) { + tool_err("Fail to create sssnic_nictool_class\n"); + ret = -EFAULT; + goto create_class_err; + } + + cdev_init(&g_nictool_cdev, &sss_tool_file_ops); + + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + tool_err("Fail to add sssnic_nictool_dev to operating system (0x%x)\n", ret); + goto add_cdev_err; + } + + pdevice = device_create(g_nictool_class, NULL, g_dev_id, NULL, SSS_TOOL_DEV_NAME); + if (IS_ERR(pdevice)) { + tool_err("Fail to create sssnic_nictool_dev on operating system\n"); + ret = -EFAULT; + goto create_device_err; + } + + tool_info("Success to register sssnic_nictool_dev to system\n"); + + return 0; + +create_device_err: + cdev_del(&g_nictool_cdev); + +add_cdev_err: + class_destroy(g_nictool_class); + +create_class_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +static void sss_tool_destroy_dev(void) +{ + device_destroy(g_nictool_class, g_dev_id); + cdev_del(&g_nictool_cdev); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + tool_info("Success to unregister sssnic_nictool_dev to system\n"); +} + +int sss_tool_init(void *hwdev, void *chip_node) +{ + struct sss_card_node *card_node = (struct sss_card_node *)chip_node; + int ret; + + ret = sss_tool_add_func_to_card_node(hwdev, card_node); + if (ret) { + tool_err("Fail to add func to card node\n"); + return ret; + } + + if (g_nictool_ref_cnt++) { + tool_info("sssnic_nictool_dev has already create\n"); + return 0; + } + + ret = sss_tool_create_dev(); + if (ret) { + tool_err("Fail to create sssnic_nictool_dev\n"); + goto out; + } + + return 0; + +out: + g_nictool_ref_cnt--; + sss_tool_del_func_in_card_node(hwdev, card_node); + + return ret; +} + +void sss_tool_uninit(void *hwdev, void *chip_node) +{ + struct sss_card_node *chip_info = (struct sss_card_node *)chip_node; + + sss_tool_del_func_in_card_node(hwdev, chip_info); + + if (g_nictool_ref_cnt == 0) + return; + + if (--g_nictool_ref_cnt) + return; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) { + tool_err("Fail to uninit sssnictool, tool class is NULL.\n"); + return; + } + + sss_tool_destroy_dev(); +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c new file mode 100644 index 0000000000000000000000000000000000000000..8f8fb6d364d4927ec740c95ecfca802e036fd8d9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include +#include +#include +#include +#include +#include + +#include "sss_linux_kernel.h" +#include "sss_hw.h" +#include "sss_hwdev.h" +#include "sss_tool.h" +#include "sss_csr.h" +#include "sss_adapter_mgmt.h" +#include "sss_mgmt_info.h" +#include "sss_pci_global.h" +#include "sss_hwif_api.h" + +typedef int (*sss_tool_hw_cmd_func)(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct sss_tool_hw_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_hw_cmd_func func; +}; + +int sss_tool_get_func_type(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = 
(u16)sss_get_func_type(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +int sss_tool_get_func_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(u16) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", *out_size, sizeof(u16)); + return -EFAULT; + } + + *(u16 *)buf_out = (u16)sss_get_func_id(SSS_TO_HWDEV(hal_dev)); + + return 0; +} + +int sss_tool_get_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + struct sss_hw_stats *stats = buf_out; + struct sss_hw_stats *tmp = stats; + + if (!hwdev) + return -EINVAL; + + if (*out_size != sizeof(struct sss_hw_stats) || !stats) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + memcpy(stats, &hwdev->hw_stats, sizeof(struct sss_hw_stats)); + + atomic_set(&tmp->nic_ucode_event_stats[SSS_CHN_BUSY], + atomic_read(&node->channel_timeout_cnt)); + + return 0; +} + +static int sss_tool_clear_hw_driver_stats(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = hal_dev->hwdev; + struct sss_card_node *node = hwdev->chip_node; + + memset((void *)&hwdev->hw_stats, 0, sizeof(struct sss_hw_stats)); + memset((void *)hwdev->chip_fault_stats, 0, SSS_TOOL_CHIP_FAULT_SIZE); + + if (SSS_SUPPORT_CHANNEL_DETECT(hwdev) && atomic_read(&node->channel_timeout_cnt)) { + atomic_set(&node->channel_timeout_cnt, 0); + hwdev->aeq_busy_cnt = 0; +#if !defined(__UEFI__) && !defined(VMWARE) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(SSSNIC_CHANNEL_DETECT_PERIOD)); +#endif + } + + if (*out_size != sizeof(struct sss_hw_stats)) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(struct sss_hw_stats)); + return -EFAULT; + } + + return 0; +} + +static int sss_tool_get_self_test_result(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 val; + + if (*out_size != sizeof(u32) || !buf_out) { + tool_err("Invalid out_size from user :%u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + val = sss_chip_read_reg(SSS_TO_HWIF(hal_dev->hwdev), SSS_MGMT_HEALTH_STATUS_ADDR); + *(u32 *)buf_out = val; + + return 0; +} + +static void sss_tool_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) +{ + u32 size; + + if (offset >= SSS_TOOL_CHIP_FAULT_SIZE) { + tool_err("Invalid chip offset value: %d\n", offset); + return; + } + + size = min(SSS_TOOL_DRV_BUF_SIZE_MAX, SSS_TOOL_CHIP_FAULT_SIZE - (int)offset); + memcpy(chip_fault_stats, ((struct sss_hwdev *)hwdev)->chip_fault_stats + + offset, size); +} + +static int sss_tool_get_chip_faults_stats(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 offset = 0; + struct sss_tool_cmd_chip_fault_stats *info = NULL; + + if (!buf_in || !buf_out || *out_size != sizeof(*info) || + in_size != sizeof(*info)) { + tool_err("Invalid out_size from user: %d, expect: %lu\n", *out_size, sizeof(*info)); + return -EFAULT; + } + info = (struct sss_tool_cmd_chip_fault_stats *)buf_in; + offset = info->offset; + + info = (struct sss_tool_cmd_chip_fault_stats *)buf_out; + sss_tool_get_chip_fault_stats(hal_dev->hwdev, + info->chip_fault_stats, offset); + + return 0; +} + +static int 
sss_tool_get_single_card_info(struct sss_hal_dev *hal_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_out || *out_size != sizeof(struct sss_tool_card_info)) { + tool_err("Invalid buf out is NULL, or out_size != %lu\n", + sizeof(struct sss_tool_card_info)); + return -EINVAL; + } + + sss_get_card_info(hal_dev->hwdev, buf_out); + + return 0; +} + +static int sss_tool_is_driver_in_vm(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_out || (*out_size != sizeof(u8))) { + tool_err("Invalid parameter, buf_out is NULL or out_size != %lu\n", sizeof(u8)); + return -EINVAL; + } + + *((u8 *)buf_out) = sss_is_in_host() ? 0 : 1; + + return 0; +} + +static int sss_tool_get_all_chip_id_cmd(struct sss_hal_dev *hal_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(struct sss_card_id) || !buf_out) { + tool_err("Invalid parameter: out_size %u, expect %lu\n", + *out_size, sizeof(struct sss_card_id)); + return -EFAULT; + } + + sss_get_all_chip_id(buf_out); + + return 0; +} + +static int sss_tool_get_card_id(char *dev_name, int *id) +{ + int ret; + + ret = sscanf(dev_name, SSS_CHIP_NAME "%d", id); + if (ret < 0) { + tool_err("Fail to get card id\n"); + return ret; + } + + if (*id >= SSS_TOOL_CARD_MAX || *id < 0) { + tool_err("Invalid chip id %d, out of range: [0-%d]\n", *id, SSS_TOOL_CARD_MAX - 1); + return -EINVAL; + } + + return 0; +} + +static void sss_tool_get_pf_dev_info_param(struct sss_tool_pf_dev_info *dev_info, int card_id, + void **func_array) +{ + u32 func_id; + void *hwdev = NULL; + struct pci_dev *pdev = NULL; + + for (func_id = 0; func_id < SSS_TOOL_PF_DEV_MAX; func_id++) { + hwdev = (void *)func_array[func_id]; + + dev_info[func_id].phy_addr = g_card_pa[card_id]; + + if (!hwdev) { + dev_info[func_id].bar0_size = 0; + dev_info[func_id].bus = 0; + dev_info[func_id].slot = 0; + dev_info[func_id].func = 0; + } else { + pdev = (struct pci_dev *)sss_get_pcidev_hdl(hwdev); + dev_info[func_id].bar0_size = pci_resource_len(pdev, 0); + dev_info[func_id].bus = pdev->bus->number; + dev_info[func_id].slot = PCI_SLOT(pdev->devfn); + dev_info[func_id].func = PCI_FUNC(pdev->devfn); + } + } +} + +static int sss_tool_get_card_adm_mem(int card_id) +{ + int i; + unsigned char *card_va = NULL; + + g_card_id = card_id; + if (!g_card_va[card_id]) { + g_card_va[card_id] = + (void *)__get_free_pages(GFP_KERNEL, SSS_TOOL_PAGE_ORDER); + if (!g_card_va[card_id]) { + tool_err("Fail to alloc adm memory for card %d!\n", card_id); + return -EFAULT; + } + + memset(g_card_va[card_id], 0, PAGE_SIZE * (1 << SSS_TOOL_PAGE_ORDER)); + + g_card_pa[card_id] = virt_to_phys(g_card_va[card_id]); + if (!g_card_pa[card_id]) { + tool_err("Invalid phy addr for card %d is 0\n", card_id); + free_pages((unsigned long)g_card_va[card_id], SSS_TOOL_PAGE_ORDER); + g_card_va[card_id] = NULL; + return -EFAULT; + } + + card_va = g_card_va[card_id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(card_va)); + card_va += PAGE_SIZE; + } + } + + return 0; +} + +static int sss_tool_get_pf_dev_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_tool_pf_dev_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + if (!buf_out || *out_size != sizeof(struct sss_tool_pf_dev_info) * SSS_TOOL_PF_DEV_MAX) { + tool_err("Invalid param: out_size %u, expect %lu\n", + *out_size, 
sizeof(info) * SSS_TOOL_PF_DEV_MAX); + return -EFAULT; + } + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_get_pf_dev_info_param(info, id, node->func_handle_array); + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + return 0; +} + +long sss_tool_free_card_mem(int id) +{ + unsigned char *va = NULL; + int i; + + if (!g_card_va[id]) + return 0; + + va = g_card_va[id]; + for (i = 0; i < (1 << SSS_TOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(va)); + va += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_va[id], SSS_TOOL_PAGE_ORDER); + g_card_va[id] = NULL; + g_card_pa[id] = 0; + + return 0; +} + +static int sss_tool_free_all_card_mem(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int id; + int ret; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_tool_free_card_mem(id); + + return 0; +} + +static int sss_tool_check_card_info_param(char *dev_name, const void *buf_out, u32 out_size) +{ + int ret; + + if (!buf_out || out_size != sizeof(struct sss_card_func_info)) { + tool_err("Invalid out_size %u, expect %lu\n", + out_size, sizeof(struct sss_card_func_info)); + return -EINVAL; + } + + ret = memcmp(dev_name, SSS_CHIP_NAME, strlen(SSS_CHIP_NAME)); + if (ret) { + tool_err("Invalid chip name %s\n", dev_name); + return ret; + } + + return 0; +} + +static int sss_tool_get_card_func_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + int id = 0; + struct sss_card_func_info *info = buf_out; + struct sss_card_node *node = sss_get_card_node(hal_dev); + + ret = sss_tool_check_card_info_param(node->chip_name, buf_out, *out_size); + if (ret) + return ret; + + ret = sss_tool_get_card_id(node->chip_name, &id); + if (ret) + return ret; + + sss_get_card_func_info(node->chip_name, info); + + if (!info->pf_num) { + tool_err("Fail to get card func info, chip name %s\n", node->chip_name); + return -EFAULT; + } + + ret = sss_tool_get_card_adm_mem(id); + if (ret) { + tool_err("Fail to get adm memory for userspace %s\n", node->chip_name); + return -EFAULT; + } + + info->usr_adm_pa = g_card_pa[id]; + + return 0; +} + +static int sss_tool_get_pf_cap_info(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_hwdev *hwdev = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + struct sss_svc_cap_info *in_info = (struct sss_svc_cap_info *)buf_in; + struct sss_svc_cap_info *out_info = (struct sss_svc_cap_info *)buf_out; + + if (*out_size != sizeof(struct sss_svc_cap_info) || + in_size != sizeof(struct sss_svc_cap_info) || + !buf_in || !buf_out) { + tool_err("Invalid out_size %u, in_size: %u, expect %lu\n", + *out_size, in_size, sizeof(struct sss_svc_cap_info)); + return -EINVAL; + } + + if (in_info->func_id >= SSS_MAX_FUNC) { + tool_err("Invalid func id: %u, max_num: %u\n", + in_info->func_id, SSS_MAX_FUNC); + return -EINVAL; + } + + sss_hold_chip_node(); + hwdev = (struct sss_hwdev *)(node->func_handle_array)[in_info->func_id]; + if (!hwdev) { + sss_put_chip_node(); + return -EINVAL; + } + + memcpy(&out_info->cap, SSS_TO_SVC_CAP(hwdev), sizeof(struct sss_service_cap)); + sss_put_chip_node(); + + return 0; +} + +static int sss_tool_get_hw_drv_version(struct sss_hal_dev *hal_dev, const void 
*buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int ret; + struct sss_tool_drv_version_info *info = buf_out; + + if (!buf_out || *out_size != sizeof(*info)) { + tool_err("Invalid param, buf_out is NULL or out_size:%u, expect: %lu\n", + *out_size, sizeof(*info)); + return -EINVAL; + } + + ret = snprintf(info->ver, sizeof(info->ver), "%s %s", SSS_DRV_VERSION, + __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static int sss_tool_get_pf_id(struct sss_hal_dev *hal_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct sss_tool_pf_info *info = NULL; + struct sss_card_node *node = sss_get_card_node(hal_dev); + u32 port_id; + int ret; + + if (!node) + return -ENODEV; + + if (!buf_out || (*out_size != sizeof(*info)) || !buf_in || in_size != sizeof(port_id)) { + tool_err("Invalid out_size from user: %u, expect: %lu, in_size:%u\n", + *out_size, sizeof(*info), in_size); + return -EINVAL; + } + + port_id = *((u32 *)buf_in); + info = (struct sss_tool_pf_info *)buf_out; + + ret = sss_get_pf_id(node, port_id, &info->pf_id, &info->valid); + if (ret != 0) + return ret; + + *out_size = sizeof(*info); + + return 0; +} + +struct sss_tool_hw_cmd_handle g_hw_cmd_handle[] = { + {SSS_TOOL_FUNC_TYPE, sss_tool_get_func_type}, + {SSS_TOOL_GET_FUNC_IDX, sss_tool_get_func_id}, + {SSS_TOOL_GET_CHIP_INFO, sss_tool_get_card_func_info}, + {SSS_TOOL_GET_DRV_VERSION, sss_tool_get_hw_drv_version}, + {SSS_TOOL_GET_PF_ID, sss_tool_get_pf_id}, + {SSS_TOOL_GET_FUNC_CAP, sss_tool_get_pf_cap_info}, + {SSS_TOOL_GET_SELF_TEST_RES, sss_tool_get_self_test_result}, + {SSS_TOOL_GET_CHIP_ID, sss_tool_get_all_chip_id_cmd}, + {SSS_TOOL_GET_PF_DEV_INFO, sss_tool_get_pf_dev_info}, + {SSS_TOOL_IS_DRV_IN_VM, sss_tool_is_driver_in_vm}, + {SSS_TOOL_CMD_FREE_MEM, sss_tool_free_all_card_mem}, + {SSS_TOOL_GET_CHIP_FAULT_STATS, (sss_tool_hw_cmd_func)sss_tool_get_chip_faults_stats}, + {SSS_TOOL_GET_SINGLE_CARD_INFO, (sss_tool_hw_cmd_func)sss_tool_get_single_card_info}, + {SSS_TOOL_GET_HW_STATS, (sss_tool_hw_cmd_func)sss_tool_get_hw_driver_stats}, + {SSS_TOOL_CLEAR_HW_STATS, sss_tool_clear_hw_driver_stats}, +}; + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int id; + int ret = 0; + int cmd_num = ARRAY_LEN(g_hw_cmd_handle); + enum sss_tool_driver_cmd_type cmd = + (enum sss_tool_driver_cmd_type)(tool_msg->msg_formate); + + for (id = 0; id < cmd_num; id++) { + if (cmd == g_hw_cmd_handle[id].cmd_type) { + ret = g_hw_cmd_handle[id].func + (hal_dev, buf_in, in_size, buf_out, out_size); + break; + } + } + + if (id == cmd_num) { + tool_err("Fail to send msg to hw, cmd: %d out of range\n", cmd); + return -EINVAL; + } + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h new file mode 100644 index 0000000000000000000000000000000000000000..d02af2fe52c1c4bfdfbc3bb253018082e9a9674d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sdk.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SDK_H +#define SSS_TOOL_SDK_H + +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" +#include "sss_hw.h" + +long sss_tool_free_card_mem(int id); + +int sss_tool_msg_to_hw(struct sss_hal_dev *hal_dev, struct sss_tool_msg *tool_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c new file mode 100644 index 0000000000000000000000000000000000000000..549eb928f5c40d9fb418921b72f16ea6fc0cf794 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_hwdev.h" +#include "sss_hwif_adm.h" +#include "sss_tool_comm.h" +#include "sss_tool_sm.h" + +#define SSS_TOOL_CHIP_ACK 1 +#define SSS_TOOL_CHIP_NOACK 0 + +#define SSS_TOOL_SM_CHIP_OP_READ 0x2 +#define SSS_TOOL_SM_CHIP_OP_READ_CLEAR 0x6 + +#define SSS_TOOL_BIT_32 32 + +struct sss_tool_sm_in { + int node; + int id; + int instance; +}; + +struct sss_tool_sm_out { + u64 val1; + u64 val2; +}; + +union sss_tool_sm_chip_request_head { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct sss_tool_sm_chip_request { + u32 extra; + union sss_tool_sm_chip_request_head head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +/* counter read response union */ +union sss_tool_chip_rd_response { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; +}; + +typedef int (*sss_tool_sm_handler_func)(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf); + +struct sss_tool_sm_handler { + enum sss_tool_sm_cmd_type msg_name; + sss_tool_sm_handler_func sm_func; +}; + +static void sss_tool_sm_read_msg_create(struct sss_tool_sm_chip_request *request, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + request->head.value = 0; + request->head.bs.op_id = op_id; + request->head.bs.ack = ack; + request->head.bs.instance = instance_id; + request->head.value = HTONL(request->head.value); + + request->initial = init_val; + request->ctr_id = ctr_id; + request->ctr_id = HTONL(request->ctr_id); +} + +static void sss_tool_sm_node_htonl(u32 *node, u32 len) +{ + u32 *new_node = node; + u32 i; + + for (i = 0; i < len; i++) { + *new_node = HTONL(*new_node); + new_node++; + } +} + +static int sss_tool_sm_adm_msg_rd(void *hwdev, u32 id, u8 instance, + u8 node, union sss_tool_chip_rd_response *rsp, u8 opcode) +{ + struct sss_tool_sm_chip_request req = {0}; + int ret; + + if (!hwdev) + return -EFAULT; + + if (!SSS_SUPPORT_ADM_MSG((struct sss_hwdev *)hwdev)) { + tool_err("Fail to read sm data, device not support adm msg\n"); + return -EPERM; + } + + sss_tool_sm_read_msg_create(&req, instance, opcode, + SSS_TOOL_CHIP_ACK, id, 0); + + ret = sss_adm_msg_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)rsp, + (unsigned short)sizeof(*rsp)); + if (ret) { + tool_err("Fail to read sm data from adm msg, err(%d)\n", ret); + return ret; + } + + sss_tool_sm_node_htonl((u32 *)rsp, sizeof(*rsp) / sizeof(u32)); + + return 0; +} + +static int sss_tool_sm_msg_rd16(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u16 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, 
SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss16_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss32_rsp.value1; +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd32_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u32 val1; + union sss_tool_chip_rd_response rsp; + int ret = 0; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 32 bits\n"); + val1 = ~0; + goto out; + } + + val1 = rsp.bs_ss32_rsp.value1; + +out: + out_buf->val1 = val1; + return ret; +} + +static int sss_tool_sm_msg_rd128(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + sss_tool_sm_node_htonl((u32 *)&rsp, sizeof(rsp) / sizeof(u32)); + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd128_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + if ((id & 0x1) != 0) { + tool_err("Invalid id(%u), It is odd number\n", id); + val1 = ~0; + val2 = ~0; + ret = -EINVAL; + goto out; + } + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail to read sm 128 bits\n"); + val1 = ~0; + val2 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bp64_rsp.val1_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val1_l; + val2 = ((u64)rsp.bs_bp64_rsp.val2_h << SSS_TOOL_BIT_32) | rsp.bs_bp64_rsp.val2_l; + +out: + out_buf->val1 = val1; + out_buf->val2 = val2; + + return ret; +} + +static int sss_tool_sm_msg_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ); + if (ret) { + tool_err("Fail to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +static int sss_tool_sm_msg_rd64_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sss_tool_sm_out *out_buf) +{ + u64 val1 = 0; + int ret = 0; + union sss_tool_chip_rd_response rsp; + + ret = sss_tool_sm_adm_msg_rd(hwdev, id, instance, node, + &rsp, SSS_TOOL_SM_CHIP_OP_READ_CLEAR); + if (ret) { + tool_err("Fail 
to read sm 64 bits\n"); + val1 = ~0; + goto out; + } + + val1 = ((u64)rsp.bs_bs64_rsp.value1 << SSS_TOOL_BIT_32) | rsp.bs_bs64_rsp.value2; + +out: + out_buf->val1 = val1; + + return ret; +} + +const struct sss_tool_sm_handler g_sm_cmd_handle[] = { + {SSS_TOOL_SM_CMD_RD16, sss_tool_sm_msg_rd16}, + {SSS_TOOL_SM_CMD_RD32, sss_tool_sm_msg_rd32}, + {SSS_TOOL_SM_CMD_RD32_CLEAR, sss_tool_sm_msg_rd32_clear}, + {SSS_TOOL_SM_CMD_RD64, sss_tool_sm_msg_rd64}, + {SSS_TOOL_SM_CMD_RD64_CLEAR, sss_tool_sm_msg_rd64_clear}, + {SSS_TOOL_SM_CMD_RD64_PAIR, sss_tool_sm_msg_rd128}, + {SSS_TOOL_SM_CMD_RD64_PAIR_CLEAR, sss_tool_sm_msg_rd128_clear} +}; + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len) +{ + int index; + int ret = 0; + int cmd_num = ARRAY_LEN(g_sm_cmd_handle); + u32 msg_formate = msg->msg_formate; + struct sss_tool_sm_in *sm_in = in_buf; + struct sss_tool_sm_out *sm_out = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid in_buf or out buf param\n"); + return -EINVAL; + } + + if (in_len != sizeof(*sm_in) || *out_len != sizeof(*sm_out)) { + tool_err("Invalid out buf size :%u, in buf size: %u\n", + *out_len, in_len); + return -EINVAL; + } + + for (index = 0; index < cmd_num; index++) { + if (msg_formate != g_sm_cmd_handle[index].msg_name) + continue; + + ret = g_sm_cmd_handle[index].sm_func(hal_dev->hwdev, (u32)sm_in->id, + (u8)sm_in->instance, (u8)sm_in->node, sm_out); + break; + } + + if (index == cmd_num) { + tool_err("Fail to execute msg %d,could not find callback\n", msg_formate); + return -EINVAL; + } + + if (ret != 0) + tool_err("Fail to get sm information, id:%u, instance:%u, node:%u, msg:%d\n", + sm_in->id, sm_in->instance, sm_in->node, msg_formate); + + *out_len = sizeof(*sm_out); + + return ret; +} diff --git a/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h new file mode 100644 index 0000000000000000000000000000000000000000..7c32ebdf2f4d3164f656721329f1e918d10bacd9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/hw/tool/sss_tool_sm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_SM_H +#define SSS_TOOL_SM_H +#include "sss_pci_global.h" +#include "sss_tool_comm.h" +#include "sss_tool_hw.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0xff000000) >> 24)) +#endif + +int sss_tool_msg_to_sm(struct sss_hal_dev *hal_dev, struct sss_tool_msg *msg, + void *in_buf, u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h new file mode 100644 index 0000000000000000000000000000000000000000..4a9dd7eee1ad6bf7650801ff2627c219a246a68c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_aeq.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_AEQ_H +#define SSS_HW_AEQ_H + +enum sss_aeq_hw_event { + SSS_HW_FROM_INT = 0, + SSS_MBX_FROM_FUNC = 1, + SSS_MSG_FROM_MGMT = 2, + SSS_ADM_RSP = 3, + SSS_ADM_MSG_STS = 4, + SSS_MBX_SEND_RSLT = 5, + SSS_AEQ_EVENT_MAX +}; + +enum sss_aeq_sw_event { + SSS_STL_EVENT = 0, + SSS_STF_EVENT = 1, + SSS_AEQ_SW_EVENT_MAX +}; + +enum sss_ucode_event_type { + SSS_INTERN_ERR = 0x0, + SSS_CHN_BUSY = 0x7, + 
SSS_ERR_MAX = 0x8, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h new file mode 100644 index 0000000000000000000000000000000000000000..7626ec44b968e8fc723884389000776b65d58263 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ceq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_CEQ_H +#define SSS_HW_CEQ_H + +enum sss_ceq_event { + SSS_NIC_CTRLQ = 0x3, + SSS_NIC_SQ, + SSS_NIC_RQ, + SSS_CEQ_EVENT_MAX, +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h new file mode 100644 index 0000000000000000000000000000000000000000..aef21aa49b28822bf979be3b420814298a5ce759 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_common.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_COMMON_H +#define SSS_HW_COMMON_H + +#include + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +enum sss_func_type { + SSS_FUNC_TYPE_PF, + SSS_FUNC_TYPE_VF, + SSS_FUNC_TYPE_PPF, + SSS_FUNC_TYPE_UNKNOWN, +}; + +struct sss_dma_addr_align { + u32 real_size; + + void *origin_vaddr; + dma_addr_t origin_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +enum sss_process_ret { + SSS_PROCESS_OK = 0, + SSS_PROCESS_DOING = 1, + SSS_PROCESS_ERR = 2, +}; + +struct sss_sge { + u32 high_addr; + u32 low_addr; + u32 len; +}; + +typedef enum sss_process_ret(*sss_wait_handler_t)(void *priv_data); + +/* * + * sss_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be a multiple of 4B + */ +static inline void sss_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/* * + * sss_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +static inline void sss_be32_to_cpu(void *data, int len) +{ + int i; + int data_len; + u32 *array = data; + + if (!data) + return; + + data_len = len / sizeof(u32); + + for (i = 0; i < data_len; i++) { + *array = be32_to_cpu(*array); + array++; + } +} + +/* * + * sss_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +static inline void sss_set_sge(struct sss_sge *sge, dma_addr_t addr, int len) +{ + sge->high_addr = upper_32_bits(addr); + sge->low_addr = lower_32_bits(addr); + sge->len = len; +} + +#define sss_hw_be32(val) (val) +#define sss_hw_cpu32(val) (val) +#define sss_hw_cpu16(val) (val) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h new file mode 100644 index 0000000000000000000000000000000000000000..71921daa24526bd16827664938ce122da77c9531 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_ctrlq.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_CTRLQ_H +#define SSS_HW_CTRLQ_H + +#include +#include + +struct sss_ctrl_msg_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; + + /* Usage count, USERS DO NOT USE */ + atomic_t ref_cnt; +}; + +/** + * @brief sss_alloc_ctrlq_msg_buf - alloc ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @retval non-zero: success + * @retval null: failure + **/ +struct sss_ctrl_msg_buf *sss_alloc_ctrlq_msg_buf(void *hwdev); + +/** + * @brief sss_free_ctrlq_msg_buf - free ctrlq msg buffer + * @param hwdev: device pointer to hwdev + * @param msg_buf: buffer to free + **/ +void sss_free_ctrlq_msg_buf(void *hwdev, struct sss_ctrl_msg_buf *msg_buf); + +/** + * @brief sss_ctrlq_direct_reply - ctrlq direct message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_param: message out + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_direct_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief sss_ctrlq_detail_reply - ctrlq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param in_buf: message buffer in + * @param out_buf: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_ctrlq_detail_reply(void *hwdev, u8 mod, u8 cmd, + struct sss_ctrl_msg_buf *in_buf, struct sss_ctrl_msg_buf *out_buf, + u64 *out_param, u32 timeout, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h new file mode 100644 index 0000000000000000000000000000000000000000..362ba20656ce30f7ae9d028d6f8b78dcb3eb9495 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_event.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EVENT_H +#define SSS_HW_EVENT_H + +#include + +#include "sss_hw_svc_cap.h" + +enum sss_fault_source_type { + /* same as SSS_FAULT_TYPE_CHIP */ + SSS_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as SSS_FAULT_TYPE_NPU */ + SSS_FAULT_SRC_HW_MGMT_NPU, + /* same as SSS_FAULT_TYPE_MEM_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_MEM_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_RD_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as SSS_FAULT_TYPE_REG_WR_TIMEOUT */ + SSS_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + SSS_FAULT_SRC_SW_MGMT_NPU, + SSS_FAULT_SRC_MGMT_WATCHDOG, + SSS_FAULT_SRC_MGMT_RESET = 8, + SSS_FAULT_SRC_HW_PHY_FAULT, + SSS_FAULT_SRC_TX_PAUSE_EXCP, + SSS_FAULT_SRC_PCIE_LINK_DOWN = 20, + SSS_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, + SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_SRC_TYPE_MAX, +}; + +enum sss_comm_event_type { + SSS_EVENT_PCIE_LINK_DOWN, + SSS_EVENT_HEART_LOST, + SSS_EVENT_FAULT, + SSS_EVENT_SRIOV_STATE_CHANGE, + SSS_EVENT_CARD_REMOVE, + SSS_EVENT_MGMT_WATCHDOG, + SSS_EVENT_MAX +}; + +enum sss_event_service_type { + SSS_EVENT_SRV_COMM, + SSS_SERVICE_EVENT_BASE, + SSS_EVENT_SRV_NIC = 
SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_NIC, + SSS_EVENT_SRV_MIGRATE = SSS_SERVICE_EVENT_BASE + SSS_SERVICE_TYPE_MIGRATE, +}; + +enum sss_fault_err_level { + SSS_FAULT_LEVEL_FATAL, + SSS_FAULT_LEVEL_SERIOUS_RESET, + SSS_FAULT_LEVEL_HOST, + SSS_FAULT_LEVEL_SERIOUS_FLR, + SSS_FAULT_LEVEL_GENERAL, + SSS_FAULT_LEVEL_SUGGESTION, + SSS_FAULT_LEVEL_MAX, +}; + +enum sss_fault_type { + SSS_FAULT_TYPE_CHIP, + SSS_FAULT_TYPE_NPU, + SSS_FAULT_TYPE_MEM_RD_TIMEOUT, + SSS_FAULT_TYPE_MEM_WR_TIMEOUT, + SSS_FAULT_TYPE_REG_RD_TIMEOUT, + SSS_FAULT_TYPE_REG_WR_TIMEOUT, + SSS_FAULT_TYPE_PHY_FAULT, + SSS_FAULT_TYPE_TSENSOR_FAULT, + SSS_FAULT_TYPE_MAX, +}; + +#define SSS_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type)) + +#define SSS_MGMT_CMD_UNSUPPORTED 0xFF + +union sss_fault_hw_mgmt { + u32 val[4]; + /* valid only type == SSS_FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum sss_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == SSS_FAULT_LEVEL_SERIOUS_FLR */ + u8 rsvd1; + u8 host_id; + u16 func_id; + } chip; + + /* valid only if type == SSS_FAULT_TYPE_NPU */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == SSS_FAULT_TYPE_MEM_RD_TIMEOUT || + * SSS_FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_id; + } mem_timeout; + + /* valid only if type == SSS_FAULT_TYPE_REG_RD_TIMEOUT || + * SSS_FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct sss_fault_event { + u8 type; /* enum sss_fault_type */ + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd[2]; + union sss_fault_hw_mgmt info; +}; + +struct sss_cmd_fault_event { + u8 status; + u8 ver; + u8 rsvd[6]; + struct sss_fault_event fault_event; +}; + +struct sss_event_info { + u16 service; /* enum sss_event_service_type */ + u16 type; /* enum sss_comm_event_type */ + u8 event_data[104]; +}; + +typedef void (*sss_event_handler_t)(void *handle, struct sss_event_info *event); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h new file mode 100644 index 0000000000000000000000000000000000000000..b14290fb2f2727b17ad274b722773ace46f3ef27 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_export.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_EXPORT_H +#define SSS_HW_EXPORT_H + +#include + +#include "sss_hw_irq.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_event.h" + +int sss_chip_set_msix_attr(void *hwdev, + struct sss_irq_cfg intr_cfg, u16 channel); + +/* * + * @brief sss_chip_clear_msix_resend_bit - clear msix resend bit + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param clear_en: 1-clear + */ +void sss_chip_clear_msix_resend_bit(void *hwdev, u16 msix_id, bool clear_en); + +/** + * @brief sss_chip_reset_function - reset func + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param flag: reset flag + * @param channel: channel id + */ +int sss_chip_reset_function(void *hwdev, u16 func_id, u64 flag, u16 
channel); + +/** + * @brief sss_chip_set_root_ctx - set root context + * @param hwdev: device pointer to hwdev + * @param rq_depth: rq depth + * @param sq_depth: sq depth + * @param rx_size: rx buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_set_root_ctx(void *hwdev, + u32 rq_depth, u32 sq_depth, int rx_size, u16 channel); + +/** + * @brief sss_chip_clean_root_ctx - clean root context + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_chip_clean_root_ctx(void *hwdev, u16 channel); + +/* * + * @brief sss_get_mgmt_version - get management cpu version + * @param hwdev: device pointer to hwdev + * @param buf: output management version + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_mgmt_version(void *hwdev, u8 *buf, u8 buf_size, u16 channel); + +/** + * @brief sss_chip_set_func_used_state - set function service used state + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param state: function used state + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_chip_set_func_used_state(void *hwdev, + u16 service_type, bool state, u16 channel); + +bool sss_get_nic_capability(void *hwdev, struct sss_nic_service_cap *capability); + +/* * + * @brief sss_support_nic - function support nic + * @param hwdev: device pointer to hwdev + * @param cap: nic service capbility + * @retval true: function support nic + * @retval false: function not support nic + */ +bool sss_support_nic(void *hwdev); + +bool sss_support_ppa(void *hwdev, struct sss_ppa_service_cap *cap); + +/* * + * @brief sss_get_max_sq_num - get max queue number + * @param hwdev: device pointer to hwdev + * @retval non-zero: max queue number + * @retval zero: failure + */ +u16 sss_get_max_sq_num(void *hwdev); + +/* * + * @brief sss_get_phy_port_id - get physical port id + * @param hwdev: device pointer to hwdev + * @retval physical port id + */ +u8 sss_get_phy_port_id(void *hwdev); /* Obtain sss_service_cap.port_id */ + +/* * + * @brief sss_get_max_vf_num - get vf number + * @param hwdev: device pointer to hwdev + * @retval non-zero: vf number + * @retval zero: failure + */ +u16 sss_get_max_vf_num(void *hwdev); /* Obtain sss_service_cap.max_vf */ + +/* * + * @brief sss_get_cos_valid_bitmap - get cos valid bitmap + * @param hwdev: device pointer to hwdev + * @retval non-zero: valid cos bit map + * @retval zero: failure + */ +int sss_get_cos_valid_bitmap(void *hwdev, u8 *func_cos_bitmap, u8 *port_cos_bitmap); + +/* * + * @brief sss_alloc_irq - alloc irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param alloc_array: alloc irq info + * @param alloc_num: alloc number + * @retval zero: failure + * @retval non-zero: success + */ +u16 sss_alloc_irq(void *hwdev, enum sss_service_type service_type, + struct sss_irq_desc *alloc_array, u16 alloc_num); + +/* * + * @brief sss_free_irq - free irq + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param irq_id: irq id + */ +void sss_free_irq(void *hwdev, enum sss_service_type service_type, u32 irq_id); + +/* * + * @brief sss_register_dev_event - register hardware event + * @param hwdev: device pointer to hwdev + * @param data: private data will be used by the callback + * @param callback: callback function + */ +void 
sss_register_dev_event(void *hwdev, void *data, sss_event_handler_t callback); + +/* * + * @brief sss_unregister_dev_event - unregister hardware event + * @param dev: device pointer to hwdev + */ +void sss_unregister_dev_event(void *dev); + +/* * + * @brief sss_get_dev_present_flag - get chip present flag + * @param hwdev: device pointer to hwdev + * @retval 1: chip is present + * @retval 0: chip is absent + */ +int sss_get_dev_present_flag(const void *hwdev); + +/* * + * @brief sss_get_max_pf_num - get global max pf number + */ +u8 sss_get_max_pf_num(void *hwdev); + +u16 sss_nic_intr_num(void *hwdev); + +/* * + * @brief sss_get_chip_present_state - get card present state + * @param hwdev: device pointer to hwdev + * @param present_state: return card present state + * @retval zero: success + * @retval non-zero: failure + */ +int sss_get_chip_present_state(void *hwdev, bool *present_state); + +/** + * @brief sss_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev + * @param src: fault event source, reference to enum sss_fault_source_type + * @param level: fault level, reference to enum sss_fault_err_level + */ +void sss_fault_event_report(void *hwdev, u16 src, u16 level); + +/** + * @brief sss_register_service_adapter - register service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @param service_adapter: service adapter + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_service_adapter(void *hwdev, enum sss_service_type service_type, + void *service_adapter); + +/** + * @brief sss_unregister_service_adapter - unregister service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + **/ +void sss_unregister_service_adapter(void *hwdev, + enum sss_service_type service_type); + +/** + * @brief sss_get_service_adapter - get service adapter + * @param hwdev: device pointer to hwdev + * @param service_type: service type + * @retval non-zero: success + * @retval null: failure + **/ +void *sss_get_service_adapter(void *hwdev, enum sss_service_type service_type); + +/** + * @brief sss_do_event_callback - evnet callback to notify service driver + * @param hwdev: device pointer to hwdev + * @param event: event info to service driver + */ +void sss_do_event_callback(void *hwdev, struct sss_event_info *event); + +/** + * @brief sss_update_link_stats - link event stats + * @param hwdev: device pointer to hwdev + * @param link_state: link status + */ +void sss_update_link_stats(void *hwdev, bool link_state); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..60354bcf0efac15567b5efa6507e63ab638a5ac1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_irq.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_IRQ_H +#define SSS_HW_IRQ_H + +#include + +enum sss_msix_auto_mask { + SSS_CLR_MSIX_AUTO_MASK, + SSS_SET_MSIX_AUTO_MASK, +}; + +enum sss_msix_state { + SSS_MSIX_ENABLE, + SSS_MSIX_DISABLE, +}; + +struct sss_irq_desc { + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd; + u32 irq_id; /* OS IRQ id */ +}; + +struct sss_irq_cfg { + u32 lli_set; + u32 coalesc_intr_set; + u16 msix_id; + u8 lli_credit; + u8 lli_timer; + u8 pending; + u8 coalesc_timer; + u8 resend_timer; +}; + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..85f91d5c0df49409c54386c05aa229c16416a810 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_H +#define SSS_HW_MBX_H + +#include + +/* between Driver to MPU */ +enum sss_mgmt_cmd { + /* flr */ + SSS_COMM_MGMT_CMD_FUNC_RESET = 0, + SSS_COMM_MGMT_CMD_FEATURE_NEGO, + SSS_COMM_MGMT_CMD_FLUSH_DOORBELL, + SSS_COMM_MGMT_CMD_START_FLUSH, + SSS_COMM_MGMT_CMD_SET_FUNC_FLR, + SSS_COMM_MGMT_CMD_GET_GLOBAL_ATTR, + SSS_COMM_MGMT_CMD_SET_PPF_FLR_TYPE, + SSS_COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + + /* msi-x */ + SSS_COMM_MGMT_CMD_CFG_MSIX_NUM = 10, + + /* init cfg */ + SSS_COMM_MGMT_CMD_SET_CTRLQ_CTXT = 20, + SSS_COMM_MGMT_CMD_SET_VAT, + SSS_COMM_MGMT_CMD_CFG_PAGESIZE, + SSS_COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + SSS_COMM_MGMT_CMD_SET_DMA_ATTR, + + /* infra */ + SSS_COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, + SSS_COMM_MGMT_CMD_SET_MQM_CFG_INFO, + SSS_COMM_MGMT_CMD_SET_MQM_SRCH_GPA, + SSS_COMM_MGMT_CMD_SET_PPF_TMR, + SSS_COMM_MGMT_CMD_SET_PPF_HT_GPA, + SSS_COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + SSS_COMM_MGMT_CMD_SET_MBX_CRDT, + SSS_COMM_MGMT_CMD_CFG_TEMPLATE, + SSS_COMM_MGMT_CMD_SET_MQM_LIMIT, + + /* get chip info */ + SSS_COMM_MGMT_CMD_GET_FW_VERSION = 60, + SSS_COMM_MGMT_CMD_GET_BOARD_INFO, + SSS_COMM_MGMT_CMD_SYNC_TIME, + SSS_COMM_MGMT_CMD_GET_HW_PF_INFOS, + SSS_COMM_MGMT_CMD_SEND_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, + SSS_COMM_MGMT_CMD_GET_SML_TABLE_INFO, + + /* update firmware */ + SSS_COMM_MGMT_CMD_UPDATE_FW = 80, + SSS_COMM_MGMT_CMD_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_FW, + SSS_COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, + SSS_COMM_MGMT_CMD_SWITCH_CFG, + SSS_COMM_MGMT_CMD_CHECK_FLASH, + SSS_COMM_MGMT_CMD_CHECK_FLASH_RW, + SSS_COMM_MGMT_CMD_RESOURCE_CFG, + SSS_COMM_MGMT_CMD_UPDATE_BIOS, /* merge to SSS_COMM_MGMT_CMD_UPDATE_FW */ + SSS_COMM_MGMT_CMD_MPU_GIT_CODE, + + /* chip reset */ + SSS_COMM_MGMT_CMD_FAULT_REPORT = 100, + SSS_COMM_MGMT_CMD_WATCHDOG_INFO, + SSS_COMM_MGMT_CMD_MGMT_RESET, + SSS_COMM_MGMT_CMD_FFM_SET, + + /* chip info/log */ + SSS_COMM_MGMT_CMD_GET_LOG = 120, + SSS_COMM_MGMT_CMD_TEMP_OP, + SSS_COMM_MGMT_CMD_EN_AUTO_RST_CHIP, + SSS_COMM_MGMT_CMD_CFG_REG, + SSS_COMM_MGMT_CMD_GET_CHIP_ID, + SSS_COMM_MGMT_CMD_SYSINFO_DFX, + SSS_COMM_MGMT_CMD_PCIE_DFX_NTC, + SSS_COMM_MGMT_CMD_DICT_LOG_STATUS, /* LOG STATUS 127 */ + SSS_COMM_MGMT_CMD_MSIX_INFO, + SSS_COMM_MGMT_CMD_CHANNEL_DETECT, + + /* DFT mode */ + SSS_COMM_MGMT_CMD_GET_DIE_ID = 200, + SSS_COMM_MGMT_CMD_GET_EFUSE_TEST, + SSS_COMM_MGMT_CMD_EFUSE_INFO_CFG, + SSS_COMM_MGMT_CMD_GPIO_CTL, + SSS_COMM_MGMT_CMD_HI30_SERLOOP_START, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_SERLOOP_STOP, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /* DFT or ssslink */ + SSS_COMM_MGMT_CMD_ECC_TEST, + SSS_COMM_MGMT_CMD_FUNC_BIST_TEST, + SSS_COMM_MGMT_CMD_VPD_SET, + SSS_COMM_MGMT_CMD_VPD_GET, + + SSS_COMM_MGMT_CMD_ERASE_FLASH, + SSS_COMM_MGMT_CMD_QUERY_FW_INFO, + SSS_COMM_MGMT_CMD_GET_CFG_INFO, + SSS_COMM_MGMT_CMD_GET_UART_LOG, + SSS_COMM_MGMT_CMD_SET_UART_CMD, + SSS_COMM_MGMT_CMD_SPI_TEST, + + /* ALL reg read/write merge to SSS_COMM_MGMT_CMD_CFG_REG */ + SSS_COMM_MGMT_CMD_UP_REG_GET, + 
SSS_COMM_MGMT_CMD_UP_REG_SET, + SSS_COMM_MGMT_CMD_REG_READ, + SSS_COMM_MGMT_CMD_REG_WRITE, + SSS_COMM_MGMT_CMD_MAG_REG_WRITE, + SSS_COMM_MGMT_CMD_ANLT_REG_WRITE, + + SSS_COMM_MGMT_CMD_HEART_EVENT, + SSS_COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, + SSS_COMM_MGMT_CMD_LASTWORD_GET, /* merge to SSS_COMM_MGMT_CMD_GET_LOG */ + SSS_COMM_MGMT_CMD_READ_BIN_DATA, + SSS_COMM_MGMT_CMD_WWPN_GET, + SSS_COMM_MGMT_CMD_WWPN_SET, + + SSS_COMM_MGMT_CMD_SEND_API_ACK_BY_UP, + + SSS_COMM_MGMT_CMD_SET_MAC, + + /* MPU patch cmd */ + SSS_COMM_MGMT_CMD_LOAD_PATCH, + SSS_COMM_MGMT_CMD_REMOVE_PATCH, + SSS_COMM_MGMT_CMD_PATCH_ACTIVE, + SSS_COMM_MGMT_CMD_PATCH_DEACTIVE, + SSS_COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, + /* container host process */ + SSS_COMM_MGMT_CMD_CONTAINER_HOST_PROC, + /* nsci counter */ + SSS_COMM_MGMT_CMD_NCSI_COUNTER_PROC, +}; + +enum sss_channel_type { + SSS_CHANNEL_DEFAULT, + SSS_CHANNEL_COMM, + SSS_CHANNEL_NIC, + SSS_CHANNEL_ROCE, + SSS_CHANNEL_TOE, + SSS_CHANNEL_FC, + SSS_CHANNEL_OVS, + SSS_CHANNEL_DSW, + SSS_CHANNEL_MIG, + SSS_CHANNEL_CRYPT, + SSS_CHANNEL_MAX = 32, +}; + +enum sss_mbx_errcode { + SSS_MBX_ERRCODE_NO_ERRORS = 0, + /* VF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, + /* PPF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, + /* PF send the mbx data to the wrong destination functions */ + SSS_MBX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, + /* The mbx data size is set to all zero */ + SSS_MBX_ERRCODE_ZERO_DATA_SIZE = 0x400, + /* The sender function attribute has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, + /* The receiver function attr has not been learned by hardware */ + SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, +}; + +/* CTRLQ MODULE_TYPE */ +enum sss_mod_type { + SSS_MOD_TYPE_COMM = 0, /* HW communication module */ + SSS_MOD_TYPE_L2NIC = 1, /* L2NIC module */ + SSS_MOD_TYPE_ROCE = 2, + SSS_MOD_TYPE_PLOG = 3, + SSS_MOD_TYPE_TOE = 4, + SSS_MOD_TYPE_FLR = 5, + SSS_MOD_TYPE_RSVD1 = 6, + SSS_MOD_TYPE_CFGM = 7, /* Configuration module */ + SSS_MOD_TYPE_QMM = 8, + SSS_MOD_TYPE_RSVD2 = 9, + COMM_MOD_FC = 10, + SSS_MOD_TYPE_OVS = 11, + SSS_MOD_TYPE_DSW = 12, + SSS_MOD_TYPE_MIGRATE = 13, + SSS_MOD_TYPE_SSSLINK = 14, + SSS_MOD_TYPE_CRYPT = 15, /* secure crypto module */ + SSS_MOD_TYPE_VIO = 16, + SSS_MOD_TYPE_IMU = 17, + SSS_MOD_TYPE_DFT = 18, /* DFT */ + SSS_MOD_TYPE_HW_MAX = 19, /* hardware max module id */ + /* Software module id, for PF/VF and multi-host */ + SSS_MOD_TYPE_SW_FUNC = 20, + SSS_MOD_TYPE_MAX, +}; + +/* func reset flag */ +enum sss_func_reset_flag { + SSS_RESET_TYPE_FLUSH_BIT = 0, + SSS_RESET_TYPE_MQM, + SSS_RESET_TYPE_SMF, + SSS_RESET_TYPE_PF_BW_CFG, + + SSS_RESET_TYPE_COMM = 10, + SSS_RESET_TYPE_COMM_MGMT_CH, + SSS_RESET_TYPE_COMM_CMD_CH, + SSS_RESET_TYPE_NIC, + SSS_RESET_TYPE_OVS, + SSS_RESET_TYPE_VBS, + SSS_RESET_TYPE_ROCE, + SSS_RESET_TYPE_FC, + SSS_RESET_TYPE_TOE, + SSS_RESET_TYPE_IPSEC, + SSS_RESET_TYPE_MAX, +}; + +#define SSS_NIC_RESET BIT(SSS_RESET_TYPE_NIC) +#define SSS_OVS_RESET BIT(SSS_RESET_TYPE_OVS) +#define SSS_VBS_RESET BIT(SSS_RESET_TYPE_VBS) +#define SSS_ROCE_RESET BIT(SSS_RESET_TYPE_ROCE) +#define SSS_FC_RESET BIT(SSS_RESET_TYPE_FC) +#define SSS_TOE_RESET BIT(SSS_RESET_TYPE_TOE) +#define SSS_IPSEC_RESET BIT(SSS_RESET_TYPE_IPSEC) + +typedef int (*sss_vf_mbx_handler_t)(void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +typedef int (*sss_pf_mbx_handler_t)(void *pri_handle, u16 vf_id, u16 cmd, + void 
*buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +typedef int (*sss_ppf_mbx_handler_t)(void *pri_handle, u16 pf_id, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +typedef int (*sss_pf_from_ppf_mbx_handler_t)(void *pri_handle, + u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief sss_register_pf_mbx_handler - pf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param cb: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_pf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_pf_mbx_handler_t cb); + +/** + * @brief sss_register_vf_mbx_handler - vf register mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param pri_handle: private data will be used by the callback + * @param cb: callback function + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_register_vf_mbx_handler(void *hwdev, u8 mod, void *pri_handle, sss_vf_mbx_handler_t cb); + +/** + * @brief sss_unregister_pf_mbx_handler - pf unregister mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_pf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_unregister_vf_mbx_handler - vf unregister mbx msg callback + * @param hwdev: device pointer to hwdev + * @param mod: mod type + **/ +void sss_unregister_vf_mbx_handler(void *hwdev, u8 mod); + +/** + * @brief sss_sync_mbx_send_msg - send msg to management cpu + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_sync_mbx_send_msg(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +#define sss_sync_send_msg_ch(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, channel) + +#define sss_sync_send_msg(hwdev, cmd, buf_in, in_size, buf_out, out_size) \ + sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_COMM, cmd, \ + buf_in, in_size, buf_out, out_size, 0, SSS_CHANNEL_COMM) + +#define SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, cmd_ptr) \ + ((ret) != 0 || (out_len) == 0 || (cmd_ptr)->head.state != SSS_MGMT_CMD_SUCCESS) + +/** + * @brief sss_mbx_send_to_pf - vf mbx message to pf + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief sss_mbx_send_to_vf - mbx message to vf + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * 
@param timeout: timeout + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int sss_mbx_send_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int sss_mbx_send_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..2280b234e06039b7d822a82f2b94d348fb20736e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mbx_msg.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MBX_MSG_H +#define SSS_HW_MBX_MSG_H + +#include + +#define SSS_MGMT_MSG_SET_CMD 1 +#define SSS_MGMT_MSG_GET_CMD 0 + +#define SSS_MGMT_CMD_SUCCESS 0 + +struct sss_mgmt_msg_head { + u8 state; + u8 version; + u8 rsvd0[6]; +}; + +struct sss_cmd_func_reset { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; + u64 reset_flag; +}; + +enum { + SSS_COMM_F_ADM = 1U << 0, + SSS_COMM_F_CLP = 1U << 1, + SSS_COMM_F_CHANNEL_DETECT = 1U << 2, + SSS_COMM_F_MBX_SEGMENT = 1U << 3, + SSS_COMM_F_CTRLQ_NUM = 1U << 4, + SSS_COMM_F_VIRTIO_VQ_SIZE = 1U << 5, +}; + +#define SSS_MAX_FEATURE_QWORD 4 +struct sss_cmd_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 feature[SSS_MAX_FEATURE_QWORD]; +}; + +struct sss_cmd_clear_doorbell { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_cmd_clear_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd[3]; +}; + +struct sss_comm_global_attr { + u8 max_host_num; + u8 max_pf_num; + u16 vf_id_start; + + u8 mgmt_host_node_id; /* for adm msg to mgmt cpu */ + u8 ctrlq_num; + u8 rsvd1[2]; + u32 rsvd2[8]; +}; + +struct sss_cmd_channel_detect { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1[3]; + u32 rsvd2[2]; +}; + +enum sss_svc_type { + SSS_SVC_TYPE_COM = 0, + SSS_SVC_TYPE_NIC, + SSS_SVC_TYPE_OVS, + SSS_SVC_TYPE_ROCE, + SSS_SVC_TYPE_TOE, + SSS_SVC_TYPE_IOE, + SSS_SVC_TYPE_FC, + SSS_SVC_TYPE_VBS, + SSS_SVC_TYPE_IPSEC, + SSS_SVC_TYPE_VIRTIO, + SSS_SVC_TYPE_MIGRATE, + SSS_SVC_TYPE_PPA, + SSS_SVC_TYPE_MAX, +}; + +struct sss_cmd_func_svc_used_state { + struct sss_mgmt_msg_head head; + u16 func_id; + u16 svc_type; + u8 used_state; + u8 rsvd[35]; +}; + +struct sss_cmd_get_glb_attr { + struct sss_mgmt_msg_head head; + + struct sss_comm_global_attr attr; +}; + +enum sss_fw_ver_type { + SSS_FW_VER_TYPE_BOOT, + SSS_FW_VER_TYPE_MPU, + SSS_FW_VER_TYPE_NPU, + SSS_FW_VER_TYPE_SMU_L0, + SSS_FW_VER_TYPE_SMU_L1, + SSS_FW_VER_TYPE_CFG, +}; + +#define SSS_FW_VERSION_LEN 16 +#define SSS_FW_COMPILE_TIME_LEN 20 +struct sss_cmd_get_fw_version { + struct sss_mgmt_msg_head head; + + u16 fw_type; + u16 rsvd; + u8 ver[SSS_FW_VERSION_LEN]; + u8 time[SSS_FW_COMPILE_TIME_LEN]; +}; + +/* hardware define: ctrlq context */ +struct sss_ctrlq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct sss_cmd_ctrlq_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 ctrlq_id; + u8 rsvd[5]; + + struct sss_ctrlq_ctxt_info ctxt; +}; + +struct sss_cmd_root_ctxt { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 set_ctrlq_depth; + u8 ctrlq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 
rq_depth; + u64 rsvd2; +}; + +struct sss_cmd_wq_page_size { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 page_size; + + u32 rsvd; +}; + +struct sss_cmd_msix_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + +struct sss_cmd_dma_attr_config { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 entry_id; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv; +}; + +struct sss_cmd_ceq_ctrl_reg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 qid; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct sss_board_info { + u8 board_type; + u8 port_num; + u8 port_speed; + u8 pcie_width; + u8 host_num; + u8 pf_num; + u16 vf_total_num; + u8 tile_num; + u8 qcm_num; + u8 core_num; + u8 work_mode; + u8 service_mode; + u8 pcie_mode; + u8 boot_sel; + u8 board_id; + u32 cfg_addr; + u32 service_en_bitmap; + u8 scenes_id; + u8 cfg_tmpl_id; + u8 hw_id; + u8 rsvd; + u16 pf_vendor_id; + u8 tile_bitmap; + u8 sm_bitmap; +}; + +struct sss_cmd_board_info { + struct sss_mgmt_msg_head head; + + struct sss_board_info info; + u32 rsvd[22]; +}; + +struct sss_cmd_sync_time { + struct sss_mgmt_msg_head head; + + u64 mstime; + u64 rsvd; +}; + +struct sss_cmd_bdf_info { + struct sss_mgmt_msg_head head; + + u16 function_id; + u8 rsvd1[2]; + u8 bus; + u8 device; + u8 function; + u8 rsvd2[5]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..61ed2206cd3bac637b73d9e4d1032d0e365f0a49 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_mgmt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_MGMT_H +#define SSS_HW_MGMT_H + +enum sss_hwdev_init_state { + SSS_HW_NOT_INIT_OK = 0, + SSS_HW_ADM_INIT_OK, + SSS_HW_MBX_INIT_OK, + SSS_HW_CTRLQ_INIT_OK, +}; + +typedef void (*sss_mgmt_msg_handler_t)(void *data, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); + +int sss_register_mgmt_msg_handler(void *hwdev, u8 mod_type, void *data, + sss_mgmt_msg_handler_t handler); + +void sss_unregister_mgmt_msg_handler(void *hwdev, u8 mod_type); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..41f053608b353ac28224ed140614b6ba0b2be34b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_sriov.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SRIOV_H +#define SSS_HW_SRIOV_H + +#include + +struct sss_sriov_state_info { + u8 enable; + u16 vf_num; +}; +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h new file mode 100644 index 0000000000000000000000000000000000000000..0dbb4b6963ea7147e7ce2be7bfe38d39345a3c8e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_statistics.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_STATISTICS_H +#define SSS_HW_STATISTICS_H + +#include +#include + +#include 
"sss_hw_event.h" +#include "sss_hw_aeq.h" + +struct sss_qmm_stats { + atomic_t qmm_rsv_cnt[134]; +}; + +struct sss_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct sss_fault_event_stats { + atomic_t chip_fault_stats[22][SSS_FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[SSS_FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct sss_hw_stats { + atomic_t heart_lost_stats; + struct sss_qmm_stats qmm_stats; + struct sss_link_event_stats link_event_stats; + struct sss_fault_event_stats fault_event_stats; + atomic_t nic_ucode_event_stats[SSS_ERR_MAX]; +}; + +#define SSS_CHIP_FAULT_SIZE (110 * 1024) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h new file mode 100644 index 0000000000000000000000000000000000000000..158ba77fe66358184f7bf5d3fd5ad95ed202d47e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_svc_cap.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_SVC_CAP_H +#define SSS_HW_SVC_CAP_H + +#include + +enum sss_service_type { + SSS_SERVICE_TYPE_NIC = 0, + SSS_SERVICE_TYPE_OVS, + SSS_SERVICE_TYPE_ROCE, + SSS_SERVICE_TYPE_TOE, + SSS_SERVICE_TYPE_IOE, + SSS_SERVICE_TYPE_FC, + SSS_SERVICE_TYPE_VBS, + SSS_SERVICE_TYPE_IPSEC, + SSS_SERVICE_TYPE_VIRTIO, + SSS_SERVICE_TYPE_MIGRATE, + SSS_SERVICE_TYPE_PPA, + SSS_SERVICE_TYPE_CUSTOM, + SSS_SERVICE_TYPE_VROCE, + SSS_SERVICE_TYPE_MAX, + + SSS_SERVICE_TYPE_INTF = (1 << 15), + SSS_SERVICE_TYPE_QMM = (1 << 16), +}; + +/* RDMA service capability */ +enum { + SSS_RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + SSS_RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + SSS_RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + SSS_RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + SSS_RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + SSS_RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + SSS_RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + SSS_RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + SSS_RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + SSS_RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +struct sss_ppa_service_cap { + u16 qpc_pseudo_vf_start; + u16 qpc_pseudo_vf_num; + u32 qpc_pseudo_vf_ctx_num; + u32 pctx_size; /* 512B */ + u32 bloomfilter_len; + u8 bloomfilter_en; + u8 rsvd0; + u16 rsvd1; +}; + +struct sss_vbs_service_cap { + u16 vbs_max_volq; + u16 rsvd1; +}; + +/* PF/VF ToE service resource */ +struct sss_dev_toe_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 max_cctxt; + u32 max_cq; + u16 max_srq; + u32 srq_id_start; + u32 max_mpt; +}; + +/* ToE services */ +struct sss_toe_service_cap { + struct sss_dev_toe_svc_cap dev_toe_cap; + + u8 alloc_flag; + u8 rsvd[3]; + u32 pctx_size; /* 1KB */ + u32 scqc_size; /* 64B */ +}; + +/* PF FC service resource */ +struct sss_dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ */ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services */ +struct sss_fc_service_cap { + struct sss_dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of 
SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +struct sss_dev_roce_svc_own_cap { + u32 max_qp; + u32 max_cq; + u32 max_srq; + u32 max_mpt; + u32 max_drc_qp; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_size; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_size; + + u32 qpc_entry_size; + u32 max_wqe; + u32 max_rq_sg; + u32 max_sq_inline_data_size; + u32 max_rq_desc_size; + + u32 rdmarc_entry_size; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqe; + u32 reserved_srq; + u32 max_srq_sge; + u32 srqc_entry_size; + + u32 max_msg_size; /* Message size 2GB */ +}; + +/* RDMA service capability */ +struct sss_dev_rdma_svc_cap { + struct sss_dev_roce_svc_own_cap roce_own_cap; +}; + +struct sss_nic_service_cap { + u16 max_sq; + u16 max_rq; + u16 def_queue_num; +}; + +/* RDMA services */ +struct sss_rdma_service_cap { + struct sss_dev_rdma_svc_cap dev_rdma_cap; + + /* 1. the number of MTT PAs must be an integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8 or 16 PAs + */ + u8 log_mtt; + + /* Number of MTT tables (4M), is actually the MTT seg number */ + u32 mtt_num; + + u32 log_mtt_seg; + u32 mtt_entry_size; /* MTT table size 8B, including 1 PA(64bits) */ + u32 mpt_entry_size; /* MPT table size (64B) */ + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_size; + + /* 1. the number of RDMArc PAs must be an integer power of 2 + * 2. represented by logarithm. Each RDMArc table can + * contain 1, 2, 4, 8 or 16 PAs + */ + u8 log_rdmarc; + + u32 reserved_qp; /* Number of reserved QPs */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + + /* WQE maximum size of SQ (1024B), inline maximum + * size is 960B (944B aligned to 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 max_sq_desc_size; + + /* Currently the hardware supports 64B and 128B, + * defined as 64 bytes + */ + u32 wqebb_size; + + u32 max_cqe; /* Maximum depth of the CQ (64K-1) */ + u32 reserved_cq; /* Number of reserved CQs */ + u32 cqc_entry_size; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrw; /* Number of reserved MR/MR Windows */ + + /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + u32 max_fmr_map; + + u32 log_rdmarc_seg; /* table number of each RDMArc seg (3) */ + + /* Timeout time. 
Formula: Tr = 4.096us * 2^(local_ca_ack_delay), range [Tr, 4Tr] */ + u32 local_ca_ack_delay; + u32 port_num; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 pd_num; /* Maximum number of PD (128K) */ + u32 reserved_pd; /* Number of reserved PD */ + u32 max_xrcd; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcd; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + + /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + u32 gid_entry_size; + + u32 reserved_lkey; /* local_dma_lkey */ + u32 comp_vector_num; /* Number of completion vectors (32) */ + u32 page_size_cap; /* Supports 4K, 8K, 64K, 256K, 1M and 4M page sizes */ + + u32 flag; /* RDMA capability flags */ + u32 max_frpl_len; /* Maximum number of pages for FRMR registration */ + u32 max_pkey; /* Number of supported pkey groups */ +}; + +/* PF OVS service resource */ +struct sss_dev_ovs_svc_cap { + u32 max_pctx; /* Parent Context: max specifications 1M */ + u32 pseudo_vf_max_pctx; + u16 pseudo_vf_num; + u16 pseudo_vf_start_id; + u8 dynamic_qp_en; +}; + +/* OVS services */ +struct sss_ovs_service_cap { + struct sss_dev_ovs_svc_cap dev_ovs_cap; + + u32 pctx_size; /* 512B */ +}; + +/* PF IPsec service resource */ +struct sss_dev_ipsec_svc_cap { + u32 max_sactx; /* max IPsec SA context num */ + u16 max_cq; /* max IPsec SCQC num */ + u16 rsvd0; +}; + +/* IPsec services */ +struct sss_ipsec_service_cap { + struct sss_dev_ipsec_svc_cap dev_ipsec_cap; + u32 sactx_size; /* 512B */ +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h new file mode 100644 index 0000000000000000000000000000000000000000..677008109e18f76fd5d749190fa13ccb5bc985a2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_uld_driver.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_ULD_DRIVER_H +#define SSS_HW_ULD_DRIVER_H + +#include "sss_hw_event.h" +#include "sss_hw_svc_cap.h" + +struct sss_hal_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +struct sss_uld_info { + /* When it is unnecessary to initialize the uld dev, + * @probe needs to return 0 and uld_dev is set to NULL; + * if uld_dev is NULL, @remove will not be called when uninstalling + */ + int (*probe)(struct sss_hal_dev *hal_dev, void **uld_dev, char *uld_dev_name); + void (*remove)(struct sss_hal_dev *hal_dev, void *uld_dev); + int (*suspend)(struct sss_hal_dev *hal_dev, void *uld_dev, pm_message_t state); + int (*resume)(struct sss_hal_dev *hal_dev, void *uld_dev); + void (*event)(struct sss_hal_dev *hal_dev, void *uld_dev, + struct sss_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); +}; + +/* sss_register_uld - register an upper driver + * @type: uld service type + * @uld_info: uld callback + * + * Registers an upper-layer driver. + * Traverse existing devices and call @probe to initialize the uld device. + */ +int sss_register_uld(enum sss_service_type type, struct sss_uld_info *uld_info); + +/** + * sss_unregister_uld - unregister an upper driver + * @type: uld service type + * + * Traverse existing devices and call @remove to uninstall the uld device. + * Unregisters an existing upper-layer driver. 
+ */ +void sss_unregister_uld(enum sss_service_type type); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h new file mode 100644 index 0000000000000000000000000000000000000000..dd9dd0695a15b47df4a556f6c140fa239c1698c7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hw_wq.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_WQ_H +#define SSS_HW_WQ_H +#include + +#include "sss_hw_common.h" + +struct sss_wq { + u16 ci; + u16 pi; + + u32 q_depth; + u16 id_mask; + u16 elem_size_shift; + u16 page_num; + u16 rsvd1; + u32 elem_per_page; + u16 elem_per_page_shift; + u16 elem_per_page_mask; + + struct sss_dma_addr_align *page; + + dma_addr_t block_paddr; + u64 *block_vaddr; + + void *dev_hdl; + u32 page_size; + u16 elem_size; + u16 rsvd2; +} ____cacheline_aligned; + +#define SSS_WQ_MASK_ID(wq, id) ((id) & (wq)->id_mask) +#define SSS_WQ_MASK_PAGE(wq, pg_id) \ + ((pg_id) < (wq)->page_num ? (pg_id) : 0) +#define SSS_WQ_PAGE_ID(wq, id) ((id) >> (wq)->elem_per_page_shift) +#define SSS_WQ_OFFSET_IN_PAGE(wq, id) ((id) & (wq)->elem_per_page_mask) +#define SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, id_in_pg) \ + ((u8 *)(wq)->page[pg_id].align_vaddr + \ + ((id_in_pg) << (wq)->elem_size_shift)) +#define SSS_WQ_IS_0_LEVEL_CLA(wq) ((wq)->page_num == 1) + +static inline u16 sss_wq_free_wqebb(struct sss_wq *wq) +{ + return wq->q_depth - ((wq->q_depth + wq->pi - wq->ci) & wq->id_mask) - 1; +} + +static inline bool sss_wq_is_empty(struct sss_wq *wq) +{ + return SSS_WQ_MASK_ID(wq, wq->pi) == SSS_WQ_MASK_ID(wq, wq->ci); +} + +static inline void *sss_wq_get_one_wqebb(struct sss_wq *wq, u16 *pi) +{ + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi++; + + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, *pi), + SSS_WQ_OFFSET_IN_PAGE(wq, *pi)); +} + +static inline void *sss_wq_get_multi_wqebb(struct sss_wq *wq, + u16 num_wqebbs, u16 *pi, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + u32 pg_id; + u32 off_in_page; + + *pi = SSS_WQ_MASK_ID(wq, wq->pi); + wq->pi += num_wqebbs; + + pg_id = SSS_WQ_PAGE_ID(wq, *pi); + off_in_page = SSS_WQ_OFFSET_IN_PAGE(wq, *pi); + + if (off_in_page + num_wqebbs > wq->elem_per_page) { + /* wqe across wq page boundary */ + *second_part_wqebbs_addr = + SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_MASK_PAGE(wq, pg_id + 1), 0); + *first_part_wqebbs_num = wq->elem_per_page - off_in_page; + } else { + *second_part_wqebbs_addr = NULL; + *first_part_wqebbs_num = num_wqebbs; + } + + return SSS_WQ_GET_WQEBB_ADDR(wq, pg_id, off_in_page); +} + +static inline void sss_update_wq_ci(struct sss_wq *wq, u16 num_wqebbs) +{ + wq->ci += num_wqebbs; +} + +static inline void *sss_wq_wqebb_addr(struct sss_wq *wq, u16 id) +{ + return SSS_WQ_GET_WQEBB_ADDR(wq, SSS_WQ_PAGE_ID(wq, id), + SSS_WQ_OFFSET_IN_PAGE(wq, id)); +} + +static inline void *sss_wq_read_one_wqebb(struct sss_wq *wq, u16 *ci) +{ + *ci = SSS_WQ_MASK_ID(wq, wq->ci); + + return sss_wq_wqebb_addr(wq, *ci); +} + +static inline u64 sss_wq_get_first_wqe_page_addr(struct sss_wq *wq) +{ + return wq->page[0].align_paddr; +} + +static inline void sss_wq_reset(struct sss_wq *wq) +{ + u16 pg_id; + + wq->ci = 0; + wq->pi = 0; + + for (pg_id = 0; pg_id < wq->page_num; pg_id++) + memset(wq->page[pg_id].align_vaddr, 0, wq->page_size); +} + +int sss_create_wq(void *hwdev, struct sss_wq *wq, u32 q_depth, u16 block_size); +void sss_destroy_wq(struct sss_wq *wq); +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h new file mode 100644 index 0000000000000000000000000000000000000000..e83810dde176cc784331d6ce9cca131fdcb0b318 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/hw/sss_hwif_export.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HWIF_EXPORT_H +#define SSS_HWIF_EXPORT_H + +#include + +#include "sss_hw_common.h" +#include "sss_hw_irq.h" + +/** + * @brief sss_alloc_db_addr - alloc doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @retval zero: success + * @retval non-zero: failure + **/ +int sss_alloc_db_addr(void *hwdev, void __iomem **db_base); + +/** + * @brief sss_free_db_addr - free doorbell + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + **/ +void sss_free_db_addr(void *hwdev, const void __iomem *db_base); + +/* * + * @brief sss_nic_set_msix_auto_mask - set msix auto mask function + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix auto_mask flag, 1-enable, 2-clear + */ +void sss_chip_set_msix_auto_mask(void *hwdev, u16 msix_id, + enum sss_msix_auto_mask flag); + +/* * + * @brief sss_chip_set_msix_state - set msix state + * @param hwdev: device pointer to hwdev + * @param msix_id: msix id + * @param flag: msix state flag, 0-enable, 1-disable + */ +void sss_chip_set_msix_state(void *hwdev, u16 msix_id, + enum sss_msix_state flag); + +/* * + * @brief sss_get_global_func_id - get global function id + * @param hwdev: device pointer to hwdev + * @retval global function id + */ +u16 sss_get_global_func_id(void *hwdev); + +/* * + * @brief sss_get_pf_id_of_vf - get pf id of vf + * @param hwdev: device pointer to hwdev + * @retval pf id + */ +u8 sss_get_pf_id_of_vf(void *hwdev); + +/* * + * @brief sss_get_pcie_itf_id - get pcie port id + * @param hwdev: device pointer to hwdev + * @retval pcie port id + */ +u8 sss_get_pcie_itf_id(void *hwdev); + +/* * + * @brief sss_get_func_type - get function type + * @param hwdev: device pointer to hwdev + * @retval function type + */ +enum sss_func_type sss_get_func_type(void *hwdev); + +enum sss_func_type sss_get_func_id(void *hwdev); + +/* * + * @brief sss_get_glb_pf_vf_offset - get vf offset id of pf + * @param hwdev: device pointer to hwdev + * @retval vf offset id + */ +u16 sss_get_glb_pf_vf_offset(void *hwdev); + +/* * + * @brief sss_get_ppf_id - get ppf id + * @param hwdev: device pointer to hwdev + * @retval ppf id + */ +u8 sss_get_ppf_id(void *hwdev); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..3862c5993803cdbba84c42abcf86747c0febda1c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/kernel/sss_linux_kernel.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_LINUX_KERNEL_H_ +#define SSS_LINUX_KERNEL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +#include +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define 
NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +/* ************************************************************************ */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV + +/* ************************************************************************ */ +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif +#define HAVE_INET6_IFADDR_LIST + +/* ************************************************************************ */ +#define HAVE_NDO_GET_STATS64 + +/* ************************************************************************ */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif + +#ifndef HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_SET_PHYS_ID + +/* ************************************************************************ */ +#define HAVE_NETDEV_WANTED_FEAUTES + +/* ************************************************************************ */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif + +/* ************************************************************************ */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif + +/* ************************************************************************ */ +#define _kc_kmap_atomic(page) kmap_atomic(page) +#define _kc_kunmap_atomic(addr) kunmap_atomic(addr) + +/* ************************************************************************ */ +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO + +/* ************************************************************************ */ +#define HAVE_NAPI_GRO_FLUSH_OLD + +/* ************************************************************************ */ +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +/* ************************************************************************ */ +#define HAVE_ENCAP_TSO_OFFLOAD +#define HAVE_SKB_INNER_NETWORK_HEADER + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_SKB_INNER_PROTOCOL +#define HAVE_MPLS_FEATURES + +/* ************************************************************************ */ +#define HAVE_VXLAN_CHECKS +#define HAVE_NDO_SELECT_QUEUE_ACCEL + +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK + +/* ************************************************************************ */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#define HAVE_VLAN_FIND_DEV_DEEP_RCU + +/* ************************************************************************ */ +#define HAVE_SKBUFF_CSUM_LEVEL +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#define HAVE_ETH_GET_HEADLEN_FUNC + +/* ************************************************************************ */ +#define HAVE_RXFH_HASHFUNC + +/****************************************************************/ +#define HAVE_NDO_SET_VF_TRUST + +/* ************************************************************** */ +#include + +/* 
************************************************************** */ +#define HAVE_IO_MAP_WC_SIZE + +/* ************************************************************************ */ +#define HAVE_NETDEVICE_MIN_MAX_MTU + +/* ************************************************************************ */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA + +/* ************************************************************************ */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_ADM_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK + +/* ************************************************************************ */ +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_XDP_SUPPORT + +/* ************************************************************************ */ +#define HAVE_NDO_BPF_NETDEV_BPF +#define HAVE_TIMER_SETUP +#define HAVE_XDP_DATA_META + +/* ************************************************************************ */ +#define HAVE_NDO_SELECT_QUEUE_SB_DEV + +/*****************************************************************************/ +#define dev_open(x) dev_open(x, NULL) +#define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + +#ifndef get_ds +#define get_ds() (KERNEL_DS) +#endif + +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _sss_nic_dma_zalloc_coherent(d, s, h, f) +static inline void *_sss_nic_dma_zalloc_coherent(struct device *dev, + size_t size, dma_addr_t *dma_handle, gfp_t gfp) +{ + /* Above kernel 5.0, fixed up all remaining architectures + * to zero the memory in dma_alloc_coherent, and made + * dma_zalloc_coherent a no-op wrapper around dma_alloc_coherent, + * which fixes all of the above issues. + */ + return dma_alloc_coherent(dev, size, dma_handle, gfp); +} +#endif + +#ifndef do_gettimeofday +#define do_gettimeofday(time) _kc_do_gettimeofday(time) +static inline void _kc_do_gettimeofday(struct timeval *tv) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} +#endif + +/*****************************************************************************/ +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) +#endif + +/*****************************************************************************/ +#ifndef rtc_time_to_tm +#define rtc_time_to_tm rtc_time64_to_tm +#endif +#define HAVE_NDO_TX_TIMEOUT_TXQ + +/* ************************************************************************ */ + +#define HAVE_ENCAPSULATION_TSO +#define HAVE_ENCAPSULATION_CSUM + +#ifndef eth_zero_addr +static inline void __kc_eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) __kc_eth_zero_addr(_addr) +#endif + +#ifndef netdev_hw_addr_list_for_each +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) +#endif + +#define spin_lock_deinit(lock) + +#define destroy_work(work) + +#ifndef HAVE_TIMER_SETUP +void initialize_timer(const void *adapter_hdl, struct timer_list *timer); +#endif + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) 
\ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +#define destroy_completion(completion) +#define sema_deinit(lock) +#define mutex_deinit(lock) +#define rwlock_deinit(lock) + +#define tasklet_state(tasklet) ((tasklet)->state) + +#ifndef hash_init +#define HASH_SIZE(name) (ARRAY_SIZE(name)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) +#endif + +#ifndef FIELD_SIZEOF +#define FIELD_SIZEOF sizeof_field +#endif + +#ifndef skb_frag_off_add +#define skb_frag_off_add(frag, delta) \ + ((frag)->page_offset += (unsigned short)(delta)) +#endif + +#ifdef ETHTOOL_GMODULEEEPROM +#ifndef ETH_MODULE_SFF_8472 +#define ETH_MODULE_SFF_8472 0x2 +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8472_LEN +#define ETH_MODULE_SFF_8472_LEN 512 +#endif +#ifndef ETH_MODULE_SFF_8636_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#endif +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif +#endif + +#ifndef ETHTOOL_LINK_MODE_50000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_50000baseKR_Full_BIT 52 +#define ETHTOOL_LINK_MODE_50000baseCR_Full_BIT 54 +#define ETHTOOL_LINK_MODE_50000baseSR_Full_BIT 53 +#endif + +#ifndef ETHTOOL_LINK_MODE_100000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR_Full_BIT 75 +#define ETHTOOL_LINK_MODE_100000baseCR_Full_BIT 78 +#define ETHTOOL_LINK_MODE_100000baseSR_Full_BIT 76 +#endif + +#ifndef ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT 57 +#define ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT 59 +#define ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT 58 +#endif + +#ifndef ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT +#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62 +#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63 +#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 66 +#endif + +#ifndef ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT +#define ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT 80 +#define ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT 81 +#define ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT 84 +#endif + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#define HAVE_XDP_QUERY_PROG + +#endif +/* ************************************************************************ */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..9a2bf99f0b3c37a6a5ea0ac16bb3c56cc057608f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_hw.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_HW_H +#define SSS_HW_H + +#include "sss_hw_aeq.h" +#include "sss_hw_ceq.h" +#include "sss_hw_ctrlq.h" +#include "sss_hw_common.h" +#include "sss_hw_event.h" +#include "sss_hw_export.h" +#include "sss_hw_irq.h" +#include "sss_hw_mbx.h" +#include "sss_hw_mbx_msg.h" +#include "sss_hw_mgmt.h" +#include "sss_hw_sriov.h" +#include "sss_hw_statistics.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_wq.h" +#include "sss_hwif_export.h" + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..19b2aa3b7fa3eeb9ac0c7abf9f5bea0483d4dd90 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_kernel.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_KERNEL_H +#define SSS_KERNEL_H + +#include "sss_linux_kernel.h" + +#define sdk_err(dev, format, ...) dev_err(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) dev_warn(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) dev_notice(dev, "[BASE]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) dev_info(dev, "[BASE]" format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN +#define USEC_PER_MSEC 1000L +#define MSEC_PER_SEC 1000L + +#endif /* SSS_KERNEL_H */ diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h new file mode 100644 index 0000000000000000000000000000000000000000..633bd3850af9c973b4e3197f019edb695150919e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_tool_comm.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_COMM_H +#define SSS_TOOL_COMM_H + +#define tool_err(format, ...) pr_err(format, ##__VA_ARGS__) +#define tool_warn(format, ...) pr_warn(format, ##__VA_ARGS__) +#define tool_info(format, ...) 
pr_info(format, ##__VA_ARGS__) + +#define SSS_TOOL_SHOW_ITEM_LEN 32 + +#define SSS_TOOL_VERSION_INFO_LEN 128 + +#define SSS_TOOL_EPERM 1 /* Operation not permitted */ +#define SSS_TOOL_EIO 2 /* I/O error */ +#define SSS_TOOL_EINVAL 3 /* Invalid argument */ +#define SSS_TOOL_EBUSY 4 /* Device or resource busy */ +#define SSS_TOOL_EOPNOTSUPP 0xFF /* Operation not supported */ + +enum sss_tool_driver_cmd_type { + SSS_TOOL_GET_TX_INFO = 1, + SSS_TOOL_GET_Q_NUM, + SSS_TOOL_GET_TX_WQE_INFO, + SSS_TOOL_TX_MAPPING, + SSS_TOOL_GET_RX_INFO, + SSS_TOOL_GET_RX_WQE_INFO, + SSS_TOOL_GET_RX_CQE_INFO, + SSS_TOOL_UPRINT_FUNC_EN, + SSS_TOOL_UPRINT_FUNC_RESET, + SSS_TOOL_UPRINT_SET_PATH, + SSS_TOOL_UPRINT_GET_STATISTICS, + SSS_TOOL_FUNC_TYPE, + SSS_TOOL_GET_FUNC_IDX, + SSS_TOOL_GET_INTER_NUM, + SSS_TOOL_CLOSE_TX_STREAM, + SSS_TOOL_GET_DRV_VERSION, + SSS_TOOL_CLEAR_FUNC_STATS, + SSS_TOOL_GET_HW_STATS, + SSS_TOOL_CLEAR_HW_STATS, + SSS_TOOL_GET_SELF_TEST_RES, + SSS_TOOL_GET_CHIP_FAULT_STATS, + SSS_TOOL_NIC_RSVD1, + SSS_TOOL_NIC_RSVD2, + SSS_TOOL_NIC_RSVD3, + SSS_TOOL_GET_CHIP_ID, + SSS_TOOL_GET_SINGLE_CARD_INFO, + SSS_TOOL_GET_FIRMWARE_ACTIVE_STATUS, + SSS_TOOL_ROCE_DFX_FUNC, + SSS_TOOL_GET_DEVICE_ID, + SSS_TOOL_GET_PF_DEV_INFO, + SSS_TOOL_CMD_FREE_MEM, + SSS_TOOL_GET_LOOPBACK_MODE = 32, + SSS_TOOL_SET_LOOPBACK_MODE, + SSS_TOOL_SET_LINK_MODE, + SSS_TOOL_SET_PF_BW_LIMIT, + SSS_TOOL_GET_PF_BW_LIMIT, + SSS_TOOL_ROCE_CMD, + SSS_TOOL_GET_POLL_WEIGHT, + SSS_TOOL_SET_POLL_WEIGHT, + SSS_TOOL_GET_HOMOLOGUE, + SSS_TOOL_SET_HOMOLOGUE, + SSS_TOOL_GET_SSET_COUNT, + SSS_TOOL_GET_SSET_ITEMS, + SSS_TOOL_IS_DRV_IN_VM, + SSS_TOOL_LRO_ADPT_MGMT, + SSS_TOOL_SET_INTER_COAL_PARAM, + SSS_TOOL_GET_INTER_COAL_PARAM, + SSS_TOOL_GET_CHIP_INFO, + SSS_TOOL_GET_NIC_STATS_LEN, + SSS_TOOL_GET_NIC_STATS_STRING, + SSS_TOOL_GET_NIC_STATS_INFO, + SSS_TOOL_GET_PF_ID, + SSS_TOOL_NIC_RSVD4, + SSS_TOOL_NIC_RSVD5, + SSS_TOOL_DCB_QOS_INFO, + SSS_TOOL_DCB_PFC_STATE, + SSS_TOOL_DCB_ETS_STATE, + SSS_TOOL_DCB_STATE, + SSS_TOOL_QOS_DEV, + SSS_TOOL_GET_QOS_COS, + SSS_TOOL_GET_ULD_DEV_NAME, + SSS_TOOL_GET_TX_TIMEOUT, + SSS_TOOL_SET_TX_TIMEOUT, + + SSS_TOOL_RSS_CFG = 0x40, + SSS_TOOL_RSS_INDIR, + SSS_TOOL_PORT_ID, + + SSS_TOOL_GET_FUNC_CAP = 0x50, + SSS_TOOL_GET_XSFP_PRESENT = 0x51, + SSS_TOOL_GET_XSFP_INFO = 0x52, + SSS_TOOL_DEV_NAME_TEST = 0x53, + + SSS_TOOL_GET_WIN_STAT = 0x60, + SSS_TOOL_WIN_CSR_READ = 0x61, + SSS_TOOL_WIN_CSR_WRITE = 0x62, + SSS_TOOL_WIN_API_CMD_RD = 0x63, + + SSS_TOOL_VM_COMPAT_TEST = 0xFF +}; + +struct sss_tool_show_item { + char name[SSS_TOOL_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; + u64 value; +}; + +struct sss_tool_drv_version_info { + char ver[SSS_TOOL_VERSION_INFO_LEN]; +}; + +#endif /* _SSS_NIC_MT_H_ */ + diff --git a/drivers/net/ethernet/3snic/sssnic/include/sss_version.h b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h new file mode 100644 index 0000000000000000000000000000000000000000..6b6edef780d97bb882ddc3d99e4df42a4ff46b80 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/include/sss_version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_VERSION_H +#define SSS_VERSION_H + +#define SSS_VERSION_STR "1.1.0.0" + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/Makefile b/drivers/net/ethernet/3snic/sssnic/nic/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..677234f13b6dda8c041cd8e34c3c894c36d8f81f --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/nic/Makefile @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 3SNIC +# + +SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S) +ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\" + +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/include/kernel +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/tool +ccflags-y += -I$(srctree)/drivers/net/ethernet/3snic/sssnic/nic/include + +ccflags-y += -Werror + +obj-$(CONFIG_SSSNIC) += sssnic.o +sssnic-y := sss_nic_main.o \ + sss_nic_tx.o \ + sss_nic_tx_init.o \ + sss_nic_rx.o \ + sss_nic_rx_init.o \ + sss_nic_rx_reset.o \ + sss_nic_rss.o \ + sss_nic_ntuple.o \ + sss_nic_dcb.o \ + sss_nic_ethtool.o \ + sss_nic_ethtool_api.o \ + sss_nic_ethtool_stats.o \ + sss_nic_ethtool_stats_api.o \ + sss_nic_irq.o \ + sss_nic_filter.o \ + sss_nic_netdev_ops.o \ + sss_nic_cfg.o \ + sss_nic_mag_cfg.o \ + sss_nic_vf_cfg.o \ + sss_nic_rss_cfg.o \ + sss_nic_event.o \ + sss_nic_io.o \ + sss_nic_netdev_ops_api.o \ + ./tool/sss_tool_nic_func.o \ + ./tool/sss_tool_nic_dcb.o \ + ./tool/sss_tool_nic_phy_attr.o \ + ./tool/sss_tool_nic_qp_info.o \ + ./tool/sss_tool_nic_stats.o diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h new file mode 100644 index 0000000000000000000000000000000000000000..21b4612f06860ffd05e1a1c0a0756b61903a9dd4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_define.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_DEFINE_H +#define SSS_NIC_CFG_DEFINE_H + +#include "sss_hw_mbx_msg.h" +#include "sss_nic_cfg_mag_define.h" +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_cfg_rss_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tcam_define.h" + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif + +#define SSSNIC_MBX_OPCODE_SET 1 +#define SSSNIC_MBX_OPCODE_GET 0 + +#define SSSNIC_MBX_OPCODE_ADD 1 +#define SSSNIC_MBX_OPCODE_DEL 0 + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif + +#define SSSNIC_MIN_MTU_SIZE 256 + +#define SSSNIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define SSSNIC_PF_SET_VF_ALREADY 0x4 + +#define SSSNIC_LOWEST_LATENCY 1 + +#define SSSNIC_MAX_FEATURE_QWORD 4 + +#define SSSNIC_MBX_OPCODE_GET_DCB_STATE 0 +#define SSSNIC_MBX_OPCODE_SET_DCB_STATE 1 +#define SSSNIC_DCB_STATE_DISABLE 0 +#define SSSNIC_DCB_STATE_ENABLE 1 + +#define SSSNIC_STD_SFP_INFO_MAX_SIZE 640 + +#define SSSNIC_BIOS_SIGNATURE 0x1923E518 +#define SSSNIC_BIOS_FUN_VALID 1 +#define SSSNIC_BIOS_FUN_INVALID 0 + +enum sss_nic_func_tbl_cfg_type { + SSSNIC_FUNC_CFG_TYPE_INIT, + SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE, + SSSNIC_FUNC_CFG_TYPE_MTU, +}; + +enum sss_nic_feature_cap { + SSSNIC_F_CSUM = BIT(0), + SSSNIC_F_SCTP_CRC = BIT(1), + SSSNIC_F_TSO = BIT(2), + SSSNIC_F_LRO = BIT(3), + SSSNIC_F_UFO = BIT(4), + SSSNIC_F_RSS = BIT(5), + SSSNIC_F_RX_VLAN_FILTER = BIT(6), + SSSNIC_F_RX_VLAN_STRIP = BIT(7), + SSSNIC_F_TX_VLAN_INSERT = BIT(8), + SSSNIC_F_VXLAN_OFFLOAD = BIT(9), + SSSNIC_F_IPSEC_OFFLOAD = BIT(10), + SSSNIC_F_FDIR = BIT(11), + SSSNIC_F_PROMISC = BIT(12), + SSSNIC_F_ALLMULTI = BIT(13), + SSSNIC_F_XSFP_REPORT = BIT(14), + SSSNIC_F_VF_MAC = BIT(15), + SSSNIC_F_RATE_LIMIT = BIT(16), + SSSNIC_F_RXQ_RECOVERY 
= BIT(17), +}; + +/* BIOS CONF */ +enum { + SSSNIC_NVM_PF_SPEED_LIMIT = BIT(6), +}; + +/* Commands between NIC to MPU */ +enum sss_nic_mbx_opcode { + SSSNIC_MBX_OPCODE_VF_REGISTER = 0, /* only for PFD and VFD */ + + /* FUNC CFG */ + SSSNIC_MBX_OPCODE_SET_FUNC_TBL = 5, + SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + SSSNIC_MBX_OPCODE_SET_RX_MODE, + SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET, + SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAN_VPORT_STAT, + SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + SSSNIC_MBX_OPCODE_CFG_FLEX_QUEUE, + /* LRO CFG */ + SSSNIC_MBX_OPCODE_CFG_RX_LRO, + SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + SSSNIC_MBX_OPCODE_FEATURE_NEGO, + SSSNIC_MBX_OPCODE_CFG_LOCAL_LRO_STATE, + + SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + /* MAC & VLAN CFG */ + SSSNIC_MBX_OPCODE_GET_MAC = 20, + SSSNIC_MBX_OPCODE_SET_MAC, + SSSNIC_MBX_OPCODE_DEL_MAC, + SSSNIC_MBX_OPCODE_UPDATE_MAC, + SSSNIC_MBX_OPCODE_GET_ALL_DEFAULT_MAC, + + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + SSSNIC_MBX_OPCODE_SMAC_CHECK_STATE, + + /* SR-IOV */ + SSSNIC_MBX_OPCODE_CFG_VF_VLAN = 40, + SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + /* RATE LIMIT */ + SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + + /* RSS CFG */ + SSSNIC_MBX_OPCODE_RSS_CFG = 60, + SSSNIC_MBX_OPCODE_RSS_TEMP_MGR, + SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + + /* IP checksum error packets, enable rss quadruple hash */ + SSSNIC_MBX_OPCODE_IPCS_ERR_RSS_ENABLE_OP = 66, + + /* PPA/FDIR */ + SSSNIC_MBX_OPCODE_ADD_TC_FLOW = 80, + SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + SSSNIC_MBX_OPCODE_GET_TC_FLOW, + SSSNIC_MBX_OPCODE_FLUSH_TCAM, + SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_ENABLE_TCAM, + SSSNIC_MBX_OPCODE_GET_TCAM_BLOCK, + SSSNIC_MBX_OPCODE_CFG_PPA_TABLE_ID, + SSSNIC_MBX_OPCODE_SET_PPA_EN = 88, + SSSNIC_MBX_OPCODE_CFG_PPA_MODE, + SSSNIC_MBX_OPCODE_CFG_PPA_FLUSH, + SSSNIC_MBX_OPCODE_SET_FDIR_STATUS, + SSSNIC_MBX_OPCODE_GET_PPA_COUNTER, + + /* PORT CFG */ + SSSNIC_MBX_OPCODE_SET_PORT_ENABLE = 100, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + + SSSNIC_MBX_OPCODE_SET_PORT_CAR, + SSSNIC_MBX_OPCODE_SET_ER_DROP_PKT, + + SSSNIC_MBX_OPCODE_GET_VF_COS, + SSSNIC_MBX_OPCODE_SETUP_COS_MAPPING, + SSSNIC_MBX_OPCODE_SET_ETS, + SSSNIC_MBX_OPCODE_SET_PFC, + SSSNIC_MBX_OPCODE_QOS_ETS, + SSSNIC_MBX_OPCODE_QOS_PFC, + SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + SSSNIC_MBX_OPCODE_QOS_PORT_CFG, + SSSNIC_MBX_OPCODE_QOS_MAP_CFG, + SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE = 118, + SSSNIC_MBX_OPCODE_INQUIRT_PAUSE_CFG = 119, + + /* MISC */ + SSSNIC_MBX_OPCODE_BIOS_CFG = 120, + SSSNIC_MBX_OPCODE_SET_FIRMWARE_CUSTOM_PACKETS_MSG, + + /* BOND */ + SSSNIC_MBX_OPCODE_BOND_DEV_CREATE = 134, + SSSNIC_MBX_OPCODE_BOND_DEV_DELETE, + SSSNIC_MBX_OPCODE_BOND_DEV_OPEN_CLOSE, + SSSNIC_MBX_OPCODE_BOND_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_INFO_GET, + SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + + /* DFX */ + SSSNIC_MBX_OPCODE_GET_SM_TABLE = 140, + SSSNIC_MBX_OPCODE_RD_LINE_TBL, + + SSSNIC_MBX_OPCODE_SET_UCAPTURE_OPT = 160, + SSSNIC_MBX_OPCODE_SET_VHD_CFG, + + /* move to SSSLINK */ + SSSNIC_MBX_OPCODE_GET_PORT_STAT = 200, + SSSNIC_MBX_OPCODE_CLEAN_PORT_STAT, + SSSNIC_MBX_OPCODE_CFG_LOOPBACK_MODE, + SSSNIC_MBX_OPCODE_GET_SFP_QSFP_INFO, + SSSNIC_MBX_OPCODE_SET_SFP_STATUS, + SSSNIC_MBX_OPCODE_GET_LIGHT_MODULE_ABS, + SSSNIC_MBX_OPCODE_GET_LINK_INFO, + SSSNIC_MBX_OPCODE_CFG_AN_TYPE, + SSSNIC_MBX_OPCODE_GET_PORT_INFO, + 
SSSNIC_MBX_OPCODE_SET_LINK_SETTINGS, + SSSNIC_MBX_OPCODE_ACTIVATE_BIOS_LINK_CFG, + SSSNIC_MBX_OPCODE_RESTORE_LINK_CFG, + SSSNIC_MBX_OPCODE_SET_LINK_FOLLOW, + SSSNIC_MBX_OPCODE_GET_LINK_STATE, + SSSNIC_MBX_OPCODE_LINK_STATUS_REPORT, + SSSNIC_MBX_OPCODE_CABLE_PLUG_EVENT, + SSSNIC_MBX_OPCODE_LINK_ERR_EVENT, + SSSNIC_MBX_OPCODE_SET_LED_STATUS, + + SSSNIC_MBX_OPCODE_MAX = 256, +}; + +/* NIC CTRLQ MODE */ +enum sss_nic_ctrlq_opcode { + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX = 0, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + SSSNIC_CTRLQ_OPCODE_ARM_SQ, + SSSNIC_CTRLQ_OPCODE_ARM_RQ, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + SSSNIC_CTRLQ_OPCODE_GET_RSS_CONTEXT_TABLE, + SSSNIC_CTRLQ_OPCODE_SET_IQ_ENABLE, + SSSNIC_CTRLQ_OPCODE_SET_RQ_FLUSH = 10, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, + SSSNIC_CTRLQ_OPCODE_PPA_HASH_TABLE, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET = 13, +}; + +struct sss_nic_rq_pc_info { + u16 hw_pi; + u16 hw_ci; +}; + +struct sss_nic_rq_hw_info { + u32 func_id; + u32 num_queues; + u32 rsvd[14]; +}; + +struct sss_nic_mbx_feature_nego { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; /* 1: set, 0: get */ + u8 rsvd; + u64 feature[SSSNIC_MAX_FEATURE_QWORD]; +}; + +struct sss_nic_mbx_mac_addr { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_mac_update { + struct sss_nic_mbx_mac_addr old_mac; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct sss_nic_mbx_vport_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u8 state; /* 0--disable, 1--enable */ + u8 rsvd2[3]; +}; + +struct sss_nic_mbx_clear_qp_resource { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_invalid_qp_cache { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_mbx_port_stats_info { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; +}; + +struct sss_nic_port_stats { + u64 tx_unicast_pkts; + u64 tx_unicast_bytes; + u64 tx_multicast_pkts; + u64 tx_multicast_bytes; + u64 tx_broadcast_pkts; + u64 tx_broadcast_bytes; + + u64 rx_unicast_pkts; + u64 rx_unicast_bytes; + u64 rx_multicast_pkts; + u64 rx_multicast_bytes; + u64 rx_broadcast_pkts; + u64 rx_broadcast_bytes; + + u64 tx_discard; + u64 rx_discard; + u64 tx_err; + u64 rx_err; +}; + +struct sss_nic_mbx_port_stats { + struct sss_mgmt_msg_head head; + + u32 stats_size; + u32 rsvd1; + struct sss_nic_port_stats stats; + u64 rsvd2[6]; +}; + +struct sss_nic_func_table_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct sss_nic_mbx_set_func_table { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd; + + u32 cfg_bitmap; + struct sss_nic_func_table_cfg tbl_cfg; +}; + +struct sss_nic_mbx_intr_attr { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_id; + u32 l2nic_sqn; + u32 rsvd; + u64 ci_addr; +}; + +struct sss_nic_mbx_offload_vlan { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 vlan_offload; + u8 rsvd1[5]; +}; + +struct sss_nic_mbx_lro_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_pkt_len; /* unit is 1K */ + u8 resv2[13]; +}; + +struct sss_nic_mbx_lro_timer { + struct sss_mgmt_msg_head head; + + u8 opcode; /* 1: set timer value, 0: get timer value */ + u8 rsvd1; + u16 rsvd2; + u32 timer; +}; + +struct 
sss_nic_mbx_vf_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u8 qos; + u8 rsvd2[5]; +}; + +struct sss_nic_mbx_set_spoofchk { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 state; + u8 rsvd1; +}; + +struct sss_nic_mbx_tx_rate_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_attach_vf { + struct sss_mgmt_msg_head head; + + u8 op_register; /* 0 - unregister, 1 - register */ + u8 rsvd1[3]; + u32 extra_feature; + u8 rsvd2[32]; +}; + +struct sss_nic_mbx_vlan_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u16 vlan_id; + u16 rsvd2; +}; + +/* set vlan filter */ +struct sss_nic_mbx_vlan_filter_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 resvd[2]; + u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ +}; + +struct sss_nic_mbx_force_drop_pkt { + struct sss_mgmt_msg_head head; + + u8 port; + u8 rsvd1[3]; +}; + +struct sss_nic_mbx_set_rx_mode { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct sss_nic_mbx_rss_ctx { + struct sss_mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + u32 context; +}; + +struct sss_nic_mbx_rss_engine_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct sss_nic_mbx_rss_key_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 opcode; + u8 rsvd1; + u8 key[SSSNIC_RSS_KEY_SIZE]; +}; + +struct sss_nic_mbx_rss_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 rss_en; + u8 rq_priority_number; + u8 prio_tc[SSSNIC_DCB_COS_MAX]; + u16 qp_num; + u16 rsvd1; +}; + +struct sss_nic_mbx_vf_dcb_cfg { + struct sss_mgmt_msg_head head; + + struct sss_nic_dcb_info dcb_info; +}; + +struct sss_nic_mbx_dcb_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 port_state; /* 0 - disable, 1 - enable dcb */ + u8 rsvd[7]; +}; + +struct sss_nic_mbx_pause_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; + u16 rsvd1; + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; + u8 rsvd2[5]; +}; + +/* pfc/pause tx abnormal */ +struct sss_nic_msg_tx_pause_info { + struct sss_mgmt_msg_head head; + + u32 tx_pause_except; /* 1: abnormal, 0: normal */ + u32 except_level; + u32 rsvd; +}; + +struct sss_nic_mbx_set_tcam_state { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 tcam_enable; + u8 rsvd1; + u32 rsvd2; +}; + +/* alloc tcam block output struct */ +struct sss_nic_mbx_tcam_block_cfg { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u8 alloc_en; + u8 tcam_type; /* 0: 16 size tcam block, 1: 0 size tcam block */ + u16 tcam_block_index; + u16 mpu_alloc_block_size; +}; + +struct sss_nic_mbx_flush_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; /* func_id */ + u16 rsvd; +}; + +struct sss_nic_mbx_add_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + struct sss_nic_tcam_rule_cfg rule; +}; + +struct sss_nic_mbx_del_tcam_rule { + struct sss_mgmt_msg_head head; + + u16 func_id; + u8 type; + u8 rsvd; + u32 index_start; + u32 index_num; +}; + +/* note: must be 4-byte aligned */ +struct sss_nic_bios_cfg { + u32 signature; /* check flash data valid */ + u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ + u8 extend_mode; + u8 rsvd0[2]; + u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - 
enable */ + u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ + u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ + u32 service_mode; /* refer to CHIPIF_SERVICE_MODE_x macro */ + u32 pf_bw; /* PF rate, percent 0-100 */ + u8 speed; /* enum of port speed */ + u8 auto_neg; /* 0 - invalid 1 - open 2 - close */ + u8 lanes; /* lane num */ + u8 fec; /* FEC mode, refer to enum mag_cmd_port_fec */ + u8 auto_adapt; /* 0 - invalid 1 - open 2 - close */ + u8 func_valid; /* 0 - func_id is invalid, other - func_id is valid */ + u8 func_id; + u8 sriov_en; /* SRIOV-EN: 0 - invalid, 1 - open, 2 - close */ +}; + +struct sss_nic_mbx_bios_cfg { + struct sss_mgmt_msg_head head; + u32 op_code; /* Operation Code: Bit0: 0 - read, 1 - write; Bit1-6: cfg_mask */ + struct sss_nic_bios_cfg bios_cfg; +}; + +/* LACP status update */ +struct sss_nic_msg_bond_active_info { + struct sss_mgmt_msg_head head; + u32 bond_id; + u32 bon_mmi_status; /* bond link state */ + u32 active_bitmap; /* slave port state */ + + u8 rsvd[16]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h new file mode 100644 index 0000000000000000000000000000000000000000..73bbeb34f6429f8d6d646a91bd0d8d45f27cd5b4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_mag_define.h @@ -0,0 +1,460 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_MAG_DEFINE_H +#define SSS_NIC_CFG_MAG_DEFINE_H + +#include +#include +#include + +#include "sss_hw_mbx_msg.h" + +/* * + * Definition of the NIC receiving mode + */ +#define SSSNIC_RX_MODE_UC 0x01 +#define SSSNIC_RX_MODE_MC 0x02 +#define SSSNIC_RX_MODE_BC 0x04 +#define SSSNIC_RX_MODE_MC_ALL 0x08 +#define SSSNIC_RX_MODE_PROMISC 0x10 + +#define SSSNIC_RX_RATE_LOW 200000 +#define SSSNIC_RX_COAL_TIME_LOW 25 +#define SSSNIC_RX_PENDING_LIMIT_LOW 2 + +#define SSSNIC_RX_RATE_HIGH 700000 +#define SSSNIC_RX_COAL_TIME_HIGH 225 +#define SSSNIC_RX_PENDING_LIMIT_HIGH 8 + +#define SSSNIC_RX_RATE_THRESH 50000 +#define SSSNIC_TX_RATE_THRESH 50000 +#define SSSNIC_RX_RATE_LOW_VM 100000 +#define SSSNIC_RX_PENDING_LIMIT_HIGH_VM 87 + +#define SSSNIC_MAX_LIMIT_BW 100 + +#define SSSNIC_MAG_OPCODE_PORT_DISABLE 0x0 +#define SSSNIC_MAG_OPCODE_TX_ENABLE 0x1 +#define SSSNIC_MAG_OPCODE_RX_ENABLE 0x2 + +#define SSSNIC_XSFP_INFO_MAX_SIZE 640 + +#define SSNSIC_PORT_PRESENT 0 +#define SSNSIC_PORT_ABSENT 1 + +enum sss_nic_valid_link_settings { + SSSNIC_LINK_SET_SPEED = 0x1, + SSSNIC_LINK_SET_AUTONEG = 0x2, + SSSNIC_LINK_SET_FEC = 0x4, +}; + +enum sss_nic_link_follow_status { + SSSNIC_LINK_FOLLOW_DEFAULT, + SSSNIC_LINK_FOLLOW_PORT, + SSSNIC_LINK_FOLLOW_SEPARATE, + SSSNIC_LINK_FOLLOW_STATUS_MAX, +}; + +/* serdes/mag message cmd define */ +enum sss_nic_mag_opcode { + SSSNIC_MAG_OPCODE_SERDES_PROCESS = 0, + + /* port configure, 0-29 */ + SSSNIC_MAG_OPCODE_SET_PORT_CFG = 1, + SSSNIC_MAG_OPCODE_SET_PORT_ADAPT = 2, + SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE = 3, + + SSSNIC_MAG_OPCODE_GET_PORT_ENABLE = 5, + SSSNIC_MAG_OPCODE_SET_PORT_ENABLE = 6, + SSSNIC_MAG_OPCODE_LINK_STATUS = 7, + SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW = 8, + SSSNIC_MAG_OPCODE_SET_PMA_ENABLE = 9, + SSSNIC_MAG_OPCODE_CFG_FEC_MODE = 10, + + SSSNIC_MAG_OPCODE_CFG_AN_TYPE = 12, /* reserved for future use */ + SSSNIC_MAG_OPCODE_CFG_LINK_TIME = 13, + + /* bios link, 30-49 */ + SSSNIC_MAG_OPCODE_CFG_BIOS_LINK_CFG = 31, + SSSNIC_MAG_OPCODE_RESTORE_LINK_CFG = 32, + SSSNIC_MAG_OPCODE_ACTIVATE_BIOS_LINK_CFG = 33, + + 
/* LED */ + SSSNIC_MAG_OPCODE_SET_LED_CFG = 50, + + /* PHY */ + SSSNIC_MAG_OPCODE_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ + + /* sfp */ + SSSNIC_MAG_OPCODE_GET_XSFP_INFO = 60, + SSSNIC_MAG_OPCODE_SET_XSFP_ENABLE = 61, + SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT = 62, + /* sfp/qsfp single byte read/write, for equipment test */ + SSSNIC_MAG_OPCODE_SET_XSFP_RW = 63, + SSSNIC_MAG_OPCODE_CFG_XSFP_TEMPERATURE = 64, + + /* event 100-149 */ + SSSNIC_MAG_OPCODE_WIRE_EVENT = 100, + SSSNIC_MAG_OPCODE_LINK_ERR_EVENT = 101, + + /* DFX, counters */ + SSSNIC_MAG_OPCODE_EVENT_PORT_INFO = 150, + SSSNIC_MAG_OPCODE_GET_PORT_STAT = 151, + SSSNIC_MAG_OPCODE_CLR_PORT_STAT = 152, + SSSNIC_MAG_OPCODE_GET_PORT_INFO = 153, + SSSNIC_MAG_OPCODE_GET_PCS_ERR_CNT = 154, + SSSNIC_MAG_OPCODE_GET_MAG_CNT = 155, + SSSNIC_MAG_OPCODE_DUMP_ANTRAIN_INFO = 156, + + SSSNIC_MAG_OPCODE_MAX = 0xFF +}; + +enum sss_nic_mag_opcode_port_speed { + SSSNIC_PORT_SPEED_NOT_SET = 0, + SSSNIC_PORT_SPEED_10MB = 1, + SSSNIC_PORT_SPEED_100MB = 2, + SSSNIC_PORT_SPEED_1GB = 3, + SSSNIC_PORT_SPEED_10GB = 4, + SSSNIC_PORT_SPEED_25GB = 5, + SSSNIC_PORT_SPEED_40GB = 6, + SSSNIC_PORT_SPEED_50GB = 7, + SSSNIC_PORT_SPEED_100GB = 8, + SSSNIC_PORT_SPEED_200GB = 9, + SSSNIC_PORT_SPEED_UNKNOWN +}; + +enum sss_nic_mag_opcode_port_an { + SSSNIC_PORT_AN_NOT_SET = 0, + SSSNIC_PORT_CFG_AN_ON = 1, + SSSNIC_PORT_CFG_AN_OFF = 2 +}; + +/* mag supported/advertised link mode bitmap */ +enum mag_cmd_link_mode { + SSSNIC_LINK_MODE_GE = 0, + SSSNIC_LINK_MODE_10GE_BASE_R = 1, + SSSNIC_LINK_MODE_25GE_BASE_R = 2, + SSSNIC_LINK_MODE_40GE_BASE_R4 = 3, + SSSNIC_LINK_MODE_50GE_BASE_R = 4, + SSSNIC_LINK_MODE_50GE_BASE_R2 = 5, + SSSNIC_LINK_MODE_100GE_BASE_R = 6, + SSSNIC_LINK_MODE_100GE_BASE_R2 = 7, + SSSNIC_LINK_MODE_100GE_BASE_R4 = 8, + SSSNIC_LINK_MODE_200GE_BASE_R2 = 9, + SSSNIC_LINK_MODE_200GE_BASE_R4 = 10, + SSSNIC_LINK_MODE_MAX_NUMBERS, + + SSSNIC_LINK_MODE_UNKNOWN = 0xFFFF +}; + +/* led type */ +enum sss_nic_mag_led_type { + SSSNIC_MAG_LED_TYPE_ALARM = 0x0, + SSSNIC_MAG_LED_TYPE_LOW_SPEED = 0x1, + SSSNIC_MAG_LED_TYPE_HIGH_SPEED = 0x2 +}; + +/* led mode */ +enum sss_nic_mag_led_mode { + SSSNIC_MAG_LED_DEFAULT = 0x0, + SSSNIC_MAG_LED_FORCE_ON = 0x1, + SSSNIC_MAG_LED_FORCE_OFF = 0x2, + SSSNIC_MAG_LED_FORCE_BLINK_1HZ = 0x3, + SSSNIC_MAG_LED_FORCE_BLINK_2HZ = 0x4, + SSSNIC_MAG_LED_FORCE_BLINK_4HZ = 0x5, + SSSNIC_MAG_LED_1HZ = 0x6, + SSSNIC_MAG_LED_2HZ = 0x7, + SSSNIC_MAG_LED_4HZ = 0x8 +}; + +/* xsfp wire type, refer to CMIS protocol definition */ +enum sss_nic_mag_wire_type { + SSSNIC_MAG_WIRE_TYPE_UNKNOWN = 0x0, + SSSNIC_MAG_WIRE_TYPE_MM = 0x1, + SSSNIC_MAG_WIRE_TYPE_SM = 0x2, + SSSNIC_MAG_WIRE_TYPE_COPPER = 0x3, + SSSNIC_MAG_WIRE_TYPE_ACC = 0x4, + SSSNIC_MAG_WIRE_TYPE_BASET = 0x5, + SSSNIC_MAG_WIRE_TYPE_AOC = 0x40, + SSSNIC_MAG_WIRE_TYPE_ELECTRIC = 0x41, + SSSNIC_MAG_WIRE_TYPE_BACKPLANE = 0x42 +}; + +enum sss_nic_link_status { + SSSNIC_LINK_DOWN = 0, + SSSNIC_LINK_UP +}; + +struct sss_nic_link_ksettings { + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +struct sss_nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u32 supported_mode; + u32 advertised_mode; +}; + +struct sss_nic_pause_cfg { + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; +}; + +struct sss_nic_mbx_mag_set_port_cfg { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 config_bitmap; + u8 speed; + u8 autoneg; + u8 fec; + u8 lanes; + 
u8 rsvd1[20]; +}; + +struct sss_nic_mbx_get_port_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u8 wire_type; + u8 an_support; + u8 an_en; + u8 duplex; + + u8 speed; + u8 fec; + u8 lanes; + u8 rsvd1; + + u32 supported_mode; + u32 advertised_mode; + u8 rsvd2[8]; +}; + +struct sss_nic_mbx_loopback_mode { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get loopback mode 1:set loopback mode */ + u8 mode; + u8 en; /* 0:disable 1:enable */ + + u32 rsvd0[2]; +}; + +struct sss_nic_mbx_set_port_mag_state { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd1[3]; +}; + +/* the physical port disable link follow only when all pf of the port are set to follow disable */ +struct sss_nic_mbx_set_link_follow { + struct sss_mgmt_msg_head head; + + u16 function_id; /* function_id should not more than the max support pf_id(32) */ + u16 rsvd0; + + u8 follow; + u8 rsvd1[3]; +}; + +/* firmware also use this cmd report link event to driver */ +struct sss_nic_mbx_get_link_state { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:link down 1:link up */ + u8 rsvd0[2]; +}; + +/* the led is report alarm when any pf of the port is alram */ +struct sss_nic_mbx_set_led_cfg { + struct sss_mgmt_msg_head head; + + u16 function_id; + u8 type; + u8 mode; +}; + +struct sss_nic_mbx_get_xsfp_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 wire_type; + u16 out_len; + u32 rsvd; + u8 sfp_info[SSSNIC_XSFP_INFO_MAX_SIZE]; +}; + +struct sss_nic_mbx_get_xsfp_present { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_cache_port_sfp { + u8 mpu_send_sfp_info; + u8 mpu_send_sfp_abs; + u8 rsvd[2]; + struct sss_nic_mbx_get_xsfp_info std_sfp_info; + struct sss_nic_mbx_get_xsfp_present abs; +}; + +/* xsfp plug event */ +struct sss_nic_mag_wire_event { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +struct sss_nic_mag_port_stats { + u64 tx_fragment_pkts; + u64 tx_undersize_pkts; + u64 tx_undermin_pkts; + u64 tx_64_oct_pkts; + u64 tx_65_127_oct_pkts; + u64 tx_128_255_oct_pkts; + u64 tx_256_511_oct_pkts; + u64 tx_512_1023_oct_pkts; + u64 tx_1024_1518_oct_pkts; + u64 tx_1519_2047_oct_pkts; + u64 tx_2048_4095_oct_pkts; + u64 tx_4096_8191_oct_pkts; + u64 tx_8192_9216_oct_pkts; + u64 tx_9217_12287_oct_pkts; + u64 tx_12288_16383_oct_pkts; + u64 tx_1519_max_bad_pkts; + u64 tx_1519_max_good_pkts; + u64 tx_oversize_pkts; + u64 tx_jabber_pkts; + u64 tx_bad_pkts; + u64 tx_bad_octs; + u64 tx_good_pkts; + u64 tx_good_octs; + u64 tx_total_pkts; + u64 tx_total_octs; + u64 tx_uni_pkts; + u64 tx_multi_pkts; + u64 tx_broad_pkts; + u64 tx_pauses; + u64 tx_pfc_pkts; + u64 tx_pfc_pri0_pkts; + u64 tx_pfc_pri1_pkts; + u64 tx_pfc_pri2_pkts; + u64 tx_pfc_pri3_pkts; + u64 tx_pfc_pri4_pkts; + u64 tx_pfc_pri5_pkts; + u64 tx_pfc_pri6_pkts; + u64 tx_pfc_pri7_pkts; + u64 tx_control_pkts; + u64 tx_err_all_pkts; + u64 tx_from_app_good_pkts; + u64 tx_from_app_bad_pkts; + + u64 rx_fragment_pkts; + u64 rx_undersize_pkts; + u64 rx_undermin_pkts; + u64 rx_64_oct_pkts; + u64 rx_65_127_oct_pkts; + u64 rx_128_255_oct_pkts; + u64 rx_256_511_oct_pkts; + u64 rx_512_1023_oct_pkts; + u64 rx_1024_1518_oct_pkts; + u64 rx_1519_2047_oct_pkts; + u64 rx_2048_4095_oct_pkts; + u64 rx_4096_8191_oct_pkts; + u64 rx_8192_9216_oct_pkts; + u64 
rx_9217_12287_oct_pkts; + u64 rx_12288_16383_oct_pkts; + u64 rx_1519_max_bad_pkts; + u64 rx_1519_max_good_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_bad_pkts; + u64 rx_bad_octs; + u64 rx_good_pkts; + u64 rx_good_octs; + u64 rx_total_pkts; + u64 rx_total_octs; + u64 rx_uni_pkts; + u64 rx_multi_pkts; + u64 rx_broad_pkts; + u64 rx_pauses; + u64 rx_pfc_pkts; + u64 rx_pfc_pri0_pkts; + u64 rx_pfc_pri1_pkts; + u64 rx_pfc_pri2_pkts; + u64 rx_pfc_pri3_pkts; + u64 rx_pfc_pri4_pkts; + u64 rx_pfc_pri5_pkts; + u64 rx_pfc_pri6_pkts; + u64 rx_pfc_pri7_pkts; + u64 rx_control_pkts; + u64 rx_sym_err_pkts; + u64 rx_fcs_err_pkts; + u64 rx_send_app_good_pkts; + u64 rx_send_app_bad_pkts; + u64 rx_unfilter_pkts; +}; + +struct sss_nic_mbx_mag_port_stats_info { + struct sss_mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct sss_nic_mbx_mag_port_stats { + struct sss_mgmt_msg_head head; + + struct sss_nic_mag_port_stats counter; + u64 rsvd1[15]; +}; + +struct sss_nic_mag_cfg { + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + u8 pause_set; + u8 rsvd1[3]; + struct sss_nic_pause_cfg nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd2[2]; + + struct sss_nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; + + struct sss_nic_cache_port_sfp rt_cmd; + struct mutex sfp_mutex; /* mutex used for copy sfp info */ +}; + +#define SSSNIC_PF_LIMIT_BW_MAX 100 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h new file mode 100644 index 0000000000000000000000000000000000000000..adfb3eae339618f7cce3c9853bf6865450dc2ec4 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_rss_define.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_RSS_DEFINE_H +#define SSS_NIC_CFG_RSS_DEFINE_H + +#include + +/* rss */ +#define SSSNIC_RSS_TYPE_VALID_SHIFT 23 +#define SSSNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define SSSNIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define SSSNIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define SSSNIC_RSS_TYPE_IPV6_SHIFT 27 +#define SSSNIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define SSSNIC_RSS_TYPE_IPV4_SHIFT 29 +#define SSSNIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define SSSNIC_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define SSSNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SSSNIC_RSS_TYPE_##member##_SHIFT) +#define SSSNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SSSNIC_RSS_TYPE_##member##_SHIFT) & 0x1) + +#define SSSNIC_RSS_KEY_RSV_NUM 2 + +#define SSSNIC_RSS_INDIR_SIZE 256 +#define SSSNIC_RSS_KEY_SIZE 40 + +enum sss_nic_rss_hash_engine_type { + SSSNIC_RSS_ENGINE_XOR = 0, + SSSNIC_RSS_ENGINE_TOEP, + SSSNIC_RSS_ENGINE_MAX, +}; + +struct sss_nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +/* rss */ +struct sss_nic_rss_indirect_table { + u32 rsvd[4]; /* Make sure that 16B beyond entry[] */ + u16 entry[SSSNIC_RSS_INDIR_SIZE]; +}; + +struct sss_nic_rss_ctx_table { + u32 rsvd[4]; + u32 ctx; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h new file mode 100644 index 0000000000000000000000000000000000000000..b9aaa38104a005387f07b4419575d16d806ad049 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_cfg_vf_define.h @@ -0,0 +1,27 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_VF_DEFINE_H +#define SSS_NIC_CFG_VF_DEFINE_H + +#include + +#define SSSNIC_OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define SSSNIC_HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define SSSNIC_VLAN_PRIORITY_SHIFT 13 + +#define SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF + +#define SSSNIC_GET_VLAN_PRIO(vlan, qos) \ + ((u16)((vlan) | ((qos) << SSSNIC_VLAN_PRIORITY_SHIFT))) + +struct sss_nic_vlan_ctx { + u32 func_id; + u32 qid; /* if qid = 0xFFFF, config current function all queue */ + u32 tag; + u32 mode; + u32 sel; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h new file mode 100644 index 0000000000000000000000000000000000000000..3924d9f9b8eead3e866d603adf96db25451a1c89 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_common.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_COMMON_H +#define SSS_NIC_COMMON_H + +#include + +#include "sss_kernel.h" +#include "sss_version.h" + +#define SSSNIC_DRV_NAME "sssnic" +#define SSSNIC_DRV_VERSION SSS_VERSION_STR + +#define SSSNIC_FUNC_IS_VF(hwdev) (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + +#define SSSNIC_MODERATONE_DELAY HZ + +#define SSSNIC_LP_PKT_CNT 64 + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h new file mode 100644 index 0000000000000000000000000000000000000000..946928c7199de019b657ca03bc9787269a88d397 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dcb_define.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_DEFINE_H +#define SSS_NIC_DCB_DEFINE_H + +#include + +#define SSSNIC_PCP_UP_MAX 8 +#define SSSNIC_DSCP_MAC_UP 64 + +/* IEEE 802.1Qaz std */ +#define SSSNIC_DCB_COS_MAX 0x8 +#define SSSNIC_DCB_UP_MAX 0x8 +#define SSSNIC_DCB_TC_MAX 0x8 +#define SSSNIC_DCB_PG_MAX 0x8 +#define SSSNIC_DCB_TSA_SP 0x0 +#define SSSNIC_DCB_TSA_CBS 0x1 +#define SSSNIC_DCB_TSA_ETS 0x2 +#define SSSNIC_DCB_DSCP_NUM 0x8 +#define SSSNIC_DCB_IP_PRI_MAX 0x40 + +#define SSSNIC_DCB_PRIO_DWRR 0x0 +#define SSSNIC_DCB_PRIO_STRICT 0x1 + +#define SSSNIC_DCB_MAX_PFC_NUM 0x4 + +struct sss_nic_dcb_config { + u8 trust; /* pcp, dscp */ + u8 default_cos; + u8 pcp_user_cos_num; + u8 pcp_valid_cos_map; + u8 dscp_user_cos_num; + u8 dscp_valid_cos_map; + u8 pcp2cos[SSSNIC_PCP_UP_MAX]; + u8 dscp2cos[SSSNIC_DSCP_MAC_UP]; + + u8 cos_qp_offset[SSSNIC_DCB_COS_MAX]; + u8 cos_qp_num[SSSNIC_DCB_COS_MAX]; +}; + +struct sss_nic_dcb_info { + u8 dcb_on; + u8 default_cos; + u8 trust; + u8 rsvd1; + u8 pcp2cos[SSSNIC_DCB_UP_MAX]; + u8 dscp2cos[64]; + u32 rsvd2[7]; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h new file mode 100644 index 0000000000000000000000000000000000000000..adf6b92b96168858f795ba1d99c353fb94c2095e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_dev_define.h @@ -0,0 +1,272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DEV_DEFINE_H +#define SSS_NIC_DEV_DEFINE_H + +#include +#include +#include +#include +#include +#include + +#include 
"sss_kernel.h" +#include "sss_hw_uld_driver.h" +#include "sss_hw_svc_cap.h" +#include "sss_hw_irq.h" +#include "sss_nic_common.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_tx_define.h" +#include "sss_nic_rx_define.h" +#include "sss_nic_irq_define.h" +#include "sss_nic_tcam_define.h" + +enum sss_nic_flags { + SSSNIC_INTF_UP, + SSSNIC_MAC_FILTER_CHANGED, + SSSNIC_LP_TEST, + SSSNIC_RSS_ENABLE, + SSSNIC_DCB_ENABLE, + SSSNIC_SAME_RXTX, + SSSNIC_INTR_ADAPT, + SSSNIC_UPDATE_MAC_FILTER, + SSSNIC_CHANGE_RES_INVALID, + SSSNIC_RSS_DEFAULT_INDIR, + SSSNIC_FORCE_LINK_UP, + SSSNIC_BONDING_MASTER, + SSSNIC_AUTONEG_RESET, + SSSNIC_RXQ_RECOVERY, +}; + +enum sss_nic_event_flags { + SSSNIC_EVENT_TX_TIMEOUT, +}; + +struct sss_nic_tx_stats { + u64 tx_timeout; + + /* Subdivision statistics show in private tool */ + u64 tx_drop; + u64 tx_invalid_qid; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_qp_resource { + u16 qp_num; + u8 cos_num; + u8 rsvd1; + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_sq_resource *sq_res_group; + struct sss_nic_rq_resource *rq_res_group; + struct sss_nic_irq_cfg *irq_cfg; +}; + +struct sss_nic_rx_rule { + struct list_head rule_list; + int rule_cnt; +}; + +struct sss_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct sss_hal_dev *uld_dev; + void *hwdev; + void *dev_hdl; + struct sss_nic_io *nic_io; + + int poll_budget; + + u32 msg_enable; + + unsigned long flags; + unsigned long event_flag; + unsigned long dcb_flags; + unsigned long rx_mode; + + u32 rx_poll_wqe; + + u32 rx_dma_buff_size; + u16 rx_buff_len; + + u16 max_qp_num; + + u32 page_order; + + /* Rss related varibles */ + u8 rss_hash_engine; + u8 rsvd1[3]; + u8 *rss_key; + u32 *rss_key_big; /* hkey in big endian */ + u32 *rss_indir_tbl; + struct sss_nic_rss_type rss_type; + + u8 max_cos_num; + u8 dft_func_cos_bitmap; + u16 dft_port_cos_bitmap; + + int disable_port_cnt; + + unsigned long last_jiffies; + + u32 use_adaptive_rx_coalesce; + u32 rsvd2; + + struct sss_nic_intr_coal_info *coal_info; + struct workqueue_struct *workq; + + int netdev_uc_cnt; + int netdev_mc_cnt; + + int loop_test_rx_cnt; + int loop_pkt_len; + u8 *loop_test_rx_buf; + + struct sss_irq_desc *irq_desc_group; + u16 irq_desc_num; + + u8 link_status; + + u8 rsvd3; + + u32 get_rq_fail_cnt; + + struct sss_nic_tx_stats tx_stats; + + struct sss_nic_sq_desc *sq_desc_group; + struct sss_nic_rq_desc *rq_desc_group; + + struct sss_nic_qp_resource qp_res; + + struct delayed_work routine_work; + struct delayed_work rq_watchdog_work; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + + unsigned long *vlan_bitmap; +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + /* lock for qp_res,qp_info access */ + struct mutex qp_mutex; + struct semaphore port_sem; + + struct work_struct rx_mode_work; + + struct delayed_work moderation_task; + + struct sss_nic_dcb_config hw_dcb_cfg; + struct sss_nic_dcb_config backup_dcb_cfg; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct sss_nic_tcam_info tcam_info; + struct sss_nic_rx_rule rx_rule; + + struct sss_nic_service_cap nic_svc_cap; + +}; + +#define SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, flag) \ + test_bit(flag, &(nic_dev)->flags) +#define SSSNIC_SET_NIC_DEV_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->flags) +#define SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + 
clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_clear_bit(flag, &(nic_dev)->flags) +#define SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, flag) \ + test_and_set_bit(flag, &(nic_dev)->flags) + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_IS_XDP_ENABLE(nic_dev) (!!(nic_dev)->xdp_prog) +#endif + +#define SSS_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(SSSNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) + +#define SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) +#define SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) (SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) +#define SSSNIC_VLAN_NUM_BITMAP(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + SSSNIC_VLAN_BITMAP_BYTE_SIZE(nic_dev)) +#define SSSNIC_VID_LINE(nic_dev, vid) ((vid) / SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev)) +#define SSSNIC_VID_COL(nic_dev, vid) ((vid) & (SSSNIC_VLAN_BITMAP_BIT_SIZE(nic_dev) - 1)) +#define SSSNIC_TEST_VLAN_BIT(nic_dev, vid) \ + ((nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)] & \ + (1UL << SSSNIC_VID_COL(nic_dev, vid))) + +#define SSSNIC_SET_VLAN_BITMAP(nic_dev, vid) \ + set_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) +#define SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vid) \ + clear_bit(SSSNIC_VID_COL(nic_dev, vid), \ + &(nic_dev)->vlan_bitmap[SSSNIC_VID_LINE(nic_dev, vid)]) + +#define SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, flag) \ + set_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, flag) \ + test_and_clear_bit(flag, &(nic_dev)->event_flag) + +#define SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_timeout++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_DROP_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_drop++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev) \ +do { \ + typeof(nic_dev) (_nic_dev) = (nic_dev); \ + u64_stats_update_begin(&(_nic_dev)->tx_stats.stats_sync); \ + (_nic_dev)->tx_stats.tx_invalid_qid++; \ + u64_stats_update_end(&(_nic_dev)->tx_stats.stats_sync); \ +} while (0) + +#define sssnic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level(&(nic_dev)->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define sss_nic_info(nic_dev, msglvl, format, arg...) \ + sssnic_msg(info, nic_dev, msglvl, format, ## arg) + +#define sss_nic_warn(nic_dev, msglvl, format, arg...) \ + sssnic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define sss_nic_err(nic_dev, msglvl, format, arg...) 
\ + sssnic_msg(err, nic_dev, msglvl, format, ## arg) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h new file mode 100644 index 0000000000000000000000000000000000000000..32eccbe831b1ad80b4651f916a7f3a6aac67bdae --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_io_define.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IO_DEFINE_H +#define SSS_NIC_IO_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_wq.h" +#include "sss_nic_dcb_define.h" +#include "sss_nic_cfg_mag_define.h" + +struct sss_nic_vf_info { + u8 user_mac[ETH_ALEN]; + u8 drv_mac[ETH_ALEN]; + u16 qp_num; + u16 pf_vlan; + + u8 pf_qos; + u8 rsvd0[3]; + u32 extra_feature; + + u32 min_rate; + u32 max_rate; + + u8 specified_mac; + u8 attach; + u8 trust; + u8 spoofchk; + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 rsvd1[2]; +}; + +struct sss_nic_io_queue { + struct sss_wq wq; + union { + u8 wqe_type; /* for rq */ + u8 owner; /* for sq */ + }; + u8 rsvd1; + u16 rsvd2; + + u16 qid; + u16 msix_id; + + u8 __iomem *db_addr; + + union { + struct { + void *ci_addr; + } tx; + + struct { + u16 *pi_vaddr; + dma_addr_t pi_daddr; + } rx; + }; +} ____cacheline_aligned; + +struct sss_nic_io { + void *hwdev; + void *pcidev_hdl; + void *dev_hdl; + void *nic_dev; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; + + u16 active_qp_num; + u16 max_qp_num; + + u8 link_status; + u8 rsvd1[3]; + + void *ci_base_vaddr; + dma_addr_t ci_base_daddr; + + u8 __iomem *sq_db_addr; + u8 __iomem *rq_db_addr; + + u16 rx_buff_len; + u16 max_vf_num; + + struct sss_nic_vf_info *vf_info_group; + + u64 feature_cap; + + struct sss_nic_dcb_info dcb_info; + + struct sss_nic_mag_cfg mag_cfg; +}; + +struct sss_nic_qp_info { + u16 qp_num; + u8 resvd[6]; + + u32 sq_depth; + u32 rq_depth; + + struct sss_nic_io_queue *sq_group; + struct sss_nic_io_queue *rq_group; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h new file mode 100644 index 0000000000000000000000000000000000000000..b6c44d40a22d2231624fdb8e3ba5664b8bbf7ccc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_irq_define.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_DEFINE_H +#define SSS_NIC_IRQ_DEFINE_H + +#include +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_irq_cfg { + struct net_device *netdev; + u16 msix_id; /* PCIe MSIX id */ + u16 rsvd1; + u32 irq_id; /* OS IRQ id */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + void *sq; + void *rq; +}; + +struct sss_nic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer; + u8 resend_timer; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h new file mode 100644 index 0000000000000000000000000000000000000000..9da431372bbf3fc43e191987072d9216be6ca388 --- /dev/null +++ 
b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_qp_define.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_QP_DEFINE_H +#define SSS_NIC_QP_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_hw_common.h" + +struct sss_nic_cqe { + u32 state; + u32 vlan_len; + + u32 offload_type; + u32 hash; + u32 xid; + u32 decrypt_desc; + u32 rsvd6; + u32 pkt_desc; +}; + +struct sss_nic_normal_rqe { + u32 bd_hi_addr; + u32 bd_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct sss_nic_sge_section { + struct sss_sge sge; + u32 rsvd; +}; + +struct sss_nic_extend_rqe { + struct sss_nic_sge_section bd_sect; + struct sss_nic_sge_section cqe_sect; +}; + +struct sss_nic_rqe { + union { + struct sss_nic_normal_rqe normal_rqe; + struct sss_nic_extend_rqe extend_rqe; + }; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h new file mode 100644 index 0000000000000000000000000000000000000000..1ecd5d6409c9d5ce4de87c0a6222bd6672d985b8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_rx_define.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_DEFINE_H +#define SSS_NIC_RX_DEFINE_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_qp_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_irq_define.h" + +struct sss_nic_rq_stats { + u64 rx_packets; + u64 rx_bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 rx_dropped; + u64 xdp_dropped; + u64 rx_buf_errors; + + u64 alloc_rx_dma_err; + u64 alloc_skb_err; + u64 reset_drop_sge; + u64 large_xdp_pkts; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_rx_desc { + dma_addr_t buf_daddr; + dma_addr_t cqe_daddr; + struct sss_nic_rqe *rqe; + struct sss_nic_cqe *cqe; + struct page *page; + u32 page_offset; +}; + +struct sss_nic_rq_desc { + struct net_device *netdev; + struct device *dev; /* device for DMA mapping */ + + u32 irq_id; + u16 msix_id; + + u16 qid; + u32 qid_mask; + u32 q_depth; + + u32 buff_size_shift; + u32 dma_buff_size; + u16 buf_len; + u16 rsvd; + + u16 backup_pi; + u16 pi; + u32 last_sw_pi; + u32 last_sw_ci; + u32 last_hw_ci; + u32 ci; + u16 reset_pi; + u16 reset_wqe_num; + u32 delta; + + u64 last_rx_bytes; + u64 last_rx_pkts; + u64 rx_pkts; + + unsigned long status; + + u8 last_pending_limt; + u8 last_coal_timer; + + u8 print_err_cnt; + u8 check_err_cnt; + + struct sss_nic_irq_cfg *irq_cfg; + + struct sss_nic_rq_stats stats; + + struct sss_nic_rx_desc *rx_desc_group; + struct sss_nic_io_queue *rq; + +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; +#endif + + void *cqe_vaddr; + dma_addr_t cqe_paddr; +} ____cacheline_aligned; + +struct sss_nic_rq_resource { + u16 page_num; + u8 rsvd[6]; + struct sss_nic_rx_desc *rx_desc_group; + void *cqe_vaddr; + dma_addr_t cqe_paddr; +}; + +#define SSSNIC_RQ_STATS_INC(rq_desc, field) \ +do { \ + u64_stats_update_begin(&(rq_desc)->stats.stats_sync); \ + (rq_desc)->stats.field++; \ + u64_stats_update_end(&(rq_desc)->stats.stats_sync); \ +} while (0) + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h new file mode 100644 index 
0000000000000000000000000000000000000000..0a6dec9e9dc0748e86e847db6d96dd8234d50b0f --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tcam_define.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TCAM_DEFINE_H +#define SSS_NIC_TCAM_DEFINE_H + +#include +#include + +#include "sss_kernel.h" + +#define SSSNIC_TCAM_BLOCK_SIZE 16 +#define SSSNIC_TCAM_FILTERS_MAX 512 + +#define SSSNIC_PKT_TCAM_INDEX_START(block_index) \ + (SSSNIC_TCAM_BLOCK_SIZE * (block_index)) + +#define SSSNIC_TCAM_FLOW_KEY_SIZE (44) + +#define SSSNIC_TCAM_RULE_FDIR_TYPE 0 +#define SSSNIC_TCAM_RULE_PPA_TYPE 1 + +#define SSSNIC_TCAM_BLOCK_ENABLE 1 +#define SSSNIC_TCAM_BLOCK_DISABLE 0 +#define SSSNIC_TCAM_RULES_NUM_MAX 4096 + +/* tcam block type, according to tcam block size */ +enum { + SSSNIC_TCAM_BLOCK_TYPE_LARGE = 0, /* block_size: 16 */ + SSSNIC_TCAM_BLOCK_TYPE_SMALL, /* block_size: 0 */ + SSSNIC_TCAM_BLOCK_TYPE_MAX +}; + +struct sss_nic_ipv4_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv4_h : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 dipv4_h : 16; + u32 sipv4_l : 16; + u32 rsvd2 : 16; + u32 dipv4_l : 16; + u32 rsvd3; + u32 dport : 16; + u32 rsvd4 : 16; + u32 rsvd5 : 16; + u32 sport : 16; + u32 outer_sipv4_h : 16; + u32 rsvd6 : 16; + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + u32 rsvd7 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + u32 sipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + u32 sipv6_key2 : 16; + u32 sipv6_key1 : 16; + u32 sipv6_key4 : 16; + u32 sipv6_key3 : 16; + u32 sipv6_key6 : 16; + u32 sipv6_key5 : 16; + u32 dport : 16; + u32 sipv6_key7 : 16; + u32 dipv6_key0 : 16; + u32 sport : 16; + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + u32 dipv6_key6 : 16; + u32 dipv6_key5 : 16; + u32 rsvd2 : 16; + u32 dipv6_key7 : 16; +}; + +struct sss_nic_vxlan_ipv6_tcam_key { + u32 rsvd1 : 4; + u32 tunnel_type : 4; + u32 ip_proto : 8; + u32 rsvd0 : 16; + + u32 dipv6_key0 : 16; + u32 ip_type : 1; + u32 func_id : 15; + + u32 dipv6_key2 : 16; + u32 dipv6_key1 : 16; + + u32 dipv6_key4 : 16; + u32 dipv6_key3 : 16; + + u32 dipv6_key6 : 16; + u32 dipv6_key5 : 16; + + u32 dport : 16; + u32 dipv6_key7 : 16; + + u32 rsvd2 : 16; + u32 sport : 16; + + u32 outer_sipv4_h : 16; + u32 rsvd3 : 16; + + u32 outer_dipv4_h : 16; + u32 outer_sipv4_l : 16; + + u32 vni_h : 16; + u32 outer_dipv4_l : 16; + + u32 rsvd4 : 16; + u32 vni_l : 16; +}; + +struct sss_nic_tcam_key_tag { + union { + struct sss_nic_ipv4_tcam_key key_info_ipv4; + struct sss_nic_ipv6_tcam_key key_info_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_info_vxlan_ipv6; + }; + + union { + struct sss_nic_ipv4_tcam_key key_mask_ipv4; + struct sss_nic_ipv6_tcam_key key_mask_ipv6; + struct sss_nic_vxlan_ipv6_tcam_key key_mask_vxlan_ipv6; + }; +}; + +struct sss_nic_tcam_node { + struct list_head block_list; + u16 block_id; + u16 index_cnt; + u8 index_used[SSSNIC_TCAM_BLOCK_SIZE]; +}; + +struct sss_nic_tcam_node_list { + struct list_head tcam_node_list; + u16 block_cnt; +}; + +struct sss_nic_tcam_filter { + struct list_head tcam_filter_list; + u16 block_id; + u16 index; + struct sss_nic_tcam_key_tag tcam_key; + u16 qid; +}; + +/* function level struct info */ +struct sss_nic_tcam_info { + u16 tcam_rule_num; + struct list_head 
tcam_list; + struct sss_nic_tcam_node_list tcam_node_info; +}; + +struct sss_nic_tcam_result { + u32 qid; + u32 rsvd; +}; + +struct sss_nic_tcam_key { + u8 key_x[SSSNIC_TCAM_FLOW_KEY_SIZE]; + u8 key_y[SSSNIC_TCAM_FLOW_KEY_SIZE]; +}; + +struct sss_nic_tcam_rule_cfg { + u32 index; + struct sss_nic_tcam_result data; + struct sss_nic_tcam_key key; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h new file mode 100644 index 0000000000000000000000000000000000000000..b6076c87121aa06a1207e91df42db018d5ec21e5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/include/sss_nic_tx_define.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_DEFINE_H +#define SSS_NIC_TX_DEFINE_H + +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_nic_io_define.h" + +struct sss_nic_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct sss_nic_tx_desc { + struct sk_buff *skb; + + u16 wqebb_cnt; + u16 nr_frags; + + int sge_num; + u16 nr_pkt_cnt; + u16 rsvd1; + u32 rsvd2; + + u64 bytes; + struct sss_nic_dma_info *dma_group; + u64 rsvd3; +}; + +struct sss_nic_sq_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_busy; + u64 wake; + u64 tx_dropped; + + /* Subdivision statistics show in private tool */ + u64 skb_pad_err; + u64 offload_err; + u64 dma_map_err; + u64 unknown_tunnel_proto; + u64 frag_size_zero; + u64 frag_len_overflow; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync stats_sync; +#else + struct u64_stats_sync_empty stats_sync; +#endif +}; + +struct sss_nic_sq_desc { + struct net_device *netdev; + struct device *dev; + + struct sss_nic_sq_stats stats; + + u8 cos; + u8 rsvd1; + u16 qid; + u32 qid_mask; + u32 q_depth; + u32 rsvd2; + + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_io_queue *sq; + + u64 last_tx_pkts; + u64 last_tx_bytes; + u64 rsvd3; +} ____cacheline_aligned; + +struct sss_nic_sq_resource { + struct sss_nic_tx_desc *tx_desc_group; + struct sss_nic_dma_info *dma_group; +}; + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..65cb4ac9c8f9f550cd6efd746ce2d087e5691c98 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.c @@ -0,0 +1,1141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEFAULT_MAX_MTU 0x3FFF +#define SSSNIC_VLAN_ID_MASK 0x7FFF + +#define SSSNIC_INIT_FUNC_MASK \ + (BIT(SSSNIC_FUNC_CFG_TYPE_INIT) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_MTU) | \ + BIT(SSSNIC_FUNC_CFG_TYPE_RX_BUF_SIZE)) + +#define SSSNIC_MGMT_STATUS_EXIST 0x6 + +#define SSSNIC_CHECK_IPSU_15BIT 0x8000 + +#define SSSNIC_DCB_PCP 0 +#define SSSNIC_DCB_DSCP 1 + +#define SSSNIC_F_ALL_MASK 0x3FFFF /* enable all feature */ +#define SSSNIC_DRV_DEFAULT_FEATURE SSSNIC_F_ALL_MASK + +#define 
SSSNIC_UNSUPPORT_SET_PAUSE 0x10 + +#define SSSNIC_VF_SET_MAC_ALREADY(func_type, status) \ + ((func_type) == SSS_FUNC_TYPE_VF && (status) == SSSNIC_PF_SET_VF_ALREADY) + +static int sss_nic_check_mac_set_status(u32 func_type, u8 status, u16 vlan_id) +{ + if ((status != 0) && (status != SSSNIC_MGMT_STATUS_EXIST)) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + if (((vlan_id & SSSNIC_CHECK_IPSU_15BIT) != 0) && (status == SSSNIC_MGMT_STATUS_EXIST)) { + if (!SSSNIC_VF_SET_MAC_ALREADY(func_type, status)) + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN ID: %d\n", (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.vlan_id = vlan_id; + cmd_mac.func_id = func_id; + ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAC, + &cmd_mac, sizeof(cmd_mac), + &cmd_mac, &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac.head.state, cmd_mac.vlan_id) != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set MAC, state: 0x%x, channel: 0x%x\n", + cmd_mac.head.state, channel); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF mac, ignore it\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) { + nic_warn(nic_dev->dev_hdl, "Repeat mac, ignore it\n"); + return 0; + } + + return 0; +} + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + u32 func_type; + int ret; + + if (!nic_dev || !mac_addr) + return -EINVAL; + + if ((vlan_id & SSSNIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_dev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & SSSNIC_VLAN_ID_MASK)); + return -EINVAL; + } + + cmd_mac.func_id = func_id; + cmd_mac.vlan_id = vlan_id; + ether_addr_copy(cmd_mac.mac, mac_addr); + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, + &out_len, channel); + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to del MAC, ret: %d, out_len: 0x%x, channel: 0x%x\n", + ret, out_len, channel); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, "PF has already set VF mac\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac.head.state != 0) { + nic_err(nic_dev->dev_hdl, + "Fail to delete MAC, ret: %d, state: 0x%x, channel: 0x%x\n", + ret, cmd_mac.head.state, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac) +{ + int ret; + u32 func_type; + struct sss_nic_mbx_mac_update cmd_mac_update = {0}; + u16 out_len = sizeof(cmd_mac_update); + + ether_addr_copy(cmd_mac_update.new_mac, 
new_mac); + ether_addr_copy(cmd_mac_update.old_mac.mac, nic_dev->netdev->dev_addr); + cmd_mac_update.old_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_UPDATE_MAC, + &cmd_mac_update, sizeof(cmd_mac_update), + &cmd_mac_update, &out_len); + + if (ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, ret: %d, out_len: 0x%x\n", ret, out_len); + return -EIO; + } + + func_type = sss_get_func_type(nic_dev->hwdev); + if (sss_nic_check_mac_set_status(func_type, cmd_mac_update.old_mac.head.state, + cmd_mac_update.old_mac.vlan_id)) { + nic_err(nic_dev->dev_hdl, + "Fail to update MAC, state: 0x%x", cmd_mac_update.old_mac.head.state); + return -EIO; + } + + if (SSSNIC_VF_SET_MAC_ALREADY(func_type, cmd_mac_update.old_mac.head.state)) { + nic_warn(nic_dev->dev_hdl, + "PF has already set VF MAC. Ignore update\n"); + return SSSNIC_PF_SET_VF_ALREADY; + } + + if (cmd_mac_update.old_mac.head.state == SSSNIC_MGMT_STATUS_EXIST) + nic_warn(nic_dev->dev_hdl, + "MAC already exists. Ignore update\n"); + + return 0; +} + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr) +{ + struct sss_nic_mbx_mac_addr cmd_mac = {0}; + u16 out_len = sizeof(cmd_mac); + int ret; + + cmd_mac.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + &cmd_mac, sizeof(cmd_mac), &cmd_mac, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mac)) { + nic_err(nic_dev->dev_hdl, + "Fail to get MAC, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mac.head.state, out_len); + return -EINVAL; + } + + ether_addr_copy(mac_addr, cmd_mac.mac); + + return 0; +} + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id) +{ + struct sss_nic_mbx_vlan_cfg cmd_config_vlan = {0}; + u16 out_len = sizeof(cmd_config_vlan); + int ret; + + cmd_config_vlan.func_id = + sss_get_global_func_id(nic_dev->hwdev); + cmd_config_vlan.opcode = opcode; + cmd_config_vlan.vlan_id = vlan_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_FUNC_VLAN, + &cmd_config_vlan, sizeof(cmd_config_vlan), + &cmd_config_vlan, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_config_vlan)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s vlan, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSSNIC_MBX_OPCODE_ADD ? "add" : "delete", + ret, cmd_config_vlan.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool enable, u16 channel) +{ + struct sss_nic_mbx_vport_state cmd_set_vport_state = {0}; + u16 out_len = sizeof(cmd_set_vport_state); + int ret; + + cmd_set_vport_state.func_id = func_id; + cmd_set_vport_state.state = enable ? 
1 : 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VPORT_ENABLE, + &cmd_set_vport_state, sizeof(cmd_set_vport_state), + &cmd_set_vport_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_vport_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vport state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, cmd_set_vport_state.head.state, out_len, channel); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sss_nic_set_hw_vport_state); + +int sss_nic_set_dcb_info(struct sss_nic_io *nic_io, + struct sss_nic_dcb_info *dcb_info) +{ + if (memcmp(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)) == 0) + return 0; + + memcpy(&nic_io->dcb_info, dcb_info, sizeof(*dcb_info)); + + /* notify stateful in pf first, then notify all vf */ + sss_nic_notify_dcb_state_event(nic_io->hwdev, dcb_info); + + return 0; +} + +static int sss_nic_cfg_hw_pause(struct sss_nic_dev *nic_dev, + u8 opcode, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mbx_pause_cfg cmd_pause_cfg = {0}; + u16 out_len = sizeof(cmd_pause_cfg); + int ret; + + cmd_pause_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + cmd_pause_cfg.opcode = opcode; + if (opcode == SSSNIC_MBX_OPCODE_SET) { + cmd_pause_cfg.auto_neg = pause_cfg->auto_neg; + cmd_pause_cfg.rx_pause = pause_cfg->rx_pause; + cmd_pause_cfg.tx_pause = pause_cfg->tx_pause; + } + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_PAUSE_INFO, + &cmd_pause_cfg, sizeof(cmd_pause_cfg), + &cmd_pause_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_pause_cfg)) { + if (cmd_pause_cfg.head.state == SSSNIC_UNSUPPORT_SET_PAUSE) { + ret = -EOPNOTSUPP; + nic_err(nic_dev->dev_hdl, "Fail to set pause when pfc is enabled\n"); + } else { + ret = -EFAULT; + nic_err(nic_dev->dev_hdl, + "Fail to %s pause info, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSSNIC_MBX_OPCODE_SET ? 
"set" : "get", + ret, cmd_pause_cfg.head.state, out_len); + } + return ret; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) { + pause_cfg->auto_neg = cmd_pause_cfg.auto_neg; + pause_cfg->rx_pause = cmd_pause_cfg.rx_pause; + pause_cfg->tx_pause = cmd_pause_cfg.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, + struct sss_nic_pause_cfg pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + + down(&mag_cfg->cfg_lock); + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_SET, &pause_cfg); + if (ret != 0) { + up(&mag_cfg->cfg_lock); + return ret; + } + + mag_cfg->pfc_en = 0; + mag_cfg->pfc_bitmap = 0; + mag_cfg->pause_set = true; + mag_cfg->nic_pause.auto_neg = pause_cfg.auto_neg; + mag_cfg->nic_pause.rx_pause = pause_cfg.rx_pause; + mag_cfg->nic_pause.tx_pause = pause_cfg.tx_pause; + + up(&mag_cfg->cfg_lock); + + return 0; +} + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_cfg) +{ + struct sss_nic_mag_cfg *mag_cfg = NULL; + int ret = 0; + + ret = sss_nic_cfg_hw_pause(nic_dev, SSSNIC_MBX_OPCODE_GET, pause_cfg); + if (ret != 0) + return ret; + + mag_cfg = &nic_dev->nic_io->mag_cfg; + if (mag_cfg->pause_set || pause_cfg->auto_neg == SSSNIC_PORT_AN_NOT_SET) { + pause_cfg->rx_pause = mag_cfg->nic_pause.rx_pause; + pause_cfg->tx_pause = mag_cfg->nic_pause.tx_pause; + } + + return 0; +} + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state) +{ + struct sss_nic_mbx_dcb_state cmd_dcb_state = {0}; + u16 out_len = sizeof(cmd_dcb_state); + int ret; + + cmd_dcb_state.state = state; + cmd_dcb_state.op_code = op_code; + cmd_dcb_state.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_QOS_DCB_STATE, + &cmd_dcb_state, sizeof(cmd_dcb_state), + &cmd_dcb_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_dcb_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set dcb state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_dcb_state.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_clear_qp_resource qp_res = {0}; + u16 out_len = sizeof(qp_res); + int ret; + + if (!nic_dev) + return -EINVAL; + + qp_res.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CLEAR_QP_RESOURCE, + &qp_res, sizeof(qp_res), &qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &qp_res)) { + nic_err(nic_dev->dev_hdl, + "Fail to clear qp resource, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, qp_res.head.state, out_len); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(sss_nic_clear_hw_qp_resource); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_invalid_qp_cache cmd_qp_res = {0}; + u16 out_len = sizeof(cmd_qp_res); + int ret; + + cmd_qp_res.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CACHE_OUT_QP_RES, + &cmd_qp_res, sizeof(cmd_qp_res), + &cmd_qp_res, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_qp_res)) { + nic_err(nic_io->dev_hdl, + "Fail to cache out qp resources, ret: %d, state: 0x%x, out len: 0x%x\n", + ret, cmd_qp_res.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, u16 func_id, + 
struct sss_nic_port_stats *stats) +{ + struct sss_nic_mbx_port_stats_info cmd_port_stats = {0}; + struct sss_nic_mbx_port_stats vport_stats = {0}; + u16 out_len = sizeof(vport_stats); + int ret; + + if (!nic_dev || !stats) + return -EINVAL; + + cmd_port_stats.func_id = func_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_VPORT_STAT, + &cmd_port_stats, sizeof(cmd_port_stats), + &vport_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &vport_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get vport statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, vport_stats.head.state, out_len); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +static int sss_nic_set_func_table(struct sss_nic_io *nic_io, + u32 cfg_mask, const struct sss_nic_func_table_cfg *cfg) +{ + struct sss_nic_mbx_set_func_table cmd_func_tbl = {0}; + u16 out_len = sizeof(cmd_func_tbl); + int ret; + + cmd_func_tbl.tbl_cfg = *cfg; + cmd_func_tbl.cfg_bitmap = cfg_mask; + cmd_func_tbl.func_id = sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_SET_FUNC_TBL, + &cmd_func_tbl, sizeof(cmd_func_tbl), + &cmd_func_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_func_tbl)) { + nic_err(nic_io->dev_hdl, + "Fail to set func table, bitmap: 0x%x, ret: %d, state: 0x%x, out_len: 0x%x\n", + cfg_mask, ret, cmd_func_tbl.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_init_func_table(struct sss_nic_io *nic_io) +{ + struct sss_nic_func_table_cfg tbl_cfg = {0}; + + tbl_cfg.mtu = SSSNIC_DEFAULT_MAX_MTU; + tbl_cfg.rx_wqe_buf_size = nic_io->rx_buff_len; + + return sss_nic_set_func_table(nic_io, SSSNIC_INIT_FUNC_MASK, &tbl_cfg); +} + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu) +{ + struct sss_nic_func_table_cfg func_tbl_cfg = {0}; + + if (new_mtu < SSSNIC_MIN_MTU_SIZE || new_mtu > SSSNIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_dev->dev_hdl, + "Invalid mtu size: %ubytes, mtu range %ubytes - %ubytes.\n", + new_mtu, SSSNIC_MIN_MTU_SIZE, SSSNIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + func_tbl_cfg.mtu = new_mtu; + + return sss_nic_set_func_table(nic_dev->nic_io, + BIT(SSSNIC_FUNC_CFG_TYPE_MTU), &func_tbl_cfg); +} + +static int sss_nic_feature_nego(struct sss_nic_io *nic_io, u8 opcode, u64 *feature) +{ + struct sss_nic_mbx_feature_nego cmd_feature_nego = {0}; + u16 out_len = sizeof(cmd_feature_nego); + int ret; + + cmd_feature_nego.opcode = opcode; + cmd_feature_nego.func_id = sss_get_global_func_id(nic_io->hwdev); + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_feature_nego.feature, feature, sizeof(u64)); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_FEATURE_NEGO, + &cmd_feature_nego, sizeof(cmd_feature_nego), + &cmd_feature_nego, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_feature_nego)) { + nic_err(nic_io->dev_hdl, + "Fail to negotiate nic feature, ret:%d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_feature_nego.head.state, out_len); + return -EIO; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + memcpy(feature, cmd_feature_nego.feature, sizeof(u64)); + + return 0; +} + +static int sss_nic_get_bios_pf_bandwidth(struct sss_nic_io *nic_io) +{ + struct sss_nic_mbx_bios_cfg cmd_bios_cfg = {0}; + u16 out_len = sizeof(cmd_bios_cfg); + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + !SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + 
cmd_bios_cfg.op_code = SSSNIC_NVM_PF_SPEED_LIMIT; + cmd_bios_cfg.bios_cfg.func_valid = SSSNIC_BIOS_FUN_VALID; + cmd_bios_cfg.bios_cfg.func_id = (u8)sss_get_global_func_id(nic_io->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_BIOS_CFG, + &cmd_bios_cfg, sizeof(cmd_bios_cfg), + &cmd_bios_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_bios_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to get bios pf bandwidth limit, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_bios_cfg.head.state, out_len); + return -EIO; + } + + if (cmd_bios_cfg.bios_cfg.pf_bw > SSSNIC_MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", + cmd_bios_cfg.bios_cfg.pf_bw); + return -EINVAL; + } + + if (cmd_bios_cfg.bios_cfg.signature != SSSNIC_BIOS_SIGNATURE) + nic_warn(nic_io->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n", + cmd_bios_cfg.bios_cfg.signature); + + nic_io->mag_cfg.pf_bw_limit = cmd_bios_cfg.bios_cfg.pf_bw; + + return 0; +} + +static int sss_nic_get_feature_from_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_GET, &nic_io->feature_cap); +} + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io) +{ + return sss_nic_feature_nego(nic_io, SSSNIC_MBX_OPCODE_SET, &nic_io->feature_cap); +} + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + nic_io->feature_cap = feature; + + nic_info(nic_io->dev_hdl, "Update nic feature to 0x%llx\n", nic_io->feature_cap); +} + +int sss_nic_io_init(struct sss_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct sss_nic_io *nic_io = NULL; + int ret; + + nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL); + if (!nic_io) + return -ENOMEM; + + nic_io->hwdev = nic_dev->hwdev; + nic_io->pcidev_hdl = pdev; + nic_io->dev_hdl = &pdev->dev; + nic_io->nic_dev = nic_dev; + mutex_init(&nic_io->mag_cfg.sfp_mutex); + sema_init(&nic_io->mag_cfg.cfg_lock, 1); + nic_io->rx_buff_len = nic_dev->rx_buff_len; + nic_dev->nic_io = nic_io; + + ret = sss_register_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register service adapter\n"); + goto register_adapter_err; + } + + ret = sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + true, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to set function svc used state\n"); + goto set_state_err; + } + + ret = sss_nic_init_func_table(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init function table\n"); + goto init_func_table_err; + } + + ret = sss_nic_get_feature_from_hw(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get nic features\n"); + goto get_feature_from_hw_err; + } + + ret = sss_nic_get_bios_pf_bandwidth(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to get pf bandwidth limit\n"); + goto get_bios_pf_bandwidth_err; + } + + ret = sss_nic_init_pf_vf_info(nic_io); + if (ret != 0) + goto init_pf_vf_info_err; + + ret = sss_nic_register_io_callback(nic_io); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init vf info\n"); + goto register_io_callback_err; + } + + nic_io->feature_cap &= SSSNIC_DRV_DEFAULT_FEATURE; + + return 0; + +register_io_callback_err: + sss_nic_deinit_pf_vf_info(nic_io); + +init_pf_vf_info_err: +get_bios_pf_bandwidth_err: +get_feature_from_hw_err: +init_func_table_err: + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + +set_state_err: + 
sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + +register_adapter_err: + nic_dev->nic_io = NULL; + kfree(nic_io); + + return ret; +} +EXPORT_SYMBOL(sss_nic_io_init); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_io *nic_io = nic_dev->nic_io; + + sss_nic_unregister_io_callback(nic_io); + + if (nic_io->vf_info_group) { + sss_nic_clear_all_vf_info(nic_io); + sss_nic_deinit_pf_vf_info(nic_io); + } + + sss_chip_set_func_used_state(nic_dev->hwdev, SSS_SVC_TYPE_NIC, + false, SSS_CHANNEL_NIC); + + sss_unregister_service_adapter(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC); + + nic_dev->nic_io = NULL; + kfree(nic_io); +} +EXPORT_SYMBOL(sss_nic_io_deinit); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_force_drop_pkt cmd_force_drop_pkt = {0}; + u16 out_len = sizeof(cmd_force_drop_pkt); + int ret; + + cmd_force_drop_pkt.port = sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FORCE_PKT_DROP, + &cmd_force_drop_pkt, sizeof(cmd_force_drop_pkt), + &cmd_force_drop_pkt, &out_len); + if ((cmd_force_drop_pkt.head.state != SSS_MGMT_CMD_UNSUPPORTED && + cmd_force_drop_pkt.head.state) || ret || !out_len) { + nic_err(nic_dev->dev_hdl, + "Fail to force drop tx packet, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_force_drop_pkt.head.state, out_len); + return -EFAULT; + } + + return cmd_force_drop_pkt.head.state; +} + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode) +{ + struct sss_nic_mbx_set_rx_mode cmd_set_rx_mode = {0}; + u16 out_len = sizeof(cmd_set_rx_mode); + int ret; + + cmd_set_rx_mode.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_rx_mode.rx_mode = rx_mode; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_MODE, + &cmd_set_rx_mode, sizeof(cmd_set_rx_mode), + &cmd_set_rx_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_rx_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_rx_mode.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_offload_vlan cmd_vlan_offload = {0}; + u16 out_len = sizeof(cmd_vlan_offload); + int ret; + + cmd_vlan_offload.vlan_offload = (u8)en; + cmd_vlan_offload.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_RX_VLAN_OFFLOAD, + &cmd_vlan_offload, sizeof(cmd_vlan_offload), + &cmd_vlan_offload, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_vlan_offload)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rx vlan offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_vlan_offload.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_io *nic_io = nic_dev->nic_io; + u16 func_id; + int ret; + + if (old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (!nic_io->vf_info_group || is_zero_ether_addr(vf_info->drv_mac)) + return 0; + + func_id = sss_get_glb_pf_vf_offset(nic_dev->hwdev) + (u16)vf_id; + + ret = sss_nic_del_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to delete 
VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, old_vlan); + return ret; + } + + ret = sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, new_vlan, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add VF %d MAC %pM vlan %u\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac, new_vlan); + sss_nic_set_mac(nic_dev, vf_info->drv_mac, + func_id, old_vlan, SSS_CHANNEL_NIC); + return ret; + } + + return 0; +} + +static int sss_nic_set_rx_lro(struct sss_nic_dev *nic_dev, + bool lro_en, u8 lro_max_pkt_len) +{ + struct sss_nic_mbx_lro_cfg cmd_lro_cfg = {0}; + u16 out_len = sizeof(cmd_lro_cfg); + int ret; + + cmd_lro_cfg.lro_ipv4_en = (u8)lro_en; + cmd_lro_cfg.lro_ipv6_en = (u8)lro_en; + cmd_lro_cfg.lro_max_pkt_len = lro_max_pkt_len; + cmd_lro_cfg.opcode = SSSNIC_MBX_OPCODE_SET; + cmd_lro_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RX_LRO, + &cmd_lro_cfg, sizeof(cmd_lro_cfg), + &cmd_lro_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro offload, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_set_rx_lro_timer(struct sss_nic_dev *nic_dev, u32 value) +{ + struct sss_nic_mbx_lro_timer cmd_lro_timer = {0}; + u16 out_len = sizeof(cmd_lro_timer); + int ret; + + cmd_lro_timer.timer = value; + cmd_lro_timer.opcode = SSSNIC_MBX_OPCODE_SET; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_LRO_TIMER, + &cmd_lro_timer, sizeof(cmd_lro_timer), + &cmd_lro_timer, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_lro_timer)) { + nic_err(nic_dev->dev_hdl, + "Fail to set lro timer, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_lro_timer.head.state, out_len); + + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len) +{ + int ret; + + nic_info(nic_dev->dev_hdl, "Set LRO max coalesce packet size to %uK\n", + max_pkt_len); + ret = sss_nic_set_rx_lro(nic_dev, en, (u8)max_pkt_len); + if (ret != 0) + return ret; + + /* we don't set LRO timer for VF */ + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_info(nic_dev->dev_hdl, "Success to set LRO timer to %u\n", timer); + + return sss_nic_set_rx_lro_timer(nic_dev, timer); +} + +int sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en) +{ + struct sss_nic_mbx_vlan_filter_cfg cmd_set_filter = {0}; + u16 out_len = sizeof(cmd_set_filter); + int ret; + + cmd_set_filter.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_filter.vlan_filter_ctrl = (u32)en; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_VLAN_FILTER_EN, + &cmd_set_filter, sizeof(cmd_set_filter), + &cmd_set_filter, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_filter)) { + nic_err(nic_dev->dev_hdl, + "Fail to set vlan filter, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_set_filter.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule) +{ + struct sss_nic_mbx_add_tcam_rule cmd_add_tcam_rule = {0}; + u16 out_len = sizeof(cmd_add_tcam_rule); + int ret; + + if (!nic_dev || !tcam_rule) + return -EINVAL; + + if (tcam_rule->index >= SSSNIC_TCAM_RULES_NUM_MAX) { + 
nic_err(nic_dev->dev_hdl, "Invalid tcam rules num :%u to add\n", + tcam_rule->index); + return -EINVAL; + } + + memcpy((void *)&cmd_add_tcam_rule.rule, (void *)tcam_rule, + sizeof(struct sss_nic_tcam_rule_cfg)); + cmd_add_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_add_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ADD_TC_FLOW, + &cmd_add_tcam_rule, sizeof(cmd_add_tcam_rule), + &cmd_add_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_add_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to add tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_add_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index) +{ + struct sss_nic_mbx_del_tcam_rule cmd_del_tcam_rule = {0}; + u16 out_len = sizeof(cmd_del_tcam_rule); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (index >= SSSNIC_TCAM_RULES_NUM_MAX) { + nic_err(nic_dev->dev_hdl, "Invalid tcam rule num :%u to del\n", index); + return -EINVAL; + } + + cmd_del_tcam_rule.index_start = index; + cmd_del_tcam_rule.index_num = 1; + cmd_del_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_del_tcam_rule.type = SSSNIC_TCAM_RULE_FDIR_TYPE; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_DEL_TC_FLOW, + &cmd_del_tcam_rule, sizeof(cmd_del_tcam_rule), + &cmd_del_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_del_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to delete tcam rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_del_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +static int sss_nic_mgmt_tcam_block(struct sss_nic_dev *nic_dev, u8 alloc_en, u16 *index) +{ + struct sss_nic_mbx_tcam_block_cfg cmd_mgmt_tcam_block = {0}; + u16 out_len = sizeof(cmd_mgmt_tcam_block); + int ret; + + if (!nic_dev || !index) + return -EINVAL; + + cmd_mgmt_tcam_block.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_mgmt_tcam_block.alloc_en = alloc_en; + cmd_mgmt_tcam_block.tcam_type = SSSNIC_TCAM_BLOCK_TYPE_LARGE; + cmd_mgmt_tcam_block.tcam_block_index = *index; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_TCAM_BLOCK, + &cmd_mgmt_tcam_block, sizeof(cmd_mgmt_tcam_block), + &cmd_mgmt_tcam_block, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_mgmt_tcam_block)) { + nic_err(nic_dev->dev_hdl, + "Fail to set tcam block, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_mgmt_tcam_block.head.state, out_len); + return -EIO; + } + + if (alloc_en) + *index = cmd_mgmt_tcam_block.tcam_block_index; + + return 0; +} + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_ENABLE, index); +} + +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index) +{ + return sss_nic_mgmt_tcam_block(nic_dev, SSSNIC_TCAM_BLOCK_DISABLE, index); +} + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_mbx_set_tcam_state cmd_set_tcam_enable = {0}; + u16 out_len = sizeof(cmd_set_tcam_enable); + int ret; + + cmd_set_tcam_enable.func_id = sss_get_global_func_id(nic_dev->hwdev); + cmd_set_tcam_enable.tcam_enable = (u8)enable; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_ENABLE_TCAM, + &cmd_set_tcam_enable, sizeof(cmd_set_tcam_enable), + &cmd_set_tcam_enable, &out_len); + if 
(SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_tcam_enable)) { + nic_err(nic_dev->dev_hdl, + "Fail to set fdir tcam filter, ret: %d, state: 0x%x, out_len: 0x%x, enable: 0x%x\n", + ret, cmd_set_tcam_enable.head.state, out_len, + enable); + return -EIO; + } + + return 0; +} + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mbx_flush_tcam_rule cmd_flush_tcam_rule = {0}; + u16 out_len = sizeof(cmd_flush_tcam_rule); + int ret; + + cmd_flush_tcam_rule.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_FLUSH_TCAM, + &cmd_flush_tcam_rule, + sizeof(cmd_flush_tcam_rule), + &cmd_flush_tcam_rule, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_flush_tcam_rule)) { + nic_err(nic_dev->dev_hdl, + "Fail to flush tcam fdir rule, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, cmd_flush_tcam_rule.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 qp_num, u16 wqe_type) +{ + int ret; + u16 i; + struct sss_nic_rq_pc_info *rq_pc_info = NULL; + struct sss_nic_rq_hw_info *rq_hw = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to alloc cmd_buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*rq_hw); + + rq_hw = msg_buf->buf; + rq_hw->num_queues = qp_num; + rq_hw->func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_cpu_to_be32(rq_hw, sizeof(*rq_hw)); + + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_RXQ_INFO_GET, + msg_buf, msg_buf, NULL, 0, SSS_CHANNEL_NIC); + if (ret) + goto get_rq_info_error; + + rq_pc_info = msg_buf->buf; + for (i = 0; i < qp_num; i++) { + out_info[i].hw_ci = rq_pc_info[i].hw_ci >> wqe_type; + out_info[i].hw_pi = rq_pc_info[i].hw_pi >> wqe_type; + } + +get_rq_info_error: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed) +{ + int ret; + u32 pf_rate; + u32 speed_convert[SSSNIC_PORT_SPEED_UNKNOWN] = { + 0, 10, 100, 1000, 10000, 25000, 40000, 50000, 100000, 200000 + }; + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_mbx_tx_rate_cfg rate_cfg = {0}; + u16 out_len = sizeof(rate_cfg); + + if (speed >= SSSNIC_PORT_SPEED_UNKNOWN) { + nic_err(nic_io->dev_hdl, "Invalid speed level: %u\n", speed); + return -EINVAL; + } + + if (nic_io->mag_cfg.pf_bw_limit == SSSNIC_PF_LIMIT_BW_MAX) { + pf_rate = 0; + } else { + pf_rate = (speed_convert[speed] / 100) * nic_io->mag_cfg.pf_bw_limit; + if (pf_rate == 0 && speed != SSSNIC_PORT_SPEED_NOT_SET) + pf_rate = 1; + } + + rate_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + rate_cfg.max_rate = pf_rate; + rate_cfg.min_rate = 0; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rate_cfg)) { + nic_err(nic_dev->dev_hdl, "Fail to set rate:%u, ret: %d, state: 0x%x, out len: 0x%x\n", + pf_rate, ret, rate_cfg.head.state, out_len); + return rate_cfg.head.state ? 
rate_cfg.head.state : -EIO; + } + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..387990edd5ed0f80866b55bfa21a5f185db21b4b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_cfg.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_CFG_H +#define SSS_NIC_CFG_H + +#include +#include + +#include "sss_nic_cfg_define.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_SUPPORT_FEATURE(nic_io, feature) \ + ((nic_io)->feature_cap & SSSNIC_F_##feature) +#define SSSNIC_SUPPORT_CSUM(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, CSUM) +#define SSSNIC_SUPPORT_SCTP_CRC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, SCTP_CRC) +#define SSSNIC_SUPPORT_TSO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, TSO) +#define SSSNIC_SUPPORT_UFO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, UFO) +#define SSSNIC_SUPPORT_LRO(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, LRO) +#define SSSNIC_SUPPORT_RSS(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RSS) +#define SSSNIC_SUPPORT_RXVLAN_FILTER(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_FILTER) +#define SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_io) \ + (SSSNIC_SUPPORT_FEATURE(nic_io, RX_VLAN_STRIP) && \ + SSSNIC_SUPPORT_FEATURE(nic_io, TX_VLAN_INSERT)) +#define SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, VXLAN_OFFLOAD) +#define SSSNIC_SUPPORT_IPSEC_OFFLOAD(nic_io) \ + SSSNIC_SUPPORT_FEATURE(nic_io, IPSEC_OFFLOAD) +#define SSSNIC_SUPPORT_FDIR(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, FDIR) +#define SSSNIC_SUPPORT_PROMISC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, PROMISC) +#define SSSNIC_SUPPORT_ALLMULTI(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, ALLMULTI) +#define SSSNIC_SUPPORT_VF_MAC(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, VF_MAC) +#define SSSNIC_SUPPORT_RATE_LIMIT(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RATE_LIMIT) +#define SSSNIC_SUPPORT_RXQ_RECOVERY(nic_io) SSSNIC_SUPPORT_FEATURE(nic_io, RXQ_RECOVERY) + +int sss_nic_set_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_del_mac(struct sss_nic_dev *nic_dev, const u8 *mac_addr, + u16 vlan_id, u16 func_id, u16 channel); + +int sss_nic_add_tcam_rule(struct sss_nic_dev *nic_dev, struct sss_nic_tcam_rule_cfg *tcam_rule); +int sss_nic_del_tcam_rule(struct sss_nic_dev *nic_dev, u32 index); + +int sss_nic_alloc_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); +int sss_nic_free_tcam_block(struct sss_nic_dev *nic_dev, u16 *index); + +int sss_nic_set_fdir_tcam_rule_filter(struct sss_nic_dev *nic_dev, bool enable); + +int sss_nic_flush_tcam_rule(struct sss_nic_dev *nic_dev); + +int sss_nic_update_mac(struct sss_nic_dev *nic_dev, u8 *new_mac); + +int sss_nic_get_default_mac(struct sss_nic_dev *nic_dev, u8 *mac_addr); + +int sss_nic_set_dev_mtu(struct sss_nic_dev *nic_dev, u16 new_mtu); + +int sss_nic_get_vport_stats(struct sss_nic_dev *nic_dev, + u16 func_id, struct sss_nic_port_stats *stats); + +int sss_nic_force_drop_tx_pkt(struct sss_nic_dev *nic_dev); + +int sss_nic_set_rx_mode(struct sss_nic_dev *nic_dev, u32 rx_mode); + +int sss_nic_set_rx_vlan_offload(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_set_rx_lro_state(struct sss_nic_dev *nic_dev, bool en, u32 timer, u32 max_pkt_len); + +int sss_nic_config_vlan(struct sss_nic_dev *nic_dev, u8 opcode, u16 vlan_id); + +int sss_nic_set_hw_vport_state(struct sss_nic_dev *nic_dev, + u16 func_id, bool 
enable, u16 channel); + +int sss_nic_set_dcb_info(struct sss_nic_io *nic_io, struct sss_nic_dcb_info *dcb_info); + +int sss_nic_set_hw_dcb_state(struct sss_nic_dev *nic_dev, u8 op_code, u8 state); + +int sss_nic_clear_hw_qp_resource(struct sss_nic_dev *nic_dev); + +int sss_nic_get_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg *pause_config); + +int sss_nic_set_hw_pause_info(struct sss_nic_dev *nic_dev, struct sss_nic_pause_cfg pause_config); + +int sss_nic_set_vlan_fliter(struct sss_nic_dev *nic_dev, bool en); + +int sss_nic_update_mac_vlan(struct sss_nic_dev *nic_dev, + u16 old_vlan, u16 new_vlan, int vf_id); + +int sss_nic_cache_out_qp_resource(struct sss_nic_io *nic_io); + +int sss_nic_set_feature_to_hw(struct sss_nic_io *nic_io); + +void sss_nic_update_nic_feature(struct sss_nic_dev *nic_dev, u64 feature); + +int sss_nic_io_init(struct sss_nic_dev *nic_dev); + +void sss_nic_io_deinit(struct sss_nic_dev *nic_dev); + +int sss_nic_rq_hw_pc_info(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_pc_info *out_info, u16 num_qps, u16 wqe_type); +int sss_nic_set_pf_rate(struct sss_nic_dev *nic_dev, u8 speed); + + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c new file mode 100644 index 0000000000000000000000000000000000000000..573cf72f3b3968b1db17a10d4c44d2c2f90fa302 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_dcb.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_user_cos_num; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_user_cos_num; + return 0; +} + +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_valid_cos_map; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_valid_cos_map; + return 0; +} + +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num) +{ + u8 cur_cos_num = 0; + u8 cos_per_qp_num; + u8 qp_num; + u8 qp_offset; + u8 i; + u8 remain; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + u8 valid_cos_map; + + if (cos_num == 0) + return; + + cos_per_qp_num = (u8)(nic_dev->qp_res.qp_num / cos_num); + if (cos_per_qp_num == 0) + return; + + remain = nic_dev->qp_res.qp_num % cos_per_qp_num; + valid_cos_map = sss_nic_get_valid_cos_map(nic_dev); + + memset(dcb_config->cos_qp_num, 0, sizeof(dcb_config->cos_qp_num)); + memset(dcb_config->cos_qp_offset, 0, sizeof(dcb_config->cos_qp_offset)); + + for (i = 0; i < SSSNIC_PCP_UP_MAX; i++) { + if (BIT(i) & valid_cos_map) { + qp_num = cos_per_qp_num; + qp_offset = (u8)(cur_cos_num * cos_per_qp_num); + + if (cur_cos_num < remain) { + qp_offset += cur_cos_num; + qp_num++; + } else { + qp_offset += remain; + } + + valid_cos_map -= (u8)BIT(i); + cur_cos_num++; + + dcb_config->cos_qp_num[i] = qp_num; + dcb_config->cos_qp_offset[i] = qp_offset; + sss_nic_info(nic_dev, drv, "Qp info: cos %u, qp_offset=%u 
qp_num=%u\n", + i, qp_offset, qp_num); + } + } + + memcpy(nic_dev->backup_dcb_cfg.cos_qp_num, dcb_config->cos_qp_num, + sizeof(dcb_config->cos_qp_num)); + memcpy(nic_dev->backup_dcb_cfg.cos_qp_offset, dcb_config->cos_qp_offset, + sizeof(dcb_config->cos_qp_offset)); +} + +static void sss_nic_set_sq_cos(struct sss_nic_dev *nic_dev, + u16 qid_start, u16 qid_end, u8 cos) +{ + u16 qid; + + for (qid = qid_start; qid < qid_end; qid++) + nic_dev->sq_desc_group[qid].cos = cos; +} + +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + u8 i; + u16 q_num; + u16 qid_start; + u16 qid_end; + + sss_nic_set_sq_cos(nic_dev, 0, nic_dev->qp_res.qp_num, + nic_dev->hw_dcb_cfg.default_cos); + + if (dcb_en == 0) + return; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + q_num = (u16)nic_dev->hw_dcb_cfg.cos_qp_num[i]; + if (q_num == 0) + continue; + + qid_start = (u16)nic_dev->hw_dcb_cfg.cos_qp_offset[i]; + qid_end = qid_start + q_num; + sss_nic_set_sq_cos(nic_dev, qid_start, qid_end, i); + sss_nic_info(nic_dev, drv, "Update tx db cos, qid_start=%u, qid_end=%u cos=%u\n", + qid_start, qid_end, i); + } +} + +static int sss_nic_init_tx_cos_info(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + dcb_info.default_cos = dcb_config->default_cos; + dcb_info.trust = dcb_config->trust; + memset(dcb_info.dscp2cos, dcb_config->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_config->default_cos, sizeof(dcb_info.pcp2cos)); + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + + return ret; +} + +static u8 sss_nic_get_cos_num(u8 cos_bitmap) +{ + u8 i; + u8 cos_count = 0; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + if (cos_bitmap & BIT(i)) + cos_count++; + + return cos_count; +} + +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config) +{ + struct sss_nic_dcb_config *hw_config = &nic_dev->hw_dcb_cfg; + + memcpy(hw_config, dcb_config, sizeof(*dcb_config)); +} + +static int sss_nic_init_dcb_cfg(struct sss_nic_dev *nic_dev, + struct sss_nic_dcb_config *dcb_config) +{ + u8 func_cos_bitmap; + u8 port_cos_bitmap; + int ret; + u8 i; + u8 j; + + ret = sss_get_cos_valid_bitmap(nic_dev->hwdev, &func_cos_bitmap, &port_cos_bitmap); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to get cos valid bitmap, ret: %d\n", ret); + return -EFAULT; + } + + nic_dev->max_cos_num = sss_nic_get_cos_num(func_cos_bitmap); + nic_dev->dft_port_cos_bitmap = port_cos_bitmap; + nic_dev->dft_func_cos_bitmap = func_cos_bitmap; + + dcb_config->dscp_user_cos_num = nic_dev->max_cos_num; + dcb_config->pcp_user_cos_num = nic_dev->max_cos_num; + dcb_config->dscp_valid_cos_map = func_cos_bitmap; + dcb_config->pcp_valid_cos_map = func_cos_bitmap; + dcb_config->trust = DCB_PCP; + dcb_config->default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) { + dcb_config->pcp2cos[i] = func_cos_bitmap & BIT(i) ? 
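/* a PCP level whose COS bit is valid maps to itself; other levels fall back to the default COS */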
i : dcb_config->default_cos; + for (j = 0; j < SSSNIC_DCB_COS_MAX; j++) + dcb_config->dscp2cos[i * SSSNIC_DCB_DSCP_NUM + j] = dcb_config->pcp2cos[i]; + } + + return 0; +} + +static void sss_nic_reset_dcb_config(struct sss_nic_dev *nic_dev) +{ + memset(&nic_dev->hw_dcb_cfg, 0, sizeof(nic_dev->hw_dcb_cfg)); + sss_nic_init_dcb_cfg(nic_dev, &nic_dev->hw_dcb_cfg); + sss_nic_info(nic_dev, drv, "Success to reset bcb confg\n"); +} + +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev) +{ + int ret; + + ret = sss_nic_set_hw_dcb_state(nic_dev, SSSNIC_MBX_OPCODE_SET_DCB_STATE, + !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set dcb state, ret: %d\n", ret); + return ret; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_nic_sync_dcb_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + else + sss_nic_reset_dcb_config(nic_dev); + + return 0; +} + +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct sss_nic_dcb_config *dcb_config = &nic_dev->hw_dcb_cfg; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + dcb_config->default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + return 0; + } + + ret = sss_nic_init_dcb_cfg(nic_dev, dcb_config); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to init dcb, ret: %d\n", ret); + return ret; + } + sss_nic_info(nic_dev, drv, "Support num cos %u, default cos %u\n", + nic_dev->max_cos_num, dcb_config->default_cos); + + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(nic_dev->hw_dcb_cfg)); + + ret = sss_nic_init_tx_cos_info(nic_dev); + if (ret != 0) { + sss_nic_err(nic_dev, drv, "Fail to set tx cos info, ret: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..00a649598f286e6f2fc6cc613cd8ac81de381732 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_dcb.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_DCB_H +#define SSS_NIC_DCB_H + +#include "sss_kernel.h" +#include "sss_nic_dcb_define.h" + +enum SSSNIC_DCB_FLAGS { + SSSNIC_DCB_UP_COS_SETTING, + SSSNIC_DCB_TRAFFIC_STOPPED, +}; + +enum sss_nic_dcb_trust { + DCB_PCP, + DCB_DSCP, +}; + +u8 sss_nic_get_user_cos_num(struct sss_nic_dev *nic_dev); +u8 sss_nic_get_valid_cos_map(struct sss_nic_dev *nic_dev); +int sss_nic_dcb_init(struct sss_nic_dev *nic_dev); +int sss_nic_update_dcb_cfg(struct sss_nic_dev *nic_dev); +void sss_nic_update_sq_cos(struct sss_nic_dev *nic_dev, u8 dcb_en); +void sss_nic_update_qp_cos_map(struct sss_nic_dev *nic_dev, u8 cos_num); +void sss_nic_sync_dcb_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_config); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..5aada43de2e3546165ce512fa2ecc852d1778b33 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include 
"sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_api.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_netdev_ops_api.h" + +#define SSSNIC_MGMT_VERSION_MAX_LEN 32 + +#define SSSNIC_AUTONEG_RESET_TIMEOUT 100 +#define SSSNIC_AUTONEG_FINISH_TIMEOUT 200 + +static void sss_nic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[SSSNIC_MGMT_VERSION_MAX_LEN] = {0}; + int ret; + + strlcpy(drvinfo->driver, SSSNIC_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, SSSNIC_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(pdev), sizeof(drvinfo->bus_info)); + + ret = sss_get_mgmt_version(nic_dev->hwdev, mgmt_ver, + SSSNIC_MGMT_VERSION_MAX_LEN, + SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get fw version, ret: %d\n", ret); + return; + } + + ret = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%s", mgmt_ver); + if (ret < 0) + nicif_err(nic_dev, drv, netdev, "Fail to snprintf fw version\n"); +} + +static u32 sss_nic_get_msglevel(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void sss_nic_set_msglevel(struct net_device *netdev, u32 msg_enable) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + nicif_info(nic_dev, drv, netdev, "Success to change msg_enable from 0x%x to 0x%x\n", + nic_dev->msg_enable, msg_enable); + + nic_dev->msg_enable = msg_enable; +} + +static int sss_nic_nway_reset(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + int ret; + + while (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_AUTONEG_RESET)) + msleep(SSSNIC_AUTONEG_RESET_TIMEOUT); + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to get port info\n"); + ret = -EFAULT; + goto reset_err; + } + + if (port_info.autoneg_state != SSSNIC_PORT_CFG_AN_ON) { + nicif_err(nic_dev, drv, netdev, "Autonegotiation is not on, don't support to restart it\n"); + ret = -EOPNOTSUPP; + goto reset_err; + } + + ret = sss_nic_set_autoneg(nic_dev, false); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation off\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + + ret = sss_nic_set_autoneg(nic_dev, true); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to set autonegotiation on\n"); + ret = -EFAULT; + goto reset_err; + } + + msleep(SSSNIC_AUTONEG_FINISH_TIMEOUT); + nicif_info(nic_dev, drv, netdev, "Success to restart autonegotiation\n"); + +reset_err: + clear_bit(SSSNIC_AUTONEG_RESET, &nic_dev->flags); + return ret; +} + +static void sss_nic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ringparam->tx_pending = nic_dev->sq_desc_group[0].q_depth; + ringparam->rx_pending = nic_dev->rq_desc_group[0].q_depth; + ringparam->tx_max_pending = SSSNIC_MAX_TX_QUEUE_DEPTH; + ringparam->rx_max_pending = SSSNIC_MAX_RX_QUEUE_DEPTH; +} + +static int sss_nic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_qp_resource 
qp_res = {0}; + u32 sq_depth; + u32 rq_depth; + int ret; + + ret = sss_nic_check_ringparam_valid(netdev, ringparam); + if (ret != 0) + return ret; + + sq_depth = (u32)(1U << (u16)ilog2(ringparam->tx_pending)); + rq_depth = (u32)(1U << (u16)ilog2(ringparam->rx_pending)); + if (sq_depth == nic_dev->qp_res.sq_depth && + rq_depth == nic_dev->qp_res.rq_depth) + return 0; /* nothing to do */ + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->qp_res.sq_depth, nic_dev->qp_res.rq_depth, + sq_depth, rq_depth); + + if (netif_running(netdev) == 0) { + sss_nic_update_qp_depth(nic_dev, sq_depth, rq_depth); + return 0; + } + + qp_res = nic_dev->qp_res; + qp_res.sq_depth = sq_depth; + qp_res.rq_depth = rq_depth; + qp_res.sq_res_group = NULL; + qp_res.rq_res_group = NULL; + qp_res.irq_cfg = NULL; + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &qp_res, + NULL, NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update channel settings\n"); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, SSSNIC_COALESCE_ALL_QUEUE); +} + +static int sss_nic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_get_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return sss_nic_ethtool_set_coalesce(netdev, coal, (u16)queue); +} + +static int sss_nic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_mag_led_type led_type = SSSNIC_MAG_LED_TYPE_ALARM; + enum sss_nic_mag_led_mode led_mode; + int ret; + + if (state == ETHTOOL_ID_ACTIVE) { + led_mode = SSSNIC_MAG_LED_FORCE_BLINK_2HZ; + } else if (state == ETHTOOL_ID_INACTIVE) { + led_mode = SSSNIC_MAG_LED_DEFAULT; + } else { + nicif_err(nic_dev, drv, netdev, "Not support to set phys id, state:%d\n", state); + return -EOPNOTSUPP; + } + + ret = sss_nic_set_hw_led_state(nic_dev, led_type, led_mode); + if (ret != 0) + nicif_err(nic_dev, drv, netdev, "Fail to set led status, ret:%d, type:%d, mode:%d\n", + ret, led_type, led_mode); + + return ret; +} + +static void sss_nic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to get pauseparam\n"); + } else { + pauseparam->autoneg = pause_config.auto_neg == SSSNIC_PORT_CFG_AN_ON ? 
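/* map the firmware auto-negotiation state onto the ethtool AUTONEG_* constants */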
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + pauseparam->rx_pause = pause_config.rx_pause; + pauseparam->tx_pause = pause_config.tx_pause; + } +} + +static int sss_nic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + struct sss_nic_pause_cfg pause_config = {0}; + u32 auto_neg; + int ret; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get auto-negotiation state\n"); + return -EFAULT; + } + + auto_neg = port_info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + if (pauseparam->autoneg != auto_neg) { + nicif_err(nic_dev, drv, netdev, + "Use: ethtool -s autoneg to change autoneg\n"); + return -EOPNOTSUPP; + } + pause_config.auto_neg = pauseparam->autoneg == AUTONEG_ENABLE ? + SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF; + pause_config.rx_pause = (u8)pauseparam->rx_pause; + pause_config.tx_pause = (u8)pauseparam->tx_pause; + + ret = sss_nic_set_hw_pause_info(nic_dev, pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set pauseparam\n"); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set pauseparam option, rx: %s, tx: %s\n", + pauseparam->rx_pause ? "on" : "off", pauseparam->tx_pause ? "on" : "off"); + + return 0; +} + +static int sss_nic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + sss_nic_get_module_info_t handler[SSSNIC_MODULE_TYPE_MAX] = {NULL}; + u8 sfp_type = 0; + u8 sfp_type_ext = 0; + int ret; + + handler[SSSNIC_MODULE_TYPE_SFP] = sss_nic_module_type_sfp; + handler[SSSNIC_MODULE_TYPE_QSFP] = sss_nic_module_type_qsfp; + handler[SSSNIC_MODULE_TYPE_QSFP_PLUS] = sss_nic_module_type_qsfp_plus; + handler[SSSNIC_MODULE_TYPE_QSFP28] = sss_nic_module_type_qsfp28; + + ret = sss_nic_get_sfp_type(nic_dev, &sfp_type, &sfp_type_ext); + if (ret != 0) + return ret; + + if (sfp_type >= SSSNIC_MODULE_TYPE_MAX) { + nicif_warn(nic_dev, drv, netdev, + "Unknown optical module type: 0x%x\n", sfp_type); + return -EINVAL; + } + + if (!handler[sfp_type]) { + nicif_warn(nic_dev, drv, netdev, + "Unknown optical module type: 0x%x\n", sfp_type); + return -EINVAL; + } + + handler[sfp_type](modinfo, sfp_type_ext); + + return 0; +} + +static int sss_nic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE]; + u32 offset = ee->len + ee->offset; + u32 len = ee->len; + int ret; + + if (len == 0 || (offset > SSSNIC_STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, len); + + ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, len); + if (ret != 0) + return ret; + + memcpy(data, sfp_data + ee->offset, len); + + return 0; +} + +static u32 sss_nic_get_priv_flags(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 ret_flag = 0; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + ret_flag |= SSSNIC_PRIV_FLAG_SYMM_RSS; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + ret_flag |= SSSNIC_PRIV_FLAG_LINK_UP; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + ret_flag |= SSSNIC_PRIV_FLAG_RQ_RECOVERY; + + return ret_flag; +} + +static int sss_nic_set_priv_flags(struct net_device *netdev, u32 flags) +{ + int ret; + + ret = 
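/* apply each private-flag handler in turn; the first failure aborts the remaining updates */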
sss_nic_set_symm_rss_flag(netdev, flags); + if (ret) + return ret; + + ret = sss_nic_set_rq_recovery_flag(netdev, flags); + if (ret) + return ret; + + return sss_nic_set_force_link_flag(netdev, flags); +} + +static void sss_nic_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, SSSNIC_LP_TEST_TYPE_MAX * sizeof(u64)); + sss_nic_loop_test(netdev, eth_test, data); +} + +static const struct ethtool_ops g_nic_ethtool_ops = { + .get_link_ksettings = sss_nic_get_link_ksettings, + .set_link_ksettings = sss_nic_set_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .nway_reset = sss_nic_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + .set_ringparam = sss_nic_set_ringparam, + .get_pauseparam = sss_nic_get_pauseparam, + .set_pauseparam = sss_nic_set_pauseparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .self_test = sss_nic_self_test, + + .set_phys_id = sss_nic_set_phys_id, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce = sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_module_info = sss_nic_get_module_info, + .get_module_eeprom = sss_nic_get_module_eeprom, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, +}; + +static const struct ethtool_ops g_nicvf_ethtool_ops = { + .get_link_ksettings = sss_nic_get_link_ksettings, + + .get_drvinfo = sss_nic_get_drvinfo, + .get_msglevel = sss_nic_get_msglevel, + .set_msglevel = sss_nic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = sss_nic_get_ringparam, + + .set_ringparam = sss_nic_set_ringparam, + .get_sset_count = sss_nic_get_sset_count, + .get_ethtool_stats = sss_nic_get_ethtool_stats, + .get_strings = sss_nic_get_strings, + + .get_coalesce = sss_nic_get_coalesce, + .set_coalesce = sss_nic_set_coalesce, + + .get_per_queue_coalesce = sss_nic_get_per_queue_coalesce, + .set_per_queue_coalesce = sss_nic_set_per_queue_coalesce, + + .get_rxnfc = sss_nic_get_rxnfc, + .set_rxnfc = sss_nic_set_rxnfc, + .get_priv_flags = sss_nic_get_priv_flags, + .set_priv_flags = sss_nic_set_priv_flags, + + .get_channels = sss_nic_get_channels, + .set_channels = sss_nic_set_channels, + + .get_rxfh_indir_size = sss_nic_get_rxfh_indir_size, + + .get_rxfh_key_size = sss_nic_get_rxfh_key_size, + .get_rxfh = sss_nic_get_rxfh, + .set_rxfh = sss_nic_set_rxfh, + +}; + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (!SSSNIC_FUNC_IS_VF(adapter->hwdev)) + netdev->ethtool_ops = &g_nic_ethtool_ops; + else + netdev->ethtool_ops = &g_nicvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..d27145371df1933d691456cc1fdee25ca6d04722 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool.h @@ -0,0 
+1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_H +#define SSS_NIC_ETHTOOL_H + +#include + +void sss_nic_set_ethtool_ops(struct sss_nic_dev *adapter); +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c new file mode 100644 index 0000000000000000000000000000000000000000..38b4748bf703e4c0a742c1ee34a18247ab2e5074 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_rss.h" +#include "sss_nic_ethtool_stats.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_ethtool_api.h" + +#define SSSNIC_COALESCE_PENDING_LIMIT_UNIT 8 +#define SSSNIC_COALESCE_TIMER_CFG_UNIT 5 +#define SSSNIC_COALESCE_MAX_PENDING_LIMIT (255 * SSSNIC_COALESCE_PENDING_LIMIT_UNIT) +#define SSSNIC_COALESCE_MAX_TIMER_CFG (255 * SSSNIC_COALESCE_TIMER_CFG_UNIT) +#define SSSNIC_WAIT_PKTS_TO_RX_BUFFER 200 +#define SSSNIC_WAIT_CLEAR_LP_TEST 100 + +#define SSSNIC_CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if ((coal)->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %u\n", \ + #item, (unit), ((coal)->item - \ + (coal)->item % (unit))); \ +} while (0) + +#define SSSNIC_CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %u %s\n", \ + #item, (ori_val) * (unit), \ + ((coal)->item - (coal)->item % (unit)), \ + (obj_str)); \ +} while (0) + +#define SSSNIC_CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str)); \ +} while (0) + +#define SSSNIC_PORT_DOWN_ERR_ID 0 +#define SSSNIC_LP_DEF_TIME 5 /* seconds */ + +#define SSSNIC_TEST_TIME_MULTIPLE 5 + +#define SSSNIC_INTERNAL_LP_MODE 5 + +#define SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT 5000 + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth) +{ + u16 i; + + nic_dev->qp_res.sq_depth = sq_depth; + nic_dev->qp_res.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qp_num; i++) { + nic_dev->sq_desc_group[i].q_depth = sq_depth; + nic_dev->rq_desc_group[i].q_depth = rq_depth; + nic_dev->sq_desc_group[i].qid_mask = sq_depth - 1; + nic_dev->rq_desc_group[i].qid_mask = rq_depth - 1; + } +} + +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (ringparam->rx_mini_pending != 0 || ringparam->rx_jumbo_pending != 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport rx_mini_pending: %u, rx_jumbo_pending: %u\n", + ringparam->rx_mini_pending, ringparam->rx_jumbo_pending); + return -EINVAL; + } + + if (ringparam->tx_pending < SSSNIC_MIN_QUEUE_DEPTH || + ringparam->tx_pending > SSSNIC_MAX_TX_QUEUE_DEPTH || + ringparam->rx_pending < SSSNIC_MIN_QUEUE_DEPTH || + 
ringparam->rx_pending > SSSNIC_MAX_RX_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of range tx[%d-%d] rx[%d-%d]\n", + ringparam->tx_pending, ringparam->tx_pending, + ringparam->rx_pending, ringparam->rx_pending); + return -EINVAL; + } + + return 0; +} + +void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal, + struct sss_nic_intr_coal_info *nic_coal) +{ + ethtool_coal->rx_coalesce_usecs = nic_coal->coalesce_timer * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + ethtool_coal->tx_coalesce_usecs = ethtool_coal->rx_coalesce_usecs; + ethtool_coal->rx_coalesce_usecs_low = nic_coal->rx_usecs_low * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + ethtool_coal->rx_coalesce_usecs_high = nic_coal->rx_usecs_high * + SSSNIC_COALESCE_TIMER_CFG_UNIT; + + ethtool_coal->rx_max_coalesced_frames = nic_coal->pending_limt * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + ethtool_coal->tx_max_coalesced_frames = + ethtool_coal->rx_max_coalesced_frames; + ethtool_coal->rx_max_coalesced_frames_low = + nic_coal->rx_pending_limt_low * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + ethtool_coal->rx_max_coalesced_frames_high = + nic_coal->rx_pending_limt_high * + SSSNIC_COALESCE_PENDING_LIMIT_UNIT; + + ethtool_coal->pkt_rate_low = (u32)nic_coal->pkt_rate_low; + ethtool_coal->pkt_rate_high = (u32)nic_coal->pkt_rate_high; +} + +int sss_nic_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethtool_coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + + if (queue == SSSNIC_COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + intr_coal_info = &nic_dev->coal_info[0]; + } else { + if (queue >= nic_dev->qp_res.qp_num) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + intr_coal_info = &nic_dev->coal_info[queue]; + } + + sss_nic_intr_coal_to_ethtool_coal(ethtool_coal, intr_coal_info); + ethtool_coal->use_adaptive_rx_coalesce = + nic_dev->use_adaptive_rx_coalesce; + + return 0; +} + +int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, struct sss_nic_intr_coal_info *coal) +{ + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + struct sss_irq_cfg irq_cfg = {0}; + struct net_device *netdev = nic_dev->netdev; + int ret; + + intr_coal_info = &nic_dev->coal_info[qid]; + if (intr_coal_info->coalesce_timer != coal->coalesce_timer || + intr_coal_info->pending_limt != coal->pending_limt) + intr_coal_info->user_set_intr_coal_flag = 1; + + intr_coal_info->coalesce_timer = coal->coalesce_timer; + intr_coal_info->pending_limt = coal->pending_limt; + intr_coal_info->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal_info->rx_pending_limt_high = coal->rx_pending_limt_high; + intr_coal_info->pkt_rate_low = coal->pkt_rate_low; + intr_coal_info->pkt_rate_high = coal->pkt_rate_high; + intr_coal_info->rx_usecs_low = coal->rx_usecs_low; + intr_coal_info->rx_usecs_high = coal->rx_usecs_high; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP) || + qid >= nic_dev->qp_res.qp_num || + nic_dev->use_adaptive_rx_coalesce != 0) + return 0; + + irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id; + irq_cfg.lli_set = 0; + irq_cfg.coalesc_intr_set = 1; + irq_cfg.coalesc_timer = intr_coal_info->coalesce_timer; + irq_cfg.resend_timer = intr_coal_info->resend_timer; + irq_cfg.pending = intr_coal_info->pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = + intr_coal_info->coalesce_timer; + nic_dev->rq_desc_group[qid].last_pending_limt = 
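/* record the coalescing values just programmed for this RQ */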
intr_coal_info->pending_limt; + ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, + SSS_CHANNEL_NIC); + if (ret != 0) + nicif_warn(nic_dev, drv, netdev, + "Fail to set queue%u coalesce", qid); + + return ret; +} + +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct ethtool_coalesce tmp_coal = {0}; + + tmp_coal.cmd = coal->cmd; + tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; + tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; + tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; + tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; + tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; + tmp_coal.rx_max_coalesced_frames_low = + coal->rx_max_coalesced_frames_low; + tmp_coal.rx_max_coalesced_frames_high = + coal->rx_max_coalesced_frames_high; + tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; + tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + tmp_coal.pkt_rate_low = coal->pkt_rate_low; + tmp_coal.pkt_rate_high = coal->pkt_rate_high; + + if (memcmp(coal, &tmp_coal, sizeof(tmp_coal)) != 0) { + nicif_err(nic_dev, drv, netdev, + "Invalid parameter, only support to change rx/tx-usecs and rx/tx-frames\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "Coal param: tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > SSSNIC_COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames out of range[%d-%d]\n", 0, + SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > + SSSNIC_COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, SSSNIC_COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs_low / 
SSSNIC_COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u)\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / SSSNIC_COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / SSSNIC_COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "Coal param: coalesced_frames_high(%u) must more than coalesced_frames_low(%u)\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, netdev, + "Coal param: pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +int sss_nic_coalesce_check(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + int ret; + + ret = sss_nic_check_coal_param_support(netdev, coal); + if (ret != 0) + return ret; + + ret = sss_nic_check_coal_param_valid(netdev, coal); + if (ret != 0) + return ret; + + ret = sss_nic_check_coal_param_range(netdev, coal); + if (ret != 0) + return ret; + + return 0; +} + +int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev, + struct sss_nic_intr_coal_info *intr_coal_info, u16 queue) +{ + u16 i; + + if (queue < nic_dev->qp_res.qp_num) { + sss_nic_set_hw_intr_coal(nic_dev, queue, intr_coal_info); + return 0; + } else if (queue != SSSNIC_COALESCE_ALL_QUEUE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + + for (i = 0; i < nic_dev->max_qp_num; i++) + sss_nic_set_hw_intr_coal(nic_dev, i, intr_coal_info); + + return 0; +} + +void sss_nic_coalesce_align_check(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + SSSNIC_CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info *intr_coal_info = NULL; + char obj_str[32] = {0}; + + if (queue == SSSNIC_COALESCE_ALL_QUEUE) { + intr_coal_info = &nic_dev->coal_info[0]; + snprintf(obj_str, sizeof(obj_str), "for netdev"); + } else { + intr_coal_info = &nic_dev->coal_info[queue]; + snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); + } + + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->coalesce_timer, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + SSSNIC_COALESCE_TIMER_CFG_UNIT, + intr_coal_info->rx_usecs_high, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, 
rx_max_coalesced_frames, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->pending_limt, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_low, obj_str); + SSSNIC_CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + SSSNIC_COALESCE_PENDING_LIMIT_UNIT, + intr_coal_info->rx_pending_limt_high, obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + intr_coal_info->pkt_rate_low, obj_str); + SSSNIC_CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + intr_coal_info->pkt_rate_high, obj_str); +} + +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal) +{ + nic_coal->coalesce_timer = + (u8)(ethtool_coal->rx_coalesce_usecs / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->pending_limt = (u8)(ethtool_coal->rx_max_coalesced_frames / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->pkt_rate_low = ethtool_coal->pkt_rate_low; + nic_coal->pkt_rate_high = ethtool_coal->pkt_rate_high; + nic_coal->rx_usecs_low = + (u8)(ethtool_coal->rx_coalesce_usecs_low / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_usecs_high = + (u8)(ethtool_coal->rx_coalesce_usecs_high / SSSNIC_COALESCE_TIMER_CFG_UNIT); + nic_coal->rx_pending_limt_low = + (u8)(ethtool_coal->rx_max_coalesced_frames_low / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); + nic_coal->rx_pending_limt_high = + (u8)(ethtool_coal->rx_max_coalesced_frames_high / + SSSNIC_COALESCE_PENDING_LIMIT_UNIT); +} + +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_intr_coal_info intr_coal_info = {0}; + u32 last_adaptive_rx; + int ret = 0; + + ret = sss_nic_coalesce_check(netdev, coal); + if (ret != 0) + return ret; + + sss_nic_coalesce_align_check(netdev, coal); + sss_nic_coalesce_change_check(netdev, coal, queue); + + sss_nic_ethtool_coalesce_to_intr_coal_info(&intr_coal_info, coal); + + last_adaptive_rx = nic_dev->use_adaptive_rx_coalesce; + nic_dev->use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + if (nic_dev->use_adaptive_rx_coalesce == 0 && + (intr_coal_info.coalesce_timer == 0 || + intr_coal_info.pending_limt == 0)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + if (SSS_CHANNEL_RES_VALID(nic_dev) != 0) { + if (nic_dev->use_adaptive_rx_coalesce == 0) + cancel_delayed_work_sync(&nic_dev->moderation_task); + else if (last_adaptive_rx == 0) + queue_delayed_work(nic_dev->workq, + &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + } + + return sss_nic_set_coal_param_to_hw(nic_dev, &intr_coal_info, queue); +} + +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; +} + +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; +} + +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext) +{ + if (sfp_type_ext < SSSNIC_SFP_TYPE_EXT_FLAG) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } +} + +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext) +{ + modinfo->type = ETH_MODULE_SFF_8636; 
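/* QSFP28 modules expose the SFF-8636 EEPROM layout */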
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; +} + +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag & SSSNIC_PRIV_FLAG_RQ_RECOVERY) { + if (!SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) { + nicif_info(nic_dev, drv, netdev, "Unsupport open rq recovery\n"); + return -EOPNOTSUPP; + } + + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + nicif_info(nic_dev, drv, netdev, "Succss to open rq recovery\n"); + } else { + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + return 0; + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + nicif_info(nic_dev, drv, netdev, "Success to close rq recovery\n"); + } + + return 0; +} + +int sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if ((flag & SSSNIC_PRIV_FLAG_SYMM_RSS) != 0) { + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + nicif_err(nic_dev, drv, netdev, "Fail to open Symmetric RSS while RSS is disabled\n"); + return -EOPNOTSUPP; + } + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } else { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX); + } + + return 0; +} + +void sss_nic_force_link_up(struct sss_nic_dev *nic_dev) +{ + if (SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return; + + if (netif_carrier_ok(nic_dev->netdev)) + return; + + nic_dev->link_status = true; + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Set link up\n"); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); +} + +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 link_status = 0; + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return 0; + + if (!SSS_CHANNEL_RES_VALID(nic_dev)) + return 0; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, link, nic_dev->netdev, "Fail to get link state: %d\n", ret); + return ret; + } + + nic_dev->link_status = link_status; + + if (link_status != 0) { + if (netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_on(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is up\n"); + } else { + if (!netif_carrier_ok(nic_dev->netdev)) + return 0; + + netif_carrier_off(nic_dev->netdev); + nicif_info(nic_dev, link, nic_dev->netdev, "Link state is down\n"); + } + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, nic_dev->link_status); + + return ret; +} + +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if ((flag & SSSNIC_PRIV_FLAG_LINK_UP) != 0) + sss_nic_force_link_up(nic_dev); + else + ret = sss_nic_force_link_down(nic_dev); + + return ret; +} + +int sss_nic_finish_loop_test(struct sss_nic_dev *nic_dev, + struct sk_buff *skb_tmp, u32 test_time) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + u32 cnt = test_time * SSSNIC_TEST_TIME_MULTIPLE; + struct sk_buff *skb = NULL; + int 
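/* despite the name, this holds the per-packet length in bytes (loop_pkt_len) */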
lp_pkt_cnt = nic_dev->loop_pkt_len; + unsigned char pkt_mark_data; + u32 i; + u32 j; + + for (i = 0; i < cnt; i++) { + nic_dev->loop_test_rx_cnt = 0; + memset(loop_test_rx_buf, 0, SSSNIC_LP_PKT_CNT * lp_pkt_cnt); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + nicif_err(nic_dev, drv, netdev, + "Fail to copy skb for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[lp_pkt_cnt - 1] = j; + + if (sss_nic_loop_start_xmit(skb, netdev) != NETDEV_TX_OK) { + dev_kfree_skb_any(skb); + nicif_err(nic_dev, drv, netdev, + "Fail to xmit pkt for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkts received to RX buffer */ + msleep(SSSNIC_WAIT_PKTS_TO_RX_BUFFER); + + for (j = 0; j < SSSNIC_LP_PKT_CNT; j++) { + pkt_mark_data = *(loop_test_rx_buf + (j * lp_pkt_cnt) + (lp_pkt_cnt - 1)); + if (memcmp((loop_test_rx_buf + (j * lp_pkt_cnt)), + skb_tmp->data, (lp_pkt_cnt - 1)) != 0 || + pkt_mark_data != j) { + nicif_err(nic_dev, drv, netdev, + "Fail to compare pkt in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * SSSNIC_LP_PKT_CNT)), + (lp_pkt_cnt - 1), pkt_mark_data); + return -EIO; + } + } + } + + return 0; +} + +static struct sk_buff *sss_nic_alloc_loop_skb(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct sk_buff *skb = NULL; + struct ethhdr *eth_hdr = NULL; + u8 *test_data = NULL; + u32 i; + + skb = alloc_skb(nic_dev->loop_pkt_len, GFP_ATOMIC); + if (!skb) + return skb; + + eth_hdr = __skb_put(skb, ETH_HLEN); + eth_hdr->h_proto = htons(ETH_P_ARP); + ether_addr_copy(eth_hdr->h_dest, nic_dev->netdev->dev_addr); + eth_zero_addr(eth_hdr->h_source); + skb_reset_mac_header(skb); + + test_data = __skb_put(skb, nic_dev->loop_pkt_len - ETH_HLEN); + for (i = ETH_HLEN; i < nic_dev->loop_pkt_len; i++) + test_data[i] = i & 0xFF; + + skb->queue_mapping = 0; + skb->dev = netdev; + skb->protocol = htons(ETH_P_ARP); + + return skb; +} + +static int sss_nic_run_loop_test(struct sss_nic_dev *nic_dev, u32 test_time) +{ + struct net_device *netdev = nic_dev->netdev; + struct sk_buff *skb_tmp = NULL; + int ret; + + skb_tmp = sss_nic_alloc_loop_skb(nic_dev); + if (!skb_tmp) { + nicif_err(nic_dev, drv, netdev, + "Fail to create lp test skb for loopback test\n"); + return -ENOMEM; + } + + ret = sss_nic_finish_loop_test(nic_dev, skb_tmp, test_time); + if (ret != 0) { + dev_kfree_skb_any(skb_tmp); + return ret; + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Success to loopback test.\n"); + return 0; +} + +static int sss_nic_do_loop_test(struct sss_nic_dev *nic_dev, u32 *flags, + u32 test_time, enum sss_nic_lp_test_type *test_index) +{ + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + *test_index = SSSNIC_INTERNAL_LP_TEST; + if (sss_nic_set_loopback_mode(nic_dev, + SSSNIC_INTERNAL_LP_MODE, true)) { + nicif_err(nic_dev, drv, netdev, + "Fail to set port loopback mode before loopback test\n"); + return -EFAULT; + } + + /* suspend 5000 ms, waiting for port to stop receiving frames */ + msleep(SSSNIC_WAIT_LOOP_TEST_FINISH_TIMEOUT); + } else { + *test_index = SSSNIC_EXTERNAL_LP_TEST; + } + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST); + + if (sss_nic_run_loop_test(nic_dev, test_time)) + ret = -EFAULT; + + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST); + msleep(SSSNIC_WAIT_CLEAR_LP_TEST); + + if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (sss_nic_set_loopback_mode(nic_dev, + 
SSSNIC_INTERNAL_LP_MODE, false)) { + nicif_err(nic_dev, drv, netdev, + "Fail to cancel port loopback mode after loopback test\n"); + ret = -EFAULT; + } + } else { + *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + return ret; +} + +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + enum sss_nic_lp_test_type test_type = SSSNIC_INTERNAL_LP_TEST; + u32 act_test_time = SSSNIC_LP_DEF_TIME; + u8 link_state = 0; + int ret; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_err(nic_dev, drv, netdev, + "Fail to entry loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[SSSNIC_PORT_DOWN_ERR_ID] = 1; + return; + } + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ret = sss_nic_do_loop_test(nic_dev, ð_test->flags, act_test_time, &test_type); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_type] = 1; + } + + netif_tx_wake_all_queues(netdev); + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (!ret && link_state) + netif_carrier_on(netdev); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h new file mode 100644 index 0000000000000000000000000000000000000000..9cfb72b2668d5f9af96f467fb5807254153f0021 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_api.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_API_H +#define SSS_NIC_ETHTOOL_API_H + +#include + +#define SSSNIC_PRIV_FLAG_SYMM_RSS BIT(0) +#define SSSNIC_PRIV_FLAG_LINK_UP BIT(1) +#define SSSNIC_PRIV_FLAG_RQ_RECOVERY BIT(2) + +#define SSSNIC_COALESCE_ALL_QUEUE 0xFFFF + +#define SSSNIC_SFP_TYPE_EXT_FLAG 0x3 + +typedef void (*sss_nic_get_module_info_t)(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); + +enum sss_nic_lp_test_type { + SSSNIC_INTERNAL_LP_TEST = 0, + SSSNIC_EXTERNAL_LP_TEST = 1, + SSSNIC_LP_TEST_TYPE_MAX = 2, +}; + +enum module_type { + SSSNIC_MODULE_TYPE_SFP = 0x3, + SSSNIC_MODULE_TYPE_QSFP = 0x0C, + SSSNIC_MODULE_TYPE_QSFP_PLUS = 0x0D, + SSSNIC_MODULE_TYPE_QSFP28 = 0x11, + SSSNIC_MODULE_TYPE_MAX, +}; + +void sss_nic_update_qp_depth(struct sss_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth); +int sss_nic_check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ringparam); +void sss_nic_intr_coal_to_ethtool_coal(struct ethtool_coalesce *ethtool_coal, + struct sss_nic_intr_coal_info *nic_coal); +int sss_nic_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethtool_coal, u16 queue); +int sss_nic_set_hw_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, struct sss_nic_intr_coal_info *coal); +int sss_nic_check_coal_param_support(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_valid(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_check_coal_param_range(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_coalesce_check(struct net_device *netdev, + const struct ethtool_coalesce *coal); +int sss_nic_set_coal_param_to_hw(struct sss_nic_dev *nic_dev, + struct sss_nic_intr_coal_info *intr_coal_info, u16 queue); +void sss_nic_coalesce_align_check(struct net_device *netdev, + struct ethtool_coalesce *coal); +void sss_nic_coalesce_change_check(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 
queue); +void sss_nic_ethtool_coalesce_to_intr_coal_info(struct sss_nic_intr_coal_info *nic_coal, + struct ethtool_coalesce *ethtool_coal); +int sss_nic_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue); +void sss_nic_module_type_sfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +void sss_nic_module_type_qsfp_plus(struct ethtool_modinfo *modinfo, u8 sfp_type_ext); +void sss_nic_module_type_qsfp28(struct ethtool_modinfo *modinfo, + u8 sfp_type_ext); +int sss_nic_set_rq_recovery_flag(struct net_device *netdev, + u32 flag); +int sss_nic_set_symm_rss_flag(struct net_device *netdev, u32 flag); +void sss_nic_force_link_up(struct sss_nic_dev *nic_dev); +int sss_nic_force_link_down(struct sss_nic_dev *nic_dev); +int sss_nic_set_force_link_flag(struct net_device *netdev, u32 flag); +void sss_nic_loop_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..d5b80dfb2f8bce3ec6f0990c3ee535bc729e4009 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats_api.h" + +typedef int (*sss_nic_ss_handler_t)(struct sss_nic_dev *nic_dev); + +struct sss_nic_handler { + int type; + sss_nic_ss_handler_t handler_func; +}; + +typedef void (*sss_nic_strings_handler_t)(struct sss_nic_dev *nic_dev, + u8 *buffer); + +struct sss_nic_get_strings { + int type; + sss_nic_strings_handler_t handler_func; +}; + +int sss_nic_get_sset_count(struct net_device *netdev, int settings) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_handler handler[] = { + {ETH_SS_TEST, sss_nic_eth_ss_test}, + {ETH_SS_STATS, sss_nic_eth_ss_stats}, + {ETH_SS_PRIV_FLAGS, sss_nic_eth_ss_priv_flags}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if (settings == handler[i].type) + return handler[i].handler_func(nic_dev); + + return -EOPNOTSUPP; +} + +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + u16 cnt; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + cnt = sss_nic_get_ethtool_dev_stats(nic_dev, data); + + cnt += sss_nic_get_ethtool_vport_stats(nic_dev, data + cnt); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + cnt += sss_nic_get_ethtool_port_stats(nic_dev, data + cnt); + + sss_nic_get_drv_queue_stats(nic_dev, data + cnt); +} + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + int i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + struct sss_nic_get_strings handler[] = { + {ETH_SS_TEST, sss_nic_get_test_strings}, + {ETH_SS_STATS, sss_nic_get_drv_stats_strings}, + {ETH_SS_PRIV_FLAGS, sss_nic_get_priv_flags_strings}, + }; + + for (i = 0; i < ARRAY_LEN(handler); i++) + if 
(stringset == handler[i].type) + return handler[i].handler_func(nic_dev, buf); + + nicif_err(nic_dev, drv, netdev, "Invalid string set %u.", stringset); +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting) +{ + int ret; + struct sss_nic_cmd_link_settings cmd = {0}; + + sss_nic_ethtool_ksetting_clear(ksetting, supported); + sss_nic_ethtool_ksetting_clear(ksetting, advertising); + + ret = sss_nic_get_link_setting(net_dev, &cmd); + if (ret != 0) + return ret; + + sss_nic_copy_ksetting(ksetting, &cmd); + + return 0; +} +#endif +#endif + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ksettings) +{ + /* Only support to set autoneg and speed */ + return sssnic_set_link_settings(netdev, + ksettings->base.autoneg, ksettings->base.speed); +} +#endif +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..3e3d6e1aa8d63860146a07d4bd44306510e54d7e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_H +#define SSS_NIC_ETHTOOL_STATS_H + +#include +#include + +#include "sss_kernel.h" + +void sss_nic_get_strings(struct net_device *netdev, u32 stringset, u8 *buf); +void sss_nic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +int sss_nic_get_sset_count(struct net_device *netdev, int settings); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int sss_nic_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *ksetting); +int sss_nic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ksettings); +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c new file mode 100644 index 0000000000000000000000000000000000000000..91de82b658fa4664b23aae9f323256dc103e934c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_ethtool_stats_api.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" + +#define SSSNIC_SET_SUPPORTED_MODE 0 +#define SSSNIC_SET_ADVERTISED_MODE 1 + +#define SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) +#define SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) + +#define SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(ecmd, mode, op) \ +do { \ + u32 _link_mode; \ + unsigned long *val = (op == 
SSSNIC_SET_SUPPORTED_MODE) ? \ + (ecmd)->supported : (ecmd)->advertising; \ + for (_link_mode = 0; _link_mode < g_link_mode_table[mode].array_len; _link_mode++) { \ + if (g_link_mode_table[mode].array[_link_mode] >= \ + __ETHTOOL_LINK_MODE_MASK_NBITS) \ + continue; \ + set_bit(g_link_mode_table[mode].array[_link_mode], val); \ + } \ +} while (0) + +#define SSSNIC_NETDEV_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct rtnl_link_stats64, _item), \ + .offset = offsetof(struct rtnl_link_stats64, _item) \ +} + +#define SSSNIC_TX_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_tx_stats, _item), \ + .offset = offsetof(struct sss_nic_tx_stats, _item) \ +} + +#define SSSNIC_RQ_STATS(_item) { \ + .name = "rxq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_rq_stats, _item), \ + .offset = offsetof(struct sss_nic_rq_stats, _item) \ +} + +#define SSSNIC_SQ_STATS(_item) { \ + .name = "txq%d_"#_item, \ + .len = FIELD_SIZEOF(struct sss_nic_sq_stats, _item), \ + .offset = offsetof(struct sss_nic_sq_stats, _item) \ +} + +#define SSSNIC_FUNCTION_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_port_stats, _item), \ + .offset = offsetof(struct sss_nic_port_stats, _item) \ +} + +#define SSSNIC_PORT_STATS(_item) { \ + .name = #_item, \ + .len = FIELD_SIZEOF(struct sss_nic_mag_port_stats, _item), \ + .offset = offsetof(struct sss_nic_mag_port_stats, _item) \ +} + +#define SSSNIC_GET_VALUE_OF_PTR(len, ptr) ( \ + (len) == sizeof(u64) ? *(u64 *)(ptr) : \ + (len) == sizeof(u32) ? *(u32 *)(ptr) : \ + (len) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define SSSNIC_DEV_STATS_PACK(items, item_idx, array, stats_ptr) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, SSS_TOOL_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) \ +do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + SSS_TOOL_SHOW_ITEM_LEN); \ + snprintf((items)[item_idx].name, SSS_TOOL_SHOW_ITEM_LEN, \ + (array)[j].name, (qid)); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + SSSNIC_GET_VALUE_OF_PTR((array)[j].len, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +#define SSSNIC_CONVERT_DATA_TYPE(len, p) (((len) == sizeof(u64)) ? *(u64 *)(p) : *(u32 *)(p)) +#define SSSNIC_AUTONEG_STRING(autoneg) ((autoneg) ? ("autong enable") : ("autong disable")) +#define SSSNIC_AUTONEG_ENABLE(autoneg) ((autoneg) ? 
SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF) + +#define SSSNIC_NEGATE_ZERO_U32 ((u32)~0) + +struct sss_nic_hw2ethtool_link_mode { + const u32 *array; + u32 array_len; + u32 speed; +}; + +typedef void (*sss_nic_port_type_handler_t)(struct sss_nic_cmd_link_settings *cmd); + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd); +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd); + +static char g_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +static char g_priv_flags_strings[][ETH_GSTRING_LEN] = { + "Symmetric-RSS", + "Force-Link-up", + "Rxq_Recovery", +}; + +static struct sss_nic_stats g_nic_sq_stats[] = { + SSSNIC_SQ_STATS(tx_packets), + SSSNIC_SQ_STATS(tx_bytes), + SSSNIC_SQ_STATS(tx_busy), + SSSNIC_SQ_STATS(wake), + SSSNIC_SQ_STATS(tx_dropped), +}; + +static struct sss_nic_stats g_nic_sq_stats_extern[] = { + SSSNIC_SQ_STATS(skb_pad_err), + SSSNIC_SQ_STATS(offload_err), + SSSNIC_SQ_STATS(dma_map_err), + SSSNIC_SQ_STATS(unknown_tunnel_proto), + SSSNIC_SQ_STATS(frag_size_zero), + SSSNIC_SQ_STATS(frag_len_overflow), + SSSNIC_SQ_STATS(rsvd1), + SSSNIC_SQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_nic_rq_stats[] = { + SSSNIC_RQ_STATS(rx_packets), + SSSNIC_RQ_STATS(rx_bytes), + SSSNIC_RQ_STATS(errors), + SSSNIC_RQ_STATS(csum_errors), + SSSNIC_RQ_STATS(other_errors), + SSSNIC_RQ_STATS(rx_dropped), +#ifdef HAVE_XDP_SUPPORT + SSSNIC_RQ_STATS(xdp_dropped), +#endif + SSSNIC_RQ_STATS(rx_buf_errors), +}; + +static struct sss_nic_stats g_nic_rq_stats_extern[] = { + SSSNIC_RQ_STATS(alloc_rx_dma_err), + SSSNIC_RQ_STATS(alloc_skb_err), + SSSNIC_RQ_STATS(reset_drop_sge), + SSSNIC_RQ_STATS(large_xdp_pkts), + SSSNIC_RQ_STATS(rsvd2), +}; + +static struct sss_nic_stats g_netdev_stats[] = { + SSSNIC_NETDEV_STATS(rx_packets), + SSSNIC_NETDEV_STATS(tx_packets), + SSSNIC_NETDEV_STATS(rx_bytes), + SSSNIC_NETDEV_STATS(tx_bytes), + SSSNIC_NETDEV_STATS(rx_errors), + SSSNIC_NETDEV_STATS(tx_errors), + SSSNIC_NETDEV_STATS(rx_dropped), + SSSNIC_NETDEV_STATS(tx_dropped), + SSSNIC_NETDEV_STATS(multicast), + SSSNIC_NETDEV_STATS(collisions), + SSSNIC_NETDEV_STATS(rx_length_errors), + SSSNIC_NETDEV_STATS(rx_over_errors), + SSSNIC_NETDEV_STATS(rx_crc_errors), + SSSNIC_NETDEV_STATS(rx_frame_errors), + SSSNIC_NETDEV_STATS(rx_fifo_errors), + SSSNIC_NETDEV_STATS(rx_missed_errors), + SSSNIC_NETDEV_STATS(tx_aborted_errors), + SSSNIC_NETDEV_STATS(tx_carrier_errors), + SSSNIC_NETDEV_STATS(tx_fifo_errors), + SSSNIC_NETDEV_STATS(tx_heartbeat_errors), +}; + +static struct sss_nic_stats g_dev_stats[] = { + SSSNIC_TX_STATS(tx_timeout), +}; + +static struct sss_nic_stats g_dev_stats_extern[] = { + SSSNIC_TX_STATS(tx_drop), + SSSNIC_TX_STATS(tx_invalid_qid), + SSSNIC_TX_STATS(rsvd1), + SSSNIC_TX_STATS(rsvd2), +}; + +static struct sss_nic_stats g_function_stats[] = { + SSSNIC_FUNCTION_STATS(tx_unicast_pkts), + SSSNIC_FUNCTION_STATS(tx_unicast_bytes), + SSSNIC_FUNCTION_STATS(tx_multicast_pkts), + SSSNIC_FUNCTION_STATS(tx_multicast_bytes), + SSSNIC_FUNCTION_STATS(tx_broadcast_pkts), + SSSNIC_FUNCTION_STATS(tx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(rx_unicast_pkts), + SSSNIC_FUNCTION_STATS(rx_unicast_bytes), + SSSNIC_FUNCTION_STATS(rx_multicast_pkts), + SSSNIC_FUNCTION_STATS(rx_multicast_bytes), + SSSNIC_FUNCTION_STATS(rx_broadcast_pkts), + 
SSSNIC_FUNCTION_STATS(rx_broadcast_bytes), + + SSSNIC_FUNCTION_STATS(tx_discard), + SSSNIC_FUNCTION_STATS(rx_discard), + SSSNIC_FUNCTION_STATS(tx_err), + SSSNIC_FUNCTION_STATS(rx_err), +}; + +static struct sss_nic_stats g_port_stats[] = { + SSSNIC_PORT_STATS(tx_fragment_pkts), + SSSNIC_PORT_STATS(tx_undersize_pkts), + SSSNIC_PORT_STATS(tx_undermin_pkts), + SSSNIC_PORT_STATS(tx_64_oct_pkts), + SSSNIC_PORT_STATS(tx_65_127_oct_pkts), + SSSNIC_PORT_STATS(tx_128_255_oct_pkts), + SSSNIC_PORT_STATS(tx_256_511_oct_pkts), + SSSNIC_PORT_STATS(tx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(tx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(tx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(tx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(tx_8192_9216_oct_pkts), + SSSNIC_PORT_STATS(tx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(tx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(tx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(tx_1519_max_good_pkts), + SSSNIC_PORT_STATS(tx_oversize_pkts), + SSSNIC_PORT_STATS(tx_jabber_pkts), + SSSNIC_PORT_STATS(tx_bad_pkts), + SSSNIC_PORT_STATS(tx_bad_octs), + SSSNIC_PORT_STATS(tx_good_pkts), + SSSNIC_PORT_STATS(tx_good_octs), + SSSNIC_PORT_STATS(tx_total_pkts), + SSSNIC_PORT_STATS(tx_total_octs), + SSSNIC_PORT_STATS(tx_uni_pkts), + SSSNIC_PORT_STATS(tx_multi_pkts), + SSSNIC_PORT_STATS(tx_broad_pkts), + SSSNIC_PORT_STATS(tx_pauses), + SSSNIC_PORT_STATS(tx_pfc_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(tx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(tx_control_pkts), + SSSNIC_PORT_STATS(tx_err_all_pkts), + SSSNIC_PORT_STATS(tx_from_app_good_pkts), + SSSNIC_PORT_STATS(tx_from_app_bad_pkts), + + SSSNIC_PORT_STATS(rx_fragment_pkts), + SSSNIC_PORT_STATS(rx_undersize_pkts), + SSSNIC_PORT_STATS(rx_undermin_pkts), + SSSNIC_PORT_STATS(rx_64_oct_pkts), + SSSNIC_PORT_STATS(rx_65_127_oct_pkts), + SSSNIC_PORT_STATS(rx_128_255_oct_pkts), + SSSNIC_PORT_STATS(rx_256_511_oct_pkts), + SSSNIC_PORT_STATS(rx_512_1023_oct_pkts), + SSSNIC_PORT_STATS(rx_1024_1518_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_2047_oct_pkts), + SSSNIC_PORT_STATS(rx_2048_4095_oct_pkts), + SSSNIC_PORT_STATS(rx_4096_8191_oct_pkts), + SSSNIC_PORT_STATS(rx_8192_9216_oct_pkts), + SSSNIC_PORT_STATS(rx_9217_12287_oct_pkts), + SSSNIC_PORT_STATS(rx_12288_16383_oct_pkts), + SSSNIC_PORT_STATS(rx_1519_max_bad_pkts), + SSSNIC_PORT_STATS(rx_1519_max_good_pkts), + SSSNIC_PORT_STATS(rx_oversize_pkts), + SSSNIC_PORT_STATS(rx_jabber_pkts), + SSSNIC_PORT_STATS(rx_bad_pkts), + SSSNIC_PORT_STATS(rx_bad_octs), + SSSNIC_PORT_STATS(rx_good_pkts), + SSSNIC_PORT_STATS(rx_good_octs), + SSSNIC_PORT_STATS(rx_total_pkts), + SSSNIC_PORT_STATS(rx_total_octs), + SSSNIC_PORT_STATS(rx_uni_pkts), + SSSNIC_PORT_STATS(rx_multi_pkts), + SSSNIC_PORT_STATS(rx_broad_pkts), + SSSNIC_PORT_STATS(rx_pauses), + SSSNIC_PORT_STATS(rx_pfc_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri0_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri1_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri2_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri3_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri4_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri5_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri6_pkts), + SSSNIC_PORT_STATS(rx_pfc_pri7_pkts), + SSSNIC_PORT_STATS(rx_control_pkts), + SSSNIC_PORT_STATS(rx_sym_err_pkts), + SSSNIC_PORT_STATS(rx_fcs_err_pkts), + SSSNIC_PORT_STATS(rx_send_app_good_pkts), + 
SSSNIC_PORT_STATS(rx_send_app_bad_pkts), + SSSNIC_PORT_STATS(rx_unfilter_pkts), +}; + +static const u32 g_mag_link_mode_ge[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 g_mag_link_mode_10ge_base_r[] = { + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 g_mag_link_mode_25ge_base_r[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 g_mag_link_mode_40ge_base_r4[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_50ge_base_r2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_100ge_base_r4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +static const u32 g_mag_link_mode_200ge_base_r4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static const struct sss_nic_hw2ethtool_link_mode + g_link_mode_table[SSSNIC_LINK_MODE_MAX_NUMBERS] = { + [SSSNIC_LINK_MODE_GE] = { + .array = g_mag_link_mode_ge, + .array_len = ARRAY_LEN(g_mag_link_mode_ge), + .speed = SPEED_1000, + }, + [SSSNIC_LINK_MODE_10GE_BASE_R] = { + .array = g_mag_link_mode_10ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_10ge_base_r), + .speed = SPEED_10000, + }, + [SSSNIC_LINK_MODE_25GE_BASE_R] = { + .array = g_mag_link_mode_25ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_25ge_base_r), + .speed = SPEED_25000, + }, + [SSSNIC_LINK_MODE_40GE_BASE_R4] = { + .array = g_mag_link_mode_40ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_40ge_base_r4), + .speed = SPEED_40000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R] = { + .array = g_mag_link_mode_50ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_50GE_BASE_R2] = { + .array = g_mag_link_mode_50ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_50ge_base_r2), + .speed = SPEED_50000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R] = { + .array = g_mag_link_mode_100ge_base_r, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r), + .speed = 
SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R2] = { + .array = g_mag_link_mode_100ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r2), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_100GE_BASE_R4] = { + .array = g_mag_link_mode_100ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_100ge_base_r4), + .speed = SPEED_100000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R2] = { + .array = g_mag_link_mode_200ge_base_r2, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r2), + .speed = SPEED_200000, + }, + [SSSNIC_LINK_MODE_200GE_BASE_R4] = { + .array = g_mag_link_mode_200ge_base_r4, + .array_len = ARRAY_LEN(g_mag_link_mode_200ge_base_r4), + .speed = SPEED_200000, + }, +}; + +/* Related to enum sss_nic_mag_opcode_port_speed */ +static u32 g_hw_to_ethtool_speed[] = { + (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000 +}; + +static sss_nic_port_type_handler_t g_link_port_set_handler[] = { + NULL, + sss_nic_set_fibre_port, + sss_nic_set_fibre_port, + sss_nic_set_da_port, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + sss_nic_set_fibre_port, + sss_nic_set_tp_port, + sss_nic_set_none_port +}; + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev) +{ + u32 count; + + count = ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_dev_stats_extern) + + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_sq_stats_extern) + + ARRAY_LEN(g_nic_rq_stats) + + ARRAY_LEN(g_nic_rq_stats_extern)) * nic_dev->max_qp_num; + + return count; +} + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_test_strings); +} + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev) +{ + int count; + int q_num; + + q_num = nic_dev->qp_res.qp_num; + count = ARRAY_LEN(g_netdev_stats) + ARRAY_LEN(g_dev_stats) + + ARRAY_LEN(g_function_stats) + (ARRAY_LEN(g_nic_sq_stats) + + ARRAY_LEN(g_nic_rq_stats)) * q_num; + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(g_port_stats); + + return count; +} + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev) +{ + return ARRAY_LEN(g_priv_flags_strings); +} + +static void sss_nic_get_ethtool_stats_data(char *ethtool_stats, + struct sss_nic_stats *stats, u16 stats_len, u64 *data) +{ + u16 i = 0; + u16 j = 0; + char *ptr = NULL; + + for (j = 0; j < stats_len; j++) { + ptr = ethtool_stats + stats[j].offset; + data[i] = SSSNIC_CONVERT_DATA_TYPE(stats[j].len, ptr); + i++; + } +} + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + u16 cnt = 0; +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev, &temp); +#else + const struct net_device_stats *net_stats = NULL; + + net_stats = dev_get_stats(nic_dev->netdev); +#endif + + sss_nic_get_ethtool_stats_data((char *)net_stats, g_netdev_stats, + ARRAY_LEN(g_netdev_stats), data); + cnt += ARRAY_LEN(g_netdev_stats); + + sss_nic_get_ethtool_stats_data((char *)&nic_dev->tx_stats, g_dev_stats, + ARRAY_LEN(g_dev_stats), data + cnt); + cnt += ARRAY_LEN(g_dev_stats); + + return cnt; +} + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, u64 *data) +{ + 
u16 qid; + struct sss_nic_rq_stats rq_stats = {0}; + struct sss_nic_sq_stats sq_stats = {0}; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->sq_desc_group) + break; + + sss_nic_get_sq_stats(&nic_dev->sq_desc_group[qid], &sq_stats); + sss_nic_get_ethtool_stats_data((char *)&sq_stats, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), + data + qid * ARRAY_LEN(g_nic_sq_stats)); + } + + data += ARRAY_LEN(g_nic_sq_stats) * nic_dev->qp_res.qp_num; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!nic_dev->rq_desc_group) + break; + + sss_nic_get_rq_stats(&nic_dev->rq_desc_group[qid], &rq_stats); + sss_nic_get_ethtool_stats_data((char *)&rq_stats, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), + data + qid * ARRAY_LEN(g_nic_rq_stats)); + } +} + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + struct sss_nic_port_stats vport_stats = {0}; + + ret = sss_nic_get_vport_stats(nic_dev, sss_get_global_func_id(nic_dev->hwdev), + &vport_stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get function stats from fw, ret:%d\n", ret); + return ARRAY_LEN(g_function_stats); + } + sss_nic_get_ethtool_stats_data((char *)&vport_stats, g_function_stats, + ARRAY_LEN(g_function_stats), data); + + return ARRAY_LEN(g_function_stats); +} + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data) +{ + int ret; + u16 i = 0; + struct sss_nic_mag_port_stats *stats = NULL; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) { + memset(&data[i], 0, ARRAY_LEN(g_port_stats) * sizeof(*data)); + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Malloc port stats\n"); + return ARRAY_LEN(g_port_stats); + } + + ret = sss_nic_get_phy_port_stats(nic_dev, stats); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port stats from fw\n"); + goto out; + } + + sss_nic_get_ethtool_stats_data((char *)stats, g_port_stats, + ARRAY_LEN(g_port_stats), data); + +out: + kfree(stats); + + return ARRAY_LEN(g_port_stats); +} + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 stats_len, char *buffer) +{ + u16 i; + + for (i = 0; i < stats_len; i++) { + memcpy(buffer, stats[i].name, ETH_GSTRING_LEN); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = + sss_nic_get_stats_strings(g_netdev_stats, ARRAY_LEN(g_netdev_stats), buffer); + cnt += sss_nic_get_stats_strings(g_dev_stats, ARRAY_LEN(g_dev_stats), + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 cnt = sss_nic_get_stats_strings(g_function_stats, + ARRAY_LEN(g_function_stats), buffer); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return cnt; + + cnt += sss_nic_get_stats_strings(g_port_stats, + ARRAY_LEN(g_port_stats), buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer) +{ + int ret; + u16 i; + + for (i = 0; i < stats_len; i++) { + ret = sprintf(buffer, stats[i].name, qid); + if (ret < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to sprintf stats name:%s, qid: %u, stats id: %u\n", + stats[i].name, qid, i); + buffer += ETH_GSTRING_LEN; + } + + return i; +} + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer) +{ + u16 qid = 0; + u16 cnt = 0; + + for (qid = 0; qid < 
nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_sq_stats, + ARRAY_LEN(g_nic_sq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + cnt += sss_nic_get_queue_stats_cnt(nic_dev, g_nic_rq_stats, + ARRAY_LEN(g_nic_rq_stats), qid, + buffer + cnt * ETH_GSTRING_LEN); + + return cnt; +} + +void sss_nic_get_test_strings(struct sss_nic_dev *nic_dev, u8 *buffer) +{ + memcpy(buffer, *g_test_strings, sizeof(g_test_strings)); +} + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + u16 offset = 0; + + offset = sss_nic_get_drv_dev_strings(nic_dev, buffer); + offset += sss_nic_get_hw_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); + sss_nic_get_qp_stats_strings(nic_dev, buffer + offset * ETH_GSTRING_LEN); +} + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer) +{ + memcpy(buffer, g_priv_flags_strings, sizeof(g_priv_flags_strings)); +} + +int sss_nic_get_speed_level(u32 speed) +{ + int level; + + for (level = 0; level < ARRAY_LEN(g_hw_to_ethtool_speed); level++) { + if (g_hw_to_ethtool_speed[level] == speed) + break; + } + + return level; +} + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, + u32 hw_mode, u32 op) +{ + u32 i; + + for (i = 0; i < SSSNIC_LINK_MODE_MAX_NUMBERS; i++) { + if (test_bit(i, (unsigned long *)&hw_mode)) + SSSNIC_ETHTOOL_ADD_SPPED_LINK_MODE(cmd, i, op); + } +} + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info) +{ + int ret; + u8 state = 0; + + if (port_info->supported_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->supported_mode, + SSSNIC_SET_SUPPORTED_MODE); + if (port_info->advertised_mode != SSSNIC_LINK_MODE_UNKNOWN) + sss_nic_add_ethtool_link_mode(cmd, + port_info->advertised_mode, + SSSNIC_SET_ADVERTISED_MODE); + + ret = sss_nic_get_hw_link_state(nic_dev, &state); + if ((ret != 0) || (state == 0)) { + cmd->duplex = DUPLEX_UNKNOWN; + cmd->speed = (u32)SPEED_UNKNOWN; + return; + } + + cmd->duplex = port_info->duplex; + cmd->speed = port_info->speed < ARRAY_LEN(g_hw_to_ethtool_speed) ? 
+ g_hw_to_ethtool_speed[port_info->speed] : (u32)SPEED_UNKNOWN; +} + +static void sss_nic_set_fibre_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_FIBRE; +} + +static void sss_nic_set_da_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, FIBRE); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, FIBRE); + cmd->port = PORT_DA; +} + +static void sss_nic_set_tp_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, TP); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, TP); + cmd->port = PORT_TP; +} + +static void sss_nic_set_none_port(struct sss_nic_cmd_link_settings *cmd) +{ + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Backplane); + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Backplane); + cmd->port = PORT_NONE; +} + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type) +{ + if (port_type >= ARRAY_LEN(g_link_port_set_handler)) { + cmd->port = PORT_OTHER; + return; + } + + if (!g_link_port_set_handler[port_type]) { + cmd->port = PORT_OTHER; + return; + } + + g_link_port_set_handler[port_type](cmd); +} + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_pause_cfg pause_config = {0}; + + ret = sss_nic_get_hw_pause_info(nic_dev, &pause_config); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get pauseparam from hw\n"); + return ret; + } + + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Pause); + if ((pause_config.rx_pause != 0) && (pause_config.tx_pause != 0)) { + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + return 0; + } + + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Asym_Pause); + if (pause_config.rx_pause != 0) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Pause); + + return 0; +} + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(net_dev); + struct sss_nic_port_info info = {0}; + + ret = sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, net_dev, "Fail to get port info\n"); + return ret; + } + + sss_nic_set_link_speed(nic_dev, cmd, &info); + sss_nic_link_port_type(cmd, info.port_type); + + cmd->autoneg = info.autoneg_state == SSSNIC_PORT_CFG_AN_ON ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + if (info.autoneg_cap != 0) + SSSNIC_ETHTOOL_ADD_SUPPORTED_LINK_MODE(cmd, Autoneg); + if (info.autoneg_state == SSSNIC_PORT_CFG_AN_ON) + SSSNIC_ETHTOOL_ADD_ADVERTISED_LINK_MODE(cmd, Autoneg); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + ret = sss_nic_get_link_pause_setting(nic_dev, cmd); + + return ret; +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd) +{ + struct ethtool_link_settings *setting = &ksetting->base; + + bitmap_copy(ksetting->link_modes.advertising, cmd->advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(ksetting->link_modes.supported, cmd->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + setting->speed = cmd->speed; + setting->duplex = cmd->duplex; + setting->port = cmd->port; + setting->autoneg = cmd->autoneg; +} +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed) +{ + u32 link_mode; + + for (link_mode = 0; link_mode < SSSNIC_LINK_MODE_MAX_NUMBERS; link_mode++) { + if ((support_mode & BIT(link_mode)) == 0) + continue; + + if (g_link_mode_table[link_mode].speed == speed) + return true; + } + + return false; +} + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings) +{ + struct sss_nic_port_info info = {0}; + int ret; + int level; + + ret = sss_nic_get_hw_port_info(nic_dev, &info, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get port info\n"); + return -EAGAIN; + } + + if (info.autoneg_cap != 0) + *settings |= SSSNIC_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (info.autoneg_cap == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport autoneg\n"); + return -EOPNOTSUPP; + } + + return 0; + } + + if (speed != (u32)SPEED_UNKNOWN) { + if ((info.supported_mode == SSSNIC_LINK_MODE_UNKNOWN) || + (info.advertised_mode == SSSNIC_LINK_MODE_UNKNOWN)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport link mode\n"); + return -EAGAIN; + } + + /* Set speed only when autoneg is disable */ + level = sss_nic_get_speed_level(speed); + if ((level >= SSSNIC_PORT_SPEED_UNKNOWN) || + (!sss_nic_is_support_speed(info.supported_mode, speed))) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport speed: %u\n", speed); + return -EINVAL; + } + + *settings |= SSSNIC_LINK_SET_SPEED; + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "Set speed when autoneg is off\n"); + return -EOPNOTSUPP; +} + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings) +{ + int ret; + int level = 0; + char cmd_str[128] = {0}; + struct sss_nic_link_ksettings cmd = {0}; + struct net_device *netdev = nic_dev->netdev; + char *str = (bool)((settings & SSSNIC_LINK_SET_AUTONEG) != 0) ? 
+ SSSNIC_AUTONEG_STRING((bool)autoneg) : ""; + + ret = snprintf(cmd_str, sizeof(cmd_str) - 1, "%s", str); + if (ret < 0) + return -EINVAL; + + if ((settings & SSSNIC_LINK_SET_SPEED) != 0) { + level = sss_nic_get_speed_level(speed); + ret = sprintf(cmd_str + strlen(cmd_str), "speed %u ", speed); + if (ret < 0) + return -EINVAL; + } + + cmd.valid_bitmap = settings; + cmd.autoneg = SSSNIC_AUTONEG_ENABLE((bool)autoneg); + cmd.speed = (u8)level; + + ret = sss_nic_set_link_settings(nic_dev, &cmd); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set %s\n", cmd_str); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set %s, ret: %d\n", cmd_str, ret); + return 0; +} + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 settings = 0; + int ret; + + ret = sss_nic_get_link_settings_param(nic_dev, autoneg, speed, &settings); + if (ret != 0) + return ret; + + if (settings != 0) + return sss_nic_set_settings_to_hw(nic_dev, autoneg, speed, settings); + + nicif_info(nic_dev, drv, netdev, "Nothing change, exit.\n"); + + return 0; +} + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats) +{ + struct sss_tool_show_item *items = stats; + int item_idx = 0; + u16 qid; + + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats, &nic_dev->tx_stats); + SSSNIC_DEV_STATS_PACK(items, item_idx, g_dev_stats_extern, + &nic_dev->tx_stats); + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats, + &nic_dev->sq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_sq_stats_extern, + &nic_dev->sq_desc_group[qid].stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats, + &nic_dev->rq_desc_group[qid].stats, qid); + SSSNIC_QUEUE_STATS_PACK(items, item_idx, g_nic_rq_stats_extern, + &nic_dev->rq_desc_group[qid].stats, qid); + } +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h new file mode 100644 index 0000000000000000000000000000000000000000..cf2b1cbe894a1af56ed947b8a3e327c466ba4f3b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ethtool_stats_api.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_ETHTOOL_STATS_API_H +#define SSS_NIC_ETHTOOL_STATS_API_H + +#include +#include + +#include "sss_kernel.h" + +struct sss_nic_stats { + char name[ETH_GSTRING_LEN]; + u32 len; + int offset; +}; + +struct sss_nic_cmd_link_settings { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define sss_nic_ethtool_ksetting_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) + +int sss_nic_eth_ss_test(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_stats(struct sss_nic_dev *nic_dev); + +int sss_nic_eth_ss_priv_flags(struct sss_nic_dev *nic_dev); + +u16 sss_nic_get_ethtool_dev_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +void sss_nic_get_drv_queue_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +int sss_nic_get_ethtool_vport_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_ethtool_port_stats(struct sss_nic_dev *nic_dev, + u64 *data); + +u16 sss_nic_get_stats_strings(struct sss_nic_stats *stats, + u16 
stats_len, char *buffer); + +u16 sss_nic_get_drv_dev_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +u16 sss_nic_get_hw_stats_strings(struct sss_nic_dev *nic_dev, + char *buffer); + +int sss_nic_get_queue_stats_cnt(const struct sss_nic_dev *nic_dev, + struct sss_nic_stats *stats, u16 stats_len, u16 qid, char *buffer); + +u16 sss_nic_get_qp_stats_strings(const struct sss_nic_dev *nic_dev, + char *buffer); + +void sss_nic_get_test_strings(struct sss_nic_dev *nic_dev, u8 *buffer); + +void sss_nic_get_drv_stats_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +void sss_nic_get_priv_flags_strings(struct sss_nic_dev *nic_dev, + u8 *buffer); + +int sss_nic_get_speed_level(u32 speed); + +void sss_nic_add_ethtool_link_mode(struct sss_nic_cmd_link_settings *cmd, u32 hw_mode, u32 op); + +void sss_nic_set_link_speed(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd, + struct sss_nic_port_info *port_info); + +void sss_nic_link_port_type(struct sss_nic_cmd_link_settings *cmd, + u8 port_type); + +int sss_nic_get_link_pause_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_cmd_link_settings *cmd); + +int sss_nic_get_link_setting(struct net_device *net_dev, + struct sss_nic_cmd_link_settings *cmd); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +void sss_nic_copy_ksetting(struct ethtool_link_ksettings *ksetting, + struct sss_nic_cmd_link_settings *cmd); +#endif +#endif + +bool sss_nic_is_support_speed(u32 support_mode, u32 speed); + +int sss_nic_get_link_settings_param(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *settings); + +int sss_nic_set_settings_to_hw(struct sss_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 settings); + +int sssnic_set_link_settings(struct net_device *netdev, + u8 autoneg, u32 speed); + +void sss_nic_get_io_stats(const struct sss_nic_dev *nic_dev, void *stats); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c new file mode 100644 index 0000000000000000000000000000000000000000..9ea8113edf049c29e3fea176ea66f84d973d9199 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +#define SSSNIC_VF_UNREGISTER 0 + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void 
*in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size); + +static const struct nic_event_handler g_event_proc[] = { + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .event_handler = sss_nic_dcb_state_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_TX_PAUSE_EXCP_NOTICE, + .event_handler = sss_nic_tx_pause_event_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE, + .event_handler = sss_nic_bond_active_event_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler g_vf_cmd_proc[] = { + { + .opcode = SSSNIC_MBX_OPCODE_VF_REGISTER, + .msg_handler = sss_nic_register_vf_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_VF_COS, + .msg_handler = sss_nic_get_vf_cos_msg_handler + }, + + { + .opcode = SSSNIC_MBX_OPCODE_GET_MAC, + .msg_handler = sss_nic_get_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_SET_MAC, + .msg_handler = sss_nic_set_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_DEL_MAC, + .msg_handler = sss_nic_del_vf_mac_msg_handler, + }, + + { + .opcode = SSSNIC_MBX_OPCODE_UPDATE_MAC, + .msg_handler = sss_nic_update_vf_mac_msg_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_event_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_event_proc); + + for (i = 0; i < cmd_num; i++) + if (g_event_proc[i].opcode == opcode) + return &g_event_proc[i]; + + return NULL; +} + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_vf_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_vf_cmd_proc[i].opcode == opcode) + return &g_vf_cmd_proc[i]; + + return NULL; +} + +static int sss_nic_init_vf_config(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + u16 func_id; + int ret; + + vf_info->specified_mac = false; + ether_addr_copy(vf_info->drv_mac, vf_info->user_mac); + + if (!is_zero_ether_addr(vf_info->drv_mac)) { + vf_info->specified_mac = true; + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_set_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set VF %d MAC, ret: %d\n", id, ret); + return ret; + } + } + + if (SSSNIC_GET_VLAN_PRIO(vf_info->pf_vlan, vf_info->pf_qos) != 0) { + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, + vf_info->pf_vlan, vf_info->pf_qos, vf_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to add VF %d VLAN_QOS, ret: %d\n", id, ret); + return ret; + } + } + + if (vf_info->max_rate != 0) { + ret = sss_nic_set_vf_tx_rate_limit(nic_io, vf_id, + vf_info->min_rate, vf_info->max_rate); + if (ret != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d max rate %u, min rate %u, ret: %d\n", + id, vf_info->max_rate, vf_info->min_rate, ret); + return ret; + } + } + + return 0; +} + +static int sss_nic_attach_vf(struct sss_nic_io *nic_io, u16 vf_id, u32 extra_feature) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + int ret; 
+ + vf_info->extra_feature = extra_feature; + + if (vf_id > nic_io->max_vf_num) { + nic_err(nic_io->dev_hdl, "Fail to register VF id %d out of range: [0-%d]\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), SSSNIC_HW_VF_ID_TO_OS(nic_io->max_vf_num)); + return -EFAULT; + } + + ret = sss_nic_init_vf_config(nic_io, vf_id); + if (ret != 0) + return ret; + + vf_info->attach = true; + + return 0; +} + +int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id) +{ + struct sss_nic_mbx_mac_addr cmd_set_mac = {0}; + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + u16 out_len; + int ret; + + vf_info->extra_feature = 0; + + if (vf_id > nic_io->max_vf_num) { + nic_err(nic_io->dev_hdl, "Invalid vf_id %d, max_vf_num: %d\n", + vf_id, nic_io->max_vf_num); + return -EFAULT; + } + + vf_info->attach = false; + + if ((!vf_info->specified_mac) && (vf_info->pf_vlan == 0)) { + memset(vf_info->drv_mac, 0, ETH_ALEN); + return 0; + } + + out_len = sizeof(cmd_set_mac); + ether_addr_copy(cmd_set_mac.mac, vf_info->drv_mac); + cmd_set_mac.vlan_id = vf_info->pf_vlan; + cmd_set_mac.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_DEL_MAC, + &cmd_set_mac, sizeof(cmd_set_mac), + &cmd_set_mac, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_set_mac)) { + nic_err(nic_io->dev_hdl, + "Fail to delete the mac of VF %d, ret: %d, status: 0x%x, out_len: 0x%x\n", + SSSNIC_HW_VF_ID_TO_OS(vf_id), ret, + cmd_set_mac.head.state, out_len); + return -EFAULT; + } + + memset(vf_info->drv_mac, 0, ETH_ALEN); + + return 0; +} + +static int sss_nic_register_vf_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + int ret; + struct sss_nic_mbx_attach_vf *in_info = in_buf; + struct sss_nic_mbx_attach_vf *out_info = out_buf; + + if (in_info->op_register == SSSNIC_VF_UNREGISTER) + ret = sss_nic_dettach_vf(nic_io, vf_id); + else + ret = sss_nic_attach_vf(nic_io, vf_id, in_info->extra_feature); + + *out_size = sizeof(*out_info); + if (ret != 0) + out_info->head.state = EFAULT; + + return 0; +} + +static int sss_nic_get_vf_cos_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, + u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *out_state = out_buf; + + *out_size = sizeof(*out_state); + out_state->head.state = SSS_MGMT_CMD_SUCCESS; + memcpy(&out_state->dcb_info, &nic_io->dcb_info, sizeof(nic_io->dcb_info)); + + return 0; +} + +static int sss_nic_get_vf_mac_msg_handler(struct sss_nic_io *nic_io, u16 vf_id, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + struct sss_nic_mbx_mac_addr *out_info = out_buf; + int ret; + + if (SSSNIC_SUPPORT_VF_MAC(nic_io)) { + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_GET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret == 0) { + if (is_zero_ether_addr(out_info->mac)) + ether_addr_copy(out_info->mac, vf_info->drv_mac); + } + return ret; + } + + *out_size = sizeof(*out_info); + ether_addr_copy(out_info->mac, vf_info->drv_mac); + out_info->head.state = SSS_MGMT_CMD_SUCCESS; + + return 0; +} + +static int sss_nic_cmd_vf_mac(struct sss_nic_io *nic_io, struct sss_nic_vf_info *vf_info, + u16 cmd, void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int 
ret; + + if (!vf_info->trust && vf_info->specified_mac && is_valid_ether_addr(in_mac->mac)) { + out_mac->head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + if (is_valid_ether_addr(in_mac->mac)) + in_mac->vlan_id = vf_info->pf_vlan; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, cmd, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to send vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->head.state, *out_size); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_set_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_SET_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if (is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->mac); + + return 0; +} + +static int sss_nic_del_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_addr *in_mac = in_buf; + struct sss_nic_mbx_mac_addr *out_mac = out_buf; + int ret; + + ret = sss_nic_cmd_vf_mac(nic_io, vf_info, SSSNIC_MBX_OPCODE_DEL_MAC, + in_buf, in_size, out_buf, out_size); + if (ret != 0) + return ret; + + if (is_valid_ether_addr(in_mac->mac) && + out_mac->head.state == SSS_MGMT_CMD_SUCCESS) + eth_zero_addr(vf_info->drv_mac); + + return 0; +} + +static int sss_nic_update_vf_mac_msg_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = &nic_io->vf_info_group[id]; + struct sss_nic_mbx_mac_update *in_mac = in_buf; + struct sss_nic_mbx_mac_update *out_mac = out_buf; + int ret; + + if (!is_valid_ether_addr(in_mac->old_mac.mac)) { + nic_err(nic_io->dev_hdl, "Fail to update mac, Invalid mac.\n"); + return -EINVAL; + } + + if (!vf_info->trust && vf_info->specified_mac) { + out_mac->old_mac.head.state = SSSNIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*out_mac); + nic_warn(nic_io->dev_hdl, + "PF has already set VF MAC address,and vf trust is off.\n"); + return 0; + } + + in_mac->old_mac.vlan_id = vf_info->pf_vlan; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, + SSSNIC_MBX_OPCODE_UPDATE_MAC, in_buf, in_size, + out_buf, out_size); + if (ret != 0 || *out_size == 0) { + nic_warn(nic_io->dev_hdl, + "Fail to update vf mac, ret: %d,status: 0x%x, out size: 0x%x\n", + ret, out_mac->old_mac.head.state, *out_size); + return -EFAULT; + } + + if (out_mac->old_mac.head.state == SSS_MGMT_CMD_SUCCESS) + ether_addr_copy(vf_info->drv_mac, in_mac->new_mac); + + return 0; +} + +static int _sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, + u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, 
out_buf, out_size, 0, channel); + + return sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_L2NIC, cmd, in_buf, + in_size, out_buf, out_size, 0, channel); +} + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, SSS_CHANNEL_NIC); +} + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_l2nic_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, out_buf, + out_size, channel); +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mbx_handler(void *hwdev, u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_io *nic_io = NULL; + const struct sss_nic_vf_msg_handler *handler = NULL; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, in_buf, in_size, out_buf, out_size); + + nic_warn(nic_io->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info) +{ + struct sss_event_info event_info = {0}; + + event_info.type = SSSNIC_EVENT_DCB_STATE_CHANGE; + event_info.service = SSS_EVENT_SRV_NIC; + memcpy((void *)event_info.event_data, dcb_info, sizeof(*dcb_info)); + + sss_do_event_callback(hwdev, &event_info); +} + +static void sss_nic_dcb_state_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_vf_dcb_cfg *dcb_cfg = in_buf; + + if (!dcb_cfg) + return; + + memcpy(&nic_io->dcb_info, &dcb_cfg->dcb_info, sizeof(dcb_cfg->dcb_info)); + sss_nic_notify_dcb_state_event(nic_io->hwdev, &dcb_cfg->dcb_info); +} + +static void sss_nic_tx_pause_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_msg_tx_pause_info *in_pause = in_buf; + + if (in_size != sizeof(*in_pause)) { + nic_err(nic_io->dev_hdl, "Invalid in buffer size value: %u,It should be %ld\n", + in_size, sizeof(*in_pause)); + return; + } + + nic_warn(nic_io->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", + in_pause->tx_pause_except, in_pause->except_level); + sss_fault_event_report(nic_io->hwdev, SSS_FAULT_SRC_TX_PAUSE_EXCP, + (u16)in_pause->except_level); +} + +static void sss_nic_bond_active_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_event_info in_info = {0}; + struct sss_nic_msg_bond_active_info *bond_info = in_buf; + + if (in_size != sizeof(*bond_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*bond_info)); + return; + } + + memcpy((void *)in_info.event_data, bond_info, sizeof(*bond_info)); + in_info.type = SSSNIC_MBX_OPCODE_BOND_ACTIVE_NOTICE; + in_info.service = SSS_EVENT_SRV_NIC; + sss_do_event_callback(nic_io->hwdev, &in_info); +} + +static int _sss_nic_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_io *nic_io = NULL; + const struct nic_event_handler *handler = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + 
*out_size = 0; + + handler = sss_nic_get_event_proc(cmd); + if (handler) { + handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size); + return 0; + } + + ((struct sss_mgmt_msg_head *)out_buf)->state = SSS_MGMT_CMD_UNSUPPORTED; + *out_size = sizeof(struct sss_mgmt_msg_head); + nic_warn(nic_io->dev_hdl, "Unsupport nic event, cmd: %u\n", cmd); + + return 0; +} + +int sss_nic_vf_event_handler(void *hwdev, + u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +void sss_nic_pf_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + _sss_nic_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h new file mode 100644 index 0000000000000000000000000000000000000000..2b7fb586426ad1f04e49e063ebc6205559df3da7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_event.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_EVENT_H +#define SSS_NIC_EVENT_H + +#include +#include + +#include "sss_hw_common.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_cfg_mag_define.h" + +enum sss_nic_event_type { + SSSNIC_EVENT_LINK_DOWN, + SSSNIC_EVENT_LINK_UP, + SSSNIC_EVENT_PORT_MODULE_EVENT, + SSSNIC_EVENT_DCB_STATE_CHANGE, + SSSNIC_EVENT_MAX +}; + +struct sss_nic_vf_msg_handler { + u16 opcode; + int (*msg_handler)(struct sss_nic_io *nic_io, + u16 vf, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); +}; + +struct nic_event_handler { + u16 opcode; + void (*event_handler)(struct sss_nic_io *nic_io, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +int sss_nic_dettach_vf(struct sss_nic_io *nic_io, u16 vf_id); + +int sss_nic_l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel); + +int sss_nic_pf_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_notify_dcb_state_event(void *hwdev, + struct sss_nic_dcb_info *dcb_info); + +int sss_nic_vf_event_handler(void *hwdev, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void sss_nic_pf_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..272f74f3e4d23f249d63ea678119c3c319e52f56 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_cfg.h" + +enum sss_nic_rx_mode_state { + SSSNIC_PROMISC_ON, + SSSNIC_ALLMULTI_ON, + SSSNIC_PROMISC_FORCE_ON, + SSSNIC_ALLMULTI_FORCE_ON, +}; + +enum sss_nic_mac_filter_state { + SSSNIC_MAC_FILTER_WAIT_SYNC, + 
SSSNIC_MAC_FILTER_SYNCED, + SSSNIC_MAC_FILTER_WAIT_UNSYNC, + SSSNIC_MAC_FILTER_UNSYNCED, +}; + +struct sss_nic_mac_filter { + struct list_head list; + u8 address[ETH_ALEN]; + unsigned long status; +}; + +#define SSSNIC_DEFAULT_RX_MODE (SSSNIC_RX_MODE_UC | SSSNIC_RX_MODE_MC | SSSNIC_RX_MODE_BC) + +static bool mc_mac_filter = true; +module_param(mc_mac_filter, bool, 0444); +MODULE_PARM_DESC(mc_mac_filter, "Set multicast mac filter: 0 - disable, 1 - enable (default=1)"); + +static int sss_nic_sync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +static int sss_nic_unsync_uc(struct net_device *netdev, u8 *address) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + /* The addr is in use */ + if (ether_addr_equal(address, netdev->dev_addr)) + return 0; + + return sss_nic_del_mac(nic_dev, address, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); +} + +void sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct net_device *netdev = nic_dev->netdev; + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->uc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } + + list_for_each_entry_safe(filter, tmp_filter, &nic_dev->mc_filter_list, list) { + if (filter->status == SSSNIC_MAC_FILTER_SYNCED) + sss_nic_unsync_uc(netdev, filter->address); + list_del(&filter->list); + kfree(filter); + } +} + +static struct sss_nic_mac_filter *sss_nic_find_mac(const struct list_head *filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter = NULL; + + list_for_each_entry(filter, filter_list, list) { + if (ether_addr_equal(address, filter->address)) + return filter; + } + return NULL; +} + +static struct sss_nic_mac_filter *sss_nic_add_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, + u8 *address) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + goto out; + + ether_addr_copy(filter->address, address); + + INIT_LIST_HEAD(&filter->list); + list_add_tail(&filter->list, mac_filter_list); + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return filter; +} + +static void sss_nic_del_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_mac_filter *filter) +{ + set_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (filter->status == SSSNIC_MAC_FILTER_WAIT_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&filter->list); + kfree(filter); + return; + } + + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; +} + +static struct sss_nic_mac_filter *sss_nic_copy_mac_filter_entry(const struct sss_nic_mac_filter *ft) +{ + struct sss_nic_mac_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return NULL; + + *filter = *ft; + INIT_LIST_HEAD(&filter->list); + + return filter; +} + +static void sss_nic_undo_del_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + + list_for_each_entry_safe(filter, tmp_filter, from, list) { + if (sss_nic_find_mac(filter_list, filter->address)) + continue; + + if (filter->status == 
SSSNIC_MAC_FILTER_SYNCED) + filter->status = SSSNIC_MAC_FILTER_WAIT_UNSYNC; + + list_move_tail(&filter->list, filter_list); + } +} + +static void sss_nic_undo_add_filter_entry(struct list_head *filter_list, + const struct list_head *from) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *tmp_filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, from, list) { + tmp_filter = sss_nic_find_mac(filter_list, filter->address); + if (tmp_filter && tmp_filter->status == SSSNIC_MAC_FILTER_SYNCED) + tmp_filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + } +} + +static void sss_nic_cleanup_filter_list(const struct list_head *head) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + + list_for_each_entry_safe(filter, ftmp_filter, head, list) { + list_del(&filter->list); + kfree(filter); + } +} + +static int sss_nic_sync_mac_filter_to_hw(struct sss_nic_dev *nic_dev, + struct list_head *del_list, + struct list_head *add_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + int add_num = 0; + + if (!list_empty(del_list)) { + list_for_each_entry_safe(filter, ftmp_filter, del_list, list) { + ret = sss_nic_unsync_uc(netdev, filter->address); + if (ret != 0) { /* ignore errors when delete mac */ + nic_err(nic_dev->dev_hdl, "Fail to delete mac\n"); + } + + list_del(&filter->list); + kfree(filter); + } + } + + if (!list_empty(add_list)) { + list_for_each_entry_safe(filter, ftmp_filter, add_list, list) { + ret = sss_nic_sync_uc(netdev, filter->address); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to add mac\n"); + return ret; + } + + add_num++; + list_del(&filter->list); + kfree(filter); + } + } + + return add_num; +} + +static int sss_nic_sync_mac_filter(struct sss_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct net_device *netdev = nic_dev->netdev; + struct list_head del_tmp_list; + struct list_head add_tmp_list; + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *fclone_filter = NULL; + int ret = 0; + int add_num = 0; + + INIT_LIST_HEAD(&del_tmp_list); + INIT_LIST_HEAD(&add_tmp_list); + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_UNSYNC) + continue; + + filter->status = SSSNIC_MAC_FILTER_UNSYNCED; + list_move_tail(&filter->list, &del_tmp_list); + } + + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_WAIT_SYNC) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) { + ret = -ENOMEM; + break; + } + + filter->status = SSSNIC_MAC_FILTER_SYNCED; + list_add_tail(&fclone_filter->list, &add_tmp_list); + } + + if (ret != 0) { + sss_nic_undo_del_filter_entry(mac_filter_list, &del_tmp_list); + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + nicif_err(nic_dev, drv, netdev, "Fail to clone mac_filter_entry\n"); + + sss_nic_cleanup_filter_list(&del_tmp_list); + sss_nic_cleanup_filter_list(&add_tmp_list); + return -ENOMEM; + } + + add_num = sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + if (list_empty(&add_tmp_list)) + return add_num; + + /* there are errors when add mac to hw, delete all mac in hw */ + sss_nic_undo_add_filter_entry(mac_filter_list, &add_tmp_list); + 
/* VF don't support to enter promisc mode, + * so we can't delete any other uc mac + */ + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { + list_for_each_entry_safe(filter, ftmp_filter, mac_filter_list, list) { + if (filter->status != SSSNIC_MAC_FILTER_SYNCED) + continue; + + fclone_filter = sss_nic_copy_mac_filter_entry(filter); + if (!fclone_filter) + break; + + filter->status = SSSNIC_MAC_FILTER_WAIT_SYNC; + list_add_tail(&fclone_filter->list, &del_tmp_list); + } + } + + sss_nic_cleanup_filter_list(&add_tmp_list); + sss_nic_sync_mac_filter_to_hw(nic_dev, &del_tmp_list, &add_tmp_list); + + /* need to enter promisc/allmulti mode */ + return -ENOMEM; +} + +static void sss_nic_sync_all_mac_filter(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int add_num; + + if (test_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { + clear_bit(SSSNIC_MAC_FILTER_CHANGED, &nic_dev->flags); + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->uc_filter_list, true); + if (add_num < 0 && SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io)) { + set_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, " Force promisc mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_PROMISC_FORCE_ON, &nic_dev->rx_mode); + } + + add_num = sss_nic_sync_mac_filter(nic_dev, &nic_dev->mc_filter_list, false); + if (add_num < 0 && SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io)) { + set_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + nicif_info(nic_dev, drv, netdev, "Force allmulti mode on\n"); + } else if (add_num != 0) { + clear_bit(SSSNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mode); + } + } +} + +static void sss_nic_update_mac_filter(struct sss_nic_dev *nic_dev, + const struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct netdev_hw_addr *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(hw_addr, src_list) { + filter = sss_nic_find_mac(filter_list, hw_addr->addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(hw_addr, src_list) + if (ether_addr_equal(hw_addr->addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} + +#ifndef NETDEV_HW_ADDR_T_MULTICAST +static void sss_nic_update_mc_filter(struct sss_nic_dev *nic_dev, + struct list_head *filter_list) +{ + struct sss_nic_mac_filter *filter = NULL; + struct sss_nic_mac_filter *ftmp_filter = NULL; + struct sss_nic_mac_filter *f_filter = NULL; + struct dev_mc_list *hw_addr = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) { + filter = sss_nic_find_mac(filter_list, hw_addr->da_addr); + if (!filter) + sss_nic_add_filter(nic_dev, filter_list, hw_addr->da_addr); + else if (filter->status == SSSNIC_MAC_FILTER_WAIT_UNSYNC) + filter->status = SSSNIC_MAC_FILTER_SYNCED; + } + 
netif_addr_unlock_bh(nic_dev->netdev); + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f_filter, ftmp_filter, filter_list, list) { + bool find = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(hw_addr, nic_dev->netdev) + if (ether_addr_equal(hw_addr->da_addr, f_filter->address)) { + find = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (find) + continue; + + sss_nic_del_filter(nic_dev, f_filter); + } +} +#endif + +static void sss_nic_update_all_mac_filter(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(SSSNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { + sss_nic_update_mac_filter(nic_dev, &netdev->uc, + &nic_dev->uc_filter_list); + if (mc_mac_filter) { +#ifdef NETDEV_HW_ADDR_T_MULTICAST + sss_nic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list); +#else + sss_nic_update_mc_filter(nic_dev, &nic_dev->mc_filter_list); +#endif + } + } +} + +static void sss_nic_sync_rx_mode_to_hw(struct sss_nic_dev *nic_dev, int allmulti_enter, + int promisc_enter) +{ + int ret; + u32 rx_mode = SSSNIC_DEFAULT_RX_MODE; + struct net_device *netdev = nic_dev->netdev; + + rx_mode |= (allmulti_enter ? SSSNIC_RX_MODE_MC_ALL : 0); + rx_mode |= (promisc_enter ? SSSNIC_RX_MODE_PROMISC : 0); + + if (allmulti_enter != + test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode)) + nicif_info(nic_dev, drv, netdev, + "%s allmulti mode\n", + allmulti_enter ? "Enable" : "Disable"); + + if (promisc_enter != test_bit(SSSNIC_PROMISC_ON, + &nic_dev->rx_mode)) + nicif_info(nic_dev, drv, netdev, + "%s promisc mode\n", + promisc_enter ? "Enable" : "Disable"); + + ret = sss_nic_set_rx_mode(nic_dev, rx_mode); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set rx mode\n"); + return; + } + + if (allmulti_enter != 0) + set_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode); + else + clear_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode); + + if (promisc_enter != 0) + set_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode); + else + clear_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode); +} + +void sss_nic_set_rx_mode_work(struct work_struct *work) +{ + struct sss_nic_dev *nic_dev = + container_of(work, struct sss_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int allmulti_enter = 0; + int promisc_enter = 0; + + sss_nic_update_all_mac_filter(nic_dev); + + sss_nic_sync_all_mac_filter(nic_dev); + + if (SSSNIC_SUPPORT_ALLMULTI(nic_dev->nic_io)) + allmulti_enter = !!(netdev->flags & IFF_ALLMULTI) || + test_bit(SSSNIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mode); + + if (SSSNIC_SUPPORT_PROMISC(nic_dev->nic_io)) + promisc_enter = !!(netdev->flags & IFF_PROMISC) || + test_bit(SSSNIC_PROMISC_FORCE_ON, + &nic_dev->rx_mode); + + if (allmulti_enter != + test_bit(SSSNIC_ALLMULTI_ON, &nic_dev->rx_mode) || + promisc_enter != + test_bit(SSSNIC_PROMISC_ON, &nic_dev->rx_mode)) + sss_nic_sync_rx_mode_to_hw(nic_dev, allmulti_enter, promisc_enter); +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..65d13b459fc91c1b02236b8b4e11330462486822 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_filter.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_FILTER_H +#define SSS_NIC_FILTER_H + +#include +#include "sss_nic_dev_define.h" + +void sss_nic_set_rx_mode_work(struct work_struct *work); +void 
sss_nic_clean_mac_list_filter(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c new file mode 100644 index 0000000000000000000000000000000000000000..c8606db4f5a50102875d521582aa1dc8e4e17380 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.c @@ -0,0 +1,953 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io.h" +#include "sss_nic_event.h" + +#define SSSNIC_DEAULT_DROP_THD_OFF 0 +#define SSSNIC_DEAULT_DROP_THD_ON (0xFFFF) +#define SSSNIC_DEAULT_TX_CI_PENDING_LIMIT 1 +#define SSSNIC_DEAULT_TX_CI_COALESCING_TIME 1 +#define SSSNIC_WQ_PREFETCH_MIN 1 +#define SSSNIC_WQ_PREFETCH_MAX 4 +#define SSSNIC_WQ_PREFETCH_THRESHOLD 256 +#define SSSNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_CI_Q_ADDR_SIZE (64) + +#define SSSNIC_CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * SSSNIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define SSSNIC_CI_PADDR(base_paddr, qid) ((base_paddr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_CI_VADDR(base_addr, qid) ((u8 *)(base_addr) + \ + (qid) * SSSNIC_CI_Q_ADDR_SIZE) + +#define SSSNIC_SQ_CTX_SIZE(num_sqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_sqs) * sizeof(struct sss_nic_sq_ctx))) + +#define SSSNIC_RQ_CTX_SIZE(num_rqs) ((u16)(sizeof(struct sss_nic_qp_ctx_header) \ + + (num_rqs) * sizeof(struct sss_nic_rq_ctx))) + +#define SSSNIC_CI_ID_HIGH_SHIFH 12 +#define SSSNIC_CI_HIGN_ID(val) ((val) >> SSSNIC_CI_ID_HIGH_SHIFH) + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_SHIFT 0 +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_SHIFT 1 + +#define SSSNIC_SQ_CTX_MODE_SP_FLAG_MASK 0x1U +#define SSSNIC_SQ_CTX_MODE_PKT_DROP_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_MODE(val, member) \ + (((val) & SSSNIC_SQ_CTX_MODE_##member##_MASK) \ + << SSSNIC_SQ_CTX_MODE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_SQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_SQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_SHIFT 23 + +#define SSSNIC_SQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_SQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_PAGE_##member##_MASK) \ + << SSSNIC_SQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_SHIFT 0 + +#define SSSNIC_SQ_CTX_GLOBAL_SQ_ID_MASK 0x1FFFU + +#define SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(val, member) \ + (((val) & SSSNIC_SQ_CTX_##member##_MASK) \ + << SSSNIC_SQ_CTX_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_SHIFT 0 +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_SHIFT 16 + +#define SSSNIC_SQ_CTX_PKT_DROP_THD_ON_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_PKT_DROP_THD_OFF_MASK 0xFFFFU + +#define SSSNIC_SET_SQ_CTX_PKT_DROP_THD(val, member) \ + (((val) & SSSNIC_SQ_CTX_PKT_DROP_##member##_MASK) \ + << SSSNIC_SQ_CTX_PKT_DROP_##member##_SHIFT) + +#define 
SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_SQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SSSNIC_SQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_SQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_SQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_SQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_SQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_SQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_SQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SSSNIC_SQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_SQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_SQ_CTX_PREF_##member##_MASK) \ + << SSSNIC_SQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_SHIFT 0 +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_SHIFT 28 +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_SHIFT 31 + +#define SSSNIC_RQ_CTX_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_WQ_PAGE_WQE_TYPE_MASK 0x3U +#define SSSNIC_RQ_CTX_WQ_PAGE_OWNER_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_WQ_PAGE(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_PAGE_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_PAGE_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_SHIFT 16 +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_SHIFT 19 +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_SHIFT 23 + +#define SSSNIC_SQ_CTX_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQ_CTX_VLAN_TYPE_SEL_MASK 0x7U +#define SSSNIC_SQ_CTX_VLAN_INSERT_MODE_MASK 0x3U +#define SSSNIC_SQ_CTX_VLAN_CEQ_EN_MASK 0x1U + +#define SSSNIC_SET_SQ_CTX_VLAN_CEQ(val, member) \ + (((val) & SSSNIC_SQ_CTX_VLAN_##member##_MASK) \ + << SSSNIC_SQ_CTX_VLAN_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PI_ID_SHIFT 0 +#define SSSNIC_RQ_CTX_CI_ID_SHIFT 16 + +#define SSSNIC_RQ_CTX_PI_ID_MASK 0xFFFFU +#define SSSNIC_RQ_CTX_CI_ID_MASK 0xFFFFU + +#define SSSNIC_SET_RQ_CTX_CI_PI(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) \ + << SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_SHIFT 21 +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_SHIFT 31 + +#define SSSNIC_RQ_CTX_CEQ_ATTR_INTR_MASK 0x3FFU +#define SSSNIC_RQ_CTX_CEQ_ATTR_EN_MASK 0x1U + +#define SSSNIC_SET_RQ_CTX_CEQ_ATTR(val, member) \ + (((val) & SSSNIC_RQ_CTX_CEQ_ATTR_##member##_MASK) \ + << SSSNIC_RQ_CTX_CEQ_ATTR_##member##_SHIFT) + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SSSNIC_SQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_SQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_SQ_CTX_WQ_BLOCK_##member##_MASK) \ + << SSSNIC_SQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_SHIFT 14 +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_SHIFT 25 + +#define SSSNIC_RQ_CTX_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MAX_MASK 0x7FFU +#define SSSNIC_RQ_CTX_PREF_CACHE_MIN_MASK 0x7FU + +#define SSSNIC_RQ_CTX_PREF_CI_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_OWNER_SHIFT 4 + +#define SSSNIC_RQ_CTX_PREF_CI_HI_MASK 0xFU +#define SSSNIC_RQ_CTX_PREF_OWNER_MASK 0x1U + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_SHIFT 0 +#define SSSNIC_RQ_CTX_PREF_CI_LOW_SHIFT 20 + +#define SSSNIC_RQ_CTX_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SSSNIC_RQ_CTX_PREF_CI_LOW_MASK 0xFFFU + +#define SSSNIC_SET_RQ_CTX_PREF(val, member) \ + (((val) & SSSNIC_RQ_CTX_PREF_##member##_MASK) << \ + SSSNIC_RQ_CTX_PREF_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_CQE_LEN_SHIFT 28 + +#define SSSNIC_RQ_CTX_CQE_LEN_MASK 
0x3U + +#define SSSNIC_SET_RQ_CTX_CQE_LEN(val, member) \ + (((val) & SSSNIC_RQ_CTX_##member##_MASK) << \ + SSSNIC_RQ_CTX_##member##_SHIFT) + +#define SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SSSNIC_RQ_CTX_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SSSNIC_SET_RQ_CTX_WQ_BLOCK(val, member) \ + (((val) & SSSNIC_RQ_CTX_WQ_BLOCK_##member##_MASK) << \ + SSSNIC_RQ_CTX_WQ_BLOCK_##member##_SHIFT) + +#define SSSNIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> 12) +#define SSSNIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> 9) + +enum sss_nic_qp_ctx_type { + SSSNIC_QP_CTX_TYPE_SQ, + SSSNIC_QP_CTX_TYPE_RQ, +}; + +struct sss_nic_qp_ctx_header { + u16 q_num; + u16 q_type; + u16 start_qid; + u16 rsvd; +}; + +struct sss_nic_clear_q_ctx { + struct sss_nic_qp_ctx_header ctrlq_hdr; + u32 rsvd; +}; + +struct sss_nic_rq_ctx { + u32 ci_pi; + u32 ceq_attr; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 hi_wq_block_pfn; + u32 lo_wq_block_pfn; +}; + +struct sss_nic_sq_ctx { + u32 ci_pi; + u32 drop_mode_sp; + u32 hi_wq_pfn; + u32 lo_wq_pfn; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 hi_pref_wq_pfn_ci; + u32 lo_pref_wq_pfn; + + u32 rsvd8; + u32 rsvd9; + u32 hi_wq_block_pfn; + u32 lo_wq_block_pfn; +}; + +struct sss_nic_rq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_rq_ctx rq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +struct sss_nic_sq_ctx_block { + struct sss_nic_qp_ctx_header ctrlq_hdr; + struct sss_nic_sq_ctx sq_ctxt[SSSNIC_Q_CTXT_MAX]; +}; + +static int sss_nic_create_sq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *sq, + u16 qid, u32 sq_depth, u16 msix_id) +{ + int ret = 0; + + sq->qid = qid; + sq->msix_id = msix_id; + sq->owner = 1; + + ret = sss_create_wq(nic_io->hwdev, &sq->wq, sq_depth, + (u16)BIT(SSSNIC_SQ_WQEBB_SHIFT)); + if (ret != 0) + nic_err(nic_io->dev_hdl, "Fail to create sq(%u) wq\n", qid); + + return ret; +} + +static void sss_nic_destroy_sq(struct sss_nic_io_queue *sq) +{ + sss_destroy_wq(&sq->wq); +} + +static int sss_nic_create_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, + u16 qid, u32 rq_depth, u16 msix_id) +{ + int ret = 0; + + rq->qid = qid; + rq->msix_id = msix_id; + rq->wqe_type = SSSNIC_NORMAL_RQ_WQE; + + rq->rx.pi_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE, + &rq->rx.pi_daddr, GFP_KERNEL); + if (!rq->rx.pi_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to allocate rq pi virt addr\n"); + return -ENOMEM; + } + + ret = sss_create_wq(nic_io->hwdev, &rq->wq, rq_depth, + (u16)BIT(SSSNIC_RQ_WQEBB_SHIFT + SSSNIC_NORMAL_RQ_WQE)); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq(%u) wq\n", qid); + dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + return ret; + } + + return 0; +} + +static void sss_nic_destroy_rq(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq) +{ + dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_vaddr, + rq->rx.pi_daddr); + + sss_destroy_wq(&rq->wq); +} + +static int sss_nic_create_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq, + u32 rq_depth, u32 sq_depth, u16 qid, u16 qp_msix_id) +{ + int ret = 0; + + ret = sss_nic_create_rq(nic_io, rq, qid, rq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create rq, qid: %u\n", qid); + return ret; + } + + ret = sss_nic_create_sq(nic_io, 
sq, qid, sq_depth, qp_msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to create sq, qid: %u\n", qid); + sss_nic_destroy_rq(nic_io, rq); + } + + return ret; +} + +static void sss_nic_destroy_qp(struct sss_nic_io *nic_io, + struct sss_nic_io_queue *rq, struct sss_nic_io_queue *sq) +{ + sss_nic_destroy_rq(nic_io, rq); + sss_nic_destroy_sq(sq); +} + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io) +{ + void __iomem *db_base = NULL; + int ret = 0; + + nic_io->max_qp_num = sss_get_max_sq_num(nic_io->hwdev); + + nic_io->ci_base_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, + PAGE_SIZE), + &nic_io->ci_base_daddr, GFP_KERNEL); + if (!nic_io->ci_base_vaddr) { + nic_err(nic_io->dev_hdl, "Fail to alloc ci dma buf\n"); + return -ENOMEM; + } + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc sq doorbell\n"); + goto out; + } + nic_io->sq_db_addr = (u8 *)db_base; + + ret = sss_alloc_db_addr(nic_io->hwdev, &db_base); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to alloc rq doorbell\n"); + sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr); + goto out; + } + nic_io->rq_db_addr = (u8 *)db_base; + + return 0; + +out: + dma_free_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE), + nic_io->ci_base_vaddr, nic_io->ci_base_daddr); + nic_io->ci_base_vaddr = NULL; + + return -ENOMEM; +} + +void sss_nic_io_resource_deinit(struct sss_nic_io *nic_io) +{ + dma_free_coherent(nic_io->dev_hdl, + SSSNIC_CI_TABLE_SIZE(nic_io->max_qp_num, PAGE_SIZE), + nic_io->ci_base_vaddr, nic_io->ci_base_daddr); + + sss_free_db_addr(nic_io->hwdev, nic_io->sq_db_addr); + sss_free_db_addr(nic_io->hwdev, nic_io->rq_db_addr); +} + +int sss_nic_alloc_qp(struct sss_nic_io *nic_io, + struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info) +{ + u16 i; + u16 qid; + int ret = 0; + struct sss_nic_io_queue *rq_group = NULL; + struct sss_nic_io_queue *sq_group = NULL; + + if (qp_info->qp_num > nic_io->max_qp_num || qp_info->qp_num == 0) + return -EINVAL; + + rq_group = kcalloc(qp_info->qp_num, sizeof(*rq_group), GFP_KERNEL); + if (!rq_group) + return -ENOMEM; + + sq_group = kcalloc(qp_info->qp_num, sizeof(*sq_group), GFP_KERNEL); + if (!sq_group) { + ret = -ENOMEM; + nic_err(nic_io->dev_hdl, "Fail to allocate sq\n"); + goto alloc_sq_err; + } + + for (qid = 0; qid < qp_info->qp_num; qid++) { + ret = sss_nic_create_qp(nic_io, &rq_group[qid], &sq_group[qid], + qp_info->rq_depth, qp_info->sq_depth, qid, + qp_msix_arry[qid].msix_id); + if (ret != 0) { + nic_err(nic_io->dev_hdl, + "Fail to allocate qp %u, err: %d\n", qid, ret); + goto create_qp_err; + } + } + + qp_info->rq_group = rq_group; + qp_info->sq_group = sq_group; + + return 0; + +create_qp_err: + for (i = 0; i < qid; i++) + sss_nic_destroy_qp(nic_io, &rq_group[i], &sq_group[i]); + + kfree(sq_group); + +alloc_sq_err: + kfree(rq_group); + + return ret; +} + +void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info) +{ + u16 qid; + + for (qid = 0; qid < qp_info->qp_num; qid++) + sss_nic_destroy_qp(nic_io, &qp_info->rq_group[qid], + &qp_info->sq_group[qid]); + + kfree(qp_info->rq_group); + kfree(qp_info->sq_group); + qp_info->rq_group = NULL; + qp_info->sq_group = NULL; +} + +static void sss_nic_init_db_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + u16 qid; + u16 *ci_addr = NULL; + + for (qid = 0; qid < nic_io->active_qp_num; qid++) { + qp_info->rq_group[qid].db_addr = 
nic_io->rq_db_addr; + qp_info->sq_group[qid].db_addr = nic_io->sq_db_addr; + qp_info->sq_group[qid].tx.ci_addr = + SSSNIC_CI_VADDR(nic_io->ci_base_vaddr, qid); + ci_addr = (u16 *)qp_info->sq_group[qid].tx.ci_addr; + *ci_addr = 0; + } +} + +int sss_nic_init_qp_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + nic_io->rq_group = qp_info->rq_group; + nic_io->sq_group = qp_info->sq_group; + nic_io->active_qp_num = qp_info->qp_num; + + sss_nic_init_db_info(nic_io, qp_info); + + return sss_nic_init_qp_ctx(nic_io); +} + +void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io, + struct sss_nic_qp_info *qp_info) +{ + qp_info->qp_num = nic_io->active_qp_num; + qp_info->rq_group = nic_io->rq_group; + qp_info->sq_group = nic_io->sq_group; + + sss_nic_deinit_qp_ctx(nic_io->hwdev); +} + +static void sss_nic_fill_qp_ctx_ctrlq_header(struct sss_nic_qp_ctx_header *qp_ctx_hdr, + enum sss_nic_qp_ctx_type ctx_type, + u16 queue_num, u16 qid) +{ + qp_ctx_hdr->rsvd = 0; + qp_ctx_hdr->start_qid = qid; + qp_ctx_hdr->q_num = queue_num; + qp_ctx_hdr->q_type = ctx_type; + sss_cpu_to_be32(qp_ctx_hdr, sizeof(*qp_ctx_hdr)); +} + +static void sss_nic_fill_sq_ctx_ctrlq_body(struct sss_nic_io_queue *sq, u16 qid, + struct sss_nic_sq_ctx *sq_ctx) +{ + u16 ci_start; + u16 pi_start; + u32 lo_wq_block_pfn; + u32 hi_wq_block_pfn; + u32 lo_wq_page_pfn; + u32 hi_wq_page_pfn; + u64 wq_block_pfn; + u64 wq_page_addr; + u64 wq_page_pfn; + + pi_start = sss_nic_get_sq_local_pi(sq); + ci_start = sss_nic_get_sq_local_ci(sq); + + wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(sq->wq.block_paddr); + lo_wq_block_pfn = lower_32_bits(wq_block_pfn); + hi_wq_block_pfn = upper_32_bits(wq_block_pfn); + + wq_page_addr = sss_wq_get_first_wqe_page_addr(&sq->wq); + wq_page_pfn = SSSNIC_WQ_PAGE_PFN(wq_page_addr); + lo_wq_page_pfn = lower_32_bits(wq_page_pfn); + hi_wq_page_pfn = upper_32_bits(wq_page_pfn); + + sq_ctx->rsvd0 = 0; + + sq_ctx->drop_mode_sp = + SSSNIC_SET_SQ_CTX_MODE(0, SP_FLAG) | + SSSNIC_SET_SQ_CTX_MODE(0, PKT_DROP); + + sq_ctx->ci_pi = + SSSNIC_SET_SQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_SQ_CTX_CI_PI(pi_start, PI_ID); + + sq_ctx->global_sq_id = + SSSNIC_SET_SQ_CTX_GLOBAL_QUEUE_ID(qid, GLOBAL_SQ_ID); + + sq_ctx->pkt_drop_thd = + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_ON, THD_ON) | + SSSNIC_SET_SQ_CTX_PKT_DROP_THD(SSSNIC_DEAULT_DROP_THD_OFF, THD_OFF); + + sq_ctx->vlan_ceq_attr = + SSSNIC_SET_SQ_CTX_VLAN_CEQ(0, CEQ_EN) | + SSSNIC_SET_SQ_CTX_VLAN_CEQ(1, INSERT_MODE); + + sq_ctx->pref_ci_owner = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_SQ_CTX_PREF(1, OWNER); + + sq_ctx->pref_cache = + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_SQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_SQ_CTX_PREF(ci_start, CI_LOW) | + SSSNIC_SET_SQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI); + + sq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + sq_ctx->hi_wq_pfn = + SSSNIC_SET_SQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_SQ_CTX_WQ_PAGE(1, OWNER); + + sq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + + sq_ctx->hi_wq_block_pfn = + SSSNIC_SET_SQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + sss_cpu_to_be32(sq_ctx, sizeof(*sq_ctx)); +} + +static void sss_nic_fill_rq_ctx_ctrlq_body(struct sss_nic_io_queue *rq, + struct sss_nic_rq_ctx *rq_ctx) +{ + u16 wqe_type = rq->wqe_type; + u16 ci_start = (u16)((u32)sss_nic_get_rq_local_ci(rq) << 
wqe_type); + u16 pi_start = (u16)((u32)sss_nic_get_rq_local_pi(rq) << wqe_type); + u64 wq_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); + u64 wq_page_pfn = SSSNIC_WQ_PAGE_PFN(wq_page_addr); + u64 wq_block_pfn = SSSNIC_WQ_BLOCK_PFN(rq->wq.block_paddr); + u32 lo_wq_page_pfn = lower_32_bits(wq_page_pfn); + u32 hi_wq_page_pfn = upper_32_bits(wq_page_pfn); + u32 lo_wq_block_pfn = lower_32_bits(wq_block_pfn); + u32 hi_wq_block_pfn = upper_32_bits(wq_block_pfn); + + rq_ctx->ceq_attr = SSSNIC_SET_RQ_CTX_CEQ_ATTR(0, EN) | + SSSNIC_SET_RQ_CTX_CEQ_ATTR(rq->msix_id, INTR); + + rq_ctx->ci_pi = + SSSNIC_SET_RQ_CTX_CI_PI(ci_start, CI_ID) | + SSSNIC_SET_RQ_CTX_CI_PI(pi_start, PI_ID); + + rq_ctx->pref_cache = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MIN, CACHE_MIN) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_MAX, CACHE_MAX) | + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctx->pref_ci_owner = + SSSNIC_SET_RQ_CTX_PREF(SSSNIC_CI_HIGN_ID(ci_start), CI_HI) | + SSSNIC_SET_RQ_CTX_PREF(1, OWNER); + + rq_ctx->lo_wq_pfn = lo_wq_page_pfn; + + rq_ctx->hi_wq_pfn = + SSSNIC_SET_RQ_CTX_WQ_PAGE(hi_wq_page_pfn, HI_PFN) | + SSSNIC_SET_RQ_CTX_WQ_PAGE(1, OWNER); + + if (wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(0, WQE_TYPE); + } else if (wqe_type == SSSNIC_NORMAL_RQ_WQE) { + rq_ctx->cqe_sge_len = SSSNIC_SET_RQ_CTX_CQE_LEN(1, CQE_LEN); + rq_ctx->hi_wq_pfn |= + SSSNIC_SET_RQ_CTX_WQ_PAGE(2, WQE_TYPE); + } else { + pr_err("Invalid rq wqe type: %u", wqe_type); + } + + rq_ctx->lo_pref_wq_pfn = lo_wq_page_pfn; + rq_ctx->hi_pref_wq_pfn_ci = + SSSNIC_SET_RQ_CTX_PREF(hi_wq_page_pfn, WQ_PFN_HI) | + SSSNIC_SET_RQ_CTX_PREF(ci_start, CI_LOW); + + rq_ctx->lo_wq_block_pfn = lo_wq_block_pfn; + rq_ctx->hi_wq_block_pfn = + SSSNIC_SET_RQ_CTX_WQ_BLOCK(hi_wq_block_pfn, PFN_HI); + + rq_ctx->pi_paddr_lo = lower_32_bits(rq->rx.pi_daddr); + rq_ctx->pi_paddr_hi = upper_32_bits(rq->rx.pi_daddr); + + sss_cpu_to_be32(rq_ctx, sizeof(*rq_ctx)); +} + +static int sss_nic_send_sq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; + u64 out_param = 0; + int ret; + struct sss_nic_sq_ctx_block *sq_ctx_block = msg_buf->buf; + + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + sss_nic_fill_qp_ctx_ctrlq_header(&sq_ctx_block->ctrlq_hdr, + SSSNIC_QP_CTX_TYPE_SQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_sq_ctx_ctrlq_body(&nic_io->sq_group[qid + i], qid + i, + &sq_ctx_block->sq_ctxt[i]); + + msg_buf->size = SSSNIC_SQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_sq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + ret = sss_nic_send_sq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set sq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, 
msg_buf); + + return ret; +} + +static int sss_nic_send_rq_ctx_by_ctrlq(struct sss_nic_io *nic_io, + struct sss_ctrl_msg_buf *msg_buf, u16 qid) +{ + u16 i; + u16 max_qp; + u64 out_param = 0; + int ret; + struct sss_nic_rq_ctx_block *rq_ctx_block = msg_buf->buf; + + rq_ctx_block = msg_buf->buf; + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + sss_nic_fill_qp_ctx_ctrlq_header(&rq_ctx_block->ctrlq_hdr, + SSSNIC_QP_CTX_TYPE_RQ, max_qp, qid); + + for (i = 0; i < max_qp; i++) + sss_nic_fill_rq_ctx_ctrlq_body(&nic_io->rq_group[qid + i], + &rq_ctx_block->rq_ctxt[i]); + + msg_buf->size = SSSNIC_RQ_CTX_SIZE(max_qp); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_QUEUE_CTX, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + return -EFAULT; + } + + return 0; +} + +static int sss_nic_send_rq_ctx_to_hw(struct sss_nic_io *nic_io) +{ + int ret = 0; + u16 qid = 0; + u16 max_qp; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + while (qid < nic_io->active_qp_num) { + max_qp = min(nic_io->active_qp_num - qid, SSSNIC_Q_CTXT_MAX); + + ret = sss_nic_send_rq_ctx_by_ctrlq(nic_io, msg_buf, qid); + if (ret) { + nic_err(nic_io->dev_hdl, + "Fail to set rq ctx, qid: %u\n", qid); + break; + } + + qid += max_qp; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_offload_ctx(struct sss_nic_io *nic_io, + enum sss_nic_qp_ctx_type ctx_type) +{ + int ret = 0; + u64 out_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + struct sss_nic_clear_q_ctx *ctx_block = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx_block = msg_buf->buf; + ctx_block->ctrlq_hdr.start_qid = 0; + ctx_block->ctrlq_hdr.q_type = ctx_type; + ctx_block->ctrlq_hdr.q_num = nic_io->max_qp_num; + + sss_cpu_to_be32(ctx_block, sizeof(*ctx_block)); + + msg_buf->size = sizeof(*ctx_block); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_CLEAN_QUEUE_CONTEXT, + msg_buf, &out_param, 0, SSS_CHANNEL_NIC); + if ((ret != 0) || (out_param != 0)) { + nic_err(nic_io->dev_hdl, + "Fail to clean queue offload ctxt, ret: %d, out_param: 0x%llx\n", + ret, out_param); + + ret = -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return ret; +} + +static int sss_nic_reset_hw_qp_offload_ctx(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_nic_reset_hw_offload_ctx(nic_io, SSSNIC_QP_CTX_TYPE_SQ); + if (ret != 0) + return ret; + + ret = sss_nic_reset_hw_offload_ctx(nic_io, SSSNIC_QP_CTX_TYPE_RQ); + + return ret; +} + +static int sss_nic_set_hw_intr_attr(struct sss_nic_io *nic_io, u16 qid) +{ + struct sss_nic_mbx_intr_attr cmd_ci_attr = {0}; + u16 out_len = sizeof(cmd_ci_attr); + int ret; + + cmd_ci_attr.func_id = sss_get_global_func_id(nic_io->hwdev); + cmd_ci_attr.dma_attr_off = 0; + cmd_ci_attr.pending_limit = SSSNIC_DEAULT_TX_CI_PENDING_LIMIT; + cmd_ci_attr.coalescing_time = SSSNIC_DEAULT_TX_CI_COALESCING_TIME; + cmd_ci_attr.intr_en = 1; + cmd_ci_attr.intr_id = nic_io->sq_group[qid].msix_id; + cmd_ci_attr.l2nic_sqn = qid; + cmd_ci_attr.ci_addr = SSSNIC_CI_PADDR(nic_io->ci_base_daddr, qid) >> 0x2; + + ret 
= sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SQ_CI_ATTR_SET, + &cmd_ci_attr, sizeof(cmd_ci_attr), &cmd_ci_attr, + &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_ci_attr)) { + nic_err(nic_io->dev_hdl, + "Fail to set ci attr table, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_ci_attr.head.state, out_len); + return -EFAULT; + } + + return 0; +} + +static int sss_nic_set_qp_intr_attr(struct sss_nic_io *nic_io) +{ + u16 qid; + int ret; + + for (qid = 0; qid < nic_io->active_qp_num; qid++) { + ret = sss_nic_set_hw_intr_attr(nic_io, qid); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set ci table, qid:%u\n", qid); + return ret; + } + } + + return 0; +} + +int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io) +{ + u32 rq_depth; + int ret; + + ret = sss_nic_send_sq_ctx_to_hw(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to send sq ctx to hw\n"); + return ret; + } + + ret = sss_nic_send_rq_ctx_to_hw(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to send rq ctx to hw\n"); + return ret; + } + + ret = sss_nic_reset_hw_qp_offload_ctx(nic_io); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to reset qp offload ctx\n"); + return ret; + } + + rq_depth = nic_io->rq_group[0].wq.q_depth << nic_io->rq_group[0].wqe_type; + ret = sss_chip_set_root_ctx(nic_io->hwdev, rq_depth, nic_io->sq_group[0].wq.q_depth, + nic_io->rx_buff_len, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_io->dev_hdl, "Fail to set root context\n"); + return ret; + } + + ret = sss_nic_set_qp_intr_attr(nic_io); + if (ret != 0) { + sss_chip_clean_root_ctx(nic_io->hwdev, SSS_CHANNEL_NIC); + nic_err(nic_io->dev_hdl, "Fail to set ci table\n"); + } + + return ret; +} + +void sss_nic_deinit_qp_ctx(void *hwdev) +{ + if (!hwdev) + return; + sss_chip_clean_root_ctx(hwdev, SSS_CHANNEL_NIC); +} +EXPORT_SYMBOL_GPL(sss_nic_deinit_qp_ctx); diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h new file mode 100644 index 0000000000000000000000000000000000000000..78180c0260667b04b919f11d46ba7872c4f6005a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_io.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IO_H +#define SSS_NIC_IO_H + +#include "sss_hw.h" +#include "sss_hw_wq.h" +#include "sss_nic_io_define.h" + +#define SSSNIC_RQ_WQEBB_SHIFT 3 +#define SSSNIC_CQE_SIZE_SHIFT 4 +#define SSSNIC_SQ_WQEBB_SHIFT 4 +#define SSSNIC_MIN_QUEUE_DEPTH 128 +#define SSSNIC_MAX_RX_QUEUE_DEPTH 16384 +#define SSSNIC_MAX_TX_QUEUE_DEPTH 65536 +#define SSSNIC_SQ_WQEBB_SIZE BIT(SSSNIC_SQ_WQEBB_SHIFT) + +/* ******************** DOORBELL DEFINE INFO ******************** */ +#define DB_INFO_CFLAG_SHIFT 23 +#define DB_INFO_QID_SHIFT 0 +#define DB_INFO_TYPE_SHIFT 27 +#define DB_INFO_NON_FILTER_SHIFT 22 +#define DB_INFO_COS_SHIFT 24 + +#define DB_INFO_COS_MASK 0x7U +#define DB_INFO_QID_MASK 0x1FFFU +#define DB_INFO_CFLAG_MASK 0x1U +#define DB_INFO_TYPE_MASK 0x1FU +#define DB_INFO_NON_FILTER_MASK 0x1U +#define SSSNIC_DB_INFO_SET(val, member) \ + (((u32)(val) & DB_INFO_##member##_MASK) << \ + DB_INFO_##member##_SHIFT) + +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HI_SHIFT 8 +#define SRC_TYPE 1 +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + DB_PI_LOW(pi)) + +#define sss_nic_get_sq_local_pi(sq) 
SSS_WQ_MASK_ID(&sq->wq, sq->wq.pi) +#define sss_nic_get_sq_local_ci(sq) SSS_WQ_MASK_ID(&sq->wq, sq->wq.ci) +#define sss_nic_get_sq_hw_ci(sq) \ + SSS_WQ_MASK_ID(&sq->wq, sss_hw_cpu16(*(u16 *)sq->tx.ci_addr)) + +#define sss_nic_get_rq_local_pi(rq) SSS_WQ_MASK_ID(&rq->wq, rq->wq.pi) +#define sss_nic_get_rq_local_ci(rq) SSS_WQ_MASK_ID(&rq->wq, rq->wq.ci) + +/* CFLAG_DATA_PATH */ +#define RQ_CFLAG_DP 1 +#define SQ_CFLAG_DP 0 + +enum sss_nic_queue_type { + SSSNIC_SQ, + SSSNIC_RQ, + SSSNIC_MAX_QUEUE_TYPE +}; + +struct sss_nic_db { + u32 db_info; + u32 pi_hi; +}; + +enum sss_nic_rq_wqe_type { + SSSNIC_COMPACT_RQ_WQE, + SSSNIC_NORMAL_RQ_WQE, + SSSNIC_EXTEND_RQ_WQE, +}; + +int sss_nic_io_resource_init(struct sss_nic_io *nic_io); +int sss_nic_init_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_alloc_qp(struct sss_nic_io *nic_io, + struct sss_irq_desc *qp_msix_arry, struct sss_nic_qp_info *qp_info); +void sss_nic_io_resource_deinit(struct sss_nic_io *nic_io); +void sss_nic_free_qp(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +void sss_nic_deinit_qp_info(struct sss_nic_io *nic_io, struct sss_nic_qp_info *qp_info); +int sss_nic_init_qp_ctx(struct sss_nic_io *nic_io); +void sss_nic_deinit_qp_ctx(void *hwdev); + +/* * + * @brief sss_nic_write_db - write doorbell + * @param queue: nic io queue + * @param cos: cos index + * @param cflag: 0--sq, 1--rq + * @param pi: product index + */ +static inline void sss_nic_write_db(struct sss_nic_io_queue *queue, + int cos, u8 cflag, u16 pi) +{ + struct sss_nic_db doorbell; + + doorbell.db_info = SSSNIC_DB_INFO_SET(SRC_TYPE, TYPE) | SSSNIC_DB_INFO_SET(cflag, CFLAG) | + SSSNIC_DB_INFO_SET(cos, COS) | SSSNIC_DB_INFO_SET(queue->qid, QID); + doorbell.pi_hi = DB_PI_HIGH(pi); + doorbell.db_info = sss_hw_be32(doorbell.db_info); + doorbell.pi_hi = sss_hw_be32(doorbell.pi_hi); + + /* make sure write correctly db to reg */ + wmb(); + + writeq(*((u64 *)&doorbell), DB_ADDR(queue, pi)); +} + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..d875b45732c4420a6033078927696ab036870cab --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" + +#define SSSNIC_AVG_PKT_SMALL_SIZE 256U + +static int sss_nic_napi_poll(struct napi_struct *napi, int budget) +{ + int tx_pkt; + int rx_pkt; + + struct sss_nic_irq_cfg *nic_irq = container_of(napi, struct sss_nic_irq_cfg, napi); + struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev); + + rx_pkt = sss_nic_rx_poll(nic_irq->rq, budget); + tx_pkt = sss_nic_tx_poll(nic_irq->sq, budget); + + if (tx_pkt >= budget || rx_pkt >= budget) + return budget; + + napi_complete(napi); + + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + + return max(tx_pkt, rx_pkt); +} + +static void sss_nic_add_napi(struct sss_nic_irq_cfg *nic_irq, int budget) +{ + netif_napi_add(nic_irq->netdev, &nic_irq->napi, sss_nic_napi_poll, budget); + napi_enable(&nic_irq->napi); +} + +static void 
sss_nic_del_napi(struct sss_nic_irq_cfg *nic_irq) +{ + napi_disable(&nic_irq->napi); + netif_napi_del(&nic_irq->napi); +} + +static irqreturn_t sss_nic_qp_irq(int irq, void *data) +{ + struct sss_nic_irq_cfg *nic_irq = (struct sss_nic_irq_cfg *)data; + struct sss_nic_dev *nic_dev = netdev_priv(nic_irq->netdev); + + sss_chip_clear_msix_resend_bit(nic_dev->hwdev, nic_irq->msix_id, 1); + + napi_schedule(&nic_irq->napi); + + return IRQ_HANDLED; +} + +static int sss_nic_request_irq(struct sss_nic_dev *nic_dev, u16 qid) +{ + int ret; + struct sss_irq_cfg irq_cfg = {0}; + struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + + sss_nic_add_napi(nic_irq, nic_dev->poll_budget); + + irq_cfg.coalesc_intr_set = 1; + irq_cfg.msix_id = nic_irq->msix_id; + irq_cfg.pending = nic_dev->coal_info[qid].pending_limt; + irq_cfg.coalesc_timer = + nic_dev->coal_info[qid].coalesce_timer; + irq_cfg.resend_timer = nic_dev->coal_info[qid].resend_timer; + nic_dev->rq_desc_group[qid].last_coal_timer = + nic_dev->coal_info[qid].coalesce_timer; + nic_dev->rq_desc_group[qid].last_pending_limt = + nic_dev->coal_info[qid].pending_limt; + ret = sss_chip_set_msix_attr(nic_dev->hwdev, irq_cfg, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to set rx msix attr.\n"); + goto out; + } + + ret = request_irq(nic_irq->irq_id, &sss_nic_qp_irq, 0, nic_irq->irq_name, nic_irq); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_irq->netdev, "Fail to request rx irq\n"); + goto out; + } + + irq_set_affinity_hint(nic_irq->irq_id, &nic_irq->affinity_mask); + + return 0; + +out: + sss_nic_del_napi(nic_irq); + return ret; +} + +static void sss_nic_release_irq(struct sss_nic_irq_cfg *nic_irq) +{ + irq_set_affinity_hint(nic_irq->irq_id, NULL); + synchronize_irq(nic_irq->irq_id); + free_irq(nic_irq->irq_id, nic_irq); + sss_nic_del_napi(nic_irq); +} + +static int sss_nic_set_hw_coal(struct sss_nic_dev *nic_dev, + u16 qid, u8 coal_timer_cfg, u8 pending_limt) +{ + int ret; + struct sss_irq_cfg cmd_irq_cfg = {0}; + + cmd_irq_cfg.coalesc_intr_set = 1; + cmd_irq_cfg.msix_id = nic_dev->qp_res.irq_cfg[qid].msix_id; + cmd_irq_cfg.pending = pending_limt; + cmd_irq_cfg.coalesc_timer = coal_timer_cfg; + cmd_irq_cfg.resend_timer = + nic_dev->coal_info[qid].resend_timer; + + ret = sss_chip_set_msix_attr(nic_dev->hwdev, cmd_irq_cfg, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to modify moderation for Queue: %u\n", qid); + return ret; + } + + return 0; +} + +static void sss_nic_calculate_intr_coal(struct sss_nic_intr_coal_info *coal_info, + u64 rx_rate, u8 *coal_timer_cfg, u8 *pending_limt) +{ + if (rx_rate < coal_info->pkt_rate_low) { + *pending_limt = coal_info->rx_pending_limt_low; + *coal_timer_cfg = coal_info->rx_usecs_low; + } else if (rx_rate > coal_info->pkt_rate_high) { + *pending_limt = coal_info->rx_pending_limt_high; + *coal_timer_cfg = coal_info->rx_usecs_high; + } else { + u8 rx_pending_limt = coal_info->rx_pending_limt_high - + coal_info->rx_pending_limt_low; + u8 rx_usecs = coal_info->rx_usecs_high - coal_info->rx_usecs_low; + u64 rx_rate_diff = rx_rate - coal_info->pkt_rate_low; + u64 pkt_rate = coal_info->pkt_rate_high - coal_info->pkt_rate_low; + + *pending_limt = (u8)(rx_rate_diff * rx_pending_limt / pkt_rate + + coal_info->rx_pending_limt_low); + *coal_timer_cfg = (u8)(rx_rate_diff * rx_usecs / pkt_rate + + coal_info->rx_usecs_low); + } +} + +static void sss_nic_update_intr_coal(struct sss_nic_dev *nic_dev, + u16 qid, u64 rx_rate, u64 tx_rate, u64 
avg_pkt_size) +{ + u8 pending_limt; + u8 coal_timer_cfg; + struct sss_nic_intr_coal_info *coal_info = NULL; + + coal_info = &nic_dev->coal_info[qid]; + + if (rx_rate > SSSNIC_RX_RATE_THRESH && avg_pkt_size > SSSNIC_AVG_PKT_SMALL_SIZE) { + sss_nic_calculate_intr_coal(coal_info, rx_rate, &coal_timer_cfg, &pending_limt); + } else { + pending_limt = coal_info->rx_pending_limt_low; + coal_timer_cfg = SSSNIC_LOWEST_LATENCY; + } + + if (coal_timer_cfg == nic_dev->rq_desc_group[qid].last_coal_timer && + pending_limt == nic_dev->rq_desc_group[qid].last_pending_limt) + return; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || qid >= nic_dev->qp_res.qp_num) + return; + + (void)sss_nic_set_hw_coal(nic_dev, qid, coal_timer_cfg, pending_limt); + + nic_dev->rq_desc_group[qid].last_pending_limt = pending_limt; + nic_dev->rq_desc_group[qid].last_coal_timer = coal_timer_cfg; +} + +static void sss_nic_adjust_coal_work(struct work_struct *work) +{ + u16 qid; + u64 avg_pkt_size; + u64 tx_pkts; + u64 tx_rate; + u64 rx_bytes; + u64 rx_pkts; + u64 rx_rate; + struct delayed_work *delay = to_delayed_work(work); + struct sss_nic_dev *nic_dev = + container_of(delay, struct sss_nic_dev, moderation_task); + unsigned long period; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + period = (unsigned long)(jiffies - nic_dev->last_jiffies); + + if ((nic_dev->use_adaptive_rx_coalesce == 0) || (period == 0)) + return; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + rx_bytes = nic_dev->rq_desc_group[qid].stats.rx_bytes - + nic_dev->rq_desc_group[qid].last_rx_bytes; + rx_pkts = nic_dev->rq_desc_group[qid].stats.rx_packets - + nic_dev->rq_desc_group[qid].last_rx_pkts; + avg_pkt_size = (rx_pkts != 0) ? 
(rx_bytes / rx_pkts) : 0; + rx_rate = rx_pkts * HZ / period; + + tx_pkts = nic_dev->sq_desc_group[qid].stats.tx_packets - + nic_dev->sq_desc_group[qid].last_tx_pkts; + tx_rate = tx_pkts * HZ / period; + + nic_dev->rq_desc_group[qid].last_rx_bytes = + nic_dev->rq_desc_group[qid].stats.rx_bytes; + nic_dev->rq_desc_group[qid].last_rx_pkts = + nic_dev->rq_desc_group[qid].stats.rx_packets; + nic_dev->sq_desc_group[qid].last_tx_bytes = + nic_dev->sq_desc_group[qid].stats.tx_bytes; + nic_dev->sq_desc_group[qid].last_tx_pkts = + nic_dev->sq_desc_group[qid].stats.tx_packets; + + sss_nic_update_intr_coal(nic_dev, qid, rx_rate, tx_rate, avg_pkt_size); + } + + nic_dev->last_jiffies = jiffies; +} + +static void sss_nic_dev_irq_cfg_init(struct sss_nic_dev *nic_dev, u16 qid) +{ + struct sss_irq_desc *irq_desc = &nic_dev->irq_desc_group[qid]; + struct sss_nic_irq_cfg *nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + + nic_irq->netdev = nic_dev->netdev; + nic_irq->msix_id = irq_desc->msix_id; + nic_irq->irq_id = irq_desc->irq_id; + nic_irq->sq = &nic_dev->sq_desc_group[qid]; + nic_irq->rq = &nic_dev->rq_desc_group[qid]; + nic_dev->rq_desc_group[qid].irq_cfg = nic_irq; +} + +static void __sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev, + struct sss_nic_irq_cfg *nic_irq) +{ + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, SSS_MSIX_DISABLE); + sss_chip_set_msix_auto_mask(nic_dev->hwdev, + nic_irq->msix_id, SSS_CLR_MSIX_AUTO_MASK); + sss_nic_release_irq(nic_irq); +} + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 i; + u16 qid; + u32 cpuid; + int ret; + struct sss_nic_irq_cfg *nic_irq = NULL; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + nic_irq = &nic_dev->qp_res.irq_cfg[qid]; + sss_nic_dev_irq_cfg_init(nic_dev, qid); + + cpuid = cpumask_local_spread(qid, dev_to_node(nic_dev->dev_hdl)); + cpumask_set_cpu(cpuid, &nic_irq->affinity_mask); + + ret = snprintf(nic_irq->irq_name, sizeof(nic_irq->irq_name), + "%s_qp%u", nic_dev->netdev->name, qid); + if (ret < 0) { + ret = -EINVAL; + goto out; + } + + ret = sss_nic_request_irq(nic_dev, qid); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to request rx irq\n"); + goto out; + } + + sss_chip_set_msix_auto_mask(nic_dev->hwdev, nic_irq->msix_id, + SSS_SET_MSIX_AUTO_MASK); + sss_chip_set_msix_state(nic_dev->hwdev, nic_irq->msix_id, + SSS_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, sss_nic_adjust_coal_work); + + return 0; + +out: + for (i = 0; i < qid; i++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[i]); + + return ret; +} + +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 qid; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) + __sss_nic_release_qp_irq(nic_dev, &nic_dev->qp_res.irq_cfg[qid]); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..9731e347129359954bd9b86c919aaee114f15845 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_irq.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_IRQ_H +#define SSS_NIC_IRQ_H + +#include + +#include "sss_kernel.h" +#include "sss_nic_dev_define.h" + +int sss_nic_request_qp_irq(struct sss_nic_dev *nic_dev); +void sss_nic_release_qp_irq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c 
b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..1efc7401c79c053b61d619adcdcf737f40905203 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.c @@ -0,0 +1,762 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +struct sss_nic_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +#define SSSNIC_LOOP_MODE_MIN 1 +#define SSSNIC_LOOP_MODE_MAX 6 + +#define SSSNIC_LOOP_MODE_IS_INVALID(mode) \ + (unlikely(((mode) > SSSNIC_LOOP_MODE_MAX) || ((mode) < SSSNIC_LOOP_MODE_MIN))) + +#define SSSNIC_LINK_INFO_VALID 1 + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size); +static int sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *in_buf, + u16 in_size, void *out_buf, u16 *out_size, u16 channel); + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel) +{ + struct sss_nic_mbx_set_port_mag_state port_state = {0}; + u16 out_len = sizeof(port_state); + int ret; + + if (!nic_dev) + return -EINVAL; + + if (sss_get_func_type(nic_dev->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + port_state.state = enable ? (SSSNIC_MAG_OPCODE_TX_ENABLE | SSSNIC_MAG_OPCODE_RX_ENABLE) : + SSSNIC_MAG_OPCODE_PORT_DISABLE; + port_state.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_ENABLE, + &port_state, sizeof(port_state), + &port_state, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to set port state, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, port_state.head.state, out_len, channel); + return -EIO; + } + + return 0; +} + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats) +{ + struct sss_nic_mbx_mag_port_stats_info stats_info = {0}; + struct sss_nic_mbx_mag_port_stats *port_stats = NULL; + u16 out_len = sizeof(*port_stats); + int ret; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + stats_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_STAT, + &stats_info, sizeof(stats_info), + port_stats, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, port_stats)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port statistics, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_stats->head.state, out_len); + ret = -EIO; + goto out; + } + + memcpy(stats, &port_stats->counter, sizeof(*stats)); + +out: + kfree(port_stats); + + return ret; +} + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable) +{ + struct sss_nic_link_ksettings settings = {0}; + u32 valid_bitmap = 0; + + valid_bitmap |= SSSNIC_LINK_SET_AUTONEG; + settings.valid_bitmap = valid_bitmap; + settings.autoneg = enable ? 
SSSNIC_PORT_CFG_AN_ON : SSSNIC_PORT_CFG_AN_OFF; + + return sss_nic_set_link_settings(nic_dev, &settings); +} + +static int sss_nic_cfg_loopback_mode(struct sss_nic_dev *nic_dev, u8 opcode, + u8 *mode, u8 *enable) +{ + struct sss_nic_mbx_loopback_mode loopback_mode = {0}; + u16 out_len = sizeof(loopback_mode); + int ret; + + if (opcode == SSS_MGMT_MSG_SET_CMD) { + loopback_mode.mode = *mode; + loopback_mode.en = *enable; + } + loopback_mode.opcode = opcode; + loopback_mode.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_CFG_LOOPBACK_MODE, + &loopback_mode, sizeof(loopback_mode), + &loopback_mode, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &loopback_mode)) { + nic_err(nic_dev->dev_hdl, + "Fail to %s loopback mode, ret: %d, state: 0x%x, out_len: 0x%x\n", + opcode == SSS_MGMT_MSG_SET_CMD ? "set" : "get", + ret, loopback_mode.head.state, out_len); + return -EIO; + } + + if (opcode == SSS_MGMT_MSG_GET_CMD) { + *enable = loopback_mode.en; + *mode = loopback_mode.mode; + } + + return 0; +} + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 lp_mode, u8 enable) +{ + if (SSSNIC_LOOP_MODE_IS_INVALID(lp_mode)) { + nic_err(nic_dev->dev_hdl, "Invalid loopback mode %u to set\n", + lp_mode); + return -EINVAL; + } + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_SET_CMD, &lp_mode, &enable); +} + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable) +{ + if (!nic_dev || !mode || !enable) + return -EINVAL; + + return sss_nic_cfg_loopback_mode(nic_dev, SSS_MGMT_MSG_GET_CMD, mode, + enable); +} + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type led_type, + enum sss_nic_mag_led_mode led_mode) +{ + struct sss_nic_mbx_set_led_cfg led_info = {0}; + u16 out_len = sizeof(led_info); + int ret; + + led_info.mode = led_mode; + led_info.type = led_type; + led_info.function_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LED_CFG, + &led_info, sizeof(led_info), &led_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &led_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to set led state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, led_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, + struct sss_nic_port_info *port_info, u16 channel) +{ + struct sss_nic_mbx_get_port_info mbx_port_info = {0}; + u16 out_len = sizeof(mbx_port_info); + int ret; + + if (!nic_dev || !port_info) + return -EINVAL; + + mbx_port_info.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync_ch(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_PORT_INFO, + &mbx_port_info, sizeof(mbx_port_info), + &mbx_port_info, &out_len, channel); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &mbx_port_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port info, ret: %d, state: 0x%x, out_len: 0x%x, channel: 0x%x\n", + ret, mbx_port_info.head.state, out_len, channel); + return -EIO; + } + + port_info->advertised_mode = mbx_port_info.advertised_mode; + port_info->duplex = mbx_port_info.duplex; + port_info->autoneg_cap = mbx_port_info.an_support; + port_info->fec = mbx_port_info.fec; + port_info->autoneg_state = mbx_port_info.an_en; + port_info->port_type = mbx_port_info.wire_type; + port_info->supported_mode = mbx_port_info.supported_mode; + port_info->speed = mbx_port_info.speed; + + return 0; +} + +int 
sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings) +{ + struct sss_nic_mbx_mag_set_port_cfg port_cfg = {0}; + u16 out_len = sizeof(port_cfg); + int ret; + + port_cfg.autoneg = settings->autoneg; + port_cfg.port_id = sss_get_phy_port_id(nic_dev->hwdev); + port_cfg.fec = settings->fec; + port_cfg.config_bitmap = settings->valid_bitmap; + port_cfg.speed = settings->speed; + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_PORT_CFG, + &port_cfg, sizeof(port_cfg), &port_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &port_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set link settings, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, port_cfg.head.state, out_len); + return -EIO; + } + + return port_cfg.head.state; +} + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *out_state) +{ + struct sss_nic_mbx_get_link_state link_state = {0}; + u16 out_len = sizeof(link_state); + int ret; + + if (!nic_dev || !out_state) + return -EINVAL; + + link_state.port_id = sss_get_phy_port_id(nic_dev->hwdev); + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), &link_state, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &link_state)) { + nic_err(nic_dev->dev_hdl, + "Fail to get link state, ret: %d, state: 0x%x, out_len: 0x%x\n", + ret, link_state.head.state, out_len); + return -EIO; + } + + *out_state = link_state.status; + + return 0; +} + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, u8 state) +{ + struct sss_nic_mbx_get_link_state link_state = {0}; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + int ret; + + link_state.status = state; + link_state.port_id = sss_get_phy_port_id(nic_io->hwdev); + ret = sss_mbx_send_to_vf_no_ack(nic_io->hwdev, vf_id, SSS_MOD_TYPE_SSSLINK, + SSSNIC_MAG_OPCODE_LINK_STATUS, + &link_state, sizeof(link_state), SSS_CHANNEL_NIC); + if (ret == SSS_MBX_ERRCODE_UNKNOWN_DES_FUNC) { + sss_nic_dettach_vf(nic_io, vf_id); + nic_warn(nic_io->dev_hdl, "VF %d not initialize, need to disconnect it\n", id); + } else if (ret != 0) { + nic_err(nic_io->dev_hdl, + "Fail to send VF %d the link state change event, ret:%d\n", id, ret); + } +} + +void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 state) +{ + struct sss_nic_vf_info *vf_info = NULL; + u16 vf_id; + + nic_io->link_status = state; + for (vf_id = 1; vf_id <= nic_io->max_vf_num; vf_id++) { + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->link_forced || !vf_info->attach) + continue; + sss_nic_notify_vf_link_state(nic_io, vf_id, state); + } +} + +static int sss_nic_get_vf_link_status_handler(struct sss_nic_io *nic_io, + u16 vf_id, void *buf_in, u16 in_len, + void *buf_out, u16 *out_len) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_get_link_state *link_state = buf_out; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + bool link_up = vf_info_group[id].link_up; + bool link_forced = vf_info_group[id].link_forced; + + if (link_forced) + link_state->status = link_up ? 
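+ /* When the PF has forced this VF's link, report the configured state
+  * instead of the physical port status cached in nic_io->link_status.
+  */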
SSSNIC_LINK_UP : SSSNIC_LINK_DOWN; + else + link_state->status = nic_io->link_status; + + link_state->head.state = SSS_MGMT_CMD_SUCCESS; + *out_len = sizeof(*link_state); + + return 0; +} + +static void sss_nic_get_link_info(struct sss_nic_io *nic_io, + const struct sss_nic_mbx_get_link_state *link_state, + struct sss_nic_event_link_info *link_info) +{ + struct sss_nic_port_info port_info = {0}; + int ret; + + /* link event reported only after set vport enable */ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF || + link_state->status == SSSNIC_LINK_DOWN) + return; + + ret = sss_nic_get_hw_port_info(nic_io->nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + nic_warn(nic_io->dev_hdl, "Fail to get port info\n"); + return; + } + + link_info->valid = SSSNIC_LINK_INFO_VALID; + link_info->duplex = port_info.duplex; + link_info->port_type = port_info.port_type; + link_info->speed = port_info.speed; + link_info->autoneg_state = port_info.autoneg_state; + link_info->autoneg_cap = port_info.autoneg_cap; +} + +static void sss_nic_link_status_event_handler(struct sss_nic_io *nic_io, + void *buf_in, u16 in_len, + void *buf_out, u16 *out_len) +{ + struct sss_nic_mbx_get_link_state *in_link_state = buf_in; + struct sss_nic_mbx_get_link_state *out_link_state = buf_out; + struct sss_event_info event_info = {0}; + struct sss_nic_event_link_info *link_info = (void *)event_info.event_data; + + nic_info(nic_io->dev_hdl, "Link status report received, func_id: %u, status: %u\n", + sss_get_global_func_id(nic_io->hwdev), in_link_state->status); + + sss_update_link_stats(nic_io->hwdev, in_link_state->status); + + sss_nic_get_link_info(nic_io, in_link_state, link_info); + + event_info.type = (in_link_state->status == SSSNIC_LINK_DOWN) ? + SSSNIC_EVENT_LINK_DOWN : SSSNIC_EVENT_LINK_UP; + event_info.service = SSS_EVENT_SRV_NIC; + sss_do_event_callback(nic_io->hwdev, &event_info); + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return; + + *out_len = sizeof(*out_link_state); + out_link_state->head.state = SSS_MGMT_CMD_SUCCESS; + sss_nic_notify_all_vf_link_state(nic_io, in_link_state->status); +} + +static void sss_nic_cable_plug_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mag_wire_event *in_wire_event = in_buf; + struct sss_nic_mag_wire_event *out_wire_event = out_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + struct sss_event_info event_info = {0}; + struct sss_nic_port_module_event *module_event = (void *)event_info.event_data; + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_info = false; + routine_cmd->mpu_send_sfp_abs = false; + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); + + *out_size = sizeof(*out_wire_event); + out_wire_event->head.state = SSS_MGMT_CMD_SUCCESS; + + event_info.service = SSS_EVENT_SRV_NIC; + event_info.type = SSSNIC_EVENT_PORT_MODULE_EVENT; + module_event->type = (in_wire_event->status != SSNSIC_PORT_PRESENT) ? 
+ SSSNIC_PORT_MODULE_CABLE_PLUGGED : SSSNIC_PORT_MODULE_CABLE_UNPLUGGED; + + sss_do_event_callback(nic_io->hwdev, &event_info); +} + +static void sss_nic_port_sfp_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_get_xsfp_info *in_xsfp_info = in_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (in_size != sizeof(*in_xsfp_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*in_xsfp_info)); + return; + } + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_info = true; + memcpy(&routine_cmd->std_sfp_info, in_xsfp_info, sizeof(*in_xsfp_info)); + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); +} + +static void sss_nic_port_sfp_absent_event_handler(struct sss_nic_io *nic_io, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + struct sss_nic_mbx_get_xsfp_present *in_xsfp_present = in_buf; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (in_size != sizeof(*in_xsfp_present)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %ld\n", + in_size, sizeof(*in_xsfp_present)); + return; + } + + routine_cmd = &nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_io->mag_cfg.sfp_mutex); + routine_cmd->mpu_send_sfp_abs = true; + memcpy(&routine_cmd->abs, in_xsfp_present, sizeof(*in_xsfp_present)); + mutex_unlock(&nic_io->mag_cfg.sfp_mutex); +} + +bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev) +{ + int ret; + bool sfp_abs_state; + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + u8 port_id = sss_get_phy_port_id(nic_dev->hwdev); + struct sss_nic_mbx_get_xsfp_present xsfp_present = {0}; + u16 out_len = sizeof(xsfp_present); + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_abs) { + if (routine_cmd->abs.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return true; + } + + sfp_abs_state = (bool)routine_cmd->abs.abs_status; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return sfp_abs_state; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_present.port_id = port_id; + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + &xsfp_present, sizeof(xsfp_present), &xsfp_present, + &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &xsfp_present)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp absent status, ret: %d, status: 0x%x, out_len: 0x%x\n", + port_id, ret, xsfp_present.head.state, out_len); + return true; + } + + return !!xsfp_present.abs_status; +} + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info) +{ + int ret; + u16 out_len = sizeof(*xsfp_info); + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + + if (!nic_dev || !xsfp_info) + return -EINVAL; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + memcpy(xsfp_info, &routine_cmd->std_sfp_info, sizeof(*xsfp_info)); + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + xsfp_info->port_id = sss_get_phy_port_id(nic_dev->hwdev); + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + xsfp_info, 
sizeof(*xsfp_info), xsfp_info, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, xsfp_info)) { + nic_err(nic_dev->dev_hdl, + "Fail to get port%u sfp eeprom information, ret: %d, status: 0x%x, out_len: 0x%x\n", + sss_get_phy_port_id(nic_dev->hwdev), ret, + xsfp_info->head.state, out_len); + return -EIO; + } + + return 0; +} + +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len) +{ + struct sss_nic_mbx_get_xsfp_info xsfp_info = {0}; + int ret; + + if (!nic_dev || !data) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + ret = sss_nic_get_sfp_info(nic_dev, &xsfp_info); + if (ret != 0) + return ret; + + memcpy(data, xsfp_info.sfp_info, len); + + return 0; +} + +int sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct sss_nic_cache_port_sfp *routine_cmd = NULL; + u8 sfp_data[SSSNIC_STD_SFP_INFO_MAX_SIZE]; + int ret; + + if (!nic_dev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (sss_nic_if_sfp_absent(nic_dev)) + return -ENXIO; + + routine_cmd = &nic_dev->nic_io->mag_cfg.rt_cmd; + + mutex_lock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + if (routine_cmd->mpu_send_sfp_info) { + if (routine_cmd->std_sfp_info.head.state) { + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return -EIO; + } + + *sfp_type_ext = routine_cmd->std_sfp_info.sfp_info[1]; + *sfp_type = routine_cmd->std_sfp_info.sfp_info[0]; + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_dev->nic_io->mag_cfg.sfp_mutex); + + ret = sss_nic_get_sfp_eeprom(nic_dev, (u8 *)sfp_data, SSSNIC_STD_SFP_INFO_MAX_SIZE); + if (ret != 0) + return ret; + + *sfp_type = sfp_data[0]; + *sfp_type_ext = sfp_data[1]; + + return 0; +} + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status state) +{ + int ret; + struct sss_nic_mbx_set_link_follow link_follow = {0}; + u16 out_len = sizeof(link_follow); + + link_follow.function_id = sss_get_global_func_id(nic_dev->hwdev); + link_follow.follow = state; + + ret = sss_nic_mag_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MAG_OPCODE_SET_LINK_FOLLOW, + &link_follow, sizeof(link_follow), + &link_follow, &out_len); + if ((link_follow.head.state != SSS_MGMT_CMD_UNSUPPORTED && link_follow.head.state != 0) || + ret != 0 || out_len == 0) { + nic_err(nic_dev->dev_hdl, + "Fail to set link status follow, ret: %d, state: 0x%x, out size: 0x%x\n", + ret, link_follow.head.state, out_len); + return -EFAULT; + } + + return link_follow.head.state; +} + +static const struct sss_nic_vf_msg_handler g_sss_nic_vf_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .msg_handler = sss_nic_get_vf_link_status_handler, + }, +}; + +static const struct sss_nic_vf_msg_handler *sss_nic_get_vf_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_vf_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_vf_mag_cmd_proc[i].opcode == opcode) + return &g_sss_nic_vf_mag_cmd_proc[i]; + + return NULL; +} + +/* pf/ppf handler mbx msg from vf */ +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + const struct sss_nic_vf_msg_handler *handler = NULL; + struct sss_nic_io *nic_io; + + if (!hwdev) + return -EFAULT; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + handler = sss_nic_get_vf_mag_cmd_proc(cmd); + if (handler) + return handler->msg_handler(nic_io, vf_id, + in_buf, in_size, out_buf, 
out_size); + + nic_warn(nic_io->dev_hdl, "NO function found for mag cmd: %u received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +static struct nic_event_handler g_sss_nic_mag_cmd_proc[] = { + { + .opcode = SSSNIC_MAG_OPCODE_LINK_STATUS, + .event_handler = sss_nic_link_status_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_WIRE_EVENT, + .event_handler = sss_nic_cable_plug_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_INFO, + .event_handler = sss_nic_port_sfp_event_handler, + }, + + { + .opcode = SSSNIC_MAG_OPCODE_GET_XSFP_PRESENT, + .event_handler = sss_nic_port_sfp_absent_event_handler, + }, +}; + +static const struct nic_event_handler *sss_nic_get_mag_cmd_proc(u16 opcode) +{ + u16 i; + u16 cmd_num = ARRAY_LEN(g_sss_nic_mag_cmd_proc); + + for (i = 0; i < cmd_num; i++) + if (g_sss_nic_mag_cmd_proc[i].opcode == opcode) + return &g_sss_nic_mag_cmd_proc[i]; + + return NULL; +} + +static int _sss_nic_mag_event_handler(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + const struct nic_event_handler *handler = NULL; + struct sss_nic_io *nic_io = NULL; + struct sss_mgmt_msg_head *out_msg_head = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = sss_get_service_adapter(hwdev, SSS_SERVICE_TYPE_NIC); + if (!nic_io) + return -EINVAL; + + *out_size = 0; + + handler = sss_nic_get_mag_cmd_proc(cmd); + if (handler) { + handler->event_handler(nic_io, in_buf, in_size, out_buf, out_size); + return 0; + } + + out_msg_head = out_buf; + out_msg_head->state = SSS_MGMT_CMD_UNSUPPORTED; + *out_size = sizeof(*out_msg_head); + + nic_warn(nic_io->dev_hdl, "Invalid mag event cmd: %u\n", cmd); + + return 0; +} + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +/* pf/ppf handler mgmt cpu report ssslink event */ +void sss_nic_pf_mag_event_handler(void *hwdev, u16 cmd, void *in_buf, u16 in_size, + void *out_buf, u16 *out_size) +{ + _sss_nic_mag_event_handler(hwdev, cmd, in_buf, in_size, out_buf, out_size); +} + +static int _sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + if (sss_get_func_type(hwdev) == SSS_FUNC_TYPE_VF) + if (sss_nic_get_vf_mag_cmd_proc(cmd)) + return sss_mbx_send_to_pf(hwdev, SSS_MOD_TYPE_SSSLINK, cmd, + in_buf, in_size, out_buf, out_size, 0, channel); + + return sss_sync_mbx_send_msg(hwdev, SSS_MOD_TYPE_SSSLINK, + cmd, in_buf, in_size, out_buf, out_size, 0, channel); +} + +static int sss_nic_mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, void *out_buf, u16 *out_size) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, SSS_CHANNEL_NIC); +} + +static int sss_nic_mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, + void *in_buf, u16 in_size, + void *out_buf, u16 *out_size, u16 channel) +{ + return _sss_nic_mag_msg_to_mgmt_sync(hwdev, cmd, in_buf, in_size, + out_buf, out_size, channel); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..ef112925cf50541dc6bb23030c69c41fa6d83b6c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_mag_cfg.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef 
SSS_NIC_MAG_CFG_H +#define SSS_NIC_MAG_CFG_H + +#include + +#include "sss_nic_cfg_mag_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_dev_define.h" + +enum port_module_event_type { + SSSNIC_PORT_MODULE_CABLE_PLUGGED, + SSSNIC_PORT_MODULE_CABLE_UNPLUGGED, + SSSNIC_PORT_MODULE_LINK_ERR, + SSSNIC_PORT_MODULE_MAX_EVENT, +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +struct sss_nic_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +int sss_nic_set_hw_port_state(struct sss_nic_dev *nic_dev, bool enable, u16 channel); + +int sss_nic_get_hw_link_state(struct sss_nic_dev *nic_dev, u8 *link_state); + +void sss_nic_notify_all_vf_link_state(struct sss_nic_io *nic_io, u8 link_status); + +int sss_nic_get_hw_port_info(struct sss_nic_dev *nic_dev, struct sss_nic_port_info *port_info, + u16 channel); + +int sss_nic_get_phy_port_stats(struct sss_nic_dev *nic_dev, struct sss_nic_mag_port_stats *stats); + +int sss_nic_set_link_settings(struct sss_nic_dev *nic_dev, + struct sss_nic_link_ksettings *settings); + +int sss_nic_set_hw_led_state(struct sss_nic_dev *nic_dev, enum sss_nic_mag_led_type type, + enum sss_nic_mag_led_mode mode); + +int sss_nic_set_loopback_mode(struct sss_nic_dev *nic_dev, u8 mode, u8 enable); + +int sss_nic_set_autoneg(struct sss_nic_dev *nic_dev, bool enable); + +int sss_nic_get_sfp_type(struct sss_nic_dev *nic_dev, u8 *sfp_type, u8 *sfp_type_ext); +int sss_nic_get_sfp_eeprom(struct sss_nic_dev *nic_dev, u8 *data, u32 len); + +int sss_nic_set_link_follow_state(struct sss_nic_dev *nic_dev, + enum sss_nic_link_follow_status status); + +void sss_nic_notify_vf_link_state(struct sss_nic_io *nic_io, + u16 vf_id, u8 link_status); + +int sss_nic_vf_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +void sss_nic_pf_mag_event_handler(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int sss_nic_pf_mag_mbx_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int sss_nic_get_sfp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_mbx_get_xsfp_info *xsfp_info); + +bool sss_nic_if_sfp_absent(struct sss_nic_dev *nic_dev); + +int sss_nic_get_loopback_mode(struct sss_nic_dev *nic_dev, u8 *mode, u8 *enable); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c new file mode 100644 index 0000000000000000000000000000000000000000..22968d4c6e245efd675d73f5cb1749047c2c3df7 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_main.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx_reset.h" +#include "sss_nic_rss.h" +#include "sss_nic_dcb.h" +#include "sss_nic_ethtool.h" +#include "sss_nic_filter.h" +#include "sss_nic_netdev_ops.h" +#include "sss_nic_netdev_ops_api.h" 
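+ /* The module parameters declared below use permission 0444: they are
+  * visible under /sys/module/<module>/parameters/ but can only be set at
+  * load time, e.g. (module name assumed):
+  *
+  *   modprobe sss_nic poll_budget=128 rx_buff_size=4
+  *
+  * rx_buff_size is checked in sss_nic_validate_parameters() and falls
+  * back to the 2 KB default unless it is one of 2/4/8/16.
+  */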
+#include "sss_nic_ntuple.h" +#include "sss_nic_event.h" +#include "sss_tool_nic_func.h" + + +#define DEFAULT_POLL_BUDGET 64 +static u32 poll_budget = DEFAULT_POLL_BUDGET; +module_param(poll_budget, uint, 0444); +MODULE_PARM_DESC(poll_budget, "Number packets for NAPI budget (default=64)"); + +#define SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 25 +#define SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +static u8 msix_pending_limit = SSSNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(msix_pending_limit, byte, 0444); +MODULE_PARM_DESC(msix_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); + +static u8 msix_coalesc_timer = + SSSNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(msix_coalesc_timer, byte, 0444); +MODULE_PARM_DESC(msix_coalesc_timer, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=25)"); + +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff_size = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff_size, ushort, 0444); +MODULE_PARM_DESC(rx_buff_size, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB"); + +static u32 rx_poll_wqe = 256; +module_param(rx_poll_wqe, uint, 0444); +MODULE_PARM_DESC(rx_poll_wqe, "Number wqe for rx poll (default=256)"); + +static u8 link_follow_status = SSSNIC_LINK_FOLLOW_STATUS_MAX; +module_param(link_follow_status, byte, 0444); +MODULE_PARM_DESC(link_follow_status, "Set link follow status port status (0=default,1=follow,2=separate,3=unset"); + +#define SSSNIC_DEV_WQ_NAME "sssnic_dev_wq" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) + +#define QID_MASKED(qid, nic_dev) ((qid) & ((nic_dev)->qp_num - 1)) +#define WATCHDOG_TIMEOUT 5 + +#define SSSNIC_SQ_DEPTH 1024 +#define SSSNIC_RQ_DEPTH 1024 + +enum sss_nic_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, + RX_BUFF_VALID_16KB = 16, +}; + +#define CONVERT_UNIT 1024 +#define RX_BUFF_TO_BYTES(size) ((u16)((size) * CONVERT_UNIT)) +#define RX_BUFF_NUM_PER_PAGE 2 +#define RX_BUFF_TO_DMA_SIZE(rx_buff_len) (RX_BUFF_NUM_PER_PAGE * (rx_buff_len)) +#define DMA_SIZE_TO_PAGE_NUM(buff_size) ((buff_size) / PAGE_SIZE) +#define PAGE_NUM_TO_ORDER(page_num) ((page_num) > 0 ? 
ilog2(page_num) : 0) +#define BUFF_SIZE_TO_PAGE_ORDER(buff_size) PAGE_NUM_TO_ORDER(DMA_SIZE_TO_PAGE_NUM(buff_size)) + +#define POLL_BUDGET_IS_VALID(budget) ((budget) <= SSSNIC_MAX_RX_QUEUE_DEPTH) + +#define SSSNIC_NETDEV_DEFAULT_FEATURE (NETIF_F_SG | NETIF_F_HIGHDMA) + +#define SSSNIC_LP_PKT_LEN 60 + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + +#define SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 +#define SSSNIC_VLAN_CLEAR_OFFLOAD (~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO)) + +#define SSSNIC_DRV_DESC "Intelligent Network Interface Card Driver" + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr); +typedef void (*sss_nic_port_module_event_handler_t)(struct sss_nic_dev *nic_dev, void *event_data); + +static DEFINE_MUTEX(g_netdev_notifier_mutex); +static int g_netdev_notifier_ref_cnt; + +typedef void (*sss_nic_event_handler_t)(struct sss_nic_dev *nic_dev, struct sss_event_info *event); + +static struct notifier_block g_netdev_notifier = { + .notifier_call = sss_nic_netdev_event_handler, +}; + +static void sss_nic_register_notifier(struct sss_nic_dev *nic_dev) +{ + int ret; + + mutex_lock(&g_netdev_notifier_mutex); + g_netdev_notifier_ref_cnt++; + if (g_netdev_notifier_ref_cnt == 1) { + ret = register_netdevice_notifier(&g_netdev_notifier); + if (ret != 0) { + nic_info(nic_dev->dev_hdl, + "Fail to register netdevice notifier, ret: %d\n", ret); + g_netdev_notifier_ref_cnt--; + } + } + mutex_unlock(&g_netdev_notifier_mutex); +} + +static void sss_nic_unregister_notifier(struct sss_nic_dev *nic_dev) +{ + mutex_lock(&g_netdev_notifier_mutex); + if (g_netdev_notifier_ref_cnt == 1) + unregister_netdevice_notifier(&g_netdev_notifier); + + if (g_netdev_notifier_ref_cnt > 0) + g_netdev_notifier_ref_cnt--; + mutex_unlock(&g_netdev_notifier_mutex); +} + +static u16 sss_nic_get_vlan_depth(struct net_device *dev) +{ + u16 vlan_depth = 0; + struct net_device *vlan_dev = dev; + + do { + vlan_depth++; + vlan_dev = vlan_dev_priv(vlan_dev)->real_dev; + } while (is_vlan_dev(vlan_dev)); + + return vlan_depth; +} + +static void sss_nic_clear_netdev_vlan_offload(struct net_device *dev, u16 vlan_depth) +{ + if (vlan_depth == SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + dev->vlan_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } else if (vlan_depth > SSSNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { +#ifdef HAVE_NDO_SET_FEATURES + dev->hw_features &= SSSNIC_VLAN_CLEAR_OFFLOAD; +#endif + dev->features &= SSSNIC_VLAN_CLEAR_OFFLOAD; + } +} + +static int sss_nic_netdev_event_handler(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + u16 vlan_depth; + struct net_device *real_dev = NULL; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (!is_vlan_dev(dev)) + return NOTIFY_DONE; + + if (event != NETDEV_REGISTER) + return NOTIFY_DONE; + + dev_hold(dev); + + real_dev = vlan_dev_real_dev(dev); + if (!sss_nic_is_netdev_ops_match(real_dev)) + goto out; + + vlan_depth = sss_nic_get_vlan_depth(dev); + sss_nic_clear_netdev_vlan_offload(dev, vlan_depth); +out: + dev_put(dev); + + return NOTIFY_DONE; +} +#endif + +static netdev_features_t sss_nic_default_cso_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_CSUM(nic_dev->nic_io)) + feature |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (SSSNIC_SUPPORT_SCTP_CRC(nic_dev->nic_io)) + feature |= NETIF_F_SCTP_CRC; + + return feature; +} + +static netdev_features_t sss_nic_default_gso_feature(struct 
sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_TSO(nic_dev->nic_io)) + feature |= NETIF_F_TSO | NETIF_F_TSO6; +#ifdef HAVE_ENCAPSULATION_TSO + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) + feature |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; +#endif /* HAVE_ENCAPSULATION_TSO */ + + return feature; +} + +static netdev_features_t sss_nic_default_vlan_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_RXVLAN_FILTER(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + feature |= NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + feature |= NETIF_F_HW_VLAN_FILTER; +#endif + } + + if (SSSNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->nic_io)) { +#if defined(NETIF_F_HW_VLAN_CTAG_TX) + feature |= NETIF_F_HW_VLAN_CTAG_TX; +#elif defined(NETIF_F_HW_VLAN_TX) + feature |= NETIF_F_HW_VLAN_TX; +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + feature |= NETIF_F_HW_VLAN_CTAG_RX; +#elif defined(NETIF_F_HW_VLAN_RX) + feature |= NETIF_F_HW_VLAN_RX; +#endif + } + + return feature; +} + +static netdev_features_t sss_nic_default_lro_feature(struct sss_nic_dev *nic_dev) +{ + netdev_features_t feature = 0; + + if (SSSNIC_SUPPORT_LRO(nic_dev->nic_io)) + feature = NETIF_F_LRO; + + return feature; +} + +static void sss_nic_init_netdev_hw_feature(struct sss_nic_dev *nic_dev, + netdev_features_t lro_feature) +{ + struct net_device *netdev = nic_dev->netdev; + netdev_features_t hw_features = 0; + + hw_features = netdev->hw_features; + + hw_features |= netdev->features | lro_feature; + + netdev->hw_features = hw_features; +} + +static void sss_nic_init_netdev_hw_enc_feature(struct sss_nic_dev *nic_dev, + netdev_features_t cso_feature, + netdev_features_t gso_feature) +{ + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_ENCAPSULATION_CSUM + netdev->hw_enc_features |= SSSNIC_NETDEV_DEFAULT_FEATURE; + if (SSSNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->nic_io)) { + netdev->hw_enc_features |= cso_feature; +#ifdef HAVE_ENCAPSULATION_TSO + netdev->hw_enc_features |= gso_feature | NETIF_F_TSO_ECN; +#endif /* HAVE_ENCAPSULATION_TSO */ + } +#endif /* HAVE_ENCAPSULATION_CSUM */ +} + +static void sss_nic_init_netdev_feature(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + netdev_features_t cso_feature = 0; + netdev_features_t gso_feature = 0; + netdev_features_t vlan_feature = 0; + netdev_features_t lro_feature = 0; + + cso_feature = sss_nic_default_cso_feature(nic_dev); + gso_feature = sss_nic_default_gso_feature(nic_dev); + vlan_feature = sss_nic_default_vlan_feature(nic_dev); + lro_feature = sss_nic_default_lro_feature(nic_dev); + + netdev->features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature | vlan_feature; + netdev->vlan_features |= SSSNIC_NETDEV_DEFAULT_FEATURE | + cso_feature | gso_feature; + + sss_nic_init_netdev_hw_feature(nic_dev, lro_feature); + sss_nic_init_netdev_hw_enc_feature(nic_dev, cso_feature, gso_feature); + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif +} + +static void sss_nic_init_intr_coal_param(struct sss_nic_intr_coal_info *intr_coal, u16 max_qp) +{ + u16 i; + + for (i = 0; i < max_qp; i++) { + intr_coal[i].pkt_rate_low = SSSNIC_RX_RATE_LOW; + intr_coal[i].pkt_rate_high = SSSNIC_RX_RATE_HIGH; + intr_coal[i].rx_usecs_low = SSSNIC_RX_COAL_TIME_LOW; + intr_coal[i].rx_usecs_high = SSSNIC_RX_COAL_TIME_HIGH; + intr_coal[i].rx_pending_limt_low = SSSNIC_RX_PENDING_LIMIT_LOW; + intr_coal[i].rx_pending_limt_high = 
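+ /* Every queue starts from the same defaults: pending_limt and
+  * coalesce_timer are taken from the msix_pending_limit and
+  * msix_coalesc_timer module parameters, while the low/high pairs bound
+  * the adaptive rx coalescing range.
+  */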
SSSNIC_RX_PENDING_LIMIT_HIGH; + intr_coal[i].pending_limt = msix_pending_limit; + intr_coal[i].coalesce_timer = msix_coalesc_timer; + intr_coal[i].resend_timer = SSSNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + } +} + +static int sss_nic_init_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + u64 coalesce_size; + + coalesce_size = sizeof(*nic_dev->coal_info) * nic_dev->max_qp_num; + nic_dev->coal_info = kzalloc(coalesce_size, GFP_KERNEL); + if (!nic_dev->coal_info) + return -ENOMEM; + + sss_nic_init_intr_coal_param(nic_dev->coal_info, nic_dev->max_qp_num); + + if (test_bit(SSSNIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->use_adaptive_rx_coalesce = 1; + else + nic_dev->use_adaptive_rx_coalesce = 0; + + return 0; +} + +static void sss_nic_deinit_intr_coalesce(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->coal_info); + nic_dev->coal_info = NULL; +} + +static int sss_nic_alloc_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + u8 *loop_test_rx_buf = NULL; + + loop_test_rx_buf = vmalloc(SSSNIC_LP_PKT_CNT * SSSNIC_LP_PKT_LEN); + if (!loop_test_rx_buf) + return -ENOMEM; + + nic_dev->loop_test_rx_buf = loop_test_rx_buf; + nic_dev->loop_pkt_len = SSSNIC_LP_PKT_LEN; + + return 0; +} + +static void sss_nic_free_lb_test_buf(struct sss_nic_dev *nic_dev) +{ + vfree(nic_dev->loop_test_rx_buf); + nic_dev->loop_test_rx_buf = NULL; +} + +static void sss_nic_dev_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_free_lb_test_buf(nic_dev); + + sss_nic_deinit_intr_coalesce(nic_dev); + + sss_nic_free_rq_desc_group(nic_dev); + + sss_nic_free_sq_desc_group(nic_dev); + + sss_nic_clean_mac_list_filter(nic_dev); + + sss_nic_del_mac(nic_dev, nic_dev->netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + + sss_nic_free_rss_key(nic_dev); + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) + sss_nic_set_hw_dcb_state(nic_dev, + SSSNIC_MBX_OPCODE_SET_DCB_STATE, SSSNIC_DCB_STATE_DISABLE); +} + +static int sss_nic_init_mac_addr(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_get_default_mac(nic_dev, netdev->dev_addr); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get MAC address\n"); + return ret; + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + nic_info(nic_dev->dev_hdl, + "Invalid default mac address %pM\n", netdev->dev_addr); + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + nic_err(nic_dev->dev_hdl, "Invalid default MAC address\n"); + return -EIO; + } + + eth_hw_addr_random(netdev); + nic_info(nic_dev->dev_hdl, + "Use random mac address %pM\n", netdev->dev_addr); + } + + ret = sss_nic_set_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + if (ret != 0 && ret != SSSNIC_PF_SET_VF_ALREADY) { + /* If it is a VF device, it is possible that the MAC address has been set by PF, + * and this situation is legal. 
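+ * In that case sss_nic_set_mac() returns SSSNIC_PF_SET_VF_ALREADY, which
+ * is deliberately not treated as a probe failure here.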
+ */ + nic_err(nic_dev->dev_hdl, "Fail to set default MAC\n"); + return ret; + } + + return 0; +} + +static void sss_nic_set_mtu_range(struct net_device *netdev) +{ + /* MTU range: 384 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif + +#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = SSSNIC_MIN_MTU_SIZE; + netdev->extended->max_mtu = SSSNIC_MAX_JUMBO_FRAME_SIZE; +#endif +} + +static int sss_nic_dev_init(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int ret = 0; + + /* get nic cap from hw */ + sss_get_nic_capability(nic_dev->hwdev, &nic_dev->nic_svc_cap); + + ret = sss_nic_dcb_init(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init dcb\n"); + return -EFAULT; + } + + sss_nic_try_to_enable_rss(nic_dev); + + ret = sss_nic_init_mac_addr(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init mac address\n"); + goto init_mac_addr_err; + } + + sss_nic_set_mtu_range(netdev); + + ret = sss_nic_alloc_sq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init sq\n"); + goto init_sq_err; + } + + ret = sss_nic_alloc_rq_desc_group(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init rq\n"); + goto init_rq_err; + } + + ret = sss_nic_init_intr_coalesce(nic_dev); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to init interrupt and coalesce\n"); + goto init_intr_coalesce_err; + } + + ret = sss_nic_alloc_lb_test_buf(nic_dev); + if (ret) { + nic_err(nic_dev->dev_hdl, "Fail to alloc loopback test buf\n"); + goto alloc_lb_test_buf_err; + } + + return 0; + +alloc_lb_test_buf_err: + sss_nic_deinit_intr_coalesce(nic_dev); + +init_intr_coalesce_err: + sss_nic_free_rq_desc_group(nic_dev); + +init_rq_err: + sss_nic_free_sq_desc_group(nic_dev); + +init_sq_err: + sss_nic_del_mac(nic_dev, netdev->dev_addr, 0, + sss_get_global_func_id(nic_dev->hwdev), SSS_CHANNEL_NIC); + +init_mac_addr_err: + sss_nic_free_rss_key(nic_dev); + + return ret; +} + +static void sss_nic_init_netdev_ops(struct sss_nic_dev *nic_dev) +{ + sss_nic_set_netdev_ops(nic_dev); + + sss_nic_set_ethtool_ops(nic_dev); + + nic_dev->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; +} + +static void sss_nic_validate_parameters(struct pci_dev *pdev) +{ + u16 i; + u16 valid_rx_buff_len_list[] = { + RX_BUFF_VALID_2KB, RX_BUFF_VALID_4KB, + RX_BUFF_VALID_8KB, RX_BUFF_VALID_16KB + }; + + if (!POLL_BUDGET_IS_VALID(poll_budget)) + poll_budget = DEFAULT_POLL_BUDGET; + + for (i = 0; i < ARRAY_LEN(valid_rx_buff_len_list); i++) { + if (rx_buff_size == valid_rx_buff_len_list[i]) + return; + } + + rx_buff_size = DEFAULT_RX_BUFF_LEN; +} + +static void sss_nic_periodic_work_handler(struct work_struct *work) +{ + struct delayed_work *delay_work = to_delayed_work(work); + struct sss_nic_dev *nic_dev = container_of(delay_work, struct sss_nic_dev, routine_work); + + if (SSSNIC_TEST_CLEAR_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT)) + sss_fault_event_report(nic_dev->hwdev, SSS_FAULT_SRC_TX_TIMEOUT, + SSS_FAULT_LEVEL_SERIOUS_FLR); + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); +} + +static void sss_nic_dev_resource_destroy(struct sss_nic_dev *nic_dev) +{ + destroy_workqueue(nic_dev->workq); + kfree(nic_dev->vlan_bitmap); +} + +static int sss_nic_dev_params_init(struct net_device *netdev, + struct sss_hal_dev *uld_dev) +{ + struct pci_dev *pdev = uld_dev->pdev; + struct sss_nic_dev *nic_dev; + + nic_dev = (struct sss_nic_dev 
*)netdev_priv(netdev); + nic_dev->hwdev = uld_dev->hwdev; + nic_dev->netdev = netdev; + nic_dev->pdev = pdev; + nic_dev->dev_hdl = &pdev->dev; + nic_dev->uld_dev = uld_dev; + nic_dev->rx_buff_len = RX_BUFF_TO_BYTES(rx_buff_size); + nic_dev->rx_dma_buff_size = RX_BUFF_TO_DMA_SIZE(nic_dev->rx_buff_len); + nic_dev->page_order = BUFF_SIZE_TO_PAGE_ORDER(nic_dev->rx_dma_buff_size); + nic_dev->poll_budget = (int)poll_budget; + nic_dev->rx_poll_wqe = rx_poll_wqe; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->qp_res.sq_depth = SSSNIC_SQ_DEPTH; + nic_dev->qp_res.rq_depth = SSSNIC_RQ_DEPTH; + nic_dev->max_qp_num = sss_get_max_sq_num(nic_dev->hwdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + mutex_init(&nic_dev->qp_mutex); + sema_init(&nic_dev->port_sem, 1); + + nic_dev->vlan_bitmap = kzalloc(SSSNIC_VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (!nic_dev->vlan_bitmap) + return -ENOMEM; + + nic_dev->workq = create_singlethread_workqueue(SSSNIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + nic_err(&pdev->dev, "Fail to initialize nic workqueue\n"); + kfree(nic_dev->vlan_bitmap); + return -ENOMEM; + } + + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_node_info.tcam_node_list); + INIT_LIST_HEAD(&nic_dev->tcam_info.tcam_list); + INIT_LIST_HEAD(&nic_dev->rx_rule.rule_list); + + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + + INIT_DELAYED_WORK(&nic_dev->routine_work, sss_nic_periodic_work_handler); + INIT_DELAYED_WORK(&nic_dev->rq_watchdog_work, sss_nic_rq_watchdog_handler); + INIT_WORK(&nic_dev->rx_mode_work, sss_nic_set_rx_mode_work); + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTR_ADAPT); + + return 0; +} + +static void sss_nic_set_default_link_follow(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + return; + + if (link_follow_status >= SSSNIC_LINK_FOLLOW_STATUS_MAX) + return; + + ret = sss_nic_set_link_follow_state(nic_dev, link_follow_status); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + nic_warn(nic_dev->dev_hdl, + "Firmware doesn't support to set link status follow port status\n"); +} + +static int sss_nic_set_default_feature_to_hw(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_set_default_link_follow(nic_dev); + + ret = sss_nic_set_feature_to_hw(nic_dev->nic_io); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to set nic feature\n"); + return ret; + } + + /* enable all features in netdev->features */ + ret = sss_nic_enable_netdev_feature(nic_dev); + if (ret != 0) { + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); + nic_err(nic_dev->dev_hdl, "Fail to set netdev feature\n"); + return ret; + } + + if (SSSNIC_SUPPORT_RXQ_RECOVERY(nic_dev->nic_io)) + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + + return 0; +} + +static struct net_device *sss_nic_alloc_netdev(void *hwdev) +{ + u16 max_qps = sss_get_max_sq_num(hwdev); + + return alloc_etherdev_mq(sizeof(struct sss_nic_dev), max_qps); +} + +static void sss_nic_free_netdev(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->vlan_bitmap); + free_netdev(nic_dev->netdev); +} + +static int sss_nic_reset_function(void *hwdev) +{ + u16 glb_func_id = sss_get_global_func_id(hwdev); + + return sss_chip_reset_function(hwdev, glb_func_id, SSS_NIC_RESET, SSS_CHANNEL_NIC); +} + +static int sss_nic_init_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + + sss_nic_init_netdev_ops(nic_dev); + + sss_nic_init_netdev_feature(nic_dev); + + ret = sss_nic_set_default_feature_to_hw(nic_dev); + if (ret != 0) + return ret; + + return 0; +} + +static 
void sss_nic_deinit_netdev(struct sss_nic_dev *nic_dev) +{ + sss_nic_update_nic_feature(nic_dev, 0); + sss_nic_set_feature_to_hw(nic_dev->nic_io); +} + +static int sss_nic_register_netdev(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_register_notifier(nic_dev); +#endif + + ret = register_netdev(netdev); + if (ret != 0) { +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + nic_err(nic_dev->dev_hdl, "Fail to register netdev\n"); + return -ENOMEM; + } + + queue_delayed_work(nic_dev->workq, &nic_dev->routine_work, HZ); + + netif_carrier_off(netdev); + + return 0; +} + +static void sss_nic_unregister_netdev(struct sss_nic_dev *nic_dev) +{ + unregister_netdev(nic_dev->netdev); + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + sss_nic_unregister_notifier(nic_dev); +#endif + cancel_delayed_work_sync(&nic_dev->routine_work); + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_work_sync(&nic_dev->rx_mode_work); + destroy_workqueue(nic_dev->workq); +} + +static int sss_nic_probe(struct sss_hal_dev *hal_dev, void **uld_dev, + char *uld_dev_name) +{ + struct pci_dev *pdev = hal_dev->pdev; + void *hwdev = hal_dev->hwdev; + struct sss_nic_dev *nic_dev = NULL; + struct net_device *netdev = NULL; + int ret; + + if (!sss_support_nic(hwdev)) { + nic_info(&pdev->dev, "Hw don't support nic\n"); + return 0; + } + + nic_info(&pdev->dev, "NIC probe begin\n"); + + sss_nic_validate_parameters(pdev); + + ret = sss_nic_reset_function(hwdev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to reset function\n"); + goto err_out; + } + + netdev = sss_nic_alloc_netdev(hwdev); + if (!netdev) { + nic_err(&pdev->dev, "Fail to allocate net device\n"); + ret = -ENOMEM; + goto err_out; + } + + ret = sss_nic_dev_params_init(netdev, hal_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic_dev params\n"); + goto nic_dev_params_init_err; + } + + nic_dev = (struct sss_nic_dev *)netdev_priv(netdev); + + ret = sss_nic_io_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic io\n"); + goto nic_io_init_err; + } + + ret = sss_nic_dev_init(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init nic dev\n"); + goto nic_dev_init_err; + } + + ret = sss_nic_init_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to init net device\n"); + goto init_netdev_err; + } + + ret = sss_nic_register_netdev(nic_dev); + if (ret != 0) { + nic_err(&pdev->dev, "Fail to register net device\n"); + goto register_netdev_err; + } + + *uld_dev = nic_dev; + nic_info(&pdev->dev, "Success to probe NIC\n"); + + return 0; + +register_netdev_err: + sss_nic_deinit_netdev(nic_dev); + +init_netdev_err: + sss_nic_dev_deinit(nic_dev); + +nic_dev_init_err: + sss_nic_io_deinit(nic_dev); + +nic_io_init_err: + sss_nic_dev_resource_destroy(nic_dev); + +nic_dev_params_init_err: + free_netdev(netdev); + +err_out: + nic_err(&pdev->dev, "Fail to run NIC probe\n"); + + return ret; +} + +static void sss_nic_remove(struct sss_hal_dev *hal_dev, void *adapter) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !sss_support_nic(hal_dev->hwdev)) + return; + + nic_info(&hal_dev->pdev->dev, "NIC remove begin\n"); + + sss_nic_unregister_netdev(nic_dev); + + sss_nic_flush_tcam(nic_dev); + + sss_nic_deinit_netdev(nic_dev); + + sss_nic_dev_deinit(nic_dev); + + sss_nic_io_deinit(nic_dev); + + sss_nic_free_netdev(nic_dev); + + nic_info(&hal_dev->pdev->dev, "Success to remove NIC\n"); +} + +static void 
sss_nic_sriov_state_change(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_sriov_state_info *info = (void *)event->event_data; + + if (!info->enable) + sss_nic_clear_all_vf_info(nic_dev->nic_io); +} + +void sss_nic_port_module_cable_plug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable plugged\n"); +} + +void sss_nic_port_module_cable_unplug(struct sss_nic_dev *nic_dev, void *event_data) +{ + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable unplugged\n"); +} + +void sss_nic_port_module_link_err(struct sss_nic_dev *nic_dev, void *event_data) +{ + struct sss_nic_port_module_event *port_event = event_data; + enum link_err_type err_type = port_event->err_type; + + nicif_info(nic_dev, link, nic_dev->netdev, + "Fail to link, err_type: 0x%x\n", err_type); +} + +static void sss_nic_port_module_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_nic_port_module_event *port_event = (void *)event->event_data; + enum port_module_event_type type = port_event->type; + + sss_nic_port_module_event_handler_t handler[SSSNIC_PORT_MODULE_MAX_EVENT] = { + sss_nic_port_module_cable_plug, + sss_nic_port_module_cable_unplug, + sss_nic_port_module_link_err, + }; + + if (type >= SSSNIC_PORT_MODULE_MAX_EVENT) { + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + return; + } + + if (handler[type]) + handler[type](nic_dev, event->event_data); +} + +static void sss_nic_link_down(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (!netif_carrier_ok(netdev)) + return; + + netif_carrier_off(netdev); + nic_dev->link_status = false; + nicif_info(nic_dev, link, netdev, "Link is down\n"); +} + +static void sss_nic_link_up(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + test_bit(SSSNIC_LP_TEST, &nic_dev->flags) || + test_bit(SSSNIC_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (netif_carrier_ok(netdev)) + return; + + netif_carrier_on(netdev); + nic_dev->link_status = true; + + nicif_info(nic_dev, link, netdev, "Link is up\n"); +} + +static void sss_nic_comm_fail_envet_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + struct sss_fault_event *fault = (void *)event->event_data; + + if (fault->fault_level == SSS_FAULT_LEVEL_SERIOUS_FLR && + fault->info.chip.func_id == sss_get_global_func_id(nic_dev->hwdev)) + sss_nic_link_down(nic_dev, event); +} + +static void sss_nic_event_handler(struct sss_nic_dev *nic_dev, struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSSNIC_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_up, + sss_nic_port_module_event_handler, + NULL, + }; + + if (event->type >= SSSNIC_EVENT_MAX) + return; + + if (handler[event->type]) + handler[event->type](nic_dev, event); +} + +static void sss_nic_comm_event_handler(struct sss_nic_dev *nic_dev, + struct sss_event_info *event) +{ + sss_nic_event_handler_t handler[SSS_EVENT_MAX] = { + sss_nic_link_down, + sss_nic_link_down, + sss_nic_comm_fail_envet_handler, + sss_nic_sriov_state_change, + NULL, + sss_nic_link_down, + }; + + if (event->type >= SSS_EVENT_MAX) + return; + + if (handler[event->type]) + 
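+ /* Entries are indexed by the generic SSS_EVENT_* type; slots left NULL
+  * are event types this driver simply ignores.
+  */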
handler[event->type](nic_dev, event); +} + +static void sss_nic_event(struct sss_hal_dev *uld_dev, void *adapter, + struct sss_event_info *event) +{ + struct sss_nic_dev *nic_dev = adapter; + + if (!nic_dev || !event || !sss_support_nic(uld_dev->hwdev)) + return; + + if (event->service == SSS_EVENT_SRV_NIC) { + sss_nic_event_handler(nic_dev, event); + return; + } + + if (event->service == SSS_EVENT_SRV_COMM) { + sss_nic_comm_event_handler(nic_dev, event); + return; + } +} + +struct sss_uld_info g_nic_uld_info = { + .probe = sss_nic_probe, + .remove = sss_nic_remove, + .suspend = NULL, + .resume = NULL, + .event = sss_nic_event, + .ioctl = sss_tool_ioctl, +}; + +struct sss_uld_info *get_nic_uld_info(void) +{ + return &g_nic_uld_info; +} + +static __init int sss_nic_init(void) +{ + int ret; + + pr_info("%s - version %s\n", SSSNIC_DRV_DESC, + SSSNIC_DRV_VERSION); + + ret = sss_register_uld(SSS_SERVICE_TYPE_NIC, &g_nic_uld_info); + if (ret != 0) { + pr_err("Fail to register sss_nic uld\n"); + return ret; + } + + return 0; +} + +static __exit void sss_nic_exit(void) +{ + sss_unregister_uld(SSS_SERVICE_TYPE_NIC); +} + +#ifndef _LLT_TEST_ +module_init(sss_nic_init); +module_exit(sss_nic_exit); +#endif + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Network Interface Card Driver"); +MODULE_VERSION(SSSNIC_DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..9f623f4b1ab3777b96281547c5add436a4c4bcde --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.c @@ -0,0 +1,799 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define SSSNIC_MAX_VLAN_ID 4094 +#define SSSNIC_MAX_QOS_NUM 7 + +#define SSSNIC_TX_RATE_TABLE_FULL 12 + +static int sss_nic_ndo_open(struct net_device *netdev) +{ + int ret; + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already open\n"); + return 0; + } + + ret = sss_nic_io_resource_init(nic_dev->nic_io); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init nic io resource\n"); + return ret; + } + + ret = sss_nic_dev_resource_init(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp resource\n"); + goto init_dev_res_err; + } + + ret = sss_nic_qp_resource_init(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto alloc_qp_res_err; + + ret = sss_nic_open_dev(nic_dev, &qp_info, &nic_dev->qp_res); + if (ret != 0) + goto open_chan_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_err; + + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP); + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); + + return 0; + +vport_err: + sss_nic_close_dev(nic_dev, &qp_info); + +open_chan_err: + 
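+ /* The error labels unwind sss_nic_ndo_open() in reverse order:
+  * qp resources, then per-device resources, then nic_io resources.
+  */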
sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +alloc_qp_res_err: + sss_nic_dev_resource_deinit(nic_dev); + +init_dev_res_err: + sss_nic_io_resource_deinit(nic_dev->nic_io); + + return ret; +} + +static int sss_nic_ndo_stop(struct net_device *netdev) +{ + struct sss_nic_qp_info qp_info = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + nicif_info(nic_dev, drv, netdev, "Netdev already close\n"); + return 0; + } + + if (SSSNIC_TEST_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) + goto out; + + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &qp_info); + sss_nic_qp_resource_deinit(nic_dev, &qp_info, &nic_dev->qp_res); + +out: + sss_nic_io_resource_deinit(nic_dev->nic_io); + sss_nic_dev_resource_deinit(nic_dev); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#endif +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel) +#else +static u16 sss_nic_ndo_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + u8 cos; + u8 qp_num; + u16 sq_num; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) + return sss_nic_select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); + + sq_num = +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) + netdev_pick_tx(netdev, skb, NULL); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + fallback(netdev, skb, sb_dev); +#else + fallback(netdev, skb); +#endif +#else + skb_tx_hash(netdev, skb); +#endif + + if (likely(!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE))) + return sq_num; + + cos = sss_nic_get_cos(nic_dev, skb); + + qp_num = (nic_dev->hw_dcb_cfg.cos_qp_num[cos] != 0) ? 
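+ /* With DCB enabled, fold the hash-picked queue into the range owned by
+  * this CoS: [cos_qp_offset[cos], cos_qp_offset[cos] + cos_qp_num[cos]).
+  */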
+ sq_num % nic_dev->hw_dcb_cfg.cos_qp_num[cos] : 0; + sq_num = nic_dev->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; + + return sq_num; +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *sss_nic_ndo_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif + +#else /* !HAVE_NDO_GET_STATS64 */ +static struct net_device_stats *sss_nic_ndo_get_stats(struct net_device *netdev) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifndef HAVE_NDO_GET_STATS64 +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *stats = &netdev->stats; +#else + struct net_device_stats *stats = &nic_dev->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +#endif /* HAVE_NDO_GET_STATS64 */ + + sss_nic_get_tx_stats(nic_dev, stats); + sss_nic_get_rx_stats(nic_dev, stats); + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void sss_nic_ndo_tx_timeout(struct net_device *netdev, + unsigned int __maybe_unused queue) +#else +static void sss_nic_ndo_tx_timeout(struct net_device *netdev) +#endif +{ + struct sss_nic_io_queue *sq = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u32 sw_pi; + u32 hw_ci; + u8 qid; + + SSSNIC_STATS_TX_TIMEOUT_INC(nic_dev); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, qid))) + continue; + + sq = nic_dev->sq_desc_group[qid].sq; + sw_pi = sss_nic_get_sq_local_pi(sq); + hw_ci = sss_nic_get_sq_hw_ci(sq); + nicif_info(nic_dev, drv, netdev, + "Sq%u: sw_pi: %hu, hw_ci: %u, sw_ci: %u, napi state: 0x%lx.\n", + qid, sw_pi, hw_ci, sss_nic_get_sq_local_ci(sq), + nic_dev->qp_res.irq_cfg[qid].napi.state); + + if (sw_pi != hw_ci) { + SSSNIC_SET_NIC_EVENT_FLAG(nic_dev, SSSNIC_EVENT_TX_TIMEOUT); + return; + } + } +} + +static int sss_nic_ndo_change_mtu(struct net_device *netdev, int new_mtu) +{ + int ret = 0; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_max_mtu; + + if (SSSNIC_IS_XDP_ENABLE(nic_dev)) { + xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + if (new_mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, netdev, + "Fail to change mtu to %d, max mtu is %d\n", + new_mtu, xdp_max_mtu); + return -EINVAL; + } + } +#endif + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)new_mtu); + if (ret) { + nicif_err(nic_dev, drv, netdev, "Fail to change mtu to %d\n", + new_mtu); + return ret; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to change mtu from %u to %d\n", + netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + return 0; +} + +static int sss_nic_ndo_set_mac_address(struct net_device *netdev, void *mac_addr) +{ + int ret = 0; + struct sockaddr *set_addr = mac_addr; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (!is_valid_ether_addr(set_addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, set_addr->sa_data)) { + nicif_info(nic_dev, drv, netdev, + "Already using mac addr: %pM\n", set_addr->sa_data); + return 0; + } + + ret = sss_nic_update_mac(nic_dev, set_addr->sa_data); + if (ret) + return ret; + + ether_addr_copy(netdev->dev_addr, set_addr->sa_data); + + nicif_info(nic_dev, drv, netdev, + "Success to set new mac addr: %pM\n", set_addr->sa_data); + + return 0; +} + +int sss_nic_ndo_vlan_rx_add_vid(struct net_device *netdev, + __always_unused 
__be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_ADD, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to add vlan %u\n", vlan_id); + return ret; + } + + SSSNIC_SET_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to add vlan %u\n", vlan_id); + + return 0; +} + +int sss_nic_ndo_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vlan_id) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (vlan_id == 0) + return 0; + + ret = sss_nic_config_vlan(nic_dev, SSSNIC_MBX_OPCODE_DEL, vlan_id); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to delete vlan\n"); + return ret; + } + + SSSNIC_CLEAR_VLAN_BITMAP(nic_dev, vlan_id); + nicif_info(nic_dev, drv, netdev, "Success to delete vlan %u\n", vlan_id); + + return 0; +} + +static netdev_features_t sss_nic_ndo_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t netdev_feature = features; + + /* If Rx checksum is disabled, then LRO should also be disabled */ + if ((netdev_feature & NETIF_F_RXCSUM) == 0) + netdev_feature &= ~NETIF_F_LRO; + + return netdev_feature; +} + +static int sss_nic_ndo_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + return sss_nic_set_feature(nic_dev, nic_dev->netdev->features, features); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sss_nic_ndo_poll_controller(struct net_device *netdev) +{ + u16 i; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + for (i = 0; i < nic_dev->qp_res.qp_num; i++) + napi_schedule(&nic_dev->qp_res.irq_cfg[i].napi); +} +#endif + +static void sss_nic_ndo_set_rx_mode(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_UPDATE_MAC_FILTER); + } + + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +static int sss_nic_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_io *nic_io = nic_dev->nic_io; + struct sss_nic_vf_info *vf_info = NULL; + + if (vf_id >= pci_num_vf(nic_dev->pdev) || + is_multicast_ether_addr(mac)) + return -EINVAL; + + vf_info = &nic_io->vf_info_group[vf_id]; + ether_addr_copy(vf_info->user_mac, mac); + + if (is_zero_ether_addr(mac)) + nic_info(nic_dev->dev_hdl, + "Success to delete mac on vf %d\n", vf_id); + else + nic_info(nic_dev->dev_hdl, + "Success to set mac %pM on vf %d\n", mac, vf_id); + + return 0; +} + +#ifdef IFLA_VF_MAX +#ifdef IFLA_VF_VLAN_INFO_MAX +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +#else +static int sss_nic_ndo_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos) +#endif +{ + u16 pre_vlanprio; + u16 cur_vlanprio; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (qos > SSSNIC_MAX_QOS_NUM || vlan_id > SSSNIC_MAX_VLAN_ID || + vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + pre_vlanprio = 
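+ /* vlan_id and qos are packed into a single vlan/priority word; when it
+  * already matches the VF's current setting the hardware update is skipped.
+  */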
SSSNIC_GET_VLAN_PRIO(vlan_id, qos); + cur_vlanprio = + sss_nic_vf_info_vlan_prio(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (pre_vlanprio == cur_vlanprio) + return 0; + + return sss_nic_set_hw_vf_vlan(nic_dev, cur_vlanprio, vf_id, vlan_id, qos); +} +#endif + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +static int sss_nic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, + bool set_spoofchk) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + bool cur_spoofchk; + u16 id = SSSNIC_OS_VF_ID_TO_HW(vf_id); + int ret; + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + cur_spoofchk = SSSNIC_GET_VF_SPOOFCHK(nic_dev->nic_io, vf_id); + if (set_spoofchk == cur_spoofchk) + return 0; + + ret = sss_nic_set_vf_spoofchk(nic_dev->nic_io, id, set_spoofchk); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return ret; + } + + nicif_info(nic_dev, drv, netdev, + "Success to %s spoofchk control for VF %d\n", + set_spoofchk ? "enable" : "disable", vf_id); + return 0; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +static int sss_nic_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool new_trust) +{ + bool old_trust; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if ((vf_id >= pci_num_vf(nic_dev->pdev)) || (vf_id > nic_dev->nic_io->max_vf_num)) { + nicif_err(nic_dev, drv, netdev, "Invalid vf id, VF: %d pci_num_vf: %d max_vfs: %d\n", + vf_id, pci_num_vf(nic_dev->pdev), nic_dev->nic_io->max_vf_num); + return -EINVAL; + } + + old_trust = !!nic_dev->nic_io->vf_info_group[vf_id].trust; + /* Same old and new, no need to set, return success directly */ + if (new_trust == old_trust) + return 0; + + nic_dev->nic_io->vf_info_group[vf_id].trust = !!new_trust; + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d trust %d to %d\n", + vf_id, old_trust, new_trust); + + return 0; +} +#endif + +static int sss_nic_ndo_get_vf_config(struct net_device *netdev, + int vf_id, struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) + return -EINVAL; + + sss_nic_get_vf_attribute(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), ifla_vf); + + return 0; +} + +int sss_nic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, netdev, + "Invalid VF Id %d, pci_num_vf %d\n", vf_id, pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + ret = sss_nic_set_vf_link_state(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), link); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set VF %d link state %d\n", vf_id, link); + return ret; + } + + nicif_info(nic_dev, drv, netdev, "Success to set VF %d link state %d\n", + vf_id, link); + + return 0; +} + +static int sss_nic_check_vf_bw_param(const struct sss_nic_dev *nic_dev, + int vf_id, int min_rate, int max_rate) +{ + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Setting vf rate limit is not supported.\n"); + return -EOPNOTSUPP; + } + + if (vf_id >= pci_num_vf(nic_dev->pdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid VF id %d, pci_num_vf %d\n", + vf_id, pci_num_vf(nic_dev->pdev)); + return -EINVAL; + } + + if (max_rate < min_rate) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid rate, maximum rate %d minimum rate %d\n", + max_rate, min_rate); + return -EINVAL; + } + + if (max_rate < 0) 
{ + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid maximum rate %d\n", max_rate); + return -EINVAL; + } + + return 0; +} + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +static int sss_nic_ndo_set_vf_rate(struct net_device *netdev, + int vf_id, int min_tx_rate, int max_tx_rate) +#else +static int sss_nic_ndo_set_vf_tx_rate(struct net_device *netdev, int vf_id, + int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ +#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int min_tx_rate = 0; +#endif + u8 link_status; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_port_info port_info = {0}; + u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, + SPEED_200000 + }; + int ret; + + ret = sss_nic_check_vf_bw_param(nic_dev, vf_id, min_tx_rate, max_tx_rate); + if (ret != 0) + return ret; + + ret = sss_nic_get_hw_link_state(nic_dev, &link_status); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to get link status when set vf tx rate.\n"); + return -EIO; + } + + if (link_status == 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set vf tx rate. the link state is down.\n"); + return -EINVAL; + } + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, + SSS_CHANNEL_NIC); + if (ret != 0 || port_info.speed >= SSSNIC_PORT_SPEED_UNKNOWN) + return -EIO; + + if (max_tx_rate > speeds[port_info.speed]) { + nicif_err(nic_dev, drv, netdev, "Invalid max_tx_rate, it must be in [0 - %u]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + ret = sss_nic_set_vf_tx_rate_limit(nic_dev->nic_io, (u16)SSSNIC_OS_VF_ID_TO_HW(vf_id), + (u32)min_tx_rate, (u32)max_tx_rate); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set VF %d max rate %d min rate %d%s\n", + vf_id, max_tx_rate, min_tx_rate, + ret == SSSNIC_TX_RATE_TABLE_FULL ? + ", tx rate profile is full" : ""); + return -EIO; + } + + nicif_info(nic_dev, drv, netdev, + "Success to set VF %d tx rate [%u-%u]\n", + vf_id, min_tx_rate, max_tx_rate); + + return 0; +} + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF +static int sss_nic_ndo_bpf(struct net_device *netdev, struct netdev_bpf *xdp) +#else +static int sss_nic_ndo_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + +#ifdef HAVE_XDP_QUERY_PROG + if (xdp->command == XDP_QUERY_PROG) { + xdp->prog_id = nic_dev->xdp_prog ? 
nic_dev->xdp_prog->aux->id : 0; + return 0; + } +#endif + if (xdp->command == XDP_SETUP_PROG) + return sss_nic_setup_xdp(nic_dev, xdp); + + return -EINVAL; +} +#endif + +static const struct net_device_ops g_nic_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from within the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = sss_nic_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = sss_nic_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = sss_nic_ndo_set_vf_rate, +#else + .ndo_set_vf_tx_rate = sss_nic_ndo_set_vf_tx_rate, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = sss_nic_ndo_set_vf_spoofchk, +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = sss_nic_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = sss_nic_ndo_get_vf_config, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = sss_nic_ndo_set_vf_link_state, +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static const struct net_device_ops g_nicvf_netdev_ops = { + .ndo_open = sss_nic_ndo_open, + .ndo_stop = sss_nic_ndo_stop, + .ndo_start_xmit = sss_nic_ndo_start_xmit, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = sss_nic_ndo_get_stats64, +#else + .ndo_get_stats = sss_nic_ndo_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = sss_nic_ndo_tx_timeout, + .ndo_select_queue = sss_nic_ndo_select_queue, + +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + /* RHEL7 requires this to be defined to enable extended ops. 
RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from within the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = sss_nic_ndo_change_mtu, +#else + .ndo_change_mtu = sss_nic_ndo_change_mtu, +#endif + .ndo_set_mac_address = sss_nic_ndo_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = sss_nic_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sss_nic_ndo_vlan_rx_kill_vid, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sss_nic_ndo_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = sss_nic_ndo_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_NDO_BPF_NETDEV_BPF + .ndo_bpf = sss_nic_ndo_bpf, +#else + .ndo_xdp = sss_nic_ndo_xdp, +#endif +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = sss_nic_ndo_fix_features, + .ndo_set_features = sss_nic_ndo_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev) +{ + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + nic_dev->netdev->netdev_ops = &g_nic_netdev_ops; + else + nic_dev->netdev->netdev_ops = &g_nicvf_netdev_ops; +} + +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev) +{ + return netdev->netdev_ops == &g_nic_netdev_ops || + netdev->netdev_ops == &g_nicvf_netdev_ops; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..941dcca091f0d04d9ccdb57fb4e88941a8305679 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NETDEV_OPS_H +#define SSS_NIC_NETDEV_OPS_H + +#include +#include + +#include "sss_nic_dev_define.h" + +void sss_nic_set_netdev_ops(struct sss_nic_dev *nic_dev); +bool sss_nic_is_netdev_ops_match(const struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c new file mode 100644 index 0000000000000000000000000000000000000000..6b4404c89b14db0d9862693f7c32323cda50e276 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.c @@ -0,0 +1,1074 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" +#include "sss_nic_tx_init.h" +#include "sss_nic_rx_init.h" +#include "sss_nic_rx.h" +#include "sss_nic_dcb.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_irq.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" + +#define IPV4_VERSION 4 +#define IPV6_VERSION 6 + +#define SSSNIC_LRO_DEF_COAL_PKT_SIZE 32 +#define SSSNIC_LRO_DEF_TIME_LIMIT 16 +#define SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT 100 + +#define SSSNIC_IPV6_ADDR_SIZE 4 +#define 
SSSNIC_PKT_INFO_SIZE 9 +#define SSSNIC_BIT_PER_TUPLE 32 + +#define SSSNIC_RSS_VAL(val, type) \ + (((type) == SSSNIC_RSS_ENGINE_TOEP) ? ntohl(val) : (val)) + +/* Low 16 bits are sport, High 16 bits are dport */ +#define SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr) \ + (((u32)ntohs(*((u16 *)(l4_hdr) + 1U)) << 16) | ntohs(*(u16 *)(l4_hdr))) + +#define SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id) \ + ((u16)(nic_dev)->rss_indir_tbl[(sq_id) & 0xFF]) + +#define SSSNIC_GET_DSCP_PRI_OFFSET 2 + +#define SSSNIC_FEATURE_OP_STR(op) ((op) ? "Enable" : "Disable") + +#define SSSNIC_VLAN_TCI_TO_COS_ID(skb) \ + ((skb)->vlan_tci >> VLAN_PRIO_SHIFT) + +#define SSSNIC_IPV4_DSF_TO_COS_ID(skb) \ + (ipv4_get_dsfield(ip_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +#define SSSNIC_IPV6_DSF_TO_COS_ID(skb) \ + (ipv6_get_dsfield(ipv6_hdr(skb)) >> SSSNIC_GET_DSCP_PRI_OFFSET) + +static int sss_nic_alloc_qp_mgmt_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 qp_num = qp_res->qp_num; + u32 len; + + len = sizeof(*qp_res->irq_cfg) * qp_num; + qp_res->irq_cfg = kzalloc(len, GFP_KERNEL); + if (!qp_res->irq_cfg) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc irq config\n"); + return -ENOMEM; + } + + len = sizeof(*qp_res->rq_res_group) * qp_num; + qp_res->rq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->rq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq res info\n"); + goto alloc_rq_res_err; + } + + len = sizeof(*qp_res->sq_res_group) * qp_num; + qp_res->sq_res_group = kzalloc(len, GFP_KERNEL); + if (!qp_res->sq_res_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq res info\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + kfree(qp_res->rq_res_group); + qp_res->rq_res_group = NULL; + +alloc_rq_res_err: + kfree(qp_res->irq_cfg); + qp_res->irq_cfg = NULL; + + return -ENOMEM; +} + +static void sss_nic_free_qp_mgmt_info(struct sss_nic_qp_resource *qp_res) +{ + kfree(qp_res->irq_cfg); + kfree(qp_res->rq_res_group); + kfree(qp_res->sq_res_group); + qp_res->irq_cfg = NULL; + qp_res->sq_res_group = NULL; + qp_res->rq_res_group = NULL; +} + +static int sss_nic_alloc_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + ret = sss_nic_alloc_qp_mgmt_info(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc qp mgmt info\n"); + return ret; + } + + ret = sss_nic_alloc_rq_res_group(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rq resource\n"); + goto alloc_rq_res_err; + } + + ret = sss_nic_alloc_sq_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq resource\n"); + goto alloc_sq_res_err; + } + + return 0; + +alloc_sq_res_err: + sss_nic_free_rq_res_group(nic_dev, qp_res); + +alloc_rq_res_err: + sss_nic_free_qp_mgmt_info(qp_res); + + return ret; +} + +static void sss_nic_free_qp_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + sss_nic_free_rq_res_group(nic_dev, qp_res); + sss_nic_free_sq_resource(nic_dev, qp_res); + sss_nic_free_qp_mgmt_info(qp_res); +} + +static int sss_nic_init_qp_wq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + + sss_nic_init_all_sq(nic_dev, qp_res); + + ret = sss_nic_init_rq_desc_group(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to configure rq\n"); + return ret; + } + + return 0; +} + +static void 
sss_nic_config_dcb_qp_map(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 cos_num; + u16 qp_num = nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + sss_nic_update_sq_cos(nic_dev, 0); + return; + } + + cos_num = sss_nic_get_user_cos_num(nic_dev); + sss_nic_update_qp_cos_map(nic_dev, cos_num); + /* For now, we don't support to change cos_num */ + if (cos_num > nic_dev->max_cos_num || cos_num > qp_num) { + nicif_err(nic_dev, drv, netdev, + "Invalid cos_num: %u, qp_num: %u or RSS is disable, disable DCB\n", + cos_num, qp_num); + nic_dev->qp_res.cos_num = 0; + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + /* if we can't enable rss or get enough qp_num, + * need to sync default configure to hw + */ + sss_nic_update_dcb_cfg(nic_dev); + } + + sss_nic_update_sq_cos(nic_dev, 1); +} + +static int sss_nic_update_dev_cfg(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int ret; + + ret = sss_nic_set_dev_mtu(nic_dev, (u16)netdev->mtu); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set mtu\n"); + return ret; + } + + sss_nic_config_dcb_qp_map(nic_dev); + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update rx rss\n"); + return ret; + } + + return 0; +} + +static u16 sss_nic_realloc_qp_irq(struct sss_nic_dev *nic_dev, + u16 new_qp_irq_num) +{ + struct sss_irq_desc *qps_irq_info = nic_dev->irq_desc_group; + u16 act_irq_num; + u16 extra_irq_num; + u16 id; + u16 i; + + if (new_qp_irq_num > nic_dev->irq_desc_num) { + extra_irq_num = new_qp_irq_num - nic_dev->irq_desc_num; + act_irq_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + &qps_irq_info[nic_dev->irq_desc_num], + extra_irq_num); + if (act_irq_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc irq\n"); + return nic_dev->irq_desc_num; + } + + nic_dev->irq_desc_num += act_irq_num; + } else if (new_qp_irq_num < nic_dev->irq_desc_num) { + extra_irq_num = nic_dev->irq_desc_num - new_qp_irq_num; + for (i = 0; i < extra_irq_num; i++) { + id = (nic_dev->irq_desc_num - i) - 1; + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + qps_irq_info[id].irq_id); + qps_irq_info[id].irq_id = 0; + qps_irq_info[id].msix_id = 0; + } + nic_dev->irq_desc_num = new_qp_irq_num; + } + + return nic_dev->irq_desc_num; +} + +static void sss_nic_update_dcb_cos_map(struct sss_nic_dev *nic_dev, + const struct sss_nic_qp_resource *qp_res) +{ + u8 cos_num = qp_res->cos_num; + u16 max_qp = qp_res->qp_num; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + if (cos_num == 0 || cos_num > nic_dev->max_cos_num || cos_num > max_qp) + return; /* will disable DCB */ + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); +} + +static void sss_nic_update_qp_info(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + u16 alloc_irq_num; + u16 dst_irq_num; + u16 cur_irq_num; + struct net_device *netdev = nic_dev->netdev; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) + qp_res->qp_num = 1; + + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + + if (nic_dev->irq_desc_num >= qp_res->qp_num) + goto out; + + cur_irq_num = nic_dev->irq_desc_num; + + alloc_irq_num = sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + if (alloc_irq_num < qp_res->qp_num) { + qp_res->qp_num = alloc_irq_num; + sss_nic_update_dcb_cos_map(nic_dev, qp_res); + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, qp_num: %u\n", + qp_res->qp_num); + + dst_irq_num = 
(u16)max_t(u16, cur_irq_num, qp_res->qp_num); + sss_nic_realloc_qp_irq(nic_dev, dst_irq_num); + } + +out: + nicif_info(nic_dev, drv, netdev, "Finally qp_num: %u\n", + qp_res->qp_num); +} + +static int sss_nic_init_qp_irq(struct sss_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u32 irq_info_len = sizeof(*nic_dev->irq_desc_group) * nic_dev->max_qp_num; + + nic_dev->irq_desc_num = 0; + + if (irq_info_len == 0) { + nicif_err(nic_dev, drv, netdev, "Invalid irq_info_len\n"); + return -EINVAL; + } + + nic_dev->irq_desc_group = kzalloc(irq_info_len, GFP_KERNEL); + if (!nic_dev->irq_desc_group) + return -ENOMEM; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) + nic_dev->qp_res.qp_num = 1; + + if (nic_dev->irq_desc_num >= nic_dev->qp_res.qp_num) { + nicif_info(nic_dev, drv, netdev, "Finally qp_num: %u\n", + nic_dev->qp_res.qp_num); + return 0; + } + + nic_dev->irq_desc_num = sss_alloc_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group, nic_dev->qp_res.qp_num); + if (nic_dev->irq_desc_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc qp irq\n"); + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; + return -ENOMEM; + } + + if (nic_dev->irq_desc_num < nic_dev->qp_res.qp_num) { + nic_dev->qp_res.qp_num = nic_dev->irq_desc_num; + nicif_warn(nic_dev, drv, netdev, + "Fail to alloc enough irq, now qp_num: %u\n", + nic_dev->qp_res.qp_num); + } + + return 0; +} + +static void sss_nic_deinit_qp_irq(struct sss_nic_dev *nic_dev) +{ + u16 id; + + for (id = 0; id < nic_dev->irq_desc_num; id++) + sss_free_irq(nic_dev->hwdev, SSS_SERVICE_TYPE_NIC, + nic_dev->irq_desc_group[id].irq_id); + + kfree(nic_dev->irq_desc_group); + nic_dev->irq_desc_group = NULL; +} + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init irq info\n"); + return ret; + } + + sss_nic_update_dcb_cos_map(nic_dev, &nic_dev->qp_res); + + return 0; +} + +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev) +{ + sss_nic_deinit_qp_irq(nic_dev); +} + +static int sss_nic_set_port_state(struct sss_nic_dev *nic_dev, bool state) +{ + int ret; + + down(&nic_dev->port_sem); + + ret = sss_nic_set_hw_port_state(nic_dev, state, SSS_CHANNEL_NIC); + + up(&nic_dev->port_sem); + + return ret; +} + +static void sss_nic_update_link_state(struct sss_nic_dev *nic_dev, + u8 link_state) +{ + struct net_device *netdev = nic_dev->netdev; + + if (nic_dev->link_status == link_state) + return; + + nic_dev->link_status = link_state; + + nicif_info(nic_dev, link, netdev, "Link is %s\n", + (link_state ? 
"up" : "down")); +} + +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + qp_info->sq_depth = qp_res->sq_depth; + qp_info->rq_depth = qp_res->rq_depth; + qp_info->qp_num = qp_res->qp_num; + + ret = sss_nic_alloc_qp(nic_dev->nic_io, nic_dev->irq_desc_group, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp\n"); + return ret; + } + + ret = sss_nic_alloc_qp_resource(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to alloc qp resource\n"); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + return ret; + } + + return 0; +} + +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + mutex_lock(&nic_dev->qp_mutex); + sss_nic_free_qp_resource(nic_dev, qp_res); + sss_nic_free_qp(nic_dev->nic_io, qp_info); + mutex_unlock(&nic_dev->qp_mutex); +} + +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res) +{ + int ret; + struct net_device *netdev = nic_dev->netdev; + + ret = sss_nic_init_qp_info(nic_dev->nic_io, qp_info); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp info\n"); + return ret; + } + + ret = sss_nic_init_qp_wq(nic_dev, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to init qp wq\n"); + goto cfg_qp_err; + } + + ret = sss_nic_request_qp_irq(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to request qp irq\n"); + goto init_qp_irq_err; + } + + ret = sss_nic_update_dev_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to update configure\n"); + goto cfg_err; + } + + return 0; + +cfg_err: + sss_nic_release_qp_irq(nic_dev); + +init_qp_irq_err: +cfg_qp_err: + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); + + return ret; +} + +void sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info) +{ + sss_nic_reset_rx_rss(nic_dev->netdev); + sss_nic_release_qp_irq(nic_dev); + sss_nic_deinit_qp_info(nic_dev->nic_io, qp_info); +} + +int sss_nic_vport_up(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + u8 link_state = 0; + int ret; + struct net_device *netdev = nic_dev->netdev; + + func_id = sss_get_global_func_id(nic_dev->hwdev); + ret = sss_nic_set_hw_vport_state(nic_dev, func_id, true, SSS_CHANNEL_NIC); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set vport enable\n"); + goto set_vport_state_err; + } + + ret = sss_nic_set_port_state(nic_dev, true); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set port enable\n"); + goto set_port_state_err; + } + + netif_set_real_num_rx_queues(netdev, nic_dev->qp_res.qp_num); + netif_set_real_num_tx_queues(netdev, nic_dev->qp_res.qp_num); + netif_tx_wake_all_queues(netdev); + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) { + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret == 0 && link_state != 0) + netif_carrier_on(netdev); + } else { + link_state = true; + netif_carrier_on(netdev); + } + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + SSSNIC_MODERATONE_DELAY); + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + sss_nic_update_link_state(nic_dev, link_state); + + if (!SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, 
link_state); + + return 0; + +set_port_state_err: + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + +set_vport_state_err: + sss_nic_clear_hw_qp_resource(nic_dev); + /* No packets will be sent to the host within 100ms after the vport is disabled */ + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + + return ret; +} + +void sss_nic_vport_down(struct sss_nic_dev *nic_dev) +{ + u16 func_id; + + netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + cancel_delayed_work_sync(&nic_dev->rq_watchdog_work); + cancel_delayed_work_sync(&nic_dev->moderation_task); + + if (sss_get_dev_present_flag(nic_dev->hwdev) == 0) + return; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev) == 0) + sss_nic_notify_all_vf_link_state(nic_dev->nic_io, 0); + + sss_nic_set_port_state(nic_dev, false); + + func_id = sss_get_global_func_id(nic_dev->hwdev); + sss_nic_set_hw_vport_state(nic_dev, func_id, false, SSS_CHANNEL_NIC); + + sss_nic_flush_all_sq(nic_dev); + msleep(SSSNIC_WAIT_FLUSH_QP_RES_TIMEOUT); + sss_nic_clear_hw_qp_resource(nic_dev); +} + +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_hdl, + const void *priv_data) +{ + struct net_device *netdev = nic_dev->netdev; + struct sss_nic_qp_info cur_qp_info = {0}; + struct sss_nic_qp_info new_qp_info = {0}; + int ret; + + sss_nic_update_qp_info(nic_dev, qp_res); + + ret = sss_nic_qp_resource_init(nic_dev, &new_qp_info, qp_res); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to alloc channel resource\n"); + return ret; + } + + if (!SSSNIC_TEST_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_CHANGE_RES_INVALID)) { + sss_nic_vport_down(nic_dev); + sss_nic_close_dev(nic_dev, &cur_qp_info); + sss_nic_qp_resource_deinit(nic_dev, &cur_qp_info, + &nic_dev->qp_res); + } + + if (nic_dev->irq_desc_num > qp_res->qp_num) + sss_nic_realloc_qp_irq(nic_dev, qp_res->qp_num); + nic_dev->qp_res = *qp_res; + + if (reopen_hdl) + reopen_hdl(nic_dev, priv_data); + + ret = sss_nic_open_dev(nic_dev, &new_qp_info, qp_res); + if (ret != 0) + goto open_channel_err; + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto up_vport_err; + + clear_bit(SSSNIC_CHANGE_RES_INVALID, &nic_dev->flags); + nicif_info(nic_dev, drv, netdev, "Success to update channel settings\n"); + + return 0; + +up_vport_err: + sss_nic_close_dev(nic_dev, &new_qp_info); + +open_channel_err: + sss_nic_qp_resource_deinit(nic_dev, &new_qp_info, qp_res); + + return ret; +} + +static u32 sss_nic_calc_xor_rss(u8 *rss_tunple, u32 size) +{ + u32 count; + u32 hash_value; + + hash_value = rss_tunple[0]; + for (count = 1; count < size; count++) + hash_value = hash_value ^ rss_tunple[count]; + + return hash_value; +} + +static u32 sss_nic_calc_toep_rss(const u32 *rss_tunple, u32 size, const u32 *rss_key) +{ + u32 i; + u32 j; + u32 rss = 0; + u32 tunple; + + for (i = 0; i < size; i++) { + for (j = 0; j < SSSNIC_BIT_PER_TUPLE; j++) { + tunple = rss_tunple[i] & + ((u32)1 << (u32)((SSSNIC_BIT_PER_TUPLE - 1) - j)); + if (tunple != 0) + rss ^= (rss_key[i] << j) | + ((u32)((u64)rss_key[i + 1] >> (SSSNIC_BIT_PER_TUPLE - j))); + } + } + + return rss; +} + +static u8 sss_nic_parse_ipv6_info(struct sk_buff *skb, u8 hash_engine, + u32 *rss_tunple, u32 *size) +{ + struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); + u32 *daddr = (u32 *)&ipv6hdr->daddr; + u32 *saddr = (u32 *)&ipv6hdr->saddr; + u32 offset; + u8 i; + + for (i = 0; i < SSSNIC_IPV6_ADDR_SIZE; i++) { + rss_tunple[i] = SSSNIC_RSS_VAL(daddr[i], hash_engine); + /* The offset of the sport 
relative to the dport is 4 */ + offset = (u32)(i + SSSNIC_IPV6_ADDR_SIZE); + rss_tunple[offset] = SSSNIC_RSS_VAL(saddr[i], hash_engine); + } + *size = SSSNIC_IPV6_ADDR_SIZE << 1; + + return (skb_network_header(skb) + sizeof(*ipv6hdr) == + skb_transport_header(skb)) ? ipv6hdr->nexthdr : 0; +} + +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int max_sq_num) +{ + struct iphdr *iphdr = NULL; + unsigned char *l4_hdr = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(dev); + struct sss_nic_rss_type rss_type = nic_dev->rss_type; + u8 l4_proto; + u32 sq_id = 0; + u32 cnt = 0; + u8 hash_engine = nic_dev->rss_hash_engine; + u32 rss_tunple[SSSNIC_PKT_INFO_SIZE] = {0}; + bool convert_flag; + + if (skb_rx_queue_recorded(skb)) { + sq_id = skb_get_rx_queue(skb); + if (unlikely(sq_id >= max_sq_num)) + sq_id %= max_sq_num; + + return (u16)sq_id; + } + + iphdr = ip_hdr(skb); + + if ((iphdr->version != IPV4_VERSION) && (iphdr->version != IPV6_VERSION)) + return (u16)sq_id; + + if (iphdr->version == IPV4_VERSION) { + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->daddr, hash_engine); + rss_tunple[cnt++] = SSSNIC_RSS_VAL(iphdr->saddr, hash_engine); + l4_proto = iphdr->protocol; + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv4) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv4); + } else { + l4_proto = sss_nic_parse_ipv6_info(skb, hash_engine, (u32 *)rss_tunple, &cnt); + convert_flag = ((l4_proto == IPPROTO_UDP) && rss_type.udp_ipv6) || + ((l4_proto == IPPROTO_TCP) && rss_type.tcp_ipv6); + } + + if (convert_flag) { + l4_hdr = skb_transport_header(skb); + rss_tunple[cnt++] = SSSNIC_RSS_VAL_BY_L4_PORT(l4_hdr); + } + + if (hash_engine == SSSNIC_RSS_ENGINE_TOEP) + sq_id = sss_nic_calc_toep_rss((u32 *)rss_tunple, cnt, nic_dev->rss_key_big); + else + sq_id = sss_nic_calc_xor_rss((u8 *)rss_tunple, cnt * (u32)sizeof(cnt)); + + return SSSNIC_GET_SQ_ID_BY_RSS_INDIR(nic_dev, sq_id); +} + +static inline u8 sss_nic_get_cos_by_dscp(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + int dscp_cp; + + dscp_cp = (skb->protocol == htons(ETH_P_IP)) ? SSSNIC_IPV4_DSF_TO_COS_ID(skb) : + (skb->protocol == htons(ETH_P_IPV6) ? SSSNIC_IPV6_DSF_TO_COS_ID(skb) : + nic_dev->hw_dcb_cfg.default_cos); + return nic_dev->hw_dcb_cfg.dscp2cos[dscp_cp]; +} + +static inline u8 sss_nic_get_cos_by_pcp(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + return skb->vlan_tci ? 
+ nic_dev->hw_dcb_cfg.pcp2cos[SSSNIC_VLAN_TCI_TO_COS_ID(skb)] : + nic_dev->hw_dcb_cfg.default_cos; +} + +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb) +{ + if (nic_dev->hw_dcb_cfg.trust == DCB_PCP) + return sss_nic_get_cos_by_pcp(nic_dev, skb); + + return sss_nic_get_cos_by_dscp(nic_dev, skb); +} + +#ifdef NEED_VLAN_RESTORE +static int sss_nic_restore_vlan(struct sss_nic_dev *nic_dev) +{ + int ret = 0; +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + u16 i; + struct net_device *netdev = nic_dev->netdev; + struct net_device *vlandev = NULL; + + rcu_read_lock(); + for (i = 0; i < VLAN_N_VID; i++) { +#ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU + vlandev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), i); +#else + vlandev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), i); +#endif + + if ((!vlandev) && (SSSNIC_TEST_VLAN_BIT(nic_dev, i) != 0)) { + ret = netdev->netdev_ops->ndo_vlan_rx_kill_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to delete vlan %u, ret: %d\n", i, ret); + break; + } + } else if ((vlandev) && (SSSNIC_TEST_VLAN_BIT(nic_dev, i) == 0)) { + ret = netdev->netdev_ops->ndo_vlan_rx_add_vid(netdev, + htons(ETH_P_8021Q), i); + if (ret != 0) { + sss_nic_err(nic_dev, drv, + "Fail to restore vlan %u, ret: %d\n", i, ret); + break; + } + } + } + rcu_read_unlock(); +#endif +#endif + return ret; +} +#endif + +static int sss_nic_set_lro_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, netdev_features_t *fail_feature) +{ + int ret; + bool change = !!((new_feature ^ old_feature) & NETIF_F_LRO); + bool en = !!(new_feature & NETIF_F_LRO); + + if (!change) + return 0; + +#ifdef HAVE_XDP_SUPPORT + if (en && SSSNIC_IS_XDP_ENABLE(nic_dev)) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to enable LRO when xdp is enable\n"); + return -EINVAL; + } +#endif + ret = sss_nic_set_rx_lro_state(nic_dev, en, + SSSNIC_LRO_DEF_TIME_LIMIT, SSSNIC_LRO_DEF_COAL_PKT_SIZE); + if (ret != 0) { + *fail_feature |= NETIF_F_LRO; + sss_nic_err(nic_dev, drv, "Fail to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set lro %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_rx_cvlan_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; +#else + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_RX; +#endif + bool change = !!((old_feature ^ new_feature) & vlan_feature); + bool en = !!(new_feature & vlan_feature); + + if (!change) + return 0; + + ret = sss_nic_set_rx_vlan_offload(nic_dev, en); + if (ret != 0) { + *fail_feature |= vlan_feature; + sss_nic_err(nic_dev, drv, "Fail to set %s rx vlan offload\n", + SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan offload %s\n", + SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +static int sss_nic_set_vlan_filter_feature(struct sss_nic_dev *nic_dev, + netdev_features_t old_feature, + netdev_features_t new_feature, + netdev_features_t *fail_feature) +{ + int ret = 0; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_features_t filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t filter_feature 
= NETIF_F_HW_VLAN_FILTER; +#endif + bool change = !!((new_feature ^ old_feature) & filter_feature); + bool en = !!(new_feature & filter_feature); + + if (!change) + return 0; + +#ifdef NEED_VLAN_RESTORE + if (en) { + ret = sss_nic_restore_vlan(nic_dev); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + } +#endif + ret = sss_nic_set_vlan_fliter(nic_dev, en); + if (ret != 0) { + *fail_feature |= filter_feature; + sss_nic_err(nic_dev, drv, + "Fail to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + return ret; + } + + sss_nic_info(nic_dev, drv, "Success to set rx vlan filter %s\n", SSSNIC_FEATURE_OP_STR(en)); + + return 0; +} + +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature) +{ + u32 ret = 0; + netdev_features_t fail_feature = 0; + + ret |= (u32)sss_nic_set_lro_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_rx_cvlan_feature(nic_dev, old_feature, new_feature, &fail_feature); + ret |= (u32)sss_nic_set_vlan_filter_feature(nic_dev, old_feature, + new_feature, &fail_feature); + if (ret != 0) { + nic_dev->netdev->features = new_feature ^ fail_feature; + return -EIO; + } + + return 0; +} + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev) +{ + /* enable all feature in netdev->features */ + return sss_nic_set_feature(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); +} + +#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf_id, u16 vlan_id, u8 qos) +{ + int ret = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan_id == 0 && qos == 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + } else { + if (cur_vlanprio != 0) { + ret = sss_nic_destroy_vf_vlan(nic_dev->nic_io, + SSSNIC_OS_VF_ID_TO_HW(vf_id)); + if (ret != 0) + return ret; + } + ret = sss_nic_create_vf_vlan(nic_dev->nic_io, SSSNIC_OS_VF_ID_TO_HW(vf_id), + vlan_id, qos); + } + + ret = sss_nic_update_mac_vlan(nic_dev, old_vlan, vlan_id, SSSNIC_OS_VF_ID_TO_HW(vf_id)); + return ret; +} +#endif + +#ifdef HAVE_XDP_SUPPORT +static void sss_nic_put_prog(struct sss_nic_dev *nic_dev, struct bpf_prog *prog) +{ + int i; + struct bpf_prog *pre_prog = NULL; + + pre_prog = xchg(&nic_dev->xdp_prog, prog); + for (i = 0; i < nic_dev->max_qp_num; i++) + xchg(&nic_dev->rq_desc_group[i].xdp_prog, nic_dev->xdp_prog); + + if (pre_prog) + bpf_prog_put(pre_prog); +} + +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp) +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp) +#endif +{ + struct net_device *netdev = nic_dev->netdev; + struct netlink_ext_ack *extack = xdp->extack; + int xdp_max_mtu = SSSNIC_XDP_MAX_MTU(nic_dev); + + if (netdev->mtu > xdp_max_mtu) { + NL_SET_ERR_MSG_MOD(extack, "Invalid mtu for loading xdp program"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp, netdev mtu %d is larger than xdp allowed mtu %d\n", + netdev->mtu, xdp_max_mtu); + + return -EINVAL; + } + + if ((netdev->features & NETIF_F_LRO) != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Fail to setup xdp when LRO is on\n"); + nicif_err(nic_dev, drv, netdev, + "Fail to setup xdp when LRO is on\n"); + + return -EINVAL; + } + + sss_nic_put_prog(nic_dev, xdp->prog); + + return 0; +} + +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct 
rtnl_link_stats64 *stats) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + unsigned int start; + int qid; + + stats->tx_bytes = 0; + stats->tx_packets = 0; + stats->tx_dropped = 0; + + if (!nic_dev->sq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + do { + start = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_dropped += sq_stats->tx_dropped; + stats->tx_packets += sq_stats->tx_packets; + stats->tx_bytes += sq_stats->tx_bytes; + } while (u64_stats_fetch_retry(&sq_stats->stats_sync, start)); + } +} + +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + struct sss_nic_rq_stats *rq_stats = NULL; + unsigned int start; + int qid; + + stats->rx_errors = 0; + stats->rx_dropped = 0; + stats->rx_packets = 0; + stats->rx_bytes = 0; + + if (!nic_dev->rq_desc_group) + return; + + for (qid = 0; qid < nic_dev->max_qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_stats = &rq_desc->stats; + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_dropped += rq_stats->rx_dropped; + stats->rx_errors += rq_stats->csum_errors + + rq_stats->other_errors; + stats->rx_packets += rq_stats->rx_packets; + stats->rx_bytes += rq_stats->rx_bytes; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + } +} +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h new file mode 100644 index 0000000000000000000000000000000000000000..bb8bfce43c01c093fa9aef0bd0118aabca29b4d8 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_netdev_ops_api.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NETDEV_OPS_API_H +#define SSS_NIC_NETDEV_OPS_API_H + +#include +#include +#include + +#include "sss_kernel.h" +#ifdef HAVE_XDP_SUPPORT +#include +#endif +#include "sss_hw.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_io_define.h" + +typedef void (*sss_nic_reopen_handler_t)(struct sss_nic_dev *nic_dev, + const void *priv_data); + +int sss_nic_dev_resource_init(struct sss_nic_dev *nic_dev); +void sss_nic_dev_resource_deinit(struct sss_nic_dev *nic_dev); +int sss_nic_qp_resource_init(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_qp_resource_deinit(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +int sss_nic_open_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info, + struct sss_nic_qp_resource *qp_res); +void sss_nic_close_dev(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_info *qp_info); +int sss_nic_vport_up(struct sss_nic_dev *nic_dev); +void sss_nic_vport_down(struct sss_nic_dev *nic_dev); +int sss_nic_update_channel_setting(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res, + sss_nic_reopen_handler_t reopen_handler, + const void *priv_data); +u16 sss_nic_select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues); +u8 sss_nic_get_cos(struct sss_nic_dev *nic_dev, struct sk_buff *skb); +int sss_nic_set_feature(struct sss_nic_dev *nic_dev, netdev_features_t old_feature, + netdev_features_t new_feature); + +int sss_nic_enable_netdev_feature(struct sss_nic_dev *nic_dev); + 
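+/* sss_nic_set_hw_vf_vlan and sss_nic_setup_xdp below are only built when the kernel exposes IFLA_VF_MAX and HAVE_XDP_SUPPORT respectively; their callers are wrapped in the same #ifdef blocks. */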
+#ifdef IFLA_VF_MAX +int sss_nic_set_hw_vf_vlan(struct sss_nic_dev *nic_dev, + u16 cur_vlanprio, int vf, u16 vlan, u8 qos); +#endif + +#ifdef HAVE_XDP_SUPPORT +#define SSSNIC_XDP_MAX_MTU(nic_dev) ((nic_dev)->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)) +#ifdef HAVE_NDO_BPF_NETDEV_BPF +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_bpf *xdp); +#else +int sss_nic_setup_xdp(struct sss_nic_dev *nic_dev, struct netdev_xdp *xdp); +#endif +#endif +void sss_nic_get_tx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); +void sss_nic_get_rx_stats(struct sss_nic_dev *nic_dev, + struct rtnl_link_stats64 *stats); + +u32 sss_nic_get_io_stats_size(const struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c new file mode 100644 index 0000000000000000000000000000000000000000..341a37bbfb67479021f835385bd2650e6086c16a --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.c @@ -0,0 +1,919 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_MAX_ETHTOOL_NTUPLE_RULE BIT(9) + +#define SSSNIC_TCAM_IP_TYPE_MASK 0x1 +#define SSSNIC_TCAM_TUNNEL_TYPE_MASK 0xF +#define SSSNIC_TCAM_FUNC_ID_MASK 0x7FFF + +#define SSSNIC_TCAM_IPV4_TYPE 0 +#define SSSNIC_TCAM_IPV6_TYPE 1 + +#ifndef UNSUPPORT_NTUPLE_IPV6 +enum sss_nic_ipv6_parse_res { + SSSNIC_IPV6_MASK_INVALID, + SSSNIC_IPV6_MASK_ALL_MASK, + SSSNIC_IPV6_MASK_ALL_ZERO, +}; + +enum sss_nic_ipv6_index { + SSSNIC_IPV6_ID0, + SSSNIC_IPV6_ID1, + SSSNIC_IPV6_ID2, + SSSNIC_IPV6_ID3, +}; +#endif + +struct sss_nic_ethtool_rx_flow_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; +}; + +static void sss_nic_calculate_tcam_key_y(u8 *key_y, const u8 *src_input, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_y[id] = src_input[id] & mask[id]; +} + +static void sss_nic_calculate_tcam_key_x(u8 *key_x, const u8 *key_y, const u8 *mask, u8 len) +{ + u8 id; + + for (id = 0; id < len; id++) + key_x[id] = key_y[id] ^ mask[id]; +} + +static void sss_nic_calculate_tcam_key(struct sss_nic_tcam_key_tag *tcam_key, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + sss_nic_calculate_tcam_key_y(fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_info_ipv4), + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); + sss_nic_calculate_tcam_key_x(fdir_tcam_rule->key.key_x, fdir_tcam_rule->key.key_y, + (u8 *)(&tcam_key->key_mask_ipv4), SSSNIC_TCAM_FLOW_KEY_SIZE); +} + +static int sss_nic_parse_ipv4_base(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + u32 temp; + struct ethtool_tcpip4_spec *val = &flow_spec->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *mask = &flow_spec->m_u.tcp_ip4_spec; + + if (mask->ip4src == U32_MAX) { + temp = ntohl(val->ip4src); + tcam_key->key_info_ipv4.sipv4_l = low_16_bits(temp); + tcam_key->key_info_ipv4.sipv4_h = high_16_bits(temp); + + tcam_key->key_mask_ipv4.sipv4_l = U16_MAX; + tcam_key->key_mask_ipv4.sipv4_h = U16_MAX; + + } else if (mask->ip4src != 0) { + nicif_err(nic_dev, drv, 
nic_dev->netdev, "Invalid source ip mask\n"); + return -EINVAL; + } + + if (mask->ip4dst == U32_MAX) { + temp = ntohl(val->ip4dst); + tcam_key->key_info_ipv4.dipv4_l = low_16_bits(temp); + tcam_key->key_info_ipv4.dipv4_h = high_16_bits(temp); + + tcam_key->key_mask_ipv4.dipv4_l = U16_MAX; + tcam_key->key_mask_ipv4.dipv4_h = U16_MAX; + + } else if (mask->ip4dst != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid destination ip mask\n"); + return -EINVAL; + } + + tcam_key->key_mask_ipv4.ip_type = SSSNIC_TCAM_IP_TYPE_MASK; + tcam_key->key_info_ipv4.ip_type = SSSNIC_TCAM_IPV4_TYPE; + + tcam_key->key_info_ipv4.func_id = sss_get_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv4.func_id = SSSNIC_TCAM_FUNC_ID_MASK; + + return 0; +} + +static int sss_nic_init_ipv4_l4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + struct ethtool_tcpip4_spec *l4_val = &flow_spec->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_mask = &flow_spec->m_u.tcp_ip4_spec; + int ret; + + ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_info_ipv4.dport = ntohs(l4_val->pdst); + tcam_key->key_mask_ipv4.dport = l4_mask->pdst; + + tcam_key->key_info_ipv4.sport = ntohs(l4_val->psrc); + tcam_key->key_mask_ipv4.sport = l4_mask->psrc; + + tcam_key->key_mask_ipv4.ip_proto = U8_MAX; + if (flow_spec->flow_type == TCP_V4_FLOW) + tcam_key->key_info_ipv4.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info_ipv4.ip_proto = IPPROTO_UDP; + + return 0; +} + +static int sss_nic_init_ipv4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_usrip4_spec *l3_val = &flow_spec->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *l3_mask = &flow_spec->m_u.usr_ip4_spec; + + ret = sss_nic_parse_ipv4_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv4.ip_proto = l3_mask->proto; + tcam_key->key_info_ipv4.ip_proto = l3_val->proto; + + return 0; +} + +#ifndef UNSUPPORT_NTUPLE_IPV6 +static int sss_nic_parse_ipv6_mask(const u32 *ipv6_mask) +{ + if (ipv6_mask[SSSNIC_IPV6_ID0] == 0 && ipv6_mask[SSSNIC_IPV6_ID1] == 0 && + ipv6_mask[SSSNIC_IPV6_ID2] == 0 && ipv6_mask[SSSNIC_IPV6_ID3] == 0) + return SSSNIC_IPV6_MASK_ALL_ZERO; + + if (ipv6_mask[SSSNIC_IPV6_ID0] == U32_MAX && + ipv6_mask[SSSNIC_IPV6_ID1] == U32_MAX && + ipv6_mask[SSSNIC_IPV6_ID2] == U32_MAX && ipv6_mask[SSSNIC_IPV6_ID3] == U32_MAX) + return SSSNIC_IPV6_MASK_ALL_MASK; + + return SSSNIC_IPV6_MASK_INVALID; +} + +static int sss_nic_parse_ipv6_base(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int parse_res; + u32 temp; + struct ethtool_tcpip6_spec *val = &flow_spec->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *mask = &flow_spec->m_u.tcp_ip6_spec; + + parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6src); + if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) { + tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; + + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID0]); + tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); + 
tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID1]); + tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID2]); + tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6src[SSSNIC_IPV6_ID3]); + tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); + + } else if (parse_res == SSSNIC_IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid src_ipv6 mask\n"); + return -EINVAL; + } + + parse_res = sss_nic_parse_ipv6_mask((u32 *)mask->ip6dst); + if (parse_res == SSSNIC_IPV6_MASK_ALL_MASK) { + tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; + + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID0]); + tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID1]); + tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID2]); + tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6dst[SSSNIC_IPV6_ID3]); + tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); + + } else if (parse_res == SSSNIC_IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid dst_ipv6 mask\n"); + return -EINVAL; + } + + tcam_key->key_mask_ipv6.ip_type = SSSNIC_TCAM_IP_TYPE_MASK; + tcam_key->key_info_ipv6.ip_type = SSSNIC_TCAM_IPV6_TYPE; + + tcam_key->key_info_ipv6.func_id = + sss_get_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv6.func_id = SSSNIC_TCAM_FUNC_ID_MASK; + + return 0; +} + +static int sss_nic_init_ipv6_l4_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_tcpip6_spec *l4_val = &flow_spec->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *l4_mask = &flow_spec->m_u.tcp_ip6_spec; + + ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv6.dport = l4_mask->pdst; + tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); + + tcam_key->key_mask_ipv6.sport = l4_mask->psrc; + tcam_key->key_info_ipv6.sport = ntohs(l4_val->psrc); + + tcam_key->key_mask_ipv6.ip_proto = U8_MAX; + if (flow_spec->flow_type == TCP_V6_FLOW) + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; + else + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; + + return 0; +} + +static int sss_nic_init_ipv6_fdir_tcam(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key) +{ + int ret; + struct ethtool_usrip6_spec *l3_mask = &flow_spec->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *l3_val = &flow_spec->h_u.usr_ip6_spec; + + ret = sss_nic_parse_ipv6_base(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + + tcam_key->key_mask_ipv6.ip_proto = 
l3_mask->l4_proto; + tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; + + return 0; +} +#endif + +static int sss_nic_init_fdir_tcam_info(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec, + struct sss_nic_tcam_key_tag *tcam_key, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + int ret; + + switch (flow_spec->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = sss_nic_init_ipv4_l4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; + case IP_USER_FLOW: + ret = sss_nic_init_ipv4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = sss_nic_init_ipv6_l4_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; + case IPV6_USER_FLOW: + ret = sss_nic_init_ipv6_fdir_tcam(nic_dev, flow_spec, tcam_key); + if (ret != 0) + return ret; + break; +#endif + default: + return -EOPNOTSUPP; + } + + tcam_key->key_mask_ipv4.tunnel_type = SSSNIC_TCAM_TUNNEL_TYPE_MASK; + tcam_key->key_info_ipv4.tunnel_type = 0; + + fdir_tcam_rule->data.qid = (u32)flow_spec->ring_cookie; + sss_nic_calculate_tcam_key(tcam_key, fdir_tcam_rule); + + return 0; +} + +void sss_nic_flush_tcam_list(struct sss_nic_tcam_info *tcam_info) +{ + struct sss_nic_tcam_filter *filter_tmp = NULL; + struct sss_nic_tcam_filter *filter = NULL; + struct list_head *tcam_list = &tcam_info->tcam_list; + + if (list_empty(tcam_list)) + return; + + list_for_each_entry_safe(filter, filter_tmp, + tcam_list, tcam_filter_list) { + list_del(&filter->tcam_filter_list); + kfree(filter); + } +} + +void sss_nic_flush_tcam_node_list(struct sss_nic_tcam_info *tcam_info) +{ + struct sss_nic_tcam_node *block_tmp = NULL; + struct sss_nic_tcam_node *block = NULL; + struct list_head *dynamic_list = + &tcam_info->tcam_node_info.tcam_node_list; + + if (list_empty(dynamic_list)) + return; + + list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) { + list_del(&block->block_list); + kfree(block); + } +} + +void sss_nic_flush_rx_flow_rule(struct sss_nic_rx_rule *rx_flow_rule) +{ + struct sss_nic_ethtool_rx_flow_rule *rule_tmp = NULL; + struct sss_nic_ethtool_rx_flow_rule *rule = NULL; + struct list_head *rule_list = &rx_flow_rule->rule_list; + + if (list_empty(rule_list)) + return; + + list_for_each_entry_safe(rule, rule_tmp, rule_list, list) { + list_del(&rule->list); + kfree(rule); + } +} + +void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev) +{ + sss_nic_flush_tcam_list(&nic_dev->tcam_info); + + sss_nic_flush_tcam_node_list(&nic_dev->tcam_info); + + sss_nic_flush_rx_flow_rule(&nic_dev->rx_rule); + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + sss_nic_flush_tcam_rule(nic_dev); + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + } +} + +static struct sss_nic_tcam_node * +sss_nic_alloc_tcam_block_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_info *nic_tcam_info, + u16 block_id) +{ + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + + dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); + if (!dynamic_block_ptr) + return NULL; + + dynamic_block_ptr->block_id = block_id; + list_add_tail(&dynamic_block_ptr->block_list, + &nic_tcam_info->tcam_node_info.tcam_node_list); + + nic_tcam_info->tcam_node_info.block_cnt++; + + return dynamic_block_ptr; +} + +static void sss_nic_free_tcam_block_resource(struct sss_nic_tcam_info *nic_tcam_info, + struct sss_nic_tcam_node *block_ptr) +{ + if (!block_ptr) + return; + + 
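+	/* Unlink the block from the dynamic TCAM node list and release it */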
list_del(&block_ptr->block_list); + kfree(block_ptr); + + nic_tcam_info->tcam_node_info.block_cnt--; +} + +static struct sss_nic_tcam_node * +sss_nic_dynamic_lookup_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule, + const struct sss_nic_tcam_info *tcam_info, + struct sss_nic_tcam_filter *tcam_filter, + u16 *tcam_index) +{ + u16 index; + struct sss_nic_tcam_node *ptr = NULL; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) + if (ptr->index_cnt < SSSNIC_TCAM_BLOCK_SIZE) + break; + + if (!ptr || ptr->index_cnt >= SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup index for fdir filter dynamic\n"); + return NULL; + } + + for (index = 0; index < SSSNIC_TCAM_BLOCK_SIZE; index++) + if (ptr->index_used[index] == 0) + break; + + if (index == SSSNIC_TCAM_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "tcam block 0x%x supports filter rules is full\n", + ptr->block_id); + return NULL; + } + + tcam_filter->block_id = ptr->block_id; + tcam_filter->index = index; + *tcam_index = index; + + fdir_tcam_rule->index = index + + SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id); + + return ptr; +} + +static int sss_nic_add_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter, + struct sss_nic_tcam_rule_cfg *fdir_tcam_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_node *dynamic_block_ptr = NULL; + struct sss_nic_tcam_node *tmp = NULL; + u16 block_cnt = tcam_info->tcam_node_info.block_cnt; + u16 tcam_block_index = 0; + int block_alloc_flag = 0; + u16 index = 0; + + if (tcam_info->tcam_rule_num >= + block_cnt * SSSNIC_TCAM_BLOCK_SIZE) { + if (block_cnt >= (SSSNIC_TCAM_FILTERS_MAX / + SSSNIC_TCAM_BLOCK_SIZE)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc, dynamic tcam block is full\n"); + goto failed; + } + + ret = sss_nic_alloc_tcam_block(nic_dev, &tcam_block_index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fdir filter dynamic tcam alloc block\n"); + goto failed; + } + + block_alloc_flag = 1; + + dynamic_block_ptr = + sss_nic_alloc_tcam_block_resource(nic_dev, tcam_info, + tcam_block_index); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to Fdir filter dynamic alloc block memory\n"); + goto block_alloc_failed; + } + } + + tmp = sss_nic_dynamic_lookup_tcam_filter(nic_dev, + fdir_tcam_rule, tcam_info, + tcam_filter, &index); + if (!tmp) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to dynamic lookup tcam filter\n"); + goto lookup_tcam_index_failed; + } + + ret = sss_nic_add_tcam_rule(nic_dev, fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add fdir_tcam_rule\n"); + goto add_tcam_rules_failed; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Add fdir tcam rule, func_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_num: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), + tcam_filter->block_id, index, fdir_tcam_rule->index, + fdir_tcam_rule->data.qid, tcam_info->tcam_rule_num + 1); + + if (tcam_info->tcam_rule_num == 0) { + ret = sss_nic_set_fdir_tcam_rule_filter(nic_dev, true); + if (ret != 0) + goto enable_failed; + } + + list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); + + tmp->index_used[index] = 1; + tmp->index_cnt++; + + tcam_info->tcam_rule_num++; + + return 0; + +enable_failed: + sss_nic_del_tcam_rule(nic_dev, 
fdir_tcam_rule->index); + +add_tcam_rules_failed: +lookup_tcam_index_failed: + if (block_alloc_flag == 1) + sss_nic_free_tcam_block_resource(tcam_info, + dynamic_block_ptr); + +block_alloc_failed: + if (block_alloc_flag == 1) + sss_nic_free_tcam_block(nic_dev, &tcam_block_index); + +failed: + return -EFAULT; +} + +static int sss_nic_del_tcam_filter(struct sss_nic_dev *nic_dev, + struct sss_nic_tcam_filter *tcam_filter) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + u16 block_id = tcam_filter->block_id; + struct sss_nic_tcam_node *ptr = NULL; + u32 index = 0; + + list_for_each_entry(ptr, + &tcam_info->tcam_node_info.tcam_node_list, + block_list) { + if (ptr->block_id == block_id) + break; + } + if (!ptr || ptr->block_id != block_id) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to lookup block for fdir filter del dynamic\n"); + return -EFAULT; + } + + index = SSSNIC_PKT_TCAM_INDEX_START(ptr->block_id) + + tcam_filter->index; + + ret = sss_nic_del_tcam_rule(nic_dev, index); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to del fdir_tcam_rule\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Del fdir_tcam_dynamic_rule func_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", + sss_get_global_func_id(nic_dev->hwdev), block_id, + tcam_filter->index, index, ptr->index_cnt - 1, + tcam_info->tcam_rule_num - 1); + + ptr->index_used[tcam_filter->index] = 0; + ptr->index_cnt--; + tcam_info->tcam_rule_num--; + if (ptr->index_cnt == 0) { + sss_nic_free_tcam_block(nic_dev, &block_id); + sss_nic_free_tcam_block_resource(tcam_info, ptr); + } + + if (tcam_info->tcam_rule_num == 0) + sss_nic_set_fdir_tcam_rule_filter(nic_dev, false); + + list_del(&tcam_filter->tcam_filter_list); + kfree(tcam_filter); + + return 0; +} + +static inline struct sss_nic_tcam_filter * +sss_nic_lookup_tcam_filter(const struct list_head *filter_list, + struct sss_nic_tcam_key_tag *key) +{ + struct sss_nic_tcam_filter *ptr; + + list_for_each_entry(ptr, filter_list, tcam_filter_list) { + if (memcmp(key, &ptr->tcam_key, + sizeof(*key)) == 0) + return ptr; + } + + return NULL; +} + +static void sss_nic_del_ethtool_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + list_del(ð_rule->list); + nic_dev->rx_rule.rule_cnt--; + + kfree(eth_rule); +} + +static int sss_nic_del_one_rule(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *eth_rule) +{ + int ret; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + struct sss_nic_tcam_filter *tcam_filter; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_key_tag tcam_key = {0}; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, ð_rule->flow_spec, + &tcam_key, &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (!tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exists\n"); + return -EEXIST; + } + + ret = sss_nic_del_tcam_filter(nic_dev, tcam_filter); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to delete tcam filter\n"); + return ret; + } + + sss_nic_del_ethtool_rule(nic_dev, eth_rule); + + return 0; +} + +static void sss_nic_add_rule_to_list(struct sss_nic_dev *nic_dev, + struct sss_nic_ethtool_rx_flow_rule *rule) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; 
+ struct list_head *head = &nic_dev->rx_rule.rule_list; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location > rule->flow_spec.location) + break; + head = &ptr->list; + } + nic_dev->rx_rule.rule_cnt++; + list_add(&rule->list, head); +} + +static int sss_nic_add_one_rule(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct sss_nic_tcam_key_tag tcam_key = {0}; + struct sss_nic_tcam_rule_cfg fdir_tcam_rule = {0}; + struct sss_nic_tcam_filter *tcam_filter = NULL; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + struct sss_nic_tcam_info *tcam_info = &nic_dev->tcam_info; + + ret = sss_nic_init_fdir_tcam_info(nic_dev, flow_spec, &tcam_key, + &fdir_tcam_rule); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init fdir info\n"); + return ret; + } + + tcam_filter = sss_nic_lookup_tcam_filter(&tcam_info->tcam_list, + &tcam_key); + if (tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); + return -EEXIST; + } + + tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); + if (!tcam_filter) + return -ENOMEM; + memcpy(&tcam_filter->tcam_key, + &tcam_key, sizeof(tcam_key)); + tcam_filter->qid = (u16)fdir_tcam_rule.data.qid; + + ret = sss_nic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); + if (ret != 0) + goto add_tcam_filter_fail; + + /* driver save new rule filter */ + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) { + ret = -ENOMEM; + goto alloc_eth_rule_fail; + } + + eth_rule->flow_spec = *flow_spec; + sss_nic_add_rule_to_list(nic_dev, eth_rule); + + return 0; + +alloc_eth_rule_fail: + sss_nic_del_tcam_filter(nic_dev, tcam_filter); +add_tcam_filter_fail: + kfree(tcam_filter); + return ret; +} + +static struct sss_nic_ethtool_rx_flow_rule * +sss_nic_ethtool_find_rule(const struct sss_nic_dev *nic_dev, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.location == location) + return ptr; + } + return NULL; +} + +static int sss_nic_validate_flow(struct sss_nic_dev *nic_dev, + const struct ethtool_rx_flow_spec *flow_spec) +{ + int i; + u32 flow_type[] = { + TCP_V4_FLOW, UDP_V4_FLOW, IP_USER_FLOW, +#ifndef UNSUPPORT_NTUPLE_IPV6 + TCP_V6_FLOW, UDP_V6_FLOW, IPV6_USER_FLOW, +#endif + }; + + if (flow_spec->ring_cookie >= nic_dev->qp_res.qp_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Action larger than queue number %u\n", + nic_dev->qp_res.qp_num); + return -EINVAL; + } + + if (flow_spec->location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid location out of range: [0,%lu]\n", + SSSNIC_MAX_ETHTOOL_NTUPLE_RULE); + return -EINVAL; + } + + for (i = 0; i < ARRAY_LEN(flow_type); i++) { + if (flow_spec->flow_type == flow_type[i]) + return 0; + } + + nicif_err(nic_dev, drv, nic_dev->netdev, "flow type not supported\n"); + return -EOPNOTSUPP; +} + +int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *flow_spec) +{ + int ret; + struct ethtool_rx_flow_spec flow_spec_temp; + int loc_exit_flag = 0; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + ret = sss_nic_validate_flow(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "flow is not valid %d\n", ret); + return ret; + } + + 
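+	/* Rules are keyed by the ethtool 'location' field: any rule already at this location is removed first and re-added below if installing the new rule fails. */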
eth_rule = sss_nic_ethtool_find_rule(nic_dev, flow_spec->location); + /* when location is same, delete old location rule. */ + if (eth_rule) { + memcpy(&flow_spec_temp, ð_rule->flow_spec, + sizeof(flow_spec_temp)); + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + if (ret != 0) + return ret; + + loc_exit_flag = 1; + } + + /* add new rule filter */ + ret = sss_nic_add_one_rule(nic_dev, flow_spec); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to add new rule filter\n"); + if (loc_exit_flag) + sss_nic_add_one_rule(nic_dev, &flow_spec_temp); + + return -ENOENT; + } + + return 0; +} + +int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location) +{ + int ret; + struct sss_nic_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -ENOSPC; + + eth_rule = sss_nic_ethtool_find_rule(nic_dev, location); + if (!eth_rule) + return -ENOENT; + + ret = sss_nic_del_one_rule(nic_dev, eth_rule); + + return ret; +} + +int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location) +{ + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= SSSNIC_MAX_ETHTOOL_NTUPLE_RULE) + return -EINVAL; + + list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list) { + if (nic_eth_rule->flow_spec.location == location) { + info->fs = nic_eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + int id = 0; + struct sss_nic_ethtool_rx_flow_rule *nic_eth_rule = NULL; + + if (!SSSNIC_SUPPORT_FDIR(nic_dev->nic_io)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + info->data = SSSNIC_MAX_ETHTOOL_NTUPLE_RULE; + list_for_each_entry(nic_eth_rule, &nic_dev->rx_rule.rule_list, list) + rule_locs[id++] = nic_eth_rule->flow_spec.location; + + return info->rule_cnt == id ? 0 : -ENOENT; +} + +bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num) +{ + struct sss_nic_ethtool_rx_flow_rule *ptr = NULL; + + list_for_each_entry(ptr, &nic_dev->rx_rule.rule_list, list) { + if (ptr->flow_spec.ring_cookie >= q_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "User defined filter %u assigns flow to queue %llu. 
Queue number %u is Invalid\n", + ptr->flow_spec.location, ptr->flow_spec.ring_cookie, q_num); + return false; + } + } + + return true; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h new file mode 100644 index 0000000000000000000000000000000000000000..3712434b0510329b9dd0ec868b068a31a759af08 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_ntuple.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_NTUPLE_H +#define SSS_NIC_NTUPLE_H + +#include +#include + +#include "sss_nic_dev_define.h" + +void sss_nic_flush_tcam(struct sss_nic_dev *nic_dev); + +int sss_nic_ethtool_update_flow(struct sss_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs); + +int sss_nic_ethtool_delete_flow(struct sss_nic_dev *nic_dev, u32 location); + +int sss_nic_ethtool_get_flow(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location); + +int sss_nic_ethtool_get_all_flows(const struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); + +bool sss_nic_validate_channel_setting_in_ntuple(const struct sss_nic_dev *nic_dev, u32 q_num); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c new file mode 100644 index 0000000000000000000000000000000000000000..d1713b35b7d05915b4745fda5d2c9a6ecfacae03 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.c @@ -0,0 +1,1003 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_ntuple.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_dcb.h" + +#define SSSNIC_INVALID_TC_ID 0xFF + +#define SSSNIC_DEF_RSS_KEY_0 0x6d +#define SSSNIC_DEF_RSS_KEY_1 0x5a +#define SSSNIC_DEF_RSS_KEY_2 0x56 +#define SSSNIC_DEF_RSS_KEY_3 0xda +#define SSSNIC_DEF_RSS_KEY_4 0x25 +#define SSSNIC_DEF_RSS_KEY_5 0x5b +#define SSSNIC_DEF_RSS_KEY_6 0x0e +#define SSSNIC_DEF_RSS_KEY_7 0xc2 +#define SSSNIC_DEF_RSS_KEY_8 0x41 +#define SSSNIC_DEF_RSS_KEY_9 0x67 +#define SSSNIC_DEF_RSS_KEY_10 0x25 +#define SSSNIC_DEF_RSS_KEY_11 0x3d +#define SSSNIC_DEF_RSS_KEY_12 0x43 +#define SSSNIC_DEF_RSS_KEY_13 0xa3 +#define SSSNIC_DEF_RSS_KEY_14 0x8f +#define SSSNIC_DEF_RSS_KEY_15 0xb0 +#define SSSNIC_DEF_RSS_KEY_16 0xd0 +#define SSSNIC_DEF_RSS_KEY_17 0xca +#define SSSNIC_DEF_RSS_KEY_18 0x2b +#define SSSNIC_DEF_RSS_KEY_19 0xcb +#define SSSNIC_DEF_RSS_KEY_20 0xae +#define SSSNIC_DEF_RSS_KEY_21 0x7b +#define SSSNIC_DEF_RSS_KEY_22 0x30 +#define SSSNIC_DEF_RSS_KEY_23 0xb4 +#define SSSNIC_DEF_RSS_KEY_24 0x77 +#define SSSNIC_DEF_RSS_KEY_25 0xcb +#define SSSNIC_DEF_RSS_KEY_26 0x2d +#define SSSNIC_DEF_RSS_KEY_27 0xa3 +#define SSSNIC_DEF_RSS_KEY_28 0x80 +#define SSSNIC_DEF_RSS_KEY_29 0x30 +#define SSSNIC_DEF_RSS_KEY_30 0xf2 +#define SSSNIC_DEF_RSS_KEY_31 0x0c +#define SSSNIC_DEF_RSS_KEY_32 0x6a +#define SSSNIC_DEF_RSS_KEY_33 0x42 +#define SSSNIC_DEF_RSS_KEY_34 0xb7 +#define SSSNIC_DEF_RSS_KEY_35 0x3b +#define SSSNIC_DEF_RSS_KEY_36 0xbe +#define SSSNIC_DEF_RSS_KEY_37 0xac +#define SSSNIC_DEF_RSS_KEY_38 0x01 +#define SSSNIC_DEF_RSS_KEY_39 0xfa + 
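+/* The forty SSSNIC_DEF_RSS_KEY_* bytes above are the driver's default RSS hash key;
+ * sss_nic_init_rss_default_key() assembles them into the SSSNIC_RSS_KEY_SIZE-byte key
+ * that is programmed into hardware.
+ *
+ * Illustrative userspace invocations of the ethtool hooks implemented in this file and
+ * in sss_nic_ntuple.c (assumed typical usage, not part of this patch):
+ *
+ *   ethtool -L eth0 combined 8                                   # sss_nic_set_channels()
+ *   ethtool -X eth0 hfunc xor                                    # sss_nic_set_rxfh()
+ *   ethtool -N eth0 rx-flow-hash tcp4 sdfn                       # sss_nic_set_rxnfc(ETHTOOL_SRXFH)
+ *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 1    # ntuple rule insert
+ */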
+#define SSSNIC_COS_CHANGE_OFFSET 4 + +#define SSSNIC_RXH_PORT (RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define SSSNIC_RXH_IP (RXH_IP_DST | RXH_IP_SRC) +#define SSSNIC_SUPPORT_RXH (SSSNIC_RXH_IP | SSSNIC_RXH_PORT) + +static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num); + +static u16 max_qp_num; +module_param(max_qp_num, ushort, 0444); +MODULE_PARM_DESC(max_qp_num, "Number of Queue Pairs (default=0)"); + +static void sss_nic_fill_indir_tbl(struct sss_nic_dev *nic_dev, u8 cos_num, u32 *indir) +{ + int i = 0; + u16 k; + u16 group_size; + u16 start_qid = 0; + u16 qp_num = 0; + u8 cur_cos = 0; + u8 j; + u8 default_cos; + u8 cos_map = sss_nic_get_valid_cos_map(nic_dev); + + if (cos_num == 0) { + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir[i] = i % nic_dev->qp_res.qp_num; + return; + } + + group_size = SSSNIC_RSS_INDIR_SIZE / cos_num; + for (j = 0; j < cos_num; j++) { + while (cur_cos < SSSNIC_DCB_COS_MAX && + nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos] == 0) + cur_cos++; + + if (cur_cos < SSSNIC_DCB_COS_MAX) { + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[cur_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[cur_cos]; + } else { + if (BIT(nic_dev->hw_dcb_cfg.default_cos) & cos_map) + default_cos = nic_dev->hw_dcb_cfg.default_cos; + else + default_cos = (u8)fls(cos_map) - 1; + qp_num = nic_dev->hw_dcb_cfg.cos_qp_num[default_cos]; + start_qid = nic_dev->hw_dcb_cfg.cos_qp_offset[default_cos]; + } + + for (k = 0; k < group_size; k++) + indir[i++] = start_qid + k % qp_num; + + cur_cos++; + } +} + +static void sss_nic_get_dcb_cos_map(struct sss_nic_dev *nic_dev, + u8 *cos_map, u8 *cos_num) +{ + u8 i; + u8 num; + u8 cfg_map[SSSNIC_DCB_UP_MAX]; + bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + if (!dcb_en) + return; + + if (nic_dev->hw_dcb_cfg.trust == 0) { + memcpy(cfg_map, nic_dev->hw_dcb_cfg.pcp2cos, sizeof(cfg_map)); + } else if (nic_dev->hw_dcb_cfg.trust == 1) { + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) + cfg_map[i] = nic_dev->hw_dcb_cfg.dscp2cos[i * SSSNIC_DCB_DSCP_NUM]; + } + + for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++) + cos_map[SSSNIC_COS_CHANGE_OFFSET + i] = cfg_map[i]; + + for (i = 0; i < SSSNIC_COS_CHANGE_OFFSET; i++) + cos_map[i] = cfg_map[SSSNIC_DCB_UP_MAX - (i + 1)]; + + num = sss_nic_get_user_cos_num(nic_dev); + while (num & (num - 1)) + num++; + + *cos_num = num; +} + +int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 cos_num = 0; + u8 cos_map[SSSNIC_DCB_UP_MAX] = {0}; + struct net_device *netdev = nic_dev->netdev; + + sss_nic_get_dcb_cos_map(nic_dev, cos_map, &cos_num); + + ret = sss_nic_set_hw_rss(netdev, cos_map, cos_num); + if (ret != 0) + return ret; + + return ret; +} + +void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev) +{ + u8 cos_map[SSSNIC_DCB_UP_MAX] = {0}; + + sss_nic_config_rss_to_hw(nic_dev, 0, cos_map, 1, 0); +} + +static void sss_nic_init_rss_type(struct sss_nic_dev *nic_dev) +{ + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.udp_ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_hash_engine = SSSNIC_RSS_ENGINE_XOR; +} + +void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->rss_key); + nic_dev->rss_key = NULL; + nic_dev->rss_key_big = NULL; + + kfree(nic_dev->rss_indir_tbl); + nic_dev->rss_indir_tbl = NULL; +} + +void sss_nic_set_default_rss_indir(struct net_device *netdev) +{ + struct 
sss_nic_dev *nic_dev = netdev_priv(netdev); + + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); +} + +static void sss_nic_maybe_reset_rss_indir(struct net_device *netdev, bool dcb_en) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + if (dcb_en) { + nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n"); + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + return; + } + + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) { + if (nic_dev->rss_indir_tbl[i] >= nic_dev->qp_res.qp_num) { + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + return; + } + } +} + +static u16 sss_nic_get_online_cpu(struct pci_dev *pdev) +{ + int i; + int node; + u16 cpu_num = 0; + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(&pdev->dev)) + cpu_num++; + } + + if (cpu_num == 0) + cpu_num = (u16)num_online_cpus(); + + return cpu_num; +} + +static void sss_nic_init_qp_num(struct sss_nic_dev *nic_dev) +{ + u16 cpu_num = 0; + u16 qp_num = nic_dev->max_qp_num; + u16 default_qp_num = nic_dev->nic_svc_cap.def_queue_num; + + if (default_qp_num != 0 && default_qp_num < qp_num) + qp_num = default_qp_num; + + if (max_qp_num > nic_dev->max_qp_num) + qp_num = nic_dev->max_qp_num; + else if (max_qp_num > 0) + qp_num = max_qp_num; + + cpu_num = sss_nic_get_online_cpu(nic_dev->pdev); + + nic_dev->qp_res.qp_num = (u16)min_t(u16, qp_num, cpu_num); +} + +static void sss_nic_set_rss_hkey(struct sss_nic_dev *nic_dev, const u8 *key) +{ + u32 i; + u32 *rss_hkey = (u32 *)nic_dev->rss_key; + + memcpy(nic_dev->rss_key, key, SSSNIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + for (i = 0; i < SSSNIC_RSS_KEY_SIZE / sizeof(u32); i++) + nic_dev->rss_key_big[i] = cpu_to_be32(rss_hkey[i]); +} + +static void sss_nic_init_rss_default_key(struct sss_nic_dev *nic_dev) +{ + u8 default_key[SSSNIC_RSS_KEY_SIZE] = { + SSSNIC_DEF_RSS_KEY_0, SSSNIC_DEF_RSS_KEY_1, SSSNIC_DEF_RSS_KEY_2, + SSSNIC_DEF_RSS_KEY_3, SSSNIC_DEF_RSS_KEY_4, SSSNIC_DEF_RSS_KEY_5, + SSSNIC_DEF_RSS_KEY_6, SSSNIC_DEF_RSS_KEY_7, SSSNIC_DEF_RSS_KEY_8, + SSSNIC_DEF_RSS_KEY_9, SSSNIC_DEF_RSS_KEY_10, SSSNIC_DEF_RSS_KEY_11, + SSSNIC_DEF_RSS_KEY_12, SSSNIC_DEF_RSS_KEY_13, SSSNIC_DEF_RSS_KEY_14, + SSSNIC_DEF_RSS_KEY_15, SSSNIC_DEF_RSS_KEY_16, SSSNIC_DEF_RSS_KEY_17, + SSSNIC_DEF_RSS_KEY_18, SSSNIC_DEF_RSS_KEY_19, SSSNIC_DEF_RSS_KEY_20, + SSSNIC_DEF_RSS_KEY_21, SSSNIC_DEF_RSS_KEY_22, SSSNIC_DEF_RSS_KEY_23, + SSSNIC_DEF_RSS_KEY_24, SSSNIC_DEF_RSS_KEY_25, SSSNIC_DEF_RSS_KEY_26, + SSSNIC_DEF_RSS_KEY_27, SSSNIC_DEF_RSS_KEY_28, SSSNIC_DEF_RSS_KEY_29, + SSSNIC_DEF_RSS_KEY_30, SSSNIC_DEF_RSS_KEY_31, SSSNIC_DEF_RSS_KEY_32, + SSSNIC_DEF_RSS_KEY_33, SSSNIC_DEF_RSS_KEY_34, SSSNIC_DEF_RSS_KEY_35, + SSSNIC_DEF_RSS_KEY_36, SSSNIC_DEF_RSS_KEY_37, SSSNIC_DEF_RSS_KEY_38, + SSSNIC_DEF_RSS_KEY_39 + }; + + sss_nic_set_rss_hkey(nic_dev, default_key); +} + +static int sss_nic_alloc_rss_key(struct sss_nic_dev *nic_dev) +{ + /* We need double the space to store the RSS key, + * with the second space used to store the RSS key in big-endian mode. 
+ */ + nic_dev->rss_key = + kzalloc(SSSNIC_RSS_KEY_SIZE * SSSNIC_RSS_KEY_RSV_NUM, GFP_KERNEL); + if (!nic_dev->rss_key) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to alloc memory for rss_hkey\n"); + return -ENOMEM; + } + + nic_dev->rss_indir_tbl = kzalloc(sizeof(u32) * SSSNIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!nic_dev->rss_indir_tbl) { + kfree(nic_dev->rss_key); + nic_dev->rss_key = NULL; + return -ENOMEM; + } + + /* The second space is for big edian hash key */ + nic_dev->rss_key_big = (u32 *)(nic_dev->rss_key + SSSNIC_RSS_KEY_SIZE); + + return 0; +} + +static int sss_nic_config_rss_hw_resource(struct sss_nic_dev *nic_dev, u32 *indir) +{ + int ret; + u8 engine_type = nic_dev->rss_hash_engine; + + ret = sss_nic_set_rss_indir_tbl(nic_dev, indir); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type); + if (ret != 0) + return ret; + + return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type); +} + +static int sss_nic_set_hw_rss(struct net_device *netdev, u8 *cos_map, u8 cos_num) +{ + int ret; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + bool dcb_en = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key); + if (ret != 0) + return ret; + + sss_nic_maybe_reset_rss_indir(netdev, dcb_en); + + if (test_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags)) + sss_nic_fill_indir_tbl(nic_dev, cos_num, nic_dev->rss_indir_tbl); + + ret = sss_nic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir_tbl); + if (ret != 0) + return ret; + + ret = sss_nic_config_rss_to_hw(nic_dev, cos_num, cos_map, + nic_dev->qp_res.qp_num, 1); + if (ret != 0) + return ret; + + return 0; +} + +static void sss_nic_init_rss_key(struct sss_nic_dev *nic_dev) +{ + sss_nic_init_rss_default_key(nic_dev); + + sss_nic_init_qp_num(nic_dev); + + sss_nic_init_rss_type(nic_dev); + + sss_nic_fill_indir_tbl(nic_dev, 0, nic_dev->rss_indir_tbl); +} + +static int sss_nic_set_rss_key_to_hw(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 engine_type = nic_dev->rss_hash_engine; + + ret = sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, nic_dev->rss_key); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_indir_tbl(nic_dev, nic_dev->rss_indir_tbl); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, nic_dev->rss_type); + if (ret != 0) + return ret; + + ret = sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &engine_type); + if (ret != 0) + return ret; + + ret = sss_nic_init_hw_rss(nic_dev, nic_dev->qp_res.qp_num); + if (ret != 0) + return ret; + + return 0; +} + +void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev) +{ + int ret = 0; + + if (!SSSNIC_SUPPORT_RSS(nic_dev->nic_io) || nic_dev->max_qp_num <= 1) { + nic_dev->qp_res.qp_num = nic_dev->max_qp_num; + return; + } + + ret = sss_nic_alloc_rss_key(nic_dev); + if (ret != 0) + goto disable_rss; + + set_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags); + set_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + sss_nic_init_rss_key(nic_dev); + + ret = sss_nic_set_rss_key_to_hw(nic_dev); + if (ret != 0) { + sss_nic_free_rss_key(nic_dev); + nic_err(nic_dev->dev_hdl, "Fail to set hardware rss parameters\n"); + goto disable_rss; + } + + return; + +disable_rss: + clear_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->max_qp_num = 1; + nic_dev->qp_res.qp_num = nic_dev->max_qp_num; +} + +/* for ethtool */ +static int sss_nic_set_l4_rss_hash_type(const struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + u8 
rss_l4_en = 0; + + if ((cmd->data & SSSNIC_RXH_PORT) == 0) + rss_l4_en = 0; + else if ((cmd->data & SSSNIC_RXH_PORT) == SSSNIC_RXH_PORT) + rss_l4_en = 1; + else + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sss_nic_update_rss_type(struct sss_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd, + struct sss_nic_rss_type *rss_type) +{ + int ret; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = sss_nic_set_l4_rss_hash_type(cmd, rss_type); + if (ret != 0) + return ret; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupport flow type\n"); + return -EINVAL; + } + + return 0; +} + +static inline int sss_nic_check_cmd_data(struct ethtool_rxnfc *cmd) +{ + /* RSS only support hashing to queues based src and dst IP and port */ + if (cmd->data & ~SSSNIC_SUPPORT_RXH) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & SSSNIC_RXH_IP)) + return -EINVAL; + + return 0; +} + +static int sss_nic_set_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type *rss_type = &nic_dev->rss_type; + int ret; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 0) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS disable, no support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + if (sss_nic_check_cmd_data(cmd) != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid ethool rxnfc cmd data\n"); + return -EINVAL; + } + + ret = sss_nic_get_rss_type(nic_dev, rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to get rss type\n"); + return -EFAULT; + } + + ret = sss_nic_update_rss_type(nic_dev, cmd, rss_type); + if (ret != 0) + return ret; + + ret = sss_nic_set_rss_type(nic_dev, *rss_type); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Success to set rss hash options\n"); + + return 0; +} + +static void translate_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt != 0) + cmd->data |= SSSNIC_RXH_PORT; +} + +static int sss_nic_translate_rss_type(struct sss_nic_dev *nic_dev, + struct sss_nic_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = SSSNIC_RXH_IP; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + translate_rss_type(rss_type->tcp_ipv4, cmd); + break; + case UDP_V4_FLOW: + translate_rss_type(rss_type->udp_ipv4, cmd); + break; + case TCP_V6_FLOW: + translate_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V6_FLOW: + translate_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupport flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int sss_nic_get_rss_hash_type(struct sss_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_rss_type rss_type = {0}; + int ret; + + cmd->data = 0; + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) == 0) + return 0; + + ret = sss_nic_get_rss_type(nic_dev, &rss_type); + 
if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss type\n"); + return ret; + } + + return sss_nic_translate_rss_type(nic_dev, &rss_type, cmd); +} + +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->qp_res.qp_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = (u32)nic_dev->rx_rule.rule_cnt; + break; + case ETHTOOL_GRXCLSRULE: + ret = sss_nic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + ret = sss_nic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = sss_nic_get_rss_hash_type(nic_dev, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = sss_nic_set_rss_hash_type(nic_dev, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = sss_nic_ethtool_update_flow(nic_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = sss_nic_ethtool_delete_flow(nic_dev, cmd->fs.location); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static u16 sss_nic_channels_max(struct sss_nic_dev *nic_dev) +{ + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qp_num / tcs : nic_dev->max_qp_num; +} + +static u16 sss_nic_curr_channels(struct sss_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->qp_res.qp_num ? + nic_dev->qp_res.qp_num : 1; + else + return (u16)min_t(u16, sss_nic_channels_max(nic_dev), + nic_dev->qp_res.qp_num); +} + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + channels->tx_count = 0; + channels->rx_count = 0; + channels->other_count = 0; + channels->max_tx = 0; + channels->max_rx = 0; + channels->max_other = 0; + channels->max_combined = sss_nic_channels_max(nic_dev); + /* report flow director queues as maximum channels */ + channels->combined_count = sss_nic_curr_channels(nic_dev); +} + +static int sss_nic_check_channel_parameter(struct net_device *netdev, + const struct ethtool_channels *channels) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u16 max_channel = sss_nic_channels_max(nic_dev); + + if (combined_count == 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupport combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count != 0 || channels->rx_count != 0 || + channels->other_count != 0) { + nicif_err(nic_dev, drv, netdev, + "Set rx/tx/other count no support\n"); + return -EINVAL; + } + + if (combined_count > max_channel) { + nicif_err(nic_dev, drv, netdev, + "Invalid combined_count %u out of range %u\n", combined_count, + max_channel); + return -EINVAL; + } + + return 0; +} + +static void sss_nic_change_num_channel_reopen_handler(struct sss_nic_dev *nic_dev, + const void *priv_data) +{ + sss_nic_set_default_rss_indir(nic_dev->netdev); +} + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct sss_nic_qp_resource q_param = {0}; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int combined_count = channels->combined_count; + u8 user_cos_num = 
sss_nic_get_user_cos_num(nic_dev); + int ret; + + if (sss_nic_check_channel_parameter(netdev, channels)) + return -EINVAL; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function not support RSS, only support 1 queue pair\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags)) { + if (combined_count < user_cos_num) { + nicif_err(nic_dev, drv, netdev, + "DCB is on, channel num should more than valid cos num:%u\n", + user_cos_num); + return -EOPNOTSUPP; + } + } + + if (SSSNIC_SUPPORT_FDIR(nic_dev->nic_io) && + !sss_nic_validate_channel_setting_in_ntuple(nic_dev, combined_count)) + return -EOPNOTSUPP; + + nicif_info(nic_dev, drv, netdev, "Set max combine queue number from %u to %u\n", + nic_dev->qp_res.qp_num, combined_count); + + if (netif_running(netdev)) { + q_param = nic_dev->qp_res; + q_param.irq_cfg = NULL; + q_param.rq_res_group = NULL; + q_param.sq_res_group = NULL; + q_param.qp_num = (u16)combined_count; + + nicif_info(nic_dev, drv, netdev, "Restart channel\n"); + ret = sss_nic_update_channel_setting(nic_dev, &q_param, + sss_nic_change_num_channel_reopen_handler, + NULL); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to change channel setting\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + sss_nic_set_default_rss_indir(netdev); + nic_dev->qp_res.qp_num = (u16)combined_count; + } + + return 0; +} + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev) +{ + return SSSNIC_RSS_INDIR_SIZE; +} +#endif + +static int sss_nic_set_rss_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hash_key) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret; + + if (indir) { + ret = sss_nic_set_rss_indir_tbl(nic_dev, indir); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, + "Fail to set rss indir table\n"); + return -EFAULT; + } + clear_bit(SSSNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); + + memcpy(nic_dev->rss_indir_tbl, indir, + sizeof(u32) * SSSNIC_RSS_INDIR_SIZE); + nicif_info(nic_dev, drv, netdev, "Success to set rss indir\n"); + } + + if (hash_key) { + ret = sss_nic_set_rss_hash_key(nic_dev, hash_key); + if (ret != 0) { + nicif_err(nic_dev, drv, netdev, "Fail to set rss key\n"); + return -EFAULT; + } + + sss_nic_set_rss_hkey(nic_dev, hash_key); + nicif_info(nic_dev, drv, netdev, "Success to set rss key\n"); + } + + return 0; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev) +{ + return SSSNIC_RSS_KEY_SIZE; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key, u8 *hfunc) +#else +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = nic_dev->rss_hash_engine ? 
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; +#endif + + if (indir) { + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + if (ret != 0) + return -EFAULT; + } + + if (hash_key) + memcpy(hash_key, nic_dev->rss_key, SSSNIC_RSS_KEY_SIZE); + + return ret; +} + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key, + const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hash_key) +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *hash_key) +#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss parameters when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "No support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + SSSNIC_RSS_ENGINE_XOR : + SSSNIC_RSS_ENGINE_TOEP; + ret = sss_nic_set_rss_hash_engine(nic_dev, + nic_dev->rss_hash_engine); + if (ret != 0) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Success to set hfunc to RSS_HASH_%s\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } +#endif + ret = sss_nic_set_rss_rxfh(netdev, indir, hash_key); + + return ret; +} + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + int ret = 0; +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + u32 *indir = NULL; + + /* In a low version kernel(eg:suse 11.2), call the interface twice. + * First call to get the size value, + * and second call to get the rxfh indir according to the size value. 
+ */ + if (rxfh_indir->size == 0) { + rxfh_indir->size = SSSNIC_RSS_INDIR_SIZE; + return 0; + } + + if (rxfh_indir->size < SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to get rss indir, rss size(%d) less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "No support to get rss when rss disable\n"); + return -EOPNOTSUPP; + } + + if (indir) + ret = sss_nic_get_rss_indir_tbl(nic_dev, indir); + + return ret; +} + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *rxfh_indir) +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE + const u32 *indir = NULL; + + if (rxfh_indir->size != SSSNIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to set rss indir, rss size(%d) is less than default rss size(%u).\n", + rxfh_indir->size, SSSNIC_RSS_INDIR_SIZE); + return -EINVAL; + } + + indir = rxfh_indir->ring_index; +#endif + + if (!test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "No support to set rss indir when rss disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(SSSNIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "No support to set indir when DCB enable\n"); + return -EOPNOTSUPP; + } + + return sss_nic_set_rss_rxfh(netdev, indir, NULL); +} + +#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */ + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h new file mode 100644 index 0000000000000000000000000000000000000000..93b7dee9995182ea3b7af9656b9aff184f9a4ad0 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_H +#define SSS_NIC_RSS_H + +#include "sss_nic_dev_define.h" + +#define SSS_NIC_NUM_IQ_PER_FUNC 8 + +int sss_nic_update_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_reset_rss_cfg(struct sss_nic_dev *nic_dev); + +void sss_nic_set_default_rss_indir(struct net_device *netdev); + +void sss_nic_try_to_enable_rss(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rss_key(struct sss_nic_dev *nic_dev); + +/* for ethtool */ +int sss_nic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + +int sss_nic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void sss_nic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +int sss_nic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE +u32 sss_nic_get_rxfh_indir_size(struct net_device *netdev); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 sss_nic_get_rxfh_key_size(struct net_device *netdev); + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +#else /* HAVE_RXFH_HASHFUNC */ +int sss_nic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#endif /* HAVE_RXFH_HASHFUNC */ + +#ifdef HAVE_RXFH_HASHFUNC +int sss_nic_set_rxfh(struct net_device *netdev, const 
u32 *indir, const u8 *key, + const u8 hfunc); +#else +#ifdef HAVE_RXFH_NONCONST +int sss_nic_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#else +int sss_nic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key); +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir1); +#else +int sss_nic_get_rxfh_indir(struct net_device *netdev, u32 *indir); +#endif + +#ifdef NOT_HAVE_GET_RXFH_INDIR_SIZE +int sss_nic_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1); +#else +int sss_nic_set_rxfh_indir(struct net_device *netdev, const u32 *indir); +#endif /* NOT_HAVE_GET_RXFH_INDIR_SIZE */ + +#endif /* (defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..a52e7a45f574a71bcdef3579510046d8e22c8e73 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_io_define.h" +#include "sss_nic_event.h" + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key) +{ + int ret; + struct sss_nic_mbx_rss_key_cfg cmd_rss_hash_key = {0}; + u16 out_len = sizeof(cmd_rss_hash_key); + + if (opcode == SSSNIC_MBX_OPCODE_SET) + memcpy(cmd_rss_hash_key.key, hash_key, SSSNIC_RSS_KEY_SIZE); + + cmd_rss_hash_key.opcode = opcode; + cmd_rss_hash_key.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_CFG_RSS_HASH_KEY, + &cmd_rss_hash_key, sizeof(cmd_rss_hash_key), + &cmd_rss_hash_key, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_hash_key)) { + nic_err(nic_dev->dev_hdl, + "Fail to hash key,opcode: %d ret: %d, status: 0x%x, out_len: 0x%x\n", + opcode, ret, cmd_rss_hash_key.head.state, out_len); + return -EINVAL; + } + + if (opcode == SSSNIC_MBX_OPCODE_GET) + memcpy(hash_key, cmd_rss_hash_key.key, SSSNIC_RSS_KEY_SIZE); + + return 0; +} + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key) +{ + u8 rss_hash_key[SSSNIC_RSS_KEY_SIZE]; + + memcpy(rss_hash_key, hash_key, SSSNIC_RSS_KEY_SIZE); + return sss_nic_cfg_rss_hash_key(nic_dev, SSSNIC_MBX_OPCODE_SET, rss_hash_key); +} + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl) +{ + int i; + int ret; + u16 *temp_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + ret = sss_ctrlq_detail_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_GET_RSS_INDIR_TABLE, + msg_buf, msg_buf, NULL, 0, + SSS_CHANNEL_NIC); + if (ret != 0) { + nic_err(nic_dev->dev_hdl, "Fail to get rss 
indir tbl\n"); + goto get_tbl_fail; + } + + temp_tbl = (u16 *)msg_buf->buf; + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl[i] = *(temp_tbl + i); + +get_tbl_fail: + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return ret; +} + +static void sss_nic_fill_indir_tbl(struct sss_nic_rss_indirect_table *indir_tbl, + const u32 *indir_table) +{ + u32 i; + u32 tbl_size; + u32 *temp_entry = NULL; + + memset(indir_tbl, 0, sizeof(*indir_tbl)); + for (i = 0; i < SSSNIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)indir_table[i]; + + temp_entry = (u32 *)indir_tbl->entry; + tbl_size = sizeof(indir_tbl->entry) / (sizeof(u32)); + for (i = 0; i < tbl_size; i++) + temp_entry[i] = cpu_to_be32(temp_entry[i]); +} + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl) +{ + int ret; + u64 output_param = 0; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + if (!nic_dev || !indir_tbl) + return -EINVAL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(struct sss_nic_rss_indirect_table); + + sss_nic_fill_indir_tbl(msg_buf->buf, indir_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_INDIR_TABLE, + msg_buf, &output_param, + 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss indir tbl\n"); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + return ret; +} + +static int sss_nic_set_rss_type_by_ctrlq(struct sss_nic_dev *nic_dev, u32 ctx) +{ + int ret; + u64 output_param = 0; + struct sss_nic_rss_ctx_table *rss_ctx_tbl = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_dev->hwdev); + if (!msg_buf) { + nic_err(nic_dev->dev_hdl, "Fail to allocate cmd buf\n"); + return -ENOMEM; + } + + rss_ctx_tbl = (struct sss_nic_rss_ctx_table *)msg_buf->buf; + memset(rss_ctx_tbl, 0, sizeof(*rss_ctx_tbl)); + rss_ctx_tbl->ctx = cpu_to_be32(ctx); + msg_buf->size = sizeof(*rss_ctx_tbl); + + ret = sss_ctrlq_direct_reply(nic_dev->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_SET_RSS_CONTEXT_TABLE, msg_buf, + &output_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || output_param != 0) { + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + nic_err(nic_dev->dev_hdl, "Fail to set rss ctx, ret: %d\n", ret); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_dev->hwdev, msg_buf); + + return 0; +} + +static int sss_nic_set_rss_type_by_mbx(struct sss_nic_dev *nic_dev, u32 ctx) +{ + struct sss_nic_mbx_rss_ctx ctx_tbl = {0}; + u16 out_len = sizeof(ctx_tbl); + int ret; + + ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + ctx_tbl.context = ctx; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_SET_RSS_CTX_TBL_INTO_FUNC, + &ctx_tbl, sizeof(ctx_tbl), &ctx_tbl, &out_len); + + if (ctx_tbl.head.state == SSS_MGMT_CMD_UNSUPPORTED) { + return SSS_MGMT_CMD_UNSUPPORTED; + } else if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &ctx_tbl)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss ctx, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, ctx_tbl.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type) +{ + int ret; + u32 ctx = 0; + + ctx |= SSSNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + 
SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + SSSNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + SSSNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + SSSNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + SSSNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6) | + SSSNIC_RSS_TYPE_SET(1, VALID); + + ret = sss_nic_set_rss_type_by_mbx(nic_dev, ctx); + if (ret == SSS_MGMT_CMD_UNSUPPORTED) + ret = sss_nic_set_rss_type_by_ctrlq(nic_dev, ctx); + + return ret; +} + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type) +{ + int ret; + struct sss_nic_mbx_rss_ctx rss_ctx_tbl = {0}; + u16 out_len = sizeof(rss_ctx_tbl); + + if (!nic_dev || !rss_type) + return -EINVAL; + + rss_ctx_tbl.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_GET_RSS_CTX_TBL, + &rss_ctx_tbl, sizeof(rss_ctx_tbl), + &rss_ctx_tbl, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &rss_ctx_tbl)) { + nic_err(nic_dev->dev_hdl, "Fail to get hash type, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, rss_ctx_tbl.head.state, out_len); + return -EINVAL; + } + + rss_type->ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV4); + rss_type->ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6); + rss_type->ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, TCP_IPV6_EXT); + rss_type->udp_ipv4 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = SSSNIC_RSS_TYPE_GET(rss_ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int sss_nic_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine) +{ + int ret; + struct sss_nic_mbx_rss_engine_cfg cmd_rss_engine = {0}; + u16 out_len = sizeof(cmd_rss_engine); + + cmd_rss_engine.opcode = cmd; + cmd_rss_engine.func_id = sss_get_global_func_id(nic_dev->hwdev); + + if (cmd == SSSNIC_MBX_OPCODE_SET) + cmd_rss_engine.hash_engine = *hash_engine; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, + SSSNIC_MBX_OPCODE_CFG_RSS_HASH_ENGINE, + &cmd_rss_engine, sizeof(cmd_rss_engine), + &cmd_rss_engine, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_engine)) { + nic_err(nic_dev->dev_hdl, "Fail to handle hash engine,opcode:%d, ret: %d, status: 0x%x, out_len: 0x%x\n", + cmd, ret, cmd_rss_engine.head.state, out_len); + + return -EIO; + } + + if (cmd == SSSNIC_MBX_OPCODE_GET) + *hash_engine = cmd_rss_engine.hash_engine; + + return 0; +} + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine) +{ + return sss_nic_rss_hash_engine(nic_dev, SSSNIC_MBX_OPCODE_SET, &hash_engine); +} + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *cos_map, u16 qp_num, u8 rss_en) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev || !cos_map || (cos_num & (cos_num - 1)) != 0) + return -EINVAL; + + cmd_rss_cfg.rss_en = rss_en; + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.rq_priority_number = (cos_num > 0) ? 
(u8)ilog2(cos_num) : 0; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + memcpy(cmd_rss_cfg.prio_tc, cos_map, SSSNIC_DCB_UP_MAX); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num) +{ + int ret; + struct sss_nic_mbx_rss_cfg cmd_rss_cfg = {0}; + u16 out_len = sizeof(cmd_rss_cfg); + + if (!nic_dev) + return -EINVAL; + + cmd_rss_cfg.qp_num = qp_num; + cmd_rss_cfg.func_id = sss_get_global_func_id(nic_dev->hwdev); + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_dev->hwdev, SSSNIC_MBX_OPCODE_RSS_CFG, + &cmd_rss_cfg, sizeof(cmd_rss_cfg), + &cmd_rss_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_rss_cfg)) { + nic_err(nic_dev->dev_hdl, + "Fail to set rss cfg, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_rss_cfg.head.state, out_len); + return -EINVAL; + } + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..e5515c1e11cf270b7bec36b7d438214239a36019 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rss_cfg.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RSS_CFG_H +#define SSS_NIC_RSS_CFG_H + +#include + +#include "sss_nic_cfg_rss_define.h" + +int sss_nic_set_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type rss_type); + +int sss_nic_get_rss_type(struct sss_nic_dev *nic_dev, struct sss_nic_rss_type *rss_type); + +int sss_nic_set_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 hash_engine); + +int sss_nic_rss_hash_engine(struct sss_nic_dev *nic_dev, u8 cmd, u8 *hash_engine); + +int sss_nic_config_rss_to_hw(struct sss_nic_dev *nic_dev, + u8 cos_num, u8 *prio_tc, u16 qp_num, u8 rss_en); + +int sss_nic_init_hw_rss(struct sss_nic_dev *nic_dev, u16 qp_num); + +int sss_nic_set_rss_hash_key(struct sss_nic_dev *nic_dev, const u8 *hash_key); + +int sss_nic_cfg_rss_hash_key(struct sss_nic_dev *nic_dev, u8 opcode, u8 *hash_key); + +int sss_nic_set_rss_indir_tbl(struct sss_nic_dev *nic_dev, const u32 *indir_tbl); + +int sss_nic_get_rss_indir_tbl(struct sss_nic_dev *nic_dev, u32 *indir_tbl); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..fa3d5ccf2887d6886597464778bd19864f76ac1d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.c @@ -0,0 +1,903 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +/* rx cqe checksum err */ +#define SSSNIC_RX_IP_CSUM_ERR BIT(0) +#define SSSNIC_RX_TCP_CSUM_ERR BIT(1) 
+#define SSSNIC_RX_UDP_CSUM_ERR BIT(2) +#define SSSNIC_RX_IGMP_CSUM_ERR BIT(3) +#define SSSNIC_RX_ICMPV4_CSUM_ERR BIT(4) +#define SSSNIC_RX_ICMPV6_CSUM_ERR BIT(5) +#define SSSNIC_RX_SCTP_CRC_ERR BIT(6) +#define SSSNIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define SSSNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) + +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_MAX_NUM_RQ 256 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define SSSNIC_GET_RX_PKT_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) +#define SSSNIC_GET_RX_IP_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) +#define SSSNIC_GET_RX_ENC_L3_TYPE(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) +#define SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) + +#define SSSNIC_GET_RX_PKT_UMBCAST(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define SSSNIC_GET_RSS_TYPES(offload_type) \ + SSSNIC_RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define SSSNIC_RQ_CQE_SGE_VLAN_SHIFT 0 +#define SSSNIC_RQ_CQE_SGE_LEN_SHIFT 16 + +#define SSSNIC_RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define SSSNIC_RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define SSSNIC_RQ_CQE_SGE_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_SGE_##member##_SHIFT) & SSSNIC_RQ_CQE_SGE_##member##_MASK) + +#define SSSNIC_GET_RX_VLAN_TAG(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define SSSNIC_GET_RX_PKT_LEN(vlan_len) SSSNIC_RQ_CQE_SGE_GET(vlan_len, LEN) + +#define SSSNIC_GET_RX_CSUM_ERR(status) SSSNIC_RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define SSSNIC_GET_RX_FLUSH(status) SSSNIC_RQ_CQE_STATUS_GET(status, FLUSH) + +#define SSSNIC_GET_RX_BP_EN(status) SSSNIC_RQ_CQE_STATUS_GET(status, BP_EN) + +#define SSSNIC_GET_RX_NUM_LRO(status) SSSNIC_RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define SSSNIC_RX_IS_DECRY_PKT(status) SSSNIC_RQ_CQE_STATUS_GET(status, DECRY_PKT) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_SHIFT 0 +#define SSSNIC_RQ_CQE_PKT_NUM_SHIFT 1 +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_SHIFT 19 + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_MASK 0x1 +#define SSSNIC_RQ_CQE_PKT_NUM_MASK 0x1FU +#define SSSNIC_RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define SSSNIC_RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU + +#define 
SSSNIC_RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_PKT_##member##_SHIFT) & SSSNIC_RQ_CQE_PKT_##member##_MASK) +#define SSSNIC_GET_RQ_CQE_PKT_NUM(pkt_info) SSSNIC_RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_##member##_SHIFT) & SSSNIC_RQ_CQE_##member##_MASK) +#define SSSNIC_GET_SUPER_CQE_EN(pkt_info) \ + SSSNIC_RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define SSSNIC_RX_HDR_SIZE 256 +#define SSSNIC_RX_BUFFER_WRITE 16 + +#define SSSNIC_RX_TCP_PKT 0x3 +#define SSSNIC_RX_UDP_PKT 0x4 +#define SSSNIC_RX_SCTP_PKT 0x7 + +#define SSSNIC_RX_IPV4_PKT 0 +#define SSSNIC_RX_IPV6_PKT 1 +#define SSSNIC_RX_INVALID_IP_TYPE 2 + +#define SSSNIC_RX_PKT_FORMAT_NON_TUNNEL 0 +#define SSSNIC_RX_PKT_FORMAT_VXLAN 1 + +#ifdef HAVE_XDP_SUPPORT +enum sss_nic_xdp_pkt { + SSSNIC_XDP_PKT_PASS, + SSSNIC_XDP_PKT_DROP, +}; +#endif + +#define SSSNIC_LRO_PKT_HDR_LEN_IPV4 66 +#define SSSNIC_LRO_PKT_HDR_LEN_IPV6 86 +#define SSSNIC_LRO_PKT_HDR_LEN(cqe) \ + (SSSNIC_GET_RX_IP_TYPE(sss_hw_cpu32((cqe)->offload_type)) == \ + SSSNIC_RX_IPV6_PKT ? SSSNIC_LRO_PKT_HDR_LEN_IPV6 : SSSNIC_LRO_PKT_HDR_LEN_IPV4) + +#define SSSNIC_GET_SGE_NUM(pkt_len, rxq) \ + ((u8)(((pkt_len) >> (rxq)->buff_size_shift) + \ + (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0))) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + struct page *page = rx_desc->page; + dma_addr_t dma_addr = rx_desc->buf_daddr; + + if (likely(dma_addr != 0)) + return true; + + page = alloc_pages_node(NUMA_NO_NODE, + GFP_ATOMIC | __GFP_COLD | __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) + return false; + + dma_addr = dma_map_page(nic_dev->dev_hdl, page, 0, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(nic_dev->dev_hdl, dma_addr) != 0)) { + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_desc->page = page; + rx_desc->buf_daddr = dma_addr; + rx_desc->page_offset = 0; + + return true; +} + +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 idle_wqe = rq_desc->delta - 1; + dma_addr_t dma_addr; + u32 i; + + for (i = 0; i < idle_wqe; i++) { + rx_desc = &rq_desc->rx_desc_group[rq_desc->pi]; + rqe = rx_desc->rqe; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + break; + } + + dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(dma_addr)); + } + rq_desc->pi = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + } + + if (likely(i != 0)) { + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + rq_desc->delta -= i; + rq_desc->backup_pi = rq_desc->pi; + } else if (idle_wqe == rq_desc->q_depth - 1) { + SSSNIC_RQ_STATS_INC(rq_desc, rx_buf_errors); + } + + return i; +} + +#define SSS_NIC_FILL_BD_SGE(rq_desc) \ +do { \ + struct sss_nic_dev 
*nic_dev = netdev_priv((rq_desc)->netdev); \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct sss_nic_rqe *_rqe = NULL; \ + u32 _idle_wqe = (rq_desc)->delta - 1; \ + dma_addr_t _dma_addr; \ + u32 _id; \ +\ + for (_id = 0; _id < _idle_wqe; _id++) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->pi]; \ + _rqe = _rx_desc->rqe; \ +\ + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, _rx_desc))) { \ + SSSNIC_RQ_STATS_INC((rq_desc), alloc_rx_dma_err); \ + break; \ + } \ +\ + _dma_addr = _rx_desc->buf_daddr + _rx_desc->page_offset; \ +\ + if ((rq_desc)->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { \ + _rqe->extend_rqe.bd_sect.sge.low_addr = \ + sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->extend_rqe.bd_sect.sge.high_addr = \ + sss_hw_be32(upper_32_bits(_dma_addr)); \ + } else { \ + _rqe->normal_rqe.bd_lo_addr = sss_hw_be32(lower_32_bits(_dma_addr)); \ + _rqe->normal_rqe.bd_hi_addr = sss_hw_be32(upper_32_bits(_dma_addr)); \ + } \ + (rq_desc)->pi = (u16)(((rq_desc)->pi + 1) & (rq_desc)->qid_mask); \ + } \ +\ + if (likely(_id != 0)) { \ + sss_nic_write_db((rq_desc)->rq, (rq_desc)->qid & (SSSNIC_DCB_COS_MAX - 1), \ + RQ_CFLAG_DP, \ + (u16)((u32)(rq_desc)->pi << (rq_desc)->rq->wqe_type)); \ +\ + (rq_desc)->delta -= _id; \ + (rq_desc)->backup_pi = (rq_desc)->pi; \ + } else if (_idle_wqe == (rq_desc)->q_depth - 1) { \ + SSSNIC_RQ_STATS_INC((rq_desc), rx_buf_errors); \ + } \ +} while (0) + +#define sss_nic_rx_reuse_dma_page(rq_desc, old_rqe_desc) \ +do { \ + u16 _pi = (rq_desc)->backup_pi; \ + struct sss_nic_rx_desc *new_rqe_desc; \ +\ + new_rqe_desc = &(rq_desc)->rx_desc_group[_pi++]; \ +\ + (rq_desc)->backup_pi = (_pi < (rq_desc)->q_depth) ? _pi : 0; \ +\ + new_rqe_desc->page = (old_rqe_desc)->page; \ + new_rqe_desc->page_offset = (old_rqe_desc)->page_offset; \ + new_rqe_desc->buf_daddr = (old_rqe_desc)->buf_daddr; \ +\ + dma_sync_single_range_for_device((rq_desc)->dev, new_rqe_desc->buf_daddr, \ + new_rqe_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +} while (0) + +#if L1_CACHE_BYTES < 128 +#define PREFETCH_L1_CACHE(vaddr) prefetch((vaddr) + L1_CACHE_BYTES) +#else +#define PREFETCH_L1_CACHE(vaddr) do {} while (0) +#endif + +#define sss_nic_skb_add_rx_frag(rq_desc, rx_desc, skb, size, ret_flag) \ +do { \ + u8 *vaddr; \ + struct page *page; \ +\ + page = (rx_desc)->page; \ + vaddr = (u8 *)page_address(page) + (rx_desc)->page_offset; \ + prefetch(vaddr); \ + PREFETCH_L1_CACHE(vaddr); \ +\ + dma_sync_single_range_for_cpu((rq_desc)->dev, (rx_desc)->buf_daddr, \ + (rx_desc)->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ +\ + if ((size) <= SSSNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { \ + memcpy(__skb_put((skb), (size)), vaddr, ALIGN((size), sizeof(long))); \ + if (likely(page_to_nid(page) == numa_node_id())) \ + *(ret_flag) = true; \ + else { \ + put_page(page); \ + *(ret_flag) = false; \ + } \ + } else { \ + skb_add_rx_frag((skb), skb_shinfo(skb)->nr_frags, page, \ + (int)(rx_desc)->page_offset, (int)(size), (rq_desc)->buf_len); \ + if (unlikely(page_count(page) != 1)) \ + *(ret_flag) = false; \ + else if (unlikely(page_to_nid(page) != numa_node_id())) \ + *(ret_flag) = false; \ + else { \ + (rx_desc)->page_offset ^= (rq_desc)->buf_len; \ + get_page(page); \ + *(ret_flag) = true; \ + } \ + } \ +} while (0) + +#define sss_nic_combine_skb(rq_desc, head_skb, sge_num, pkt_size) \ +do { \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 _frag_num = 0; \ + u32 tmp_pkt_sz = (pkt_size); \ + u8 tmp_sge_num = (sge_num); \ + u32 _size; \ + u32 
_ci; \ + u8 _ret; \ +\ + _skb = (head_skb); \ + _ci = (rq_desc)->ci & (rq_desc)->qid_mask; \ + while (tmp_sge_num > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[_ci]; \ + if (unlikely(tmp_pkt_sz > (rq_desc)->buf_len)) { \ + _size = (rq_desc)->buf_len; \ + tmp_pkt_sz -= (rq_desc)->buf_len; \ + } else { \ + _size = tmp_pkt_sz; \ + } \ +\ + if (unlikely(_frag_num == MAX_SKB_FRAGS)) { \ + if (_skb == (head_skb)) \ + _skb = skb_shinfo(_skb)->frag_list; \ + else \ + _skb = _skb->next; \ +\ + _frag_num = 0; \ + } \ +\ + if (unlikely(_skb != (head_skb))) { \ + (head_skb)->truesize += (rq_desc)->buf_len; \ + (head_skb)->len += _size; \ + (head_skb)->data_len += _size; \ + } \ +\ + sss_nic_skb_add_rx_frag((rq_desc), _rx_desc, _skb, _size, &_ret); \ + if (likely(_ret)) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ + else \ + dma_unmap_page((rq_desc)->dev, _rx_desc->buf_daddr, \ + (rq_desc)->dma_buff_size, DMA_FROM_DEVICE); \ +\ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ + tmp_sge_num--; \ + _frag_num++; \ + _ci = (_ci + 1) & (rq_desc)->qid_mask; \ + } \ +} while (0) + +#define sss_nic_fetch_one_skb(rq_desc, pkt_size, ret_skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + struct sk_buff *head_skb = NULL; \ + struct sk_buff *next_skb = NULL; \ + struct sk_buff *_skb = NULL; \ + u8 sge_num; \ + u8 skb_num; \ +\ + head_skb = netdev_alloc_skb_ip_align((rq_desc)->netdev, SSSNIC_RX_HDR_SIZE); \ + if (likely(head_skb)) { \ + sge_num = SSSNIC_GET_SGE_NUM((pkt_size), (rq_desc)); \ + if (likely(sge_num <= MAX_SKB_FRAGS)) \ + skb_num = 1; \ + else \ + skb_num = (sge_num / MAX_SKB_FRAGS) + \ + ((sge_num % MAX_SKB_FRAGS) ? 1 : 0); \ +\ + while (unlikely(skb_num > 1)) { \ + next_skb = netdev_alloc_skb_ip_align(_netdev, SSSNIC_RX_HDR_SIZE); \ + if (unlikely(!next_skb)) { \ + dev_kfree_skb_any(head_skb); \ + break; \ + } \ +\ + if (!_skb) { \ + skb_shinfo(head_skb)->frag_list = next_skb; \ + _skb = next_skb; \ + } else { \ + _skb->next = next_skb; \ + _skb = next_skb; \ + } \ +\ + skb_num--; \ + } \ +\ + if (likely(skb_num <= 1)) { \ + prefetchw(head_skb->data); \ + sss_nic_combine_skb((rq_desc), head_skb, sge_num, (pkt_size)); \ +\ + (rq_desc)->delta += sge_num; \ + (rq_desc)->ci += sge_num; \ +\ + (ret_skb) = head_skb; \ + } else { \ + (ret_skb) = NULL; \ + } \ + } else { \ + (ret_skb) = NULL; \ + } \ +} while (0) + +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats) +{ + struct sss_nic_rq_stats *rq_stats = &rq_desc->stats; + unsigned int start; + + u64_stats_update_begin(&stats->stats_sync); + do { + start = u64_stats_fetch_begin(&rq_stats->stats_sync); + stats->rx_bytes = rq_stats->rx_bytes; + stats->rx_packets = rq_stats->rx_packets; + stats->csum_errors = rq_stats->csum_errors; + stats->other_errors = rq_stats->other_errors; + stats->errors = rq_stats->csum_errors + rq_stats->other_errors; + stats->rx_dropped = rq_stats->rx_dropped; + stats->xdp_dropped = rq_stats->xdp_dropped; + stats->rx_buf_errors = rq_stats->rx_buf_errors; + } while (u64_stats_fetch_retry(&rq_stats->stats_sync, start)); + u64_stats_update_end(&stats->stats_sync); +} + +static unsigned int sss_nic_eth_get_headlen(struct sk_buff *skb, + unsigned char *data, + unsigned int max_hlen) +{ +#ifdef HAVE_ETH_GET_HEADLEN_FUNC +#ifdef ETH_GET_HEADLEN_NEED_DEV + return eth_get_headlen(skb->dev, data, SSSNIC_RX_HDR_SIZE); +#else + return eth_get_headlen(data, SSSNIC_RX_HDR_SIZE); +#endif +#else +#define IP_FRAG_OFFSET 0x1FFF +#define FCOE_HLEN 38 +#define TCP_HEAD_OFFSET 
12 + u8 nexthdr = 0; + u16 proto; + u8 hlen; + union { + struct ethhdr *eth; + struct vlan_ethhdr *vlan; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + unsigned char *data; + } header; + + if (unlikely(max_hlen < ETH_HLEN)) + return max_hlen; + + header.data = data; + proto = header.eth->h_proto; + + if (proto == htons(ETH_P_8021AD) || proto == htons(ETH_P_8021Q)) { + if (unlikely(max_hlen < ETH_HLEN + VLAN_HLEN)) + return max_hlen; + + proto = header.vlan->h_vlan_encapsulated_proto; + header.data += sizeof(struct vlan_ethhdr); + } else { + header.data += ETH_HLEN; + } + + if (proto == htons(ETH_P_IP)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct iphdr))) + return max_hlen; + + hlen = (header.data[0] & 0x0F) << 2; + if (hlen < sizeof(struct iphdr)) + return (unsigned int)(header.data - data); + + if ((header.ipv4->frag_off & htons(IP_FRAG_OFFSET)) == 0) + nexthdr = header.ipv4->proto; + + header.data += hlen; + } else if (proto == htons(ETH_P_IPV6)) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct ipv6hdr))) + return max_hlen; + + nexthdr = header.ipv6->nexthdr; + header.data += sizeof(struct ipv6hdr); + } else if (proto == htons(ETH_P_FCOE)) { + header.data += FCOE_HLEN; + } else { + return (unsigned int)(header.data - data); + } + + if (nexthdr == IPPROTO_TCP) { + if ((int)(header.data - data) > (int)(max_hlen - sizeof(struct tcphdr))) + return max_hlen; + + if (SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & 0xF0) > + sizeof(struct tcphdr)) + header.data += SSSNIC_HEADER_LEN_TO_BYTE(header.data[TCP_HEAD_OFFSET] & + 0xF0); + else + header.data += sizeof(struct tcphdr); + } else if (nexthdr == IPPROTO_UDP || nexthdr == IPPROTO_UDPLITE) { + header.data += sizeof(struct udphdr); + } else if (nexthdr == IPPROTO_SCTP) { + header.data += sizeof(struct sctphdr); + } + + if ((header.data - data) > max_hlen) + return max_hlen; + else + return (unsigned int)(header.data - data); +#endif +} + +#define sss_nic_pull_tail(skb) \ +do { \ + skb_frag_t *_frag = &skb_shinfo(skb)->frags[0]; \ + unsigned int _len; \ + unsigned char *_data = NULL; \ +\ + _data = skb_frag_address(_frag); \ +\ + _len = sss_nic_eth_get_headlen((skb), _data, SSSNIC_RX_HDR_SIZE); \ +\ + skb_copy_to_linear_data((skb), _data, ALIGN(_len, sizeof(long))); \ +\ + skb_frag_size_sub(_frag, (int)_len); \ + skb_frag_off_add(_frag, (int)_len); \ +\ + (skb)->tail += _len; \ + (skb)->data_len -= _len; \ +} while (0) + +#define sss_nic_check_rx_csum(rq_desc, offload_type, status, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ + u32 pkt_fmt = SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); \ + u32 pkt_type = SSSNIC_GET_RX_PKT_TYPE(offload_type); \ + u32 ip_type = SSSNIC_GET_RX_IP_TYPE(offload_type); \ + u32 chksum_err; \ +\ + chksum_err = SSSNIC_GET_RX_CSUM_ERR(status); \ + if (unlikely(chksum_err == SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) \ + (rq_desc)->stats.other_errors++; \ +\ + if ((_netdev->features & NETIF_F_RXCSUM)) { \ + if (unlikely(chksum_err != 0)) { \ + if ((chksum_err & \ + (SSSNIC_RX_CSUM_HW_CHECK_NONE | \ + SSSNIC_RX_CSUM_IPSU_OTHER_ERR)) == 0) \ + (rq_desc)->stats.csum_errors++; \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (ip_type == SSSNIC_RX_INVALID_IP_TYPE || \ + !(pkt_fmt == SSSNIC_RX_PKT_FORMAT_NON_TUNNEL || \ + pkt_fmt == SSSNIC_RX_PKT_FORMAT_VXLAN)) { \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } else if (pkt_type == SSSNIC_RX_TCP_PKT || \ + pkt_type == SSSNIC_RX_UDP_PKT || \ + pkt_type == SSSNIC_RX_SCTP_PKT) \ + (skb)->ip_summed = CHECKSUM_UNNECESSARY; 
\ + else \ + (skb)->ip_summed = CHECKSUM_NONE; \ + } \ +} while (0) + +#ifdef HAVE_SKBUFF_CSUM_LEVEL +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) \ +do { \ + struct net_device *_netdev = (rq_desc)->netdev; \ +\ + if (_netdev->features & NETIF_F_GRO) { \ + if (SSSNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == \ + SSSNIC_RX_PKT_FORMAT_VXLAN && \ + (skb)->ip_summed == CHECKSUM_UNNECESSARY) \ + (skb)->csum_level = 1; \ + } \ +} while (0) +#else +#define sss_nic_check_rx_gso(rq_desc, offload_type, skb) do {} while (0) +#endif /* HAVE_SKBUFF_CSUM_LEVEL */ + +static void sss_nic_loop_copy_data(struct sss_nic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *loop_test_rx_buf = nic_dev->loop_test_rx_buf; + int loop_pkt_len = nic_dev->loop_pkt_len; + void *frag_data = NULL; + int frag_size; + int pkt_off; + int i; + + if (nic_dev->loop_test_rx_cnt == SSSNIC_LP_PKT_CNT) { + nic_dev->loop_test_rx_cnt = 0; + nicif_warn(nic_dev, rx_err, netdev, "Loopback test received too many pkts\n"); + } + + if (skb->len != loop_pkt_len) { + nicif_warn(nic_dev, rx_err, netdev, "Invalid packet length\n"); + nic_dev->loop_test_rx_cnt++; + return; + } + + pkt_off = nic_dev->loop_test_rx_cnt * loop_pkt_len; + frag_size = (int)skb_headlen(skb); + memcpy(loop_test_rx_buf + pkt_off, skb->data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_size = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy(loop_test_rx_buf + pkt_off, frag_data, (size_t)(u32)frag_size); + + pkt_off += frag_size; + } + nic_dev->loop_test_rx_cnt++; +} + +#define sss_nic_update_gso_params(skb, gso) \ +do { \ + struct ethhdr *_ether = (struct ethhdr *)((skb)->data); \ + __be16 _protocol; \ +\ + _protocol = __vlan_get_protocol((skb), _ether->h_proto, NULL); \ +\ + skb_shinfo(skb)->gso_segs = gso; \ + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP(((skb)->len - skb_headlen(skb)), \ + gso); \ + skb_shinfo(skb)->gso_type = (_protocol == htons(ETH_P_IP)) ? 
\ + SKB_GSO_TCPV4 : SKB_GSO_TCPV6; \ +} while (0) + +#ifdef HAVE_XDP_SUPPORT +#define sss_nic_xdp_update_rx_info(rq_desc, wqe_num) \ +do { \ + struct sss_nic_rx_desc *_rx_desc = NULL; \ + u16 _wqe_cnt = wqe_num; \ +\ + while (_wqe_cnt > 0) { \ + _rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + if (likely(page_to_nid(_rx_desc->page) == numa_node_id())) \ + sss_nic_rx_reuse_dma_page((rq_desc), _rx_desc); \ +\ + (rq_desc)->ci++; \ + (rq_desc)->delta++; \ + _rx_desc->buf_daddr = 0; \ + _rx_desc->page = NULL; \ +\ + _wqe_cnt--; \ + } \ +} while (0) + +#ifdef HAVE_XDP_FRAME_SZ +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) ((xdp)->frame_sz = (len)) +#else +#define SSSNIC_SET_XDP_FRAME_SZ(xdp, len) do {} while (0) +#endif + +#ifdef HAVE_XDP_DATA_META +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) xdp_set_data_meta_invalid(xdp) +#else +#define SSSNIC_XDP_SET_DATA_META_INVALID(xdp) do {} while (0) +#endif + +#ifdef HAVE_BFP_WARN_NETDEV_PARAM +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(netdev, xdp_prog, ret) +#else +#define SSSNIC_BDF_WARN_INVALID_XDP_ACTION(netdev, xdp_prog, ret) \ + bpf_warn_invalid_xdp_action(ret) +#endif + +#define sss_nic_bpf_prog_run_xdp(rq_desc, pkt_size, result) \ +do { \ + struct bpf_prog *xdp_prog = NULL; \ + struct sss_nic_rx_desc *rx_desc = NULL; \ + struct xdp_buff xdp; \ + u16 _wqe_num = 1; \ + u8 *_data = NULL; \ + u32 _ret; \ +\ + rcu_read_lock(); \ +\ + xdp_prog = READ_ONCE((rq_desc)->xdp_prog); \ + if (!xdp_prog) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else if (unlikely((pkt_size) > (rq_desc)->buf_len)) { \ + SSSNIC_RQ_STATS_INC((rq_desc), large_xdp_pkts); \ + _wqe_num = (u16)((pkt_size) >> (rq_desc)->buff_size_shift) + \ + (((pkt_size) & ((rq_desc)->buf_len - 1)) ? 
1 : 0); \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + } else { \ + rx_desc = &(rq_desc)->rx_desc_group[(rq_desc)->ci & (rq_desc)->qid_mask]; \ + _data = (u8 *)page_address(rx_desc->page) + rx_desc->page_offset; \ + prefetch(_data); \ + dma_sync_single_range_for_cpu((rq_desc)->dev, rx_desc->buf_daddr, \ + rx_desc->page_offset, (rq_desc)->buf_len, \ + DMA_FROM_DEVICE); \ + xdp.data = _data; \ + xdp.data_hard_start = xdp.data; \ + xdp.data_end = xdp.data + (pkt_size); \ + SSSNIC_SET_XDP_FRAME_SZ(&xdp, (rq_desc)->buf_len); \ + SSSNIC_XDP_SET_DATA_META_INVALID(&xdp); \ + prefetchw(xdp.data_hard_start); \ +\ + _ret = bpf_prog_run_xdp(xdp_prog, &xdp); \ + if (_ret == XDP_PASS) { \ + *(result) = SSSNIC_XDP_PKT_PASS; \ + } else { \ + *(result) = SSSNIC_XDP_PKT_DROP; \ + if (_ret != XDP_DROP) { \ + SSSNIC_BDF_WARN_INVALID_XDP_ACTION((rq_desc)->netdev, \ + xdp_prog, _ret); \ + } \ + SSSNIC_RQ_STATS_INC((rq_desc), xdp_dropped); \ + sss_nic_xdp_update_rx_info((rq_desc), _wqe_num); \ + } \ + } \ +\ + rcu_read_unlock(); \ +} while (0) +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_CTAG_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#else +#define sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len) \ +do { \ + u16 vlan_id; \ + if (((netdev)->features & NETIF_F_HW_VLAN_RX) != 0 && \ + SSSNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) != 0) { \ + vlan_id = SSSNIC_GET_RX_VLAN_TAG(vlan_len); \ +\ + /* if the packet is a vlan pkt, the vid may be 0 */ \ + __vlan_hwaccel_put_tag((skb), htons(ETH_P_8021Q), vlan_id); \ + } \ +} while (0) +#endif + +static int sss_nic_recv_one_packet(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_cqe *rx_cqe, u32 pkt_len, + u32 vlan_len, u32 status) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sk_buff *skb = NULL; + u32 offload_type; + u16 lro_segs; + +#ifdef HAVE_XDP_SUPPORT + u32 xdp_result; + + sss_nic_bpf_prog_run_xdp(rq_desc, pkt_len, &xdp_result); + if (xdp_result == SSSNIC_XDP_PKT_DROP) + return 0; +#endif + + sss_nic_fetch_one_skb(rq_desc, pkt_len, skb); + if (unlikely(!skb)) { + SSSNIC_RQ_STATS_INC(rq_desc, alloc_skb_err); + return -ENOMEM; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + sss_nic_pull_tail(skb); + + offload_type = sss_hw_cpu32(rx_cqe->offload_type); + sss_nic_check_rx_csum(rq_desc, offload_type, status, skb); + sss_nic_check_rx_gso(rq_desc, offload_type, skb); + sss_nic_vlan_put_tag(skb, netdev, offload_type, vlan_len); + + if (unlikely(test_bit(SSSNIC_LP_TEST, &nic_dev->flags))) + sss_nic_loop_copy_data(nic_dev, skb); + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) + sss_nic_update_gso_params(skb, lro_segs); + + skb_record_rx_queue(skb, rq_desc->qid); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { +#ifdef HAVE_NAPI_GRO_FLUSH_OLD + napi_gro_flush(&rq_desc->irq_cfg->napi, false); +#else + napi_gro_flush(&rq_desc->irq_cfg->napi); +#endif + netif_receive_skb(skb); + } else { + napi_gro_receive(&rq_desc->irq_cfg->napi, skb); + } + + return 0; +} + +int 
sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget) +{ + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + struct sss_nic_cqe *rx_cqe = NULL; + u64 rx_bytes = 0; + int pkts = 0; + int rx_packets = 0; + u16 wqe_num = 0; + u16 lro_segs; + u32 ci; + u32 status; + u32 pkt_len; + u32 vlan_len; + + while (likely(pkts < budget)) { + ci = rq_desc->ci & rq_desc->qid_mask; + rx_cqe = rq_desc->rx_desc_group[ci].cqe; + status = sss_hw_cpu32(rx_cqe->state); + if (!SSSNIC_GET_RX_DONE(status)) + break; + + /* read rx cqe firstly */ + rmb(); + + vlan_len = sss_hw_cpu32(rx_cqe->vlan_len); + pkt_len = SSSNIC_GET_RX_PKT_LEN(vlan_len); + if (sss_nic_recv_one_packet(rq_desc, rx_cqe, pkt_len, vlan_len, status)) + break; + + rx_bytes += pkt_len; + pkts++; + rx_packets++; + + lro_segs = SSSNIC_GET_RX_NUM_LRO(status); + if (lro_segs > 0) { + rx_bytes += ((lro_segs - 1) * SSSNIC_LRO_PKT_HDR_LEN(rx_cqe)); + wqe_num += SSSNIC_GET_SGE_NUM(pkt_len, rq_desc); + } + + rx_cqe->state = 0; + + if (wqe_num >= nic_dev->rx_poll_wqe) + break; + } + + if (rq_desc->delta >= SSSNIC_RX_BUFFER_WRITE) + SSS_NIC_FILL_BD_SGE(rq_desc); + + u64_stats_update_begin(&rq_desc->stats.stats_sync); + rq_desc->stats.rx_packets += (u64)(u32)rx_packets; + rq_desc->stats.rx_bytes += rx_bytes; + u64_stats_update_end(&rq_desc->stats.stats_sync); + + return pkts; +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..15df34e5b17458fe47bdf0c0b479994d473b6ae2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_H +#define SSS_NIC_RX_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +#define SSSNIC_HEADER_LEN_TO_BYTE(header) ((header) >> 2) + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define SSSNIC_RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define SSSNIC_RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define SSSNIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 +#define SSSNIC_RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define SSSNIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define SSSNIC_RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define SSSNIC_RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define SSSNIC_RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_FLUSH_MASK 0x1U +#define SSSNIC_RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U + +#define SSSNIC_RQ_CQE_STATUS_GET(val, member) \ + (((val) >> SSSNIC_RQ_CQE_STATUS_##member##_SHIFT) & \ + SSSNIC_RQ_CQE_STATUS_##member##_MASK) + +#define SSSNIC_GET_RQ_CQE_STATUS(rq_desc, id) \ + sss_hw_cpu32((rq_desc)->rx_desc_group[id].cqe->state) + +#define SSSNIC_GET_RX_DONE(status) SSSNIC_RQ_CQE_STATUS_GET(status, RXDONE) + +bool sss_nic_rx_alloc_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc); +u32 sss_nic_fill_bd_sge(struct sss_nic_rq_desc *rq_desc); +void sss_nic_get_rq_stats(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_stats *stats); +int sss_nic_rx_poll(struct sss_nic_rq_desc *rq_desc, int budget); + +#endif diff --git 
a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c new file mode 100644 index 0000000000000000000000000000000000000000..d2b759f613ac67379f6ee10ae95f4f735c8a0c1c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +static void sss_nic_rx_free_dma_page(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr != 0) { + dma_unmap_page(nic_dev->dev_hdl, rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static u32 sss_nic_rx_alloc_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 i; + + for (i = 0; i < rq_depth - 1; i++) + if (!sss_nic_rx_alloc_dma_page(nic_dev, &rx_desc_group[i])) + break; + + return i; +} + +static void sss_nic_rx_free_dma_buffer(struct sss_nic_dev *nic_dev, + u32 rq_depth, struct sss_nic_rx_desc *rx_desc_group) +{ + u32 id; + + for (id = 0; id < rq_depth; id++) + sss_nic_rx_free_dma_page(nic_dev, &rx_desc_group[id]); +} + +static void _sss_nic_free_rq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_rq_resource *rq_res, u32 rq_depth) +{ + u64 size = sizeof(struct sss_nic_cqe) * rq_depth; + + sss_nic_rx_free_dma_buffer(nic_dev, rq_depth, rq_res->rx_desc_group); + dma_free_coherent(nic_dev->dev_hdl, size, rq_res->cqe_vaddr, rq_res->cqe_paddr); + kfree(rq_res->rx_desc_group); + rq_res->cqe_vaddr = NULL; + rq_res->rx_desc_group = NULL; +} + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int i; + int id; + u32 page_num; + u64 size; + u64 cqe_dma_size = sizeof(struct sss_nic_cqe) * qp_res->rq_depth; + struct sss_nic_rq_resource *rq_res = NULL; + + for (id = 0; id < qp_res->qp_num; id++) { + rq_res = &qp_res->rq_res_group[id]; + rq_res->cqe_vaddr = dma_zalloc_coherent(nic_dev->dev_hdl, cqe_dma_size, + &rq_res->cqe_paddr, GFP_KERNEL); + if (!rq_res->cqe_vaddr) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc cqe dma buf, rq%d\n", id); + goto alloc_cqe_dma_err; + } + + size = sizeof(*rq_res->rx_desc_group) * qp_res->rq_depth; + rq_res->rx_desc_group = kzalloc(size, GFP_KERNEL); + if (!rq_res->rx_desc_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx info, rq%d\n", id); + goto alloc_rqe_desc_group_err; + } + + page_num = sss_nic_rx_alloc_dma_buffer(nic_dev, qp_res->rq_depth, + rq_res->rx_desc_group); + if (page_num == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc rx buffer, rq%d\n", id); + goto alloc_rx_buf_err; + } + rq_res->page_num = (u16)page_num; + } + return 0; + +alloc_rx_buf_err: + kfree(rq_res->rx_desc_group); + rq_res->rx_desc_group = NULL; + +alloc_rqe_desc_group_err: + dma_free_coherent(nic_dev->dev_hdl, cqe_dma_size, rq_res->cqe_vaddr, + rq_res->cqe_paddr); + rq_res->cqe_vaddr = NULL; + +alloc_cqe_dma_err: + for 
(i = 0; i < id; i++) + _sss_nic_free_rq_resource(nic_dev, &qp_res->rq_res_group[i], + qp_res->rq_depth); + + return -ENOMEM; +} + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + int id; + + for (id = 0; id < qp_res->qp_num; id++) + _sss_nic_free_rq_resource(nic_dev, &qp_res->rq_res_group[id], + qp_res->rq_depth); +} + +static void sss_nic_init_rq_desc(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_qp_resource *qp_res, + struct sss_nic_rq_resource *rq_res, + struct sss_irq_desc *irq_desc) +{ + u32 id; + dma_addr_t dma_addr; + struct sss_nic_cqe *rq_cqe; + + rq_desc->irq_id = irq_desc->irq_id; + rq_desc->msix_id = irq_desc->msix_id; + rq_desc->pi = 0; + rq_desc->backup_pi = rq_res->page_num; + rq_desc->q_depth = qp_res->rq_depth; + rq_desc->delta = rq_desc->q_depth; + rq_desc->qid_mask = rq_desc->q_depth - 1; + rq_desc->ci = 0; + rq_desc->last_sw_pi = rq_desc->q_depth - 1; + rq_desc->last_sw_ci = 0; + rq_desc->last_hw_ci = 0; + rq_desc->check_err_cnt = 0; + rq_desc->print_err_cnt = 0; + rq_desc->rx_pkts = 0; + rq_desc->reset_wqe_num = 0; + rq_desc->rx_desc_group = rq_res->rx_desc_group; + + dma_addr = rq_res->cqe_paddr; + rq_cqe = (struct sss_nic_cqe *)rq_res->cqe_vaddr; + for (id = 0; id < qp_res->rq_depth; id++) { + rq_desc->rx_desc_group[id].cqe = rq_cqe; + rq_desc->rx_desc_group[id].cqe_daddr = dma_addr; + dma_addr += sizeof(*rq_desc->rx_desc_group[id].cqe); + rq_cqe++; + } +} + +static void sss_nic_fill_cqe_sge(struct sss_nic_rq_desc *rq_desc) +{ + struct net_device *netdev = rq_desc->netdev; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_rqe *rqe = NULL; + u32 i; + + for (i = 0; i < rq_desc->q_depth; i++) { + rx_desc = &rq_desc->rx_desc_group[i]; + rqe = sss_wq_wqebb_addr(&rq_desc->rq->wq, (u16)i); + + if (rq_desc->rq->wqe_type == SSSNIC_EXTEND_RQ_WQE) { + sss_set_sge(&rqe->extend_rqe.cqe_sect.sge, rx_desc->cqe_daddr, + (sizeof(struct sss_nic_cqe) >> SSSNIC_CQE_SIZE_SHIFT)); + + rqe->extend_rqe.bd_sect.sge.len = nic_dev->rx_buff_len; + } else { + rqe->normal_rqe.cqe_lo_addr = lower_32_bits(rx_desc->cqe_daddr); + rqe->normal_rqe.cqe_hi_addr = upper_32_bits(rx_desc->cqe_daddr); + } + + rx_desc->rqe = rqe; + } +} + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 qid; + u32 pkt; + + nic_dev->get_rq_fail_cnt = 0; + for (qid = 0; qid < qp_res->qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + rq_desc->rq = &nic_dev->nic_io->rq_group[rq_desc->qid]; + + sss_nic_init_rq_desc(rq_desc, qp_res, &qp_res->rq_res_group[qid], + &nic_dev->irq_desc_group[qid]); + + sss_nic_fill_cqe_sge(rq_desc); + + pkt = sss_nic_fill_bd_sge(rq_desc); + if (pkt == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to fill rx buffer\n"); + return -ENOMEM; + } + } + + return 0; +} + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->rq_desc_group); + nic_dev->rq_desc_group = NULL; +} + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_rq_desc *rq_desc = NULL; + u16 rq_num = nic_dev->max_qp_num; + u16 i; + + nic_dev->rq_desc_group = kcalloc(rq_num, sizeof(*nic_dev->rq_desc_group), GFP_KERNEL); + if (!nic_dev->rq_desc_group) + return -ENOMEM; + + for (i = 0; i < rq_num; i++) { + rq_desc = &nic_dev->rq_desc_group[i]; + rq_desc->dev = nic_dev->dev_hdl; + rq_desc->netdev = nic_dev->netdev; + rq_desc->qid = i; + rq_desc->qid_mask 
= nic_dev->qp_res.rq_depth - 1; + rq_desc->q_depth = nic_dev->qp_res.rq_depth; + rq_desc->dma_buff_size = nic_dev->rx_dma_buff_size; + rq_desc->buff_size_shift = (u32)ilog2(nic_dev->rx_buff_len); + rq_desc->buf_len = nic_dev->rx_buff_len; + u64_stats_init(&rq_desc->stats.stats_sync); + } + + return 0; +} + +int sss_nic_update_rx_rss(struct sss_nic_dev *nic_dev) +{ + int ret; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RSS_ENABLE)) { + ret = sss_nic_update_rss_cfg(nic_dev); + if (ret != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to init rss\n"); + return -EFAULT; + } + } + + return 0; +} + +void sss_nic_reset_rx_rss(struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(SSSNIC_RSS_ENABLE, &nic_dev->flags) != 0) + sss_nic_reset_rss_cfg(nic_dev); +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h new file mode 100644 index 0000000000000000000000000000000000000000..1273262c49fec81e34c7d0e73f49e0a735c3168d --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_init.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_INIT_H +#define SSS_NIC_RX_INIT_H + +#include +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +void sss_nic_free_rq_res_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_init_rq_desc_group(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); + +int sss_nic_alloc_rq_desc_group(struct sss_nic_dev *nic_dev); + +void sss_nic_free_rq_desc_group(struct sss_nic_dev *nic_dev); + +int sss_nic_update_rx_rss(struct sss_nic_dev *nic_dev); + +void sss_nic_reset_rx_rss(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c new file mode 100644 index 0000000000000000000000000000000000000000..0efd6ce87d636de6fac02a39743a798771c0d579 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_rss.h" +#include "sss_nic_rx.h" +#include "sss_nic_cfg.h" + +#define SSSNIC_RQ_GET_ERR_CNT_THRESHOLD 3 +#define SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD 2 +#define SSSNIC_RQ_PRINT_CNT_THRESHOLD 3 + +static inline void sss_nic_fill_wqe_sge(struct sss_nic_rx_desc *rx_desc, + u8 wqe_type) +{ + dma_addr_t dma_addr = rx_desc->buf_daddr + rx_desc->page_offset; + struct sss_nic_rqe *rqe = rx_desc->rqe; + + if (unlikely(wqe_type == SSSNIC_EXTEND_RQ_WQE)) { + rqe->extend_rqe.bd_sect.sge.low_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->extend_rqe.bd_sect.sge.high_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } else { + rqe->normal_rqe.bd_lo_addr = + sss_hw_be32(lower_32_bits(dma_addr)); + rqe->normal_rqe.bd_hi_addr = + sss_hw_be32(upper_32_bits(dma_addr)); + } +} + +static inline void 
sss_nic_free_wqe_buffer(struct sss_nic_dev *nic_dev, + struct sss_nic_rx_desc *rx_desc) +{ + if (rx_desc->buf_daddr) { + dma_unmap_page(nic_dev->dev_hdl, rx_desc->buf_daddr, + nic_dev->rx_dma_buff_size, DMA_FROM_DEVICE); + rx_desc->buf_daddr = 0; + } + + if (rx_desc->page) { + __free_pages(rx_desc->page, nic_dev->page_order); + rx_desc->page = NULL; + } +} + +static inline int sss_nic_fill_idle_wqe(struct sss_nic_rq_desc *rq_desc, + u32 wqebb_num, u32 start_pi) +{ + u32 pi = start_pi; + u32 i; + struct sss_nic_rx_desc *rx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(rq_desc->netdev); + + for (i = 0; i < wqebb_num; i++) { + rx_desc = &rq_desc->rx_desc_group[pi]; + + if (unlikely(!sss_nic_rx_alloc_dma_page(nic_dev, rx_desc))) { + rq_desc->reset_pi = (u16)((rq_desc->reset_pi + i) & rq_desc->qid_mask); + SSSNIC_RQ_STATS_INC(rq_desc, alloc_rx_dma_err); + return -ENOMEM; + } + + sss_nic_fill_wqe_sge(rx_desc, rq_desc->rq->wqe_type); + + pi = (u16)((pi + 1) & rq_desc->qid_mask); + rq_desc->reset_wqe_num++; + } + + return 0; +} + +static int sss_nic_reset_rq(struct sss_nic_dev *nic_dev, u16 qid, u16 hw_ci) +{ + int ret; + u32 i; + u32 total; + u32 ci; + u32 pi; + struct sss_nic_rq_desc *rq_desc = &nic_dev->rq_desc_group[qid]; + u32 idle_wqebb = rq_desc->delta - rq_desc->reset_wqe_num; + struct sss_nic_rx_desc *rx_desc = NULL; + + if (rq_desc->delta < rq_desc->reset_wqe_num) + return -EINVAL; + + if (rq_desc->reset_wqe_num == 0) + rq_desc->reset_pi = rq_desc->pi; + + ci = rq_desc->ci & rq_desc->qid_mask; + total = ci + rq_desc->q_depth - rq_desc->pi; + if ((total % rq_desc->q_depth) != rq_desc->delta) + return -EINVAL; + + ret = sss_nic_fill_idle_wqe(rq_desc, idle_wqebb, rq_desc->reset_pi); + if (ret) + return ret; + + nic_info(nic_dev->dev_hdl, "Reset rq: rq %u, restore_buf_num:%u\n", qid, + rq_desc->reset_wqe_num); + + pi = (hw_ci + rq_desc->q_depth - 1) & rq_desc->qid_mask; + rx_desc = &rq_desc->rx_desc_group[pi]; + sss_nic_free_wqe_buffer(nic_dev, rx_desc); + + rq_desc->delta = 1; + rq_desc->reset_wqe_num = 0; + rq_desc->pi = (u16)pi; + rq_desc->backup_pi = rq_desc->pi; + rq_desc->ci = (u16)((rq_desc->pi + 1) & rq_desc->qid_mask); + + for (i = 0; i < rq_desc->q_depth; i++) { + if (!SSSNIC_GET_RX_DONE(sss_hw_cpu32(rq_desc->rx_desc_group[i].cqe->state))) + continue; + + rq_desc->rx_desc_group[i].cqe->state = 0; + SSSNIC_RQ_STATS_INC(rq_desc, reset_drop_sge); + } + + ret = sss_nic_cache_out_qp_resource(nic_dev->nic_io); + if (ret) { + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + return ret; + } + + sss_nic_write_db(rq_desc->rq, rq_desc->qid & (SSSNIC_DCB_COS_MAX - 1), + RQ_CFLAG_DP, (u16)((u32)rq_desc->pi << rq_desc->rq->wqe_type)); + + return 0; +} + +static bool sss_nic_rq_is_normal(struct sss_nic_rq_desc *rq_desc, + struct sss_nic_rq_pc_info check_info) +{ + u32 status; + u32 sw_ci = rq_desc->ci & rq_desc->qid_mask; + + if (check_info.hw_pi != check_info.hw_ci || + check_info.hw_ci != rq_desc->last_hw_ci) + return true; + + if (rq_desc->stats.rx_packets != rq_desc->rx_pkts || + rq_desc->pi != rq_desc->last_sw_pi) + return true; + + status = SSSNIC_GET_RQ_CQE_STATUS(rq_desc, sw_ci); + if (SSSNIC_GET_RX_DONE(status)) + return true; + + if (sw_ci != rq_desc->last_sw_ci || rq_desc->pi != check_info.hw_pi) + return true; + + return false; +} + +void sss_nic_rq_watchdog_handler(struct work_struct *work) +{ + int ret; + u16 qid; + struct sss_nic_rq_pc_info *check_info = NULL; + struct sss_nic_rq_desc *rq_desc = NULL; + struct delayed_work *delay = to_delayed_work(work); + 
struct sss_nic_dev *nic_dev = container_of(delay, struct sss_nic_dev, rq_watchdog_work); + u64 size = sizeof(*check_info) * nic_dev->qp_res.qp_num; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) + return; + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY)) + queue_delayed_work(nic_dev->workq, &nic_dev->rq_watchdog_work, HZ); + + if (!size) + return; + check_info = kzalloc(size, GFP_KERNEL); + if (!check_info) + return; + + ret = sss_nic_rq_hw_pc_info(nic_dev, check_info, nic_dev->qp_res.qp_num, + nic_dev->rq_desc_group[0].rq->wqe_type); + if (ret) { + nic_dev->get_rq_fail_cnt++; + if (nic_dev->get_rq_fail_cnt >= SSSNIC_RQ_GET_ERR_CNT_THRESHOLD) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_RXQ_RECOVERY); + goto free_rq_info; + } + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + rq_desc = &nic_dev->rq_desc_group[qid]; + if (!sss_nic_rq_is_normal(rq_desc, check_info[qid])) { + rq_desc->check_err_cnt++; + if (rq_desc->check_err_cnt < SSSNIC_RQ_CHECK_ERR_CNT_THRESHOLD) + continue; + + if (rq_desc->print_err_cnt <= SSSNIC_RQ_PRINT_CNT_THRESHOLD) { + nic_warn(nic_dev->dev_hdl, + "Rq handle: rq(%u) wqe abnormal, hw_pi:%u, hw_ci:%u, sw_pi:%u, sw_ci:%u delta:%u\n", + qid, check_info[qid].hw_pi, check_info[qid].hw_ci, + rq_desc->pi, + rq_desc->ci & rq_desc->qid_mask, rq_desc->delta); + rq_desc->print_err_cnt++; + } + + ret = sss_nic_reset_rq(nic_dev, qid, check_info[qid].hw_ci); + if (ret) + continue; + } + + rq_desc->last_hw_ci = check_info[qid].hw_ci; + rq_desc->rx_pkts = rq_desc->stats.rx_packets; + rq_desc->last_sw_pi = rq_desc->pi; + rq_desc->last_sw_ci = rq_desc->ci & rq_desc->qid_mask; + rq_desc->print_err_cnt = 0; + rq_desc->check_err_cnt = 0; + } + + nic_dev->get_rq_fail_cnt = 0; + +free_rq_info: + kfree(check_info); +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h new file mode 100644 index 0000000000000000000000000000000000000000..6d588e690cca7e571c4f0277d24900b69e45c5b2 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_rx_reset.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_RX_RESET_H +#define SSS_NIC_RX_RESET_H + +#include +#include +#include +#include +#include +#include + +void sss_nic_rq_watchdog_handler(struct work_struct *work); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..021054c1cf844b58c416fb98a8dbb805e26f3dbc --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.c @@ -0,0 +1,869 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" + +#define SSSNIC_DEFAULT_MSS 0x3E00 +#define SSSNIC_MIN_MSS 0x50 +#define SSSNIC_SKB_LEN_MIN 32 +#define SSSNIC_SKB_LEN_MAX 16383 +#define SSSNIC_PAYLOAD_OFFSET_MAX 221 + +#define SSSNIC_IPV4_VERSION 4 +#define SSSNIC_IPV6_VERSION 6 +#define SSSNIC_TCP_DOFF_TO_BYTES(doff) ((doff) << 2) +#define 
SSSNIC_VXLAN_OFFLOAD_PORT 46354 + +#define SSSNIC_TRANSPORT_OFFSET(hdr, skb) ((u32)((hdr) - (skb)->data)) + +// SQE CTRL +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_RSVD_SHIFT 18 +#define SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_SHIFT 19 +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_DIRECT_SHIFT 29 +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_SHIFT 30 +#define SSSNIC_SQE_CTRL_SECT_OWNER_SHIFT 31 + +#define SSSNIC_SQE_CTRL_SECT_BD0_LEN_MASK 0x3FFFFU +#define SSSNIC_SQE_CTRL_SECT_RSVD_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_BUFDESC_NUM_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_TASKSECT_LEN_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DATA_FORMAT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_DIRECT_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_EXTENDED_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_OWNER_MASK 0x1U + +#define SSSNIC_SQE_CTRL_SECT_SET(val, member) \ +(((u32)(val) & SSSNIC_SQE_CTRL_SECT_##member##_MASK) << SSSNIC_SQE_CTRL_SECT_##member##_SHIFT) + +// SQ CTRL QINFO +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_SHIFT 0 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_SHIFT 2 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_SHIFT 10 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_SHIFT 11 +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_SHIFT 12 +#define SSSNIC_SQE_CTRL_SECT_QINFO_MSS_SHIFT 13 +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_SHIFT 27 +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_SHIFT 28 +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_SHIFT 29 + +#define SSSNIC_SQE_CTRL_SECT_QINFO_PKT_TYPE_MASK 0x3U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PLDOFF_MASK 0xFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_UFO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TSO_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_TCPUDP_CS_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_MSS_MASK 0x3FFFU +#define SSSNIC_SQE_CTRL_SECT_QINFO_SCTP_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_UC_MASK 0x1U +#define SSSNIC_SQE_CTRL_SECT_QINFO_PRI_MASK 0x7U + +#define SSSNIC_SQE_CTRL_SECT_QINFO_SET(val, member) \ + (((u32)(val) & SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_GET(val, member) \ + (((val) >> SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT) & \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK) + +#define SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(val, member) \ + ((val) & (~(SSSNIC_SQE_CTRL_SECT_QINFO_##member##_MASK << \ + SSSNIC_SQE_CTRL_SECT_QINFO_##member##_SHIFT))) + +// SQ TASK +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_SHIFT 19 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_SHIFT 22 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_SHIFT 24 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_SHIFT 25 +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_SHIFT 26 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_SHIFT 27 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_SHIFT 28 +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_SHIFT 29 +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_SHIFT 30 +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_SHIFT 31 + +#define SSSNIC_SQE_TASK_SECT_VALUE0_TUNNEL_FLAG_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_NEXT_PROTO_MASK 0x3U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L3_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_INNER_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_EN_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L3_EN_MASK 0x1U +#define 
SSSNIC_SQE_TASK_SECT_VALUE0_OUT_L4_PSEUDO_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_ESP_OFFLOAD_MASK 0x1U +#define SSSNIC_SQE_TASK_SECT_VALUE0_IPSEC_PROTO_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE0_SET(val, member) \ + (((u32)(val) & SSSNIC_SQE_TASK_SECT_VALUE0_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE0_##member##_SHIFT) + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_SHIFT 0 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_SHIFT 16 +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_SHIFT 19 + +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_MASK 0xFFFFU +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TYPE_MASK 0x7U +#define SSSNIC_SQE_TASK_SECT_VALUE3_VLAN_TAG_VALID_MASK 0x1U + +#define SSSNIC_SQE_TASK_SECT_VALUE3_SET(val, member) \ + (((val) & SSSNIC_SQE_TASK_SECT_VALUE3_##member##_MASK) << \ + SSSNIC_SQE_TASK_SECT_VALUE3_##member##_SHIFT) + +#define SSSNIC_VLAN_INSERT_MODE_MAX 5 +#define SSSNIC_TSO_CS_EN 1 +#define SSSNIC_DEF_PKT_CNT 1 + +#define SSSNIC_SQ_STATS_INC(sq_desc, field) \ +do { \ + u64_stats_update_begin(&(sq_desc)->stats.stats_sync); \ + (sq_desc)->stats.field++; \ + u64_stats_update_end(&(sq_desc)->stats.stats_sync); \ +} while (0) + +enum sss_nic_check_tx_offload_type { + SSSNIC_OFFLOAD_TSO = BIT(0), + SSSNIC_OFFLOAD_TX_CSUM = BIT(1), + SSSNIC_OFFLOAD_TX_VLAN = BIT(2), + SSSNIC_OFFLOAD_TX_DISABLE = BIT(3), + SSSNIC_OFFLOAD_TX_ESP = BIT(4), +}; + +union sss_nic_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +struct sss_nic_sqe_ctrl_section { + u32 sect_len; + u32 qinfo; + u32 addr_high; + u32 addr_low; +}; + +/* Engine only pass first 12B TS field directly to uCode through metadata + * vlan_offoad is used for hardware when vlan insert in tx + */ +struct sss_nic_sqe_task_section { + u32 value[4]; +}; + +struct sss_nic_sqe_bd_section { + u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ + u32 rsvd; + u32 addr_high; + u32 addr_low; +}; + +/* use section pointer for support non continuous wqe */ +struct sss_nic_sqe { + struct sss_nic_sqe_ctrl_section *ctrl_sect; + struct sss_nic_sqe_task_section *task_sect; + struct sss_nic_sqe_bd_section *bd_sect0; + struct sss_nic_sqe_bd_section *bd_sect1; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +/* ************* SQ_CTRL ************** */ +enum sss_nic_sqe_data_format { + SSSNIC_NORMAL_SQE = 0, +}; + +enum sss_nic_sqe_type { + SSSNIC_SQE_COMPACT_TYPE = 0, + SSSNIC_SQE_EXTENDED_TYPE = 1, +}; + +enum sss_nic_sqe_task_len { + SSSNIC_SQE_TASK_LEN_46BITS = 0, + SSSNIC_SQE_TASK_LEN_128BITS = 1, +}; + +union sss_nic_transport_header { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum sss_nic_sq_l3_proto_type { + SSSNIC_UNSUPPORT_L3_PORTO_TYPE = 0, + SSSNIC_IPV6_PKT = 1, + SSSNIC_IPV4_PKT_NO_CSO = 2, + SSSNIC_IPV4_PKT_WITH_CSO = 3, +}; + +enum sss_nic_sq_l4_offload_type { + SSSNIC_DISABLE_OFFLOAD = 0, + SSSNIC_TCP_OFFLOAD = 1, + SSSNIC_SCTP_OFFLOAD = 2, + SSSNIC_UDP_OFFLOAD = 3, +}; + +static inline __sum16 sss_nic_csum_magic(union sss_nic_ip *ip, + unsigned short proto) +{ + return (ip->v4->version == SSSNIC_IPV4_VERSION) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +#define sss_nic_set_vlan_tx_offload(task_sect, vlan_tag, vlan_type) \ + ((task_sect)->value[3] = SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_tag), VLAN_TAG) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET((vlan_type), VLAN_TYPE) | \ + SSSNIC_SQE_TASK_SECT_VALUE3_SET(1U, VLAN_TAG_VALID)) + +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats) +{ + struct sss_nic_sq_stats *sq_stats = &sq_desc->stats; + unsigned int begin; + + u64_stats_update_begin(&stats->stats_sync); + do { + begin = u64_stats_fetch_begin(&sq_stats->stats_sync); + stats->tx_bytes = sq_stats->tx_bytes; + stats->tx_packets = sq_stats->tx_packets; + stats->tx_busy = sq_stats->tx_busy; + stats->wake = sq_stats->wake; + stats->tx_dropped = sq_stats->tx_dropped; + } while (u64_stats_fetch_retry(&sq_stats->stats_sync, begin)); + u64_stats_update_end(&stats->stats_sync); +} + +#define sss_nic_init_bd_sect(bd_sect, addr, bd_len) \ +do { \ + (bd_sect)->addr_high = sss_hw_be32(upper_32_bits(addr)); \ + (bd_sect)->addr_low = sss_hw_be32(lower_32_bits(addr)); \ + (bd_sect)->len = sss_hw_be32(bd_len); \ +} while (0) + +#define sss_nic_unmap_dma_page(nic_dev, nr_frags, dma_group) \ +do { \ + struct pci_dev *_pdev = (nic_dev)->pdev; \ + int _frag_id; \ +\ + for (_frag_id = 1; _frag_id < (nr_frags) + 1; _frag_id++) \ + dma_unmap_page(&_pdev->dev, (dma_group)[_frag_id].dma, \ + (dma_group)[_frag_id].len, DMA_TO_DEVICE); \ + dma_unmap_single(&_pdev->dev, (dma_group)[0].dma, (dma_group)[0].len, \ + DMA_TO_DEVICE); \ +} while (0) + +static int sss_nic_map_dma_page(struct sss_nic_dev *nic_dev, + struct sk_buff *skb, u16 valid_nr_frag, + struct sss_nic_sq_desc *sq_desc, + struct sss_nic_tx_desc *tx_desc, + struct sss_nic_sqe *sqe) +{ + struct sss_nic_sqe_ctrl_section *ctrl_sect = sqe->ctrl_sect; + struct sss_nic_sqe_bd_section *bd_sect = sqe->bd_sect0; + struct sss_nic_dma_info *dma_group = tx_desc->dma_group; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag = NULL; + u32 flag; + int ret; + + dma_group[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[0].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + return -EFAULT; + } + + dma_group[0].len = skb_headlen(skb); + + ctrl_sect->addr_high = sss_hw_be32(upper_32_bits(dma_group[0].dma)); + ctrl_sect->addr_low = sss_hw_be32(lower_32_bits(dma_group[0].dma)); + ctrl_sect->sect_len = dma_group[0].len; + + for (flag = 0; flag < valid_nr_frag;) { + frag = &(skb_shinfo(skb)->frags[flag]); + if (unlikely(flag == sqe->first_bds_num)) + bd_sect = sqe->bd_sect1; + + flag++; + dma_group[flag].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_group[flag].dma)) { + SSSNIC_SQ_STATS_INC(sq_desc, dma_map_err); + flag--; + ret = -EFAULT; + goto frag_map_err; + } + dma_group[flag].len = skb_frag_size(frag); + + sss_nic_init_bd_sect(bd_sect, dma_group[flag].dma, + dma_group[flag].len); + bd_sect++; + } + return 0; + +frag_map_err: + sss_nic_unmap_dma_page(nic_dev, flag, dma_group); + return ret; +} + + +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) \ +do { \ + if ((ip)->v4->version == 6) \ + (task_sect)->value[1] = be32_to_cpu(skb_shinfo(skb)->ip6_frag_id); \ +} while (0) +#else +#define sss_nic_ipv6_frag_id(task_sect, skb, ip) do {} while (0) +#endif + 
+#define sss_nic_get_inner_transport_info(task_sect, skb, ip, l4, l4_proto, offset, l4_offload) \ +do { \ + if ((l4_proto) == IPPROTO_TCP) { \ + (l4)->tcp->check = ~sss_nic_csum_magic((ip), IPPROTO_TCP); \ + *(l4_offload) = SSSNIC_TCP_OFFLOAD; \ + *(offset) = SSSNIC_TCP_DOFF_TO_BYTES((l4)->tcp->doff) + \ + SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } else if ((l4_proto) == IPPROTO_UDP) { \ + sss_nic_ipv6_frag_id(task_sect, (skb), (ip)); \ + *(l4_offload) = SSSNIC_UDP_OFFLOAD; \ + *(offset) = SSSNIC_TRANSPORT_OFFSET((l4)->hdr, (skb)); \ + } \ +} while (0) + +#define sss_nic_check_enc_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + union sss_nic_ip _ip; \ + u8 _l4_proto; \ +\ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ + _ip.hdr = skb_network_header(skb); \ + if (_ip.v4->version == SSSNIC_IPV4_VERSION) { \ + _l4_proto = _ip.v4->protocol; \ + } else if (_ip.v4->version == SSSNIC_IPV6_VERSION) { \ + union sss_nic_transport_header l4; \ + unsigned char *exthdr; \ + __be16 frag_off; \ +\ + exthdr = _ip.hdr + sizeof(*_ip.v6); \ + _l4_proto = _ip.v6->nexthdr; \ + l4.hdr = skb_transport_header(skb); \ + if (l4.hdr != exthdr) \ + ipv6_skip_exthdr((skb), exthdr - (skb)->data, &_l4_proto, &frag_off); \ + } else { \ + _l4_proto = IPPROTO_RAW; \ + } \ + if (((struct udphdr *)skb_transport_header(skb))->dest != \ + SSSNIC_VXLAN_OFFLOAD_PORT || \ + _l4_proto != IPPROTO_UDP) { \ + SSSNIC_SQ_STATS_INC((sq_desc), unknown_tunnel_proto); \ + /* disable checksum offload */ \ + skb_checksum_help(skb); \ + } else { \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ +} while (0) + +#define sss_nic_check_tx_csum(sq_desc, task_sect, skb, offload) \ +do { \ + if ((skb)->ip_summed == CHECKSUM_PARTIAL) {\ + if ((skb)->encapsulation) \ + sss_nic_check_enc_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else {\ + (task_sect)->value[0] |= \ + SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + *(offload) = SSSNIC_OFFLOAD_TX_CSUM; \ + } \ + } \ +} while (0) + +#define sss_nic_get_inner_proto_type(skb, ip, l4, l4_proto) \ +do { \ + unsigned char *_ext_hdr = NULL; \ + __be16 _frag_off = 0; \ +\ + if ((ip)->v4->version == SSSNIC_IPV4_VERSION) { \ + *(l4_proto) = (ip)->v4->protocol; \ + } else if ((ip)->v4->version == SSSNIC_IPV6_VERSION) { \ + _ext_hdr = (ip)->hdr + sizeof(*((ip)->v6)); \ + *(l4_proto) = (ip)->v6->nexthdr; \ + if (_ext_hdr != (l4)->hdr) \ + ipv6_skip_exthdr((skb), (int)(_ext_hdr - (skb)->data), \ + (l4_proto), &_frag_off); \ + } else { \ + *(l4_proto) = 0; \ + } \ +} while (0) + +#define sss_nic_set_tso_info(task_sect, qinfo, l4_offload, offset, mss) \ +do { \ + if ((l4_offload) == SSSNIC_TCP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, TSO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } else if ((l4_offload) == SSSNIC_UDP_OFFLOAD) { \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UFO); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L4_EN); \ + } \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, INNER_L3_EN); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((offset) >> 1, PLDOFF); \ + *(qinfo) = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR(*(qinfo), MSS); \ + *(qinfo) |= SSSNIC_SQE_CTRL_SECT_QINFO_SET((mss), MSS); \ +} while (0) + +#define sss_nic_get_proto_hdr(task_sect, skb, ip, l4) \ +do { \ + if ((skb)->encapsulation) { \ + u32 gso_type = skb_shinfo(skb)->gso_type; \ + (task_sect)->value[0] |= 
SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L3_EN); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, TUNNEL_FLAG); \ +\ + (l4)->hdr = skb_transport_header(skb); \ + (ip)->hdr = skb_network_header(skb); \ +\ + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { \ + (l4)->udp->check = ~sss_nic_csum_magic((ip), IPPROTO_UDP); \ + (task_sect)->value[0] |= SSSNIC_SQE_TASK_SECT_VALUE0_SET(1U, OUT_L4_EN); \ + } \ +\ + (ip)->hdr = skb_inner_network_header(skb); \ + (l4)->hdr = skb_inner_transport_header(skb); \ + } else { \ + (ip)->hdr = skb_network_header(skb); \ + (l4)->hdr = skb_transport_header(skb); \ + } \ +} while (0) + +#define sss_nic_check_tso(task_sect, qinfo, skb, offload) \ +do { \ + enum sss_nic_sq_l4_offload_type _l4_offload = SSSNIC_DISABLE_OFFLOAD; \ + union sss_nic_ip _ip; \ + union sss_nic_transport_header _l4; \ + u32 _offset = 0; \ + u8 _l4_proto; \ + int _ret; \ +\ + _ret = skb_cow_head((skb), 0); \ + if (_ret < 0) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else { \ + sss_nic_get_proto_hdr((task_sect), (skb), &_ip, &_l4); \ + sss_nic_get_inner_proto_type(skb, &_ip, &_l4, &_l4_proto); \ + sss_nic_get_inner_transport_info((task_sect), (skb), &_ip, &_l4, \ + _l4_proto, &_offset, &_l4_offload); \ + sss_nic_set_tso_info((task_sect), (qinfo), _l4_offload, _offset, \ + skb_shinfo(skb)->gso_size); \ +\ + if (unlikely(SSSNIC_SQE_CTRL_SECT_QINFO_GET(*(qinfo), PLDOFF) > \ + SSSNIC_PAYLOAD_OFFSET_MAX)) \ + *(offload) = SSSNIC_OFFLOAD_TX_DISABLE; \ + else \ + *(offload) = SSSNIC_OFFLOAD_TSO; \ + } \ +} while (0) + +#define sss_nic_check_tx_offload(sq_desc, task_sect, skb, qinfo, offload) \ +do { \ + if (skb_is_gso(skb) == 0) \ + sss_nic_check_tx_csum((sq_desc), (task_sect), (skb), (offload)); \ + else \ + sss_nic_check_tso((task_sect), (qinfo), (skb), (offload)); \ +\ + if (*(offload) != SSSNIC_OFFLOAD_TX_DISABLE) { \ + if (unlikely(skb_vlan_tag_present(skb))) { \ + sss_nic_set_vlan_tx_offload((task_sect), skb_vlan_tag_get(skb), \ + (sq_desc)->qid % \ + SSSNIC_VLAN_INSERT_MODE_MAX); \ + *(offload) |= SSSNIC_OFFLOAD_TX_VLAN; \ + } \ + } \ +} while (0) + +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET +#define sss_nic_get_inner_ihs(skb) \ + (skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb)) +#else +#define sss_nic_get_inner_ihs(skb) \ + ((skb_inner_transport_header(skb) - (skb)->data) + inner_tcp_hdrlen(skb)) +#endif + +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && defined(HAVE_SK_BUFF_ENCAPSULATION)) +#define sss_nic_get_ihs(skb, ihs) \ +do { \ + if ((skb)->encapsulation) \ + (ihs) = sss_nic_get_inner_ihs(skb); \ + else \ + (ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb); \ +} while (0) +#else +#define sss_nic_get_ihs(skb, ihs) \ + ((ihs) = skb_transport_offset(skb) + tcp_hdrlen(skb)) +#endif + +#define sss_nic_get_pkt_stats(tx_desc, skb) \ +do { \ + u32 _ihs; \ + u32 _hdr_len; \ +\ + if (skb_is_gso(skb)) { \ + sss_nic_get_ihs((skb), _ihs); \ + _hdr_len = (skb_shinfo(skb)->gso_segs - 1) * _ihs; \ + (tx_desc)->bytes = (skb)->len + (u64)_hdr_len; \ + } else { \ + (tx_desc)->bytes = (skb)->len > ETH_ZLEN ? (skb)->len : ETH_ZLEN; \ + } \ + (tx_desc)->nr_pkt_cnt = SSSNIC_DEF_PKT_CNT; \ +} while (0) + +#define sss_nic_get_sq_free_wqebbs(sq) sss_wq_free_wqebb(&(sq)->wq) + +static inline int sss_nic_check_tx_stop(struct sss_nic_sq_desc *sq_desc, + u16 wqebb_cnt) +{ + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) >= wqebb_cnt)) + return 0; + + /* We need to check again in a case another CPU has free room available. 
*/ + netif_stop_subqueue(sq_desc->netdev, sq_desc->qid); + + if (likely(sss_nic_get_sq_free_wqebbs(sq_desc->sq) < wqebb_cnt)) + return -EBUSY; + + /* wake up queue when there are enough wqebbs */ + netif_start_subqueue(sq_desc->netdev, sq_desc->qid); + + return 0; +} + + +#define sss_nic_get_and_update_sq_owner(sq, owner_ptr, curr_pi, wqebb_cnt) \ +do { \ + if (unlikely((curr_pi) + (wqebb_cnt) >= (sq)->wq.q_depth)) \ + (sq)->owner = !(sq)->owner; \ + *(owner_ptr) = (sq)->owner; \ +} while (0) + +#define sss_nic_combo_sqe(sq, sqe, task, curr_pi, owner, offload, sge_cnt) \ +do { \ + void *_wqebb = NULL; \ + void *_second_part_wqebbs_addr = NULL; \ + u16 _tmp_pi; \ + u16 _first_part_wqebbs_num; \ + int _id; \ +\ + (sqe)->ctrl_sect = sss_wq_get_one_wqebb(&(sq)->wq, (curr_pi)); \ + if ((offload) == 0 && (sge_cnt) == 1) { \ + (sqe)->wqe_type = SSSNIC_SQE_COMPACT_TYPE; \ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), 1); \ + } else { \ + (sqe)->wqe_type = SSSNIC_SQE_EXTENDED_TYPE; \ +\ + if ((offload) != 0) { \ + (sqe)->task_sect = sss_wq_get_one_wqebb(&(sq)->wq, &_tmp_pi); \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_128BITS; \ +\ + for (_id = 0; _id < ARRAY_LEN((sqe)->task_sect->value); _id++) \ + (sqe)->task_sect->value[_id] = sss_hw_be32((task)->value[_id]); \ +\ + } else { \ + (sqe)->task_type = SSSNIC_SQE_TASK_LEN_46BITS; \ + } \ +\ + if ((sge_cnt) > 1) { \ + /* first wqebb contain bd0, so use weqbb_cnt(sge_num-1) */ \ + _wqebb = sss_wq_get_multi_wqebb(&(sq)->wq, (sge_cnt) - 1, &_tmp_pi, \ + &_second_part_wqebbs_addr, \ + &_first_part_wqebbs_num); \ + (sqe)->first_bds_num = _first_part_wqebbs_num; \ + (sqe)->bd_sect1 = _second_part_wqebbs_addr; \ + (sqe)->bd_sect0 = _wqebb; \ + } \ +\ + sss_nic_get_and_update_sq_owner((sq), (owner), *(curr_pi), \ + (sge_cnt) + (u16)!!(offload)); \ + } \ +} while (0) + +#define SSSNIC_FILL_COMPACT_WQE_CTRL_SECT(sqe, ctrl_sect, owner) \ +do { \ + (ctrl_sect)->sect_len |= \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = 0; \ +} while (0) + +#define SSSNIC_FILL_EXTEND_WQE_CTRL_SECT(sqe, ctrl_sect, info, sge_cnt, owner) \ +do { \ + (ctrl_sect)->sect_len |= SSSNIC_SQE_CTRL_SECT_SET((sge_cnt), BUFDESC_NUM) | \ + SSSNIC_SQE_CTRL_SECT_SET((owner), OWNER) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->task_type, TASKSECT_LEN) | \ + SSSNIC_SQE_CTRL_SECT_SET((sqe)->wqe_type, EXTENDED) | \ + SSSNIC_SQE_CTRL_SECT_SET(SSSNIC_NORMAL_SQE, DATA_FORMAT); \ +\ + (ctrl_sect)->sect_len = sss_hw_be32((ctrl_sect)->sect_len); \ + (ctrl_sect)->qinfo = (info); \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(1U, UC); \ +\ + if (!SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS)) { \ + (ctrl_sect)->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_DEFAULT_MSS, MSS); \ + } else if (SSSNIC_SQE_CTRL_SECT_QINFO_GET((ctrl_sect)->qinfo, MSS) < SSSNIC_MIN_MSS) { \ + /* mss should not less than 80 */ \ + (ctrl_sect)->qinfo = SSSNIC_SQE_CTRL_SECT_QINFO_CLEAR((ctrl_sect)->qinfo, MSS); \ + ctrl_sect->qinfo |= SSSNIC_SQE_CTRL_SECT_QINFO_SET(SSSNIC_MIN_MSS, MSS); \ + } \ + (ctrl_sect)->qinfo = sss_hw_be32((ctrl_sect)->qinfo); \ +} while (0) + +#define sss_nic_init_sq_ctrl(sqe, info, sge_cnt, owner) \ +do { \ + if ((sqe)->wqe_type == SSSNIC_SQE_COMPACT_TYPE) \ + SSSNIC_FILL_COMPACT_WQE_CTRL_SECT((sqe), (sqe)->ctrl_sect, (owner)); \ + else \ + SSSNIC_FILL_EXTEND_WQE_CTRL_SECT((sqe), 
(sqe)->ctrl_sect, \ + (info), (sge_cnt), (owner)); \ +} while (0) + +#define sss_nic_rollback_sq_wqebbs(sq, wqebb_cnt, owner) \ +do { \ + if ((owner) != (sq)->owner) \ + (sq)->owner = (u8)(owner); \ + (sq)->wq.pi -= (wqebb_cnt); \ +} while (0) + +#define sss_nic_update_sq_local_ci(sq, wqebb_cnt) \ + sss_update_wq_ci(&(sq)->wq, (wqebb_cnt)) + +static netdev_tx_t sss_nic_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct sss_nic_sq_desc *sq_desc) +{ + u32 qinfo = 0; + u32 offload = 0; + u16 pi = 0; + u16 owner; + u16 sge_cnt; + u16 nr_frags = 0; + u16 wqebb_cnt; + bool find_zero_len = false; + int ret; + int frag_id; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_sqe sqe = {0}; + struct sss_nic_sqe_task_section task_sect = {0}; + + if (unlikely(skb->len < SSSNIC_SKB_LEN_MIN)) { + if (skb_pad(skb, (int)(SSSNIC_SKB_LEN_MIN - skb->len))) { + SSSNIC_SQ_STATS_INC(sq_desc, skb_pad_err); + goto tx_drop_pad_err; + } + + skb->len = SSSNIC_SKB_LEN_MIN; + } + + for (frag_id = 0; frag_id < skb_shinfo(skb)->nr_frags; frag_id++) { + if (skb_frag_size(&skb_shinfo(skb)->frags[frag_id]) == 0) { + find_zero_len = true; + continue; + } else if (find_zero_len) { + SSSNIC_SQ_STATS_INC(sq_desc, frag_size_zero); + goto tx_drop_pkts; + } + nr_frags++; + } + sge_cnt = nr_frags + 1; + wqebb_cnt = sge_cnt + 1; /* task info need 1 wqebb */ + + if (unlikely(sss_nic_check_tx_stop(sq_desc, wqebb_cnt))) { + SSSNIC_SQ_STATS_INC(sq_desc, tx_busy); + return NETDEV_TX_BUSY; + } + + sss_nic_check_tx_offload(sq_desc, &task_sect, skb, &qinfo, &offload); + if (unlikely(offload == SSSNIC_OFFLOAD_TX_DISABLE)) { + SSSNIC_SQ_STATS_INC(sq_desc, offload_err); + goto tx_drop_pkts; + } else if (offload == 0) { + /* no TS in current wqe */ + wqebb_cnt -= 1; + if (unlikely(sge_cnt == 1 && skb->len > SSSNIC_SKB_LEN_MAX)) + goto tx_drop_pkts; + } + + sss_nic_combo_sqe(sq_desc->sq, &sqe, &task_sect, &pi, &owner, offload, sge_cnt); + + tx_desc = &sq_desc->tx_desc_group[pi]; + tx_desc->nr_frags = nr_frags; + tx_desc->wqebb_cnt = wqebb_cnt; + tx_desc->skb = skb; + ret = sss_nic_map_dma_page(nic_dev, skb, nr_frags, sq_desc, tx_desc, &sqe); + if (ret != 0) { + sss_nic_rollback_sq_wqebbs(sq_desc->sq, wqebb_cnt, owner); + goto tx_drop_pkts; + } + sss_nic_get_pkt_stats(tx_desc, skb); + sss_nic_init_sq_ctrl(&sqe, qinfo, sge_cnt, owner); + sss_nic_write_db(sq_desc->sq, sq_desc->cos, SQ_CFLAG_DP, + sss_nic_get_sq_local_pi(sq_desc->sq)); + return NETDEV_TX_OK; + +tx_drop_pkts: + dev_kfree_skb_any(skb); +tx_drop_pad_err: + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + struct sss_nic_sq_desc *sq_desc = &nic_dev->sq_desc_group[qid]; + + return sss_nic_send_one_skb(skb, netdev, sq_desc); +} + +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct sss_nic_sq_desc *sq_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) { + SSSNIC_STATS_TX_DROP_INC(nic_dev); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(qid >= nic_dev->qp_res.qp_num)) { + SSSNIC_STATS_TX_INVALID_QID_INC(nic_dev); + goto out; + } + sq_desc = &nic_dev->sq_desc_group[qid]; + return sss_nic_send_one_skb(skb, netdev, sq_desc); + +out: + 
dev_kfree_skb_any(skb); + sq_desc = &nic_dev->sq_desc_group[0]; + SSSNIC_SQ_STATS_INC(sq_desc, tx_dropped); + return NETDEV_TX_OK; +} + +#define sss_nic_tx_free_skb(nic_dev, tx_desc) \ +do { \ + sss_nic_unmap_dma_page((nic_dev), (tx_desc)->nr_frags, (tx_desc)->dma_group); \ + dev_kfree_skb_any((tx_desc)->skb); \ + (tx_desc)->skb = NULL; \ +} while (0) + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + u32 i; + + for (i = 0; i < sq_depth; i++) { + tx_desc = &tx_desc_group[i]; + if (tx_desc->skb) + sss_nic_tx_free_skb(nic_dev, tx_desc); + } +} + +#define sss_nic_stop_subqueue(nic_dev, sq_desc, wake) \ +do { \ + u16 _qid = (sq_desc)->sq->qid; \ + u64 _wake = 0; \ + struct netdev_queue *_netdev_sq; \ +\ + if (unlikely(__netif_subqueue_stopped((nic_dev)->netdev, _qid) && \ + sss_nic_get_sq_free_wqebbs((sq_desc)->sq) >= 1 && \ + test_bit(SSSNIC_INTF_UP, &(nic_dev)->flags))) { \ + _netdev_sq = netdev_get_tx_queue((sq_desc)->netdev, _qid); \ +\ + __netif_tx_lock(_netdev_sq, smp_processor_id()); \ + if (__netif_subqueue_stopped((nic_dev)->netdev, _qid)) { \ + netif_wake_subqueue((nic_dev)->netdev, _qid); \ + _wake++; \ + } \ + __netif_tx_unlock(_netdev_sq); \ + } \ +\ + *(wake) = _wake; \ +} while (0) + +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, int budget) +{ + struct sss_nic_tx_desc *tx_desc = NULL; + struct sss_nic_dev *nic_dev = netdev_priv(sq_desc->netdev); + u64 tx_byte_cnt = 0; + u64 nr_pkt_cnt = 0; + u64 wake = 0; + u16 sw_ci; + u16 hw_ci; + u16 wqebb_cnt = 0; + int pkt_cnt = 0; + + hw_ci = sss_nic_get_sq_hw_ci(sq_desc->sq); + dma_rmb(); + sw_ci = sss_nic_get_sq_local_ci(sq_desc->sq); + + do { + tx_desc = &sq_desc->tx_desc_group[sw_ci]; + + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & sq_desc->qid_mask) < tx_desc->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_desc->wqebb_cnt) & (u16)sq_desc->qid_mask; + prefetch(&sq_desc->tx_desc_group[sw_ci]); + + tx_byte_cnt += tx_desc->bytes; + nr_pkt_cnt += tx_desc->nr_pkt_cnt; + wqebb_cnt += tx_desc->wqebb_cnt; + pkt_cnt++; + + sss_nic_tx_free_skb(nic_dev, tx_desc); + } while (likely(pkt_cnt < budget)); + + sss_nic_update_sq_local_ci(sq_desc->sq, wqebb_cnt); + + sss_nic_stop_subqueue(nic_dev, sq_desc, &wake); + + u64_stats_update_begin(&sq_desc->stats.stats_sync); + sq_desc->stats.tx_bytes += tx_byte_cnt; + sq_desc->stats.tx_packets += nr_pkt_cnt; + sq_desc->stats.wake += wake; + u64_stats_update_end(&sq_desc->stats.stats_sync); + + return pkt_cnt; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..faeca6a936858933cce1af3398754da9825ade0c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_H +#define SSS_NIC_TX_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +void sss_nic_free_all_skb(struct sss_nic_dev *nic_dev, u32 sq_depth, + struct sss_nic_tx_desc *tx_desc_group); +netdev_tx_t sss_nic_loop_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +netdev_tx_t sss_nic_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev); +void sss_nic_get_sq_stats(struct sss_nic_sq_desc *sq_desc, + struct sss_nic_sq_stats *stats); +int sss_nic_tx_poll(struct sss_nic_sq_desc *sq_desc, 
int budget); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c new file mode 100644 index 0000000000000000000000000000000000000000..a5dc709fa5bd4f113785177e49c88c48f88b49a5 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_rss_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_nic_tx.h" + +#define SSSNIC_SQ_EXTRA_SGE 18 + +#define SSSNIC_FLUSH_SQ_TIMEOUT 1000 + +#define SSSNIC_STOP_SQ_WAIT_TIME_MIN 900 +#define SSSNIC_STOP_SQ_WAIT_TIME_MAX 1000 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN 9900 +#define SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX 10000 + +#define SSSNIC_SQ_WQEBB_BD (SSSNIC_SQ_WQEBB_SIZE / 16) + +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + int qid; + int id; + u64 bds_size; + u64 len; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + bds_size = sizeof(*sq_res->dma_group) * + (qp_res->sq_depth * SSSNIC_SQ_WQEBB_BD + SSSNIC_SQ_EXTRA_SGE); + sq_res->dma_group = kzalloc(bds_size, GFP_KERNEL); + if (!sq_res->dma_group) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to allocate sq %d dma info\n", qid); + goto error; + } + + len = sizeof(*sq_res->tx_desc_group) * qp_res->sq_depth; + sq_res->tx_desc_group = kzalloc(len, GFP_KERNEL); + if (!sq_res->tx_desc_group) { + kfree(sq_res->dma_group); + sq_res->dma_group = NULL; + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fail to alloc sq %d tx desc\n", qid); + goto error; + } + } + + return 0; + +error: + for (id = 0; id < qid; id++) { + sq_res = &qp_res->sq_res_group[id]; + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } + + return -ENOMEM; +} + +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + u16 qid; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_res = &qp_res->sq_res_group[qid]; + + sss_nic_free_all_skb(nic_dev, qp_res->sq_depth, sq_res->tx_desc_group); + kfree(sq_res->dma_group); + kfree(sq_res->tx_desc_group); + sq_res->dma_group = NULL; + sq_res->tx_desc_group = NULL; + } +} + +void sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res) +{ + struct sss_nic_sq_resource *sq_res = NULL; + struct sss_nic_sq_desc *sq_desc = NULL; + u16 qid; + u32 did; + + for (qid = 0; qid < qp_res->qp_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_res = &qp_res->sq_res_group[qid]; + + sq_desc->q_depth = qp_res->sq_depth; + sq_desc->qid_mask = qp_res->sq_depth - 1; + + sq_desc->tx_desc_group = sq_res->tx_desc_group; + for (did = 0; did < qp_res->sq_depth; did++) + sq_desc->tx_desc_group[did].dma_group = + &sq_res->dma_group[did * SSSNIC_SQ_WQEBB_BD]; + + sq_desc->sq = &nic_dev->nic_io->sq_group[qid]; + } +} + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + struct sss_nic_sq_desc *sq_desc = 
NULL; + struct sss_nic_sq_stats *sq_stats = NULL; + u16 sq_num = nic_dev->max_qp_num; + u16 qid; + + nic_dev->sq_desc_group = kcalloc(sq_num, sizeof(*nic_dev->sq_desc_group), GFP_KERNEL); + if (!nic_dev->sq_desc_group) + return -ENOMEM; + + for (qid = 0; qid < sq_num; qid++) { + sq_desc = &nic_dev->sq_desc_group[qid]; + sq_stats = &sq_desc->stats; + sq_desc->qid = qid; + sq_desc->dev = nic_dev->dev_hdl; + sq_desc->netdev = nic_dev->netdev; + sq_desc->qid_mask = nic_dev->qp_res.sq_depth - 1; + sq_desc->q_depth = nic_dev->qp_res.sq_depth; + u64_stats_init(&sq_stats->stats_sync); + } + + return 0; +} + +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev) +{ + kfree(nic_dev->sq_desc_group); + nic_dev->sq_desc_group = NULL; +} + +static bool sss_nic_sq_is_null(struct sss_nic_io_queue *sq) +{ + u16 sw_pi = sss_nic_get_sq_local_pi(sq); + u16 hw_ci = sss_nic_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +static int sss_nic_stop_sq(struct sss_nic_dev *nic_dev, u16 qid) +{ + int ret; + unsigned long timeout; + struct sss_nic_io_queue *sq = nic_dev->sq_desc_group[qid].sq; + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_MIN, SSSNIC_STOP_SQ_WAIT_TIME_MAX); + } while (time_before(jiffies, timeout)); + + timeout = msecs_to_jiffies(SSSNIC_FLUSH_SQ_TIMEOUT) + jiffies; + do { + if (sss_nic_sq_is_null(sq)) + return 0; + + ret = sss_nic_force_drop_tx_pkt(nic_dev); + if (ret != 0) + break; + + usleep_range(SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MIN, + SSSNIC_STOP_SQ_WAIT_TIME_FORCE_MAX); + } while (time_before(jiffies, timeout)); + + if (!sss_nic_sq_is_null(sq)) + return -EFAULT; + + return 0; +} + +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev) +{ + u16 qid = 0; + int ret = 0; + + for (qid = 0; qid < nic_dev->qp_res.qp_num; qid++) { + ret = sss_nic_stop_sq(nic_dev, qid); + if (ret != 0) + nicif_err(nic_dev, drv, nic_dev->netdev, "Fail to stop sq%u\n", qid); + } +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h new file mode 100644 index 0000000000000000000000000000000000000000..c72af131707ebe018423492cf14675dd0a1724c1 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_tx_init.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_TX_INIT_H +#define SSS_NIC_TX_INIT_H + +#include +#include +#include +#include +#include + +#include "sss_nic_io.h" +#include "sss_nic_dev_define.h" + +int sss_nic_alloc_sq_desc_group(struct sss_nic_dev *nic_dev); +void sss_nic_free_sq_desc_group(struct sss_nic_dev *nic_dev); +int sss_nic_alloc_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_free_sq_resource(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_init_all_sq(struct sss_nic_dev *nic_dev, + struct sss_nic_qp_resource *qp_res); +void sss_nic_flush_all_sq(struct sss_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..1c585ad7a15f66b6d12b4b7e86c4785d36e8332c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sss_kernel.h" +#include "sss_hw.h" +#include "sss_nic_io.h" +#include "sss_nic_cfg.h" +#include "sss_nic_vf_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_io_define.h" +#include "sss_nic_cfg_define.h" +#include "sss_nic_event.h" + +static u8 vf_link_state; +module_param(vf_link_state, byte, 0444); +MODULE_PARM_DESC(vf_link_state, + "Set vf link state, 0 - link auto, 1 - always link up, 2 - always link down. - default is 0."); + +/* In order to adapt different linux version */ +enum { + SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_ENABLE, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, + SSSNIC_IFLA_VF_LINK_STATE_MAX +}; + +#define SSSNIC_CVLAN_INSERT_ENABLE 0x1 +#define SSSNIC_QINQ_INSERT_ENABLE 0X3 + +#define SSSNIC_GET_VLAN_TAG(vlan_id, qos) ((vlan_id) + (u16)((qos) << VLAN_PRIO_SHIFT)) + +typedef void (*sss_nic_link_vf_handler_t)(struct sss_nic_vf_info *); +typedef u8 (*sss_nic_link_state_handler_t)(struct sss_nic_io *nic_io, u16 vf_id); + +static int sss_nic_set_vlan_mode(struct sss_nic_io *nic_io, u16 func_id, + u16 vlan_tag, u16 qid, u32 vlan_mode) +{ + int ret; + u64 out_param = 0; + struct sss_nic_vlan_ctx *vlan_ctx = NULL; + struct sss_ctrl_msg_buf *msg_buf = NULL; + + msg_buf = sss_alloc_ctrlq_msg_buf(nic_io->hwdev); + if (!msg_buf) { + nic_err(nic_io->dev_hdl, "Fail to allocate send buf\n"); + return -ENOMEM; + } + + msg_buf->size = sizeof(*vlan_ctx); + vlan_ctx = (struct sss_nic_vlan_ctx *)msg_buf->buf; + vlan_ctx->sel = 0; /* TPID0 in IPSU */ + vlan_ctx->func_id = func_id; + vlan_ctx->mode = vlan_mode; + vlan_ctx->qid = qid; + vlan_ctx->tag = vlan_tag; + + sss_cpu_to_be32(vlan_ctx, sizeof(*vlan_ctx)); + + ret = sss_ctrlq_direct_reply(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_CTRLQ_OPCODE_MODIFY_VLAN_CTX, msg_buf, + &out_param, 0, SSS_CHANNEL_NIC); + if (ret != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Fail to set vlan ctx, ret: %d, out_param: 0x%llx\n", + ret, out_param); + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + return -EFAULT; + } + + sss_free_ctrlq_msg_buf(nic_io->hwdev, msg_buf); + + return 0; +} + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vlan_id, u8 qos, int vf_id) +{ + int ret; + u32 vlan_mode; + u16 os_id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_tag = SSSNIC_GET_VLAN_TAG(vlan_id, qos); + u16 func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + struct sss_nic_mbx_vf_vlan_cfg cmd_config_info = {0}; + u16 out_len = sizeof(cmd_config_info); + + if (vlan_id == 0 && opcode == SSSNIC_MBX_OPCODE_DEL) + return 0; + + cmd_config_info.vlan_id = vlan_id; + cmd_config_info.func_id = func_id; + cmd_config_info.opcode = opcode; + cmd_config_info.qos = qos; + + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + if (ret != 0 || out_len == 0 || cmd_config_info.head.state != SSS_MGMT_CMD_SUCCESS) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan, ret: %d, status: 0x%x, out_len: 0x%x\n", + os_id, ret, cmd_config_info.head.state, out_len); + return -EFAULT; + } + + vlan_mode = (opcode == SSSNIC_MBX_OPCODE_ADD) ? + SSSNIC_QINQ_INSERT_ENABLE : SSSNIC_CVLAN_INSERT_ENABLE; + + ret = sss_nic_set_vlan_mode(nic_io, func_id, vlan_tag, + SSSNIC_CONFIG_ALL_QUEUE_VLAN_CTX, vlan_mode); + if (ret != 0) { + cmd_config_info.opcode = (opcode == SSSNIC_MBX_OPCODE_DEL) ? 
+ SSSNIC_MBX_OPCODE_ADD : SSSNIC_MBX_OPCODE_DEL; + sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_CFG_VF_VLAN, + &cmd_config_info, sizeof(cmd_config_info), + &cmd_config_info, &out_len); + nic_err(nic_io->dev_hdl, + "Fail to set VF %d vlan context, ret: %d\n", os_id, ret); + } + + return ret; +} + +int sss_nic_create_vf_vlan(struct sss_nic_io *nic_io, int vf_id, u16 vlan, u8 qos) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_ADD, vlan, qos, vf_id); + if (ret != 0) + return ret; + + nic_io->vf_info_group[id].pf_qos = qos; + nic_io->vf_info_group[id].pf_vlan = vlan; + + nic_info(nic_io->dev_hdl, "Add vf vlan VLAN %u, QOS 0x%x on VF %d\n", + vlan, qos, id); + + return 0; +} + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info_group; + + vf_info_group = nic_io->vf_info_group; + + ret = sss_nic_set_vf_vlan(nic_io, SSSNIC_MBX_OPCODE_DEL, + vf_info_group[id].pf_vlan, + vf_info_group[id].pf_qos, vf_id); + if (ret != 0) + return ret; + + nic_info(nic_io->dev_hdl, "Kill vf VLAN %u on VF %d\n", + vf_info_group[id].pf_vlan, id); + + vf_info_group[id].pf_qos = 0; + vf_info_group[id].pf_vlan = 0; + + return 0; +} + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + u16 vlan_prio; + u16 pf_vlan; + u8 pf_qos; + + pf_vlan = nic_io->vf_info_group[id].pf_vlan; + pf_qos = nic_io->vf_info_group[id].pf_qos; + + vlan_prio = SSSNIC_GET_VLAN_PRIO(pf_vlan, pf_qos); + + return vlan_prio; +} + +static u8 sss_nic_ifla_vf_link_state_auto(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = false; + nic_io->vf_info_group[id].link_up = !!nic_io->link_status; + + return nic_io->link_status; +} + +static u8 sss_nic_ifla_vf_link_state_enable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; + nic_io->vf_info_group[id].link_up = true; + + return SSSNIC_LINK_UP; +} + +static u8 sss_nic_ifla_vf_link_state_disable(struct sss_nic_io *nic_io, u16 id) +{ + nic_io->vf_info_group[id].link_forced = true; + nic_io->vf_info_group[id].link_up = false; + + return SSSNIC_LINK_DOWN; +} + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link) +{ + u8 link_status = 0; + struct sss_nic_vf_info *vf_info = NULL; + + sss_nic_link_state_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_ifla_vf_link_state_auto, + sss_nic_ifla_vf_link_state_enable, + sss_nic_ifla_vf_link_state_disable, + }; + + if (link >= SSSNIC_IFLA_VF_LINK_STATE_MAX) + return -EINVAL; + + if (handler[link]) + link_status = handler[link](nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); + + /* Notify the VF of its new link state */ + vf_info = &nic_io->vf_info_group[SSSNIC_HW_VF_ID_TO_OS(vf_id)]; + if (vf_info->attach) + sss_nic_notify_vf_link_state(nic_io, vf_id, link_status); + + return 0; +} + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_vf_info *vf_info = NULL; + struct sss_nic_mbx_set_spoofchk cmd_spoofchk_cfg = {0}; + u16 out_len = sizeof(cmd_spoofchk_cfg); + + cmd_spoofchk_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + cmd_spoofchk_cfg.state = !!spoofchk; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_SPOOPCHK_STATE, + &cmd_spoofchk_cfg, + sizeof(cmd_spoofchk_cfg), &cmd_spoofchk_cfg, + 
&out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_spoofchk_cfg)) { + nic_err(nic_io->dev_hdl, "Fail to set VF(%d) spoofchk, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, ret, cmd_spoofchk_cfg.head.state, out_len); + ret = -EINVAL; + } + + vf_info = nic_io->vf_info_group; + vf_info[id].spoofchk = !!spoofchk; + + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool trust) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + nic_io->vf_info_group[id].trust = !!trust; + + return 0; +} + +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id) +{ + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + + if (vf_id > nic_io->max_vf_num) + return -EINVAL; + + return !!nic_io->vf_info_group[id].trust; +} +#endif + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate) +{ + int ret; + u16 id = SSSNIC_HW_VF_ID_TO_OS(vf_id); + struct sss_nic_mbx_tx_rate_cfg cmd_cfg = {0}; + u16 out_len = sizeof(cmd_cfg); + + cmd_cfg.min_rate = min_rate; + cmd_cfg.max_rate = max_rate; + cmd_cfg.func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + ret = sss_nic_l2nic_msg_to_mgmt_sync(nic_io->hwdev, SSSNIC_MBX_OPCODE_SET_MAX_MIN_RATE, + &cmd_cfg, sizeof(cmd_cfg), &cmd_cfg, &out_len); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_cfg)) { + nic_err(nic_io->dev_hdl, + "Fail to set VF %d max_rate %u, min_rate %u, ret: %d, status: 0x%x, out_len: 0x%x\n", + id, max_rate, min_rate, ret, cmd_cfg.head.state, + out_len); + return -EIO; + } + + nic_io->vf_info_group[id].max_rate = max_rate; + nic_io->vf_info_group[id].min_rate = min_rate; + + return 0; +} + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf) +{ + struct sss_nic_vf_info *vf_info; + + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + + ether_addr_copy(ifla_vf->mac, vf_info->user_mac); + ifla_vf->vf = SSSNIC_HW_VF_ID_TO_OS(vf_id); + ifla_vf->qos = vf_info->pf_qos; + ifla_vf->vlan = vf_info->pf_vlan; + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ifla_vf->spoofchk = vf_info->spoofchk; +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + ifla_vf->trusted = vf_info->trust; +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ifla_vf->min_tx_rate = vf_info->min_rate; + ifla_vf->max_tx_rate = vf_info->max_rate; +#else + ifla_vf->tx_rate = vf_info->max_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (!vf_info->link_forced) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf_info->link_up) + ifla_vf->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ifla_vf->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +} + +static void sss_nic_init_link_disable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = false; +} + +static void sss_nic_init_link_enable_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = true; + vf_info->link_up = true; +} + +static void sss_nic_init_link_auto_vf(struct sss_nic_vf_info *vf_info) +{ + vf_info->link_forced = false; +} + +static int sss_nic_init_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u8 link_state; + struct sss_nic_vf_info *vf_info_group = nic_io->vf_info_group; + sss_nic_link_vf_handler_t handler[SSSNIC_IFLA_VF_LINK_STATE_MAX] = { + sss_nic_init_link_auto_vf, + sss_nic_init_link_enable_vf, + sss_nic_init_link_disable_vf + }; + + if (vf_link_state >= SSSNIC_IFLA_VF_LINK_STATE_MAX) { + vf_link_state = 
SSSNIC_IFLA_VF_LINK_STATE_AUTO; + nic_warn(nic_io->dev_hdl, "Invalid vf_link_state: %u out of range[%u - %u], adjust to %d\n", + vf_link_state, SSSNIC_IFLA_VF_LINK_STATE_AUTO, + SSSNIC_IFLA_VF_LINK_STATE_DISABLE, SSSNIC_IFLA_VF_LINK_STATE_AUTO); + } + + link_state = vf_link_state; + if (link_state < SSSNIC_IFLA_VF_LINK_STATE_MAX) { + handler[link_state](&vf_info_group[vf_id]); + } else { + nic_err(nic_io->dev_hdl, "Fail to input vf_link_state: %u\n", + link_state); + return -EINVAL; + } + + return 0; +} + +static int sss_nic_register_vf_to_hw(struct sss_nic_io *nic_io) +{ + u16 out_len; + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + + cmd_register_info.op_register = 1; + out_len = sizeof(cmd_register_info); + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), + &cmd_register_info, &out_len, 0, + SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) { + nic_err(nic_io->dev_hdl, "Fail to register VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); + return -EIO; + } + + return 0; +} + +static void sss_nic_unregister_vf_to_hw(struct sss_nic_io *nic_io) +{ + int ret; + struct sss_nic_mbx_attach_vf cmd_register_info = {0}; + u16 out_len = sizeof(cmd_register_info); + + cmd_register_info.op_register = 0; + + ret = sss_mbx_send_to_pf(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, SSSNIC_MBX_OPCODE_VF_REGISTER, + &cmd_register_info, sizeof(cmd_register_info), &cmd_register_info, + &out_len, 0, SSS_CHANNEL_NIC); + if (SSS_ASSERT_SEND_MSG_RETURN(ret, out_len, &cmd_register_info)) + nic_err(nic_io->dev_hdl, + "Fail to unregister VF, ret: %d, status: 0x%x, out_len: 0x%x\n", + ret, cmd_register_info.head.state, out_len); +} + +static void sss_nic_vf_unregister(struct sss_nic_io *nic_io) +{ + sss_nic_unregister_vf_to_hw(nic_io); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); +} + +static int sss_nic_vf_register(struct sss_nic_io *nic_io) +{ + int ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_vf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_vf_mag_event_handler); + if (ret != 0) + goto reg_cb_error; + + ret = sss_nic_register_vf_to_hw(nic_io); + if (ret != 0) + goto register_vf_error; + + return 0; + +register_vf_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +reg_cb_error: + sss_unregister_vf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io) +{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return; + kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; +} + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + int ret; + u32 len; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return 0; + + nic_io->max_vf_num = sss_get_max_vf_num(nic_io->hwdev); + if (nic_io->max_vf_num == 0) + return 0; + + len = sizeof(*nic_io->vf_info_group) * nic_io->max_vf_num; + nic_io->vf_info_group = kzalloc(len, GFP_KERNEL); + if (!nic_io->vf_info_group) + return -ENOMEM; + + for (i = 0; i < nic_io->max_vf_num; i++) { + ret = sss_nic_init_vf_info(nic_io, i); + if (ret != 0) + goto init_vf_info_error; + } + + return 0; + +init_vf_info_error: + 
kfree(nic_io->vf_info_group); + nic_io->vf_info_group = NULL; + + return ret; +} + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io) +{ + int ret; + + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) + return sss_nic_vf_register(nic_io); + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_event_handler); + if (ret != 0) + return ret; + + ret = sss_register_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_event_handler); + if (ret != 0) + goto register_pf_mag_event_handler; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC, + nic_io->hwdev, sss_nic_pf_mbx_handler); + if (ret != 0) + goto register_pf_mbx_cb_error; + + ret = sss_register_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK, + nic_io->hwdev, sss_nic_pf_mag_mbx_handler); + if (ret != 0) + goto register_pf_mag_mbx_cb_error; + + return 0; + +register_pf_mag_mbx_cb_error: + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + +register_pf_mbx_cb_error: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + +register_pf_mag_event_handler: + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + + return ret; +} + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io) +{ + if (sss_get_func_type(nic_io->hwdev) == SSS_FUNC_TYPE_VF) { + sss_nic_vf_unregister(nic_io); + } else { + if (nic_io->vf_info_group) { + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_pf_mbx_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_SSSLINK); + sss_unregister_mgmt_msg_handler(nic_io->hwdev, SSS_MOD_TYPE_L2NIC); + } +} + +static void sss_nic_clear_vf_info(struct sss_nic_io *nic_io, u16 vf_id) +{ + u16 func_id; + struct sss_nic_vf_info *vf_info; + + func_id = sss_get_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + vf_info = nic_io->vf_info_group + SSSNIC_HW_VF_ID_TO_OS(vf_id); + if (vf_info->specified_mac) + sss_nic_del_mac(nic_io->nic_dev, vf_info->drv_mac, + vf_info->pf_vlan, func_id, SSS_CHANNEL_NIC); + + if (sss_nic_vf_info_vlan_prio(nic_io, vf_id)) + sss_nic_destroy_vf_vlan(nic_io, vf_id); + + if (vf_info->max_rate && SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + sss_nic_set_vf_tx_rate_limit(nic_io, vf_id, 0, 0); + + if (vf_info->spoofchk) + sss_nic_set_vf_spoofchk(nic_io, vf_id, false); + +#ifdef HAVE_NDO_SET_VF_TRUST + if (vf_info->trust) + sss_nic_set_vf_trust(nic_io, vf_id, false); +#endif + + memset(vf_info, 0, sizeof(*vf_info)); + sss_nic_init_vf_info(nic_io, SSSNIC_HW_VF_ID_TO_OS(vf_id)); +} + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io) +{ + u16 i; + + for (i = 0; i < nic_io->max_vf_num; i++) + sss_nic_clear_vf_info(nic_io, SSSNIC_OS_VF_ID_TO_HW(i)); +} diff --git a/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..4256e118558e1289c96041aa218693e7d745a952 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/sss_nic_vf_cfg.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_NIC_VF_CFG_H +#define SSS_NIC_VF_CFG_H + +#include "sss_nic_cfg_vf_define.h" +#include "sss_nic_io_define.h" + +#define SSSNIC_GET_VF_SPOOFCHK(nic_io, vf_id) \ + (!!(nic_io)->vf_info_group[vf_id].spoofchk) + +int sss_nic_set_vf_spoofchk(struct sss_nic_io *nic_io, u16 vf_id, bool spoofchk); + 
+int sss_nic_create_vf_vlan(struct sss_nic_io *nic_io, int vf_id, u16 vlan, u8 qos); + +int sss_nic_destroy_vf_vlan(struct sss_nic_io *nic_io, int vf_id); + +u16 sss_nic_vf_info_vlan_prio(struct sss_nic_io *nic_io, int vf_id); + +int sss_nic_set_vf_tx_rate_limit(struct sss_nic_io *nic_io, u16 vf_id, u32 min_rate, u32 max_rate); + +void sss_nic_get_vf_attribute(struct sss_nic_io *nic_io, u16 vf_id, + struct ifla_vf_info *ifla_vf); + +int sss_nic_set_vf_link_state(struct sss_nic_io *nic_io, u16 vf_id, int link); + +void sss_nic_clear_all_vf_info(struct sss_nic_io *nic_io); + +#ifdef HAVE_NDO_SET_VF_TRUST +bool sss_nic_get_vf_trust(struct sss_nic_io *nic_io, int vf_id); +int sss_nic_set_vf_trust(struct sss_nic_io *nic_io, u16 vf_id, bool trust); +#endif + +int sss_nic_set_vf_vlan(struct sss_nic_io *nic_io, u8 opcode, u16 vid, + u8 qos, int vf_id); + +int sss_nic_register_io_callback(struct sss_nic_io *nic_io); + +void sss_nic_unregister_io_callback(struct sss_nic_io *nic_io); + +int sss_nic_init_pf_vf_info(struct sss_nic_io *nic_io); + +void sss_nic_deinit_pf_vf_info(struct sss_nic_io *nic_io); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h new file mode 100644 index 0000000000000000000000000000000000000000..b91ecd8e7e3a1632e1b868d15fe8d3e56fb479b9 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_H +#define SSS_TOOL_NIC_H + +#define SSS_TOOL_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ + +#define SSS_TOOL_MSG_QOS_DEV_TRUST BIT(0) +#define SSS_TOOL_MSG_QOS_DEV_DFT_COS BIT(1) +#define SSS_TOOL_MSG_QOS_DEV_PCP2COS BIT(2) +#define SSS_TOOL_MSG_QOS_DEV_DSCP2COS BIT(3) + +struct sss_tool_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +struct sss_tool_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct sss_tool_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct sss_tool_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + u32 q_depth; + u16 pi_reverse; /* TODO: what is this? 
*/
+	u16 wqebb_size;
+	u8 priority;
+	u16 *ci_addr;
+	u64 cla_addr;
+	void *slq_handle;
+	/* TODO: NIC doesn't use direct wqe */
+	struct sss_tool_hw_page direct_wqe;
+	struct sss_tool_hw_page doorbell;
+	u32 page_idx;
+	u32 glb_sq_id;
+};
+
+struct sss_tool_rq_info {
+	u16 q_id;
+	u16 delta;
+	u16 hw_pi;
+	u16 ci; /* sw_ci */
+	u16 sw_pi;
+	u16 wqebb_size;
+	u16 q_depth;
+	u16 buf_len;
+
+	void *slq_handle;
+	u64 ci_wqe_page_addr;
+	u64 ci_cla_tbl_addr;
+
+	u8 coalesc_timer_cfg;
+	u8 pending_limt;
+	u16 msix_idx;
+	u32 msix_vector;
+};
+
+struct sss_tool_msg_head {
+	u8 status;
+	u8 rsvd1[3];
+};
+
+struct sss_tool_dcb_state {
+	struct sss_tool_msg_head head;
+
+	u16 op_code; /* 0 - get dcb state, 1 - set dcb state */
+	u8 state; /* 0 - disable, 1 - enable dcb */
+	u8 rsvd;
+};
+
+struct sss_tool_qos_dev_cfg {
+	struct sss_tool_msg_head head;
+
+	u8 op_code; /* 0 - get, 1 - set */
+	u8 rsvd0;
+	u16 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos, bit2 - pcp2cos, bit3 - dscp2cos */
+
+	u8 trust; /* 0 - pcp, 1 - dscp */
+	u8 dft_cos;
+	u16 rsvd1;
+	u8 pcp2cos[8]; /* all 8 mappings must be configured together */
+
+	/* When configuring dscp2cos, a cos value of 0xFF makes the */
+	/* driver ignore that dscp priority. Multiple dscp-to-cos */
+	/* mappings may be configured in a single call. */
+	u8 dscp2cos[64];
+	u32 rsvd2[4];
+};
+
+struct sss_tool_qos_cos_cfg {
+	struct sss_tool_msg_head head;
+
+	u8 port_id;
+	u8 func_cos_bitmap;
+	u8 port_cos_bitmap;
+	u8 func_max_cos_num;
+	u32 rsvd2[4];
+};
+
+#endif /* SSS_TOOL_NIC_H */
+
diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c
new file mode 100644
index 0000000000000000000000000000000000000000..a49d6bc8b3ba2ab2fa7b3c12f70b16db86e9453a
--- /dev/null
+++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 3snic Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt
+
+#include "sss_nic_cfg.h"
+#include "sss_nic_dcb.h"
+#include "sss_tool_comm.h"
+#include "sss_tool_nic.h"
+#include "sss_nic_rx_init.h"
+#include "sss_nic_netdev_ops_api.h"
+
+#define SSS_TOOL_DBG_DFLT_DSCP_VAL 0xFF
+
+static int sss_tool_update_pcp_cfg(struct sss_nic_dev *nic_dev,
+				   const struct sss_tool_qos_dev_cfg *qos_cfg)
+{
+	u8 valid_cos_bitmap = 0;
+	u8 cos_num = 0;
+	int i;
+
+	if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_PCP2COS))
+		return 0;
+
+	for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) {
+		if (!(nic_dev->dft_func_cos_bitmap & BIT(qos_cfg->pcp2cos[i]))) {
+			tool_err("Invalid pcp cos:%u, func cos valid map is %u",
+				 qos_cfg->pcp2cos[i], nic_dev->dft_func_cos_bitmap);
+			return -EINVAL;
+		}
+
+		if ((BIT(qos_cfg->pcp2cos[i]) & valid_cos_bitmap) == 0) {
+			cos_num++;
+			valid_cos_bitmap |= (u8)BIT(qos_cfg->pcp2cos[i]);
+		}
+	}
+
+	nic_dev->backup_dcb_cfg.pcp_valid_cos_map = valid_cos_bitmap;
+	nic_dev->backup_dcb_cfg.pcp_user_cos_num = cos_num;
+	memcpy(nic_dev->backup_dcb_cfg.pcp2cos, qos_cfg->pcp2cos, sizeof(qos_cfg->pcp2cos));
+
+	return 0;
+}
+
+static int sss_tool_update_dscp_cfg(struct sss_nic_dev *nic_dev,
+				    const struct sss_tool_qos_dev_cfg *qos_cfg)
+{
+	u8 valid_cos_bitmap = 0;
+	u8 cos_num = 0;
+	u8 cos;
+	int i;
+
+	if (!(qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DSCP2COS))
+		return 0;
+
+	for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) {
+		if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL)
+			cos = qos_cfg->dscp2cos[i];
+		else
+			cos = nic_dev->backup_dcb_cfg.dscp2cos[i];
+
+		if (cos >= SSSNIC_DCB_UP_MAX || !(nic_dev->dft_func_cos_bitmap & BIT(cos))) {
+			tool_err("Invalid dscp cos:%u, func cos valid 
map is %u", + cos, nic_dev->dft_func_cos_bitmap); + return -EINVAL; + } + + if ((BIT(cos) & valid_cos_bitmap) == 0) { + cos_num++; + valid_cos_bitmap |= (u8)BIT(cos); + } + } + + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) { + if (qos_cfg->dscp2cos[i] != SSS_TOOL_DBG_DFLT_DSCP_VAL) + nic_dev->backup_dcb_cfg.dscp2cos[i] = qos_cfg->dscp2cos[i]; + else + nic_dev->backup_dcb_cfg.dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + } + + nic_dev->backup_dcb_cfg.dscp_valid_cos_map = valid_cos_bitmap; + nic_dev->backup_dcb_cfg.dscp_user_cos_num = cos_num; + + return 0; +} + +static int sss_tool_update_pcp_dscp_cfg(struct sss_nic_dev *nic_dev, + const struct sss_tool_qos_dev_cfg *qos_cfg) +{ + int ret; + + ret = sss_tool_update_pcp_cfg(nic_dev, qos_cfg); + if (ret != 0) { + tool_err("Fail to update pcp cfg\n"); + return ret; + } + + ret = sss_tool_update_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + tool_err("Fail to update dscp cfg\n"); + + return ret; +} + +static int sss_tool_update_wanted_qos_cfg(struct sss_nic_dev *nic_dev, + const void *in_buf) +{ + const struct sss_tool_qos_dev_cfg *qos_cfg = in_buf; + u8 valid_cos_bitmap; + u8 cos_num; + int ret; + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_TRUST) { + if (qos_cfg->trust > DCB_DSCP) { + tool_err("Invalid trust:%u of qos cfg\n", qos_cfg->trust); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.trust = qos_cfg->trust; + } + + if (qos_cfg->cfg_bitmap & SSS_TOOL_MSG_QOS_DEV_DFT_COS) { + if (!(BIT(qos_cfg->dft_cos) & nic_dev->dft_func_cos_bitmap)) { + tool_err("Invalid default cos:%u of qos cfg\n", qos_cfg->dft_cos); + return -EINVAL; + } + + nic_dev->backup_dcb_cfg.default_cos = qos_cfg->dft_cos; + } + + ret = sss_tool_update_pcp_dscp_cfg(nic_dev, qos_cfg); + if (ret != 0) + return ret; + + if (nic_dev->backup_dcb_cfg.trust != DCB_PCP) { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.dscp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.dscp_user_cos_num; + } else { + valid_cos_bitmap = nic_dev->backup_dcb_cfg.pcp_valid_cos_map; + cos_num = nic_dev->backup_dcb_cfg.pcp_user_cos_num; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + if (cos_num > nic_dev->qp_res.qp_num) { + tool_err("Invalid cos num, DCB is on, cos num:%d need less than channel num:%u\n", + cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + } + + if (!(BIT(nic_dev->backup_dcb_cfg.default_cos) & valid_cos_bitmap)) { + tool_info("Success to update cos %u to %u\n", + nic_dev->backup_dcb_cfg.default_cos, (u8)fls(valid_cos_bitmap) - 1); + nic_dev->backup_dcb_cfg.default_cos = (u8)fls(valid_cos_bitmap) - 1; + } + + return 0; +} + +static int sss_tool_set_tx_cos_state(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 i; + struct sss_nic_dcb_info dcb_info = {0}; + struct sss_nic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + + dcb_info.trust = dcb_cfg->trust; + dcb_info.default_cos = dcb_cfg->default_cos; + dcb_info.dcb_on = dcb_en; + + if (!dcb_en) { + memset(dcb_info.dscp2cos, dcb_cfg->default_cos, sizeof(dcb_info.dscp2cos)); + memset(dcb_info.pcp2cos, dcb_cfg->default_cos, sizeof(dcb_info.pcp2cos)); + + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + dcb_info.dscp2cos[i] = dcb_cfg->dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_COS_MAX; i++) + dcb_info.pcp2cos[i] = dcb_cfg->pcp2cos[i]; + } + + ret = sss_nic_set_dcb_info(nic_dev->nic_io, &dcb_info); + if (ret != 0) + tool_err("Fail to set dcb state\n"); + + return ret; +} + +static int sss_tool_configure_dcb_hw(struct sss_nic_dev *nic_dev, u8 dcb_en) +{ + int ret; + u8 user_cos_num = 
sss_nic_get_user_cos_num(nic_dev); + + ret = sss_nic_set_hw_dcb_state(nic_dev, 1, dcb_en); + if (ret != 0) { + tool_err("Fail to set dcb state\n"); + return ret; + } + + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + sss_nic_update_sq_cos(nic_dev, dcb_en); + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + /* VF does not support DCB, use the default cos */ + nic_dev->hw_dcb_cfg.default_cos = (u8)fls(nic_dev->dft_func_cos_bitmap) - 1; + + return 0; + } + + ret = sss_tool_set_tx_cos_state(nic_dev, dcb_en); + if (ret != 0) { + tool_err("Fail to set tx cos state\n"); + goto set_tx_cos_fail; + } + + ret = sss_nic_update_rx_rss(nic_dev); + if (ret != 0) { + tool_err("Fail to configure rx\n"); + goto update_rx_rss_fail; + } + + if (!dcb_en) + SSSNIC_CLEAR_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + else + SSSNIC_SET_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + + return 0; +update_rx_rss_fail: + sss_tool_set_tx_cos_state(nic_dev, dcb_en ? 0 : 1); + +set_tx_cos_fail: + sss_nic_update_sq_cos(nic_dev, dcb_en ? 0 : 1); + sss_nic_set_hw_dcb_state(nic_dev->hwdev, 1, dcb_en ? 0 : 1); + + return ret; +} + +static int sss_tool_setup_cos(struct net_device *netdev, u8 cos) +{ + struct sss_nic_dev *nic_dev = netdev_priv(netdev); + + if (cos > nic_dev->max_cos_num) { + tool_err("Invalid num_tc: %u more then max cos: %u\n", cos, nic_dev->max_cos_num); + return -EINVAL; + } + + if (cos && SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_SAME_RXTX)) { + tool_err("Fail to enable DCB while Symmetric RSS is enabled\n"); + return -EOPNOTSUPP; + } + + return sss_tool_configure_dcb_hw(nic_dev, cos ? 1 : 0); +} + +static void sss_tool_change_qos_cfg(struct sss_nic_dev *nic_dev, + const struct sss_nic_dcb_config *dcb_cfg) +{ + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + + sss_nic_sync_dcb_cfg(nic_dev, dcb_cfg); + sss_nic_update_qp_cos_map(nic_dev, user_cos_num); + + clear_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); +} + +static int sss_tool_dcbcfg_set_up_bitmap(struct sss_nic_dev *nic_dev) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_nic_dcb_config old_dcb_cfg; + bool netif_run = false; + + memcpy(&old_dcb_cfg, &nic_dev->hw_dcb_cfg, sizeof(struct sss_nic_dcb_config)); + + if (!memcmp(&nic_dev->backup_dcb_cfg, &old_dcb_cfg, sizeof(struct sss_nic_dcb_config))) { + tool_info("Valid up bitmap is the same, nothing has to change\n"); + return 0; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + netif_run = true; + } + + if (test_and_set_bit(SSSNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + tool_warn("Cos up map setup in inprocess, please try again later\n"); + ret = -EFAULT; + goto set_qos_cfg_fail; + } + + sss_tool_change_qos_cfg(nic_dev, &nic_dev->backup_dcb_cfg); + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) { + ret = sss_tool_setup_cos(nic_dev->netdev, user_cos_num); + if (ret != 0) + goto setup_cos_fail; + } + + if (netif_run) { + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) + goto vport_up_fail; + } + + rtnl_unlock(); + + return 0; + +vport_up_fail: + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE)) + sss_tool_setup_cos(nic_dev->netdev, user_cos_num ? 
0 : user_cos_num); + +setup_cos_fail: + sss_tool_change_qos_cfg(nic_dev, &old_dcb_cfg); + +set_qos_cfg_fail: + if (netif_run) + sss_nic_vport_up(nic_dev); + + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 i; + struct sss_tool_qos_dev_cfg *qos_out = out_buf; + + if (!out_buf || !out_len || !in_buf) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*qos_out) || *out_len != sizeof(*qos_out)) { + tool_err("Invalid in len: %u or outlen: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*qos_out)); + return -EINVAL; + } + + memcpy(qos_out, in_buf, sizeof(*qos_out)); + qos_out->head.status = 0; + if (qos_out->op_code & SSS_TOOL_DCB_OPCODE_WR) { + memcpy(&nic_dev->backup_dcb_cfg, &nic_dev->hw_dcb_cfg, + sizeof(struct sss_nic_dcb_config)); + ret = sss_tool_update_wanted_qos_cfg(nic_dev, in_buf); + if (ret != 0) { + qos_out->head.status = SSS_TOOL_EINVAL; + return 0; + } + + ret = sss_tool_dcbcfg_set_up_bitmap(nic_dev); + if (ret != 0) + qos_out->head.status = SSS_TOOL_EIO; + } else { + for (i = 0; i < SSSNIC_DCB_IP_PRI_MAX; i++) + qos_out->dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + for (i = 0; i < SSSNIC_DCB_UP_MAX; i++) + qos_out->pcp2cos[i] = nic_dev->hw_dcb_cfg.pcp2cos[i]; + qos_out->trust = nic_dev->hw_dcb_cfg.trust; + qos_out->dft_cos = nic_dev->hw_dcb_cfg.default_cos; + } + + return 0; +} + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u8 user_cos_num = sss_nic_get_user_cos_num(nic_dev); + struct sss_tool_dcb_state *dcb_out = out_buf; + const struct sss_tool_dcb_state *dcb_in = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*dcb_in) || *out_len != sizeof(*dcb_out)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*dcb_in)); + return -EINVAL; + } + + memcpy(dcb_out, dcb_in, sizeof(*dcb_in)); + dcb_out->head.status = 0; + + if (!(dcb_in->op_code & SSS_TOOL_DCB_OPCODE_WR)) { + dcb_out->state = !!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE); + return 0; + } + + if (SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_DCB_ENABLE) == dcb_in->state) + return 0; + + if (dcb_in->state && user_cos_num > nic_dev->qp_res.qp_num) { + tool_err("Fail to mt dcb state, cos num %u larger than channel num %u\n", + user_cos_num, nic_dev->qp_res.qp_num); + return -EOPNOTSUPP; + } + + rtnl_lock(); + if (netif_running(nic_dev->netdev)) { + sss_nic_vport_down(nic_dev); + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? user_cos_num : 0); + if (ret != 0) { + sss_nic_vport_up(nic_dev); + rtnl_unlock(); + return ret; + } + + ret = sss_nic_vport_up(nic_dev); + if (ret != 0) { + sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 0 : user_cos_num); + sss_nic_vport_up(nic_dev); + } + + rtnl_unlock(); + return ret; + } + + ret = sss_tool_setup_cos(nic_dev->netdev, dcb_in->state ? 
user_cos_num : 0); + rtnl_unlock(); + + return ret; +} + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_qos_cos_cfg *out_cfg = out_buf; + const struct sss_tool_qos_cos_cfg *in_cfg = in_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*in_cfg) || *out_len != sizeof(*out_cfg)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*in_cfg)); + return -EINVAL; + } + + memcpy(out_cfg, in_cfg, sizeof(*in_cfg)); + out_cfg->func_max_cos_num = nic_dev->max_cos_num; + out_cfg->head.status = 0; + out_cfg->port_cos_bitmap = (u8)nic_dev->dft_port_cos_bitmap; + out_cfg->func_cos_bitmap = (u8)nic_dev->dft_func_cos_bitmap; + out_cfg->port_id = sss_get_phy_port_id(nic_dev->hwdev); + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..c6f6f3c34925f6d3d18a78e8681d56fa92e5813c --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_dcb.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_DCB_H +#define SSS_TOOL_NIC_DCB_H + +int sss_tool_dcb_mt_qos_map(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_dcb_state(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_dcb_mt_hw_qos_get(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c new file mode 100644 index 0000000000000000000000000000000000000000..d9a6210b1608a10ceef21340e96aa651c2363258 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_mag_cfg.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_tool_nic_dcb.h" +#include "sss_tool_nic_qp_info.h" +#include "sss_tool_nic_phy_attr.h" +#include "sss_tool_nic_stats.h" + +typedef int (*sss_tool_cmd_func)(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +struct sss_tool_cmd_handle { + enum sss_tool_driver_cmd_type cmd_type; + sss_tool_cmd_func func; +}; + +static int sss_tool_get_nic_version(void *out_buf, const u32 *out_len) +{ + struct sss_tool_drv_version_info *ver_info = out_buf; + int ret; + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*ver_info)) { + tool_err("Invalid out len :%u is not equal to %lu\n", + *out_len, sizeof(*ver_info)); + return -EINVAL; + } + + ret = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", + SSSNIC_DRV_VERSION, __TIME_STR__); + if (ret < 0) + return -EINVAL; + + return 0; +} + +static const struct sss_tool_cmd_handle sss_tool_nic_cmd_handle[] = { + {SSS_TOOL_GET_TX_INFO, sss_tool_get_tx_info}, + {SSS_TOOL_GET_RX_INFO, sss_tool_get_rx_info}, + {SSS_TOOL_GET_TX_WQE_INFO, sss_tool_get_tx_wqe_info}, + 
{SSS_TOOL_GET_RX_WQE_INFO, sss_tool_get_rx_wqe_info}, + {SSS_TOOL_GET_Q_NUM, sss_tool_get_q_num}, + {SSS_TOOL_GET_RX_CQE_INFO, sss_tool_get_rx_cqe_info}, + {SSS_TOOL_GET_INTER_NUM, sss_tool_get_inter_num}, + {SSS_TOOL_SET_PF_BW_LIMIT, sss_tool_set_pf_bw_limit}, + {SSS_TOOL_GET_PF_BW_LIMIT, sss_tool_get_pf_bw_limit}, + {SSS_TOOL_GET_LOOPBACK_MODE, sss_tool_get_loopback_mode}, + {SSS_TOOL_SET_LOOPBACK_MODE, sss_tool_set_loopback_mode}, + {SSS_TOOL_GET_TX_TIMEOUT, sss_tool_get_netdev_tx_timeout}, + {SSS_TOOL_SET_TX_TIMEOUT, sss_tool_set_netdev_tx_timeout}, + {SSS_TOOL_GET_SSET_COUNT, sss_tool_get_sset_count}, + {SSS_TOOL_GET_SSET_ITEMS, sss_tool_get_sset_stats}, + {SSS_TOOL_GET_XSFP_PRESENT, sss_tool_get_xsfp_present}, + {SSS_TOOL_GET_XSFP_INFO, sss_tool_get_xsfp_info}, + {SSS_TOOL_GET_ULD_DEV_NAME, sss_tool_get_netdev_name}, + {SSS_TOOL_CLEAR_FUNC_STATS, sss_tool_clear_func_stats}, + {SSS_TOOL_SET_LINK_MODE, sss_tool_set_link_mode}, + {SSS_TOOL_DCB_STATE, sss_tool_dcb_mt_dcb_state}, + {SSS_TOOL_QOS_DEV, sss_tool_dcb_mt_qos_map}, + {SSS_TOOL_GET_QOS_COS, sss_tool_dcb_mt_hw_qos_get}, +}; + +static int sss_tool_cmd_to_nic_driver(struct sss_nic_dev *nic_dev, + u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int idx; + int cmd_num = ARRAY_LEN(sss_tool_nic_cmd_handle); + enum sss_tool_driver_cmd_type cmd_type = (enum sss_tool_driver_cmd_type)cmd; + int ret = -EINVAL; + + mutex_lock(&nic_dev->qp_mutex); + for (idx = 0; idx < cmd_num; idx++) { + if (cmd_type == sss_tool_nic_cmd_handle[idx].cmd_type) { + ret = sss_tool_nic_cmd_handle[idx].func + (nic_dev, in_buf, in_len, out_buf, out_len); + break; + } + } + mutex_unlock(&nic_dev->qp_mutex); + + if (idx == cmd_num) + tool_err("Fail to send to nic driver, cmd %d is not exist\n", cmd_type); + + return ret; +} + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (cmd == SSS_TOOL_GET_DRV_VERSION) + return sss_tool_get_nic_version(out_buf, out_len); + + if (!uld_dev) + return -EINVAL; + + return sss_tool_cmd_to_nic_driver(uld_dev, cmd, in_buf, in_len, out_buf, out_len); +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h new file mode 100644 index 0000000000000000000000000000000000000000..64bbd9c3a40c25fcc7e0f0078f67b2572432173e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_func.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_FUNC_H +#define SSS_TOOL_NIC_FUNC_H + +int sss_tool_ioctl(void *uld_dev, u32 cmd, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c new file mode 100644 index 0000000000000000000000000000000000000000..d79be081f5cc7e8a75045b6769b0e28578198067 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_nic_cfg.h" +#include "sss_nic_mag_cfg.h" +#include "sss_nic_dev_define.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" + +enum sss_tool_link_mode { + SSS_TOOL_LINK_MODE_AUTO = 0, + SSS_TOOL_LINK_MODE_UP, + 
SSS_TOOL_LINK_MODE_DOWN, + SSS_TOOL_LINK_MODE_MAX, +}; + +typedef void (*sss_tool_set_link_mode_handler_t)(struct sss_nic_dev *nic_dev); + +int sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_loop_mode *mode = out_buf; + + if (!out_len || !mode) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(*mode)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*mode)); + return -EINVAL; + } + + return sss_nic_get_loopback_mode(nic_dev, (u8 *)&mode->loop_mode, + (u8 *)&mode->loop_ctrl); +} + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + const struct sss_tool_loop_mode *mode = in_buf; + + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set loopback mode, netdev is down\n"); + return -EFAULT; + } + + if (!mode || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (in_len != sizeof(*mode) || *out_len != sizeof(*mode)) { + tool_err("Invalid in len %u or out len %u is not equal to %lu\n", + in_len, *out_len, sizeof(*mode)); + return -EINVAL; + } + + ret = sss_nic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); + if (ret == 0) + tool_info("Success to set loopback mode %u en %u\n", + mode->loop_mode, mode->loop_ctrl); + + return ret; +} + +static bool sss_tool_check_param_valid(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + const u32 *out_len) +{ + if (!SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_INTF_UP)) { + tool_err("Fail to set link mode, netdev is down\n"); + return false; + } + + if (!in_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return false; + } + + if (in_len != sizeof(SSS_TOOL_LINK_MODE_MAX) || + *out_len != sizeof(SSS_TOOL_LINK_MODE_MAX)) { + tool_err("Invalid in len %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(SSS_TOOL_LINK_MODE_MAX)); + return false; + } + + return true; +} + +static void sss_tool_set_link_status(struct sss_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!SSS_CHANNEL_RES_VALID(nic_dev) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_LP_TEST) || + SSSNIC_TEST_NIC_DEV_FLAG(nic_dev, SSSNIC_FORCE_LINK_UP)) + return; + + if (!status) { + if (!netif_carrier_ok(netdev)) + return; + + tool_info("Link down\n"); + nic_dev->link_status = status; + netif_carrier_off(netdev); + + } else { + if (netif_carrier_ok(netdev)) + return; + + tool_info("Link up\n"); + nic_dev->link_status = status; + netif_carrier_on(netdev); + } +} + +static void sss_tool_link_mode_auto(struct sss_nic_dev *nic_dev) +{ + u8 link_status; + + if (sss_nic_get_hw_link_state(nic_dev, &link_status)) + link_status = false; + + sss_tool_set_link_status(nic_dev, (bool)link_status); + tool_info("Success to set link mode to auto, the state is link %s\n", + (link_status ? 
"up" : "down")); +} + +static void sss_tool_link_mode_up(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, true); + tool_info("Success to set link mode to up\n"); +} + +static void sss_tool_link_mode_down(struct sss_nic_dev *nic_dev) +{ + sss_tool_set_link_status(nic_dev, false); + tool_info("Success to set link mode to down\n"); +} + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const enum sss_tool_link_mode *mode = in_buf; + + sss_tool_set_link_mode_handler_t handler[] = { + sss_tool_link_mode_auto, + sss_tool_link_mode_up, + sss_tool_link_mode_down, + }; + + if (!sss_tool_check_param_valid(nic_dev, in_buf, in_len, out_len)) + return -EFAULT; + + if (*mode >= SSS_TOOL_LINK_MODE_MAX) { + tool_err("Fail to set link mode, mode %d\n", *mode); + return -EINVAL; + } + + handler[*mode](nic_dev); + + return 0; +} + +static int sss_tool_update_pf_bw_limit(struct sss_nic_dev *nic_dev, u32 bw_limit) +{ + int ret; + u32 old_bw_limit; + struct sss_nic_port_info port_info = {0}; + struct sss_nic_io *nic_io = nic_dev->nic_io; + + if (!nic_io) + return -EINVAL; + + if (bw_limit > SSSNIC_PF_LIMIT_BW_MAX) { + tool_err("Fail to update pf bw limit, bandwidth: %u larger than max limit: %u\n", + bw_limit, SSSNIC_PF_LIMIT_BW_MAX); + return -EINVAL; + } + + old_bw_limit = nic_io->mag_cfg.pf_bw_limit; + nic_io->mag_cfg.pf_bw_limit = bw_limit; + + if (!SSSNIC_SUPPORT_RATE_LIMIT(nic_io)) + return 0; + + ret = sss_nic_get_hw_port_info(nic_dev, &port_info, SSS_CHANNEL_NIC); + if (ret != 0) { + tool_err("Fail to get port info\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; + return -EIO; + } + + ret = sss_nic_set_pf_rate(nic_dev, port_info.speed); + if (ret != 0) { + tool_err("Fail to set pf bandwidth\n"); + nic_io->mag_cfg.pf_bw_limit = old_bw_limit; + return ret; + } + + return 0; +} + +static int sss_tool_check_preconditions(struct sss_nic_dev *nic_dev, + const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len) +{ + int ret; + u8 link_state = 0; + + if (SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len %u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(link_state)) { + tool_err("Invalid out len %u is not equal to %lu\n", + *out_len, sizeof(link_state)); + return -EINVAL; + } + + ret = sss_nic_get_hw_link_state(nic_dev, &link_state); + if (ret != 0) { + tool_err("Fail to get link state\n"); + return -EIO; + } + + if (!link_state) { + tool_err("Fail to set pf rate, must be link up\n"); + return -EINVAL; + } + + return 0; +} + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u32 pf_bw_limit; + + ret = sss_tool_check_preconditions(nic_dev, in_buf, in_len, out_buf, out_len); + if (ret != 0) + return -EINVAL; + + pf_bw_limit = *((u32 *)in_buf); + + ret = sss_tool_update_pf_bw_limit(nic_dev, pf_bw_limit); + if (ret != 0) { + tool_err("Fail to set pf bandwidth limit to %u%%\n", pf_bw_limit); + if (ret < 0) + return ret; + } + + *((u8 *)out_buf) = (u8)ret; + + return 0; +} + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_io *nic_io = NULL; + + if 
(SSSNIC_FUNC_IS_VF(nic_dev->hwdev)) { + tool_err("Fail to get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EINVAL; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Invalid out len %d is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EFAULT; + } + + nic_io = nic_dev->nic_io; + if (!nic_io) + return -EINVAL; + + *((u32 *)out_buf) = nic_io->mag_cfg.pf_bw_limit; + + return 0; +} + +int sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != IFNAMSIZ) { + tool_err("Invalid out len %u is not equal to %u\n\n", + *out_len, IFNAMSIZ); + return -EINVAL; + } + + strlcpy(out_buf, nic_dev->netdev->name, IFNAMSIZ); + + return 0; +} + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int *tx_timeout = out_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!out_buf || !out_len) { + tool_err("Fail to get netdev tx timeout, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != sizeof(in_len)) { + tool_err("Fail to get netdev tx timeout, out len %u is not equal to %lu\n", + *out_len, sizeof(in_len)); + return -EINVAL; + } + + *tx_timeout = net_dev->watchdog_timeo; + + return 0; +} + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + const int *tx_timeout = in_buf; + struct net_device *net_dev = nic_dev->netdev; + + if (!in_buf) { + tool_err("Invalid in buf is null\n"); + return -EFAULT; + } + + if (in_len != sizeof(in_len)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + net_dev->watchdog_timeo = *tx_timeout * HZ; + tool_info("Success to set tx timeout check period to %ds\n", *tx_timeout); + + return 0; +} + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_nic_mbx_get_xsfp_present *sfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*sfp_info) || *out_len != sizeof(*sfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*sfp_info)); + return -EINVAL; + } + + sfp_info->abs_status = sss_nic_if_sfp_absent(nic_dev); + sfp_info->head.state = 0; + + return 0; +} + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + struct sss_nic_mbx_get_xsfp_info *xsfp_info = out_buf; + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (in_len != sizeof(*xsfp_info) || *out_len != sizeof(*xsfp_info)) { + tool_err("Invalid in len: %u or out len: %u is not equal to %lu\n", + in_len, *out_len, sizeof(*xsfp_info)); + return -EINVAL; + } + + ret = sss_nic_get_sfp_info(nic_dev, xsfp_info); + if (ret != 0) + xsfp_info->head.state = SSS_TOOL_EIO; + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h new file mode 100644 index 
0000000000000000000000000000000000000000..5deb5348182947ff447b71d70421879a95cce41b --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_phy_attr.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_PHY_ATTR_H +#define SSS_TOOL_NIC_PHY_ATTR_H + +int sss_tool_get_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_loopback_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_link_mode(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_pf_bw_limit(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_netdev_name(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_set_netdev_tx_timeout(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_present(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_xsfp_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c new file mode 100644 index 0000000000000000000000000000000000000000..1dae86be7bdf74010ded851aacd19264ba31de1e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_nic_tx.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" + +static int sss_tool_get_wqe_info(struct sss_nic_dev *nic_dev, + u16 q_id, u16 wqe_id, u16 wqebb_cnt, + u8 *out_buff, const u16 *out_len, + enum sss_nic_queue_type q_type) +{ + u32 i; + void *src_wqebb = NULL; + u32 offset; + struct sss_nic_io_queue *queue = NULL; + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= nic_io->max_qp_num) { + tool_err("Fail to get wqe info, q_id[%u] > num_qps_cfg[%u].\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + if (q_type == SSSNIC_RQ) + queue = &nic_io->rq_group[q_id]; + else + queue = &nic_io->sq_group[q_id]; + + if ((wqe_id + wqebb_cnt) > queue->wq.q_depth) { + tool_err("Fail to get wqe info, (idx[%u] + idx[%u]) > q_depth[%u].\n", + wqe_id, wqebb_cnt, queue->wq.q_depth); + return -EINVAL; + } + + if (*out_len != (queue->wq.elem_size * wqebb_cnt)) { + tool_err("Fail to get wqe info, out len :%u is not equal to %d\n", + *out_len, (queue->wq.elem_size * wqebb_cnt)); + return -EINVAL; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqebb = sss_wq_wqebb_addr(&queue->wq, + (u16)SSS_WQ_MASK_ID(&queue->wq, wqe_id + i)); + offset = queue->wq.elem_size * i; + memcpy(out_buff + offset, src_wqebb, queue->wq.elem_size); + } + + return 0; +} 
+ +static void sss_tool_get_sq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_sq_info *sq_info) +{ + struct sss_nic_io_queue *sq = NULL; + + sq = &nic_io->sq_group[q_id]; + + sq_info->q_depth = sq->wq.q_depth; + sq_info->q_id = q_id; + sq_info->pi = sss_nic_get_sq_local_pi(sq); + sq_info->doorbell.map_addr = (u64 *)sq->db_addr; + sq_info->fi = sss_nic_get_sq_hw_ci(sq); + sq_info->wqebb_size = sq->wq.elem_size; + sq_info->ci = sss_nic_get_sq_local_ci(sq); + sq_info->ci_addr = sq->tx.ci_addr; + sq_info->slq_handle = sq; + sq_info->cla_addr = sq->wq.block_paddr; +} + +static void sss_tool_get_rq_info(struct sss_nic_io *nic_io, u16 q_id, + struct sss_tool_rq_info *rq_info) +{ + struct sss_nic_io_queue *rq = NULL; + + rq = &nic_io->rq_group[q_id]; + + rq_info->msix_idx = rq->msix_id; + rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_vaddr); + rq_info->buf_len = nic_io->rx_buff_len; + rq_info->wqebb_size = rq->wq.elem_size; + rq_info->slq_handle = rq; + rq_info->q_id = q_id; + rq_info->ci_cla_tbl_addr = rq->wq.block_paddr; + rq_info->q_depth = (u16)rq->wq.q_depth; + rq_info->ci_wqe_page_addr = sss_wq_get_first_wqe_page_addr(&rq->wq); +} + +static int sss_tool_get_queue_info(struct sss_nic_dev *nic_dev, u16 q_id, + void *out_buff, enum sss_nic_queue_type q_type) +{ + struct sss_nic_io *nic_io = NULL; + + nic_io = nic_dev->nic_io; + if (!nic_io) { + tool_err("Fail to get wqe info, nic_io is NULL.\n"); + return -EINVAL; + } + + if (q_id >= nic_io->max_qp_num) { + tool_err("Fail to get rq info, input q_id(%u) is larger than max qp num:%u\n", + q_id, nic_io->max_qp_num); + return -EINVAL; + } + + (q_type == SSSNIC_RQ) ? sss_tool_get_rq_info(nic_io, q_id, out_buff) : + sss_tool_get_sq_info(nic_io, q_id, out_buff); + + return 0; +} + +static bool sss_tool_check_input_pointer(struct sss_nic_dev *nic_dev, + const void *in_buf, void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Invalid input param nic_dev\n"); + return false; + } + + if (!in_buf || !out_buf || !out_len) { + tool_err("Invalid input param,in_buf/out_buf/out_len\n"); + return false; + } + + return true; +} + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 q_id; + struct sss_tool_sq_info sq_info = {0}; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(in_len)) { + tool_err("Fail to get tx info, in len :%u is not equal to %lu\n", + in_len, sizeof(in_len)); + return -EINVAL; + } + + if (*out_len != sizeof(sq_info)) { + tool_err("Fail to get tx info, out len :%u is not equal to %lu\n", + *out_len, sizeof(sq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + return sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_SQ); +} + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(*info)) { + tool_err("Fail to get tx wqe info, in len %u is not equal to %lu\n", + in_len, sizeof(*info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_SQ); +} + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int ret; + u16 q_id; + struct 
sss_tool_rq_info *rq_info = out_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(u32)) { + tool_err("Invalid in len: %u is not equal to %lu\n", + in_len, sizeof(u32)); + return -EINVAL; + } + + if (*out_len != sizeof(*rq_info)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(*rq_info)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)in_buf)); + + ret = sss_tool_get_queue_info(nic_dev, q_id, out_buf, SSSNIC_RQ); + if (ret != 0) { + tool_err("Fail to get rq info, ret: %d.\n", ret); + return ret; + } + + rq_info->pending_limt = nic_dev->rq_desc_group[q_id].last_pending_limt; + rq_info->msix_vector = nic_dev->rq_desc_group[q_id].irq_id; + rq_info->delta = (u16)nic_dev->rq_desc_group[q_id].delta; + rq_info->sw_pi = nic_dev->rq_desc_group[q_id].pi; + rq_info->coalesc_timer_cfg = nic_dev->rq_desc_group[q_id].last_coal_timer; + rq_info->ci = (u16)(nic_dev->rq_desc_group[q_id].ci & + nic_dev->rq_desc_group[q_id].qid_mask); + + return 0; +} + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqebb_cnt = 1; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx wqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + return sss_tool_get_wqe_info(nic_dev, (u16)info->q_id, (u16)info->wqe_id, wqebb_cnt, + out_buf, (u16 *)out_len, SSSNIC_RQ); +} + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 wqe_id = 0; + u16 q_id = 0; + const struct sss_tool_wqe_info *info = in_buf; + + if (!sss_tool_check_input_pointer(nic_dev, in_buf, out_buf, out_len)) + return -EINVAL; + + if (in_len != sizeof(struct sss_tool_wqe_info)) { + tool_err("Fail to get rx cqe info, in len: %u is not equal to %lu\n", + in_len, sizeof(struct sss_tool_wqe_info)); + return -EINVAL; + } + + if (*out_len != sizeof(struct sss_nic_cqe)) { + tool_err("Fail to get rx cqe info, out len: %u is not equal to %lu\n", + *out_len, sizeof(struct sss_nic_cqe)); + return -EINVAL; + } + + wqe_id = (u16)info->wqe_id; + q_id = (u16)info->q_id; + + if (q_id >= nic_dev->qp_res.qp_num || wqe_id >= nic_dev->rq_desc_group[q_id].q_depth) { + tool_err("Fail to get rx cqe info, q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", + q_id, nic_dev->qp_res.qp_num, wqe_id, + nic_dev->rq_desc_group[q_id].q_depth); + return -EFAULT; + } + + memcpy(out_buf, nic_dev->rq_desc_group[q_id].rx_desc_group[wqe_id].cqe, + sizeof(struct sss_nic_cqe)); + + return 0; +} + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len) +{ + if (!SSS_CHANNEL_RES_VALID(nic_dev)) { + tool_err("Fail to get queue number, netdev is down\n"); + return -EFAULT; + } + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer.\n"); + return -EINVAL; + } + + if (*out_len != sizeof(nic_dev->qp_res.qp_num)) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, sizeof(nic_dev->qp_res.qp_num)); + return -EINVAL; + } + + *((u16 *)out_buf) = nic_dev->qp_res.qp_num; + + return 0; +} + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u16 intr_num = 
sss_nic_intr_num(nic_dev->hwdev); + + if (!out_buf || !out_len) { + tool_err("Invalid param, use null pointer\n"); + return -EFAULT; + } + + if (*out_len != sizeof(intr_num)) { + tool_err("Invalid out len:%u is not equal to %lu\n", + *out_len, sizeof(intr_num)); + return -EFAULT; + } + + *(u16 *)out_buf = intr_num; + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h new file mode 100644 index 0000000000000000000000000000000000000000..3c7f10c64462560dcb34ace695400c41c4500f7e --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_qp_info.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_QP_INFO_H +#define SSS_TOOL_NIC_QP_INFO_H + +int sss_tool_get_tx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_tx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_wqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_rx_cqe_info(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_q_num(struct sss_nic_dev *nic_dev, const void *in_buf, u32 in_len, + void *out_buf, u32 *out_len); + +int sss_tool_get_inter_num(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..55c4880a0759bb33a9cb14ad15e5f3d691592250 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [TOOL]" fmt + +#include "sss_kernel.h" +#include "sss_tool_comm.h" +#include "sss_tool_nic.h" +#include "sss_nic_netdev_ops_api.h" +#include "sss_nic_ethtool_stats_api.h" + +enum sss_tool_show_set { + SSS_TOOL_SHOW_SSET_IO_STATS = 1, +}; + +static void sss_tool_reset_nicdev_stats(struct sss_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->tx_stats.stats_sync); + nic_dev->tx_stats.rsvd1 = 0; + nic_dev->tx_stats.rsvd2 = 0; + nic_dev->tx_stats.tx_drop = 0; + nic_dev->tx_stats.tx_timeout = 0; + nic_dev->tx_stats.tx_invalid_qid = 0; + u64_stats_update_end(&nic_dev->tx_stats.stats_sync); +} + +static void sss_tool_reset_rq_stats(struct sss_nic_rq_stats *rq_stats) +{ + u64_stats_update_begin(&rq_stats->stats_sync); + rq_stats->reset_drop_sge = 0; + rq_stats->rx_packets = 0; + rq_stats->alloc_rx_dma_err = 0; + rq_stats->rx_bytes = 0; + + rq_stats->csum_errors = 0; + rq_stats->rx_dropped = 0; + rq_stats->errors = 0; + rq_stats->large_xdp_pkts = 0; + rq_stats->rx_buf_errors = 0; + rq_stats->alloc_skb_err = 0; + rq_stats->xdp_dropped = 0; + rq_stats->other_errors = 0; + rq_stats->rsvd2 = 0; + u64_stats_update_end(&rq_stats->stats_sync); +} + +static void sss_tool_reset_sq_stats(struct sss_nic_sq_stats *sq_stats) +{ + u64_stats_update_begin(&sq_stats->stats_sync); + sq_stats->unknown_tunnel_proto = 0; + sq_stats->tx_packets = 
0; + sq_stats->tx_dropped = 0; + sq_stats->frag_len_overflow = 0; + sq_stats->tx_busy = 0; + sq_stats->wake = 0; + sq_stats->skb_pad_err = 0; + sq_stats->dma_map_err = 0; + sq_stats->frag_size_zero = 0; + sq_stats->tx_bytes = 0; + sq_stats->offload_err = 0; + sq_stats->rsvd1 = 0; + sq_stats->rsvd2 = 0; + u64_stats_update_end(&sq_stats->stats_sync); +} + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + int i; + + if (!out_len) { + tool_err("Invalid out len is null\n"); + return -EINVAL; + } + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats)); +#endif + sss_tool_reset_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qp_num; i++) { + sss_tool_reset_rq_stats(&nic_dev->rq_desc_group[i].stats); + sss_tool_reset_sq_stats(&nic_dev->sq_desc_group[i].stats); + } + + *out_len = 0; + + return 0; +} + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + u32 count = 0; + + if (!in_buf || in_len != sizeof(count) || !out_len || + *out_len != sizeof(count) || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) == SSS_TOOL_SHOW_SSET_IO_STATS) + count = sss_nic_get_io_stats_size(nic_dev); + + *((u32 *)out_buf) = count; + + return 0; +} + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len) +{ + struct sss_tool_show_item *items = out_buf; + u32 count; + + if (!in_buf || in_len != sizeof(count) || !out_len || !out_buf) { + tool_err("Invalid in_len: %u\n", in_len); + return -EINVAL; + } + + if (*((u32 *)in_buf) != SSS_TOOL_SHOW_SSET_IO_STATS) { + tool_err("Invalid input para %u stats\n", *((u32 *)in_buf)); + return -EINVAL; + } + + count = sss_nic_get_io_stats_size(nic_dev); + + if (count * sizeof(*items) != *out_len) { + tool_err("Invalid out len: %u is not equal to %lu\n", + *out_len, count * sizeof(*items)); + return -EINVAL; + } + + sss_nic_get_io_stats(nic_dev, items); + + return 0; +} + diff --git a/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..1a686e5712be6bc8cf8863e2ba6979c60e61c464 --- /dev/null +++ b/drivers/net/ethernet/3snic/sssnic/nic/tool/sss_tool_nic_stats.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 3snic Technologies Co., Ltd */ + +#ifndef SSS_TOOL_NIC_STATS_H +#define SSS_TOOL_NIC_STATS_H + +int sss_tool_clear_func_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_count(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +int sss_tool_get_sset_stats(struct sss_nic_dev *nic_dev, const void *in_buf, + u32 in_len, void *out_buf, u32 *out_len); + +#endif + diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index f2f0264c58ba00632ac8012b0e5636c0c87a8c9b..443b34e2725f477e9f661fff327433b647e0da5b 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -49,7 +49,7 @@ config XSURF100 tristate "Amiga XSurf 100 AX88796/NE2000 clone support" depends on ZORRO select AX88796 - select ASIX_PHY + select AX88796B_PHY help This driver is for the Individual Computers X-Surf 100 Ethernet card (based on the Asix AX88796 chip). 
If you have such a card, diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index 342ae08ec3c29832ae5be0da8d93e59d6441cab1..d60a86aa8aa8049e7c5216f15f64b6d8406ec115 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count, static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); -#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) - /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); @@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) static enum mac8390_access mac8390_testio(unsigned long membase) { - unsigned long outdata = 0xA5A0B5B0; - unsigned long indata = 0x00000000; + u32 outdata = 0xA5A0B5B0; + u32 indata = 0; + /* Try writing 32 bits */ - memcpy_toio((void __iomem *)membase, &outdata, 4); - /* Now compare them */ - if (memcmp_withio(&outdata, membase, 4) == 0) + nubus_writel(outdata, membase); + /* Now read it back */ + indata = nubus_readl(membase); + if (outdata == indata) return ACCESS_32; + + outdata = 0xC5C0D5D0; + indata = 0; + /* Write 16 bit output */ word_memcpy_tocard(membase, &outdata, 4); /* Now read it back */ word_memcpy_fromcard(&indata, membase, 4); if (outdata == indata) return ACCESS_16; + return ACCESS_UNKNOWN; } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6fde68aa13a40de376f472df200f36752f6a247e..5a8cda7d98c6d813f6f5e1d2e3b417cd94aca8f0 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -18,6 +18,7 @@ config SUNGEM_PHY tristate source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/3snic/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" source "drivers/net/ethernet/agere/Kconfig" @@ -82,6 +83,7 @@ source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/netswift/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b45d5f626b592356d222e9967c3b68a96dfb7c3f..d2239fee23b89f9db71a3d0dcef76cec7de24ca8 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_3SNIC) += 3snic/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ @@ -95,3 +96,4 @@ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ +obj-$(CONFIG_NET_VENDOR_NETSWIFT) += netswift/ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 0fb986ba32905a5dac78c926333c9765ec4678de..0ae723f75341710a5347f061985a5fd3bbc2a862 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) & 0xffff; if (inuse) { /* Tx FIFO is not empty */ - ready = priv->tx_prod - priv->tx_cons - inuse - 1; + ready = max_t(int, + 
priv->tx_prod - priv->tx_cons - inuse - 1, 0); } else { /* Check for buffered last packet */ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index baca8f704a459530ce16c17a046d571a34ca416c..c3c1195021a2b72101f9a8aaa0fd0f58a15e561c 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, priv->phy_iface); - if (IS_ERR(phydev)) + if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } } else { int ret; diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig index 99b30353541ab7851cef3e5a9cdd10448f274a91..9e87d7b8360f59575b824ee734298409fcffbcb0 100644 --- a/drivers/net/ethernet/amazon/Kconfig +++ b/drivers/net/ethernet/amazon/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON config ENA_ETHERNET tristate "Elastic Network Adapter (ENA) support" - depends on (PCI_MSI && X86) + depends on PCI_MSI && !CPU_BIG_ENDIAN ---help--- This driver supports Elastic Network Adapter (ENA)" diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d906293ce07d9d6edab5cd03f32dea94597987b1..e26c195fec83b4d68840f4244d92053e4c352feb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1197,8 +1197,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget) struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; - u32 tx_work_done; - u32 rx_work_done; + int tx_work_done; + int rx_work_done = 0; int tx_budget; int napi_comp_call = 0; int ret; @@ -1215,7 +1215,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget) } tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); - rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); + /* On netpoll the budget is zero and the handler should only clean the + * tx completions. 
+ */ + if (likely(budget)) + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); /* If the device is about to reset or down, avoid unmask * the interrupt and return 0 so NAPI won't reschedule @@ -2223,7 +2227,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; - strncpy(host_info->kernel_ver_str, utsname()->version, + strlcpy(host_info->kernel_ver_str, utsname()->version, sizeof(host_info->kernel_ver_str) - 1); host_info->os_dist = 0; strncpy(host_info->os_dist_str, utsname()->release, @@ -2595,11 +2599,6 @@ static int ena_restore_device(struct ena_adapter *adapter) goto err_device_destroy; } - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); - /* Make sure we don't have a race with AENQ Links state handler */ - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) - netif_carrier_on(adapter->netdev); - rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc) { @@ -2616,6 +2615,11 @@ static int ena_restore_device(struct ena_adapter *adapter) } set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_err(&pdev->dev, "Device reset completed successfully\n"); @@ -2627,8 +2631,8 @@ static int ena_restore_device(struct ena_adapter *adapter) ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); - ena_com_mmio_reg_read_request_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); + ena_com_mmio_reg_read_request_destroy(ena_dev); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 01d132c02ff9039e31bcc9c4e10b1cbaeb315150..265039c57023fc40d9c605b9cbdd49b5270cfdd3 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev) /* * Transmit a packet */ -static int +static netdev_tx_t am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index c5b81268c2849491ba22d26181f4c1b8332526c9..d3d44e07afbc05b71ac1c160ba859c0b95f065be 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr *init_rec ); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -769,7 +770,8 @@ static void lance_tx_timeout (struct net_device *dev) /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct 
lance_ioreg *IO = lp->iobase; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 00332a1ea84b9e770febc38dd69af42e3d5bba90..9f23703dd509f84981596313a1dbb066efa73479 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -894,7 +894,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 77b1db2677309e639f52c75ecbc96e9e451a7bbb..da7e3d4f41661bf911f767e525ab50b6bb8e9818 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -236,7 +236,8 @@ struct lance_private { static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev ) } -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, len; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index cdd7a611479b27f4c59b15bffc42a73a9a7229ee..9d489982682336a7cef13186f20557a4a7d7daeb 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; @@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op, prop = of_get_property(nd, "tpe-link-test?", NULL); if (!prop) - goto no_link_test; + goto node_put; if (strcmp(prop, "true")) { printk(KERN_NOTICE "SunLance: warning: overriding option " @@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op, "to ecd@skynet.be\n"); auxio_set_lte(AUXIO_LTE_ON); } +node_put: + of_node_put(nd); no_link_test: lp->auto_select = 1; lp->tpe = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d272dc6984ac6ef3f61743a16912c13552c35b2f..b40d4377cc71d798e4cd1f8680a3620b90c65b0c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -431,8 +431,6 @@ #define MAC_MDIOSCAR_PA_WIDTH 5 #define MAC_MDIOSCAR_RA_INDEX 0 #define MAC_MDIOSCAR_RA_WIDTH 16 -#define MAC_MDIOSCAR_REG_INDEX 0 -#define MAC_MDIOSCAR_REG_WIDTH 21 #define MAC_MDIOSCCDR_BUSY_INDEX 22 #define MAC_MDIOSCCDR_BUSY_WIDTH 1 #define MAC_MDIOSCCDR_CMD_INDEX 16 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 
1e929a1e4ca78145f5c8214e5562c740472bfc97..4666084eda16a318a4042b81c2d3a03cb7664a19 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, } } +static unsigned int xgbe_create_mdio_sca(int port, int reg) +{ + unsigned int mdio_sca, da; + + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; + + mdio_sca = 0; + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); + + return mdio_sca; +} + static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, u16 val) { @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, reinit_completion(&pdata->mdio_complete); - mdio_sca = 0; - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); + mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 24f1053b8785e65b3346b0e2c8fb2acbccbbad29..cdbabd77f2d4c2df6aa9eef693e45a9f9140f9b7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev) return 0; } -static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_ring *ring; struct xgbe_packet_data *packet; struct netdev_queue *txq; - int ret; + netdev_tx_t ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); @@ -2765,6 +2765,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) buf2_len = xgbe_rx_buf2_len(rdata, packet, len); len += buf2_len; + if (buf2_len > rdata->rx.buf.dma_len) { + /* Hardware inconsistency within the descriptors + * that has resulted in a length underflow. 
+ */ + error = 1; + goto skip_data; + } + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2794,8 +2802,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!last || context_next) goto read_again; - if (!skb) + if (!skb || error) { + dev_kfree_skb(skb); goto next_packet; + } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index b41f23679a087b2a49676ae362044c1b8085820a..7ce9c69e9c44f3d4288d04f710b96222ebf2fb77 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void) ret = xgbe_platform_init(); if (ret) - return ret; + goto err_platform_init; ret = xgbe_pci_init(); if (ret) - return ret; + goto err_pci_init; return 0; + +err_pci_init: + xgbe_platform_exit(); +err_platform_init: + unregister_netdevice_notifier(&xgbe_netdev_notifier); + return ret; } static void __exit xgbe_mod_exit(void) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3b889efddf789643803b371420c321e90b0e5481..50dd6bf176d034721590bf15c2abfab88dd5285a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -29,9 +29,6 @@ #define RES_RING_CSR 1 #define RES_RING_CMD 2 -static const struct of_device_id xgene_enet_of_match[]; -static const struct acpi_device_id xgene_enet_acpi_match[]; - static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) { struct xgene_enet_raw_desc16 *raw_desc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 91eb8910b1c992b1b7876f05a26753a5cf79c100..90a0e1d0d62219c00b807cddd0d276f532556eab 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -42,8 +42,8 @@ #define AQ_CFG_IS_LRO_DEF 1U /* RSS */ -#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U -#define AQ_CFG_RSS_HASHKEY_SIZE 320U +#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 64U +#define AQ_CFG_RSS_HASHKEY_SIZE 40U #define AQ_CFG_IS_RSS_DEF 1U #define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 26dc6782b4750f311f8311f1523e6ea98bc08f5b..8cc34b0bedc3a2f388ac778fe50b573f24f4b5ac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -44,7 +44,7 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) struct aq_rss_parameters *rss_params = &cfg->aq_rss; int i = 0; - static u8 rss_key[40] = { + static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = { 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, @@ -590,7 +590,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) } } - if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { + if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) { packet_filter |= IFF_MULTICAST; self->mc_list.count = i; self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index d1e1a0ba86150208e73ffdbce006e8d1a822302c..b3c7994d73eb1599456fa5c584f4297d4ebc0a83 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) bool aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); - unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; + unsigned int budget; - for (; self->sw_head != self->hw_head && budget--; - self->sw_head = aq_ring_next_dx(self, self->sw_head)) { + for (budget = AQ_CFG_TX_CLEAN_BUDGET; + budget && self->sw_head != self->hw_head; budget--) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; if (likely(buff->is_mapped)) { @@ -167,11 +167,34 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) buff->pa = 0U; buff->eop_index = 0xffffU; + self->sw_head = aq_ring_next_dx(self, self->sw_head); } return !!budget; } +static void aq_rx_checksum(struct aq_ring_s *self, + struct aq_ring_buff_s *buff, + struct sk_buff *skb) +{ + if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(buff->is_cso_err)) { + ++self->stats.rx.errors; + skb->ip_summed = CHECKSUM_NONE; + return; + } + if (buff->is_ip_cso) { + __skb_incr_checksum_unnecessary(skb); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + + if (buff->is_udp_cso || buff->is_tcp_cso) + __skb_incr_checksum_unnecessary(skb); +} + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, @@ -267,18 +290,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } skb->protocol = eth_type_trans(skb, ndev); - if (unlikely(buff->is_cso_err)) { - ++self->stats.rx.errors; - skb->ip_summed = CHECKSUM_NONE; - } else { - if (buff->is_ip_cso) { - __skb_incr_checksum_unnecessary(skb); - if (buff->is_udp_cso || buff->is_tcp_cso) - __skb_incr_checksum_unnecessary(skb); - } else { - skb->ip_summed = CHECKSUM_NONE; - } - } + + aq_rx_checksum(self, buff, skb); skb_set_hash(skb, buff->rss_hash, buff->is_hash_l4 ? 
PKT_HASH_TYPE_L4 : diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d335c334fa561ed2ae1a8dad45fcd9af822ee0a7..82582fa54d5d25fdce4b7d9bac10a2a146d3ab6a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -89,6 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } } +err_exit: if (!was_tx_cleaned) work_done = budget; @@ -98,7 +99,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) 1U << self->aq_ring_param.vec_idx); } } -err_exit: + return work_done; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1d44a386e7d34bc1775b3b66a3643111204c6909..51cd1f98bcf07e1321a13301db3fffbefd395e65 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -655,9 +655,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; - unsigned int is_err = 1U; unsigned int is_rx_check_sum_enabled = 0U; unsigned int pkt_type = 0U; + u8 rx_stat = 0U; if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ break; @@ -665,68 +665,71 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, buff = &ring->buff_ring[ring->hw_head]; - is_err = (0x0000003CU & rxd_wb->status); + rx_stat = (0x0000003CU & rxd_wb->status) >> 2; - is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19); - is_err &= ~0x20U; /* exclude validity bit */ + is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U; pkt_type = 0xFFU & (rxd_wb->type >> 4); - if (is_rx_check_sum_enabled) { - if (0x0U == (pkt_type & 0x3U)) - buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(0) && + (0x0U == (pkt_type & 0x3U))) + buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U; + if (is_rx_check_sum_enabled & BIT(1)) { if (0x4U == (pkt_type & 0x1CU)) - buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; + buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U : + !!(rx_stat & BIT(3)); else if (0x0U == (pkt_type & 0x1CU)) - buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; - - /* Checksum offload workaround for small packets */ - if (rxd_wb->pkt_len <= 60) { - buff->is_ip_cso = 0U; - buff->is_cso_err = 0U; - } + buff->is_tcp_cso = (rx_stat & BIT(2)) ? 
0U : + !!(rx_stat & BIT(3)); + } + buff->is_cso_err = !!(rx_stat & 0x6); + /* Checksum offload workaround for small packets */ + if (unlikely(rxd_wb->pkt_len <= 60)) { + buff->is_ip_cso = 0U; + buff->is_cso_err = 0U; } - - is_err &= ~0x18U; dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); - if (is_err || rxd_wb->type & 0x1000U) { - /* status error or DMA error */ + if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { + /* MAC error or DMA error */ buff->is_error = 1U; - } else { - if (self->aq_nic_cfg->is_rss) { - /* last 4 byte */ - u16 rss_type = rxd_wb->type & 0xFU; - - if (rss_type && rss_type < 0x8U) { - buff->is_hash_l4 = (rss_type == 0x4 || - rss_type == 0x5); - buff->rss_hash = rxd_wb->rss_hash; - } + } + if (self->aq_nic_cfg->is_rss) { + /* last 4 byte */ + u16 rss_type = rxd_wb->type & 0xFU; + + if (rss_type && rss_type < 0x8U) { + buff->is_hash_l4 = (rss_type == 0x4 || + rss_type == 0x5); + buff->rss_hash = rxd_wb->rss_hash; } + } - if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { - buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; - buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; - buff->next = 0U; - buff->is_eop = 1U; + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { + buff->len = rxd_wb->pkt_len % + AQ_CFG_RX_FRAME_MAX; + buff->len = buff->len ? + buff->len : AQ_CFG_RX_FRAME_MAX; + buff->next = 0U; + buff->is_eop = 1U; + } else { + buff->len = + rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? + AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & + rxd_wb->status) { + /* LRO */ + buff->next = rxd_wb->next_desc_ptr; + ++ring->stats.rx.lro_packets; } else { - if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & - rxd_wb->status) { - /* LRO */ - buff->next = rxd_wb->next_desc_ptr; - ++ring->stats.rx.lro_packets; - } else { - /* jumbo */ - buff->next = - aq_ring_next_dx(ring, - ring->hw_head); - ++ring->stats.rx.jumbo_packets; - } + /* jumbo */ + buff->next = + aq_ring_next_dx(ring, + ring->hw_head); + ++ring->stats.rx.jumbo_packets; } } } @@ -915,6 +918,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) static int hw_atl_b0_hw_stop(struct aq_hw_s *self) { hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); + + /* Invalidate Descriptor Cache to prevent writing to the cached + * descriptors and to the data pointer of those descriptors + */ + hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1); + return aq_hw_err_from_flags(self); } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 10ba035dadb19c8ffa0409d9ed636009695265b5..5502ec5f0f6993502cd8d4e880032a9606da5c34 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); } +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT, + init); +} + void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer) { @@ -1460,3 +1468,11 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), glb_cpu_scratch_scp); } + +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 
up_force_intr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR, + HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK, + HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, + up_force_intr); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index dfb426f2dc2c8ef1aad6971fbd627a2312cf927d..41f239928c157f121b74f086c35c86457f2a3aba 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer); +/* set rdm rx dma descriptor cache init */ +void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init); + /* set rx xoff enable (per tc) */ void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, u32 buffer); @@ -698,4 +701,7 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); /* set pci register reset disable */ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); +/* set uP Force Interrupt */ +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index e0cf70120f1d01aa5ad7c9cc46eeef75f71fe081..a715fa317b1c822781b4a0422033816b5684e7e3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -293,6 +293,24 @@ /* default value of bitfield desc{d}_reset */ #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0 +/* rdm_desc_init_i bitfield definitions + * preprocessor definitions for the bitfield rdm_desc_init_i. + * port="pif_rdm_desc_init_i" + */ + +/* register address for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00 +/* bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff +/* inverted bitmask for bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000 +/* lower bit position of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0 +/* width of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32 +/* default value of bitfield rdm_desc_init_i */ +#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0 + /* rx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". 
* port="pif_rdm_int_desc_wrb_en_i" @@ -2387,4 +2405,17 @@ #define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ (0x00000300u + (scratch_scp) * 0x4) +/* register address for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404 +/* bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002 +/* inverted bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD +/* lower bit position of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1 +/* width of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1 +/* default value of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index c965e65d07db3be832b0edd332eaedfb17976143..49c80bac9ce2866ab6307158ca318420e66f9f0e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -262,6 +262,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, 10, 1000U); + if (err) + return err; } if (self->rbl_enabled) @@ -325,17 +327,31 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, err = -ETIME; goto err_exit; } + if (IS_CHIP_FEATURE(REVISION_B1)) { + u32 offset = 0; + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x328, p[offset]); + aq_hw_write_reg(self, 0x32C, + (0x80000000 | (0xFFFF & (offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, + 0x32C) & 0xF0000000) != + 0x80000000, + 10, 1000); + } + } else { + u32 offset = 0; - aq_hw_write_reg(self, 0x00000208U, a); - - for (++cnt; --cnt;) { - u32 i = 0U; + aq_hw_write_reg(self, 0x208, a); - aq_hw_write_reg(self, 0x0000020CU, *(p++)); - aq_hw_write_reg(self, 0x00000200U, 0xC000U); + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); - for (i = 1024U; - (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) & + 0x100) == 0, 10, 1000); } } @@ -399,7 +415,7 @@ struct aq_hw_atl_utils_fw_rpc_tid_s { #define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) -static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -423,8 +439,8 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) return err; } -static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, - struct hw_aq_atl_utils_fw_rpc **rpc) +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -443,6 +459,11 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; if (fw.len == 0xFFFFU) { + if (sw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid sw len: %x\n", sw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; @@ -453,6 +474,11 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, if (rpc) { if (fw.len) { + 
if (fw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid fw len: %x\n", fw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b875590efcbddbeb5983f2da99f9785d4298734c..505c8a2abd9ca4ec41d28518d7963797483852b3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -319,6 +319,11 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_aq_atl_utils_fw_rpc **rpc); + extern const struct aq_fw_ops aq_fw_1x_ops; extern const struct aq_fw_ops aq_fw_2x_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index e37943760a58b2a88b33de295e73dfbac71fc5bf..6300d94c9ff070ccc47cd33d0a27a758f571dc11 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -21,6 +21,7 @@ #define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 #define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 #define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 #define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C @@ -40,6 +41,10 @@ static int aq_fw2x_init(struct aq_hw_s *self) AQ_HW_WAIT_FOR(0U != (self->mbox_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), + 1000U, 100U); + return err; } diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 0f65768026072ae7ded390fef283269f180f6e24..a1df2ebab07f021524586ee8cec66da3fbb6aa92 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -265,6 +265,9 @@ static int emac_rockchip_remove(struct platform_device *pdev) if (priv->regulator) regulator_disable(priv->regulator); + if (priv->soc_data->need_div_macclk) + clk_disable_unprepare(priv->macclk); + free_netdev(ndev); return err; } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index c46b489ce9b4d7f0fb46baa2497a0e22723b2159..86660a3cae49b6b06ec1f59ad889026d8cfd6b69 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -521,7 +521,6 @@ struct atl1c_adapter { struct napi_struct napi; struct page *rx_page; unsigned int rx_page_offset; - unsigned int rx_frag_size; struct atl1c_hw hw; struct atl1c_hw_stats hw_stats; struct mii_if_info mii; /* MII interface info */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 7087b88550db5fbbbfb909aee8c5a999db5bad57..d8eb7dcf278f41292ab1ce730a8f1af46f0d1ed1 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -480,15 +480,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, struct net_device *dev) { - unsigned int head_size; int mtu = dev->mtu; adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; - - head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - adapter->rx_frag_size = roundup_pow_of_two(head_size); } static netdev_features_t atl1c_fix_features(struct net_device *netdev, @@ -960,10 +955,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) kfree(adapter->tpd_ring[0].buffer_info); adapter->tpd_ring[0].buffer_info = NULL; } - if (adapter->rx_page) { - put_page(adapter->rx_page); - adapter->rx_page = NULL; - } } /** @@ -1666,36 +1657,6 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) -{ - struct sk_buff *skb; - struct page *page; - - if (adapter->rx_frag_size > PAGE_SIZE) - return netdev_alloc_skb(adapter->netdev, - adapter->rx_buffer_len); - - page = adapter->rx_page; - if (!page) { - adapter->rx_page = page = alloc_page(GFP_ATOMIC); - if (unlikely(!page)) - return NULL; - adapter->rx_page_offset = 0; - } - - skb = build_skb(page_address(page) + adapter->rx_page_offset, - adapter->rx_frag_size); - if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD); - adapter->rx_page_offset += adapter->rx_frag_size; - if (adapter->rx_page_offset >= PAGE_SIZE) - adapter->rx_page = NULL; - else - get_page(page); - } - return skb; -} - static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; @@ -1717,13 +1678,24 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = atl1c_alloc_skb(adapter); + /* When DMA RX address is set to something like + * 0x....fc0, it will be very likely to cause DMA + * RFD overflow issue. + * + * To work around it, we apply rx skb with 64 bytes + * longer space, and offset the address whenever + * 0x....fc0 is detected. 
+ */ + skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); break; } + if (((unsigned long)skb->data & 0xfff) == 0xfc0) + skb_reserve(skb, 64); + /* * Make buffer alignment 2 beyond a 16 byte boundary * this will result in a 16 byte aligned IP header after diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 9dc6da039a6d90ac4137a70e94b2c3213c2a4741..3164aad29bcf879aa0841a912d6bbeaa757caad6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -473,7 +473,9 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id, { struct atl1e_adapter *adapter = netdev_priv(netdev); - atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); + if (atl1e_write_phy_reg(&adapter->hw, + reg_num & MDIO_REG_ADDR_MASK, val)) + netdev_err(netdev, "write phy register failed\n"); } static int atl1e_mii_ioctl(struct net_device *netdev, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bb41becb66099389216192c761541ad1fd51790d..31ff1e0d1baacc1fba3a95329b9de8159dfbadd6 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; - static int cards_found; + static int cards_found = 0; unsigned long mmio_start; int mmio_len; int err; - cards_found = 0; - err = pci_enable_device(pdev); if (err) return err; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 897302adc38ec16869609cccf6ddc8ee9941a3c7..50f8a377596e10bcb682f37fde9bd36b18701900 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -568,12 +568,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) /* * tx request callback */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; - int ret; + netdev_tx_t ret; priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c57238fce86377b0095b8bfae082b1e776d06c20..8322af6e225c5c7806cacc008ebafc54474939d5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -120,9 +120,13 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, struct dma_desc *desc, unsigned int port) { + unsigned long desc_flags; + /* Ports are latched, so write upper address first */ + spin_lock_irqsave(&priv->desc_lock, desc_flags); tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); + spin_unlock_irqrestore(&priv->desc_lock, desc_flags); } /* Ethtool operations */ @@ -134,6 +138,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); reg = rxchk_readl(priv, RXCHK_CONTROL); + /* Clear L2 header checks, which would prevent BPDUs + * from being received. 
+ */ + reg &= ~RXCHK_L2_HDR_DIS; if (priv->rx_chk_en) reg |= RXCHK_EN; else @@ -519,7 +527,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcm_sysport_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; @@ -527,11 +534,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, if (!(priv->wolopts & WAKE_MAGICSECURE)) return; - /* Return the programmed SecureOn password */ - reg = umac_readl(priv, UMAC_PSW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = umac_readl(priv, UMAC_PSW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } static int bcm_sysport_set_wol(struct net_device *dev, @@ -547,13 +550,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, if (wol->wolopts & ~supported) return -EINVAL; - /* Program the SecureOn password */ - if (wol->wolopts & WAKE_MAGICSECURE) { - umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_PSW_MS); - umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_PSW_LS); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -1897,9 +1895,6 @@ static void bcm_sysport_netif_start(struct net_device *dev) intrl2_1_mask_clear(priv, 0xffffffff); else intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK); - - /* Last call before we start the real business */ - netif_tx_start_all_queues(dev); } static void rbuf_init(struct bcm_sysport_priv *priv) @@ -2011,6 +2006,7 @@ static int bcm_sysport_open(struct net_device *dev) } /* Initialize both hardware and software ring */ + spin_lock_init(&priv->desc_lock); for (i = 0; i < dev->num_tx_queues; i++) { ret = bcm_sysport_init_tx_ring(priv, i); if (ret) { @@ -2045,6 +2041,8 @@ static int bcm_sysport_open(struct net_device *dev) bcm_sysport_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; out_clear_rx_int: @@ -2068,7 +2066,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); /* stop all software from updating hardware */ - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); napi_disable(&priv->napi); cancel_work_sync(&priv->dim.dim.work); phy_stop(dev->phydev); @@ -2589,13 +2587,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) unsigned int index, i = 0; u32 reg; - /* Password has already been programmed */ reg = umac_readl(priv, UMAC_MPD_CTRL); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) reg |= MPD_EN; reg &= ~PSW_EN; - if (priv->wolopts & WAKE_MAGICSECURE) + if (priv->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_PSW_MS); + umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_PSW_LS); reg |= PSW_EN; + } umac_writel(priv, reg, UMAC_MPD_CTRL); if (priv->wolopts & WAKE_FILTER) { @@ -2654,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcm_sysport_netif_stop(dev); phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Disable UniMAC RX */ umac_enable_set(priv, CMD_RX_EN, 0); @@ -2743,8 +2746,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - netif_device_attach(dev); - /* RX pipe enable */ topctrl_writel(priv, 0, RX_FLUSH_CNTL); @@ 
-2789,6 +2790,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) bcm_sysport_netif_start(dev); + netif_device_attach(dev); + return 0; out_free_rx_ring: diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 046c6c1d97fd705608ab14cbac129c529b5548a7..f438b818136ae8301438a4da406948e6c99f4c64 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include +#include #include #include @@ -750,6 +751,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ + spinlock_t desc_lock; struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ @@ -776,6 +778,7 @@ struct bcm_sysport_priv { unsigned int crc_fwd:1; u16 rev; u32 wolopts; + u8 sopass[SOPASS_MAX]; unsigned int wol_irq_disabled:1; /* MIB related fields */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index be1506169076f0a89f6a621d01dce81afe720ba7..98a837819b659e79b404054ec73919070bed8efc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1256,7 +1256,7 @@ enum { struct bnx2x_fw_stats_req { struct stats_query_header hdr; - struct stats_query_entry query[FP_SB_MAX_E1x+ + struct stats_query_entry query[FP_SB_MAX_E2 + BNX2X_FIRST_QUEUE_QUERY_IDX]; }; @@ -1282,6 +1282,7 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, BNX2X_SP_RTNL_CHANGE_UDP_PORT, + BNX2X_SP_RTNL_UPDATE_SVID, }; enum bnx2x_iov_flag { @@ -2191,6 +2192,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ E1HVN_MAX) +/* Following is the DMAE channel number allocation for the clients. + * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively. + * Driver: 0-3 and 8-11 (for PF dmae operations) + * 4 and 12 (for stats requests) + */ +#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */ + /* PCIE link and speed */ #define PCICFG_LINK_WIDTH 0x1f00000 #define PCICFG_LINK_WIDTH_SHIFT 20 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5a727d4729da7348075b75101154cca3cf515073..a123f1733a596307be846823eb8cef8f7b627502 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -286,6 +286,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) hw_cons = le16_to_cpu(*txdata->tx_cons_sb); sw_cons = txdata->tx_pkt_cons; + /* Ensure subsequent loads occur after hw_cons */ + smp_rmb(); + while (sw_cons != hw_cons) { u16 pkt_cons; @@ -785,6 +788,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, BNX2X_ERR("skb_put is about to fail... 
pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); + bnx2x_frag_free(fp, new_data); return; } #endif @@ -1933,8 +1937,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select a non-FCoE queue */ - return fallback(dev, skb, NULL) % - (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); + return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp)); } void bnx2x_set_num_queues(struct bnx2x *bp) @@ -3056,12 +3059,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ - if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_clear_vlan_info(bp); bnx2x_vfpf_close_vf(bp); - else if (unload_mode != UNLOAD_RECOVERY) + } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip*/ bnx2x_chip_cleanup(bp, unload_mode, keep_link); - else { + } else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); @@ -3858,9 +3862,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (!(bp->flags & TX_TIMESTAMPING_EN)) { + bp->eth_stats.ptp_skip_tx_ts++; BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); } else if (bp->ptp_tx_skb) { - BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + bp->eth_stats.ptp_skip_tx_ts++; + netdev_err_once(bp->dev, + "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n"); } else { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; /* schedule check for Tx timestamp */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 0e508e5defce315f2e5254ca238afe26b523054a..844195849ae76e41d9fa63c00ddb946481656ed1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp); void bnx2x_disable_close_the_gate(struct bnx2x *bp); int bnx2x_init_hw_func_cnic(struct bnx2x *bp); +void bnx2x_clear_vlan_info(struct bnx2x *bp); + /** * bnx2x_sp_event - handle ramrods completion. * @@ -1006,9 +1008,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, struct bnx2x_alloc_pool *pool) { - if (!pool->page) - return; - put_page(pool->page); pool->page = NULL; @@ -1019,6 +1018,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, { int i; + if (!fp->page_pool.page) + return; + if (fp->mode == TPA_MODE_DISABLED) return; @@ -1111,7 +1113,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) for (i = 0; i < E1H_FUNC_MAX / 2; i++) { u32 func_config = MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. + func_mf_config[BP_PATH(bp) + 2 * i]. config); func_num += ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index a4a90b6cdb467038457fca98e8ab9f25dd72cde8..00f9ed93360c6707860c5aa3538d518f45224f59 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -182,7 +182,9 @@ static const struct { { STATS_OFFSET32(driver_filtered_tx_pkt), 4, false, "driver_filtered_tx_pkt" }, { STATS_OFFSET32(eee_tx_lpi), - 4, true, "Tx LPI entry count"} + 4, true, "Tx LPI entry count"}, + { STATS_OFFSET32(ptp_skip_tx_ts), + 4, false, "ptp_skipped_tx_tstamp" }, }; #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) @@ -1581,7 +1583,8 @@ static int bnx2x_get_module_info(struct net_device *dev, } if (!sff8472_comp || - (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) { + (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) || + !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) { modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index b7d251108c19f56345304d88097619a58b08b8e4..7115f502566458480bf4f73a01cc83e33545c486 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -62,6 +62,7 @@ #define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c #define SFP_EEPROM_DIAG_TYPE_SIZE 1 #define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2) +#define SFP_EEPROM_DDM_IMPLEMENTED (1<<6) #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e #define SFP_EEPROM_SFF_8472_COMP_SIZE 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fcc2328bb0d953e797c49699b11e5e5956a0d583..df4f77ad95c4a9dbefae7daad22870b7a6c1d621 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { int func = BP_ABS_FUNC(bp); u32 val; @@ -3536,6 +3540,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) */ static void bnx2x_config_mf_bw(struct bnx2x *bp) { + /* Workaround for MFW bug. + * MFW is not supposed to generate BW attention in + * single function mode. 
+ */ + if (!IS_MF(bp)) { + DP(BNX2X_MSG_MCP, + "Ignoring MF BW config in single function mode\n"); + return; + } + if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_link_sync_notify(bp); @@ -4301,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_handle_eee_event(bp); if (val & DRV_STATUS_OEM_UPDATE_SVID) - bnx2x_handle_update_svid_cmd(bp); + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_UPDATE_SVID, 0); if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { @@ -8462,6 +8477,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, /* Fill a user request section if needed */ if (!test_bit(RAMROD_CONT, ramrod_flags)) { ramrod_param.user_req.u.vlan.vlan = vlan; + __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags); /* Set the command: ADD or DEL */ if (set) ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; @@ -8482,6 +8498,34 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, return rc; } +void bnx2x_clear_vlan_info(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + + /* Mark that hw forgot all entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) + vlan->hw = false; + + bp->vlan_cnt = 0; +} + +static int bnx2x_del_all_vlans(struct bnx2x *bp) +{ + struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; + unsigned long ramrod_flags = 0, vlan_flags = 0; + int rc; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + __set_bit(BNX2X_VLAN, &vlan_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); + if (rc) + return rc; + + bnx2x_clear_vlan_info(bp); + + return 0; +} + int bnx2x_del_all_macs(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, int mac_type, bool wait_for_comp) @@ -9320,6 +9364,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", rc); + /* The whole *vlan_obj structure may be not initialized if VLAN + * filtering offload is not supported by hardware. Currently this is + * true for all hardware covered by CHIP_IS_E1x(). + */ + if (!CHIP_IS_E1x(bp)) { + /* Remove all currently configured VLANs */ + rc = bnx2x_del_all_vlans(bp); + if (rc < 0) + BNX2X_ERR("Failed to delete all VLANs\n"); + } + /* Disable LLH */ if (!CHIP_IS_E1(bp)) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); @@ -9940,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp) */ static void bnx2x_parity_recover(struct bnx2x *bp) { - bool global = false; u32 error_recovered, error_unrecovered; - bool is_parity; + bool is_parity, global = false; +#ifdef CONFIG_BNX2X_SRIOV + int vf_idx; + + for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + if (vf) + vf->state = VF_LOST; + } +#endif DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { @@ -10349,6 +10412,9 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) + bnx2x_handle_update_svid_cmd(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { if (bnx2x_udp_port_update(bp)) { @@ -11740,8 +11806,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) * If maximum allowed number of connections is zero - * disable the feature. 
*/ - if (!bp->cnic_eth_dev.max_fcoe_conn) + if (!bp->cnic_eth_dev.max_fcoe_conn) { bp->flags |= NO_FCOE_FLAG; + eth_zero_addr(bp->fip_mac); + } } static void bnx2x_get_cnic_info(struct bnx2x *bp) @@ -13014,13 +13082,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) { - struct bnx2x_vlan_entry *vlan; - - /* The hw forgot all entries after reload */ - list_for_each_entry(vlan, &bp->vlan_reg, link) - vlan->hw = false; - bp->vlan_cnt = 0; - /* Don't set rx mode here. Our caller will do it. */ bnx2x_vlan_configure(bp, false); @@ -15208,11 +15269,24 @@ static void bnx2x_ptp_task(struct work_struct *work) u32 val_seq; u64 timestamp, ns; struct skb_shared_hwtstamps shhwtstamps; + bool bail = true; + int i; + + /* FW may take a while to complete timestamping; try a bit and if it's + * still not complete, may indicate an error state - bail out then. + */ + for (i = 0; i < 10; i++) { + /* Read Tx timestamp registers */ + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID); + if (val_seq & 0x10000) { + bail = false; + break; + } + msleep(1 << i); + } - /* Read Tx timestamp registers */ - val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : - NIG_REG_P0_TLLH_PTP_BUF_SEQID); - if (val_seq & 0x10000) { + if (!bail) { /* There is a valid timestamp value */ timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); @@ -15227,16 +15301,18 @@ static void bnx2x_ptp_task(struct work_struct *work) memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); - dev_kfree_skb_any(bp->ptp_tx_skb); - bp->ptp_tx_skb = NULL; DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", timestamp, ns); } else { - DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n"); - /* Reschedule to keep checking for a valid timestamp value */ - schedule_work(&bp->ptp_task); + DP(BNX2X_MSG_PTP, + "Tx timestamp is not recorded (register read=%u)\n", + val_seq); + bp->eth_stats.ptp_skip_tx_ts++; } + + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; } void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 3f4d2c8da21a3a848b4149758883333522b6f77a..a9eaaf3e73a4c41f6dc6808f723a400750fd1ba1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; + rdata->dmae_cmd_id = BNX2X_FW_DMAE_C; rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 0bf2fd470819e64d2d7788b57caee6569b9b3828..7a6e82db423123585574c88765fb00f14ab3b603 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -265,6 +265,7 @@ enum { BNX2X_ETH_MAC, BNX2X_ISCSI_ETH_MAC, BNX2X_NETQ_ETH_MAC, + BNX2X_VLAN, BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; @@ -272,7 +273,8 @@ enum { #define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ 1 << BNX2X_ETH_MAC | 
\ 1 << BNX2X_ISCSI_ETH_MAC | \ - 1 << BNX2X_NETQ_ETH_MAC) + 1 << BNX2X_NETQ_ETH_MAC | \ + 1 << BNX2X_VLAN) #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ ((flags) & BNX2X_VLAN_MAC_CMP_MASK) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 62da465377340249af3e7e0671f7802272ba410a..ab60f4f9cc246f08cbd7b8700e50dc2393114048 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2394,15 +2394,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) /* send the ramrod on all the queues of the PF */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + int tx_idx; /* Set the appropriate Queue object */ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure Tx switching\n"); - return rc; + for (tx_idx = FIRST_TX_COS_INDEX; + tx_idx < fp->max_cos; tx_idx++) { + q_params.params.update.cid_index = tx_idx; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure Tx switching\n"); + return rc; + } } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index eb814c65152f102cbbabaceac55bcc119ee57e85..4dc34de1a09a8888526a2f7c5c0346d6d22099c3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -139,6 +139,7 @@ struct bnx2x_virtf { #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ +#define VF_LOST 4 /* Recovery while VFs are loaded */ bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index b2644ed13d064eacc3b34cf59d48b156bedac16b..d55e63692cf3bf5ab4c11b8ddc9d800c520d5e78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -207,6 +207,9 @@ struct bnx2x_eth_stats { u32 driver_filtered_tx_pkt; /* src: Clear-on-Read register; Will not survive PMF Migration */ u32 eee_tx_lpi; + + /* PTP */ + u32 ptp_skip_tx_ts; }; struct bnx2x_eth_q_stats { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 8e0a317b31f7d31915a62f1a7cf993e0150227a2..152758a45150c712fefe427f83f88d663bf6ef5a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2114,6 +2114,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, { int i; + if (vf->state == VF_LOST) { + /* Just ack the FW and return if VFs are lost + * in case of parity error. VFs are supposed to be timedout + * on waiting for PF response. + */ + DP(BNX2X_MSG_IOV, + "VF 0x%x lost, not handling the request\n", vf->abs_vfid); + + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + return; + } + /* check if tlv type is known */ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { /* Lock the per vf op mutex and note the locker's identity. 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e2d92548226ad01eda69694876a85478e539c930..25104ad9354045ec3f0ec356ee4b048be13ec1de 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -463,6 +463,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) } length >>= 9; + if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { + dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", + skb->len); + i = 0; + goto tx_dma_error; + } flags |= bnxt_lhint_arr[length]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); @@ -1086,6 +1092,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info = &rxr->rx_tpa[agg_id]; if (unlikely(cons != rxr->rx_next_cons)) { + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return; } @@ -1538,15 +1546,19 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } cons = rxcmp->rx_cmp_opaque; - rx_buf = &rxr->rx_buf_ring[cons]; - data = rx_buf->data; - data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { - int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); + int rc1 = bnxt_discard_rx(bp, bnapi, &tmp_raw_cons, rxcmp); + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); - return rc1; + if (rc1) + return rc1; + goto next_rx_no_prod_no_len; } + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + data_ptr = rx_buf->data_ptr; prefetch(data_ptr); misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); @@ -1563,12 +1575,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); + bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); rc = -EIO; - goto next_rx; + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); + bnxt_sched_reset(bp, rxr); + } + goto next_rx_no_len; } len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; @@ -1583,6 +1601,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); rc = -ENOMEM; goto next_rx; } @@ -1649,12 +1669,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = 1; next_rx: - rxr->rx_prod = NEXT_RX(prod); - rxr->rx_next_cons = NEXT_RX(cons); - cpr->rx_packets += 1; cpr->rx_bytes += len; +next_rx_no_len: + rxr->rx_prod = NEXT_RX(prod); + rxr->rx_next_cons = NEXT_RX(cons); + next_rx_no_prod_no_len: *raw_cons = tmp_raw_cons; @@ -3536,7 +3557,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < DFLT_HWRM_CMD_TIMEOUT) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); else @@ -3559,7 +3580,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, dma_rmb(); if (*valid) break; - udelay(1); + usleep_range(1, 5); } if (j >= HWRM_VALID_BIT_DELAY_USEC) { @@ -6073,23 +6094,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp) int 
bnxt_reserve_rings(struct bnxt *bp) { int tcs = netdev_get_num_tc(bp->dev); + bool reinit_irq = false; int rc; if (!bnxt_need_reserve_rings(bp)) return 0; - rc = __bnxt_reserve_rings(bp); - if (rc) { - netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); - return rc; - } if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - rc = bnxt_init_int_mode(bp); + reinit_irq = true; + } + rc = __bnxt_reserve_rings(bp); + if (reinit_irq) { + if (!rc) + rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); - if (rc) - return rc; + } + if (rc) { + netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); + return rc; } if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { netdev_err(bp->dev, "tx ring reservation failure\n"); @@ -7126,6 +7150,9 @@ static bool bnxt_drv_busy(struct bnxt *bp) test_bit(BNXT_STATE_READ_STATS, &bp->state)); } +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats); + static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { @@ -7151,6 +7178,9 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, del_timer_sync(&bp->timer); bnxt_free_skbs(bp); + /* Save ring stats before shutdown */ + if (bp->bnapi) + bnxt_get_ring_stats(bp, &bp->net_stats_prev); if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -7212,23 +7242,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static void -bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) { - u32 i; - struct bnxt *bp = netdev_priv(dev); + int i; - set_bit(BNXT_STATE_READ_STATS, &bp->state); - /* Make sure bnxt_close_nic() sees that we are reading stats before - * we check the BNXT_STATE_OPEN flag. - */ - smp_mb__after_atomic(); - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - clear_bit(BNXT_STATE_READ_STATS, &bp->state); - return; - } - /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -7257,6 +7276,40 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } +} + +static void bnxt_add_prev_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) +{ + struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; + + stats->rx_packets += prev_stats->rx_packets; + stats->tx_packets += prev_stats->tx_packets; + stats->rx_bytes += prev_stats->rx_bytes; + stats->tx_bytes += prev_stats->tx_bytes; + stats->rx_missed_errors += prev_stats->rx_missed_errors; + stats->multicast += prev_stats->multicast; + stats->tx_dropped += prev_stats->tx_dropped; +} + +static void +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct bnxt *bp = netdev_priv(dev); + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + /* Make sure bnxt_close_nic() sees that we are reading stats before + * we check the BNXT_STATE_OPEN flag. 
+ */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + *stats = bp->net_stats_prev; + return; + } + + bnxt_get_ring_stats(bp, stats); + bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { struct rx_port_stats *rx = bp->hw_rx_port_stats; @@ -7422,8 +7475,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) skip_uc: rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc && vnic->mc_list_count) { + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + } if (rc) - netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", rc); return rc; @@ -9058,6 +9118,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_clear_int_mode(bp); init_err_pci_clean: + bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_cleanup_pci(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bde384630a75f9aaa723a6976ba1b891465cd9e2..f9e253b705ece65d61ff4b37f8207c39f99d2cd4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -548,7 +548,7 @@ struct rx_tpa_end_cmp_ext { (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) -#define HWRM_VALID_BIT_DELAY_USEC 20 +#define HWRM_VALID_BIT_DELAY_USEC 150 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -1302,6 +1302,7 @@ struct bnxt { void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; + struct rtnl_link_stats64 net_stats_prev; struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; struct rx_port_stats_ext *hw_rx_port_stats_ext; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 790c684f08abcab21980cd9a8479f27cf516d464..b178c2e9dc2319b7c5ae77b61d7c2e4746f0e64c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -78,8 +78,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); - if (rc) + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); + return -EACCES; + } else if (rc) { return -EIO; + } return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index e52d7af3ab3e1efdd227b7026a2d647da0076eee..0a409ba4012a3041726c959437b2676a702d68e3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1444,14 +1444,22 @@ static int bnxt_flash_nvram(struct net_device *dev, rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to flash the device\n"); + rc = -EACCES; + } else if (rc) { + rc = -EIO; + } return rc; } static int bnxt_firmware_reset(struct net_device *dev, u16 dir_type) { - struct bnxt *bp = netdev_priv(dev); struct hwrm_fw_reset_input req = {0}; + struct 
bnxt *bp = netdev_priv(dev); + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); @@ -1491,7 +1499,15 @@ static int bnxt_firmware_reset(struct net_device *dev, return -EINVAL; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to reset the device\n"); + rc = -EACCES; + } else if (rc) { + rc = -EIO; + } + return rc; } static int bnxt_flash_firmware(struct net_device *dev, @@ -1698,9 +1714,9 @@ static int bnxt_flash_package_from_file(struct net_device *dev, struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; + int rc, hwrm_err = 0; u32 item_len; u16 index; - int rc; bnxt_hwrm_fw_set_time(bp); @@ -1743,15 +1759,16 @@ static int bnxt_flash_package_from_file(struct net_device *dev, memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle); - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + hwrm_err = hwrm_send_message(bp, &modify, + sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc) - return rc; + if (rc || hwrm_err) + goto err_exit; if ((install_type & 0xffff) == 0) install_type >>= 16; @@ -1759,12 +1776,10 @@ static int bnxt_flash_package_from_file(struct net_device *dev, install.install_type = cpu_to_le32(install_type); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - rc = -EOPNOTSUPP; + hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (hwrm_err) goto flash_pkg_exit; - } if (resp->error_code) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; @@ -1772,12 +1787,11 @@ static int bnxt_flash_package_from_file(struct net_device *dev, if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - rc = -EOPNOTSUPP; + hwrm_err = _hwrm_send_message(bp, &install, + sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (hwrm_err) goto flash_pkg_exit; - } } } @@ -1788,6 +1802,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev, } flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); +err_exit: + if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to flash the device\n"); + rc = -EACCES; + } else if (hwrm_err) { + rc = -EOPNOTSUPP; + } return rc; } @@ -2368,17 +2390,37 @@ static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) +{ + struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcaps_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_disable_an_for_lpbk(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { 
struct bnxt_link_info *link_info = &bp->link_info; - u16 fw_advertising = link_info->advertising; + u16 fw_advertising; u16 fw_speed; int rc; if (!link_info->autoneg) return 0; + rc = bnxt_query_force_speeds(bp, &fw_advertising); + if (rc) + return rc; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; if (netif_carrier_ok(bp->dev)) fw_speed = bp->link_info.link_speed; @@ -2736,8 +2778,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, } } - if (info->dest_buf) - memcpy(info->dest_buf + off, dma_buf, len); + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) @@ -2791,7 +2840,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, - void *buf, u32 offset) + void *buf, u32 buf_len, u32 offset) { struct hwrm_dbg_coredump_retrieve_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; @@ -2806,8 +2855,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, data_len); - if (buf) + if (buf) { info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); if (!rc) @@ -2862,8 +2914,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->asic_state = 0; strlcpy(record->system_name, utsname()->nodename, sizeof(record->system_name)); - record->year = cpu_to_le16(tm.tm_year); - record->month = cpu_to_le16(tm.tm_mon); + record->year = cpu_to_le16(tm.tm_year + 1900); + record->month = cpu_to_le16(tm.tm_mon + 1); record->day = cpu_to_le16(tm.tm_mday); record->hour = cpu_to_le16(tm.tm_hour); record->minute = cpu_to_le16(tm.tm_min); @@ -2897,14 +2949,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) { u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; struct coredump_segment_record *seg_record = NULL; - u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; int rc = 0, i; + if (buf) + buf_len = *dump_len; + start_time = ktime_get_real_seconds(); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -2937,6 +2992,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) u32 duration = 0, seg_len = 0; unsigned long start, end; + if (buf && ((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + start = jiffies; rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); @@ -2949,9 +3010,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) /* Write segment data into the buffer */ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, + &seg_len, buf, buf_len, offset + seg_hdr_len); - if (rc) + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) netdev_err(bp->dev, "Failed to retrieve coredump for seg = %d\n", seg_record->segment_id); @@ -2981,7 +3044,8 @@ static int bnxt_get_coredump(struct bnxt *bp, void 
*buf, u32 *dump_len) rc); kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); - + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index b5b65b3f85340a932ef7ca25985b68ec07545903..3998f6e809a94cdd6233fe6496ab26a03f5a2a5a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -31,6 +31,8 @@ struct bnxt_coredump { u16 total_segs; }; +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + struct bnxt_hwrm_dbg_dma_info { void *dest_buf; int dest_buf_size; @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info { u16 seq_off; u16 data_len_off; u16 segs; + u32 seg_start; + u32 buf_len; }; struct hwrm_dbg_cmn_input { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 20c1681bb1afeea35e23f20242abc0fe34fd1304..b7d75011cede58a13bf8e7188c7f8cd6b43e8735 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, break; } - return 0; + return ret; } static void bcmgenet_power_up(struct bcmgenet_priv *priv, @@ -1998,8 +1998,6 @@ static void reset_umac(struct bcmgenet_priv *priv) /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); - udelay(2); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -2020,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) */ if (priv->internal_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -2618,11 +2618,16 @@ static void bcmgenet_irq_task(struct work_struct *work) priv->irq0_stat = 0; spin_unlock_irq(&priv->lock); + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + /* Link UP/DOWN event */ - if (status & UMAC_IRQ_LINK_EVENT) { - priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP); + if (status & UMAC_IRQ_LINK_EVENT) phy_mac_interrupt(priv->dev->phydev); - } + } /* bcmgenet_isr1: handle Rx and Tx priority queues */ @@ -2717,7 +2722,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } /* all other interested interrupts handled in bottom half */ - status &= UMAC_IRQ_LINK_EVENT; + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); if (status) { /* Save irq status for bottom-half processing. 
*/ spin_lock_irqsave(&priv->lock, flags); @@ -2855,7 +2860,6 @@ static void bcmgenet_netif_start(struct net_device *dev) umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); - netif_tx_start_all_queues(dev); bcmgenet_enable_tx_napi(priv); /* Monitor link interrupts now */ @@ -2937,6 +2941,8 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_netif_start(dev); + netif_tx_start_all_queues(dev); + return 0; err_irq1: @@ -2958,7 +2964,7 @@ static void bcmgenet_netif_stop(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); bcmgenet_disable_tx_napi(priv); - netif_tx_stop_all_queues(dev); + netif_tx_disable(dev); /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -3085,39 +3091,42 @@ static void bcmgenet_timeout(struct net_device *dev) netif_tx_wake_all_queues(dev); } -#define MAX_MC_COUNT 16 +#define MAX_MDF_FILTER 17 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, unsigned char *addr, - int *i, - int *mc) + int *i) { - u32 reg; - bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4)); bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5], UMAC_MDF_ADDR + ((*i + 1) * 4)); - reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); - reg |= (1 << (MAX_MC_COUNT - *mc)); - bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); *i += 2; - (*mc)++; } static void bcmgenet_set_rx_mode(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct netdev_hw_addr *ha; - int i, mc; + int i, nfilter; u32 reg; netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); - /* Promiscuous mode */ + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (dev->flags & IFF_PROMISC) { + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { reg |= CMD_PROMISC; bcmgenet_umac_writel(priv, reg, UMAC_CMD); bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); @@ -3127,32 +3136,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev) bcmgenet_umac_writel(priv, reg, UMAC_CMD); } - /* UniMac doesn't support ALLMULTI */ - if (dev->flags & IFF_ALLMULTI) { - netdev_warn(dev, "ALLMULTI is not supported\n"); - return; - } - /* update MDF filter */ i = 0; - mc = 0; /* Broadcast */ - bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); /* my own address.*/ - bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); - /* Unicast list*/ - if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) - return; + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); - if (!netdev_uc_empty(dev)) - netdev_for_each_uc_addr(ha, dev) - bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); - /* Multicast */ - if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) - return; + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + /* Multicast */ netdev_for_each_mc_addr(ha, dev) - bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); } /* Set the hardware MAC address. 
*/ @@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d) if (!netif_running(dev)) return 0; + netif_device_detach(dev); + bcmgenet_netif_stop(dev); if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - netif_device_detach(dev); - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); @@ -3674,6 +3675,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(dev->phydev); /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); bcmgenet_mii_config(priv->dev, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); @@ -3700,8 +3702,6 @@ static int bcmgenet_resume(struct device *d) /* Always enable ring 16 - descriptor ring */ bcmgenet_enable_dma(priv, dma_ctrl); - netif_device_attach(dev); - if (!device_may_wakeup(d)) phy_resume(dev->phydev); @@ -3710,6 +3710,8 @@ static int bcmgenet_resume(struct device *d) bcmgenet_netif_start(dev); + netif_device_attach(dev); + return 0; out_clk_disable: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 14b49612aa8639816c1d4b58bdbf5d9ff1995248..4dabf37319c84301773dabaf3011accda2e15e54 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -369,6 +369,7 @@ struct bcmgenet_mib_counters { #define EXT_PWR_DOWN_PHY_EN (1 << 20) #define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) #define RGMII_LINK (1 << 4) #define OOB_DISABLE (1 << 5) #define RGMII_MODE_EN (1 << 6) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 34af5f1569c8f4105d3cf36789eefdb0d63509dc..a5049d637791d404e07329778d373c9dff24cf18 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -184,8 +184,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) const char *phy_name = NULL; u32 id_mode_dis = 0; u32 port_ctrl; + int bmcr = -1; + int ret; u32 reg; + /* MAC clocking workaround during reset of umac state machines */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) { + /* An MII PHY must be isolated to prevent TXC contention */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ret = phy_read(phydev, MII_BMCR); + if (ret >= 0) { + bmcr = ret; + ret = phy_write(phydev, MII_BMCR, + bmcr | BMCR_ISOLATE); + } + if (ret) { + netdev_err(dev, "failed to isolate PHY\n"); + return ret; + } + } + /* Switch MAC clocking to RGMII generated clock */ + bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + /* Ensure 5 clks with Rx disabled + * followed by 5 clks with Reset asserted + */ + udelay(4); + reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN); + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + /* Ensure 5 more clocks before Rx is enabled */ + udelay(2); + } + priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); @@ -217,6 +247,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) phydev->supported &= PHY_BASIC_FEATURES; bcmgenet_sys_writel(priv, PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); + /* Restore the MII PHY after isolation */ + if (bmcr >= 0) + phy_write(phydev, MII_BMCR, bmcr); break; case PHY_INTERFACE_MODE_REVMII: @@ -226,11 +259,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. 
*/ - if ((dev->phydev->supported & PHY_BASIC_FEATURES) == - PHY_BASIC_FEATURES) - port_ctrl = PORT_MODE_EXT_RVMII_25; - else + if (dev->phydev->supported & PHY_1000BT_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); break; @@ -261,7 +293,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) */ if (priv->ext_phy) { reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } @@ -276,11 +312,12 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; struct phy_device *phydev; - u32 phy_flags; + u32 phy_flags = 0; int ret; /* Communicate the integrated PHY revision */ - phy_flags = priv->gphy_rev; + if (priv->internal_phy) + phy_flags = priv->gphy_rev; /* Initialize link state variables that bcmgenet_mii_setup() uses */ priv->old_link = -1; @@ -342,7 +379,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) if (!compat) return NULL; - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); + priv->mdio_dn = of_get_compatible_child(dn, compat); kfree(compat); if (!priv->mdio_dn) { dev_err(kdev, "unable to find MDIO bus node\n"); diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ef4a0c326736dc2518216b3aee4108e5ecf5133c..7e3f9642ba6c56d04b8b0d1968a15da4edf19418 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); @@ -2028,7 +2028,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Return value: * nothing ********************************************************************* */ -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e6f28c7942abf08da76f4eef87bce5f88a7b0334..a12962702611f798d149cb93772177497be3fc9f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12426,6 +12426,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e { struct tg3 *tp = netdev_priv(dev); int i, irq_sync = 0, err = 0; + bool reset_phy = false; if ((ering->rx_pending > tp->rx_std_ring_mask) || (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || @@ -12457,7 +12458,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 
1); - err = tg3_restart_hw(tp, false); + /* Reset PHY to avoid PHY lock up */ + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + reset_phy = true; + + err = tg3_restart_hw(tp, reset_phy); if (!err) tg3_netif_start(tp); } @@ -12491,6 +12498,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam { struct tg3 *tp = netdev_priv(dev); int err = 0; + bool reset_phy = false; if (tp->link_config.autoneg == AUTONEG_ENABLE) tg3_warn_mgmt_link_flap(tp); @@ -12581,7 +12589,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_restart_hw(tp, false); + /* Reset PHY to avoid PHY lock up */ + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + reset_phy = true; + + err = tg3_restart_hw(tp, reset_phy); if (!err) tg3_netif_start(tp); } diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index c438d032e8bffcce79a2168460510cf385e4e7d5..1af883c849ad9c9471b52acac3f2b02fc0d5e452 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -418,7 +418,7 @@ struct bna_ib { /* Tx object */ /* Tx datapath control structure */ -#define BNA_Q_NAME_SIZE 16 +#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6) struct bna_tcb { /* Fast path */ void **sw_qpt; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ea5f32ea308a94b26d590ed838eae79fbf51897a..abbe1e23bbe84da5fb950dde85f7106486847dd7 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1543,8 +1543,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, for (i = 0; i < num_txqs; i++) { vector_num = tx_info->tcb[i]->intr_vector; - sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, - tx_id + tx_info->tcb[i]->id); + snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d", + bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_tx, 0, tx_info->tcb[i]->name, @@ -1594,9 +1595,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, for (i = 0; i < num_rxps; i++) { vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; - sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", - bnad->netdev->name, - rx_id + rx_info->rx_ctrl[i].ccb->id); + snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE, + "%s CQ %d", bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_rx, 0, rx_info->rx_ctrl[i].ccb->name, diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 933799be0471bc9bccb3b816ad954eccf5564a43..d549fdb6bbe2ec3ca6c0b638abed98c96c1b26b9 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -320,7 +320,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); @@ -380,7 +380,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf 
= memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 3d45f4c92cf6e5d3f091ae654e5312165956d19f..efb44d5ab02156282f477fde76e8ca3bf4675f1b 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -499,7 +499,11 @@ /* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 -#define GEM_SUBNSINCR_SIZE 16 +#define GEM_SUBNSINCRL_OFFSET 24 +#define GEM_SUBNSINCRL_SIZE 8 +#define GEM_SUBNSINCRH_OFFSET 0 +#define GEM_SUBNSINCRH_SIZE 16 +#define GEM_SUBNSINCR_SIZE 24 /* Bitfields in TI */ #define GEM_NSINCR_OFFSET 0 @@ -643,6 +647,7 @@ #define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 +#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -1214,6 +1219,8 @@ struct macb { int rx_bd_rd_prefetch; int tx_bd_rd_prefetch; + + u32 rx_intr_mask; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 58b9744c405805d6c4ebf9c1bf5c840cee3ee3d3..c2eb188547942fa58c3b41d9b416b7f3062cf796 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -56,12 +56,12 @@ /* level of occupied TX descriptors under which we wake up TX process */ #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) -#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ - | MACB_BIT(ISR_ROVR)) +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ | MACB_BIT(ISR_RLE) \ | MACB_BIT(TXERR)) -#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \ + | MACB_BIT(TXUBR)) /* Max length of transmit frame must be a multiple of 8 bytes */ #define MACB_TX_LEN_ALIGN 8 @@ -681,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_ if (bp->hw_dma_cap & HW_DMA_CAP_64B) { desc_64 = macb_64b_desc(bp, desc); desc_64->addrh = upper_32_bits(addr); + /* The low bits of RX address contain the RX_USED bit, clearing + * of which allows packet RX. Make sure the high bits are also + * visible to HW at that point. + */ + dma_wmb(); } #endif desc->addr = lower_32_bits(addr); @@ -855,7 +860,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) /* First, update TX stats if needed */ if (skb) { - if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + gem_ptp_do_txstamp(queue, skb, desc) == 0) { /* skb now belongs to timestamp buffer * and will be removed later */ @@ -929,14 +936,19 @@ static void gem_rx_refill(struct macb_queue *queue) if (entry == bp->rx_ring_size - 1) paddr |= MACB_BIT(RX_WRAP); - macb_set_addr(bp, desc, paddr); desc->ctrl = 0; + /* Setting addr clears RX_USED and allows reception, + * make sure ctrl is cleared first to avoid a race. + */ + dma_wmb(); + macb_set_addr(bp, desc, paddr); /* properly align Ethernet header */ skb_reserve(skb, NET_IP_ALIGN); } else { - desc->addr &= ~MACB_BIT(RX_USED); desc->ctrl = 0; + dma_wmb(); + desc->addr &= ~MACB_BIT(RX_USED); } } @@ -990,11 +1002,15 @@ static int gem_rx(struct macb_queue *queue, int budget) rxused = (desc->addr & MACB_BIT(RX_USED)) ? 
true : false; addr = macb_get_addr(bp, desc); - ctrl = desc->ctrl; if (!rxused) break; + /* Ensure ctrl is at least as up-to-date as rxused */ + dma_rmb(); + + ctrl = desc->ctrl; + queue->rx_tail++; count++; @@ -1169,11 +1185,14 @@ static int macb_rx(struct macb_queue *queue, int budget) /* Make hw descriptor updates visible to CPU */ rmb(); - ctrl = desc->ctrl; - if (!(desc->addr & MACB_BIT(RX_USED))) break; + /* Ensure ctrl is at least as up-to-date as addr */ + dma_rmb(); + + ctrl = desc->ctrl; + if (ctrl & MACB_BIT(RX_SOF)) { if (first_frag != -1) discard_partial_frame(queue, first_frag, tail); @@ -1253,7 +1272,7 @@ static int macb_poll(struct napi_struct *napi, int budget) queue_writel(queue, ISR, MACB_BIT(RCOMP)); napi_reschedule(napi); } else { - queue_writel(queue, IER, MACB_RX_INT_FLAGS); + queue_writel(queue, IER, bp->rx_intr_mask); } } @@ -1271,7 +1290,7 @@ static void macb_hresp_error_task(unsigned long data) u32 ctrl; for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, IDR, MACB_RX_INT_FLAGS | + queue_writel(queue, IDR, bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1301,7 +1320,7 @@ static void macb_hresp_error_task(unsigned long data) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1313,6 +1332,21 @@ static void macb_hresp_error_task(unsigned long data) netif_tx_start_all_queues(dev); } +static void macb_tx_restart(struct macb_queue *queue) +{ + unsigned int head = queue->tx_head; + unsigned int tail = queue->tx_tail; + struct macb *bp = queue->bp; + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(TXUBR)); + + if (head == tail) + return; + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); +} + static irqreturn_t macb_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -1340,14 +1374,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned int)(queue - bp->queues), (unsigned long)status); - if (status & MACB_RX_INT_FLAGS) { + if (status & bp->rx_intr_mask) { /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts * now. */ - queue_writel(queue, IDR, MACB_RX_INT_FLAGS); + queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); @@ -1370,6 +1404,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(TCOMP)) macb_tx_interrupt(queue); + if (status & MACB_BIT(TXUBR)) + macb_tx_restart(queue); + /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ @@ -1377,8 +1414,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) /* There is a hardware issue under heavy load where DMA can * stop, this causes endless "used buffer descriptor read" * interrupts but it can be cleared by re-enabling RX. See - * the at91 manual, section 41.3.1 or the Zynq manual - * section 16.7.4 for details. + * the at91rm9200 manual, section 41.3.1 or the Zynq manual + * section 16.7.4 for details. RXUBR is only enabled for + * these two versions. */ if (status & MACB_BIT(RXUBR)) { ctrl = macb_readl(bp, NCR); @@ -1685,7 +1723,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) padlen = 0; /* No room for FCS, need to reallocate skb. 
*/ else - padlen = ETH_FCS_LEN - tailroom; + padlen = ETH_FCS_LEN; } else { /* Add room for FCS. */ padlen += ETH_FCS_LEN; @@ -2228,7 +2266,7 @@ static void macb_init_hw(struct macb *bp) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -2383,12 +2421,12 @@ static int macb_open(struct net_device *dev) return err; } - bp->macbgem_ops.mog_init_rings(bp); - macb_init_hw(bp); - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_enable(&queue->napi); + bp->macbgem_ops.mog_init_rings(bp); + macb_init_hw(bp); + /* schedule a link state check */ phy_start(dev->phydev); @@ -3287,15 +3325,21 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, *hclk = devm_clk_get(&pdev->dev, "hclk"); } - if (IS_ERR(*pclk)) { + if (IS_ERR_OR_NULL(*pclk)) { err = PTR_ERR(*pclk); - dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); + if (!err) + err = -ENODEV; + + dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); return err; } - if (IS_ERR(*hclk)) { + if (IS_ERR_OR_NULL(*hclk)) { err = PTR_ERR(*hclk); - dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); + if (!err) + err = -ENODEV; + + dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); return err; } @@ -3309,25 +3353,25 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, err = clk_prepare_enable(*pclk); if (err) { - dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); return err; } err = clk_prepare_enable(*hclk); if (err) { - dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); goto err_disable_pclk; } err = clk_prepare_enable(*tx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); goto err_disable_hclk; } err = clk_prepare_enable(*rx_clk); if (err) { - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); goto err_disable_txclk; } @@ -3797,7 +3841,7 @@ static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, err = clk_prepare_enable(*pclk); if (err) { - dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); return err; } @@ -3876,6 +3920,7 @@ static const struct macb_config sama5d4_config = { }; static const struct macb_config emac_config = { + .caps = MACB_CAPS_NEEDS_RSTONUBR, .clk_init = at91ether_clk_init, .init = at91ether_init, }; @@ -3897,7 +3942,8 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | + MACB_CAPS_NEEDS_RSTONUBR, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4052,6 +4098,10 @@ static int macb_probe(struct platform_device *pdev) macb_dma_desc_get_size(bp); } + bp->rx_intr_mask = MACB_RX_INT_FLAGS; + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) + bp->rx_intr_mask |= MACB_BIT(RXUBR); + mac = of_get_mac_address(np); if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); @@ -4144,6 +4194,7 @@ static int macb_remove(struct platform_device *pdev) mdiobus_free(bp->mii_bus); unregister_netdev(dev); + tasklet_kill(&bp->hresp_err_tasklet); clk_disable_unprepare(bp->tx_clk); 
clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 248a8fc450691d81f5553133460864fe114bc370..f06fddf9919bf29915d9b3be01eb96f993761d54 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -123,9 +123,9 @@ static void macb_remove(struct pci_dev *pdev) struct platform_device *plat_dev = pci_get_drvdata(pdev); struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); - platform_device_unregister(plat_dev); clk_unregister(plat_data->pclk); clk_unregister(plat_data->hclk); + platform_device_unregister(plat_dev); } static const struct pci_device_id dev_id_table[] = { diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index cd5296b842290302a5904b4c590e51fdea741bbd..8f912de44defcab99c4fc261249b79415f6b8c1f 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -115,7 +115,10 @@ static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec) * to take effect. */ spin_lock_irqsave(&bp->tsu_clk_lock, flags); - gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns)); + /* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */ + gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) | + GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >> + GEM_SUBNSINCRL_SIZE))); gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns)); spin_unlock_irqrestore(&bp->tsu_clk_lock, flags); @@ -319,6 +322,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, desc_ptp = macb_ptp_desc(queue->bp, desc); tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; + /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ + dma_rmb(); tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1; tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2; /* move head */ diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 6aeb1045c302ad4cb1dd4efab54e414eb2bc14b9..1ab40c97403bad5f9e6612367167b840c52e0bb8 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -10,7 +10,7 @@ #include "cavium_ptp.h" -#define DRV_NAME "Cavium PTP Driver" +#define DRV_NAME "cavium_ptp" #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8093c5eafea23f215e68792c725fb05eeafa87e4..781814835a4f4c06df84aa9495fce7dea8ffb695 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -985,7 +985,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) if (droq->ops.poll_mode) { droq->ops.napi_fn(droq); - oct_priv->napi_mask |= (1 << oq_no); + oct_priv->napi_mask |= BIT_ULL(oq_no); } else { tasklet_schedule(&oct_priv->droq_tasklet); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6fb13fa73b271c9d05f49ad5a04ea71f9a12c987..304e4b9436276c780e1e2f690f2c31d4d321166e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2324,7 +2324,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ 
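The macb_ptp.c hunk above packs the 24-bit sub-nanosecond TSU increment into two register fields, as its comment spells out: Subns[23:8] lands in register bits [15:0] and Subns[7:0] in bits [31:24]. Below is a minimal sketch of that packing using plain shifts and masks instead of the driver's GEM_BF() helpers; pack_tisubn() is a hypothetical name used only for illustration, and u32 is the usual kernel typedef.

/* Layout from the patch comment: RegBit[15:0] = Subns[23:8],
 * RegBit[31:24] = Subns[7:0].  Hypothetical helper, not part of the patch.
 */
static inline u32 pack_tisubn(u32 sub_ns)
{
	u32 lo = sub_ns & 0xff;			/* Subns[7:0]  -> register bits [31:24] */
	u32 hi = (sub_ns >> 8) & 0xffff;	/* Subns[23:8] -> register bits [15:0]  */

	return (lo << 24) | hi;
}

For example, pack_tisubn(0x123456) yields 0x56001234, which matches what the GEM_BF(SUBNSINCRL, ...) | GEM_BF(SUBNSINCRH, ...) expression in the hunk produces given the SUBNSINCRL (offset 24, size 8) and SUBNSINCRH (offset 0, size 16) bitfields added to macb.h.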
-static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct lio *lio; struct octnet_buf_free_info *finfo; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b77835724dc84d037c88bcb9ef7153db8f1f6e48..d83773bc0dd7f6352c8a010fdc6720e1d9d41a32 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1390,7 +1390,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct, * @returns whether the packet was transmitted to the device okay or not * (NETDEV_TX_OK or NETDEV_TX_BUSY) */ -static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octnet_buf_free_info *finfo; union octnic_cmd_setup cmdsetup; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ddd7431579f4e6d51a335d9336074268f62fce8f..d90500573f5b796a874ea2e0d5a04d4c9770bf9f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -31,7 +31,8 @@ static int lio_vf_rep_open(struct net_device *ndev); static int lio_vf_rep_stop(struct net_device *ndev); -static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev); +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, + struct net_device *ndev); static void lio_vf_rep_tx_timeout(struct net_device *netdev); static int lio_vf_rep_phys_port_name(struct net_device *dev, char *buf, size_t len); @@ -288,13 +289,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct, pg_info->page_offset; memcpy(skb->data, va, MIN_SKB_SIZE); skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); } else { struct octeon_skb_page_info *pg_info = ((struct octeon_skb_page_info *)(skb->cb)); @@ -367,20 +367,22 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct, struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; struct sk_buff *skb = sc->ctxptr; struct net_device *ndev = skb->dev; + u32 iq_no; dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, sc->datasize, DMA_TO_DEVICE); dev_kfree_skb_any(skb); + iq_no = sc->iq_no; octeon_free_soft_command(oct, sc); - if (octnet_iq_is_full(oct, sc->iq_no)) + if (octnet_iq_is_full(oct, iq_no)) return; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } -static int +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) { struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f878a552fef3b36fcf98ee822d6e7a635401b92b..d0ed6c4f9e1a2922aee49e9e36256a3268671072 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1450,8 +1450,9 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) } if (iq) { spin_lock_bh(&iq->lock); - writel(iq->pkt_in_done, iq->inst_cnt_reg); - iq->pkt_in_done = 0; + writel(iq->pkts_processed, iq->inst_cnt_reg); + 
iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ mmiowb(); spin_unlock_bh(&iq->lock); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 2327062e8af6b7af4b5cc146a88234ec60e89bb4..aecd0d36d6349869e8703f48b62f2e94fd18397c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -94,6 +94,8 @@ struct octeon_instr_queue { u32 pkt_in_done; + u32 pkts_processed; + /** A spinlock to protect access to the input ring.*/ spinlock_t iq_flush_running_lock; diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 8f746e1348d4cf11272b4fb316b8ac084a32f6f0..1d9ab7f4a2fef2d6d0a09f34d22f43b4db564034 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, iq->do_auto_flush = 1; iq->db_timeout = (u32)conf->db_timeout; atomic_set(&iq->instr_pending, 0); + iq->pkts_processed = 0; /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); @@ -238,8 +239,10 @@ int octeon_setup_iq(struct octeon_device *oct, } oct->num_iqs++; - if (oct->fn_list.enable_io_queues(oct)) + if (oct->fn_list.enable_io_queues(oct)) { + octeon_delete_instr_queue(oct, iq_no); return 1; + } return 0; } @@ -495,6 +498,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { + iq->pkts_processed += inst_processed; atomic_sub(inst_processed, &iq->instr_pending); iq->stats.instr_processed += inst_processed; } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index bb43ddb7539e719d0cbff780e5ddf17c756dbe05..0957e735cdc4d12baa2d94793b436205a3f6080a 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1268,12 +1268,13 @@ static int octeon_mgmt_stop(struct net_device *netdev) return 0; } -static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union mgmt_port_ring_entry re; unsigned long flags; - int rv = NETDEV_TX_BUSY; + netdev_tx_t rv = NETDEV_TX_BUSY; re.d64 = 0; re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); @@ -1495,7 +1496,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; - netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; + netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; mac = of_get_mac_address(pdev->dev.of_node); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 55af04fa03a77e850196e82e930e3f85af7c6aa7..90497a27df184892d81b9365759a8f841976c860 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1039,7 +1039,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic_enable_vf(nic, vf, true); - goto unlock; + break; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence 
*/ if (vf >= nic->num_vf_en) @@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev) { struct nicpf *nic = pci_get_drvdata(pdev); + if (!nic) + return; + if (nic->flags & NIC_SRIOV_ENABLED) pci_disable_sriov(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 768f584f8392732b19d6e889ed456a3c6de8e809..dca02b35c231a85a81ec98f7f0ce574fa206e2cd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -32,6 +32,13 @@ #define DRV_NAME "nicvf" #define DRV_VERSION "1.0" +/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs + * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed + * this value, keeping headroom for the 14 byte Ethernet header and two + * VLAN tags (for QinQ) + */ +#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) + /* Supported devices */ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, @@ -172,6 +179,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic) return 1; } +static void nicvf_send_cfg_done(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + netdev_err(nic->netdev, + "PF didn't respond to CFG DONE msg\n"); + } +} + static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) { if (bgx->rx) @@ -1416,7 +1434,6 @@ int nicvf_open(struct net_device *netdev) struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - union nic_mbx mbx = {}; netif_carrier_off(netdev); @@ -1512,8 +1529,7 @@ int nicvf_open(struct net_device *netdev) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); /* Send VF config done msg to PF */ - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; - nicvf_write_to_mbx(nic, &mbx); + nicvf_send_cfg_done(nic); return 0; cleanup: @@ -1538,6 +1554,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) struct nicvf *nic = netdev_priv(netdev); int orig_mtu = netdev->mtu; + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. + */ + if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } + netdev->mtu = new_mtu; if (!netif_running(netdev)) @@ -1784,9 +1809,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) bool if_up = netif_running(nic->netdev); struct bpf_prog *old_prog; bool bpf_attached = false; + int ret = 0; - /* For now just support only the usual MTU sized frames */ - if (prog && (dev->mtu > 1500)) { + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. 
+ */ + if (prog && dev->mtu > MAX_XDP_MTU) { netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", dev->mtu); return -EOPNOTSUPP; @@ -1817,8 +1845,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) if (nic->xdp_prog) { /* Attach BPF program */ nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); - if (!IS_ERR(nic->xdp_prog)) + if (!IS_ERR(nic->xdp_prog)) { bpf_attached = true; + } else { + ret = PTR_ERR(nic->xdp_prog); + nic->xdp_prog = NULL; + } } /* Calculate Tx queues needed for XDP and network stack */ @@ -1830,7 +1862,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) netif_trans_update(nic->netdev); } - return 0; + return ret; } static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) @@ -1936,7 +1968,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* flush DMAC filters and reset RX mode */ mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; if (mode & BGX_XCAST_MCAST_FILTER) { /* once enabling filtering, we need to signal to PF to add @@ -1944,7 +1977,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, */ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; mbx.xcast.data.mac = 0; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } /* check if we have any specific MACs to be added to PF DMAC filter */ @@ -1953,9 +1987,9 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, for (idx = 0; idx < mc_addrs->count; idx++) { mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; mbx.xcast.data.mac = mc_addrs->mc[idx]; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } - kfree(mc_addrs); } /* and finally set rx mode for PF accordingly */ @@ -1963,6 +1997,8 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, mbx.xcast.data.mode = mode; nicvf_send_msg_to_pf(nic, &mbx); +free_mc: + kfree(mc_addrs); } static void nicvf_set_rx_mode_task(struct work_struct *work_arg) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 187a249ff2d1d2fd4201a4e74b9459cbeb3a4b52..9a4cfa61ed93ad693883ebe16325005bdd743db9 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, /* Check if page can be recycled */ if (page) { ref_count = page_ref_count(page); - /* Check if this page has been used once i.e 'put_page' - * called after packet transmission i.e internal ref_count - * and page's ref_count are equal i.e page can be recycled. + /* This page can be recycled if internal ref_count and page's + * ref_count are equal, indicating that the page has been used + * once for packet transmission. For non-XDP mode, internal + * ref_count is always '1'. */ - if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) - pgcache->ref_count--; - else - page = NULL; - - /* In non-XDP mode, page's ref_count needs to be '1' for it - * to be recycled. 
- */ - if (!rbdr->is_xdp && (ref_count != 1)) + if (rbdr->is_xdp) { + if (ref_count == pgcache->ref_count) + pgcache->ref_count--; + else + page = NULL; + } else if (ref_count != 1) { page = NULL; + } } if (!page) { @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) while (head < rbdr->pgcnt) { pgcache = &rbdr->pgcache[head]; if (pgcache->page && page_ref_count(pgcache->page) != 0) { - if (!rbdr->is_xdp) { - put_page(pgcache->page); - continue; + if (rbdr->is_xdp) { + page_ref_sub(pgcache->page, + pgcache->ref_count - 1); } - page_ref_sub(pgcache->page, pgcache->ref_count - 1); put_page(pgcache->page); } head++; @@ -585,10 +583,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) if (!sq->dmem.base) return; - if (sq->tso_hdrs) + if (sq->tso_hdrs) { dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); + sq->tso_hdrs = NULL; + } /* Free pending skbs in the queue */ smp_rmb(); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index e337da6ba2a4c16973f4728c9fd9564da162942c..8ae28f82aafdc2226d2b26f24ef48a0809b32f4c 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1118,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) phy_interface_mode(lmac->lmac_type))) return -ENODEV; - phy_start_aneg(lmac->phydev); + phy_start(lmac->phydev); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index c34ea385fe4a5b40d9f2905780ed6224fccfb11c..6be6de0774b61f7cd90e7e607326c1e5ac8052c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -ENOMEM; - goto out_free_adapter; + goto out_free_adapter_nofail; } adapter->pdev = pdev; @@ -3398,6 +3398,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (adapter->port[i]) free_netdev(adapter->port[i]); +out_free_adapter_nofail: + kfree_skb(adapter->nofail_skb); + out_free_adapter: kfree(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c2fd323c4078284ac84aa58825cc84958f108fe8..ea75f275023ffad95a4e2dfa46b1db51935417d8 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -75,8 +75,8 @@ struct l2t_data { struct l2t_entry *rover; /* starting point for next allocation */ atomic_t nfree; /* number of free entries */ rwlock_t lock; - struct l2t_entry l2tab[0]; struct rcu_head rcu_head; /* to handle rcu cleanup */ + struct l2t_entry l2tab[]; }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d97e0d7e541afde772cf669a98f58d9a5a6eb626..b766362031c32e4be277d13c8f6b0c3397eab97d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1065,14 +1065,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, } } -static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err, - u8 mem_type) +static unsigned long 
cudbg_mem_region_size(struct cudbg_init *pdbg_init, + struct cudbg_error *cudbg_err, + u8 mem_type) { struct adapter *padap = pdbg_init->adap; struct cudbg_meminfo mem_info; - unsigned long size; u8 mc_idx; int rc; @@ -1086,7 +1084,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, if (rc) return rc; - size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base; + return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base; +} + +static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err, + u8 mem_type) +{ + unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type); + return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size, cudbg_err); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index b34f0f077a3107662ffb2799e09a40e05ec5848a..838692948c0b22ea5447b07195d29cdbf76a60fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -273,8 +273,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & FW_PORT_CMD_ALL_SYNCD_F) - ? CXGB4_DCB_STATE_FW_ALLSYNCED - : CXGB4_DCB_STATE_FW_INCOMPLETE); + ? CXGB4_DCB_INPUT_FW_ALLSYNCED + : CXGB4_DCB_INPUT_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 02040b99c78a0561da461c1497a26bfb92ce2f40..484ee829009036883efde95754d419930f7e096c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -67,7 +67,7 @@ do { \ if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ cxgb4_dcb_state_fsm((__dev), \ - CXGB4_DCB_STATE_FW_ALLSYNCED); \ + CXGB4_DCB_INPUT_FW_ALLSYNCED); \ } while (0) /* States we can be in for a port's Data Center Bridging. 
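The cxgb4 DCB hunks above fix a state/input mix-up: a value stored in an enum cxgb4_dcb_state_input variable, and the value fed to cxgb4_dcb_state_fsm(), was a CXGB4_DCB_STATE_* constant rather than the intended CXGB4_DCB_INPUT_* one. Plain C compiles such cross-enum use silently, so keeping FSM states and inputs in separate enums only helps if callers are audited or extra diagnostics are enabled. A generic sketch of the pattern follows; the enum and function names are illustrative, not the cxgb4 API.

/* Generic illustration of an FSM keyed on (state, input).  Passing a
 * state constant where an input is expected still compiles in plain C,
 * which is the class of bug corrected above.
 */
enum fsm_state { STATE_FW_INCOMPLETE, STATE_FW_ALLSYNCED };
enum fsm_input { INPUT_FW_INCOMPLETE, INPUT_FW_ALLSYNCED };

static enum fsm_state fsm_step(enum fsm_state cur, enum fsm_input in)
{
	switch (cur) {
	case STATE_FW_INCOMPLETE:
		return in == INPUT_FW_ALLSYNCED ? STATE_FW_ALLSYNCED : cur;
	case STATE_FW_ALLSYNCED:
		return in == INPUT_FW_INCOMPLETE ? STATE_FW_INCOMPLETE : cur;
	}
	return cur;
}

A call such as fsm_step(cur, STATE_FW_ALLSYNCED) builds without complaint even though it hands a state value to the input parameter; compilers that support -Wenum-conversion (clang, and recent gcc for C) flag the implicit conversion between distinct enum types, which is one way to catch this kind of slip earlier.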
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0f72f9c4ec74c6f42722d3e731071b535d1bab33..d320e9afab880cac92bb7d7eec7ce5c393c0a802 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3035,6 +3035,9 @@ static int sge_queue_entries(const struct adapter *adap) int tot_uld_entries = 0; int i; + if (!is_uld(adap)) + goto lld_only; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_TX_MAX; i++) tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); @@ -3045,6 +3048,7 @@ static int sge_queue_entries(const struct adapter *adap) } mutex_unlock(&uld_mutex); +lld_only: return DIV_ROUND_UP(adap->sge.ethqsets, 4) + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; @@ -3276,8 +3280,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, return -ENOMEM; err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); - if (err) + if (err) { + kvfree(t); return err; + } bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); kvfree(t); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 961e3087d1d38c3294c5e49c0fab9f1922bf15b9..bb04c695ab9fdc29d272695e3687472619276c18 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6010,15 +6010,24 @@ static int __init cxgb4_init_module(void) ret = pci_register_driver(&cxgb4_driver); if (ret < 0) - debugfs_remove(cxgb4_debugfs_root); + goto err_pci; #if IS_ENABLED(CONFIG_IPV6) if (!inet6addr_registered) { - register_inet6addr_notifier(&cxgb4_inet6addr_notifier); - inet6addr_registered = true; + ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier); + if (ret) + pci_unregister_driver(&cxgb4_driver); + else + inet6addr_registered = true; } #endif + if (ret == 0) + return ret; + +err_pci: + debugfs_remove(cxgb4_debugfs_root); + return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index c116f96956fe9370c7927070ee6c9196ba26582b..d45c435a599d667c8595f1ed9fc1d1f62c65ec52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = { static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); - spin_lock_init(&new->lock); + if (new) + spin_lock_init(&new->lock); return new; } @@ -228,6 +229,9 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->val.ivlan = vlan_tci; fs->mask.ivlan = vlan_tci_mask; + fs->val.ivlan_vld = 1; + fs->mask.ivlan_vld = 1; + /* Chelsio adapters use ivlan_vld bit to match vlan packets * as 802.1Q. Also, when vlan tag is present in packets, * ethtype match is used then to match on ethtype of inner @@ -238,8 +242,6 @@ static void cxgb4_process_flow_match(struct net_device *dev, * ethtype value with ethtype of inner header. 
*/ if (fs->val.ethtype == ETH_P_8021Q) { - fs->val.ivlan_vld = 1; - fs->mask.ivlan_vld = 1; fs->val.ethtype = 0; fs->mask.ethtype = 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 4bc211093c98e3564e628a7ccbdacbadbd1c040d..3d834d40e81e897b8aa698ac0cee28a4e1753798 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, static int alloc_uld_rxqs(struct adapter *adap, struct sge_uld_rxq_info *rxq_info, bool lro) { - struct sge *s = &adap->sge; unsigned int nq = rxq_info->nrxq + rxq_info->nciq; + int i, err, msi_idx, que_idx = 0, bmap_idx = 0; struct sge_ofld_rxq *q = rxq_info->uldrxq; unsigned short *ids = rxq_info->rspq_id; - unsigned int bmap_idx = 0; + struct sge *s = &adap->sge; unsigned int per_chan; - int i, err, msi_idx, que_idx = 0; per_chan = rxq_info->nrxq / adap->params.nports; @@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap, if (msi_idx >= 0) { bmap_idx = get_msix_idx_from_bmap(adap); + if (bmap_idx < 0) { + err = -ENOSPC; + goto freeout; + } msi_idx = adap->msix_info_ulds[bmap_idx].idx; } err = t4_sge_alloc_rxq(adap, &q->rspq, false, @@ -670,10 +673,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->write_cmpl_support = adap->params.write_cmpl_support; } -static void uld_attach(struct adapter *adap, unsigned int uld) +static int uld_attach(struct adapter *adap, unsigned int uld) { - void *handle; struct cxgb4_lld_info lli; + void *handle; uld_init(adap, &lli); uld_queue_init(adap, uld, &lli); @@ -683,7 +686,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) dev_warn(adap->pdev_dev, "could not attach to the %s driver, error %ld\n", adap->uld[uld].name, PTR_ERR(handle)); - return; + return PTR_ERR(handle); } adap->uld[uld].handle = handle; @@ -691,23 +694,24 @@ static void uld_attach(struct adapter *adap, unsigned int uld) if (adap->flags & FULL_INIT_DONE) adap->uld[uld].state_change(handle, CXGB4_STATE_UP); + + return 0; } -/** - * cxgb4_register_uld - register an upper-layer driver - * @type: the ULD type - * @p: the ULD methods +/* cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods * - * Registers an upper-layer driver with this driver and notifies the ULD - * about any presently available devices that support its type. Returns - * %-EBUSY if a ULD of the same type is already registered. + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. 
*/ int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) { - int ret = 0; unsigned int adap_idx = 0; struct adapter *adap; + int ret = 0; if (type >= CXGB4_ULD_MAX) return -EINVAL; @@ -741,12 +745,16 @@ int cxgb4_register_uld(enum cxgb4_uld type, if (ret) goto free_irq; adap->uld[type] = *p; - uld_attach(adap, type); + ret = uld_attach(adap, type); + if (ret) + goto free_txq; adap_idx++; } mutex_unlock(&uld_mutex); return 0; +free_txq: + release_sge_txq_uld(adap, type); free_irq: if (adap->flags & FULL_INIT_DONE) quiesce_rx_uld(adap, type); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5fe5d16dee724cdca0366e99f06c6ef9b9a17d4b..8350c0c9b89d1878ca6ff81218a70841ce9f37d6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3889,7 +3889,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.param[0].mnem = cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); - c.param[0].val = (__force __be32)op; + c.param[0].val = cpu_to_be32(op); return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index f152da1ce0464c5c065010813213e5f60eb111af..c62a0c830705cc71681890bbcf17223de16bdbac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1453,6 +1453,9 @@ struct cpl_tx_data { #define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S) #define T6_TX_FORCE_F T6_TX_FORCE_V(1U) +#define TX_URG_S 16 +#define TX_URG_V(x) ((x) << TX_URG_S) + #define TX_SHOVE_S 14 #define TX_SHOVE_V(x) ((x) << TX_SHOVE_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index ff84791a0ff853bf571aeb89014ec92d66eb6275..972dc7bd721d9a7412e48c429a75eb376a1b5486 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -722,6 +722,10 @@ static int adapter_up(struct adapter *adapter) if (adapter->flags & USING_MSIX) name_msix_vecs(adapter); + + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adapter->mac_hlist); + adapter->flags |= FULL_INIT_DONE; } @@ -747,8 +751,6 @@ static int adapter_up(struct adapter *adapter) enable_rx(adapter); t4vf_sge_start(adapter); - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004f59552892cf642a9b02efb393ac7..e2919005ead3e1592999b140841b0234344e87b7 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, ppmax = max; /* pool size must be multiple of unsigned long */ - bmap = BITS_TO_LONGS(ppmax); + bmap = ppmax / BITS_PER_TYPE(unsigned long); + if (!bmap) + return NULL; + ppmax = (bmap * sizeof(unsigned long)) << 3; alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, if (reserve_factor) { ppmax_pool = ppmax / reserve_factor; pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + if (!pool) { + ppmax_pool = 0; + reserve_factor = 0; + } pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", 
ndev->name, ppmax, ppmax_pool, pool_index_max); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 13dfdfca49fc7816b8fb64c55d372b19370e4a7f..edc1d19c9c02e32a2eccfad7571183a4e5221c10 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -767,6 +767,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; struct ep93xx_priv *ep; + struct resource *mem; dev = platform_get_drvdata(pdev); if (dev == NULL) @@ -782,8 +783,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev) iounmap(ep->base_addr); if (ep->res != NULL) { - release_resource(ep->res); - kfree(ep->res); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); } free_netdev(dev); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534109f3d1341607b6c4d413b75f65b..d8b80f3634db000ff45e86292139a881a8f6cf89 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic) for (i = 0; i < enic->intr_count; i++) { if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || - (enic->msix[i].affinity_mask && + (cpumask_available(enic->msix[i].affinity_mask) && !cpumask_empty(enic->msix[i].affinity_mask))) continue; if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, @@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic) for (i = 0; i < enic->intr_count; i++) { if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || - !enic->msix[i].affinity_mask || + !cpumask_available(enic->msix[i].affinity_mask) || cpumask_empty(enic->msix[i].affinity_mask)) continue; err = irq_set_affinity_hint(enic->msix_entry[i].vector, @@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { int wq_intr = enic_msix_wq_intr(enic, i); - if (enic->msix[wq_intr].affinity_mask && + if (cpumask_available(enic->msix[wq_intr].affinity_mask) && !cpumask_empty(enic->msix[wq_intr].affinity_mask)) netif_set_xps_queue(enic->netdev, enic->msix[wq_intr].affinity_mask, @@ -803,7 +803,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, return err; } -static inline void enic_queue_wq_skb(struct enic *enic, +static inline int enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb) { unsigned int mss = skb_shinfo(skb)->gso_size; @@ -849,6 +849,7 @@ static inline void enic_queue_wq_skb(struct enic *enic, wq->to_use = buf->next; dev_kfree_skb(skb); } + return err; } /* netif_tx_lock held, process context with BHs disabled, or BH */ @@ -892,7 +893,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - enic_queue_wq_skb(enic, wq, skb); + if (enic_queue_wq_skb(enic, wq, skb)) + goto error; if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) netif_tx_stop_queue(txq); @@ -900,6 +902,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, if (!skb->xmit_more || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); +error: spin_unlock(&enic->wq_lock[txq_map]); return NETDEV_TX_OK; @@ -1149,18 +1152,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); if (port[IFLA_PORT_PROFILE]) { + if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) { + memcpy(pp, 
&prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_NAME; memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), PORT_PROFILE_MAX); } if (port[IFLA_PORT_INSTANCE_UUID]) { + if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_INSTANCE; memcpy(pp->instance_uuid, nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); } if (port[IFLA_PORT_HOST_UUID]) { + if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_HOST; memcpy(pp->host_uuid, nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); @@ -1434,7 +1449,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, * csum is correct or is zero. */ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { + tcp_udp_csum_ok && outer_csum_ok && + (ipv4_csum_ok || ipv6)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = encap; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 1c9ad3630c7754b692079c19ffafec7c07922c80..01a212097836019b8b8002e6ce53fb5f7f033da5 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -577,6 +577,8 @@ static int gmac_setup_txqs(struct net_device *netdev) if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base is not aligned\n"); + dma_free_coherent(geth->dev, len * sizeof(*desc_ring), + desc_ring, port->txq_dma_base); kfree(skb_tab); return -ENOMEM; } @@ -661,7 +663,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, u64_stats_update_begin(&port->tx_stats_syncp); port->tx_frag_stats[nfrags]++; - u64_stats_update_end(&port->ir_stats_syncp); + u64_stats_update_end(&port->tx_stats_syncp); } } @@ -2530,6 +2532,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); + free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 66535d1653f6d8c156b49493fd8ef7827be0b507..98639e772bcaeee2772e0d533ae01b4a0a10ff8d 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = { .remove = de4x5_eisa_remove, } }; -MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); #endif #ifdef CONFIG_PCI @@ -5000,19 +4999,23 @@ mii_get_phy(struct net_device *dev) } if ((j == limit) && (i < DE4X5_MAX_MII)) { for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. 
If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); + if (k < DE4X5_MAX_PHY) { + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); + } else { + goto purgatory; + } } } purgatory: diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index d71cba0842c5af5ff504a5c9b235e47d155bf563..59dc20020c7383a3c400dcfe4ec5060a4b264790 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -585,10 +585,12 @@ static void ec_bhf_remove(struct pci_dev *dev) struct ec_bhf_priv *priv = netdev_priv(net_dev); unregister_netdev(net_dev); - free_netdev(net_dev); pci_iounmap(dev, priv->dma_io); pci_iounmap(dev, priv->io); + + free_netdev(net_dev); + pci_release_regions(dev); pci_clear_master(dev); pci_disable_device(dev); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 58bcee8f0a58b700593d44bc3638e2ed9a2ebfb2..ce041c90adb02855849ba0d4fd974a24da8113f1 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -185,6 +185,7 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; + char desc[32]; struct be_adapter *adapter; struct napi_struct napi; diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3f6749fc889f97216e2dca7dae4a31b0293a83f8..d1905d50c26cb8a58dcee88648f09e1cd709ebd3 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -895,7 +895,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { struct be_adapter *adapter = netdev_priv(netdev); - int status; + int status, cnt; u8 link_status = 0; if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { @@ -906,6 +906,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); + /* check link status before offline tests */ + link_status = netif_carrier_ok(netdev); + if (test->flags & ETH_TEST_FL_OFFLINE) { if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) test->flags |= ETH_TEST_FL_FAILED; @@ -926,13 +929,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, test->flags |= ETH_TEST_FL_FAILED; } - status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); - if (status) { - test->flags |= ETH_TEST_FL_FAILED; - data[4] = -1; - } else if (!link_status) { + /* link status was down prior to test */ + if (!link_status) { test->flags |= ETH_TEST_FL_FAILED; data[4] = 1; + return; + } + + for (cnt = 10; cnt; cnt--) { + status = be_cmd_link_status_query(adapter, NULL, &link_status, + 0); + if (status) { + test->flags |= ETH_TEST_FL_FAILED; + data[4] = -1; + break; + } + + if (link_status) + break; + + msleep_interruptible(500); } } @@ -1105,7 +1121,7 @@ static int be_get_rxnfc(struct net_device *netdev, 
struct ethtool_rxnfc *cmd, cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); break; case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_qs - 1; + cmd->data = adapter->num_rx_qs; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 534787291b44f17a6d47c480e35ee7da5e2e6319..63a0156922b4abf37ae989133cf21439fc7d5acf 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1385,10 +1385,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) be_get_wrb_params_from_skb(adapter, skb, &wrb_params); wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); - if (unlikely(!wrb_cnt)) { - dev_kfree_skb_any(skb); - goto drop; - } + if (unlikely(!wrb_cnt)) + goto drop_skb; /* if os2bmc is enabled and if the pkt is destined to bmc, * enqueue the pkt a 2nd time with mgmt bit set. @@ -1397,7 +1395,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) BE_WRB_F_SET(wrb_params.features, OS2BMC, 1); wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) - goto drop; + goto drop_skb; else skb_get(skb); } @@ -1411,6 +1409,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) be_xmit_flush(adapter, txo); return NETDEV_TX_OK; +drop_skb: + dev_kfree_skb_any(skb); drop: tx_stats(txo)->tx_drv_drops++; /* Flush the already enqueued tx requests */ @@ -3488,11 +3488,9 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - char irq_name[IFNAMSIZ+4]; - - snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); + sprintf(eqo->desc, "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, irq_name, eqo); + status = request_irq(vec, be_msix, 0, eqo->desc, eqo); if (status) goto err_msix; @@ -4702,8 +4700,12 @@ int be_update_queues(struct be_adapter *adapter) struct net_device *netdev = adapter->netdev; int status; - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* device cannot transmit now, avoid dev_watchdog timeouts */ + netif_carrier_off(netdev); + be_close(netdev); + } be_cancel_worker(adapter); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ed6c76d20b45b2a38ccf87e63487e77a756812a3..e4fc38cbe8535134feabf677be3dfdfe0cbf7f1f 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -712,8 +712,8 @@ static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) return skb_checksum_help(skb) == 0; } -static int ftgmac100_hard_start_xmit(struct sk_buff *skb, - struct net_device *netdev) +static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct ftgmac100_txdes *txdes, *first; @@ -739,6 +739,18 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, */ nfrags = skb_shinfo(skb)->nr_frags; + /* Setup HW checksumming */ + csum_vlan = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL && + !ftgmac100_prep_tx_csum(skb, &csum_vlan)) + goto drop; + + /* Add VLAN tag */ + if (skb_vlan_tag_present(skb)) { + csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; + csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; + } + /* Get header len */ len = skb_headlen(skb); @@ -765,19 +777,6 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, if (nfrags == 0) f_ctl_stat |= 
FTGMAC100_TXDES0_LTS; txdes->txdes3 = cpu_to_le32(map); - - /* Setup HW checksumming */ - csum_vlan = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL && - !ftgmac100_prep_tx_csum(skb, &csum_vlan)) - goto drop; - - /* Add VLAN tag */ - if (skb_vlan_tag_present(skb)) { - csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; - csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; - } - txdes->txdes1 = cpu_to_le32(csum_vlan); /* Next descriptor */ diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a1197d3adbe018318990d212566a874d42ced28b..084f24daf2b5a8854dcbb9002314f658f0ceaf26 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -634,8 +634,8 @@ static void ftmac100_tx_complete(struct ftmac100 *priv) ; } -static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, - dma_addr_t map) +static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, + dma_addr_t map) { struct net_device *netdev = priv->netdev; struct ftmac100_txdes *txdes; @@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) struct net_device *netdev = dev_id; struct ftmac100 *priv = netdev_priv(netdev); - if (likely(netif_running(netdev))) { - /* Disable interrupts for polling */ - ftmac100_disable_all_int(priv); + /* Disable interrupts for polling */ + ftmac100_disable_all_int(priv); + if (likely(netif_running(netdev))) napi_schedule(&priv->napi); - } return IRQ_HANDLED; } @@ -1016,7 +1015,8 @@ static int ftmac100_stop(struct net_device *netdev) return 0; } -static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ftmac100 *priv = netdev_priv(netdev); dma_addr_t map; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 65a22cd9aef26197f79877862756133b4109b895..462bb8c4f80c93a21be4497cd978bf3304e98408 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, qm_sg_entry_get_len(&sgt[0]), dma_dir); /* remaining pages were mapped with skb_frag_dma_map() */ - for (i = 1; i < nr_frags; i++) { + for (i = 1; i <= nr_frags; i++) { WARN_ON(qm_sg_entry_is_ext(&sgt[i])); dma_unmap_page(dev, qm_sg_addr(&sgt[i]), @@ -2046,12 +2046,14 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, return 0; } -static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +static netdev_tx_t +dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { const int queue_mapping = skb_get_queue_mapping(skb); bool nonlinear = skb_is_nonlinear(skb); struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; + struct netdev_queue *txq; struct dpaa_priv *priv; struct qm_fd fd; int offset = 0; @@ -2101,6 +2103,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + txq = netdev_get_tx_queue(net_dev, queue_mapping); + + /* LLTX requires to do our own update of trans_start */ + txq->trans_start = jiffies; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 
7b98bb75ba8ac025584306668c0cc93e34943917..7b43bf74f2029835692bc59319569e3612e26153 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev) */ if (!is_valid_ether_addr(iap)) { /* Report it and use a random ethernet address instead */ - netdev_err(ndev, "Invalid MAC address: %pM\n", iap); + dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); eth_hw_addr_random(ndev); - netdev_info(ndev, "Using random MAC address: %pM\n", - ndev->dev_addr); + dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", + ndev->dev_addr); return; } @@ -1850,13 +1850,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) int ret; if (enable) { - ret = clk_prepare_enable(fep->clk_ahb); - if (ret) - return ret; - ret = clk_prepare_enable(fep->clk_enet_out); if (ret) - goto failed_clk_enet_out; + return ret; if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1876,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) phy_reset_after_clk_enable(ndev->phydev); } else { - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1895,8 +1890,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); -failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ahb); return ret; } @@ -3037,29 +3030,6 @@ fec_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -3108,9 +3078,6 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_do_ioctl = fec_enet_ioctl, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif .ndo_set_features = fec_set_features, }; @@ -3154,7 +3121,9 @@ static int fec_enet_init(struct net_device *ndev) return ret; } - fec_enet_alloc_queue(ndev); + ret = fec_enet_alloc_queue(ndev); + if (ret) + return ret; bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; @@ -3162,7 +3131,8 @@ static int fec_enet_init(struct net_device *ndev) cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, GFP_KERNEL); if (!cbd_base) { - return -ENOMEM; + ret = -ENOMEM; + goto free_queue_mem; } memset(cbd_base, 0, bd_size); @@ -3242,6 +3212,10 @@ static int fec_enet_init(struct net_device *ndev) fec_enet_update_ethtool_stats(ndev); return 0; + +free_queue_mem: + fec_enet_free_queue(ndev); + return ret; } #ifdef CONFIG_OF @@ -3485,6 +3459,9 @@ fec_probe(struct platform_device *pdev) ret = clk_prepare_enable(fep->clk_ipg); if (ret) goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -3575,9 
+3552,12 @@ fec_probe(struct platform_device *pdev) if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_reset: - pm_runtime_put(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + clk_disable_unprepare(fep->clk_ipg); failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: @@ -3598,6 +3578,11 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *np = pdev->dev.of_node; + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + return ret; cancel_work_sync(&fep->tx_timeout_work); fec_ptp_stop(pdev); @@ -3605,13 +3590,17 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); + if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); free_netdev(ndev); + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } @@ -3701,6 +3690,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); return 0; @@ -3710,8 +3700,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; - return clk_prepare_enable(fep->clk_ipg); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + return 0; + +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + return ret; } static const struct dev_pm_ops fec_pm_ops = { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 6d7269d87a850bb4da693b19d5598bc81d18788a..b90bab72efdb3b3d4370bc5616ff0e8609b85cfd 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -305,7 +305,8 @@ static int mpc52xx_fec_close(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
*/ -static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index c415ac67cb7bef218d476fc59f7302b83660513b..e80fedb27cee81411019914f483590c7ec6c1871 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) if (!muram_node) { dev_err(&of_dev->dev, "%s: could not find MURAM node\n", __func__); - goto fman_node_put; + goto fman_free; } err = of_address_to_resource(muram_node, 0, @@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n", __func__, err); - goto fman_node_put; + goto fman_free; } of_node_put(muram_node); - of_node_put(fm_node); err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, "fman", fman); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index bc6eb30aa20f1736cc49a78531a945740a76a62f..41c6fa200e7467af31a05ef4c365705fe89f2329 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -928,7 +928,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; /* Create element to be added to the driver hash table */ - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); if (!hash_entry) return -ENOMEM; hash_entry->addr = addr; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 40705938eeccfb4e532d9d2732774934172bac6c..f75b9c11b2d293783cab997be83404b735b62ebe 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -553,7 +553,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK; /* Create element to be added to the driver hash table */ - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); if (!hash_entry) return -ENOMEM; hash_entry->addr = addr; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 2c2976a2dda6b9f1055af98c19cbc9d17bf8279b..7c548ed535da5babe24c342243c82d737f257034 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -481,7 +481,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, } #endif -static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); cbd_t __iomem *bdp; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f27f9bae1a4ac02811590636f55d74dcb0568225..6e2245fdc18e8428fee56517953c87a245fce3cc 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -112,7 +112,7 @@ 
const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); @@ -2334,7 +2334,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv, /* This is called by the kernel when a frame is ready for transmission. * It is pointed to by the dev->hard_start_xmit function pointer */ -static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; @@ -2942,6 +2942,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, if (lstatus & BD_LFLAG(RXBD_LAST)) size -= skb->len; + WARN(size < 0, "gianfar: rx fragment size underflow"); + if (size < 0) + return false; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rxb->page_offset + RXBUF_ALIGNMENT, size, GFAR_RXB_TRUESIZE); @@ -3103,6 +3107,17 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) if (lstatus & BD_LFLAG(RXBD_EMPTY)) break; + /* lost RXBD_LAST descriptor due to overrun */ + if (skb && + (lstatus & BD_LFLAG(RXBD_FIRST))) { + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + + /* can continue normally */ + } + /* order rx buffer descriptor reads */ rmb(); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 395a5266ea30ad6186b78afd5f1b76f8a138cb84..0cddaaaf48aab3d53f8601dbbcea5c20eb25ce18 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1528,6 +1528,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 22a817da861e3f62684123b26a81ae59944c22f3..a5bf02ae4bc5c15e808caac7199de871eaad88e8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) u16 i, j; u8 __iomem *bd; + netdev_reset_queue(ugeth->ndev); + ug_info = ugeth->ug_info; uf_info = &ug_info->uf_info; @@ -3083,7 +3085,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* This is called by the kernel when a frame is ready for transmission. 
*/ /* It is pointed to by the dev->hard_start_xmit function pointer */ -static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); #ifdef CONFIG_UGETH_TX_ON_DEMAND diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 0beee2cc2ddd3164baf7211f49c3b5affef7a091..722b6de248164342af98f35104f0a2814e97e27e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -252,14 +252,12 @@ uec_set_ringparam(struct net_device *netdev, return -EINVAL; } + if (netif_running(netdev)) + return -EBUSY; + ug_info->bdRingLenRx[queue] = ring->rx_pending; ug_info->bdRingLenTx[queue] = ring->tx_pending; - if (netif_running(netdev)) { - /* FIXME: restart automatically */ - netdev_info(netdev, "Please re-open the interface\n"); - } - return ret; } diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index a69cd19a55ae2f018cf5340f0f2680bff5f36c86..b8fc9bbeca2c7b3abaa3bd62d012a8c3b4195b50 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) return -1; base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + if (!base) { + pcmcia_release_window(link, link->resource[2]); + return -1; + } + pcmcia_map_mem_page(link, link->resource[2], 0); /* diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 25152715396bc04eda5e9d793a608ff7ae546071..397ce4088b53feeaa42451a2493827dffd1ec75b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -123,6 +123,16 @@ config HNS3_ENET family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. +config HNS3_CAE + tristate "Hisilicon HNS3 configuration & analysis & enhancement Support" + default m + depends on HNS3_HCLGE && HNS3_ENET + help + This selects the Configuration, Analysis and Enhancement Support for HNS3 Driver + (e.g. SFP management, self-adaptive port speed and DFX). It is supposed to + support upcoming User Mode Tool (i.e. hiarmtool) developed by Huawei. + This module depends upon HNS3_HCLGE and HNS3 driver. + endif #HNS3 endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 14374a856d3091a9489e92669c7d93e3e797a115..9630a6cdbd916639f0ff9c28ffc4f186f1b93cbf 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2014 Linaro Ltd. * Copyright (c) 2014 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include @@ -157,6 +153,7 @@ struct hip04_priv { unsigned int reg_inten; struct napi_struct napi; + struct device *dev; struct net_device *ndev; struct tx_desc *tx_desc; @@ -173,6 +170,7 @@ struct hip04_priv { dma_addr_t rx_phys[RX_DESC_NUM]; unsigned int rx_head; unsigned int rx_buf_size; + unsigned int rx_cnt_remaining; struct device_node *phy_node; struct phy_device *phy; @@ -185,7 +183,7 @@ struct hip04_priv { static inline unsigned int tx_count(unsigned int head, unsigned int tail) { - return (head - tail) % (TX_DESC_NUM - 1); + return (head - tail) % TX_DESC_NUM; } static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) @@ -387,7 +385,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) } if (priv->tx_phys[tx_tail]) { - dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail], + dma_unmap_single(priv->dev, priv->tx_phys[tx_tail], priv->tx_skb[tx_tail]->len, DMA_TO_DEVICE); priv->tx_phys[tx_tail] = 0; @@ -437,8 +435,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_BUSY; } - phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(&ndev->dev, phys)) { + phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, phys)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -453,9 +451,9 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); hip04_set_xmit_desc(priv, phys); - priv->tx_head = TX_NEXT(tx_head); count++; netdev_sent_queue(ndev, skb->len); + priv->tx_head = TX_NEXT(tx_head); stats->tx_bytes += skb->len; stats->tx_packets++; @@ -486,7 +484,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi); struct net_device *ndev = priv->ndev; struct net_device_stats *stats = &ndev->stats; - unsigned int cnt = hip04_recv_cnt(priv); struct rx_desc *desc; struct sk_buff *skb; unsigned char *buf; @@ -497,7 +494,10 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) u16 len; u32 err; - while (cnt && !last) { + /* clean up tx descriptors */ + tx_remaining = hip04_tx_reclaim(ndev, false); + priv->rx_cnt_remaining += hip04_recv_cnt(priv); + while (priv->rx_cnt_remaining && !last) { buf = priv->rx_buf[priv->rx_head]; skb = build_skb(buf, priv->rx_buf_size); if (unlikely(!skb)) { @@ -505,7 +505,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) goto refill; } - dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], + dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head], RX_BUF_SIZE, DMA_FROM_DEVICE); priv->rx_phys[priv->rx_head] = 0; @@ -534,20 +534,22 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) buf = netdev_alloc_frag(priv->rx_buf_size); if (!buf) goto done; - phys = dma_map_single(&ndev->dev, buf, + phys = dma_map_single(priv->dev, buf, RX_BUF_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, phys)) + if (dma_mapping_error(priv->dev, phys)) goto done; priv->rx_buf[priv->rx_head] = buf; priv->rx_phys[priv->rx_head] = phys; hip04_set_recv_desc(priv, phys); priv->rx_head = RX_NEXT(priv->rx_head); - if (rx >= budget) + if (rx >= budget) { + --priv->rx_cnt_remaining; goto done; + } - if (--cnt == 0) - cnt = hip04_recv_cnt(priv); + if (--priv->rx_cnt_remaining == 0) + priv->rx_cnt_remaining += hip04_recv_cnt(priv); } if (!(priv->reg_inten & RCV_INT)) { @@ -557,8 +559,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) } 
napi_complete_done(napi, rx); done: - /* clean up tx descriptors and start a new timer if necessary */ - tx_remaining = hip04_tx_reclaim(ndev, false); + /* start a new timer if necessary */ if (rx < budget && tx_remaining) hip04_start_tx_timer(priv); @@ -633,6 +634,7 @@ static int hip04_mac_open(struct net_device *ndev) int i; priv->rx_head = 0; + priv->rx_cnt_remaining = 0; priv->tx_head = 0; priv->tx_tail = 0; hip04_reset_ppe(priv); @@ -640,9 +642,9 @@ static int hip04_mac_open(struct net_device *ndev) for (i = 0; i < RX_DESC_NUM; i++) { dma_addr_t phys; - phys = dma_map_single(&ndev->dev, priv->rx_buf[i], + phys = dma_map_single(priv->dev, priv->rx_buf[i], RX_BUF_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, phys)) + if (dma_mapping_error(priv->dev, phys)) return -EIO; priv->rx_phys[i] = phys; @@ -676,7 +678,7 @@ static int hip04_mac_stop(struct net_device *ndev) for (i = 0; i < RX_DESC_NUM; i++) { if (priv->rx_phys[i]) { - dma_unmap_single(&ndev->dev, priv->rx_phys[i], + dma_unmap_single(priv->dev, priv->rx_phys[i], RX_BUF_SIZE, DMA_FROM_DEVICE); priv->rx_phys[i] = 0; } @@ -820,6 +822,7 @@ static int hip04_mac_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(ndev); + priv->dev = d; priv->ndev = ndev; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); @@ -914,10 +917,8 @@ static int hip04_mac_probe(struct platform_device *pdev) } ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto alloc_fail; - } return 0; @@ -940,7 +941,6 @@ static int hip04_remove(struct platform_device *pdev) hip04_free_ring(ndev, d); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 2c2808830e95723604b7356514ea64ca123fac3a..f29040520ca0cf6cbc4e5b3ee394e94fc7693728 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -295,7 +295,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = (pos + 1) % rxq->num; if (rx_pkts_num >= limit) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c5727003af8c1438f9e4824b16f8774b8a383878..c95643a34e1e7edc88bf8ec61a517d5ab1718608 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2014 Linaro Ltd. * Copyright (c) 2014 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include @@ -554,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = dma_ring_incr(pos, RX_DESC_NUM); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b180d2cab9a2a647254461c0a0cb08..8a2197e3fedbffc673ddfdbd8da4fca04867955d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -150,7 +146,6 @@ static int hnae_alloc_buffers(struct hnae_ring *ring) /* free desc along with its attached buffer */ static void hnae_free_desc(struct hnae_ring *ring) { - hnae_free_buffers(ring); dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, ring->desc_num * sizeof(ring->desc[0]), ring_to_dma_dir(ring)); @@ -183,6 +178,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) /* fini ring, also free the buffer for the ring */ static void hnae_fini_ring(struct hnae_ring *ring) { + if (is_rx_ring(ring)) + hnae_free_buffers(ring); + hnae_free_desc(ring); kfree(ring->desc_cb); ring->desc_cb = NULL; @@ -201,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; - spin_lock_init(&ring->lock); ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); @@ -317,8 +314,8 @@ EXPORT_SYMBOL(hnae_reinit_handle); /* hnae_get_handle - get a handle from the AE * @owner_dev: the dev use this handle - * @ae_id: the id of the ae to be used - * @ae_opts: the options set for the handle + * @fwnode: firmware device node that containing the hnae_ae_dev. + * @port_id: the port id of the mac * @bops: the callbacks for buffer management * * return handle ptr or ERR_PTR @@ -441,7 +438,7 @@ EXPORT_SYMBOL(hnae_ae_register); /** * hnae_ae_unregister - unregisters a HNAE AE engine - * @cdev: the device to unregister + * @hdev: the device to unregister */ void hnae_ae_unregister(struct hnae_ae_dev *hdev) { @@ -467,5 +464,4 @@ module_exit(hnae_exit); MODULE_AUTHOR("Hisilicon, Inc."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework"); - -/* vi: set tw=78 noet: */ +MODULE_VERSION(HNAE_DRIVER_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c49d397c61845130e153fe1e3b0b3e..b0e0e65c120fc40baeff2269b39fb17436c92e8f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __HNAE_H @@ -36,7 +32,7 @@ #include #include -#define HNAE_DRIVER_VERSION "2.0" +#define HNAE_DRIVER_VERSION "23.12.1" #define HNAE_DRIVER_NAME "hns" #define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation." #define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver" @@ -278,9 +274,6 @@ struct hnae_ring { /* statistic */ struct ring_stats stats; - /* ring lock for poll one */ - spinlock_t lock; - dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ @@ -357,7 +350,7 @@ struct hnae_buf_ops { }; struct hnae_queue { - void __iomem *io_base; + u8 __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; @@ -494,8 +487,6 @@ struct hnae_ae_ops { u32 *uplimit); void (*get_pauseparam)(struct hnae_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en); - int (*set_autoneg)(struct hnae_handle *handle, u8 enable); - int (*get_autoneg)(struct hnae_handle *handle); int (*set_pauseparam)(struct hnae_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en); void (*get_coalesce_usecs)(struct hnae_handle *handle, @@ -566,6 +557,8 @@ struct hnae_handle { u32 coal_ring_idx; u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ + u32 *rss_key; + u32 *rss_indir_table; bool coal_adapt_en; enum hnae_port_type port_type; enum hnae_media_type media_type; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index b52029e26d15323b98811c5180a3d78ac288bf52..2eedb420414d3da1314fb0b443fa64ec3e9ecd93 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include @@ -17,8 +13,6 @@ #include "hns_dsaf_ppe.h" #include "hns_dsaf_rcb.h" -#define AE_NAME_PORT_ID_IDX 6 - static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle) { struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); @@ -80,6 +74,7 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, struct hnae_handle *ae_handle; struct ring_pair_cb *ring_pair_cb; struct hnae_vf_cb *vf_cb; + struct hns_ppe_cb *ppe_cb; dsaf_dev = hns_ae_get_dsaf_dev(dev); @@ -128,12 +123,16 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vf_cb->port_index = port_id; vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; + ppe_cb = hns_get_ppe_cb(ae_handle); + ae_handle->phy_if = vf_cb->mac_cb->phy_if; ae_handle->phy_dev = vf_cb->mac_cb->phy_dev; ae_handle->if_support = vf_cb->mac_cb->if_support; ae_handle->port_type = vf_cb->mac_cb->mac_type; ae_handle->media_type = vf_cb->mac_cb->media_type; ae_handle->dport_id = port_id; + ae_handle->rss_key = ppe_cb->rss_key; + ae_handle->rss_indir_table = ppe_cb->rss_indir_table; return ae_handle; vf_id_err: @@ -147,12 +146,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle) struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); int i; - vf_cb->mac_cb = NULL; - - kfree(vf_cb); - for (i = 0; i < handle->q_num; i++) hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; + + kfree(vf_cb); } static int hns_ae_wait_flow_down(struct hnae_handle *handle) @@ -379,6 +376,9 @@ static void hns_ae_stop(struct hnae_handle *handle) hns_ae_ring_enable_all(handle, 0); + /* clean rx fbd. */ + hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX); + (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); } @@ -492,13 +492,6 @@ static void hns_ae_get_pauseparam(struct hnae_handle *handle, hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en); } -static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) -{ - assert(handle); - - return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable); -} - static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); @@ -507,17 +500,6 @@ static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) hns_mac_set_promisc(mac_cb, (u8)!!en); } -static int hns_ae_get_autoneg(struct hnae_handle *handle) -{ - u32 auto_neg; - - assert(handle); - - hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg); - - return auto_neg; -} - static int hns_ae_set_pauseparam(struct hnae_handle *handle, u32 autoneg, u32 rx_en, u32 tx_en) { @@ -653,7 +635,7 @@ static void hns_ae_update_stats(struct hnae_handle *handle, struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0; u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0; - u64 rx_missed_errors = 0; + u64 rx_missed_errors; dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); if (!dsaf_dev) @@ -970,8 +952,6 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_loopback = hns_ae_config_loopback, .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, .get_pauseparam = hns_ae_get_pauseparam, - .set_autoneg = hns_ae_set_autoneg, - .get_autoneg = hns_ae_get_autoneg, .set_pauseparam = hns_ae_set_pauseparam, .get_coalesce_usecs = hns_ae_get_coalesce_usecs, .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 09e4061d1fa60a80584b47cea3725374606a429d..267a95cb3e6b85c271fc9b788d5dde8e9365fa75 100644 --- 
a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -67,11 +63,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode) struct mac_driver *drv = (struct mac_driver *)mac_drv; /*enable GE rX/tX */ - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX) dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1); - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) { + /* enable rx pcs */ + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0); dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1); + } } static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) @@ -79,11 +78,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) struct mac_driver *drv = (struct mac_driver *)mac_drv; /*disable GE rX/tX */ - if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX) dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0); - if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX)) + if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) { + /* disable rx pcs */ + dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1); dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0); + } } /* hns_gmac_get_en - get port enable @@ -128,14 +130,6 @@ static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval) GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S); } -static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG, - GMAC_PAUSE_EN_RX_FDFC_B, !!newval); -} - static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; @@ -177,14 +171,6 @@ static void hns_gmac_tx_loop_pkt_dis(void *mac_drv) dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri); } -static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG, - GMAC_DUPLEX_TYPE_B, !!newval); -} - static void hns_gmac_get_duplex_type(void *mac_drv, enum hns_gmac_duplex_mdoe *duplex_mode) { @@ -319,7 +305,7 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) hns_gmac_set_uc_match(mac_drv, en); } -int hns_gmac_wait_fifo_clean(void *mac_drv) +static int hns_gmac_wait_fifo_clean(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; int wait_cnt; @@ -739,8 +725,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->set_an_mode = hns_gmac_config_an_mode; mac_drv->config_loopback = hns_gmac_config_loopback; mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc; - mac_drv->config_half_duplex = hns_gmac_set_duplex_type; - mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames; + mac_drv->config_half_duplex = NULL; 
mac_drv->get_info = hns_gmac_get_info; mac_drv->autoneg_stat = hns_gmac_autoneg_stat; mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h index 44fe3010dc6dbd656fa94f52a3cb89de5a4bc051..ec266e7fff83fef08c260604acaccbfd0c7764bc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef _HNS_GMAC_H diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6ed6f142427e4b68434df612568a7fea2f0433bc..f14680919d87b5ee0ac86c583c6c902febeae3da 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -70,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } +static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv) +{ +#define HNS_MAC_LINK_WAIT_TIME 5 +#define HNS_MAC_LINK_WAIT_CNT 40 + + u32 link_status = 0; + int i; + + if (!mac_ctrl_drv->get_link_status) + return link_status; + + for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) { + msleep(HNS_MAC_LINK_WAIT_TIME); + mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status); + if (!link_status) + break; + } + + return link_status; +} + void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -87,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; + + /* for FIBER port, it may have a fake link up. + * when the link status changes from down to up, we need to do + * anti-shake. the anti-shake time is base on tests. + * only FIBER port need to do this. 
+ */ + if (*link_status && !mac_cb->link) + *link_status = hns_mac_link_anti_shake(mac_ctrl_drv); } mac_cb->link = *link_status; @@ -115,7 +140,7 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, } /** - *hns_mac_is_adjust_link - check is need change mac speed and duplex register + *hns_mac_need_adjust_link - check is need change mac speed and duplex register *@mac_cb: mac device *@speed: phy device speed *@duplex:phy device duplex @@ -370,7 +395,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) static void hns_mac_param_get(struct mac_params *param, struct hns_mac_cb *mac_cb) { - param->vaddr = (void *)mac_cb->vaddr; + param->vaddr = mac_cb->vaddr; param->mac_mode = hns_get_enet_interface(mac_cb); ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); param->mac_id = mac_cb->mac_id; @@ -378,10 +403,11 @@ static void hns_mac_param_get(struct mac_params *param, } /** - *hns_mac_queue_config_bc_en - set broadcast rx&tx enable + *hns_mac_port_config_bc_en - set broadcast rx&tx enable *@mac_cb: mac device - *@queue: queue number - *@en:enable + *@port_num: the port number that needs to be configured + *@vlan_id: the vlan id that needs to be configured + *@enable: true means add mac mc port; false means delete mac mc port *retuen 0 - success , negative --fail */ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, @@ -415,7 +441,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, *hns_mac_vm_config_bc_en - set broadcast rx&tx enable *@mac_cb: mac device *@vmid: vm id - *@en:enable + *@enable: true means add; false means delete *retuen 0 - success , negative --fail */ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) @@ -473,9 +499,6 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb) if (drv->set_tx_auto_pause_frames) drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time); - if (drv->set_an_mode) - drv->set_an_mode(drv, 1); - if (drv->mac_pausefrm_cfg) { if (mac_cb->mac_type == HNAE_PORT_DEBUG) drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1); @@ -546,8 +569,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb) /** * hns_mac_get_autoneg - get auto autonegotiation * @mac_cb: mac control block - * @enable: enable or not - * retuen 0 - success , negative --fail + * @auto_neg: the auto negotiation value to be configured */ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg) { @@ -564,7 +586,6 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg) * @mac_cb: mac control block * @rx_en: rx enable status * @tx_en: tx enable status - * retuen 0 - success , negative --fail */ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en) { @@ -600,7 +621,7 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable) } /** - * hns_mac_set_autoneg - set rx & tx pause parameter + * hns_mac_set_pauseparam - set rx & tx pause parameter * @mac_cb: mac control block * @rx_en: rx enable or not * @tx_en: tx enable or not @@ -701,9 +722,9 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, return rc; if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII))) - is_c45 = 1; + is_c45 = true; else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII))) - is_c45 = 0; + is_c45 = false; else return -ENODATA; @@ -778,6 +799,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb) return rc; } +static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb) +{ + if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev) + return; + + 
phy_device_remove(mac_cb->phy_dev); + phy_device_free(mac_cb->phy_dev); + + mac_cb->phy_dev = NULL; +} + #define MAC_MEDIA_TYPE_MAX_LEN 16 static const struct { @@ -793,7 +825,6 @@ static const struct { /** *hns_mac_get_info - get mac information from device node *@mac_cb: mac device - *@np:device node * return: 0 --success, negative --fail */ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) @@ -837,8 +868,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); @@ -855,8 +886,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * if the phy_dev is found */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); @@ -1117,7 +1148,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev) int max_port_num = hns_mac_get_max_port_num(dsaf_dev); for (i = 0; i < max_port_num; i++) { + if (!dsaf_dev->mac_cb[i]) + continue; + dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]); + hns_mac_remove_phydev(dsaf_dev->mac_cb[i]); dsaf_dev->mac_cb[i] = NULL; } } @@ -1191,7 +1226,7 @@ void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data) void hns_set_led_opt(struct hns_mac_cb *mac_cb) { - int nic_data = 0; + int nic_data; int txpkts, rxpkts; txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef760b82a1d7a10469d5649db91e366..9771ba8f5be8f7ad6808df8e7730a079d2196e33 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef _HNS_DSAF_MAC_H @@ -187,7 +183,7 @@ struct mac_statistics { /*mac para struct ,mac get param from nic or dsaf when initialize*/ struct mac_params { char addr[ETH_ALEN]; - void *vaddr; /*virtual address*/ + u8 __iomem *vaddr; /*virtual address*/ struct device *dev; u8 mac_id; /**< Ethernet operation mode (MAC-PHY interface and speed) */ @@ -372,8 +368,6 @@ struct mac_driver { void (*config_half_duplex)(void *mac_drv, u8 newval); /*config tx pause time,if pause_time is zero,disable tx pause enable*/ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time); - /*config rx pause enable*/ - void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable); /* config rx mode for promiscuous*/ void (*set_promiscuous)(void *mac_drv, u8 enable); void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en); @@ -402,7 +396,7 @@ struct mac_driver { enum mac_mode mac_mode; u8 mac_id; struct hns_mac_cb *mac_cb; - void __iomem *io_base; + u8 __iomem *io_base; unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ unsigned int virt_dev_num; struct device *dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index e557a4ef5996c6772804ca746830af633a4adca9..0cadd2b6c612d374b54a13b7c13ad67a9eb9101e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
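Several of these hunks retype MMIO bases from void * or void __iomem * to u8 __iomem *, so byte offsets can be added directly while sparse keeps checking the address space and no casts are needed before the accessors. A minimal sketch of the access pattern this enables; the example_* helpers and the reg parameter are illustrative:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative sketch: a byte-typed __iomem base can be offset in bytes and
 * handed straight to readl()/writel(), which is what the vaddr and io_base
 * type changes above are aiming for.
 */
static u32 example_read_reg(u8 __iomem *base, unsigned long reg)
{
	return readl(base + reg);
}

static void example_write_reg(u8 __iomem *base, unsigned long reg, u32 val)
{
	writel(val, base + reg);
}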
*/ #include @@ -28,7 +24,7 @@ #include "hns_dsaf_rcb.h" #include "hns_dsaf_misc.h" -const static char *g_dsaf_mode_match[DSAF_MODE_MAX] = { +static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", @@ -211,7 +207,7 @@ static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) /** * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en - * @dsaf_id: dsa fabric id + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev) { @@ -220,8 +216,8 @@ static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev) /** * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce - * @dsaf_id: dsa fabric id - * @hns_dsaf_reg_cnt_clr_ce: config value + * @dsaf_dev: dsa fabric device + * @reg_cnt_clr_ce: config value */ static void hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce) @@ -231,9 +227,9 @@ hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce) } /** - * hns_ppe_qid_cfg - config ppe qid - * @dsaf_id: dsa fabric id - * @pppe_qid_cfg: value array + * hns_dsaf_ppe_qid_cfg - config ppe qid + * @dsaf_dev: dsa fabric device + * @qid_cfg: value array */ static void hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg) @@ -289,8 +285,8 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) /** * hns_dsaf_sw_port_type_cfg - cfg sw type - * @dsaf_id: dsa fabric id - * @psw_port_type: array + * @dsaf_dev: dsa fabric device + * @port_type: software port type */ static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev, enum dsaf_sw_port_type port_type) @@ -307,8 +303,8 @@ static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev, /** * hns_dsaf_stp_port_type_cfg - cfg stp type - * @dsaf_id: dsa fabric id - * @pstp_port_type: array + * @dsaf_dev: dsa fabric device + * @port_type: stp port type */ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, enum dsaf_stp_port_type port_type) @@ -327,7 +323,7 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, (AE_IS_VER1((dev)->dsaf_ver) ? 
DSAF_SBM_NUM : DSAFV2_SBM_NUM) /** * hns_dsaf_sbm_cfg - config sbm - * @dsaf_id: dsa fabric id + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) { @@ -345,8 +341,8 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) } /** - * hns_dsaf_sbm_cfg_mib_en - config sbm - * @dsaf_id: dsa fabric id + * hns_dsaf_sbm_cfg_mib_en - config sbm and wait finish + * @dsaf_dev: dsa fabric device */ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) { @@ -366,7 +362,7 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1); } - /* waitint for all sbm enable finished */ + /* waiting for all sbm enable finished */ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { read_cnt = 0; reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; @@ -390,8 +386,8 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) } /** - * hns_dsaf_sbm_bp_wl_cfg - config sbm - * @dsaf_id: dsa fabric id + * hns_dsaf_sbm_bp_wl_cfg - config sbm water line + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { @@ -560,7 +556,7 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) /** * hns_dsaf_voq_bp_all_thrd_cfg - voq - * @dsaf_id: dsa fabric id + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev) { @@ -602,8 +598,8 @@ static void hns_dsaf_tbl_tcam_match_cfg( } /** - * hns_dsaf_tbl_tcam_data_cfg - tbl - * @dsaf_id: dsa fabric id + * hns_dsaf_tbl_tcam_data_cfg - which forwarding table to be configured + * @dsaf_dev: dsa fabric device * @ptbl_tcam_data: addr */ static void hns_dsaf_tbl_tcam_data_cfg( @@ -617,9 +613,9 @@ static void hns_dsaf_tbl_tcam_data_cfg( } /** - * dsaf_tbl_tcam_mcast_cfg - tbl - * @dsaf_id: dsa fabric id - * @ptbl_tcam_mcast: addr + * hns_dsaf_tbl_tcam_mcast_cfg - config multicast forwarding table + * @dsaf_dev: dsa fabric device + * @mcast: multicast table to be configured */ static void hns_dsaf_tbl_tcam_mcast_cfg( struct dsaf_device *dsaf_dev, @@ -651,9 +647,9 @@ static void hns_dsaf_tbl_tcam_mcast_cfg( } /** - * hns_dsaf_tbl_tcam_ucast_cfg - tbl - * @dsaf_id: dsa fabric id - * @ptbl_tcam_ucast: addr + * hns_dsaf_tbl_tcam_ucast_cfg - config unicast forwarding table + * @dsaf_dev: dsa fabric device + * @tbl_tcam_ucast: unicast table to be configured */ static void hns_dsaf_tbl_tcam_ucast_cfg( struct dsaf_device *dsaf_dev, @@ -678,8 +674,8 @@ static void hns_dsaf_tbl_tcam_ucast_cfg( /** * hns_dsaf_tbl_line_cfg - tbl - * @dsaf_id: dsa fabric id - * @ptbl_lin: addr + * @dsaf_dev: dsa fabric device + * @tbl_lin: addr */ static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev, struct dsaf_tbl_line_cfg *tbl_lin) @@ -698,8 +694,9 @@ static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev, } /** - * hns_dsaf_tbl_tcam_mcast_pul - tbl - * @dsaf_id: dsa fabric id + * hns_dsaf_tbl_tcam_mcast_pul - generate pulse for writing multicast + * forwarding table + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev) { @@ -713,8 +710,8 @@ static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev) } /** - * hns_dsaf_tbl_line_pul - tbl - * @dsaf_id: dsa fabric id + * hns_dsaf_tbl_line_pul - config pulse + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev) { @@ -728,8 +725,9 @@ static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev) } /** - * 
hns_dsaf_tbl_tcam_data_mcast_pul - tbl - * @dsaf_id: dsa fabric id + * hns_dsaf_tbl_tcam_data_mcast_pul - generate pulse for writing multicast + * forwarding table and tcam table + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_tbl_tcam_data_mcast_pul( struct dsaf_device *dsaf_dev) @@ -746,8 +744,9 @@ static void hns_dsaf_tbl_tcam_data_mcast_pul( } /** - * hns_dsaf_tbl_tcam_data_ucast_pul - tbl - * @dsaf_id: dsa fabric id + * hns_dsaf_tbl_tcam_data_ucast_pul - generate pulse for writing unicast + * forwarding table and tcam table + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_tbl_tcam_data_ucast_pul( struct dsaf_device *dsaf_dev) @@ -771,9 +770,8 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) } /** - * hns_dsaf_tbl_stat_en - tbl - * @dsaf_id: dsa fabric id - * @ptbl_stat_en: addr + * hns_dsaf_tbl_stat_en - enable the dfx for all tables + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev) { @@ -789,7 +787,7 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev) /** * hns_dsaf_rocee_bp_en - rocee back press enable - * @dsaf_id: dsa fabric id + * @dsaf_dev: dsa fabric device */ static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev) { @@ -855,10 +853,10 @@ static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev, } /** - * hns_dsaf_single_line_tbl_cfg - INT - * @dsaf_id: dsa fabric id - * @address: - * @ptbl_line: + * hns_dsaf_single_line_tbl_cfg - config single line table + * @dsaf_dev: dsa fabric device + * @address: tcam index + * @ptbl_line: the line table */ static void hns_dsaf_single_line_tbl_cfg( struct dsaf_device *dsaf_dev, @@ -879,10 +877,11 @@ static void hns_dsaf_single_line_tbl_cfg( } /** - * hns_dsaf_tcam_uc_cfg - INT - * @dsaf_id: dsa fabric id - * @address, - * @ptbl_tcam_data, + * hns_dsaf_tcam_uc_cfg - config tcam unicast + * @dsaf_dev: dsa fabric device + * @address: tcam index + * @ptbl_tcam_data: tcam data table + * @ptbl_tcam_ucast: unicast tcam table */ static void hns_dsaf_tcam_uc_cfg( struct dsaf_device *dsaf_dev, u32 address, @@ -908,7 +907,8 @@ static void hns_dsaf_tcam_uc_cfg( * @dsaf_dev: dsa fabric device struct pointer * @address: tcam index * @ptbl_tcam_data: tcam data struct pointer - * @ptbl_tcam_mcast: tcam mask struct pointer, it must be null for HNSv1 + * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1 + * @ptbl_tcam_mcast: tcam multicast table */ static void hns_dsaf_tcam_mc_cfg( struct dsaf_device *dsaf_dev, u32 address, @@ -935,9 +935,68 @@ static void hns_dsaf_tcam_mc_cfg( } /** - * hns_dsaf_tcam_mc_invld - INT - * @dsaf_id: dsa fabric id - * @address + * hns_dsaf_tcam_uc_cfg_vague - cfg the tcam for uc vague match + * @dsaf_dev: dsa fabric device struct pointer + * @address: tcam index + * @tcam_data: tcam data struct pointer + * @tcam_mask: tcam data mask + * @tcam_uc: tcam unicast table + */ +static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev, + u32 address, + struct dsaf_tbl_tcam_data *tcam_data, + struct dsaf_tbl_tcam_data *tcam_mask, + struct dsaf_tbl_tcam_ucast_cfg *tcam_uc) +{ + spin_lock_bh(&dsaf_dev->tcam_lock); + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data); + hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc); + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev); + + /*Restore Match Data*/ + tcam_mask->tbl_tcam_data_high = 0xffffffff; + tcam_mask->tbl_tcam_data_low = 0xffffffff; + 
hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + + spin_unlock_bh(&dsaf_dev->tcam_lock); +} + +/** + * hns_dsaf_tcam_mc_cfg_vague - cfg the tcam for mc vague match + * @dsaf_dev: dsa fabric device struct pointer + * @address: tcam index + * @tcam_data: tcam data struct pointer + * @tcam_mask: tcam data mask + * @tcam_mc: tcam multicast table + + */ +static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev, + u32 address, + struct dsaf_tbl_tcam_data *tcam_data, + struct dsaf_tbl_tcam_data *tcam_mask, + struct dsaf_tbl_tcam_mcast_cfg *tcam_mc) +{ + spin_lock_bh(&dsaf_dev->tcam_lock); + hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); + hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data); + hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc); + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev); + + /*Restore Match Data*/ + tcam_mask->tbl_tcam_data_high = 0xffffffff; + tcam_mask->tbl_tcam_data_low = 0xffffffff; + hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask); + + spin_unlock_bh(&dsaf_dev->tcam_lock); +} + +/** + * hns_dsaf_tcam_mc_invld - disable tcam multicast + * @dsaf_dev: dsa fabric device + * @address: tcam index */ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) { @@ -971,11 +1030,11 @@ hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr) } /** - * hns_dsaf_tcam_uc_get - INT - * @dsaf_id: dsa fabric id - * @address - * @ptbl_tcam_data - * @ptbl_tcam_ucast + * hns_dsaf_tcam_uc_get - get tcam unicast configuration + * @dsaf_dev: dsa fabric device + * @address: tcam index + * @ptbl_tcam_data: output the tcam data + * @ptbl_tcam_ucast: output the tcam unicast configurations */ static void hns_dsaf_tcam_uc_get( struct dsaf_device *dsaf_dev, u32 address, @@ -1024,11 +1083,11 @@ static void hns_dsaf_tcam_uc_get( } /** - * hns_dsaf_tcam_mc_get - INT - * @dsaf_id: dsa fabric id - * @address - * @ptbl_tcam_data - * @ptbl_tcam_ucast + * hns_dsaf_tcam_mc_get - get tcam multicast configuration + * @dsaf_dev: dsa fabric device + * @address: tcam index + * @ptbl_tcam_data: output the tcam data + * @ptbl_tcam_ucast: output the tcam multicast configurations */ static void hns_dsaf_tcam_mc_get( struct dsaf_device *dsaf_dev, u32 address, @@ -1073,10 +1132,6 @@ static void hns_dsaf_tcam_mc_get( spin_unlock_bh(&dsaf_dev->tcam_lock); } -/** - * hns_dsaf_tbl_line_init - INT - * @dsaf_id: dsa fabric id - */ static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev) { u32 i; @@ -1087,10 +1142,6 @@ static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev) hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line); } -/** - * hns_dsaf_tbl_tcam_init - INT - * @dsaf_id: dsa fabric id - */ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev) { u32 i; @@ -1102,10 +1153,6 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev) hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast); } -/** - * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg - * @mac_cb: mac contrl block - */ static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev, int mac_id, int tc_en) { @@ -1155,11 +1202,6 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, DSAF_MAC_PAUSE_RX_EN_B); } -/** - * hns_dsaf_tbl_tcam_init - INT - * @dsaf_id: dsa fabric id - * @dsaf_mode - */ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) { u32 i; @@ -1209,10 +1251,6 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful); } -/** - * 
hns_dsaf_inode_init - INT - * @dsaf_id: dsa fabric id - */ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) { u32 reg; @@ -1261,10 +1299,6 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) } } -/** - * hns_dsaf_sbm_init - INT - * @dsaf_id: dsa fabric id - */ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) { u32 flag; @@ -1315,10 +1349,6 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) return 0; } -/** - * hns_dsaf_tbl_init - INT - * @dsaf_id: dsa fabric id - */ static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev) { hns_dsaf_tbl_stat_en(dsaf_dev); @@ -1327,10 +1357,6 @@ static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev) hns_dsaf_tbl_line_init(dsaf_dev); } -/** - * hns_dsaf_voq_init - INT - * @dsaf_id: dsa fabric id - */ static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev) { hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev); @@ -1456,7 +1482,7 @@ static u16 hns_dsaf_find_soft_mac_entry( u32 i; soft_mac_entry = priv->soft_mac_tbl; - for (i = 0; i < dsaf_dev->tcam_max_num; i++) { + for (i = 0; i < DSAF_TCAM_SUM; i++) { /* invall tab entry */ if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) && (soft_mac_entry->tcam_key.high.val == mac_key->high.val) && @@ -1492,6 +1518,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev) return DSAF_INVALID_ENTRY_IDX; } +/** + * hns_dsaf_find_empty_mac_entry_reverse + * search dsa fabric soft empty-entry from the end + * @dsaf_dev: dsa fabric device struct pointer + */ +static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev) +{ + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; + int i; + + soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1); + for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) { + /* search all entry from end to start.*/ + if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX) + return i; + soft_mac_entry--; + } + return DSAF_INVALID_ENTRY_IDX; +} + /** * hns_dsaf_set_mac_key - set mac key * @dsaf_dev: dsa fabric device struct pointer @@ -1525,8 +1572,6 @@ static void hns_dsaf_set_mac_key( DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); - - mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); } /** @@ -1538,7 +1583,7 @@ int hns_dsaf_set_mac_uc_entry( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_priv *priv = @@ -1586,8 +1631,8 @@ int hns_dsaf_set_mac_uc_entry( /* default config dvc to 0 */ mac_data.tbl_ucast_dvc = 0; mac_data.tbl_ucast_out_port = mac_entry->port_num; - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); @@ -1604,7 +1649,7 @@ int hns_dsaf_rm_mac_addr( struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_tbl_tcam_ucast_cfg mac_data; struct dsaf_drv_tbl_tcam_key mac_key; @@ -1653,7 +1698,7 @@ static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev, u8 port_num, u8 *mask, u8 *addr) { if (MAC_IS_BROADCAST(addr)) - 
memset(mask, 0xff, ETH_ALEN); + eth_broadcast_addr(mask); else memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN); } @@ -1676,7 +1721,7 @@ static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_tbl_tcam_key mask_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; @@ -1709,9 +1754,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, 0xff, mc_mask); - mask_key.high.val = le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -1730,7 +1772,7 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, if (entry_index == DSAF_INVALID_ENTRY_IDX) { /*if hasnot empty, error*/ dev_err(dsaf_dev->dev, - "set_uc_entry failed, %s Mac key(%#x:%#x)\n", + "set_mc_entry failed, %s Mac key(%#x:%#x)\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val); return -EINVAL; @@ -1759,12 +1801,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, mac_data.tbl_mcast_item_vld = 1; dev_dbg(dsaf_dev->dev, - "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n", + "set_mc_entry, %s Mac key(%#x:%#x) entry_index%d\n", dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; /* config mc entry with mask */ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -1789,7 +1831,7 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, u8 in_port_num, u8 *addr) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); @@ -1838,7 +1880,7 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry) { - u16 entry_index = DSAF_INVALID_ENTRY_IDX; + u16 entry_index; struct dsaf_drv_tbl_tcam_key mac_key; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; @@ -1879,9 +1921,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* config key mask */ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); - mask_key.high.val = le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -1935,8 +1974,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, soft_mac_entry += entry_index; soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; } else { /* not zero, just del port, update */ - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -1957,7 +1996,7 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, if (HNS_DSAF_IS_DEBUG(dsaf_dev)) return 0; - for (i = 0; i < DSAF_TCAM_SUM - 
DSAFV2_MAC_FUZZY_TCAM_NUM; i++) { + for (i = 0; i < dsaf_dev->tcam_max_num; i++) { u8 addr[ETH_ALEN]; u8 port; @@ -2034,19 +2073,14 @@ static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev, /** * hns_dsaf_free_dev - free dev mem - * @dev: struct device pointer + * @dsaf_dev: struct device pointer */ static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev) { (void)dev_set_drvdata(dsaf_dev->dev, NULL); } -/** - * dsaf_pfc_unit_cnt - set pfc unit count - * @dsaf_id: dsa fabric id - * @pport_rate: value array - * @pdsaf_pfc_unit_cnt: value array - */ +/* hns_dsaf_pfc_unit_cnt - set pfc unit count */ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id, enum dsaf_port_rate_mode rate) { @@ -2072,11 +2106,6 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id, unit_cnt); } -/** - * dsaf_port_work_rate_cfg - fifo - * @dsaf_id: dsa fabric id - * @xge_ge_work_mode - */ static void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id, enum dsaf_port_rate_mode rate_mode) @@ -2166,9 +2195,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev, - DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num); + DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num); hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, - DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); + DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num); /* pfc pause frame statistics stored in dsaf inode*/ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { @@ -2188,12 +2217,13 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) /** *hns_dsaf_get_regs - dump dsaf regs - *@dsaf_dev: dsaf device + *@ddev: dsaf device + *@port: mac port id *@data:data for value of regs */ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) { - u32 i = 0; + u32 i; u32 j; u32 *p = data; u32 reg_tmp; @@ -2285,237 +2315,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4); p[223 + i] = dsaf_read_dev(ddev, DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4); - p[224 + i] = dsaf_read_dev(ddev, + p[226 + i] = dsaf_read_dev(ddev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4); } - p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); + p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4); for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[228 + i] = dsaf_read_dev(ddev, + p[230 + i] = dsaf_read_dev(ddev, DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4); } - p[231] = dsaf_read_dev(ddev, - DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); + p[233] = dsaf_read_dev(ddev, + DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80); /* dsaf inode registers */ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[232 + i] = dsaf_read_dev(ddev, + p[234 + i] = dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); - p[235 + i] = dsaf_read_dev(ddev, + p[237 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80); - p[238 + i] = dsaf_read_dev(ddev, + p[240 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80); - p[241 + i] = dsaf_read_dev(ddev, + p[243 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80); - p[244 + i] = dsaf_read_dev(ddev, + p[246 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80); - p[245 + i] = dsaf_read_dev(ddev, + 
p[249 + i] = dsaf_read_dev(ddev, DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80); - p[248 + i] = dsaf_read_dev(ddev, + p[252 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_0_0_REG + j * 0x80); - p[251 + i] = dsaf_read_dev(ddev, + p[255 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_1_0_REG + j * 0x80); - p[254 + i] = dsaf_read_dev(ddev, + p[258 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_2_0_REG + j * 0x80); - p[257 + i] = dsaf_read_dev(ddev, + p[261 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CNT_3_0_REG + j * 0x80); - p[260 + i] = dsaf_read_dev(ddev, + p[264 + i] = dsaf_read_dev(ddev, DSAF_SBM_INER_ST_0_REG + j * 0x80); - p[263 + i] = dsaf_read_dev(ddev, + p[267 + i] = dsaf_read_dev(ddev, DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80); - p[266 + i] = dsaf_read_dev(ddev, + p[270 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80); - p[269 + i] = dsaf_read_dev(ddev, + p[273 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80); - p[272 + i] = dsaf_read_dev(ddev, + p[276 + i] = dsaf_read_dev(ddev, DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80); - p[275 + i] = dsaf_read_dev(ddev, + p[279 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80); - p[278 + i] = dsaf_read_dev(ddev, + p[282 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80); - p[281 + i] = dsaf_read_dev(ddev, + p[285 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80); - p[284 + i] = dsaf_read_dev(ddev, + p[288 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80); - p[287 + i] = dsaf_read_dev(ddev, + p[291 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80); - p[290 + i] = dsaf_read_dev(ddev, + p[294 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80); - p[293 + i] = dsaf_read_dev(ddev, + p[297 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80); - p[296 + i] = dsaf_read_dev(ddev, + p[300 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80); - p[299 + i] = dsaf_read_dev(ddev, + p[303 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80); - p[302 + i] = dsaf_read_dev(ddev, + p[306 + i] = dsaf_read_dev(ddev, DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80); - p[305 + i] = dsaf_read_dev(ddev, + p[309 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80); - p[308 + i] = dsaf_read_dev(ddev, + p[312 + i] = dsaf_read_dev(ddev, DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80); } /* dsaf onode registers */ for (i = 0; i < DSAF_XOD_NUM; i++) { - p[311 + i] = dsaf_read_dev(ddev, + p[315 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90); - p[319 + i] = dsaf_read_dev(ddev, + p[323 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90); - p[327 + i] = dsaf_read_dev(ddev, + p[331 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90); - p[335 + i] = dsaf_read_dev(ddev, + p[339 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90); - p[343 + i] = dsaf_read_dev(ddev, + p[347 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90); - p[351 + i] = dsaf_read_dev(ddev, + p[355 + i] = dsaf_read_dev(ddev, DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90); } - p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); - p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90); - p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); + p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); + p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + 
port * 0x90); + p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90); for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; - p[362 + i] = dsaf_read_dev(ddev, + p[366 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_L_0_REG + j * 0x90); - p[365 + i] = dsaf_read_dev(ddev, + p[369 + i] = dsaf_read_dev(ddev, DSAF_XOD_GNT_H_0_REG + j * 0x90); - p[368 + i] = dsaf_read_dev(ddev, + p[372 + i] = dsaf_read_dev(ddev, DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90); - p[371 + i] = dsaf_read_dev(ddev, + p[375 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90); - p[374 + i] = dsaf_read_dev(ddev, + p[378 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90); - p[377 + i] = dsaf_read_dev(ddev, + p[381 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90); - p[380 + i] = dsaf_read_dev(ddev, + p[384 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90); - p[383 + i] = dsaf_read_dev(ddev, + p[387 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90); - p[386 + i] = dsaf_read_dev(ddev, + p[390 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90); - p[389 + i] = dsaf_read_dev(ddev, + p[393 + i] = dsaf_read_dev(ddev, DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90); } - p[392] = dsaf_read_dev(ddev, + p[396] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90); - p[393] = dsaf_read_dev(ddev, + p[397] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90); - p[394] = dsaf_read_dev(ddev, + p[398] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90); - p[395] = dsaf_read_dev(ddev, + p[399] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90); - p[396] = dsaf_read_dev(ddev, + p[400] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90); - p[397] = dsaf_read_dev(ddev, + p[401] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90); - p[398] = dsaf_read_dev(ddev, + p[402] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90); - p[399] = dsaf_read_dev(ddev, + p[403] = dsaf_read_dev(ddev, DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90); - p[400] = dsaf_read_dev(ddev, + p[404] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90); - p[401] = dsaf_read_dev(ddev, + p[405] = dsaf_read_dev(ddev, DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90); - p[402] = dsaf_read_dev(ddev, + p[406] = dsaf_read_dev(ddev, DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90); - p[403] = dsaf_read_dev(ddev, + p[407] = dsaf_read_dev(ddev, DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90); - p[404] = dsaf_read_dev(ddev, + p[408] = dsaf_read_dev(ddev, DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90); /* dsaf voq registers */ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) { j = (i * DSAF_COMM_CHN + port) * 0x90; - p[405 + i] = dsaf_read_dev(ddev, + p[409 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_INVERT_EN_0_REG + j); - p[408 + i] = dsaf_read_dev(ddev, + p[412 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SRAM_PKT_NUM_0_REG + j); - p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); - p[414 + i] = dsaf_read_dev(ddev, + p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j); + p[418 + i] = dsaf_read_dev(ddev, DSAF_VOQ_OUT_PKT_NUM_0_REG + j); - p[417 + i] = dsaf_read_dev(ddev, + p[421 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ECC_ERR_ADDR_0_REG + j); - p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j); - p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); - p[426 + i] = dsaf_read_dev(ddev, + p[424 + i] = dsaf_read_dev(ddev, 
DSAF_VOQ_BP_STATUS_0_REG + j); + p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j); + p[430 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j); - p[429 + i] = dsaf_read_dev(ddev, + p[433 + i] = dsaf_read_dev(ddev, DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j); - p[432 + i] = dsaf_read_dev(ddev, + p[436 + i] = dsaf_read_dev(ddev, DSAF_VOQ_PPE_XOD_REQ_0_REG + j); - p[435 + i] = dsaf_read_dev(ddev, + p[439 + i] = dsaf_read_dev(ddev, DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j); - p[438 + i] = dsaf_read_dev(ddev, + p[442 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_ALL_THRD_0_REG + j); } /* dsaf tbl registers */ - p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); - p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); - p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); - p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); - p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG); - p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); - p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); - p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); - p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); - p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); - p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); - p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); - p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); - p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); - p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); - p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); - p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); - p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); - p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); - p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); - p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); - p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); + p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG); + p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG); + p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG); + p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG); + p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG); + p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG); + p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG); + p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG); + p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG); + p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG); + p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG); + p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG); + p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG); + p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG); + p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG); + p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); + p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG); + p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG); + p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG); + p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG); + p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG); + p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG); for (i = 0; i < DSAF_SW_PORT_NUM; i++) { j = i * 0x8; - p[464 + 2 * i] = dsaf_read_dev(ddev, + p[468 + 2 * i] = dsaf_read_dev(ddev, 
DSAF_TBL_DA0_MIS_INFO1_0_REG + j); - p[465 + 2 * i] = dsaf_read_dev(ddev, + p[469 + 2 * i] = dsaf_read_dev(ddev, DSAF_TBL_DA0_MIS_INFO0_0_REG + j); } - p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); - p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); - p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); - p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); - p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); - p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); - p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); - p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG); - p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); - p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); - p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); - p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); + p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG); + p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG); + p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG); + p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG); + p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG); + p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG); + p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG); + p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG); + p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG); + p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG); + p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG); + p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG); /* dsaf other registers */ - p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); - p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); - p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); - p[495] = dsaf_read_dev(ddev, + p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4); + p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4); + p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4); + p[499] = dsaf_read_dev(ddev, DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4); - p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); - p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); + p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); + p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); if (!is_ver1) - p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); + p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); /* mark end of dsaf regs */ - for (i = 499; i < 504; i++) + for (i = 503; i < 504; i++) p[i] = 0xdddddddd; } @@ -2664,67 +2694,183 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port, (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); } -/** - *hns_dsaf_get_sset_count - get dsaf regs count - *return dsaf regs count - */ int hns_dsaf_get_regs_count(void) { return DSAF_DUMP_REGS_NUM; } -/* Reserve the last TCAM entry for promisc support */ -#define dsaf_promisc_tcam_entry(port) \ - (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port)) -void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, - u32 port, bool enable) +static int hns_dsaf_get_port_id(u8 port) { + if (port < DSAF_SERVICE_NW_NUM) + return port; + + if (port >= DSAF_BASE_INNER_PORT_NUM) + return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; + + return -EINVAL; +} + +static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) +{ + struct 
dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port}; + struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf}; + struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} }; struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); - struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port}; + struct dsaf_drv_mac_single_dest_entry mask_entry; + struct dsaf_drv_tbl_tcam_key mask_key; + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; u16 entry_index; - struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask; - struct dsaf_tbl_tcam_mcast_cfg mac_data = {0}; + struct dsaf_drv_tbl_tcam_key mac_key; + struct hns_mac_cb *mac_cb; + u8 addr[ETH_ALEN] = {0}; + u8 port_num; + int mskid; - if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev)) + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + if (entry_index != DSAF_INVALID_ENTRY_IDX) return; - /* find the tcam entry index for promisc */ - entry_index = dsaf_promisc_tcam_entry(port); - - memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); - memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); - - /* config key mask */ - if (enable) { - dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, - DSAF_TBL_TCAM_KEY_PORT_M, - DSAF_TBL_TCAM_KEY_PORT_S, port); - dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan, - DSAF_TBL_TCAM_KEY_PORT_M, - DSAF_TBL_TCAM_KEY_PORT_S, 0xf); - - /* SUB_QID */ - dsaf_set_bit(mac_data.tbl_mcast_port_msk[0], - DSAF_SERVICE_NW_NUM, true); - mac_data.tbl_mcast_item_vld = true; /* item_vld bit */ - } else { - mac_data.tbl_mcast_item_vld = false; /* item_vld bit */ + /* put promisc tcam entry in the end. */ + /* 1. set promisc unicast vague tcam entry. */ + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev); + if (entry_index == DSAF_INVALID_ENTRY_IDX) { + dev_err(dsaf_dev->dev, + "enable uc promisc failed (port:%#x)\n", + port); + return; } - dev_dbg(dsaf_dev->dev, - "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n", - dsaf_dev->ae_dev.name, tbl_tcam_data.high.val, - tbl_tcam_data.low.val, entry_index); + mac_cb = dsaf_dev->mac_cb[port]; + (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num); + tbl_tcam_ucast.tbl_ucast_out_port = port_num; - /* config promisc entry with mask */ - hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, - (struct dsaf_tbl_tcam_data *)&tbl_tcam_data, - (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask, - &mac_data); + /* config uc vague table */ + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc, + &tbl_tcam_mask_uc, &tbl_tcam_ucast); - /* config software entry */ + /* update software entry */ + soft_mac_entry = priv->soft_mac_tbl; soft_mac_entry += entry_index; - soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; + soft_mac_entry->index = entry_index; + soft_mac_entry->tcam_key.high.val = mac_key.high.val; + soft_mac_entry->tcam_key.low.val = mac_key.low.val; + /* step back to the START for mc. */ + soft_mac_entry = priv->soft_mac_tbl; + + /* 2. set promisc multicast vague tcam entry. 
*/ + entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev); + if (entry_index == DSAF_INVALID_ENTRY_IDX) { + dev_err(dsaf_dev->dev, + "enable mc promisc failed (port:%#x)\n", + port); + return; + } + + memset(&mask_entry, 0x0, sizeof(mask_entry)); + memset(&mask_key, 0x0, sizeof(mask_key)); + memset(&mac_key, 0x0, sizeof(mac_key)); + mask_entry.addr[0] = 0x01; + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mask_entry.in_vlan_id, + port, mask_entry.addr); + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, + 0xf, mask_entry.addr); + tbl_tcam_mcast.tbl_mcast_item_vld = 1; + tbl_tcam_mcast.tbl_mcast_old_en = 0; + + /* set MAC port to handle multicast */ + mskid = hns_dsaf_get_port_id(port); + if (mskid == -EINVAL) { + dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port, + mac_key.high.val, mac_key.low.val); + return; + } + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1); + + /* set pool bit map to handle multicast */ + mskid = hns_dsaf_get_port_id(port_num); + if (mskid == -EINVAL) { + dev_err(dsaf_dev->dev, + "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port_num, + mac_key.high.val, mac_key.low.val); + return; + } + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1); + + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, + (struct dsaf_tbl_tcam_data *)(&mask_key), + &tbl_tcam_mcast); + + /* update software entry */ + soft_mac_entry += entry_index; + soft_mac_entry->index = entry_index; + soft_mac_entry->tcam_key.high.val = mac_key.high.val; + soft_mac_entry->tcam_key.low.val = mac_key.low.val; +} + +static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port) +{ + struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port}; + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0}; + struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} }; + struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev); + struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0}; + struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0}; + struct dsaf_drv_soft_mac_tbl *soft_mac_entry; + u16 entry_index; + struct dsaf_drv_tbl_tcam_key mac_key; + u8 addr[ETH_ALEN] = {0}; + + /* 1. delete uc vague tcam entry. */ + /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + + if (entry_index == DSAF_INVALID_ENTRY_IDX) + return; + + /* config uc vague table */ + hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc, + &tbl_tcam_mask, &tbl_tcam_ucast); + /* update soft management table. */ + soft_mac_entry = priv->soft_mac_tbl; + soft_mac_entry += entry_index; + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; + /* step back to the START for mc. */ + soft_mac_entry = priv->soft_mac_tbl; + + /* 2. delete mc vague tcam entry. */ + addr[0] = 0x01; + memset(&mac_key, 0x0, sizeof(mac_key)); + hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); + entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); + + if (entry_index == DSAF_INVALID_ENTRY_IDX) + return; + + /* config mc vague table */ + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, + &tbl_tcam_mask, &tbl_tcam_mcast); + /* update soft management table. 
*/ + soft_mac_entry += entry_index; + soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; +} + +/* Reserve the last TCAM entry for promisc support */ +void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, + u32 port, bool enable) +{ + if (enable) + set_promisc_tcam_enable(dsaf_dev, port); + else + set_promisc_tcam_disable(dsaf_dev, port); } int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) @@ -2756,11 +2902,6 @@ int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) return 0; } -/** - * dsaf_probe - probo dsaf dev - * @pdev: dasf platform device - * retuen 0 - success , negative --fail - */ static int hns_dsaf_probe(struct platform_device *pdev) { struct dsaf_device *dsaf_dev; @@ -2811,10 +2952,6 @@ static int hns_dsaf_probe(struct platform_device *pdev) return ret; } -/** - * dsaf_remove - remove dsaf dev - * @pdev: dasf platform device - */ static int hns_dsaf_remove(struct platform_device *pdev) { struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev); @@ -2865,7 +3002,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) u32 sl; u32 credit; int i; - const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, @@ -2875,7 +3012,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, }; - const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, @@ -2906,6 +3043,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); + put_device(&pdev->dev); return -ENODEV; } @@ -2913,6 +3051,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", dsaf_dev->ae_dev.name); + put_device(&pdev->dev); return -ENODEV; } @@ -2951,6 +3090,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); @@ -2958,4 +3100,4 @@ EXPORT_SYMBOL(hns_dsaf_roce_reset); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_DESCRIPTION("HNS DSAF driver"); -MODULE_VERSION(DSAF_MOD_VERSION); +MODULE_VERSION(HNAE_DRIVER_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6083faa790aa80be1f6c635b15a50..a36bde780f8e685d11a2346eaf6e6f2f3066a018 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. 
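Promiscuous mode is now handled with vague-match TCAM entries placed at the tail of the table (located by the new reverse empty-entry search), and the destination port resolved by hns_dsaf_get_port_id() is folded into the 32-bit words of the multicast port mask via mskid / 32 and mskid % 32. A minimal sketch of that bitmap arithmetic; EXAMPLE_MSK_WORDS and the helper name are assumptions for illustration only:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Illustrative sketch: place a resolved port id into the 32-bit words of a
 * multicast port mask, mirroring the mskid / 32 and mskid % 32 arithmetic
 * above.  EXAMPLE_MSK_WORDS is an assumed size for the example.
 */
#define EXAMPLE_MSK_WORDS	8

static void example_set_port_in_mask(u32 port_msk[EXAMPLE_MSK_WORDS],
				     int port_id)
{
	if (port_id < 0 || port_id >= EXAMPLE_MSK_WORDS * 32)
		return;

	port_msk[port_id / 32] |= BIT(port_id % 32);
}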
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __HNS_DSAF_MAIN_H @@ -17,7 +13,6 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" -#define DSAF_MOD_VERSION "v1.0" #define DSAF_DEVICE_NAME "dsaf" #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 @@ -467,4 +462,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); + #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c95459891c65080cd49c61f839afaa60..13affb9ce0c32400874e6f6d8fb439220063e803 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include "hns_dsaf_mac.h" @@ -250,6 +246,7 @@ static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb, } #define RESET_REQ_OR_DREQ 1 +#define DSAF_RESET_REQ_VAL 0xfffff static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type, u32 port_type, u32 port, u32 val) @@ -284,16 +281,39 @@ static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset) u32 xbar_reg_addr; u32 nt_reg_addr; - if (!dereset) { - xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG; - nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG; + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + if (!dereset) { + xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG; + nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG; + } else { + xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG; + nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG; + } + + dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr, + RESET_REQ_OR_DREQ); + dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr, + RESET_REQ_OR_DREQ); } else { - xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG; - nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG; - } + if (!dereset) { + xbar_reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG; + nt_reg_addr = DSAF_SUB_SC_DSAF_CLK_DIS_REG; + } else { + xbar_reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG; + nt_reg_addr = DSAF_SUB_SC_DSAF_CLK_EN_REG; + } + + dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr, + DSAF_RESET_REQ_VAL); + mdelay(10); - dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ); - dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); + /*enable com_st and xbar_com bits for init register first*/ + if (!dereset) + dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr, + DSAF_RESET_REQ_VAL); + else + dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr, 3 << 18); + } } static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset) @@ -338,7 +358,7 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, * bit6-11 for ppe0-5 * bit12-17 for roce0-5 * bit18-19 for com/dfx - * @enable: false - request reset , true - drop reset + * @dereset: false - request reset , true - drop reset */ static void hns_dsaf_srst_chns(struct 
dsaf_device *dsaf_dev, u32 msk, bool dereset) @@ -354,14 +374,14 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) } /** - * hns_dsaf_srst_chns - reset dsaf channels + * hns_dsaf_srst_chns_acpi - reset dsaf channels * @dsaf_dev: dsaf device struct pointer * @msk: xbar channels mask value: * bit0-5 for xge0-5 * bit6-11 for ppe0-5 * bit12-17 for roce0-5 * bit18-19 for com/dfx - * @enable: false - request reset , true - drop reset + * @dereset: false - request reset , true - drop reset */ static void hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) @@ -402,6 +422,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, return; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8. + We need check to prevent array overflow */ + if (port >= DSAF_MAX_PORT_NUM) + return; reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ @@ -503,7 +527,7 @@ static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset) } /** - * hns_mac_get_sds_mode - get phy ifterface form serdes mode + * hns_mac_get_phy_if - get phy ifterface form serdes mode * @mac_cb: mac control block * retuen phy interface */ @@ -548,9 +572,9 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, @@ -595,9 +619,9 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, @@ -616,6 +640,7 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) /** * hns_mac_config_sds_loopback - set loop back for serdes * @mac_cb: mac control block + * @en: true means enable sds loopback; false means disable sds loopback * retuen 0 == success */ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) @@ -670,7 +695,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); } else { - u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + + u8 __iomem *base_addr = mac_cb->serdes_vaddr + (mac_cb->mac_id <= 3 ? 
0x00280000 : 0x00200000); dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); } @@ -687,7 +712,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) obj_args[0].integer.type = ACPI_TYPE_INTEGER; obj_args[0].integer.value = mac_cb->mac_id; obj_args[1].integer.type = ACPI_TYPE_INTEGER; - obj_args[1].integer.value = !!en; + obj_args[1].integer.value = en; argv4.type = ACPI_TYPE_PACKAGE; argv4.package.count = 2; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h index 310e80261366ae3879d52209e541b37d0b3a0d3e..f64c6667dd05fb7d281c49dfbf20e9b96e4f14ed 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef _HNS_DSAF_MISC_H diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 0942e4916d9d0d1b0b78958481ea099e66e59639..ad3b8adfa03718b6384d85e54d7dfdc105d91bd9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
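The ACPI helpers above build a one-integer package argument and hand it to acpi_evaluate_dsm(); the switch from commas to semicolons makes the argv4 setup ordinary statements rather than a comma expression. A minimal sketch of that call pattern; the guid, revision and function index are placeholders rather than the driver's actual values:

#include <linux/acpi.h>
#include <linux/device.h>

/*
 * Illustrative sketch of the _DSM call pattern used above: one integer
 * argument wrapped in a package, evaluated against the device's ACPI handle.
 */
static int example_dsm_call(struct device *dev, const guid_t *guid,
			    u64 rev, u64 func, u64 arg)
{
	union acpi_object obj_args, argv4;
	union acpi_object *obj;

	obj_args.integer.type = ACPI_TYPE_INTEGER;
	obj_args.integer.value = arg;

	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = 1;
	argv4.package.elements = &obj_args;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func, &argv4);
	if (!obj)
		return -EIO;

	ACPI_FREE(obj);
	return 0;
}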
*/ #include @@ -61,7 +57,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, } } -static void __iomem * +static u8 __iomem * hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) { return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; @@ -83,8 +79,9 @@ static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) else ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM; - ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) + - ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL); + ppe_common = devm_kzalloc(dsaf_dev->dev, + struct_size(ppe_common, ppe_cb, ppe_num), + GFP_KERNEL); if (!ppe_common) return -ENOMEM; @@ -110,8 +107,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) dsaf_dev->ppe_common[comm_index] = NULL; } -static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, - int ppe_idx) +static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, + int ppe_idx) { return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } @@ -182,7 +179,7 @@ static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid) /** * hns_ppe_set_port_mode - set port mode - * @ppe_device: ppe device + * @ppe_cb: ppe device * @mode: port mode */ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb, @@ -299,7 +296,7 @@ int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) } /** - * ppe_init_hw - init ppe + * hns_ppe_init_hw - init ppe * @ppe_cb: ppe device */ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) @@ -346,8 +343,8 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) } /** - * ppe_uninit_hw - uninit ppe - * @ppe_device: ppe device + * hns_ppe_uninit_hw - uninit ppe + * @ppe_cb: ppe device */ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) { @@ -385,8 +382,9 @@ void hns_ppe_uninit(struct dsaf_device *dsaf_dev) } /** - * hns_ppe_reset - reinit ppe/rcb hw + * hns_ppe_reset_common - reinit ppe/rcb hw * @dsaf_dev: dasf device + * @ppe_common_index: index of ppe device * retuen void */ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index) @@ -457,8 +455,8 @@ int hns_ppe_get_regs_count(void) } /** - * ppe_get_strings - get ppe srting - * @ppe_device: ppe device + * hns_ppe_get_strings - get ppe srting + * @ppe_cb: ppe device * @stringset: string set type * @data: output string */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a018cd5b48b4a62093c104905aa4463..0f0e16f9afc0d7cc5b6cb9dbe0d2cfd7d596107f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef _HNS_DSAF_PPE_H @@ -80,7 +76,7 @@ struct hns_ppe_cb { struct hns_ppe_hw_stats hw_stats; u8 index; /* index in a ppe common device */ - void __iomem *io_base; + u8 __iomem *io_base; int virq; u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ @@ -89,14 +85,14 @@ struct hns_ppe_cb { struct ppe_common_cb { struct device *dev; struct dsaf_device *dsaf_dev; - void __iomem *io_base; + u8 __iomem *io_base; enum ppe_common_mode ppe_mode; u8 comm_index; /*ppe_common index*/ u32 ppe_num; - struct hns_ppe_cb ppe_cb[0]; + struct hns_ppe_cb ppe_cb[]; }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 5d64519b9b1dc3cfd6e2c403126aa4ffa5832dfc..c8803cd7271f5575002fac7f54207e06d48d1e44 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -195,7 +191,8 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag) /** *hns_rcb_ring_enable_hw - enable ring - *@ring: rcb ring + *@q: hnae queue struct pointer + *@val: true means enable; false means disable */ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val) { @@ -458,7 +455,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; - ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + + ring->io_base = ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; mdnum_ppkt = is_ver1 ? 
HNS_RCB_RING_MAX_TXBD_PER_PKT : @@ -764,7 +761,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) +static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; @@ -788,8 +785,9 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int ring_num = hns_rcb_get_ring_num(dsaf_dev); rcb_common = - devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + - ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL); + devm_kzalloc(dsaf_dev->dev, + struct_size(rcb_common, ring_pair_cb, ring_num), + GFP_KERNEL); if (!rcb_common) { dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n"); return -ENOMEM; @@ -847,7 +845,7 @@ void hns_rcb_update_stats(struct hnae_queue *queue) /** *hns_rcb_get_stats - get rcb statistic - *@ring: rcb ring + *@queue: hnae_queue *@data:statistic value */ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) @@ -892,11 +890,6 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err; } -/** - *hns_rcb_get_ring_sset_count - rcb string set count - *@stringset:ethtool cmd - *return rcb ring string set count - */ int hns_rcb_get_ring_sset_count(int stringset) { if (stringset == ETH_SS_STATS) @@ -905,19 +898,11 @@ int hns_rcb_get_ring_sset_count(int stringset) return 0; } -/** - *hns_rcb_get_common_regs_count - rcb common regs count - *return regs count - */ int hns_rcb_get_common_regs_count(void) { return HNS_RCB_COMMON_DUMP_REG_NUM; } -/** - *rcb_get_sset_count - rcb ring regs count - *return regs count - */ int hns_rcb_get_ring_regs_count(void) { return HNS_RCB_RING_DUMP_REG_NUM; @@ -1003,7 +988,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; - u32 i = 0; + u32 i; /*rcb common registers */ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); @@ -1074,7 +1059,7 @@ void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) u32 *regs = data; struct ring_pair_cb *ring_pair = container_of(queue, struct ring_pair_cb, q); - u32 i = 0; + u32 i; /*rcb ring registers */ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 2319b772a271e519d6a69a0976713a19c60f4a8a..a9f8059256999a0b69098b67e53aff5e5b53c6fd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef _HNS_DSAF_RCB_H @@ -112,7 +108,7 @@ struct rcb_common_cb { u32 ring_num; u32 desc_num; /* desc num per queue*/ - struct ring_pair_cb ring_pair_cb[0]; + struct ring_pair_cb ring_pair_cb[]; }; int hns_rcb_buf_size2type(u32 buf_size); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 74d935d82cbc6050a287a07532024675ce75254e..b5a7d2609351371468dd5ba3d133b9f0bc833271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef _DSAF_REG_H_ @@ -42,7 +38,7 @@ #define DSAF_TOTAL_QUEUE_NUM 129 /* reserved a tcam entry for each port to support promisc by fuzzy match */ -#define DSAFV2_MAC_FUZZY_TCAM_NUM DSAF_MAX_PORT_NUM +#define DSAFV2_MAC_FUZZY_TCAM_NUM (DSAF_MAX_PORT_NUM * 2) #define DSAF_TCAM_SUM 512 #define DSAF_LINE_SUM (2048 * 14) @@ -176,7 +172,7 @@ #define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50 #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 -#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00 +#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50 @@ -404,11 +400,11 @@ #define RCB_ECC_ERR_ADDR4_REG 0x460 #define RCB_ECC_ERR_ADDR5_REG 0x464 -#define RCB_COM_SF_CFG_INTMASK_RING 0x480 -#define RCB_COM_SF_CFG_RING_STS 0x484 -#define RCB_COM_SF_CFG_RING 0x488 -#define RCB_COM_SF_CFG_INTMASK_BD 0x48C -#define RCB_COM_SF_CFG_BD_RINT_STS 0x470 +#define RCB_COM_SF_CFG_INTMASK_RING 0x470 +#define RCB_COM_SF_CFG_RING_STS 0x474 +#define RCB_COM_SF_CFG_RING 0x478 +#define RCB_COM_SF_CFG_INTMASK_BD 0x47C +#define RCB_COM_SF_CFG_BD_RINT_STS 0x480 #define RCB_COM_RCB_RD_BD_BUSY 0x490 #define RCB_COM_RCB_FBD_CRT_EN 0x494 #define RCB_COM_AXI_WR_ERR_INTMASK 0x498 @@ -534,6 +530,7 @@ #define GMAC_LD_LINK_COUNTER_REG 0x01D0UL #define GMAC_LOOP_REG 0x01DCUL #define GMAC_RECV_CONTROL_REG 0x01E0UL +#define GMAC_PCS_RX_EN_REG 0x01E4UL #define GMAC_VLAN_CODE_REG 0x01E8UL #define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL @@ -1017,7 +1014,7 @@ #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 -static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) +static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) { writel(value, base + reg); } @@ -1052,7 +1049,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) #define dsaf_set_bit(origin, shift, val) \ dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) -static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = dsaf_read_reg(base, reg); @@ -1072,7 +1069,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, #define dsaf_get_bit(origin, shift) \ dsaf_get_field((origin), (1ull << (shift)), (shift)) -static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline u32 
dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; @@ -1088,11 +1085,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) #define dsaf_write_b(addr, data)\ - writeb((data), (__iomem unsigned char *)(addr)) + writeb((data), (__iomem u8 *)(addr)) #define dsaf_read_b(addr)\ - readb((__iomem unsigned char *)(addr)) + readb((__iomem u8 *)(addr)) #define hns_mac_reg_read64(drv, offset) \ - readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) + readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) #endif /* _DSAF_REG_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea1726da855c13b78e95bb6bd36a3c..bb00a6feb5159f41d00ef07e4a4a786597e6794e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -108,7 +104,7 @@ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value) } /** - * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac + * hns_xgmac_lf_rf_insert - insert lf rf control about xgmac * @mac_drv: mac driver * @mode: inserf rf or lf */ @@ -119,7 +115,7 @@ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode) } /** - * hns_xgmac__lf_rf_control_init - initial the lf rf control register + * hns_xgmac_lf_rf_control_init - initial the lf rf control register * @mac_drv: mac driver */ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) @@ -129,12 +125,12 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); - dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); + dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** *hns_xgmac_enable - enable xgmac port - *@drv: mac driver + *@mac_drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) @@ -181,7 +177,6 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) *@drv: mac driver *@tx_value: tx value *@rx_value: rx value - *return status */ static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value, u32 rx_value) @@ -246,7 +241,8 @@ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval) /** *hns_xgmac_pausefrm_cfg - set pause param about xgmac *@mac_drv: mac driver - *@newval:enable of pad and crc + *@rx_en: enable or disable rx + *@tx_en: enable or disable tx */ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) { @@ -269,19 +265,6 @@ static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val); } -/** - *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac - *@mac_drv: mac driver - *@enable:enable rx pause param - */ -static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) -{ - struct mac_driver *drv = 
(struct mac_driver *)mac_drv; - - dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, - XGMAC_PAUSE_CTL_RX_B, !!enable); -} - /** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver @@ -494,12 +477,11 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) /** *hns_xgmac_get_regs - dump xgmac regs *@mac_drv: mac driver - *@cmd:ethtool cmd *@data:data for value of regs */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { - u32 i = 0; + u32 i; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; @@ -774,11 +756,6 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data) } } -/** - *hns_xgmac_get_sset_count - get xgmac string set count - *@stringset: type of values in data - *return xgmac string set count - */ static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) @@ -787,10 +764,6 @@ static int hns_xgmac_get_sset_count(int stringset) return 0; } -/** - *hns_xgmac_get_regs_count - get xgmac regs count - *return xgmac regs count - */ static int hns_xgmac_get_regs_count(void) { return HNS_XGMAC_DUMP_NUM; @@ -819,8 +792,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; mac_drv->config_half_duplex = NULL; - mac_drv->set_rx_ignore_pause_frames = - hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h index da6c5343d3e139898662b3b60b2a1168d8aaccc3..e1b3db9807127d0fa417c004586d56af38c30517 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef _HNS_XGMAC_H diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 28e907831b0eddbf760e0edb579ae7ae708520e0..3e1384b511bc8db016978b24787d9a4a1e4df646 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include @@ -15,6 +11,8 @@ #include #include #include +#include +#include #include #include #include @@ -29,9 +27,6 @@ #define SERVICE_TIMER_HZ (1 * HZ) -#define NIC_TX_CLEAN_MAX_NUM 256 -#define NIC_RX_CLEAN_MAX_NUM 64 - #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 #define HNS_BUFFER_SIZE_2048 2048 @@ -40,6 +35,8 @@ #define SKB_TMP_LEN(SKB) \ (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) +#define INVALID_TX_RING 0xffff + static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size, int send_sz, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -150,7 +147,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match); static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu) + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -283,6 +281,15 @@ static int hns_nic_maybe_stop_tso( return 0; } +static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum, + struct hnae_ring *ring) +{ + if (skb_is_gso(*out_skb)) + return hns_nic_maybe_stop_tso(out_skb, bnum, ring); + else + return hns_nic_maybe_stop_tx(out_skb, bnum, ring); +} + static void fill_tso_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -308,6 +315,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, mtu); } +static void fill_desc_v2(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) +{ + if (is_gso) + fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); + else + fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); +} + netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, struct sk_buff *skb, struct hns_nic_ring_data *ring_data) @@ -321,6 +341,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, int seg_num; dma_addr_t dma; int size, next_to_use; + bool is_gso; int i; switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { @@ -347,8 +368,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } + is_gso = skb_is_gso(skb); priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - buf_num, DESC_TYPE_SKB, ndev->mtu); + buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso); /* fill the fragments */ for (i = 1; i < seg_num; i++) { @@ -362,7 +384,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, } priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, seg_num - 1 == i ? 
1 : 0, buf_num, - DESC_TYPE_PAGE, ndev->mtu); + DESC_TYPE_PAGE, ndev->mtu, is_gso); } /*complete translate all packets*/ @@ -376,8 +398,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); - ring->stats.tx_pkts++; - ring->stats.tx_bytes += skb->len; return NETDEV_TX_OK; @@ -701,8 +721,6 @@ hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count) writel_relaxed(i, ring->io_base + RCB_REG_HEAD); } -/* return error number for error or number of desc left to take - */ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, struct sk_buff *skb) { @@ -780,7 +798,7 @@ static u32 smooth_alg(u32 new_param, u32 old_param) } /** - * hns_nic_adp_coalesce - self adapte coalesce according to rx rate + * hns_nic_adpt_coalesce - self adapte coalesce according to rx rate * @ring_data: pointer to hns_nic_ring_data **/ static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data) @@ -882,7 +900,7 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int num = 0; + int num; bool rx_stopped; hns_update_rx_rate(ring); @@ -952,15 +970,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -/* netif_tx_lock will turn down the performance, set only when necessary */ -#ifdef CONFIG_NET_POLL_CONTROLLER -#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock) -#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock) -#else -#define NETIF_TX_LOCK(ring) -#define NETIF_TX_UNLOCK(ring) -#endif - /* reclaim all desc in one budget * return error or number of desc left */ @@ -974,21 +983,19 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = readl_relaxed(ring->io_base + RCB_REG_HEAD); rmb(); /* make sure head is ready before touch any data */ - if (is_ring_empty(ring) || head == ring->next_to_clean) { - NETIF_TX_UNLOCK(ring); + if (is_ring_empty(ring) || head == ring->next_to_clean) return 0; /* no data to poll */ - } if (!is_valid_clean_head(ring, head)) { - netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); + netdev_err(ndev, "wrong head (%u-%u, %u-%u) fbd:%u offset:%u\n", + readl_relaxed(ring->io_base + RCB_REG_TAIL), head, + ring->next_to_use, ring->next_to_clean, + readl_relaxed(ring->io_base + RCB_REG_FBDNUM), + readl_relaxed(ring->io_base + RCB_REG_OFFSET)); ring->stats.io_err_cnt++; - NETIF_TX_UNLOCK(ring); return -EIO; } @@ -999,8 +1006,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, /* issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); } - - NETIF_TX_UNLOCK(ring); + /* update tx ring statistics. 
*/ + ring->stats.tx_pkts += pkts; + ring->stats.tx_bytes += bytes; dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); @@ -1061,16 +1069,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = ring->next_to_use; /* ntu :soft setted ring position*/ bytes = 0; pkts = 0; while (head != ring->next_to_clean) hns_nic_reclaim_one_desc(ring, &bytes, &pkts); - NETIF_TX_UNLOCK(ring); - dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_reset_queue(dev_queue); } @@ -1155,6 +1159,13 @@ static void hns_nic_adjust_link(struct net_device *ndev) } } +static int hns_phy_marvell_fixup(struct phy_device *phydev) +{ + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + + return 0; +} + /** *hns_nic_init_phy - init phy *@ndev: net device @@ -1169,9 +1180,25 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (!h->phy_dev) return 0; + phy_dev->supported &= h->if_support; + phy_dev->advertising = phy_dev->supported; + + if (h->phy_if == PHY_INTERFACE_MODE_XGMII) + phy_dev->autoneg = false; + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { phy_dev->dev_flags = 0; + /* register the PHY fixup (for Marvell 88E1510) */ + ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510, + MARVELL_PHY_ID_MASK, + hns_phy_marvell_fixup); + /* we can live without it, so just issue a warning */ + if (ret) + netdev_warn(ndev, + "Cannot register PHY fixup, ret=%d\n", + ret); + ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link, h->phy_if); } else { @@ -1180,11 +1207,7 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; - phy_dev->supported &= h->if_support; - phy_dev->advertising = phy_dev->supported; - - if (h->phy_if == PHY_INTERFACE_MODE_XGMII) - phy_dev->autoneg = false; + phy_attached_info(phy_dev); return 0; } @@ -1281,6 +1304,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx, return cpu; } +static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv) +{ + int i; + + for (i = 0; i < q_num * 2; i++) { + if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { + irq_set_affinity_hint(priv->ring_data[i].ring->irq, + NULL); + free_irq(priv->ring_data[i].ring->irq, + &priv->ring_data[i]); + priv->ring_data[i].ring->irq_init_flag = + RCB_IRQ_NOT_INITED; + } + } +} + static int hns_nic_init_irq(struct hns_nic_priv *priv) { struct hnae_handle *h = priv->ae_handle; @@ -1301,14 +1340,14 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0'; + irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN); ret = request_irq(rd->ring->irq, hns_irq_handle, 0, rd->ring->ring_name, rd); if (ret) { netdev_err(priv->netdev, "request irq(%d) fail\n", rd->ring->irq); - return ret; + goto out_free_irq; } - disable_irq(rd->ring->irq); cpu = hns_nic_init_affinity_mask(h->q_num, i, rd->ring, &rd->mask); @@ -1321,6 +1360,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) } return 0; + +out_free_irq: + hns_nic_free_irq(h->q_num, priv); + return ret; } static int hns_nic_net_up(struct net_device *ndev) @@ -1330,6 +1373,9 @@ static int hns_nic_net_up(struct net_device *ndev) int i, j; int ret; + if (!test_bit(NIC_STATE_DOWN, &priv->state)) + return 0; + ret = hns_nic_init_irq(priv); if (ret != 0) { netdev_err(ndev, "hns init irq failed! 
ret=%d\n", ret); @@ -1365,6 +1411,7 @@ static int hns_nic_net_up(struct net_device *ndev) for (j = i - 1; j >= 0; j--) hns_nic_ring_close(ndev, j); + hns_nic_free_irq(h->q_num, priv); set_bit(NIC_STATE_DOWN, &priv->state); return ret; @@ -1464,6 +1511,13 @@ static int hns_nic_net_open(struct net_device *ndev) return ret; } + /** + * The MAC is not XGE, we select fixed xmit queue. Mac0 select tx1, + * mac1 select tx2, and so on. + */ + if (!(h->if_support & SUPPORTED_10000baseKR_Full)) + priv->tx_queue = h->eport_id + 1; + ret = hns_nic_net_up(ndev); if (ret) { netdev_err(ndev, @@ -1482,11 +1536,19 @@ static int hns_nic_net_stop(struct net_device *ndev) } static void hns_tx_timeout_reset(struct hns_nic_priv *priv); +#define HNS_TX_TIMEO_LIMIT (40 * HZ) static void hns_nic_net_timeout(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - hns_tx_timeout_reset(priv); + if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) { + ndev->watchdog_timeo *= 2; + netdev_info(ndev, "watchdog_timo changed to %d.\n", + ndev->watchdog_timeo); + } else { + ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT; + hns_tx_timeout_reset(priv); + } } static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, @@ -1508,7 +1570,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, { struct hns_nic_priv *priv = netdev_priv(ndev); - assert(skb->queue_mapping < ndev->ae_handle->q_num); + assert(skb->queue_mapping < priv->ae_handle->q_num); return hns_nic_net_xmit_hw(ndev, skb, &tx_ring_data(priv, skb->queue_mapping)); @@ -1782,15 +1844,6 @@ static int hns_nic_set_features(struct net_device *netdev, netdev_info(netdev, "enet v1 do not support tso!\n"); break; default: - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* The chip only support 7*4096 */ - netif_set_gso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } break; } netdev->features = features; @@ -1837,11 +1890,8 @@ static int hns_nic_uc_unsync(struct net_device *netdev, } /** - * nic_set_multicast_list - set mutl mac address - * @netdev: net device - * @p: mac address - * - * return void + * hns_set_multicast_list - set mutl mac address + * @ndev: net device */ static void hns_set_multicast_list(struct net_device *ndev) { @@ -1886,7 +1936,7 @@ static void hns_nic_set_rx_mode(struct net_device *ndev) static void hns_nic_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - int idx = 0; + int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; u64 tx_pkts = 0; @@ -1928,6 +1978,61 @@ static void hns_nic_get_stats64(struct net_device *ndev, stats->tx_compressed = ndev->stats.tx_compressed; } +static u32 hns_calc_tx_rss(u32 sip, u32 dip, u32 sport, u32 dport, u32 *rss_key) +{ + u32 rss = 0; + int i; + u32 port; + + port = (sport << 16) | dport; + + for (i = 0; i < 32; i++) + if (sip & (1 << (31 - i))) + rss ^= (rss_key[9] << i) | + (u32)((u64)rss_key[8] >> (32 - i)); + + for (i = 0; i < 32; i++) + if (dip & (1 << (31 - i))) + rss ^= (rss_key[8] << i) | + (u32)((u64)rss_key[7] >> (32 - i)); + + for (i = 0; i < 32; i++) + if (port & (1 << (31 - i))) + rss ^= (rss_key[7] << i) | + (u32)((u64)rss_key[6] >> (32 - i)); + + return rss; +} + +/* if tcp or udp, then calc tx ring index */ +static u16 hns_calc_tx_ring_idx(struct hns_nic_priv *priv, + struct sk_buff *skb) +{ + struct hnae_handle *handle; + struct iphdr *iphdr; + struct tcphdr *tcphdr; + u32 rss; + int 
protocol; + u16 ring = INVALID_TX_RING; + + if (skb->protocol == htons(ETH_P_IP)) { + iphdr = ip_hdr(skb); + protocol = iphdr->protocol; + if (protocol == IPPROTO_TCP) { + /* because tcp and udp dest and src port is same */ + tcphdr = tcp_hdr(skb); + handle = priv->ae_handle; + rss = hns_calc_tx_rss(ntohl(iphdr->daddr), + ntohl(iphdr->saddr), + ntohs(tcphdr->dest), + ntohs(tcphdr->source), + handle->rss_key); + ring = handle->rss_indir_table[rss & 0xff] & 0xf; + } + } + + return ring; +} static u16 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, struct net_device *sb_dev, @@ -1935,11 +2040,20 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; struct hns_nic_priv *priv = netdev_priv(ndev); + u16 ring; /* fix hardware broadcast/multicast packets queue loopback */ if (!AE_IS_VER1(priv->enet_ver) && is_multicast_ether_addr(eth_hdr->h_dest)) return 0; + + /* if netdev init select queue, apply it. */ + if (priv->tx_queue) + return priv->tx_queue; + + ring = hns_calc_tx_ring_idx(priv, skb); + if (ring != INVALID_TX_RING) + return ring; else return fallback(ndev, skb, NULL); } @@ -2049,11 +2163,11 @@ static void hns_nic_service_task(struct work_struct *work) = container_of(work, struct hns_nic_priv, service_task); struct hnae_handle *h = priv->ae_handle; + hns_nic_reset_subtask(priv); hns_nic_update_link_status(priv->netdev); h->dev->ops->update_led_status(h); hns_nic_update_stats(priv->netdev); - hns_nic_reset_subtask(priv); hns_nic_service_event_complete(priv); } @@ -2118,7 +2232,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_tx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } for (i = h->q_num; i < h->q_num * 2; i++) { @@ -2131,7 +2245,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_rx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } @@ -2169,16 +2283,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; } else { priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* This chip only support 7*4096 */ - netif_set_gso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } + priv->ops.fill_desc = fill_desc_v2; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2; + netif_set_gso_max_size(netdev, 7 * 4096); /* enable tso when init * control tso on/off through TSE bit in bd */ @@ -2339,10 +2446,11 @@ static int hns_nic_dev_probe(struct platform_device *pdev) ndev->min_mtu = MAC_MIN_MTU; switch (priv->enet_ver) { case AE_VERSION_2: - ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE; ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; + ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6; ndev->max_mtu = MAC_MAX_MTU_V2 - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); break; @@ -2384,6 +2492,8 @@ static int hns_nic_dev_probe(struct platform_device 
*pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); out_read_prop_fail: + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); return ret; } @@ -2400,8 +2510,11 @@ static int hns_nic_dev_remove(struct platform_device *pdev) hns_nic_uninit_ring_data(priv); priv->ring_data = NULL; - if (ndev->phydev) + if (ndev->phydev) { + phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510, + MARVELL_PHY_ID_MASK); phy_disconnect(ndev->phydev); + } if (!IS_ERR_OR_NULL(priv->ae_handle)) hnae_put_handle(priv->ae_handle); @@ -2413,6 +2526,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev) set_bit(NIC_STATE_REMOVING, &priv->state); (void)cancel_work_sync(&priv->service_task); + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); + free_netdev(ndev); return 0; } @@ -2441,3 +2557,4 @@ MODULE_DESCRIPTION("HISILICON HNS Ethernet driver"); MODULE_AUTHOR("Hisilicon, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:hns-nic"); +MODULE_VERSION(HNAE_DRIVER_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 26e9afcbdd50f77642d6455a6cea20b2ffc8032c..27986dbec927f9c7bafc8ad49c746e8161801be0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __HNS_ENET_H @@ -48,7 +44,8 @@ struct hns_nic_ring_data { struct hns_nic_ops { void (*fill_desc)(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu); + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring); void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); @@ -82,6 +79,8 @@ struct hns_nic_priv { struct work_struct service_task; struct notifier_block notifier_block; + + u16 tx_queue; }; #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 774beda040a16a93a80db08e9383a2fe69a85eba..8fcb189ed0ba57bfc5ab860b3940454cc58ad00a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include @@ -139,11 +135,6 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev, return -EINVAL; } - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - /* When there is no phy, autoneg is off. 
*/ cmd->base.autoneg = false; cmd->base.speed = speed; @@ -152,6 +143,11 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev, if (net_dev->phydev) phy_ethtool_ksettings_get(net_dev->phydev, cmd); + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + link_stat = hns_nic_get_link(net_dev); if (!link_stat) { cmd->base.speed = (u32)SPEED_UNKNOWN; @@ -164,7 +160,6 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev, supported |= h->if_support; if (h->phy_if == PHY_INTERFACE_MODE_SGMII) { supported |= SUPPORTED_TP; - advertising |= ADVERTISED_1000baseT_Full; } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) { supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_10000baseKR_Full; @@ -197,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev, } /** - *hns_nic_set_link_settings - implement ethtool set link ksettings + *hns_nic_set_link_ksettings - implement ethtool set link ksettings *@net_dev: net_device *@cmd: ethtool_link_ksettings *retuen 0 - success , negative --fail @@ -339,6 +334,7 @@ static int __lb_setup(struct net_device *ndev, static int __lb_up(struct net_device *ndev, enum hnae_loop loop_mode) { +#define NIC_LB_TEST_WAIT_PHY_LINK_TIME (300) struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; int speed, duplex; @@ -365,6 +361,9 @@ static int __lb_up(struct net_device *ndev, h->dev->ops->adjust_link(h, speed, duplex); + /* wait adjust link done and phy ready */ + msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME); + return 0; } @@ -415,6 +414,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, /* for mutl buffer*/ new_skb = skb_copy(skb, GFP_ATOMIC); dev_kfree_skb_any(skb); + if (!new_skb) { + netdev_err(ndev, "skb alloc failed\n"); + return; + } skb = new_skb; check_ok = 0; @@ -462,9 +465,9 @@ static int __lb_clean_rings(struct hns_nic_priv *priv, } /** - * nic_run_loopback_test - run loopback test - * @nic_dev: net device - * @loopback_type: loopback type + * __lb_run_test - run loopback test + * @ndev: net device + * @loop_mode: loopback type */ static int __lb_run_test(struct net_device *ndev, enum hnae_loop loop_mode) @@ -572,7 +575,7 @@ static int __lb_down(struct net_device *ndev, enum hnae_loop loop) /** * hns_nic_self_test - self test - * @dev: net device + * @ndev: net device * @eth_test: test cmd * @data: test result */ @@ -633,7 +636,7 @@ static void hns_nic_self_test(struct net_device *ndev, /** * hns_nic_get_drvinfo - get net driver info - * @dev: net device + * @net_dev: net device * @drvinfo: driver info */ static void hns_nic_get_drvinfo(struct net_device *net_dev, @@ -658,7 +661,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, /** * hns_get_ringparam - get ring parameter - * @dev: net device + * @net_dev: net device * @param: ethtool parameter */ static void hns_get_ringparam(struct net_device *net_dev, @@ -683,7 +686,7 @@ static void hns_get_ringparam(struct net_device *net_dev, /** * hns_get_pauseparam - get pause parameter - * @dev: net device + * @net_dev: net device * @param: pause parameter */ static void hns_get_pauseparam(struct net_device *net_dev, @@ -701,7 +704,7 @@ static void hns_get_pauseparam(struct net_device *net_dev, /** * hns_set_pauseparam - set pause parameter - * @dev: net device + * @net_dev: net device * @param: pause parameter * * Return 0 on success, negative on failure @@ -725,7 +728,7 @@ static int 
hns_set_pauseparam(struct net_device *net_dev, /** * hns_get_coalesce - get coalesce info. - * @dev: net device + * @net_dev: net device * @ec: coalesce info. * * Return 0 on success, negative on failure. @@ -769,7 +772,7 @@ static int hns_get_coalesce(struct net_device *net_dev, /** * hns_set_coalesce - set coalesce info. - * @dev: net device + * @net_dev: net device * @ec: coalesce info. * * Return 0 on success, negative on failure. @@ -808,7 +811,7 @@ static int hns_set_coalesce(struct net_device *net_dev, /** * hns_get_channels - get channel info. - * @dev: net device + * @net_dev: net device * @ch: channel info. */ static void @@ -824,8 +827,8 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) } /** - * get_ethtool_stats - get detail statistics. - * @dev: net device + * hns_get_ethtool_stats - get detail statistics. + * @netdev: net device * @stats: statistics info. * @data: statistics data. */ @@ -882,9 +885,9 @@ static void hns_get_ethtool_stats(struct net_device *netdev, } /** - * get_strings: Return a set of strings that describe the requested objects - * @dev: net device - * @stats: string set ID. + * hns_get_strings: Return a set of strings that describe the requested objects + * @netdev: net device + * @stringset: string set ID. * @data: objects data. */ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) @@ -971,8 +974,8 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } /** - * nic_get_sset_count - get string set count witch returned by nic_get_strings. - * @dev: net device + * hns_get_sset_count - get string set count witch returned by nic_get_strings. + * @netdev: net device * @stringset: string set index, 0: self test string; 1: statistics string. * * Return string set count. @@ -1006,7 +1009,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset) /** * hns_phy_led_set - set phy LED status. - * @dev: net device + * @netdev: net device * @value: LED state. * * Return 0 on success, negative on failure. @@ -1027,8 +1030,8 @@ static int hns_phy_led_set(struct net_device *netdev, int value) } /** - * nic_set_phys_id - set phy identify LED. - * @dev: net device + * hns_set_phys_id - set phy identify LED. + * @netdev: net device * @state: LED state. * * Return 0 on success, negative on failure. @@ -1104,7 +1107,7 @@ hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) /** * hns_get_regs - get net device register - * @dev: net device + * @net_dev: net device * @cmd: ethtool cmd * @date: register data */ @@ -1125,8 +1128,8 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, } /** - * nic_get_regs_len - get total register len. - * @dev: net device + * hns_get_regs_len - get total register len. + * @net_dev: net device * * Return total register len. 
*/ @@ -1151,22 +1154,24 @@ static int hns_get_regs_len(struct net_device *net_dev) /** * hns_nic_nway_reset - nway reset - * @dev: net device + * @netdev: net device * * Return 0 on success, negative on failure */ static int hns_nic_nway_reset(struct net_device *netdev) { - int ret = 0; struct phy_device *phy = netdev->phydev; - if (netif_running(netdev)) { - /* if autoneg is disabled, don't restart auto-negotiation */ - if (phy && phy->autoneg == AUTONEG_ENABLE) - ret = genphy_restart_aneg(phy); - } + if (!netif_running(netdev)) + return 0; - return ret; + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); } static u32 diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 002534f12b661dc01da7aab04da179b3a4c05cb1..5bec09bb6e9ec19544073e37425c898ed3d128be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -3,12 +3,78 @@ # Makefile for the HISILICON network device drivers. # -obj-$(CONFIG_HNS3) += hns3pf/ -obj-$(CONFIG_HNS3) += hns3vf/ +ccflags-y += -DCONFIG_IT_VALIDATION +ccflags-y += -DCONFIG_HNS3_TEST +PWD = $(srctree)/drivers/net/ethernet/hisilicon/hns3 +#add include path +ccflags-y += -I$(PWD) \ + -I$(PWD)/hns3pf \ + -I$(PWD)/hns3_extension \ + -I$(PWD)/hns3_extension/hns3pf \ + -I$(PWD)/hns3vf \ + -I$(PWD)/hns3_cae + +#### compile hnae3.ko obj-$(CONFIG_HNS3) += hnae3.o +#### compile hns3.ko +HNS3_OBJS = hns3_enet.o \ + hns3_ethtool.o \ + hns3_debugfs.o +HNS3_OBJS_IT = hns3_extension/hns3_enet_it.o obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o +hns3-objs = $(HNS3_OBJS) $(HNS3_OBJS_IT) hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o + +#### compile hclge.ko +HCLGE_OBJ = hns3pf/hclge_main.o \ + hns3pf/hclge_cmd.o \ + hns3pf/hclge_mdio.o \ + hns3pf/hclge_debugfs.o \ + hns3pf/hclge_tm.o \ + hns3pf/hclge_mbx.o \ + hns3pf/hclge_err.o + + +HCLGE_OBJ_IT_MAIN = hns3_extension/hns3pf/hclge_main_it.o \ + hns3_extension/hns3pf/hclge_sysfs.o +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs := $(HCLGE_OBJ) $(HCLGE_OBJ_IT_MAIN) +hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o +#### compile hclgevf.ko +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +hclgevf-objs := hns3vf/hclgevf_main.o hns3vf/hclgevf_cmd.o hns3vf/hclgevf_mbx.o + +#### compile hns3_cae.ko +#add rally code +HNS3_CAE_OBJS_PRIM = hns3_cae/hns3_cae_init.o \ + hns3_cae/hns3_cae_lib.o \ + hns3_cae/hns3_cae_dcqcn.o \ + hns3_cae/hns3_cae_version.o \ + hns3_cae/hns3_cae_reset.o \ + hns3_cae/hns3_cae_pfc_storm.o \ + hns3_cae/hns3_cae_cmd.o +#add test code +HNS3_CAE_OBJS_EXT = hns3_cae/hns3_cae_dfx.o \ + hns3_cae/hns3_cae_fd.o \ + hns3_cae/hns3_cae_hilink_param.o \ + hns3_cae/hns3_cae_checksum.o \ + hns3_cae/hns3_cae_irq.o \ + hns3_cae/hns3_cae_gro.o \ + hns3_cae/hns3_cae_mac.o \ + hns3_cae/hns3_cae_port.o \ + hns3_cae/hns3_cae_promisc.o \ + hns3_cae/hns3_cae_qinfo.o \ + hns3_cae/hns3_cae_qos.o \ + hns3_cae/hns3_cae_qres.o \ + hns3_cae/hns3_cae_rss.o \ + hns3_cae/hns3_cae_stat.o \ + hns3_cae/hns3_cae_tm.o \ + hns3_cae/hns3_cae_vlan.o \ + hns3_cae/hns3_cae_xsfp.o \ + hns3_cae/hns3_cae_led.o +obj-$(CONFIG_HNS3_CAE) += hns3_cae.o +hns3_cae-objs := $(HNS3_CAE_OBJS_PRIM) $(HNS3_CAE_OBJS_EXT) +hns3_cae-$(CONFIG_HNS3_DCB) += hns3_cae/hns3_cae_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index be9dc08ccf6785762a329016ed3f3e20d2f21dad..5ab56fd90738f16303a7a07d6dda7e8a058d6203 100644 
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -7,8 +7,6 @@ #include #include -#define HCLGE_MBX_VF_MSG_DATA_NUM 16 - enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ @@ -21,7 +19,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */ - HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */ + HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */ + HCLGE_MBX_GET_BASIC_INFO, /* (VF -> PF) get basic info */ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */ @@ -36,6 +35,22 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ + HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ + HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ + HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ + HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ + HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ + HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ + HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ + HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ + HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ + HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ + HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ + HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ }; /* below are per-VF mac-vlan subcodes */ @@ -46,9 +61,6 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ @@ -56,36 +68,107 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ + HCLGE_MBX_ENABLE_VLAN_FILTER, +}; + +enum hclge_mbx_tbl_cfg_subcode { + HCLGE_MBX_VPORT_LIST_CLEAR, }; -#define HCLGE_MBX_MAX_MSG_SIZE 16 -#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 -#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 -#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 +#define HCLGE_MBX_MAX_MSG_SIZE 14 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U +#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 + +struct hclge_ring_chain_param { + u8 ring_type; + u8 tqp_index; + u8 int_gl_index; +}; + +struct hclge_basic_info { + u8 hw_tc_map; + u8 rsv; + u16 mbx_api_version; + u32 pf_caps; +}; struct hclgevf_mbx_resp_status { struct mutex mbx_mutex; /* protects 
against contending sync cmd resp */ u32 origin_mbx_msg; bool received_resp; int resp_status; + u16 match_id; u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; }; +struct errno_respcode_map { + u16 resp_code; + int errno; +}; + +struct hclge_vf_to_pf_msg { + u8 code; + union { + struct { + u8 subcode; + u8 data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + struct { + u8 en_bc; + u8 en_uc; + u8 en_mc; + }; + struct { + u8 vector_id; + u8 ring_num; + struct hclge_ring_chain_param + param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM]; + }; + }; +}; + +struct hclge_pf_to_vf_msg { + u16 code; + u16 vf_mbx_msg_code; + u16 vf_mbx_msg_subcode; + u16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; +}; + +struct hclge_respond_to_vf_msg { + int status; + u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + u16 len; +}; + struct hclge_mbx_vf_to_pf_cmd { u8 rsv; u8 mbx_src_vfid; /* Auto filled by IMP */ - u8 rsv1[2]; + u8 mbx_need_resp; + u8 rsv1[1]; u8 msg_len; - u8 rsv2[3]; - u8 msg[HCLGE_MBX_MAX_MSG_SIZE]; + u8 rsv2; + u16 match_id; + struct hclge_vf_to_pf_msg msg; }; +#define HCLGE_MBX_NEED_RESP_B 0 + struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; u8 msg_len; - u8 rsv1[3]; - u16 msg[8]; + u8 rsv1; + u16 match_id; + struct hclge_pf_to_vf_msg msg; +}; + +struct hclge_vf_rst_cmd { + u8 dest_vfid; + u8 vf_rst; + u8 rsv[22]; }; /* used by VF to store the received Async responses from PF */ @@ -95,14 +178,17 @@ struct hclgevf_mbx_arq_ring { struct hclgevf_dev *hdev; u32 head; u32 tail; - u32 count; + atomic_t count; u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; #define hclge_mbx_ring_ptr_move_crq(crq) \ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) #define hclge_mbx_tail_ptr_move_arq(arq) \ - (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) #define hclge_mbx_head_ptr_move_arq(arq) \ - (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + +/* PF immediately push link status to VFs when link status changed */ +#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0) #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index fff5be8078ac388102456f3e505a5a0d46b630bf..53a87b318713907e27dc9b82c7a647c2b16a66bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -16,29 +16,26 @@ static LIST_HEAD(hnae3_ae_dev_list); */ static DEFINE_MUTEX(hnae3_common_lock); -static bool hnae3_client_match(enum hnae3_client_type client_type, - enum hnae3_dev_type dev_type) +static bool hnae3_client_match(enum hnae3_client_type client_type) { - if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC || - client_type == HNAE3_CLIENT_ROCE)) - return true; - - if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC) + if (client_type == HNAE3_CLIENT_KNIC || + client_type == HNAE3_CLIENT_ROCE) return true; return false; } -static void hnae3_set_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, int inited) +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + unsigned int inited) { + if (!client || !ae_dev) + return; + switch (client->type) { case HNAE3_CLIENT_KNIC: hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); break; - case HNAE3_CLIENT_UNIC: - hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); - break; case HNAE3_CLIENT_ROCE: hnae3_set_bit(ae_dev->flag, 
HNAE3_ROCE_CLIENT_INITED_B, inited); break; @@ -46,9 +43,10 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, break; } } +EXPORT_SYMBOL(hnae3_set_client_init_flag); static int hnae3_get_client_init_flag(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) + struct hnae3_ae_dev *ae_dev) { int inited = 0; @@ -57,10 +55,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, inited = hnae3_get_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B); break; - case HNAE3_CLIENT_UNIC: - inited = hnae3_get_bit(ae_dev->flag, - HNAE3_UNIC_CLIENT_INITED_B); - break; case HNAE3_CLIENT_ROCE: inited = hnae3_get_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B); @@ -72,44 +66,47 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, return inited; } -static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, bool is_reg) +static int hnae3_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { int ret; /* check if this client matches the type of ae_dev */ - if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + if (!(hnae3_client_match(client->type) && hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } - /* now, (un-)instantiate client by calling lower layer */ - if (is_reg) { - ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) { - dev_err(&ae_dev->pdev->dev, - "fail to instantiate client, ret = %d\n", ret); - return ret; - } + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client, ret = %d\n", ret); - hnae3_set_client_init_flag(client, ae_dev, 1); - return 0; - } + return ret; +} + +static void hnae3_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type) && + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) + return; if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, ae_dev); hnae3_set_client_init_flag(client, ae_dev, 0); } - - return 0; } int hnae3_register_client(struct hnae3_client *client) { struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; - int ret = 0; + + if (!client) + return -ENODEV; mutex_lock(&hnae3_common_lock); /* one system should only have one client for every type */ @@ -125,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true); + int ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port, ret = %d\n", @@ -141,12 +138,31 @@ EXPORT_SYMBOL(hnae3_register_client); void hnae3_unregister_client(struct hnae3_client *client) { + struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; + bool existed = false; + + if (!client) + return; mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) { + existed = true; + break; + } + } + + if (!existed) { + mutex_unlock(&hnae3_common_lock); + pr_err("client %s does not exist!\n", client->name); + return; + } + /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, 
ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); } list_del(&client->node); @@ -163,7 +179,10 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - int ret = 0; + int ret; + + if (!ae_algo) + return; mutex_lock(&hnae3_common_lock); @@ -175,8 +194,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) if (!id) continue; - /* ae_dev init should set flag */ + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + continue; + } ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -184,13 +207,14 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) continue; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -211,6 +235,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -225,9 +252,10 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) * un-initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); + ae_dev->ops = NULL; hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } @@ -240,12 +268,15 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); * @ae_dev: the AE device * NOTE: the duplicated name will not be checked */ -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) { const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0; + int ret; + + if (!ae_dev) + return -ENODEV; mutex_lock(&hnae3_common_lock); @@ -257,14 +288,13 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - ae_dev->ops = ae_algo->ops; - - if (!ae_dev->ops) { - dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + ret = -EOPNOTSUPP; goto out_err; } + ae_dev->ops = ae_algo->ops; - /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -272,6 +302,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) goto out_err; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -280,15 +311,22 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", ret); } + mutex_unlock(&hnae3_common_lock); + + return 0; + out_err: + list_del(&ae_dev->node); mutex_unlock(&hnae3_common_lock); + + return ret; } 
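/*
 * Illustrative sketch only, not part of the patch above: with
 * hnae3_register_ae_dev() now returning an int, a PCI probe path is
 * expected to check the result and unwind on failure. The function name
 * hclge_probe_sketch() is a hypothetical placeholder.
 */
static int hclge_probe_sketch(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	pci_set_drvdata(pdev, ae_dev);

	/*
	 * Returns 0 on success; -ENODEV for a NULL ae_dev, -EOPNOTSUPP when
	 * the matched algo has no ops, or the init_ae_dev() error code. In
	 * the failure paths shown above the core has already unlinked the
	 * ae_dev from its list, so the caller only drops its own references.
	 */
	ret = hnae3_register_ae_dev(ae_dev);
	if (ret)
		pci_set_drvdata(pdev, NULL);

	return ret;
}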
EXPORT_SYMBOL(hnae3_register_ae_dev); @@ -301,6 +339,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; + if (!ae_dev) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { @@ -312,10 +353,11 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) continue; list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + ae_dev->ops = NULL; } list_del(&ae_dev->node); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67befff0bfc508e04100a28ff8f374ca4844b73c..dcb5134a3c709a1a219373175b26ab72b897d18b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -28,9 +28,13 @@ #include #include #include +#include #include +#include -#define HNAE3_MOD_VERSION "1.0" +#define HNAE3_MOD_VERSION "24.3.1" + +#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */ /* Device IDs */ #define HNAE3_DEV_ID_GE 0xA220 @@ -45,29 +49,49 @@ #define HNAE3_CLASS_NAME_SIZE 16 +#define HNAE3_REVISION_ID_20 0x20 +#define HNAE3_REVISION_ID_21 0x21 + #define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_DCB_B 0x2 #define HNAE3_KNIC_CLIENT_INITED_B 0x3 #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 +#define HNAE3_DEV_SUPPORT_FD_B 0x6 +#define HNAE3_DEV_SUPPORT_GRO_B 0x7 +#define HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B 0x8 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + +#define hnae3_dev_fd_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) +#define hnae3_dev_gro_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B) + +#define hnae3_dev_vlan_fltr_mdf_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B) + +enum HNAE3_PF_CAP_BITS { + HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, +}; #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, + DESC_TYPE_FRAGLIST_SKB, DESC_TYPE_PAGE, }; @@ -77,36 +101,56 @@ struct hnae3_queue { void __iomem *io_base; struct hnae3_ae_algo *ae_algo; struct hnae3_handle *handle; - int tqp_index; /* index in a handle */ - u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 desc_num; /* total number of desc */ + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 tx_desc_num; /* total number of tx desc */ + u16 rx_desc_num; /* total number of rx desc */ }; -/*hnae3 loop mode*/ +struct hns3_mac_stats { + u64 tx_pause_cnt; + u64 rx_pause_cnt; +}; + +/* hnae3 loop mode */ enum hnae3_loop { - HNAE3_MAC_INTER_LOOP_MAC, - HNAE3_MAC_INTER_LOOP_SERDES, - 
HNAE3_MAC_INTER_LOOP_PHY, - HNAE3_MAC_LOOP_NONE, + HNAE3_LOOP_APP, + HNAE3_LOOP_SERIAL_SERDES, + HNAE3_LOOP_PARALLEL_SERDES, + HNAE3_LOOP_PHY, + HNAE3_LOOP_NONE, }; enum hnae3_client_type { HNAE3_CLIENT_KNIC, - HNAE3_CLIENT_UNIC, HNAE3_CLIENT_ROCE, }; -enum hnae3_dev_type { - HNAE3_DEV_KNIC, - HNAE3_DEV_UNIC, -}; - /* mac media type */ enum hnae3_media_type { HNAE3_MEDIA_TYPE_UNKNOWN, HNAE3_MEDIA_TYPE_FIBER, HNAE3_MEDIA_TYPE_COPPER, HNAE3_MEDIA_TYPE_BACKPLANE, + HNAE3_MEDIA_TYPE_NONE, +}; + +/* must be consistent with definition in firmware */ +enum hnae3_module_type { + HNAE3_MODULE_TYPE_UNKNOWN = 0x00, + HNAE3_MODULE_TYPE_FIBRE_LR = 0x01, + HNAE3_MODULE_TYPE_FIBRE_SR = 0x02, + HNAE3_MODULE_TYPE_AOC = 0x03, + HNAE3_MODULE_TYPE_CR = 0x04, + HNAE3_MODULE_TYPE_KR = 0x05, + HNAE3_MODULE_TYPE_TP = 0x06, +}; + +enum hnae3_fec_mode { + HNAE3_FEC_AUTO = 0, + HNAE3_FEC_BASER, + HNAE3_FEC_RS, + HNAE3_FEC_USER_DEF, }; enum hnae3_reset_notify_type { @@ -116,14 +160,75 @@ enum hnae3_reset_notify_type { HNAE3_UNINIT_CLIENT, }; +enum hnae3_hw_error_type { + HNAE3_PPU_POISON_ERROR, + HNAE3_CMDQ_ECC_ERROR, + HNAE3_IMP_RD_POISON_ERROR, + HNAE3_ROCEE_AXI_RESP_ERROR, +}; + enum hnae3_reset_type { HNAE3_VF_RESET, + HNAE3_VF_FUNC_RESET, + HNAE3_VF_PF_FUNC_RESET, HNAE3_VF_FULL_RESET, + HNAE3_FLR_RESET, HNAE3_FUNC_RESET, - HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, + HNAE3_UNKNOWN_RESET, HNAE3_NONE_RESET, + HNAE3_MAX_RESET, +}; + +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + +enum hnae3_dbg_cmd { + HNAE3_DBG_CMD_TM_PRI, + HNAE3_DBG_CMD_TM_QSET, + HNAE3_DBG_CMD_TM_MAP, + HNAE3_DBG_CMD_TM_PG, + HNAE3_DBG_CMD_TM_PORT, + HNAE3_DBG_CMD_TC_SCH_INFO, + HNAE3_DBG_CMD_QOS_PAUSE_CFG, + HNAE3_DBG_CMD_QOS_PRI_MAP, + HNAE3_DBG_CMD_QOS_BUF_CFG, + HNAE3_DBG_CMD_TX_BD, + HNAE3_DBG_CMD_RX_BD, + HNAE3_DBG_CMD_MAC_UC, + HNAE3_DBG_CMD_MAC_MC, + HNAE3_DBG_CMD_MAC_TBL, + HNAE3_DBG_CMD_MNG_TBL, + HNAE3_DBG_CMD_LOOPBACK, + HNAE3_DBG_CMD_INTERRUPT_INFO, + HNAE3_DBG_CMD_RESET_INFO, + HNAE3_DBG_CMD_IMP_INFO, + HNAE3_DBG_CMD_NCL_CONFIG, + HNAE3_DBG_CMD_REG_BIOS_COMMON, + HNAE3_DBG_CMD_REG_SSU, + HNAE3_DBG_CMD_REG_IGU_EGU, + HNAE3_DBG_CMD_REG_RPU, + HNAE3_DBG_CMD_REG_NCSI, + HNAE3_DBG_CMD_REG_RTC, + HNAE3_DBG_CMD_REG_PPP, + HNAE3_DBG_CMD_REG_RCB, + HNAE3_DBG_CMD_REG_TQP, + HNAE3_DBG_CMD_REG_MAC, + HNAE3_DBG_CMD_REG_DCB, + HNAE3_DBG_CMD_VLAN_CONFIG, + HNAE3_DBG_CMD_QUEUE_MAP, + HNAE3_DBG_CMD_RX_QUEUE_INFO, + HNAE3_DBG_CMD_TX_QUEUE_INFO, + HNAE3_DBG_CMD_FD_TCAM, + HNAE3_DBG_CMD_MAC_TNL_STATUS, + HNAE3_DBG_CMD_SERV_INFO, + HNAE3_DBG_CMD_UMV_INFO, + HNAE3_DBG_CMD_UNKNOWN, }; struct hnae3_vector_info { @@ -139,6 +244,15 @@ struct hnae3_vector_info { #define HNAE3_RING_GL_RX 0 #define HNAE3_RING_GL_TX 1 +#define HNAE3_FW_VERSION_BYTE3_SHIFT 24 +#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24) +#define HNAE3_FW_VERSION_BYTE2_SHIFT 16 +#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_FW_VERSION_BYTE1_SHIFT 8 +#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_FW_VERSION_BYTE0_SHIFT 0 +#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) + struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; @@ -153,9 +267,10 @@ struct hnae3_client_ops { int (*init_instance)(struct hnae3_handle *handle); void (*uninit_instance)(struct hnae3_handle *handle, bool reset); void (*link_status_change)(struct hnae3_handle *handle, bool state); - int (*setup_tc)(struct hnae3_handle 
*handle, u8 tc); int (*reset_notify)(struct hnae3_handle *handle, enum hnae3_reset_notify_type type); + void (*process_hw_error)(struct hnae3_handle *handle, + enum hnae3_hw_error_type); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -172,7 +287,7 @@ struct hnae3_ae_dev { const struct hnae3_ae_ops *ops; struct list_head node; u32 flag; - enum hnae3_dev_type dev_type; + unsigned long hw_err_reset_req; void *priv; }; @@ -190,21 +305,31 @@ struct hnae3_ae_dev { * Enable the hardware * stop() * Disable the hardware + * start_client() + * Inform the hclge that client has been started + * stop_client() + * Inform the hclge that client has been stopped * get_status() * Get the carrier state of the back channel of the handle, 1 for ok, 0 for * non-ok * get_ksettings_an_result() * Get negotiation status,speed and duplex - * update_speed_duplex_h() - * Update hardware speed and duplex * get_media_type() * Get media type of MAC + * check_port_speed() + * Check target speed whether is supported + * get_fec() + * Get fec ability and fec mode + * set_fec() + * Set fec * adjust_link() * Adjust link status * set_loopback() * Set loopback * set_promisc_mode * Set promisc mode + * request_update_promisc_mode + * request to hclge(vf) to update promisc mode * set_mtu() * set mtu * get_pauseparam() @@ -215,6 +340,10 @@ struct hnae3_ae_dev { * set auto autonegotiation of pause frame use * get_autoneg() * get auto autonegotiation of pause frame use + * restart_autoneg() + * restart autonegotiation + * halt_autoneg() + * halt/resume autonegotiation when autonegotiation on * get_coalesce_usecs() * get usecs to delay a TX interrupt after a packet is sent * get_rx_max_coalesced_frames() @@ -239,6 +368,8 @@ struct hnae3_ae_dev { * Remove multicast address from mac table * update_stats() * Update Old network device statistics + * get_mac_stats() + * get mac pause statistics including tx_cnt and rx_cnt * get_ethtool_stats() * Get ethtool network device statistics * get_strings() @@ -285,32 +416,62 @@ struct hnae3_ae_dev { * Set vlan filter config of vf * enable_hw_strip_rxvtag() * Enable/disable hardware strip vlan tag of packets received + * set_gro_en + * Enable/disable HW GRO + * add_arfs_entry + * Check the 5-tuples of flow, and create flow director rule + * dbg_read_cmd + * Execute debugfs read command. + * get_vf_config + * Get the VF configuration setting by the host + * set_vf_link_state + * Set VF link status + * set_vf_spoofchk + * Enable/disable spoof check for specified vf + * set_vf_trust + * Enable/disable trust for specified vf, if the vf being trusted, then + * it can enable promisc mode + * set_vf_rate + * Set the max tx rate of specified vf. + * set_vf_mac + * Configure the default MAC for specified VF + * get_module_eeprom + * Get the optical module eeprom info. 
*/ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - + void (*reset_prepare)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); + void (*reset_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); int (*start)(struct hnae3_handle *handle); void (*stop)(struct hnae3_handle *handle); + int (*client_start)(struct hnae3_handle *handle); + void (*client_stop)(struct hnae3_handle *handle); int (*get_status)(struct hnae3_handle *handle); void (*get_ksettings_an_result)(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex); - int (*update_speed_duplex_h)(struct hnae3_handle *handle); int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, u8 duplex); - void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type); + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type); + int (*check_port_speed)(struct hnae3_handle *handle, u32 speed); + void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode); + int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode); void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); - void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, - bool en_mc_pmc); + int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); + void (*request_update_promisc_mode)(struct hnae3_handle *handle); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, @@ -320,6 +481,8 @@ struct hnae3_ae_ops { int (*set_autoneg)(struct hnae3_handle *handle, bool enable); int (*get_autoneg)(struct hnae3_handle *handle); + int (*restart_autoneg)(struct hnae3_handle *handle); + int (*halt_autoneg)(struct hnae3_handle *handle, bool halt); void (*get_coalesce_usecs)(struct hnae3_handle *handle, u32 *tx_usecs, u32 *rx_usecs); @@ -337,6 +500,8 @@ struct hnae3_ae_ops { void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); int (*set_mac_addr)(struct hnae3_handle *handle, void *p, bool is_first); + int (*do_ioctl)(struct hnae3_handle *handle, + struct ifreq *ifr, int cmd); int (*add_uc_addr)(struct hnae3_handle *handle, const unsigned char *addr); int (*rm_uc_addr)(struct hnae3_handle *handle, @@ -346,13 +511,12 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); - int (*update_mta_status)(struct hnae3_handle *handle); - void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae3_handle *handle, u64 *data); - + void (*get_mac_stats)(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats); void (*get_strings)(struct hnae3_handle *handle, u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); @@ -384,23 +548,28 @@ struct hnae3_ae_ops { int vector_num, struct hnae3_ring_chain_node *vr_chain); - void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); + int (*reset_queue)(struct hnae3_handle *handle); u32 (*get_fw_version)(struct hnae3_handle *handle); void (*get_mdix_mode)(struct hnae3_handle *handle, u8 *tp_mdix_ctrl, u8 *tp_mdix); - void 
(*enable_vlan_filter)(struct hnae3_handle *handle, bool enable); + int (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable); int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); - void (*reset_event)(struct hnae3_handle *handle); + void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); + enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev, + unsigned long *addr); + void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, - u16 *free_tqps, u16 *max_rss_size); - int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); + u16 *alloc_tqps, u16 *max_rss_size); + int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); int (*set_led_id)(struct hnae3_handle *handle, @@ -408,7 +577,63 @@ struct hnae3_ae_ops { void (*get_link_mode)(struct hnae3_handle *handle, unsigned long *supported, unsigned long *advertising); - void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type); + int (*add_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*del_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + void (*del_all_fd_entries)(struct hnae3_handle *handle, + bool clear_list); + int (*get_fd_rule_cnt)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_info)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_all_rules)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + void (*restore_fd_rules)(struct hnae3_handle *handle); + void (*enable_fd)(struct hnae3_handle *handle, bool enable); + int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id, + u16 flow_id, struct flow_keys *fkeys); + int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len); + pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev); + bool (*get_hw_reset_stat)(struct hnae3_handle *handle); + bool (*ae_dev_resetting)(struct hnae3_handle *handle); + unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); + int (*set_gro_en)(struct hnae3_handle *handle, bool enable); + u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); + void (*set_timer_task)(struct hnae3_handle *handle, bool enable); + int (*mac_connect_phy)(struct hnae3_handle *handle); + void (*mac_disconnect_phy)(struct hnae3_handle *handle); + bool (*reset_end)(struct hnae3_handle *handle, bool done); + void (*handle_imp_error)(struct hnae3_handle *handle); + int (*get_vf_config)(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf); + int (*set_vf_link_state)(struct hnae3_handle *handle, int vf, + int link_state); + int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf, + bool enable); + int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable); + int (*set_vf_rate)(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force); + int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p); + int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, + u32 len, u8 *data); + bool 
(*get_cmdq_stat)(struct hnae3_handle *handle); + +/* Notice! If the function is not for test, the definition must before + * CONFIG_HNS3_TEST! Because RoCE will use this head file, and it won't + * set CONFIG_HNS3_TEST, that may cause RoCE calling the wrong function. + */ +#ifdef CONFIG_HNS3_TEST + int (*send_cmdq)(struct hnae3_handle *handle, void *data, int num); + int (*test_cmdq)(struct hnae3_handle *handle, void *data, int *len); + int (*ecc_handle)(struct hnae3_ae_dev *ae_dev); + int (*priv_ops)(struct hnae3_handle *handle, int opcode, + void *data, int length); + void (*ext_init)(struct hnae3_handle *handle); + void (*ext_uninit)(struct hnae3_handle *handle); + void (*ext_reset_done)(struct hnae3_handle *handle); +#endif }; struct hnae3_dcb_ops { @@ -422,8 +647,8 @@ struct hnae3_dcb_ops { u8 (*getdcbx)(struct hnae3_handle *); u8 (*setdcbx)(struct hnae3_handle *, u8); - int (*map_update)(struct hnae3_handle *); - int (*setup_tc)(struct hnae3_handle *, u8, u8 *); + int (*setup_tc)(struct hnae3_handle *handle, + struct tc_mqprio_qopt_offload *mqprio_qopt); }; struct hnae3_ae_algo { @@ -432,33 +657,50 @@ struct hnae3_ae_algo { const struct pci_device_id *pdev_id_table; }; -#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) +#define HNAE3_INT_NAME_LEN 32 #define HNAE3_ITR_COUNTDOWN_START 100 +#define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 + +#define HNAE3_FORMAT_MAC_ADDR_LEN 18 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5 + +static inline void hnae3_format_mac_addr(char *format_mac_addr, + const u8 *mac_addr) +{ + snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x", + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]); +} + struct hnae3_tc_info { - u16 tqp_offset; /* TQP offset from base TQP */ - u16 tqp_count; /* Total TQPs */ - u8 tc; /* TC index */ - bool enable; /* If this TC is enable or not */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ + u16 tqp_count[HNAE3_MAX_TC]; + u16 tqp_offset[HNAE3_MAX_TC]; + u8 num_tc; /* Total number of enabled TCs */ + bool mqprio_active; }; -#define HNAE3_MAX_TC 8 -#define HNAE3_MAX_USER_PRIO 8 struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ + u16 req_rss_size; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; - u8 num_tc; /* Total number of enabled TCs */ - u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ - struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + struct hnae3_tc_info tc_info; u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; + enum pkt_hash_types rss_type; }; struct hnae3_roce_private_info { @@ -466,30 +708,48 @@ struct hnae3_roce_private_info { void __iomem *roce_io_base; int base_vector; int num_vectors; + + /* The below attributes defined for RoCE client, hnae3 gives + * initial values to them, and RoCE client can modify and use + * them. 
+ */ + unsigned long reset_state; + unsigned long instance_state; + unsigned long state; }; struct hnae3_unic_private_info { struct net_device *netdev; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_tqps; /* total number of tqps in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) #define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) -#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) #define HNAE3_SUPPORT_VF BIT(3) +#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) + +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) +#define HNAE3_OVERFLOW_UMPE (HNAE3_OVERFLOW_UPE | HNAE3_OVERFLOW_MPE) struct hnae3_handle { struct hnae3_client *client; struct pci_dev *pdev; void *priv; struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ - u64 flags; /* Indicate the capabilities for this handle*/ - - unsigned long last_reset_time; - enum hnae3_reset_type reset_level; + u64 flags; /* Indicate the capabilities for this handle */ union { struct net_device *netdev; /* first member */ @@ -499,6 +759,21 @@ struct hnae3_handle { }; u32 numa_node_mask; /* for multi-chip support */ + + enum hnae3_port_base_vlan_state port_base_vlan_state; + + u8 netdev_flags; + struct dentry *hnae3_dbgfs; + struct mutex dbgfs_lock; + char **dbgfs_buf; + + /* Network interface message level enabled bits */ + u32 msg_enable; + +#ifdef CONFIG_HNS3_TEST + /* for sysfs */ + struct kobject *kobj; +#endif }; #define hnae3_set_field(origin, mask, shift, val) \ @@ -513,7 +788,7 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field((origin), (0x1 << (shift)), (shift)) -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); @@ -521,4 +796,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); + +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + unsigned int inited); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.c new file mode 100644 index 0000000000000000000000000000000000000000..ef41194497af92e379cbfefd030308d12fbc583e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
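/*
 * This file exposes the NIC checksum-verification switches through the CAE
 * debug interface: hns3_cae_chs_set() does a read-modify-write of the
 * HCLGE_OPC_CHECKSUM_CHECK_EN descriptor to toggle one of the outer L3/UDP
 * or inner L3/TCP/UDP/SCTP check-enable bits, hns3_cae_chs_get() reads the
 * same descriptor back and reports the requested bit, and hns3_cae_chs_cfg()
 * is the entry point that validates the input/output buffers and dispatches
 * to the two helpers.
 */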
+ +#include +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_checksum.h" + +static int hns3_cae_chs_set(struct hclge_dev *hdev, u8 chs_type, u8 enable) +{ + struct hns3_cae_chs_cmd_param *recv = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_CHECKSUM_CHECK_EN, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("chs get cmd send failed!\n"); + return ret; + } + + recv = (struct hns3_cae_chs_cmd_param *)desc.data; + switch (chs_type) { + case CKS_OUTER_L3_EN: + hnae3_set_bit(recv->outer_en, HCLGE_CHS_OUT_L3_B, enable); + break; + case CKS_OUTER_UDP_EN: + hnae3_set_bit(recv->outer_en, HCLGE_CHS_OUT_UDP_B, enable); + break; + case CKS_INNER_L3_EN: + hnae3_set_bit(recv->inner_en, HCLGE_CHS_INNER_L3_B, enable); + break; + case CKS_INNER_TCP_EN: + hnae3_set_bit(recv->inner_en, HCLGE_CHS_INNER_TCP_B, enable); + break; + case CKS_INNER_UDP_EN: + hnae3_set_bit(recv->inner_en, HCLGE_CHS_INNER_UDP_B, enable); + break; + case CKS_INNER_SCTP_EN: + hnae3_set_bit(recv->inner_en, HCLGE_CHS_INNER_SCTP_B, enable); + break; + default: + break; + } + + hns3_cae_cmd_reuse_desc(&desc, false); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + + return ret; +} + +static int hns3_cae_chs_get(struct hclge_dev *hdev, u8 chs_type, u8 *enable) +{ + struct hns3_cae_chs_cmd_param *recv = NULL; + struct hclge_desc desc; + u8 inner_sctp_en; + u8 inner_tcp_en; + u8 inner_udp_en; + u8 outer_udp_en; + u8 outer_l3_en; + u8 inner_l3_en; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_CHECKSUM_CHECK_EN, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("chs get cmd send failed!\n"); + return ret; + } + + recv = (struct hns3_cae_chs_cmd_param *)desc.data; + outer_l3_en = hnae3_get_bit(recv->outer_en, HCLGE_CHS_OUT_L3_B); + outer_udp_en = hnae3_get_bit(recv->outer_en, HCLGE_CHS_OUT_UDP_B); + inner_l3_en = hnae3_get_bit(recv->inner_en, HCLGE_CHS_INNER_L3_B); + inner_tcp_en = hnae3_get_bit(recv->inner_en, HCLGE_CHS_INNER_TCP_B); + inner_udp_en = hnae3_get_bit(recv->inner_en, HCLGE_CHS_INNER_UDP_B); + inner_sctp_en = hnae3_get_bit(recv->inner_en, HCLGE_CHS_INNER_SCTP_B); + + switch (chs_type) { + case CKS_OUTER_L3_EN: + *enable = outer_l3_en; + break; + case CKS_OUTER_UDP_EN: + *enable = outer_udp_en; + break; + case CKS_INNER_L3_EN: + *enable = inner_l3_en; + break; + case CKS_INNER_TCP_EN: + *enable = inner_tcp_en; + break; + case CKS_INNER_UDP_EN: + *enable = inner_udp_en; + break; + case CKS_INNER_SCTP_EN: + *enable = inner_sctp_en; + break; + default: + break; + } + + return ret; +} + +int hns3_cae_chs_cfg(const struct hns3_nic_priv *net_priv, void *buf_in, + u32 in_size, void *buf_out, u32 out_size) +{ + struct hns3_cae_chs_param *in_info = + (struct hns3_cae_chs_param *)buf_in; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + u8 *out_info = NULL; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_chs_param); + u8 is_set; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + out_info = (u8 *)buf_out; + is_set = in_info->is_set; + + if (in_info->type >= CKS_MAX) { + pr_err("chs type is %d, param err!\n", in_info->type); + return -1; + } + + if (in_info->is_enable != 0 && in_info->is_enable != 1) { + pr_err("chs enable is %d, param err!\n", in_info->is_enable); + 
return -1; + } + if (is_set) { + if (hns3_cae_chs_set(hdev, in_info->type, in_info->is_enable)) { + pr_err("set chs type(%d) enable failed!\n", + in_info->type); + return -1; + } + } else { + check = !buf_out || out_size < sizeof(u8); + if (check) { + pr_err("input param buf_out error in %s.\n", __func__); + return -EFAULT; + } + if (hns3_cae_chs_get(hdev, in_info->type, out_info)) { + pr_err("get chs type(%d) enable failed!\n", + in_info->type); + return -1; + } + pr_err("chs type(%d) enable status is %d\n", + in_info->type, *out_info); + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.h new file mode 100644 index 0000000000000000000000000000000000000000..7a4c2308b8622360dd496e2251d464299be3e1cf --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_checksum.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_CHECKSUM_H__ +#define __HNS3_CAE_CHECKSUM_H__ + +#include "hns3_enet.h" + +#define HCLGE_CHS_OUT_L3_B 0 +#define HCLGE_CHS_OUT_UDP_B 1 +#define HCLGE_CHS_INNER_L3_B 0 +#define HCLGE_CHS_INNER_TCP_B 1 +#define HCLGE_CHS_INNER_UDP_B 2 +#define HCLGE_CHS_INNER_SCTP_B 3 +#define HCLGE_OPC_CHECKSUM_CHECK_EN 0x0601 + +#define OUTER_L3_CHECK_EN 0x1 +#define OUTER_UDP_CHECK_EN 0x1 +#define INNER_L3_CHECK_EN 0x1 +#define INNER_TCP_CHECK_EN 0x1 +#define INNER_UDP_CHECK_EN 0x1 +#define INNER_SCTP_CHECK_EN 0x1 + +enum { + CKS_OUTER_L3_EN = 0, + CKS_OUTER_UDP_EN, + CKS_INNER_L3_EN, + CKS_INNER_TCP_EN, + CKS_INNER_UDP_EN, + CKS_INNER_SCTP_EN, + CKS_MAX, +}; + +struct hns3_cae_chs_cmd_param { + u8 outer_en; + u8 inner_en; + u8 rsv[22]; +}; + +struct hns3_cae_chs_param { + u8 is_set; + u8 type; + u8 is_enable; +}; + +int hns3_cae_chs_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..10ae1eee1ab9089ba5c9a36ccaf33372d7185b6f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
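/*
 * The helpers below mirror the regular hclge command-queue flow for the CAE
 * module: build a descriptor, post it on the CSQ, poll the head register
 * until the firmware has consumed it, and map the firmware return code to a
 * Linux errno. A typical read-modify-write caller (sketch only; the opcode
 * is the one used by hns3_cae_checksum.c) follows this pattern:
 *
 *	hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_CHECKSUM_CHECK_EN, true);
 *	ret = hns3_cae_cmd_send(hdev, &desc, 1);	// read current value
 *	...modify desc.data[]...
 *	hns3_cae_cmd_reuse_desc(&desc, false);		// turn it into a write
 *	ret = hns3_cae_cmd_send(hdev, &desc, 1);
 */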
+ +#include "hns3_cae_cmd.h" + +static int hns3_cae_ring_space(struct hclge_cmq_ring *ring) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + +static bool hns3_cae_is_special_opcode(u16 opcode) +{ + /* these commands have several descriptors, + * and use the first one to save opcode and return value + */ + u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT}; + u16 i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +static int hns3_cae_cmd_convert_err_code(u16 desc_ret) +{ + switch (desc_ret) { + case HCLGE_CMD_EXEC_SUCCESS: + return 0; + case HCLGE_CMD_NO_AUTH: + return -EPERM; + case HCLGE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HCLGE_CMD_QUEUE_FULL: + return -EXFULL; + case HCLGE_CMD_NEXT_ERR: + return -ENOSR; + case HCLGE_CMD_UNEXE_ERR: + return -ENOTBLK; + case HCLGE_CMD_PARA_ERR: + return -EINVAL; + case HCLGE_CMD_RESULT_ERR: + return -ERANGE; + case HCLGE_CMD_TIMEOUT: + return -ETIME; + case HCLGE_CMD_HILINK_ERR: + return -ENOLINK; + case HCLGE_CMD_QUEUE_ILLEGAL: + return -ENXIO; + case HCLGE_CMD_INVALID: + return -EBADR; + default: + return -EIO; + } +} + +static int hns3_cae_cmd_csq_done(struct hclge_hw *hw) +{ + u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + + return head == hw->cmq.csq.next_to_use; +} + +static int hns3_cae_cmd_csq_clean(struct hclge_hw *hw) +{ + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); + struct hclge_cmq_ring *csq = &hw->cmq.csq; + int clean; + u32 head; + + head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (!is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "IMP firmware watchdog reset soon expected!\n"); + return -EIO; + } + + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; + + return clean; +} + +static int hns3_cae_cmd_check_retval(struct hclge_hw *hw, + struct hclge_desc *desc, + int num, int ntc) +{ + u16 opcode, desc_ret; + int handle; + + opcode = le16_to_cpu(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + desc[handle] = hw->cmq.csq.desc[ntc]; + ntc++; + if (ntc >= hw->cmq.csq.desc_num) + ntc = 0; + } + if (likely(!hns3_cae_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[num - 1].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + hw->cmq.last_status = desc_ret; + + return hns3_cae_cmd_convert_err_code(desc_ret); +} + +void hns3_cae_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) +{ + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); +} + +void hns3_cae_cmd_setup_basic_desc(struct hclge_desc *desc, 
+ enum hclge_opcode_type opcode, bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); +} + +/** + * hns3_cae_cmd_send - send command to command queue + * @hdev: pointer to the hclge_dev + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hns3_cae_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc, int num) +{ + struct hclge_desc *desc_to_use = NULL; + struct hclge_cmq_ring *csq = NULL; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int retval; + int ntc; + + csq = &hdev->hw.cmq.csq; + spin_lock_bh(&hdev->hw.cmq.csq.lock); + + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + return -EBUSY; + } + + if (num > hns3_cae_ring_space(&hdev->hw.cmq.csq)) { + /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, + * need update the SW HEAD pointer csq->next_to_clean + */ + csq->next_to_clean = hclge_read_dev(&hdev->hw, + HCLGE_NIC_CSQ_HEAD_REG); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + return -EBUSY; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hdev->hw.cmq.csq.next_to_use; + while (handle < num) { + desc_to_use = + &hdev->hw.cmq.csq.desc[hdev->hw.cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hdev->hw.cmq.csq.next_to_use)++; + if (hdev->hw.cmq.csq.next_to_use >= hdev->hw.cmq.csq.desc_num) + hdev->hw.cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_TAIL_REG, + hdev->hw.cmq.csq.next_to_use); + + /** + * If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { + do { + if (hns3_cae_cmd_csq_done(&hdev->hw)) { + complete = true; + break; + } + udelay(1); + timeout++; + } while (timeout < hdev->hw.cmq.tx_timeout); + } + + if (!complete) + retval = -EBADE; + else + retval = hns3_cae_cmd_check_retval(&hdev->hw, desc, num, ntc); + + handle = hns3_cae_cmd_csq_clean(&hdev->hw); + if (handle < 0) + retval = handle; + else if (handle != num) + dev_warn(&hdev->pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + return retval; +} + +struct hclge_vport *hns3_cae_get_vport(struct hnae3_handle *handle) +{ + return container_of(handle, struct hclge_vport, nic); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..c1160e507f1b1b5ba094da47a9cc0597e4136700 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_CMD_H__ +#define __HNS3_CAE_CMD_H__ +#include +#include "hnae3.h" +#include "hclge_main.h" +#include "hclge_cmd.h" + +#define HCLGE_OPC_GRO_AGE_CFG 0x0c11 + +/* misc command */ +#define HCLGE_OPC_CHIP_ID_GET 0x7003 +#define HCLGE_OPC_IMP_COMMIT_ID_GET 0x7004 +#define HCLGE_OPC_GET_CHIP_NUM 0x7005 +#define HCLGE_OPC_GET_PORT_NUM 0x7006 +#define HCLGE_OPC_CFG_PAUSE_STORM_PARA 0x7019 +/* SFP command */ +#define HCLGE_OPC_SFP_GET_INFO 0x7100 +#define HCLGE_OPC_SFP_GET_PRESENT 0x7101 +#define HCLGE_OPC_SFP_SET_STATUS 0x7102 +/* DCQCN command */ +#define HCLGE_OPC_DCQCN_TEMPLATE_CFG 0x7014 +#define HCLGE_OPC_DCQCN_GET_MSG_CNT 0x7017 + +#define HNS3_CAE_DESC_DATA_LEN 6 + +struct hns3_cae_desc { + __le16 opcode; + +#define HNS3_CAE_CMDQ_RX_INVLD_B 0 +#define HNS3_CAE_CMDQ_RX_OUTVLD_B 1 + + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[HNS3_CAE_DESC_DATA_LEN]; +}; + +int hns3_cae_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc, int num); +void hns3_cae_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read); +void hns3_cae_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); +struct hclge_vport *hns3_cae_get_vport(struct hnae3_handle *handle); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.c new file mode 100644 index 0000000000000000000000000000000000000000..e3cfa5cfea223c50ec2c426758c744f4cf585ff5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hclge_tm.h" +#include "hclge_cmd.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_dcb.h" +#define FUNKY_BUF_ERR -1 +#define MAX_DEV_LISTED 20 + +struct hns3_cae_dcb_info dcb_all_info[MAX_DEV_LISTED]; + +static int check_and_set_curr_dev(const struct hns3_nic_priv *net_priv) +{ + int i; + + for (i = 0; i < MAX_DEV_LISTED; i++) { + if (!dcb_all_info[i].net_priv) { + dcb_all_info[i].net_priv = net_priv; + break; + } else if (dcb_all_info[i].net_priv == net_priv) { + break; + } + } + if (i == MAX_DEV_LISTED) + return FUNKY_BUF_ERR; + return i; +} + +int hns3_cae_dcb_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hns3_cae_dcb_cfg_param *out_info = + (struct hns3_cae_dcb_cfg_param *)buf_out; + struct hns3_cae_dcb_cfg_param *in_info = + (struct hns3_cae_dcb_cfg_param *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_dcb_cfg_param); + int curr_dev_idx; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + curr_dev_idx = check_and_set_curr_dev(net_priv); + if (curr_dev_idx < 0) { + pr_err("Exceed MAX_DEV_LISTED: %d\n", MAX_DEV_LISTED); + return -1; + } + if (in_info->is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_dcb_cfg_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->dcb_en = + dcb_all_info[curr_dev_idx].dcb_cfg_info.dcb_en; + } else { + if (in_info->cfg_flag & HNS3_CAE_DCB_DCB_CFG_FLAG) + dcb_all_info[curr_dev_idx].dcb_cfg_info.dcb_en = + in_info->dcb_en; + } + + return 0; +} + +static int hns3_cae_cfg_pfc_en(u8 is_read, struct hclge_dev *hdev, + struct hns3_cae_pfc_cfg_param *info, int 
dev_idx) +{ + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, + HNS3_CAE_OPC_CFG_PFC_PAUSE_EN, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("read pfc enable status fail!ret = %d\n", ret); + return ret; + } + if (is_read) { + info->prien = ((desc.data[0] & 0xff00) >> 8); + info->pfc_en = ((desc.data[0] & 0x3) == 0x3); + } else { + hns3_cae_cmd_reuse_desc(&desc, false); + if (info->cfg_flag & HNS3_CAE_PFC_EN_CFG_FLAG) { + desc.data[0] = (desc.data[0] & (~0x3)) | + (info->pfc_en << 0) | + (info->pfc_en << 1); + dcb_all_info[dev_idx].pfc_cfg_info.pfc_en = + info->pfc_en; + } + if (info->cfg_flag & HNS3_CAE_PFC_PRIEN_CFG_FLAG) { + desc.data[0] = (desc.data[0] & (~0xff00)) | + (info->prien << 8); + dcb_all_info[dev_idx].pfc_cfg_info.prien = + info->prien; + } + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("set pfc cmd return fail!ret = %d\n", ret); + return ret; + } + } + + return ret; +} + +static int hns3_cae_cfg_pause_param(struct hclge_dev *hdev, + struct hns3_cae_pfc_cfg_param *info, + u8 is_read) +{ + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, + HNS3_CAE_OPC_CFG_PAUSE_PARAM, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("pause param cfg cmd send fail\n"); + return ret; + } + + if (is_read) { + info->pause_time = desc.data[2] & 0xffff; + info->pause_gap = (desc.data[1] & 0xff0000) >> 16; + return 0; + } + + if (info->cfg_flag & HNS3_CAE_PFC_TIME_CFG_FLAG) + desc.data[2] = (desc.data[2] & (~0xffff)) | info->pause_time; + + if (info->cfg_flag & HNS3_CAE_PFC_GAP_CFG_FLAG) + desc.data[1] = (desc.data[1] & (~0xff0000)) | + (info->pause_gap << 16); + + hns3_cae_cmd_reuse_desc(&desc, false); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac pause param cfg fail, ret = %d.\n", ret); + return ret; + } + return 0; +} + +int hns3_cae_dcb_pfc_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_cae_pfc_cfg_param *out_info = + (struct hns3_cae_pfc_cfg_param *)buf_out; + struct hns3_cae_pfc_cfg_param *in_info = + (struct hns3_cae_pfc_cfg_param *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_pfc_cfg_param); + struct hclge_vport *vport = NULL; + struct net_device *ndev = NULL; + struct hnae3_handle *h = NULL; + struct hclge_dev *hdev = NULL; + int curr_dev_idx; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + curr_dev_idx = check_and_set_curr_dev(net_priv); + if (curr_dev_idx < 0) { + pr_err("Exceed MAX_DEV_LISTED: %d\n", MAX_DEV_LISTED); + return -EINVAL; + } + h = net_priv->ae_handle; + vport = hns3_cae_get_vport(h); + ndev = h->netdev; + hdev = vport->back; + + if (!in_info->is_read && + !dcb_all_info[curr_dev_idx].dcb_cfg_info.dcb_en) { + pr_err("please enable dcb cfg first!\n"); + return -EPERM; + } + + if (!hnae3_dev_dcb_supported(hdev) || vport->vport_id != 0) { + pr_err("this device doesn't support dcb!\n"); + return -EOPNOTSUPP; + } + + if (in_info->is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_pfc_cfg_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + ret = hns3_cae_cfg_pfc_en(in_info->is_read, hdev, + out_info, curr_dev_idx); + if (ret) + return ret; + ret = hns3_cae_cfg_pause_param(hdev, out_info, true); + if (ret) + return ret; + } else { + struct ieee_pfc pfc = {0}; + + if 
(in_info->cfg_flag & HNS3_CAE_PFC_PRIEN_CFG_FLAG) { + pfc.pfc_en = in_info->prien; + dcb_all_info[curr_dev_idx].pfc_cfg_info.prien = + in_info->prien; + if (ndev->dcbnl_ops->ieee_setpfc) { + ret = ndev->dcbnl_ops->ieee_setpfc(ndev, &pfc); + if (ret) + return ret; + } + } + + if ((in_info->cfg_flag & HNS3_CAE_PFC_TIME_CFG_FLAG) || + (in_info->cfg_flag & HNS3_CAE_PFC_GAP_CFG_FLAG)) { + ret = hns3_cae_cfg_pause_param(hdev, in_info, false); + if (ret) + return ret; + } + } + + return 0; +} + +static void hns3_cae_disable_ets_cfg(struct hclge_dev *hdev, + struct ieee_ets *ets, int dev_idx) +{ + u8 percent = 0; + int i; + + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) { + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + dcb_all_info[dev_idx].ets_cfg_info.schedule[i] = 0; + } + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ets->tc_tx_bw[i] = 100 / hdev->tm_info.num_tc; + dcb_all_info[dev_idx].ets_cfg_info.bw[i] = + ets->tc_tx_bw[i]; + percent += ets->tc_tx_bw[i]; + } + if (percent != 100) { + ets->tc_tx_bw[i - 1] += (100 - percent); + dcb_all_info[dev_idx].ets_cfg_info.bw[i - 1] = + ets->tc_tx_bw[i - 1]; + } +} + +static void hns3_cae_enable_ets_cfg(struct hclge_dev *hdev, + struct ieee_ets *ets, + struct hns3_cae_ets_cfg_param *info, + int dev_idx) +{ + int i; + + if (info->cfg_flag & HNS3_CAE_ETS_UP2TC_CFG_FLAG) { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) { + ets->prio_tc[i] = info->up2tc[i]; + dcb_all_info[dev_idx].ets_cfg_info.up2tc[i] = + info->up2tc[i]; + } + } else { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; + } + + if (info->cfg_flag & HNS3_CAE_ETS_BANDWIDTH_CFG_FLAG) { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) { + ets->tc_tx_bw[i] = info->bw[i]; + dcb_all_info[dev_idx].ets_cfg_info.bw[i] = + info->bw[i]; + } + } else { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + } + + if (info->cfg_flag & HNS3_CAE_ETS_SCHEDULE_CFG_FLAG) { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) { + ets->tc_tsa[i] = info->schedule[i] ? + IEEE_8021QAZ_TSA_STRICT : IEEE_8021QAZ_TSA_ETS; + dcb_all_info[dev_idx].ets_cfg_info.schedule[i] = + info->schedule[i]; + } + } else { + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) + ets->tc_tsa[i] = hdev->tm_info.tc_info[i].tc_sch_mode ? 
+ IEEE_8021QAZ_TSA_ETS : IEEE_8021QAZ_TSA_STRICT; + } +} + +int hns3_cae_dcb_ets_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define BYTELEN 4 + struct hns3_cae_ets_cfg_param *out_info = + (struct hns3_cae_ets_cfg_param *)buf_out; + struct hns3_cae_ets_cfg_param *in_info = + (struct hns3_cae_ets_cfg_param *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_cae_ets_cfg_param) || + !buf_out || + out_size < sizeof(struct hns3_cae_ets_cfg_param); + struct hclge_vport *vport = NULL; + struct net_device *ndev = NULL; + struct hclge_dev *hdev = NULL; + struct hnae3_handle *h = NULL; + struct hclge_desc desc; + int curr_dev_idx; + int ret; + u32 i; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + curr_dev_idx = check_and_set_curr_dev(net_priv); + if (curr_dev_idx < 0) { + pr_err("Exceed MAX_DEV_LISTED: %d\n", MAX_DEV_LISTED); + return -EINVAL; + } + h = net_priv->ae_handle; + vport = hns3_cae_get_vport(h); + ndev = h->netdev; + hdev = vport->back; + + if (!in_info->is_read && + !dcb_all_info[curr_dev_idx].dcb_cfg_info.dcb_en) { + pr_err("please enable dcb cfg first!\n"); + return -EPERM; + } + + if (!hnae3_dev_dcb_supported(hdev) || vport->vport_id != 0) { + pr_err("this device doesn't support dcb!\n"); + return -EOPNOTSUPP; + } + + if (in_info->is_read) { + hns3_cae_cmd_setup_basic_desc(&desc, + HNS3_CAE_OPC_PRI_TO_TC_MAPPING, + true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + pr_err("read up2tc mapping fail!\n"); + return ret; + } + out_info->ets_en = + dcb_all_info[curr_dev_idx].ets_cfg_info.ets_en; + for (i = 0; i < HNS3_CAE_ETS_MAC_TC_NUM; i++) { + out_info->up2tc[i] = + (desc.data[0] & (0xfU << (BYTELEN * i))) >> + (BYTELEN * i); + dcb_all_info[curr_dev_idx].ets_cfg_info.up2tc[i] = + out_info->up2tc[i]; + out_info->bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + dcb_all_info[curr_dev_idx].ets_cfg_info.bw[i] = + hdev->tm_info.pg_info[0].tc_dwrr[i]; + out_info->schedule[i] = + !hdev->tm_info.tc_info[i].tc_sch_mode; + dcb_all_info[curr_dev_idx].ets_cfg_info.schedule[i] = + !hdev->tm_info.tc_info[i].tc_sch_mode; + } + } else { + struct ieee_ets ets = {0}; + + if (in_info->cfg_flag & HNS3_CAE_ETS_EN_CFG_FLAG) + dcb_all_info[curr_dev_idx].ets_cfg_info.ets_en = + in_info->ets_en; + + if (!dcb_all_info[curr_dev_idx].ets_cfg_info.ets_en) + hns3_cae_disable_ets_cfg(hdev, &ets, curr_dev_idx); + else + hns3_cae_enable_ets_cfg(hdev, &ets, in_info, + curr_dev_idx); + + if (ndev->dcbnl_ops->ieee_setets) { + ret = ndev->dcbnl_ops->ieee_setets(ndev, &ets); + if (ret) + return ret; + } + + out_info->cfg_flag = in_info->cfg_flag; + out_info->is_read = in_info->is_read; + out_info->ets_en = + dcb_all_info[curr_dev_idx].ets_cfg_info.ets_en; + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..f6466dba3729667304bdb217cfb6a2d80f2a0cb3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcb.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_DCB_H__ +#define __HNS3_CAE_DCB_H__ + +#define HNS3_CAE_OPC_CFG_MAC_PAUSE_EN 0x0701 +#define HNS3_CAE_OPC_CFG_PFC_PAUSE_EN 0x0702 +#define HNS3_CAE_OPC_CFG_PAUSE_PARAM 0x0703 +#define HNS3_CAE_OPC_PRI_TO_TC_MAPPING 0x0709 +#define HNS3_CAE_OPC_TM_PRI_WEIGHT 0x080b + +#define HNS3_CAE_DCB_DCB_CFG_FLAG 0x1 + +#define HNS3_CAE_ETS_EN_CFG_FLAG 0x1 +#define HNS3_CAE_ETS_UP2TC_CFG_FLAG 0x2 +#define HNS3_CAE_ETS_BANDWIDTH_CFG_FLAG 0x4 +#define HNS3_CAE_ETS_SCHEDULE_CFG_FLAG 0x8 + +#define HNS3_CAE_ETS_MAC_TC_NUM 8 + +#define HNS3_CAE_PFC_EN_CFG_FLAG 0x1 +#define HNS3_CAE_PFC_PRIEN_CFG_FLAG 0x2 +#define HNS3_CAE_PFC_TIME_CFG_FLAG 0x4 +#define HNS3_CAE_PFC_GAP_CFG_FLAG 0x8 + +#define HNS3_CAE_PFC_MAC_PRI 8 + +struct hns3_cae_pfc_cfg_param { + u8 is_read; + u8 cfg_flag; + u8 pfc_en; + u8 prien; + u16 pause_time; + u8 pause_gap; +}; + +struct hns3_cae_dcb_cfg_param { + u8 is_read; + u8 cfg_flag; + u8 dcb_en; +}; + +struct hns3_cae_ets_cfg_param { + u8 is_read; + u8 cfg_flag; + u8 ets_en; + u8 up2tc[HNS3_CAE_PFC_MAC_PRI]; + u8 bw[HNS3_CAE_ETS_MAC_TC_NUM]; + u8 schedule[HNS3_CAE_ETS_MAC_TC_NUM]; +}; + +struct hns3_cae_dcb_info { + const struct hns3_nic_priv *net_priv; + struct hns3_cae_pfc_cfg_param pfc_cfg_info; + struct hns3_cae_dcb_cfg_param dcb_cfg_info; + struct hns3_cae_ets_cfg_param ets_cfg_info; +}; + +#ifdef CONFIG_HNS3_DCB +int hns3_cae_dcb_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +int hns3_cae_dcb_ets_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_dcb_pfc_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +#else +static inline int hns3_cae_dcb_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + return -EOPNOTSUPP; +} + +static inline int hns3_cae_dcb_ets_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + return -EOPNOTSUPP; +} + +static inline int hns3_cae_dcb_pfc_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.c new file mode 100644 index 0000000000000000000000000000000000000000..fb7aafc7ea33c4a101e0d1cd293d511beb9f3c91 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
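/*
 * The DCQCN congestion-control template is read and written 32 bits at a
 * time via HCLGE_OPC_DCQCN_TEMPLATE_CFG, with each device_number selecting
 * a 0x10-byte template at SCC_TEMP_CFG0 + 0x10 * device_number. Based on
 * the read path in hns3_nic_dcqcn() below, the template words are packed
 * as:
 *
 *	word 0: ai[15:0],  f[23:16],  tkp[31:24]
 *	word 1: tmp[15:0], alp[31:16]
 *	word 2: max_speed[31:0]
 *	word 3: g[7:0],    al[15:8],  cnp_time[23:16], alp_shift[31:24]
 */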
+ +#include +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_dcqcn.h" + +static int hns3_dcqcn_rw(const struct hns3_nic_priv *net_priv, + u32 offset, u32 *data, u32 rw_type) +{ + struct hnae3_handle *h = net_priv->ae_handle; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + int ret; + + if (!data) + return -EFAULT; + + vport = container_of(h, struct hclge_vport, nic); + hdev = vport->back; + + if (rw_type == DEVMEM_CFG_READ) { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_DCQCN_TEMPLATE_CFG, + true); + } else { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_DCQCN_TEMPLATE_CFG, + false); + desc.data[2] = *data; + } + + desc.data[0] = SCC_TEMP_LOW_ADDR + offset; + desc.data[1] = SCC_TEMP_HIGH_ADDR; + desc.data[4] = 32; + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "disable net lane failed %d\n", ret); + return ret; + } + + if (rw_type == DEVMEM_CFG_READ) + *data = desc.data[2]; + + return 0; +} + +int hns3_nic_dcqcn(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, u32 out_size) +{ +#define SCC_TEMP_CFG0 0x6000 +#define SCC_TEMP_CFG1 0x6004 +#define SCC_TEMP_CFG2 0x6008 +#define SCC_TEMP_CFG3 0x600c + struct hnae3_handle *h = net_priv->ae_handle; + struct hclge_vport *vport = container_of(h, struct hclge_vport, nic); + bool check = !buf_in || in_size < sizeof(struct cfg_dcqcn_param); + struct cfg_dcqcn_param *parm_out = buf_out; + struct cfg_dcqcn_param *parm_in = buf_in; + struct cfg_dcqcn_param tempbuffer = {0}; + struct hclge_dev *hdev = vport->back; + u32 tempoutbuff; + u32 offset; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (!hnae3_dev_roce_supported(hdev)) { + dev_err(&hdev->pdev->dev, "This device is not support RoCE!\n"); + return -EINVAL; + } + + tempoutbuff = 0; + if (parm_in->device_number > 0xff) { + dev_err(&hdev->pdev->dev, + "parm_in->device_number=0x%x, max value is 0xff.\n", + parm_in->device_number); + return -ENXIO; + } + offset = 0x10 * parm_in->device_number + SCC_TEMP_CFG0; + ret = hns3_dcqcn_rw(net_priv, offset, (u32 *)&tempoutbuff, + DEVMEM_CFG_READ); + if (ret) { + dev_err(&hdev->pdev->dev, + "read dcqcn cfg 0~31 bit failed 0x%x\n", ret); + return ret; + } + tempbuffer.ai = (tempoutbuff & 0xffff); + tempbuffer.f = ((tempoutbuff >> 16) & 0xff); + tempbuffer.tkp = (tempoutbuff >> 24); + + offset = offset + 0x4; + ret = hns3_dcqcn_rw(net_priv, offset, (u32 *)&tempoutbuff, + DEVMEM_CFG_READ); + if (ret) { + dev_err(&hdev->pdev->dev, + "read dcqcn cfg 32~63 bit failed ret = 0x%x\n", ret); + return ret; + } + tempbuffer.tmp = (tempoutbuff & 0xffff); + tempbuffer.alp = (tempoutbuff >> 16); + + offset = offset + 0x4; + ret = hns3_dcqcn_rw(net_priv, offset, (u32 *)&tempoutbuff, + DEVMEM_CFG_READ); + if (ret) { + dev_err(&hdev->pdev->dev, + "read dcqcn cfg 64~95 bit failed ret = 0x%x\n", ret); + return ret; + } + tempbuffer.max_speed = tempoutbuff; + + offset = offset + 0x4; + ret = hns3_dcqcn_rw(net_priv, offset, (u32 *)&tempoutbuff, + DEVMEM_CFG_READ); + if (ret) { + dev_err(&hdev->pdev->dev, + "read dcqcn cfg 96~127 bit failed ret = 0x%x\n", ret); + return ret; + } + tempbuffer.g = (tempoutbuff & 0xff); + tempbuffer.al = ((tempoutbuff >> 8) & 0xff); + tempbuffer.cnp_time = ((tempoutbuff >> 16) & 0xff); + tempbuffer.alp_shift = ((tempoutbuff >> 24) & 0xff); + + 
+	if (parm_in->is_get == HIARM_DCQCN_WRITE_CFG_MODE) {
+		if ((parm_in->dcqcn_parm_opcode & 0x1) == 1)
+			tempbuffer.ai = parm_in->ai;
+		if ((parm_in->dcqcn_parm_opcode & 0x2) == 0x2)
+			tempbuffer.f = parm_in->f;
+		if ((parm_in->dcqcn_parm_opcode & 0x4) == 0x4)
+			tempbuffer.tkp = parm_in->tkp;
+		if ((parm_in->dcqcn_parm_opcode & 0x8) == 0x8)
+			tempbuffer.tmp = parm_in->tmp;
+		if ((parm_in->dcqcn_parm_opcode & 0x10) == 0x10)
+			tempbuffer.alp = parm_in->alp;
+		if ((parm_in->dcqcn_parm_opcode & 0x20) == 0x20)
+			tempbuffer.g = parm_in->g;
+		if ((parm_in->dcqcn_parm_opcode & 0x40) == 0x40)
+			tempbuffer.al = parm_in->al;
+		if ((parm_in->dcqcn_parm_opcode & 0x80) == 0x80)
+			tempbuffer.max_speed = parm_in->max_speed;
+		if ((parm_in->dcqcn_parm_opcode & 0x100) == 0x100)
+			tempbuffer.cnp_time = parm_in->cnp_time;
+		if ((parm_in->dcqcn_parm_opcode & 0x200) == 0x200)
+			tempbuffer.alp_shift = parm_in->alp_shift;
+
+		ret = hns3_dcqcn_rw(net_priv,
+				    0x10 * parm_in->device_number +
+				    SCC_TEMP_CFG0,
+				    (u32 *)&tempbuffer.ai, DEVMEM_CFG_WRITE);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"write dcqcn cfg 0~31 bit failed ret = 0x%x\n",
+				ret);
+			return ret;
+		}
+		ret = hns3_dcqcn_rw(net_priv,
+				    0x10 * parm_in->device_number +
+				    SCC_TEMP_CFG1,
+				    (u32 *)&tempbuffer.tmp, DEVMEM_CFG_WRITE);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"write dcqcn cfg 32~63 bit failed ret = 0x%x\n",
+				ret);
+			return ret;
+		}
+		ret = hns3_dcqcn_rw(net_priv,
+				    0x10 * parm_in->device_number +
+				    SCC_TEMP_CFG2,
+				    (u32 *)&tempbuffer.max_speed,
+				    DEVMEM_CFG_WRITE);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"write dcqcn cfg 64~95 bit failed ret = 0x%x\n",
+				ret);
+			return ret;
+		}
+		ret = hns3_dcqcn_rw(net_priv,
+				    0x10 * parm_in->device_number +
+				    SCC_TEMP_CFG3,
+				    (u32 *)&tempbuffer.g, DEVMEM_CFG_WRITE);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"write dcqcn cfg 96~127 bit failed ret = 0x%x\n",
+				ret);
+			return ret;
+		}
+	} else if (parm_in->is_get == HIARM_DCQCN_READ_CFG_MODE) {
+		check = !buf_out || out_size < sizeof(struct cfg_dcqcn_param);
+		if (check) {
+			pr_err("input param buf_out error in %s function\n",
+			       __func__);
+			return -EFAULT;
+		}
+		parm_out->ai = tempbuffer.ai;
+		parm_out->f = tempbuffer.f;
+		parm_out->tkp = tempbuffer.tkp;
+		parm_out->tmp = tempbuffer.tmp;
+		parm_out->alp = tempbuffer.alp;
+		parm_out->max_speed = tempbuffer.max_speed;
+		parm_out->g = tempbuffer.g;
+		parm_out->al = tempbuffer.al;
+		parm_out->cnp_time = tempbuffer.cnp_time;
+		parm_out->alp_shift = tempbuffer.alp_shift;
+	} else {
+		dev_err(&hdev->pdev->dev,
+			"invalid parm_in->is_get value 0x%x\n",
+			parm_in->is_get);
+	}
+
+	return 0;
+}
+
+int hns3_dcqcn_get_msg_cnt(const struct hns3_nic_priv *net_priv,
+			   void *buf_in, u32 in_size,
+			   void *buf_out, u32 out_size)
+{
+	struct hnae3_handle *h = net_priv->ae_handle;
+	struct hclge_vport *vport = container_of(h, struct hclge_vport, nic);
+	struct dcqcn_statistic_param *statistic_parm_out = buf_out;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	bool check = !buf_out ||
+		     out_size < sizeof(struct dcqcn_statistic_param);
+	int ret;
+
+	if (check) {
+		pr_err("input param buf_out error in %s function\n", __func__);
+		return -EFAULT;
+	}
+
+	hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_DCQCN_GET_MSG_CNT, true);
+
+	ret = hns3_cae_cmd_send(hdev, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "get dcqcn msg cnt failed %d\n", ret);
+		return ret;
+	}
+
+	statistic_parm_out->dcqcn_rx_cnt = desc.data[0];
+	statistic_parm_out->dcqcn_tx_cnt = desc.data[2];
+	statistic_parm_out->dcqcn_db_cnt =
desc.data[4]; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.h new file mode 100644 index 0000000000000000000000000000000000000000..435f2221b46f811b21656dd3adcc6b6443594eab --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dcqcn.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_DCQCN_H__ +#define __HNS3_CAE_DCQCN_H__ + +enum { + DCQCN_MASK_AI = 0x0, + DCQCN_MASK_F, + DCQCN_MASK_TKP, + DCQCN_MASK_TMP, + DCQCN_MASK_ALP, + DCQCN_MASK_G, + DCQCN_MASK_AL, + DCQCN_MASK_MAX_SPEED, + DCQCN_MASK_CNP_TIME, + DCQCN_MASK_ALP_SHIFT, +}; + +enum DEVMEM_RW_TYPE { + DEVMEM_CFG_WRITE = 0, + DEVMEM_CFG_READ, +}; + +struct cfg_dcqcn_param { + u16 ai; + u8 f; + u8 tkp; + u16 tmp; + u16 alp; + u32 max_speed; + u8 g; + u8 al; + u8 cnp_time; + u8 alp_shift; + u16 dcqcn_parm_opcode; + u16 is_get; + u32 device_number; +}; + +struct dcqcn_statistic_param { + u32 dcqcn_rx_cnt; + u32 dcqcn_tx_cnt; + u32 dcqcn_db_cnt; + u32 dcqcn_statistic_enable; +}; + +#define SCC_TEMP_LOW_ADDR 0x11000000 +#define SCC_TEMP_HIGH_ADDR 0x0 + +#define HIARM_DCQCN_READ_CFG_MODE 30 +#define HIARM_DCQCN_WRITE_CFG_MODE 31 + +int hns3_nic_dcqcn(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +int hns3_dcqcn_get_msg_cnt(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.c new file mode 100644 index 0000000000000000000000000000000000000000..23c36077a183e7f819b3ef3f7fd0123a4ba13a8f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_dfx.h" + +static int hns3_cae_operate_nic_regs(struct hclge_dev *hdev, + struct hns3_cae_reg_param *info) +{ + struct hclge_desc desc; + int ret; + + if (info->is_read) { + hns3_cae_cmd_setup_basic_desc(&desc, + OPC_WRITE_READ_REG_CMD, true); + desc.data[0] = (u32)(info->addr & 0xffffffff); + desc.data[1] = (u32)(info->addr >> 32); + desc.data[4] = (u32)info->bit_width; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read addr 0x%llx failed! ret = %d.\n", + info->addr, ret); + return ret; + } + info->value = (u64)desc.data[2] | ((u64)desc.data[3] << 32); + } else { + hns3_cae_cmd_setup_basic_desc(&desc, OPC_WRITE_READ_REG_CMD, + false); + desc.data[0] = (u32)(info->addr & 0xffffffff); + desc.data[1] = (u32)(info->addr >> 32); + desc.data[2] = (u32)(info->value & 0xffffffff); + desc.data[3] = (u32)(info->value >> 32); + desc.data[4] = (u32)info->bit_width; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "write addr 0x%llx value 0x%llx failed! 
ret = %d.\n", + info->addr, info->value, ret); + return ret; + } + } + + return 0; +} + +static int hns3_cae_get_chip_and_mac_id(struct hnae3_handle *handle, + u32 *chip_id, u32 *mac_id) +{ +#define HNS3_CAE_GET_CHIP_MAC_ID_CMD 0x7003 + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, + HNS3_CAE_GET_CHIP_MAC_ID_CMD, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get chip id and mac id failed %d\n", + ret); + return ret; + } + *chip_id = desc.data[0]; + *mac_id = desc.data[1]; + + return 0; +} + +int hns3_cae_get_dfx_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define HNS3_CAE_MAC_MODE_ADDR 0x10000000U +#define HNS3_CAE_MAC_MAP_ADDR 0x10000008U + struct hns3_cae_dfx_param *out_info = + (struct hns3_cae_dfx_param *)buf_out; + struct hns3_cae_reg_param reg_info; + struct hnae3_handle *handle = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + u32 chip_id; + u32 mac_id; + bool check = !buf_out || out_size < sizeof(struct hns3_cae_dfx_param); + int ret; + u32 i; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + handle = net_priv->ae_handle; + vport = hns3_cae_get_vport(handle); + hdev = vport->back; + + ret = hns3_cae_get_chip_and_mac_id(handle, &chip_id, &mac_id); + if (ret) + return ret; + out_info->chip_id = (u8)chip_id; + out_info->mac_id = (u8)mac_id; + out_info->func_id = (u8)hdev->pdev->devfn; + out_info->is_cs_board = (handle->pdev->revision > 0x20) ? true : false; + reg_info.addr = HNS3_CAE_MAC_MODE_ADDR; + reg_info.bit_width = HNS3_CAE_BITWIDTH_32BIT; + reg_info.is_read = true; + ret = hns3_cae_operate_nic_regs(hdev, ®_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "read chip%d's work mode failed!\n", chip_id); + return ret; + } + out_info->work_mode = reg_info.value; + reg_info.addr = HNS3_CAE_MAC_MAP_ADDR; + reg_info.bit_width = HNS3_CAE_BITWIDTH_64BIT; + reg_info.is_read = true; + ret = hns3_cae_operate_nic_regs(hdev, ®_info); + if (ret) { + dev_err(&hdev->pdev->dev, "read mac's map info failed!\n"); + return ret; + } + for (i = 0; i < HNS3_CAE_MAX_MAC_NUMBER; i++) + out_info->mac_used |= ((reg_info.value >> (i * 8)) & 0xff); + + return 0; +} + +int hns3_cae_read_dfx_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_cae_reg_param *out_info = + (struct hns3_cae_reg_param *)buf_out; + struct hns3_cae_reg_param *in_info = + (struct hns3_cae_reg_param *)buf_in; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_reg_param); + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + if (in_info->is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_reg_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->addr = in_info->addr; + out_info->is_read = true; + out_info->bit_width = in_info->bit_width; + + ret = hns3_cae_operate_nic_regs(hdev, out_info); + if (ret) + return ret; + } else { + ret = hns3_cae_operate_nic_regs(hdev, in_info); + if (ret) + return ret; + } + + return 0; +} + +int 
hns3_cae_event_injection(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_cae_event_param *in_info = + (struct hns3_cae_event_param *)buf_in; + struct hns3_cae_reg_param reg_info; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_event_param); + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + reg_info.addr = in_info->addr; + reg_info.bit_width = HNS3_CAE_BITWIDTH_32BIT; + reg_info.is_read = false; + reg_info.value = in_info->value; + dev_info(&hdev->pdev->dev, + "Injection event: %s start.\n", in_info->event_name); + ret = hns3_cae_operate_nic_regs(hdev, ®_info); + if (ret) { + dev_err(&hdev->pdev->dev, "Injection event error!\n"); + return ret; + } + dev_info(&hdev->pdev->dev, + "Injection event: %s end.\n", in_info->event_name); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.h new file mode 100644 index 0000000000000000000000000000000000000000..d39b5da63c0ae42b157521965237b46dc940f88f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_dfx.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_DFX_H +#define __HNS3_CAE_DFX_H + +#define OPC_WRITE_READ_REG_CMD 0x7014 + +struct hns3_cae_reg_param { + u8 is_read; + u8 bit_width; + u64 value; + u64 addr; +}; + +struct hns3_cae_dfx_param { + u8 is_cs_board; + u8 work_mode; + u8 mac_used; + u8 chip_id; + u8 mac_id; + u8 func_id; +}; + +#define HNS3_CAE_EVENT_NAME_LEN 32 + +struct hns3_cae_event_param { + u8 event_name[HNS3_CAE_EVENT_NAME_LEN]; + u64 value; + u64 addr; +}; + +#define HNS3_READ_INFO_FLAG 0x1 +#define HNS3_READ_REGS_FLAG 0x2 + +#define HNS3_CAE_MAX_MAC_NUMBER 0x8 + +#define HNS3_CAE_BITWIDTH_32BIT 32 +#define HNS3_CAE_BITWIDTH_64BIT 64 + +int hns3_cae_get_dfx_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_read_dfx_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_event_injection(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c new file mode 100644 index 0000000000000000000000000000000000000000..21e034153808e2f31683586e1ae11661f1145b75 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
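+// Flow director (FD) configuration helpers for the CAE ioctl path: each
+// helper wraps one HCLGE_OPC_FD_* firmware command (mode control, resource
+// allocation query, key config, TCAM entry, action data and hit counter),
+// dispatched from hns3_cae_fd_cfg().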
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_fd.h" + +static int hns3_cae_send_generic_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_get_fd_mode_cmd *mode_cfg = NULL; + struct hclge_get_fd_mode_cmd *req = NULL; + struct hclge_desc desc; + bool check = false; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, + param->is_read ? true : false); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + if (!param->is_read) { + mode_cfg = (struct hclge_get_fd_mode_cmd *)param->data; + req->mode = mode_cfg->mode; + req->enable = mode_cfg->enable; + } + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set fd mode fail, ret = %d\n", ret); + return ret; + } + + if (param->is_read) { + check = !buf_out || + out_size < sizeof(struct hclge_get_fd_mode_cmd); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + mode_cfg = (struct hclge_get_fd_mode_cmd *)buf_out; + mode_cfg->mode = req->mode; + mode_cfg->enable = req->enable; + } + + return 0; +} + +static int hns3_cae_send_allocate_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ + struct hclge_get_fd_allocation_cmd *allocation_cfg = NULL; + struct hclge_get_fd_allocation_cmd *req = NULL; + struct hclge_desc desc; + bool check = !buf_out || + out_size < sizeof(struct hclge_get_fd_allocation_cmd); + int ret; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + allocation_cfg = (struct hclge_get_fd_allocation_cmd *)buf_out; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query fd allocation fail, ret = %d\n", + ret); + return ret; + } + + allocation_cfg->stage1_entry_num = req->stage1_entry_num; + allocation_cfg->stage2_entry_num = req->stage2_entry_num; + allocation_cfg->stage1_counter_num = req->stage1_counter_num; + allocation_cfg->stage2_counter_num = req->stage2_counter_num; + + return 0; +} + +static int hns3_cae_send_key_cfg_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_set_fd_key_config_cmd *key_cfg_data = NULL; + struct hclge_set_fd_key_config_cmd *req = NULL; + struct hclge_desc desc; + bool check = false; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, + param->is_read ? 
true : false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + req->stage = param->stage; + if (!param->is_read) { + key_cfg_data = + (struct hclge_set_fd_key_config_cmd *)param->data; + req->key_select = key_cfg_data->key_select; + req->inner_sipv6_word_en = key_cfg_data->inner_sipv6_word_en; + req->inner_dipv6_word_en = key_cfg_data->inner_dipv6_word_en; + req->outer_sipv6_word_en = key_cfg_data->outer_sipv6_word_en; + req->outer_dipv6_word_en = key_cfg_data->outer_dipv6_word_en; + req->tuple_mask = key_cfg_data->tuple_mask; + req->meta_data_mask = key_cfg_data->meta_data_mask; + } + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set fd key fail, ret = %d\n", ret); + return ret; + } + + if (param->is_read) { + check = !buf_out || + out_size < sizeof(struct hclge_set_fd_key_config_cmd); + if (check) { + pr_err("input parameter error in %s function\n", + __func__); + return -EFAULT; + } + key_cfg_data = (struct hclge_set_fd_key_config_cmd *)buf_out; + key_cfg_data->key_select = req->key_select; + key_cfg_data->inner_sipv6_word_en = req->inner_sipv6_word_en; + key_cfg_data->inner_dipv6_word_en = req->inner_dipv6_word_en; + key_cfg_data->outer_sipv6_word_en = req->outer_sipv6_word_en; + key_cfg_data->outer_dipv6_word_en = req->outer_dipv6_word_en; + key_cfg_data->tuple_mask = req->tuple_mask; + key_cfg_data->meta_data_mask = req->meta_data_mask; + } + + return 0; +} + +static int hns3_cae_send_tcam_op_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ +#define HNS3_CAE_FD_TCAM_BD_NUM 3 + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_desc desc[HNS3_CAE_FD_TCAM_BD_NUM]; + struct hclge_fd_tcam_config_1_cmd *req1 = NULL; + struct hclge_fd_tcam_config_2_cmd *req2 = NULL; + struct hclge_fd_tcam_config_3_cmd *req3 = NULL; + struct hclge_fd_tcam_data *tcam_data = NULL; + struct hclge_desc *pdesc = NULL; + bool check = false; + u8 *buf = NULL; + int ret; + int i; + + for (i = 0; i < HNS3_CAE_FD_TCAM_BD_NUM; i++) { + pdesc = &desc[i]; + hns3_cae_cmd_setup_basic_desc(pdesc, HCLGE_OPC_FD_TCAM_OP, + param->is_read ? 
true : false); + if (i < HNS3_CAE_FD_TCAM_BD_NUM - 1) + pdesc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = param->stage; + req1->xy_sel = param->xy_sel; + req1->index = param->idx; + + if (!param->is_read) { + req1->entry_vld = param->entry_vld; + tcam_data = (struct hclge_fd_tcam_data *)param->data; + buf = tcam_data->tcam_data; + memcpy(req1->tcam_data, buf, sizeof(req1->tcam_data)); + buf += sizeof(req1->tcam_data); + memcpy(req2->tcam_data, buf, sizeof(req2->tcam_data)); + buf += sizeof(req2->tcam_data); + memcpy(req3->tcam_data, buf, sizeof(req3->tcam_data)); + } + + ret = hns3_cae_cmd_send(hdev, desc, HNS3_CAE_FD_TCAM_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret = %d\n", ret); + + return ret; + } + + if (param->is_read) { + check = !buf_out || + out_size < sizeof(struct hclge_fd_tcam_data); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + tcam_data = (struct hclge_fd_tcam_data *)buf_out; + tcam_data->vld = req1->entry_vld; + buf = tcam_data->tcam_data; + memcpy(buf, req1->tcam_data, sizeof(req1->tcam_data)); + buf += sizeof(req1->tcam_data); + memcpy(buf, req2->tcam_data, sizeof(req2->tcam_data)); + buf += sizeof(req2->tcam_data); + memcpy(buf, req3->tcam_data, sizeof(req3->tcam_data)); + } + + return 0; +} + +static int hns3_cae_send_ad_op_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_fd_ad_config_cmd *ad_data = NULL; + struct hclge_fd_ad_config_cmd *req = NULL; + struct hclge_desc desc; + bool check = false; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, + param->is_read ? 
true : false); + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->stage = param->stage; + req->index = param->idx; + + if (!param->is_read) { + ad_data = (struct hclge_fd_ad_config_cmd *)param->data; + memcpy(&req->ad_data, &ad_data->ad_data, sizeof(req->ad_data)); + } + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "fd ad config fail, ret = %d\n", ret); + return ret; + } + + if (param->is_read) { + check = !buf_out || + out_size < sizeof(struct hclge_fd_ad_config_cmd); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + ad_data = (struct hclge_fd_ad_config_cmd *)buf_out; + memcpy(&ad_data->ad_data, &req->ad_data, sizeof(req->ad_data)); + } + + return 0; +} + +static int hns3_cae_send_cnt_op_cmd(struct hclge_dev *hdev, u8 *buf_in, + u32 in_size, u8 *buf_out, u32 out_size) +{ + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_fd_cnt_op_cmd *cnt_data = NULL; + struct hclge_fd_cnt_op_cmd *req = NULL; + struct hclge_desc desc; + bool check = !buf_out || out_size < sizeof(struct hclge_fd_cnt_op_cmd); + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true); + req = (struct hclge_fd_cnt_op_cmd *)desc.data; + req->stage = param->stage; + req->cnt_idx = param->idx; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "read fd cnt fail, ret = %d\n", ret); + return ret; + } + + cnt_data = (struct hclge_fd_cnt_op_cmd *)buf_out; + memcpy(&cnt_data->cnt_value, &req->cnt_value, sizeof(req->cnt_value)); + + return 0; +} + +int hns3_cae_fd_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + bool check = !buf_in || in_size < sizeof(struct fd_param); + struct hnae3_handle *handle = net_priv->ae_handle; + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_dev *hdev = vport->back; + int ret = -1; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + if (param->op == HCLGE_OPC_FD_MODE_CTRL) { + ret = hns3_cae_send_generic_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + if (param->op == HCLGE_OPC_FD_GET_ALLOCATION) { + ret = hns3_cae_send_allocate_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + if (param->op == HCLGE_OPC_FD_KEY_CONFIG) { + ret = hns3_cae_send_key_cfg_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + if (param->op == HCLGE_OPC_FD_TCAM_OP) { + ret = hns3_cae_send_tcam_op_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + if (param->op == HCLGE_OPC_FD_AD_OP) { + ret = hns3_cae_send_ad_op_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + if (param->op == HCLGE_OPC_FD_CNT_OP) { + ret = hns3_cae_send_cnt_op_cmd(hdev, buf_in, in_size, + buf_out, out_size); + } + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.h new file mode 100644 index 0000000000000000000000000000000000000000..9e2053bc049a7c5d789552bfe19176eeeb4041b5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_FD_H__ +#define __HNS3_CAE_FD_H__ + +#define HCLGE_OPC_FD_CNT_OP 0x1205 + +struct fd_param { + u8 is_read; + u8 stage; + u16 op; + u8 xy_sel; + __le32 idx; + u8 entry_vld; + u8 data[128]; + +}; + +struct hclge_fd_cnt_op_cmd { + u8 stage; + u8 rsv1[3]; + __le16 cnt_idx; + u8 rsv2[2]; + __le64 cnt_value; + u8 rsv3[8]; +}; + +struct hclge_fd_tcam_data { + u8 vld; + u8 tcam_data[52]; +}; + +int hns3_cae_fd_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.c new file mode 100644 index 0000000000000000000000000000000000000000..afda7fe876d788d07e349b63ea5e93333cd431ad --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_gro.h" + +int hns3_gro_age_handle(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hnae3_handle *h = net_priv->ae_handle; + struct hns3_cae_gro_age_config_cmd *req = NULL; + struct hclge_vport *vport = NULL; + struct gro_param *param = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + bool check = !buf_in || in_size < sizeof(struct gro_param); + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = container_of(h, struct hclge_vport, nic); + param = (struct gro_param *)buf_in; + hdev = vport->back; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_AGE_CFG, + param->is_read ? true : false); + req = (struct hns3_cae_gro_age_config_cmd *)desc.data; + + if (!param->is_read) + req->ppu_gro_age_cnt = param->age_cnt; + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "gro age config fail, ret = %d\n", + ret); + return ret; + } + + if (param->is_read) { + if (!buf_out || out_size < sizeof(u32)) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + memcpy(buf_out, &req->ppu_gro_age_cnt, + sizeof(req->ppu_gro_age_cnt)); + } + + return 0; +} + +int hns3_gro_dump_bd_buff_size(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hclge_vport *vport = NULL; + struct hnae3_handle *h = NULL; + struct hclge_dev *hdev = NULL; + + if (!buf_out || out_size < sizeof(u16)) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + h = net_priv->ae_handle; + vport = container_of(h, struct hclge_vport, nic); + hdev = vport->back; + + memcpy(buf_out, &hdev->rx_buf_len, sizeof(hdev->rx_buf_len)); + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.h new file mode 100644 index 0000000000000000000000000000000000000000..49a9548ba735db378490828124e3b187ac9579bf --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_gro.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_GRO_H__ +#define __HNS3_CAE_GRO_H__ + +#define GRO_AGE_RESV_LEN 20 + +struct hns3_cae_gro_age_config_cmd { + u32 ppu_gro_age_cnt; + u8 rsv[GRO_AGE_RESV_LEN]; +}; + +struct gro_param { + u8 is_read; + u32 age_cnt; +}; + +int hns3_gro_age_handle(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_gro_dump_bd_buff_size(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.c new file mode 100644 index 0000000000000000000000000000000000000000..6aa003ff0a2c6dc8e5a4fdbac9b082c0b097d5b3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_hilink_param.h" + +#define HILINK_PARAM_CMD_BD_LEN 10UL +#define HILINK_PARAM_SINGLE_PORT_LANE_NUM 4 + +static void copy_data_from_cmd(u8 *dest, u32 dest_len, u8 *src, u32 src_len) +{ + u32 cpy_len; + + cpy_len = dest_len >= src_len ? src_len : dest_len; + memcpy(dest, src, cpy_len); +} + +static int hns3_get_hilink_ctle(struct hclge_dev *hdev, + u32 lane_start, u32 lane_len, + struct hns3_hilink_param *hns3_param_out) +{ + struct hclge_desc ctle_desc[HILINK_LANE_MAX_NUM] = {0}; + u8 *ctle_data = NULL; + u32 bd_num; + int ret; + u32 i; + + for (i = 0; i < HILINK_PARAM_CMD_BD_LEN; i++) { + hns3_cae_cmd_setup_basic_desc(&ctle_desc[i], + HCLGE_OPC_DUMP_CTLE_PARAM, true); + if (i == 0) + ctle_desc[0].data[0] = lane_start | (lane_len << 4); + + if (i < HILINK_PARAM_CMD_BD_LEN - 1) + ctle_desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + ctle_desc[i].flag &= + ~(cpu_to_le16(HCLGE_CMD_FLAG_NEXT)); + } + + ret = hns3_cae_cmd_send(hdev, ctle_desc, HILINK_PARAM_CMD_BD_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink param cmd failed %d\n", + ret); + return ret; + } + + hns3_param_out->lane_start = ctle_desc[0].data[0] & 0xF; + hns3_param_out->lane_len = (ctle_desc[0].data[0] >> 4) & 0xF; + if (hns3_param_out->lane_len > HILINK_LANE_MAX_NUM) + hns3_param_out->lane_len = HILINK_LANE_MAX_NUM; + + bd_num = min_t(u32, hns3_param_out->lane_len, HILINK_PARAM_CMD_BD_LEN); + for (i = 0; i < bd_num; i++) { + ctle_data = (u8 *)&ctle_desc[i].data[0]; + if (i == 0) { + ctle_data = ctle_data + 1; + copy_data_from_cmd((u8 *)&hns3_param_out->ctle_param[i], + sizeof(struct hns3_ctle_data), + ctle_data, 23); + } else { + copy_data_from_cmd((u8 *)&hns3_param_out->ctle_param[i], + sizeof(struct hns3_ctle_data), + ctle_data, 24); + } + } + return ret; +} + +static int hns3_get_hilink_dfe(struct hclge_dev *hdev, + u32 lane_start, u32 lane_len, + struct hns3_hilink_param *hns3_param_out) +{ + struct hclge_desc dfe_desc[HILINK_LANE_MAX_NUM] = {0}; + u8 *dfe_data = NULL; + u32 bd_num; + int ret; + u32 i; + + for (i = 0; i < HILINK_PARAM_CMD_BD_LEN; i++) { + hns3_cae_cmd_setup_basic_desc(&dfe_desc[i], + HCLGE_OPC_DUMP_DFE_PARAM, true); + if (i == 0) + dfe_desc[0].data[0] = lane_start | (lane_len << 4); + + if (i < HILINK_PARAM_CMD_BD_LEN - 1) + dfe_desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + dfe_desc[i].flag &= ~(cpu_to_le16(HCLGE_CMD_FLAG_NEXT)); + } + + ret = hns3_cae_cmd_send(hdev, 
dfe_desc, HILINK_PARAM_CMD_BD_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink param cmd failed %d\n", + ret); + return ret; + } + + bd_num = min_t(u32, hns3_param_out->lane_len, HILINK_PARAM_CMD_BD_LEN); + for (i = 0; i < bd_num; i++) { + dfe_data = (u8 *)&dfe_desc[i].data[0]; + if (i == 0) { + dfe_data = dfe_data + 1; + copy_data_from_cmd((u8 *)&hns3_param_out->dfe_param[i], + sizeof(struct hns3_dfe_data), + dfe_data, 23); + } else { + copy_data_from_cmd((u8 *)&hns3_param_out->dfe_param[i], + sizeof(struct hns3_dfe_data), + dfe_data, 24); + } + } + + return ret; +} + +static int hns3_get_hilink_ffe(struct hclge_dev *hdev, + u32 lane_start, u32 lane_len, + struct hns3_hilink_param *hns3_param_out) +{ + struct hclge_desc ffe_desc[HILINK_LANE_MAX_NUM] = {0}; + u8 *ffe_data = NULL; + u32 bd_num; + int ret; + u32 i; + + for (i = 0; i < HILINK_PARAM_CMD_BD_LEN; i++) { + hns3_cae_cmd_setup_basic_desc(&ffe_desc[i], + HCLGE_OPC_DUMP_FFE_PARAM, true); + if (i == 0) + ffe_desc[0].data[0] = lane_start | (lane_len << 4); + + if (i < HILINK_PARAM_CMD_BD_LEN - 1) + ffe_desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + ffe_desc[i].flag &= ~(cpu_to_le16(HCLGE_CMD_FLAG_NEXT)); + } + + ret = hns3_cae_cmd_send(hdev, ffe_desc, HILINK_PARAM_CMD_BD_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink param cmd failed %d\n", + ret); + return ret; + } + + bd_num = min_t(u32, hns3_param_out->lane_len, HILINK_PARAM_CMD_BD_LEN); + for (i = 0; i < bd_num; i++) { + ffe_data = (u8 *)&ffe_desc[i].data[0]; + if (i == 0) { + ffe_data = ffe_data + 1; + copy_data_from_cmd((u8 *)&hns3_param_out->ffe_param[i], + sizeof(struct hns3_ffe_data), + ffe_data, 23); + } else { + copy_data_from_cmd((u8 *)&hns3_param_out->ffe_param[i], + sizeof(struct hns3_ffe_data), + ffe_data, 24); + } + } + + return ret; +} + +int hns3_get_hilink_param(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hnae3_handle *handle = hns3_get_handle(net_priv->netdev); + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct hns3_hilink_param *hns3_param_out = + (struct hns3_hilink_param *)buf_out; + struct hns3_hilink_param *hns3_param_in = + (struct hns3_hilink_param *)buf_in; + struct hclge_dev *hdev = vport->back; + bool check = !buf_in || in_size < sizeof(struct hns3_hilink_param) || + !buf_out || out_size < sizeof(struct hns3_hilink_param); + int ret; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + memset(hns3_param_out->ctle_param, 0x0, + sizeof(hns3_param_out->ctle_param)); + memset(hns3_param_out->dfe_param, 0x0, + sizeof(hns3_param_out->dfe_param)); + memset(hns3_param_out->ffe_param, 0x0, + sizeof(hns3_param_out->ffe_param)); + + ret = hns3_get_hilink_ctle(hdev, hns3_param_in->lane_start, + hns3_param_in->lane_len, hns3_param_out); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink ctle cmd failed %d\n", + ret); + return ret; + } + + ret = hns3_get_hilink_dfe(hdev, hns3_param_in->lane_start, + hns3_param_in->lane_len, hns3_param_out); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink dfe cmd failed %d\n", + ret); + return ret; + } + + ret = hns3_get_hilink_ffe(hdev, hns3_param_in->lane_start, + hns3_param_in->lane_len, hns3_param_out); + if (ret) { + dev_err(&hdev->pdev->dev, "get hilink ffe cmd failed %d\n", + ret); + return ret; + } + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.h 
b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.h new file mode 100644 index 0000000000000000000000000000000000000000..507311ccdf0cf40040259525f7ee04bda3021bb3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_hilink_param.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_HILINK_PARAM_H__ +#define __HNS3_CAE_HILINK_PARAM_H__ + +#define HILINK_LANE_MAX_NUM 10 + +#define HCLGE_OPC_DUMP_CTLE_PARAM 0x0382 +#define HCLGE_OPC_DUMP_DFE_PARAM 0x0383 +#define HCLGE_OPC_DUMP_FFE_PARAM 0x0384 + +struct hns3_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; + u8 ctlerefsel; + u8 ctleibiastune; + u8 alos; + u8 lpbk; +}; + +struct hns3_dfe_data { + u8 dfefxtap[10]; /* DFE Fix Tap */ + u8 floatingtap[6]; /* DFE Floating Taps */ +}; + +struct hns3_ffe_data { + u8 pre2; + u8 pre1; + u8 main; + u8 post1; + u8 post2; +}; + +struct hns3_hilink_param { + u32 lane_start; + u32 lane_len; + struct hns3_ctle_data ctle_param[HILINK_LANE_MAX_NUM]; + struct hns3_dfe_data dfe_param[HILINK_LANE_MAX_NUM]; + struct hns3_ffe_data ffe_param[HILINK_LANE_MAX_NUM]; +}; + +int hns3_get_hilink_param(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c new file mode 100644 index 0000000000000000000000000000000000000000..b84c85913d465ba4f1c155997955b49e837d041e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2019 Hisilicon Limited. 
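+// Module entry for the CAE test interface: registers the /dev/nic_dev
+// character device and dispatches its ioctl commands to the per-feature
+// handlers listed in driv_module_cmd_handle[].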
+ +#include + +#ifdef CONFIG_HNS3_TEST +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_cae_lib.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_tm.h" +#include "hns3_cae_dcb.h" +#include "hns3_cae_mac.h" +#include "hns3_cae_dfx.h" +#include "hns3_cae_vlan.h" +#include "hns3_cae_qos.h" +#include "hns3_cae_qinfo.h" +#include "hns3_cae_promisc.h" +#include "hns3_cae_fd.h" +#include "hns3_cae_rss.h" +#include "hns3_cae_qres.h" +#include "hns3_cae_stat.h" +#include "hns3_cae_irq.h" +#include "hns3_cae_pfc_storm.h" +#include "hns3_cae_xsfp.h" +#include "hns3_cae_port.h" +#include "hns3_cae_hilink_param.h" +#include "hns3_cae_version.h" +#include "hns3_cae_checksum.h" +#include "hns3_cae_dcqcn.h" +#include "hns3_cae_reset.h" +#include "hns3_cae_gro.h" +#include "hns3_cae_led.h" + +#define MAX_MSG_OUT_SIZE (1024U * 2048U) +#define MAX_MSG_IN_SIZE (1024U * 2048U) + +static dev_t g_dev_id = {0}; + +struct class *g_hns3_cae_class; +struct cdev g_hns3_cae_cdev; +static const char hns3_driver_name[] = "hns3"; + +int g_hns3_cae_init_flag; +int g_hns3_cae_ref_cnt; + +typedef int (*driv_module) (const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +struct drv_module_handle { + enum driver_cmd_type driv_cmd_name; + driv_module driv_func; +}; + +static void free_buff_in(void *buf_in) +{ + if (!buf_in) + return; + + kfree(buf_in); +} + +static int alloc_buff_in(struct msg_module *nt_msg, u32 in_size, void **buf_in) +{ + void *msg_buf = NULL; + + if (!in_size) + return 0; + + if (in_size > MAX_MSG_IN_SIZE) { + pr_err("msg in size(%u) more than %u\n", + in_size, MAX_MSG_IN_SIZE); + return -ENOMEM; + } + + msg_buf = kzalloc((unsigned long)in_size, GFP_KERNEL); + *buf_in = msg_buf; + if (ZERO_OR_NULL_PTR(*buf_in)) { + pr_err("alloc buf_in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buff, (unsigned long)in_size)) { + pr_err("Copy from user failed in %s function\n", __func__); + kfree(msg_buf); + return -EFAULT; + } + + return 0; +} + +static void free_buff_out(void *buf_out) +{ + if (!buf_out) + return; + + kfree(buf_out); +} + +static int alloc_buff_out(u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (out_size > MAX_MSG_OUT_SIZE) { + pr_err("msg out size(%u) more than %u\n", + out_size, MAX_MSG_OUT_SIZE); + return -ENOMEM; + } + + *buf_out = kzalloc((unsigned long)out_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(*buf_out)) { + pr_err("alloc buf_out failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int copy_buf_out_to_user(struct msg_module *nt_msg, u32 out_size, + void **buf_out) +{ + int ret = 0; + void *msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +static int hns3_cae_netdev_match_check(struct net_device *netdev) +{ + struct ethtool_drvinfo drv_info = {0}; + + if (netdev->ethtool_ops->get_drvinfo) + netdev->ethtool_ops->get_drvinfo(netdev, &drv_info); + + if (!strncmp(drv_info.driver, hns3_driver_name, + strlen(hns3_driver_name) + 1)) + return 0; + + netdev_err(netdev, "match hns3 driver name(%s) failed\n", + drv_info.driver); + return -1; +} + +#if (KERNEL_VERSION(4, 16, 0) < LINUX_VERSION_CODE) +static int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) +{ + mm_segment_t oldfs = get_fs(); + int err; + + set_fs(KERNEL_DS); + err = sock->ops->ioctl(sock, cmd, arg); + set_fs(oldfs); + 
+ return err; +} +#endif + +static struct net_device *get_netdev_by_ifname(char *ifname) +{ + struct socket *temp_sock = NULL; + struct net_device *netdev = NULL; + struct ifreq ifr; + int err; + + err = sock_create(PF_INET, SOCK_DGRAM, 0, &temp_sock); + if (err < 0) { + pr_err("fail to enter sock_create, err = %d\n", err); + return NULL; + } + + strncpy(ifr.ifr_ifrn.ifrn_name, ifname, (unsigned long)IFNAMSIZ); + kernel_sock_ioctl(temp_sock, SIOCSIFNAME, (unsigned long)&ifr); + netdev = dev_get_by_name(sock_net(temp_sock->sk), ifname); + if (!netdev) + goto out; + + dev_put(netdev); + +out: + sock_release(temp_sock); + return netdev; +} + +static int hns3_cae_k_get_netdev_by_ifname(char *ifname, + struct hns3_nic_priv **nic_dev) +{ + struct net_device *netdev = NULL; + + netdev = get_netdev_by_ifname(ifname); + if (!netdev) { + pr_err("not find the netdevice(%s)!\n", ifname); + return -EFAULT; + } + + if (hns3_cae_netdev_match_check(netdev)) { + netdev_err(netdev, "netdevice is not hns device.\n"); + return -EFAULT; + } + + *nic_dev = (struct hns3_nic_priv *)netdev_priv(netdev); + if (!(*nic_dev)) { + netdev_err(netdev, "no private data\n"); + return -EFAULT; + } + + return 0; +} + +struct drv_module_handle driv_module_cmd_handle[] = { + {FW_VER, hns3_cae_get_fw_ver}, + {DRIVER_VER, hns3_cae_get_driver_ver}, + {CHECKSUM_CFG, hns3_cae_chs_cfg}, + {TM_QUEUE_CFG, hns3_cae_queue_cfg}, + {TM_QSET_CFG, hns3_cae_qs_cfg}, + {TM_PRI_CFG, hns3_cae_pri_cfg}, + {TM_PG_CFG, hns3_cae_pg_cfg}, + {TM_PORT_CFG, hns3_cae_port_cfg}, + {TM_ETS_CFG, hns3_cae_ets_cfg}, + {DCB_MODE_CFG, hns3_cae_dcb_cfg}, + {ETS_MODE_CFG, hns3_cae_dcb_ets_cfg}, + {PFC_MODE_CFG, hns3_cae_dcb_pfc_cfg}, + {MAC_LOOP_CFG, hns3_cae_mac_loop_cfg}, + {DFX_INFO_CMD, hns3_cae_get_dfx_info}, + {DFX_READ_CMD, hns3_cae_read_dfx_info}, + {EVENT_INJECTION_CMD, hns3_cae_event_injection}, + {RX_PRIV_BUFF_WL_CFG, hns3_cae_rx_priv_buff_wl_cfg}, + {RX_COMMON_THRD_CFG, hns3_cae_common_thrd_cfg}, + {RX_COMMON_WL_CFG, hns3_cae_common_wl_cfg}, + {SHOW_RX_PRIV_WL, hns3_cae_show_rx_priv_wl}, + {SHOW_RX_COMM_THRES, hns3_cae_show_comm_thres}, + {QCN_EN_CFG, hns3_cae_qcn_cfg}, + {RX_BUFF_CFG, hns3_cae_rx_buff_cfg}, + {TX_BUFF_CFG, hns3_cae_tx_buff_cfg}, + {RESET_CFG, hns3_cae_nic_reset}, + {TIMEOUT_CFG, hns3_cae_nic_timeout_cfg}, + {GET_BD_BUFF_SIZE, hns3_gro_dump_bd_buff_size}, + {PROMISC_MODE_CFG, hns3_promisc_mode_cfg}, + {QINFO_CFG, hns3_cae_qinfo_cfg}, + {CLEAN_STATS, hns3_cae_clean_stats}, + {FD_CFG, hns3_cae_fd_cfg}, + {RSS_GENERIC_CFG, hns3_cae_rss_cfg}, + {COM_REG_CFG, hns3_cae_common_cmd_send}, + {GRO_CFG, hns3_gro_age_handle}, + {QRES_CFG, hns3_cae_qres_cfg}, + {STAT_CFG, hns3_stat_mode_cfg}, + {IRQ_CFG, hns3_irq_lli_cfg}, + {VLAN_UPMAPPING, hns3_cae_upmapping_cfg}, + {EXTERN_INTERFACE_CFG, hns3_cae_pfc_storm_cfg}, + {XSFP_CFG, hns3_xsfp_cfg}, + {SHOW_PORT_INFO, hns3_get_port_info}, + {SHOW_HILINK_PARAM, hns3_get_hilink_param}, + {DCQCN_PARM_CFG, hns3_nic_dcqcn}, + {DCQCN_GET_MSG_CNT_CMD, hns3_dcqcn_get_msg_cnt}, + {LED_CFG_NCL_INFO_CMD, hns3_led_cfg_ncl_info}, +}; + +static int send_to_driver(struct hns3_nic_priv *nic_dev, + struct msg_module *nt_msg, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + u32 num_cmds = ARRAY_SIZE(driv_module_cmd_handle); + enum driver_cmd_type cmd_type = + (enum driver_cmd_type)(nt_msg->msg_formate); + driv_module fn; + int err = -EOPNOTSUPP; + u32 index; + + for (index = 0; index < num_cmds; index++) { + if (cmd_type == driv_module_cmd_handle[index].driv_cmd_name) { + fn = 
driv_module_cmd_handle[index].driv_func; + err = fn(nic_dev, buf_in, in_size, buf_out, out_size); + break; + } + } + + return err; +} + +static long hns3_cae_k_unlocked_ioctl(struct file *pfile, unsigned int cmd, + unsigned long arg) +{ + struct hns3_nic_priv *nic_dev = NULL; + struct msg_module nt_msg; + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size; + u32 in_size; + int cmd_raw; + int ret; + + memset(&nt_msg, 0, sizeof(nt_msg)); + + if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) { + pr_err("copy from user failed in unlocked_ioctl function\n"); + return -EFAULT; + } + + cmd_raw = nt_msg.module; + out_size = nt_msg.len_info.out_buff_len; + in_size = nt_msg.len_info.in_buff_len; + + ret = alloc_buff_in(&nt_msg, in_size, &buf_in); + if (ret) { + pr_err("alloc in buffer failed\n"); + return -EFAULT; + } + + ret = alloc_buff_out(out_size, &buf_out); + if (ret) { + pr_err("alloc out buffer failed\n"); + goto out_free_buf_in; + } + /** + * After decoupling with driver, the scenario of hns driver unregister + * must be considered. In this scenario, driver unregister may happened + * between hns3_cae_k_get_netdev_by_ifname and send_to_driver, + * which may cause access null pointer or other exception. + * When CONFIG_EXT_TEST was defined, we haven't decoupled the tool + * code yet, so we don't need lock. + */ + rtnl_lock(); + if (nt_msg.device_name[IFNAMSIZ - 1] != '\0') { + pr_err("the device name is invalid.\n"); + ret = -EINVAL; + goto out_invalid; + } + ret = hns3_cae_k_get_netdev_by_ifname(nt_msg.device_name, &nic_dev); + if (ret) { + pr_err("can not get the netdevice correctly\n"); + ret = -EINVAL; + goto out_invalid; + } + + if (nic_dev->ae_handle->flags & HNAE3_SUPPORT_VF) { + pr_err("VF is not supported.\n"); + ret = -EINVAL; + goto out_invalid; + } + + switch (cmd_raw) { + case SEND_TO_DRIVER: + ret = send_to_driver(nic_dev, &nt_msg, buf_in, in_size, buf_out, + out_size); + if (ret) { + pr_err("send buffer to driver failed, ret = %d\n", ret); + goto out_invalid; + } + break; + default: + pr_err("module err!\n"); + ret = -EINVAL; + goto out_invalid; + } + rtnl_unlock(); + ret = copy_buf_out_to_user(&nt_msg, out_size, buf_out); + if (ret) + pr_err("copy buf to user failed\n"); + goto out_free_buf_out; + +out_invalid: + rtnl_unlock(); +out_free_buf_out: + free_buff_out(buf_out); +out_free_buf_in: + free_buff_in(buf_in); + + return (long)ret; +} + +static int hns3_cae_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t hns3_cae_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + pr_info("%s read *ppos:%lld size = %lu\n", __func__, *ppos, size); + return 0; +} + +static ssize_t hns3_cae_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + pr_info("%s write *ppos:%lld size = %lu\n", __func__, *ppos, size); + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .open = hns3_cae_k_open, + .read = hns3_cae_k_read, + .write = hns3_cae_k_write, + .unlocked_ioctl = hns3_cae_k_unlocked_ioctl, +}; + +static int if_hns3_cae_exist(void) +{ + struct file *fp = NULL; + int exist; + + fp = filp_open("/dev/nic_dev", O_RDONLY, 0); + if (IS_ERR(fp)) { + exist = 0; + } else { + (void)filp_close(fp, NULL); + exist = 1; + } + + return exist; +} + +static int hns3_cae_k_init(void) +{ + int ret; + struct device *pdevice = NULL; + + if (g_hns3_cae_init_flag) { + g_hns3_cae_ref_cnt++; + return 0; + } + + if (if_hns3_cae_exist()) { + pr_info("dev/nic_dev 
is existed!\n"); + return 0; + } + + ret = alloc_chrdev_region(&g_dev_id, 0, 1, "nic_dev"); + if (ret < 0) { + pr_err("alloc_chrdev_region fail, ret = %d.\n", ret); + return ret; + } + + g_hns3_cae_class = class_create(THIS_MODULE, "nic_class"); + if (IS_ERR(g_hns3_cae_class)) { + pr_err("class create fail.\n"); + ret = -EFAULT; + goto class_create_err; + } + + cdev_init(&g_hns3_cae_cdev, &fifo_operations); + ret = cdev_add(&g_hns3_cae_cdev, g_dev_id, 1); + if (ret < 0) { + pr_err("cdev_add fail, ret = %d.\n", ret); + goto cdev_add_err; + } + + pdevice = device_create(g_hns3_cae_class, NULL, g_dev_id, NULL, + "nic_dev"); + if (IS_ERR(pdevice)) { + pr_err("device_create fail.\n"); + ret = -EPERM; + goto device_create_err; + } + + g_hns3_cae_init_flag = 1; + g_hns3_cae_ref_cnt = 1; + pr_info("register hns3_cae_dev to system, ok!\n"); + + return 0; + +device_create_err: + cdev_del(&g_hns3_cae_cdev); + +cdev_add_err: + class_destroy(g_hns3_cae_class); + +class_create_err: + g_hns3_cae_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +static void hns3_cae_k_uninit(void) +{ + if (g_hns3_cae_init_flag) { + if ((--g_hns3_cae_ref_cnt)) + return; + } + + if (!g_hns3_cae_class || IS_ERR(g_hns3_cae_class)) + return; + + cdev_del(&g_hns3_cae_cdev); + device_destroy(g_hns3_cae_class, g_dev_id); + class_destroy(g_hns3_cae_class); + g_hns3_cae_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + pr_info("unregister hns3_cae_dev ok!\n"); +} +#endif + +static int __init hns3_cae_init(void) +{ +#ifdef CONFIG_HNS3_TEST + int ret; + + pr_err("%s enter!\n", __func__); + + ret = hns3_cae_k_init(); + if (ret) + return ret; +#endif + return 0; +} + +static void __exit hns3_cae_exit(void) +{ +#ifdef CONFIG_HNS3_TEST + pr_err("%s exit!\n", __func__); + hns3_cae_k_uninit(); +#endif +} + +module_init(hns3_cae_init); +module_exit(hns3_cae_exit); +MODULE_DESCRIPTION("HNS3 CAE Driver"); +MODULE_VERSION(HNS3_CAE_MOD_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..27a37ef615336eed7f2b1d7542395c47f4b3e699 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
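+// Reports the NIC's interrupt vector count and TQP count through the CAE
+// ioctl interface.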
+ +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hnae3.h" +#include "hns3_enet.h" +#include "hns3_cae_irq.h" + +struct hns3_irq_lli_param { + int is_get; + u8 computer_cpus; + u16 tqp_nums; +}; + +int hns3_irq_lli_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hns3_irq_lli_param *in_info = + (struct hns3_irq_lli_param *)buf_in; + struct hns3_irq_lli_param *out_info = + (struct hns3_irq_lli_param *)buf_out; + struct hnae3_handle *handle = NULL; + int is_get; + bool check = !buf_in || in_size < sizeof(struct hns3_irq_lli_param); + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + handle = net_priv->ae_handle; + is_get = in_info->is_get; + + if (is_get) { + check = !buf_out || + out_size < sizeof(struct hns3_irq_lli_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->computer_cpus = net_priv->vector_num; + out_info->tqp_nums = handle->kinfo.num_tqps; + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..30b9806785eec852706afac837124defdb3269a5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_irq.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_IRQ_H__ +#define __HNS3_CAE_IRQ_H__ + +int hns3_irq_lli_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.c new file mode 100644 index 0000000000000000000000000000000000000000..75edb143269c512b60bc8508c40a467e6f8b9090 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2019 Hisilicon Limited. 
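+// Reads LED NCL configuration data from firmware via
+// HCLGE_OPC_LED_CFG_NCL_INFO and returns the raw descriptor words.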
+ +#include + +#include "hnae3.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_led.h" + +int hns3_led_cfg_ncl_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hnae3_handle *handle = hns3_get_handle(net_priv->netdev); + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct led_statistic_param *parm_out = buf_out; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc = {0}; + bool check = !buf_out || out_size < sizeof(struct led_statistic_param); + int index; + int ret; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_CFG_NCL_INFO, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "send led command failed %d\n", ret); + return ret; + } + + for (index = 0; index < HCLGE_DESC_DATA_LEN; index++) + parm_out->data[index] = desc.data[index]; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.h new file mode 100644 index 0000000000000000000000000000000000000000..4a6a50be313c58298005494788b006c7d21f740a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_led.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_LED_H__ +#define __HNS3_CAE_LED_H__ + +#define HCLGE_OPC_LED_CFG_NCL_INFO 0x7021 + +struct led_statistic_param { + u32 data[6]; +}; + +int hns3_led_cfg_ncl_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..d5d9b484ae26b9cb004b7152f7072e9a0191b3f3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
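+// Generic command pass-through: builds a single descriptor from the
+// caller-supplied opcode and data words, sends it to firmware and, for read
+// commands, copies the returned descriptor data back to the caller.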
+ + +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_lib.h" + +int hns3_cae_common_cmd_send(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ +#define MAX_DESC_DATA_LEN 6 + struct cmd_desc_param *param_in = (struct cmd_desc_param *)buf_in; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + bool check = !buf_in || in_size < sizeof(struct cmd_desc_param); + int ret; + int i; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + hns3_cae_cmd_setup_basic_desc(&desc, param_in->fw_dw_opcode, + param_in->is_read); + for (i = 0; i < MAX_DESC_DATA_LEN; i++) + desc.data[i] = param_in->reg_desc.data[i]; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "%s, ret is %d.\n", __func__, + ret); + return ret; + } + if (param_in->is_read) { + struct cmd_desc_param *param_out = + (struct cmd_desc_param *)buf_out; + + check = !buf_out || out_size < sizeof(struct cmd_desc_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + for (i = 0; i < MAX_DESC_DATA_LEN; i++) + param_out->reg_desc.data[i] = desc.data[i]; + } + + return 0; +} + diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.h new file mode 100644 index 0000000000000000000000000000000000000000..749bf401c5259ef00e03025930c5cf70e41c06ca --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_lib.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef HNS3_CAE_LIB_H_ +#define HNS3_CAE_LIB_H_ + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +#define NIC_TOOL_MAGIC 'x' + +enum module_name { + SEND_TO_DRIVER = 1, +}; + +enum driver_cmd_type { + FW_VER = 1, + DRIVER_VER, + CHECKSUM_CFG, + RX_CS_STATISTICS_INFO, + CLEAN_STASTICS, + MAX_TSO_SIZE, + FUNC_TYPE, + + TM_QUEUE_CFG = 100, + TM_QSET_CFG, + TM_PRI_CFG, + TM_PG_CFG, + TM_PORT_CFG, + TM_ETS_CFG, + + DCB_MODE_CFG = 150, + ETS_MODE_CFG, + PFC_MODE_CFG, + + MAC_LOOP_CFG = 200, + + DFX_INFO_CMD = 250, + DFX_READ_CMD, + EVENT_INJECTION_CMD, + + SEND_PKT = 300, + + RX_PRIV_BUFF_WL_CFG = 400, + RX_COMMON_THRD_CFG, + RX_COMMON_WL_CFG, + MAC_PAUSE_EN_CFG, + PFC_PAUSE_EN_CFG, + MAC_PAUSE_PARAM_CFG, + SHOW_PAUSE_CFG, + SHOW_PRI_MAP_CFG, + SHOW_RX_PRIV_WL, + SHOW_RX_COMM_THRES, + TX_BUFF_CFG, + RX_BUFF_CFG, + SHOW_TX_QUEUE_TO_TC, + L2_PFC_CFG, + QCN_EN_CFG, + + RESET_CFG = 500, + TIMEOUT_CFG = 550, + + CLEAN_STATS = 600, + PROMISC_MODE_CFG = 700, + QINFO_CFG = 800, + + MACTABLE_CFG = 900, + + PHY_REGISTER_CFG = 1000, + FD_CFG, + + RSS_GENERIC_CFG, + REG_CFG, + COM_REG_CFG, + GRO_CFG, + LAMP_CFG, + M7_CMD_MODE_CFG, /* M7 cmd */ + GET_BD_BUFF_SIZE, + QRES_CFG = 1100, + STAT_CFG, + IRQ_CFG, + + VLAN_UPMAPPING = 1200, + + EXTERN_INTERFACE_CFG = 1300, + XSFP_CFG = 1400, + SHOW_PORT_INFO, + SHOW_HILINK_PARAM, + DCQCN_PARM_CFG = 1500, + DCQCN_GET_MSG_CNT_CMD = 1600, + LED_CFG_NCL_INFO_CMD +}; + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +struct cmd_desc { + u16 opcode; + u16 flag; + u16 retval; + u16 rsv; + u32 data[6]; +}; + +struct cmd_desc_param { + struct cmd_desc reg_desc; + u32 fw_dw_opcode; + u32 is_read; +}; + +#define API_CMD (0x1) +#define API_CHAIN (0x2) + +struct msg_module { + char device_name[IFNAMSIZ]; + unsigned int module; + u32 msg_formate; /* cmd type for driver */ + struct { + u32 in_buff_len; + u32 out_buff_len; + } len_info; + u32 res; + void *in_buff; + void *out_buf; +}; + +struct m7_cmd_para { + u32 bd_count; + u32 bd_type; + void *bd_data; +}; + +int hns3_cae_common_cmd_send(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.c new file mode 100644 index 0000000000000000000000000000000000000000..acbb684cf43b3701e2eb019c9734c59b7b213e98 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
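+// MAC and serdes loopback configuration: reads or updates the
+// HCLGE_OPC_CONFIG_MAC_MODE and HCLGE_OPC_SERDES_LOOPBACK descriptors
+// depending on which loopback fields the caller selects.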
+ +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_mac.h" + +int hns3_cae_mac_loop_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_cae_cfg_serdes_mode_cmd *req1 = NULL; + struct hns3_cae_cfg_mac_mode_cmd *req2 = NULL; + struct hns3_cae_loop_param *out_info = + (struct hns3_cae_loop_param *)buf_out; + struct hns3_cae_loop_param *in_info = + (struct hns3_cae_loop_param *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_loop_param); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + req1 = (struct hns3_cae_cfg_serdes_mode_cmd *)&desc.data[0]; + req2 = (struct hns3_cae_cfg_mac_mode_cmd *)&desc.data[0]; + + if (in_info->is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_loop_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback read fail, ret = %d.\n", ret); + return -EIO; + } + out_info->tx2rx_loop_en = + hnae3_get_bit(req2->txrx_pad_fcs_loop_en, + HCLGE_MAC_APP_LP_B); + out_info->rx2tx_loop_en = + hnae3_get_bit(req2->txrx_pad_fcs_loop_en, + HCLGE_MAC_LINE_LP_B); + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, + true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback read fail, ret = %d.\n", ret); + return -EIO; + } + out_info->serial_tx2rx_loop_en = + hnae3_get_bit(req1->loop_en, SERDES_SERIAL_INNER_LOOP_B); + out_info->parallel_rx2tx_loop_en = + hnae3_get_bit(req1->loop_en, SERDES_PARALLEL_OUTER_LOOP_B); + out_info->parallel_tx2rx_loop_en = + hnae3_get_bit(req1->loop_en, SERDES_PARALLEL_INNER_LOOP_B); + } else { + if (in_info->tx2rx_loop_en < MAINTAIN_LOOP_MODE || + in_info->rx2tx_loop_en < MAINTAIN_LOOP_MODE) { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_CONFIG_MAC_MODE, + true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret = %d.\n", + ret); + return -EIO; + } + + /* 0: off, 1:on, >=2: not set. */ + if (in_info->tx2rx_loop_en < MAINTAIN_LOOP_MODE) + hnae3_set_bit(req2->txrx_pad_fcs_loop_en, + HCLGE_MAC_APP_LP_B, + in_info->tx2rx_loop_en); + + /* 0: off, 1:on, >=2: not set. */ + if (in_info->rx2tx_loop_en < MAINTAIN_LOOP_MODE) + hnae3_set_bit(req2->txrx_pad_fcs_loop_en, + HCLGE_MAC_LINE_LP_B, + in_info->rx2tx_loop_en); + + hns3_cae_cmd_reuse_desc(&desc, false); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret = %d.\n", + ret); + return -EIO; + } + } else { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_SERDES_LOOPBACK, + true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret = %d.\n", + ret); + return -EIO; + } + + /* 0: off, 1:on, >=2: not set. 
*/ + if (in_info->serial_tx2rx_loop_en < + MAINTAIN_LOOP_MODE) { + hnae3_set_bit(req1->loop_en, + SERDES_SERIAL_INNER_LOOP_B, + in_info->serial_tx2rx_loop_en); + hnae3_set_bit(req1->loop_valid, + SERDES_SERIAL_INNER_LOOP_B, true); + } + /* 0: off, 1:on, >=2: not set. */ + if (in_info->parallel_rx2tx_loop_en < + MAINTAIN_LOOP_MODE) { + hnae3_set_bit(req1->loop_en, + SERDES_PARALLEL_OUTER_LOOP_B, + in_info->parallel_rx2tx_loop_en); + hnae3_set_bit(req1->loop_valid, + SERDES_PARALLEL_OUTER_LOOP_B, + true); + } + /* 0: off, 1:on, >=2: not set. */ + if (in_info->parallel_tx2rx_loop_en < + MAINTAIN_LOOP_MODE) { + hnae3_set_bit(req1->loop_en, + SERDES_PARALLEL_INNER_LOOP_B, + in_info->parallel_tx2rx_loop_en); + hnae3_set_bit(req1->loop_valid, + SERDES_PARALLEL_INNER_LOOP_B, + true); + } + + hns3_cae_cmd_reuse_desc(&desc, false); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback set fail, ret = %d.\n", + ret); + return -EIO; + } + } + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.h new file mode 100644 index 0000000000000000000000000000000000000000..3ab73ca1ca45a11ad6c2fbe65da30f5c60f9ed38 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_mac.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_MAC_H__ +#define __HNS3_CAE_MAC_H__ + +#define SERDES_SERIAL_INNER_LOOP_B 0x0 +#define SERDES_PARALLEL_OUTER_LOOP_B 0x1 +#define SERDES_PARALLEL_INNER_LOOP_B 0x2 + +#define MAINTAIN_LOOP_MODE 0x2 + +struct hns3_cae_loop_param { + u8 tx2rx_loop_en; /* 0: off, 1:on, >=2: not set. */ + u8 rx2tx_loop_en; /* 0: off, 1:on, >=2: not set. */ + u8 serial_tx2rx_loop_en; /* 0: off, 1:on, >=2: not set. */ + u8 parallel_rx2tx_loop_en; /* 0: off, 1:on, >=2: not set. */ + u8 parallel_tx2rx_loop_en; /* 0: off, 1:on, >=2: not set. */ + u8 is_read; +}; + +struct hns3_cae_cfg_mac_mode_cmd { + u32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +struct hns3_cae_cfg_serdes_mode_cmd { + u8 loop_valid; + u8 loop_en; + u8 loop_status; + u8 rsv[21]; +}; + +int hns3_cae_mac_loop_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.c new file mode 100644 index 0000000000000000000000000000000000000000..cda7164fc61bc017911e5e6a518aefd60f3c6bfb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#if (!defined CONFIG_EXT_TEST) && (defined CONFIG_IT_VALIDATION) + +#include "hns3_cae_pfc_storm.h" +#include "hns3_enet.h" + +static int hns3_cae_set_pfc_storm_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size) +{ + struct cmd_pfc_storm_param *prelude_in = + (struct cmd_pfc_storm_param *)buf_in; + struct net_device *netdev = NULL; + struct hclge_vport *vport = NULL; + struct hnae3_handle *h = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + int ret; + + netdev = net_priv->netdev; + h = hns3_get_handle(netdev); + vport = hns3_cae_get_vport(h); + hdev = vport->back; + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_CFG_PAUSE_STORM_PARA, + false); + desc.data[0] = prelude_in->pfc_storm_param_mkii.dir; + desc.data[1] = prelude_in->pfc_storm_param_mkii.enable; + desc.data[2] = prelude_in->pfc_storm_param_mkii.period_ms; + desc.data[3] = prelude_in->pfc_storm_param_mkii.times; + desc.data[4] = prelude_in->pfc_storm_param_mkii.recovery_period_ms; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set pfc storm para failed %d\n", + ret); + return ret; + } + + return 0; +} + +static int hns3_cae_get_pfc_storm_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct cmd_pfc_storm_param *prelude_in = + (struct cmd_pfc_storm_param *)buf_in; + struct cmd_pfc_storm_param *info_dstn = + (struct cmd_pfc_storm_param *)buf_out; + struct net_device *netdev = NULL; + struct hclge_vport *vport = NULL; + struct hnae3_handle *h = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + int check; + int ret; + + check = !buf_out || out_size < sizeof(struct cmd_pfc_storm_param); + if (check) { + pr_err("input param buf_out error in %s.\n", __func__); + return -EFAULT; + } + + netdev = net_priv->netdev; + h = hns3_get_handle(netdev); + vport = hns3_cae_get_vport(h); + hdev = vport->back; + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_CFG_PAUSE_STORM_PARA, + true); + desc.data[0] = prelude_in->pfc_storm_param_mkii.dir; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get pfc storm para failed %d\n", + ret); + return ret; + } + + info_dstn->pfc_storm_param_mkii.dir = + prelude_in->pfc_storm_param_mkii.dir; + info_dstn->pfc_storm_param_mkii.enable = desc.data[1]; + info_dstn->pfc_storm_param_mkii.period_ms = desc.data[2]; + info_dstn->pfc_storm_param_mkii.times = desc.data[3]; + info_dstn->pfc_storm_param_mkii.recovery_period_ms = desc.data[4]; + + return 0; +} + +int hns3_cae_pfc_storm_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct cmd_pfc_storm_param *para_in = + (struct cmd_pfc_storm_param *)buf_in; + int check; + int ret; + + check = !buf_in || in_size < sizeof(struct cmd_pfc_storm_param); + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (para_in->op_code == SET_PFC_STORM_PARA) { + ret = hns3_cae_set_pfc_storm_cfg(net_priv, buf_in, in_size); + } else if (para_in->op_code == GET_PFC_STORM_PARA) { + ret = hns3_cae_get_pfc_storm_cfg(net_priv, buf_in, in_size, + buf_out, out_size); + } else { + ret = -EOPNOTSUPP; + } + + return ret; +} + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.h new file mode 100644 index 0000000000000000000000000000000000000000..2d3f5e866ac64a2286ff945a6af4d531295d3da9 --- 
/dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pfc_storm.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_PFC_STORM_H__ +#define __HNS3_CAE_PFC_STORM_H__ + +#include "hns3_cae_cmd.h" +#include "hns3_enet.h" + +enum opc_dup { + SET_PFC_STORM_PARA = 14, + GET_PFC_STORM_PARA, +}; + +struct cmd_pfc_storm_param { + u32 op_code; + u32 judge_class; + union { + struct hns3_pfc_storm_param_mkii { + u32 dir; + u32 enable; + u32 period_ms; + u32 times; + u32 recovery_period_ms; + } pfc_storm_param_mkii; + u8 buf[1024]; + }; +}; + +int hns3_cae_pfc_storm_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.c new file mode 100644 index 0000000000000000000000000000000000000000..9af8d019b655bc8e4ef9a15612b0d2d00b1237e2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_port.h" + +#define HCLGE_CMD_DATA_BYTE_LEN 24 +#define BD_NUM_5 5 +#define BD_NUM_6 6 +#define BD_NUM_7 7 + +static void fill_port_info(struct hclge_port_info *get_port_info_out, + struct hclge_desc *port_desc, u32 bd_num) +{ + u8 *dest_data = NULL; + u8 *tmp_buff = NULL; + u32 i; + + dest_data = (u8 *)get_port_info_out; + + /* first BD (24 Bytes) */ + for (i = 0; i < bd_num; i++) { + tmp_buff = (u8 *)&port_desc[i].data[0]; + if (i == BD_NUM_5) { + get_port_info_out->his_link_machine_state = + port_desc[i].data[0]; + get_port_info_out->his_machine_state_length = + port_desc[i].data[1] & 0xFF; + memcpy(get_port_info_out->his_machine_state_data, + tmp_buff + 5, 19); + } else if (i == BD_NUM_6) { + get_port_info_out->cur_link_machine_state = + port_desc[i].data[0]; + get_port_info_out->cur_machine_state_length = + port_desc[i].data[1] & 0xFF; + memcpy(get_port_info_out->cur_machine_state_data, + tmp_buff + 5, 19); + } else { + if (i == BD_NUM_7) + dest_data = + (u8 *)&get_port_info_out->param_info; + + memcpy(dest_data, tmp_buff, HCLGE_CMD_DATA_BYTE_LEN); + if (i != (bd_num - 1)) + dest_data = dest_data + HCLGE_CMD_DATA_BYTE_LEN; + } + } +} + +int hns3_get_port_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hnae3_handle *handle = hns3_get_handle(net_priv->netdev); + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct hclge_port_info *get_port_info_out = + (struct hclge_port_info *)buf_out; + struct hclge_dev *hdev = vport->back; + struct hclge_desc *port_desc = NULL; + struct hclge_desc desc = {0}; + __le32 *desc_data = NULL; + u32 bd_num; + int ret; + u32 i; + + if (!buf_out || out_size < sizeof(struct hclge_port_info)) + return -EINVAL; + + get_port_info_out->gpio_insert = 0; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PORTINFO_BD_NUM, + true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "hclge get port info BD num failed %d\n", ret); + return ret; + } + + desc_data = (__le32 *)(&desc.data[0]); + bd_num = le32_to_cpu(*desc_data); + + if (!bd_num || bd_num * sizeof(struct hclge_desc) > + sizeof(struct 
hclge_port_info)) { + dev_err(&hdev->pdev->dev, "get invalid BD num %u\n", bd_num); + return -EINVAL; + } + + port_desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(port_desc)) + return -ENOMEM; + + for (i = 0; i < bd_num; i++) { + hns3_cae_cmd_setup_basic_desc(&port_desc[i], + HCLGE_OPC_DUMP_PORT_INFO, true); + if (i < bd_num - 1) + port_desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + port_desc[i].flag &= + ~(cpu_to_le16(HCLGE_CMD_FLAG_NEXT)); + } + + ret = hns3_cae_cmd_send(hdev, port_desc, bd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "get port information cmd failed %d\n", ret); + kfree(port_desc); + return ret; + } + + fill_port_info(get_port_info_out, port_desc, bd_num); + + kfree(port_desc); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.h new file mode 100644 index 0000000000000000000000000000000000000000..37cfdae2155064f0ecd6b703a01a61bff5a0f86b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_port.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_PORT_H__ +#define __HNS3_CAE_PORT_H__ + +#define HCLGE_OPC_QUERY_PORTINFO_BD_NUM 0x0380 +#define HCLGE_OPC_DUMP_PORT_INFO 0x0381 + +struct port_cfg { + u8 an; + u8 fec; + u16 speed; +}; + +struct port_param_info { + /* BD7:24 byte */ + u8 chip_id; + u8 lane_id; + u8 lane_num; + u8 rsvd1; + struct port_cfg default_cfg; + struct port_cfg bios_cfg; + struct port_cfg user_cfg; + struct port_cfg final_cfg; + u8 adapt_default_en; + u8 adapt_cur_en; + u8 adapt_speed; + u8 rsvd2; +}; + +struct hclge_port_info { + /* BD0:24 Byte */ + u8 vendor_name[16]; + u32 port_type; + u32 port_sub_type; + + /* BD1:24 Byte */ + u32 cable_length; + u8 cable_temp; + u8 max_speed; + u8 sfp_type; + u8 rsvd2; + u32 power[4]; + + /* BD2:24 Byte */ + u8 an_state; + u8 fec; + u16 speed; + + u8 gpio_insert; + u8 alos; + u8 rx_los; + u8 pma_ctrl; + + u32 pma_fifo_reg; + u32 pma_signal_ok_reg; + u32 pcs_64_66b_reg; + u32 rf_lf; + + /* BD3 - BD4:24*2 Byte */ + u8 pcs_link; + u8 pcs_mac_link; + u8 tx_enable; + u8 rx_enable; + u32 pcs_err_cnt; + + u8 eq_data[38]; + u8 rsvd5[2]; + + /* BD5-BD6 */ + u32 his_link_machine_state; + u32 cur_link_machine_state; + + u8 his_machine_state_data[128]; + u8 cur_machine_state_data[128]; + + u8 his_machine_state_length; + u8 cur_machine_state_length; + + struct port_param_info param_info; + + u8 rsvd6[488]; +}; + +struct hclge_lsport_info { + u32 portinfo[6]; +}; + +int hns3_get_port_info(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.c new file mode 100644 index 0000000000000000000000000000000000000000..6dd022aed1943c4fae8b07760f56b9fbef6dda07 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include "hns3_cae_cmd.h" +#include "hns3_cae_promisc.h" + +int hns3_read_promisc_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_promisc_cfg_cmd *req = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + u8 *out_buf = NULL; + bool check = !buf_out || out_size < sizeof(u8); + u8 enable; + int ret; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + out_buf = (u8 *)buf_out; + vport = hns3_cae_get_vport(nic_dev->ae_handle); + hdev = vport->back; + req = (struct hclge_promisc_cfg_cmd *)desc.data; + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, true); + req->vf_id = vport->vport_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get promisc mode fail, ret is %d.\n", ret); + return ret; + } + enable = req->flag >> HCLGE_PROMISC_EN_B; + *out_buf = enable; + + return 0; +} + +int hns3_set_promisc_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define PROMISC_EN_MAX_VAL 0x1 + bool check = !buf_in || in_size < sizeof(struct promisc_mode_param); + struct promisc_mode_param *mode_param = NULL; + struct hclge_promisc_cfg_cmd *req = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(nic_dev->ae_handle); + hdev = vport->back; + req = (struct hclge_promisc_cfg_cmd *)desc.data; + mode_param = (struct promisc_mode_param *)buf_in; + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, true); + req->vf_id = vport->vport_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get promisc mode fail, ret is %d.\n", ret); + return ret; + } + + hns3_cae_cmd_reuse_desc(&desc, false); + switch (mode_param->type) { + case HNS3_UNICAST: + req->flag &= ~BIT(HNS3_CAE_UC_PROMISC_EN_B); + if (mode_param->uc > PROMISC_EN_MAX_VAL) + return -EINVAL; + req->flag |= (mode_param->uc << HNS3_CAE_UC_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; + break; + case HNS3_MULTICAST: + req->flag &= ~BIT(HNS3_CAE_MC_PROMISC_EN_B); + if (mode_param->mc > PROMISC_EN_MAX_VAL) + return -EINVAL; + req->flag |= (mode_param->mc << HNS3_CAE_MC_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; + break; + case HNS3_BROADCAST: + req->flag &= ~BIT(HNS3_CAE_BC_PROMISC_EN_B); + if (mode_param->bc > PROMISC_EN_MAX_VAL) + return -EINVAL; + req->flag |= (mode_param->bc << HNS3_CAE_BC_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; + break; + default: + return -1; + } + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, ret is %d.\n", ret); + + return ret; +} + +int hns3_promisc_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + bool check = !buf_in || in_size < sizeof(struct promisc_mode_param); + struct promisc_mode_param *mode_param = NULL; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + mode_param = (struct promisc_mode_param *)buf_in; + if (mode_param->is_read == 1) + ret = hns3_read_promisc_mode_cfg(nic_dev, buf_in, in_size, + buf_out, out_size); + else + ret = 
hns3_set_promisc_mode_cfg(nic_dev, buf_in, in_size, + buf_out, out_size); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.h new file mode 100644 index 0000000000000000000000000000000000000000..51d4743c70cd206f19c7e2df61f72658320e1dd8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_promisc.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_PROMISC_H__ +#define __HNS3_CAE_PROMISC_H__ + +#include "hclge_main.h" +#include "hclge_cmd.h" +#include "hns3_enet.h" + +#define HNS3_CAE_UC_PROMISC_EN_B 0x1 +#define HNS3_CAE_MC_PROMISC_EN_B 0x2 +#define HNS3_CAE_BC_PROMISC_EN_B 0x3 + +enum promisc_mode { + HNS3_UNICAST = 0, + HNS3_MULTICAST, + HNS3_BROADCAST, +}; + +struct promisc_mode_param { + u8 uc; + u8 mc; + u8 bc; + u8 is_read; + u8 type; +}; + +int hns3_promisc_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c new file mode 100644 index 0000000000000000000000000000000000000000..a55b8f3cecdcde2031738b6ebb776c618e7a673c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hns3_cae_qinfo.h" + +static int hns3_get_q_rx_fbd(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + int tqps_num; + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring = &net_priv->ring[ring_id + tqps_num]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); + + return num; +} + +static int hns3_get_q_rx_ebd(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + int tqps_num; + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring = &net_priv->ring[ring_id + tqps_num]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_EBDNUM_REG); + + return num; +} + +static int hns3_get_q_tx_fbd(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + + ring = &net_priv->ring[ring_id]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_FBDNUM_REG); + + return num; +} + +static int hns3_get_q_tx_ebd(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + + ring = &net_priv->ring[ring_id]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_EBDNUM_REG); + + return num; +} + +static int hns3_get_q_rx_tail(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + int tqps_num; + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring = &net_priv->ring[ring_id + tqps_num]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); + + return num; +} + +static int hns3_get_q_rx_head(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + int tqps_num; + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring = &net_priv->ring[ring_id + tqps_num]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + + return num; +} + +static int hns3_get_q_tx_tail(const struct hns3_nic_priv *net_priv, int ring_id) +{ + 
struct hns3_enet_ring *ring = NULL; + int num; + + ring = &net_priv->ring[ring_id]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + + return num; +} + +static int hns3_get_q_tx_head(const struct hns3_nic_priv *net_priv, int ring_id) +{ + struct hns3_enet_ring *ring = NULL; + int num; + + ring = &net_priv->ring[ring_id]; + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); + + return num; +} + +int hns3_cae_qinfo_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct qinfo_param *out_info = (struct qinfo_param *)buf_out; + bool check = !buf_in || in_size < sizeof(int) || !buf_out || + out_size < sizeof(struct qinfo_param); + int tqps_num; + int ring_id; + int rx_head; + int rx_tail; + int rx_ebd; + int rx_fbd; + int tx_head; + int tx_tail; + int tx_ebd; + int tx_fbd; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring_id = *((int *)buf_in); + + if (ring_id >= tqps_num || ring_id < 0) { + pr_err("please input valid qid\n"); + return -1; + } + rx_head = hns3_get_q_rx_head(net_priv, ring_id); + rx_tail = hns3_get_q_rx_tail(net_priv, ring_id); + rx_ebd = hns3_get_q_rx_ebd(net_priv, ring_id); + rx_fbd = hns3_get_q_rx_fbd(net_priv, ring_id); + tx_head = hns3_get_q_tx_head(net_priv, ring_id); + tx_tail = hns3_get_q_tx_tail(net_priv, ring_id); + tx_ebd = hns3_get_q_tx_ebd(net_priv, ring_id); + tx_fbd = hns3_get_q_tx_fbd(net_priv, ring_id); + out_info->qid = ring_id; + out_info->rx_head = rx_head; + out_info->rx_tail = rx_tail; + out_info->rx_ebd = rx_ebd; + out_info->rx_fbd = rx_fbd; + out_info->tx_head = tx_head; + out_info->tx_tail = tx_tail; + out_info->tx_ebd = tx_ebd; + out_info->tx_fbd = tx_fbd; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..8e1f69757c5f8a2d9b67fea94273a602a9505736 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_QINFO_H__ +#define __HNS3_CAE_QINFO_H__ + +#include "hclge_main.h" +#include "hclge_cmd.h" +#include "hns3_enet.h" + +#define HNS3_RING_RX_RING_EBDNUM_REG 0x00024 +#ifndef HNS3_RING_TX_RING_EBDNUM_REG +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 +#endif + +struct qinfo_param { + int qid; + int tx_head; + int tx_tail; + int tx_ebd; + int tx_fbd; + int rx_head; + int rx_tail; + int rx_ebd; + int rx_fbd; +}; + +int hns3_cae_qinfo_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.c new file mode 100644 index 0000000000000000000000000000000000000000..fb872de05c977afc46c4d8cde3307254b5ac0fcf --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include "hclge_main.h" +#include "hclge_cmd.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_qos.h" + +struct hclge_dev *get_val_hdev(const struct hns3_nic_priv *net_priv) +{ + struct hnae3_handle *handle = NULL; + struct hclge_vport *vport = NULL; + + handle = net_priv->ae_handle; + vport = hns3_cae_get_vport(handle); + return vport->back; +} + +int hns3_cmd_rx_priv_wl_config(struct hclge_dev *hdev, u16 tc, + u32 high, u32 low, u32 en) +{ + struct hclge_rx_priv_wl_buf *req = NULL; + enum hclge_cmd_status status; + struct hclge_desc desc[2]; + int idx; + int i; + int j; + + for (i = 0; i < 2; i++) { + hns3_cae_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + idx = i * HCLGE_TC_NUM_ONE_DESC + j; + if ((tc >> idx) & 0x01) { + req->tc_wl[j].high = cpu_to_le16(high); + req->tc_wl[j].high |= + cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + req->tc_wl[j].low = cpu_to_le16(low); + req->tc_wl[j].low |= + cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + } + } + } + + /* Send 2 descriptor at one time */ + status = hns3_cae_cmd_send(hdev, desc, 2); + if (status) { + dev_err(&hdev->pdev->dev, + "Set rx private waterline fail, status %d\n", status); + return status; + } + return 0; +} + +int hns3_cae_rx_priv_buff_wl_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_dev *hdev = get_val_hdev(net_priv); + struct hns3_rx_priv_buff_wl_param *in_info = + (struct hns3_rx_priv_buff_wl_param *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_rx_priv_buff_wl_param); + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "This device is not support this cmd!\n"); + return -EPERM; + } + + pr_err("wl is_set param, tc_no = 0x%x, hight = 0x%x, low = 0x%x\n", + in_info->tc_no, in_info->high_wl, in_info->low_wl); + + return hns3_cmd_rx_priv_wl_config(hdev, in_info->tc_no, + in_info->high_wl, in_info->low_wl, 1); +} + +int hns3_cmd_common_thrd_config(struct hclge_dev *hdev, u16 tc, + u32 high, u32 low, u32 en) +{ +#define HNS3_CAE_THRD_ALLOC_BD_NUM 2 + struct hclge_rx_com_thrd *req = NULL; + enum hclge_cmd_status status; + struct hclge_desc desc[2]; + int idx; + int i; + int j; + + for (i = 0; i < HNS3_CAE_THRD_ALLOC_BD_NUM; i++) { + hns3_cae_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, + false); + req = (struct hclge_rx_com_thrd *)desc[i].data; + + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + idx = i * HCLGE_TC_NUM_ONE_DESC + j; + if ((tc >> idx) & 0x01) { + req->com_thrd[j].high = cpu_to_le16(high); + req->com_thrd[j].high |= + cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + req->com_thrd[j].low = cpu_to_le16(low); + req->com_thrd[j].low |= + cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + } + } + } + + /* Send 2 descriptors at one time */ + status = hns3_cae_cmd_send(hdev, desc, HNS3_CAE_THRD_ALLOC_BD_NUM); + if (status) { + dev_err(&hdev->pdev->dev, + "Set rx common threshold fail, status %d\n", status); + return status; + } + + return 0; +} + +int 
hns3_cae_common_thrd_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_dev *hdev = get_val_hdev(net_priv); + struct hns3_rx_priv_buff_wl_param *in_info = + (struct hns3_rx_priv_buff_wl_param *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_rx_priv_buff_wl_param); + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "This device is not support this cmd!\n"); + return -EPERM; + } + + pr_info("common thrd is_set param, tc_no = 0x%x, hight = 0x%x, low = 0x%x\n", + in_info->tc_no, in_info->high_wl, in_info->low_wl); + + return hns3_cmd_common_thrd_config(hdev, in_info->tc_no, + in_info->high_wl, in_info->low_wl, + 1); +} + +int hns3_cmd_common_wl_config(struct hclge_dev *hdev, u32 high, u32 low, u32 en) +{ + enum hclge_cmd_status status; + struct hclge_rx_com_wl *req = NULL; + struct hclge_desc desc; + + req = (struct hclge_rx_com_wl *)desc.data; + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + req->com_wl.high = cpu_to_le16(high); + req->com_wl.high |= cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + req->com_wl.low = cpu_to_le16(low); + req->com_wl.low |= cpu_to_le16(en << HCLGE_RX_PRIV_EN_B); + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Set rx common waterline fail, status %d\n", status); + return status; + } + + return 0; +} + +int hns3_cae_common_wl_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_rx_priv_buff_wl_param *out_info = + (struct hns3_rx_priv_buff_wl_param *)buf_out; + struct hns3_rx_priv_buff_wl_param *in_info = + (struct hns3_rx_priv_buff_wl_param *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_rx_priv_buff_wl_param); + enum hclge_cmd_status status; + struct hclge_rx_com_wl *req = NULL; + struct hclge_vport *vport = NULL; + struct hclge_desc desc; + struct hclge_dev *hdev = NULL; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + if (in_info->is_read == IS_WRITE) { + status = hns3_cmd_common_wl_config(hdev, in_info->high_wl, + in_info->low_wl, 1); + } else { + check = !buf_out || + out_size < sizeof(struct hns3_rx_priv_buff_wl_param); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, + true); + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status != 0) { + dev_err(&hdev->pdev->dev, + "get rx common waterline fail, status = %d\n", + status); + return status; + } + req = (struct hclge_rx_com_wl *)desc.data; + out_info->high_wl = req->com_wl.high; + out_info->low_wl = req->com_wl.low; + } + + return status; +} + +int hns3_cae_rx_buff_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_rx_priv_buff_cmd *recv = NULL; + struct hns3_rx_buff_param *out_info = + (struct hns3_rx_buff_param *)buf_out; + struct hns3_rx_buff_param *in_info = + (struct hns3_rx_buff_param *)buf_in; + enum hclge_cmd_status status; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + bool check = !buf_in || in_size < sizeof(struct hns3_rx_buff_param) || + !buf_out || out_size < 
sizeof(struct hns3_rx_buff_param); + int i; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + if (in_info->is_read == IS_READ) { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RX_PRIV_BUFF_ALLOC, + true); + recv = (struct hclge_rx_priv_buff_cmd *)desc.data; + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status) { + pr_err("rx buff get cmd send failed!\n"); + return status; + } + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + out_info->buff_size[i] = recv->buf_num[i]; + + out_info->share_buff = recv->shared_buf; + } + + return 0; +} + +int hns3_cae_tx_buff_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_tx_buff_alloc_cmd *recv = NULL; + struct hns3_tx_buff_param *out_info = + (struct hns3_tx_buff_param *)buf_out; + struct hns3_tx_buff_param *in_info = + (struct hns3_tx_buff_param *)buf_in; + enum hclge_cmd_status status; + struct hclge_vport *vport = NULL; + struct hclge_desc desc; + struct hclge_dev *hdev = NULL; + bool check = !buf_in || in_size < sizeof(struct hns3_tx_buff_param) || + !buf_out || out_size < sizeof(struct hns3_tx_buff_param); + int i; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + if (in_info->is_read == IS_READ) { + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, + true); + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status) { + pr_err("tx buff get cmd send failed!\n"); + return status; + } + recv = (struct hclge_tx_buff_alloc_cmd *)desc.data; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + out_info->buff_size[i] = recv->tx_pkt_buff[i]; + } + + return 0; +} + +int hns3_cae_show_comm_thres(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define HNS3_CAE_THRD_ALLOC_BD_NUM 2 + struct hclge_dev *hdev = get_val_hdev(net_priv); + struct hns3_total_priv_wl_param *out_info = + (struct hns3_total_priv_wl_param *)buf_out; + struct hclge_rx_com_thrd *req = NULL; + enum hclge_cmd_status status; + struct hclge_desc desc[2]; + bool check = !buf_out || + out_size < sizeof(struct hns3_total_priv_wl_param); + int idx; + int i; + int j; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + for (i = 0; i < HNS3_CAE_THRD_ALLOC_BD_NUM; i++) { + hns3_cae_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, + true); + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + status = hns3_cae_cmd_send(hdev, desc, HNS3_CAE_THRD_ALLOC_BD_NUM); + if (status) { + dev_err(&hdev->pdev->dev, + "Get rx common threshold fail, status = %d\n", status); + return status; + } + + for (i = 0; i < 2; i++) { + req = (struct hclge_rx_com_thrd *)desc[i].data; + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + idx = i * HCLGE_TC_NUM_ONE_DESC + j; + out_info->priv_wl[idx].high = req->com_thrd[j].high; + out_info->priv_wl[idx].low = req->com_thrd[j].low; + } + } + + return 0; +} + +int hns3_cae_show_rx_priv_wl(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define HNS3_CAE_WL_ALLOC_BD_NUM 2 + struct hclge_dev *hdev = get_val_hdev(net_priv); + struct hns3_total_priv_wl_param *out_info = + (struct hns3_total_priv_wl_param 
*)buf_out; + struct hclge_rx_priv_wl_buf *req = NULL; + enum hclge_cmd_status status; + struct hclge_desc desc[2]; + bool check = !buf_out || + out_size < sizeof(struct hns3_total_priv_wl_param); + int idx; + int i; + int j; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + for (i = 0; i < HNS3_CAE_WL_ALLOC_BD_NUM; i++) { + hns3_cae_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_PRIV_WL_ALLOC, + true); + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + status = hns3_cae_cmd_send(hdev, desc, HNS3_CAE_WL_ALLOC_BD_NUM); + if (status) { + dev_err(&hdev->pdev->dev, + "Get rx private waterline fail, statu = %d\n", status); + return status; + } + + for (i = 0; i < HNS3_CAE_WL_ALLOC_BD_NUM; i++) { + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + idx = i * HCLGE_TC_NUM_ONE_DESC + j; + out_info->priv_wl[idx].high = req->tc_wl[j].high; + out_info->priv_wl[idx].low = req->tc_wl[j].low; + } + } + + return 0; +} + +int hns3_cae_qcn_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ +#define HCLGE_OPC_QCN_CFG 0x1A01 + bool check = !buf_in || in_size < sizeof(u32); + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_desc desc; + u32 qcn_bypass; + u32 qcn_cfg; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + qcn_bypass = *(u32 *)(buf_in); + vport = hns3_cae_get_vport(net_priv->ae_handle); + hdev = vport->back; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_CFG, true); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) + return ret; + qcn_cfg = desc.data[0] & HNS3_QCN_SHAP_BYPASS_MASK; + hns3_cae_cmd_reuse_desc(&desc, false); + desc.data[0] = (qcn_cfg | ((qcn_bypass << HNS3_QCN_SHAP_BYPASS_OFF) & + HNS3_QOS_QCN_BYPASS_MASK)) & HNS3_QOS_QCN_MASK; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.h new file mode 100644 index 0000000000000000000000000000000000000000..125ca69582740bd06dda8007c33438a132a9ea1a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qos.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_QOS_H__ +#define __HNS3_CAE_QOS_H__ + +#include "hns3_cae_tm.h" + +struct hns3_rx_priv_buff_wl_param { + u32 tc_no; + u32 high_wl; + u32 low_wl; + u8 is_read; +}; + +struct hns3_tx_buff_param { + u16 buff_size[MAX_TC_NUM]; + u8 is_read; +}; + +struct hns3_rx_buff_param { + u16 buff_size[MAX_TC_NUM]; + u16 share_buff; + u8 is_read; +}; + +struct hns3_rx_priv_wl { + u16 high; + u16 low; +}; + +struct hns3_total_priv_wl_param { + struct hns3_rx_priv_wl priv_wl[MAX_TC_NUM]; +}; + +enum opt_type { + IS_READ = 1, + IS_WRITE, +}; + +#define HNS3_QOS_QCN_MASK 0xF0000 +#define HNS3_QCN_SHAP_BYPASS_MASK 0xCFFFF +#define HNS3_QOS_QCN_BYPASS_MASK 0x20000 +#define HNS3_QCN_SHAP_BYPASS_OFF 17 + +int hns3_cae_rx_priv_buff_wl_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_common_thrd_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_common_wl_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_tx_buff_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_rx_buff_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_show_rx_priv_wl(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_show_comm_thres(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_qcn_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.c new file mode 100644 index 0000000000000000000000000000000000000000..17fece58102f14f3ff4be2b99458b1e626a0566d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include "hns3_cae_qres.h" + +static int hns3_get_qres_rx_value(const struct hns3_nic_priv *net_priv, + int ring_id, enum param_type type) +{ + struct hns3_enet_ring *ring = NULL; + int tqps_num; + int num; + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring = &net_priv->ring[ring_id + tqps_num]; + switch (type) { + case RX_HEAD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case RX_TAIL_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_TAIL_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case RX_EBD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_EBDNUM_REG_ADRR); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case RX_FBD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_FBDNUM_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case RX_SOFTWARE_HEAD_TYPE: + num = ring->next_to_use; + break; + case RX_SOFTWARE_TAIL_TYPE: + num = ring->next_to_clean; + break; + default: + pr_err("please input valid param type!\n"); + return -1; + } + + return num; +} + +static int hns3_get_qres_tx_value(const struct hns3_nic_priv *net_priv, + int ring_id, enum param_type type) +{ + struct hns3_enet_ring *ring = NULL; + int num; + + ring = &net_priv->ring[ring_id]; + switch (type) { + case TX_HEAD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case TX_TAIL_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case TX_EBD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_EBDNUM_REG_ADRR); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case TX_FBD_TYPE: + num = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + /* Make sure num taken effect before other data is touched */ + rmb(); + break; + case TX_SOFTWARE_HEAD_TYPE: + num = ring->next_to_use; + break; + case TX_SOFTWARE_TAIL_TYPE: + num = ring->next_to_clean; + break; + default: + pr_err("please input valid param type!\n"); + return -1; + } + + return num; +} + +static void fill_queue_info(const struct hns3_nic_priv *net_priv, + struct qres_param *out_info, int ring_id) +{ + /* rx info */ + out_info->qid = ring_id; + out_info->rx_head = hns3_get_qres_rx_value(net_priv, ring_id, + RX_HEAD_TYPE); + out_info->rx_tail = hns3_get_qres_rx_value(net_priv, ring_id, + RX_TAIL_TYPE); + out_info->rx_ebd = hns3_get_qres_rx_value(net_priv, ring_id, + RX_EBD_TYPE); + out_info->rx_fbd = hns3_get_qres_rx_value(net_priv, ring_id, + RX_FBD_TYPE); + out_info->rx_software_head = + hns3_get_qres_rx_value(net_priv, ring_id, + RX_SOFTWARE_HEAD_TYPE); + out_info->rx_software_tail = + hns3_get_qres_rx_value(net_priv, ring_id, + RX_SOFTWARE_TAIL_TYPE); + /* tx info */ + out_info->tx_head = hns3_get_qres_tx_value(net_priv, ring_id, + TX_HEAD_TYPE); + out_info->tx_tail = hns3_get_qres_tx_value(net_priv, ring_id, + TX_TAIL_TYPE); + out_info->tx_ebd = hns3_get_qres_tx_value(net_priv, ring_id, + TX_EBD_TYPE); + out_info->tx_fbd = hns3_get_qres_tx_value(net_priv, ring_id, + TX_FBD_TYPE); + out_info->tx_software_head = + hns3_get_qres_tx_value(net_priv, ring_id, + TX_SOFTWARE_HEAD_TYPE); + 
out_info->tx_software_tail = + hns3_get_qres_tx_value(net_priv, ring_id, + TX_SOFTWARE_TAIL_TYPE); +} + +int hns3_cae_qres_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct qres_bufin_param *qres_in_param = + (struct qres_bufin_param *)buf_in; + struct qres_param *out_info = (struct qres_param *)buf_out; + struct hns3_enet_ring *ring = NULL; + bool check = !buf_in || in_size < sizeof(struct qres_bufin_param) || + !buf_out || out_size < sizeof(struct qres_param); + int bd_index; + int tqps_num; + int ring_id; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + tqps_num = net_priv->ae_handle->kinfo.num_tqps; + ring_id = qres_in_param->queue_id; + bd_index = qres_in_param->BD_id; + + out_info->num_tqps = tqps_num; + + if (ring_id >= tqps_num || ring_id < 0) { + pr_err("please input valid qid\n"); + return -1; + } + + if (qres_in_param->mtype == MTYPE_QUEUE_INFO) { + fill_queue_info(net_priv, out_info, ring_id); + } else if (qres_in_param->mtype == MTYPE_BD_INFO) { + if (qres_in_param->queue_type == TYPE_TX) { + ring = &net_priv->ring[ring_id]; + if (bd_index >= ring->desc_num || bd_index < 0) { + out_info->num_bd = ring->desc_num; + pr_err("please input valid TX BD_id\n"); + return -1; + } + out_info->desc = ring->desc[bd_index]; + } else if (qres_in_param->queue_type == TYPE_RX) { + ring = &net_priv->ring[ring_id + tqps_num]; + if (bd_index >= ring->desc_num || bd_index < 0) { + out_info->num_bd = ring->desc_num; + pr_err("please input valid RX BD_id\n"); + return -1; + } + out_info->desc = ring->desc[bd_index]; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.h new file mode 100644 index 0000000000000000000000000000000000000000..08b11adb2741dfa5cdf8d734cbf886bf9acb5148 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qres.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_QRES_H__ +#define __HNS3_CAE_QRES_H__ + +#include "hclge_main.h" +#include "hclge_cmd.h" +#include "hns3_enet.h" + +#define HNS3_RING_RX_RING_EBDNUM_REG_ADRR 0x00008 +#define HNS3_RING_TX_RING_EBDNUM_REG_ADRR 0x00068 + +struct qres_param { + int qid; + int tx_head; + int tx_tail; + int tx_ebd; + int tx_fbd; + int tx_software_head; /* next_to_use */ + int tx_software_tail; /* next_to_clean */ + int rx_head; + int rx_tail; + int rx_ebd; + int rx_fbd; + int rx_software_head; + int rx_software_tail; + int num_tqps; + int num_bd; + struct hns3_desc desc; /* dma map address space */ +}; + +enum param_type { + TX_HEAD_TYPE, + TX_TAIL_TYPE, + TX_EBD_TYPE, + TX_FBD_TYPE, + TX_SOFTWARE_TAIL_TYPE, + TX_SOFTWARE_HEAD_TYPE, + RX_HEAD_TYPE, + RX_TAIL_TYPE, + RX_EBD_TYPE, + RX_FBD_TYPE, + RX_SOFTWARE_TAIL_TYPE, + RX_SOFTWARE_HEAD_TYPE, +}; + +struct qres_bufin_param { + int BD_id; + int queue_type; + int mtype; + int queue_id; +}; + +enum qres_main_type { + MTYPE_NULL, + MTYPE_BD_INFO, + MTYPE_QUEUE_INFO, +}; + +enum qres_queue_type { + TYPE_NULL, + TYPE_RX, + TYPE_TX, +}; + +int hns3_cae_qres_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.c new file mode 100644 index 0000000000000000000000000000000000000000..947bc554e4b2fa2255cf7381679ce0103ee49a0b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2019 Hisilicon Limited. + +#include +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_cae_reset.h" + +int hns3_cae_nic_reset(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ +#define MIN_DOG_INTVAL 12 + struct hnae3_handle *h = net_priv->ae_handle; + struct reset_param *reset_info = (struct reset_param *)buf_in; + enum hnae3_reset_type rst_type; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + bool check = !buf_in || in_size < sizeof(struct reset_param); + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + vport = container_of(h, struct hclge_vport, nic); + hdev = vport->back; + rst_type = HNAE3_NONE_RESET; + + if (test_bit(HCLGE_STATE_REMOVING, &hdev->state)) { + dev_info(&hdev->pdev->dev, "driver already uninit!\n"); + return 0; + } + + if (time_before(jiffies, (hdev->last_reset_time + MIN_DOG_INTVAL * HZ))) + return 0; + + if (reset_info->reset_level == HNAE3_FUNC_RESET) + rst_type = HNAE3_FUNC_RESET; + else if (reset_info->reset_level == HNAE3_GLOBAL_RESET) + rst_type = HNAE3_GLOBAL_RESET; + + hdev->reset_level = rst_type; + dev_info(&hdev->pdev->dev, + "user received reset event, reset type is %d\n", + hdev->reset_level); + + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), + system_wq, &hdev->service_task, 0); + + return 0; +} + +int hns3_cae_nic_timeout_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct tx_timeout_param *out_info = (struct tx_timeout_param *)buf_out; + struct tx_timeout_param *in_info = (struct tx_timeout_param 
*)buf_in;
+	bool check = !buf_in || in_size < sizeof(struct tx_timeout_param);
+	struct net_device *netdev = net_priv->netdev;
+
+	if (check) {
+		pr_err("input param buf_in error in %s function\n", __func__);
+		return -EFAULT;
+	}
+
+	if (in_info->wr_flag) {
+		netdev->watchdog_timeo = (in_info->tx_timeout_size) * HZ;
+	} else {
+		check = !buf_out || out_size < sizeof(struct tx_timeout_param);
+		if (check) {
+			pr_err("input param buf_out error in %s function\n",
+			       __func__);
+			return -EFAULT;
+		}
+		out_info->tx_timeout_size = (netdev->watchdog_timeo) / HZ;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.h
new file mode 100644
index 0000000000000000000000000000000000000000..3439dd33bc2ce7dde6420e0d86bf7d88a3cfb9f1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_reset.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2019 Hisilicon Limited. */
+
+#ifndef __HNS3_CAE_RESET_H__
+#define __HNS3_CAE_RESET_H__
+
+struct reset_param {
+	u32 reset_level;
+};
+
+struct tx_timeout_param {
+	u16 wr_flag;
+	u16 tx_timeout_size;
+};
+
+int hns3_cae_nic_reset(const struct hns3_nic_priv *net_priv,
+		       void *buf_in, u32 in_size, void *buf_out,
+		       u32 out_size);
+int hns3_cae_nic_timeout_cfg(const struct hns3_nic_priv *net_priv,
+			     void *buf_in, u32 in_size,
+			     void *buf_out, u32 out_size);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.c
new file mode 100644
index 0000000000000000000000000000000000000000..f263156bc547608e98e641646e3218b3bcc17947
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_rss.h" + +#define HASH_ALG_MASK 0XFC + +static int hclge_set_rss_algo_key(struct hclge_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclge_rss_config_cmd *req = NULL; + enum hclge_cmd_status status; + struct hclge_desc desc; + int key_offset; + int key_size; + + req = (struct hclge_rss_config_cmd *)desc.data; + for (key_offset = 0; key_offset < 3; key_offset++) { + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); + req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); + if (key_offset == 2) + key_size = + HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGE_RSS_HASH_KEY_NUM; + memcpy(req->hash_key, + key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Configure RSS algo fail, status = %d\n", + status); + return -EINVAL; + } + } + + return 0; +} + +static int hns3_cae_set_rss_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_rss_config_cmd *in_info = + (struct hclge_rss_config_cmd *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hclge_rss_config_cmd); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + u8 *key = vport->rss_hash_key; + enum hclge_cmd_status status; + u8 hash_config; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + hash_config = + ((u8)(vport->rss_algo) & (HASH_ALG_MASK)) | in_info->hash_config; + status = hclge_set_rss_algo_key(hdev, hash_config, key); + if (status) { + dev_err(&hdev->pdev->dev, + "hclge_set_rss_algo_key, status = %d\n", status); + return -EINVAL; + } + vport->rss_algo = hash_config; + + return 0; +} + +static int hns3_cae_get_rss_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + bool check = !buf_out || out_size < sizeof(u8); + struct hclge_rss_config_cmd *req = NULL; + struct hclge_dev *hdev = vport->back; + enum hclge_cmd_status status; + u8 *out_buf = (u8 *)buf_out; + struct hclge_desc desc; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_GENERIC_CONFIG, true); + status = hns3_cae_cmd_send(hdev, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, "%s fail, status is %d.\n", + __func__, status); + return status; + } + req = (struct hclge_rss_config_cmd *)desc.data; + *out_buf = req->hash_config; + + return 0; +} + +int hns3_cae_rss_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + bool check = !buf_in || in_size < sizeof(struct rss_config); + struct rss_config *mode_param = NULL; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + mode_param = (struct rss_config *)buf_in; + if (mode_param->is_read == 1) + ret = hns3_cae_get_rss_cfg(net_priv, buf_in, in_size, buf_out, + out_size); + else + ret = hns3_cae_set_rss_cfg(net_priv, buf_in, in_size, 
buf_out, + out_size); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.h new file mode 100644 index 0000000000000000000000000000000000000000..584970b4a95020456893bb814d98565e3b2e0c72 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_rss.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_RSS_H +#define __HNS3_CAE_RSS_H + +#define RSS_HASH_KEY_NUM 40 + +struct rss_config { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[RSS_HASH_KEY_NUM]; + u8 is_read; +}; + +int hns3_cae_rss_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c new file mode 100644 index 0000000000000000000000000000000000000000..684d72953c306599ef3e80ad05bc59bcfd0cf9fd --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. +#include "hns3_cae_cmd.h" +#include "hns3_cae_stat.h" + +const struct ring_stats_name hns3_ring_stats_name[] = { + {"sw_err_cnt", SW_ERR_CNT}, + {"seg_pkt_cnt", SEG_PKT_CNT}, + {"tx_pkts", TX_PKTS}, + {"tx_bytes", TX_BYTES}, + {"tx_more", TX_MORE}, + {"restart_queue", RESTART_QUEUE}, + {"tx_busy", TX_BUSY}, + {"rx_pkts", RX_PKTS}, + {"rx_bytes", RX_BYTES}, + {"rx_err_cnt", RX_ERR_CNT}, + {"reuse_pg_cnt", REUSE_PG_CNT}, + {"err_pkt_len", ERR_PKT_LEN}, + {"err_bd_num", ERR_BD_NUM}, + {"l2_err", L2_ERR}, + {"l3l4_csum_err", L3L4_CSUM_ERR}, + {"rx_multicast", RX_MULTICAST}, +}; + +static int hns3_get_stat_val(struct ring_stats *r_stats, char *val_name, + u32 max_name_len, u64 **val) +{ + u32 stats_name_id = 0; + u32 i; + + if (!r_stats || !val_name || !val || strlen(val_name) >= max_name_len) { + pr_info("%s param is null.\n", __func__); + return HCLGE_ERR_CSQ_ERROR; + } + + *val = NULL; + + for (i = 0; i < ARRAY_SIZE(hns3_ring_stats_name); i++) { + if (!strcmp(val_name, hns3_ring_stats_name[i].stats_name)) { + stats_name_id = hns3_ring_stats_name[i].stats_namd_id; + break; + } + } + switch (stats_name_id) { + case SW_ERR_CNT: + *val = &r_stats->sw_err_cnt; + break; + case SEG_PKT_CNT: + *val = &r_stats->seg_pkt_cnt; + break; + case TX_PKTS: + *val = &r_stats->tx_pkts; + break; + case TX_BYTES: + *val = &r_stats->tx_bytes; + break; + case TX_MORE: + *val = &r_stats->tx_more; + break; + case RESTART_QUEUE: + *val = &r_stats->restart_queue; + break; + case TX_BUSY: + *val = &r_stats->tx_busy; + break; + case RX_PKTS: + *val = &r_stats->rx_pkts; + break; + case RX_BYTES: + *val = &r_stats->rx_bytes; + break; + case RX_ERR_CNT: + *val = &r_stats->rx_err_cnt; + break; + case REUSE_PG_CNT: + *val = &r_stats->reuse_pg_cnt; + break; + case ERR_PKT_LEN: + *val = &r_stats->err_pkt_len; + break; + case ERR_BD_NUM: + *val = &r_stats->err_bd_num; + break; + case L2_ERR: + *val = &r_stats->l2_err; + break; + case L3L4_CSUM_ERR: + *val = &r_stats->l3l4_csum_err; + break; + case RX_MULTICAST: + *val = &r_stats->rx_multicast; + break; + default: + pr_info("val name [%s] is not existed.\n", val_name); + return HCLGE_ERR_CSQ_ERROR; + } + + return HCLGE_STATUS_SUCCESS; +} + +static int hns3_read_stat_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ 
+ struct stat_sw_mode_param *stat_sw_param = NULL; + struct hnae3_knic_private_info *kinfo = NULL; + u64 *ret_data = (u64 *)buf_out; + struct hnae3_handle *handle = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + u64 *val = NULL; + u32 ring_idx; + int ret; + + handle = nic_dev->ae_handle; + vport = hns3_cae_get_vport(handle); + hdev = vport->back; + kinfo = &handle->kinfo; + stat_sw_param = (struct stat_sw_mode_param *)buf_in; + if (!buf_out || out_size < sizeof(u64)) { + dev_err(&hdev->pdev->dev, "Get stat buf out is null.\n"); + return HCLGE_ERR_CSQ_ERROR; + } + + ring_idx = stat_sw_param->ring_idx; + if (ring_idx >= kinfo->num_tqps) { + dev_err(&hdev->pdev->dev, + "Get stat ring_idx[%d] >= num_tqps[%d].\n", ring_idx, + kinfo->num_tqps); + return HCLGE_ERR_CSQ_ERROR; + } + + if (stat_sw_param->is_rx) + ring_idx += kinfo->num_tqps; + + ret = hns3_get_stat_val(&nic_dev->ring[ring_idx].stats, + stat_sw_param->val_name, + MAX_STAT_NAME_LEN, &val); + if (ret || !val) { + pr_info("get stat val name [%s] error.\n", + stat_sw_param->val_name); + return HCLGE_ERR_CSQ_ERROR; + } + + *ret_data = le64_to_cpu(*val); + + return HCLGE_STATUS_SUCCESS; +} + +static int hns3_set_stat_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct stat_sw_mode_param *stat_sw_param = NULL; + struct hnae3_knic_private_info *kinfo = NULL; + struct hnae3_handle *handle = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + u64 *val = NULL; + u32 ring_idx; + int ret; + + handle = nic_dev->ae_handle; + vport = hns3_cae_get_vport(handle); + hdev = vport->back; + kinfo = &handle->kinfo; + stat_sw_param = (struct stat_sw_mode_param *)buf_in; + ring_idx = stat_sw_param->ring_idx; + if (ring_idx >= kinfo->num_tqps) { + dev_err(&hdev->pdev->dev, + "Set stat ring_idx[%d] >= num_tqps[%d].\n", ring_idx, + kinfo->num_tqps); + return HCLGE_ERR_CSQ_ERROR; + } + + if (stat_sw_param->is_rx) + ring_idx += kinfo->num_tqps; + + ret = hns3_get_stat_val(&nic_dev->ring[ring_idx].stats, + stat_sw_param->val_name, + MAX_STAT_NAME_LEN, &val); + if (ret || !val) { + pr_info("Set stat val name [%s] error.\n", + stat_sw_param->val_name); + return HCLGE_ERR_CSQ_ERROR; + } + + *val = cpu_to_le64(stat_sw_param->data); + + return HCLGE_STATUS_SUCCESS; +} + +int hns3_stat_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct stat_sw_mode_param *mode_param = NULL; + bool check = !buf_in || in_size < sizeof(struct stat_sw_mode_param); + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + mode_param = (struct stat_sw_mode_param *)buf_in; + if (mode_param->is_read == 1) + ret = hns3_read_stat_mode_cfg(nic_dev, buf_in, in_size, buf_out, + out_size); + else + ret = hns3_set_stat_mode_cfg(nic_dev, buf_in, in_size, buf_out, + out_size); + + return ret; +} + +int hns3_cae_clean_stats(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct net_device *netdev = net_priv->netdev; + struct hnae3_knic_private_info *kinfo = NULL; + struct hnae3_handle *handle = NULL; + struct hns3_enet_ring *ring = NULL; + struct hclge_vport *vport = NULL; + struct hclge_dev *hdev = NULL; + struct hclge_tqp *tqp = NULL; + int i; + + handle = net_priv->ae_handle; + kinfo = &handle->kinfo; + vport = container_of(handle, struct hclge_vport, nic); + hdev = vport->back; + + for (i = 0; i < 
kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(struct hlcge_tqp_stats)); + + ring = &net_priv->ring[i]; + memset(&ring->stats, 0, sizeof(struct ring_stats)); + ring = &net_priv->ring[i + kinfo->num_tqps]; + memset(&ring->stats, 0, sizeof(struct ring_stats)); + } + memset(&hdev->mac_stats, 0, sizeof(struct hclge_mac_stats)); + memset(&netdev->stats, 0, sizeof(struct net_device_stats)); + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h new file mode 100644 index 0000000000000000000000000000000000000000..47c3faf44ebb711dfc6d01f5d5ac121cfc133a3c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. */ + +#ifndef __HNS3_CAE_STAT_H__ +#define __HNS3_CAE_STAT_H__ + +#include "hclge_main.h" +#include "hclge_cmd.h" +#include "hns3_enet.h" + +#define MAX_STAT_NAME_LEN 24 + +struct stat_sw_mode_param { + u64 data; + u32 ring_idx; + u8 val_name[MAX_STAT_NAME_LEN]; + u8 is_read; + u8 is_rx; +}; + +enum stats_name_type { + IO_ERR_CNT = 1, + SW_ERR_CNT, + SEG_PKT_CNT, + TX_PKTS, + TX_BYTES, + TX_MORE, + RESTART_QUEUE, + TX_BUSY, + RX_PKTS, + RX_BYTES, + RX_ERR_CNT, + REUSE_PG_CNT, + ERR_PKT_LEN, + ERR_BD_NUM, + L2_ERR, + L3L4_CSUM_ERR, + RX_MULTICAST, +}; + +struct ring_stats_name { + u8 stats_name[MAX_STAT_NAME_LEN]; + u32 stats_namd_id; +}; + +int hns3_stat_mode_cfg(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_clean_stats(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.c new file mode 100644 index 0000000000000000000000000000000000000000..37c9e76dc1ecfa9b8cf9c662fb4250d756de80f2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.c @@ -0,0 +1,1059 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hclge_cmd.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_tm.h" + +static int hns3_cae_tm_schd_mode_set(struct hclge_dev *hdev, + enum hclge_opcode_type opcode, + u8 mode, u16 id) +{ + struct hclge_desc desc; + + if (mode > HCLGE_SCH_MODE_DWRR) + return -EINVAL; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, false); + if (mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = 1; + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(id); + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_schd_mode_get(struct hclge_dev *hdev, + enum hclge_opcode_type opcode, + u8 *mode, u16 id) +{ + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, opcode, true); + desc.data[0] = cpu_to_le32(id); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *mode = desc.data[1]; + + return ret; +} + +static int hns3_cae_tm_q_to_qs_set(struct hclge_dev *hdev, u16 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + map->nq_id = cpu_to_le16(q_id); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_q_to_qs_get(struct hclge_dev *hdev, u16 q_id, u16 *qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true); + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + map->nq_id = cpu_to_le16(q_id); + map->qset_id = cpu_to_le16(*qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *qs_id = map->qset_id & HNS3_CAE_QS_ID_MSK; + + return ret; +} + +static int hns3_cae_tm_qs_to_pri_set(struct hclge_dev *hdev, u16 qs_id, u8 pri) +{ + struct hclge_qs_to_pri_link_cmd *map = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_qs_to_pri_get(struct hclge_dev *hdev, u16 qs_id, u8 *pri) +{ + struct hclge_qs_to_pri_link_cmd *map = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true); + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + map->qs_id = cpu_to_le16(qs_id); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *pri = map->priority; + + return ret; +} + +static int hns3_cae_tm_qs_weight_set(struct hclge_dev *hdev, u16 qs_id, u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + weight = (struct hclge_qs_weight_cmd *)desc.data; + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_qs_weight_get(struct hclge_dev *hdev, + u16 qs_id, u8 *dwrr) +{ + struct hclge_qs_weight_cmd *weight = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true); + weight = (struct hclge_qs_weight_cmd *)desc.data; + weight->qs_id = cpu_to_le16(qs_id); + ret = 
hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *dwrr = weight->dwrr; + + return ret; +} + +static int hns3_cae_tm_pri_weight_set(struct hclge_dev *hdev, + u8 pri_id, u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + weight = (struct hclge_priority_weight_cmd *)desc.data; + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_pri_weight_get(struct hclge_dev *hdev, + u8 pri_id, u8 *dwrr) +{ + struct hclge_priority_weight_cmd *weight = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true); + weight = (struct hclge_priority_weight_cmd *)desc.data; + weight->pri_id = pri_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *dwrr = weight->dwrr; + + return ret; +} + +static int hns3_cae_tm_pri_pg_bitmap_set(struct hclge_dev *hdev, + u8 pg_id, u8 bitmap) +{ + struct hclge_pg_to_pri_link_cmd *map = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, + HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + map->pg_id = cpu_to_le16(pg_id); + map->pri_bit_map = bitmap; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_pri_pg_bitmap_get(struct hclge_dev *hdev, u8 pg_id, + u8 *bitmap) +{ + struct hclge_pg_to_pri_link_cmd *map = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true); + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + map->pg_id = cpu_to_le16(pg_id); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) + return ret; + + *bitmap = map->pri_bit_map; + + return 0; +} + +static int hns3_cae_tm_qs_bp_bitmap_set(struct hclge_dev *hdev, u8 tc, u8 gp_id, + u32 map) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + bp_to_qs_map_cmd->tc_id = tc; + bp_to_qs_map_cmd->qs_group_id = gp_id; + /* Qset and tc is one by one mapping */ + bp_to_qs_map_cmd->qs_bit_map = map; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_qs_bp_bitmap_get(struct hclge_dev *hdev, u8 tc, u8 gp_id, + u32 *map) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + true); + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + bp_to_qs_map_cmd->tc_id = tc; + bp_to_qs_map_cmd->qs_group_id = gp_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) + return ret; + + *map = bp_to_qs_map_cmd->qs_bit_map; + return 0; +} + +static int hns3_cae_tm_pri_shapping_set(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, + u8 pri_id, u32 shaper) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket == HCLGE_TM_SHAP_P_BUCKET ? 
+ HCLGE_OPC_TM_PRI_P_SHAPPING : HCLGE_OPC_TM_PRI_C_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, false); + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + shap_cfg_cmd->pri_id = pri_id; + shap_cfg_cmd->pri_shapping_para = shaper; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_pri_shapping_get(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, + u8 pri_id, u32 *shaper) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + int ret; + + opcode = bucket == HCLGE_TM_SHAP_P_BUCKET ? + HCLGE_OPC_TM_PRI_P_SHAPPING : HCLGE_OPC_TM_PRI_C_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, true); + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + shap_cfg_cmd->pri_id = pri_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + *shaper = shap_cfg_cmd->pri_shapping_para; + + return ret; +} + +static int hns3_cae_tm_pg_weight_set(struct hclge_dev *hdev, u8 pg_id, u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight = NULL; + struct hclge_desc desc; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + weight = (struct hclge_pg_weight_cmd *)desc.data; + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_pg_weight_get(struct hclge_dev *hdev, u8 pg_id, u8 *dwrr) +{ + struct hclge_pg_weight_cmd *weight = NULL; + struct hclge_desc desc; + int ret; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true); + weight = (struct hclge_pg_weight_cmd *)desc.data; + weight->pg_id = pg_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *dwrr = weight->dwrr; + + return ret; +} + +static int hns3_cae_tm_pg_shapping_set(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, + u8 pg_id, u32 shaper) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket == HCLGE_TM_SHAP_P_BUCKET ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, false); + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + shap_cfg_cmd->pg_id = pg_id; + shap_cfg_cmd->pg_shapping_para = shaper; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_pg_shapping_get(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, + u8 pg_id, u32 *shaper) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + int ret; + + opcode = bucket == HCLGE_TM_SHAP_P_BUCKET ? 
HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, true); + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + shap_cfg_cmd->pg_id = pg_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *shaper = shap_cfg_cmd->pg_shapping_para; + + return ret; +} + +static int hns3_cae_tm_port_shapping_set(struct hclge_dev *hdev, u32 shaper) +{ + struct hclge_port_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = HCLGE_OPC_TM_PORT_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, false); + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + shap_cfg_cmd->port_shapping_para = shaper; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_port_shapping_get(struct hclge_dev *hdev, u32 *shaper) +{ + struct hclge_port_shapping_cmd *shap_cfg_cmd = NULL; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + int ret; + + opcode = HCLGE_OPC_TM_PORT_SHAPPING; + hns3_cae_cmd_setup_basic_desc(&desc, opcode, true); + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) + *shaper = shap_cfg_cmd->port_shapping_para; + + return ret; +} + +static int hns3_cae_tm_ets_tc_dwrr_set(struct hclge_dev *hdev, u8 *weight, + u32 weight_cnt) +{ +#define DEFAULT_TC_WEIGHT 1 +#define DEFAULT_TC_OFFSET 14 + struct hns3_cae_ets_tc_weight_cmd *ets_weight = NULL; + struct hclge_desc desc; + u32 i; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hns3_cae_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < weight_cnt; i++) + ets_weight->tc_weight[i] = weight[i]; + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hns3_cae_cmd_send(hdev, &desc, 1); +} + +static int hns3_cae_tm_ets_tc_dwrr_get(struct hclge_dev *hdev, u8 *weight, + u32 weight_cnt) +{ + struct hns3_cae_ets_tc_weight_cmd *ets_weight = NULL; + struct hclge_desc desc; + int ret; + u32 i; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); + ets_weight = (struct hns3_cae_ets_tc_weight_cmd *)desc.data; + + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (!ret) { + for (i = 0; i < weight_cnt; i++) + weight[i] = ets_weight->tc_weight[i]; + } + + return ret; +} + +static int hns3_cae_tm_operate_nic_regs(struct hclge_dev *hdev, + u64 addr, u64 *value, u8 is_read) +{ +#define HNS3_WRITE_READ_REG_CMD 0x7014 + struct hclge_desc desc; + int ret; + + if (is_read) { + hns3_cae_cmd_setup_basic_desc(&desc, HNS3_WRITE_READ_REG_CMD, + true); + desc.data[0] = (u32)(addr & 0xffffffff); + desc.data[1] = (u32)(addr >> 32); + desc.data[4] = 32; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read addr 0x%llx failed! ret = %d.\n", + addr, ret); + return ret; + } + *value = (u64)desc.data[2] | ((u64)desc.data[3] << 32); + } else { + hns3_cae_cmd_setup_basic_desc(&desc, HNS3_WRITE_READ_REG_CMD, + false); + desc.data[0] = (u32)(addr & 0xffffffff); + desc.data[1] = (u32)(addr >> 32); + desc.data[2] = (u32)(*value & 0xffffffff); + desc.data[3] = (u32)(*value >> 32); + desc.data[4] = 32; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "write addr 0x%llx value 0x%llx failed! 
ret = %d.\n", + addr, *value, ret); + return ret; + } + } + + return 0; +} + +int hns3_cae_queue_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + struct hns3_cae_queue_cfg_info *out_info = + (struct hns3_cae_queue_cfg_info *)buf_out; + struct hns3_cae_queue_cfg_info *in_info = + (struct hns3_cae_queue_cfg_info *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_cae_queue_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + int is_read; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (in_info->queue_id >= MAX_QUEUE_ID) + return -EINVAL; + is_read = in_info->is_read; + if (is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_queue_cfg_info); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->queue_id = in_info->queue_id; + if (hns3_cae_tm_q_to_qs_get(hdev, in_info->queue_id, + &out_info->qs)) { + pr_err("%s,%d:get queue(%u) to qs failed!\n", __func__, + __LINE__, in_info->queue_id); + return -1; + } + } else { + if (in_info->qs >= MAX_QSET_ID) + return -EINVAL; + if (hns3_cae_tm_q_to_qs_set(hdev, in_info->queue_id, + in_info->qs)) { + pr_err("%s,%d:set queue(%u) to qs(%u) failed!\n", + __func__, __LINE__, + in_info->queue_id, in_info->qs); + return -1; + } + } + + return 0; +} + +static int hns3_cae_qs_set_new_map(int tc, u32 map, + struct hclge_dev *hdev, + struct hns3_cae_qs_cfg_info *in_info) +{ + u32 bp_map = map; + u16 offset; + u16 qs_id; + int gp_id; + + qs_id = in_info->qs_id; + gp_id = qs_id / 32; + offset = qs_id % 32; + + if (tc < MAX_TC_NUM) { + /* clear old bit */ + bp_map &= ~BIT(offset); + if (hns3_cae_tm_qs_bp_bitmap_set(hdev, tc, gp_id, bp_map)) { + pr_err("%s,%d:set qs(%u) bp map failed!\n", __func__, + __LINE__, qs_id); + return -1; + } + /* set new bit */ + if (hns3_cae_tm_qs_bp_bitmap_get + (hdev, in_info->tc, gp_id, &bp_map)) { + pr_err("%s,%d:get qs(%u) bp map failed!\n", __func__, + __LINE__, qs_id); + return -1; + } + + bp_map |= BIT(offset); + if (hns3_cae_tm_qs_bp_bitmap_set + (hdev, in_info->tc, gp_id, bp_map)) { + pr_err("%s,%d:set qs(%u) bp map failed!\n", __func__, + __LINE__, qs_id); + return -1; + } + } + + return 0; +} + +int hns3_cae_qs_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hns3_cae_qs_cfg_info *out_info = + (struct hns3_cae_qs_cfg_info *)buf_out; + struct hns3_cae_qs_cfg_info *in_info = + (struct hns3_cae_qs_cfg_info *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_qs_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + int is_read; + u16 offset; + u32 bp_map; + u16 qs_id; + int gp_id; + int ret; + int tc; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + is_read = in_info->is_read; + qs_id = in_info->qs_id; + if (qs_id >= MAX_QSET_ID) + return -EINVAL; + gp_id = qs_id / 32; + offset = qs_id % 32; + + for (tc = 0; tc < MAX_TC_NUM; tc++) { + ret = hns3_cae_tm_qs_bp_bitmap_get(hdev, tc, gp_id, &bp_map); + if (ret) { + pr_err("%s,%d:get qs(%u) bp map failed! 
ret = %d\n", + __func__, __LINE__, qs_id, ret); + return -1; + } + if (bp_map & BIT(offset)) + break; + } + + if (is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_qs_cfg_info); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->qs_id = qs_id; + out_info->tc = tc; + ret = hns3_cae_tm_qs_to_pri_get(hdev, qs_id, &out_info->pri); + if (ret) { + pr_err("%s,%d:get qs(%u) to pri failed! ret = %d\n", + __func__, __LINE__, qs_id, ret); + return -1; + } + + ret = hns3_cae_tm_schd_mode_get + (hdev, HCLGE_OPC_TM_QS_SCH_MODE_CFG, &out_info->mode, + qs_id); + if (ret) { + pr_err("%s,%d:get qs(%u) mode failed! ret = %d\n", + __func__, __LINE__, qs_id, ret); + return -1; + } + + ret = hns3_cae_tm_qs_weight_get(hdev, qs_id, + &out_info->weight); + if (ret) { + pr_err("%s,%d:get qs(%u) weight failed! ret = %d\n", + __func__, __LINE__, qs_id, ret); + return -1; + } + } else { + if ((in_info->flag & HNS3_TM_QSET_MAPPING_FLAG) && + hns3_cae_tm_qs_to_pri_set(hdev, qs_id, in_info->pri)) { + pr_err("%s,%d:set qs(%u) to pri(%u) failed!\n", + __func__, __LINE__, qs_id, in_info->pri); + return -1; + } + + if ((in_info->flag & HNS3_TM_QSET_MODE_CFG_FLAG) && + hns3_cae_tm_schd_mode_set(hdev, + HCLGE_OPC_TM_QS_SCH_MODE_CFG, + in_info->mode, qs_id)) { + pr_err("%s,%d:set qs(%u) mode(%u) failed!\n", __func__, + __LINE__, qs_id, in_info->mode); + return -1; + } + + if ((in_info->flag & HNS3_TM_QSET_WEIGHT_CFG_FLAG) && + hns3_cae_tm_qs_weight_set(hdev, qs_id, in_info->weight)) { + pr_err("%s,%d:set qs(%u) weight(%u) failed!\n", + __func__, __LINE__, qs_id, in_info->weight); + return -1; + } + + if ((in_info->flag & HNS3_TM_QSET_BP_CFG_FLAG) && + hns3_cae_qs_set_new_map(tc, bp_map, hdev, in_info)) { + pr_err("%s,%d:set qset %u bp cfg to tc %u failed!\n", + __func__, __LINE__, qs_id, in_info->tc); + return -1; + } + } + + return 0; +} + +static int hns3_cae_pri_pg_set_map(struct hclge_dev *hdev, + int cur_pg, u8 map, + struct hns3_cae_pri_cfg_info *in_info) +{ + u8 bitmap = map; + u16 pri_id = in_info->pri_id; + + if (pri_id >= MAX_TC_NUM) + return -EINVAL; + if (in_info->pg >= MAX_PG_NUM) + return -EINVAL; + /* clear old map */ + if (in_info->pg != cur_pg) { + bitmap &= ~BIT(pri_id); + if (hns3_cae_tm_pri_pg_bitmap_set(hdev, cur_pg, bitmap)) { + pr_err("%s,%d:set pg(%u) pri_map failed!\n", __func__, + __LINE__, cur_pg); + return -1; + } + + bitmap = 0; + if (hns3_cae_tm_pri_pg_bitmap_get(hdev, in_info->pg, + &bitmap)) { + pr_err("%s,%d:get pg(%u) pri_map failed!\n", __func__, + __LINE__, in_info->pg); + return -1; + } + } + + /* set new map */ + bitmap |= BIT(pri_id); + if (hns3_cae_tm_pri_pg_bitmap_set(hdev, in_info->pg, bitmap)) { + pr_err("%s,%d:set pg(%u) pri_map failed!\n", __func__, __LINE__, + in_info->pg); + return -1; + } + + return 0; +} + +int hns3_cae_pri_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hns3_cae_pri_cfg_info *out_info = + (struct hns3_cae_pri_cfg_info *)buf_out; + struct hns3_cae_pri_cfg_info *in_info = + (struct hns3_cae_pri_cfg_info *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_pri_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + int is_read; + u16 pri_id; + int cur_pg; + u8 bitmap; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + is_read = in_info->is_read; + pri_id = in_info->pri_id; + if 
(pri_id >= MAX_TC_NUM) + return -EINVAL; + + for (cur_pg = 0; cur_pg < MAX_PG_NUM; cur_pg++) { + bitmap = 0; + if (hns3_cae_tm_pri_pg_bitmap_get(hdev, cur_pg, &bitmap)) { + pr_err("%s,%d:get pg(%u) pri_map failed!\n", __func__, + __LINE__, cur_pg); + return -1; + } + + if (bitmap & BIT(pri_id)) + break; + } + + if (cur_pg == MAX_PG_NUM) { + pr_err("%s,%d:find pri(%u) to pg failed!\n", __func__, __LINE__, + pri_id); + return -1; + } + + if (is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_pri_cfg_info); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->pri_id = pri_id; + out_info->pg = cur_pg; + if (hns3_cae_tm_pri_shapping_get(hdev, HCLGE_TM_SHAP_C_BUCKET, + pri_id, + &out_info->c_shaping)) { + pr_err("%s,%d:get pri(%u) c shaping failed!\n", + __func__, __LINE__, pri_id); + return -1; + } + + if (hns3_cae_tm_pri_shapping_get(hdev, HCLGE_TM_SHAP_P_BUCKET, + pri_id, + &out_info->p_shaping)) { + pr_err("%s,%d:get pri(%u) p shaping failed!\n", + __func__, __LINE__, pri_id); + return -1; + } + + if (hns3_cae_tm_schd_mode_get + (hdev, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, &out_info->mode, + pri_id)) { + pr_err("%s,%d:get pri(%u) mode failed!\n", __func__, + __LINE__, pri_id); + return -1; + } + + if (hns3_cae_tm_pri_weight_get + (hdev, pri_id, &out_info->weight)) { + pr_err("%s,%d:set pri(%u) weight failed!\n", __func__, + __LINE__, pri_id); + return -1; + } + } else { + if ((in_info->flag & HNS3_TM_PRI_MAPPING_FLAG) && + hns3_cae_pri_pg_set_map(hdev, cur_pg, bitmap, in_info)) { + pr_err("%s,%d:set pri(%u) mapping to pg(%u) failed!\n", + __func__, __LINE__, pri_id, in_info->pg); + return -1; + } + + if ((in_info->flag & HNS3_TM_PRI_CSHAP_CFG_FLAG) && + hns3_cae_tm_pri_shapping_set(hdev, HCLGE_TM_SHAP_C_BUCKET, + pri_id, in_info->c_shaping)) { + pr_err("%s,%d:set pri(%u) c shaping(%u)) failed!\n", + __func__, __LINE__, pri_id, in_info->c_shaping); + return -1; + } + + if ((in_info->flag & HNS3_TM_PRI_PSHAP_CFG_FLAG) && + hns3_cae_tm_pri_shapping_set(hdev, HCLGE_TM_SHAP_P_BUCKET, + pri_id, in_info->p_shaping)) { + pr_err("%s,%d:set pri(%u) p shaping(%u) failed!\n", + __func__, __LINE__, pri_id, in_info->p_shaping); + return -1; + } + + if ((in_info->flag & HNS3_TM_PRI_MODE_CFG_FLAG) && + hns3_cae_tm_schd_mode_set(hdev, + HCLGE_OPC_TM_PRI_SCH_MODE_CFG, + in_info->mode, pri_id)) { + pr_err("%s,%d:set pri(%u) mode(%u) failed!\n", __func__, + __LINE__, pri_id, in_info->mode); + return -1; + } + + if ((in_info->flag & HNS3_TM_PRI_WEIGHT_CFG_FLAG) && + hns3_cae_tm_pri_weight_set(hdev, pri_id, + in_info->weight)) { + pr_err("%s,%d:set pri(%u) weight(%u) failed!\n", + __func__, __LINE__, pri_id, in_info->weight); + return -1; + } + } + + return 0; +} + +int hns3_cae_pg_cfg(const struct hns3_nic_priv *net_priv, void *buf_in, + u32 in_size, void *buf_out, u32 out_size) +{ + struct hns3_cae_pg_cfg_info *out_info = + (struct hns3_cae_pg_cfg_info *)buf_out; + struct hns3_cae_pg_cfg_info *in_info = + (struct hns3_cae_pg_cfg_info *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_pg_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + int is_read; + u16 pg_id; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + is_read = in_info->is_read; + pg_id = in_info->pg_id; + if (pg_id >= MAX_PG_NUM) + return -EINVAL; + + if (is_read) { + check = !buf_out || + out_size < sizeof(struct 
hns3_cae_pg_cfg_info); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->pg_id = pg_id; + if (hns3_cae_tm_pg_shapping_get(hdev, HCLGE_TM_SHAP_C_BUCKET, + pg_id, + &out_info->c_shaping)) { + pr_err("%s,%d:get pg(%d) c shaping failed!\n", __func__, + __LINE__, pg_id); + return -1; + } + + if (hns3_cae_tm_pg_shapping_get(hdev, HCLGE_TM_SHAP_P_BUCKET, + pg_id, + &out_info->p_shaping)) { + pr_err("%s,%d:get pg(%d) p shaping failed!\n", __func__, + __LINE__, pg_id); + return -1; + } + + if (hns3_cae_tm_schd_mode_get + (hdev, HCLGE_OPC_TM_PG_SCH_MODE_CFG, &out_info->mode, + pg_id)) { + pr_err("%s,%d:get pg(%d) mode failed!\n", __func__, + __LINE__, pg_id); + return -1; + } + + if (hns3_cae_tm_pg_weight_get(hdev, pg_id, + &out_info->weight)) { + pr_err("%s,%d:set pg(%d) weight failed!\n", __func__, + __LINE__, pg_id); + return -1; + } + + } else { + if ((in_info->flag & HNS3_TM_PG_CSHAP_CFG_FLAG) && + hns3_cae_tm_pg_shapping_set(hdev, HCLGE_TM_SHAP_C_BUCKET, + pg_id, in_info->c_shaping)) { + pr_err("%s,%d:set pg(%d) c shaping(%u) failed!\n", + __func__, __LINE__, pg_id, in_info->c_shaping); + return -1; + } + + if ((in_info->flag & HNS3_TM_PG_PSHAP_CFG_FLAG) && + hns3_cae_tm_pg_shapping_set(hdev, HCLGE_TM_SHAP_P_BUCKET, + pg_id, in_info->p_shaping)) { + pr_err("%s,%d:set pg(%d) p shaping(%u) failed!\n", + __func__, __LINE__, pg_id, in_info->p_shaping); + return -1; + } + + if ((in_info->flag & HNS3_TM_PG_MODE_CFG_FLAG) && + hns3_cae_tm_schd_mode_set(hdev, + HCLGE_OPC_TM_PG_SCH_MODE_CFG, + in_info->mode, pg_id)) { + pr_err("%s,%d:set pg(%d) mode(%d) failed!\n", __func__, + __LINE__, pg_id, in_info->mode); + return -1; + } + + if ((in_info->flag & HNS3_TM_PG_WEIGHT_CFG_FLAG) && + hns3_cae_tm_pg_weight_set(hdev, pg_id, in_info->weight)) { + pr_err("%s,%d:set pg(%d) weight(%d) failed!\n", + __func__, __LINE__, pg_id, in_info->weight); + return -1; + } + } + + return 0; +} + +int hns3_cae_port_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ + struct hns3_cae_port_cfg_info *out_info = + (struct hns3_cae_port_cfg_info *)buf_out; + struct hns3_cae_port_cfg_info *in_info = + (struct hns3_cae_port_cfg_info *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_port_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + u16 port_id; + int is_read; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + is_read = in_info->is_read; + port_id = in_info->port_id; + + if (is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_port_cfg_info); + if (check) { + pr_err("input param buf_out error in %s function\n", + __func__); + return -EFAULT; + } + out_info->port_id = port_id; + if (hns3_cae_tm_port_shapping_get(hdev, &out_info->shaping)) { + pr_err("%s,%d:get port p shaping failed!\n", __func__, + __LINE__); + return -1; + } + } else { + if ((in_info->flag & HNS3_TM_PORT_PSHAP_CFG_FLAG) && + hns3_cae_tm_port_shapping_set(hdev, in_info->shaping)) { + pr_err("%s,%d:set port p shaping(%u) failed!\n", + __func__, __LINE__, in_info->shaping); + return -1; + } + } + + return 0; +} + +int hns3_cae_ets_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size) +{ +#define HNS3_TM_ETS_PORT_SHAPING 0x10820850 +#define HNS3_TM_MAC_ID_MASK 0x7 + struct hns3_cae_ets_cfg_info *out_info = + (struct hns3_cae_ets_cfg_info 
*)buf_out; + struct hns3_cae_ets_cfg_info *in_info = + (struct hns3_cae_ets_cfg_info *)buf_in; + bool check = !buf_in || + in_size < sizeof(struct hns3_cae_ets_cfg_info) || + !buf_out || + out_size < sizeof(struct hns3_cae_ets_cfg_info); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + u8 weight[MAX_TC_NUM]; + int is_read; + u16 tc_id; + u8 mac_id; + u64 value; + u64 addr; + + if (check) { + pr_err("input parameter error in %s function\n", __func__); + return -EFAULT; + } + + is_read = in_info->is_read; + tc_id = in_info->tc_id; + if (tc_id >= MAX_TC_NUM) { + pr_err("tc id(%d) is invalid in %s function\n", tc_id, __func__); + return -EFAULT; + } + mac_id = in_info->mac_id & HNS3_TM_MAC_ID_MASK; + if (mac_id & 0x1) { + pr_err("mac id(%d) is invalid in %s function\n", mac_id, __func__); + return -EFAULT; + } + + addr = (u64)HNS3_TM_ETS_PORT_SHAPING + ((u64)mac_id << 20); + out_info->tc_id = tc_id; + out_info->mac_id = mac_id; + if (hns3_cae_tm_ets_tc_dwrr_get(hdev, weight, MAX_TC_NUM)) { + pr_err("%s,%d:get ets tc dwrr failed!\n", __func__, + __LINE__); + return -1; + } + if (is_read) { + out_info->weight = weight[tc_id]; + if (hns3_cae_tm_operate_nic_regs(hdev, addr, &value, + is_read)) { + pr_err("%s,%d:get ets port shaper failed!\n", __func__, + __LINE__); + return -1; + } + out_info->shaping = (u32)value; + } else { + weight[tc_id] = in_info->weight; + if ((in_info->flag & HNS3_TM_ETS_TC_CFG_FLAG) && + hns3_cae_tm_ets_tc_dwrr_set(hdev, weight, MAX_TC_NUM)) { + pr_err("%s,%d:set ets tc dwrr failed!\n", __func__, + __LINE__); + return -1; + } + value = (u64)in_info->shaping; + if ((in_info->flag & HNS3_TM_ETS_PSHAP_CFG_FLAG) && + hns3_cae_tm_operate_nic_regs(hdev, addr, &value, + is_read)) { + pr_err("%s,%d:set ets port shaping(%u) failed!\n", + __func__, __LINE__, in_info->shaping); + return -1; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.h new file mode 100644 index 0000000000000000000000000000000000000000..9f636e863557dee0b021058fe157d982d587f292 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_tm.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_TM_H__ +#define __HNS3_CAE_TM_H__ + +#include "hns3_enet.h" + +#define MAX_QUEUE_NUM 16 +#define MAX_PG_NUM 4 +#define MAX_TC_NUM 8 +#define MAX_QUEUE_ID 1024 +#define MAX_QSET_ID 1024 +#define HNS3_CAE_QS_ID_MSK (BIT(10) - 1) +#define HCLGE_OPC_TM_PORT_SCH_MODE_CFG 0x0811 + +#define HNS3_TM_QSET_MAPPING_FLAG 0x01 +#define HNS3_TM_QSET_MODE_CFG_FLAG 0x02 +#define HNS3_TM_QSET_WEIGHT_CFG_FLAG 0x04 +#define HNS3_TM_QSET_BP_CFG_FLAG 0x08 + +#define HNS3_TM_PRI_MAPPING_FLAG 0x01 +#define HNS3_TM_PRI_MODE_CFG_FLAG 0x02 +#define HNS3_TM_PRI_WEIGHT_CFG_FLAG 0x04 +#define HNS3_TM_PRI_CSHAP_CFG_FLAG 0x08 +#define HNS3_TM_PRI_PSHAP_CFG_FLAG 0x10 + +#define HNS3_TM_PG_MODE_CFG_FLAG 0x01 +#define HNS3_TM_PG_WEIGHT_CFG_FLAG 0x02 +#define HNS3_TM_PG_CSHAP_CFG_FLAG 0x04 +#define HNS3_TM_PG_PSHAP_CFG_FLAG 0x08 + +#define HNS3_TM_PORT_MODE_CFG_FLAG 0x01 +#define HNS3_TM_PORT_WEIGHT_CFG_FLAG 0x02 +#define HNS3_TM_PORT_PSHAP_CFG_FLAG 0x04 + +#define HNS3_TM_ETS_PSHAP_CFG_FLAG 0x01 +#define HNS3_TM_ETS_TC_CFG_FLAG 0x02 + +struct hns3_cae_ets_tc_weight_cmd { + u8 tc_weight[MAX_TC_NUM]; + u8 weight_offset; + u8 rsvd[15]; +}; + +struct hns3_cae_queue_cfg_info { + int is_read; + u16 queue_id; + u16 qs; +}; + +struct hns3_cae_qs_cfg_info { + int is_read; + u16 qs_id; + u8 pri; + u8 mode; + u8 weight; + u8 tc; + u8 flag; +}; + +struct hns3_cae_pri_cfg_info { + int is_read; + u16 pri_id; + u8 pg; + u32 c_shaping; + u32 p_shaping; + u8 mode; + u8 weight; + u8 flag; +}; + +struct hns3_cae_pg_cfg_info { + int is_read; + u16 pg_id; + u32 c_shaping; + u32 p_shaping; + u8 mode; + u8 weight; + u8 flag; +}; + +struct hns3_cae_port_cfg_info { + int is_read; + u16 port_id; + u32 mode; + u32 shaping; + u8 weight; + u8 flag; +}; + +struct hns3_cae_ets_cfg_info { + int is_read; + u16 tc_id; + u8 weight; + u32 shaping; + u8 mac_id; + u8 flag; +}; + +int hns3_cae_queue_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_qs_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +int hns3_cae_pri_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); +int hns3_cae_pg_cfg(const struct hns3_nic_priv *net_priv, void *buf_in, + u32 in_size, void *buf_out, u32 out_size); +int hns3_cae_port_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size); +int hns3_cae_ets_cfg(const struct hns3_nic_priv *net_priv, void *buf_in, + u32 in_size, void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c new file mode 100644 index 0000000000000000000000000000000000000000..99e11a4496fd683817f5ac6ff345551d98f7ca19 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2019 Hisilicon Limited. 
+ +#include +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_version.h" + +static int hns3_cae_get_commit_id(struct hnae3_handle *handle, u8 *commit_id, + u32 *ncl_version) +{ +#define COMMIT_ID_LEN 8 + struct hclge_vport *vport = hns3_cae_get_vport(handle); + struct hns3_cae_commit_id_param *resp = NULL; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret, i; + + hns3_cae_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMMIT_ID_GET, true); + resp = (struct hns3_cae_commit_id_param *)(desc.data); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get commit id failed %d\n", ret); + return ret; + } + + for (i = 0; i < COMMIT_ID_LEN; i++) + commit_id[i] = resp->commit_id[i]; + + commit_id[COMMIT_ID_LEN] = '\0'; + *ncl_version = resp->ncl_version; + + return 0; +} + +int hns3_cae_get_fw_ver(const struct hns3_nic_priv *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 out_size) +{ + struct hns3_cae_firmware_ver_param *out_buf = + (struct hns3_cae_firmware_ver_param *)buf_out; + bool check = !buf_out || + out_size < sizeof(struct hns3_cae_firmware_ver_param); + + struct hnae3_handle *handle = nic_dev->ae_handle; + struct hclge_vport *vport = + container_of(handle, struct hclge_vport, nic); + struct hclge_dev *hdev = vport->back; + u32 fw_ver; + + if (check) { + pr_err("input param buf_out error in %s function\n", __func__); + return -EFAULT; + } + + if (hns3_cae_get_commit_id(handle, out_buf->commit_id, + &out_buf->ncl_version)) + return -EFAULT; + + fw_ver = hdev->fw_version; + if (!fw_ver) + return -EFAULT; + out_buf->imp_ver = fw_ver; + + return 0; +} + +int hns3_cae_get_driver_ver(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ + if (!buf_out || out_size < strlen(HNS3_CAE_MOD_VERSION)) + return -ENOMEM; + + strncpy(buf_out, HNS3_CAE_MOD_VERSION, strlen(HNS3_CAE_MOD_VERSION)); + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h new file mode 100644 index 0000000000000000000000000000000000000000..89610b25622ea8f6d23f9708901cfd395fc7a703 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
*/ + +#ifndef __HNS3_CAE_VERSION_H__ +#define __HNS3_CAE_VERSION_H__ + +#define HNS3_CAE_MOD_VERSION "24.3.1" + +#define CMT_ID_LEN 8 +#define RESV_LEN 3 +#define FW_CMT_ID_LEN 9 +#define FW_RESV_LEN 3 + +struct hns3_cae_commit_id_param { + u8 commit_id[CMT_ID_LEN]; + u32 ncl_version; + u32 rsv[RESV_LEN]; +}; + +struct hns3_cae_firmware_ver_param { + u32 imp_ver; + u8 commit_id[FW_CMT_ID_LEN]; + u8 rsv[FW_RESV_LEN]; + u32 ncl_version; +}; + +int hns3_cae_get_fw_ver(const struct hns3_nic_priv *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 out_size); +int hns3_cae_get_driver_ver(const struct hns3_nic_priv *nic_dev, + void *buf_in, u32 in_size, void *buf_out, + u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.c new file mode 100644 index 0000000000000000000000000000000000000000..c626a5180044f102beb769a532d37f3bf6d1b672 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" +#include "hns3_enet.h" +#include "hns3_cae_cmd.h" +#include "hns3_cae_vlan.h" + +int hns3_cae_upmapping_cfg(const struct hns3_nic_priv *net_priv, + void *buf_in, u32 in_size, + void *buf_out, u32 out_size) +{ +#define HCLGE_OPC_VLANUP_MAPPING_VF_TX_CFG 0x0F10 +#define HCLGE_OPC_VLANUP_MAPPING_PORT_TX_CFG 0x0F11 + struct hns3_cae_vlanup_param *out_info = + (struct hns3_cae_vlanup_param *)buf_out; + struct hns3_cae_vlanup_param *in_info = + (struct hns3_cae_vlanup_param *)buf_in; + bool check = !buf_in || in_size < sizeof(struct hns3_cae_vlanup_param); + struct hclge_vport *vport = hns3_cae_get_vport(net_priv->ae_handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (check) { + pr_err("input param buf_in error in %s function\n", __func__); + return -EFAULT; + } + + if (in_info->is_read) { + check = !buf_out || + out_size < sizeof(struct hns3_cae_vlanup_param); + if (check) { + pr_err("input param buf_out is null in %s function\n", + __func__); + return 0; + } + + if (in_info->map_flag & HNS3_CAE_VLANUP_VF_CFG_FLAG) { + hns3_cae_cmd_setup_basic_desc + (&desc, HCLGE_OPC_VLANUP_MAPPING_VF_TX_CFG, true); + if (in_info->pf_valid) { + desc.data[0] |= HNS3_CAE_PFVLD_MASK; + desc.data[0] |= + (in_info->pf_id & HNS3_CAE_PFID_MASK); + out_info->pf_id = in_info->pf_id; + } + desc.data[0] |= + ((in_info->vf_id << 3) & HNS3_CAE_VFID_MASK); + desc.data[1] |= in_info->module & HNS3_CAE_MODULE_MASK; + out_info->vf_id = in_info->vf_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf up mapping read fail, ret = %d.\n", + ret); + return -EIO; + } + out_info->ti2oupm = desc.data[2]; + out_info->tv2pupm = desc.data[4]; + } else if (in_info->map_flag & HNS3_CAE_VLANUP_TC_CFG_FLAG) { + hns3_cae_cmd_setup_basic_desc + (&desc, HCLGE_OPC_VLANUP_MAPPING_PORT_TX_CFG, true); + desc.data[0] |= in_info->tc_id & HNS3_CAE_TCID_MASK; + desc.data[1] |= in_info->module & HNS3_CAE_MODULE_MASK; + out_info->tc_id = in_info->tc_id; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port up mapping read fail, ret = %d.\n", + ret); + return -EIO; + } + out_info->tp2nupm = desc.data[2]; + out_info->tag_en = + (desc.data[4] & HNS3_CAE_TAGEN_MASK) | + (((desc.data[4] >> 4) & 
HNS3_CAE_TAGEN_MASK) << 2); + } + out_info->module = in_info->module; + out_info->map_flag = in_info->map_flag; + } else { + if (in_info->map_flag & HNS3_CAE_VLANUP_VF_CFG_FLAG) { + hns3_cae_cmd_setup_basic_desc + (&desc, HCLGE_OPC_VLANUP_MAPPING_VF_TX_CFG, true); + if (in_info->pf_valid) { + desc.data[0] |= HNS3_CAE_PFVLD_MASK; + desc.data[0] |= + (in_info->pf_id & HNS3_CAE_PFID_MASK); + } + desc.data[0] |= + ((in_info->vf_id << 3) & HNS3_CAE_VFID_MASK); + desc.data[1] |= + (in_info->module & HNS3_CAE_MODULE_MASK); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf up mapping set fail, ret = %d.\n", + ret); + return -EIO; + } + + hns3_cae_cmd_reuse_desc(&desc, false); + if (in_info->map_flag & HNS3_CAE_VLANUP_TI2OUPM_FLAG) + desc.data[2] = in_info->ti2oupm; + if (in_info->map_flag & HNS3_CAE_VLANUP_TV2PUPM_FLAG) + desc.data[4] = in_info->tv2pupm; + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf up mapping set fail, ret = %d.\n", + ret); + return -EIO; + } + } else if (in_info->map_flag & HNS3_CAE_VLANUP_TC_CFG_FLAG) { + hns3_cae_cmd_setup_basic_desc + (&desc, HCLGE_OPC_VLANUP_MAPPING_PORT_TX_CFG, true); + desc.data[0] = (in_info->tc_id & HNS3_CAE_TCID_MASK); + desc.data[1] = (in_info->module & HNS3_CAE_MODULE_MASK); + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port up mapping set fail, ret = %d.\n", + ret); + return -EIO; + } + + hns3_cae_cmd_reuse_desc(&desc, false); + if (in_info->map_flag & HNS3_CAE_VLANUP_TP2NUPM_FLAG) + desc.data[2] = in_info->tp2nupm; + if (in_info->map_flag & HNS3_CAE_VLANUP_CTRL_CFG_FLAG) { + desc.data[4] = (in_info->tag_en & + HNS3_CAE_TAGEN_MASK) | + (((in_info->tag_en >> 2) & + HNS3_CAE_TAGEN_MASK) << 4); + } + ret = hns3_cae_cmd_send(hdev, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port up mapping set fail, ret = %d.\n", + ret); + return -EIO; + } + } + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.h new file mode 100644 index 0000000000000000000000000000000000000000..54739fb1ee040e87f1c944beb052a19e39144391 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_vlan.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2019 Hisilicon Limited. 
 */
+
+#ifndef __HNS3_CAE_VLAN_H__
+#define __HNS3_CAE_VLAN_H__
+
+#define HNS3_CAE_VLANUP_MODULE_FLAG	0x01
+#define HNS3_CAE_VLANUP_PF_CFG_FLAG	0x02
+#define HNS3_CAE_VLANUP_VF_CFG_FLAG	0x04
+#define HNS3_CAE_VLANUP_TC_CFG_FLAG	0x08
+#define HNS3_CAE_VLANUP_TI2OUPM_FLAG	0x10
+#define HNS3_CAE_VLANUP_TV2PUPM_FLAG	0x20
+#define HNS3_CAE_VLANUP_TP2NUPM_FLAG	0x40
+#define HNS3_CAE_VLANUP_CTRL_CFG_FLAG	0x80
+
+#define HNS3_CAE_TAGEN_MASK	0x3
+#define HNS3_CAE_TCID_MASK	0x7
+#define HNS3_CAE_PFID_MASK	0x7
+#define HNS3_CAE_VFID_MASK	0x7F8
+#define HNS3_CAE_PFVLD_MASK	0x1000
+#define HNS3_CAE_MODULE_MASK	0x1
+
+struct hns3_cae_vlanup_param {
+	u8 is_read;
+	u32 ti2oupm;
+	u32 tv2pupm;
+	u32 tp2nupm;
+	u32 vf_id;
+	u32 map_flag;
+	u8 pf_valid;
+	u8 pf_id;
+	u8 tc_id;
+	u8 tag_en;
+	u8 module;
+};
+
+int hns3_cae_upmapping_cfg(const struct hns3_nic_priv *net_priv,
+			   void *buf_in, u32 in_size,
+			   void *buf_out, u32 out_size);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.c
new file mode 100644
index 0000000000000000000000000000000000000000..d6295d9e742738c3eb16956caf30a7ce193d6917
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include
+#include
+
+#include "hclge_cmd.h"
+#include "hnae3.h"
+#include "hclge_main.h"
+#include "hns3_enet.h"
+#include "hns3_cae_cmd.h"
+#include "hns3_cae_xsfp.h"
+
+#define BD0_DATA_LEN	20
+#define BD1_DATA_LEN	24
+
+static int hns3_get_sfp_present(struct hnae3_handle *handle, u32 *present)
+{
+	struct hclge_vport *vport = hns3_cae_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_sfp_present_cmd *resp = NULL;
+	struct hclge_desc desc;
+	int ret;
+
+	hns3_cae_cmd_setup_basic_desc(&desc, XSFP_OPC_SFP_GET_PRESENT, true);
+	resp = (struct hclge_sfp_present_cmd *)desc.data;
+	ret = hns3_cae_cmd_send(hdev, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "get sfp present failed %d\n", ret);
+		return ret;
+	}
+
+	*present = resp->sfp_present;
+	return 0;
+}
+
+static int _hns3_get_sfpinfo(struct hnae3_handle *handle, u8 *buff,
+			     u16 offset, u16 size, u16 *outlen)
+{
+	struct hclge_vport *vport = hns3_cae_get_vport(handle);
+	struct hclge_desc desc[HCLGE_SFP_INFO_LEN];
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_sfp_info *resp = NULL;
+	u32 data_length;
+	u8 *temp_data = NULL;
+	u32 temp_len;
+	int ret;
+	u32 i;
+	u32 j;
+
+	memset(desc, 0x0, sizeof(desc));
+
+	for (i = 0; i < HCLGE_SFP_INFO_LEN; i++) {
+		hns3_cae_cmd_setup_basic_desc(&desc[i], XSFP_OPC_SFP_GET_INFO,
+					      true);
+		if (i == 0)
+			desc[0].data[0] = offset | (size << 16);
+
+		if (i < HCLGE_SFP_INFO_LEN - 1)
+			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+		else
+			desc[i].flag &= ~(cpu_to_le16(HCLGE_CMD_FLAG_NEXT));
+	}
+
+	ret = hns3_cae_cmd_send(hdev, desc, HCLGE_SFP_INFO_LEN);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "get sfp information cmd failed %d\n",
+			ret);
+		return ret;
+	}
+
+	for (i = 0; i < HCLGE_SFP_INFO_LEN; i++) {
+		resp = (struct hclge_sfp_info *)desc[i].data;
+		if (i == 0) {
+			*outlen = (resp[i].sfpinfo[0] >> 16) & 0xFFFF;
+			temp_len = *outlen;
+			data_length =
+			    (temp_len > BD0_DATA_LEN) ? BD0_DATA_LEN : temp_len;
+			temp_data = (u8 *)&resp->sfpinfo[1];
+		} else {
+			data_length =
+			    (temp_len > BD1_DATA_LEN) ?
+			    BD1_DATA_LEN : temp_len;
+			temp_data = (u8 *)&resp->sfpinfo[0];
+		}
+
+		for (j = 0; j < data_length; j++)
+			*buff++ = *temp_data++;
+
+		temp_len -= data_length;
+		if (temp_len == 0)
+			break;
+	}
+
+	return 0;
+}
+
+static int hns3_get_sfpinfo(struct hnae3_handle *handle, u8 *buff, u16 offset,
+			    u16 size, u16 *outlen)
+{
+	u16 tmp_size;
+	u8 *tmp_buff = NULL;
+	u16 tmp_outlen;
+	int ret;
+
+	tmp_buff = buff;
+	while (size) {
+		WARN_ON_ONCE(!tmp_buff);
+		if (size > HCLGE_SFP_INFO_SIZE)
+			tmp_size = HCLGE_SFP_INFO_SIZE;
+		else
+			tmp_size = size;
+		ret =
+		    _hns3_get_sfpinfo(handle, tmp_buff, offset, tmp_size,
+				      &tmp_outlen);
+		if (ret)
+			return ret;
+		offset += tmp_size;
+		size -= tmp_size;
+		tmp_buff += tmp_size;
+		*outlen += tmp_outlen;
+		if (tmp_size != tmp_outlen)
+			break;
+	}
+	return 0;
+}
+
+static int hns3_set_sfp_state(struct hnae3_handle *handle, bool en)
+{
+	struct hclge_vport *vport = hns3_cae_get_vport(handle);
+	struct hclge_sfp_enable_cmd *req = NULL;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	int ret;
+
+	hns3_cae_cmd_setup_basic_desc(&desc, XSFP_OPC_SFP_SET_STATUS, false);
+	req = (struct hclge_sfp_enable_cmd *)desc.data;
+	req->set_sfp_enable_flag = en;
+
+	ret = hns3_cae_cmd_send(hdev, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "set sfp on/off cmd failed %d\n",
+			ret);
+
+	return ret;
+}
+
+int hns3_xsfp_cfg(const struct hns3_nic_priv *net_priv, void *buf_in,
+		  u32 in_size, void *buf_out, u32 out_size)
+{
+	struct hns3_xsfp_info *xsfp_info_out = (struct hns3_xsfp_info *)buf_out;
+	bool check = !buf_in || in_size < sizeof(struct hns3_cfg_xsfp) ||
+		     !buf_out || out_size < sizeof(struct hns3_xsfp_info);
+	struct hnae3_handle *handle = hns3_get_handle(net_priv->netdev);
+	struct hns3_cfg_xsfp *param = (struct hns3_cfg_xsfp *)buf_in;
+	u32 sfp_present = 0;
+	int ret;
+
+	if (check)
+		return -EINVAL;
+
+	ret = hns3_get_sfp_present(handle, &sfp_present);
+	if (ret) {
+		pr_err("nic_get_sfp_present error.\n");
+		xsfp_info_out->light_module_status = 0xff;
+		return 0;
+	}
+
+	xsfp_info_out->light_module_status = (u8)sfp_present;
+
+	if (sfp_present) {
+		if (param->cfg_optype == OPC_QUERY_XSFP_INFO) {
+			ret = hns3_get_sfpinfo(handle, xsfp_info_out->sfp_info,
+					       0,
+					       STD_XSFP_INFO_A0_SIZE +
+					       STD_XSFP_INFO_A2_SIZE,
+					       &xsfp_info_out->eeprom_len);
+			if (ret) {
+				pr_err("hns3_get_sfpinfo error.\n");
+				return ret;
+			}
+		} else if (param->cfg_optype == OPC_QUERY_ALL_XSFP_INFO) {
+			ret = hns3_get_sfpinfo(handle, xsfp_info_out->sfp_info,
+					       0, STD_XSFP_INFO_MAX_SIZE,
+					       &xsfp_info_out->eeprom_len);
+			if (ret) {
+				pr_err("hns3_get_sfpinfo error.\n");
+				return ret;
+			}
+		} else if (param->cfg_optype == OPC_CONFIG_XSFP_TX_STATUS) {
+			ret = hns3_set_sfp_state(handle, param->status);
+			if (ret) {
+				pr_err("nic_set_sfp_state error.\n");
+				return ret;
+			}
+		} else {
+			pr_err("%s error: unsupported optype:%u.\n",
+			       __func__, param->cfg_optype);
+			ret = -EINVAL;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.h
new file mode 100644
index 0000000000000000000000000000000000000000..afcf21b61a7d484d7d7eb41a396bd628e9a55a47
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_xsfp.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2019 Hisilicon Limited.
*/ + +#ifndef __HNS3_CAE_XSFP_H__ +#define __HNS3_CAE_XSFP_H__ + +#define STD_XSFP_INFO_A0_SIZE 256 +#define STD_XSFP_INFO_A2_SIZE 256 +#define STD_XSFP_INFO_MAX_SIZE 640 +#define HCLGE_SFP_INFO_LEN 6 +#define HCLGE_SFP_INFO_SIZE 140 +/* SFP command */ +#define XSFP_OPC_SFP_GET_INFO 0x7100 +#define XSFP_OPC_SFP_GET_PRESENT 0x7101 +#define XSFP_OPC_SFP_SET_STATUS 0x7102 + +struct hclge_sfp_info { + u32 sfpinfo[6]; +}; + +struct hclge_sfp_enable_cmd { + u32 set_sfp_enable_flag; + u32 rsv[5]; +}; + +struct hclge_sfp_present_cmd { + u32 sfp_present; + u32 rsv[5]; +}; + +enum hns3_xsfp_opcode_type { + OPC_QUERY_XSFP_INFO = 0, + OPC_QUERY_ALL_XSFP_INFO, + OPC_CONFIG_XSFP_TX_STATUS +}; + +struct hns3_cfg_xsfp { + u32 cfg_optype; + u8 status; /* 1: enable 0: disable */ +}; + +struct hns3_xsfp_info { + u8 light_module_status; + u16 eeprom_len; + u8 sfp_info[STD_XSFP_INFO_MAX_SIZE]; +}; + +int hns3_xsfp_cfg(const struct hns3_nic_priv *net_priv, void *buf_in, + u32 in_size, void *buf_out, u32 out_size); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index ea5f8a84070dfd97e9644f9847c11bd2a46668a3..d2ec4c573bf86ff98f2556da3036e31cd5d7a28b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -4,44 +4,52 @@ #include "hnae3.h" #include "hns3_enet.h" -static -int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getets) return h->kinfo.dcb_ops->ieee_getets(h, ets); return -EOPNOTSUPP; } -static -int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setets) return h->kinfo.dcb_ops->ieee_setets(h, ets); return -EOPNOTSUPP; } -static -int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getpfc) return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); return -EOPNOTSUPP; } -static -int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setpfc) return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8dbbf597d8a2c46fc67c8b6133d6c606d61c6853 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#include +#include + +#include "hnae3.h" +#include "hns3_debugfs.h" +#include "hns3_enet.h" + +static struct dentry *hns3_dbgfs_root; + +static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = { + { + .name = "tm" + }, + { + .name = "tx_bd_info" + }, + { + .name = "rx_bd_info" + }, + { + .name = "mac_list" + }, + { + .name = "reg" + }, + { + .name = "queue" + }, + { + .name = "fd" + }, + /* keep common at the bottom and add new directory above */ + { + .name = "common" + }, +}; + +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd); +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, + unsigned int cmd); + +static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { + { + .name = "tm_priority", + .cmd = HNAE3_DBG_CMD_TM_PRI, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_qset", + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_map", + .cmd = HNAE3_DBG_CMD_TM_MAP, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_pg", + .cmd = HNAE3_DBG_CMD_TM_PG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_port", + .cmd = HNAE3_DBG_CMD_TM_PORT, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tc_sch_info", + .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_pause_cfg", + .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_pri_map", + .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_buf_cfg", + .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tx_bd_queue", + .cmd = HNAE3_DBG_CMD_TX_BD, + .dentry = HNS3_DBG_DENTRY_TX_BD, + .buf_len = HNS3_DBG_READ_LEN_4MB, + .init = hns3_dbg_bd_file_init, + }, + { + .name = "rx_bd_queue", + .cmd = HNAE3_DBG_CMD_RX_BD, + .dentry = HNS3_DBG_DENTRY_RX_BD, + .buf_len = HNS3_DBG_READ_LEN_4MB, + .init = hns3_dbg_bd_file_init, + }, + { + .name = "uc", + .cmd = HNAE3_DBG_CMD_MAC_UC, + .dentry = HNS3_DBG_DENTRY_MAC, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mc", + .cmd = HNAE3_DBG_CMD_MAC_MC, + .dentry = HNS3_DBG_DENTRY_MAC, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mac_tbl", + .cmd = HNAE3_DBG_CMD_MAC_TBL, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mng_tbl", + .cmd = HNAE3_DBG_CMD_MNG_TBL, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "loopback", + .cmd = HNAE3_DBG_CMD_LOOPBACK, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "interrupt_info", + .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = 
hns3_dbg_common_file_init, + }, + { + .name = "reset_info", + .cmd = HNAE3_DBG_CMD_RESET_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "imp_info", + .cmd = HNAE3_DBG_CMD_IMP_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ncl_config", + .cmd = HNAE3_DBG_CMD_NCL_CONFIG, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mac_tnl_status", + .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "bios_common", + .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ssu", + .cmd = HNAE3_DBG_CMD_REG_SSU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "igu_egu", + .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rpu", + .cmd = HNAE3_DBG_CMD_REG_RPU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ncsi", + .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rtc", + .cmd = HNAE3_DBG_CMD_REG_RTC, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ppp", + .cmd = HNAE3_DBG_CMD_REG_PPP, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rcb", + .cmd = HNAE3_DBG_CMD_REG_RCB, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tqp", + .cmd = HNAE3_DBG_CMD_REG_TQP, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mac", + .cmd = HNAE3_DBG_CMD_REG_MAC, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "dcb", + .cmd = HNAE3_DBG_CMD_REG_DCB, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "queue_map", + .cmd = HNAE3_DBG_CMD_QUEUE_MAP, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rx_queue_info", + .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tx_queue_info", + .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "fd_tcam", + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dentry = HNS3_DBG_DENTRY_FD, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "service_task_info", + .cmd = HNAE3_DBG_CMD_SERV_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "umv_info", + .cmd = HNAE3_DBG_CMD_UMV_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = 
hns3_dbg_common_file_init, + }, + { + .name = "vlan_config", + .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, +}; + +static void hns3_dbg_fill_content(char *content, u16 len, + const struct hns3_dbg_item *items, + const char **result, u16 size) +{ + char *pos = content; + u16 i; + + memset(content, ' ', len); + for (i = 0; i < size; i++) { + if (result) + strncpy(pos, result[i], strlen(result[i])); + else + strncpy(pos, items[i].name, strlen(items[i].name)); + + pos += strlen(items[i].name) + items[i].interval; + } + + *pos++ = '\n'; + *pos++ = '\0'; +} + +static const struct hns3_dbg_item rx_queue_info_items[] = { + { "QUEUE_ID", 2 }, + { "BD_NUM", 2 }, + { "BD_LEN", 2 }, + { "TAIL", 2 }, + { "HEAD", 2 }, + { "FBDNUM", 2 }, + { "PKTNUM", 5 }, + { "RING_EN", 2 }, + { "RX_RING_EN", 2 }, + { "BASE_ADDR", 10 }, +}; + +static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, + struct hnae3_ae_dev *ae_dev, char **result, + u32 index) +{ + u32 base_add_l, base_add_h; + u32 j = 0; + + sprintf(result[j++], "%8u", index); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_NUM_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_LEN_REG)); + + sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_TAIL_REG)); + + sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_FBDNUM_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); + + sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG) ? "on" : "off"); + + sprintf(result[j++], "%10s", "NA"); + + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_L_REG); + sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l); +} + +static int hns3_dbg_rx_queue_info(struct hnae3_handle *h, + char *buf, int len) +{ + char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + char *result[ARRAY_SIZE(rx_queue_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items, + NULL, ARRAY_SIZE(rx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. 
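+ * The dump bails out with -EPERM as soon as HNS3_NIC_STATE_INITED
+ * is cleared or HNS3_NIC_STATE_RESETTING is set, so an in-progress
+ * reset aborts the read instead of touching freed ring memory.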
+ */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_rx_queue_info(ring, ae_dev, result, i); + hns3_dbg_fill_content(content, sizeof(content), + rx_queue_info_items, + (const char **)result, + ARRAY_SIZE(rx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item tx_queue_info_items[] = { + { "QUEUE_ID", 2 }, + { "BD_NUM", 2 }, + { "TC", 2 }, + { "TAIL", 2 }, + { "HEAD", 2 }, + { "FBDNUM", 2 }, + { "OFFSET", 2 }, + { "PKTNUM", 5 }, + { "RING_EN", 2 }, + { "TX_RING_EN", 2 }, + { "BASE_ADDR", 10 }, +}; + +static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, + struct hnae3_ae_dev *ae_dev, char **result, + u32 index) +{ + u32 base_add_l, base_add_h; + u32 j = 0; + + sprintf(result[j++], "%8u", index); + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG)); + + sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TC_REG)); + + sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG)); + + sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG)); + + sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); + + sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG) ? "on" : "off"); + + sprintf(result[j++], "%10s", "NA"); + + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_L_REG); + sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l); +} + +static int hns3_dbg_tx_queue_info(struct hnae3_handle *h, + char *buf, int len) +{ + char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + char *result[ARRAY_SIZE(tx_queue_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items, + NULL, ARRAY_SIZE(tx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (i = 0; i < h->kinfo.num_tqps; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. 
+ */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = &priv->ring[i]; + hns3_dump_tx_queue_info(ring, ae_dev, result, i); + hns3_dbg_fill_content(content, sizeof(content), + tx_queue_info_items, + (const char **)result, + ARRAY_SIZE(tx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item queue_map_items[] = { + { "local_queue_id", 2 }, + { "global_queue_id", 2 }, + { "vector_id", 2 }, +}; + +static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(queue_map_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + int pos = 0; + int j; + u32 i; + + if (!h->ae_algo->ops->get_global_queue_id) + return -EOPNOTSUPP; + + for (i = 0; i < ARRAY_SIZE(queue_map_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), queue_map_items, + NULL, ARRAY_SIZE(queue_map_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!priv->ring || !priv->ring[i].tqp_vector) + continue; + + j = 0; + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", + h->ae_algo->ops->get_global_queue_id(h, i)); + sprintf(result[j++], "%d", + priv->ring[i].tqp_vector->vector_irq); + hns3_dbg_fill_content(content, sizeof(content), queue_map_items, + (const char **)result, + ARRAY_SIZE(queue_map_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item rx_bd_info_items[] = { + { "BD_IDX", 3 }, + { "L234_INFO", 2 }, + { "PKT_LEN", 3 }, + { "SIZE", 4 }, + { "RSS_HASH", 4 }, + { "FD_ID", 2 }, + { "VLAN_TAG", 2 }, + { "O_DM_VLAN_ID_FB", 2 }, + { "OT_VLAN_TAG", 2 }, + { "BD_BASE_INFO", 2 }, + { "PTYPE", 2 }, + { "HW_CSUM", 2 }, +}; + +static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv, + struct hns3_desc *desc, char **result, int idx) +{ + unsigned int j = 0; + + sprintf(result[j++], "%5d", idx); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); + sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len)); + sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id)); + sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag)); + sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); + sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); + sprintf(result[j++], "NA"); + sprintf(result[j++], "NA"); +} + +static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len) +{ + char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hns3_nic_priv *priv = d->handle->priv; + char *result[ARRAY_SIZE(rx_bd_info_items)]; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + struct hns3_desc *desc; + unsigned int i; + int pos = 0; + + if (d->qid >= d->handle->kinfo.num_tqps) { + dev_err(&d->handle->pdev->dev, + "queue%u is not in use\n", d->qid); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, + "Queue %u rx bd info:\n", d->qid); + hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items, 
+ NULL, ARRAY_SIZE(rx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps]; + for (i = 0; i < ring->desc_num; i++) { + desc = &ring->desc[i]; + + hns3_dump_rx_bd_info(priv, desc, result, i); + hns3_dbg_fill_content(content, sizeof(content), + rx_bd_info_items, (const char **)result, + ARRAY_SIZE(rx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item tx_bd_info_items[] = { + { "BD_IDX", 2 }, + { "ADDRESS", 13 }, + { "VLAN_TAG", 2 }, + { "SIZE", 2 }, + { "T_CS_VLAN_TSO", 2 }, + { "OT_VLAN_TAG", 3 }, + { "TV", 5 }, + { "OLT_VLAN_LEN", 2}, + { "PAYLEN_OL4CS", 2}, + { "BD_FE_SC_VLD", 2}, + { "MSS", 2}, +}; + +static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, + struct hns3_desc *desc, char **result, int idx) +{ + unsigned int j = 0; + + sprintf(result[j++], "%6d", idx); + sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size)); + sprintf(result[j++], "%#x", + le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv)); + sprintf(result[j++], "%10u", + le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen)); + sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); + sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss)); +} + +static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hns3_nic_priv *priv = d->handle->priv; + char *result[ARRAY_SIZE(tx_bd_info_items)]; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + struct hns3_desc *desc; + unsigned int i; + int pos = 0; + + if (d->qid >= d->handle->kinfo.num_tqps) { + dev_err(&d->handle->pdev->dev, + "queue%u is not in use\n", d->qid); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, + "Queue %u tx bd info:\n", d->qid); + hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items, + NULL, ARRAY_SIZE(tx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + ring = &priv->ring[d->qid]; + for (i = 0; i < ring->desc_num; i++) { + desc = &ring->desc[i]; + + hns3_dump_tx_bd_info(priv, desc, result, i); + hns3_dbg_fill_content(content, sizeof(content), + tx_bd_info_items, (const char **)result, + ARRAY_SIZE(tx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { + if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { + *index = i; + return 0; + } + } + + dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", + dbg_data->cmd); + return -EINVAL; +} + +static const struct hns3_dbg_func hns3_dbg_cmd_func[] = { + { + .cmd = HNAE3_DBG_CMD_QUEUE_MAP, + .dbg_dump = hns3_dbg_queue_map, + }, + { + .cmd = HNAE3_DBG_CMD_TX_BD, + .dbg_dump_bd = hns3_dbg_tx_bd_info, + }, + { + .cmd = HNAE3_DBG_CMD_RX_BD, + .dbg_dump_bd = hns3_dbg_rx_bd_info, + }, + { + .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO, + .dbg_dump = hns3_dbg_rx_queue_info, + }, + { + .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, + 
.dbg_dump = hns3_dbg_tx_queue_info, + }, +}; + +static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, + enum hnae3_dbg_cmd cmd, char *buf, int len) +{ + const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops; + const struct hns3_dbg_func *cmd_func; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) { + if (cmd == hns3_dbg_cmd_func[i].cmd) { + cmd_func = &hns3_dbg_cmd_func[i]; + if (cmd_func->dbg_dump) + return cmd_func->dbg_dump(dbg_data->handle, buf, + len); + else + return cmd_func->dbg_dump_bd(dbg_data, buf, + len); + } + } + + if (!ops->dbg_read_cmd) + return -EOPNOTSUPP; + + return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len); +} + +static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hns3_dbg_data *dbg_data = filp->private_data; + struct hnae3_handle *handle = dbg_data->handle; + struct hns3_nic_priv *priv = handle->priv; + ssize_t size = 0; + char **save_buf; + char *read_buf; + u32 index; + int ret; + + ret = hns3_dbg_get_cmd_index(dbg_data, &index); + if (ret) + return ret; + + mutex_lock(&handle->dbgfs_lock); + save_buf = &handle->dbgfs_buf[index]; + + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { + ret = -EBUSY; + goto out; + } + + if (*save_buf) { + read_buf = *save_buf; + } else { + read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); + if (!read_buf) { + ret = -ENOMEM; + goto out; + } + + /* save the buffer addr until the last read operation */ + *save_buf = read_buf; + + /* get data ready for the first time to read */ + ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, + read_buf, hns3_dbg_cmd[index].buf_len); + if (ret) + goto out; + } + + size = simple_read_from_buffer(buffer, count, ppos, read_buf, + strlen(read_buf)); + if (size > 0) { + mutex_unlock(&handle->dbgfs_lock); + return size; + } + +out: + /* free the buffer for the last read operation */ + if (*save_buf) { + kvfree(*save_buf); + *save_buf = NULL; + } + + mutex_unlock(&handle->dbgfs_lock); + return ret; +} + +static const struct file_operations hns3_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hns3_dbg_read, +}; + +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) +{ + struct dentry *entry_dir; + struct hns3_dbg_data *data; + u16 max_queue_num; + unsigned int i; + + entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; + max_queue_num = hns3_get_max_available_channels(handle); + data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + for (i = 0; i < max_queue_num; i++) { + char name[HNS3_DBG_FILE_NAME_LEN]; + + data[i].handle = handle; + data[i].cmd = hns3_dbg_cmd[cmd].cmd; + data[i].qid = i; + sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); + debugfs_create_file(name, 0400, entry_dir, &data[i], + &hns3_dbg_fops); + } + + return 0; +} + +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) +{ + struct hns3_dbg_data *data; + struct dentry *entry_dir; + + data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = handle; + data->cmd = hns3_dbg_cmd[cmd].cmd; + entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; + debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, + data, &hns3_dbg_fops); + + return 0; +} + +int hns3_dbg_init(struct hnae3_handle *handle) +{ + const char *name = pci_name(handle->pdev); + int ret; + u32 i; + + 
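+ /* One saved read buffer pointer per debugfs command; the buffers
+ * themselves are allocated lazily in hns3_dbg_read() and released
+ * after the last read operation or in hns3_dbg_uninit().
+ */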
handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, + ARRAY_SIZE(hns3_dbg_cmd), + sizeof(*handle->dbgfs_buf), + GFP_KERNEL); + if (!handle->dbgfs_buf) + return -ENOMEM; + + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = + debugfs_create_dir(name, hns3_dbgfs_root); + handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; + + for (i = 0; i < HNS3_DBG_DENTRY_COMMON; i++) + hns3_dbg_dentry[i].dentry = + debugfs_create_dir(hns3_dbg_dentry[i].name, + handle->hnae3_dbgfs); + + mutex_init(&handle->dbgfs_lock); + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { + if (!hns3_dbg_cmd[i].init) { + dev_err(&handle->pdev->dev, + "cmd %s lack of init func\n", + hns3_dbg_cmd[i].name); + ret = -EINVAL; + goto out; + } + + ret = hns3_dbg_cmd[i].init(handle, i); + if (ret) { + dev_err(&handle->pdev->dev, "failed to init cmd %s\n", + hns3_dbg_cmd[i].name); + goto out; + } + } + + return 0; + +out: + mutex_destroy(&handle->dbgfs_lock); + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + return ret; +} + +void hns3_dbg_uninit(struct hnae3_handle *handle) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) + if (handle->dbgfs_buf[i]) { + kvfree(handle->dbgfs_buf[i]); + handle->dbgfs_buf[i] = NULL; + } + + mutex_destroy(&handle->dbgfs_lock); + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; +} + +void hns3_dbg_register_debugfs(const char *debugfs_dir_name) +{ + hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); +} + +void hns3_dbg_unregister_debugfs(void) +{ + debugfs_remove_recursive(hns3_dbgfs_root); + hns3_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..902e16d99fb7c334e5cee2df7c9203b7f4dd79e7 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#ifndef __HNS3_DEBUGFS_H +#define __HNS3_DEBUGFS_H + +#include "hnae3.h" + +#define HNS3_DBG_READ_LEN 65536 +#define HNS3_DBG_READ_LEN_128KB 0x20000 +#define HNS3_DBG_READ_LEN_1MB 0x100000 +#define HNS3_DBG_READ_LEN_4MB 0x400000 +#define HNS3_DBG_WRITE_LEN 1024 + +#define HNS3_DBG_DATA_STR_LEN 32 +#define HNS3_DBG_INFO_LEN 256 +#define HNS3_DBG_ITEM_NAME_LEN 32 +#define HNS3_DBG_FILE_NAME_LEN 16 + +struct hns3_dbg_item { + char name[HNS3_DBG_ITEM_NAME_LEN]; + u16 interval; /* blank numbers after the item */ +}; + +struct hns3_dbg_data { + struct hnae3_handle *handle; + enum hnae3_dbg_cmd cmd; + u16 qid; +}; + +enum hns3_dbg_dentry_type { + HNS3_DBG_DENTRY_TM, + HNS3_DBG_DENTRY_TX_BD, + HNS3_DBG_DENTRY_RX_BD, + HNS3_DBG_DENTRY_MAC, + HNS3_DBG_DENTRY_REG, + HNS3_DBG_DENTRY_QUEUE, + HNS3_DBG_DENTRY_FD, + HNS3_DBG_DENTRY_COMMON, +}; + +struct hns3_dbg_dentry_info { + const char *name; + struct dentry *dentry; +}; + +struct hns3_dbg_cmd_info { + const char *name; + enum hnae3_dbg_cmd cmd; + enum hns3_dbg_dentry_type dentry; + u32 buf_len; + int (*init)(struct hnae3_handle *handle, unsigned int cmd); +}; + +struct hns3_dbg_func { + enum hnae3_dbg_cmd cmd; + int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len); + int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len); +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03bb300ec7beb8f8bcf953e817b2fc..89f1d0597844f95bda9891f94cdd17f3e53078a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -4,30 +4,64 @@ #include #include #include +#include #include +#include #include #include #include #include +#include #include #include #include #include +#include #include +#include #include +#include +#include "kcompat.h" #include "hnae3.h" #include "hns3_enet.h" +/* All hns3 tracepoints are defined by the include below, which + * must be included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "hns3_trace.h" + +#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) +#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) -static void hns3_clear_all_ring(struct hnae3_handle *h); -static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); +#define hns3_rl_err(fmt, ...) 
\ + do { \ + if (net_ratelimit()) \ + netdev_err(fmt, ##__VA_ARGS__); \ + } while (0) -static const char hns3_driver_name[] = "hns3"; -const char hns3_driver_version[] = VERMAGIC_STRING; -static const char hns3_driver_string[] = +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); + +const char hns3_driver_name[] = "hns3"; +char hns3_driver_version[] = VERMAGIC_STRING; +const char hns3_driver_string[] = "Hisilicon Ethernet Network Driver for Hip08 Family"; -static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; -static struct hnae3_client client; +const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +struct hnae3_client client; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +#define HNS3_INNER_VLAN_TAG 1 +#define HNS3_OUTER_VLAN_TAG 2 + +#define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U /* hns3_pci_tbl - PCI Device ID Table * @@ -36,7 +70,7 @@ static struct hnae3_client client; * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static const struct pci_device_id hns3_pci_tbl[] = { +const struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), @@ -61,7 +95,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) { struct hns3_enet_tqp_vector *tqp_vector = vector; - napi_schedule(&tqp_vector->napi); + napi_schedule_irqoff(&tqp_vector->napi); return IRQ_HANDLED; } @@ -77,6 +111,9 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) continue; + /* clear the affinity mask */ + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); + /* release the irq resource */ free_irq(tqp_vectors->vector_irq, tqp_vectors); tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -99,18 +136,21 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) continue; if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "TxRx", - txrx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "TxRx", txrx_int_idx++); txrx_int_idx++; } else if (tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Rx", - rx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Rx", rx_int_idx++); } else if (tqp_vectors->tx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Tx", - tx_int_idx++); + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Tx", tx_int_idx++); } else { /* Skip this unused q_vector */ continue; @@ -118,15 +158,19 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, - tqp_vectors->name, - tqp_vectors); + tqp_vectors->name, tqp_vectors); if (ret) { netdev_err(priv->netdev, "request irq(%d) fail\n", 
tqp_vectors->vector_irq); + hns3_nic_uninit_irq(priv); return ret; } + irq_set_affinity_hint(tqp_vectors->vector_irq, + &tqp_vectors->affinity_mask); + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; } @@ -142,6 +186,7 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); + enable_irq(tqp_vector->vector_irq); /* enable vector */ hns3_mask_vector_irq(tqp_vector, 1); @@ -195,24 +240,18 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. RL (Interrupt Rate Limiter) + * + * Default: enable interrupt coalescing self-adaptive and GL */ - - /* Default: enable interrupt coalescing self-adaptive and GL */ tqp_vector->tx_group.coal.gl_adapt_enable = 1; tqp_vector->rx_group.coal.gl_adapt_enable = 1; tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; - /* Default: disable RL */ - h->kinfo.int_rl_setting = 0; - - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; } @@ -233,35 +272,29 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + unsigned int queue_size = kinfo->num_tqps; int i, ret; - if (kinfo->num_tc <= 1) { + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { netdev_reset_tc(netdev); } else { - ret = netdev_set_num_tc(netdev, kinfo->num_tc); + ret = netdev_set_num_tc(netdev, tc_info->num_tc); if (ret) { netdev_err(netdev, "netdev_set_num_tc fail, ret=%d!\n", ret); return ret; } - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } + for (i = 0; i < tc_info->num_tc; i++) + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], + tc_info->tqp_offset[i]); } ret = netif_set_real_num_tx_queues(netdev, queue_size); if (ret) { netdev_err(netdev, - "netif_set_real_num_tx_queues fail, ret=%d!\n", - ret); + "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); return ret; } @@ -275,14 +308,66 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) return 0; } -static u16 hns3_get_max_available_channels(struct hnae3_handle *h) +u16 hns3_get_max_available_channels(struct hnae3_handle *h) +{ + u16 alloc_tqps, max_rss_size, rss_size; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; + + return min_t(u16, rss_size, max_rss_size); +} + +static void hns3_tqp_enable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg |= BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void hns3_tqp_disable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void 
hns3_free_rx_cpu_rmap(struct net_device *netdev) +{ +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +#endif +} + +static int hns3_set_rx_cpu_rmap(struct net_device *netdev) { - u16 free_tqps, max_rss_size, max_tqps; +#ifdef CONFIG_RFS_ACCEL + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_enet_tqp_vector *tqp_vector; + int i, ret; - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); - max_tqps = h->kinfo.num_tc * max_rss_size; + if (!netdev->rx_cpu_rmap) { + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); + if (!netdev->rx_cpu_rmap) + return -ENOMEM; + } - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, + tqp_vector->vector_irq); + if (ret) { + hns3_free_rx_cpu_rmap(netdev); + return ret; + } + } +#endif + return 0; } static int hns3_nic_net_up(struct net_device *netdev) @@ -296,33 +381,51 @@ static int hns3_nic_net_up(struct net_device *netdev) if (ret) return ret; - /* get irq resource for all vectors */ - ret = hns3_nic_init_irq(priv); - if (ret) { - netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); - return ret; - } + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); /* enable the vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); + /* enable rcb */ + for (j = 0; j < h->kinfo.num_tqps; j++) + hns3_tqp_enable(h->kinfo.tqp[j]); + /* start the ae_dev */ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; - if (ret) - goto out_start_err; + if (ret) { + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + while (j--) + hns3_tqp_disable(h->kinfo.tqp[j]); - clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + } - return 0; + return ret; +} -out_start_err: - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); +static void hns3_config_xps(struct hns3_nic_priv *priv) +{ + int i; - hns3_nic_uninit_irq(priv); + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; - return ret; + while (ring) { + int ret; + + ret = netif_set_xps_queue(priv->netdev, + &tqp_vector->affinity_mask, + ring->tqp->tqp_index); + if (ret) + netdev_warn(priv->netdev, + "set xps queue failed: %d", ret); + + ring = ring->next; + } + } } static int hns3_nic_net_open(struct net_device *netdev) @@ -332,6 +435,14 @@ static int hns3_nic_net_open(struct net_device *netdev) struct hnae3_knic_private_info *kinfo; int i, ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netdev_warn(netdev, "net open repeatedly!\n"); + return 0; + } + netif_carrier_off(netdev); ret = hns3_nic_set_real_num_queue(netdev); @@ -340,49 +451,85 @@ static int hns3_nic_net_open(struct net_device *netdev) ret = hns3_nic_net_up(netdev); if (ret) { - netdev_err(netdev, - "hns net up fail, ret=%d!\n", ret); + netdev_err(netdev, "net up fail, ret=%d!\n", ret); return ret; } kinfo = &h->kinfo; - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(netdev, i, - kinfo->prio_tc[i]); - } + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); + + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, true); + + 
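+ /* map each TX queue's XPS mask to its vector's affinity mask */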
hns3_config_xps(priv); + + if (netif_msg_ifup(h)) + netdev_info(netdev, "net open\n"); - priv->ae_handle->last_reset_time = jiffies; return 0; } +static void hns3_reset_tx_queue(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct netdev_queue *dev_queue; + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + dev_queue = netdev_get_tx_queue(ndev, + priv->ring[i].queue_index); + netdev_tx_reset_queue(dev_queue); + } +} + static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; int i; - if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) - return; - /* disable vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_disable(&priv->tqp_vector[i]); + /* disable rcb */ + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) ops->stop(priv->ae_handle); - /* free irq resources */ - hns3_nic_uninit_irq(priv); + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able + * to disable the ring through firmware when downing the netdev. + */ + if (!hns3_nic_resetting(netdev)) + hns3_clear_all_ring(priv->ae_handle, false); - hns3_clear_all_ring(priv->ae_handle); + hns3_reset_tx_queue(priv->ae_handle); } static int hns3_nic_net_stop(struct net_device *netdev) { - netif_tx_stop_all_queues(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return 0; + + if (netif_msg_ifdown(h)) + netdev_info(netdev, "net stop\n"); + + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, false); + netif_carrier_off(netdev); + netif_tx_disable(netdev); hns3_nic_net_down(netdev); @@ -405,6 +552,9 @@ static int hns3_nic_uc_unsync(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + if (h->ae_algo->ops->rm_uc_addr) return h->ae_algo->ops->rm_uc_addr(h, addr); @@ -433,31 +583,45 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) + flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; + else if (netdev->flags & IFF_ALLMULTI) + flags = HNAE3_USER_MPE; + + return flags; +} + static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, true, true); - else if (netdev->flags & IFF_ALLMULTI) - h->ae_algo->ops->set_promisc_mode(h, false, true); - else - h->ae_algo->ops->set_promisc_mode(h, false, false); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) - netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) { - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) - netdev_err(netdev, "sync mc address fail\n"); + new_flags = hns3_get_netdev_flags(netdev); - if (h->ae_algo->ops->update_mta_status) - h->ae_algo->ops->update_mta_status(h); - } + __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + __dev_mc_sync(netdev, 
hns3_nic_mc_sync, hns3_nic_mc_unsync); + + /* User mode Promisc mode enable and vlan filtering is disabled to + * let all packets in. + */ + h->netdev_flags = new_flags; + hns3_request_update_promisc_mode(h); +} + +void hns3_request_update_promisc_mode(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (ops->request_update_promisc_mode) + ops->request_update_promisc_mode(handle); } static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, - u16 *mss, u32 *type_cs_vlan_tso) + u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) { u32 l4_offset, hdr_len; union l3_hdr_info l3; @@ -469,7 +633,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; ret = skb_cow_head(skb, 0); - if (ret) + if (unlikely(ret < 0)) return ret; l3.hdr = skb_network_header(skb); @@ -481,15 +645,20 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, if (l3.v4->version == 4) l3.v4->check = 0; - /* tunnel packet.*/ + /* tunnel packet */ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifdef NETIF_F_GSO_PARTIAL if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { +#else + if (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM) { +#endif /* Software should clear the udp's checksum * field when tso is needed. */ @@ -506,34 +675,33 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, l3.v4->check = 0; } - /* normal or tunnel packet*/ + /* normal or tunnel packet */ l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; + hdr_len = (l4.tcp->doff << 2) + l4_offset; - /* remove payload length from inner pseudo checksum when tso*/ + /* remove payload length from inner pseudo checksum when tso */ l4_paylen = skb->len - l4_offset; csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(l4_paylen)); + *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; + /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae3_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; + trace_hns3_tso(skb); + return 0; } static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, u8 *il4_proto) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + union l3_hdr_info l3; unsigned char *l4_hdr; unsigned char *exthdr; u8 l4_proto_tmp; @@ -582,184 +750,103 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, return 0; } -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; - unsigned char *l2_hdr; - u8 l4_proto = ol4_proto; - u32 ol2_len; - u32 ol3_len; - u32 ol4_len; - u32 l2_len; - u32 l3_len; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); +/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL + * and it is udp packet, which has a dest port as the IANA assigned. + * the hardware is expected to do the checksum offload, but the + * hardware will not do the checksum offload when udp dest port is + * 4789, 4790 or 6081. 
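+ * In that case the caller pads the frame to HNS3_MIN_TUN_PKT_LEN and
+ * falls back to skb_checksum_help() so the checksum is done in software.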
+ */ +static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +{ +#ifndef IANA_VXLAN_UDP_PORT +#define IANA_VXLAN_UDP_PORT 4789 +#endif - /* compute L2 header size for normal packet, defined in 2 Bytes */ - l2_len = l3.hdr - skb->data; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); +#ifndef GENEVE_UDP_PORT +#define GENEVE_UDP_PORT 6081 +#endif - /* tunnel packet*/ - if (skb->encapsulation) { - /* compute OL2 header size, defined in 2 Bytes */ - ol2_len = l2_len; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); - - /* compute OL3 header size, defined in 4 Bytes */ - ol3_len = l4.hdr - l3.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); - - /* MAC in UDP, MAC in GRE (0x6558)*/ - if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { - /* switch MAC header ptr from outer to inner header.*/ - l2_hdr = skb_inner_mac_header(skb); - - /* compute OL4 header size, defined in 4 Bytes. */ - ol4_len = l2_hdr - l4.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - ol4_len >> 2); - - /* switch IP header ptr from outer to inner header */ - l3.hdr = skb_inner_network_header(skb); - - /* compute inner l2 header size, defined in 2 Bytes. */ - l2_len = l3.hdr - l2_hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); - } else { - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } +#ifndef IANA_VXLAN_GPE_UDP_PORT +#define IANA_VXLAN_GPE_UDP_PORT 4790 +#endif - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); + union l4_hdr_info l4; - l4_proto = il4_proto; - } + l4.hdr = skb_transport_header(skb); - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + if (!(!skb->encapsulation && + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT) || + l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) + return false; - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ - switch (l4_proto) { - case IPPROTO_TCP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); - break; - case IPPROTO_SCTP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); - break; - case IPPROTO_UDP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); - break; - default: - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } + return true; } -/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL - * and it is udp packet, which has a dest port as the IANA assigned. - * the hardware is expected to do the checksum offload, but the - * hardware will not do the checksum offload when udp dest port is - * 4789. 
- */ -static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u32 *ol_type_vlan_len_msec) { -#define IANA_VXLAN_PORT 4789 - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; + u32 l2_len, l3_len, l4_len; + unsigned char *il2_hdr; + union l3_hdr_info l3; + union l4_hdr_info l4; + l3.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); - if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) - return false; + /* compute OL2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); - skb_checksum_help(skb); + /* compute OL3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); - return true; + il2_hdr = skb_inner_mac_header(skb); + /* compute OL4 header size, defined in 4 Bytes */ + l4_len = il2_hdr - l4.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); + + /* define outer network header type */ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); + } + + if (ol4_proto == IPPROTO_UDP) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + else if (ol4_proto == IPPROTO_GRE) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); } -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + unsigned char *l2_hdr = skb->data; u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + l4.hdr = skb_transport_header(skb); l3.hdr = skb_network_header(skb); - /* define OL3 type and tunnel type(OL4).*/ + /* handle encapsulation skb */ if (skb->encapsulation) { - /* define outer network header type.*/ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb_is_gso(skb)) - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); - else - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); - - } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); - } - - /* define tunnel type(OL4).*/ - switch (l4_proto) { - case IPPROTO_UDP: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); - break; - case IPPROTO_GRE: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); - break; - default: + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { /* drop the skb tunnel packet if hardware don't support, * because hardware can't calculate csum when TSO. 
*/ @@ -769,52 +856,70 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* the stack computes the IP header already, * driver calculate l4 checksum when not TSO. */ - skb_checksum_help(skb); - return 0; + return skb_checksum_help(skb); } + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); l4_proto = il4_proto; } if (l3.v4->version == 4) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ if (skb_is_gso(skb)) - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); } + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - break; + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + if (ret) + return ret; + + return skb_checksum_help(skb); + } - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; case IPPROTO_SCTP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -826,35 +931,33 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* the stack computes the IP header already, * driver calculate l4 checksum when not TSO. 
*/ - skb_checksum_help(skb); - return 0; + return skb_checksum_help(skb); } return 0; } -static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, + struct sk_buff *skb) { - /* Config bd buffer end */ - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); -} + struct hnae3_handle *handle = tx_ring->tqp->handle; + struct vlan_ethhdr *vhdr; + int rc; -static int hns3_fill_desc_vtags(struct sk_buff *skb, - struct hns3_enet_ring *tx_ring, - u32 *inner_vlan_flag, - u32 *out_vlan_flag, - u16 *inner_vtag, - u16 *out_vtag) -{ -#define HNS3_TX_VLAN_PRIO_SHIFT 13 + if (!(skb->protocol == htons(ETH_P_8021Q) || + skb_vlan_tag_present(skb))) + return 0; + + /* Since HW limitation, if port based insert VLAN enabled, only one VLAN + * header is allowed in skb, otherwise it will cause RAS error. + */ + if (unlikely(skb_vlan_tagged_multi(skb) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_ENABLE)) + return -EINVAL; if (skb->protocol == htons(ETH_P_8021Q) && - !(tx_ring->tqp->handle->kinfo.netdev->features & - NETIF_F_HW_VLAN_CTAG_TX)) { + !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { /* When HW VLAN acceleration is turned off, and the stack * sets the protocol to 802.1q, the driver just need to * set the protocol to the encapsulated ethertype. @@ -864,327 +967,573 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, } if (skb_vlan_tag_present(skb)) { - u16 vlan_tag; - - vlan_tag = skb_vlan_tag_get(skb); - vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; - /* Based on hw strategy, use out_vtag in two layer tag case, * and use inner_vtag in one tag case. 
*/ - if (skb->protocol == htons(ETH_P_8021Q)) { - hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; - } else { - hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); - *inner_vtag = vlan_tag; - } - } else if (skb->protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *vhdr; - int rc; + if (skb->protocol == htons(ETH_P_8021Q) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + rc = HNS3_OUTER_VLAN_TAG; + else + rc = HNS3_INNER_VLAN_TAG; - rc = skb_cow_head(skb, 0); - if (rc < 0) - return rc; - vhdr = (struct vlan_ethhdr *)skb->data; - vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) - << HNS3_TX_VLAN_PRIO_SHIFT); + skb->protocol = vlan_get_protocol(skb); + return rc; } + rc = skb_cow_head(skb, 0); + if (unlikely(rc < 0)) + return rc; + + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) + & VLAN_PRIO_MASK); + skb->protocol = vlan_get_protocol(skb); return 0; } -static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) { - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; - struct hns3_desc *desc = &ring->desc[ring->next_to_use]; u32 ol_type_vlan_len_msec = 0; - u16 bdtp_fe_sc_vld_ra_ri = 0; u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; + u32 paylen = skb->len; u16 inner_vtag = 0; u16 out_vtag = 0; - u32 paylen = 0; u16 mss = 0; - u8 ol4_proto; - u8 il4_proto; int ret; - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->length = size; - desc_cb->dma = dma; - desc_cb->type = type; - - /* now, fill the descriptor */ - desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + ret = hns3_handle_vtags(ring, skb); + if (unlikely(ret < 0)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_vlan_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } else if (ret == HNS3_INNER_VLAN_TAG) { + inner_vtag = skb_vlan_tag_get(skb); + inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + } else if (ret == HNS3_OUTER_VLAN_TAG) { + out_vtag = skb_vlan_tag_get(skb); + out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + 1); + } - if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; + desc_cb->send_bytes = skb->len; - ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, - &ol_type_vlan_len_msec, - &inner_vtag, &out_vtag); - if (unlikely(ret)) - return ret; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ol4_proto, il4_proto; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - skb_reset_mac_len(skb); + skb_reset_mac_len(skb); - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) - return ret; - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (ret) - return ret; + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_l4_proto_err++; + 
u64_stats_update_end(&ring->syncp); + return ret; + } - ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); - if (ret) - return ret; + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_l2l3l4_err++; + u64_stats_update_end(&ring->syncp); + return ret; } - /* Set txbd */ - desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = - cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_tso_err++; + u64_stats_update_end(&ring->syncp); + return ret; + } } - /* move ring pointer to next.*/ - ring_ptr_move_fw(ring, next_to_use); + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le32(paylen); + desc->tx.mss = cpu_to_le16(mss); + desc->tx.vlan_tag = cpu_to_le16(inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); return 0; } -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + unsigned int size, enum hns_desc_type type) { - unsigned int frag_buf_num; - unsigned int k; - int sizeoflast; - int ret; +#define HNS3_LIKELY_BD_NUM 1 + + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + struct device *dev = ring_to_dev(ring); + struct skb_frag_struct *frag; + unsigned int frag_buf_num; + int k, sizeoflast; + dma_addr_t dma; + + if (type == DESC_TYPE_FRAGLIST_SKB || + type == DESC_TYPE_SKB) { + struct sk_buff *skb = (struct sk_buff *)priv; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + } else { + frag = (struct skb_frag_struct *)priv; + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } + + if (unlikely(dma_mapping_error(dev, dma))) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + if (likely(size <= HNS3_MAX_BD_SIZE)) { + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16(size); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); + + trace_hns3_tx_desc(ring); + ring_ptr_move_fw(ring, next_to_use); + return HNS3_LIKELY_BD_NUM; + } - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; - /* When the frag size is bigger than hardware, split this frag */ + /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - ret = hns3_fill_desc(ring, priv, - (k == frag_buf_num - 1) ? - sizeoflast : HNS3_MAX_BD_SIZE, - dma + HNS3_MAX_BD_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - (type == DESC_TYPE_SKB && !k) ? 
- DESC_TYPE_SKB : DESC_TYPE_PAGE); - if (ret) - return ret; + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); + desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? + (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); + + trace_hns3_tx_desc(ring); + /* move ring pointer to next */ + ring_ptr_move_fw(ring, next_to_use); + + desc = &ring->desc[ring->next_to_use]; } - return 0; + return frag_buf_num; } -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num) { - struct sk_buff *skb = *out_skb; - struct skb_frag_struct *frag; - int bdnum_for_frag; - int frag_num; - int buf_num; - int size; + unsigned int size; int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; + + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + if (size) { + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - frag_num = skb_shinfo(skb)->nr_frags; - for (i = 0; i < frag_num; i++) { - frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) - return -ENOMEM; + if (!size) + continue; + + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; + + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } - buf_num += bdnum_for_frag; + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; } - if (buf_num > ring_space(ring)) - return -EBUSY; + return bd_num; +} - *bnum = buf_num; - return 0; +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num, + unsigned int recursion_level) +{ +#define HNS3_MAX_RECURSION_LEVEL 24 + + struct sk_buff *frag_skb; + + /* If the total len is within the max bd limit */ + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && + !skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) + return skb_shinfo(skb)->nr_frags + 1U; + + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) + return UINT_MAX; + + bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); + + if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + + skb_walk_frags(skb, frag_skb) { + bd_num = hns3_tx_bd_num(frag_skb, bd_size, bd_num, + recursion_level + 1); + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + return bd_num; } -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) { - struct sk_buff *skb = *out_skb; - int buf_num; + if (!skb->encapsulation) + return skb_transport_offset(skb) + tcp_hdrlen(skb); - /* No. 
of segments (plus a header) */ - buf_num = skb_shinfo(skb)->nr_frags + 1; + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); +} - if (buf_num > ring_space(ring)) - return -EBUSY; +/* HW needs every continuous 8 buffer data to be larger than MSS, + * we simplify it by ensuring skb_headlen + the first continuous + * 7 frags to be larger than gso header len + mss, and the remaining + * continuous 7 frags to be larger than MSS except the last 7 frags. + */ +static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num) +{ + unsigned int tot_len = 0; + int i; + + for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++) + tot_len += bd_size[i]; + + /* ensure the first 8 frags are greater than mss + header */ + if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] < + skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) + return true; + + /* ensure every continuous 7 buffers are greater than mss + * except the last one. + */ + for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) { + tot_len -= bd_size[i]; + tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U]; + + if (tot_len < skb_shinfo(skb)->gso_size) + return true; + } + + return false; +} + +void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) +{ + int i; + + for (i = 0; i < MAX_SKB_FRAGS; i++) + size[i] = skb_frag_size(&shinfo->frags[i]); +} + +static int hns3_skb_linearize(struct hns3_enet_ring *ring, struct sk_buff *skb, + unsigned int bd_num) +{ + /* 'bd_num == UINT_MAX' means the skb's fraglist has a + * recursion level of over HNS3_MAX_RECURSION_LEVEL. + */ + if (bd_num == UINT_MAX) { + u64_stats_update_begin(&ring->syncp); + ring->stats.over_max_recursion++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + /* The skb->len has exceeded the hw limitation, linearization + * will not help. + */ + if (skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.hw_limitation++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } - if (__skb_linearize(skb)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } return 0; } -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, + struct net_device *netdev, + struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; + unsigned int bd_num; + + bd_num = hns3_tx_bd_num(skb, bd_size, 0, 0); + if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { + if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && + !hns3_skb_need_linearized(skb, bd_size, bd_num)) { + trace_hns3_over_8bd(skb); + goto out; + } + + if (hns3_skb_linearize(ring, skb, bd_num)) + return -ENOMEM; + + bd_num = hns3_tx_bd_count(skb->len); + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_copy++; + u64_stats_update_end(&ring->syncp); + } + +out: + if (likely(ring_space(ring) >= bd_num)) + return bd_num; + + netif_stop_subqueue(netdev, ring->queue_index); + smp_mb(); /* Memory barrier before checking ring_space */ + + /* Start queue in case hns3_clean_tx_ring has just made room + * available and has not seen the queue stopped state performed + * by netif_stop_subqueue above.
+ */ + if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netif_start_subqueue(netdev, ring->queue_index); + return bd_num; + } + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + return -EBUSY; +} + +static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) { struct device *dev = ring_to_dev(ring); unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ - if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || + ring->desc_cb[ring->next_to_use].type == + DESC_TYPE_FRAGLIST_SKB) dma_unmap_single(dev, ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); - else + else if (ring->desc_cb[ring->next_to_use].length) dma_unmap_page(dev, ring->desc_cb[ring->next_to_use].dma, ring->desc_cb[ring->next_to_use].length, DMA_TO_DEVICE); - /* rollback one */ - ring_ptr_move_bw(ring, next_to_use); + ring->desc_cb[ring->next_to_use].length = 0; + ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; + } +} + +static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, enum hns_desc_type type) +{ + unsigned int size = skb_headlen(skb); + struct sk_buff *frag_skb; + int i, ret, bd_num = 0; + + if (size) { + ret = hns3_fill_desc(ring, skb, size, type); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + + size = skb_frag_size(frag); + if (!size) + continue; + + ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, + DESC_TYPE_FRAGLIST_SKB); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + return bd_num; +} + +static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, + bool doorbell) +{ + ring->pending_buf += num; + + if (!doorbell) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_more++; + u64_stats_update_end(&ring->syncp); + return; } + + if (!ring->pending_buf) + return; + + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + + writel(ring->pending_buf, + ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + ring->pending_buf = 0; } netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_nic_ring_data *ring_data = - &tx_ring_data(priv, skb->queue_mapping); - struct hns3_enet_ring *ring = ring_data->ring; - struct device *dev = priv->dev; + struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; - int next_to_use_head; - int next_to_use_frag; - dma_addr_t dma; - int buf_num; - int seg_num; - int size; + int pre_ntu, next_to_use_head; int ret; - int i; - /* Prefetch the data used later */ - prefetch(skb->data); - - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { - case -EBUSY: - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); - goto out_net_tx_busy; - case -ENOMEM: u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; u64_stats_update_end(&ring->syncp); - netdev_err(netdev, "no memory to xmit!\n"); - goto out_err_tx_ok; - default: - break; + return NETDEV_TX_OK; } - /* No. of segments (plus a header) */ - seg_num = skb_shinfo(skb)->nr_frags + 1; - /* Fill the first part */ - size = skb_headlen(skb); + /* Prefetch the data used later */ + prefetch(skb->data); - next_to_use_head = ring->next_to_use; + ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); + if (unlikely(ret <= 0)) { + if (ret == -EBUSY) { + hns3_tx_doorbell(ring, 0, true); + return NETDEV_TX_BUSY; + } - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX head DMA map failed\n"); - ring->stats.sw_err_cnt++; + hns3_rl_err(netdev, "xmit error: %d!\n", ret); goto out_err_tx_ok; } - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) - goto head_dma_map_err; - - next_to_use_frag = ring->next_to_use; - /* Fill the fragments */ - for (i = 1; i < seg_num; i++) { - frag = &skb_shinfo(skb)->frags[i - 1]; - size = skb_frag_size(frag); - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); - ring->stats.sw_err_cnt++; - goto frag_dma_map_err; - } - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + next_to_use_head = ring->next_to_use; - if (ret) - goto frag_dma_map_err; - } + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; - /* Complete translate all packets */ - dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); - netdev_tx_sent_queue(dev_queue, skb->len); + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); + if (unlikely(ret <= 0)) + goto fill_err; - wmb(); /* Commit all data before submit */ + pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : + (ring->desc_num - 1); + ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= + cpu_to_le16(BIT(HNS3_TXD_FE_B)); - hnae3_queue_xmit(ring->tqp, buf_num); + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); + if (!netdev_xmit_more()) { + netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes); + hns3_tx_doorbell(ring, ret, true); + } else { + dql_queued(&dev_queue->dql, desc_cb->send_bytes); + hns3_tx_doorbell(ring, ret, netif_tx_queue_stopped(dev_queue)); + } return NETDEV_TX_OK; -frag_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_frag); - -head_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_head); +fill_err: + hns3_clear_desc(ring, next_to_use_head); out_err_tx_ok: dev_kfree_skb_any(skb); + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); return NETDEV_TX_OK; - -out_net_tx_busy: - netif_stop_subqueue(netdev, ring_data->queue_index); - smp_mb(); /* Commit all data before submit */ - - return NETDEV_TX_BUSY; } static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -1193,11 +1542,24 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { - netdev_info(netdev, "already using mac address %pM\n", - mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); return 0; } + /* For VF device, if there is a perm_addr, then the user will not + * be allowed to change the address. 
+ */ + if (!hns3_is_phys_func(h->pdev) && + !is_zero_ether_addr(netdev->perm_addr)) { + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); + return -EPERM; + } + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); @@ -1209,55 +1571,112 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return 0; } +static int hns3_nic_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->do_ioctl) + return -EOPNOTSUPP; + + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + bool enable; int ret; - if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } +#ifdef NETIF_F_GRO_HW + if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { + enable = !!(features & NETIF_F_GRO_HW); + ret = h->ae_algo->ops->set_gro_en(h, enable); + if (ret) + return ret; } +#endif if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - h->ae_algo->ops->enable_vlan_filter(h, true); - else - h->ae_algo->ops->enable_vlan_filter(h, false); + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + ret = h->ae_algo->ops->enable_vlan_filter(h, enable); + if (ret) + return ret; } if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && h->ae_algo->ops->enable_hw_strip_rxvtag) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); - else - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); - + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); if (ret) return ret; } + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { + enable = !!(features & NETIF_F_NTUPLE); + h->ae_algo->ops->enable_fd(h, enable); + } + netdev->features = features; return 0; } +static netdev_features_t hns3_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ +#define HNS3_MAX_HDR_LEN 480U +#define HNS3_MAX_L4_HDR_LEN 60U + + size_t len; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + if (skb->encapsulation) + len = skb_inner_transport_header(skb) - skb->data; + else + len = skb_transport_header(skb) - skb->data; + + /* Assume L4 is 60 bytes, as TCP is the only protocol with a + * flexible value, and its max len is 60 bytes. + */ + len += HNS3_MAX_L4_HDR_LEN; + + /* Hardware only supports checksum on the skb with a max header + * len of 480 bytes.
+ */ + if (len > HNS3_MAX_HDR_LEN) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} + +#ifdef HAVE_VOID_NDO_GET_STATS64 static void hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *hns3_nic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif { struct hns3_nic_priv *priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; struct hns3_enet_ring *ring; + u64 rx_length_errors = 0; + u64 rx_crc_errors = 0; + u64 rx_multicast = 0; unsigned int start; + u64 tx_errors = 0; + u64 rx_errors = 0; unsigned int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; @@ -1267,30 +1686,49 @@ static void hns3_nic_get_stats64(struct net_device *netdev, u64 rx_drop = 0; if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) +#ifdef HAVE_VOID_NDO_GET_STATS64 return; +#else + return NULL; +#endif handle->ae_algo->ops->update_stats(handle, &netdev->stats); for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ - ring = priv->ring_data[idx].ring; + ring = &priv->ring[idx]; do { start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.tx_busy; tx_drop += ring->stats.sw_err_cnt; + tx_drop += ring->stats.tx_vlan_err; + tx_drop += ring->stats.tx_l4_proto_err; + tx_drop += ring->stats.tx_l2l3l4_err; + tx_drop += ring->stats.tx_tso_err; + tx_drop += ring->stats.over_max_recursion; + tx_drop += ring->stats.hw_limitation; + tx_errors += ring->stats.sw_err_cnt; + tx_errors += ring->stats.tx_vlan_err; + tx_errors += ring->stats.tx_l4_proto_err; + tx_errors += ring->stats.tx_l2l3l4_err; + tx_errors += ring->stats.tx_tso_err; + tx_errors += ring->stats.over_max_recursion; + tx_errors += ring->stats.hw_limitation; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ - ring = priv->ring_data[idx + queue_num].ring; + ring = &priv->ring[idx + queue_num]; do { start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; - rx_drop += ring->stats.non_vld_descs; - rx_drop += ring->stats.err_pkt_len; rx_drop += ring->stats.l2_err; + rx_errors += ring->stats.l2_err; + rx_errors += ring->stats.l3l4_csum_err; + rx_crc_errors += ring->stats.l2_err; + rx_multicast += ring->stats.rx_multicast; + rx_length_errors += ring->stats.err_pkt_len; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1299,15 +1737,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev, stats->rx_bytes = rx_bytes; stats->rx_packets = rx_pkts; - stats->rx_errors = netdev->stats.rx_errors; - stats->multicast = netdev->stats.multicast; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_errors = rx_errors; + stats->multicast = rx_multicast; + stats->rx_length_errors = rx_length_errors; + stats->rx_crc_errors = rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; - stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; + stats->tx_errors = tx_errors; + stats->rx_dropped = rx_drop; + stats->tx_dropped = tx_drop; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1319,19 +1757,20 @@ static void 
hns3_nic_get_stats64(struct net_device *netdev, stats->tx_window_errors = netdev->stats.tx_window_errors; stats->rx_compressed = netdev->stats.rx_compressed; stats->tx_compressed = netdev->stats.tx_compressed; +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif } +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct hnae3_handle *h = hns3_get_handle(netdev); - struct hnae3_knic_private_info *kinfo = &h->kinfo; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; + struct hnae3_knic_private_info *kinfo; u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; - bool if_running; - int ret; + struct hnae3_handle *h; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) @@ -1343,48 +1782,44 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (!netdev) return -EINVAL; - if_running = netif_running(netdev); - if (if_running) { - hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; - if (ret) - goto out; - - ret = hns3_nic_set_real_num_queue(netdev); + h = hns3_get_handle(netdev); + kinfo = &h->kinfo; -out: - if (if_running) - hns3_nic_net_open(netdev); + if (netif_msg_ifdown(h)) + netdev_info(netdev, "setup tc: num_tc=%u\n", tc); - return ret; + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; } +#endif +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { +#ifdef TC_MQPRIO_HW_OFFLOAD_MAX if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; return hns3_setup_tc(dev, type_data); +#else + return -EOPNOTSUPP; +#endif } +#endif static int hns3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); - struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); - if (!ret) - set_bit(vid, priv->active_vlans); - return ret; } @@ -1392,86 +1827,108 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct hnae3_handle *h = hns3_get_handle(netdev); - struct hns3_nic_priv *priv = netdev_priv(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); - if (!ret) - clear_bit(vid, priv->active_vlans); - return ret; } -static void hns3_restore_vlan(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - u16 vid; - int ret; - - for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { - ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); - if (ret) - netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", - vid, ret); - } -} - +#ifdef IFLA_VF_VLAN_INFO_MAX static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) +#else +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos) +#endif { struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; +#ifndef IFLA_VF_VLAN_INFO_MAX + __be16 vlan_proto = htons(ETH_P_8021Q); +#endif + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if 
(netif_msg_ifdown(h)) + netdev_info(netdev, + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", + vf, vlan, qos, ntohs(vlan_proto)); + if (h->ae_algo->ops->set_vf_vlan_filter) ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, - qos, vlan_proto); + qos, vlan_proto); return ret; } +static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!handle->ae_algo->ops->set_vf_spoofchk) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); +} + +static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (!handle->ae_algo->ops->set_vf_trust) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); +} + static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) { struct hnae3_handle *h = hns3_get_handle(netdev); - bool if_running = netif_running(netdev); int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; - /* if this was called with netdev up then bring netdevice down */ - if (if_running) { - (void)hns3_nic_net_stop(netdev); - msleep(100); - } + if (netif_msg_ifdown(h)) + netdev_info(netdev, "change mtu from %u to %d\n", + netdev->mtu, new_mtu); ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { + if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); - return ret; - } - - netdev->mtu = new_mtu; - - /* if the netdev was running earlier, bring it up again */ - if (if_running && hns3_nic_net_open(netdev)) - ret = -EINVAL; + else + netdev->mtu = new_mtu; return ret; } -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hns3_enet_ring *tx_ring = NULL; + struct hnae3_handle *h = hns3_get_handle(ndev); + struct hns3_enet_ring *tx_ring; + struct napi_struct *napi; int timeout_queue = 0; int hw_head, hw_tail; + int fbd_num, fbd_oft; + int ebd_num, ebd_oft; + int bd_num, bd_err; + int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { + for (i = 0; i < ndev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; @@ -1481,6 +1938,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { timeout_queue = i; + netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", + q->state, + jiffies_to_msecs(jiffies - trans_start)); break; } } @@ -1492,60 +1952,193 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) return false; } - tx_ring = priv->ring_data[timeout_queue].ring; + priv->tx_timeout_count++; + tx_ring = &priv->ring[timeout_queue]; + napi = &tx_ring->tqp_vector->napi; + + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, + tx_ring->next_to_clean, napi->state); + + netdev_info(ndev, + "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, + tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); + + netdev_info(ndev, + "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", + 
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + + /* When mac received many pause frames continuous, it's unable to send + * packets, which may cause tx timeout + */ + if (h->ae_algo->ops->get_mac_stats) { + struct hns3_mac_stats mac_stats; + + h->ae_algo->ops->get_mac_stats(h, &mac_stats); + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", + mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); + } + + hw_head = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + hw_tail = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + fbd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + fbd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG); + ebd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBDNUM_REG); + ebd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBD_OFFSET_REG); + bd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + bd_err = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_ERR_REG); + ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); + tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); + + netdev_info(ndev, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + bd_num, hw_head, hw_tail, bd_err, + readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); + + if ((hw_tail == hw_head) && (priv->tx_timeout_count % 2)) + return false; + + return true; +} + +static void hns3_nic_net_timeout(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (!hns3_get_tx_timeo_queue_info(ndev)) + return; + + /* request the reset, and let the hclge to determine + * which reset level should be done + */ + if (h->ae_algo->ops->reset_event) + h->ae_algo->ops->reset_event(h->pdev, h); +} + +#ifdef CONFIG_RFS_ACCEL +static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct hnae3_handle *h = hns3_get_handle(dev); + struct flow_keys fkeys; + + if (!h->ae_algo->ops->add_arfs_entry) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) + return -EPROTONOSUPPORT; + + if ((fkeys.basic.n_proto != htons(ETH_P_IP) && + fkeys.basic.n_proto != htons(ETH_P_IPV6)) || + (fkeys.basic.ip_proto != IPPROTO_TCP && + fkeys.basic.ip_proto != IPPROTO_UDP)) + return -EPROTONOSUPPORT; + + return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); +} +#endif + +static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, + struct ifla_vf_info *ivf) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->get_vf_config) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_vf_config(h, vf, ivf); +} + +static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, + int link_state) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); - hw_head = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_HEAD_REG); - hw_tail = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_TAIL_REG); - netdev_info(ndev, - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", - priv->tx_timeout_count, 
- timeout_queue, - tx_ring->next_to_use, - tx_ring->next_to_clean, - hw_head, - hw_tail, - readl(tx_ring->tqp_vector->mask_addr)); + if (!h->ae_algo->ops->set_vf_link_state) + return -EOPNOTSUPP; - return true; + return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); } -static void hns3_nic_net_timeout(struct net_device *ndev) +static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, + int min_tx_rate, int max_tx_rate) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); - if (!hns3_get_tx_timeo_queue_info(ndev)) - return; + if (!h->ae_algo->ops->set_vf_rate) + return -EOPNOTSUPP; - priv->tx_timeout_count++; + return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, + false); +} - if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) - return; +static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; - /* request the reset */ - if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h); + if (!h->ae_algo->ops->set_vf_mac) + return -EOPNOTSUPP; + + if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); + netdev_err(netdev, + "Invalid MAC:%s specified. Could not set MAC\n", + format_mac_addr); + return -EINVAL; + } + + return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); } -static const struct net_device_ops hns3_nic_netdev_ops = { +struct net_device_ops hns3_nic_netdev_ops = { .ndo_open = hns3_nic_net_open, .ndo_stop = hns3_nic_net_stop, .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_do_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, + .ndo_features_check = hns3_features_check, .ndo_get_stats64 = hns3_nic_get_stats64, +#if defined(HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV) .ndo_setup_tc = hns3_nic_setup_tc, +#endif .ndo_set_rx_mode = hns3_nic_set_rx_mode, .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, + .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, + .ndo_set_vf_trust = hns3_set_vf_trust, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = hns3_rx_flow_steer, +#endif + .ndo_get_vf_config = hns3_nic_get_vf_config, + .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, + .ndo_set_vf_rate = hns3_nic_set_vf_rate, + .ndo_set_vf_mac = hns3_nic_set_vf_mac, }; -static bool hns3_is_phys_func(struct pci_dev *pdev) +bool hns3_is_phys_func(struct pci_dev *pdev) { u32 dev_id = pdev->device; @@ -1562,7 +2155,7 @@ static bool hns3_is_phys_func(struct pci_dev *pdev) case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: return false; default: - dev_warn(&pdev->dev, "un-recognized pci device-id %d", + dev_warn(&pdev->dev, "un-recognized pci device-id %u", dev_id); } @@ -1584,6 +2177,15 @@ static void hns3_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); } +static void hns3_get_dev_capability(struct pci_dev *pdev, + struct hnae3_ae_dev *ae_dev) +{ + if (pdev->revision >= 0x21) { + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); + } +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1599,21 +2201,20 @@ static int hns3_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) struct hnae3_ae_dev *ae_dev; int ret; - ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), - GFP_KERNEL); - if (!ae_dev) { - ret = -ENOMEM; - return ret; - } + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); + if (!ae_dev) + return -ENOMEM; ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; - ae_dev->dev_type = HNAE3_DEV_KNIC; + hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); - hnae3_register_ae_dev(ae_dev); + ret = hnae3_register_ae_dev(ae_dev); + if (ret) + pci_set_drvdata(pdev, NULL); - return 0; + return ret; } /* hns3_remove - Device removal routine @@ -1627,6 +2228,7 @@ static void hns3_remove(struct pci_dev *pdev) hns3_disable_sriov(pdev); hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); } /** @@ -1662,12 +2264,132 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } -static struct pci_driver hns3_driver = { +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +static int __maybe_unused hns3_suspend(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to suspend.\n"); + if (ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); + } + + return 0; +} + +static int __maybe_unused hns3_resume(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to resume.\n"); + if (ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); + } + + return 0; +} + +static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + pci_ers_result_t ret; + + dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (!ae_dev || !ae_dev->ops) { + dev_err(&pdev->dev, + "Can't recover - error happened before device initialized\n"); + return PCI_ERS_RESULT_NONE; + } + + if (ae_dev->ops->handle_hw_ras_error) + ret = ae_dev->ops->handle_hw_ras_error(ae_dev); + else + return PCI_ERS_RESULT_NONE; + + return ret; +} + +static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops; + enum hnae3_reset_type reset_type; + struct device *dev = &pdev->dev; + + if (!ae_dev || !ae_dev->ops) + return PCI_ERS_RESULT_NONE; + + ops = ae_dev->ops; + /* request the reset */ + if (ops->reset_event && ops->get_reset_level && + ops->set_default_reset_request) { + if (ae_dev->hw_err_reset_req) { + reset_type = ops->get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + ops->set_default_reset_request(ae_dev, reset_type); + dev_info(dev, "requesting reset due to PCI error\n"); + ops->reset_event(pdev, NULL); + } + + return PCI_ERS_RESULT_RECOVERED; + } + + return PCI_ERS_RESULT_DISCONNECT; +} + +static void hns3_reset_prepare(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "FLR prepare\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); +} + +static void 
hns3_reset_done(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "FLR done\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); +} + +struct pci_error_handlers hns3_err_handler = { + .error_detected = hns3_error_detected, + .slot_reset = hns3_slot_reset, + .reset_prepare = hns3_reset_prepare, + .reset_done = hns3_reset_done, +}; + +static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); + +struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .shutdown = hns3_shutdown, + .driver.pm = &hns3_pm_ops, .sriov_configure = hns3_pci_sriov_configure, + .err_handler = &hns3_err_handler, }; /* set default feature to hns3 */ @@ -1675,6 +2397,7 @@ static void hns3_set_default_feature(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct pci_dev *pdev = h->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); netdev->priv_flags |= IFF_UNICAST_FLT; @@ -1682,11 +2405,14 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; +#ifdef NETIF_F_GSO_PARTIAL netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; +#endif netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER | @@ -1694,30 +2420,44 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; + + if (pdev->revision > HNAE3_REVISION_ID_20) { +#ifdef NETIF_F_GRO_HW + netdev->features |= NETIF_F_GRO_HW; + netdev->hw_features |= NETIF_F_GRO_HW; +#endif + if (!(h->flags & HNAE3_SUPPORT_VF)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + } + } - if (pdev->revision != 0x20) + if (hnae3_get_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B)) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae3_page_order(ring); + unsigned int order = hns3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -1728,19 +2468,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae3_page_size(ring); + cb->length = 
hns3_page_size(ring); cb->type = DESC_TYPE_PAGE; + page_ref_add(p, USHRT_MAX - 1); + cb->pagecnt_bias = USHRT_MAX; return 0; } static void hns3_free_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) + struct hns3_desc_cb *cb, int budget) { if (cb->type == DESC_TYPE_SKB) - dev_kfree_skb_any((struct sk_buff *)cb->priv); - else if (!HNAE3_IS_TX_RING(ring)) - put_page((struct page *)cb->priv); + napi_consume_skb(cb->priv, budget); + else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); memset(cb, 0, sizeof(*cb)); } @@ -1749,7 +2491,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, cb->length, ring_to_dma_dir(ring)); - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) return -EIO; return 0; @@ -1758,10 +2500,10 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) static void hns3_unmap_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - if (cb->type == DESC_TYPE_SKB) + if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB) dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); - else + else if (cb->length) dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); } @@ -1772,7 +2514,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) ring->desc[i].addr = 0; } -static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, + int budget) { struct hns3_desc_cb *cb = &ring->desc_cb[i]; @@ -1780,7 +2523,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) return; hns3_buffer_detach(ring, i); - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, budget); } static void hns3_free_buffers(struct hns3_enet_ring *ring) @@ -1788,7 +2531,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) int i; for (i = 0; i < ring->desc_num; i++) - hns3_free_buffer_detach(ring, i); + hns3_free_buffer_detach(ring, i, 0); } /* free desc along with its attached buffer */ @@ -1810,16 +2553,15 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) int size = ring->desc_num * sizeof(ring->desc[0]); ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, - &ring->desc_dma_addr, - GFP_KERNEL); + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; return 0; } -static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) { int ret; @@ -1834,14 +2576,14 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, 0); out: return ret; } -static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) { - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); if (ret) return ret; @@ -1857,7 +2599,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) int i, j, ret; for (i = 0; i < ring->desc_num; i++) { - ret = hns3_alloc_buffer_attach(ring, i); + ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; } @@ -1866,11 +2608,11 @@ static int 
hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) out_buffer_fail: for (j = i - 1; j >= 0; j--) - hns3_free_buffer_detach(ring, j); + hns3_free_buffer_detach(ring, j, 0); return ret; } -/* detach a in-used buffer and replace with a reserved one */ +/* detach a in-used buffer and replace with a reserved one */ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) { @@ -1883,66 +2625,75 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) { ring->desc_cb[i].reuse_flag = 0; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma - + ring->desc_cb[i].page_offset); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; + + dma_sync_single_for_device(ring_to_dev(ring), + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); } -static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, - int *pkts) +static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, + int *bytes, int *pkts, int budget) { - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_doorbell(). + */ + int ltu = smp_load_acquire(&ring->last_to_use); + int ntc = ring->next_to_clean; + struct hns3_desc_cb *desc_cb; + bool reclaimed = false; + struct hns3_desc *desc; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ - hns3_free_buffer_detach(ring, ring->next_to_clean); + while (ltu != ntc) { + desc = &ring->desc[ntc]; - ring_ptr_move_fw(ring, next_to_clean); -} + if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & + BIT(HNS3_TXD_VLD_B)) + break; -static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) -{ - int u = ring->next_to_use; - int c = ring->next_to_clean; + desc_cb = &ring->desc_cb[ntc]; - if (unlikely(h > ring->desc_num)) - return 0; + if (desc_cb->type == DESC_TYPE_SKB) { + (*pkts)++; + (*bytes) += desc_cb->send_bytes; + } - return u > c ? (h > c && h <= u) : (h > c || h <= u); -} + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ + hns3_free_buffer_detach(ring, ntc, budget); -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct netdev_queue *dev_queue; - int bytes, pkts; - int head; + if (++ntc == ring->desc_num) + ntc = 0; - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ntc]); + reclaimed = true; + } - if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ + if (unlikely(!reclaimed)) + return false; - if (unlikely(!is_valid_clean_head(ring, head))) { - netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); + /* This smp_store_release() pairs with smp_load_acquire() in + * ring_space called by hns3_nic_net_xmit. 
+ */ + smp_store_release(&ring->next_to_clean, ntc); + return true; +} - u64_stats_update_begin(&ring->syncp); - ring->stats.io_err_cnt++; - u64_stats_update_end(&ring->syncp); - return true; - } +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct netdev_queue *dev_queue; + int bytes, pkts; bytes = 0; pkts = 0; - while (head != ring->next_to_clean && budget) { - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); - /* Issue prefetch for next Tx descriptor */ - prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; - } + + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) + return; ring->tqp_vector->tx_group.total_bytes += bytes; ring->tqp_vector->tx_group.total_packets += pkts; @@ -1955,19 +2706,18 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); - if (unlikely(pkts && netif_carrier_ok(netdev) && - (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { + if (unlikely(netif_carrier_ok(netdev) && + ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { netif_tx_wake_queue(dev_queue); ring->stats.restart_queue++; } } - - return !!budget; } static int hns3_desc_unused(struct hns3_enet_ring *ring) @@ -1978,8 +2728,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring) return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; } -static void -hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) +static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, + int cleand_count) { struct hns3_desc_cb *desc_cb; struct hns3_desc_cb res_cbs; @@ -1994,88 +2744,145 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) hns3_reuse_buffer(ring, ring->next_to_use); } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; u64_stats_update_end(&ring->syncp); - netdev_err(ring->tqp->handle->kinfo.netdev, - "hnae reserve buffer map failed.\n"); + hns3_rl_err(ring_to_netdev(ring), + "alloc rx buffer failed: %d\n", + ret); break; } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + + u64_stats_update_begin(&ring->syncp); + ring->stats.non_reuse_pg++; + u64_stats_update_end(&ring->syncp); } ring_ptr_move_fw(ring, next_to_use); } - wmb(); /* Make all data has been write before submit */ - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); +} + +static bool hns3_page_is_reusable(struct page *page) +{ + return page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page); +} + +static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) +{ + return (page_count(cb->priv) - cb->pagecnt_bias) == 1; } static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) { - struct hns3_desc *desc; - u32 truesize; - int size; - int last_offset; - bool twobufs; - - twobufs = ((PAGE_SIZE < 8192) && - hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); - - desc = &ring->desc[ring->next_to_clean]; - size = 
le16_to_cpu(desc->rx.size); - - truesize = hnae3_buf_size(ring); - - if (!twobufs) - last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + int size = le16_to_cpu(desc->rx.size); + u32 truesize = hns3_buf_size(ring); + desc_cb->pagecnt_bias--; skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); - /* Avoid re-using remote pages,flag default unreuse */ - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) - return; - - if (twobufs) { - /* If we are only owner of page we can reuse it */ - if (likely(page_count(desc_cb->priv) == 1)) { - /* Flip page offset to other buffer */ - desc_cb->page_offset ^= truesize; - - desc_cb->reuse_flag = 1; - /* bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } + /* Avoid re-using remote pages, or the stack is still using the page + * when page_offset rollback to zero, flag default unreuse + */ + if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || + (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); return; } /* Move offset up to the next cache line */ desc_cb->page_offset += truesize; - if (desc_cb->page_offset <= last_offset) { + if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { + desc_cb->reuse_flag = 1; + } else if (hns3_can_reuse_page(desc_cb)) { desc_cb->reuse_flag = 1; - /* Bump ref count on page before it is given*/ - get_page(desc_cb->priv); + desc_cb->page_offset = 0; + } else if (desc_cb->pagecnt_bias) { + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); + return; + } + + if (unlikely(!desc_cb->pagecnt_bias)) { + page_ref_add(desc_cb->priv, USHRT_MAX); + desc_cb->pagecnt_bias = USHRT_MAX; + } +} + +static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) +{ + __be16 type = skb->protocol; + struct tcphdr *th; + int depth = 0; + + while (eth_type_vlan(type)) { + struct vlan_hdr *vh; + + if ((depth + VLAN_HLEN) > skb_headlen(skb)) + return -EFAULT; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; } + + skb_set_network_header(skb, depth); + + if (type == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); + + depth += sizeof(struct iphdr); + skb_set_transport_header(skb, depth); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, + iph->daddr, 0); + } else if (type == htons(ETH_P_IPV6)) { + const struct ipv6hdr *iph = ipv6_hdr(skb); + + depth += sizeof(struct ipv6hdr); + skb_set_transport_header(skb, depth); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, + &iph->daddr, 0); + } else { + hns3_rl_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); + return -EFAULT; + } + + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + if (th->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; + + skb->csum_start = (unsigned char *)th - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + + trace_hns3_gro(skb); + + return 0; } static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) + u32 l234info, u32 bd_base_info, u32 ol_info) { - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct net_device *netdev = 
ring_to_netdev(ring); int l3_type, l4_type; - u32 bd_base_info; int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); skb->ip_summed = CHECKSUM_NONE; @@ -2085,14 +2892,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | + BIT(HNS3_RXD_OL3E_B) | + BIT(HNS3_RXD_OL4E_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2100,12 +2905,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: @@ -2113,6 +2913,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, skb->csum_level = 1; /* fall through */ case HNS3_OL4_TYPE_NO_TUN: + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -2121,91 +2926,92 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; + default: + break; } } static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) { + if (skb_has_frag_list(skb)) + napi_gro_flush(&ring->tqp_vector->napi, false); + napi_gro_receive(&ring->tqp_vector->napi, skb); } -static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, - struct hns3_desc *desc, u32 l234info) +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) { + struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; - u16 vlan_tag; - if (pdev->revision == 0x20) { - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + if (pdev->revision == HNAE3_REVISION_ID_20) { + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - return vlan_tag; + return (*vlan_tag != 0); } #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always insert VLAN tag into RX descriptor when + * remove the tag from packet, driver needs to determine + * reporting which tag to stack. 
+ */ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - break; + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; case HNS3_STRP_INNER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - break; + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return true; default: - vlan_tag = 0; - break; + return false; } - - return vlan_tag; } -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb, int *out_bnum) +static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) { - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct hns3_desc_cb *desc_cb; - struct hns3_desc *desc; - struct sk_buff *skb; - unsigned char *va; - u32 bd_base_info; - int pull_len; - u32 l234info; - int length; - int bnum; - - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - - prefetch(desc); - - length = le16_to_cpu(desc->rx.size); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - - /* Check valid BD */ - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) - return -EFAULT; + ring->desc[ring->next_to_clean].rx.bd_base_info &= + cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); + ring->next_to_clean += 1; - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + if (unlikely(ring->next_to_clean == ring->desc_num)) + ring->next_to_clean = 0; +} - /* Prefetch first cache line of first page - * Idea is to cache few bytes of the header of the packet. Our L1 Cache - * line size is 64B so need to prefetch twice to make it 128B. But in - * actual we can have greater size of caches with 128B Level 1 cache - * lines. In such a case, single fetch would suffice to cache in the - * relevant part of the header. 
- */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif +static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, + unsigned char *va) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + struct net_device *netdev = ring_to_netdev(ring); + struct sk_buff *skb; - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); + ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); + skb = ring->skb; if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); + hns3_rl_err(netdev, "alloc rx skb fail\n"); u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -2214,45 +3020,169 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return -ENOMEM; } + trace_hns3_rx_desc(ring); prefetchw(skb->data); - bnum = 1; + ring->pending_buf = 1; + ring->frag_num = 0; + ring->tail_skb = NULL; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) - desc_cb->reuse_flag = 1; - else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(hns3_page_is_reusable(desc_cb->priv))) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + __page_frag_cache_drain(desc_cb->priv, + desc_cb->pagecnt_bias); + + hns3_rx_ring_move_fw(ring); + return 0; + } + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + __skb_put(skb, ring->pull_len); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, + desc_cb); + hns3_rx_ring_move_fw(ring); + + return 0; +} + +static int hns3_add_frag(struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = ring->skb; + struct sk_buff *head_skb = skb; + struct sk_buff *new_skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + u32 bd_base_info; + + do { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + /* make sure HW write desc complete */ + dma_rmb(); + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) + return -ENXIO; + + if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); + if (unlikely(!new_skb)) { + hns3_rl_err(ring_to_netdev(ring), + "alloc rx fraglist skb fail\n"); + return -ENXIO; + } + ring->frag_num = 0; + + if (ring->tail_skb) { + ring->tail_skb->next = new_skb; + ring->tail_skb = new_skb; + } else { + skb_shinfo(skb)->frag_list = new_skb; + ring->tail_skb = new_skb; + } + } + + if (ring->tail_skb) { + head_skb->truesize += hns3_buf_size(ring); + head_skb->data_len += le16_to_cpu(desc->rx.size); + head_skb->len += le16_to_cpu(desc->rx.size); + skb = ring->tail_skb; + } + + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); + trace_hns3_rx_desc(ring); + hns3_rx_ring_move_fw(ring); + ring->pending_buf++; + } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); - ring_ptr_move_fw(ring, next_to_clean); - } else { - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + return 0; +} - pull_len = eth_get_headlen(va, 
HNS3_RX_HEAD_SIZE); +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info, + u32 bd_base_info, u32 ol_info) +{ + u32 l3_type; + + skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, + HNS3_RXD_GRO_SIZE_M, + HNS3_RXD_GRO_SIZE_S); + /* if there is no HW GRO, do not set gro params */ + if (!skb_shinfo(skb)->gso_size) { + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); + return 0; + } - memcpy(__skb_put(skb, pull_len), va, - ALIGN(pull_len, sizeof(long))); + NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, + HNS3_RXD_GRO_COUNT_M, + HNS3_RXD_GRO_COUNT_S); - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); + if (l3_type == HNS3_L3_TYPE_IPV4) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + else if (l3_type == HNS3_L3_TYPE_IPV6) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + return -EFAULT; - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - bnum++; - } + return hns3_gro_complete(skb, l234info); +} + +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 rss_hash, + u32 l234info) +{ + enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE; + struct hnae3_handle *handle = ring->tqp->handle; + int l3_type; + int l4_type; + + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S); + if (l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) { + if (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP) + rss_type = PKT_HASH_TYPE_L4; + else if (l4_type == HNS3_L4_TYPE_IGMP || + l4_type == HNS3_L4_TYPE_ICMP) + rss_type = PKT_HASH_TYPE_L3; } - *out_bnum = bnum; + skb_set_hash(skb, rss_hash, rss_type); +} + +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + struct net_device *netdev = ring_to_netdev(ring); + enum hns3_pkt_l2t_type l2_frame_type; + u32 bd_base_info, l234info, ol_info; + struct hns3_desc *desc; + unsigned int len; + int pre_ntc, ret; + /* bdinfo handled below is only valid on the last BD of the + * current packet, and ring->next_to_clean indicates the first + * descriptor of next packet, so need - 1 below. + */ + pre_ntc = ring->next_to_clean ? 
(ring->next_to_clean - 1) : + (ring->desc_num - 1); + desc = &ring->desc[pre_ntc]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); l234info = le32_to_cpu(desc->rx.l234_info); + ol_info = le32_to_cpu(desc->rx.ol_info); /* Based on hw strategy, the tag offloaded will be stored at * ot_vlan_tag in two layer tag case, and stored at vlan_tag @@ -2261,140 +3191,188 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); - dev_kfree_skb_any(skb); - return -EINVAL; + return -EFAULT; } - if (unlikely((!desc->rx.pkt_len) || - hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); + len = skb->len; - dev_kfree_skb_any(skb); - return -EFAULT; - } + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); + /* This is needed in order to enable forwarding support */ + ret = hns3_set_gro_and_checksum(ring, skb, l234info, + bd_base_info, ol_info); + if (unlikely(ret)) { u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; + ring->stats.rx_err_cnt++; u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; + return ret; } + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + u64_stats_update_begin(&ring->syncp); ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; + ring->stats.rx_bytes += len; + + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + u64_stats_update_end(&ring->syncp); - ring->tqp_vector->rx_group.total_bytes += skb->len; + ring->tqp_vector->rx_group.total_bytes += len; + + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), + l234info); + return 0; +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = ring->skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + unsigned int length; + u32 bd_base_info; + int ret; + + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + + prefetch(desc); + + if (!skb) { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + + /* Check valid BD */ + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) + return -ENXIO; + + dma_rmb(); + length = le16_to_cpu(desc->rx.size); + + ring->va = desc_cb->buf + desc_cb->page_offset; + + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + + /* Prefetch first cache line of first page. + * Idea is to cache few bytes of the header of the packet. 
+ * Our L1 Cache line size is 64B so need to prefetch twice to make + * it 128B. But in actual we can have greater size of caches with + * 128B Level 1 cache lines. In such a case, single fetch would + * suffice to cache in the relevant part of the header. + */ + prefetch(ring->va); +#if L1_CACHE_BYTES < 128 + prefetch(ring->va + L1_CACHE_BYTES); +#endif + + ret = hns3_alloc_skb(ring, length, ring->va); + skb = ring->skb; + + if (ret < 0) /* alloc buffer fail */ + return ret; + if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ + ret = hns3_add_frag(ring); + if (ret) + return ret; + } + } else { + ret = hns3_add_frag(ring); + if (ret) + return ret; + } + + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + if (skb->len > HNS3_RX_HEAD_SIZE) + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); + + ret = hns3_handle_bdinfo(ring, skb); + if (unlikely(ret)) { + dev_kfree_skb_any(skb); + return ret; + } - hns3_rx_checksum(ring, skb, desc); + skb_record_rx_queue(skb, ring->tqp->tqp_index); return 0; } -int hns3_clean_rx_ring( - struct hns3_enet_ring *ring, int budget, - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) +int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - int recv_pkts, recv_bds, clean_count, err; int unused_count = hns3_desc_unused(ring); - struct sk_buff *skb = NULL; - int num, bnum = 0; + int recv_pkts = 0; + int err; - num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - rmb(); /* Make sure num taken effect before the other data is touched */ + unused_count -= ring->pending_buf; - recv_pkts = 0, recv_bds = 0, clean_count = 0; - num -= unused_count; - - while (recv_pkts < budget && recv_bds < num) { + while (recv_pkts < budget) { /* Reuse or realloc buffers */ - if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { - hns3_nic_alloc_rx_buffers(ring, - clean_count + unused_count); - clean_count = 0; - unused_count = hns3_desc_unused(ring); + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, unused_count); + unused_count = hns3_desc_unused(ring) - + ring->pending_buf; } /* Poll one pkt */ - err = hns3_handle_rx_bd(ring, &skb, &bnum); - if (unlikely(!skb)) /* This fault cannot be repaired */ + err = hns3_handle_rx_bd(ring); + /* Do not get FE for the packet or failed to alloc skb */ + if (unlikely(!ring->skb || err == -ENXIO)) { goto out; - - recv_bds += bnum; - clean_count += bnum; - if (unlikely(err)) { /* Do jump the err */ + } else if (likely(!err)) { + rx_fn(ring, ring->skb); recv_pkts++; - continue; } - /* Do update ip stack process */ - skb->protocol = eth_type_trans(skb, netdev); - rx_fn(ring, skb); - - recv_pkts++; + unused_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; } out: /* Make all data has been write before submit */ - if (clean_count + unused_count > 0) - hns3_nic_alloc_rx_buffers(ring, - clean_count + unused_count); + if (unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, unused_count); return recv_pkts; } -static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group) { - struct hns3_enet_tqp_vector *tqp_vector = - ring_group->ring->tqp_vector; +#define HNS3_RX_LOW_BYTE_RATE 10000 +#define HNS3_RX_MID_BYTE_RATE 
20000 +#define HNS3_RX_ULTRA_PACKET_RATE 40 + enum hns3_flow_level_range new_flow_level; - int packets_per_msecs; - int bytes_per_msecs; + struct hns3_enet_tqp_vector *tqp_vector; + int packets_per_msecs, bytes_per_msecs; u32 time_passed_ms; - u16 new_int_gl; - - if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) - return false; - - if (ring_group->total_packets == 0) { - ring_group->coal.int_gl = HNS3_INT_GL_50K; - ring_group->coal.flow_level = HNS3_FLOW_LOW; - return true; - } - /* Simple throttlerate management - * 0-10MB/s lower (50000 ints/s) - * 10-20MB/s middle (20000 ints/s) - * 20-1249MB/s high (18000 ints/s) - * > 40000pps ultra (8000 ints/s) - */ - new_flow_level = ring_group->coal.flow_level; - new_int_gl = ring_group->coal.int_gl; + tqp_vector = ring_group->ring->tqp_vector; time_passed_ms = jiffies_to_msecs(jiffies - tqp_vector->last_jiffies); - if (!time_passed_ms) return false; @@ -2404,9 +3382,14 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) do_div(ring_group->total_bytes, time_passed_ms); bytes_per_msecs = ring_group->total_bytes; -#define HNS3_RX_LOW_BYTE_RATE 10000 -#define HNS3_RX_MID_BYTE_RATE 20000 + new_flow_level = ring_group->coal.flow_level; + /* Simple throttlerate management + * 0-10MB/s lower (50000 ints/s) + * 10-20MB/s middle (20000 ints/s) + * 20-1249MB/s high (18000 ints/s) + * > 40000pps ultra (8000 ints/s) + */ switch (new_flow_level) { case HNS3_FLOW_LOW: if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) @@ -2426,13 +3409,40 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) break; } -#define HNS3_RX_ULTRA_PACKET_RATE 40 - if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && &tqp_vector->rx_group == ring_group) new_flow_level = HNS3_FLOW_ULTRA; - switch (new_flow_level) { + ring_group->total_bytes = 0; + ring_group->total_packets = 0; + ring_group->coal.flow_level = new_flow_level; + + return true; +} + +static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +{ + struct hns3_enet_tqp_vector *tqp_vector; + u16 new_int_gl; + + if (!ring_group->ring) + return false; + + tqp_vector = ring_group->ring->tqp_vector; + if (!tqp_vector->last_jiffies) + return false; + + if (ring_group->total_packets == 0) { + ring_group->coal.int_gl = HNS3_INT_GL_50K; + ring_group->coal.flow_level = HNS3_FLOW_LOW; + return true; + } + + if (!hns3_get_new_flow_lvl(ring_group)) + return false; + + new_int_gl = ring_group->coal.int_gl; + switch (ring_group->coal.flow_level) { case HNS3_FLOW_LOW: new_int_gl = HNS3_INT_GL_50K; break; @@ -2449,9 +3459,6 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) break; } - ring_group->total_bytes = 0; - ring_group->total_packets = 0; - ring_group->coal.flow_level = new_flow_level; if (new_int_gl != ring_group->coal.int_gl) { ring_group->coal.int_gl = new_int_gl; return true; @@ -2465,10 +3472,10 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; bool rx_update, tx_update; - if (tqp_vector->int_adapt_down > 0) { - tqp_vector->int_adapt_down--; + /* update param every 1000ms */ + if (time_before(jiffies, + tqp_vector->last_jiffies + msecs_to_jiffies(1000))) return; - } if (rx_group->coal.gl_adapt_enable) { rx_update = hns3_get_new_int_gl(rx_group); @@ -2478,36 +3485,40 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) } if (tx_group->coal.gl_adapt_enable) { - tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); + tx_update = 
hns3_get_new_int_gl(tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_group->coal.int_gl); } tqp_vector->last_jiffies = jiffies; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; } static int hns3_nic_common_poll(struct napi_struct *napi, int budget) { + struct hns3_nic_priv *priv = netdev_priv(napi->dev); struct hns3_enet_ring *ring; int rx_pkt_total = 0; struct hns3_enet_tqp_vector *tqp_vector = container_of(napi, struct hns3_enet_tqp_vector, napi); bool clean_complete = true; - int rx_budget; + int rx_budget = budget; + + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + napi_complete(napi); + return 0; + } /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } + hns3_for_each_ring(ring, tqp_vector->tx_group) + hns3_clean_tx_ring(ring, budget); /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); + if (tqp_vector->num_tqps > 1) + rx_budget = max(budget / tqp_vector->num_tqps, 1); hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, @@ -2524,9 +3535,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); + if (napi_complete(napi) && + likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + } return rx_pkt_total; } @@ -2556,7 +3569,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - return -ENOMEM; + goto err_free_chain; cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; @@ -2586,7 +3599,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, while (rx_ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - return -ENOMEM; + goto err_free_chain; cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; @@ -2601,6 +3614,17 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, } return 0; + +err_free_chain: + cur_chain = head->next; + while (cur_chain) { + chain = cur_chain->next; + devm_kfree(&pdev->dev, cur_chain); + cur_chain = chain; + } + head->next = NULL; + + return -ENOMEM; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -2627,13 +3651,32 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, group->count++; } +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) +{ + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_tqp_vector *tqp_vector; + int num_vectors = priv->vector_num; + int numa_node; + int vector_i; + + numa_node = dev_to_node(&pdev->dev); + + for (vector_i = 0; vector_i < num_vectors; vector_i++) { + tqp_vector = &priv->tqp_vector[vector_i]; + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), + &tqp_vector->affinity_mask); + } +} + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int ret = 0; - u16 i; + int i; + + hns3_nic_set_cpumask(priv); for 
(i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; @@ -2648,13 +3691,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector = &priv->tqp_vector[vector_i]; hns3_add_ring_to_group(&tqp_vector->tx_group, - priv->ring_data[i].ring); + &priv->ring[i]); hns3_add_ring_to_group(&tqp_vector->rx_group, - priv->ring_data[i + tqp_num].ring); + &priv->ring[i + tqp_num]); - priv->ring_data[i].ring->tqp_vector = tqp_vector; - priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + priv->ring[i].tqp_vector = tqp_vector; + priv->ring[i + tqp_num].tqp_vector = tqp_vector; tqp_vector->num_tqps++; } @@ -2670,7 +3713,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) ret = hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - return ret; + goto map_ring_fail; ret = h->ae_algo->ops->map_ring_to_vector(h, tqp_vector->vector_irq, &vector_ring_chain); @@ -2678,17 +3721,25 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - return ret; + goto map_ring_fail; netif_napi_add(priv->netdev, &tqp_vector->napi, hns3_nic_common_poll, NAPI_POLL_WEIGHT); } return 0; + +map_ring_fail: + while (i--) + netif_napi_del(&priv->tqp_vector[i].napi); + + return ret; } static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) { +#define HNS3_VECTOR_PF_MAX_NUM 64 + struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_vector_info *vector; @@ -2701,11 +3752,14 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) /* RSS size, cpu online and vector_num should be the same */ /* Should consider 2p/4p later */ vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), GFP_KERNEL); if (!vector) return -ENOMEM; + /* save the actual available vector number */ vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); priv->vector_num = vector_num; @@ -2736,46 +3790,38 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) group->count = 0; } -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - int i, ret; + int i; for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - return ret; + if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) + continue; + + /* Since the mapping can be overwritten, when fail to get the + * chain between vector and ring, we should go on to deal with + * the remaining options. 
+ */ + if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + dev_warn(priv->dev, "failed to get ring chain\n"); - ret = h->ae_algo->ops->unmap_ring_from_vector(h, + h->ae_algo->ops->unmap_ring_from_vector(h, tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - return ret; hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { - (void)irq_set_affinity_hint( - priv->tqp_vector[i].vector_irq, - NULL); - free_irq(priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); - } - - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } - - return 0; } -static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; struct pci_dev *pdev = h->pdev; @@ -2787,32 +3833,28 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) tqp_vector = &priv->tqp_vector[i]; ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); if (ret) - return ret; + return; } devm_kfree(&pdev->dev, priv->tqp_vector); - return 0; } -static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, - int ring_type) +static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + unsigned int ring_type) { - struct hns3_nic_ring_data *ring_data = priv->ring_data; int queue_num = priv->ae_handle->kinfo.num_tqps; - struct pci_dev *pdev = priv->ae_handle->pdev; struct hns3_enet_ring *ring; - - ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); - if (!ring) - return -ENOMEM; + int desc_num; if (ring_type == HNAE3_RING_TYPE_TX) { - ring_data[q->tqp_index].ring = ring; - ring_data[q->tqp_index].queue_index = q->tqp_index; + ring = &priv->ring[q->tqp_index]; + desc_num = priv->ae_handle->kinfo.num_tx_desc; + ring->queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { - ring_data[q->tqp_index + queue_num].ring = ring; - ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; + ring = &priv->ring[q->tqp_index + queue_num]; + desc_num = priv->ae_handle->kinfo.num_rx_desc; + ring->queue_index = q->tqp_index; ring->io_base = q->io_base; } @@ -2824,66 +3866,45 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->dev = priv->dev; ring->desc_dma_addr = 0; ring->buf_size = q->buf_size; - ring->desc_num = q->desc_num; + ring->desc_num = desc_num; ring->next_to_use = 0; ring->next_to_clean = 0; - - return 0; + ring->last_to_use = 0; } -static int hns3_queue_to_ring(struct hnae3_queue *tqp, - struct hns3_nic_priv *priv) +static void hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) { - int ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); - if (ret) - return ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); - if (ret) - return ret; - - return 0; + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); } static int hns3_get_ring_config(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; struct pci_dev *pdev = h->pdev; - int i, ret; + int i; - priv->ring_data = devm_kzalloc(&pdev->dev, - array3_size(h->kinfo.num_tqps, - sizeof(*priv->ring_data), - 2), - GFP_KERNEL); - if (!priv->ring_data) + priv->ring = devm_kzalloc(&pdev->dev, + 
array3_size(h->kinfo.num_tqps, + sizeof(*priv->ring), 2), + GFP_KERNEL); + if (!priv->ring) return -ENOMEM; - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); - if (ret) - goto err; - } + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_queue_to_ring(h->kinfo.tqp[i], priv); return 0; -err: - devm_kfree(&pdev->dev, priv->ring_data); - return ret; } static void hns3_put_ring_config(struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - int i; + if (!priv->ring) + return; - for (i = 0; i < h->kinfo.num_tqps; i++) { - devm_kfree(priv->dev, priv->ring_data[i].ring); - devm_kfree(priv->dev, - priv->ring_data[i + h->kinfo.num_tqps].ring); - } - devm_kfree(priv->dev, priv->ring_data); + devm_kfree(priv->dev, priv->ring); + priv->ring = NULL; } static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) @@ -2893,8 +3914,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) if (ring->desc_num <= 0 || ring->buf_size <= 0) return -EINVAL; - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), - GFP_KERNEL); + ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, + sizeof(ring->desc_cb[0]), GFP_KERNEL); if (!ring->desc_cb) { ret = -ENOMEM; goto out; @@ -2915,19 +3936,25 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) out_with_desc: hns3_free_desc(ring); out_with_desc_cb: - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; out: return ret; } -static void hns3_fini_ring(struct hns3_enet_ring *ring) +void hns3_fini_ring(struct hns3_enet_ring *ring) { hns3_free_desc(ring); - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; + ring->last_to_use = 0; + ring->pending_buf = 0; + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + } } static int hns3_buf_size2type(u32 buf_size) @@ -2960,8 +3987,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) struct hnae3_queue *q = ring->tqp; if (!HNAE3_IS_TX_RING(ring)) { - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, - (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); @@ -2984,21 +4010,17 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; int i; - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + for (i = 0; i < tc_info->num_tc; i++) { int j; - if (!tc_info->enable) - continue; - - for (j = 0; j < tc_info->tqp_count; j++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { struct hnae3_queue *q; - q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; - hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, - tc_info->tc); + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); } } } @@ -3011,21 +4033,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int ret; for (i = 0; i < ring_num; i++) { - ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { dev_err(priv->dev, "Alloc ring memory fail! 
ret=%d\n", ret); goto out_when_alloc_ring_memory; } - u64_stats_init(&priv->ring_data[i].ring->syncp); + u64_stats_init(&priv->ring[i].syncp); } return 0; out_when_alloc_ring_memory: for (j = i - 1; j >= 0; j--) - hns3_fini_ring(priv->ring_data[j].ring); + hns3_fini_ring(&priv->ring[j]); return -ENOMEM; } @@ -3036,71 +4058,122 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) int i; for (i = 0; i < h->kinfo.num_tqps; i++) { - if (h->ae_algo->ops->reset_queue) - h->ae_algo->ops->reset_queue(h, i); - - hns3_fini_ring(priv->ring_data[i].ring); - hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + hns3_fini_ring(&priv->ring[i]); + hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); } return 0; } /* Set mac addr if it is configured. or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev, bool init) +static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; + u8 mac_addr_temp[ETH_ALEN] = { 0 }; + int ret = 0; - if (h->ae_algo->ops->get_mac_addr && init) { + if (h->ae_algo->ops->get_mac_addr) h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(mac_addr_temp)) { + eth_hw_addr_random(netdev); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); + } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { ether_addr_copy(netdev->dev_addr, mac_addr_temp); + ether_addr_copy(netdev->perm_addr, mac_addr_temp); + } else { + return 0; } - /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); - } + if (h->ae_algo->ops->set_mac_addr) + ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); + + return ret; +} + +static int hns3_init_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; + + if (h->ae_algo->ops->mac_connect_phy) + ret = h->ae_algo->ops->mac_connect_phy(h); + + return ret; +} + +static void hns3_uninit_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->mac_disconnect_phy) + h->ae_algo->ops->mac_disconnect_phy(h); +} + +static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->del_all_fd_entries) + h->ae_algo->ops->del_all_fd_entries(h, clear_list); +} - if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); +static int hns3_client_start(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_start) + return 0; + return handle->ae_algo->ops->client_start(handle); } -static void hns3_uninit_mac_addr(struct net_device *netdev) +static void hns3_client_stop(struct hnae3_handle *handle) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + if (!handle->ae_algo->ops->client_stop) + return; - if (h->ae_algo->ops->rm_uc_addr) - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); + handle->ae_algo->ops->client_stop(handle); } -static void hns3_nic_set_priv_ops(struct net_device *netdev) +static void hns3_info_show(struct hns3_nic_priv 
*priv) { - struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); + dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); + dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); +} - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); } static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; + u16 alloc_tqps, max_rss_size; struct hns3_nic_priv *priv; struct net_device *netdev; int ret; - netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - hns3_get_max_available_channels(handle)); + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, + &max_rss_size); + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); if (!netdev) return -ENOMEM; @@ -3108,13 +4181,15 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); handle->kinfo.netdev = netdev; handle->priv = (void *)priv; - hns3_init_mac_addr(netdev, true); + hns3_init_mac_addr(netdev); hns3_set_default_feature(netdev); @@ -3123,16 +4198,10 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->netdev_ops = &hns3_nic_netdev_ops; SET_NETDEV_DEV(netdev, &pdev->dev); hns3_ethtool_set_ops(netdev); - hns3_nic_set_priv_ops(netdev); /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - if (handle->flags & HNAE3_SUPPORT_VF) - handle->reset_level = HNAE3_VF_RESET; - else - handle->reset_level = HNAE3_FUNC_RESET; - ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; @@ -3154,29 +4223,75 @@ static int hns3_client_init(struct hnae3_handle *handle) ret = hns3_init_all_ring(priv); if (ret) { ret = -ENOMEM; - goto out_init_ring_data; + goto out_init_ring; + } + + ret = hns3_init_phy(netdev); + if (ret) + goto out_init_phy; + + /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto out_init_irq_fail; + } + + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); + goto out_client_start; + } + + hns3_dcbnl_setup(handle); + + ret = hns3_dbg_init(handle); + if (ret) { + dev_err(priv->dev, "failed to init debugfs, ret = %d\n", + ret); + goto out_client_start; } +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ + netdev->max_mtu = HNS3_MAX_MTU; +#endif + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + ret = register_netdev(netdev); if (ret) { dev_err(priv->dev, "probe register netdev fail!\n"); goto out_reg_netdev_fail; } - hns3_dcbnl_setup(handle); - - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + if (netif_msg_drv(handle)) + hns3_info_show(priv); return ret; out_reg_netdev_fail: -out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); + hns3_state_uninit(handle); + hns3_dbg_uninit(handle); + hns3_client_stop(handle); +out_client_start: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +out_init_irq_fail: + hns3_uninit_phy(netdev); +out_init_phy: + hns3_uninit_all_ring(priv); +out_init_ring: + hns3_nic_uninit_vector_data(priv); out_init_vector_data: hns3_nic_dealloc_vector_data(priv); out_alloc_vector_data: - priv->ring_data = NULL; + priv->ring = NULL; out_get_ring_cfg: priv->ae_handle = NULL; free_netdev(netdev); @@ -3192,15 +4307,26 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); - hns3_force_clear_all_rx_ring(handle); + hns3_client_stop(handle); - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); + hns3_uninit_phy(netdev); - ret = hns3_nic_dealloc_vector_data(priv); - if (ret) - netdev_err(netdev, "dealloc vector error\n"); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + goto out_netdev_free; + } + + hns3_free_rx_cpu_rmap(netdev); + + hns3_nic_uninit_irq(priv); + + hns3_del_all_fd_rules(netdev, true); + + hns3_clear_all_ring(handle, true); + + hns3_nic_uninit_vector_data(priv); + + hns3_nic_dealloc_vector_data(priv); ret = hns3_uninit_all_ring(priv); if (ret) @@ -3208,10 +4334,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - priv->ring_data = NULL; - - hns3_uninit_mac_addr(netdev); - +out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } @@ -3223,73 +4347,27 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); + netif_carrier_on(netdev); + if (netif_msg_link(handle)) + netdev_info(netdev, "link up\n"); } else { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); - } -} - -static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - bool if_running; - int ret; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!ndev) - return -ENODEV; - - if_running = netif_running(ndev); - - if (if_running) { - (void)hns3_nic_net_stop(ndev); - msleep(100); + if (netif_msg_link(handle)) + netdev_info(netdev, "link down\n"); } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
- kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; - if (ret) - goto err_out; - - ret = hns3_nic_set_real_num_queue(ndev); - -err_out: - if (if_running) - (void)hns3_nic_net_open(ndev); - - return ret; -} - -static void hns3_recover_hw_addr(struct net_device *ndev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - - /* go through and sync uc_addr entries to the device */ - list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_uc_sync(ndev, ha->addr); - - /* go through and sync mc_addr entries to the device */ - list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_mc_sync(ndev, ha->addr); } static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; - hns3_free_buffer_detach(ring, ring->next_to_clean); + hns3_free_buffer_detach(ring, ring->next_to_clean, 0); ring_ptr_move_fw(ring, next_to_clean); } + + ring->pending_buf = 0; } static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) @@ -3303,7 +4381,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) * stack, so we need to replace the buffer here. */ if (!ring->desc_cb[ring->next_to_use].reuse_flag) { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -3311,17 +4389,23 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) /* if alloc new buffer fail, exit directly * and reclear in up flow. */ - netdev_warn(ring->tqp->handle->kinfo.netdev, + netdev_warn(ring_to_netdev(ring), "reserve buffer map failed, ret = %d\n", ret); return ret; } - hns3_replace_buffer(ring, ring->next_to_use, - &res_cbs); + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); } ring_ptr_move_fw(ring, next_to_use); } + /* Free the pending skb in rx ring */ + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + ring->pending_buf = 0; + } + return 0; } @@ -3342,40 +4426,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) } } -static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) -{ - struct net_device *ndev = h->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hns3_enet_ring *ring; - u32 i; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; - hns3_force_clear_rx_ring(ring); - } -} - -static void hns3_clear_all_ring(struct hnae3_handle *h) +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) { struct net_device *ndev = h->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(ndev); u32 i; for (i = 0; i < h->kinfo.num_tqps; i++) { - struct netdev_queue *dev_queue; struct hns3_enet_ring *ring; - ring = priv->ring_data[i].ring; + ring = &priv->ring[i]; hns3_clear_tx_ring(ring); - dev_queue = netdev_get_tx_queue(ndev, - priv->ring_data[i].queue_index); - netdev_tx_reset_queue(dev_queue); - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + ring = &priv->ring[i + h->kinfo.num_tqps]; /* Continue to clear other rings even if clearing some * rings failed. 
*/ - hns3_clear_rx_ring(ring); + if (force) + hns3_force_clear_rx_ring(ring); + else + hns3_clear_rx_ring(ring); } } @@ -3387,18 +4457,22 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) int i, j; int ret; + ret = h->ae_algo->ops->reset_queue(h); + if (ret) + return ret; + for (i = 0; i < h->kinfo.num_tqps; i++) { - h->ae_algo->ops->reset_queue(h, i); - hns3_init_ring_hw(priv->ring_data[i].ring); + hns3_init_ring_hw(&priv->ring[i]); /* We need to clear tx ring here because self test will * use the ring and will not run down before up */ - hns3_clear_tx_ring(priv->ring_data[i].ring); - priv->ring_data[i].ring->next_to_clean = 0; - priv->ring_data[i].ring->next_to_use = 0; + hns3_clear_tx_ring(&priv->ring[i]); + priv->ring[i].next_to_clean = 0; + priv->ring[i].next_to_use = 0; + priv->ring[i].last_to_use = 0; - rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + rx_ring = &priv->ring[i + h->kinfo.num_tqps]; hns3_init_ring_hw(rx_ring); ret = hns3_clear_rx_ring(rx_ring); if (ret) @@ -3419,10 +4493,39 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) return 0; } +static void hns3_store_coal(struct hns3_nic_priv *priv) +{ + /* ethtool only support setting and querying one coal + * configuration for now, so save the vector 0' coal + * configuration here in order to restore it. + */ + memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal, + sizeof(struct hns3_enet_coalesce)); +} + +static void hns3_restore_coal(struct hns3_nic_priv *priv) +{ + u16 vector_num = priv->vector_num; + int i; + + for (i = 0; i < vector_num; i++) { + memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal, + sizeof(struct hns3_enet_coalesce)); + memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal, + sizeof(struct hns3_enet_coalesce)); + } +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; if (!netif_running(ndev)) return 0; @@ -3433,16 +4536,24 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_err(kinfo->netdev, "device is not initialized yet\n"); + return -EFAULT; + } + + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); + if (netif_running(kinfo->netdev)) { - ret = hns3_nic_net_up(kinfo->netdev); + ret = hns3_nic_net_open(kinfo->netdev); if (ret) { + set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); netdev_err(kinfo->netdev, - "hns net up fail, ret=%d!\n", ret); + "net up fail, ret=%d!\n", ret); return ret; } - handle->last_reset_time = jiffies; } return ret; @@ -3454,27 +4565,64 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_init_mac_addr(netdev, false); - hns3_nic_set_rx_mode(netdev); - hns3_recover_hw_addr(netdev); - - /* Hardware table is only clear when pf resets */ - if (!(handle->flags & HNAE3_SUPPORT_VF)) - hns3_restore_vlan(netdev); - /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - ret = hns3_nic_init_vector_data(priv); + ret = 
hns3_get_ring_config(priv); if (ret) return ret; + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + goto err_put_ring; + + hns3_restore_coal(priv); + + ret = hns3_nic_init_vector_data(priv); + if (ret) + goto err_dealloc_vector; + ret = hns3_init_all_ring(priv); + if (ret) + goto err_uninit_vector; + + /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto err_init_irq_fail; + } + + if (!hns3_is_phys_func(handle->pdev)) + hns3_init_mac_addr(netdev); + + ret = hns3_client_start(handle); if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); + goto err_client_start_fail; } + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + return ret; + +err_client_start_fail: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +err_init_irq_fail: + hns3_uninit_all_ring(priv); +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); +err_dealloc_vector: + hns3_nic_dealloc_vector_data(priv); +err_put_ring: + hns3_put_ring_config(priv); + return ret; } @@ -3484,19 +4632,27 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_force_clear_all_rx_ring(handle); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - netdev_err(netdev, "uninit vector error\n"); - return ret; + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + return 0; } + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); + hns3_clear_all_ring(handle, true); + hns3_reset_tx_queue(priv->ae_handle); + + hns3_nic_uninit_vector_data(priv); + + hns3_store_coal(priv); + + hns3_nic_dealloc_vector_data(priv); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_uninit_mac_addr(netdev); + hns3_put_ring_config(priv); return ret; } @@ -3526,150 +4682,130 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static void hns3_restore_coal(struct hns3_nic_priv *priv, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) -{ - u16 vector_num = priv->vector_num; - int i; - - for (i = 0; i < vector_num; i++) { - memcpy(&priv->tqp_vector[i].tx_group.coal, tx, - sizeof(struct hns3_enet_coalesce)); - memcpy(&priv->tqp_vector[i].rx_group.coal, rx, - sizeof(struct hns3_enet_coalesce)); - } -} - -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num, - struct hns3_enet_coalesce *tx, - struct hns3_enet_coalesce *rx) +static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, + bool rxfh_configured) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); int ret; - ret = h->ae_algo->ops->set_channels(h, new_tqp_num); - if (ret) + ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, + rxfh_configured); + if (ret) { + dev_err(&handle->pdev->dev, + "Change tqp num(%u) fail.\n", new_tqp_num); return ret; + } - ret = hns3_get_ring_config(priv); + ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); if (ret) return ret; - ret = hns3_nic_alloc_vector_data(priv); - if (ret) - goto err_alloc_vector; - - hns3_restore_coal(priv, tx, rx); - - ret = hns3_nic_init_vector_data(priv); - if (ret) - goto 
err_uninit_vector; - - ret = hns3_init_all_ring(priv); + ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); if (ret) - goto err_put_ring; - - return 0; + hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); -err_put_ring: - hns3_put_ring_config(priv); -err_uninit_vector: - hns3_nic_uninit_vector_data(priv); -err_alloc_vector: - hns3_nic_dealloc_vector_data(priv); return ret; } -static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) -{ - return (new_tqp_num / num_tc) * num_tc; -} - int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { - struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - struct hns3_enet_coalesce tx_coal, rx_coal; - bool if_running = netif_running(netdev); + bool rxfh_configured = netif_is_rxfh_configured(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (ch->rx_count || ch->tx_count) return -EINVAL; + if (kinfo->tc_info.mqprio_active) { + dev_err(&netdev->dev, + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); + return -EINVAL; + } + if (new_tqp_num > hns3_get_max_available_channels(h) || - new_tqp_num < kinfo->num_tc) { + new_tqp_num < 1) { dev_err(&netdev->dev, - "Change tqps fail, the tqp range is from %d to %d", - kinfo->num_tc, + "Change tqps fail, the tqp range is from 1 to %u", hns3_get_max_available_channels(h)); return -EINVAL; } - new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); - if (kinfo->num_tqps == new_tqp_num) + if (kinfo->rss_size == new_tqp_num) return 0; - if (if_running) - hns3_nic_net_stop(netdev); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - dev_err(&netdev->dev, - "Unbind vector with tqp fail, nothing is changed"); - goto open_netdev; - } - - /* Changing the tqp num may also change the vector num, - * ethtool only support setting and querying one coal - * configuation for now, so save the vector 0' coal - * configuation here in order to restore it. 
- */ - memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal, - sizeof(struct hns3_enet_coalesce)); - memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal, - sizeof(struct hns3_enet_coalesce)); + if (netif_msg_ifdown(h)) + netdev_info(netdev, + "set channels: tqp_num=%u, rxfh=%d\n", + new_tqp_num, rxfh_configured); - hns3_nic_dealloc_vector_data(priv); + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; - hns3_uninit_all_ring(priv); - hns3_put_ring_config(priv); + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); + ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num, - &tx_coal, &rx_coal); - if (ret) { - /* If revert to old tqp failed, fatal error occurred */ - dev_err(&netdev->dev, - "Revert to old tqp num fail, ret=%d", ret); - return ret; + int ret1; + + netdev_warn(netdev, + "Change channels fail, revert to old value\n"); + ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); + if (ret1) { + netdev_err(netdev, + "revert to old channel fail\n"); + return ret1; } - dev_info(&netdev->dev, - "Change tqp num fail, Revert to old tqp num"); + + return ret; } -open_netdev: - if (if_running) - hns3_nic_net_open(netdev); + return 0; +} + +static const struct hns3_hw_error_info hns3_hw_err[] = { + { .type = HNAE3_PPU_POISON_ERROR, + .msg = "PPU poison" }, + { .type = HNAE3_CMDQ_ECC_ERROR, + .msg = "IMP CMDQ error" }, + { .type = HNAE3_IMP_RD_POISON_ERROR, + .msg = "IMP RD poison" }, + { .type = HNAE3_ROCEE_AXI_RESP_ERROR, + .msg = "ROCEE AXI RESP error" }, +}; - return ret; +static void hns3_process_hw_error(struct hnae3_handle *handle, + enum hnae3_hw_error_type type) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { + if (hns3_hw_err[i].type == type) { + dev_err(&handle->pdev->dev, "Detected %s!\n", + hns3_hw_err[i].msg); + break; + } + } } -static const struct hnae3_client_ops client_ops = { +const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, - .setup_tc = hns3_client_setup_tc, .reset_notify = hns3_reset_notify, + .process_hw_error = hns3_process_hw_error, }; +#ifndef CONFIG_IT_VALIDATION /* hns3_init_module - Driver registration routine * hns3_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. 
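The reworked hns3_set_channels() above drives the queue-count change through the DOWN/UNINIT/INIT/UP client notifications and, when the new value cannot be applied, falls back to the previous tqp number before reporting the error. Below is a minimal standalone sketch of that try-then-revert flow; apply_channel_count() and its acceptance rule are hypothetical stand-ins for the driver's real set_channels callback, not the driver code itself.

#include <stdio.h>

/* Hypothetical stand-in for the hardware/stack reconfiguration step;
 * pretend the hardware rejects more than 8 queues.
 */
static int apply_channel_count(unsigned int tqp_num)
{
        return tqp_num <= 8 ? 0 : -1;
}

/* Try the requested value; on failure restore the old one so the
 * device keeps running with its previous configuration, and only
 * report a fatal error when the revert itself also fails.
 */
static int set_channels(unsigned int old_tqp_num, unsigned int new_tqp_num)
{
        int ret = apply_channel_count(new_tqp_num);

        if (!ret)
                return 0;

        fprintf(stderr, "change channels to %u failed, reverting to %u\n",
                new_tqp_num, old_tqp_num);

        if (apply_channel_count(old_tqp_num)) {
                fprintf(stderr, "revert to old channel count failed\n");
                return -1;
        }

        return ret;     /* revert succeeded, report the original failure */
}

int main(void)
{
        return set_channels(4, 16) ? 1 : 0;
}

The same shape appears in hns3_set_ringparam() later in this patch: back up the old state, attempt the new configuration, and fall back to the backup when initialization with the new values fails.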
@@ -3682,24 +4818,33 @@ static int __init hns3_init_module(void) pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); client.type = HNAE3_CLIENT_KNIC; - snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", hns3_driver_name); client.ops = &client_ops; INIT_LIST_HEAD(&client.node); + hns3_dbg_register_debugfs(hns3_driver_name); + ret = hnae3_register_client(&client); if (ret) - return ret; + goto err_reg_client; ret = pci_register_driver(&hns3_driver); if (ret) - hnae3_unregister_client(&client); + goto err_reg_driver; return ret; + +err_reg_driver: + hnae3_unregister_client(&client); +err_reg_client: + hns3_dbg_unregister_debugfs(); + return ret; } module_init(hns3_init_module); +#endif /* hns3_exit_module - Driver exit cleanup routine * hns3_exit_module is called just before the driver is removed @@ -3709,6 +4854,7 @@ static void __exit hns3_exit_module(void) { pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); + hns3_dbg_unregister_debugfs(); } module_exit(hns3_exit_module); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index cb450d7ec8c1665a9ae690a3e3a9b78843976cf4..1cd9c0c34a766f23112655b862fce5b0448e7dcb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -8,14 +8,14 @@ #include "hnae3.h" -#define HNS3_MOD_VERSION "1.0" +#define HNS3_MOD_VERSION "24.3.1" -extern const char hns3_driver_version[]; +extern char hns3_driver_version[]; enum hns3_nic_state { HNS3_NIC_STATE_TESTING, HNS3_NIC_STATE_RESETTING, - HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_INITED, HNS3_NIC_STATE_DOWN, HNS3_NIC_STATE_DISABLED, HNS3_NIC_STATE_REMOVING, @@ -42,12 +42,14 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 #define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - +#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074 #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C -#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_EN_REG 0x00090 #define HNS3_RING_T0_BE_RST 0x00094 #define HNS3_RING_COULD_BE_RST 0x00098 #define HNS3_RING_WRR_WEIGHT_REG 0x0009c @@ -73,10 +75,13 @@ enum hns3_nic_state { #define HNS3_TX_TIMEOUT (5 * HZ) #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 -#define HNS3_RING_MAX_PENDING 32768 -#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_MAX_PENDING 32760 +#define HNS3_RING_MIN_PENDING 72 #define HNS3_RING_BD_MULTIPLE 8 -#define HNS3_MAX_MTU 9728 +/* max frame size of mac */ +#define HNS3_MAC_MAX_FRAME 9728 +#define HNS3_MAX_MTU \ + (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)) #define HNS3_BD_SIZE_512_TYPE 0 #define HNS3_BD_SIZE_1024_TYPE 1 @@ -109,6 +114,10 @@ enum hns3_nic_state { #define HNS3_RXD_DOI_B 21 #define HNS3_RXD_OL3E_B 22 #define HNS3_RXD_OL4E_B 23 +#define HNS3_RXD_GRO_COUNT_S 24 +#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S) +#define HNS3_RXD_GRO_FIXID_B 30 +#define HNS3_RXD_GRO_ECN_B 31 #define HNS3_RXD_ODMAC_S 0 #define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) @@ -135,9 +144,8 @@ enum hns3_nic_state { #define HNS3_RXD_TSIND_S 12 #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) #define HNS3_RXD_LKBK_B 15 -#define HNS3_RXD_HDL_S 16 -#define 
HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) -#define HNS3_RXD_HSIND_B 31 +#define HNS3_RXD_GRO_SIZE_S 16 +#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S) #define HNS3_TXD_L3T_S 0 #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) @@ -185,8 +193,13 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_PER_FRAG 8 -#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS +#define HNS3_MAX_NON_TSO_BD_NUM 8U +#define HNS3_MAX_TSO_BD_NUM 63U +#define HNS3_MAX_TSO_SIZE \ + (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM) + +#define HNS3_MAX_NON_TSO_SIZE \ + (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM) #define HNS3_VECTOR_GL0_OFFSET 0x100 #define HNS3_VECTOR_GL1_OFFSET 0x200 @@ -194,6 +207,15 @@ enum hns3_nic_state { #define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_RING_EN_B 0 + +enum hns3_pkt_l2t_type { + HNS3_L2_TYPE_UNICAST, + HNS3_L2_TYPE_MULTICAST, + HNS3_L2_TYPE_BROADCAST, + HNS3_L2_TYPE_INVALID, +}; + enum hns3_pkt_l3t_type { HNS3_L3T_NONE, HNS3_L3T_IPV6, @@ -282,15 +304,21 @@ struct hns3_desc_cb { dma_addr_t dma; /* dma address of this desc */ void *buf; /* cpu addr for a desc */ - /* priv data for the desc, e.g. skb when use with ip stack*/ + /* priv data for the desc, e.g. skb when use with ip stack */ void *priv; - u32 page_offset; + + union { + u32 page_offset; /* for rx */ + u32 send_bytes; /* for tx */ + }; + u32 length; /* length of the buffer */ u16 reuse_flag; - /* desc type, used by the ring user to mark the type of the priv data */ + /* desc type, used by the ring user to mark the type of the priv data */ u16 type; + u16 pagecnt_bias; }; enum hns3_pkt_l3type { @@ -305,11 +333,11 @@ enum hns3_pkt_l3type { HNS3_L3_TYPE_MAC_PAUSE, HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ - /* reserved for 0xA~0xB*/ + /* reserved for 0xA~0xB */ HNS3_L3_TYPE_CNM = 0xc, - /* reserved for 0xD~0xE*/ + /* reserved for 0xD~0xE */ HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ }; @@ -334,7 +362,7 @@ enum hns3_pkt_ol3type { HNS3_OL3_TYPE_IPV4_OPT = 4, HNS3_OL3_TYPE_IPV6_EXT, - /* reserved for 0x6~0xE*/ + /* reserved for 0x6~0xE */ HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ }; @@ -347,16 +375,22 @@ enum hns3_pkt_ol4type { }; struct ring_stats { - u64 io_err_cnt; u64 sw_err_cnt; u64 seg_pkt_cnt; union { struct { u64 tx_pkts; u64 tx_bytes; - u64 tx_err_cnt; + u64 tx_more; u64 restart_queue; u64 tx_busy; + u64 tx_copy; + u64 tx_vlan_err; + u64 tx_l4_proto_err; + u64 tx_l2l3l4_err; + u64 tx_tso_err; + u64 over_max_recursion; + u64 hw_limitation; }; struct { u64 rx_pkts; @@ -364,10 +398,11 @@ struct ring_stats { u64 rx_err_cnt; u64 reuse_pg_cnt; u64 err_pkt_len; - u64 non_vld_descs; u64 err_bd_num; u64 l2_err; u64 l3l4_csum_err; + u64 rx_multicast; + u64 non_reuse_pg; }; }; }; @@ -379,7 +414,7 @@ struct hns3_enet_ring { struct hns3_enet_ring *next; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; + int queue_index; struct device *dev; /* will be used for DMA mapping of descriptors */ /* statistic */ @@ -389,42 +424,25 @@ struct hns3_enet_ring { dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; int next_to_use; /* idx of next spare desc */ /* idx of lastest sent desc, the ring is empty when equal to * next_to_use */ int next_to_clean; + union { + int last_to_use; /* last idx used by xmit */ + u32 pull_len; /* memcpy len for current rx packet 
*/ + }; + u32 frag_num; + void *va; /* first buffer address for current packet */ u32 flag; /* ring attribute */ - int irq_init_flag; - - int numa_node; - cpumask_t affinity_mask; -}; - -struct hns_queue; - -struct hns3_nic_ring_data { - struct hns3_enet_ring *ring; - struct napi_struct napi; - int queue_index; - int (*poll_one)(struct hns3_nic_ring_data *, int, void *); - void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); - void (*fini_process)(struct hns3_nic_ring_data *); -}; -struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type); - int (*maybe_stop_tx)(struct sk_buff **out_skb, - int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); -}; + int pending_buf; + struct sk_buff *skb; + struct sk_buff *tail_skb; +} ____cacheline_internodealigned_in_smp; enum hns3_flow_level_range { HNS3_FLOW_LOW = 0, @@ -433,25 +451,6 @@ enum hns3_flow_level_range { HNS3_FLOW_ULTRA = 3, }; -enum hns3_link_mode_bits { - HNS3_LM_FIBRE_BIT = BIT(0), - HNS3_LM_AUTONEG_BIT = BIT(1), - HNS3_LM_TP_BIT = BIT(2), - HNS3_LM_PAUSE_BIT = BIT(3), - HNS3_LM_BACKPLANE_BIT = BIT(4), - HNS3_LM_10BASET_HALF_BIT = BIT(5), - HNS3_LM_10BASET_FULL_BIT = BIT(6), - HNS3_LM_100BASET_HALF_BIT = BIT(7), - HNS3_LM_100BASET_FULL_BIT = BIT(8), - HNS3_LM_1000BASET_FULL_BIT = BIT(9), - HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), - HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), - HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), - HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), - HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), - HNS3_LM_COUNT = 15 -}; - #define HNS3_INT_GL_MAX 0x1FE0 #define HNS3_INT_GL_50K 0x0014 #define HNS3_INT_GL_20K 0x0032 @@ -461,8 +460,6 @@ enum hns3_link_mode_bits { #define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_ENABLE_MASK 0x40 -#define HNS3_INT_ADAPT_DOWN_START 100 - struct hns3_enet_coalesce { u16 int_gl; u8 gl_adapt_enable; @@ -491,12 +488,12 @@ struct hns3_enet_tqp_vector { struct hns3_enet_ring_group rx_group; struct hns3_enet_ring_group tx_group; + cpumask_t affinity_mask; u16 num_tqps; /* total number of tqps in TQP vector */ + struct irq_affinity_notify affinity_notify; char name[HNAE3_INT_NAME_LEN]; - /* when 0 should adjust interrupt coalesce parameter */ - u8 int_adapt_down; unsigned long last_jiffies; } ____cacheline_internodealigned_in_smp; @@ -517,13 +514,12 @@ struct hns3_nic_priv { u32 port_id; struct net_device *netdev; struct device *dev; - struct hns3_nic_ops ops; /** * the cb for nic to manage the ring buffer, the first half of the * array is for tx_ring and vice versa for the second half */ - struct hns3_nic_ring_data *ring_data; + struct hns3_enet_ring *ring; struct hns3_enet_tqp_vector *tqp_vector; u16 vector_num; @@ -540,7 +536,8 @@ struct hns3_nic_priv { struct notifier_block notifier_block; /* Vxlan/Geneve information */ struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct hns3_enet_coalesce tx_coal; + struct hns3_enet_coalesce rx_coal; }; union l3_hdr_info { @@ -552,26 +549,30 @@ union l3_hdr_info { union l4_hdr_info { struct tcphdr *tcp; struct udphdr *udp; + struct gre_base_hdr *gre; unsigned char *hdr; }; -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + ring->desc_num) % ring->desc_num; -} +struct hns3_hw_error_info { + enum 
hnae3_hw_error_type type; + const char *msg; +}; static inline int ring_space(struct hns3_enet_ring *ring) { - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring. + */ + int begin = smp_load_acquire(&ring->next_to_clean); + int end = READ_ONCE(ring->next_to_use); + + return ((end >= begin) ? (ring->desc_num - end + begin) : + (begin - end)) - 1; } -static inline int is_ring_empty(struct hns3_enet_ring *ring) +static inline u32 hns3_read_reg(void __iomem *base, u32 reg) { - return ring->next_to_use == ring->next_to_clean; + return readl(base + reg); } static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) @@ -581,22 +582,38 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } +#define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +static inline bool hns3_nic_resetting(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state); +} + #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) -#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ - (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) +#define ring_to_dev(ring) ((ring)->dev) -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) +#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev) #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ DMA_TO_DEVICE : DMA_FROM_DEVICE) -#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) +#define hns3_buf_size(_ring) ((_ring)->buf_size) -#define hnae3_buf_size(_ring) ((_ring)->buf_size) -#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) -#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring)) +static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->buf_size > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ @@ -615,11 +632,13 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch); -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); +void hns3_fini_ring(struct hns3_enet_ring *ring); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); @@ -631,10 +650,18 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_request_update_promisc_mode(struct hnae3_handle *handle); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} #endif - +bool hns3_get_tx_timeo_queue_info(struct net_device *ndev); +int hns3_dbg_init(struct hnae3_handle *handle); +void 
hns3_dbg_uninit(struct hnae3_handle *handle); +void hns3_dbg_register_debugfs(const char *debugfs_dir_name); +void hns3_dbg_unregister_debugfs(void); +void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size); +u16 hns3_get_max_available_channels(struct hnae3_handle *h); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee27a436575734b068a2ee3599a78ae..be771c75e37e3ffa49b9ed952e28274a8a6eea8f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -4,7 +4,7 @@ #include #include #include - +#include "kcompat.h" #include "hns3_enet.h" struct hns3_stats { @@ -12,6 +12,16 @@ struct hns3_stats { int stats_offset; }; +#define HNS3_MODULE_TYPE_QSFP 0x0C +#define HNS3_MODULE_TYPE_QSFP_P 0x0D +#define HNS3_MODULE_TYPE_QSFP_28 0x11 +#define HNS3_MODULE_TYPE_SFP 0x03 + +struct hns3_sfp_type { + u8 type; + u8 ext_type; +}; + /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ @@ -21,53 +31,55 @@ struct hns3_stats { static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ - HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), HNS3_TQP_STAT("bytes", tx_bytes), - HNS3_TQP_STAT("errors", tx_err_cnt), - HNS3_TQP_STAT("tx_wake", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), + HNS3_TQP_STAT("more", tx_more), + HNS3_TQP_STAT("wake", restart_queue), + HNS3_TQP_STAT("busy", tx_busy), + HNS3_TQP_STAT("copy", tx_copy), + HNS3_TQP_STAT("vlan_err", tx_vlan_err), + HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err), + HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err), + HNS3_TQP_STAT("tso_err", tx_tso_err), + HNS3_TQP_STAT("over_max_recursion", over_max_recursion), + HNS3_TQP_STAT("hw_limitation", hw_limitation), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ - HNS3_TQP_STAT("io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", rx_pkts), HNS3_TQP_STAT("bytes", rx_bytes), HNS3_TQP_STAT("errors", rx_err_cnt), HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), HNS3_TQP_STAT("err_pkt_len", err_pkt_len), - HNS3_TQP_STAT("non_vld_descs", non_vld_descs), HNS3_TQP_STAT("err_bd_num", err_bd_num), HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("multicast", rx_multicast), + HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 2 +#define HNS3_SELF_TEST_TYPE_NUM 4 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 +#define HNS3_NIC_LB_SETUP_USEC 10000 /* Nic loopback test err */ #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 -struct hns3_link_mode_mapping { - u32 hns3_link_mode; - u32 ethtool_link_mode; -}; - static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); @@ -78,8 +90,10 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop 
loop, bool en) return -EOPNOTSUPP; switch (loop) { - case HNAE3_MAC_INTER_LOOP_SERDES: - case HNAE3_MAC_INTER_LOOP_MAC: + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + case HNAE3_LOOP_APP: + case HNAE3_LOOP_PHY: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -87,10 +101,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) break; } - if (ret) + if (ret || h->pdev->revision >= 0x21) return ret; - h->ae_algo->ops->set_promisc_mode(h, en, en); + if (en) + h->ae_algo->ops->set_promisc_mode(h, true, true); + else + /* recover promisc mode before loopback test */ + hns3_request_update_promisc_mode(h); return ret; } @@ -100,58 +118,60 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - ret = hns3_nic_reset_all_ring(h); if (ret) return ret; - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - ret = hns3_lp_setup(ndev, loop_mode, true); - usleep_range(10000, 20000); + usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2); return ret; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { - struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; } - h->ae_algo->ops->stop(h); - usleep_range(10000, 20000); + usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2); return 0; } static void hns3_lp_setup_skb(struct sk_buff *skb) { +#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f + struct net_device *ndev = skb->dev; + struct hnae3_handle *handle; unsigned char *packet; struct ethhdr *ethh; unsigned int i; skb_reserve(skb, NET_IP_ALIGN); + +#ifdef SKB_PUT_RETURN_VOID_POINT ethh = skb_put(skb, sizeof(struct ethhdr)); +#else + ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); +#endif packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + + /* The dst mac addr of loopback packet is the same as the host' + * mac addr, the SSU component may loop back the packet to host + * before the packet reaches mac or serdes, which will defect + * the purpose of mac or serdes selftest. 
+ */ + handle = hns3_get_handle(ndev); + if (handle->pdev->revision == 0x20) + ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -165,18 +185,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == len) tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + skb->data, len, true); dev_kfree_skb_any(skb); } @@ -189,7 +212,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) kinfo = &h->kinfo; for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring *ring = &priv->ring[i]; struct hns3_enet_ring_group *rx_group; u64 pre_rx_pkt; @@ -212,9 +235,9 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, u32 i; for (i = start_ringid; i <= end_ringid; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring *ring = &priv->ring[i]; - hns3_clean_tx_ring(ring, budget); + hns3_clean_tx_ring(ring, 0); } } @@ -245,11 +268,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) skb_get(skb); tx_ret = hns3_nic_net_xmit(skb, ndev); - if (tx_ret == NETDEV_TX_OK) + if (tx_ret == NETDEV_TX_OK) { good_cnt++; - else + } else { + kfree_skb(skb); netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", tx_ret); + } } if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; @@ -296,20 +321,37 @@ static void hns3_self_test(struct net_device *ndev, int test_index = 0; u32 i; + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } + /* Only do offline selftest, or pass by default */ if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; + st_param[HNAE3_LOOP_APP][1] = + h->flags & HNAE3_SUPPORT_APP_LOOPBACK; + + st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; + st_param[HNAE3_LOOP_SERIAL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES; - st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] = - h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; + st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = + HNAE3_LOOP_PARALLEL_SERDES; + st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + + st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; + st_param[HNAE3_LOOP_PHY][1] = + h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -319,6 +361,13 @@ static void hns3_self_test(struct net_device *ndev, h->ae_algo->ops->enable_vlan_filter(h, false); #endif + /* Tell firmware to stop mac autoneg before loopback test 
start, + * otherwise loopback test may be failed when the port is still + * negotiating. + */ + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, true); + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { @@ -328,10 +377,10 @@ static void hns3_self_test(struct net_device *ndev, continue; data[test_index] = hns3_lp_up(ndev, loop_type); - if (!data[test_index]) { + if (!data[test_index]) data[test_index] = hns3_lp_run_test(ndev, loop_type); - hns3_lp_down(ndev, loop_type); - } + + hns3_lp_down(ndev, loop_type); if (data[test_index]) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -341,13 +390,19 @@ static void hns3_self_test(struct net_device *ndev, clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, false); + #if IS_ENABLED(CONFIG_VLAN_8021Q) if (dis_vlan_filter) h->ae_algo->ops->enable_vlan_filter(h, true); #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -365,9 +420,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); - } - return 0; + default: + return -EOPNOTSUPP; + } } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, @@ -383,9 +439,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", - prefix, i); - n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); + n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_", + prefix, i); size_left = (ETH_GSTRING_LEN - 1) - n1; /* now, concatenate the stats string to it */ @@ -426,11 +481,13 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: buff = hns3_get_strings_tqps(h, buff); - h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); + ops->get_strings(h, stringset, (u8 *)buff); break; case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + default: + break; } } @@ -444,7 +501,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i].ring; + ring = &nic_priv->ring[i]; for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; @@ -453,7 +510,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + ring = &nic_priv->ring[i + kinfo->num_tqps]; for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; @@ -474,6 +531,11 @@ static void hns3_get_stats(struct net_device *netdev, struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting, could not get stats\n"); + return; + } + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { netdev_err(netdev, "could not get any statistics\n"); return; @@ -493,6 +555,12 @@ static void hns3_get_drvinfo(struct net_device *netdev, { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + u32 fw_version; + 
+ if (!h->ae_algo->ops->get_fw_version) { + netdev_err(netdev, "could not get fw version!\n"); + return; + } strncpy(drvinfo->version, hns3_driver_version, sizeof(drvinfo->version)); @@ -506,15 +574,25 @@ static void hns3_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->bus_info)); drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - priv->ae_handle->ae_algo->ops->get_fw_version(h)); + fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%lu.%lu.%lu.%lu", + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); } static u32 hns3_get_link(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) + if (h->ae_algo->ops->get_status) return h->ae_algo->ops->get_status(h); else return 0; @@ -527,11 +605,16 @@ static void hns3_get_ringparam(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return; + } + param->tx_max_pending = HNS3_RING_MAX_PENDING; param->rx_max_pending = HNS3_RING_MAX_PENDING; - param->tx_pending = priv->ring_data[0].ring->desc_num; - param->rx_pending = priv->ring_data[queue_num].ring->desc_num; + param->tx_pending = priv->ring[0].desc_num; + param->rx_pending = priv->ring[queue_num].desc_num; } static void hns3_get_pauseparam(struct net_device *netdev, @@ -539,7 +622,7 @@ static void hns3_get_pauseparam(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) + if (h->ae_algo->ops->get_pauseparam) h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, ¶m->rx_pause, ¶m->tx_pause); } @@ -549,6 +632,11 @@ static int hns3_set_pauseparam(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + if (netif_msg_ifdown(h)) + netdev_info(netdev, + "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", + param->autoneg, param->rx_pause, param->tx_pause); + if (h->ae_algo->ops->set_pauseparam) return h->ae_algo->ops->set_pauseparam(h, param->autoneg, param->rx_pause, @@ -556,30 +644,77 @@ static int hns3_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } +static void hns3_get_ksettings(struct hnae3_handle *h, + struct ethtool_link_ksettings *cmd) +{ + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + + /* 2.get link mode */ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - u32 flowctrl_adv = 0; + const struct hnae3_ae_ops *ops; + u8 module_type; + u8 media_type; 
u8 link_stat; - if (!h->ae_algo || !h->ae_algo->ops) + ops = h->ae_algo->ops; + if (ops->get_media_type) + ops->get_media_type(h, &media_type, &module_type); + else return -EOPNOTSUPP; - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) { - phy_ethtool_ksettings_get(netdev->phydev, cmd); + switch (media_type) { + case HNAE3_MEDIA_TYPE_NONE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_FIBER: + if (module_type == HNAE3_MODULE_TYPE_CR) + cmd->base.port = PORT_DA; + else + cmd->base.port = PORT_FIBRE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + if (!netdev->phydev) + hns3_get_ksettings(h, cmd); + else + phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + + netdev_warn(netdev, "Unknown media type"); return 0; } - if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; link_stat = hns3_get_link(netdev); if (!link_stat) { @@ -587,35 +722,51 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.get link mode and port type*/ - if (h->ae_algo->ops->get_link_mode) - h->ae_algo->ops->get_link_mode(h, - cmd->link_modes.supported, - cmd->link_modes.advertising); + return 0; +} - cmd->base.port = PORT_NONE; - if (h->ae_algo->ops->get_port_type) - h->ae_algo->ops->get_port_type(h, - &cmd->base.port); +static int hns3_check_ksettings_param(const struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u8 autoneg; + u32 speed; + u8 duplex; + int ret; - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + /* hw doesn't support use specified speed and duplex to negotiate, + * unnecessary to check them when autoneg on. 
+ */ + if (cmd->base.autoneg) + return 0; - /* 5.get flow control setttings */ - if (h->ae_algo->ops->get_flowctrl_adv) - h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); + if (ops->get_ksettings_an_result) { + ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex); + if (cmd->base.autoneg == autoneg && cmd->base.speed == speed && + cmd->base.duplex == duplex) + return 0; + } - if (flowctrl_adv & ADVERTISED_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Pause); + if (ops->get_media_type) + ops->get_media_type(handle, &media_type, &module_type); - if (flowctrl_adv & ADVERTISED_Asym_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); + if (cmd->base.duplex == DUPLEX_HALF && + media_type != HNAE3_MEDIA_TYPE_COPPER) { + netdev_err(netdev, + "only copper port supports half duplex!"); + return -EINVAL; + } + + if (ops->check_port_speed) { + ret = ops->check_port_speed(handle, cmd->base.speed); + if (ret) { + netdev_err(netdev, "unsupported speed\n"); + return ret; + } + } return 0; } @@ -623,19 +774,64 @@ static int hns3_get_link_ksettings(struct net_device *netdev, static int hns3_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + int ret; + + /* Chip don't support this mode. */ + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + + if (netif_msg_ifdown(handle)) + netdev_info(netdev, + "set link(%s): autoneg=%u, speed=%u, duplex=%u\n", + netdev->phydev ? "phy" : "mac", + cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex); + /* Only support ksettings_set for netdev with phy attached for now */ - if (netdev->phydev) + if (netdev->phydev) { + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg == AUTONEG_DISABLE) + return -EINVAL; + return phy_ethtool_ksettings_set(netdev->phydev, cmd); + } - return -EOPNOTSUPP; + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + ret = hns3_check_ksettings_param(netdev, cmd); + if (ret) + return ret; + + if (ops->set_autoneg) { + ret = ops->set_autoneg(handle, cmd->base.autoneg); + if (ret) + return ret; + } + + /* hw doesn't support use specified speed and duplex to negotiate, + * ignore them when autoneg on. 
+ */ + if (cmd->base.autoneg) { + netdev_info(netdev, + "autoneg is on, ignore the speed and duplex\n"); + return 0; + } + + if (ops->cfg_mac_speed_dup_h) + ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed, + cmd->base.duplex); + + return ret; } static u32 hns3_get_rss_key_size(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_key_size) + if (!h->ae_algo->ops->get_rss_key_size) return 0; return h->ae_algo->ops->get_rss_key_size(h); @@ -645,8 +841,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_indir_size) + if (!h->ae_algo->ops->get_rss_indir_size) return 0; return h->ae_algo->ops->get_rss_indir_size(h); @@ -657,7 +852,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + if (!h->ae_algo->ops->get_rss) return -EOPNOTSUPP; return h->ae_algo->ops->get_rss(h, indir, key, hfunc); @@ -668,15 +863,16 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + if (!h->ae_algo->ops->set_rss) return -EOPNOTSUPP; - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); + if ((h->pdev->revision == HNAE3_REVISION_ID_20 && + hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); return -EOPNOTSUPP; } + if (!indir) { netdev_err(netdev, "set rss failed for indir is empty\n"); @@ -692,94 +888,148 @@ static int hns3_get_rxnfc(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) - return -EOPNOTSUPP; - switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.rss_size; - break; + cmd->data = h->kinfo.num_tqps; + return 0; case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); + if (h->ae_algo->ops->get_rss_tuple) + return h->ae_algo->ops->get_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLCNT: + if (h->ae_algo->ops->get_fd_rule_cnt) + return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRULE: + if (h->ae_algo->ops->get_fd_rule_info) + return h->ae_algo->ops->get_fd_rule_info(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLALL: + if (h->ae_algo->ops->get_fd_all_rules) + return h->ae_algo->ops->get_fd_all_rules(h, cmd, + rule_locs); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } - - return 0; } -static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 new_desc_num) +static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 tx_desc_num, u32 rx_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; - h->kinfo.num_desc = new_desc_num; + h->kinfo.num_tx_desc = tx_desc_num; + h->kinfo.num_rx_desc = rx_desc_num; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + priv->ring[i].desc_num = tx_desc_num; + priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num; + } +} - for (i = 0; i < h->kinfo.num_tqps * 2; i++) - 
priv->ring_data[i].ring->desc_num = new_desc_num; +static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *handle = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + int i; - return hns3_init_all_ring(priv); + tmp_rings = kcalloc(handle->kinfo.num_tqps * 2, + sizeof(struct hns3_enet_ring), GFP_KERNEL); + if (!tmp_rings) + return NULL; + + for (i = 0; i < handle->kinfo.num_tqps * 2; i++) { + memcpy(&tmp_rings[i], &priv->ring[i], + sizeof(struct hns3_enet_ring)); + tmp_rings[i].skb = NULL; + } + + return tmp_rings; } -static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) +static int hns3_check_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - bool if_running = netif_running(ndev); - u32 old_desc_num, new_desc_num; - int ret; + if (hns3_nic_resetting(ndev)) + return -EBUSY; if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; - if (param->tx_pending != param->rx_pending) { - netdev_err(ndev, - "Descriptors of tx and rx must be equal"); - return -EINVAL; - } - if (param->tx_pending > HNS3_RING_MAX_PENDING || - param->tx_pending < HNS3_RING_MIN_PENDING) { - netdev_err(ndev, - "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", - param->tx_pending, HNS3_RING_MIN_PENDING, - HNS3_RING_MAX_PENDING); + param->tx_pending < HNS3_RING_MIN_PENDING || + param->rx_pending > HNS3_RING_MAX_PENDING || + param->rx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, "Queue depth out of range [%d-%d]\n", + HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING); return -EINVAL; } - new_desc_num = param->tx_pending; + return 0; +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(ndev); + u32 old_tx_desc_num, new_tx_desc_num; + u32 old_rx_desc_num, new_rx_desc_num; + u16 queue_num = h->kinfo.num_tqps; + struct hns3_enet_ring *tmp_rings; + int ret, i; + + ret = hns3_check_ringparam(ndev, param); + if (ret) + return ret; /* Hardware requires that its descriptors must be multiple of eight */ - new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); - old_desc_num = h->kinfo.num_desc; - if (old_desc_num == new_desc_num) + new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); + new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); + old_tx_desc_num = priv->ring[0].desc_num; + old_rx_desc_num = priv->ring[queue_num].desc_num; + if (old_tx_desc_num == new_tx_desc_num && + old_rx_desc_num == new_rx_desc_num) return 0; + tmp_rings = hns3_backup_ringparam(priv); + if (!tmp_rings) { + netdev_err(ndev, + "backup ring param failed by allocating memory fail\n"); + return -ENOMEM; + } + netdev_info(ndev, - "Changing descriptor count from %d to %d.\n", - old_desc_num, new_desc_num); + "Changing Tx/Rx ring depth from %u/%u to %u/%u\n", + old_tx_desc_num, old_rx_desc_num, + new_tx_desc_num, new_rx_desc_num); if (if_running) - dev_close(ndev); - - ret = hns3_uninit_all_ring(priv); - if (ret) - return ret; + ndev->netdev_ops->ndo_stop(ndev); - ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); + ret = hns3_init_all_ring(priv); if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_desc_num); - if (ret) { - netdev_err(ndev, - "Revert 
to old bd num fail, ret=%d.\n", ret); - return ret; - } + netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n", + ret); + + hns3_change_all_ring_bd_num(priv, old_tx_desc_num, + old_rx_desc_num); + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + memcpy(&priv->ring[i], &tmp_rings[i], + sizeof(struct hns3_enet_ring)); + } else { + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + hns3_fini_ring(&tmp_rings[i]); } + kfree(tmp_rings); + if (if_running) - ret = dev_open(ndev); + ret = ndev->netdev_ops->ndo_open(ndev); return ret; } @@ -788,12 +1038,19 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) - return -EOPNOTSUPP; - switch (cmd->cmd) { case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); + if (h->ae_algo->ops->set_rss_tuple) + return h->ae_algo->ops->set_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLINS: + if (h->ae_algo->ops->add_fd_entry) + return h->ae_algo->ops->add_fd_entry(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLDEL: + if (h->ae_algo->ops->del_fd_entry) + return h->ae_algo->ops->del_fd_entry(h, cmd); + return -EOPNOTSUPP; default: return -EOPNOTSUPP; } @@ -801,19 +1058,40 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) static int hns3_nway_reset(struct net_device *netdev) { + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; struct phy_device *phy = netdev->phydev; + int autoneg; if (!netif_running(netdev)) return 0; - /* Only support nway_reset for netdev with phy attached for now */ - if (!phy) + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return -EBUSY; + } + + if (!ops->get_autoneg || !ops->restart_autoneg) return -EOPNOTSUPP; - if (phy->autoneg != AUTONEG_ENABLE) + autoneg = ops->get_autoneg(handle); + if (autoneg != AUTONEG_ENABLE) { + netdev_err(netdev, + "Autoneg is off, don't support to restart it\n"); return -EINVAL; + } + + if (netif_msg_ifdown(handle)) + netdev_info(netdev, "nway reset (using %s)\n", + phy ? "phy" : "mac"); + + if (phy) + return genphy_restart_aneg(phy); + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; - return genphy_restart_aneg(phy); + return ops->restart_autoneg(handle); } static void hns3_get_channels(struct net_device *netdev, @@ -833,15 +1111,18 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, struct hnae3_handle *h = priv->ae_handle; u16 queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (queue >= queue_num) { netdev_err(netdev, - "Invalid queue value %d! Queue max id=%d\n", + "Invalid queue value %u! 
Queue max id=%u\n", queue, queue_num - 1); return -EINVAL; } - tx_vector = priv->ring_data[queue].ring->tqp_vector; - rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + tx_vector = priv->ring[queue].tqp_vector; + rx_vector = priv->ring[queue_num + queue].tqp_vector; cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.coal.gl_adapt_enable; @@ -885,14 +1166,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev, rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); if (rx_gl != cmd->rx_coalesce_usecs) { netdev_info(netdev, - "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", cmd->rx_coalesce_usecs, rx_gl); } tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); if (tx_gl != cmd->tx_coalesce_usecs) { netdev_info(netdev, - "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", cmd->tx_coalesce_usecs, tx_gl); } @@ -920,7 +1201,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev, rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); if (rl != cmd->rx_coalesce_usecs_high) { netdev_info(netdev, - "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n", + "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n", cmd->rx_coalesce_usecs_high, rl); } @@ -949,7 +1230,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev, if (cmd->use_adaptive_tx_coalesce == 1 || cmd->use_adaptive_rx_coalesce == 1) { netdev_info(netdev, - "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n", + "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n", cmd->use_adaptive_tx_coalesce, cmd->use_adaptive_rx_coalesce); } @@ -966,8 +1247,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int queue_num = h->kinfo.num_tqps; - tx_vector = priv->ring_data[queue].ring->tqp_vector; - rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + tx_vector = priv->ring[queue].tqp_vector; + rx_vector = priv->ring[queue_num + queue].tqp_vector; tx_vector->tx_group.coal.gl_adapt_enable = cmd->use_adaptive_tx_coalesce; @@ -994,6 +1275,9 @@ static int hns3_set_coalesce(struct net_device *netdev, int ret; int i; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + ret = hns3_check_coalesce_para(netdev, cmd); if (ret) return ret; @@ -1033,12 +1317,172 @@ static int hns3_set_phys_id(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id) + if (!h->ae_algo->ops->set_led_id) return -EOPNOTSUPP; return h->ae_algo->ops->set_led_id(h, state); } +static u32 hns3_get_msglevel(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + return h->msg_enable; +} + +static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + h->msg_enable = msg_level; +} + +/* Translate local fec value into ethtool value. 
*/ +static unsigned int loc_to_eth_fec(u8 loc_fec) +{ + u32 eth_fec = 0; + + if (loc_fec & BIT(HNAE3_FEC_AUTO)) + eth_fec |= ETHTOOL_FEC_AUTO; + if (loc_fec & BIT(HNAE3_FEC_RS)) + eth_fec |= ETHTOOL_FEC_RS; + if (loc_fec & BIT(HNAE3_FEC_BASER)) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool fec value into local value. */ +static unsigned int eth_to_loc_fec(unsigned int eth_fec) +{ + u32 loc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + return loc_fec; + + if (eth_fec & ETHTOOL_FEC_AUTO) + loc_fec |= BIT(HNAE3_FEC_AUTO); + if (eth_fec & ETHTOOL_FEC_RS) + loc_fec |= BIT(HNAE3_FEC_RS); + if (eth_fec & ETHTOOL_FEC_BASER) + loc_fec |= BIT(HNAE3_FEC_BASER); + + return loc_fec; +} + +static int hns3_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 fec_ability; + u8 fec_mode; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (!ops->get_fec) + return -EOPNOTSUPP; + + ops->get_fec(handle, &fec_ability, &fec_mode); + + fec->fec = loc_to_eth_fec(fec_ability); + fec->active_fec = loc_to_eth_fec(fec_mode); + + return 0; +} + +static int hns3_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u32 fec_mode; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (!ops->set_fec) + return -EOPNOTSUPP; + fec_mode = eth_to_loc_fec(fec->fec); + + if (netif_msg_ifdown(handle)) + netdev_info(netdev, "set fecparam: mode=%u\n", fec_mode); + + return ops->set_fec(handle, fec_mode); +} + +static int hns3_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ +#define HNS3_SFF_8636_V1_3 0x03 + + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + struct hns3_sfp_type sfp_type; + int ret; + + if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom) + return -EOPNOTSUPP; + + memset(&sfp_type, 0, sizeof(sfp_type)); + ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8), + (u8 *)&sfp_type); + if (ret) + return ret; + + switch (sfp_type.type) { + case HNS3_MODULE_TYPE_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case HNS3_MODULE_TYPE_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case HNS3_MODULE_TYPE_QSFP_P: + if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + case HNS3_MODULE_TYPE_QSFP_28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + netdev_err(netdev, "Optical module unknown:%#x\n", + sfp_type.type); + return -EINVAL; + } + + return 0; +} + +static int hns3_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom) + return -EOPNOTSUPP; + + if (!ee->len) + return -EINVAL; + + 
memset(data, 0, ee->len); + + return ops->get_module_eeprom(handle, ee->offset, ee->len, data); +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, @@ -1047,15 +1491,21 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, .set_rxfh = hns3_set_rss, .get_link_ksettings = hns3_get_link_ksettings, .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, .get_link = hns3_get_link, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1069,6 +1519,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_strings = hns3_get_strings, .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, .get_rxnfc = hns3_get_rxnfc, .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, @@ -1078,13 +1530,17 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_link_ksettings = hns3_get_link_ksettings, .set_link_ksettings = hns3_set_link_ksettings, .nway_reset = hns3_nway_reset, - .get_channels = hns3_get_channels, - .set_channels = hns3_set_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .set_phys_id = hns3_set_phys_id, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, + .get_fecparam = hns3_get_fecparam, + .set_fecparam = hns3_set_fecparam, + .get_module_info = hns3_get_module_info, + .get_module_eeprom = hns3_get_module_eeprom, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.c b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.c new file mode 100644 index 0000000000000000000000000000000000000000..a6a724da05485fe117a7af64570c2f7ada331ae3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet_it.h" +#include "hns3_enet.h" + +extern const char hns3_driver_string[]; +extern const char hns3_copyright[]; + +#ifdef CONFIG_IT_VALIDATION + +extern struct net_device_ops hns3_nic_netdev_ops; +extern const struct hnae3_client_ops client_ops; +extern struct hnae3_client client; +extern struct pci_driver hns3_driver; +extern const char hns3_driver_name[]; + +#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE) +u16 hns3_nic_select_queue_it(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +#else +u16 hns3_nic_select_queue_it(struct net_device *ndev, struct sk_buff *skb, + struct net_device *accel_priv, + select_queue_fallback_t fallback) +#endif +{ +#define HNS3_VLAN_PRIO_SHIFT 13 + if (!accel_priv) + if (skb->vlan_tci && !skb->priority) + skb->priority = skb->vlan_tci >> HNS3_VLAN_PRIO_SHIFT; + +#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE) + return fallback(ndev, skb); +#else + return fallback(ndev, skb, accel_priv); +#endif +} + +static int __init hns3_init_module_it(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + strncpy(hns3_driver_version, HNS3_MOD_VERSION, + strlen(hns3_driver_version)); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", hns3_driver_name); + + client.ops = &client_ops; + hns3_nic_netdev_ops.ndo_select_queue = hns3_nic_select_queue_it; + + INIT_LIST_HEAD(&client.node); + hns3_dbg_register_debugfs(hns3_driver_name); + + ret = hnae3_register_client(&client); + if (ret) + goto err_reg_client; + + ret = pci_register_driver(&hns3_driver); + if (ret) + goto err_reg_driver; + + return ret; + +err_reg_driver: + hnae3_unregister_client(&client); +err_reg_client: + hns3_dbg_unregister_debugfs(); + return ret; +} + +module_init(hns3_init_module_it); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.h b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.h new file mode 100644 index 0000000000000000000000000000000000000000..86efcd355afa662ea6eb4d7ba37da1ef418be9c4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3_enet_it.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HNS3_ENET_IT_H +#define __HNS3_ENET_IT_H + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c new file mode 100644 index 0000000000000000000000000000000000000000..0118b9577a6684f5fe7c4fdf46ac1bdf2226d9bb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
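+// IT-build (CONFIG_IT_VALIDATION) hooks for the hclge PF driver: an external module can
+// register a callback with nic_register_event() to be notified of reset and IMP error
+// events; hclge_init_it() installs these handlers in hclge_ops before calling hclge_init().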
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kcompat.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hnae3.h" +#include "hclge_main_it.h" +#ifdef CONFIG_HNS3_TEST +#include "hclge_sysfs.h" +#endif + +#ifdef CONFIG_IT_VALIDATION +#define HCLGE_RESET_MAX_FAIL_CNT 1 + +static nic_event_fn_t nic_event_call; + +int nic_register_event(nic_event_fn_t event_call) +{ + if (!event_call) { + pr_err("register event handle is null.\n"); + return -EINVAL; + } + + nic_event_call = event_call; + + pr_info("netdev register success.\n"); + return 0; +} +EXPORT_SYMBOL(nic_register_event); + +int nic_unregister_event(void) +{ + nic_event_call = NULL; + return 0; +} +EXPORT_SYMBOL(nic_unregister_event); + +static void nic_call_event(struct net_device *netdev, + enum hnae3_event_type_custom event_t) +{ + if (nic_event_call) { + nic_event_call(netdev, event_t); + netdev_info(netdev, "report event %d\n", event_t); + } +} + +static void hclge_handle_imp_error_it(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct net_device *netdev; + u32 reg_val; + + netdev = hdev->vport[0].nic.netdev; + + if (test_and_clear_bit(HCLGE_IMP_RD_POISON, &hdev->imp_err_state)) { + dev_err(&hdev->pdev->dev, "Detected IMP RD poison!\n"); + if (nic_event_call) + nic_call_event(netdev, HNAE3_IMP_RD_POISON_CUSTOM); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG) & + ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + + if (test_and_clear_bit(HCLGE_IMP_CMDQ_ERROR, &hdev->imp_err_state)) { + dev_err(&hdev->pdev->dev, "Detected CMDQ ECC error!\n"); + if (nic_event_call) + nic_call_event(netdev, HNAE3_IMP_RESET_CUSTOM); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG) & + ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + +} + +static void hclge_reset_task_schedule_it(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), + system_wq, &hdev->service_task, 0); +} + +void hclge_reset_event_it(struct pci_dev *pdev, struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; + struct net_device *netdev; + + netdev = hdev->vport[0].nic.netdev; + + /* We might end up getting called broadly because of 2 below cases: + * 1. Recoverable error was conveyed through APEI and only way to bring + * normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * For the first case,error event might not have ae handle available. + * check if this is a new reset request and we are not here just because + * last reset attempt did not succeed and watchdog hit us again. We will + * know this if last reset request did not occur very recently (watchdog + * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) + * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 12*HZ times. 
+ */ + if (time_before(jiffies, (hdev->last_reset_time + + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + return; + } else if (hdev->default_reset_request) { + hdev->reset_level = + hclge_get_reset_level(ae_dev, &hdev->default_reset_request); + } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { + hdev->reset_level = HNAE3_FUNC_RESET; + } + + dev_info(&hdev->pdev->dev, "IT received reset event, reset type is %d", + hdev->reset_level); + + if (hdev->ppu_poison_ras_err && nic_event_call) { + nic_call_event(netdev, HNAE3_PPU_POISON_CUSTOM); + hdev->ppu_poison_ras_err = false; + } + + if (nic_event_call) { + nic_call_event(netdev, hdev->reset_level); + } else { + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + hclge_reset_task_schedule_it(hdev); + } +} + +bool hclge_reset_end_it(struct hnae3_handle *handle, bool done) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct net_device *netdev; + + netdev = hdev->vport[0].nic.netdev; + + if (done) { + dev_info(&hdev->pdev->dev, "IT Report Reset DONE!\n"); + if (nic_event_call) + nic_call_event(netdev, HNAE3_RESET_DONE_CUSTOM); + } + + if (hdev->rst_stats.reset_fail_cnt >= HCLGE_RESET_MAX_FAIL_CNT) { + dev_err(&hdev->pdev->dev, "IT Report Reset fail!\n"); + if (nic_event_call) { + if (hdev->reset_type == HNAE3_FUNC_RESET) + nic_call_event(netdev, + HNAE3_FUNC_RESET_FAIL_CUSTOM); + else if (hdev->reset_type == HNAE3_GLOBAL_RESET) + nic_call_event(netdev, + HNAE3_GLOBAL_RESET_FAIL_CUSTOM); + else if (hdev->reset_type == HNAE3_IMP_RESET) + nic_call_event(netdev, + HNAE3_IMP_RESET_FAIL_CUSTOM); + } + } + + return done; +} + +#ifdef CONFIG_HNS3_TEST +void hclge_ext_init(struct hnae3_handle *handle) +{ + hclge_sysfs_init(handle); +} + +void hclge_ext_uninit(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_reset_pf_rate(hdev); + hclge_sysfs_uninit(handle); +} + +void hclge_ext_reset_done(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_resume_pf_rate(hdev); +} +#endif + +int hclge_init_it(void) +{ +#ifdef CONFIG_HNS3_TEST + hclge_ops.ext_init = hclge_ext_init; + hclge_ops.ext_uninit = hclge_ext_uninit; + hclge_ops.ext_reset_done = hclge_ext_reset_done; +#endif + + hclge_ops.reset_event = hclge_reset_event_it; + hclge_ops.reset_end = hclge_reset_end_it; + hclge_ops.handle_imp_error = hclge_handle_imp_error_it; + + return hclge_init(); +} + +module_init(hclge_init_it); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.h b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.h new file mode 100644 index 0000000000000000000000000000000000000000..343e036412d42e0aad42ec8bdce3dfca35d11fb1 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_MAIN_IT_H +#define __HCLGE_MAIN_IT_H + +extern struct hnae3_ae_algo ae_algo; +extern struct hnae3_ae_ops hclge_ops; + +enum hnae3_event_type_custom { + HNAE3_VF_RESET_CUSTOM, + HNAE3_VF_FUNC_RESET_CUSTOM, + HNAE3_VF_PF_FUNC_RESET_CUSTOM, + HNAE3_VF_FULL_RESET_CUSTOM, + HNAE3_FLR_RESET_CUSTOM, + HNAE3_FUNC_RESET_CUSTOM, + HNAE3_GLOBAL_RESET_CUSTOM, + HNAE3_IMP_RESET_CUSTOM, + HNAE3_UNKNOWN_RESET_CUSTOM, + HNAE3_NONE_RESET_CUSTOM, + HNAE3_PORT_FAULT, + HNAE3_RESET_DONE_CUSTOM, + HNAE3_FUNC_RESET_FAIL_CUSTOM, + HNAE3_GLOBAL_RESET_FAIL_CUSTOM, + HNAE3_IMP_RESET_FAIL_CUSTOM, + HNAE3_PPU_POISON_CUSTOM, + HNAE3_IMP_RD_POISON_CUSTOM, +}; + +/** + * nic_event_fn_t - nic event handler prototype + * @netdev: net device + * @hnae3_event_type_custom: nic device event type + */ +typedef void (*nic_event_fn_t) (struct net_device *netdev, + enum hnae3_event_type_custom); + +/** + * nic_register_event - register for nic event handling + * @event_call: nic event handler + * return 0 - success , negative - fail + */ +int nic_register_event(nic_event_fn_t event_call); + +/** + * nic_unregister_event - quit nic event handling + * return 0 - success , negative - fail + */ +int nic_unregister_event(void); + +int hclge_init(void); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..a307b592dc9271123f0424453f5dfdb9632a791b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2021 Hisilicon Limited. */ + +#include +#include "hnae3.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hclge_sysfs.h" + +void hclge_reset_pf_rate(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + int ret; + + /* zero means max rate, if max_tx_rate is zero, just return */ + if (!vport->vf_info.max_tx_rate) + return; + + vport->vf_info.max_tx_rate = 0; + + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to reset pf tx rate to default, ret = %d.\n", + ret); +} + +int hclge_resume_pf_rate(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + int ret; + + /* zero means max rate, after reset, firmware already set it to + * max rate, so just continue. 
+ */ + if (!vport->vf_info.max_tx_rate) + return 0; + + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to resume pf tx rate:%u, ret = %d.\n", + vport->vf_info.max_tx_rate, ret); + return ret; + } + + return 0; +} + +static ssize_t hclge_max_tx_rate_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct hclge_vport *vport = + container_of(kobj, struct hclge_vport, kobj); + + return sprintf(buf, "%d Mbit/s (0 means no limit)\n", + vport->vf_info.max_tx_rate); +} + +static ssize_t hclge_max_tx_rate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t size) +{ + struct hclge_vport *vport = + container_of(kobj, struct hclge_vport, kobj); + struct hclge_dev *hdev = vport->back; + int max_tx_rate; + int ret; + + ret = kstrtoint(buf, 0, &max_tx_rate); + if (ret) + return -EINVAL; + + if (max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { + dev_err(&hdev->pdev->dev, + "invalid max_tx_rate:%d [0, %u]\n", + max_tx_rate, hdev->hw.mac.max_speed); + return -EINVAL; + } + + ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); + if (ret) + return ret; + + vport->vf_info.max_tx_rate = max_tx_rate; + + return ret ? (ssize_t)ret : size; +} + +static struct kobj_attribute hclge_attr_max_tx_rate = { + .attr = {.name = "max_tx_rate", + .mode = 0644 }, + .show = hclge_max_tx_rate_show, + .store = hclge_max_tx_rate_store, +}; + +static struct attribute *hclge_sysfs_attrs[] = { + &hclge_attr_max_tx_rate.attr, + NULL, +}; + +static struct kobj_type hclge_sysfs_type = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = hclge_sysfs_attrs, +}; + +void hclge_sysfs_init(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->netdev; + struct hclge_vport *vport = hclge_get_vport(handle); + int ret; + + handle->kobj = kobject_create_and_add("kunpeng", &netdev->dev.kobj); + if (!handle->kobj) { + netdev_err(netdev, "failed to create kobj!\n"); + return; + } + + ret = kobject_init_and_add(&vport->kobj, &hclge_sysfs_type, + handle->kobj, "pf"); + if (ret) { + netdev_err(netdev, "failed to init kobj, ret = %d\n", ret); + kobject_put(handle->kobj); + handle->kobj = NULL; + } +} + +void hclge_sysfs_uninit(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + if (!handle->kobj) + return; + + kobject_put(&vport->kobj); + kobject_put(handle->kobj); + handle->kobj = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.h new file mode 100644 index 0000000000000000000000000000000000000000..8eb33357c577957df5506800268e899614f2e346 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_sysfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * Copyright (c) 2018-2021 Hisilicon Limited. 
+ */ + +#ifndef __HCLGE_SYSFS_H +#define __HCLGE_SYSFS_H + +void hclge_reset_pf_rate(struct hclge_dev *hdev); +int hclge_resume_pf_rate(struct hclge_dev *hdev); + +void hclge_sysfs_init(struct hnae3_handle *handle); +void hclge_sysfs_uninit(struct hnae3_handle *handle); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..43be997cb70191f82a7ebcb7f8276ae9da9507c5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +/* This must be outside ifdef _HNS3_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HNS3_TRACE_H_ + +#include + +#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32)) + +DECLARE_EVENT_CLASS(hns3_skb_template, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb), + + TP_STRUCT__entry( + __field(unsigned int, headlen) + __field(unsigned int, len) + __field(__u8, nr_frags) + __field(__u8, ip_summed) + __field(unsigned int, hdr_len) + __field(unsigned short, gso_size) + __field(unsigned short, gso_segs) + __field(unsigned int, gso_type) + __array(__u32, size, MAX_SKB_FRAGS) + ), + + TP_fast_assign( + __entry->headlen = skb_headlen(skb); + __entry->len = skb->len; + __entry->nr_frags = skb_shinfo(skb)->nr_frags; + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_segs = skb_shinfo(skb)->gso_segs; + __entry->gso_type = skb_shinfo(skb)->gso_type; + __entry->hdr_len = skb->encapsulation ? + skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) : + skb_transport_offset(skb) + tcp_hdrlen(skb); + __entry->ip_summed = skb->ip_summed; + hns3_shinfo_pack(skb_shinfo(skb), __entry->size); + ), + + TP_printk( + "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%u): %s", + __entry->headlen, __entry->len, __entry->hdr_len, + __entry->ip_summed, __entry->gso_size, __entry->gso_segs, + __entry->gso_type, __entry->nr_frags, + __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32)) + ) +); + +DEFINE_EVENT(hns3_skb_template, hns3_over_8bd, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +DEFINE_EVENT(hns3_skb_template, hns3_gro, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +DEFINE_EVENT(hns3_skb_template, hns3_tso, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +TRACE_EVENT(hns3_tx_desc, + TP_PROTO(struct hns3_enet_ring *ring), + TP_ARGS(ring), + + TP_STRUCT__entry( + __field(int, index) + __field(int, ntu) + __field(int, ntc) + __field(dma_addr_t, desc_dma) + __array(u32, desc, DESC_NR) + __string(devname, ring->tqp->handle->kinfo.netdev->name) + ), + + TP_fast_assign( + __entry->index = ring->tqp->tqp_index; + __entry->ntu = ring->next_to_use; + __entry->ntc = ring->next_to_clean; + __entry->desc_dma = ring->desc_dma_addr, + memcpy(__entry->desc, &ring->desc[ring->next_to_use], + sizeof(struct hns3_desc)); + __assign_str(devname, ring->tqp->handle->kinfo.netdev->name); + ), + + TP_printk( + "%s-%d-%d/%d desc(%pad): %s", + __get_str(devname), __entry->index, __entry->ntu, + __entry->ntc, &__entry->desc_dma, + __print_array(__entry->desc, DESC_NR, sizeof(u32)) + ) +); + +TRACE_EVENT(hns3_rx_desc, + TP_PROTO(struct hns3_enet_ring *ring), + TP_ARGS(ring), + + TP_STRUCT__entry( + __field(int, index) + __field(int, ntu) + __field(int, ntc) + __field(dma_addr_t, desc_dma) + __field(dma_addr_t, buf_dma) + __array(u32, desc, 
DESC_NR) + __string(devname, ring->tqp->handle->kinfo.netdev->name) + ), + + TP_fast_assign( + __entry->index = ring->tqp->tqp_index; + __entry->ntu = ring->next_to_use; + __entry->ntc = ring->next_to_clean; + __entry->desc_dma = ring->desc_dma_addr; + __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma; + memcpy(__entry->desc, &ring->desc[ring->next_to_clean], + sizeof(struct hns3_desc)); + __assign_str(devname, ring->tqp->handle->kinfo.netdev->name); + ), + + TP_printk( + "%s-%d-%d/%d desc(%pad) buf(%pad): %s", + __get_str(devname), __entry->index, __entry->ntu, + __entry->ntc, &__entry->desc_dma, &__entry->buf_dma, + __print_array(__entry->desc, DESC_NR, sizeof(u32)) + ) +); + +#endif /* _HNS3_TRACE_H_ */ + +/* This must be outside ifdef _HNS3_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hns3_trace +#include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index cb8ddd04347686ac436b1f3b8acfc155cf4db685..14541e98407f0aa551d8946e5aeed3021fc7b218 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -3,9 +3,10 @@ # Makefile for the HISILICON network device drivers. # -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 +ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_debugfs.o hclge_err.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ac13cb2b168e5a6e67517837dd470e092a0db8f8..16262eb3960529d709bf66a1b3c4f64725c9068e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -7,6 +7,7 @@ #include #include #include +#include "kcompat.h" #include "hclge_cmd.h" #include "hnae3.h" #include "hclge_main.h" @@ -24,15 +25,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring) return ring->desc_num - used - 1; } -static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h) +static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head) { - int u = ring->next_to_use; - int c = ring->next_to_clean; + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; - if (unlikely(h >= ring->desc_num)) - return 0; + if (ntu > ntc) + return head >= ntc && head <= ntu; - return u > c ? 
(h > c && h <= u) : (h > c || h <= u); + return head >= ntc || head <= ntu; } static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) @@ -104,15 +105,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) dma_addr_t dma = ring->desc_dma_addr; struct hclge_dev *hdev = ring->dev; struct hclge_hw *hw = &hdev->hw; + u32 reg_val; if (ring->ring_type == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, upper_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, - (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | - HCLGE_NIC_CMQ_ENABLE); + reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGE_NIC_SW_RST_RDY; + reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S; + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { @@ -121,13 +124,26 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, - (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | - HCLGE_NIC_CMQ_ENABLE); + ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S); hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); } } +static void hclge_cmd_clear_regs(struct hclge_hw *hw) +{ + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); +} + static void hclge_cmd_init_regs(struct hclge_hw *hw) { hclge_cmd_config_regs(&hw->cmq.csq); @@ -145,7 +161,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, csq->next_to_use, csq->next_to_clean); dev_warn(&hdev->pdev->dev, "Disabling any further commands to IMP firmware\n"); @@ -171,8 +187,16 @@ static bool hclge_is_special_opcode(u16 opcode) /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; + u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -183,6 +207,61 @@ static bool hclge_is_special_opcode(u16 opcode) return false; } +static int hclge_cmd_convert_err_code(u16 desc_ret) +{ + switch (desc_ret) { + case HCLGE_CMD_EXEC_SUCCESS: + return 0; + case HCLGE_CMD_NO_AUTH: + return -EPERM; + case HCLGE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HCLGE_CMD_QUEUE_FULL: + return -EXFULL; + case HCLGE_CMD_NEXT_ERR: + return -ENOSR; + case 
HCLGE_CMD_UNEXE_ERR: + return -ENOTBLK; + case HCLGE_CMD_PARA_ERR: + return -EINVAL; + case HCLGE_CMD_RESULT_ERR: + return -ERANGE; + case HCLGE_CMD_TIMEOUT: + return -ETIME; + case HCLGE_CMD_HILINK_ERR: + return -ENOLINK; + case HCLGE_CMD_QUEUE_ILLEGAL: + return -ENXIO; + case HCLGE_CMD_INVALID: + return -EBADR; + default: + return -EIO; + } +} + +static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc, + int num, int ntc) +{ + u16 opcode, desc_ret; + int handle; + + opcode = le16_to_cpu(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + desc[handle] = hw->cmq.csq.desc[ntc]; + ntc++; + if (ntc >= hw->cmq.csq.desc_num) + ntc = 0; + } + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[num - 1].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + hw->cmq.last_status = desc_ret; + + return hclge_cmd_convert_err_code(desc_ret); +} + /** * hclge_cmd_send - send command to command queue * @hw: pointer to the hw struct @@ -195,18 +274,26 @@ static bool hclge_is_special_opcode(u16 opcode) int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) { struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); + struct hclge_cmq_ring *csq = &hw->cmq.csq; struct hclge_desc *desc_to_use; bool complete = false; u32 timeout = 0; int handle = 0; - int retval = 0; - u16 opcode, desc_ret; + int retval; int ntc; spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclge_ring_space(&hw->cmq.csq) || - test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + if (num > hclge_ring_space(&hw->cmq.csq)) { + /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, + * need update the SW HEAD pointer csq->next_to_clean + */ + csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -216,12 +303,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * which will be use for hardware to write back */ ntc = hw->cmq.csq.next_to_use; - opcode = le16_to_cpu(desc[0].opcode); while (handle < num) { desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; *desc_to_use = desc[handle]; (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) hw->cmq.csq.next_to_use = 0; handle++; } @@ -244,31 +330,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } while (timeout < hw->cmq.tx_timeout); } - if (!complete) { - retval = -EAGAIN; - } else { - handle = 0; - while (handle < num) { - /* Get the result of hardware write back */ - desc_to_use = &hw->cmq.csq.desc[ntc]; - desc[handle] = *desc_to_use; - - if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[handle].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - - if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) - retval = 0; - else - retval = -EIO; - hw->cmq.last_status = desc_ret; - ntc++; - handle++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } - } + if (!complete) + retval = -EBADE; + else + retval = hclge_cmd_check_retval(hw, desc, num, ntc); /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); @@ -304,6 +369,15 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev) { int ret; + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + + /* clear up all command register, + * in 
case there are some residual values + */ + hclge_cmd_clear_regs(&hdev->hw); + /* Setup the queue entries for use cmd queue */ hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; @@ -332,45 +406,107 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev) return ret; } +/* ask the firmware to enable some features, driver can work without it. */ +static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en) +{ + struct hclge_firmware_compat_cmd *req; + struct hclge_desc desc; + u32 compat = 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false); + + if (en) { + req = (struct hclge_firmware_compat_cmd *)desc.data; + + hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); + req->compat = cpu_to_le32(compat); + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + int hclge_cmd_init(struct hclge_dev *hdev) { u32 version; int ret; + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; hdev->hw.cmq.crq.next_to_use = 0; - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - hclge_cmd_init_regs(&hdev->hw); + + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if ((hclge_is_reset_pending(hdev))) { + dev_err(&hdev->pdev->dev, + "failed to init cmd since reset %#lx pending\n", + hdev->reset_pending); + ret = -EBUSY; + goto err_cmd_init; + } + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "firmware version query failed %d\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + /* ask the firmware to enable some features, driver can work without + * it. + */ + ret = hclge_firmware_compat_config(hdev, true); + if (ret) + dev_warn(&hdev->pdev->dev, + "Firmware compatible features not enabled(%d).\n", + ret); return 0; -} -static void hclge_destroy_queue(struct hclge_cmq_ring *ring) -{ - spin_lock(&ring->lock); - hclge_free_cmd_desc(ring); - spin_unlock(&ring->lock); +err_cmd_init: + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + return ret; } -void hclge_destroy_cmd_queue(struct hclge_hw *hw) +void hclge_cmd_uninit(struct hclge_dev *hdev) { - hclge_destroy_queue(&hw->cmq.csq); - hclge_destroy_queue(&hw->cmq.crq); + hclge_firmware_compat_config(hdev, false); + + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. 
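+ * HCLGE_CMDQ_CLEAR_WAIT_TIME (200 ms) below is the grace period used for this.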
+ */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + hclge_cmd_clear_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + hclge_free_cmd_desc(&hdev->hw.cmq.csq); + hclge_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 821d4c2f84bd3e7415a3a83c9de566fcea9ac491..9c49d2760ab5272ce2f05dfbbd2bf0e98f30f474 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -3,10 +3,14 @@ #ifndef __HCLGE_CMD_H #define __HCLGE_CMD_H +#include #include #include +#include "hnae3.h" #define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 +#define HCLGE_DESC_DATA_LEN 6 struct hclge_dev; struct hclge_desc { @@ -18,7 +22,7 @@ struct hclge_desc { __le16 flag; __le16 retval; __le16 rsv; - __le32 data[6]; + __le32 data[HCLGE_DESC_DATA_LEN]; }; struct hclge_cmq_ring { @@ -39,8 +43,16 @@ struct hclge_cmq_ring { enum hclge_cmd_return_status { HCLGE_CMD_EXEC_SUCCESS = 0, HCLGE_CMD_NO_AUTH = 1, - HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_NOT_SUPPORTED = 2, HCLGE_CMD_QUEUE_FULL = 3, + HCLGE_CMD_NEXT_ERR = 4, + HCLGE_CMD_UNEXE_ERR = 5, + HCLGE_CMD_PARA_ERR = 6, + HCLGE_CMD_RESULT_ERR = 7, + HCLGE_CMD_TIMEOUT = 8, + HCLGE_CMD_HILINK_ERR = 9, + HCLGE_CMD_QUEUE_ILLEGAL = 10, + HCLGE_CMD_INVALID = 11, }; enum hclge_cmd_status { @@ -53,6 +65,7 @@ enum hclge_cmd_status { struct hclge_misc_vector { u8 __iomem *addr; int vector_irq; + char name[HNAE3_INT_NAME_LEN]; }; struct hclge_cmq { @@ -78,23 +91,46 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_PF_RSRC = 0x0023, HCLGE_OPC_QUERY_VF_RSRC = 0x0024, HCLGE_OPC_GET_CFG_PARAM = 0x0025, + HCLGE_OPC_PF_RST_DONE = 0x0026, + HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027, HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, HCLGE_OPC_STATS_MAC = 0x0032, + HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, + HCLGE_OPC_STATS_MAC_ALL = 0x0034, HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, + HCLGE_OPC_DFX_BD_NUM = 0x0043, + HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, + HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, + HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, + HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, + HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, + HCLGE_OPC_DFX_NCSI_REG = 0x004A, + HCLGE_OPC_DFX_RTC_REG = 0x004B, + HCLGE_OPC_DFX_PPP_REG = 0x004C, + HCLGE_OPC_DFX_RCB_REG = 0x004D, + HCLGE_OPC_DFX_TQP_REG = 0x004E, + HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, HCLGE_OPC_CONFIG_AN_MODE = 0x0304, - HCLGE_OPC_QUERY_AN_RESULT = 0x0306, HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, HCLGE_OPC_SERDES_LOOPBACK = 0x0315, + HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, + /* check sum command */ + HCLGE_OPC_CFG_CHECKSUM_EN = 0x0601, /* PFC/Pause commands */ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, @@ -126,6 +162,16 @@ enum hclge_opcode_type { HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + HCLGE_OPC_ETS_TC_WEIGHT = 0x0843, + HCLGE_OPC_QSET_DFX_STS = 0x0844, + HCLGE_OPC_PRI_DFX_STS = 0x0845, + HCLGE_OPC_PG_DFX_STS = 
0x0846, + HCLGE_OPC_PORT_DFX_STS = 0x0847, + HCLGE_OPC_SCH_NQ_CNT = 0x0848, + HCLGE_OPC_SCH_RQ_CNT = 0x0849, + HCLGE_OPC_TM_INTERNAL_STS = 0x0850, + HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, + HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, /* Packet buffer allocate commands */ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, @@ -142,6 +188,7 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04, HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, @@ -150,8 +197,12 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, + /* PPU commands */ + HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A, + /* TSO command */ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS commands */ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, @@ -175,28 +226,32 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - /* Multicast linear table commands */ - HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, - HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, - HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, - HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, + /* MAC VLAN commands */ + HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033, /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, /* QCN commands */ HCLGE_OPC_QCN_MOD_CFG = 0x1A01, HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, - HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03, HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, @@ -208,6 +263,51 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + + /* IMP stats command */ + HCLGE_OPC_IMP_STATS_BD = 0x7012, + HCLGE_OPC_IMP_STATS_INFO = 0x7013, + HCLGE_OPC_IMP_COMPAT_CFG = 0x701A, + + /* SFP command */ + HCLGE_OPC_GET_SFP_EEPROM = 0x7100, + HCLGE_OPC_GET_SFP_EXIST = 0x7101, + HCLGE_OPC_GET_SFP_INFO = 0x7104, + + /* Error INT commands */ + HCLGE_MAC_COMMON_INT_EN = 0x030E, + HCLGE_TM_SCH_ECC_INT_EN = 0x0829, + HCLGE_SSU_ECC_INT_CMD = 0x0989, + HCLGE_SSU_COMMON_INT_CMD = 0x098C, + HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, + HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, + HCLGE_COMMON_ECC_INT_CFG = 0x1505, + HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, + HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, + HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, + HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, + HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585, + HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586, + HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, 
+ HCLGE_IGU_COMMON_INT_EN = 0x1806, + HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, + HCLGE_PPP_CMD0_INT_CMD = 0x2100, + HCLGE_PPP_CMD1_INT_CMD = 0x2101, + HCLGE_PPP_MAC_VLAN_IDX_RD = 0x2104, + HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, + HCLGE_NCSI_INT_EN = 0x2401, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -255,16 +355,16 @@ struct hclge_ctrl_vector_chain_cmd { u8 rsv; }; -#define HCLGE_TC_NUM 8 +#define HCLGE_MAX_TC_NUM 8 #define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ #define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ struct hclge_tx_buff_alloc_cmd { - __le16 tx_pkt_buff[HCLGE_TC_NUM]; + __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM]; u8 tx_buff_rsv[8]; }; struct hclge_rx_priv_buff_cmd { - __le16 buf_num[HCLGE_TC_NUM]; + __le16 buf_num[HCLGE_MAX_TC_NUM]; __le16 shared_buf; u8 rsv[6]; }; @@ -310,7 +410,6 @@ struct hclge_priv_buf { u32 enable; /* Enable TC private buffer or not */ }; -#define HCLGE_MAX_TC_NUM 8 struct hclge_shared_buf { struct hclge_waterline self; struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; @@ -343,8 +442,10 @@ struct hclge_rx_pkt_buf_cmd { #define HCLGE_PF_MAC_NUM_MASK 0x3 #define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) #define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +#define HCLGE_VF_RST_STATUS_CMD 4 + struct hclge_func_status_cmd { - __le32 vf_rst_state[4]; + __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD]; u8 pf_state; u8 mac_id; u8 rsv1; @@ -365,7 +466,9 @@ struct hclge_pf_res_cmd { #define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) __le16 pf_intr_vector_number; __le16 pf_own_fun_number; - __le32 rsv[3]; + __le16 tx_buf_size; + __le16 dv_buf_size; + __le32 rsv[2]; }; #define HCLGE_CFG_OFFSET_S 0 @@ -395,11 +498,17 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_SPEED_ABILITY_S 0 #define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_VLAN_FLTR_CAP_S 8 +#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) + +#define HCLGE_CFG_CMD_CNT 4 struct hclge_cfg_param_cmd { __le32 offset; __le32 rsv; - __le32 param[4]; + __le32 param[HCLGE_CFG_CMD_CNT]; }; #define HCLGE_MAC_MODE 0x0 @@ -503,6 +612,12 @@ struct hclge_config_mac_mode_cmd { u8 rsv[20]; }; +struct hclge_pf_rst_sync_cmd { +#define HCLGE_PF_RST_ALL_VF_RDY_B 0 + u8 all_vf_ready; + u8 rsv[23]; +}; + #define HCLGE_CFG_SPEED_S 0 #define HCLGE_CFG_SPEED_M GENMASK(5, 0) @@ -517,20 +632,6 @@ struct hclge_config_mac_speed_dup_cmd { u8 rsv[22]; }; -#define HCLGE_QUERY_SPEED_S 3 -#define HCLGE_QUERY_AN_B 0 -#define HCLGE_QUERY_DUPLEX_B 2 - -#define HCLGE_QUERY_SPEED_M GENMASK(4, 0) -#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) -#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) - -struct hclge_query_an_speed_dup_cmd { - u8 an_syn_dup_speed; - u8 pause; - u8 rsv[23]; -}; - #define HCLGE_RING_ID_MASK GENMASK(9, 0) #define HCLGE_TQP_ENABLE_B 0 @@ -547,6 +648,32 @@ struct hclge_config_auto_neg_cmd { u8 rsv[20]; }; +struct hclge_sfp_info_cmd { + __le32 speed; + u8 query_type; /* 0: sfp speed, 1: active speed */ + u8 active_fec; + u8 autoneg; /* autoneg state */ + u8 autoneg_ability; /* whether support autoneg */ + __le32 speed_ability; /* speed ability for current media */ + __le32 module_type; + u8 rsv[8]; +}; + +#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0 +#define HCLGE_MAC_CFG_FEC_MODE_S 1 +#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1) +#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0 +#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1 + +#define HCLGE_MAC_FEC_OFF 0 +#define HCLGE_MAC_FEC_BASER 1 
+#define HCLGE_MAC_FEC_RS 2 +struct hclge_config_fec_cmd { + u8 fec_mode; + u8 default_config; + u8 rsv[22]; +}; + #define HCLGE_MAC_UPLINK_PORT 0x100 struct hclge_config_max_frm_size_cmd { @@ -562,6 +689,11 @@ enum hclge_mac_vlan_tbl_opcode { HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ }; +enum hclge_mac_vlan_add_resp_code { + HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */ + HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */ +}; + #define HCLGE_MAC_VLAN_BIT0_EN_B 0 #define HCLGE_MAC_VLAN_BIT1_EN_B 1 #define HCLGE_MAC_EPORT_SW_EN_B 12 @@ -584,13 +716,12 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0 -struct hclge_mac_vlan_mask_entry_cmd { - u8 rsv0[2]; - u8 vlan_mask; - u8 rsv1; - u8 mac_mask[6]; - u8 rsv2[14]; +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; }; #define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) @@ -602,8 +733,7 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; - __le32 mac_addr_hi32; - __le16 mac_addr_lo16; + u8 mac_addr[ETH_ALEN]; __le16 rsv1; __le16 ethter_type; __le16 egress_port; @@ -615,75 +745,85 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0 -#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 7 -struct hclge_mta_filter_mode_cmd { - u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ - u8 rsv[23]; -}; - -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 -struct hclge_cfg_func_mta_filter_cmd { - u8 accept; /* Only used lowest 1 bit */ - u8 function_id; - u8 rsv[22]; -}; - -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0 -#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item_cmd { - __le16 item_idx; /* Only used lowest 12 bit */ - u8 accept; /* Only used lowest 1 bit */ - u8 rsv[21]; -}; - -struct hclge_mac_vlan_add_cmd { - __le16 flags; - __le16 mac_addr_hi16; - __le32 mac_addr_lo32; - __le32 mac_addr_msk_hi32; - __le16 mac_addr_msk_lo16; +#pragma pack(1) +struct hclge_mac_vlan_idx_rd_cmd { + u8 rsv0; + u8 resp_code; __le16 vlan_tag; - __le16 ingress_port; + u8 mac_addr[ETH_ALEN]; + __le16 port; + u8 entry_type; + u8 mc_mac_en; __le16 egress_port; - u8 rsv[4]; + __le16 egress_queue; + __le16 vsi; + __le32 index; }; -#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 -struct hclge_mac_vlan_remove_cmd { - __le16 flags; - __le16 mac_addr_hi16; - __le32 mac_addr_lo32; - __le32 mac_addr_msk_hi32; - __le16 mac_addr_msk_lo16; - __le16 vlan_tag; - __le16 ingress_port; - __le16 egress_port; - u8 rsv[4]; -}; +#pragma pack() struct hclge_vlan_filter_ctrl_cmd { u8 vlan_type; u8 vlan_fe; - u8 rsv[22]; + u8 rsv1[2]; + u8 vf_id; + u8 rsv2[19]; }; +#define HCLGE_VLAN_ID_OFFSET_STEP 160 +#define HCLGE_VLAN_BYTE_SIZE 8 +#define HCLGE_VLAN_OFFSET_BITMAP \ + (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE) + struct hclge_vlan_filter_pf_cfg_cmd { u8 vlan_offset; u8 vlan_cfg; u8 rsv[2]; - u8 vlan_offset_bitmap[20]; + u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP]; }; +#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vlan_filter_vf_cfg_cmd { __le16 vlan_id; u8 resp_code; u8 rsv; u8 vlan_cfg; u8 rsv1[3]; - u8 vf_bitmap[16]; + u8 vf_bitmap[HCLGE_MAX_VF_BYTES]; +}; + +#define HCLGE_INGRESS_BYPASS_B 0 +struct hclge_port_vlan_filter_bypass_cmd { + u8 bypass_state; + u8 rsv1[3]; + u8 vf_id; + u8 rsv2[19]; +}; + +#define HCLGE_SWITCH_ANTI_SPOOF_B 0U +#define HCLGE_SWITCH_ALW_LPBK_B 1U +#define 
HCLGE_SWITCH_ALW_LCL_LPBK_B 2U +#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U +#define HCLGE_SWITCH_NO_MASK 0x0 +#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE +#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD +#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB +#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7 + +struct hclge_mac_vlan_switch_cmd { + u8 roce_sel; + u8 rsv1[3]; + __le32 func_id; + u8 switch_param; + u8 rsv2[3]; + u8 param_mask; + u8 rsv3[11]; +}; + +enum hclge_mac_vlan_cfg_sel { + HCLGE_MAC_VLAN_NIC_SEL = 0, + HCLGE_MAC_VLAN_ROCE_SEL, }; #define HCLGE_ACCEPT_TAG1_B 0 @@ -693,6 +833,7 @@ struct hclge_vlan_filter_vf_cfg_cmd { #define HCLGE_CFG_NIC_ROCE_SEL_B 4 #define HCLGE_ACCEPT_TAG2_B 5 #define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_VF_NUM_PER_BYTE 8 struct hclge_vport_vtag_tx_cfg_cmd { u8 vport_vlan_cfg; @@ -700,7 +841,7 @@ struct hclge_vport_vtag_tx_cfg_cmd { u8 rsv1[2]; __le16 def_vlan_tag1; __le16 def_vlan_tag2; - u8 vf_bitmap[8]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; u8 rsv2[8]; }; @@ -712,7 +853,7 @@ struct hclge_vport_vtag_rx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; u8 rsv1[6]; - u8 vf_bitmap[8]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; u8 rsv2[8]; }; @@ -746,6 +887,24 @@ struct hclge_cfg_tx_queue_pointer_cmd { u8 rsv[14]; }; +#pragma pack(1) +struct hclge_mac_ethertype_idx_rd_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_addr[ETH_ALEN]; + __le16 index; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + __le16 rev0; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rev1[2]; +}; + +#pragma pack() + #define HCLGE_TSO_MSS_MIN_S 0 #define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) @@ -758,6 +917,12 @@ struct hclge_cfg_tso_status_cmd { u8 rsv[20]; }; +#define HCLGE_GRO_EN_B 0 +struct hclge_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + #define HCLGE_TSO_MSS_MIN 256 #define HCLGE_TSO_MSS_MAX 9668 @@ -771,13 +936,26 @@ struct hclge_reset_tqp_queue_cmd { #define HCLGE_CFG_RESET_MAC_B 3 #define HCLGE_CFG_RESET_FUNC_B 7 +#define HCLGE_CFG_RESET_RCB_B 1 struct hclge_reset_cmd { u8 mac_func_reset; u8 fun_reset_vfid; - u8 rsv[22]; + u8 fun_reset_rcb; + __le16 fun_reset_rcb_vqid_start; + __le16 fun_reset_rcb_vqid_num; + u8 fun_reset_rcb_return_status; + u8 rsv[16]; +}; + +#define HCLGE_PF_RESET_DONE_BIT BIT(0) + +struct hclge_pf_rst_done_cmd { + u8 pf_rst_done; + u8 rsv[23]; }; #define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) #define HCLGE_CMD_SERDES_DONE_B BIT(0) #define HCLGE_CMD_SERDES_SUCCESS_B BIT(1) struct hclge_serdes_lb_cmd { @@ -791,6 +969,7 @@ struct hclge_serdes_lb_cmd { #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ #define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ +#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */ #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 @@ -804,8 +983,11 @@ struct hclge_serdes_lb_cmd { #define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGE_NIC_CRQ_TAIL_REG 0x27024 #define HCLGE_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGE_NIC_CMQ_EN_B 16 -#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGE_NIC_SW_RST_RDY_B 16 +#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B) + #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 @@ -818,6 +1000,115 @@ struct hclge_set_led_state_cmd { u8 rsv2[20]; }; +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; + +struct hclge_get_fd_allocation_cmd { + __le32 
stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(11, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + +struct hclge_get_imp_bd_cmd { + __le32 bd_num; + u8 rsv[20]; +}; + +struct hclge_query_ppu_pf_other_int_dfx_cmd { + __le16 over_8bd_no_fe_qid; + __le16 over_8bd_no_fe_vf_id; + __le16 tso_mss_cmp_min_err_qid; + __le16 tso_mss_cmp_min_err_vf_id; + __le16 tso_mss_cmp_max_err_qid; + __le16 tso_mss_cmp_max_err_vf_id; + __le16 tx_rd_fbd_poison_qid; + __le16 tx_rd_fbd_poison_vf_id; + __le16 rx_rd_fbd_poison_qid; + __le16 rx_rd_fbd_poison_vf_id; + u8 rsv[4]; +}; + +#define HCLGE_LINK_EVENT_REPORT_EN_B 0 +#define HCLGE_NCSI_ERROR_REPORT_EN_B 1 +struct hclge_firmware_compat_cmd { + __le32 compat; + u8 rsv[20]; +}; + +#define HCLGE_SFP_INFO_CMD_NUM 6 +#define HCLGE_SFP_INFO_BD0_LEN 20 +#define HCLGE_SFP_INFO_BDX_LEN 24 +#define HCLGE_SFP_INFO_MAX_LEN \ + (HCLGE_SFP_INFO_BD0_LEN + \ + (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN) + +struct hclge_sfp_info_bd0_cmd { + __le16 offset; + __le16 read_len; + u8 data[HCLGE_SFP_INFO_BD0_LEN]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { @@ -845,14 +1136,11 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, enum hclge_opcode_type opcode, bool is_read); void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); -int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param); - enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, struct hclge_desc *desc); enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, struct hclge_desc *desc); -void hclge_destroy_cmd_queue(struct hclge_hw *hw); +void hclge_cmd_uninit(struct hclge_dev *hdev); int hclge_cmd_queue_init(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f08ebb7caaaf5e359fe2ba159fde8e873204979d..636f8dd996aceb73882c85cc791ac8d4eea10111 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. 
#include "hclge_main.h" +#include "hclge_dcb.h" #include "hclge_tm.h" #include "hnae3.h" @@ -35,7 +36,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, } } - return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + + return 0; } static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, @@ -70,24 +73,59 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) return 0; } +static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, + u8 *prio_tc) +{ + int i; + + if (num_tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "tc num checking failed, %u > tc_max(%u)\n", + num_tc, hdev->tc_max); + return -EINVAL; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= num_tc) { + dev_err(&hdev->pdev->dev, + "prio_tc[%d] checking failed, %u >= num_tc(%u)\n", + i, prio_tc[i], num_tc); + return -EINVAL; + } + } + + if (num_tc > hdev->vport[0].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp checking failed, %u > tqp(%u)\n", + num_tc, hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { + bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; + int ret; u8 i; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (ets->prio_tc[i] >= hdev->tc_max || - i >= hdev->tc_max) - return -EINVAL; - + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; + } + + ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); + if (ret) + return ret; + for (i = 0; i < hdev->tc_max; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: if (hdev->tm_info.tc_info[i].tc_sch_mode != @@ -100,13 +138,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, *changed = true; total_ets_bw += ets->tc_tx_bw[i]; - break; + has_ets_tc = true; + break; default: return -EINVAL; } } - if (total_ets_bw != BW_PERCENT) + if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; *tc = max_tc + 1; @@ -116,21 +155,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, return 0; } -static int hclge_map_update(struct hnae3_handle *h) +static int hclge_map_update(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(h); - struct hclge_dev *hdev = vport->back; int ret; - ret = hclge_tm_map_cfg(hdev); + ret = hclge_tm_schd_setup_hw(hdev); if (ret) return ret; - ret = hclge_tm_schd_mode_hw(hdev); - if (ret) - return ret; - - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, false); if (ret) return ret; @@ -143,32 +176,32 @@ static int hclge_map_update(struct hnae3_handle *h) return hclge_rss_init_hw(hdev); } -static int hclge_client_setup_tc(struct hclge_dev *hdev) +static int hclge_notify_down_uinit(struct hclge_dev *hdev) { - struct hclge_vport *vport = hdev->vport; - struct hnae3_client *client; - struct hnae3_handle *handle; int ret; - u32 i; - for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { - handle = &vport[i].nic; - client = handle->client; + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); +} - if (!client || !client->ops || !client->ops->setup_tc) - continue; +static int hclge_notify_init_up(struct hclge_dev *hdev) +{ + int ret; - ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc); - if 
(ret) - return ret; - } + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; - return 0; + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); } static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; bool map_changed = false; u8 num_tc = 0; @@ -182,67 +215,83 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; + if (map_changed) { + if (netif_msg_ifdown(h)) + netdev_info(netdev, "set ets\n"); + + ret = hclge_notify_down_uinit(hdev); + if (ret) + return ret; + } + hclge_tm_schd_info_update(hdev, num_tc); + if (num_tc > 1) + hdev->flag |= HCLGE_FLAG_DCB_ENABLE; + else + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) - return ret; + goto err_out; if (map_changed) { - ret = hclge_client_setup_tc(hdev); + ret = hclge_map_update(hdev); + if (ret) + goto err_out; + + ret = hclge_notify_init_up(hdev); if (ret) return ret; } return hclge_tm_dwrr_cfg(hdev); + +err_out: + if (!map_changed) + return ret; + + (void)hclge_notify_init_up(hdev); + + return ret; } static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { - u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; - u8 i, j, pfc_map, *prio_tc; int ret; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; - prio_tc = hdev->tm_info.prio_tc; - pfc_map = hdev->tm_info.hw_pfc_map; + pfc->pfc_en = hdev->tm_info.pfc_en; - /* Pfc setting is based on TC */ - for (i = 0; i < hdev->tm_info.num_tc; i++) { - for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { - if ((prio_tc[j] == i) && (pfc_map & BIT(i))) - pfc->pfc_en |= BIT(j); - } - } - - ret = hclge_pfc_tx_stats_get(hdev, requests); - if (ret) + ret = hclge_mac_update_stats(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update MAC stats, ret = %d.\n", ret); return ret; + } - ret = hclge_pfc_rx_stats_get(hdev, indications); - if (ret) - return ret; + hclge_pfc_tx_stats_get(hdev, pfc->requests); + hclge_pfc_rx_stats_get(hdev, pfc->indications); - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - pfc->requests[i] = requests[i]; - pfc->indications[i] = indications[i]; - } return 0; } static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; + int ret; - if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || - hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; + if (pfc->pfc_en == hdev->tm_info.pfc_en) + return 0; + prio_tc = hdev->tm_info.prio_tc; pfc_map = 0; @@ -255,12 +304,31 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) } } - if (pfc_map == hdev->tm_info.hw_pfc_map) - return 0; - hdev->tm_info.hw_pfc_map = pfc_map; + hdev->tm_info.pfc_en = pfc->pfc_en; + + if (netif_msg_ifdown(h)) + netdev_info(netdev, + "set pfc: pfc_en=0x%x, pfc_map=0x%x, num_tc=%u\n", + pfc->pfc_en, pfc_map, hdev->tm_info.num_tc); + + hclge_tm_pfc_info_update(hdev); + + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) { + hclge_notify_client(hdev, 
HNAE3_UP_CLIENT); + return ret; + } - return hclge_pause_setup_hw(hdev); + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); } /* DCBX configuration */ @@ -278,8 +346,12 @@ static u8 hclge_getdcbx(struct hnae3_handle *h) static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) { struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; + if (netif_msg_drv(h)) + netdev_info(netdev, "set dcbx: mode=%u\n", mode); + /* No support for LLD_MANAGED modes or CEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || (mode & DCB_CAP_DCBX_VER_CEE) || @@ -291,32 +363,127 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) return 0; } +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u16 queue_sum = 0; + int ret; + int i; + + if (!mqprio_qopt->qopt.num_tc) { + mqprio_qopt->qopt.num_tc = 1; + return 0; + } + + ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, + mqprio_qopt->qopt.prio_tc_map); + if (ret) + return ret; + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { + dev_err(&hdev->pdev->dev, + "qopt queue count must be power of 2\n"); + return -EINVAL; + } + + if (mqprio_qopt->qopt.count[i] > hdev->rss_size_max) { + dev_err(&hdev->pdev->dev, + "qopt queue count should be no more than %u\n", + hdev->rss_size_max); + return -EINVAL; + } + + if (mqprio_qopt->qopt.offset[i] != queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue offset must start from 0, and being continuous\n"); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { + dev_err(&hdev->pdev->dev, + "qopt tx_rate is not supported\n"); + return -EOPNOTSUPP; + } + + queue_sum = mqprio_qopt->qopt.offset[i]; + queue_sum += mqprio_qopt->qopt.count[i]; + } + if (hdev->vport[0].alloc_tqps < queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue count sum should be less than %u\n", + hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + memset(tc_info, 0, sizeof(*tc_info)); + tc_info->num_tc = mqprio_qopt->qopt.num_tc; + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, + FIELD_SIZEOF(struct hnae3_tc_info, prio_tc)); + memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, + FIELD_SIZEOF(struct hnae3_tc_info, tqp_count)); + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, + FIELD_SIZEOF(struct hnae3_tc_info, tqp_offset)); +} + +static int hclge_config_tc(struct hclge_dev *hdev, + struct hnae3_tc_info *tc_info) +{ + int i; + + hclge_tm_schd_info_update(hdev, tc_info->num_tc); + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; + + return hclge_map_update(hdev); +} + /* Set up TC for hardware offloaded mqprio in channel mode */ -static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) +static int hclge_setup_tc(struct hnae3_handle *h, + struct tc_mqprio_qopt_offload *mqprio_qopt) { struct hclge_vport *vport = hclge_get_vport(h); + struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info old_tc_info; + u8 tc = mqprio_qopt->qopt.num_tc; int ret; + /* if client unregistered, it's not allowed to change + * mqprio configuration, which may cause uninit ring + * fail. 
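+	 * The HCLGE_STATE_NIC_REGISTERED check below returns -EBUSY in that
+	 * case.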
+ */ + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return -EBUSY; + if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) return -EINVAL; - if (tc > hdev->tc_max) { + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); + if (ret) { dev_err(&hdev->pdev->dev, - "setup tc failed, tc(%u) > tc_max(%u)\n", - tc, hdev->tc_max); - return -EINVAL; + "failed to check mqprio qopt params, ret = %d\n", ret); + return ret; } - hclge_tm_schd_info_update(hdev, tc); - - ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); + ret = hclge_notify_down_uinit(hdev); if (ret) return ret; - ret = hclge_tm_init_hw(hdev); + kinfo = &vport->nic.kinfo; + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); + kinfo->tc_info.mqprio_active = tc > 0; + + ret = hclge_config_tc(hdev, &kinfo->tc_info); if (ret) - return ret; + goto err_out; hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; @@ -325,7 +492,18 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) else hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; - return 0; + return hclge_notify_init_up(hdev); + +err_out: + /* roll-back */ + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); + if (hclge_config_tc(hdev, &kinfo->tc_info)) + dev_err(&hdev->pdev->dev, + "failed to roll back tc configuration\n"); + + (void)hclge_notify_init_up(hdev); + + return ret; } static const struct hnae3_dcb_ops hns3_dcb_ops = { @@ -335,7 +513,6 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = { .ieee_setpfc = hclge_ieee_setpfc, .getdcbx = hclge_getdcbx, .setdcbx = hclge_setdcbx, - .map_update = hclge_map_update, .setup_tc = hclge_setup_tc, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e73e10d1722247e5c4d0db0171f94229ec5dd7b3 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -0,0 +1,2414 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_err.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static const char * const state_str[] = { "off", "on" };
+static const char * const hclge_mac_state_str[] = {
+	"TO_ADD", "TO_DEL", "ACTIVE"
+};
+
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
+	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
+	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
+	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
+	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
+	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
+	  .dfx_msg = &hclge_dbg_rtc_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
+	  .dfx_msg = &hclge_dbg_ppp_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
+	  .dfx_msg = &hclge_dbg_rcb_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
+	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
+	  .dfx_msg = &hclge_dbg_tqp_reg[0],
+	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
+static void hclge_dbg_fill_content(char *content, u16 len,
+				   const struct hclge_dbg_item *items,
+				   const char **result, u16 size)
+{
+	char *pos = content;
+	u16 i;
+
+	memset(content, ' ', len);
+	for (i = 0; i < size; i++) {
+		if (result)
+			strncpy(pos, result[i], strlen(result[i]));
+		else
+			strncpy(pos, items[i].name, strlen(items[i].name));
+		pos += strlen(items[i].name) + items[i].interval;
+	}
+	*pos++ = '\n';
+	*pos++ = '\0';
+}
+
+static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
+{
+	if (id)
+		sprintf(buf, "vf%u", id - 1);
+	else
+		sprintf(buf, "pf");
+
+	return buf;
+}
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
+				    u32 *bd_num)
+{ + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int entries_per_desc; + int index; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get dfx bd_num, offset = %d, ret = %d\n", + offset, ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + index = offset % entries_per_desc; + + *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]); + if (!(*bd_num)) { + dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_dbg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, + int index, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int ret, i; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + desc->data[0] = cpu_to_le32(index); + + for (i = 1; i < bd_num; i++) { + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "cmd(0x%x) send fail, ret = %d\n", cmd, ret); + return ret; +} + +static int +hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + struct hclge_desc *desc_src; + u32 index, entry, i, cnt; + int bd_num, min_num, ret; + struct hclge_desc *desc; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0, cnt = 0; i < min_num; i++, dfx_message++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n", + cnt++, dfx_message->message); + + for (i = 0; i < cnt; i++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i); + + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + for (index = 0; index < hdev->vport[0].alloc_tqps; index++) { + dfx_message = reg_info->dfx_msg; + desc = desc_src; + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, + reg_msg->cmd); + if (ret) + break; + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + desc++; + + *pos += scnprintf(buf + *pos, len - *pos, "%#x\t", + le32_to_cpu(desc->data[entry])); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + } + + kfree(desc_src); + return ret; +} + +static int +hclge_dbg_dump_reg_common(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + struct hclge_desc *desc_src; + int bd_num, min_num, ret; + struct hclge_desc *desc; + u32 entry, i; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + desc = desc_src; + + ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd); + if (ret) { + kfree(desc); + return ret; + } + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + 
desc++; + if (!dfx_message->flag) + continue; + + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + dfx_message->message, + le32_to_cpu(desc->data[entry])); + } + + kfree(desc_src); + return 0; +} + +static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac enable status, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_mode_cmd *)desc.data; + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + + *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); + *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); + *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); + *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); + *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); + *pos += scnprintf(buf + *pos, len - *pos, + "mac_rx_oversize_truncate_en: %#x\n", + hnae3_get_bit(loop_en, + HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); + *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); + *pos += scnprintf(buf + *pos, len - *pos, + "mac_tx_under_min_err_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); + *pos += scnprintf(buf + *pos, len - *pos, + "mac_tx_oversize_truncate_en: %#x\n", + hnae3_get_bit(loop_en, + HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); + + return 0; +} + +static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac frame size, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n", + le16_to_cpu(req->max_frm_size)); + *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n", + req->min_frm_size); + + return 0; +} + +static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ +#define HCLGE_MAC_SPEED_SHIFT 0 +#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) +#define HCLGE_MAC_DUPLEX_SHIFT 7 + + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); + + ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac speed duplex, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n", + hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, + HCLGE_MAC_SPEED_SHIFT)); + *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n", + hnae3_get_bit(req->speed_dup, + HCLGE_MAC_DUPLEX_SHIFT)); + return 0; +} + +static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u16 qset_id, qset_num; + int ret; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n"); + for (qset_id = 0; qset_id < qset_num; qset_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1, + HCLGE_OPC_QSET_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%04u %#x %#x %#x %#x\n", + qset_id, req.bit0, req.bit1, req.bit2, + req.bit3); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pri_id, pri_num; + int ret; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n"); + for (pri_id = 0; pri_id < pri_num; pri_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1, + HCLGE_OPC_PRI_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pri_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pg_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n"); + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1, + HCLGE_OPC_PG_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pg_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc; + u16 nq_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n"); + for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_NQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x", + nq_id, le32_to_cpu(desc.data[1])); + + ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_RQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - 
*pos, + " %#x\n", + le32_to_cpu(desc.data[1])); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1, + HCLGE_OPC_PORT_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", req.bit0); + *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", + req.bit1); + + return 0; +} + +static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc[2]; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n", + le32_to_cpu(desc[0].data[2])); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2, + HCLGE_OPC_TM_INTERNAL_STS); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, + "sch_roce_fifo_afull_gap: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, + "tx_private_waterline: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n", + le32_to_cpu(desc[0].data[5])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n", + le32_to_cpu(desc[1].data[0])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n", + le32_to_cpu(desc[1].data[1])); + + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) + return 0; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_STS_1); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, + "IGU_TX_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[5])); + + return 0; +} + +static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, + enum hnae3_dbg_cmd cmd, char *buf, int len) +{ + const struct hclge_dbg_reg_type_info *reg_info; + int pos = 0, ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { + reg_info = &hclge_dbg_reg_info[i]; + if (cmd == reg_info->cmd) { + if (cmd == HNAE3_DBG_CMD_REG_TQP) + return 
hclge_dbg_dump_reg_tqp(hdev, reg_info, + buf, len, &pos); + + ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf, + len, &pos); + if (ret) + break; + } + } + + return ret; +} + +static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + char *sch_mode_str; + int pos = 0; + int ret; + u8 i; + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "Only DCB-supported dev supports tc\n"); + return -EOPNOTSUPP; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n", + ret); + return ret; + } + + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n", + hdev->tm_info.num_tc); + pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n", + ets_weight->weight_offset); + + pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n"); + for (i = 0; i < HNAE3_MAX_TC; i++) { + sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; + pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", + i, sch_mode_str, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + } + + return 0; +} + +static const struct hclge_dbg_item tm_pg_items[] = { + { "ID", 2 }, + { "PRI_MAP", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + +static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para, + char **result, u8 *index) +{ + sprintf(result[(*index)++], "%3u", para->ir_b); + sprintf(result[(*index)++], "%3u", para->ir_u); + sprintf(result[(*index)++], "%3u", para->ir_s); + sprintf(result[(*index)++], "%3u", para->bs_b); + sprintf(result[(*index)++], "%3u", para->bs_s); + sprintf(result[(*index)++], "%3u", para->flag); + sprintf(result[(*index)++], "%6u", para->rate); +} + +static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str; + u8 pg_id, sch_mode, weight, pri_bit_map, i, j; + char content[HCLGE_DBG_TM_INFO_LEN]; + int pos = 0; + int ret; + + for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + NULL, ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map); + if (ret) + return ret; + + ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, pg_id, + HCLGE_OPC_TM_PG_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, pg_id, + HCLGE_OPC_TM_PG_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? 
"dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%02u", pg_id); + sprintf(result[j++], "0x%02x", pri_bit_map); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + (const char **)result, + ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_shaper_para shaper_para; + int pos = 0; + int ret; + + ret = hclge_tm_get_port_shaper(hdev, &shaper_para); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n"); + pos += scnprintf(buf + pos, len - pos, + "%3u %3u %3u %3u %3u %1u %6u\n", + shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s, + shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag, + shaper_para.rate); + + return 0; +} + +static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id, + char *buf, int len) +{ + u32 qset_mapping[HCLGE_BP_GRP_NUM]; + struct hclge_bp_to_qs_map_cmd *map; + struct hclge_desc desc; + int pos = 0; + u8 group_id; + u8 grp_num; + u16 i = 0; + int ret; + + grp_num = HCLGE_BP_GRP_NUM; + map = (struct hclge_bp_to_qs_map_cmd *)desc.data; + for (group_id = 0; group_id < grp_num; group_id++) { + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + true); + map->tc_id = tc_id; + map->qs_group_id = group_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get bp to qset map, ret = %d\n", + ret); + return ret; + } + + qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map); + } + + pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n"); + for (group_id = 0; group_id < grp_num / 8; group_id++) { + pos += scnprintf(buf + pos, len - pos, + "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", + group_id * 256, qset_mapping[i + 7], + qset_mapping[i + 6], qset_mapping[i + 5], + qset_mapping[i + 4], qset_mapping[i + 3], + qset_mapping[i + 2], qset_mapping[i + 1], + qset_mapping[i]); + i += 8; + } + + return pos; +} + +static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len) +{ + u16 queue_id; + u16 qset_id; + u8 link_vld; + int pos = 0; + u8 pri_id; + u8 tc_id; + int ret; + + for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) { + ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id); + if (ret) + return ret; + + ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id, + &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "QUEUE_ID QSET_ID PRI_ID TC_ID\n"); + pos += scnprintf(buf + pos, len - pos, + "%04u %4u %3u %2u\n", + queue_id, qset_id, pri_id, tc_id); + + if (!hnae3_dev_dcb_supported(hdev)) + continue; + + ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos, + len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + } + + return 0; +} + +static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_shaper_para c_shaper_para; + struct hclge_tm_shaper_para p_shaper_para; + u8 pri_num, sch_mode, weight; + char *sch_mode_str; + int pos = 0; + int ret; + u8 i; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return 
ret; + + pos += scnprintf(buf + pos, len - pos, + "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B "); + pos += scnprintf(buf + pos, len - pos, + "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U "); + pos += scnprintf(buf + pos, len - pos, + "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n"); + + for (i = 0; i < pri_num; i++) { + ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pri_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : + "sp"; + + pos += scnprintf(buf + pos, len - pos, + "%04u %4s %3u %3u %3u %3u ", + i, sch_mode_str, weight, c_shaper_para.ir_b, + c_shaper_para.ir_u, c_shaper_para.ir_s); + pos += scnprintf(buf + pos, len - pos, + "%3u %3u %1u %6u ", + c_shaper_para.bs_b, c_shaper_para.bs_s, + c_shaper_para.flag, c_shaper_para.rate); + pos += scnprintf(buf + pos, len - pos, + "%3u %3u %3u %3u %3u ", + p_shaper_para.ir_b, p_shaper_para.ir_u, + p_shaper_para.ir_s, p_shaper_para.bs_b, + p_shaper_para.bs_s); + pos += scnprintf(buf + pos, len - pos, "%1u %6u\n", + p_shaper_para.flag, p_shaper_para.rate); + } + + return 0; +} + +static const struct hclge_dbg_item tm_qset_items[] = { + { "ID", 4 }, + { "MAP_PRI", 2 }, + { "LINK_VLD", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "IR_B", 2 }, + { "IR_U", 2 }, + { "IR_S", 2 }, + { "BS_B", 2 }, + { "BS_S", 2 }, + { "FLAG", 2 }, + { "RATE(Mbps)", 0 } +}; + +static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str; + u8 priority, link_vld, sch_mode, weight; + struct hclge_tm_shaper_para shaper_para; + char content[HCLGE_DBG_TM_INFO_LEN]; + u16 qset_num, i; + int ret, pos; + u8 j; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + NULL, ARRAY_SIZE(tm_qset_items)); + pos = scnprintf(buf, len, "%s", content); + + for (i = 0; i < qset_num; i++) { + ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_qset_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? 
"dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4u", priority); + sprintf(result[j++], "%4u", link_vld); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + (const char **)result, + ARRAY_SIZE(tm_qset_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pause, ret = %d\n", ret); + return ret; + } + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n", + pause_param->pause_trans_gap); + pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n", + le16_to_cpu(pause_param->pause_trans_time)); + return 0; +} + +static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf, + int len) +{ +#define HCLGE_DBG_TC_MASK 0x0F +#define HCLGE_DBG_TC_BIT_WIDTH 4 + + struct hclge_qos_pri_map_cmd *pri_map; + struct hclge_desc desc; + int pos = 0; + u8 *pri_tc; + u8 tc, i; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pri map, ret = %d\n", ret); + return ret; + } + + pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n", + pri_map->vlan_pri); + pos += scnprintf(buf + pos, len - pos, "PRI TC\n"); + + pri_tc = (u8 *)pri_map; + for (i = 0; i < HNAE3_MAX_TC; i++) { + tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH); + tc &= HCLGE_DBG_TC_MASK; + pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc); + } + + return 0; +} + +static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump tx buf, ret = %d\n", ret); + return ret; + } + + tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "tx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_buff_cmd *rx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv buf, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + + rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(rx_buf_cmd->buf_num[i])); + + pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n", + 
le16_to_cpu(rx_buf_cmd->shared_buf)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_com_wl; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common wl, ret = %d\n", ret); + return ret; + } + + rx_com_wl = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, "\n"); + pos += scnprintf(buf + pos, len - pos, + "rx_com_wl: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_com_wl->com_wl.high), + le16_to_cpu(rx_com_wl->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_packet_cnt; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx global pkt cnt, ret = %d\n", ret); + return ret; + } + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_packet_cnt->com_wl.high), + le16_to_cpu(rx_packet_cnt->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_wl_buf *rx_priv_wl; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv wl buf, ret = %d\n", ret); + return ret; + } + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev, + char *buf, int len) +{ + struct hclge_rx_com_thrd *rx_com_thrd; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common threshold, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; + 
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + return pos; +} + +static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos, + len - pos); + if (ret < 0) + return ret; + + return 0; +} + +static int hclge_dbg_dump_mac_table(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_vlan_idx_rd_cmd *mac_rd_cmd; + struct hclge_mac_vlan_idx_rd_mc *mc_mac_tbl; + struct hclge_desc desc[3]; + u32 mc_tbl_idx, i; + int mc_tbl_len; + int pos = 0; + int ret; + int j; + + mc_tbl_len = sizeof(struct hclge_mac_vlan_idx_rd_mc) * + HCLGE_DBG_MAC_TBL_MAX; + mc_mac_tbl = kzalloc(mc_tbl_len, GFP_KERNEL); + if (!mc_mac_tbl) + return -ENOMEM; + + pos += scnprintf(buf + pos, len - pos, "Unicast tab:\n"); + pos += scnprintf(buf + pos, len - pos, + " index mac_addr vlan_id VMDq1 "); + pos += scnprintf(buf + pos, len - pos, + "U_M mac_en in_port E_type E_Port\n"); + + mc_tbl_idx = 0; + for (i = 0; i < HCLGE_DBG_MAC_TBL_MAX; i++) { + /* Prevent long-term occupation of the command channel. 
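+		 * Sleep for HCLGE_DBG_PAUSE_TIME after every
+		 * HCLGE_DBG_SCAN_STEP entries so other users of the command
+		 * channel are not starved while the table is scanned.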
*/ + if ((i % HCLGE_DBG_SCAN_STEP) == 0) + msleep(HCLGE_DBG_PAUSE_TIME); + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_PPP_MAC_VLAN_IDX_RD, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_PPP_MAC_VLAN_IDX_RD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_PPP_MAC_VLAN_IDX_RD, + true); + + mac_rd_cmd = (struct hclge_mac_vlan_idx_rd_cmd *)desc[0].data; + + mac_rd_cmd->index = cpu_to_le32(i); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac table, ret = %d\n", ret); + kfree(mc_mac_tbl); + return ret; + } + + if (mac_rd_cmd->resp_code) + continue; + + if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) { + mc_mac_tbl[mc_tbl_idx].index = i; + memcpy(mc_mac_tbl[mc_tbl_idx].mac_addr, + mac_rd_cmd->mac_addr, ETH_ALEN); + memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb, + desc[1].data, 24); + memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24], + desc[2].data, 8); + mc_tbl_idx++; + + continue; + } + + pos += scnprintf(buf + pos, len - pos, " %04u %pM ", + i, mac_rd_cmd->mac_addr); + + pos += scnprintf(buf + pos, len - pos, + " %04u %u %u %u %u ", + le16_to_cpu(mac_rd_cmd->vlan_tag), + mac_rd_cmd->entry_type & + HCLGE_DBG_MAC_TBL_EN_TYPE, + mac_rd_cmd->entry_type & + HCLGE_DBG_MAC_TBL_MC_TYPE, + mac_rd_cmd->mc_mac_en & + HCLGE_DBG_MAC_TBL_MAC_EN, + le16_to_cpu(mac_rd_cmd->port) & + HCLGE_DBG_MAC_TBL_IN_PORT); + + pos += scnprintf(buf + pos, len - pos, + "%lu %04x\n", + le16_to_cpu(mac_rd_cmd->egress_port) & + HCLGE_DBG_MAC_TBL_E_PORT_B, + le16_to_cpu(mac_rd_cmd->egress_port) & + HCLGE_DBG_MAC_TBL_E_PORT); + } + + if (mc_tbl_idx > 0) { + pos += scnprintf(buf + pos, len - pos, + "Multicast tab: entry number = %u\n", + mc_tbl_idx); + pos += scnprintf(buf + pos, len - pos, + " index mac_addr UM_MC_RDATA\n"); + } + + for (i = 0; i < mc_tbl_idx; i++) { + pos += scnprintf(buf + pos, len - pos, " %04u %pM ", + mc_mac_tbl[i].index, mc_mac_tbl[i].mac_addr); + + for (j = 31; j >= 3; j -= 4) + pos += scnprintf(buf + pos, len - pos, + "%02x%02x%02x%02x ", + mc_mac_tbl[i].mg_vf_mb[j], + mc_mac_tbl[i].mg_vf_mb[j - 1], + mc_mac_tbl[i].mg_vf_mb[j - 2], + mc_mac_tbl[i].mg_vf_mb[j - 3]); + + pos += scnprintf(buf + pos, len - pos, "\n"); + } + + kfree(mc_mac_tbl); + + return 0; +} + +static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_ethertype_idx_rd_cmd *req0; + struct hclge_desc desc; + u32 msg_egress_port; + int pos = 0; + int ret, i; + + pos += scnprintf(buf + pos, len - pos, + "entry mac_addr mask ether "); + pos += scnprintf(buf + pos, len - pos, + "mask vlan mask i_map i_dir e_type "); + pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n"); + + for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, + true); + req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; + req0->index = cpu_to_le16(i); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump manage table, ret = %d\n", ret); + return ret; + } + + if (!req0->resp_code) + continue; + + pos += scnprintf(buf + pos, len - pos, "%02u %pM ", + le16_to_cpu(req0->index), req0->mac_addr); + + pos += scnprintf(buf + pos, len - pos, + "%x %04x %x %04x ", + !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), + le16_to_cpu(req0->ethter_type), + !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), + le16_to_cpu(req0->vlan_tag) & + HCLGE_DBG_MNG_VLAN_TAG); + 
+ pos += scnprintf(buf + pos, len - pos, + "%x %02x %02x ", + !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), + req0->i_port_bitmap, req0->i_port_direction); + + msg_egress_port = le16_to_cpu(req0->egress_port); + pos += scnprintf(buf + pos, len - pos, + "%x %x %02x %04x %x\n", + !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), + msg_egress_port & HCLGE_DBG_MNG_PF_ID, + (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, + le16_to_cpu(req0->egress_queue), + !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); + } + + return 0; +} + +#define HCLGE_DBG_TCAM_BUF_SIZE 256 + +static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, + char *tcam_buf, + struct hclge_dbg_tcam_msg tcam_msg) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int pos = 0; + int ret, i; + u32 *req; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = tcam_msg.stage; + req1->xy_sel = sel_x ? 1 : 0; + req1->index = cpu_to_le32(tcam_msg.loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + return ret; + + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "read result tcam key %s(%u):\n", sel_x ? "x" : "y", + tcam_msg.loc); + + /* tcam_data0 ~ tcam_data1 */ + req = (u32 *)req1->tcam_data; + for (i = 0; i < 2; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", *req++); + + /* tcam_data2 ~ tcam_data7 */ + req = (u32 *)req2->tcam_data; + for (i = 0; i < 6; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", *req++); + + /* tcam_data8 ~ tcam_data12 */ + req = (u32 *)req3->tcam_data; + for (i = 0; i < 5; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", *req++); + + return ret; +} + +static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int cnt = 0; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + rule_locs[cnt] = rule->location; + cnt++; + } + spin_unlock_bh(&hdev->fd_rule_lock); + + if (cnt != hdev->hclge_fd_rule_num || cnt == 0) + return -EINVAL; + + return cnt; +} + +static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) +{ + u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + struct hclge_dbg_tcam_msg tcam_msg; + int i, ret, rule_cnt; + u16 *rule_locs; + char *tcam_buf; + int pos = 0; + + if (!hnae3_dev_fd_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "Only FD-supported dev supports dump fd tcam\n"); + return -EOPNOTSUPP; + } + + if (!hdev->hclge_fd_rule_num || !rule_num) + return 0; + + rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL); + if (!rule_locs) + return -ENOMEM; + + tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL); + if (!tcam_buf) { + kfree(rule_locs); + return -ENOMEM; + } + + rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); + if (rule_cnt < 0) { + ret = rule_cnt; + dev_err(&hdev->pdev->dev, + "failed to get rule number, ret = %d\n", 
ret); + goto out; + } + + ret = 0; + for (i = 0; i < rule_cnt; i++) { + tcam_msg.stage = HCLGE_FD_STAGE_1; + tcam_msg.loc = rule_locs[i]; + + ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key x, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + + ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key y, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + } + +out: + kfree(tcam_buf); + kfree(rule_locs); + return ret; +} + +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", + hdev->rst_stats.pf_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n", + hdev->rst_stats.global_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n", + hdev->rst_stats.imp_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n", + hdev->rst_stats.reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n", + hdev->rst_stats.hw_reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "reset count: %u\n", + hdev->rst_stats.reset_cnt); + pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", + hdev->rst_stats.reset_fail_cnt); + pos += scnprintf(buf + pos, len - pos, + "vector0 interrupt enable status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); + pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); + pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); + pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n", + hclge_read_dev(&hdev->hw, + HCLGE_RAS_PF_OTHER_INT_STS_REG)); + pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); + pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); + pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", + hdev->state); + + return 0; +} + +static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len) +{ + unsigned long rem_nsec; + int pos = 0; + u64 lc; + + lc = local_clock(); + rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n", + (unsigned long)lc, rem_nsec / 1000); + pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n", + jiffies_to_msecs(jiffies - hdev->last_serv_processed)); + pos += scnprintf(buf + pos, len - pos, + "last_service_task_processed: %lu(jiffies)\n", + hdev->last_serv_processed); + pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n", + hdev->serv_processed_cnt); + + return 0; +} + +static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n", + hdev->num_nic_msi); + pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n", + hdev->num_roce_msi); + pos += 
scnprintf(buf + pos, len - pos, "num_msi_used: %u\n", + hdev->num_msi_used); + pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n", + hdev->num_msi_left); + + return 0; +} + +static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src, + char *buf, int len, u32 bd_num) +{ +#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2 + + struct hclge_desc *desc_index = desc_src; + u32 offset = 0; + int pos = 0; + u32 i, j; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + for (i = 0; i < bd_num; i++) { + j = 0; + while (j < HCLGE_DESC_DATA_LEN - 1) { + pos += scnprintf(buf + pos, len - pos, "0x%04x | ", + offset); + pos += scnprintf(buf + pos, len - pos, "0x%08x ", + le32_to_cpu(desc_index->data[j++])); + pos += scnprintf(buf + pos, len - pos, "0x%08x\n", + le32_to_cpu(desc_index->data[j++])); + offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET; + } + desc_index++; + } +} + +static int +hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_get_imp_bd_cmd *req; + struct hclge_desc *desc_src; + struct hclge_desc desc; + u32 bd_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true); + + req = (struct hclge_get_imp_bd_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get imp statistics bd number, ret = %d\n", + ret); + return ret; + } + + bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num, + HCLGE_OPC_IMP_STATS_INFO); + if (ret) { + kfree(desc_src); + dev_err(&hdev->pdev->dev, + "failed to get imp statistics, ret = %d\n", ret); + return ret; + } + + hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num); + + kfree(desc_src); + + return 0; +} + +#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 +#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 + +static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, + char *buf, int *len, int *pos) +{ +#define HCLGE_CMD_DATA_NUM 6 + + int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index; + int i, j; + + for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + *pos += scnprintf(buf + *pos, *len - *pos, + "0x%04x | 0x%08x\n", offset, + le32_to_cpu(desc[i].data[j])); + + offset += sizeof(u32); + *index -= sizeof(u32); + + if (*index <= 0) + return; + } + } +} + +static int hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) +{ +#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) + + struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; + int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; + int index = HCLGE_MAX_NCL_CONFIG_LENGTH; + int pos = 0; + u32 data0; + int ret; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + while (index > 0) { + data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index; + if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) + data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; + else + data0 |= (u32)index << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return ret; + + hclge_ncl_config_data_print(desc, &index, buf, &len, &pos); + } + + return 0; +} + +static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + struct 
hclge_config_mac_mode_cmd *req_app; + struct hclge_serdes_lb_cmd *req_serdes; + struct hclge_desc desc; + u8 loopback_en; + int pos = 0; + int ret; + + req_app = (struct hclge_config_mac_mode_cmd *)desc.data; + req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "mac id: %u\n", + hdev->hw.mac.mac_id); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump app loopback status, ret = %d\n", ret); + return ret; + } + + loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), + HCLGE_MAC_APP_LP_B); + pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n", + state_str[loopback_en]); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump serdes loopback status, ret = %d\n", + ret); + return ret; + } + + loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n", + state_str[loopback_en]); + + loopback_en = req_serdes->enable & + HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0; + pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n", + state_str[loopback_en]); + + if (phydev) { + loopback_en = phydev->loopback_enabled; + pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", + state_str[loopback_en]); + } + + return 0; +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static int hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, + "Recently generated mac tnl interruption:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, + "[%07lu.%03lu] status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } + + return 0; +} + +static const struct hclge_dbg_item mac_list_items[] = { + { "FUNC_ID", 2 }, + { "MAC_ADDR", 12 }, + { "STATE", 2 }, +}; + +static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len, + bool is_unicast) +{ + char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN]; + char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + char *result[ARRAY_SIZE(mac_list_items)]; + struct hclge_vport_mac_addr_cfg *mac_node, *tmp; + struct hclge_vport *vport; + struct list_head *list; + u32 func_id; + int pos = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mac_list_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n", + is_unicast ? "UC" : "MC"); + hclge_dbg_fill_content(content, sizeof(content), mac_list_items, + NULL, ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) { + vport = &hdev->vport[func_id]; + list = is_unicast ? 
&vport->uc_mac_list : &vport->mc_mac_list; + spin_lock_bh(&vport->mac_list_lock); + list_for_each_entry_safe(mac_node, tmp, list, node) { + i = 0; + result[i++] = hclge_dbg_get_func_id_str(str_id, + func_id); + sprintf(result[i++], "%pM", mac_node->mac_addr); + sprintf(result[i++], "%5s", + hclge_mac_state_str[mac_node->state]); + hclge_dbg_fill_content(content, sizeof(content), + mac_list_items, + (const char **)result, + ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + spin_unlock_bh(&vport->mac_list_lock); + } +} + +static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 rx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u rxvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + rx_cfg = req->vport_vlan_cfg; + vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B); + vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B); + vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B); + vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B); + + return 0; +} + +static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 tx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true); + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u txvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + tx_cfg = req->vport_vlan_cfg; + vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1); + + vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B); + vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B); + vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B); + vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B); + vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B); + vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B); + + return 0; +} + +static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev, + u8 vlan_type, u8 vf_id, + struct hclge_desc *desc) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + int ret; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type, + u8 vf_id, u8 *vlan_fe) +{ + struct 
hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc); + if (ret) + return ret; + + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + *vlan_fe = req->vlan_fe; + + return 0; +} + +static const struct hclge_dbg_item vlan_filter_items[] = { + { "FUNC_ID", 2 }, + { "I_VF_VLAN_FILTER", 2 }, + { "E_VF_VLAN_FILTER", 2 }, +}; + +static const struct hclge_dbg_item vlan_offload_items[] = { + { "FUNC_ID", 2 }, + { "PVID", 4 }, + { "ACCEPT_TAG1", 2 }, + { "ACCEPT_TAG2", 2 }, + { "ACCEPT_UNTAG1", 2 }, + { "ACCEPT_UNTAG2", 2 }, + { "INSERT_TAG1", 2 }, + { "INSERT_TAG2", 2 }, + { "STRIP_TAG1", 2 }, + { "STRIP_TAG2", 2 }, + { "PRI_ONLY_TAG1", 2 }, + { "PRI_ONLY_TAG2", 0 } +}; + +static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_filter_items)]; + u8 i, j, vlan_fe, ingress, egress; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + int ret; + + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0, + &vlan_fe); + if (ret) + return ret; + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0; + + *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n", + state_str[ingress]); + *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n", + state_str[egress]); + + hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, + NULL, ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i, + &vlan_fe); + if (ret) + return ret; + + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 
1 : 0; + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = state_str[ingress]; + result[j++] = state_str[egress]; + hclge_dbg_fill_content(content, sizeof(content), + vlan_filter_items, result, + ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + return 0; +} + +static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_offload_items)]; + char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN]; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + struct hclge_dbg_vlan_cfg vlan_cfg; + int ret; + u8 i, j; + + hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items, + NULL, ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + sprintf(str_pvid, "%u", vlan_cfg.pvid); + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = str_pvid; + result[j++] = state_str[vlan_cfg.accept_tag1]; + result[j++] = state_str[vlan_cfg.accept_tag2]; + result[j++] = state_str[vlan_cfg.accept_untag1]; + result[j++] = state_str[vlan_cfg.accept_untag2]; + result[j++] = state_str[vlan_cfg.insert_tag1]; + result[j++] = state_str[vlan_cfg.insert_tag2]; + result[j++] = state_str[vlan_cfg.strip_tag1]; + result[j++] = state_str[vlan_cfg.strip_tag2]; + result[j++] = state_str[vlan_cfg.pri_only1]; + result[j++] = state_str[vlan_cfg.pri_only2]; + + hclge_dbg_fill_content(content, sizeof(content), + vlan_offload_items, result, + ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, true); + + return 0; +} + +static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, false); + + return 0; +} + +static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) +{ + u8 func_num = pci_num_vf(hdev->pdev) + 1; + struct hclge_vport *vport; + int pos = 0; + u8 i; + + pos += scnprintf(buf, len, "num_alloc_vport : %u\n", + hdev->num_alloc_vport); + pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n", + hdev->max_umv_size); + pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n", + hdev->wanted_umv_size); + pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n", + hdev->priv_umv_size); + + mutex_lock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n", + hdev->share_umv_size); + for (i = 0; i < func_num; i++) { + vport = &hdev->vport[i]; + pos += scnprintf(buf + pos, len - pos, + "vport(%u) used_umv_num : %u\n", + i, vport->used_umv_num); + } + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static const struct hclge_dbg_func hclge_dbg_cmd_func[] = { + { + .cmd = HNAE3_DBG_CMD_TM_PRI, + 
.dbg_dump = hclge_dbg_dump_tm_pri, + }, + { + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dbg_dump = hclge_dbg_dump_tm_qset, + }, + { + .cmd = HNAE3_DBG_CMD_TM_MAP, + .dbg_dump = hclge_dbg_dump_tm_map, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PG, + .dbg_dump = hclge_dbg_dump_tm_pg, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PORT, + .dbg_dump = hclge_dbg_dump_tm_port, + }, + { + .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, + .dbg_dump = hclge_dbg_dump_tc, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, + .dbg_dump = hclge_dbg_dump_qos_pause_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, + .dbg_dump = hclge_dbg_dump_qos_pri_map, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, + .dbg_dump = hclge_dbg_dump_qos_buf_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_UC, + .dbg_dump = hclge_dbg_dump_mac_uc, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_MC, + .dbg_dump = hclge_dbg_dump_mac_mc, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_TBL, + .dbg_dump = hclge_dbg_dump_mac_table, + }, + { + .cmd = HNAE3_DBG_CMD_MNG_TBL, + .dbg_dump = hclge_dbg_dump_mng_table, + }, + { + .cmd = HNAE3_DBG_CMD_LOOPBACK, + .dbg_dump = hclge_dbg_dump_loopback, + }, + { + .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, + .dbg_dump = hclge_dbg_dump_interrupt, + }, + { + .cmd = HNAE3_DBG_CMD_RESET_INFO, + .dbg_dump = hclge_dbg_dump_rst_info, + }, + { + .cmd = HNAE3_DBG_CMD_IMP_INFO, + .dbg_dump = hclge_dbg_get_imp_stats_info, + }, + { + .cmd = HNAE3_DBG_CMD_NCL_CONFIG, + .dbg_dump = hclge_dbg_dump_ncl_config, + }, + { + .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_SSU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RPU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RTC, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_PPP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RCB, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_TQP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_MAC, + .dbg_dump = hclge_dbg_dump_mac, + }, + { + .cmd = HNAE3_DBG_CMD_REG_DCB, + .dbg_dump = hclge_dbg_dump_dcb, + }, + { + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dbg_dump = hclge_dbg_dump_fd_tcam, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, + .dbg_dump = hclge_dbg_dump_mac_tnl_status, + }, + { + .cmd = HNAE3_DBG_CMD_SERV_INFO, + .dbg_dump = hclge_dbg_dump_serv_info, + }, + { + .cmd = HNAE3_DBG_CMD_UMV_INFO, + .dbg_dump = hclge_dbg_dump_umv_info, + }, + { + .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, + .dbg_dump = hclge_dbg_dump_vlan_config, + }, +}; + +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + const struct hclge_dbg_func *cmd_func; + struct hclge_dev *hdev = vport->back; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) { + if (cmd == hclge_dbg_cmd_func[i].cmd) { + cmd_func = &hclge_dbg_cmd_func[i]; + if (cmd_func->dbg_dump) + return cmd_func->dbg_dump(hdev, buf, len); + else + return cmd_func->dbg_dump_reg(hdev, cmd, buf, + len); + } + } + + dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd); + return -EINVAL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h new file mode 100644 index 
0000000000000000000000000000000000000000..dab61afe3d330dee8a612a7ced9240b4f8b2edcf
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,785 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Copyright (c) 2018-2019 Hisilicon Limited.
+ */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#include
+#include "hclge_cmd.h"
+
+#define HCLGE_DBG_BUF_LEN 256
+#define HCLGE_DBG_MNG_TBL_MAX 64
+#define HCLGE_DBG_MAC_TBL_MAX 4223
+#define HCLGE_DBG_MAC_MC_TBL 2
+#define HCLGE_DBG_MAC_TBL_EN_TYPE 0x01
+#define HCLGE_DBG_MAC_TBL_MC_TYPE 0x02
+#define HCLGE_DBG_MAC_TBL_MAC_EN 0x01
+#define HCLGE_DBG_MAC_TBL_IN_PORT 0x07
+#define HCLGE_DBG_MAC_TBL_E_PORT 0x3FF
+#define HCLGE_DBG_MAC_TBL_E_PORT_B BIT(11)
+
+#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
+#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1)
+#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2)
+#define HCLGE_DBG_MNG_E_TYPE_B BIT(11)
+#define HCLGE_DBG_MNG_DROP_B BIT(13)
+#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF
+#define HCLGE_DBG_MNG_PF_ID 0x0007
+#define HCLGE_DBG_MNG_VF_ID 0x00FF
+
+/* Get DFX BD number offset */
+#define HCLGE_DBG_DFX_BIOS_OFFSET 1
+#define HCLGE_DBG_DFX_SSU_0_OFFSET 2
+#define HCLGE_DBG_DFX_SSU_1_OFFSET 3
+#define HCLGE_DBG_DFX_IGU_OFFSET 4
+#define HCLGE_DBG_DFX_RPU_0_OFFSET 5
+
+#define HCLGE_DBG_DFX_RPU_1_OFFSET 6
+#define HCLGE_DBG_DFX_NCSI_OFFSET 7
+#define HCLGE_DBG_DFX_RTC_OFFSET 8
+#define HCLGE_DBG_DFX_PPP_OFFSET 9
+#define HCLGE_DBG_DFX_RCB_OFFSET 10
+#define HCLGE_DBG_DFX_TQP_OFFSET 11
+
+#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+
+#define HCLGE_DBG_SCAN_STEP 100
+#define HCLGE_DBG_PAUSE_TIME 50
+
+struct hclge_qos_pri_map_cmd {
+	u8 pri0_tc : 4,
+	   pri1_tc : 4;
+	u8 pri2_tc : 4,
+	   pri3_tc : 4;
+	u8 pri4_tc : 4,
+	   pri5_tc : 4;
+	u8 pri6_tc : 4,
+	   pri7_tc : 4;
+	u8 vlan_pri : 4,
+	   rev : 4;
+};
+
+struct hclge_dbg_bitmap_cmd {
+	union {
+		u8 bitmap;
+		struct {
+			u8 bit0 : 1,
+			   bit1 : 1,
+			   bit2 : 1,
+			   bit3 : 1,
+			   bit4 : 1,
+			   bit5 : 1,
+			   bit6 : 1,
+			   bit7 : 1;
+		};
+	};
+};
+
+struct hclge_dbg_reg_common_msg {
+	int msg_num;
+	int offset;
+	enum hclge_opcode_type cmd;
+};
+
+struct hclge_mac_vlan_idx_rd_mc {
+	u32 index;
+	u8 mac_addr[ETH_ALEN];
+	u8 mg_vf_mb[32];
+};
+
+struct hclge_dbg_tcam_msg {
+	u8 stage;
+	u32 loc;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
+struct hclge_dbg_dfx_message {
+	int flag;
+	char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+	enum hnae3_dbg_cmd cmd;
+	const struct hclge_dbg_dfx_message *dfx_msg;
+	struct hclge_dbg_reg_common_msg reg_msg;
+};
+
+struct hclge_dbg_func {
+	enum hnae3_dbg_cmd cmd;
+	int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
+	int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
+			    char *buf, int len);
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+	{false, "Reserved"},
+	{true, "BP_CPU_STATE"},
+	{true, "DFX_MSIX_INFO_NIC_0"},
+	{true, "DFX_MSIX_INFO_NIC_1"},
+	{true, "DFX_MSIX_INFO_NIC_2"},
+	{true, "DFX_MSIX_INFO_NIC_3"},
+
+	{true, "DFX_MSIX_INFO_ROC_0"},
+	{true, "DFX_MSIX_INFO_ROC_1"},
+	{true, "DFX_MSIX_INFO_ROC_2"},
+	{true, "DFX_MSIX_INFO_ROC_3"},
+	{false, "Reserved"},
+	{false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+	{false, "Reserved"},
+	{true, "SSU_ETS_PORT_STATUS"},
+	{true, "SSU_ETS_TCG_STATUS"},
+	{false, "Reserved"},
+	{false, "Reserved"},
+	{true, "SSU_BP_STATUS_0"},
+
+	{true, "SSU_BP_STATUS_1"},
+	{true, "SSU_BP_STATUS_2"},
+	{true, "SSU_BP_STATUS_3"},
+
{true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, "BTMP_AGEING_RLS_CNT_BANK2"}, + {true, "SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, 
"RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, "TX_PACKET_TC_IN_CNT_4_L"}, + {true, "TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + {true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const 
struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, 
"LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + {true, "DROP_PT_CNT"}, + {true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, "GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, "SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + 
{true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, + + {true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, "FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, "Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + +#define HCLGE_DBG_INFO_LEN 256 +#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256 +#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512 +#define HCLGE_DBG_ID_LEN 16 +#define HCLGE_DBG_ITEM_NAME_LEN 32 +#define HCLGE_DBG_DATA_STR_LEN 32 +#define HCLGE_DBG_TM_INFO_LEN 256 + +#define 
HCLGE_BILLION_NANO_SECONDS 1000000000 + +struct hclge_dbg_item { + char name[HCLGE_DBG_ITEM_NAME_LEN]; + u16 interval; /* blank numbers after the item */ +}; + +struct hclge_dbg_vlan_cfg { + u16 pvid; + u8 accept_tag1; + u8 accept_tag2; + u8 accept_untag1; + u8 accept_untag2; + u8 insert_tag1; + u8 insert_tag2; + u8 strip_tag1; + u8 strip_tag2; + u8 pri_only1; + u8 pri_only2; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c new file mode 100644 index 0000000000000000000000000000000000000000..9338f030842cb04b6dcbb63e65800a3f5be85c21 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -0,0 +1,2011 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#include "hclge_err.h" + +const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error 
hclge_tqp_int_ecc_int[] = { + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_igu_int[] = { + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { + { .int_msk = BIT(0), .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ncsi_err_int[] = { + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { 
.int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_tm_sch_rint[] = { + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = 
"tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_qcn_fifo_rint[] = { + { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = 
HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_qcn_ecc_rint[] = { + { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { + { .int_msk = BIT(13), .msg = 
"rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { + { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_com_err_int[] = { + { .int_msk = BIT(0), .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "cks_edit_condition_err", + 
.reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +#define HCLGE_SSU_MEM_ECC_ERR(x) \ + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET } + +const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { + { .int_msk = BIT(0), .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", + .reset_level = 
HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { + { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { + { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, + { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, + { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, + { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, + { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, + { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, + { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, + { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, + { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, + { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, + { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" }, + { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, + { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, + { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, + { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, + { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, + { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, + { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, + { 
.int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, + { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, + { /* sentinel */ } +}; + +void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts, unsigned long *reset_requests) +{ + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); + if (err->reset_level && + err->reset_level != HNAE3_NONE_RESET) + set_bit(err->reset_level, reset_requests); + } + err++; + } +} + +/* hclge_cmd_query_error: read the error information + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @cmd: command opcode + * @flag: flag for extended command structure + * + * This function query the error info from hw register/s using command + */ +static int hclge_cmd_query_error(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 cmd, u16 flag) +{ + struct device *dev = &hdev->pdev->dev; + int desc_num = 1; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + desc_num = 2; + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + if (ret) + dev_err(dev, "query error cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false); + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); + + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); + desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); + desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN); + desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure common err interrupts\n", ret); + + return ret; +} + +static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->pdev->revision < 0x21) + return 0; + + /* configure NCSI error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure NCSI error interrupts\n", ret); + + return ret; +} + +static int 
hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure IGU,EGU error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE); + if (en) + desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU common interrupts\n", ret); + return ret; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU-EGU TNL interrupts\n", ret); + return ret; + } + + ret = hclge_config_ncsi_hw_err_int(hdev, en); + + return ret; +} + +static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure PPP error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + if (hdev->pdev->revision >= 0x21) + desc[1].data[2] = + cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, "fail(%d) to configure PPP error intr\n", ret); + + return ret; +} + +static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en) +{ + int ret; + + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, + en); + if (ret) + return ret; + + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + en); + + return ret; +} + +static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure TM SCH hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret); + return ret; + } + + /* configure TM QCN hw errors */ + ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0); + if (ret) { + dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); + return ret; + } + + hclge_cmd_reuse_desc(&desc, false); + if (en) + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure TM QCN mem errors\n", 
ret); + + return ret; +} + +static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure MAC common error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure MAC COMMON error intr\n", ret); + + return ret; +} + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN); + else + desc.data[0] = 0; + + desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int desc_num = 1; + int ret; + + /* configure PPU error interrupts */ + if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN); + desc[1].data[3] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN); + desc[1].data[4] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK); + desc[1].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK); + desc[1].data[3] |= + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK); + desc_num = 2; + } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK); + } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK); + } else { + dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); + return -EINVAL; + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + + return ret; +} + +static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n", + ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_MPF_OTHER_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_PF_OTHER_INT_CMD, en); + if (ret) + dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n", + ret); + return ret; +} + +static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure SSU ecc error interrupts */ + 
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false); + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) { + dev_err(dev, + "fail(%d) to configure SSU ECC error interrupt\n", ret); + return ret; + } + + /* configure SSU common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); + + if (en) { + if (hdev->pdev->revision >= 0x21) + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN); + else + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5)); + desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN); + desc[0].data[2] = + cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK | + HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure SSU COMMON error intr\n", ret); + + return ret; +} + +/* hclge_query_bd_num: query number of buffer descriptors + * @hdev: pointer to struct hclge_dev + * @is_ras: true for ras, false for msix + * @mpf_bd_num: number of main PF interrupt buffer descriptors + * @pf_bd_num: number of not main PF interrupt buffer descriptors + * + * This function querys number of mpf and pf buffer descriptors. + */ +static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras, + int *mpf_bd_num, int *pf_bd_num) +{ + struct device *dev = &hdev->pdev->dev; + u32 mpf_min_bd_num, pf_min_bd_num; + enum hclge_opcode_type opcode; + struct hclge_desc desc_bd; + int ret; + + if (is_ras) { + opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM; + } else { + opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM; + } + + hclge_cmd_setup_basic_desc(&desc_bd, opcode, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "fail(%d) to query msix int status bd num\n", + ret); + return ret; + } + + *mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + *pf_bd_num = le32_to_cpu(desc_bd.data[1]); + if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) { + dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n", + *mpf_bd_num, *pf_bd_num); + return -EINVAL; + } + + return 0; +} + +/* hclge_handle_mpf_ras_error: handle all main PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the main PF RAS errors in the + * hw register/s using command. 
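+ * The returned status words are decoded per module (IMP TCM, CMDQ, SSU,
+ * IGU, PPP, PPU, TM, QCN and NCSI) and logged with hclge_log_error(),
+ * which also records the reset level each error demands in
+ * ae_dev->hw_err_reset_req. The same descriptors are then resent to
+ * clear the reported errors.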
+ */ +static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all main PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log HNS common errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) + dev_warn(dev, "imp_rd_data_poison_err found\n"); + + status = le32_to_cpu(desc[0].data[3]); + if (status) + hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[4]); + if (status) + hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log SSU(Storage Switch Unit) errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & BIT(0); + if (status) { + dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; + if (status) + hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_INT_STS", + &hclge_igu_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPP(Programmable Packet Process) errors */ + desc_data = (__le32 *)&desc[4]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) { + dev_err(dev, + "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n"); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log TM(Traffic Manager) errors */ + desc_data = (__le32 *)&desc[6]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, 
"TM_SCH_RINT", + &hclge_tm_sch_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log QCN(Quantized Congestion Control) errors */ + desc_data = (__le32 *)&desc[7]; + status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log NCSI errors */ + desc_data = (__le32 *)&desc[9]; + status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* clear all main PF RAS errors */ + hclge_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_ras_error: handle all PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the PF RAS errors in the + * hw register/s using command. + */ +static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU(Storage Switch Unit) errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[2]); + if (status) + hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) { + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status, + &ae_dev->hw_err_reset_req); + hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR); + } + + /* clear all PF RAS errors */ + hclge_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); + + return ret; +} + +int hclge_handle_all_ras_errors(struct hclge_dev *hdev) +{ + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of registers in the RAS int status */ + ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num); + if (ret) + return ret; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, 
sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); + if (ret) { + kfree(desc); + return ret; + } + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); + kfree(desc); + + return ret; +} + +static int hclge_log_rocee_axi_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 3); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret); + return ret; + } + + dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), + le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), + le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); + dev_err(dev, "AXI3: %08X %08X %08X %08X\n", + le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), + le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); + + return 0; +} + +static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, + HCLGE_CMD_FLAG_NEXT); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); + return ret; + } + + dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), + le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); + + return 0; +} + +static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* read overflow error status */ + ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, + 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); + return ret; + } + + /* log overflow error */ + if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + const struct hclge_hw_error *err; + u32 err_sts; + + err = &hclge_rocee_qmm_ovf_err_int[0]; + err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK & + le32_to_cpu(desc[0].data[0]); + while (err->msg) { + if (err->int_msk == err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); + break; + } + err++; + } + } + + if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); + } + + if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE SCC 
OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); + } + + return 0; +} + +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +{ + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + unsigned int status; + int ret; + + /* read RAS error interrupt status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + status = le32_to_cpu(desc[0].data[0]); + + if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { + if (status & HCLGE_ROCEE_RERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI rresp error\n"); + + if (status & HCLGE_ROCEE_BERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI bresp error\n"); + + reset_type = HNAE3_FUNC_RESET; + + hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR); + + ret = hclge_log_rocee_axi_error(hdev); + if (ret) + return HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_ECC_INT_MASK) { + dev_err(dev, "ROCEE RAS 2bit ECC error\n"); + reset_type = HNAE3_GLOBAL_RESET; + + ret = hclge_log_rocee_ecc_error(hdev); + if (ret) + return HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_OVF_INT_MASK) { + ret = hclge_log_rocee_ovf_error(hdev); + if (ret) { + dev_err(dev, "failed(%d) to process ovf error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + } + + /* clear error status */ + hclge_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + return reset_type; +} + +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false); + if (en) { + /* enable ROCEE hw error interrupts */ + desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN); + + hclge_log_and_clear_rocee_ras_error(hdev); + } + desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK); + desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret); + + return ret; +} + +int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +{ + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; + struct hclge_dev *hdev = ae_dev->priv; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + hdev->pdev->revision < 0x21) + return reset_type; + + reset_type = hclge_log_and_clear_rocee_ras_error(hdev); + if (reset_type != HNAE3_NONE_RESET) + set_bit(reset_type, &ae_dev->hw_err_reset_req); + + return reset_type; +} + +static const struct hclge_hw_blk hw_blk[] = { + { + .msk = BIT(0), .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, + { + .msk = BIT(1), .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, + { + .msk = BIT(2), .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, + { + .msk = BIT(3), .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, + { + .msk = BIT(4), .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, + { 
+ .msk = BIT(5), .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, + { + .msk = BIT(8), .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, + { /* sentinel */ } +}; + +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state) +{ + const struct hclge_hw_blk *module = hw_blk; + int ret = 0; + + while (module->name) { + if (module->config_err_int) { + ret = module->config_err_int(hdev, state); + if (ret) + return ret; + } + module++; + } + + return ret; +} + +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 status; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "Can't recover - RAS error reported during dev init\n"); + return PCI_ERS_RESULT_NONE; + } + + status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + if (status & HCLGE_RAS_REG_NFE_MASK || + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) + ae_dev->hw_err_reset_req = 0; + else + goto out; + + /* Handling Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); + hclge_handle_all_ras_errors(hdev); + } + + /* Handling Non-fatal Rocee RAS errors */ + if (hdev->pdev->revision >= 0x21 && + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { + dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); + hclge_handle_rocee_ras_error(ae_dev); + } + + if (ae_dev->hw_err_reset_req) + return PCI_ERS_RESULT_NEED_RESET; + +out: + return PCI_ERS_RESULT_RECOVERED; +} + +static int hclge_clear_hw_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, bool is_mpf, + u32 bd_num) +{ + if (is_mpf) + desc[0].opcode = + cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT); + else + desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT); + + desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + + return hclge_cmd_send(&hdev->hw, &desc[0], bd_num); +} + +/* hclge_query_8bd_info: query information about over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @vf_id: Index of the virtual function with error + * @q_id: Physical index of the queue with error + * + * This function get specific index of queue and function which causes + * over_8bd_nfe_err by using command. If vf_id is 0, it means error is + * caused by PF instead of VF. + */ +static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id, + u16 *q_id) +{ + struct hclge_query_ppu_pf_other_int_dfx_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data; + *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id); + *q_id = le16_to_cpu(req->over_8bd_no_fe_qid); + + return 0; +} + +/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @reset_requests: reset level that we need to trigger later + * + * over_8bd_nfe_err is a special MSI-X because it may caused by a VF, in + * that case, we need to trigger VF reset. Otherwise, a PF reset is needed. 
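+ * If a reset of any level has already been requested (*reset_requests is
+ * non-zero), the VF reset is skipped, since the pending reset will also
+ * recover the VF.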
+ */ +static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + u16 vf_id; + u16 q_id; + int ret; + + ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id); + if (ret) { + dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n", + ret); + return; + } + + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n", + vf_id, q_id); + + if (vf_id) { + if (vf_id >= hdev->num_alloc_vport) { + dev_err(dev, "invalid vport(%u)\n", vf_id); + return; + } + + /* If we need to trigger other reset whose level is higher + * than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset + * here. + */ + if (*reset_requests != 0) + return; + + ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); + if (ret) + dev_err(dev, "inform reset to vport(%u) failed %d!\n", + hdev->vport->vport_id, ret); + } else { + set_bit(HNAE3_FUNC_RESET, reset_requests); + } +} + +/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the main PF MSI-X errors in the hw register/s + * using command. + */ +static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int mpf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + /* query all main PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log MAC errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], status, + reset_requests); + + /* log PPU(RCB) MPF errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 2)) & + HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) + dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", + status); + + /* clear all main PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) + dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_msix_error: handle all PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the PF MSI-X errors in the hw register/s using + * command. 
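+ * The MAC AFIFO TNL status is decoded with hclge_log_error(), which
+ * records the required reset level in @reset_requests; the PPU (RCB)
+ * rx_q_search_miss status is only logged. The status is then cleared
+ * again with hclge_clear_hw_msix_error().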
+ */ +static int hclge_handle_pf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int pf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU PF errors */ + status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], + status, reset_requests); + + /* read and log PPP PF errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], + status, reset_requests); + + /* log PPU(RCB) PF errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], + status, reset_requests); + + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK; + if (status) + hclge_handle_over_8bd_err(hdev, reset_requests); + + /* clear all PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) + dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct hclge_mac_tnl_stats mac_tnl_stats; + struct device *dev = &hdev->pdev->dev; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + u32 status; + int ret; + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + goto out; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num, + reset_requests); + if (ret) + goto msi_error; + + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests); + if (ret) + goto msi_error; + + /* query and clear mac tnl interruptions */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret); + goto msi_error; + } + + status = le32_to_cpu(desc->data[0]); + if (status) { + /* When mac tnl interrupt occurs, we record current time and + * register status here in a fifo, then clear the status. So + * that if link status changes suddenly at some time, we can + * query them by debugfs. 
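+ * The timestamp is taken from local_clock(), so the recorded entries
+ * can be roughly correlated with printk timestamps when dumped.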
+ */ + mac_tnl_stats.time = local_clock(); + mac_tnl_stats.status = status; + kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats); + ret = hclge_clear_mac_tnl_int(hdev); + if (ret) + dev_err(dev, "clear mac tnl int failed (%d)\n", ret); + } + +msi_error: + kfree(desc); +out: + return ret; +} + +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "Can't handle - MSIx error reported during dev init\n"); + return 0; + } + + return hclge_handle_all_hw_msix_error(hdev, reset_requests); +} + +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGE_DESC_NO_DATA_LEN 8 + + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + u32 status; + int ret; + + ae_dev->hw_err_reset_req = 0; + status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + return; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return; + + /* Clear HNS hw errors reported through msix */ + memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear mpf msix int during init\n", + ret); + goto msi_error; + } + + memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear pf msix int during init\n", + ret); + goto msi_error; + } + + /* Handle Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, "HNS hw error(RAS) identified during init\n"); + hclge_handle_all_ras_errors(hdev); + } + +msi_error: + kfree(desc); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h new file mode 100644 index 0000000000000000000000000000000000000000..67e972a25f47ba75273421c98c18aaec979b2dec --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_ERR_H +#define __HCLGE_ERR_H + +#include "hclge_main.h" + +#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 +#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4 +#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10 +#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4 + +#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 +#define HCLGE_RAS_IMP_RD_POISON_MASK \ + BIT(HCLGE_VECTOR0_IMP_RD_POISON_B) +#define HCLGE_RAS_IMP_CMDQ_ERR_MASK \ + BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B) +#define HCLGE_RAS_REG_NFE_MASK 0xFF00 +#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000 + +#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800 +#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00 + +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300 +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 +#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF +#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000 +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000 +#define HCLGE_IGU_ERR_INT_EN 0x0000000F +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660 +#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F +#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF +#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_PF_ERR_INT_EN 0x0003 +#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F +#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3 +#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF +#define HCLGE_NCSI_ERR_INT_EN 0x3 +#define HCLGE_NCSI_ERR_INT_TYPE 0x9 +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF +#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define 
HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) + +#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF +#define HCLGE_IGU_INT_MASK GENMASK(3, 0) +#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0) +#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) +#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 +#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26 +#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01 +#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) +#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) +#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) + +#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1 +#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 +#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) +#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) +#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0) +#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) +#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) +#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 +#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F + +enum hclge_err_int_type { + HCLGE_ERR_INT_MSIX = 0, + HCLGE_ERR_INT_RAS_CE = 1, + HCLGE_ERR_INT_RAS_NFE = 2, + HCLGE_ERR_INT_RAS_FE = 3, +}; + +struct hclge_hw_blk { + u32 msk; + const char *name; + int (*config_err_int)(struct hclge_dev *hdev, bool en); +}; + +struct hclge_hw_error { + u32 int_msk; + const char *msg; + enum hnae3_reset_type reset_level; +}; + +extern const struct hclge_hw_error hclge_imp_tcm_ecc_int[]; +extern const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[]; +extern const struct hclge_hw_error hclge_tqp_int_ecc_int[]; +extern const struct hclge_hw_error hclge_msix_sram_ecc_int[]; +extern const struct hclge_hw_error hclge_igu_int[]; +extern const struct hclge_hw_error hclge_igu_egu_tnl_int[]; +extern const struct hclge_hw_error hclge_ncsi_err_int[]; +extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[]; +extern const struct hclge_hw_error hclge_ppp_pf_abnormal_int[]; +extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[]; +extern const struct hclge_hw_error hclge_tm_sch_rint[]; +extern const struct hclge_hw_error hclge_qcn_fifo_rint[]; +extern const struct hclge_hw_error hclge_qcn_ecc_rint[]; +extern const struct hclge_hw_error hclge_mac_afifo_tnl_int[]; +extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[]; +extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[]; +extern const struct hclge_hw_error hclge_ppu_pf_abnormal_int[]; +extern const struct hclge_hw_error hclge_ssu_com_err_int[]; +extern const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[]; +extern const struct hclge_hw_error hclge_ssu_port_based_err_int[]; +extern const struct hclge_hw_error hclge_ssu_fifo_overflow_int[]; +extern const struct hclge_hw_error hclge_ssu_ets_tcg_int[]; +extern const struct hclge_hw_error hclge_ssu_port_based_pf_int[]; +extern const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[]; + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state); +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en); +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev); +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); +int 
hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests); +int hclge_handle_all_ras_errors(struct hclge_dev *hdev); +int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev); +void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts, unsigned long *reset_requests); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad6b13dfcc6b6042891d71d6c95a45..6c61016bd4d95e9cb86ea05343a9d07434cd21db 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -12,30 +12,65 @@ #include #include #include +#include #include +#include "kcompat.h" #include "hclge_cmd.h" #include "hclge_dcb.h" #include "hclge_main.h" #include "hclge_mbx.h" #include "hclge_mdio.h" #include "hclge_tm.h" +#include "hclge_err.h" #include "hnae3.h" #define HCLGE_NAME "hclge" -#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) -#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) -#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) - -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable); -static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); + +#define HCLGE_BUF_SIZE_UNIT 256U +#define HCLGE_BUF_MUL_BY 2 +#define HCLGE_BUF_DIV_BY 2 +#define NEED_RESERVE_TC_NUM 2 +#define BUF_MAX_PERCENT 100 +#define BUF_RESERVE_PERCENT 90 + +#define HCLGE_RESET_MAX_FAIL_CNT 5 +#define HCLGE_RESET_SYNC_TIME 100 +#define HCLGE_PF_RESET_SYNC_TIME 20 +#define HCLGE_PF_RESET_SYNC_CNT 1500 + +/* Get DFX BD number offset */ +#define HCLGE_DFX_BIOS_BD_OFFSET 1 +#define HCLGE_DFX_SSU_0_BD_OFFSET 2 +#define HCLGE_DFX_SSU_1_BD_OFFSET 3 +#define HCLGE_DFX_IGU_BD_OFFSET 4 +#define HCLGE_DFX_RPU_0_BD_OFFSET 5 +#define HCLGE_DFX_RPU_1_BD_OFFSET 6 +#define HCLGE_DFX_NCSI_BD_OFFSET 7 +#define HCLGE_DFX_RTC_BD_OFFSET 8 +#define HCLGE_DFX_PPP_BD_OFFSET 9 +#define HCLGE_DFX_RCB_BD_OFFSET 10 +#define HCLGE_DFX_TQP_BD_OFFSET 11 +#define HCLGE_DFX_SSU_2_BD_OFFSET 12 + +#define HCLGE_LINK_STATUS_MS 10 + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); +static void hclge_sync_vlan_filter(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); +static void hclge_rfs_filter_expire(struct hclge_dev *hdev); +static void hclge_clear_arfs_rules(struct hnae3_handle *handle); +static int hclge_set_default_loopback(struct hclge_dev *hdev); + +static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_restore_hw_table(struct hclge_dev *hdev); +static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; +static struct workqueue_struct *hclge_wq; + static const struct pci_device_id ae_algo_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, @@ -50,181 +85,80 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); +static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, + HCLGE_CMDQ_TX_ADDR_H_REG, + HCLGE_CMDQ_TX_DEPTH_REG, + HCLGE_CMDQ_TX_TAIL_REG, + HCLGE_CMDQ_TX_HEAD_REG, + 
HCLGE_CMDQ_RX_ADDR_L_REG, + HCLGE_CMDQ_RX_ADDR_H_REG, + HCLGE_CMDQ_RX_DEPTH_REG, + HCLGE_CMDQ_RX_TAIL_REG, + HCLGE_CMDQ_RX_HEAD_REG, + HCLGE_VECTOR0_CMDQ_SRC_REG, + HCLGE_CMDQ_INTR_STS_REG, + HCLGE_CMDQ_INTR_EN_REG, + HCLGE_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { - "Mac Loopback test", - "Serdes Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", "Phy Loopback test" }; -static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { - {"igu_rx_oversize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, - {"igu_rx_undersize_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, - {"igu_rx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, - {"igu_rx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, - {"igu_rx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, - {"igu_rx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, - {"egu_tx_out_all_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, - {"egu_tx_uni_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, - {"egu_tx_multi_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, - {"egu_tx_broad_pkt", - HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, - {"ssu_ppp_mac_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, - {"ssu_ppp_host_key_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, - {"ppp_ssu_mac_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, - {"ppp_ssu_host_rlt_num", - HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, - {"ssu_tx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, - {"ssu_tx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, - {"ssu_rx_in_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, - {"ssu_rx_out_num", - HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} -}; - -static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { - {"igu_rx_err_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, - {"igu_rx_no_eof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, - {"igu_rx_no_sof_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, - {"egu_tx_1588_pkt", - HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, - {"ssu_full_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, - {"ssu_part_drop_num", - 
HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, - {"ppp_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, - {"ppp_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, - {"ssu_key_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, - {"pkt_curr_buf_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, - {"qcn_fb_rcv_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, - {"qcn_fb_drop_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, - {"qcn_fb_invaild_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, - {"rx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, - {"rx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, - {"rx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, - {"rx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, - {"rx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, - {"rx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, - {"rx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, - {"rx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, - {"rx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, - {"rx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, - {"rx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, - {"rx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, - {"rx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, - {"rx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, - {"rx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, - {"rx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, - {"tx_packet_tc0_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, - {"tx_packet_tc1_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, - {"tx_packet_tc2_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, - {"tx_packet_tc3_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, - {"tx_packet_tc4_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, - {"tx_packet_tc5_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, - {"tx_packet_tc6_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, - {"tx_packet_tc7_in_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, - {"tx_packet_tc0_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, - {"tx_packet_tc1_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, - {"tx_packet_tc2_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, - {"tx_packet_tc3_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, - {"tx_packet_tc4_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, - {"tx_packet_tc5_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, - {"tx_packet_tc6_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, - {"tx_packet_tc7_out_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, - {"pkt_curr_buf_tc0_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, - {"pkt_curr_buf_tc1_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, - {"pkt_curr_buf_tc2_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, - {"pkt_curr_buf_tc3_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, - {"pkt_curr_buf_tc4_cnt", - 
HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, - {"pkt_curr_buf_tc5_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, - {"pkt_curr_buf_tc6_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, - {"pkt_curr_buf_tc7_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, - {"mb_uncopy_num", - HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, - {"lo_pri_unicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, - {"hi_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, - {"lo_pri_multicast_rlt_drop_num", - HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, - {"rx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, - {"tx_oq_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, - {"nic_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, - {"roc_l2_err_drop_pkt_cnt", - HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} -}; - static const struct hclge_comm_stats_str g_mac_stats_string[] = { {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, {"mac_tx_pfc_pri1_pkt_num", @@ -241,6 +175,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, {"mac_rx_pfc_pri1_pkt_num", @@ -388,40 +324,126 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { { .flags = HCLGE_MAC_MGR_MASK_VLAN_B, .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), - .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), - .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), + .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, .i_port_bitmap = 0x1, }, }; -static int hclge_64_bit_update_stats(struct hclge_dev *hdev) +static const u8 hclge_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static const u32 hclge_dfx_bd_offset_list[] = { + HCLGE_DFX_BIOS_BD_OFFSET, + HCLGE_DFX_SSU_0_BD_OFFSET, + HCLGE_DFX_SSU_1_BD_OFFSET, + HCLGE_DFX_IGU_BD_OFFSET, + HCLGE_DFX_RPU_0_BD_OFFSET, + HCLGE_DFX_RPU_1_BD_OFFSET, + HCLGE_DFX_NCSI_BD_OFFSET, + HCLGE_DFX_RTC_BD_OFFSET, + HCLGE_DFX_PPP_BD_OFFSET, + HCLGE_DFX_RCB_BD_OFFSET, + HCLGE_DFX_TQP_BD_OFFSET, + HCLGE_DFX_SSU_2_BD_OFFSET +}; + +static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { + HCLGE_OPC_DFX_BIOS_COMMON_REG, + HCLGE_OPC_DFX_SSU_REG_0, + HCLGE_OPC_DFX_SSU_REG_1, + HCLGE_OPC_DFX_IGU_EGU_REG, + HCLGE_OPC_DFX_RPU_REG_0, + HCLGE_OPC_DFX_RPU_REG_1, + HCLGE_OPC_DFX_NCSI_REG, + HCLGE_OPC_DFX_RTC_REG, + HCLGE_OPC_DFX_PPP_REG, + HCLGE_OPC_DFX_RCB_REG, + HCLGE_OPC_DFX_TQP_REG, + HCLGE_OPC_DFX_SSU_REG_2 +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6}, + { 
IP_FRAGEMENT, 1}, + { ROCE_TYPE, 1}, + { NEXT_KEY, 5}, + { VLAN_NUMBER, 2}, + { SRC_VPORT, 12}, + { DST_VPORT, 12}, + { TUNNEL_PACKET, 1}, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48}, + { OUTER_SRC_MAC, 48}, + { OUTER_VLAN_TAG_FST, 16}, + { OUTER_VLAN_TAG_SEC, 16}, + { OUTER_ETH_TYPE, 16}, + { OUTER_L2_RSV, 16}, + { OUTER_IP_TOS, 8}, + { OUTER_IP_PROTO, 8}, + { OUTER_SRC_IP, 32}, + { OUTER_DST_IP, 32}, + { OUTER_L3_RSV, 16}, + { OUTER_SRC_PORT, 16}, + { OUTER_DST_PORT, 16}, + { OUTER_L4_RSV, 32}, + { OUTER_TUN_VNI, 24}, + { OUTER_TUN_FLOW_ID, 8}, + { INNER_DST_MAC, 48}, + { INNER_SRC_MAC, 48}, + { INNER_VLAN_TAG_FST, 16}, + { INNER_VLAN_TAG_SEC, 16}, + { INNER_ETH_TYPE, 16}, + { INNER_L2_RSV, 16}, + { INNER_IP_TOS, 8}, + { INNER_IP_PROTO, 8}, + { INNER_SRC_IP, 32}, + { INNER_DST_IP, 32}, + { INNER_L3_RSV, 16}, + { INNER_SRC_PORT, 16}, + { INNER_DST_PORT, 16}, + { INNER_L4_RSV, 32}, +}; + +static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { -#define HCLGE_64_BIT_CMD_NUM 5 -#define HCLGE_64_BIT_RTN_DATANUM 4 - u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); - struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; +#define HCLGE_MAC_CMD_NUM 21 + + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; __le64 *desc_data; int i, k, n; int ret; - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); + memset(desc, 0, sizeof(desc)); + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); if (ret) { dev_err(&hdev->pdev->dev, - "Get 64 bit pkt stats fail, status = %d.\n", ret); + "Get MAC pkt stats fail, status = %d.\n", ret); + return ret; } - for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { + for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + /* for special opcode 0032, only the first desc has the head */ if (unlikely(i == 0)) { desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_RTN_DATANUM - 1; + n = HCLGE_RD_FIRST_STATS_NUM; } else { desc_data = (__le64 *)(&desc[i]); - n = HCLGE_64_BIT_RTN_DATANUM; + n = HCLGE_RD_OTHER_STATS_NUM; } + for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); + *data += le64_to_cpu(*desc_data); + data++; desc_data++; } } @@ -429,111 +451,93 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev) return 0; } -static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) -{ - stats->pkt_curr_buf_cnt = 0; - stats->pkt_curr_buf_tc0_cnt = 0; - stats->pkt_curr_buf_tc1_cnt = 0; - stats->pkt_curr_buf_tc2_cnt = 0; - stats->pkt_curr_buf_tc3_cnt = 0; - stats->pkt_curr_buf_tc4_cnt = 0; - stats->pkt_curr_buf_tc5_cnt = 0; - stats->pkt_curr_buf_tc6_cnt = 0; - stats->pkt_curr_buf_tc7_cnt = 0; -} - -static int hclge_32_bit_update_stats(struct hclge_dev *hdev) +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) { -#define HCLGE_32_BIT_CMD_NUM 8 -#define HCLGE_32_BIT_RTN_DATANUM 8 - - struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; - struct hclge_32_bit_stats *all_32_bit_stats; - __le32 *desc_data; - int i, k, n; - u64 *data; + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc *desc; + __le64 *desc_data; + u16 i, k, n; int ret; - all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; - data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); + /* This may be called inside atomic sections, + * so GFP_ATOMIC is more suitalbe here + */ + desc = kcalloc(desc_num, sizeof(struct hclge_desc), 
GFP_ATOMIC); + if (!desc) + return -ENOMEM; - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); + ret = hclge_cmd_send(&hdev->hw, desc, desc_num); if (ret) { - dev_err(&hdev->pdev->dev, - "Get 32 bit pkt stats fail, status = %d.\n", ret); - + kfree(desc); return ret; } - hclge_reset_partial_32bit_counter(all_32_bit_stats); - for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { - if (unlikely(i == 0)) { - __le16 *desc_data_16bit; - - all_32_bit_stats->igu_rx_err_pkt += - le32_to_cpu(desc[i].data[0]); - - desc_data_16bit = (__le16 *)&desc[i].data[1]; - all_32_bit_stats->igu_rx_no_eof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data_16bit++; - all_32_bit_stats->igu_rx_no_sof_pkt += - le16_to_cpu(*desc_data_16bit); - - desc_data = &desc[i].data[2]; - n = HCLGE_32_BIT_RTN_DATANUM - 4; + for (i = 0; i < desc_num; i++) { + /* for special opcode 0034, only the first desc has the head */ + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_RD_FIRST_STATS_NUM; } else { - desc_data = (__le32 *)&desc[i]; - n = HCLGE_32_BIT_RTN_DATANUM; + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_RD_OTHER_STATS_NUM; } + for (k = 0; k < n; k++) { - *data++ += le32_to_cpu(*desc_data); + *data += le64_to_cpu(*desc_data); + data++; desc_data++; } } + kfree(desc); + return 0; } -static int hclge_mac_update_stats(struct hclge_dev *hdev) +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) { -#define HCLGE_MAC_CMD_NUM 21 -#define HCLGE_RTN_DATA_NUM 4 - - u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); - struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; - __le64 *desc_data; - int i, k, n; + struct hclge_desc desc; + __le32 *desc_data; + u32 reg_num; int ret; - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get MAC pkt stats fail, status = %d.\n", ret); - + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) return ret; - } - for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RTN_DATA_NUM - 2; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RTN_DATA_NUM; - } - for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); - desc_data++; - } + desc_data = (__le32 *)(&desc.data[0]); + reg_num = le32_to_cpu(*desc_data); + + *desc_num = 1 + ((reg_num - 3) >> 2) + + (u32)(((reg_num - 3) & 0x3) ? 
1 : 0); + if (!(*desc_num)) { + dev_err(&hdev->pdev->dev, "Invalid desc num: %u\n", *desc_num); + return -EINVAL; } return 0; } +int hclge_mac_update_stats(struct hclge_dev *hdev) +{ + u32 desc_num; + int ret; + + ret = hclge_mac_query_reg_num(hdev, &desc_num); + + /* The firmware supports the new statistics acquisition method */ + if (!ret) + ret = hclge_mac_update_stats_complete(hdev, desc_num); + else if (ret == -EOPNOTSUPP) + ret = hclge_mac_update_stats_defective(hdev); + else + dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); + + return ret; +} + static int hclge_tqps_update_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -548,8 +552,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) queue = handle->kinfo.tqp[i]; tqp = container_of(queue, struct hclge_tqp, q); /* command : HCLGE_OPC_QUERY_IGU_STAT */ - hclge_cmd_setup_basic_desc(&desc[0], - HCLGE_OPC_QUERY_RX_STATUS, + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS, true); desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); @@ -557,7 +560,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) if (ret) { dev_err(&hdev->pdev->dev, "Query tqp stat fail, status = %d,queue = %d\n", - ret, i); + ret, i); return ret; } tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += @@ -611,6 +614,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + /* each tqp has TX & RX two queues */ return kinfo->num_tqps * (2); } @@ -618,12 +622,12 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; u8 *buff = data; - int i = 0; + int i; for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -631,7 +635,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -639,7 +643,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) return buff; } -static u64 *hclge_comm_get_stats(void *comm_stats, +static u64 *hclge_comm_get_stats(const void *comm_stats, const struct hclge_comm_stats_str strs[], int size, u64 *data) { @@ -663,40 +667,13 @@ static u8 *hclge_comm_get_strings(u32 stringset, return buff; for (i = 0; i < size; i++) { - snprintf(buff, ETH_GSTRING_LEN, - strs[i].desc); + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); buff = buff + ETH_GSTRING_LEN; } return (u8 *)buff; } -static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, - struct net_device_stats *net_stats) -{ - net_stats->tx_dropped = 0; - net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; - net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; - net_stats->rx_errors += 
hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - - net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; - net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; - - net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - net_stats->rx_length_errors = - hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_length_errors += - hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_over_errors = - hw_stats->mac_stats.mac_rx_oversize_pkt_num; -} - static void hclge_update_stats_for_all(struct hclge_dev *hdev) { struct hnae3_handle *handle; @@ -716,14 +693,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) if (status) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } static void hclge_update_stats(struct hnae3_handle *handle, @@ -731,7 +700,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_hw_stats *hw_stats = &hdev->hw_stats; int status; if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) @@ -743,32 +711,21 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update MAC stats fail, status = %d.\n", status); - status = hclge_32_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 32 bit stats fail, status = %d.\n", - status); - - status = hclge_64_bit_update_stats(hdev); - if (status) - dev_err(&hdev->pdev->dev, - "Update 64 bit stats fail, status = %d.\n", - status); - status = hclge_tqps_update_stats(handle); if (status) dev_err(&hdev->pdev->dev, "Update TQPS stats fail, status = %d.\n", status); - hclge_update_netstat(hw_stats, net_stats); - clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); } static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ + HNAE3_SUPPORT_PHY_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -782,27 +739,33 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) if (stringset == ETH_SS_TEST) { /* clear loopback bit flags at first */ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); - if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + if (hdev->pdev->revision >= HNAE3_REVISION_ID_21 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { count += 1; - handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; + } + + count += 2; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + + if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) { + count += 1; + handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } - count++; - handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK; } else if (stringset == ETH_SS_STATS) { count = ARRAY_SIZE(g_mac_stats_string) + - ARRAY_SIZE(g_all_32bit_stats_string) + - 
ARRAY_SIZE(g_all_64bit_stats_string) + hclge_tqps_get_sset_count(handle, stringset); } return count; } -static void hclge_get_strings(struct hnae3_handle *handle, - u32 stringset, +static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, u8 *data) { u8 *p = (char *)data; @@ -810,37 +773,28 @@ static void hclge_get_strings(struct hnae3_handle *handle, if (stringset == ETH_SS_STATS) { size = ARRAY_SIZE(g_mac_stats_string); - p = hclge_comm_get_strings(stringset, - g_mac_stats_string, - size, - p); - size = ARRAY_SIZE(g_all_32bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_32bit_stats_string, - size, - p); - size = ARRAY_SIZE(g_all_64bit_stats_string); - p = hclge_comm_get_strings(stringset, - g_all_64bit_stats_string, - size, - p); + p = hclge_comm_get_strings(stringset, g_mac_stats_string, + size, p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { - if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { - memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { - memcpy(p, - hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -853,24 +807,28 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) struct hclge_dev *hdev = vport->back; u64 *p; - p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, - g_mac_stats_string, - ARRAY_SIZE(g_mac_stats_string), - data); - p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, - g_all_32bit_stats_string, - ARRAY_SIZE(g_all_32bit_stats_string), - p); - p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, - g_all_64bit_stats_string, - ARRAY_SIZE(g_all_64bit_stats_string), - p); + p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), data); p = hclge_tqps_get_stats(handle, p); } +static void hclge_get_mac_stat(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_stats(handle, NULL); + + mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; + mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; +} + static int hclge_parse_func_status(struct hclge_dev *hdev, struct hclge_func_status_cmd *status) { +#define HCLGE_MAC_ID_MASK 0xF + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) return -EINVAL; @@ -880,11 +838,14 @@ static int hclge_parse_func_status(struct hclge_dev *hdev, else hdev->flag &= ~HCLGE_FLAG_MAIN; + hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; return 0; } static int hclge_query_function_status(struct hclge_dev *hdev) { +#define HCLGE_QUERY_MAX_CNT 5 + struct hclge_func_status_cmd *req; struct hclge_desc desc; int timeout = 0; @@ -897,9 +858,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev) ret = hclge_cmd_send(&hdev->hw, &desc, 1); if 
(ret) { dev_err(&hdev->pdev->dev, - "query function status failed %d.\n", - ret); - + "query function status failed %d.\n", ret); return ret; } @@ -907,11 +866,9 @@ if (req->pf_state) break; usleep_range(1000, 2000); - } while (timeout++ < 5); - - ret = hclge_parse_func_status(hdev, req); + } while (timeout++ < HCLGE_QUERY_MAX_CNT); - return ret; + return hclge_parse_func_status(hdev, req); } static int hclge_query_pf_resource(struct hclge_dev *hdev) @@ -929,26 +886,54 @@ } req = (struct hclge_pf_res_cmd *)desc.data; - hdev->num_tqps = __le16_to_cpu(req->tqp_num); - hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + hdev->num_tqps = le16_to_cpu(req->tqp_num); + hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (req->tx_buf_size) + hdev->tx_buf_size = + le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + + hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); + + if (req->dv_buf_size) + hdev->dv_buf_size = + le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->dv_buf_size = HCLGE_DEFAULT_DV; + + hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_roce_supported(hdev)) { hdev->roce_base_msix_offset = - hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), + hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); hdev->num_roce_msi = - hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + /* nic's msix number is always equal to the roce's. */ + hdev->num_nic_msi = hdev->num_roce_msi; + /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors.
*/ - hdev->num_msi = hdev->num_roce_msi + + hdev->num_msi = hdev->num_roce_msi + hdev->roce_base_msix_offset; } else { hdev->num_msi = - hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + + hdev->num_nic_msi = hdev->num_msi; + } + + if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "Just %u msi resources, not enough for pf(min:2).\n", + hdev->num_nic_msi); + return -EINVAL; } return 0; @@ -988,58 +973,300 @@ static int hclge_parse_speed(int speed_cmd, int *speed) return 0; } +static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 speed_ability = hdev->hw.mac.speed_ability; + u32 speed_bit = 0; + + switch (speed) { + case HCLGE_MAC_SPEED_10M: + speed_bit = HCLGE_SUPPORT_10M_BIT; + break; + case HCLGE_MAC_SPEED_100M: + speed_bit = HCLGE_SUPPORT_100M_BIT; + break; + case HCLGE_MAC_SPEED_1G: + speed_bit = HCLGE_SUPPORT_1G_BIT; + break; + case HCLGE_MAC_SPEED_10G: + speed_bit = HCLGE_SUPPORT_10G_BIT; + break; + case HCLGE_MAC_SPEED_25G: + speed_bit = HCLGE_SUPPORT_25G_BIT; + break; + case HCLGE_MAC_SPEED_40G: + speed_bit = HCLGE_SUPPORT_40G_BIT; + break; + case HCLGE_MAC_SPEED_50G: + speed_bit = HCLGE_SUPPORT_50G_BIT; + break; + case HCLGE_MAC_SPEED_100G: + speed_bit = HCLGE_SUPPORT_100G_BIT; + break; + default: + return -EINVAL; + } + + if (speed_bit & speed_ability) + return 0; + + return -EINVAL; +} + +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + mac->supported); +} + +static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + mac->supported); +} + +static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + 
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + mac->supported); +} + +static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + mac->supported); +} + +static void hclge_convert_setting_fec(struct hclge_mac *mac) +{ + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + + switch (mac->speed) { + case HCLGE_MAC_SPEED_10G: + case HCLGE_MAC_SPEED_40G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->supported); + mac->fec_ability = + BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO); + break; + case HCLGE_MAC_SPEED_25G: + case HCLGE_MAC_SPEED_50G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->supported); + mac->fec_ability = + BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | + BIT(HNAE3_FEC_AUTO); + break; + case HCLGE_MAC_SPEED_100G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO); + break; + default: + mac->fec_ability = 0; + break; + } +} +#endif + static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, u8 speed_ability) { +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + struct hclge_mac *mac = &hdev->hw.mac; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + mac->supported); + + hclge_convert_setting_sr(mac, speed_ability); + hclge_convert_setting_lr(mac, speed_ability); + hclge_convert_setting_cr(mac, speed_ability); + if (hdev->pdev->revision >= 0x21) + hclge_convert_setting_fec(mac); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +#else unsigned long *supported = hdev->hw.mac.supported; if (speed_ability & HCLGE_SUPPORT_1G_BIT) - set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_10G_BIT) - set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + supported); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +#endif +} - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - supported); +static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - 
set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - supported); +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + hclge_convert_setting_kr(mac, speed_ability); + if (hdev->pdev->revision >= 0x21) + hclge_convert_setting_fec(mac); +#else + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + mac->supported); +#endif + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } - set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) { u8 media_type = hdev->hw.mac.media_type; - if (media_type != HNAE3_MEDIA_TYPE_FIBER) - return; - - hclge_parse_fiber_link_mode(hdev, speed_ability); + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) + hclge_parse_backplane_link_mode(hdev, speed_ability); } -static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +static u32 hclge_get_max_speed(u8 speed_ability) { - struct hclge_cfg_param_cmd *req; - u64 mac_addr_tmp_high; - u64 mac_addr_tmp; - int i; + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + return HCLGE_MAC_SPEED_100G; - req = (struct hclge_cfg_param_cmd *)desc[0].data; + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + return HCLGE_MAC_SPEED_50G; - /* get the configuration */ - cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + return HCLGE_MAC_SPEED_40G; + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + return HCLGE_MAC_SPEED_25G; + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + return HCLGE_MAC_SPEED_10G; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + return HCLGE_MAC_SPEED_1G; + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) + return HCLGE_MAC_SPEED_100M; + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) + return HCLGE_MAC_SPEED_10M; + + return HCLGE_MAC_SPEED_1G; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ + struct hclge_cfg_param_cmd *req; + u64 mac_addr_tmp_high; + u64 mac_addr_tmp; + unsigned int i; + + req = (struct hclge_cfg_param_cmd *)desc[0].data; + 
+ /* get the configuration */ + cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), @@ -1079,6 +1306,16 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_SPEED_ABILITY_M, HCLGE_CFG_SPEED_ABILITY_S); + + cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_VLAN_FLTR_CAP_M, + HCLGE_CFG_VLAN_FLTR_CAP_S); + + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } /* hclge_get_cfg: query the static parameter from flash @@ -1089,7 +1326,8 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) { struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; struct hclge_cfg_param_cmd *req; - int i, ret; + unsigned int i; + int ret; for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { u32 offset = 0; @@ -1128,17 +1366,33 @@ static int hclge_get_cap(struct hclge_dev *hdev) } /* get pf resource */ - ret = hclge_query_pf_resource(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); + return hclge_query_pf_resource(hdev); +} - return ret; +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (!is_kdump_kernel()) + return; + + dev_info(&hdev->pdev->dev, + "Running kdump kernel. Using minimal resources\n"); + + /* minimal queue pairs equals to the number of vports */ + hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; } static int hclge_configure(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - int ret, i; + unsigned int i; + int node, ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) { @@ -1153,22 +1407,35 @@ static int hclge_configure(struct hclge_dev *hdev) ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; hdev->hw.mac.phy_addr = cfg.phy_addr; - hdev->num_desc = cfg.tqp_desc_num; + hdev->num_tx_desc = cfg.tqp_desc_num; + hdev->num_rx_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; + hdev->wanted_umv_size = cfg.umv_space; + if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) + hnae3_set_bit(ae_dev->flag, + HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, 1); + + if (hnae3_dev_fd_supported(hdev)) { + hdev->fd_en = true; + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + } ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { - dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); + dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", + cfg.default_speed, ret); return ret; } hclge_parse_link_mode(hdev, cfg.speed_ability); + hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); + if ((hdev->tc_max > HNAE3_MAX_TC) || (hdev->tc_max < 1)) { - dev_warn(&hdev->pdev->dev, "TC num = %d.\n", + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", hdev->tc_max); hdev->tc_max = 1; } @@ -1181,7 +1448,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->pfc_max = 
hdev->tc_max; } - hdev->tm_info.num_tc = hdev->tc_max; + hdev->tm_info.num_tc = 1; /* Currently not support uncontiuous tc */ for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1189,11 +1456,20 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + hclge_init_kdump_kernel_config(hdev); + + /* Set the affinity based on numa node */ + node = dev_to_node(&hdev->pdev->dev); + if (node != NUMA_NO_NODE) + cpumask = cpumask_of_node(node); + + cpumask_copy(&hdev->affinity_mask, cpumask); + return ret; } -static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, - int tso_mss_max) +static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min, + unsigned int tso_mss_max) { struct hclge_cfg_tso_status_cmd *req; struct hclge_desc desc; @@ -1216,6 +1492,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_config_gro(struct hclge_dev *hdev, bool en) +{ + struct hclge_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hclge_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 1 : 0); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "GRO hardware config cmd failed, ret = %d\n", ret); + + return ret; +} + static int hclge_alloc_tqps(struct hclge_dev *hdev) { struct hclge_tqp *tqp; @@ -1234,7 +1532,8 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) tqp->q.ae_algo = &ae_algo; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE; @@ -1256,8 +1555,9 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, req = (struct hclge_tqp_map_cmd *)desc.data; req->tqp_id = cpu_to_le16(tqp_pid); req->tqp_vf = func_id; - req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | - 1 << HCLGE_TQP_MAP_EN_B; + req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; + if (!is_pf) + req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; req->tqp_vid = cpu_to_le16(tqp_vid); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -1267,64 +1567,55 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, return ret; } -static int hclge_assign_tqp(struct hclge_vport *vport) +static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int i, alloced; for (i = 0, alloced = 0; i < hdev->num_tqps && - alloced < kinfo->num_tqps; i++) { + alloced < num_tqps; i++) { if (!hdev->htqp[i].alloced) { hdev->htqp[i].q.handle = &vport->nic; hdev->htqp[i].q.tqp_index = alloced; - hdev->htqp[i].q.desc_num = kinfo->num_desc; + hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; + hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; kinfo->tqp[alloced] = &hdev->htqp[i].q; hdev->htqp[i].alloced = true; alloced++; } } - vport->alloc_tqps = kinfo->num_tqps; + vport->alloc_tqps = alloced; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, + vport->alloc_tqps / hdev->tm_info.num_tc); + + /* ensure one to one mapping between irq and queue at default */ + kinfo->rss_size = min_t(u16, kinfo->rss_size, + (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); return 0; } -static int hclge_knic_setup(struct hclge_vport *vport, 
- u16 num_tqps, u16 num_desc) +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, + u16 num_tx_desc, u16 num_rx_desc) + { struct hnae3_handle *nic = &vport->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; struct hclge_dev *hdev = vport->back; - int i, ret; + int ret; - kinfo->num_desc = num_desc; - kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); - kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + kinfo->num_tx_desc = num_tx_desc; + kinfo->num_rx_desc = num_rx_desc; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (hdev->hw_tc_map & BIT(i)) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; - } else { - /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; - } - } + kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) return -ENOMEM; - ret = hclge_assign_tqp(vport); + ret = hclge_assign_tqp(vport, num_tqps); if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); @@ -1339,7 +1630,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, u16 i; kinfo = &nic->kinfo; - for (i = 0; i < kinfo->num_tqps; i++) { + for (i = 0; i < vport->alloc_tqps; i++) { struct hclge_tqp *q = container_of(kinfo->tqp[i], struct hclge_tqp, q); bool is_pf; @@ -1361,7 +1652,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev) u16 i, num_vport; num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; - for (i = 0; i < num_vport; i++) { + for (i = 0; i < num_vport; i++) { int ret; ret = hclge_map_tqp_to_vport(hdev, vport); @@ -1374,11 +1665,6 @@ static int hclge_map_tqp(struct hclge_dev *hdev) return 0; } -static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) -{ - /* this would be initialized later */ -} - static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) { struct hnae3_handle *nic = &vport->nic; @@ -1389,18 +1675,12 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; - if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { - ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); - if (ret) { - dev_err(&hdev->pdev->dev, "knic setup failed %d\n", - ret); - return ret; - } - } else { - hclge_unic_setup(vport, num_tqps); - } + ret = hclge_knic_setup(vport, num_tqps, + hdev->num_tx_desc, hdev->num_rx_desc); + if (ret) + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); - return 0; + return ret; } static int hclge_alloc_vport(struct hclge_dev *hdev) @@ -1416,7 +1696,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; if (hdev->num_tqps < num_vport) { - dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", hdev->num_tqps, num_vport); return -EINVAL; } @@ -1439,6 +1719,16 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) for (i = 0; i < num_vport; i++) { vport->back = hdev; vport->vport_id = i; + vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; + vport->mps = 
HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; + vport->rxvlan_cfg.rx_vlan_offload_en = true; + vport->req_vlan_fltr_en = true; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); + spin_lock_init(&vport->mac_list_lock); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1471,7 +1761,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, req = (struct hclge_tx_buff_alloc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); - for (i = 0; i < HCLGE_TC_NUM; i++) { + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; req->tx_pkt_buff[i] = @@ -1498,9 +1788,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, return ret; } -static int hclge_get_tc_num(struct hclge_dev *hdev) +static u32 hclge_get_tc_num(struct hclge_dev *hdev) { - int i, cnt = 0; + unsigned int i; + u32 cnt = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) @@ -1508,23 +1799,13 @@ static int hclge_get_tc_num(struct hclge_dev *hdev) return cnt; } -static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) -{ - int i, cnt = 0; - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) - if (hdev->hw_tc_map & BIT(i) && - hdev->tm_info.hw_pfc_map & BIT(i)) - cnt++; - return cnt; -} - /* Get the number of pfc enabled TCs, which have private buffer */ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_priv_buf *priv; - int i, cnt = 0; + unsigned int i; + int cnt = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &buf_alloc->priv_buf[i]; @@ -1541,7 +1822,8 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_priv_buf *priv; - int i, cnt = 0; + unsigned int i; + int cnt = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &buf_alloc->priv_buf[i]; @@ -1582,43 +1864,63 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc, u32 rx_all) { - u32 shared_buf_min, shared_buf_tc, shared_std; - int tc_num, pfc_enable_num; - u32 shared_buf; + u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + u32 tc_num = hclge_get_tc_num(hdev); + u32 shared_buf, aligned_mps; u32 rx_priv; int i; - tc_num = hclge_get_tc_num(hdev); - pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); + aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_dcb_supported(hdev)) - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + + hdev->dv_buf_size; else - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; + shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + + hdev->dv_buf_size; - shared_buf_tc = pfc_enable_num * hdev->mps + - (tc_num - pfc_enable_num) * hdev->mps / 2 + - hdev->mps; - shared_std = max_t(u32, shared_buf_min, shared_buf_tc); + shared_buf_tc = tc_num * aligned_mps + aligned_mps; + shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), + HCLGE_BUF_SIZE_UNIT); rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); - if (rx_all <= rx_priv + shared_std) + if (rx_all < rx_priv + shared_std) return false; - shared_buf = rx_all - rx_priv; + shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); buf_alloc->s_buf.buf_size = shared_buf; - buf_alloc->s_buf.self.high = shared_buf; - buf_alloc->s_buf.self.low = 2 * hdev->mps; + if 
(hnae3_dev_dcb_supported(hdev)) { + buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / HCLGE_BUF_DIV_BY, + HCLGE_BUF_SIZE_UNIT); + } else { + buf_alloc->s_buf.self.high = aligned_mps + + HCLGE_NON_DCB_ADDITIONAL_BUF; + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hnae3_dev_dcb_supported(hdev)) { + hi_thrd = shared_buf - hdev->dv_buf_size; + + if (tc_num <= NEED_RESERVE_TC_NUM) + hi_thrd = hi_thrd * BUF_RESERVE_PERCENT + / BUF_MAX_PERCENT; + + if (tc_num) + hi_thrd = hi_thrd / tc_num; + + hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); + hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; + } else { + hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; + } for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if ((hdev->hw_tc_map & BIT(i)) && - (hdev->tm_info.hw_pfc_map & BIT(i))) { - buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; - buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; - } else { - buf_alloc->s_buf.tc_thrd[i].low = 0; - buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; - } + buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; } return true; @@ -1635,13 +1937,14 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; - if (total_size < HCLGE_DEFAULT_TX_BUF) - return -ENOMEM; + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; - if (hdev->hw_tc_map & BIT(i)) - priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; - else + priv->tx_buf_size = hdev->tx_buf_size; + } else { priv->tx_buf_size = 0; + } total_size -= priv->tx_buf_size; } @@ -1649,62 +1952,15 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, return 0; } -/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs - * @hdev: pointer to struct hclge_dev - * @buf_alloc: pointer to buffer calculation data - * @return: 0: calculate sucessful, negative: fail - */ -static int hclge_rx_buffer_calc(struct hclge_dev *hdev, - struct hclge_pkt_buf_alloc *buf_alloc) +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size; - int no_pfc_priv_num, pfc_priv_num; - struct hclge_priv_buf *priv; - int i; - - rx_all -= hclge_get_tx_buff_alloced(buf_alloc); - - /* When DCB is not supported, rx private - * buffer is not allocated. 
- */ - if (!hnae3_dev_dcb_supported(hdev)) { - if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return -ENOMEM; - - return 0; - } - - /* step 1, try to alloc private buffer for all enabled tc */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) { - priv->enable = 1; - if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = hdev->mps; - priv->wl.high = priv->wl.low + hdev->mps; - priv->buf_size = priv->wl.high + - HCLGE_DEFAULT_DV; - } else { - priv->wl.low = 0; - priv->wl.high = 2 * hdev->mps; - priv->buf_size = priv->wl.high; - } - } else { - priv->enable = 0; - priv->wl.low = 0; - priv->wl.high = 0; - priv->buf_size = 0; - } - } - - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); + unsigned int i; - /* step 2, try to decrease the buffer size of - * no pfc TC's private buffer - */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; priv->enable = 0; priv->wl.low = 0; @@ -1717,31 +1973,35 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = 128; - priv->wl.high = priv->wl.low + hdev->mps; - priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; + priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); } else { priv->wl.low = 0; - priv->wl.high = hdev->mps; - priv->buf_size = priv->wl.high; + priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : + aligned_mps; } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 3, try to reduce the number of pfc disabled TCs, - * which have private buffer - */ - /* get the total no pfc enable TC number, which have private buffer */ - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); - if (hdev->hw_tc_map & BIT(i) && - !(hdev->tm_info.hw_pfc_map & BIT(i))) { + if (hdev->hw_tc_map & mask && + !(hdev->tm_info.hw_pfc_map & mask)) { /* Clear the no pfc TC private buffer */ priv->wl.low = 0; priv->wl.high = 0; @@ -1755,20 +2015,23 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 4, try to reduce the number of pfc enabled TCs - * which have private buffer. 
- */ - pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); - if (hdev->hw_tc_map & BIT(i) && - hdev->tm_info.hw_pfc_map & BIT(i)) { + if (hdev->hw_tc_map & mask && + hdev->tm_info.hw_pfc_map & mask) { /* Reduce the number of pfc TC with private buffer */ priv->wl.low = 0; priv->enable = 0; @@ -1781,7 +2044,92 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, pfc_priv_num == 0) break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ +#define COMPENSATE_BUFFER 0x3C00 +#define COMPENSATE_HALF_MPS_NUM 5 +#define PRIV_WL_GAP 0x1800 + + u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 tc_num = hclge_get_tc_num(hdev); + u32 half_mps = hdev->mps >> 1; + u32 min_rx_priv; + unsigned int i; + + if (tc_num) + rx_priv = rx_priv / tc_num; + + if (tc_num <= NEED_RESERVE_TC_NUM) + rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; + + min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + + COMPENSATE_HALF_MPS_NUM * half_mps; + min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); + rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); + + if (rx_priv < min_rx_priv) + return false; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + priv->buf_size = rx_priv; + priv->wl.high = rx_priv - hdev->dv_buf_size; + priv->wl.low = priv->wl.high - PRIV_WL_GAP; + } + + buf_alloc->s_buf.buf_size = 0; + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data + * @return: 0: calculate successful, negative: fail + */ +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + /* When DCB is not supported, rx private buffer is not allocated.
*/ + if (!hnae3_dev_dcb_supported(hdev)) { + u32 rx_all = hdev->pkt_buf_size; + + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + return -ENOMEM; + + return 0; + } + + if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) + return 0; + + if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) + return 0; + + /* try to decrease the buffer size */ + if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) + return 0; + + if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) return 0; return -ENOMEM; @@ -2027,7 +2375,8 @@ static int hclge_init_msi(struct hclge_dev *hdev) int vectors; int i; - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (vectors < 0) { dev_err(&pdev->dev, @@ -2037,11 +2386,12 @@ static int hclge_init_msi(struct hclge_dev *hdev) } if (vectors < hdev->num_msi) dev_warn(&hdev->pdev->dev, - "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", hdev->num_msi, vectors); hdev->num_msi = vectors; hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + hdev->roce_base_msix_offset; @@ -2066,19 +2416,16 @@ static int hclge_init_msi(struct hclge_dev *hdev) return 0; } -static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +static u8 hclge_check_speed_dup(u8 duplex, int speed) { - struct hclge_mac *mac = &hdev->hw.mac; - - if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) - mac->duplex = (u8)duplex; - else - mac->duplex = HCLGE_MAC_FULL; + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; - mac->speed = speed; + return duplex; } -int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; @@ -2088,7 +2435,8 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); - hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + if (duplex) + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); switch (speed) { case HCLGE_MAC_SPEED_10M: @@ -2138,49 +2486,36 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) return ret; } - hclge_check_speed_dup(hdev, duplex, speed); - return 0; } -static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, - u8 duplex) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - return hclge_cfg_mac_speed_dup(hdev, speed, duplex); -} - -static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, - u8 *duplex) +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { - struct hclge_query_an_speed_dup_cmd *req; - struct hclge_desc desc; - int speed_tmp; + struct hclge_mac *mac = &hdev->hw.mac; int ret; - req = (struct hclge_query_an_speed_dup_cmd *)desc.data; + duplex = hclge_check_speed_dup(duplex, speed); + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex) + return 0; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac 
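The reworked hclge_rx_buffer_calc() no longer runs one long step-by-step routine; it tries a sequence of allocation strategies in priority order and returns as soon as one of them fits, falling back to -ENOMEM only when all of them fail. A small sketch of that dispatch shape, using hypothetical strategy names and an opaque context rather than the real helpers::

    /* sketch only: ordered strategy cascade, first fit wins */
    #include <stdbool.h>
    #include <stddef.h>
    #include <errno.h>

    struct rx_ctx;  /* opaque stand-in for the device + buffer bookkeeping */

    /* hypothetical strategies, most to least preferred; each returns
     * true when the resulting layout fits the packet buffer
     */
    static bool strat_priv_only(struct rx_ctx *ctx)         { (void)ctx; return false; }
    static bool strat_full_waterline(struct rx_ctx *ctx)    { (void)ctx; return false; }
    static bool strat_reduced_waterline(struct rx_ctx *ctx) { (void)ctx; return false; }
    static bool strat_drop_nopfc(struct rx_ctx *ctx)        { (void)ctx; return false; }
    static bool strat_drop_pfc(struct rx_ctx *ctx)          { (void)ctx; return true;  }

    static bool (* const rx_strategies[])(struct rx_ctx *) = {
        strat_priv_only,
        strat_full_waterline,
        strat_reduced_waterline,
        strat_drop_nopfc,
        strat_drop_pfc,
    };

    static int rx_buffer_calc(struct rx_ctx *ctx)
    {
        size_t i;

        for (i = 0; i < sizeof(rx_strategies) / sizeof(rx_strategies[0]); i++)
            if (rx_strategies[i](ctx))
                return 0;       /* first strategy that fits wins */

        return -ENOMEM;         /* nothing fits the packet buffer */
    }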
speed/autoneg/duplex query cmd failed %d\n", - ret); + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); + if (ret) return ret; - } - *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; - ret = hclge_parse_speed(speed_tmp, speed); - if (ret) - dev_err(&hdev->pdev->dev, - "could not parse speed(=%d), %d\n", speed_tmp, ret); + return 0; +} - return ret; +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_cfg_mac_speed_dup(hdev, speed, duplex); } static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) @@ -2193,7 +2528,8 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + if (enable) + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2209,6 +2545,16 @@ static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + if (!hdev->hw.mac.support_autoneg) { + if (enable) { + dev_err(&hdev->pdev->dev, + "autoneg is not supported by current port\n"); + return -EOPNOTSUPP; + } else { + return 0; + } + } + return hclge_set_autoneg_en(hdev, enable); } @@ -2224,121 +2570,159 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } -static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, - bool mask_vlan, - u8 *mac_mask) +static int hclge_restart_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) { - struct hclge_mac_vlan_mask_entry_cmd *req; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) + return hclge_set_autoneg_en(hdev, !halt); + + return 0; +} + +static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) +{ + struct hclge_config_fec_cmd *req; struct hclge_desc desc; - int status; + int ret; - req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); - hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 
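hclge_cfg_mac_speed_dup() is now split from the firmware write (hclge_cfg_mac_speed_dup_hw()) and keeps a cached copy of the last programmed speed and duplex, so an unchanged request becomes a no-op when autoneg is not supported. A generic sketch of that write-through-with-cache shape, with a hypothetical hw_write_speed_dup() standing in for the command send::

    /* sketch only: skip redundant hardware writes via a software cache */
    #include <stdbool.h>

    struct mac_cfg {
        int speed;
        unsigned char duplex;
        bool autoneg_supported;
    };

    /* hypothetical helper standing in for the firmware command */
    static int hw_write_speed_dup(int speed, unsigned char duplex)
    {
        (void)speed;
        (void)duplex;
        return 0;   /* would build and send the config descriptor here */
    }

    static int cfg_speed_dup(struct mac_cfg *mac, int speed, unsigned char duplex)
    {
        int ret;

        /* skip the firmware round trip when nothing changes and the
         * setting cannot be renegotiated behind our back
         */
        if (!mac->autoneg_supported && mac->speed == speed &&
            mac->duplex == duplex)
            return 0;

        ret = hw_write_speed_dup(speed, duplex);
        if (ret)
            return ret;

        /* only cache after the hardware accepted the new setting */
        mac->speed = speed;
        mac->duplex = duplex;
        return 0;
    }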
1 : 0); - ether_addr_copy(req->mac_mask, mac_mask); + req = (struct hclge_config_fec_cmd *)desc.data; + if (fec_mode & BIT(HNAE3_FEC_AUTO)) + hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); + if (fec_mode & BIT(HNAE3_FEC_RS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); + if (fec_mode & BIT(HNAE3_FEC_BASER)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Config mac_vlan_mask failed for cmd_send, ret =%d\n", - status); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); - return status; + return ret; } -static int hclge_mac_init(struct hclge_dev *hdev) +static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) { - struct hnae3_handle *handle = &hdev->vport[0].nic; - struct net_device *netdev = handle->kinfo.netdev; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct hclge_mac *mac = &hdev->hw.mac; - u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - struct hclge_vport *vport; - int mtu; int ret; - int i; - ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mac speed dup fail ret=%d\n", ret); - return ret; + if (fec_mode && !(mac->fec_ability & fec_mode)) { + dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); + return -EINVAL; } - mac->link = 0; + ret = hclge_set_fec_hw(hdev, fec_mode); + if (ret) + return ret; + + mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); + return 0; +} - /* Initialize the MTA table work mode */ - hdev->enable_mta = true; - hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; +static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; - ret = hclge_set_mta_filter_mode(hdev, - hdev->mta_mac_sel_type, - hdev->enable_mta); - if (ret) { - dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", - ret); + if (fec_ability) + *fec_ability = mac->fec_ability; + if (fec_mode) + *fec_mode = mac->fec_mode; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + hdev->support_sfp_query = true; + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex); + if (ret) return ret; + + if (hdev->hw.mac.support_autoneg) { + ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); + if (ret) + return ret; } - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->accept_mta_mc = false; + mac->link = 0; - memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); - ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); + if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { + ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); + if (ret) return ret; - } } - ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); + ret = hclge_set_mac_mtu(hdev, hdev->mps); if (ret) { - dev_err(&hdev->pdev->dev, - "set default mac_vlan_mask fail ret=%d\n", ret); + dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); return ret; } - if (netdev) - mtu = 
netdev->mtu; - else - mtu = ETH_DATA_LEN; + ret = hclge_set_default_loopback(hdev); + if (ret) + return ret; - ret = hclge_set_mtu(handle, mtu); + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, - "set mtu failed ret=%d\n", ret); + "allocate buffer fail, ret=%d\n", ret); return ret; } static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { - if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->mbx_service_task); + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, 0); } static void hclge_reset_task_schedule(struct hclge_dev *hdev) { - if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->rst_service_task); + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, 0); } -static void hclge_task_schedule(struct hclge_dev *hdev) +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) { - if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && - !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) - (void)schedule_work(&hdev->service_task); + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); } -static int hclge_get_mac_link_status(struct hclge_dev *hdev) +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) { struct hclge_link_status_cmd *req; struct hclge_desc desc; - int link_status; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); @@ -2350,89 +2734,230 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS_UP_M; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; - return !!link_status; + return 0; } -static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) { - int mac_state; - int link_stat; + struct phy_device *phydev = hdev->hw.mac.phydev; - mac_state = hclge_get_mac_link_status(hdev); + *link_status = HCLGE_LINK_STATUS_DOWN; - if (hdev->hw.mac.phydev) { - if (!genphy_read_status(hdev->hw.mac.phydev)) - link_stat = mac_state & - hdev->hw.mac.phydev->link; - else - link_stat = 0; + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; - } else { - link_stat = mac_state; - } + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; + + return hclge_get_mac_link_status(hdev, link_status); +} + +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || + vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) + continue; - return !!link_stat; + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret=%d\n", + i, ret); + } + } } static void hclge_update_link_status(struct hclge_dev *hdev) { + struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; + int ret; int i; if (!client) return; - state = hclge_get_mac_phy_link(hdev); + + if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) + return; + + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); + rhandle = &hdev->vport[i].roce; + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); } - hdev->hw.mac.link = state; + + hclge_push_link_status(hdev); } + + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); } -static int hclge_update_speed_duplex(struct hclge_dev *hdev) +static void hclge_update_port_capability(struct hclge_mac *mac) { - struct hclge_mac mac = hdev->hw.mac; - u8 duplex; - int speed; +#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE + /* update fec ability by speed */ + hclge_convert_setting_fec(mac); +#endif + /* firmware can not identify back plane type, the media type + * read from configuration can help deal it + */ + if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && + mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) + mac->module_type = HNAE3_MODULE_TYPE_KR; + else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + mac->module_type = HNAE3_MODULE_TYPE_TP; + + if (mac->support_autoneg) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); + linkmode_copy(mac->advertising, mac->supported); + } else { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + mac->supported); + linkmode_zero(mac->advertising); + } +} + +static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; int ret; - /* get the speed and duplex as autoneg'result from mac cmd when phy - * doesn't exit. 
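hclge_update_link_status() now brackets its work with a test_and_set_bit()/clear_bit() pair on HCLGE_STATE_LINK_UPDATING, so a second caller backs off instead of racing the first. A stand-alone sketch of that re-entrancy guard, using C11 atomics in place of the kernel bitops::

    /* sketch only: single-flight guard around a status update */
    #include <stdatomic.h>

    static atomic_flag link_updating = ATOMIC_FLAG_INIT;

    /* hypothetical worker; stands in for reading MAC/PHY state and
     * notifying the clients
     */
    static void do_update_link(void)
    {
    }

    static void update_link_status(void)
    {
        /* someone else is already updating: skip, do not queue */
        if (atomic_flag_test_and_set(&link_updating))
            return;

        do_update_link();

        atomic_flag_clear(&link_updating);
    }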
- */ - if (mac.phydev || !mac.autoneg) - return 0; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP do not support get SFP speed %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); + return ret; + } - ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac autoneg/speed/duplex query failed %d\n", ret); + *speed = le32_to_cpu(resp->speed); + + return 0; +} + +static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + + resp->query_type = QUERY_ACTIVE_SPEED; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support get SFP info %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); return ret; } - if ((mac.speed != speed) || (mac.duplex != duplex)) { - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); - return ret; - } + /* In some case, mac speed get from IMP may be 0, it shouldn't be + * set to mac->speed. + */ + if (!le32_to_cpu(resp->speed)) + return 0; + + mac->speed = le32_to_cpu(resp->speed); + /* if resp->speed_ability is 0, it means it's an old version + * firmware, do not update these params + */ + if (resp->speed_ability) { + mac->module_type = le32_to_cpu(resp->module_type); + mac->speed_ability = le32_to_cpu(resp->speed_ability); + mac->autoneg = resp->autoneg; + mac->support_autoneg = resp->autoneg_ability; + mac->speed_type = QUERY_ACTIVE_SPEED; + if (!resp->active_fec) + mac->fec_mode = 0; + else + mac->fec_mode = BIT(resp->active_fec); + } else { + mac->speed_type = QUERY_SFP_SPEED; } return 0; } -static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) +static int hclge_update_port_info(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + int speed; + int ret; - return hclge_update_speed_duplex(hdev); + /* get the port info from SFP cmd if not copper port */ + if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + return 0; + + /* if IMP does not support get SFP/qSFP info, return directly */ + if (!hdev->support_sfp_query) + return 0; + + if (hdev->pdev->revision >= 0x21) { + speed = mac->speed; + ret = hclge_get_sfp_info(hdev, mac); + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; + ret = hclge_get_sfp_speed(hdev, &speed); + } + + if (ret == -EOPNOTSUPP) { + hdev->support_sfp_query = false; + return ret; + } else if (ret) { + return ret; + } + + if (hdev->pdev->revision >= 0x21) { + if (mac->speed_type == QUERY_ACTIVE_SPEED) { + hclge_update_port_capability(mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); + return 0; + } + return hclge_cfg_mac_speed_dup(hdev, mac->speed, + HCLGE_MAC_FULL); + } else { + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ + + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); + } } static int hclge_get_status(struct hnae3_handle 
*handle) @@ -2445,59 +2970,118 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static void hclge_service_timer(struct timer_list *t) +static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) { - struct hclge_dev *hdev = from_timer(hdev, t, service_timer); + if (!pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "SRIOV is disabled, can not get vport(%d) info.\n", vf); + return NULL; + } + + if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "vf id(%d) is out of range(0 <= vfid < %d)\n", + vf, pci_num_vf(hdev->pdev)); + return NULL; + } + + /* VF start from 1 in vport */ + vf += HCLGE_VF_VPORT_START_NUM; + return &hdev->vport[vf]; +} + +static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; - mod_timer(&hdev->service_timer, jiffies + HZ); - hdev->hw_stats.stats_timer++; - hclge_task_schedule(hdev); + ivf->vf = vf; + ivf->linkstate = vport->vf_info.link_state; + ivf->spoofchk = vport->vf_info.spoofchk; + ivf->trusted = vport->vf_info.trusted; + ivf->min_tx_rate = 0; + ivf->max_tx_rate = vport->vf_info.max_tx_rate; + ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); + ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; + ether_addr_copy(ivf->mac, vport->vf_info.mac); + + return 0; } -static void hclge_service_complete(struct hclge_dev *hdev) +static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, + int link_state) { - WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int link_state_old; + int ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + link_state_old = vport->vf_info.link_state; + vport->vf_info.link_state = link_state; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d\n", vf, ret); + } - /* Flush memory before next watchdog */ - smp_mb__before_atomic(); - clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + return ret; } static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { - u32 rst_src_reg; - u32 cmdq_src_reg; + u32 rst_src_reg, cmdq_src_reg, msix_src_reg; /* fetch the events from their corresponding regs */ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + msix_src_reg = hclge_read_dev(&hdev->hw, + HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); /* Assumption: If by any chance reset and mailbox events are reported * together then we will only process reset event in this go and will * defer the processing of the mailbox events. Since, we would have not * cleared RX CMDQ event this time we would receive again another * interrupt from H/W just for the mailbox. 
+ * + * check for vector0 reset event sources */ - - /* check for vector0 reset event sources */ - if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + hdev->rst_stats.imp_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } - if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + hdev->rst_stats.global_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } - if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { - set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); - return HCLGE_VECTOR0_EVENT_RST; + /* check for vector0 msix event source */ + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + *clearval = msix_src_reg; + return HCLGE_VECTOR0_EVENT_ERR; } /* check for vector0 mailbox(=CMDQ RX) event source */ @@ -2507,6 +3091,12 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) return HCLGE_VECTOR0_EVENT_MBX; } + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "CMDQ INT status:0x%x, other INT status:0x%x\n", + cmdq_src_reg, msix_src_reg); + *clearval = msix_src_reg; + return HCLGE_VECTOR0_EVENT_OTHER; } @@ -2520,6 +3110,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, case HCLGE_VECTOR0_EVENT_MBX: hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); break; + default: + break; } } @@ -2540,14 +3132,27 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) static irqreturn_t hclge_misc_irq_handle(int irq, void *data) { struct hclge_dev *hdev = data; + u32 clearval = 0; u32 event_cause; - u32 clearval; hclge_enable_vector(&hdev->misc_vector, false); event_cause = hclge_check_event_cause(hdev, &clearval); /* vector 0 interrupt is shared with reset and mailbox source events.*/ switch (event_cause) { + case HCLGE_VECTOR0_EVENT_ERR: + /* we do not know what type of reset is required now. This could + * only be decided after we fetch the type of errors which + * caused this event. Therefore, we will do below for now: + * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we + * have defered type of reset to be used. + * 2. Schedule the reset serivce task. + * 3. When service task receives HNAE3_UNKNOWN_RESET type it + * will fetch the correct type of reset. This would be done + * by first decoding the types of errors. 
+ */ + set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); + /* fall through */ case HCLGE_VECTOR0_EVENT_RST: hclge_reset_task_schedule(hdev); break; @@ -2569,9 +3174,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) break; } - /* clear the source of interrupt if it is not cause by reset */ - if (event_cause != HCLGE_VECTOR0_EVENT_RST) { - hclge_clear_event_cause(hdev, event_cause, clearval); + hclge_clear_event_cause(hdev, event_cause, clearval); + + /* Enable interrupt if it is not cause by reset. And when + * clearval equal to 0, it means interrupt status may be + * cleared by hardware before driver reads status register. + * For this case, vector0 interrupt also should be enabled. + */ + if (!clearval || + event_cause == HCLGE_VECTOR0_EVENT_MBX) { hclge_enable_vector(&hdev->misc_vector, true); } @@ -2604,6 +3215,17 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } +static void hclge_misc_affinity_setup(struct hclge_dev *hdev) +{ + irq_set_affinity_hint(hdev->misc_vector.vector_irq, + &hdev->affinity_mask); +} + +static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) +{ + irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); +} + static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -2611,8 +3233,10 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) hclge_get_misc_vector(hdev); /* this would be explicitly freed in the end */ + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", + HCLGE_NAME, pci_name(hdev->pdev)); ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, - 0, "hclge_misc", hdev); + 0, hdev->misc_vector.name, hdev); if (ret) { hclge_free_vector(hdev, 0); dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", @@ -2628,12 +3252,15 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev) hclge_free_vector(hdev, 0); } -static int hclge_notify_client(struct hclge_dev *hdev, - enum hnae3_reset_notify_type type) +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) { struct hnae3_client *client = hdev->nic_client; u16 i; + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) + return 0; + if (!client->ops->reset_notify) return -EOPNOTSUPP; @@ -2642,33 +3269,65 @@ static int hclge_notify_client(struct hclge_dev *hdev, int ret; ret = client->ops->reset_notify(handle, type); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "notify nic client failed %d(%d)\n", type, ret); return ret; + } } return 0; } -static int hclge_reset_wait(struct hclge_dev *hdev) +static int hclge_notify_roce_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) { -#define HCLGE_RESET_WATI_MS 100 -#define HCLGE_RESET_WAIT_CNT 5 - u32 val, reg, reg_bit; - u32 cnt = 0; + struct hnae3_client *client = hdev->roce_client; + int ret = 0; + u16 i; - switch (hdev->reset_type) { - case HNAE3_GLOBAL_RESET: - reg = HCLGE_GLOBAL_RESET_REG; - reg_bit = HCLGE_GLOBAL_RESET_BIT; - break; - case HNAE3_CORE_RESET: - reg = HCLGE_GLOBAL_RESET_REG; - reg_bit = HCLGE_CORE_RESET_BIT; - break; - case HNAE3_FUNC_RESET: - reg = HCLGE_FUN_RST_ING; - reg_bit = HCLGE_FUN_RST_ING_B; - break; + if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + struct hnae3_handle *handle = &hdev->vport[i].roce; + + ret = client->ops->reset_notify(handle, type); + if (ret) { + dev_err(&hdev->pdev->dev, + "notify roce client 
failed %d(%d)", + type, ret); + return ret; + } + } + + return ret; +} + +static int hclge_reset_wait(struct hclge_dev *hdev) +{ +#define HCLGE_RESET_WATI_MS 100 +#define HCLGE_RESET_WAIT_CNT 350 + + u32 val, reg, reg_bit; + u32 cnt = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; + case HNAE3_GLOBAL_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_GLOBAL_RESET_BIT; + break; + case HNAE3_FUNC_RESET: + reg = HCLGE_FUN_RST_ING; + reg_bit = HCLGE_FUN_RST_ING_B; + break; default: dev_err(&hdev->pdev->dev, "Wait for unsupported reset type: %d\n", @@ -2692,6 +3351,136 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%u) rst failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + return ret; + } + + if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + continue; + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded. + */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%u) failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + } + + return 0; +} + +static void hclge_mailbox_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || + test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) || + test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) + return; + + hclge_mbx_handler(hdev); + + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_sync_cmd *req; + struct hclge_desc desc; + int cnt = 0; + int ret; + + req = (struct hclge_pf_rst_sync_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); + + do { + /* vf need to down netdev by mbx during PF or FLR reset */ + hclge_mailbox_service_task(hdev); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* for compatible with old firmware, wait + * 100 ms for VF to stop IO + */ + if (ret == -EOPNOTSUPP) { + msleep(HCLGE_RESET_SYNC_TIME); + return; + } else if (ret) { + dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n", + ret); + return; + } else if (req->all_vf_ready) { + return; + } + msleep(HCLGE_PF_RESET_SYNC_TIME); + hclge_cmd_reuse_desc(&desc, true); + } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); + + dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); +} + +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type) +{ + struct hnae3_client *client = hdev->nic_client; + u16 i; + + if (!client || !client->ops->process_hw_error || + !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) + 
client->ops->process_hw_error(&hdev->vport[i].nic, type); +} + +static void hclge_handle_imp_error(struct hclge_dev *hdev) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { + hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + + if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { + hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } +} + int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; @@ -2712,56 +3501,65 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) static void hclge_do_reset(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; struct pci_dev *pdev = hdev->pdev; u32 val; + if (hclge_get_hw_reset_stat(handle)) { + dev_info(&pdev->dev, "hardware reset not finish\n"); + dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + return; + } + switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: + dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); - dev_info(&pdev->dev, "Global Reset requested\n"); - break; - case HNAE3_CORE_RESET: - val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); - hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); - dev_info(&pdev->dev, "Core Reset requested\n"); break; case HNAE3_FUNC_RESET: - dev_info(&pdev->dev, "PF Reset requested\n"); - hclge_func_reset_cmd(hdev, 0); + dev_info(&pdev->dev, "PF reset requested\n"); /* schedule again to check later */ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; default: dev_warn(&pdev->dev, - "Unsupported reset type: %d\n", hdev->reset_type); + "unsupported reset type: %d\n", hdev->reset_type); break; } } -static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, - unsigned long *addr) +enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr) { enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + struct hclge_dev *hdev = ae_dev->priv; /* return the highest priority reset level amongst all */ - if (test_bit(HNAE3_GLOBAL_RESET, addr)) - rst_level = HNAE3_GLOBAL_RESET; - else if (test_bit(HNAE3_CORE_RESET, addr)) - rst_level = HNAE3_CORE_RESET; - else if (test_bit(HNAE3_IMP_RESET, addr)) + if (test_bit(HNAE3_IMP_RESET, addr)) { rst_level = HNAE3_IMP_RESET; - else if (test_bit(HNAE3_FUNC_RESET, addr)) + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { + rst_level = HNAE3_GLOBAL_RESET; + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FUNC_RESET, addr)) { rst_level = HNAE3_FUNC_RESET; + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } - /* now, clear all other resets */ - clear_bit(HNAE3_GLOBAL_RESET, addr); - clear_bit(HNAE3_CORE_RESET, addr); - 
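hclge_get_reset_level() picks the most severe pending reset (IMP, then global, then function, then FLR) and clears the weaker requests that the chosen reset already covers, so a lower-level request is not replayed after a higher-level one completes. A compact sketch of that priority decode over a plain bitmap; the enum values and bit layout are illustrative, not the driver's real HNAE3 definitions::

    /* sketch only: highest-priority reset wins and subsumes weaker ones */
    enum reset_level {
        RESET_NONE = 0,
        RESET_FLR,
        RESET_FUNC,
        RESET_GLOBAL,
        RESET_IMP,      /* most severe */
    };

    #define RST_BIT(level)  (1UL << (level))

    static enum reset_level get_reset_level(unsigned long *pending)
    {
        static const enum reset_level by_priority[] = {
            RESET_IMP, RESET_GLOBAL, RESET_FUNC, RESET_FLR,
        };
        unsigned int i, j;

        for (i = 0; i < sizeof(by_priority) / sizeof(by_priority[0]); i++) {
            enum reset_level level = by_priority[i];

            if (!(*pending & RST_BIT(level)))
                continue;

            /* clear this request and every weaker one it subsumes */
            for (j = i; j < sizeof(by_priority) / sizeof(by_priority[0]); j++)
                *pending &= ~RST_BIT(by_priority[j]);

            return level;
        }

        return RESET_NONE;
    }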
clear_bit(HNAE3_IMP_RESET, addr); - clear_bit(HNAE3_FUNC_RESET, addr); + if (hdev->reset_type != HNAE3_NONE_RESET && + rst_level < hdev->reset_type) + return HNAE3_NONE_RESET; return rst_level; } @@ -2777,9 +3575,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) case HNAE3_GLOBAL_RESET: clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); break; - case HNAE3_CORE_RESET: - clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); - break; default: break; } @@ -2787,155 +3582,573 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) if (!clearval) return; - hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); + /* For revision 0x20, the reset interrupt source + * can only be cleared after hardware reset done + */ + if (hdev->pdev->revision == 0x20) + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, + clearval); + hclge_enable_vector(&hdev->misc_vector, true); } -static void hclge_reset(struct hclge_dev *hdev) +static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) { - struct hnae3_handle *handle; - - /* perform reset of the stack & ae device for a client */ - handle = &hdev->vport[0].nic; - rtnl_lock(); - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - - if (!hclge_reset_wait(hdev)) { - hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); - hclge_reset_ae_dev(hdev->ae_dev); - hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + u32 reg_val; - hclge_clear_reset_cause(hdev); - } else { - /* schedule again to check pending resets later */ - set_bit(hdev->reset_type, &hdev->reset_pending); - hclge_reset_task_schedule(hdev); - } + reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGE_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGE_NIC_SW_RST_RDY; - hclge_notify_client(hdev, HNAE3_UP_CLIENT); - handle->last_reset_time = jiffies; - rtnl_unlock(); + hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); } -static void hclge_reset_event(struct hnae3_handle *handle) +static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - /* check if this is a new reset request and we are not here just because - * last reset attempt did not succeed and watchdog hit us again. We will - * know this if last reset request did not occur very recently (watchdog - * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) - * In case of new request we reset the "reset level" to PF reset. - * And if it is a repeat reset request of the most recent one then we - * want to make sure we throttle the reset request. Therefore, we will - * not allow it again before 3*HZ times. - */ - if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) - return; - else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) - handle->reset_level = HNAE3_FUNC_RESET; + int ret; - dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", - handle->reset_level); + ret = hclge_set_all_vf_rst(hdev, true); + if (ret) + return ret; - /* request reset & schedule reset task */ - set_bit(handle->reset_level, &hdev->reset_request); - hclge_reset_task_schedule(hdev); + hclge_func_reset_sync_vf(hdev); - if (handle->reset_level < HNAE3_GLOBAL_RESET) - handle->reset_level++; + return ret; } -static void hclge_reset_subtask(struct hclge_dev *hdev) +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) { - /* check if there is any ongoing reset in the hardware. This status can - * be checked from reset_pending. 
If there is then, we need to wait for - * hardware to complete reset. - * a. If we are able to figure out in reasonable time that hardware - * has fully resetted then, we can proceed with driver, client - * reset. - * b. else, we can come back later to check this status so re-sched - * now. - */ - hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); - if (hdev->reset_type != HNAE3_NONE_RESET) - hclge_reset(hdev); + u32 reg_val; + int ret = 0; - /* check if we got any *new* reset requests to be honored */ - hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); - if (hdev->reset_type != HNAE3_NONE_RESET) - hclge_do_reset(hdev); + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; - hdev->reset_type = HNAE3_NONE_RESET; + ret = hclge_func_reset_cmd(hdev, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "asserting function reset fail %d!\n", ret); + return ret; + } + + /* After performaning pf reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hclge_cmd_init is called. + */ + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hdev->rst_stats.pf_rst_cnt++; + break; + case HNAE3_FLR_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; + break; + case HNAE3_IMP_RESET: + hclge_handle_imp_error(hdev); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; + default: + break; + } + + /* inform hardware that preparatory work is done */ + msleep(HCLGE_RESET_SYNC_TIME); + hclge_reset_handshake(hdev, true); + dev_info(&hdev->pdev->dev, "prepare wait ok\n"); + + return ret; } -static void hclge_reset_service_task(struct work_struct *work) +static void hclge_show_rst_info(struct hclge_dev *hdev) { - struct hclge_dev *hdev = - container_of(work, struct hclge_dev, rst_service_task); + char *buf; - if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); + if (!buf) return; - clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); - hclge_reset_subtask(hdev); + dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + kfree(buf); } -static void hclge_mailbox_service_task(struct work_struct *work) +static bool hclge_reset_err_handle(struct hclge_dev *hdev) { - struct hclge_dev *hdev = - container_of(work, struct hclge_dev, mbx_service_task); + struct hnae3_handle *handle = &hdev->vport[0].nic; - if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) - return; + if (hdev->reset_pending) { + dev_info(&hdev->pdev->dev, "Reset pending %lu\n", + hdev->reset_pending); + return true; + } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_RESET_INT_M) { + dev_info(&hdev->pdev->dev, + "reset failed because new reset interrupt\n"); + hclge_clear_reset_cause(hdev); + return false; + } else if (hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT) { + hdev->rst_stats.reset_fail_cnt++; + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule reset task(%u)\n", + hdev->rst_stats.reset_fail_cnt); + return true; + } - clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + hclge_clear_reset_cause(hdev); - 
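When a reset attempt fails, hclge_reset_err_handle() either re-queues the same reset, bounded by HCLGE_RESET_MAX_FAIL_CNT, or, once that budget is spent, restores the command-queue handshake and latches the device into the reset-failed state. A minimal sketch of that bounded-retry decision, with a made-up retry limit::

    /* sketch only: retry a failed reset a limited number of times */
    #include <stdbool.h>

    #define RESET_MAX_FAIL_CNT 5    /* illustrative budget */

    struct reset_state {
        unsigned int fail_cnt;
        bool failed;                /* latched once the budget is exhausted */
    };

    /* returns true if the caller should schedule another attempt */
    static bool reset_err_handle(struct reset_state *st)
    {
        if (st->fail_cnt < RESET_MAX_FAIL_CNT) {
            st->fail_cnt++;
            return true;            /* re-schedule the same reset */
        }

        /* budget exhausted: give up and surface the failure */
        st->failed = true;
        return false;
    }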
hclge_mbx_handler(hdev); + /* recover the handshake status when reset fail */ + hclge_reset_handshake(hdev, true); - clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); + if (handle && handle->ae_algo->ops->reset_end) + handle->ae_algo->ops->reset_end(handle, false); + + hclge_show_rst_info(hdev); + + set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + return false; } -static void hclge_service_task(struct work_struct *work) +static int hclge_set_rst_done(struct hclge_dev *hdev) { - struct hclge_dev *hdev = - container_of(work, struct hclge_dev, service_task); + struct hclge_pf_rst_done_cmd *req; + struct hclge_desc desc; + int ret; - if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { - hclge_update_stats_for_all(hdev); - hdev->hw_stats.stats_timer = 0; + req = (struct hclge_pf_rst_done_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_PF_RST_DONE, just print a warning and + * return success + */ + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "current firmware does not support command(0x%x)!\n", + HCLGE_OPC_PF_RST_DONE); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", + ret); } - hclge_update_speed_duplex(hdev); - hclge_update_link_status(hdev); - hclge_service_complete(hdev); + return ret; } -struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +static int hclge_reset_prepare_up(struct hclge_dev *hdev) { - /* VF handle has no client */ - if (!handle->client) - return container_of(handle, struct hclge_vport, nic); - else if (handle->client->type == HNAE3_CLIENT_ROCE) - return container_of(handle, struct hclge_vport, roce); - else - return container_of(handle, struct hclge_vport, nic); + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + case HNAE3_GLOBAL_RESET: + /* fall through */ + case HNAE3_IMP_RESET: + ret = hclge_set_rst_done(hdev); + break; + default: + break; + } + + /* clear up the handshake status after re-initialize done */ + hclge_reset_handshake(hdev, false); + + return ret; } -static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, - struct hnae3_vector_info *vector_info) +static int hclge_reset_stack(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hnae3_vector_info *vector = vector_info; - struct hclge_dev *hdev = vport->back; - int alloc = 0; - int i, j; + int ret; - vector_num = min(hdev->num_msi_left, vector_num); + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hclge_reset_ae_dev(hdev->ae_dev); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); +} + +static int hclge_reset_prepare(struct hclge_dev *hdev) +{ + int ret; + + hdev->rst_stats.reset_cnt++; + /* perform reset of the stack & ae device for a client */ + ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + return hclge_reset_prepare_wait(hdev); +} + +static int hclge_reset_rebuild(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hnae3_handle *handle = &hdev->vport[0].nic; + enum 
hnae3_reset_type reset_level; + int ret; + + hdev->rst_stats.hw_reset_done_cnt++; + + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_reset_stack(hdev); + rtnl_unlock(); + if (ret) + return ret; + + hclge_clear_reset_cause(hdev); + + ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 + * times + */ + if (ret && + hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) + return ret; + + ret = hclge_reset_prepare_up(hdev); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); + if (ret) + return ret; + + hdev->last_reset_time = jiffies; + hdev->rst_stats.reset_fail_cnt = 0; + hdev->rst_stats.reset_done_cnt++; + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible. since some errors + * need this kind of reset to fix. + */ + reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (reset_level != HNAE3_NONE_RESET) + set_bit(reset_level, &hdev->reset_request); + + if (handle && handle->ae_algo->ops->reset_end) + handle->ae_algo->ops->reset_end(handle, true); + + return 0; +} + +static void hclge_reset(struct hclge_dev *hdev) +{ + if (hclge_reset_prepare(hdev)) + goto err_reset; + + if (hclge_reset_wait(hdev)) + goto err_reset; + + if (hclge_reset_rebuild(hdev)) + goto err_reset; + + return; + +err_reset: + if (hclge_reset_err_handle(hdev)) + hclge_reset_task_schedule(hdev); +} + +static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; + + /* We might end up getting called broadly because of 2 below cases: + * 1. Recoverable error was conveyed through APEI and only way to bring + * normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * For the first case,error event might not have ae handle available. + * check if this is a new reset request and we are not here just because + * last reset attempt did not succeed and watchdog hit us again. We will + * know this if last reset request did not occur very recently (watchdog + * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) + * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 3*HZ times. 
+ */ + if (!handle) + handle = &hdev->vport[0].nic; + + if (time_before(jiffies, (hdev->last_reset_time + + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + return; + } else if (hdev->default_reset_request) { + hdev->reset_level = + hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { + hdev->reset_level = HNAE3_FUNC_RESET; + } + + dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", + hdev->reset_level); + + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + + if (hdev->reset_level < HNAE3_GLOBAL_RESET) + hdev->reset_level++; +} + +static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclge_reset_timer(struct timer_list *t) +{ + struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + /* if default_reset_request has no value, it means that this reset + * request has already be handled, so just return here + */ + if (!hdev->default_reset_request) + return; + + dev_info(&hdev->pdev->dev, + "triggering reset in reset timer\n"); + + if (ae_dev->ops->reset_event) + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + +static bool hclge_reset_end(struct hnae3_handle *handle, bool done) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (hdev->rst_stats.reset_fail_cnt >= HCLGE_RESET_MAX_FAIL_CNT) + dev_err(&hdev->pdev->dev, "Reset fail!\n"); + + return done; +} + +static void hclge_reset_subtask(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + /* check if there is any ongoing reset in the hardware. This status can + * be checked from reset_pending. If there is then, we need to wait for + * hardware to complete reset. + * a. If we are able to figure out in reasonable time that hardware + * has fully resetted then, we can proceed with driver, client + * reset. + * b. else, we can come back later to check this status so re-sched + * now. 
+ */ + hdev->last_reset_time = jiffies; + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_reset(hdev); + + /* check if we got any *new* reset requests to be honored */ + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_do_reset(hdev); + + hdev->reset_type = HNAE3_NONE_RESET; +} + +static void hclge_misc_err_recovery(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct device *dev = &hdev->pdev->dev; + u32 msix_sts_reg; + + msix_sts_reg = hclge_read_dev(&hdev->hw, + HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); + + if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + if (hclge_handle_hw_msix_error(hdev, + &hdev->default_reset_request)) + dev_info(dev, "received msix interrupt 0x%x\n", + msix_sts_reg); + + if (hdev->default_reset_request) + if (ae_dev->ops->reset_event) + ae_dev->ops->reset_event(hdev->pdev, NULL); + } + + hclge_enable_vector(&hdev->misc_vector, true); +} + +static void hclge_reset_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + return; + + if (test_and_clear_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request)) { + hclge_misc_err_recovery(hdev); + return; + } + + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + + hclge_reset_subtask(hdev); + + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclge_update_vport_alive(struct hclge_dev *hdev) +{ + int i; + + /* start from vport 1 for PF is always alive */ + for (i = 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + /* vf keeps sending alive msg to pf per 2s, if pf doesn't + * receive a vf's alive msg for 8s, regards the vf is offline + */ + if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) + if (test_and_clear_bit(HCLGE_VPORT_STATE_ALIVE, + &vport->state)) + dev_info(&hdev->pdev->dev, + "VF %u keep alive lost!", + vport->vport_id - + HCLGE_VF_VPORT_START_NUM); + + /* If vf is not alive, set to default value */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + vport->mps = HCLGE_MAC_DEFAULT_FRAME; + } +} + +static void hclge_periodic_service_task(struct hclge_dev *hdev) +{ + unsigned long delta = round_jiffies_relative(HZ); + + if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + return; + + /* Always handle the link updating to make sure link state is + * updated when it is triggered by mbx. 
+ */ + hclge_update_link_status(hdev); + hclge_sync_mac_table(hdev); + hclge_sync_promisc_mode(hdev); + + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { + delta = jiffies - hdev->last_serv_processed; + + if (delta < round_jiffies_relative(HZ)) { + delta = round_jiffies_relative(HZ) - delta; + goto out; + } + } + + hdev->serv_processed_cnt++; + hclge_update_vport_alive(hdev); + + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { + hdev->last_serv_processed = jiffies; + goto out; + } + + if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) + hclge_update_stats_for_all(hdev); + + hclge_update_port_info(hdev); + hclge_sync_vlan_filter(hdev); + + if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) + hclge_rfs_filter_expire(hdev); + + hdev->last_serv_processed = jiffies; + +out: + hclge_task_schedule(hdev, delta); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task.work); + + hclge_reset_service_task(hdev); + hclge_mailbox_service_task(hdev); + hclge_periodic_service_task(hdev); + + /* Handle reset and mbx again in case periodical task delays the + * handling by calling hclge_task_schedule() in + * hclge_periodic_service_task(). + */ + hclge_reset_service_task(hdev); + hclge_mailbox_service_task(hdev); +} + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + int i, j; + + vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); + vector_num = min(hdev->num_msi_left, vector_num); for (j = 0; j < vector_num; j++) { for (i = 1; i < hdev->num_msi; i++) { @@ -2982,7 +4195,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector) vector_id = hclge_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&hdev->pdev->dev, - "Get vector index fail. vector_id =%d\n", vector_id); + "Get vector index fail. 
vector = %d\n", vector); return vector_id; } @@ -3005,29 +4218,28 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, const u8 hfunc, const u8 *key) { struct hclge_rss_config_cmd *req; + unsigned int key_offset = 0; struct hclge_desc desc; - int key_offset; + int key_counts; int key_size; int ret; + key_counts = HCLGE_RSS_KEY_SIZE; req = (struct hclge_rss_config_cmd *)desc.data; - for (key_offset = 0; key_offset < 3; key_offset++) { + while (key_counts) { hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, false); req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); - if (key_offset == 2) - key_size = - HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGE_RSS_HASH_KEY_NUM; - + key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts); memcpy(req->hash_key, key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + key_counts -= key_size; + key_offset++; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -3102,6 +4314,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, return ret; } +static void hclge_get_rss_type(struct hclge_vport *vport) +{ + if (vport->rss_tuple_sets.ipv4_tcp_en || + vport->rss_tuple_sets.ipv4_udp_en || + vport->rss_tuple_sets.ipv4_sctp_en || + vport->rss_tuple_sets.ipv6_tcp_en || + vport->rss_tuple_sets.ipv6_udp_en || + vport->rss_tuple_sets.ipv6_sctp_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; + else if (vport->rss_tuple_sets.ipv4_fragment_en || + vport->rss_tuple_sets.ipv6_fragment_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; + else + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { struct hclge_rss_input_tuple_cmd *req; @@ -3121,6 +4349,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; + hclge_get_rss_type(&hdev->vport[0]); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -3135,8 +4364,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, int i; /* Get hash algorithm */ - if (hfunc) - *hfunc = vport->rss_algo; + if (hfunc) { + switch (vport->rss_algo) { + case HCLGE_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } /* Get the RSS Key required by the user */ if (key) @@ -3150,6 +4390,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, return 0; } +static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc, + u8 *hash_algo) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + return 0; + case ETH_RSS_HASH_XOR: + *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; + return 0; + case ETH_RSS_HASH_NO_CHANGE: + *hash_algo = vport->rss_algo; + return 0; + default: + return -EINVAL; + } +} + static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, const u8 *key, const u8 hfunc) { @@ -3158,22 +4416,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, u8 hash_algo; int ret, i; + ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo); + if (ret) { + dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); + return ret; + } + /* Set 
the RSS Hash Key if specififed by the user */ if (key) { - - if (hfunc == ETH_RSS_HASH_TOP || - hfunc == ETH_RSS_HASH_NO_CHANGE) - hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - else - return -EINVAL; ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; /* Update the shadow RSS key with user specified qids */ - memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); - vport->rss_algo = hash_algo; + memcpy(vport->rss_hash_key, key, sizeof(vport->rss_hash_key)); + } else { + ret = hclge_set_rss_algo_key(hdev, hash_algo, + vport->rss_hash_key); + if (ret) + return ret; } + vport->rss_algo = hash_algo; /* Update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) @@ -3283,6 +4546,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + hclge_get_rss_type(vport); return 0; } @@ -3344,59 +4608,71 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) return hdev->rss_size_max; } -int hclge_rss_init_hw(struct hclge_dev *hdev) +static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; - u8 *rss_indir = vport[0].rss_indirection_tbl; - u16 rss_size = vport[0].alloc_rss_size; - u8 *key = vport[0].rss_hash_key; - u8 hfunc = vport[0].rss_algo; - u16 tc_offset[HCLGE_MAX_TC_NUM]; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 tc_size[HCLGE_MAX_TC_NUM]; + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + struct hnae3_tc_info *tc_info; u16 roundup_size; - int i, ret; + u16 rss_size; + int i; - ret = hclge_set_rss_indir_table(hdev, rss_indir); - if (ret) - return ret; - - ret = hclge_set_rss_algo_key(hdev, hfunc, key); - if (ret) - return ret; - - ret = hclge_set_rss_input_tuple(hdev); - if (ret) - return ret; - - /* Each TC have the same queue size, and tc_size set to hardware is - * the log2 of roundup power of two of rss_size, the acutal queue - * size is limited by indirection table. - */ - if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { - dev_err(&hdev->pdev->dev, - "Configure rss tc size failed, invalid TC_SIZE = %d\n", - rss_size); - return -EINVAL; - } - - roundup_size = roundup_pow_of_two(rss_size); - roundup_size = ilog2(roundup_size); - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; + tc_info = &vport->nic.kinfo.tc_info; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + rss_size = tc_info->tqp_count[i]; + tc_valid[i] = 0; if (!(hdev->hw_tc_map & BIT(i))) continue; + /* tc_size set to hardware is the log2 of roundup power of two + * of rss_size, the acutal queue size is limited by indirection + * table. 
+ */ + if (rss_size > HCLGE_RSS_IND_TBL_SIZE || + rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %u\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + tc_valid[i] = 1; tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; + tc_offset[i] = tc_info->tqp_offset[i]; } return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); } +int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u8 *rss_indir = vport[0].rss_indirection_tbl; + u8 *key = vport[0].rss_hash_key; + u8 hfunc = vport[0].rss_algo; + int ret; + + ret = hclge_set_rss_indir_table(hdev, rss_indir); + if (ret) + return ret; + + ret = hclge_set_rss_algo_key(hdev, hfunc, key); + if (ret) + return ret; + + ret = hclge_set_rss_input_tuple(hdev); + if (ret) + return ret; + + return hclge_init_rss_tc_mode(hdev); +} + void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; @@ -3411,8 +4687,11 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) static void hclge_rss_init_cfg(struct hclge_dev *hdev) { + int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; - int i; + + if (hdev->pdev->revision >= 0x21) + rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport[i].rss_tuple_sets.ipv4_tcp_en = @@ -3420,7 +4699,7 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev) vport[i].rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; vport[i].rss_tuple_sets.ipv4_sctp_en = - HCLGE_RSS_INPUT_TUPLE_SCTP; + HCLGE_RSS_INPUT_TUPLE_SCTP4; vport[i].rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; vport[i].rss_tuple_sets.ipv6_tcp_en = @@ -3428,13 +4707,14 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev) vport[i].rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; vport[i].rss_tuple_sets.ipv6_sctp_en = - HCLGE_RSS_INPUT_TUPLE_SCTP; + HCLGE_RSS_INPUT_TUPLE_SCTP6; vport[i].rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + vport[i].rss_algo = rss_algo; - netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); + memcpy(vport[i].rss_hash_key, hclge_hash_key, + HCLGE_RSS_KEY_SIZE); } hclge_rss_indir_init_cfg(hdev); @@ -3447,8 +4727,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hnae3_ring_chain_node *node; struct hclge_desc desc; - struct hclge_ctrl_vector_chain_cmd *req - = (struct hclge_ctrl_vector_chain_cmd *)desc.data; + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; enum hclge_cmd_status status; enum hclge_opcode_type op; u16 tqp_type_and_id; @@ -3506,8 +4786,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, return 0; } -static int hclge_map_ring_to_vector(struct hnae3_handle *handle, - int vector, +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3524,8 +4803,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); } -static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, - int vector, +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { struct 
hclge_vport *vport = hclge_get_vport(handle); @@ -3546,14 +4824,13 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vectorid=%d, ret =%d\n", - vector_id, - ret); + vector_id, ret); return ret; } -int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param) +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param) { struct hclge_promisc_cfg_cmd *req; struct hclge_desc desc; @@ -3575,13 +4852,15 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", ret); + "Set vport %d promisc mode fail, ret = %d.\n", + param->vf_id, ret); return ret; } -void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, - bool en_mc, bool en_bc, int vport_id) +static void hclge_promisc_param_init(struct hclge_promisc_param *param, + bool en_uc, bool en_mc, bool en_bc, + int vport_id) { if (!param) return; @@ -3596,1279 +4875,3890 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } -static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, - bool en_mc_pmc) +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; - hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vport->vport_id); - hclge_cmd_set_promisc_mode(hdev, ¶m); + return hclge_cmd_set_promisc_mode(hdev, ¶m); } -static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { - struct hclge_desc desc; - struct hclge_config_mac_mode_cmd *req = - (struct hclge_config_mac_mode_cmd *)desc.data; - u32 loop_en = 0; - int ret; + struct hclge_vport *vport = hclge_get_vport(handle); + bool en_bc_pmc = true; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); - req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + /* For revision 0x20, if broadcast promisc enabled, vlan filter is + * always bypassed. So broadcast promisc should be disabled until + * user enable promisc mode + */ + if (handle->pdev->revision == 0x20) + en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? 
true : false; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "mac enable fail, ret =%d.\n", ret); + return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, + en_bc_pmc); } -static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) +static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) { - struct hclge_config_mac_mode_cmd *req; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); +} + +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; struct hclge_desc desc; - u32 loop_en; int ret; - req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; - /* 1 Read out the MAC mode config at first */ - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "mac loopback get fail, ret =%d.\n", ret); + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); return ret; } - /* 2 Then setup the loopback flag */ - loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); - - req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + *fd_mode = req->mode; - /* 3 Config mac work mode with loopback flag - * and its original configure parameters - */ - hclge_cmd_reuse_desc(&desc, false); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "mac loopback set fail, ret =%d.\n", ret); return ret; } -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) { -#define HCLGE_SERDES_RETRY_MS 10 -#define HCLGE_SERDES_RETRY_NUM 100 - struct hclge_serdes_lb_cmd *req; + struct hclge_get_fd_allocation_cmd *req; struct hclge_desc desc; - int ret, i = 0; + int ret; - req = (struct hclge_serdes_lb_cmd *)&desc.data[0]; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); - if (en) { - req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - } else { - req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; - } + req = (struct hclge_get_fd_allocation_cmd *)desc.data; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "serdes loopback set fail, ret = %d\n", ret); + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); return ret; } - do { - msleep(HCLGE_SERDES_RETRY_MS); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, - true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "serdes loopback get, ret = %d\n", ret); - return ret; - } - } while (++i < HCLGE_SERDES_RETRY_NUM && - !(req->result & HCLGE_CMD_SERDES_DONE_B)); + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); - if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); - return -EBUSY; - } else if 
(!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { - dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); - return -EIO; - } + return ret; +} - return 0; +static int hclge_set_fd_key_config(struct hclge_dev *hdev, + enum HCLGE_FD_STAGE stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) +static int hclge_init_fd_config(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; int ret; - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = hclge_set_mac_loopback(hdev, en); + if (!hnae3_dev_fd_supported(hdev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; break; - case HNAE3_MAC_INTER_LOOP_SERDES: - ret = hclge_set_serdes_loopback(hdev, en); + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; break; default: - ret = -ENOTSUPP; dev_err(&hdev->pdev->dev, - "loop_mode %d is not supported\n", loop_mode); - break; + "Unsupported flow director mode %u\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct 
hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); } + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + return ret; } -static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, - int stream_id, bool enable) +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) { + struct hclge_fd_ad_config_cmd *req; struct hclge_desc desc; - struct hclge_cfg_com_tqp_queue_cmd *req = - (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; + u64 ad_data = 0; int ret; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); - req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); - req->stream_id = cpu_to_le16(stream_id); - req->enable |= enable << HCLGE_TQP_ENABLE_B; - + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(&hdev->pdev->dev, - "Tqp enable fail, status =%d.\n", ret); + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + return ret; } -static void hclge_reset_tqp_stats(struct hnae3_handle *handle) +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hnae3_queue *queue; - struct hclge_tqp *tqp; + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; int i; - for (i = 0; i < vport->alloc_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclge_tqp, q); - memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + if (rule->unused_tuple & tuple_bit) + return 
true; + + switch (tuple_bit) { + case BIT(INNER_DST_MAC): + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], + rule->tuples_mask.dst_mac[i]); + } + + return true; + case BIT(INNER_SRC_MAC): + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], + rule->tuples_mask.src_mac[i]); + calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], + rule->tuples_mask.src_mac[i]); + } + + return true; + case BIT(INNER_VLAN_TAG_FST): + calc_x(tmp_x_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + calc_y(tmp_y_s, rule->tuples.vlan_tag1, + rule->tuples_mask.vlan_tag1); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_ETH_TYPE): + calc_x(tmp_x_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + calc_y(tmp_y_s, rule->tuples.ether_proto, + rule->tuples_mask.ether_proto); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_IP_TOS): + calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); + + return true; + case BIT(INNER_IP_PROTO): + calc_x(*key_x, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + calc_y(*key_y, rule->tuples.ip_proto, + rule->tuples_mask.ip_proto); + + return true; + case BIT(INNER_SRC_IP): + calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], + rule->tuples_mask.src_ip[IPV4_INDEX]); + calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], + rule->tuples_mask.src_ip[IPV4_INDEX]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_DST_IP): + calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], + rule->tuples_mask.dst_ip[IPV4_INDEX]); + calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], + rule->tuples_mask.dst_ip[IPV4_INDEX]); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case BIT(INNER_SRC_PORT): + calc_x(tmp_x_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + calc_y(tmp_y_s, rule->tuples.src_port, + rule->tuples_mask.src_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case BIT(INNER_DST_PORT): + calc_x(tmp_x_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + calc_y(tmp_y_s, rule->tuples.dst_port, + rule->tuples_mask.dst_port); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + default: + return false; } } -static int hclge_ae_start(struct hnae3_handle *handle) +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int i; + u32 port_number = 0; - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, true); + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } - /* mac enable */ - hclge_cfg_mac_mode(hdev, true); - 
clear_bit(HCLGE_STATE_DOWN, &hdev->state); - mod_timer(&hdev->service_timer, jiffies + HZ); - hdev->hw.mac.link = 0; + return port_number; +} - /* reset tqp stats */ - hclge_reset_tqp_stats(handle); +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + unsigned int i; - hclge_mac_start_phy(hdev); + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); - return 0; + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; + break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); } -static void hclge_ae_stop(struct hnae3_handle *handle) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int i; +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. + */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + u8 meta_data_region; + u8 tuple_size; + int ret; + u32 i; - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); - clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { - hclge_mac_stop_phy(hdev); - return; + for (i = 0 ; i < MAX_TUPLE; i++) { + bool tuple_valid; + u32 check_tuple; + + tuple_size = tuple_key_info[i].key_length / 8; + check_tuple = key_cfg->tuple_active & BIT(i); + + tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } } - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, false); + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; - /* Mac disable */ - hclge_cfg_mac_mode(hdev, false); + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); - hclge_mac_stop_phy(hdev); + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; + } - /* reset tqp stats */ - hclge_reset_tqp_stats(handle); - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); - hclge_update_link_status(hdev); + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; } -static int 
hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, - u16 cmdq_resp, u8 resp_code, - enum hclge_mac_vlan_tbl_opcode op) +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) { - struct hclge_dev *hdev = vport->back; - int return_status = -EIO; + struct hclge_fd_ad_data ad_data; - if (cmdq_resp) { - dev_err(&hdev->pdev->dev, - "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", - cmdq_resp); - return -EIO; + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + ad_data.forward_to_direct_queue = false; + ad_data.queue_id = 0; + } else { + ad_data.drop_packet = false; + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; } - if (op == HCLGE_MAC_VLAN_ADD) { - if ((!resp_code) || (resp_code == 1)) { - return_status = 0; - } else if (resp_code == 2) { - return_status = -ENOSPC; - dev_err(&hdev->pdev->dev, - "add mac addr failed for uc_overflow.\n"); - } else if (resp_code == 3) { - return_status = -ENOSPC; - dev_err(&hdev->pdev->dev, - "add mac addr failed for mc_overflow.\n"); - } else { - dev_err(&hdev->pdev->dev, - "add mac addr failed for undefined, code=%d.\n", - resp_code); - } - } else if (op == HCLGE_MAC_VLAN_REMOVE) { - if (!resp_code) { - return_status = 0; - } else if (resp_code == 1) { - return_status = -ENOENT; - dev_dbg(&hdev->pdev->dev, - "remove mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "remove mac addr failed for undefined, code=%d.\n", - resp_code); - } - } else if (op == HCLGE_MAC_VLAN_LKUP) { - if (!resp_code) { - return_status = 0; - } else if (resp_code == 1) { - return_status = -ENOENT; - dev_dbg(&hdev->pdev->dev, - "lookup mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "lookup mac addr failed for undefined, code=%d.\n", - resp_code); - } - } else { - return_status = -EINVAL; - dev_err(&hdev->pdev->dev, - "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", - op); - } + ad_data.use_counter = false; + ad_data.counter_id = 0; - return return_status; + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); } -static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, + u32 *unused_tuple) { - int word_num; - int bit_num; + if (!spec || !unused_tuple) + return -EINVAL; - if (vfid > 255 || vfid < 0) - return -EIO; + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); - if (vfid >= 0 && vfid <= 191) { - word_num = vfid / 32; - bit_num = vfid % 32; - if (clr) - desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); - else - desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); - } else { - word_num = (vfid - 192) / 32; - bit_num = vfid % 32; - if (clr) - desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); - else - desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); - } + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); - return 0; -} + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); -static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) -{ -#define HCLGE_DESC_NUMBER 3 -#define HCLGE_FUNC_NUMBER_PER_DESC 6 - int i, j; + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); - for (i = 1; i < HCLGE_DESC_NUMBER; i++) - for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) - if (desc[i].data[j]) - 
return false; + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); - return true; + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + return 0; } -static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, - const u8 *addr) +static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, + u32 *unused_tuple) { - const unsigned char *mac_addr = addr; - u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | - (mac_addr[0]) | (mac_addr[1] << 8); - u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + if (!spec || !unused_tuple) + return -EINVAL; - new_req->mac_addr_hi32 = cpu_to_le32(high_val); - new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); -} + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); -static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) -{ - u16 high_val = addr[1] | (addr[0] << 8); - struct hclge_dev *hdev = vport->back; - u32 rsh = 4 - hdev->mta_mac_sel_type; - u16 ret_val = (high_val >> rsh) & 0xfff; + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); - return ret_val; -} + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable) -{ - struct hclge_mta_filter_mode_cmd *req; - struct hclge_desc desc; - int ret; + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); - req = (struct hclge_mta_filter_mode_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); + if (!spec->proto) + *unused_tuple |= BIT(INNER_IP_PROTO); - hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + if (spec->l4_4_bytes) + return -EOPNOTSUPP; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config mat filter mode failed for cmd_send, ret =%d.\n", - ret); + if (spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; - return ret; + return 0; } -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable) +static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, + u32 *unused_tuple) { - struct hclge_cfg_func_mta_filter_cmd *req; - struct hclge_desc desc; - int ret; + if (!spec || !unused_tuple) + return -EINVAL; - req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS); - hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); - req->function_id = func_id; + /* check whether src/dst ip address used */ + if (!spec->ip6src[0] && !spec->ip6src[1] && + !spec->ip6src[2] && !spec->ip6src[3]) + *unused_tuple |= BIT(INNER_SRC_IP); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config func_id enable failed for cmd_send, ret =%d.\n", - ret); + if (!spec->ip6dst[0] && !spec->ip6dst[1] && + !spec->ip6dst[2] && !spec->ip6dst[3]) + *unused_tuple |= BIT(INNER_DST_IP); - return ret; + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); + + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); + + if (spec->tclass) + return -EOPNOTSUPP; + + return 0; } -static int hclge_set_mta_table_item(struct hclge_vport *vport, - u16 idx, - bool enable) +static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, + u32 *unused_tuple) { - 
struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item_cmd *req; - struct hclge_desc desc; - u16 item_idx = 0; - int ret; + if (!spec || !unused_tuple) + return -EINVAL; - req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); - hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(item_idx); + /* check whether src/dst ip address used */ + if (!spec->ip6src[0] && !spec->ip6src[1] && + !spec->ip6src[2] && !spec->ip6src[3]) + *unused_tuple |= BIT(INNER_SRC_IP); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mta table item failed for cmd_send, ret =%d.\n", - ret); - return ret; - } + if (!spec->ip6dst[0] && !spec->ip6dst[1] && + !spec->ip6dst[2] && !spec->ip6dst[3]) + *unused_tuple |= BIT(INNER_DST_IP); - if (enable) - set_bit(idx, vport->mta_shadow); - else - clear_bit(idx, vport->mta_shadow); + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); + + if (spec->tclass) + return -EOPNOTSUPP; + + if (spec->l4_4_bytes) + return -EOPNOTSUPP; return 0; } -static int hclge_update_mta_status(struct hnae3_handle *handle) +static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) { - unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; - struct hclge_vport *vport = hclge_get_vport(handle); - struct net_device *netdev = handle->kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; + if (!spec || !unused_tuple) + return -EINVAL; - memset(mta_status, 0, sizeof(mta_status)); + *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); - /* update mta_status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); - set_bit(tbl_idx, mta_status); - } + if (is_zero_ether_addr(spec->h_source)) + *unused_tuple |= BIT(INNER_SRC_MAC); - return hclge_update_mta_status_common(vport, mta_status, - 0, HCLGE_MTA_TBL_SIZE, true); -} + if (is_zero_ether_addr(spec->h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter) -{ - struct hclge_dev *hdev = vport->back; - u16 update_max = idx + count; - u16 check_max; - int ret = 0; - bool used; - u16 i; + if (!spec->h_proto) + *unused_tuple |= BIT(INNER_ETH_TYPE); - /* setup mta check range */ - if (update_filter) { - i = 0; - check_max = HCLGE_MTA_TBL_SIZE; - } else { - i = idx; - check_max = update_max; - } + return 0; +} - used = false; - /* check and update all mta item */ - for (; i < check_max; i++) { - /* ignore unused item */ - if (!test_bit(i, vport->mta_shadow)) - continue; +static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + if (fs->flow_type & FLOW_EXT) { + if (fs->h_ext.vlan_etype) { + dev_err(&hdev->pdev->dev, "vlan-etype does not support!\n"); + return -EOPNOTSUPP; + } - /* if i in update range then update it */ - if (i >= idx && i < update_max) - if (!test_bit(i - idx, status)) - hclge_set_mta_table_item(vport, i, false); + if (!fs->h_ext.vlan_tci) + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); - 
if (!used && test_bit(i, vport->mta_shadow)) - used = true; + if (fs->m_ext.vlan_tci && + be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { + dev_err(&hdev->pdev->dev, "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n", + ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); + return -EINVAL; + } + } else { + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); } - /* no longer use mta, disable it */ - if (vport->accept_mta_mc && update_filter && !used) { - ret = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - false); - if (ret) + if (fs->flow_type & FLOW_MAC_EXT) { + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { dev_err(&hdev->pdev->dev, - "disable func mta filter fail ret=%d\n", - ret); + "FLOW_MAC_EXT does not support in current FD mode!\n"); + return -EOPNOTSUPP; + } + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); else - vport->accept_mta_mc = false; + *unused_tuple &= ~BIT(INNER_DST_MAC); } - return ret; + return 0; } -static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry_cmd *req) +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) { - struct hclge_dev *hdev = vport->back; - struct hclge_desc desc; - u8 resp_code; - u16 retval; + u32 flow_type; int ret; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, "failed to config fd rules, invalid rule location: %u, max is %u\n.", + fs->location, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); + return -EINVAL; + } - memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, + unused_tuple); + break; + case IP_USER_FLOW: + ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, + unused_tuple); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, + unused_tuple); + break; + case IPV6_USER_FLOW: + ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, + unused_tuple); + break; + case ETHER_FLOW: + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "ETHER_FLOW does not support in current FD mode!\n"); + return -EOPNOTSUPP; + } + + ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, + unused_tuple); + break; + default: + dev_err(&hdev->pdev->dev, "unsupported protocol type, protocol type = 0x%x\n", + flow_type); + return -EOPNOTSUPP; + } - ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "del mac addr failed for cmd_send, ret =%d.\n", + dev_err(&hdev->pdev->dev, "failed to check flow union tuple, ret = %d\n", ret); return ret; } - resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; - retval = le16_to_cpu(desc.retval); - return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, - HCLGE_MAC_VLAN_REMOVE); + return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); } -static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry_cmd *req, - struct hclge_desc *desc, - bool is_mc) 
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) { - struct hclge_dev *hdev = vport->back; - u8 resp_code; - u16 retval; - int ret; + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); - if (is_mc) { - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - memcpy(desc[0].data, - req, - sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); - hclge_cmd_setup_basic_desc(&desc[1], - HCLGE_OPC_MAC_VLAN_ADD, - true); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_setup_basic_desc(&desc[2], - HCLGE_OPC_MAC_VLAN_ADD, - true); - ret = hclge_cmd_send(&hdev->hw, desc, 3); - } else { - memcpy(desc[0].data, - req, - sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - } - if (ret) { - dev_err(&hdev->pdev->dev, - "lookup mac addr failed for cmd_send, ret =%d.\n", - ret); - return ret; + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; } - resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; - retval = le16_to_cpu(desc[0].retval); - return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, - HCLGE_MAC_VLAN_LKUP); + spin_unlock_bh(&hdev->fd_rule_lock); + + return rule && rule->location == location; } -static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry_cmd *req, - struct hclge_desc *mc_desc) +/* make sure being called after lock up with fd_rule_lock */ +static int hclge_fd_update_rule_list(struct hclge_dev *hdev, + struct hclge_fd_rule *new_rule, + u16 location, + bool is_add) { - struct hclge_dev *hdev = vport->back; - int cfg_status; - u8 resp_code; - u16 retval; - int ret; + struct hclge_fd_rule *rule = NULL, *parent = NULL; + struct hlist_node *node2; - if (!mc_desc) { - struct hclge_desc desc; + if (is_add && !new_rule) + return -EINVAL; - hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_MAC_VLAN_ADD, - false); - memcpy(desc.data, req, - sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; - retval = le16_to_cpu(desc.retval); + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (rule->location >= location) + break; + parent = rule; + } - cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, - resp_code, - HCLGE_MAC_VLAN_ADD); - } else { - hclge_cmd_reuse_desc(&mc_desc[0], false); - mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_reuse_desc(&mc_desc[1], false); - mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_reuse_desc(&mc_desc[2], false); - mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); - memcpy(mc_desc[0].data, req, - sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); - ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); - resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; - retval = le16_to_cpu(mc_desc[0].retval); + if (rule && rule->location == location) { + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; - cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, - resp_code, - HCLGE_MAC_VLAN_ADD); - } + if (!is_add) { + if (!hdev->hclge_fd_rule_num) + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + clear_bit(location, hdev->fd_bmap); - if (ret) { + return 0; + } + } else if (!is_add) { dev_err(&hdev->pdev->dev, - "add mac addr failed for cmd_send, ret =%d.\n", - ret); - return ret; + "delete fail, rule %u is 
inexistent\n", + location); + return -EINVAL; } - return cfg_status; -} + INIT_HLIST_NODE(&new_rule->rule_node); -static int hclge_add_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) -{ - struct hclge_vport *vport = hclge_get_vport(handle); + if (parent) + hlist_add_behind(&new_rule->rule_node, &parent->rule_node); + else + hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); + + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + hdev->fd_active_type = new_rule->rule_type; - return hclge_add_uc_addr_common(vport, addr); + return 0; } -int hclge_add_uc_addr_common(struct hclge_vport *vport, - const unsigned char *addr) +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) { - struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry_cmd req; - struct hclge_desc desc; - u16 egress_port = 0; - int ret; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); - /* mac addr check */ - if (is_zero_ether_addr(addr) || - is_broadcast_ether_addr(addr) || - is_multicast_ether_addr(addr)) { - dev_err(&hdev->pdev->dev, - "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", - addr, - is_zero_ether_addr(addr), - is_broadcast_ether_addr(addr), - is_multicast_ether_addr(addr)); - return -EINVAL; - } + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, - HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); - req.egress_port = cpu_to_le16(egress_port); + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); - hclge_prepare_mac_addr(&req, addr); + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; - /* Lookup the mac address in the mac_vlan table, and add - * it if the entry is inexistent. Repeated unicast entry - * is not allowed in the mac vlan table. 
- */ - ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); - if (ret == -ENOENT) - return hclge_add_mac_vlan_tbl(vport, &req, NULL); + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; - /* check if we just hit the duplicate */ - if (!ret) - ret = -EINVAL; + break; + case IP_USER_FLOW: + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); - dev_err(&hdev->pdev->dev, - "PF failed to add unicast entry(%pM) in the MAC table\n", - addr); + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); - return ret; -} + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; -static int hclge_rm_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) -{ - struct hclge_vport *vport = hclge_get_vport(handle); + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; - return hclge_rm_uc_addr_common(vport, addr); -} + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; -int hclge_rm_uc_addr_common(struct hclge_vport *vport, - const unsigned char *addr) -{ - struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry_cmd req; - int ret; + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); - /* mac addr check */ - if (is_zero_ether_addr(addr) || - is_broadcast_ether_addr(addr) || - is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, - "Remove mac err! 
invalid mac:%pM.\n", - addr); - return -EINVAL; + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = + be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case IPV6_USER_FLOW: + be32_to_cpu_array(rule->tuples.src_ip, + fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, + fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + break; + case ETHER_FLOW: + ether_addr_copy(rule->tuples.src_mac, + fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, + fs->m_u.ether_spec.h_source); + + ether_addr_copy(rule->tuples.dst_mac, + fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, + fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = + be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = + be16_to_cpu(fs->m_u.ether_spec.h_proto); + + break; + default: + return -EOPNOTSUPP; } - memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); - ret = hclge_remove_mac_vlan_tbl(vport, &req); + switch (flow_type) { + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_SCTP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_TCP; + rule->tuples_mask.ip_proto = 0xFF; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + rule->tuples.ip_proto = IPPROTO_UDP; + rule->tuples_mask.ip_proto = 0xFF; + break; + default: + break; + } - return ret; -} + if (fs->flow_type & FLOW_EXT) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + } -static int hclge_add_mc_addr(struct hnae3_handle *handle, - const unsigned char *addr) -{ - struct hclge_vport *vport = hclge_get_vport(handle); + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } - return hclge_add_mc_addr_common(vport, addr); + return 0; } -int hclge_add_mc_addr_common(struct hclge_vport *vport, - const unsigned char *addr) +/* make sure being called after lock up with fd_rule_lock */ +static int hclge_fd_config_rule(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) { - struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry_cmd req; - struct hclge_desc desc[3]; - u16 tbl_idx; - int status; + int ret; - /* mac addr check */ - if (!is_multicast_ether_addr(addr)) { + if (!rule) { dev_err(&hdev->pdev->dev, - "Add mc mac err! 
invalid mac:%pM.\n", - addr); + "The flow director rule is NULL\n"); return -EINVAL; } - memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); - status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); - if (!status) { - /* This mac addr exist, update VFID for it */ - hclge_update_desc_vfid(desc, vport->vport_id, false); - status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } else { - /* This mac addr do not exist, add new entry for it */ - memset(desc[0].data, 0, sizeof(desc[0].data)); - memset(desc[1].data, 0, sizeof(desc[0].data)); - memset(desc[2].data, 0, sizeof(desc[0].data)); - hclge_update_desc_vfid(desc, vport->vport_id, false); - status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } - /* If mc mac vlan table is full, use MTA table */ - if (status == -ENOSPC) { - if (!vport->accept_mta_mc) { - status = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - true); - if (status) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", - status); - return status; - } - vport->accept_mta_mc = true; - } + /* it will never fail here, so needn't to check return value */ + hclge_fd_update_rule_list(hdev, rule, rule->location, true); - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, true); - } + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto clear_rule; - return status; -} + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + goto clear_rule; -static int hclge_rm_mc_addr(struct hnae3_handle *handle, - const unsigned char *addr) -{ - struct hclge_vport *vport = hclge_get_vport(handle); + return 0; - return hclge_rm_mc_addr_common(vport, addr); +clear_rule: + hclge_fd_update_rule_list(hdev, rule, rule->location, false); + return ret; } -int hclge_rm_mc_addr_common(struct hclge_vport *vport, - const unsigned char *addr) +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) { + struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; - struct hclge_desc desc[3]; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; - /* mac addr check */ - if (!is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, - "Remove mc mac err! 
invalid mac:%pM.\n", - addr); - return -EINVAL; + if (!hnae3_dev_fd_supported(hdev)) { + dev_err(&hdev->pdev->dev, "Flow Table Director is unsupported\n"); + return -EOPNOTSUPP; } - memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); - status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); - if (!status) { - /* This mac addr exist, remove this handle's VFID for it */ - hclge_update_desc_vfid(desc, vport->vport_id, true); + if (!hdev->fd_en) { + dev_err(&hdev->pdev->dev, + "Please enable flow director first\n"); + return -EOPNOTSUPP; + } - if (hclge_is_all_function_id_zero(desc)) - /* All the vfid is zero, so need to delete this entry */ - status = hclge_remove_mac_vlan_tbl(vport, &req); - else - /* Not all the vfid is zero, update the vfid */ - status = hclge_add_mac_vlan_tbl(vport, &req, desc); + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused); + if (ret) + return ret; + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + action = HCLGE_FD_ACTION_DROP_PACKET; } else { - /* Maybe this mac address is in mta table, but it cannot be - * deleted here because an entry of mta represents an address - * range rather than a specific address. the delete action to - * all entries will take effect in update_mta_status called by - * hns3_nic_set_rx_mode. - */ - status = 0; - } + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + u16 tqps; - return status; -} + /* To keep consistent with user's configuration, minus 1 when + * printing 'vf', because vf id from ethtool is added 1 for vf. + */ + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%u) should be less than %u\n", + vf - 1, hdev->num_req_vfs); + return -EINVAL; + } -static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, - u16 cmdq_resp, u8 resp_code) -{ -#define HCLGE_ETHERTYPE_SUCCESS_ADD 0 -#define HCLGE_ETHERTYPE_ALREADY_ADD 1 -#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 -#define HCLGE_ETHERTYPE_KEY_CONFLICT 3 + dst_vport_id = vf ? 
hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; - int return_status; + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%u) > max tqp num (%u)\n", + ring, tqps - 1); + return -EINVAL; + } - if (cmdq_resp) { - dev_err(&hdev->pdev->dev, - "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", - cmdq_resp); - return -EIO; + action = HCLGE_FD_ACTION_ACCEPT_PACKET; + q_index = ring; } - switch (resp_code) { - case HCLGE_ETHERTYPE_SUCCESS_ADD: - case HCLGE_ETHERTYPE_ALREADY_ADD: - return_status = 0; - break; - case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: - dev_err(&hdev->pdev->dev, - "add mac ethertype failed for manager table overflow.\n"); - return_status = -EIO; - break; - case HCLGE_ETHERTYPE_KEY_CONFLICT: - dev_err(&hdev->pdev->dev, - "add mac ethertype failed for key conflict.\n"); - return_status = -EIO; - break; - default: - dev_err(&hdev->pdev->dev, - "add mac ethertype failed for undefined, code=%d.\n", - resp_code); - return_status = -EIO; + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule); + if (ret) { + kfree(rule); + return ret; } - return return_status; + rule->flow_type = fs->flow_type; + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + rule->rule_type = HCLGE_FD_EP_ACTIVE; + + /* to avoid rule conflict, when user configure rule by ethtool, + * we need to clear all arfs rules + */ + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(handle); + + ret = hclge_fd_config_rule(hdev, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + return ret; } -static int hclge_add_mgr_tbl(struct hclge_dev *hdev, - const struct hclge_mac_mgr_tbl_entry_cmd *req) +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) { - struct hclge_desc desc; - u8 resp_code; - u16 retval; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; int ret; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); - memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!hclge_fd_rule_exist(hdev, fs->location)) { dev_err(&hdev->pdev->dev, - "add mac ethertype failed for cmd_send, ret =%d.\n", - ret); - return ret; + "Delete fail, rule %u is inexistent\n", fs->location); + return -ENOENT; } - resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; - retval = le16_to_cpu(desc.retval); - - return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); -} + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, + NULL, false); + if (ret) + return ret; -static int init_mgr_tbl(struct hclge_dev *hdev) -{ - int ret; - int i; + spin_lock_bh(&hdev->fd_rule_lock); + ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); - for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { - ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); - if (ret) { - dev_err(&hdev->pdev->dev, - "add mac ethertype failed, ret =%d.\n", - ret); - return ret; - } - } + spin_unlock_bh(&hdev->fd_rule_lock); - return 0; + return ret; } -static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +/* 
make sure being called after lock up with fd_rule_lock */ +static void hclge_del_all_fd_entries(struct hnae3_handle *handle, + bool clear_list) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + u16 location; - ether_addr_copy(p, hdev->hw.mac.mac_addr); + if (!hnae3_dev_fd_supported(hdev)) + return; + + for_each_set_bit(location, hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, + NULL, false); + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hlist_del(&rule->rule_node); + kfree(rule); + } + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + hdev->hclge_fd_rule_num = 0; + bitmap_zero(hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + } } -static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, - bool is_first) +static void hclge_restore_fd_entries(struct hnae3_handle *handle) { - const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; int ret; - /* mac addr check */ - if (is_zero_ether_addr(new_addr) || - is_broadcast_ether_addr(new_addr) || - is_multicast_ether_addr(new_addr)) { - dev_err(&hdev->pdev->dev, - "Change uc mac err! invalid mac:%p.\n", - new_addr); - return -EINVAL; - } - - if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_warn(&hdev->pdev->dev, - "remove old uc mac address fail.\n"); - - ret = hclge_add_uc_addr(handle, new_addr); - if (ret) { - dev_err(&hdev->pdev->dev, - "add uc mac address fail, ret =%d.\n", - ret); + /* Return ok here, because reset error handling will check this + * return value. If error is returned here, the reset process will + * fail. 
+ */ + if (!hnae3_dev_fd_supported(hdev)) + return; - if (!is_first && - hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_err(&hdev->pdev->dev, - "restore uc mac address fail.\n"); + /* if fd is disabled, should not restore it when reset */ + if (!hdev->fd_en) + return; - return -EIO; - } + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (!ret) + ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); - ret = hclge_pause_addr_cfg(hdev, new_addr); - if (ret) { - dev_err(&hdev->pdev->dev, - "configure mac pause address fail, ret =%d.\n", - ret); - return -EIO; + if (ret) { + dev_warn(&hdev->pdev->dev, + "Restore rule %u failed, remove it\n", + rule->location); + clear_bit(rule->location, hdev->fd_bmap); + hlist_del(&rule->rule_node); + kfree(rule); + hdev->hclge_fd_rule_num--; + } } - ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + if (hdev->hclge_fd_rule_num) + hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; - return 0; + spin_unlock_bh(&hdev->fd_rule_lock); } -static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - bool filter_en) +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) { - struct hclge_vlan_filter_ctrl_cmd *req; - struct hclge_desc desc; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; - req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; - req->vlan_type = vlan_type; - req->vlan_fe = filter_en; + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", - ret); + cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; - return ret; + return 0; } -#define HCLGE_FILTER_TYPE_VF 0 -#define HCLGE_FILTER_TYPE_PORT 1 - -static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip4_spec *spec, + struct ethtool_tcpip4_spec *spec_mask) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[3]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); - hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 
+ 0 : rule->tuples_mask.ip_tos; } -static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, - bool is_kill, u16 vlan, u8 qos, - __be16 proto) +static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, + struct ethtool_usrip4_spec *spec, + struct ethtool_usrip4_spec *spec_mask) { -#define HCLGE_MAX_VF_BYTES 16 - struct hclge_vlan_filter_vf_cfg_cmd *req0; - struct hclge_vlan_filter_vf_cfg_cmd *req1; - struct hclge_desc desc[2]; - u8 vf_byte_val; - u8 vf_byte_off; - int ret; - - hclge_cmd_setup_basic_desc(&desc[0], - HCLGE_OPC_VLAN_FILTER_VF_CFG, false); - hclge_cmd_setup_basic_desc(&desc[1], - HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); - vf_byte_off = vfid / 8; - vf_byte_val = 1 << (vfid % 8); + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; - req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; - req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; + spec->proto = rule->tuples.ip_proto; + spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; - req0->vlan_id = cpu_to_le16(vlan); - req0->vlan_cfg = is_kill; + spec->ip_ver = ETH_RX_NFC_IP4; +} - if (vf_byte_off < HCLGE_MAX_VF_BYTES) - req0->vf_bitmap[vf_byte_off] = vf_byte_val; +static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip6_spec *spec, + struct ethtool_tcpip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, + rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, + rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); else - req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); - ret = hclge_cmd_send(&hdev->hw, desc, 2); - if (ret) { - dev_err(&hdev->pdev->dev, - "Send vf vlan command fail, ret =%d.\n", - ret); - return ret; - } + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); - if (!is_kill) { -#define HCLGE_VF_VLAN_NO_ENTRY 2 - if (!req0->resp_code || req0->resp_code == 1) - return 0; + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); - if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { - dev_warn(&hdev->pdev->dev, - "vf vlan table is full, vf vlan filter is disabled\n"); - return 0; - } + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port); +} - dev_err(&hdev->pdev->dev, - "Add vf vlan filter fail, ret =%d.\n", - req0->resp_code); - } else { - if (!req0->resp_code) - return 0; +static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, + struct ethtool_usrip6_spec *spec, + struct ethtool_usrip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, + rule->tuples_mask.src_ip, IPV6_SIZE); - dev_err(&hdev->pdev->dev, - "Kill vf vlan filter fail, ret =%d.\n", - req0->resp_code); - } + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, + rule->tuples_mask.dst_ip, IPV6_SIZE); - return -EIO; + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; } -static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, - u16 vlan_id, bool is_kill) +static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, + struct ethhdr *spec, + struct ethhdr *spec_mask) { - struct hclge_vlan_filter_pf_cfg_cmd *req; - struct hclge_desc desc; - u8 vlan_offset_byte_val; - u8 vlan_offset_byte; - u8 vlan_offset_160; - int ret; + ether_addr_copy(spec->h_source, rule->tuples.src_mac); + ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->h_source); + else + ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->h_dest); + else + ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); - vlan_offset_160 = vlan_id / 160; - vlan_offset_byte = (vlan_id % 160) / 8; - vlan_offset_byte_val = 1 << (vlan_id % 8); + spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); +} - req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; - req->vlan_offset = vlan_offset_160; - req->vlan_cfg = is_kill; - req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; +static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + } - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "port vlan command, send fail, ret =%d.\n", ret); - return ret; + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } } -static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, - u16 vport_id, u16 vlan_id, u8 qos, - bool is_kill) +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) { - u16 vport_idx, vport_num = 0; - int ret; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + struct hlist_node *node2; - ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, - 0, proto); - if (ret) { - dev_err(&hdev->pdev->dev, - "Set %d vport vlan filter config fail, ret =%d.\n", - vport_id, ret); - return ret; + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location >= fs->location) + break; } - /* vlan 0 may be added twice when 8021q module is enabled */ - if (!is_kill && !vlan_id && - test_bit(vport_id, hdev->vlan_table[vlan_id])) - return 0; + if (!rule || fs->location != rule->location) { + spin_unlock_bh(&hdev->fd_rule_lock); - if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Add port vlan failed, vport %d is already in vlan %d\n", - vport_id, vlan_id); - return -EINVAL; + return -ENOENT; } - if (is_kill && - !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Delete port vlan failed, vport %d is not in vlan %d\n", - vport_id, vlan_id); - return -EINVAL; + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec); + break; + case IP_USER_FLOW: + hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec); + break; + case IPV6_USER_FLOW: + hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec); + break; + /* The flow type of fd rule has been checked before adding in to rule + * list. 
As other flow types have been handled, it must be ETHER_FLOW + * for the default case + */ + default: + hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, + &fs->m_u.ether_spec); + break; } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) - vport_num++; + hclge_fd_get_ext_info(fs, rule); - if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) - ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, - is_kill); + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; - return ret; -} + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } -int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, - u16 vlan_id, bool is_kill) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + spin_unlock_bh(&hdev->fd_rule_lock); - return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, - 0, is_kill); + return 0; } -static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, - u16 vlan, u8 qos, __be16 proto) +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; - if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - - return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); -} + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; -static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) -{ - struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; - struct hclge_vport_vtag_tx_cfg_cmd *req; - struct hclge_dev *hdev = vport->back; - struct hclge_desc desc; - int status; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EMSGSIZE; + } - req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; - req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); - req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, - vcfg->accept_tag1 ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, - vcfg->accept_untag1 ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, - vcfg->accept_tag2 ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, - vcfg->accept_untag2 ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, - vcfg->insert_tag1_en ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, - vcfg->insert_tag2_en ? 
1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + rule_locs[cnt] = rule->location; + cnt++; + } - req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; - req->vf_bitmap[req->vf_offset] = - 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + spin_unlock_bh(&hdev->fd_rule_lock); - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Send port txvlan cfg command fail, ret =%d\n", - status); + cmd->rule_cnt = cnt; - return status; + return 0; } -static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, + struct hclge_fd_rule_tuples *tuples) { - struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; - struct hclge_vport_vtag_rx_cfg_cmd *req; - struct hclge_dev *hdev = vport->back; - struct hclge_desc desc; - int status; +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); + tuples->ip_proto = fkeys->basic.ip_proto; + tuples->dst_port = be16_to_cpu(fkeys->ports.dst); - req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, - vcfg->strip_tag1_en ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, - vcfg->strip_tag2_en ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, - vcfg->vlan1_vlan_prionly ? 1 : 0); - hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, - vcfg->vlan2_vlan_prionly ? 1 : 0); + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); + tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); + } else { + int i; - req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; - req->vf_bitmap[req->vf_offset] = - 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + for (i = 0; i < IPV6_SIZE; i++) { + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); + } + } +} - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Send port rxvlan cfg command fail, ret =%d\n", - status); +/* traverse all rules, check whether an existed rule has the same tuples */ +static struct hclge_fd_rule * +hclge_fd_search_flow_keys(struct hclge_dev *hdev, + const struct hclge_fd_rule_tuples *tuples) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node; - return status; + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) + return rule; + } + + return NULL; } -static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) +static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, + struct hclge_fd_rule *rule) +{ + rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_PORT); + rule->action = 0; + rule->vf_id = 0; + rule->rule_type = HCLGE_FD_ARFS_ACTIVE; + if (tuples->ether_proto == ETH_P_IP) { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V4_FLOW; + else + rule->flow_type = UDP_V4_FLOW; + } else { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V6_FLOW; + else + rule->flow_type = UDP_V6_FLOW; + } + memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); + memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); +} + +static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, + u16 flow_id, struct flow_keys *fkeys) +{ +#ifdef CONFIG_RFS_ACCEL + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule_tuples new_tuples; + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + u16 tmp_queue_id; + u16 bit_id; + int ret; + + if (!hnae3_dev_fd_supported(hdev)) + return -EOPNOTSUPP; + + /* when there is already fd rule existed add by user, + * arfs should not work + */ + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EOPNOTSUPP; + } + + memset(&new_tuples, 0, sizeof(new_tuples)); + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + + /* check is there flow director filter existed for this flow, + * if not, create a new filter for it; + * if filter exist with different queue id, modify the filter; + * if filter exist with same queue id, do nothing + */ + rule = hclge_fd_search_flow_keys(hdev, &new_tuples); + if (!rule) { + bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); + if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOSPC; + } + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOMEM; + } + + set_bit(bit_id, hdev->fd_bmap); + rule->location = bit_id; + rule->flow_id = flow_id; + rule->queue_id = queue_id; + hclge_fd_build_arfs_rule(&new_tuples, rule); + ret = hclge_fd_config_rule(hdev, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + if (ret) + return ret; + + return rule->location; + } + + spin_unlock_bh(&hdev->fd_rule_lock); + + if (rule->queue_id == queue_id) + return rule->location; + + tmp_queue_id = rule->queue_id; + rule->queue_id = queue_id; + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) { + rule->queue_id = tmp_queue_id; + return ret; + } + + return rule->location; +#endif +} + +static void hclge_rfs_filter_expire(struct hclge_dev *hdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hclge_fd_rule *rule; + struct hlist_node *node; + HLIST_HEAD(del_list); + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return; + } + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rps_may_expire_flow(handle->netdev, rule->queue_id, + rule->flow_id, rule->location)) { + hlist_del_init(&rule->rule_node); + hlist_add_head(&rule->rule_node, &del_list); + hdev->hclge_fd_rule_num--; + clear_bit(rule->location, hdev->fd_bmap); + } + } + spin_unlock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, &del_list, rule_node) { + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + kfree(rule); + } +#endif +} + +/* make sure being called after lock up with fd_rule_lock */ +static void hclge_clear_arfs_rules(struct hnae3_handle *handle) +{ +#ifdef CONFIG_RFS_ACCEL + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) + hclge_del_all_fd_entries(handle, true); +#endif +} + +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || + (hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING) & 
BIT(0)); +} + +static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); +} + +static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->rst_stats.hw_reset_done_cnt; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool clear; + + hdev->fd_en = enable; + clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); + hclge_del_all_fd_entries(handle, clear); + spin_unlock_bh(&hdev->fd_rule_lock); + } else { + hclge_restore_fd_entries(handle); + } +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ + struct hclge_desc desc; + struct hclge_config_mac_mode_cmd *req = + (struct hclge_config_mac_mode_cmd *)desc.data; + u32 loop_en = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + + if (enable) { + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); + } + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); +} + +static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, + u8 switch_param, u8 param_mask) +{ + struct hclge_mac_vlan_switch_cmd *req; + struct hclge_desc desc; + u32 func_id; + int ret; + + func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); + req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + + /* read current config parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, + true); + req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; + req->func_id = cpu_to_le32(func_id); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read mac vlan switch parameter fail, ret = %d\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_cmd_reuse_desc(&desc, false); + req->switch_param = (req->switch_param & param_mask) | switch_param; + req->param_mask = param_mask; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "set mac vlan switch parameter fail, ret = %d\n", ret); + return ret; +} + +static void hclge_phy_link_status_wait(struct hclge_dev *hdev, + int link_ret) +{ +#define HCLGE_PHY_LINK_STATUS_NUM 200 + + struct phy_device *phydev = hdev->hw.mac.phydev; + int i = 0; + int ret; + + do { + ret = phy_read_status(phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "phy update link status fail, ret = %d\n", 
ret); + return; + } + + if (phydev->link == link_ret) + break; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_PHY_LINK_STATUS_NUM); +} + +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) +{ +#define HCLGE_MAC_LINK_STATUS_NUM 100 + + int link_status; + int i = 0; + int ret; + + do { + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) + return ret; + if (link_status == link_ret) + return 0; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + return -EBUSY; +} + +static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, + bool is_phy) +{ + int link_ret; + + link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + if (is_phy) + hclge_phy_link_status_wait(hdev, link_ret); + + return hclge_mac_link_status_wait(hdev, link_ret); +} + +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en; + int ret; + + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", ret); + return ret; + } + + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + /* 3 Config mac work mode with loopback flag + * and its original configure parameters + */ + hclge_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ +#define HCLGE_SERDES_RETRY_MS 10 +#define HCLGE_SERDES_RETRY_NUM 100 + + struct hclge_serdes_lb_cmd *req; + struct hclge_desc desc; + int ret, i = 0; + u8 loop_mode_b; + + req = (struct hclge_serdes_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); + + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported serdes loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + + if (en) { + req->enable = loop_mode_b; + req->mask = loop_mode_b; + } else { + req->mask = loop_mode_b; + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback set fail, ret = %d\n", ret); + return ret; + } + + do { + msleep(HCLGE_SERDES_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, + true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "serdes loopback get, ret = %d\n", ret); + return ret; + } + } while (++i < HCLGE_SERDES_RETRY_NUM && + !(req->result & HCLGE_CMD_SERDES_DONE_B)); + + if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { + dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); + return -EBUSY; + } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); + return -EIO; + } + return ret; +} + +static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop 
loop_mode) +{ + u8 duplex; + int ret; + + duplex = en ? DUPLEX_FULL : hdev->hw.mac.duplex; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, duplex); + if (ret) + return ret; + + ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); + if (ret) + return ret; + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, false); + if (ret) + dev_err(&hdev->pdev->dev, + "serdes loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_enable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + if (!phydev->suspended) { + ret = phy_suspend(phydev); + if (ret) + return ret; + } + + hdev->hw.mac.duplex_last = phydev->duplex; + + ret = phy_set_bits(phydev, MII_BMCR, BMCR_FULLDPLX); + if (ret) + return ret; + + ret = phy_resume(phydev); + if (ret) + return ret; + + return phy_loopback(phydev, true); +} + +static int hclge_disable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + ret = phy_loopback(phydev, false); + if (ret) + return ret; + + if (hdev->hw.mac.duplex_last == DUPLEX_HALF) { + ret = phy_clear_bits(phydev, MII_BMCR, BMCR_FULLDPLX); + if (ret) + return ret; + } + + return phy_suspend(phydev); +} + +static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + u8 duplex; + int ret; + + if (!phydev) + return -ENOTSUPP; + + duplex = en ? DUPLEX_FULL : hdev->hw.mac.duplex; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, duplex); + if (ret) + return ret; + + if (en) + ret = hclge_enable_phy_loopback(hdev, phydev); + else + ret = hclge_disable_phy_loopback(hdev, phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "set phy loopback fail, ret = %d\n", ret); + return ret; + } + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, true); + if (ret) + dev_err(&hdev->pdev->dev, + "phy loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue_cmd *req = + (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + if (enable) + req->enable |= 1U << HCLGE_TQP_ENABLE_B; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + return 0; +} + +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + /* Loopback can be enabled in three places: SSU, MAC, and serdes. By + * default, SSU loopback is enabled, so if the SMAC and the DMAC are + * the same, the packets are looped back in the SSU. If SSU loopback + * is disabled, packets can reach MAC even if SMAC is the same as DMAC. + */ + if (hdev->pdev->revision >= 0x21) { + u8 switch_param = en ? 
0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); + + ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, + HCLGE_SWITCH_ALW_LPBK_MASK); + if (ret) + return ret; + } + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + break; + case HNAE3_LOOP_PHY: + ret = hclge_set_phy_loopback(hdev, en); + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + if (ret) + return ret; + + ret = hclge_tqp_enable(handle, en); + if (ret) + dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", + en ? "enable" : "disable", ret); + + return ret; +} + +static int hclge_set_default_loopback(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_app_loopback(hdev, false); + if (ret) + return ret; + + ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + if (ret) + return ret; + + return hclge_cfg_serdes_loopback(hdev, false, + HNAE3_LOOP_PARALLEL_SERDES); +} + +static void hclge_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo; + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + int i; + + kinfo = &vport->nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static void hclge_flush_link_update(struct hclge_dev *hdev) +{ +#define HCLGE_FLUSH_LINK_TIMEOUT 100000 + + unsigned long last = hdev->serv_processed_cnt; + int i = 0; + + while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && + i++ < HCLGE_FLUSH_LINK_TIMEOUT && + last == hdev->serv_processed_cnt) + usleep_range(1, 1); +} + +static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (enable) { + hclge_task_schedule(hdev, 0); + } else { + /* Set the DOWN flag here to disable link updating */ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + /* flush memory to make sure DOWN is seen by service task */ + smp_mb__before_atomic(); + hclge_flush_link_update(hdev); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + hdev->hw.mac.link = 0; + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + hclge_mac_start_phy(hdev); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); + + /* If it is not PF reset or FLR, the firmware will disable the MAC, + * so it only need to stop phy here. 
+ */ + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && + hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { + hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); + return; + } + + hclge_reset_tqp(handle); + + hclge_config_mac_tnl_int(hdev, false); + + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + hclge_update_link_status(hdev); +} + +int hclge_vport_start(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + vport->last_active_jiffies = jiffies; + set_bit(HCLGE_VPORT_STATE_START, &vport->state); + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + + if (test_bit(vport->vport_id, hdev->vport_config_block)) { + if (vport->vport_id) { + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + } else { + hclge_restore_hw_table(hdev); + } + } + clear_bit(vport->vport_id, hdev->vport_config_block); + + return 0; +} + +void hclge_vport_stop(struct hclge_vport *vport) +{ + clear_bit(HCLGE_VPORT_STATE_START, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); +} + +static int hclge_client_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_vport_start(vport); +} + +static void hclge_client_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + hclge_vport_stop(vport); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = vport->back; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if (!resp_code || resp_code == 1) + return 0; + else if (resp_code == HCLGE_ADD_UC_OVERFLOW || + resp_code == HCLGE_ADD_MC_OVERFLOW) + return -ENOSPC; + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } + + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); + + return -EINVAL; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ +#define HCLGE_VF_NUM_IN_FIRST_DESC 192 + + unsigned int word_num; + unsigned int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); + } else { + word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); + } + + return 
0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 1; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, + const u8 *addr, bool is_mc) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int +hclge_lookup_mc_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *desc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + memcpy(desc[0].data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_MAC_VLAN_ADD, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_MAC_VLAN_ADD, true); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc[0].retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + u16 retval; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + hclge_cmd_reuse_desc(&mc_desc[0], false); + mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_reuse_desc(&mc_desc[1], false); + mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_reuse_desc(&mc_desc[2], 
false); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(mc_desc[0].retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", + ret); + return ret; + } + + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "Alloc umv space failed, want %u, get %u\n", + hdev->wanted_umv_size, allocated_size); + + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->vport_lock); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + mutex_unlock(&hdev->vport_lock); +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + if (need_lock) + mutex_lock(&hdev->vport_lock); + + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + + if (need_lock) + mutex_unlock(&hdev->vport_lock); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) + hdev->share_umv_size--; + vport->used_umv_num++; + } +} + +static struct hclge_vport_mac_addr_cfg * +hclge_find_mac_node(struct list_head *list, const u8 *mac_addr) +{ + struct hclge_vport_mac_addr_cfg *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + } + return NULL; +} + +static void hclge_update_mac_node(struct hclge_vport_mac_addr_cfg *mac_node, + enum HCLGE_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGE_MAC_TO_ADD: + if (mac_node->state == HCLGE_MAC_TO_DEL) + mac_node->state = HCLGE_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGE_MAC_TO_DEL: + if 
(mac_node->state == HCLGE_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGE_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * ACTIVE. + */ + case HCLGE_MAC_ACTIVE: + if (mac_node->state == HCLGE_MAC_TO_ADD) + mac_node->state = HCLGE_MAC_ACTIVE; + + break; + } +} + +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + struct hclge_vport_mac_addr_cfg *mac_node; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * new state, or just remove it, or do nothing. + */ + mac_node = hclge_find_mac_node(list, addr); + if (mac_node) { + hclge_update_mac_node(mac_node, state); + spin_unlock_bh(&vport->mac_list_lock); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + return 0; + } + + /* if this address is never added, unnecessary to delete */ + if (state == HCLGE_MAC_TO_DEL) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOMEM; + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + u16 egress_port = 0; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%s. 
is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + + req.egress_port = cpu_to_le16(egress_port); + + hclge_prepare_mac_addr(&req, addr, false); + + mutex_lock(&hdev->vport_lock); + if (!hclge_is_umv_space_full(vport, false)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + + mutex_unlock(&hdev->vport_lock); + return ret; + } + mutex_unlock(&hdev->vport_lock); + + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr, false); + ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret || ret == -ENOENT) { + mutex_lock(&hdev->vport_lock); + hclge_update_umv_space(vport, true); + mutex_unlock(&hdev->vport_lock); + return 0; + } + + return ret; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_desc desc[3]; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Add mc mac err! 
invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mc_mac_vlan_tbl(vport, &req, desc); + if (status) { + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + } + status = hclge_update_desc_vfid(desc, vport->vport_id, false); + if (status) + return status; + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + + /* if already overflow, not to print each time */ + if (status == -ENOSPC && + !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + } + + return status; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + enum hclge_cmd_status status; + struct hclge_desc desc[3]; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mc_mac_vlan_tbl(vport, &req, desc); + if (!status) { + /* This mac addr exist, remove this handle's VFID for it */ + status = hclge_update_desc_vfid(desc, vport->vport_id, true); + if (status) + return status; + + if (hclge_is_all_function_id_zero(desc)) + /* All the vfid is zero, so need to delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + else + /* Not all the vfid is zero, update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + + } else if (status == -ENOENT) { + status = 0; + } + + return status; +} + +static void hclge_sync_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*sync)(struct hclge_vport *, + const unsigned char *)) +{ + struct hclge_vport_mac_addr_cfg *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = sync(vport, mac_node->mac_addr); + if (!ret) { + mac_node->state = HCLGE_MAC_ACTIVE; + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} + +static void hclge_unsync_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*unsync)(struct hclge_vport *, + const unsigned char *)) +{ + struct hclge_vport_mac_addr_cfg *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unsync(vport, mac_node->mac_addr); + if (!ret || ret == -ENOENT) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} + +static bool hclge_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclge_vport_mac_addr_cfg *mac_node, *tmp, *new_node; + bool all_added = true; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + if (mac_node->state == HCLGE_MAC_TO_ADD) + all_added = false; + + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means 
we have received a TO_DEL request
+ * during the time window of adding the mac address into the mac
+ * table. If the mac_node state is ACTIVE, then change it to TO_DEL,
+ * and it will be removed next time. Otherwise it must be TO_ADD;
+ * this address hasn't been added into the mac table,
+ * so just remove the mac node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+ return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclge_vport_mac_addr_cfg *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, it means
+ * we received a new TO_ADD request during the time window
+ * of configuring the mac address. Since the mac node
+ * state is TO_ADD and the address is already in the
+ * hardware (due to delete failure), we just need
+ * to change the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
+
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vport_mac_addr_cfg *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ /* move the mac addr to the tmp_add_list and tmp_del_list, then
+ * we can add/delete these mac addr outside the spin lock
+ */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } +stop_traverse: + spin_unlock_bh(&vport->mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + if (mac_type == HCLGE_MAC_ADDR_UC) { + hclge_unsync_mac_list(vport, &tmp_del_list, + hclge_rm_uc_addr_common); + hclge_sync_mac_list(vport, &tmp_add_list, + hclge_add_uc_addr_common); + } else { + hclge_unsync_mac_list(vport, &tmp_del_list, + hclge_rm_mc_addr_common); + hclge_sync_mac_list(vport, &tmp_add_list, + hclge_add_mc_addr_common); + } + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. + */ + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + all_added = hclge_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_update_overflow_flags(vport, mac_type, all_added); + + return; +} + +static bool hclge_need_sync_mac_table(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool time_up; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + return false; + + time_up = !(hdev->serv_processed_cnt % HCLGE_MAC_TBL_SYNC_INTERVAL); + if (time_up || + test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + return true; + + return false; +} + +static void hclge_sync_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + + if (!hclge_need_sync_mac_table(vport)) + continue; + + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + int ret; + + if (mac_type == HCLGE_MAC_ADDR_UC) { + list = &vport->uc_mac_list; + unsync = hclge_rm_uc_addr_common; + } else { + list = &vport->mc_mac_list; + unsync = hclge_rm_mc_addr_common; + } + + INIT_LIST_HEAD(&tmp_del_list); + + if (!is_del_list) + set_bit(vport->vport_id, hdev->vport_config_block); + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + switch (mac_cfg->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_del(&mac_cfg->node); + list_add_tail(&mac_cfg->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + break; + } + } + + spin_unlock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) { + ret = unsync(vport, mac_cfg->mac_addr); + if (!ret || ret == -ENOENT) { + /* clear all mac addr from hardware, but remain these + * mac addr in the mac list, and restore them after + * vf reset finished. 
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vport_mac_addr_cfg *mac_node, *tmp;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ hclge_unsync_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ else
+ hclge_unsync_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ hclge_uninit_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_mac_list(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
+
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%u.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
+ return 0;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+
+ /* there is a time window for the PF to know the VF is unalive, it may
+ * cause the mailbox send to fail, but it doesn't matter, the VF will
+ * query it when it reinitializes.
+ */ + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; + } + + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", + vf, format_mac_addr); + return 0; +} + +static int hclge_add_mgr_tbl(struct hclge_dev *hdev, + const struct hclge_mac_mgr_tbl_entry_cmd *req) +{ + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); + memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); +} + +static int init_mgr_tbl(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { + ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed, ret =%d.\n", + ret); + return ret; + } + } + + return 0; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr) +{ + struct hclge_vport_mac_addr_cfg *old_node, *new_node; + struct list_head *list = &vport->uc_mac_list; + + new_node = hclge_find_mac_node(list, new_addr); + if (!new_node) { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + return -ENOMEM; + new_node->state = HCLGE_MAC_TO_ADD; + ether_addr_copy(new_node->mac_addr, new_addr); + list_add(&new_node->node, list); + } else { + if (new_node->state == HCLGE_MAC_TO_DEL) + new_node->state = HCLGE_MAC_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after global/imp reset which will clear mac + * table by hardware. + */ + list_move(&new_node->node, list); + } + + if (old_addr && !ether_addr_equal(old_addr, new_addr)) { + old_node = hclge_find_mac_node(list, old_addr); + if (old_node) { + if (old_node->state == HCLGE_MAC_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = HCLGE_MAC_TO_DEL; + } + } + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return 0; +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + unsigned char *old_addr = NULL; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "Change uc mac err! 
invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + ret = hclge_pause_addr_cfg(hdev, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "Failed to configure mac pause address, ret=%d\n", + ret); + return ret; + } + + if (!is_first) + old_addr = hdev->hw.mac.mac_addr; + + spin_lock_bh(&vport->mac_list_lock); + ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); + if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "Failed to change the mac addr:%s, ret =%d\n", + format_mac_addr, ret); + spin_unlock_bh(&vport->mac_list_lock); + return ret; + } + /* we must update dev addr with spin lock protect, preventing dev addr + * being removed by set_rx_mode path. + */ + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + spin_unlock_bh(&vport->mac_list_lock); + + hclge_task_schedule(hdev, 0); + + return 0; +} + +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.phydev) + return -EOPNOTSUPP; + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + u8 fe_type, bool filter_en, u8 vf_id) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + /* read current vlan filter parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_cmd_reuse_desc(&desc, false); + req->vlan_fe = filter_en ? 
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set vport%u vlan filter, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (hdev->pdev->revision < HNAE3_REVISION_ID_21) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + enable, vport->vport_id); + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable, + vport->vport_id); + if (ret) + return ret; + + if (!vport->vport_id) { + if (hnae3_dev_vlan_fltr_mdf_supported(hdev)) + enable = false; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, + enable, 0); + } + + return ret; +} + +static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + if (vport->vport_id) { + if (vport->port_base_vlan_cfg.state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return true; + + if (vport->vf_info.trusted && vport->vf_info.request_uc_en) + return false; + } else if (handle->netdev_flags & HNAE3_USER_UPE) { + return false; + } + + if (!vport->req_vlan_fltr_en) + return false; + + /* compatible with former device, always enable vlan filter */ + if (!hnae3_dev_vlan_fltr_mdf_supported(hdev)) + return true; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id != 0) + return true; + + return false; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + bool need_en; + int ret; + + mutex_lock(&hdev->vport_lock); + + vport->req_vlan_fltr_en = request_en; + + need_en = hclge_need_enable_vport_vlan_filter(vport); + if (need_en == vport->cur_vlan_fltr_en) { + mutex_unlock(&hdev->vport_lock); + return 0; + } + + ret = hclge_set_vport_vlan_filter(vport, need_en); + if (ret) { + mutex_unlock(&hdev->vport_lock); + return ret; + } + + vport->cur_vlan_fltr_en = need_en; + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_enable_vport_vlan_filter(vport, enable); +} + +static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, + bool is_kill, u16 vlan) +{ + struct hclge_vport *vport = &hdev->vport[vfid]; + struct hclge_vlan_filter_vf_cfg_cmd *req0; + struct hclge_vlan_filter_vf_cfg_cmd *req1; + struct hclge_desc desc[2]; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + /* if vf vlan table is full, firmware will close vf vlan filter, it + * is unable and unnecessary to add new vlan id to vf vlan filter. + * If spoof check is enable, and vf vlan is full, it shouldn't add + * new vlan, because tx packets with these vlan id will be dropped. 
+ */ + if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { + if (vport->vf_info.spoofchk && vlan) { + dev_err(&hdev->pdev->dev, + "Can't add vlan due to spoof check is on and vf vlan table is full\n"); + return -EPERM; + } + return 0; + } + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; + + req0->vlan_id = cpu_to_le16(vlan); + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 + if (!req0->resp_code || req0->resp_code == 1) + return 0; + + if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + set_bit(vfid, hdev->vf_vlan_full); + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%u.\n", + req0->resp_code); + } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 + if (!req0->resp_code) + return 0; + + /* vf vlan filter is disabled when vf vlan table is full, + * then new vlan id will not be added into vf vlan table. + * Just return 0 without warning, avoid massive verbose + * print logs when unload. + */ + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%u.\n", + req0->resp_code); + } + + return -EIO; +} + +static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vlan_filter_pf_cfg_cmd *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / + HCLGE_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); + + req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, + u16 vport_id, u16 vlan_id, + bool is_kill) +{ + u16 vport_idx, vport_num = 0; + int ret; + + if (is_kill && !vlan_id) + return 0; + + if (vlan_id >= VLAN_N_VID) + return -EINVAL; + + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set %u vport vlan filter config fail, ret =%d.\n", + vport_id, ret); + return ret; + } + + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return 0; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_err(&hdev->pdev->dev, + "Add port vlan failed, vport %u is 
already in vlan %u\n", + vport_id, vlan_id); + return -EINVAL; + } + + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_err(&hdev->pdev->dev, + "Delete port vlan failed, vlan %u is not in vport %u\n", + vlan_id, vport_id); + return -EINVAL; + } + + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) + vport_num++; + + if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) + ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, + is_kill); + + return ret; +} + +static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); + req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port txvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port rxvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_vlan_offload_cfg(struct hclge_vport *vport, + u16 port_base_vlan_state, + u16 vlan_tag, u8 qos) +{ + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.default_tag1 = 0; + } else { + vport->txvlan_cfg.accept_tag1 = false; + vport->txvlan_cfg.insert_tag1_en = true; + vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | + vlan_tag; + } + + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them, + * this two fields can not be configured by user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag2 = 0; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + } else { + vport->rxvlan_cfg.strip_tag1_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_en = true; + } + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) { struct hclge_rx_vlan_type_cfg_cmd *rx_req; struct hclge_tx_vlan_type_cfg_cmd *tx_req; @@ -4896,7 +8786,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); - tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); @@ -4913,18 +8803,42 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_DEF_VLAN_TYPE 0x8100 - struct hnae3_handle *handle; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); - if (ret) - return ret; + if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { + bool enable = true; + + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, + HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, + true, + vport->vport_id); + if (ret) + return ret; + vport->cur_vlan_fltr_en = true; + } - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); - if (ret) - return ret; + if (hnae3_dev_vlan_fltr_mdf_supported(hdev)) + enable = false; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, + enable, 0); + if (ret) + return ret; + } else { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); + if (ret) + return ret; + 
} hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; @@ -4938,106 +8852,654 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + u16 vlan_tag; + u8 qos; + vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag1 = true; - vport->txvlan_cfg.accept_untag1 = true; + vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + qos = vport->port_base_vlan_cfg.vlan_info.qos; + + ret = hclge_vlan_offload_cfg(vport, + vport->port_base_vlan_cfg.state, + vlan_tag, qos); + if (ret) + return ret; + } + + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +} + +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); + return; + } + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + mutex_unlock(&hdev->vport_lock); + return; + } + + vlan->hd_tbl_status = writen_to_tbl; + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); +} + +static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore vport vlan list failed, ret=%d\n", + ret); + + mutex_unlock(&hdev->vport_lock); + return ret; + } + } + vlan->hd_tbl_status = true; + } + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } + + mutex_unlock(&hdev->vport_lock); +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->vlan_id) + continue; + + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } + clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_lock); + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } 
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+}
+
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ u16 vlan_proto;
+ u16 vlan_id;
+ u16 state;
+ int vf_id;
+ int ret;
+
+ /* PF should restore all vfs port base vlan */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
+
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
+ }
+ }
+}
+
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+}
+
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
+ * can be restored in the service task after reset completes. Furthermore,
+ * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
+ * be restored after reset, so just remove these mac nodes from mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_vport_mac_addr_cfg *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
- /* accept_tag2 and accept_untag2 are not supported on
- * pdev revision(0x20), new revision support them. The
- * value of this two fields will not return error when driver
- * send command to fireware in revision(0x20).
- * This two fields can not configured by user.
+ hclge_restore_fd_entries(handle); +} + +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + } + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport->rxvlan_cfg.rx_vlan_offload_en = enable; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (hnae3_dev_vlan_fltr_mdf_supported(hdev)) + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); +} + +static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, + u16 port_base_vlan_state, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { + hclge_rm_vport_all_vlan_table(vport, false); + /* force clear VLAN 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); + if (ret) + return ret; + return hclge_set_vlan_filter_hw(hdev, + htons(new_info->vlan_proto), + vport->vport_id, + new_info->vlan_tag, + false); + } + + vport->port_base_vlan_cfg.tbl_sta = false; + + /* force add vlan 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), + vport->vport_id, old_info->vlan_tag, + true); + if (ret) + return ret; + + return hclge_add_vport_all_vlan_table(vport); +} + +static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, + const struct hclge_vlan_info *old_cfg) +{ + if (new_cfg->vlan_tag != old_cfg->vlan_tag) + return true; + + if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) + return true; + + return false; +} + +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_vlan_info *old_vlan_info; + struct hclge_dev *hdev = vport->back; + int ret; + + old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, + vlan_info->qos); + if (ret) + return ret; + + if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) + goto out; + + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, + htons(vlan_info->vlan_proto), + vport->vport_id, + vlan_info->vlan_tag, + false); + if (ret) + return ret; + vport->port_base_vlan_cfg.tbl_sta = false; + /* remove old VLAN tag */ + if (old_vlan_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + old_vlan_info->vlan_tag, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_vlan_info->vlan_tag, ret); + return ret; + } + + goto out; + } + + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); + if (ret) + return ret; + +out: + vport->port_base_vlan_cfg.state = state; + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = 
HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; + vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, + enum hnae3_port_base_vlan_state state, + u16 vlan, u8 qos) +{ + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_ENABLE; + } + + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_DISABLE; + + if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && + vport->port_base_vlan_cfg.vlan_info.qos == qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_MODIFY; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + u16 state; + int ret; + + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vfid); + if (!vport) + return -EINVAL; + + /* qos is a 3 bits value, so can not be bigger than 7 */ + if (vlan > VLAN_N_VID - 1 || qos > 7) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + state = hclge_get_port_base_vlan_state(vport, + vport->port_base_vlan_cfg.state, + vlan, qos); + if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) + return 0; + + vlan_info.vlan_tag = vlan; + vlan_info.qos = qos; + vlan_info.vlan_proto = ntohs(proto); + + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update port base vlan for vf %d, ret = %d\n", + vfid, ret); + return ret; + } + + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + */ + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, &vlan_info); + return 0; +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool writen_to_tbl = false; + int ret = 0; + + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after + * reset finished. + */ + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { + set_bit(vlan_id, vport->vlan_del_fail_bmap); + return -EBUSY; + } + + /* when port base vlan enabled, we use port base vlan as the vlan + * filter entry. In this case, we don't update vlan filter table + * when user add new vlan or remove exist vlan, just update the vport + * vlan list. 
The vlan id in vlan list will be writen in vlan filter + * table until port base vlan disabled + */ + if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, + vlan_id, is_kill); + writen_to_tbl = true; + } + + if (!ret) { + if (is_kill && vlan_id != 0) + hclge_rm_vport_vlan_table(vport, vlan_id, false); + else + hclge_add_vport_vlan_table(vport, vlan_id, + writen_to_tbl); + } else if (is_kill) { + /* when remove hw vlan filter failed, record the vlan id, + * and try to remove it from hw later, to be consistence + * with stack */ - vport->txvlan_cfg.accept_tag2 = true; - vport->txvlan_cfg.accept_untag2 = true; + set_bit(vlan_id, vport->vlan_del_fail_bmap); + } + + hclge_set_vport_vlan_fltr_change(vport); - vport->txvlan_cfg.insert_tag1_en = false; - vport->txvlan_cfg.insert_tag2_en = false; - vport->txvlan_cfg.default_tag1 = 0; - vport->txvlan_cfg.default_tag2 = 0; + return ret; +} - ret = hclge_set_vlan_tx_offload_cfg(vport); - if (ret) - return ret; +static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = true; - vport->rxvlan_cfg.vlan1_vlan_prionly = false; - vport->rxvlan_cfg.vlan2_vlan_prionly = false; + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state)) + continue; - ret = hclge_set_vlan_rx_offload_cfg(vport); - if (ret) - return ret; + ret = hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to sync vlan filter state for vport%u, ret = %d\n", + vport->vport_id, ret); + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + return; + } } - - handle = &hdev->vport[0].nic; - return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } -int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +static void hclge_sync_vlan_filter(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(handle); +#define HCLGE_MAX_SYNC_COUNT 60 - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = enable; - vport->rxvlan_cfg.vlan1_vlan_prionly = false; - vport->rxvlan_cfg.vlan2_vlan_prionly = false; + int i, ret, sync_cnt = 0; + u16 vlan_id; - return hclge_set_vlan_rx_offload_cfg(vport); + /* start from vport 1 for PF is always alive */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + while (vlan_id != VLAN_N_VID) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, vlan_id, + true); + if (ret && ret != -EINVAL) + return; + + clear_bit(vlan_id, vport->vlan_del_fail_bmap); + hclge_rm_vport_vlan_table(vport, vlan_id, false); + hclge_set_vport_vlan_fltr_change(vport); + + sync_cnt++; + if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) + return; + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + } + } + + hclge_sync_vlan_fltr_state(hdev); } -static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; struct hclge_desc desc; - int max_frm_size; - int ret; - - max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if (max_frm_size < HCLGE_MAC_MIN_FRAME || - max_frm_size > 
HCLGE_MAC_MAX_FRAME) - return -EINVAL; - - max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); req = (struct hclge_config_max_frm_size_cmd *)desc.data; - req->max_frm_size = cpu_to_le16(max_frm_size); + req->max_frm_size = cpu_to_le16(new_mps); req->min_frm_size = HCLGE_MAC_MIN_FRAME; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); - else - hdev->mps = max_frm_size; - - return ret; + return hclge_cmd_send(&hdev->hw, &desc, 1); } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_set_vport_mtu(vport, new_mtu); +} + +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) +{ struct hclge_dev *hdev = vport->back; - int ret; + int i, max_frm_size, ret; + + /* HW supprt 2 layer vlan */ + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > HCLGE_MAC_MAX_FRAME) + return -EINVAL; + + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + mutex_lock(&hdev->vport_lock); + /* VF's mps must fit within hdev->mps */ + if (vport->vport_id && max_frm_size > hdev->mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } else if (vport->vport_id) { + vport->mps = max_frm_size; + mutex_unlock(&hdev->vport_lock); + return 0; + } + + /* PF's mps must be greater then VF's mps */ + for (i = 1; i < hdev->num_alloc_vport; i++) + if (max_frm_size < hdev->vport[i].mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } - ret = hclge_set_mac_mtu(hdev, new_mtu); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + mutex_unlock(&hdev->vport_lock); + return ret; + } + + ret = hclge_set_mac_mtu(hdev, max_frm_size); if (ret) { dev_err(&hdev->pdev->dev, "Change mtu fail, ret =%d\n", ret); - return ret; + goto out; } + hdev->mps = max_frm_size; + vport->mps = max_frm_size; + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, "Allocate buffer fail, ret =%d\n", ret); +out: + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + mutex_unlock(&hdev->vport_lock); return ret; } -static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, +static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, bool enable) { struct hclge_reset_tqp_queue_cmd *req; @@ -5048,7 +9510,8 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); - hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + if (enable) + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5060,7 +9523,8 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, return 0; } -static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, + u8 *reset_status) { struct hclge_reset_tqp_queue_cmd *req; struct hclge_desc desc; @@ -5078,11 +9542,12 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return ret; } - return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + + return 0; } -static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, - u16 queue_id) +u16 
hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) { struct hnae3_queue *queue; struct hclge_tqp *tqp; @@ -5093,112 +9558,125 @@ static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, return tqp->index; } -void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +int hclge_reset_tqp_cmd(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int reset_try_times = 0; - int reset_status; + u16 reset_try_times = 0; + u8 reset_status; u16 queue_gid; int ret; + u16 i; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - return; - - queue_gid = hclge_covert_handle_qid_global(handle, queue_id); + for (i = 0; i < handle->kinfo.num_tqps; i++) { + queue_gid = hclge_covert_handle_qid_global(handle, i); + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to send reset tqp cmd, ret = %d\n", + ret); + return ret; + } - ret = hclge_tqp_enable(hdev, queue_id, 0, false); - if (ret) { - dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); - return; - } + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + ret = hclge_get_reset_status(hdev, queue_gid, + &reset_status); + if (ret) + return ret; - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); - if (ret) { - dev_warn(&hdev->pdev->dev, - "Send reset tqp cmd fail, ret = %d\n", ret); - return; - } + if (reset_status) + break; - reset_try_times = 0; - while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - /* Wait for tqp hw reset */ - msleep(20); - reset_status = hclge_get_reset_status(hdev, queue_gid); - if (reset_status) - break; - } + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); + } - if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { - dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); - return; - } + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "wait for tqp hw reset timeout\n"); + return -ETIME; + } - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); - if (ret) { - dev_warn(&hdev->pdev->dev, - "Deassert the soft reset fail, ret = %d\n", ret); - return; + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to deassert soft reset, ret = %d\n", + ret); + return ret; + } + reset_try_times = 0; } + return 0; } -void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) +int hclge_reset_rcb(struct hnae3_handle *handle) { +#define HCLGE_RESET_RCB_NOT_SUPPORT 0U +#define HCLGE_RESET_RCB_SUCCESS 1U + struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int reset_try_times = 0; - int reset_status; + struct hclge_reset_cmd *req; + struct hclge_desc desc; + u8 return_status; u16 queue_gid; int ret; - queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); + queue_gid = hclge_covert_handle_qid_global(handle, 0); + + req = (struct hclge_reset_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); + req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); + req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_warn(&hdev->pdev->dev, - "Send reset tqp cmd fail, ret = %d\n", ret); - return; + dev_err(&hdev->pdev->dev, + "failed to send rcb reset cmd, ret = %d\n", ret); + return ret; } - 
reset_try_times = 0; - while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { - /* Wait for tqp hw reset */ - msleep(20); - reset_status = hclge_get_reset_status(hdev, queue_gid); - if (reset_status) - break; - } + return_status = req->fun_reset_rcb_return_status; + if (return_status == HCLGE_RESET_RCB_SUCCESS) + return 0; - if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { - dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); - return; + if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { + dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", + return_status); + return -EIO; } - ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); - if (ret) - dev_warn(&hdev->pdev->dev, - "Deassert the soft reset fail, ret = %d\n", ret); + /* if reset rcb cmd is unsupported, we need to send reset tqp cmd + * again to reset all tqps + */ + return hclge_reset_tqp_cmd(handle); } -static u32 hclge_get_fw_version(struct hnae3_handle *handle) +int hclge_reset_tqp(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + int ret; - return hdev->fw_version; + /* only need to disable PF's tqp */ + if (!vport->vport_id) { + ret = hclge_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to disable tqp, ret = %d\n", ret); + return ret; + } + } + + return hclge_reset_rcb(handle); } -static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, - u32 *flowctrl_adv) +static u32 hclge_get_fw_version(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) - return; - *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | - (phydev->advertising & ADVERTISED_Asym_Pause); + return hdev->fw_version; } static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) @@ -5207,7 +9685,10 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (!phydev) return; +#ifdef HAS_LINK_MODE_OPS + phy_set_asym_pause(phydev, rx_en, tx_en); +#else phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); if (rx_en) @@ -5215,34 +9696,22 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (tx_en) phydev->advertising ^= ADVERTISED_Asym_Pause; +#endif } static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { int ret; - if (rx_en && tx_en) - hdev->fc_mode_last_time = HCLGE_FC_FULL; - else if (rx_en && !tx_en) - hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; - else if (!rx_en && tx_en) - hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; - else - hdev->fc_mode_last_time = HCLGE_FC_NONE; - if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) return 0; ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); - if (ret) { - dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", - ret); - return ret; - } - - hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + if (ret) + dev_err(&hdev->pdev->dev, + "configure pauseparam error, ret = %d.\n", ret); - return 0; + return ret; } int hclge_cfg_flowctrl(struct hclge_dev *hdev) @@ -5255,12 +9724,15 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - +#ifdef HAS_LINK_MODE_OPS + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); +#else if (phydev->advertising & ADVERTISED_Pause) local_advertising = ADVERTISE_PAUSE_CAP; if (phydev->advertising & ADVERTISED_Asym_Pause) local_advertising |= 
ADVERTISE_PAUSE_ASYM; +#endif if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -5286,8 +9758,9 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; - *auto_neg = hclge_get_autoneg(handle); + *auto_neg = phydev ? hclge_get_autoneg(handle) : 0; if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { *rx_en = 0; @@ -5310,6 +9783,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, } } +static void hclge_record_user_pauseparam(struct hclge_dev *hdev, + u32 rx_en, u32 tx_en) +{ + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; +} + static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en) { @@ -5318,11 +9806,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, struct phy_device *phydev = hdev->hw.mac.phydev; u32 fc_autoneg; - fc_autoneg = hclge_get_autoneg(handle); - if (auto_neg != fc_autoneg) { - dev_info(&hdev->pdev->dev, - "To change autoneg please use: ethtool -s autoneg \n"); - return -EOPNOTSUPP; + if (phydev) { + fc_autoneg = hclge_get_autoneg(handle); + if (auto_neg != fc_autoneg) { + dev_info(&hdev->pdev->dev, + "To change autoneg please use: ethtool -s autoneg \n"); + return -EOPNOTSUPP; + } } if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { @@ -5333,16 +9823,15 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, hclge_set_flowctrl_adv(hdev, rx_en, tx_en); - if (!fc_autoneg) + hclge_record_user_pauseparam(hdev, rx_en, tx_en); + + if (!auto_neg) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); - /* Only support flow control negotiation for netdev with - * phy attached for now. - */ - if (!phydev) - return -EOPNOTSUPP; + if (phydev) + return phy_start_aneg(phydev); - return phy_start_aneg(phydev); + return -EOPNOTSUPP; } static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, @@ -5359,13 +9848,23 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, *auto_neg = hdev->hw.mac.autoneg; } -static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + /* When nic is down, the service task is not running, doesn't update + * the port information per second. Query the port information before + * return the media type, ensure getting the correct media information. 
+ */ + hclge_update_port_info(hdev); + if (media_type) *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; } static void hclge_get_mdix_mode(struct hnae3_handle *handle, @@ -5374,7 +9873,8 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct phy_device *phydev = hdev->hw.mac.phydev; - int mdix_ctrl, mdix, retval, is_resolved; + int mdix_ctrl, mdix, is_resolved; + unsigned int retval; if (!phydev) { *tp_mdix_ctrl = ETH_TP_MDI_INVALID; @@ -5417,14 +9917,130 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } -static int hclge_init_instance_hw(struct hclge_dev *hdev) +static void hclge_info_show(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "PF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport); + dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: %u\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); + dev_info(dev, "This is %s PF\n", + hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); + dev_info(dev, "DCB %s\n", + hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); + dev_info(dev, "MQPRIO %s\n", + hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? 
"enable" : "disable"); + + dev_info(dev, "PF info end.\n"); +} + +static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) { - return hclge_mac_connect_phy(hdev); + struct hnae3_client *client = vport->nic.client; + struct hclge_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.reset_cnt; + int ret; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + return ret; + +#ifdef CONFIG_HNS3_TEST + if (ae_dev->ops->ext_init) + ae_dev->ops->ext_init(&vport->nic); +#endif + + set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_nic_err; + } + + /* Enable nic hw error interrupts */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable hw error interrupts\n", ret); + goto init_nic_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + if (netif_msg_drv(&hdev->vport->nic)) + hclge_info_show(hdev); + + return ret; + +init_nic_err: + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + + return ret; } -static void hclge_uninit_instance_hw(struct hclge_dev *hdev) +static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) { - hclge_mac_disconnect_phy(hdev); + struct hnae3_client *client = vport->roce.client; + struct hclge_dev *hdev = ae_dev->priv; + int rst_cnt; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + client = hdev->roce_client; + ret = hclge_init_roce_base_info(vport); + if (ret) + return ret; + + rst_cnt = hdev->rst_stats.reset_cnt; + ret = client->ops->init_instance(&vport->roce); + if (ret) + return ret; + + set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_roce_err; + } + + /* Enable roce ras interrupts */ + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable roce ras interrupts\n", ret); + goto init_roce_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; + +init_roce_err: + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + + return ret; } static int hclge_init_client_instance(struct hnae3_client *client, @@ -5439,41 +10055,15 @@ static int hclge_init_client_instance(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - hdev->nic_client = client; vport->nic.client = client; - ret = client->ops->init_instance(&vport->nic); + ret = hclge_init_nic_client_instance(ae_dev, vport); if (ret) - return ret; - - ret = hclge_init_instance_hw(hdev); - if (ret) { - client->ops->uninit_instance(&vport->nic, - 0); - return ret; - } + goto clear_nic; - if (hdev->roce_client && - hnae3_dev_roce_supported(hdev)) { - struct hnae3_client *rc = hdev->roce_client; - - ret = hclge_init_roce_base_info(vport); - if (ret) - return ret; - - ret = rc->ops->init_instance(&vport->roce); - if (ret) - return ret; - } - - break; - case HNAE3_CLIENT_UNIC: - hdev->nic_client = client; - vport->nic.client = client; - - ret = 
client->ops->init_instance(&vport->nic); + ret = hclge_init_roce_client_instance(ae_dev, vport); if (ret) - return ret; + goto clear_roce; break; case HNAE3_CLIENT_ROCE: @@ -5482,19 +10072,26 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client && hdev->nic_client) { - ret = hclge_init_roce_base_info(vport); - if (ret) - return ret; + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; - ret = client->ops->init_instance(&vport->roce); - if (ret) - return ret; - } + break; + default: + return -EINVAL; } } return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, @@ -5504,9 +10101,20 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, struct hclge_vport *vport; int i; +#ifdef CONFIG_HNS3_TEST + if (ae_dev->ops->ext_uninit) { + vport = &hdev->vport[0]; + ae_dev->ops->ext_uninit(&vport->nic); + } +#endif + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport = &hdev->vport[i]; if (hdev->roce_client) { + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); hdev->roce_client = NULL; @@ -5514,8 +10122,11 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) { - hclge_uninit_instance_hw(hdev); + if (hdev->nic_client && client->ops->uninit_instance) { + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; vport->nic.client = NULL; @@ -5590,6 +10201,7 @@ static void hclge_state_init(struct hclge_dev *hdev) set_bit(HCLGE_STATE_DOWN, &hdev->state); clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); } @@ -5597,15 +10209,99 @@ static void hclge_state_init(struct hclge_dev *hdev) static void hclge_state_uninit(struct hclge_dev *hdev) { set_bit(HCLGE_STATE_DOWN, &hdev->state); + set_bit(HCLGE_STATE_REMOVING, &hdev->state); + + if (hdev->reset_timer.function) + del_timer_sync(&hdev->reset_timer); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); +} + +static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ +#define HCLGE_RESET_RETRY_WAIT_MS 500 +#define HCLGE_RESET_RETRY_CNT 5 + + struct hclge_dev *hdev = ae_dev->priv; + int retry_cnt = 0; + int ret; + +retry: + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclge_reset_prepare(hdev); + if (ret || hdev->reset_pending) { + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", + ret); + if (hdev->reset_pending || + retry_cnt++ < HCLGE_RESET_RETRY_CNT) { + dev_err(&hdev->pdev->dev, + "reset_pending:0x%lx, retry_cnt:%d\n", + hdev->reset_pending, retry_cnt); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + 
msleep(HCLGE_RESET_RETRY_WAIT_MS); + goto retry; + } + } + + /* disable misc vector before reset done */ + hclge_enable_vector(&hdev->misc_vector, false); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; +} + +static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + int ret; + + hclge_enable_vector(&hdev->misc_vector, true); + + ret = hclge_reset_rebuild(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); + + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); +static void hclge_clear_resetting_state(struct hclge_dev *hdev) +{ + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to clear vport's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "clear vport(%u) rst failed %d!\n", + vport->vport_id, ret); + } +} + +static void hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_CLEAR_HW_RESOURCE, just return without warning + */ + if (ret && ret != -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, + "clear hw resource incomplete, ret = %d\n", ret); } static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -5615,40 +10311,41 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) int ret; hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); - if (!hdev) { - ret = -ENOMEM; - goto out; - } + if (!hdev) + return -ENOMEM; hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_level = HNAE3_FUNC_RESET; ae_dev->priv = hdev; + /* HW supprt 2 layer vlan */ + hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + + mutex_init(&hdev->vport_lock); + spin_lock_init(&hdev->fd_rule_lock); + sema_init(&hdev->reset_sem, 1); + ret = hclge_pci_init(hdev); - if (ret) { - dev_err(&pdev->dev, "PCI init failed\n"); + if (ret) goto out; - } /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); - if (ret) { - dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); + if (ret) goto err_pci_uninit; - } /* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) goto err_cmd_uninit; + hclge_clear_hw_resource(hdev); + ret = hclge_get_cap(hdev); - if (ret) { - dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", - ret); + if (ret) goto err_cmd_uninit; - } ret = hclge_configure(hdev); if (ret) { @@ -5663,12 +10360,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } ret = hclge_misc_irq_init(hdev); - if (ret) { - dev_err(&pdev->dev, - "Misc IRQ(vector0) init error, ret = %d.\n", - ret); + if (ret) goto err_msi_uninit; - } ret = hclge_alloc_tqps(hdev); if (ret) { @@ -5677,26 +10370,23 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } ret = hclge_alloc_vport(hdev); - if (ret) { - dev_err(&pdev->dev, 
"Allocate vport error, ret = %d.\n", ret); + if (ret) goto err_msi_irq_uninit; - } ret = hclge_map_tqp(hdev); - if (ret) { - dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); + if (ret) goto err_msi_irq_uninit; - } if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { ret = hclge_mac_mdio_config(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "mdio config fail ret=%d\n", ret); + if (ret) goto err_msi_irq_uninit; - } } + ret = hclge_init_umv_space(hdev); + if (ret) + goto err_mdiobus_unreg; + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -5709,6 +10399,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_config_gro(hdev, true); + if (ret) + goto err_mdiobus_unreg; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -5734,21 +10428,54 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + + INIT_KFIFO(hdev->mac_tnl_log); + hclge_dcb_ops_set(hdev); - timer_setup(&hdev->service_timer, hclge_service_timer, 0); - INIT_WORK(&hdev->service_task, hclge_service_task); - INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); - INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); + timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); + INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); + + /* Setup affinity after service timer setup because add_timer_on + * is called in affinity notify. + */ + hclge_misc_affinity_setup(hdev); hclge_clear_all_event_cause(hdev); + hclge_clear_resetting_state(hdev); + + /* Log and clear the hw errors those already occurred */ + hclge_handle_all_hns_hw_errors(ae_dev); + + /* request delayed reset for the error recovery because an immediate + * global reset on a PF affecting pending initialization of other PFs + */ + if (ae_dev->hw_err_reset_req) { + enum hnae3_reset_type reset_level; + + reset_level = hclge_get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + hclge_set_def_reset_request(ae_dev, reset_level); + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + } /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); hclge_state_init(hdev); + hdev->last_reset_time = jiffies; + + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); - pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; err_mdiobus_unreg: @@ -5759,19 +10486,234 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); out: + mutex_destroy(&hdev->vport_lock); return ret; } static void hclge_stats_clear(struct hclge_dev *hdev) { - memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); + memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); +} + +static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_config_switch_param(hdev, vf, enable, + HCLGE_SWITCH_ANTI_SPOOF_MASK); +} + +static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return 
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_NIC_INGRESS_B, + enable, vf); +} + +static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) +{ + int ret; + + ret = hclge_set_mac_spoofchk(hdev, vf, enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set vf %d mac spoof check %s failed, ret=%d\n", + vf, enable ? "on" : "off", ret); + return ret; + } + + ret = hclge_set_vlan_spoofchk(hdev, vf, enable); + if (ret) + dev_err(&hdev->pdev->dev, + "Set vf %d vlan spoof check %s failed, ret=%d\n", + vf, enable ? "on" : "off", ret); + + return ret; +} + +static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, + bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_spoofchk = enable ? 1 : 0; + int ret; + + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.spoofchk == new_spoofchk) + return 0; + + if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) + dev_warn(&hdev->pdev->dev, + "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", + vf); + else if (enable && hclge_is_umv_space_full(vport, true)) + dev_warn(&hdev->pdev->dev, + "vf %d mac table is full, enable spoof check may cause its packet send fail\n", + vf); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); + if (ret) + return ret; + + vport->vf_info.spoofchk = new_spoofchk; + return 0; +} + +static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + int i; + + if (hdev->pdev->revision == 0x20) + return 0; + + /* resume the vf spoof check state after reset */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, + vport->vf_info.spoofchk); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_trusted = enable ? 
1 : 0; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.trusted == new_trusted) + return 0; + + vport->vf_info.trusted = new_trusted; + + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); + + return 0; +} + +static void hclge_reset_vf_rate(struct hclge_dev *hdev) +{ + int ret; + int vf; + + /* reset vf rate to default value */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + struct hclge_vport *vport = &hdev->vport[vf]; + + vport->vf_info.max_tx_rate = 0; + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) + dev_err(&hdev->pdev->dev, + "vf%d failed to reset to default, ret=%d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + if (min_tx_rate != 0 || + max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { + dev_err(&hdev->pdev->dev, + "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", + min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); + return -EINVAL; + } + + return 0; +} + +static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate); + if (ret) + return ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (!force && max_tx_rate == vport->vf_info.max_tx_rate) + return 0; + + ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); + if (ret) + return ret; + + vport->vf_info.max_tx_rate = max_tx_rate; + + return 0; +} + +static int hclge_resume_vf_rate(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct hclge_vport *vport; + int ret; + int vf; + + /* resume the vf max_tx_rate after reset */ + for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + /* zero means max rate, after reset, firmware already set it to + * max rate, so just continue. + */ + if (!vport->vf_info.max_tx_rate) + continue; + + ret = hclge_set_vf_rate(handle, vf, 0, + vport->vf_info.max_tx_rate, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf%d failed to resume tx_rate:%u, ret=%d\n", + vf, vport->vf_info.max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static void hclge_reset_vport_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_vport_stop(vport); + vport++; + } } static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -5783,7 +10725,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); hclge_stats_clear(hdev); - memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + /* NOTE: pf reset needn't to clear or restore pf and vf table entry. + * so here should not clean table in memory. 
+ */ + if (hdev->reset_type == HNAE3_IMP_RESET || + hdev->reset_type == HNAE3_GLOBAL_RESET) { + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); + bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); + hclge_reset_umv_space(hdev); + } ret = hclge_cmd_init(hdev); if (ret) { @@ -5791,19 +10742,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_get_cap(hdev); - if (ret) { - dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", - ret); - return ret; - } - - ret = hclge_configure(hdev); - if (ret) { - dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); - return ret; - } - ret = hclge_map_tqp(hdev); if (ret) { dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); @@ -5822,13 +10760,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_config_gro(hdev, true); + if (ret) + return ret; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); return ret; } - ret = hclge_tm_init_hw(hdev); + ret = hclge_tm_init_hw(hdev, true); if (ret) { dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); return ret; @@ -5840,6 +10782,59 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to reinit manager table, ret = %d\n", ret); + return ret; + } + + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); + return ret; + } + + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + + /* Log and clear the hw errors those already occurred */ + hclge_handle_all_hns_hw_errors(ae_dev); + + /* Re-enable the hw error interrupts because + * the interrupts get disabled on global reset. 
+ */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable NIC hw error interrupts\n", + ret); + return ret; + } + + if (hdev->roce_client) { + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable roce ras interrupts\n", + ret); + return ret; + } + } + + hclge_reset_vport_state(hdev); + ret = hclge_reset_vport_spoofchk(hdev); + if (ret) + return ret; + + ret = hclge_resume_vf_rate(hdev); + if (ret) + return ret; + +#ifdef CONFIG_HNS3_TEST + if (ae_dev->ops->ext_reset_done) + ae_dev->ops->ext_reset_done(&hdev->vport->nic); +#endif + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -5851,7 +10846,10 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; + hclge_reset_vf_rate(hdev); + hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); + hclge_uninit_vport_mac_table(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -5860,99 +10858,67 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); - hclge_destroy_cmd_queue(&hdev->hw); + /* Disable all hw interrupts */ + hclge_config_mac_tnl_int(hdev, false); + hclge_config_nic_hw_error(hdev, false); + hclge_config_rocee_ras_interrupt(hdev, false); + + hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } static u32 hclge_get_max_channels(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, vport->alloc_tqps); } static void hclge_get_channels(struct hnae3_handle *handle, struct ethtool_channels *ch) { - struct hclge_vport *vport = hclge_get_vport(handle); - ch->max_combined = hclge_get_max_channels(handle); ch->other_count = 1; ch->max_other = 1; - ch->combined_count = vport->alloc_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u16 temp_tqps = 0; - int i; - for (i = 0; i < hdev->num_tqps; i++) { - if (!hdev->htqp[i].alloced) - temp_tqps++; - } - *free_tqps = temp_tqps; + *alloc_tqps = vport->alloc_tqps; *max_rss_size = hdev->rss_size_max; } -static void hclge_release_tqp(struct hclge_vport *vport) -{ - struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - struct hclge_dev *hdev = vport->back; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclge_tqp *tqp = - container_of(kinfo->tqp[i], struct hclge_tqp, q); - - tqp->q.handle = NULL; - tqp->q.tqp_index = 0; - tqp->alloced = false; - } - - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = NULL; -} - -static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) { struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + u16 
tc_offset[HCLGE_MAX_TC_NUM] = {0}; struct hclge_dev *hdev = vport->back; - int cur_rss_size = kinfo->rss_size; - int cur_tqps = kinfo->num_tqps; - u16 tc_offset[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 cur_rss_size = kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 tc_size[HCLGE_MAX_TC_NUM]; u16 roundup_size; u32 *rss_indir; - int ret, i; - - /* Free old tqps, and reallocate with new tqp number when nic setup */ - hclge_release_tqp(vport); - - ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); - if (ret) { - dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); - return ret; - } + unsigned int i; + int ret; - ret = hclge_map_tqp_to_vport(hdev, vport); - if (ret) { - dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); - return ret; - } + kinfo->req_rss_size = new_tqps_num; - ret = hclge_tm_schd_init(hdev); + ret = hclge_tm_vport_map_update(hdev); if (ret) { - dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); + dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); return ret; } @@ -5973,6 +10939,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) if (ret) return ret; + /* RSS indirection table has been configuared by user */ + if (rxfh_configured) + goto out; + /* Reinitializes the rss indirect table according to the new RSS size */ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); if (!rss_indir) @@ -5988,11 +10958,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) kfree(rss_indir); +out: if (!ret) dev_info(&hdev->pdev->dev, - "Channels changed, rss_size from %d to %d, tqps from %d to %d", + "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } @@ -6026,10 +10997,12 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, void *data) { #define HCLGE_32_BIT_REG_RTN_DATANUM 8 +#define HCLGE_32_BIT_DESC_NODATA_LEN 2 struct hclge_desc *desc; u32 *reg_val = data; __le32 *desc_data; + int nodata_num; int cmd_num; int i, k, n; int ret; @@ -6037,7 +11010,9 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, if (regs_num == 0) return 0; - cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); + nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_num, + HCLGE_32_BIT_REG_RTN_DATANUM); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc) return -ENOMEM; @@ -6054,7 +11029,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, for (i = 0; i < cmd_num; i++) { if (i == 0) { desc_data = (__le32 *)(&desc[i].data[0]); - n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; + n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; } else { desc_data = (__le32 *)(&desc[i]); n = HCLGE_32_BIT_REG_RTN_DATANUM; @@ -6076,10 +11051,12 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, void *data) { #define HCLGE_64_BIT_REG_RTN_DATANUM 4 +#define HCLGE_64_BIT_DESC_NODATA_LEN 1 struct hclge_desc *desc; u64 *reg_val = data; __le64 *desc_data; + int nodata_len; int cmd_num; int i, k, n; int ret; @@ -6087,7 +11064,9 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, if (regs_num == 0) return 0; - cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); + nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; + cmd_num = 
DIV_ROUND_UP(regs_num + nodata_len, + HCLGE_64_BIT_REG_RTN_DATANUM); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc) return -ENOMEM; @@ -6104,7 +11083,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, for (i = 0; i < cmd_num; i++) { if (i == 0) { desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; + n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; } else { desc_data = (__le64 *)(&desc[i]); n = HCLGE_64_BIT_REG_RTN_DATANUM; @@ -6122,30 +11101,295 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, return 0; } +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFDFCFBFA +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) +#define REG_SEPARATOR_LINE 1 +#define REG_NUM_REMAIN_MASK 3 + +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) +{ + /*prepare 4 commands to query DFX BD number*/ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); + desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + + return hclge_cmd_send(&hdev->hw, desc, 4); +} + +static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, + int *bd_num_list, + u32 type_num) +{ + u32 entries_per_desc, desc_index, index, offset, i; + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx bd num fail, status is %d.\n", ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + for (i = 0; i < type_num; i++) { + offset = hclge_dfx_bd_offset_list[i]; + index = offset % entries_per_desc; + desc_index = offset / entries_per_desc; + bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); + } + + return ret; +} + +static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int i, ret; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + for (i = 0; i < bd_num - 1; i++) { + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + desc = desc_src; + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "Query dfx reg cmd(0x%x) send fail, status is %d.\n", + cmd, ret); + + return ret; +} + +static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, + void *data) +{ + int entries_per_desc, reg_num, separator_num, desc_index, index, i; + struct hclge_desc *desc = desc_src; + u32 *reg = data; + + entries_per_desc = ARRAY_SIZE(desc->data); + reg_num = entries_per_desc * bd_num; + separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) { + index = i % entries_per_desc; + desc_index = i / entries_per_desc; + *reg++ = le32_to_cpu(desc[desc_index].data[index]); + } + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + return reg_num + separator_num; +} + +static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int data_len_per_desc, bd_num, i; + int *bd_num_list; + u32 data_len; + int 
ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data); + *len = 0; + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + data_len = data_len_per_desc * bd_num; + *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; + } + +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) +{ +#define HCLGE_DFX_BD_NUM_MAX 64 + + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int bd_num, bd_num_max, buf_len, i; + struct hclge_desc *desc_src; + int *bd_num_list; + u32 *reg = data; + int ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + bd_num_max = bd_num_list[0]; + for (i = 1; i < dfx_reg_type_num; i++) + bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); + + if (bd_num_max <= 0 || bd_num_max > HCLGE_DFX_BD_NUM_MAX) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, invalid bd number: %d\n", + bd_num_max); + ret = -EINVAL; + goto out; + } + + buf_len = sizeof(*desc_src) * bd_num_max; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, + hclge_dfx_reg_opcode_list[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, status is %d.\n", ret); + break; + } + + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + kfree(desc_src); +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, + struct hnae3_knic_private_info *kinfo) +{ +#define HCLGE_RING_REG_OFFSET 0x200 +#define HCLGE_RING_INT_REG_OFFSET 0x4 + + int i, j, reg_num, separator_num; + int data_num_sum; + u32 *reg = data; + + /* fetching per-PF registers valus from PF PCIe register space */ + reg_num = ARRAY_SIZE(cmdq_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum = reg_num + separator_num; + + reg_num = ARRAY_SIZE(common_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum += reg_num + separator_num; + + reg_num = ARRAY_SIZE(ring_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < kinfo->num_tqps; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGE_RING_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; + + reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < 
hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + HCLGE_RING_INT_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); + + return data_num_sum; +} + static int hclge_get_regs_len(struct hnae3_handle *handle) { + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u32 regs_num_32_bit, regs_num_64_bit; + int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; + int regs_lines_32_bit, regs_lines_64_bit; int ret; ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); if (ret) { dev_err(&hdev->pdev->dev, "Get register number failed, ret = %d.\n", ret); - return -EOPNOTSUPP; + return ret; + } + + ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg len failed, ret = %d.\n", ret); + return ret; } - return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + + regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; } static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, void *data) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 regs_num_32_bit, regs_num_64_bit; - int ret; + int i, reg_num, separator_num, ret; + u32 *reg = data; *version = hdev->fw_version; @@ -6156,19 +11400,36 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, return; } - ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); + reg += hclge_fetch_pf_reg(hdev, reg, kinfo); + + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); if (ret) { dev_err(&hdev->pdev->dev, "Get 32 bit register failed, ret = %d.\n", ret); return; } + reg_num = regs_num_32_bit; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; - data = (u32 *)data + regs_num_32_bit; - ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, - data); - if (ret) + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); + if (ret) { dev_err(&hdev->pdev->dev, "Get 64 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_64_bit * 2; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_dfx_reg(hdev, reg); + if (ret) + dev_err(&hdev->pdev->dev, + "Get dfx register failed, ret = %d.\n", ret); } static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) @@ -6228,30 +11489,171 @@ static void 
hclge_get_link_mode(struct hnae3_handle *handle, } } -static void hclge_get_port_type(struct hnae3_handle *handle, - u8 *port_type) +static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u8 media_type = hdev->hw.mac.media_type; - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - *port_type = PORT_FIBRE; - break; - case HNAE3_MEDIA_TYPE_COPPER: - *port_type = PORT_TP; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - *port_type = PORT_OTHER; - break; + return hclge_config_gro(hdev, enable); +} + +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + u8 tmp_flags = 0; + int ret; + int i; + + if (vport->last_promisc_flags != vport->overflow_promisc_flags) { + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + vport->last_promisc_flags = vport->overflow_promisc_flags; + } + + if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) { + tmp_flags = handle->netdev_flags | vport->last_promisc_flags; + ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, + tmp_flags & HNAE3_MPE); + if (!ret) { + clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + } + } + + for (i = 1; i < hdev->num_alloc_vport; i++) { + bool uc_en = false; + bool mc_en = false; + bool bc_en; + + vport = &hdev->vport[i]; + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + continue; + + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0; + mc_en = vport->vf_info.request_mc_en > 0; + } + bc_en = vport->vf_info.request_bc_en > 0; + + ret = hclge_set_vport_promisc_mode(vport, uc_en, mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state); + return; + } + hclge_set_vport_vlan_fltr_change(vport); + } +} + +static bool hclge_module_existed(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u32 existed; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP exist state, ret = %d\n", ret); + return false; + } + + existed = le32_to_cpu(desc.data[0]); + + return existed != 0; +} + +/* need 6 bds(total 140 bytes) in one reading + * return the number of bytes actually read, 0 means read failed. + */ +static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, + u32 len, u8 *data) +{ + struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; + struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; + u16 read_len; + u16 copy_len; + int ret; + int i; + + /* setup all 6 bds to read module eeprom info. */ + for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, + true); + + /* bd0~bd4 need next flag */ + if (i < HCLGE_SFP_INFO_CMD_NUM - 1) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + } + + /* setup bd0, this bd contains offset and read length. */ + sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; + sfp_info_bd0->offset = cpu_to_le16((u16)offset); + read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); + sfp_info_bd0->read_len = cpu_to_le16(read_len); + + ret = hclge_cmd_send(&hdev->hw, desc, i); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP eeprom info, ret = %d\n", ret); + return 0; + } + + /* copy sfp info from bd0 to out buffer. 
*/ + copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); + memcpy(data, sfp_info_bd0->data, copy_len); + read_len = copy_len; + + /* copy sfp info from bd1~bd5 to out buffer if needed. */ + for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + if (read_len >= len) + return read_len; + + copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); + memcpy(data + read_len, desc[i].data, copy_len); + read_len += copy_len; + } + + return read_len; +} + +static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, + u32 len, u8 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 read_len = 0; + u16 data_len; + + if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return -EOPNOTSUPP; + + if (!hclge_module_existed(hdev)) + return -ENXIO; + + while (read_len < len) { + data_len = hclge_get_sfp_eeprom_info(hdev, + offset + read_len, + len - read_len, + data + read_len); + if (!data_len) + return -EIO; + + read_len += data_len; } + + return 0; } -static const struct hnae3_ae_ops hclge_ops = { +struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, + .reset_prepare = hclge_reset_prepare_general, + .reset_done = hclge_reset_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, @@ -6259,14 +11661,19 @@ static const struct hnae3_ae_ops hclge_ops = { .get_vector = hclge_get_vector, .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, + .request_update_promisc_mode = hclge_request_update_promisc_mode, .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, + .client_start = hclge_client_start, + .client_stop = hclge_client_stop, .get_status = hclge_get_status, .get_ksettings_an_result = hclge_get_ksettings_an_result, - .update_speed_duplex_h = hclge_update_speed_duplex_h, .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, .get_media_type = hclge_get_media_type, + .check_port_speed = hclge_check_port_speed, + .get_fec = hclge_get_fec, + .set_fec = hclge_set_fec, .get_rss_key_size = hclge_get_rss_key_size, .get_rss_indir_size = hclge_get_rss_indir_size, .get_rss = hclge_get_rss, @@ -6276,18 +11683,21 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, .add_uc_addr = hclge_add_uc_addr, .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, - .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, + .restart_autoneg = hclge_restart_autoneg, + .halt_autoneg = hclge_halt_autoneg, .get_pauseparam = hclge_get_pauseparam, .set_pauseparam = hclge_set_pauseparam, .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, + .get_mac_stats = hclge_get_mac_stat, .update_stats = hclge_update_stats, .get_strings = hclge_get_strings, .get_sset_count = hclge_get_sset_count, @@ -6298,15 +11708,43 @@ static const struct hnae3_ae_ops hclge_ops = { .set_vf_vlan_filter = hclge_set_vf_vlan_filter, .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, + .get_reset_level = hclge_get_reset_level, + .set_default_reset_request = hclge_set_def_reset_request, .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = 
hclge_get_channels, - .get_flowctrl_adv = hclge_get_flowctrl_adv, .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, .get_link_mode = hclge_get_link_mode, - .get_port_type = hclge_get_port_type, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .del_all_fd_entries = hclge_del_all_fd_entries, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .restore_fd_rules = hclge_restore_fd_entries, + .enable_fd = hclge_enable_fd, + .add_arfs_entry = hclge_add_fd_entry_by_arfs, + .dbg_read_cmd = hclge_dbg_read_cmd, + .handle_hw_ras_error = hclge_handle_hw_ras_error, + .get_hw_reset_stat = hclge_get_hw_reset_stat, + .ae_dev_resetting = hclge_ae_dev_resetting, + .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, + .set_gro_en = hclge_gro_en, + .get_global_queue_id = hclge_covert_handle_qid_global, + .set_timer_task = hclge_set_timer_task, + .mac_connect_phy = hclge_mac_connect_phy, + .mac_disconnect_phy = hclge_mac_disconnect_phy, + .reset_end = hclge_reset_end, + .get_vf_config = hclge_get_vf_config, + .set_vf_link_state = hclge_set_vf_link_state, + .set_vf_spoofchk = hclge_set_vf_spoofchk, + .set_vf_trust = hclge_set_vf_trust, + .set_vf_rate = hclge_set_vf_rate, + .set_vf_mac = hclge_set_vf_mac, + .get_module_eeprom = hclge_get_module_eeprom, + .get_cmdq_stat = hclge_get_cmdq_stat, }; static struct hnae3_ae_algo ae_algo = { @@ -6314,20 +11752,30 @@ static struct hnae3_ae_algo ae_algo = { .pdev_id_table = ae_algo_pci_tbl, }; -static int hclge_init(void) +int hclge_init(void) { pr_info("%s is initializing\n", HCLGE_NAME); + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); + if (!hclge_wq) { + pr_err("%s: failed to create workqueue\n", HCLGE_NAME); + return -ENOMEM; + } + hnae3_register_ae_algo(&ae_algo); return 0; } +#ifndef CONFIG_IT_VALIDATION +module_init(hclge_init); +#endif + static void hclge_exit(void) { hnae3_unregister_ae_algo(&ae_algo); + destroy_workqueue(hclge_wq); } -module_init(hclge_init); module_exit(hclge_exit); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 1528fb3fa6be6d4da6afcba7bdbbf5e1a7608171..9fbaceaa939f75f00a2c18ea933af93ffe24b033 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -7,15 +7,23 @@ #include #include #include +#include #include "hclge_cmd.h" #include "hnae3.h" -#define HCLGE_MOD_VERSION "1.0" +#define HCLGE_MOD_VERSION "24.3.1" #define HCLGE_DRIVER_NAME "hclge" +#define HCLGE_MAX_PF_NUM 8 + +#define HCLGE_RD_FIRST_STATS_NUM 2 +#define HCLGE_RD_OTHER_STATS_NUM 4 + #define HCLGE_INVALID_VPORT 0xffff +#define HCLGE_VF_VPORT_START_NUM 1 + #define HCLGE_PF_CFG_BLOCK_SIZE 32 #define HCLGE_PF_CFG_DESC_NUM \ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) @@ -26,6 +34,62 @@ #define HCLGE_VECTOR_REG_OFFSET 0x4 #define HCLGE_VECTOR_VF_OFFSET 0x100000 +#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGE_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 +#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGE_CMDQ_INTR_STS_REG 
0x27104 +#define HCLGE_CMDQ_INTR_EN_REG 0x27108 +#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 +#define HCLGE_RAS_OTHER_STS_REG 0x20B00 +#define HCLGE_FUNC_RESET_STS_REG 0x20C00 +#define HCLGE_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGE_RING_RX_ADDR_L_REG 0x80000 +#define HCLGE_RING_RX_ADDR_H_REG 0x80004 +#define HCLGE_RING_RX_BD_NUM_REG 0x80008 +#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGE_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGE_RING_RX_TAIL_REG 0x80018 +#define HCLGE_RING_RX_HEAD_REG 0x8001C +#define HCLGE_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGE_RING_RX_OFFSET_REG 0x80024 +#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGE_RING_RX_STASH_REG 0x80030 +#define HCLGE_RING_RX_BD_ERR_REG 0x80034 +#define HCLGE_RING_TX_ADDR_L_REG 0x80040 +#define HCLGE_RING_TX_ADDR_H_REG 0x80044 +#define HCLGE_RING_TX_BD_NUM_REG 0x80048 +#define HCLGE_RING_TX_PRIORITY_REG 0x8004C +#define HCLGE_RING_TX_TC_REG 0x80050 +#define HCLGE_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGE_RING_TX_TAIL_REG 0x80058 +#define HCLGE_RING_TX_HEAD_REG 0x8005C +#define HCLGE_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGE_RING_TX_OFFSET_REG 0x80064 +#define HCLGE_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGE_RING_TX_BD_ERR_REG 0x80074 +#define HCLGE_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGE_TQP_INTR_CTRL_REG 0x20000 +#define HCLGE_TQP_INTR_GL0_REG 0x20100 +#define HCLGE_TQP_INTR_GL1_REG 0x20200 +#define HCLGE_TQP_INTR_GL2_REG 0x20300 +#define HCLGE_TQP_INTR_RL_REG 0x20900 + #define HCLGE_RSS_IND_TBL_SIZE 512 #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0) #define HCLGE_RSS_KEY_SIZE 40 @@ -37,12 +101,14 @@ (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) #define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) -#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HCLGE_RSS_INPUT_TUPLE_SCTP4 GENMASK(4, 0) #define HCLGE_D_PORT_BIT BIT(0) #define HCLGE_S_PORT_BIT BIT(1) #define HCLGE_D_IP_BIT BIT(2) #define HCLGE_S_IP_BIT BIT(3) #define HCLGE_V_TAG_BIT BIT(4) +#define HCLGE_RSS_INPUT_TUPLE_SCTP6 \ + (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT) #define HCLGE_RSS_TC_SIZE_0 1 #define HCLGE_RSS_TC_SIZE_1 2 @@ -53,9 +119,11 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 -#define HCLGE_MTA_TBL_SIZE 4096 +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) -#define HCLGE_TQP_RESET_TRY_TIMES 10 +#define HCLGE_TQP_RESET_TRY_TIMES 200 #define HCLGE_PHY_PAGE_MDIX 0 #define HCLGE_PHY_PAGE_COPPER 0 @@ -75,16 +143,37 @@ #define HCLGE_PHY_MDIX_STATUS_B 6 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11 +#define HCLGE_GET_DFX_REG_TYPE_CNT 4 + /* Factor used to calculate offset and bitmap of VF num */ #define HCLGE_VF_NUM_PER_CMD 64 -#define HCLGE_VF_NUM_PER_BYTE 8 + +#define HCLGE_DBG_RESET_INFO_LEN 1024 + +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define PF_VPORT_ID 0 + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) /* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 #define HCLGE_GLOBAL_RESET_BIT 0 #define HCLGE_CORE_RESET_BIT 1 
+#define HCLGE_IMP_RESET_BIT 2 +#define HCLGE_RESET_INT_M GENMASK(7, 5) #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 @@ -98,8 +187,12 @@ /* CMDQ register bits for RX event(=MBX event) */ #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 +#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U + #define HCLGE_MAC_DEFAULT_FRAME \ - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) + (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) #define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MAX_FRAME 9728 @@ -108,32 +201,44 @@ #define HCLGE_SUPPORT_25G_BIT BIT(2) #define HCLGE_SUPPORT_50G_BIT BIT(3) #define HCLGE_SUPPORT_100G_BIT BIT(4) +/* to be compatible with exsit board */ +#define HCLGE_SUPPORT_40G_BIT BIT(5) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, HCLGE_STATE_DISABLED, HCLGE_STATE_REMOVING, + HCLGE_STATE_NIC_REGISTERED, + HCLGE_STATE_ROCE_REGISTERED, HCLGE_STATE_SERVICE_INITED, - HCLGE_STATE_SERVICE_SCHED, HCLGE_STATE_RST_SERVICE_SCHED, HCLGE_STATE_RST_HANDLING, HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_CMD_DISABLE, + HCLGE_STATE_LINK_UPDATING, + HCLGE_STATE_PROMISC_CHANGED, + HCLGE_STATE_RST_FAIL, HCLGE_STATE_MAX }; enum hclge_evt_cause { HCLGE_VECTOR0_EVENT_RST, HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_ERR, HCLGE_VECTOR0_EVENT_OTHER, }; #define HCLGE_MPF_ENBALE 1 enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ @@ -149,22 +254,29 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; -enum hclge_mta_dmac_sel_type { - HCLGE_MAC_ADDR_47_36, - HCLGE_MAC_ADDR_46_35, - HCLGE_MAC_ADDR_45_34, - HCLGE_MAC_ADDR_44_33, -}; +#define QUERY_SFP_SPEED 0 +#define QUERY_ACTIVE_SPEED 1 struct hclge_mac { + u8 mac_id; u8 phy_addr; u8 flag; - u8 media_type; + u8 media_type; /* port media type, e.g. fibre/copper/backplane */ u8 mac_addr[ETH_ALEN]; u8 autoneg; + u8 autoneg_last; u8 duplex; + u8 duplex_last; + u8 support_autoneg; + u8 speed_type; /* 0: sfp speed, 1: active speed */ u32 speed; - int link; /* store the link status of mac & phy (if phy exit)*/ + u32 max_speed; + u32 speed_ability; /* speed ability supported by current media */ + u32 module_type; /* sub media type, e.g. 
kr/cr/sr/lr */ + u32 fec_mode; /* active fec mode */ + u32 user_fec_mode; + u32 fec_ability; + int link; /* store the link status of mac & phy (if phy exit) */ struct phy_device *phydev; struct mii_bus *mdio_bus; phy_interface_t phy_if; @@ -208,6 +320,33 @@ enum hclge_fc_mode { HCLGE_FC_DEFAULT }; +#define HCLGE_FILTER_TYPE_VF 0 +#define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) + +enum hclge_vlan_fltr_cap { + HCLGE_VLAN_FLTR_DEF, + HCLGE_VLAN_FLTR_CAN_MDF, +}; + +enum hclge_link_fail_code { + HCLGE_LF_NORMAL, + HCLGE_LF_REF_CLOCK_LOST, + HCLGE_LF_XSFP_TX_DISABLE, + HCLGE_LF_XSFP_ABSENT, +}; + +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 @@ -229,6 +368,7 @@ struct hclge_tc_info { struct hclge_cfg { u8 vmdq_vport_num; u8 tc_num; + u8 vlan_fliter_cap; u16 tqp_desc_num; u16 rx_buf_len; u16 rss_size_max; @@ -238,6 +378,7 @@ struct hclge_cfg { u8 default_speed; u32 numa_node_map; u8 speed_ability; + u16 umv_space; }; struct hclge_tm_info { @@ -249,6 +390,7 @@ struct hclge_tm_info { struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ + u8 pfc_en; /* PFC enabled or not for user priority */ }; struct hclge_comm_stats_str { @@ -256,109 +398,6 @@ struct hclge_comm_stats_str { unsigned long offset; }; -/* all 64bit stats, opcode id: 0x0030 */ -struct hclge_64_bit_stats { - /* query_igu_stat */ - u64 igu_rx_oversize_pkt; - u64 igu_rx_undersize_pkt; - u64 igu_rx_out_all_pkt; - u64 igu_rx_uni_pkt; - u64 igu_rx_multi_pkt; - u64 igu_rx_broad_pkt; - u64 rsv0; - - /* query_egu_stat */ - u64 egu_tx_out_all_pkt; - u64 egu_tx_uni_pkt; - u64 egu_tx_multi_pkt; - u64 egu_tx_broad_pkt; - - /* ssu_ppp packet stats */ - u64 ssu_ppp_mac_key_num; - u64 ssu_ppp_host_key_num; - u64 ppp_ssu_mac_rlt_num; - u64 ppp_ssu_host_rlt_num; - - /* ssu_tx_in_out_dfx_stats */ - u64 ssu_tx_in_num; - u64 ssu_tx_out_num; - /* ssu_rx_in_out_dfx_stats */ - u64 ssu_rx_in_num; - u64 ssu_rx_out_num; -}; - -/* all 32bit stats, opcode id: 0x0031 */ -struct hclge_32_bit_stats { - u64 igu_rx_err_pkt; - u64 igu_rx_no_eof_pkt; - u64 igu_rx_no_sof_pkt; - u64 egu_tx_1588_pkt; - u64 egu_tx_err_pkt; - u64 ssu_full_drop_num; - u64 ssu_part_drop_num; - u64 ppp_key_drop_num; - u64 ppp_rlt_drop_num; - u64 ssu_key_drop_num; - u64 pkt_curr_buf_cnt; - u64 qcn_fb_rcv_cnt; - u64 qcn_fb_drop_cnt; - u64 qcn_fb_invaild_cnt; - u64 rsv0; - u64 rx_packet_tc0_in_cnt; - u64 rx_packet_tc1_in_cnt; - u64 rx_packet_tc2_in_cnt; - u64 rx_packet_tc3_in_cnt; - u64 rx_packet_tc4_in_cnt; - u64 rx_packet_tc5_in_cnt; - u64 rx_packet_tc6_in_cnt; - u64 rx_packet_tc7_in_cnt; - u64 rx_packet_tc0_out_cnt; - u64 rx_packet_tc1_out_cnt; - u64 rx_packet_tc2_out_cnt; - u64 rx_packet_tc3_out_cnt; - u64 rx_packet_tc4_out_cnt; - u64 rx_packet_tc5_out_cnt; - u64 rx_packet_tc6_out_cnt; - u64 rx_packet_tc7_out_cnt; - - /* Tx packet level statistics */ - u64 tx_packet_tc0_in_cnt; - u64 tx_packet_tc1_in_cnt; - u64 tx_packet_tc2_in_cnt; - u64 tx_packet_tc3_in_cnt; - u64 tx_packet_tc4_in_cnt; - u64 
tx_packet_tc5_in_cnt; - u64 tx_packet_tc6_in_cnt; - u64 tx_packet_tc7_in_cnt; - u64 tx_packet_tc0_out_cnt; - u64 tx_packet_tc1_out_cnt; - u64 tx_packet_tc2_out_cnt; - u64 tx_packet_tc3_out_cnt; - u64 tx_packet_tc4_out_cnt; - u64 tx_packet_tc5_out_cnt; - u64 tx_packet_tc6_out_cnt; - u64 tx_packet_tc7_out_cnt; - - /* packet buffer statistics */ - u64 pkt_curr_buf_tc0_cnt; - u64 pkt_curr_buf_tc1_cnt; - u64 pkt_curr_buf_tc2_cnt; - u64 pkt_curr_buf_tc3_cnt; - u64 pkt_curr_buf_tc4_cnt; - u64 pkt_curr_buf_tc5_cnt; - u64 pkt_curr_buf_tc6_cnt; - u64 pkt_curr_buf_tc7_cnt; - - u64 mb_uncopy_num; - u64 lo_pri_unicast_rlt_drop_num; - u64 hi_pri_multicast_rlt_drop_num; - u64 lo_pri_multicast_rlt_drop_num; - u64 rx_oq_drop_pkt_cnt; - u64 tx_oq_drop_pkt_cnt; - u64 nic_l2_err_drop_pkt_cnt; - u64 roc_l2_err_drop_pkt_cnt; -}; - /* mac stats ,opcode id: 0x0032 */ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; @@ -445,15 +484,13 @@ struct hclge_mac_stats { u64 mac_rx_fcs_err_pkt_num; u64 mac_rx_send_app_good_pkt_num; u64 mac_rx_send_app_bad_pkt_num; + u64 mac_tx_pfc_pause_pkt_num; + u64 mac_rx_pfc_pause_pkt_num; + u64 mac_tx_ctrl_pkt_num; + u64 mac_rx_ctrl_pkt_num; }; -#define HCLGE_STATS_TIMER_INTERVAL (60 * 5) -struct hclge_hw_stats { - struct hclge_mac_stats mac_stats; - struct hclge_64_bit_stats all_64_bit_stats; - struct hclge_32_bit_stats all_32_bit_stats; - u32 stats_timer; -}; +#define HCLGE_STATS_TIMER_INTERVAL 300UL struct hclge_vlan_type_cfg { u16 rx_ot_fst_vlan_type; @@ -464,18 +501,281 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +enum HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, + MAX_STAGE_NUM, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +struct key_info { + u8 key_type; + u8 key_length; /* use bit as unit */ +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +/* assigned by firmware, the real filter number for each pf may be less */ +#define MAX_FD_FILTER_NUM 4096 +#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL + +enum HCLGE_FD_ACTIVE_RULE_TYPE { + HCLGE_FD_RULE_NONE, + HCLGE_FD_ARFS_ACTIVE, + HCLGE_FD_EP_ACTIVE, +}; + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_DROP_PACKET, +}; + +struct hclge_fd_key_cfg { + u8 
key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u16 max_key_length; /* use bit as unit */ + u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ + u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; +}; + +#define IPV4_INDEX 3 +#define IPV6_SIZE 4 +struct hclge_fd_rule_tuples { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + /* Be compatible for ip address of both ipv4 and ipv6. + * For ipv4 address, we store it in src/dst_ip[3]. + */ + u32 src_ip[IPV6_SIZE]; + u32 dst_ip[IPV6_SIZE]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + u8 action; + u16 vf_id; + u16 queue_id; + u16 location; + u16 flow_id; /* only used for arfs */ + enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; +}; + +struct hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; +}; + +enum HCLGE_MAC_NODE_STATE { + HCLGE_MAC_TO_ADD, + HCLGE_MAC_TO_DEL, + HCLGE_MAC_ACTIVE +}; + +struct hclge_vport_mac_addr_cfg { + struct list_head node; + enum HCLGE_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +enum HCLGE_IMP_ERR_TYPE { + HCLGE_IMP_RD_POISON, + HCLGE_IMP_CMDQ_ERROR, +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + +struct hclge_rst_stats { + u32 reset_done_cnt; /* the number of reset has completed */ + u32 hw_reset_done_cnt; /* the number of HW reset has completed */ + u32 pf_rst_cnt; /* the number of PF reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 core_rst_cnt; /* the number of CORE reset */ + u32 global_rst_cnt; /* the number of GLOBAL */ + u32 imp_rst_cnt; /* the number of IMP reset */ + u32 reset_cnt; /* the number of reset */ + u32 reset_fail_cnt; /* the number of reset fail */ +}; + +/* time and register status when mac tunnel interruption occur */ +struct hclge_mac_tnl_stats { + u64 time; + u32 status; +}; + +#define HCLGE_RESET_INTERVAL (12 * HZ) +#define HCLGE_WAIT_RESET_DONE 100 + +#pragma pack(1) +struct hclge_vf_vlan_cfg { + u8 mbx_cmd; + u8 subcode; + union { + struct { + u8 is_kill; + u16 vlan; + u16 proto; + }; + u8 enable; + }; +}; + +#pragma pack() + +/* For each bit of TCAM entry, it uses a pair of 'x' and + * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) ((x) = (~(k) & (v))) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + +#define HCLGE_MAC_STATS_FIELD_OFF(f) 
(offsetof(struct hclge_mac_stats, f)) +#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) + +#define HCLGE_MAC_TNL_LOG_SIZE 8 #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; struct hclge_hw hw; struct hclge_misc_vector misc_vector; - struct hclge_hw_stats hw_stats; + struct hclge_mac_stats mac_stats; unsigned long state; + unsigned long flr_state; + unsigned long last_reset_time; enum hnae3_reset_type reset_type; + enum hnae3_reset_type reset_level; + unsigned long default_reset_request; unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ + struct hclge_rst_stats rst_stats; + struct semaphore reset_sem; /* protect reset process */ + u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ @@ -489,10 +789,14 @@ struct hclge_dev { u16 num_alloc_vport; /* Num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; u8 tc_num_last_time; enum hclge_fc_mode fc_mode_last_time; + bool support_sfp_query; + bool ppu_poison_ras_err; + unsigned long imp_err_state; #define HCLGE_FLAG_TC_BASE_SCH_MODE 1 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 @@ -511,6 +815,7 @@ struct hclge_dev { u32 base_msi_vector; u16 *vector_status; int *vector_irq; + u16 num_nic_msi; /* Num of nic vectors for this PF */ u16 num_roce_msi; /* Num of roce vectors for this PF */ int roce_base_vector; @@ -522,10 +827,8 @@ struct hclge_dev { u16 adminq_work_limit; /* Num of admin receive queue desc to process */ unsigned long service_timer_period; unsigned long service_timer_previous; - struct timer_list service_timer; - struct work_struct service_task; - struct work_struct rst_service_task; - struct work_struct mbx_service_task; + struct timer_list reset_timer; + struct delayed_work service_task; bool cur_promisc; int num_alloc_vfs; /* Actual number of VFs allocated */ @@ -545,14 +848,43 @@ struct hclge_dev { u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ - u32 mps; /* Max packet size */ + u32 tx_buf_size; /* Tx buffer size for each TC */ + u32 dv_buf_size; /* Dv buffer size for each TC */ - enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Multicast filter enable */ + int mps; /* Max packet size */ + /* vport_lock protect resource shared by vports */ + struct mutex vport_lock; struct hclge_vlan_type_cfg vlan_type_cfg; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ + u16 hclge_fd_rule_num; + unsigned long serv_processed_cnt; + unsigned long last_serv_processed; + unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; + enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; + u8 fd_en; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + + DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, + HCLGE_MAC_TNL_LOG_SIZE); + + /* affinity mask and notify for 
misc interrupt */ + cpumask_t affinity_mask; }; /* VPort level vlan tag configuration for TX direction */ @@ -569,10 +901,11 @@ struct hclge_tx_vtag_cfg { /* VPort level vlan tag configuration for RX direction */ struct hclge_rx_vtag_cfg { - bool strip_tag1_en; /* Whether strip inner vlan tag */ - bool strip_tag2_en; /* Whether strip outer vlan tag */ - bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */ - bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ + u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */ + u8 strip_tag1_en; /* Whether strip inner vlan tag */ + u8 strip_tag2_en; /* Whether strip outer vlan tag */ + u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ + u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ }; struct hclge_rss_tuple_cfg { @@ -586,6 +919,41 @@ struct hclge_rss_tuple_cfg { u8 ipv6_fragment_en; }; +#define HCLGE_MAC_TBL_SYNC_INTERVAL 3U + +enum HCLGE_VPORT_STATE { + HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + HCLGE_VPORT_STATE_PROMISC_CHANGE, + HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + HCLGE_VPORT_STATE_START, + HCLGE_VPORT_STATE_MAX +}; + +struct hclge_vlan_info { + u16 vlan_proto; /* so far support 802.1Q only */ + u16 qos; + u16 vlan_tag; +}; + +struct hclge_port_base_vlan_config { + u16 state; + bool tbl_sta; + struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; +}; + +struct hclge_vf_info { + int link_state; + u8 mac[ETH_ALEN]; + u32 spoofchk; + u32 max_tx_rate; + u32 trusted; + u8 request_uc_en; + u8 request_mc_en; + u8 request_bc_en; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -599,24 +967,44 @@ struct hclge_vport { u16 alloc_rss_size; u16 qs_offset; - u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u32 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + bool req_vlan_fltr_en; + bool cur_vlan_fltr_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct hclge_port_base_vlan_config port_base_vlan_cfg; struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; - int vport_id; + u16 used_umv_num; + + u16 vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; - bool accept_mta_mc; /* whether to accept mta filter multicast */ - unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; -}; + unsigned long state; + unsigned long last_active_jiffies; + int mps; /* Max packet size */ + struct hclge_vf_info vf_info; -void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, - bool en_mc, bool en_bc, int vport_id); + u8 overflow_promisc_flags; + u8 last_promisc_flags; + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ + +#ifdef CONFIG_HNS3_TEST + /* for sysfs */ + struct kobject kobj; +#endif +}; + +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr); int hclge_rm_uc_addr_common(struct hclge_vport *vport, @@ -626,15 +1014,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr); -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable); -int 
hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter); - struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, @@ -647,6 +1026,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) return tqp->index; } +static inline bool hclge_is_reset_pending(struct hclge_dev *hdev) +{ + return !!hdev->reset_pending; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); @@ -657,8 +1042,44 @@ int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); -void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); -void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); +int hclge_reset_tqp(struct hnae3_handle *handle); int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); +int hclge_vport_start(struct hclge_vport *vport); +void hclge_vport_stop(struct hclge_vport *vport); +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len); +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type); +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr); +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); +void hclge_restore_vport_vlan_table(struct hclge_vport *vport); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info); +enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr); +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc); +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type); +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); +int hclge_push_vf_link_status(struct hclge_vport *vport); +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); +int hclge_mac_update_stats(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb39432705a6206959feffa7cc56529..893f6e0ce4731313367c08e916a551dfe015f434 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -5,6 +5,38 @@ #include "hclge_mbx.h" #include "hnae3.h" +#define CREATE_TRACE_POINTS +#include "hclge_trace.h" + +static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, + {2, -ENOENT}, + {5, -EIO}, + {11, -EAGAIN}, + {12, -ENOMEM}, + {16, -EBUSY}, + {22, -EINVAL}, + {28, -ENOSPC}, + {95, -EOPNOTSUPP}, +}; + +static u16 hclge_errno_to_resp(int errno) +{ +#define UNKNOWN_ERR 0xFFFF + + u32 i; + + for (i = 0; + i < sizeof(err_code_map) / sizeof(struct errno_respcode_map); + i++) { + if (err_code_map[i].errno == errno) + return err_code_map[i].resp_code; + } + + return UNKNOWN_ERR; +} + /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF * receives a mailbox message from VF. * @vport: pointer to struct hclge_vport @@ -14,8 +46,7 @@ */ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req, - int resp_status, - u8 *resp_data, u16 resp_data_len) + struct hclge_respond_to_vf_msg *resp_msg) { struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; struct hclge_dev *hdev = vport->back; @@ -24,30 +55,39 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; - if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { dev_err(&hdev->pdev->dev, - "PF fail to gen resp to VF len %d exceeds max len %d\n", - resp_data_len, + "PF fail to gen resp to VF len %u exceeds max len %d\n", + resp_msg->len, HCLGE_MBX_MAX_RESP_DATA_SIZE); + /* If resp_msg->len is too long, set the value to max length + * and return the msg to VF + */ + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; } hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; + resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; + resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; + resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode; + resp_pf_to_vf->msg.resp_status = hclge_errno_to_resp(resp_msg->status); - resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP; - resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0]; - resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1]; - resp_pf_to_vf->msg[3] = (resp_status == 0) ? 
0 : 1; + if (resp_msg->len > 0) + memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, + resp_msg->len); - if (resp_data && resp_data_len > 0) - memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, - "PF failed(=%d) to send response to VF\n", status); + "PF failed(=%d) to send response(vfid:%u, code:%u, subcode:%u) to VF\n", + status, vf_to_pf_req->mbx_src_vfid, + vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode); return status; } @@ -66,28 +106,43 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, resp_pf_to_vf->dest_vfid = dest_vfid; resp_pf_to_vf->msg_len = msg_len; - resp_pf_to_vf->msg[0] = mbx_opcode; + resp_pf_to_vf->msg.code = mbx_opcode; + + memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); - memcpy(&resp_pf_to_vf->msg[1], msg, msg_len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, - "PF failed(=%d) to send mailbox message to VF\n", - status); + "PF failed(=%d) to send mailbox message(vfid:%u, opcode:%u) to VF\n", + status, dest_vfid, mbx_opcode); return status; } -static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + u16 reset_type; u8 msg_data[2]; u8 dest_vfid; + BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); + dest_vfid = (u8)vport->vport_id; + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type = HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + reset_type = HNAE3_VF_FUNC_RESET; + + memcpy(&msg_data[0], &reset_type, sizeof(u16)); + /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -120,22 +175,31 @@ static int hclge_get_ring_chain_from_mbx( struct hclge_vport *vport) { struct hnae3_ring_chain_node *cur_chain, *new_chain; + struct hclge_dev *hdev = vport->back; int ring_num; int i; - ring_num = req->msg[2]; + ring_num = req->msg.ring_num; - if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM - - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / - HCLGE_MBX_RING_NODE_VARIABLE_NUM)) + if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) return -ENOMEM; - hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + for (i = 0; i < ring_num; i++) { + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { + dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", + req->msg.param[i].tqp_index, + vport->nic.kinfo.rss_size - 1); + return -EINVAL; + } + } + + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, + req->msg.param[0].ring_type); ring_chain->tqp_index = - hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg.param[0].tqp_index]); hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - req->msg[5]); + HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index); cur_chain = ring_chain; @@ -145,18 +209,15 @@ static int hclge_get_ring_chain_from_mbx( goto err; hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); + req->msg.param[i].ring_type); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp - 
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); + [req->msg.param[i].tqp_index]); hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, HNAE3_RING_GL_IDX_S, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); + req->msg.param[i].int_gl_index); cur_chain->next = new_chain; cur_chain = new_chain; @@ -172,7 +233,7 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, struct hclge_mbx_vf_to_pf_cmd *req) { struct hnae3_ring_chain_node ring_chain; - int vector_id = req->msg[1]; + int vector_id = req->msg.vector_id; int ret; memset(&ring_chain, 0, sizeof(ring_chain)); @@ -181,264 +242,516 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, return ret; ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain); + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); if (ret) return ret; + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + hclge_free_vector_ring_chain(&ring_chain); - return 0; + return ret; } -static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *req) +static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) { - bool en_uc = req->msg[1] ? 
true : false; - bool en_mc = req->msg[2] ? true : false; - struct hclge_promisc_param param; + struct hclge_dev *hdev = vport->back; + + vport->vf_info.request_uc_en = req->msg.en_uc; + vport->vf_info.request_mc_en = req->msg.en_mc; + vport->vf_info.request_bc_en = req->msg.en_bc; - /* always enable broadcast promisc bit */ - hclge_promisc_param_init(¶m, en_uc, en_mc, true, vport->vport_id); - return hclge_cmd_set_promisc_mode(vport->back, ¶m); + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); } static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { - const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); +#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6 + + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); struct hclge_dev *hdev = vport->back; int status; - if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { - const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]); - - hclge_rm_uc_addr_common(vport, old_addr); - status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) - hclge_add_uc_addr_common(vport, old_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { - status = hclge_add_uc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { - status = hclge_rm_uc_addr_common(vport, mac_addr); + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { + const u8 *old_addr = (const u8 *) + (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]); + + /* If VF MAC has been configured by the host then it + * cannot be overridden by the MAC specified by the VM. + */ + if (!is_zero_ether_addr(vport->vf_info.mac) && + !ether_addr_equal(mac_addr, vport->vf_info.mac)) + return -EPERM; + + if (!is_valid_ether_addr(mac_addr)) + return -EINVAL; + + spin_lock_bh(&vport->mac_list_lock); + status = hclge_update_mac_node_for_dev_addr(vport, old_addr, + mac_addr); + spin_unlock_bh(&vport->mac_list_lock); + hclge_task_schedule(hdev, 0); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_UC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_UC, mac_addr); } else { dev_err(&hdev->pdev->dev, - "failed to set unicast mac addr, unknown subcode %d\n", - mbx_req->msg[1]); + "failed to set unicast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); return -EIO; } - if (gen_resp) - hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); - - return 0; -} - -static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, - u8 *msg, u8 idx, bool is_end) -{ -#define HCLGE_MTA_STATUS_MSG_SIZE 13 -#define HCLGE_MTA_STATUS_MSG_BITS \ - (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGE_MTA_STATUS_MSG_END_BITS \ - (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) - unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_ofs; - u8 msg_bit; - - tbl_cnt = is_end ? 
HCLGE_MTA_STATUS_MSG_END_BITS : - HCLGE_MTA_STATUS_MSG_BITS; - - /* set msg field */ - msg_ofs = 0; - msg_bit = 0; - memset(status, 0, sizeof(status)); - for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { - if (msg[msg_ofs] & BIT(msg_bit)) - set_bit(tbl_idx, status); - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - return hclge_update_mta_status_common(vport, - status, idx * HCLGE_MTA_STATUS_MSG_BITS, - tbl_cnt, is_end); + return status; } static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { - const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); struct hclge_dev *hdev = vport->back; - u8 resp_len = 0; - u8 resp_data; - int status; - - if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { - status = hclge_add_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { - status = hclge_rm_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { - u8 func_id = vport->vport_id; - bool enable = mbx_req->msg[2]; - - status = hclge_cfg_func_mta_filter(hdev, func_id, enable); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { - resp_data = hdev->mta_mac_sel_type; - resp_len = sizeof(u8); - gen_resp = true; - status = 0; - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { - /* mta status update msg format - * msg[2.6 : 2.0] msg index - * msg[2.7] msg is end - * msg[15 : 3] mta status bits[103 : 0] - */ - bool is_end = (mbx_req->msg[2] & 0x80) ? true : false; - status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], - mbx_req->msg[2] & 0x7F, - is_end); + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_MC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_MC, mac_addr); } else { dev_err(&hdev->pdev->dev, - "failed to set mcast mac addr, unknown subcode %d\n", - mbx_req->msg[1]); + "failed to set mcast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); return -EIO; } - if (gen_resp) - hclge_gen_resp_to_vf(vport, mbx_req, status, - &resp_data, resp_len); - return 0; } +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info) +{ +#define MSG_DATA_SIZE 8 + + u8 msg_data[MSG_DATA_SIZE]; + + memcpy(&msg_data[0], &state, sizeof(u16)); + memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16)); + memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16)); + memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16)); + + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HCLGE_MBX_PUSH_VLAN_INFO, vfid); +} + static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_respond_to_vf_msg *resp_msg) { - int status = 0; +#define HCLGE_MBX_VLAN_STATE_OFFSET 0 +#define HCLGE_MBX_VLAN_INFO_OFFSET 2 + struct hnae3_handle *handle = &vport->nic; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + switch (msg_cmd->subcode) { + case HCLGE_MBX_VLAN_FILTER: + return hclge_set_vlan_filter(handle, + cpu_to_be16(msg_cmd->proto), + msg_cmd->vlan, msg_cmd->is_kill); + case HCLGE_MBX_VLAN_RX_OFF_CFG: + return hclge_en_hw_strip_rxvtag(handle, 
msg_cmd->enable); + case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: + resp_msg->data[0] = vport->port_base_vlan_cfg.state; + resp_msg->len = sizeof(u8); + return 0; + case HCLGE_MBX_ENABLE_VLAN_FILTER: + return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable); + default: + return 0; + } +} - if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { - struct hnae3_handle *handle = &vport->nic; - u16 vlan, proto; - bool is_kill; +static int hclge_set_vf_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + bool alive = !!mbx_req->msg.data[0]; + int ret = 0; - is_kill = !!mbx_req->msg[2]; - memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); - memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); - status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), - vlan, is_kill); - } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { - struct hnae3_handle *handle = &vport->nic; - bool en = mbx_req->msg[2] ? true : false; + if (alive) + ret = hclge_vport_start(vport); + else + hclge_vport_stop(vport); - status = hclge_en_hw_strip_rxvtag(handle, en); - } + return ret; +} - if (gen_resp) - status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); +static void hclge_get_basic_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; + struct hclge_basic_info *basic_info; + unsigned int i; - return status; + basic_info = (struct hclge_basic_info *)resp_msg->data; + for (i = 0; i < kinfo->tc_info.num_tc; i++) + basic_info->hw_tc_map |= BIT(i); + + if (hnae3_get_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B)) + hnae3_set_bit(basic_info->pf_caps, + HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; } -static int hclge_get_vf_tcinfo(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) +static void hclge_get_vf_queue_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) { +#define HCLGE_TQPS_RSS_INFO_LEN 6 +#define HCLGE_TQPS_ALLOC_OFFSET 0 +#define HCLGE_TQPS_RSS_SIZE_OFFSET 2 +#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_dev *hdev = vport->back; - int ret; - ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map, - sizeof(u8)); + /* get the queue related info */ + memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET], + &vport->alloc_tqps, sizeof(u16)); + memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET], + &vport->nic.kinfo.rss_size, sizeof(u16)); + memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET], + &hdev->rx_buf_len, sizeof(u16)); + resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; +} - return ret; +static void hclge_get_vf_mac_addr(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + ether_addr_copy(resp_msg->data, vport->vf_info.mac); + resp_msg->len = ETH_ALEN; } -static int hclge_get_vf_queue_info(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) +static void hclge_get_vf_queue_depth(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) { -#define HCLGE_TQPS_RSS_INFO_LEN 8 - u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN]; +#define HCLGE_TQPS_DEPTH_INFO_LEN 4 +#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0 +#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_dev *hdev = vport->back; - /* get the queue related info */ - memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_data[4], &hdev->num_desc, 
sizeof(u16)); - memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); + /* get the queue depth info */ + memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET], + &hdev->num_tx_desc, sizeof(u16)); + memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET], + &hdev->num_rx_desc, sizeof(u16)); + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; +} + +static void hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_VF_MEDIA_TYPE_OFFSET 0 +#define HCLGE_VF_MODULE_TYPE_OFFSET 1 +#define HCLGE_VF_MEDIA_TYPE_LENGTH 2 - return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, - HCLGE_TQPS_RSS_INFO_LEN); + struct hclge_dev *hdev = vport->back; + + resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] = + hdev->hw.mac.media_type; + resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] = + hdev->hw.mac.module_type; + resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH; } -static int hclge_get_link_info(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +int hclge_push_vf_link_status(struct hclge_vport *vport) { +#define HCLGE_VF_LINK_STATE_UP 1U +#define HCLGE_VF_LINK_STATE_DOWN 0U + struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; - u8 dest_vfid; + u8 msg_data[9]; u16 duplex; /* mac.link can only be 0 or 1 */ - link_status = (u16)hdev->hw.mac.link; + switch (vport->vf_info.link_state) { + case IFLA_VF_LINK_STATE_ENABLE: + link_status = HCLGE_VF_LINK_STATE_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + link_status = HCLGE_VF_LINK_STATE_DOWN; + break; + case IFLA_VF_LINK_STATE_AUTO: + default: + link_status = (u16)hdev->hw.mac.link; + break; + } + duplex = hdev->hw.mac.duplex; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - dest_vfid = mbx_req->mbx_src_vfid; + msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); + HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); +} + +static void hclge_get_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 msg_data[10]; + u8 dest_vfid; + + memset(msg_data, 0, sizeof(msg_data)); + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + msg_data[0] = mbx_req->msg.data[0]; + + send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising; + + memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); + hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HCLGE_MBX_LINK_STAT_MODE, dest_vfid); } -static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { +#define HCLGE_RESET_ALL_QUEUE_DONE 1U + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; u16 queue_id; + int ret; - memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); + memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; + resp_msg->len = sizeof(u8); - hclge_reset_vf_queue(vport, queue_id); + /* pf will reset vf's all queues at a time. 
So it is unnecessary + * to reset queues if queue_id > 0, just return success. + */ + if (queue_id > 0) + return 0; - /* send response msg to VF after queue reset complete*/ - hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0); + ret = hclge_reset_tqp(handle); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret); + + return ret; } -static void hclge_reset_vf(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +static int hclge_reset_vf(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + + return hclge_func_reset_cmd(hdev, vport->vport_id); +} + +static void hclge_vf_keep_alive(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; int ret; - dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", - mbx_req->mbx_src_vfid); + vport->last_active_jiffies = jiffies; + + if (test_bit(HCLGE_VPORT_STATE_START, &vport->state) && + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); - /* Acknowledge VF that PF is now about to assert the reset for the VF. - * On receiving this message VF will get into pending state and will - * start polling for the hardware reset completion status. + dev_info(&hdev->pdev->dev, "VF %u keep alive resume!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + + /* if vf support push link, need to push link status after keep + * alive restore, because the vf will not fetch the link status + * of it's own. + */ + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret=%d\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + } + } +} + +static int hclge_set_vf_mtu(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + u32 mtu; + + memcpy(&mtu, mbx_req->msg.data, sizeof(mtu)); + + return hclge_set_vport_mtu(vport, mtu); +} + +static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + u16 queue_id, qid_in_pf; + + memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + if (queue_id >= handle->kinfo.num_tqps) { + dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", + queue_id, mbx_req->mbx_src_vfid); + return; + } + + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); + memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); + resp_msg->len = sizeof(qid_in_pf); +} + +static void hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_RSS_MBX_RESP_LEN 8 + struct hclge_dev *hdev = vport->back; + u8 index; + + index = mbx_req->msg.data[0]; + + /* The length of RSS key is large than the length of MBX response. + * So multiple times of copy is needed, which is the meaning of "index". + * And RSS key will be copied 'HCLGE_RSS_MBX_RESP_LEN' byte per time. + * If (index * HCLGE_RSS_MBX_RESP_LEN) large than + * (rss_key_size - HCLGE_RSS_MBX_RESP_LEN), it will cause + * "out-of-bounds array" error, So it's necessary to check with it. 
*/ - ret = hclge_inform_reset_assert_to_vf(vport); - if (ret) { - dev_err(&hdev->pdev->dev, - "PF fail(%d) to inform VF(%d)of reset, reset failed!\n", - ret, vport->vport_id); + if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > + sizeof(vport[0].rss_hash_key)) { + dev_warn(&hdev->pdev->dev, + "failed to get the rss hash key, the index(%u) invalid !\n", + index); return; } - dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", - mbx_req->mbx_src_vfid); - /* reset this virtual function */ - hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); + memcpy(resp_msg->data, + &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + HCLGE_RSS_MBX_RESP_LEN); + resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; +} + +static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) +{ + switch (link_fail_code) { + case HCLGE_LF_REF_CLOCK_LOST: + dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); + break; + case HCLGE_LF_XSFP_TX_DISABLE: + dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); + break; + case HCLGE_LF_XSFP_ABSENT: + dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); + break; + default: + break; + } +} + +static void hclge_handle_link_change_event(struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + int link_status = req->msg.subcode; + + hclge_task_schedule(hdev, 0); + + if (!link_status) + hclge_link_fail_parse(hdev, req->msg.data[0]); } static bool hclge_cmd_crq_empty(struct hclge_hw *hw) @@ -448,24 +761,59 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclge_handle_ncsi_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + + ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); + dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + +static void hclge_handle_vf_tbl(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + } else { + dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", + msg_cmd->subcode); + } +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; + struct hclge_respond_to_vf_msg resp_msg; struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; struct hclge_desc *desc; - int ret, flag; + bool is_del = false; + unsigned int flag; + int ret = 0; /* handle all the mailbox requests in the queue */ while (!hclge_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "command queue needs re-initializing\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, - "dropped invalid mailbox message, code = %d\n", - req->msg[0]); + "dropped invalid mailbox message, code = %u\n", + req->msg.code); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; @@ -475,7 +823,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) vport = &hdev->vport[req->mbx_src_vfid]; - switch (req->msg[0]) { + 
trace_hclge_pf_mbx_get(hdev, req); + + /* clear the resp_msg before processing every mailbox message */ + memset(&resp_msg, 0, sizeof(resp_msg)); + + switch (req->msg.code) { case HCLGE_MBX_MAP_RING_TO_VECTOR: ret = hclge_map_unmap_ring_to_vf_vector(vport, true, req); @@ -484,69 +837,128 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret = hclge_map_unmap_ring_to_vf_vector(vport, false, req); break; - case HCLGE_MBX_SET_PROMISC_MODE: - ret = hclge_set_vf_promisc_mode(vport, req); + case HCLGE_MBX_GET_RING_VECTOR_MAP: + ret = hclge_get_vf_ring_vector_map(vport, req, + &resp_msg); if (ret) dev_err(&hdev->pdev->dev, - "PF fail(%d) to set VF promisc mode\n", + "PF fail(%d) to get VF ring vector map\n", ret); break; + case HCLGE_MBX_SET_PROMISC_MODE: + hclge_set_vf_promisc_mode(vport, req); + break; case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req, true); + ret = hclge_set_vf_uc_mac_addr(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF UC MAC Addr\n", ret); break; case HCLGE_MBX_SET_MULTICAST: - ret = hclge_set_vf_mc_mac_addr(vport, req, false); + ret = hclge_set_vf_mc_mac_addr(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF MC MAC Addr\n", ret); break; case HCLGE_MBX_SET_VLAN: - ret = hclge_set_vf_vlan_cfg(vport, req, false); + ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg); if (ret) dev_err(&hdev->pdev->dev, "PF failed(%d) to config VF's VLAN\n", ret); break; - case HCLGE_MBX_GET_QINFO: - ret = hclge_get_vf_queue_info(vport, req, true); + case HCLGE_MBX_SET_ALIVE: + ret = hclge_set_vf_alive(vport, req); if (ret) dev_err(&hdev->pdev->dev, - "PF failed(%d) to get Q info for VF\n", + "PF failed(%d) to set VF's ALIVE\n", ret); break; - case HCLGE_MBX_GET_TCINFO: - ret = hclge_get_vf_tcinfo(vport, req, true); - if (ret) - dev_err(&hdev->pdev->dev, - "PF failed(%d) to get TC info for VF\n", - ret); + case HCLGE_MBX_GET_QINFO: + hclge_get_vf_queue_info(vport, &resp_msg); + break; + case HCLGE_MBX_GET_QDEPTH: + hclge_get_vf_queue_depth(vport, &resp_msg); + break; + case HCLGE_MBX_GET_BASIC_INFO: + hclge_get_basic_info(vport, &resp_msg); break; case HCLGE_MBX_GET_LINK_STATUS: - ret = hclge_get_link_info(vport, req); + ret = hclge_push_vf_link_status(vport); if (ret) dev_err(&hdev->pdev->dev, - "PF fail(%d) to get link stat for VF\n", + "failed to inform link stat to VF, ret = %d\n", ret); break; case HCLGE_MBX_QUEUE_RESET: - hclge_mbx_reset_vf_queue(vport, req); + ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg); break; case HCLGE_MBX_RESET: - hclge_reset_vf(vport, req); + ret = hclge_reset_vf(vport); + break; + case HCLGE_MBX_KEEP_ALIVE: + hclge_vf_keep_alive(vport); + break; + case HCLGE_MBX_SET_MTU: + ret = hclge_set_vf_mtu(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + break; + case HCLGE_MBX_GET_QID_IN_PF: + hclge_get_queue_id_in_pf(vport, req, &resp_msg); + break; + case HCLGE_MBX_GET_RSS_KEY: + hclge_get_rss_key(vport, req, &resp_msg); + break; + case HCLGE_MBX_GET_LINK_MODE: + hclge_get_link_mode(vport, req); + break; + case HCLGE_MBX_GET_VF_FLR_STATUS: + case HCLGE_MBX_VF_UNINIT: + is_del = req->msg.code == HCLGE_MBX_VF_UNINIT; + hclge_rm_vport_all_mac_table(vport, is_del, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, is_del, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, is_del); + break; + case HCLGE_MBX_GET_MEDIA_TYPE: + hclge_get_vf_media_type(vport, &resp_msg); + break; + case HCLGE_MBX_PUSH_LINK_STATUS: + 
hclge_handle_link_change_event(hdev, req); + break; + case HCLGE_MBX_GET_MAC_ADDR: + hclge_get_vf_mac_addr(vport, &resp_msg); + break; + case HCLGE_MBX_NCSI_ERROR: + hclge_handle_ncsi_error(hdev); + break; + case HCLGE_MBX_HANDLE_VF_TBL: + hclge_handle_vf_tbl(vport, req); break; default: dev_err(&hdev->pdev->dev, - "un-supported mailbox message, code = %d\n", - req->msg[0]); + "un-supported mailbox message, code = %u\n", + req->msg.code); break; } + + /* PF driver should not reply IMP */ + if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && + req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { + resp_msg.status = ret; + hclge_gen_resp_to_vf(vport, req, &resp_msg); + } + crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); + + /* reinitialize ret after complete the mbx message processing */ + ret = 0; } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 398971a062f475202887ca5cbccccae653052bde..d0247f4ab1e8034831beee84e8f1743bd77512ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -2,20 +2,13 @@ // Copyright (c) 2016-2017 Hisilicon Limited. #include +#include #include - +#include "kcompat.h" #include "hclge_cmd.h" #include "hclge_main.h" #include "hclge_mdio.h" -#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - SUPPORTED_Pause | \ - SUPPORTED_Asym_Pause | \ - PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - PHY_1000BT_FEATURES) - enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, HCLGE_MDIO_C22_READ = 2 @@ -54,7 +47,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); @@ -62,9 +55,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); + HCLGE_MDIO_PHYID_S, (u32)phyid); hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + HCLGE_MDIO_PHYREG_S, (u32)regnum); hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, @@ -92,7 +85,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); @@ -100,9 +93,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); + HCLGE_MDIO_PHYID_S, (u32)phyid); hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + HCLGE_MDIO_PHYREG_S, (u32)regnum); hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, @@ -129,13 +122,19 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int hclge_mac_mdio_config(struct hclge_dev *hdev) { +#define PHY_INEXISTENT 255 + struct hclge_mac *mac = &hdev->hw.mac; 
struct phy_device *phydev; struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { - dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n", hdev->hw.mac.phy_addr); return -EINVAL; } @@ -181,6 +180,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) int duplex, speed; int ret; + /* When phy link down, do nothing */ + if (netdev->phydev->link == 0) + return; + speed = netdev->phydev->speed; duplex = netdev->phydev->duplex; @@ -193,15 +196,44 @@ static void hclge_mac_adjust_link(struct net_device *netdev) netdev_err(netdev, "failed to configure flow control.\n"); } -int hclge_mac_connect_phy(struct hclge_dev *hdev) +int hclge_mac_connect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; +#ifdef HAS_LINK_MODE_OPS + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; +#endif int ret; if (!phydev) return 0; + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + +#ifdef HAS_LINK_MODE_OPS + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + linkmode_copy(mask, hdev->hw.mac.supported); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + /* supported flag is Pause and Asym Pause, but default advertising + * should be rx on, tx on, so need clear Asym Pause in advertising + * flag + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); +#else phydev->supported &= ~SUPPORTED_FIBRE; ret = phy_connect_direct(netdev, phydev, @@ -212,14 +244,25 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; + phydev->supported &= *hdev->hw.mac.supported; phydev->advertising = phydev->supported; + /* supported flag is Pause and Asym Pause, but default advertising + * should be rx on, tx on, so need clear Asym Pause in advertising + * flag + */ + phydev->advertising &= ~ADVERTISED_Asym_Pause; +#endif + + phy_attached_info(phydev); + return 0; } -void hclge_mac_disconnect_phy(struct hclge_dev *hdev) +void hclge_mac_disconnect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct phy_device *phydev = hdev->hw.mac.phydev; if (!phydev) @@ -235,6 +278,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev) if (!phydev) return; + phy_loopback(phydev, false); + phy_start(phydev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index 5fbf7dddb5d9ba9d7f4400738837c0c033c26e3e..ef095d9c566f4918713b0e029e2200f4b90fd0ca 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -5,8 +5,8 @@ #define __HCLGE_MDIO_H int hclge_mac_mdio_config(struct hclge_dev *hdev); -int hclge_mac_connect_phy(struct hclge_dev *hdev); -void hclge_mac_disconnect_phy(struct hclge_dev *hdev); 
+int hclge_mac_connect_phy(struct hnae3_handle *handle); +void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 5db70a1451c58c8f683fb67320f39e50640f5d6a..8bed53bcdcc48475069d84b92bab34e4c216767f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -43,18 +43,23 @@ enum hclge_shaper_level { static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, u8 *ir_b, u8 *ir_u, u8 *ir_s) { - const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { +#define DIVISOR_CLK (1000 * 8) +#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK) + + static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { 6 * 256, /* Prioriy level */ 6 * 32, /* Prioriy group level */ 6 * 8, /* Port level */ 6 * 256 /* Qset level */ }; - u8 ir_u_calc = 0, ir_s_calc = 0; + u8 ir_u_calc = 0; + u8 ir_s_calc = 0; u32 ir_calc; u32 tick; /* Calc tick */ - if (shaper_level >= HCLGE_SHAPER_LVL_CNT) + if (shaper_level >= HCLGE_SHAPER_LVL_CNT || + ir > HCLGE_ETHER_MAX_RATE) return -EINVAL; tick = tick_array[shaper_level]; @@ -66,7 +71,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, * ir_calc = ---------------- * 1000 * tick * 1 */ - ir_calc = (1008000 + (tick >> 1) - 1) / tick; + ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick; if (ir_calc == ir) { *ir_b = 126; @@ -76,29 +81,27 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } else if (ir_calc > ir) { /* Increasing the denominator to select ir_s value */ - while (ir_calc > ir) { + while (ir_calc >= ir && ir) { ir_s_calc++; - ir_calc = 1008000 / (tick * (1 << ir_s_calc)); + ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc)); } - if (ir_calc == ir) - *ir_b = 126; - else - *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; + *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) / + DIVISOR_CLK; } else { /* Increasing the numerator to select ir_u value */ u32 numerator; while (ir_calc < ir) { ir_u_calc++; - numerator = 1008000 * (1 << ir_u_calc); + numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc); ir_calc = (numerator + (tick >> 1)) / tick; } if (ir_calc == ir) { *ir_b = 126; } else { - u32 denominator = (8000 * (1 << --ir_u_calc)); + u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc); *ir_b = (ir * tick + (denominator >> 1)) / denominator; } } @@ -109,51 +112,50 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } -static int hclge_pfc_stats_get(struct hclge_dev *hdev, - enum hclge_opcode_type opcode, u64 *stats) -{ - struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM]; - int ret, i, j; - - if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT || - opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) - return -EINVAL; - - for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { - hclge_cmd_setup_basic_desc(&desc[i], opcode, true); - if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1)) - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - else - desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - } +static const u16 hclge_pfc_tx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num), + 
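hclge_shaper_para_calc() above searches for a (ir_b, ir_u, ir_s) triple whose token-bucket rate reproduces the requested rate at the given shaper level. A standalone restatement of that search, using the tick values and divisors from the hunk; the 100000 Mbps upper bound is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define DIVISOR_CLK        (1000 * 8)
#define DIVISOR_IR_B_126   (126 * DIVISOR_CLK)
#define ETHER_MAX_RATE     100000	/* assumed upper bound, in Mbps */

enum shaper_level { LVL_PRI, LVL_PG, LVL_PORT, LVL_QSET, LVL_CNT };

static const uint16_t tick_array[LVL_CNT] = {
	6 * 256,	/* priority level */
	6 * 32,		/* priority group level */
	6 * 8,		/* port level */
	6 * 256		/* qset level */
};

static int shaper_para_calc(uint32_t ir, int level,
			    uint8_t *ir_b, uint8_t *ir_u, uint8_t *ir_s)
{
	uint8_t ir_u_calc = 0, ir_s_calc = 0;
	uint32_t ir_calc, tick;

	if (level >= LVL_CNT || ir > ETHER_MAX_RATE)
		return -1;

	tick = tick_array[level];
	/* rate produced by ir_b = 126, ir_u = 0, ir_s = 0 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;
		return 0;
	} else if (ir_calc > ir) {
		/* too fast: grow the denominator by stepping ir_s */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}
		*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
			DIVISOR_CLK;
	} else {
		/* too slow: grow the numerator by stepping ir_u */
		uint32_t numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}
		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			uint32_t denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;
	return 0;
}

int main(void)
{
	uint8_t b, u, s;

	if (!shaper_para_calc(10000, LVL_PORT, &b, &u, &s))
		printf("10G at port level -> ir_b=%u ir_u=%u ir_s=%u\n", b, u, s);
	return 0;
}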
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num) +}; - ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); - if (ret) - return ret; +static const u16 hclge_pfc_rx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num) +}; - for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { - struct hclge_pfc_stats_cmd *pfc_stats = - (struct hclge_pfc_stats_cmd *)desc[i].data; +static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats) +{ + const u16 *offset; + int i; - for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) { - u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j; + if (tx) + offset = hclge_pfc_tx_stats_offset; + else + offset = hclge_pfc_rx_stats_offset; - if (index < HCLGE_MAX_TC_NUM) - stats[index] = - le64_to_cpu(pfc_stats->pkt_num[j]); - } - } - return 0; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]); } -int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) { - return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats); + hclge_pfc_stats_get(hdev, false, stats); } -int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats) +void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats) { - return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats); + hclge_pfc_stats_get(hdev, true, stats); } int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) @@ -172,7 +174,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap) { struct hclge_desc desc; - struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); @@ -188,11 +190,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); pause_param->pause_trans_gap = pause_trans_gap; pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); @@ -207,7 +210,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) u8 trans_gap; int ret; - pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); @@ -218,8 +221,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) trans_gap = pause_param->pause_trans_gap; trans_time = le16_to_cpu(pause_param->pause_trans_time); - return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, - trans_time); + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); } static int 
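The new hclge_pfc_stats_get() above reads the per-priority PFC counters straight out of the driver's cached MAC statistics through a table of field offsets, instead of issuing a firmware query per call. A small sketch of that offset-table pattern, assuming HCLGE_MAC_STATS_FIELD_OFF() and HCLGE_STATS_READ() behave like offsetof()-style helpers (their definitions are not part of this hunk):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mac_stats {
	uint64_t tx_pfc_pri0_pkt_num;
	uint64_t tx_pfc_pri1_pkt_num;
	uint64_t rx_pfc_pri0_pkt_num;
	uint64_t rx_pfc_pri1_pkt_num;
};

/* assumed equivalents of HCLGE_MAC_STATS_FIELD_OFF()/HCLGE_STATS_READ() */
#define STATS_FIELD_OFF(f)	offsetof(struct mac_stats, f)
#define STATS_READ(base, off)	(*(uint64_t *)((uint8_t *)(base) + (off)))

static const uint16_t tx_offsets[] = {
	STATS_FIELD_OFF(tx_pfc_pri0_pkt_num),
	STATS_FIELD_OFF(tx_pfc_pri1_pkt_num),
};

static const uint16_t rx_offsets[] = {
	STATS_FIELD_OFF(rx_pfc_pri0_pkt_num),
	STATS_FIELD_OFF(rx_pfc_pri1_pkt_num),
};

/* mirrors hclge_pfc_stats_get(): pick the tx or rx table, read by offset */
static void pfc_stats_get(const struct mac_stats *mac, int tx, uint64_t *out)
{
	const uint16_t *offset = tx ? tx_offsets : rx_offsets;
	size_t i;

	for (i = 0; i < 2; i++)
		out[i] = STATS_READ(mac, offset[i]);
}

int main(void)
{
	struct mac_stats mac = { 1, 2, 3, 4 };
	uint64_t stats[2];

	pfc_stats_get(&mac, 1, stats);
	printf("tx pri0=%llu pri1=%llu\n",
	       (unsigned long long)stats[0], (unsigned long long)stats[1]);
	return 0;
}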
hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) @@ -297,7 +299,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, } static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, - u8 q_id, u16 qs_id) + u16 q_id, u16 qs_id) { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; @@ -360,43 +362,50 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + u32 shapping_para = 0; + + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + return shapping_para; +} + static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pg_id, - u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) + u32 shapping_para) { struct hclge_pg_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct hclge_desc desc; - u32 shapping_para = 0; opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : - HCLGE_OPC_TM_PG_C_SHAPPING; + HCLGE_OPC_TM_PG_C_SHAPPING; hclge_cmd_setup_basic_desc(&desc, opcode, false); shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; shap_cfg_cmd->pg_id = pg_id; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, bs_b); - hclge_tm_set_field(shapping_para, BS_S, bs_s); - shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_desc desc; - u32 shapping_para = 0; u8 ir_u, ir_b, ir_s; + u32 shapping_para; int ret; - ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE, + ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT, &ir_b, &ir_u, &ir_s); if (ret) @@ -405,11 +414,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF); - hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF); + shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); @@ -418,16 +425,14 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pri_id, - u8 ir_b, u8 ir_u, u8 ir_s, - u8 bs_b, u8 bs_s) + u32 shapping_para) { struct hclge_pri_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct hclge_desc desc; - u32 shapping_para = 0; opcode = bucket ? 
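The new hclge_tm_get_shapping_para() helper above packs the five shaper fields into one 32-bit word with hclge_tm_set_field(), and the query helpers added later in the patch unpack it again with hclge_tm_get_field(). A roundtrip sketch of that mask/shift packing; only the IR_B (bits 7:0) and IR_U (bits 11:8) layouts are visible in the header hunk, so the remaining bit positions below are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

/* IR_B/IR_U follow the header hunk; IR_S/BS_B/BS_S positions are assumed */
#define SHAP_IR_B_MSK  0x000000ffu
#define SHAP_IR_B_LSH  0
#define SHAP_IR_U_MSK  0x00000f00u
#define SHAP_IR_U_LSH  8
#define SHAP_IR_S_MSK  0x0000f000u
#define SHAP_IR_S_LSH  12
#define SHAP_BS_B_MSK  0x001f0000u
#define SHAP_BS_B_LSH  16
#define SHAP_BS_S_MSK  0x03e00000u
#define SHAP_BS_S_LSH  21

static void set_field(uint32_t *w, uint32_t msk, uint32_t lsh, uint32_t val)
{
	*w = (*w & ~msk) | ((val << lsh) & msk);
}

static uint32_t get_field(uint32_t w, uint32_t msk, uint32_t lsh)
{
	return (w & msk) >> lsh;
}

static uint32_t pack_shaper(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
			    uint8_t bs_b, uint8_t bs_s)
{
	uint32_t para = 0;

	set_field(&para, SHAP_IR_B_MSK, SHAP_IR_B_LSH, ir_b);
	set_field(&para, SHAP_IR_U_MSK, SHAP_IR_U_LSH, ir_u);
	set_field(&para, SHAP_IR_S_MSK, SHAP_IR_S_LSH, ir_s);
	set_field(&para, SHAP_BS_B_MSK, SHAP_BS_B_LSH, bs_b);
	set_field(&para, SHAP_BS_S_MSK, SHAP_BS_S_LSH, bs_s);
	return para;
}

int main(void)
{
	uint32_t para = pack_shaper(126, 0, 2, 5, 20);

	printf("para=0x%08x ir_b=%u ir_s=%u\n", para,
	       get_field(para, SHAP_IR_B_MSK, SHAP_IR_B_LSH),
	       get_field(para, SHAP_IR_S_MSK, SHAP_IR_S_LSH));
	return 0;
}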
HCLGE_OPC_TM_PRI_P_SHAPPING : - HCLGE_OPC_TM_PRI_C_SHAPPING; + HCLGE_OPC_TM_PRI_C_SHAPPING; hclge_cmd_setup_basic_desc(&desc, opcode, false); @@ -435,12 +440,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_id = pri_id; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, bs_b); - hclge_tm_set_field(shapping_para, BS_S, bs_s); - shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -512,39 +511,141 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, return hclge_cmd_send(&hdev->hw, &desc, 1); } +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 ir_b, ir_u, ir_s; + u32 shaper_para; + int ret, i; + + if (!max_tx_rate) + max_tx_rate = HCLGE_ETHER_MAX_RATE; + + ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, + false); + + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i); + shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n", + vport->vport_id, shap_cfg_cmd->qs_id, + max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + u16 max_rss_size = 0; + int i; + + if (!tc_info->mqprio_active) + return vport->alloc_tqps / tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) + continue; + if (max_rss_size < tc_info->tqp_count[i]) + max_rss_size = tc_info->tqp_count[i]; + } + + return max_rss_size; +} + +static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + int sum = 0; + int i; + + if (!tc_info->mqprio_active) + return kinfo->rss_size * tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) + sum += tc_info->tqp_count[i]; + } + + return sum; +} + static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; + u16 max_rss_size; u8 i; - vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - kinfo->num_tc = - min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, - kinfo->num_tqps / kinfo->num_tc); - vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + /* TC configuration is shared by PF/VF in one port, only allow + * one tc for VF for simplicity. VF's vport_id is non zero. 
+ */ + kinfo->tc_info.num_tc = vport->vport_id ? 1 : + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); + vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) + + (vport->vport_id ? (vport->vport_id - 1) : 0); + + max_rss_size = min_t(u16, hdev->rss_size_max, + hclge_vport_get_max_rss_size(vport)); + + /* Set to user value, no larger than max_rss_size. */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* Set to the maximum specification value (max_rss_size). */ + kinfo->rss_size = max_rss_size; + } + + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + + /* when enable mqprio, the tc_info has been updated. */ + if (kinfo->tc_info.mqprio_active) + return; - for (i = 0; i < kinfo->num_tc; i++) { - if (hdev->hw_tc_map & BIT(i)) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; } else { /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; + kinfo->tc_info.tqp_offset[i] = 0; + kinfo->tc_info.tqp_count[i] = 1; } } - memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, - FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, + FIELD_SIZEOF(struct hnae3_tc_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) @@ -574,22 +675,18 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) hdev->tm_info.prio_tc[i] = (i >= hdev->tm_info.num_tc) ? 0 : i; - - /* DCB is enabled if we have more than 1 TC */ - if (hdev->tm_info.num_tc > 1) - hdev->flag |= HCLGE_FLAG_DCB_ENABLE; - else - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; } static void hclge_tm_pg_info_init(struct hclge_dev *hdev) { +#define BW_PERCENT 100 + u8 i; for (i = 0; i < hdev->tm_info.num_pg; i++) { int k; - hdev->tm_info.pg_dwrr[i] = i ? 0 : 100; + hdev->tm_info.pg_dwrr[i] = i ? 
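hclge_tm_vport_tc_info_update() above picks the vport's rss_size from three inputs: the user-requested size, the current size, and the hardware ceiling derived from hclge_vport_get_max_rss_size(). A compact restatement of just that selection, following the conditions in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the rss_size update in hclge_tm_vport_tc_info_update():
 * - honour a user request that fits under the ceiling,
 * - otherwise clamp to the ceiling when the current size exceeds it,
 *   or grow to the ceiling when no request was made.
 */
static uint16_t pick_rss_size(uint16_t cur, uint16_t req, uint16_t max)
{
	if (req && req != cur && req <= max)
		return req;
	if (cur > max || (!req && cur < max))
		return max;
	return cur;
}

int main(void)
{
	printf("%u\n", pick_rss_size(16, 4, 32));	/* user request wins: 4 */
	printf("%u\n", pick_rss_size(64, 0, 32));	/* clamp to ceiling: 32 */
	printf("%u\n", pick_rss_size(16, 0, 32));	/* grow to ceiling: 32 */
	return 0;
}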
0 : BW_PERCENT; hdev->tm_info.pg_info[i].pg_id = i; hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; @@ -601,16 +698,18 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; for (k = 0; k < hdev->tm_info.num_tc; k++) - hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; + hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; + for (; k < HNAE3_MAX_TC; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = 0; } } -static void hclge_pfc_info_init(struct hclge_dev *hdev) +void hclge_tm_pfc_info_update(struct hclge_dev *hdev) { - if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) { + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) { if (hdev->fc_mode_last_time == HCLGE_FC_PFC) dev_warn(&hdev->pdev->dev, - "DCB is disable, but last mode is FC_PFC\n"); + "Only 1 tc used, but last mode is FC_PFC\n"); hdev->tm_info.fc_mode = hdev->fc_mode_last_time; } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { @@ -623,21 +722,15 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev) } } -static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +static void hclge_tm_schd_info_init(struct hclge_dev *hdev) { - if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && - (hdev->tm_info.num_pg != 1)) - return -EINVAL; - hclge_tm_pg_info_init(hdev); hclge_tm_tc_info_init(hdev); hclge_tm_vport_info_update(hdev); - hclge_pfc_info_init(hdev); - - return 0; + hclge_tm_pfc_info_update(hdev); } static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) @@ -662,6 +755,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) { u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; u32 i; @@ -679,18 +773,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para); if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - ir_b, ir_u, ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para); if (ret) return ret; } @@ -710,8 +807,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) /* pg to prio */ for (i = 0; i < hdev->tm_info.num_pg; i++) { /* Cfg dwrr */ - ret = hclge_tm_pg_weight_cfg(hdev, i, - hdev->tm_info.pg_dwrr[i]); + ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]); if (ret) return ret; } @@ -723,15 +819,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; struct hnae3_queue **tqp = kinfo->tqp; - struct hnae3_tc_info *v_tc_info; u32 i, j; int ret; - for (i = 0; i < kinfo->num_tc; i++) { - v_tc_info = &kinfo->tc_info[i]; - for (j = 0; j < v_tc_info->tqp_count; j++) { - struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + for (i = 0; i < tc_info->num_tc; i++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j]; ret = hclge_tm_q_to_qs_map_cfg(hdev, hclge_get_queue_id(q), @@ -752,13 +847,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, one by one mapping */ - for (k = 
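The renamed hclge_tm_pfc_info_update() above switches the flow-control mode based on whether more than one TC, or any PFC priority, is in use, and keeps the previous mode so it can be restored. The unchanged body of the else branch is not shown in this hunk, so the sketch below fills it in (remember the current mode, then switch to PFC) and that part should be read as an assumption:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL, FC_PFC };

struct tm_state {
	int num_tc;
	int pfc_en;		/* bitmap of PFC-enabled priorities */
	enum fc_mode fc_mode;
	enum fc_mode fc_mode_last_time;
};

/* Sketch of hclge_tm_pfc_info_update(): fall back to the remembered pause
 * mode when only one TC and no PFC priority is in use; otherwise remember
 * the current mode and switch to PFC (the else body is assumed, it is not
 * visible in the hunk).
 */
static void pfc_info_update(struct tm_state *tm)
{
	if (tm->num_tc == 1 && !tm->pfc_en) {
		if (tm->fc_mode_last_time == FC_PFC)
			printf("only 1 tc used, but last mode was PFC\n");
		tm->fc_mode = tm->fc_mode_last_time;
	} else if (tm->fc_mode != FC_PFC) {
		tm->fc_mode_last_time = tm->fc_mode;
		tm->fc_mode = FC_PFC;
	}
}

int main(void)
{
	struct tm_state tm = { 4, 0x0f, FC_FULL, FC_FULL };

	pfc_info_update(&tm);		/* multi-TC with PFC -> FC_PFC */
	printf("mode=%d last=%d\n", tm.fc_mode, tm.fc_mode_last_time);

	tm.num_tc = 1;
	tm.pfc_en = 0;
	pfc_info_update(&tm);		/* back to the remembered mode */
	printf("mode=%d last=%d\n", tm.fc_mode, tm.fc_mode_last_time);
	return 0;
}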
0; k < hdev->num_alloc_vport; k++) - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = + &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_to_pri_map_cfg( hdev, vport[k].qs_offset + i, i); if (ret) return ret; } + } } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ for (k = 0; k < hdev->num_alloc_vport; k++) @@ -787,6 +886,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; u32 i; @@ -798,17 +898,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_tm_pri_shapping_cfg( - hdev, HCLGE_TM_SHAP_C_BUCKET, i, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para); if (ret) return ret; - ret = hclge_tm_pri_shapping_cfg( - hdev, HCLGE_TM_SHAP_P_BUCKET, i, - ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para); if (ret) return ret; } @@ -820,6 +922,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, @@ -827,18 +930,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, - vport->vport_id, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + vport->vport_id, shaper_para); if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, - vport->vport_id, - ir_b, ir_u, ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + vport->vport_id, shaper_para); if (ret) return ret; @@ -853,7 +957,7 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) u32 i; int ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_shaper_para_calc( hdev->tm_info.tc_info[i].bw_limit, HCLGE_SHAPER_LVL_QSET, @@ -933,6 +1037,36 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return 0; } +static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) +{ +#define DEFAULT_TC_WEIGHT 1 +#define DEFAULT_TC_OFFSET 14 + + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + unsigned int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hclge_pg_info *pg_info; + + ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + 
ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -946,7 +1080,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) return ret; /* Qset dwrr */ - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_weight_cfg( hdev, vport->qs_offset + i, hdev->tm_info.pg_info[0].tc_dwrr[i]); @@ -982,6 +1116,19 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); if (ret) return ret; + + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_tm_ets_tc_dwrr_cfg(hdev); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "fw %08x does't support ets tc weight cmd\n", + hdev->fw_version); + ret = 0; + } + + return ret; } else { ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); if (ret) @@ -991,7 +1138,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) return 0; } -int hclge_tm_map_cfg(struct hclge_dev *hdev) +static int hclge_tm_map_cfg(struct hclge_dev *hdev) { int ret; @@ -1057,11 +1204,14 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) int ret; u8 i; + if (vport->vport_id >= HNAE3_MAX_TC) + return -EINVAL; + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); if (ret) return ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, @@ -1106,7 +1256,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) return 0; } -int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) { int ret; @@ -1117,7 +1267,7 @@ int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) return hclge_tm_lvl34_schd_mode_cfg(hdev); } -static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) { int ret; @@ -1145,8 +1295,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) struct hclge_mac *mac = &hdev->hw.mac; return hclge_pause_param_cfg(hdev, mac->mac_addr, - HCLGE_DEFAULT_PAUSE_TRANS_GAP, - HCLGE_DEFAULT_PAUSE_TRANS_TIME); + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + HCLGE_DEFAULT_PAUSE_TRANS_TIME); } static int hclge_pfc_setup_hw(struct hclge_dev *hdev) @@ -1158,7 +1308,7 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) HCLGE_RX_MAC_PAUSE_EN_MSK; return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, - hdev->tm_info.hw_pfc_map); + hdev->tm_info.pfc_en); } /* Each Tc has a 1024 queue sets to backpress, it divides to @@ -1167,14 +1317,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) */ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) { - struct hclge_vport *vport = hdev->vport; - u32 i, k, qs_bitmap; - int ret; + int i; for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { - qs_bitmap = 0; + u32 qs_bitmap = 0; + int k, ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hclge_vport *vport = &hdev->vport[k]; u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; @@ -1184,8 +1334,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); - - vport++; } ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); @@ -1229,10 +1377,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 
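The reworked hclge_bp_setup_hw() above spreads the back-pressure enable bits for one TC over 32 group registers: each vport contributes qset (vport->qs_offset + tc), whose low 5 bits select a position inside a group and whose remaining bits select the group. A sketch of that split; only the 5-bit sub-group mask (GENMASK(4, 0)) is visible in the header hunk, so treating the group index as qs_id >> 5 is an assumption:

#include <stdint.h>
#include <stdio.h>

#define BP_GRP_NUM	32
#define BP_SUB_GRP_BITS	5			/* GENMASK(4, 0) in the header */
#define BP_SUB_GRP_MSK	((1u << BP_SUB_GRP_BITS) - 1)

/* Build the qset bitmap for back-pressure group 'grp' of traffic class 'tc',
 * given each vport's qset offset. Mirrors the loop in hclge_bp_setup_hw().
 */
static uint32_t bp_group_bitmap(const uint16_t *qs_offset, int num_vport,
				uint8_t tc, uint32_t grp)
{
	uint32_t qs_bitmap = 0;
	int k;

	for (k = 0; k < num_vport; k++) {
		uint16_t qs_id = qs_offset[k] + tc;
		uint32_t sub_grp = qs_id & BP_SUB_GRP_MSK;

		/* assumed group field: the bits above the sub-group bits */
		if ((uint32_t)(qs_id >> BP_SUB_GRP_BITS) == grp)
			qs_bitmap |= 1u << sub_grp;
	}

	return qs_bitmap;
}

int main(void)
{
	uint16_t qs_offset[] = { 0, 8, 40 };	/* PF + two VFs, illustrative */
	uint32_t grp;

	for (grp = 0; grp < BP_GRP_NUM; grp++) {
		uint32_t map = bp_group_bitmap(qs_offset, 3, 1, grp);

		if (map)
			printf("grp %u: 0x%08x\n", grp, map);
	}
	return 0;
}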
} -int hclge_pause_setup_hw(struct hclge_dev *hdev) +static int hclge_tm_bp_setup(struct hclge_dev *hdev) +{ + int ret = 0; + int i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_bp_setup_hw(hdev, i); + if (ret) + return ret; + } + + return ret; +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) { int ret; - u8 i; ret = hclge_pause_param_setup_hw(hdev); if (ret) @@ -1246,42 +1407,42 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (!hnae3_dev_dcb_supported(hdev)) return 0; - /* When MAC is GE Mode, hdev does not support pfc setting */ + /* GE MAC does not support PFC, when driver is initializing and MAC + * is in GE Mode, ignore the error here, otherwise initialization + * will fail. + */ ret = hclge_pfc_setup_hw(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); - - for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_bp_setup_hw(hdev, i); - if (ret) - return ret; + if (init && ret == -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n", + ret); + return ret; } - return 0; + return hclge_tm_bp_setup(hdev); } -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) { struct hclge_vport *vport = hdev->vport; struct hnae3_knic_private_info *kinfo; u32 i, k; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - if (prio_tc[i] >= hdev->tm_info.num_tc) - return -EINVAL; hdev->tm_info.prio_tc[i] = prio_tc[i]; for (k = 0; k < hdev->num_alloc_vport; k++) { kinfo = &vport[k].nic.kinfo; - kinfo->prio_tc[i] = prio_tc[i]; + kinfo->tc_info.prio_tc[i] = prio_tc[i]; } } - return 0; } void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { - u8 i, bit_map = 0; + u8 bit_map = 0; + u8 i; hdev->tm_info.num_tc = num_tc; @@ -1298,7 +1459,7 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hclge_tm_schd_info_init(hdev); } -int hclge_tm_init_hw(struct hclge_dev *hdev) +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) { int ret; @@ -1310,7 +1471,7 @@ int hclge_tm_init_hw(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, init); if (ret) return ret; @@ -1319,15 +1480,379 @@ int hclge_tm_init_hw(struct hclge_dev *hdev) int hclge_tm_schd_init(struct hclge_dev *hdev) { - int ret; - /* fc_mode is HCLGE_FC_FULL on reset */ hdev->tm_info.fc_mode = HCLGE_FC_FULL; hdev->fc_mode_last_time = hdev->tm_info.fc_mode; - ret = hclge_tm_schd_info_init(hdev); + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && + hdev->tm_info.num_pg != 1) + return -EINVAL; + + hclge_tm_schd_info_init(hdev); + + return hclge_tm_init_hw(hdev, true); +} + +int hclge_tm_vport_map_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + + hclge_tm_vport_tc_info_update(vport); + + ret = hclge_vport_q_to_qs_map(hdev, vport); if (ret) return ret; - return hclge_tm_init_hw(hdev); + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) + return 0; + + return hclge_tm_bp_setup(hdev); +} + +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num) +{ + *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev); + return 0; +} + +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num) +{ + *pri_num = HCLGE_TM_PF_MAX_PRI_NUM; + return 0; +} + +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld) +{ + struct 
hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true); + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + map->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset map priority, ret = %d\n", ret); + return ret; + } + + *priority = map->priority; + *link_vld = map->link_vld; + return 0; +} + +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode) +{ + struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true); + qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data; + qs_sch_mode->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset sch mode, ret = %d\n", ret); + return ret; + } + + *mode = qs_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight) +{ + struct hclge_qs_weight_cmd *qs_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true); + qs_weight = (struct hclge_qs_weight_cmd *)desc.data; + qs_weight->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset weight, ret = %d\n", ret); + return ret; + } + + *weight = qs_weight->dwrr; + return 0; +} + +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para) +{ + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset %u shaper, ret = %d\n", qset_id, + ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = 0; + para->rate = 0; + return 0; +} + +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode) +{ + struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true); + pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data; + pri_sch_mode->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority sch mode, ret = %d\n", ret); + return ret; + } + + *mode = pri_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight) +{ + struct hclge_priority_weight_cmd *priority_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true); + priority_weight = (struct hclge_priority_weight_cmd *)desc.data; + priority_weight->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority weight, ret = %d\n", ret); + return 
ret; + } + + *weight = priority_weight->dwrr; + return 0; +} + +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING && + cmd != HCLGE_OPC_TM_PRI_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + shap_cfg_cmd->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = 0; + para->rate = 0; + + return 0; +} + +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + int ret; + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true); + map->nq_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to qset map, ret = %d\n", ret); + return ret; + } + + *qset_id = hnae3_get_field(le16_to_cpu(map->qset_id), + HCLGE_TM_QS_ID_MSK, + HCLGE_TM_QS_ID_S); + return 0; +} + +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id) +{ +#define HCLGE_TM_TC_MASK 0x7 + + struct hclge_tqp_tx_queue_tc_cmd *tc; + struct hclge_desc desc; + int ret; + + tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true); + tc->queue_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to tc map, ret = %d\n", ret); + return ret; + } + + *tc_id = tc->tc_id & HCLGE_TM_TC_MASK; + return 0; +} + +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true); + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + map->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg to pri map, ret = %d\n", ret); + return ret; + } + + *pri_bit_map = map->pri_bit_map; + return 0; +} + +int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight) +{ + struct hclge_pg_weight_cmd *pg_weight_cmd; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true); + pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data; + pg_weight_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg weight, ret = %d\n", ret); + return ret; + } + + *weight = pg_weight_cmd->dwrr; + return 0; +} + +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true); + desc.data[0] = cpu_to_le32(pg_id); + ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg sch mode, ret = %d\n", ret); + return ret; + } + + *mode = (u8)le32_to_cpu(desc.data[1]); + return 0; +} + +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING && + cmd != HCLGE_OPC_TM_PG_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + shap_cfg_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = 0; + para->rate = 0; + return 0; +} + +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para) +{ + struct hclge_port_shapping_cmd *port_shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get port shaper, ret = %d\n", ret); + return ret; + } + + port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = 0; + para->rate = 0; + + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index dd4c194747c16cbf4a716d811dd4102615231e2f..c54f2dc4351b38b4aa2ac344ef97e9afa57546d2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -12,13 +12,16 @@ #define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) -#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF +#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F #define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF /* SP or DWRR */ #define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) #define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) +#define HCLGE_TM_PF_MAX_PRI_NUM 8 +#define HCLGE_TM_PF_MAX_QSET_NUM 8 + struct hclge_pg_to_pri_link_cmd { u8 pg_id; u8 rsvd1[3]; @@ -37,9 +40,18 @@ struct hclge_nq_to_qs_link_cmd { __le16 nq_id; __le16 rsvd; #define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) +#define HCLGE_TM_QS_ID_MSK GENMASK(9, 0) +#define HCLGE_TM_QS_ID_S 0 __le16 qset_id; }; +struct hclge_tqp_tx_queue_tc_cmd { + __le16 queue_id; + __le16 rsvd; + u8 tc_id; + u8 rev[3]; +}; + struct hclge_pg_weight_cmd { u8 pg_id; u8 dwrr; @@ -50,11 +62,29 @@ struct hclge_priority_weight_cmd { u8 dwrr; }; +struct hclge_pri_sch_mode_cfg_cmd { + u8 pri_id; + u8 rsvd[3]; + u8 sch_mode; +}; + +struct hclge_qs_sch_mode_cfg_cmd { + __le16 qs_id; + u8 rsvd[2]; + u8 sch_mode; +}; + struct hclge_qs_weight_cmd { __le16 qs_id; u8 
dwrr; }; +struct hclge_ets_tc_weight_cmd { + u8 tc_weight[HNAE3_MAX_TC]; + u8 weight_offset; + u8 rsvd[15]; +}; + #define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) #define HCLGE_TM_SHAP_IR_B_LSH 0 #define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) @@ -83,6 +113,12 @@ struct hclge_pg_shapping_cmd { __le32 pg_shapping_para; }; +struct hclge_qs_shapping_cmd { + __le16 qs_id; + u8 rsvd[2]; + __le32 qs_shapping_para; +}; + #define HCLGE_BP_GRP_NUM 32 #define HCLGE_BP_SUB_GRP_ID_S 0 #define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) @@ -106,6 +142,10 @@ struct hclge_cfg_pause_param_cmd { u8 pause_trans_gap; u8 rsvd; __le16 pause_trans_time; + u8 rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; }; struct hclge_pfc_stats_cmd { @@ -116,6 +156,16 @@ struct hclge_port_shapping_cmd { __le32 port_shapping_para; }; +struct hclge_tm_shaper_para { + u32 rate; + u8 ir_b; + u8 ir_u; + u8 ir_s; + u8 bs_b; + u8 bs_s; + u8 flag; +}; + #define hclge_tm_set_field(dest, string, val) \ hnae3_set_field((dest), \ (HCLGE_TM_SHAP_##string##_MSK), \ @@ -125,15 +175,42 @@ struct hclge_port_shapping_cmd { (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); -int hclge_pause_setup_hw(struct hclge_dev *hdev); -int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +int hclge_tm_vport_map_update(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init); +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev); +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +void hclge_tm_pfc_info_update(struct hclge_dev *hdev); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); -int hclge_tm_map_cfg(struct hclge_dev *hdev); -int hclge_tm_init_hw(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); -int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); -int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); +void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld); +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode); +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight); +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode); +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight); +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id); +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id); +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map); +int hclge_tm_get_pg_weight(struct hclge_dev 
*hdev, u8 pg_id, u8 *weight); +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode); +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..5b0b71bd61200e47d70e74895d30533a95092bc1 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2020 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGE_TRACE_H_ + +#include + +#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) +#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_pf_mbx_get, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_pf_mbx_send, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = req->msg.code; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGE_TRACE_H_ */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclge_trace +#include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd358455a735880d6e83cd314c0773a636a..aa06b79e85a375b6c2ec8654e8443ac96fba00ff 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -3,7 +3,8 @@ # Makefile for the HISILICON network device drivers. 
# -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 +ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index fb471fe2c4946692e1c36b31bf92ec325812a372..84f54a7c2e4d8d6b51331e3af1e8089f27f88b11 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -7,6 +7,7 @@ #include #include #include +#include "kcompat.h" #include "hclgevf_cmd.h" #include "hclgevf_main.h" #include "hnae3.h" @@ -27,26 +28,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) return ring->desc_num - used - 1; } +static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, + int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) { + struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclgevf_desc *desc; - int clean = 0; + int clean; u32 head; - desc = &csq->desc[ntc]; head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; + rmb(); /* Make sure head is ready before touch any data */ + + if (!hclgevf_is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EIO; } - csq->next_to_clean = ntc; + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -61,7 +75,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) static bool hclgevf_is_special_opcode(u16 opcode) { - u16 spec_opcode[] = {0x30, 0x31, 0x32}; + static const u16 spec_opcode[] = {0x30, 0x31, 0x32}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -72,6 +86,59 @@ static bool hclgevf_is_special_opcode(u16 opcode) return false; } +static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) +{ + struct hclgevf_dev *hdev = ring->dev; + struct hclgevf_hw *hw = &hdev->hw; + u32 reg_val; + + if (ring->flag == HCLGEVF_TYPE_CSQ) { + reg_val = lower_32_bits(ring->desc_dma_addr); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); + reg_val = upper_32_bits(ring->desc_dma_addr); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); + + reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGEVF_NIC_SW_RST_RDY; + reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + } else { + reg_val = lower_32_bits(ring->desc_dma_addr); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); + reg_val = upper_32_bits(ring->desc_dma_addr); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num 
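hclgevf_cmd_csq_clean() above now trusts the hardware head pointer only after hclgevf_is_valid_csq_clean_head() confirms it lies between the software next_to_clean and next_to_use (allowing for ring wrap-around), and then derives the number of retired descriptors with modular arithmetic instead of walking the ring. A standalone sketch of both checks:

#include <stdio.h>

/* Is 'head' inside [ntc, ntu] on the ring? Mirrors
 * hclgevf_is_valid_csq_clean_head().
 */
static int valid_clean_head(int ntc, int ntu, int head)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;	/* wrapped ring */
}

/* How many descriptors does moving next_to_clean to 'head' retire? */
static int clean_count(int ntc, int head, int num)
{
	return (head - ntc + num) % num;
}

int main(void)
{
	int num = 1024;

	/* un-wrapped: ntc=10, ntu=20, hardware reports head=15 */
	printf("valid=%d cleaned=%d\n",
	       valid_clean_head(10, 20, 15), clean_count(10, 15, num));

	/* wrapped: ntc=1020, ntu=4, hardware reports head=2 */
	printf("valid=%d cleaned=%d\n",
	       valid_clean_head(1020, 4, 2), clean_count(1020, 2, num));

	/* head outside the window -> driver disables the command queue */
	printf("valid=%d\n", valid_clean_head(10, 20, 600));
	return 0;
}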
>> HCLGEVF_NIC_CMQ_DESC_NUM_S); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + } +} + +static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw) +{ + hclgevf_cmd_config_regs(&hw->cmq.csq); + hclgevf_cmd_config_regs(&hw->cmq.crq); +} + +static void hclgevf_cmd_clear_regs(struct hclgevf_hw *hw) +{ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); +} + static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); @@ -96,61 +163,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) } } -static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, - struct hclgevf_cmq_ring *ring) +static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type) { struct hclgevf_hw *hw = &hdev->hw; - int ring_type = ring->flag; - u32 reg_val; + struct hclgevf_cmq_ring *ring = + (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; int ret; - ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - spin_lock_init(&ring->lock); - ring->next_to_clean = 0; - ring->next_to_use = 0; ring->dev = hdev; + ring->flag = ring_type; /* allocate CSQ/CRQ descriptor */ ret = hclgevf_alloc_cmd_desc(ring); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); - return ret; - } - - /* initialize the hardware registers with csq/crq dma-address, - * descriptor number, head & tail pointers - */ - switch (ring_type) { - case HCLGEVF_TYPE_CSQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - break; - case HCLGEVF_TYPE_CRQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - break; - } - - return 0; + return ret; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, @@ -166,6 +195,38 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR); } +static int hclgevf_cmd_convert_err_code(u16 desc_ret) +{ + switch (desc_ret) { + case HCLGEVF_CMD_EXEC_SUCCESS: + return 0; + case HCLGEVF_CMD_NO_AUTH: + return -EPERM; + case HCLGEVF_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HCLGEVF_CMD_QUEUE_FULL: + return -EXFULL; + case HCLGEVF_CMD_NEXT_ERR: + return -ENOSR; + case HCLGEVF_CMD_UNEXE_ERR: + return -ENOTBLK; + case HCLGEVF_CMD_PARA_ERR: + return -EINVAL; + case HCLGEVF_CMD_RESULT_ERR: + return -ERANGE; + case HCLGEVF_CMD_TIMEOUT: + return -ETIME; + case HCLGEVF_CMD_HILINK_ERR: + return -ENOLINK; + case HCLGEVF_CMD_QUEUE_ILLEGAL: + return -ENXIO; + case HCLGEVF_CMD_INVALID: + return -EBADR; + default: + return -EIO; + } +} + /* hclgevf_cmd_send - send command to command queue * @hw: pointer to the hw struct * @desc: prefilled descriptor for describing the command @@ -177,6 +238,7 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) { struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; + struct hclgevf_cmq_ring *csq = &hw->cmq.csq; struct hclgevf_desc *desc_to_use; bool complete = false; u32 timeout = 0; @@ -188,7 +250,17 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + if (num > hclgevf_ring_space(&hw->cmq.csq)) { + /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, + * need update the SW HEAD pointer csq->next_to_clean + */ + csq->next_to_clean = hclgevf_read_dev(hw, + HCLGEVF_NIC_CSQ_HEAD_REG); spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -237,11 +309,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) else retval = le16_to_cpu(desc[0].retval); - if ((enum hclgevf_cmd_return_status)retval == - HCLGEVF_CMD_EXEC_SUCCESS) - status = 0; - else - status = -EIO; + status = hclgevf_cmd_convert_err_code(retval); hw->cmq.last_status = (enum hclgevf_cmd_status)retval; ntc++; 
handle++; @@ -251,14 +319,13 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) } if (!complete) - status = -EAGAIN; + status = -EBADE; /* Clean the command send queue */ handle = hclgevf_cmd_csq_clean(hw); - if (handle != num) { + if (handle != num) dev_warn(&hdev->pdev->dev, "cleaned %d, need to clean %d\n", handle, num); - } spin_unlock_bh(&hw->cmq.csq.lock); @@ -282,59 +349,116 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, return status; } -int hclgevf_cmd_init(struct hclgevf_dev *hdev) +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) { - u32 version; int ret; - /* setup Tx write back timeout */ + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + + /* clear up all command register, + * in case there are some residual values + */ + hclgevf_cmd_clear_regs(&hdev->hw); + hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; + hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - /* setup queue CSQ/CRQ rings */ - hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CSQ ring\n", ret); + "CSQ ring setup error %d\n", ret); return ret; } - hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CRQ ring\n", ret); + "CRQ ring setup error %d\n", ret); goto err_csq; } + return 0; +err_csq: + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +int hclgevf_cmd_init(struct hclgevf_dev *hdev) +{ + u32 version; + int ret; + + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + /* initialize the pointers of async rx queue of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; - hdev->arq.count = 0; + atomic_set(&hdev->arq.count, 0); + hdev->hw.cmq.csq.next_to_clean = 0; + hdev->hw.cmq.csq.next_to_use = 0; + hdev->hw.cmq.crq.next_to_clean = 0; + hdev->hw.cmq.crq.next_to_use = 0; + + hclgevf_cmd_init_regs(&hdev->hw); + + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. 
+ */ + if (hclgevf_is_reset_pending(hdev)) { + ret = -EBUSY; + goto err_cmd_init; + } /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - goto err_crq; + goto err_cmd_init; } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); return 0; -err_crq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); -err_csq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + +err_cmd_init: + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); return ret; } void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + hclgevf_cmd_clear_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 19b32860309ca9a8b5412a760bf66707a6a6b0b4..c87ec29f4f380997c01c1044bbfad1bab7255c78 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -8,6 +8,7 @@ #include "hnae3.h" #define HCLGEVF_CMDQ_TX_TIMEOUT 30000 +#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 @@ -46,9 +47,17 @@ struct hclgevf_cmq_ring { enum hclgevf_cmd_return_status { HCLGEVF_CMD_EXEC_SUCCESS = 0, - HCLGEVF_CMD_NO_AUTH = 1, - HCLGEVF_CMD_NOT_EXEC = 2, - HCLGEVF_CMD_QUEUE_FULL = 3, + HCLGEVF_CMD_NO_AUTH = 1, + HCLGEVF_CMD_NOT_SUPPORTED = 2, + HCLGEVF_CMD_QUEUE_FULL = 3, + HCLGEVF_CMD_NEXT_ERR = 4, + HCLGEVF_CMD_UNEXE_ERR = 5, + HCLGEVF_CMD_PARA_ERR = 6, + HCLGEVF_CMD_RESULT_ERR = 7, + HCLGEVF_CMD_TIMEOUT = 8, + HCLGEVF_CMD_HILINK_ERR = 9, + HCLGEVF_CMD_QUEUE_ILLEGAL = 10, + HCLGEVF_CMD_INVALID = 11, }; enum hclgevf_cmd_status { @@ -87,8 +96,11 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + /* GRO command */ + HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, /* Mailbox cmd */ @@ -148,7 +160,14 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; -#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_GRO_EN_B 0 +struct hclgevf_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + +#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { u8 hash_config; @@ -159,11 +178,11 @@ struct hclgevf_rss_config_cmd { struct hclgevf_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; - u8 
ipv4_stcp_en; + u8 ipv4_sctp_en; u8 ipv4_fragment_en; u8 ipv6_tcp_en; u8 ipv6_udp_en; - u8 ipv6_stcp_en; + u8 ipv6_sctp_en; u8 ipv6_fragment_en; u8 rsv[16]; }; @@ -171,8 +190,8 @@ struct hclgevf_rss_input_tuple_cmd { #define HCLGEVF_RSS_CFG_TBL_SIZE 16 struct hclgevf_rss_indirection_table_cmd { - u16 start_table_index; - u16 rss_set_bitmap; + __le16 start_table_index; + __le16 rss_set_bitmap; u8 rsv[4]; u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE]; }; @@ -184,7 +203,7 @@ struct hclgevf_rss_indirection_table_cmd { #define HCLGEVF_RSS_TC_VALID_B 15 #define HCLGEVF_MAX_TC_NUM 8 struct hclgevf_rss_tc_mode_cmd { - u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM]; + __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM]; u8 rsv[8]; }; @@ -226,12 +245,17 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 #define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 #define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 -#define HCLGEVF_NIC_CMQ_EN_B 16 -#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B) + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGEVF_NIC_SW_RST_RDY_B 16 +#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) + #define HCLGEVF_NIC_CMQ_DESC_NUM 1024 #define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 +#define HCLGEVF_RING_BASEADDR_SHIFT 32 + static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) { writel(value, base + reg); @@ -254,6 +278,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) int hclgevf_cmd_init(struct hclgevf_dev *hdev); void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev); int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9c0091f2addfcff7bd565963c43003b210c3d571..ebe09a304aeda2592f3128c496c87b03a3ff4ea1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,18 +2,23 @@ // Copyright (c) 2016-2017 Hisilicon Limited. 
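
A brief aside on the firmware-version print added in hclgevf_cmd_init() above: the packed 32-bit version word is decomposed into four dotted components by mask-and-shift field extraction. The following standalone sketch shows the same idea, assuming each component occupies one byte; the masks and shifts here are illustrative stand-ins for the HNAE3_FW_VERSION_BYTE3..BYTE0 definitions::

    #include <stdio.h>

    static unsigned int get_field(unsigned int value, unsigned int mask,
                                  unsigned int shift)
    {
            return (value & mask) >> shift;
    }

    int main(void)
    {
            unsigned int version = 0x01020304;

            printf("%u.%u.%u.%u\n",
                   get_field(version, 0xff000000u, 24),   /* byte 3 */
                   get_field(version, 0x00ff0000u, 16),   /* byte 2 */
                   get_field(version, 0x0000ff00u, 8),    /* byte 1 */
                   get_field(version, 0x000000ffu, 0));   /* byte 0: prints 1.2.3.4 */
            return 0;
    }
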
#include +#include #include #include "hclgevf_cmd.h" #include "hclgevf_main.h" #include "hclge_mbx.h" #include "hnae3.h" +#include "kcompat.h" #define HCLGEVF_NAME "hclgevf" -static int hclgevf_init_hdev(struct hclgevf_dev *hdev); -static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); +#define HCLGEVF_RESET_MAX_FAIL_CNT 5 + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; +static struct workqueue_struct *hclgevf_wq; + static const struct pci_device_id ae_algovf_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, @@ -21,26 +26,89 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; +static const u8 hclgevf_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static inline struct hclgevf_dev *hclgevf_ae_get_hdev( - struct hnae3_handle *handle) -{ - return container_of(handle, struct hclgevf_dev, nic); +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, + HCLGEVF_CMDQ_TX_ADDR_H_REG, + HCLGEVF_CMDQ_TX_DEPTH_REG, + HCLGEVF_CMDQ_TX_TAIL_REG, + HCLGEVF_CMDQ_TX_HEAD_REG, + HCLGEVF_CMDQ_RX_ADDR_L_REG, + HCLGEVF_CMDQ_RX_ADDR_H_REG, + HCLGEVF_CMDQ_RX_DEPTH_REG, + HCLGEVF_CMDQ_RX_TAIL_REG, + HCLGEVF_CMDQ_RX_HEAD_REG, + HCLGEVF_VECTOR0_CMDQ_SRC_REG, + HCLGEVF_CMDQ_INTR_STS_REG, + HCLGEVF_CMDQ_INTR_EN_REG, + HCLGEVF_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, + HCLGEVF_RST_ING, + HCLGEVF_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, + HCLGEVF_RING_RX_ADDR_H_REG, + HCLGEVF_RING_RX_BD_NUM_REG, + HCLGEVF_RING_RX_BD_LENGTH_REG, + HCLGEVF_RING_RX_MERGE_EN_REG, + HCLGEVF_RING_RX_TAIL_REG, + HCLGEVF_RING_RX_HEAD_REG, + HCLGEVF_RING_RX_FBD_NUM_REG, + HCLGEVF_RING_RX_OFFSET_REG, + HCLGEVF_RING_RX_FBD_OFFSET_REG, + HCLGEVF_RING_RX_STASH_REG, + HCLGEVF_RING_RX_BD_ERR_REG, + HCLGEVF_RING_TX_ADDR_L_REG, + HCLGEVF_RING_TX_ADDR_H_REG, + HCLGEVF_RING_TX_BD_NUM_REG, + HCLGEVF_RING_TX_PRIORITY_REG, + HCLGEVF_RING_TX_TC_REG, + HCLGEVF_RING_TX_MERGE_EN_REG, + HCLGEVF_RING_TX_TAIL_REG, + HCLGEVF_RING_TX_HEAD_REG, + HCLGEVF_RING_TX_FBD_NUM_REG, + HCLGEVF_RING_TX_OFFSET_REG, + HCLGEVF_RING_TX_EBD_NUM_REG, + HCLGEVF_RING_TX_EBD_OFFSET_REG, + HCLGEVF_RING_TX_BD_ERR_REG, + HCLGEVF_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, + HCLGEVF_TQP_INTR_GL0_REG, + HCLGEVF_TQP_INTR_GL1_REG, + HCLGEVF_TQP_INTR_GL2_REG, + HCLGEVF_TQP_INTR_RL_REG}; + +static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) +{ + if (!handle->client) + return container_of(handle, struct hclgevf_dev, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclgevf_dev, roce); + else + return container_of(handle, struct hclgevf_dev, nic); } static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; struct hclgevf_desc desc; struct hclgevf_tqp *tqp; int status; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < 
kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_RX_STATUS, true); @@ -77,17 +145,16 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_tqp *tqp; u64 *buff = data; int i; - for (i = 0; i < hdev->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; } for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; } @@ -96,29 +163,29 @@ static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; - return hdev->num_tqps * 2; + return kinfo->num_tqps * 2; } static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; u8 *buff = data; - int i = 0; + int i; - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } - for (i = 0; i < hdev->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], + struct hclgevf_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -152,10 +219,8 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, u8 *data) { - u8 *p = (char *)data; - if (strset == ETH_SS_STATS) - p = hclgevf_tqps_get_strings(handle, p); + (void)hclgevf_tqps_get_strings(handle, data); } static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) @@ -163,33 +228,81 @@ static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) hclgevf_tqps_get_stats(handle, data); } -static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) +static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, + u8 subcode) { - u8 resp_msg; + if (msg) { + memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); + msg->code = code; + msg->subcode = subcode; + } +} + +static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + struct hclge_basic_info *basic_info; + struct hclge_vf_to_pf_msg send_msg; + unsigned long caps; int status; - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, - true, &resp_msg, sizeof(u8)); + 
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + sizeof(resp_msg)); if (status) { dev_err(&hdev->pdev->dev, - "VF request to get TC info from PF failed %d", - status); + "failed to get basic info from pf, ret = %d", status); return status; } - hdev->hw_tc_map = resp_msg; + basic_info = (struct hclge_basic_info *)resp_msg; + + hdev->hw_tc_map = basic_info->hw_tc_map; + hdev->mbx_api_version = basic_info->mbx_api_version; + caps = basic_info->pf_caps; + if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) + hnae3_set_bit(ae_dev->flag, + HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, 1); + + return 0; +} + +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, + sizeof(u8)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get port based vlan state failed %d", + ret); + return ret; + } + + nic->port_base_vlan_state = resp_msg; return 0; } -static int hclge_get_queue_info(struct hclgevf_dev *hdev) +static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { -#define HCLGEVF_TQPS_RSS_INFO_LEN 8 +#define HCLGEVF_TQPS_RSS_INFO_LEN 6 +#define HCLGEVF_TQPS_ALLOC_OFFSET 0 +#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 +#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; + struct hclge_vf_to_pf_msg send_msg; int status; - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0, - true, resp_msg, + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, HCLGEVF_TQPS_RSS_INFO_LEN); if (status) { dev_err(&hdev->pdev->dev, @@ -198,10 +311,79 @@ static int hclge_get_queue_info(struct hclgevf_dev *hdev) return status; } - memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); - memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); - memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); + memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], + sizeof(u16)); + memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], + sizeof(u16)); + memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], + sizeof(u16)); + + return 0; +} + +static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 +#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 +#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 + u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + HCLGEVF_TQPS_DEPTH_INFO_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp depth info from PF failed %d", + ret); + return ret; + } + + memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], + sizeof(u16)); + memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], + sizeof(u16)); + + return 0; +} + +static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + u16 qid_in_pf = 0; + u8 resp_data[2]; + int ret; + + hclgevf_build_send_msg(&send_msg, 
HCLGE_MBX_GET_QID_IN_PF, 0); + memcpy(send_msg.data, &queue_id, sizeof(queue_id)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, + sizeof(resp_data)); + if (!ret) + qid_in_pf = *(u16 *)resp_data; + + return qid_in_pf; +} + +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg[2]; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get the pf port media type failed %d", + ret); + return ret; + } + + hdev->hw.mac.media_type = resp_msg[0]; + hdev->hw.mac.module_type = resp_msg[1]; return 0; } @@ -211,12 +393,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) struct hclgevf_tqp *tqp; int i; - /* if this is on going reset then we need to re-allocate the TPQs - * since we cannot assume we would get same number of TPQs back from PF - */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, hdev->htqp); - hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -230,7 +406,8 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) tqp->q.ae_algo = &ae_algovf; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE; @@ -245,27 +422,23 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) struct hnae3_handle *nic = &hdev->nic; struct hnae3_knic_private_info *kinfo; u16 new_tqps = hdev->num_tqps; - int i; + unsigned int i; + u8 num_tc = 0; kinfo = &nic->kinfo; - kinfo->num_tc = 0; - kinfo->num_desc = hdev->num_desc; + kinfo->num_tx_desc = hdev->num_tx_desc; + kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) - kinfo->num_tc++; + num_tc++; - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); - new_tqps = kinfo->rss_size * kinfo->num_tc; + num_tc = num_tc ? num_tc : 1; + kinfo->tc_info.num_tc = num_tc; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); + new_tqps = kinfo->rss_size * num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); - /* if this is on going reset then we need to re-allocate the hnae queues - * as well since number of TPQs from PF might have changed. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -277,16 +450,23 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) kinfo->tqp[i] = &hdev->htqp[i].q; } + /* after init the max rss_size and tqps, adjust the default tqp numbers + * and rss size with the actual vector numbers + */ + kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); + kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, + kinfo->rss_size); + return 0; } static void hclgevf_request_link_info(struct hclgevf_dev *hdev) { + struct hclge_vf_to_pf_msg send_msg; int status; - u8 resp_msg; - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, - 0, false, &resp_msg, sizeof(u8)); + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (status) dev_err(&hdev->pdev->dev, "VF failed to fetch link status(%d) from PF", status); @@ -294,15 +474,41 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev) void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) { + struct hnae3_handle *rhandle = &hdev->roce; struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *rclient; struct hnae3_client *client; + if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) + return; + client = handle->client; + rclient = hdev->roce_client; + + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; if (link_state != hdev->hw.mac.link) { - client->ops->link_status_change(handle, !!link_state); hdev->hw.mac.link = link_state; + client->ops->link_status_change(handle, !!link_state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, !!link_state); } + + clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); +} + +void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_ADVERTISING 0 +#define HCLGEVF_SUPPORTED 1 + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); + send_msg.data[0] = HCLGEVF_ADVERTISING; + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + send_msg.data[0] = HCLGEVF_SUPPORTED; + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) @@ -315,12 +521,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; - if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { - dev_err(&hdev->pdev->dev, "unsupported device type %d\n", - hdev->ae_dev->dev_type); - return -EINVAL; - } - ret = hclgevf_knic_setup(hdev); if (ret) dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", @@ -349,6 +549,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, int alloc = 0; int i, j; + vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); vector_num = min(hdev->num_msi_left, vector_num); for (j = 0; j < vector_num; j++) { @@ -385,6 +586,46 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclgevf_rss_config_cmd *req; + unsigned int key_offset = 0; + struct hclgevf_desc desc; + int key_counts; + int key_size; + int ret; + + key_counts = HCLGEVF_RSS_KEY_SIZE; + req = (struct 
hclgevf_rss_config_cmd *)desc.data; + + while (key_counts) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); + + key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); + memcpy(req->hash_key, + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); + + key_counts -= key_size; + key_offset++; + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + + return 0; +} + static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) { return HCLGEVF_RSS_KEY_SIZE; @@ -408,8 +649,10 @@ static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) { hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, false); - req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE; - req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK; + req->start_table_index = + cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK); + for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) req->rss_result[j] = indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; @@ -435,7 +678,7 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) struct hclgevf_desc desc; u16 roundup_size; int status; - int i; + unsigned int i; req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; @@ -443,19 +686,23 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) roundup_size = ilog2(roundup_size); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_valid[i] = 1; tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; + tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0; } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + u16 mode = 0; + + hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + + req->rss_tc_mode[i] = cpu_to_le16(mode); } status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -465,51 +712,39 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, - u8 *key) +/* for revision 0x20, vf shared the same rss config with pf */ +static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_config_cmd *req; - int lkup_times = key ? 3 : 1; - struct hclgevf_desc desc; - int key_offset; - int key_size; - int status; - - req = (struct hclgevf_rss_config_cmd *)desc.data; - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); - - for (key_offset = 0; key_offset < lkup_times; key_offset++) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - true); - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); +#define HCLGEVF_RSS_MBX_RESP_LEN 8 + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; + struct hclge_vf_to_pf_msg send_msg; + u16 msg_num, hash_key_index; + u8 index; + int ret; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); + msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / + HCLGEVF_RSS_MBX_RESP_LEN; + for (index = 0; index < msg_num; index++) { + send_msg.data[0] = index; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + HCLGEVF_RSS_MBX_RESP_LEN); + if (ret) { dev_err(&hdev->pdev->dev, - "failed to get hardware RSS cfg, status = %d\n", - status); - return status; + "VF get rss hash key from PF failed, ret=%d", + ret); + return ret; } - if (key_offset == 2) - key_size = - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGEVF_RSS_HASH_KEY_NUM; - - if (key) - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, - req->hash_key, - key_size); - } - - if (hash) { - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) - *hash = ETH_RSS_HASH_TOP; + hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; + if (index == msg_num - 1) + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], + HCLGEVF_RSS_KEY_SIZE - hash_key_index); else - *hash = ETH_RSS_HASH_UNKNOWN; + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); } return 0; @@ -520,21 +755,98 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int i, ret; + + if (handle->pdev->revision >= HNAE3_REVISION_ID_21) { + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->hash_algo) { + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + } else { + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (key) { + ret = hclgevf_get_rss_hash_key(hdev); + if (ret) + return ret; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + } + } if (indir) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); + return 0; +} + +static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc, + u8 *hash_algo) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + return 0; + case ETH_RSS_HASH_XOR: + *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; + return 0; + case ETH_RSS_HASH_NO_CHANGE: + *hash_algo = hdev->rss_cfg.hash_algo; + return 0; + default: + return -EINVAL; + } } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, - const u8 *key, const u8 hfunc) + const u8 *key, const u8 hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + u8 hash_algo; + int ret, i; + + if (handle->pdev->revision >= HNAE3_REVISION_ID_21) { + ret = hclgevf_parse_rss_hfunc(hdev, 
hfunc, &hash_algo); + if (ret) + return ret; + + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key); + if (ret) { + dev_err(&hdev->pdev->dev, + "invalid hfunc type %u\n", hfunc); + return ret; + } + + /* Update the shadow RSS key with user specified qids */ + memcpy(rss_cfg->rss_hash_key, key, + HCLGEVF_RSS_KEY_SIZE); + } else { + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + } + rss_cfg->hash_algo = hash_algo; + } /* update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) @@ -544,91 +856,260 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, return hclgevf_set_rss_indir_table(hdev); } -static int hclgevf_get_tc_size(struct hnae3_handle *handle) +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; - return rss_cfg->rss_size; + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGEVF_D_PORT_BIT; + else + hash_sets &= ~HCLGEVF_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGEVF_S_IP_BIT; + else + hash_sets &= ~HCLGEVF_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGEVF_D_IP_BIT; + else + hash_sets &= ~HCLGEVF_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGEVF_V_TAG_BIT; + + return hash_sets; } -static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, - int vector_id, - struct hnae3_ring_chain_node *ring_chain) +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_ring_chain_node *node; - struct hclge_mbx_vf_to_pf_cmd *req; + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_input_tuple_cmd *req; struct hclgevf_desc desc; - int i = 0; - int status; - u8 type; + u8 tuple_sets; + int ret; - req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + if (handle->pdev->revision == HNAE3_REVISION_ID_20) + return -EOPNOTSUPP; - for (node = ring_chain; node; node = node->next) { - int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + - HCLGE_MBX_RING_NODE_VARIABLE_NUM * i; - - if (i == 0) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_MBX_VF_TO_PF, - false); - type = en ? 
- HCLGE_MBX_MAP_RING_TO_VECTOR : - HCLGE_MBX_UNMAP_RING_TO_VECTOR; - req->msg[0] = type; - req->msg[1] = vector_id; - } + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; - req->msg[idx_offset] = - hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); - req->msg[idx_offset + 1] = node->tqp_index; - req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclgevf_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; - i++; - if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) / - HCLGE_MBX_RING_NODE_VARIABLE_NUM) || - !node->next) { - req->msg[2] = i; + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "Map TQP fail, status is %d.\n", - status); - return status; - } - i = 0; - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_MBX_VF_TO_PF, - false); - req->msg[0] = type; - req->msg[1] = vector_id; - } + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + return ret; } + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; return 0; } -static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int vector_id; + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 tuple_sets; - vector_id = hclgevf_get_vector_index(hdev, vector); - if (vector_id < 0) { - dev_err(&handle->pdev->dev, - "Get vector index fail. 
ret =%d\n", vector_id); - return vector_id; + if (handle->pdev->revision == HNAE3_REVISION_ID_20) + return -EOPNOTSUPP; + + nfc->data = 0; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case UDP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; + break; + default: + return -EINVAL; } - return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); -} + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGEVF_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGEVF_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGEVF_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGEVF_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, + struct hclgevf_rss_cfg *rss_cfg) +{ + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; +} + +static int hclgevf_get_tc_size(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + + return rss_cfg->rss_size; +} + +static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, + int vector_id, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + struct hnae3_ring_chain_node *node; + int status; + int i = 0; + + memset(&send_msg, 0, sizeof(send_msg)); + send_msg.code = en ? 
HCLGE_MBX_MAP_RING_TO_VECTOR : + HCLGE_MBX_UNMAP_RING_TO_VECTOR; + send_msg.vector_id = vector_id; + + for (node = ring_chain; node; node = node->next) { + send_msg.param[i].ring_type = + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); + + send_msg.param[i].tqp_index = node->tqp_index; + send_msg.param[i].int_gl_index = + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); + + i++; + if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { + send_msg.ring_num = i; + + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, + NULL, 0); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return status; + } + i = 0; + } + } + + return 0; +} + +static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); +} static int hclgevf_unmap_ring_from_vector( struct hnae3_handle *handle, @@ -638,6 +1119,9 @@ static int hclgevf_unmap_ring_from_vector( struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int ret, vector_id; + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return 0; + vector_id = hclgevf_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&handle->pdev->dev, @@ -674,41 +1158,66 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) } static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, - bool en_uc_pmc, bool en_mc_pmc) + bool en_uc_pmc, bool en_mc_pmc, + bool en_bc_pmc) { - struct hclge_mbx_vf_to_pf_cmd *req; - struct hclgevf_desc desc; - int status; + struct hclge_vf_to_pf_msg send_msg; + int ret; - req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + memset(&send_msg, 0, sizeof(send_msg)); + send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; + send_msg.en_bc = en_bc_pmc ? 1 : 0; + send_msg.en_uc = en_uc_pmc ? 1 : 0; + send_msg.en_mc = en_mc_pmc ? 1 : 0; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); - req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en_uc_pmc ? 1 : 0; - req->msg[2] = en_mc_pmc ? 
1 : 0; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) + if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", status); + "Set promisc mode fail, status is %d.\n", ret); - return status; + return ret; } -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, - bool en_uc_pmc, bool en_mc_pmc) +static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct pci_dev *pdev = hdev->pdev; + bool en_bc_pmc; - hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); + en_bc_pmc = pdev->revision != 0x20; + + return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, + en_bc_pmc); } -static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, - int stream_id, bool enable) +static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); +} + +static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->nic; + bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; + bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; + int ret; + + if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { + ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); + if (!ret) + clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + } +} + +static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) { struct hclgevf_cfg_com_tqp_queue_cmd *req; struct hclgevf_desc desc; - int status; req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; @@ -716,277 +1225,532 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, false); req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); req->stream_id = cpu_to_le16(stream_id); - req->enable |= enable << HCLGEVF_TQP_ENABLE_B; + if (enable) + req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "TQP enable fail, status =%d.\n", status); - - return status; + return hclgevf_cmd_send(&hdev->hw, &desc, 1); } -static int hclgevf_get_queue_id(struct hnae3_queue *queue) +static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) { - struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } - return tqp->index; + return 0; } static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hnae3_queue *queue; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_tqp *tqp; int i; - for (i = 0; i < hdev->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclgevf_tqp, q); + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) +static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) { - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; - int ret; - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - 
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, - NULL, 0, true, &resp_msg, sizeof(u8)); - - if (ret) { - dev_err(&hdev->pdev->dev, - "Read mta type fail, ret=%d.\n", ret); - return ret; - } + struct hclge_vf_to_pf_msg send_msg; + u8 host_mac[ETH_ALEN]; + int status; - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, + ETH_ALEN); + if (status) { dev_err(&hdev->pdev->dev, - "Read mta type invalid, resp=%d.\n", resp_msg); - return -EINVAL; + "fail to get VF MAC from host %d", status); + return status; } - hdev->mta_mac_sel_type = resp_msg; + ether_addr_copy(p, host_mac); return 0; } -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, - const u8 *addr) +static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; - u16 high_val = addr[1] | (addr[0] << 8); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 host_mac_addr[ETH_ALEN]; - return (high_val >> rsh) & 0xfff; + if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) + return; + + hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); + if (hdev->has_pf_mac) + ether_addr_copy(p, host_mac_addr); + else + ether_addr_copy(p, hdev->hw.mac.mac_addr); } -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, - unsigned long *status) +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, + bool is_first) { -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 -#define HCLGEVF_MTA_STATUS_MSG_BITS \ - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_cnt; - u8 msg_idx; - int ret; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; + struct hclge_vf_to_pf_msg send_msg; + u8 *new_mac_addr = (u8 *)p; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); + send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; + ether_addr_copy(send_msg.data, new_mac_addr); + if (is_first && !hdev->has_pf_mac) + eth_zero_addr(&send_msg.data[ETH_ALEN]); + else + ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (!status) + ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); + + return status; +} - msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, - HCLGEVF_MTA_STATUS_MSG_BITS); - tbl_idx = 0; - msg_idx = 0; - while (msg_cnt--) { - u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; - u8 *p = &msg[1]; - u8 msg_ofs; - u8 msg_bit; - - memset(msg, 0, sizeof(msg)); - - /* set index field */ - msg[0] = 0x7F & msg_idx; - - /* set end flag field */ - if (msg_cnt == 0) { - msg[0] |= 0x80; - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; +static struct hclgevf_mac_addr_node * +hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + } + return NULL; +} + +static void hclgevf_mac_node_convert(struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_ADDR_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGEVF_MAC_TO_ADD: + if (mac_node->state == HCLGEVF_MAC_TO_DEL) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGEVF_MAC_TO_DEL: + if 
(mac_node->state == HCLGEVF_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); } else { - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; + mac_node->state = HCLGEVF_MAC_TO_DEL; } + break; + /* only from tmp_add_list, the mac_node->state won't be + * HCLGEVF_MAC_ACTIVE + */ + case HCLGEVF_MAC_ACTIVE: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + } +} - /* set status field */ - msg_ofs = 0; - msg_bit = 0; - while (tbl_cnt--) { - if (test_bit(tbl_idx, status)) - p[msg_ofs] |= BIT(msg_bit); +static int hclgevf_update_mac_list(struct hnae3_handle *handle, + enum HCLGEVF_MAC_ADDR_STATE state, + enum HCLGEVF_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_mac_addr_node *mac_node; + struct list_head *list; - tbl_idx++; + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } + spin_lock_bh(&hdev->mac_table.mac_list_lock); - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, - msg, sizeof(msg), false, NULL, 0); - if (ret) - break; + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing. + */ + mac_node = hclgevf_find_mac_node(list, addr); + if (mac_node) { + hclgevf_mac_node_convert(mac_node, state); + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; + } + /* if this address has never been added, unnecessary to delete */ + if (state == HCLGEVF_MAC_TO_DEL) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOENT; + } - msg_idx++; + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOMEM; } - return ret; + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; } -static int hclgevf_update_mta_status(struct hnae3_handle *handle) +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) { - unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct net_device *netdev = hdev->nic.kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_UC, addr); +} - /* clear status */ - memset(mta_status, 0, sizeof(mta_status)); +static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_UC, addr); +} - /* update status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); - set_bit(tbl_idx, mta_status); - } +static int hclgevf_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_MC, addr); +} - return hclgevf_do_update_mta_status(hdev, mta_status); +static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_MC, addr); } -static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) +static 
int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, + struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + u8 code, subcode; - ether_addr_copy(p, hdev->hw.mac.mac_addr); + if (mac_type == HCLGEVF_MAC_ADDR_UC) { + code = HCLGE_MBX_SET_UNICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; + } else { + code = HCLGE_MBX_SET_MULTICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; + } + + hclgevf_build_send_msg(&send_msg, code, subcode); + ether_addr_copy(send_msg.data, mac_node->mac_addr); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } -static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, - bool is_first) +static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, + struct list_head *list, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; - u8 *new_mac_addr = (u8 *)p; - u8 msg_data[ETH_ALEN * 2]; - u16 subcode; - int status; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclgevf_mac_addr_node *mac_node, *tmp; + int ret; - ether_addr_copy(msg_data, new_mac_addr); - ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr); + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); + if (ret) { + hnae3_format_mac_addr(format_mac_addr, + mac_node->mac_addr); + dev_err(&hdev->pdev->dev, + "request configure mac %s failed, state=%d, ret=%d\n", + format_mac_addr, mac_node->state, ret); + return; + } + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + mac_node->state = HCLGEVF_MAC_ACTIVE; + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} - subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : - HCLGE_MBX_MAC_VLAN_UC_MODIFY; +static void hclgevf_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, - subcode, msg_data, ETH_ALEN * 2, - true, NULL, 0); - if (!status) - ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means we have received a TO_DEL request + * during the time window of sending mac config request to PF. + * If mac_node state is ACTIVE, then change its state to TO_DEL, + * then it will be removed next time. If it is TO_ADD, it means + * sending the TO_ADD request failed, so just remove the mac node. 
+ */ + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclgevf_mac_node_convert(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { + mac_node->state = HCLGEVF_MAC_TO_DEL; + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} - return status; +static void hclgevf_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means we + * received a new TO_ADD request during the time window + * of sending the mac addr configure request to PF, so + * just change the mac state to ACTIVE. + */ + new_node->state = HCLGEVF_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } + } } -static int hclgevf_add_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static void hclgevf_clear_list(struct list_head *list) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_mac_addr_node *mac_node, *tmp; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_ADD, - addr, ETH_ALEN, false, NULL, 0); + list_for_each_entry_safe(mac_node, tmp, list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } } -static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGEVF_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGEVF_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } +stop_traverse: + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); + hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); + + /* if some mac addresses failed to be added/deleted, move them back + * to the mac_list and retry next time. 
+ */ + spin_lock_bh(&hdev->mac_table.mac_list_lock); - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_REMOVE, - addr, ETH_ALEN, false, NULL, 0); + hclgevf_sync_from_del_list(&tmp_del_list, list); + hclgevf_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); } -static int hclgevf_add_mc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); +} + +static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) +{ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_clear_list(&hdev->mac_table.uc_mac_list); + hclgevf_clear_list(&hdev->mac_table.mc_mac_list); - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_ADD, - addr, ETH_ALEN, false, NULL, 0); + spin_unlock_bh(&hdev->mac_table.mac_list_lock); } -static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vf_to_pf_msg send_msg; + + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B)) + return -EOPNOTSUPP; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_REMOVE, - addr, ETH_ALEN, false, NULL, 0); + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_ENABLE_VLAN_FILTER); + send_msg.data[0] = enable ? 1 : 0; + + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); } static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { -#define HCLGEVF_VLAN_MBX_MSG_LEN 5 +#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 +#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 +#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; + struct hclge_vf_to_pf_msg send_msg; + int ret; - if (vlan_id > 4095) + if (vlan_id > HCLGEVF_MAX_VLAN_ID) return -EINVAL; if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - msg_data[0] = is_kill; - memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); - memcpy(&msg_data[3], &proto, sizeof(proto)); - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, - HCLGE_MBX_VLAN_FILTER, msg_data, - HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after + * reset finished. + */ + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { + set_bit(vlan_id, hdev->vlan_del_fail_bmap); + return -EBUSY; + } + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_FILTER); + send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; + memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, + sizeof(vlan_id)); + memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, + sizeof(proto)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + /* when removing the hw vlan filter failed, record the vlan id, + * and try to remove it from hw later, to be consistent + * with the stack. 
+ */ + if (is_kill && ret) + set_bit(vlan_id, hdev->vlan_del_fail_bmap); + + return ret; +} + +static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MAX_SYNC_COUNT 60 + struct hnae3_handle *handle = &hdev->nic; + int ret, sync_cnt = 0; + u16 vlan_id; + + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); + while (vlan_id != VLAN_N_VID) { + ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), + vlan_id, true); + if (ret) + return; + + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); + sync_cnt++; + if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) + return; + + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); + } } static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg_data; + struct hclge_vf_to_pf_msg send_msg; - msg_data = enable ? 1 : 0; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, - HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, - 1, false, NULL, 0); + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_RX_OFF_CFG); + send_msg.data[0] = enable ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } -static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +static int hclgevf_reset_tqp(struct hnae3_handle *handle) { +#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg_data[2]; + struct hclge_vf_to_pf_msg send_msg; + u8 return_status = 0; int ret; - - memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); + u16 i; /* disable vf queue before send queue reset msg to PF */ - ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); - if (ret) - return; + ret = hclgevf_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", + ret); + return ret; + } + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); + + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, + sizeof(return_status)); + if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) + return ret; - hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, - 2, true, NULL, 0); + for (i = 1; i < handle->kinfo.num_tqps; i++) { + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); + memcpy(send_msg.data, &i, sizeof(i)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) + return ret; + } + + return 0; +} + +static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); + memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); } static int hclgevf_notify_client(struct hclgevf_dev *hdev, @@ -994,53 +1758,87 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev, { struct hnae3_client *client = hdev->nic_client; struct hnae3_handle *handle = &hdev->nic; + int ret; + + if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || + !client) + return 0; if (!client->ops->reset_notify) return -EOPNOTSUPP; - return client->ops->reset_notify(handle, type); + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; } static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { -#define HCLGEVF_RESET_WAIT_MS 500 -#define HCLGEVF_RESET_WAIT_CNT 20 - u32 val, cnt = 0; +#define 
HCLGEVF_RESET_WAIT_US 20000 +#define HCLGEVF_RESET_WAIT_CNT 2000 +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) - /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { - msleep(HCLGEVF_RESET_WAIT_MS); - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - cnt++; - } + u32 val; + int ret; + + if (hdev->reset_type == HNAE3_VF_RESET) + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_VF_RST_ING, val, + !(val & HCLGEVF_VF_RST_ING_BIT), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); + else + ret = readl_poll_timeout(hdev->hw.io_base + + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ - if (cnt >= HCLGEVF_RESET_WAIT_CNT) { - dev_warn(&hdev->pdev->dev, - "could'nt get reset done status from h/w, timeout!\n"); - return -EBUSY; + if (ret) { + dev_err(&hdev->pdev->dev, + "could'nt get reset done status from h/w, timeout!\n"); + return ret; } /* we will wait a bit more to let reset of the stack to complete. This * might happen in case reset assertion was made by PF. Yes, this also * means we might end up waiting bit more even for VF reset. */ - msleep(5000); + msleep(500); return 0; } +static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGEVF_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; + + hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, + reg_val); +} + static int hclgevf_reset_stack(struct hclgevf_dev *hdev) { int ret; /* uninitialize the nic client */ - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; /* re-initialize the hclge device */ - ret = hclgevf_init_hdev(hdev); + ret = hclgevf_reset_hdev(hdev); if (ret) { dev_err(&hdev->pdev->dev, "hclge device re-init failed, VF is disabled!\n"); @@ -1048,77 +1846,272 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) } /* bring up the nic client again */ - hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + /* clear handshake status with IMP */ + hclgevf_reset_handshake(hdev, false); + + /* bring up the nic to enable TX/RX again */ + return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RESET_SYNC_TIME 100 + + if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { + struct hclge_vf_to_pf_msg send_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to assert VF reset, ret = %d\n", ret); + return ret; + } + hdev->rst_stats.vf_func_rst_cnt++; + } + + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + /* inform hardware that preparatory work is done */ + msleep(HCLGEVF_RESET_SYNC_TIME); + hclgevf_reset_handshake(hdev, true); + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", + hdev->reset_type); return 0; } -static int hclgevf_reset(struct hclgevf_dev *hdev) +static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "VF 
function reset count: %u\n", + hdev->rst_stats.vf_func_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "VF reset count: %u\n", + hdev->rst_stats.vf_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %u\n", + hdev->rst_stats.rst_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", + hdev->rst_stats.hw_rst_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %u\n", + hdev->rst_stats.rst_cnt); + dev_info(&hdev->pdev->dev, "reset fail count: %u\n", + hdev->rst_stats.rst_fail_cnt); + dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); + dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); + dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); + dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); + dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); +} + +static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) +{ + /* recover handshake status with IMP when reset fail */ + hclgevf_reset_handshake(hdev, true); + hdev->rst_stats.rst_fail_cnt++; + dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", + hdev->rst_stats.rst_fail_cnt); + + if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) + set_bit(hdev->reset_type, &hdev->reset_pending); + + if (hclgevf_is_reset_pending(hdev)) { + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + } else { + set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); + hclgevf_dump_rst_info(hdev); + } +} + +static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) { int ret; - rtnl_lock(); + hdev->rst_stats.rst_cnt++; + rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + return hclgevf_reset_prepare_wait(hdev); +} + +static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) +{ + int ret; + + hdev->rst_stats.hw_rst_done_cnt++; + + rtnl_lock(); + /* now, re-initialize the nic client and ae device */ + ret = hclgevf_reset_stack(hdev); + rtnl_unlock(); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + return ret; + } + + hdev->last_reset_time = jiffies; + hdev->rst_stats.rst_done_cnt++; + hdev->rst_stats.rst_fail_cnt = 0; + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); + + return 0; +} + +static void hclgevf_reset(struct hclgevf_dev *hdev) +{ + if (hclgevf_reset_prepare(hdev)) + goto err_reset; /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ - ret = hclgevf_reset_wait(hdev); - if (ret) { + if (hclgevf_reset_wait(hdev)) { /* can't do much in this situation, will disable VF */ dev_err(&hdev->pdev->dev, - "VF failed(=%d) to fetch H/W reset completion status\n", - ret); - - dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + "failed to fetch H/W reset completion status\n"); + goto err_reset; + } - rtnl_unlock(); - return ret; + if (hclgevf_reset_rebuild(hdev)) + goto err_reset; + + return; + +err_reset: + hclgevf_reset_err_handle(hdev); +} + +static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, + unsigned long *addr) +{ + 
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_VF_RESET, addr)) { + rst_level = HNAE3_VF_RESET; + clear_bit(HNAE3_VF_RESET, addr); + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { + rst_level = HNAE3_VF_FULL_RESET; + clear_bit(HNAE3_VF_FULL_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_PF_FUNC_RESET; + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_FUNC_RESET; + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); } - /* now, re-initialize the nic client and ae device*/ - ret = hclgevf_reset_stack(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + return rst_level; +} + +static void hclgevf_reset_event(struct pci_dev *pdev, + struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclgevf_dev *hdev = ae_dev->priv; + + dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + + if (hdev->default_reset_request) + hdev->reset_level = + hclgevf_get_reset_level(hdev, + &hdev->default_reset_request); + else + hdev->reset_level = HNAE3_VF_FUNC_RESET; + + /* reset of this VF requested */ + set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + hdev->last_reset_time = jiffies; +} - /* bring up the nic to enable TX/RX again */ - hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); +static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclgevf_dev *hdev = ae_dev->priv; - rtnl_unlock(); + set_bit(rst_type, &hdev->default_reset_request); +} - return ret; +static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) +{ + writel(en ? 
1 : 0, vector->addr); } -static int hclgevf_do_reset(struct hclgevf_dev *hdev) +static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) { - int status; - u8 respmsg; +#define HCLGEVF_RESET_RETRY_WAIT_MS 500 +#define HCLGEVF_RESET_RETRY_CNT 5 - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, - 0, false, &respmsg, sizeof(u8)); - if (status) - dev_err(&hdev->pdev->dev, - "VF reset request to PF failed(=%d)\n", status); + struct hclgevf_dev *hdev = ae_dev->priv; + int retry_cnt = 0; + int ret; - return status; +retry: + down(&hdev->reset_sem); + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclgevf_reset_prepare(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", + ret); + if (hdev->reset_pending || + retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { + dev_err(&hdev->pdev->dev, + "reset_pending:0x%lx, retry_cnt:%d\n", + hdev->reset_pending, retry_cnt); + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGEVF_RESET_RETRY_WAIT_MS); + goto retry; + } + } + + /* disable misc vector before reset done */ + hclgevf_enable_vector(&hdev->misc_vector, false); + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; } -static void hclgevf_reset_event(struct hnae3_handle *handle) +static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - - dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + struct hclgevf_dev *hdev = ae_dev->priv; + int ret; - handle->reset_level = HNAE3_VF_RESET; + hclgevf_enable_vector(&hdev->misc_vector, true); - /* reset of this VF requested */ - set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); - hclgevf_reset_task_schedule(hdev); + ret = hclgevf_reset_rebuild(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", + ret); - handle->last_reset_time = jiffies; + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); } static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) @@ -1145,75 +2138,57 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) { - if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && - !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { - set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); - schedule_work(&hdev->rst_service_task); - } + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, + &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); } void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) { - if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) && - !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) { - set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); - schedule_work(&hdev->mbx_service_task); - } -} - -static void hclgevf_task_schedule(struct hclgevf_dev *hdev) -{ - if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) && - !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state)) - schedule_work(&hdev->service_task); + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, + &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); } -static void hclgevf_deferred_task_schedule(struct hclgevf_dev 
*hdev) +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, + unsigned long delay) { - /* if we have any pending mailbox event then schedule the mbx task */ - if (hdev->mbx_event_pending) - hclgevf_mbx_task_schedule(hdev); - - if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) - hclgevf_reset_task_schedule(hdev); -} - -static void hclgevf_service_timer(struct timer_list *t) -{ - struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); - - mod_timer(&hdev->service_timer, jiffies + 5 * HZ); - - hclgevf_task_schedule(hdev); + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); } -static void hclgevf_reset_service_task(struct work_struct *work) +static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev = - container_of(work, struct hclgevf_dev, rst_service_task); - int ret; +#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 - if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) return; - clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); + down(&hdev->reset_sem); + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); if (test_and_clear_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state)) { /* PF has initmated that it is about to reset the hardware. - * We now have to poll & check if harware has actually completed - * the reset sequence. On hardware reset completion, VF needs to - * reset the client and ae device. + * We now have to poll & check if hardware has actually + * completed the reset sequence. On hardware reset completion, + * VF needs to reset the client and ae device. */ hdev->reset_attempts = 0; - ret = hclgevf_reset(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + hdev->last_reset_time = jiffies; + hdev->reset_type = + hclgevf_get_reset_level(hdev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclgevf_reset(hdev); } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state)) { /* we could be here when either of below happens: - * 1. reset was initiated due to watchdog timeout due to + * 1. reset was initiated due to watchdog timeout caused by * a. IMP was earlier reset and our TX got choked down and * which resulted in watchdog reacting and inducing VF * reset. This also means our cmdq would be unreliable. @@ -1227,65 +2202,126 @@ static void hclgevf_reset_service_task(struct work_struct *work) * 1b and 2. cases but we will not get any intimation about 1a * from PF as cmdq would be in unreliable state i.e. mailbox * communication between PF and VF would be broken. - */ - - /* if we are never geting into pending state it means either: + * + * if we are never geting into pending state it means either: * 1. PF is not receiving our request which could be due to IMP * reset * 2. PF is screwed * We cannot do much for 2. but to check first we can try reset * our PCIe + stack and see if it alleviates the problem. 
*/ - if (hdev->reset_attempts > 3) { + if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { /* prepare for full reset of stack + pcie interface */ - hdev->nic.reset_level = HNAE3_VF_FULL_RESET; + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - /* request PF for resetting this VF via mailbox */ - ret = hclgevf_do_reset(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, - "VF rst fail, stack will call\n"); + set_bit(hdev->reset_level, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } + hclgevf_reset_task_schedule(hdev); } + hdev->reset_type = HNAE3_NONE_RESET; clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); } -static void hclgevf_mailbox_service_task(struct work_struct *work) +static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev; - - hdev = container_of(work, struct hclgevf_dev, mbx_service_task); + if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + return; if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) return; - clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); - hclgevf_mbx_async_handler(hdev); clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); } -static void hclgevf_service_task(struct work_struct *work) +static void hclgevf_keep_alive(struct hclgevf_dev *hdev) { - struct hclgevf_dev *hdev; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + return; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "VF sends keep alive cmd failed(=%d)\n", ret); +} + +static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) +{ + unsigned long delta = round_jiffies_relative(HZ); + struct hnae3_handle *handle = &hdev->nic; + + if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) + return; + + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { + delta = jiffies - hdev->last_serv_processed; + + if (delta < round_jiffies_relative(HZ)) { + delta = round_jiffies_relative(HZ) - delta; + goto out; + } + } + + hdev->serv_processed_cnt++; + if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) + hclgevf_keep_alive(hdev); + + if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { + hdev->last_serv_processed = jiffies; + goto out; + } - hdev = container_of(work, struct hclgevf_dev, service_task); + if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) + hclgevf_tqps_update_stats(handle); - /* request the link status from the PF. PF would be able to tell VF - * about such updates in future so we might remove this later + /* VF does not need to request link status when this bit is set, because + * PF will push its link status to VFs when link status changed. 
*/ - hclgevf_request_link_info(hdev); + if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) + hclgevf_request_link_info(hdev); + + hclgevf_update_link_mode(hdev); + + hclgevf_sync_vlan_filter(hdev); + + hclgevf_sync_mac_table(hdev); + + hclgevf_sync_promisc_mode(hdev); + + hdev->last_serv_processed = jiffies; + +out: + hclgevf_task_schedule(hdev, delta); +} + +static void hclgevf_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, + service_task.work); - hclgevf_deferred_task_schedule(hdev); + hclgevf_reset_service_task(hdev); + hclgevf_mailbox_service_task(hdev); + hclgevf_periodic_service_task(hdev); - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); + /* Handle reset and mbx again in case periodical task delays the + * handling by calling hclgevf_task_schedule() in + * hclgevf_periodic_service_task() + */ + hclgevf_reset_service_task(hdev); + hclgevf_mailbox_service_task(hdev); } static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) @@ -1293,46 +2329,83 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); } -static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, + u32 *clearval) { - u32 cmdq_src_reg; + u32 val, cmdq_stat_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ - cmdq_src_reg = hclgevf_read_dev(&hdev->hw, - HCLGEVF_VECTOR0_CMDQ_SRC_REG); + cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, + HCLGEVF_VECTOR0_CMDQ_STAT_REG); + + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, + "receive reset interrupt 0x%x!\n", rst_ing_reg); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); + hdev->rst_stats.vf_rst_cnt++; + /* set up VF hardware reset status, its PF will clear + * this status when PF has initialized done. + */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); + hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, + val | HCLGEVF_VF_RST_ING_BIT); + return HCLGEVF_VECTOR0_EVENT_RST; + } /* check for vector0 mailbox(=CMDQ RX) event source */ - if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { - cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); - *clearval = cmdq_src_reg; - return true; - } + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { + /* for revision 0x21, clearing interrupt is writing bit 0 + * to the clear register, writing bit 1 means to keep the + * old value. + * for revision 0x20, the clear register is a read & write + * register, so we should just write 0 to the bit we are + * handling, and keep other bits as cmdq_stat_reg. + */ + if (hdev->pdev->revision >= 0x21) + *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + else + *clearval = cmdq_stat_reg & + ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); - dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); + return HCLGEVF_VECTOR0_EVENT_MBX; + } - return false; -} + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "vector 0 interrupt from unknown source, cmdq_src = %#x\n", + cmdq_stat_reg); -static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) -{ - writel(en ? 
1 : 0, vector->addr); + return HCLGEVF_VECTOR0_EVENT_OTHER; } static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; hclgevf_enable_vector(&hdev->misc_vector, false); - if (!hclgevf_check_event_cause(hdev, &clearval)) - goto skip_sched; - - hclgevf_mbx_handler(hdev); + event_cause = hclgevf_check_evt_cause(hdev, &clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_clear_event_cause(hdev, clearval); - hclgevf_clear_event_cause(hdev, clearval); + switch (event_cause) { + case HCLGEVF_VECTOR0_EVENT_RST: + hclgevf_reset_task_schedule(hdev); + break; + case HCLGEVF_VECTOR0_EVENT_MBX: + hclgevf_mbx_handler(hdev); + break; + default: + break; + } -skip_sched: - hclgevf_enable_vector(&hdev->misc_vector, true); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_enable_vector(&hdev->misc_vector, true); return IRQ_HANDLED; } @@ -1341,18 +2414,32 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + ret = hclgevf_get_basic_info(hdev); + if (ret) + return ret; + + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + /* get queue configuration from PF */ - ret = hclge_get_queue_info(hdev); + ret = hclgevf_get_queue_info(hdev); if (ret) return ret; - /* get tc configuration from PF */ - return hclgevf_get_tc_info(hdev); + + /* get queue depth info from PF */ + ret = hclgevf_get_queue_depth(hdev); + if (ret) + return ret; + + return hclgevf_get_pf_media_type(hdev); } static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; - struct hclgevf_dev *hdev = ae_dev->priv; + struct hclgevf_dev *hdev; hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) @@ -1388,57 +2475,127 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +{ + struct hclgevf_cfg_gro_status_cmd *req; + struct hclgevf_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, + false); + req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 
1 : 0); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "VF GRO hardware config cmd failed, ret = %d.\n", ret); + + return ret; +} + +static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) +{ + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_tuple_cfg *tuple_sets; + u32 i; + + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + rss_cfg->rss_size = hdev->nic.kinfo.rss_size; + tuple_sets = &rss_cfg->rss_tuple_sets; + if (hdev->pdev->revision >= 0x21) { + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; + memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, + HCLGEVF_RSS_KEY_SIZE); + + tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP4; + tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP6; + tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + } + + /* Initialize RSS indirect table */ + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; +} + static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) { struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i, ret; + int ret; - rss_cfg->rss_size = hdev->rss_size_max; + if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; - /* Initialize RSS indirect table for each vport */ - for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) - rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; + ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); + if (ret) + return ret; + } ret = hclgevf_set_rss_indir_table(hdev); if (ret) return ret; - return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max); + return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); } static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) { - /* other vlan config(like, VLAN TX/RX offload) would also be added - * here later - */ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, false); } -static int hclgevf_ae_start(struct hnae3_handle *handle) +static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 + + unsigned long last = hdev->serv_processed_cnt; + int i = 0; + + while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && + i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && + last == hdev->serv_processed_cnt) + usleep_range(1, 1); +} + +static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, queue_id; - for (i = 0; i < handle->kinfo.num_tqps; i++) { - /* ring enable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + if (enable) { + hclgevf_task_schedule(hdev, 0); + } else { + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - hclgevf_tqp_enable(hdev, queue_id, 0, true); + /* flush memory to make sure DOWN is seen by service task */ + smp_mb__before_atomic(); + hclgevf_flush_link_update(hdev); } +} + +static int hclgevf_ae_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + 
clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); - /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); hclgevf_request_link_info(hdev); - clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); - mod_timer(&hdev->service_timer, jiffies + HZ); + hclgevf_update_link_mode(hdev); return 0; } @@ -1446,48 +2603,65 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) static void hclgevf_ae_stop(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, queue_id; - for (i = 0; i < hdev->num_tqps; i++) { - /* Ring disable */ - queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - hclgevf_tqp_enable(hdev, queue_id, 0, false); - } + if (hdev->reset_type != HNAE3_VF_RESET) + hclgevf_reset_tqp(handle); - /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); hclgevf_update_link_status(hdev, 0); } -static void hclgevf_state_init(struct hclgevf_dev *hdev) +static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) { - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return; +#define HCLGEVF_STATE_ALIVE 1 +#define HCLGEVF_STATE_NOT_ALIVE 0 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; - /* setup tasks for the MBX */ - INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); - clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); + send_msg.data[0] = alive ? 
HCLGEVF_STATE_ALIVE : + HCLGEVF_STATE_NOT_ALIVE; + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static int hclgevf_client_start(struct hnae3_handle *handle) +{ + int ret; + + ret = hclgevf_set_alive(handle, true); + if (ret) + return ret; + + return 0; +} + +static void hclgevf_client_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; - /* setup tasks for service timer */ - timer_setup(&hdev->service_timer, hclgevf_service_timer, 0); + ret = hclgevf_set_alive(handle, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "%s failed %d\n", __func__, ret); +} - INIT_WORK(&hdev->service_task, hclgevf_service_task); - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); - INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task); + INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); mutex_init(&hdev->mbx_resp.mbx_mutex); + sema_init(&hdev->reset_sem, 1); + + spin_lock_init(&hdev->mac_table.mac_list_lock); + INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); + INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); /* bring the device down */ set_bit(HCLGEVF_STATE_DOWN, &hdev->state); @@ -1496,15 +2670,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) static void hclgevf_state_uninit(struct hclgevf_dev *hdev) { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); mutex_destroy(&hdev->mbx_resp.mbx_mutex); } @@ -1515,17 +2684,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) + if (hnae3_dev_roce_supported(hdev)) vectors = pci_alloc_irq_vectors(pdev, hdev->roce_base_msix_offset + 1, hdev->num_msi, PCI_IRQ_MSIX); else - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (vectors < 0) { @@ -1536,11 +2702,12 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) } if (vectors < hdev->num_msi) dev_warn(&hdev->pdev->dev, - "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", hdev->num_msi, vectors); hdev->num_msi = vectors; hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; @@ -1557,6 +2724,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(int), GFP_KERNEL); if (!hdev->vector_irq) { + devm_kfree(&pdev->dev, hdev->vector_status); pci_free_irq_vectors(pdev); return -ENOMEM; } @@ -1568,21 +2736,21 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + devm_kfree(&pdev->dev, 
hdev->vector_status); + devm_kfree(&pdev->dev, hdev->vector_irq); pci_free_irq_vectors(pdev); } static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { - int ret = 0; - - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; + int ret; hclgevf_get_misc_vector(hdev); + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", + HCLGEVF_NAME, pci_name(hdev->pdev)); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, - 0, "hclgevf_cmd", hdev); + 0, hdev->misc_vector.name, hdev); if (ret) { dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", hdev->misc_vector.vector_irq); @@ -1591,19 +2759,87 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) hclgevf_clear_event_cause(hdev, 0); - /* enable misc. vector(vector 0) */ - hclgevf_enable_vector(&hdev->misc_vector, true); + /* enable misc. vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, true); + + return ret; +} + +static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) +{ + /* disable misc vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); + free_irq(hdev->misc_vector.vector_irq, hdev); + hclgevf_free_vector(hdev, 0); +} + +static void hclgevf_info_show(struct hclgevf_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "VF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: %u\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %u\n", + hdev->hw.mac.media_type); + + dev_info(dev, "VF info end.\n"); +} + +static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, + struct hnae3_client *client) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.rst_cnt; + int ret; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + + set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.rst_cnt) { + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + + client->ops->uninit_instance(&hdev->nic, 0); + return -EBUSY; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + if (netif_msg_drv(&hdev->nic)) + hclgevf_info_show(hdev); + + return 0; +} + +static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hnae3_client *client) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + + ret = client->ops->init_instance(&hdev->roce); + if (ret) + return ret; - return ret; -} + hnae3_set_client_init_flag(client, ae_dev, 1); -static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) -{ - /* disable misc vector(vector 0) */ - hclgevf_enable_vector(&hdev->misc_vector, false); - synchronize_irq(hdev->misc_vector.vector_irq); - free_irq(hdev->misc_vector.vector_irq, hdev); - hclgevf_free_vector(hdev, 0); + return 0; } static int hclgevf_init_client_instance(struct hnae3_client *client, @@ -1617,28 +2853,15 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, hdev->nic_client = client; hdev->nic.client = client; 
- ret = client->ops->init_instance(&hdev->nic); + ret = hclgevf_init_nic_client_instance(ae_dev, client); if (ret) - return ret; - - if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { - struct hnae3_client *rc = hdev->roce_client; - - ret = hclgevf_init_roce_base_info(hdev); - if (ret) - return ret; - ret = rc->ops->init_instance(&hdev->roce); - if (ret) - return ret; - } - break; - case HNAE3_CLIENT_UNIC: - hdev->nic_client = client; - hdev->nic.client = client; + goto clear_nic; - ret = client->ops->init_instance(&hdev->nic); + ret = hclgevf_init_roce_client_instance(ae_dev, + hdev->roce_client); if (ret) - return ret; + goto clear_roce; + break; case HNAE3_CLIENT_ROCE: if (hnae3_dev_roce_supported(hdev)) { @@ -1646,18 +2869,25 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, hdev->roce.client = client; } - if (hdev->roce_client && hdev->nic_client) { - ret = hclgevf_init_roce_base_info(hdev); - if (ret) - return ret; + ret = hclgevf_init_roce_client_instance(ae_dev, client); + if (ret) + goto clear_roce; - ret = client->ops->init_instance(&hdev->roce); - if (ret) - return ret; - } + break; + default: + return -EINVAL; } return 0; + +clear_nic: + hdev->nic_client = NULL; + hdev->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + hdev->roce.client = NULL; + return ret; } static void hclgevf_uninit_client_instance(struct hnae3_client *client, @@ -1666,13 +2896,25 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, struct hclgevf_dev *hdev = ae_dev->priv; /* un-init roce, if it exists */ - if (hdev->roce_client) + if (hdev->roce_client) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(100); hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + hdev->roce_client = NULL; + hdev->roce.client = NULL; + } /* un-init nic/unic, if this was not called by roce client */ - if ((client->ops->uninit_instance) && - (client->type != HNAE3_CLIENT_ROCE)) + if (client->ops->uninit_instance && hdev->nic_client && + client->type != HNAE3_CLIENT_ROCE) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(100); + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + client->ops->uninit_instance(&hdev->nic, 0); + hdev->nic_client = NULL; + hdev->nic.client = NULL; + } } static int hclgevf_pci_init(struct hclgevf_dev *hdev) @@ -1681,14 +2923,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; - /* check if we need to skip initialization of pci. This will happen if - * device is undergoing VF reset. Otherwise, we would need to - * re-initialize pci interface again i.e. when device is not going - * through *any* reset or actually undergoing full reset. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1754,15 +2988,18 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) req = (struct hclgevf_query_res_cmd *)desc.data; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->roce_base_msix_offset = - hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), + hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), HCLGEVF_MSIX_OFT_ROCEE_M, HCLGEVF_MSIX_OFT_ROCEE_S); hdev->num_roce_msix = - hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + /* nic's msix numbers is always equals to the roce's. */ + hdev->num_nic_msix = hdev->num_roce_msix; + /* VF should have NIC vectors and Roce vectors, NIC vectors * are queued before Roce vectors. The offset is fixed to 64. */ @@ -1770,56 +3007,146 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) hdev->roce_base_msix_offset; } else { hdev->num_msi = - hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + + hdev->num_nic_msix = hdev->num_msi; + } + + if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "Just %u msi resources, not enough for vf(min:2).\n", + hdev->num_nic_msix); + return -EINVAL; } return 0; } -static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = 0; + + if (hdev->reset_type == HNAE3_VF_FULL_RESET && + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + pci_set_master(pdev); + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to init MSI/MSI-X\n", ret); + return ret; + } + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + hclgevf_uninit_msi(hdev); + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + return ret; + } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + return ret; +} + +static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, + HCLGE_MBX_VPORT_LIST_CLEAR); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; int ret; - /* check if device is on-going full reset(i.e. 
pcie as well) */ - if (hclgevf_dev_ongoing_full_reset(hdev)) { - dev_warn(&pdev->dev, "device is going full reset\n"); - hclgevf_uninit_hdev(hdev); + ret = hclgevf_pci_reset(hdev); + if (ret) { + dev_err(&pdev->dev, "pci reset failed %d\n", ret); + return ret; } - ret = hclgevf_pci_init(hdev); + ret = hclgevf_cmd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "cmd failed %d\n", ret); + return ret; + } + + /* Initialize RSS for this VF */ + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + return ret; + } + + ret = hclgevf_config_gro(hdev, true); + if (ret) + return ret; + + ret = hclgevf_init_vlan_config(hdev); if (ret) { - dev_err(&pdev->dev, "PCI initialization failed\n"); + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); return ret; } + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + + dev_info(&hdev->pdev->dev, "Reset done\n"); + + return 0; +} + +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + + ret = hclgevf_pci_init(hdev); + if (ret) + return ret; + + ret = hclgevf_cmd_queue_init(hdev); + if (ret) + goto err_cmd_queue_init; + ret = hclgevf_cmd_init(hdev); if (ret) goto err_cmd_init; /* Get vf resource */ ret = hclgevf_query_vf_resource(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query vf status error, ret = %d.\n", ret); - goto err_query_vf; - } + if (ret) + goto err_cmd_init; ret = hclgevf_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); - goto err_query_vf; + goto err_cmd_init; } hclgevf_state_init(hdev); + hdev->reset_level = HNAE3_VF_FUNC_RESET; + hdev->reset_type = HNAE3_NONE_RESET; ret = hclgevf_misc_irq_init(hdev); - if (ret) { - dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", - ret); + if (ret) goto err_misc_irq_init; - } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); ret = hclgevf_configure(hdev); if (ret) { @@ -1834,20 +3161,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) } ret = hclgevf_set_handle_info(hdev); - if (ret) { - dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret); + if (ret) goto err_config; - } - /* Initialize mta type for this VF */ - ret = hclgevf_cfg_func_mta_type(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed(%d) to initialize MTA type\n", ret); + ret = hclgevf_config_gro(hdev, true); + if (ret) goto err_config; - } /* Initialize RSS for this VF */ + hclgevf_rss_init_cfg(hdev); ret = hclgevf_rss_init_hw(hdev); if (ret) { dev_err(&hdev->pdev->dev, @@ -1855,6 +3177,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + /* ensure vf tbl list as empty before init*/ + ret = hclgevf_clear_vport_list(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to clear tbl list configuration\n", ret); + goto err_config; + } + ret = hclgevf_init_vlan_config(hdev); if (ret) { dev_err(&hdev->pdev->dev, @@ -1862,7 +3192,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); + + hdev->last_reset_time = jiffies; + dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", + HCLGEVF_DRIVER_NAME); + + hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); return 0; @@ -1871,20 +3207,31 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); -err_query_vf: - 
hclgevf_cmd_uninit(hdev); err_cmd_init: + hclgevf_cmd_uninit(hdev); +err_cmd_queue_init: hclgevf_pci_uninit(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; } static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { + struct hclge_vf_to_pf_msg send_msg; + hclgevf_state_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + } + hclgevf_cmd_uninit(hdev); - hclgevf_uninit_msi(hdev); hclgevf_pci_uninit(hdev); + hclgevf_uninit_mac_list(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -1899,10 +3246,12 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) } ret = hclgevf_init_hdev(ae_dev->priv); - if (ret) + if (ret) { dev_err(&pdev->dev, "hclge device initialization failed\n"); + return ret; + } - return ret; + return 0; } static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -1918,7 +3267,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) struct hnae3_handle *nic = &hdev->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, + hdev->num_tqps / kinfo->tc_info.num_tc); } /** @@ -1939,18 +3289,95 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, ch->max_combined = hclgevf_get_max_channels(hdev); ch->other_count = 0; ch->max_other = 0; - ch->combined_count = hdev->num_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, - u16 *free_tqps, u16 *max_rss_size) + u16 *alloc_tqps, u16 *max_rss_size) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - *free_tqps = 0; + *alloc_tqps = hdev->num_tqps; *max_rss_size = hdev->rss_size_max; } +static void hclgevf_update_rss_size(struct hnae3_handle *handle, + u32 new_tqps_num) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u16 max_rss_size; + + kinfo->req_rss_size = new_tqps_num; + + max_rss_size = min_t(u16, hdev->rss_size_max, + hdev->num_tqps / kinfo->tc_info.num_tc); + + /* Set to user value, no larger than max_rss_size. */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* Set to the maximum specification value (max_rss_size). 
*/ + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", + kinfo->rss_size, max_rss_size); + kinfo->rss_size = max_rss_size; + } + + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; +} + +static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u16 cur_rss_size = kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; + u32 *rss_indir; + unsigned int i; + int ret; + + hclgevf_update_rss_size(handle, new_tqps_num); + + ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); + if (ret) + return ret; + + /* RSS indirection table has been configuared by user */ + if (rxfh_configured) + goto out; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) + rss_indir[i] = i % kinfo->rss_size; + + hdev->rss_cfg.rss_size = kinfo->rss_size; + + ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + +out: + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %u to %u, tqps from %u to %u", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); + + return ret; +} + static int hclgevf_get_status(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1979,26 +3406,191 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } +static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_config_gro(hdev, enable); +} + +static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (media_type) + *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; +} + +static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); +} + +static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->rst_stats.hw_rst_done_cnt; +} + +static void hclgevf_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *supported = hdev->hw.mac.supported; + *advertising = hdev->hw.mac.advertising; +} + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFDFCFBFA +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + +static int hclgevf_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = 
sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; +} + +static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, j, reg_um, separator_num; + u32 *reg = data; + + *version = hdev->fw_version; + + /* fetching per-VF registers values from VF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + 0x200 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } +} + +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size) +{ +#define MAX_LENGTH_OF_PORT_BASE_VLAN_INFO 8U + struct hnae3_handle *nic = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + if (data_size > MAX_LENGTH_OF_PORT_BASE_VLAN_INFO) { + dev_info(&hdev->pdev->dev, + "the VF info len %u exceeds max len %u\n", + data_size, MAX_LENGTH_OF_PORT_BASE_VLAN_INFO); + /* If data_size is too long, set the value to max length */ + data_size = MAX_LENGTH_OF_PORT_BASE_VLAN_INFO; + } + + rtnl_lock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_err(&hdev->pdev->dev, "dev resetting\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + } + + /* send msg to PF and wait update port based vlan info */ + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_PORT_BASE_VLAN_CFG); + memcpy(send_msg.data, port_base_vlan_info, data_size); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } + + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, + .reset_prepare = hclgevf_reset_prepare_general, + .reset_done = hclgevf_reset_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, 
.stop = hclgevf_ae_stop, + .client_start = hclgevf_client_start, + .client_stop = hclgevf_client_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, .get_vector = hclgevf_get_vector, .put_vector = hclgevf_put_vector, .reset_queue = hclgevf_reset_tqp, - .set_promisc_mode = hclgevf_set_promisc_mode, .get_mac_addr = hclgevf_get_mac_addr, .set_mac_addr = hclgevf_set_mac_addr, .add_uc_addr = hclgevf_add_uc_addr, .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, - .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, @@ -2007,15 +3599,33 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_rss_indir_size = hclgevf_get_rss_indir_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, + .get_rss_tuple = hclgevf_get_rss_tuple, + .set_rss_tuple = hclgevf_set_rss_tuple, .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .enable_vlan_filter = hclgevf_enable_vlan_filter, .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, + .set_default_reset_request = hclgevf_set_def_reset_request, + .set_channels = hclgevf_set_channels, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_regs_len = hclgevf_get_regs_len, + .get_regs = hclgevf_get_regs, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, + .get_media_type = hclgevf_get_media_type, + .get_hw_reset_stat = hclgevf_get_hw_reset_stat, + .ae_dev_resetting = hclgevf_ae_dev_resetting, + .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, + .set_gro_en = hclgevf_gro_en, + .set_mtu = hclgevf_set_mtu, + .get_global_queue_id = hclgevf_get_qid_global, + .set_timer_task = hclgevf_set_timer_task, + .get_link_mode = hclgevf_get_link_mode, + .set_promisc_mode = hclgevf_set_promisc_mode, + .request_update_promisc_mode = hclgevf_request_update_promisc_mode, }; static struct hnae3_ae_algo ae_algovf = { @@ -2027,6 +3637,12 @@ static int hclgevf_init(void) { pr_info("%s is initializing\n", HCLGEVF_NAME); + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); + if (!hclgevf_wq) { + pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); + return -ENOMEM; + } + hnae3_register_ae_algo(&ae_algovf); return 0; @@ -2035,6 +3651,7 @@ static int hclgevf_init(void) static void hclgevf_exit(void) { hnae3_unregister_ae_algo(&ae_algovf); + destroy_workqueue(hclgevf_wq); } module_init(hclgevf_init); module_exit(hclgevf_exit); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index b23ba171473c9c3cee18ba05d4f08adeb8ac15a6..a64d7f1f6ffa7a97c66b904b9cc5316a72317ed6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -4,17 +4,21 @@ #ifndef __HCLGEVF_MAIN_H #define __HCLGEVF_MAIN_H #include +#include #include #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" -#define HCLGEVF_MOD_VERSION "1.0" +#define HCLGEVF_MOD_VERSION "24.3.1" #define HCLGEVF_DRIVER_NAME "hclgevf" +#define HCLGEVF_MAX_VLAN_ID 4095 #define HCLGEVF_MISC_VECTOR_NUM 0 #define HCLGEVF_INVALID_VPORT 0xffff +#define HCLGEVF_GENERAL_TASK_INTERVAL 5 +#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2 /* This number in actual 
depends upon the total number of VFs * created by physical function. But the maximum number of @@ -27,15 +31,82 @@ #define HCLGEVF_VECTOR_REG_OFFSET 0x4 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 +/* bar registers for cmdq */ +#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 +#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104 +#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 +#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGEVF_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000 +#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004 +#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008 +#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGEVF_RING_RX_TAIL_REG 0x80018 +#define HCLGEVF_RING_RX_HEAD_REG 0x8001C +#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGEVF_RING_RX_OFFSET_REG 0x80024 +#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGEVF_RING_RX_STASH_REG 0x80030 +#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034 +#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040 +#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044 +#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048 +#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C +#define HCLGEVF_RING_TX_TC_REG 0x80050 +#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGEVF_RING_TX_TAIL_REG 0x80058 +#define HCLGEVF_RING_TX_HEAD_REG 0x8005C +#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGEVF_RING_TX_OFFSET_REG 0x80064 +#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074 +#define HCLGEVF_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000 +#define HCLGEVF_TQP_INTR_GL0_REG 0x20100 +#define HCLGEVF_TQP_INTR_GL1_REG 0x20200 +#define HCLGEVF_TQP_INTR_GL2_REG 0x20300 +#define HCLGEVF_TQP_INTR_RL_REG 0x20900 + /* Vector0 interrupt CMDQ event source register(RW) */ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 +/* Vector0 interrupt CMDQ event status register(RO) */ +#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 +/* RST register bits for RESET event */ +#define HCLGEVF_VECTOR0_RST_INT_B 2 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 /* Reset related Registers */ -#define HCLGEVF_FUN_RST_ING 0x20C00 -#define HCLGEVF_FUN_RST_ING_B 0 +#define HCLGEVF_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_BIT BIT(0) +#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5) +#define HCLGEVF_CORE_RST_ING_BIT BIT(6) +#define HCLGEVF_IMP_RST_ING_BIT BIT(7) +#define HCLGEVF_RST_ING_BITS \ + (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ + HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) + +#define HCLGEVF_VF_RST_ING 0x07008 +#define HCLGEVF_VF_RST_ING_BIT BIT(16) #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -46,30 +117,54 @@ #define HCLGEVF_RSS_HASH_ALGO_MASK 0xf #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) - -#define HCLGEVF_MTA_TBL_SIZE 4096 -#define 
HCLGEVF_MTA_TYPE_SEL_MAX 4 +#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGEVF_RSS_INPUT_TUPLE_SCTP4 GENMASK(4, 0) +#define HCLGEVF_D_PORT_BIT BIT(0) +#define HCLGEVF_S_PORT_BIT BIT(1) +#define HCLGEVF_D_IP_BIT BIT(2) +#define HCLGEVF_S_IP_BIT BIT(3) +#define HCLGEVF_V_TAG_BIT BIT(4) +#define HCLGEVF_RSS_INPUT_TUPLE_SCTP6 \ + (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT) + +#define HCLGEVF_STATS_TIMER_INTERVAL 36U + +enum hclgevf_evt_cause { + HCLGEVF_VECTOR0_EVENT_RST, + HCLGEVF_VECTOR0_EVENT_MBX, + HCLGEVF_VECTOR0_EVENT_OTHER, +}; /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ HCLGEVF_STATE_DOWN, HCLGEVF_STATE_DISABLED, + HCLGEVF_STATE_IRQ_INITED, + HCLGEVF_STATE_NIC_REGISTERED, + HCLGEVF_STATE_REMOVING, + HCLGEVF_STATE_SERVICE_INITED, /* task states */ - HCLGEVF_STATE_SERVICE_SCHED, HCLGEVF_STATE_RST_SERVICE_SCHED, HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, + HCLGEVF_STATE_CMD_DISABLE, + HCLGEVF_STATE_LINK_UPDATING, + HCLGEVF_STATE_PROMISC_CHANGED, + HCLGEVF_STATE_RST_FAIL, + HCLGEVF_STATE_PF_PUSH_LINK_STATUS, }; -#define HCLGEVF_MPF_ENBALE 1 - struct hclgevf_mac { + u8 media_type; + u8 module_type; u8 mac_addr[ETH_ALEN]; int link; u8 duplex; u32 speed; + u64 supported; + u64 advertising; }; struct hclgevf_hw { @@ -108,17 +203,63 @@ struct hclgevf_cfg { u32 numa_node_map; }; +struct hclgevf_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclgevf_rss_cfg { u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ u32 hash_algo; u32 rss_size; u8 hw_tc_map; u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ + struct hclgevf_rss_tuple_cfg rss_tuple_sets; }; struct hclgevf_misc_vector { u8 __iomem *addr; int vector_irq; + char name[HNAE3_INT_NAME_LEN]; +}; + +struct hclgevf_rst_stats { + u32 rst_cnt; /* the number of reset */ + u32 vf_func_rst_cnt; /* the number of VF function reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 vf_rst_cnt; /* the number of VF reset */ + u32 rst_done_cnt; /* the number of reset completed */ + u32 hw_rst_done_cnt; /* the number of HW reset completed */ + u32 rst_fail_cnt; /* the number of VF reset fail */ +}; + +enum HCLGEVF_MAC_ADDR_TYPE { + HCLGEVF_MAC_ADDR_UC, + HCLGEVF_MAC_ADDR_MC +}; + +enum HCLGEVF_MAC_ADDR_STATE { + HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ACTIVE +}; + +struct hclgevf_mac_addr_node { + struct list_head node; + enum HCLGEVF_MAC_ADDR_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +struct hclgevf_mac_table_cfg { + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; + struct list_head mc_mac_list; }; struct hclgevf_dev { @@ -128,14 +269,23 @@ struct hclgevf_dev { struct hclgevf_misc_vector misc_vector; struct hclgevf_rss_cfg rss_cfg; unsigned long state; + unsigned long flr_state; + unsigned long default_reset_request; + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + unsigned long reset_pending; + enum hnae3_reset_type reset_type; #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ + struct hclgevf_rst_stats rst_stats; u32 reset_attempts; + struct semaphore reset_sem; /* protect reset process */ u32 fw_version; - u16 num_tqps; /* num task queue pairs of this PF */ + u16 mbx_api_version; + u16 num_tqps; /* num 
task queue pairs of this VF */ u16 alloc_rss_size; /* allocated RSS task queue */ u16 rss_size_max; /* HW defined max RSS task queue */ @@ -143,12 +293,15 @@ struct hclgevf_dev { u16 num_alloc_vport; /* num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; + u8 has_pf_mac; u16 num_msi; u16 num_msi_left; u16 num_msi_used; + u16 num_nic_msix; /* Num of nic vectors for this VF */ u16 num_roce_msix; /* Num of roce vectors for this VF */ u16 roce_base_msix_offset; int roce_base_vector; @@ -156,16 +309,14 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; - bool accept_mta_mc; /* whether to accept mta filter multicast */ - u8 mta_mac_sel_type; - bool mbx_event_pending; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + + struct hclgevf_mac_table_cfg mac_table; + struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ - struct timer_list service_timer; - struct work_struct service_task; - struct work_struct rst_service_task; - struct work_struct mbx_service_task; + struct delayed_work service_task; struct hclgevf_tqp *htqp; @@ -175,24 +326,17 @@ struct hclgevf_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; u32 flag; + unsigned long serv_processed_cnt; + unsigned long last_serv_processed; }; -static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) -{ - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_RESET)); -} - -static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) { - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_FULL_RESET)); + return !!hdev->reset_pending; } -int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, - const u8 *msg_data, u8 msg_len, bool need_resp, +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_vf_to_pf_msg *send_msg, bool need_resp, u8 *resp_data, u16 resp_len); void hclgevf_mbx_handler(struct hclgevf_dev *hdev); void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev); @@ -202,4 +346,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, u8 duplex); void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index e9d5a4f96304e114722caea9c21509a4e0b6cc6c..ec2b4e65389388d76817a67df47a8b5ac92dca08 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -5,6 +5,37 @@ #include "hclgevf_main.h" #include "hnae3.h" +#define CREATE_TRACE_POINTS +#include "hclgevf_trace.h" + +static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, + {2, -ENOENT}, + {5, -EIO}, + {11, -EAGAIN}, + {12, -ENOMEM}, + {16, -EBUSY}, + {22, -EINVAL}, + {28, -ENOSPC}, + {95, -EOPNOTSUPP}, +}; + +static int hclgevf_resp_to_errno(u16 resp_code) +{ + u32 i; + + for (i = 0; + i < sizeof(err_code_map) / sizeof(struct errno_respcode_map); + i++) { + if 
(err_code_map[i].resp_code == resp_code) + return err_code_map[i].errno; + } + + return -EIO; +} + +#define HCLGEVF_MBX_MATCH_ID_START 1 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held @@ -13,6 +44,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; hdev->mbx_resp.resp_status = 0; + hdev->mbx_resp.match_id++; + /* Update match_id and ensure the value of match_id is not zero */ + if (hdev->mbx_resp.match_id == 0) + hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START; memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); } @@ -26,28 +61,34 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) { #define HCLGEVF_MAX_TRY_TIMES 500 -#define HCLGEVF_SLEEP_USCOEND 1000 +#define HCLGEVF_SLEEP_USECOND 1000 struct hclgevf_mbx_resp_status *mbx_resp; u16 r_code0, r_code1; int i = 0; if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { dev_err(&hdev->pdev->dev, - "VF mbx response len(=%d) exceeds maximum(=%d)\n", + "VF mbx response len(=%u) exceeds maximum(=%u)\n", resp_len, HCLGE_MBX_MAX_RESP_DATA_SIZE); return -EINVAL; } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { - udelay(HCLGEVF_SLEEP_USCOEND); + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + return -EIO; + + usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2); i++; } + /* ensure additional_info will be seen after received_resp */ + smp_rmb(); + if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, - "VF could not get mbx resp(=%d) from PF in %d tries\n", - hdev->mbx_resp.received_resp, i); + "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n", + code0, code1, hdev->mbx_resp.received_resp, i); return -EIO; } @@ -65,16 +106,19 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { dev_err(&hdev->pdev->dev, - "VF could not match resp code(code0=%d,code1=%d), %d", + "VF could not match resp code(code0=%u,code1=%u), %d\n", code0, code1, mbx_resp->resp_status); + dev_err(&hdev->pdev->dev, + "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n", + r_code0, r_code1); return -EIO; } return 0; } -int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, - const u8 *msg_data, u8 msg_len, bool need_resp, +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_vf_to_pf_msg *send_msg, bool need_resp, u8 *resp_data, u16 resp_len) { struct hclge_mbx_vf_to_pf_cmd *req; @@ -83,23 +127,26 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - /* first two bytes are reserved for code & subcode */ - if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) { + if (!send_msg) { dev_err(&hdev->pdev->dev, - "VF send mbx msg fail, msg len %d exceeds max len %d\n", - msg_len, HCLGE_MBX_MAX_MSG_SIZE); + "VF send mbx msg fail, msg is NULL\n"); return -EINVAL; } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); - req->msg[0] = code; - req->msg[1] = subcode; - memcpy(&req->msg[2], msg_data, msg_len); + if (need_resp) + hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1); + + memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg)); + + if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state)) + trace_hclge_vf_mbx_send(hdev, req); /* synchronous 
send */ if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); + req->match_id = hdev->mbx_resp.match_id; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -109,7 +156,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, return status; } - status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data, + status = hclgevf_get_mbx_resp(hdev, send_msg->code, + send_msg->subcode, resp_data, resp_len); mutex_unlock(&hdev->mbx_resp.mbx_mutex); } else { @@ -148,14 +196,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, "vf crq need init\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, - "dropped invalid mailbox message, code = %d\n", - req->msg[0]); + "dropped invalid mailbox message, code = %u\n", + req->msg.code); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; @@ -163,63 +216,81 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) continue; } + trace_hclge_vf_mbx_get(hdev, req); + /* synchronous messages are time critical and need preferential * treatment. Therefore, we need to acknowledge all the sync * responses as quickly as possible so that waiting tasks do not * timeout and simultaneously queue the async messages for later * prcessing in context of mailbox task i.e. the slow path. */ - switch (req->msg[0]) { + switch (req->msg.code) { case HCLGE_MBX_PF_VF_RESP: if (resp->received_resp) dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%d)\n", - req->msg[1]); - resp->received_resp = true; + "VF mbx resp flag not clear(%u)\n", + req->msg.vf_mbx_msg_code); - resp->origin_mbx_msg = (req->msg[1] << 16); - resp->origin_mbx_msg |= req->msg[2]; - resp->resp_status = req->msg[3]; + resp->origin_mbx_msg = + (req->msg.vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; + resp->resp_status = + hclgevf_resp_to_errno(req->msg.resp_status); - temp = (u8 *)&req->msg[4]; + temp = (u8 *)req->msg.resp_data; for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) { resp->additional_info[i] = *temp; temp++; } + + /* ensure additional_info will be seen before setting + * received_resp + */ + smp_wmb(); + + if (req->match_id) { + /* If match_id is not zero, it means the PF supports + * match_id. If the match_id matches, the VF takes + * the response; otherwise it ignores the response. + * The driver will clear hdev->mbx_resp when it + * sends the next message which needs a response. + */ + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: - /* set this mbx event as pending. This is required as we - * might loose interrupt event when mbx task is busy - * handling. This shall be cleared when mbx task just - * enters handling state.
- */ - hdev->mbx_event_pending = true; - + case HCLGE_MBX_LINK_STAT_MODE: + case HCLGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_PROMISC_INFO: /* we will drop the async msg if we find ARQ as full * and continue with next message */ - if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, - "Async Q full, dropping msg(%d)\n", - req->msg[1]); + "Async Q full, dropping msg(%u)\n", + req->msg.code); break; } /* tail the async message in arq */ msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], req->msg, + memcpy(&msg_q[0], &req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); - hdev->arq.count++; + atomic_inc(&hdev->arq.count); hclgevf_mbx_task_schedule(hdev); break; default: dev_err(&hdev->pdev->dev, - "VF received unsupported(%d) mbx msg from PF\n", - req->msg[0]); + "VF received unsupported(%u) mbx msg from PF\n", + req->msg.code); break; } crq->desc[crq->next_to_use].flag = 0; @@ -231,56 +302,91 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq->next_to_use); } +static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, + u16 promisc_info) +{ + if (!promisc_info) + dev_info(&hdev->pdev->dev, + "Promisc mode is closed by host for being untrusted.\n"); +} + void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { - u16 link_status; - u16 *msg_q; + enum hnae3_reset_type reset_type; + u16 link_status, state; + u16 *msg_q, *vlan_info; u8 duplex; u32 speed; u32 tail; - - /* we can safely clear it now as we are at start of the async message - * processing - */ - hdev->mbx_event_pending = false; + u8 flag; + u8 idx; tail = hdev->arq.tail; /* process all the async queue messages */ while (tail != hdev->arq.head) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, + "vf crq need init in async\n"); + return; + } + msg_q = hdev->arq.msg_q[hdev->arq.head]; switch (msg_q[0]) { case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = le16_to_cpu(msg_q[1]); + link_status = msg_q[1]; memcpy(&speed, &msg_q[2], sizeof(speed)); - duplex = (u8)le16_to_cpu(msg_q[4]); + duplex = (u8)msg_q[4]; + flag = (u8)msg_q[5]; /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + hclgevf_update_link_status(hdev, link_status); + if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) + set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, + &hdev->state); break; + case HCLGE_MBX_LINK_STAT_MODE: + idx = (u8)msg_q[1]; + if (idx) + memcpy(&hdev->hw.mac.supported, &msg_q[2], + sizeof(unsigned long)); + else + memcpy(&hdev->hw.mac.advertising, &msg_q[2], + sizeof(unsigned long)); + break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending * state and poll for the hardware reset status till it * has been completely reset. After this stack should * eventually be re-initialized. 
*/ - hdev->nic.reset_level = HNAE3_VF_RESET; + reset_type = (enum hnae3_reset_type)msg_q[1]; + set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); + break; + case HCLGE_MBX_PUSH_VLAN_INFO: + state = msg_q[1]; + vlan_info = &msg_q[1]; + hclgevf_update_port_base_vlan_info(hdev, state, + (u8 *)vlan_info, 8); + break; + case HCLGE_MBX_PUSH_PROMISC_INFO: + hclgevf_parse_promisc_info(hdev, msg_q[1]); break; default: dev_err(&hdev->pdev->dev, - "fetched unsupported(%d) message from arq\n", + "fetched unsupported(%u) message from arq\n", msg_q[0]); break; } hclge_mbx_head_ptr_move_arq(hdev->arq); - hdev->arq.count--; + atomic_dec(&hdev->arq.count); msg_q = hdev->arq.msg_q[hdev->arq.head]; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..e4bfb6191fef579e8c758062700750e62844941f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGEVF_TRACE_H_ + +#include + +#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) +#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_vf_mbx_get, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = req->msg.code; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_vf_mbx_send, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGEVF_TRACE_H_ */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclgevf_trace +#include diff --git a/drivers/net/ethernet/hisilicon/hns3/kcompat.c b/drivers/net/ethernet/hisilicon/hns3/kcompat.c new file mode 100644 index 0000000000000000000000000000000000000000..4cadddabeaf1181790ff8d9f40979caa1095adda --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/kcompat.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include "kcompat.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +#ifdef CONFIG_PCI_MSI +void _kc_pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msix(dev); + pci_disable_msi(dev); +} + +int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (dev->msix_enabled) { + struct msi_desc *entry; + int i = 0; + + for_each_pci_msi_entry(entry, dev) { + if (i == nr) + return entry->irq; + i++; + } + WARN_ON_ONCE(1); + return -EINVAL; + } + + if (dev->msi_enabled) { + struct msi_desc *entry = first_pci_msi_entry(dev); + + if (WARN_ON_ONCE(nr >= entry->nvec_used)) + return -EINVAL; + } else { + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + } + + return dev->irq + nr; +} + +int _kc_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + int vecs; + + if (flags & PCI_IRQ_MSIX) { + vecs = pci_enable_msix_range(dev, NULL, min_vecs, max_vecs); + if (vecs > 0) + return vecs; + } + + if (flags & PCI_IRQ_MSI) { + vecs = pci_enable_msi_range(dev, min_vecs, max_vecs); + if (vecs > 0) + return vecs; + } + + return vecs; +} +#else +void _kc_pci_free_irq_vectors(struct pci_dev *dev) {} + +int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + if (WARN_ON_ONCE(nr > 0)) + return -EINVAL; + return dev->irq; +} + +int _kc_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) + +{ + if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) + return 1; + return -ENOSPC; +} + +#endif +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/kcompat.h b/drivers/net/ethernet/hisilicon/hns3/kcompat.h new file mode 100644 index 0000000000000000000000000000000000000000..5cf3a35ca607a718ecf004f7d4176235fa20f2f0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/kcompat.h @@ -0,0 +1,428 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include +#include +#include +#include + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifndef PCI_VENDOR_ID_HUAWEI +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +#undef mdiobus_get_phy +#define mdiobus_get_phy _kc_mdiobus_get_phy + +static inline +struct phy_device *_kc_mdiobus_get_phy(struct mii_bus *bus, int addr) +{ + struct phy_device *phydev = bus->phy_map[addr]; + + if (!phydev) + return NULL; + + return phydev; +} + +#endif /* 4.5.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) +#undef csum_replace_by_diff +#define csum_replace_by_diff _kc_csum_replace_by_diff + +static inline void _kc_csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} + +#else +#define HAVE_ETHTOOL_IPV6_NFC_API +#endif /* 4.6.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) + +#undef phy_ethtool_ksettings_get +#define phy_ethtool_ksettings_get _kc_phy_ethtool_ksettings_get + +/* Hi1980 IO have no external phy devices, so just return not support */ +static inline int _kc_phy_ethtool_ksettings_get(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd) +{ + return -EOPNOTSUPP; +} + +#undef phy_ethtool_ksettings_set +#define phy_ethtool_ksettings_set _kc_phy_ethtool_ksettings_set +static inline int _kc_phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd) +{ + return -EOPNOTSUPP; +} + +#else + +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#endif /* 4.7.0 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} + +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ +#define PCI_IRQ_ALL_TYPES \ + (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) + +#undef pci_free_irq_vectors +#define pci_free_irq_vectors _kc_pci_free_irq_vectors + +#undef pci_irq_vector +#define pci_irq_vector _kc_pci_irq_vector + +#undef pci_alloc_irq_vectors +#define pci_alloc_irq_vectors _kc_pci_alloc_irq_vectors + +void _kc_pci_free_irq_vectors(struct pci_dev *dev); + +int _kc_pci_irq_vector(struct pci_dev *dev, unsigned int nr); + +int _kc_pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags); + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +#else +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) +#else /* >= 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) +#else /* >= 4.13 */ +#define SKB_PUT_RETURN_VOID_POINT +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#else +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 15, 0)) +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = be32_to_cpu(src[i]); +} + +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO + +/* MQPRIO */ +#define TC_QOPT_BITMASK 15 +#define TC_QOPT_MAX_QUEUE 16 + +enum { + TC_MQPRIO_MODE_DCB, + TC_MQPRIO_MODE_CHANNEL, + __TC_MQPRIO_MODE_MAX +}; + +struct tc_mqprio_qopt_offload { + /* struct tc_mqprio_qopt must always be the first element */ + struct tc_mqprio_qopt qopt; + u16 mode; + u16 shaper; + u32 flags; + u64 min_rate[TC_QOPT_MAX_QUEUE]; + u64 max_rate[TC_QOPT_MAX_QUEUE]; +}; + +#else +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + +#define is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 \ + - is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +/* + * For simplicity and code hygiene, the fallback code below insists on + * a, b and *d having the same type (similar to the min() and max() + * macros), whereas gcc's type-generic overflow checkers accept + * different types. Hence we don't just make check_add_overflow an + * alias for __builtin_add_overflow, but add type checks similar to + * below. + */ +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + +/* Checking for unsigned overflow is relatively easy without causing UB. */ +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +/* + * If one of a or b is a compile-time constant, this avoids a division. + */ +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + +/* + * For signed types, detecting overflow is much harder, especially if + * we want to avoid UB. But the interface of these macros is such that + * we must provide a result in *d, and in fact we must produce the + * result promised by gcc's builtins, which is simply the possibly + * wrapped-around value. 
Fortunately, we can just formally do the + * operations in the widest relevant unsigned type (u64) and then + * truncate the result - gcc is smart enough to generate the same code + * with and without the (u64) casts. + */ + +/* + * Adding two signed integers can overflow only if they have the same + * sign, and overflow has happened iff the result has the opposite + * sign. + */ +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Subtraction is similar, except that overflow can now happen only + * when the signs are opposite. In this case, overflow has happened if + * the result has the opposite sign of a. + */ +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +/* + * Signed multiplication is rather hard. gcc always follows C99, so + * division is truncated towards 0. This means that we can write the + * overflow check like this: + * + * (a > 0 && (b > MAX/a || b < MIN/a)) || + * (a < -1 && (b > MIN/a || b < MAX/a) || + * (a == -1 && b == MIN) + * + * The redundant casts of -1 are to silence an annoying -Wtype-limits + * (included in -Wextra) warning: When the type is u8 or u16, the + * __b_c_e in check_mul_overflow obviously selects + * __unsigned_mul_overflow, but unfortunately gcc still parses this + * code and warns about the limited range of __b. + */ + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + + +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ + +#ifndef array3_size +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. 
+ */ +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} +#endif +#else +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 4)) + +#include + +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) +{ + __clear_bit(nr, addr); +} + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +#else + +#define HAS_LINK_MODE_OPS + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) +#ifndef ETH_MODULE_SFF_8636_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#endif + +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 017e08452d8c0b2ae9b1117e182b099aa36c0447..0df85713e435a6c2dad4914d0b32aef9eaa7d914 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2014-2015 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include @@ -26,6 +22,7 @@ #define MDIO_DRV_NAME "Hi-HNS_MDIO" #define MDIO_BUS_NAME "Hisilicon MII Bus" +#define MDIO_MOD_VERSION "23.12.1" #define MDIO_TIMEOUT 1000000 @@ -39,7 +36,7 @@ struct hns_mdio_sc_reg { }; struct hns_mdio_device { - void *vbase; /* mdio reg base address */ + u8 __iomem *vbase; /* mdio reg base address */ struct regmap *subctrl_vbase; struct hns_mdio_sc_reg sc_reg; }; @@ -96,21 +93,17 @@ enum mdio_c45_op_seq { #define MDIO_SC_CLK_ST 0x531C #define MDIO_SC_RESET_ST 0x5A1C -static void mdio_write_reg(void *base, u32 reg, u32 value) +static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - writel_relaxed(value, reg_addr + reg); + writel_relaxed(value, base + reg); } #define MDIO_WRITE_REG(a, reg, value) \ mdio_write_reg((a)->vbase, (reg), (value)) -static u32 mdio_read_reg(void *base, u32 reg) +static u32 mdio_read_reg(u8 __iomem *base, u32 reg) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - return readl_relaxed(reg_addr + reg); + return readl_relaxed(base + reg); } #define mdio_set_field(origin, mask, shift, val) \ @@ -121,7 +114,7 @@ static u32 mdio_read_reg(void *base, u32 reg) #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) -static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, +static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = mdio_read_reg(base, reg); @@ -133,7 +126,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) -static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) +static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; @@ -156,11 +149,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev, { u32 time_cnt; u32 reg_value; + int ret; regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val); for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) { - regmap_read(mdio_dev->subctrl_vbase, st_reg, ®_value); + ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, ®_value); + if (ret) + return ret; + reg_value &= st_msk; if ((!!check_st) == (!!reg_value)) break; @@ -214,7 +211,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev, * @bus: mdio bus * @phy_id: phy id * @regnum: register num - * @value: register value + * @data: register value * * Return 0 on success, negative on failure */ @@ -277,14 +274,13 @@ static int hns_mdio_write(struct mii_bus *bus, * @bus: mdio bus * @phy_id: phy id * @regnum: register num - * @value: register value * * Return phy register value */ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { int ret; - u16 reg_val = 0; + u16 reg_val; u8 devad = ((regnum >> 16) & 0x1f); u8 is_c45 = !!(regnum & MII_ADDR_C45); u16 reg = (u16)(regnum & 0xffff); @@ -321,7 +317,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) } hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + MDIO_C45_READ, phy_id, devad); } /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ @@ -426,7 +422,7 @@ static int hns_mdio_probe(struct platform_device *pdev) struct hns_mdio_device *mdio_dev; struct mii_bus *new_bus; struct resource *res; - int ret = -ENODEV; + int ret; if (!pdev) { dev_err(NULL, "pdev is NULL!\r\n"); @@ -579,3 +575,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_DESCRIPTION("Hisilicon HNS MDIO driver"); MODULE_ALIAS("platform:" MDIO_DRV_NAME); +MODULE_VERSION(MDIO_MOD_VERSION); diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig index c1a95ae4058b075cd84b3aeff8d2a93f3c69d3ac..eb9a4e45a8a29c73c4624bf5a244dbd01ba43565 100644 --- a/drivers/net/ethernet/huawei/Kconfig +++ b/drivers/net/ethernet/huawei/Kconfig @@ -15,5 +15,6 @@ config NET_VENDOR_HUAWEI if NET_VENDOR_HUAWEI source "drivers/net/ethernet/huawei/hinic/Kconfig" +source "drivers/net/ethernet/huawei/bma/Kconfig" endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile index 5c37cc8fc1bc371a9920dae03e02acd6b852acb2..b80925adee0703a9479da10e1ce69dcc09ab2867 100644 --- a/drivers/net/ethernet/huawei/Makefile +++ b/drivers/net/ethernet/huawei/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_HINIC) += hinic/ +obj-$(CONFIG_BMA) += bma/ diff --git a/drivers/net/ethernet/huawei/bma/Kconfig b/drivers/net/ethernet/huawei/bma/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..3c4c7b5d9757503dc7d7d064700e2106004a2e60 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/Kconfig @@ -0,0 +1,10 @@ +# +# Huawei BMA software driver configuration +# + +config BMA + tristate "Huawei BMA Driver" + + help + This driver supports Huawei BMA Software. It is used + to communication between Huawei BMA and BMC software. \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/Makefile b/drivers/net/ethernet/huawei/bma/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a96f4120815bbe8810c69908833d845746c4b11e --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for BMA software driver +# + +obj-$(CONFIG_BMA) += edma_drv/ +obj-$(CONFIG_BMA) += cdev_drv/ +obj-$(CONFIG_BMA) += veth_drv/ +obj-$(CONFIG_BMA) += kbox_drv/ +obj-$(CONFIG_BMA) += cdev_veth_drv/ \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/Makefile b/drivers/net/ethernet/huawei/bma/cdev_drv/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a6186ca12038a9e9417da152f53f2be41c8f4b3f --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/cdev_drv/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BMA) += host_cdev_drv.o +host_cdev_drv-y := bma_cdev.o \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c new file mode 100644 index 0000000000000000000000000000000000000000..275c2cdfe5db2011dc2001da7bd5beb3b5c1f433 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "../edma_drv/bma_include.h" +#include "../include/bma_ker_intf.h" + +#define CDEV_NAME_PREFIX "hwibmc" + +#ifdef DRV_VERSION +#define CDEV_VERSION MICRO_TO_STR(DRV_VERSION) +#else +#define CDEV_VERSION "0.3.10" +#endif + +#define CDEV_DEFAULT_NUM 4 +#define CDEV_MAX_NUM 8 + +#define CDEV_NAME_MAX_LEN 32 +#define CDEV_INVALID_ID (0xffffffff) + +struct cdev_statistics_s { + unsigned int recv_bytes; + unsigned int send_bytes; + unsigned int send_pkgs; + unsigned int recv_pkgs; + unsigned int send_failed_count; + unsigned int recv_failed_count; + unsigned int open_status; +}; + +struct cdev_dev { + struct miscdevice dev_struct; + struct cdev_statistics_s s; + char dev_name[CDEV_NAME_MAX_LEN]; + dev_t dev_id; + void *dev_data; + atomic_t open; + int type; +}; + +struct cdev_dev_set { + struct cdev_dev dev_list[CDEV_MAX_NUM]; + int dev_num; + unsigned int init_time; +}; + +int dev_num = CDEV_DEFAULT_NUM; /* the dev num want to create */ +int debug = DLOG_ERROR; /* debug switch */ +module_param(dev_num, int, 0640); +MODULE_PARM_DESC(dev_num, "cdev num you want"); +MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)"); + +#define CDEV_LOG(level, fmt, args...) do {\ + if (debug >= (level)) {\ + netdev_info(0, "edma_cdev: %s, %d, " fmt "\n", \ + __func__, __LINE__, ## args);\ + } \ +} while (0) + +static int cdev_open(struct inode *inode, struct file *filp); +static int cdev_release(struct inode *inode, struct file *filp); +static unsigned int cdev_poll(struct file *file, poll_table *wait); +static ssize_t cdev_read(struct file *filp, char __user *data, size_t count, + loff_t *ppos); +static ssize_t cdev_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos); + +struct cdev_dev_set g_cdev_set; + +#define GET_PRIVATE_DATA(f) (((struct cdev_dev *)((f)->private_data))->dev_data) + +module_param_call(debug, &edma_param_set_debug, &param_get_int, &debug, 0644); + +static int cdev_param_get_statics(char *buf, const struct kernel_param *kp) +{ + int len = 0; + int i = 0; + __kernel_time_t running_time = 0; + + if (!buf) + return 0; + + GET_SYS_SECONDS(running_time); + running_time -= g_cdev_set.init_time; + len += sprintf(buf + len, + "============================CDEV_DRIVER_INFO=======================\n"); + len += sprintf(buf + len, "version :%s\n", CDEV_VERSION); + + len += sprintf(buf + len, "running_time :%luD %02lu:%02lu:%02lu\n", + running_time / (SECONDS_PER_DAY), + running_time % (SECONDS_PER_DAY) / SECONDS_PER_HOUR, + running_time % SECONDS_PER_HOUR / SECONDS_PER_MINUTE, + running_time % SECONDS_PER_MINUTE); + + for (i = 0; i < g_cdev_set.dev_num; i++) { + len += sprintf(buf + len, + "===================================================\n"); + len += sprintf(buf + len, "name :%s\n", + g_cdev_set.dev_list[i].dev_name); + len += + sprintf(buf + len, "dev_id :%08x\n", + g_cdev_set.dev_list[i].dev_id); + len += sprintf(buf + len, "type :%u\n", + g_cdev_set.dev_list[i].type); + len += sprintf(buf + len, "status :%s\n", + g_cdev_set.dev_list[i].s.open_status == + 1 ?
"open" : "close"); + len += sprintf(buf + len, "send_pkgs :%u\n", + g_cdev_set.dev_list[i].s.send_pkgs); + len += + sprintf(buf + len, "send_bytes:%u\n", + g_cdev_set.dev_list[i].s.send_bytes); + len += sprintf(buf + len, "send_failed_count:%u\n", + g_cdev_set.dev_list[i].s.send_failed_count); + len += sprintf(buf + len, "recv_pkgs :%u\n", + g_cdev_set.dev_list[i].s.recv_pkgs); + len += sprintf(buf + len, "recv_bytes:%u\n", + g_cdev_set.dev_list[i].s.recv_bytes); + len += sprintf(buf + len, "recv_failed_count:%u\n", + g_cdev_set.dev_list[i].s.recv_failed_count); + } + + return len; +} +module_param_call(statistics, NULL, cdev_param_get_statics, &debug, 0444); +MODULE_PARM_DESC(statistics, "Statistics info of cdev driver,readonly"); + +const struct file_operations g_bma_cdev_fops = { + .owner = THIS_MODULE, + .open = cdev_open, + .release = cdev_release, + .poll = cdev_poll, + .read = cdev_read, + .write = cdev_write, +}; + +static int __init bma_cdev_init(void) +{ + int i = 0; + + int ret = 0; + int err_count = 0; + + if (!bma_intf_check_edma_supported()) + return -ENXIO; + + if (dev_num <= 0 || dev_num > CDEV_MAX_NUM) + return -EINVAL; + + memset(&g_cdev_set, 0, sizeof(struct cdev_dev_set)); + g_cdev_set.dev_num = dev_num; + + for (i = 0; i < dev_num; i++) { + struct cdev_dev *pdev = &g_cdev_set.dev_list[i]; + + sprintf(pdev->dev_name, "%s%d", CDEV_NAME_PREFIX, i); + pdev->dev_struct.name = pdev->dev_name; + pdev->dev_struct.minor = MISC_DYNAMIC_MINOR; + pdev->dev_struct.fops = &g_bma_cdev_fops; + + pdev->dev_id = CDEV_INVALID_ID; + + ret = misc_register(&pdev->dev_struct); + + if (ret) { + CDEV_LOG(DLOG_DEBUG, "misc_register failed %d", i); + err_count++; + continue; + } + + pdev->dev_id = MKDEV(MISC_MAJOR, pdev->dev_struct.minor); + + ret = bma_intf_register_type(TYPE_CDEV + i, 0, INTR_DISABLE, + &pdev->dev_data); + + if (ret) { + CDEV_LOG(DLOG_ERROR, + "cdev %d open failed ,result = %d", + i, ret); + misc_deregister(&pdev->dev_struct); + pdev->dev_id = CDEV_INVALID_ID; + err_count++; + continue; + } else { + pdev->type = TYPE_CDEV + i; + atomic_set(&pdev->open, 1); + } + + CDEV_LOG(DLOG_DEBUG, "%s id is %08x", pdev->dev_struct.name, + pdev->dev_id); + } + + if (err_count == dev_num) { + CDEV_LOG(DLOG_ERROR, "init cdev failed!"); + return -EFAULT; + } + GET_SYS_SECONDS(g_cdev_set.init_time); + return 0; +} + +static void __exit bma_cdev_exit(void) +{ + while (dev_num--) { + struct cdev_dev *pdev = &g_cdev_set.dev_list[dev_num]; + + if (pdev->dev_id != CDEV_INVALID_ID) { + if (pdev->dev_data && pdev->type != 0) + (void)bma_intf_unregister_type(&pdev->dev_data); + + (void)misc_deregister + (&g_cdev_set.dev_list[dev_num].dev_struct); + } + } +} + +int cdev_open(struct inode *inode_prt, struct file *filp) +{ + int i = 0; + struct cdev_dev *pdev = NULL; + + if (!inode_prt) + return -EFAULT; + if (!filp) + return -EFAULT; + + if (dev_num <= 0) { + CDEV_LOG(DLOG_ERROR, "dev_num error"); + return -EFAULT; + } + + for (i = 0; i < dev_num; i++) { + pdev = &g_cdev_set.dev_list[i]; + + if (pdev->dev_id == inode_prt->i_rdev) + break; + } + + if (i == dev_num) { + CDEV_LOG(DLOG_ERROR, "can not find dev id %08x", + inode_prt->i_rdev); + return -ENODEV; + } + /*each device can be opened only onece */ + if (atomic_dec_and_test(&pdev->open) == 0) { + CDEV_LOG(DLOG_ERROR, "%s is already opened", + pdev->dev_name); + atomic_inc(&pdev->open); + return -EBUSY; /* already opened */ + } + + filp->private_data = &g_cdev_set.dev_list[i]; + bma_intf_set_open_status(pdev->dev_data, DEV_OPEN); + ((struct cdev_dev 
*)filp->private_data)->s.open_status++; + + return 0; +} + +int cdev_release(struct inode *inode_prt, struct file *filp) +{ + struct cdev_dev *pdev = NULL; + + if (!filp) + return 0; + + pdev = (struct cdev_dev *)filp->private_data; + if (pdev) { + ((struct cdev_dev *)filp->private_data)->s.open_status--; + bma_intf_set_open_status(pdev->dev_data, DEV_CLOSE); + atomic_inc(&pdev->open); + filp->private_data = NULL; + } + + return 0; +} + +unsigned int cdev_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + wait_queue_head_t *queue_head = NULL; + + if (!filp) + return 0; + queue_head = (wait_queue_head_t *) + bma_cdev_get_wait_queue(GET_PRIVATE_DATA(filp)); + + if (!queue_head) + return 0; + + poll_wait(filp, queue_head, wait); + + if (bma_cdev_check_recv(GET_PRIVATE_DATA(filp))) + mask |= (POLLIN | POLLRDNORM); + + CDEV_LOG(DLOG_DEBUG, "poll return %08x", mask); + + return mask; +} + +ssize_t cdev_read(struct file *filp, char __user *data, size_t count, + loff_t *ppos) +{ + int ret = 0; + + CDEV_LOG(DLOG_DEBUG, "data is %p,count is %u", data, + (unsigned int)count); + + if (!data || count <= 0) + return -EFAULT; + + ret = bma_cdev_recv_msg(GET_PRIVATE_DATA(filp), data, count); + + if (ret > 0) { + ((struct cdev_dev *)filp->private_data)->s.recv_bytes += ret; + ((struct cdev_dev *)filp->private_data)->s.recv_pkgs++; + } else { + ((struct cdev_dev *)filp->private_data)->s.recv_failed_count++; + } + + return ret; +} + +ssize_t cdev_write(struct file *filp, const char __user *data, size_t count, + loff_t *ppos) +{ + int ret = 0; + + if (!data || count <= 0) + return -EFAULT; + + CDEV_LOG(DLOG_DEBUG, "data is %p,count is %u", data, + (unsigned int)count); + ret = bma_cdev_add_msg(GET_PRIVATE_DATA(filp), data, count); + + if (ret > 0) { + ((struct cdev_dev *)filp->private_data)->s.send_bytes += ret; + ((struct cdev_dev *)filp->private_data)->s.send_pkgs++; + } else { + ((struct cdev_dev *)filp->private_data)->s.send_failed_count++; + } + + return ret; +} + +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_DESCRIPTION("HUAWEI CDEV DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(CDEV_VERSION); + +module_init(bma_cdev_init); +module_exit(bma_cdev_exit); diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/Makefile b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b3e748d577ac39ad28e901ff2dcce136d68b4c0e --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BMA) += cdev_veth_drv.o +cdev_veth_drv-y := virtual_cdev_eth_net.o \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c new file mode 100644 index 0000000000000000000000000000000000000000..6df11dc9c8f92bea2902ff0df4888a9f02a62492 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c @@ -0,0 +1,1864 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2019, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "virtual_cdev_eth_net.h" + +static struct edma_eth_dev_s g_eth_edmaprivate; +static struct edma_packet_node_s g_edma_recv_packet_tmp = {0, NULL}; +static struct edma_cut_packet_node_s *g_edma_send_cut_packet; +static unsigned int g_last_token = TK_START_END; +static unsigned int g_device_opened = CDEV_CLOSED; +static unsigned int g_last_number; +static unsigned int g_peer_not_ready; +static unsigned int g_read_pos; +static unsigned int g_delay_ms; +static int g_write_count; + +static const int NO_SPACE_RETRY = 10; +static const int NO_SPACE_WAIT_MS = 2000; +static const int CUT_PKG_SLEEP_MS = 1; +static const int CUT_PKG_LIMIT_COUNT = 30; +static const int SEND_INT_PKG_COUNT = 50; + +static int cdev_open(struct inode *inode_ptr, struct file *filp); +static int cdev_release(struct inode *inode_ptr, struct file *filp); +static unsigned int cdev_poll(struct file *file, poll_table *wait); +static ssize_t cdev_read(struct file *filp, char __user *data, + size_t count, loff_t *ppos); +static ssize_t cdev_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos); + +#define IS_CDEV_IN_OPEN_STATE() (g_device_opened != CDEV_CLOSED) +#define SET_CDEV_OPEN_STATE(x) (g_device_opened = (x)) + +int debug = DLOG_ERROR; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)"); + +#define GET_PRIVATE_DATA(f) (((struct cdev_dev_s *)((f)->private_data))->priv) + +const struct file_operations g_eth_edma_cdev_fops = { + .owner = THIS_MODULE, + .open = cdev_open, + .release = cdev_release, + .poll = cdev_poll, + .read = cdev_read, + .write = cdev_write, +}; + +void dump_global_info(void) +{ + struct edma_shmq_hd_s *pshmqhd_v = NULL; + + if (!debug) + return; + + LOG(DLOG_DEBUG, "\r\n=================VETH INFO=================\r\n"); + + pshmqhd_v = g_eth_edmaprivate.ptx_queue->pshmqhd_v; + LOG(DLOG_DEBUG, "TX head/tail: %u/%u ------------", + pshmqhd_v->head, pshmqhd_v->tail); + + pshmqhd_v = g_eth_edmaprivate.prx_queue->pshmqhd_v; + LOG(DLOG_DEBUG, "RX head/tail: %u/%u ------------", + pshmqhd_v->head, pshmqhd_v->tail); +} + +static inline int edma_is_queue_ready(struct edma_rxtx_q_s *prxtx_queue) +{ + if (!prxtx_queue) + return 0; + + return (prxtx_queue->pshmqhd_v->init == BSPVETH_SHMQUEUE_INITOK_V2); +} + +static inline void edma_veth_host_addr_init(void *priv) +{ + struct bma_priv_data_s *edma_priv = (struct bma_priv_data_s *)priv; + + g_eth_edmaprivate.pshmpool_p = + (u8 *)edma_priv->specific.veth.veth_swap_phy_addr; + g_eth_edmaprivate.pshmpool_v = + (u8 *)edma_priv->specific.veth.veth_swap_addr; + g_eth_edmaprivate.shmpoolsize = + (u32)edma_priv->specific.veth.veth_swap_len; +} + +void edma_veth_free_tx_resources(struct edma_rxtx_q_s *ptx_queue) +{ + struct edma_bd_info_s *pbdinfobase_v = NULL; + + if (!ptx_queue || !ptx_queue->pbdinfobase_v) + return; + + pbdinfobase_v = ptx_queue->pbdinfobase_v; + ptx_queue->pbdinfobase_v = NULL; + ptx_queue->pshmqhd_v = NULL; + + vfree(pbdinfobase_v); + + LOG(DLOG_DEBUG, "%s ok. 
count=%d", __func__, ptx_queue->count); +} + +void edma_veth_free_all_tx_resources(struct edma_eth_dev_s *edma_eth) +{ + if (edma_eth && edma_eth->ptx_queue) { + edma_veth_free_tx_resources(edma_eth->ptx_queue); + kfree(edma_eth->ptx_queue); + edma_eth->ptx_queue = NULL; + } +} + +int edma_veth_setup_tx_resources(struct edma_rxtx_q_s *ptx_queue) +{ + int size; + + ptx_queue->count = MAX_QUEUE_BDNUM; + size = sizeof(struct edma_bd_info_s) * ptx_queue->count; + + ptx_queue->pbdinfobase_v = vmalloc(size); + if (!ptx_queue->pbdinfobase_v) { + LOG(DLOG_ERROR, "Failed to alloc memory for the TX queue."); + return -ENOMEM; + } + + memset(ptx_queue->pbdinfobase_v, 0, size); + + /* round up to nearest 4K */ + size = sizeof(struct edma_dma_shmbd_s) * ptx_queue->count; + ptx_queue->size = ALIGN(size, ALIGN_MASK); + + ptx_queue->work_limit = BSPVETH_WORK_LIMIT; + + return 0; +} + +int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth) +{ + int err; + u8 *shmq_head = NULL; + u8 *shmq_head_p = NULL; + struct edma_rxtx_q_s *tx_queue = NULL; + + tx_queue = (struct edma_rxtx_q_s *) + kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL); + if (!tx_queue) { + LOG(DLOG_ERROR, "Failed to alloc TX queue."); + return -ENOMEM; + } + + memset(tx_queue, 0, sizeof(struct edma_rxtx_q_s)); + + shmq_head = edma_eth->pshmpool_v + (MAX_SHAREQUEUE_SIZE * 0); + shmq_head_p = edma_eth->pshmpool_p + (MAX_SHAREQUEUE_SIZE * 0); + + tx_queue->pshmqhd_v = (struct edma_shmq_hd_s *)shmq_head; + tx_queue->pshmqhd_p = shmq_head_p; + + tx_queue->pshmbdbase_v = (struct edma_dma_shmbd_s *) + (shmq_head + BSPVETH_SHMBDBASE_OFFSET); + tx_queue->pshmbdbase_p = shmq_head_p + BSPVETH_SHMBDBASE_OFFSET; + + tx_queue->pdmalbase_v = (struct edma_dmal_s *) + (shmq_head + SHMDMAL_OFFSET); + tx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC + + (MAX_SHAREQUEUE_SIZE * 0) + SHMDMAL_OFFSET); + + memset(tx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE); + + err = edma_veth_setup_tx_resources(tx_queue); + if (err) { + kfree(tx_queue); + return err; + } + + edma_eth->ptx_queue = tx_queue; + + return 0; +} + +int edma_veth_setup_rx_resources(struct edma_rxtx_q_s *prx_queue) +{ + int size; + + prx_queue->count = MAX_QUEUE_BDNUM; + size = sizeof(struct edma_bd_info_s) * prx_queue->count; + + prx_queue->pbdinfobase_v = vmalloc(size); + if (!prx_queue->pbdinfobase_v) { + LOG(DLOG_ERROR, "Failed to alloc memory for the RX queue."); + return -ENOMEM; + } + + memset(prx_queue->pbdinfobase_v, 0, size); + + /* Round up to nearest 4K */ + size = sizeof(struct edma_dma_shmbd_s) * prx_queue->count; + prx_queue->size = ALIGN(size, ALIGN_MASK); + + prx_queue->work_limit = BSPVETH_WORK_LIMIT; + + return 0; +} + +int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth) +{ + int err; + u8 *shmq_head = NULL; + u8 *shmq_head_p = NULL; + struct edma_rxtx_q_s *rx_queue = NULL; + + rx_queue = (struct edma_rxtx_q_s *) + kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL); + if (!rx_queue) { + LOG(DLOG_ERROR, "Failed to alloc RX queue."); + return -ENOMEM; + } + + memset(rx_queue, 0, sizeof(struct edma_rxtx_q_s)); + + shmq_head = edma_eth->pshmpool_v + MAX_SHAREQUEUE_SIZE; + shmq_head_p = edma_eth->pshmpool_p + MAX_SHAREQUEUE_SIZE; + rx_queue->pshmqhd_v = (struct edma_shmq_hd_s *)shmq_head; + rx_queue->pshmqhd_p = shmq_head_p; + + rx_queue->pshmbdbase_v = (struct edma_dma_shmbd_s *)(shmq_head + + BSPVETH_SHMBDBASE_OFFSET); + rx_queue->pshmbdbase_p = shmq_head_p + BSPVETH_SHMBDBASE_OFFSET; + + /* DMA address list (only used in host). 
*/ + rx_queue->pdmalbase_v = (struct edma_dmal_s *) + (shmq_head + SHMDMAL_OFFSET); + rx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC + + MAX_SHAREQUEUE_SIZE + SHMDMAL_OFFSET); + memset(rx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE); + + err = edma_veth_setup_rx_resources(rx_queue); + if (err) { + kfree(rx_queue); + return err; + } + + edma_eth->prx_queue = rx_queue; + + return 0; +} + +void edma_veth_free_rx_resources(struct edma_rxtx_q_s *prx_queue) +{ + struct edma_bd_info_s *pbdinfobase_v = NULL; + + if (!prx_queue || !prx_queue->pbdinfobase_v) + return; + + pbdinfobase_v = prx_queue->pbdinfobase_v; + prx_queue->pbdinfobase_v = NULL; + prx_queue->pshmqhd_v = NULL; + + /* Free all the Rx ring pages */ + vfree(pbdinfobase_v); + + LOG(DLOG_DEBUG, "%s ok. count=%d", __func__, prx_queue->count); +} + +void edma_veth_free_all_rx_resources(struct edma_eth_dev_s *edma_eth) +{ + if (edma_eth && edma_eth->prx_queue) { + edma_veth_free_rx_resources(edma_eth->prx_queue); + kfree(edma_eth->prx_queue); + edma_eth->prx_queue = NULL; + } +} + +int edma_veth_setup_all_rxtx_queue(struct edma_eth_dev_s *edma_eth) +{ + void *buf = NULL; + unsigned int i; + unsigned int j; + + dma_addr_t dmaaddr; + struct edma_bd_info_s *pbdinfobase_v = NULL; + + struct edma_rxtx_q_s *ptx_queue = NULL; + struct edma_rxtx_q_s *prx_queue = NULL; + + struct bma_priv_data_s *priv = NULL; + struct device *dev = NULL; + + priv = (struct bma_priv_data_s *)edma_eth->edma_priv; + dev = &priv->specific.veth.pdev->dev; + + ptx_queue = edma_eth->ptx_queue; + prx_queue = edma_eth->prx_queue; + + edma_eth->pages_tx = 0; + + pbdinfobase_v = ptx_queue->pbdinfobase_v; + + for (i = 0; i < MAX_QUEUE_BDNUM; i++) { + buf = kmalloc(NODE_SIZE, GFP_KERNEL | GFP_DMA); + if (!buf) { + for (j = 0; j < i; j++) + kfree((void *)pbdinfobase_v[j].pdma_v); + + LOG(DLOG_ERROR, "Fail to alloc tx buf."); + return -ENOMEM; + } + + dmaaddr = dma_map_single(dev, buf, NODE_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dmaaddr)) { + LOG(DLOG_ERROR, "Failed to map tx DMA address."); + kfree(buf); + return -EIO; + } + + memset(buf, 0xFF, NODE_SIZE); + + pbdinfobase_v[i].pdma_v = (u8 *)(buf); + pbdinfobase_v[i].dma_p = dmaaddr; + pbdinfobase_v[i].len = NODE_SIZE; + } + + LOG(DLOG_DEBUG, "set tx done."); + + edma_eth->pages_rx = 0; + + pbdinfobase_v = prx_queue->pbdinfobase_v; + + for (i = 0; i < MAX_QUEUE_BDNUM; i++) { + buf = kmalloc(NODE_SIZE, GFP_KERNEL | GFP_DMA); + if (!buf) { + for (j = 0; j < i; j++) + kfree((void *)pbdinfobase_v[j].pdma_v); + + LOG(DLOG_ERROR, "Fail to alloc rx buf."); + return -ENOMEM; + } + + dmaaddr = dma_map_single(dev, buf, NODE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dmaaddr)) { + LOG(DLOG_ERROR, "Failed to map rx DMA address."); + kfree(buf); + return -EIO; + } + + memset(buf, 0xFF, NODE_SIZE); + + pbdinfobase_v[i].pdma_v = (u8 *)(buf); + pbdinfobase_v[i].dma_p = dmaaddr; + pbdinfobase_v[i].len = NODE_SIZE; + } + + LOG(DLOG_DEBUG, "set rx done."); + + return 0; +} + +void edma_veth_dump(void) +{ + struct edma_eth_dev_s *edma_eth = &g_eth_edmaprivate; + struct edma_rxtx_q_s *ptx_queue = edma_eth->ptx_queue; + struct edma_rxtx_q_s *prx_queue = edma_eth->prx_queue; + struct edma_shmq_hd_s *pshmq_head = NULL; + + if (!debug) + return; + + pshmq_head = prx_queue->pshmqhd_v; + + LOG(DLOG_DEBUG, + "RX host_head:%u, host_tail:%u, shm_head:%u, shm_tail:%u, ", + prx_queue->head, prx_queue->tail, + pshmq_head->head, pshmq_head->tail); + LOG(DLOG_DEBUG, "count: %u, total: %u, init: %u.", + pshmq_head->count, 
pshmq_head->total, pshmq_head->init); + + pshmq_head = ptx_queue->pshmqhd_v; + + LOG(DLOG_DEBUG, + "TX host_head:%u, host_tail:%u, shm_head:%u, shm_tail:%u, ", + ptx_queue->head, ptx_queue->tail, + pshmq_head->head, pshmq_head->tail); + LOG(DLOG_DEBUG, "count: %u, total: %u, init: %u.", + pshmq_head->count, pshmq_head->total, pshmq_head->init); +} + +int edma_veth_setup_resource(struct edma_eth_dev_s *edma_eth) +{ + int err; + + err = edma_veth_setup_all_rx_resources(edma_eth); + if (err < 0) + return err; + + err = edma_veth_setup_all_tx_resources(edma_eth); + if (err < 0) + goto FREE_RX; + + err = edma_veth_setup_all_rxtx_queue(edma_eth); + if (err < 0) + goto FREE_TX; + + return 0; + +FREE_TX: + edma_veth_free_all_tx_resources(edma_eth); +FREE_RX: + edma_veth_free_all_rx_resources(edma_eth); + + return err; +} + +int edma_veth_free_rxtx_queue(struct edma_eth_dev_s *edma_eth) +{ + int i; + struct edma_rxtx_q_s *ptx_queue = NULL; + struct edma_rxtx_q_s *prx_queue = NULL; + + struct bma_priv_data_s *priv = NULL; + struct device *dev = NULL; + + struct edma_bd_info_s *pbdinfobase_v = NULL; + + if (!edma_eth || !edma_eth->edma_priv) + return 0; + + priv = (struct bma_priv_data_s *)edma_eth->edma_priv; + dev = &priv->specific.veth.pdev->dev; + + ptx_queue = edma_eth->ptx_queue; + prx_queue = edma_eth->prx_queue; + + pbdinfobase_v = ptx_queue->pbdinfobase_v; + + for (i = 0; i < MAX_QUEUE_BDNUM; i++) { + dma_unmap_single(dev, pbdinfobase_v[i].dma_p, + NODE_SIZE, DMA_TO_DEVICE); + kfree(pbdinfobase_v[i].pdma_v); + } + + pbdinfobase_v = prx_queue->pbdinfobase_v; + + for (i = 0; i < MAX_QUEUE_BDNUM; i++) { + dma_unmap_single(dev, pbdinfobase_v[i].dma_p, + NODE_SIZE, DMA_FROM_DEVICE); + kfree(pbdinfobase_v[i].pdma_v); + } + + return 0; +} + +void edma_veth_free_resource(struct edma_eth_dev_s *edma_eth) +{ + edma_veth_free_rxtx_queue(edma_eth); + LOG(DLOG_DEBUG, "edma_veth_free_rxtx_queue done."); + + edma_veth_free_all_rx_resources(edma_eth); + LOG(DLOG_DEBUG, "edma_veth_free_all_rx_resources done."); + + edma_veth_free_all_tx_resources(edma_eth); + LOG(DLOG_DEBUG, "edma_veth_free_all_tx_resources done."); +} + +int edma_veth_send_one_pkt(struct edma_cut_packet_node_s *cut_packet_node) +{ + u32 head, tail, i; + struct edma_bd_info_s *pbdinfo_v = NULL; + struct edma_rxtx_q_s *ptx_queue = g_eth_edmaprivate.ptx_queue; + struct bma_priv_data_s *priv = NULL; + struct device *dev = NULL; + + if (!cut_packet_node || !ptx_queue || !ptx_queue->pshmbdbase_v) { + LOG(DLOG_ERROR, "Invalid packet node."); + return -EFAULT; + } + + priv = (struct bma_priv_data_s *)(g_eth_edmaprivate.edma_priv); + dev = &priv->specific.veth.pdev->dev; + + if (!bma_intf_is_link_ok()) { + LOG(DLOG_ERROR, "EDMA link is not ready."); + return -EIO; + } + + for (i = 0; i < NO_SPACE_RETRY; i++) { + head = ptx_queue->head; + tail = ptx_queue->tail; + + LOG(DLOG_DEBUG, "TX queue, before: head/tail: %u/%u", head, tail); + + if (JUDGE_RING_QUEUE_SPACE(head, tail, 1)) + break; + + if (i == NO_SPACE_RETRY - 1) { + LOG(DLOG_ERROR, "EDMA queue has no space."); + return -EBUSY; + } + + tasklet_hi_schedule(&g_eth_edmaprivate.dma_task); + msleep(NO_SPACE_WAIT_MS); + } + + ptx_queue->head = (head + 1) & BSPVETH_POINT_MASK; + + pbdinfo_v = ptx_queue->pbdinfobase_v + head; + + pbdinfo_v->len = NODE_TO_PACKET_SIZE(cut_packet_node); + (void)memcpy(pbdinfo_v->pdma_v, cut_packet_node, pbdinfo_v->len); + + /* Force sync data from CPU to device. 
*/ + dma_sync_single_for_device(dev, pbdinfo_v->dma_p, + pbdinfo_v->len, DMA_TO_DEVICE); + + LOG(DLOG_DEBUG, "TX queue, after: head/tail: %u -> %u\n", + ptx_queue->head, ptx_queue->tail); + + return 0; +} + +static inline unsigned int edma_veth_get_ring_buf_count(unsigned int head, + unsigned int tail, + unsigned int size) +{ + return (tail + size - head) % size; +} + +static inline void edma_veth_flush_ring_node(struct edma_packet_node_s *node, + unsigned int ring_len) +{ + unsigned int i; + + for (i = 0; i < ring_len; i++) { + kfree(node[i].packet); + node[i].packet = NULL; + } +} + +static int get_peer_queue_stress(struct edma_rxtx_q_s *queue) +{ + int stress; + + if (++g_write_count < RL_MAX_PACKET) { + /* not enough packets, use the last delay. */ + return -1; + } + + g_write_count = 0; + + /* check peer rx queue stress. */ + if (!queue || queue->pshmqhd_v->total == 0) { + /* no rate limit allowed. */ + return 0; + } + + stress = (int)((queue->pshmqhd_v->count * STRESS_FACTOR) / + queue->pshmqhd_v->total); + + return stress; +} + +static void do_queue_rate_limit(struct edma_rxtx_q_s *queue) +{ + unsigned long delay_jiffies; + int stress = get_peer_queue_stress(queue); + + LOG(DLOG_DEBUG, "count: %u, total: %u, stress: %d", + queue->pshmqhd_v->count, queue->pshmqhd_v->total, stress); + + if (stress >= RL_STRESS_HIGH) + g_delay_ms = RL_DELAY_MS_HIGH; + else if (stress >= RL_STRESS_LOW) + g_delay_ms = RL_DELAY_MS_LOW; + else if (stress >= 0) + g_delay_ms = 0; + + if (g_delay_ms) { + delay_jiffies = msecs_to_jiffies(g_delay_ms); + schedule_timeout_killable(delay_jiffies); + } +} + +static int edma_veth_cut_tx_packet_send(struct edma_eth_dev_s *eth_dev, + const char __user *data, size_t len) +{ + int ret = 0; + struct edma_cut_packet_node_s *tx_cut_pkt = g_edma_send_cut_packet; + unsigned int length = len; + unsigned int already_read_len = 0; + unsigned int count = 0; + + if (!tx_cut_pkt) + return -EFAULT; + + do_queue_rate_limit(eth_dev->ptx_queue); + + while (length > 0) { + LOG(DLOG_DEBUG, "length: %u/%lu", length, len); + + if (length > BSPPACKET_MTU_MAX) { + /* fragment. 
*/ + if (copy_from_user(tx_cut_pkt->cut_packet, + data + already_read_len, + BSPPACKET_MTU_MAX)) { + LOG(DLOG_DEBUG, "Failed to copy user data."); + return -EFAULT; + } + tx_cut_pkt->number = count++; + length = length - BSPPACKET_MTU_MAX; + + if (tx_cut_pkt->number == 0) { + tx_cut_pkt->token = TK_START_PACKET; + tx_cut_pkt->cut_packet_len = BSPPACKET_MTU_MAX; + } else { + tx_cut_pkt->token = TK_MIDDLE_PACKET; + tx_cut_pkt->cut_packet_len = BSPPACKET_MTU_MAX; + } + } else { + if (copy_from_user(tx_cut_pkt->cut_packet, + data + already_read_len, length)) { + LOG(DLOG_DEBUG, "Failed to copy user data."); + return -EFAULT; + } + tx_cut_pkt->number = count++; + if (len > BSPPACKET_MTU_MAX) + tx_cut_pkt->token = TK_END_PACKET; + else + tx_cut_pkt->token = TK_START_END; + + tx_cut_pkt->cut_packet_len = length; + length = 0; + } + + already_read_len += tx_cut_pkt->cut_packet_len; + ret = edma_veth_send_one_pkt(tx_cut_pkt); + if (ret < 0) { + LOG(DLOG_DEBUG, "edma_veth_send_one_pkt failed, %d.", + ret); + return ret; + } + if (length > 0 && count > CUT_PKG_LIMIT_COUNT) { + LOG(DLOG_DEBUG, "middle pkg: %d, need sleep.", + count); + msleep(CUT_PKG_SLEEP_MS); + /* send an interrupt to BMC for recv package */ + if (count % SEND_INT_PKG_COUNT == 0) + tasklet_hi_schedule(&g_eth_edmaprivate.dma_task); + } + } + + LOG(DLOG_DEBUG, "send done, length: %u", length); + + return 0; +} + +static int edma_veth_copy_full_packet(struct edma_eth_dev_s *eth_dev, + u8 *packet, u32 len) +{ + unsigned int count = 0; + unsigned long flags = 0; + u8 *ptr = NULL; + + LOG(DLOG_DEBUG, "Recv full packet, len %u.", len); + + ptr = kmalloc(len, GFP_ATOMIC); + if (ptr) { + /* lock the queue. */ + spin_lock_irqsave(&eth_dev->rx_queue_lock, flags); + + count = edma_veth_get_ring_buf_count(eth_dev->rx_packet_head, + eth_dev->rx_packet_tail, + MAX_RXTX_PACKET_LEN); + if (count >= (MAX_RXTX_PACKET_LEN - 1)) { + LOG(DLOG_DEBUG, "The rx queue is full."); + spin_unlock_irqrestore(&eth_dev->rx_queue_lock, flags); + kfree(ptr); + return -EBUSY; + } + + (void)memcpy(ptr, packet, len); + eth_dev->rx_packet[eth_dev->rx_packet_tail].packet = ptr; + eth_dev->rx_packet[eth_dev->rx_packet_tail].len = len; + eth_dev->rx_packet_tail = (eth_dev->rx_packet_tail + 1) % + MAX_RXTX_PACKET_LEN; + + spin_unlock_irqrestore(&eth_dev->rx_queue_lock, flags); + + return 0; + } + + return -ENOMEM; +} + +static int edma_veth_cut_rx_packet_recv(struct edma_eth_dev_s *eth_dev, + u8 *packet, u32 len) +{ + int ret = 0; + struct edma_cut_packet_node_s *node = + (struct edma_cut_packet_node_s *)packet; + struct edma_packet_node_s *g_packet = &g_edma_recv_packet_tmp; + unsigned int copy_back = 0; + + if (node->cut_packet_len && len > NODE_TO_PACKET_SIZE(node)) + len = NODE_TO_PACKET_SIZE(node); + + LOG(DLOG_DEBUG, + "cut_packet_len: %u, token: %u/%u, number: %u, real length: %u.", + node->cut_packet_len, node->token, g_last_token, node->number, len); + + if (node->cut_packet_len > BSPPACKET_MTU_MAX || + ((g_packet->len + node->cut_packet_len) > MAX_PACKET_LEN)) { + LOG(DLOG_ERROR, "This packet is too long, packet length %u/%u", + node->cut_packet_len, g_packet->len); + ret = -EINVAL; + goto fail; + } + + if (g_last_token == TK_START_END || g_last_token == TK_END_PACKET) { + /* This should be a new packet. */ + if (node->token == TK_START_PACKET || + node->token == TK_START_END) { + (void)memcpy(g_packet->packet, node->cut_packet, + node->cut_packet_len); + g_packet->len = node->cut_packet_len; + + if (node->token == TK_START_END) { + /* A full packet, increase tail.
*/ + copy_back = 1; + } else { + LOG(DLOG_DEBUG, + "Add middle packet with length %u", + node->cut_packet_len); + } + } else { + LOG(DLOG_ERROR, "The rx packet is out-of-order"); + LOG(DLOG_ERROR, "token: %d, len: %u, number: %u", + node->token, node->cut_packet_len, node->number); + ret = -EINVAL; + goto fail; + } + } else { + /* Fragments, last token: TK_MIDDLE_PACKET/TK_START_PACKET. */ + if (g_last_number != (node->number - 1)) { + LOG(DLOG_ERROR, "The number is not correct (%u/%u)", + g_last_number, node->number); + ret = -EINVAL; + goto fail; + } + + if (node->token == TK_MIDDLE_PACKET) { + (void)memcpy(g_packet->packet + g_packet->len, + node->cut_packet, node->cut_packet_len); + g_packet->len = g_packet->len + node->cut_packet_len; + LOG(DLOG_DEBUG, "Add middle packet with length %u", + node->cut_packet_len); + } else if (node->token == TK_END_PACKET) { + (void)memcpy(g_packet->packet + g_packet->len, + node->cut_packet, node->cut_packet_len); + g_packet->len = g_packet->len + node->cut_packet_len; + copy_back = 1; + } else { + LOG(DLOG_ERROR, "Unexpected token: %u", node->token); + ret = -EINVAL; + goto fail; + } + } + + if (copy_back) { + ret = edma_veth_copy_full_packet(eth_dev, g_packet->packet, + g_packet->len); + g_packet->len = 0; + } + + g_last_token = node->token; + g_last_number = node->number; + + LOG(DLOG_DEBUG, "rx_packet_head:%u, rx_packet_tail: %u", + eth_dev->rx_packet_head, eth_dev->rx_packet_tail); + + return copy_back; + +fail: + g_last_token = TK_START_END; + g_last_number = 0; + memset(g_packet->packet, 0, MAX_PACKET_LEN); + g_packet->len = 0; + + return ret; +} + +int edma_veth_recv_pkt(struct edma_rxtx_q_s *prx_queue, + struct bma_priv_data_s *priv) +{ + int ret = BSP_OK; + + u32 i, work_limit; + u32 tail, head; + + struct edma_bd_info_s *prx_bdinfo_v = NULL; + struct device *dev = NULL; + + u8 *packet = NULL; + u32 len; + u32 off; + + wait_queue_head_t *queue_head = NULL; + u8 do_wake_up = 0; + + if (!priv) + return BSP_OK; + + dev = &priv->specific.veth.pdev->dev; + + work_limit = prx_queue->work_limit; + tail = prx_queue->tail; + + for (i = 0; i < work_limit; i++) { + head = prx_queue->head; + + if (tail == head) + break; + + LOG(DLOG_DEBUG, "===== enter ===== [%u/%u] ======", head, tail); + prx_bdinfo_v = prx_queue->pbdinfobase_v + tail; + + len = prx_bdinfo_v->len; + off = prx_bdinfo_v->off; + packet = prx_bdinfo_v->pdma_v; + + LOG(DLOG_DEBUG, "off:%u, len: %u.", off, len); + + if (!IS_CDEV_IN_OPEN_STATE()) { + LOG(DLOG_DEBUG, + "Local char device is not opened, drop packet"); + tail = BD_QUEUE_MASK(tail + 1); + continue; + } + + dma_sync_single_for_cpu(dev, prx_bdinfo_v->dma_p, + len + off, DMA_FROM_DEVICE); + + if (off) + packet += off; + + ret = edma_veth_cut_rx_packet_recv(&g_eth_edmaprivate, + packet, len); + if (ret < 0) + LOG(DLOG_DEBUG, "recv rx pkt fail, ret: %d", ret); + else if (ret != 0) + do_wake_up = 1; + + tail = BD_QUEUE_MASK(tail + 1); + } + + prx_queue->tail = tail; + head = prx_queue->head; + + if (tail != head) { + /* check if more processing is needed. */ + return BSP_ERR_AGAIN; + } else if (do_wake_up) { + queue_head = (wait_queue_head_t *)bma_cdev_get_wait_queue(priv); + /* finish reciving pkt, wake up the waiting process. 
*/ + if (queue_head && waitqueue_active(queue_head)) { + LOG(DLOG_DEBUG, "Wake up queue."); + wake_up(queue_head); + } + } + + return BSP_OK; +} + +void edma_task_do_packet_recv(unsigned long data) +{ + int ret = BSP_OK; + struct edma_rxtx_q_s *prx_queue = NULL; + struct bma_priv_data_s *priv = NULL; + struct tasklet_struct *t = (struct tasklet_struct *)data; + + priv = (struct bma_priv_data_s *)g_eth_edmaprivate.edma_priv; + prx_queue = g_eth_edmaprivate.prx_queue; + + if (prx_queue) { + g_eth_edmaprivate.run_skb_RX_task++; + + ret = edma_veth_recv_pkt(prx_queue, priv); + } + + if (ret == BSP_ERR_AGAIN) + tasklet_hi_schedule(t); +} + +static inline void edma_veth_reset_dma(int type) +{ + bma_intf_reset_dma(GET_DMA_DIRECTION(type)); +} + +int __dmacmp_err_deal_2(struct edma_rxtx_q_s *prxtx_queue, u32 type) +{ + prxtx_queue->dmacmperr = 0; + prxtx_queue->start_dma = 0; + + (void)edma_veth_reset_dma(type); + + if (type == BSPVETH_RX) { + LOG(DLOG_DEBUG, + "bmc->host dma time out, dma count:%d, work_limit:%d\n", + prxtx_queue->dmal_cnt, + prxtx_queue->work_limit); + + prxtx_queue->s.dma_failed++; + } else { + LOG(DLOG_DEBUG, + "host->bmc dma time out, dma count:%d, work_limit:%d\n", + prxtx_queue->dmal_cnt, + prxtx_queue->work_limit); + + prxtx_queue->s.dma_failed++; + } + + if (prxtx_queue->dmal_cnt > 1) + prxtx_queue->work_limit = (prxtx_queue->dmal_cnt >> 1); + + prxtx_queue->dma_overtime++; + if (prxtx_queue->dma_overtime > BSPVETH_MAX_QUE_DEEP) + return BSPVETH_DMA_BUSY; + + return BSP_OK; +} + +int edma_veth_check_dma_status(struct edma_rxtx_q_s *prxtx_queue, u32 type) +{ + int i = 0; + enum dma_direction_e dir = GET_DMA_DIRECTION(type); + + for (i = 0; i < BSPVETH_CHECK_DMA_STATUS_TIMES; i++) { + if (bma_intf_check_dma_status(dir) == BSPVETH_DMA_OK) + return BSP_OK; + + cpu_relax(); + + if (i > DMA_STATUS_CHECK_DELAY_LIMIT) + udelay(DMA_STATUS_CHECK_DELAY_MS); + } + + prxtx_queue->s.dma_busy++; + prxtx_queue->dmacmperr++; + + return BSPVETH_DMA_BUSY; +} + +int __check_dmacmp_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type) +{ + u16 start_dma; + u16 dmacmperr; + u32 cnt = 0; + u32 len = 0; + u32 host_head = 0; + u32 host_tail = 0; + u32 shm_head = 0; + u32 shm_tail = 0; + s32 ret; + struct edma_shmq_hd_s *pshmq_head = NULL; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return BSP_ERR_NULL_POINTER; + + start_dma = prxtx_queue->start_dma; + if (!start_dma) + return BSP_OK; + + pshmq_head = prxtx_queue->pshmqhd_v; + dmacmperr = prxtx_queue->dmacmperr; + + if (dmacmperr > BSPVETH_WORK_LIMIT / DMACMP_ERR_FACTOR) + return __dmacmp_err_deal_2(prxtx_queue, type); + + ret = edma_veth_check_dma_status(prxtx_queue, type); + if (ret != BSP_OK) + return ret; + + prxtx_queue->start_dma = 0; + prxtx_queue->dma_overtime = 0; + + if (type == BSPVETH_RX) { + cnt = prxtx_queue->dmal_cnt; + len = prxtx_queue->dmal_byte; + + host_head = prxtx_queue->head; + shm_tail = pshmq_head->tail; + + pshmq_head->tail = BD_QUEUE_MASK(shm_tail + cnt); + prxtx_queue->head = BD_QUEUE_MASK(host_head + cnt); + + LOG(DLOG_DEBUG, "RX:host_head:%u, host_tail:%u, ", + prxtx_queue->head, prxtx_queue->tail); + + LOG(DLOG_DEBUG, "shm_head:%u, shm_tail:%u, inc: %u.", + pshmq_head->head, pshmq_head->tail, cnt); + + prxtx_queue->s.dmapkt += cnt; + prxtx_queue->s.dmapktbyte += len; + } else { + cnt = prxtx_queue->dmal_cnt; + len = prxtx_queue->dmal_byte; + + host_tail = prxtx_queue->tail; + shm_head = pshmq_head->head; + + prxtx_queue->tail = BD_QUEUE_MASK(host_tail + cnt); + pshmq_head->head = BD_QUEUE_MASK(shm_head + cnt); + + 
LOG(DLOG_DEBUG, "TX:host_head:%u, host_tail:%u, ", + prxtx_queue->head, prxtx_queue->tail); + + LOG(DLOG_DEBUG, "shm_head:%u, shm_tail:%u, inc: %u.", + pshmq_head->head, pshmq_head->tail, cnt); + + prxtx_queue->s.dmapkt += cnt; + prxtx_queue->s.dmapktbyte += len; + } + + tasklet_hi_schedule(&g_eth_edmaprivate.skb_task); + + (void)bma_intf_int_to_bmc(g_eth_edmaprivate.edma_priv); + + g_eth_edmaprivate.tobmc_int++; + + return BSP_OK; +} + +int __checkspace_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 *pcnt) +{ + u32 host_head, host_tail; + u32 shm_head, shm_tail; + u32 shm_cnt, host_cnt, cnt_tmp, cnt; + struct edma_shmq_hd_s *pshmq_head = NULL; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return -EFAULT; + + pshmq_head = prxtx_queue->pshmqhd_v; + + host_head = prxtx_queue->head; + host_tail = prxtx_queue->tail; + shm_head = pshmq_head->head; + shm_tail = pshmq_head->tail; + + LOG(DLOG_DEBUG, "host_head:%u, host_tail:%u, shm_head:%u, shm_tail:%u.", + host_head, host_tail, shm_head, shm_tail); + + switch (type) { + case BSPVETH_RX: + if (shm_head == shm_tail) { + prxtx_queue->s.shm_empty++; + return BSP_ERR_NOT_TO_HANDLE; + } + + if (!JUDGE_RING_QUEUE_SPACE(host_head, host_tail, 1)) + return -EFAULT; + + shm_cnt = GET_BD_RING_QUEUE_COUNT(shm_head, shm_tail); + cnt_tmp = min(shm_cnt, prxtx_queue->work_limit); + + host_cnt = GET_BD_RING_QUEUE_SPACE(host_tail, host_head); + cnt = min(cnt_tmp, host_cnt); + + LOG(DLOG_DEBUG, + "RX, host_cnt: %u, shm_cnt: %u, cnt_tmp: %u, cnt: %u", + host_cnt, shm_cnt, cnt_tmp, cnt); + + break; + + case BSPVETH_TX: + if (host_tail == host_head) { + prxtx_queue->s.q_empty++; + return BSP_ERR_NOT_TO_HANDLE; + } + + host_cnt = GET_BD_RING_QUEUE_COUNT(host_head, host_tail); + cnt_tmp = min(host_cnt, prxtx_queue->work_limit); + + shm_cnt = GET_BD_RING_QUEUE_SPACE(shm_head, shm_tail); + cnt = min(cnt_tmp, shm_cnt); + + LOG(DLOG_DEBUG, + "TX, host_cnt: %u, shm_cnt: %u, cnt_tmp: %u, cnt: %u", + host_cnt, shm_cnt, cnt_tmp, cnt); + + break; + + default: + prxtx_queue->s.type_err++; + return -EFAULT; + } + + if (cnt > ((BSPVETH_DMABURST_MAX * DMABURST_FACTOR) / + (DMABURST_FACTOR + 1))) + prxtx_queue->s.dma_burst++; + + *pcnt = cnt; + + return BSP_OK; +} + +int __make_dmalistbd_h2b_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 cnt) +{ + u32 i = 0; + u32 len = 0; + u32 off = 0; + struct edma_dmal_s *pdmalbase_v = NULL; + struct edma_shmq_hd_s *pshmq_head = NULL; + struct edma_bd_info_s *pbdinfobase_v = NULL; + struct edma_dma_shmbd_s *pshmbdbase_v = NULL; + + unsigned long addr; + + u32 host_tail; + u32 shm_head; + + if (!prxtx_queue) + return -EFAULT; + + if (cnt == 0) + return 0; + + pdmalbase_v = prxtx_queue->pdmalbase_v; + pbdinfobase_v = prxtx_queue->pbdinfobase_v; + pshmbdbase_v = prxtx_queue->pshmbdbase_v; + + pshmq_head = prxtx_queue->pshmqhd_v; + + host_tail = prxtx_queue->tail; + shm_head = pshmq_head->head; + + for (i = 0; i < cnt; i++) { + LOG(DLOG_DEBUG, "TX DMA, HOST: %u -> BMC: %u", + host_tail, shm_head); + + pdmalbase_v[i].chl = 0x1; + + addr = EDMA_ADDR_ALIGNED(pbdinfobase_v[host_tail].dma_p); + off = EDMA_ADDR_OFFSET(addr); + + /* src: veth_send_one_pkt. */ + pdmalbase_v[i].slow = lower_32_bits(addr); + pdmalbase_v[i].shi = upper_32_bits(addr); + + /* dst: bmc dma, in shared memory. 
*/ + pdmalbase_v[i].dlow = + lower_32_bits(pshmbdbase_v[shm_head].dma_p); + pdmalbase_v[i].dhi = 0; + + /* len: len + offset caused by alignment */ + pdmalbase_v[i].len = pbdinfobase_v[host_tail].len + off; + + LOG(DLOG_DEBUG, + "TX DMA %08x%08x -> %08x%08x, off: %u, len: %u.", + pdmalbase_v[i].shi, pdmalbase_v[i].slow, + pdmalbase_v[i].dhi, pdmalbase_v[i].dlow, + off, pbdinfobase_v[host_tail].len); + + pshmbdbase_v[shm_head].len = pbdinfobase_v[host_tail].len; + pshmbdbase_v[shm_head].off = off; + + len += pdmalbase_v[i].len; + + /* ready for the next round. */ + host_tail = BD_QUEUE_MASK(host_tail + 1); + shm_head = BD_QUEUE_MASK(shm_head + 1); + } + + pdmalbase_v[i - 1].chl = 0x9; + + pdmalbase_v[i].chl = 0x7; + pdmalbase_v[i].len = 0x0; + pdmalbase_v[i].slow = lower_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].shi = upper_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].dlow = 0; + pdmalbase_v[i].dhi = 0; + + prxtx_queue->dmal_cnt = cnt; + prxtx_queue->dmal_byte = len; + + return 0; +} + +int __make_dmalistbd_b2h_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 cnt) +{ + u32 i; + u32 len = 0; + + struct edma_dmal_s *pdmalbase_v = NULL; + struct edma_shmq_hd_s *pshmq_head = NULL; + struct edma_bd_info_s *pbdinfobase_v = NULL; + struct edma_dma_shmbd_s *pshmbdbase_v = NULL; + + u32 host_head; + u32 shm_tail; + + if (!prxtx_queue) + return -EFAULT; + + if (cnt == 0) + return -EFAULT; + + pdmalbase_v = prxtx_queue->pdmalbase_v; + pbdinfobase_v = prxtx_queue->pbdinfobase_v; + pshmbdbase_v = prxtx_queue->pshmbdbase_v; + + pshmq_head = prxtx_queue->pshmqhd_v; + + host_head = prxtx_queue->head; + shm_tail = pshmq_head->tail; + + for (i = 0; i < cnt; i++) { + LOG(DLOG_DEBUG, "RX DMA, BMC: %u -> HOST: %u", + shm_tail, host_head); + + pbdinfobase_v[host_head].off = pshmbdbase_v[shm_tail].off; + pbdinfobase_v[host_head].len = pshmbdbase_v[shm_tail].len; + + pdmalbase_v[i].chl = 0x1; + + /* src: bmc set in shared memory. */ + pdmalbase_v[i].slow = + lower_32_bits(pshmbdbase_v[shm_tail].dma_p); + pdmalbase_v[i].shi = 0; + + /* dst: edma_veth_setup_all_rxtx_queue. */ + pdmalbase_v[i].dlow = + lower_32_bits(pbdinfobase_v[host_head].dma_p); + pdmalbase_v[i].dhi = + upper_32_bits(pbdinfobase_v[host_head].dma_p); + + pdmalbase_v[i].len = pshmbdbase_v[shm_tail].len + + pshmbdbase_v[shm_tail].off; + + LOG(DLOG_DEBUG, + "RX DMA %08x%08x -> %08x%08x, off: %u, len: %u, total: %u.", + pdmalbase_v[i].shi, pdmalbase_v[i].slow, + pdmalbase_v[i].dhi, pdmalbase_v[i].dlow, + pshmbdbase_v[shm_tail].off, pshmbdbase_v[shm_tail].len, + pdmalbase_v[i].len); + + len += pdmalbase_v[i].len; + + /* ready for the next round. 
*/ + host_head = BD_QUEUE_MASK(host_head + 1); + shm_tail = BD_QUEUE_MASK(shm_tail + 1); + } + + pdmalbase_v[i - 1].chl = 0x9; + + pdmalbase_v[i].chl = 0x7; + pdmalbase_v[i].len = 0x0; + pdmalbase_v[i].slow = lower_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].shi = upper_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].dlow = 0; + pdmalbase_v[i].dhi = 0; + + prxtx_queue->dmal_cnt = cnt; + prxtx_queue->dmal_byte = len; + + return 0; +} + +int __start_dmalist_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 cnt) +{ + int ret = BSP_OK; + struct bma_dma_transfer_s dma_transfer = { 0 }; + struct edma_shmq_hd_s *pshmq_head = NULL; + + if (!prxtx_queue) + return -1; + + pshmq_head = prxtx_queue->pshmqhd_v; + + LOG(DLOG_DEBUG, "before -> %u/%u/%u/%u.", + prxtx_queue->head, prxtx_queue->tail, + pshmq_head->head, pshmq_head->tail); + + if (type == BSPVETH_RX) { + dma_transfer.dir = BMC_TO_HOST; + ret = __make_dmalistbd_b2h_H_2(prxtx_queue, cnt); + } else { + dma_transfer.dir = HOST_TO_BMC; + ret = __make_dmalistbd_h2b_H_2(prxtx_queue, cnt); + } + + if (ret < 0) + return ret; + + dma_transfer.type = DMA_LIST; + dma_transfer.transfer.list.dma_addr = + (dma_addr_t)prxtx_queue->pdmalbase_p; + + ret = bma_intf_start_dma(g_eth_edmaprivate.edma_priv, &dma_transfer); + LOG(DLOG_DEBUG, "after -> %u/%u/%u/%u, ret: %d", + prxtx_queue->head, prxtx_queue->tail, + pshmq_head->head, pshmq_head->tail, + ret); + + if (ret < 0) + return ret; + + prxtx_queue->start_dma = 1; + + return BSP_OK; +} + +int check_dma_queue_fault_2(struct edma_rxtx_q_s *prxtx_queue, + u32 type, u32 *pcnt) +{ + int ret; + u32 cnt = 0; + + if (prxtx_queue->dma_overtime > BSPVETH_MAX_QUE_DEEP) + return -EFAULT; + + ret = __check_dmacmp_H_2(prxtx_queue, type); + if (ret != BSP_OK) + return -EFAULT; + + ret = __checkspace_H_2(prxtx_queue, type, &cnt); + if (ret != BSP_OK) + return -EFAULT; + + if (CHECK_DMA_RXQ_FAULT(prxtx_queue, type, cnt)) { + udelay(DMA_RXQ_FAULT_DELAY); + + prxtx_queue->dmal_cnt--; + + return -EFAULT; + } + + *pcnt = cnt; + + return BSP_OK; +} + +int __dma_rxtx_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type) +{ + int ret; + u32 cnt = 0; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return -EFAULT; + + if (CHECK_DMA_QUEUE_EMPTY(type, prxtx_queue)) { + LOG(DLOG_DEBUG, "Queue (type: %u) is empty.", type); + return BSP_OK; + } + + ret = check_dma_queue_fault_2(prxtx_queue, type, &cnt); + if (ret != BSP_OK) { + LOG(DLOG_DEBUG, "check_dma_queue_fault_2 (ret: %d).", ret); + return -EFAULT; + } + + if (cnt == 0) + return BSP_OK; + + ret = __start_dmalist_H_2(prxtx_queue, type, cnt); + if (ret != BSP_OK) { + LOG(DLOG_DEBUG, "__start_dmalist_H_2 returns %d", ret); + return -EFAULT; + } + + if (cnt <= DMA_QUEUE_FAULT_LIMIT) { + ret = __check_dmacmp_H_2(prxtx_queue, type); + if (ret != BSP_OK) { + LOG(DLOG_DEBUG, "__check_dmacmp_H_2 returns %d", ret); + return -EFAULT; + } + } + + return BSP_OK; +} + +inline int veth_dma_task_H_2(u32 type) +{ + struct edma_rxtx_q_s *prxtx_queue = NULL; + + if (type == BSPVETH_RX) { + g_eth_edmaprivate.run_dma_RX_task++; + prxtx_queue = g_eth_edmaprivate.prx_queue; + } else { + g_eth_edmaprivate.run_dma_TX_task++; + prxtx_queue = g_eth_edmaprivate.ptx_queue; + } + + if (prxtx_queue) { + if (!edma_is_queue_ready(prxtx_queue)) { + LOG(DLOG_DEBUG, "queue is not ready, init flag: %u.", + prxtx_queue->pshmqhd_v->init); + return BSP_OK; + } + + (void)__dma_rxtx_H_2(prxtx_queue, type); + + if (!CHECK_DMA_QUEUE_EMPTY(type, prxtx_queue)) + return BSP_ERR_AGAIN; + } + + return BSP_OK; 
+} + +void edma_task_do_data_transmit(unsigned long data) +{ + struct tasklet_struct *t = (struct tasklet_struct *)data; + int txret, rxret; + + LOG(DLOG_DEBUG, "host_head/host_tail/shm_head/shm_tail - "); + LOG(DLOG_DEBUG, "rx:%u/%u/%u/%u, tx:%u/%u/%u/%u.", + g_eth_edmaprivate.prx_queue->head, + g_eth_edmaprivate.prx_queue->tail, + g_eth_edmaprivate.prx_queue->pshmqhd_v->head, + g_eth_edmaprivate.prx_queue->pshmqhd_v->tail, + g_eth_edmaprivate.ptx_queue->head, + g_eth_edmaprivate.ptx_queue->tail, + g_eth_edmaprivate.ptx_queue->pshmqhd_v->head, + g_eth_edmaprivate.ptx_queue->pshmqhd_v->tail); + + txret = veth_dma_task_H_2(BSPVETH_TX); + + rxret = veth_dma_task_H_2(BSPVETH_RX); + + LOG(DLOG_DEBUG, "host_head/host_tail/shm_head/shm_tail - "); + LOG(DLOG_DEBUG, "rx:%u/%u/%u/%u, tx:%u/%u/%u/%u.\n", + g_eth_edmaprivate.prx_queue->head, + g_eth_edmaprivate.prx_queue->tail, + g_eth_edmaprivate.prx_queue->pshmqhd_v->head, + g_eth_edmaprivate.prx_queue->pshmqhd_v->tail, + g_eth_edmaprivate.ptx_queue->head, + g_eth_edmaprivate.ptx_queue->tail, + g_eth_edmaprivate.ptx_queue->pshmqhd_v->head, + g_eth_edmaprivate.ptx_queue->pshmqhd_v->tail); + + if (txret == BSP_ERR_AGAIN || rxret == BSP_ERR_AGAIN) { + /* restart transmission. */ + tasklet_hi_schedule(t); + } +} + +int edma_tasklet_setup(struct edma_eth_dev_s *dev, u8 **rx_buf, + struct edma_cut_packet_node_s **tx_cut_pkt_buf) +{ + u8 *rx_pkt_buf; + struct edma_packet_node_s *rx_packet = NULL; + struct edma_cut_packet_node_s *tx_cut_buf = NULL; + size_t rx_size = + sizeof(struct edma_packet_node_s) * MAX_RXTX_PACKET_LEN; + + rx_pkt_buf = kmalloc(MAX_PACKET_LEN, GFP_KERNEL); + if (!rx_pkt_buf) + return -ENOMEM; + + tx_cut_buf = (struct edma_cut_packet_node_s *) + kmalloc(sizeof(*tx_cut_buf), GFP_KERNEL); + if (!tx_cut_buf) { + kfree(rx_pkt_buf); + return -ENOMEM; + } + + rx_packet = kmalloc(rx_size, GFP_KERNEL); + if (!rx_packet) { + kfree(rx_pkt_buf); + kfree(tx_cut_buf); + return -ENOMEM; + } + + memset(rx_pkt_buf, 0, MAX_PACKET_LEN); + memset(tx_cut_buf, 0, sizeof(*tx_cut_buf)); + memset(rx_packet, 0, rx_size); + + *rx_buf = rx_pkt_buf; + *tx_cut_pkt_buf = tx_cut_buf; + dev->rx_packet = rx_packet; + + spin_lock_init(&dev->rx_queue_lock); + + tasklet_init(&dev->skb_task, + edma_task_do_packet_recv, + (unsigned long)&dev->skb_task); + + tasklet_init(&dev->dma_task, + edma_task_do_data_transmit, + (unsigned long)&dev->dma_task); + + return 0; +} + +void edma_tasklet_free(struct edma_eth_dev_s *dev, u8 **rx_buf, + struct edma_cut_packet_node_s **tx_cut_pkt_buf) +{ + if (!*rx_buf) + return; + + /* stop task before releasing resource. */ + tasklet_kill(&dev->dma_task); + tasklet_kill(&dev->skb_task); + + kfree(*rx_buf); + kfree(*tx_cut_pkt_buf); + + /* flush the ring buf. 
*/ + edma_veth_flush_ring_node(dev->rx_packet, MAX_RXTX_PACKET_LEN); + kfree(dev->rx_packet); + + *rx_buf = NULL; + *tx_cut_pkt_buf = NULL; + dev->rx_packet = NULL; +} + +static int edma_veth_int_handler(struct notifier_block *nb, + unsigned long ev, void *unuse) +{ + g_eth_edmaprivate.recv_int++; + + if (g_eth_edmaprivate.dma_task.func) + tasklet_hi_schedule(&g_eth_edmaprivate.dma_task); + + return IRQ_HANDLED; +} + +static struct notifier_block g_edma_veth_int_nb = { + .notifier_call = edma_veth_int_handler, +}; + +static int comm_init_dev(struct edma_eth_dev_s *edma, + const struct file_operations *fops) +{ + struct cdev_dev_s *dev = &edma->cdev; + int ret; + + dev->priv = edma->edma_priv; + dev->dev.minor = MISC_DYNAMIC_MINOR; + dev->dev.name = CDEV_VETH_NAME; + dev->dev.fops = fops; + + ret = misc_register(&dev->dev); + if (ret < 0) { + LOG(DLOG_ERROR, "Failed to alloc major number, %d", ret); + return ret; + } + + return 0; +} + +static inline void comm_cleanup_dev(struct edma_eth_dev_s *edma) +{ + struct cdev_dev_s *dev = &edma->cdev; + + misc_deregister(&dev->dev); +} + +static int __init edma_cdev_init(void) +{ + int ret; + + g_write_count = 0; + g_delay_ms = 0; + g_last_number = 0; + g_peer_not_ready = 0; + + LOG(DLOG_DEBUG, "Module init."); + + if (!bma_intf_check_edma_supported()) + return -ENXIO; + + (void)memset(&g_eth_edmaprivate, 0, sizeof(g_eth_edmaprivate)); + + /* register EDMA sub-subyem. */ + ret = bma_intf_register_type(TYPE_VETH, 0, INTR_ENABLE, + &g_eth_edmaprivate.edma_priv); + if (ret < 0) { + LOG(DLOG_ERROR, "Failed to register EDMA interface."); + goto failed; + } + + /* initialize host DMA address. */ + edma_veth_host_addr_init(g_eth_edmaprivate.edma_priv); + + /* setup TX/RX resource */ + ret = edma_veth_setup_resource(&g_eth_edmaprivate); + if (ret < 0) { + LOG(DLOG_ERROR, "Failed to setup resource."); + goto failed1; + } + + /* setup resource for user packets. */ + ret = edma_tasklet_setup(&g_eth_edmaprivate, + &g_edma_recv_packet_tmp.packet, + &g_edma_send_cut_packet); + if (ret < 0) + goto failed2; + + /* register char device. */ + ret = comm_init_dev(&g_eth_edmaprivate, &g_eth_edma_cdev_fops); + if (ret != 0) { + LOG(DLOG_ERROR, "Failed to register cdev device."); + goto failed3; + } + + /* register EDMA INT notifier. */ + ret = bma_intf_register_int_notifier(&g_edma_veth_int_nb); + if (ret < 0) { + LOG(DLOG_ERROR, "Failed to register INT notifier."); + goto failed4; + } + + dump_global_info(); + + GET_SYS_SECONDS(g_eth_edmaprivate.init_time); + + return 0; + +failed4: + comm_cleanup_dev(&g_eth_edmaprivate); +failed3: + edma_tasklet_free(&g_eth_edmaprivate, + &g_edma_recv_packet_tmp.packet, + &g_edma_send_cut_packet); +failed2: + edma_veth_free_resource(&g_eth_edmaprivate); +failed1: + (void)bma_intf_unregister_type(&g_eth_edmaprivate.edma_priv); +failed: + return ret; +} + +static void __exit edma_cdev_exit(void) +{ + LOG(DLOG_DEBUG, "Module exit."); + + bma_intf_unregister_int_notifier(&g_edma_veth_int_nb); + + comm_cleanup_dev(&g_eth_edmaprivate); + + edma_tasklet_free(&g_eth_edmaprivate, + &g_edma_recv_packet_tmp.packet, + &g_edma_send_cut_packet); + + edma_veth_free_resource(&g_eth_edmaprivate); + + bma_intf_unregister_type(&g_eth_edmaprivate.edma_priv); +} + +static inline int cdev_check_ring_recv(void) +{ + unsigned int count; + + count = edma_veth_get_ring_buf_count(g_eth_edmaprivate.rx_packet_head, + g_eth_edmaprivate.rx_packet_tail, + MAX_RXTX_PACKET_LEN); + return (count > 0 ? 
1 : 0); +} + +static ssize_t cdev_copy_packet_to_user(struct edma_eth_dev_s *dev, + char __user *data, size_t count) +{ + unsigned char *packet = NULL; + unsigned char *start = NULL; + unsigned int free_packet = 0; + unsigned long flags = 0; + ssize_t length = (ssize_t)count; + ssize_t left; + + LOG(DLOG_DEBUG, "rx_packet_head:%u, rx_packet_tail: %u", + dev->rx_packet_head, dev->rx_packet_tail); + + spin_lock_irqsave(&dev->rx_queue_lock, flags); + + if (!cdev_check_ring_recv()) { + spin_unlock_irqrestore(&dev->rx_queue_lock, flags); + return -EAGAIN; + } + + left = (ssize_t)(dev->rx_packet[dev->rx_packet_head].len) - g_read_pos; + start = dev->rx_packet[dev->rx_packet_head].packet + g_read_pos; + + LOG(DLOG_DEBUG, + "User needs %ld bytes, pos: %u, total len: %u, left: %ld.", + count, g_read_pos, dev->rx_packet[dev->rx_packet_head].len, left); + if (left <= 0) { + /* No more data in this message, retry. */ + length = -EAGAIN; + free_packet = 1; + } else if (length > left) { + /* A full message is returned. */ + length = left; + free_packet = 1; + } else { + /* Update pos. */ + g_read_pos += length; + } + + if (free_packet) { + g_read_pos = 0; + packet = dev->rx_packet[dev->rx_packet_head].packet; + dev->rx_packet[dev->rx_packet_head].packet = NULL; + dev->rx_packet_head = (dev->rx_packet_head + 1) % + MAX_RXTX_PACKET_LEN; + } + + spin_unlock_irqrestore(&dev->rx_queue_lock, flags); + + if (length > 0 && copy_to_user(data, start, length)) { + LOG(DLOG_DEBUG, "Failed to copy to user, skip this message."); + length = -EFAULT; + g_read_pos = 0; + } + + LOG(DLOG_DEBUG, + "Copied bytes: %ld, pos: %d, buf len: %lu, free_packet: %d.", + length, g_read_pos, count, free_packet); + + if (packet) { + /* Free the packet as needed. */ + kfree(packet); + } + + return length; +} + +int cdev_open(struct inode *inode_ptr, struct file *filp) +{ + struct cdev_dev_s *dev = &g_eth_edmaprivate.cdev; + + LOG(DLOG_DEBUG, "Open device."); + + if (!inode_ptr || !filp) + return -EFAULT; + + /* only one instance is allowed. */ + if (IS_CDEV_IN_OPEN_STATE()) + return -EBUSY; + + LOG(DLOG_DEBUG, "Init flag, rx: %d, tx:%d", + g_eth_edmaprivate.prx_queue->pshmqhd_v->init, + g_eth_edmaprivate.ptx_queue->pshmqhd_v->init); + + /* save to private data. */ + filp->private_data = dev; + SET_CDEV_OPEN_STATE(CDEV_OPENED); + g_read_pos = 0; + + return 0; +} + +int cdev_release(struct inode *inode_ptr, struct file *filp) +{ + LOG(DLOG_DEBUG, "Close device."); + + if (!filp) + return 0; + + filp->private_data = NULL; + + SET_CDEV_OPEN_STATE(CDEV_CLOSED); + + return 0; +} + +unsigned int cdev_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + wait_queue_head_t *queue_head = NULL; + + if (!filp) + return 0; + + edma_veth_dump(); + + queue_head = (wait_queue_head_t *) + bma_cdev_get_wait_queue(GET_PRIVATE_DATA(filp)); + if (!queue_head) + return 0; + + /* check or add to wait queue. 
*/ + poll_wait(filp, queue_head, wait); + + if (!edma_is_queue_ready(g_eth_edmaprivate.prx_queue)) + return 0; + + if (cdev_check_ring_recv() > 0) + mask = (POLLIN | POLLRDNORM); + + return mask; +} + +ssize_t cdev_read(struct file *filp, char __user *data, + size_t count, loff_t *ppos) +{ + struct edma_eth_dev_s *dev = &g_eth_edmaprivate; + ssize_t length = 0; + + if (!data || count >= MAX_PACKET_LEN) + return -EFAULT; + + LOG(DLOG_DEBUG, "read begin, count: %ld, pos: %u.", count, g_read_pos); + + length = cdev_copy_packet_to_user(dev, data, count); + + LOG(DLOG_DEBUG, "read done, length: %ld, pos: %u.", length, g_read_pos); + + return length; +} + +ssize_t cdev_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos) +{ + int ret = 0; + struct edma_eth_dev_s *pdev = &g_eth_edmaprivate; + + if (!data || count <= 0 || count > MAX_PACKET_LEN) + return -EINVAL; + + if (!edma_is_queue_ready(pdev->ptx_queue)) { + if (g_peer_not_ready == 0 && pdev->ptx_queue) { + LOG(DLOG_ERROR, "Peer rx queue is not ready (%u).", + pdev->ptx_queue->pshmqhd_v->init); + g_peer_not_ready = 1; + } + return -EPERM; + } else if (g_peer_not_ready) { + LOG(DLOG_ERROR, "Peer rx queue becomes ready."); + g_peer_not_ready = 0; + } + + LOG(DLOG_DEBUG, "data length is %lu, pos: %u (%u/%u)", + count, g_read_pos, + pdev->ptx_queue->pshmqhd_v->count, + pdev->ptx_queue->pshmqhd_v->total); + + ret = edma_veth_cut_tx_packet_send(pdev, data, count); + if (ret < 0) { + LOG(DLOG_ERROR, "Failed to send packet, return code: %d.", ret); + } else { + tasklet_hi_schedule(&g_eth_edmaprivate.dma_task); + ret = count; + } + + return ret; +} + +MODULE_VERSION(MICRO_TO_STR(CDEV_VETH_VERSION)); +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_DESCRIPTION("HUAWEI CDEV DRIVER"); +MODULE_LICENSE("GPL"); + +module_init(edma_cdev_init); +module_exit(edma_cdev_exit); \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h new file mode 100644 index 0000000000000000000000000000000000000000..44abe56dd7877ca6f5b8a2b2cf3a3e4b0bfc2f2a --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2019, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _VETH_CDEV_NET_H_ +#define _VETH_CDEV_NET_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../edma_drv/bma_include.h" +#include "../include/bma_ker_intf.h" + +#define BSP_OK (0) +#define BSP_ERR (0xFFFFFFFF) +#define BSP_NETDEV_TX_BUSY (1) +#define BSP_ERR_INIT_ERR (BSP_NETDEV_TX_BUSY) +#define BSP_ETH_ERR_BASE (0x0FFFF000) +#define BSP_ERR_OUT_OF_MEM (BSP_ETH_ERR_BASE + 1) +#define BSP_ERR_NULL_POINTER (BSP_ETH_ERR_BASE + 2) +#define BSP_ERR_INVALID_STR (BSP_ETH_ERR_BASE + 3) +#define BSP_ERR_INVALID_PARAM (BSP_ETH_ERR_BASE + 4) +#define BSP_ERR_INVALID_DATA (BSP_ETH_ERR_BASE + 5) +#define BSP_ERR_OUT_OF_RANGE (BSP_ETH_ERR_BASE + 6) +#define BSP_ERR_INVALID_CARD (BSP_ETH_ERR_BASE + 7) +#define BSP_ERR_INVALID_GRP (BSP_ETH_ERR_BASE + 8) +#define BSP_ERR_INVALID_ETH (BSP_ETH_ERR_BASE + 9) +#define BSP_ERR_SEND_ERR (BSP_ETH_ERR_BASE + 10) +#define BSP_ERR_DMA_ERR (BSP_ETH_ERR_BASE + 11) +#define BSP_ERR_RECV_ERR (BSP_ETH_ERR_BASE + 12) +#define BSP_ERR_SKB_ERR (BSP_ETH_ERR_BASE + 13) +#define BSP_ERR_DMA_ADDR_ERR (BSP_ETH_ERR_BASE + 14) +#define BSP_ERR_IOREMAP_ERR (BSP_ETH_ERR_BASE + 15) +#define BSP_ERR_LEN_ERR (BSP_ETH_ERR_BASE + 16) +#define BSP_ERR_STAT_ERR (BSP_ETH_ERR_BASE + 17) +#define BSP_ERR_AGAIN (BSP_ETH_ERR_BASE + 18) +#define BSP_ERR_NOT_TO_HANDLE (BSP_ETH_ERR_BASE + 19) + +#define VETH_SHAREPOOL_BASE_INBMC (0x84820000) +#define VETH_SHAREPOOL_SIZE (0xdf000) +#define VETH_SHAREPOOL_OFFSET (0x10000) +#define MAX_SHAREQUEUE_SIZE (0x20000) + +#define BSPVETH_DMABURST_MAX (64) +#define BSPVETH_SHMBDBASE_OFFSET (0x80) +#define SHMDMAL_OFFSET (0x10000) +#define MAX_SHMDMAL_SIZE (BSPVETH_DMABURST_MAX * 32) +#define MAX_QUEUE_NUM (1) +#define MAX_QUEUE_BDNUM (128) +#define BSPVETH_MAX_QUE_DEEP (MAX_QUEUE_BDNUM) +#define BSPVETH_POINT_MASK (MAX_QUEUE_BDNUM - 1) +#define BSPVETH_WORK_LIMIT (64) +#define BSPVETH_CHECK_DMA_STATUS_TIMES (512) + +#define BSPPACKET_MTU_MAX (1500) + +#define BSPVETH_DMA_OK (1) +#define BSPVETH_DMA_BUSY (0) +#define BSPVETH_RX (2) +#define BSPVETH_TX (3) +#define BSPVETH_SHMQUEUE_INITOK (0x12) +#define BSPVETH_SHMQUEUE_INITOK_V2 (0x16) + +#define MAX_PACKET_LEN (128 * BSPPACKET_MTU_MAX) +#define MAX_RXTX_PACKET_LEN 64 +#define RESERVE_SPACE 24 + +/* device name. */ +#define CDEV_VETH_NAME "net_cdev" +#define CDEV_OPENED (1) +#define CDEV_CLOSED (0) + +#ifndef GET_SYS_SECONDS +#define GET_SYS_SECONDS(t) do { \ + struct timespec _uptime; \ + get_monotonic_boottime(&_uptime); \ + t = _uptime.tv_sec; \ +} while (0) +#endif + +struct edma_packet_node_s { + u32 len; + u8 *packet; +}; + +struct edma_cut_packet_node_s { + u32 token; + u32 number; + u32 cut_packet_len; + u8 cut_packet[BSPPACKET_MTU_MAX]; + u8 resv[RESERVE_SPACE]; +}; + +#define TK_MIDDLE_PACKET 0 +#define TK_START_PACKET 1 +#define TK_END_PACKET 2 +#define TK_START_END 3 + +/* EDMA transfer requires an alignment of 4. 
*/ +#define EDMA_ADDR_ALIGNMENT (4UL) +#define EDMA_ADDR_ALIGN_MASK (EDMA_ADDR_ALIGNMENT - 1) +#define EDMA_ADDR_ALIGNED(dma_p) (((unsigned long)(dma_p)) & \ + (~(EDMA_ADDR_ALIGN_MASK))) +#define EDMA_ADDR_OFFSET(dma_p) (((unsigned long)(dma_p)) & \ + (EDMA_ADDR_ALIGN_MASK)) + +#define NODE_SIZE (sizeof(struct edma_cut_packet_node_s)) +#define NODE_TO_PACKET_SIZE(n) (n->cut_packet_len + (3 * sizeof(u32))) +#define NODE_PER_PAGE (PAGE_SIZE / (NODE_SIZE)) + +#define ALIGN_MASK 4096 +#define STRESS_FACTOR 100 +#define DMA_STATUS_CHECK_DELAY_LIMIT 20 +#define DMA_STATUS_CHECK_DELAY_MS 5 +#define DMA_RXQ_FAULT_DELAY 50 +#define DMA_QUEUE_FAULT_LIMIT 16 +#define DMACMP_ERR_FACTOR 4 +#define DMABURST_FACTOR 7 + +struct cdev_dev_s { + struct miscdevice dev; + void *priv; +}; + +struct edma_rxtx_statistics { + u64 dmapkt; + u64 dmapktbyte; + + u32 q_empty; + u32 shm_empty; + u32 dma_busy; + u32 type_err; + + u32 dma_need_offset; + u32 dma_failed; + u32 dma_burst; +}; + +struct edma_bd_info_s { + u8 *pdma_v; + dma_addr_t dma_p; + u32 len; + u32 off; +}; + +struct edma_dma_shmbd_s { + u32 dma_p; + u32 len; + u32 off; +}; + +struct edma_shmq_hd_s { + u32 count; + u32 total; + u32 next_to_fill; + u32 next_to_free; + u32 resv1; + u32 resv2; + u32 init; + u32 head; + u32 tail; +}; + +struct edma_dmal_s { + u32 chl; + u32 len; + u32 slow; + u32 shi; + u32 dlow; + u32 dhi; +}; + +struct edma_rxtx_q_s { + struct edma_bd_info_s *pbdinfobase_v; + + struct edma_shmq_hd_s *pshmqhd_v; + u8 *pshmqhd_p; + + struct edma_dma_shmbd_s *pshmbdbase_v; + u8 *pshmbdbase_p; + + struct edma_dmal_s *pdmalbase_v; + u8 *pdmalbase_p; + + u32 dmal_cnt; + u32 dmal_byte; + + u32 count; + u32 size; + + u32 head; + u32 tail; + + u16 start_dma; + u16 dmacmperr; + u16 dma_overtime; + + u32 work_limit; + + struct edma_rxtx_statistics s; +}; + +struct edma_eth_dev_s { + struct edma_rxtx_q_s *ptx_queue; + struct edma_rxtx_q_s *prx_queue; + + struct edma_packet_node_s *rx_packet; + spinlock_t rx_queue_lock; /* spinlock for rx queue */ + + u32 rx_packet_head; + u32 rx_packet_tail; + + unsigned long pages_tx; + unsigned long pages_rx; + + u8 *pshmpool_p; + u8 *pshmpool_v; + u32 shmpoolsize; + + u32 recv_int; + u32 tobmc_int; + u32 run_dma_TX_task; + u32 run_dma_RX_task; + u32 run_skb_RX_task; + + struct tasklet_struct skb_task; + struct tasklet_struct dma_task; + + struct cdev_dev_s cdev; + __kernel_time_t init_time; + + void *edma_priv; +}; + +#ifndef LOG +#define LOG(level, fmt, ...) do {\ + if (debug >= (level)) {\ + netdev_err(0, "[%s,%d] -> " fmt "\n", \ + __func__, __LINE__, ##__VA_ARGS__); \ + } \ +} while (0) +#endif + +#define BD_QUEUE_MASK(p) ((p) & (BSPVETH_POINT_MASK)) + +#define GET_BD_RING_QUEUE_COUNT(head, tail) \ + ((BSPVETH_MAX_QUE_DEEP + (head) - (tail)) & BSPVETH_POINT_MASK) +#define GET_BD_RING_QUEUE_SPACE(head, tail) \ + ((BSPVETH_MAX_QUE_DEEP - 1 + (tail) - (head)) & BSPVETH_POINT_MASK) +#define JUDGE_RING_QUEUE_SPACE(head, tail, len) \ + (GET_BD_RING_QUEUE_SPACE(head, tail) >= (len)) + +#define CHECK_DMA_QUEUE_EMPTY(type, queue) \ + (((type) == BSPVETH_RX && \ + (queue)->pshmqhd_v->head == (queue)->pshmqhd_v->tail) || \ + ((type) == BSPVETH_TX && (queue)->head == (queue)->tail)) + +#define CHECK_DMA_RXQ_FAULT(queue, type, cnt) \ + ((type) == BSPVETH_RX && (queue)->dmal_cnt > 1 && \ + (cnt) < ((queue)->work_limit / 2)) + +#define GET_DMA_DIRECTION(type) \ + (((type) == BSPVETH_RX) ? 
BMC_TO_HOST : HOST_TO_BMC) + +/******* rate limit *********/ +#define RL_MAX_PACKET 10 +#define RL_STRESS_LOW 50 +#define RL_STRESS_HIGH 80 +#define RL_DELAY_MS_LOW 20 +#define RL_DELAY_MS_HIGH 100 + +void veth_dma_task_H(u32 type); +void veth_skbtimer_close(void); +int veth_skbtimer_init(void); +int veth_dmatimer_close_H(void); +int veth_dmatimer_init_H(void); +int veth_skb_tr_task(unsigned long data); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..46cc51275a71efc06d4aff734f2bf3f9593f39e0 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BMA) += host_edma_drv.o +host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c new file mode 100644 index 0000000000000000000000000000000000000000..baa65b8885ff3e721e23b0d351feb72be76fcbf6 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c @@ -0,0 +1,627 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../include/bma_ker_intf.h" +#include "bma_include.h" +#include "bma_devintf.h" +#include "bma_pci.h" +#include "edma_host.h" + +static struct bma_dev_s *g_bma_dev; + +static ATOMIC_NOTIFIER_HEAD(bma_int_notify_list); + +static int bma_priv_insert_priv_list(struct bma_priv_data_s *priv, u32 type, + u32 sub_type) +{ + unsigned long flags = 0; + int ret = 0; + struct edma_user_inft_s *user_inft = NULL; + + if (type >= TYPE_MAX || !priv) + return -EFAULT; + + user_inft = edma_host_get_user_inft(type); + + if (user_inft && user_inft->user_register) { + ret = user_inft->user_register(priv); + if (ret) { + BMA_LOG(DLOG_ERROR, "register failed\n"); + return -EFAULT; + } + } else { + if (!g_bma_dev) + return -ENXIO; + + if (atomic_dec_and_test(&g_bma_dev->au_count[type]) == 0) { + BMA_LOG(DLOG_ERROR, + "busy, init_dev_type.type = %d, au_count = %d\n", + type, + atomic_read(&g_bma_dev->au_count[type])); + atomic_inc(&g_bma_dev->au_count[type]); + return -EBUSY; /* already register */ + } + + priv->user.type = type; + priv->user.sub_type = sub_type; + priv->user.user_id = 0; + + spin_lock_irqsave(&g_bma_dev->priv_list_lock, flags); + + list_add_rcu(&priv->user.link, &g_bma_dev->priv_list); + + spin_unlock_irqrestore(&g_bma_dev->priv_list_lock, flags); + } + + return 0; +} + +static int bma_priv_delete_priv_list(struct bma_priv_data_s *priv) +{ + unsigned long flags = 0; + struct edma_user_inft_s *user_inft = NULL; + + if (!priv || priv->user.type >= TYPE_MAX) + return -EFAULT; + user_inft = edma_host_get_user_inft(priv->user.type); + if (user_inft && user_inft->user_register) { + user_inft->user_unregister(priv); + } else { + if (!g_bma_dev) + return -ENXIO; + spin_lock_irqsave(&g_bma_dev->priv_list_lock, flags); + list_del_rcu(&priv->user.link); + spin_unlock_irqrestore(&g_bma_dev->priv_list_lock, flags); + /* release the type */ + atomic_inc(&g_bma_dev->au_count[priv->user.type]); + } + return 0; +} + +static int bma_priv_init(struct bma_priv_data_s **bma_priv) +{ + struct bma_priv_data_s *priv = NULL; + + if (!bma_priv) + return -EFAULT; + + priv = kmalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + BMA_LOG(DLOG_ERROR, "malloc priv failed\n"); + return -ENOMEM; + } + + memset(priv, 0, sizeof(struct bma_priv_data_s)); + + spin_lock_init(&priv->recv_msg_lock); + INIT_LIST_HEAD(&priv->recv_msgs); + init_waitqueue_head(&priv->wait); + + priv->user.type = TYPE_UNKNOWN; + priv->user.sub_type = 0; + priv->user.dma_transfer = 0; + priv->user.seq = 0; + priv->user.cur_recvmsg_nums = 0; + priv->user.max_recvmsg_nums = DEFAULT_MAX_RECV_MSG_NUMS; + + *bma_priv = priv; + + return 0; +} + +static void bma_priv_clean_up(struct bma_priv_data_s *bma_priv) +{ + int ret = 0; + int i = 0; + struct bma_priv_data_s *priv = bma_priv; + struct edma_recv_msg_s *msg = NULL; + unsigned long flags = 0; + + if (!priv) + return; + + if (priv->user.type == TYPE_UNKNOWN) { + BMA_LOG(DLOG_ERROR, "already unknown type\n"); + return; + } + + spin_lock_irqsave(&priv->recv_msg_lock, flags); + for (i = 0; i < priv->user.max_recvmsg_nums; i++) { + ret = edma_host_recv_msg(&g_bma_dev->edma_host, priv, &msg); + if (ret) + break; + + kfree(msg); + } + spin_unlock_irqrestore(&priv->recv_msg_lock, flags); + + priv->user.type = TYPE_UNKNOWN; + priv->user.sub_type = 0; + priv->user.dma_transfer = 0; + priv->user.seq = 0; + priv->user.cur_recvmsg_nums = 0; + priv->user.max_recvmsg_nums = DEFAULT_MAX_RECV_MSG_NUMS; + kfree(priv); +} + +static 
irqreturn_t bma_irq_handle(int irq, void *data) +{ + struct bma_dev_s *bma_dev = (struct bma_dev_s *)data; + + if (!bma_dev) + return IRQ_HANDLED; + + bma_dev->edma_host.statistics.b2h_int++; + + if (!is_edma_b2h_int(&bma_dev->edma_host)) + return edma_host_irq_handle(&bma_dev->edma_host); + + return (irqreturn_t)atomic_notifier_call_chain(&bma_int_notify_list, 0, + data); +} + +int bma_devinft_init(struct bma_pci_dev_s *bma_pci_dev) +{ + int ret = 0; + int i = 0; + struct bma_dev_s *bma_dev = NULL; + + if (!bma_pci_dev) + return -EFAULT; + + bma_dev = kmalloc(sizeof(*bma_dev), (int)GFP_KERNEL); + if (!bma_dev) + return -ENOMEM; + + memset(bma_dev, 0, sizeof(struct bma_dev_s)); + + bma_dev->bma_pci_dev = bma_pci_dev; + bma_pci_dev->bma_dev = bma_dev; + + INIT_LIST_HEAD(&bma_dev->priv_list); + spin_lock_init(&bma_dev->priv_list_lock); + + for (i = 0; i < TYPE_MAX; i++) + atomic_set(&bma_dev->au_count[i], 1); + + ret = edma_host_init(&bma_dev->edma_host); + if (ret) { + BMA_LOG(DLOG_ERROR, "init edma host failed!err = %d\n", ret); + goto err_free_bma_dev; + } + + BMA_LOG(DLOG_DEBUG, "irq = %d\n", bma_pci_dev->pdev->irq); + + ret = request_irq(bma_pci_dev->pdev->irq, bma_irq_handle, IRQF_SHARED, + "EDMA_IRQ", (void *)bma_dev); + if (ret) { + BMA_LOG(DLOG_ERROR, "request_irq failed!err = %d\n", ret); + goto err_edma_host_exit; + } + + g_bma_dev = bma_dev; + BMA_LOG(DLOG_DEBUG, "ok\n"); + + return 0; + +err_edma_host_exit: + edma_host_cleanup(&bma_dev->edma_host); + +err_free_bma_dev: + kfree(bma_dev); + bma_pci_dev->bma_dev = NULL; + + return ret; +} + +void bma_devinft_cleanup(struct bma_pci_dev_s *bma_pci_dev) +{ + if (g_bma_dev) { + if ((bma_pci_dev) && bma_pci_dev->pdev && + bma_pci_dev->pdev->irq) { + BMA_LOG(DLOG_DEBUG, "irq = %d\n", + bma_pci_dev->pdev->irq); + free_irq(bma_pci_dev->pdev->irq, + (void *)bma_pci_dev->bma_dev); + } + + edma_host_cleanup(&g_bma_dev->edma_host); + + if ((bma_pci_dev) && bma_pci_dev->bma_dev) { + kfree(bma_pci_dev->bma_dev); + bma_pci_dev->bma_dev = NULL; + } + + g_bma_dev = NULL; + } +} + +int bma_intf_register_int_notifier(struct notifier_block *nb) +{ + if (!nb) + return -1; + + return atomic_notifier_chain_register(&bma_int_notify_list, nb); +} +EXPORT_SYMBOL_GPL(bma_intf_register_int_notifier); + +void bma_intf_unregister_int_notifier(struct notifier_block *nb) +{ + if (!nb) + return; + + atomic_notifier_chain_unregister(&bma_int_notify_list, nb); +} +EXPORT_SYMBOL_GPL(bma_intf_unregister_int_notifier); + +int bma_intf_register_type(u32 type, u32 sub_type, enum intr_mod support_int, + void **handle) +{ + int ret = 0; + struct bma_priv_data_s *priv = NULL; + + if (!handle) + return -EFAULT; + + ret = bma_priv_init(&priv); + if (ret) { + BMA_LOG(DLOG_ERROR, "bma_priv_init failed! ret = %d\n", ret); + return ret; + } + + ret = bma_priv_insert_priv_list(priv, type, sub_type); + if (ret) { + bma_priv_clean_up(priv); + BMA_LOG(DLOG_ERROR, + "bma_priv_insert_priv_list failed! 
ret = %d\n", ret);
+		return ret;
+	}
+
+	if (support_int)
+		priv->user.support_int = INTR_ENABLE;
+
+	if (type == TYPE_VETH) {
+		priv->specific.veth.pdev = g_bma_dev->bma_pci_dev->pdev;
+
+		priv->specific.veth.veth_swap_phy_addr =
+			g_bma_dev->bma_pci_dev->veth_swap_phy_addr;
+		priv->specific.veth.veth_swap_addr =
+			g_bma_dev->bma_pci_dev->veth_swap_addr;
+		priv->specific.veth.veth_swap_len =
+			g_bma_dev->bma_pci_dev->veth_swap_len;
+	}
+
+	*handle = priv;
+
+	return 0;
+}
+EXPORT_SYMBOL(bma_intf_register_type);
+
+int bma_intf_unregister_type(void **handle)
+{
+	struct bma_priv_data_s *priv = NULL;
+
+	if (!handle) {
+		BMA_LOG(DLOG_ERROR, "handle is NULL\n");
+		return -EFAULT;
+	}
+
+	priv = (struct bma_priv_data_s *)*handle;
+	*handle = NULL;
+
+	priv->user.cur_recvmsg_nums++;
+	wake_up_interruptible(&priv->wait);
+
+	msleep(500);
+
+	bma_priv_delete_priv_list(priv);
+
+	bma_priv_clean_up(priv);
+
+	return 0;
+}
+EXPORT_SYMBOL(bma_intf_unregister_type);
+
+int bma_intf_check_edma_supported(void)
+{
+	return !(!g_bma_dev);
+}
+EXPORT_SYMBOL(bma_intf_check_edma_supported);
+
+int bma_intf_check_dma_status(enum dma_direction_e dir)
+{
+	return edma_host_check_dma_status(dir);
+}
+EXPORT_SYMBOL(bma_intf_check_dma_status);
+
+void bma_intf_reset_dma(enum dma_direction_e dir)
+{
+	edma_host_reset_dma(&g_bma_dev->edma_host, dir);
+}
+EXPORT_SYMBOL(bma_intf_reset_dma);
+
+void bma_intf_clear_dma_int(enum dma_direction_e dir)
+{
+	if (dir == BMC_TO_HOST)
+		clear_int_dmab2h(&g_bma_dev->edma_host);
+	else if (dir == HOST_TO_BMC)
+		clear_int_dmah2b(&g_bma_dev->edma_host);
+	else
+		return;
+}
+EXPORT_SYMBOL(bma_intf_clear_dma_int);
+
+int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
+{
+	int ret = 0;
+	struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle;
+
+	if (!handle || !dma_transfer)
+		return -EFAULT;
+
+	ret = edma_host_dma_start(&g_bma_dev->edma_host, priv);
+	if (ret) {
+		BMA_LOG(DLOG_ERROR,
+			"edma_host_dma_start failed! result = %d\n", ret);
+		return ret;
+	}
+
+	ret = edma_host_dma_transfer(&g_bma_dev->edma_host, priv, dma_transfer);
+	if (ret)
+		BMA_LOG(DLOG_ERROR,
+			"edma_host_dma_transfer failed! ret = %d\n", ret);
+
+	ret = edma_host_dma_stop(&g_bma_dev->edma_host, priv);
+	if (ret) {
+		BMA_LOG(DLOG_ERROR,
+			"edma_host_dma_stop failed! 
result = %d\n", ret); + return ret; + } + + return ret; +} +EXPORT_SYMBOL(bma_intf_start_dma); + +int bma_intf_int_to_bmc(void *handle) +{ + struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle; + + if (!handle) + return -EFAULT; + + if (priv->user.support_int == 0) { + BMA_LOG(DLOG_ERROR, "not support int to bmc.\n"); + return -EFAULT; + } + + edma_int_to_bmc(&g_bma_dev->edma_host); + + return 0; +} +EXPORT_SYMBOL(bma_intf_int_to_bmc); + +int bma_intf_is_link_ok(void) +{ + if (g_bma_dev && + g_bma_dev->edma_host.statistics.remote_status == REGISTERED) + return 1; + return 0; +} +EXPORT_SYMBOL(bma_intf_is_link_ok); + +int bma_cdev_recv_msg(void *handle, char __user *data, size_t count) +{ + struct bma_priv_data_s *priv = NULL; + struct edma_recv_msg_s *msg = NULL; + int result = 0; + int len = 0; + unsigned long flags = 0; + + if (!handle || !data || count == 0) { + BMA_LOG(DLOG_DEBUG, "input NULL point!\n"); + return -EFAULT; + } + + priv = (struct bma_priv_data_s *)handle; + + spin_lock_irqsave(&priv->recv_msg_lock, flags); + result = edma_host_recv_msg(&g_bma_dev->edma_host, priv, &msg); + spin_unlock_irqrestore(&priv->recv_msg_lock, flags); + if (result != 0) + return -ENODATA; + + if (msg->msg_len > count) { + kfree(msg); + return -EFAULT; + } + + if (copy_to_user(data, (void *)msg->msg_data, msg->msg_len)) { + kfree(msg); + return -EFAULT; + } + + len = msg->msg_len; + + kfree(msg); + + return len; +} +EXPORT_SYMBOL_GPL(bma_cdev_recv_msg); + +static int check_cdev_add_msg_param(struct bma_priv_data_s *handle, +const char __user *msg, size_t msg_len) +{ + struct bma_priv_data_s *priv = NULL; + + if (!handle || !msg || msg_len == 0) { + BMA_LOG(DLOG_DEBUG, "input NULL point!\n"); + return -EFAULT; + } + + if (msg_len > CDEV_MAX_WRITE_LEN) { + BMA_LOG(DLOG_DEBUG, "input data is overlen!\n"); + return -EINVAL; + } + + priv = handle; + + if (priv->user.type >= TYPE_MAX) { + BMA_LOG(DLOG_DEBUG, "error type = %d\n", priv->user.type); + return -EFAULT; + } + + return 0; +} + +static void edma_msg_hdr_init(struct edma_msg_hdr_s *hdr, + struct bma_priv_data_s *private_data, + char *msg_buf, size_t msg_len) +{ + hdr->type = private_data->user.type; + hdr->sub_type = private_data->user.sub_type; + hdr->user_id = private_data->user.user_id; + hdr->datalen = msg_len; + BMA_LOG(DLOG_DEBUG, "msg_len is %ld\n", msg_len); + + memcpy(hdr->data, msg_buf, msg_len); +} + +int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len) +{ + struct bma_priv_data_s *priv = NULL; + struct edma_msg_hdr_s *hdr = NULL; + unsigned long flags = 0; + unsigned int total_len = 0; + int ret = 0; + struct edma_host_s *phost = &g_bma_dev->edma_host; + char *msg_buf = NULL; + + ret = check_cdev_add_msg_param(handle, msg, msg_len); + if (ret != 0) + return ret; + + priv = (struct bma_priv_data_s *)handle; + + total_len = (unsigned int)(SIZE_OF_MSG_HDR + msg_len); + if (phost->msg_send_write + total_len > HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) { + BMA_LOG(DLOG_DEBUG, "msg lost,msg_send_write: %u,msg_len:%u,max_len: %d\n", + phost->msg_send_write, total_len, HOST_MAX_SEND_MBX_LEN); + return -ENOSPC; + } + + msg_buf = (char *)kmalloc(msg_len, GFP_KERNEL); + if (!msg_buf) { + BMA_LOG(DLOG_ERROR, "malloc msg_buf failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, msg, msg_len)) { + BMA_LOG(DLOG_ERROR, "copy_from_user error\n"); + kfree(msg_buf); + return -EFAULT; + } + + spin_lock_irqsave(&phost->send_msg_lock, flags); + + hdr = (struct edma_msg_hdr_s *)(phost->msg_send_buf + 
phost->msg_send_write); + edma_msg_hdr_init(hdr, priv, msg_buf, msg_len); + + phost->msg_send_write += total_len; + phost->statistics.send_bytes += total_len; + phost->statistics.send_pkgs++; +#ifdef EDMA_TIMER + (void)mod_timer(&phost->timer, jiffies_64); +#endif + BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n", phost->msg_send_write); + + ret = msg_len; + spin_unlock_irqrestore(&g_bma_dev->edma_host.send_msg_lock, flags); + kfree(msg_buf); + return ret; +} +EXPORT_SYMBOL_GPL(bma_cdev_add_msg); + +unsigned int bma_cdev_check_recv(void *handle) +{ + struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle; + unsigned long flags = 0; + unsigned int result = 0; + + if (priv) { + spin_lock_irqsave(&priv->recv_msg_lock, flags); + + if (!list_empty(&priv->recv_msgs)) + result = 1; + + spin_unlock_irqrestore(&priv->recv_msg_lock, flags); + } + + return result; +} +EXPORT_SYMBOL_GPL(bma_cdev_check_recv); + +void *bma_cdev_get_wait_queue(void *handle) +{ + struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle; + + return priv ? ((void *)&priv->wait) : NULL; +} +EXPORT_SYMBOL_GPL(bma_cdev_get_wait_queue); + +void bma_intf_set_open_status(void *handle, int s) +{ + struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle; + int i = 0; + int ret = 0; + unsigned long flags = 0; + char drv_msg[3] = { 0 }; + struct edma_recv_msg_s *tmp_msg = NULL; + + if (!priv || priv->user.type >= TYPE_MAX) + return; + + drv_msg[0] = 1; + drv_msg[1] = priv->user.type; + drv_msg[2] = s; + + (void)edma_host_send_driver_msg((void *)drv_msg, sizeof(drv_msg), + DEV_OPEN_STATUS_ANS); + + spin_lock_irqsave(&priv->recv_msg_lock, flags); + g_bma_dev->edma_host.local_open_status[priv->user.type] = s; + + if (s == DEV_CLOSE && priv->user.cur_recvmsg_nums > 0) { + for (i = 0; i < priv->user.max_recvmsg_nums; i++) { + ret = edma_host_recv_msg(&g_bma_dev->edma_host, + priv, &tmp_msg); + if (ret < 0) + break; + + kfree(tmp_msg); + tmp_msg = NULL; + } + } + + spin_unlock_irqrestore(&priv->recv_msg_lock, flags); +} +EXPORT_SYMBOL_GPL(bma_intf_set_open_status); diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.h new file mode 100644 index 0000000000000000000000000000000000000000..138d1e2784799a9f752a067039bfb0fbdb3731f7 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _BMA_DEVINTF_H_ +#define _BMA_DEVINTF_H_ + +#include +#include "bma_pci.h" +#include "edma_host.h" + +struct bma_dev_s { + /* proc */ + struct proc_dir_entry *proc_bma_root; + + atomic_t au_count[TYPE_MAX]; + + struct list_head priv_list; + /* spinlock for priv list */ + spinlock_t priv_list_lock; + + struct bma_pci_dev_s *bma_pci_dev; + struct edma_host_s edma_host; +}; + +int bma_devinft_init(struct bma_pci_dev_s *bma_pci_dev); +void bma_devinft_cleanup(struct bma_pci_dev_s *bma_pci_dev); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_include.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_include.h new file mode 100644 index 0000000000000000000000000000000000000000..2c122ae914635376209c2805b4969433e40551a9 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_include.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _BMA_INCLUDE_H_ +#define _BMA_INCLUDE_H_ + +#include +#include +#include +#include /* copy_*_user */ +#include /* udelay */ +#include +#include +#include +#include +#include /*tasklet */ +#include +#include +#include +#include +#include +#include + +#define UNUSED(x) (x = x) +#define KBOX_FALSE (-1) +#define KBOX_TRUE 0 + +#define KBOX_IOC_MAGIC (0xB2) + +#define DEFAULT_MAX_RECV_MSG_NUMS 32 +#define MAX_RECV_MSG_NUMS 1024 + +#define STRFICATION(R) #R +#define MICRO_TO_STR(R) STRFICATION(R) + +enum { + DLOG_ERROR = 0, + DLOG_DEBUG = 1, +}; + +enum { + DEV_CLOSE = 0, + DEV_OPEN = 1, + DEV_OPEN_STATUS_REQ = 0xf0, + DEV_OPEN_STATUS_ANS +}; + +struct bma_user_s { + struct list_head link; + + u32 type; + u32 sub_type; + u8 user_id; + + u8 dma_transfer:1, support_int:1; + + u8 reserve1[2]; + u32 seq; + u16 cur_recvmsg_nums; + u16 max_recvmsg_nums; +}; + +struct bma_priv_data_veth_s { + struct pci_dev *pdev; + + unsigned long veth_swap_phy_addr; + void __iomem *veth_swap_addr; + unsigned long veth_swap_len; +}; + +struct bma_priv_data_s { + struct bma_user_s user; + /* spinlock for recv msg list */ + spinlock_t recv_msg_lock; + struct list_head recv_msgs; + struct file *file; + wait_queue_head_t wait; + + union { + struct bma_priv_data_veth_s veth; + } specific; +}; + +#if defined(timer_setup) && defined(from_timer) +#define HAVE_TIMER_SETUP +#endif + +void __iomem *kbox_get_base_addr(void); +unsigned long kbox_get_io_len(void); +unsigned long kbox_get_base_phy_addr(void); +int edma_param_set_debug(const char *buf, const struct kernel_param *kp); + +#define GET_SYS_SECONDS(t) do \ + {\ + struct timespec64 uptime;\ + ktime_get_coarse_real_ts64(&uptime);\ + t = uptime.tv_sec;\ + } while (0) + +#define SECONDS_PER_DAY (24 * 3600) +#define SECONDS_PER_HOUR (3600) +#define SECONDS_PER_MINUTE (60) + +#endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..577acaedb0e2fd16e8b9da93cf1957db9da172eb --- 
/dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "bma_include.h" +#include "bma_devintf.h" +#include "bma_pci.h" + +#define PCI_KBOX_MODULE_NAME "edma_drv" +#define PCI_VENDOR_ID_HUAWEI_FPGA 0x19aa +#define PCI_DEVICE_ID_KBOX_0 0xe004 + +#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5 +#define PCI_DEVICE_ID_KBOX_0_PME 0x1710 +#define PCI_PME_USEABLE_SPACE (4 * 1024 * 1024) +#define PME_DEV_CHECK(device, vendor) ((device) == PCI_DEVICE_ID_KBOX_0_PME && \ + (vendor) == PCI_VENDOR_ID_HUAWEI_PME) + +#define PCI_BAR0_PME_1710 0x85800000 +#define PCI_BAR0 0 +#define PCI_BAR1 1 +#define PCI_USING_DAC_DEFAULT 0 + +#define GET_HIGH_ADDR(address) ((sizeof(unsigned long) == 8) ? \ + ((u64)(address) >> 32) : 0) + +/* The value of the expression is true + * only when dma_set_mask and dma_set_coherent_mask failed. + */ +#define SET_DMA_MASK(p_dev) \ + (dma_set_mask((p_dev), DMA_BIT_MASK(64)) && \ + dma_set_coherent_mask((p_dev), DMA_BIT_MASK(64))) + +int pci_using_dac = PCI_USING_DAC_DEFAULT; +int debug = DLOG_ERROR; +MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)"); + +static struct bma_pci_dev_s *g_bma_pci_dev; + +static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state); +static int bma_pci_resume(struct pci_dev *pdev); +static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void bma_pci_remove(struct pci_dev *pdev); + +static const struct pci_device_id bma_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_FPGA, PCI_DEVICE_ID_KBOX_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_KBOX_0_PME)}, + {} +}; +MODULE_DEVICE_TABLE(pci, bma_pci_tbl); + +int edma_param_get_statics(char *buf, const struct kernel_param *kp) +{ + if (!buf) + return 0; + + return edmainfo_show(buf); +} + +module_param_call(statistics, NULL, edma_param_get_statics, &debug, 0444); +MODULE_PARM_DESC(statistics, "Statistics info of edma driver,readonly"); + +int edma_param_set_debug(const char *buf, const struct kernel_param *kp) +{ + unsigned long val = 0; + int ret = 0; + + if (!buf) + return -EINVAL; + + ret = kstrtoul(buf, 0, &val); + + if (ret) + return ret; + + if (val > 1) + return -EINVAL; + + return param_set_int(buf, kp); +} +EXPORT_SYMBOL_GPL(edma_param_set_debug); + +module_param_call(debug, &edma_param_set_debug, ¶m_get_int, &debug, 0644); + +void __iomem *kbox_get_base_addr(void) +{ + if (!g_bma_pci_dev || (!(g_bma_pci_dev->kbox_base_addr))) { + BMA_LOG(DLOG_ERROR, "kbox_base_addr NULL point\n"); + return NULL; + } + + return g_bma_pci_dev->kbox_base_addr; +} +EXPORT_SYMBOL_GPL(kbox_get_base_addr); + +unsigned long kbox_get_io_len(void) +{ + if (!g_bma_pci_dev) { + BMA_LOG(DLOG_ERROR, "kbox_io_len is error,can not get it\n"); + return 0; + } + + return g_bma_pci_dev->kbox_base_len; +} +EXPORT_SYMBOL_GPL(kbox_get_io_len); + +unsigned long 
kbox_get_base_phy_addr(void) +{ + if (!g_bma_pci_dev || !g_bma_pci_dev->kbox_base_phy_addr) { + BMA_LOG(DLOG_ERROR, "kbox_base_phy_addr NULL point\n"); + return 0; + } + + return g_bma_pci_dev->kbox_base_phy_addr; +} +EXPORT_SYMBOL_GPL(kbox_get_base_phy_addr); + +static struct pci_driver bma_driver = { + .name = PCI_KBOX_MODULE_NAME, + .id_table = bma_pci_tbl, + .probe = bma_pci_probe, + .remove = bma_pci_remove, + .suspend = bma_pci_suspend, + .resume = bma_pci_resume, +}; + +s32 __atu_config_H(struct pci_dev *pdev, unsigned int region, + unsigned int hostaddr_h, unsigned int hostaddr_l, + unsigned int bmcaddr_h, unsigned int bmcaddr_l, + unsigned int len) +{ + /* atu index reg,inbound and region*/ + (void)pci_write_config_dword(pdev, ATU_VIEWPORT, + REGION_DIR_INPUT + (region & REGION_INDEX_MASK)); + (void)pci_write_config_dword(pdev, ATU_BASE_LOW, hostaddr_l); + (void)pci_write_config_dword(pdev, ATU_BASE_HIGH, hostaddr_h); + (void)pci_write_config_dword(pdev, ATU_LIMIT, hostaddr_l + len - 1); + (void)pci_write_config_dword(pdev, ATU_TARGET_LOW, bmcaddr_l); + (void)pci_write_config_dword(pdev, ATU_TARGET_HIGH, bmcaddr_h); + /* atu ctrl1 reg */ + (void)pci_write_config_dword(pdev, ATU_REGION_CTRL1, ATU_CTRL1_DEFAULT); + /* atu ctrl2 reg */ + (void)pci_write_config_dword(pdev, ATU_REGION_CTRL2, REGION_ENABLE); + + return 0; +} + +static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev) +{ + if (bma_pci_dev->kbox_base_addr) { + iounmap(bma_pci_dev->kbox_base_addr); + bma_pci_dev->kbox_base_addr = NULL; + } + + if (bma_pci_dev->bma_base_addr) { + iounmap(bma_pci_dev->bma_base_addr); + bma_pci_dev->bma_base_addr = NULL; + bma_pci_dev->edma_swap_addr = NULL; + bma_pci_dev->hostrtc_viraddr = NULL; + } +} + +static int ioremap_pme_bar1_mem(struct pci_dev *pdev, + struct bma_pci_dev_s *bma_pci_dev) +{ + unsigned long bar1_resource_flag = 0; + u32 data = 0; + + bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE; + BMA_LOG(DLOG_DEBUG, "1710\n"); + + bma_pci_dev->bma_base_phy_addr = + pci_resource_start(pdev, PCI_BAR1); + bar1_resource_flag = pci_resource_flags(pdev, PCI_BAR1); + + if (!(bar1_resource_flag & IORESOURCE_MEM)) { + BMA_LOG(DLOG_ERROR, + "Cannot find proper PCI device base address, aborting\n"); + return -ENODEV; + } + + bma_pci_dev->bma_base_len = pci_resource_len(pdev, PCI_BAR1); + bma_pci_dev->edma_swap_len = EDMA_SWAP_DATA_SIZE; + bma_pci_dev->veth_swap_len = VETH_SWAP_DATA_SIZE; + + BMA_LOG(DLOG_DEBUG, + "bar1: bma_base_len = 0x%lx, edma_swap_len = %ld, veth_swap_len = %ld(0x%lx)\n", + bma_pci_dev->bma_base_len, bma_pci_dev->edma_swap_len, + bma_pci_dev->veth_swap_len, bma_pci_dev->veth_swap_len); + + bma_pci_dev->hostrtc_phyaddr = bma_pci_dev->bma_base_phy_addr; + /* edma */ + bma_pci_dev->edma_swap_phy_addr = + bma_pci_dev->bma_base_phy_addr + EDMA_SWAP_BASE_OFFSET; + /* veth */ + bma_pci_dev->veth_swap_phy_addr = + bma_pci_dev->edma_swap_phy_addr + EDMA_SWAP_DATA_SIZE; + + BMA_LOG(DLOG_DEBUG, + "bar1: hostrtc_phyaddr = 0x%lx, edma_swap_phy_addr = 0x%lx, veth_swap_phy_addr = 0x%lx\n", + bma_pci_dev->hostrtc_phyaddr, + bma_pci_dev->edma_swap_phy_addr, + bma_pci_dev->veth_swap_phy_addr); + + __atu_config_H(pdev, 0, + GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr), + (bma_pci_dev->kbox_base_phy_addr & 0xffffffff), + 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE); + + __atu_config_H(pdev, 1, + GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr), + (bma_pci_dev->hostrtc_phyaddr & 0xffffffff), + 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE); + + __atu_config_H(pdev, 2, + 
GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr), + (bma_pci_dev->edma_swap_phy_addr & 0xffffffff), + 0, EDMA_SWAP_DATA_BASE, EDMA_SWAP_DATA_SIZE); + + __atu_config_H(pdev, 3, + GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr), + (bma_pci_dev->veth_swap_phy_addr & 0xffffffff), + 0, VETH_SWAP_DATA_BASE, VETH_SWAP_DATA_SIZE); + + if (bar1_resource_flag & IORESOURCE_CACHEABLE) { + bma_pci_dev->bma_base_addr = + ioremap(bma_pci_dev->bma_base_phy_addr, + bma_pci_dev->bma_base_len); + } else { + bma_pci_dev->bma_base_addr = + IOREMAP(bma_pci_dev->bma_base_phy_addr, + bma_pci_dev->bma_base_len); + } + + if (!bma_pci_dev->bma_base_addr) { + BMA_LOG(DLOG_ERROR, + "Cannot map device registers, aborting\n"); + + return -ENODEV; + } + + bma_pci_dev->hostrtc_viraddr = bma_pci_dev->bma_base_addr; + bma_pci_dev->edma_swap_addr = + (unsigned char *)bma_pci_dev->bma_base_addr + + EDMA_SWAP_BASE_OFFSET; + bma_pci_dev->veth_swap_addr = + (unsigned char *)bma_pci_dev->edma_swap_addr + + EDMA_SWAP_DATA_SIZE; + + (void)pci_read_config_dword(pdev, 0x78, &data); + data = data & 0xfffffff0; + (void)pci_write_config_dword(pdev, 0x78, data); + (void)pci_read_config_dword(pdev, 0x78, &data); + + return 0; +} + +static int ioremap_bar_mem(struct pci_dev *pdev, + struct bma_pci_dev_s *bma_pci_dev) +{ + int err = 0; + unsigned long bar0_resource_flag = 0; + + bar0_resource_flag = pci_resource_flags(pdev, PCI_BAR0); + + if (!(bar0_resource_flag & IORESOURCE_MEM)) { + BMA_LOG(DLOG_ERROR, + "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + return err; + } + + bma_pci_dev->kbox_base_phy_addr = pci_resource_start(pdev, PCI_BAR0); + + bma_pci_dev->kbox_base_len = pci_resource_len(pdev, PCI_BAR0); + + BMA_LOG(DLOG_DEBUG, + "bar0: kbox_base_phy_addr = 0x%lx, base_len = %ld(0x%lx)\n", + bma_pci_dev->kbox_base_phy_addr, bma_pci_dev->kbox_base_len, + bma_pci_dev->kbox_base_len); + + if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { + err = ioremap_pme_bar1_mem(pdev, bma_pci_dev); + if (err != 0) + return err; + } + + BMA_LOG(DLOG_DEBUG, "remap BAR0 KBOX\n"); + + if (bar0_resource_flag & IORESOURCE_CACHEABLE) { + bma_pci_dev->kbox_base_addr = + ioremap(bma_pci_dev->kbox_base_phy_addr, + bma_pci_dev->kbox_base_len); + } else { + bma_pci_dev->kbox_base_addr = + IOREMAP(bma_pci_dev->kbox_base_phy_addr, + bma_pci_dev->kbox_base_len); + } + + if (!bma_pci_dev->kbox_base_addr) { + BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n"); + + iounmap(bma_pci_dev->bma_base_addr); + bma_pci_dev->bma_base_addr = NULL; + bma_pci_dev->edma_swap_addr = NULL; + bma_pci_dev->hostrtc_viraddr = NULL; + return -ENOMEM; + } + + return 0; +} + +int pme_pci_enable_msi(struct pci_dev *pdev) +{ + int err = 0; + + pci_set_master(pdev); + +#ifdef CONFIG_PCI_MSI + if (pci_find_capability(pdev, PCI_CAP_ID_MSI) == 0) { + BMA_LOG(DLOG_ERROR, "not support msi\n"); + pci_disable_device(pdev); + return err; + } + + BMA_LOG(DLOG_DEBUG, "support msi\n"); + + err = pci_enable_msi(pdev); + if (err) { + BMA_LOG(DLOG_ERROR, "pci_enable_msi failed\n"); + pci_disable_device(pdev); + return err; + } +#endif + + return err; +} + +int pci_device_init(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) +{ + int err = 0; + + if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { + err = bma_devinft_init(bma_pci_dev); + if (err) { + BMA_LOG(DLOG_ERROR, "bma_devinft_init failed\n"); + bma_devinft_cleanup(bma_pci_dev); + iounmap_bar_mem(bma_pci_dev); + g_bma_pci_dev = NULL; + pci_release_regions(pdev); + kfree(bma_pci_dev); + #ifdef CONFIG_PCI_MSI + 
pci_disable_msi(pdev); + #endif + pci_disable_device(pdev); + + return err; + } + } else { + BMA_LOG(DLOG_DEBUG, "edma is not supported on this pcie\n"); + } + + pci_set_drvdata(pdev, bma_pci_dev); + + return 0; +} + +int pci_device_config(struct pci_dev *pdev) +{ + int err = 0; + struct bma_pci_dev_s *bma_pci_dev = NULL; + + bma_pci_dev = kmalloc(sizeof(*bma_pci_dev), GFP_KERNEL); + if (!bma_pci_dev) { + err = -ENOMEM; + goto err_out_disable_msi; + } + memset(bma_pci_dev, 0, sizeof(*bma_pci_dev)); + + bma_pci_dev->pdev = pdev; + + err = pci_request_regions(pdev, PCI_KBOX_MODULE_NAME); + if (err) { + BMA_LOG(DLOG_ERROR, "Cannot obtain PCI resources, aborting\n"); + goto err_out_free_dev; + } + + err = ioremap_bar_mem(pdev, bma_pci_dev); + if (err) { + BMA_LOG(DLOG_ERROR, "ioremap_edma_io_mem failed\n"); + goto err_out_release_regions; + } + + g_bma_pci_dev = bma_pci_dev; + + if (SET_DMA_MASK(&pdev->dev)) { + BMA_LOG(DLOG_ERROR, + "No usable DMA ,configuration, aborting,goto failed2!!!\n"); + goto err_out_unmap_bar; + } + + g_bma_pci_dev = bma_pci_dev; + + return pci_device_init(pdev, bma_pci_dev); + +err_out_unmap_bar: + iounmap_bar_mem(bma_pci_dev); + g_bma_pci_dev = NULL; +err_out_release_regions: + pci_release_regions(pdev); +err_out_free_dev: + kfree(bma_pci_dev); + bma_pci_dev = NULL; +err_out_disable_msi: +#ifdef CONFIG_PCI_MSI + pci_disable_msi(pdev); +#endif + + pci_disable_device(pdev); + + return err; +} + +static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + + UNUSED(ent); + + if (g_bma_pci_dev) + return -EPERM; + + err = pci_enable_device(pdev); + if (err) { + BMA_LOG(DLOG_ERROR, "Cannot enable PCI device,aborting\n"); + return err; + } + + if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { + err = pme_pci_enable_msi(pdev); + if (err) + return err; + } + + BMA_LOG(DLOG_DEBUG, "pdev->device = 0x%x\n", pdev->device); + BMA_LOG(DLOG_DEBUG, "pdev->vendor = 0x%x\n", pdev->vendor); + + return pci_device_config(pdev); +} + +static void bma_pci_remove(struct pci_dev *pdev) +{ + struct bma_pci_dev_s *bma_pci_dev = + (struct bma_pci_dev_s *)pci_get_drvdata(pdev); + + g_bma_pci_dev = NULL; + (void)pci_set_drvdata(pdev, NULL); + + if (bma_pci_dev) { + bma_devinft_cleanup(bma_pci_dev); + + iounmap_bar_mem(bma_pci_dev); + + kfree(bma_pci_dev); + } + + pci_release_regions(pdev); + +#ifdef CONFIG_PCI_MSI + pci_disable_msi(pdev); +#endif + pci_disable_device(pdev); +} + +static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + UNUSED(pdev); + UNUSED(state); + + return 0; +} + +static int bma_pci_resume(struct pci_dev *pdev) +{ + UNUSED(pdev); + + return 0; +} + +int __init bma_pci_init(void) +{ + int ret = 0; + + BMA_LOG(DLOG_DEBUG, "\n"); + + ret = pci_register_driver(&bma_driver); + if (ret) + BMA_LOG(DLOG_ERROR, "pci_register_driver failed\n"); + + return ret; +} + +void __exit bma_pci_cleanup(void) +{ + BMA_LOG(DLOG_DEBUG, "\n"); + + pci_unregister_driver(&bma_driver); +} + +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_DESCRIPTION("HUAWEI EDMA DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(BMA_VERSION); +#ifndef _lint + +module_init(bma_pci_init); +module_exit(bma_pci_cleanup); +#endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..7639b11a3f50ce1f5b352ea26256678841705194 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h @@ -0,0 +1,94 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _BMA_PCI_H_ +#define _BMA_PCI_H_ + +#include "bma_devintf.h" +#include "bma_include.h" +#include + +#define EDMA_SWAP_BASE_OFFSET 0x10000 + +#define HOSTRTC_REG_BASE 0x2f000000 +#define HOSTRTC_REG_SIZE EDMA_SWAP_BASE_OFFSET + +#define EDMA_SWAP_DATA_BASE 0x84810000 +#define EDMA_SWAP_DATA_SIZE 65536 + +#define VETH_SWAP_DATA_BASE 0x84820000 +#define VETH_SWAP_DATA_SIZE 0xdf000 + +#define ATU_VIEWPORT 0x900 +#define ATU_REGION_CTRL1 0x904 +#define ATU_REGION_CTRL2 0x908 +#define ATU_BASE_LOW 0x90C +#define ATU_BASE_HIGH 0x910 +#define ATU_LIMIT 0x914 +#define ATU_TARGET_LOW 0x918 +#define ATU_TARGET_HIGH 0x91C +#define REGION_DIR_OUTPUT (0x0 << 31) +#define REGION_DIR_INPUT (0x1 << 31) +#define REGION_INDEX_MASK 0x7 +#define REGION_ENABLE (0x1 << 31) +#define ATU_CTRL1_DEFAULT 0x0 +struct bma_pci_dev_s { + unsigned long kbox_base_phy_addr; + void __iomem *kbox_base_addr; + unsigned long kbox_base_len; + + unsigned long bma_base_phy_addr; + void __iomem *bma_base_addr; + unsigned long bma_base_len; + + unsigned long hostrtc_phyaddr; + void __iomem *hostrtc_viraddr; + + unsigned long edma_swap_phy_addr; + void __iomem *edma_swap_addr; + unsigned long edma_swap_len; + + unsigned long veth_swap_phy_addr; + void __iomem *veth_swap_addr; + unsigned long veth_swap_len; + + struct pci_dev *pdev; + struct bma_dev_s *bma_dev; +}; + +#ifdef DRV_VERSION +#define BMA_VERSION MICRO_TO_STR(DRV_VERSION) +#else +#define BMA_VERSION "0.3.10" +#endif + +#ifdef CONFIG_ARM64 +#define IOREMAP ioremap_wc +#else +#define IOREMAP ioremap_nocache +#endif + +extern int debug; + +#define BMA_LOG(level, fmt, args...) \ + do { \ + if (debug >= (level))\ + netdev_alert(0, "edma: %s, %d, " fmt, \ + __func__, __LINE__, ## args); \ + } while (0) + +int edmainfo_show(char *buff); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c new file mode 100644 index 0000000000000000000000000000000000000000..d9da64b94eb940b347f5bebdd13cd7ad0fadb19d --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c @@ -0,0 +1,1441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include "bma_pci.h" +#include "edma_host.h" + +static struct edma_user_inft_s *g_user_func[TYPE_MAX] = { 0 }; + +static struct bma_dev_s *g_bma_dev; +static int edma_host_dma_interrupt(struct edma_host_s *edma_host); + +int edmainfo_show(char *buf) +{ + struct bma_user_s *user_ptr = NULL; + struct edma_host_s *host_ptr = NULL; + int len = 0; + __kernel_time_t running_time = 0; + static const char * const host_status[] = { + "deregistered", "registered", "lost"}; + + if (!buf) + return 0; + + if (!g_bma_dev) { + len += sprintf(buf, "EDMA IS NOT SUPPORTED"); + return len; + } + + host_ptr = &g_bma_dev->edma_host; + + GET_SYS_SECONDS(running_time); + running_time -= host_ptr->statistics.init_time; + len += sprintf(buf + len, + "============================EDMA_DRIVER_INFO============================\n"); + len += sprintf(buf + len, "version :" BMA_VERSION "\n"); + + len += sprintf(buf + len, "running_time :%luD %02lu:%02lu:%02lu\n", + running_time / SECONDS_PER_DAY, + running_time % SECONDS_PER_DAY / SECONDS_PER_HOUR, + running_time % SECONDS_PER_HOUR / SECONDS_PER_MINUTE, + running_time % SECONDS_PER_MINUTE); + + len += sprintf(buf + len, "remote_status:%s\n", + host_status[host_ptr->statistics.remote_status]); + len += sprintf(buf + len, "lost_count :%d\n", + host_ptr->statistics.lost_count); + len += sprintf(buf + len, "b2h_int :%d\n", + host_ptr->statistics.b2h_int); + len += sprintf(buf + len, "h2b_int :%d\n", + host_ptr->statistics.h2b_int); + len += sprintf(buf + len, "dma_count :%d\n", + host_ptr->statistics.dma_count); + len += sprintf(buf + len, "recv_bytes :%d\n", + host_ptr->statistics.recv_bytes); + len += sprintf(buf + len, "send_bytes :%d\n", + host_ptr->statistics.send_bytes); + len += sprintf(buf + len, "recv_pkgs :%d\n", + host_ptr->statistics.recv_pkgs); + len += sprintf(buf + len, "send_pkgs :%d\n", + host_ptr->statistics.send_pkgs); + len += sprintf(buf + len, "drop_pkgs :%d\n", + host_ptr->statistics.drop_pkgs); + len += sprintf(buf + len, "fail_count :%d\n", + host_ptr->statistics.failed_count); + len += sprintf(buf + len, "debug :%d\n", debug); + len += sprintf(buf + len, + "================================USER_INFO===============================\n"); + + list_for_each_entry_rcu(user_ptr, &g_bma_dev->priv_list, link) { + len += sprintf(buf + len, + "type: %d\nsub type: %d\nopen:%d\nmax recvmsg nums: %d\ncur recvmsg nums: %d\n", + user_ptr->type, user_ptr->sub_type, + host_ptr->local_open_status[user_ptr->type], + user_ptr->max_recvmsg_nums, + user_ptr->cur_recvmsg_nums); + len += sprintf(buf + len, + "========================================================================\n"); + } + + return len; +} + +int is_edma_b2h_int(struct edma_host_s *edma_host) +{ + struct notify_msg *pnm = NULL; + + if (!edma_host) + return -1; + + pnm = (struct notify_msg *)edma_host->edma_flag; + if (!pnm) { + BMA_LOG(DLOG_ERROR, "pnm is 0\n"); + return -1; + } + + if (IS_EDMA_B2H_INT(pnm->int_flag)) { + CLEAR_EDMA_B2H_INT(pnm->int_flag); + return 0; + } + + return -1; +} + +void edma_int_to_bmc(struct edma_host_s *edma_host) +{ + unsigned int data = 0; + + if (!edma_host) + return; + + edma_host->statistics.h2b_int++; + + data = *(unsigned int *)((char *)edma_host->hostrtc_viraddr + + HOSTRTC_INT_OFFSET); + + data |= 0x00000001; + + *(unsigned int *)((char *)edma_host->hostrtc_viraddr + + HOSTRTC_INT_OFFSET) = data; +} + +static void edma_host_int_to_bmc(struct edma_host_s *edma_host) +{ + struct notify_msg *pnm = NULL; + + if 
(!edma_host) + return; + + pnm = (struct notify_msg *)edma_host->edma_flag; + if (pnm) { + SET_EDMA_H2B_INT(pnm->int_flag); + edma_int_to_bmc(edma_host); + } +} + +static int check_status_dmah2b(struct edma_host_s *edma_host) +{ + unsigned int data = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return 0; + + pdev = edma_host->pdev; + if (!pdev) + return 0; + + (void)pci_read_config_dword(pdev, REG_PCIE1_DMAREAD_STATUS, + (u32 *)&data); + + if (data & (1 << SHIFT_PCIE1_DMAREAD_STATUS)) + return 1; /* ok */ + else + return 0; /* busy */ +} + +static int check_status_dmab2h(struct edma_host_s *edma_host) +{ + unsigned int data = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return 0; + + pdev = edma_host->pdev; + if (!pdev) + return 0; + + (void)pci_read_config_dword(pdev, REG_PCIE1_DMAWRITE_STATUS, + (u32 *)&data); + + if (data & (1 << SHIFT_PCIE1_DMAWRITE_STATUS)) + return 1; /* ok */ + else + return 0; /* busy */ +} + +void clear_int_dmah2b(struct edma_host_s *edma_host) +{ + unsigned int data = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return; + + pdev = edma_host->pdev; + if (!pdev) + return; + + (void)pci_read_config_dword(pdev, REG_PCIE1_DMAREADINT_CLEAR, + (u32 *)&data); + data = data & (~((1 << SHIFT_PCIE1_DMAREADINT_CLEAR))); + data = data | (1 << SHIFT_PCIE1_DMAREADINT_CLEAR); + (void)pci_write_config_dword(pdev, REG_PCIE1_DMAREADINT_CLEAR, data); +} + +void clear_int_dmab2h(struct edma_host_s *edma_host) +{ + unsigned int data = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return; + + pdev = edma_host->pdev; + if (!pdev) + return; + + (void)pci_read_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, + (u32 *)&data); + data = data & (~((1 << SHIFT_PCIE1_DMAWRITEINT_CLEAR))); + data = data | (1 << SHIFT_PCIE1_DMAWRITEINT_CLEAR); + (void)pci_write_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, data); +} + +int edma_host_check_dma_status(enum dma_direction_e dir) +{ + int ret = 0; + + switch (dir) { + case BMC_TO_HOST: + ret = check_status_dmab2h(&g_bma_dev->edma_host); + if (ret == 1) + clear_int_dmab2h(&g_bma_dev->edma_host); + + break; + + case HOST_TO_BMC: + ret = check_status_dmah2b(&g_bma_dev->edma_host); + if (ret == 1) + clear_int_dmah2b(&g_bma_dev->edma_host); + + break; + + default: + BMA_LOG(DLOG_ERROR, "direction failed, dir = %d\n", dir); + ret = -EFAULT; + break; + } + + return ret; +} + +#ifdef USE_DMA + +static int start_transfer_h2b(struct edma_host_s *edma_host, unsigned int len, + unsigned int src_h, unsigned int src_l, + unsigned int dst_h, unsigned int dst_l) +{ + unsigned long flags = 0; + struct pci_dev *pdev = edma_host->pdev; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + /* read engine enable */ + (void)pci_write_config_dword(pdev, 0x99c, 0x00000001); + /* read ch,ch index 0 */ + (void)pci_write_config_dword(pdev, 0xa6c, 0x80000000); + /* ch ctrl,local int enable */ + (void)pci_write_config_dword(pdev, 0xa70, 0x00000008); + /* size */ + (void)pci_write_config_dword(pdev, 0xa78, len); + /* src lower 32b */ + (void)pci_write_config_dword(pdev, 0xa7c, src_l); + /* src upper 32b */ + (void)pci_write_config_dword(pdev, 0xa80, src_h); + /* dst lower 32b */ + (void)pci_write_config_dword(pdev, 0xa84, dst_l); + /* dst upper 32b */ + (void)pci_write_config_dword(pdev, 0xa88, dst_h); + /* start read dma,ch 0 */ + (void)pci_write_config_dword(pdev, 0x9a0, 0x00000000); + spin_unlock_irqrestore(&edma_host->reg_lock, flags); + return 0; +} + +static int start_transfer_b2h(struct edma_host_s *edma_host, unsigned int len, + 
unsigned int src_h, unsigned int src_l, + unsigned int dst_h, unsigned int dst_l) +{ + unsigned long flags = 0; + struct pci_dev *pdev = edma_host->pdev; + + BMA_LOG(DLOG_DEBUG, + "len = 0x%8x,src_h = 0x%8x,src_l = 0x%8x,dst_h = 0x%8x,dst_l = 0x%8x\n", + len, src_h, src_l, dst_h, dst_l); + + spin_lock_irqsave(&edma_host->reg_lock, flags); + /* write engine enable */ + (void)pci_write_config_dword(pdev, 0x97c, 0x00000001); + /* write ch,ch index 0 */ + (void)pci_write_config_dword(pdev, 0xa6c, 0x00000000); + /* ch ctrl,local int enable */ + (void)pci_write_config_dword(pdev, 0xa70, 0x00000008); + /* size */ + (void)pci_write_config_dword(pdev, 0xa78, len); + /* src lower 32b */ + (void)pci_write_config_dword(pdev, 0xa7c, src_l); + /* src upper 32b */ + (void)pci_write_config_dword(pdev, 0xa80, src_h); + /* dst lower 32b */ + (void)pci_write_config_dword(pdev, 0xa84, dst_l); + /* dst upper 32b */ + (void)pci_write_config_dword(pdev, 0xa88, dst_h); + /* start write dma,ch 0 */ + (void)pci_write_config_dword(pdev, 0x980, 0x00000000); + spin_unlock_irqrestore(&edma_host->reg_lock, flags); + + return 0; +} +#endif + +static void start_listtransfer_h2b(struct edma_host_s *edma_host, + unsigned int list_h, unsigned int list_l) +{ + unsigned long flags = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return; + + pdev = edma_host->pdev; + if (!pdev) + return; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + + /* write engine enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x29c, 0x00000001); + /* write list err enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x334, 0x00010000); + /* write ch,ch index 0 */ + (void)pci_write_config_dword(pdev, 0x700 + 0x36c, 0x80000000); + /* ch ctrl,local int enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x370, 0x00000300); + /* list lower 32b */ + (void)pci_write_config_dword(pdev, 0x700 + 0x38c, list_l); + /* list upper 32b */ + (void)pci_write_config_dword(pdev, 0x700 + 0x390, list_h); + /* start write dma,ch 0 */ + (void)pci_write_config_dword(pdev, 0x700 + 0x2a0, 0x00000000); + + spin_unlock_irqrestore(&edma_host->reg_lock, flags); +} + +static void start_listtransfer_b2h(struct edma_host_s *edma_host, + unsigned int list_h, unsigned int list_l) +{ + unsigned long flags = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return; + + pdev = edma_host->pdev; + if (!pdev) + return; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + + /* write engine enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x27c, 0x00000001); + /* write list err enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x300, 0x00000001); + /* write ch,ch index 0 */ + (void)pci_write_config_dword(pdev, 0x700 + 0x36c, 0x00000000); + /* ch ctrl,local int enable */ + (void)pci_write_config_dword(pdev, 0x700 + 0x370, 0x00000300); + /* list lower 32b */ + (void)pci_write_config_dword(pdev, 0x700 + 0x38c, list_l); + /* list upper 32b */ + (void)pci_write_config_dword(pdev, 0x700 + 0x390, list_h); + /* start write dma,ch 0 */ + (void)pci_write_config_dword(pdev, 0x700 + 0x280, 0x00000000); + + spin_unlock_irqrestore(&edma_host->reg_lock, flags); +} + +int edma_host_dma_start(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv) +{ + struct bma_user_s *puser = NULL; + struct bma_dev_s *bma_dev = NULL; + unsigned long flags = 0; + + if (!edma_host || !priv) + return -EFAULT; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + + spin_lock_irqsave(&bma_dev->priv_list_lock, flags); + + list_for_each_entry_rcu(puser, 
&bma_dev->priv_list, link) { + if (puser->dma_transfer) { + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + BMA_LOG(DLOG_ERROR, "type = %d dma is started\n", + puser->type); + + return -EBUSY; + } + } + + priv->user.dma_transfer = 1; + + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + + return 0; +} + +#ifdef USE_DMA + +static int edma_host_dma_h2b(struct edma_host_s *edma_host, + struct bma_dma_addr_s *host_addr, + struct bma_dma_addr_s *bmc_addr) +{ + int ret = 0; + struct notify_msg *pnm = (struct notify_msg *)edma_host->edma_flag; + unsigned long host_h2b_addr = 0; + unsigned long bmc_h2b_addr = 0; + unsigned int bmc_h2b_size = 0; + unsigned int src_h, src_l, dst_h, dst_l; + + if (!host_addr) { + BMA_LOG(DLOG_ERROR, "host_addr is NULL\n"); + return -EFAULT; + } + + BMA_LOG(DLOG_DEBUG, "host_addr->dma_addr = 0x%llx\n", + host_addr->dma_addr); + + if (host_addr->dma_addr) + host_h2b_addr = (unsigned long)(host_addr->dma_addr); + else + host_h2b_addr = edma_host->h2b_addr.dma_addr; + + bmc_h2b_addr = pnm->h2b_addr; + bmc_h2b_size = pnm->h2b_size; + + BMA_LOG(DLOG_DEBUG, + "host_h2b_addr = 0x%lx, dma_data_len = %d, bmc_h2b_addr = 0x%lx, bmc_h2b_size = %d\n", + host_h2b_addr, host_addr->dma_data_len, bmc_h2b_addr, + bmc_h2b_size); + + if (host_addr->dma_data_len > EDMA_DMABUF_SIZE || + bmc_h2b_addr == 0 || + host_addr->dma_data_len > bmc_h2b_size) { + BMA_LOG(DLOG_ERROR, + "dma_data_len too large = %d, bmc_h2b_size = %d\n", + host_addr->dma_data_len, bmc_h2b_size); + return -EFAULT; + } + + edma_host->h2b_state = H2BSTATE_WAITDMA; + + src_h = (unsigned int)((sizeof(unsigned long) == 8) ? + (host_h2b_addr >> 32) : 0); + src_l = (unsigned int)(host_h2b_addr & 0xffffffff); + dst_h = (unsigned int)((sizeof(unsigned long) == 8) ? + (bmc_h2b_addr >> 32) : 0); + dst_l = (unsigned int)(bmc_h2b_addr & 0xffffffff); + (void)start_transfer_h2b(edma_host, + host_addr->dma_data_len, src_h, + src_l, dst_h, dst_l); + + (void)mod_timer(&edma_host->dma_timer, + jiffies_64 + TIMER_INTERVAL_CHECK); + + ret = wait_event_interruptible_timeout(edma_host->wq_dmah2b, + (edma_host->h2b_state == + H2BSTATE_IDLE), + EDMA_DMA_TRANSFER_WAIT_TIMEOUT); + + if (ret == -ERESTARTSYS) { + BMA_LOG(DLOG_ERROR, "eintr 1\n"); + ret = -EINTR; + goto end; + } else if (ret == 0) { + BMA_LOG(DLOG_ERROR, "timeout 2\n"); + ret = -ETIMEDOUT; + goto end; + } else { + ret = 0; + BMA_LOG(DLOG_ERROR, "h2b dma successful\n"); + } + +end: + + return ret; +} + +static int edma_host_dma_b2h(struct edma_host_s *edma_host, + struct bma_dma_addr_s *host_addr, + struct bma_dma_addr_s *bmc_addr) +{ + int ret = 0; + struct notify_msg *pnm = (struct notify_msg *)edma_host->edma_flag; + unsigned long bmc_b2h_addr = 0; + unsigned long host_b2h_addr = 0; + unsigned int src_h, src_l, dst_h, dst_l; + + if (!bmc_addr) + return -EFAULT; + + if (host_addr->dma_addr) + host_b2h_addr = (unsigned long)(host_addr->dma_addr); + else + host_b2h_addr = edma_host->b2h_addr.dma_addr; + + if (bmc_addr->dma_addr) + bmc_b2h_addr = (unsigned long)(bmc_addr->dma_addr); + else + bmc_b2h_addr = pnm->b2h_addr; + + BMA_LOG(DLOG_DEBUG, + "bmc_b2h_addr = 0x%lx, host_b2h_addr = 0x%lx, dma_data_len = %d\n", + bmc_b2h_addr, host_b2h_addr, bmc_addr->dma_data_len); + + if (bmc_addr->dma_data_len > EDMA_DMABUF_SIZE || + bmc_addr->dma_data_len > edma_host->b2h_addr.len) { + BMA_LOG(DLOG_ERROR, + "dma_data_len too large = %d, b2h_addr = %d\n", + host_addr->dma_data_len, edma_host->b2h_addr.len); + return -EFAULT; + } + + edma_host->b2h_state = B2HSTATE_WAITDMA; + + 
src_h = (unsigned int)((sizeof(unsigned long) == 8) ? + (bmc_b2h_addr >> 32) : 0); + src_l = (unsigned int)(bmc_b2h_addr & 0xffffffff); + dst_h = (unsigned int)((sizeof(unsigned long) == 8) ? + (host_b2h_addr >> 32) : 0); + dst_l = (unsigned int)(host_b2h_addr & 0xffffffff); + (void)start_transfer_b2h(edma_host, + bmc_addr->dma_data_len, src_h, + src_l, dst_h, dst_l); + + (void)mod_timer(&edma_host->dma_timer, + jiffies_64 + TIMER_INTERVAL_CHECK); + + ret = wait_event_interruptible_timeout(edma_host->wq_dmab2h, + (edma_host->b2h_state == + B2HSTATE_IDLE), + EDMA_DMA_TRANSFER_WAIT_TIMEOUT); + + if (ret == -ERESTARTSYS) { + BMA_LOG(DLOG_ERROR, "eintr 1\n"); + ret = -EINTR; + goto end; + } else if (ret == 0) { + BMA_LOG(DLOG_ERROR, "timeout 2\n"); + ret = -ETIMEDOUT; + goto end; + } else { + BMA_LOG(DLOG_DEBUG, "b2h dma successful\n"); + } + +end: + + return ret; +} +#endif + +void host_dma_transfer_without_list(struct edma_host_s *edma_host, + struct bma_dma_transfer_s *dma_transfer, + int *return_code) +{ +#ifdef USE_DMA + union transfer_u *transfer = &dma_transfer->transfer; + + switch (dma_transfer->dir) { + case BMC_TO_HOST: + *return_code = edma_host_dma_b2h(edma_host, + &transfer->nolist.host_addr, + &transfer->nolist.bmc_addr); + break; + case HOST_TO_BMC: + *return_code = edma_host_dma_h2b(edma_host, + &transfer->nolist.host_addr, + &transfer->nolist.bmc_addr); + break; + default: + BMA_LOG(DLOG_ERROR, "direction failed, dir = %d\n", + dma_transfer->dir); + *return_code = -EFAULT; + break; + } +#endif +} + +void host_dma_transfer_withlist(struct edma_host_s *edma_host, + struct bma_dma_transfer_s *dma_transfer, + int *return_code) +{ + unsigned int list_h = 0; + unsigned int list_l = 0; + union transfer_u *transfer = &dma_transfer->transfer; + + list_h = (unsigned int)((sizeof(unsigned long) == 8) ? + (transfer->list.dma_addr >> 32) : 0); + list_l = (unsigned int)(transfer->list.dma_addr + & 0xffffffff); + + switch (dma_transfer->dir) { + case BMC_TO_HOST: + start_listtransfer_b2h(edma_host, list_h, list_l); + break; + case HOST_TO_BMC: + start_listtransfer_h2b(edma_host, list_h, list_l); + break; + default: + BMA_LOG(DLOG_ERROR, "direction failed, dir = %d\n", + dma_transfer->dir); + *return_code = -EFAULT; + break; + } +} + +int edma_host_dma_transfer(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer) +{ + int ret = 0; + unsigned long flags = 0; + struct bma_dev_s *bma_dev = NULL; + + if (!edma_host || !priv || !dma_transfer) + return -EFAULT; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + + spin_lock_irqsave(&bma_dev->priv_list_lock, flags); + + if (priv->user.dma_transfer == 0) { + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + BMA_LOG(DLOG_ERROR, "dma_transfer = %hhd\n", + priv->user.dma_transfer); + return -EFAULT; + } + + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + + edma_host->statistics.dma_count++; + + if (dma_transfer->type == DMA_NOT_LIST) { + host_dma_transfer_without_list(edma_host, + dma_transfer, &ret); + } else if (dma_transfer->type == DMA_LIST) { + host_dma_transfer_withlist(edma_host, dma_transfer, &ret); + } else { + BMA_LOG(DLOG_ERROR, "type failed! 
type = %d\n", + dma_transfer->type); + return -EFAULT; + } + + return ret; +} + +void edma_host_reset_dma(struct edma_host_s *edma_host, int dir) +{ + u32 data = 0; + u32 reg_addr = 0; + unsigned long flags = 0; + int count = 0; + struct pci_dev *pdev = NULL; + + if (!edma_host) + return; + + pdev = edma_host->pdev; + if (!pdev) + return; + + if (dir == BMC_TO_HOST) + reg_addr = REG_PCIE1_DMA_WRITE_ENGINE_ENABLE; + else if (dir == HOST_TO_BMC) + reg_addr = REG_PCIE1_DMA_READ_ENGINE_ENABLE; + else + return; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + + (void)pci_read_config_dword(pdev, reg_addr, &data); + data &= ~(1 << SHIFT_PCIE1_DMA_ENGINE_ENABLE); + (void)pci_write_config_dword(pdev, reg_addr, data); + + while (count++ < 10) { + (void)pci_read_config_dword(pdev, reg_addr, &data); + + if ((data & (1 << SHIFT_PCIE1_DMA_ENGINE_ENABLE)) == 0) { + BMA_LOG(DLOG_DEBUG, "reset dma successful\n"); + break; + } + + mdelay(100); + } + + spin_unlock_irqrestore(&edma_host->reg_lock, flags); + BMA_LOG(DLOG_DEBUG, "reset dma reg_addr=0x%x count=%d data=0x%08x\n", + reg_addr, count, data); +} + +int edma_host_dma_stop(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv) +{ + unsigned long flags = 0; + struct bma_dev_s *bma_dev = NULL; + + if (!edma_host || !priv) + return -1; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + + spin_lock_irqsave(&bma_dev->priv_list_lock, flags); + priv->user.dma_transfer = 0; + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + + return 0; +} + +static int edma_host_send_msg(struct edma_host_s *edma_host) +{ + void *vaddr = NULL; + unsigned long flags = 0; + struct edma_mbx_hdr_s *send_mbx_hdr = NULL; + static unsigned long last_timer_record; + + if (!edma_host) + return 0; + + send_mbx_hdr = (struct edma_mbx_hdr_s *)edma_host->edma_send_addr; + + if (send_mbx_hdr->mbxlen > 0) { + if (send_mbx_hdr->mbxlen > HOST_MAX_SEND_MBX_LEN) { + /* shared memory is disabled */ + send_mbx_hdr->mbxlen = 0; + BMA_LOG(DLOG_ERROR, "mbxlen is too long\n"); + return -EFAULT; + } + + if (time_after(jiffies, last_timer_record + 10 * HZ)) { + BMA_LOG(DLOG_ERROR, "no response in 10s, clean msg\n"); + edma_host->statistics.failed_count++; + send_mbx_hdr->mbxlen = 0; + return -EFAULT; + } + + BMA_LOG(DLOG_DEBUG, + "still have msg : mbxlen: %d, msg_send_write: %d\n", + send_mbx_hdr->mbxlen, edma_host->msg_send_write); + + /* resend door bell */ + if (time_after(jiffies, last_timer_record + 5 * HZ)) + edma_host_int_to_bmc(edma_host); + + return -EFAULT; + } + + vaddr = + (void *)((unsigned char *)edma_host->edma_send_addr + + SIZE_OF_MBX_HDR); + + last_timer_record = jiffies; + + spin_lock_irqsave(&edma_host->send_msg_lock, flags); + + if (edma_host->msg_send_write == 0) { + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + return 0; + } + + if (edma_host->msg_send_write > + HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) { + BMA_LOG(DLOG_ERROR, + "Length of send message %u is larger than %lu\n", + edma_host->msg_send_write, + HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR); + edma_host->msg_send_write = 0; + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + return 0; + } + + memcpy(vaddr, edma_host->msg_send_buf, + edma_host->msg_send_write); + + send_mbx_hdr->mbxlen = edma_host->msg_send_write; + edma_host->msg_send_write = 0; + + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + + edma_host_int_to_bmc(edma_host); + + BMA_LOG(DLOG_DEBUG, + "vaddr: %p, mbxlen : %d, msg_send_write: %d\n", vaddr, + send_mbx_hdr->mbxlen, 
edma_host->msg_send_write); + + return -EAGAIN; +} + +#ifdef EDMA_TIMER +#ifdef HAVE_TIMER_SETUP +static void edma_host_timeout(struct timer_list *t) +{ + struct edma_host_s *edma_host = from_timer(edma_host, t, timer); +#else +static void edma_host_timeout(unsigned long data) +{ + struct edma_host_s *edma_host = (struct edma_host_s *)data; +#endif + int ret = 0; + unsigned long flags = 0; + + ret = edma_host_send_msg(edma_host); + if (ret < 0) { + spin_lock_irqsave(&g_bma_dev->edma_host.send_msg_lock, flags); + (void)mod_timer(&edma_host->timer, + jiffies_64 + TIMER_INTERVAL_CHECK); + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + } +} + +#ifdef HAVE_TIMER_SETUP +static void edma_host_heartbeat_timer(struct timer_list *t) +{ + struct edma_host_s *edma_host = from_timer(edma_host, t, + heartbeat_timer); +#else +static void edma_host_heartbeat_timer(unsigned long data) +{ + struct edma_host_s *edma_host = (struct edma_host_s *)data; +#endif + struct edma_statistics_s *edma_stats = &edma_host->statistics; + unsigned int *remote_status = &edma_stats->remote_status; + static unsigned int bmc_heartbeat; + struct notify_msg *pnm = (struct notify_msg *)edma_host->edma_flag; + + if (pnm) { + if (pnm->bmc_registered) { + if ((pnm->host_heartbeat & 7) == 0) { + if (bmc_heartbeat != pnm->bmc_heartbeat) { + if (*remote_status != REGISTERED) { + BMA_LOG(DLOG_DEBUG, + "bmc is registered\n"); + *remote_status = REGISTERED; + } + + bmc_heartbeat = pnm->bmc_heartbeat; + } else { + if (*remote_status == REGISTERED) { + *remote_status = LOST; + edma_stats->lost_count++; + BMA_LOG(DLOG_DEBUG, + "bmc is lost\n"); + } + } + } + } else { + if (*remote_status == REGISTERED) + BMA_LOG(DLOG_DEBUG, "bmc is deregistered\n"); + + *remote_status = DEREGISTERED; + } + + pnm->host_heartbeat++; + } + + (void)mod_timer(&edma_host->heartbeat_timer, + jiffies_64 + HEARTBEAT_TIMER_INTERVAL_CHECK); +} + +#ifdef USE_DMA +#ifdef HAVE_TIMER_SETUP +static void edma_host_dma_timeout(struct timer_list *t) +{ + struct edma_host_s *edma_host = from_timer(edma_host, t, dma_timer); +#else +static void edma_host_dma_timeout(unsigned long data) +{ + struct edma_host_s *edma_host = (struct edma_host_s *)data; +#endif + int ret = 0; + + ret = edma_host_dma_interrupt(edma_host); + if (ret < 0) + (void)mod_timer(&edma_host->dma_timer, + jiffies_64 + DMA_TIMER_INTERVAL_CHECK); +} +#endif +#else + +static int edma_host_thread(void *arg) +{ + struct edma_host_s *edma_host = (struct edma_host_s *)arg; + + BMA_LOG(DLOG_ERROR, "edma host thread\n"); + + while (!kthread_should_stop()) { + wait_for_completion_interruptible_timeout(&edma_host->msg_ready, + 1 * HZ); + edma_host_send_msg(edma_host); + (void)edma_host_dma_interrupt(edma_host); + } + + BMA_LOG(DLOG_ERROR, "edma host thread exiting\n"); + + return 0; +} + +#endif + +int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype) +{ + int ret = 0; + unsigned long flags = 0; + struct edma_host_s *edma_host = NULL; + struct edma_msg_hdr_s *hdr = NULL; + int total_len = msg_len + SIZE_OF_MSG_HDR; + + if (!msg || !g_bma_dev) + return -1; + + edma_host = &g_bma_dev->edma_host; + if (!edma_host) + return -1; + + spin_lock_irqsave(&edma_host->send_msg_lock, flags); + + if (edma_host->msg_send_write + total_len <= + (HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR)) { + hdr = (struct edma_msg_hdr_s *)(edma_host->msg_send_buf + + edma_host->msg_send_write); + hdr->type = TYPE_EDMA_DRIVER; + hdr->sub_type = subtype; + hdr->datalen = msg_len; + + memcpy(hdr->data, msg, msg_len); + + 
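/* The driver message is now staged in msg_send_buf; advance the write offset and kick the send timer so edma_host_send_msg() copies it into the shared send mailbox and rings the BMC doorbell. */ +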
edma_host->msg_send_write += total_len; + + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + + (void)mod_timer(&edma_host->timer, jiffies_64); + BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n", + edma_host->msg_send_write); + } else { + ret = -ENOSPC; + spin_unlock_irqrestore(&edma_host->send_msg_lock, flags); + + BMA_LOG(DLOG_DEBUG, + "msg lost,msg_send_write: %d,msg_len:%d,max_len: %d\n", + edma_host->msg_send_write, total_len, + HOST_MAX_SEND_MBX_LEN); + } + + return ret; +} + +static int edma_host_insert_recv_msg(struct edma_host_s *edma_host, + struct edma_msg_hdr_s *msg_header) +{ + unsigned long flags = 0, msg_flags = 0; + struct bma_dev_s *bma_dev = NULL; + struct bma_priv_data_s *priv = NULL; + struct bma_user_s *puser = NULL; + struct list_head *entry = NULL; + struct edma_recv_msg_s *msg_tmp = NULL; + struct bma_user_s usertmp = { }; + struct edma_recv_msg_s *recv_msg = NULL; + + if (!edma_host || !msg_header || + msg_header->datalen > CDEV_MAX_WRITE_LEN) { + return -EFAULT; + } + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + + recv_msg = kmalloc(sizeof(*recv_msg) + msg_header->datalen, GFP_ATOMIC); + if (!recv_msg) { + BMA_LOG(DLOG_ERROR, "malloc recv_msg failed\n"); + return -ENOMEM; + } + + recv_msg->msg_len = msg_header->datalen; + memcpy(recv_msg->msg_data, msg_header->data, + msg_header->datalen); + + spin_lock_irqsave(&bma_dev->priv_list_lock, flags); + list_for_each_entry_rcu(puser, &bma_dev->priv_list, link) { + if (puser->type != msg_header->type || + puser->sub_type != msg_header->sub_type) + continue; + + priv = list_entry(puser, struct bma_priv_data_s, user); + + memcpy(&usertmp, puser, + sizeof(struct bma_user_s)); + + spin_lock_irqsave(&priv->recv_msg_lock, msg_flags); + + if (puser->cur_recvmsg_nums >= puser->max_recvmsg_nums || + puser->cur_recvmsg_nums >= MAX_RECV_MSG_NUMS) { + entry = priv->recv_msgs.next; + msg_tmp = + list_entry(entry, struct edma_recv_msg_s, + link); + list_del(entry); + puser->cur_recvmsg_nums--; + kfree(msg_tmp); + } + + if (edma_host->local_open_status[puser->type] + == DEV_OPEN) { + list_add_tail(&recv_msg->link, &priv->recv_msgs); + puser->cur_recvmsg_nums++; + usertmp.cur_recvmsg_nums = + puser->cur_recvmsg_nums; + spin_unlock_irqrestore(&priv->recv_msg_lock, + msg_flags); + + } else { + spin_unlock_irqrestore(&priv->recv_msg_lock, + msg_flags); + break; + } + + wake_up_interruptible(&priv->wait); + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + + BMA_LOG(DLOG_DEBUG, + "find user, type = %d, sub_type = %d, user_id = %d, insert msg\n", + usertmp.type, usertmp.sub_type, + usertmp.user_id); + BMA_LOG(DLOG_DEBUG, + "msg_len = %d, cur_recvmsg_nums: %d, max_recvmsg_nums: %d\n", + recv_msg->msg_len, usertmp.cur_recvmsg_nums, + usertmp.max_recvmsg_nums); + + return 0; + } + + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + kfree(recv_msg); + edma_host->statistics.drop_pkgs++; + BMA_LOG(DLOG_DEBUG, + "insert msg failed! 
no matching user, type = %d, sub_type = %d\n", + msg_header->type, msg_header->sub_type); + + return -EFAULT; +} + +int edma_host_recv_msg(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv, + struct edma_recv_msg_s **msg) +{ + struct list_head *entry = NULL; + struct edma_recv_msg_s *msg_tmp = NULL; + + if (!edma_host || !priv || !msg) + return -EAGAIN; + + if (list_empty(&priv->recv_msgs)) { + priv->user.cur_recvmsg_nums = 0; + BMA_LOG(DLOG_DEBUG, "recv msgs empty\n"); + return -EAGAIN; + } + + entry = priv->recv_msgs.next; + msg_tmp = list_entry(entry, struct edma_recv_msg_s, link); + list_del(entry); + + if (priv->user.cur_recvmsg_nums > 0) + priv->user.cur_recvmsg_nums--; + + *msg = msg_tmp; + + BMA_LOG(DLOG_DEBUG, "msg->msg_len = %d\n", (int)msg_tmp->msg_len); + + return 0; +} + +static int edma_host_msg_process(struct edma_host_s *edma_host, + struct edma_msg_hdr_s *msg_header) +{ + struct bma_user_s *user_ptr = NULL; + char drv_msg[TYPE_MAX * 2 + 1] = { 0 }; + + if (!edma_host || !msg_header) + return 0; + + if (msg_header->type != TYPE_EDMA_DRIVER) + return -1; + + if (msg_header->sub_type != DEV_OPEN_STATUS_REQ) + return 0; + + list_for_each_entry_rcu(user_ptr, &g_bma_dev->priv_list, link) { + drv_msg[drv_msg[0] * 2 + 1] = user_ptr->type; + drv_msg[drv_msg[0] * 2 + 2] = + edma_host->local_open_status[user_ptr->type]; + BMA_LOG(DLOG_DEBUG, + "send DEV_OPEN_STATUS_ANS index=%d type=%d status=%d\n", + drv_msg[0], drv_msg[drv_msg[0] * 2 + 1], + drv_msg[drv_msg[0] * 2 + 2]); + drv_msg[0]++; + } + + if (drv_msg[0]) { + (void)edma_host_send_driver_msg((void *)drv_msg, + drv_msg[0] * 2 + + 1, + DEV_OPEN_STATUS_ANS); + BMA_LOG(DLOG_DEBUG, + "send DEV_OPEN_STATUS_ANS %d\n", + drv_msg[0]); + } + + return 0; +} + +void edma_host_isr_tasklet(unsigned long data) +{ + int result = 0; + u16 len = 0; + u16 off = 0; + u16 msg_cnt = 0; + struct edma_mbx_hdr_s *recv_mbx_hdr = NULL; + struct edma_host_s *edma_host = (struct edma_host_s *)data; + struct edma_msg_hdr_s *msg_header = NULL; + unsigned char *ptr = NULL; + + if (!edma_host) + return; + + recv_mbx_hdr = (struct edma_mbx_hdr_s *)(edma_host->edma_recv_addr); + msg_header = + (struct edma_msg_hdr_s *)((char *)(edma_host->edma_recv_addr) + + SIZE_OF_MBX_HDR + recv_mbx_hdr->mbxoff); + + off = readw((unsigned char *)edma_host->edma_recv_addr + + EDMA_B2H_INT_FLAG); + len = readw((unsigned char *)edma_host->edma_recv_addr) - off; + + BMA_LOG(DLOG_DEBUG, + " edma_host->edma_recv_addr = %p, len = %d, off = %d, mbxlen = %d\n", + edma_host->edma_recv_addr, len, off, recv_mbx_hdr->mbxlen); + edma_host->statistics.recv_bytes += (recv_mbx_hdr->mbxlen - off); + + if (len == 0) { + writel(0, (void *)(edma_host->edma_recv_addr)); + return; + } + + while (recv_mbx_hdr->mbxlen - off) { + if (len == 0) { + BMA_LOG(DLOG_DEBUG, " receive done\n"); + break; + } + + if (len < (SIZE_OF_MSG_HDR + msg_header->datalen)) { + BMA_LOG(DLOG_ERROR, " len too small, is %d\n", len); + break; + } + + edma_host->statistics.recv_pkgs++; + + if (edma_host_msg_process(edma_host, msg_header) == -1) { + result = edma_host_insert_recv_msg(edma_host, + msg_header); + if (result < 0) + BMA_LOG(DLOG_DEBUG, + "edma_host_insert_recv_msg failed\n"); + } + + BMA_LOG(DLOG_DEBUG, "len = %d\n", len); + BMA_LOG(DLOG_DEBUG, "off = %d\n", off); + len -= (msg_header->datalen + SIZE_OF_MSG_HDR); + BMA_LOG(DLOG_DEBUG, + "msg_header->datalen = %d, SIZE_OF_MSG_HDR=%d\n", + msg_header->datalen, (int)SIZE_OF_MSG_HDR); + off += (msg_header->datalen + SIZE_OF_MSG_HDR); + + msg_cnt++; + + ptr = 
(unsigned char *)msg_header; + msg_header = (struct edma_msg_hdr_s *)(ptr + + (msg_header->datalen + + SIZE_OF_MSG_HDR)); + + if (msg_cnt > 2) { + recv_mbx_hdr->mbxoff = off; + BMA_LOG(DLOG_DEBUG, "len = %d\n", len); + BMA_LOG(DLOG_DEBUG, "off = %d\n", off); + BMA_LOG(DLOG_DEBUG, "off works\n"); + + tasklet_hi_schedule(&edma_host->tasklet); + + break; + } + + if (!len) { + writel(0, (void *)(edma_host->edma_recv_addr)); + recv_mbx_hdr->mbxoff = 0; + } + } +} + +static int edma_host_dma_interrupt(struct edma_host_s *edma_host) +{ + if (!edma_host) + return 0; + + if (check_status_dmah2b(edma_host)) { + clear_int_dmah2b(edma_host); + + edma_host->h2b_state = H2BSTATE_IDLE; + wake_up_interruptible(&edma_host->wq_dmah2b); + return 0; + } + + if (check_status_dmab2h(edma_host)) { + clear_int_dmab2h(edma_host); + + edma_host->b2h_state = B2HSTATE_IDLE; + wake_up_interruptible(&edma_host->wq_dmab2h); + + return 0; + } + + return -EAGAIN; +} + +irqreturn_t edma_host_irq_handle(struct edma_host_s *edma_host) +{ + if (edma_host) { + (void)edma_host_dma_interrupt(edma_host); + + tasklet_hi_schedule(&edma_host->tasklet); + } + + return IRQ_HANDLED; +} + +struct edma_user_inft_s *edma_host_get_user_inft(u32 type) +{ + if (type >= TYPE_MAX) { + BMA_LOG(DLOG_ERROR, "type error %d\n", type); + return NULL; + } + + return g_user_func[type]; +} + +int edma_host_user_register(u32 type, struct edma_user_inft_s *func) +{ + if (type >= TYPE_MAX) { + BMA_LOG(DLOG_ERROR, "type error %d\n", type); + return -EFAULT; + } + + if (!func) { + BMA_LOG(DLOG_ERROR, "func is NULL\n"); + return -EFAULT; + } + + g_user_func[type] = func; + + return 0; +} + +int edma_host_user_unregister(u32 type) +{ + if (type >= TYPE_MAX) { + BMA_LOG(DLOG_ERROR, "type error %d\n", type); + return -EFAULT; + } + + g_user_func[type] = NULL; + + return 0; +} + +int edma_host_init(struct edma_host_s *edma_host) +{ + int ret = 0; + struct bma_dev_s *bma_dev = NULL; + struct notify_msg *pnm = NULL; + + if (!edma_host) + return -1; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + g_bma_dev = bma_dev; + + edma_host->pdev = bma_dev->bma_pci_dev->pdev; + + edma_host->msg_send_buf = kmalloc(HOST_MAX_SEND_MBX_LEN, GFP_KERNEL); + if (!edma_host->msg_send_buf) { + BMA_LOG(DLOG_ERROR, "malloc msg_send_buf failed!"); + ret = -ENOMEM; + return ret; + } + + edma_host->msg_send_write = 0; + /* init send_msg_lock before timer setup */ + spin_lock_init(&edma_host->send_msg_lock); + + tasklet_init(&edma_host->tasklet, + (void (*)(unsigned long))edma_host_isr_tasklet, + (unsigned long)edma_host); + + edma_host->edma_flag = bma_dev->bma_pci_dev->edma_swap_addr; + + edma_host->edma_send_addr = + (void *)((unsigned char *)bma_dev->bma_pci_dev->edma_swap_addr + + HOST_DMA_FLAG_LEN); + memset(edma_host->edma_send_addr, 0, SIZE_OF_MBX_HDR); + + edma_host->edma_recv_addr = + (void *)((unsigned char *)edma_host->edma_send_addr + + HOST_MAX_SEND_MBX_LEN); + + BMA_LOG(DLOG_DEBUG, + "edma_flag = %p, edma_send_addr = %p, edma_recv_addr = %p\n", + edma_host->edma_flag, edma_host->edma_send_addr, + edma_host->edma_recv_addr); + + edma_host->hostrtc_viraddr = bma_dev->bma_pci_dev->hostrtc_viraddr; + + init_waitqueue_head(&edma_host->wq_dmah2b); + init_waitqueue_head(&edma_host->wq_dmab2h); + + spin_lock_init(&edma_host->reg_lock); + + edma_host->h2b_state = H2BSTATE_IDLE; + edma_host->b2h_state = B2HSTATE_IDLE; + +#ifdef EDMA_TIMER + #ifdef HAVE_TIMER_SETUP + timer_setup(&edma_host->timer, edma_host_timeout, 0); + #else + setup_timer(&edma_host->timer, 
edma_host_timeout, + (unsigned long)edma_host); + #endif + (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK); +#ifdef USE_DMA + #ifdef HAVE_TIMER_SETUP + timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0); + + #else + setup_timer(&edma_host->dma_timer, edma_host_dma_timeout, + (unsigned long)edma_host); + #endif + (void)mod_timer(&edma_host->dma_timer, + jiffies_64 + DMA_TIMER_INTERVAL_CHECK); +#endif + +#else + init_completion(&edma_host->msg_ready); + + edma_host->edma_thread = + kthread_run(edma_host_thread, (void *)edma_host, "edma_host_msg"); + + if (IS_ERR(edma_host->edma_thread)) { + BMA_LOG(DLOG_ERROR, "kernel_run edma_host_msg failed\n"); + return PTR_ERR(edma_host->edma_thread); + } +#endif + + #ifdef HAVE_TIMER_SETUP + timer_setup(&edma_host->heartbeat_timer, + edma_host_heartbeat_timer, 0); + #else + setup_timer(&edma_host->heartbeat_timer, + edma_host_heartbeat_timer, + (unsigned long)edma_host); + #endif + (void)mod_timer(&edma_host->heartbeat_timer, + jiffies_64 + HEARTBEAT_TIMER_INTERVAL_CHECK); + + pnm = (struct notify_msg *)edma_host->edma_flag; + if (pnm) + pnm->host_registered = REGISTERED; + + GET_SYS_SECONDS(edma_host->statistics.init_time); + +#ifdef EDMA_TIMER + BMA_LOG(DLOG_DEBUG, "timer ok\n"); +#else + BMA_LOG(DLOG_ERROR, "thread ok\n"); +#endif + return 0; +} + +void edma_host_cleanup(struct edma_host_s *edma_host) +{ + struct bma_dev_s *bma_dev = NULL; + struct notify_msg *pnm = NULL; + + if (!edma_host) + return; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + (void)del_timer_sync(&edma_host->heartbeat_timer); + pnm = (struct notify_msg *)edma_host->edma_flag; + + if (pnm) + pnm->host_registered = DEREGISTERED; + + tasklet_kill(&edma_host->tasklet); + + kfree(edma_host->msg_send_buf); + edma_host->msg_send_buf = NULL; +#ifdef EDMA_TIMER + (void)del_timer_sync(&edma_host->timer); +#ifdef USE_DMA + (void)del_timer_sync(&edma_host->dma_timer); +#endif + +#else + kthread_stop(edma_host->edma_thread); + + complete(&edma_host->msg_ready); +#endif +} diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h new file mode 100644 index 0000000000000000000000000000000000000000..9d86b5b0fdd6d0ddc5e379d929751d319c825bec --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _EDMA_HOST_H_ +#define _EDMA_HOST_H_ + +#include "bma_include.h" +#include "../include/bma_ker_intf.h" + +#define EDMA_TIMER + +#ifndef IN +#define IN +#endif + +#ifndef OUT +#define OUT +#endif + +#ifndef UNUSED +#define UNUSED +#endif + +/* vm_flags in vm_area_struct, see mm_types.h. 
*/ +#define VM_NONE 0x00000000 + +#define VM_READ 0x00000001 /* currently active flags */ +#define VM_WRITE 0x00000002 +#define VM_EXEC 0x00000004 +#define VM_SHARED 0x00000008 + +#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ +#define VM_MAYWRITE 0x00000020 +#define VM_MAYEXEC 0x00000040 +#define VM_MAYSHARE 0x00000080 + +#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ +/* Page-ranges managed without "struct page", just pure PFN */ +#define VM_PFNMAP 0x00000400 +#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ + +#define VM_LOCKED 0x00002000 +#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ + + /* Used by sys_madvise() */ +#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ +/* App will not benefit from clustered reads */ +#define VM_RAND_READ 0x00010000 + +#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ +#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ +#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ +#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ +#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ +#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ +#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ +/* Can contain "struct page" and pure PFN pages */ +#define VM_MIXEDMAP 0x10000000 + +#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +#if defined(CONFIG_X86) +/* PAT reserves whole VMA at once (x86) */ +#define VM_PAT VM_ARCH_1 +#elif defined(CONFIG_PPC) +#define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ +#elif defined(CONFIG_PARISC) +#define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_METAG) +#define VM_GROWSUP VM_ARCH_1 +#elif defined(CONFIG_IA64) +#define VM_GROWSUP VM_ARCH_1 +#elif !defined(CONFIG_MMU) +#define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ +#endif + +#ifndef VM_GROWSUP +#define VM_GROWSUP VM_NONE +#endif + +/* Bits set in the VMA until the stack is in its final location */ +#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) + +#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ +#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS +#endif + +#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) +#define VM_NORMAL_READ_HINT(v) (!((v)->vm_flags & VM_READHINTMASK)) +#define VM_SEQUENTIAL_READ_HINT(v) ((v)->vm_flags & VM_SEQ_READ) +#define VM_RANDOM_READ_HINT(v) ((v)->vm_flags & VM_RAND_READ) + +#define REG_PCIE1_DMAREAD_ENABLE 0xa18 +#define SHIFT_PCIE1_DMAREAD_ENABLE 0 + +#define REG_PCIE1_DMAWRITE_ENABLE 0x9c4 +#define SHIFT_PCIE1_DMAWRITE_ENABLE 0 + +#define REG_PCIE1_DMAREAD_STATUS 0xa10 +#define SHIFT_PCIE1_DMAREAD_STATUS 0 +#define REG_PCIE1_DMAREADINT_CLEAR 0xa1c +#define SHIFT_PCIE1_DMAREADINT_CLEAR 0 + +#define REG_PCIE1_DMAWRITE_STATUS 0x9bc +#define SHIFT_PCIE1_DMAWRITE_STATUS 0 +#define REG_PCIE1_DMAWRITEINT_CLEAR 0x9c8 +#define SHIFT_PCIE1_DMAWRITEINT_CLEAR 0 + +#define REG_PCIE1_DMA_READ_ENGINE_ENABLE (0x99c) +#define SHIFT_PCIE1_DMA_ENGINE_ENABLE (0) +#define REG_PCIE1_DMA_WRITE_ENGINE_ENABLE (0x97C) + +#define HOSTRTC_INT_OFFSET 0x10 + +#define H2BSTATE_IDLE 0 +#define H2BSTATE_WAITREADY 1 +#define H2BSTATE_WAITDMA 2 +#define H2BSTATE_WAITACK 3 +#define H2BSTATE_ERROR 4 + +#define B2HSTATE_IDLE 0 +#define B2HSTATE_WAITREADY 1 +#define B2HSTATE_WAITRECV 2 +#define B2HSTATE_WAITDMA 3 +#define B2HSTATE_ERROR 4 + +#define PAGE_ORDER 
8 +#define EDMA_DMABUF_SIZE (1 << (PAGE_SHIFT + PAGE_ORDER)) + +#define EDMA_DMA_TRANSFER_WAIT_TIMEOUT (10 * HZ) +#define TIMEOUT_WAIT_NOSIGNAL 2 + +#define TIMER_INTERVAL_CHECK (HZ / 10) +#define DMA_TIMER_INTERVAL_CHECK 50 +#define HEARTBEAT_TIMER_INTERVAL_CHECK HZ + +#define EDMA_PCI_MSG_LEN (56 * 1024) + +#define HOST_DMA_FLAG_LEN (64) + +#define HOST_MAX_SEND_MBX_LEN (40 * 1024) +#define BMC_MAX_RCV_MBX_LEN HOST_MAX_SEND_MBX_LEN + +#define HOST_MAX_RCV_MBX_LEN (16 * 1024) +#define BMC_MAX_SEND_MBX_LEN HOST_MAX_RCV_MBX_LEN +#define CDEV_MAX_WRITE_LEN (4 * 1024) + +#define HOST_MAX_MSG_LENGTH 272 + +#define EDMA_MMAP_H2B_DMABUF 0xf1000000 + +#define EDMA_MMAP_B2H_DMABUF 0xf2000000 + +#define EDMA_IOC_MAGIC 'e' + +#define EDMA_H_REGISTER_TYPE _IOW(EDMA_IOC_MAGIC, 100, unsigned long) + +#define EDMA_H_UNREGISTER_TYPE _IOW(EDMA_IOC_MAGIC, 101, unsigned long) + +#define EDMA_H_DMA_START _IOW(EDMA_IOC_MAGIC, 102, unsigned long) + +#define EDMA_H_DMA_TRANSFER _IOW(EDMA_IOC_MAGIC, 103, unsigned long) + +#define EDMA_H_DMA_STOP _IOW(EDMA_IOC_MAGIC, 104, unsigned long) + +#define U64ADDR_H(addr) ((((u64)addr) >> 32) & 0xffffffff) +#define U64ADDR_L(addr) ((addr) & 0xffffffff) + +struct bma_register_dev_type_s { + u32 type; + u32 sub_type; +}; + +struct edma_mbx_hdr_s { + u16 mbxlen; + u16 mbxoff; + u8 reserve[28]; +} __packed; + +#define SIZE_OF_MBX_HDR (sizeof(struct edma_mbx_hdr_s)) + +struct edma_recv_msg_s { + struct list_head link; + u32 msg_len; + unsigned char msg_data[0]; +}; + +struct edma_dma_addr_s { + void *kvaddr; + dma_addr_t dma_addr; + u32 len; +}; + +struct edma_msg_hdr_s { + u32 type; + u32 sub_type; + u8 user_id; + u8 dma_flag; + u8 reserve1[2]; + u32 datalen; + u8 data[0]; +}; + +#define SIZE_OF_MSG_HDR (sizeof(struct edma_msg_hdr_s)) + +#pragma pack(1) + +#define IS_EDMA_B2H_INT(flag) ((flag) & 0x02) +#define CLEAR_EDMA_B2H_INT(flag) ((flag) = (flag) & 0xfffffffd) +#define SET_EDMA_H2B_INT(flag) ((flag) = (flag) | 0x01) +#define EDMA_B2H_INT_FLAG 0x02 + +struct notify_msg { + unsigned int host_registered; + unsigned int host_heartbeat; + unsigned int bmc_registered; + unsigned int bmc_heartbeat; + unsigned int int_flag; + + unsigned int reservrd5; + unsigned int h2b_addr; + unsigned int h2b_size; + unsigned int h2b_rsize; + unsigned int b2h_addr; + unsigned int b2h_size; + unsigned int b2h_rsize; +}; + +#pragma pack() + +struct edma_statistics_s { + unsigned int remote_status; + __kernel_time_t init_time; + unsigned int h2b_int; + unsigned int b2h_int; + unsigned int recv_bytes; + unsigned int send_bytes; + unsigned int send_pkgs; + unsigned int recv_pkgs; + unsigned int failed_count; + unsigned int drop_pkgs; + unsigned int dma_count; + unsigned int lost_count; +}; + +struct edma_host_s { + struct pci_dev *pdev; + + struct tasklet_struct tasklet; + + void __iomem *hostrtc_viraddr; + + void __iomem *edma_flag; + void __iomem *edma_send_addr; + void __iomem *edma_recv_addr; +#ifdef USE_DMA + struct timer_list dma_timer; +#endif + + struct timer_list heartbeat_timer; + +#ifdef EDMA_TIMER + struct timer_list timer; +#else + struct completion msg_ready; /* to sleep thread on */ + struct task_struct *edma_thread; +#endif + /* spinlock for send msg buf */ + spinlock_t send_msg_lock; + unsigned char *msg_send_buf; + unsigned int msg_send_write; + + /* DMA */ + wait_queue_head_t wq_dmah2b; + wait_queue_head_t wq_dmab2h; + + /* spinlock for read pci register */ + spinlock_t reg_lock; + int h2b_state; + int b2h_state; + struct edma_dma_addr_s h2b_addr; + struct edma_dma_addr_s 
b2h_addr; + + struct proc_dir_entry *proc_edma_dir; + + struct edma_statistics_s statistics; + unsigned char local_open_status[TYPE_MAX]; + unsigned char remote_open_status[TYPE_MAX]; +}; + +struct edma_user_inft_s { + /* register user */ + int (*user_register)(struct bma_priv_data_s *priv); + + /* unregister user */ + void (*user_unregister)(struct bma_priv_data_s *priv); + + /* add msg */ + int (*add_msg)(void *msg, size_t msg_len); +}; + +int is_edma_b2h_int(struct edma_host_s *edma_host); +void edma_int_to_bmc(struct edma_host_s *edma_host); +int edma_host_mmap(struct edma_host_s *edma_hos, struct file *filp, + struct vm_area_struct *vma); +int edma_host_copy_msg(struct edma_host_s *edma_host, void *msg, + size_t msg_len); +int edma_host_add_msg(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv, void *msg, size_t msg_len); +int edma_host_recv_msg(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv, + struct edma_recv_msg_s **msg); +void edma_host_isr_tasklet(unsigned long data); +int edma_host_check_dma_status(enum dma_direction_e dir); +int edma_host_dma_start(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv); +int edma_host_dma_transfer(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer); +int edma_host_dma_stop(struct edma_host_s *edma_host, + struct bma_priv_data_s *priv); +irqreturn_t edma_host_irq_handle(struct edma_host_s *edma_host); +struct edma_user_inft_s *edma_host_get_user_inft(u32 type); +int edma_host_user_register(u32 type, struct edma_user_inft_s *func); +int edma_host_user_unregister(u32 type); +int edma_host_init(struct edma_host_s *edma_host); +void edma_host_cleanup(struct edma_host_s *edma_host); +int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype); +void edma_host_reset_dma(struct edma_host_s *edma_host, int dir); +void clear_int_dmah2b(struct edma_host_s *edma_host); +void clear_int_dmab2h(struct edma_host_s *edma_host); + +enum EDMA_STATUS { + DEREGISTERED = 0, + REGISTERED = 1, + LOST, +}; +#endif diff --git a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h new file mode 100644 index 0000000000000000000000000000000000000000..d1df99b0c9fd5046ad5014477b8f55ab2f04a4bc --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _BMA_KER_INTF_H_ +#define _BMA_KER_INTF_H_ + +typedef long __kernel_time_t; +#define BAD_FUNC_ADDR(x) ((0xFFFFFFFF == (x)) || (0 == (x))) + +enum { + /* 0 -127 msg */ + TYPE_LOGIC_PARTITION = 0, + TYPE_UPGRADE = 1, + TYPE_CDEV = 2, + TYPE_VETH = 0x40, + TYPE_MAX = 128, + + TYPE_KBOX = 129, + TYPE_EDMA_DRIVER = 130, + TYPE_UNKNOWN = 0xff, +}; + +enum dma_direction_e { + BMC_TO_HOST = 0, + HOST_TO_BMC = 1, +}; + +enum dma_type_e { + DMA_NOT_LIST = 0, + DMA_LIST = 1, +}; + +enum intr_mod { + INTR_DISABLE = 0, + INTR_ENABLE = 1, +}; + +struct bma_dma_addr_s { + dma_addr_t dma_addr; + u32 dma_data_len; +}; + +struct dma_transfer_s { + struct bma_dma_addr_s host_addr; + struct bma_dma_addr_s bmc_addr; +}; + +struct dmalist_transfer_s { + dma_addr_t dma_addr; +}; + +union transfer_u { + struct dma_transfer_s nolist; + struct dmalist_transfer_s list; +}; + +struct bma_dma_transfer_s { + enum dma_type_e type; + enum dma_direction_e dir; + union transfer_u transfer; +}; + +int bma_intf_register_int_notifier(struct notifier_block *nb); +void bma_intf_unregister_int_notifier(struct notifier_block *nb); +int bma_intf_register_type(u32 type, u32 sub_type, enum intr_mod support_int, + void **handle); +int bma_intf_unregister_type(void **handle); +int bma_intf_check_dma_status(enum dma_direction_e dir); +int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer); +int bma_intf_int_to_bmc(void *handle); +void bma_intf_set_open_status(void *handle, int s); +int bma_intf_is_link_ok(void); +void bma_intf_reset_dma(enum dma_direction_e dir); +void bma_intf_clear_dma_int(enum dma_direction_e dir); + +int bma_cdev_recv_msg(void *handle, char __user *data, size_t count); +int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len); + +unsigned int bma_cdev_check_recv(void *handle); +void *bma_cdev_get_wait_queue(void *handle); +int bma_intf_check_edma_supported(void); +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/Makefile b/drivers/net/ethernet/huawei/bma/kbox_drv/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7d908d5b7f4bef4c6e25cc315ab3ac1e82813f24 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_BMA) += host_kbox_drv.o +host_kbox_drv-y := kbox_main.o kbox_ram_drive.o kbox_ram_image.o kbox_ram_op.o kbox_printk.o kbox_dump.o kbox_hook.o kbox_panic.o +ifdef CONFIG_X86 +host_kbox_drv-y += kbox_mce.o +endif \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.c new file mode 100644 index 0000000000000000000000000000000000000000..1f3a73ca9d1f5c4892da5ad8ac33cb81129b8b26 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include /* system_utsname */ +#include /* struct rtc_time */ +#include "kbox_include.h" +#include "kbox_main.h" +#include "kbox_printk.h" +#include "kbox_ram_image.h" +#include "kbox_ram_op.h" +#include "kbox_dump.h" +#include "kbox_panic.h" + +#ifdef CONFIG_X86 +#include "kbox_mce.h" +#endif + +#define THREAD_TMP_BUF_SIZE 256 + +static DEFINE_SPINLOCK(g_dump_lock); + +static const char g_day_in_month[] = { + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 +}; + +#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400) +#define LEAP_YEAR(year) \ + ((!((year) % 4) && ((year) % 100)) || !((year) % 400)) +#define MONTH_DAYS(month, year) \ + (g_day_in_month[(month)] + (int)(LEAP_YEAR(year) && (month == 1))) + +static void kbox_show_kernel_version(void) +{ + (void)kbox_dump_painc_info + ("\nOS : %s,\nRelease : %s,\nVersion : %s,\n", + init_uts_ns.name.sysname, + init_uts_ns.name.release, + init_uts_ns.name.version); + (void)kbox_dump_painc_info + ("Machine : %s,\nNodename : %s\n", + init_uts_ns.name.machine, + init_uts_ns.name.nodename); +} + +static void kbox_show_version(void) +{ + (void)kbox_dump_painc_info("\nKBOX_VERSION : %s\n", + KBOX_VERSION); +} + +static void kbox_show_time_stamps(void) +{ + struct rtc_time rtc_time_val = { }; + struct timespec64 uptime; + + ktime_get_coarse_real_ts64(&uptime); + rtc_time64_to_tm(uptime.tv_sec, &rtc_time_val); + + (void)kbox_dump_painc_info + ("Current time : %04d-%02d-%02d %02d:%02d:%02d\n", + rtc_time_val.tm_year + 1900, rtc_time_val.tm_mon + 1, + rtc_time_val.tm_mday, rtc_time_val.tm_hour, + rtc_time_val.tm_min, rtc_time_val.tm_sec); +} + +void kbox_dump_event(enum kbox_error_type_e type, unsigned long event, + const char *msg) +{ + if (!spin_trylock(&g_dump_lock)) + return; + + (void)kbox_dump_painc_info("\n====kbox begin dumping...====\n"); + + switch (type) { +#ifdef CONFIG_X86 + case KBOX_MCE_EVENT: + + kbox_handle_mce_dump(msg); + + break; +#endif + + case KBOX_OPPS_EVENT: + + break; + case KBOX_PANIC_EVENT: + if (kbox_handle_panic_dump(msg) == KBOX_FALSE) + goto end; + + break; + default: + break; + } + + kbox_show_kernel_version(); + + kbox_show_version(); + + kbox_show_time_stamps(); + + (void)kbox_dump_painc_info("\n====kbox end dump====\n"); + + kbox_output_syslog_info(); + kbox_output_printk_info(); + +end: + spin_unlock(&g_dump_lock); +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.h new file mode 100644 index 0000000000000000000000000000000000000000..cba31377fbf33b3ae3bb86ea6cbc211d8848679b --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _KBOX_DUMP_H_ +#define _KBOX_DUMP_H_ + +#define DUMPSTATE_MCE_RESET 1 +#define DUMPSTATE_OPPS_RESET 2 +#define DUMPSTATE_PANIC_RESET 3 + +enum kbox_error_type_e { + KBOX_MCE_EVENT = 1, + KBOX_OPPS_EVENT, + KBOX_PANIC_EVENT +}; + +int kbox_dump_thread_info(const char *fmt, ...); +void kbox_dump_event(enum kbox_error_type_e type, unsigned long event, + const char *msg); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.c new file mode 100644 index 0000000000000000000000000000000000000000..b2acdf24188bb2a07c1abfe4275c319bd02e9859 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "kbox_include.h" +#include "kbox_dump.h" +#include "kbox_hook.h" + +int panic_notify(struct notifier_block *this, + unsigned long event, void *msg); + +static int die_notify(struct notifier_block *self, + unsigned long val, void *data); + +static struct notifier_block g_panic_nb = { + .notifier_call = panic_notify, + .priority = 100, +}; + +static struct notifier_block g_die_nb = { + .notifier_call = die_notify, +}; + +int panic_notify(struct notifier_block *pthis, unsigned long event, void *msg) +{ + UNUSED(pthis); + UNUSED(event); + + kbox_dump_event(KBOX_PANIC_EVENT, DUMPSTATE_PANIC_RESET, + (const char *)msg); + + return NOTIFY_OK; +} + +int die_notify(struct notifier_block *self, unsigned long val, void *data) +{ + struct kbox_die_args *args = (struct kbox_die_args *)data; + + if (!args) + return NOTIFY_OK; + + switch (val) { + case 1: + break; + case 5: + if (strcmp(args->str, "nmi") == 0) + return NOTIFY_OK; +#ifdef CONFIG_X86 + kbox_dump_event(KBOX_MCE_EVENT, DUMPSTATE_MCE_RESET, args->str); +#endif + break; + + default: + break; + } + + return NOTIFY_OK; +} + +int kbox_register_hook(void) +{ + int ret = 0; + + ret = atomic_notifier_chain_register(&panic_notifier_list, &g_panic_nb); + if (ret) + KBOX_MSG("atomic_notifier_chain_register g_panic_nb failed!\n"); + + ret = register_die_notifier(&g_die_nb); + if (ret) + KBOX_MSG("register_die_notifier g_die_nb failed!\n"); + + return ret; +} + +void kbox_unregister_hook(void) +{ + int ret = 0; + + ret = + atomic_notifier_chain_unregister(&panic_notifier_list, &g_panic_nb); + if (ret < 0) { + KBOX_MSG + ("atomic_notifier_chain_unregister g_panic_nb failed!\n"); + } + + ret = unregister_die_notifier(&g_die_nb); + if (ret < 0) + KBOX_MSG("unregister_die_notifier g_die_nb failed!\n"); +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..00b3deb510b73ae555a969eddde6630e80563c25 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_hook.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. 
+ * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_PANIC_HOOK_H_ +#define _KBOX_PANIC_HOOK_H_ + +struct kbox_die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +int register_die_notifier(struct notifier_block *nb); +int unregister_die_notifier(struct notifier_block *nb); + +int kbox_register_hook(void); +void kbox_unregister_hook(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h new file mode 100644 index 0000000000000000000000000000000000000000..0d82ee6f7c831851f4c08d5724f2e562398e9816 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_INCLUDE_H_ +#define _KBOX_INCLUDE_H_ + +#include +#include +#include + +#ifdef DRV_VERSION +#define KBOX_VERSION MICRO_TO_STR(DRV_VERSION) +#else +#define KBOX_VERSION "0.3.10" +#endif + +#define UNUSED(x) (x = x) +#define KBOX_FALSE (-1) +#define KBOX_TRUE 0 + +#ifdef KBOX_DEBUG +#define KBOX_MSG(fmt, args...) \ + netdev_notice(0, "kbox: %s(), %d, " fmt, __func__, __LINE__, ## args) +#else +#define KBOX_MSG(fmt, args...) +#endif + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.c new file mode 100644 index 0000000000000000000000000000000000000000..374ce49d570e2338fa3d4a8b3513aa4502208534 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include /* for rdmsr and MSR_IA32_MCG_STATUS */ +#include /* everything... 
*/ +#include /* for fput */ +#include +#include /* copy_*_user */ +#include +#include "kbox_include.h" +#include "kbox_panic.h" +#include "kbox_main.h" +#include "kbox_printk.h" +#include "kbox_ram_image.h" +#include "kbox_ram_op.h" +#include "kbox_dump.h" +#include "kbox_hook.h" +#include "kbox_ram_drive.h" + +#ifdef CONFIG_X86 +#include +#include "kbox_mce.h" +#endif + +#define KBOX_LOADED_FILE ("/proc/kbox") + +#define KBOX_ROOT_ENTRY_NAME ("kbox") + +static int kbox_is_loaded(void) +{ + struct file *fp = NULL; + mm_segment_t old_fs; + + old_fs = get_fs(); /* save old flag */ + set_fs(KERNEL_DS); /* mark data from kernel space */ + + fp = filp_open(KBOX_LOADED_FILE, O_RDONLY, 0); + + if (IS_ERR(fp)) { + set_fs(old_fs); + return KBOX_FALSE; + } + + (void)filp_close(fp, NULL); + + set_fs(old_fs); /* restore old flag */ + + return KBOX_TRUE; +} + +static int kbox_printk_proc_init(void) +{ + struct proc_dir_entry *kbox_entry = NULL; + + if (kbox_is_loaded() != KBOX_TRUE) { + kbox_entry = proc_mkdir(KBOX_ROOT_ENTRY_NAME, NULL); + if (!kbox_entry) { + KBOX_MSG("can not create %s entry\n", + KBOX_ROOT_ENTRY_NAME); + return -ENOMEM; + } + } + + return KBOX_TRUE; +} + +int __init kbox_init(void) +{ + int ret = KBOX_TRUE; + int kbox_proc_exist = 0; + + if (!kbox_get_base_phy_addr()) + return -ENXIO; + + ret = kbox_super_block_init(); + if (ret) { + KBOX_MSG("kbox_super_block_init failed!\n"); + return ret; + } + + if (kbox_is_loaded() == KBOX_TRUE) + kbox_proc_exist = 1; + + ret = kbox_printk_init(kbox_proc_exist); + if (ret) + KBOX_MSG("kbox_printk_init failed!\n"); + + ret = kbox_panic_init(); + if (ret) { + KBOX_MSG("kbox_panic_init failed!\n"); + goto fail1; + } + + ret = kbox_register_hook(); + if (ret) { + KBOX_MSG("kbox_register_hook failed!\n"); + goto fail2; + } + +#ifdef CONFIG_X86 + (void)kbox_mce_init(); +#endif + ret = kbox_read_super_block(); + if (ret) { + KBOX_MSG("update super block failed!\n"); + goto fail3; + } + + if (kbox_printk_proc_init() != 0) { + KBOX_MSG("kbox_printk_proc_init failed!\n"); + goto fail4; + } + + ret = kbox_drive_init(); + if (ret) { + KBOX_MSG("kbox_drive_init failed!\n"); + goto fail5; + } + + return KBOX_TRUE; + +fail5: +fail4: +fail3: +#ifdef CONFIG_X86 + kbox_mce_exit(); +#endif + kbox_unregister_hook(); +fail2: + kbox_panic_exit(); +fail1: + kbox_printk_exit(); + + return ret; +} + +void __exit kbox_cleanup(void) +{ + kbox_drive_cleanup(); +#ifdef CONFIG_X86 + kbox_mce_exit(); +#endif + kbox_unregister_hook(); + kbox_panic_exit(); + kbox_printk_exit(); +} + +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_DESCRIPTION("HUAWEI KBOX DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(KBOX_VERSION); +#ifndef _lint +module_init(kbox_init); +module_exit(kbox_cleanup); +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.h new file mode 100644 index 0000000000000000000000000000000000000000..2ae02b73652979a5e67023367b990619920feb0d --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_main.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_MAIN_H_ +#define _KBOX_MAIN_H_ + +#include "../edma_drv/bma_include.h" +int kbox_init(void); +void kbox_cleanup(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.c new file mode 100644 index 0000000000000000000000000000000000000000..e9bd931b826e78b37bb2e135ec794bf12549ccc4 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kbox_include.h" +#include "kbox_mce.h" +#include "kbox_dump.h" +#include "kbox_printk.h" +#include "kbox_panic.h" + +enum context { + KBOX_IN_KERNEL = 1, KBOX_IN_USER = 2 +}; + +enum ser { + KBOX_SER_REQUIRED = 1, KBOX_NO_SER = 2 +}; + +enum severity_level { + KBOX_MCE_NO_SEVERITY, + KBOX_MCE_KEEP_SEVERITY, + KBOX_MCE_SOME_SEVERITY, + KBOX_MCE_AO_SEVERITY, + KBOX_MCE_UC_SEVERITY, + KBOX_MCE_AR_SEVERITY, + KBOX_MCE_PANIC_SEVERITY, +}; + +static struct severity { + u64 kbox_mask; + u64 kbox_result; + unsigned char kbox_sev; + unsigned char kbox_mcgmask; + unsigned char kbox_mcgres; + unsigned char kbox_ser; + unsigned char kbox_context; + unsigned char kbox_covered; + char *kbox_msg; +} kbox_severities[] = { +#define KBOX_KERNEL .kbox_context = KBOX_IN_KERNEL +#define KBOX_USER .kbox_context = KBOX_IN_USER +#define KBOX_SER .kbox_ser = KBOX_SER_REQUIRED +#define KBOX_NOSER .kbox_ser = KBOX_NO_SER +#define KBOX_SEV(s) .kbox_sev = KBOX_MCE_ ## s ## _SEVERITY +#define KBOX_BITCLR(x, s, m, r...) \ + { .kbox_mask = x, .kbox_result = 0, KBOX_SEV(s), .kbox_msg = m, ## r } +#define KBOX_BITSET(x, s, m, r...) \ + { .kbox_mask = x, .kbox_result = x, KBOX_SEV(s), .kbox_msg = m, ## r } +#define KBOX_MCGMASK(x, res, s, m, r...) \ + { .kbox_mcgmask = x, .kbox_mcgres = res, KBOX_SEV(s), \ + .kbox_msg = m, ## r } +#define KBOX_MASK(x, y, s, m, r...) 
\ + { .kbox_mask = x, .kbox_result = y, KBOX_SEV(s), .kbox_msg = m, ## r } +#define KBOX_MCI_UC_S (MCI_STATUS_UC | MCI_STATUS_S) +#define KBOX_MCI_UC_SAR (MCI_STATUS_UC | MCI_STATUS_S | MCI_STATUS_AR) +#define KBOX_MCACOD 0xffff + +KBOX_BITCLR(MCI_STATUS_VAL, NO, "Invalid"), +KBOX_BITCLR(MCI_STATUS_EN, NO, "Not enabled"), +KBOX_BITSET(MCI_STATUS_PCC, PANIC, "Processor context corrupt"), + +KBOX_MCGMASK(MCG_STATUS_MCIP, 0, PANIC, "MCIP not set in MCA handler"), + +KBOX_MCGMASK(MCG_STATUS_RIPV | MCG_STATUS_EIPV, 0, PANIC, + "Neither restart nor error IP"), +KBOX_MCGMASK(MCG_STATUS_RIPV, 0, PANIC, "In kernel and no restart IP", + KBOX_KERNEL), +KBOX_BITCLR(MCI_STATUS_UC, KEEP, "Corrected error", KBOX_NOSER), +KBOX_MASK(MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN, MCI_STATUS_UC, SOME, + "Spurious not enabled", KBOX_SER), + +KBOX_MASK(KBOX_MCI_UC_SAR, MCI_STATUS_UC, KEEP, + "Uncorrected no action required", KBOX_SER), +KBOX_MASK(MCI_STATUS_OVER | KBOX_MCI_UC_SAR, MCI_STATUS_UC | MCI_STATUS_AR, + PANIC, "Illegal combination (UCNA with AR=1)", KBOX_SER), +KBOX_MASK(MCI_STATUS_S, 0, KEEP, "Non signalled machine check", KBOX_SER), + +KBOX_MASK(MCI_STATUS_OVER | KBOX_MCI_UC_SAR, MCI_STATUS_OVER | KBOX_MCI_UC_SAR, + PANIC, "Action required with lost events", KBOX_SER), +KBOX_MASK(MCI_STATUS_OVER | KBOX_MCI_UC_SAR | KBOX_MCACOD, KBOX_MCI_UC_SAR, + PANIC, "Action required; unknown MCACOD", KBOX_SER), + +KBOX_MASK(KBOX_MCI_UC_SAR | MCI_STATUS_OVER | 0xfff0, KBOX_MCI_UC_S | 0xc0, + AO, "Action optional: memory scrubbing error", KBOX_SER), +KBOX_MASK(KBOX_MCI_UC_SAR | MCI_STATUS_OVER | KBOX_MCACOD, + KBOX_MCI_UC_S | 0x17a, AO, + "Action optional: last level cache writeback error", KBOX_SER), + +KBOX_MASK(MCI_STATUS_OVER | KBOX_MCI_UC_SAR, KBOX_MCI_UC_S, SOME, + "Action optional unknown MCACOD", KBOX_SER), +KBOX_MASK(MCI_STATUS_OVER | KBOX_MCI_UC_SAR, KBOX_MCI_UC_S | MCI_STATUS_OVER, + SOME, "Action optional with lost events", KBOX_SER), +KBOX_BITSET(MCI_STATUS_UC | MCI_STATUS_OVER, PANIC, "Overflowed uncorrected"), +KBOX_BITSET(MCI_STATUS_UC, UC, "Uncorrected"), +KBOX_BITSET(0, SOME, "No match") +}; + +static unsigned int g_kbox_nr_mce_banks; +static unsigned int g_kbox_mce_ser; +static atomic_t g_mce_dump_state = ATOMIC_INIT(0); + +static int kbox_mce_severity(u64 mcgstatus, u64 status) +{ + struct severity *s; + + for (s = kbox_severities;; s++) { + if ((status & s->kbox_mask) != s->kbox_result) + continue; + + if ((mcgstatus & s->kbox_mcgmask) != s->kbox_mcgres) + continue; + + if (s->kbox_ser == KBOX_SER_REQUIRED && !g_kbox_mce_ser) + continue; + + if (s->kbox_ser == KBOX_NO_SER && g_kbox_mce_ser) + continue; + + break; + } + + return s->kbox_sev; +} + +static u64 kbox_mce_rdmsrl(u32 ulmsr) +{ + u64 ullv = 0; + + if (rdmsrl_safe(ulmsr, &ullv)) { + (void)kbox_dump_painc_info("mce: Unable to read msr %d!\n", + ulmsr); + ullv = 0; + } + + return ullv; +} + +static int kbox_intel_machine_check(void) +{ + unsigned int idx = 0; + u64 mcgstatus = 0; + int worst = 0; + + mcgstatus = kbox_mce_rdmsrl(MSR_IA32_MCG_STATUS); + + (void) + kbox_dump_painc_info + ("CPU %d: Machine Check Exception MCG STATUS: 0x%016llx\n", + smp_processor_id(), mcgstatus); + + if (!(mcgstatus & MCG_STATUS_RIPV)) + (void)kbox_dump_painc_info("Unable to continue\n"); + + for (idx = 0; idx < g_kbox_nr_mce_banks; idx++) { + u64 status = 0; + u64 misc = 0; + u64 addr = 0; + int lseverity = 0; + + status = kbox_mce_rdmsrl(MSR_IA32_MCx_STATUS(idx)); + + (void)kbox_dump_painc_info("Bank %d STATUS: 0x%016llx\n", idx, + status); + + if (0 
== (status & MCI_STATUS_VAL)) + continue; + + lseverity = kbox_mce_severity(mcgstatus, status); + if (lseverity == KBOX_MCE_KEEP_SEVERITY || + lseverity == KBOX_MCE_NO_SEVERITY) + continue; + + (void)kbox_dump_painc_info("severity = %d\n", lseverity); + + if (status & MCI_STATUS_MISCV) { + misc = kbox_mce_rdmsrl(MSR_IA32_MCx_MISC(idx)); + (void)kbox_dump_painc_info("misc = 0x%016llx\n", misc); + } + + if (status & MCI_STATUS_ADDRV) { + addr = kbox_mce_rdmsrl(MSR_IA32_MCx_ADDR(idx)); + (void)kbox_dump_painc_info("addr = 0x%016llx\n", addr); + } + + (void)kbox_dump_painc_info("\n"); + + if (lseverity > worst) + worst = lseverity; + } + + if (worst >= KBOX_MCE_UC_SEVERITY) + return KBOX_FALSE; + + (void)kbox_dump_painc_info("Attempting to continue.\n"); + + return KBOX_TRUE; +} + +int kbox_handle_mce_dump(const char *msg) +{ + int mce_recoverable = KBOX_FALSE; + + atomic_read(&g_mce_dump_state); + + mce_recoverable = kbox_intel_machine_check(); + if (mce_recoverable != KBOX_TRUE) { + static atomic_t mce_entry_tmp; + int flag = atomic_add_return(1, &mce_entry_tmp); + + if (flag != 1) + return KBOX_FALSE; + } + + atomic_set(&g_mce_dump_state, DUMPSTATE_MCE_RESET); + + if (msg) { + (void) + kbox_dump_painc_info + ("-------[ System may reset by %s! ]-------\n\n", msg); + } + + return KBOX_TRUE; +} + +int kbox_mce_init(void) +{ + u64 cap = 0; + + cap = kbox_mce_rdmsrl(MSR_IA32_MCG_CAP); + g_kbox_nr_mce_banks = cap & MCG_BANKCNT_MASK; + + if (cap & MCG_SER_P) + g_kbox_mce_ser = 1; + + KBOX_MSG("get nr_mce_banks:%d, g_kbox_mce_ser = %d, cap = 0x%016llx\n", + g_kbox_nr_mce_banks, g_kbox_mce_ser, cap); + + return KBOX_TRUE; +} + +void kbox_mce_exit(void) +{ + g_kbox_nr_mce_banks = 0; + g_kbox_mce_ser = 0; +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.h new file mode 100644 index 0000000000000000000000000000000000000000..00d3b787c140ea7f40015867c2da511a62208c8d --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_mce.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_MCE_H_ +#define _KBOX_MCE_H_ + +int kbox_handle_mce_dump(const char *msg); +int kbox_mce_init(void); +void kbox_mce_exit(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c new file mode 100644 index 0000000000000000000000000000000000000000..2b142ae9bff6cada37863093a846ea603e3faf14 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include "kbox_include.h" +#include "kbox_panic.h" +#include "kbox_ram_op.h" + +#ifdef CONFIG_X86 +#include +#endif + +#define PANIC_TMP_BUF_SIZE 256 + +static int g_panic_init_ok = KBOX_FALSE; + +static char *g_panic_info_buf_tmp; +static char *g_panic_info_buf; + +static unsigned int g_panic_info_start; + +static unsigned int g_panic_info_end; + +static unsigned int g_panic_info_len; + +static DEFINE_SPINLOCK(g_panic_buf_lock); + +static void kbox_emit_syslog_char(const char c) +{ + if (unlikely(!g_panic_info_buf)) + return; + + *(g_panic_info_buf + (g_panic_info_end % SLOT_LENGTH)) = c; + g_panic_info_end++; + + if (g_panic_info_end > SLOT_LENGTH) + g_panic_info_start++; + + if (g_panic_info_len < SLOT_LENGTH) + g_panic_info_len++; +} + +static int kbox_duplicate_syslog_info(const char *syslog_buf, + unsigned int buf_len) +{ + unsigned int idx = 0; + unsigned long flags = 0; + + if (!syslog_buf) + return 0; + + spin_lock_irqsave(&g_panic_buf_lock, flags); + + for (idx = 0; idx < buf_len; idx++) + kbox_emit_syslog_char(*syslog_buf++); + + spin_unlock_irqrestore(&g_panic_buf_lock, flags); + + return buf_len; +} + +int kbox_dump_painc_info(const char *fmt, ...) +{ + va_list args; + int num = 0; + char tmp_buf[PANIC_TMP_BUF_SIZE] = { }; + + va_start(args, fmt); + + num = vsnprintf(tmp_buf, sizeof(tmp_buf) - 1, fmt, args); + if (num >= 0) + (void)kbox_duplicate_syslog_info(tmp_buf, num); + + va_end(args); + + return num; +} + +void kbox_output_syslog_info(void) +{ + unsigned int start_tmp = 0; + unsigned int end_tmp = 0; + unsigned int len_tmp = 0; + unsigned long flags = 0; + + if (unlikely + (!g_panic_info_buf || !g_panic_info_buf_tmp)) + return; + + spin_lock_irqsave(&g_panic_buf_lock, flags); + if (g_panic_info_len == 0) { + spin_unlock_irqrestore(&g_panic_buf_lock, flags); + return; + } + + start_tmp = (g_panic_info_start % SLOT_LENGTH); + end_tmp = ((g_panic_info_end - 1) % SLOT_LENGTH); + len_tmp = g_panic_info_len; + + if (start_tmp > end_tmp) { + memcpy(g_panic_info_buf_tmp, + (g_panic_info_buf + start_tmp), + len_tmp - start_tmp); + memcpy((g_panic_info_buf_tmp + len_tmp - start_tmp), + g_panic_info_buf, + end_tmp + 1); + } else { + memcpy(g_panic_info_buf_tmp, + (char *)(g_panic_info_buf + start_tmp), + len_tmp); + } + + spin_unlock_irqrestore(&g_panic_buf_lock, flags); + + (void)kbox_write_panic_info(g_panic_info_buf_tmp, len_tmp); +} + +int kbox_panic_init(void) +{ + int ret = KBOX_TRUE; + + g_panic_info_buf = kmalloc(SLOT_LENGTH, GFP_KERNEL); + if (!g_panic_info_buf) { + KBOX_MSG("kmalloc g_panic_info_buf fail!\n"); + ret = -ENOMEM; + goto fail; + } + + memset(g_panic_info_buf, 0, SLOT_LENGTH); + + g_panic_info_buf_tmp = kmalloc(SLOT_LENGTH, GFP_KERNEL); + if (!g_panic_info_buf_tmp) { + KBOX_MSG("kmalloc g_panic_info_buf_tmp fail!\n"); + ret = -ENOMEM; + goto fail; + } + + memset(g_panic_info_buf_tmp, 0, SLOT_LENGTH); + + g_panic_init_ok = KBOX_TRUE; + + return ret; +fail: + + kfree(g_panic_info_buf); + g_panic_info_buf = NULL; + + kfree(g_panic_info_buf_tmp); + g_panic_info_buf_tmp = NULL; + + return ret; +} + +void kbox_panic_exit(void) +{ + if (g_panic_init_ok != KBOX_TRUE) + return; + + kfree(g_panic_info_buf); + g_panic_info_buf = NULL; + + kfree(g_panic_info_buf_tmp); + 
g_panic_info_buf_tmp = NULL; +} + +int kbox_handle_panic_dump(const char *msg) +{ + if (msg) + (void)kbox_dump_painc_info("panic string: %s\n", msg); + + return KBOX_TRUE; +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.h new file mode 100644 index 0000000000000000000000000000000000000000..5715b66c06590b0a586eb08c55638f4b5f2df4b5 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_PANIC_H_ +#define _KBOX_PANIC_H_ + +int kbox_handle_panic_dump(const char *msg); +void kbox_output_syslog_info(void); +int kbox_dump_painc_info(const char *fmt, ...); +int kbox_panic_init(void); +void kbox_panic_exit(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c new file mode 100644 index 0000000000000000000000000000000000000000..630a1e16ea24b72f5fe9ded6a1d9260331ee90aa --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include /* struct console */ +#include +#include +#include "kbox_include.h" +#include "kbox_main.h" +#include "kbox_printk.h" +#include "kbox_ram_image.h" +#include "kbox_ram_op.h" + +#define TMP_BUF_SIZE 256 + +static int g_printk_init_ok = KBOX_FALSE; +static char *g_printk_info_buf; +static char *g_printk_info_buf_tmp; +static struct printk_ctrl_block_tmp_s g_printk_ctrl_block_tmp = { }; + +static DEFINE_SPINLOCK(g_printk_buf_lock); + +static void kbox_printk_info_write(struct console *console, + const char *printk_buf, + unsigned int buf_len); + +static struct console g_printk_console = { + .name = "k_prtk", + .flags = CON_ENABLED | CON_PRINTBUFFER, + .write = kbox_printk_info_write, +}; + +static int kbox_printk_format_is_order(struct printk_info_ctrl_block_s * + printk_ctrl_blk_first, + struct printk_info_ctrl_block_s * + printk_ctrl_blk_second) +{ + if (!printk_ctrl_blk_first || !printk_ctrl_blk_second) + return KBOX_FALSE; + + if (!memcmp(printk_ctrl_blk_first->flag, PRINTK_CURR_FLAG, + PRINTK_FLAG_LEN) && + !memcmp(printk_ctrl_blk_second->flag, PRINTK_LAST_FLAG, + PRINTK_FLAG_LEN)) { + return KBOX_TRUE; + } + + return KBOX_FALSE; +} + +static void kbox_printk_format(struct printk_info_ctrl_block_s *printk_ctrl_blk, + const unsigned int len, const char *flag) +{ + if (!printk_ctrl_blk || !flag) + return; + + memset(printk_ctrl_blk, 0, len); + memcpy(printk_ctrl_blk->flag, flag, PRINTK_FLAG_LEN); +} + +static void kbox_printk_init_info_first + (struct image_super_block_s *kbox_super_block) +{ + KBOX_MSG("\n"); + if (kbox_printk_format_is_order(kbox_super_block->printk_ctrl_blk, + kbox_super_block->printk_ctrl_blk + + 1) == KBOX_TRUE) { + memcpy(kbox_super_block->printk_ctrl_blk[0].flag, + PRINTK_LAST_FLAG, PRINTK_FLAG_LEN); + memcpy(kbox_super_block->printk_ctrl_blk[1].flag, + PRINTK_CURR_FLAG, PRINTK_FLAG_LEN); + kbox_super_block->printk_ctrl_blk[1].len = 0; + g_printk_ctrl_block_tmp.printk_region = 1; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK2; + (void)kbox_clear_region(KBOX_SECTION_PRINTK2); + } else if (kbox_printk_format_is_order + (kbox_super_block->printk_ctrl_blk + 1, + kbox_super_block->printk_ctrl_blk) == KBOX_TRUE) { + memcpy(kbox_super_block->printk_ctrl_blk[1].flag, + PRINTK_LAST_FLAG, + PRINTK_FLAG_LEN); + memcpy(kbox_super_block->printk_ctrl_blk[0].flag, + PRINTK_CURR_FLAG, + PRINTK_FLAG_LEN); + + kbox_super_block->printk_ctrl_blk[0].len = 0; + g_printk_ctrl_block_tmp.printk_region = 0; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK1; + (void)kbox_clear_region(KBOX_SECTION_PRINTK1); + } else { + kbox_printk_format(kbox_super_block->printk_ctrl_blk, + sizeof(struct printk_info_ctrl_block_s), + PRINTK_CURR_FLAG); + kbox_printk_format(kbox_super_block->printk_ctrl_blk + 1, + sizeof(struct printk_info_ctrl_block_s), + PRINTK_LAST_FLAG); + g_printk_ctrl_block_tmp.printk_region = 0; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK1; + (void)kbox_clear_region(KBOX_SECTION_PRINTK1); + (void)kbox_clear_region(KBOX_SECTION_PRINTK2); + } + + g_printk_ctrl_block_tmp.start = 0; + g_printk_ctrl_block_tmp.end = 0; + g_printk_ctrl_block_tmp.valid_len = 0; +} + +static void kbox_printk_init_info_not_first + (struct image_super_block_s *kbox_super_block) +{ + KBOX_MSG("\n"); + if (KBOX_TRUE == + kbox_printk_format_is_order(kbox_super_block->printk_ctrl_blk, + kbox_super_block->printk_ctrl_blk + + 1)) { + g_printk_ctrl_block_tmp.printk_region = 0; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK1; + + } else if (KBOX_TRUE == + 
kbox_printk_format_is_order + (kbox_super_block->printk_ctrl_blk + 1, + kbox_super_block->printk_ctrl_blk)) { + g_printk_ctrl_block_tmp.printk_region = 1; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK2; + + } else { + kbox_printk_format(kbox_super_block->printk_ctrl_blk, + sizeof(struct printk_info_ctrl_block_s), + PRINTK_CURR_FLAG); + kbox_printk_format(kbox_super_block->printk_ctrl_blk + 1, + sizeof(struct printk_info_ctrl_block_s), + PRINTK_LAST_FLAG); + g_printk_ctrl_block_tmp.printk_region = 0; + g_printk_ctrl_block_tmp.section = KBOX_SECTION_PRINTK1; + (void)kbox_clear_region(KBOX_SECTION_PRINTK1); + (void)kbox_clear_region(KBOX_SECTION_PRINTK2); + } + + g_printk_ctrl_block_tmp.start = 0; +} + +static int kbox_printk_init_info(int kbox_proc_exist) +{ + struct image_super_block_s kbox_super_block = { }; + unsigned int read_len = 0; + unsigned int write_len = 0; + + read_len = + kbox_read_from_ram(SECTION_KERNEL_OFFSET, + (unsigned int)sizeof(struct image_super_block_s), + (char *)&kbox_super_block, KBOX_SECTION_KERNEL); + if (read_len != sizeof(struct image_super_block_s)) { + KBOX_MSG("fail to get superblock data!\n"); + return KBOX_FALSE; + } + + if (kbox_proc_exist) { + kbox_printk_init_info_not_first(&kbox_super_block); + if (KBOX_TRUE != + kbox_read_printk_info(g_printk_info_buf, + &g_printk_ctrl_block_tmp)) { + g_printk_ctrl_block_tmp.end = 0; + g_printk_ctrl_block_tmp.valid_len = 0; + } + } else { + kbox_printk_init_info_first(&kbox_super_block); + } + + kbox_super_block.checksum = 0; + kbox_super_block.checksum = + ~((unsigned char) + kbox_checksum((char *)&kbox_super_block, + (unsigned int)sizeof(kbox_super_block))) + 1; + write_len = + kbox_write_to_ram(SECTION_KERNEL_OFFSET, + (unsigned int)sizeof(struct image_super_block_s), + (char *)&kbox_super_block, KBOX_SECTION_KERNEL); + if (write_len <= 0) { + KBOX_MSG("fail to write superblock data!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +void kbox_output_printk_info(void) +{ + unsigned int start_tmp = 0; + unsigned int end_tmp = 0; + unsigned int len_tmp = 0; + unsigned long flags = 0; + + if (unlikely(!g_printk_info_buf || !g_printk_info_buf_tmp)) + return; + + if (g_printk_init_ok != KBOX_TRUE) + return; + + spin_lock_irqsave(&g_printk_buf_lock, flags); + if (g_printk_ctrl_block_tmp.valid_len == 0) { + spin_unlock_irqrestore(&g_printk_buf_lock, flags); + return; + } + + start_tmp = (g_printk_ctrl_block_tmp.start % SECTION_PRINTK_LEN); + end_tmp = ((g_printk_ctrl_block_tmp.end - 1) % SECTION_PRINTK_LEN); + len_tmp = g_printk_ctrl_block_tmp.valid_len; + + if (start_tmp > end_tmp) { + memcpy(g_printk_info_buf_tmp, + g_printk_info_buf + start_tmp, + len_tmp - start_tmp); + memcpy(g_printk_info_buf_tmp + len_tmp - start_tmp, + g_printk_info_buf, + end_tmp + 1); + } else { + memcpy(g_printk_info_buf_tmp, + g_printk_info_buf + start_tmp, + len_tmp); + } + + spin_unlock_irqrestore(&g_printk_buf_lock, flags); + + (void)kbox_write_printk_info(g_printk_info_buf_tmp, + &g_printk_ctrl_block_tmp); +} + +static void kbox_emit_printk_char(const char c) +{ + if (unlikely(!g_printk_info_buf)) + return; + + *(g_printk_info_buf + + (g_printk_ctrl_block_tmp.end % SECTION_PRINTK_LEN)) = c; + g_printk_ctrl_block_tmp.end++; + + if (g_printk_ctrl_block_tmp.end > SECTION_PRINTK_LEN) + g_printk_ctrl_block_tmp.start++; + + if (g_printk_ctrl_block_tmp.end < SECTION_PRINTK_LEN) + g_printk_ctrl_block_tmp.valid_len++; +} + +static int kbox_duplicate_printk_info(const char *printk_buf, + unsigned int buf_len) +{ + unsigned int idx = 
0; + unsigned long flags = 0; + + spin_lock_irqsave(&g_printk_buf_lock, flags); + for (idx = 0; idx < buf_len; idx++) + kbox_emit_printk_char(*printk_buf++); + + spin_unlock_irqrestore(&g_printk_buf_lock, flags); + + return buf_len; +} + +int kbox_dump_printk_info(const char *fmt, ...) +{ + va_list args; + int num = 0; + char tmp_buf[TMP_BUF_SIZE] = { }; + + if (g_printk_init_ok != KBOX_TRUE) + return 0; + + va_start(args, fmt); + num = vsnprintf(tmp_buf, sizeof(tmp_buf) - 1, fmt, args); + if (num >= 0) + (void)kbox_duplicate_printk_info(tmp_buf, num); + + va_end(args); + + return num; +} + +static void kbox_printk_info_write(struct console *pconsole, + const char *printk_buf, unsigned int buf_len) +{ + UNUSED(pconsole); + + if (unlikely(!printk_buf)) + return; + + (void)kbox_duplicate_printk_info(printk_buf, buf_len); +} + +int kbox_printk_init(int kbox_proc_exist) +{ + int ret = KBOX_TRUE; + + g_printk_info_buf = kmalloc(SECTION_PRINTK_LEN, + GFP_KERNEL); + if (!g_printk_info_buf) { + KBOX_MSG("kmalloc g_printk_info_buf fail!\n"); + ret = -ENOMEM; + goto fail; + } + + memset(g_printk_info_buf, 0, SECTION_PRINTK_LEN); + + g_printk_info_buf_tmp = kmalloc(SECTION_PRINTK_LEN, + GFP_KERNEL); + if (!g_printk_info_buf_tmp) { + KBOX_MSG("kmalloc g_printk_info_buf_tmp fail!\n"); + ret = -ENOMEM; + goto fail; + } + + memset(g_printk_info_buf_tmp, 0, SECTION_PRINTK_LEN); + + ret = kbox_printk_init_info(kbox_proc_exist); + if (ret != KBOX_TRUE) { + KBOX_MSG("kbox_printk_init_info failed!\n"); + goto fail; + } + + register_console(&g_printk_console); + + g_printk_init_ok = KBOX_TRUE; + + return ret; +fail: + + kfree(g_printk_info_buf); + g_printk_info_buf = NULL; + + kfree(g_printk_info_buf_tmp); + g_printk_info_buf_tmp = NULL; + + return ret; +} + +void kbox_printk_exit(void) +{ + int ret = 0; + + if (g_printk_init_ok != KBOX_TRUE) + return; + + kfree(g_printk_info_buf); + g_printk_info_buf = NULL; + + kfree(g_printk_info_buf_tmp); + g_printk_info_buf_tmp = NULL; + + ret = unregister_console(&g_printk_console); + if (ret) + KBOX_MSG("unregister_console failed!\n"); +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.h new file mode 100644 index 0000000000000000000000000000000000000000..cece825626a84f8189894fe194c681899a4c2f43 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _KBOX_PRINTK_H_ +#define _KBOX_PRINTK_H_ +#include "kbox_ram_image.h" + +struct printk_ctrl_block_tmp_s { + int printk_region; + enum kbox_section_e section; + unsigned int start; + unsigned int end; + unsigned int valid_len;/* valid length of printk section */ +}; + +int kbox_printk_init(int kbox_proc_exist); +void kbox_output_printk_info(void); +int kbox_dump_printk_info(const char *fmt, ...); +void kbox_printk_exit(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.c new file mode 100644 index 0000000000000000000000000000000000000000..829e2a498843639de51f4ef4ae4c57529dfc7141 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include /* everything... */ +#include +#include +#include +#include +#include "kbox_include.h" +#include "kbox_ram_drive.h" +#include "kbox_main.h" +#include "kbox_ram_image.h" +#include "kbox_ram_op.h" + +#define KBOX_DEVICE_NAME "kbox" +#define KBOX_DEVICE_MINOR 255 + +static struct kbox_dev_s *g_kbox_dev; +static ssize_t kbox_read(struct file *filp, char __user *data, size_t count, + loff_t *ppos); +static ssize_t kbox_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos); + +static long kbox_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +static int kbox_mmap(struct file *filp, struct vm_area_struct *vma); +static int kbox_open(struct inode *inode, struct file *filp); +static int kbox_release(struct inode *inode, struct file *filp); + +const struct file_operations kbox_fops = { + .owner = THIS_MODULE, + .read = kbox_read, + .write = kbox_write, + .unlocked_ioctl = kbox_ioctl, + .mmap = kbox_mmap, + .open = kbox_open, + .release = kbox_release, +}; + +static struct miscdevice kbox_device = { + KBOX_DEVICE_MINOR, + KBOX_DEVICE_NAME, + &kbox_fops, +}; + +static ssize_t kbox_read(struct file *filp, char __user *data, size_t count, + loff_t *ppos) +{ + int read_len = 0; + + if (!filp || !data || !ppos) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + read_len = kbox_read_op((long long)(*ppos), + count, + data, + KBOX_SECTION_USER); + if (read_len < 0) + return -EFAULT; + + *ppos += read_len; + + return read_len; +} + +static ssize_t kbox_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos) +{ + int write_len = 0; + + if (!filp || !data || !ppos) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + write_len = kbox_write_op((long long)(*ppos), + count, data, KBOX_SECTION_USER); + if (write_len < 0) + return -EFAULT; + + *ppos += write_len; + + return write_len; +} + +static long kbox_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + UNUSED(filp); + + if (kbox_ioctl_detail(cmd, arg) < 0) + return -ENOTTY; + + return 0; +} + +static int kbox_mmap(struct file *filp, struct 
vm_area_struct *vma) +{ + if (!filp || !vma) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + if (kbox_mmap_ram(filp, vma, KBOX_SECTION_USER) < 0) + return -EFAULT; + + return 0; +} + +static int kbox_open(struct inode *pinode, struct file *filp) +{ + UNUSED(pinode); + + if ((g_kbox_dev) && (!atomic_dec_and_test(&g_kbox_dev->au_count))) { + atomic_inc(&g_kbox_dev->au_count); + KBOX_MSG("EBUSY\n"); + return -EBUSY; + } + + filp->private_data = (void *)g_kbox_dev; + + return 0; +} + +int kbox_release(struct inode *pinode, struct file *filp) +{ + struct kbox_dev_s *kbox_dev = (struct kbox_dev_s *)filp->private_data; + + UNUSED(pinode); + + KBOX_MSG("\n"); + + if (kbox_dev) + atomic_inc(&kbox_dev->au_count); + + return 0; +} + +int kbox_drive_init(void) +{ + int ret = 0; + + KBOX_MSG("\n"); + + g_kbox_dev = + kmalloc(sizeof(struct kbox_dev_s), GFP_KERNEL); + if (!g_kbox_dev) + return -ENOMEM; + + ret = misc_register(&kbox_device); + if (ret) + goto fail; + + atomic_set(&g_kbox_dev->au_count, 1); + + KBOX_MSG("ok!\n"); + + return ret; + +fail: + kfree(g_kbox_dev); + g_kbox_dev = NULL; + + return ret; +} + +void kbox_drive_cleanup(void) +{ + if (!g_kbox_dev) + return; + + misc_deregister(&kbox_device); + + kfree(g_kbox_dev); + g_kbox_dev = NULL; +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.h new file mode 100644 index 0000000000000000000000000000000000000000..52707c4b82c54b9cc48d8774c9663c8010b439b9 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_drive.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_RAM_DRIVE_H_ +#define _KBOX_RAM_DRIVE_H_ + +#include +#include + +struct kbox_dev_s { + atomic_t au_count; + + struct kbox_pci_dev_s *kbox_pci_dev; +}; + +int kbox_drive_init(void); +void kbox_drive_cleanup(void); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.c new file mode 100644 index 0000000000000000000000000000000000000000..f57083261983a16a47f0eee745e5297bec73be18 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "kbox_include.h" +#include "kbox_main.h" +#include "kbox_ram_image.h" + +void __iomem *kbox_get_section_addr(enum kbox_section_e kbox_section) +{ + void __iomem *kbox_addr = kbox_get_base_addr(); + unsigned long kbox_len = kbox_get_io_len(); + + if (!kbox_addr || kbox_len == 0) { + KBOX_MSG("get kbox_addr or kbox_len failed!\n"); + return NULL; + } + + switch (kbox_section) { + case KBOX_SECTION_KERNEL: + return kbox_addr; + + case KBOX_SECTION_PANIC: + return kbox_addr + SECTION_KERNEL_LEN; + + case KBOX_SECTION_THREAD: + return kbox_addr + SECTION_KERNEL_LEN + SECTION_PANIC_LEN; + + case KBOX_SECTION_PRINTK1: + return kbox_addr + (kbox_len - (2 * SECTION_PRINTK_LEN) - + SECTION_USER_LEN); + + case KBOX_SECTION_PRINTK2: + return kbox_addr + (kbox_len - SECTION_PRINTK_LEN - + SECTION_USER_LEN); + + case KBOX_SECTION_USER: + return kbox_addr + (kbox_len - SECTION_USER_LEN); + + case KBOX_SECTION_ALL: + return kbox_addr; + + default: + KBOX_MSG("input kbox_section error!\n"); + return NULL; + } +} + +unsigned long kbox_get_section_len(enum kbox_section_e kbox_section) +{ + unsigned long kbox_len = kbox_get_io_len(); + + if (kbox_len == 0) { + KBOX_MSG("get kbox_len failed!\n"); + return 0; + } + + switch (kbox_section) { + case KBOX_SECTION_KERNEL: + return SECTION_KERNEL_LEN; + + case KBOX_SECTION_PANIC: + return SECTION_PANIC_LEN; + + case KBOX_SECTION_THREAD: + return (kbox_len - (2 * SECTION_PRINTK_LEN) - + SECTION_USER_LEN - SECTION_KERNEL_LEN - + SECTION_PANIC_LEN); + + case KBOX_SECTION_PRINTK1: + case KBOX_SECTION_PRINTK2: + return SECTION_PRINTK_LEN; + + case KBOX_SECTION_USER: + return SECTION_USER_LEN; + + case KBOX_SECTION_ALL: + return kbox_len; + + default: + KBOX_MSG("input kbox_section error!\n"); + return 0; + } +} + +unsigned long kbox_get_section_phy_addr(enum kbox_section_e kbox_section) +{ + unsigned long kbox_phy_addr = kbox_get_base_phy_addr(); + unsigned long kbox_len = kbox_get_io_len(); + + if (kbox_phy_addr == 0 || kbox_len == 0) { + KBOX_MSG("get kbox_phy_addr or kbox_len failed!\n"); + return 0; + } + + switch (kbox_section) { + case KBOX_SECTION_KERNEL: + return kbox_phy_addr; + + case KBOX_SECTION_PANIC: + return kbox_phy_addr + SECTION_KERNEL_LEN; + + case KBOX_SECTION_THREAD: + return kbox_phy_addr + SECTION_KERNEL_LEN + SECTION_PANIC_LEN; + + case KBOX_SECTION_PRINTK1: + return kbox_phy_addr + (kbox_len - (2 * SECTION_PRINTK_LEN) - + SECTION_USER_LEN); + + case KBOX_SECTION_PRINTK2: + return kbox_phy_addr + (kbox_len - SECTION_PRINTK_LEN - + SECTION_USER_LEN); + + case KBOX_SECTION_USER: + return kbox_phy_addr + (kbox_len - SECTION_USER_LEN); + + case KBOX_SECTION_ALL: + return kbox_phy_addr; + + default: + KBOX_MSG("input kbox_section error!\n"); + return 0; + } +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.h new file mode 100644 index 0000000000000000000000000000000000000000..d1b01bd9ea115d6aa4f6ae0cab446cef047bf627 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_RAM_IMAGE_H_ +#define _KBOX_RAM_IMAGE_H_ + +enum kbox_section_e { + KBOX_SECTION_KERNEL = 1, + KBOX_SECTION_PANIC = 2, + KBOX_SECTION_THREAD = 3, + KBOX_SECTION_PRINTK1 = 4, + KBOX_SECTION_PRINTK2 = 5, + KBOX_SECTION_USER = 6, + KBOX_SECTION_ALL = 7 +}; + +#define KBOX_BIG_ENDIAN (0x2B) +#define KBOX_LITTLE_ENDIAN (0xB2) +#define IMAGE_VER (0x0001) +#define IMAGE_MAGIC (0xB202C086) +#define VALID_IMAGE(x) (IMAGE_MAGIC == (x)->magic_flag) +#define SLOT_NUM (8) +#define SLOT_LENGTH (16 * 1024) +#define MAX_RECORD_NO (0xFF) +#define MAX_USE_NUMS (0xFF) + +#define PRINTK_NUM (2) +#define PRINTK_CURR_FLAG ("curr") +#define PRINTK_LAST_FLAG ("last") +#define PRINTK_FLAG_LEN (4) + +struct panic_ctrl_block_s { + unsigned char use_nums; + unsigned char number; + unsigned short len; + unsigned int time; +}; + +struct thread_info_ctrl_block_s { + unsigned int thread_info_len; +}; + +struct printk_info_ctrl_block_s { + unsigned char flag[PRINTK_FLAG_LEN]; + unsigned int len; +}; + +struct image_super_block_s { + unsigned char byte_order; + unsigned char checksum; + unsigned short version; + unsigned int magic_flag; + unsigned int panic_nums; + struct panic_ctrl_block_s panic_ctrl_blk[SLOT_NUM]; + struct printk_info_ctrl_block_s printk_ctrl_blk[PRINTK_NUM]; + struct thread_info_ctrl_block_s thread_ctrl_blk; +}; + +#define SECTION_KERNEL_LEN (sizeof(struct image_super_block_s)) +#define SECTION_PANIC_LEN (8 * SLOT_LENGTH) +#define SECTION_PRINTK_LEN (512 * 1024) +#define SECTION_USER_LEN (2 * 1024 * 1024) + +#define SECTION_KERNEL_OFFSET (0) +#define SECTION_PANIC_OFFSET SECTION_KERNEL_LEN +#define SECTION_THREAD_OFFSET (SECTION_KERNEL_LEN + SECTION_PANIC_LEN) + +void __iomem *kbox_get_section_addr(enum kbox_section_e kbox_section); +unsigned long kbox_get_section_len(enum kbox_section_e kbox_section); +unsigned long kbox_get_section_phy_addr(enum kbox_section_e kbox_section); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c new file mode 100644 index 0000000000000000000000000000000000000000..9f6dfe55e3fb18162217b3d65e14f5a73f7ddc94 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c @@ -0,0 +1,986 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include /* copy_*_user */ +#include /* udelay */ +#include +#include "kbox_include.h" +#include "kbox_main.h" +#include "kbox_ram_image.h" +#include "kbox_ram_op.h" + +#ifndef VM_RESERVED +#define VM_RESERVED 0x00080000 +#endif + +static DEFINE_SPINLOCK(g_kbox_super_block_lock); +static DEFINE_SEMAPHORE(user_sem); + +union char_int_transfer_u { + int data_int; + char data_char[KBOX_RW_UNIT]; +}; + +static struct image_super_block_s g_kbox_super_block = { }; + +void kbox_write_to_pci(void __iomem *dest, const void *src, int len, + unsigned long offset) +{ + union char_int_transfer_u transfer = { }; + int idx = 0; + int j = 0; + int four_byte_len = 0; + int left_len = 0; + char *src_temp = (char *)src; + char *dest_temp = (char *)dest; + int first_write_num = 0; + + if ((offset % KBOX_RW_UNIT) != 0) { + transfer.data_int = + *(int *)(dest_temp + offset - (offset % KBOX_RW_UNIT)); + + rmb();/* memory barriers. */ + first_write_num = + ((len + (offset % KBOX_RW_UNIT)) > + KBOX_RW_UNIT) ? (KBOX_RW_UNIT - + (offset % KBOX_RW_UNIT)) : len; + for (idx = (int)(offset % KBOX_RW_UNIT); + idx < (int)(first_write_num + (offset % KBOX_RW_UNIT)); + idx++) { + if (!src_temp) + return; + + transfer.data_char[idx] = *src_temp; + src_temp++; + } + *(int *)(dest_temp + offset - (offset % KBOX_RW_UNIT)) = + transfer.data_int; + wmb();/* memory barriers. */ + len -= first_write_num; + offset += first_write_num; + } + + four_byte_len = (len / KBOX_RW_UNIT); + left_len = (len % KBOX_RW_UNIT); + for (idx = 0; idx < four_byte_len; idx++) { + for (j = 0; j < KBOX_RW_UNIT; j++) { + if (!src_temp) + return; + + transfer.data_char[j] = *src_temp; + src_temp++; + } + *(int *)(dest_temp + offset) = transfer.data_int; + wmb();/* memory barriers. */ + offset += KBOX_RW_UNIT; + } + + if (left_len != 0) { + transfer.data_int = *(int *)(dest_temp + offset); + rmb();/* memory barriers. */ + for (idx = 0; idx < left_len; idx++) { + if (!src_temp) + return; + + transfer.data_char[idx] = *src_temp; + src_temp++; + } + *(int *)(dest_temp + offset) = transfer.data_int; + wmb();/* memory barriers. */ + } + + udelay(1); +} + +void kbox_read_from_pci(void *dest, void __iomem *src, int len, + unsigned long offset) +{ + union char_int_transfer_u transfer = { }; + int idx = 0; + int j = 0; + int four_byte_len = 0; + int left_len = 0; + char *dest_temp = (char *)dest; + char *src_temp = (char *)src; + int first_read_num = 0; + + if ((offset % KBOX_RW_UNIT) != 0) { + transfer.data_int = + *(int *)(src_temp + offset - (offset % KBOX_RW_UNIT)); + first_read_num = + ((len + (offset % KBOX_RW_UNIT)) > + KBOX_RW_UNIT) ? (KBOX_RW_UNIT - + (offset % KBOX_RW_UNIT)) : len; + rmb();/* memory barriers. */ + for (idx = (int)(offset % KBOX_RW_UNIT); + idx < (int)(first_read_num + (offset % KBOX_RW_UNIT)); + idx++) { + if (!dest_temp) + return; + + *dest_temp = transfer.data_char[idx]; + dest_temp++; + } + len -= first_read_num; + offset += first_read_num; + } + + four_byte_len = (len / KBOX_RW_UNIT); + left_len = (len % KBOX_RW_UNIT); + for (idx = 0; idx < four_byte_len; idx++) { + transfer.data_int = *(int *)(src_temp + offset); + rmb();/* memory barriers. */ + for (j = 0; j < KBOX_RW_UNIT; j++) { + if (!dest_temp) + return; + + *dest_temp = transfer.data_char[j]; + dest_temp++; + } + offset += KBOX_RW_UNIT; + } + + if (left_len != 0) { + transfer.data_int = *(int *)(src_temp + offset); + rmb();/* memory barriers. 
*/ + for (idx = 0; idx < left_len; idx++) { + if (!dest_temp) + return; + + *dest_temp = transfer.data_char[idx]; + dest_temp++; + } + } +} + +void kbox_memset_pci(void __iomem *dest, const char set_byte, int len, + unsigned long offset) +{ + union char_int_transfer_u transfer = { }; + int idx = 0; + int four_byte_len = 0; + int left_len = 0; + char *dest_temp = (char *)dest; + int first_memset_num = 0; + + if ((offset % KBOX_RW_UNIT) != 0) { + transfer.data_int = + *(int *)(dest_temp + offset - (offset % KBOX_RW_UNIT)); + rmb();/* memory barriers. */ + first_memset_num = + ((len + (offset % KBOX_RW_UNIT)) > + KBOX_RW_UNIT) ? (KBOX_RW_UNIT - + (offset % KBOX_RW_UNIT)) : len; + for (idx = (int)(offset % KBOX_RW_UNIT); + idx < (int)(first_memset_num + (offset % KBOX_RW_UNIT)); + idx++) { + transfer.data_char[idx] = set_byte; + } + *(int *)(dest_temp + offset - (offset % KBOX_RW_UNIT)) = + transfer.data_int; + wmb();/* memory barriers. */ + len -= first_memset_num; + offset += first_memset_num; + } + + four_byte_len = (len / KBOX_RW_UNIT); + left_len = (len % KBOX_RW_UNIT); + for (idx = 0; idx < KBOX_RW_UNIT; idx++) + transfer.data_char[idx] = set_byte; + + for (idx = 0; idx < four_byte_len; idx++) { + *(int *)(dest_temp + offset) = transfer.data_int; + wmb();/* memory barriers. */ + offset += KBOX_RW_UNIT; + } + + if (left_len != 0) { + transfer.data_int = *(int *)(dest_temp + offset); + rmb();/* memory barriers. */ + for (idx = 0; idx < left_len; idx++) + transfer.data_char[idx] = set_byte; + + *(int *)(dest_temp + offset) = transfer.data_int; + wmb();/* memory barriers. */ + } + + udelay(1); +} + +int kbox_read_from_ram(unsigned long offset, unsigned int count, char *data, + enum kbox_section_e section) +{ + unsigned int read_len_total = count; + unsigned long offset_temp = offset; + void __iomem *kbox_section_addr = kbox_get_section_addr(section); + unsigned long kbox_section_len = kbox_get_section_len(section); + unsigned int read_len_real = 0; + + if (!data) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + if (!kbox_section_addr || kbox_section_len == 0) { + KBOX_MSG("get kbox_section_addr or kbox_section_len failed!\n"); + return -EFAULT; + } + + if (offset >= kbox_section_len) { + KBOX_MSG("input offset is error!\n"); + return -EFAULT; + } + + if ((offset + count) > kbox_section_len) + read_len_total = (unsigned int)(kbox_section_len - offset); + + while (1) { + unsigned int read_bytes = 0; + + if (read_len_real >= count) + break; + + read_bytes = + (read_len_total > + TEMP_BUF_SIZE) ? 
TEMP_BUF_SIZE : read_len_total; + + kbox_read_from_pci(data, kbox_section_addr, read_bytes, + offset_temp); + + read_len_total -= read_bytes; + read_len_real += read_bytes; + data += read_bytes; + offset_temp += read_bytes; + } + + return (int)read_len_real; +} + +int kbox_write_to_ram(unsigned long offset, unsigned int count, + const char *data, enum kbox_section_e section) +{ + unsigned int write_len_total = count; + unsigned long offset_temp = offset; + void __iomem *kbox_section_addr = kbox_get_section_addr(section); + unsigned long kbox_section_len = kbox_get_section_len(section); + unsigned int write_len_real = 0; + + if (!data) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + if (!kbox_section_addr || kbox_section_len == 0) { + KBOX_MSG("get kbox_section_addr or kbox_section_len failed!\n"); + return -EFAULT; + } + + if (offset >= kbox_section_len) { + KBOX_MSG("input offset is error!\n"); + return -EFAULT; + } + + if ((offset + count) > kbox_section_len) + write_len_total = (unsigned int)(kbox_section_len - offset); + + KBOX_MSG("struct image_super_block_s = %x\n", count); + while (1) { + unsigned int write_bytes = 0; + + if (write_len_real >= count) { + KBOX_MSG("write_len_real = %x\n", write_len_real); + break; + } + KBOX_MSG("write_len_total = %x\n", write_len_total); + + write_bytes = + (write_len_total > + TEMP_BUF_SIZE) ? TEMP_BUF_SIZE : write_len_total; + KBOX_MSG("write_bytes = %x\n", write_bytes); + + kbox_write_to_pci(kbox_section_addr, data, write_bytes, + offset_temp); + + write_len_total -= write_bytes; + write_len_real += write_bytes; + data += write_bytes; + offset_temp += write_bytes; + } + + return (int)write_len_real; +} + +int kbox_memset_ram(unsigned long offset, unsigned int count, + const char set_byte, enum kbox_section_e section) +{ + unsigned int memset_len = count; + void __iomem *kbox_section_addr = kbox_get_section_addr(section); + unsigned long kbox_section_len = kbox_get_section_len(section); + + if (!kbox_section_addr || kbox_section_len == 0) { + KBOX_MSG("get kbox_section_addr or kbox_section_len failed!\n"); + return -EFAULT; + } + + if (offset >= kbox_section_len) { + KBOX_MSG("input offset is error!\n"); + return -EFAULT; + } + + if ((offset + count) > kbox_section_len) + memset_len = (unsigned int)(kbox_section_len - offset); + + kbox_memset_pci(kbox_section_addr, set_byte, memset_len, offset); + + return KBOX_TRUE; +} + +int kbox_read_op(long long offset, unsigned int count, char __user *data, + enum kbox_section_e section) +{ + unsigned int read_bytes = 0; + unsigned int read_len = 0; + unsigned int left_len = count; + char *user_buf = data; + char *temp_buf_char = NULL; + unsigned long offset_tmp = offset; + + if (!data) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + if (down_interruptible(&user_sem) != 0) + return KBOX_FALSE; + + temp_buf_char = kmalloc(TEMP_BUF_DATA_SIZE, GFP_KERNEL); + if (!temp_buf_char) { + KBOX_MSG("kmalloc temp_buf_char fail!\n"); + up(&user_sem); + return -ENOMEM; + } + + memset((void *)temp_buf_char, 0, TEMP_BUF_DATA_SIZE); + + while (1) { + if (read_len >= count) + break; + + read_bytes = + (left_len > + TEMP_BUF_DATA_SIZE) ? 
TEMP_BUF_DATA_SIZE : left_len; + + if (kbox_read_from_ram + (offset_tmp, read_bytes, temp_buf_char, section) < 0) { + KBOX_MSG("kbox_read_from_ram fail!\n"); + break; + } + + if (copy_to_user(user_buf, temp_buf_char, read_bytes)) { + KBOX_MSG("copy_to_user fail!\n"); + break; + } + + left_len -= read_bytes; + read_len += read_bytes; + user_buf += read_bytes; + + offset_tmp += read_bytes; + memset((void *)temp_buf_char, 0, TEMP_BUF_DATA_SIZE); + + msleep(20); + } + + kfree(temp_buf_char); + + up(&user_sem); + + return (int)read_len; +} + +int kbox_write_op(long long offset, unsigned int count, + const char __user *data, enum kbox_section_e section) +{ + unsigned int write_len = 0; + unsigned int left_len = count; + const char *user_buf = data; + char *temp_buf_char = NULL; + unsigned long offset_tmp = offset; + + if (!data) { + KBOX_MSG("input NULL point!\n"); + return -EFAULT; + } + + if (down_interruptible(&user_sem) != 0) + return KBOX_FALSE; + + temp_buf_char = kmalloc(TEMP_BUF_DATA_SIZE, GFP_KERNEL); + if (!temp_buf_char) { + KBOX_MSG("kmalloc temp_buf_char fail!\n"); + up(&user_sem); + return -ENOMEM; + } + + memset((void *)temp_buf_char, 0, TEMP_BUF_DATA_SIZE); + + while (1) { + unsigned int write_bytes = 0; + + if (write_len >= count) + break; + + write_bytes = + (left_len > + TEMP_BUF_DATA_SIZE) ? TEMP_BUF_DATA_SIZE : left_len; + + if (copy_from_user(temp_buf_char, user_buf, write_bytes)) { + KBOX_MSG("copy_from_user fail!\n"); + break; + } + + if (kbox_write_to_ram + (offset_tmp, write_bytes, temp_buf_char, section) < 0) { + KBOX_MSG("kbox_write_to_ram fail!\n"); + break; + } + + left_len -= write_bytes; + write_len += write_bytes; + user_buf += write_bytes; + + offset_tmp += write_bytes; + memset((void *)temp_buf_char, 0, TEMP_BUF_DATA_SIZE); + + msleep(20); + } + + kfree(temp_buf_char); + + up(&user_sem); + + return (int)write_len; +} + +char kbox_checksum(const char *input_buf, unsigned int len) +{ + unsigned int idx = 0; + char checksum = 0; + + for (idx = 0; idx < len; idx++) + checksum += input_buf[idx]; + + return checksum; +} + +static int kbox_update_super_block(void) +{ + int write_len = 0; + + g_kbox_super_block.checksum = 0; + g_kbox_super_block.checksum = + ~((unsigned char) + kbox_checksum((char *)&g_kbox_super_block, + (unsigned int)sizeof(g_kbox_super_block))) + 1; + write_len = + kbox_write_to_ram(SECTION_KERNEL_OFFSET, + (unsigned int)sizeof(struct image_super_block_s), + (char *)&g_kbox_super_block, KBOX_SECTION_KERNEL); + if (write_len <= 0) { + KBOX_MSG("fail to write superblock data!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +int kbox_read_super_block(void) +{ + int read_len = 0; + + read_len = + kbox_read_from_ram(SECTION_KERNEL_OFFSET, + (unsigned int)sizeof(struct image_super_block_s), + (char *)&g_kbox_super_block, + KBOX_SECTION_KERNEL); + if (read_len != sizeof(struct image_super_block_s)) { + KBOX_MSG("fail to get superblock data!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +static unsigned char kbox_get_byte_order(void) +{ + unsigned short data_short = 0xB22B; + unsigned char *data_char = (unsigned char *)&data_short; + + return (unsigned char)((*data_char == 0xB2) ? 
KBOX_BIG_ENDIAN : + KBOX_LITTLE_ENDIAN); +} + +int kbox_super_block_init(void) +{ + int ret = 0; + + ret = kbox_read_super_block(); + if (ret != KBOX_TRUE) { + KBOX_MSG("kbox_read_super_block fail!\n"); + return ret; + } + + if (!VALID_IMAGE(&g_kbox_super_block) || + kbox_checksum((char *)&g_kbox_super_block, + (unsigned int)sizeof(g_kbox_super_block)) != 0) { + if (!VALID_IMAGE(&g_kbox_super_block)) { + memset((void *)&g_kbox_super_block, 0x00, + sizeof(struct image_super_block_s)); + } + + g_kbox_super_block.byte_order = kbox_get_byte_order(); + g_kbox_super_block.version = IMAGE_VER; + g_kbox_super_block.magic_flag = IMAGE_MAGIC; + } + + g_kbox_super_block.thread_ctrl_blk.thread_info_len = 0; + + if (kbox_update_super_block() != KBOX_TRUE) { + KBOX_MSG("kbox_update_super_block failed!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +static unsigned char kbox_get_write_slot_num(void) +{ + struct panic_ctrl_block_s *panic_ctrl_block = NULL; + unsigned int idx = 0; + unsigned char slot_num = 0; + unsigned char min_use_nums = 0; + + panic_ctrl_block = g_kbox_super_block.panic_ctrl_blk; + min_use_nums = panic_ctrl_block->use_nums; + + for (idx = 1; idx < SLOT_NUM; idx++) { + panic_ctrl_block++; + if (panic_ctrl_block->use_nums < min_use_nums) { + min_use_nums = panic_ctrl_block->use_nums; + slot_num = (unsigned char)idx; + } + } + + if (min_use_nums == MAX_USE_NUMS) { + panic_ctrl_block = g_kbox_super_block.panic_ctrl_blk; + for (idx = 0; idx < SLOT_NUM; idx++) { + panic_ctrl_block->use_nums = 1; + panic_ctrl_block++; + } + } + + return slot_num; +} + +static unsigned char kbox_get_new_record_number(void) +{ + struct panic_ctrl_block_s *panic_ctrl_block = NULL; + unsigned int idx = 0; + unsigned char max_number = 0; + + panic_ctrl_block = g_kbox_super_block.panic_ctrl_blk; + for (idx = 0; idx < SLOT_NUM; idx++) { + if (panic_ctrl_block->number >= max_number) + max_number = panic_ctrl_block->number; + + panic_ctrl_block++; + } + + return (unsigned char)((max_number + 1) % MAX_RECORD_NO); +} + +int kbox_write_panic_info(const char *input_data, unsigned int data_len) +{ + int write_len = 0; + unsigned int offset = 0; + struct panic_ctrl_block_s *panic_ctrl_block = NULL; + unsigned long time = get_seconds(); + unsigned char slot_num = 0; + unsigned long flags = 0; + + if (!input_data || data_len == 0) { + KBOX_MSG("input parameter error!\n"); + return KBOX_FALSE; + } + + if (data_len > SLOT_LENGTH) + data_len = SLOT_LENGTH; + + spin_lock_irqsave(&g_kbox_super_block_lock, flags); + + slot_num = kbox_get_write_slot_num(); + + panic_ctrl_block = &g_kbox_super_block.panic_ctrl_blk[slot_num]; + panic_ctrl_block->use_nums++; + + panic_ctrl_block->number = kbox_get_new_record_number(); + panic_ctrl_block->len = 0; + panic_ctrl_block->time = (unsigned int)time; + + g_kbox_super_block.panic_nums++; + + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + + offset = slot_num * SLOT_LENGTH; + write_len = + kbox_write_to_ram(offset, data_len, input_data, KBOX_SECTION_PANIC); + if (write_len <= 0) { + KBOX_MSG("fail to save panic information!\n"); + return KBOX_FALSE; + } + + spin_lock_irqsave(&g_kbox_super_block_lock, flags); + + panic_ctrl_block->len += (unsigned short)write_len; + + if (kbox_update_super_block() != KBOX_TRUE) { + KBOX_MSG("kbox_update_super_block failed!\n"); + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + return KBOX_FALSE; + } + + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + + return KBOX_TRUE; +} + +int kbox_write_thread_info(const char 
*input_data, unsigned int data_len) +{ + int write_len = 0; + unsigned int offset = 0; + unsigned long flags = 0; + unsigned int date_len_tmp = data_len; + + if (!input_data || date_len_tmp == 0) { + KBOX_MSG("input parameter error!\n"); + return KBOX_FALSE; + } + + spin_lock_irqsave(&g_kbox_super_block_lock, flags); + + offset = g_kbox_super_block.thread_ctrl_blk.thread_info_len; + write_len = + kbox_write_to_ram(offset, date_len_tmp, input_data, + KBOX_SECTION_THREAD); + if (write_len <= 0) { + KBOX_MSG("fail to save thread information!\n"); + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + return KBOX_FALSE; + } + + g_kbox_super_block.thread_ctrl_blk.thread_info_len += write_len; + + if (kbox_update_super_block() != KBOX_TRUE) { + KBOX_MSG("kbox_update_super_block failed!\n"); + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + return KBOX_FALSE; + } + + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + + return KBOX_TRUE; +} + +int kbox_read_printk_info(char *input_data, + struct printk_ctrl_block_tmp_s *printk_ctrl_block_tmp) +{ + int read_len = 0; + int printk_region = printk_ctrl_block_tmp->printk_region; + unsigned int len = 0; + + if (!input_data) { + KBOX_MSG("input parameter error!\n"); + return KBOX_FALSE; + } + + len = g_kbox_super_block.printk_ctrl_blk[printk_region].len; + if (len <= 0) { + printk_ctrl_block_tmp->end = 0; + printk_ctrl_block_tmp->valid_len = 0; + return KBOX_TRUE; + } + + read_len = + kbox_read_from_ram(0, len, input_data, + printk_ctrl_block_tmp->section); + if (read_len < 0) { + KBOX_MSG("fail to read printk information!(1)\n"); + return KBOX_FALSE; + } + + printk_ctrl_block_tmp->end = len; + printk_ctrl_block_tmp->valid_len = len; + + return KBOX_TRUE; +} + +int kbox_write_printk_info(const char *input_data, + struct printk_ctrl_block_tmp_s * + printk_ctrl_block_tmp) +{ + int write_len = 0; + int printk_region = printk_ctrl_block_tmp->printk_region; + unsigned long flags = 0; + unsigned int len = 0; + + if (!input_data) { + KBOX_MSG("input parameter error!\n"); + return KBOX_FALSE; + } + + len = printk_ctrl_block_tmp->valid_len; + write_len = + kbox_write_to_ram(0, len, input_data, + printk_ctrl_block_tmp->section); + if (write_len <= 0) { + KBOX_MSG("fail to save printk information!(1)\n"); + return KBOX_FALSE; + } + + spin_lock_irqsave(&g_kbox_super_block_lock, flags); + + g_kbox_super_block.printk_ctrl_blk[printk_region].len = len; + + if (kbox_update_super_block() != KBOX_TRUE) { + KBOX_MSG("kbox_update_super_block failed!\n"); + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + return KBOX_FALSE; + } + + spin_unlock_irqrestore(&g_kbox_super_block_lock, flags); + + return KBOX_TRUE; +} + +static int kbox_read_region(unsigned long arg) +{ + unsigned int read_len = 0; + struct kbox_region_arg_s region_arg = { }; + + if (copy_from_user + ((void *)®ion_arg, (void __user *)arg, + sizeof(struct kbox_region_arg_s))) { + KBOX_MSG("fail to copy_from_user!\n"); + return KBOX_FALSE; + } + + read_len = kbox_read_op((long long)region_arg.offset, region_arg.count, + (char __user *)region_arg.data, + KBOX_SECTION_ALL); + if (read_len <= 0) { + KBOX_MSG("fail to get kbox data!\n"); + return KBOX_FALSE; + } + + if (copy_to_user + ((void __user *)arg, (void *)®ion_arg, + sizeof(struct kbox_region_arg_s))) { + KBOX_MSG("fail to copy_to_user!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +static int kbox_writer_region(unsigned long arg) +{ + unsigned int write_len = 0; + struct kbox_region_arg_s region_arg = { }; + + if 
(copy_from_user + ((void *)®ion_arg, (void __user *)arg, + sizeof(struct kbox_region_arg_s))) { + KBOX_MSG("fail to copy_from_user!\n"); + return KBOX_FALSE; + } + + write_len = kbox_write_op((long long)region_arg.offset, + region_arg.count, + (char __user *)region_arg.data, + KBOX_SECTION_ALL); + if (write_len <= 0) { + KBOX_MSG("fail to write kbox data!\n"); + return KBOX_FALSE; + } + + if (copy_to_user + ((void __user *)arg, (void *)®ion_arg, + sizeof(struct kbox_region_arg_s))) { + KBOX_MSG("fail to copy_to_user!\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +int kbox_clear_region(enum kbox_section_e section) +{ + int ret = KBOX_TRUE; + unsigned long kbox_section_len = kbox_get_section_len(section); + + if (kbox_section_len == 0) { + KBOX_MSG("get kbox_section_len failed!\n"); + return -EFAULT; + } + + ret = kbox_memset_ram(0, (unsigned int)kbox_section_len, 0, section); + if (ret != KBOX_TRUE) { + KBOX_MSG("kbox_memset_ram failed!\n"); + return -EFAULT; + } + + return KBOX_TRUE; +} + +static int kbox_get_image_len(unsigned long arg) +{ + unsigned long __user *ptr = (unsigned long __user *)arg; + unsigned long kbox_len = 0; + + kbox_len = kbox_get_section_len(KBOX_SECTION_ALL); + if (kbox_len == 0) { + KBOX_MSG("kbox_get_section_len section all fail!\n"); + return -EFAULT; + } + + return put_user(kbox_len, ptr); +} + +static int kbox_get_user_region_len(unsigned long arg) +{ + unsigned long __user *ptr = (unsigned long __user *)arg; + unsigned long kbox_user_region_len = 0; + + kbox_user_region_len = kbox_get_section_len(KBOX_SECTION_USER); + if (kbox_user_region_len == 0) { + KBOX_MSG("kbox_get_section_len section user fail!\n"); + return -EFAULT; + } + + return put_user(kbox_user_region_len, ptr); +} + +static int kbox_ioctl_verify_cmd(unsigned int cmd, unsigned long arg) +{ + if (arg == 0 || (_IOC_TYPE(cmd) != KBOX_IOC_MAGIC)) + return KBOX_FALSE; + + if (_IOC_NR(cmd) > KBOX_IOC_MAXNR) + return KBOX_FALSE; + + if (!capable(CAP_SYS_ADMIN)) { + KBOX_MSG("permit error\n"); + return KBOX_FALSE; + } + + return KBOX_TRUE; +} + +int kbox_ioctl_detail(unsigned int cmd, unsigned long arg) +{ + if (kbox_ioctl_verify_cmd(cmd, arg) != KBOX_TRUE) + return -EFAULT; + + switch (cmd) { + case GET_KBOX_TOTAL_LEN: + return kbox_get_image_len(arg); + + case GET_KBOX_REGION_USER_LEN: + return kbox_get_user_region_len(arg); + + case KBOX_REGION_READ: + return kbox_read_region(arg); + + case KBOX_REGION_WRITE: + return kbox_writer_region(arg); + + case CLEAR_KBOX_REGION_ALL: + return kbox_clear_region(KBOX_SECTION_ALL); + + case CLEAR_KBOX_REGION_USER: + return kbox_clear_region(KBOX_SECTION_USER); + + default: + return -ENOTTY; + } +} + +int kbox_mmap_ram(struct file *pfile, struct vm_area_struct *vma, + enum kbox_section_e section) +{ + unsigned long kbox_section_phy_addr = + kbox_get_section_phy_addr(section); + unsigned long kbox_section_len = kbox_get_section_len(section); + unsigned long offset = 0; + unsigned long length = 0; + unsigned long vm_size = 0; + int ret = 0; + + UNUSED(pfile); + + if (kbox_section_phy_addr == 0 || kbox_section_len == 0) { + KBOX_MSG + ("get kbox_section_phy_addr or kbox_section_len failed!\n"); + return -EFAULT; + } + + offset = vma->vm_pgoff << PAGE_SHIFT; + vm_size = vma->vm_end - vma->vm_start; + + if (offset >= kbox_section_len) { + KBOX_MSG("vma offset is invalid!\n"); + return -ESPIPE; + } + + if (vma->vm_flags & VM_LOCKED) { + KBOX_MSG("vma is locked!\n"); + return -EPERM; + } + + length = kbox_section_len - offset; + if (vm_size > length) { + 
KBOX_MSG("vm_size is invalid!\n"); + return -ENOSPC; + } + + vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_IO; + + ret = remap_pfn_range(vma, + vma->vm_start, + (unsigned long)(kbox_section_phy_addr >> + PAGE_SHIFT), vm_size, + vma->vm_page_prot); + if (ret) { + KBOX_MSG("remap_pfn_range failed! ret = %d\n", ret); + return -EAGAIN; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.h new file mode 100644 index 0000000000000000000000000000000000000000..4a92c87de139a0ce9503de598ef3dd52698a6974 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _KBOX_RAM_OP_H_ +#define _KBOX_RAM_OP_H_ + +#include +#include +#include "kbox_printk.h" + +#define KBOX_IOC_MAGIC (0xB2) + +#define GET_KBOX_TOTAL_LEN _IOR(KBOX_IOC_MAGIC, 0, unsigned long) + +#define GET_KBOX_REGION_USER_LEN _IOR(KBOX_IOC_MAGIC, 1, unsigned long) + +#define CLEAR_KBOX_REGION_ALL _IO(KBOX_IOC_MAGIC, 2) + +#define CLEAR_KBOX_REGION_USER _IO(KBOX_IOC_MAGIC, 3) + +#define KBOX_REGION_READ _IOR(KBOX_IOC_MAGIC, 4, struct kbox_region_arg_s) + +#define KBOX_REGION_WRITE _IOW(KBOX_IOC_MAGIC, 5, struct kbox_region_arg_s) + +#define KBOX_IOC_MAXNR 6 + +#define TEMP_BUF_SIZE (32 * 1024) +#define TEMP_BUF_DATA_SIZE (128 * 1024) +#define KBOX_RW_UNIT 4 + +struct kbox_region_arg_s { + unsigned long offset; + unsigned int count; + char *data; +}; + +enum kbox_section_e; + +int kbox_read_op(long long offset, unsigned int count, char __user *data, + enum kbox_section_e section); +int kbox_write_op(long long offset, unsigned int count, + const char __user *data, enum kbox_section_e section); +int kbox_read_super_block(void); +int kbox_super_block_init(void); +int kbox_write_panic_info(const char *input_data, unsigned int data_len); +int kbox_write_thread_info(const char *input_data, unsigned int data_len); +int kbox_write_printk_info(const char *input_data, + struct printk_ctrl_block_tmp_s + *printk_ctrl_block_tmp); +int kbox_read_printk_info(char *input_data, + struct printk_ctrl_block_tmp_s + *printk_ctrl_block_tmp); +int kbox_ioctl_detail(unsigned int cmd, unsigned long arg); +int kbox_mmap_ram(struct file *file, struct vm_area_struct *vma, + enum kbox_section_e section); +char kbox_checksum(const char *input_buf, unsigned int len); +int kbox_write_to_ram(unsigned long offset, unsigned int count, + const char *data, enum kbox_section_e section); +int kbox_read_from_ram(unsigned long offset, unsigned int count, char *data, + enum kbox_section_e section); +int kbox_clear_region(enum kbox_section_e section); +int kbox_memset_ram(unsigned long offset, unsigned int count, + const char set_byte, enum kbox_section_e section); + +#endif diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/Makefile b/drivers/net/ethernet/huawei/bma/veth_drv/Makefile new file mode 100644 index 
0000000000000000000000000000000000000000..c9ab07371ef4cc671f32bdf5cb88bc79402fc83a --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/veth_drv/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BMA) += host_veth_drv.o +host_veth_drv-y := veth_hb.o \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c new file mode 100644 index 0000000000000000000000000000000000000000..dd2764bd00ff61efe5f3a1706f87864383ffe318 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c @@ -0,0 +1,2515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2017, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "veth_hb.h" + +#define GET_QUEUE_STAT(node, stat) \ + ((node) ? ((char *)(node) + (stat)->stat_offset) : NULL) + +#define GET_SHM_QUEUE_STAT(node, stat) \ + (((node) && (node)->pshmqhd_v) ? \ + ((char *)(node)->pshmqhd_v + (stat)->stat_offset) : NULL) + +#define GET_STATS_VALUE(ptr, pstat) \ + ((ptr) ? (((pstat)->sizeof_stat == sizeof(u64)) ? \ + (*(u64 *)(ptr)) : (*(u32 *)(ptr))) : 0) + +#define GET_DMA_DIRECTION(type) \ + (((type) == BSPVETH_RX) ? BMC_TO_HOST : HOST_TO_BMC) + +#define CHECK_DMA_QUEUE_EMPTY(type, queue) \ + (((type) == BSPVETH_RX && \ + (queue)->pshmqhd_v->head == (queue)->pshmqhd_v->tail) || \ + ((type) != BSPVETH_RX && (queue)->head == (queue)->tail)) + +#define CHECK_DMA_RXQ_FAULT(queue, type, cnt) \ + ((queue)->dmal_cnt > 1 && (cnt) < ((queue)->work_limit / 2) && \ + (type) == BSPVETH_RX) + +static u32 veth_ethtool_get_link(struct net_device *dev); + +int debug; /* debug switch*/ +module_param_call(debug, &edma_param_set_debug, &param_get_int, &debug, 0644); + +MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)"); + +#define VETH_LOG(lv, fmt, args...) \ +do { \ + if (debug < (lv)) \ + continue; \ + if (lv == DLOG_DEBUG) \ + netdev_dbg(g_bspveth_dev.pnetdev, "%s(), %d, " \ + fmt, __func__, __LINE__, ## args); \ + else if (lv == DLOG_ERROR) \ + netdev_err(g_bspveth_dev.pnetdev, "%s(), %d, " \ + fmt, __func__, __LINE__, ## args); \ +} while (0) + +#ifdef __UT_TEST +u32 g_testdma; + +u32 g_testlbk; + +#endif + +struct bspveth_device g_bspveth_dev = {}; + +/* g_shutdown_flag is used to prevent veth_shutdown_task + * from being preempted by veth_dma_tx_timer_do_H. + * The default value is 0. The value 1 + * indicates that veth_shutdown_task cannot be preempted, + * and the value 0 indicates that veth_shutdown_task can be preempted. 
+ */ +static int g_shutdown_flag; +static int veth_int_handler(struct notifier_block *pthis, unsigned long ev, + void *unuse); + +static struct notifier_block g_veth_int_nb = { + .notifier_call = veth_int_handler, +}; + +static const struct veth_stats veth_gstrings_stats[] = { + {"rx_packets", NET_STATS, VETH_STAT_SIZE(stats.rx_packets), + VETH_STAT_OFFSET(stats.rx_packets)}, + {"rx_bytes", NET_STATS, VETH_STAT_SIZE(stats.rx_bytes), + VETH_STAT_OFFSET(stats.rx_bytes)}, + {"rx_dropped", NET_STATS, VETH_STAT_SIZE(stats.rx_dropped), + VETH_STAT_OFFSET(stats.rx_dropped)}, + {"rx_head", QUEUE_RX_STATS, QUEUE_TXRX_STAT_SIZE(head), + QUEUE_TXRX_STAT_OFFSET(head)}, + {"rx_tail", QUEUE_RX_STATS, QUEUE_TXRX_STAT_SIZE(tail), + QUEUE_TXRX_STAT_OFFSET(tail)}, + {"rx_next_to_fill", QUEUE_RX_STATS, + QUEUE_TXRX_STAT_SIZE(next_to_fill), + QUEUE_TXRX_STAT_OFFSET(next_to_fill)}, + {"rx_shmq_head", SHMQ_RX_STATS, SHMQ_TXRX_STAT_SIZE(head), + SHMQ_TXRX_STAT_OFFSET(head)}, + {"rx_shmq_tail", SHMQ_RX_STATS, SHMQ_TXRX_STAT_SIZE(tail), + SHMQ_TXRX_STAT_OFFSET(tail)}, + {"rx_shmq_next_to_free", SHMQ_RX_STATS, + SHMQ_TXRX_STAT_SIZE(next_to_free), + SHMQ_TXRX_STAT_OFFSET(next_to_free)}, + {"rx_queue_full", QUEUE_RX_STATS, + QUEUE_TXRX_STAT_SIZE(s.q_full), + QUEUE_TXRX_STAT_OFFSET(s.q_full)}, + {"rx_dma_busy", QUEUE_RX_STATS, + QUEUE_TXRX_STAT_SIZE(s.dma_busy), + QUEUE_TXRX_STAT_OFFSET(s.dma_busy)}, + {"rx_dma_failed", QUEUE_RX_STATS, + QUEUE_TXRX_STAT_SIZE(s.dma_failed), + QUEUE_TXRX_STAT_OFFSET(s.dma_failed)}, + + {"tx_packets", NET_STATS, VETH_STAT_SIZE(stats.tx_packets), + VETH_STAT_OFFSET(stats.tx_packets)}, + {"tx_bytes", NET_STATS, VETH_STAT_SIZE(stats.tx_bytes), + VETH_STAT_OFFSET(stats.tx_bytes)}, + {"tx_dropped", NET_STATS, VETH_STAT_SIZE(stats.tx_dropped), + VETH_STAT_OFFSET(stats.tx_dropped)}, + + {"tx_head", QUEUE_TX_STATS, QUEUE_TXRX_STAT_SIZE(head), + QUEUE_TXRX_STAT_OFFSET(head)}, + {"tx_tail", QUEUE_TX_STATS, QUEUE_TXRX_STAT_SIZE(tail), + QUEUE_TXRX_STAT_OFFSET(tail)}, + {"tx_next_to_free", QUEUE_TX_STATS, + QUEUE_TXRX_STAT_SIZE(next_to_free), + QUEUE_TXRX_STAT_OFFSET(next_to_free)}, + {"tx_shmq_head", SHMQ_TX_STATS, SHMQ_TXRX_STAT_SIZE(head), + SHMQ_TXRX_STAT_OFFSET(head)}, + {"tx_shmq_tail", SHMQ_TX_STATS, SHMQ_TXRX_STAT_SIZE(tail), + SHMQ_TXRX_STAT_OFFSET(tail)}, + {"tx_shmq_next_to_free", SHMQ_TX_STATS, + SHMQ_TXRX_STAT_SIZE(next_to_free), + SHMQ_TXRX_STAT_OFFSET(next_to_free)}, + + {"tx_queue_full", QUEUE_TX_STATS, + QUEUE_TXRX_STAT_SIZE(s.q_full), + QUEUE_TXRX_STAT_OFFSET(s.q_full)}, + {"tx_dma_busy", QUEUE_TX_STATS, + QUEUE_TXRX_STAT_SIZE(s.dma_busy), + QUEUE_TXRX_STAT_OFFSET(s.dma_busy)}, + {"tx_dma_failed", QUEUE_TX_STATS, + QUEUE_TXRX_STAT_SIZE(s.dma_failed), + QUEUE_TXRX_STAT_OFFSET(s.dma_failed)}, + + {"recv_int", VETH_STATS, VETH_STAT_SIZE(recv_int), + VETH_STAT_OFFSET(recv_int)}, + {"tobmc_int", VETH_STATS, VETH_STAT_SIZE(tobmc_int), + VETH_STAT_OFFSET(tobmc_int)}, +}; + +#define VETH_GLOBAL_STATS_LEN \ + (sizeof(veth_gstrings_stats) / sizeof(struct veth_stats)) + +static int veth_param_get_statics(char *buf, const struct kernel_param *kp) +{ + int len = 0; + int i = 0, j = 0, type = 0; + struct bspveth_rxtx_q *pqueue = NULL; + __kernel_time_t running_time = 0; + + if (!buf) + return 0; + + GET_SYS_SECONDS(running_time); + + running_time -= g_bspveth_dev.init_time; + + len += sprintf(buf + len, + "================VETH INFO=============\r\n"); + len += sprintf(buf + len, "[version ]:" VETH_VERSION "\n"); + len += sprintf(buf + len, "[link state ]:%d\n", + 
veth_ethtool_get_link(g_bspveth_dev.pnetdev)); + len += sprintf(buf + len, "[running_time]:%luD %02lu:%02lu:%02lu\n", + running_time / (SECONDS_PER_DAY), + running_time % (SECONDS_PER_DAY) / SECONDS_PER_HOUR, + running_time % SECONDS_PER_HOUR / SECONDS_PER_MINUTE, + running_time % SECONDS_PER_MINUTE); + len += sprintf(buf + len, + "[bspveth_dev ]:MAX_QUEUE_NUM :0x%-16x ", + MAX_QUEUE_NUM); + len += sprintf(buf + len, + "MAX_QUEUE_BDNUM :0x%-16x\r\n", MAX_QUEUE_BDNUM); + len += sprintf(buf + len, + "[bspveth_dev ]:pnetdev :0x%-16p ", + g_bspveth_dev.pnetdev); + len += sprintf(buf + len, + "ppcidev :0x%-16p\r\n", + g_bspveth_dev.ppcidev); + len += sprintf(buf + len, + "[bspveth_dev ]:pshmpool_p:0x%-16p ", + g_bspveth_dev.pshmpool_p); + len += sprintf(buf + len, + "pshmpool_v :0x%-16p\r\n", + g_bspveth_dev.pshmpool_v); + len += sprintf(buf + len, + "[bspveth_dev ]:shmpoolsize:0x%-16x ", + g_bspveth_dev.shmpoolsize); + len += sprintf(buf + len, + "g_veth_dbg_lv :0x%-16x\r\n", debug); + + for (i = 0; i < MAX_QUEUE_NUM; i++) { + for (j = 0, type = BSPVETH_RX; j < 2; j++, type++) { + if (type == BSPVETH_RX) { + pqueue = g_bspveth_dev.prx_queue[i]; + len += sprintf(buf + len, + "=============RXQUEUE STATIS============\r\n"); + } else { + pqueue = g_bspveth_dev.ptx_queue[i]; + len += sprintf(buf + len, + "=============TXQUEUE STATIS============\r\n"); + } + + if (!pqueue) { + len += sprintf(buf + len, "NULL\r\n"); + continue; + } + + len += sprintf(buf + len, + "QUEUE[%d]--[pkt ] :%lld\r\n", i, + pqueue->s.pkt); + len += sprintf(buf + len, + "QUEUE[%d]--[pktbyte ] :%lld\r\n", i, + pqueue->s.pktbyte); + len += sprintf(buf + len, + "QUEUE[%d]--[refill ] :%lld\r\n", i, + pqueue->s.refill); + len += sprintf(buf + len, + "QUEUE[%d]--[freetx ] :%lld\r\n", i, + pqueue->s.freetx); + len += sprintf(buf + len, + "QUEUE[%d]--[dmapkt ] :%lld\r\n", i, + pqueue->s.dmapkt); + len += sprintf(buf + len, + "QUEUE[%d]--[dmapktbyte ] :%lld\r\n", i, + pqueue->s.dmapktbyte); + len += sprintf(buf + len, + "QUEUE[%d]--[next_to_fill ] :%d\r\n", i, + pqueue->next_to_fill); + len += sprintf(buf + len, + "QUEUE[%d]--[next_to_free ] :%d\r\n", i, + pqueue->next_to_free); + len += sprintf(buf + len, + "QUEUE[%d]--[head ] :%d\r\n", i, + pqueue->head); + len += sprintf(buf + len, + "QUEUE[%d]--[tail ] :%d\r\n", i, + pqueue->tail); + len += sprintf(buf + len, + "QUEUE[%d]--[work_limit ] :%d\r\n", i, + pqueue->work_limit); + len += sprintf(buf + len, + "=================SHARE=================\r\n"); + len += sprintf(buf + len, + "QUEUE[%d]--[next_to_fill] :%d\r\n", i, + pqueue->pshmqhd_v->next_to_fill); + len += sprintf(buf + len, + "QUEUE[%d]--[next_to_free] :%d\r\n", i, + pqueue->pshmqhd_v->next_to_free); + len += sprintf(buf + len, + "QUEUE[%d]--[head ] :%d\r\n", i, + pqueue->pshmqhd_v->head); + len += sprintf(buf + len, + "QUEUE[%d]--[tail ] :%d\r\n", i, + pqueue->pshmqhd_v->tail); + len += sprintf(buf + len, + "=======================================\r\n"); + len += sprintf(buf + len, + "QUEUE[%d]--[dropped_pkt] :%d\r\n", i, + pqueue->s.dropped_pkt); + len += sprintf(buf + len, + "QUEUE[%d]--[netifrx_err] :%d\r\n", i, + pqueue->s.netifrx_err); + len += sprintf(buf + len, + "QUEUE[%d]--[null_point ] :%d\r\n", i, + pqueue->s.null_point); + len += sprintf(buf + len, + "QUEUE[%d]--[retry_err ] :%d\r\n", i, + pqueue->s.retry_err); + len += sprintf(buf + len, + "QUEUE[%d]--[allocskb_err ] :%d\r\n", + i, pqueue->s.allocskb_err); + len += sprintf(buf + len, + "QUEUE[%d]--[q_full ] :%d\r\n", i, + pqueue->s.q_full); + len += sprintf(buf 
+ len, + "QUEUE[%d]--[q_emp ] :%d\r\n", i, + pqueue->s.q_emp); + len += sprintf(buf + len, + "QUEUE[%d]--[need_fill ] :%d\r\n", i, + pqueue->s.need_fill); + len += sprintf(buf + len, + "QUEUE[%d]--[need_free ] :%d\r\n", i, + pqueue->s.need_free); + len += sprintf(buf + len, + "QUEUE[%d]--[type_err ] :%d\r\n", i, + pqueue->s.type_err); + len += sprintf(buf + len, + "QUEUE[%d]--[shm_full ] :%d\r\n", i, + pqueue->s.shm_full); + len += sprintf(buf + len, + "QUEUE[%d]--[shm_emp ] :%d\r\n", i, + pqueue->s.shm_emp); + len += sprintf(buf + len, + "QUEUE[%d]--[shmretry_err ] :%d\r\n", i, + pqueue->s.shmretry_err); + len += sprintf(buf + len, + "QUEUE[%d]--[shmqueue_noinit] :%d\r\n", + i, pqueue->s.shmqueue_noinit); + len += sprintf(buf + len, + "QUEUE[%d]--[dma_busy ] :%d\r\n", i, + pqueue->s.dma_busy); + len += sprintf(buf + len, + "QUEUE[%d]--[dma_mapping_err] :%d\r\n", + i, pqueue->s.dma_mapping_err); + len += sprintf(buf + len, + "QUEUE[%d]--[dma_failed ] :%d\r\n", i, + pqueue->s.dma_failed); + len += sprintf(buf + len, + "QUEUE[%d]--[dma_burst ] :%d\r\n", i, + pqueue->s.dma_burst); + len += sprintf(buf + len, + "QUEUE[%d]--[lbk_cnt ] :%d\r\n", i, + pqueue->s.lbk_cnt); + len += sprintf(buf + len, + "QUEUE[%d]--[dma_need_offset] :%d\r\n", + i, pqueue->s.dma_need_offset); + len += sprintf(buf + len, + "QUEUE[%d]--[lbk_txerr ] :%d\r\n", i, + pqueue->s.lbk_txerr); + } + } + + len += sprintf(buf + len, "=============BSPVETH STATIS===========\r\n"); + len += sprintf(buf + len, + "[bspveth_dev]:run_dma_rx_task:0x%-8x(%d)\r\n", + g_bspveth_dev.run_dma_rx_task, + g_bspveth_dev.run_dma_rx_task); + len += sprintf(buf + len, + "[bspveth_dev]:run_dma_tx_task:0x%-8x(%d)\r\n", + g_bspveth_dev.run_dma_tx_task, + g_bspveth_dev.run_dma_tx_task); + len += sprintf(buf + len, + "[bspveth_dev]:run_skb_rx_task:0x%-8x(%d)\r\n", + g_bspveth_dev.run_skb_rx_task, + g_bspveth_dev.run_skb_rx_task); + len += sprintf(buf + len, + "[bspveth_dev]:run_skb_fr_task:0x%-8x(%d)\r\n", + g_bspveth_dev.run_skb_fr_task, + g_bspveth_dev.run_skb_fr_task); + len += sprintf(buf + len, + "[bspveth_dev]:recv_int :0x%-8x(%d)\r\n", + g_bspveth_dev.recv_int, g_bspveth_dev.recv_int); + len += sprintf(buf + len, + "[bspveth_dev]:tobmc_int :0x%-8x(%d)\r\n", + g_bspveth_dev.tobmc_int, + g_bspveth_dev.tobmc_int); + len += sprintf(buf + len, + "[bspveth_dev]:shutdown_cnt :0x%-8x(%d)\r\n", + g_bspveth_dev.shutdown_cnt, + g_bspveth_dev.shutdown_cnt); + + return len; +} + +module_param_call(statistics, NULL, veth_param_get_statics, &debug, 0444); + +MODULE_PARM_DESC(statistics, "Statistics info of veth driver,readonly"); + +static void veth_reset_dma(int type) +{ + if (type == BSPVETH_RX) + bma_intf_reset_dma(BMC_TO_HOST); + else if (type == BSPVETH_TX) + bma_intf_reset_dma(HOST_TO_BMC); + else + return; +} + +s32 bspveth_setup_tx_resources(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *ptx_queue) +{ + unsigned int size; + + if (!pvethdev || !ptx_queue) + return BSP_ERR_NULL_POINTER; + + ptx_queue->count = MAX_QUEUE_BDNUM; + + size = sizeof(struct bspveth_bd_info) * ptx_queue->count; + ptx_queue->pbdinfobase_v = vmalloc(size); + if (!ptx_queue->pbdinfobase_v) + goto alloc_failed; + + memset(ptx_queue->pbdinfobase_v, 0, size); + + /* round up to nearest 4K */ + ptx_queue->size = ptx_queue->count * sizeof(struct bspveth_bd_info); + ptx_queue->size = ALIGN(ptx_queue->size, 4096); + + /* prepare 4096 send buffer */ + ptx_queue->pbdbase_v = kmalloc(ptx_queue->size, GFP_KERNEL); + if (!ptx_queue->pbdbase_v) { + VETH_LOG(DLOG_ERROR, + "Unable to 
kmalloc for the receive descriptor ring\n"); + + vfree(ptx_queue->pbdinfobase_v); + ptx_queue->pbdinfobase_v = NULL; + + goto alloc_failed; + } + + ptx_queue->pbdbase_p = (u8 *)(__pa((BSP_VETH_T)(ptx_queue->pbdbase_v))); + + ptx_queue->next_to_fill = 0; + ptx_queue->next_to_free = 0; + ptx_queue->head = 0; + ptx_queue->tail = 0; + ptx_queue->work_limit = BSPVETH_WORK_LIMIT; + + memset(&ptx_queue->s, 0, sizeof(struct bspveth_rxtx_statis)); + + return 0; + +alloc_failed: + return -ENOMEM; +} + +void bspveth_free_tx_resources(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *ptx_queue) +{ + unsigned int i; + unsigned long size; + struct bspveth_bd_info *pbdinfobase_v = NULL; + struct sk_buff *skb = NULL; + + if (!ptx_queue || !pvethdev) + return; + + pbdinfobase_v = ptx_queue->pbdinfobase_v; + if (!pbdinfobase_v) + return; + + for (i = 0; i < ptx_queue->count; i++) { + skb = pbdinfobase_v[i].pdma_v; + if (skb) + dev_kfree_skb_any(skb); + + pbdinfobase_v[i].pdma_v = NULL; + } + + size = sizeof(struct bspveth_bd_info) * ptx_queue->count; + memset(ptx_queue->pbdinfobase_v, 0, size); + memset(ptx_queue->pbdbase_v, 0, ptx_queue->size); + + ptx_queue->next_to_fill = 0; + ptx_queue->next_to_free = 0; + ptx_queue->head = 0; + ptx_queue->tail = 0; + + vfree(ptx_queue->pbdinfobase_v); + ptx_queue->pbdinfobase_v = NULL; + + kfree(ptx_queue->pbdbase_v); + ptx_queue->pbdbase_v = NULL; + + VETH_LOG(DLOG_DEBUG, "bspveth free tx resources ok, count=%d\n", + ptx_queue->count); +} + +s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev) +{ + int qid = 0; + int i = 0; + int err = 0; + u8 *shmq_head_p = NULL; + struct bspveth_shmq_hd *shmq_head = NULL; + + if (!pvethdev) + return BSP_ERR_NULL_POINTER; + for (qid = 0; qid < MAX_QUEUE_NUM; qid++) { + pvethdev->ptx_queue[qid] = + kmalloc(sizeof(*pvethdev->ptx_queue[qid]), + GFP_KERNEL); + if (!pvethdev->ptx_queue[qid]) { + VETH_LOG(DLOG_ERROR, + "kmalloc failed for ptx_queue[%d]\n", qid); + err = -1; + goto failed; + } + memset(pvethdev->ptx_queue[qid], + 0, sizeof(struct bspveth_rxtx_q)); + shmq_head = (struct bspveth_shmq_hd *)(pvethdev->pshmpool_v + + MAX_SHAREQUEUE_SIZE * (qid)); + pvethdev->ptx_queue[qid]->pshmqhd_v = shmq_head; + shmq_head_p = pvethdev->pshmpool_p + MAX_SHAREQUEUE_SIZE * qid; + pvethdev->ptx_queue[qid]->pshmqhd_p = shmq_head_p; + + pvethdev->ptx_queue[qid]->pshmbdbase_v = + (struct bspveth_dma_shmbd *)((BSP_VETH_T)(shmq_head) + + BSPVETH_SHMBDBASE_OFFSET); + pvethdev->ptx_queue[qid]->pshmbdbase_p = + (u8 *)((BSP_VETH_T)(shmq_head_p) + + BSPVETH_SHMBDBASE_OFFSET); + pvethdev->ptx_queue[qid]->pdmalbase_v = + (struct bspveth_dmal *)((BSP_VETH_T)(shmq_head) + + SHMDMAL_OFFSET); + pvethdev->ptx_queue[qid]->pdmalbase_p = + (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC + + MAX_SHAREQUEUE_SIZE * qid + + SHMDMAL_OFFSET); + + memset(pvethdev->ptx_queue[qid]->pdmalbase_v, + 0, MAX_SHMDMAL_SIZE); + + err = bspveth_setup_tx_resources(pvethdev, + pvethdev->ptx_queue[qid]); + if (err) { + pvethdev->ptx_queue[qid]->pshmqhd_v = NULL; + kfree(pvethdev->ptx_queue[qid]); + pvethdev->ptx_queue[i] = NULL; + VETH_LOG(DLOG_ERROR, + "Allocation for Tx Queue %u failed\n", qid); + + goto failed; + } + } + + return 0; +failed: + for (i = 0; i < MAX_QUEUE_NUM; i++) { + bspveth_free_tx_resources(pvethdev, pvethdev->ptx_queue[i]); + kfree(pvethdev->ptx_queue[i]); + pvethdev->ptx_queue[i] = NULL; + } + + return err; +} + +void bspveth_free_all_tx_resources(struct bspveth_device *pvethdev) +{ + int i; + + if (!pvethdev) + return; + + for (i = 0; i < 
MAX_QUEUE_NUM; i++) { + if (pvethdev->ptx_queue[i]) + bspveth_free_tx_resources(pvethdev, + pvethdev->ptx_queue[i]); + + kfree(pvethdev->ptx_queue[i]); + pvethdev->ptx_queue[i] = NULL; + } +} + +s32 veth_alloc_one_rx_skb(struct bspveth_rxtx_q *prx_queue, int idx) +{ + dma_addr_t dma = 0; + struct sk_buff *skb; + struct bspveth_bd_info *pbdinfobase_v = NULL; + struct bspveth_dma_bd *pbdbase_v = NULL; + + pbdinfobase_v = prx_queue->pbdinfobase_v; + pbdbase_v = prx_queue->pbdbase_v; + + skb = netdev_alloc_skb(g_bspveth_dev.pnetdev, + BSPVETH_SKB_SIZE + BSPVETH_CACHELINE_SIZE); + if (!skb) { + VETH_LOG(DLOG_ERROR, "netdev_alloc_skb failed\n"); + return -ENOMEM; + } + + /* advance the data pointer to the next cache line */ + skb_reserve(skb, PTR_ALIGN(skb->data, + BSPVETH_CACHELINE_SIZE) - skb->data); + + dma = dma_map_single(&g_bspveth_dev.ppcidev->dev, + skb->data, BSPVETH_SKB_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&g_bspveth_dev.ppcidev->dev, dma)) { + VETH_LOG(DLOG_ERROR, "dma_mapping_error failed\n"); + dev_kfree_skb_any(skb); + return -EFAULT; + } + +#ifdef __UT_TEST + if (g_testdma) + VETH_LOG(DLOG_ERROR, + "[refill]:dma=0x%llx,skb=%p,skb->len=%d\r\n", + dma, skb, skb->len); +#endif + + pbdinfobase_v[idx].pdma_v = skb; + pbdinfobase_v[idx].len = BSPVETH_SKB_SIZE; + + pbdbase_v[idx].dma_p = dma; + pbdbase_v[idx].len = BSPVETH_SKB_SIZE; + + return 0; +} + +s32 veth_refill_rxskb(struct bspveth_rxtx_q *prx_queue, int queue) +{ + int i, work_limit; + unsigned int next_to_fill, tail; + int ret = BSP_OK; + + if (!prx_queue) + return BSP_ERR_AGAIN; + + work_limit = prx_queue->work_limit; + next_to_fill = prx_queue->next_to_fill; + tail = prx_queue->tail; + + for (i = 0; i < work_limit; i++) { + if (!JUDGE_RX_QUEUE_SPACE(next_to_fill, tail, 1)) + break; + + ret = veth_alloc_one_rx_skb(prx_queue, next_to_fill); + if (ret) + break; + + g_bspveth_dev.prx_queue[queue]->s.refill++; + next_to_fill = (next_to_fill + 1) & BSPVETH_POINT_MASK; + } + + mb();/* memory barriers. */ + prx_queue->next_to_fill = next_to_fill; + + tail = prx_queue->tail; + if (JUDGE_RX_QUEUE_SPACE(next_to_fill, tail, 1)) { + VETH_LOG(DLOG_DEBUG, "next_to_fill(%d) != tail(%d)\n", + next_to_fill, tail); + + return BSP_ERR_AGAIN; + } + + return 0; +} + +s32 bspveth_setup_rx_skb(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *prx_queue) +{ + u32 idx; + int ret = 0; + + if (!pvethdev || !prx_queue) + return BSP_ERR_NULL_POINTER; + + VETH_LOG(DLOG_DEBUG, "waite setup rx skb ,count=%d\n", + prx_queue->count); + + for (idx = 0; idx < prx_queue->count - 1; idx++) { + ret = veth_alloc_one_rx_skb(prx_queue, idx); + if (ret) + break; + } + + if (!idx) /* Can't alloc even one packets */ + return -EFAULT; + + mb();/* memory barriers. 
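Ensure the RX descriptor and skb pointer writes above are visible before next_to_fill is published.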
*/ + prx_queue->next_to_fill = idx; + + VETH_LOG(DLOG_DEBUG, "prx_queue->next_to_fill=%d\n", + prx_queue->next_to_fill); + + VETH_LOG(DLOG_DEBUG, "setup rx skb ok, count=%d\n", prx_queue->count); + + return BSP_OK; +} + +void bspveth_free_rx_skb(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *prx_queue) +{ + u32 i = 0; + struct bspveth_bd_info *pbdinfobase_v = NULL; + struct bspveth_dma_bd *pbdbase_v = NULL; + struct sk_buff *skb = NULL; + + if (!pvethdev || !prx_queue) + return; + + pbdinfobase_v = prx_queue->pbdinfobase_v; + pbdbase_v = prx_queue->pbdbase_v; + if (!pbdinfobase_v || !pbdbase_v) + return; + + /* Free all the Rx ring pages */ + for (i = 0; i < prx_queue->count; i++) { + skb = pbdinfobase_v[i].pdma_v; + if (!skb) + continue; + + dma_unmap_single(&g_bspveth_dev.ppcidev->dev, + pbdbase_v[i].dma_p, BSPVETH_SKB_SIZE, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + + pbdinfobase_v[i].pdma_v = NULL; + } + + prx_queue->next_to_fill = 0; +} + +s32 bspveth_setup_all_rx_skb(struct bspveth_device *pvethdev) +{ + int qid, i, err = BSP_OK; + + if (!pvethdev) + return BSP_ERR_NULL_POINTER; + + for (qid = 0; qid < MAX_QUEUE_NUM; qid++) { + err = bspveth_setup_rx_skb(pvethdev, pvethdev->prx_queue[qid]); + if (err) { + VETH_LOG(DLOG_ERROR, "queue[%d]setup RX skb failed\n", + qid); + goto failed; + } + + VETH_LOG(DLOG_DEBUG, "queue[%d] bspveth_setup_rx_skb ok\n", + qid); + } + + return 0; + +failed: + for (i = 0; i < MAX_QUEUE_NUM; i++) + bspveth_free_rx_skb(pvethdev, pvethdev->prx_queue[i]); + + return err; +} + +void bspveth_free_all_rx_skb(struct bspveth_device *pvethdev) +{ + int qid; + + if (!pvethdev) + return; + + /* Free all the Rx ring pages */ + for (qid = 0; qid < MAX_QUEUE_NUM; qid++) + bspveth_free_rx_skb(pvethdev, pvethdev->prx_queue[qid]); +} + +s32 bspveth_setup_rx_resources(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *prx_queue) +{ + int size; + + if (!pvethdev || !prx_queue) + return BSP_ERR_NULL_POINTER; + + prx_queue->count = MAX_QUEUE_BDNUM; + size = sizeof(*prx_queue->pbdinfobase_v) * prx_queue->count; + prx_queue->pbdinfobase_v = vmalloc(size); + if (!prx_queue->pbdinfobase_v) { + VETH_LOG(DLOG_ERROR, + "Unable to vmalloc for the receive descriptor ring\n"); + + goto alloc_failed; + } + + memset(prx_queue->pbdinfobase_v, 0, size); + + /* Round up to nearest 4K */ + prx_queue->size = prx_queue->count * sizeof(*prx_queue->pbdbase_v); + prx_queue->size = ALIGN(prx_queue->size, 4096); + prx_queue->pbdbase_v = kmalloc(prx_queue->size, GFP_ATOMIC); + if (!prx_queue->pbdbase_v) { + VETH_LOG(DLOG_ERROR, + "Unable to kmalloc for the receive descriptor ring\n"); + + vfree(prx_queue->pbdinfobase_v); + prx_queue->pbdinfobase_v = NULL; + + goto alloc_failed; + } + + prx_queue->pbdbase_p = (u8 *)__pa((BSP_VETH_T) (prx_queue->pbdbase_v)); + + prx_queue->next_to_fill = 0; + prx_queue->next_to_free = 0; + prx_queue->head = 0; + prx_queue->tail = 0; + + prx_queue->work_limit = BSPVETH_WORK_LIMIT; + + memset(&prx_queue->s, 0, sizeof(struct bspveth_rxtx_statis)); + + return 0; + +alloc_failed: + return -ENOMEM; +} + +void bspveth_free_rx_resources(struct bspveth_device *pvethdev, + struct bspveth_rxtx_q *prx_queue) +{ + unsigned long size; + struct bspveth_bd_info *pbdinfobase_v = NULL; + + if (!pvethdev || !prx_queue) + return; + + pbdinfobase_v = prx_queue->pbdinfobase_v; + if (!pbdinfobase_v) + return; + + if (!prx_queue->pbdbase_v) + return; + + size = sizeof(struct bspveth_bd_info) * prx_queue->count; + memset(prx_queue->pbdinfobase_v, 0, size); + + /* Zero out 
the descriptor ring */ + memset(prx_queue->pbdbase_v, 0, prx_queue->size); + + vfree(prx_queue->pbdinfobase_v); + prx_queue->pbdinfobase_v = NULL; + + kfree(prx_queue->pbdbase_v); + prx_queue->pbdbase_v = NULL; + + VETH_LOG(DLOG_DEBUG, "bspveth free rx resources ok!!count=%d\n", + prx_queue->count); +} + +s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev) +{ + int qid, i, err = 0; + struct bspveth_shmq_hd *shmq_head = NULL; + u8 *shmq_head_p = NULL; + + if (!pvethdev) + return BSP_ERR_NULL_POINTER; + + for (qid = 0; qid < MAX_QUEUE_NUM; qid++) { + pvethdev->prx_queue[qid] = + kmalloc(sizeof(*pvethdev->prx_queue[qid]), GFP_KERNEL); + if (!pvethdev->prx_queue[qid]) { + VETH_LOG(DLOG_ERROR, + "kmalloc failed for prx_queue[%d]\n", qid); + + goto failed; + } + + memset(pvethdev->prx_queue[qid], 0, + sizeof(struct bspveth_rxtx_q)); + + shmq_head = (struct bspveth_shmq_hd *)(pvethdev->pshmpool_v + + MAX_SHAREQUEUE_SIZE * (qid + 1)); + + pvethdev->prx_queue[qid]->pshmqhd_v = shmq_head; + shmq_head_p = + pvethdev->pshmpool_p + MAX_SHAREQUEUE_SIZE * (qid + 1); + pvethdev->prx_queue[qid]->pshmqhd_p = shmq_head_p; + pvethdev->prx_queue[qid]->pshmbdbase_v = + (struct bspveth_dma_shmbd *)((BSP_VETH_T)(shmq_head) + + BSPVETH_SHMBDBASE_OFFSET); + pvethdev->prx_queue[qid]->pshmbdbase_p = + (u8 *)((BSP_VETH_T)(shmq_head_p) + + BSPVETH_SHMBDBASE_OFFSET); + pvethdev->prx_queue[qid]->pdmalbase_v = + (struct bspveth_dmal *)((BSP_VETH_T)(shmq_head) + + SHMDMAL_OFFSET); + pvethdev->prx_queue[qid]->pdmalbase_p = + (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC + + MAX_SHAREQUEUE_SIZE * (qid + 1) + + SHMDMAL_OFFSET); + memset(pvethdev->prx_queue[qid]->pdmalbase_v, 0, + MAX_SHMDMAL_SIZE); + + err = bspveth_setup_rx_resources(pvethdev, + pvethdev->prx_queue[qid]); + if (err) { + VETH_LOG(DLOG_ERROR, + "Allocation for Rx Queue %u failed\n", qid); + + goto failed; + } + } + + return 0; +failed: + for (i = 0; i < MAX_QUEUE_NUM; i++) { + bspveth_free_rx_resources(pvethdev, pvethdev->prx_queue[i]); + kfree(pvethdev->prx_queue[i]); + pvethdev->prx_queue[i] = NULL; + } + return err; +} + +void bspveth_free_all_rx_resources(struct bspveth_device *pvethdev) +{ + int i; + + if (!pvethdev) + return; + + for (i = 0; i < MAX_QUEUE_NUM; i++) { + if (pvethdev->prx_queue[i]) { + bspveth_free_rx_resources(pvethdev, + pvethdev->prx_queue[i]); + } + + kfree(pvethdev->prx_queue[i]); + pvethdev->prx_queue[i] = NULL; + } +} + +s32 bspveth_dev_install(void) +{ + int err; + + err = bspveth_setup_all_rx_resources(&g_bspveth_dev); + if (err != BSP_OK) { + err = -1; + goto err_setup_rx; + } + + err = bspveth_setup_all_tx_resources(&g_bspveth_dev); + if (err != BSP_OK) { + err = -1; + goto err_setup_tx; + } + + err = bspveth_setup_all_rx_skb(&g_bspveth_dev); + if (err != BSP_OK) { + err = -1; + goto err_setup_rx_skb; + } + + return BSP_OK; + +err_setup_rx_skb: + bspveth_free_all_tx_resources(&g_bspveth_dev); + +err_setup_tx: + bspveth_free_all_rx_resources(&g_bspveth_dev); + +err_setup_rx: + + return err; +} + +s32 bspveth_dev_uninstall(void) +{ + int err = BSP_OK; + + /* Free all the Rx ring pages */ + bspveth_free_all_rx_skb(&g_bspveth_dev); + + bspveth_free_all_tx_resources(&g_bspveth_dev); + + VETH_LOG(DLOG_DEBUG, "bspveth_free_all_tx_resources ok\n"); + + bspveth_free_all_rx_resources(&g_bspveth_dev); + + VETH_LOG(DLOG_DEBUG, "bspveth_free_all_rx_resources ok\n"); + + return err; +} + +s32 veth_open(struct net_device *pstr_dev) +{ + s32 ret = BSP_OK; + + if (!pstr_dev) + return -1; + + if (!g_bspveth_dev.pnetdev) + 
g_bspveth_dev.pnetdev = pstr_dev; + + ret = bspveth_dev_install(); + if (ret != BSP_OK) { + ret = -1; + goto failed1; + } + + veth_skbtimer_init(); + + veth_dmatimer_init_H(); + + ret = bma_intf_register_int_notifier(&g_veth_int_nb); + if (ret != BSP_OK) { + ret = -1; + goto failed2; + } + + bma_intf_set_open_status(g_bspveth_dev.bma_priv, DEV_OPEN); + + g_bspveth_dev.prx_queue[0]->pshmqhd_v->tail = + g_bspveth_dev.prx_queue[0]->pshmqhd_v->head; + + bma_intf_int_to_bmc(g_bspveth_dev.bma_priv); + + netif_start_queue(g_bspveth_dev.pnetdev); + netif_carrier_on(pstr_dev); + + return BSP_OK; + +failed2: + veth_dmatimer_close_H(); + + veth_skbtimer_close(); + + (void)bspveth_dev_uninstall(); + +failed1: + return ret; +} + +s32 veth_close(struct net_device *pstr_dev) +{ + (void)bma_intf_unregister_int_notifier(&g_veth_int_nb); + + netif_carrier_off(pstr_dev); + + bma_intf_set_open_status(g_bspveth_dev.bma_priv, DEV_CLOSE); + + netif_stop_queue(g_bspveth_dev.pnetdev); + + (void)veth_dmatimer_close_H(); + (void)veth_skbtimer_close(); + + (void)bspveth_dev_uninstall(); + + return BSP_OK; +} + +s32 veth_config(struct net_device *pstr_dev, struct ifmap *pstr_map) +{ + if (!pstr_dev || !pstr_map) + return BSP_ERR_NULL_POINTER; + + /* can't act on a running interface */ + if (pstr_dev->flags & IFF_UP) + return -EBUSY; + + /* Don't allow changing the I/O address */ + if (pstr_map->base_addr != pstr_dev->base_addr) + return -EOPNOTSUPP; + + /* ignore other fields */ + return BSP_OK; +} + +void bspveth_initstatis(void) +{ + int i; + struct bspveth_rxtx_q *prx_queue = NULL; + struct bspveth_rxtx_q *ptx_queue = NULL; + + for (i = 0; i < MAX_QUEUE_NUM; i++) { + prx_queue = g_bspveth_dev.prx_queue[i]; + ptx_queue = g_bspveth_dev.ptx_queue[i]; + + if (prx_queue && ptx_queue) { + memset(&prx_queue->s, + 0, sizeof(struct bspveth_rxtx_statis)); + + memset(&ptx_queue->s, + 0, sizeof(struct bspveth_rxtx_statis)); + } else { + VETH_LOG(DLOG_ERROR, + "prx_queue OR ptx_queue is NULL\n"); + } + } + + VETH_LOG(DLOG_DEBUG, "bspveth initstatis ok\n"); +} + +s32 veth_ioctl(struct net_device *pstr_dev, struct ifreq *pifr, s32 l_cmd) +{ + return -EFAULT; +} + +struct net_device_stats *veth_stats(struct net_device *pstr_dev) +{ + return &g_bspveth_dev.stats; +} + +s32 veth_mac_set(struct net_device *pstr_dev, void *p_mac) +{ + struct sockaddr *str_addr = NULL; + u8 *puc_mac = NULL; + + if (!pstr_dev || !p_mac) + return BSP_ERR_NULL_POINTER; + + str_addr = (struct sockaddr *)p_mac; + puc_mac = (u8 *)str_addr->sa_data; + + pstr_dev->dev_addr[0] = puc_mac[0]; + pstr_dev->dev_addr[1] = puc_mac[1]; + pstr_dev->dev_addr[2] = puc_mac[2]; + pstr_dev->dev_addr[3] = puc_mac[3]; + pstr_dev->dev_addr[4] = puc_mac[4]; + pstr_dev->dev_addr[5] = puc_mac[5]; + + return BSP_OK; +} + +static u32 veth_ethtool_get_link(struct net_device *dev) +{ + if (!bma_intf_is_link_ok() || !netif_running(g_bspveth_dev.pnetdev)) + return 0; + + if (g_bspveth_dev.ptx_queue[0] && + g_bspveth_dev.ptx_queue[0]->pshmqhd_v) + return (u32)((BSPVETH_SHMQUEUE_INITOK == + g_bspveth_dev.ptx_queue[0]->pshmqhd_v->init) && + netif_carrier_ok(dev)); + + return 0; +} + +static void veth_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, VETH_VERSION, sizeof(info->version)); + + info->n_stats = VETH_GLOBAL_STATS_LEN; +} + +static void veth_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *tool_stats, u64 *data) +{ + unsigned int i = 0; + char *p = 
NULL; + const struct veth_stats *p_stat = veth_gstrings_stats; + struct bspveth_rxtx_q *ptx_node = g_bspveth_dev.ptx_queue[0]; + struct bspveth_rxtx_q *prx_node = g_bspveth_dev.prx_queue[0]; + char * const pstat_map[] = { + /* QUEUE TX STATS*/ + GET_QUEUE_STAT(ptx_node, p_stat), + /* QUEUE RX STATS*/ + GET_QUEUE_STAT(prx_node, p_stat), + /* VETH STATS */ + (char *)&g_bspveth_dev + p_stat->stat_offset, + /* SHMQ TX STATS */ + GET_SHM_QUEUE_STAT(ptx_node, p_stat), + /* SHMQ RX STATS */ + GET_SHM_QUEUE_STAT(prx_node, p_stat), + /* NET STATS */ + (char *)&g_bspveth_dev + p_stat->stat_offset + }; + + if (!data || !netdev || !tool_stats) + return; + + for (i = 0; i < VETH_GLOBAL_STATS_LEN; i++) { + p = NULL; + + if (p_stat->type > NET_STATS) + break; + + p = pstat_map[p_stat->type]; + + data[i] = GET_STATS_VALUE(p, p_stat); + + p_stat++; + } +} + +static void veth_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + unsigned int i; + + if (!p) + return; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < VETH_GLOBAL_STATS_LEN; i++) { + memcpy(p, veth_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + + p += ETH_GSTRING_LEN; + } + + break; + } +} + +static int veth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return VETH_GLOBAL_STATS_LEN; + + default: + return -EOPNOTSUPP; + } +} + +const struct ethtool_ops veth_ethtool_ops = { + .get_drvinfo = veth_ethtool_get_drvinfo, + .get_link = veth_ethtool_get_link, + + .get_ethtool_stats = veth_ethtool_get_stats, + .get_strings = veth_get_strings, + .get_sset_count = veth_get_sset_count, + +}; + +static const struct net_device_ops veth_ops = { + .ndo_open = veth_open, + .ndo_stop = veth_close, + .ndo_set_config = veth_config, + .ndo_start_xmit = veth_tx, + .ndo_do_ioctl = veth_ioctl, + .ndo_get_stats = veth_stats, + .ndo_set_mac_address = veth_mac_set, +}; + +void veth_netdev_func_init(struct net_device *dev) +{ + struct tag_pcie_comm_priv *priv = + (struct tag_pcie_comm_priv *)netdev_priv(dev); + + VETH_LOG(DLOG_DEBUG, "eth init start\n"); + + ether_setup(dev); + + dev->netdev_ops = &veth_ops; + + dev->watchdog_timeo = BSPVETH_NET_TIMEOUT; + dev->mtu = BSPVETH_MTU_MAX; + dev->flags = IFF_BROADCAST; + dev->tx_queue_len = BSPVETH_MAX_QUE_DEEP; + dev->ethtool_ops = &veth_ethtool_ops; + + /* Then, initialize the priv field. This encloses the statistics + * and a few private fields. 
+ */ + memset(priv, 0, sizeof(struct tag_pcie_comm_priv)); + strncpy(priv->net_type, MODULE_NAME, NET_TYPE_LEN); + + /*9C:7D:A3:28:6F:F9*/ + dev->dev_addr[0] = 0x9c; + dev->dev_addr[1] = 0x7d; + dev->dev_addr[2] = 0xa3; + dev->dev_addr[3] = 0x28; + dev->dev_addr[4] = 0x6f; + dev->dev_addr[5] = 0xf9; + + VETH_LOG(DLOG_DEBUG, "set veth MAC addr OK\n"); +} + +s32 veth_send_one_pkt(struct sk_buff *skb, int queue) +{ + u32 head, next_to_free; + dma_addr_t dma = 0; + u32 off = 0; + int ret = 0; + int type = BSPVETH_TX; + struct bspveth_bd_info *pbdinfo_v = NULL; + struct bspveth_dma_bd *pbd_v = NULL; + struct bspveth_rxtx_q *ptx_queue = g_bspveth_dev.ptx_queue[queue]; + + if (!skb || !ptx_queue || !ptx_queue->pbdinfobase_v || + !ptx_queue->pbdbase_v) { + INC_STATIS_RXTX(queue, null_point, 1, type); + return BSP_ERR_NULL_POINTER; + } + + if (!bma_intf_is_link_ok() || + ptx_queue->pshmqhd_v->init != BSPVETH_SHMQUEUE_INITOK) + return -1; + + head = ptx_queue->head; + next_to_free = ptx_queue->next_to_free; + + /* stop to send pkt when queue is going to full */ + if (!JUDGE_TX_QUEUE_SPACE(head, next_to_free, 3)) { + netif_stop_subqueue(g_bspveth_dev.pnetdev, queue); + VETH_LOG(DLOG_DEBUG, + "going to full, head: %d, nex to free: %d\n", + head, next_to_free); + } + + if (!JUDGE_TX_QUEUE_SPACE(head, next_to_free, 1)) + return BSP_NETDEV_TX_BUSY; + + if (skb_shinfo(skb)->nr_frags) { + /* We don't support frags */ + ret = skb_linearize(skb); + if (ret) + return -ENOMEM; + } + + dma = dma_map_single(&g_bspveth_dev.ppcidev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + ret = dma_mapping_error(&g_bspveth_dev.ppcidev->dev, dma); + if (ret != BSP_OK) { + ret = BSP_ERR_DMA_ERR; + g_bspveth_dev.ptx_queue[queue]->s.dma_mapping_err++; + goto failed; + } + + off = dma & 0x3; + if (off) + g_bspveth_dev.ptx_queue[queue]->s.dma_need_offset++; + + pbdinfo_v = &ptx_queue->pbdinfobase_v[head]; + pbdinfo_v->pdma_v = skb; + pbd_v = &ptx_queue->pbdbase_v[head]; + pbd_v->dma_p = dma & (~((u64)0x3)); + pbd_v->off = off; + pbd_v->len = skb->len; + + mb();/* memory barriers. 
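Ensure the BD fields (dma_p, off, len) written above are visible before the queue head index is advanced.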
*/ + head = (head + 1) & BSPVETH_POINT_MASK; + ptx_queue->head = head; + + VETH_LOG(DLOG_DEBUG, + "[send]:oridma=0x%llx,skb=%p,skb->data=%p,skb->len=%d,", + (u64)dma, skb, skb->data, skb->len); + VETH_LOG(DLOG_DEBUG, "head=%d,off=%d, alidma0x%llx\n", head, off, + (u64)(dma & (~((u64)0x3)))); + + return BSP_OK; + +failed: + return ret; +} + +int veth_tx(struct sk_buff *skb, struct net_device *pstr_dev) +{ + u32 ul_ret = 0; + int queue = 0; + + VETH_LOG(DLOG_DEBUG, "===============enter==================\n"); + + if (!skb || !pstr_dev) { + g_bspveth_dev.ptx_queue[queue]->s.null_point++; + return NETDEV_TX_OK; + } + + VETH_LOG(DLOG_DEBUG, "skb->data=%p\n", skb->data); + VETH_LOG(DLOG_DEBUG, "skb->len=%d\n", skb->len); + + ul_ret = veth_send_one_pkt(skb, queue); + + if (ul_ret == BSP_OK) { + g_bspveth_dev.ptx_queue[queue]->s.pkt++; + g_bspveth_dev.stats.tx_packets++; + g_bspveth_dev.ptx_queue[queue]->s.pktbyte += skb->len; + g_bspveth_dev.stats.tx_bytes += skb->len; + +#ifndef USE_TASKLET + (void)mod_timer(&g_bspveth_dev.dmatimer, jiffies_64); +#else + tasklet_hi_schedule(&g_bspveth_dev.dma_task); +#endif + + } else { + VETH_LOG(DLOG_DEBUG, "=======exit ret = %d=======\n", ul_ret); + g_bspveth_dev.ptx_queue[queue]->s.dropped_pkt++; + g_bspveth_dev.stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + +s32 veth_free_txskb(struct bspveth_rxtx_q *ptx_queue, int queue) +{ + int i, work_limit; + unsigned int tail, next_to_free; + struct bspveth_bd_info *ptx_bdinfo_v = NULL; + struct sk_buff *skb = NULL; + struct bspveth_dma_bd *pbd_v = NULL; + + if (!ptx_queue) + return BSP_ERR_AGAIN; + + work_limit = ptx_queue->work_limit; + tail = ptx_queue->tail; + next_to_free = ptx_queue->next_to_free; + + for (i = 0; i < work_limit; i++) { + if (next_to_free == tail) + break; + + ptx_bdinfo_v = &ptx_queue->pbdinfobase_v[next_to_free]; + + pbd_v = &ptx_queue->pbdbase_v[next_to_free]; + + skb = ptx_bdinfo_v->pdma_v; + + dma_unmap_single(&g_bspveth_dev.ppcidev->dev, + pbd_v->dma_p | pbd_v->off, + pbd_v->len, DMA_TO_DEVICE); + + if (skb) + dev_kfree_skb_any(skb); + else + VETH_LOG(DLOG_ERROR, + "skb is NULL,tail=%d next_to_free=%d\n", + tail, next_to_free); + + ptx_bdinfo_v->pdma_v = NULL; + g_bspveth_dev.ptx_queue[queue]->s.freetx++; + + next_to_free = (next_to_free + 1) & BSPVETH_POINT_MASK; + } + + mb(); /* memory barriers. 
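Ensure the transmitted buffers unmapped and freed above are fully processed before next_to_free is published and tail is re-read.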
*/ + ptx_queue->next_to_free = next_to_free; + tail = ptx_queue->tail; + + if (next_to_free != tail) { + VETH_LOG(DLOG_DEBUG, "next_to_free(%d) != tail(%d)\n", + next_to_free, tail); + + return BSP_ERR_AGAIN; + } + + return BSP_OK; +} + +s32 veth_recv_pkt(struct bspveth_rxtx_q *prx_queue, int queue) +{ + int ret = BSP_OK, i, work_limit; + u32 tail, head; + struct bspveth_bd_info *prx_bdinfo_v = NULL; + struct bspveth_dma_bd *pbd_v = NULL; + struct sk_buff *skb = NULL; + dma_addr_t dma_map = 0; + u32 off = 0; + + if (!prx_queue) + return BSP_ERR_AGAIN; + + work_limit = prx_queue->work_limit; + tail = prx_queue->tail; + + for (i = 0; i < work_limit; i++) { + head = prx_queue->head; + if (tail == head) + break; + + prx_bdinfo_v = &prx_queue->pbdinfobase_v[tail]; + + skb = prx_bdinfo_v->pdma_v; + if (!skb) { + tail = (tail + 1) & BSPVETH_POINT_MASK; + continue; + } + + prx_bdinfo_v->pdma_v = NULL; + pbd_v = &prx_queue->pbdbase_v[tail]; + + off = pbd_v->off; + if (off) + skb_reserve(skb, off); + + dma_unmap_single(&g_bspveth_dev.ppcidev->dev, pbd_v->dma_p, + BSPVETH_SKB_SIZE, DMA_FROM_DEVICE); + + tail = (tail + 1) & BSPVETH_POINT_MASK; + + skb_put(skb, pbd_v->len); + + skb->protocol = eth_type_trans(skb, g_bspveth_dev.pnetdev); + skb->ip_summed = CHECKSUM_NONE; + + VETH_LOG(DLOG_DEBUG, + "skb->len=%d,skb->protocol=%d\n", + skb->len, skb->protocol); + + VETH_LOG(DLOG_DEBUG, + "dma_p=0x%llx,dma_map=0x%llx,", + pbd_v->dma_p, dma_map); + + VETH_LOG(DLOG_DEBUG, + "skb=%p,skb->data=%p,skb->len=%d,tail=%d,shm_off=%d\n", + skb, skb->data, skb->len, tail, off); + + VETH_LOG(DLOG_DEBUG, + "skb_transport_header=%p skb_mac_header=%p ", + skb_transport_header(skb), skb_mac_header(skb)); + + VETH_LOG(DLOG_DEBUG, + "skb_network_header=%p\n", skb_network_header(skb)); + + VETH_LOG(DLOG_DEBUG, + "skb->data=0x%p skb->tail=%08x skb->len=%08x\n", + skb->data, + (unsigned int)skb->tail, + (unsigned int)skb->len); + + g_bspveth_dev.prx_queue[queue]->s.pkt++; + g_bspveth_dev.stats.rx_packets++; + g_bspveth_dev.prx_queue[queue]->s.pktbyte += skb->len; + g_bspveth_dev.stats.rx_bytes += skb->len; + + ret = netif_rx(skb); + if (ret == NET_RX_DROP) { + g_bspveth_dev.prx_queue[queue]->s.netifrx_err++; + g_bspveth_dev.stats.rx_errors++; + + VETH_LOG(DLOG_DEBUG, "netif_rx failed\n"); + } + } + + mb();/* memory barriers. 
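Ensure all received buffers handed to the stack above are processed before the new tail is published and head is re-read.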
*/ + prx_queue->tail = tail; + head = prx_queue->head; + + ret = veth_refill_rxskb(prx_queue, queue); + if (ret != BSP_OK) + VETH_LOG(DLOG_DEBUG, "veth_refill_rxskb failed\n"); + + if (tail != head) { + VETH_LOG(DLOG_DEBUG, "tail(%d) != head(%d)\n", tail, head); + + return BSP_ERR_AGAIN; + } + + return BSP_OK; +} + +#if !defined(USE_TASKLET) && defined(HAVE_TIMER_SETUP) +void veth_skbtrtimer_do(struct timer_list *t) +#else +void veth_skbtrtimer_do(unsigned long data) +#endif +{ + int ret = 0; + + ret = veth_skb_tr_task(); + if (ret == BSP_ERR_AGAIN) { +#ifndef USE_TASKLET + (void)mod_timer(&g_bspveth_dev.skbtrtimer, jiffies_64); +#else + tasklet_hi_schedule(&g_bspveth_dev.skb_task); +#endif + } +} + +s32 veth_skbtimer_close(void) +{ +#ifndef USE_TASKLET + (void)del_timer_sync(&g_bspveth_dev.skbtrtimer); +#else + tasklet_kill(&g_bspveth_dev.skb_task); +#endif + + VETH_LOG(DLOG_DEBUG, "veth skbtimer close ok\n"); + + return 0; +} + +void veth_skbtimer_init(void) +{ +#ifndef USE_TASKLET +#ifdef HAVE_TIMER_SETUP + timer_setup(&g_bspveth_dev.skbtrtimer, veth_skbtrtimer_do, 0); +#else + setup_timer(&g_bspveth_dev.skbtrtimer, veth_skbtrtimer_do, + (unsigned long)&g_bspveth_dev); +#endif + (void)mod_timer(&g_bspveth_dev.skbtrtimer, + jiffies_64 + BSPVETH_SKBTIMER_INTERVAL); +#else + tasklet_init(&g_bspveth_dev.skb_task, veth_skbtrtimer_do, + (unsigned long)&g_bspveth_dev); +#endif + + VETH_LOG(DLOG_DEBUG, "veth skbtimer init OK\n"); +} + +void veth_netdev_exit(void) +{ + if (g_bspveth_dev.pnetdev) { + netif_stop_queue(g_bspveth_dev.pnetdev); + unregister_netdev(g_bspveth_dev.pnetdev); + free_netdev(g_bspveth_dev.pnetdev); + + VETH_LOG(DLOG_DEBUG, "veth netdev exit OK.\n"); + } else { + VETH_LOG(DLOG_DEBUG, "veth_dev.pnetdev NULL.\n"); + } +} + +static void veth_shutdown_task(struct work_struct *work) +{ + struct net_device *netdev = g_bspveth_dev.pnetdev; + g_shutdown_flag = 1; + + VETH_LOG(DLOG_ERROR, "veth is going down, please restart it manual\n"); + + g_bspveth_dev.shutdown_cnt++; + + if (netif_carrier_ok(netdev)) { + (void)bma_intf_unregister_int_notifier(&g_veth_int_nb); + + netif_carrier_off(netdev); + + bma_intf_set_open_status(g_bspveth_dev.bma_priv, DEV_CLOSE); + + /* can't transmit any more */ + netif_stop_queue(g_bspveth_dev.pnetdev); + + (void)veth_skbtimer_close(); + + (void)veth_dmatimer_close_H(); + } + g_shutdown_flag = 0; +} + +s32 veth_netdev_init(void) +{ + s32 l_ret = 0; + struct net_device *netdev = NULL; + + netdev = alloc_netdev_mq(sizeof(struct tag_pcie_comm_priv), + BSPVETH_DEV_NAME, NET_NAME_UNKNOWN, + veth_netdev_func_init, 1); + + /* register netdev */ + l_ret = register_netdev(netdev); + if (l_ret < 0) { + VETH_LOG(DLOG_ERROR, "register_netdev failed!ret=%d\n", l_ret); + + return -ENODEV; + } + + g_bspveth_dev.pnetdev = netdev; + + VETH_LOG(DLOG_DEBUG, "veth netdev init OK\n"); + + INIT_WORK(&g_bspveth_dev.shutdown_task, veth_shutdown_task); + + netif_carrier_off(netdev); + + return BSP_OK; +} + +int veth_skb_tr_task(void) +{ + int rett = BSP_OK; + int retr = BSP_OK; + int i = 0; + int task_state = BSP_OK; + struct bspveth_rxtx_q *ptx_queue = NULL; + struct bspveth_rxtx_q *prx_queue = NULL; + + for (i = 0; i < MAX_QUEUE_NUM; i++) { + prx_queue = g_bspveth_dev.prx_queue[i]; + if (prx_queue) { + g_bspveth_dev.run_skb_rx_task++; + retr = veth_recv_pkt(prx_queue, i); + } + + ptx_queue = g_bspveth_dev.ptx_queue[i]; + if (ptx_queue) { + g_bspveth_dev.run_skb_fr_task++; + rett = veth_free_txskb(ptx_queue, i); + if (__netif_subqueue_stopped + (g_bspveth_dev.pnetdev, i) && + 
JUDGE_TX_QUEUE_SPACE + (ptx_queue->head, + ptx_queue->next_to_free, 5)) { + netif_wake_subqueue(g_bspveth_dev.pnetdev, i); + VETH_LOG(DLOG_DEBUG, "queue is free, "); + VETH_LOG(DLOG_DEBUG, + "head: %d, next to free: %d\n", + ptx_queue->head, + ptx_queue->next_to_free); + } + } + + if (rett == BSP_ERR_AGAIN || retr == BSP_ERR_AGAIN) + task_state = BSP_ERR_AGAIN; + } + + return task_state; +} + +static int veth_int_handler(struct notifier_block *pthis, unsigned long ev, + void *unuse) +{ + g_bspveth_dev.recv_int++; + + if (netif_running(g_bspveth_dev.pnetdev)) { +#ifndef USE_TASKLET + (void)mod_timer(&g_bspveth_dev.dmatimer, jiffies_64); +#else + tasklet_schedule(&g_bspveth_dev.dma_task); + +#endif + } else { + VETH_LOG(DLOG_DEBUG, "netif is not running\n"); + } + + return IRQ_HANDLED; +} + +#if !defined(USE_TASKLET) && defined(HAVE_TIMER_SETUP) +void veth_dma_tx_timer_do_H(struct timer_list *t) +#else +void veth_dma_tx_timer_do_H(unsigned long data) +#endif +{ + int txret, rxret; + + txret = veth_dma_task_H(BSPVETH_TX); + + rxret = veth_dma_task_H(BSPVETH_RX); + + if ((txret == BSP_ERR_AGAIN || rxret == BSP_ERR_AGAIN) && + g_shutdown_flag == 0) { +#ifndef USE_TASKLET + (void)mod_timer(&g_bspveth_dev.dmatimer, jiffies_64); +#else + tasklet_hi_schedule(&g_bspveth_dev.dma_task); +#endif + } +} + +s32 veth_dmatimer_close_H(void) +{ +#ifndef USE_TASKLET + (void)del_timer_sync(&g_bspveth_dev.dmatimer); +#else + tasklet_kill(&g_bspveth_dev.dma_task); +#endif + + VETH_LOG(DLOG_DEBUG, "bspveth_dmatimer_close RXTX TIMER ok\n"); + + return 0; +} + +void veth_dmatimer_init_H(void) +{ +#ifndef USE_TASKLET +#ifdef HAVE_TIMER_SETUP + timer_setup(&g_bspveth_dev.dmatimer, veth_dma_tx_timer_do_H, 0); +#else + setup_timer(&g_bspveth_dev.dmatimer, veth_dma_tx_timer_do_H, + (unsigned long)&g_bspveth_dev); +#endif + (void)mod_timer(&g_bspveth_dev.dmatimer, + jiffies_64 + BSPVETH_DMATIMER_INTERVAL); +#else + tasklet_init(&g_bspveth_dev.dma_task, veth_dma_tx_timer_do_H, + (unsigned long)&g_bspveth_dev); +#endif + + VETH_LOG(DLOG_DEBUG, "bspveth_dmatimer_init RXTX TIMER OK\n"); +} + +s32 dmacmp_err_deal(struct bspveth_rxtx_q *prxtx_queue, u32 queue, + u32 type) +{ + prxtx_queue->dmacmperr = 0; + prxtx_queue->start_dma = 0; + + (void)veth_reset_dma(type); + + if (type == BSPVETH_RX) { + VETH_LOG(DLOG_DEBUG, + "bmc->host dma time out,dma count:%d,work_limit:%d\n", + prxtx_queue->dmal_cnt, + prxtx_queue->work_limit); + + g_bspveth_dev.prx_queue[queue]->s.dma_failed++; + } else { + VETH_LOG(DLOG_DEBUG, + "host->bmc dma time out,dma count:%d,work_limit:%d\n", + prxtx_queue->dmal_cnt, + prxtx_queue->work_limit); + + g_bspveth_dev.ptx_queue[queue]->s.dma_failed++; + } + + if (prxtx_queue->dmal_cnt > 1) + prxtx_queue->work_limit = (prxtx_queue->dmal_cnt >> 1); + + prxtx_queue->dma_overtime++; + if (prxtx_queue->dma_overtime > BSPVETH_MAX_QUE_DEEP) { + schedule_work(&g_bspveth_dev.shutdown_task); + + return -EFAULT; + } + + return BSP_OK; +} + +s32 veth_check_dma_status(struct bspveth_rxtx_q *prxtx_queue, + u32 queue, u32 type) +{ + int i = 0; + enum dma_direction_e dir; + + dir = GET_DMA_DIRECTION(type); + + for (i = 0; i < BSPVETH_CHECK_DMA_STATUS_TIMES; i++) { + if (bma_intf_check_dma_status(dir) == BSPVETH_DMA_OK) + break; + + cpu_relax(); + + if (i > 20) + udelay(5); + } + + if (i >= BSPVETH_CHECK_DMA_STATUS_TIMES) { + INC_STATIS_RXTX(queue, dma_busy, 1, type); + prxtx_queue->dmacmperr++; + + return -EFAULT; + } + + return BSP_OK; +} + +s32 __check_dmacmp_H(struct bspveth_rxtx_q *prxtx_queue, u32 queue, + u32 type) +{ 
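+ /* Check whether the DMA list previously started on this queue has
+ * completed. On completion, advance the host and shared-memory queue
+ * indexes by the number of transferred descriptors, schedule the skb
+ * handling task and notify the BMC by interrupt.
+ */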
+ u16 start_dma = 0; + u16 dmacmperr = 0; + u32 cnt = 0; + u32 len = 0; + u32 host_head = 0; + u32 host_tail = 0; + u32 shm_head = 0; + u32 shm_tail = 0; + s32 ret = 0; + struct bspveth_shmq_hd *pshmq_head = NULL; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return BSP_ERR_NULL_POINTER; + + pshmq_head = prxtx_queue->pshmqhd_v; + dmacmperr = prxtx_queue->dmacmperr; + start_dma = prxtx_queue->start_dma; + if (!start_dma) + return BSP_OK; + + if (dmacmperr > BSPVETH_WORK_LIMIT / 4) + return dmacmp_err_deal(prxtx_queue, queue, type); + + ret = veth_check_dma_status(prxtx_queue, queue, type); + if (ret != BSP_OK) + return ret; + + prxtx_queue->start_dma = 0; + prxtx_queue->dma_overtime = 0; + + if (type == BSPVETH_RX) { + cnt = prxtx_queue->dmal_cnt; + len = prxtx_queue->dmal_byte; + + host_head = prxtx_queue->head; + shm_tail = pshmq_head->tail; + + pshmq_head->tail = (shm_tail + cnt) & BSPVETH_POINT_MASK; + prxtx_queue->head = (host_head + cnt) & BSPVETH_POINT_MASK; + + g_bspveth_dev.prx_queue[queue]->s.dmapkt += cnt; + g_bspveth_dev.prx_queue[queue]->s.dmapktbyte += len; + } else { + cnt = prxtx_queue->dmal_cnt; + len = prxtx_queue->dmal_byte; + + host_tail = prxtx_queue->tail; + shm_head = pshmq_head->head; + + prxtx_queue->tail = (host_tail + cnt) & BSPVETH_POINT_MASK; + pshmq_head->head = (shm_head + cnt) & BSPVETH_POINT_MASK; + + g_bspveth_dev.ptx_queue[queue]->s.dmapkt += cnt; + g_bspveth_dev.ptx_queue[queue]->s.dmapktbyte += len; + } + +#ifndef USE_TASKLET + (void)mod_timer(&g_bspveth_dev.skbtrtimer, jiffies_64); +#else + tasklet_hi_schedule(&g_bspveth_dev.skb_task); +#endif + + (void)bma_intf_int_to_bmc(g_bspveth_dev.bma_priv); + + g_bspveth_dev.tobmc_int++; + + return BSP_OK; +} + +s32 __checkspace_H(struct bspveth_rxtx_q *prxtx_queue, u32 queue, + u32 type, u32 *pcnt) +{ + int ret = BSP_OK; + u32 host_head, host_tail, host_nextfill; + u32 shm_head, shm_tail, shm_nextfill; + u32 shm_cnt, host_cnt, cnt_tmp, cnt; + struct bspveth_shmq_hd *pshmq_head = NULL; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return BSP_ERR_NULL_POINTER; + + pshmq_head = prxtx_queue->pshmqhd_v; + host_head = prxtx_queue->head; + host_tail = prxtx_queue->tail; + host_nextfill = prxtx_queue->next_to_fill; + shm_head = pshmq_head->head; + shm_tail = pshmq_head->tail; + shm_nextfill = pshmq_head->next_to_fill; + + switch (type) { + case BSPVETH_RX: + if (shm_tail == shm_head) { + INC_STATIS_RXTX(queue, shm_emp, 1, type); + ret = BSP_ERR_NOT_TO_HANDLE; + goto failed; + } + + if (!JUDGE_RX_QUEUE_SPACE(host_head, host_nextfill, 1)) + return -EFAULT; + + shm_cnt = (shm_head - shm_tail) & BSPVETH_POINT_MASK; + cnt_tmp = min(shm_cnt, prxtx_queue->work_limit); + + host_cnt = (host_nextfill - host_head) & BSPVETH_POINT_MASK; + cnt = min(cnt_tmp, host_cnt); + + break; + + case BSPVETH_TX: + if (host_tail == host_head) { + INC_STATIS_RXTX(queue, q_emp, 1, type); + ret = BSP_ERR_NOT_TO_HANDLE; + goto failed; + } + + if (!JUDGE_TX_QUEUE_SPACE(shm_head, shm_nextfill, 1)) + return -EFAULT; + + host_cnt = (host_head - host_tail) & BSPVETH_POINT_MASK; + cnt_tmp = min(host_cnt, prxtx_queue->work_limit); + shm_cnt = (shm_nextfill - (shm_head + 1)) & BSPVETH_POINT_MASK; + cnt = min(cnt_tmp, shm_cnt); + + break; + + default: + INC_STATIS_RXTX(queue, type_err, 1, type); + ret = -EFAULT; + goto failed; + } + + if (cnt > (BSPVETH_DMABURST_MAX * 7 / 8)) + INC_STATIS_RXTX(queue, dma_burst, 1, type); + +#ifdef __UT_TEST + if (g_testdma) { + VETH_LOG(DLOG_ERROR, + "[type %d],host_cnt=%d cnt_tmp=%d shm_cnt=%d cnt=%d\n", + type, 
host_cnt, cnt_tmp, shm_cnt, cnt); + } +#endif + + *pcnt = cnt; + + return BSP_OK; + +failed: + return ret; +} + +int __make_dmalistbd_h2b_H(struct bspveth_rxtx_q *prxtx_queue, + u32 cnt, u32 type) +{ + u32 i = 0; + u32 len = 0; + u32 host_tail = 0; + u32 shm_head = 0; + u32 off = 0; + struct bspveth_dmal *pdmalbase_v = NULL; + struct bspveth_shmq_hd *pshmq_head = NULL; + struct bspveth_bd_info *pbdinfobase_v = NULL; + struct bspveth_dma_bd *pbdbase_v = NULL; + struct bspveth_dma_shmbd *pshmbdbase_v = NULL; + + if (!prxtx_queue) + return BSP_ERR_NULL_POINTER; + + pdmalbase_v = prxtx_queue->pdmalbase_v; + pshmq_head = prxtx_queue->pshmqhd_v; + pbdinfobase_v = prxtx_queue->pbdinfobase_v; + pbdbase_v = prxtx_queue->pbdbase_v; + pshmbdbase_v = prxtx_queue->pshmbdbase_v; + if (!pdmalbase_v || !pshmq_head || !pbdinfobase_v || + !pbdbase_v || !pshmbdbase_v) + return BSP_ERR_NULL_POINTER; + + host_tail = prxtx_queue->tail; + shm_head = pshmq_head->head; + + for (i = 0; i < cnt; i++) { + off = pbdbase_v[QUEUE_MASK(host_tail + i)].off; + + if (i == (cnt - 1)) + pdmalbase_v[i].chl = 0x9; + else + pdmalbase_v[i].chl = 0x0000001; + pdmalbase_v[i].len = + (pbdinfobase_v[QUEUE_MASK(host_tail + i)].pdma_v)->len; + pdmalbase_v[i].slow = + lower_32_bits(pbdbase_v[QUEUE_MASK(host_tail + i)].dma_p); + pdmalbase_v[i].shi = + upper_32_bits(pbdbase_v[QUEUE_MASK(host_tail + i)].dma_p); + pdmalbase_v[i].dlow = + lower_32_bits(pshmbdbase_v[QUEUE_MASK(shm_head + i)].dma_p); + pdmalbase_v[i].dhi = 0; + + pshmbdbase_v[QUEUE_MASK(shm_head + i)].len = pdmalbase_v[i].len; + + pdmalbase_v[i].len += off; + + pshmbdbase_v[QUEUE_MASK(shm_head + i)].off = off; + + len += pdmalbase_v[i].len; + +#ifdef __UT_TEST + if (g_testdma) { + struct sk_buff *skb = + pbdinfobase_v[QUEUE_MASK(host_tail + i)].pdma_v; + + VETH_LOG(DLOG_ERROR, + "[%d][makebd-H2B]:chl=0x%x,len=%d,slow=0x%x,", + i, pdmalbase_v[i].chl, pdmalbase_v[i].len, + pdmalbase_v[i].slow); + VETH_LOG(DLOG_ERROR, + "shi=0x%x,dlow=0x%x,dhi=0x%x,skb=%p,", + pdmalbase_v[i].shi, pdmalbase_v[i].dlow, + pdmalbase_v[i].dhi, skb); + VETH_LOG(DLOG_ERROR, + "skb->data=%p,skb->len=%d,host_tail+i=%d,", + skb->data, skb->len, + QUEUE_MASK(host_tail + i)); + VETH_LOG(DLOG_ERROR, + "shm_head+i=%d,off=%d\n", + QUEUE_MASK(shm_head + i), off); + } +#endif + } + + pdmalbase_v[i].chl = 0x7; + pdmalbase_v[i].len = 0x0; + pdmalbase_v[i].slow = lower_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].shi = upper_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].dlow = 0; + pdmalbase_v[i].dhi = 0; + + prxtx_queue->dmal_cnt = cnt; + prxtx_queue->dmal_byte = len; + +#ifdef __UT_TEST + if (g_testdma) { + VETH_LOG(DLOG_ERROR, + "[END][makebd-H2B]:chl=0x%x,len=%d,slow=0x%x,", + pdmalbase_v[i].chl, pdmalbase_v[i].len, + pdmalbase_v[i].slow); + VETH_LOG(DLOG_ERROR, + "shi=0x%x,dmal_cnt=%d,dmal_dir=%d,dmal_byte=%d,", + pdmalbase_v[i].shi, cnt, type, len); + VETH_LOG(DLOG_ERROR, "pdmalbase_v=%p\n", pdmalbase_v); + } +#endif + + return 0; +} + +int __make_dmalistbd_b2h_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, + u32 type) +{ + u32 i, len = 0, host_head, shm_tail, off; + struct bspveth_dmal *pdmalbase_v = NULL; + struct bspveth_shmq_hd *pshmq_head = NULL; + struct bspveth_bd_info *pbdinfobase_v = NULL; + struct bspveth_dma_bd *pbdbase_v = NULL; + struct bspveth_dma_shmbd *pshmbdbase_v = NULL; + + if (!prxtx_queue) { + VETH_LOG(DLOG_ERROR, + "[END][makebd-B2H]:prxtx_queue NULL!!!\n"); + return BSP_ERR_NULL_POINTER; + } + + pdmalbase_v = prxtx_queue->pdmalbase_v; + pshmq_head = 
prxtx_queue->pshmqhd_v; + pbdinfobase_v = prxtx_queue->pbdinfobase_v; + pbdbase_v = prxtx_queue->pbdbase_v; + pshmbdbase_v = prxtx_queue->pshmbdbase_v; + if (!pdmalbase_v || !pshmq_head || !pbdinfobase_v || + !pbdbase_v || !pshmbdbase_v) { + VETH_LOG(DLOG_ERROR, + "[END][makebd-B2H]:pdmalbase_v NULL!!!\n"); + return BSP_ERR_NULL_POINTER; + } + + host_head = prxtx_queue->head; + shm_tail = pshmq_head->tail; + + for (i = 0; i < cnt; i++) { + off = pshmbdbase_v[QUEUE_MASK(shm_tail + i)].off; + if (i == (cnt - 1)) + pdmalbase_v[i].chl = 0x9; + else + pdmalbase_v[i].chl = 0x0000001; + pdmalbase_v[i].len = pshmbdbase_v[QUEUE_MASK(shm_tail + i)].len; + pdmalbase_v[i].slow = + lower_32_bits(pshmbdbase_v[QUEUE_MASK(shm_tail + i)].dma_p); + pdmalbase_v[i].shi = 0; + pdmalbase_v[i].dlow = + lower_32_bits(pbdbase_v[QUEUE_MASK(host_head + i)].dma_p); + pdmalbase_v[i].dhi = + upper_32_bits(pbdbase_v[QUEUE_MASK(host_head + i)].dma_p); + pdmalbase_v[i].len += off; + + pbdbase_v[QUEUE_MASK(host_head + i)].off = off; + pbdbase_v[QUEUE_MASK(host_head + i)].len = pdmalbase_v[i].len; + + len += pdmalbase_v[i].len; + +#ifdef __UT_TEST + if (g_testdma) { + struct sk_buff *skb = + pbdinfobase_v[QUEUE_MASK(host_head + i)].pdma_v; + + VETH_LOG(DLOG_ERROR, + "[%d][makebd-B2H]:chl=0x%x,len=%d,slow=0x%x,", + i, pdmalbase_v[i].chl, pdmalbase_v[i].len, + pdmalbase_v[i].slow); + VETH_LOG(DLOG_ERROR, + "shi=0x%x,dlow=0x%x,dhi=0x%x,skb=%p,", + pdmalbase_v[i].shi, pdmalbase_v[i].dlow, + pdmalbase_v[i].dhi, skb); + VETH_LOG(DLOG_ERROR, + "skb->data=%p,skb->len=%d,shm_tail+i=%d,", + skb->data, skb->len, + QUEUE_MASK(shm_tail + i)); + VETH_LOG(DLOG_ERROR, + "host_head+i=%d,off=%d\n", + QUEUE_MASK(host_head + i), off); + } +#endif + } + + pdmalbase_v[i].chl = 0x0000007; + pdmalbase_v[i].len = 0x0; + pdmalbase_v[i].slow = lower_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].shi = upper_32_bits((u64)prxtx_queue->pdmalbase_p); + pdmalbase_v[i].dlow = 0; + pdmalbase_v[i].dhi = 0; + + prxtx_queue->dmal_cnt = cnt; + prxtx_queue->dmal_byte = len; + +#ifdef __UT_TEST + if (g_testdma) { + VETH_LOG(DLOG_ERROR, + "[END][makebd-B2H]:chl=0x%x,len=%d,slow=0x%x,", + pdmalbase_v[i].chl, pdmalbase_v[i].len, + pdmalbase_v[i].slow); + VETH_LOG(DLOG_ERROR, + "shi=0x%x,dmal_cnt=%d,dmal_dir=%d,dmal_byte=%d ", + pdmalbase_v[i].shi, cnt, type, len); + VETH_LOG(DLOG_ERROR, "pdmalbase_v=%p\n", pdmalbase_v); + } + +#endif + + return 0; +} + +s32 __start_dmalist_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, u32 type) +{ + int ret = BSP_OK; + struct bma_dma_transfer_s dma_transfer = { 0 }; + + if (!prxtx_queue) + return -1; + + switch (type) { + case BSPVETH_RX: + ret = __make_dmalistbd_b2h_H(prxtx_queue, cnt, type); + if (ret) + goto failed; + dma_transfer.dir = BMC_TO_HOST; + + break; + + case BSPVETH_TX: + ret = __make_dmalistbd_h2b_H(prxtx_queue, cnt, type); + if (ret) + goto failed; + dma_transfer.dir = HOST_TO_BMC; + + break; + + default: + ret = -1; + goto failed; + } + + dma_transfer.type = DMA_LIST; + dma_transfer.transfer.list.dma_addr = + (dma_addr_t)prxtx_queue->pdmalbase_p; + + ret = bma_intf_start_dma(g_bspveth_dev.bma_priv, &dma_transfer); + if (ret < 0) + goto failed; + + prxtx_queue->start_dma = 1; + + return BSP_OK; + +failed: + return ret; +} + +int check_dma_queue_fault(struct bspveth_rxtx_q *prxtx_queue, + u32 queue, u32 type, u32 *pcnt) +{ + int ret = BSP_OK; + u32 cnt = 0; + + if (prxtx_queue->dma_overtime > BSPVETH_MAX_QUE_DEEP) + return -EFAULT; + + ret = __check_dmacmp_H(prxtx_queue, queue, type); + if (ret != BSP_OK) + 
return -EFAULT; + + ret = __checkspace_H(prxtx_queue, queue, type, &cnt); + if (ret != BSP_OK) + return -EFAULT; + + if (CHECK_DMA_RXQ_FAULT(prxtx_queue, type, cnt)) { + udelay(50); + prxtx_queue->dmal_cnt--; + + return -EFAULT; + } + + *pcnt = cnt; + + return BSP_OK; +} + +s32 __dma_rxtx_H(struct bspveth_rxtx_q *prxtx_queue, u32 queue, u32 type) +{ + int ret = BSP_OK; + u32 cnt = 0; + u32 shm_init; + struct bspveth_shmq_hd *pshmq_head = NULL; + + if (!prxtx_queue || !prxtx_queue->pshmqhd_v) + return BSP_ERR_NULL_POINTER; + + pshmq_head = prxtx_queue->pshmqhd_v; + shm_init = pshmq_head->init; + if (shm_init != BSPVETH_SHMQUEUE_INITOK) { + INC_STATIS_RXTX(queue, shmqueue_noinit, 1, type); + return -EFAULT; + } + + if (CHECK_DMA_QUEUE_EMPTY(type, prxtx_queue)) + return BSP_OK; + + ret = check_dma_queue_fault(prxtx_queue, queue, type, &cnt); + if (ret != BSP_OK) + return -EFAULT; + + ret = __start_dmalist_H(prxtx_queue, cnt, type); + if (ret != BSP_OK) + return -EFAULT; + + if (cnt <= 16) { + ret = __check_dmacmp_H(prxtx_queue, queue, type); + if (ret != BSP_OK) + return -EFAULT; + } + + return BSP_OK; +} + +int veth_dma_task_H(u32 type) +{ + int i; + struct bspveth_rxtx_q *prxtx_queue = NULL; + + for (i = 0; i < MAX_QUEUE_NUM; i++) { + if (type == BSPVETH_RX) { + g_bspveth_dev.run_dma_rx_task++; + prxtx_queue = g_bspveth_dev.prx_queue[i]; + } else { + g_bspveth_dev.run_dma_tx_task++; + prxtx_queue = g_bspveth_dev.ptx_queue[i]; + } + + if (prxtx_queue) { + struct bspveth_shmq_hd *pshmq_head = + prxtx_queue->pshmqhd_v; + (void)__dma_rxtx_H(prxtx_queue, i, type); + if ((type == BSPVETH_RX && + pshmq_head->head != pshmq_head->tail) || + (type == BSPVETH_TX && + prxtx_queue->head != prxtx_queue->tail)) + return BSP_ERR_AGAIN; + } + } + + return BSP_OK; +} + +#ifdef __UT_TEST + +s32 __atu_config_H(struct pci_dev *pdev, unsigned int region, + unsigned int hostaddr_h, unsigned int hostaddr_l, + unsigned int bmcaddr_h, unsigned int bmcaddr_l, + unsigned int len) +{ + (void)pci_write_config_dword(pdev, 0x900, + 0x80000000 + (region & 0x00000007)); + (void)pci_write_config_dword(pdev, 0x90c, hostaddr_l); + (void)pci_write_config_dword(pdev, 0x910, hostaddr_h); + (void)pci_write_config_dword(pdev, 0x914, hostaddr_l + len - 1); + (void)pci_write_config_dword(pdev, 0x918, bmcaddr_l); + (void)pci_write_config_dword(pdev, 0x91c, bmcaddr_h); + /* atu ctrl1 reg */ + (void)pci_write_config_dword(pdev, 0x904, 0x00000000); + /* atu ctrl2 reg */ + (void)pci_write_config_dword(pdev, 0x908, 0x80000000); + + return 0; +} + +void bspveth_atu_config_H(void) +{ + __atu_config_H(g_bspveth_dev.ppcidev, + REGION_HOST, + (sizeof(unsigned long) == SIZE_OF_UNSIGNED_LONG) ? + ((u64)(g_bspveth_dev.phostrtc_p) >> ADDR_H_SHIFT) : 0, + ((u64)(g_bspveth_dev.phostrtc_p) & 0xffffffff), + 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE); + + __atu_config_H(g_bspveth_dev.ppcidev, + REGION_BMC, + (sizeof(unsigned long) == SIZE_OF_UNSIGNED_LONG) ? 
+ ((u64)(g_bspveth_dev.pshmpool_p) >> ADDR_H_SHIFT) : 0, + ((u64)(g_bspveth_dev.pshmpool_p) & 0xffffffff), + 0, VETH_SHAREPOOL_BASE_INBMC, VETH_SHAREPOOL_SIZE); +} + +void bspveth_pcie_free_H(void) +{ + struct pci_dev *pdev = g_bspveth_dev.ppcidev; + + if (pdev) + pci_disable_device(pdev); + else + VETH_LOG(DLOG_ERROR, "bspveth_dev.ppcidev IS NULL\n"); + + VETH_LOG(DLOG_DEBUG, "bspveth_pcie_exit_H ok\n"); +} + +#endif + +void bspveth_host_exit_H(void) +{ + int ret = 0; + + ret = bma_intf_unregister_type((void **)&g_bspveth_dev.bma_priv); + if (ret < 0) { + VETH_LOG(DLOG_ERROR, "bma_intf_unregister_type failed\n"); + + return; + } + + VETH_LOG(DLOG_DEBUG, "bspveth host exit H OK\n"); +} + +s32 bspveth_host_init_H(void) +{ + int ret = 0; + struct bma_priv_data_s *bma_priv = NULL; + + ret = bma_intf_register_type(TYPE_VETH, 0, INTR_ENABLE, + (void **)&bma_priv); + if (ret) { + ret = -1; + goto failed; + } + + if (!bma_priv) { + VETH_LOG(DLOG_ERROR, "bma_priv is NULL\n"); + return -1; + } + + VETH_LOG(DLOG_DEBUG, + "bma_intf_register_type pdev = %p, veth_swap_addr = %p, ", + bma_priv->specific.veth.pdev, + bma_priv->specific.veth.veth_swap_addr); + + VETH_LOG(DLOG_DEBUG, + "veth_swap_len = 0x%lx, veth_swap_phy_addr = 0x%lx\n", + bma_priv->specific.veth.veth_swap_len, + bma_priv->specific.veth.veth_swap_phy_addr); + + g_bspveth_dev.bma_priv = bma_priv; + g_bspveth_dev.ppcidev = bma_priv->specific.veth.pdev; + + /*bspveth_dev.phostrtc_p = (u8 *)bar1_base;*/ + /*bspveth_dev.phostrtc_v = (u8 *)bar1_remap;*/ + g_bspveth_dev.pshmpool_p = + (u8 *)bma_priv->specific.veth.veth_swap_phy_addr; + g_bspveth_dev.pshmpool_v = + (u8 *)bma_priv->specific.veth.veth_swap_addr; + g_bspveth_dev.shmpoolsize = bma_priv->specific.veth.veth_swap_len; + + VETH_LOG(DLOG_DEBUG, "bspveth host init H OK\n"); + + return BSP_OK; + +failed: + return ret; +} + +static int __init veth_init(void) +{ + int ret = BSP_OK; + int buf_len = 0; + + if (!bma_intf_check_edma_supported()) + return -ENXIO; + + memset(&g_bspveth_dev, 0, sizeof(g_bspveth_dev)); + + buf_len = snprintf(g_bspveth_dev.name, NET_NAME_LEN, + "%s", BSPVETH_DEV_NAME); + if (buf_len < 0 || ((u32)buf_len >= (NET_NAME_LEN))) { + VETH_LOG(DLOG_ERROR, "BSP_SNPRINTF lRet =0x%x\n", buf_len); + return BSP_ERR_INVALID_STR; + } + + ret = bspveth_host_init_H(); + if (ret != BSP_OK) { + ret = -1; + goto failed1; + } + + ret = veth_netdev_init(); + if (ret != BSP_OK) { + ret = -1; + goto failed2; + } + + GET_SYS_SECONDS(g_bspveth_dev.init_time); + + return BSP_OK; + +failed2: + bspveth_host_exit_H(); + +failed1: + + return ret; +} + +static void __exit veth_exit(void) +{ + veth_netdev_exit(); + + bspveth_host_exit_H(); +} + +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_DESCRIPTION("HUAWEI VETH DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VETH_VERSION); + +module_init(veth_init); +module_exit(veth_exit); diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h new file mode 100644 index 0000000000000000000000000000000000000000..242d3ec128d31525b90246c3b1166f5bb297297a --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h @@ -0,0 +1,440 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/*Huawei iBMA driver. + *Copyright (c) 2017, Huawei Technologies Co., Ltd. 
+ * + *This program is free software; you can redistribute it and/or + *modify it under the terms of the GNU General Public License + *as published by the Free Software Foundation; either version 2 + *of the License, or (at your option) any later version. + * + *This program is distributed in the hope that it will be useful, + *but WITHOUT ANY WARRANTY; without even the implied warranty of + *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + *GNU General Public License for more details. + * + */ + +#ifndef _VETH_HB_H_ +#define _VETH_HB_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#define DEP_BMA + +#include "../edma_drv/bma_include.h" +#include "../include/bma_ker_intf.h" + +#ifdef DRV_VERSION +#define VETH_VERSION MICRO_TO_STR(DRV_VERSION) +#else +#define VETH_VERSION "0.3.10" +#endif + +#define MODULE_NAME "veth" +#define BSP_VETH_T u64 + +#define BSP_OK (0) +#define BSP_ERR (0xFFFFFFFF) +#define BSP_NETDEV_TX_BUSY (1) +#define BSP_ERR_INIT_ERR (BSP_NETDEV_TX_BUSY) +#define BSP_ETH_ERR_BASE (0x0FFFF000) +#define BSP_ERR_OUT_OF_MEM (BSP_ETH_ERR_BASE + 1) +#define BSP_ERR_NULL_POINTER (BSP_ETH_ERR_BASE + 2) +#define BSP_ERR_INVALID_STR (BSP_ETH_ERR_BASE + 3) +#define BSP_ERR_INVALID_PARAM (BSP_ETH_ERR_BASE + 4) +#define BSP_ERR_INVALID_DATA (BSP_ETH_ERR_BASE + 5) +#define BSP_ERR_OUT_OF_RANGE (BSP_ETH_ERR_BASE + 6) +#define BSP_ERR_INVALID_CARD (BSP_ETH_ERR_BASE + 7) +#define BSP_ERR_INVALID_GRP (BSP_ETH_ERR_BASE + 8) +#define BSP_ERR_INVALID_ETH (BSP_ETH_ERR_BASE + 9) +#define BSP_ERR_SEND_ERR (BSP_ETH_ERR_BASE + 10) +#define BSP_ERR_DMA_ERR (BSP_ETH_ERR_BASE + 11) +#define BSP_ERR_RECV_ERR (BSP_ETH_ERR_BASE + 12) +#define BSP_ERR_SKB_ERR (BSP_ETH_ERR_BASE + 13) +#define BSP_ERR_DMA_ADDR_ERR (BSP_ETH_ERR_BASE + 14) +#define BSP_ERR_IOREMAP_ERR (BSP_ETH_ERR_BASE + 15) +#define BSP_ERR_LEN_ERR (BSP_ETH_ERR_BASE + 16) +#define BSP_ERR_STAT_ERR (BSP_ETH_ERR_BASE + 17) +#define BSP_ERR_AGAIN (BSP_ETH_ERR_BASE + 18) +#define BSP_ERR_NOT_TO_HANDLE (BSP_ETH_ERR_BASE + 19) + +#define VETH_H2B_IRQ_NO (113) +#define SYSCTL_REG_BASE (0x20000000) +#define SYSCTL_REG_SIZE (0x1000) +#define PCIE1_REG_BASE (0x29000000) +#define PCIE1_REG_SIZE (0x1000) +#define VETH_SHAREPOOL_BASE_INBMC (0x84820000) +#define VETH_SHAREPOOL_SIZE (0xdf000) +#define VETH_SHAREPOOL_OFFSET (0x10000) +#define MAX_SHAREQUEUE_SIZE (0x20000) + +#define BSPVETH_SHMBDBASE_OFFSET (0x80) +#define SHMDMAL_OFFSET (0x10000) +#define MAX_SHMDMAL_SIZE (BSPVETH_DMABURST_MAX * 32) + +#define BSPVETH_DMABURST_MAX 64 +#define BSPVETH_SKBTIMER_INTERVAL (1) +#define BSPVETH_DMATIMER_INTERVAL (1) +#define BSPVETH_CTLTIMER_INTERVAL (10) +#define BSPVETH_HDCMD_CHKTIMER_INTERVAL (10) +#define BSP_DMA_64BIT_MASK (0xffffffffffffffffULL) +#define BSP_DMA_32BIT_MASK (0x00000000ffffffffULL) +#define HOSTRTC_REG_BASE (0x2f000000) +#define HOSTRTC_REG_SIZE (0x10000) +#define REG_SYSCTL_HOSTINT_CLEAR (0x44) +#define SHIFT_SYSCTL_HOSTINT_CLEAR (22) +#define REG_SYSCTL_HOSTINT (0xf4) +#define SHIFT_SYSCTL_HOSTINT (26) + +#define NET_TYPE_LEN (16) + +#define MAX_QUEUE_NUM (1) +#define MAX_QUEUE_BDNUM (128) +#define BSPVETH_MAX_QUE_DEEP (MAX_QUEUE_BDNUM) +#define BSPVETH_POINT_MASK (MAX_QUEUE_BDNUM - 1) +#define BSPVETH_WORK_LIMIT (64) +#define BSPVETH_CHECK_DMA_STATUS_TIMES (120) + +#define REG_PCIE1_DMAREAD_ENABLE (0xa18) +#define SHIFT_PCIE1_DMAREAD_ENABLE (0) +#define REG_PCIE1_DMAWRITE_ENABLE (0x9c4) +#define SHIFT_PCIE1_DMAWRITE_ENABLE (0) +#define REG_PCIE1_DMAREAD_STATUS (0xa10) +#define SHIFT_PCIE1_DMAREAD_STATUS (0) 
+#define REG_PCIE1_DMAREADINT_CLEAR (0xa1c) +#define SHIFT_PCIE1_DMAREADINT_CLEAR (0) +#define REG_PCIE1_DMAWRITE_STATUS (0x9bc) +#define SHIFT_PCIE1_DMAWRITE_STATUS (0) +#define REG_PCIE1_DMAWRITEINT_CLEAR (0x9c8) +#define SHIFT_PCIE1_DMAWRITEINT_CLEAR (0) + +#define BSPVETH_DMA_OK (1) +#define BSPVETH_DMA_BUSY (0) +#define BSPVETH_RX (2) +#define BSPVETH_TX (3) +#define HOSTRTC_INT_OFFSET (0x10) +#define BSPVETH_DEV_NAME (MODULE_NAME) +#define NET_NAME_LEN (64) + +#ifdef PCI_VENDOR_ID_HUAWEI +#undef PCI_VENDOR_ID_HUAWEI +#endif +#define PCI_VENDOR_ID_HUAWEI (0x19e5) + +#define PCI_DEVICE_ID_KBOX (0x1710) +#define BSPVETH_MTU_MAX (1500) +#define BSPVETH_MTU_MIN (64) +#define BSPVETH_SKB_SIZE (1536) +#define BSPVETH_NET_TIMEOUT (5 * HZ) +#define BSPVETH_QUEUE_TIMEOUT_10MS (100) +#define BSPVETH_SHMQUEUE_INITOK (0x12) +#define BSPVETH_LBK_TYPE (0x800) + +#ifndef VETH_BMC +#define BSPVETH_CACHELINE_SIZE (64) +#else +#define BSPVETH_CACHELINE_SIZE (32) +#endif +#define BSPVETH_HBCMD_WCMP (0x44) +#define BSPVETH_HBCMD_CMP (0x55) +#define BSPVETH_HBCMD_OK (0x66) +#define BSPVETH_HEART_WACK (0x99) +#define BSPVETH_HEART_ACK (0xaa) + +#define BSPVETH_HBCMD_TIMEOUT (1000) + +#define SIZE_OF_UNSIGNED_LONG 8 +#define ADDR_H_SHIFT 32 +#define REGION_HOST 1 +#define REGION_BMC 2 + +enum veth_hb_cmd { + VETH_HBCMD_UNKNOWN = 0x0, + VETH_HBCMD_SETIP, + + VETH_HBCMD_MAX, +}; + +#define USE_TASKLET + +#define BSPVETH_ETHTOOL_BASE 0x89F0 +#define BSPVETH_ETHTOOL_TESTINT (BSPVETH_ETHTOOL_BASE + 1) +#define BSPVETH_ETHTOOL_TESTSHAREMEM (BSPVETH_ETHTOOL_BASE + 2) +#define BSPVETH_ETHTOOL_DUMPSHAREMEM (BSPVETH_ETHTOOL_BASE + 3) +#define BSPVETH_ETHTOOL_TESTDMA (BSPVETH_ETHTOOL_BASE + 4) +#define BSPVETH_ETHTOOL_RWPCIEREG (BSPVETH_ETHTOOL_BASE + 5) +#define BSPVETH_ETHTOOL_TESTLBK (BSPVETH_ETHTOOL_BASE + 6) +#define BSPVETH_ETHTOOL_INITSTATIS (BSPVETH_ETHTOOL_BASE + 7) +#define BSPVETH_HBCMD (BSPVETH_ETHTOOL_BASE + 8) + +struct bspveth_test { + u32 intdirect; /*0--H2B,1--B2H*/ + u32 rwshmcheck; /*0--w,1--r and check*/ + u32 dshmbase; + u32 dshmlen; + u32 testdma; /*0--disable,1---enable*/ + u32 pcierw; /*0--w,1---r*/ + u32 reg; + u32 data; + u32 testlbk; /*0--disable,1---enable*/ +}; + +struct bspveth_hdcmd { + u32 cmd; + u32 stat; + u32 heart; + u32 err; + u32 sequence; + u32 len; + u8 data[256]; +}; + +struct bspveth_rxtx_statis { + u64 pkt; + u64 pktbyte; + u64 refill; + u64 freetx; + u64 dmapkt; + u64 dmapktbyte; + + u32 dropped_pkt; + u32 netifrx_err; + u32 null_point; + u32 retry_err; + u32 dma_mapping_err; + u32 allocskb_err; + u32 q_full; + u32 q_emp; + u32 shm_full; + u32 shm_emp; + u32 dma_busy; + u32 need_fill; + u32 need_free; + u32 dmacmp_err; + u32 type_err; + u32 shmqueue_noinit; + u32 shmretry_err; + u32 dma_earlyint; + u32 clr_dma_earlyint; + u32 clr_dma_int; + u32 dmarx_shmaddr_unalign; + u32 dmarx_hostaddr_unalign; + u32 dmatx_shmaddr_unalign; + u32 dmatx_hostaddr_unalign; + u32 dma_need_offset; + u32 lastdmadir_err; + u32 dma_failed; + u32 dma_burst; + u32 lbk_cnt; + u32 lbk_txerr; +}; + +struct bspveth_bd_info { + struct sk_buff *pdma_v; + u32 len; + unsigned long time_stamp; +}; + +struct bspveth_dma_shmbd { + u32 dma_p; + u32 len; + u32 off; +}; + +struct bspveth_shmq_hd { + u32 count; + u32 size; /*count x sizeof(dmaBD)*/ + u32 next_to_fill; + u32 next_to_free; + u32 head; + u32 tail; + u16 init; /* 1--ok,0--nok*/ +}; + +struct bspveth_dma_bd { + u64 dma_p; + u32 len; + u32 off; +}; + +struct bspveth_dmal { + u32 chl; + u32 len; + u32 slow; + u32 shi; + u32 dlow; + u32 dhi; +}; + +struct 
bspveth_rxtx_q { +#ifndef VETH_BMC + struct bspveth_dma_bd *pbdbase_v; + u8 *pbdbase_p; +#endif + + struct bspveth_bd_info *pbdinfobase_v; + struct bspveth_shmq_hd *pshmqhd_v; + u8 *pshmqhd_p; + + struct bspveth_dma_shmbd *pshmbdbase_v; + u8 *pshmbdbase_p; + + struct bspveth_dmal *pdmalbase_v; + u8 *pdmalbase_p; + + u32 dmal_cnt; + u32 dmal_byte; + + u32 count; + u32 size; + u32 rx_buf_len; + + u32 next_to_fill; + u32 next_to_free; + u32 head; + u32 tail; + u16 start_dma; + u16 dmacmperr; + + u16 dma_overtime; + + u32 work_limit; + struct bspveth_rxtx_statis s; +}; + +struct bspveth_device { + struct bspveth_rxtx_q *ptx_queue[MAX_QUEUE_NUM]; + struct bspveth_rxtx_q *prx_queue[MAX_QUEUE_NUM]; + struct net_device *pnetdev; + char name[NET_NAME_LEN]; + + struct pci_dev *ppcidev; + u8 *phostrtc_p; + u8 *phostrtc_v; + + u8 *psysctl_v; + u8 *ppcie1_v; + + u8 *pshmpool_p; + u8 *pshmpool_v; + u32 shmpoolsize; + + u32 recv_int; + u32 tobmc_int; + u32 tohost_int; + u32 run_dma_tx_task; + u32 run_dma_rx_task; + u32 run_skb_rx_task; + u32 run_skb_fr_task; + u32 shutdown_cnt; + __kernel_time_t init_time; + + /* spinlock for register */ + spinlock_t reg_lock; +#ifndef USE_TASKLET + struct timer_list skbtrtimer; + struct timer_list dmatimer; +#else + struct tasklet_struct skb_task; + struct tasklet_struct dma_task; +#endif + + struct net_device_stats stats; + struct work_struct shutdown_task; +#ifdef DEP_BMA + struct bma_priv_data_s *bma_priv; +#else + void *edma_priv; +#endif +}; + +struct tag_pcie_comm_priv { + char net_type[NET_TYPE_LEN]; + struct net_device_stats stats; + int status; + int irq_enable; + int pcie_comm_rx_flag; + spinlock_t lock; /* spinlock for priv data */ +}; + +#define QUEUE_MASK(p) ((p) & (BSPVETH_POINT_MASK)) + +#define CHECK_ADDR_ALIGN(addr, statis)\ +do { \ + if ((addr) & 0x3) \ + statis;\ +} while (0) + +#define PROC_P_STATIS(name, statis)\ + PROC_DPRINTK("[%10s]:\t0x%llx", #name, statis) + +#define INC_STATIS_RXTX(queue, name, count, type) \ +do { \ + if (type == BSPVETH_RX)\ + g_bspveth_dev.prx_queue[queue]->s.name += count;\ + else\ + g_bspveth_dev.ptx_queue[queue]->s.name += count;\ +} while (0) + +#define PROC_DPRINTK(fmt, args...) 
(len += sprintf(buf + len, fmt, ##args)) + +#define JUDGE_TX_QUEUE_SPACE(head, tail, len) \ + (((BSPVETH_MAX_QUE_DEEP + (tail) - (head) - 1) \ + & BSPVETH_POINT_MASK) >= (len)) + +#define JUDGE_RX_QUEUE_SPACE(head, tail, len) \ + (((BSPVETH_MAX_QUE_DEEP + (tail) - (head)) \ + & BSPVETH_POINT_MASK) > (len)) + +#ifndef VETH_BMC +#define BSPVETH_UNMAP_DMA(data, len) \ + dma_unmap_single(&g_bspveth_dev.ppcidev->dev, \ + data, len, DMA_FROM_DEVICE) +#else +#define BSPVETH_UNMAP_DMA(data, len) \ + dma_unmap_single(NULL, data, len, DMA_FROM_DEVICE) +#endif + +int veth_tx(struct sk_buff *pstr_skb, struct net_device *pstr_dev); +int veth_dma_task_H(u32 type); +s32 veth_skbtimer_close(void); +void veth_skbtimer_init(void); +s32 veth_dmatimer_close_H(void); +void veth_dmatimer_init_H(void); +int veth_skb_tr_task(void); + +s32 __dma_rxtx_H(struct bspveth_rxtx_q *prxtx_queue, u32 queue, u32 type); +s32 veth_recv_pkt(struct bspveth_rxtx_q *prx_queue, int queue); +s32 veth_free_txskb(struct bspveth_rxtx_q *ptx_queue, int queue); + +enum { + QUEUE_TX_STATS, + QUEUE_RX_STATS, + VETH_STATS, + SHMQ_TX_STATS, + SHMQ_RX_STATS, + NET_STATS, +}; + +struct veth_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define VETH_STAT_SIZE(m) sizeof(((struct bspveth_device *)0)->m) +#define VETH_STAT_OFFSET(m) offsetof(struct bspveth_device, m) +#define QUEUE_TXRX_STAT_SIZE(m) sizeof(((struct bspveth_rxtx_q *)0)->m) +#define QUEUE_TXRX_STAT_OFFSET(m) offsetof(struct bspveth_rxtx_q, m) +#define SHMQ_TXRX_STAT_SIZE(m) sizeof(((struct bspveth_shmq_hd *)0)->m) +#define SHMQ_TXRX_STAT_OFFSET(m) offsetof(struct bspveth_shmq_hd, m) + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig index e4e8b24c1a5d0435f3d63e84c4232c66f6d5a83b..312b9d637d17909ed05b3f4316894a389f46e3e0 100644 --- a/drivers/net/ethernet/huawei/hinic/Kconfig +++ b/drivers/net/ethernet/huawei/hinic/Kconfig @@ -4,9 +4,10 @@ config HINIC tristate "Huawei Intelligent PCIE Network Interface Card" - depends on (PCI_MSI && (X86 || ARM64)) + default n + depends on PCI_MSI && NUMA && PCI_IOV && DCB ---help--- This driver supports HiNIC PCIE Ethernet cards. To compile this driver as part of the kernel, choose Y here. If unsure, choose N. - The default is compiled as module. + The default is N. 
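The queue macros added in veth_hb.h above (QUEUE_MASK, JUDGE_TX_QUEUE_SPACE, JUDGE_RX_QUEUE_SPACE) implement classic power-of-two ring indexing: head and tail are free-running counters, and only the masked value selects a buffer-descriptor slot, exactly as __make_dmalistbd_b2h_H() does with QUEUE_MASK(shm_tail + i) and QUEUE_MASK(host_head + i). The following is a minimal, self-contained sketch of that arithmetic only; the helper names tx_has_space/rx_has_space and the demo values are illustrative and not part of the driver:

	/*
	 * Standalone sketch (not part of this patch): power-of-two ring
	 * indexing as used by the veth_hb.h macros. MAX_QUEUE_BDNUM must be
	 * a power of two so the mask can replace a modulo.
	 */
	#include <stdio.h>

	#define MAX_QUEUE_BDNUM      128
	#define BSPVETH_MAX_QUE_DEEP (MAX_QUEUE_BDNUM)
	#define BSPVETH_POINT_MASK   (MAX_QUEUE_BDNUM - 1)

	#define QUEUE_MASK(p) ((p) & (BSPVETH_POINT_MASK))

	/* Mirrors JUDGE_TX_QUEUE_SPACE: the "- 1" reserves one slot so that
	 * head == tail always means "empty", never "full". */
	static int tx_has_space(unsigned int head, unsigned int tail,
				unsigned int len)
	{
		return ((BSPVETH_MAX_QUE_DEEP + tail - head - 1) &
			BSPVETH_POINT_MASK) >= len;
	}

	/* Mirrors JUDGE_RX_QUEUE_SPACE: no reserved slot, strict '>'. */
	static int rx_has_space(unsigned int head, unsigned int tail,
				unsigned int len)
	{
		return ((BSPVETH_MAX_QUE_DEEP + tail - head) &
			BSPVETH_POINT_MASK) > len;
	}

	int main(void)
	{
		/* Free-running counters; only the masked value indexes the
		 * BD array. */
		unsigned int head = 130, tail = 135;

		printf("slot for head: %u\n", QUEUE_MASK(head)); /* 130 & 127 = 2 */
		printf("tx space for 4 BDs: %d\n", tx_has_space(head, tail, 4));
		printf("rx space for 4 BDs: %d\n", rx_has_space(head, tail, 4));
		return 0;
	}

Keeping the ring depth a power of two lets the AND mask stand in for a modulo on every descriptor access, and reserving one TX slot avoids the ambiguity between a full and an empty ring without a separate element counter.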
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 289ce88bb2d031ec78eed0b14062e6961b1f166c..bb0533e4ed8b3edd3c03028d8da3de9c78a33f5e 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,6 +1,12 @@ obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ - hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \ - hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \ - hinic_common.o +hinic-y := hinic_nic_cfg.o hinic_nic_io.o hinic_nic_dbg.o \ + hinic_hwif.o hinic_msix_attr.o hinic_eqs.o \ + hinic_mbox.o hinic_api_cmd.o hinic_mgmt.o \ + hinic_wq.o hinic_cmdq.o hinic_hwdev.o hinic_cfg.o \ + ossl_knl_linux.o \ + hinic_sml_counter.o hinic_sml_lt.o \ + hinic_multi_host_mgmt.o hinic_main.o hinic_lld.o \ + hinic_qp.o hinic_rx.o hinic_tx.o hinic_dbgtool_knl.o \ + hinic_nictool.o hinic_sriov.o hinic_dcb.o\ + hinic_ethtool.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c similarity index 38% rename from drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c rename to drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c index c40603a183df640084cb1da0e1342253598802dc..4df801459111646c0217d56848c13cfb337b0b33 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,81 +13,87 @@ * */ -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + #include #include -#include +#include +#include #include -#include +#include #include -#include -#include +#include #include #include -#include -#include -#include -#include -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" -#include "hinic_hw_api_cmd.h" +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_api_cmd.h" -#define API_CHAIN_NUM_CELLS 32 +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U -#define API_CMD_CELL_SIZE_SHIFT 6 -#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT)) +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 -#define API_CMD_CELL_SIZE(cell_size) \ - (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? 
\ - (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 -#define API_CMD_CELL_SIZE_VAL(size) \ - ilog2((size) >> API_CMD_CELL_SIZE_SHIFT) +#define API_CMD_CELL_WB_ADDR_SIZE 8 -#define API_CMD_BUF_SIZE 2048 +#define API_CHAIN_CELL_ALIGNMENT 8 -/* Sizes of the members in hinic_api_cmd_cell */ -#define API_CMD_CELL_DESC_SIZE 8 -#define API_CMD_CELL_DATA_ADDR_SIZE 8 +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 100000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL -#define API_CMD_CELL_ALIGNMENT 8 +#define API_CHAIN_RESP_ALIGNMENT 64ULL -#define API_CMD_TIMEOUT 1000 +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U -#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) -#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3) -#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2) +#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16) -#define RD_DMA_ATTR_DEFAULT 0 -#define WR_DMA_ATTR_DEFAULT 0 +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) enum api_cmd_data_format { - SGE_DATA = 1, /* cell data is passed by hw address */ + SGL_DATA = 1, }; enum api_cmd_type { - API_CMD_WRITE = 0, + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, }; enum api_cmd_bypass { - NO_BYPASS = 0, - BYPASS = 1, + NOT_BYPASS = 0, + BYPASS = 1, }; -enum api_cmd_xor_chk_level { - XOR_CHK_DIS = 0, - - XOR_CHK_ALL = 3, +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, }; static u8 xor_chksum_set(void *data) { int idx; - u8 *val, checksum = 0; - - val = data; + u8 checksum = 0; + u8 *val = data; for (idx = 0; idx < 7; idx++) checksum ^= val[idx]; @@ -98,77 +104,116 @@ static u8 xor_chksum_set(void *data) static void set_prod_idx(struct hinic_api_cmd_chain *chain) { enum hinic_api_cmd_chain_type chain_type = chain->chain_type; - struct hinic_hwif *hwif = chain->hwif; - u32 addr, prod_idx; + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; - addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); - prod_idx = hinic_hwif_read_reg(hwif, addr); + hinic_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} - prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX); +static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; - prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX); + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); - hinic_hwif_write_reg(hwif, addr, prod_idx); + return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); } -static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) { + void *dev = chain->hwdev->dev_hdl; u32 addr, val; - addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); - val = hinic_hwif_read_reg(chain->hwif, addr); + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); - return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current 
fsm: 0x%x\n", + chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR), + HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HINIC_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HINIC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); } /** * chain_busy - check if the chain is still processing last requests * @chain: chain to check - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int chain_busy(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - u32 prod_idx; + void *dev = chain->hwdev->dev_hdl; + struct hinic_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; switch (chain->chain_type) { + case HINIC_API_CMD_MULTI_READ: + case HINIC_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy, pi: %d, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HINIC_API_CMD_POLL_WRITE: case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: chain->cons_idx = get_hw_cons_idx(chain); - prod_idx = chain->prod_idx; - /* check for a space for a new command */ - if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { - dev_err(&pdev->dev, "API CMD chain %d is busy\n", - chain->chain_type); + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); return -EBUSY; } break; - default: - dev_err(&pdev->dev, "Unknown API CMD Chain type\n"); - break; + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; } return 0; } /** - * get_cell_data_size - get the data size of a specific cell type + * get_cell_data_size - get the data size of specific cell type * @type: chain type - * - * Return the data(Desc + Address) size in the cell - **/ -static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) + * @cmd_size: the command size + * Return: cell_data_size + */ +static u16 get_cell_data_size(enum hinic_api_cmd_chain_type type, u16 cmd_size) { - u8 cell_data_size = 0; + u16 cell_data_size = 0; switch (type) { + case HINIC_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + API_CMD_CELL_DATA_ADDR_SIZE, - API_CMD_CELL_ALIGNMENT); + API_CHAIN_CELL_ALIGNMENT); break; default: break; @@ -179,17 +224,17 @@ static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) /** * prepare_cell_ctrl - prepare the ctrl of the cell for the command - * @cell_ctrl: the control of the cell to set the control value into it - * @data_size: the size of the data in the cell - **/ -static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell 
+ */ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) { - u8 chksum; u64 ctrl; + u8 chksum; - ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) | - HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) | - HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR); + ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HINIC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); chksum = xor_chksum_set(&ctrl); @@ -202,34 +247,55 @@ static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) /** * prepare_api_cmd - prepare API CMD command * @chain: chain for the command + * @cell: the cell of the command * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size - **/ + */ static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, enum hinic_node_id dest, - void *cmd, u16 cmd_size) + const void *cmd, u16 cmd_size) { - struct hinic_api_cmd_cell *cell = chain->curr_node; - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; switch (chain->chain_type) { + case HINIC_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HINIC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HINIC_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HINIC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) | - HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) | - HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS); + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HINIC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HINIC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + HINIC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + HINIC_API_CMD_DESC_SET(priv, PRIV_DATA); break; - default: - dev_err(&pdev->dev, "unknown Chain type\n"); + sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", + chain->chain_type); return; } - cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | + cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), @@ -242,23 +308,25 @@ static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, } /** - * prepare_cell - prepare cell ctrl and cmd in the current cell + * prepare_cell - prepare cell ctrl and cmd in the current producer cell * @chain: chain for the command * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size - * - * Return 0 - Success, negative - Failure - **/ + */ static void prepare_cell(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, - void *cmd, 
u16 cmd_size) + void *cmd, u16 cmd_size) { - struct hinic_api_cmd_cell *curr_node = chain->curr_node; - u16 data_size = get_cell_data_size(chain->chain_type); + struct hinic_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; - prepare_cell_ctrl(&curr_node->ctrl, data_size); - prepare_api_cmd(chain, dest, cmd, cmd_size); + cell_size = get_cell_data_size(chain->chain_type, cmd_size); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size); } static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) @@ -266,130 +334,212 @@ static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); } +static void issue_api_cmd(struct hinic_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + /** - * api_cmd_status_update - update the status in the chain struct + * api_cmd_status_update - update the status of the chain * @chain: chain to update - **/ + */ static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) { - enum hinic_api_cmd_chain_type chain_type; struct hinic_api_cmd_status *wb_status; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - u64 status_header; - u32 status; + enum hinic_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; wb_status = chain->wb_status; - status_header = be64_to_cpu(wb_status->header); - status = be32_to_cpu(wb_status->status); - if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) { - dev_err(&pdev->dev, "API CMD status: Xor check error\n"); + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (HINIC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) return; - } + status_header = be64_to_cpu(wb_status->header); chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); - if (chain_type >= HINIC_API_CMD_MAX) { - dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type); + if (chain_type >= HINIC_API_CMD_MAX) return; - } - chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX); + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = HINIC_API_CMD_STATUS_GET(buf_desc, CONS_IDX); } /** - * wait_for_status_poll - wait for write to api cmd command to complete + * wait_for_status_poll - wait for write to mgmt command to complete * @chain: the chain of the command - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) { int err = -ETIMEDOUT; - unsigned long end; + u32 cnt = 0; - end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); - do { + while (cnt < API_CMD_STATUS_TIMEOUT && + chain->hwdev->chip_present_flag) { api_cmd_status_update(chain); - /* wait for CI to be updated - sign for completion */ + /* SYNC API CMD cmd should start after prev cmd finished */ if (chain->cons_idx == chain->prod_idx) { err = 0; break; } - msleep(20); - } while (time_before(jiffies, end)); + usleep_range(50, 100); + cnt++; + } return err; } +static void copy_resp_data(struct hinic_api_cmd_cell_ctxt *ctxt, void *ack, + u16 ack_size) +{ + struct hinic_api_cmd_resp_fmt *resp = ctxt->resp; + + memcpy(ack, &resp->resp_data, ack_size); + ctxt->status = 0; +} + +/** + * prepare_cell - polling for respense data of the read api-command + * @ctxt: pointer to api cmd cell ctxt + * + * Return: 0 - success, negative - failure + */ +static int wait_for_resp_polling(struct hinic_api_cmd_cell_ctxt *ctxt) +{ + u64 resp_header; + int ret = -ETIMEDOUT; + u32 cnt 
= 0; + + while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) { + resp_header = be64_to_cpu(ctxt->resp->header); + + rmb(); /* read the latest header */ + + if (HINIC_API_CMD_RESP_HEADER_VALID(resp_header)) { + ret = 0; + break; + } + usleep_range(100, 1000); + cnt++; + } + + if (ret) + pr_err("Wait for api chain response timeout\n"); + + return ret; +} + /** * wait_for_api_cmd_completion - wait for command to complete * @chain: chain for the command - * - * Return 0 - Success, negative - Failure - **/ -static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) + * @ctxt: pointer to api cmd cell ctxt + * @ack: pointer to ack message + * @ack_size: the size of ack message + * Return: 0 - success, negative - failure + */ +static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) { - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; + void *dev = chain->hwdev->dev_hdl; + int err = 0; switch (chain->chain_type) { + case HINIC_API_CMD_POLL_READ: + err = wait_for_resp_polling(ctxt); + if (!err) + copy_resp_data(ctxt, ack, ack_size); + break; + case HINIC_API_CMD_POLL_WRITE: case HINIC_API_CMD_WRITE_TO_MGMT_CPU: err = wait_for_status_poll(chain); if (err) { - dev_err(&pdev->dev, "API CMD Poll status timeout\n"); + sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", + chain->chain_type); break; } break; - + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* No need to wait */ + break; default: - dev_err(&pdev->dev, "unknown API CMD Chain type\n"); + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); err = -EINVAL; break; } + if (err) + dump_api_chain_reg(chain); + return err; } +static inline void update_api_cmd_ctxt(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; + if (ctxt->resp) { + ctxt->resp->header = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + /** * api_cmd - API CMD command * @chain: chain for the command * @dest: destination node on the card that will receive the command * @cmd: command data - * @size: the command size - * - * Return 0 - Success, negative - Failure - **/ + * @cmd_size: the command size + * @ack: the buffer for ack + * @ack_size: the size of ack + * Return: 0 - success, negative - failure + */ static int api_cmd(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 cmd_size) + enum hinic_node_id dest, + void *cmd, u16 cmd_size, void *ack, u16 ack_size) { struct hinic_api_cmd_cell_ctxt *ctxt; - int err; - down(&chain->sem); + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock(&chain->async_lock); + else + down(&chain->sem); + ctxt = &chain->cell_ctxt[chain->prod_idx]; if (chain_busy(chain)) { - up(&chain->sem); + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); return -EBUSY; } + update_api_cmd_ctxt(chain, ctxt); prepare_cell(chain, dest, cmd, cmd_size); - cmd_chain_prod_idx_inc(chain); - wmb(); /* inc pi before issue the command */ + cmd_chain_prod_idx_inc(chain); - set_prod_idx(chain); /* issue the command */ + wmb(); /* issue the command */ - ctxt = &chain->cell_ctxt[chain->prod_idx]; + issue_api_cmd(chain); - chain->curr_node = ctxt->cell_vaddr; + /* incremented prod idx, update ctxt */ - err = wait_for_api_cmd_completion(chain); + chain->curr_node = 
chain->cell_ctxt[chain->prod_idx].cell_vaddr; + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); - up(&chain->sem); - return err; + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); } /** @@ -398,34 +548,35 @@ static int api_cmd(struct hinic_api_cmd_chain *chain, * @dest: destination node on the card that will receive the command * @cmd: command data * @size: the command size - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 size) + enum hinic_node_id dest, void *cmd, u16 size) { /* Verify the chain type */ - if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU) - return api_cmd(chain, dest, cmd, size); + return api_cmd(chain, dest, cmd, size, NULL, 0); +} - return -EINVAL; +int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 size, void *ack, u16 ack_size) +{ + return api_cmd(chain, dest, cmd, size, ack, ack_size); } /** * api_cmd_hw_restart - restart the chain in the HW - * @chain: the API CMD specific chain to restart - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) + * @cmd_chain: the API CMD specific chain to restart + */ +static int api_cmd_hw_restart(struct hinic_api_cmd_chain *cmd_chain) { - struct hinic_hwif *hwif = chain->hwif; - int err = -ETIMEDOUT; - unsigned long end; + struct hinic_hwif *hwif = cmd_chain->hwdev->hwif; u32 reg_addr, val; + int err; + u32 cnt = 0; /* Read Modify Write */ - reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); + reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); val = hinic_hwif_read_reg(hwif, reg_addr); val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); @@ -433,8 +584,8 @@ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) hinic_hwif_write_reg(hwif, reg_addr, val); - end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); - do { + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { val = hinic_hwif_read_reg(hwif, reg_addr); if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { @@ -442,8 +593,9 @@ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) break; } - msleep(20); - } while (time_before(jiffies, end)); + usleep_range(900, 1000); + cnt++; + } return err; } @@ -451,40 +603,36 @@ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) /** * api_cmd_ctrl_init - set the control register of a chain * @chain: the API CMD specific chain to set control register for - **/ + */ static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; - u32 addr, ctrl; - u16 cell_size; + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; /* Read Modify Write */ - addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + reg_addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size); + size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); - ctrl = hinic_hwif_read_reg(hwif, addr); + ctrl = hinic_hwif_read_reg(hwif, reg_addr); - ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & 
HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) | - HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) | - HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); + ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HINIC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); - hinic_hwif_write_reg(hwif, addr, ctrl); + hinic_hwif_write_reg(hwif, reg_addr, ctrl); } /** * api_cmd_set_status_addr - set the status address of a chain in the HW - * @chain: the API CMD specific chain to set in HW status address for - **/ + * @chain: the API CMD specific chain to set status address for + */ static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; + struct hinic_hwif *hwif = chain->hwdev->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); @@ -498,11 +646,11 @@ static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) /** * api_cmd_set_num_cells - set the number cells of a chain in the HW - * @chain: the API CMD specific chain to set in HW the number of cells for - **/ + * @chain: the API CMD specific chain to set the number of cells for + */ static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; + struct hinic_hwif *hwif = chain->hwdev->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); @@ -511,12 +659,12 @@ static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) } /** - * api_cmd_head_init - set the head of a chain in the HW - * @chain: the API CMD specific chain to set in HW the head for - **/ + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + */ static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; + struct hinic_hwif *hwif = chain->hwdev->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); @@ -528,22 +676,54 @@ static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) hinic_hwif_write_reg(hwif, addr, val); } +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + */ +static int wait_for_ready_chain(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + u32 hw_cons_idx; + u32 cnt = 0; + int err; + + addr = HINIC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hinic_hwif_read_reg(hwif, addr); + hw_cons_idx = HINIC_API_CMD_STATUS_GET(val, CONS_IDX); + + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + /** * api_cmd_chain_hw_clean - clean the HW * @chain: the API CMD specific chain - **/ + */ static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; + struct hinic_hwif *hwif = chain->hwdev->hwif; u32 addr, ctrl; addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); ctrl = hinic_hwif_read_reg(hwif, addr); - ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + 
HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); hinic_hwif_write_reg(hwif, addr, ctrl); @@ -552,47 +732,24 @@ static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) /** * api_cmd_chain_hw_init - initialize the chain in the HW * @chain: the API CMD specific chain to initialize in HW - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - api_cmd_chain_hw_clean(chain); api_cmd_set_status_addr(chain); - err = api_cmd_hw_restart(chain); - if (err) { - dev_err(&pdev->dev, "Failed to restart API CMD HW\n"); - return err; + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; } api_cmd_ctrl_init(chain); api_cmd_set_num_cells(chain); api_cmd_head_init(chain); - return 0; -} -/** - * free_cmd_buf - free the dma buffer of API CMD command - * @chain: the API CMD specific chain of the cmd - * @cell_idx: the cell index of the cmd - **/ -static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) -{ - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE, - cell_ctxt->api_cmd_vaddr, - cell_ctxt->api_cmd_paddr); + return wait_for_ready_chain(chain); } /** @@ -600,41 +757,40 @@ static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) * @chain: the API CMD specific chain for the cmd * @cell: the cell in the HW for the cmd * @cell_idx: the index of the cell - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, - struct hinic_api_cmd_cell *cell, int cell_idx) + struct hinic_api_cmd_cell *cell, u32 cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - dma_addr_t cmd_paddr; - u8 *cmd_vaddr; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; int err = 0; - cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, - &cmd_paddr, GFP_KERNEL); - if (!cmd_vaddr) { - dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); - return -ENOMEM; - } + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; cell_ctxt = &chain->cell_ctxt[cell_idx]; - cell_ctxt->api_cmd_vaddr = cmd_vaddr; - cell_ctxt->api_cmd_paddr = cmd_paddr; + cell_ctxt->api_cmd_vaddr = buf_vaddr; /* set the cmd DMA address in the cell */ switch (chain->chain_type) { + case HINIC_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: /* The data in the HW should be in Big Endian Format */ - cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); break; - default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); - free_cmd_buf(chain, cell_idx); + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); err 
= -EINVAL; break; } @@ -642,337 +798,370 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, return err; } -/** - * api_cmd_create_cell - create API CMD cell for specific chain - * @chain: the API CMD specific chain to create its cell - * @cell_idx: the index of the cell to create - * @pre_node: previous cell - * @node_vaddr: the returned virt addr of the cell - * - * Return 0 - Success, negative - Failure - **/ -static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, - int cell_idx, - struct hinic_api_cmd_cell *pre_node, - struct hinic_api_cmd_cell **node_vaddr) +static void alloc_resp_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, u32 cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_api_cmd_cell *node; - dma_addr_t node_paddr; - int err; - - node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, - &node_paddr, GFP_KERNEL); - if (!node) { - dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); - return -ENOMEM; - } + void *resp_vaddr; + u64 resp_paddr; - node->read.hw_wb_resp_paddr = 0; + resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; cell_ctxt = &chain->cell_ctxt[cell_idx]; - cell_ctxt->cell_vaddr = node; - cell_ctxt->cell_paddr = node_paddr; - if (!pre_node) { - chain->head_cell_paddr = node_paddr; - chain->head_node = node; - } else { - /* The data in the HW should be in Big Endian Format */ - pre_node->next_cell_paddr = cpu_to_be64(node_paddr); - } + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int hinic_alloc_api_cmd_cell_buf(struct hinic_api_cmd_chain *chain, + u32 cell_idx, + struct hinic_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == HINIC_API_CMD_MULTI_READ || + chain->chain_type == HINIC_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC_API_CMD_POLL_WRITE: + case HINIC_API_CMD_POLL_READ: + case HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: err = alloc_cmd_buf(chain, node, cell_idx); if (err) { - dev_err(&pdev->dev, "Failed to allocate cmd buffer\n"); - goto err_alloc_cmd_buf; + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; } break; - + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. 
+ */ + case HINIC_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; + break; default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + sdk_err(dev, "Unsupported API CMD chain type\n"); err = -EINVAL; - goto err_alloc_cmd_buf; + goto alloc_cmd_buf_err; } - *node_vaddr = node; return 0; -err_alloc_cmd_buf: - dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr); +alloc_cmd_buf_err: + return err; } /** - * api_cmd_destroy_cell - destroy API CMD cell of specific chain - * @chain: the API CMD specific chain to destroy its cell - * @cell_idx: the cell to destroy - **/ -static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, - int cell_idx) + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, u32 cell_idx, + struct hinic_api_cmd_cell *pre_node, + struct hinic_api_cmd_cell **node_vaddr) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; struct hinic_api_cmd_cell *node; - dma_addr_t node_paddr; - size_t node_size; + void *cell_vaddr; + u64 cell_paddr; + int err; - cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; node = cell_ctxt->cell_vaddr; - node_paddr = cell_ctxt->cell_paddr; - node_size = chain->cell_size; - if (cell_ctxt->api_cmd_vaddr) { - switch (chain->chain_type) { - case HINIC_API_CMD_WRITE_TO_MGMT_CPU: - free_cmd_buf(chain, cell_idx); - break; - default: - dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); - break; - } - - dma_free_coherent(&pdev->dev, node_size, node, - node_paddr); + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); } -} -/** - * api_cmd_destroy_cells - destroy API CMD cells of specific chain - * @chain: the API CMD specific chain to destroy its cells - * @num_cells: number of cells to destroy - **/ -static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, - int num_cells) -{ - int cell_idx; + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = hinic_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; - for (cell_idx = 0; cell_idx < num_cells; cell_idx++) - api_cmd_destroy_cell(chain, cell_idx); + return 0; } /** * api_cmd_create_cells - create API CMD cells for specific chain * @chain: the API CMD specific chain - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) { struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; - int err, cell_idx; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { err = 
api_cmd_create_cell(chain, cell_idx, pre_node, &node); if (err) { - dev_err(&pdev->dev, "Failed to create API CMD cell\n"); - goto err_create_cell; + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; } pre_node = node; } + if (!node) + return -EFAULT; + /* set the Final node to point on the start */ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); /* set the current node to be the head */ chain->curr_node = chain->head_node; return 0; - -err_create_cell: - api_cmd_destroy_cells(chain, cell_idx); - return err; } /** * api_chain_init - initialize API CMD specific chain * @chain: the API CMD specific chain to initialize * @attr: attributes to set in the chain - * - * Return 0 - Success, negative - Failure - **/ + * Return: 0 - success, negative - failure + */ static int api_chain_init(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_chain_attr *attr) { - struct hinic_hwif *hwif = attr->hwif; - struct pci_dev *pdev = hwif->pdev; + void *dev = chain->hwdev->dev_hdl; size_t cell_ctxt_size; + size_t cells_buf_size; + int err; - chain->hwif = hwif; chain->chain_type = attr->chain_type; chain->num_cells = attr->num_cells; chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; chain->prod_idx = 0; chain->cons_idx = 0; - sema_init(&chain->sem, 1); + if (chain->chain_type == HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); - if (!chain->cell_ctxt) + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + return -EINVAL; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); return -ENOMEM; + } - chain->wb_status = dma_zalloc_coherent(&pdev->dev, + chain->wb_status = dma_zalloc_coherent(dev, sizeof(*chain->wb_status), &chain->wb_status_paddr, GFP_KERNEL); if (!chain->wb_status) { - dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); - return -ENOMEM; + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = hinic_dma_zalloc_coherent_align(dev, cells_buf_size, + API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, + &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; } + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + 
kfree(chain->cell_ctxt); + + return err; } /** * api_chain_free - free API CMD specific chain * @chain: the API CMD specific chain to free - **/ + */ static void api_chain_free(struct hinic_api_cmd_chain *chain) { - struct hinic_hwif *hwif = chain->hwif; - struct pci_dev *pdev = hwif->pdev; + void *dev = chain->hwdev->dev_hdl; + + hinic_dma_free_coherent_align(dev, &chain->cells_addr); - dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status), + dma_free_coherent(dev, sizeof(*chain->wb_status), chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); } /** * api_cmd_create_chain - create API CMD specific chain - * @attr: attributes to set the chain - * - * Return the created chain - **/ -static struct hinic_api_cmd_chain * - api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr) + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + */ +static int api_cmd_create_chain(struct hinic_api_cmd_chain **cmd_chain, + struct hinic_api_cmd_chain_attr *attr) { - struct hinic_hwif *hwif = attr->hwif; - struct pci_dev *pdev = hwif->pdev; + struct hinic_hwdev *hwdev = attr->hwdev; struct hinic_api_cmd_chain *chain; int err; if (attr->num_cells & (attr->num_cells - 1)) { - dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n"); - return ERR_PTR(-EINVAL); + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; } - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (!chain) - return ERR_PTR(-ENOMEM); + return -ENOMEM; + + chain->hwdev = hwdev; err = api_chain_init(chain, attr); if (err) { - dev_err(&pdev->dev, "Failed to initialize chain\n"); - return ERR_PTR(err); + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; } err = api_cmd_create_cells(chain); if (err) { - dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n"); - goto err_create_cells; + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; } err = api_cmd_chain_hw_init(chain); if (err) { - dev_err(&pdev->dev, "Failed to initialize chain HW\n"); - goto err_chain_hw_init; + sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); + goto chain_hw_init_err; } - return chain; - -err_chain_hw_init: - api_cmd_destroy_cells(chain, chain->num_cells); + *cmd_chain = chain; + return 0; -err_create_cells: +chain_hw_init_err: +create_cells_err: api_chain_free(chain); - return ERR_PTR(err); + +chain_init_err: + kfree(chain); + return err; } /** * api_cmd_destroy_chain - destroy API CMD specific chain * @chain: the API CMD specific chain to destroy - **/ + */ static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) { - api_cmd_chain_hw_clean(chain); - api_cmd_destroy_cells(chain, chain->num_cells); api_chain_free(chain); + kfree(chain); } /** * hinic_api_cmd_init - Initialize all the API CMD chains - * @chain: the API CMD chains that are initialized - * @hwif: the hardware interface of a pci function device - * - * Return 0 - Success, negative - Failure - **/ -int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, - struct hinic_hwif *hwif) + * @hwdev: the pointer to hw device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + */ +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain) { - enum hinic_api_cmd_chain_type type, chain_type; + void *dev = hwdev->dev_hdl; 
struct hinic_api_cmd_chain_attr attr; - struct pci_dev *pdev = hwif->pdev; - size_t hw_cell_sz; + enum hinic_api_cmd_chain_type chain_type, i; int err; - hw_cell_sz = sizeof(struct hinic_api_cmd_cell); - - attr.hwif = hwif; + attr.hwdev = hwdev; attr.num_cells = API_CHAIN_NUM_CELLS; - attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz); + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + for (; chain_type < HINIC_API_CMD_MAX; chain_type++) { attr.chain_type = chain_type; - if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; - - chain[chain_type] = api_cmd_create_chain(&attr); - if (IS_ERR(chain[chain_type])) { - dev_err(&pdev->dev, "Failed to create chain %d\n", - chain_type); - err = PTR_ERR(chain[chain_type]); - goto err_create_chain; + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; } } return 0; -err_create_chain: - type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; type < chain_type; type++) { - if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; - - api_cmd_destroy_chain(chain[type]); - } +create_chain_err: + i = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); return err; } /** * hinic_api_cmd_free - free the API CMD chains - * @chain: the API CMD chains that are freed - **/ + * @chain: the API CMD chains that will be freed + */ void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) { enum hinic_api_cmd_chain_type chain_type; chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; - for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { - if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) - continue; + for (; chain_type < HINIC_API_CMD_MAX; chain_type++) api_cmd_destroy_chain(chain[chain_type]); - } } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..9c77ac181d9133ef6039a61129c94c1571048c75 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_api_cmd.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_API_CMD_H_ +#define HINIC_API_CMD_H_ + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HINIC_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HINIC_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1U +#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HINIC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_DESC_DEST_MASK 0x1FU +#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU +#define HINIC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU + +#define HINIC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ + HINIC_API_CMD_DESC_##member##_SHIFT) + +#define HINIC_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define HINIC_API_CMD_STATUS_VALID_CODE 0xFF + +#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_HEADER_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HINIC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & 
(~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HINIC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HINIC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HINIC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HINIC_API_CMD_RESP_HEAD_VALID_MASK) == \ + HINIC_API_CMD_RESP_HEAD_VALID_CODE) + +#define HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define HINIC_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU + +#define HINIC_API_CMD_RESP_HEAD_ERR_CODE 0x1 +#define HINIC_API_CMD_RESP_HEAD_ERR(val) \ + ((((val) >> HINIC_API_CMD_RESP_HEAD_STATUS_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_STATUS_MASK) == \ + HINIC_API_CMD_RESP_HEAD_ERR_CODE) + +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFF + +#define HINIC_API_CMD_RESP_RESERVED 3 +#define HINIC_API_CMD_RESP_HEAD_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_CHAIN_ID_MASK) + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ + (u16)(((val) >> HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ + HINIC_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK) + +#define HINIC_API_CMD_STATUS_HEAD_VALID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_VALID_SHIFT 0 + +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_MASK 0xFFU +#define HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU +#define HINIC_API_CMD_STATUS_FSM_SHIFT 24 + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HINIC_API_CMD_STATUS_CHAIN_ID(val) \ + (((val) >> HINIC_API_CMD_STATUS_HEAD_CHAIN_ID_VALID_SHIFT) & \ + HINIC_API_CMD_STATUS_HEAD_VALID_MASK) + +#define HINIC_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & HINIC_API_CMD_STATUS_CONS_IDX_MASK) + +#define HINIC_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define HINIC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_##member##_MASK) + +enum hinic_api_cmd_chain_type { + /* write command with completion notification */ + HINIC_API_CMD_WRITE = 0, + /* read command with completion notification */ + HINIC_API_CMD_READ = 1, + /* write to mgmt cpu command with completion */ + HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HINIC_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HINIC_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HINIC_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HINIC_API_CMD_MAX, +}; + +struct hinic_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hinic_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic_api_cmd_resp_fmt { + u64 header; + u64 rsvd[3]; + u64 
resp_data; +}; + +struct hinic_api_cmd_cell_ctxt { + struct hinic_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hinic_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; +}; + +struct hinic_api_cmd_chain_attr { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hinic_api_cmd_chain { + struct hinic_hwdev *hwdev; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members are in 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduled */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hinic_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic_api_cmd_cell *head_node; + + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_api_cmd_cell *curr_node; + + struct hinic_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; +}; + +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size); + +int hinic_api_cmd_read(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, void *cmd, u16 size, + void *ack, u16 ack_size); + +int hinic_api_cmd_init(struct hinic_hwdev *hwdev, + struct hinic_api_cmd_chain **chain); + +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..0c3d362a78de2fb132887a7ae0e32805479e21cc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.c @@ -0,0 +1,2478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_mbox.h" +#include "hinic_cfg.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + +uint g_rdma_mtts_num; +uint g_rdma_qps_num; +uint g_rdma_mpts_num; +uint g_vfs_num; +module_param(g_rdma_mtts_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_mtts_num, "number of roce used mtts, use default value when pass 0"); +module_param(g_rdma_qps_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_qps_num, "number of roce used qps, use default value when pass 0"); +module_param(g_rdma_mpts_num, uint, 0444); +MODULE_PARM_DESC(g_rdma_mpts_num, "number of roce used mpts, use default value when pass 0"); +module_param(g_vfs_num, uint, 0444); +MODULE_PARM_DESC(g_vfs_num, "number of used vfs, use default value when pass 0 "); + +static uint intr_mode; + +uint timer_enable = 1; +uint bloomfilter_enable; +uint g_test_qpc_num; +uint g_test_qpc_resvd_num; +uint g_test_pagesize_reorder; +uint g_test_xid_alloc_mode = 1; +uint g_test_gpa_check_enable = 1; +uint g_test_qpc_alloc_mode = 2; +uint g_test_scqc_alloc_mode = 2; +uint g_test_max_conn; +uint g_test_max_cache_conn; +uint g_test_scqc_num; +uint g_test_mpt_num; +uint g_test_mpt_resvd; +uint g_test_scq_resvd; +uint g_test_hash_num; +uint g_test_reorder_num; + +static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt) +{ + cfg_mgmt->svc_cap.timer_en = (u8)timer_enable; + cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable; + cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num; + cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num; + cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder; + cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode; + cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable; + cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode; + cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode; + cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn; + cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn; + cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num; + cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num; + cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd; + cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd; + cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num; + cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num; +} + +int hinic_sync_time(void *hwdev, u64 time) +{ + struct hinic_sync_time_info time_info = {0}; + u16 out_size = sizeof(time_info); + int err; + + time_info.mstime = time; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size, + 0); + if (err || time_info.status || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.status, out_size); + return -EFAULT; + } + + return err; +} + +void hinic_sync_time_async(void *hwdev, u64 time) +{ + struct hinic_sync_time_info time_info = { 0 }; + + time_info.mstime = time; + hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info)); +} + +static void parse_sf_en_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, enum 
func_type type) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + if (type == TYPE_PPF) { + /* For PPF's SF EN flag, we assign it in get_dynamic_res_cap(). + * we only save its VF's flag. + */ + attr->sf_en_vf = dev_cap->sf_en_vf; + } else if (type == TYPE_PF) { + if (dev_cap->sf_en_pf) + cap->sf_en = true; + else + cap->sf_en = false; + + attr->sf_en_vf = dev_cap->sf_en_vf; + } else { + /* VF gets SF_EN_VF from PPF/PF */ + if (dev_cap->sf_en_vf) + cap->sf_en = true; + else + cap->sf_en = false; + + attr->sf_en_vf = 0; + } +} + +static void parse_pub_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + cap->svc_type = dev_cap->svc_cap_en; + cap->chip_svc_type = cap->svc_type; + + if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT) + attr->ft_en = true; + else + attr->ft_en = false; + + if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) + attr->rdma_en = true; + else + attr->rdma_en = false; + + cap->host_id = dev_cap->host_id; + cap->ep_id = dev_cap->ep_id; + + cap->max_cos_id = dev_cap->max_cos_id; + cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap; + cap->er_id = dev_cap->er_id; + cap->port_id = dev_cap->port_id; + cap->force_up = dev_cap->force_up; + + parse_sf_en_cap(cap, dev_cap, type); + + /* PF/PPF */ + if (type == TYPE_PF || type == TYPE_PPF) { + cap->max_vf = dev_cap->max_vf; + cap->pf_num = dev_cap->pf_num; + cap->pf_id_start = dev_cap->pf_id_start; + cap->vf_num = dev_cap->vf_num; + cap->vf_id_start = dev_cap->vf_id_start; + + /* FC need max queue number, but max queue number info is in + * l2nic cap, we also put max queue num info in public cap, so + * FC can get correct max queue number info. + */ + cap->max_sqs = dev_cap->nic_max_sq + 1; + cap->max_rqs = dev_cap->nic_max_rq + 1; + } else { + cap->max_vf = 0; + cap->max_sqs = dev_cap->nic_max_sq; + cap->max_rqs = dev_cap->nic_max_rq; + } + + cap->host_total_function = dev_cap->host_total_func; + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; + cap->bfilter_len = dev_cap->bfilter_len; + cap->hash_bucket_num = dev_cap->hash_bucket_num; + cap->dev_ver_info.cfg_file_ver = dev_cap->cfg_file_ver; + cap->net_port_mode = dev_cap->net_port_mode; + + /* FC does not use VF */ + if (cap->net_port_mode == CFG_NET_MODE_FC) + cap->max_vf = 0; + + sdk_info(hwdev->dev_hdl, "Get public resource capbility, svc_cap_en: 0x%x\n", + dev_cap->svc_cap_en); + sdk_info(hwdev->dev_hdl, "Host_id=0x%x, ep_id=0x%x, max_cos_id=0x%x, cos_bitmap=0x%x, er_id=0x%x, port_id=0x%x\n", + cap->host_id, cap->ep_id, + cap->max_cos_id, cap->cos_valid_bitmap, + cap->er_id, cap->port_id); + sdk_info(hwdev->dev_hdl, "Host_total_function=0x%x, host_oq_id_mask_val=0x%x, net_port_mode=0x%x, max_vf=0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val, + cap->net_port_mode, cap->max_vf); + + sdk_info(hwdev->dev_hdl, "Pf_num=0x%x, pf_id_start=0x%x, vf_num=0x%x, vf_id_start=0x%x\n", + cap->pf_num, cap->pf_id_start, + cap->vf_num, cap->vf_id_start); + + /* Check parameters from firmware */ + if (cap->max_sqs > HINIC_CFG_MAX_QP || + cap->max_rqs > HINIC_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n", + HINIC_CFG_MAX_QP, cap->max_sqs, cap->max_rqs); + cap->max_sqs = HINIC_CFG_MAX_QP; + cap->max_rqs = HINIC_CFG_MAX_QP; + } +} + +static void 
parse_dynamic_share_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap; + + shared_cap->host_pctxs = dev_cap->host_pctx_num; + + if (dev_cap->host_sf_en) + cap->sf_en = true; + else + cap->sf_en = false; + + shared_cap->host_cctxs = dev_cap->host_ccxt_num; + shared_cap->host_scqs = dev_cap->host_scq_num; + shared_cap->host_srqs = dev_cap->host_srq_num; + shared_cap->host_mpts = dev_cap->host_mpt_num; + + sdk_info(hwdev->dev_hdl, "Dynamic share resource capbility, host_pctxs=0x%x, host_cctxs=0x%x, host_scqs=0x%x, host_srqs=0x%x, host_mpts=0x%x\n", + shared_cap->host_pctxs, shared_cap->host_cctxs, + shared_cap->host_scqs, shared_cap->host_srqs, + shared_cap->host_mpts); +} + +static void parse_l2nic_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct nic_service_cap *nic_cap = &cap->nic_cap; + + /* PF/PPF */ + if (type == TYPE_PF || type == TYPE_PPF) { + nic_cap->max_sqs = dev_cap->nic_max_sq + 1; + nic_cap->max_rqs = dev_cap->nic_max_rq + 1; + nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1; + nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1; + nic_cap->max_queue_allowed = 0; + nic_cap->dynamic_qp = 0; + } else { + nic_cap->max_sqs = dev_cap->nic_max_sq; + nic_cap->max_rqs = dev_cap->nic_max_rq; + nic_cap->vf_max_sqs = 0; + nic_cap->vf_max_rqs = 0; + nic_cap->max_queue_allowed = dev_cap->max_queue_allowed; + nic_cap->dynamic_qp = dev_cap->ovs_dq_en; + } + + if (dev_cap->nic_lro_en) + nic_cap->lro_en = true; + else + nic_cap->lro_en = false; + + nic_cap->lro_sz = dev_cap->nic_lro_sz; + nic_cap->tso_sz = dev_cap->nic_tso_sz; + + sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs=0x%x, max_rqs=0x%x, vf_max_sqs=0x%x, vf_max_rqs=0x%x, max_queue_allowed=0x%x\n", + nic_cap->max_sqs, nic_cap->max_rqs, + nic_cap->vf_max_sqs, nic_cap->vf_max_rqs, + nic_cap->max_queue_allowed); + + /* Check parameters from firmware */ + if (nic_cap->max_sqs > HINIC_CFG_MAX_QP || + nic_cap->max_rqs > HINIC_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %d, rq: %d\n", + HINIC_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); + nic_cap->max_sqs = HINIC_CFG_MAX_QP; + nic_cap->max_rqs = HINIC_CFG_MAX_QP; + } +} + +static void parse_roce_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->max_qps = dev_cap->roce_max_qp; + roce_cap->max_cqs = dev_cap->roce_max_cq; + roce_cap->max_srqs = dev_cap->roce_max_srq; + roce_cap->max_mpts = dev_cap->roce_max_mpt; + roce_cap->num_cos = dev_cap->max_cos_id + 1; + + /* PF/PPF */ + if (type == TYPE_PF || type == TYPE_PPF) { + roce_cap->vf_max_qps = dev_cap->roce_vf_max_qp; + roce_cap->vf_max_cqs = dev_cap->roce_vf_max_cq; + roce_cap->vf_max_srqs = dev_cap->roce_vf_max_srq; + roce_cap->vf_max_mpts = dev_cap->roce_vf_max_mpt; + } else { + roce_cap->vf_max_qps = 0; + roce_cap->vf_max_cqs = 0; + roce_cap->vf_max_srqs = 0; + roce_cap->vf_max_mpts = 0; + } + + roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; + roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end; + roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; + + roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; + roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; + roce_cap->dmtt_cl_sz = 
dev_cap->roce_dmtt_cl_size; + + roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; + roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; + roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; + + sdk_info(hwdev->dev_hdl, "Get roce resource capability\n"); + sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_mpts=0x%x\n", + roce_cap->max_qps, roce_cap->max_cqs, + roce_cap->max_srqs, roce_cap->max_mpts); + + sdk_info(hwdev->dev_hdl, "Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_srqs=0x%x, vf_max_mpts=0x%x\n", + roce_cap->vf_max_qps, roce_cap->vf_max_cqs, + roce_cap->vf_max_srqs, roce_cap->vf_max_mpts); + + sdk_info(hwdev->dev_hdl, "Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n", + roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, + roce_cap->cmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n", + roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, + roce_cap->dmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n", + roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, + roce_cap->wqe_cl_sz); + + if (roce_cap->max_qps == 0) { + roce_cap->max_qps = 1024; + roce_cap->max_cqs = 2048; + roce_cap->max_srqs = 1024; + roce_cap->max_mpts = 1024; + + if (type == TYPE_PF || type == TYPE_PPF) { + roce_cap->vf_max_qps = 512; + roce_cap->vf_max_cqs = 1024; + roce_cap->vf_max_srqs = 512; + roce_cap->vf_max_mpts = 512; + } + } +} + +static void parse_iwarp_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) + +{ + struct dev_iwarp_svc_own_cap *iwarp_cap = + &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap; + + iwarp_cap->max_qps = dev_cap->iwarp_max_qp; + iwarp_cap->max_cqs = dev_cap->iwarp_max_cq; + iwarp_cap->max_mpts = dev_cap->iwarp_max_mpt; + iwarp_cap->num_cos = dev_cap->max_cos_id + 1; + + /* PF/PPF */ + if (type == TYPE_PF || type == TYPE_PPF) { + iwarp_cap->vf_max_qps = dev_cap->iwarp_vf_max_qp; + iwarp_cap->vf_max_cqs = dev_cap->iwarp_vf_max_cq; + iwarp_cap->vf_max_mpts = dev_cap->iwarp_vf_max_mpt; + } else { + iwarp_cap->vf_max_qps = 0; + iwarp_cap->vf_max_cqs = 0; + iwarp_cap->vf_max_mpts = 0; + } + + iwarp_cap->cmtt_cl_start = dev_cap->iwarp_cmtt_cl_start; + iwarp_cap->cmtt_cl_end = dev_cap->iwarp_cmtt_cl_end; + iwarp_cap->cmtt_cl_sz = dev_cap->iwarp_cmtt_cl_size; + + iwarp_cap->dmtt_cl_start = dev_cap->iwarp_dmtt_cl_start; + iwarp_cap->dmtt_cl_end = dev_cap->iwarp_dmtt_cl_end; + iwarp_cap->dmtt_cl_sz = dev_cap->iwarp_dmtt_cl_size; + + iwarp_cap->wqe_cl_start = dev_cap->iwarp_wqe_cl_start; + iwarp_cap->wqe_cl_end = dev_cap->iwarp_wqe_cl_end; + iwarp_cap->wqe_cl_sz = dev_cap->iwarp_wqe_cl_size; + + sdk_info(hwdev->dev_hdl, "Get iwarp resource capability\n"); + sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_mpts=0x%x\n", + iwarp_cap->max_qps, iwarp_cap->max_cqs, + iwarp_cap->max_mpts); + sdk_info(hwdev->dev_hdl, "Vf_max_qps=0x%x, vf_max_cqs=0x%x, vf_max_mpts=0x%x\n", + iwarp_cap->vf_max_qps, iwarp_cap->vf_max_cqs, + iwarp_cap->vf_max_mpts); + + sdk_info(hwdev->dev_hdl, "Cmtt_start=0x%x, cmtt_end=0x%x, cmtt_sz=0x%x\n", + iwarp_cap->cmtt_cl_start, iwarp_cap->cmtt_cl_end, + iwarp_cap->cmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Dmtt_start=0x%x, dmtt_end=0x%x, dmtt_sz=0x%x\n", + iwarp_cap->dmtt_cl_start, iwarp_cap->dmtt_cl_end, + iwarp_cap->dmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Wqe_start=0x%x, wqe_end=0x%x, wqe_sz=0x%x\n", + iwarp_cap->wqe_cl_start, iwarp_cap->wqe_cl_end, + iwarp_cap->wqe_cl_sz); + + if (iwarp_cap->max_qps == 0) { + iwarp_cap->max_qps = 8; 
+ iwarp_cap->max_cqs = 16; + iwarp_cap->max_mpts = 8; + + if (type == TYPE_PF || type == TYPE_PPF) { + iwarp_cap->vf_max_qps = 8; + iwarp_cap->vf_max_cqs = 16; + iwarp_cap->vf_max_mpts = 8; + } + } +} + +static void parse_fcoe_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fcoe_svc_cap *fcoe_cap = &cap->fcoe_cap.dev_fcoe_cap; + + fcoe_cap->max_qps = dev_cap->fcoe_max_qp; + fcoe_cap->max_cqs = dev_cap->fcoe_max_cq; + fcoe_cap->max_srqs = dev_cap->fcoe_max_srq; + fcoe_cap->max_cctxs = dev_cap->fcoe_max_cctx; + fcoe_cap->cctxs_id_start = dev_cap->fcoe_cctx_id_start; + fcoe_cap->vp_id_start = dev_cap->fcoe_vp_id_start; + fcoe_cap->vp_id_end = dev_cap->fcoe_vp_id_end; + + sdk_info(hwdev->dev_hdl, "Get fcoe resource capbility\n"); + sdk_info(hwdev->dev_hdl, "Max_qps=0x%x, max_cqs=0x%x, max_srqs=0x%x, max_cctxs=0x%x, cctxs_id_start=0x%x\n", + fcoe_cap->max_qps, fcoe_cap->max_cqs, fcoe_cap->max_srqs, + fcoe_cap->max_cctxs, fcoe_cap->cctxs_id_start); + sdk_info(hwdev->dev_hdl, "Vp_id_start=0x%x, vp_id_end=0x%x\n", + fcoe_cap->vp_id_start, fcoe_cap->vp_id_end); +} + +static void parse_toe_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; + + toe_cap->max_pctxs = dev_cap->toe_max_pctx; + toe_cap->max_cqs = dev_cap->toe_max_cq; + toe_cap->max_srqs = dev_cap->toe_max_srq; + toe_cap->srq_id_start = dev_cap->toe_srq_id_start; + toe_cap->num_cos = dev_cap->max_cos_id + 1; + + sdk_info(hwdev->dev_hdl, "Get toe resource capbility, max_pctxs=0x%x, max_cqs=0x%x, max_srqs=0x%x, srq_id_start=0x%x\n", + toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, + toe_cap->srq_id_start); +} + +static void parse_fc_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + sdk_info(hwdev->dev_hdl, "Get fc resource capbility\n"); + sdk_info(hwdev->dev_hdl, "Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x, child_qpc_id_start=0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); + sdk_info(hwdev->dev_hdl, "Vp_id_start=0x%x, vp_id_end=0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_ovs_res_cap(struct hinic_hwdev *hwdev, + struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; + ovs_cap->dev_ovs_cap.max_cqs = 0; + + if (type == TYPE_PF || type == TYPE_PPF) + ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->ovs_dq_en; + + sdk_info(hwdev->dev_hdl, "Get ovs resource capbility, max_qpc: 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs); +} + +static void parse_acl_res_cap(struct service_cap *cap, + struct hinic_dev_cap *dev_cap, + enum func_type type) +{ + struct acl_service_cap *acl_cap = &cap->acl_cap; + + acl_cap->dev_acl_cap.max_pctxs = 1024 * 1024; + acl_cap->dev_acl_cap.max_cqs = 8; +} + 
+static void parse_dev_cap(struct hinic_hwdev *dev, + struct hinic_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(dev, cap, dev_cap, type); + + /* PPF managed dynamic resource */ + if (type == TYPE_PPF) + parse_dynamic_share_res_cap(dev, cap, dev_cap, type); + + /* L2 NIC resource */ + if (IS_NIC_TYPE(dev)) + parse_l2nic_res_cap(dev, cap, dev_cap, type); + + /* FCoE/IOE/TOE/FC without virtulization */ + if (type == TYPE_PF || type == TYPE_PPF) { + if (IS_FC_TYPE(dev)) + parse_fc_res_cap(dev, cap, dev_cap, type); + + if (IS_FCOE_TYPE(dev)) + parse_fcoe_res_cap(dev, cap, dev_cap, type); + + if (IS_TOE_TYPE(dev)) + parse_toe_res_cap(dev, cap, dev_cap, type); + } + + /* RoCE resource */ + if (IS_ROCE_TYPE(dev)) + parse_roce_res_cap(dev, cap, dev_cap, type); + + /* iWARP resource */ + if (IS_IWARP_TYPE(dev)) + parse_iwarp_res_cap(dev, cap, dev_cap, type); + + if (IS_OVS_TYPE(dev)) + parse_ovs_res_cap(dev, cap, dev_cap, type); + + if (IS_ACL_TYPE(dev)) + parse_acl_res_cap(cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type) +{ + struct hinic_dev_cap dev_cap = {0}; + u16 out_len = sizeof(dev_cap); + int err; + + dev_cap.version = HINIC_CMD_VER_FUNC_ID; + err = hinic_global_func_id_get(dev, &dev_cap.func_id); + if (err) + return err; + + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n", + dev_cap.func_id); + + err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_cap_from_pf(struct hinic_hwdev *dev, enum func_type type) +{ + struct hinic_dev_cap dev_cap = {0}; + u16 in_len, out_len; + int err; + + in_len = sizeof(dev_cap); + out_len = in_len; + + err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_MBOX_CAP, + &dev_cap, in_len, &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, "Failed to get capability from PF, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static int get_dev_cap(struct hinic_hwdev *dev) +{ + int err; + enum func_type type = HINIC_FUNC_TYPE(dev); + + switch (type) { + case TYPE_PF: + case TYPE_PPF: + err = get_cap_from_fw(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + break; + case TYPE_VF: + err = get_cap_from_pf(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get VF capability\n"); + return err; + } + break; + default: + sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n", + type); + return -EINVAL; + } + + return 0; +} + +static void nic_param_fix(struct hinic_hwdev *dev) +{ + struct nic_service_cap *nic_cap = &dev->cfg_mgmt->svc_cap.nic_cap; + + if ((hinic_func_type(dev) == TYPE_VF) && + nic_cap->max_queue_allowed != 0) { + nic_cap->max_rqs = nic_cap->max_queue_allowed; + nic_cap->max_sqs = nic_cap->max_queue_allowed; + } +} + +static void rdma_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + struct dev_roce_svc_own_cap *roce_cap = + 
&rdma_cap->dev_rdma_cap.roce_own_cap; + struct dev_iwarp_svc_own_cap *iwarp_cap = + &rdma_cap->dev_rdma_cap.iwarp_own_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_rdmarc = LOG_RDMARC_SEG; + rdma_cap->reserved_qps = RDMA_RSVD_QPS; + rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; + + /* RoCE */ + if (IS_ROCE_TYPE(dev)) { + roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; + roce_cap->max_wqes = ROCE_MAX_WQES; + roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; + roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; + roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; + roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; + roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; + roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; + roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; + roce_cap->reserved_srqs = ROCE_RSVD_SRQS; + roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; + roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; + roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; + } else { + iwarp_cap->qpc_entry_sz = IWARP_QPC_ENTRY_SZ; + iwarp_cap->max_wqes = IWARP_MAX_WQES; + iwarp_cap->max_rq_sg = IWARP_MAX_RQ_SGE; + iwarp_cap->max_sq_inline_data_sz = IWARP_MAX_SQ_INLINE_DATA_SZ; + iwarp_cap->max_rq_desc_sz = IWARP_MAX_RQ_DESC_SZ; + iwarp_cap->max_irq_depth = IWARP_MAX_IRQ_DEPTH; + iwarp_cap->irq_entry_size = IWARP_IRQ_ENTRY_SZ; + iwarp_cap->max_orq_depth = IWARP_MAX_ORQ_DEPTH; + iwarp_cap->orq_entry_size = IWARP_ORQ_ENTRY_SZ; + iwarp_cap->max_rtoq_depth = IWARP_MAX_RTOQ_DEPTH; + iwarp_cap->rtoq_entry_size = IWARP_RTOQ_ENTRY_SZ; + iwarp_cap->max_ackq_depth = IWARP_MAX_ACKQ_DEPTH; + iwarp_cap->ackq_entry_size = IWARP_ACKQ_ENTRY_SZ; + iwarp_cap->max_msg_sz = IWARP_MAX_MSG_SZ; + } + + rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; + rdma_cap->wqebb_size = WQEBB_SZ; + rdma_cap->max_cqes = RDMA_MAX_CQES; + rdma_cap->reserved_cqs = RDMA_RSVD_CQS; + rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; + rdma_cap->cqe_size = RDMA_CQE_SZ; + rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + + /* 2^8 - 1 + * +------------------------+-----------+ + * | 4B | 1M(20b) | Key(8b) | + * +------------------------+-----------+ + * key = 8bit key + 24bit index, + * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, + * we use original 8bits directly for simpilification + */ + rdma_cap->max_fmr_maps = 255; + rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? 
+ g_rdma_mtts_num : RDMA_NUM_MTTS); + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; + rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; + rdma_cap->num_ports = RDMA_NUM_PORTS; + rdma_cap->db_page_size = DB_PAGE_SZ; + rdma_cap->direct_wqe_size = DWQE_SZ; + rdma_cap->num_pds = NUM_PD; + rdma_cap->reserved_pds = RSVD_PD; + rdma_cap->max_xrcds = MAX_XRCDS; + rdma_cap->reserved_xrcds = RSVD_XRCDS; + rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; + rdma_cap->gid_entry_sz = GID_ENTRY_SZ; + rdma_cap->reserved_lkey = RSVD_LKEY; + rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; + rdma_cap->page_size_cap = PAGE_SZ_CAP; + rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | + RDMA_BMME_FLAG_REMOTE_INV | + RDMA_BMME_FLAG_FAST_REG_WR | + RDMA_DEV_CAP_FLAG_XRC | + RDMA_DEV_CAP_FLAG_MEM_WINDOW | + RDMA_BMME_FLAG_TYPE_2_WIN | + RDMA_BMME_FLAG_WIN_TYPE_2B | + RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_cap->max_frpl_len = MAX_FRPL_LEN; + rdma_cap->max_pkeys = MAX_PKEYS; +} + +static void fcoe_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fcoe_service_cap *fcoe_cap = &cap->fcoe_cap; + + fcoe_cap->qpc_basic_size = FCOE_PCTX_SZ; + fcoe_cap->childc_basic_size = FCOE_CCTX_SZ; + fcoe_cap->sqe_size = FCOE_SQE_SZ; + + fcoe_cap->scqc_basic_size = FCOE_SCQC_SZ; + fcoe_cap->scqe_size = FCOE_SCQE_SZ; + + fcoe_cap->srqc_size = FCOE_SRQC_SZ; + fcoe_cap->srqe_size = FCOE_SRQE_SZ; +} + +static void toe_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct toe_service_cap *toe_cap = &cap->toe_cap; + + toe_cap->pctx_sz = TOE_PCTX_SZ; + toe_cap->scqc_sz = TOE_CQC_SZ; +} + +static void fc_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void ovs_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->pctx_sz = OVS_PCTX_SZ; + ovs_cap->scqc_sz = OVS_SCQC_SZ; +} + +static void acl_param_fix(struct hinic_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct acl_service_cap *acl_cap = &cap->acl_cap; + + acl_cap->pctx_sz = ACL_PCTX_SZ; + acl_cap->scqc_sz = ACL_SCQC_SZ; +} + +static void init_service_param(struct hinic_hwdev *dev) +{ + if (IS_NIC_TYPE(dev)) + nic_param_fix(dev); + + if (IS_RDMA_TYPE(dev)) + rdma_param_fix(dev); + + if (IS_FCOE_TYPE(dev)) + fcoe_param_fix(dev); + + if (IS_TOE_TYPE(dev)) + toe_param_fix(dev); + + if (IS_FC_TYPE(dev)) + fc_param_fix(dev); + + if (IS_OVS_TYPE(dev)) + ovs_param_fix(dev); + + if (IS_ACL_TYPE(dev)) + acl_param_fix(dev); +} + +static void cfg_get_eq_num(struct hinic_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, 
cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + + if (!hwdev || !ver) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + + memcpy(ver, &cfg_mgmt->svc_cap.dev_ver_info, sizeof(*ver)); + + return 0; +} +EXPORT_SYMBOL(hinic_dev_ver_info); + +int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq *eq; + int eqn = -EINVAL; + + if (!hwdev || vector < 0) + return -EINVAL; + + if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) { + sdk_err(dev->dev_hdl, + "Service type: %d, only RDMA service could get eqn by vector\n", + type); + return -EINVAL; + } + + cfg_mgmt = dev->cfg_mgmt; + vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; + + eq = cfg_mgmt->eq_info.eq; + if ((eq[vector].type == SERVICE_T_ROCE || + eq[vector].type == SERVICE_T_IWARP) && + eq[vector].free == CFG_BUSY) + eqn = eq[vector].eqn; + + return eqn; +} +EXPORT_SYMBOL(hinic_vector_to_eqn); + +static int cfg_init_interrupt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); + return -EFAULT; + } + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_num; + + /* Production requires that VF only supports MSI-X */ + if (HINIC_FUNC_TYPE(dev) == TYPE_VF) + cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; + else + cfg_mgmt->svc_cap.interrupt_type = intr_mode; + mutex_init(&irq_info->irq_mutex); + return 0; +} + +static int cfg_enable_interrupt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info; + struct msix_entry *entry; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed\n"); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Request %d msix vector success\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32 kernel uses to write allocated vector */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + u16 free_num_irq; + int i, j; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, + "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt\n", + free_num_irq); + num = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = + alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic_alloc_irqs); + +void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Find target, but over range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %d doesn't need to be freed\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(hinic_free_irq); + +int hinic_vector_to_irq(void *hwdev, enum hinic_service_type type, int vector) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct irq_alloc_info_st *irq_info; + int irq = -EINVAL; + + if (!hwdev) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + if (type != SERVICE_T_ROCE && type != SERVICE_T_IWARP) { + sdk_err(dev->dev_hdl, + "Service type: %u, only RDMA service could get irq by vector\n", + type); + return -EINVAL; + } + + /* Current RDMA CEQ are 2 - 31, will change in the future */ + vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE); + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + if 
(irq_info[vector].type == SERVICE_T_ROCE || + irq_info[vector].type == SERVICE_T_IWARP) + if (irq_info[vector].free == CFG_BUSY) + irq = (int)irq_info[vector].info.irq_id; + + return irq; +} +EXPORT_SYMBOL(hinic_vector_to_irq); + +int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int num, + int *ceq_id_array, int *act_num) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq_info *eq; + int free_ceq; + int i, j; + + if (!hwdev || !ceq_id_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + free_ceq = eq->num_ceq_remain; + + mutex_lock(&eq->eq_mutex); + + if (num > free_ceq) { + if (free_ceq <= 0) { + sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); + mutex_unlock(&eq->eq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n", + free_ceq); + } + + *act_num = 0; + + num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE); + for (i = 0; i < num; i++) { + if (eq->num_ceq_remain == 0) { + sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", + *act_num, num); + mutex_unlock(&eq->eq_mutex); + return 0; + } + + for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { + if (eq->eq[j].free == CFG_FREE) { + eq->eq[j].type = type; + eq->eq[j].free = CFG_BUSY; + eq->num_ceq_remain--; + ceq_id_array[i] = eq->eq[j].eqn; + (*act_num)++; + break; + } + } + } + + mutex_unlock(&eq->eq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic_alloc_ceqs); + +void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id) +{ + struct hinic_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_eq_info *eq; + u8 num_ceq; + u8 i = 0; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + num_ceq = eq->num_ceq; + + mutex_lock(&eq->eq_mutex); + + for (i = 0; i < num_ceq; i++) { + if (ceq_id == eq->eq[i].eqn && + type == cfg_mgmt->eq_info.eq[i].type) { + if (eq->eq[i].free == CFG_BUSY) { + eq->eq[i].free = CFG_FREE; + eq->num_ceq_remain++; + if (eq->num_ceq_remain > num_ceq) + eq->num_ceq_remain %= num_ceq; + + mutex_unlock(&eq->eq_mutex); + return; + } + } + } + + if (i >= num_ceq) + sdk_warn(dev->dev_hdl, "ceq %d don't need to free\n", ceq_id); + + mutex_unlock(&eq->eq_mutex); +} +EXPORT_SYMBOL(hinic_free_ceq); + +static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_dev_cap *dev_cap = buf_out; + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct nic_service_cap *nic_cap = &cap->nic_cap; + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + struct dev_iwarp_svc_own_cap *iwarp_cap = + &cap->rdma_cap.dev_rdma_cap.iwarp_own_cap; + struct dev_ovs_svc_cap *ovs_cap = &cap->ovs_cap.dev_ovs_cap; + struct hinic_dev_cap dev_cap_tmp = {0}; + u16 out_len = 0; + u16 func_id; + int err; + + memset(dev_cap, 0, sizeof(*dev_cap)); + + if (cap->sf_svc_attr.ft_en) + dev_cap->sf_svc_attr |= SF_SVC_FT_BIT; + else + dev_cap->sf_svc_attr &= ~SF_SVC_FT_BIT; + + if (cap->sf_svc_attr.rdma_en) + dev_cap->sf_svc_attr |= SF_SVC_RDMA_BIT; + else + dev_cap->sf_svc_attr &= ~SF_SVC_RDMA_BIT; + + dev_cap->sf_en_vf = cap->sf_svc_attr.sf_en_vf; + + dev_cap->host_id = cap->host_id; + dev_cap->ep_id = cap->ep_id; + dev_cap->intr_type = cap->interrupt_type; + dev_cap->max_cos_id = cap->max_cos_id; + dev_cap->er_id = cap->er_id; + dev_cap->port_id = cap->port_id; + dev_cap->max_vf = cap->max_vf; + 
dev_cap->svc_cap_en = cap->chip_svc_type; + dev_cap->host_total_func = cap->host_total_function; + dev_cap->host_oq_id_mask_val = cap->host_oq_id_mask_val; + dev_cap->net_port_mode = cap->net_port_mode; + + /* Parameters below is uninitialized because NIC and ROCE not use it + * max_connect_num + * max_stick2cache_num + * bfilter_start_addr + * bfilter_len + * hash_bucket_num + * cfg_file_ver + */ + + /* NIC VF resources */ + dev_cap->nic_max_sq = nic_cap->vf_max_sqs; + dev_cap->nic_max_rq = nic_cap->vf_max_rqs; + + /* ROCE VF resources */ + dev_cap->roce_max_qp = roce_cap->vf_max_qps; + dev_cap->roce_max_cq = roce_cap->vf_max_cqs; + dev_cap->roce_max_srq = roce_cap->vf_max_srqs; + dev_cap->roce_max_mpt = roce_cap->vf_max_mpts; + + dev_cap->roce_cmtt_cl_start = roce_cap->cmtt_cl_start; + dev_cap->roce_cmtt_cl_end = roce_cap->cmtt_cl_end; + dev_cap->roce_cmtt_cl_size = roce_cap->cmtt_cl_sz; + + dev_cap->roce_dmtt_cl_start = roce_cap->dmtt_cl_start; + dev_cap->roce_dmtt_cl_end = roce_cap->dmtt_cl_end; + dev_cap->roce_dmtt_cl_size = roce_cap->dmtt_cl_sz; + + dev_cap->roce_wqe_cl_start = roce_cap->wqe_cl_start; + dev_cap->roce_wqe_cl_end = roce_cap->wqe_cl_end; + dev_cap->roce_wqe_cl_size = roce_cap->wqe_cl_sz; + + /* Iwarp VF resources */ + dev_cap->iwarp_max_qp = iwarp_cap->vf_max_qps; + dev_cap->iwarp_max_cq = iwarp_cap->vf_max_cqs; + dev_cap->iwarp_max_mpt = iwarp_cap->vf_max_mpts; + + /* OVS VF resources */ + dev_cap->ovs_max_qpc = ovs_cap->max_pctxs; + dev_cap->ovs_dq_en = ovs_cap->dynamic_qp_en; + + *out_size = sizeof(*dev_cap); + + if (!IS_OVS_TYPE(dev)) + return 0; + + out_len = sizeof(dev_cap_tmp); + /* fixed qnum in ovs mode */ + func_id = vf_id + hinic_glb_pf_vf_offset(hwdev); + dev_cap_tmp.func_id = func_id; + err = hinic_pf_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_FUNC_CAP, + &dev_cap_tmp, sizeof(dev_cap_tmp), + &dev_cap_tmp, &out_len, 0); + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) { + sdk_err(dev->dev_hdl, + "Get func_id: %u capability from FW failed, err: %d, status: 0x%x, out_size: 0x%x\n", + func_id, err, dev_cap_tmp.status, out_len); + return -EFAULT; + } else if (err) { + return err; + } + + dev_cap->nic_max_sq = dev_cap_tmp.nic_max_sq + 1; + dev_cap->nic_max_rq = dev_cap_tmp.nic_max_rq + 1; + dev_cap->max_queue_allowed = dev_cap_tmp.max_queue_allowed; + + sdk_info(dev->dev_hdl, "func_id(%u) %s qnum %u max_queue_allowed %u\n", + func_id, (ovs_cap->dynamic_qp_en ? 
"dynamic" : "fixed"), + dev_cap->nic_max_sq, dev_cap->max_queue_allowed); + + return 0; +} + +static int cfg_mbx_ppf_proc_msg(void *hwdev, u16 pf_id, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + + sdk_info(dev->dev_hdl, "ppf receive other pf cfgmgmt cmd %d mbox msg\n", + cmd); + + return hinic_ppf_process_mbox_msg(hwdev, pf_id, vf_id, HINIC_MOD_CFGM, + cmd, buf_in, in_size, buf_out, + out_size); +} + +static int cfg_mbx_vf_proc_msg(void *hwdev, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *dev = hwdev; + + *out_size = 0; + sdk_err(dev->dev_hdl, "VF msg callback not supported\n"); + + return -EOPNOTSUPP; +} + +static int cfg_mbx_init(struct hinic_hwdev *dev, struct cfg_mgmt_info *cfg_mgmt) +{ + int err; + enum func_type type = dev->hwif->attr.func_type; + + if (type == TYPE_PF) { + err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_pf_proc_vf_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PF: Register PF mailbox callback failed\n"); + return err; + } + } else if (type == TYPE_PPF) { + err = hinic_register_ppf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_ppf_proc_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PPF: Register PPF mailbox callback failed\n"); + return err; + } + + err = hinic_register_pf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_pf_proc_vf_msg); + if (err) { + sdk_err(dev->dev_hdl, + "PPF: Register PF mailbox callback failed\n"); + hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM); + return err; + } + } else if (type == TYPE_VF) { + err = hinic_register_vf_mbox_cb(dev, HINIC_MOD_CFGM, + cfg_mbx_vf_proc_msg); + if (err) { + sdk_err(dev->dev_hdl, + "VF: Register VF mailbox callback failed\n"); + return err; + } + } else { + sdk_err(dev->dev_hdl, "Invalid func_type: %d, not supported\n", + type); + return -EINVAL; + } + + return 0; +} + +static void cfg_mbx_cleanup(struct hinic_hwdev *dev) +{ + hinic_unregister_ppf_mbox_cb(dev, HINIC_MOD_CFGM); + hinic_unregister_pf_mbox_cb(dev, HINIC_MOD_CFGM); + hinic_unregister_vf_mbox_cb(dev, HINIC_MOD_CFGM); +} + +static int init_cfg_mgmt(struct hinic_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +} + +static void free_cfg_mgmt(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* if the allocated resource were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + 
pci_disable_msix(dev->pcidev_hdl); + break; + + case INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + + kfree(cfg_mgmt); +} + +static int init_capability(struct hinic_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + int err; + + set_cfg_test_param(cfg_mgmt); + + err = cfg_mbx_init(dev, cfg_mgmt); + if (err) { + sdk_err(dev->dev_hdl, "Configure mailbox init failed, err: %d\n", + err); + return err; + } + + cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; + cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; + + err = get_dev_cap(dev); + if (err) { + cfg_mbx_cleanup(dev); + return err; + } + + init_service_param(dev); + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +static void free_capability(struct hinic_hwdev *dev) +{ + cfg_mbx_cleanup(dev); + sdk_info(dev->dev_hdl, "Free capability success"); +} + +/* 0 - MSIx, 1 - MSI, 2 - INTx */ +enum intr_type hinic_intr_type(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return INTR_TYPE_NONE; + + return dev->cfg_mgmt->svc_cap.interrupt_type; +} +EXPORT_SYMBOL(hinic_intr_type); + +bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_NIC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_nic); + +bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ROCE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_roce); + +bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FCOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fcoe_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_fcoe); + +/* Only PPF support it, PF is not */ +bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_TOE_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_toe); + +bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_IWARP_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_iwarp); + +bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FC_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_fc); + +bool hinic_support_fic(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FIC_TYPE(dev)) + return false; + + return true; +} +EXPORT_SYMBOL(hinic_support_fic); + +bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if 
(!hwdev) + return false; + + if (!IS_OVS_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_ovs); + +bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_ACL_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.acl_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_acl); + +bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_RDMA_TYPE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + + return true; +} +EXPORT_SYMBOL(hinic_support_rdma); + +bool hinic_support_ft(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_FT_TYPE(dev)) + return false; + + return true; +} +EXPORT_SYMBOL(hinic_support_ft); + +bool hinic_support_dynamic_q(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.nic_cap.dynamic_qp ? true : false; +} + +bool hinic_func_for_mgmt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0) + return false; + else + return true; +} + +bool hinic_func_for_hwpt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_HWPT_TYPE(dev)) + return true; + else + return false; +} + +bool hinic_func_for_pt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.force_up) + return true; + else + return false; +} + +int cfg_set_func_sf_en(void *hwdev, u32 enbits, u32 enmask) +{ + struct hinic_hwdev *dev = hwdev; + struct nic_misc_func_sf_enbits *func_sf_enbits; + u16 out_size = sizeof(*func_sf_enbits); + u16 glb_func_idx; + u16 api_info_len; + int err; + + api_info_len = sizeof(struct nic_misc_func_sf_enbits); + func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL); + if (!func_sf_enbits) { + sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n"); + return -ENOMEM; + } + + err = hinic_global_func_id_get(dev, &glb_func_idx); + if (err) { + kfree(func_sf_enbits); + return err; + } + + func_sf_enbits->stateful_enbits = enbits; + func_sf_enbits->stateful_enmask = enmask; + func_sf_enbits->function_id = glb_func_idx; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_MISC_SET_FUNC_SF_ENBITS, + (void *)func_sf_enbits, api_info_len, + (void *)func_sf_enbits, &out_size, + VSW_UP_CFG_TIMEOUT); + if (err || !out_size || func_sf_enbits->status) { + sdk_err(dev->dev_hdl, + "Failed to set stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n", + err, func_sf_enbits->status, out_size); + kfree(func_sf_enbits); + return -EFAULT; + } + + kfree(func_sf_enbits); + return 0; +} + +int cfg_get_func_sf_en(void *hwdev, u32 *enbits) +{ + struct nic_misc_func_sf_enbits *func_sf_enbits; + struct hinic_hwdev *dev = hwdev; + u16 out_size = sizeof(*func_sf_enbits); + u16 glb_func_idx; + u16 api_info_len; + int err; + + api_info_len = sizeof(struct nic_misc_func_sf_enbits); + func_sf_enbits = kzalloc(api_info_len, GFP_KERNEL); + if (!func_sf_enbits) { + sdk_err(dev->dev_hdl, "Alloc cfg api info failed\n"); + return -ENOMEM; + } + + err = hinic_global_func_id_get(dev, &glb_func_idx); + if (err) { + kfree(func_sf_enbits); + return err; + } + + 
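+ /* Fill the request with this function's global id and query its stateful enable bits (ToE/FCoE/iWARP/RoCE) from the management CPU. */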
func_sf_enbits->function_id = glb_func_idx; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_MISC_GET_FUNC_SF_ENBITS, + (void *)func_sf_enbits, api_info_len, + (void *)func_sf_enbits, &out_size, + VSW_UP_CFG_TIMEOUT); + if (err || !out_size || func_sf_enbits->status) { + sdk_err(dev->dev_hdl, "Failed to get stateful enable, err: %d, status: 0x%x, out_size: 0x%x\n", + err, func_sf_enbits->status, out_size); + kfree(func_sf_enbits); + return -EFAULT; + } + + *enbits = func_sf_enbits->stateful_enbits; + + kfree(func_sf_enbits); + return 0; +} + +int hinic_set_toe_enable(void *hwdev, bool enable) +{ + u32 enbits; + u32 enmask; + + if (!hwdev) + return -EINVAL; + + enbits = VSW_SET_STATEFUL_BITS_TOE((u16)enable); + enmask = VSW_SET_STATEFUL_BITS_TOE(0x1U); + + return cfg_set_func_sf_en(hwdev, enbits, enmask); +} +EXPORT_SYMBOL(hinic_set_toe_enable); + +bool hinic_get_toe_enable(void *hwdev) +{ + int err; + u32 enbits; + + if (!hwdev) + return false; + + err = cfg_get_func_sf_en(hwdev, &enbits); + if (err) + return false; + + return VSW_GET_STATEFUL_BITS_TOE(enbits); +} +EXPORT_SYMBOL(hinic_get_toe_enable); + +int hinic_set_fcoe_enable(void *hwdev, bool enable) +{ + u32 enbits; + u32 enmask; + + if (!hwdev) + return -EINVAL; + + enbits = VSW_SET_STATEFUL_BITS_FCOE((u16)enable); + enmask = VSW_SET_STATEFUL_BITS_FCOE(0x1U); + + return cfg_set_func_sf_en(hwdev, enbits, enmask); +} +EXPORT_SYMBOL(hinic_set_fcoe_enable); + +bool hinic_get_fcoe_enable(void *hwdev) +{ + int err; + u32 enbits; + + if (!hwdev) + return false; + + err = cfg_get_func_sf_en(hwdev, &enbits); + if (err) + return false; + + return VSW_GET_STATEFUL_BITS_FCOE(enbits); +} +EXPORT_SYMBOL(hinic_get_fcoe_enable); + +bool hinic_get_stateful_enable(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.sf_en; +} +EXPORT_SYMBOL(hinic_get_stateful_enable); + +u8 hinic_host_oq_id_mask(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} +EXPORT_SYMBOL(hinic_host_oq_id_mask); + +u8 hinic_host_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_id; +} +EXPORT_SYMBOL(hinic_host_id); + +u16 hinic_host_total_func(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host total function number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_total_function; +} +EXPORT_SYMBOL(hinic_host_total_func); + +u16 hinic_func_max_qnum(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_sqs; +} +EXPORT_SYMBOL(hinic_func_max_qnum); + +u16 hinic_func_max_nic_qnum(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} +EXPORT_SYMBOL(hinic_func_max_nic_qnum); + +u8 hinic_ep_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting ep id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.ep_id; +} +EXPORT_SYMBOL(hinic_ep_id); + +u8 hinic_er_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + 
+ if (!dev) { + pr_err("Hwdev pointer is NULL for getting er id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.er_id; +} +EXPORT_SYMBOL(hinic_er_id); + +u8 hinic_physical_port_id(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} +EXPORT_SYMBOL(hinic_physical_port_id); + +u8 hinic_func_max_vf(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max vf number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_vf; +} +EXPORT_SYMBOL(hinic_func_max_vf); + +u8 hinic_max_num_cos(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max cos number\n"); + return 0; + } + return (u8)(dev->cfg_mgmt->svc_cap.max_cos_id + 1); +} +EXPORT_SYMBOL(hinic_max_num_cos); + +u8 hinic_cos_valid_bitmap(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); + return 0; + } + return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap); +} +EXPORT_SYMBOL(hinic_cos_valid_bitmap); + +u8 hinic_net_port_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting net port mode\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.net_port_mode; +} +EXPORT_SYMBOL(hinic_net_port_mode); + +bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev || state >= HINIC_HWDEV_MAX_INVAL_INITED) + return false; + + return !!test_bit(state, &dev->func_state); +} + +static int hinic_os_dep_init(struct hinic_hwdev *hwdev) +{ + hwdev->workq = create_singlethread_workqueue(HINIC_HW_WQ_NAME); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n"); + return -EFAULT; + } + + return 0; +} + +static void hinic_os_dep_deinit(struct hinic_hwdev *hwdev) +{ + destroy_workqueue(hwdev->workq); +} + +void hinic_ppf_hwdev_unreg(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + down(&dev->ppf_sem); + dev->ppf_hwdev = NULL; + up(&dev->ppf_sem); + + sdk_info(dev->dev_hdl, "Unregister PPF hwdev\n"); +} + +void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + down(&dev->ppf_sem); + dev->ppf_hwdev = ppf_hwdev; + up(&dev->ppf_sem); + + sdk_info(dev->dev_hdl, "Register PPF hwdev\n"); +} + +static int __vf_func_init(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_vf_mbox_random_id_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init vf mbox random id\n"); + return err; + } + err = hinic_vf_func_init(hwdev); + if (err) + nic_err(hwdev->dev_hdl, "Failed to init nic mbox\n"); + + return err; +} + +static int __hilink_phy_init(struct hinic_hwdev *hwdev) +{ + int err; + + if (!HINIC_IS_VF(hwdev)) { + err = hinic_phy_init_status_judge(hwdev); + if (err) { + sdk_info(hwdev->dev_hdl, "Phy init failed\n"); + return err; + } + + if (hinic_support_nic(hwdev, NULL)) + hinic_hilink_info_show(hwdev); + } + + return 0; +} + +/* Return: + * 0: all success + * >0: partitial success + * <0: all failed + */ +int hinic_init_hwdev(struct hinic_init_para *para) +{ + struct hinic_hwdev *hwdev; + int err; + + if (!(*para->hwdev)) { + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = 
para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + hwdev->ppf_hwdev = para->ppf_hwdev; + sema_init(&hwdev->ppf_sem, 1); + sema_init(&hwdev->func_sem, 1); + hwdev->func_ref = 0; + + hwdev->chip_fault_stats = vzalloc(HINIC_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + err = hinic_init_hwif(hwdev, para->cfg_reg_base, + para->intr_reg_base, + para->db_base_phy, para->db_base, + para->dwqe_mapping); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + } else { + hwdev = *para->hwdev; + } + + /* detect slave host according to BAR reg */ + detect_host_mode_pre(hwdev); + + if (IS_BMGW_SLAVE_HOST(hwdev) && + (!hinic_get_master_host_mbox_enable(hwdev))) { + set_bit(HINIC_HWDEV_NONE_INITED, &hwdev->func_state); + sdk_info(hwdev->dev_hdl, "Master host not ready, init hwdev later\n"); + return (1 << HINIC_HWDEV_ALL_INITED); + } + + err = hinic_os_dep_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n"); + goto os_dep_init_err; + } + + hinic_set_chip_present(hwdev); + hinic_init_heartbeat(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hinic_init_comm_ch(hwdev); + if (err) { + if (!(hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK)) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } else { + sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n"); + return hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK; + } + } + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + goto init_cap_err; + } + + if (hwdev->cfg_mgmt->svc_cap.force_up) + hwdev->feature_cap |= HINIC_FUNC_FORCE_LINK_UP; + + err = __vf_func_init(hwdev); + if (err) + goto vf_func_init_err; + + err = __hilink_phy_init(hwdev); + if (err) + goto hilink_phy_init_err; + + set_bit(HINIC_HWDEV_ALL_INITED, &hwdev->func_state); + + sdk_info(hwdev->dev_hdl, "Init hwdev success\n"); + + return 0; + +hilink_phy_init_err: + + hinic_vf_func_free(hwdev); +vf_func_init_err: + free_capability(hwdev); +init_cap_err: + return (hwdev->func_state & HINIC_HWDEV_INIT_MODES_MASK); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hinic_destroy_heartbeat(hwdev); + hinic_os_dep_deinit(hwdev); + +os_dep_init_err: + hinic_free_hwif(hwdev); + +init_hwif_err: + vfree(hwdev->chip_fault_stats); + +alloc_chip_fault_stats_err: + kfree(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +/** + * hinic_set_vf_dev_cap - Set max queue num for VF + * @hwdev: the HW device for VF + */ +int hinic_set_vf_dev_cap(void *hwdev) +{ + int err; + struct hinic_hwdev *dev; + enum func_type type; + + if (!hwdev) + return -EFAULT; + + dev = (struct hinic_hwdev *)hwdev; + type = HINIC_FUNC_TYPE(dev); + if (type != TYPE_VF) + return -EPERM; + + err = get_dev_cap(dev); + if (err) + return err; + + nic_param_fix(dev); + + return 0; +} + +void hinic_free_hwdev(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + enum hinic_hwdev_init_state state = HINIC_HWDEV_ALL_INITED; + int flag = 0; + + if (!hwdev) + return; + + if (test_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state)) { + clear_bit(HINIC_HWDEV_ALL_INITED, &dev->func_state); + + /* BM slave function not need to exec rx_tx_flush */ + if (dev->func_mode != FUNC_MOD_MULTI_BM_SLAVE) + hinic_func_rx_tx_flush(hwdev); + + 
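+ /* Release the VF mailbox and capability resources that were set up during hinic_init_hwdev(). */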
hinic_vf_func_free(hwdev); + + free_capability(dev); + } + while (state > HINIC_HWDEV_NONE_INITED) { + if (test_bit(state, &dev->func_state)) { + flag = 1; + break; + } + state--; + } + if (flag) { + hinic_uninit_comm_ch(dev); + free_cfg_mgmt(dev); + hinic_destroy_heartbeat(dev); + hinic_os_dep_deinit(dev); + } + clear_bit(HINIC_HWDEV_NONE_INITED, &dev->func_state); + hinic_free_hwif(dev); + vfree(dev->chip_fault_stats); + kfree(dev); +} + +void hinic_set_api_stop(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + dev->chip_present_flag = HINIC_CHIP_ABSENT; + sdk_info(dev->dev_hdl, "Set card absent\n"); + hinic_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +void hinic_shutdown_hwdev(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (IS_SLAVE_HOST(dev)) + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); +} + +u32 hinic_func_pf_num(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf number capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_num; +} + +u64 hinic_get_func_feature_cap(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function feature capability\n"); + return 0; + } + + return dev->feature_cap; +} + +enum hinic_func_mode hinic_get_func_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function mode\n"); + return 0; + } + + return dev->func_mode; +} +EXPORT_SYMBOL(hinic_get_func_mode); + +enum hinic_service_mode hinic_get_service_mode(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting service mode\n"); + return HINIC_WORK_MODE_INVALID; + } + + return dev->board_info.service_mode; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..28b9c0c4b93e73d976d995e7b6f413cc08ce5e7b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cfg.h @@ -0,0 +1,526 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __CFG_MGT_H__ +#define __CFG_MGT_H__ + +#include "hinic_ctx_def.h" + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* start position for CEQs allocation, Max number of CEQs is 32 */ +/*lint -save -e849*/ +enum { + CFG_RDMA_CEQ_BASE = 0 +}; + +/*lint -restore*/ +enum { + CFG_NET_MODE_ETH = 0, /* Eth */ + CFG_NET_MODE_FIC = 1, /* FIC */ + CFG_NET_MODE_FC = 2 /* FC */ +}; + +enum { + SF_SVC_FT_BIT = (1 << 0), + SF_SVC_RDMA_BIT = (1 << 1), +}; + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +/* number of PFs and VFs */ +#define HOST_PF_NUM 4 +#define HOST_VF_NUM 0 +#define HOST_OQID_MASK_VAL 2 + +/* L2NIC */ +#define L2NIC_SQ_DEPTH (4 * K_UNIT) +#define L2NIC_RQ_DEPTH (4 * K_UNIT) + +#define HINIC_CFG_MAX_QP 128 + +/* RDMA */ +#define RDMA_RSVD_QPS 2 +#define ROCE_MAX_WQES (16 * K_UNIT - 1) +#define IWARP_MAX_WQES (8 * K_UNIT) + +#define RDMA_MAX_SQ_SGE 8 + +#define ROCE_MAX_RQ_SGE 8 +#define IWARP_MAX_RQ_SGE 2 + +#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT) + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */ +#define ROCE_MAX_SQ_INLINE_DATA_SZ 192 + +#define IWARP_MAX_SQ_INLINE_DATA_SZ 108 + +#define ROCE_MAX_RQ_DESC_SZ 128 +#define IWARP_MAX_RQ_DESC_SZ 64 + +#define IWARP_MAX_IRQ_DEPTH 1024 +#define IWARP_IRQ_ENTRY_SZ 64 + +#define IWARP_MAX_ORQ_DEPTH 1024 +#define IWARP_ORQ_ENTRY_SZ 32 + +#define IWARP_MAX_RTOQ_DEPTH 1024 +#define IWARP_RTOQ_ENTRY_SZ 32 + +#define IWARP_MAX_ACKQ_DEPTH 1024 +#define IWARP_ACKQ_ENTRY_SZ 16 + +#define ROCE_QPC_ENTRY_SZ 512 +#define IWARP_QPC_ENTRY_SZ 1024 + +#define WQEBB_SZ 64 + +#define ROCE_RDMARC_ENTRY_SZ 32 +#define ROCE_MAX_QP_INIT_RDMA 128 +#define ROCE_MAX_QP_DEST_RDMA 128 + +#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) +#define ROCE_RSVD_SRQS 0 +#define ROCE_MAX_SRQ_SGE 7 +#define ROCE_SRQC_ENTERY_SZ 64 + +#define RDMA_MAX_CQES (64 * K_UNIT - 1) +#define RDMA_RSVD_CQS 0 + +#define RDMA_CQC_ENTRY_SZ 128 + +#define RDMA_CQE_SZ 32 +#define RDMA_RSVD_MRWS 128 +#define RDMA_MPT_ENTRY_SZ 64 +#define RDMA_NUM_MTTS (1 * G_UNIT) +#define LOG_MTT_SEG 5 +#define MTT_ENTRY_SZ 8 +#define LOG_RDMARC_SEG 3 + +#define LOCAL_ACK_DELAY 15 +#define RDMA_NUM_PORTS 1 +#define ROCE_MAX_MSG_SZ (2 * G_UNIT) +#define IWARP_MAX_MSG_SZ (1 * G_UNIT) + +#define DB_PAGE_SZ (4 * K_UNIT) +#define DWQE_SZ 256 + +#define NUM_PD (128 * K_UNIT) +#define RSVD_PD 0 + +#define MAX_XRCDS (64 * K_UNIT) +#define RSVD_XRCDS 0 + +#define MAX_GID_PER_PORT 16 +#define GID_ENTRY_SZ 32 +#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) +#define NUM_COMP_VECTORS 32 +#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 13) | (1UL << 14) | \ + (1UL << 16) | (1UL << 18) | (1UL << 20) | \ + (1UL << 22)) +#define ROCE_MODE 1 + +#define MAX_FRPL_LEN 511 +#define MAX_PKEYS 1 + +/* FCoE */ +#define FCOE_PCTX_SZ 256 +#define FCOE_CCTX_SZ 256 +#define FCOE_SQE_SZ 128 +#define FCOE_SCQC_SZ 64 +#define FCOE_SCQE_SZ 64 +#define FCOE_SRQC_SZ 64 +#define FCOE_SRQE_SZ 32 + +/* ToE */ +#define TOE_PCTX_SZ 1024 +#define TOE_CQC_SZ 64 + +/* IoE */ +#define IOE_PCTX_SZ 512 + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* OVS */ +#define OVS_PCTX_SZ 256 +#define OVS_SCQC_SZ 64 + +/* ACL */ +#define ACL_PCTX_SZ 512 +#define ACL_SCQC_SZ 64 + +struct dev_sf_svc_attr { + bool ft_en; /* business enable flag (not include RDMA) */ + bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't 
need this bit. + */ + bool rdma_en; + bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. + */ + u8 sf_en_vf; /* SF_EN for PPF/PF's VF */ +}; + +struct host_shared_resource_cap { + u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ + u32 host_cctxs; /* Child Context: max 8K */ + u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ + * TOE/IOE/FCoE each uses 1 SCQ + * RoCE/IWARP uses multiple SCQs + * So 6 SCQ least + */ + u32 host_srqs; /* SRQ number: 256K */ + u32 host_mpts; /* MR number:1M */ +}; + +/* device capability */ +struct service_cap { + struct dev_sf_svc_attr sf_svc_attr; + enum cfg_svc_type_en svc_type; /* user input service type */ + enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ + + /* Host global resources */ + u16 host_total_function; + u8 host_oq_id_mask_val; + u8 host_id; + u8 ep_id; + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + u8 intr_chip_en; + u8 max_cos_id; /* PF/VF's max cos id */ + u8 cos_valid_bitmap; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + u8 max_vf; /* max VF number that PF supported */ + u8 force_up; + bool sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable*/ + u16 max_sqs; + u16 max_rqs; + + /* For test */ + u32 test_qpc_num; + u32 test_qpc_resvd_num; + u32 test_page_size_reorder; + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + u8 test_qpc_alloc_mode; + u8 test_scqc_alloc_mode; + + u32 test_max_conn_num; + u32 test_max_cache_conn_num; + u32 test_scqc_num; + u32 test_mpt_num; + u32 test_scq_resvd_num; + u32 test_mpt_recvd_num; + u32 test_hash_num; + u32 test_reorder_num; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
+ * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + u8 net_port_mode; /* 0:ETH,1:FIC,2:4FC */ + + u32 pf_num; + u32 pf_id_start; + u32 vf_num; /* max numbers of vf in current host */ + u32 vf_id_start; + + struct host_shared_resource_cap shared_res_cap; /* shared capability */ + struct dev_version_info dev_ver_info; /* version */ + struct nic_service_cap nic_cap; /* NIC capability */ + struct rdma_service_cap rdma_cap; /* RDMA capability */ + struct fcoe_service_cap fcoe_cap; /* FCoE capability */ + struct toe_service_cap toe_cap; /* ToE capability */ + struct fc_service_cap fc_cap; /* FC capability */ + struct ovs_service_cap ovs_cap; /* OVS capability */ + struct acl_service_cap acl_cap; /* ACL capability */ +}; + +struct cfg_eq { + enum hinic_service_type type; + int eqn; + int free; /* 1 - alocated, 0- freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + + u8 num_ceq; + u8 num_ceq_remain; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hinic_service_type type; + int free; /* 1 - alocated, 0- freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocate EQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hinic_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +enum cfg_sub_cmd { + /* PPF(PF) <-> FW */ + HINIC_CFG_NIC_CAP = 0, + CFG_FW_VERSION, + CFG_UCODE_VERSION, + HINIC_CFG_FUNC_CAP, + HINIC_CFG_MBOX_CAP = 6, +}; + +struct hinic_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + /* Public resource */ + u8 sf_svc_attr; + u8 host_id; + u8 sf_en_pf; + u8 sf_en_vf; + + u8 ep_id; + u8 intr_type; + u8 max_cos_id; + u8 er_id; + u8 port_id; + u8 max_vf; + u16 svc_cap_en; + u16 host_total_func; + u8 host_oq_id_mask_val; + u8 max_vf_cos_id; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + u8 cfg_file_ver; + u8 net_port_mode; + u8 valid_cos_bitmap; /* every bit indicate cos is valid */ + u8 force_up; + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + /* shared resource */ + u32 host_pctx_num; + u8 host_sf_en; + u8 rsvd2[3]; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + + /* l2nic */ + u16 nic_max_sq; + u16 nic_max_rq; + u16 nic_vf_max_sq; + u16 nic_vf_max_rq; + u8 nic_lro_en; + u8 nic_lro_sz; + u8 nic_tso_sz; + u8 max_queue_allowed; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + + u32 roce_vf_max_qp; + u32 roce_vf_max_cq; + u32 roce_vf_max_srq; + u32 roce_vf_max_mpt; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + + /* IWARP */ + u32 iwarp_max_qp; + u32 iwarp_max_cq; + u32 iwarp_max_mpt; + + u32 iwarp_vf_max_qp; + u32 iwarp_vf_max_cq; + u32 iwarp_vf_max_mpt; + + u32 iwarp_cmtt_cl_start; + u32 iwarp_cmtt_cl_end; + u32 iwarp_cmtt_cl_size; + + u32 iwarp_dmtt_cl_start; + u32 iwarp_dmtt_cl_end; + u32 iwarp_dmtt_cl_size; + + u32 iwarp_wqe_cl_start; + u32 iwarp_wqe_cl_end; + u32 iwarp_wqe_cl_size; + + /* FCoE */ + u32 fcoe_max_qp; + u32 
fcoe_max_cq; + u32 fcoe_max_srq; + + u32 fcoe_max_cctx; + u32 fcoe_cctx_id_start; + + u8 fcoe_vp_id_start; + u8 fcoe_vp_id_end; + u8 rsvd4[2]; + + /* OVS */ + u32 ovs_max_qpc; + u8 ovs_dq_en; + u8 rsvd5[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u32 toe_max_srq; + u32 toe_srq_id_start; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u16 func_id; +}; + +#define VSW_UP_CFG_TIMEOUT (0xFF00000) + +#define VSW_SET_STATEFUL_BITS_TOE(flag) \ + ((flag) << VSW_STATEFUL_TOE_EN) +#define VSW_SET_STATEFUL_BITS_FCOE(flag) \ + ((flag) << VSW_STATEFUL_FCOE_EN) +#define VSW_SET_STATEFUL_BITS_IWARP(flag) \ + ((flag) << VSW_STATEFUL_IWARP_EN) +#define VSW_SET_STATEFUL_BITS_ROCE(flag) \ + ((flag) << VSW_STATEFUL_ROCE_EN) + +#define VSW_GET_STATEFUL_BITS_TOE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_TOE_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_FCOE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_FCOE_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_IWARP(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_IWARP_EN) & 0x1U)) +#define VSW_GET_STATEFUL_BITS_ROCE(flag) \ + ((bool)(((flag) >> VSW_STATEFUL_ROCE_EN) & 0x1U)) + +enum tag_vsw_major_cmd { + VSW_MAJOR_MISC = 10, /* 0~9 reserved for driver */ + VSW_MAJOR_L2SWITCH, + VSW_MAJOR_L2MULTICAST, + VSW_MAJOR_QOS, + VSW_MAJOR_PKTSUPS, + VSW_MAJOR_VLANFILTER, + VSW_MAJOR_MACFILTER, + VSW_MAJOR_IPFILTER, + VSW_MAJOR_VLANMAPPING, + VSW_MAJOR_ETHTRUNK, + VSW_MAJOR_MIRROR, + VSW_MAJOR_DFX, + VSW_MAJOR_ACL, +}; + +enum tag_vsw_minor_misc_cmd { + VSW_MINOR_MISC_INIT_FUNC = 0, + VSW_MINOR_MISC_SET_FUNC_SF_ENBITS, + VSW_MINOR_MISC_GET_FUNC_SF_ENBITS, + VSW_MINOR_MISC_CMD_MAX, +}; + +/* vswitch eth-trunk sub-command */ +enum tag_nic_stateful_enbits { + VSW_STATEFUL_TOE_EN = 0, + VSW_STATEFUL_FCOE_EN = 1, + VSW_STATEFUL_IWARP_EN = 2, + VSW_STATEFUL_ROCE_EN = 3, +}; + +/* function stateful enable parameters */ +struct nic_misc_func_sf_enbits { + u8 status; + u8 version; + u8 rsvd0[6]; + u32 function_id; + u32 stateful_enbits; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */ + u32 stateful_enmask; /* b0:toe, b1:fcoe, b2:iwarp, b3:roce */ +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c new file mode 100644 index 0000000000000000000000000000000000000000..96df2f70e0b03c9cb27dec0a63d59e23989999a6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c @@ -0,0 +1,1575 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_nic_io.h" +#include "hinic_eqs.h" +#include "hinic_wq.h" +#include "hinic_cmdq.h" + +#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ +#define CMDQ_CMD_RETRY_TIMEOUT 1000 /* millisecond */ + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27 + +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU + +#define CMDQ_DB_INFO_SET(val, member) \ + (((val) & CMDQ_DB_INFO_##member##_MASK) \ + << CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + (((val) & CMDQ_CTRL_##member##_MASK) \ + << CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) \ + & CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + (((val) & CMDQ_WQE_HEADER_##member##_MASK) \ + << CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \ + & CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 56 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0x1F +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 + +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define 
CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 + +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) \ + << SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK \ + << SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 20 + +#define WQE_ERRCODE_VAL_MASK 0xF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 + +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define COMPLETE_LEN 3 + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 + +#define CMDQ_WQ_PAGE_SIZE 4096 + +#define WQE_NUM_WQEBBS(wqe_size, wq) \ + ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size)) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 + +#define HINIC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \ + atomic_read(&(cmdq_wq)->delta) + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq) +{ + struct hinic_wq *wq = cmdq->wq; + + return (atomic_read(&wq->delta) == wq->q_depth ? 
true : false); +} + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmd_buf *cmd_buf; + void *dev; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + dev = ((struct hinic_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} +EXPORT_SYMBOL(hinic_alloc_cmd_buf); + +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf) +{ + struct hinic_cmdqs *cmdqs; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf\n"); + return; + } + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} +EXPORT_SYMBOL(hinic_free_cmd_buf); + +static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type) +{ + int wqe_size = 0; + + switch (wqe_type) { + case WQE_LCMD_TYPE: + wqe_size = WQE_LCMD_SIZE; + break; + case WQE_SCMD_TYPE: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static int cmdq_get_wqe_size(enum bufdesc_len len) +{ + int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static void cmdq_set_completion(struct hinic_cmdq_completion *complete, + struct hinic_cmd_buf *buf_out) +{ + struct hinic_sge_resp *sge_resp = &complete->sge_resp; + + hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, + HINIC_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe, + struct hinic_cmd_buf *buf_in) +{ + hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_inline_wqe_data(struct hinic_cmdq_inline_wqe *wqe, + const void *buf_in, u32 in_size) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_fill_db(struct hinic_cmdq_db *db, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hinic_cmdq *cmdq, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hinic_cmdq_db db; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = cpu_to_be32(db.db_info); + + wmb(); /* write all before the doorbell */ + writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + 
enum bufdesc_len buf_len) +{ + struct hinic_ctrl *ctrl; + enum ctrl_sect_len ctrl_len; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + case ASYNC_CMD: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_inline_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hinic_cmd_buf *buf_out, int wrapped, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_scmd->completion, buf_out); + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); +} + +static void cmdq_update_cmd_status(struct hinic_cmdq *cmdq, u16 prod_idx, + struct hinic_cmdq_wqe *wqe) +{ + struct hinic_cmdq_cmd_info *cmd_info; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = 
be32_to_cpu(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp && + cmd_info->cmd_type == HINIC_CMD_TYPE_DIRECT_RESP) + *cmd_info->direct_resp = + cpu_to_be64(wqe_lcmd->completion.direct_resp); +} + +static int hinic_cmdq_sync_timeout_check(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe, u16 pi, + enum hinic_mod_type mod, u8 cmd) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + if (!WQE_COMPLETED(ctrl_info)) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EFAULT; + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed, mod: %u, cmd: 0x%x\n", + mod, cmd); + return 0; +} + +static void __clear_cmd_info(struct hinic_cmdq_cmd_info *cmd_info, + const int *errcode, struct completion *done, + u64 *out_param) +{ + if (cmd_info->errcode == errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == out_param) + cmd_info->direct_resp = NULL; +} + +static int cmdq_retry_get_ack(struct hinic_hwdev *hwdev, + struct completion *done, u8 ceq_id) +{ + ulong timeo = msecs_to_jiffies(CMDQ_CMD_RETRY_TIMEOUT); + int err; + + init_completion(done); + + err = hinic_reschedule_eq(hwdev, HINIC_CEQ, ceq_id); + if (err) + return err; + + if (!wait_for_completion_timeout(done, timeo)) + return -ETIMEDOUT; + + return 0; +} + +static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout) +{ + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_wqe *curr_wqe, wqe; + struct hinic_cmdq_cmd_info *cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. 
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */ + curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HINIC_CMD_TYPE_DIRECT_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo) && + cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == &cmpt_code) + cmd_info->cmpt_code = NULL; + + if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + goto timeout_check_ok; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx, + mod, cmd); + if (err) + cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = + HINIC_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, + "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n", + mod, cmd); + } + + __clear_cmd_info(cmd_info, &errcode, &done, out_param); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, mod: %d cmd: 0x%x prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + hinic_dump_ceq_info(cmdq->hwdev); + return -ETIMEDOUT; + } + +timeout_check_ok: + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_sync_cmd_detail_resp(struct hinic_cmdq *cmdq, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, + u32 timeout) +{ + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_wqe *curr_wqe, wqe; + struct hinic_cmdq_cmd_info *cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep 
wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HINIC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->cmpt_code = &cmpt_code; + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + hinic_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HINIC_CMD_TYPE_SGE_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo) && + cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == &cmpt_code) + cmd_info->cmpt_code = NULL; + + if (cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq detail sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + goto timeout_check_ok; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hinic_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx, + mod, cmd); + if (err) + cmd_info->cmd_type = HINIC_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = + HINIC_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, + "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n", + mod, cmd); + } + + if (cmd_info->errcode == &errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == &done) + cmd_info->done = NULL; + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, mod: %d cmd: 0x%x prod idx: 0x%x\n", + mod, cmd, curr_prod_idx); + hinic_dump_ceq_info(cmdq->hwdev); + return -ETIMEDOUT; + } + +timeout_check_ok: + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_async_cmd(struct hinic_cmdq *cmdq, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in) +{ + struct hinic_wq *wq = cmdq->wq; + int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + struct hinic_cmdq_wqe *curr_wqe, wqe; + int wrapped; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */ + curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = 
cmdq->wrapped; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= cmdq->wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= cmdq->wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_ASYNC; + + cmdq_set_db(cmdq, HINIC_CMDQ_ASYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, u16 in_size) +{ + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_wqe *curr_wqe, wqe; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE); + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */ + curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb setting arm\n"); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, + wrapped, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_COMM, + CMDQ_SET_ARM_CMD, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&wqe, wqe_size); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_SET_ARM; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_params_valid(void *hwdev, struct hinic_cmd_buf *buf_in) +{ + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr\n"); + return -EINVAL; + } + + if (!buf_in->size || buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) { + pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 + +static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & HINIC_CMDQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && + !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, u64 *out_param, + u32 timeout) +{ + struct hinic_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) { + pr_err("Invalid CMDQ parameters\n"); + return err; + } + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + err = hinic_func_own_get(hwdev); + if (err) + return err; + + err = 
cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, out_param, timeout); + hinic_func_own_free(hwdev); + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} +EXPORT_SYMBOL(hinic_cmdq_direct_resp); + +int hinic_cmdq_detail_resp(void *hwdev, + enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, + u32 timeout) +{ + struct hinic_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, buf_out, timeout); + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} +EXPORT_SYMBOL(hinic_cmdq_detail_resp); + +int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in) +{ + struct hinic_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); + return err; + } + + return cmdq_async_cmd(&cmdqs->cmdq[HINIC_CMDQ_ASYNC], ack_type, mod, + cmd, buf_in); +} +EXPORT_SYMBOL(hinic_cmdq_async); + +int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmdq *cmdq; + struct hinic_cmdq_arm_bit arm_bit; + enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC; + u16 in_size; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED)) + return -EPERM; + + cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs; + + if (!(cmdqs->status & HINIC_CMDQ_ENABLE)) + return -EBUSY; + + if (q_type == HINIC_SET_ARM_CMDQ) { + if (q_id >= HINIC_MAX_CMDQ_TYPES) + return -EFAULT; + + cmdq_type = q_id; + } + /* sq is using interrupt now, so we only need to set arm bit for cmdq, + * remove comment below if need to set sq arm bit + * else + * cmdq_type = HINIC_CMDQ_SYNC; + */ + + cmdq = &cmdqs->cmdq[cmdq_type]; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + in_size = sizeof(arm_bit); + + err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, + "Failed to set arm for q_type: %d, qid %d\n", + q_type, q_id); + return err; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_arm_bit); + +static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe, u16 ci) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_inline_wqe *inline_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + struct hinic_ctrl *ctrl; + u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info); + int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + int wqe_size = cmdq_get_wqe_size(buf_len); + u16 num_wqebbs; + + if (wqe_size == WQE_LCMD_SIZE) { + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + }
else { + inline_wqe = &wqe->inline_wqe; + wqe_scmd = &inline_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = HINIC_CMD_TYPE_NONE; + + wmb(); /* verify wqe is clear */ + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq); + hinic_put_wqe(cmdq->wq, num_wqebbs); +} + +static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe, u16 cons_idx) +{ + u16 prod_idx = cons_idx; + + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, prod_idx, wqe); + + if (cmdq->cmd_infos[prod_idx].cmpt_code) { + *cmdq->cmd_infos[prod_idx].cmpt_code = + CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[prod_idx].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[prod_idx].done) { + complete(cmdq->cmd_infos[prod_idx].done); + cmdq->cmd_infos[prod_idx].done = NULL; + } + + spin_unlock(&cmdq->cmdq_lock); + + clear_wqe_complete_bit(cmdq, wqe, cons_idx); +} + +static void cmdq_async_cmd_handler(struct hinic_hwdev *hwdev, + struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe, u16 ci) +{ + u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + int addr_sz = sizeof(u64); + + hinic_be32_to_cpu((void *)&buf, addr_sz); + if (buf) + hinic_free_cmd_buf(hwdev, (struct hinic_cmd_buf *)buf); + + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe, u16 ci) +{ + struct hinic_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd; + struct hinic_ctrl *ctrl = &wqe_scmd->ctrl; + u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe, ci); + + return 0; +} + +#define HINIC_CMDQ_WQE_HEAD_LEN 32 +static void hinic_dump_cmdq_wqe_head(struct hinic_hwdev *hwdev, + struct hinic_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)wqe; + + for (i = 0; i < (HINIC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + data[i], data[i + 1], data[i + 2], + data[i + 3]);/*lint !e679*/ + } +} + +void hinic_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)handle)->cmdqs; + enum hinic_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); + struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hinic_hwdev *hwdev = cmdqs->hwdev; + struct hinic_cmdq_wqe *wqe; + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_ctrl *ctrl; + struct hinic_cmdq_cmd_info *cmd_info; + u32 ctrl_info; + u16 ci; + int set_arm = 1; + + while ((wqe = hinic_read_wqe(cmdq->wq, 1, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + + if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) { + set_arm = 1; + break; + } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT || + cmd_info->cmd_type == HINIC_CMD_TYPE_FAKE_TIMEOUT) { + if (cmd_info->cmd_type == HINIC_CMD_TYPE_TIMEOUT) { + sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + hinic_dump_cmdq_wqe_head(hwdev, wqe); + } + + set_arm = 1; + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) { + /* arm_bit was set until here */ + set_arm = 0; + + if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + break; + } else { + set_arm = 1; + + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = 
be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the cmdq wqe until we have + * verified the command has been processed and + * written back. + */ + dma_rmb(); + + if (cmdq_type == HINIC_CMDQ_ASYNC) + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + else + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + } + + if (set_arm) + hinic_set_arm_bit(hwdev, HINIC_SET_ARM_CMDQ, cmdq_type); +} + +static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq, + struct hinic_cmdq_pages *cmdq_pages, + struct hinic_cmdq_ctxt *cmdq_ctxt) +{ + struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + struct hinic_hwdev *hwdev = cmdqs->hwdev; + struct hinic_wq *wq = cmdq->wq; + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); + + pfn = CMDQ_PFN(wq_first_page_paddr); + + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | + CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + /* If only use one page, use 0-level CLA */ + if (cmdq->wq->num_q_pages != 1) { + cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); + + cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev); + cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + cmdq_ctxt->cmdq_id = cmdq->cmdq_type; +} + +bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev, + struct hinic_cmdq_ctxt *cmdq_ctxt) +{ + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 curr_pg_pfn, wq_block_pfn; + + if (cmdq_ctxt->ppf_idx != hinic_ppf_idx(hwdev) || + cmdq_ctxt->cmdq_id > HINIC_MAX_CMDQ_TYPES) + return false; + + curr_pg_pfn = CMDQ_CTXT_PAGE_INFO_GET(ctxt_info->curr_wqe_page_pfn, + CURR_WQE_PAGE_PFN); + wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_GET(ctxt_info->wq_block_pfn, + WQ_BLOCK_PFN); + /* VF must use 0-level CLA */ + if (curr_pg_pfn != wq_block_pfn) + return false; + + return true; +} + +static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev, + struct hinic_wq *wq, enum hinic_cmdq_type q_type) +{ + void __iomem *db_base; + int err = 0; + + cmdq->wq = wq; + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + err = -ENOMEM; + goto cmd_infos_err; + } + + err = hinic_alloc_db_addr(hwdev, &db_base, NULL); + if (err) + goto alloc_db_err; + + cmdq->db_base = (u8 *)db_base; + return 0; + +alloc_db_err: + kfree(cmdq->cmd_infos); + +cmd_infos_err: + + return err; +} + +static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq) +{ + hinic_free_db_addr(hwdev, cmdq->db_base, NULL); + kfree(cmdq->cmd_infos); +} + +int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + struct hinic_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0}; + enum hinic_cmdq_type cmdq_type; + u16 in_size; + u16 out_size = sizeof(*cmdq_ctxt); + int err; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; 
cmdq_type++) { + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_ctxt->func_idx = hinic_global_func_id_hw(hwdev); + in_size = sizeof(*cmdq_ctxt); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_CMDQ_CTXT_SET, + cmdq_ctxt, in_size, + &cmdq_ctxt_out, &out_size, 0); + if (err || !out_size || cmdq_ctxt_out.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt_out.status, out_size); + return -EFAULT; + } + } + + cmdqs->status |= HINIC_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev, + struct hinic_cmdq *cmdq) +{ + struct hinic_cmdq_wqe *wqe; + struct hinic_cmdq_cmd_info *cmdq_info; + u16 ci, wqe_left, i; + u64 buf; + + spin_lock_bh(&cmdq->cmdq_lock); + wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta); + ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx); + for (i = 0; i < wqe_left; i++, ci++) { + ci = MASKED_WQE_IDX(cmdq->wq, ci); + cmdq_info = &cmdq->cmd_infos[ci]; + + if (cmdq_info->cmd_type == HINIC_CMD_TYPE_SET_ARM) + continue; + + if (cmdq->cmdq_type == HINIC_CMDQ_ASYNC) { + wqe = hinic_get_wqebb_addr(cmdq->wq, ci); + buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + wqe->wqe_lcmd.buf_desc.saved_async_buf = 0; + + hinic_be32_to_cpu((void *)&buf, sizeof(u64)); + if (buf) + hinic_free_cmd_buf(hwdev, + (struct hinic_cmd_buf *)buf); + } else { + if (cmdq_info->done) { + complete(cmdq_info->done); + cmdq_info->done = NULL; + cmdq_info->cmpt_code = NULL; + cmdq_info->direct_resp = NULL; + cmdq_info->errcode = NULL; + } + } + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq); + } + + return hinic_set_cmdq_ctxts(hwdev); +} + +int hinic_cmdqs_init(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs; + struct hinic_cmdq_ctxt *cmdq_ctxt; + enum hinic_cmdq_type type, cmdq_type; + size_t saved_wqs_size; + u32 max_wqe_size; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); + cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n"); + err = -ENOMEM; + goto alloc_wqs_err; + } + + cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev->dev_hdl, + HINIC_CMDQ_BUF_SIZE, + HINIC_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + err = -ENOMEM; + goto pool_create_err; + } + + max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE); + err = hinic_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + hwdev->dev_hdl, HINIC_MAX_CMDQ_TYPES, + hwdev->wq_page_size, CMDQ_WQEBB_SIZE, + HINIC_CMDQ_DEPTH, max_wqe_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n"); + goto cmdq_alloc_err; + } + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, + &cmdqs->saved_wqs[cmdq_type], cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type: %d\n", + 
cmdq_type); + goto init_cmdq_err; + } + + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages, cmdq_ctxt); + } + + err = hinic_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = HINIC_CMDQ_SYNC; + for (; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + +cmdq_alloc_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs->saved_wqs); + +alloc_wqs_err: + kfree(cmdqs); + + return err; +} + +void hinic_cmdqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC; + + cmdqs->status &= ~HINIC_CMDQ_ENABLE; + + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + hinic_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + } + + hinic_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs->saved_wqs); + + kfree(cmdqs); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h new file mode 100644 index 0000000000000000000000000000000000000000..411a9bd7103502721a3c9b7e2a48610ec7c81ad8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_CMDQ_H_ +#define HINIC_CMDQ_H_ + +#define HINIC_DB_OFF 0x00000800 + +#define HINIC_SCMD_DATA_LEN 16 + +#define HINIC_CMDQ_DEPTH 4096 + +#define HINIC_CMDQ_BUF_SIZE 2048U +#define HINIC_CMDQ_BUF_HW_RSVD 8 +#define HINIC_CMDQ_MAX_DATA_SIZE \ + (HINIC_CMDQ_BUF_SIZE - HINIC_CMDQ_BUF_HW_RSVD) + +enum hinic_cmdq_type { + HINIC_CMDQ_SYNC, + HINIC_CMDQ_ASYNC, + HINIC_MAX_CMDQ_TYPES, +}; + +enum hinic_db_src_type { + HINIC_DB_SRC_CMDQ_TYPE, + HINIC_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hinic_cmdq_db_type { + HINIC_DB_SQ_RQ_TYPE, + HINIC_DB_CMDQ_TYPE, +}; + +/* CMDQ WQE CTRLS */ +struct hinic_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC_SCMD_DATA_LEN]; +}; + +struct hinic_lcmd_bufdesc { + struct hinic_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hinic_cmdq_db { + u32 db_info; + u32 rsvd; +}; + +struct hinic_status { + u32 status_info; +}; + +struct hinic_ctrl { + u32 ctrl_info; +}; + +struct hinic_sge_resp { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_cmdq_completion { + /* HW Format */ + union { + struct hinic_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic_cmdq_wqe_scmd { + struct hinic_cmdq_header header; + struct hinic_cmdq_db db; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_scmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_wqe_lcmd { + struct hinic_cmdq_header header; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_lcmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_inline_wqe { + struct hinic_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic_cmdq_wqe { + /* HW Format */ + union { + struct hinic_cmdq_inline_wqe inline_wqe; + struct hinic_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hinic_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +struct hinic_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hinic_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_id; + u8 ppf_idx; + + u8 rsvd1[4]; + + struct hinic_cmdq_ctxt_info ctxt_info; +}; + +enum hinic_cmdq_status { + HINIC_CMDQ_ENABLE = BIT(0), +}; + +enum hinic_cmdq_cmd_type { + HINIC_CMD_TYPE_NONE, + HINIC_CMD_TYPE_SET_ARM, + HINIC_CMD_TYPE_DIRECT_RESP, + HINIC_CMD_TYPE_SGE_RESP, + HINIC_CMD_TYPE_ASYNC, + HINIC_CMD_TYPE_TIMEOUT, + HINIC_CMD_TYPE_FAKE_TIMEOUT, +}; + +struct hinic_cmdq_cmd_info { + enum hinic_cmdq_cmd_type cmd_type; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; +}; + +struct hinic_cmdq { + struct hinic_wq *wq; + + enum hinic_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + /* doorbell area */ + u8 __iomem *db_base; + + struct hinic_cmdq_ctxt cmdq_ctxt; + + struct hinic_cmdq_cmd_info *cmd_infos; + + struct hinic_hwdev *hwdev; +}; + +struct hinic_cmdqs { + struct hinic_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + + struct hinic_wq *saved_wqs; + + struct hinic_cmdq_pages cmdq_pages; + struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; +}; + +void hinic_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); + +int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_idle(struct hinic_cmdq *cmdq); + +int hinic_cmdqs_init(struct hinic_hwdev *hwdev); + +void hinic_cmdqs_free(struct hinic_hwdev *hwdev); + +bool hinic_cmdq_check_vf_ctxt(struct 
hinic_hwdev *hwdev, + struct hinic_cmdq_ctxt *cmdq_ctxt); + +void hinic_cmdq_flush_cmd(struct hinic_hwdev *hwdev, + struct hinic_cmdq *cmdq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c deleted file mode 100644 index 02c74fd8380e38acdb22f972e9597bc4eae413c9..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#include -#include -#include - -#include "hinic_common.h" - -/** - * hinic_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - **/ -void hinic_cpu_to_be32(void *data, int len) -{ - u32 *mem = data; - int i; - - len = len / sizeof(u32); - - for (i = 0; i < len; i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/** - * hinic_be32_to_cpu - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - **/ -void hinic_be32_to_cpu(void *data, int len) -{ - u32 *mem = data; - int i; - - len = len / sizeof(u32); - - for (i = 0; i < len; i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - -/** - * hinic_set_sge - set dma area in scatter gather entry - * @sge: scatter gather entry - * @addr: dma address - * @len: length of relevant data in the dma address - **/ -void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len) -{ - sge->hi_addr = upper_32_bits(addr); - sge->lo_addr = lower_32_bits(addr); - sge->len = len; -} - -/** - * hinic_sge_to_dma - get dma address from scatter gather entry - * @sge: scatter gather entry - * - * Return dma address of sg entry - **/ -dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) -{ - return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_csr.h new file mode 100644 index 0000000000000000000000000000000000000000..045a32d07359765cab486b18508c7163783e6b5a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_csr.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_CSR_H +#define HINIC_CSR_H + +#define HINIC_CSR_GLOBAL_BASE_ADDR 0x4000 + +/* HW interface registers */ +#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8 +#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 +#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 + +#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100 +#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108 +#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C + +#define HINIC_CSR_DMA_ATTR_TBL_BASE 0xC80 + +#define HINIC_ELECTION_BASE 0x200 + +#define HINIC_CSR_DMA_ATTR_TBL_STRIDE 0x4 +#define HINIC_CSR_DMA_ATTR_TBL_ADDR(idx) \ + (HINIC_CSR_DMA_ATTR_TBL_BASE \ + + (idx) * HINIC_CSR_DMA_ATTR_TBL_STRIDE) + +#define HINIC_PPF_ELECTION_STRIDE 0x4 +#define HINIC_CSR_MAX_PORTS 4 +#define HINIC_CSR_PPF_ELECTION_ADDR \ + (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE) + +#define HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HINIC_CSR_GLOBAL_BASE_ADDR + HINIC_ELECTION_BASE + \ + HINIC_CSR_MAX_PORTS * HINIC_PPF_ELECTION_STRIDE) + +/* MSI-X registers */ +#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 +#define HINIC_CSR_MSIX_CNT_BASE 0x2004 + +#define HINIC_CSR_MSIX_STRIDE 0x8 + +#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ + (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ + (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +/* EQ registers */ +#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 + +#define HINIC_EQ_MTT_OFF_STRIDE 0x40 + +#define HINIC_CSR_AEQ_MTT_OFF(id) \ + (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_CEQ_MTT_OFF(id) \ + (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)(((type) == HINIC_AEQ) ? \ + HINIC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \ + HINIC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)(((type) == HINIC_AEQ) ? 
\ + HINIC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \ + HINIC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + +#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HINIC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08 +#define HINIC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C + +#define HINIC_EQ_OFF_STRIDE 0x80 + +#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HINIC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008 +#define HINIC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C + +/* For multi-host mgmt + * CEQ_CTRL_0_ADDR: bit26~29: uP write vf mode is normal(0x0),bmgw(0x1), + * vmgw(0x2) + */ +#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +/* API CMD registers */ +#define HINIC_CSR_API_CMD_BASE 0xF000 + +#define HINIC_CSR_API_CMD_STRIDE 0x100 + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +/* VF control registers in pf */ +#define HINIC_PF_CSR_VF_FLUSH_BASE 0x1F400 +#define HINIC_PF_CSR_VF_FLUSH_STRIDE 0x4 + +#define HINIC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C + +#define HINIC_ICPL_RESERVD_ADDR 0x9204 + +#define HINIC_PF_CSR_VF_FLUSH_OFF(idx) \ + (HINIC_PF_CSR_VF_FLUSH_BASE + (idx) * HINIC_PF_CSR_VF_FLUSH_STRIDE) + +#define HINIC_IPSU_CHANNEL_NUM 7 +#define HINIC_IPSU_CHANNEL0_ADDR 0x404 +#define HINIC_IPSU_CHANNEL_OFFSET 0x14 +#define HINIC_IPSU_DIP_OFFSET 13 +#define HINIC_IPSU_SIP_OFFSET 14 +#define HINIC_IPSU_DIP_SIP_MASK \ + ((0x1 << HINIC_IPSU_SIP_OFFSET) | (0x1 << HINIC_IPSU_DIP_OFFSET)) + +#define HINIC_IPSURX_VXLAN_DPORT_ADDR 0x6d4 + +/* For multi-host mgmt + * 0x75C0: bit0~3: uP write, host mode is bmwg or normal host + * bit4~7: master host ppf write when function initializing + * bit8~23: only for slave host PXE + * 0x75C4: slave host status + * 
bit0~7: host 0~7 functions status + */ +#define HINIC_HOST_MODE_ADDR 0x75C0 +#define HINIC_MULT_HOST_SLAVE_STATUS_ADDR 0x75C4 + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h new file mode 100644 index 0000000000000000000000000000000000000000..d6a23e28e17872c4803cb9ab54cf27d9373cd5e9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_ctx_def.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __HINIC_CTX_DEF_H__ +#define __HINIC_CTX_DEF_H__ + +#define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask) + +#define HINIC_CEQE_QN_MASK 0x3FFU + +#define HINIC_Q_CTXT_MAX 42 + +#define HINIC_RQ_CQ_MAX 128 + +#define MAX_WQE_SIZE(max_sge, wqebb_size) \ + (((max_sge) <= 2) ? (wqebb_size) : \ + ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size))) + +/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define HINIC_CI_Q_ADDR_SIZE 64 + +#define CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz)) + +#define HINIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + \ + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define HINIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + \ + (q_id) * HINIC_CI_Q_ADDR_SIZE) + +#define Q_CTXT_SIZE 48 +#define TSO_LRO_CTXT_SIZE 240 + +#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \ + + (q_id) * Q_CTXT_SIZE) + +#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \ + + (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE) + +#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \ + + (num_sqs) * sizeof(struct hinic_sq_ctxt))) + +#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic_qp_ctxt_header) \ + + (num_rqs) * sizeof(struct hinic_rq_ctxt))) + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT 8 +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 +#define SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 +#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT 31 + +#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK 0x1FU +#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FFU +#define SQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define SQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U + +#define SQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((val) & SQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << SQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define SQ_CTXT_CI_IDX_SHIFT 11 +#define SQ_CTXT_CI_OWNER_SHIFT 23 + +#define SQ_CTXT_CI_IDX_MASK 0xFFFU +#define SQ_CTXT_CI_OWNER_MASK 0x1U + +#define SQ_CTXT_CI_SET(val, member) \ + (((val) & SQ_CTXT_CI_##member##_MASK) \ + << SQ_CTXT_CI_##member##_SHIFT) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define SQ_CTXT_WQ_PAGE_PI_SHIFT 20 + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SQ_CTXT_WQ_PAGE_PI_MASK 0xFFFU + +#define SQ_CTXT_WQ_PAGE_SET(val, member) \ + (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) \ + << SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + 
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define SQ_CTXT_PREF_CI_SHIFT 20 + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SQ_CTXT_PREF_CI_MASK 0xFFFU + +#define SQ_CTXT_PREF_SET(val, member) \ + (((val) & SQ_CTXT_PREF_##member##_MASK) \ + << SQ_CTXT_PREF_##member##_SHIFT) + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) \ + << SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 +#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT 1 + +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_OWNER_MASK 0x1U + +#define RQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define RQ_CTXT_PI_IDX_SHIFT 0 +#define RQ_CTXT_PI_INTR_SHIFT 22 +#define RQ_CTXT_PI_CEQ_ARM_SHIFT 31 + +#define RQ_CTXT_PI_IDX_MASK 0xFFFU +#define RQ_CTXT_PI_INTR_MASK 0x3FFU +#define RQ_CTXT_PI_CEQ_ARM_MASK 0x1U + +#define RQ_CTXT_PI_SET(val, member) \ + (((val) & RQ_CTXT_PI_##member##_MASK) << \ + RQ_CTXT_PI_##member##_SHIFT) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define RQ_CTXT_WQ_PAGE_CI_SHIFT 20 + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define RQ_CTXT_WQ_PAGE_CI_MASK 0xFFFU + +#define RQ_CTXT_WQ_PAGE_SET(val, member) \ + (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define RQ_CTXT_PREF_CI_SHIFT 20 + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define RQ_CTXT_PREF_CI_MASK 0xFFFU + +#define RQ_CTXT_PREF_SET(val, member) \ + (((val) & RQ_CTXT_PREF_##member##_MASK) << \ + RQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define RQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +enum sq_cflag { + CFLAG_DATA_PATH = 0, +}; + +enum hinic_qp_ctxt_type { + HINIC_QP_CTXT_TYPE_SQ, + HINIC_QP_CTXT_TYPE_RQ, +}; + +/* service type relates define */ +enum cfg_svc_type_en { + CFG_SVC_NIC_BIT0 = (1 << 0), + CFG_SVC_ROCE_BIT1 = (1 << 1), + CFG_SVC_FCOE_BIT2 = (1 << 2), + CFG_SVC_TOE_BIT3 = (1 << 3), + CFG_SVC_IWARP_BIT4 = (1 << 4), + CFG_SVC_FC_BIT5 = (1 << 5), + + CFG_SVC_FIC_BIT6 = (1 << 6), + CFG_SVC_OVS_BIT7 = (1 << 7), + CFG_SVC_ACL_BIT8 = (1 << 8), + CFG_SVC_IOE_BIT9 = (1 << 9), + CFG_SVC_HWPT_BIT10 = (1 << 10), + + CFG_SVC_FT_EN = (CFG_SVC_FCOE_BIT2 | CFG_SVC_TOE_BIT3 | + CFG_SVC_FC_BIT5 | CFG_SVC_IOE_BIT9), + CFG_SVC_RDMA_EN = (CFG_SVC_ROCE_BIT1 | CFG_SVC_IWARP_BIT4) +}; + +#define IS_NIC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0) +#define IS_ROCE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ROCE_BIT1) +#define IS_FCOE_TYPE(dev) \ + 
((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FCOE_BIT2) +#define IS_TOE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_TOE_BIT3) +#define IS_IWARP_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IWARP_BIT4) +#define IS_FC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FC_BIT5) +#define IS_FIC_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FIC_BIT6) +#define IS_OVS_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_OVS_BIT7) +#define IS_ACL_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_ACL_BIT8) +#define IS_IOE_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_IOE_BIT9) +#define IS_FT_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_FT_EN) +#define IS_RDMA_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_RDMA_EN) +#define IS_HWPT_TYPE(dev) \ + ((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_HWPT_BIT10) + +#endif /* __HINIC_CTX_DEF_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..415fe989ef33f079e1727fd7a6fd8ec1b2d28a66 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_NIC_DBG_H_ +#define HINIC_NIC_DBG_H_ + +u16 hinic_dbg_get_qp_num(void *hwdev); + +void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id); + +void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id); + +void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id); + +u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id); + +void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id); + +u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id); + +u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id); + +int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr, + u64 *phy_addr, u32 *pg_idx); + +u16 hinic_dbg_get_global_qpn(const void *hwdev); + +int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size); + +int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size); + +int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data); + +int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask); + +int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value); + +int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value); + +int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value); + +int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value); + +int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 value); + +int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value1, u64 *value2); + +int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 value1, u64 value2); + +int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); + +int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); + +int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val); + +int hinic_api_csr_wr64(void *hwdev, u8 dest, u32 addr, u64 val); + +int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size); + +u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size); + +void hinic_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset); + +int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c new file mode 100644 index 0000000000000000000000000000000000000000..f277c2e379e0f8783857b5121cca42609a1bcf06 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_dev.h" +#include "hinic_lld.h" +#include "hinic_dbgtool_knl.h" + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +#define DBGTOOL_MSG_MAX_SIZE 2048ULL +#define HINIC_SELF_CMD_UP2PF_FFM 0x26 + +void *g_hinic_card_node_array[MAX_CARD_NUM] = {0}; +void *g_hinic_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_hinic_card_phy_addr[MAX_CARD_NUM] = {0}; +/* lock for g_hinic_card_vir_addr */ +struct mutex g_hinic_addr_lock; +int g_hinic_card_id; + +/* dbgtool character device name, class name, dev path */ +#define CHR_DEV_DBGTOOL "dbgtool_chr_dev" +#define CLASS_DBGTOOL "dbgtool_class" +#define DBGTOOL_DEV_PATH "/dev/dbgtool_chr_dev" + +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; + struct ffm_record_info *ffm; +}; + +static dev_t dbgtool_dev_id; /* device id */ +static struct cdev dbgtool_chr_dev; /* struct of char device */ + +/*lint -save -e104 -e808*/ +static struct class *dbgtool_d_class; /* struct of char class */ +/*lint -restore*/ + +static int g_dbgtool_init_flag; +static int g_dbgtool_ref_cnt; + +static int dbgtool_knl_open(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static int dbgtool_knl_release(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static ssize_t dbgtool_knl_read(struct file *pfile, + char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static ssize_t dbgtool_knl_write(struct file *pfile, + const char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static bool is_valid_phy_addr(u64 offset) +{ + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + if (offset == g_hinic_card_phy_addr[i]) + return true; + } + + return false; +} + +int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %lu is bigger than alloc\n", vmsize); + return -EAGAIN; + } + + if (offset && !is_valid_phy_addr((u64)offset) && + !hinic_is_valid_bar_addr((u64)offset)) { + pr_err("offset is invalid"); + return -EAGAIN; + } + + /* old version of tool set vma->vm_pgoff to 0 */ + phy_addr = offset ? 
offset : g_hinic_card_phy_addr[g_hinic_card_id]; + if (!phy_addr) { + pr_err("Card_id = %d physical address is 0\n", g_hinic_card_id); + return -EAGAIN; + } + + if (remap_pfn_range(vma, vma->vm_start, + (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * dbgtool_knl_api_cmd_read - used for read operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_api_cmd_read(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_rd.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining pf_id chipif pointer */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id); + return -EFAULT; + } + + /* alloc cmd and ack memory */ + size = para->param.api_rd.size; + if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Read cmd size invalid or more than 2M\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc read cmd mem fail\n"); + return -ENOMEM; + } + + ack_size = para->param.api_rd.ack_size; + if (para->param.api_rd.ack_size == 0) { + pr_err("Read cmd ack size is 0\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL); + if (!ack) { + pr_err("Alloc read ack mem fail\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + /* Invoke the api cmd interface read content*/ + ret = hinic_api_cmd_read_ack(hwdev, para->param.api_rd.dest, + cmd, size, ack, ack_size); + if (ret) { + pr_err("Api send single cmd ack fail!\n"); + goto api_rd_fail; + } + + /* Copy the contents of the ack to the user state */ + if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) { + pr_err("Copy ack to user fail\n"); + ret = -EFAULT; + } +api_rd_fail: +copy_user_cmd_fail: + kfree(ack); +alloc_ack_mem_fail: + kfree(cmd); + return ret; +} + +/** + * dbgtool_knl_api_cmd_write - used for write operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_api_cmd_write(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_wr.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining chipif pointer according to pf_id */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null\n", pf_id); + return -EFAULT; + } + + /* alloc cmd memory */ + size = para->param.api_wr.size; + if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Write cmd size invalid or more than 2M\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc write cmd mem fail\n"); + return -ENOMEM; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto 
copy_user_cmd_fail; + } + + /* api cmd interface is invoked to write the content */ + ret = hinic_api_cmd_write_nack(hwdev, para->param.api_wr.dest, + cmd, size); + if (ret) + pr_err("Api send single cmd nack fail\n"); + +copy_user_cmd_fail: + kfree(cmd); + return ret; +} + +void hinic_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, + int card_idx, void **g_func_handle_array) +{ + u32 func_idx; + struct hinic_hwdev *hwdev; + + if (!dev_info) { + pr_err("Params error!\n"); + return; + } + + /* pf at most 16 */ + for (func_idx = 0; func_idx < 16; func_idx++) { + hwdev = (struct hinic_hwdev *)g_func_handle_array[func_idx]; + + dev_info[func_idx].phy_addr = g_hinic_card_phy_addr[card_idx]; + + if (!hwdev) { + dev_info[func_idx].bar0_size = 0; + dev_info[func_idx].bus = 0; + dev_info[func_idx].slot = 0; + dev_info[func_idx].func = 0; + } else { + dev_info[func_idx].bar0_size = + pci_resource_len + (((struct pci_dev *)hwdev->pcidev_hdl), 0); + dev_info[func_idx].bus = + ((struct pci_dev *) + hwdev->pcidev_hdl)->bus->number; + dev_info[func_idx].slot = + PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + dev_info[func_idx].func = + PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + } + } +} + +/** + * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para, + void **g_func_handle_array) +{ + struct pf_dev_info dev_info[16] = { {0} }; + unsigned char *tmp; + int i; + + mutex_lock(&g_hinic_addr_lock); + if (!g_hinic_card_vir_addr[g_hinic_card_id]) { + g_hinic_card_vir_addr[g_hinic_card_id] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_hinic_card_vir_addr[g_hinic_card_id]) { + pr_err("Alloc dbgtool api chain fail!\n"); + mutex_unlock(&g_hinic_addr_lock); + return -EFAULT; + } + + memset(g_hinic_card_vir_addr[g_hinic_card_id], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_hinic_card_phy_addr[g_hinic_card_id] = + virt_to_phys(g_hinic_card_vir_addr[g_hinic_card_id]); + if (!g_hinic_card_phy_addr[g_hinic_card_id]) { + pr_err("phy addr for card %d is 0\n", g_hinic_card_id); + free_pages((unsigned long)g_hinic_card_vir_addr + [g_hinic_card_id], DBGTOOL_PAGE_ORDER); + g_hinic_card_vir_addr[g_hinic_card_id] = NULL; + mutex_unlock(&g_hinic_addr_lock); + return -EFAULT; + } + + tmp = g_hinic_card_vir_addr[g_hinic_card_id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_hinic_addr_lock); + + hinic_chipif_get_all_pf_dev_info(dev_info, g_hinic_card_id, + g_func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(para->param.dev_info, dev_info, + (unsigned int)sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info))) { + pr_err("Copy ffm_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_clr - Clear FFM information + * @para: unused + * 
@dbgtool_info: the dbgtool info + */ +static void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + dbgtool_info->ffm->ffm_num = 0; +} + +/** + * dbgtool_knl_msg_to_up - After receiving dbgtool command sends a message to uP + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_msg_to_up(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + void *buf_in; + void *buf_out; + u16 out_size; + u8 pf_id; + + if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("User data(%d) more than 2KB\n", + para->param.msg2up.in_size); + return -EFAULT; + } + + pf_id = para->param.msg2up.pf_id; + /* pf at most 16 */ + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id); + return -EFAULT; + } + + if (!g_func_handle_array[pf_id]) { + pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id); + return -EFAULT; + } + + /* alloc buf_in and buf_out memory, apply for 2K */ + buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL); + if (!buf_in) { + pr_err("Alloc buf_in mem fail\n"); + return -ENOMEM; + } + + buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, 0); + if (!buf_out) { + pr_err("Alloc buf_out mem fail\n"); + ret = -ENOMEM; + goto alloc_buf_out_mem_fail; + } + + /* copy buf_in from the user state */ + if (copy_from_user(buf_in, para->param.msg2up.buf_in, + (unsigned long)para->param.msg2up.in_size)) { + pr_err("Copy buf_in from user fail\n"); + ret = -EFAULT; + goto copy_user_buf_in_fail; + } + + out_size = DBGTOOL_MSG_MAX_SIZE; + /* Invoke the pf2up communication interface */ + ret = hinic_msg_to_mgmt_sync(g_func_handle_array[pf_id], + para->param.msg2up.mod, + para->param.msg2up.cmd, + buf_in, + para->param.msg2up.in_size, + buf_out, + &out_size, + 0); + if (ret) + goto msg_2_up_fail; + + /* Copy the out_size and buf_out content to user mode */ + if (copy_to_user(para->param.msg2up.out_size, &out_size, + (unsigned int)sizeof(out_size))) { + pr_err("Copy out_size to user fail\n"); + ret = -EFAULT; + goto copy_out_size_fail; + } + + if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) { + pr_err("Copy buf_out to user fail\n"); + ret = -EFAULT; + } + +copy_out_size_fail: +msg_2_up_fail: +copy_user_buf_in_fail: + kfree(buf_out); +alloc_buf_out_mem_fail: + kfree(buf_in); + return ret; +} + +long hinic_dbgtool_knl_free_mem(int id) +{ + unsigned char *tmp; + int i; + + mutex_lock(&g_hinic_addr_lock); + + if (!g_hinic_card_vir_addr[id]) { + mutex_unlock(&g_hinic_addr_lock); + return 0; + } + + tmp = g_hinic_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)g_hinic_card_vir_addr[id], + DBGTOOL_PAGE_ORDER); + g_hinic_card_vir_addr[id] = NULL; + g_hinic_card_phy_addr[id] = 0; + + mutex_unlock(&g_hinic_addr_lock); + + return 0; +} + +/*lint -save -e771 -e794*/ + +/** + * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry + * @pfile: the pointer to file + * @cmd: the command type + */ +static long dbgtool_knl_unlocked_ioctl(struct file *pfile, + unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + unsigned int real_cmd; + struct dbgtool_param param; + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *card_info = NULL; + int i; + + (void)memset(¶m, 0, sizeof(param)); + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + pr_err("Copy param from user fail\n"); + return 
-EFAULT; + } + + param.chip_name[IFNAMSIZ - 1] = '\0'; + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_hinic_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(param.chip_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", param.chip_name); + return -EFAULT; + } + + g_hinic_card_id = i; + + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + real_cmd = _IOC_NR(cmd); + + switch (real_cmd) { + case DBGTOOL_CMD_API_RD: + ret = dbgtool_knl_api_cmd_read(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_API_WR: + ret = dbgtool_knl_api_cmd_write(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(&param, dbgtool_info); + break; + case DBGTOOL_CMD_FFM_CLR: + dbgtool_knl_ffm_info_clr(&param, dbgtool_info); + break; + case DBGTOOL_CMD_PF_DEV_INFO_GET: + ret = dbgtool_knl_pf_dev_info_get(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_MSG_2_UP: + ret = dbgtool_knl_msg_to_up(&param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FREE_MEM: + ret = hinic_dbgtool_knl_free_mem(i); + break; + default: + pr_err("Dbgtool cmd(0x%x) not supported now\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + return ret; +} + +/** + * ffm_intr_msg_record - record FFM interrupt messages sent up from the uP + * @handle: the function handle + * @buf_in: the pointer to input buffer + * @buf_out: the pointer to output buffer + */ +static void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct ffm_intr_info *intr; + u32 ffm_idx; + struct timex txc; + struct rtc_time rctm; + struct card_node *card_info = NULL; + struct hinic_hwdev *hwdev = handle; + bool flag = false; + int i, j; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_hinic_card_node_array[i]; + if (!card_info) + continue; + + for (j = 0; j < MAX_FUNCTION_NUM; j++) { + if (handle == card_info->func_handle_array[j]) { + flag = true; + break; + } + } + + if (flag) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Id(%d) can't find this card\n", i); + return; + } + + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + if (!dbgtool_info) { + pr_err("Dbgtool info is null\n"); + return; + } + + intr = (struct ffm_intr_info *)buf_in; + + if (!dbgtool_info->ffm) + return; + + ffm_idx = dbgtool_info->ffm->ffm_num; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + nic_info(hwdev->dev_hdl, "%s: recv intr, ffm_idx: %d\n", + __func__, ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr = + intr->err_csr_addr; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_value = + intr->err_csr_value; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc.time); + + /* Calculate the time in date value to tm */ + rtc_time_to_tm((unsigned long)txc.time.tv_sec + + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = + (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday;
+ dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +/*lint -restore*/ + +/*lint -save -e785 -e438*/ +static const struct file_operations dbgtool_file_operations = { + .owner = THIS_MODULE, + .open = dbgtool_knl_open, + .release = dbgtool_knl_release, + .read = dbgtool_knl_read, + .write = dbgtool_knl_write, + .unlocked_ioctl = dbgtool_knl_unlocked_ioctl, + .mmap = hinic_mem_mmap, +}; + +/** + * hinic_dbgtool_knl_init - dbgtool character device init + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + * Return: 0 - success, negative - failure + */ +int hinic_dbgtool_knl_init(void *vhwdev, void *chip_node) +{ + int ret = 0; + int id; + struct dbgtool_k_glb_info *dbgtool_info; + struct device *pdevice; + struct card_node *chip_info = (struct card_node *)chip_node; + struct hinic_hwdev *hwdev = vhwdev; + + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + if (ret) { + pr_err("Failed to sysfs create file\n"); + return ret; + } + + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = hwdev; + + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, HINIC_SELF_CMD_UP2PF_FFM, + ffm_intr_msg_record); + + if (chip_info->dbgtool_info) { + chip_info->func_num++; + return 0; + } + + dbgtool_info = (struct dbgtool_k_glb_info *) + kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL); + if (!dbgtool_info) { + pr_err("Failed to allocate dbgtool_info\n"); + ret = -EFAULT; + goto dbgtool_info_fail; + } + chip_info->dbgtool_info = dbgtool_info; + + /* FFM init */ + dbgtool_info->ffm = (struct ffm_record_info *) + kzalloc(sizeof(struct ffm_record_info), + GFP_KERNEL); + if (!dbgtool_info->ffm) { + pr_err("Failed to allocate cell contexts for a chain\n"); + ret = -EFAULT; + goto dbgtool_info_ffm_fail; + } + + sema_init(&dbgtool_info->dbgtool_sem, 1); + + ret = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id); + if (ret <= 0) { + pr_err("Failed to get hinic id\n"); + goto sscanf_chdev_fail; + } + + g_hinic_card_node_array[id] = chip_info; + chip_info->func_num++; + + if (g_dbgtool_init_flag) { + g_dbgtool_ref_cnt++; + /* already initialized */ + return 0; + } + + /*alloc device id*/ + ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL); + if (ret) { + pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret); + goto alloc_chdev_fail; + } + + /*init device*/ + cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations); + + /*add device*/ + ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1); + if (ret) { + pr_err("Add dgbtool dev fail, ret=0x%x\n", ret); + goto cdev_add_fail; + } + + /*lint -save -e160*/ + dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL); + /*lint -restore*/ + if (IS_ERR(dbgtool_d_class)) { + pr_err("Create dgbtool class fail\n"); + ret = -EFAULT; + goto cls_create_fail; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(dbgtool_d_class, NULL, + dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL); + if (IS_ERR(pdevice)) { + pr_err("Create dgbtool device fail\n"); + ret = -EFAULT; + goto dev_create_fail; + } + g_dbgtool_init_flag = 1; + g_dbgtool_ref_cnt = 1; + mutex_init(&g_hinic_addr_lock); + + return 0; + +dev_create_fail: + class_destroy(dbgtool_d_class); +cls_create_fail: + cdev_del(&(dbgtool_chr_dev)); 
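+/*
+ * The labels below continue unwinding the initialization steps above in
+ * reverse order: chrdev region, card-node slot, FFM buffer, dbgtool_info,
+ * and finally the FFM callback registration and the sysfs attribute file.
+ */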
+cdev_add_fail: + unregister_chrdev_region(dbgtool_dev_id, 1); +alloc_chdev_fail: + g_hinic_card_node_array[id] = NULL; +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + dbgtool_info = NULL; + chip_info->dbgtool_info = NULL; +dbgtool_info_fail: + hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM); + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL; + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + return ret; +} + +/** + * hinic_dbgtool_knl_deinit - dbgtool character device deinit + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + */ +void hinic_dbgtool_knl_deinit(void *vhwdev, void *chip_node) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *chip_info = (struct card_node *)chip_node; + int id; + int err; + struct hinic_hwdev *hwdev = vhwdev; + + if (hinic_func_type(hwdev) == TYPE_VF) + return; + + hinic_comm_recv_up_self_cmd_unreg(hwdev, HINIC_SELF_CMD_UP2PF_FFM); + + chip_info->func_handle_array[hinic_global_func_id(hwdev)] = NULL; + + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + + chip_info->func_num--; + if (chip_info->func_num) + return; + + err = sscanf(chip_info->chip_name, HINIC_CHIP_NAME "%d", &id); + if (err <= 0) + pr_err("Failed to get hinic id\n"); + + g_hinic_card_node_array[id] = NULL; + + dbgtool_info = chip_info->dbgtool_info; + /* FFM deinit */ + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + + (void)hinic_dbgtool_knl_free_mem(id); + + if (g_dbgtool_init_flag) { + if ((--g_dbgtool_ref_cnt)) + return; + } + + if (!dbgtool_d_class) + return; + + device_destroy(dbgtool_d_class, dbgtool_dev_id); + class_destroy(dbgtool_d_class); + dbgtool_d_class = NULL; + + cdev_del(&(dbgtool_chr_dev)); + unregister_chrdev_region(dbgtool_dev_id, 1); + + g_dbgtool_init_flag = 0; +} + +/*lint -restore*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h new file mode 100644 index 0000000000000000000000000000000000000000..05323db52eff912cc2ba1e2250ccde830c0ba8ae --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbgtool_knl.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
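+ *
+ * This header defines the ioctl command numbers and parameter layouts shared
+ * between the dbgtool character device (hinic_dbgtool_knl.c) and its
+ * user-space debug tool. The driver dispatches only on _IOC_NR() of the
+ * request, so an illustrative (not authoritative) invocation from user space
+ * could look like:
+ *
+ *	struct dbgtool_param p = { .chip_name = "hinic0" };
+ *
+ *	ioctl(fd, _IOWR(DBG_TOOL_MAGIC, DBGTOOL_CMD_FFM_RD,
+ *			struct dbgtool_param), &p);
+ *
+ * where fd is the opened dbgtool device node and "hinic0" stands in for the
+ * real chip name; the encoding actually used by the tool may differ.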
+ * + */ + +#ifndef __DBGTOOL_KNL_H__ +#define __DBGTOOL_KNL_H__ + +#define DBG_TOOL_MAGIC 'w' + +/* dbgtool command type */ +/* You can add the required dbgtool through these commands + * can invoke all X86 kernel mode driver interface + */ +typedef enum { + DBGTOOL_CMD_API_RD = 0, + DBGTOOL_CMD_API_WR, + + DBGTOOL_CMD_FFM_RD, + DBGTOOL_CMD_FFM_CLR, + + DBGTOOL_CMD_PF_DEV_INFO_GET, + + DBGTOOL_CMD_MSG_2_UP, + + DBGTOOL_CMD_FREE_MEM, + DBGTOOL_CMD_NUM +} dbgtool_cmd; + +struct api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +/* Interrupt at most records, interrupt will be recorded in the FFM */ +#define FFM_RECORD_NUM_MAX 64 + +struct ffm_intr_tm_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + + u8 sec; /* second*/ + u8 min; /* minute */ + u8 hour; /* hour */ + u8 mday; /* day */ + u8 mon; /* month */ + u16 year; /* year */ +}; + +struct ffm_record_info { + u32 ffm_num; + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; +}; + +struct msg_2_up { + u8 pf_id; /* which pf sends messages to the up */ + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; + struct api_cmd_wr api_wr; + struct pf_dev_info *dev_info; + struct ffm_record_info *ffm_rd; + struct msg_2_up msg2up; + } param; + char chip_name[16]; +}; + +#define MAX_CARD_NUM 64 +#define DBGTOOL_PAGE_ORDER 10 + +int hinic_dbgtool_knl_init(void *vhwdev, void *chip_node); +void hinic_dbgtool_knl_deinit(void *vhwdev, void *chip_node); +int hinic_mem_mmap(struct file *filp, struct vm_area_struct *vma); +void hinic_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id, + void **g_func_handle_array); +long hinic_dbgtool_knl_free_mem(int id); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.c b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c new file mode 100644 index 0000000000000000000000000000000000000000..033549b249830278c9aed2d365d8a08c4947abe8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.c @@ -0,0 +1,1795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
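+ *
+ * hinic_dcb.c implements Data Center Bridging support: it exposes both the
+ * CEE and the IEEE 802.1Qaz (ETS/PFC) dcbnl callbacks, keeps the user
+ * priority to CoS queue mapping, and pushes the resulting ETS bandwidth,
+ * strict-priority and PFC settings to the hardware.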
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_lld.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_dcb.h" + +#define DCB_HW_CFG_CHG 0 +#define DCB_HW_CFG_NO_CHG 1 +#define DCB_HW_CFG_ERR 2 + +#define DCB_CFG_CHG_PG_TX 0x1 +#define DCB_CFG_CHG_PG_RX 0x2 +#define DCB_CFG_CHG_PFC 0x4 +#define DCB_CFG_CHG_UP_COS 0x8 + +u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up) +{ + struct hinic_tc_cfg *tc_cfg = &dcb_cfg->tc_cfg[0]; + u8 tc = dcb_cfg->pg_tcs; + + if (!tc) + return 0; + + for (tc--; tc; tc--) { + if (BIT(up) & tc_cfg[tc].path[dir].up_map) + break; + } + + return tc; +} + +#define UP_MAPPING(prio) ((u8)(1U << ((HINIC_DCB_UP_MAX - 1) - (prio)))) + +void hinic_dcb_config_init(struct hinic_nic_dev *nic_dev, + struct hinic_dcb_config *dcb_cfg) +{ + struct hinic_tc_cfg *tc; + int i; + + memset(dcb_cfg->tc_cfg, 0, sizeof(dcb_cfg->tc_cfg)); + tc = &dcb_cfg->tc_cfg[0]; + /* All TC mapping to PG0 */ + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + tc = &dcb_cfg->tc_cfg[i]; + tc->path[HINIC_DCB_CFG_TX].pg_id = 0; + tc->path[HINIC_DCB_CFG_TX].bw_pct = 100; + tc->path[HINIC_DCB_CFG_TX].up_map = UP_MAPPING(i); + tc->path[HINIC_DCB_CFG_RX].pg_id = 0; + tc->path[HINIC_DCB_CFG_RX].bw_pct = 100; + tc->path[HINIC_DCB_CFG_RX].up_map = UP_MAPPING(i); + + tc->pfc_en = false; + } + + for (; i < HINIC_DCB_UP_MAX; i++) { + tc->path[HINIC_DCB_CFG_TX].up_map |= UP_MAPPING(i); + tc->path[HINIC_DCB_CFG_RX].up_map |= UP_MAPPING(i); + } + + memset(dcb_cfg->bw_pct, 0, sizeof(dcb_cfg->bw_pct)); + /* Use PG0 in default, PG0's bw is 100% */ + dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][0] = 100; + dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][0] = 100; + dcb_cfg->pfc_state = false; +} + +void hinic_init_ieee_settings(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *ets = &nic_dev->hinic_ieee_ets_default; + struct ieee_pfc *pfc = &nic_dev->hinic_ieee_pfc; + struct hinic_tc_attr *tc_attr; + u8 i; + + memset(ets, 0x0, sizeof(struct ieee_ets)); + memset(&nic_dev->hinic_ieee_ets, 0x0, sizeof(struct ieee_ets)); + ets->ets_cap = dcb_cfg->pg_tcs; + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + tc_attr = &dcb_cfg->tc_cfg[i].path[HINIC_DCB_CFG_TX]; + ets->tc_tsa[i] = tc_attr->prio_type ? 
+ IEEE8021Q_TSA_STRICT : IEEE8021Q_TSA_ETS; + ets->tc_tx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i]; + ets->tc_rx_bw[i] = nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i]; + ets->prio_tc[i] = hinic_dcb_get_tc(dcb_cfg, + HINIC_DCB_CFG_TX, i); + } + memcpy(&nic_dev->hinic_ieee_ets, ets, sizeof(struct ieee_ets)); + + memset(pfc, 0x0, sizeof(struct ieee_pfc)); + pfc->pfc_cap = dcb_cfg->pfc_tcs; + for (i = 0; i < dcb_cfg->pfc_tcs; i++) { + if (dcb_cfg->tc_cfg[i].pfc_en) + pfc->pfc_en |= (u8)BIT(i); + } +} + +static int hinic_set_up_cos_map(struct hinic_nic_dev *nic_dev, + u8 num_cos, u8 *cos_up) +{ + u8 up_valid_bitmap, up_cos[HINIC_DCB_UP_MAX] = {0}; + u8 i; + + up_valid_bitmap = 0; + for (i = 0; i < num_cos; i++) { + if (cos_up[i] >= HINIC_DCB_UP_MAX) { + hinic_info(nic_dev, drv, "Invalid up %d mapping to cos %d\n", + cos_up[i], i); + return -EFAULT; + } + + if (i > 0 && cos_up[i] >= cos_up[i - 1]) { + hinic_info(nic_dev, drv, + "Invalid priority order, should be descending cos[%d]=%d, cos[%d]=%d\n", + i, cos_up[i], i - 1, cos_up[i - 1]); + return -EINVAL; + } + + up_valid_bitmap |= (u8)BIT(cos_up[i]); + if (i == (num_cos - 1)) + up_cos[cos_up[i]] = nic_dev->default_cos_id; + else + up_cos[cos_up[i]] = i; /* reverse up and cos */ + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (up_valid_bitmap & (u8)BIT(i)) + continue; + + up_cos[i] = nic_dev->default_cos_id; + } + + nic_dev->up_valid_bitmap = up_valid_bitmap; + memcpy(nic_dev->up_cos, up_cos, sizeof(up_cos)); + + return hinic_sq_cos_mapping(nic_dev->netdev); +} + +static int hinic_init_up_cos_map(struct hinic_nic_dev *nic_dev, u8 num_cos) +{ + u8 default_map[HINIC_DCB_COS_MAX] = {0}; + bool setted = false; + u8 max_cos, cos_id, up; + int err; + + max_cos = hinic_max_num_cos(nic_dev->hwdev); + if (!max_cos || ((max_cos - 1) < nic_dev->default_cos_id)) { + hinic_err(nic_dev, drv, "Max_cos is %d, default cos id %d\n", + max_cos, nic_dev->default_cos_id); + return -EFAULT; + } + + err = hinic_get_chip_cos_up_map(nic_dev->pdev, &setted, default_map); + if (err) { + hinic_err(nic_dev, drv, "Get chip cos_up map failed\n"); + return -EFAULT; + } + + if (!setted) { + /* Use (max_cos-1)~0 as default user priority and mapping + * to cos0~(max_cos-1) + */ + up = nic_dev->max_cos - 1; + for (cos_id = 0; cos_id < nic_dev->max_cos; cos_id++, up--) + default_map[cos_id] = up; + } + + return hinic_set_up_cos_map(nic_dev, num_cos, default_map); +} + +int hinic_dcb_init(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 num_cos, support_cos = 0, default_cos = 0; + u8 i, cos_valid_bitmap; + int err; + + if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) + return 0; + + cos_valid_bitmap = hinic_cos_valid_bitmap(nic_dev->hwdev); + if (!cos_valid_bitmap) { + hinic_err(nic_dev, drv, "None cos supported\n"); + return -EFAULT; + } + + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + if (cos_valid_bitmap & BIT(i)) { + support_cos++; + default_cos = i; /* Find max cos id as default cos */ + } + } + + hinic_info(nic_dev, drv, "Support num cos %d, default cos %d\n", + support_cos, default_cos); + + num_cos = (u8)(1U << ilog2(support_cos)); + if (num_cos != support_cos) + hinic_info(nic_dev, drv, "Adjust num_cos from %d to %d\n", + support_cos, num_cos); + + nic_dev->dcbx_cap = 0; + nic_dev->max_cos = num_cos; + nic_dev->default_cos_id = default_cos; + dcb_cfg->pfc_tcs = nic_dev->max_cos; + dcb_cfg->pg_tcs = nic_dev->max_cos; + err = hinic_init_up_cos_map(nic_dev, num_cos); + if (err) { + hinic_info(nic_dev, drv, "Initialize up_cos 
mapping failed\n"); + return -EFAULT; + } + + hinic_dcb_config_init(nic_dev, dcb_cfg); + + nic_dev->dcb_changes = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX | + DCB_CFG_CHG_PG_RX | DCB_CFG_CHG_UP_COS; + nic_dev->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + + memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->dcb_cfg, + sizeof(nic_dev->tmp_dcb_cfg)); + memcpy(&nic_dev->save_dcb_cfg, &nic_dev->dcb_cfg, + sizeof(nic_dev->save_dcb_cfg)); + + hinic_init_ieee_settings(nic_dev); + + sema_init(&nic_dev->dcb_sem, 1); + + return 0; +} + +void hinic_set_prio_tc_map(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 prio, tc; + + for (prio = 0; prio < HINIC_DCB_UP_MAX; prio++) { + tc = nic_dev->up_cos[prio]; + if (tc == nic_dev->default_cos_id) + tc = nic_dev->max_cos - 1; + + netdev_set_prio_tc_map(netdev, prio, tc); + } +} + +int hinic_setup_tc(struct net_device *netdev, u8 tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (!FUNC_SUPPORT_DCB(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, + "Current function don't support DCB\n"); + return -EOPNOTSUPP; + } + + if (tc > nic_dev->dcb_cfg.pg_tcs) { + nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %d, max tc: %d\n", + tc, nic_dev->dcb_cfg.pg_tcs); + return -EINVAL; + } + + if (netif_running(netdev)) { + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to close device\n"); + return -EFAULT; + } + } + + if (tc) { + if (tc & (tc - 1)) { + nicif_err(nic_dev, drv, netdev, + "Invalid num_tc: %d, must be power of 2\n", + tc); + return -EINVAL; + } + + netdev_set_num_tc(netdev, tc); + hinic_set_prio_tc_map(nic_dev); + + set_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } else { + netdev_reset_tc(netdev); + + clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } + + hinic_sq_cos_mapping(netdev); + + if (netif_running(netdev)) { + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to open device\n"); + return -EFAULT; + } + } else { + hinic_update_num_qps(netdev); + } + + hinic_configure_dcb(netdev); + + return 0; +} + +u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (wr_flag) { + if (nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs && *dcb_en) { + nicif_err(nic_dev, drv, netdev, + "max_qps: %d is less than %d\n", + nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs); + return 1; + } + if (*dcb_en) + set_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + else + clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + /*hinic_setup_tc need get the nic_mutex lock again */ + mutex_unlock(&nic_dev->nic_mutex); + /* kill the rtnl assert warning */ + rtnl_lock(); + err = hinic_setup_tc(netdev, + *dcb_en ? nic_dev->dcb_cfg.pg_tcs : 0); + rtnl_unlock(); + mutex_lock(&nic_dev->nic_mutex); + + if (!err) + nicif_info(nic_dev, drv, netdev, "%s DCB\n", + *dcb_en ? 
"Enable" : "Disable"); + } else { + *dcb_en = (u8)test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + } + + return !!err; +} + +static u8 hinic_dcbnl_get_state(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); +} + +static u8 hinic_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 curr_state = !!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + int err = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (state == curr_state) + return 0; + + if (nic_dev->max_qps < nic_dev->dcb_cfg.pg_tcs && state) { + nicif_err(nic_dev, drv, netdev, + "max_qps: %d is less than %d\n", + nic_dev->max_qps, nic_dev->dcb_cfg.pg_tcs); + return 1; + } + + err = hinic_setup_tc(netdev, state ? nic_dev->dcb_cfg.pg_tcs : 0); + if (!err) + nicif_info(nic_dev, drv, netdev, "%s DCB\n", + state ? "Enable" : "Disable"); + + return !!err; +} + +static void hinic_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + err = hinic_get_default_mac(nic_dev->hwdev, perm_addr); + if (err) + nicif_err(nic_dev, drv, netdev, "Failed to get default mac\n"); +} + +void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg; + struct hinic_tc_cfg *tc_conf = nic_dev->dcb_cfg.tc_cfg; + u8 i, tc_tmp, j; + + if (flag) { + /*need to clear first */ + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + cfg[i].path[HINIC_DCB_CFG_TX].up_map = 0; + cfg[i].path[HINIC_DCB_CFG_RX].up_map = 0; + } + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + tc_tmp = tc[i]; + cfg[tc_tmp].path[HINIC_DCB_CFG_TX].up_map |= (u8)BIT(i); + cfg[tc_tmp].path[HINIC_DCB_CFG_RX].up_map |= (u8)BIT(i); + cfg[tc_tmp].path[HINIC_DCB_CFG_TX].pg_id = (u8)tc_tmp; + cfg[tc_tmp].path[HINIC_DCB_CFG_RX].pg_id = (u8)tc_tmp; + } + } else { + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + for (j = 0; j < HINIC_DCB_TC_MAX; j++) { + if (tc_conf[i].path[HINIC_DCB_CFG_TX].up_map & + (u8)BIT(j)) { + tc[j] = i; + } + } + } + } +} + +void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev, + u8 percent[], bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int i; + + if (flag) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i] = + percent[i]; + nic_dev->tmp_dcb_cfg.bw_pct[HINIC_DCB_CFG_RX][i] = + percent[i]; + } + } else { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) + percent[i] = + nic_dev->dcb_cfg.bw_pct[HINIC_DCB_CFG_TX][i]; + } +} + +static void hinic_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 pg_id, u8 bw_pct, + u8 up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].prio_type = prio; + if (pg_id != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].pg_id = pg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].bw_pct = bw_pct; + /* if all priority mapping to the same tc, + * up_map is 0xFF, and it's a valid value + */ + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[0].up_map = up_map; +} + +static void hinic_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct 
hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + nic_dev->tmp_dcb_cfg.bw_pct[0][bwg_id] = bw_pct; +} + +static void hinic_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 pg_id, u8 bw_pct, + u8 up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].prio_type = prio; + if (pg_id != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].pg_id = pg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].bw_pct = bw_pct; + + nic_dev->tmp_dcb_cfg.tc_cfg[tc].path[1].up_map = up_map; +} + +static void hinic_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + nic_dev->tmp_dcb_cfg.bw_pct[1][bwg_id] = bw_pct; +} + +static void hinic_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *pg_id, u8 *bw_pct, + u8 *up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[0].prio_type; + *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[0].pg_id; + *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[0].bw_pct; + *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[0].up_map; +} + +static void hinic_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + *bw_pct = nic_dev->dcb_cfg.bw_pct[0][bwg_id]; +} + +static void hinic_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *pg_id, u8 *bw_pct, + u8 *up_map) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (tc > HINIC_DCB_TC_MAX - 1) + return; + + *prio = nic_dev->dcb_cfg.tc_cfg[tc].path[1].prio_type; + *pg_id = nic_dev->dcb_cfg.tc_cfg[tc].path[1].pg_id; + *bw_pct = nic_dev->dcb_cfg.tc_cfg[tc].path[1].bw_pct; + *up_map = nic_dev->dcb_cfg.tc_cfg[tc].path[1].up_map; +} + +static void hinic_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (bwg_id > HINIC_DCB_PG_MAX - 1) + return; + + *bw_pct = nic_dev->dcb_cfg.bw_pct[1][bwg_id]; +} + +void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 i; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en = !!(setting & BIT(i)); + if (nic_dev->tmp_dcb_cfg.tc_cfg[i].pfc_en != + nic_dev->dcb_cfg.tc_cfg[i].pfc_en) { + nic_dev->tmp_dcb_cfg.pfc_state = true; + } + } +} + +void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev, + u8 *setting, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tc_cfg *cfg = nic_dev->tmp_dcb_cfg.tc_cfg; + struct hinic_tc_cfg *conf = nic_dev->dcb_cfg.tc_cfg; + u8 i; + + if (flag) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + cfg[i].path[HINIC_DCB_CFG_TX].prio_type = + !!(*setting & BIT(i)) ? 2 : 0; + cfg[i].path[HINIC_DCB_CFG_RX].prio_type = + !!(*setting & BIT(i)) ? 
2 : 0; + } + } else { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + *setting = *setting | + (u8)((u32)(!!(conf[i].path[0].prio_type)) << i); + } + } +} + +void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev, + u8 *value, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag) + nic_dev->tmp_dcb_cfg.pfc_state = !!(*value); + else + *value = nic_dev->tmp_dcb_cfg.pfc_state; +} + +void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev, + u8 *value, bool flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (flag) { + if (*value) + set_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + else + clear_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + } else { + *value = (u8)test_bit(HINIC_ETS_ENABLE, &nic_dev->flags); + } +} + +static void hinic_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en = !!setting; + if (nic_dev->tmp_dcb_cfg.tc_cfg[prio].pfc_en != + nic_dev->dcb_cfg.tc_cfg[prio].pfc_en) + nic_dev->tmp_dcb_cfg.pfc_state = true; +} + +void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 i; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + *setting = *setting | + (u8)((u32)(nic_dev->dcb_cfg.tc_cfg[i].pfc_en) << i); + } +} + +void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + *tc_num = nic_dev->max_cos; +} + +static void hinic_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (prio > HINIC_DCB_TC_MAX - 1) + return; + + *setting = nic_dev->dcb_cfg.tc_cfg[prio].pfc_en; +} + +static u8 hinic_dcbnl_getcap(struct net_device *netdev, int cap_id, + u8 *dcb_cap) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (cap_id) { + case DCB_CAP_ATTR_PG: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_PFC: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *dcb_cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *dcb_cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *dcb_cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *dcb_cap = true; + break; + case DCB_CAP_ATTR_BCN: + *dcb_cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *dcb_cap = nic_dev->dcbx_cap; + break; + default: + *dcb_cap = false; + break; + } + + return 0; +} + +static u8 hinic_sync_tc_cfg(struct hinic_tc_cfg *tc_dst, + struct hinic_tc_cfg *tc_src, int dir) +{ + u8 tc_dir_change = (dir == HINIC_DCB_CFG_TX) ? 
+ DCB_CFG_CHG_PG_TX : DCB_CFG_CHG_PG_RX; + u8 changes = 0; + + if (tc_dst->path[dir].prio_type != tc_src->path[dir].prio_type) { + tc_dst->path[dir].prio_type = tc_src->path[dir].prio_type; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].pg_id != tc_src->path[dir].pg_id) { + tc_dst->path[dir].pg_id = tc_src->path[dir].pg_id; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].bw_pct != tc_src->path[dir].bw_pct) { + tc_dst->path[dir].bw_pct = tc_src->path[dir].bw_pct; + changes |= tc_dir_change; + } + + if (tc_dst->path[dir].up_map != tc_src->path[dir].up_map) { + tc_dst->path[dir].up_map = tc_src->path[dir].up_map; + changes |= (tc_dir_change | DCB_CFG_CHG_PFC); + } + + return changes; +} + +static u8 hinic_sync_dcb_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct hinic_dcb_config *tmp_dcb_cfg = &nic_dev->tmp_dcb_cfg; + struct hinic_tc_cfg *tc_dst, *tc_src; + u8 changes = 0; + int i; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + tc_src = &tmp_dcb_cfg->tc_cfg[i]; + tc_dst = &dcb_cfg->tc_cfg[i]; + + changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_TX); + changes |= hinic_sync_tc_cfg(tc_dst, tc_src, HINIC_DCB_CFG_RX); + } + + for (i = 0; i < HINIC_DCB_PG_MAX; i++) { + if (dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] != + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]) { + dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i] = + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]; + changes |= DCB_CFG_CHG_PG_TX; + } + + if (dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] != + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]) { + dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i] = + tmp_dcb_cfg->bw_pct[HINIC_DCB_CFG_RX][i]; + changes |= DCB_CFG_CHG_PG_RX; + } + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (dcb_cfg->tc_cfg[i].pfc_en != + tmp_dcb_cfg->tc_cfg[i].pfc_en) { + dcb_cfg->tc_cfg[i].pfc_en = + tmp_dcb_cfg->tc_cfg[i].pfc_en; + changes |= DCB_CFG_CHG_PFC; + } + } + + if (dcb_cfg->pfc_state != tmp_dcb_cfg->pfc_state) { + dcb_cfg->pfc_state = tmp_dcb_cfg->pfc_state; + changes |= DCB_CFG_CHG_PFC; + } + + return changes; +} + +static void hinic_dcb_get_pfc_map(struct hinic_nic_dev *nic_dev, + struct hinic_dcb_config *dcb_cfg, u8 *pfc_map) +{ + u8 i, up; + u8 pfc_en = 0, outof_range_pfc = 0; + + for (i = 0; i < dcb_cfg->pfc_tcs; i++) { + up = (HINIC_DCB_UP_MAX - 1) - i; + if (dcb_cfg->tc_cfg[up].pfc_en) + *pfc_map |= (u8)BIT(up); + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + up = (HINIC_DCB_UP_MAX - 1) - i; + if (dcb_cfg->tc_cfg[up].pfc_en) + pfc_en |= (u8)BIT(up); + } + + *pfc_map = pfc_en & nic_dev->up_valid_bitmap; + outof_range_pfc = pfc_en & (~nic_dev->up_valid_bitmap); + + if (outof_range_pfc) + hinic_info(nic_dev, drv, + "PFC setting out of range, 0x%x will be ignored\n", + outof_range_pfc); +} + +static bool is_cos_in_use(u8 cos, u8 up_valid_bitmap, u8 *up_cos) +{ + u32 i; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(up_valid_bitmap & BIT(i))) + continue; + + if (cos == up_cos[i]) + return true; + } + + return false; +} + +static void hinic_dcb_adjust_up_bw(struct hinic_nic_dev *nic_dev, u8 *up_pgid, + u8 *up_bw) +{ + u8 tmp_cos, pg_id; + u16 bw_all; + u8 bw_remain, cos_cnt; + + for (pg_id = 0; pg_id < HINIC_DCB_PG_MAX; pg_id++) { + bw_all = 0; + cos_cnt = 0; + /* Find all up mapping to the same pg */ + for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) { + if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap, + nic_dev->up_cos)) + continue; + + if (up_pgid[tmp_cos] == pg_id) { + bw_all += up_bw[tmp_cos]; + cos_cnt++; + } + } + + if (bw_all <= 100 || 
!cos_cnt) + continue; + + /* Calculate up percent of bandwidth group, The sum of + * percentages for priorities in the same priority group + * must be 100 + */ + bw_remain = 100 % cos_cnt; + for (tmp_cos = 0; tmp_cos < HINIC_DCB_UP_MAX; tmp_cos++) { + if (!is_cos_in_use(tmp_cos, nic_dev->up_valid_bitmap, + nic_dev->up_cos)) + continue; + + if (up_pgid[tmp_cos] == pg_id) { + up_bw[tmp_cos] = + (u8)(100 * up_bw[tmp_cos] / bw_all + + (u8)!!bw_remain); + if (bw_remain) + bw_remain--; + } + } + } +} + +static void hinic_dcb_dump_configuration(struct hinic_nic_dev *nic_dev, + u8 *up_tc, u8 *up_pgid, u8 *up_bw, + u8 *pg_bw, u8 *up_strict, u8 *bw_pct) +{ + u8 i; + u8 cos; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + hinic_info(nic_dev, drv, + "up: %d, cos: %d, tc: %d, pgid: %d, bw: %d, tsa: %d\n", + i, cos, up_tc[cos], up_pgid[cos], up_bw[cos], + up_strict[cos]); + } + + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + hinic_info(nic_dev, drv, "pgid: %d, bw: %d\n", i, pg_bw[i]); +} + +/* Ucode thread timeout is 210ms, must be lagger then 210ms */ +#define HINIC_WAIT_PORT_IO_STOP 250 + +static int hinic_stop_port_traffic_flow(struct hinic_nic_dev *nic_dev) +{ + int err = 0; + + down(&nic_dev->dcb_sem); + + if (nic_dev->disable_port_cnt++ != 0) + goto out; + + err = hinic_force_port_disable(nic_dev); + if (err) { + hinic_err(nic_dev, drv, "Failed to disable port\n"); + goto set_port_err; + } + + err = hinic_set_port_funcs_state(nic_dev->hwdev, false); + if (err) { + hinic_err(nic_dev, drv, + "Failed to disable all functions in port\n"); + goto set_port_funcs_err; + } + + hinic_info(nic_dev, drv, "Stop port traffic flow\n"); + + goto out; + +set_port_funcs_err: + hinic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); + +set_port_err: +out: + if (err) + nic_dev->disable_port_cnt--; + + up(&nic_dev->dcb_sem); + + return err; +} + +static int hinic_start_port_traffic_flow(struct hinic_nic_dev *nic_dev) +{ + int err; + + down(&nic_dev->dcb_sem); + + nic_dev->disable_port_cnt--; + if (nic_dev->disable_port_cnt > 0) { + up(&nic_dev->dcb_sem); + return 0; + } + + nic_dev->disable_port_cnt = 0; + up(&nic_dev->dcb_sem); + + err = hinic_force_set_port_state(nic_dev, + !!netif_running(nic_dev->netdev)); + if (err) + hinic_err(nic_dev, drv, "Failed to disable port\n"); + + err = hinic_set_port_funcs_state(nic_dev->hwdev, true); + if (err) + hinic_err(nic_dev, drv, + "Failed to disable all functions in port\n"); + + hinic_info(nic_dev, drv, "Start port traffic flow\n"); + + return err; +} + +static int __set_hw_cos_up_map(struct hinic_nic_dev *nic_dev) +{ + u8 cos, cos_valid_bitmap, cos_up_map[HINIC_DCB_COS_MAX] = {0}; + u8 i; + int err; + + cos_valid_bitmap = 0; + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + cos_up_map[cos] = i; + cos_valid_bitmap |= (u8)BIT(cos); + } + + err = hinic_dcb_set_cos_up_map(nic_dev->hwdev, cos_valid_bitmap, + cos_up_map); + if (err) { + hinic_info(nic_dev, drv, "Set cos_up map failed\n"); + return err; + } + + return 0; +} + +static int __set_hw_ets(struct hinic_nic_dev *nic_dev) +{ + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct hinic_tc_attr *tc_attr; + u8 up_tc[HINIC_DCB_UP_MAX] = {0}; + u8 up_pgid[HINIC_DCB_UP_MAX] = {0}; + u8 up_bw[HINIC_DCB_UP_MAX] = {0}; + u8 pg_bw[HINIC_DCB_UP_MAX] = {0}; + u8 up_strict[HINIC_DCB_UP_MAX] = {0}; + 
u8 i, tc, cos; + int err; + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (!(nic_dev->up_valid_bitmap & BIT(i))) + continue; + + cos = nic_dev->up_cos[i]; + if ((nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + up_tc[cos] = my_ets->prio_tc[i]; + up_pgid[cos] = my_ets->prio_tc[i]; + up_bw[cos] = 100; + up_strict[i] = + (my_ets->tc_tsa[cos] == IEEE8021Q_TSA_STRICT) ? + HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR; + + } else { + tc = hinic_dcb_get_tc(dcb_cfg, HINIC_DCB_CFG_TX, i); + tc_attr = &dcb_cfg->tc_cfg[tc].path[HINIC_DCB_CFG_TX]; + up_tc[cos] = tc; + up_pgid[cos] = tc_attr->pg_id; + up_bw[cos] = tc_attr->bw_pct; + up_strict[cos] = tc_attr->prio_type ? + HINIC_DCB_TSA_TC_SP : HINIC_DCB_TSA_TC_DWRR; + } + } + + hinic_dcb_adjust_up_bw(nic_dev, up_pgid, up_bw); + + if (nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + pg_bw[i] = my_ets->tc_tx_bw[i]; + } else { + for (i = 0; i < HINIC_DCB_PG_MAX; i++) + pg_bw[i] = dcb_cfg->bw_pct[HINIC_DCB_CFG_TX][i]; + } + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + hinic_dcb_dump_configuration(nic_dev, up_tc, up_pgid, + up_bw, pg_bw, up_strict, + pg_bw); + + err = hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw, up_pgid, + up_bw, up_strict); + if (err) { + hinic_err(nic_dev, drv, "Failed to set ets with mode: %d\n", + nic_dev->dcbx_cap); + return err; + } + + hinic_info(nic_dev, drv, "Set ets to hw done with mode: %d\n", + nic_dev->dcbx_cap); + + return 0; +} + +u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 state = DCB_HW_CFG_CHG; + int err; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_CHG; + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return DCB_HW_CFG_ERR; + /* wait all traffic flow stopped */ + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { + err = __set_hw_cos_up_map(nic_dev); + if (err) { + hinic_info(nic_dev, drv, + "Set cos_up map to hardware failed\n"); + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); + } + + if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) { + err = __set_hw_ets(nic_dev); + if (err) { + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= + (~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)); + } + +out: + hinic_start_port_traffic_flow(nic_dev); + + return state; +} + +static int hinic_dcbnl_set_df_ieee_cfg(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_ets *ets_default = &nic_dev->hinic_ieee_ets_default; + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct ieee_pfc pfc = {0}; + int err1 = 0; + int err2 = 0; + u8 flag = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return 0; + + if (memcmp(my_ets, ets_default, sizeof(struct ieee_ets))) + flag |= (u8)BIT(0); + + if (my_pfc->pfc_en) + flag |= (u8)BIT(1); + if (!flag) + return 0; + + err1 = hinic_stop_port_traffic_flow(nic_dev); + if (err1) + return err1; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (flag & BIT(0)) { + memcpy(my_ets, ets_default, sizeof(struct ieee_ets)); + err1 = __set_hw_ets(nic_dev); + } + if (flag & BIT(1)) { + my_pfc->pfc_en = 0; + err2 = hinic_dcb_set_pfc(nic_dev->hwdev, false, pfc.pfc_en); + if (err2) + nicif_err(nic_dev, drv, netdev, "Failed to 
set pfc\n"); + } + + hinic_start_port_traffic_flow(nic_dev); + + return (err1 || err2) ? -EINVAL : 0; +} + +u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 state = DCB_HW_CFG_CHG; + int err; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_CHG; + + if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { + u8 pfc_map = 0; + + hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map); + err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, + pfc_map); + if (err) { + hinic_info(nic_dev, drv, "Failed to %s PFC\n", + dcb_cfg->pfc_state ? "enable" : "disable"); + state = DCB_HW_CFG_ERR; + goto out; + } + + if (dcb_cfg->pfc_state) + hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", + pfc_map); + else + hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); + } +out: + + return state; +} + +u8 hinic_dcbnl_set_all(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + u8 state = DCB_HW_CFG_CHG; + int err; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return DCB_HW_CFG_ERR; + + nic_dev->dcb_changes |= hinic_sync_dcb_cfg(nic_dev); + if (!nic_dev->dcb_changes) + return DCB_HW_CFG_NO_CHG; + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return DCB_HW_CFG_ERR; + /* wait all traffic flow stopped */ + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { + err = __set_hw_cos_up_map(nic_dev); + if (err) { + hinic_info(nic_dev, drv, + "Set cos_up map to hardware failed\n"); + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); + } + + if (nic_dev->dcb_changes & (DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)) { + err = __set_hw_ets(nic_dev); + if (err) { + state = DCB_HW_CFG_ERR; + goto out; + } + + nic_dev->dcb_changes &= + (~(DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX)); + } + + if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { + u8 pfc_map = 0; + + hinic_dcb_get_pfc_map(nic_dev, dcb_cfg, &pfc_map); + err = hinic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, + pfc_map); + if (err) { + hinic_info(nic_dev, drv, "Failed to %s PFC\n", + dcb_cfg->pfc_state ? 
"enable" : "disable"); + state = DCB_HW_CFG_ERR; + goto out; + } + + if (dcb_cfg->pfc_state) + hinic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", + pfc_map); + else + hinic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); + + nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); + } + +out: + hinic_start_port_traffic_flow(nic_dev); + + return state; +} + +static int hinic_dcbnl_ieee_get_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + + ets->ets_cap = my_ets->ets_cap; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + + return 0; +} + +static int hinic_dcbnl_ieee_set_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + struct ieee_ets back_ets; + int err, i; + u8 max_tc = 0; + u16 total_bw = 0; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!memcmp(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)) && + !memcmp(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)) && + !memcmp(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)) && + !memcmp(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa))) + return 0; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) + total_bw += ets->tc_tx_bw[i]; + if (!total_bw) + return -EINVAL; + + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + } + if (max_tc) + max_tc++; + + if (max_tc > dcb_cfg->pg_tcs) + return -EINVAL; + + max_tc = max_tc ? 
dcb_cfg->pg_tcs : 0; + memcpy(&back_ets, my_ets, sizeof(struct ieee_ets)); + memcpy(my_ets->tc_tx_bw, ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(my_ets->tc_rx_bw, ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(my_ets->prio_tc, ets->prio_tc, sizeof(ets->prio_tc)); + memcpy(my_ets->tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + if (max_tc != netdev_get_num_tc(netdev)) { + err = hinic_setup_tc(netdev, max_tc); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to setup tc with max_tc: %d, err: %d\n", + max_tc, err); + memcpy(my_ets, &back_ets, sizeof(struct ieee_ets)); + return err; + } + } + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return err; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + err = __set_hw_ets(nic_dev); + + hinic_start_port_traffic_flow(nic_dev); + + return err; +} + +static int hinic_dcbnl_ieee_get_pfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + + pfc->pfc_en = my_pfc->pfc_en; + pfc->pfc_cap = my_pfc->pfc_cap; + + return 0; +} + +static int hinic_dcbnl_ieee_set_pfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + struct ieee_pfc *my_pfc = &nic_dev->hinic_ieee_pfc; + struct ieee_ets *my_ets = &nic_dev->hinic_ieee_ets; + int err, i; + u8 pfc_map, max_tc; + u8 outof_range_pfc = 0; + bool pfc_en; + + if (!(nic_dev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (my_pfc->pfc_en == pfc->pfc_en) + return 0; + + pfc_map = pfc->pfc_en & nic_dev->up_valid_bitmap; + outof_range_pfc = pfc->pfc_en & (~nic_dev->up_valid_bitmap); + if (outof_range_pfc) + nicif_info(nic_dev, drv, netdev, + "pfc setting out of range, 0x%x will be ignored\n", + outof_range_pfc); + + err = hinic_stop_port_traffic_flow(nic_dev); + if (err) + return err; + if (netdev->reg_state == NETREG_REGISTERED) + msleep(HINIC_WAIT_PORT_IO_STOP); + + pfc_en = pfc_map ? true : false; + max_tc = 0; + for (i = 0; i < dcb_cfg->pg_tcs; i++) { + if (my_ets->prio_tc[i] > max_tc) + max_tc = my_ets->prio_tc[i]; + } + pfc_en = max_tc ? 
pfc_en : false; + + err = hinic_dcb_set_pfc(nic_dev->hwdev, pfc_en, pfc_map); + if (err) { + hinic_info(nic_dev, drv, + "Failed to set pfc to hw with pfc_map: 0x%x err: %d\n", + pfc_map, err); + hinic_start_port_traffic_flow(nic_dev); + return err; + } + + hinic_start_port_traffic_flow(nic_dev); + my_pfc->pfc_en = pfc->pfc_en; + hinic_info(nic_dev, drv, + "Set pfc successfully with pfc_map: 0x%x, pfc_en: %d\n", + pfc_map, pfc_en); + + return 0; +} + +static int hinic_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_config *dcb_cfg = &nic_dev->dcb_cfg; + + if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + return -EINVAL; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = dcb_cfg->pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = dcb_cfg->pfc_tcs; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int hinic_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + return -EINVAL; +} + +static u8 hinic_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return (u8)nic_dev->dcb_cfg.pfc_state; +} + +static void hinic_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->tmp_dcb_cfg.pfc_state = !!state; +} + +static u8 hinic_dcbnl_getdcbx(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->dcbx_cap; +} + +static u8 hinic_dcbnl_setdcbx(struct net_device *netdev, u8 mode) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + ((mode & DCB_CAP_DCBX_LLD_MANAGED) && + (!(mode & DCB_CAP_DCBX_HOST)))) { + nicif_info(nic_dev, drv, netdev, + "Set dcbx failed with invalid mode: %d\n", mode); + return 1; + } + + if (nic_dev->dcbx_cap == mode) + return 0; + nic_dev->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = DCB_CFG_CHG_PFC | DCB_CFG_CHG_PG_TX | + DCB_CFG_CHG_PG_RX; + nic_dev->dcb_changes |= mask; + hinic_dcbnl_set_all(netdev); + } else if (mode & DCB_CAP_DCBX_VER_IEEE) { + if (netdev_get_num_tc(netdev)) { + err = hinic_setup_tc(netdev, 0); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to setup tc with mode: %d\n", + mode); + return 1; + } + } + + hinic_dcbnl_set_df_ieee_cfg(netdev); + hinic_force_port_relink(nic_dev->hwdev); + } else { + err = hinic_setup_tc(netdev, 0); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to setup tc with mode: %d\n", mode); + return 1; + } + } + nicif_info(nic_dev, drv, netdev, "Change dcbx mode to 0x%x\n", mode); + + return 0; +} + +const struct dcbnl_rtnl_ops hinic_dcbnl_ops = { + /* IEEE 802.1Qaz std */ + .ieee_getets = hinic_dcbnl_ieee_get_ets, + .ieee_setets = hinic_dcbnl_ieee_set_ets, + .ieee_getpfc = hinic_dcbnl_ieee_get_pfc, + .ieee_setpfc = hinic_dcbnl_ieee_set_pfc, + + /* CEE std */ + .getstate = hinic_dcbnl_get_state, + .setstate = hinic_dcbnl_set_state, + .getpermhwaddr = hinic_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = hinic_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = hinic_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = hinic_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = hinic_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = hinic_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = hinic_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = hinic_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = hinic_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = hinic_dcbnl_set_pfc_cfg, + 
.getpfccfg = hinic_dcbnl_get_pfc_cfg, + .setall = hinic_dcbnl_set_all, + .getcap = hinic_dcbnl_getcap, + .getnumtcs = hinic_dcbnl_getnumtcs, + .setnumtcs = hinic_dcbnl_setnumtcs, + .getpfcstate = hinic_dcbnl_getpfcstate, + .setpfcstate = hinic_dcbnl_setpfcstate, + + /* DCBX configuration */ + .getdcbx = hinic_dcbnl_getdcbx, + .setdcbx = hinic_dcbnl_setdcbx, +}; + +int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 state; + + hinic_dcb_config_init(nic_dev, &nic_dev->tmp_dcb_cfg); + state = hinic_dcbnl_set_all(netdev); + if (state == DCB_HW_CFG_ERR) + return -EFAULT; + + if (state == DCB_HW_CFG_CHG) + hinic_info(nic_dev, drv, + "Reset hardware DCB configuration done\n"); + + return 0; +} + +void hinic_configure_dcb(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) { + memcpy(&nic_dev->tmp_dcb_cfg, &nic_dev->save_dcb_cfg, + sizeof(nic_dev->tmp_dcb_cfg)); + hinic_dcbnl_set_all(netdev); + } else { + memcpy(&nic_dev->save_dcb_cfg, &nic_dev->tmp_dcb_cfg, + sizeof(nic_dev->save_dcb_cfg)); + err = hinic_dcb_reset_hw_config(nic_dev); + if (err) + nicif_warn(nic_dev, drv, netdev, + "Failed to reset hw dcb configuration\n"); + } +} + +static bool __is_cos_up_map_change(struct hinic_nic_dev *nic_dev, u8 *cos_up) +{ + u8 cos, up; + + for (cos = 0; cos < nic_dev->max_cos; cos++) { + up = cos_up[cos]; + if (BIT(up) != (nic_dev->up_valid_bitmap & BIT(up))) + return true; + } + + return false; +} + +int __set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up) +{ + struct net_device *netdev; + u8 state; + int err = 0; + + if (!nic_dev || !cos_up) + return -EINVAL; + + netdev = nic_dev->netdev; + + if (test_and_set_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { + nicif_err(nic_dev, drv, netdev, + "Cos_up map setting in inprocess, please try again later\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Set cos2up: %d%d%d%d%d%d%d%d\n", + cos_up[0], cos_up[1], cos_up[2], cos_up[3], + cos_up[4], cos_up[5], cos_up[6], cos_up[7]); + + if (!__is_cos_up_map_change(nic_dev, cos_up)) { + nicif_err(nic_dev, drv, netdev, + "Same mapping, don't need to change anything\n"); + err = 0; + goto out; + } + + err = hinic_set_up_cos_map(nic_dev, nic_dev->max_cos, cos_up); + if (err) { + err = -EFAULT; + goto out; + } + + nic_dev->dcb_changes = DCB_CFG_CHG_PG_TX | DCB_CFG_CHG_PG_RX | + DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS; + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) { + /* Change map in kernel */ + hinic_set_prio_tc_map(nic_dev); + + state = hinic_dcbnl_set_all(netdev); + if (state == DCB_HW_CFG_ERR) { + nicif_err(nic_dev, drv, netdev, + "Reconfig dcb to hw failed\n"); + err = -EFAULT; + } + } + +out: + clear_bit(HINIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); + + return err; +} + +int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos) +{ + if (!nic_dev || !num_cos) + return -EINVAL; + + *num_cos = nic_dev->max_cos; + + return 0; +} + +int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *num_cos, + u8 *cos_up) +{ + u8 up, cos; + + if (!nic_dev || !cos_up) + return -EINVAL; + + for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) { + for (up = 0; up < HINIC_DCB_UP_MAX; up++) { + if (!(nic_dev->up_valid_bitmap & BIT(up))) + continue; + + if (nic_dev->up_cos[up] == cos || + nic_dev->up_cos[up] == nic_dev->default_cos_id) + cos_up[cos] = up; + } + } + + *num_cos = nic_dev->max_cos; + + return 0; +} + +static int 
__stop_port_flow(void *uld_array[], u32 num_dev) +{ + struct hinic_nic_dev *tmp_dev; + u32 i, idx; + int err; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct hinic_nic_dev *)uld_array[idx]; + err = hinic_stop_port_traffic_flow(tmp_dev); + if (err) { + nicif_err(tmp_dev, drv, tmp_dev->netdev, + "Stop port traffic flow failed\n"); + goto stop_port_err; + } + } + + /* wait all traffic flow stopped */ + msleep(HINIC_WAIT_PORT_IO_STOP); + + return 0; + +stop_port_err: + for (i = 0; i < idx; i++) { + tmp_dev = (struct hinic_nic_dev *)uld_array[i]; + hinic_start_port_traffic_flow(tmp_dev); + } + + return err; +} + +static void __start_port_flow(void *uld_array[], u32 num_dev) +{ + struct hinic_nic_dev *tmp_dev; + u32 idx; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct hinic_nic_dev *)uld_array[idx]; + hinic_start_port_traffic_flow(tmp_dev); + } +} + +/* for hinicadm tool, need to chang all port of the chip */ +int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up) +{ + void *uld_array[HINIC_MAX_PF_NUM]; + struct hinic_nic_dev *tmp_dev; + u8 num_cos, old_cos_up[HINIC_DCB_COS_MAX] = {0}; + u32 i, idx, num_dev = 0; + int err, rollback_err; + + /* Save old map, in case of set failed */ + err = hinic_get_cos_up_map(nic_dev, &num_cos, old_cos_up); + if (err || !num_cos) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get old cos_up map failed\n"); + return -EFAULT; + } + + if (!memcmp(cos_up, old_cos_up, sizeof(u8) * num_cos)) { + nicif_info(nic_dev, drv, nic_dev->netdev, + "Same cos2up map, don't need to change anything\n"); + return 0; + } + + /* Get all pf of this chip */ + err = hinic_get_pf_uld_array(nic_dev->pdev, &num_dev, uld_array); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get all pf private handle failed\n"); + return -EFAULT; + } + + err = __stop_port_flow(uld_array, num_dev); + if (err) + return -EFAULT; + + for (idx = 0; idx < num_dev; idx++) { + tmp_dev = (struct hinic_nic_dev *)uld_array[idx]; + err = __set_cos_up_map(tmp_dev, cos_up); + if (err) { + nicif_err(tmp_dev, drv, tmp_dev->netdev, + "Set cos_up map to hw failed\n"); + goto set_err; + } + } + + __start_port_flow(uld_array, num_dev); + + hinic_set_chip_cos_up_map(nic_dev->pdev, cos_up); + + return 0; + +set_err: + /* undo all settings */ + for (i = 0; i < idx; i++) { + tmp_dev = (struct hinic_nic_dev *)uld_array[i]; + rollback_err = __set_cos_up_map(tmp_dev, old_cos_up); + if (rollback_err) + nicif_err(tmp_dev, drv, tmp_dev->netdev, + "Undo cos_up map to hw failed\n"); + } + + __start_port_flow(uld_array, num_dev); + + return err; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dcb.h b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..89074a73fe6ac88f007160448bcfb4cac3f4ddef --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dcb.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
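+ *
+ * Declarations for the DCB (ETS/PFC) support implemented in hinic_dcb.c,
+ * including the dcbnl_rtnl_ops instance and the nictool helper entry points.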
+ * + */ + +#ifndef HINIC_DCB_H_ +#define HINIC_DCB_H_ + +#define HINIC_DCB_CFG_TX 0 +#define HINIC_DCB_CFG_RX 1 + +/* IEEE8021QAZ Transmission selection algorithm identifiers */ +#define IEEE8021Q_TSA_STRICT 0x0 +#define IEEE8021Q_TSA_CBSHAPER 0x1 +#define IEEE8021Q_TSA_ETS 0x2 +#define IEEE8021Q_TSA_VENDOR 0xFF + +enum HINIC_DCB_FLAGS { + HINIC_DCB_UP_COS_SETTING, + HINIC_DCB_TRAFFIC_STOPPED, +}; + +extern const struct dcbnl_rtnl_ops hinic_dcbnl_ops; + +u8 hinic_dcb_get_tc(struct hinic_dcb_config *dcb_cfg, int dir, u8 up); + +int hinic_dcb_init(struct hinic_nic_dev *nic_dev); + +int hinic_dcb_reset_hw_config(struct hinic_nic_dev *nic_dev); + +int hinic_setup_tc(struct net_device *netdev, u8 tc); + +void hinic_configure_dcb(struct net_device *netdev); + +int hinic_set_cos_up_map(struct hinic_nic_dev *nic_dev, u8 *cos_up); + +int hinic_get_num_cos(struct hinic_nic_dev *nic_dev, u8 *num_cos); + +int hinic_get_cos_up_map(struct hinic_nic_dev *nic_dev, + u8 *num_cos, u8 *cos_up); +u8 hinic_setup_dcb_tool(struct net_device *netdev, u8 *dcb_en, bool wr_flag); +void hinic_dcbnl_set_pfc_en_tool(struct net_device *netdev, + u8 *value, bool flag); +void hinic_dcbnl_set_pfc_cfg_tool(struct net_device *netdev, u8 setting); +void hinic_dcbnl_get_pfc_cfg_tool(struct net_device *netdev, u8 *setting); +u8 hinic_dcbnl_set_pfc_tool(struct net_device *netdev); +void hinic_dcbnl_get_tc_num_tool(struct net_device *netdev, u8 *tc_num); +void hinic_dcbnl_set_ets_tc_tool(struct net_device *netdev, u8 tc[], bool flag); +void hinic_dcbnl_set_ets_pecent_tool(struct net_device *netdev, + u8 percent[], bool flag); +void hinic_dcbnl_set_ets_en_tool(struct net_device *netdev, + u8 *value, bool flag); +void hinic_dcbnl_set_ets_strict_tool(struct net_device *netdev, + u8 *setting, bool flag); +u8 hinic_dcbnl_set_ets_tool(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h new file mode 100644 index 0000000000000000000000000000000000000000..9a89eed2ddfe5a5d2e46177c82597bd13b418c38 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dfx_def.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
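+ *
+ * Command identifiers and data layouts shared between the driver's debug
+ * and management interfaces and the user-space hinicadm tool: target module
+ * names, driver command types, API chain and SM counter commands.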
+ * + */ + +#ifndef __HINIC_DFX_DEF_H__ +#define __HINIC_DFX_DEF_H__ + +enum module_name { + SEND_TO_NIC_DRIVER = 1, + SEND_TO_HW_DRIVER, + SEND_TO_UCODE, + SEND_TO_UP, + SEND_TO_SM, + + HINICADM_OVS_DRIVER = 6, + HINICADM_ROCE_DRIVER, + HINICADM_TOE_DRIVER, + HINICADM_IWAP_DRIVER, + HINICADM_FC_DRIVER, + HINICADM_FCOE_DRIVER, +}; + +enum driver_cmd_type { + TX_INFO = 1, + Q_NUM, + TX_WQE_INFO, + TX_MAPPING, + RX_INFO, + RX_WQE_INFO, + RX_CQE_INFO, + UPRINT_FUNC_EN, + UPRINT_FUNC_RESET, + UPRINT_SET_PATH, + UPRINT_GET_STATISTICS, + FUNC_TYPE, + GET_FUNC_IDX, + GET_INTER_NUM, + CLOSE_TX_STREAM, + GET_DRV_VERSION, + CLEAR_FUNC_STASTIC, + GET_HW_STATS, + CLEAR_HW_STATS, + GET_SELF_TEST_RES, + GET_CHIP_FAULT_STATS, + GET_NUM_COS, + SET_COS_UP_MAP, + GET_COS_UP_MAP, + GET_CHIP_ID, + GET_SINGLE_CARD_INFO, + GET_FIRMWARE_ACTIVE_STATUS, + ROCE_DFX_FUNC, + GET_DEVICE_ID, + GET_PF_DEV_INFO, + CMD_FREE_MEM, + GET_LOOPBACK_MODE = 32, + SET_LOOPBACK_MODE, + SET_LINK_MODE, + SET_PF_BW_LIMIT, + GET_PF_BW_LIMIT, + ROCE_CMD, + GET_POLL_WEIGHT, + SET_POLL_WEIGHT, + GET_HOMOLOGUE, + SET_HOMOLOGUE, + GET_SSET_COUNT, + GET_SSET_ITEMS, + IS_DRV_IN_VM, + LRO_ADPT_MGMT, + SET_INTER_COAL_PARAM, + GET_INTER_COAL_PARAM, + GET_CHIP_INFO, + GET_NIC_STATS_LEN, + GET_NIC_STATS_STRING, + GET_NIC_STATS_INFO, + GET_PF_ID, + SET_DCB_CFG, + SET_PFC_PRIORITY, + GET_PFC_INFO, + SET_PFC_CONTROL, + SET_ETS, + GET_ETS_INFO, + GET_SUPPORT_UP, + GET_SUPPORT_TC, + + RSS_CFG = 0x40, + RSS_INDIR, + PORT_ID, + + GET_WIN_STAT = 0x60, + WIN_CSR_READ = 0x61, + WIN_CSR_WRITE = 0x62, + WIN_API_CMD_RD = 0x63, + + GET_NICTOOL_CAP = 0x74, + + VM_COMPAT_TEST = 0xFF +}; + +enum hinic_nic_link_mode { + HINIC_LINK_MODE_AUTO = 0, + HINIC_LINK_MODE_UP, + HINIC_LINK_MODE_DOWN, + HINIC_LINK_MODE_MAX +}; + +enum api_chain_cmd_type { + API_CSR_READ, + API_CSR_WRITE, + API_CSR_READ_8B, + API_CSR_WRITE_8B, +}; + +enum sm_cmd_type { + SM_CTR_RD32 = 1, + SM_CTR_RD64_PAIR, + SM_CTR_RD64 +}; + +enum hinic_show_set { + HINIC_SHOW_SSET_IO_STATS = 1, +}; + +#define HINIC_SHOW_ITEM_LEN 32 +struct hinic_show_item { + char name[HINIC_SHOW_ITEM_LEN]; + u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ + u8 rsvd[7]; + u64 value; +}; + +#define UP_UPDATEFW_TIME_OUT_VAL 20000U +#define UCODE_COMP_TIME_OUT_VAL 0xFF00000 +#define NIC_TOOL_MAGIC 'x' + +enum hinic_nictool_drv_cap { + NICTOOL_SUPPORT_API_CSR = 0x1, +}; + +#endif /* __HINIC_DFX_DEF_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c new file mode 100644 index 0000000000000000000000000000000000000000..6cf0b9ff0aeff5365251c772b70ceb43108ce07a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c @@ -0,0 +1,1462 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_csr.h" +#include "hinic_eqs.h" + +#define HINIC_EQS_WQ_NAME "hinic_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_QPS_NUM_SHIFT 22 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_GET(val, member) \ + (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \ + AEQ_CTRL_0_##member##_MASK) + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK \ + << AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_GET(val, member) \ + (((val) >> AEQ_CTRL_1_##member##_SHIFT) & \ + AEQ_CTRL_1_##member##_MASK) + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK \ + << AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC_EQ_PROD_IDX_MASK 0xFFFFF +#define HINIC_TASK_PROCESS_EQE_LIMIT 1024 +#define HINIC_EQ_UPDATE_CI_STEP 64 + +static uint g_aeq_len = HINIC_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HINIC_MIN_AEQ_LEN) + " - " __stringify(HINIC_MAX_AEQ_LEN)); + +static uint g_ceq_len = HINIC_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HINIC_MIN_CEQ_LEN) + " - " __stringify(HINIC_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = HINIC_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define 
EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK \ + << EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *)\ + GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 + +#define CEQ_LMT_KICK_DEFAULT 0 + +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 + +#define EQ_WRAPPED_SHIFT 20 + +#define EQ_VALID_SHIFT 31 + +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) + +#define EQ_MIN_PAGE_SIZE 0x1000U +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) + +static irqreturn_t aeq_interrupt(int irq, void *data); +static irqreturn_t ceq_interrupt(int irq, void *data); + +/** + * hinic_qps_num_set - set the number of queues that are actually opened, + * and instructs the migration driver to migrate specified queues + * during VF live migration. 
+ *
+ * @hwdev: the pointer to hw device
+ * @num_qps: number of queues
+ */
+void hinic_qps_num_set(void *hwdev, u32 num_qps)
+{
+	struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
+	u32 addr, val, ctrl;
+
+	addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0);
+	val = hinic_hwif_read_reg(hwif, addr);
+	val = AEQ_CTRL_0_CLEAR(val, QPS_NUM);
+	ctrl = AEQ_CTRL_0_SET(num_qps, QPS_NUM);
+	val |= ctrl;
+	hinic_hwif_write_reg(hwif, addr, val);
+}
+
+u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	u32 addr, val;
+
+	addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0);
+	val = hinic_hwif_read_reg(hwif, addr);
+	return AEQ_CTRL_0_GET(val, FUNC_BUSY);
+}
+
+void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	u32 addr, val, ctrl;
+
+	addr = HINIC_CSR_AEQ_CTRL_0_ADDR(0);
+	val = hinic_hwif_read_reg(hwif, addr);
+	val = AEQ_CTRL_0_CLEAR(val, FUNC_BUSY);
+	ctrl = AEQ_CTRL_0_SET(cfg, FUNC_BUSY);
+	val |= ctrl;
+	hinic_hwif_write_reg(hwif, addr, val);
+}
+
+u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	u32 addr, val;
+
+	addr = HINIC_CSR_AEQ_CTRL_1_ADDR(0);
+	val = hinic_hwif_read_reg(hwif, addr);
+	return AEQ_CTRL_1_GET(val, FUNC_OWN);
+}
+
+void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	u32 addr, val, ctrl;
+
+	addr = HINIC_CSR_AEQ_CTRL_1_ADDR(0);
+	val = hinic_hwif_read_reg(hwif, addr);
+	val = AEQ_CTRL_1_CLEAR(val, FUNC_OWN);
+	ctrl = AEQ_CTRL_1_SET(cfg, FUNC_OWN);
+	val |= ctrl;
+	hinic_hwif_write_reg(hwif, addr, val);
+}
+
+static void ceq_tasklet(ulong eq_tasklet);
+
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+	u8 checksum = 0;
+	u8 idx;
+
+	for (idx = 0; idx < 32; idx += 4)
+		checksum ^= ((val >> idx) & 0xF);
+
+	return checksum & 0xF;
+}
+
+/**
+ * hinic_aeq_register_hw_cb - register aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @hwe_cb: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event,
+			     hinic_aeq_hwe_cb hwe_cb)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || !hwe_cb || event >= HINIC_MAX_AEQ_EVENTS)
+		return -EINVAL;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	aeqs->aeq_hwe_cb[event] = hwe_cb;
+
+	set_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_hw_cb);
+
+/**
+ * hinic_aeq_unregister_hw_cb - unregister the aeq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ */
+void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || event >= HINIC_MAX_AEQ_EVENTS)
+		return;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	clear_bit(HINIC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);
+
+	while (test_bit(HINIC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event]))
+		usleep_range(900, 1000);
+
+	aeqs->aeq_hwe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_hw_cb);
+
+/**
+ * hinic_aeq_register_swe_cb - register aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ * @aeq_swe_cb: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event,
+			      hinic_aeq_swe_cb aeq_swe_cb)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || !aeq_swe_cb || event >= HINIC_MAX_AEQ_SW_EVENTS)
+		return -EINVAL;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	aeqs->aeq_swe_cb[event] = aeq_swe_cb;
+
+	set_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_aeq_register_swe_cb);
+
+/**
+ * hinic_aeq_unregister_swe_cb - unregister the aeq callback for sw event
+ * @hwdev: pointer to hw device
+ * @event: soft event for the handler
+ */
+void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event)
+{
+	struct hinic_aeqs *aeqs;
+
+	if (!hwdev || event >= HINIC_MAX_AEQ_SW_EVENTS)
+		return;
+
+	aeqs = ((struct hinic_hwdev *)hwdev)->aeqs;
+
+	clear_bit(HINIC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);
+
+	while (test_bit(HINIC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event]))
+		usleep_range(900, 1000);
+
+	aeqs->aeq_swe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_aeq_unregister_swe_cb);
+
+/**
+ * hinic_ceq_register_cb - register ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event,
+			  hinic_ceq_event_cb callback)
+{
+	struct hinic_ceqs *ceqs;
+
+	if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+		return -EINVAL;
+
+	ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+	ceqs->ceq_cb[event] = callback;
+
+	set_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_ceq_register_cb);
+
+/**
+ * hinic_ceq_unregister_cb - unregister ceq callback for specific event
+ * @hwdev: pointer to hw device
+ * @event: event for the handler
+ */
+void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event)
+{
+	struct hinic_ceqs *ceqs;
+
+	if (!hwdev || event >= HINIC_MAX_CEQ_EVENTS)
+		return;
+
+	ceqs = ((struct hinic_hwdev *)hwdev)->ceqs;
+
+	clear_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	while (test_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+		usleep_range(900, 1000);
+
+	ceqs->ceq_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic_ceq_unregister_cb);
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: arm state value
+ */
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+	u32 eq_wrap_ci, val;
+	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+	eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* other fields are reserved, set to 0 */
+	val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+	      EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - handler for the ceq events
+ * @ceqs: eqs part of the chip
+ * @ceqe: ceq element of the event
+ */
+static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
+{
+	struct hinic_hwdev *hwdev = ceqs->hwdev;
+	enum hinic_ceq_event event = CEQE_TYPE(ceqe);
+	u32 ceqe_data = CEQE_DATA(ceqe);
+
+	if (event >= HINIC_MAX_CEQ_EVENTS) {
+		sdk_err(hwdev->dev_hdl, "Ceq unknown event: %d, ceqe data: 0x%x\n",
+			event, ceqe_data);
+		return;
+	}
+
+	set_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+	if (ceqs->ceq_cb[event] &&
+	    test_bit(HINIC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
+		ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+	clear_bit(HINIC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+}
+
+/**
+ * aeq_irq_handler - handler for the aeq event
+ * @eq: the async event queue of the event
+ */
+static bool aeq_irq_handler(struct
hinic_eq *eq) +{ + struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); + struct hinic_aeq_elem *aeqe_pos; + enum hinic_aeq_type event; + enum hinic_aeq_sw_type sw_event; + enum hinic_ucode_event_type ucode_event; + u64 aeqe_data; + u32 aeqe_desc; + u32 i, eqe_cnt = 0; + u8 size; + u8 lev; + + for (i = 0; i < HINIC_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + return false; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the cmdq wqe until we have + * verified the command has been processed and + * written back. + */ + dma_rmb(); + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + ucode_event = event; + /* SW event uses only the first 8B */ + sw_event = ucode_event >= HINIC_NIC_FATAL_ERROR_MAX ? + HINIC_STATEFULL_EVENT : + HINIC_STATELESS_EVENT; + aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data)); + set_bit(HINIC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + if (aeqs->aeq_swe_cb[sw_event] && + test_bit(HINIC_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_event])) { + lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev, + ucode_event, + aeqe_data); + } + clear_bit(HINIC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + } else { + if (event < HINIC_MAX_AEQ_EVENTS) { + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(HINIC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(HINIC_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->hwdev, + aeqe_pos->aeqe_data, size); + clear_bit(HINIC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + } else { + sdk_warn(eq->hwdev->dev_hdl, + "Unknown aeq hw event %d\n", event); + } + } + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + * Return: true - success, false - failure + */ +static bool ceq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HINIC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + } + } + + return true; +} + +static void reschedule_eq_handler(struct hinic_eq *eq) +{ + if (eq->type == HINIC_AEQ) { + struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); + + queue_work(aeqs->workq, &eq->aeq_work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type, + u16 eq_id) +{ + if (type == HINIC_AEQ) { + if (eq_id >= hwdev->aeqs->num_aeqs) + return -EINVAL; + + reschedule_eq_handler(&hwdev->aeqs->aeq[eq_id]); + } else { + if (eq_id >= hwdev->ceqs->num_ceqs) + return -EINVAL; + + 
reschedule_eq_handler(&hwdev->ceqs->ceq[eq_id]); + } + + return 0; +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + * Return: true - success, false - failure + */ +static bool eq_irq_handler(void *data) +{ + struct hinic_eq *eq = (struct hinic_eq *)data; + bool uncompleted; + + if (eq->type == HINIC_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? HINIC_EQ_NOT_ARMED : HINIC_EQ_ARMED); + + return uncompleted; +} + +static struct hinic_eq *find_eq(struct hinic_hwdev *hwdev, int msix_entry_idx) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + struct hinic_ceqs *ceqs = hwdev->ceqs; + int i; + + for (i = 0; i < aeqs->num_aeqs; i++) { + struct hinic_eq *eq = &aeqs->aeq[i]; + + if (eq->eq_irq.msix_entry_idx == msix_entry_idx) + return eq; + } + + for (i = 0; i < ceqs->num_ceqs; i++) { + struct hinic_eq *eq = &ceqs->ceq[i]; + + if (eq->eq_irq.msix_entry_idx == msix_entry_idx) + return eq; + } + + return NULL; +} + +/* for windows */ +bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx) +{ + struct hinic_eq *eq; + + eq = find_eq(hwdev, msix_entry_idx); + if (!eq) { + pr_err("Can't find eq in eq interrupt handler\n"); + return false; + } + + return eq_irq_handler(eq); +} + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + */ +static void eq_irq_work(struct work_struct *work) +{ + struct hinic_eq *eq = container_of(work, struct hinic_eq, aeq_work); + + if (eq_irq_handler(eq)) + reschedule_eq_handler(eq); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + */ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hinic_eq *aeq = (struct hinic_eq *)data; + struct hinic_hwdev *hwdev = aeq->hwdev; + struct hinic_aeqs *aeqs = aeq_to_aeqs(aeq); + + /* clear resend timer cnt register */ + hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + queue_work(aeqs->workq, &aeq->aeq_work); + + return IRQ_HANDLED; +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + */ +static void ceq_tasklet(ulong ceq_data) +{ + struct hinic_eq *eq = (struct hinic_eq *)ceq_data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(eq)) + reschedule_eq_handler(eq); +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + */ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic_eq *ceq = (struct hinic_eq *)data; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + hinic_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +struct hinic_ceq_ctrl_reg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; +}; + +static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct hinic_ceq_ctrl_reg ceq_ctrl = {0}; + u16 in_size = sizeof(ceq_ctrl); + u16 out_size = sizeof(ceq_ctrl); + int err; + + err = hinic_global_func_id_get(hwdev, &ceq_ctrl.func_id); + if (err) + return err; + + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, + &ceq_ctrl, in_size, + 
&ceq_ctrl, &out_size, 0); + if (err || !out_size || ceq_ctrl.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + * Return: 0 - success, negative - failure + */ +static int set_eq_ctrls(struct hinic_eq *eq) +{ + enum hinic_eq_type type = eq->type; + struct hinic_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif); + int err; + + if (type == HINIC_AEQ) { + /* set ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + if (HINIC_IS_VF(eq->hwdev)) { + val = AEQ_CTRL_0_CLEAR(val, FUNC_BUSY) & + AEQ_CTRL_1_CLEAR(val, FUNC_OWN); + } + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + val |= ctrl0; + + hinic_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl1); + + } else { + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) | + CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + /* set ceq ctrl reg through mgmt cpu */ + err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + */ +static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + u32 i; + u32 *ceqe; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + */ +static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + struct hinic_aeq_elem *aeqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + */ +static int alloc_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwdev->hwif; + u32 init_val; + u64 dma_addr_size, virt_addr_size; + u16 pg_num, i; + u32 reg; + int err; + u8 flag = 0; + + dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr); + virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr); + + eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr) + 
return -ENOMEM; + + eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto virt_addr_alloc_err; + } + + eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr_for_free) { + err = -ENOMEM; + goto dma_addr_free_alloc_err; + } + + eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr_for_free) { + err = -ENOMEM; + goto virt_addr_free_alloc_err; + } + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) { + eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent + (eq->hwdev->dev_hdl, eq->page_size, + &eq->dma_addr_for_free[pg_num], GFP_KERNEL); + if (!eq->virt_addr_for_free[pg_num]) { + err = -ENOMEM; + goto dma_alloc_err; + } + + eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num]; + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num]; + if (eq->dma_addr_for_free[pg_num] & (eq->page_size - 1)) { + sdk_info(eq->hwdev->dev_hdl, + "Address is not aligned to %u-bytes as hardware required\n", + eq->page_size); + sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n", + ((eq->page_size) >> 1)); + eq->dma_addr[pg_num] = ALIGN + (eq->dma_addr_for_free[pg_num], + (u64)((eq->page_size) >> 1)); + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] + + ((u64)eq->dma_addr[pg_num] + - (u64)eq->dma_addr_for_free[pg_num]); + flag = 1; + } + reg = HINIC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hinic_hwif_write_reg(hwif, reg, + upper_32_bits(eq->dma_addr[pg_num])); + + reg = HINIC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hinic_hwif_write_reg(hwif, reg, + lower_32_bits(eq->dma_addr[pg_num])); + } + + if (flag) { + eq->page_size = eq->page_size >> 1; + eq->eq_len = eq->eq_len >> 1; + } + + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + err = -EINVAL; + goto dma_alloc_err; + } + init_val = EQ_WRAPPED(eq); + + if (eq->type == HINIC_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_num; i++) + dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size, + eq->virt_addr_for_free[i], + eq->dma_addr_for_free[i]); + kfree(eq->virt_addr_for_free); +virt_addr_free_alloc_err: + kfree(eq->dma_addr_for_free); +dma_addr_free_alloc_err: + kfree(eq->virt_addr); +virt_addr_alloc_err: + kfree(eq->dma_addr); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + */ +static void free_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwdev *hwdev = eq->hwdev; + u16 pg_num; + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) + dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size, + eq->virt_addr_for_free[pg_num], + eq->dma_addr_for_free[pg_num]); + + kfree(eq->virt_addr_for_free); + kfree(eq->dma_addr_for_free); + kfree(eq->virt_addr); + kfree(eq->dma_addr); +} + +static inline u32 get_page_size(struct hinic_eq *eq) +{ + u32 total_size; + u16 count, n = 0; + + total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE); + + if (total_size <= (HINIC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE)) + return EQ_MIN_PAGE_SIZE; + + count = (u16)(ALIGN((total_size / HINIC_EQ_MAX_PAGES), + EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE); + + if (!(count & (count - 1))) + return EQ_MIN_PAGE_SIZE * count; + + while (count) { + count >>= 1; + n++; + } + + return EQ_MIN_PAGE_SIZE << n; +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to 
hw device
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, ceq or aeq
+ * @entry: msix entry associated with the event queue
+ * Return: 0 - Success, Negative - failure
+ */
+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+		   u32 q_len, enum hinic_eq_type type, struct irq_info *entry)
+{
+	int err = 0;
+
+	eq->hwdev = hwdev;
+	eq->q_id = q_id;
+	eq->type = type;
+	eq->eq_len = q_len;
+
+	/* clear eq_len to force eqe drop in hardware */
+	if (eq->type == HINIC_AEQ)
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	else
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+	eq->cons_idx = 0;
+	eq->wrapped = 0;
+
+	eq->elem_size = (type == HINIC_AEQ) ?
+			HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+
+	eq->page_size = get_page_size(eq);
+	eq->orig_page_size = eq->page_size;
+	eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+		sdk_err(hwdev->dev_hdl, "Number of pages: %d is too many for eq\n",
+			eq->num_pages);
+		return -EINVAL;
+	}
+
+	err = alloc_eq_pages(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+		return err;
+	}
+
+	eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+	eq->eq_irq.irq_id = entry->irq_id;
+
+	err = set_eq_ctrls(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n");
+		goto init_eq_ctrls_err;
+	}
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+	if (type == HINIC_AEQ)
+		INIT_WORK(&eq->aeq_work, eq_irq_work);
+	else
+		tasklet_init(&eq->ceq_tasklet, ceq_tasklet, (ulong)eq);
+
+	if (type == HINIC_AEQ) {
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_aeq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+		if (err <= 0 || err >= (int)sizeof(eq->irq_name)) {
+			sdk_err(hwdev->dev_hdl, "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n",
+				err, (int)sizeof(eq->irq_name));
+			err = -EINVAL;
+			goto req_irq_err;
+		}
+		err = request_irq(entry->irq_id, aeq_interrupt, 0UL,
+				  eq->irq_name, eq);
+	} else {
+		err = snprintf(eq->irq_name, sizeof(eq->irq_name),
+			       "hinic_ceq%d@pci:%s", eq->q_id,
+			       pci_name(hwdev->pcidev_hdl));
+		if (err <= 0 || err >= (int)sizeof(eq->irq_name)) {
+			sdk_err(hwdev->dev_hdl, "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n",
+				err, (int)sizeof(eq->irq_name));
+			err = -EINVAL;
+			goto req_irq_err;
+		}
+		err = request_irq(entry->irq_id, ceq_interrupt, 0UL,
+				  eq->irq_name, eq);
+	}
+
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+			err);
+		goto req_irq_err;
+	}
+
+	hinic_set_msix_state(hwdev, entry->msix_entry_idx, HINIC_MSIX_ENABLE);
+
+	return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+	free_eq_pages(eq);
+	return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ */
+static void remove_eq(struct hinic_eq *eq)
+{
+	struct irq_info *entry = &eq->eq_irq;
+
+	hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+			     HINIC_MSIX_DISABLE);
+	synchronize_irq(entry->irq_id);
+
+	free_irq(entry->irq_id, eq);
+
+	if (eq->type == HINIC_AEQ) {
+		cancel_work_sync(&eq->aeq_work);
+
+		/* clear eq_len to avoid hw access host memory */
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		tasklet_kill(&eq->ceq_tasklet);
+
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+	}
+
+	/* update cons_idx to avoid invalid interrupt */
+	eq->cons_idx =
hinic_hwif_read_reg(eq->hwdev->hwif, + EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED); + + free_eq_pages(eq); +} + +/** + * hinic_aeqs_init - init all the aeqs + * @hwdev: the pointer to hw device + * @num_aeqs: number of AEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + */ +int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries) +{ + struct hinic_aeqs *aeqs; + int err; + u16 i, q_id; + u32 aeq_len; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; + + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + + aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); + if (!aeqs->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto create_work_err; + } + + if (g_aeq_len < HINIC_MIN_AEQ_LEN || g_aeq_len > HINIC_MAX_AEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n", + g_aeq_len, HINIC_DEFAULT_AEQ_LEN); + g_aeq_len = HINIC_DEFAULT_AEQ_LEN; + } + + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && + hwdev->hwif->chip_mode != CHIP_MODE_NORMAL) + aeq_len = HINIC_VMGW_DEFAULT_AEQ_LEN; + else + aeq_len = g_aeq_len; + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, aeq_len, + HINIC_AEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n", + q_id); + goto init_aeq_err; + } + } + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + +create_work_err: + kfree(aeqs); + + return err; +} + +/** + * hinic_aeqs_free - free all the aeqs + * @hwdev: the pointer to hw device + */ +void hinic_aeqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + enum hinic_aeq_type aeq_event = HINIC_HW_INTER_INT; + enum hinic_aeq_sw_type sw_aeq_event = HINIC_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < HINIC_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + hinic_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < HINIC_MAX_AEQ_EVENTS; aeq_event++) + hinic_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * hinic_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + */ +int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries) +{ + struct hinic_ceqs *ceqs; + int err; + u16 i, q_id; + u32 ceq_len; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < HINIC_MIN_CEQ_LEN || g_ceq_len > HINIC_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n", + g_ceq_len, HINIC_DEFAULT_CEQ_LEN); + g_ceq_len = HINIC_DEFAULT_CEQ_LEN; + } + + if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && + hwdev->hwif->chip_mode != CHIP_MODE_NORMAL) + ceq_len = HINIC_VMGW_DEFAULT_CEQ_LEN; + else + ceq_len = g_ceq_len; + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + HINIC_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = 
HINIC_TASK_PROCESS_EQE_LIMIT; + } + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, ceq_len, + HINIC_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n", + q_id); + goto init_ceq_err; + } + } + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * hinic_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + */ +void hinic_ceqs_free(struct hinic_hwdev *hwdev) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + enum hinic_ceq_event ceq_event = HINIC_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < HINIC_MAX_CEQ_EVENTS; ceq_event++) + hinic_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void hinic_get_ceq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + +void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = aeqs->num_aeqs; +} + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_aeq_elem *aeqe_pos; + struct hinic_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", + q_id, ci, pi, work_busy(&eq->aeq_work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} + +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { + eq = &hwdev->ceqs->ceq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n", + q_id, ci, eq->cons_idx, pi, + tasklet_state(&eq->ceq_tasklet), + eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); + sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->hard_intr_jif)); + sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", + jiffies_to_msecs(jiffies - eq->soft_intr_jif)); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h new file mode 100644 index 0000000000000000000000000000000000000000..81c34ca573d4a9e6d4b321cea63bc2cbc9a6b859 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can 
redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_EQS_H +#include + +#define HINIC_EQS_H + +#define HINIC_EQ_PAGE_SIZE 0x00001000 + +#define HINIC_HW_MAX_AEQS 4 +#define HINIC_MAX_AEQS 3 +#define HINIC_MAX_CEQS 32 + +#define HINIC_EQ_MAX_PAGES 8 + +#define HINIC_AEQE_SIZE 64 +#define HINIC_CEQE_SIZE 4 + +#define HINIC_AEQE_DESC_SIZE 4 +#define HINIC_AEQE_DATA_SIZE \ + (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) + +#define HINIC_DEFAULT_AEQ_LEN 0x10000 +#define HINIC_DEFAULT_CEQ_LEN 0x10000 + +#define HINIC_VMGW_DEFAULT_AEQ_LEN 128 +#define HINIC_VMGW_DEFAULT_CEQ_LEN 1024 + +#define HINIC_MIN_AEQ_LEN 64 +#define HINIC_MAX_AEQ_LEN (512 * 1024) +#define HINIC_MIN_CEQ_LEN 64 +#define HINIC_MAX_CEQ_LEN (1024 * 1024) + +#define HINIC_CEQ_ID_CMDQ 0 + +#define EQ_IRQ_NAME_LEN 64 + +enum hinic_eq_type { + HINIC_AEQ, + HINIC_CEQ +}; + +enum hinic_eq_intr_mode { + HINIC_INTR_MODE_ARMED, + HINIC_INTR_MODE_ALWAYS, +}; + +enum hinic_eq_ci_arm_state { + HINIC_EQ_NOT_ARMED, + HINIC_EQ_ARMED, +}; + +struct hinic_eq { + struct hinic_hwdev *hwdev; + u16 q_id; + enum hinic_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + dma_addr_t *dma_addr; + u8 **virt_addr; + dma_addr_t *dma_addr_for_free; + u8 **virt_addr_for_free; + + struct work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hard_intr_jif; + u64 soft_intr_jif; +}; + +struct hinic_aeq_elem { + u8 aeqe_data[HINIC_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hinic_aeq_cb_state { + HINIC_AEQ_HW_CB_REG = 0, + HINIC_AEQ_HW_CB_RUNNING, + HINIC_AEQ_SW_CB_REG, + HINIC_AEQ_SW_CB_RUNNING, +}; + +struct hinic_aeqs { + struct hinic_hwdev *hwdev; + + hinic_aeq_hwe_cb aeq_hwe_cb[HINIC_MAX_AEQ_EVENTS]; + hinic_aeq_swe_cb aeq_swe_cb[HINIC_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[HINIC_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[HINIC_MAX_AEQ_SW_EVENTS]; + + struct hinic_eq aeq[HINIC_MAX_AEQS]; + u16 num_aeqs; + + struct workqueue_struct *workq; +}; + +enum hinic_ceq_cb_state { + HINIC_CEQ_CB_REG = 0, + HINIC_CEQ_CB_RUNNING, +}; + +struct hinic_ceqs { + struct hinic_hwdev *hwdev; + + hinic_ceq_event_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; + void *ceq_data[HINIC_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[HINIC_MAX_CEQ_EVENTS]; + + struct hinic_eq ceq[HINIC_MAX_CEQS]; + u16 num_ceqs; +}; + +enum hinic_msg_pipe_state { + PIPE_STATE_IDLE, + PIPE_STATE_BUSY, + PIPE_STATE_SUSPEND, +}; + +#define PIPE_CYCLE_MAX 10000 + +u32 hinic_func_busy_state_get(struct hinic_hwdev *hwdev); + +void hinic_func_busy_state_set(struct hinic_hwdev *hwdev, u32 cfg); + +u32 hinic_func_own_bit_get(struct hinic_hwdev *hwdev); + +void hinic_func_own_bit_set(struct hinic_hwdev *hwdev, u32 cfg); + +int hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hinic_aeqs_free(struct hinic_hwdev *hwdev); + +int hinic_ceqs_init(struct hinic_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hinic_ceqs_free(struct hinic_hwdev *hwdev); + +void hinic_get_ceq_irqs(struct 
hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic_get_aeq_irqs(struct hinic_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev); + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev); + +int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type, + u16 eq_id); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..9796f2fa2f0626b7ec3309e2e1c4f9e22d53dfaf --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -0,0 +1,2493 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_dfx_def.h" +#include "hinic_tx.h" +#include "hinic_rx.h" +#include "hinic_qp.h" + +#ifndef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) \ + ((netdev)->ethtool_ops = (ops)) +#endif + +struct hinic_stats { + char name[ETH_GSTRING_LEN]; + u32 size; + int offset; +}; + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0]))) + +#define HINIC_NETDEV_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct rtnl_link_stats64, _stat_item), \ + .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ +} + +static struct hinic_stats hinic_netdev_stats[] = { + HINIC_NETDEV_STAT(rx_packets), + HINIC_NETDEV_STAT(tx_packets), + HINIC_NETDEV_STAT(rx_bytes), + HINIC_NETDEV_STAT(tx_bytes), + HINIC_NETDEV_STAT(rx_errors), + HINIC_NETDEV_STAT(tx_errors), + HINIC_NETDEV_STAT(rx_dropped), + HINIC_NETDEV_STAT(tx_dropped), + HINIC_NETDEV_STAT(multicast), + HINIC_NETDEV_STAT(collisions), + HINIC_NETDEV_STAT(rx_length_errors), + HINIC_NETDEV_STAT(rx_over_errors), + HINIC_NETDEV_STAT(rx_crc_errors), + HINIC_NETDEV_STAT(rx_frame_errors), + HINIC_NETDEV_STAT(rx_fifo_errors), + HINIC_NETDEV_STAT(rx_missed_errors), + HINIC_NETDEV_STAT(tx_aborted_errors), + HINIC_NETDEV_STAT(tx_carrier_errors), + HINIC_NETDEV_STAT(tx_fifo_errors), + HINIC_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define HINIC_NIC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_nic_stats, _stat_item), \ + .offset = offsetof(struct hinic_nic_stats, _stat_item) \ +} + +static struct hinic_stats hinic_nic_dev_stats[] = { + HINIC_NIC_STAT(netdev_tx_timeout), +}; + +static struct hinic_stats hinic_nic_dev_stats_extern[] = { + HINIC_NIC_STAT(tx_carrier_off_drop), + HINIC_NIC_STAT(tx_invalid_qid), +}; + +#define HINIC_RXQ_STAT(_stat_item) { \ + .name = "rxq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \ + .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ +} + +#define 
HINIC_TXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \ + .offset = offsetof(struct hinic_txq_stats, _stat_item) \ +} + +/*lint -save -e786*/ +static struct hinic_stats hinic_rx_queue_stats[] = { + HINIC_RXQ_STAT(packets), + HINIC_RXQ_STAT(bytes), + HINIC_RXQ_STAT(errors), + HINIC_RXQ_STAT(csum_errors), + HINIC_RXQ_STAT(other_errors), + HINIC_RXQ_STAT(dropped), + HINIC_RXQ_STAT(xdp_dropped), + HINIC_RXQ_STAT(rx_buf_empty), +}; + +static struct hinic_stats hinic_rx_queue_stats_extern[] = { + HINIC_RXQ_STAT(alloc_skb_err), + HINIC_RXQ_STAT(alloc_rx_buf_err), + HINIC_RXQ_STAT(map_rx_buf_err), + HINIC_RXQ_STAT(xdp_large_pkt), +}; + +static struct hinic_stats hinic_tx_queue_stats[] = { + HINIC_TXQ_STAT(packets), + HINIC_TXQ_STAT(bytes), + HINIC_TXQ_STAT(busy), + HINIC_TXQ_STAT(wake), + HINIC_TXQ_STAT(dropped), + HINIC_TXQ_STAT(big_frags_pkts), + HINIC_TXQ_STAT(big_udp_pkts), +}; + +static struct hinic_stats hinic_tx_queue_stats_extern[] = { + HINIC_TXQ_STAT(ufo_pkt_unsupport), + HINIC_TXQ_STAT(ufo_linearize_err), + HINIC_TXQ_STAT(ufo_alloc_skb_err), + HINIC_TXQ_STAT(skb_pad_err), + HINIC_TXQ_STAT(frag_len_overflow), + HINIC_TXQ_STAT(offload_cow_skb_err), + HINIC_TXQ_STAT(alloc_cpy_frag_err), + HINIC_TXQ_STAT(map_cpy_frag_err), + HINIC_TXQ_STAT(map_frag_err), + HINIC_TXQ_STAT(frag_size_err), + HINIC_TXQ_STAT(unknown_tunnel_pkt), +};/*lint -restore*/ + +#define HINIC_FUNC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \ + .offset = offsetof(struct hinic_vport_stats, _stat_item) \ +} + +static struct hinic_stats hinic_function_stats[] = { + HINIC_FUNC_STAT(tx_unicast_pkts_vport), + HINIC_FUNC_STAT(tx_unicast_bytes_vport), + HINIC_FUNC_STAT(tx_multicast_pkts_vport), + HINIC_FUNC_STAT(tx_multicast_bytes_vport), + HINIC_FUNC_STAT(tx_broadcast_pkts_vport), + HINIC_FUNC_STAT(tx_broadcast_bytes_vport), + + HINIC_FUNC_STAT(rx_unicast_pkts_vport), + HINIC_FUNC_STAT(rx_unicast_bytes_vport), + HINIC_FUNC_STAT(rx_multicast_pkts_vport), + HINIC_FUNC_STAT(rx_multicast_bytes_vport), + HINIC_FUNC_STAT(rx_broadcast_pkts_vport), + HINIC_FUNC_STAT(rx_broadcast_bytes_vport), + + HINIC_FUNC_STAT(tx_discard_vport), + HINIC_FUNC_STAT(rx_discard_vport), + HINIC_FUNC_STAT(tx_err_vport), + HINIC_FUNC_STAT(rx_err_vport), +}; + +#define HINIC_PORT_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \ + .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ +} + +static struct hinic_stats hinic_port_stats[] = { + HINIC_PORT_STAT(mac_rx_total_pkt_num), + HINIC_PORT_STAT(mac_rx_total_oct_num), + HINIC_PORT_STAT(mac_rx_bad_pkt_num), + HINIC_PORT_STAT(mac_rx_bad_oct_num), + HINIC_PORT_STAT(mac_rx_good_pkt_num), + HINIC_PORT_STAT(mac_rx_good_oct_num), + HINIC_PORT_STAT(mac_rx_uni_pkt_num), + HINIC_PORT_STAT(mac_rx_multi_pkt_num), + HINIC_PORT_STAT(mac_rx_broad_pkt_num), + HINIC_PORT_STAT(mac_tx_total_pkt_num), + HINIC_PORT_STAT(mac_tx_total_oct_num), + HINIC_PORT_STAT(mac_tx_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_bad_oct_num), + HINIC_PORT_STAT(mac_tx_good_pkt_num), + HINIC_PORT_STAT(mac_tx_good_oct_num), + HINIC_PORT_STAT(mac_tx_uni_pkt_num), + HINIC_PORT_STAT(mac_tx_multi_pkt_num), + HINIC_PORT_STAT(mac_tx_broad_pkt_num), + HINIC_PORT_STAT(mac_rx_fragment_pkt_num), + HINIC_PORT_STAT(mac_rx_undersize_pkt_num), + HINIC_PORT_STAT(mac_rx_undermin_pkt_num), + HINIC_PORT_STAT(mac_rx_64_oct_pkt_num), + 
HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), + HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), + HINIC_PORT_STAT(mac_rx_oversize_pkt_num), + HINIC_PORT_STAT(mac_rx_jabber_pkt_num), + HINIC_PORT_STAT(mac_rx_pause_num), + HINIC_PORT_STAT(mac_rx_pfc_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), + HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), + HINIC_PORT_STAT(mac_rx_control_pkt_num), + HINIC_PORT_STAT(mac_rx_sym_err_pkt_num), + HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num), + HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num), + HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_fragment_pkt_num), + HINIC_PORT_STAT(mac_tx_undersize_pkt_num), + HINIC_PORT_STAT(mac_tx_undermin_pkt_num), + HINIC_PORT_STAT(mac_tx_64_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), + HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), + HINIC_PORT_STAT(mac_tx_oversize_pkt_num), + HINIC_PORT_STAT(mac_tx_jabber_pkt_num), + HINIC_PORT_STAT(mac_tx_pause_num), + HINIC_PORT_STAT(mac_tx_pfc_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), + HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), + HINIC_PORT_STAT(mac_tx_control_pkt_num), + HINIC_PORT_STAT(mac_tx_err_all_pkt_num), + HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num), + HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), +}; + +u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev) +{ + return ARRAY_LEN(hinic_nic_dev_stats) + + ARRAY_LEN(hinic_nic_dev_stats_extern) + + (ARRAY_LEN(hinic_tx_queue_stats) + + ARRAY_LEN(hinic_tx_queue_stats_extern) + + ARRAY_LEN(hinic_rx_queue_stats) + + ARRAY_LEN(hinic_rx_queue_stats_extern)) * nic_dev->max_qps; +} + +#define GET_VALUE_OF_PTR(size, ptr) ( \ + (size) == sizeof(u64) ? *(u64 *)(ptr) : \ + (size) == sizeof(u32) ? *(u32 *)(ptr) : \ + (size) == sizeof(u16) ? 
*(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + HINIC_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + item_idx++; \ + } \ +} + +#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) { \ + int j, err; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + HINIC_SHOW_ITEM_LEN); \ + err = snprintf((items)[item_idx].name, HINIC_SHOW_ITEM_LEN,\ + (array)[j].name, (qid)); \ + if (err <= 0 || err >= HINIC_SHOW_ITEM_LEN) \ + pr_err("Failed snprintf: func_ret(%d), dest_len(%d)\n",\ + err, HINIC_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + item_idx++; \ + } \ +} + +void hinic_get_io_stats(struct hinic_nic_dev *nic_dev, + struct hinic_show_item *items) +{ + int item_idx = 0; + u16 qid; + + DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats, &nic_dev->stats); + DEV_STATS_PACK(items, item_idx, hinic_nic_dev_stats_extern, + &nic_dev->stats); + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats, + &nic_dev->txqs[qid].txq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, hinic_tx_queue_stats_extern, + &nic_dev->txqs[qid].txq_stats, qid); + } + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats, + &nic_dev->rxqs[qid].rxq_stats, qid); + QUEUE_STATS_PACK(items, item_idx, hinic_rx_queue_stats_extern, + &nic_dev->rxqs[qid].rxq_stats, qid); + } +} + +#define LP_DEFAULT_TIME 5 /* seconds */ +#define LP_PKT_LEN 1514 +#define OBJ_STR_MAX_LEN 32 +#define SET_LINK_STR_MAX_LEN 128 + +#define PORT_DOWN_ERR_IDX 0 +enum diag_test_index { + INTERNAL_LP_TEST = 0, + EXTERNAL_LP_TEST = 1, + DIAG_TEST_MAX = 2, +}; + +static char hinic_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +struct hw2ethtool_link_mode { + u32 supported; + u32 advertised; + enum ethtool_link_mode_bit_indices link_mode_bit; + u32 speed; + enum hinic_link_mode hw_link_mode; +}; + +static struct hw2ethtool_link_mode + hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = { + { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = SPEED_10000, + .hw_link_mode = HINIC_10GE_BASE_KR, + }, + { + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = SPEED_40000, + .hw_link_mode = HINIC_40GE_BASE_KR4, + }, + { + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = SPEED_40000, + .hw_link_mode = HINIC_40GE_BASE_CR4, + }, + { + .supported = SUPPORTED_100000baseKR4_Full, + .advertised = ADVERTISED_100000baseKR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = SPEED_100000, + .hw_link_mode = HINIC_100GE_BASE_KR4, + }, + { + .supported = SUPPORTED_100000baseCR4_Full, + .advertised = ADVERTISED_100000baseCR4_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = 
SPEED_100000, + .hw_link_mode = HINIC_100GE_BASE_CR4, + }, + { + .supported = SUPPORTED_25000baseKR_Full, + .advertised = ADVERTISED_25000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_KR_S, + }, + { + .supported = SUPPORTED_25000baseCR_Full, + .advertised = ADVERTISED_25000baseCR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_CR_S, + }, + { + .supported = SUPPORTED_25000baseKR_Full, + .advertised = ADVERTISED_25000baseKR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_KR, + }, + { + .supported = SUPPORTED_25000baseCR_Full, + .advertised = ADVERTISED_25000baseCR_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = SPEED_25000, + .hw_link_mode = HINIC_25GE_BASE_CR, + }, + { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = SPEED_1000, + .hw_link_mode = HINIC_GE_BASE_KX, + }, +}; + +u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = { + SPEED_10, SPEED_100, + SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, + SPEED_100000 +}; + +static int hinic_ethtool_to_hw_speed_level(u32 speed) +{ + int i; + + for (i = 0; i < LINK_SPEED_LEVELS; i++) { + if (hw_to_ethtool_speed[i] == speed) + break; + } + + return i; +} + +static int hinic_get_link_mode_index(enum hinic_link_mode link_mode) +{ + int i = 0; + + for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) { + if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode) + break; + } + + return i; +} + +static int hinic_is_support_speed(enum hinic_link_mode supported_link, + u32 speed) +{ + enum hinic_link_mode link_mode; + int idx; + + for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { + if (!(supported_link & ((u32)1 << link_mode))) + continue; + + idx = hinic_get_link_mode_index(link_mode); + if (idx >= HINIC_LINK_MODE_NUMBERS) + continue; + + if (hw_to_ethtool_link_mode_table[idx].speed == speed) + return 1; + } + + return 0; +} + +#define GET_SUPPORTED_MODE 0 +#define GET_ADVERTISED_MODE 1 + +struct cmd_link_settings { + u64 supported; + u64 advertising; + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ + ((ecmd)->supported |= \ + (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) +#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ + ((ecmd)->advertising |= \ + (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) + +#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + ((ecmd)->supported |= SUPPORTED_##mode) +#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + ((ecmd)->advertising |= ADVERTISED_##mode) +#define ETHTOOL_TEST_LINK_MODE_SUPPORTED(ecmd, mode) \ + ((ecmd)->supported & SUPPORTED_##Autoneg) + +static void hinic_link_port_type(struct cmd_link_settings *link_settings, + enum hinic_port_type port_type) +{ + switch (port_type) { + case HINIC_PORT_ELEC: + case HINIC_PORT_TP: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); + link_settings->port = PORT_TP; + break; + + case HINIC_PORT_AOC: + case HINIC_PORT_FIBRE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_FIBRE; + break; + + case HINIC_PORT_COPPER: + 
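/* Note (descriptive comment, grounded in the case below): direct-attach + * copper is reported like the fibre/AOC case — the FIBRE link mode is + * advertised and the port type is PORT_DA — rather than the twisted-pair + * PORT_TP used for the ELEC/TP case above. + */ + 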
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_DA; + break; + + case HINIC_PORT_BACKPLANE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); + link_settings->port = PORT_NONE; + break; + + default: + link_settings->port = PORT_OTHER; + break; + } +} + +static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, + enum hinic_link_mode hw_link_mode, + u32 name) +{ + enum hinic_link_mode link_mode; + int idx = 0; + + for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { + if (hw_link_mode & ((u32)1 << link_mode)) { + idx = hinic_get_link_mode_index(link_mode); + if (idx >= HINIC_LINK_MODE_NUMBERS) + continue; + + if (name == GET_SUPPORTED_MODE) + ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE + (link_settings, idx); + else + ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE + (link_settings, idx); + } + } +} + +static int hinic_link_speed_set(struct hinic_nic_dev *nic_dev, + struct cmd_link_settings *link_settings, + struct nic_port_info *port_info) +{ + struct net_device *netdev = nic_dev->netdev; + enum hinic_link_mode supported_link = 0, advertised_link = 0; + u8 link_state = 0; + int err; + + err = hinic_get_link_mode(nic_dev->hwdev, + &supported_link, &advertised_link); + if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || + advertised_link == HINIC_SUPPORTED_UNKNOWN) { + nicif_err(nic_dev, drv, netdev, "Failed to get supported link modes\n"); + return err; + } + + hinic_add_ethtool_link_mode(link_settings, supported_link, + GET_SUPPORTED_MODE); + hinic_add_ethtool_link_mode(link_settings, advertised_link, + GET_ADVERTISED_MODE); + + err = hinic_get_link_state(nic_dev->hwdev, &link_state); + if (!err && link_state) { + link_settings->speed = port_info->speed < LINK_SPEED_LEVELS ? 
+ hw_to_ethtool_speed[port_info->speed] : + (u32)SPEED_UNKNOWN; + + link_settings->duplex = port_info->duplex; + } else { + link_settings->speed = (u32)SPEED_UNKNOWN; + link_settings->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int get_link_settings(struct net_device *netdev, + struct cmd_link_settings *link_settings) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); + return err; + } + + err = hinic_link_speed_set(nic_dev, link_settings, &port_info); + if (err) + return err; + + hinic_link_port_type(link_settings, port_info.port_type); + + link_settings->autoneg = port_info.autoneg_state; + if (port_info.autoneg_cap) + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg); + if (port_info.autoneg_state) + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg); + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get pauseparam from hw\n"); + return err; + } + + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); + if (nic_pause.rx_pause && nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + } else if (nic_pause.tx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, + Asym_Pause); + } else if (nic_pause.rx_pause) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, + Asym_Pause); + } + } + + return 0; +} + +static int hinic_get_settings(struct net_device *netdev, + struct ethtool_cmd *ep) +{ + struct cmd_link_settings settings = {0}; + int err; + + err = get_link_settings(netdev, &settings); + if (err) + return err; + + ep->supported = settings.supported & ((u32)~0); + ep->advertising = settings.advertising & ((u32)~0); + + ep->autoneg = settings.autoneg; + ethtool_cmd_speed_set(ep, settings.speed); + ep->duplex = settings.duplex; + ep->port = settings.port; + ep->transceiver = XCVR_INTERNAL; + + return 0; +} + +static int hinic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct cmd_link_settings settings = {0}; + struct ethtool_link_settings *base = &cmd->base; + int err; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + err = get_link_settings(netdev, &settings); + if (err) + return err; + + bitmap_copy(cmd->link_modes.supported, + (unsigned long *)&settings.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(cmd->link_modes.advertising, + (unsigned long *)&settings.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + base->autoneg = settings.autoneg; + base->speed = settings.speed; + base->duplex = settings.duplex; + base->port = settings.port; + + return 0; +} + +static int hinic_is_speed_legal(struct hinic_nic_dev *nic_dev, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + enum hinic_link_mode supported_link = 0, advertised_link = 0; + enum nic_speed_level speed_level = 0; + int err; + + err = hinic_get_link_mode(nic_dev->hwdev, + &supported_link, &advertised_link); + if (err || supported_link == HINIC_SUPPORTED_UNKNOWN || + advertised_link == HINIC_SUPPORTED_UNKNOWN) { + nicif_err(nic_dev, drv, netdev, + "Failed to get supported link modes\n"); + return -EAGAIN; + } + + speed_level = 
hinic_ethtool_to_hw_speed_level(speed); + if (speed_level >= LINK_SPEED_LEVELS || + !hinic_is_support_speed(supported_link, speed)) { + nicif_err(nic_dev, drv, netdev, + "Not supported speed: %d\n", speed); + return -EINVAL; + } + + return 0; +} + +static int hinic_set_settings_to_hw(struct hinic_nic_dev *nic_dev, + u32 set_settings, u8 autoneg, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_link_ksettings settings = {0}; + enum nic_speed_level speed_level = 0; + char set_link_str[SET_LINK_STR_MAX_LEN] = {0}; + int err = 0; + + err = snprintf(set_link_str, sizeof(set_link_str), "%s", + (set_settings & HILINK_LINK_SET_AUTONEG) ? + (autoneg ? "autong enable " : "autong disable ") : ""); + if (err < 0 || err >= SET_LINK_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf link state, function return(%d) and dest_len(%d)\n", + err, SET_LINK_STR_MAX_LEN); + return -EFAULT; + } + if (set_settings & HILINK_LINK_SET_SPEED) { + speed_level = hinic_ethtool_to_hw_speed_level(speed); + err = snprintf(set_link_str, sizeof(set_link_str), + "%sspeed %d ", set_link_str, speed); + if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n", + err, SET_LINK_STR_MAX_LEN); + return -EFAULT; + } + } + + settings.valid_bitmap = set_settings; + settings.autoneg = autoneg; + settings.speed = speed_level; + + err = hinic_set_link_settings(nic_dev->hwdev, &settings); + if (err != HINIC_MGMT_CMD_UNSUPPORTED) { + if (err) + nicif_err(nic_dev, drv, netdev, "Set %sfailed\n", + set_link_str); + else + nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n", + set_link_str); + + return err; + } + + if (set_settings & HILINK_LINK_SET_AUTONEG) { + err = hinic_set_autoneg(nic_dev->hwdev, + (autoneg == AUTONEG_ENABLE)); + if (err) + nicif_err(nic_dev, drv, netdev, "%s autoneg failed\n", + (autoneg == AUTONEG_ENABLE) ? + "Enable" : "Disable"); + else + nicif_info(nic_dev, drv, netdev, "%s autoneg success\n", + (autoneg == AUTONEG_ENABLE) ? 
+ "Enable" : "Disable"); + } + + if (!err && (set_settings & HILINK_LINK_SET_SPEED)) { + err = hinic_set_speed(nic_dev->hwdev, speed_level); + if (err) + nicif_err(nic_dev, drv, netdev, "Set speed %d failed\n", + speed); + else + nicif_info(nic_dev, drv, netdev, "Set speed %d success\n", + speed); + } + + return err; +} + +static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + u32 set_settings = 0; + int err = 0; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Not support set link settings\n"); + return -EOPNOTSUPP; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get current settings\n"); + return -EAGAIN; + } + + /* Alwayse set autonegation */ + if (port_info.autoneg_cap) + set_settings |= HILINK_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (!port_info.autoneg_cap) { + nicif_err(nic_dev, drv, netdev, "Not support autoneg\n"); + return -EOPNOTSUPP; + } + } else if (speed != (u32)SPEED_UNKNOWN) { + /* Set speed only when autoneg is disable */ + err = hinic_is_speed_legal(nic_dev, speed); + if (err) + return err; + + set_settings |= HILINK_LINK_SET_SPEED; + } else { + nicif_err(nic_dev, drv, netdev, "Need to set speed when autoneg is off\n"); + return -EOPNOTSUPP; + } + + if (set_settings) + err = hinic_set_settings_to_hw(nic_dev, set_settings, + autoneg, speed); + else + nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); + + return err; +} + +static int hinic_set_settings(struct net_device *netdev, + struct ethtool_cmd *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->autoneg, + ethtool_cmd_speed(link_settings)); +} + +static int hinic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, cmd->base.autoneg, + cmd->base.speed); +} + +static void hinic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, HINIC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + + err = hinic_get_mgmt_version(nic_dev->hwdev, mgmt_ver); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); + return; + } + + err = snprintf(info->fw_version, sizeof(info->fw_version), + "%s", mgmt_ver); + if (err <= 0 || err >= (int)sizeof(info->fw_version)) + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf fw_version, function return(%d) and dest_len(%d)\n", + err, (int)sizeof(info->fw_version)); +} + +static u32 hinic_get_msglevel(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void hinic_set_msglevel(struct net_device *netdev, u32 data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->msg_enable = data; + + nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); +} + +static int hinic_nway_reset(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_port_info 
port_info = {0}; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function don't support to restart autoneg\n"); + return -EOPNOTSUPP; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Get autonegotiation state failed\n"); + return -EFAULT; + } + + if (!port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, + "Autonegotiation is off, don't support to restart it\n"); + return -EINVAL; + } + + err = hinic_set_autoneg(nic_dev->hwdev, true); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Restart autonegotiation failed\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Restart autonegotiation success\n"); + + return 0; +} + +static void hinic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; + ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; + ring->rx_pending = nic_dev->rxqs[0].q_depth; + ring->tx_pending = nic_dev->txqs[0].q_depth; +} + +static void hinic_update_qp_depth(struct hinic_nic_dev *nic_dev, + u16 sq_depth, u16 rq_depth) +{ + u16 i; + + nic_dev->sq_depth = sq_depth; + nic_dev->rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->txqs[i].q_depth = sq_depth; + nic_dev->txqs[i].q_mask = sq_depth - 1; + nic_dev->rxqs[i].q_depth = rq_depth; + nic_dev->rxqs[i].q_mask = rq_depth - 1; + } +} + +static int hinic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 new_sq_depth, new_rq_depth; + int err; + + if (ring->rx_jumbo_pending || ring->rx_mini_pending) { + nicif_err(nic_dev, drv, netdev, + "Unsupported rx_jumbo_pending/rx_mini_pending\n"); + return -EINVAL; + } + + if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || + ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || + ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || + ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of range [%d-%d]\n", + HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH); + return -EINVAL; + } + + new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending)); + new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending)); + + if (new_sq_depth == nic_dev->sq_depth && + new_rq_depth == nic_dev->rq_depth) + return 0; + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %d/%d to %d/%d\n", + nic_dev->sq_depth, nic_dev->rq_depth, + new_sq_depth, new_rq_depth); + + if (!netif_running(netdev)) { + hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + } else { + nicif_info(nic_dev, drv, netdev, "Restarting netdev\n"); + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + + hinic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + return -EFAULT; + } + } + + return 0; +} + +static u16 hinic_max_channels(struct hinic_nic_dev *nic_dev) +{ + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; +} + +static u16 hinic_curr_channels(struct hinic_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) + return nic_dev->num_rss ? 
nic_dev->num_rss : 1; + else + return min_t(u16, hinic_max_channels(nic_dev), + nic_dev->rss_limit); +} + +static void hinic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = hinic_max_channels(nic_dev); + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = hinic_curr_channels(nic_dev); +} + +void hinic_update_num_qps(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 num_qps; + u8 tcs; + + /* change num_qps to change counter in ethtool -S */ + tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + num_qps = (u16)(nic_dev->rss_limit * (tcs ? tcs : 1)); + nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_qps); +} + +static int hinic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int count = channels->combined_count; + int err; + + if (!count) { + nicif_err(nic_dev, drv, netdev, + "Unsupported combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count || channels->rx_count || channels->other_count) { + nicif_err(nic_dev, drv, netdev, + "Setting rx/tx/other count not supported\n"); + return -EINVAL; + } + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "This function don't support RSS, only support 1 queue pair\n"); + return -EOPNOTSUPP; + } + + if (count > hinic_max_channels(nic_dev)) { + nicif_err(nic_dev, drv, netdev, + "Combined count %d exceed limit %d\n", + count, hinic_max_channels(nic_dev)); + return -EINVAL; + } + + nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n", + nic_dev->rss_limit, count); + nic_dev->rss_limit = (u16)count; + + if (netif_running(netdev)) { + nicif_info(nic_dev, drv, netdev, "Restarting netdev\n"); + err = hinic_close(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } + /* Discard user configured rss */ + hinic_set_default_rss_indir(netdev); + + err = hinic_open(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + return -EFAULT; + } + } else { + /* Discard user configured rss */ + hinic_set_default_rss_indir(netdev); + + hinic_update_num_qps(netdev); + } + + return 0; +} + +static int hinic_get_sset_count(struct net_device *netdev, int sset) +{ + int count = 0, q_num = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return ARRAY_LEN(hinic_test_strings); + case ETH_SS_STATS: + q_num = nic_dev->num_qps; + count = ARRAY_LEN(hinic_netdev_stats) + + ARRAY_LEN(hinic_nic_dev_stats) + + (ARRAY_LEN(hinic_tx_queue_stats) + + ARRAY_LEN(hinic_rx_queue_stats)) * q_num; + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(hinic_function_stats); + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) + count += ARRAY_LEN(hinic_port_stats); + + return count; + default: + return -EOPNOTSUPP; + } +} + +#define COALESCE_ALL_QUEUE 0xFFFF +#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 9 + +static int __hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, 
u16 queue) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info *interrupt_info; + + if (queue == COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + interrupt_info = &nic_dev->intr_coalesce[0]; + } else { + if (queue >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + interrupt_info = &nic_dev->intr_coalesce[queue]; + } + + /* coalescs_timer is in unit of 9us */ + coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + /* coalescs_frams is in unit of 8 */ + coal->rx_max_coalesced_frames = interrupt_info->pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + + /* tx/rx use the same interrupt */ + coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; + coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; + coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; + + coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; + coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_high = + interrupt_info->rx_pending_limt_high * + COALESCE_PENDING_LIMIT_UNIT; + + coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; + coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_low = + interrupt_info->rx_pending_limt_low * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +} + +static int set_queue_coalesce(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_intr_coal_info *coal) +{ + struct hinic_intr_coal_info *intr_coal; + struct nic_interrupt_info interrupt_info = {0}; + struct net_device *netdev = nic_dev->netdev; + int err; + + intr_coal = &nic_dev->intr_coalesce[q_id]; + if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg || + intr_coal->pending_limt != coal->pending_limt) + intr_coal->user_set_intr_coal_flag = 1; + + intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; + intr_coal->pending_limt = coal->pending_limt; + intr_coal->pkt_rate_low = coal->pkt_rate_low; + intr_coal->rx_usecs_low = coal->rx_usecs_low; + intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low; + intr_coal->pkt_rate_high = coal->pkt_rate_high; + intr_coal->rx_usecs_high = coal->rx_usecs_high; + intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high; + + /* netdev not running or qp not in using, + * don't need to set coalesce to hw + */ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->num_qps || nic_dev->adaptive_rx_coal) + return 0; + + interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx; + interrupt_info.lli_set = 0; + interrupt_info.interrupt_coalesc_set = 1; + interrupt_info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; + interrupt_info.pending_limt = intr_coal->pending_limt; + interrupt_info.resend_timer_cfg = intr_coal->resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + intr_coal->coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt; + err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info); + if (err) + nicif_warn(nic_dev, drv, netdev, + "Failed to set queue%d coalesce", q_id); + + return err; +} + +static int is_coalesce_legal(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct ethtool_coalesce tmp_coal = {0}; + + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "tx-usecs must be equal 
to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + + tmp_coal.cmd = coal->cmd; + tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; + tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; + tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; + tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; + tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + tmp_coal.pkt_rate_low = coal->pkt_rate_low; + tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; + tmp_coal.rx_max_coalesced_frames_low = + coal->rx_max_coalesced_frames_low; + + tmp_coal.pkt_rate_high = coal->pkt_rate_high; + tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; + tmp_coal.rx_max_coalesced_frames_high = + coal->rx_max_coalesced_frames_high; + + if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) { + nicif_err(nic_dev, drv, netdev, + "Only support to change rx/tx-usecs and rx/tx-frames\n"); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames out of range[%d-%d]\n", 0, + COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesced_frames_high(%u) must more than coalesced_frames_low(%u), after dividing %d frames unit\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, netdev, + "pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +#define CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if ((coal)->item % (unit)) \ + nicif_warn(nic_dev, drv, netdev, \ 
+ "%s in %d units, change to %d\n", \ + #item, (unit), ALIGN_DOWN((coal)->item, unit));\ +} while (0) + +#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %d %s\n", \ + #item, (ori_val) * (unit), \ + ALIGN_DOWN((coal)->item, unit), (obj_str));\ +} while (0) + +#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str));\ +} while (0) + +static int __hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info intr_coal = {0}; + struct hinic_intr_coal_info *ori_intr_coal; + char obj_str[OBJ_STR_MAX_LEN] = {0}; + u16 i; + int err = 0; + + err = is_coalesce_legal(netdev, coal); + if (err) + return err; + + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + + if (queue == COALESCE_ALL_QUEUE) { + ori_intr_coal = &nic_dev->intr_coalesce[0]; + err = snprintf(obj_str, sizeof(obj_str), "for netdev"); + if (err <= 0 || err >= OBJ_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf string, function return(%d) and dest_len(%d)\n", + err, OBJ_STR_MAX_LEN); + return -EFAULT; + } + } else { + ori_intr_coal = &nic_dev->intr_coalesce[queue]; + err = snprintf(obj_str, sizeof(obj_str), "for queue %d", queue); + if (err <= 0 || err >= OBJ_STR_MAX_LEN) { + nicif_err(nic_dev, drv, netdev, + "Failed to snprintf string, function return(%d) and dest_len(%d)\n", + err, OBJ_STR_MAX_LEN); + return -EFAULT; + } + } + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->coalesce_timer_cfg, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->pending_limt, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + ori_intr_coal->pkt_rate_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_high, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + ori_intr_coal->pkt_rate_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_low, obj_str); + + intr_coal.coalesce_timer_cfg = + (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + + nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + + intr_coal.pkt_rate_high = coal->pkt_rate_high; + intr_coal.rx_usecs_high = + (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); + 
intr_coal.rx_pending_limt_high = + (u8)(coal->rx_max_coalesced_frames_high / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal.pkt_rate_low = coal->pkt_rate_low; + intr_coal.rx_usecs_low = + (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); + intr_coal.rx_pending_limt_low = + (u8)(coal->rx_max_coalesced_frames_low / + COALESCE_PENDING_LIMIT_UNIT); + + /* coalesce timer or pending set to zero will disable coalesce */ + if (!nic_dev->adaptive_rx_coal && + (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + if (queue == COALESCE_ALL_QUEUE) { + for (i = 0; i < nic_dev->max_qps; i++) + set_queue_coalesce(nic_dev, i, &intr_coal); + } else { + if (queue >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + set_queue_coalesce(nic_dev, queue, &intr_coal); + } + + return 0; +} + +static int hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, queue); +} + +static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, queue); +} + +static void get_drv_queue_stats(struct hinic_nic_dev *nic_dev, u64 *data) +{ + struct hinic_txq_stats txq_stats; + struct hinic_rxq_stats rxq_stats; + u16 i = 0, j = 0, qid = 0; + char *p; + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + if (!nic_dev->txqs) + break; + + hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); + for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) { + p = (char *)(&txq_stats) + + hinic_tx_queue_stats[j].offset; + data[i] = (hinic_tx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + if (!nic_dev->rxqs) + break; + + hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); + for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) { + p = (char *)(&rxq_stats) + + hinic_rx_queue_stats[j].offset; + data[i] = (hinic_rx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } +} + +static void hinic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + struct hinic_phy_port_stats *port_stats; + struct hinic_nic_stats *nic_stats; + struct hinic_vport_stats vport_stats = {0}; + u16 i = 0, j = 0; + char *p; + int err; + + net_stats = dev_get_stats(netdev, &temp); + for (j = 0; j < ARRAY_LEN(hinic_netdev_stats); j++, i++) { + p = (char *)(net_stats) + hinic_netdev_stats[j].offset; + data[i] = (hinic_netdev_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + nic_stats = &nic_dev->stats; + for (j = 0; j < ARRAY_LEN(hinic_nic_dev_stats); j++, i++) { + p = (char *)(nic_stats) + hinic_nic_dev_stats[j].offset; + data[i] = (hinic_nic_dev_stats[j].size == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to get function stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) { + p = (char *)(&vport_stats) + + hinic_function_stats[j].offset; + data[i] = (hinic_function_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) { + nicif_err(nic_dev, drv, netdev, + "Failed to malloc port stats\n"); + memset(&data[i], 0, + ARRAY_LEN(hinic_port_stats) * sizeof(*data)); + i += ARRAY_LEN(hinic_port_stats); + goto get_drv_stats; + } + + err = hinic_get_phy_port_stats(nic_dev->hwdev, port_stats); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to get port stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) { + p = (char *)(port_stats) + hinic_port_stats[j].offset; + data[i] = (hinic_port_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + kfree(port_stats); + } + +get_drv_stats: + get_drv_queue_stats(nic_dev, data + i); +} + +static void hinic_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u16 i = 0, j = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + char *p = (char *)data; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings)); + return; + case ETH_SS_STATS: + for (i = 0; i < ARRAY_LEN(hinic_netdev_stats); i++) { + memcpy(p, hinic_netdev_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < ARRAY_LEN(hinic_nic_dev_stats); i++) { + memcpy(p, hinic_nic_dev_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) { + memcpy(p, hinic_function_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev) && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) { + memcpy(p, hinic_port_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < nic_dev->num_qps; i++) { + for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) { + sprintf(p, hinic_tx_queue_stats[j].name, i); + p += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < nic_dev->num_qps; i++) { + for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) { + sprintf(p, hinic_rx_queue_stats[j].name, i); + p += ETH_GSTRING_LEN; + } + } + + return; + default: + nicif_err(nic_dev, drv, netdev, + "Invalid string set %d", stringset); + return; + } +} + +static int hinic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 port; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function don't support to set LED status\n"); + return -EOPNOTSUPP; + } + + port = hinic_physical_port_id(nic_dev->hwdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = hinic_set_led_status(nic_dev->hwdev, port, + HINIC_LED_TYPE_LINK, + HINIC_LED_MODE_FORCE_2HZ); + if (err) + nicif_err(nic_dev, drv, netdev, + "Set LED blinking in 2HZ failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Set LED blinking in 2HZ success\n"); + break; + + case ETHTOOL_ID_INACTIVE: + err = 
hinic_reset_led_status(nic_dev->hwdev, port); + if (err) + nicif_err(nic_dev, drv, netdev, + "Reset LED to original status failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Reset LED to original status success\n"); + + break; + + default: + return -EOPNOTSUPP; + } + + if (err) + return -EFAULT; + else + return 0; +} + +static void hinic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get pauseparam from hw\n"); + } else { + pause->autoneg = nic_pause.auto_neg; + pause->rx_pause = nic_pause.rx_pause; + pause->tx_pause = nic_pause.tx_pause; + } +} + +static int hinic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + struct nic_port_info port_info = {0}; + int err; + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Not support to set pause parameters\n"); + return -EOPNOTSUPP; + } + + err = hinic_get_port_info(nic_dev->hwdev, &port_info); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to get auto-negotiation state\n"); + return -EFAULT; + } + + if (pause->autoneg != port_info.autoneg_state) { + nicif_err(nic_dev, drv, netdev, + "To change autoneg please use: ethtool -s autoneg \n"); + return -EOPNOTSUPP; + } + + nic_pause.auto_neg = pause->autoneg; + nic_pause.rx_pause = pause->rx_pause; + nic_pause.tx_pause = pause->tx_pause; + + err = hinic_set_pause_info(nic_dev->hwdev, nic_pause); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); + return err; + } + + nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n", + pause->tx_pause ? "on" : "off", + pause->rx_pause ? 
"on" : "off"); + + return 0; +} + +static int hinic_run_lp_test(struct hinic_nic_dev *nic_dev, u32 test_time) +{ + u32 i; + u8 j; + u32 cnt = test_time * 5; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tmp = NULL; + u8 *test_data = NULL; + u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; + struct net_device *netdev = nic_dev->netdev; + + skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); + if (!skb_tmp) { + nicif_err(nic_dev, drv, netdev, + "Alloc xmit skb template failed for loopback test\n"); + return -ENOMEM; + } + + test_data = __skb_put(skb_tmp, LP_PKT_LEN); + + memset(test_data, 0xFF, (2 * ETH_ALEN)); + test_data[ETH_ALEN] = 0xFE; + test_data[2 * ETH_ALEN] = 0x08; + test_data[2 * ETH_ALEN + 1] = 0x0; + + for (i = ETH_HLEN; i < LP_PKT_LEN; i++) + test_data[i] = i & 0xFF; + + skb_tmp->queue_mapping = 0; + skb_tmp->ip_summed = CHECKSUM_COMPLETE; + skb_tmp->dev = netdev; + + for (i = 0; i < cnt; i++) { + nic_dev->lb_test_rx_idx = 0; + memset(lb_test_rx_buf, 0, (LP_PKT_CNT * LP_PKT_LEN)); + + for (j = 0; j < LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Copy skb failed for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[LP_PKT_LEN - 1] = j; + + if (hinic_lb_xmit_frame(skb, netdev)) { + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Xmit pkt failed for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkt received to rx buffer */ + msleep(200); + + for (j = 0; j < LP_PKT_CNT; j++) { + if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)), + skb_tmp->data, (LP_PKT_LEN - 1)) || + (*(lb_test_rx_buf + ((j * LP_PKT_LEN) + + (LP_PKT_LEN - 1))) != j)) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * LP_PKT_CNT)), + (LP_PKT_LEN - 1), + *(lb_test_rx_buf + + (((j * LP_PKT_LEN) + + (LP_PKT_LEN - 1))))); + return -EIO; + } + } + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Loopback test succeed\n"); + return 0; +} + +void hinic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data, u32 test_time) +{ + int err = 0; + u8 link_status = 0; + u8 *lb_test_rx_buf = NULL; + struct ethtool_test test = {0}; + enum diag_test_index test_index = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + memset(data, 0, (DIAG_TEST_MAX * sizeof(u64))); + + /* Do not support loopback test when netdev is closed. 
*/ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, netdev, + "Do not support loopback test when netdev is closed.\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[PORT_DOWN_ERR_IDX] = 1; + return; + } + + test.flags = eth_test->flags; + + if (test_time == 0) + test_time = LP_DEFAULT_TIME; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) { + test_index = INTERNAL_LP_TEST; + + if (hinic_set_loopback_mode(nic_dev->hwdev, true)) { + nicif_err(nic_dev, drv, netdev, + "Failed to set port loopback mode before loopback test\n"); + err = 1; + goto resume_link; + } + } else { + test_index = EXTERNAL_LP_TEST; + } + + lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); + if (!lb_test_rx_buf) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc rx buffer for loopback test\n"); + err = 1; + } else { + nic_dev->lb_test_rx_buf = lb_test_rx_buf; + nic_dev->lb_pkt_len = LP_PKT_LEN; + set_bit(HINIC_LP_TEST, &nic_dev->flags); + + if (hinic_run_lp_test(nic_dev, test_time)) + err = 1; + + clear_bit(HINIC_LP_TEST, &nic_dev->flags); + msleep(100); + vfree(lb_test_rx_buf); + nic_dev->lb_test_rx_buf = NULL; + } + + if (!(test.flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (hinic_set_loopback_mode(nic_dev->hwdev, false)) { + nicif_err(nic_dev, drv, netdev, + "Failed to cancel port loopback mode after loopback test\n"); + err = 1; + + goto resume_link; + } + } + +resume_link: + if (err) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_index] = 1; + } + + netif_tx_wake_all_queues(netdev); + err = hinic_get_link_state(nic_dev->hwdev, &link_status); + if (!err && link_status) + netif_carrier_on(netdev); +} + +static void hinic_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (!FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, netdev, "Current function don't support self test\n"); + return; + } + + hinic_lp_test(netdev, eth_test, data, 0); +} + +static int hinic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_type; + u8 sfp_type_ext; + int err; + + err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); + if (err) + return err; + + switch (sfp_type) { + case MODULE_TYPE_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case MODULE_TYPE_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE; + break; + case MODULE_TYPE_QSFP_PLUS: + if (sfp_type_ext >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE; + + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE; + } + break; + case MODULE_TYPE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = STD_SFP_INFO_MAX_SIZE; + break; + default: + nicif_warn(nic_dev, drv, netdev, + "Optical module unknown: 0x%x\n", sfp_type); + return -EINVAL; + } + + return 0; +} + +static int hinic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + u16 len; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + + err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len); + if (err) + return 
err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + +static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, + struct nic_rss_type *rss_type) +{ + u8 rss_l4_en = 0; + + switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_l4_en = 0; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_l4_en = 1; + break; + default: + return -EINVAL; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int hinic_set_rss_hash_opts(struct hinic_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type *rss_type = &nic_dev->rss_type; + int err; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS is disable, not support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | + RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) + return -EINVAL; + + err = hinic_get_rss_type(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); + return -EFAULT; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + err = set_l4_rss_hash_ops(cmd, rss_type); + if (err) + return err; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupported flow type\n"); + return -EINVAL; + } + + err = hinic_set_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx, + *rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n"); + + return 0; +} + +static int hinic_get_rss_hash_opts(struct hinic_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type rss_type = {0}; + int err; + + cmd->data = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + return 0; + + err = hinic_get_rss_type(nic_dev->hwdev, nic_dev->rss_tmpl_idx, + &rss_type); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get rss type\n"); + return err; + } + + cmd->data = RXH_IP_SRC | RXH_IP_DST; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (rss_type.tcp_ipv4) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (rss_type.tcp_ipv6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (rss_type.udp_ipv4) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (rss_type.udp_ipv6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupported flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + + static int hinic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) 
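/* only ETHTOOL_GRXRINGS (queue/ring count) and ETHTOOL_GRXFH (RSS hash fields) are handled below; other commands return -EOPNOTSUPP */ 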
{ + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->num_qps; + break; + case ETHTOOL_GRXFH: + err = hinic_get_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = hinic_set_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) +{ + return HINIC_RSS_INDIR_SIZE; +} + +static int __set_rss_rxfh(struct net_device *netdev, + const u32 *indir, const u8 *key) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err, i; + + if (indir) { + if (!nic_dev->rss_indir_user) { + nic_dev->rss_indir_user = + kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, + GFP_KERNEL); + if (!nic_dev->rss_indir_user) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc memory for rss_indir_usr\n"); + return -ENOMEM; + } + } + + memcpy(nic_dev->rss_indir_user, indir, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, indir); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to set rss indir table\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Change rss indir success\n"); + } + + if (key) { + if (!nic_dev->rss_hkey_user) { + /* We request double spaces for the hash key, + * the second one holds the key of Big Edian + * format. + */ + nic_dev->rss_hkey_user = + kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL); + + if (!nic_dev->rss_hkey_user) { + nicif_err(nic_dev, drv, netdev, + "Failed to alloc memory for rss_hkey_user\n"); + return -ENOMEM; + } + + /* The second space is for big edian hash key */ + nic_dev->rss_hkey_user_be = + (u32 *)(nic_dev->rss_hkey_user + + HINIC_RSS_KEY_SIZE); + } + + memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + memcpy(nic_dev->rss_hkey_user_be, key, HINIC_RSS_KEY_SIZE); + for (i = 0; i < HINIC_RSS_KEY_SIZE / 4; i++) + nic_dev->rss_hkey_user_be[i] = + cpu_to_be32(nic_dev->rss_hkey_user_be[i]); + + err = hinic_rss_set_template_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, key); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, netdev, "Change rss key success\n"); + } + + return 0; +} + +static u32 hinic_get_rxfh_key_size(struct net_device *netdev) +{ + return HINIC_RSS_KEY_SIZE; +} + +static int hinic_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + return -EOPNOTSUPP; + + if (hfunc) { + u8 hash_engine_type = 0; + + err = hinic_rss_get_hash_engine(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, + &hash_engine_type); + if (err) + return -EFAULT; + + *hfunc = hash_engine_type ? 
ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; + } + + if (indir) { + err = hinic_rss_get_indir_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, indir); + if (err) + return -EFAULT; + } + + if (key) + err = hinic_rss_get_template_tbl(nic_dev->hwdev, + nic_dev->rss_tmpl_idx, key); + + return err; +} + +static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Not support to set rss parameters when rss is disable\n"); + return -EOPNOTSUPP; + } + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) && indir) { + nicif_err(nic_dev, drv, netdev, + "Not support to set indir when DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "Not support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + HINIC_RSS_HASH_ENGINE_TYPE_XOR : + HINIC_RSS_HASH_ENGINE_TYPE_TOEP; + err = hinic_rss_set_hash_engine + (nic_dev->hwdev, nic_dev->rss_tmpl_idx, + nic_dev->rss_hash_engine); + if (err) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Change hfunc to RSS_HASH_%s success\n", + (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); + } + + err = __set_rss_rxfh(netdev, indir, key); + + return err; +} + +static const struct ethtool_ops hinic_ethtool_ops = { + .get_link_ksettings = hinic_get_link_ksettings, + .set_link_ksettings = hinic_set_link_ksettings, + .get_settings = hinic_get_settings, + .set_settings = hinic_set_settings, + .get_drvinfo = hinic_get_drvinfo, + .get_msglevel = hinic_get_msglevel, + .set_msglevel = hinic_set_msglevel, + .nway_reset = hinic_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, + .set_ringparam = hinic_set_ringparam, + .get_pauseparam = hinic_get_pauseparam, + .set_pauseparam = hinic_set_pauseparam, + .get_sset_count = hinic_get_sset_count, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, + .get_ethtool_stats = hinic_get_ethtool_stats, + .get_strings = hinic_get_strings, + .set_phys_id = hinic_set_phys_id, + .self_test = hinic_diag_test, + .get_rxnfc = hinic_get_rxnfc, + .set_rxnfc = hinic_set_rxnfc, + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, + .get_module_info = hinic_get_module_info, + .get_module_eeprom = hinic_get_module_eeprom, + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +}; + +static const struct ethtool_ops hinicvf_ethtool_ops = { + .get_link_ksettings = hinic_get_link_ksettings, + .get_drvinfo = hinic_get_drvinfo, + .get_msglevel = hinic_get_msglevel, + .set_msglevel = hinic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, + .set_ringparam = hinic_set_ringparam, + .get_sset_count = hinic_get_sset_count, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, + .get_ethtool_stats = hinic_get_ethtool_stats, + .get_strings = hinic_get_strings, + .get_rxnfc = 
hinic_get_rxnfc, + .set_rxnfc = hinic_set_rxnfc, + + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, +}; + +void hinic_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &hinic_ethtool_ops); +} + +void hinicvf_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &hinicvf_ethtool_ops); +} /*lint -e766*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..9f50bc27a8561d609f2471cb218621f7cd039ee0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h @@ -0,0 +1,770 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_H_ +#define HINIC_HW_H_ + +enum hinic_mod_type { + HINIC_MOD_COMM = 0, /* HW communication module */ + HINIC_MOD_L2NIC = 1, /* L2NIC module */ + HINIC_MOD_ROCE = 2, + HINIC_MOD_IWARP = 3, + HINIC_MOD_TOE = 4, + HINIC_MOD_FLR = 5, + HINIC_MOD_FCOE = 6, + HINIC_MOD_CFGM = 7, /* Configuration module */ + HINIC_MOD_CQM = 8, + HINIC_MOD_VSWITCH = 9, + HINIC_MOD_FC = 10, + HINIC_MOD_OVS = 11, + HINIC_MOD_FIC = 12, + HINIC_MOD_MIGRATE = 13, + HINIC_MOD_HILINK = 14, + HINIC_MOD_HW_MAX = 16, /* hardware max module id */ + + /* Software module id, for PF/VF and multi-host */ + HINIC_MOD_SW_FUNC = 17, + HINIC_MOD_MAX, +}; + +struct hinic_cmd_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; +}; + +enum hinic_ack_type { + HINIC_ACK_TYPE_CMDQ, + HINIC_ACK_TYPE_SHARE_CQN, + HINIC_ACK_TYPE_APP_CQN, + + HINIC_MOD_ACK_MAX = 15, + +}; + +#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* for pxe, ovs */ +int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +int hinic_mbox_to_vf(void *hwdev, enum hinic_mod_type mod, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +int hinic_api_cmd_write_nack(void *hwdev, u8 dest, + void *cmd, u16 size); + +int hinic_api_cmd_read_ack(void *hwdev, u8 dest, + void *cmd, u16 size, void *ack, u16 ack_size); +/* PF/VF send cmd to ucode by cmdq, and return if success. + * timeout=0, use default timeout. + */ +int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + u64 *out_param, u32 timeout); +/* 1. whether need the timeout parameter + * 2. 
out_param indicates the status of the microcode processing command + */ + +/* PF/VF send cmd to ucode by cmdq, and return detailed result. + * timeout=0, use default timeout. + */ +int hinic_cmdq_detail_resp(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in, + struct hinic_cmd_buf *buf_out, u32 timeout); + +/* PF/VF send cmd to ucode by cmdq, and return immediately */ +int hinic_cmdq_async(void *hwdev, enum hinic_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmd_buf *buf_in); + +int hinic_ppf_tmr_start(void *hwdev); +int hinic_ppf_tmr_stop(void *hwdev); + +/* CLP */ +int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +/* FOR windows */ +bool hinic_eq_intr_handler(void *hwdev, int msix_entry_idx); + +enum hinic_ceq_event { + HINIC_NON_L2NIC_SCQ, + HINIC_NON_L2NIC_ECQ, + HINIC_NON_L2NIC_NO_CQ_EQ, + HINIC_CMDQ, + HINIC_L2NIC_SQ, + HINIC_L2NIC_RQ, + HINIC_MAX_CEQ_EVENTS, +}; + +typedef void (*hinic_ceq_event_cb)(void *handle, u32 ceqe_data); +int hinic_ceq_register_cb(void *hwdev, enum hinic_ceq_event event, + hinic_ceq_event_cb callback); +void hinic_ceq_unregister_cb(void *hwdev, enum hinic_ceq_event event); + +enum hinic_aeq_type { + HINIC_HW_INTER_INT = 0, + HINIC_MBX_FROM_FUNC = 1, + HINIC_MSG_FROM_MGMT_CPU = 2, + HINIC_API_RSP = 3, + HINIC_API_CHAIN_STS = 4, + HINIC_MBX_SEND_RSLT = 5, + HINIC_MAX_AEQ_EVENTS +}; + +enum hinic_aeq_sw_type { + HINIC_STATELESS_EVENT = 0, + HINIC_STATEFULL_EVENT = 1, + HINIC_MAX_AEQ_SW_EVENTS +}; + +typedef void (*hinic_aeq_hwe_cb)(void *handle, u8 *data, u8 size); +int hinic_aeq_register_hw_cb(void *hwdev, enum hinic_aeq_type event, + hinic_aeq_hwe_cb hwe_cb); +void hinic_aeq_unregister_hw_cb(void *hwdev, enum hinic_aeq_type event); + +typedef u8 (*hinic_aeq_swe_cb)(void *handle, u8 event, u64 data); +int hinic_aeq_register_swe_cb(void *hwdev, enum hinic_aeq_sw_type event, + hinic_aeq_swe_cb aeq_swe_cb); +void hinic_aeq_unregister_swe_cb(void *hwdev, enum hinic_aeq_sw_type event); + +typedef void (*hinic_mgmt_msg_cb)(void *hwdev, void *pri_handle, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic_register_mgmt_msg_cb(void *hwdev, + enum hinic_mod_type mod, void *pri_handle, + hinic_mgmt_msg_cb callback); +void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod); + +struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev); +void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *buf); + +int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); +void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); +void hinic_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base); + +struct nic_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +int hinic_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info); +int hinic_set_interrupt_cfg_direct(void *hwdev, + struct nic_interrupt_info *interrupt_info); +int hinic_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info); + +/* The driver code implementation interface */ +void hinic_misx_intr_clear_resend_bit(void *hwdev, + u16 msix_idx, u8 clear_resend_en); + +struct hinic_sq_attr { + u8 
dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr); + +int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz); +int hinic_clean_root_ctxt(void *hwdev); +void hinic_record_pcie_error(void *hwdev); + +int hinic_func_rx_tx_flush(void *hwdev); + +int hinic_func_tmr_bitmap_set(void *hwdev, bool enable); + +struct hinic_init_para { + /* Record hinic_pcidev or NDIS_Adapter pointer address */ + void *adapter_hdl; + /* Record pcidev or Handler pointer address + * for example: ioremap interface input parameter + */ + void *pcidev_hdl; + /* Record pcidev->dev or Handler pointer address, which is used for + * DMA address allocation and dev_err printing + */ + void *dev_hdl; + + void *cfg_reg_base; /* Configure virtual address, bar0/1 */ + /* interrupt configuration register address, bar2/3 */ + void *intr_reg_base; + u64 db_base_phy; + void *db_base; /* the doorbell address, bar4/5 higher 4M space */ + void *dwqe_mapping; /* direct wqe 4M, follow the doorbell address */ + void **hwdev; + void *chip_node; + /* In bmgw x86 host, the driver can't send messages to the mgmt cpu + * directly, so it needs to transmit them via the ppf mbox to the + * bmgw arm host. + */ + void *ppf_hwdev; +}; + +#define MAX_FUNCTION_NUM 512 +#define HINIC_MAX_PF_NUM 16 +#define HINIC_MAX_COS 8 +#define INIT_FAILED 0 +#define INIT_SUCCESS 1 +#define MAX_DRV_BUF_SIZE 4096 + +struct hinic_cmd_get_light_module_abs { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define MODULE_TYPE_SFP 0x3 +#define MODULE_TYPE_QSFP28 0x11 +#define MODULE_TYPE_QSFP 0x0C +#define MODULE_TYPE_QSFP_PLUS 0x0D + +#define SFP_INFO_MAX_SIZE 512 +struct hinic_cmd_get_sfp_qsfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 out_len; + u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE]; +}; + +#define STD_SFP_INFO_MAX_SIZE 640 +struct hinic_cmd_get_std_sfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 eeprom_len; + u32 rsvd; + u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; +}; + +#define HINIC_MAX_PORT_ID 4 + +struct hinic_port_routine_cmd { + int up_send_sfp_info; + int up_send_sfp_abs; + + struct hinic_cmd_get_sfp_qsfp_info sfp_info; + struct hinic_cmd_get_light_module_abs abs; +}; + +struct card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + void *log_info; + void *dbgtool_info; + void *func_handle_array[MAX_FUNCTION_NUM]; + unsigned char dp_bus_num; + u8 func_num; + struct attribute dbgtool_attr_file; + + bool cos_up_setted; + u8 cos_up[HINIC_MAX_COS]; + bool ppf_state; + u8 pf_bus_num[HINIC_MAX_PF_NUM]; + bool disable_vf_load[HINIC_MAX_PF_NUM]; + u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM]; + u32 vf_mbx_rand_id[MAX_FUNCTION_NUM]; + struct hinic_port_routine_cmd rt_cmd[HINIC_MAX_PORT_ID]; + + /* mutex used for copying sfp info */ + struct mutex sfp_mutex; +}; + +enum hinic_hwdev_init_state { + HINIC_HWDEV_NONE_INITED = 0, + HINIC_HWDEV_CLP_INITED, + HINIC_HWDEV_AEQ_INITED, + HINIC_HWDEV_MGMT_INITED, + HINIC_HWDEV_MBOX_INITED, + HINIC_HWDEV_CMDQ_INITED, + HINIC_HWDEV_COMM_CH_INITED, + HINIC_HWDEV_ALL_INITED, + HINIC_HWDEV_MAX_INVAL_INITED +}; + +enum hinic_func_mode { + FUNC_MOD_NORMAL_HOST, + FUNC_MOD_MULTI_BM_MASTER, + FUNC_MOD_MULTI_BM_SLAVE, + FUNC_MOD_MULTI_VM_MASTER, + FUNC_MOD_MULTI_VM_SLAVE, +};
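/*
 * Editor's illustrative sketch, not part of this patch: callers typically
 * gate feature paths on how far hwdev bring-up has progressed, combining
 * the hinic_hwdev_init_state values above with
 * hinic_is_hwdev_mod_inited() (declared further down in this header).
 * The helper name below is hypothetical.
 */
static inline bool example_cmdq_ready(void *hwdev)
{
	/* true once command-queue initialization has completed */
	return hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CMDQ_INITED);
}
+ +enum hinic_func_cap { + /* send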
message to mgmt cpu directly */ + HINIC_FUNC_MGMT = 1 << 0, + /* setting port attribute, pause/speed etc. */ + HINIC_FUNC_PORT = 1 << 1, + /* Enable SR-IOV by default */ + HINIC_FUNC_SRIOV_EN_DFLT = 1 << 2, + /* Can't change VF num */ + HINIC_FUNC_SRIOV_NUM_FIX = 1 << 3, + /* Force pf/vf link up */ + HINIC_FUNC_FORCE_LINK_UP = 1 << 4, + /* Support rate limit */ + HINIC_FUNC_SUPP_RATE_LIMIT = 1 << 5, + HINIC_FUNC_SUPP_DFX_REG = 1 << 6, + /* Support promisc/multicast/all-multi */ + HINIC_FUNC_SUPP_RX_MODE = 1 << 7, + /* Set vf mac and vlan by ip link */ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8, + /* Support set mac by ifconfig */ + HINIC_FUNC_SUPP_CHANGE_MAC = 1 << 9, + /* OVS doesn't support SCTP_CRC/HW_VLAN/LRO */ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10, + /* OVS doesn't support encap-tso/encap-csum */ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM = 1 << 11, +}; + +#define FUNC_SUPPORT_MGMT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_MGMT)) +#define FUNC_SUPPORT_PORT_SETTING(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & HINIC_FUNC_PORT)) +#define FUNC_SUPPORT_DCB(hwdev) \ + (FUNC_SUPPORT_PORT_SETTING(hwdev)) +#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SRIOV_EN_DFLT)) +#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SRIOV_NUM_FIX)) +#define FUNC_SUPPORT_RX_MODE(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_RX_MODE)) +#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_RATE_LIMIT)) +#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN)) +#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_CHANGE_MAC)) +#define FUNC_FORCE_LINK_UP(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_FORCE_LINK_UP)) +#define FUNC_SUPPORT_SCTP_CRC(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_HW_VLAN(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_LRO(hwdev) \ + (!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_ENCAP_TSO_CSUM(hwdev) \ + (!!(hinic_get_func_feature_cap(hwdev) & \ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM)) + +int hinic_init_hwdev(struct hinic_init_para *para); +int hinic_set_vf_dev_cap(void *hwdev); +void hinic_free_hwdev(void *hwdev); +void hinic_shutdown_hwdev(void *hwdev); +void hinic_set_api_stop(void *hwdev); + +void hinic_ppf_hwdev_unreg(void *hwdev); +void hinic_ppf_hwdev_reg(void *hwdev, void *ppf_hwdev); + +void hinic_qps_num_set(void *hwdev, u32 num_qps); + +bool hinic_is_hwdev_mod_inited(void *hwdev, enum hinic_hwdev_init_state state); +enum hinic_func_mode hinic_get_func_mode(void *hwdev); +u64 hinic_get_func_feature_cap(void *hwdev); + +enum hinic_service_mode { + HINIC_WORK_MODE_OVS = 0, + HINIC_WORK_MODE_UNKNOWN, + HINIC_WORK_MODE_NIC, + HINIC_WORK_MODE_INVALID = 0xFF, +}; + +enum hinic_service_mode hinic_get_service_mode(void *hwdev); + +int hinic_slq_init(void *dev, int num_wqs); +void hinic_slq_uninit(void *dev); +int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, + u16 page_size, u64 *cla_addr, void **handle); +void hinic_slq_free(void *dev, void *handle); +u64 hinic_slq_get_addr(void *handle, u16 index); +u64 hinic_slq_get_first_pageaddr(void *handle);
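/*
 * Editor's illustrative sketch, not part of this patch: feature code is
 * expected to test the FUNC_SUPPORT_* helpers defined above before
 * touching the hardware. The helper below is hypothetical and only shows
 * the pattern.
 */
static inline int example_check_mac_change_allowed(void *hwdev)
{
	if (!FUNC_SUPPORT_CHANGE_MAC(hwdev))
		return -EOPNOTSUPP;	/* capability bit not granted by firmware */

	return 0;
}
+ +typedef void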
(*comm_up_self_msg_proc)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size); + +void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc); + +void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd); + +int hinic_micro_log_path_set(void *hwdev, u8 *log_path); +int hinic_micro_log_func_en(void *hwdev, u8 is_en); + +/* defined by chip */ +enum hinic_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +/* defined by chip */ +enum hinic_fault_err_level { + /* default err_level=FAULT_LEVEL_FATAL if + * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT || + * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT || + * FAULT_TYPE_UCODE + * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP + */ + FAULT_LEVEL_FATAL, + FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +enum hinic_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + HINIC_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as FAULT_TYPE_UCODE */ + HINIC_FAULT_SRC_HW_MGMT_UCODE, + /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + HINIC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + HINIC_FAULT_SRC_SW_MGMT_UCODE, + HINIC_FAULT_SRC_MGMT_WATCHDOG, + HINIC_FAULT_SRC_MGMT_RESET = 8, + HINIC_FAULT_SRC_HW_PHY_FAULT, + HINIC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20, + HINIC_FAULT_SRC_TYPE_MAX, +}; + +struct hinic_fault_sw_mgmt { + u8 event_id; + u64 event_data; +}; + +union hinic_fault_hw_mgmt { + u32 val[4]; + /* valid only type==FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum hinic_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only type==FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct hinic_fault_event { + /* enum hinic_fault_type */ + u8 type; + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd0[2]; + union hinic_fault_hw_mgmt event; +}; + +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +enum port_module_event_type { + HINIC_PORT_MODULE_CABLE_PLUGGED, + HINIC_PORT_MODULE_CABLE_UNPLUGGED, + HINIC_PORT_MODULE_LINK_ERR, + HINIC_PORT_MODULE_MAX_EVENT, +}; + +struct hinic_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +struct hinic_event_link_info { + u8 valid; + 
u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +struct hinic_mctp_host_info { + u8 major_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 data_len; + void *data; +}; + +/* multi host mgmt event sub cmd */ +enum hinic_mhost_even_type { + HINIC_MHOST_NIC_STATE_CHANGE = 1, +}; + +struct hinic_mhost_nic_func_state { + u8 status; + + u8 enable; + u16 func_idx; +}; + +struct hinic_multi_host_mgmt_event { + u16 sub_cmd; + u16 rsvd[3]; + + void *data; +}; + +enum hinic_event_type { + HINIC_EVENT_LINK_DOWN = 0, + HINIC_EVENT_LINK_UP = 1, + HINIC_EVENT_HEART_LOST = 2, + HINIC_EVENT_FAULT = 3, + HINIC_EVENT_NOTIFY_VF_DCB_STATE = 4, + HINIC_EVENT_DCB_STATE_CHANGE = 5, + HINIC_EVENT_FMW_ACT_NTC = 6, + HINIC_EVENT_PORT_MODULE_EVENT = 7, + HINIC_EVENT_MCTP_GET_HOST_INFO, + HINIC_EVENT_MULTI_HOST_MGMT, + HINIC_EVENT_INIT_MIGRATE_PF, + HINIC_EVENT_MGMT_WATCHDOG_EVENT, +}; + +struct hinic_event_info { + enum hinic_event_type type; + union { + struct hinic_event_link_info link_info; + struct hinic_fault_event info; + struct hinic_dcb_state dcb_state; + struct hinic_port_module_event module_event; + u8 vf_default_cos; + struct hinic_mctp_host_info mctp_info; + struct hinic_multi_host_mgmt_event mhost_mgmt; + }; +}; + +enum hinic_ucode_event_type { + HINIC_INTERNAL_TSO_FATAL_ERROR = 0x0, + HINIC_INTERNAL_LRO_FATAL_ERROR = 0x1, + HINIC_INTERNAL_TX_FATAL_ERROR = 0x2, + HINIC_INTERNAL_RX_FATAL_ERROR = 0x3, + HINIC_INTERNAL_OTHER_FATAL_ERROR = 0x4, + HINIC_NIC_FATAL_ERROR_MAX = 0x8, +}; + +typedef void (*hinic_event_handler)(void *handle, + struct hinic_event_info *event); + +/* only register once */ +void hinic_event_register(void *dev, void *pri_handle, + hinic_event_handler callback); +void hinic_event_unregister(void *dev); + +void hinic_detect_hw_present(void *hwdev); + +void hinic_set_chip_absent(void *hwdev); + +int hinic_get_chip_present_flag(void *hwdev); + +void hinic_set_pcie_order_cfg(void *handle); + +int hinic_get_mgmt_channel_status(void *handle); + +enum hinic_led_mode { + HINIC_LED_MODE_ON, + HINIC_LED_MODE_OFF, + HINIC_LED_MODE_FORCE_1HZ, + HINIC_LED_MODE_FORCE_2HZ, + HINIC_LED_MODE_FORCE_4HZ, + HINIC_LED_MODE_1HZ, + HINIC_LED_MODE_2HZ, + HINIC_LED_MODE_4HZ, + HINIC_LED_MODE_INVALID, +}; + +enum hinic_led_type { + HINIC_LED_TYPE_LINK, + HINIC_LED_TYPE_LOW_SPEED, + HINIC_LED_TYPE_HIGH_SPEED, + HINIC_LED_TYPE_INVALID, +}; + +int hinic_reset_led_status(void *hwdev, u8 port); +int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type, + enum hinic_led_mode mode); + +struct hinic_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; + u32 board_id; +}; + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info); +bool hinic_get_ppf_status(void *hwdev); + +struct hw_pf_info { + u16 glb_func_idx; + u16 glb_pf_vf_offset; + u8 p2p_idx; + u8 itf_idx; + u16 max_vfs; + u16 max_queue_num; + u16 ovs_q_vf_num[9]; + u32 resv; +}; + +struct hinic_hw_pf_infos { + u8 num_pfs; + u8 rsvd1[3]; + + struct hw_pf_info infos[16]; +}; + +int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos); +int hinic_set_ip_check(void *hwdev, bool ip_check_ctl); +int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); +int hinic_mbox_ppf_to_vf(void *hwdev, enum 
hinic_mod_type mod, u16 func_id, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_get_card_present_state(void *hwdev, bool *card_present_state); + +void hinic_migrate_report(void *dev); +int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port); +bool is_multi_vm_slave(void *hwdev); +bool is_multi_bm_slave(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h deleted file mode 100644 index 31b94d5d47f775b772f4996ddd5f35f2249e43ce..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_API_CMD_H -#define HINIC_HW_API_CMD_H - -#include -#include - -#include "hinic_hw_if.h" - -#define HINIC_API_CMD_PI_IDX_SHIFT 0 - -#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF - -#define HINIC_API_CMD_PI_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \ - HINIC_API_CMD_PI_##member##_SHIFT) - -#define HINIC_API_CMD_PI_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \ - << HINIC_API_CMD_PI_##member##_SHIFT))) - -#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 - -#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1 - -#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ - HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) - -#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ - (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ - HINIC_API_CMD_CHAIN_REQ_##member##_MASK) - -#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ - << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) - -#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 -#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 -#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 -#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 - -#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1 -#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3 -#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3 -#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3 - -#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ - HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) - -#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ - ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ - << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) - -#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0 -#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16 -#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24 -#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 - -#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F 
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F -#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F -#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF - -#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ - ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ - HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) - -#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 -#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 -#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 -#define HINIC_API_CMD_DESC_DEST_SHIFT 32 -#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 -#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 - -#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1 -#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1 -#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1 -#define HINIC_API_CMD_DESC_DEST_MASK 0x1F -#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF -#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF - -#define HINIC_API_CMD_DESC_SET(val, member) \ - ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ - HINIC_API_CMD_DESC_##member##_SHIFT) - -#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 - -#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF - -#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ - (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ - HINIC_API_CMD_STATUS_HEADER_##member##_MASK) - -#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 -#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 - -#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF -#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3 - -#define HINIC_API_CMD_STATUS_GET(val, member) \ - (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ - HINIC_API_CMD_STATUS_##member##_MASK) - -enum hinic_api_cmd_chain_type { - HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, - - HINIC_API_CMD_MAX, -}; - -struct hinic_api_cmd_chain_attr { - struct hinic_hwif *hwif; - enum hinic_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; -}; - -struct hinic_api_cmd_status { - u64 header; - u32 status; - u32 rsvd0; - u32 rsvd1; - u32 rsvd2; - u64 rsvd3; -}; - -/* HW struct */ -struct hinic_api_cmd_cell { - u64 ctrl; - - /* address is 64 bit in HW struct */ - u64 next_cell_paddr; - - u64 desc; - - /* HW struct */ - union { - struct { - u64 hw_cmd_paddr; - } write; - - struct { - u64 hw_wb_resp_paddr; - u64 hw_cmd_paddr; - } read; - }; -}; - -struct hinic_api_cmd_cell_ctxt { - dma_addr_t cell_paddr; - struct hinic_api_cmd_cell *cell_vaddr; - - dma_addr_t api_cmd_paddr; - u8 *api_cmd_vaddr; -}; - -struct hinic_api_cmd_chain { - struct hinic_hwif *hwif; - enum hinic_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; - - /* HW members in 24 bit format */ - u32 prod_idx; - u32 cons_idx; - - struct semaphore sem; - - struct hinic_api_cmd_cell_ctxt *cell_ctxt; - - dma_addr_t wb_status_paddr; - struct hinic_api_cmd_status *wb_status; - - dma_addr_t head_cell_paddr; - struct hinic_api_cmd_cell *head_node; - struct hinic_api_cmd_cell *curr_node; -}; - -int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, - enum hinic_node_id dest, u8 *cmd, u16 size); - -int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, - struct hinic_hwif *hwif); - -void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c deleted file mode 100644 index 4d09ea786b35fee607ecb6ea45b148458b98e374..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ /dev/null @@ -1,947 +0,0 @@ -/* - * Huawei HiNIC PCI Express 
Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_io.h" -#include "hinic_hw_dev.h" - -#define CMDQ_CEQE_TYPE_SHIFT 0 - -#define CMDQ_CEQE_TYPE_MASK 0x7 - -#define CMDQ_CEQE_GET(val, member) \ - (((val) >> CMDQ_CEQE_##member##_SHIFT) \ - & CMDQ_CEQE_##member##_MASK) - -#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20 - -#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF - -#define CMDQ_WQE_ERRCODE_GET(val, member) \ - (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \ - & CMDQ_WQE_ERRCODE_##member##_MASK) - -#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) - -#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) - -#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) - -#define CMDQ_WQE_COMPLETED(ctrl_info) \ - HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) - -#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) - -#define CMDQ_DB_OFF SZ_2K - -#define CMDQ_WQEBB_SIZE 64 -#define CMDQ_WQE_SIZE 64 -#define CMDQ_DEPTH SZ_4K - -#define CMDQ_WQ_PAGE_SIZE SZ_4K - -#define WQE_LCMD_SIZE 64 -#define WQE_SCMD_SIZE 64 - -#define COMPLETE_LEN 3 - -#define CMDQ_TIMEOUT 1000 - -#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) - -#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ - struct hinic_cmdqs, cmdq[0]) - -#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \ - struct hinic_func_to_io, \ - cmdqs) - -enum cmdq_wqe_type { - WQE_LCMD_TYPE = 0, - WQE_SCMD_TYPE = 1, -}; - -enum completion_format { - COMPLETE_DIRECT = 0, - COMPLETE_SGE = 1, -}; - -enum data_format { - DATA_SGE = 0, - DATA_DIRECT = 1, -}; - -enum bufdesc_len { - BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */ - BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */ -}; - -enum ctrl_sect_len { - CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */ - CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */ -}; - -enum cmdq_scmd_type { - CMDQ_SET_ARM_CMD = 2, -}; - -enum cmdq_cmd_type { - CMDQ_CMD_SYNC_DIRECT_RESP = 0, - CMDQ_CMD_SYNC_SGE_RESP = 1, -}; - -enum completion_request { - NO_CEQ = 0, - CEQ_SET = 1, -}; - -/** - * hinic_alloc_cmdq_buf - alloc buffer for sending command - * @cmdqs: the cmdqs - * @cmdq_buf: the buffer returned in this struct - * - * Return 0 - Success, negative - Failure - **/ -int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf) -{ - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, - &cmdq_buf->dma_addr); - if (!cmdq_buf->buf) { - dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * hinic_free_cmdq_buf - free buffer - * @cmdqs: the 
cmdqs - * @cmdq_buf: the buffer to free that is in this struct - **/ -void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf) -{ - dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); -} - -static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) -{ - unsigned int wqe_size = 0; - - switch (len) { - case BUFDESC_LCMD_LEN: - wqe_size = WQE_LCMD_SIZE; - break; - case BUFDESC_SCMD_LEN: - wqe_size = WQE_SCMD_SIZE; - break; - } - - return wqe_size; -} - -static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, - struct hinic_cmdq_buf *buf_out) -{ - struct hinic_sge_resp *sge_resp = &completion->sge_resp; - - hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); -} - -static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx, - enum completion_format complete_format, - enum data_format data_format, - enum bufdesc_len buf_len) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - enum ctrl_sect_len ctrl_len; - struct hinic_ctrl *ctrl; - u32 saved_data; - - if (data_format == DATA_SGE) { - wqe_lcmd = &wqe->wqe_lcmd; - - wqe_lcmd->status.status_info = 0; - ctrl = &wqe_lcmd->ctrl; - ctrl_len = CTRL_SECT_LEN; - } else { - wqe_scmd = &wqe->direct_wqe.wqe_scmd; - - wqe_scmd->status.status_info = 0; - ctrl = &wqe_scmd->ctrl; - ctrl_len = CTRL_DIRECT_SECT_LEN; - } - - ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) | - HINIC_CMDQ_CTRL_SET(cmd, CMD) | - HINIC_CMDQ_CTRL_SET(mod, MOD) | - HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE); - - CMDQ_WQE_HEADER(wqe)->header_info = - HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | - HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | - HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | - HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | - HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED); - - saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; - saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM); - - if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM)) - CMDQ_WQE_HEADER(wqe)->saved_data |= - HINIC_SAVED_DATA_SET(1, ARM); - else - CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; -} - -static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, - struct hinic_cmdq_buf *buf_in) -{ - hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); -} - -static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, - void *buf_in, u32 in_size) -{ - struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; - - wqe_scmd->buf_desc.buf_len = in_size; - memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); -} - -static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - struct hinic_cmdq_buf *buf_in, - struct hinic_cmdq_buf *buf_out, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; - enum completion_format complete_format; - - switch (cmd_type) { - case CMDQ_CMD_SYNC_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out); - break; - case CMDQ_CMD_SYNC_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_lcmd->completion.direct_resp = 0; - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, - prod_idx, 
complete_format, DATA_SGE, - BUFDESC_LCMD_LEN); - - cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); -} - -static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - void *buf_in, u16 in_size, - struct hinic_cmdq_buf *buf_out, int wrapped, - enum hinic_cmd_ack_type ack_type, - enum hinic_mod_type mod, u8 cmd, u16 prod_idx) -{ - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - enum completion_format complete_format; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - - wqe_scmd = &direct_wqe->wqe_scmd; - - switch (cmd_type) { - case CMDQ_CMD_SYNC_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); - break; - case CMDQ_CMD_SYNC_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_scmd->completion.direct_resp = 0; - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, - complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); - - cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); -} - -static void cmdq_wqe_fill(void *dst, void *src) -{ - memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, - CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); - - wmb(); /* The first 8 bytes should be written last */ - - *(u64 *)dst = *(u64 *)src; -} - -static void cmdq_fill_db(u32 *db_info, - enum hinic_cmdq_type cmdq_type, u16 prod_idx) -{ - *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | - HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) | - HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | - HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE); -} - -static void cmdq_set_db(struct hinic_cmdq *cmdq, - enum hinic_cmdq_type cmdq_type, u16 prod_idx) -{ - u32 db_info; - - cmdq_fill_db(&db_info, cmdq_type, prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - db_info = cpu_to_be32(db_info); - - wmb(); /* write all before the doorbell */ - - writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); -} - -static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, - u64 *resp) -{ - struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; - u16 curr_prod_idx, next_prod_idx; - int errcode, wrapped, num_wqebbs; - struct hinic_wq *wq = cmdq->wq; - struct hinic_hw_wqe *hw_wqe; - struct completion done; - - /* Keep doorbell index correct. bh - for tasklet(ceq). 
*/ - spin_lock_bh(&cmdq->cmdq_lock); - - /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ - hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx); - if (IS_ERR(hw_wqe)) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EBUSY; - } - - curr_cmdq_wqe = &hw_wqe->cmdq_wqe; - - wrapped = cmdq->wrapped; - - num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; - next_prod_idx = curr_prod_idx + num_wqebbs; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= wq->q_depth; - } - - cmdq->errcode[curr_prod_idx] = &errcode; - - init_completion(&done); - cmdq->done[curr_prod_idx] = &done; - - cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL, - wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd, - curr_prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE); - - /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); - - cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); - - spin_unlock_bh(&cmdq->cmdq_lock); - - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmdq->errcode[curr_prod_idx] == &errcode) - cmdq->errcode[curr_prod_idx] = NULL; - - if (cmdq->done[curr_prod_idx] == &done) - cmdq->done[curr_prod_idx] = NULL; - - spin_unlock_bh(&cmdq->cmdq_lock); - - return -ETIMEDOUT; - } - - smp_rmb(); /* read error code after completion */ - - if (resp) { - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd; - - *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp); - } - - if (errcode != 0) - return -EFAULT; - - return 0; -} - -static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, - u16 in_size) -{ - struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; - u16 curr_prod_idx, next_prod_idx; - struct hinic_wq *wq = cmdq->wq; - struct hinic_hw_wqe *hw_wqe; - int wrapped, num_wqebbs; - - /* Keep doorbell index correct */ - spin_lock(&cmdq->cmdq_lock); - - /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ - hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx); - if (IS_ERR(hw_wqe)) { - spin_unlock(&cmdq->cmdq_lock); - return -EBUSY; - } - - curr_cmdq_wqe = &hw_wqe->cmdq_wqe; - - wrapped = cmdq->wrapped; - - num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; - next_prod_idx = curr_prod_idx + num_wqebbs; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= wq->q_depth; - } - - cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, - in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, - HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx); - - /* The data that is written to HW should be in Big Endian Format */ - hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE); - - /* cmdq wqe is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); - - cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); - - spin_unlock(&cmdq->cmdq_lock); - return 0; -} - -static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) -{ - if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) - return -EINVAL; - - return 0; -} - -/** - * hinic_cmdq_direct_resp - send command with direct data as resp - * @cmdqs: the cmdqs - * @mod: module on the card that will handle the command - * @cmd: the command - * @buf_in: the buffer for the command - * @resp: the response to return - * - * Return 0 - Success, negative - Failure - **/ -int hinic_cmdq_direct_resp(struct 
hinic_cmdqs *cmdqs, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, u64 *resp) -{ - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = cmdq_params_valid(buf_in); - if (err) { - dev_err(&pdev->dev, "Invalid CMDQ parameters\n"); - return err; - } - - return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], - mod, cmd, buf_in, resp); -} - -/** - * hinic_set_arm_bit - set arm bit for enable interrupt again - * @cmdqs: the cmdqs - * @q_type: type of queue to set the arm bit for - * @q_id: the queue number - * - * Return 0 - Success, negative - Failure - **/ -int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, - enum hinic_set_arm_qtype q_type, u32 q_id) -{ - struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_arm_bit arm_bit; - int err; - - arm_bit.q_type = q_type; - arm_bit.q_id = q_id; - - err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); - if (err) { - dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); - return err; - } - - return 0; -} - -static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, - struct hinic_cmdq_wqe *wqe) -{ - u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); - unsigned int bufdesc_len, wqe_size; - struct hinic_ctrl *ctrl; - - bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); - wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); - if (wqe_size == WQE_LCMD_SIZE) { - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; - - ctrl = &wqe_lcmd->ctrl; - } else { - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - - wqe_scmd = &direct_wqe->wqe_scmd; - ctrl = &wqe_scmd->ctrl; - } - - /* clear HW busy bit */ - ctrl->ctrl_info = 0; - - wmb(); /* verify wqe is clear */ -} - -/** - * cmdq_arm_ceq_handler - cmdq completion event handler for arm command - * @cmdq: the cmdq of the arm command - * @wqe: the wqe of the arm command - * - * Return 0 - Success, negative - Failure - **/ -static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, - struct hinic_cmdq_wqe *wqe) -{ - struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; - struct hinic_cmdq_wqe_scmd *wqe_scmd; - struct hinic_ctrl *ctrl; - u32 ctrl_info; - - wqe_scmd = &direct_wqe->wqe_scmd; - ctrl = &wqe_scmd->ctrl; - ctrl_info = be32_to_cpu(ctrl->ctrl_info); - - /* HW should toggle the HW BUSY BIT */ - if (!CMDQ_WQE_COMPLETED(ctrl_info)) - return -EBUSY; - - clear_wqe_complete_bit(cmdq, wqe); - - hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); - return 0; -} - -static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, - int errcode) -{ - if (cmdq->errcode[prod_idx]) - *cmdq->errcode[prod_idx] = errcode; -} - -/** - * cmdq_arm_ceq_handler - cmdq completion event handler for sync command - * @cmdq: the cmdq of the command - * @cons_idx: the consumer index to update the error code for - * @errcode: the error code - **/ -static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx, - int errcode) -{ - u16 prod_idx = cons_idx; - - spin_lock(&cmdq->cmdq_lock); - cmdq_update_errcode(cmdq, prod_idx, errcode); - - wmb(); /* write all before update for the command request */ - - if (cmdq->done[prod_idx]) - complete(cmdq->done[prod_idx]); - spin_unlock(&cmdq->cmdq_lock); -} - -static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, - struct hinic_cmdq_wqe *cmdq_wqe) -{ - struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; - struct 
hinic_status *status = &wqe_lcmd->status; - struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; - int errcode; - - if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) - return -EBUSY; - - errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); - - cmdq_sync_cmd_handler(cmdq, ci, errcode); - - clear_wqe_complete_bit(cmdq, cmdq_wqe); - hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); - return 0; -} - -/** - * cmdq_ceq_handler - cmdq completion event handler - * @handle: private data for the handler(cmdqs) - * @ceqe_data: ceq element data - **/ -static void cmdq_ceq_handler(void *handle, u32 ceqe_data) -{ - enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); - struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle; - struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; - struct hinic_cmdq_header *header; - struct hinic_hw_wqe *hw_wqe; - int err, set_arm = 0; - u32 saved_data; - u16 ci; - - /* Read the smallest wqe size for getting wqe size */ - while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) { - if (IS_ERR(hw_wqe)) - break; - - header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe); - saved_data = be32_to_cpu(header->saved_data); - - if (HINIC_SAVED_DATA_GET(saved_data, ARM)) { - /* arm_bit was set until here */ - set_arm = 0; - - if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe)) - break; - } else { - set_arm = 1; - - hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci); - if (IS_ERR(hw_wqe)) - break; - - if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe)) - break; - } - } - - if (set_arm) { - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type); - if (err) - dev_err(&pdev->dev, "Failed to set arm for CMDQ\n"); - } -} - -/** - * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq - * @cmdq_ctxt: cmdq ctxt to initialize - * @cmdq: the cmdq - * @cmdq_pages: the memory of the queue - **/ -static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt, - struct hinic_cmdq *cmdq, - struct hinic_cmdq_pages *cmdq_pages) -{ - struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; - u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; - struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); - struct hinic_wq *wq = cmdq->wq; - - /* The data in the HW is in Big Endian Format */ - wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); - - pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size); - - ctxt_info->curr_wqe_page_pfn = - HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | - HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED); - - /* block PFN - Read Modify Write */ - cmdq_first_block_paddr = cmdq_pages->page_paddr; - - pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size); - - ctxt_info->wq_block_pfn = - HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) | - HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI); - - cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif); - cmdq_ctxt->cmdq_type = cmdq->cmdq_type; -} - -/** - * init_cmdq - initialize cmdq - * @cmdq: the cmdq - * @wq: the wq attaced to the cmdq - * @q_type: the cmdq type of the cmdq - * @db_area: doorbell area for the cmdq - * - * Return 0 - Success, negative - Failure - **/ -static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq, - enum hinic_cmdq_type q_type, void __iomem *db_area) -{ - int err; - - cmdq->wq = wq; - cmdq->cmdq_type = 
q_type; - cmdq->wrapped = 1; - - spin_lock_init(&cmdq->cmdq_lock); - - cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); - if (!cmdq->done) - return -ENOMEM; - - cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode), - wq->q_depth)); - if (!cmdq->errcode) { - err = -ENOMEM; - goto err_errcode; - } - - cmdq->db_base = db_area + CMDQ_DB_OFF; - return 0; - -err_errcode: - vfree(cmdq->done); - return err; -} - -/** - * free_cmdq - Free cmdq - * @cmdq: the cmdq to free - **/ -static void free_cmdq(struct hinic_cmdq *cmdq) -{ - vfree(cmdq->errcode); - vfree(cmdq->done); -} - -/** - * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq - * @hwdev: the NIC HW device - * @cmdqs: cmdqs to write the ctxts for - * &db_area: db_area for all the cmdqs - * - * Return 0 - Success, negative - Failure - **/ -static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, - struct hinic_cmdqs *cmdqs, void __iomem **db_area) -{ - struct hinic_hwif *hwif = hwdev->hwif; - enum hinic_cmdq_type type, cmdq_type; - struct hinic_cmdq_ctxt *cmdq_ctxts; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - size_t cmdq_ctxts_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI function type\n"); - return -EINVAL; - } - - cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); - cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); - if (!cmdq_ctxts) - return -ENOMEM; - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { - err = init_cmdq(&cmdqs->cmdq[cmdq_type], - &cmdqs->saved_wqs[cmdq_type], cmdq_type, - db_area[cmdq_type]); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmdq\n"); - goto err_init_cmdq; - } - - cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type], - &cmdqs->cmdq[cmdq_type], - &cmdqs->cmdq_pages); - } - - /* Write the CMDQ ctxts */ - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_CMDQ_CTXT_SET, - &cmdq_ctxts[cmdq_type], - sizeof(cmdq_ctxts[cmdq_type]), - NULL, NULL, HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n", - cmdq_type); - goto err_write_cmdq_ctxt; - } - } - - devm_kfree(&pdev->dev, cmdq_ctxts); - return 0; - -err_write_cmdq_ctxt: - cmdq_type = HINIC_MAX_CMDQ_TYPES; - -err_init_cmdq: - for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++) - free_cmdq(&cmdqs->cmdq[type]); - - devm_kfree(&pdev->dev, cmdq_ctxts); - return err; -} - -/** - * hinic_init_cmdqs - init all cmdqs - * @cmdqs: cmdqs to init - * @hwif: HW interface for accessing cmdqs - * @db_area: doorbell areas for all the cmdqs - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, - void __iomem **db_area) -{ - struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); - struct pci_dev *pdev = hwif->pdev; - struct hinic_hwdev *hwdev; - size_t saved_wqs_size; - u16 max_wqe_size; - int err; - - cmdqs->hwif = hwif; - cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev, - HINIC_CMDQ_BUF_SIZE, - HINIC_CMDQ_BUF_SIZE, 0); - if (!cmdqs->cmdq_buf_pool) - return -ENOMEM; - - saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); - cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); - if (!cmdqs->saved_wqs) { - err = -ENOMEM; - goto 
err_saved_wqs; - } - - max_wqe_size = WQE_LCMD_SIZE; - err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif, - HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE, - CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size); - if (err) { - dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n"); - goto err_cmdq_wqs; - } - - hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io); - err = init_cmdqs_ctxt(hwdev, cmdqs, db_area); - if (err) { - dev_err(&pdev->dev, "Failed to write cmdq ctxt\n"); - goto err_cmdq_ctxt; - } - - hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs, - cmdq_ceq_handler); - return 0; - -err_cmdq_ctxt: - hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, - HINIC_MAX_CMDQ_TYPES); - -err_cmdq_wqs: - devm_kfree(&pdev->dev, cmdqs->saved_wqs); - -err_saved_wqs: - dma_pool_destroy(cmdqs->cmdq_buf_pool); - return err; -} - -/** - * hinic_free_cmdqs - free all cmdqs - * @cmdqs: cmdqs to free - **/ -void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) -{ - struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); - struct hinic_hwif *hwif = cmdqs->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_cmdq_type cmdq_type; - - hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - - cmdq_type = HINIC_CMDQ_SYNC; - for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) - free_cmdq(&cmdqs->cmdq[cmdq_type]); - - hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, - HINIC_MAX_CMDQ_TYPES); - - devm_kfree(&pdev->dev, cmdqs->saved_wqs); - - dma_pool_destroy(cmdqs->cmdq_buf_pool); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h deleted file mode 100644 index 23f8d39eab68d46cd269a821a243f68ec0ccf010..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef HINIC_CMDQ_H -#define HINIC_CMDQ_H - -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wq.h" - -#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 -#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56 -#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61 -#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62 -#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63 - -#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF -#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F -#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1 -#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1 -#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1 - -#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \ - (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ - << HINIC_CMDQ_CTXT_##member##_SHIFT) - -#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ - ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ - << HINIC_CMDQ_CTXT_##member##_SHIFT))) - -#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 -#define HINIC_CMDQ_CTXT_CI_SHIFT 52 - -#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF -#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF - -#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ - (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ - << HINIC_CMDQ_CTXT_##member##_SHIFT) - -#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ - ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ - << HINIC_CMDQ_CTXT_##member##_SHIFT))) - -#define HINIC_SAVED_DATA_ARM_SHIFT 31 - -#define HINIC_SAVED_DATA_ARM_MASK 0x1 - -#define HINIC_SAVED_DATA_SET(val, member) \ - (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \ - << HINIC_SAVED_DATA_##member##_SHIFT) - -#define HINIC_SAVED_DATA_GET(val, member) \ - (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \ - & HINIC_SAVED_DATA_##member##_MASK) - -#define HINIC_SAVED_DATA_CLEAR(val, member) \ - ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \ - << HINIC_SAVED_DATA_##member##_SHIFT))) - -#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 -#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23 -#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 -#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27 - -#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF -#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1 -#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7 -#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F - -#define HINIC_CMDQ_DB_INFO_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \ - << HINIC_CMDQ_DB_INFO_##member##_SHIFT) - -#define HINIC_CMDQ_BUF_SIZE 2048 - -#define HINIC_CMDQ_BUF_HW_RSVD 8 -#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \ - HINIC_CMDQ_BUF_HW_RSVD) - -enum hinic_cmdq_type { - HINIC_CMDQ_SYNC, - - HINIC_MAX_CMDQ_TYPES, -}; - -enum hinic_set_arm_qtype { - HINIC_SET_ARM_CMDQ, -}; - -enum hinic_cmd_ack_type { - HINIC_CMD_ACK_TYPE_CMDQ, -}; - -struct hinic_cmdq_buf { - void *buf; - dma_addr_t dma_addr; - size_t size; -}; - -struct hinic_cmdq_arm_bit { - u32 q_type; - u32 q_id; -}; - -struct hinic_cmdq_ctxt_info { - u64 curr_wqe_page_pfn; - u64 wq_block_pfn; -}; - -struct hinic_cmdq_ctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 cmdq_type; - u8 rsvd1[1]; - - u8 rsvd2[4]; - - struct hinic_cmdq_ctxt_info ctxt_info; -}; - -struct hinic_cmdq { - struct hinic_wq *wq; - - enum hinic_cmdq_type cmdq_type; - int wrapped; - - /* Lock for keeping the doorbell order */ - spinlock_t cmdq_lock; - - struct completion **done; - int **errcode; - - /* doorbell area */ - void __iomem *db_base; -}; - -struct hinic_cmdqs { - struct hinic_hwif *hwif; - - struct dma_pool *cmdq_buf_pool; - - struct hinic_wq *saved_wqs; - - struct 
hinic_cmdq_pages cmdq_pages; - - struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; -}; - -int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf); - -void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, - struct hinic_cmdq_buf *cmdq_buf); - -int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, - enum hinic_mod_type mod, u8 cmd, - struct hinic_cmdq_buf *buf_in, u64 *out_param); - -int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, - enum hinic_set_arm_qtype q_type, u32 q_id); - -int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, - void __iomem **db_area); - -void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h deleted file mode 100644 index f39b184f674da99e3d3206be3e50727a967fef7e..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_CSR_H -#define HINIC_HW_CSR_H - -/* HW interface registers */ -#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 -#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 - -#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 -#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 - -#define HINIC_DMA_ATTR_BASE 0xC80 -#define HINIC_ELECTION_BASE 0x4200 - -#define HINIC_DMA_ATTR_STRIDE 0x4 -#define HINIC_CSR_DMA_ATTR_ADDR(idx) \ - (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE) - -#define HINIC_PPF_ELECTION_STRIDE 0x4 -#define HINIC_CSR_MAX_PORTS 4 - -#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \ - (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE) - -/* API CMD registers */ -#define HINIC_CSR_API_CMD_BASE 0xF000 - -#define HINIC_CSR_API_CMD_STRIDE 0x100 - -#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \ - (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) - -/* MSI-X registers */ -#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 -#define HINIC_CSR_MSIX_CNT_BASE 0x2004 - -#define HINIC_CSR_MSIX_STRIDE 0x8 - -#define HINIC_CSR_MSIX_CTRL_ADDR(idx) 
\ - (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) - -#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ - (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) - -/* EQ registers */ -#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 -#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 - -#define HINIC_EQ_MTT_OFF_STRIDE 0x40 - -#define HINIC_CSR_AEQ_MTT_OFF(id) \ - (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) - -#define HINIC_CSR_CEQ_MTT_OFF(id) \ - (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) - -#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 - -#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) - -#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) - -#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ - (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ - (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 -#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 -#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08 -#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C - -#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 -#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 -#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008 -#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C - -#define HINIC_EQ_OFF_STRIDE 0x80 - -#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ - (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ - (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ - (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ - (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ - (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ - (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ - (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ - (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c deleted file mode 100644 index 6b19607a4caac0f846186917b9c4b286f3b0b9b6..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ /dev/null @@ -1,1010 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
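The hinic_hw_csr.h block removed above is pure base + index * stride address arithmetic for the API-CMD, MSI-X and EQ register banks. The sketch below is a userspace approximation with made-up base and stride values, showing how such a macro expands and why the (idx) parentheses matter when the argument is an expression:

/* Editor's sketch: stride-based register addressing in the style of the
 * HINIC_CSR_*_ADDR(idx) macros above (demo values, not real offsets).
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_REG_BASE   0xF000u
#define DEMO_REG_STRIDE 0x100u

/* Parenthesising (idx) keeps DEMO_REG_ADDR(i + 1) correct. */
#define DEMO_REG_ADDR(idx) \
	(DEMO_REG_BASE + (idx) * DEMO_REG_STRIDE)

int main(void)
{
	uint32_t i = 2;

	assert(DEMO_REG_ADDR(0) == 0xF000u);
	assert(DEMO_REG_ADDR(i + 1) == 0xF300u);
	return 0;
}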
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" -#include "hinic_hw_dev.h" - -#define IO_STATUS_TIMEOUT 100 -#define OUTBOUND_STATE_TIMEOUT 100 -#define DB_STATE_TIMEOUT 100 - -#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ - (2 * (max_qps) + (num_aeqs) + (num_ceqs)) - -#define ADDR_IN_4BYTES(addr) ((addr) >> 2) - -enum intr_type { - INTR_MSIX_TYPE, -}; - -enum io_status { - IO_STOPPED = 0, - IO_RUNNING = 1, -}; - -enum hw_ioctxt_set_cmdq_depth { - HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT, -}; - -/* HW struct */ -struct hinic_dev_cap { - u8 status; - u8 version; - u8 rsvd0[6]; - - u8 rsvd1[5]; - u8 intr_type; - u8 rsvd2[66]; - u16 max_sqs; - u16 max_rqs; - u8 rsvd3[208]; -}; - -/** - * get_capability - convert device capabilities to NIC capabilities - * @hwdev: the HW device to set and convert device capabilities for - * @dev_cap: device capabilities from FW - * - * Return 0 - Success, negative - Failure - **/ -static int get_capability(struct hinic_hwdev *hwdev, - struct hinic_dev_cap *dev_cap) -{ - struct hinic_cap *nic_cap = &hwdev->nic_cap; - int num_aeqs, num_ceqs, num_irqs; - - if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif)) - return -EINVAL; - - if (dev_cap->intr_type != INTR_MSIX_TYPE) - return -EFAULT; - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); - num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); - - /* Each QP has its own (SQ + RQ) interrupts */ - nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; - - if (nic_cap->num_qps > HINIC_Q_CTXT_MAX) - nic_cap->num_qps = HINIC_Q_CTXT_MAX; - - /* num_qps must be power of 2 */ - nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1); - - nic_cap->max_qps = dev_cap->max_sqs + 1; - if (nic_cap->max_qps != (dev_cap->max_rqs + 1)) - return -EFAULT; - - if (nic_cap->num_qps > nic_cap->max_qps) - nic_cap->num_qps = nic_cap->max_qps; - - return 0; -} - -/** - * get_cap_from_fw - get device capabilities from FW - * @pfhwdev: the PF HW device to get capabilities for - * - * Return 0 - Success, negative - Failure - **/ -static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_dev_cap dev_cap; - u16 in_len, out_len; - int err; - - in_len = 0; - out_len = sizeof(dev_cap); - - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM, - HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap, - &out_len, HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to get capability from FW\n"); - return err; - } - - return get_capability(hwdev, &dev_cap); -} - -/** - * get_dev_cap - get device capabilities - * @hwdev: the NIC HW device to get capabilities for - * - * Return 0 - Success, negative - Failure - **/ -static int get_dev_cap(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - int err; - - switch (HINIC_FUNC_TYPE(hwif)) { - case HINIC_PPF: - case HINIC_PF: - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - err = get_cap_from_fw(pfhwdev); - if (err) { - dev_err(&pdev->dev, "Failed to get capability from FW\n"); - return err; - } - break; - - default: - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - 
return -EINVAL; - } - - return 0; -} - -/** - * init_msix - enable the msix and save the entries - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int init_msix(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int nr_irqs, num_aeqs, num_ceqs; - size_t msix_entries_size; - int i, err; - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); - nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs); - if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) - nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); - - msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); - hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, - GFP_KERNEL); - if (!hwdev->msix_entries) - return -ENOMEM; - - for (i = 0; i < nr_irqs; i++) - hwdev->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs); - if (err) { - dev_err(&pdev->dev, "Failed to enable pci msix\n"); - return err; - } - - return 0; -} - -/** - * disable_msix - disable the msix - * @hwdev: the NIC HW device - **/ -static void disable_msix(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - - pci_disable_msix(pdev); -} - -/** - * hinic_port_msg_cmd - send port msg to mgmt - * @hwdev: the NIC HW device - * @cmd: the port command - * @buf_in: input buffer - * @in_size: input size - * @buf_out: output buffer - * @out_size: returned output size - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, - buf_in, in_size, buf_out, out_size, - HINIC_MGMT_MSG_SYNC); -} - -/** - * init_fw_ctxt- Init Firmware tables before network mgmt and io operations - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int init_fw_ctxt(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmd_fw_ctxt fw_ctxt; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, - &fw_ctxt, sizeof(fw_ctxt), - &fw_ctxt, &out_size); - if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) { - dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n", - fw_ctxt.status); - return -EFAULT; - } - - return 0; -} - -/** - * set_hw_ioctxt - set the shape of the IO queues in FW - * @hwdev: the NIC HW device - * @rq_depth: rq depth - * @sq_depth: sq depth - * - * Return 0 - Success, negative - Failure - **/ -static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, - unsigned int sq_depth) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_cmd_hw_ioctxt hw_ioctxt; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - 
dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; - hw_ioctxt.cmdq_depth = 0; - - hw_ioctxt.rq_depth = ilog2(rq_depth); - - hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX; - - hw_ioctxt.sq_depth = ilog2(sq_depth); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_HWCTXT_SET, - &hw_ioctxt, sizeof(hw_ioctxt), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} - -static int wait_for_outbound_state(struct hinic_hwdev *hwdev) -{ - enum hinic_outbound_state outbound_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - unsigned long end; - - end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT); - do { - outbound_state = hinic_outbound_state_get(hwif); - - if (outbound_state == HINIC_OUTBOUND_ENABLE) - return 0; - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n"); - return -EFAULT; -} - -static int wait_for_db_state(struct hinic_hwdev *hwdev) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_db_state db_state; - unsigned long end; - - end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT); - do { - db_state = hinic_db_state_get(hwif); - - if (db_state == HINIC_DB_ENABLE) - return 0; - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for DB - Timeout\n"); - return -EFAULT; -} - -static int wait_for_io_stopped(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_io_status cmd_io_status; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - unsigned long end; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); - do { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_STATUS_GET, - &cmd_io_status, sizeof(cmd_io_status), - &cmd_io_status, &out_size, - HINIC_MGMT_MSG_SYNC); - if ((err) || (out_size != sizeof(cmd_io_status))) { - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", - err); - return err; - } - - if (cmd_io_status.status == IO_STOPPED) { - dev_info(&pdev->dev, "IO stopped\n"); - return 0; - } - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); - return -ETIMEDOUT; -} - -/** - * clear_io_resource - set the IO resources as not active in the NIC - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -static int clear_io_resources(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_clear_io_res cmd_clear_io_res; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - err = wait_for_io_stopped(hwdev); - if (err) { - dev_err(&pdev->dev, "IO has not stopped yet\n"); - return err; - } - - cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - err = 
hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res, - sizeof(cmd_clear_io_res), NULL, NULL, - HINIC_MGMT_MSG_SYNC); - if (err) { - dev_err(&pdev->dev, "Failed to clear IO resources\n"); - return err; - } - - return 0; -} - -/** - * set_resources_state - set the state of the resources in the NIC - * @hwdev: the NIC HW device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -static int set_resources_state(struct hinic_hwdev *hwdev, - enum hinic_res_state state) -{ - struct hinic_cmd_set_res_state res_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - res_state.state = state; - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, - HINIC_MOD_COMM, - HINIC_COMM_CMD_RES_STATE_SET, - &res_state, sizeof(res_state), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} - -/** - * get_base_qpn - get the first qp number - * @hwdev: the NIC HW device - * @base_qpn: returned qp number - * - * Return 0 - Success, negative - Failure - **/ -static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) -{ - struct hinic_cmd_base_qpn cmd_base_qpn; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN, - &cmd_base_qpn, sizeof(cmd_base_qpn), - &cmd_base_qpn, &out_size); - if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) { - dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n", - cmd_base_qpn.status); - return -EFAULT; - } - - *base_qpn = cmd_base_qpn.qpn; - return 0; -} - -/** - * hinic_hwdev_ifup - Preparing the HW for passing IO - * @hwdev: the NIC HW device - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_cap *nic_cap = &hwdev->nic_cap; - struct hinic_hwif *hwif = hwdev->hwif; - int err, num_aeqs, num_ceqs, num_qps; - struct msix_entry *ceq_msix_entries; - struct msix_entry *sq_msix_entries; - struct msix_entry *rq_msix_entries; - struct pci_dev *pdev = hwif->pdev; - u16 base_qpn; - - err = get_base_qpn(hwdev, &base_qpn); - if (err) { - dev_err(&pdev->dev, "Failed to get global base qp number\n"); - return err; - } - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); - - ceq_msix_entries = &hwdev->msix_entries[num_aeqs]; - - err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs, - ceq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init IO channel\n"); - return err; - } - - num_qps = nic_cap->num_qps; - sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs]; - rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps]; - - err = hinic_io_create_qps(func_to_io, base_qpn, num_qps, - sq_msix_entries, rq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to create QPs\n"); - goto err_create_qps; - } - - err = wait_for_db_state(hwdev); - if (err) { - dev_warn(&pdev->dev, "db - disabled, try again\n"); - hinic_db_state_set(hwif, HINIC_DB_ENABLE); - } - - err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH); - if (err) { - 
dev_err(&pdev->dev, "Failed to set HW IO ctxt\n"); - goto err_hw_ioctxt; - } - - return 0; - -err_hw_ioctxt: - hinic_io_destroy_qps(func_to_io, num_qps); - -err_create_qps: - hinic_io_free(func_to_io); - return err; -} - -/** - * hinic_hwdev_ifdown - Closing the HW for passing IO - * @hwdev: the NIC HW device - * - **/ -void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_cap *nic_cap = &hwdev->nic_cap; - - clear_io_resources(hwdev); - - hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); - hinic_io_free(func_to_io); -} - -/** - * hinic_hwdev_cb_register - register callback handler for MGMT events - * @hwdev: the NIC HW device - * @cmd: the mgmt event - * @handle: private data for the handler - * @handler: event handler - **/ -void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd, void *handle, - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_nic_cb *nic_cb; - u8 cmd_cb; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - nic_cb->handler = handler; - nic_cb->handle = handle; - nic_cb->cb_state = HINIC_CB_ENABLED; -} - -/** - * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events - * @hwdev: the NIC HW device - * @cmd: the mgmt event - **/ -void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd) -{ - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_nic_cb *nic_cb; - u8 cmd_cb; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - nic_cb->cb_state &= ~HINIC_CB_ENABLED; - - while (nic_cb->cb_state & HINIC_CB_RUNNING) - schedule(); - - nic_cb->handler = NULL; -} - -/** - * nic_mgmt_msg_handler - nic mgmt event handler - * @handle: private data for the handler - * @buf_in: input buffer - * @in_size: input size - * @buf_out: output buffer - * @out_size: returned output size - **/ -static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct hinic_pfhwdev *pfhwdev = handle; - enum hinic_cb_state cb_state; - struct hinic_nic_cb *nic_cb; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - struct pci_dev *pdev; - u8 cmd_cb; - - hwdev = &pfhwdev->hwdev; - hwif = hwdev->hwif; - pdev = hwif->pdev; - - if ((cmd < HINIC_MGMT_MSG_CMD_BASE) || - (cmd >= HINIC_MGMT_MSG_CMD_MAX)) { - dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd); - return; - } - - cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; - - nic_cb = &pfhwdev->nic_cb[cmd_cb]; - - cb_state = cmpxchg(&nic_cb->cb_state, - HINIC_CB_ENABLED, - HINIC_CB_ENABLED | HINIC_CB_RUNNING); - - if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler)) - nic_cb->handler(nic_cb->handle, buf_in, - in_size, buf_out, out_size); - else - dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd); - - nic_cb->cb_state &= ~HINIC_CB_RUNNING; -} - -/** - * init_pfhwdev - 
Initialize the extended components of PF - * @pfhwdev: the HW device for PF - * - * Return 0 - success, negative - failure - **/ -static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n"); - return err; - } - - hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, - pfhwdev, nic_mgmt_msg_handler); - - hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); - return 0; -} - -/** - * free_pfhwdev - Free the extended components of PF - * @pfhwdev: the HW device for PF - **/ -static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) -{ - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - - hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); - - hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); - - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); -} - -/** - * hinic_init_hwdev - Initialize the NIC HW - * @pdev: the NIC pci device - * - * Return initialized NIC HW device - * - * Initialize the NIC HW device and return a pointer to it - **/ -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) -{ - struct hinic_pfhwdev *pfhwdev; - struct hinic_hwdev *hwdev; - struct hinic_hwif *hwif; - int err, num_aeqs; - - hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); - if (!hwif) - return ERR_PTR(-ENOMEM); - - err = hinic_init_hwif(hwif, pdev); - if (err) { - dev_err(&pdev->dev, "Failed to init HW interface\n"); - return ERR_PTR(err); - } - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - err = -EFAULT; - goto err_func_type; - } - - pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL); - if (!pfhwdev) { - err = -ENOMEM; - goto err_pfhwdev_alloc; - } - - hwdev = &pfhwdev->hwdev; - hwdev->hwif = hwif; - - err = init_msix(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init msix\n"); - goto err_init_msix; - } - - err = wait_for_outbound_state(hwdev); - if (err) { - dev_warn(&pdev->dev, "outbound - disabled, try again\n"); - hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE); - } - - num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); - - err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, - HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, - hwdev->msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init async event queues\n"); - goto err_aeqs_init; - } - - err = init_pfhwdev(pfhwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init PF HW device\n"); - goto err_init_pfhwdev; - } - - err = get_dev_cap(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to get device capabilities\n"); - goto err_dev_cap; - } - - err = init_fw_ctxt(hwdev); - if (err) { - dev_err(&pdev->dev, "Failed to init function table\n"); - goto err_init_fw_ctxt; - } - - err = set_resources_state(hwdev, HINIC_RES_ACTIVE); - if (err) { - dev_err(&pdev->dev, "Failed to set resources state\n"); - goto err_resources_state; - } - - return hwdev; - -err_resources_state: -err_init_fw_ctxt: -err_dev_cap: - free_pfhwdev(pfhwdev); - -err_init_pfhwdev: - hinic_aeqs_free(&hwdev->aeqs); - -err_aeqs_init: - disable_msix(hwdev); - -err_init_msix: -err_pfhwdev_alloc: -err_func_type: - hinic_free_hwif(hwif); - return ERR_PTR(err); -} - -/** - * hinic_free_hwdev - Free the NIC HW device - * @hwdev: the NIC HW device - **/ -void hinic_free_hwdev(struct hinic_hwdev *hwdev) -{ - struct 
hinic_pfhwdev *pfhwdev = container_of(hwdev, - struct hinic_pfhwdev, - hwdev); - - set_resources_state(hwdev, HINIC_RES_CLEAN); - - free_pfhwdev(pfhwdev); - - hinic_aeqs_free(&hwdev->aeqs); - - disable_msix(hwdev); - - hinic_free_hwif(hwdev->hwif); -} - -/** - * hinic_hwdev_num_qps - return the number QPs available for use - * @hwdev: the NIC HW device - * - * Return number QPs available for use - **/ -int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) -{ - struct hinic_cap *nic_cap = &hwdev->nic_cap; - - return nic_cap->num_qps; -} - -/** - * hinic_hwdev_get_sq - get SQ - * @hwdev: the NIC HW device - * @i: the position of the SQ - * - * Return: the SQ in the i position - **/ -struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_qp *qp = &func_to_io->qps[i]; - - if (i >= hinic_hwdev_num_qps(hwdev)) - return NULL; - - return &qp->sq; -} - -/** - * hinic_hwdev_get_sq - get RQ - * @hwdev: the NIC HW device - * @i: the position of the RQ - * - * Return: the RQ in the i position - **/ -struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) -{ - struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; - struct hinic_qp *qp = &func_to_io->qps[i]; - - if (i >= hinic_hwdev_num_qps(hwdev)) - return NULL; - - return &qp->rq; -} - -/** - * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry - * @hwdev: the NIC HW device - * @msix_index: msix_index - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index) -{ - return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index); -} - -/** - * hinic_hwdev_msix_set - set message attribute for msix entry - * @hwdev: the NIC HW device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer) -{ - return hinic_msix_attr_set(hwdev->hwif, msix_index, - pending_limit, coalesc_timer, - lli_timer_cfg, lli_credit_limit, - resend_timer); -} - -/** - * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq - * @hwdev: the NIC HW device - * @sq: send queue - * @pending_limit: the maximum pending update ci events (unit 8) - * @coalesc_timer: coalesc period for update ci (unit 8 us) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, - u8 pending_limit, u8 coalesc_timer) -{ - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - struct hinic_cmd_hw_ci hw_ci; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - hw_ci.dma_attr_off = 0; - hw_ci.pending_limit = pending_limit; - hw_ci.coalesc_timer = coalesc_timer; - - hw_ci.msix_en = 1; - hw_ci.msix_entry_idx = sq->msix_entry; - - hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - hw_ci.sq_id = qp->q_id; - - 
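The callback register/unregister/dispatch trio removed above gates each MGMT handler with an ENABLED|RUNNING bit pair: the dispatcher only calls the handler if it can atomically move ENABLED to ENABLED|RUNNING, and unregister clears ENABLED and then waits until RUNNING drops. A compile-and-run approximation using C11 atomics (invented names; the kernel code uses cmpxchg() on an unsigned long):

/* Editor's sketch of the ENABLED/RUNNING callback gating above, with
 * C11 atomics standing in for the kernel's cmpxchg() (names invented).
 */
#include <stdatomic.h>
#include <stdio.h>

#define CB_ENABLED 0x1u
#define CB_RUNNING 0x2u

static _Atomic unsigned int cb_state = CB_ENABLED;

static void dispatch(void)
{
	unsigned int expected = CB_ENABLED;

	/* Run the handler only if ENABLED -> ENABLED|RUNNING succeeded;
	 * otherwise the callback has been (or is being) unregistered. */
	if (atomic_compare_exchange_strong(&cb_state, &expected,
					   CB_ENABLED | CB_RUNNING)) {
		printf("handler ran\n");
		atomic_fetch_and(&cb_state, ~CB_RUNNING);
	} else {
		printf("event dropped, callback not enabled\n");
	}
}

static void unregister_cb(void)
{
	atomic_fetch_and(&cb_state, ~CB_ENABLED);
	/* Wait for any in-flight handler to finish. */
	while (atomic_load(&cb_state) & CB_RUNNING)
		;
}

int main(void)
{
	dispatch();		/* handler ran */
	unregister_cb();
	dispatch();		/* event dropped */
	return 0;
}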
hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr); - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, - HINIC_MOD_COMM, - HINIC_COMM_CMD_SQ_HI_CI_SET, - &hw_ci, sizeof(hw_ci), NULL, - NULL, HINIC_MGMT_MSG_SYNC); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h deleted file mode 100644 index 0f5563f3b77988c69d91205ec31396d6326fec78..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_DEV_H -#define HINIC_HW_DEV_H - -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define HINIC_MAX_QPS 32 - -#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \ - HINIC_MGMT_MSG_CMD_BASE) - -struct hinic_cap { - u16 max_qps; - u16 num_qps; -}; - -enum hinic_port_cmd { - HINIC_PORT_CMD_CHANGE_MTU = 2, - - HINIC_PORT_CMD_ADD_VLAN = 3, - HINIC_PORT_CMD_DEL_VLAN = 4, - - HINIC_PORT_CMD_SET_MAC = 9, - HINIC_PORT_CMD_GET_MAC = 10, - HINIC_PORT_CMD_DEL_MAC = 11, - - HINIC_PORT_CMD_SET_RX_MODE = 12, - - HINIC_PORT_CMD_GET_LINK_STATE = 24, - - HINIC_PORT_CMD_SET_PORT_STATE = 41, - - HINIC_PORT_CMD_FWCTXT_INIT = 69, - - HINIC_PORT_CMD_SET_FUNC_STATE = 93, - - HINIC_PORT_CMD_GET_GLOBAL_QPN = 102, - - HINIC_PORT_CMD_GET_CAP = 170, -}; - -enum hinic_mgmt_msg_cmd { - HINIC_MGMT_MSG_CMD_BASE = 160, - - HINIC_MGMT_MSG_CMD_LINK_STATUS = 160, - - HINIC_MGMT_MSG_CMD_MAX, -}; - -enum hinic_cb_state { - HINIC_CB_ENABLED = BIT(0), - HINIC_CB_RUNNING = BIT(1), -}; - -enum hinic_res_state { - HINIC_RES_CLEAN = 0, - HINIC_RES_ACTIVE = 1, -}; - -struct hinic_cmd_fw_ctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rx_buf_sz; - - u32 rsvd1; -}; - -struct hinic_cmd_hw_ioctxt { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - - u16 rsvd1; - - u8 set_cmdq_depth; - u8 cmdq_depth; - - u8 rsvd2; - u8 rsvd3; - u8 rsvd4; - u8 rsvd5; - - u16 rq_depth; - u16 rx_buf_sz_idx; - u16 sq_depth; -}; - -struct hinic_cmd_io_status { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 rsvd1; - u8 rsvd2; - u32 io_status; -}; - -struct hinic_cmd_clear_io_res { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 rsvd1; - u8 rsvd2; -}; - -struct hinic_cmd_set_res_state { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 state; - u8 rsvd1; - u32 rsvd2; -}; - -struct hinic_cmd_base_qpn { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 qpn; -}; - -struct hinic_cmd_hw_ci { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - - u8 dma_attr_off; - u8 pending_limit; - u8 coalesc_timer; - - u8 msix_en; - u16 msix_entry_idx; - - u32 sq_id; - u32 rsvd1; - u64 ci_addr; -}; - -struct hinic_hwdev { - struct hinic_hwif *hwif; - struct msix_entry *msix_entries; - - struct 
hinic_aeqs aeqs; - struct hinic_func_to_io func_to_io; - - struct hinic_cap nic_cap; -}; - -struct hinic_nic_cb { - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size); - - void *handle; - unsigned long cb_state; -}; - -struct hinic_pfhwdev { - struct hinic_hwdev hwdev; - - struct hinic_pf_to_mgmt pf_to_mgmt; - - struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD]; -}; - -void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd, void *handle, - void (*handler)(void *handle, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)); - -void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, - enum hinic_mgmt_msg_cmd cmd); - -int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size); - -int hinic_hwdev_ifup(struct hinic_hwdev *hwdev); - -void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev); - -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); - -void hinic_free_hwdev(struct hinic_hwdev *hwdev); - -int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev); - -struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i); - -struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i); - -int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index); - -int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer); - -int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, - u8 pending_limit, u8 coalesc_timer); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c deleted file mode 100644 index 7cb8b9b94726d3a98008c9f2a452b09036ba5a36..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ /dev/null @@ -1,886 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" - -#define HINIC_EQS_WQ_NAME "hinic_eqs" - -#define GET_EQ_NUM_PAGES(eq, pg_size) \ - (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) - -#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) - -#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ - HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) - -#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ - HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) - -#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ - HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ - HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) - -#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? 
\ - HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ - HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) - -#define GET_EQ_ELEMENT(eq, idx) \ - ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ - (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) - -#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ - GET_EQ_ELEMENT(eq, idx)) - -#define GET_CEQ_ELEM(eq, idx) ((u32 *) \ - GET_EQ_ELEMENT(eq, idx)) - -#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) - -#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) - -#define PAGE_IN_4K(page_size) ((page_size) >> 12) -#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) - -#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) -#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) - -#define EQ_MAX_PAGES 8 - -#define CEQE_TYPE_SHIFT 23 -#define CEQE_TYPE_MASK 0x7 - -#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ - CEQE_TYPE_MASK) - -#define CEQE_DATA_MASK 0x3FFFFFF -#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) - -#define aeq_to_aeqs(eq) \ - container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) - -#define ceq_to_ceqs(eq) \ - container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) - -#define work_to_aeq_work(work) \ - container_of(work, struct hinic_eq_work, work) - -#define DMA_ATTR_AEQ_DEFAULT 0 -#define DMA_ATTR_CEQ_DEFAULT 0 - -/* No coalescence */ -#define THRESH_CEQ_DEFAULT 0 - -enum eq_int_mode { - EQ_INT_MODE_ARMED, - EQ_INT_MODE_ALWAYS -}; - -enum eq_arm_state { - EQ_NOT_ARMED, - EQ_ARMED -}; - -/** - * hinic_aeq_register_hw_cb - register AEQ callback for specific event - * @aeqs: pointer to Async eqs of the chip - * @event: aeq event to register callback for it - * @handle: private data will be used by the callback - * @hw_handler: callback function - **/ -void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event, void *handle, - void (*hwe_handler)(void *handle, void *data, - u8 size)) -{ - struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; - - hwe_cb->hwe_handler = hwe_handler; - hwe_cb->handle = handle; - hwe_cb->hwe_state = HINIC_EQE_ENABLED; -} - -/** - * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event - * @aeqs: pointer to Async eqs of the chip - * @event: aeq event to unregister callback for it - **/ -void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event) -{ - struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; - - hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; - - while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) - schedule(); - - hwe_cb->hwe_handler = NULL; -} - -/** - * hinic_ceq_register_cb - register CEQ callback for specific event - * @ceqs: pointer to Completion eqs part of the chip - * @event: ceq event to register callback for it - * @handle: private data will be used by the callback - * @handler: callback function - **/ -void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event, void *handle, - void (*handler)(void *handle, u32 ceqe_data)) -{ - struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; - - ceq_cb->handler = handler; - ceq_cb->handle = handle; - ceq_cb->ceqe_state = HINIC_EQE_ENABLED; -} - -/** - * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event - * @ceqs: pointer to Completion eqs part of the chip - * @event: ceq event to unregister callback for it - **/ -void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event) -{ - struct hinic_ceq_cb *ceq_cb = 
&ceqs->ceq_cb[event]; - - ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; - - while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) - schedule(); - - ceq_cb->handler = NULL; -} - -static u8 eq_cons_idx_checksum_set(u32 val) -{ - u8 checksum = 0; - int idx; - - for (idx = 0; idx < 32; idx += 4) - checksum ^= ((val >> idx) & 0xF); - - return (checksum & 0xF); -} - -/** - * eq_update_ci - update the HW cons idx of event queue - * @eq: the event queue to update the cons idx for - **/ -static void eq_update_ci(struct hinic_eq *eq) -{ - u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); - - /* Read Modify Write */ - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_EQ_CI_CLEAR(val, IDX) & - HINIC_EQ_CI_CLEAR(val, WRAPPED) & - HINIC_EQ_CI_CLEAR(val, INT_ARMED) & - HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); - - val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | - HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | - HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); - - val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); - - hinic_hwif_write_reg(eq->hwif, addr, val); -} - -/** - * aeq_irq_handler - handler for the AEQ event - * @eq: the Async Event Queue that received the event - **/ -static void aeq_irq_handler(struct hinic_eq *eq) -{ - struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); - struct hinic_hwif *hwif = aeqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_aeq_elem *aeqe_curr; - struct hinic_hw_event_cb *hwe_cb; - enum hinic_aeq_type event; - unsigned long eqe_state; - u32 aeqe_desc; - int i, size; - - for (i = 0; i < eq->q_len; i++) { - aeqe_curr = GET_CURR_AEQ_ELEM(eq); - - /* Data in HW is in Big endian Format */ - aeqe_desc = be32_to_cpu(aeqe_curr->desc); - - /* HW toggles the wrapped bit, when it adds eq element */ - if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) - break; - - event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); - if (event >= HINIC_MAX_AEQ_EVENTS) { - dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); - return; - } - - if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { - hwe_cb = &aeqs->hwe_cb[event]; - - size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); - - eqe_state = cmpxchg(&hwe_cb->hwe_state, - HINIC_EQE_ENABLED, - HINIC_EQE_ENABLED | - HINIC_EQE_RUNNING); - if ((eqe_state == HINIC_EQE_ENABLED) && - (hwe_cb->hwe_handler)) - hwe_cb->hwe_handler(hwe_cb->handle, - aeqe_curr->data, size); - else - dev_err(&pdev->dev, "Unhandled AEQ Event %d\n", - event); - - hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; - } - - eq->cons_idx++; - - if (eq->cons_idx == eq->q_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - } -} - -/** - * ceq_event_handler - handler for the ceq events - * @ceqs: ceqs part of the chip - * @ceqe: ceq element that describes the event - **/ -static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) -{ - struct hinic_hwif *hwif = ceqs->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_ceq_cb *ceq_cb; - enum hinic_ceq_type event; - unsigned long eqe_state; - - event = CEQE_TYPE(ceqe); - if (event >= HINIC_MAX_CEQ_EVENTS) { - dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); - return; - } - - ceq_cb = &ceqs->ceq_cb[event]; - - eqe_state = cmpxchg(&ceq_cb->ceqe_state, - HINIC_EQE_ENABLED, - HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); - - if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler)) - ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); - else - dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); - - ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; -} - -/** - * ceq_irq_handler - handler for the CEQ event - * @eq: the Completion Event Queue 
that received the event - **/ -static void ceq_irq_handler(struct hinic_eq *eq) -{ - struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); - u32 ceqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - ceqe = *(GET_CURR_CEQ_ELEM(eq)); - - /* Data in HW is in Big endian Format */ - ceqe = be32_to_cpu(ceqe); - - /* HW toggles the wrapped bit, when it adds eq element event */ - if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) - break; - - ceq_event_handler(ceqs, ceqe); - - eq->cons_idx++; - - if (eq->cons_idx == eq->q_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - } -} - -/** - * eq_irq_handler - handler for the EQ event - * @data: the Event Queue that received the event - **/ -static void eq_irq_handler(void *data) -{ - struct hinic_eq *eq = data; - - if (eq->type == HINIC_AEQ) - aeq_irq_handler(eq); - else if (eq->type == HINIC_CEQ) - ceq_irq_handler(eq); - - eq_update_ci(eq); -} - -/** - * eq_irq_work - the work of the EQ that received the event - * @work: the work struct that is associated with the EQ - **/ -static void eq_irq_work(struct work_struct *work) -{ - struct hinic_eq_work *aeq_work = work_to_aeq_work(work); - struct hinic_eq *aeq; - - aeq = aeq_work->data; - eq_irq_handler(aeq); -} - -/** - * ceq_tasklet - the tasklet of the EQ that received the event - * @ceq_data: the eq - **/ -static void ceq_tasklet(unsigned long ceq_data) -{ - struct hinic_eq *ceq = (struct hinic_eq *)ceq_data; - - eq_irq_handler(ceq); -} - -/** - * aeq_interrupt - aeq interrupt handler - * @irq: irq number - * @data: the Async Event Queue that collected the event - **/ -static irqreturn_t aeq_interrupt(int irq, void *data) -{ - struct hinic_eq_work *aeq_work; - struct hinic_eq *aeq = data; - struct hinic_aeqs *aeqs; - - /* clear resend timer cnt register */ - hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry); - - aeq_work = &aeq->aeq_work; - aeq_work->data = aeq; - - aeqs = aeq_to_aeqs(aeq); - queue_work(aeqs->workq, &aeq_work->work); - - return IRQ_HANDLED; -} - -/** - * ceq_interrupt - ceq interrupt handler - * @irq: irq number - * @data: the Completion Event Queue that collected the event - **/ -static irqreturn_t ceq_interrupt(int irq, void *data) -{ - struct hinic_eq *ceq = data; - - /* clear resend timer cnt register */ - hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry); - - tasklet_schedule(&ceq->ceq_tasklet); - - return IRQ_HANDLED; -} - -static void set_ctrl0(struct hinic_eq *eq) -{ - struct msix_entry *msix_entry = &eq->msix_entry; - enum hinic_eq_type type = eq->type; - u32 addr, val, ctrl0; - - if (type == HINIC_AEQ) { - /* RMW Ctrl0 */ - addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & - HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); - - ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | - HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | - HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), - PCI_INTF_IDX) | - HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); - - val |= ctrl0; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } else if (type == HINIC_CEQ) { - /* RMW Ctrl0 */ - addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & - HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & - HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - HINIC_CEQ_CTRL_0_CLEAR(val, 
INTR_MODE); - - ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | - HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | - HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | - HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), - PCI_INTF_IDX) | - HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); - - val |= ctrl0; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } -} - -static void set_ctrl1(struct hinic_eq *eq) -{ - enum hinic_eq_type type = eq->type; - u32 page_size_val, elem_size; - u32 addr, val, ctrl1; - - if (type == HINIC_AEQ) { - /* RMW Ctrl1 */ - addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & - HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & - HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); - - ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | - HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | - HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - val |= ctrl1; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } else if (type == HINIC_CEQ) { - /* RMW Ctrl1 */ - addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - - val = hinic_hwif_read_reg(eq->hwif, addr); - - val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & - HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); - - ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | - HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - val |= ctrl1; - - hinic_hwif_write_reg(eq->hwif, addr, val); - } -} - -/** - * set_eq_ctrls - setting eq's ctrl registers - * @eq: the Event Queue for setting - **/ -static void set_eq_ctrls(struct hinic_eq *eq) -{ - set_ctrl0(eq); - set_ctrl1(eq); -} - -/** - * aeq_elements_init - initialize all the elements in the aeq - * @eq: the Async Event Queue - * @init_val: value to initialize the elements with it - **/ -static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) -{ - struct hinic_aeq_elem *aeqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - aeqe = GET_AEQ_ELEM(eq, i); - aeqe->desc = cpu_to_be32(init_val); - } - - wmb(); /* Write the initilzation values */ -} - -/** - * ceq_elements_init - Initialize all the elements in the ceq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) -{ - u32 *ceqe; - int i; - - for (i = 0; i < eq->q_len; i++) { - ceqe = GET_CEQ_ELEM(eq, i); - *(ceqe) = cpu_to_be32(init_val); - } - - wmb(); /* Write the initilzation values */ -} - -/** - * alloc_eq_pages - allocate the pages for the queue - * @eq: the event queue - * - * Return 0 - Success, Negative - Failure - **/ -static int alloc_eq_pages(struct hinic_eq *eq) -{ - struct hinic_hwif *hwif = eq->hwif; - struct pci_dev *pdev = hwif->pdev; - u32 init_val, addr, val; - size_t addr_size; - int err, pg; - - addr_size = eq->num_pages * sizeof(*eq->dma_addr); - eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); - if (!eq->dma_addr) - return -ENOMEM; - - addr_size = eq->num_pages * sizeof(*eq->virt_addr); - eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); - if (!eq->virt_addr) { - err = -ENOMEM; - goto err_virt_addr_alloc; - } - - for (pg = 0; pg < eq->num_pages; pg++) { - eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, - eq->page_size, - &eq->dma_addr[pg], - GFP_KERNEL); - if (!eq->virt_addr[pg]) { - err = -ENOMEM; - goto err_dma_alloc; - } - - addr = EQ_HI_PHYS_ADDR_REG(eq, pg); - val = 
upper_32_bits(eq->dma_addr[pg]); - - hinic_hwif_write_reg(hwif, addr, val); - - addr = EQ_LO_PHYS_ADDR_REG(eq, pg); - val = lower_32_bits(eq->dma_addr[pg]); - - hinic_hwif_write_reg(hwif, addr, val); - } - - init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); - - if (eq->type == HINIC_AEQ) - aeq_elements_init(eq, init_val); - else if (eq->type == HINIC_CEQ) - ceq_elements_init(eq, init_val); - - return 0; - -err_dma_alloc: - while (--pg >= 0) - dma_free_coherent(&pdev->dev, eq->page_size, - eq->virt_addr[pg], - eq->dma_addr[pg]); - - devm_kfree(&pdev->dev, eq->virt_addr); - -err_virt_addr_alloc: - devm_kfree(&pdev->dev, eq->dma_addr); - return err; -} - -/** - * free_eq_pages - free the pages of the queue - * @eq: the Event Queue - **/ -static void free_eq_pages(struct hinic_eq *eq) -{ - struct hinic_hwif *hwif = eq->hwif; - struct pci_dev *pdev = hwif->pdev; - int pg; - - for (pg = 0; pg < eq->num_pages; pg++) - dma_free_coherent(&pdev->dev, eq->page_size, - eq->virt_addr[pg], - eq->dma_addr[pg]); - - devm_kfree(&pdev->dev, eq->virt_addr); - devm_kfree(&pdev->dev, eq->dma_addr); -} - -/** - * init_eq - initialize Event Queue - * @eq: the event queue - * @hwif: the HW interface of a PCI function device - * @type: the type of the event queue, aeq or ceq - * @q_id: Queue id number - * @q_len: the number of EQ elements - * @page_size: the page size of the pages in the event queue - * @entry: msix entry associated with the event queue - * - * Return 0 - Success, Negative - Failure - **/ -static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, - enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, - struct msix_entry entry) -{ - struct pci_dev *pdev = hwif->pdev; - int err; - - eq->hwif = hwif; - eq->type = type; - eq->q_id = q_id; - eq->q_len = q_len; - eq->page_size = page_size; - - /* Clear PI and CI, also clear the ARM bit */ - hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); - hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); - - eq->cons_idx = 0; - eq->wrapped = 0; - - if (type == HINIC_AEQ) { - eq->elem_size = HINIC_AEQE_SIZE; - } else if (type == HINIC_CEQ) { - eq->elem_size = HINIC_CEQE_SIZE; - } else { - dev_err(&pdev->dev, "Invalid EQ type\n"); - return -EINVAL; - } - - eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); - eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); - - eq->msix_entry = entry; - - if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { - dev_err(&pdev->dev, "num elements in eq page != power of 2\n"); - return -EINVAL; - } - - if (eq->num_pages > EQ_MAX_PAGES) { - dev_err(&pdev->dev, "too many pages for eq\n"); - return -EINVAL; - } - - set_eq_ctrls(eq); - eq_update_ci(eq); - - err = alloc_eq_pages(eq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate pages for eq\n"); - return err; - } - - if (type == HINIC_AEQ) { - struct hinic_eq_work *aeq_work = &eq->aeq_work; - - INIT_WORK(&aeq_work->work, eq_irq_work); - } else if (type == HINIC_CEQ) { - tasklet_init(&eq->ceq_tasklet, ceq_tasklet, - (unsigned long)eq); - } - - /* set the attributes of the msix entry */ - hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry, - HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, - HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, - HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, - HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, - HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); - - if (type == HINIC_AEQ) - err = request_irq(entry.vector, aeq_interrupt, 0, - "hinic_aeq", eq); - else if (type == HINIC_CEQ) - err = request_irq(entry.vector, ceq_interrupt, 0, - "hinic_ceq", eq); - - if 
(err) { - dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); - goto err_req_irq; - } - - return 0; - -err_req_irq: - free_eq_pages(eq); - return err; -} - -/** - * remove_eq - remove Event Queue - * @eq: the event queue - **/ -static void remove_eq(struct hinic_eq *eq) -{ - struct msix_entry *entry = &eq->msix_entry; - - free_irq(entry->vector, eq); - - if (eq->type == HINIC_AEQ) { - struct hinic_eq_work *aeq_work = &eq->aeq_work; - - cancel_work_sync(&aeq_work->work); - } else if (eq->type == HINIC_CEQ) { - tasklet_kill(&eq->ceq_tasklet); - } - - free_eq_pages(eq); -} - -/** - * hinic_aeqs_init - initialize all the aeqs - * @aeqs: pointer to Async eqs of the chip - * @hwif: the HW interface of a PCI function device - * @num_aeqs: number of AEQs - * @q_len: number of EQ elements - * @page_size: the page size of the pages in the event queue - * @msix_entries: msix entries associated with the event queues - * - * Return 0 - Success, negative - Failure - **/ -int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, - int num_aeqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - int err, i, q_id; - - aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); - if (!aeqs->workq) - return -ENOMEM; - - aeqs->hwif = hwif; - aeqs->num_aeqs = num_aeqs; - - for (q_id = 0; q_id < num_aeqs; q_id++) { - err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len, - page_size, msix_entries[q_id]); - if (err) { - dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id); - goto err_init_aeq; - } - } - - return 0; - -err_init_aeq: - for (i = 0; i < q_id; i++) - remove_eq(&aeqs->aeq[i]); - - destroy_workqueue(aeqs->workq); - return err; -} - -/** - * hinic_aeqs_free - free all the aeqs - * @aeqs: pointer to Async eqs of the chip - **/ -void hinic_aeqs_free(struct hinic_aeqs *aeqs) -{ - int q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++) - remove_eq(&aeqs->aeq[q_id]); - - destroy_workqueue(aeqs->workq); -} - -/** - * hinic_ceqs_init - init all the ceqs - * @ceqs: ceqs part of the chip - * @hwif: the hardware interface of a pci function device - * @num_ceqs: number of CEQs - * @q_len: number of EQ elements - * @page_size: the page size of the event queue - * @msix_entries: msix entries associated with the event queues - * - * Return 0 - Success, Negative - Failure - **/ -int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, - int num_ceqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - int i, q_id, err; - - ceqs->hwif = hwif; - ceqs->num_ceqs = num_ceqs; - - for (q_id = 0; q_id < num_ceqs; q_id++) { - err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len, - page_size, msix_entries[q_id]); - if (err) { - dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id); - goto err_init_ceq; - } - } - - return 0; - -err_init_ceq: - for (i = 0; i < q_id; i++) - remove_eq(&ceqs->ceq[i]); - - return err; -} - -/** - * hinic_ceqs_free - free all the ceqs - * @ceqs: ceqs part of the chip - **/ -void hinic_ceqs_free(struct hinic_ceqs *ceqs) -{ - int q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) - remove_eq(&ceqs->ceq[q_id]); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h deleted file mode 100644 index ecb9c2bc6dc8c6b37bbb6c2ea659d8a1543cff2d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Huawei HiNIC 
PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_EQS_H -#define HINIC_HW_EQS_H - -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" - -#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0 -#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 -#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31 - -#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF -#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F -#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 -#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1 - -#define HINIC_AEQ_CTRL_0_SET(val, member) \ - (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \ - HINIC_AEQ_CTRL_0_##member##_SHIFT) - -#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \ - << HINIC_AEQ_CTRL_0_##member##_SHIFT))) - -#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0 -#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 -#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF -#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3 -#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF - -#define HINIC_AEQ_CTRL_1_SET(val, member) \ - (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \ - HINIC_AEQ_CTRL_1_##member##_SHIFT) - -#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \ - << HINIC_AEQ_CTRL_1_##member##_SHIFT))) - -#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20 -#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 -#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF -#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F -#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF -#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 -#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1 - -#define HINIC_CEQ_CTRL_0_SET(val, member) \ - (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \ - HINIC_CEQ_CTRL_0_##member##_SHIFT) - -#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \ - << HINIC_CEQ_CTRL_0_##member##_SHIFT))) - -#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0 -#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF -#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF - -#define HINIC_CEQ_CTRL_1_SET(val, member) \ - (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \ - HINIC_CEQ_CTRL_1_##member##_SHIFT) - -#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \ - << HINIC_CEQ_CTRL_1_##member##_SHIFT))) - -#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0 -#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7 -#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8 -#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31 - -#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F -#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1 -#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF -#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1 - -#define HINIC_EQ_ELEM_DESC_SET(val, member) \ - (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \ - 
HINIC_EQ_ELEM_DESC_##member##_SHIFT) - -#define HINIC_EQ_ELEM_DESC_GET(val, member) \ - (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \ - HINIC_EQ_ELEM_DESC_##member##_MASK) - -#define HINIC_EQ_CI_IDX_SHIFT 0 -#define HINIC_EQ_CI_WRAPPED_SHIFT 20 -#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24 -#define HINIC_EQ_CI_INT_ARMED_SHIFT 31 - -#define HINIC_EQ_CI_IDX_MASK 0xFFFFF -#define HINIC_EQ_CI_WRAPPED_MASK 0x1 -#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF -#define HINIC_EQ_CI_INT_ARMED_MASK 0x1 - -#define HINIC_EQ_CI_SET(val, member) \ - (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \ - HINIC_EQ_CI_##member##_SHIFT) - -#define HINIC_EQ_CI_CLEAR(val, member) \ - ((val) & (~(HINIC_EQ_CI_##member##_MASK \ - << HINIC_EQ_CI_##member##_SHIFT))) - -#define HINIC_MAX_AEQS 4 -#define HINIC_MAX_CEQS 32 - -#define HINIC_AEQE_SIZE 64 -#define HINIC_CEQE_SIZE 4 - -#define HINIC_AEQE_DESC_SIZE 4 -#define HINIC_AEQE_DATA_SIZE \ - (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) - -#define HINIC_DEFAULT_AEQ_LEN 64 -#define HINIC_DEFAULT_CEQ_LEN 1024 - -#define HINIC_EQ_PAGE_SIZE SZ_4K - -#define HINIC_CEQ_ID_CMDQ 0 - -enum hinic_eq_type { - HINIC_AEQ, - HINIC_CEQ, -}; - -enum hinic_aeq_type { - HINIC_MSG_FROM_MGMT_CPU = 2, - - HINIC_MAX_AEQ_EVENTS, -}; - -enum hinic_ceq_type { - HINIC_CEQ_CMDQ = 3, - - HINIC_MAX_CEQ_EVENTS, -}; - -enum hinic_eqe_state { - HINIC_EQE_ENABLED = BIT(0), - HINIC_EQE_RUNNING = BIT(1), -}; - -struct hinic_aeq_elem { - u8 data[HINIC_AEQE_DATA_SIZE]; - u32 desc; -}; - -struct hinic_eq_work { - struct work_struct work; - void *data; -}; - -struct hinic_eq { - struct hinic_hwif *hwif; - - enum hinic_eq_type type; - int q_id; - u32 q_len; - u32 page_size; - - u32 cons_idx; - int wrapped; - - size_t elem_size; - int num_pages; - int num_elem_in_pg; - - struct msix_entry msix_entry; - - dma_addr_t *dma_addr; - void **virt_addr; - - struct hinic_eq_work aeq_work; - - struct tasklet_struct ceq_tasklet; -}; - -struct hinic_hw_event_cb { - void (*hwe_handler)(void *handle, void *data, u8 size); - void *handle; - unsigned long hwe_state; -}; - -struct hinic_aeqs { - struct hinic_hwif *hwif; - - struct hinic_eq aeq[HINIC_MAX_AEQS]; - int num_aeqs; - - struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS]; - - struct workqueue_struct *workq; -}; - -struct hinic_ceq_cb { - void (*handler)(void *handle, u32 ceqe_data); - void *handle; - enum hinic_eqe_state ceqe_state; -}; - -struct hinic_ceqs { - struct hinic_hwif *hwif; - - struct hinic_eq ceq[HINIC_MAX_CEQS]; - int num_ceqs; - - struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; -}; - -void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event, void *handle, - void (*hwe_handler)(void *handle, void *data, - u8 size)); - -void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, - enum hinic_aeq_type event); - -void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event, void *handle, - void (*ceq_cb)(void *handle, u32 ceqe_data)); - -void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, - enum hinic_ceq_type event); - -int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, - int num_aeqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries); - -void hinic_aeqs_free(struct hinic_aeqs *aeqs); - -int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, - int num_ceqs, u32 q_len, u32 page_size, - struct msix_entry *msix_entries); - -void hinic_ceqs_free(struct hinic_ceqs *ceqs); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c deleted file mode 100644 index 823a17061a97970b2a25fabdf8bd4505e3e3f02d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_csr.h" -#include "hinic_hw_if.h" - -#define PCIE_ATTR_ENTRY 0 - -#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) - -/** - * hinic_msix_attr_set - set message attribute for msix entry - * @hwif: the HW interface of a pci function device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer, u8 lli_credit_limit, - u8 resend_timer) -{ - u32 msix_ctrl, addr; - - if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | - HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | - HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) | - HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | - HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); - - addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); - - hinic_hwif_write_reg(hwif, addr, msix_ctrl); - return 0; -} - -/** - * hinic_msix_attr_get - get message attribute of msix entry - * @hwif: the HW interface of a pci function device - * @msix_index: msix_index - * @pending_limit: the maximum pending interrupt events (unit 8) - * @coalesc_timer: coalesc period for interrupt (unit 8 us) - * @lli_timer: replenishing period for low latency credit (unit 8 us) - * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) - * @resend_timer: maximum wait for resending msix (unit coalesc period) - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, - u8 *pending_limit, u8 *coalesc_timer, - u8 *lli_timer, u8 *lli_credit_limit, - u8 *resend_timer) -{ - u32 addr, val; - - if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); - val = hinic_hwif_read_reg(hwif, addr); - - *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); - *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); - *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); - *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); - *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); - return 0; -} - -/** - * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry - * @hwif: the HW interface of a 
pci function device - * @msix_index: msix_index - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) -{ - u32 msix_ctrl, addr; - - if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) - return -EINVAL; - - msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER); - addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); - - hinic_hwif_write_reg(hwif, addr, msix_ctrl); - return 0; -} - -/** - * hinic_set_pf_action - set action on pf channel - * @hwif: the HW interface of a pci function device - * @action: action on pf channel - * - * Return 0 - Success, negative - Failure - **/ -void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) -{ - u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); - - attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION); - attr5 |= HINIC_FA5_SET(action, PF_ACTION); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); -} - -enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - return HINIC_FA4_GET(attr4, OUTBOUND_STATE); -} - -void hinic_outbound_state_set(struct hinic_hwif *hwif, - enum hinic_outbound_state outbound_state) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE); - attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); -} - -enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - return HINIC_FA4_GET(attr4, DB_STATE); -} - -void hinic_db_state_set(struct hinic_hwif *hwif, - enum hinic_db_state db_state) -{ - u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); - - attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE); - attr4 |= HINIC_FA4_SET(db_state, DB_STATE); - - hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); -} - -/** - * hwif_ready - test if the HW is ready for use - * @hwif: the HW interface of a pci function device - * - * Return 0 - Success, negative - Failure - **/ -static int hwif_ready(struct hinic_hwif *hwif) -{ - struct pci_dev *pdev = hwif->pdev; - u32 addr, attr1; - - addr = HINIC_CSR_FUNC_ATTR1_ADDR; - attr1 = hinic_hwif_read_reg(hwif, addr); - - if (!HINIC_FA1_GET(attr1, INIT_STATUS)) { - dev_err(&pdev->dev, "hwif status is not ready\n"); - return -EFAULT; - } - - return 0; -} - -/** - * set_hwif_attr - set the attributes in the relevant members in hwif - * @hwif: the HW interface of a pci function device - * @attr0: the first attribute that was read from the hw - * @attr1: the second attribute that was read from the hw - **/ -static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1) -{ - hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX); - hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX); - hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX); - hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE); - - hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC)); - hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC)); - hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC)); - hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC)); -} - -/** - * read_hwif_attr - read the attributes and set members in hwif - * @hwif: the HW interface of a pci function device - **/ -static void read_hwif_attr(struct hinic_hwif *hwif) -{ - u32 addr, attr0, attr1; - - addr = 
HINIC_CSR_FUNC_ATTR0_ADDR; - attr0 = hinic_hwif_read_reg(hwif, addr); - - addr = HINIC_CSR_FUNC_ATTR1_ADDR; - attr1 = hinic_hwif_read_reg(hwif, addr); - - set_hwif_attr(hwif, attr0, attr1); -} - -/** - * set_ppf - try to set hwif as ppf and set the type of hwif in this case - * @hwif: the HW interface of a pci function device - **/ -static void set_ppf(struct hinic_hwif *hwif) -{ - struct hinic_func_attr *attr = &hwif->attr; - u32 addr, val, ppf_election; - - /* Read Modify Write */ - addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif)); - - val = hinic_hwif_read_reg(hwif, addr); - val = HINIC_PPF_ELECTION_CLEAR(val, IDX); - - ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX); - - val |= ppf_election; - hinic_hwif_write_reg(hwif, addr, val); - - /* check PPF */ - val = hinic_hwif_read_reg(hwif, addr); - - attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); - if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif)) - attr->func_type = HINIC_PPF; -} - -/** - * set_dma_attr - set the dma attributes in the HW - * @hwif: the HW interface of a pci function device - * @entry_idx: the entry index in the dma table - * @st: PCIE TLP steering tag - * @at: PCIE TLP AT field - * @ph: PCIE TLP Processing Hint field - * @no_snooping: PCIE TLP No snooping - * @tph_en: PCIE TLP Processing Hint Enable - **/ -static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, - u8 st, u8 at, u8 ph, - enum hinic_pcie_nosnoop no_snooping, - enum hinic_pcie_tph tph_en) -{ - u32 addr, val, dma_attr_entry; - - /* Read Modify Write */ - addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx); - - val = hinic_hwif_read_reg(hwif, addr); - val = HINIC_DMA_ATTR_CLEAR(val, ST) & - HINIC_DMA_ATTR_CLEAR(val, AT) & - HINIC_DMA_ATTR_CLEAR(val, PH) & - HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) & - HINIC_DMA_ATTR_CLEAR(val, TPH_EN); - - dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) | - HINIC_DMA_ATTR_SET(at, AT) | - HINIC_DMA_ATTR_SET(ph, PH) | - HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) | - HINIC_DMA_ATTR_SET(tph_en, TPH_EN); - - val |= dma_attr_entry; - hinic_hwif_write_reg(hwif, addr, val); -} - -/** - * dma_attr_table_init - initialize the the default dma attributes - * @hwif: the HW interface of a pci function device - **/ -static void dma_attr_init(struct hinic_hwif *hwif) -{ - set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE, - HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE, - HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE); -} - -/** - * hinic_init_hwif - initialize the hw interface - * @hwif: the HW interface of a pci function device - * @pdev: the pci device for acessing PCI resources - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) -{ - int err; - - hwif->pdev = pdev; - - hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR); - if (!hwif->cfg_regs_bar) { - dev_err(&pdev->dev, "Failed to map configuration regs\n"); - return -ENOMEM; - } - - err = hwif_ready(hwif); - if (err) { - dev_err(&pdev->dev, "HW interface is not ready\n"); - goto err_hwif_ready; - } - - read_hwif_attr(hwif); - - if (HINIC_IS_PF(hwif)) - set_ppf(hwif); - - /* No transactionss before DMA is initialized */ - dma_attr_init(hwif); - return 0; - -err_hwif_ready: - iounmap(hwif->cfg_regs_bar); - return err; -} - -/** - * hinic_free_hwif - free the HW interface - * @hwif: the HW interface of a pci function device - **/ -void hinic_free_hwif(struct hinic_hwif *hwif) -{ - iounmap(hwif->cfg_regs_bar); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h deleted file mode 100644 index 5b4760c0e9f531301db219de567198b01b81c995..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_IF_H -#define HINIC_HW_IF_H - -#include -#include -#include -#include - -#define HINIC_DMA_ATTR_ST_SHIFT 0 -#define HINIC_DMA_ATTR_AT_SHIFT 8 -#define HINIC_DMA_ATTR_PH_SHIFT 10 -#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12 -#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13 - -#define HINIC_DMA_ATTR_ST_MASK 0xFF -#define HINIC_DMA_ATTR_AT_MASK 0x3 -#define HINIC_DMA_ATTR_PH_MASK 0x3 -#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1 -#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1 - -#define HINIC_DMA_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \ - HINIC_DMA_ATTR_##member##_SHIFT) - -#define HINIC_DMA_ATTR_CLEAR(val, member) \ - ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \ - << HINIC_DMA_ATTR_##member##_SHIFT))) - -#define HINIC_FA0_FUNC_IDX_SHIFT 0 -#define HINIC_FA0_PF_IDX_SHIFT 10 -#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14 -/* reserved members - off 16 */ -#define HINIC_FA0_FUNC_TYPE_SHIFT 24 - -#define HINIC_FA0_FUNC_IDX_MASK 0x3FF -#define HINIC_FA0_PF_IDX_MASK 0xF -#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3 -#define HINIC_FA0_FUNC_TYPE_MASK 0x1 - -#define HINIC_FA0_GET(val, member) \ - (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK) - -#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8 -/* reserved members - off 10 */ -#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12 -/* reserved members - off 15 */ -#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20 -#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24 -/* reserved members - off 27 */ -#define HINIC_FA1_INIT_STATUS_SHIFT 30 - -#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3 -#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7 -#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF -#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7 -#define HINIC_FA1_INIT_STATUS_MASK 0x1 - -#define HINIC_FA1_GET(val, member) \ - (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK) - -#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0 -#define HINIC_FA4_DB_STATE_SHIFT 1 - -#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1 -#define HINIC_FA4_DB_STATE_MASK 0x1 - -#define HINIC_FA4_GET(val, member) \ - (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK) - -#define HINIC_FA4_SET(val, member) \ - ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT) - -#define HINIC_FA4_CLEAR(val, member) \ - ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT))) - -#define HINIC_FA5_PF_ACTION_SHIFT 0 -#define HINIC_FA5_PF_ACTION_MASK 0xFFFF - -#define HINIC_FA5_SET(val, member) \ - (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT) - -#define HINIC_FA5_CLEAR(val, member) \ - ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT))) - -#define HINIC_PPF_ELECTION_IDX_SHIFT 0 -#define HINIC_PPF_ELECTION_IDX_MASK 0x1F - -#define 
HINIC_PPF_ELECTION_SET(val, member) \ - (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \ - HINIC_PPF_ELECTION_##member##_SHIFT) - -#define HINIC_PPF_ELECTION_GET(val, member) \ - (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ - HINIC_PPF_ELECTION_##member##_MASK) - -#define HINIC_PPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ - << HINIC_PPF_ELECTION_##member##_SHIFT))) - -#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 -#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 -#define HINIC_MSIX_LLI_TIMER_SHIFT 16 -#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 -#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 - -#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF -#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF -#define HINIC_MSIX_LLI_TIMER_MASK 0xFF -#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F -#define HINIC_MSIX_RESEND_TIMER_MASK 0x7 - -#define HINIC_MSIX_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_MSIX_##member##_MASK) << \ - HINIC_MSIX_##member##_SHIFT) - -#define HINIC_MSIX_ATTR_GET(val, member) \ - (((val) >> HINIC_MSIX_##member##_SHIFT) & \ - HINIC_MSIX_##member##_MASK) - -#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 - -#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1 - -#define HINIC_MSIX_CNT_SET(val, member) \ - (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \ - HINIC_MSIX_CNT_##member##_SHIFT) - -#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) -#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) -#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) -#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) -#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) -#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) - -#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) -#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) -#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) - -#define HINIC_PCI_CFG_REGS_BAR 0 -#define HINIC_PCI_DB_BAR 4 - -#define HINIC_PCIE_ST_DISABLE 0 -#define HINIC_PCIE_AT_DISABLE 0 -#define HINIC_PCIE_PH_DISABLE 0 - -#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */ -#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */ -#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */ - -enum hinic_pcie_nosnoop { - HINIC_PCIE_SNOOP = 0, - HINIC_PCIE_NO_SNOOP = 1, -}; - -enum hinic_pcie_tph { - HINIC_PCIE_TPH_DISABLE = 0, - HINIC_PCIE_TPH_ENABLE = 1, -}; - -enum hinic_func_type { - HINIC_PF = 0, - HINIC_PPF = 2, -}; - -enum hinic_mod_type { - HINIC_MOD_COMM = 0, /* HW communication module */ - HINIC_MOD_L2NIC = 1, /* L2NIC module */ - HINIC_MOD_CFGM = 7, /* Configuration module */ - - HINIC_MOD_MAX = 15 -}; - -enum hinic_node_id { - HINIC_NODE_ID_MGMT = 21, -}; - -enum hinic_pf_action { - HINIC_PF_MGMT_INIT = 0x0, - - HINIC_PF_MGMT_ACTIVE = 0x11, -}; - -enum hinic_outbound_state { - HINIC_OUTBOUND_ENABLE = 0, - HINIC_OUTBOUND_DISABLE = 1, -}; - -enum hinic_db_state { - HINIC_DB_ENABLE = 0, - HINIC_DB_DISABLE = 1, -}; - -struct hinic_func_attr { - u16 func_idx; - u8 pf_idx; - u8 pci_intf_idx; - - enum hinic_func_type func_type; - - u8 ppf_idx; - - u16 num_irqs; - u8 num_aeqs; - u8 num_ceqs; - - u8 num_dma_attr; -}; - -struct hinic_hwif { - struct pci_dev *pdev; - void __iomem *cfg_regs_bar; - - struct hinic_func_attr attr; -}; - -static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) -{ - return be32_to_cpu(readl(hwif->cfg_regs_bar + reg)); -} - -static inline void 
hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, - u32 val) -{ - writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); -} - -int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, - u8 pending_limit, u8 coalesc_timer, - u8 lli_timer_cfg, u8 lli_credit_limit, - u8 resend_timer); - -int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, - u8 *pending_limit, u8 *coalesc_timer_cfg, - u8 *lli_timer, u8 *lli_credit_limit, - u8 *resend_timer); - -int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); - -void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); - -enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif); - -void hinic_outbound_state_set(struct hinic_hwif *hwif, - enum hinic_outbound_state outbound_state); - -enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif); - -void hinic_db_state_set(struct hinic_hwif *hwif, - enum hinic_db_state db_state); - -int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); - -void hinic_free_hwif(struct hinic_hwif *hwif); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c deleted file mode 100644 index 8e5897669a3a21190286b5883504ba9431579380..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define CI_Q_ADDR_SIZE sizeof(u32) - -#define CI_ADDR(base_addr, q_id) ((base_addr) + \ - (q_id) * CI_Q_ADDR_SIZE) - -#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE) - -#define DB_IDX(db, db_base) \ - (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) - -enum io_cmd { - IO_CMD_MODIFY_QUEUE_CTXT = 0, -}; - -static void init_db_area_idx(struct hinic_free_db_area *free_db_area) -{ - int i; - - for (i = 0; i < HINIC_DB_MAX_AREAS; i++) - free_db_area->db_idx[i] = i; - - free_db_area->alloc_pos = 0; - free_db_area->return_pos = HINIC_DB_MAX_AREAS; - - free_db_area->num_free = HINIC_DB_MAX_AREAS; - - sema_init(&free_db_area->idx_lock, 1); -} - -static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io) -{ - struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; - int pos, idx; - - down(&free_db_area->idx_lock); - - free_db_area->num_free--; - - if (free_db_area->num_free < 0) { - free_db_area->num_free++; - up(&free_db_area->idx_lock); - return ERR_PTR(-ENOMEM); - } - - pos = free_db_area->alloc_pos++; - pos &= HINIC_DB_MAX_AREAS - 1; - - idx = free_db_area->db_idx[pos]; - - free_db_area->db_idx[pos] = -1; - - up(&free_db_area->idx_lock); - - return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE; -} - -static void return_db_area(struct hinic_func_to_io *func_to_io, - void __iomem *db_base) -{ - struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; - int pos, idx = DB_IDX(db_base, func_to_io->db_base); - - down(&free_db_area->idx_lock); - - pos = free_db_area->return_pos++; - pos &= HINIC_DB_MAX_AREAS - 1; - - free_db_area->db_idx[pos] = idx; - - free_db_area->num_free++; - - up(&free_db_area->idx_lock); -} - -static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_sqs) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct hinic_sq_ctxt_block *sq_ctxt_block; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_buf cmdq_buf; - struct hinic_sq_ctxt *sq_ctxt; - struct hinic_qp *qp; - u64 out_param; - int err, i; - - err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - sq_ctxt_block = cmdq_buf.buf; - sq_ctxt = sq_ctxt_block->sq_ctxt; - - hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ, - num_sqs, func_to_io->max_qps); - for (i = 0; i < num_sqs; i++) { - qp = &func_to_io->qps[i]; - - hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq, - base_qpn + qp->q_id); - } - - cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, - &out_param); - if ((err) || (out_param != 0)) { - dev_err(&pdev->dev, "Failed to set SQ ctxts\n"); - err = -EFAULT; - } - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - return err; -} - -static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_rqs) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct hinic_rq_ctxt_block *rq_ctxt_block; - struct pci_dev *pdev = hwif->pdev; - struct hinic_cmdq_buf cmdq_buf; - struct hinic_rq_ctxt *rq_ctxt; - struct hinic_qp *qp; - u64 out_param; - int err, i; - - err = 
hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - if (err) { - dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); - return err; - } - - rq_ctxt_block = cmdq_buf.buf; - rq_ctxt = rq_ctxt_block->rq_ctxt; - - hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, - num_rqs, func_to_io->max_qps); - for (i = 0; i < num_rqs; i++) { - qp = &func_to_io->qps[i]; - - hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, - base_qpn + qp->q_id); - } - - cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); - - err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, - IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, - &out_param); - if ((err) || (out_param != 0)) { - dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); - err = -EFAULT; - } - - hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); - return err; -} - -/** - * write_qp_ctxts - write the qp ctxt to HW - * @func_to_io: func to io channel that holds the IO components - * @base_qpn: first qp number - * @num_qps: number of qps to write - * - * Return 0 - Success, negative - Failure - **/ -static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, - u16 num_qps) -{ - return (write_sq_ctxts(func_to_io, base_qpn, num_qps) || - write_rq_ctxts(func_to_io, base_qpn, num_qps)); -} - -/** - * init_qp - Initialize a Queue Pair - * @func_to_io: func to io channel that holds the IO components - * @qp: pointer to the qp to initialize - * @q_id: the id of the qp - * @sq_msix_entry: msix entry for sq - * @rq_msix_entry: msix entry for rq - * - * Return 0 - Success, negative - Failure - **/ -static int init_qp(struct hinic_func_to_io *func_to_io, - struct hinic_qp *qp, int q_id, - struct msix_entry *sq_msix_entry, - struct msix_entry *rq_msix_entry) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - void __iomem *db_base; - int err; - - qp->q_id = q_id; - - err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], - HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE, - HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n"); - return err; - } - - err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], - HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE, - HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n"); - goto err_rq_alloc; - } - - db_base = get_db_area(func_to_io); - if (IS_ERR(db_base)) { - dev_err(&pdev->dev, "Failed to get DB area for SQ\n"); - err = PTR_ERR(db_base); - goto err_get_db; - } - - func_to_io->sq_db[q_id] = db_base; - - err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], - sq_msix_entry, - CI_ADDR(func_to_io->ci_addr_base, q_id), - CI_ADDR(func_to_io->ci_dma_base, q_id), db_base); - if (err) { - dev_err(&pdev->dev, "Failed to init SQ\n"); - goto err_sq_init; - } - - err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id], - rq_msix_entry); - if (err) { - dev_err(&pdev->dev, "Failed to init RQ\n"); - goto err_rq_init; - } - - return 0; - -err_rq_init: - hinic_clean_sq(&qp->sq); - -err_sq_init: - return_db_area(func_to_io, db_base); - -err_get_db: - hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); - -err_rq_alloc: - hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); - return err; -} - -/** - * destroy_qp - Clean the resources of a Queue Pair - * @func_to_io: func to io channel that holds the IO components - * @qp: pointer to the qp to clean - **/ -static void destroy_qp(struct hinic_func_to_io *func_to_io, - struct hinic_qp *qp) -{ - int q_id = qp->q_id; - 
- hinic_clean_rq(&qp->rq); - hinic_clean_sq(&qp->sq); - - return_db_area(func_to_io, func_to_io->sq_db[q_id]); - - hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); - hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); -} - -/** - * hinic_io_create_qps - Create Queue Pairs - * @func_to_io: func to io channel that holds the IO components - * @base_qpn: base qp number - * @num_qps: number queue pairs to create - * @sq_msix_entry: msix entries for sq - * @rq_msix_entry: msix entries for rq - * - * Return 0 - Success, negative - Failure - **/ -int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, - u16 base_qpn, int num_qps, - struct msix_entry *sq_msix_entries, - struct msix_entry *rq_msix_entries) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t qps_size, wq_size, db_size; - void *ci_addr_base; - int i, j, err; - - qps_size = num_qps * sizeof(*func_to_io->qps); - func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); - if (!func_to_io->qps) - return -ENOMEM; - - wq_size = num_qps * sizeof(*func_to_io->sq_wq); - func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); - if (!func_to_io->sq_wq) { - err = -ENOMEM; - goto err_sq_wq; - } - - wq_size = num_qps * sizeof(*func_to_io->rq_wq); - func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); - if (!func_to_io->rq_wq) { - err = -ENOMEM; - goto err_rq_wq; - } - - db_size = num_qps * sizeof(*func_to_io->sq_db); - func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); - if (!func_to_io->sq_db) { - err = -ENOMEM; - goto err_sq_db; - } - - ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), - &func_to_io->ci_dma_base, - GFP_KERNEL); - if (!ci_addr_base) { - dev_err(&pdev->dev, "Failed to allocate CI area\n"); - err = -ENOMEM; - goto err_ci_base; - } - - func_to_io->ci_addr_base = ci_addr_base; - - for (i = 0; i < num_qps; i++) { - err = init_qp(func_to_io, &func_to_io->qps[i], i, - &sq_msix_entries[i], &rq_msix_entries[i]); - if (err) { - dev_err(&pdev->dev, "Failed to create QP %d\n", i); - goto err_init_qp; - } - } - - err = write_qp_ctxts(func_to_io, base_qpn, num_qps); - if (err) { - dev_err(&pdev->dev, "Failed to init QP ctxts\n"); - goto err_write_qp_ctxts; - } - - return 0; - -err_write_qp_ctxts: -err_init_qp: - for (j = 0; j < i; j++) - destroy_qp(func_to_io, &func_to_io->qps[j]); - - dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), - func_to_io->ci_addr_base, func_to_io->ci_dma_base); - -err_ci_base: - devm_kfree(&pdev->dev, func_to_io->sq_db); - -err_sq_db: - devm_kfree(&pdev->dev, func_to_io->rq_wq); - -err_rq_wq: - devm_kfree(&pdev->dev, func_to_io->sq_wq); - -err_sq_wq: - devm_kfree(&pdev->dev, func_to_io->qps); - return err; -} - -/** - * hinic_io_destroy_qps - Destroy the IO Queue Pairs - * @func_to_io: func to io channel that holds the IO components - * @num_qps: number queue pairs to destroy - **/ -void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) -{ - struct hinic_hwif *hwif = func_to_io->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t ci_table_size; - int i; - - ci_table_size = CI_TABLE_SIZE(num_qps); - - for (i = 0; i < num_qps; i++) - destroy_qp(func_to_io, &func_to_io->qps[i]); - - dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base, - func_to_io->ci_dma_base); - - devm_kfree(&pdev->dev, func_to_io->sq_db); - - devm_kfree(&pdev->dev, func_to_io->rq_wq); - devm_kfree(&pdev->dev, func_to_io->sq_wq); - - devm_kfree(&pdev->dev, func_to_io->qps); -} 
- -/** - * hinic_io_init - Initialize the IO components - * @func_to_io: func to io channel that holds the IO components - * @hwif: HW interface for accessing IO - * @max_qps: maximum QPs in HW - * @num_ceqs: number completion event queues - * @ceq_msix_entries: msix entries for ceqs - * - * Return 0 - Success, negative - Failure - **/ -int hinic_io_init(struct hinic_func_to_io *func_to_io, - struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, - struct msix_entry *ceq_msix_entries) -{ - struct pci_dev *pdev = hwif->pdev; - enum hinic_cmdq_type cmdq, type; - void __iomem *db_area; - int err; - - func_to_io->hwif = hwif; - func_to_io->qps = NULL; - func_to_io->max_qps = max_qps; - - err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs, - HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE, - ceq_msix_entries); - if (err) { - dev_err(&pdev->dev, "Failed to init CEQs\n"); - return err; - } - - err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); - goto err_wqs_alloc; - } - - func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); - if (!func_to_io->db_base) { - dev_err(&pdev->dev, "Failed to remap IO DB area\n"); - err = -ENOMEM; - goto err_db_ioremap; - } - - init_db_area_idx(&func_to_io->free_db_area); - - for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { - db_area = get_db_area(func_to_io); - if (IS_ERR(db_area)) { - dev_err(&pdev->dev, "Failed to get cmdq db area\n"); - err = PTR_ERR(db_area); - goto err_db_area; - } - - func_to_io->cmdq_db_area[cmdq] = db_area; - } - - err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif, - func_to_io->cmdq_db_area); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmdqs\n"); - goto err_init_cmdqs; - } - - return 0; - -err_init_cmdqs: -err_db_area: - for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) - return_db_area(func_to_io, func_to_io->cmdq_db_area[type]); - - iounmap(func_to_io->db_base); - -err_db_ioremap: - hinic_wqs_free(&func_to_io->wqs); - -err_wqs_alloc: - hinic_ceqs_free(&func_to_io->ceqs); - return err; -} - -/** - * hinic_io_free - Free the IO components - * @func_to_io: func to io channel that holds the IO components - **/ -void hinic_io_free(struct hinic_func_to_io *func_to_io) -{ - enum hinic_cmdq_type cmdq; - - hinic_free_cmdqs(&func_to_io->cmdqs); - - for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) - return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); - - iounmap(func_to_io->db_base); - hinic_wqs_free(&func_to_io->wqs); - hinic_ceqs_free(&func_to_io->ceqs); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h deleted file mode 100644 index adb64179d47d878aa3a329195c8d36a6cb6bc203..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef HINIC_HW_IO_H -#define HINIC_HW_IO_H - -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" -#include "hinic_hw_qp.h" - -#define HINIC_DB_PAGE_SIZE SZ_4K -#define HINIC_DB_SIZE SZ_4M - -#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE) - -enum hinic_db_type { - HINIC_DB_CMDQ_TYPE, - HINIC_DB_SQ_TYPE, -}; - -enum hinic_io_path { - HINIC_CTRL_PATH, - HINIC_DATA_PATH, -}; - -struct hinic_free_db_area { - int db_idx[HINIC_DB_MAX_AREAS]; - - int alloc_pos; - int return_pos; - - int num_free; - - /* Lock for getting db area */ - struct semaphore idx_lock; -}; - -struct hinic_func_to_io { - struct hinic_hwif *hwif; - - struct hinic_ceqs ceqs; - - struct hinic_wqs wqs; - - struct hinic_wq *sq_wq; - struct hinic_wq *rq_wq; - - struct hinic_qp *qps; - u16 max_qps; - - void __iomem **sq_db; - void __iomem *db_base; - - void *ci_addr_base; - dma_addr_t ci_dma_base; - - struct hinic_free_db_area free_db_area; - - void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES]; - - struct hinic_cmdqs cmdqs; -}; - -int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, - u16 base_qpn, int num_qps, - struct msix_entry *sq_msix_entries, - struct msix_entry *rq_msix_entries); - -void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, - int num_qps); - -int hinic_io_init(struct hinic_func_to_io *func_to_io, - struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, - struct msix_entry *ceq_msix_entries); - -void hinic_io_free(struct hinic_func_to_io *func_to_io); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c deleted file mode 100644 index 278dc13f3dae8ccda357cf866adb01ada0d11786..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ /dev/null @@ -1,597 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_eqs.h" -#include "hinic_hw_api_cmd.h" -#include "hinic_hw_mgmt.h" -#include "hinic_hw_dev.h" - -#define SYNC_MSG_ID_MASK 0x1FF - -#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) - -#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ - ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \ - SYNC_MSG_ID_MASK)) - -#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN) - -#define MGMT_MSG_LEN_MIN 20 -#define MGMT_MSG_LEN_STEP 16 -#define MGMT_MSG_RSVD_FOR_DEV 8 - -#define SEGMENT_LEN 48 - -#define MAX_PF_MGMT_BUF_SIZE 2048 - -/* Data should be SEG LEN size aligned */ -#define MAX_MSG_LEN 2016 - -#define MSG_NOT_RESP 0xFFFF - -#define MGMT_MSG_TIMEOUT 1000 - -#define mgmt_to_pfhwdev(pf_mgmt) \ - container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) - -enum msg_segment_type { - NOT_LAST_SEGMENT = 0, - LAST_SEGMENT = 1, -}; - -enum mgmt_direction_type { - MGMT_DIRECT_SEND = 0, - MGMT_RESP = 1, -}; - -enum msg_ack_type { - MSG_ACK = 0, - MSG_NO_ACK = 1, -}; - -/** - * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that this handler will handle its messages - * @handle: private data for the callback - * @callback: the handler that will handle messages - **/ -void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, - void *handle, - void (*callback)(void *handle, - u8 cmd, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)) -{ - struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; - - mgmt_cb->cb = callback; - mgmt_cb->handle = handle; - mgmt_cb->state = HINIC_MGMT_CB_ENABLED; -} - -/** - * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that this handler handles its messages - **/ -void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod) -{ - struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; - - mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED; - - while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING) - schedule(); - - mgmt_cb->cb = NULL; -} - -/** - * prepare_header - prepare the header of the message - * @pf_to_mgmt: PF to MGMT channel - * @msg_len: the length of the message - * @mod: module in the chip that will get the message - * @ack_type: ask for response - * @direction: the direction of the message - * @cmd: command of the message - * @msg_id: message id - * - * Return the prepared header value - **/ -static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt, - u16 msg_len, enum hinic_mod_type mod, - enum msg_ack_type ack_type, - enum mgmt_direction_type direction, - u16 cmd, u16 msg_id) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - - return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | - HINIC_MSG_HEADER_SET(mod, MODULE) | - HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) | - HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | - HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | - HINIC_MSG_HEADER_SET(0, SEQID) | - HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | - HINIC_MSG_HEADER_SET(direction, DIRECTION) | - HINIC_MSG_HEADER_SET(cmd, CMD) | - HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) | - HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) | - HINIC_MSG_HEADER_SET(msg_id, MSG_ID); -} - -/** - * prepare_mgmt_cmd - prepare the mgmt command - * @mgmt_cmd: pointer to 
the command to prepare - * @header: pointer of the header for the message - * @msg: the data of the message - * @msg_len: the length of the message - **/ -static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len) -{ - memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); - - mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; - memcpy(mgmt_cmd, header, sizeof(*header)); - - mgmt_cmd += sizeof(*header); - memcpy(mgmt_cmd, msg, msg_len); -} - -/** - * mgmt_msg_len - calculate the total message length - * @msg_data_len: the length of the message data - * - * Return the total message length - **/ -static u16 mgmt_msg_len(u16 msg_data_len) -{ - /* RSVD + HEADER_SIZE + DATA_LEN */ - u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len; - - if (msg_len > MGMT_MSG_LEN_MIN) - msg_len = MGMT_MSG_LEN_MIN + - ALIGN((msg_len - MGMT_MSG_LEN_MIN), - MGMT_MSG_LEN_STEP); - else - msg_len = MGMT_MSG_LEN_MIN; - - return msg_len; -} - -/** - * send_msg_to_mgmt - send message to mgmt by API CMD - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @data: the msg data - * @data_len: the msg data length - * @ack_type: ask for response - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * - * Return 0 - Success, negative - Failure - **/ -static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *data, u16 data_len, - enum msg_ack_type ack_type, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - struct hinic_api_cmd_chain *chain; - u64 header; - u16 msg_id; - - msg_id = SYNC_MSG_ID(pf_to_mgmt); - - if (direction == MGMT_RESP) { - header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, - direction, cmd, resp_msg_id); - } else { - SYNC_MSG_ID_INC(pf_to_mgmt); - header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, - direction, cmd, msg_id); - } - - prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len); - - chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; - return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT, - pf_to_mgmt->sync_msg_buf, - mgmt_msg_len(data_len)); -} - -/** - * msg_to_mgmt_sync - send sync message to mgmt - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @buf_out: response - * @out_size: response length - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * - * Return 0 - Success, negative - Failure - **/ -static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *buf_in, u16 in_size, - u8 *buf_out, u16 *out_size, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_recv_msg *recv_msg; - struct completion *recv_done; - u16 msg_id; - int err; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - - recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; - recv_done = &recv_msg->recv_done; - - if (resp_msg_id == MSG_NOT_RESP) - msg_id = SYNC_MSG_ID(pf_to_mgmt); - else - msg_id = resp_msg_id; - - init_completion(recv_done); - - err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, - MSG_ACK, direction, resp_msg_id); - if (err) { - dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n"); - goto unlock_sync_msg; - } - - if 
(!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { - dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); - err = -ETIMEDOUT; - goto unlock_sync_msg; - } - - smp_rmb(); /* verify reading after completion */ - - if (recv_msg->msg_id != msg_id) { - dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id); - err = -EFAULT; - goto unlock_sync_msg; - } - - if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) { - memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); - *out_size = recv_msg->msg_len; - } - -unlock_sync_msg: - up(&pf_to_mgmt->sync_msg_lock); - return err; -} - -/** - * msg_to_mgmt_async - send message to mgmt without response - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * - * Return 0 - Success, negative - Failure - **/ -static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - u8 *buf_in, u16 in_size, - enum mgmt_direction_type direction, - u16 resp_msg_id) -{ - int err; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - - err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, - MSG_NO_ACK, direction, resp_msg_id); - - up(&pf_to_mgmt->sync_msg_lock); - return err; -} - -/** - * hinic_msg_to_mgmt - send message to mgmt - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @buf_in: the msg data - * @in_size: the msg data length - * @buf_out: response - * @out_size: returned response length - * @sync: sync msg or async msg - * - * Return 0 - Success, negative - Failure - **/ -int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size, - enum hinic_mgmt_msg_type sync) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - - if (sync != HINIC_MGMT_MSG_SYNC) { - dev_err(&pdev->dev, "Invalid MGMT msg type\n"); - return -EINVAL; - } - - if (!MSG_SZ_IS_VALID(in_size)) { - dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n"); - return -EINVAL; - } - - return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, - buf_out, out_size, MGMT_DIRECT_SEND, - MSG_NOT_RESP); -} - -/** - * mgmt_recv_msg_handler - handler for message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - u8 *buf_out = recv_msg->buf_out; - struct hinic_mgmt_cb *mgmt_cb; - unsigned long cb_state; - u16 out_size = 0; - - if (recv_msg->mod >= HINIC_MOD_MAX) { - dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", - recv_msg->mod); - return; - } - - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; - - cb_state = cmpxchg(&mgmt_cb->state, - HINIC_MGMT_CB_ENABLED, - HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); - - if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, - recv_msg->msg, recv_msg->msg_len, - buf_out, &out_size); - else - dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", - recv_msg->mod); - - mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; - - if (!recv_msg->async_mgmt_to_pf) - /* MGMT sent sync msg, send 
the response */ - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, - buf_out, out_size, MGMT_RESP, - recv_msg->msg_id); -} - -/** - * mgmt_resp_msg_handler - handler for a response message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - wmb(); /* verify writing all, before reading */ - - complete(&recv_msg->recv_done); -} - -/** - * recv_mgmt_msg_handler - handler for a message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @header: the header of the message - * @recv_msg: received message details - **/ -static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - u64 *header, struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - int seq_id, seg_len; - u8 *msg_body; - - seq_id = HINIC_MSG_HEADER_GET(*header, SEQID); - seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN); - - if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) { - dev_err(&pdev->dev, "recv big mgmt msg\n"); - return; - } - - msg_body = (u8 *)header + sizeof(*header); - memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len); - - if (!HINIC_MSG_HEADER_GET(*header, LAST)) - return; - - recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD); - recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE); - recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header, - ASYNC_MGMT_TO_PF); - recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN); - recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID); - - if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP) - mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); - else - mgmt_recv_msg_handler(pf_to_mgmt, recv_msg); -} - -/** - * mgmt_msg_aeqe_handler - handler for a mgmt message event - * @handle: PF to MGMT channel - * @data: the header of the message - * @size: unused - **/ -static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) -{ - struct hinic_pf_to_mgmt *pf_to_mgmt = handle; - struct hinic_recv_msg *recv_msg; - u64 *header = (u64 *)data; - - recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) == - MGMT_DIRECT_SEND ? 
- &pf_to_mgmt->recv_msg_from_mgmt : - &pf_to_mgmt->recv_resp_msg_from_mgmt; - - recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); -} - -/** - * alloc_recv_msg - allocate receive message memory - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: pointer that will hold the allocated data - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - - recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!recv_msg->msg) - return -ENOMEM; - - recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!recv_msg->buf_out) - return -ENOMEM; - - return 0; -} - -/** - * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) -{ - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - int err; - - err = alloc_recv_msg(pf_to_mgmt, - &pf_to_mgmt->recv_msg_from_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate recv msg\n"); - return err; - } - - err = alloc_recv_msg(pf_to_mgmt, - &pf_to_mgmt->recv_resp_msg_from_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate resp recv msg\n"); - return err; - } - - pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev, - MAX_PF_MGMT_BUF_SIZE, - GFP_KERNEL); - if (!pf_to_mgmt->sync_msg_buf) - return -ENOMEM; - - return 0; -} - -/** - * hinic_pf_to_mgmt_init - initialize PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * @hwif: HW interface the PF to MGMT will use for accessing HW - * - * Return 0 - Success, negative - Failure - **/ -int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_hwif *hwif) -{ - struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - struct pci_dev *pdev = hwif->pdev; - int err; - - pf_to_mgmt->hwif = hwif; - - sema_init(&pf_to_mgmt->sync_msg_lock, 1); - pf_to_mgmt->sync_msg_id = 0; - - err = alloc_msg_buf(pf_to_mgmt); - if (err) { - dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); - return err; - } - - err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); - if (err) { - dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); - return err; - } - - hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, - pf_to_mgmt, - mgmt_msg_aeqe_handler); - return 0; -} - -/** - * hinic_pf_to_mgmt_free - free PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - **/ -void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) -{ - struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); - struct hinic_hwdev *hwdev = &pfhwdev->hwdev; - - hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); - hinic_api_cmd_free(pf_to_mgmt->cmd_chain); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index 320711e8dee6cf1dc83559548f6d8a23d2019d3d..ff742b37c23295391cad9dfdfe67f7056aab26e3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can 
redistribute it and/or modify it @@ -13,141 +13,557 @@ * */ -#ifndef HINIC_HW_MGMT_H -#define HINIC_HW_MGMT_H +#ifndef HINIC_HW_MGMT_H_ +#define HINIC_HW_MGMT_H_ -#include -#include -#include -#include +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hinic_service_type { + SERVICE_T_NIC = 0, + SERVICE_T_OVS, + SERVICE_T_ROCE, + SERVICE_T_TOE, + SERVICE_T_IWARP, + SERVICE_T_FC, + SERVICE_T_FCOE, + SERVICE_T_MIGRATE, + SERVICE_T_PT, + SERVICE_T_HWPT, + SERVICE_T_MAX, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), +}; + +/* NIC service capability + * 1, The chip supports NIC RQ is 1K + * 2, PF/VF RQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * enable the RSS: + * disable VMDq: each PF at most 64 RQ, VF at most 32 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * + * 3, The chip supports NIC SQ is 1K + * 4, PF/VF SQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + * enable the RSS: + * disable VMDq: each PF at most 64 SQ, VF at most 32 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + */ +struct nic_service_cap { + /* PF resources */ + u16 max_sqs; + u16 max_rqs; + + /* VF resources, vf obtain through the MailBox mechanism from + * according PF + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + bool lro_en; /* LRO feature enable bit*/ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ + + u16 max_queue_allowed; + u16 dynamic_qp; /* support dynamic queue */ +}; + +struct dev_roce_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_srqs; + u32 max_mpts; + + u32 vf_max_qps; + u32 vf_max_cqs; + u32 vf_max_srqs; + u32 vf_max_mpts; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; + + u32 qpc_entry_sz; + u32 max_wqes; + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; + + u32 rdmarc_entry_sz; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqes; + u32 reserved_srqs; + u32 max_srq_sge; + u32 srqc_entry_sz; + + u32 max_msg_sz; /* Message size 2GB */ + + u8 num_cos; +}; + +struct dev_iwarp_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_mpts; + + u32 vf_max_qps; + u32 vf_max_cqs; + u32 vf_max_mpts; + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; + + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; + + u32 max_irq_depth; + u32 irq_entry_size; /* 64B */ + u32 max_orq_depth; + u32 orq_entry_size; /* 32B */ + u32 max_rtoq_depth; + u32 rtoq_entry_size; /* 32B */ + u32 max_ackq_depth; + u32 ackq_entry_size; /* 16B */ + + u32 max_msg_sz; /* Message size 1GB */ + + u32 max_wqes; /* 8K */ + u32 qpc_entry_sz; /* 1K */ + + /* true:CQM uses static allocation; + * false:CQM uses dynamic allocation. 
+ * Currently, only consider the QPC + */ + bool alloc_flag; + + u8 num_cos; +}; + +/* RDMA service capability structure */ +struct dev_rdma_svc_cap { + /* ROCE service unique parameter structure */ + struct dev_roce_svc_own_cap roce_own_cap; + /* IWARP service unique parameter structure */ + struct dev_iwarp_svc_own_cap iwarp_own_cap; +}; + +/* Defines the RDMA service capability flag */ +enum { + RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +/* RDMA services */ +struct rdma_service_cap { + struct dev_rdma_svc_cap dev_rdma_cap; + + u8 log_mtt; /* 1. the number of MTT PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PAs + */ + u8 log_rdmarc; /* 1. the number of RDMArc PA must be integer power of 2 + * 2. represented by logarithm. Each RDMArc table can + * contain 1, 2, 4, 8, and 16 PAs + */ + + u32 reserved_qps; /* Number of reserved QP */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum + * size is 960B(944B aligned to the 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 wqebb_size; /* Currently, the chip supports 64B and 128B, + * defined as 64Bytes + */ + + u32 max_cqes; /* Size of the depth of the CQ (64K-1) */ + u32 reserved_cqs; /* Number of reserved CQ */ + u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrws; /* Number of reserved MR/MR Window */ + u32 mpt_entry_sz; /* MPT table size (64B) */ + u32 max_fmr_maps; /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + + u32 num_mtts; /* Number of MTT table (4M), + * is actually MTT seg number + */ + /* MTT table number of Each MTT seg(3) */ + u32 log_mtt_seg; + u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */ + u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ + + /* Timeout time. 
Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ + u32 local_ca_ack_delay; + u32 num_ports; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 num_pds; /* Maximum number of PD (128K) */ + u32 reserved_pds; /* Number of reserved PD*/ + u32 max_xrcds; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcds; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + u32 gid_entry_sz; /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + + u32 reserved_lkey; /* local_dma_lkey */ + u32 num_comp_vectors; /* Number of complete vector (32) */ + u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M,4M page_size */ + + u32 flags; /* RDMA some identity */ + u32 max_frpl_len; /* Maximum number of pages frmr registration */ + u32 max_pkeys; /* Number of supported pkey group */ +}; + +/* PF/VF FCoE service resource structure defined */ +struct dev_fcoe_svc_cap { + /* PF resources */ + u32 max_qps; + u32 max_cqs; + u32 max_srqs; + + /* Child Context(Task IO) + * For FCoE/IOE services, at most 8K + */ + u32 max_cctxs; + u32 cctxs_id_start; + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FCoE services */ +struct fcoe_service_cap { + struct dev_fcoe_svc_cap dev_fcoe_cap; -#include "hinic_hw_if.h" -#include "hinic_hw_api_cmd.h" + /* SQ */ + u32 qpc_basic_size; + u32 childc_basic_size; + u32 sqe_size; -#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 -#define HINIC_MSG_HEADER_MODULE_SHIFT 11 -#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 -#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 -#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 -#define HINIC_MSG_HEADER_SEQID_SHIFT 24 -#define HINIC_MSG_HEADER_LAST_SHIFT 30 -#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 -#define HINIC_MSG_HEADER_CMD_SHIFT 32 -#define HINIC_MSG_HEADER_ZEROS_SHIFT 40 -#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48 -#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50 -#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + /* SCQ */ + u32 scqc_basic_size; + u32 scqe_size; + + /* SRQ */ + u32 srqc_size; + u32 srqe_size; +}; -#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF -#define HINIC_MSG_HEADER_MODULE_MASK 0x1F -#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F -#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 -#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 -#define HINIC_MSG_HEADER_SEQID_MASK 0x3F -#define HINIC_MSG_HEADER_LAST_MASK 0x1 -#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 -#define HINIC_MSG_HEADER_CMD_MASK 0xFF -#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF -#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3 -#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF -#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF +/* PF/VF ToE service resource structure */ +struct dev_toe_svc_cap { + /* PF resources*/ + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 max_cqs; + u32 max_srqs; + u32 srq_id_start; -#define HINIC_MSG_HEADER_SET(val, member) \ - ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ - HINIC_MSG_HEADER_##member##_SHIFT) + u8 num_cos; +}; -#define HINIC_MSG_HEADER_GET(val, member) \ - (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ - HINIC_MSG_HEADER_##member##_MASK) +/* ToE services */ +struct toe_service_cap { + struct dev_toe_svc_cap dev_toe_cap; -enum hinic_mgmt_msg_type { - HINIC_MGMT_MSG_SYNC = 1, + bool alloc_flag; + u32 pctx_sz;/* 1KB */ + u32 scqc_sz;/* 64B */ }; -enum hinic_cfg_cmd { - HINIC_CFG_NIC_CAP = 0, +/* PF FC service resource structure defined */ +struct dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + 
+ /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ*/ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; }; -enum hinic_comm_cmd { - HINIC_COMM_CMD_IO_STATUS_GET = 0x3, +/* FC services*/ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ - HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10, - HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11, + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ - HINIC_COMM_CMD_HWCTXT_SET = 0x12, - HINIC_COMM_CMD_HWCTXT_GET = 0x13, + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +/* PF OVS service resource structure defined */ +struct dev_ovs_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 max_cqs; + u8 dynamic_qp_en; + + /* VF resources */ + u32 vf_max_pctxs; /* Parent Context: max specifications 1M */ + u32 vf_max_cqs; +}; - HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14, +/* OVS services */ +struct ovs_service_cap { + struct dev_ovs_svc_cap dev_ovs_cap; - HINIC_COMM_CMD_RES_STATE_SET = 0x24, + bool alloc_flag; + u32 pctx_sz; /* 512B */ + u32 scqc_sz; /* 64B */ +}; - HINIC_COMM_CMD_IO_RES_CLEAR = 0x29, +/* PF ACL service resource structure */ +struct dev_acl_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 max_cqs; - HINIC_COMM_CMD_MAX = 0x32, + /* VF resources */ + u32 vf_max_pctxs; /* Parent Context: max specifications 1M */ + u32 vf_max_cqs; }; -enum hinic_mgmt_cb_state { - HINIC_MGMT_CB_ENABLED = BIT(0), - HINIC_MGMT_CB_RUNNING = BIT(1), +/* ACL services */ +struct acl_service_cap { + struct dev_acl_svc_cap dev_acl_cap; + + bool alloc_flag; + u32 pctx_sz; /* 512B */ + u32 scqc_sz; /* 64B */ }; -struct hinic_recv_msg { - u8 *msg; - u8 *buf_out; +enum hinic_chip_mode { + CHIP_MODE_NORMAL, + CHIP_MODE_BMGW, + CHIP_MODE_VMGW, +}; - struct completion recv_done; +bool hinic_support_nic(void *hwdev, struct nic_service_cap *cap); +bool hinic_support_roce(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_fcoe(void *hwdev, struct fcoe_service_cap *cap); +/* PPF support,PF not support */ +bool hinic_support_toe(void *hwdev, struct toe_service_cap *cap); +bool hinic_support_iwarp(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_fc(void *hwdev, struct fc_service_cap *cap); +bool hinic_support_fic(void *hwdev); +bool hinic_support_ovs(void *hwdev, struct ovs_service_cap *cap); +bool hinic_support_acl(void *hwdev, struct acl_service_cap *cap); +bool hinic_support_rdma(void *hwdev, struct rdma_service_cap *cap); +bool hinic_support_ft(void *hwdev); +bool hinic_func_for_mgmt(void *hwdev); +bool hinic_support_dynamic_q(void *hwdev); + +int hinic_set_toe_enable(void *hwdev, bool enable); +bool hinic_get_toe_enable(void *hwdev); +int hinic_set_fcoe_enable(void *hwdev, bool enable); +bool hinic_get_fcoe_enable(void *hwdev); +bool hinic_get_stateful_enable(void *hwdev); + +/* Service interface for obtaining service_cap public fields */ +/* Obtain service_cap.host_oq_id_mask_val */ +u8 hinic_host_oq_id_mask(void *hwdev); +u8 hinic_host_id(void *hwdev);/* Obtain service_cap.host_id */ +/* Obtain service_cap.host_total_function */ +u16 hinic_host_total_func(void *hwdev); +/* Obtain 
service_cap.nic_cap.dev_nic_cap.max_sqs */ +u16 hinic_func_max_nic_qnum(void *hwdev); +/* Obtain service_cap.dev_cap.max_sqs */ +u16 hinic_func_max_qnum(void *hwdev); +u8 hinic_ep_id(void *hwdev);/* Obtain service_cap.ep_id */ +u8 hinic_er_id(void *hwdev);/* Obtain service_cap.er_id */ +u8 hinic_physical_port_id(void *hwdev);/* Obtain service_cap.port_id */ +u8 hinic_func_max_vf(void *hwdev);/* Obtain service_cap.max_vf */ +u32 hinic_func_pf_num(void *hwdev);/* Obtain service_cap.pf_num */ +u8 hinic_max_num_cos(void *hwdev); +u8 hinic_cos_valid_bitmap(void *hwdev); +u8 hinic_net_port_mode(void *hwdev);/* Obtain service_cap.net_port_mode */ + +/* The following information is obtained from the bar space + * which is recorded by SDK layer. + * Here we provide parameter query interfaces for services + */ +/* func_attr.glb_func_idx, global function index */ +u16 hinic_global_func_id(void *hwdev); +/* func_attr.intr_num, MSI-X table entry in function */ +u16 hinic_intr_num(void *hwdev); +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, + /* PXE, OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; - - u16 cmd; - enum hinic_mod_type mod; - int async_mgmt_to_pf; +enum intr_type hinic_intr_type(void *hwdev); - - u16 msg_len; - u16 msg_id; +u8 hinic_pf_id_of_vf(void *hwdev); /* func_attr.p2p_idx, the PF this VF belongs to */ +u8 hinic_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */ +u8 hinic_vf_in_pf(void *hwdev); /* func_attr.vf_in_pf, the VF offset in the PF */ +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, }; -struct hinic_mgmt_cb { - void (*cb)(void *handle, u8 cmd, - void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); +/* func_attr.func_type, 0-PF 1-VF 2-PPF */ +enum func_type hinic_func_type(void *hwdev); + +u8 hinic_ceq_num(void *hwdev); /* func_attr.ceq_num, ceq num in one function */ +/* func_attr.dma_attr_entry_num, dma attribute entry num */ +u8 hinic_dma_attr_entry_num(void *hwdev); +/* The PF func_attr.glb_pf_vf_offset, + * PF use only + */ +u16 hinic_glb_pf_vf_offset(void *hwdev); +/* func_attr.mpf_idx, mpf global function index, + * This value is valid only when it is PF + */ +u8 hinic_mpf_idx(void *hwdev); +u8 hinic_ppf_idx(void *hwdev); - void *handle; - unsigned long state; +enum hinic_msix_state { + HINIC_MSIX_ENABLE, + HINIC_MSIX_DISABLE, }; -struct hinic_pf_to_mgmt { - struct hinic_hwif *hwif; +void hinic_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic_msix_state flag); +enum hinic_msix_state hinic_get_msix_state(void *hwdev, u16 msix_idx); + +/* Define the version information structure */ +struct dev_version_info { + u8 up_ver; /* uP version, directly read from uP + * is not configured to file + */ + u8 ucode_ver; /* The microcode version, + * read through the CMDq from microcode + */ + u8 cfg_file_ver;/* uP configuration file version */ + u8 sdk_ver; /* SDK driver version */ + u8 hw_ver; /* Hardware version */ +}; - struct semaphore sync_msg_lock; - u16 sync_msg_id; - u8 *sync_msg_buf; +/* Obtain service_cap.dev_version_info */ +int hinic_dev_ver_info(void *hwdev, struct dev_version_info *ver); - struct hinic_recv_msg recv_resp_msg_from_mgmt; - struct hinic_recv_msg recv_msg_from_mgmt; +int hinic_vector_to_eqn(void *hwdev, enum hinic_service_type type, int vector); - struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; +/* Defines the IRQ information structure */ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number 
*/ + u32 irq_id; /* the IRQ number from OS */ +}; - struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; +int hinic_alloc_irqs(void *hwdev, enum hinic_service_type type, u16 req_num, + struct irq_info *irq_info_array, u16 *resp_num); +void hinic_free_irq(void *hwdev, enum hinic_service_type type, u32 irq_id); +int hinic_alloc_ceqs(void *hwdev, enum hinic_service_type type, int req_num, + int *ceq_id_array, int *resp_num); +void hinic_free_ceq(void *hwdev, enum hinic_service_type type, int ceq_id); +int hinic_sync_time(void *hwdev, u64 time); +void hinic_sync_time_async(void *hwdev, u64 time); + +struct hinic_micro_log_info { + int (*init)(void *hwdev); + void (*deinit)(void *hwdev); }; -void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, - void *handle, - void (*callback)(void *handle, - u8 cmd, void *buf_in, - u16 in_size, void *buf_out, - u16 *out_size)); +int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info); +void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info); -void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod); +void hinic_disable_mgmt_msg_report(void *hwdev); +void hinic_set_func_deinit_flag(void *hwdev); +void hinic_flush_mgmt_workq(void *hwdev); -int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, - enum hinic_mod_type mod, u8 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size, - enum hinic_mgmt_msg_type sync); +enum func_nic_state { + HINIC_FUNC_NIC_DEL, + HINIC_FUNC_NIC_ADD, +}; -int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_hwif *hwif); +struct hinic_func_nic_state { + u8 state; + u8 rsvd0; + u16 func_idx; -void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt); + u8 rsvd1[16]; +}; +int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state); +int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en); +bool hinic_get_master_host_mbox_enable(void *hwdev); +bool hinic_get_slave_host_enable(void *hwdev, u8 host_id); +int hinic_func_own_get(void *hwdev); +void hinic_func_own_free(void *hwdev); +int hinic_global_func_id_get(void *hwdev, u16 *func_id); +u16 hinic_pf_id_of_vf_hw(void *hwdev); +u16 hinic_global_func_id_hw(void *hwdev); +bool hinic_func_for_pt(void *hwdev); +bool hinic_func_for_hwpt(void *hwdev); +u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c deleted file mode 100644 index cb239627770f4a8d64b25b1414c44129c680a55b..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ /dev/null @@ -1,907 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp_ctxt.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_io.h" - -#define SQ_DB_OFF SZ_2K - -/* The number of cache line to prefetch Until threshold state */ -#define WQ_PREFETCH_MAX 2 -/* The number of cache line to prefetch After threshold state */ -#define WQ_PREFETCH_MIN 1 -/* Threshold state */ -#define WQ_PREFETCH_THRESHOLD 256 - -/* sizes of the SQ/RQ ctxt */ -#define Q_CTXT_SIZE 48 -#define CTXT_RSVD 240 - -#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ - (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE) - -#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ - (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ - (max_sqs + (q_id)) * Q_CTXT_SIZE) - -#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) -#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) -#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3) - -#define SQ_DB_PI_HI_SHIFT 8 -#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT) - -#define SQ_DB_PI_LOW_MASK 0xFF -#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK) - -#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) - -#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) -#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) - -#define TX_MAX_MSS_DEFAULT 0x3E00 - -enum sq_wqe_type { - SQ_NORMAL_WQE = 0, -}; - -enum rq_completion_fmt { - RQ_COMPLETE_SGE = 1 -}; - -void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, - enum hinic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 max_queues) -{ - u16 max_sqs = max_queues; - u16 max_rqs = max_queues; - - qp_ctxt_hdr->num_queues = num_queues; - qp_ctxt_hdr->queue_type = ctxt_type; - - if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) - qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0); - else - qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0); - - qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); - - hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); -} - -void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, - struct hinic_sq *sq, u16 global_qid) -{ - u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; - u64 wq_page_addr, wq_page_pfn, wq_block_pfn; - u16 pi_start, ci_start; - struct hinic_wq *wq; - - wq = sq->wq; - ci_start = atomic_read(&wq->cons_idx); - pi_start = atomic_read(&wq->prod_idx); - - /* Read the first page paddr from the WQ page paddr ptrs */ - wq_page_addr = be64_to_cpu(*wq->block_vaddr); - - wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid, - GLOBAL_SQ_ID) | - HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN); - - sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) | - HINIC_SQ_CTXT_CI_SET(1, WRAPPED); - - sq_ctxt->wq_hi_pfn_pi = - HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI); - - sq_ctxt->wq_lo_pfn = wq_page_pfn_lo; - - sq_ctxt->pref_cache = - HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - 
HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - sq_ctxt->pref_wrapped = 1; - - sq_ctxt->pref_wq_hi_pfn_ci = - HINIC_SQ_CTXT_PREF_SET(ci_start, CI) | - HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN); - - sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; - - sq_ctxt->wq_block_hi_pfn = - HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); - - sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; - - hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); -} - -void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, - struct hinic_rq *rq, u16 global_qid) -{ - u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; - u64 wq_page_addr, wq_page_pfn, wq_block_pfn; - u16 pi_start, ci_start; - struct hinic_wq *wq; - - wq = rq->wq; - ci_start = atomic_read(&wq->cons_idx); - pi_start = atomic_read(&wq->prod_idx); - - /* Read the first page paddr from the WQ page paddr ptrs */ - wq_page_addr = be64_to_cpu(*wq->block_vaddr); - - wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) | - HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED); - - rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) | - HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); - - rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, - HI_PFN) | - HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI); - - rq_ctxt->wq_lo_pfn = wq_page_pfn_lo; - - rq_ctxt->pref_cache = - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - rq_ctxt->pref_wrapped = 1; - - rq_ctxt->pref_wq_hi_pfn_ci = - HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) | - HINIC_RQ_CTXT_PREF_SET(ci_start, CI); - - rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; - - rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); - rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); - - rq_ctxt->wq_block_hi_pfn = - HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); - - rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; - - hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); -} - -/** - * alloc_sq_skb_arr - allocate sq array for saved skb - * @sq: HW Send Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_sq_skb_arr(struct hinic_sq *sq) -{ - struct hinic_wq *wq = sq->wq; - size_t skb_arr_size; - - skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); - sq->saved_skb = vzalloc(skb_arr_size); - if (!sq->saved_skb) - return -ENOMEM; - - return 0; -} - -/** - * free_sq_skb_arr - free sq array for saved skb - * @sq: HW Send Queue - **/ -static void free_sq_skb_arr(struct hinic_sq *sq) -{ - vfree(sq->saved_skb); -} - -/** - * alloc_rq_skb_arr - allocate rq array for saved skb - * @rq: HW Receive Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_rq_skb_arr(struct hinic_rq *rq) -{ - struct hinic_wq *wq = rq->wq; - size_t skb_arr_size; - - skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); - rq->saved_skb = vzalloc(skb_arr_size); - if (!rq->saved_skb) - return -ENOMEM; - - return 0; -} - -/** - * free_rq_skb_arr - free rq array for saved skb - * @rq: HW Receive Queue - **/ -static void free_rq_skb_arr(struct hinic_rq *rq) -{ - vfree(rq->saved_skb); -} - -/** - * hinic_init_sq - Initialize HW Send Queue - * @sq: HW Send Queue - * @hwif: HW 
Interface for accessing HW - * @wq: Work Queue for the data of the SQ - * @entry: msix entry for sq - * @ci_addr: address for reading the current HW consumer index - * @ci_dma_addr: dma address for reading the current HW consumer index - * @db_base: doorbell base address - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry, - void *ci_addr, dma_addr_t ci_dma_addr, - void __iomem *db_base) -{ - sq->hwif = hwif; - - sq->wq = wq; - - sq->irq = entry->vector; - sq->msix_entry = entry->entry; - - sq->hw_ci_addr = ci_addr; - sq->hw_ci_dma_addr = ci_dma_addr; - - sq->db_base = db_base + SQ_DB_OFF; - - return alloc_sq_skb_arr(sq); -} - -/** - * hinic_clean_sq - Clean HW Send Queue's Resources - * @sq: Send Queue - **/ -void hinic_clean_sq(struct hinic_sq *sq) -{ - free_sq_skb_arr(sq); -} - -/** - * alloc_rq_cqe - allocate rq completion queue elements - * @rq: HW Receive Queue - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_rq_cqe(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t cqe_dma_size, cqe_size; - struct hinic_wq *wq = rq->wq; - int j, i; - - cqe_size = wq->q_depth * sizeof(*rq->cqe); - rq->cqe = vzalloc(cqe_size); - if (!rq->cqe) - return -ENOMEM; - - cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); - rq->cqe_dma = vzalloc(cqe_dma_size); - if (!rq->cqe_dma) - goto err_cqe_dma_arr_alloc; - - for (i = 0; i < wq->q_depth; i++) { - rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, - sizeof(*rq->cqe[i]), - &rq->cqe_dma[i], GFP_KERNEL); - if (!rq->cqe[i]) - goto err_cqe_alloc; - } - - return 0; - -err_cqe_alloc: - for (j = 0; j < i; j++) - dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], - rq->cqe_dma[j]); - - vfree(rq->cqe_dma); - -err_cqe_dma_arr_alloc: - vfree(rq->cqe); - return -ENOMEM; -} - -/** - * free_rq_cqe - free rq completion queue elements - * @rq: HW Receive Queue - **/ -static void free_rq_cqe(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_wq *wq = rq->wq; - int i; - - for (i = 0; i < wq->q_depth; i++) - dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], - rq->cqe_dma[i]); - - vfree(rq->cqe_dma); - vfree(rq->cqe); -} - -/** - * hinic_init_rq - Initialize HW Receive Queue - * @rq: HW Receive Queue - * @hwif: HW Interface for accessing HW - * @wq: Work Queue for the data of the RQ - * @entry: msix entry for rq - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry) -{ - struct pci_dev *pdev = hwif->pdev; - size_t pi_size; - int err; - - rq->hwif = hwif; - - rq->wq = wq; - - rq->irq = entry->vector; - rq->msix_entry = entry->entry; - - rq->buf_sz = HINIC_RX_BUF_SZ; - - err = alloc_rq_skb_arr(rq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); - return err; - } - - err = alloc_rq_cqe(rq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); - goto err_alloc_rq_cqe; - } - - /* HW requirements: Must be at least 32 bit */ - pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); - rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, - &rq->pi_dma_addr, GFP_KERNEL); - if (!rq->pi_virt_addr) { - dev_err(&pdev->dev, "Failed to allocate PI address\n"); - err = -ENOMEM; - goto err_pi_virt; - } - - return 0; - -err_pi_virt: - free_rq_cqe(rq); - 
-err_alloc_rq_cqe: - free_rq_skb_arr(rq); - return err; -} - -/** - * hinic_clean_rq - Clean HW Receive Queue's Resources - * @rq: HW Receive Queue - **/ -void hinic_clean_rq(struct hinic_rq *rq) -{ - struct hinic_hwif *hwif = rq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t pi_size; - - pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); - dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr, - rq->pi_dma_addr); - - free_rq_cqe(rq); - free_rq_skb_arr(rq); -} - -/** - * hinic_get_sq_free_wqebbs - return number of free wqebbs for use - * @sq: send queue - * - * Return number of free wqebbs - **/ -int hinic_get_sq_free_wqebbs(struct hinic_sq *sq) -{ - struct hinic_wq *wq = sq->wq; - - return atomic_read(&wq->delta) - 1; -} - -/** - * hinic_get_rq_free_wqebbs - return number of free wqebbs for use - * @rq: recv queue - * - * Return number of free wqebbs - **/ -int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) -{ - struct hinic_wq *wq = rq->wq; - - return atomic_read(&wq->delta) - 1; -} - -static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx, - int nr_descs) -{ - u32 ctrl_size, task_size, bufdesc_size; - - ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); - task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); - bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc); - bufdesc_size = SIZE_8BYTES(bufdesc_size); - - ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | - HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) | - HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - HINIC_SQ_CTRL_SET(ctrl_size, LEN); - - ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT, - QUEUE_INFO_MSS); -} - -static void sq_prepare_task(struct hinic_sq_task *task) -{ - task->pkt_info0 = - HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) | - HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) | - HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN, - INNER_L3TYPE) | - HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE, - VLAN_OFFLOAD) | - HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG); - - task->pkt_info1 = - HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) | - HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) | - HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN); - - task->pkt_info2 = - HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) | - HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) | - HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN, - TUNNEL_L4TYPE) | - HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN, - OUTER_L3TYPE); - - task->ufo_v6_identify = 0; - - task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); - - task->zero_pad = 0; -} - -/** - * hinic_sq_prepare_wqe - prepare wqe before insert to the queue - * @sq: send queue - * @prod_idx: pi value - * @sq_wqe: wqe to prepare - * @sges: sges for use by the wqe for send for buf addresses - * @nr_sges: number of sges - **/ -void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, - int nr_sges) -{ - int i; - - sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges); - - sq_prepare_task(&sq_wqe->task); - - for (i = 0; i < nr_sges; i++) - sq_wqe->buf_descs[i].sge = sges[i]; -} - -/** - * sq_prepare_db - prepare doorbell to write - * @sq: send queue - * @prod_idx: pi value for the doorbell - * @cos: cos of the doorbell - * - * Return db value - **/ -static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) -{ - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx)); - - /* Data 
should be written to HW in Big Endian Format */ - return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) | - HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) | - HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) | - HINIC_SQ_DB_INFO_SET(cos, COS) | - HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); -} - -/** - * hinic_sq_write_db- write doorbell - * @sq: send queue - * @prod_idx: pi value for the doorbell - * @wqe_size: wqe size - * @cos: cos of the wqe - **/ -void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, - unsigned int cos) -{ - struct hinic_wq *wq = sq->wq; - - /* increment prod_idx to the next */ - prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - - wmb(); /* Write all before the doorbell */ - - writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx)); -} - -/** - * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi - * @sq: sq to get wqe from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, - unsigned int wqe_size, u16 *prod_idx) -{ - struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, - prod_idx); - - if (IS_ERR(hw_wqe)) - return NULL; - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_write_wqe - write the wqe to the sq - * @sq: send queue - * @prod_idx: pi of the wqe - * @sq_wqe: the wqe to write - * @skb: skb to save - * @wqe_size: the size of the wqe - **/ -void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *sq_wqe, - struct sk_buff *skb, unsigned int wqe_size) -{ - struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe; - - sq->saved_skb[prod_idx] = skb; - - /* The data in the HW should be in Big Endian Format */ - hinic_cpu_to_be32(sq_wqe, wqe_size); - - hinic_write_wqe(sq->wq, hw_wqe, wqe_size); -} - -/** - * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the - * wqe only have one wqebb - * @sq: send queue - * @skb: return skb that was saved - * @wqe_size: the wqe size ptr - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - struct hinic_sq_wqe *sq_wqe; - struct hinic_sq_ctrl *ctrl; - unsigned int buf_sect_len; - u32 ctrl_info; - - /* read the ctrl section for getting wqe size */ - hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); - if (IS_ERR(hw_wqe)) - return NULL; - - *skb = sq->saved_skb[*cons_idx]; - - sq_wqe = &hw_wqe->sq_wqe; - ctrl = &sq_wqe->ctrl; - ctrl_info = be32_to_cpu(ctrl->ctrl_info); - buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN); - - *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); - *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); - *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size); - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci - * @sq: send queue - * @skb: return skb that was saved - * @wqe_size: the size of the wqe - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int wqe_size, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - - hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx); - *skb = sq->saved_skb[*cons_idx]; - - return &hw_wqe->sq_wqe; -} - -/** - * hinic_sq_put_wqe - release the ci for new wqes - * @sq: send queue - * 
@wqe_size: the size of the wqe - **/ -void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) -{ - hinic_put_wqe(sq->wq, wqe_size); -} - -/** - * hinic_sq_get_sges - get sges from the wqe - * @sq_wqe: wqe to get the sges from its buffer addresses - * @sges: returned sges - * @nr_sges: number sges to return - **/ -void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, - int nr_sges) -{ - int i; - - for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) { - sges[i] = sq_wqe->buf_descs[i].sge; - hinic_be32_to_cpu(&sges[i], sizeof(sges[i])); - } -} - -/** - * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi - * @rq: rq to get wqe from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, - unsigned int wqe_size, u16 *prod_idx) -{ - struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size, - prod_idx); - - if (IS_ERR(hw_wqe)) - return NULL; - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_rq_write_wqe - write the wqe to the rq - * @rq: recv queue - * @prod_idx: pi of the wqe - * @rq_wqe: the wqe to write - * @skb: skb to save - **/ -void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) -{ - struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe; - - rq->saved_skb[prod_idx] = skb; - - /* The data in the HW should be in Big Endian Format */ - hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe)); - - hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe)); -} - -/** - * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci - * @rq: recv queue - * @wqe_size: the size of the wqe - * @skb: return saved skb - * @cons_idx: consumer index of the wqe - * - * Return wqe in ci position - **/ -struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, u16 *cons_idx) -{ - struct hinic_hw_wqe *hw_wqe; - struct hinic_rq_cqe *cqe; - int rx_done; - u32 status; - - hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx); - if (IS_ERR(hw_wqe)) - return NULL; - - cqe = rq->cqe[*cons_idx]; - - status = be32_to_cpu(cqe->status); - - rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE); - if (!rx_done) - return NULL; - - *skb = rq->saved_skb[*cons_idx]; - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position - * @rq: recv queue - * @wqe_size: the size of the wqe - * @skb: return saved skb - * @cons_idx: consumer index in the wq - * - * Return wqe in incremented ci position - **/ -struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, - u16 *cons_idx) -{ - struct hinic_wq *wq = rq->wq; - struct hinic_hw_wqe *hw_wqe; - unsigned int num_wqebbs; - - wqe_size = ALIGN(wqe_size, wq->wqebb_size); - num_wqebbs = wqe_size / wq->wqebb_size; - - *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs); - - *skb = rq->saved_skb[*cons_idx]; - - hw_wqe = hinic_read_wqe_direct(wq, *cons_idx); - - return &hw_wqe->rq_wqe; -} - -/** - * hinic_put_wqe - release the ci for new wqes - * @rq: recv queue - * @cons_idx: consumer index of the wqe - * @wqe_size: the size of the wqe - **/ -void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, - unsigned int wqe_size) -{ - struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; - u32 status = be32_to_cpu(cqe->status); - - status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE); - - /* Rx WQE size is 1 WQEBB, no wq shadow*/ - cqe->status = 
cpu_to_be32(status); - - wmb(); /* clear done flag */ - - hinic_put_wqe(rq->wq, wqe_size); -} - -/** - * hinic_rq_get_sge - get sge from the wqe - * @rq: recv queue - * @rq_wqe: wqe to get the sge from its buf address - * @cons_idx: consumer index - * @sge: returned sge - **/ -void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, - u16 cons_idx, struct hinic_sge *sge) -{ - struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; - u32 len = be32_to_cpu(cqe->len); - - sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr); - sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr); - sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN); -} - -/** - * hinic_rq_prepare_wqe - prepare wqe before insert to the queue - * @rq: recv queue - * @prod_idx: pi value - * @rq_wqe: the wqe - * @sge: sge for use by the wqe for recv buf address - **/ -void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) -{ - struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; - struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; - struct hinic_rq_cqe *cqe = rq->cqe[prod_idx]; - struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; - dma_addr_t cqe_dma = rq->cqe_dma[prod_idx]; - - ctrl->ctrl_info = - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), - COMPLETE_LEN) | - HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), - BUFDESC_SECT_LEN) | - HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); - - hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe)); - - buf_desc->hi_addr = sge->hi_addr; - buf_desc->lo_addr = sge->lo_addr; -} - -/** - * hinic_rq_update - update pi of the rq - * @rq: recv queue - * @prod_idx: pi value - **/ -void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) -{ - *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1)); -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h deleted file mode 100644 index 6c84f83ec283156a538706ba1c35e43d3015179d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef HINIC_HW_QP_H -#define HINIC_HW_QP_H - -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp_ctxt.h" - -#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0 -#define HINIC_SQ_DB_INFO_QID_SHIFT 8 -#define HINIC_SQ_DB_INFO_PATH_SHIFT 23 -#define HINIC_SQ_DB_INFO_COS_SHIFT 24 -#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27 - -#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF -#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF -#define HINIC_SQ_DB_INFO_PATH_MASK 0x1 -#define HINIC_SQ_DB_INFO_COS_MASK 0x7 -#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F - -#define HINIC_SQ_DB_INFO_SET(val, member) \ - (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \ - << HINIC_SQ_DB_INFO_##member##_SHIFT) - -#define HINIC_SQ_WQEBB_SIZE 64 -#define HINIC_RQ_WQEBB_SIZE 32 - -#define HINIC_SQ_PAGE_SIZE SZ_4K -#define HINIC_RQ_PAGE_SIZE SZ_4K - -#define HINIC_SQ_DEPTH SZ_4K -#define HINIC_RQ_DEPTH SZ_4K - -/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */ -#define HINIC_RX_BUF_SZ 2048 -#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX - -#define HINIC_MIN_TX_WQE_SIZE(wq) \ - ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size) - -#define HINIC_MIN_TX_NUM_WQEBBS(sq) \ - (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size) - -enum hinic_rx_buf_sz_idx { - HINIC_RX_BUF_SZ_32_IDX, - HINIC_RX_BUF_SZ_64_IDX, - HINIC_RX_BUF_SZ_96_IDX, - HINIC_RX_BUF_SZ_128_IDX, - HINIC_RX_BUF_SZ_192_IDX, - HINIC_RX_BUF_SZ_256_IDX, - HINIC_RX_BUF_SZ_384_IDX, - HINIC_RX_BUF_SZ_512_IDX, - HINIC_RX_BUF_SZ_768_IDX, - HINIC_RX_BUF_SZ_1024_IDX, - HINIC_RX_BUF_SZ_1536_IDX, - HINIC_RX_BUF_SZ_2048_IDX, - HINIC_RX_BUF_SZ_3072_IDX, - HINIC_RX_BUF_SZ_4096_IDX, - HINIC_RX_BUF_SZ_8192_IDX, - HINIC_RX_BUF_SZ_16384_IDX, -}; - -struct hinic_sq { - struct hinic_hwif *hwif; - - struct hinic_wq *wq; - - u32 irq; - u16 msix_entry; - - void *hw_ci_addr; - dma_addr_t hw_ci_dma_addr; - - void __iomem *db_base; - - struct sk_buff **saved_skb; -}; - -struct hinic_rq { - struct hinic_hwif *hwif; - - struct hinic_wq *wq; - - u32 irq; - u16 msix_entry; - - size_t buf_sz; - - struct sk_buff **saved_skb; - - struct hinic_rq_cqe **cqe; - dma_addr_t *cqe_dma; - - u16 *pi_virt_addr; - dma_addr_t pi_dma_addr; -}; - -struct hinic_qp { - struct hinic_sq sq; - struct hinic_rq rq; - - u16 q_id; -}; - -void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, - enum hinic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 max_queues); - -void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, - struct hinic_sq *sq, u16 global_qid); - -void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, - struct hinic_rq *rq, u16 global_qid); - -int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, - dma_addr_t ci_dma_addr, void __iomem *db_base); - -void hinic_clean_sq(struct hinic_sq *sq); - -int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, - struct hinic_wq *wq, struct msix_entry *entry); - -void hinic_clean_rq(struct hinic_rq *rq); - -int hinic_get_sq_free_wqebbs(struct hinic_sq *sq); - -int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); - -void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *wqe, struct hinic_sge *sges, - int nr_sges); - -void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, - unsigned int cos); - -struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, - unsigned int wqe_size, u16 
*prod_idx); - -void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, - struct hinic_sq_wqe *wqe, struct sk_buff *skb, - unsigned int wqe_size); - -struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int wqe_size, u16 *cons_idx); - -struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, - struct sk_buff **skb, - unsigned int *wqe_size, u16 *cons_idx); - -void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size); - -void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges, - int nr_sges); - -struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, - unsigned int wqe_size, u16 *prod_idx); - -void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *wqe, struct sk_buff *skb); - -struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, u16 *cons_idx); - -struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, - unsigned int wqe_size, - struct sk_buff **skb, - u16 *cons_idx); - -void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, - unsigned int wqe_size); - -void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe, - u16 cons_idx, struct hinic_sge *sge); - -void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, - struct hinic_rq_wqe *wqe, struct hinic_sge *sge); - -void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h deleted file mode 100644 index 376abf00762b3bd2fc2068a9d95143d194b091fd..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef HINIC_HW_QP_CTXT_H -#define HINIC_HW_QP_CTXT_H - -#include - -#include "hinic_hw_cmdq.h" - -#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 -#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 - -#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF -#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1 - -#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \ - << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11 -#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23 - -#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF -#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1 - -#define HINIC_SQ_CTXT_CI_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \ - << HINIC_SQ_CTXT_CI_##member##_SHIFT) - -#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20 - -#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF -#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF - -#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \ - << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF -#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF -#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F - -#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 -#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20 - -#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF -#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF - -#define HINIC_SQ_CTXT_PREF_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \ - << HINIC_SQ_CTXT_PREF_##member##_SHIFT) - -#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 - -#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF - -#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \ - << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 -#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1 - -#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1 -#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1 - -#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \ - << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0 -#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22 - -#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF -#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF - -#define HINIC_RQ_CTXT_PI_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \ - HINIC_RQ_CTXT_PI_##member##_SHIFT) - -#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20 - -#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF -#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF - -#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \ - HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF -#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF -#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F - -#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 -#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20 - -#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF -#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF - -#define HINIC_RQ_CTXT_PREF_SET(val, member) \ - 
(((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \ - HINIC_RQ_CTXT_PREF_##member##_SHIFT) - -#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 - -#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF - -#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \ - + (num_sqs) * sizeof(struct hinic_sq_ctxt)) - -#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \ - + (num_rqs) * sizeof(struct hinic_rq_ctxt)) - -#define HINIC_WQ_PAGE_PFN_SHIFT 12 -#define HINIC_WQ_BLOCK_PFN_SHIFT 9 - -#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT) -#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \ - HINIC_WQ_BLOCK_PFN_SHIFT) - -#define HINIC_Q_CTXT_MAX \ - ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \ - / sizeof(struct hinic_sq_ctxt)) - -enum hinic_qp_ctxt_type { - HINIC_QP_CTXT_TYPE_SQ, - HINIC_QP_CTXT_TYPE_RQ -}; - -struct hinic_qp_ctxt_header { - u16 num_queues; - u16 queue_type; - u32 addr_offset; -}; - -struct hinic_sq_ctxt { - u32 ceq_attr; - - u32 ci_wrapped; - - u32 wq_hi_pfn_pi; - u32 wq_lo_pfn; - - u32 pref_cache; - u32 pref_wrapped; - u32 pref_wq_hi_pfn_ci; - u32 pref_wq_lo_pfn; - - u32 rsvd0; - u32 rsvd1; - - u32 wq_block_hi_pfn; - u32 wq_block_lo_pfn; -}; - -struct hinic_rq_ctxt { - u32 ceq_attr; - - u32 pi_intr_attr; - - u32 wq_hi_pfn_ci; - u32 wq_lo_pfn; - - u32 pref_cache; - u32 pref_wrapped; - - u32 pref_wq_hi_pfn_ci; - u32 pref_wq_lo_pfn; - - u32 pi_paddr_hi; - u32 pi_paddr_lo; - - u32 wq_block_hi_pfn; - u32 wq_block_lo_pfn; -}; - -struct hinic_sq_ctxt_block { - struct hinic_qp_ctxt_header hdr; - struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; -}; - -struct hinic_rq_ctxt_block { - struct hinic_qp_ctxt_header hdr; - struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c deleted file mode 100644 index 3e3181c089bdc6475c4d030f340f4a56cc0d775e..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ /dev/null @@ -1,878 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_cmdq.h" - -#define WQS_BLOCKS_PER_PAGE 4 - -#define WQ_BLOCK_SIZE 4096 -#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) - -#define WQS_MAX_NUM_BLOCKS 128 -#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ - sizeof((wqs)->free_blocks[0])) - -#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) - -#define WQ_PAGE_ADDR_SIZE sizeof(u64) -#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) - -#define CMDQ_BLOCK_SIZE 512 -#define CMDQ_PAGE_SIZE 4096 - -#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) - -#define WQ_BASE_VADDR(wqs, wq) \ - ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define WQ_BASE_PADDR(wqs, wq) \ - ((wqs)->page_paddr[(wq)->page_idx] \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define WQ_BASE_ADDR(wqs, wq) \ - ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ - + (wq)->block_idx * WQ_BLOCK_SIZE) - -#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ - ((void *)((cmdq_pages)->page_vaddr) \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ - ((cmdq_pages)->page_paddr \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ - ((void *)((cmdq_pages)->shadow_page_vaddr) \ - + (wq)->block_idx * CMDQ_BLOCK_SIZE) - -#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \ - (wq)->wqebb_size) - -#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \ - & ((wq)->num_q_pages - 1)) - -#define WQ_PAGE_ADDR(wq, idx) \ - ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) - -#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) - -#define WQE_IN_RANGE(wqe, start, end) \ - (((unsigned long)(wqe) >= (unsigned long)(start)) && \ - ((unsigned long)(wqe) < (unsigned long)(end))) - -#define WQE_SHADOW_PAGE(wq, wqe) \ - (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ - / (wq)->max_wqe_size) - -/** - * queue_alloc_page - allocate page for Queue - * @hwif: HW interface for allocating DMA - * @vaddr: virtual address will be returned in this address - * @paddr: physical address will be returned in this address - * @shadow_vaddr: VM area will be return here for holding WQ page addresses - * @page_sz: page size of each WQ page - * - * Return 0 - Success, negative - Failure - **/ -static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, - void ***shadow_vaddr, size_t page_sz) -{ - struct pci_dev *pdev = hwif->pdev; - dma_addr_t dma_addr; - - *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, - GFP_KERNEL); - if (!*vaddr) { - dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); - return -ENOMEM; - } - - *paddr = (u64)dma_addr; - - /* use vzalloc for big mem */ - *shadow_vaddr = vzalloc(page_sz); - if (!*shadow_vaddr) - goto err_shadow_vaddr; - - return 0; - -err_shadow_vaddr: - dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); - return -ENOMEM; -} - -/** - * wqs_allocate_page - allocate page for WQ set - * @wqs: Work Queue Set - * @page_idx: the page index of the page will be allocated - * - * Return 0 - Success, negative - Failure - **/ -static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) -{ - return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], - &wqs->page_paddr[page_idx], - &wqs->shadow_page_vaddr[page_idx], - WQS_PAGE_SIZE); 
-} - -/** - * wqs_free_page - free page of WQ set - * @wqs: Work Queue Set - * @page_idx: the page index of the page will be freed - **/ -static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE, - wqs->page_vaddr[page_idx], - (dma_addr_t)wqs->page_paddr[page_idx]); - vfree(wqs->shadow_page_vaddr[page_idx]); -} - -/** - * cmdq_allocate_page - allocate page for cmdq - * @cmdq_pages: the pages of the cmdq queue struct to hold the page - * - * Return 0 - Success, negative - Failure - **/ -static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) -{ - return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr, - &cmdq_pages->page_paddr, - &cmdq_pages->shadow_page_vaddr, - CMDQ_PAGE_SIZE); -} - -/** - * cmdq_free_page - free page from cmdq - * @cmdq_pages: the pages of the cmdq queue struct that hold the page - * - * Return 0 - Success, negative - Failure - **/ -static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) -{ - struct hinic_hwif *hwif = cmdq_pages->hwif; - struct pci_dev *pdev = hwif->pdev; - - dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE, - cmdq_pages->page_vaddr, - (dma_addr_t)cmdq_pages->page_paddr); - vfree(cmdq_pages->shadow_page_vaddr); -} - -static int alloc_page_arrays(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t size; - - size = wqs->num_pages * sizeof(*wqs->page_paddr); - wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->page_paddr) - return -ENOMEM; - - size = wqs->num_pages * sizeof(*wqs->page_vaddr); - wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->page_vaddr) - goto err_page_vaddr; - - size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); - wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wqs->shadow_page_vaddr) - goto err_page_shadow_vaddr; - - return 0; - -err_page_shadow_vaddr: - devm_kfree(&pdev->dev, wqs->page_vaddr); - -err_page_vaddr: - devm_kfree(&pdev->dev, wqs->page_paddr); - return -ENOMEM; -} - -static void free_page_arrays(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - - devm_kfree(&pdev->dev, wqs->shadow_page_vaddr); - devm_kfree(&pdev->dev, wqs->page_vaddr); - devm_kfree(&pdev->dev, wqs->page_paddr); -} - -static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx, - int *block_idx) -{ - int pos; - - down(&wqs->alloc_blocks_lock); - - wqs->num_free_blks--; - - if (wqs->num_free_blks < 0) { - wqs->num_free_blks++; - up(&wqs->alloc_blocks_lock); - return -ENOMEM; - } - - pos = wqs->alloc_blk_pos++; - pos &= WQS_MAX_NUM_BLOCKS - 1; - - *page_idx = wqs->free_blocks[pos].page_idx; - *block_idx = wqs->free_blocks[pos].block_idx; - - wqs->free_blocks[pos].page_idx = -1; - wqs->free_blocks[pos].block_idx = -1; - - up(&wqs->alloc_blocks_lock); - return 0; -} - -static void wqs_return_block(struct hinic_wqs *wqs, int page_idx, - int block_idx) -{ - int pos; - - down(&wqs->alloc_blocks_lock); - - pos = wqs->return_blk_pos++; - pos &= WQS_MAX_NUM_BLOCKS - 1; - - wqs->free_blocks[pos].page_idx = page_idx; - wqs->free_blocks[pos].block_idx = block_idx; - - wqs->num_free_blks++; - - up(&wqs->alloc_blocks_lock); -} - -static void init_wqs_blocks_arr(struct hinic_wqs *wqs) -{ - int page_idx, blk_idx, pos = 0; - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { - for (blk_idx = 0; blk_idx < 
WQS_BLOCKS_PER_PAGE; blk_idx++) { - wqs->free_blocks[pos].page_idx = page_idx; - wqs->free_blocks[pos].block_idx = blk_idx; - pos++; - } - } - - wqs->alloc_blk_pos = 0; - wqs->return_blk_pos = pos; - wqs->num_free_blks = pos; - - sema_init(&wqs->alloc_blocks_lock, 1); -} - -/** - * hinic_wqs_alloc - allocate Work Queues set - * @wqs: Work Queue Set - * @max_wqs: maximum wqs to allocate - * @hwif: HW interface for use for the allocation - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, - struct hinic_hwif *hwif) -{ - struct pci_dev *pdev = hwif->pdev; - int err, i, page_idx; - - max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE); - if (max_wqs > WQS_MAX_NUM_BLOCKS) { - dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs); - return -EINVAL; - } - - wqs->hwif = hwif; - wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE; - - if (alloc_page_arrays(wqs)) { - dev_err(&pdev->dev, - "Failed to allocate mem for page addresses\n"); - return -ENOMEM; - } - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { - err = wqs_allocate_page(wqs, page_idx); - if (err) { - dev_err(&pdev->dev, "Failed wq page allocation\n"); - goto err_wq_allocate_page; - } - } - - wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs), - GFP_KERNEL); - if (!wqs->free_blocks) { - err = -ENOMEM; - goto err_alloc_blocks; - } - - init_wqs_blocks_arr(wqs); - return 0; - -err_alloc_blocks: -err_wq_allocate_page: - for (i = 0; i < page_idx; i++) - wqs_free_page(wqs, i); - - free_page_arrays(wqs); - return err; -} - -/** - * hinic_wqs_free - free Work Queues set - * @wqs: Work Queue Set - **/ -void hinic_wqs_free(struct hinic_wqs *wqs) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - int page_idx; - - devm_kfree(&pdev->dev, wqs->free_blocks); - - for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) - wqs_free_page(wqs, page_idx); - - free_page_arrays(wqs); -} - -/** - * alloc_wqes_shadow - allocate WQE shadows for WQ - * @wq: WQ to allocate shadows for - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_wqes_shadow(struct hinic_wq *wq) -{ - struct hinic_hwif *hwif = wq->hwif; - struct pci_dev *pdev = hwif->pdev; - size_t size; - - size = wq->num_q_pages * wq->max_wqe_size; - wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wq->shadow_wqe) - return -ENOMEM; - - size = wq->num_q_pages * sizeof(wq->prod_idx); - wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!wq->shadow_idx) - goto err_shadow_idx; - - return 0; - -err_shadow_idx: - devm_kfree(&pdev->dev, wq->shadow_wqe); - return -ENOMEM; -} - -/** - * free_wqes_shadow - free WQE shadows of WQ - * @wq: WQ to free shadows from - **/ -static void free_wqes_shadow(struct hinic_wq *wq) -{ - struct hinic_hwif *hwif = wq->hwif; - struct pci_dev *pdev = hwif->pdev; - - devm_kfree(&pdev->dev, wq->shadow_idx); - devm_kfree(&pdev->dev, wq->shadow_wqe); -} - -/** - * free_wq_pages - free pages of WQ - * @hwif: HW interface for releasing dma addresses - * @wq: WQ to free pages from - * @num_q_pages: number pages to free - **/ -static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, - int num_q_pages) -{ - struct pci_dev *pdev = hwif->pdev; - int i; - - for (i = 0; i < num_q_pages; i++) { - void **vaddr = &wq->shadow_block_vaddr[i]; - u64 *paddr = &wq->block_vaddr[i]; - dma_addr_t dma_addr; - - dma_addr = (dma_addr_t)be64_to_cpu(*paddr); - dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, - dma_addr); - } - - 
free_wqes_shadow(wq); -} - -/** - * alloc_wq_pages - alloc pages for WQ - * @hwif: HW interface for allocating dma addresses - * @wq: WQ to allocate pages for - * @max_pages: maximum pages allowed - * - * Return 0 - Success, negative - Failure - **/ -static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, - int max_pages) -{ - struct pci_dev *pdev = hwif->pdev; - int i, err, num_q_pages; - - num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; - if (num_q_pages > max_pages) { - dev_err(&pdev->dev, "Number wq pages exceeds the limit\n"); - return -EINVAL; - } - - if (num_q_pages & (num_q_pages - 1)) { - dev_err(&pdev->dev, "Number wq pages must be power of 2\n"); - return -EINVAL; - } - - wq->num_q_pages = num_q_pages; - - err = alloc_wqes_shadow(wq); - if (err) { - dev_err(&pdev->dev, "Failed to allocate wqe shadow\n"); - return err; - } - - for (i = 0; i < num_q_pages; i++) { - void **vaddr = &wq->shadow_block_vaddr[i]; - u64 *paddr = &wq->block_vaddr[i]; - dma_addr_t dma_addr; - - *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, - &dma_addr, GFP_KERNEL); - if (!*vaddr) { - dev_err(&pdev->dev, "Failed to allocate wq page\n"); - goto err_alloc_wq_pages; - } - - /* HW uses Big Endian Format */ - *paddr = cpu_to_be64(dma_addr); - } - - return 0; - -err_alloc_wq_pages: - free_wq_pages(wq, hwif, i); - return -ENOMEM; -} - -/** - * hinic_wq_allocate - Allocate the WQ resources from the WQS - * @wqs: WQ set from which to allocate the WQ resources - * @wq: WQ to allocate resources for it from the WQ set - * @wqebb_size: Work Queue Block Byte Size - * @wq_page_size: the page size in the Work Queue - * @q_depth: number of wqebbs in WQ - * @max_wqe_size: maximum WQE size that will be used in the WQ - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, - u16 wqebb_size, u16 wq_page_size, u16 q_depth, - u16 max_wqe_size) -{ - struct hinic_hwif *hwif = wqs->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 num_wqebbs_per_page; - int err; - - if (wqebb_size == 0) { - dev_err(&pdev->dev, "wqebb_size must be > 0\n"); - return -EINVAL; - } - - if (wq_page_size == 0) { - dev_err(&pdev->dev, "wq_page_size must be > 0\n"); - return -EINVAL; - } - - if (q_depth & (q_depth - 1)) { - dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); - return -EINVAL; - } - - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; - - if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { - dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); - return -EINVAL; - } - - wq->hwif = hwif; - - err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); - if (err) { - dev_err(&pdev->dev, "Failed to get free wqs next block\n"); - return err; - } - - wq->wqebb_size = wqebb_size; - wq->wq_page_size = wq_page_size; - wq->q_depth = q_depth; - wq->max_wqe_size = max_wqe_size; - wq->num_wqebbs_per_page = num_wqebbs_per_page; - - wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); - wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); - wq->block_paddr = WQ_BASE_PADDR(wqs, wq); - - err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); - if (err) { - dev_err(&pdev->dev, "Failed to allocate wq pages\n"); - goto err_alloc_wq_pages; - } - - atomic_set(&wq->cons_idx, 0); - atomic_set(&wq->prod_idx, 0); - atomic_set(&wq->delta, q_depth); - wq->mask = q_depth - 1; - - return 0; - -err_alloc_wq_pages: - wqs_return_block(wqs, wq->page_idx, wq->block_idx); - return err; -} - -/** - * hinic_wq_free - Free the WQ resources to the WQS - * 
@wqs: WQ set to free the WQ resources to it - * @wq: WQ to free its resources to the WQ set resources - **/ -void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) -{ - free_wq_pages(wq, wqs->hwif, wq->num_q_pages); - - wqs_return_block(wqs, wq->page_idx, wq->block_idx); -} - -/** - * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs - * @cmdq_pages: will hold the pages of the cmdq - * @wq: returned wqs - * @hwif: HW interface - * @cmdq_blocks: number of cmdq blocks/wq to allocate - * @wqebb_size: Work Queue Block Byte Size - * @wq_page_size: the page size in the Work Queue - * @q_depth: number of wqebbs in WQ - * @max_wqe_size: maximum WQE size that will be used in the WQ - * - * Return 0 - Success, negative - Failure - **/ -int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, struct hinic_hwif *hwif, - int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, - u16 q_depth, u16 max_wqe_size) -{ - struct pci_dev *pdev = hwif->pdev; - u16 num_wqebbs_per_page; - int i, j, err = -ENOMEM; - - if (wqebb_size == 0) { - dev_err(&pdev->dev, "wqebb_size must be > 0\n"); - return -EINVAL; - } - - if (wq_page_size == 0) { - dev_err(&pdev->dev, "wq_page_size must be > 0\n"); - return -EINVAL; - } - - if (q_depth & (q_depth - 1)) { - dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); - return -EINVAL; - } - - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; - - if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { - dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); - return -EINVAL; - } - - cmdq_pages->hwif = hwif; - - err = cmdq_allocate_page(cmdq_pages); - if (err) { - dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); - return err; - } - - for (i = 0; i < cmdq_blocks; i++) { - wq[i].hwif = hwif; - wq[i].page_idx = 0; - wq[i].block_idx = i; - - wq[i].wqebb_size = wqebb_size; - wq[i].wq_page_size = wq_page_size; - wq[i].q_depth = q_depth; - wq[i].max_wqe_size = max_wqe_size; - wq[i].num_wqebbs_per_page = num_wqebbs_per_page; - - wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); - wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); - wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); - - err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, - CMDQ_WQ_MAX_PAGES); - if (err) { - dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n"); - goto err_cmdq_block; - } - - atomic_set(&wq[i].cons_idx, 0); - atomic_set(&wq[i].prod_idx, 0); - atomic_set(&wq[i].delta, q_depth); - wq[i].mask = q_depth - 1; - } - - return 0; - -err_cmdq_block: - for (j = 0; j < i; j++) - free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); - - cmdq_free_page(cmdq_pages); - return err; -} - -/** - * hinic_wqs_cmdq_free - Free wqs from cmdqs - * @cmdq_pages: hold the pages of the cmdq - * @wq: wqs to free - * @cmdq_blocks: number of wqs to free - **/ -void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, int cmdq_blocks) -{ - int i; - - for (i = 0; i < cmdq_blocks; i++) - free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); - - cmdq_free_page(cmdq_pages); -} - -static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, - int num_wqebbs, u16 idx) -{ - void *wqebb_addr; - int i; - - for (i = 0; i < num_wqebbs; i++, idx++) { - idx = MASKED_WQE_IDX(wq, idx); - wqebb_addr = WQ_PAGE_ADDR(wq, idx) + - WQE_PAGE_OFF(wq, idx); - - memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); - - shadow_addr += wq->wqebb_size; - } -} - -static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, - int 
num_wqebbs, u16 idx) -{ - void *wqebb_addr; - int i; - - for (i = 0; i < num_wqebbs; i++, idx++) { - idx = MASKED_WQE_IDX(wq, idx); - wqebb_addr = WQ_PAGE_ADDR(wq, idx) + - WQE_PAGE_OFF(wq, idx); - - memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); - shadow_addr += wq->wqebb_size; - } -} - -/** - * hinic_get_wqe - get wqe ptr in the current pi and update the pi - * @wq: wq to get wqe from - * @wqe_size: wqe size - * @prod_idx: returned pi - * - * Return wqe pointer - **/ -struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *prod_idx) -{ - int curr_pg, end_pg, num_wqebbs; - u16 curr_prod_idx, end_prod_idx; - - *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); - - num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - - if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { - atomic_add(num_wqebbs, &wq->delta); - return ERR_PTR(-EBUSY); - } - - end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); - - end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); - curr_prod_idx = end_prod_idx - num_wqebbs; - curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); - - /* end prod index points to the next wqebb, therefore minus 1 */ - end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); - - curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); - end_pg = WQE_PAGE_NUM(wq, end_prod_idx); - - *prod_idx = curr_prod_idx; - - if (curr_pg != end_pg) { - void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); - - wq->shadow_idx[curr_pg] = *prod_idx; - return shadow_addr; - } - - return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); -} - -/** - * hinic_put_wqe - return the wqe place to use for a new wqe - * @wq: wq to return wqe - * @wqe_size: wqe size - **/ -void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) -{ - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - - atomic_add(num_wqebbs, &wq->cons_idx); - - atomic_add(num_wqebbs, &wq->delta); -} - -/** - * hinic_read_wqe - read wqe ptr in the current ci - * @wq: wq to get read from - * @wqe_size: wqe size - * @cons_idx: returned ci - * - * Return wqe pointer - **/ -struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *cons_idx) -{ - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - u16 curr_cons_idx, end_cons_idx; - int curr_pg, end_pg; - - if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) - return ERR_PTR(-EBUSY); - - curr_cons_idx = atomic_read(&wq->cons_idx); - - curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); - end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); - - curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); - end_pg = WQE_PAGE_NUM(wq, end_cons_idx); - - *cons_idx = curr_cons_idx; - - if (curr_pg != end_pg) { - void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); - return shadow_addr; - } - - return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); -} - -/** - * hinic_read_wqe_direct - read wqe directly from ci position - * @wq: wq - * @cons_idx: ci position - * - * Return wqe - **/ -struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) -{ - return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); -} - -/** - * wqe_shadow - check if a wqe is shadow - * @wq: wq of the wqe - * @wqe: the wqe for shadow checking - * - * Return true - shadow, false - Not shadow - **/ -static inline bool wqe_shadow(struct hinic_wq *wq, 
struct hinic_hw_wqe *wqe) -{ - size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; - - return WQE_IN_RANGE(wqe, wq->shadow_wqe, - &wq->shadow_wqe[wqe_shadow_size]); -} - -/** - * hinic_write_wqe - write the wqe to the wq - * @wq: wq to write wqe to - * @wqe: wqe to write - * @wqe_size: wqe size - **/ -void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, - unsigned int wqe_size) -{ - int curr_pg, num_wqebbs; - void *shadow_addr; - u16 prod_idx; - - if (wqe_shadow(wq, wqe)) { - curr_pg = WQE_SHADOW_PAGE(wq, wqe); - - prod_idx = wq->shadow_idx[curr_pg]; - num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; - shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; - - copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); - } -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h deleted file mode 100644 index 9c030a0f035e21c6666d3e813cd65add9b8ead9e..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_WQ_H -#define HINIC_HW_WQ_H - -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" - -struct hinic_free_block { - int page_idx; - int block_idx; -}; - -struct hinic_wq { - struct hinic_hwif *hwif; - - int page_idx; - int block_idx; - - u16 wqebb_size; - u16 wq_page_size; - u16 q_depth; - u16 max_wqe_size; - u16 num_wqebbs_per_page; - - /* The addresses are 64 bit in the HW */ - u64 block_paddr; - void **shadow_block_vaddr; - u64 *block_vaddr; - - int num_q_pages; - u8 *shadow_wqe; - u16 *shadow_idx; - - atomic_t cons_idx; - atomic_t prod_idx; - atomic_t delta; - u16 mask; -}; - -struct hinic_wqs { - struct hinic_hwif *hwif; - int num_pages; - - /* The addresses are 64 bit in the HW */ - u64 *page_paddr; - u64 **page_vaddr; - void ***shadow_page_vaddr; - - struct hinic_free_block *free_blocks; - int alloc_blk_pos; - int return_blk_pos; - int num_free_blks; - - /* Lock for getting a free block from the WQ set */ - struct semaphore alloc_blocks_lock; -}; - -struct hinic_cmdq_pages { - /* The addresses are 64 bit in the HW */ - u64 page_paddr; - u64 *page_vaddr; - void **shadow_page_vaddr; - - struct hinic_hwif *hwif; -}; - -int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, struct hinic_hwif *hwif, - int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, - u16 q_depth, u16 max_wqe_size); - -void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, - struct hinic_wq *wq, int cmdq_blocks); - -int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, - struct hinic_hwif *hwif); - -void hinic_wqs_free(struct hinic_wqs *wqs); - -int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, - u16 wqebb_size, u16 wq_page_size, u16 q_depth, - u16 max_wqe_size); - -void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); - -struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, 
unsigned int wqe_size, - u16 *prod_idx); - -void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); - -struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, - u16 *cons_idx); - -struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx); - -void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, - unsigned int wqe_size); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h deleted file mode 100644 index bc73485483c59a6b8e4d1cf63ef4326d12f9823a..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_HW_WQE_H -#define HINIC_HW_WQE_H - -#include "hinic_common.h" - -#define HINIC_CMDQ_CTRL_PI_SHIFT 0 -#define HINIC_CMDQ_CTRL_CMD_SHIFT 16 -#define HINIC_CMDQ_CTRL_MOD_SHIFT 24 -#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29 -#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 - -#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF -#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF -#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F -#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3 -#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1 - -#define HINIC_CMDQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \ - << HINIC_CMDQ_CTRL_##member##_SHIFT) - -#define HINIC_CMDQ_CTRL_GET(val, member) \ - (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \ - & HINIC_CMDQ_CTRL_##member##_MASK) - -#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 -#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 -#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 -#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31 - -#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1 -#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3 -#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3 -#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1 - -#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \ - (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \ - << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) - -#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \ - (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \ - & HINIC_CMDQ_WQE_HEADER_##member##_MASK) - -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 -#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 -#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 -#define HINIC_SQ_CTRL_LEN_SHIFT 29 - -#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF -#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F -#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 -#define HINIC_SQ_CTRL_LEN_MASK 0x3 - -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 - -#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF - -#define 
HINIC_SQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \ - << HINIC_SQ_CTRL_##member##_SHIFT) - -#define HINIC_SQ_CTRL_GET(val, member) \ - (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \ - & HINIC_SQ_CTRL_##member##_MASK) - -#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 -#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8 -#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 -#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 -#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15 -#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 - -#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3 -#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3 -#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1 -#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1 -#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF - -#define HINIC_SQ_TASK_INFO0_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \ - HINIC_SQ_TASK_INFO0_##member##_SHIFT) - -/* 8 bits reserved */ -#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8 -#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16 -#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24 - -/* 8 bits reserved */ -#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF -#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF - -#define HINIC_SQ_TASK_INFO1_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \ - HINIC_SQ_TASK_INFO1_##member##_SHIFT) - -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0 -#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12 -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22 -/* 8 bits reserved */ - -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF -#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F -#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3 -/* 1 bit reserved */ -#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3 -/* 8 bits reserved */ - -#define HINIC_SQ_TASK_INFO2_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \ - HINIC_SQ_TASK_INFO2_##member##_SHIFT) - -/* 31 bits reserved */ -#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31 - -/* 31 bits reserved */ -#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1 - -#define HINIC_SQ_TASK_INFO4_SET(val, member) \ - (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \ - HINIC_SQ_TASK_INFO4_##member##_SHIFT) - -#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 - -#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1 - -#define HINIC_RQ_CQE_STATUS_GET(val, member) \ - (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \ - HINIC_RQ_CQE_STATUS_##member##_MASK) - -#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \ - ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \ - HINIC_RQ_CQE_STATUS_##member##_SHIFT))) - -#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16 - -#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF - -#define HINIC_RQ_CQE_SGE_GET(val, member) \ - (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \ - HINIC_RQ_CQE_SGE_##member##_MASK) - -#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 -#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 -#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27 -#define HINIC_RQ_CTRL_LEN_SHIFT 29 - -#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF -#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1 -#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3 -#define HINIC_RQ_CTRL_LEN_MASK 0x3 - 
-#define HINIC_RQ_CTRL_SET(val, member) \ - (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \ - HINIC_RQ_CTRL_##member##_SHIFT) - -#define HINIC_SQ_WQE_SIZE(nr_sges) \ - (sizeof(struct hinic_sq_ctrl) + \ - sizeof(struct hinic_sq_task) + \ - (nr_sges) * sizeof(struct hinic_sq_bufdesc)) - -#define HINIC_SCMD_DATA_LEN 16 - -#define HINIC_MAX_SQ_BUFDESCS 17 - -#define HINIC_SQ_WQE_MAX_SIZE 320 -#define HINIC_RQ_WQE_SIZE 32 - -enum hinic_l4offload_type { - HINIC_L4_OFF_DISABLE = 0, - HINIC_TCP_OFFLOAD_ENABLE = 1, - HINIC_SCTP_OFFLOAD_ENABLE = 2, - HINIC_UDP_OFFLOAD_ENABLE = 3, -}; - -enum hinic_vlan_offload { - HINIC_VLAN_OFF_DISABLE = 0, - HINIC_VLAN_OFF_ENABLE = 1, -}; - -enum hinic_pkt_parsed { - HINIC_PKT_NOT_PARSED = 0, - HINIC_PKT_PARSED = 1, -}; - -enum hinic_outer_l3type { - HINIC_OUTER_L3TYPE_UNKNOWN = 0, - HINIC_OUTER_L3TYPE_IPV6 = 1, - HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2, - HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3, -}; - -enum hinic_media_type { - HINIC_MEDIA_UNKNOWN = 0, -}; - -enum hinic_l2type { - HINIC_L2TYPE_ETH = 0, -}; - -enum hinc_tunnel_l4type { - HINIC_TUNNEL_L4TYPE_UNKNOWN = 0, -}; - -struct hinic_cmdq_header { - u32 header_info; - u32 saved_data; -}; - -struct hinic_status { - u32 status_info; -}; - -struct hinic_ctrl { - u32 ctrl_info; -}; - -struct hinic_sge_resp { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_cmdq_completion { - /* HW Format */ - union { - struct hinic_sge_resp sge_resp; - u64 direct_resp; - }; -}; - -struct hinic_scmd_bufdesc { - u32 buf_len; - u32 rsvd; - u8 data[HINIC_SCMD_DATA_LEN]; -}; - -struct hinic_lcmd_bufdesc { - struct hinic_sge sge; - u32 rsvd1; - u64 rsvd2; - u64 rsvd3; -}; - -struct hinic_cmdq_wqe_scmd { - struct hinic_cmdq_header header; - u64 rsvd; - struct hinic_status status; - struct hinic_ctrl ctrl; - struct hinic_cmdq_completion completion; - struct hinic_scmd_bufdesc buf_desc; -}; - -struct hinic_cmdq_wqe_lcmd { - struct hinic_cmdq_header header; - struct hinic_status status; - struct hinic_ctrl ctrl; - struct hinic_cmdq_completion completion; - struct hinic_lcmd_bufdesc buf_desc; -}; - -struct hinic_cmdq_direct_wqe { - struct hinic_cmdq_wqe_scmd wqe_scmd; -}; - -struct hinic_cmdq_wqe { - /* HW Format */ - union { - struct hinic_cmdq_direct_wqe direct_wqe; - struct hinic_cmdq_wqe_lcmd wqe_lcmd; - }; -}; - -struct hinic_sq_ctrl { - u32 ctrl_info; - u32 queue_info; -}; - -struct hinic_sq_task { - u32 pkt_info0; - u32 pkt_info1; - u32 pkt_info2; - u32 ufo_v6_identify; - u32 pkt_info4; - u32 zero_pad; -}; - -struct hinic_sq_bufdesc { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_sq_wqe { - struct hinic_sq_ctrl ctrl; - struct hinic_sq_task task; - struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS]; -}; - -struct hinic_rq_cqe { - u32 status; - u32 len; - - u32 rsvd2; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; - u32 rsvd6; - u32 rsvd7; -}; - -struct hinic_rq_ctrl { - u32 ctrl_info; -}; - -struct hinic_rq_cqe_sect { - struct hinic_sge sge; - u32 rsvd; -}; - -struct hinic_rq_bufdesc { - u32 hi_addr; - u32 lo_addr; -}; - -struct hinic_rq_wqe { - struct hinic_rq_ctrl ctrl; - u32 rsvd; - struct hinic_rq_cqe_sect cqe_sect; - struct hinic_rq_bufdesc buf_desc; -}; - -struct hinic_hw_wqe { - /* HW Format */ - union { - struct hinic_cmdq_wqe cmdq_wqe; - struct hinic_sq_wqe sq_wqe; - struct hinic_rq_wqe rq_wqe; - }; -}; - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c new file mode 100644 index 
0000000000000000000000000000000000000000..2021ff83bd52c8065499e9601fac5c394acdc49f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c @@ -0,0 +1,5005 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_msix_attr.h" +#include "hinic_nic_io.h" +#include "hinic_eqs.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_wq.h" +#include "hinic_cmdq.h" +#include "hinic_nic_cfg.h" +#include "hinic_hwif.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + +#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define HINIC_WAIT_IO_STATUS_TIMEOUT 100 + +#define HINIC_FLR_TIMEOUT 1000 + +#define HINIC_HT_GPA_PAGE_SIZE 4096UL + +#define HINIC_PPF_HT_GPA_SET_RETRY_TIMES 10 + +#define HINIC_OK_FLAG_OK 0 + +#define HINIC_OK_FLAG_FAILED 1 + +#define HINIC_GET_SFP_INFO_REAL_TIME 0x1 + +#define HINIC_GLB_SO_RO_CFG_SHIFT 0x0 +#define HINIC_GLB_SO_RO_CFG_MASK 0x1 +#define HINIC_DISABLE_ORDER 0 +#define HINIC_GLB_DMA_SO_RO_GET(val, member) \ + (((val) >> HINIC_GLB_##member##_SHIFT) & HINIC_GLB_##member##_MASK) + +#define HINIC_GLB_DMA_SO_R0_CLEAR(val, member) \ + ((val) & (~(HINIC_GLB_##member##_MASK << HINIC_GLB_##member##_SHIFT))) + +#define HINIC_GLB_DMA_SO_R0_SET(val, member) \ + (((val) & HINIC_GLB_##member##_MASK) << HINIC_GLB_##member##_SHIFT) + +#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0 +#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1 +#define HINIC_ACTIVE_STATUS_MASK 0x80000000 +#define HINIC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF +#define HINIC_ACTIVE_UCODE 0x1F80 /* bit7~bit12 */ + +#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK) + +#define HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \ + ((val) & (~(HINIC_##member##_MASK << HINIC_##member##_SHIFT))) + +#define HINIC_SET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) & HINIC_##member##_MASK) << HINIC_##member##_SHIFT) + +#define HINIC_BOARD_IS_PHY(hwdev) \ + ((hwdev)->board_info.board_type == 4 && \ + (hwdev)->board_info.board_id == 24) + +struct comm_info_ht_gpa_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1; + u32 rsvd2; + + u64 page_pa0; + u64 page_pa1; +}; + +struct comm_info_eqm_fix { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 chunk_num; + u32 search_gpa_num; +}; + +struct comm_info_eqm_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 ppf_id; + u32 page_size; + u32 valid; +}; + +struct comm_info_eqm_search_gpa { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 start_idx; + u32 num; + u32 resv0; + u32 resv1; + u64 gpa_hi52[0]; /*lint !e1501 */ +}; + 
+struct hinic_cons_idx_attr { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 sq_id; + u64 ci_addr; +}; + +struct hinic_clear_doorbell { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_clear_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hinic_msix_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesct_timer_cnt; + u8 lli_tmier_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +struct hinic_func_tmr_bitmap_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 op_id; /* 0:start; 1:stop */ + u8 ppf_idx; + u32 rsvd1; +}; + +struct hinic_ppf_tmr_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 ppf_idx; + u8 op_id; /* 0: stop timer; 1:start timer */ + u8 rsvd1[2]; + u32 rsvd2; +}; + +struct hinic_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +int hinic_hw_rx_buf_size[] = { + HINIC_RX_BUF_SIZE_32B, + HINIC_RX_BUF_SIZE_64B, + HINIC_RX_BUF_SIZE_96B, + HINIC_RX_BUF_SIZE_128B, + HINIC_RX_BUF_SIZE_192B, + HINIC_RX_BUF_SIZE_256B, + HINIC_RX_BUF_SIZE_384B, + HINIC_RX_BUF_SIZE_512B, + HINIC_RX_BUF_SIZE_768B, + HINIC_RX_BUF_SIZE_1K, + HINIC_RX_BUF_SIZE_1_5K, + HINIC_RX_BUF_SIZE_2K, + HINIC_RX_BUF_SIZE_3K, + HINIC_RX_BUF_SIZE_4K, + HINIC_RX_BUF_SIZE_8K, + HINIC_RX_BUF_SIZE_16K, +}; + +/* vf-pf dma attr table */ +struct hinic_vf_dma_attr_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 func_dma_entry_num; + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u8 resv1[3]; +}; + +struct hinic_led_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port; + u8 type; + u8 mode; + u8 reset; +}; + +struct hinic_comm_board_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_board_info info; + + u32 rsvd1[4]; +}; + +#define PHY_DOING_INIT_TIMEOUT (15 * 1000) + +struct hinic_phy_init_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 init_status; + u8 rsvd1[3]; +}; + +enum phy_init_status_type { + PHY_INIT_DOING = 0, + PHY_INIT_SUCCESS = 1, + PHY_INIT_FAIL = 2, + PHY_NONSUPPORT = 3, +}; + +struct hinic_update_active { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 update_flag; + u32 update_status; +}; + +enum hinic_bios_cfg_op_code { + HINIC_BIOS_CFG_GET = 0, + HINIC_BIOS_CFG_PF_BW_LIMIT = 0x1 << 6, +}; + +struct hinic_bios_cfg_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 op_code; + u32 signature; + + u8 rsvd1[12]; + u32 pf_bw_limit; + u8 rsvd2[5]; + + u8 func_valid; + u8 func_idx; + u8 rsvd3; +}; + +struct hinic_mgmt_watchdog_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + +struct hinic_fmw_act_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1[5]; +}; + +struct hinic_ppf_state { + u8 status; + u8 version; + u8 rsvd0[6]; + u8 ppf_state; + u8 rsvd1[3]; +}; + +#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +struct 
hinic_wq_page_size { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +#define MAX_PCIE_DFX_BUF_SIZE 1024 + +struct hinic_pcie_dfx_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + int len; + u32 rsvd; +}; + +struct hinic_pcie_dfx_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 host_id; + u8 last; + u8 rsvd[2]; + u32 offset; + + u8 data[MAX_PCIE_DFX_BUF_SIZE]; +}; + +struct hinic_hw_pf_infos_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_hw_pf_infos infos; +}; + +enum hinic_sdi_mode_ops { + HINIC_SDI_INFO_SET = 1U << 0, /* 1-save, 0-read */ + HINIC_SDI_INFO_MODE = 1U << 1, +}; + +struct hinic_sdi_mode_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + /* Op-Code: + * Bit0: 0 - read configuration, 1 - write configuration + * Bit1: 0 - ignored, 1 - get/set SDI Mode + */ + u32 opcode; + u32 signature; + u16 cur_sdi_mode; + u16 cfg_sdi_mode; + + u32 rsvd1[29]; +}; + +struct hinic_reg_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 reg_addr; + u32 val_length; + + u32 data[2]; +}; + +#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \ + HINIC_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \ + << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HINIC_PCIE_ST_DISABLE 0 +#define HINIC_PCIE_AT_DISABLE 0 +#define HINIC_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +struct hinic_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_fault_event event; +}; + +static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out); +static void hinic_set_mgmt_channel_status(void *handle, bool state); + +#define HINIC_QUEUE_MIN_DEPTH 6 +#define HINIC_QUEUE_MAX_DEPTH 12 +#define HINIC_MAX_RX_BUFFER_SIZE 15 + +#define CAP_INFO_MAC_LEN 512 +#define VENDOR_MAX_LEN 17 + +static bool check_root_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_root_ctxt *root_ctxt; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + root_ctxt = (struct hinic_root_ctxt *)buf_in; + + if (root_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) + return false; + + if (root_ctxt->set_cmdq_depth) { + if (root_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH) + return true; + + return false; + } + + if (root_ctxt->rq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->rq_depth <= HINIC_QUEUE_MAX_DEPTH && + root_ctxt->sq_depth >= HINIC_QUEUE_MIN_DEPTH && + root_ctxt->sq_depth <= HINIC_QUEUE_MAX_DEPTH && + root_ctxt->rx_buf_sz <= HINIC_MAX_RX_BUFFER_SIZE) + return true; + + if (!root_ctxt->rq_depth && !root_ctxt->sq_depth && + !root_ctxt->rx_buf_sz) + return true; + + return false; +} + +static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + if (!hinic_mbox_check_func_id_8B(hwdev, 
func_idx, buf_in, in_size)) + return false; + + return hinic_cmdq_check_vf_ctxt(hwdev, buf_in); +} + +static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_wq_page_size *page_size_info = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if (page_size_info->ppf_idx != hinic_ppf_idx(hwdev)) + return false; + + if (((1U << page_size_info->page_size) * 0x1000) != + HINIC_DEFAULT_WQ_PAGE_SIZE) + return false; + + return true; +} + +static bool __mbox_check_tmr_bitmap(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_func_tmr_bitmap_op *bitmap_op = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if (bitmap_op->op_id == FUNC_TMR_BITMAP_ENABLE) { + if (!hinic_get_ppf_status(hwdev)) { + sdk_err(hwdev->dev_hdl, "PPF timer is not init, can't enable %d timer bitmap\n", + func_idx); + return false; + } + } + + if (bitmap_op->ppf_idx != hinic_ppf_idx(hwdev)) + return false; + + return true; +} + +struct vf_cmd_check_handle hw_cmd_support_vf[] = { + {HINIC_MGMT_CMD_START_FLR, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt}, + {HINIC_MGMT_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt}, + {HINIC_MGMT_CMD_VAT_SET, check_root_ctxt}, + {HINIC_MGMT_CMD_VAT_GET, check_root_ctxt}, + {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B}, + + {HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B}, + + {HINIC_MGMT_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_PAGESIZE_SET, check_set_wq_page_size}, + {HINIC_MGMT_CMD_PAGESIZE_GET, hinic_mbox_check_func_id_8B}, + {HINIC_MGMT_CMD_GET_PPF_STATE, NULL}, + {HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, __mbox_check_tmr_bitmap}, + {HINIC_MGMT_CMD_GET_BOARD_INFO, NULL}, + {HINIC_MGMT_CMD_GET_SDI_MODE, NULL}, +}; + +struct hinic_mgmt_status_log { + u8 status; + const char *log; +}; + +static struct hinic_mgmt_status_log mgmt_status_log[] = { + {HINIC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"}, + {HINIC_MGMT_STATUS_ERR_FAILED, "Operation failed"}, + {HINIC_MGMT_STATUS_ERR_PORT, "Invalid port"}, + {HINIC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"}, + {HINIC_MGMT_STATUS_ERR_NOMATCH, "Version not match"}, + {HINIC_MGMT_STATUS_ERR_EXIST, "Entry exists"}, + {HINIC_MGMT_STATUS_ERR_NOMEM, "Out of memory"}, + {HINIC_MGMT_STATUS_ERR_INIT, "Feature not initialized"}, + {HINIC_MGMT_STATUS_ERR_FAULT, "Invalid address"}, + {HINIC_MGMT_STATUS_ERR_PERM, "Operation not permitted"}, + {HINIC_MGMT_STATUS_ERR_EMPTY, "Table empty"}, + {HINIC_MGMT_STATUS_ERR_FULL, "Table full"}, + {HINIC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"}, + {HINIC_MGMT_STATUS_ERR_BUSY, "Device or resource busy "}, + {HINIC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation "}, + {HINIC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"}, + {HINIC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"}, + {HINIC_MGMT_STATUS_ERR_CRC, "CRC check failed"}, + {HINIC_MGMT_STATUS_ERR_NXIO, "No such device or address"}, + {HINIC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback 
fail"}, + {HINIC_MGMT_STATUS_ERR_LEN, "Length too short or too long"}, + {HINIC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"}, +}; + +static void __print_status_info(struct hinic_hwdev *dev, + enum hinic_mod_type mod, u8 cmd, int index) +{ + if (mod == HINIC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } else if (mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_HILINK) { + if (HINIC_IS_VF(dev) && + (cmd == HINIC_PORT_CMD_SET_MAC || + cmd == HINIC_PORT_CMD_DEL_MAC || + cmd == HINIC_PORT_CMD_UPDATE_MAC) && + mgmt_status_log[index].status == HINIC_PF_SET_VF_ALREADY) + return; + + nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } +} + +static bool hinic_status_need_special_handle(struct hinic_hwdev *dev, + enum hinic_mod_type mod, + u8 cmd, u8 status) +{ + if (mod == HINIC_MOD_L2NIC) { + /* optical module isn't plugged in */ + if ((cmd == HINIC_PORT_CMD_GET_STD_SFP_INFO || + cmd == HINIC_PORT_CMD_GET_SFP_INFO) && + status == HINIC_MGMT_STATUS_ERR_NXIO) + return true; + + if ((cmd == HINIC_PORT_CMD_SET_MAC || + cmd == HINIC_PORT_CMD_UPDATE_MAC) && + status == HINIC_MGMT_STATUS_ERR_EXIST) + return true; + } + + if (status == HINIC_MGMT_STATUS_ERR_UNSUPPORT) { + if (mod == HINIC_MOD_L2NIC) + sdk_warn(dev->dev_hdl, "Mgmt command: mod(0x%x) cmd(0x%x) not supported\n", + mod, cmd); + else + sdk_warn(dev->dev_hdl, "Mgmt command: mod(0x%x) cmd(0x%x) not supported\n", + mod, cmd); + + return true; + } + + return false; +} + +static void hinic_print_status_info(void *hwdev, enum hinic_mod_type mod, + u8 cmd, const void *buf_out) +{ + struct hinic_hwdev *dev = hwdev; + int i, size; + u8 status; + + if (!buf_out) + return; + + if (mod != HINIC_MOD_COMM && mod != HINIC_MOD_L2NIC && + mod != HINIC_MOD_HILINK) + return; + + status = *(u8 *)buf_out; + + if (!status) + return; + + if (hinic_status_need_special_handle(dev, mod, cmd, status)) + return; + + size = ARRAY_SIZE(mgmt_status_log); + for (i = 0; i < size; i++) { + if (status == mgmt_status_log[i].status) { + __print_status_info(dev, mod, cmd, i); + return; + } + } + + if (mod == HINIC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } else if (mod == HINIC_MOD_L2NIC || mod == HINIC_MOD_HILINK) { + nic_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } +} + +void hinic_set_chip_present(void *hwdev) +{ + ((struct hinic_hwdev *)hwdev)->chip_present_flag = HINIC_CHIP_PRESENT; +} + +void hinic_set_chip_absent(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Card not present\n"); + dev->chip_present_flag = HINIC_CHIP_ABSENT; +} + +int hinic_get_chip_present_flag(void *hwdev) +{ + int flag; + + if (!hwdev) + return -EINVAL; + flag = ((struct hinic_hwdev *)hwdev)->chip_present_flag; + return flag; +} +EXPORT_SYMBOL(hinic_get_chip_present_flag); + +void hinic_force_complete_all(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_recv_msg *recv_resp_msg; + + set_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state); + + if (hinic_func_type(dev) != TYPE_VF && + hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_MGMT_INITED)) { + recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt; + spin_lock_bh(&dev->pf_to_mgmt->sync_event_lock); + if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + 
complete(&recv_resp_msg->recv_done); + dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + spin_unlock_bh(&dev->pf_to_mgmt->sync_event_lock); + } + + /* only flush sync cmdq to avoid blocking remove */ + if (hinic_is_hwdev_mod_inited(dev, HINIC_HWDEV_CMDQ_INITED)) + hinic_cmdq_flush_cmd(hwdev, + &dev->cmdqs->cmdq[HINIC_CMDQ_SYNC]); + + clear_bit(HINIC_HWDEV_STATE_BUSY, &dev->func_state); +} + +void hinic_detect_hw_present(void *hwdev) +{ + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr); + if (attr1 == HINIC_PCIE_LINK_DOWN) { + hinic_set_chip_absent(hwdev); + hinic_force_complete_all(hwdev); + } +} + +void hinic_record_pcie_error(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +static int __func_send_mbox(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + int err; + + if (hinic_func_type(hwdev) == TYPE_VF) + err = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size, timeout); + else if (NEED_MBOX_FORWARD(hwdev)) + err = hinic_mbox_to_host_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, + timeout); + else + err = -EFAULT; + + return err; +} + +static int __pf_to_mgmt_pre_handle(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in) +{ + struct hinic_update_active *active_info = buf_in; + + if (hinic_get_mgmt_channel_status(hwdev)) { + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_CFGM || mod == HINIC_MOD_HILINK) + return HINIC_DEV_BUSY_ACTIVE_FW; + else + return -EBUSY; + } + + /* When only hot activation of ucode, mgmt channel can still be used + * normally, otherwise it is not allowed to send commands to mgmt until + * the hot activation is completed + */ + if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_ACTIVATE_FW && + (active_info->update_flag & ~HINIC_ACTIVE_UCODE)) { + hinic_set_mgmt_channel_status(hwdev, true); + + /* Sleep 2s wait other pf's mgmt messages to complete */ + msleep(2000); + } + + return 0; +} + +static void __pf_to_mgmt_after_handle(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + int sw_status, void *mgmt_status) +{ + /* if activate fw is failed, set channel valid */ + if (mod == HINIC_MOD_COMM && + cmd == HINIC_MGMT_CMD_ACTIVATE_FW) { + if (sw_status) + hinic_set_mgmt_channel_status(hwdev, false); + else + hinic_enable_mgmt_channel(hwdev, mgmt_status); + } +} + +int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!((struct hinic_hwdev *)hwdev)->chip_present_flag) + return -EPERM; + + if (NEED_MBOX_FORWARD(dev)) { + if (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_MBOX_INITED)) { + return -EPERM; + } + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) + return -EPERM; + + if (in_size > HINIC_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd, buf_in); + if (err) + return err; + + err = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out); + } + + 
return err; +} + +static bool is_sfp_info_cmd_cached(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_sfp_qsfp_info *sfp_info = NULL; + struct hinic_port_routine_cmd *rt_cmd = NULL; + struct card_node *chip_node = hwdev->chip_node; + + sfp_info = buf_in; + if (sfp_info->port_id >= HINIC_MAX_PORT_ID || + *out_size < sizeof(*sfp_info)) + return false; + + if (sfp_info->version == HINIC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool is_sfp_abs_cmd_cached(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_light_module_abs *abs = NULL; + struct hinic_port_routine_cmd *rt_cmd = NULL; + struct card_node *chip_node = hwdev->chip_node; + + abs = buf_in; + if (abs->port_id >= HINIC_MAX_PORT_ID || + *out_size < sizeof(*abs)) + return false; + + if (abs->version == HINIC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->abs, sizeof(*abs)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool driver_processed_cmd(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct card_node *chip_node = hwdev->chip_node; + + if (mod == HINIC_MOD_L2NIC) { + if (cmd == HINIC_PORT_CMD_GET_SFP_INFO && + chip_node->rt_cmd->up_send_sfp_info) { + return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } else if (cmd == HINIC_PORT_CMD_GET_SFP_ABS && + chip_node->rt_cmd->up_send_sfp_abs) { + return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } + } + + return false; +} + +int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_hwdev *dev = hwdev; + unsigned long end; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + end = jiffies + msecs_to_jiffies(HINIC_DEV_ACTIVE_FW_TIMEOUT); + if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) { + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED)) + return -EPERM; + do { + if (!hinic_get_chip_present_flag(hwdev)) + break; + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) { + hinic_print_status_info(hwdev, mod, cmd, + buf_out); + return err; + } + + msleep(1000); + } while (time_before(jiffies, end)); + + err = __func_send_mbox(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + } else { + if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size)) + return 0; + + do { + if (!hinic_get_mgmt_channel_status(hwdev) || + !hinic_get_chip_present_flag(hwdev)) + break; + + msleep(1000); + } while (time_before(jiffies, end)); + err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, + timeout); + } + + hinic_print_status_info(hwdev, mod, cmd, buf_out); + + return err; +} +EXPORT_SYMBOL(hinic_msg_to_mgmt_sync); + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int 
hinic_msg_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag) || + !hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) || + hinic_get_mgmt_channel_status(hwdev)) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Mailbox don't support async cmd\n"); + } else { + err = hinic_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} +EXPORT_SYMBOL(hinic_msg_to_mgmt_async); + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hinic_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) { + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MBOX_INITED)) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_VF) + err = hinic_mbox_to_pf_no_ack(hwdev, mod, cmd, buf_in, + in_size); + else + err = hinic_mbox_to_host_no_ack(hwdev, mod, cmd, buf_in, + in_size); + } else { + err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} + +int hinic_mbox_to_vf(void *hwdev, + enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + int err; + + if (!hwdev) + return -EINVAL; + + err = __hinic_mbox_to_vf(hwdev, mod, vf_id, cmd, buf_in, in_size, + buf_out, out_size, timeout); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + /* VF already in error condiction */ + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "VF%d not initialized, disconnect it\n", + vf_id); + hinic_unregister_vf_msg_handler(hwdev, vf_id); + } + + return err; +} +EXPORT_SYMBOL(hinic_mbox_to_vf); + +int hinic_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) + +{ + struct hinic_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!dev->chip_present_flag) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_VF || NEED_MBOX_FORWARD(dev)) + return -EINVAL; + + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_CLP_INITED)) + return -EPERM; + + err = hinic_pf_clp_to_mgmt(dev, mod, cmd, buf_in, + in_size, buf_out, out_size); + + return err; +} + +/** + * hinic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + */ +void hinic_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} +EXPORT_SYMBOL(hinic_cpu_to_be32); + +/** + * hinic_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +void hinic_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} +EXPORT_SYMBOL(hinic_be32_to_cpu); + +/** + * hinic_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len) +{ + sge->hi_addr = 
upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +/** + * hinic_sge_to_dma - get dma address from scatter gather entry + * @sge: scatter gather entry + * + * Return dma address of sg entry + */ +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) +{ + return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); +} + +int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr) +{ + struct hinic_cons_idx_attr cons_idx_attr = {0}; + u16 out_size = sizeof(cons_idx_attr); + int err; + + if (!hwdev || !attr) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &cons_idx_attr.func_idx); + if (err) + return err; + + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.sq_id = q_id; + + cons_idx_attr.ci_addr = attr->ci_dma_base; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + &cons_idx_attr, &out_size, 0); + if (err || !out_size || cons_idx_attr.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cons_idx_attr.status, out_size); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_ci_table); + +static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth) +{ + struct hinic_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +static u16 get_hw_rx_buf_size(int rx_buf_sz) +{ + u16 num_hw_types = + sizeof(hinic_hw_rx_buf_size) / + sizeof(hinic_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (hinic_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; /* default 2K */ +} + +int hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz) +{ + struct hinic_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, 
root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_root_ctxt); + +int hinic_clean_root_ctxt(void *hwdev) +{ + struct hinic_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hinic_ppf_idx(hwdev); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to clean root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_clean_root_ctxt); + +static int wait_for_flr_finish(struct hinic_hwif *hwif) +{ + u32 cnt = 0; + enum hinic_pf_status status; + + while (cnt < HINIC_FLR_TIMEOUT) { + status = hinic_get_pf_status(hwif); + if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) { + hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG); + return 0; + } + + usleep_range(9900, 10000); + cnt++; + } + + return -EFAULT; +} + +#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static int wait_cmdq_stop(struct hinic_hwdev *hwdev) +{ + enum hinic_cmdq_type cmdq_type; + struct hinic_cmdqs *cmdqs = hwdev->cmdqs; + u32 cnt = 0; + int err = 0; + + if (!(cmdqs->status & HINIC_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HINIC_CMDQ_ENABLE; + + while (cnt < HINIC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) { + err = 0; + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) { + err = -EBUSY; + break; + } + } + + if (!err) + return 0; + + usleep_range(500, 1000); + cnt++; + } + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type); + } + + cmdqs->status |= HINIC_CMDQ_ENABLE; + + return err; +} + +static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + struct hinic_clear_resource clr_res = {0}; + int err; + + err = wait_cmdq_stop(hwdev); + if (err) + sdk_warn(hwdev->dev_hdl, "Cmdq is still working, please check whether the CMDQ timeout value is reasonable\n"); + + err = hinic_global_func_id_get(hwdev, &clr_res.func_idx); + if (err) + return err; + + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res)); + if (err) + sdk_warn(hwdev->dev_hdl, "Failed to notify flush message\n"); + + /* PF first sets the VF doorbell flush csr to disabled. After PF finishes + * flushing VF resources, PF will set the VF doorbell flush csr to enabled.
+ */ + err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL); + if (err) + sdk_warn(hwdev->dev_hdl, "Wait doorbell flush disable timeout\n"); + err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL); + if (err) + sdk_warn(hwdev->dev_hdl, "Wait doorbell flush enable timeout\n"); + + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + + return 0; +} + +static void hinic_pf_set_vf_db_flush(struct hinic_hwdev *hwdev, u16 vf_id, + enum hinic_doorbell_ctrl val) +{ + u32 addr, vf_attr4; + + addr = HINIC_PF_CSR_VF_FLUSH_OFF(vf_id); + vf_attr4 = hinic_hwif_read_reg(hwdev->hwif, addr); + vf_attr4 = HINIC_AF4_CLEAR(vf_attr4, DOORBELL_CTRL); + vf_attr4 |= HINIC_AF4_SET(val, DOORBELL_CTRL); + hinic_hwif_write_reg(hwdev->hwif, addr, vf_attr4); +} + +static int hinic_vf_rx_tx_flush_in_pf(struct hinic_hwdev *hwdev, u16 vf_id) +{ + struct hinic_clear_doorbell clear_db = {0}; + struct hinic_clear_resource clr_res = {0}; + u16 glb_vf_func_id; + u16 out_size; + int err; + int ret = 0; + + /* disable vf doorbell flush csr */ + hinic_pf_set_vf_db_flush(hwdev, vf_id, DISABLE_DOORBELL); + + /* doorbell flush */ + out_size = sizeof(clear_db); + glb_vf_func_id = HINIC_HWIF_GLOBAL_VF_OFFSET(hwdev->hwif) + vf_id; + clear_db.func_idx = glb_vf_func_id; + clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db, + sizeof(clear_db), &clear_db, &out_size, 0); + if (err || !out_size || clear_db.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n", + err, clear_db.status, out_size); + if (err) + ret = err; + else + ret = -EFAULT; + } + + /* wait for ucode to stop I/O */ + msleep(100); + + /* notify up to begin vf flush */ + out_size = sizeof(clr_res); + clr_res.func_idx = glb_vf_func_id; + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res), &clr_res, &out_size, 0); + if (err || !out_size || clr_res.status) { + sdk_warn(hwdev->dev_hdl, "Failed to start flr, err: %d, status: 0x%x, out_size: 0x%x\n", + err, clr_res.status, out_size); + ret = err ? err : (-EFAULT); + } + /* enable vf doorbell flush csr */ + hinic_pf_set_vf_db_flush(hwdev, vf_id, ENABLE_DOORBELL); + + return ret; +} + +static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_clear_doorbell clear_db = {0}; + struct hinic_clear_resource clr_res = {0}; + u16 out_size, func_id; + int err; + int ret = 0; + + /* wait for ucode to stop I/O */ + msleep(100); + + err = wait_cmdq_stop(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check whether the CMDQ timeout value is reasonable\n"); + ret = err; + } + + hinic_disable_doorbell(hwif); + + out_size = sizeof(clear_db); + func_id = hinic_global_func_id_hw(hwdev); + clear_db.func_idx = func_id; + clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db, + sizeof(clear_db), &clear_db, &out_size, 0); + if (err || !out_size || clear_db.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n", + err, clear_db.status, out_size); + ret = err ?
err : (-EFAULT); + } + + hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG); + + clr_res.func_idx = func_id; + clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); + + err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res)); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to notify flush message\n"); + ret = err; + } + + err = wait_for_flr_finish(hwif); + if (err) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + + hinic_enable_doorbell(hwif); + + err = hinic_reinit_cmdq_ctxts(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +int hinic_func_rx_tx_flush(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return -EINVAL; + + if (!dev->chip_present_flag) + return 0; + + if (HINIC_FUNC_TYPE(dev) == TYPE_VF) + return hinic_vf_rx_tx_flush(dev); + else + return hinic_pf_rx_tx_flush(dev); +} +EXPORT_SYMBOL(hinic_func_rx_tx_flush); + +int hinic_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info) +{ + struct hinic_hwdev *nic_hwdev = hwdev; + struct hinic_msix_config msix_cfg = {0}; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !interrupt_info) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = interrupt_info->msix_index; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EINVAL; + } + + interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt; + interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt; + interrupt_info->pending_limt = msix_cfg.pending_cnt; + interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt; + interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} +EXPORT_SYMBOL(hinic_get_interrupt_cfg); + +int hinic_set_interrupt_cfg_direct(void *hwdev, + struct nic_interrupt_info *interrupt_info) +{ + struct hinic_hwdev *nic_hwdev = hwdev; + struct hinic_msix_config msix_cfg = {0}; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = (u16)interrupt_info->msix_index; + msix_cfg.lli_credit_cnt = interrupt_info->lli_credit_limit; + msix_cfg.lli_tmier_cnt = interrupt_info->lli_timer_cfg; + msix_cfg.pending_cnt = interrupt_info->pending_limt; + msix_cfg.coalesct_timer_cnt = interrupt_info->coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = interrupt_info->resend_timer_cfg; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info) +{ + struct nic_interrupt_info temp_info; + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = interrupt_info.msix_index; + + err = hinic_get_interrupt_cfg(hwdev, &temp_info); + if (err) + return -EINVAL; + + 
if (!interrupt_info.lli_set) { + interrupt_info.lli_credit_limit = temp_info.lli_credit_limit; + interrupt_info.lli_timer_cfg = temp_info.lli_timer_cfg; + } + + if (!interrupt_info.interrupt_coalesc_set) { + interrupt_info.pending_limt = temp_info.pending_limt; + interrupt_info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; + interrupt_info.resend_timer_cfg = temp_info.resend_timer_cfg; + } + + return hinic_set_interrupt_cfg_direct(hwdev, &interrupt_info); +} +EXPORT_SYMBOL(hinic_set_interrupt_cfg); + +void hinic_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic_hwif *hwif; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + msix_ctrl = HINIC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_idx); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); +} +EXPORT_SYMBOL(hinic_misx_intr_clear_resend_bit); + +static int init_aeqs_msix_attr(struct hinic_hwdev *hwdev) +{ + struct hinic_aeqs *aeqs = hwdev->aeqs; + struct nic_interrupt_info info = {0}; + struct hinic_eq *eq; + int q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic_set_interrupt_cfg_direct(hwdev, &info); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set msix attr for aeq %d\n", + q_id); + return -EFAULT; + } + } + + hinic_set_mbox_seg_ack_mod(hwdev, HINIC_MBOX_SEND_MSG_INT); + + return 0; +} + +static int init_ceqs_msix_attr(struct hinic_hwdev *hwdev) +{ + struct hinic_ceqs *ceqs = hwdev->ceqs; + struct nic_interrupt_info info = {0}; + struct hinic_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set msix attr for ceq %d\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwdev: the pointer to hw device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + */ +static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx); + + val = hinic_hwif_read_reg(hwdev->hwif, addr); + val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) | + HINIC_DMA_ATTR_ENTRY_SET(at, AT) | + HINIC_DMA_ATTR_ENTRY_SET(ph, PH) | + 
HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) | + HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hinic_hwif_write_reg(hwdev->hwif, addr, val); +} + +static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + struct hinic_vf_dma_attr_table attr = {0}; + u16 out_size = sizeof(attr); + int err; + + err = hinic_global_func_id_get(hwdev, &attr.func_idx); + if (err) + return err; + + attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev); + attr.entry_idx = entry_idx; + attr.st = st; + attr.at = at; + attr.ph = ph; + attr.no_snooping = no_snooping; + attr.tph_en = tph_en; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_DMA_ATTR_SET, &attr, + sizeof(attr), &attr, &out_size, 0); + if (err || !out_size || attr.status) { + sdk_err(hwdev->dev_hdl, "Failed to set dma attribute, err: %d, status: 0x%x, out_size: 0x%x\n", + err, attr.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * dma_attr_table_init - initialize the the default dma attributes + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + */ +static int dma_attr_table_init(struct hinic_hwdev *hwdev) +{ + int err = 0; + + if (HINIC_IS_VF(hwdev)) + err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, + HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, + HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); + else + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HINIC_PCIE_ST_DISABLE, + HINIC_PCIE_AT_DISABLE, + HINIC_PCIE_PH_DISABLE, + HINIC_PCIE_SNOOP, + HINIC_PCIE_TPH_DISABLE); + + return err; +} + +static int resources_state_set(struct hinic_hwdev *hwdev, + enum hinic_res_state state) +{ + struct hinic_cmd_set_res_state res_state = {0}; + u16 out_size = sizeof(res_state); + int err; + + err = hinic_global_func_id_get(hwdev, &res_state.func_idx); + if (err) + return err; + + res_state.state = state; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), + &res_state, &out_size, 0); + if (err || !out_size || res_state.status) { + sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n", + err, res_state.status, out_size); + return -EFAULT; + } + + return 0; +} + +static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = pri_handle; + u8 cmd_idx; + u32 *mem; + u16 i; + + for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + if (!pf_to_mgmt->proc.info[cmd_idx].proc) { + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, + "PF recv up comm msg handle null, cmd(0x%x)\n", + cmd); + } else { + pf_to_mgmt->proc.info[cmd_idx].proc(hwdev, + buf_in, in_size, buf_out, out_size); + } + + return; + } + } + + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n", + cmd); + + mem = buf_in; + for (i = 0; i < (in_size / sizeof(u32)); i++) { + pr_info("0x%x\n", *mem); + mem++; + } + + *out_size = 0; +} + +static int hinic_vf_get_ppf_init_state(void *handle, void *buf_out, + u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + struct hinic_ppf_state *ppf_state = buf_out; + struct card_node *chip_node = hwdev->chip_node; + + ppf_state->ppf_state = (u8)chip_node->ppf_state; + + *out_size = sizeof(*ppf_state); + + return 0; +} + +int 
hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode) +{ + struct hinic_sdi_mode_info sdi_mode = {0}; + u16 out_size = sizeof(sdi_mode); + int err; + + sdi_mode.opcode = HINIC_SDI_INFO_MODE & (~HINIC_SDI_INFO_SET); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_SDI_MODE, &sdi_mode, + sizeof(sdi_mode), &sdi_mode, &out_size, 0); + if ((sdi_mode.status != HINIC_MGMT_CMD_UNSUPPORTED && + sdi_mode.status) || err || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get sdi mode info, err: %d, status: 0x%x, out size: 0x%x\n", + err, sdi_mode.status, out_size); + return -EFAULT; + } + + *cur_mode = sdi_mode.cur_sdi_mode; + + return sdi_mode.status; +} + +int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + int err = 0; + u8 size = ARRAY_SIZE(hw_cmd_support_vf); + + if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd, + buf_in, in_size, size)) { + sdk_err(((struct hinic_hwdev *)handle)->dev_hdl, + "PF Receive VF(%d) common cmd(0x%x) or mbox len(0x%x) is invalid\n", + vf_id + hinic_glb_pf_vf_offset(handle), cmd, in_size); + err = HINIC_MBOX_VF_CMD_ERROR; + return err; + } + + if (cmd == HINIC_MGMT_CMD_START_FLR) { + *out_size = 0; + err = hinic_vf_rx_tx_flush_in_pf(handle, vf_id); + } else if (cmd == HINIC_MGMT_CMD_GET_PPF_STATE) { + err = hinic_vf_get_ppf_init_state(handle, buf_out, out_size); + } else { + err = hinic_pf_msg_to_mgmt_sync(handle, HINIC_MOD_COMM, cmd, + buf_in, in_size, buf_out, + out_size, 0U); + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + sdk_err(((struct hinic_hwdev *)handle)->dev_hdl, + "PF mbox common cmd %d callback handler err: %d\n", + cmd, err); + } + + return err; +} + +static int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} }; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > HINIC_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + HINIC_MAX_AEQS); + num_aeqs = HINIC_MAX_AEQS; + } + err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + set_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state); + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC_MAX_AEQS] = {{0} }; + u16 num_irqs, i; + + clear_bit(HINIC_HWDEV_AEQ_INITED, &hwdev->func_state); + + hinic_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + hinic_aeqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hinic_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int hinic_comm_ceqs_init(struct hinic_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} }; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs > HINIC_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + HINIC_MAX_CEQS); + num_ceqs = 
HINIC_MAX_CEQS; + } + + err = hinic_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hinic_ceqs_init(hwdev, num_ceqs, ceq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + +ceqs_init_err: + for (i = 0; i < num_ceqs; i++) + hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void hinic_comm_ceqs_free(struct hinic_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HINIC_MAX_CEQS] = {{0} }; + u16 num_irqs; + int i; + + hinic_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + hinic_ceqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hinic_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +static int hinic_comm_func_to_func_init(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_func_to_func_init(hwdev); + if (err) + return err; + + hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_FROM_FUNC, + hinic_mbox_func_aeqe_handler); + hinic_aeq_register_hw_cb(hwdev, HINIC_MBX_SEND_RSLT, + hinic_mbox_self_aeqe_handler); + + if (!HINIC_IS_VF(hwdev)) { + hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM, + comm_pf_mbox_handler); + hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC, + sw_func_pf_mbox_handler); + } else { + hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM, + vf_to_pf_handler); + } + + set_bit(HINIC_HWDEV_MBOX_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic_comm_func_to_func_free(struct hinic_hwdev *hwdev) +{ + hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_FROM_FUNC); + hinic_aeq_unregister_hw_cb(hwdev, HINIC_MBX_SEND_RSLT); + + hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM); + hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC); + + hinic_func_to_func_free(hwdev); +} + +static int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + int err; + + if (hinic_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; /* VF do not support send msg to mgmt directly */ + + err = hinic_pf_to_mgmt_init(hwdev); + if (err) + return err; + + hinic_aeq_register_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU, + hinic_mgmt_msg_aeqe_handler); + + hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_COMM, + hwdev->pf_to_mgmt, comm_mgmt_msg_handler); + + set_bit(HINIC_HWDEV_MGMT_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + if (hinic_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; /* VF do not support send msg to mgmt directly */ + + hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_COMM); + + hinic_aeq_unregister_hw_cb(hwdev, HINIC_MSG_FROM_MGMT_CPU); + + hinic_pf_to_mgmt_free(hwdev); +} + +static int hinic_comm_clp_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + int err; + + if (hinic_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; + + err = hinic_clp_pf_to_mgmt_init(hwdev); + if (err) + return err; + + set_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic_comm_clp_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + if (hinic_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; + + clear_bit(HINIC_HWDEV_CLP_INITED, &hwdev->func_state); + hinic_clp_pf_to_mgmt_free(hwdev); +} + +static int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev) +{ + int 
err; + + err = hinic_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + hinic_ceq_register_cb(hwdev, HINIC_CMDQ, hinic_cmdq_ceq_handler); + + err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + return 0; + +set_cmdq_depth_err: + hinic_cmdqs_free(hwdev); + + return err; +} + +static void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev) +{ + hinic_ceq_unregister_cb(hwdev, HINIC_CMDQ); + hinic_cmdqs_free(hwdev); +} + +static int hinic_sync_mgmt_func_state(struct hinic_hwdev *hwdev) +{ + int err; + + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG); + + err = resources_state_set(hwdev, HINIC_RES_ACTIVE); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to set function resources state\n"); + goto resources_state_set_err; + } + + return 0; + +resources_state_set_err: + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + + return err; +} + +static void hinic_unsync_mgmt_func_state(struct hinic_hwdev *hwdev) +{ + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + + resources_state_set(hwdev, HINIC_RES_CLEAN); +} + +int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag) +{ + struct hinic_l2nic_reset l2nic_reset = {0}; + u16 out_size = sizeof(l2nic_reset); + int err = 0; + + err = hinic_set_vport_enable(hwdev, false); + if (err) + return err; + + msleep(100); + + sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag); + + err = hinic_global_func_id_get(hwdev, &l2nic_reset.func_id); + if (err) + return err; + + l2nic_reset.reset_flag = reset_flag; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_L2NIC_RESET, &l2nic_reset, + sizeof(l2nic_reset), &l2nic_reset, + &out_size, 0); + if (err || !out_size || l2nic_reset.status) { + sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n", + err, l2nic_reset.status, out_size); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic_l2nic_reset_base); + +static int hinic_l2nic_reset(struct hinic_hwdev *hwdev) +{ + return hinic_l2nic_reset_base(hwdev, 0); +} + +static int __get_func_misc_info(struct hinic_hwdev *hwdev) +{ + int err; + + err = hinic_get_board_info(hwdev, &hwdev->board_info); + if (err) { + /* For the pf/vf of slave host, return error */ + if (hinic_pcie_itf_id(hwdev)) + return err; + + /* VF can't get board info in early version */ + if (!HINIC_IS_VF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Get board info failed\n"); + return err; + } + + memset(&hwdev->board_info, 0xff, + sizeof(struct hinic_board_info)); + } + + err = hinic_get_mgmt_version(hwdev, hwdev->mgmt_ver); + if (err) { + sdk_err(hwdev->dev_hdl, "Get mgmt cpu version failed\n"); + return err; + } + + return 0; +} + +/* initialize communication channel */ +int hinic_init_comm_ch(struct hinic_hwdev *hwdev) +{ + int err; + u16 func_id; + + if (IS_BMGW_SLAVE_HOST(hwdev) && + (!hinic_get_master_host_mbox_enable(hwdev))) { + sdk_err(hwdev->dev_hdl, "Master host not initialized\n"); + return -EFAULT; + } + + err = hinic_comm_clp_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = hinic_comm_aeqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + goto aeqs_init_err; + } + + err = hinic_comm_pf_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init msg\n"); + goto msg_init_err; + } + + err = 
hinic_comm_func_to_func_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); + goto func_to_func_init_err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + goto aeqs_msix_attr_init_err; + } + + err = __get_func_misc_info(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get function misc information\n"); + goto get_func_info_err; + } + + /* detect master host chip mode according board type and host id */ + err = rectify_host_mode(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to rectify host mode\n"); + goto rectify_mode_err; + } + + err = hinic_l2nic_reset(hwdev); + if (err) + goto l2nic_reset_err; + + if (IS_MULTI_HOST(hwdev)) { + err = hinic_multi_host_mgmt_init(hwdev); + if (err) + goto multi_host_mgmt_init_err; + } + + dma_attr_table_init(hwdev); + + err = hinic_comm_ceqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + goto ceqs_init_err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + goto init_eqs_msix_err; + } + + /* set default wq page_size */ + hwdev->wq_page_size = HINIC_DEFAULT_WQ_PAGE_SIZE; + + err = hinic_global_func_id_get(hwdev, &func_id); + if (err) + goto get_func_id_err; + + err = hinic_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = hinic_comm_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + set_bit(HINIC_HWDEV_CMDQ_INITED, &hwdev->func_state); + + err = hinic_sync_mgmt_func_state(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n"); + goto sync_mgmt_func_err; + } + + err = hinic_aeq_register_swe_cb(hwdev, HINIC_STATELESS_EVENT, + hinic_nic_sw_aeqe_handler); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to register ucode aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + set_bit(HINIC_HWDEV_COMM_CH_INITED, &hwdev->func_state); + + return 0; + +register_ucode_aeqe_err: + hinic_unsync_mgmt_func_state(hwdev); +sync_mgmt_func_err: + return err; + +cmdq_init_err: + if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF) + hinic_set_wq_page_size(hwdev, func_id, HINIC_HW_WQ_PAGE_SIZE); +init_wq_pg_size_err: +get_func_id_err: +init_eqs_msix_err: + hinic_comm_ceqs_free(hwdev); + +ceqs_init_err: + if (IS_MULTI_HOST(hwdev)) + hinic_multi_host_mgmt_free(hwdev); +multi_host_mgmt_init_err: +l2nic_reset_err: +rectify_mode_err: +get_func_info_err: +aeqs_msix_attr_init_err: +func_to_func_init_err: + return err; + +msg_init_err: + hinic_comm_aeqs_free(hwdev); + +aeqs_init_err: + hinic_comm_clp_to_mgmt_free(hwdev); + + return err; +} + +static void __uninit_comm_module(struct hinic_hwdev *hwdev, + enum hinic_hwdev_init_state init_state) +{ + u16 func_id; + + switch (init_state) { + case HINIC_HWDEV_COMM_CH_INITED: + hinic_aeq_unregister_swe_cb(hwdev, + HINIC_STATELESS_EVENT); + hinic_unsync_mgmt_func_state(hwdev); + break; + case HINIC_HWDEV_CMDQ_INITED: + hinic_comm_cmdqs_free(hwdev); + /* VF can set page size of 256K only, any other value + * will return error in pf, pf will set all vf's page + * size to 4K when disable sriov + */ + if (HINIC_FUNC_TYPE(hwdev) != TYPE_VF) { + func_id = hinic_global_func_id_hw(hwdev); + hinic_set_wq_page_size(hwdev, func_id, + HINIC_HW_WQ_PAGE_SIZE); + } + + hinic_comm_ceqs_free(hwdev); + + if 
(IS_MULTI_HOST(hwdev)) + hinic_multi_host_mgmt_free(hwdev); + break; + case HINIC_HWDEV_MBOX_INITED: + hinic_comm_func_to_func_free(hwdev); + break; + case HINIC_HWDEV_MGMT_INITED: + hinic_comm_pf_to_mgmt_free(hwdev); + break; + case HINIC_HWDEV_AEQ_INITED: + hinic_comm_aeqs_free(hwdev); + break; + case HINIC_HWDEV_CLP_INITED: + hinic_comm_clp_to_mgmt_free(hwdev); + break; + default: + break; + } +} + +#define HINIC_FUNC_STATE_BUSY_TIMEOUT 300 +void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev) +{ + enum hinic_hwdev_init_state init_state = HINIC_HWDEV_COMM_CH_INITED; + int cnt; + + while (init_state > HINIC_HWDEV_NONE_INITED) { + if (!test_bit(init_state, &hwdev->func_state)) { + init_state--; + continue; + } + clear_bit(init_state, &hwdev->func_state); + + cnt = 0; + while (test_bit(HINIC_HWDEV_STATE_BUSY, &hwdev->func_state) && + cnt++ <= HINIC_FUNC_STATE_BUSY_TIMEOUT) + usleep_range(900, 1000); + + __uninit_comm_module(hwdev, init_state); + + init_state--; + } +} + +int hinic_slq_init(void *dev, int num_wqs) +{ + struct hinic_hwdev *hwdev = dev; + int err; + + if (!dev) + return -EINVAL; + + hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL); + if (!hwdev->wqs) + return -ENOMEM; + + err = hinic_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n"); + kfree(hwdev->wqs); + hwdev->wqs = NULL; + } + + return err; +} +EXPORT_SYMBOL(hinic_slq_init); + +void hinic_slq_uninit(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + + if (!hwdev) + return; + + hinic_wqs_free(hwdev->wqs); + + kfree(hwdev->wqs); +} +EXPORT_SYMBOL(hinic_slq_uninit); + +int hinic_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size, + u64 *cla_addr, void **handle) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_wq *wq; + int err; + + if (!dev || !cla_addr || !handle) + return -EINVAL; + + wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (!wq) + return -ENOMEM; + + err = hinic_wq_allocate(hwdev->wqs, wq, wqebb_size, hwdev->wq_page_size, + q_depth, 0); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n"); + kfree(wq); + return -EFAULT; + } + + *cla_addr = wq->block_paddr; + *handle = wq; + + return 0; +} +EXPORT_SYMBOL(hinic_slq_alloc); + +void hinic_slq_free(void *dev, void *handle) +{ + struct hinic_hwdev *hwdev = dev; + + if (!hwdev || !handle) + return; + + hinic_wq_free(hwdev->wqs, handle); + kfree(handle); +} +EXPORT_SYMBOL(hinic_slq_free); + +u64 hinic_slq_get_addr(void *handle, u16 index) +{ + if (!handle) + return 0; /* NULL of wqe addr */ + + return (u64)hinic_get_wqebb_addr(handle, index); +} +EXPORT_SYMBOL(hinic_slq_get_addr); + +u64 hinic_slq_get_first_pageaddr(void *handle) +{ + struct hinic_wq *wq = handle; + + if (!handle) + return 0; /* NULL of wqe addr */ + + return hinic_get_first_wqe_page_addr(wq); +} +EXPORT_SYMBOL(hinic_slq_get_first_pageaddr); + +int hinic_func_tmr_bitmap_set(void *hwdev, bool en) +{ + struct hinic_func_tmr_bitmap_op bitmap_op = {0}; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &bitmap_op.func_idx); + if (err) + return err; + + bitmap_op.ppf_idx = hinic_ppf_idx(hwdev); + if (en) + bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE; + else + bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size, 0); + if (err || !out_size || bitmap_op.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + 
"Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.status, out_size); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_func_tmr_bitmap_set); + +static int ppf_ht_gpa_set(struct hinic_hwdev *hwdev, + struct hinic_page_addr *pg0, + struct hinic_page_addr *pg1) +{ + struct comm_info_ht_gpa_set ht_gpa_set = {0}; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PPF_HT_GPA_SET, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size, 0); + if (ret || !out_size || ht_gpa_set.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hinic_ppf_ht_gpa_init(struct hinic_hwdev *hwdev) +{ + int ret; + int i; + int j; + int size; + + struct hinic_page_addr page_addr0[HINIC_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hinic_page_addr page_addr1[HINIC_PPF_HT_GPA_SET_RETRY_TIMES]; + + size = HINIC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < HINIC_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (!ret) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= HINIC_PPF_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +void hinic_ppf_ht_gpa_deinit(struct hinic_hwdev *hwdev) +{ + if (hwdev->page_pa0.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + hwdev->page_pa0.phys_addr); + hwdev->page_pa0.virt_addr = NULL; + } + + if (hwdev->page_pa1.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + } +} + +static int set_ppf_tmr_status(struct hinic_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct hinic_ppf_tmr_op op = {0}; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + if (hinic_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + if (status == HINIC_PPF_TMR_FLAG_START) { + err = hinic_ppf_ht_gpa_init(hwdev); + if (err) { + 
sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); + return -EFAULT; + } + } else { + hinic_ppf_ht_gpa_deinit(hwdev); + } + + op.op_id = status; + op.ppf_idx = hinic_ppf_idx(hwdev); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PPF_TMR_SET, &op, + sizeof(op), &op, &out_size, 0); + if (err || !out_size || op.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_ppf_tmr_start(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START); +} +EXPORT_SYMBOL(hinic_ppf_tmr_start); + +int hinic_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP); +} +EXPORT_SYMBOL(hinic_ppf_tmr_stop); + +int mqm_eqm_try_alloc_mem(struct hinic_hwdev *hwdev, u32 page_size, + u32 page_num) +{ + struct hinic_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; + u32 valid_num = 0; + u32 flag = 1; + u32 i = 0; + + for (i = 0; i < page_num; i++) { + page_addr->virt_addr = + dma_zalloc_coherent(hwdev->dev_hdl, page_size, + &page_addr->phys_addr, GFP_KERNEL); + if (!page_addr->virt_addr) { + flag = 0; + break; + } + valid_num++; + page_addr++; + } + + if (flag == 1) { + hwdev->mqm_att.page_size = page_size; + hwdev->mqm_att.page_num = page_num; + } else { + page_addr = hwdev->mqm_att.brm_srch_page_addr; + for (i = 0; i < valid_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + page_addr->virt_addr, + page_addr->phys_addr); + page_addr++; + } + return -EFAULT; + } + + return 0; +} + +int mqm_eqm_alloc_page_mem(struct hinic_hwdev *hwdev) +{ + int ret = 0; + + /* apply for 64KB page, number is chunk_num/16 */ + ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024, + hwdev->mqm_att.chunk_num >> 4); + if (!ret) + return 0; + + /* apply for 8KB page, number is chunk_num/2 */ + ret = mqm_eqm_try_alloc_mem(hwdev, 8 * 1024, + hwdev->mqm_att.chunk_num >> 1); + if (!ret) + return 0; + + /* apply for 4KB page, number is chunk_num */ + ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024, + hwdev->mqm_att.chunk_num); + if (!ret) + return 0; + + return ret; +} + +void mqm_eqm_free_page_mem(struct hinic_hwdev *hwdev) +{ + u32 i; + struct hinic_page_addr *page_addr; + u32 page_size; + + page_size = hwdev->mqm_att.page_size; + page_addr = hwdev->mqm_att.brm_srch_page_addr; + + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + dma_free_coherent(hwdev->dev_hdl, page_size, + page_addr->virt_addr, page_addr->phys_addr); + page_addr++; + } +} + +int mqm_eqm_set_cfg_2_hw(struct hinic_hwdev *hwdev, u32 valid) +{ + struct comm_info_eqm_cfg info_eqm_cfg = {0}; + u16 out_size = sizeof(info_eqm_cfg); + int err; + + info_eqm_cfg.ppf_id = hinic_global_func_id_hw(hwdev); + info_eqm_cfg.page_size = hwdev->mqm_att.page_size; + info_eqm_cfg.valid = valid; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MQM_CFG_INFO_SET, + &info_eqm_cfg, sizeof(info_eqm_cfg), + &info_eqm_cfg, &out_size, 0); + if (err || !out_size || info_eqm_cfg.status) { + sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info_eqm_cfg.status, out_size); + return -EFAULT; + } + + return 0; +} + +#define EQM_DATA_BUF_SIZE 1024 + +int mqm_eqm_set_page_2_hw(struct hinic_hwdev *hwdev) +{ + struct comm_info_eqm_search_gpa 
*info; + struct hinic_page_addr *page_addr; + void *send_buf; + u16 send_buf_size; + u32 i; + u64 *gpa_hi52; + u64 gpa; + u32 num; + u32 start_idx; + int err = 0; + u32 valid_page_num; + u16 out_size; + + send_buf_size = sizeof(struct comm_info_eqm_search_gpa) + + EQM_DATA_BUF_SIZE; + send_buf = kzalloc(send_buf_size, GFP_KERNEL); + if (!send_buf) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); + return -EFAULT; + } + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + info = (struct comm_info_eqm_search_gpa *)send_buf; + valid_page_num = 0; + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = 0; + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + gpa = page_addr->phys_addr >> 12; + gpa_hi52[num] = gpa; + num++; + if (num == 128) { + info->num = num; + info->start_idx = start_idx; + out_size = send_buf_size; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MQM_SRCH_GPA_SET, + info, (u16)send_buf_size, info, + &out_size, 0); + if (err || !out_size || info->status) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = i + 1; + } + page_addr++; + valid_page_num++; + } + + if (0 != (valid_page_num & 0x7f)) { + info->num = num; + info->start_idx = start_idx; + out_size = send_buf_size; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MQM_SRCH_GPA_SET, + info, (u16)send_buf_size, + info, &out_size, 0); + if (err || !out_size || info->status) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + } + +set_page_2_hw_end: + kfree(send_buf); + return err; +} + +int mqm_eqm_init(struct hinic_hwdev *hwdev) +{ + struct comm_info_eqm_fix info_eqm_fix = {0}; + u16 len = sizeof(info_eqm_fix); + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_MQM_FIX_INFO_GET, + &info_eqm_fix, sizeof(info_eqm_fix), + &info_eqm_fix, &len, 0); + if (ret || !len || info_eqm_fix.status) { + sdk_err(hwdev->dev_hdl, "Get mqm fix info failed, err: %d, status: 0x%x, out_size: 0x%x\n", + ret, info_eqm_fix.status, len); + return -EFAULT; + } + if (!(info_eqm_fix.chunk_num)) + return 0; + + hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; + hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; + hwdev->mqm_att.page_size = 0; + hwdev->mqm_att.page_num = 0; + + hwdev->mqm_att.brm_srch_page_addr = + kcalloc(hwdev->mqm_att.chunk_num, + sizeof(struct hinic_page_addr), GFP_KERNEL); + if (!(hwdev->mqm_att.brm_srch_page_addr)) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\n"); + return -EFAULT; + } + + ret = mqm_eqm_alloc_page_mem(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\n"); + goto err_page; + } + + ret = mqm_eqm_set_page_2_hw(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\n"); + goto err_ecmd; + } + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\n"); + goto err_ecmd; + } + + return 0; + +err_ecmd: + mqm_eqm_free_page_mem(hwdev); + +err_page: + kfree(hwdev->mqm_att.brm_srch_page_addr); + + return ret; +} + +void mqm_eqm_deinit(struct hinic_hwdev *hwdev) +{ + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return; + + if (!(hwdev->mqm_att.chunk_num)) + return; + + 
mqm_eqm_free_page_mem(hwdev); + kfree(hwdev->mqm_att.brm_srch_page_addr); + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); + if (ret) { + sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail, err: %d\n", + ret); + return; + } + + hwdev->mqm_att.chunk_num = 0; + hwdev->mqm_att.search_gpa_num = 0; + hwdev->mqm_att.page_num = 0; + hwdev->mqm_att.page_size = 0; +} + +int hinic_ppf_ext_db_init(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + int ret; + + if (!dev) + return -EINVAL; + + ret = mqm_eqm_init(hwdev); + if (ret) { + sdk_err(hwdev->dev_hdl, "MQM eqm init failed\n"); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_ppf_ext_db_init); + +int hinic_ppf_ext_db_deinit(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + + if (!dev) + return -EINVAL; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return -EFAULT; + + mqm_eqm_deinit(hwdev); + + return 0; +} +EXPORT_SYMBOL(hinic_ppf_ext_db_deinit); + +int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + u32 page_size) +{ + struct hinic_wq_page_size page_size_info = {0}; + u16 out_size = sizeof(page_size_info); + int err; + + page_size_info.func_idx = func_idx; + page_size_info.ppf_idx = hinic_ppf_idx(hwdev); + page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size); + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PAGESIZE_SET, + &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, 0); + if (err || !out_size || page_size_info.status) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n", + err, page_size_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +enum hinic_event_cmd { + /* hilink event */ + HINIC_EVENT_LINK_STATUS_CHANGE = 1, + HINIC_EVENT_LINK_ERR, + HINIC_EVENT_CABLE_PLUG, + HINIC_EVENT_HILINK_INFO, + /* reserved for hilink */ + + /* driver event, pf & vf communicate */ + HINIC_EVENT_HEARTBEAT_LOST = 31, + HINIC_EVENT_SET_VF_COS, + + /* mgmt event */ + HINIC_EVENT_MGMT_FAULT = 61, + HINIC_EVENT_MGMT_WATCHDOG, + HINIC_EVENT_MGMT_FMW_ACT_NTC, + HINIC_EVENT_MGMT_RESET, + HINIC_EVENT_MGMT_PCIE_DFX, + HINIC_EVENT_MCTP_HOST_INFO, + HINIC_EVENT_SFP_INFO_REPORT, + HINIC_EVENT_SFP_ABS_REPORT, + + HINIC_EVENT_MAX_TYPE, +}; + +struct hinic_event_convert { + u8 mod; + u8 cmd; + + enum hinic_event_cmd event; +}; + +static struct hinic_event_convert __event_convert[] = { + /* hilink event */ + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_LINK_STATUS_REPORT, + .event = HINIC_EVENT_LINK_STATUS_CHANGE, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_LINK_ERR_EVENT, + .event = HINIC_EVENT_LINK_ERR, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_CABLE_PLUG_EVENT, + .event = HINIC_EVENT_CABLE_PLUG, + }, + { + .mod = HINIC_MOD_HILINK, + .cmd = HINIC_HILINK_CMD_GET_LINK_INFO, + .event = HINIC_EVENT_HILINK_INFO, + }, + + /* driver triggered event */ + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_MGMT_CMD_HEART_LOST_REPORT, + .event = HINIC_EVENT_HEARTBEAT_LOST, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_SET_VF_COS, + .event = HINIC_EVENT_SET_VF_COS, + }, + + /* mgmt event */ + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_FAULT_REPORT, + .event = HINIC_EVENT_MGMT_FAULT, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_WATCHDOG_INFO, + .event = HINIC_EVENT_MGMT_WATCHDOG, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_FMW_ACT_NTC, + .event = HINIC_EVENT_MGMT_FMW_ACT_NTC, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_MGMT_RESET, + .event = 
HINIC_EVENT_MGMT_RESET, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_PCIE_DFX_NTC, + .event = HINIC_EVENT_MGMT_PCIE_DFX, + }, + { + .mod = HINIC_MOD_COMM, + .cmd = HINIC_MGMT_CMD_GET_HOST_INFO, + .event = HINIC_EVENT_MCTP_HOST_INFO, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_GET_SFP_INFO, + .event = HINIC_EVENT_SFP_INFO_REPORT, + }, + { + .mod = HINIC_MOD_L2NIC, + .cmd = HINIC_PORT_CMD_GET_SFP_ABS, + .event = HINIC_EVENT_SFP_ABS_REPORT, + }, +}; + +static enum hinic_event_cmd __get_event_type(u8 mod, u8 cmd) +{ + int idx; + int arr_size = ARRAY_SIZE(__event_convert); + + for (idx = 0; idx < arr_size; idx++) { + if (__event_convert[idx].mod == mod && + __event_convert[idx].cmd == cmd) + return __event_convert[idx].event; + } + + return HINIC_EVENT_MAX_TYPE; +} + +bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd) +{ + if ((mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_GET_HOST_INFO) || + (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_HEARTBEAT_EVENT)) + return false; + + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC || + mod == HINIC_MOD_HILINK) + return true; + + return false; +} + +#define FAULT_SHOW_STR_LEN 16 +static void fault_report_show(struct hinic_hwdev *hwdev, + struct hinic_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault"}; + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char type_str[FAULT_SHOW_STR_LEN + 1]; + char level_str[FAULT_SHOW_STR_LEN + 1]; + u8 level; + u32 pos, base; + struct hinic_fault_event_stats *fault; + u8 node_id; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d\n", + hinic_global_func_id(hwdev)); + + memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN); + else + strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN); + + sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0], event->event.val[1], event->event.val[2], + event->event.val[3]); + + fault = &hwdev->hw_stats.fault_event_stats; + + switch (event->type) { + case FAULT_TYPE_CHIP: + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) { + sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n", + level, level_str, event->event.chip.func_id); + atomic_inc(&fault->fault_type_stat[event->type]); + } + sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); + + node_id = event->event.chip.node_id; + atomic_inc(&fault->chip_fault_stats[node_id][level]); + + base = event->event.chip.node_id * FAULT_LEVEL_MAX * + HINIC_CHIP_ERROR_TYPE_MAX; + pos = base + HINIC_CHIP_ERROR_TYPE_MAX * level + + event->event.chip.err_type; + if (pos < HINIC_CHIP_FAULT_SIZE) + hwdev->chip_fault_stats[pos]++; + break; + case FAULT_TYPE_UCODE: + atomic_inc(&fault->fault_type_stat[event->type]); + + 
sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, + event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n", + event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +void hinic_migrate_report(void *dev) +{ + struct hinic_hwdev *hwdev = (struct hinic_hwdev *)dev; + struct hinic_event_info event_info = {0}; + + if (!dev) + return; + + event_info.type = HINIC_EVENT_INIT_MIGRATE_PF; + if (hwdev->event_callback) + hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} +EXPORT_SYMBOL(hinic_migrate_report); + +static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cmd_fault_event *fault_event; + struct hinic_event_info event_info; + u8 fault_level; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (fault_event->event.type == HINIC_FAULT_SRC_HW_MGMT_CHIP) + fault_level = fault_event->event.event.chip.err_level; + else + fault_level = FAULT_LEVEL_FATAL; + + if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_FAULT; + memcpy(&event_info.info, &fault_event->event, + sizeof(event_info.info)); + event_info.info.fault_level = fault_level; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } +} + +static void heartbeat_lost_event_handler(struct hinic_hwdev *hwdev) +{ + struct hinic_event_info event_info = {0}; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + hinic_global_func_id(hwdev)); + + if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } +} + +static void link_status_event_handler(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_port_link_status *link_status, *ret_link_status; + struct hinic_event_info event_info = {0}; + struct hinic_event_link_info *link_info = &event_info.link_info; + struct nic_port_info port_info = {0}; + int err; + + /* Ignore link change event */ + if (FUNC_FORCE_LINK_UP(hwdev)) + return; + + link_status = buf_in; + sdk_info(hwdev->dev_hdl, "Link status report received, func_id: %d, status: %d\n", + hinic_global_func_id(hwdev), link_status->link); + + if (link_status->link) + 
atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); + + /* link event reported only after set vport enable */ + if (hinic_func_type(hwdev) != TYPE_VF && + link_status->link == HINIC_EVENT_LINK_UP) { + err = hinic_get_port_info(hwdev, &port_info); + if (err) { + nic_warn(hwdev->dev_hdl, "Failed to get port info\n"); + } else { + link_info->valid = 1; + link_info->port_type = port_info.port_type; + link_info->autoneg_cap = port_info.autoneg_cap; + link_info->autoneg_state = port_info.autoneg_state; + link_info->duplex = port_info.duplex; + link_info->speed = port_info.speed; + hinic_refresh_nic_cfg(hwdev, &port_info); + } + } + + if (!hwdev->event_callback) + return; + + event_info.type = link_status->link ? + HINIC_EVENT_LINK_UP : HINIC_EVENT_LINK_DOWN; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + if (hinic_func_type(hwdev) != TYPE_VF) { + hinic_notify_all_vfs_link_changed(hwdev, link_status->link); + ret_link_status = buf_out; + ret_link_status->status = 0; + *out_size = sizeof(*ret_link_status); + } +} + +static void module_status_event(struct hinic_hwdev *hwdev, + enum hinic_event_cmd cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cable_plug_event *plug_event; + struct hinic_link_err_event *link_err; + struct hinic_event_info event_info = {0}; + struct hinic_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + event_info.type = HINIC_EVENT_PORT_MODULE_EVENT; + + if (cmd == HINIC_EVENT_CABLE_PLUG) { + plug_event = buf_in; + + if (plug_event->port_id < HINIC_MAX_PORT_ID) { + rt_cmd = &chip_node->rt_cmd[plug_event->port_id]; + mutex_lock(&chip_node->sfp_mutex); + rt_cmd->up_send_sfp_abs = false; + rt_cmd->up_send_sfp_info = false; + mutex_unlock(&chip_node->sfp_mutex); + } + + event_info.module_event.type = plug_event->plugged ? + HINIC_PORT_MODULE_CABLE_PLUGGED : + HINIC_PORT_MODULE_CABLE_UNPLUGGED; + + *out_size = sizeof(*plug_event); + plug_event = buf_out; + plug_event->status = 0; + } else if (cmd == HINIC_EVENT_LINK_ERR) { + link_err = buf_in; + + event_info.module_event.type = HINIC_PORT_MODULE_LINK_ERR; + event_info.module_event.err_type = link_err->err_type; + + *out_size = sizeof(*link_err); + link_err = buf_out; + link_err->status = 0; + } else { + sdk_warn(hwdev->dev_hdl, "Unknown module event: %d\n", cmd); + return; + } + + if (!hwdev->event_callback) + return; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} + +void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state) +{ + struct hinic_event_info event_info = {0}; + + sdk_info(hwdev->dev_hdl, "DCB %s, default cos %d, up2cos %d%d%d%d%d%d%d%d\n", + dcb_state->dcb_on ? 
"on" : "off", dcb_state->default_cos, + dcb_state->up_cos[0], dcb_state->up_cos[1], + dcb_state->up_cos[2], dcb_state->up_cos[3], + dcb_state->up_cos[4], dcb_state->up_cos[5], + dcb_state->up_cos[6], dcb_state->up_cos[7]); + + /* Saved in sdk for statefull module */ + hinic_save_dcb_state(hwdev, dcb_state); + + if (!hwdev->event_callback) + return; + + event_info.type = HINIC_EVENT_DCB_STATE_CHANGE; + memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state)); + hwdev->event_callback(hwdev->event_pri_handle, &event_info); +} + +static void sw_watchdog_timeout_info_show(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_mgmt_watchdog_info *watchdog_info; + u32 *dump_addr, *reg, stack_len, i, j; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + watchdog_info = buf_in; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, + watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n", + watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + for (i = 0; i < 3; i++) { + reg = watchdog_info->reg + (u64)(u32)(4 * i); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *(reg), *(reg + 1), *(reg + 2), *(reg + 3)); + } + + sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]); + + if (watchdog_info->stack_actlen <= 1024) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = 1024; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / 16); i++) { + dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16))); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 1), *(dump_addr + 2), + *(dump_addr + 3)); + } + + for (j = 0; j < ((stack_len % 16) / 4); j++) { + dump_addr = (u32 *)(watchdog_info->data + + ((u64)(u32)(i * 16 + j * 4))); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = { 0 }; + + sw_watchdog_timeout_info_show(hwdev, buf_in, in_size, + buf_out, out_size); + + if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_MGMT_WATCHDOG_EVENT; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } +} + +static void port_sfp_info_event(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_sfp_qsfp_info *sfp_info = buf_in; + struct hinic_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_info)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_info)); + 
return; + } + + if (sfp_info->port_id >= HINIC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_info->port_id, HINIC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info)); + rt_cmd->up_send_sfp_info = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void port_sfp_abs_event(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_cmd_get_light_module_abs *sfp_abs = buf_in; + struct hinic_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_abs)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_abs)); + return; + } + + if (sfp_abs->port_id >= HINIC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_abs->port_id, HINIC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs)); + rt_cmd->up_send_sfp_abs = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void mgmt_reset_event_handler(struct hinic_hwdev *hwdev) +{ + sdk_info(hwdev->dev_hdl, "Mgmt is reset\n"); + + /* mgmt reset only occurs on hot update or mgmt deadloop, + * if mgmt deadloops, mgmt will report an event with + * mod=0, cmd=0x56, and the fault will be reported to the os, + * so the mgmt reset event does not need to report a fault + */ +} + +static void hinic_fmw_act_ntc_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = {0}; + struct hinic_fmw_act_ntc *notice_info; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld\n", + in_size, sizeof(*notice_info)); + return; + } + + if (!hwdev->event_callback) + return; + + event_info.type = HINIC_EVENT_FMW_ACT_NTC; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*notice_info); + notice_info = buf_out; + notice_info->status = 0; +} + +static void hinic_pcie_dfx_event_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_pcie_dfx_ntc *notice_info = buf_in; + struct hinic_pcie_dfx_info *dfx_info; + u16 size = 0; + u16 cnt = 0; + u32 num = 0; + u32 i, j; + int err; + u32 *reg; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid pcie dfx notice, length: %d, should be %ld\n", + in_size, sizeof(*notice_info)); + return; + } + + dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL); + if (!dfx_info) { + sdk_err(hwdev->dev_hdl, "Failed to allocate dfx_info memory\n"); + return; + } + + ((struct hinic_pcie_dfx_ntc *)buf_out)->status = 0; + *out_size = sizeof(*notice_info); + num = (u32)(notice_info->len / 1024); + sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len); + sdk_info(hwdev->dev_hdl, "PCIE DFX:\n"); + dfx_info->host_id = 0; + for (i = 0; i < num; i++) { + dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE; + if (i == (num - 1)) + dfx_info->last = 1; + size = sizeof(*dfx_info); + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_PCIE_DFX_GET, + dfx_info, sizeof(*dfx_info), + dfx_info, &size, 0); + if (err || dfx_info->status ||
!size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n", + err, dfx_info->status, size); + kfree(dfx_info); + return; + } + + reg = (u32 *)dfx_info->data; + for (j = 0; j < 256; j = j + 8) { + /*lint -save -e661 -e662*/ + sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + cnt, reg[j], reg[(u32)(j + 1)], + reg[(u32)(j + 2)], reg[(u32)(j + 3)], + reg[(u32)(j + 4)], reg[(u32)(j + 5)], + reg[(u32)(j + 6)], reg[(u32)(j + 7)]); + /*lint -restore*/ + cnt = cnt + 32; + } + memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE); + } + kfree(dfx_info); +} + +struct hinic_mctp_get_host_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 huawei_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 actual_len; + + u8 data[1024]; +}; + +static void hinic_mctp_get_host_info_event_handler(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = {0}; + struct hinic_mctp_get_host_info *mctp_out, *mctp_in; + struct hinic_mctp_host_info *host_info; + + if (in_size != sizeof(*mctp_in)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n", + in_size, sizeof(*mctp_in)); + return; + } + + *out_size = sizeof(*mctp_out); + mctp_out = buf_out; + mctp_out->status = 0; + + if (!hwdev->event_callback) { + mctp_out->status = HINIC_MGMT_STATUS_ERR_INIT; + return; + } + + mctp_in = buf_in; + host_info = &event_info.mctp_info; + host_info->major_cmd = mctp_in->huawei_cmd; + host_info->sub_cmd = mctp_in->sub_cmd; + host_info->data = mctp_out->data; + + event_info.type = HINIC_EVENT_MCTP_GET_HOST_INFO; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + mctp_out->actual_len = host_info->data_len; +} + +static char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", + "NO-FEC"}; + +static char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = { + "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC", + "Back plane", "BaseT" +}; + +static void __print_cable_info(struct hinic_hwdev *hwdev, + struct hinic_link_info *info) +{ + char tmp_str[CAP_INFO_MAC_LEN] = {0}; + char tmp_vendor[VENDOR_MAX_LEN] = {0}; + char *port_type = "Unknown port type"; + int i; + int err = 0; + + if (info->cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return; + } + + if (info->port_type < LINK_PORT_MAX_TYPE) + port_type = __hw_to_char_port_type[info->port_type]; + else + sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n", + info->port_type); + if (info->port_type == LINK_PORT_FIBRE) { + if (info->port_sub_type == FIBRE_SUBTYPE_SR) + port_type = "Fibre-SR"; + else if (info->port_sub_type == FIBRE_SUBTYPE_LR) + port_type = "Fibre-LR"; + } + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, + sizeof(info->vendor_name)); + err = snprintf(tmp_str, sizeof(tmp_str), + "Vendor: %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, port_type, info->cable_length, + info->cable_max_speed); + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, + "Failed snprintf cable vendor info, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + + if (info->port_type == LINK_PORT_FIBRE || + info->port_type == LINK_PORT_AOC) { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, %s, Temperature: %u", tmp_str, + info->sfp_type ? 
"SFP" : "QSFP", + info->cable_temp); + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, + "Failed snprintf cable Temp, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + + if (info->sfp_type) { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, rx power: %uuW, tx power: %uuW", + tmp_str, info->power[0], info->power[1]); + } else { + err = snprintf(tmp_str, sizeof(tmp_str), + "%s, rx power: %uuw %uuW %uuW %uuW", + tmp_str, info->power[0], info->power[1], + info->power[2], info->power[3]); + } + if (err <= 0 || err >= CAP_INFO_MAC_LEN) { + sdk_err(hwdev->dev_hdl, + "Failed snprintf power info, function return(%d) and dest_len(%d)\n", + err, CAP_INFO_MAC_LEN); + return; + } + } + + sdk_info(hwdev->dev_hdl, "Cable information: %s\n", + tmp_str); +} + +static void __hi30_lane_info(struct hinic_hwdev *hwdev, + struct hilink_lane *lane) +{ + struct hi30_ffe_data *ffe_data; + struct hi30_ctle_data *ctle_data; + + ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe; + ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle; + + sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n", + (ffe_data->PRE1 & 0x10) ? "-" : "", + (int)(ffe_data->PRE1 & 0xf), + (ffe_data->PRE2 & 0x10) ? "-" : "", + (int)(ffe_data->PRE2 & 0xf), + (int)ffe_data->MAIN, + (ffe_data->POST1 & 0x10) ? "-" : "", + (int)(ffe_data->POST1 & 0xf), + (ffe_data->POST2 & 0x10) ? "-" : "", + (int)(ffe_data->POST2 & 0xf)); + sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n", + ctle_data->ctlebst[0], ctle_data->ctlebst[1], + ctle_data->ctlebst[2], ctle_data->ctlecmband[0], + ctle_data->ctlecmband[1], ctle_data->ctlecmband[2], + ctle_data->ctlermband[0], ctle_data->ctlermband[1], + ctle_data->ctlermband[2], ctle_data->ctleza[0], + ctle_data->ctleza[1], ctle_data->ctleza[2]); +} + +static void __print_hi30_status(struct hinic_hwdev *hwdev, + struct hinic_link_info *info) +{ + struct hilink_lane *lane; + int lane_used_num = 0, i; + + for (i = 0; i < HILINK_MAX_LANE; i++) { + lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane)); + if (!lane->lane_used) + continue; + + __hi30_lane_info(hwdev, lane); + lane_used_num++; + } + + /* in new firmware, all lane info setted in lane2 */ + if (lane_used_num) + return; + + /* compatible old firmware */ + __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1); +} + +static void __print_link_info(struct hinic_hwdev *hwdev, + struct hinic_link_info *info, + enum hilink_info_print_event type) +{ + char *fec = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = __hw_to_char_fec[info->fec]; + else + sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n", + info->fec); + + if (type == HILINK_EVENT_LINK_UP || !info->an_state) { + sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n", + info->speed, fec, info->an_state ? "on" : "off"); + } else { + sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n", + info->an_state ? 
"on" : "off"); + } +} + +static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = { + "", "link up", "link down", "cable plugged" +}; + +static void print_hilink_info(struct hinic_hwdev *hwdev, + enum hilink_info_print_event type, + struct hinic_link_info *info) +{ + __print_cable_info(hwdev, info); + + __print_link_info(hwdev, info, type); + + __print_hi30_status(hwdev, info); + + if (type == HILINK_EVENT_LINK_UP) + return; + + if (type == HILINK_EVENT_CABLE_PLUGGED) { + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n", + info->alos, info->rx_los); + return; + } + + sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + info->pma_status == 1 ? "off" : "on", + info->mac_tx_en ? "enable" : "disable", + info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg, + info->pma_signal_ok_reg, info->rf_lf_status_reg); + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + info->alos, info->rx_los, info->pcs_err_blk_cnt_reg, + info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt); +} + +static int hinic_print_hilink_info(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hilink_link_info *hilink_info = buf_in; + struct hinic_link_info *info; + enum hilink_info_print_event type; + + if (in_size != sizeof(*hilink_info)) { + sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n", + in_size, sizeof(*hilink_info)); + return -EINVAL; + } + + ((struct hinic_hilink_link_info *)buf_out)->status = 0; + *out_size = sizeof(*hilink_info); + + info = &hilink_info->info; + type = hilink_info->info_type; + + if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) { + sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return -EINVAL; + } + + sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n", + hilink_info_report_type[type]); + + print_hilink_info(hwdev, type, info); + + return 0; +} + +int hinic_hilink_info_show(struct hinic_hwdev *hwdev) +{ + struct hinic_link_info hilink_info = { {0} }; + int err; + + err = hinic_get_hilink_link_info(hwdev, &hilink_info); + if (err) { + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + sdk_info(hwdev->dev_hdl, "Unsupport to get hilink info\n"); + return err; + } + + if (hilink_info.cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return 0; + } + + sdk_info(hwdev->dev_hdl, "Current state of hilink info:\n"); + print_hilink_info(hwdev, HILINK_EVENT_MAX_TYPE, &hilink_info); + + return 0; +} + +/* public process for this event: + * pf link change event + * pf heart lost event ,TBD + * pf fault report event + * vf link change event + * vf heart lost event, TBD + * vf fault report event, TBD + */ +static void _event_handler(struct hinic_hwdev *hwdev, enum hinic_event_cmd cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_vf_dcb_state *vf_dcb; + + if (!hwdev) + return; + + *out_size = 0; + + switch (cmd) { + case HINIC_EVENT_LINK_STATUS_CHANGE: + link_status_event_handler(hwdev, buf_in, in_size, buf_out, + out_size); + break; + + case HINIC_EVENT_CABLE_PLUG: + case HINIC_EVENT_LINK_ERR: + module_status_event(hwdev, cmd, buf_in, in_size, buf_out, + out_size); + break; + + case HINIC_EVENT_HILINK_INFO: + hinic_print_hilink_info(hwdev, buf_in, in_size, buf_out, + out_size); + break; + + case HINIC_EVENT_MGMT_FAULT: + fault_event_handler(hwdev, buf_in, 
in_size, buf_out, out_size); + break; + + case HINIC_EVENT_HEARTBEAT_LOST: + heartbeat_lost_event_handler(hwdev); + break; + + case HINIC_EVENT_SET_VF_COS: + vf_dcb = buf_in; + if (!vf_dcb) + break; + + hinic_notify_dcb_state_event(hwdev, &vf_dcb->state); + + break; + + case HINIC_EVENT_MGMT_WATCHDOG: + mgmt_watchdog_timeout_event_handler(hwdev, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_EVENT_MGMT_RESET: + mgmt_reset_event_handler(hwdev); + break; + + case HINIC_EVENT_MGMT_FMW_ACT_NTC: + hinic_fmw_act_ntc_handler(hwdev, buf_in, in_size, buf_out, + out_size); + + break; + + case HINIC_EVENT_MGMT_PCIE_DFX: + hinic_pcie_dfx_event_handler(hwdev, buf_in, in_size, buf_out, + out_size); + break; + + case HINIC_EVENT_MCTP_HOST_INFO: + hinic_mctp_get_host_info_event_handler(hwdev, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_EVENT_SFP_INFO_REPORT: + port_sfp_info_event(hwdev, buf_in, in_size, buf_out, out_size); + break; + + case HINIC_EVENT_SFP_ABS_REPORT: + port_sfp_abs_event(hwdev, buf_in, in_size, buf_out, out_size); + break; + + default: + sdk_warn(hwdev->dev_hdl, "Unsupported event %d to process\n", + cmd); + break; + } +} + +/* vf link change event + * vf fault report event, TBD + */ +static int vf_nic_event_handler(void *hwdev, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd); + + if (type == HINIC_EVENT_MAX_TYPE) { + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport L2NIC event: cmd %d\n", cmd); + *out_size = 0; + return -EINVAL; + } + + _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size); + + return 0; +} + +static int vf_comm_event_handler(void *hwdev, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + enum hinic_event_cmd type = __get_event_type(HINIC_MOD_COMM, cmd); + + if (type == HINIC_EVENT_MAX_TYPE) { + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport COMM event: cmd %d\n", cmd); + *out_size = 0; + return -EFAULT; + } + + _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size); + + return 0; +} + +/* pf link change event */ +static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + enum hinic_event_cmd type = __get_event_type(HINIC_MOD_L2NIC, cmd); + + if (type == HINIC_EVENT_MAX_TYPE) { + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport L2NIC event: cmd %d\n", cmd); + *out_size = 0; + return; + } + + _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size); +} + +static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + enum hinic_event_cmd type = __get_event_type(HINIC_MOD_HILINK, cmd); + + if (type == HINIC_EVENT_MAX_TYPE) { + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport HILINK event: cmd %d\n", cmd); + *out_size = 0; + return; + } + + _event_handler(hwdev, type, buf_in, in_size, buf_out, out_size); +} + +/* pf fault report event */ +static void pf_fault_event_handler(void *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HINIC_EVENT_MGMT_FAULT, buf_in, + in_size, buf_out, out_size); +} + +static void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HINIC_EVENT_MGMT_WATCHDOG, buf_in, + in_size, buf_out, out_size); +} + +static void mgmt_fmw_act_event_handler(void 
*hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HINIC_EVENT_MGMT_FMW_ACT_NTC, buf_in, + in_size, buf_out, out_size); +} + +static void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HINIC_EVENT_MGMT_PCIE_DFX, buf_in, + in_size, buf_out, out_size); +} + +static void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HINIC_EVENT_MCTP_HOST_INFO, buf_in, + in_size, buf_out, out_size); +} + +static void pf_event_register(struct hinic_hwdev *hwdev) +{ + if (hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) { + hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC, + hwdev, pf_nic_event_handler); + hinic_register_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK, + hwdev, + pf_hilink_event_handler); + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, + HINIC_MGMT_CMD_FAULT_REPORT, + pf_fault_event_handler); + + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, + HINIC_MGMT_CMD_WATCHDOG_INFO, + mgmt_watchdog_event_handler); + + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, + HINIC_MGMT_CMD_FMW_ACT_NTC, + mgmt_fmw_act_event_handler); + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, + HINIC_MGMT_CMD_PCIE_DFX_NTC, + mgmt_pcie_dfx_event_handler); + hinic_comm_recv_mgmt_self_cmd_reg(hwdev, + HINIC_MGMT_CMD_GET_HOST_INFO, + mgmt_get_mctp_event_handler); + } +} + +void hinic_event_register(void *dev, void *pri_handle, + hinic_event_handler callback) +{ + struct hinic_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; + + if (hinic_func_type(hwdev) != TYPE_VF) { + pf_event_register(hwdev); + } else { + hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC, + vf_nic_event_handler); + hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_COMM, + vf_comm_event_handler); + } +} + +void hinic_event_unregister(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; + + if (hinic_func_type(hwdev) != TYPE_VF) { + hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_L2NIC); + hinic_unregister_mgmt_msg_cb(hwdev, HINIC_MOD_HILINK); + hinic_comm_recv_up_self_cmd_unreg(hwdev, + HINIC_MGMT_CMD_FAULT_REPORT); + hinic_comm_recv_up_self_cmd_unreg(hwdev, + HINIC_MGMT_CMD_WATCHDOG_INFO); + hinic_comm_recv_up_self_cmd_unreg(hwdev, + HINIC_MGMT_CMD_FMW_ACT_NTC); + hinic_comm_recv_up_self_cmd_unreg(hwdev, + HINIC_MGMT_CMD_PCIE_DFX_NTC); + hinic_comm_recv_up_self_cmd_unreg(hwdev, + HINIC_MGMT_CMD_GET_HOST_INFO); + } else { + hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); + hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_COMM); + } +} + +/* 0 - heartbeat lost, 1 - normal */ +static u8 hinic_get_heartbeat_status(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + u32 attr1; + + /* suprise remove should be set 1 */ + if (!hinic_get_chip_present_flag(hwdev)) + return 1; + + attr1 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + hinic_set_chip_absent(hwdev); + hinic_force_complete_all(hwdev); + /* should notify chiperr to pangea when detecting pcie link down */ + return 1; + } + + return HINIC_AF1_GET(attr1, MGMT_INIT_STATUS); +} + +static void hinic_heartbeat_event_handler(struct work_struct *work) +{ + struct hinic_hwdev *hwdev = + container_of(work, struct hinic_hwdev, 
timer_work); + u16 out = 0; + + _event_handler(hwdev, HINIC_EVENT_HEARTBEAT_LOST, + NULL, 0, &out, &out); +} + +static void hinic_heartbeat_timer_handler(struct timer_list *t) +{ + struct hinic_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); + + if (!hinic_get_heartbeat_status(hwdev)) { + hwdev->heartbeat_lost = 1; + queue_work(hwdev->workq, &hwdev->timer_work); + } else { + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_PERIOD)); + } +} + +void hinic_init_heartbeat(struct hinic_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat_timer, hinic_heartbeat_timer_handler, 0); + + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(HINIC_HEARTBEAT_START_EXPIRE); + + add_timer(&hwdev->heartbeat_timer); + + INIT_WORK(&hwdev->timer_work, hinic_heartbeat_event_handler); +} + +void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev) +{ + del_timer_sync(&hwdev->heartbeat_timer); +} + +u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data) +{ + struct hinic_hwdev *hwdev = (struct hinic_hwdev *)handle; + u8 event_level = FAULT_LEVEL_MAX; + + switch (event) { + case HINIC_INTERNAL_TSO_FATAL_ERROR: + case HINIC_INTERNAL_LRO_FATAL_ERROR: + case HINIC_INTERNAL_TX_FATAL_ERROR: + case HINIC_INTERNAL_RX_FATAL_ERROR: + case HINIC_INTERNAL_OTHER_FATAL_ERROR: + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]); + sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n", + event, data); + event_level = FAULT_LEVEL_FATAL; + break; + default: + sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process\n", + event); + } + + return event_level; +} + +struct hinic_fast_recycled_mode { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 fast_recycled_mode; /* 1: enable fast recycle, available + * in dpdk mode, + * 0: normal mode, available in kernel + * nic mode + */ + u8 rsvd1; +}; + +int hinic_enable_fast_recycle(void *hwdev, bool enable) +{ + struct hinic_fast_recycled_mode fast_recycled_mode = {0}; + u16 out_size = sizeof(fast_recycled_mode); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &fast_recycled_mode.func_id); + if (err) + return err; + + fast_recycled_mode.fast_recycled_mode = enable ? 
1 : 0; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET, + &fast_recycled_mode, + sizeof(fast_recycled_mode), + &fast_recycled_mode, &out_size, 0); + if (err || fast_recycled_mode.status || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set recycle mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, fast_recycled_mode.status, out_size); + return -EFAULT; + } + + return 0; +} + +void hinic_set_pcie_order_cfg(void *handle) +{ + struct hinic_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_GLB_DMA_SO_RO_REPLACE_ADDR); + + if (HINIC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) { + val = HINIC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG); + val |= HINIC_GLB_DMA_SO_R0_SET(HINIC_DISABLE_ORDER, SO_RO_CFG); + hinic_hwif_write_reg(hwdev->hwif, + HINIC_GLB_DMA_SO_RO_REPLACE_ADDR, val); + } +} + +int _set_led_status(struct hinic_hwdev *hwdev, u8 port, + enum hinic_led_type type, + enum hinic_led_mode mode, u8 reset) +{ + struct hinic_led_info led_info = {0}; + u16 out_size = sizeof(led_info); + int err; + + led_info.port = port; + led_info.reset = reset; + + led_info.type = type; + led_info.mode = mode; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SET_LED_STATUS, + &led_info, sizeof(led_info), + &led_info, &out_size, 0); + if (err || led_info.status || !out_size) { + sdk_err(hwdev->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", + err, led_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_led_status(void *hwdev, u8 port, enum hinic_led_type type, + enum hinic_led_mode mode) +{ + int err; + + if (!hwdev) + return -EFAULT; + + err = _set_led_status(hwdev, port, type, mode, 0); + if (err) + return err; + + return 0; +} + +int hinic_reset_led_status(void *hwdev, u8 port) +{ + int err; + + if (!hwdev) + return -EFAULT; + + err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID, + HINIC_LED_MODE_INVALID, 1); + if (err) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to reset led status\n"); + return err; + } + + return 0; +} + +int hinic_get_board_info(void *hwdev, struct hinic_board_info *info) +{ + struct hinic_comm_board_info board_info = {0}; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, 0); + if (err || board_info.status || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n", + err, board_info.status, out_size); + return -EFAULT; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} +EXPORT_SYMBOL(hinic_get_board_info); + +int hinic_get_phy_init_status(void *hwdev, + enum phy_init_status_type *init_status) +{ + struct hinic_phy_init_status phy_info = {0}; + u16 out_size = sizeof(phy_info); + int err; + + if (!hwdev || !init_status) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_PHY_INIT_STATUS, + &phy_info, sizeof(phy_info), + &phy_info, &out_size, 0); + if ((phy_info.status != HINIC_MGMT_CMD_UNSUPPORTED && + phy_info.status) || err || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n", + err, phy_info.status, out_size); + return -EFAULT; + } + + *init_status = 
phy_info.init_status; + + return phy_info.status; +} + +int hinic_phy_init_status_judge(void *hwdev) +{ + enum phy_init_status_type init_status; + int ret; + unsigned long end; + + /* Not a phy board, so there is no phy init status to check */ + if (!HINIC_BOARD_IS_PHY((struct hinic_hwdev *)hwdev)) + return 0; + + end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT); + do { + ret = hinic_get_phy_init_status(hwdev, &init_status); + if (ret == HINIC_MGMT_CMD_UNSUPPORTED) + return 0; + else if (ret) + return -EFAULT; + + switch (init_status) { + case PHY_INIT_SUCCESS: + sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Phy init succeeded\n"); + return 0; + case PHY_NONSUPPORT: + sdk_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Phy init is not supported\n"); + return 0; + case PHY_INIT_FAIL: + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Phy init failed\n"); + return -EIO; + case PHY_INIT_DOING: + msleep(250); + break; + default: + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Phy init status is invalid, init_status: %d\n", + init_status); + return -EINVAL; + } + } while (time_before(jiffies, end)); + + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Phy init timed out\n"); + + return -ETIMEDOUT; +} + +static void hinic_set_mgmt_channel_status(void *handle, bool state) +{ + struct hinic_hwdev *hwdev = handle; + u32 val; + + if (!hwdev || hinic_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG)) + return; + + val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR); + val = HINIC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); + val |= HINIC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS); + + hinic_hwif_write_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR, val); +} + +int hinic_get_mgmt_channel_status(void *handle) +{ + struct hinic_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return true; + + if (hinic_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HINIC_FUNC_SUPP_DFX_REG)) + return false; + + val = hinic_hwif_read_reg(hwdev->hwif, HINIC_ICPL_RESERVD_ADDR); + + return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); +} + +static void hinic_enable_mgmt_channel(void *hwdev, void *buf_out) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_update_active *active_info = buf_out; + + if (!active_info || hinic_func_type(hwdev) == TYPE_VF || + !(dev->feature_cap & HINIC_FUNC_SUPP_DFX_REG)) + return; + + if (!active_info->status && + (active_info->update_status & HINIC_ACTIVE_STATUS_MASK)) { + active_info->update_status &= HINIC_ACTIVE_STATUS_CLEAR; + return; + } + + hinic_set_mgmt_channel_status(hwdev, false); +} + +int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_bios_cfg_cmd cfg = {0}; + u16 out_size = sizeof(cfg); + u16 func_id; + int err; + + if (!hwdev || !pf_bw_limit) + return -EINVAL; + + if (HINIC_FUNC_TYPE(dev) == TYPE_VF || + !FUNC_SUPPORT_RATE_LIMIT(hwdev)) + return 0; + + err = hinic_global_func_id_get(hwdev, &func_id); + if (err) + return err; + + cfg.func_valid = 1; + cfg.func_idx = (u8)func_id; + + cfg.op_code = HINIC_BIOS_CFG_GET | HINIC_BIOS_CFG_PF_BW_LIMIT; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT, + &cfg, sizeof(cfg), + &cfg, &out_size, 0); + if (err || cfg.status || !out_size) { + sdk_err(dev->dev_hdl, "Failed to get bios pf bandwidth limit, err: %d, status: 0x%x, out size: 0x%x\n", + err, cfg.status, out_size); + return -EIO; + } + + /* Check whether the data is valid */ + if (cfg.signature !=
0x19e51822) { + sdk_err(dev->dev_hdl, "Invalid bios configureration data, signature: 0x%x\n", + cfg.signature); + return -EINVAL; + } + + if (cfg.pf_bw_limit > 100) { + sdk_err(dev->dev_hdl, "Invalid bios cfg pf bandwidth limit: %d\n", + cfg.pf_bw_limit); + return -EINVAL; + } + + *pf_bw_limit = cfg.pf_bw_limit; + + return 0; +} + +bool hinic_get_ppf_status(void *hwdev) +{ + struct hinic_ppf_state ppf_state = {0}; + struct hinic_hwdev *dev = hwdev; + struct card_node *chip_node; + u16 out_size = sizeof(ppf_state); + int err; + + if (!hwdev) + return false; + + chip_node = (struct card_node *)dev->chip_node; + + if (!HINIC_IS_VF(dev)) + return chip_node->ppf_state; + + err = hinic_mbox_to_pf(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_PPF_STATE, + &ppf_state, sizeof(ppf_state), + &ppf_state, &out_size, 0); + if (err || ppf_state.status || !out_size) { + sdk_err(dev->dev_hdl, "Failed to get ppf state, err: %d, status: 0x%x, out size: 0x%x\n", + err, ppf_state.status, out_size); + return false; + } + + return (bool)ppf_state.ppf_state; +} + +void hinic_set_func_deinit_flag(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + + set_bit(HINIC_HWDEV_FUNC_DEINIT, &dev->func_state); +} + +int hinic_get_hw_pf_infos(void *hwdev, struct hinic_hw_pf_infos *infos) +{ + struct hinic_hw_pf_infos_cmd pf_infos = {0}; + u16 out_size = sizeof(pf_infos); + int err; + + if (!hwdev || !infos) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_GET_HW_PF_INFOS, + &pf_infos, sizeof(pf_infos), + &pf_infos, &out_size, 0); + if ((pf_infos.status != HINIC_MGMT_CMD_UNSUPPORTED && + pf_infos.status) || err || !out_size) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x\n", + err, pf_infos.status, out_size); + return -EFAULT; + } + + if (!pf_infos.status) + memcpy(infos, &pf_infos.infos, sizeof(*infos)); + + return pf_infos.status; +} +EXPORT_SYMBOL(hinic_get_hw_pf_infos); + +int hinic_set_ip_check(void *hwdev, bool ip_check_ctl) +{ + u32 val = 0; + int ret; + int i; + + if (!hwdev) + return -EINVAL; + + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + for (i = 0; i <= HINIC_IPSU_CHANNEL_NUM; i++) { + ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU, + (HINIC_IPSU_CHANNEL0_ADDR + + i * HINIC_IPSU_CHANNEL_OFFSET), &val); + if (ret) + return ret; + + val = be32_to_cpu(val); + if (ip_check_ctl) + val |= HINIC_IPSU_DIP_SIP_MASK; + else + val &= (~HINIC_IPSU_DIP_SIP_MASK); + + val = cpu_to_be32(val); + ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU, + (HINIC_IPSU_CHANNEL0_ADDR + + i * HINIC_IPSU_CHANNEL_OFFSET), val); + if (ret) + return ret; + } + return 0; +} + +int hinic_get_card_present_state(void *hwdev, bool *card_present_state) +{ + u32 addr, attr1; + + if (!hwdev || !card_present_state) + return -EINVAL; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(((struct hinic_hwdev *)hwdev)->hwif, addr); + if (attr1 == HINIC_PCIE_LINK_DOWN) { + sdk_warn(((struct hinic_hwdev *)hwdev)->dev_hdl, "Card is not present\n"); + *card_present_state = (bool)0; + } else { + *card_present_state = (bool)1; + } + + return 0; +} +EXPORT_SYMBOL(hinic_get_card_present_state); + +void hinic_disable_mgmt_msg_report(void *hwdev) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + + hinic_set_pf_status(hw_dev->hwif, HINIC_PF_STATUS_INIT); +} + +int hinic_set_vxlan_udp_dport(void *hwdev, u32 udp_port) +{ + u32 val = 0; + int ret; + + if (!hwdev) + return -EINVAL; + + if 
(hinic_func_type(hwdev) == TYPE_VF) + return 0; + + ret = hinic_api_csr_rd32(hwdev, HINIC_NODE_ID_IPSU, + HINIC_IPSURX_VXLAN_DPORT_ADDR, &val); + if (ret) + return ret; + + nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Update VxLAN UDP dest port: cur port:%u, new port:%u", + be32_to_cpu(val), udp_port); + + if (be32_to_cpu(val) == udp_port) + return 0; + + udp_port = cpu_to_be32(udp_port); + ret = hinic_api_csr_wr32(hwdev, HINIC_NODE_ID_IPSU, + HINIC_IPSURX_VXLAN_DPORT_ADDR, udp_port); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h new file mode 100644 index 0000000000000000000000000000000000000000..22a95ea184dfdd48edee354781546f6ffa3f5145 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.h @@ -0,0 +1,370 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HWDEV_H_ +#define HINIC_HWDEV_H_ + +#include "hinic_port_cmd.h" + +/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */ +#define HINIC_DEFAULT_WQ_PAGE_SIZE 0x40000 +#define HINIC_HW_WQ_PAGE_SIZE 0x1000 + +#define HINIC_MSG_TO_MGMT_MAX_LEN 2016 + +#define HINIC_MGMT_STATUS_ERR_OK 0 /* Ok */ +#define HINIC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */ +#define HINIC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */ +#define HINIC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */ +#define HINIC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */ +#define HINIC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */ +#define HINIC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */ +#define HINIC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */ +#define HINIC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */ +#define HINIC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */ +#define HINIC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */ +#define HINIC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */ +#define HINIC_MGMT_STATUS_ERR_FULL 12 /* Table full */ +#define HINIC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */ +#define HINIC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */ +#define HINIC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */ +#define HINIC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */ +#define HINIC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */ +#define HINIC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */ +#define HINIC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */ +#define HINIC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */ +#define HINIC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */ +#define HINIC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported */ + +#define HINIC_CHIP_PRESENT 1 +#define HINIC_CHIP_ABSENT 0 + +struct cfg_mgmt_info; +struct rdma_comp_resource; + +struct hinic_hwif; +struct hinic_nic_io; +struct hinic_wqs; +struct hinic_aeqs; +struct hinic_ceqs; +struct hinic_mbox_func_to_func; +struct hinic_msg_pf_to_mgmt; +struct hinic_cmdqs; +struct hinic_multi_host_mgmt; + +struct 
hinic_root_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hinic_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + struct hinic_page_addr *brm_srch_page_addr; +}; + +#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF + +#define HINIC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000) +#define HINIC_DEV_BUSY_ACTIVE_FW 0xFE + +#define HINIC_HW_WQ_NAME "hinic_hardware" +#define HINIC_HEARTBEAT_PERIOD 1000 +#define HINIC_HEARTBEAT_START_EXPIRE 5000 + +#define HINIC_CHIP_ERROR_TYPE_MAX 1024 +#define HINIC_CHIP_FAULT_SIZE \ + (HINIC_NODE_ID_MAX * FAULT_LEVEL_MAX * HINIC_CHIP_ERROR_TYPE_MAX) + +enum hinic_node_id { + HINIC_NODE_ID_CPI = 0, + HINIC_NODE_ID_IPSU = 4, + HINIC_NODE_ID_MGMT_HOST = 21, /* Host CPU send API to uP */ + HINIC_NODE_ID_MAX = 22 +}; + +#define HINIC_HWDEV_INIT_MODES_MASK ((1UL << HINIC_HWDEV_ALL_INITED) - 1) + +enum hinic_hwdev_func_state { + HINIC_HWDEV_FUNC_INITED = HINIC_HWDEV_ALL_INITED, + + HINIC_HWDEV_FUNC_DEINIT, + + HINIC_HWDEV_STATE_BUSY = 31, +}; + +struct hinic_cqm_stats { + atomic_t cqm_cmd_alloc_cnt; + atomic_t cqm_cmd_free_cnt; + atomic_t cqm_send_cmd_box_cnt; + atomic_t cqm_send_cmd_imm_cnt; + atomic_t cqm_db_addr_alloc_cnt; + atomic_t cqm_db_addr_free_cnt; + + atomic_t cqm_fc_srq_create_cnt; + atomic_t cqm_srq_create_cnt; + atomic_t cqm_rq_create_cnt; + + atomic_t cqm_qpc_mpt_create_cnt; + atomic_t cqm_nonrdma_queue_create_cnt; + atomic_t cqm_rdma_queue_create_cnt; + atomic_t cqm_rdma_table_create_cnt; + + atomic_t cqm_qpc_mpt_delete_cnt; + atomic_t cqm_nonrdma_queue_delete_cnt; + atomic_t cqm_rdma_queue_delete_cnt; + atomic_t cqm_rdma_table_delete_cnt; + + atomic_t cqm_func_timer_clear_cnt; + atomic_t cqm_func_hash_buf_clear_cnt; + + atomic_t cqm_scq_callback_cnt; + atomic_t cqm_ecq_callback_cnt; + atomic_t cqm_nocq_callback_cnt; + atomic_t cqm_aeq_callback_cnt[112]; +}; + +struct hinic_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct hinic_fault_event_stats { + atomic_t chip_fault_stats[HINIC_NODE_ID_MAX][FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct hinic_hw_stats { + atomic_t heart_lost_stats; + atomic_t nic_ucode_event_stats[HINIC_NIC_FATAL_ERROR_MAX]; + struct hinic_cqm_stats cqm_stats; + struct hinic_link_event_stats link_event_stats; + struct hinic_fault_event_stats fault_event_stats; +}; + +#define HINIC_NORMAL_HOST_CAP (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_RATE_LIMIT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC | \ + HINIC_FUNC_SUPP_ENCAP_TSO_CSUM) +#define HINIC_MULTI_BM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC) +#define HINIC_MULTI_BM_SLAVE (HINIC_FUNC_SRIOV_EN_DFLT | \ + HINIC_FUNC_SRIOV_NUM_FIX | \ + HINIC_FUNC_FORCE_LINK_UP | \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP) +#define HINIC_MULTI_VM_MASTER (HINIC_FUNC_MGMT | HINIC_FUNC_PORT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_SET_VF_MAC_VLAN | \ + HINIC_FUNC_SUPP_CHANGE_MAC) +#define HINIC_MULTI_VM_SLAVE (HINIC_FUNC_MGMT | \ + HINIC_FUNC_SUPP_DFX_REG | \ + 
HINIC_FUNC_SRIOV_EN_DFLT | \ + HINIC_FUNC_SUPP_RX_MODE | \ + HINIC_FUNC_SUPP_CHANGE_MAC | \ + HINIC_FUNC_OFFLOAD_OVS_UNSUPP) + +#define MULTI_HOST_CHIP_MODE_SHIFT 0 +#define MULTI_HOST_MASTER_MBX_STS_SHIFT 0x4 +#define MULTI_HOST_PRIV_DATA_SHIFT 0x8 + +#define MULTI_HOST_CHIP_MODE_MASK 0xF +#define MULTI_HOST_MASTER_MBX_STS_MASK 0xF +#define MULTI_HOST_PRIV_DATA_MASK 0xFFFF + +#define MULTI_HOST_REG_SET(val, member) \ + (((val) & MULTI_HOST_##member##_MASK) \ + << MULTI_HOST_##member##_SHIFT) +#define MULTI_HOST_REG_GET(val, member) \ + (((val) >> MULTI_HOST_##member##_SHIFT) \ + & MULTI_HOST_##member##_MASK) +#define MULTI_HOST_REG_CLEAR(val, member) \ + ((val) & (~(MULTI_HOST_##member##_MASK \ + << MULTI_HOST_##member##_SHIFT))) + +#define HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE 12 + +/* new version of roce qp not limited by power of 2 */ +#define HINIC_CMD_VER_ROCE_QP 1 +/* new version for add function id in multi-host */ +#define HINIC_CMD_VER_FUNC_ID 2 + +struct hinic_hwdev { + void *adapter_hdl; /* pointer to hinic_pcidev or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + u32 wq_page_size; + + void *cqm_hdl; + void *chip_node; + + struct hinic_hwif *hwif; /* include void __iomem *bar */ + struct hinic_nic_io *nic_io; + struct cfg_mgmt_info *cfg_mgmt; + struct rdma_comp_resource *rdma_comp_res; + struct hinic_wqs *wqs; /* for FC slq */ + struct mqm_addr_trans_tbl_info mqm_att; + + struct hinic_aeqs *aeqs; + struct hinic_ceqs *ceqs; + + struct hinic_mbox_func_to_func *func_to_func; + + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct hinic_cmdqs *cmdqs; + + struct hinic_page_addr page_pa0; + struct hinic_page_addr page_pa1; + + hinic_event_handler event_callback; + void *event_pri_handle; + + struct work_struct timer_work; + struct workqueue_struct *workq; + struct timer_list heartbeat_timer; + /* true represent heartbeat lost, false represent heartbeat restore */ + u32 heartbeat_lost; + int chip_present_flag; + struct hinic_hw_stats hw_stats; + u8 *chip_fault_stats; + + u32 statufull_ref_cnt; + ulong func_state; + + u64 feature_cap; /* enum hinic_func_cap */ + enum hinic_func_mode func_mode; + + struct hinic_multi_host_mgmt *mhost_mgmt; + + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. 
+ */ + struct semaphore ppf_sem; + void *ppf_hwdev; + + struct semaphore func_sem; + int func_ref; + struct hinic_board_info board_info; +#define MGMT_VERSION_MAX_LEN 32 + u8 mgmt_ver[MGMT_VERSION_MAX_LEN]; + u64 fw_support_func_flag; +}; + +int hinic_init_comm_ch(struct hinic_hwdev *hwdev); + +void hinic_uninit_comm_ch(struct hinic_hwdev *hwdev); + +int hinic_ppf_ext_db_init(void *dev); + +int hinic_ppf_ext_db_deinit(void *dev); + +enum hinic_set_arm_type { + HINIC_SET_ARM_CMDQ, + HINIC_SET_ARM_SQ, + HINIC_SET_ARM_TYPE_NUM, +}; + +int hinic_set_arm_bit(void *hwdev, enum hinic_set_arm_type q_type, u16 q_id); + +void hinic_set_chip_present(void *hwdev); +void hinic_force_complete_all(void *hwdev); + +void hinic_init_heartbeat(struct hinic_hwdev *hwdev); +void hinic_destroy_heartbeat(struct hinic_hwdev *hwdev); + +u8 hinic_nic_sw_aeqe_handler(void *handle, u8 event, u64 data); + +int hinic_enable_fast_recycle(void *hwdev, bool enable); +int hinic_l2nic_reset_base(struct hinic_hwdev *hwdev, u16 reset_flag); + +enum l2nic_resource_type { + RES_TYPE_NIC_FUNC = 0, + RES_TYPE_FLUSH_BIT, + RES_TYPE_PF_BW_CFG, + RES_TYPE_MQM, + RES_TYPE_SMF, + RES_TYPE_CMDQ_ROOTCTX, + RES_TYPE_SQ_CI_TABLE, + RES_TYPE_CEQ, + RES_TYPE_MBOX, + RES_TYPE_AEQ, +}; + +void hinic_notify_dcb_state_event(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state); + +int hinic_pf_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +int hinic_pf_send_clp_cmd(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit); + +bool hinic_mgmt_event_ack_first(u8 mod, u8 cmd); + +int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + u32 page_size); + +int hinic_phy_init_status_judge(void *hwdev); + +int hinic_hilink_info_show(struct hinic_hwdev *hwdev); +int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); +int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); + +int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +#define HINIC_SDI_MODE_UNKNOWN 0 +#define HINIC_SDI_MODE_BM 1 +#define HINIC_SDI_MODE_VM 2 +#define HINIC_SDI_MODE_MAX 3 +int hinic_get_sdi_mode(struct hinic_hwdev *hwdev, u16 *cur_mode); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.c b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c new file mode 100644 index 0000000000000000000000000000000000000000..626b76f17e6d82cf64f50d70c33a67e666f3e577 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.c @@ -0,0 +1,1005 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" + +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_eqs.h" + +#define WAIT_HWIF_READY_TIMEOUT 10000 + +#define HINIC_SELFTEST_RESULT 0x883C + +/* For UEFI driver, this function can only read BAR0 */ +u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_base + reg)); +} + +/* For UEFI driver, this function can only write BAR0 */ +void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_base + reg); +} + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + */ +static int hwif_ready(struct hinic_hwdev *hwdev) +{ + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HINIC_PCIE_LINK_DOWN) + return -EBUSY; + + if (!HINIC_AF1_GET(attr1, MGMT_INIT_STATUS)) + return -EBUSY; + + if (HINIC_IS_VF(hwdev)) { + if (!HINIC_AF1_GET(attr1, PF_INIT_STATUS)) + return -EBUSY; + } + + return 0; +} + +static int wait_hwif_ready(struct hinic_hwdev *hwdev) +{ + ulong timeout = 0; + + do { + if (!hwif_ready(hwdev)) + return 0; + + usleep_range(999, 1000); + timeout++; + } while (timeout <= WAIT_HWIF_READY_TIMEOUT); + + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + return -EBUSY; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + */ +static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2) +{ + hwif->attr.func_global_idx = HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HINIC_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HINIC_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HINIC_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HINIC_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HINIC_AF1_GET(attr1, PPF_IDX); + + hwif->attr.num_aeqs = BIT(HINIC_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HINIC_AF1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HINIC_AF1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HINIC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = HINIC_AF2_GET(attr2, + GLOBAL_VF_ID_OF_PF); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + */ +static void get_hwif_attr(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR2_ADDR; + attr2 = hinic_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1, attr2); +} + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status) +{ + u32 attr5 = HINIC_AF5_SET(status, PF_STATUS); + u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR; + + if (hwif->attr.func_type == TYPE_VF) + return; + + hinic_hwif_write_reg(hwif, addr, attr5); +} + +enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif 
*hwif) +{ + u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); + + return HINIC_AF5_GET(attr5, PF_STATUS); +} + +enum hinic_doorbell_ctrl hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, DOORBELL_CTRL); +} + +enum hinic_outbound_ctrl hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_AF4_GET(attr4, OUTBOUND_CTRL); +} + +void hinic_enable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_disable_doorbell(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_enable_outbound(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL); + attr4 |= HINIC_AF4_SET(ENABLE_OUTBOUND, OUTBOUND_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +void hinic_disable_outbound(struct hinic_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic_hwif_read_reg(hwif, addr); + + attr4 = HINIC_AF4_CLEAR(attr4, OUTBOUND_CTRL); + attr4 |= HINIC_AF4_SET(DISABLE_OUTBOUND, OUTBOUND_CTRL); + + hinic_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + */ +static void set_ppf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_PPF_ELECTION_ADDR; + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hinic_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hinic_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + */ +static void get_mpf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hinic_hwif_read_reg(hwif, addr); + attr->mpf_idx = HINIC_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + */ +static void set_mpf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hinic_hwif_read_reg(hwif, addr); + + val = HINIC_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HINIC_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + hinic_hwif_write_reg(hwif, addr, val); +} + +static void 
init_db_area_idx(struct hinic_hwif *hwif) +{ + struct hinic_free_db_area *free_db_area; + u32 db_max_areas; + u32 i; + + free_db_area = &hwif->free_db_area; + db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + + for (i = 0; i < db_max_areas; i++) + free_db_area->db_idx[i] = i; + + free_db_area->num_free = db_max_areas; + + spin_lock_init(&free_db_area->idx_lock); +} + +static int get_db_idx(struct hinic_hwif *hwif, u32 *idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + u32 pos; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + +retry: + if (free_db_area->num_free == 0) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + + free_db_area->num_free--; + + pos = free_db_area->alloc_pos++; + pos &= db_max_areas - 1; + + pg_idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = 0xFFFFFFFF; + + /* pg_idx out of range */ + if (pg_idx >= db_max_areas) + goto retry; + + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic_hwif *hwif, u32 idx) +{ + struct hinic_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas = hwif->db_size / HINIC_DB_PAGE_SIZE; + u32 pos; + + if (idx >= db_max_areas) + return; + + spin_lock(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos &= db_max_areas - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + spin_unlock(&free_db_area->idx_lock); +} + +void hinic_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + +#if defined(__aarch64__) + /* No need to unmap */ +#else + if (dwqe_base && hwif->chip_mode == CHIP_MODE_NORMAL) + io_mapping_unmap(dwqe_base); +#endif + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic_free_db_addr); + +int hinic_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hinic_hwif *hwif; + u64 offset; + u32 idx; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HINIC_DB_PAGE_SIZE; + + if (!dwqe_base || hwif->chip_mode != CHIP_MODE_NORMAL) + return 0; + + offset = ((u64)idx) << PAGE_SHIFT; + +#if defined(__aarch64__) + *dwqe_base = hwif->dwqe_mapping + offset; +#else + *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset, + HINIC_DB_PAGE_SIZE); +#endif + + if (!(*dwqe_base)) { + hinic_free_db_addr(hwdev, *db_base, NULL); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_alloc_db_addr); + +void hinic_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base_phy); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic_free_db_phy_addr); + +int hinic_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) +{ + struct hinic_hwif *hwif; + u32 idx; + int err; + + if (!hwdev || !db_base || !dwqe_base) + return -EINVAL; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base_phy + idx * HINIC_DB_PAGE_SIZE; + + if (hwif->chip_mode == CHIP_MODE_NORMAL) + *dwqe_base = *db_base + HINIC_DB_DWQE_SIZE; + + return 
0; +} +EXPORT_SYMBOL(hinic_alloc_db_phy_addr); + +enum hinic_msix_state hinic_get_msix_state(void *hwdev, u16 msix_idx) +{ + struct hinic_hwif *hwif = NULL; + u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + if (!hwdev) + return HINIC_MSIX_DISABLE; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + mask_bits = readl(hwif->intr_regs_base + offset); + + return !!(mask_bits & HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT); +} + +void hinic_set_msix_state(void *hwdev, u16 msix_idx, enum hinic_msix_state flag) +{ + struct hinic_hwif *hwif; + u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + if (!hwdev) + return; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} +EXPORT_SYMBOL(hinic_set_msix_state); + +static void disable_all_msix(struct hinic_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic_set_msix_state(hwdev, i, HINIC_MSIX_DISABLE); +} + +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states) +{ + enum hinic_doorbell_ctrl db_ctrl; + u32 cnt = 0; + + if (!hwif) + return -EFAULT; + + while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + if (db_ctrl == states) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} +EXPORT_SYMBOL(wait_until_doorbell_flush_states); + +static int wait_until_doorbell_and_outbound_enabled(struct hinic_hwif *hwif) +{ + enum hinic_doorbell_ctrl db_ctrl; + enum hinic_outbound_ctrl outbound_ctrl; + u32 cnt = 0; + + while (cnt < HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hinic_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && + db_ctrl == ENABLE_DOORBELL) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} + +static void __print_selftest_reg(struct hinic_hwdev *hwdev) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HINIC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "PCIE is link down\n"); + return; + } + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwdev->hwif, addr); + if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF && + !HINIC_AF0_GET(attr0, PCI_INTF_IDX)) + sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n", + hinic_hwif_read_reg(hwdev->hwif, + HINIC_SELFTEST_RESULT)); +} + +/** + * hinic_init_hwif - initialize the hw interface + * @hwdev: the pointer to hw device + * @cfg_reg_base: configuration base address + * Return: 0 - success, negative - failure + */ +int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping) +{ + struct hinic_hwif *hwif; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + hwif->cfg_regs_base = cfg_reg_base; + hwif->intr_regs_base = intr_reg_base; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->dwqe_mapping = dwqe_mapping; + + hwif->db_size = hinic_get_db_size(cfg_reg_base, &hwif->chip_mode); + + 
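+ /* db_size and chip_mode were just read from BAR0 by hinic_get_db_size():
+ * normal mode uses the 512K doorbell/DWQE layout, bmgw/vmgw VF modes use 256K
+ */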
sdk_info(hwdev->dev_hdl, "Doorbell size: 0x%x, chip mode: %d\n", + hwif->db_size, hwif->chip_mode); + + init_db_area_idx(hwif); + + err = wait_hwif_ready(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); + __print_selftest_reg(hwdev); + goto hwif_ready_err; + } + + get_hwif_attr(hwif); + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n"); + goto hwif_ready_err; + } + + if (!HINIC_IS_VF(hwdev)) { + set_ppf(hwif); + + if (HINIC_IS_PPF(hwdev)) + set_mpf(hwif); + + get_mpf(hwif); + } + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, "global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n", + hwif->attr.func_global_idx, hwif->attr.func_type, + hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, + hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + kfree(hwif); + + return err; +} + +/** + * hinic_free_hwif - free the hw interface + * @hwdev: the pointer to hw device + */ +void hinic_free_hwif(struct hinic_hwdev *hwdev) +{ + kfree(hwdev->hwif); +} + +int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic_dma_addr_align *mem_align) +{ + void *vaddr, *align_vaddr; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hinic_dma_free_coherent_align(void *dev_hdl, + struct hinic_dma_addr_align *mem_align) +{ + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} + +u16 hinic_global_func_id(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} +EXPORT_SYMBOL(hinic_global_func_id); + +/** + * get function id from register,used by sriov hot migration process + * @hwdev: the pointer to hw device + */ +u16 hinic_global_func_id_hw(void *hwdev) +{ + u32 addr, attr0; + struct hinic_hwdev *dev; + + dev = (struct hinic_hwdev *)hwdev; + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(dev->hwif, addr); + + return HINIC_AF0_GET(attr0, FUNC_GLOBAL_IDX); +} + +static int func_busy_state_check(struct hinic_hwdev *hwdev) +{ + u32 func_state; + int cycle; + + /* set BUSY before src vm suspend and clear it before dst vm resume */ + cycle = PIPE_CYCLE_MAX; + func_state = hinic_func_busy_state_get(hwdev); + while (func_state && cycle) { + msleep(20); + cycle--; + if (!cycle) { + sdk_err(hwdev->dev_hdl, "busy_state suspend timeout"); + return -ETIMEDOUT; + } + + func_state = hinic_func_busy_state_get(hwdev); + } + + return 0; +} + +int hinic_func_own_get(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + u32 
func_state; + int err; + + if (!HINIC_IS_VF(dev)) + return 0; + +restart: + down(&dev->func_sem); + + dev->func_ref++; + hinic_func_own_bit_set(dev, 1); + + func_state = hinic_func_busy_state_get(hwdev); + if (func_state) { + dev->func_ref--; + if (dev->func_ref == 0) + hinic_func_own_bit_set(dev, 0); + + up(&dev->func_sem); + err = func_busy_state_check(dev); + if (err) + return err; + goto restart; + } + + up(&dev->func_sem); + return 0; +} + +void hinic_func_own_free(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + if (!HINIC_IS_VF(dev)) + return; + + down(&dev->func_sem); + dev->func_ref--; + if (dev->func_ref == 0) + hinic_func_own_bit_set(dev, 0); + + up(&dev->func_sem); +} + +/** + * get function id, used by sriov hot migratition process. + * @hwdev: the pointer to hw device + * @func_id: function id + */ +int hinic_global_func_id_get(void *hwdev, u16 *func_id) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + int err; + + /* only vf get func_id from chip reg for sriov migrate */ + if (!HINIC_IS_VF(dev)) { + *func_id = hinic_global_func_id(hwdev); + return 0; + } + + err = func_busy_state_check(dev); + if (err) + return err; + + *func_id = hinic_global_func_id_hw(dev); + return 0; +} + +u16 hinic_intr_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_irqs; +} +EXPORT_SYMBOL(hinic_intr_num); + +u8 hinic_pf_id_of_vf(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.port_to_port_idx; +} +EXPORT_SYMBOL(hinic_pf_id_of_vf); + +u16 hinic_pf_id_of_vf_hw(void *hwdev) +{ + u32 addr, attr0; + struct hinic_hwdev *dev; + + dev = (struct hinic_hwdev *)hwdev; + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(dev->hwif, addr); + + return HINIC_AF0_GET(attr0, P2P_IDX); +} + +u8 hinic_pcie_itf_id(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hinic_pcie_itf_id); + +u8 hinic_vf_in_pf(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.vf_in_pf; +} +EXPORT_SYMBOL(hinic_vf_in_pf); + +enum func_type hinic_func_type(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} +EXPORT_SYMBOL(hinic_func_type); + +u8 hinic_ceq_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_ceqs; +} +EXPORT_SYMBOL(hinic_ceq_num); + +u8 hinic_dma_attr_entry_num(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.num_dma_attr; +} +EXPORT_SYMBOL(hinic_dma_attr_entry_num); + +u16 hinic_glb_pf_vf_offset(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.global_vf_id_of_pf; +} +EXPORT_SYMBOL(hinic_glb_pf_vf_offset); + +u8 hinic_mpf_idx(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.mpf_idx; +} +EXPORT_SYMBOL(hinic_mpf_idx); + +u8 hinic_ppf_idx(void *hwdev) +{ + struct hinic_hwif *hwif; + + if (!hwdev) + return 0; + + 
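+ /* ppf_idx was cached in hwif->attr when the function attributes were read at init */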
hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} +EXPORT_SYMBOL(hinic_ppf_idx); + +#define CEQ_CTRL_0_CHIP_MODE_SHIFT 26 +#define CEQ_CTRL_0_CHIP_MODE_MASK 0xFU +#define CEQ_CTRL_0_GET(val, member) \ + (((val) >> CEQ_CTRL_0_##member##_SHIFT) & \ + CEQ_CTRL_0_##member##_MASK) + +/** + * hinic_get_db_size - get db size ceq ctrl: bit26~29: uP write vf mode is + * normal(0x0), bmgw(0x1) or vmgw(0x2) and normal mode db size is 512k, + * bmgw or vmgw mode db size is 256k + * @cfg_reg_base: pointer to cfg_reg_base + * @chip_mode: pointer to chip_mode + */ +u32 hinic_get_db_size(void *cfg_reg_base, enum hinic_chip_mode *chip_mode) +{ + u32 attr0, ctrl0; + + attr0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base + + HINIC_CSR_FUNC_ATTR0_ADDR)); + + /* PF is always normal mode & db size is 512K */ + if (HINIC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF) { + *chip_mode = CHIP_MODE_NORMAL; + return HINIC_DB_DWQE_SIZE; + } + + ctrl0 = be32_to_cpu(readl((u8 __iomem *)cfg_reg_base + + HINIC_CSR_CEQ_CTRL_0_ADDR(0))); + + *chip_mode = CEQ_CTRL_0_GET(ctrl0, CHIP_MODE); + + switch (*chip_mode) { + case CHIP_MODE_VMGW: + case CHIP_MODE_BMGW: + return HINIC_GW_VF_DB_SIZE; + default: + return HINIC_DB_DWQE_SIZE; + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwif.h b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h new file mode 100644 index 0000000000000000000000000000000000000000..3a58a44b6bfebef96f49b48aa6f41c529f8a93e4 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwif.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HWIF_H +#define HINIC_HWIF_H + +#include "hinic_hwdev.h" + +#define HINIC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 + +struct hinic_free_db_area { + u32 db_idx[HINIC_DB_MAX_AREAS]; + + u32 num_free; + + u32 alloc_pos; + u32 return_pos; + + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +struct hinic_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u8 num_dma_attr; /* max: 2 ^ 6 */ + + u16 global_vf_id_of_pf; +}; + +struct hinic_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u64 db_base_phy; + u8 __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + struct hinic_free_db_area free_db_area; + + struct hinic_func_attr attr; + + void *pdev; + enum hinic_chip_mode chip_mode; + u32 db_size; +}; + +struct hinic_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg); + +void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val); + +void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status); + +enum hinic_pf_status hinic_get_pf_status(struct hinic_hwif *hwif); + +enum hinic_doorbell_ctrl + hinic_get_doorbell_ctrl_status(struct hinic_hwif *hwif); + +enum hinic_outbound_ctrl + hinic_get_outbound_ctrl_status(struct hinic_hwif *hwif); + +void hinic_enable_doorbell(struct hinic_hwif *hwif); + +void hinic_disable_doorbell(struct hinic_hwif *hwif); + +void hinic_enable_outbound(struct hinic_hwif *hwif); + +void hinic_disable_outbound(struct hinic_hwif *hwif); + +int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping); + +void hinic_free_hwif(struct hinic_hwdev *hwdev); + +int wait_until_doorbell_flush_states(struct hinic_hwif *hwif, + enum hinic_doorbell_ctrl states); + +int hinic_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic_dma_addr_align *mem_align); + +void hinic_dma_free_coherent_align(void *dev_hdl, + struct hinic_dma_addr_align *mem_align); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c new file mode 100644 index 0000000000000000000000000000000000000000..f95b857c11352ec98e06655e41edb5cd4258c1dc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c @@ -0,0 +1,2938 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_lld.h" +#include "hinic_pci_id_tbl.h" +#include "hinic_nic_dev.h" +#include "hinic_sriov.h" +#include "hinic_dbgtool_knl.h" +#include "hinic_nictool.h" + +#define HINIC_PCI_CFG_REG_BAR 0 +#define HINIC_PCI_INTR_REG_BAR 2 +#define HINIC_PCI_DB_BAR 4 +#define HINIC_PCI_VENDOR_ID 0x19e5 + +#define SELF_TEST_BAR_ADDR_OFFSET 0x883c + +#define HINIC_SECOND_BASE 1000 +#define HINIC_SYNC_YEAR_OFFSET 1900 +#define HINIC_SYNC_MONTH_OFFSET 1 +#define HINIC_MINUTE_BASE 60 +#define HINIC_WAIT_TOOL_CNT_TIMEOUT 10000 +#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 + +#define HINIC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver" +#define HINICVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC_DRV_DESC); +MODULE_VERSION(HINIC_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_PCI_IOV +static bool disable_vf_load; +module_param(disable_vf_load, bool, 0444); +MODULE_PARM_DESC(disable_vf_load, + "Disable virtual functions probe or not - default is false"); +#endif /* CONFIG_PCI_IOV */ + +enum { + HINIC_FUNC_IN_REMOVE = BIT(0), + HINIC_FUNC_PRB_ERR = BIT(1), + HINIC_FUNC_PRB_DELAY = BIT(2), +}; + +/* Structure pcidev private */ +struct hinic_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct hinic_lld_dev lld_dev; + /* Record the service object address, + * such as hinic_dev and toe_dev, fc_dev + */ + void *uld_dev[SERVICE_T_MAX]; + /* Record the service object name */ + char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; + /* It is a the global variable for driver to manage + * all function device linked list + */ + struct list_head node; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + u64 db_base_phy; + void __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + struct hinic_sriov_info sriov_info; + + u32 init_state; + /* setted when uld driver processing event */ + unsigned long state; + struct pci_device_id id; + + unsigned long flag; + + struct work_struct slave_nic_work; + struct workqueue_struct *slave_nic_init_workq; + struct delayed_work slave_nic_init_dwork; + enum hinic_chip_mode chip_mode; + bool nic_cur_enable; + bool nic_des_enable; + + struct timer_list syncfw_time_timer; +}; + +#define HINIC_EVENT_PROCESS_TIMEOUT 10000 + +#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0) +#define SET_BIT(num, n) ((num) | (1UL << (n))) +#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n)))) + +#define MAX_CARD_ID 64 +static u64 card_bit_map; +LIST_HEAD(g_hinic_chip_list); +struct hinic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; +static const char *s_uld_name[SERVICE_T_MAX] = { + "nic", "ovs", "roce", "toe", "iwarp", "fc", "fcoe", "migrate"}; + +enum hinic_lld_status { + HINIC_NODE_CHANGE = BIT(0), +}; + +struct hinic_lld_lock { + /* lock for chip list */ + struct mutex lld_mutex; + unsigned long status; + atomic_t dev_ref_cnt; +}; + +static struct hinic_lld_lock g_lld_lock; + +#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ + +/* node in chip_node will changed, tools or driver can't get node + * during this situation + */ +static void lld_lock_chip_node(void) +{ + u32 loop_cnt; + + mutex_lock(&g_lld_lock.lld_mutex); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) { + if (!test_and_set_bit(HINIC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED) + pr_warn("Wait for lld node change complete timeout when try to get lld lock\n"); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) { + if (!atomic_read(&g_lld_lock.dev_ref_cnt)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld dev unused for %us, reference count: %d\n", + loop_cnt / 1000, + atomic_read(&g_lld_lock.dev_ref_cnt)); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY) + pr_warn("Wait for lld dev unused timeout\n"); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +static void lld_unlock_chip_node(void) +{ + clear_bit(HINIC_NODE_CHANGE, &g_lld_lock.status); +} + +/* When tools or other drivers want to get node of chip_node, use this function + * to prevent node be freed + */ +static void lld_dev_hold(void) +{ + u32 loop_cnt = 0; + + /* ensure there have not any chip node in changing */ + mutex_lock(&g_lld_lock.lld_mutex); + + while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) { + if (!test_bit(HINIC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT) + pr_warn("Wait lld node change complete timeout when try to hode lld dev\n"); + + atomic_inc(&g_lld_lock.dev_ref_cnt); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +static void lld_dev_put(void) +{ + atomic_dec(&g_lld_lock.dev_ref_cnt); +} + +static void hinic_lld_lock_init(void) +{ + mutex_init(&g_lld_lock.lld_mutex); + atomic_set(&g_lld_lock.dev_ref_cnt, 0); +} + +static atomic_t tool_used_cnt; + +void hinic_tool_cnt_inc(void) +{ + atomic_inc(&tool_used_cnt); +} + +void hinic_tool_cnt_dec(void) +{ + atomic_dec(&tool_used_cnt); +} + +static int attach_uld(struct hinic_pcidev *dev, enum hinic_service_type type, + struct hinic_uld_info *uld_info) +{ + void *uld_dev = NULL; + int err; + + mutex_lock(&dev->pdev_mutex); + + if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) { + sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n"); + err = -EFAULT; + goto out_unlock; + } + + if (dev->uld_dev[type]) { + sdk_err(&dev->pcidev->dev, + "%s driver 
has attached to pcie device\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + if ((hinic_get_func_mode(dev->hwdev) == FUNC_MOD_NORMAL_HOST) && + type == SERVICE_T_OVS && !hinic_support_ovs(dev->hwdev, NULL)) { + sdk_warn(&dev->pcidev->dev, "Dev not support %s\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); + if (err || !uld_dev) { + sdk_err(&dev->pcidev->dev, + "Failed to add object for %s driver to pcie device\n", + s_uld_name[type]); + goto probe_failed; + } + + dev->uld_dev[type] = uld_dev; + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, + "Attach %s driver to pcie device succeed\n", s_uld_name[type]); + return 0; + +probe_failed: +out_unlock: + mutex_unlock(&dev->pdev_mutex); + + return err; +} + +static void detach_uld(struct hinic_pcidev *dev, enum hinic_service_type type) +{ + struct hinic_uld_info *uld_info = &g_uld_info[type]; + u32 cnt = 0; + + mutex_lock(&dev->pdev_mutex); + if (!dev->uld_dev[type]) { + mutex_unlock(&dev->pdev_mutex); + return; + } + + while (cnt < HINIC_EVENT_PROCESS_TIMEOUT) { + if (!test_and_set_bit(type, &dev->state)) + break; + usleep_range(900, 1000); + cnt++; + } + + uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); + dev->uld_dev[type] = NULL; + if (cnt < HINIC_EVENT_PROCESS_TIMEOUT) + clear_bit(type, &dev->state); + + sdk_info(&dev->pcidev->dev, + "Detach %s driver from pcie device succeed\n", + s_uld_name[type]); + mutex_unlock(&dev->pdev_mutex); +} + +static void attach_ulds(struct hinic_pcidev *dev) +{ + enum hinic_service_type type; + + for (type = SERVICE_T_OVS; type < SERVICE_T_MAX; type++) { + if (g_uld_info[type].probe) + attach_uld(dev, type, &g_uld_info[type]); + } +} + +static void detach_ulds(struct hinic_pcidev *dev) +{ + enum hinic_service_type type; + + for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { + if (g_uld_info[type].probe) + detach_uld(dev, type); + } +} + +int hinic_register_uld(enum hinic_service_type type, + struct hinic_uld_info *uld_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to register\n", + type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid information of %s driver to register\n", + s_uld_name[type]); + return -EINVAL; + } + + lld_dev_hold(); + + if (g_uld_info[type].probe) { + pr_err("%s driver has registered\n", s_uld_name[type]); + lld_dev_put(); + return -EINVAL; + } + + memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (attach_uld(dev, type, uld_info)) { + sdk_err(&dev->pcidev->dev, + "Attach %s driver to pcie device failed\n", + s_uld_name[type]); + continue; + } + } + } + + lld_dev_put(); + + pr_info("Register %s driver succeed\n", s_uld_name[type]); + return 0; +} +EXPORT_SYMBOL(hinic_register_uld); + +void hinic_unregister_uld(enum hinic_service_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + struct hinic_uld_info *uld_info; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to unregister\n", + type); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + detach_uld(dev, type); + } + } + + uld_info = &g_uld_info[type]; + memset(uld_info, 0, 
sizeof(*uld_info)); + lld_dev_put(); +} +EXPORT_SYMBOL(hinic_unregister_uld); + +#define HINIC_SYNFW_TIME_PERIOD (60 * 60 * 1000) + +static void hinic_syncfw_timer_handler(struct timer_list *t) +{ + struct hinic_pcidev *pci_adapter = from_timer(pci_adapter, t, + syncfw_time_timer); + struct timeval tv = {0}; + u64 tv_msec; + + do_gettimeofday(&tv); + + tv_msec = tv.tv_sec * MSEC_PER_SEC + + tv.tv_usec / USEC_PER_MSEC; + + hinic_sync_time_async(pci_adapter->hwdev, tv_msec); + mod_timer(&pci_adapter->syncfw_time_timer, + jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD)); +} + +void hinic_init_syncfw_timer(struct hinic_pcidev *pci_adapter) +{ + if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST || + hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + timer_setup(&pci_adapter->syncfw_time_timer, + hinic_syncfw_timer_handler, 0); + + pci_adapter->syncfw_time_timer.expires = + jiffies + msecs_to_jiffies(HINIC_SYNFW_TIME_PERIOD); + + add_timer(&pci_adapter->syncfw_time_timer); +} + +void hinic_destroy_syncfw_timer(struct hinic_pcidev *pci_adapter) +{ + if (hinic_get_func_mode(pci_adapter->hwdev) != FUNC_MOD_NORMAL_HOST || + hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + del_timer_sync(&pci_adapter->syncfw_time_timer); +} + +static void hinic_sync_time_to_fmw(struct hinic_pcidev *pdev_pri) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = tv.tv_sec * HINIC_SECOND_BASE + + tv.tv_usec / HINIC_SECOND_BASE; + err = hinic_sync_time(pdev_pri->hwdev, tv_msec); + if (err) { + sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm(tv.tv_sec, &rt_time); + sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. 
UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + rt_time.tm_year + HINIC_SYNC_YEAR_OFFSET, + rt_time.tm_mon + HINIC_SYNC_MONTH_OFFSET, + rt_time.tm_mday, rt_time.tm_hour, + rt_time.tm_min, rt_time.tm_sec); + } +} + +enum hinic_ver_incompat_mode { + /* New driver can't compat with old firmware */ + VER_INCOMP_NEW_DRV_OLD_FW, + /* New Firmware can't compat with old driver */ + VER_INCOMP_NEW_FW_OLD_DRV, +}; + +struct hinic_version_incompat { + char *version; + char *advise; + u32 incompat_mode; +}; + +struct hinic_version_incompat ver_incompat_table[] = { + { + .version = "1.2.2.0", + .advise = "Mechanism of cos changed", + .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW), + }, + { + .version = "1.2.3.0", + .advise = "Driver get sevice mode from firmware", + .incompat_mode = BIT(VER_INCOMP_NEW_DRV_OLD_FW), + }, +}; + +#define MAX_VER_FIELD_LEN 4 +#define MAX_VER_SPLIT_NUM 4 +static void __version_split(const char *str, int *split_num, + char rst[][MAX_VER_FIELD_LEN]) +{ + const char delim = '.'; + const char *src; + int cnt = 0; + u16 idx, end, token_len; + + idx = 0; + while (idx < strlen(str)) { + for (end = idx; end < strlen(str); end++) { + if (*(str + end) == delim) + break; /* find */ + } + + if (end != idx) { + token_len = min_t(u16, end - idx, + MAX_VER_FIELD_LEN - 1); + src = str + idx; + memcpy(rst[cnt], src, token_len); + if (++cnt >= MAX_VER_SPLIT_NUM) + break; + } + + idx = end + 1; /* skip delim */ + } + + *split_num = cnt; +} + +int hinic_version_cmp(char *ver1, char *ver2) +{ + char ver1_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} }; + char ver2_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} }; + int split1_num, split2_num; + int ver1_num, ver2_num; + int split; + + /* To compat older firmware version */ + if (ver1[0] == 'B') + return -1; + + if (ver2[0] == 'B') + return 1; + + __version_split(ver1, &split1_num, ver1_split); + __version_split(ver2, &split2_num, ver2_split); + + if (split1_num != MAX_VER_SPLIT_NUM || + split2_num != MAX_VER_SPLIT_NUM) { + pr_err("Invalid version %s or %s\n", ver1, ver2); + return 0; + } + + for (split = 0; split < MAX_VER_SPLIT_NUM; split++) { + ver1_num = local_atoi(ver1_split[split]); + ver2_num = local_atoi(ver2_split[split]); + + if (ver1_num > ver2_num) + return 1; + else if (ver1_num < ver2_num) + return -1; + } + + return 0; +} + +static int __version_mismatch(struct hinic_pcidev *pcidev, char *cur_fw_ver, + char *cur_drv_ver, + struct hinic_version_incompat *ver_incompat, + int start_entry) +{ + struct hinic_version_incompat *ver_incmp_tmp; + int fw_ver_comp; + int i, num_entry; + + fw_ver_comp = hinic_version_cmp(cur_fw_ver, ver_incompat->version); + if (fw_ver_comp <= 0) { + /* Check if new driver compatible with old fw */ + for (i = start_entry; i >= 0; i--) { + ver_incmp_tmp = &ver_incompat_table[i]; + if (hinic_version_cmp(cur_fw_ver, + ver_incmp_tmp->version) >= 0) + break; /* Not need to check anymore */ + + if (ver_incmp_tmp->incompat_mode & + BIT(VER_INCOMP_NEW_DRV_OLD_FW)) { + sdk_err(&pcidev->pcidev->dev, + "Version incompatible: %s, please update firmware to %s, or use %s driver\n", + ver_incmp_tmp->advise, + cur_drv_ver, cur_fw_ver); + return -EINVAL; + } + } + + goto compatible; + } + + /* check if old driver compatible with new firmware */ + num_entry = (int)sizeof(ver_incompat_table) / + (int)sizeof(ver_incompat_table[0]); + for (i = start_entry + 1; i < num_entry; i++) { + ver_incmp_tmp = &ver_incompat_table[i]; + + if (hinic_version_cmp(cur_fw_ver, ver_incmp_tmp->version) < 0) + break; /* Not need to check 
anymore */ + + if (ver_incmp_tmp->incompat_mode & + BIT(VER_INCOMP_NEW_FW_OLD_DRV)) { + sdk_err(&pcidev->pcidev->dev, + "Version incompatible: %s, please update driver to %s, or use %s firmware\n", + ver_incmp_tmp->advise, + cur_fw_ver, cur_drv_ver); + return -EINVAL; + } + } + +compatible: + if (hinic_version_cmp(cur_drv_ver, cur_fw_ver) < 0) + sdk_info(&pcidev->pcidev->dev, + "Firmware newer than driver, you'd better update driver to %s\n", + cur_fw_ver); + else + sdk_info(&pcidev->pcidev->dev, + "Driver newer than firmware, you'd better update firmware to %s\n", + cur_drv_ver); + + return 0; +} + +static void hinic_ignore_minor_version(char *version) +{ + char ver_split[MAX_VER_SPLIT_NUM][MAX_VER_FIELD_LEN] = { {0} }; + int max_ver_len, split_num = 0; + int err; + + __version_split(version, &split_num, ver_split); + if (split_num != MAX_VER_SPLIT_NUM) + return; + + max_ver_len = (int)strlen(version) + 1; + memset(version, 0, max_ver_len); + + err = snprintf(version, max_ver_len, "%s.%s.%s.0", + ver_split[0], ver_split[1], ver_split[2]); + if (err <= 0 || err >= max_ver_len) + pr_err("Failed to snprintf version, function return(%d) and dest_len(%d)\n", + err, max_ver_len); +} + +static int hinic_detect_version_compatible(struct hinic_pcidev *pcidev) +{ + struct hinic_fw_version fw_ver = { {0} }; + struct hinic_version_incompat *ver_incompat; + char drv_ver[MAX_VER_SPLIT_NUM * MAX_VER_FIELD_LEN] = {0}; + int idx, num_entry, drv_ver_len; + int ver_mismatch; + int err; + + err = hinic_get_fw_version(pcidev->hwdev, &fw_ver); + if (err) { + sdk_err(&pcidev->pcidev->dev, + "Failed to get firmware version\n"); + return err; + } + + drv_ver_len = min_t(int, (int)sizeof(drv_ver) - 1, + (int)strlen(HINIC_DRV_VERSION)); + memcpy(drv_ver, HINIC_DRV_VERSION, drv_ver_len); + + sdk_info(&pcidev->pcidev->dev, "Version info: driver %s, firmware %s\n", + drv_ver, fw_ver.mgmt_ver); + + hinic_ignore_minor_version(fw_ver.mgmt_ver); + hinic_ignore_minor_version(drv_ver); + ver_mismatch = hinic_version_cmp(drv_ver, fw_ver.mgmt_ver); + if (!ver_mismatch) + return 0; + + num_entry = (int)sizeof(ver_incompat_table) / + (int)sizeof(ver_incompat_table[0]); + for (idx = num_entry - 1; idx >= 0; idx--) { + ver_incompat = &ver_incompat_table[idx]; + + if (hinic_version_cmp(drv_ver, ver_incompat->version) < 0) + continue; + + /* Find older verion of driver in table */ + return __version_mismatch(pcidev, fw_ver.mgmt_ver, drv_ver, + ver_incompat, idx); + } + + return 0; +} + +struct mctp_hdr { + u16 resp_code; + u16 reason_code; + u32 manufacture_id; + + u8 cmd_rsvd; + u8 major_cmd; + u8 sub_cmd; + u8 spc_field; +}; + +struct mctp_bdf_info { + struct mctp_hdr hdr; /* spc_field: pf index */ + u8 rsvd; + u8 bus; + u8 device; + u8 function; +}; + +enum mctp_resp_code { + /* COMMAND_COMPLETED = 0, */ + /* COMMAND_FAILED = 1, */ + /* COMMAND_UNAVALILABLE = 2, */ + COMMAND_UNSUPPORTED = 3, +}; + +static void __mctp_set_hdr(struct mctp_hdr *hdr, + struct hinic_mctp_host_info *mctp_info) +{ + u32 manufacture_id = 0x07DB; + + hdr->cmd_rsvd = 0; + hdr->major_cmd = mctp_info->major_cmd; + hdr->sub_cmd = mctp_info->sub_cmd; + hdr->manufacture_id = cpu_to_be32(manufacture_id); + hdr->resp_code = cpu_to_be16(hdr->resp_code); + hdr->reason_code = cpu_to_be16(hdr->reason_code); +} + +static void __mctp_get_bdf(struct hinic_pcidev *pci_adapter, + struct hinic_mctp_host_info *mctp_info) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + struct mctp_bdf_info *bdf_info = mctp_info->data; + + bdf_info->bus = pdev->bus->number; + 
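+ /* answer the MCTP BDF query with this adapter's PCI bus/device/function */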
bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */ + bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */ + + memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr)); + __mctp_set_hdr(&bdf_info->hdr, mctp_info); + bdf_info->hdr.spc_field = + (u8)hinic_global_func_id_hw(pci_adapter->hwdev); + + mctp_info->data_len = sizeof(*bdf_info); +} + +#define MCTP_MAJOR_CMD_PUBLIC 0x0 +#define MCTP_MAJOR_CMD_NIC 0x1 + +#define MCTP_PUBLIC_SUB_CMD_BDF 0x1 +#define MCTP_PUBLIC_SUB_CMD_DRV 0x4 + +#define MCTP_NIC_SUB_CMD_IP 0x1 + +static void __mctp_get_host_info(struct hinic_pcidev *dev, + struct hinic_mctp_host_info *mctp_info) +{ + struct mctp_hdr *hdr; + + switch ((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) { + case (MCTP_MAJOR_CMD_PUBLIC << 8 | MCTP_PUBLIC_SUB_CMD_BDF): + __mctp_get_bdf(dev, mctp_info); + break; + + default: + hdr = mctp_info->data; + hdr->reason_code = COMMAND_UNSUPPORTED; + __mctp_set_hdr(hdr, mctp_info); + mctp_info->data_len = sizeof(*hdr); + break; + } +} + +static bool __is_pcidev_match_chip_name(const char *ifname, + struct hinic_pcidev *dev, + struct card_node *chip_node, + enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (type == TYPE_UNKNOWN) { + if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED) + return false; + } else { + if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED || + hinic_func_type(dev->hwdev) != type) + return false; + } + + return true; + } + + return false; +} + +static struct hinic_pcidev *_get_pcidev_by_chip_name(char *ifname, + enum func_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_chip_name(ifname, dev, chip_node, + type)) { + lld_dev_put(); + return dev; + } + } + } + + lld_dev_put(); + + return NULL; +} + +static struct hinic_pcidev *hinic_get_pcidev_by_chip_name(char *ifname) +{ + struct hinic_pcidev *dev, *dev_hw_init; + + /* find hw init device first */ + dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN); + if (dev_hw_init) { + if (hinic_func_type(dev_hw_init->hwdev) == TYPE_PPF) + return dev_hw_init; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF); + if (dev) { + if (dev_hw_init && dev_hw_init->init_state >= dev->init_state) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PF); + if (dev) { + if (dev_hw_init && dev_hw_init->init_state >= dev->init_state) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_VF); + if (dev) + return dev; + + return NULL; +} + +static bool __is_pcidev_match_dev_name(const char *ifname, + struct hinic_pcidev *dev, + enum hinic_service_type type) +{ + struct hinic_nic_dev *nic_dev; + enum hinic_service_type i; + + if (type == SERVICE_T_MAX) { + for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { + if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ)) + return true; + } + } else { + if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ)) + return true; + } + + nic_dev = dev->uld_dev[SERVICE_T_NIC]; + if (nic_dev) { + if (!strncmp(nic_dev->netdev->name, ifname, IFNAMSIZ)) + return true; + } + + return false; +} + +static struct hinic_pcidev * + hinic_get_pcidev_by_dev_name(char *ifname, enum hinic_service_type type) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + 
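+ /* search every function on every chip for a matching uld or netdev name,
+ * skipping adapters that are in the middle of being removed
+ */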
list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_dev_name(ifname, dev, type)) { + lld_dev_put(); + return dev; + } + } + } + lld_dev_put(); + + return NULL; +} + +static struct hinic_pcidev *hinic_get_pcidev_by_ifname(char *ifname) +{ + struct hinic_pcidev *dev; + + /* support search hwdev by chip name, net device name, + * or fc device name + */ + /* Find pcidev by chip_name first */ + dev = hinic_get_pcidev_by_chip_name(ifname); + if (dev) + return dev; + + /* If ifname not a chip name, + * find pcidev by FC name or netdevice name + */ + return hinic_get_pcidev_by_dev_name(ifname, SERVICE_T_MAX); +} + +int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!hwdev || !ifname) + return -EINVAL; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + strncpy(ifname, chip_node->chip_name, + IFNAMSIZ - 1); + ifname[IFNAMSIZ - 1] = 0; + lld_dev_put(); + return 0; + } + } + } + lld_dev_put(); + + return -ENXIO; +} +EXPORT_SYMBOL(hinic_get_chip_name_by_hwdev); + +static struct card_node *hinic_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hinic_pcidev *dev; + + if (!hwdev) + return NULL; + + lld_dev_hold(); + list_for_each_entry(node_tmp, &g_hinic_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_dev_put(); + + return chip_node; +} + +int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]) +{ + struct hinic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node; + u32 cnt; + + if (!dev || !hinic_support_nic(dev->hwdev, NULL)) + return -EINVAL; + + lld_dev_hold(); + + cnt = 0; + chip_node = dev->chip_node; + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state < HINIC_INIT_STATE_NIC_INITED) + continue; + + if (HINIC_FUNC_IS_VF(dev->hwdev)) + continue; + + array[cnt] = dev->uld_dev[SERVICE_T_NIC]; + cnt++; + } + lld_dev_put(); + + *dev_cnt = cnt; + + return 0; +} + +int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, u8 *cos_up) +{ + struct hinic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node; + + if (!dev) + return -EINVAL; + + chip_node = dev->chip_node; + *is_setted = chip_node->cos_up_setted; + if (chip_node->cos_up_setted) + memcpy(cos_up, chip_node->cos_up, sizeof(chip_node->cos_up)); + + return 0; +} + +int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up) +{ + struct hinic_pcidev *dev = pci_get_drvdata(pdev); + struct card_node *chip_node; + + if (!dev) + return -EINVAL; + + chip_node = dev->chip_node; + chip_node->cos_up_setted = true; + memcpy(chip_node->cos_up, cos_up, sizeof(chip_node->cos_up)); + + return 0; +} + +void *hinic_get_hwdev_by_ifname(char *ifname) +{ + struct hinic_pcidev *dev; + + dev = hinic_get_pcidev_by_ifname(ifname); + if (dev) + return dev->hwdev; + + return NULL; +} + +void *hinic_get_uld_dev_by_ifname(char *ifname, enum 
hinic_service_type type) +{ + struct hinic_pcidev *dev; + + if (type >= SERVICE_T_MAX) { + pr_err("Service type: %d is error\n", type); + return NULL; + } + + dev = hinic_get_pcidev_by_dev_name(ifname, type); + if (dev) + return dev->uld_dev[type]; + + return NULL; +} + +void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type) +{ + struct hinic_pcidev *dev; + + /* support search hwdev by chip name, net device name, + * or fc device name, Find pcidev by chip_name first + */ + dev = hinic_get_pcidev_by_chip_name(ifname); + if (dev) + return dev->uld_dev[type]; + + return NULL; +} + +/* NOTICE: nictool can't use this function, because this function can't keep + * tool context mutual exclusive with remove context + */ +void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev, + enum hinic_service_type type) +{ + struct hinic_pcidev *pci_adapter; + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + /* can't test HINIC_FUNC_IN_REMOVE bit in dev->flag, because + * TOE will call this function when detach toe driver + */ + + if (hinic_func_type(dev->hwdev) == TYPE_PPF) { + lld_dev_put(); + return dev->uld_dev[type]; + } + } + lld_dev_put(); + + return NULL; +} +EXPORT_SYMBOL(hinic_get_ppf_uld_by_pdev); + +void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter; + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) { + lld_dev_put(); + return dev->hwdev; + } + } + lld_dev_put(); + + return NULL; +} + +void hinic_get_all_chip_id(void *id_info) +{ + struct nic_card_id *card_id = (struct nic_card_id *)id_info; + struct card_node *chip_node; + int i = 0; + int id, err; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%d", &id); + if (err <= 0) + pr_err("Failed to get hinic id\n"); + + card_id->id[i] = id; + i++; + } + lld_dev_put(); + card_id->num = i; +} + +static bool __is_func_valid(struct hinic_pcidev *dev) +{ + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + return false; + + if (dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) + return false; + + if (HINIC_FUNC_IS_VF(dev->hwdev)) + return false; + + return true; +} + +bool hinic_is_valid_bar_addr(u64 offset) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (offset == pci_resource_start(dev->pcidev, 0)) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +void hinic_get_card_info(void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct hinic_nic_dev *nic_dev; + struct hinic_pcidev *dev; + void 
*fun_hwdev; + u32 i = 0; + + info->pf_num = 0; + + chip_node = hinic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!__is_func_valid(dev)) + continue; + + fun_hwdev = dev->hwdev; + + if (((hinic_support_fc(fun_hwdev, NULL)) || + (hinic_support_fcoe(fun_hwdev, NULL))) && + dev->uld_dev[SERVICE_T_FC]) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_FC); + strlcpy(info->pf[i].name, + dev->uld_dev_name[SERVICE_T_FC], IFNAMSIZ); + } + + if (hinic_support_nic(fun_hwdev, NULL)) { + nic_dev = dev->uld_dev[SERVICE_T_NIC]; + if (nic_dev) { + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); + strlcpy(info->pf[i].name, + nic_dev->netdev->name, IFNAMSIZ); + } + } + + if ((hinic_support_ovs(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_OVS]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_OVS); + + if ((hinic_support_roce(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_ROCE]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_ROCE); + + if ((hinic_support_toe(fun_hwdev, NULL)) && + dev->uld_dev[SERVICE_T_TOE]) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_TOE); + + if (hinic_func_for_mgmt(fun_hwdev)) + strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); + + if (hinic_func_for_pt(fun_hwdev)) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_PT); + + if (hinic_func_for_hwpt(fun_hwdev)) + info->pf[i].pf_type |= (u32)BIT(SERVICE_T_HWPT); + + strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[i].bus_info)); + info->pf_num++; + i = info->pf_num; + } + lld_dev_put(); +} + +void hinic_get_card_func_info_by_card_name(const char *chip_name, + struct hinic_card_func_info + *card_func) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + struct func_pdev_info *pdev_info; + + card_func->num_pf = 0; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hinic_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0); + pdev_info->bar0_phy_addr = + pci_resource_start(dev->pcidev, 0); + + card_func->num_pf++; + if (card_func->num_pf >= MAX_SIZE) + break; + } + } + + lld_dev_put(); +} + +int hinic_get_device_id(void *hwdev, u16 *dev_id) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + u16 vendor_id = 0; + u16 device_id = 0; + + chip_node = hinic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pci_read_config_word(dev->pcidev, 0, &vendor_id); + if (vendor_id == HINIC_PCI_VENDOR_ID) { + pci_read_config_word(dev->pcidev, 2, &device_id); + break; + } + } + lld_dev_put(); + *dev_id = device_id; + + return 0; +} + +int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid) +{ + struct card_node *chip_node = NULL; + struct hinic_pcidev *dev; + + chip_node = hinic_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) + continue; + + if (hinic_physical_port_id(dev->hwdev) == port_id) { + *pf_id = 
hinic_global_func_id(dev->hwdev); + *isvalid = 1; + break; + } + } + lld_dev_put(); + + return 0; +} + +void hinic_get_fc_devname(char *devname) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state < HINIC_INIT_STATE_NIC_INITED) + continue; + + if (HINIC_FUNC_IS_VF(dev->hwdev)) + continue; + + if (dev->uld_dev[SERVICE_T_FC]) { + strlcpy(devname, + dev->uld_dev_name[SERVICE_T_FC], + IFNAMSIZ); + lld_dev_put(); + return; + } + } + } + lld_dev_put(); +} + +enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev) +{ + struct hinic_pcidev *dev = pci_get_drvdata(pdev); + + if (dev) + return dev->init_state; + + return HINIC_INIT_STATE_NONE; +} + +enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname) +{ + struct hinic_pcidev *dev; + + dev = hinic_get_pcidev_by_ifname(ifname); + if (dev) + return dev->init_state; + + pr_err("Can not get device %s\n", ifname); + + return HINIC_INIT_STATE_NONE; +} + +int hinic_get_self_test_result(char *ifname, u32 *result) +{ + struct hinic_pcidev *dev = NULL; + + dev = hinic_get_pcidev_by_ifname(ifname); + if (!dev) { + pr_err("Get pcidev failed by ifname: %s\n", ifname); + return -EFAULT; + } + + *result = be32_to_cpu(readl((u8 __iomem *)(dev->cfg_reg_base) + + SELF_TEST_BAR_ADDR_OFFSET)); + return 0; +} + +struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *pci_adapter; + struct hinic_nic_dev *nic_dev; + + if (!lld_dev || !hinic_support_nic(lld_dev->hwdev, NULL)) + return NULL; + + pci_adapter = pci_get_drvdata(lld_dev->pdev); + nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC]; + if (!nic_dev) { + sdk_err(&pci_adapter->pcidev->dev, + "There's no net device attached on the pci device\n"); + return NULL; + } + + return nic_dev->netdev; +} +EXPORT_SYMBOL(hinic_get_netdev_by_lld); + +void *hinic_get_hwdev_by_netdev(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (!nic_dev || !netdev) + return NULL; + + return nic_dev->hwdev; +} +EXPORT_SYMBOL(hinic_get_hwdev_by_netdev); + +struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter; + struct hinic_nic_dev *nic_dev; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter || !hinic_support_nic(pci_adapter->hwdev, NULL)) + return NULL; + + nic_dev = pci_adapter->uld_dev[SERVICE_T_NIC]; + if (!nic_dev) { + sdk_err(&pci_adapter->pcidev->dev, + "There`s no net device attached on the pci device\n"); + return NULL; + } + + return nic_dev->netdev; +} +EXPORT_SYMBOL(hinic_get_netdev_by_pcidev); + +struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + return &pci_adapter->sriov_info; +} + +bool hinic_is_in_host(void) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state > HINIC_INIT_STATE_PCI_INITED && + hinic_func_type(dev->hwdev) != TYPE_VF) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +int hinic_attach_nic(struct hinic_lld_dev *lld_dev) +{ + struct 
hinic_pcidev *dev; + + if (!lld_dev) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); +} +EXPORT_SYMBOL(hinic_attach_nic); + +void hinic_detach_nic(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + detach_uld(dev, SERVICE_T_NIC); +} +EXPORT_SYMBOL(hinic_detach_nic); + +int hinic_attach_roce(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return -EINVAL; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + return attach_uld(dev, SERVICE_T_ROCE, &g_uld_info[SERVICE_T_ROCE]); +} +EXPORT_SYMBOL(hinic_attach_roce); + +void hinic_detach_roce(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *dev; + + if (!lld_dev) + return; + + dev = container_of(lld_dev, struct hinic_pcidev, lld_dev); + detach_uld(dev, SERVICE_T_ROCE); +} +EXPORT_SYMBOL(hinic_detach_roce); + +static int __set_nic_rss_state(struct hinic_pcidev *dev, bool enable) +{ + void *nic_uld; + int err = 0; + + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + return 0; + + nic_uld = dev->uld_dev[SERVICE_T_NIC]; + if (!hinic_support_nic(dev->hwdev, NULL) || !nic_uld) + return 0; + + if (hinic_func_type(dev->hwdev) == TYPE_VF) + return 0; + + if (enable) + err = hinic_enable_func_rss(nic_uld); + else + err = hinic_disable_func_rss(nic_uld); + if (err) { + sdk_err(&dev->pcidev->dev, "Failed to %s rss\n", + enable ? "enable" : "disable"); + } + + return err; +} + +int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *adapter; + + if (!lld_dev) + return -EINVAL; + + adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev); + + return __set_nic_rss_state(adapter, false); +} +EXPORT_SYMBOL(hinic_disable_nic_rss); + +int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev) +{ + struct hinic_pcidev *adapter; + + if (!lld_dev) + return -EINVAL; + + adapter = container_of(lld_dev, struct hinic_pcidev, lld_dev); + + return __set_nic_rss_state(adapter, true); +} +EXPORT_SYMBOL(hinic_enable_nic_rss); + +struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev) +{ + struct hinic_pcidev *adapter; + + if (!pdev) + return NULL; + + adapter = pci_get_drvdata(pdev); + + return &adapter->id; +} +EXPORT_SYMBOL(hinic_get_pci_device_id); + +static int __set_nic_func_state(struct hinic_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + u16 func_id; + int err; + bool enable_nic; + + err = hinic_global_func_id_get(pci_adapter->hwdev, &func_id); + if (err) + return err; + + err = hinic_get_func_nic_enable(pci_adapter->hwdev, func_id, + &enable_nic); + if (err) { + sdk_err(&pdev->dev, "Failed to get nic state\n"); + return err; + } + + if (enable_nic) { + if (is_multi_bm_slave(pci_adapter->hwdev)) + hinic_set_vf_dev_cap(pci_adapter->hwdev); + + err = attach_uld(pci_adapter, SERVICE_T_NIC, + &g_uld_info[SERVICE_T_NIC]); + if (err) { + sdk_err(&pdev->dev, "Failed to initialize NIC\n"); + return err; + } + + if (pci_adapter->init_state < HINIC_INIT_STATE_NIC_INITED) + pci_adapter->init_state = HINIC_INIT_STATE_NIC_INITED; + } else { + detach_uld(pci_adapter, SERVICE_T_NIC); + } + + return 0; +} + +int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev, u16 vf_func_id, + bool en) +{ + struct hinic_pcidev *dev, *des_dev; + struct hinic_nic_dev *uld_dev; + int err = -EFAULT; + + if (!lld_dev) + return -EINVAL; + + dev = pci_get_drvdata(lld_dev->pdev); 
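+ /* lld_dev is the PF's low-level device; vf_func_id selects which VF to toggle */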
+ + if (!dev) + return -EFAULT; + /* find func_idx pci_adapter and disable or enable nic */ + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED && + !test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag)) + continue; + + if (hinic_global_func_id(des_dev->hwdev) != vf_func_id) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED) { + break; + } + + sdk_info(&dev->pcidev->dev, "Received event: %s vf%d nic\n", + en ? "enable" : "disable", vf_func_id); + + err = 0; + if (en) { + if (des_dev->uld_dev[SERVICE_T_NIC]) { + sdk_err(&des_dev->pcidev->dev, + "%s driver is already attached to the pcie device, cannot set VF max_queue_num\n", + s_uld_name[SERVICE_T_NIC]); + } else { + err = hinic_set_vf_dev_cap(des_dev->hwdev); + + if (err) { + sdk_err(&des_dev->pcidev->dev, + "%s driver failed to set VF max_queue_num, err=%d\n", + s_uld_name[SERVICE_T_NIC], err); + + break; + } + } + + err = attach_uld(des_dev, SERVICE_T_NIC, + &g_uld_info[SERVICE_T_NIC]); + if (err) { + sdk_err(&des_dev->pcidev->dev, "Failed to initialize NIC\n"); + break; + } + + uld_dev = (struct hinic_nic_dev *) + (des_dev->uld_dev[SERVICE_T_NIC]); + uld_dev->in_vm = true; + uld_dev->is_vm_slave = + is_multi_vm_slave(uld_dev->hwdev); + uld_dev->is_bm_slave = + is_multi_bm_slave(uld_dev->hwdev); + if (des_dev->init_state < HINIC_INIT_STATE_NIC_INITED) + des_dev->init_state = + HINIC_INIT_STATE_NIC_INITED; + } else { + detach_uld(des_dev, SERVICE_T_NIC); + } + + break; + } + lld_dev_put(); + + return err; +} +EXPORT_SYMBOL(hinic_ovs_set_vf_nic_state); + +static void slave_host_mgmt_work(struct work_struct *work) +{ + struct hinic_pcidev *pci_adapter = + container_of(work, struct hinic_pcidev, slave_nic_work); + + __set_nic_func_state(pci_adapter); +} + +static void __multi_host_mgmt(struct hinic_pcidev *dev, + struct hinic_multi_host_mgmt_event *mhost_mgmt) +{ + struct hinic_pcidev *des_dev; + struct hinic_mhost_nic_func_state *nic_state = NULL; + + switch (mhost_mgmt->sub_cmd) { + case HINIC_MHOST_NIC_STATE_CHANGE: + nic_state = mhost_mgmt->data; + + nic_state->status = 0; + + /* find func_idx pci_adapter and disable or enable nic */ + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED && + !test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag)) + continue; + + if (hinic_global_func_id_hw(des_dev->hwdev) != + nic_state->func_idx) + continue; + + if (des_dev->init_state < + HINIC_INIT_STATE_DBGTOOL_INITED) { + nic_state->status = + test_bit(HINIC_FUNC_PRB_ERR, + &des_dev->flag) ? 
1 : 0; + break; + } + + sdk_info(&dev->pcidev->dev, "Received nic state change event, state: %d\n", + nic_state->enable); + + /* schedule_work */ + schedule_work(&des_dev->slave_nic_work); + + break; + } + lld_dev_put(); + + break; + + default: + sdk_warn(&dev->pcidev->dev, "Received unknown multi-host mgmt event %d\n", + mhost_mgmt->sub_cmd); + break; + } +} + +static void send_uld_dev_event(struct hinic_pcidev *dev, + struct hinic_event_info *event) +{ + enum hinic_service_type type; + + for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + if (test_and_set_bit(type, &dev->state)) { + sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't be handled, %s is detaching\n", + event->type, s_uld_name[type]); + continue; + } + + if (g_uld_info[type].event) + g_uld_info[type].event(&dev->lld_dev, + dev->uld_dev[type], event); + clear_bit(type, &dev->state); + } +} + +static void send_event_to_all_pf(struct hinic_pcidev *dev, + struct hinic_event_info *event) +{ + struct hinic_pcidev *des_dev = NULL; + + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) || + des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(des_dev->hwdev) == TYPE_VF) + continue; + + send_uld_dev_event(des_dev, event); + } + lld_dev_put(); +} + +static void send_event_to_dst_pf(struct hinic_pcidev *dev, u16 func_id, + struct hinic_event_info *event) +{ + struct hinic_pcidev *des_dev = NULL; + + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) || + des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(des_dev->hwdev) == TYPE_VF) + continue; + + if (hinic_global_func_id(des_dev->hwdev) == func_id) { + send_uld_dev_event(des_dev, event); + break; + } + } + lld_dev_put(); +} + +void hinic_event_process(void *adapter, struct hinic_event_info *event) +{ + struct hinic_pcidev *dev = adapter; + u16 func_id; + + switch (event->type) { + case HINIC_EVENT_FMW_ACT_NTC: + hinic_sync_time_to_fmw(dev); + break; + case HINIC_EVENT_MCTP_GET_HOST_INFO: + __mctp_get_host_info(dev, &event->mctp_info); + break; + case HINIC_EVENT_MULTI_HOST_MGMT: + __multi_host_mgmt(dev, &event->mhost_mgmt); + break; + case HINIC_EVENT_FAULT: + if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && + event->info.event.chip.func_id < HINIC_MAX_PF_NUM) { + func_id = event->info.event.chip.func_id; + send_event_to_dst_pf(adapter, func_id, event); + } else { + send_uld_dev_event(adapter, event); + } + break; + case HINIC_EVENT_MGMT_WATCHDOG_EVENT: + send_event_to_all_pf(adapter, event); + break; + default: + send_uld_dev_event(adapter, event); + break; + } +} + +static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter) +{ + u32 db_dwqe_size; + u64 dwqe_addr; + + pci_adapter->cfg_reg_base = + pci_ioremap_bar(pdev, HINIC_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HINIC_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map interrupt regs\n"); + goto map_intr_bar_err; + } + + db_dwqe_size = hinic_get_db_size(pci_adapter->cfg_reg_base, + &pci_adapter->chip_mode); + + pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC_PCI_DB_BAR); + pci_adapter->db_base = 
ioremap(pci_adapter->db_base_phy, + db_dwqe_size); + if (!pci_adapter->db_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map doorbell regs\n"); + goto map_db_err; + } + + if (pci_adapter->chip_mode != CHIP_MODE_NORMAL) + return 0; + + dwqe_addr = pci_adapter->db_base_phy + db_dwqe_size; + +#if defined(__aarch64__) + /* arm do not support call ioremap_wc() */ + pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, db_dwqe_size, + __pgprot(PROT_DEVICE_nGnRnE)); +#else + pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr, + db_dwqe_size); + +#endif + if (!pci_adapter->dwqe_mapping) { + sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n"); + goto mapping_dwqe_err; + } + + return 0; + +mapping_dwqe_err: + iounmap(pci_adapter->db_base); + +map_db_err: + iounmap(pci_adapter->intr_reg_base); + +map_intr_bar_err: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void unmapping_bar(struct hinic_pcidev *pci_adapter) +{ + if (pci_adapter->chip_mode == CHIP_MODE_NORMAL) { +#if defined(__aarch64__) + iounmap(pci_adapter->dwqe_mapping); +#else + io_mapping_free(pci_adapter->dwqe_mapping); +#endif + } + + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int alloc_chip_node(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node; + unsigned char i; + unsigned char parent_bus_number = 0; + int err; + + if (!pci_is_root_bus(pci_adapter->pcidev->bus)) + parent_bus_number = pci_adapter->pcidev->bus->parent->number; + + if (parent_bus_number != 0) { + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } else if (pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF || + pci_adapter->pcidev->device == HINIC_DEV_ID_1822_VF_HV) { + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } + + for (i = 0; i < MAX_CARD_ID; i++) { + if (!FIND_BIT(card_bit_map, i)) { + card_bit_map = (u64)SET_BIT(card_bit_map, i); + break; + } + } + + if (i == MAX_CARD_ID) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc card id\n"); + return -EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + goto alloc_chip_err; + } + + chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL); + if (!(chip_node->dbgtool_attr_file.name)) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc dbgtool attr file name\n"); + goto alloc_dbgtool_attr_file_err; + } + + /* parent bus number */ + chip_node->dp_bus_num = parent_bus_number; + + err = snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", + HINIC_CHIP_NAME, i); + if (err <= 0 || err >= IFNAMSIZ) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to snprintf chip_name, function return(%d) and dest_len(%d)\n", + err, IFNAMSIZ); + goto alloc_dbgtool_attr_file_err; + } + + err = snprintf((char *)chip_node->dbgtool_attr_file.name, + IFNAMSIZ, "%s%d", HINIC_CHIP_NAME, i); + if (err <= 0 || err >= IFNAMSIZ) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to snprintf dbgtool_attr_file_name, function return(%d) and dest_len(%d)\n", + err, IFNAMSIZ); + goto alloc_dbgtool_attr_file_err; + } + + sdk_info(&pci_adapter->pcidev->dev, + "Add new chip %s to global list succeed\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, 
&g_hinic_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + mutex_init(&chip_node->sfp_mutex); + + return 0; + +alloc_dbgtool_attr_file_err: + kfree(chip_node); + +alloc_chip_err: + card_bit_map = CLEAR_BIT(card_bit_map, i); + return -ENOMEM; +} + +static void free_chip_node(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + u32 id; + int err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, + "Delete chip %s from global list succeed\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, HINIC_CHIP_NAME "%u", &id); + if (err <= 0) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get hinic id\n"); + + card_bit_map = CLEAR_BIT(card_bit_map, id); + + kfree(chip_node->dbgtool_attr_file.name); + kfree(chip_node); + } +} + +static bool hinic_get_vf_load_state(struct pci_dev *pdev) +{ + unsigned char parent_bus_number; + struct card_node *chip_node; + u8 id; + + if (!pdev->is_virtfn) + return false; + + /* vf used in vm */ + if (pci_is_root_bus(pdev->bus)) + return disable_vf_load; + + parent_bus_number = pdev->bus->parent->number; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + for (id = 0; id < HINIC_MAX_PF_NUM; id++) { + if (chip_node->pf_bus_num[id] == + pdev->bus->number) { + lld_dev_put(); + return chip_node->disable_vf_load[id]; + } + } + } + } + lld_dev_put(); + + return disable_vf_load; +} + +static void hinic_set_vf_load_state(struct hinic_pcidev *pci_adapter, + bool vf_load_state) +{ + struct card_node *chip_node; + u16 func_id; + + if (hinic_func_type(pci_adapter->hwdev) == TYPE_VF) + return; + + /* The VF on the BM slave side must be probed */ + if (is_multi_bm_slave(pci_adapter->hwdev)) + vf_load_state = false; + + func_id = hinic_global_func_id_hw(pci_adapter->hwdev); + + chip_node = pci_adapter->chip_node; + chip_node->disable_vf_load[func_id] = vf_load_state; + chip_node->pf_bus_num[func_id] = pci_adapter->pcidev->bus->number; + + sdk_info(&pci_adapter->pcidev->dev, "Current function support %s, %s vf load in host\n", + (hinic_support_ovs(pci_adapter->hwdev, NULL) ? "ovs" : "nic"), + (vf_load_state ? 
"disable" : "enable")); +} + +int hinic_ovs_set_vf_load_state(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter; + + if (!pdev) { + pr_err("pdev is null\n"); + return -EINVAL; + } + + pci_adapter = pci_get_drvdata(pdev); + if (!pci_adapter) { + pr_err("pci_adapter is null\n"); + return -EFAULT; + } + + hinic_set_vf_load_state(pci_adapter, disable_vf_load); + + return 0; +} +EXPORT_SYMBOL(hinic_ovs_set_vf_load_state); + +static int hinic_config_deft_mrss(struct pci_dev *pdev) +{ + return 0; +} + +static int hinic_config_pci_cto(struct pci_dev *pdev) +{ + return 0; +} + +static int hinic_pci_init(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = NULL; + int err; + + err = hinic_config_deft_mrss(pdev); + if (err) { + sdk_err(&pdev->dev, "Failed to configure Max Read Request Size\n"); + return err; + } + + err = hinic_config_pci_cto(pdev); + if (err) { + sdk_err(&pdev->dev, "Failed to configure Completion timeout\n"); + return err; + } + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) { + sdk_err(&pdev->dev, + "Failed to alloc pci device adapter\n"); + return -ENOMEM; + } + pci_adapter->pcidev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) { + sdk_info(&pdev->dev, "VFs are not binded to hinic\n"); + return 0; + } +#endif + + err = pci_enable_device(pdev); + if (err) { + sdk_err(&pdev->dev, "Failed to enable PCI device\n"); + goto pci_enable_err; + } + + err = pci_request_regions(pdev, HINIC_DRV_NAME); + if (err) { + sdk_err(&pdev->dev, "Failed to request regions\n"); + goto pci_regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, "Failed to set DMA mask\n"); + goto dma_mask_err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, + "Couldn't set 64-bit coherent DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, + "Failed to set coherent DMA mask\n"); + goto dma_consistnet_mask_err; + } + } + + return 0; + +dma_consistnet_mask_err: +dma_mask_err: + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + +pci_regions_err: + pci_disable_device(pdev); + +pci_enable_err: + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); + + return err; +} + +static void hinic_pci_deinit(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); +} + +static void hinic_notify_ppf_unreg(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + struct hinic_pcidev *dev; + + if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + lld_lock_chip_node(); + list_for_each_entry(dev, &chip_node->func_list, node) { + hinic_ppf_hwdev_unreg(dev->hwdev); + } + lld_unlock_chip_node(); +} + +static void hinic_notify_ppf_reg(struct hinic_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + struct hinic_pcidev *dev; + + if (hinic_func_type(pci_adapter->hwdev) != TYPE_PPF) + return; + + 
lld_lock_chip_node(); + list_for_each_entry(dev, &chip_node->func_list, node) { + hinic_ppf_hwdev_reg(dev->hwdev, pci_adapter->hwdev); + } + lld_unlock_chip_node(); +} + +#ifdef CONFIG_X86 +/** + * cfg_order_reg - configure the dma order register to zero when the cpu + * model is haswell or broadwell + * @pci_adapter: pci adapter + */ +/*lint -save -e40 */ +void cfg_order_reg(struct hinic_pcidev *pci_adapter) +{ + u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; + struct cpuinfo_x86 *cpuinfo; + u32 i; + + if (HINIC_FUNC_IS_VF(pci_adapter->hwdev)) + return; + + cpuinfo = &cpu_data(0); + for (i = 0; i < sizeof(cpu_model); i++) { + if (cpu_model[i] == cpuinfo->x86_model) + hinic_set_pcie_order_cfg(pci_adapter->hwdev); + } +} + +/*lint -restore*/ +#endif + +static int hinic_func_init(struct pci_dev *pdev, + struct hinic_pcidev *pci_adapter) +{ + struct hinic_init_para init_para; + bool vf_load_state; + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.dwqe_mapping = pci_adapter->dwqe_mapping; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + init_para.ppf_hwdev = hinic_get_ppf_hwdev_by_pdev(pdev); + err = hinic_init_hwdev(&init_para); + if (err < 0) { + pci_adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } else if (err > 0) { + if (err == (1 << HINIC_HWDEV_ALL_INITED) && + pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) { + pci_adapter->init_state = HINIC_INIT_STATE_HW_IF_INITED; + sdk_info(&pdev->dev, + "Initialize hardware device later\n"); + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); + set_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + } else if (err != (1 << HINIC_HWDEV_ALL_INITED)) { + sdk_err(&pdev->dev, + "Hardware device initialization partially failed\n"); + hinic_detect_version_compatible(pci_adapter); + hinic_notify_ppf_reg(pci_adapter); + pci_adapter->init_state = + HINIC_INIT_STATE_HW_PART_INITED; + } + return -EFAULT; + } + + hinic_notify_ppf_reg(pci_adapter); + pci_adapter->init_state = HINIC_INIT_STATE_HWDEV_INITED; + + vf_load_state = hinic_support_ovs(pci_adapter->hwdev, NULL) ? 
+ true : disable_vf_load; + + hinic_set_vf_load_state(pci_adapter, vf_load_state); + hinic_qps_num_set(pci_adapter->hwdev, 0); + + pci_adapter->lld_dev.pdev = pdev; + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + pci_adapter->sriov_info.pdev = pdev; + pci_adapter->sriov_info.hwdev = pci_adapter->hwdev; + + hinic_event_register(pci_adapter->hwdev, pci_adapter, + hinic_event_process); + + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) + hinic_sync_time_to_fmw(pci_adapter); + hinic_init_syncfw_timer(pci_adapter); + + /* dbgtool init */ + lld_lock_chip_node(); + err = hinic_dbgtool_knl_init(pci_adapter->hwdev, + pci_adapter->chip_node); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + hinic_destroy_syncfw_timer(pci_adapter); + hinic_event_unregister(pci_adapter->hwdev); + return err; + } + lld_unlock_chip_node(); + + pci_adapter->init_state = HINIC_INIT_STATE_DBGTOOL_INITED; + + err = hinic_detect_version_compatible(pci_adapter); + if (err) + return err; + + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev) && + FUNC_ENABLE_SRIOV_IN_DEFAULT(pci_adapter->hwdev)) { + hinic_pci_sriov_enable(pdev, + hinic_func_max_vf(pci_adapter->hwdev)); + } + + /* NIC is base driver, probe firstly */ + err = __set_nic_func_state(pci_adapter); + if (err) + return err; + + attach_ulds(pci_adapter); + +#ifdef CONFIG_X86 + cfg_order_reg(pci_adapter); +#endif + + sdk_info(&pdev->dev, "Pcie device probed\n"); + pci_adapter->init_state = HINIC_INIT_STATE_ALL_INITED; + + return 0; +} + +static void hinic_func_deinit(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. + */ + hinic_disable_mgmt_msg_report(pci_adapter->hwdev); + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_PART_INITED) + hinic_flush_mgmt_workq(pci_adapter->hwdev); + + hinic_set_func_deinit_flag(pci_adapter->hwdev); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_NIC_INITED) { + detach_ulds(pci_adapter); + detach_uld(pci_adapter, SERVICE_T_NIC); + } + + if (pci_adapter->init_state >= HINIC_INIT_STATE_DBGTOOL_INITED) { + lld_lock_chip_node(); + hinic_dbgtool_knl_deinit(pci_adapter->hwdev, + pci_adapter->chip_node); + lld_unlock_chip_node(); + hinic_destroy_syncfw_timer(pci_adapter); + hinic_event_unregister(pci_adapter->hwdev); + } + + hinic_notify_ppf_unreg(pci_adapter); + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) { + /* Remove the current node from node-list first, + * then it's safe to free hwdev + */ + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + hinic_free_hwdev(pci_adapter->hwdev); + } +} + +static void wait_tool_unused(void) +{ + u32 loop_cnt = 0; + + while (loop_cnt < HINIC_WAIT_TOOL_CNT_TIMEOUT) { + if (!atomic_read(&tool_used_cnt)) + return; + + usleep_range(9900, 10000); + loop_cnt++; + } +} + +static inline void wait_sriov_cfg_complete(struct hinic_pcidev *pci_adapter) +{ + struct hinic_sriov_info *sriov_info; + u32 loop_cnt = 0; + + sriov_info = &pci_adapter->sriov_info; + + set_bit(HINIC_FUNC_REMOVE, &sriov_info->state); + usleep_range(9900, 10000); + + while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) { + if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) && + !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) + return; + + usleep_range(9900, 10000); + loop_cnt++; + } +} + +static void hinic_remove(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if 
(!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) { + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); + return; + } +#endif + cancel_delayed_work_sync(&pci_adapter->slave_nic_init_dwork); + flush_workqueue(pci_adapter->slave_nic_init_workq); + destroy_workqueue(pci_adapter->slave_nic_init_workq); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) + hinic_detect_hw_present(pci_adapter->hwdev); + + switch (pci_adapter->init_state) { + case HINIC_INIT_STATE_ALL_INITED: + /*lint -fallthrough*/ + case HINIC_INIT_STATE_NIC_INITED: + /* Don't support hotplug when SR-IOV is enabled now. + * So disable SR-IOV capability as normal. + */ + if (!HINIC_FUNC_IS_VF(pci_adapter->hwdev)) { + wait_sriov_cfg_complete(pci_adapter); + hinic_pci_sriov_disable(pdev); + } + /*lint -fallthrough*/ + case HINIC_INIT_STATE_DBGTOOL_INITED: + case HINIC_INIT_STATE_HWDEV_INITED: + case HINIC_INIT_STATE_HW_PART_INITED: + case HINIC_INIT_STATE_HW_IF_INITED: + case HINIC_INIT_STATE_PCI_INITED: + set_bit(HINIC_FUNC_IN_REMOVE, &pci_adapter->flag); + lld_lock_chip_node(); + cancel_work_sync(&pci_adapter->slave_nic_work); + lld_unlock_chip_node(); + + wait_tool_unused(); + + if (pci_adapter->init_state >= HINIC_INIT_STATE_HW_IF_INITED) + hinic_func_deinit(pdev); + + lld_lock_chip_node(); + if (pci_adapter->init_state < HINIC_INIT_STATE_HW_IF_INITED) + list_del(&pci_adapter->node); + hinic_tool_k_uninit(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + hinic_pci_deinit(pdev); + + /*lint -fallthrough*/ + break; + + default: + break; + } + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static void slave_host_init_delay_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic_pcidev *pci_adapter = container_of(delay, + struct hinic_pcidev, slave_nic_init_dwork); + struct pci_dev *pdev = pci_adapter->pcidev; + struct card_node *chip_node = pci_adapter->chip_node; + int found = 0; + struct hinic_pcidev *ppf_pcidev = NULL; + int err; + + if (!hinic_get_master_host_mbox_enable(pci_adapter->hwdev)) { + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); + return; + } + if (hinic_func_type(pci_adapter->hwdev) == TYPE_PPF) { + err = hinic_func_init(pdev, pci_adapter); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + if (err) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + return; + } + + /* Make sure the PPF must be the first one */ + lld_dev_hold(); + list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &ppf_pcidev->flag) || + ppf_pcidev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) { + found = 1; + break; + } + } + lld_dev_put(); + if (found && ppf_pcidev->init_state == HINIC_INIT_STATE_ALL_INITED) { + err = hinic_func_init(pdev, pci_adapter); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + if (err) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + } else { + queue_delayed_work(pci_adapter->slave_nic_init_workq, + &pci_adapter->slave_nic_init_dwork, + HINIC_SLAVE_NIC_DELAY_TIME); + } +} + +static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic_pcidev *pci_adapter; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + + err = hinic_pci_init(pdev); + if (err) 
+ return err; + +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn && hinic_get_vf_load_state(pdev)) + return 0; +#endif + + pci_adapter = pci_get_drvdata(pdev); + clear_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + clear_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag); + err = mapping_bar(pdev, pci_adapter); + if (err) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + pci_adapter->id = *id; + INIT_WORK(&pci_adapter->slave_nic_work, slave_host_mgmt_work); + pci_adapter->slave_nic_init_workq = + create_singlethread_workqueue(HINIC_SLAVE_NIC_DELAY); + if (!pci_adapter->slave_nic_init_workq) { + sdk_err(&pdev->dev, + "Failed to create work queue: %s\n", + HINIC_SLAVE_NIC_DELAY); + goto create_nic_delay_work_fail; + } + INIT_DELAYED_WORK(&pci_adapter->slave_nic_init_dwork, + slave_host_init_delay_work); + + /* if chip information of the pcie function exists, + * add the function into the chip + */ + lld_lock_chip_node(); + err = alloc_chip_node(pci_adapter); + if (err) { + sdk_err(&pdev->dev, + "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + + err = hinic_tool_k_init(); + if (err) { + sdk_warn(&pdev->dev, "Failed to init nictool\n"); + goto init_nictool_err; + } + + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + + lld_unlock_chip_node(); + + pci_adapter->init_state = HINIC_INIT_STATE_PCI_INITED; + + err = hinic_func_init(pdev, pci_adapter); + if (err) + goto func_init_err; + + return 0; + +func_init_err: + if (!test_bit(HINIC_FUNC_PRB_DELAY, &pci_adapter->flag)) + set_bit(HINIC_FUNC_PRB_ERR, &pci_adapter->flag); + return 0; + +init_nictool_err: + free_chip_node(pci_adapter); + +alloc_chip_node_fail: + lld_unlock_chip_node(); + +create_nic_delay_work_fail: + unmapping_bar(pci_adapter); + +map_bar_failed: + hinic_pci_deinit(pdev); + + sdk_err(&pdev->dev, "Pcie device probe failed\n"); + return err; +} + +/*lint -save -e133 -e10*/ +static const struct pci_device_id hinic_pci_table[] = { + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PF), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_VF_HV), 0}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_SMTIO), HINIC_BOARD_PG_SM_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_100GE), + HINIC_BOARD_PG_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_PANGEA_TP_10GE), + HINIC_BOARD_PG_TP_10GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_40GE), HINIC_BOARD_40GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_100GE), HINIC_BOARD_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_KR_25GE), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_MULTI_HOST), HINIC_BOARD_25GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_100GE), HINIC_BOARD_100GE}, + {PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_1822_DUAL_25GE), HINIC_BOARD_25GE}, + {0, 0} +}; + +/*lint -restore*/ +MODULE_DEVICE_TABLE(pci, hinic_pci_table); + +/** + * hinic_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + * + * Since we only need error detection, not error handling, we + * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER + * driver that we don't need a reset (error handling). 
+ */ +static pci_ers_result_t hinic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hinic_pcidev *pci_adapter; + + sdk_err(&pdev->dev, + "Uncorrectable error detected, log and cleanup error status: 0x%08x\n", + state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + pci_adapter = pci_get_drvdata(pdev); + + if (pci_adapter) + hinic_record_pcie_error(pci_adapter->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static void hinic_shutdown(struct pci_dev *pdev) +{ + struct hinic_pcidev *pci_adapter = pci_get_drvdata(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (pci_adapter) + hinic_shutdown_hwdev(pci_adapter->hwdev); + + pci_disable_device(pdev); + + if (pci_adapter) + hinic_set_api_stop(pci_adapter->hwdev); +} + +/* Cause we only need error detecting not error handling, so only error_detected + * callback is enough. + */ +static struct pci_error_handlers hinic_err_handler = { + .error_detected = hinic_io_error_detected, +}; + +static struct pci_driver hinic_driver = { + .name = HINIC_DRV_NAME, + .id_table = hinic_pci_table, + .probe = hinic_probe, + .remove = hinic_remove, + .shutdown = hinic_shutdown, + .sriov_configure = hinic_pci_sriov_configure, + .err_handler = &hinic_err_handler +}; + +static int __init hinic_lld_init(void) +{ + pr_info("%s - version %s\n", HINIC_DRV_DESC, HINIC_DRV_VERSION); + memset(g_uld_info, 0, sizeof(g_uld_info)); + atomic_set(&tool_used_cnt, 0); + + hinic_lld_lock_init(); + + /* register nic driver information first, and add net device in + * nic_probe called by hinic_probe. + */ + hinic_register_uld(SERVICE_T_NIC, &nic_uld_info); + + return pci_register_driver(&hinic_driver); +} + +static void __exit hinic_lld_exit(void) +{ + pci_unregister_driver(&hinic_driver); + + hinic_unregister_uld(SERVICE_T_NIC); +} + +module_init(hinic_lld_init); +module_exit(hinic_lld_exit); + +int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!micro_log_info || !micro_log_info->init || + !micro_log_info->deinit) { + pr_err("Invalid information of micro log info to register\n"); + return -EINVAL; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(dev->hwdev) == TYPE_PPF) { + if (micro_log_info->init(dev->hwdev)) { + sdk_err(&dev->pcidev->dev, + "micro log init failed\n"); + continue; + } + } + } + } + lld_dev_put(); + pr_info("Register micro log succeed\n"); + + return 0; +} +EXPORT_SYMBOL(hinic_register_micro_log); + +void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info) +{ + struct card_node *chip_node; + struct hinic_pcidev *dev; + + if (!micro_log_info) + return; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(dev->hwdev) == TYPE_PPF) + micro_log_info->deinit(dev->hwdev); + } + } + lld_dev_put(); + pr_info("Unregister micro log succeed\n"); +} +EXPORT_SYMBOL(hinic_unregister_micro_log); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.h b/drivers/net/ethernet/huawei/hinic/hinic_lld.h new file mode 100644 index 
0000000000000000000000000000000000000000..414dfd146f9a74ca6642c2edef0077fea87b921f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_LLD_H_ +#define HINIC_LLD_H_ + +#define HINIC_SLAVE_NIC_DELAY "hinic_slave_nic_delay" +#define HINIC_SLAVE_NIC_DELAY_TIME (5 * HZ) + +struct hinic_lld_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +enum hinic_init_state { + HINIC_INIT_STATE_NONE, + HINIC_INIT_STATE_PCI_INITED, + HINIC_INIT_STATE_HW_IF_INITED, + HINIC_INIT_STATE_HW_PART_INITED, + HINIC_INIT_STATE_HWDEV_INITED, + HINIC_INIT_STATE_DBGTOOL_INITED, + HINIC_INIT_STATE_NIC_INITED, + HINIC_INIT_STATE_ALL_INITED, +}; + +struct hinic_uld_info { + /* uld_dev: should not return NULL even if the function capability + * does not support the upper layer driver. + * uld_dev_name: the NIC driver should copy the net device name, + * the FC driver could copy the FC device name, + * other upper layer drivers don't need to copy anything. + */ + int (*probe)(struct hinic_lld_dev *lld_dev, + void **uld_dev, char *uld_dev_name); + void (*remove)(struct hinic_lld_dev *lld_dev, void *uld_dev); + int (*suspend)(struct hinic_lld_dev *lld_dev, + void *uld_dev, pm_message_t state); + int (*resume)(struct hinic_lld_dev *lld_dev, void *uld_dev); + void (*event)(struct hinic_lld_dev *lld_dev, void *uld_dev, + struct hinic_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +}; + +/* Used for the ULD HiNIC PCIe driver registration interface, + * the original interface is service_register_interface + */ +int hinic_register_uld(enum hinic_service_type uld_type, + struct hinic_uld_info *uld_info); + +/* Used for the ULD HiNIC PCIe driver unregistration interface, + * the original interface is service_unregister_interface + */ +void hinic_unregister_uld(enum hinic_service_type uld_type); + +void *hinic_get_ppf_uld_by_pdev(struct pci_dev *pdev, + enum hinic_service_type type); + +/* used for TOE/IWARP */ +struct net_device *hinic_get_netdev_by_lld(struct hinic_lld_dev *lld_dev); +/* used for TOE/IWARP */ +void *hinic_get_hwdev_by_netdev(struct net_device *netdev); + +struct net_device *hinic_get_netdev_by_pcidev(struct pci_dev *pdev); +void *hinic_get_hwdev_by_ifname(char *ifname); +int hinic_get_chip_name_by_hwdev(void *hwdev, char *ifname); +void *hinic_get_uld_dev_by_ifname(char *ifname, enum hinic_service_type type); +void *hinic_get_uld_by_chip_name(char *ifname, enum hinic_service_type type); + +int hinic_get_pf_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]); +int hinic_set_chip_cos_up_map(struct pci_dev *pdev, u8 *cos_up); +int hinic_get_chip_cos_up_map(struct pci_dev *pdev, bool *is_setted, + u8 *cos_up); +void hinic_get_all_chip_id(void *card_id); +void hinic_get_card_info(void *hwdev, void *bufin); +int hinic_get_device_id(void *hwdev, u16 *dev_id); +void hinic_get_fc_devname(char *devname); +int hinic_get_pf_id(void *hwdev, u32 
port_id, u32 *pf_id, u32 *isvalid); + +void hinic_tool_cnt_inc(void); +void hinic_tool_cnt_dec(void); + +struct hinic_sriov_info; +struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev); + +/* for dpdk */ +void *hinic_get_pci_dev(u16 bdf); +void hinic_dpdk_pcie_remove(void *pdev); +int hinic_dpdk_pcie_probe(void *pdev); + +int hinic_attach_nic(struct hinic_lld_dev *lld_dev); +void hinic_detach_nic(struct hinic_lld_dev *lld_dev); + +int hinic_attach_roce(struct hinic_lld_dev *lld_dev); +void hinic_detach_roce(struct hinic_lld_dev *lld_dev); + +int hinic_disable_nic_rss(struct hinic_lld_dev *lld_dev); +int hinic_enable_nic_rss(struct hinic_lld_dev *lld_dev); + +int hinic_ovs_set_vf_nic_state(struct hinic_lld_dev *lld_dev, + u16 vf_func_id, bool en); + +int hinic_ovs_set_vf_load_state(struct pci_dev *pdev); + +int hinic_get_self_test_result(char *ifname, u32 *result); +enum hinic_init_state hinic_get_init_state_by_ifname(char *ifname); +enum hinic_init_state hinic_get_init_state(struct pci_dev *pdev); + +extern struct hinic_uld_info g_uld_info[SERVICE_T_MAX]; + +struct pci_device_id *hinic_get_pci_device_id(struct pci_dev *pdev); +bool hinic_is_in_host(void); + +bool hinic_is_valid_bar_addr(u64 offset); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 4a8f82938ed5b87c8da6b09e88e08d387c652f0c..47232e1f6ae86ea19e8860cfce41f2e59f9c9e1d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,1085 +13,3232 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt #include -#include -#include #include #include -#include +#include +#include #include +#include +#include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" -#include "hinic_port.h" +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_dbg.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" #include "hinic_tx.h" #include "hinic_rx.h" -#include "hinic_dev.h" - -MODULE_AUTHOR("Huawei Technologies CO., Ltd"); -MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); -MODULE_LICENSE("GPL"); - -static unsigned int tx_weight = 64; -module_param(tx_weight, uint, 0644); -MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); - -static unsigned int rx_weight = 64; -module_param(rx_weight, uint, 0644); -MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); - -#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 -#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200 -#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201 - -#define HINIC_WQ_NAME "hinic_dev" +#include "hinic_qp.h" +#include "hinic_dcb.h" +#include "hinic_lld.h" +#include "hinic_sriov.h" +#include "hinic_pci_id_tbl.h" + +static u16 num_qps; +module_param(num_qps, ushort, 0444); +MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default unset)"); + +static u16 ovs_num_qps = 16; +module_param(ovs_num_qps, ushort, 0444); +MODULE_PARM_DESC(ovs_num_qps, "Number of Queue Pairs in ovs mode (default=16)"); + +#define DEFAULT_POLL_WEIGHT 
64 +static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; +module_param(poll_weight, uint, 0444); +MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)"); + +#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32 +#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + +/* suit for sdi3.0 vm mode, change this define for test best performance */ +#define SDI_VM_PENDING_LIMT 2 +#define SDI_VM_COALESCE_TIMER_CFG 16 +#define SDI_VM_RX_PKT_RATE_HIGH 1000000 +#define SDI_VM_RX_PKT_RATE_LOW 30000 +#define SDI_VM_RX_USECS_HIGH 56 +#define SDI_VM_RX_PENDING_LIMT_HIGH 20 +#define SDI_VM_RX_USECS_LOW 16 +#define SDI_VM_RX_PENDING_LIMT_LOW 2 + +/* if qp_coalesc_use_drv_params_switch !=0, use user setting params */ +static unsigned char qp_coalesc_use_drv_params_switch; +module_param(qp_coalesc_use_drv_params_switch, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_use_drv_params_switch, "QP MSI-X Interrupt coalescing parameter switch (default=0, not use drv parameter)"); + +static unsigned char qp_pending_limit = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(qp_pending_limit, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); + +static unsigned char qp_coalesc_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(qp_coalesc_timer_cfg, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)"); + +/* For arm64 server, the best known configuration of lro max wqe number + * is 4 (8K), for x86_64 server, it is 8 (16K). You can also + * configure these values by hinicadm. + */ +static unsigned char set_max_wqe_num; +module_param(set_max_wqe_num, byte, 0444); +MODULE_PARM_DESC(set_max_wqe_num, "Set lro max wqe number, valid range is 1 - 32, default is 4(arm) / 8(x86)"); -#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff, ushort, 0444); +MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB"); -#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) +static u32 set_lro_timer; +module_param(set_lro_timer, uint, 0444); +MODULE_PARM_DESC(set_lro_timer, "Set lro timer in micro second, valid range is 1 - 1024, default is 16"); -#define work_to_rx_mode_work(work) \ - container_of(work, struct hinic_rx_mode_work, work) +static unsigned char set_link_status_follow = HINIC_LINK_FOLLOW_STATUS_MAX; +module_param(set_link_status_follow, byte, 0444); +MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status. 0 - default, 1 - follow, 2 - separate, other - unset. 
(default unset)"); -#define rx_mode_work_to_nic_dev(rx_mode_work) \ - container_of(rx_mode_work, struct hinic_dev, rx_mode_work) +static unsigned int lro_replenish_thld = 256; +module_param(lro_replenish_thld, uint, 0444); +MODULE_PARM_DESC(lro_replenish_thld, "Number wqe for lro replenish buffer (default=256)"); -static int change_mac_addr(struct net_device *netdev, const u8 *addr); +static bool l2nic_interrupt_switch = true; +module_param(l2nic_interrupt_switch, bool, 0644); +MODULE_PARM_DESC(l2nic_interrupt_switch, "Control whether execute l2nic io interrupt switch or not, default is true"); -static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, - enum hinic_speed speed) -{ - switch (speed) { - case HINIC_SPEED_10MB_LINK: - link_ksettings->base.speed = SPEED_10; - break; +static unsigned char lro_en_status = HINIC_LRO_STATUS_UNSET; +module_param(lro_en_status, byte, 0444); +MODULE_PARM_DESC(lro_en_status, "lro enable status. 0 - disable, 1 - enable, other - unset. (default unset)"); - case HINIC_SPEED_100MB_LINK: - link_ksettings->base.speed = SPEED_100; - break; +static unsigned char qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW; +module_param(qp_pending_limit_low, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit_low, "MSI-X adaptive low coalesce pending limit, range is 0 - 255"); - case HINIC_SPEED_1000MB_LINK: - link_ksettings->base.speed = SPEED_1000; - break; +static unsigned char qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW; +module_param(qp_coalesc_timer_low, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_low, "MSI-X adaptive low coalesce time, range is 0 - 255"); - case HINIC_SPEED_10GB_LINK: - link_ksettings->base.speed = SPEED_10000; - break; +static unsigned char qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH; +module_param(qp_pending_limit_high, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit_high, "MSI-X adaptive high coalesce pending limit, range is 0 - 255"); - case HINIC_SPEED_25GB_LINK: - link_ksettings->base.speed = SPEED_25000; - break; +static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH; +module_param(qp_coalesc_timer_high, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255"); - case HINIC_SPEED_40GB_LINK: - link_ksettings->base.speed = SPEED_40000; - break; +#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq" - case HINIC_SPEED_100GB_LINK: - link_ksettings->base.speed = SPEED_100000; - break; +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK | \ + NETIF_MSG_RX_ERR) - default: - link_ksettings->base.speed = SPEED_UNKNOWN; - break; - } -} +#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) -static int hinic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings - *link_ksettings) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - enum hinic_port_link_state link_state; - struct hinic_port_cap port_cap; - int err; +#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) - ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - ethtool_link_ksettings_add_link_mode(link_ksettings, supported, - Autoneg); +#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) - link_ksettings->base.speed = SPEED_UNKNOWN; - link_ksettings->base.autoneg = AUTONEG_DISABLE; - link_ksettings->base.duplex = DUPLEX_UNKNOWN; +#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BITS_SIZE(nic_dev)) - err = hinic_port_get_cap(nic_dev, &port_cap); - if (err) { - netif_err(nic_dev, 
drv, netdev, - "Failed to get port capabilities\n"); - return err; - } +#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BYTE_SIZE(nic_dev)) - err = hinic_port_link_state(nic_dev, &link_state); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to get port link state\n"); - return err; - } +#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) - if (link_state != HINIC_LINK_STATE_UP) { - netif_info(nic_dev, drv, netdev, "No link\n"); - return err; - } +enum hinic_rx_mod { + HINIC_RX_MODE_UC = 1 << 0, + HINIC_RX_MODE_MC = 1 << 1, + HINIC_RX_MODE_BC = 1 << 2, + HINIC_RX_MODE_MC_ALL = 1 << 3, + HINIC_RX_MODE_PROMISC = 1 << 4, +}; - set_link_speed(link_ksettings, port_cap.speed); +enum hinic_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, + RX_BUFF_VALID_16KB = 16, +}; - if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)) - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, Autoneg); +#define HINIC_AVG_PKT_SMALL 256U +#define HINIC_MODERATONE_DELAY HZ +#define CONVERT_UNIT 1024 - if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE) - link_ksettings->base.autoneg = AUTONEG_ENABLE; +int hinic_netdev_event(struct notifier_block *notifier, + unsigned long event, void *ptr); - link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ? - DUPLEX_FULL : DUPLEX_HALF; - return 0; -} +/* used for netdev notifier register/unregister */ +DEFINE_MUTEX(g_hinic_netdev_notifiers_mutex); +static int hinic_netdev_notifiers_ref_cnt; +static struct notifier_block hinic_netdev_notifier = { + .notifier_call = hinic_netdev_event, +}; -static void hinic_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) +static void hinic_register_notifier(struct hinic_nic_dev *nic_dev) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - - strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); -} + int err; -static void hinic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - ring->rx_max_pending = HINIC_RQ_DEPTH; - ring->tx_max_pending = HINIC_SQ_DEPTH; - ring->rx_pending = HINIC_RQ_DEPTH; - ring->tx_pending = HINIC_SQ_DEPTH; + mutex_lock(&g_hinic_netdev_notifiers_mutex); + hinic_netdev_notifiers_ref_cnt++; + if (hinic_netdev_notifiers_ref_cnt == 1) { + err = register_netdevice_notifier(&hinic_netdev_notifier); + if (err) { + hinic_info(nic_dev, drv, "Register netdevice notifier failed, err: %d\n", + err); + hinic_netdev_notifiers_ref_cnt--; + } + } + mutex_unlock(&g_hinic_netdev_notifiers_mutex); } -static void hinic_get_channels(struct net_device *netdev, - struct ethtool_channels *channels) +static void hinic_unregister_notifier(struct hinic_nic_dev *nic_dev) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; + mutex_lock(&g_hinic_netdev_notifiers_mutex); + if (hinic_netdev_notifiers_ref_cnt == 1) + unregister_netdevice_notifier(&hinic_netdev_notifier); - channels->max_rx = hwdev->nic_cap.max_qps; - channels->max_tx = hwdev->nic_cap.max_qps; - channels->max_other = 0; - channels->max_combined = 0; - channels->rx_count = hinic_hwdev_num_qps(hwdev); - channels->tx_count = hinic_hwdev_num_qps(hwdev); - channels->other_count = 0; - channels->combined_count = 0; + if 
(hinic_netdev_notifiers_ref_cnt) + hinic_netdev_notifiers_ref_cnt--; + mutex_unlock(&g_hinic_netdev_notifiers_mutex); } -static const struct ethtool_ops hinic_ethtool_ops = { - .get_link_ksettings = hinic_get_link_ksettings, - .get_drvinfo = hinic_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = hinic_get_ringparam, - .get_channels = hinic_get_channels, -}; +#define HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 2 +#define HINIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO) -static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) +int hinic_netdev_event(struct notifier_block *notifier, + unsigned long event, void *ptr) { - struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; - struct hinic_rxq_stats rx_stats; + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct net_device *real_dev, *ret; + struct hinic_nic_dev *nic_dev; + u16 vlan_depth; + + if (!is_vlan_dev(ndev)) + return NOTIFY_DONE; + + dev_hold(ndev); + + switch (event) { + case NETDEV_REGISTER: + real_dev = vlan_dev_real_dev(ndev); + nic_dev = hinic_get_uld_dev_by_ifname(real_dev->name, + SERVICE_T_NIC); + if (!nic_dev) + goto out; + + vlan_depth = 1; + ret = vlan_dev_priv(ndev)->real_dev; + while (is_vlan_dev(ret)) { + ret = vlan_dev_priv(ret)->real_dev; + vlan_depth++; + } - u64_stats_init(&rx_stats.syncp); + if (vlan_depth == HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + ndev->vlan_features &= (~HINIC_VLAN_CLEAR_OFFLOAD); + } else if (vlan_depth > HINIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + ndev->hw_features &= (~HINIC_VLAN_CLEAR_OFFLOAD); + ndev->features &= (~HINIC_VLAN_CLEAR_OFFLOAD); + } + + break; - hinic_rxq_get_stats(rxq, &rx_stats); + default: + break; + }; - u64_stats_update_begin(&nic_rx_stats->syncp); - nic_rx_stats->bytes += rx_stats.bytes; - nic_rx_stats->pkts += rx_stats.pkts; - u64_stats_update_end(&nic_rx_stats->syncp); +out: + dev_put(ndev); - hinic_rxq_clean_stats(rxq); + return NOTIFY_DONE; } -static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) +void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status) { - struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; - struct hinic_txq_stats tx_stats; + struct net_device *netdev = nic_dev->netdev; - u64_stats_init(&tx_stats.syncp); + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + test_bit(HINIC_LP_TEST, &nic_dev->flags)) + return; - hinic_txq_get_stats(txq, &tx_stats); + if (status) { + if (netif_carrier_ok(netdev)) + return; - u64_stats_update_begin(&nic_tx_stats->syncp); - nic_tx_stats->bytes += tx_stats.bytes; - nic_tx_stats->pkts += tx_stats.pkts; - nic_tx_stats->tx_busy += tx_stats.tx_busy; - nic_tx_stats->tx_wake += tx_stats.tx_wake; - nic_tx_stats->tx_dropped += tx_stats.tx_dropped; - u64_stats_update_end(&nic_tx_stats->syncp); + nic_dev->link_status = status; + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; - hinic_txq_clean_stats(txq); + nic_dev->link_status = status; + netif_carrier_off(netdev); + nicif_info(nic_dev, link, netdev, "Link is down\n"); + } } -static void update_nic_stats(struct hinic_dev *nic_dev) +static void hinic_heart_lost(struct hinic_nic_dev *nic_dev) { - int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); - - for (i = 0; i < num_qps; i++) - update_rx_stats(nic_dev, &nic_dev->rxqs[i]); - - for (i = 0; i < num_qps; i++) - update_tx_stats(nic_dev, &nic_dev->txqs[i]); + 
nic_dev->heart_status = false; } -/** - * create_txqs - Create the Logical Tx Queues of specific NIC device - * @nic_dev: the specific NIC device - * - * Return 0 - Success, negative - Failure - **/ -static int create_txqs(struct hinic_dev *nic_dev) +static int hinic_setup_qps_resources(struct hinic_nic_dev *nic_dev) { - int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t txq_size; - - if (nic_dev->txqs) - return -EINVAL; - - txq_size = num_txqs * sizeof(*nic_dev->txqs); - nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); - if (!nic_dev->txqs) - return -ENOMEM; + int err; - for (i = 0; i < num_txqs; i++) { - struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); + err = hinic_setup_all_tx_resources(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to create Tx queues\n"); + return err; + } - err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to init Txq\n"); - goto err_init_txq; - } + err = hinic_setup_all_rx_resources(netdev, nic_dev->qps_irq_info); + if (err) { + nicif_err(nic_dev, drv, netdev, + "Failed to create Rx queues\n"); + goto create_rxqs_err; } return 0; -err_init_txq: - for (j = 0; j < i; j++) - hinic_clean_txq(&nic_dev->txqs[j]); +create_rxqs_err: + hinic_free_all_tx_resources(netdev); - devm_kfree(&netdev->dev, nic_dev->txqs); return err; } -/** - * free_txqs - Free the Logical Tx Queues of specific NIC device - * @nic_dev: the specific NIC device - **/ -static void free_txqs(struct hinic_dev *nic_dev) -{ - int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); - struct net_device *netdev = nic_dev->netdev; - - if (!nic_dev->txqs) - return; - - for (i = 0; i < num_txqs; i++) - hinic_clean_txq(&nic_dev->txqs[i]); - - devm_kfree(&netdev->dev, nic_dev->txqs); - nic_dev->txqs = NULL; -} - -/** - * create_txqs - Create the Logical Rx Queues of specific NIC device - * @nic_dev: the specific NIC device - * - * Return 0 - Success, negative - Failure - **/ -static int create_rxqs(struct hinic_dev *nic_dev) +static int hinic_configure(struct hinic_nic_dev *nic_dev) { - int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t rxq_size; - - if (nic_dev->rxqs) - return -EINVAL; - - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); - if (!nic_dev->rxqs) - return -ENOMEM; - - for (i = 0; i < num_rxqs; i++) { - struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); + int err; - err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to init rxq\n"); - goto err_init_rxq; - } + /* rx rss init */ + err = hinic_rx_configure(netdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n"); + return err; } return 0; +} -err_init_rxq: - for (j = 0; j < i; j++) - hinic_clean_rxq(&nic_dev->rxqs[j]); - - devm_kfree(&netdev->dev, nic_dev->rxqs); - return err; +static void hinic_remove_configure(struct hinic_nic_dev *nic_dev) +{ + hinic_rx_remove_configure(nic_dev->netdev); } -/** - * free_txqs - Free the Logical Rx Queues of specific NIC device - * @nic_dev: the specific NIC device - **/ -static void free_rxqs(struct hinic_dev *nic_dev) +static void hinic_setup_dcb_qps(struct hinic_nic_dev *nic_dev, u16 max_qps) { - int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; + u16 num_rss; + u8 num_tcs; 
+ u8 i; - if (!nic_dev->rxqs) + if (!test_bit(HINIC_DCB_ENABLE, &nic_dev->flags) || + !test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) return; - for (i = 0; i < num_rxqs; i++) - hinic_clean_rxq(&nic_dev->rxqs[i]); - - devm_kfree(&netdev->dev, nic_dev->rxqs); - nic_dev->rxqs = NULL; + num_tcs = (u8)netdev_get_num_tc(netdev); + /* For now, we don't support to change num_tcs */ + if (num_tcs != nic_dev->max_cos || max_qps < num_tcs) { + nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %d or num_qps: %d, disable DCB\n", + num_tcs, max_qps); + netdev_reset_tc(netdev); + clear_bit(HINIC_DCB_ENABLE, &nic_dev->flags); + /* if we can't enable rss or get enough num_qps, + * need to sync default configure to hw + */ + hinic_configure_dcb(netdev); + } else { + /* We bind sq with cos but not tc */ + num_rss = (u16)(max_qps / nic_dev->max_cos); + num_rss = min_t(u16, num_rss, nic_dev->rss_limit); + for (i = 0; i < nic_dev->max_cos; i++) + netdev_set_tc_queue(netdev, i, num_rss, + (u16)(num_rss * i)); + + nic_dev->num_rss = num_rss; + nic_dev->num_qps = (u16)(num_tcs * num_rss); + } } -static int hinic_open(struct net_device *netdev) +/* determin num_qps from rss_tmpl_id/irq_num/dcb_en */ +static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - enum hinic_port_link_state link_state; - int err, ret, num_qps; + struct net_device *netdev = nic_dev->netdev; + u32 irq_size; + u16 resp_irq_num, i; + int err; - if (!(nic_dev->flags & HINIC_INTF_UP)) { - err = hinic_hwdev_ifup(nic_dev->hwdev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed - HW interface up\n"); - return err; - } + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nic_dev->num_rss = nic_dev->rss_limit; + nic_dev->num_qps = nic_dev->rss_limit; + } else { + nic_dev->num_rss = 0; + nic_dev->num_qps = 1; } - err = create_txqs(nic_dev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to create Tx queues\n"); - goto err_create_txqs; - } + hinic_setup_dcb_qps(nic_dev, nic_dev->max_qps); - err = create_rxqs(nic_dev); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to create Rx queues\n"); - goto err_create_rxqs; + irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->num_qps; + if (!irq_size) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n"); + return -EINVAL; + } + nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL); + if (!nic_dev->qps_irq_info) { + nicif_err(nic_dev, drv, netdev, "Failed to alloc qps_irq_info\n"); + return -ENOMEM; } - num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); - netif_set_real_num_tx_queues(netdev, num_qps); - netif_set_real_num_rx_queues(netdev, num_qps); - - err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); + err = hinic_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->num_qps, + nic_dev->qps_irq_info, &resp_irq_num); if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set port state\n"); - goto err_port_state; + nicif_err(nic_dev, drv, netdev, "Failed to alloc irqs\n"); + kfree(nic_dev->qps_irq_info); + return err; } - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - goto err_func_port_state; + /* available irq number is less than rq numbers, adjust rq numbers */ + if (resp_irq_num < nic_dev->num_qps) { + nic_dev->num_qps = resp_irq_num; + nic_dev->num_rss = nic_dev->num_qps; + hinic_setup_dcb_qps(nic_dev, nic_dev->num_qps); + nicif_warn(nic_dev, drv, netdev, + "Can not get enough irqs, adjust 
num_qps to %d\n", + nic_dev->num_qps); + /* after adjust num_qps, free the remaind irq */ + for (i = nic_dev->num_qps; i < resp_irq_num; i++) + hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC, + nic_dev->qps_irq_info[i].irq_id); } - /* Wait up to 3 sec between port enable to link state */ - msleep(3000); + nicif_info(nic_dev, drv, netdev, "Finally num_qps: %d, num_rss: %d\n", + nic_dev->num_qps, nic_dev->num_rss); - down(&nic_dev->mgmt_lock); + return 0; +} - err = hinic_port_link_state(nic_dev, &link_state); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to get link state\n"); - goto err_port_link; - } +static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev) +{ + u16 i; - if (link_state == HINIC_LINK_STATE_UP) - nic_dev->flags |= HINIC_LINK_UP; + for (i = 0; i < nic_dev->num_qps; i++) + hinic_free_irq(nic_dev->hwdev, SERVICE_T_NIC, + nic_dev->qps_irq_info[i].irq_id); - nic_dev->flags |= HINIC_INTF_UP; + kfree(nic_dev->qps_irq_info); +} - if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == - (HINIC_LINK_UP | HINIC_INTF_UP)) { - netif_info(nic_dev, drv, netdev, "link + intf UP\n"); - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - } +static int hinic_poll(struct napi_struct *napi, int budget) +{ + int tx_pkts, rx_pkts; + struct hinic_irq *irq_cfg = container_of(napi, struct hinic_irq, napi); + struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - up(&nic_dev->mgmt_lock); + rx_pkts = hinic_rx_poll(irq_cfg->rxq, budget); - netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n"); - return 0; + tx_pkts = hinic_tx_poll(irq_cfg->txq, budget); + + if (tx_pkts >= budget || rx_pkts >= budget) + return budget; + + set_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag); + rx_pkts += hinic_rx_poll(irq_cfg->rxq, budget - rx_pkts); + tx_pkts += hinic_tx_poll(irq_cfg->txq, budget - tx_pkts); + if (rx_pkts >= budget || tx_pkts >= budget) { + clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag); + return budget; + } -err_port_link: - up(&nic_dev->mgmt_lock); - ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); - if (ret) - netif_warn(nic_dev, drv, netdev, - "Failed to revert func port state\n"); + napi_complete(napi); + + if (!test_and_set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag)) { + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + hinic_set_msix_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC_MSIX_ENABLE); + else if (!nic_dev->in_vm && + (hinic_get_func_mode(nic_dev->hwdev) == + FUNC_MOD_NORMAL_HOST)) + enable_irq(irq_cfg->irq_id); + } -err_func_port_state: - ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - if (ret) - netif_warn(nic_dev, drv, netdev, - "Failed to revert port state\n"); + return max(tx_pkts, rx_pkts); +} -err_port_state: - free_rxqs(nic_dev); +static void qp_add_napi(struct hinic_irq *irq_cfg) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); -err_create_rxqs: - free_txqs(nic_dev); + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, + hinic_poll, nic_dev->poll_weight); + napi_enable(&irq_cfg->napi); +} -err_create_txqs: - if (!(nic_dev->flags & HINIC_INTF_UP)) - hinic_hwdev_ifdown(nic_dev->hwdev); - return err; +static void qp_del_napi(struct hinic_irq *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_napi_del(&irq_cfg->napi); } -static int hinic_close(struct net_device *netdev) +static irqreturn_t qp_irq(int irq, void *data) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - unsigned int flags; - int err; + struct hinic_irq *irq_cfg = (struct hinic_irq *)data; + struct hinic_nic_dev *nic_dev = 
netdev_priv(irq_cfg->netdev); + u16 msix_entry_idx = irq_cfg->msix_entry_idx; + + if (napi_schedule_prep(&irq_cfg->napi)) { + if (l2nic_interrupt_switch) { + /* Disable the interrupt until napi will be completed */ + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + hinic_set_msix_state(nic_dev->hwdev, + msix_entry_idx, + HINIC_MSIX_DISABLE); + } else if (!nic_dev->in_vm && + (hinic_get_func_mode(nic_dev->hwdev) == + FUNC_MOD_NORMAL_HOST)) { + disable_irq_nosync(irq_cfg->irq_id); + } + + clear_bit(HINIC_INTR_ON, &irq_cfg->intr_flag); + } - down(&nic_dev->mgmt_lock); + hinic_misx_intr_clear_resend_bit(nic_dev->hwdev, + msix_entry_idx, 1); - flags = nic_dev->flags; - nic_dev->flags &= ~HINIC_INTF_UP; + clear_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag); - netif_carrier_off(netdev); - netif_tx_disable(netdev); + __napi_schedule(&irq_cfg->napi); + } else if (!test_bit(HINIC_RESEND_ON, &irq_cfg->intr_flag)) { + hinic_misx_intr_clear_resend_bit(nic_dev->hwdev, msix_entry_idx, + 1); + } - update_nic_stats(nic_dev); + return IRQ_HANDLED; +} - up(&nic_dev->mgmt_lock); +static int hinic_request_irq(struct hinic_irq *irq_cfg, u16 q_id) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + struct nic_interrupt_info info = {0}; + int err; - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + qp_add_napi(irq_cfg); + + info.msix_index = irq_cfg->msix_entry_idx; + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; + info.coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = + nic_dev->intr_coalesce[q_id].pending_limt; + err = hinic_set_interrupt_cfg(nic_dev->hwdev, info); if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); + nicif_err(nic_dev, drv, irq_cfg->netdev, + "Failed to set RX interrupt coalescing attribute\n"); + qp_del_napi(irq_cfg); return err; } - err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + err = request_irq(irq_cfg->irq_id, &qp_irq, 0, + irq_cfg->irq_name, irq_cfg); if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); + nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n"); + qp_del_napi(irq_cfg); return err; } - free_rxqs(nic_dev); - free_txqs(nic_dev); - - if (flags & HINIC_INTF_UP) - hinic_hwdev_ifdown(nic_dev->hwdev); + /* assign the mask for this irq */ + irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); - netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); return 0; } -static int hinic_change_mtu(struct net_device *netdev, int new_mtu) +static int set_interrupt_moder(struct hinic_nic_dev *nic_dev, u16 q_id, + u8 coalesc_timer_cfg, u8 pending_limt) { - struct hinic_dev *nic_dev = netdev_priv(netdev); + struct nic_interrupt_info interrupt_info = {0}; int err; - netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); - - err = hinic_port_set_mtu(nic_dev, new_mtu); - if (err) - netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); - else - netdev->mtu = new_mtu; + if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && + pending_limt == nic_dev->rxqs[q_id].last_pending_limt) + return 0; + + /* netdev not running or qp not in using, + * don't need to set 
coalesce to hw + */ + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->num_qps) + return 0; + + interrupt_info.lli_set = 0; + interrupt_info.interrupt_coalesc_set = 1; + interrupt_info.coalesc_timer_cfg = coalesc_timer_cfg; + interrupt_info.pending_limt = pending_limt; + interrupt_info.msix_index = nic_dev->irq_cfg[q_id].msix_entry_idx; + interrupt_info.resend_timer_cfg = + nic_dev->intr_coalesce[q_id].resend_timer_cfg; + + err = hinic_set_interrupt_cfg(nic_dev->hwdev, interrupt_info); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed modifying moderation for Queue: %d\n", q_id); + } else { + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = pending_limt; + } return err; } -/** - * change_mac_addr - change the main mac address of network device - * @netdev: network device - * @addr: mac address to set - * - * Return 0 - Success, negative - Failure - **/ -static int change_mac_addr(struct net_device *netdev, const u8 *addr) +static void __calc_coal_para(struct hinic_nic_dev *nic_dev, + struct hinic_intr_coal_info *q_coal, u64 rate, + u8 *coalesc_timer_cfg, u8 *pending_limt) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; - int err; + if (rate < q_coal->pkt_rate_low) { + *coalesc_timer_cfg = q_coal->rx_usecs_low; + *pending_limt = q_coal->rx_pending_limt_low; + } else if (rate > q_coal->pkt_rate_high) { + *coalesc_timer_cfg = q_coal->rx_usecs_high; + *pending_limt = q_coal->rx_pending_limt_high; + } else { + *coalesc_timer_cfg = + (u8)((rate - q_coal->pkt_rate_low) * + (q_coal->rx_usecs_high - + q_coal->rx_usecs_low) / + (q_coal->pkt_rate_high - + q_coal->pkt_rate_low) + + q_coal->rx_usecs_low); + if (nic_dev->in_vm) + *pending_limt = (u8)((rate - q_coal->pkt_rate_low) * + (q_coal->rx_pending_limt_high - + q_coal->rx_pending_limt_low) / + (q_coal->pkt_rate_high - + q_coal->pkt_rate_low) + + q_coal->rx_pending_limt_low); + else + *pending_limt = q_coal->rx_pending_limt_low; + } +} - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; +static void update_queue_coal(struct hinic_nic_dev *nic_dev, u16 qid, + u64 rate, u64 avg_pkt_size, u64 tx_rate) +{ + struct hinic_intr_coal_info *q_coal; + u8 coalesc_timer_cfg, pending_limt; - netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + q_coal = &nic_dev->intr_coalesce[qid]; - down(&nic_dev->mgmt_lock); + if ((rate > HINIC_RX_RATE_THRESH && + avg_pkt_size > HINIC_AVG_PKT_SMALL) || + (nic_dev->in_vm && rate > HINIC_RX_RATE_THRESH)) { + __calc_coal_para(nic_dev, q_coal, rate, + &coalesc_timer_cfg, &pending_limt); + } else { + coalesc_timer_cfg = HINIC_LOWEST_LATENCY; + pending_limt = q_coal->rx_pending_limt_low; + } - do { - err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to delete mac\n"); - break; - } + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, + pending_limt); +} - err = hinic_port_add_mac(nic_dev, addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); - break; - } +#define SDI_VM_PPS_3W 30000 +#define SDI_VM_PPS_5W 50000 - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); +#define SDI_VM_BPS_100MB 12500000 +#define SDI_VM_BPS_1GB 125000000 - up(&nic_dev->mgmt_lock); - return err; +static void update_queue_coal_sdi_vm(struct hinic_nic_dev *nic_dev, + u16 qid, u64 rx_pps, u64 rx_bps, + u64 tx_pps, 
u64 tx_bps) +{ + struct hinic_intr_coal_info *q_coal = NULL; + u8 coalesc_timer_cfg, pending_limt; + + q_coal = &nic_dev->intr_coalesce[qid]; + if (qp_coalesc_use_drv_params_switch == 0) { + if (rx_pps < SDI_VM_PPS_3W && + tx_pps < SDI_VM_PPS_3W && + rx_bps < SDI_VM_BPS_100MB && + tx_bps < SDI_VM_BPS_100MB) { + set_interrupt_moder(nic_dev, qid, 0, 0); + } else if (tx_pps > SDI_VM_PPS_3W && + tx_pps < SDI_VM_PPS_5W && + tx_bps > SDI_VM_BPS_1GB) { + set_interrupt_moder(nic_dev, qid, 7, 7); + } else { + __calc_coal_para(nic_dev, q_coal, rx_pps, + &coalesc_timer_cfg, + &pending_limt); + set_interrupt_moder(nic_dev, qid, + coalesc_timer_cfg, + pending_limt); + } + } else { + __calc_coal_para(nic_dev, q_coal, rx_pps, + &coalesc_timer_cfg, + &pending_limt); + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, + pending_limt); + } } -static int hinic_set_mac_addr(struct net_device *netdev, void *addr) +static void hinic_auto_moderation_work(struct work_struct *work) { - unsigned char new_mac[ETH_ALEN]; - struct sockaddr *saddr = addr; - int err; + struct delayed_work *delay = to_delayed_work(work); + struct hinic_nic_dev *nic_dev = container_of(delay, + struct hinic_nic_dev, + moderation_task); + unsigned long period = (unsigned long)(jiffies - + nic_dev->last_moder_jiffies); + u64 rx_packets, rx_bytes, rx_pkt_diff, rate, avg_pkt_size; + u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate, rx_bps, tx_bps; + u16 qid; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) + return; - memcpy(new_mac, saddr->sa_data, ETH_ALEN); + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC_MODERATONE_DELAY); - err = change_mac_addr(netdev, new_mac); - if (!err) - memcpy(netdev->dev_addr, new_mac, ETH_ALEN); + if (!nic_dev->adaptive_rx_coal || !period) + return; - return err; + for (qid = 0; qid < nic_dev->num_qps; qid++) { + rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; + rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; + tx_packets = nic_dev->txqs[qid].txq_stats.packets; + tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; + + rx_pkt_diff = + rx_packets - nic_dev->rxqs[qid].last_moder_packets; + avg_pkt_size = rx_pkt_diff ? 
+ ((unsigned long)(rx_bytes - + nic_dev->rxqs[qid].last_moder_bytes)) / + rx_pkt_diff : 0; + + rate = rx_pkt_diff * HZ / period; + tx_pkt_diff = + tx_packets - nic_dev->txqs[qid].last_moder_packets; + tx_rate = tx_pkt_diff * HZ / period; + + rx_bps = (unsigned long)(rx_bytes - + nic_dev->rxqs[qid].last_moder_bytes) + * HZ / period; + tx_bps = (unsigned long)(tx_bytes - + nic_dev->txqs[qid].last_moder_bytes) + * HZ / period; + if ((nic_dev->is_vm_slave && nic_dev->in_vm) || + nic_dev->is_bm_slave) { + update_queue_coal_sdi_vm(nic_dev, qid, rate, rx_bps, + tx_rate, tx_bps); + } else { + update_queue_coal(nic_dev, qid, rate, avg_pkt_size, + tx_rate); + } + + nic_dev->rxqs[qid].last_moder_packets = rx_packets; + nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; + nic_dev->txqs[qid].last_moder_packets = tx_packets; + nic_dev->txqs[qid].last_moder_bytes = tx_bytes; + } + + nic_dev->last_moder_jiffies = jiffies; } -/** - * add_mac_addr - add mac address to network device - * @netdev: network device - * @addr: mac address to add - * - * Return 0 - Success, negative - Failure - **/ -static int add_mac_addr(struct net_device *netdev, const u8 *addr) +static void hinic_release_irq(struct hinic_irq *irq_cfg) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; + irq_set_affinity_hint(irq_cfg->irq_id, NULL); + synchronize_irq(irq_cfg->irq_id); + free_irq(irq_cfg->irq_id, irq_cfg); + qp_del_napi(irq_cfg); +} + +static int hinic_qps_irq_init(struct hinic_nic_dev *nic_dev) +{ + struct pci_dev *pdev = nic_dev->pdev; + struct irq_info *qp_irq_info; + struct hinic_irq *irq_cfg; + u16 q_id, i; + u32 local_cpu; int err; - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; + nic_dev->irq_cfg = kcalloc(nic_dev->num_qps, sizeof(*nic_dev->irq_cfg), + GFP_KERNEL); + if (!nic_dev->irq_cfg) { + nic_err(&pdev->dev, "Failed to alloc irq cfg\n"); + return -ENOMEM; + } + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + qp_irq_info = &nic_dev->qps_irq_info[q_id]; + irq_cfg = &nic_dev->irq_cfg[q_id]; + + irq_cfg->irq_id = qp_irq_info->irq_id; + irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx; + irq_cfg->netdev = nic_dev->netdev; + irq_cfg->txq = &nic_dev->txqs[q_id]; + irq_cfg->rxq = &nic_dev->rxqs[q_id]; + nic_dev->rxqs[q_id].irq_cfg = irq_cfg; + + if (nic_dev->force_affinity) { + irq_cfg->affinity_mask = nic_dev->affinity_mask; + } else { + local_cpu = + cpumask_local_spread(q_id, + dev_to_node(&pdev->dev)); + cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); + } - netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + err = snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), + "%s_qp%d", nic_dev->netdev->name, q_id); + if (err <= 0 || err >= (int)sizeof(irq_cfg->irq_name)) { + nic_err(&pdev->dev, + "Failed snprintf irq_name, function return(%d) and dest_len(%d)\n", + err, (int)sizeof(irq_cfg->irq_name)); + goto req_tx_irq_err; + } - down(&nic_dev->mgmt_lock); + set_bit(HINIC_INTR_ON, &irq_cfg->intr_flag); - do { - err = hinic_port_add_mac(nic_dev, addr, vid); + err = hinic_request_irq(irq_cfg, q_id); if (err) { - netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); - break; + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n"); + goto req_tx_irq_err; } - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); + hinic_set_msix_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC_MSIX_ENABLE); + } - up(&nic_dev->mgmt_lock); - return err; 
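Aside: hinic_auto_moderation_work() above re-queues itself on nic_dev->workq and turns per-queue counter deltas into packet and byte rates over the elapsed jiffies before choosing coalescing parameters. The fragment below is a self-contained, illustrative version of that rate calculation only; the helper name is invented, and it assumes div_u64() to keep the 64-bit division portable.

#include <linux/jiffies.h>
#include <linux/math64.h>

/* Illustrative only: packets-per-second from a counter delta measured
 * over a jiffies interval, mirroring the "diff * HZ / period" idiom.
 */
static u64 example_pkts_per_sec(u64 prev_pkts, u64 cur_pkts,
				unsigned long prev_jiffies)
{
	unsigned long period = jiffies - prev_jiffies;

	if (!period)
		return 0;	/* back-to-back runs: avoid dividing by zero */

	return div_u64((cur_pkts - prev_pkts) * HZ, (u32)period);
}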
-} + INIT_DELAYED_WORK(&nic_dev->moderation_task, + hinic_auto_moderation_work); -/** - * remove_mac_addr - remove mac address from network device - * @netdev: network device - * @addr: mac address to remove - * - * Return 0 - Success, negative - Failure - **/ -static int remove_mac_addr(struct net_device *netdev, const u8 *addr) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - u16 vid = 0; - int err; + return 0; - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; +req_tx_irq_err: + for (i = 0; i < q_id; i++) { + hinic_set_msix_state(nic_dev->hwdev, + nic_dev->irq_cfg[i].msix_entry_idx, + HINIC_MSIX_DISABLE); + hinic_release_irq(&nic_dev->irq_cfg[i]); + } - netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + kfree(nic_dev->irq_cfg); - down(&nic_dev->mgmt_lock); + return err; +} - do { - err = hinic_port_del_mac(nic_dev, addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to delete mac\n"); - break; - } +static void hinic_qps_irq_deinit(struct hinic_nic_dev *nic_dev) +{ + u16 q_id; - vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); - } while (vid != VLAN_N_VID); + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + hinic_set_msix_state(nic_dev->hwdev, + nic_dev->irq_cfg[q_id].msix_entry_idx, + HINIC_MSIX_DISABLE); + hinic_release_irq(&nic_dev->irq_cfg[q_id]); + } - up(&nic_dev->mgmt_lock); - return err; + kfree(nic_dev->irq_cfg); } -static int hinic_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +int hinic_force_port_disable(struct hinic_nic_dev *nic_dev) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - int ret, err; + int err; - netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid); + down(&nic_dev->port_state_sem); - down(&nic_dev->mgmt_lock); + err = hinic_set_port_enable(nic_dev->hwdev, false); + if (!err) + nic_dev->force_port_disable = true; - err = hinic_port_add_vlan(nic_dev, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to add vlan\n"); - goto err_vlan_add; - } + up(&nic_dev->port_state_sem); - err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set mac\n"); - goto err_add_mac; - } + return err; +} + +int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable) +{ + int err = 0; - bitmap_set(nic_dev->vlan_bitmap, vid, 1); + down(&nic_dev->port_state_sem); - up(&nic_dev->mgmt_lock); - return 0; + nic_dev->force_port_disable = false; + err = hinic_set_port_enable(nic_dev->hwdev, enable); -err_add_mac: - ret = hinic_port_del_vlan(nic_dev, vid); - if (ret) - netif_err(nic_dev, drv, netdev, - "Failed to revert by removing vlan\n"); + up(&nic_dev->port_state_sem); -err_vlan_add: - up(&nic_dev->mgmt_lock); return err; } -static int hinic_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable) { - struct hinic_dev *nic_dev = netdev_priv(netdev); int err; - netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid); + down(&nic_dev->port_state_sem); - down(&nic_dev->mgmt_lock); - - err = hinic_port_del_vlan(nic_dev, vid); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); - goto err_del_vlan; + /* Do nothing when force disable + * Port will disable when call force port disable + * and should not enable port when in force mode + */ + if (nic_dev->force_port_disable) { + 
up(&nic_dev->port_state_sem); + return 0; } - bitmap_clear(nic_dev->vlan_bitmap, vid, 1); + err = hinic_set_port_enable(nic_dev->hwdev, enable); - up(&nic_dev->mgmt_lock); - return 0; + up(&nic_dev->port_state_sem); -err_del_vlan: - up(&nic_dev->mgmt_lock); return err; } -static void set_rx_mode(struct work_struct *work) +static void hinic_print_link_message(struct hinic_nic_dev *nic_dev, + u8 link_status) { - struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); - struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); - - netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n"); + if (nic_dev->link_status == link_status) + return; - hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode); + nic_dev->link_status = link_status; - __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); - __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); + nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n", + (link_status ? "up" : "down")); } -static void hinic_set_rx_mode(struct net_device *netdev) +int hinic_open(struct net_device *netdev) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rx_mode_work *rx_mode_work; - u32 rx_mode; - - rx_mode_work = &nic_dev->rx_mode_work; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 link_status = 0; + int err; - rx_mode = HINIC_RX_MODE_UC | - HINIC_RX_MODE_MC | - HINIC_RX_MODE_BC; + if (test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n"); + return 0; + } - if (netdev->flags & IFF_PROMISC) - rx_mode |= HINIC_RX_MODE_PROMISC; - else if (netdev->flags & IFF_ALLMULTI) - rx_mode |= HINIC_RX_MODE_MC_ALL; + err = hinic_setup_num_qps(nic_dev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n"); + return err; + } - rx_mode_work->rx_mode = rx_mode; + err = hinic_create_qps(nic_dev->hwdev, nic_dev->num_qps, + nic_dev->sq_depth, nic_dev->rq_depth, + nic_dev->qps_irq_info, HINIC_MAX_SQ_BUFDESCS); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to create queue pairs\n"); + goto create_qps_err; + } - queue_work(nic_dev->workq, &rx_mode_work->work); -} + err = hinic_setup_qps_resources(nic_dev); + if (err) + goto setup_qps_resources_err; -static void hinic_tx_timeout(struct net_device *netdev) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); + err = hinic_init_qp_ctxts(nic_dev->hwdev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to init qp ctxts\n"); + goto init_qp_ctxts_err; + } - netif_err(nic_dev, drv, netdev, "Tx timeout\n"); -} + err = hinic_set_port_mtu(nic_dev->hwdev, netdev->mtu); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); + goto mtu_err; + } -static void hinic_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rxq_stats *nic_rx_stats; - struct hinic_txq_stats *nic_tx_stats; + err = hinic_configure(nic_dev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to configure txrx\n"); + goto cfg_err; + } - nic_rx_stats = &nic_dev->rx_stats; - nic_tx_stats = &nic_dev->tx_stats; + err = hinic_qps_irq_init(nic_dev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to qps irq init\n"); + goto qps_irqs_init_err; + } - down(&nic_dev->mgmt_lock); + err = hinic_set_vport_enable(nic_dev->hwdev, true); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n"); + goto vport_enable_err; + } - if (nic_dev->flags & HINIC_INTF_UP) - 
update_nic_stats(nic_dev); + err = hinic_maybe_set_port_state(nic_dev, true); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to enable port\n"); + goto port_enable_err; + } - up(&nic_dev->mgmt_lock); + set_bit(HINIC_INTF_UP, &nic_dev->flags); - stats->rx_bytes = nic_rx_stats->bytes; - stats->rx_packets = nic_rx_stats->pkts; + netif_set_real_num_tx_queues(netdev, nic_dev->num_qps); + netif_set_real_num_rx_queues(netdev, nic_dev->num_qps); + netif_tx_wake_all_queues(netdev); - stats->tx_bytes = nic_tx_stats->bytes; - stats->tx_packets = nic_tx_stats->pkts; - stats->tx_errors = nic_tx_stats->tx_dropped; -} + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC_MODERATONE_DELAY); + + err = hinic_get_link_state(nic_dev->hwdev, &link_status); + if (!err && link_status) { + hinic_update_pf_bw(nic_dev->hwdev); + netif_carrier_on(netdev); + } + + hinic_print_link_message(nic_dev, link_status); + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); + + return 0; + +port_enable_err: + hinic_set_vport_enable(nic_dev->hwdev, false); + +vport_enable_err: + hinic_flush_sq_res(nic_dev->hwdev); + /* After set vport disable 100ms, no packets will be send to host */ + msleep(100); + hinic_qps_irq_deinit(nic_dev); + +qps_irqs_init_err: + hinic_remove_configure(nic_dev); + +cfg_err: +mtu_err: + hinic_free_qp_ctxts(nic_dev->hwdev); + +init_qp_ctxts_err: + hinic_free_all_rx_resources(netdev); + hinic_free_all_tx_resources(netdev); + +setup_qps_resources_err: + hinic_free_qps(nic_dev->hwdev); + +create_qps_err: + hinic_destroy_num_qps(nic_dev); + + return err; +} + +int hinic_close(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (!test_and_clear_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n"); + return 0; + } + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + cancel_delayed_work_sync(&nic_dev->moderation_task); + + if (hinic_get_chip_present_flag(nic_dev->hwdev)) { + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); + + hinic_maybe_set_port_state(nic_dev, false); + + hinic_set_vport_enable(nic_dev->hwdev, false); + + hinic_flush_txqs(netdev); + hinic_flush_sq_res(nic_dev->hwdev); + /* After set vport disable 100ms, + * no packets will be send to host + */ + msleep(100); + } + + hinic_qps_irq_deinit(nic_dev); + hinic_remove_configure(nic_dev); + + if (hinic_get_chip_present_flag(nic_dev->hwdev)) + hinic_free_qp_ctxts(nic_dev->hwdev); + + mutex_lock(&nic_dev->nic_mutex); + hinic_free_all_rx_resources(netdev); + + hinic_free_all_tx_resources(netdev); + + hinic_free_qps(nic_dev->hwdev); + + hinic_destroy_num_qps(nic_dev); + mutex_unlock(&nic_dev->nic_mutex); + + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +static inline u32 calc_toeplitz_rss(u32 sip, u32 dip, u32 sport, u32 dport, + const u32 *rss_key) +{ + u32 i, port, rss = 0; + + port = (sport << 16) | dport; + + /* The key - SIP, DIP, SPORT, DPORT */ + for (i = 0; i < 32; i++) + if (sip & ((u32)1 << (u32)(31 - i))) + rss ^= (rss_key[0] << i) | + (u32)((u64)rss_key[1] >> (32 - i)); + + for (i = 0; i < 32; i++) + if (dip & ((u32)1 << (u32)(31 - i))) + rss ^= (rss_key[1] << i) | + (u32)((u64)rss_key[2] >> (32 - i)); + + for (i = 0; i < 32; i++) + if (port & ((u32)1 << (u32)(31 - i))) + rss ^= (rss_key[2] << i) | + 
(u32)((u64)rss_key[3] >> (32 - i)); + + return rss; +} + +static u16 select_queue_by_toeplitz(struct net_device *dev, + struct sk_buff *skb, + unsigned int num_tx_queues) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(dev); + struct tcphdr *tcphdr; + struct iphdr *iphdr; + u32 hash = 0; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + while (unlikely(hash >= num_tx_queues)) + hash -= num_tx_queues; + return (u16)hash; + } + + /*lint -save -e778*/ + if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { + iphdr = ip_hdr(skb); + if (iphdr->protocol == IPPROTO_UDP || + iphdr->protocol == IPPROTO_TCP) { + tcphdr = tcp_hdr(skb); + hash = calc_toeplitz_rss(ntohl(iphdr->daddr), + ntohl(iphdr->saddr), + ntohs(tcphdr->dest), + ntohs(tcphdr->source), + nic_dev->rss_hkey_user_be); + } + } + /*lint -restore*/ + + return (u16)nic_dev->rss_indir_user[hash & 0xFF]; +} + +static u16 hinic_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT; + + if (netdev_get_num_tc(netdev) || !nic_dev->rss_hkey_user_be) + goto fallback; + + if (nic_dev->rss_hash_engine == HINIC_RSS_HASH_ENGINE_TYPE_TOEP && + test_bit(HINIC_SAME_RXTX, &nic_dev->flags)) + return select_queue_by_toeplitz(netdev, skb, + netdev->real_num_tx_queues); + +fallback: + return fallback(netdev, skb, sb_dev); +} + +static void hinic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_txq_stats *txq_stats; + struct hinic_rxq_stats *rxq_stats; + struct hinic_txq *txq; + struct hinic_rxq *rxq; + u64 bytes, packets, dropped, errors; + unsigned int start; + int i; + + bytes = 0; + packets = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->txqs) + break; + + txq = &nic_dev->txqs[i]; + txq_stats = &txq->txq_stats; + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + bytes += txq_stats->bytes; + packets += txq_stats->packets; + dropped += txq_stats->dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + } + stats->tx_packets = packets; + stats->tx_bytes = bytes; + stats->tx_dropped = dropped; + + bytes = 0; + packets = 0; + errors = 0; + dropped = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->rxqs) + break; + + rxq = &nic_dev->rxqs[i]; + rxq_stats = &rxq->rxq_stats; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + bytes += rxq_stats->bytes; + packets += rxq_stats->packets; + errors += rxq_stats->csum_errors + + rxq_stats->other_errors; + dropped += rxq_stats->dropped; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + } + stats->rx_packets = packets; + stats->rx_bytes = bytes; + stats->rx_errors = errors; + stats->rx_dropped = dropped; +} + +static void hinic_tx_timeout(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 msix_idx; + u8 q_id; + + HINIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) + continue; + + msix_idx = nic_dev->irq_cfg[q_id].msix_entry_idx; + nicif_info(nic_dev, drv, netdev, + "txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx, msix mask: %d, intr_flag: 0x%lx\n", + q_id, hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id), + 
hinic_get_sq_hw_ci(nic_dev->hwdev, q_id), + hinic_get_sq_local_ci(nic_dev->hwdev, q_id), + nic_dev->irq_cfg[q_id].napi.state, + hinic_get_msix_state(nic_dev->hwdev, msix_idx), + nic_dev->irq_cfg[q_id].intr_flag); + } +} + +static int hinic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u32 mtu = (u32)new_mtu; + u32 xdp_max_mtu; + int err = 0; + + if (hinic_is_xdp_enable(nic_dev)) { + xdp_max_mtu = hinic_xdp_max_mtu(nic_dev); + if (mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Max MTU for xdp usage is %d\n", xdp_max_mtu); + return -EINVAL; + } + } + + err = hinic_set_port_mtu(nic_dev->hwdev, mtu); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n", + new_mtu); + } else { + nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = mtu; + } + + return err; +} + +static int hinic_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct sockaddr *saddr = addr; + u16 func_id; + int err; + + if (!FUNC_SUPPORT_CHANGE_MAC(nic_dev->hwdev)) { + nicif_warn(nic_dev, drv, netdev, + "Current function don't support to set mac\n"); + return -EOPNOTSUPP; + } + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) { + nicif_info(nic_dev, drv, netdev, + "Already using mac address %pM\n", + saddr->sa_data); + return 0; + } + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + return err; + + err = hinic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data, + 0, func_id); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to update mac, err: %d\n", + err); + return err == HINIC_PF_SET_VF_ALREADY ? 
-EPERM : err; + } + + memcpy(netdev->dev_addr, saddr->sa_data, ETH_ALEN); + + nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n", + saddr->sa_data); + + return 0; +} + +static int +hinic_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, + u16 vid) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; + u16 func_id; + u32 col, line; + int err; + + if (vid == 0) + return 0; + + col = VID_COL(nic_dev, vid); + line = VID_LINE(nic_dev, vid); + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + goto end; + + err = hinic_add_vlan(nic_dev->hwdev, vid, func_id); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to add vlan%d\n", vid); + goto end; + } + + set_bit(col, &vlan_bitmap[line]); + + nicif_info(nic_dev, drv, netdev, "Add vlan %d\n", vid); + +end: + return err; +} + +static int +hinic_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, + u16 vid) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; + u16 func_id; + int err, col, line; + + /* vlan 0 is used internally by the firmware and must always exist + * after netdev open + */ + if (vid == 0) + return 0; + + col = VID_COL(nic_dev, vid); + line = VID_LINE(nic_dev, vid); + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + goto end; + + err = hinic_del_vlan(nic_dev->hwdev, vid, func_id); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); + goto end; + } + + clear_bit(col, &vlan_bitmap[line]); + + nicif_info(nic_dev, drv, netdev, "Remove vlan %d\n", vid); + +end: + return err; +} + +#define FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable") + +static int set_feature_tso(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_TSO); + int err; + + if (!(changed & NETIF_F_TSO)) + return 0; + + err = hinic_set_tx_tso(nic_dev->hwdev, en); + if (err) { + hinic_err(nic_dev, drv, "%s tso failed\n", FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_TSO; + } else { + hinic_info(nic_dev, drv, "%s tso success\n", + FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_cvlan(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; + bool en = !!(wanted_features & vlan_feature); + int err; + + if (!(changed & vlan_feature)) + return 0; + + err = hinic_set_rx_vlan_offload(nic_dev->hwdev, en); + if (err) { + hinic_err(nic_dev, drv, "%s rxvlan failed\n", + FEATURES_OP_STR(en)); + *failed_features |= vlan_feature; + } else { + hinic_info(nic_dev, drv, "%s rxvlan success\n", + FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_rxcsum(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_RXCSUM); + int err; + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + /* hw should always enable rx csum */ + err = hinic_set_rx_csum_offload(nic_dev->hwdev, + HINIC_RX_CSUM_OFFLOAD_EN); + if (err) { + hinic_err(nic_dev, drv, "%s rx csum 
failed\n", + FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_RXCSUM; + } else { + hinic_info(nic_dev, drv, "%s rx csum success\n", + FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_lro(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_LRO); + u32 lro_timer, lro_buf_size; + int err; + + if (!(changed & NETIF_F_LRO)) + return 0; + + if (en && hinic_is_xdp_enable(nic_dev)) { + hinic_err(nic_dev, drv, "Can not enable LRO when xdp is enable\n"); + *failed_features |= NETIF_F_LRO; + return -EINVAL; + } + + lro_timer = nic_dev->adaptive_cfg.lro.timer; + lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size; + err = hinic_set_rx_lro_state(nic_dev->hwdev, en, lro_timer, + lro_buf_size / nic_dev->rx_buff_len); + if (err) { + hinic_err(nic_dev, drv, "%s lro failed\n", FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_LRO; + } else { + hinic_info(nic_dev, drv, "%s lro success\n", + FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_vlan_filter(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_HW_VLAN_CTAG_FILTER); + int err; + + if (!(changed & NETIF_F_HW_VLAN_CTAG_FILTER)) + return 0; + + err = hinic_set_vlan_fliter(nic_dev->hwdev, en); + if (err) { + hinic_err(nic_dev, drv, "%s rx vlan filter failed\n", + FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } else { + hinic_info(nic_dev, drv, "%s rx vlan filter success\n", + FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_features(struct hinic_nic_dev *nic_dev, + netdev_features_t pre_features, + netdev_features_t features) +{ + netdev_features_t failed_features = 0; + u32 err; + + err = (u32)set_feature_tso(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_cvlan(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_rxcsum(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_lro(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_vlan_filter(nic_dev, features, pre_features, + &failed_features); + if (err) { + nic_dev->netdev->features = features ^ failed_features; + return -EIO; + } + + return 0; +} + +static int hinic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + return set_features(nic_dev, nic_dev->netdev->features, + features); +} + +static netdev_features_t hinic_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* If Rx checksum is disabled, then LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) +{ + int err; + + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + if (FUNC_SUPPORT_DCB(nic_dev->hwdev)) { + err = hinic_dcb_reset_hw_config(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n"); + return -EFAULT; + } + } + + if (FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + err = hinic_reset_port_link_cfg(nic_dev->hwdev); + if (err) + return -EFAULT; + } + + hinic_set_anti_attack(nic_dev->hwdev, true); + 
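Aside: each set_feature_*() helper in this hunk tests wanted_features ^ features so that hardware is only reprogrammed for bits that actually toggled, and hinic_fix_features() keeps dependent offloads consistent by dropping LRO whenever RXCSUM is cleared. The sketch below restates that changed-bits/dependency idiom in isolation; the function names are invented for illustration and are not part of this patch.

#include <linux/netdevice.h>

/* Illustrative only: has this particular feature bit changed? */
static bool example_feature_toggled(netdev_features_t wanted,
				    netdev_features_t cur_features,
				    netdev_features_t bit)
{
	return !!((wanted ^ cur_features) & bit);
}

/* Illustrative only: LRO depends on RX checksum, as in ndo_fix_features */
static netdev_features_t example_fix_features(netdev_features_t features)
{
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}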
+ if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX && + FUNC_SUPPORT_PORT_SETTING(nic_dev->hwdev)) { + err = hinic_set_link_status_follow(nic_dev->hwdev, + set_link_status_follow); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + nic_warn(&nic_dev->pdev->dev, + "Current version of firmware don't support to set link status follow port status\n"); + } + } + + /* enable all hw features in netdev->features */ + return set_features(nic_dev, ~nic_dev->netdev->features, + nic_dev->netdev->features); +} + +static int hinic_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return hinic_setup_tc(dev, mqprio->num_tc); +} + +static int __hinic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return hinic_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void hinic_netpoll(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + for (i = 0; i < nic_dev->num_qps; i++) + napi_schedule(&nic_dev->irq_cfg[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int hinic_uc_sync(struct net_device *netdev, u8 *addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id; + int err; + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + return err; + + err = hinic_set_mac(nic_dev->hwdev, addr, 0, func_id); + return err; +} + +static int hinic_uc_unsync(struct net_device *netdev, u8 *addr) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id; + int err; + + /* The addr is in use */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + err = hinic_global_func_id_get(nic_dev->hwdev, &func_id); + if (err) + return err; + + err = hinic_del_mac(nic_dev->hwdev, addr, 0, func_id); + return err; +} + +static void hinic_clean_mac_list_filter(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { + if (f->state == HINIC_MAC_HW_SYNCED) + hinic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { + if (f->state == HINIC_MAC_HW_SYNCED) + hinic_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } +} + +static struct hinic_mac_filter *hinic_find_mac(struct list_head *filter_list, + u8 *addr) +{ + struct hinic_mac_filter *f; + + list_for_each_entry(f, filter_list, list) { + if (ether_addr_equal(addr, f->addr)) + return f; + } + return NULL; +} + +static struct hinic_mac_filter + *hinic_add_filter(struct hinic_nic_dev *nic_dev, + struct list_head *mac_filter_list, u8 *addr) +{ + struct hinic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto out; + + memcpy(f->addr, addr, ETH_ALEN); + + INIT_LIST_HEAD(&f->list); + list_add_tail(&f->list, mac_filter_list); + + f->state = HINIC_MAC_WAIT_HW_SYNC; + set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags); + +out: + return f; +} + +static void hinic_del_filter(struct hinic_nic_dev *nic_dev, + struct hinic_mac_filter *f) +{ + set_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (f->state == HINIC_MAC_WAIT_HW_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&f->list); + kfree(f); + return; + } + + f->state = HINIC_MAC_WAIT_HW_UNSYNC; +} + +static struct hinic_mac_filter + 
*hinic_mac_filter_entry_clone(struct hinic_mac_filter *src) +{ + struct hinic_mac_filter *f; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + + *f = *src; + INIT_LIST_HEAD(&f->list); + + return f; +} + +static void hinic_undo_del_filter_entries(struct list_head *filter_list, + struct list_head *from) +{ + struct hinic_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + if (hinic_find_mac(filter_list, f->addr)) + continue; + + if (f->state == HINIC_MAC_HW_SYNCED) + f->state = HINIC_MAC_WAIT_HW_UNSYNC; + + list_move_tail(&f->list, filter_list); + } +} + +static void hinic_undo_add_filter_entries(struct list_head *filter_list, + struct list_head *from) +{ + struct hinic_mac_filter *f, *ftmp, *tmp; + + list_for_each_entry_safe(f, ftmp, from, list) { + tmp = hinic_find_mac(filter_list, f->addr); + if (tmp && tmp->state == HINIC_MAC_HW_SYNCED) + tmp->state = HINIC_MAC_WAIT_HW_SYNC; + } +} + +static void hinic_cleanup_filter_list(struct list_head *head) +{ + struct hinic_mac_filter *f, *ftmp; + + list_for_each_entry_safe(f, ftmp, head, list) { + list_del(&f->list); + kfree(f); + } +} + +static int hinic_mac_filter_sync_hw(struct hinic_nic_dev *nic_dev, + struct list_head *del_list, + struct list_head *add_list) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_mac_filter *f, *ftmp; + int err = 0, add_count = 0; + + if (!list_empty(del_list)) { + list_for_each_entry_safe(f, ftmp, del_list, list) { + err = hinic_uc_unsync(netdev, f->addr); + if (err) { /* ignore errors when delete mac */ + nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n"); + } + + list_del(&f->list); + kfree(f); + } + } + + if (!list_empty(add_list)) { + list_for_each_entry_safe(f, ftmp, add_list, list) { + err = hinic_uc_sync(netdev, f->addr); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to add mac\n"); + return err; + } + + add_count++; + list_del(&f->list); + kfree(f); + } + } + + return add_count; +} + +static int hinic_mac_filter_sync(struct hinic_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct net_device *netdev = nic_dev->netdev; + struct list_head tmp_del_list, tmp_add_list; + struct hinic_mac_filter *f, *ftmp, *fclone; + int err = 0, add_count = 0; + + INIT_LIST_HEAD(&tmp_del_list); + INIT_LIST_HEAD(&tmp_add_list); + + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC_MAC_WAIT_HW_UNSYNC) + continue; + + f->state = HINIC_MAC_HW_UNSYNCED; + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC_MAC_WAIT_HW_SYNC) + continue; + + fclone = hinic_mac_filter_entry_clone(f); + if (!fclone) { + err = -ENOMEM; + break; + } + + f->state = HINIC_MAC_HW_SYNCED; + list_add_tail(&fclone->list, &tmp_add_list); + } + + if (err) { + hinic_undo_del_filter_entries(mac_filter_list, &tmp_del_list); + hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); + nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n"); + } + + if (err) { + hinic_cleanup_filter_list(&tmp_del_list); + hinic_cleanup_filter_list(&tmp_add_list); + return -ENOMEM; + } + + add_count = + hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); + if (list_empty(&tmp_add_list)) + return add_count; + + /* there are errors when add mac to hw, delete all mac in hw */ + hinic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); + /* VF don't support to enter promisc mode, + * so we can't delete any other uc mac + */ + if 
(!HINIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC_MAC_HW_SYNCED) + continue; + + fclone = hinic_mac_filter_entry_clone(f); + if (!fclone) + break; + + f->state = HINIC_MAC_WAIT_HW_SYNC; + list_add_tail(&fclone->list, &tmp_del_list); + } + } + + hinic_cleanup_filter_list(&tmp_add_list); + hinic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); + + /* need to enter promisc/allmulti mode */ + return -ENOMEM; +} + +static void hinic_mac_filter_sync_all(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int add_count; + + if (test_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { + clear_bit(HINIC_MAC_FILTER_CHANGED, &nic_dev->flags); + add_count = hinic_mac_filter_sync(nic_dev, + &nic_dev->uc_filter_list, + true); + if (add_count < 0 && !HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + set_bit(HINIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n"); + } else if (add_count) { + clear_bit(HINIC_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + } + + add_count = hinic_mac_filter_sync(nic_dev, + &nic_dev->mc_filter_list, + false); + if (add_count < 0) { + set_bit(HINIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n"); + } else if (add_count) { + clear_bit(HINIC_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + } + } +} + +#define HINIC_DEFAULT_RX_MODE (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | \ + HINIC_RX_MODE_BC) + +static void hinic_update_mac_filter(struct hinic_nic_dev *nic_dev, + struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct netdev_hw_addr *ha; + struct hinic_mac_filter *f, *ftmp, *filter; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) { + filter = hinic_find_mac(filter_list, ha->addr); + if (!filter) + hinic_add_filter(nic_dev, filter_list, ha->addr); + else if (filter->state == HINIC_MAC_WAIT_HW_UNSYNC) + filter->state = HINIC_MAC_HW_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f, ftmp, filter_list, list) { + bool found = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) + if (ether_addr_equal(ha->addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + hinic_del_filter(nic_dev, f); + } +} + +static void __update_mac_filter(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { + hinic_update_mac_filter(nic_dev, &netdev->uc, + &nic_dev->uc_filter_list); + hinic_update_mac_filter(nic_dev, &netdev->mc, + &nic_dev->mc_filter_list); + } +} + +static void hinic_set_rx_mode_work(struct work_struct *work) +{ + struct hinic_nic_dev *nic_dev = + container_of(work, struct hinic_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int promisc_en = 0, allmulti_en = 0; + int err = 0; + + __update_mac_filter(nic_dev); + + hinic_mac_filter_sync_all(nic_dev); + + /* VF don't support to enter promisc mode */ + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + promisc_en = !!(netdev->flags & IFF_PROMISC) || + test_bit(HINIC_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + } + + allmulti_en = !!(netdev->flags & IFF_ALLMULTI) || + test_bit(HINIC_ALLMULTI_FORCE_ON, 
&nic_dev->rx_mod_state); + + if (promisc_en != + test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) || + allmulti_en != + test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) { + enum hinic_rx_mod rx_mod = HINIC_DEFAULT_RX_MODE; + + rx_mod |= (promisc_en ? HINIC_RX_MODE_PROMISC : 0); + rx_mod |= (allmulti_en ? HINIC_RX_MODE_MC_ALL : 0); + + /* FOR DEBUG */ + if (promisc_en != + test_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s promisc mode\n", + promisc_en ? "Enter" : "Left"); + if (allmulti_en != + test_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s all_multi mode\n", + allmulti_en ? "Enter" : "Left"); + + err = hinic_set_rx_mode(nic_dev->hwdev, rx_mod); + if (!err) { + promisc_en ? + set_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC_HW_PROMISC_ON, &nic_dev->rx_mod_state); + + allmulti_en ? + set_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); + } else { + nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); + } + } +} + +static void hinic_nic_set_rx_mode(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + set_bit(HINIC_UPDATE_MAC_FILTER, &nic_dev->flags); + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + } + + if (FUNC_SUPPORT_RX_MODE(nic_dev->hwdev)) + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +bool hinic_is_xdp_enable(struct hinic_nic_dev *nic_dev) +{ + return !!nic_dev->xdp_prog; +} + +int hinic_xdp_max_mtu(struct hinic_nic_dev *nic_dev) +{ + return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); +} + +static int hinic_xdp_setup(struct hinic_nic_dev *nic_dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog = NULL; + int max_mtu = hinic_xdp_max_mtu(nic_dev); + int q_id; + + if (prog && nic_dev->netdev->mtu > max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", + nic_dev->netdev->mtu, max_mtu); + NL_SET_ERR_MSG_MOD(extack, + "MTU is too large to load xdp program"); + return -EINVAL; + } + + if (prog && nic_dev->netdev->features & NETIF_F_LRO) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program while LRO is on\n"); + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup xdp program while LRO is on\n"); + return -EINVAL; + } + + old_prog = xchg(&nic_dev->xdp_prog, prog); + for (q_id = 0; q_id < nic_dev->max_qps; q_id++) + xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int hinic_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return hinic_xdp_setup(nic_dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = nic_dev->xdp_prog ? 
+ nic_dev->xdp_prog->aux->id : 0; + return 0; + default: + return -EINVAL; + } +} static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, + .ndo_start_xmit = hinic_xmit_frame, + .ndo_get_stats64 = hinic_get_stats64, + .ndo_tx_timeout = hinic_tx_timeout, + .ndo_select_queue = hinic_select_queue, .ndo_change_mtu = hinic_change_mtu, .ndo_set_mac_address = hinic_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, - .ndo_set_rx_mode = hinic_set_rx_mode, + + .ndo_set_vf_mac = hinic_ndo_set_vf_mac, + .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan, + .ndo_set_vf_rate = hinic_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = hinic_ndo_set_vf_trust, + .ndo_get_vf_config = hinic_ndo_get_vf_config, + + .ndo_setup_tc = __hinic_setup_tc, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic_nic_set_rx_mode, + .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state, + .ndo_fix_features = hinic_fix_features, + .ndo_set_features = hinic_set_features, + .ndo_bpf = hinic_xdp, +}; + +static const struct net_device_ops hinicvf_netdev_ops = { + .ndo_open = hinic_open, + .ndo_stop = hinic_close, .ndo_start_xmit = hinic_xmit_frame, + .ndo_get_stats64 = hinic_get_stats64, .ndo_tx_timeout = hinic_tx_timeout, - .ndo_get_stats64 = hinic_get_stats64, + .ndo_select_queue = hinic_select_queue, + .ndo_change_mtu = hinic_change_mtu, + .ndo_set_mac_address = hinic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic_nic_set_rx_mode, + + .ndo_fix_features = hinic_fix_features, + .ndo_set_features = hinic_set_features, + .ndo_bpf = hinic_xdp, }; -static void netdev_features_init(struct net_device *netdev) +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t hw_features; + + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | NETIF_F_RXCSUM; + + if (FUNC_SUPPORT_SCTP_CRC(nic_dev->hwdev)) + netdev->features |= NETIF_F_SCTP_CRC; + + netdev->vlan_features = netdev->features; + + if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (FUNC_SUPPORT_HW_VLAN(nic_dev->hwdev)) { + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + /* copy netdev features into list of user selectable features */ + hw_features = netdev->hw_features; + hw_features |= netdev->features; + + if (FUNC_SUPPORT_LRO(nic_dev->hwdev)) { + /* LRO is disable in default, only set hw features */ + hw_features |= NETIF_F_LRO; + + /* Enable LRO */ + if (nic_dev->adaptive_cfg.lro.enable && + !HINIC_FUNC_IS_VF(nic_dev->hwdev)) + netdev->features |= NETIF_F_LRO; + } + + netdev->hw_features = hw_features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (FUNC_SUPPORT_ENCAP_TSO_CSUM(nic_dev->hwdev)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SCTP_CRC | NETIF_F_SG; + netdev->hw_enc_features |= 
NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_TSO_ECN + | NETIF_F_GSO_UDP_TUNNEL_CSUM + | NETIF_F_GSO_UDP_TUNNEL; + } +} + +#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) { \ + if ((num_qps) > (nic_dev)->max_qps) \ + nic_warn(&(nic_dev)->pdev->dev, \ + "Module Parameter %s value %d is out of range, "\ + "Maximum value for the device: %d, using %d\n",\ + #num_qps, num_qps, (nic_dev)->max_qps, \ + (nic_dev)->max_qps); \ + if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \ + out_qps = (nic_dev)->max_qps; \ + else \ + out_qps = num_qps; \ +} + +static void hinic_try_to_enable_rss(struct hinic_nic_dev *nic_dev) +{ + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + int i, node, err = 0; + u16 num_cpus = 0; + enum hinic_service_mode service_mode = + hinic_get_service_mode(nic_dev->hwdev); + + nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev); + if (nic_dev->max_qps <= 1) { + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->rss_limit = nic_dev->max_qps; + nic_dev->num_qps = nic_dev->max_qps; + nic_dev->num_rss = nic_dev->max_qps; + + return; + } + + err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx); + if (err) { + if (err == -ENOSPC) + nic_warn(&nic_dev->pdev->dev, + "Failed to alloc tmpl_idx for rss, table is full\n"); + else + nic_err(&nic_dev->pdev->dev, + "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n"); + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + nic_dev->max_qps = 1; + nic_dev->rss_limit = nic_dev->max_qps; + nic_dev->num_qps = nic_dev->max_qps; + nic_dev->num_rss = nic_dev->max_qps; + + return; + } + + set_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + + nic_dev->max_qps = hinic_func_max_nic_qnum(nic_dev->hwdev); + + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, nic_dev->num_qps); + + /* To reduce memory footprint in ovs mode. + * VF can't get board info correctly with early pf driver. 
+ */ + if ((hinic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) && + service_mode == HINIC_WORK_MODE_OVS && + hinic_func_type(nic_dev->hwdev) != TYPE_VF) + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps, + nic_dev->num_qps); + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(&nic_dev->pdev->dev)) + num_cpus++; + } + + if (!num_cpus) + num_cpus = (u16)num_online_cpus(); + + nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus); + + nic_dev->rss_limit = nic_dev->num_qps; + nic_dev->num_rss = nic_dev->num_qps; + + hinic_init_rss_parameters(nic_dev->netdev); + hinic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc); +} + +static int hinic_sw_init(struct hinic_nic_dev *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 func_id; + int err = 0; + + sema_init(&adapter->port_state_sem, 1); + + err = hinic_dcb_init(adapter); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to init dcb\n"); + return -EFAULT; + } + + if (HINIC_FUNC_IS_VF(adapter->hwdev)) { + err = hinic_sq_cos_mapping(netdev); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to set sq_cos_mapping\n"); + return -EFAULT; + } + } + + adapter->sq_depth = HINIC_SQ_DEPTH; + adapter->rq_depth = HINIC_RQ_DEPTH; + + hinic_try_to_enable_rss(adapter); + + err = hinic_get_default_mac(adapter->hwdev, netdev->dev_addr); + if (err) { + nic_err(&adapter->pdev->dev, "Failed to get MAC address\n"); + goto get_mac_err; + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!HINIC_FUNC_IS_VF(adapter->hwdev)) { + nic_err(&adapter->pdev->dev, "Invalid MAC address\n"); + err = -EIO; + goto err_mac; + } + + nic_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random\n", + netdev->dev_addr); + eth_hw_addr_random(netdev); + } + + err = hinic_global_func_id_get(adapter->hwdev, &func_id); + if (err) + goto func_id_err; + + err = hinic_set_mac(adapter->hwdev, netdev->dev_addr, 0, func_id); + /* When this is VF driver, we must consider that PF has already set VF + * MAC, and we can't consider this condition is error status during + * driver probe procedure. 
+ */ + if (err && err != HINIC_PF_SET_VF_ALREADY) { + nic_err(&adapter->pdev->dev, "Failed to set default MAC\n"); + goto set_mac_err; + } + + /* MTU range: 256 - 9600 */ + netdev->min_mtu = HINIC_MIN_MTU_SIZE; + netdev->max_mtu = HINIC_MAX_JUMBO_FRAME_SIZE; + + return 0; + +set_mac_err: +func_id_err: +err_mac: +get_mac_err: + if (test_bit(HINIC_RSS_ENABLE, &adapter->flags)) + hinic_rss_template_free(adapter->hwdev, adapter->rss_tmpl_idx); + + return err; +} + +static void hinic_assign_netdev_ops(struct hinic_nic_dev *adapter) +{ + if (!HINIC_FUNC_IS_VF(adapter->hwdev)) { + adapter->netdev->netdev_ops = &hinic_netdev_ops; + if (FUNC_SUPPORT_DCB(adapter->hwdev)) + adapter->netdev->dcbnl_ops = &hinic_dcbnl_ops; + hinic_set_ethtool_ops(adapter->netdev); + } else { + adapter->netdev->netdev_ops = &hinicvf_netdev_ops; + hinicvf_set_ethtool_ops(adapter->netdev); + } + adapter->netdev->watchdog_timeo = 5 * HZ; +} + +#define HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT 1 +#define HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER 1 +#define HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER 2 +#define HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER 3 +#define HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DFT_PG_100GE_TXRX_MSIX_COALESC_TIMER 2 +#define HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER 3 + +static void update_queue_coal_param(struct hinic_nic_dev *nic_dev, + struct pci_device_id *id, u16 qid) { - netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; + struct hinic_intr_coal_info *info = NULL; + + info = &nic_dev->intr_coalesce[qid]; + if (!nic_dev->intr_coal_set_flag) { + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + info->pending_limt = + HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT; + info->coalesce_timer_cfg = + HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER; + break; + case HINIC_BOARD_PG_SM_25GE: + info->pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; + info->coalesce_timer_cfg = + HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER; + break; + case HINIC_BOARD_PG_100GE: + info->pending_limt = + HINIC_DFT_PG_100GE_TXRX_MSIX_PENDING_LIMIT; + info->coalesce_timer_cfg = + HINIC_DFT_PG_ARM_100GE_TXRX_MSIX_COALESC_TIMER; + break; + default: + info->pending_limt = qp_pending_limit; + info->coalesce_timer_cfg = qp_coalesc_timer_cfg; + break; + } + } - netdev->vlan_features = netdev->hw_features; + info->resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + info->pkt_rate_high = HINIC_RX_RATE_HIGH; + info->rx_usecs_high = qp_coalesc_timer_high; + info->rx_pending_limt_high = qp_pending_limit_high; + info->pkt_rate_low = HINIC_RX_RATE_LOW; + info->rx_usecs_low = qp_coalesc_timer_low; + info->rx_pending_limt_low = qp_pending_limit_low; + + if (nic_dev->in_vm) { + if (qp_pending_limit_high == HINIC_RX_PENDING_LIMIT_HIGH) + qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH_VM; + info->pkt_rate_low = HINIC_RX_RATE_LOW_VM; + info->rx_pending_limt_high = qp_pending_limit_high; + } - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + /* suit for sdi3.0 vm mode vf drv or bm mode pf/vf drv */ + if ((nic_dev->is_vm_slave && nic_dev->in_vm) || + nic_dev->is_bm_slave) { + info->pkt_rate_high = SDI_VM_RX_PKT_RATE_HIGH; + info->pkt_rate_low = SDI_VM_RX_PKT_RATE_LOW; + + if (qp_coalesc_use_drv_params_switch == 0) { + /* if arm server, maybe need to change this value + * again + */ + info->pending_limt = SDI_VM_PENDING_LIMT; + info->coalesce_timer_cfg = SDI_VM_COALESCE_TIMER_CFG; + info->rx_usecs_high = 
SDI_VM_RX_USECS_HIGH; + info->rx_pending_limt_high = + SDI_VM_RX_PENDING_LIMT_HIGH; + info->rx_usecs_low = SDI_VM_RX_USECS_LOW; + info->rx_pending_limt_low = SDI_VM_RX_PENDING_LIMT_LOW; + } else { + info->rx_usecs_high = qp_coalesc_timer_high; + info->rx_pending_limt_high = qp_pending_limit_high; + info->rx_usecs_low = qp_coalesc_timer_low; + info->rx_pending_limt_low = qp_pending_limit_low; + } + } } -/** - * link_status_event_handler - link event handler - * @handle: nic device for the handler - * @buf_in: input buffer - * @in_size: input size - * @buf_in: output buffer - * @out_size: returned output size - * - * Return 0 - Success, negative - Failure - **/ -static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) +static void init_intr_coal_param(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + u16 i; + + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_10GE: + case HINIC_BOARD_PG_TP_10GE: + nic_dev->his_link_speed = SPEED_10000; + break; + case HINIC_BOARD_25GE: + case HINIC_BOARD_PG_SM_25GE: + nic_dev->his_link_speed = SPEED_25000; + break; + case HINIC_BOARD_40GE: + nic_dev->his_link_speed = SPEED_40000; + break; + case HINIC_BOARD_100GE: + case HINIC_BOARD_PG_100GE: + nic_dev->his_link_speed = SPEED_100000; + break; + default: + break; + } + + for (i = 0; i < nic_dev->max_qps; i++) + update_queue_coal_param(nic_dev, id, i); +} + +static int hinic_init_intr_coalesce(struct hinic_nic_dev *nic_dev) { - struct hinic_port_link_status *link_status, *ret_link_status; - struct hinic_dev *nic_dev = handle; + u64 size; - link_status = buf_in; + if (qp_pending_limit != HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT || + qp_coalesc_timer_cfg != HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG) + nic_dev->intr_coal_set_flag = 1; + else + nic_dev->intr_coal_set_flag = 0; - if (link_status->link == HINIC_LINK_STATE_UP) { - down(&nic_dev->mgmt_lock); + size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps; + if (!size) { + nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n"); + return -EINVAL; + } + nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL); + if (!nic_dev->intr_coalesce) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n"); + return -ENOMEM; + } - nic_dev->flags |= HINIC_LINK_UP; + init_intr_coal_param(nic_dev); - if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == - (HINIC_LINK_UP | HINIC_INTF_UP)) { - netif_carrier_on(nic_dev->netdev); - netif_tx_wake_all_queues(nic_dev->netdev); - } + if (test_bit(HINIC_INTR_ADAPT, &nic_dev->flags)) + nic_dev->adaptive_rx_coal = 1; + else + nic_dev->adaptive_rx_coal = 0; - up(&nic_dev->mgmt_lock); + return 0; +} - netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); - } else { - down(&nic_dev->mgmt_lock); +static void hinic_free_intr_coalesce(struct hinic_nic_dev *nic_dev) +{ + kfree(nic_dev->intr_coalesce); +} - nic_dev->flags &= ~HINIC_LINK_UP; +static int hinic_alloc_qps(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err; - netif_carrier_off(nic_dev->netdev); - netif_tx_disable(nic_dev->netdev); + err = hinic_alloc_txqs(netdev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n"); + return err; + } - up(&nic_dev->mgmt_lock); + err = hinic_alloc_rxqs(netdev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n"); + goto alloc_rxqs_err; + } - netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); + err = 
hinic_init_intr_coalesce(nic_dev); + if (err) { + nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n"); + goto init_intr_err; } - ret_link_status = buf_out; - ret_link_status->status = 0; + return 0; + +init_intr_err: + hinic_free_rxqs(netdev); - *out_size = sizeof(*ret_link_status); +alloc_rxqs_err: + hinic_free_txqs(netdev); + + return err; } -/** - * nic_dev_init - Initialize the NIC device - * @pdev: the NIC pci device - * - * Return 0 - Success, negative - Failure - **/ -static int nic_dev_init(struct pci_dev *pdev) -{ - struct hinic_rx_mode_work *rx_mode_work; - struct hinic_txq_stats *tx_stats; - struct hinic_rxq_stats *rx_stats; - struct hinic_dev *nic_dev; - struct net_device *netdev; - struct hinic_hwdev *hwdev; - int err, num_qps; +static void hinic_destroy_qps(struct hinic_nic_dev *nic_dev) +{ + hinic_free_intr_coalesce(nic_dev); + hinic_free_rxqs(nic_dev->netdev); + hinic_free_txqs(nic_dev->netdev); +} - hwdev = hinic_init_hwdev(pdev); - if (IS_ERR(hwdev)) { - dev_err(&pdev->dev, "Failed to initialize HW device\n"); - return PTR_ERR(hwdev); +static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev) +{ + struct pci_dev *pdev = lld_dev->pdev; + + /* Check poll_weight value, default poll_weight is 64. + * The poll_weight must not exceed the max queue depth, + * so the valid value range is 1~4096. + */ + if (!poll_weight) { + nic_warn(&pdev->dev, "Module Parameter poll_weight can not be 0, resetting to %d\n", + DEFAULT_POLL_WEIGHT); + poll_weight = DEFAULT_POLL_WEIGHT; } - num_qps = hinic_hwdev_num_qps(hwdev); - if (num_qps <= 0) { - dev_err(&pdev->dev, "Invalid number of QPS\n"); - err = -EINVAL; - goto err_num_qps; + if (poll_weight > HINIC_MAX_QUEUE_DEPTH) { + nic_warn(&pdev->dev, "Module Parameter poll_weight value %u is out of 1~%d, resetting to max value %d\n", + poll_weight, HINIC_MAX_QUEUE_DEPTH, + HINIC_MAX_QUEUE_DEPTH); + poll_weight = HINIC_MAX_QUEUE_DEPTH; } - netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps); - if (!netdev) { - dev_err(&pdev->dev, "Failed to allocate Ethernet device\n"); - err = -ENOMEM; - goto err_alloc_etherdev; + /* Check rx_buff value, default rx_buff is 2KB. + * Valid rx_buff values are 2KB/4KB/8KB/16KB. + */ + if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB && + rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) { + nic_warn(&pdev->dev, "Module Parameter rx_buff value %d is out of range, must be 2^n. 
Valid range is 2 - 16, resetting to %dKB", + rx_buff, DEFAULT_RX_BUFF_LEN); + rx_buff = DEFAULT_RX_BUFF_LEN; } - netdev->netdev_ops = &hinic_netdev_ops; - netdev->ethtool_ops = &hinic_ethtool_ops; - netdev->max_mtu = ETH_MAX_MTU; + if (qp_coalesc_timer_high <= qp_coalesc_timer_low) { + nic_warn(&pdev->dev, "Module Parameter qp_coalesc_timer_high: %d, qp_coalesc_timer_low: %d is invalid, resetting to default\n", + qp_coalesc_timer_high, qp_coalesc_timer_low); + qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH; + qp_coalesc_timer_low = HINIC_RX_COAL_TIME_LOW; + } - nic_dev = netdev_priv(netdev); - nic_dev->netdev = netdev; - nic_dev->hwdev = hwdev; - nic_dev->msg_enable = MSG_ENABLE_DEFAULT; - nic_dev->flags = 0; - nic_dev->txqs = NULL; - nic_dev->rxqs = NULL; - nic_dev->tx_weight = tx_weight; - nic_dev->rx_weight = rx_weight; + if (qp_pending_limit_high <= qp_pending_limit_low) { + nic_warn(&pdev->dev, "Module Parameter qp_pending_limit_high: %d, qp_pending_limit_low: %d is invalid, resetting to default\n", + qp_pending_limit_high, qp_pending_limit_low); + qp_pending_limit_high = HINIC_RX_PENDING_LIMIT_HIGH; + qp_pending_limit_low = HINIC_RX_PENDING_LIMIT_LOW; + } - sema_init(&nic_dev->mgmt_lock, 1); + return 0; +} + +static void check_lro_module_param(struct hinic_nic_dev *nic_dev) +{ + struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro; + + /* Use module parameters first. */ + if (set_lro_timer != 0 && + set_lro_timer >= HINIC_LRO_RX_TIMER_LOWER && + set_lro_timer <= HINIC_LRO_RX_TIMER_UPPER) + lro->timer = set_lro_timer; + + /* Use module parameters first. */ + if (set_max_wqe_num != 0 && + set_max_wqe_num <= HINIC_LRO_MAX_WQE_NUM_UPPER && + set_max_wqe_num >= HINIC_LRO_MAX_WQE_NUM_LOWER) + lro->buffer_size = set_max_wqe_num * nic_dev->rx_buff_len; +} - tx_stats = &nic_dev->tx_stats; - rx_stats = &nic_dev->rx_stats; +static void decide_rss_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_environment_info *info = &nic_dev->env_info; - u64_stats_init(&tx_stats->syncp); - u64_stats_init(&rx_stats->syncp); + switch (info->cpu) { + case HINIC_CPU_ARM_GENERIC: + set_bit(HINIC_SAME_RXTX, &nic_dev->flags); - nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, - VLAN_BITMAP_SIZE(nic_dev), - GFP_KERNEL); - if (!nic_dev->vlan_bitmap) { - err = -ENOMEM; - goto err_vlan_bitmap; + break; + case HINIC_CPU_X86_GENERIC: + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + + break; + + default: + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + break; } +} - nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME); - if (!nic_dev->workq) { - err = -ENOMEM; - goto err_workq; +static void decide_lro_cfg(struct hinic_nic_dev *nic_dev) +{ + struct hinic_environment_info *info = &nic_dev->env_info; + struct hinic_lro_cfg *lro = &nic_dev->adaptive_cfg.lro; + + if (lro_en_status < HINIC_LRO_STATUS_UNSET) { + lro->enable = lro_en_status; + } else { + /* LRO will be opened in all Huawei OS */ + switch (info->os) { + case HINIC_OS_HUAWEI: + lro->enable = 1; + break; + case HINIC_OS_NON_HUAWEI: + lro->enable = 0; + break; + default: + lro->enable = 0; + break; + } } - pci_set_drvdata(pdev, netdev); + switch (info->board) { + case HINIC_BOARD_25GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_25GE; + break; + case HINIC_BOARD_100GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_100GE; + break; + case HINIC_BOARD_PG_TP_10GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE; + break; + case HINIC_BOARD_PG_SM_25GE: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT; + break; + case HINIC_BOARD_PG_100GE: + lro->timer = 
HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE; + break; + default: + lro->timer = HINIC_LRO_RX_TIMER_DEFAULT; + break; + } + + /* Use module parameters first. */ + switch (info->cpu) { + case HINIC_CPU_ARM_GENERIC: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM * + nic_dev->rx_buff_len; + break; + case HINIC_CPU_X86_GENERIC: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 * + nic_dev->rx_buff_len; + break; + default: + lro->buffer_size = + HINIC_LRO_MAX_WQE_NUM_DEFAULT * + nic_dev->rx_buff_len; + break; + } + + /* LRO buffer_size needs to be adjusted according to the board type */ + switch (info->board) { + case HINIC_BOARD_PG_TP_10GE: + case HINIC_BOARD_PG_SM_25GE: + case HINIC_BOARD_PG_100GE: + lro->buffer_size = + HINIC_LRO_WQE_NUM_PANGEA_DEFAULT * nic_dev->rx_buff_len; + break; + default: + break; + } + + check_lro_module_param(nic_dev); + + nic_info(&nic_dev->pdev->dev, + "LRO default configuration: enable %u, timer %u, buffer size %u\n", + lro->enable, lro->timer, lro->buffer_size); +} + +static void decide_intr_cfg(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + case HINIC_BOARD_PG_SM_25GE: + case HINIC_BOARD_PG_100GE: + clear_bit(HINIC_INTR_ADAPT, &nic_dev->flags); + break; + default: + set_bit(HINIC_INTR_ADAPT, &nic_dev->flags); + break; + } +} + +static void adaptive_configuration_init(struct hinic_nic_dev *nic_dev) +{ + struct pci_device_id *id; + + id = hinic_get_pci_device_id(nic_dev->pdev); + if (id) + nic_dev->env_info.board = id->driver_data; + else + nic_dev->env_info.board = HINIC_BOARD_UNKNOWN; + + nic_dev->env_info.os = HINIC_OS_HUAWEI; + +#if defined(__aarch64__) + nic_dev->env_info.cpu = HINIC_CPU_ARM_GENERIC; +#elif defined(__x86_64__) + nic_dev->env_info.cpu = HINIC_CPU_X86_GENERIC; +#else + nic_dev->env_info.cpu = HINIC_CPU_UNKNOWN; +#endif + + nic_info(&nic_dev->pdev->dev, + "Board type %u, OS type %u, CPU type %u\n", + nic_dev->env_info.board, nic_dev->env_info.os, + nic_dev->env_info.cpu); + + decide_lro_cfg(nic_dev); + decide_rss_cfg(nic_dev); + decide_intr_cfg(nic_dev); +} + +static int nic_probe(struct hinic_lld_dev *lld_dev, void **uld_dev, + char *uld_dev_name) +{ + struct pci_dev *pdev = lld_dev->pdev; + struct hinic_nic_dev *nic_dev; + struct net_device *netdev; + u16 max_qps; + u32 page_num; + int err; + + /* *uld_dev should never be NULL */ + *uld_dev = lld_dev; + + if (!hinic_support_nic(lld_dev->hwdev, NULL)) { + nic_info(&pdev->dev, "HW doesn't support NIC\n"); + return 0; + } - err = hinic_port_get_mac(nic_dev, netdev->dev_addr); + err = hinic_validate_parameters(lld_dev); if (err) - dev_warn(&pdev->dev, "Failed to get mac address\n"); + return -EINVAL; + + max_qps = hinic_func_max_nic_qnum(lld_dev->hwdev); + netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); + if (!netdev) { + nic_err(&pdev->dev, "Failed to allocate ETH device\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev = (struct hinic_nic_dev *)netdev_priv(netdev); + nic_dev->hwdev = lld_dev->hwdev; + nic_dev->pdev = pdev; + nic_dev->poll_weight = (int)poll_weight; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->heart_status = true; + nic_dev->in_vm = !hinic_is_in_host(); + nic_dev->is_vm_slave = is_multi_vm_slave(lld_dev->hwdev); + nic_dev->is_bm_slave = is_multi_bm_slave(lld_dev->hwdev); + nic_dev->lro_replenish_thld = lro_replenish_thld; + nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); + page_num = (RX_BUFF_NUM_PER_PAGE * 
nic_dev->rx_buff_len) / PAGE_SIZE; + nic_dev->page_order = page_num > 0 ? ilog2(page_num) : 0; + + mutex_init(&nic_dev->nic_mutex); + + adaptive_configuration_init(nic_dev); + + nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (!nic_dev->vlan_bitmap) { + nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n"); + err = -ENOMEM; + goto vlan_bitmap_err; + } + nic_dev->netdev = netdev; + hinic_assign_netdev_ops(nic_dev); + netdev_feature_init(netdev); + /* get nic cap from hw */ + hinic_support_nic(lld_dev->hwdev, &nic_dev->nic_cap); - err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0); + err = hinic_init_nic_hwdev(nic_dev->hwdev, nic_dev->rx_buff_len); if (err) { - dev_err(&pdev->dev, "Failed to add mac\n"); - goto err_add_mac; + nic_err(&pdev->dev, "Failed to init nic hwdev\n"); + goto init_nic_hwdev_err; } - err = hinic_port_set_mtu(nic_dev, netdev->mtu); + err = hinic_set_super_cqe_state(nic_dev->hwdev, true); if (err) { - dev_err(&pdev->dev, "Failed to set mtu\n"); - goto err_set_mtu; + nic_err(&pdev->dev, "Failed to set super cqe\n"); + goto set_supper_cqe_err; } - rx_mode_work = &nic_dev->rx_mode_work; - INIT_WORK(&rx_mode_work->work, set_rx_mode); + err = hinic_sw_init(nic_dev); + if (err) + goto sw_init_err; - netdev_features_init(netdev); + err = hinic_alloc_qps(nic_dev); + if (err) { + nic_err(&pdev->dev, "Failed to alloc qps\n"); + goto alloc_qps_err; + } - netif_carrier_off(netdev); + nic_dev->workq = create_singlethread_workqueue(HINIC_NIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + nic_err(&pdev->dev, "Failed to initialize AEQ workqueue\n"); + err = -ENOMEM; + goto create_workq_err; + } - hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, - nic_dev, link_status_event_handler); + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_WORK(&nic_dev->rx_mode_work, hinic_set_rx_mode_work); + + err = hinic_set_default_hw_feature(nic_dev); + if (err) + goto set_features_err; + + hinic_register_notifier(nic_dev); - SET_NETDEV_DEV(netdev, &pdev->dev); err = register_netdev(netdev); if (err) { - dev_err(&pdev->dev, "Failed to register netdev\n"); - goto err_reg_netdev; + nic_err(&pdev->dev, "Failed to register netdev\n"); + err = -ENOMEM; + goto netdev_err; } + netif_carrier_off(netdev); + + *uld_dev = nic_dev; + nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n"); + return 0; -err_reg_netdev: - hinic_hwdev_cb_unregister(nic_dev->hwdev, - HINIC_MGMT_MSG_CMD_LINK_STATUS); - cancel_work_sync(&rx_mode_work->work); +netdev_err: + hinic_unregister_notifier(nic_dev); -err_set_mtu: -err_add_mac: - pci_set_drvdata(pdev, NULL); +set_features_err: destroy_workqueue(nic_dev->workq); -err_workq: -err_vlan_bitmap: +create_workq_err: + hinic_destroy_qps(nic_dev); + +alloc_qps_err: + hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic_global_func_id_hw(nic_dev->hwdev)); + +sw_init_err: + (void)hinic_set_super_cqe_state(nic_dev->hwdev, false); + +set_supper_cqe_err: + hinic_free_nic_hwdev(nic_dev->hwdev); + +init_nic_hwdev_err: + kfree(nic_dev->vlan_bitmap); + +vlan_bitmap_err: free_netdev(netdev); -err_alloc_etherdev: -err_num_qps: - hinic_free_hwdev(hwdev); return err; } -static int hinic_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static void nic_remove(struct hinic_lld_dev *lld_dev, void *adapter) { - int err = pci_enable_device(pdev); + struct hinic_nic_dev *nic_dev = adapter; + struct net_device *netdev; - if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); - 
return err; + if (!nic_dev || !hinic_support_nic(lld_dev->hwdev, NULL)) + return; + + netdev = nic_dev->netdev; + + unregister_netdev(netdev); + hinic_unregister_notifier(nic_dev); + + cancel_work_sync(&nic_dev->rx_mode_work); + destroy_workqueue(nic_dev->workq); + + hinic_destroy_qps(nic_dev); + + hinic_clean_mac_list_filter(nic_dev); + hinic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic_global_func_id_hw(nic_dev->hwdev)); + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx); + + (void)hinic_set_super_cqe_state(nic_dev->hwdev, false); + + hinic_free_nic_hwdev(nic_dev->hwdev); + + kfree(nic_dev->vlan_bitmap); + + free_netdev(netdev); +} + +int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err, err_netdev = 0; + + nicif_info(nic_dev, drv, netdev, "Start to disable RSS\n"); + + if (!test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "RSS not enabled, do nothing\n"); + return 0; + } + + if (netif_running(netdev)) { + err_netdev = hinic_close(netdev); + if (err_netdev) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; + } } - err = pci_request_regions(pdev, HINIC_DRV_NAME); + /* free rss template */ + err = hinic_rss_template_free(nic_dev->hwdev, nic_dev->rss_tmpl_idx); if (err) { - dev_err(&pdev->dev, "Failed to request PCI regions\n"); - goto err_pci_regions; + nicif_err(nic_dev, drv, netdev, "Failed to free RSS template\n"); + } else { + nicif_info(nic_dev, drv, netdev, "Success to free RSS template\n"); + clear_bit(HINIC_RSS_ENABLE, &nic_dev->flags); } - pci_set_master(pdev); + if (netif_running(netdev)) { + err_netdev = hinic_open(netdev); + if (err_netdev) + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Failed to set DMA mask\n"); - goto err_dma_mask; - } + return err ? 
err : err_netdev; +} + +int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err, err_netdev = 0; + + nicif_info(nic_dev, drv, netdev, "Start to enable RSS\n"); + + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + nicif_info(nic_dev, drv, netdev, "RSS already enabled, do nothing\n"); + return 0; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Couldn't set 64-bit consistent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent DMA mask\n"); - goto err_dma_consistent_mask; + if (netif_running(netdev)) { + err_netdev = hinic_close(netdev); + if (err_netdev) { + nicif_err(nic_dev, drv, netdev, + "Failed to close netdev\n"); + return -EFAULT; } } - err = nic_dev_init(pdev); + err = hinic_rss_template_alloc(nic_dev->hwdev, &nic_dev->rss_tmpl_idx); if (err) { - dev_err(&pdev->dev, "Failed to initialize NIC device\n"); - goto err_nic_dev_init; + if (err == -ENOSPC) + nicif_warn(nic_dev, drv, netdev, + "Failed to alloc RSS template, table is full\n"); + else + nicif_err(nic_dev, drv, netdev, + "Failed to alloc RSS template\n"); + } else { + set_bit(HINIC_RSS_ENABLE, &nic_dev->flags); + nicif_info(nic_dev, drv, netdev, "Success to alloc RSS template\n"); } - dev_info(&pdev->dev, "HiNIC driver - probed\n"); - return 0; + if (netif_running(netdev)) { + err_netdev = hinic_open(netdev); + if (err_netdev) + nicif_err(nic_dev, drv, netdev, + "Failed to open netdev\n"); + } -err_nic_dev_init: -err_dma_consistent_mask: -err_dma_mask: - pci_release_regions(pdev); + return err ? err : err_netdev; +} -err_pci_regions: - pci_disable_device(pdev); - return err; +static const char *hinic_module_link_err[LINK_ERR_NUM] = { + "Unrecognized module", +}; + +static void hinic_port_module_event_handler(struct hinic_nic_dev *nic_dev, + struct hinic_event_info *event) +{ + enum port_module_event_type type = event->module_event.type; + enum link_err_type err_type = event->module_event.err_type; + + switch (type) { + case HINIC_PORT_MODULE_CABLE_PLUGGED: + case HINIC_PORT_MODULE_CABLE_UNPLUGGED: + nicif_info(nic_dev, link, nic_dev->netdev, + "Port module event: Cable %s\n", + type == HINIC_PORT_MODULE_CABLE_PLUGGED ? 
+ "plugged" : "unplugged"); + break; + case HINIC_PORT_MODULE_LINK_ERR: + if (err_type >= LINK_ERR_NUM) { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, Unknown error type: 0x%x\n", + err_type); + } else { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, error type: 0x%x: %s\n", + err_type, hinic_module_link_err[err_type]); + } + break; + default: + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + break; + } } -static void hinic_remove(struct pci_dev *pdev) +static void hinic_intr_coalesc_change(struct hinic_nic_dev *nic_dev, + struct hinic_event_info *event) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_rx_mode_work *rx_mode_work; + u32 hw_to_os_speed[LINK_SPEED_LEVELS] = {SPEED_10, SPEED_100, + SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, + SPEED_100000}; + u8 qid, coalesc_timer_cfg, pending_limt; + struct pci_device_id *id; + u32 speed; + int err; - unregister_netdev(netdev); + if (nic_dev->adaptive_rx_coal) + return; + + speed = hw_to_os_speed[event->link_info.speed]; + if (speed == nic_dev->his_link_speed) + return; - hinic_hwdev_cb_unregister(nic_dev->hwdev, - HINIC_MGMT_MSG_CMD_LINK_STATUS); + id = hinic_get_pci_device_id(nic_dev->pdev); + switch (id->driver_data) { + case HINIC_BOARD_PG_TP_10GE: + return; + case HINIC_BOARD_PG_SM_25GE: + if (speed == SPEED_10000) { + pending_limt = + HINIC_DFT_PG_10GE_TXRX_MSIX_PENDING_LIMIT; + coalesc_timer_cfg = + HINIC_DFT_PG_10GE_TXRX_MSIX_COALESC_TIMER; + } else if (speed == SPEED_25000) { + pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; + coalesc_timer_cfg = + HINIC_DFT_PG_ARM_25GE_TXRX_MSIX_COALESC_TIMER; + } else { + pending_limt = + HINIC_DFT_PG_25GE_TXRX_MSIX_PENDING_LIMIT; + coalesc_timer_cfg = + HINIC_DFT_PG_25GE_TXRX_MSIX_COALESC_TIMER; + } + break; + case HINIC_BOARD_PG_100GE: + return; + default: + return; + } - rx_mode_work = &nic_dev->rx_mode_work; - cancel_work_sync(&rx_mode_work->work); + for (qid = 0; qid < nic_dev->num_qps; qid++) { + if (!nic_dev->intr_coalesce[qid].user_set_intr_coal_flag) { + err = set_interrupt_moder(nic_dev, qid, + coalesc_timer_cfg, + pending_limt); + if (!err) { + nic_dev->intr_coalesce[qid].pending_limt = + pending_limt; + nic_dev->intr_coalesce[qid].coalesce_timer_cfg = + coalesc_timer_cfg; + } + } + } - pci_set_drvdata(pdev, NULL); + nic_dev->his_link_speed = speed; +} - destroy_workqueue(nic_dev->workq); +void nic_event(struct hinic_lld_dev *lld_dev, void *adapter, + struct hinic_event_info *event) +{ + struct hinic_nic_dev *nic_dev = adapter; + struct net_device *netdev; + enum hinic_event_type type; - hinic_free_hwdev(nic_dev->hwdev); + if (!nic_dev || !event || !hinic_support_nic(lld_dev->hwdev, NULL)) + return; - free_netdev(netdev); + netdev = nic_dev->netdev; + type = event->type; - pci_release_regions(pdev); - pci_disable_device(pdev); + switch (type) { + case HINIC_EVENT_LINK_DOWN: + hinic_link_status_change(nic_dev, false); + break; + case HINIC_EVENT_LINK_UP: + hinic_link_status_change(nic_dev, true); + hinic_intr_coalesc_change(nic_dev, event); + break; + case HINIC_EVENT_HEART_LOST: + hinic_heart_lost(nic_dev); + hinic_link_status_change(nic_dev, false); + break; + case HINIC_EVENT_FAULT: + if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && + event->info.event.chip.func_id == + hinic_global_func_id(lld_dev->hwdev)) + hinic_link_status_change(nic_dev, false); + break; + case HINIC_EVENT_DCB_STATE_CHANGE: + if 
(nic_dev->default_cos_id == event->dcb_state.default_cos) + break; - dev_info(&pdev->dev, "HiNIC driver - removed\n"); -} + /* PF notify to vf, don't need to handle this event */ + if (!HINIC_FUNC_IS_VF(nic_dev->hwdev)) + break; -static const struct pci_device_id hinic_pci_table[] = { - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, - { 0, 0} -}; -MODULE_DEVICE_TABLE(pci, hinic_pci_table); + nicif_info(nic_dev, drv, netdev, "Change default cos %d to %d\n", + nic_dev->default_cos_id, + event->dcb_state.default_cos); -static struct pci_driver hinic_driver = { - .name = HINIC_DRV_NAME, - .id_table = hinic_pci_table, - .probe = hinic_probe, - .remove = hinic_remove, -}; + nic_dev->default_cos_id = event->dcb_state.default_cos; + hinic_set_sq_default_cos(netdev, nic_dev->default_cos_id); + break; + case HINIC_EVENT_PORT_MODULE_EVENT: + hinic_port_module_event_handler(nic_dev, event); + break; + case HINIC_EVENT_MGMT_WATCHDOG_EVENT: + hinic_link_status_change(nic_dev, false); + break; + default: + break; + } +} -module_pci_driver(hinic_driver); +struct hinic_uld_info nic_uld_info = { + .probe = nic_probe, + .remove = nic_remove, + .suspend = NULL, + .resume = NULL, + .event = nic_event, + .ioctl = nic_ioctl, +}; /*lint -e766*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c new file mode 100644 index 0000000000000000000000000000000000000000..c2a07565fc540c6a5187f0c18cffb4c0a736916b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c @@ -0,0 +1,1730 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_eqs.h" +#include "hinic_mbox.h" + +#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 +#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define HINIC_MBOX_INT_WB_EN_SHIFT 28 + +#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF +#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F +#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F +#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define HINIC_MBOX_INT_WB_EN_MASK 0x1 + +#define HINIC_MBOX_INT_SET(val, field) \ + (((val) & HINIC_MBOX_INT_##field##_MASK) << \ + HINIC_MBOX_INT_##field##_SHIFT) + +enum hinic_mbox_tx_status { + TX_NOT_DONE = 1, +}; + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. + */ +#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1 + +#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1 + +#define HINIC_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \ + HINIC_MBOX_CTRL_##field##_SHIFT) + +#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MBOX_HEADER_MODULE_SHIFT 11 +#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MBOX_HEADER_SEQID_SHIFT 24 +#define HINIC_MBOX_HEADER_LAST_SHIFT 30 +/* specifies the mailbox message direction + * 0 - send + * 1 - receive + */ +#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MBOX_HEADER_CMD_SHIFT 32 +#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40 +#define HINIC_MBOX_HEADER_STATUS_SHIFT 48 +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54 + +#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F +#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F +#define HINIC_MBOX_HEADER_LAST_MASK 0x1 +#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MBOX_HEADER_CMD_MASK 0xFF +#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF +#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F +#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF + +#define HINIC_MBOX_HEADER_GET(val, field) \ + (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \ + HINIC_MBOX_HEADER_##field##_MASK) +#define HINIC_MBOX_HEADER_SET(val, field) \ + ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \ + HINIC_MBOX_HEADER_##field##_SHIFT) + +#define MBOX_SEGLEN_MASK \ + HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN) + +#define HINIC_MBOX_SEG_LEN 48 +#define HINIC_MBOX_COMP_TIME 8000U +#define MBOX_MSG_POLLING_TIMEOUT 8000 +#define MBOX_MSG_RETRY_ACK_TIMEOUT 1000 + +#define HINIC_MBOX_DATA_SIZE 2040 + +#define MBOX_MAX_BUF_SZ 2048UL +#define MBOX_HEADER_SZ 8 + +#define MBOX_INFO_SZ 4 + +/* MBOX size is 64B, 8B for mbox_header, 4B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL + +/* mbox write back status is 16B, only first 4B is used */ 
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define SEQ_ID_START_VAL 0 +#define SEQ_ID_MAX_VAL 42 +#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ + SEQ_ID_MAX_VAL * MBOX_SEG_LEN) + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define HINIC_MGMT_RSP_AEQN 0 +#define HINIC_MBOX_RSP_AEQN 2 +#define HINIC_MBOX_RECV_AEQN 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC_FUNC_CSR_MAILBOX_DATA_OFF) + +#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xFF +#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) +#define MBOX_MSG_ID_INC(func_to_func) (MBOX_MSG_ID(func_to_func) = \ + (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) + +#define FUNC_ID_OFF_SET_8B 8 +#define FUNC_ID_OFF_SET_10B 10 + +enum hinic_hwif_direction_type { + HINIC_HWIF_DIRECT_SEND = 0, + HINIC_HWIF_RESPONSE = 1, +}; + +enum mbox_seg_type { + NOT_LAST_SEG, + LAST_SEG, +}; + +enum mbox_ordering_type { + STRONG_ORDER, +}; + +enum mbox_write_back_type { + WRITE_BACK = 1, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +struct hinic_set_random_id { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 vf_in_pf; + u8 rsvd1; + u16 func_idx; + u32 random_id; +}; + +static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx, + const void *buf_in, u16 in_size, u16 offset) +{ + u16 func_idx; + + if (in_size < offset + sizeof(func_idx)) { + sdk_warn(hwdev->dev_hdl, + "Receive mailbox msg len: %d less than %ld Bytes is invalid\n", + in_size, offset + sizeof(func_idx)); + return false; + } + + func_idx = *((u16 *)((u8 *)buf_in + offset)); + + if (src_func_idx != func_idx) { + sdk_warn(hwdev->dev_hdl, + "Reveive mailbox function id(0x%x) not equal to msg function id(0x%x)\n", + src_func_idx, func_idx); + return false; + } + + return true; +} + +bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + return check_func_id(hwdev, func_idx, buf_in, in_size, + FUNC_ID_OFF_SET_8B); +} + +bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + return check_func_id(hwdev, func_idx, buf_in, in_size, + FUNC_ID_OFF_SET_10B); +} + +static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info); + +/** + * hinic_register_ppf_mbox_cb - register mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_ppf_mbox_cb callback) +{ + struct 
hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + if (mod >= HINIC_MOD_MAX) + return -EFAULT; + + func_to_func->ppf_mbox_cb[mod] = callback; + + set_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); + + return 0; +} + +/** + * hinic_register_pf_mbox_cb - register mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_mbox_cb callback) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + if (mod >= HINIC_MOD_MAX) + return -EFAULT; + + func_to_func->pf_mbox_cb[mod] = callback; + + set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + return 0; +} + +/** + * hinic_register_vf_mbox_cb - register mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_vf_mbox_cb callback) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + if (mod >= HINIC_MOD_MAX) + return -EFAULT; + + func_to_func->vf_mbox_cb[mod] = callback; + + set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + return 0; +} + +/** + * hinic_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_recv_from_ppf_mbox_cb callback) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + if (mod >= HINIC_MOD_MAX) + return -EFAULT; + + func_to_func->pf_recv_from_ppf_mbox_cb[mod] = callback; + + set_bit(HINIC_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return 0; +} + +/** + * hinic_unregister_ppf_mbox_cb - unregister the mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + clear_bit(HINIC_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); + + while (test_bit(HINIC_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->ppf_mbox_cb[mod] = NULL; +} + +/** + * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + while (test_bit(HINIC_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->pf_mbox_cb[mod] = NULL; +} + +/** + * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod) +{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + while (test_bit(HINIC_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->vf_mbox_cb[mod] = NULL; +} + +/** + * hinic_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf from ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + clear_bit(HINIC_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + while (test_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod])) + usleep_range(900, 1000); + + func_to_func->pf_recv_from_ppf_mbox_cb[mod] = NULL; +} + +int vf_to_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_mbox_func_to_func *func_to_func = handle; + + sdk_warn(func_to_func->hwdev->dev_hdl, "VF command is not supported yet\n"); + return -EFAULT; +} + +static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + hinic_vf_mbox_cb cb; + int ret; + + if (recv_mbox->mod >= HINIC_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC_VF_MBOX_CB_REG, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox, + recv_mbox->mbox_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(HINIC_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int +recv_pf_from_ppf_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + hinic_pf_recv_from_ppf_mbox_cb cb; + enum hinic_mod_type mod = recv_mbox->mod; + int ret; + + if (mod >= HINIC_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + mod); + return -EINVAL; + } + + set_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + cb = func_to_func->pf_recv_from_ppf_mbox_cb[mod]; + if (cb && test_bit(HINIC_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod])) { + ret = cb(func_to_func->hwdev, recv_mbox->cmd, + recv_mbox->mbox, recv_mbox->mbox_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); + ret = -EINVAL; + } + + clear_bit(HINIC_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return ret; +} + +static int recv_ppf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u8 pf_id, void *buf_out, u16 *out_size) +{ + hinic_ppf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC_PPF_MBOX_CB_RUNNING, + 
&func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->hwdev, pf_id, vf_id, recv_mbox->cmd, + recv_mbox->mbox, recv_mbox->mbox_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %d\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int +recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u16 src_func_idx, void *buf_out, + u16 *out_size) +{ + hinic_pf_mbox_cb cb; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC_PF_MBOX_CB_REG, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) { + vf_id = src_func_idx - + hinic_glb_pf_vf_offset(func_to_func->hwdev); + ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd, + recv_mbox->mbox, recv_mbox->mbox_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + u8 size) +{ + u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev); + int i; + + for (i = 0; i < size; i++) { + if (cmd == cmd_handle[i].cmd) { + if (cmd_handle[i].check_cmd) + return cmd_handle[i].check_cmd(hwdev, src_idx, + buf_in, in_size); + else + return true; + } + } + + sdk_err(hwdev->dev_hdl, "Unsupported vf cmd %d\n", cmd); + + return false; +} + +static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox, + u16 src_func_idx) +{ + struct hinic_hwdev *dev = func_to_func->hwdev; + struct mbox_msg_info msg_info = {0}; + u16 out_size = MBOX_MAX_BUF_SZ; + void *buf_out = recv_mbox->buf_out; + int err = 0; + + if (HINIC_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, + &out_size); + } else { /* pf/ppf process */ + + if (IS_PF_OR_PPF_SRC(src_func_idx)) { + if (HINIC_IS_PPF(dev)) { + err = recv_ppf_mbox_handler(func_to_func, + recv_mbox, + (u8)src_func_idx, + buf_out, &out_size); + if (err) + goto out; + } else { + err = recv_pf_from_ppf_handler(func_to_func, + recv_mbox, + buf_out, + &out_size); + if (err) + goto out; + } + /* The source is neither PF nor PPF, so it is from VF */ + } else { + err = recv_pf_from_vf_mbox_handler(func_to_func, + recv_mbox, + src_func_idx, + buf_out, &out_size); + } + } + +out: + if (recv_mbox->ack_type == MBOX_ACK) { + msg_info.msg_id = recv_mbox->msg_info.msg_id; + if (err == HINIC_DEV_BUSY_ACTIVE_FW || + err == HINIC_MBOX_PF_BUSY_ACTIVE_FW) + msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW; + else if (err == HINIC_MBOX_VF_CMD_ERROR) + msg_info.status = HINIC_MBOX_VF_CMD_ERROR; + else if (err) + msg_info.status = HINIC_MBOX_PF_SEND_ERR; + + /* if not data need to response, set out_size to 1 */ + if 
(!out_size || err) + out_size = MBOX_MSG_NO_DATA_LEN; + + send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd, + buf_out, out_size, src_func_idx, + HINIC_HWIF_RESPONSE, MBOX_ACK, + &msg_info); + } + + kfree(recv_mbox->buf_out); + kfree(recv_mbox->mbox); + kfree(recv_mbox); +} + +static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox, + u8 seq_id, u8 seg_len) +{ + if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN) + return false; + else if (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN) + return false; + + if (seq_id == 0) { + recv_mbox->seq_id = seq_id; + } else { + if (seq_id != recv_mbox->seq_id + 1) + return false; + + recv_mbox->seq_id = seq_id; + } + + return true; +} + +static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + struct hinic_recv_mbox *recv_mbox) +{ + spin_lock(&func_to_func->mbox_lock); + if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) + complete(&recv_mbox->recv_done); + else + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", + func_to_func->send_msg_id, recv_mbox->msg_info.msg_id, + recv_mbox->msg_info.status); + spin_unlock(&func_to_func->mbox_lock); +} + +static void recv_func_mbox_work_handler(struct work_struct *work) +{ + struct hinic_mbox_work *mbox_work = + container_of(work, struct hinic_mbox_work, work); + + recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox, + mbox_work->src_func_idx); + + kfree(mbox_work); +} + +static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, + void *header, struct hinic_recv_mbox *recv_mbox) +{ + u64 mbox_header = *((u64 *)header); + void *mbox_body = MBOX_BODY_FROM_HDR(header); + struct hinic_recv_mbox *rcv_mbox_temp = NULL; + u16 src_func_idx; + struct hinic_mbox_work *mbox_work; + int pos; + u8 seq_id, seg_len; + + seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID); + seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN); + src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n", + src_func_idx, recv_mbox->seq_id, seq_id, seg_len); + recv_mbox->seq_id = SEQ_ID_MAX_VAL; + return; + } + + pos = seq_id * MBOX_SEG_LEN; + memcpy((u8 *)recv_mbox->mbox + pos, mbox_body, + HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN)); + + if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST)) + return; + + recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD); + recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE); + recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN); + recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK); + recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID); + recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS); + recv_mbox->seq_id = SEQ_ID_MAX_VAL; + + if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) == + HINIC_HWIF_RESPONSE) { + resp_mbox_handler(func_to_func, recv_mbox); + return; + } + + rcv_mbox_temp = kzalloc(sizeof(*rcv_mbox_temp), GFP_KERNEL); + if (!rcv_mbox_temp) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox memory failed.\n"); + return; + } + memcpy(rcv_mbox_temp, recv_mbox, sizeof(*rcv_mbox_temp)); + + rcv_mbox_temp->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); 
+ if (!rcv_mbox_temp->mbox) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox message memory failed.\n"); + goto rcv_mbox_msg_err; + } + memcpy(rcv_mbox_temp->mbox, recv_mbox->mbox, MBOX_MAX_BUF_SZ); + + rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!rcv_mbox_temp->buf_out) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate receive mbox out buffer memory failed.\n"); + goto rcv_mbox_buf_err; + } + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); + goto mbox_work_err; + } + + mbox_work->func_to_func = func_to_func; + mbox_work->recv_mbox = rcv_mbox_temp; + + mbox_work->src_func_idx = src_func_idx; + INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); + queue_work(func_to_func->workq, &mbox_work->work); + + return; + +mbox_work_err: + kfree(rcv_mbox_temp->buf_out); + +rcv_mbox_buf_err: + kfree(rcv_mbox_temp->mbox); + +rcv_mbox_msg_err: + kfree(rcv_mbox_temp); +} + +int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + struct hinic_set_random_id rand_info = {0}; + u16 out_size = sizeof(rand_info); + int ret; + + rand_info.version = HINIC_CMD_VER_FUNC_ID; + rand_info.func_idx = func_id; + rand_info.vf_in_pf = (u8)(func_id - hinic_glb_pf_vf_offset(hwdev)); + get_random_bytes(&rand_info.random_id, sizeof(u32)); + + func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id; + + ret = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID, + &rand_info, sizeof(rand_info), + &rand_info, &out_size, 0); + if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED && + rand_info.status) || !out_size || ret) { + sdk_err(hwdev->dev_hdl, "Failed to set vf random id, err: %d, status: 0x%x, out size: 0x%x\n", + ret, rand_info.status, out_size); + return -EINVAL; + } + + if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED) + return rand_info.status; + + func_to_func->vf_mbx_old_rand_id[func_id] = + func_to_func->vf_mbx_rand_id[func_id]; + + return 0; +} + +static void update_random_id_work_handler(struct work_struct *work) +{ + struct hinic_mbox_work *mbox_work = + container_of(work, struct hinic_mbox_work, work); + struct hinic_mbox_func_to_func *func_to_func = mbox_work->func_to_func; + u16 src = mbox_work->src_func_idx; + int err; + + err = set_vf_mbox_random_id(func_to_func->hwdev, src); + if (err) + sdk_warn(func_to_func->hwdev->dev_hdl, "Update vf id(0x%x) random id fail\n", + mbox_work->src_func_idx); + + kfree(mbox_work); +} + +bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func, + u8 *header) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u64 mbox_header = *((u64 *)header); + struct hinic_mbox_work *mbox_work; + u32 random_id; + u16 offset, src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + int vf_in_pf; + + if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random) + return true; + + if (!HINIC_IS_PPF(hwdev)) { + offset = hinic_glb_pf_vf_offset(hwdev); + vf_in_pf = src - offset; + + if (vf_in_pf < 1 || vf_in_pf > hinic_func_max_vf(hwdev)) { + sdk_warn(hwdev->dev_hdl, + "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n", + src, (offset + 1), + (hinic_func_max_vf(hwdev) + offset)); + return false; + } + } + + random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN + + MBOX_HEADER_SZ)); + + if (random_id == func_to_func->vf_mbx_rand_id[src] || + random_id == 
func_to_func->vf_mbx_old_rand_id[src]) + return true; + + sdk_warn(hwdev->dev_hdl, + "Receive func_id(0x%x) mailbox random id(0x%x) mismatch with pf reserve(0x%x)\n", + src, random_id, func_to_func->vf_mbx_rand_id[src]); + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + sdk_err(func_to_func->hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); + return false; + } + + mbox_work->func_to_func = func_to_func; + mbox_work->src_func_idx = src; + + INIT_WORK(&mbox_work->work, update_random_id_work_handler); + queue_work(func_to_func->workq, &mbox_work->work); + + return false; +} + +void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_recv_mbox *recv_mbox; + u64 mbox_header = *((u64 *)header); + u64 src, dir; + + func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; + + dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION); + src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (src >= HINIC_MAX_FUNCTIONS) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox source function id: %u is invalid\n", + (u32)src); + return; + } + + if (!check_vf_mbox_random_id(func_to_func, header)) + return; + + recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ? + &func_to_func->mbox_send[src] : + &func_to_func->mbox_resp[src]; + + recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox); +} + +void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_send_mbox *send_mbox; + + func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; + send_mbox = &func_to_func->send_mbox; + + complete(&send_mbox->send_done); +} + +static void clear_mbox_status(struct hinic_send_mbox *mbox) +{ + *mbox->wb_status = 0; + + /* clear mailbox write back status */ + wmb(); +} + +static void mbox_copy_header(struct hinic_hwdev *hwdev, + struct hinic_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) + __raw_writel(*(data + i), mbox->data + i * sizeof(u32)); +} + +static void mbox_copy_send_data(struct hinic_hwdev *hwdev, + struct hinic_send_mbox *mbox, void *seg, + u16 seg_len) +{ + u32 *data = seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message should be aligned in 4 bytes. 
*/ + if (seg_len % chk_sz) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) { + __raw_writel(*(data + i), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); + } +} + +static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, + u16 dst_func, u16 dst_aeqn, u16 rsp_aeq, + u16 seg_len, int poll) +{ + u32 mbox_int, mbox_ctrl; + + mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | + HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) | + HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ + + MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + + wmb(); /* writing the mbox int attributes */ + mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); + + if (poll) + mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); + else + mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE); + + hinic_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +void dump_mox_reg(struct hinic_hwdev *hwdev) +{ + u32 val; + + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); + val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); +} + +static u16 get_mbox_status(struct hinic_send_mbox *mbox) +{ + /* write back is 16B, but only use first 4B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rmb(); /* verify reading before check */ + + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static u16 mbox_msg_ack_aeqn(struct hinic_hwdev *hwdev, + enum hinic_hwif_direction_type seq_dir) +{ + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn; + + if (num_aeqs >= HINIC_HW_MAX_AEQS) + dst_aeqn = HINIC_MBOX_RSP_AEQN; + else + dst_aeqn = 0; + + return dst_aeqn; +} + +static int mbox_retry_get_ack(struct hinic_mbox_func_to_func *func_to_func, + struct completion *done, u16 aeq_id) +{ + ulong timeo = msecs_to_jiffies(MBOX_MSG_RETRY_ACK_TIMEOUT); + int err; + + init_completion(done); + + err = hinic_reschedule_eq(func_to_func->hwdev, HINIC_AEQ, aeq_id); + if (err) + return err; + + if (!wait_for_completion_timeout(done, timeo)) + return -ETIMEDOUT; + + return 0; +} + +static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, + u64 header, u16 dst_func, void *seg, u16 seg_len, + int poll, void *msg_info) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn, wb_status = 0, errcode, rsp_aeq; + u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); + struct completion *done = &send_mbox->send_done; + ulong jif; + u32 cnt = 0; + + if (num_aeqs >= HINIC_HW_MAX_AEQS) + dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? + HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; + else + dst_aeqn = 0; + + rsp_aeq = (dst_aeqn == 0) ? 
0 : HINIC_MBOX_RSP_AEQN; + + if (!poll) + init_completion(done); + + clear_mbox_status(send_mbox); + + mbox_copy_header(hwdev, send_mbox, &header); + + mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, rsp_aeq, + seg_len, poll); + + wmb(); /* writing the mbox msg attributes */ + + if (poll) { + while (cnt < MBOX_MSG_POLLING_TIMEOUT) { + wb_status = get_mbox_status(send_mbox); + if (MBOX_STATUS_FINISHED(wb_status)) + break; + + usleep_range(900, 1000); + cnt++; + } + + if (cnt == MBOX_MSG_POLLING_TIMEOUT) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", + wb_status); + dump_mox_reg(hwdev); + return -ETIMEDOUT; + } + } else { + jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME); + if (!wait_for_completion_timeout(done, jif) && + mbox_retry_get_ack(func_to_func, done, rsp_aeq)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout\n"); + dump_mox_reg(hwdev); + hinic_dump_aeq_info(hwdev); + return -ETIMEDOUT; + } + + wb_status = get_mbox_status(send_mbox); + } + + if (!MBOX_STATUS_SUCCESS(wb_status)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %d error, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return errcode ? errcode : -EFAULT; + } + + return 0; +} + +static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, void *msg, + u16 msg_len, u16 dst_func, + enum hinic_hwif_direction_type direction, + enum hinic_mbox_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + int err = 0; + u32 seq_id = 0; + u16 seg_len = MBOX_SEG_LEN; + u16 left = msg_len; + u8 *msg_seg = (u8 *)msg; + u64 header = 0; + + down(&func_to_func->msg_send_sem); + + header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MBOX_HEADER_SET(mod, MODULE) | + HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) | + HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) | + HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) | + HINIC_MBOX_HEADER_SET(direction, DIRECTION) | + HINIC_MBOX_HEADER_SET(cmd, CMD) | + /* The vf's offset to it's associated pf */ + HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) | + HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) | + HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev), + SRC_GLB_FUNC_IDX); + + while (!(HINIC_MBOX_HEADER_GET(header, LAST))) { + if (left <= HINIC_MBOX_SEG_LEN) { + header &= ~MBOX_SEGLEN_MASK; + header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN); + header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST); + + seg_len = left; + } + + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, + seg_len, func_to_func->send_ack_mod, + msg_info); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n", + HINIC_MBOX_HEADER_GET(header, SEQID)); + goto send_err; + } + + left -= HINIC_MBOX_SEG_LEN; + msg_seg += HINIC_MBOX_SEG_LEN; + + seq_id++; + header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK, + SEQID)); + header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID); + } + +send_err: + up(&func_to_func->msg_send_sem); + + return err; +} + +static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, u16 dst_func, 
+ void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + /* use mbox_resp to hold the response data from the other function */ + struct hinic_recv_mbox *mbox_for_resp; + struct mbox_msg_info msg_info = {0}; + ulong timeo; + int err; + + if (!func_to_func->hwdev->chip_present_flag) + return -EPERM; + + mbox_for_resp = &func_to_func->mbox_resp[dst_func]; + + down(&func_to_func->mbox_send_sem); + + init_completion(&mbox_for_resp->recv_done); + + msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); + + set_mbox_to_func_event(func_to_func, EVENT_START); + + err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size, + dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK, + &msg_info); + if (err) { + sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %d cmd 0x%x failed, msg_id: %d\n", + mod, cmd, msg_info.msg_id); + set_mbox_to_func_event(func_to_func, EVENT_FAIL); + goto send_err; + } + + timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME); + if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo) && + mbox_retry_get_ack(func_to_func, &mbox_for_resp->recv_done, + mbox_msg_ack_aeqn(func_to_func->hwdev, + HINIC_HWIF_DIRECT_SEND))) { + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + sdk_err(func_to_func->hwdev->dev_hdl, + "Send mbox msg mod %d cmd 0x%x timeout, msg_id: %d\n", + mod, cmd, msg_info.msg_id); + hinic_dump_aeq_info(func_to_func->hwdev); + err = -ETIMEDOUT; + goto send_err; + } + + set_mbox_to_func_event(func_to_func, EVENT_END); + + if (mbox_for_resp->msg_info.status) { + err = mbox_for_resp->msg_info.status; + if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + sdk_err(func_to_func->hwdev->dev_hdl, "Mbox response error(0x%x)\n", + mbox_for_resp->msg_info.status); + goto send_err; + } + + if (buf_out && out_size) { + if (*out_size < mbox_for_resp->mbox_len) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Invalid response mbox message length: %d for mod %d cmd 0x%x, should be less than: %d\n", + mbox_for_resp->mbox_len, mod, cmd, *out_size); + err = -EFAULT; + goto send_err; + } + + if (mbox_for_resp->mbox_len) + memcpy(buf_out, mbox_for_resp->mbox, + mbox_for_resp->mbox_len); + + *out_size = mbox_for_resp->mbox_len; + } + +send_err: + up(&func_to_func->mbox_send_sem); + + return err; +} + +static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func, + void *buf_in, u16 in_size) +{ + if (!buf_in || !in_size) + return -EINVAL; + + if (in_size > HINIC_MBOX_DATA_SIZE) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox msg len(%d) exceeds limit(%d)\n", + in_size, HINIC_MBOX_DATA_SIZE); + return -EINVAL; + } + + return 0; +} + +int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) + return err; + + if (!HINIC_IS_PPF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Params error, only ppf can send message to other host, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, dest_host_ppf_id, + buf_in, in_size, buf_out, out_size, timeout); +} + +int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err =
mbox_func_params_valid(func_to_func, buf_in, in_size); + + if (err) + return err; + + if (HINIC_IS_VF(hwdev) || HINIC_IS_PPF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, hinic_ppf_idx(hwdev), + buf_in, in_size, buf_out, out_size, timeout); +} + +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size); + + if (err) + return err; + + if (!HINIC_IS_VF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + err = hinic_func_own_get(hwdev); + if (err) + return err; + + /* port_to_port_idx - imply which PCIE interface PF is connected */ + err = hinic_mbox_to_func(func_to_func, mod, cmd, + hinic_pf_id_of_vf_hw(hwdev), buf_in, in_size, + buf_out, out_size, timeout); + hinic_func_own_free(hwdev); + return err; +} + +int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size) +{ + struct mbox_msg_info msg_info = {0}; + int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size); + + if (err) + return err; + + down(&hwdev->func_to_func->mbox_send_sem); + + err = send_mbox_to_func(hwdev->func_to_func, mod, cmd, buf_in, in_size, + func_idx, HINIC_HWIF_DIRECT_SEND, MBOX_NO_ACK, + &msg_info); + if (err) + sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n"); + + up(&hwdev->func_to_func->mbox_send_sem); + + return err; +} + +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + int err; + + err = hinic_func_own_get(hwdev); + if (err) + return err; + + err = hinic_mbox_to_func_no_ack(hwdev, hinic_pf_id_of_vf_hw(hwdev), + mod, cmd, buf_in, in_size); + hinic_func_own_free(hwdev); + return err; +} + +int __hinic_mbox_to_vf(void *hwdev, + enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func; + int err; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func; + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) + return err; + + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + if (!vf_id) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "VF id(%d) error!\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_idx = hinic_glb_pf_vf_offset(hwdev) + vf_id; + + return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, + in_size, buf_out, out_size, timeout); +} + +int hinic_mbox_ppf_to_vf(void *hwdev, enum hinic_mod_type mod, u16 func_id, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func; + int err; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic_hwdev *)hwdev)->func_to_func; + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) + return err; + + if (HINIC_IS_VF((struct hinic_hwdev *)hwdev)) { + 
sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, func_id, buf_in, + in_size, buf_out, out_size, timeout); +} +EXPORT_SYMBOL(hinic_mbox_ppf_to_vf); + +int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u16 dst_pf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + int err; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size); + if (err) + return err; + + if (!HINIC_IS_PPF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Params error, func_type: %d\n", + hinic_func_type(hwdev)); + return -EINVAL; + } + + if (hinic_ppf_idx(hwdev) == dst_pf_id) { + sdk_err(hwdev->dev_hdl, + "Params error, dst_pf_id(0x%x) is ppf\n", dst_pf_id); + return -EINVAL; + } + + return hinic_mbox_to_func(func_to_func, mod, cmd, dst_pf_id, buf_in, + in_size, buf_out, out_size, timeout); +} + +static int init_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + int err; + + mbox_info->seq_id = SEQ_ID_MAX_VAL; + + mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->mbox) + return -ENOMEM; + + mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->buf_out) { + err = -ENOMEM; + goto alloc_buf_out_err; + } + + return 0; + +alloc_buf_out_err: + kfree(mbox_info->mbox); + + return err; +} + +static void clean_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + kfree(mbox_info->buf_out); + kfree(mbox_info->mbox); +} + +static int alloc_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 func_idx, i; + int err; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) { + err = init_mbox_info(&mbox_info[func_idx]); + if (err) { + pr_err("Failed to init mbox info\n"); + goto init_mbox_info_err; + } + } + + return 0; + +init_mbox_info_err: + for (i = 0; i < func_idx; i++) + clean_mbox_info(&mbox_info[i]); + + return err; +} + +static void free_mbox_info(struct hinic_recv_mbox *mbox_info) +{ + u16 func_idx; + + for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) + clean_mbox_info(&mbox_info[func_idx]); +} + +static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, + MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, + GFP_KERNEL); + if (!send_mbox->wb_vaddr) + return -ENOMEM; + + send_mbox->wb_status = send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + + hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, + addr_h); + hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, + addr_l); + + return 0; +} + +static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) +{ + struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic_hwdev *hwdev = func_to_func->hwdev; + + hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, + 0); + hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, + 0); + + dma_free_coherent(hwdev->dev_hdl, 
MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, + send_mbox->wb_paddr); +} + +int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev) +{ + u8 vf_in_pf; + int err = 0; + + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + for (vf_in_pf = 1; vf_in_pf <= hinic_func_max_vf(hwdev); vf_in_pf++) { + err = set_vf_mbox_random_id(hwdev, + hinic_glb_pf_vf_offset(hwdev) + + vf_in_pf); + if (err) + break; + } + + if (err == HINIC_MGMT_CMD_UNSUPPORTED) { + hwdev->func_to_func->support_vf_random = false; + err = 0; + sdk_warn(hwdev->dev_hdl, "Mgmt unsupport set vf random id\n"); + } else if (!err) { + hwdev->func_to_func->support_vf_random = true; + sdk_info(hwdev->dev_hdl, "PF Set vf random id success\n"); + } + + return err; +} + +void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev, + enum hinic_mbox_send_mod mod) +{ + if (!hwdev || !hwdev->func_to_func) + return; + + hwdev->func_to_func->send_ack_mod = mod; +} + +int hinic_func_to_func_init(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func; + struct card_node *chip_node; + int err; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) + return -ENOMEM; + + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + chip_node = hwdev->chip_node; + func_to_func->vf_mbx_rand_id = chip_node->vf_mbx_rand_id; + func_to_func->vf_mbx_old_rand_id = chip_node->vf_mbx_old_rand_id; + sema_init(&func_to_func->mbox_send_sem, 1); + sema_init(&func_to_func->msg_send_sem, 1); + spin_lock_init(&func_to_func->mbox_lock); + func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME); + if (!func_to_func->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); + err = -ENOMEM; + goto create_mbox_workq_err; + } + + err = alloc_mbox_info(func_to_func->mbox_send); + if (err) { + sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_active fail\n"); + goto alloc_mbox_for_send_err; + } + + err = alloc_mbox_info(func_to_func->mbox_resp); + if (err) { + sdk_err(hwdev->dev_hdl, "Alloc mem for mbox_passive fail\n"); + goto alloc_mbox_for_resp_err; + } + + err = alloc_mbox_wb_status(func_to_func); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); + goto alloc_wb_status_err; + } + + prepare_send_mbox(func_to_func); + + func_to_func->send_ack_mod = HINIC_MBOX_SEND_MSG_POLL; + + return 0; + +alloc_wb_status_err: + free_mbox_info(func_to_func->mbox_resp); + +alloc_mbox_for_resp_err: + free_mbox_info(func_to_func->mbox_send); + +alloc_mbox_for_send_err: + destroy_workqueue(func_to_func->workq); + +create_mbox_workq_err: + kfree(func_to_func); + + return err; +} + +void hinic_func_to_func_free(struct hinic_hwdev *hwdev) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + + /* destroy workqueue before free related mbox resources in case of + * illegal resource access + */ + destroy_workqueue(func_to_func->workq); + + free_mbox_wb_status(func_to_func); + free_mbox_info(func_to_func->mbox_resp); + free_mbox_info(func_to_func->mbox_send); + + kfree(func_to_func); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h new file mode 100644 index 0000000000000000000000000000000000000000..a03a5b8113f269369d2a9924899ab1c71b07d0f9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you 
can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_MBOX_H_ +#define HINIC_MBOX_H_ + +#define HINIC_MBOX_PF_SEND_ERR 0x1 +#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2 +#define HINIC_MBOX_VF_CMD_ERROR 0x3 + +#define HINIC_MAX_FUNCTIONS 512 +#define HINIC_MAX_PF_FUNCS 16 + +#define HINIC_MBOX_WQ_NAME "hinic_mbox" + +enum hinic_mbox_seg_errcode { + MBOX_ERRCODE_NO_ERRORS = 0, + /* VF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, + /* PPF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, + /* PF send the mailbox data to the wrong destination functions */ + MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, + /* The mailbox data size is set to all zero */ + MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400, + /* The sender function attribute has not been learned by CPI hardware */ + MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, + /* The receiver function attr has not been learned by CPI hardware */ + MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, +}; + +enum hinic_mbox_ack_type { + MBOX_ACK, + MBOX_NO_ACK, +}; + +struct mbox_msg_info { + u8 msg_id; + u8 status; /*can only use 6 bit*/ +}; + +struct hinic_recv_mbox { + struct completion recv_done; + void *mbox; + u8 cmd; + enum hinic_mod_type mod; + u16 mbox_len; + void *buf_out; + enum hinic_mbox_ack_type ack_type; + struct mbox_msg_info msg_info; + u8 seq_id; +}; + +struct hinic_send_mbox { + struct completion send_done; + u8 *data; + + u64 *wb_status; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +typedef int (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); +typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); +typedef int (*hinic_ppf_mbox_cb)(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); +typedef int (*hinic_pf_recv_from_ppf_mbox_cb)(void *handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +enum mbox_event_state { + EVENT_START = 0, + EVENT_FAIL, + EVENT_TIMEOUT, + EVENT_END, +}; + +enum hinic_mbox_cb_state { + HINIC_VF_MBOX_CB_REG = 0, + HINIC_VF_MBOX_CB_RUNNING, + HINIC_PF_MBOX_CB_REG, + HINIC_PF_MBOX_CB_RUNNING, + HINIC_PPF_MBOX_CB_REG, + HINIC_PPF_MBOX_CB_RUNNING, + HINIC_PPF_TO_PF_MBOX_CB_REG, + HINIC_PPF_TO_PF_MBOX_CB_RUNNIG, +}; + +enum hinic_mbox_send_mod { + HINIC_MBOX_SEND_MSG_INT, + HINIC_MBOX_SEND_MSG_POLL, +}; + +struct hinic_mbox_func_to_func { + struct hinic_hwdev *hwdev; + + struct semaphore mbox_send_sem; + struct semaphore msg_send_sem; + struct hinic_send_mbox send_mbox; + + struct workqueue_struct *workq; + + struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS]; + struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS]; + + hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX]; + hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX]; + hinic_ppf_mbox_cb ppf_mbox_cb[HINIC_MOD_MAX]; + hinic_pf_recv_from_ppf_mbox_cb pf_recv_from_ppf_mbox_cb[HINIC_MOD_MAX]; + unsigned long ppf_to_pf_mbox_cb_state[HINIC_MOD_MAX]; + unsigned long ppf_mbox_cb_state[HINIC_MOD_MAX]; + 
unsigned long pf_mbox_cb_state[HINIC_MOD_MAX]; + unsigned long vf_mbox_cb_state[HINIC_MOD_MAX]; + + u8 send_msg_id; + enum mbox_event_state event_flag; + /* lock for mbox event flag */ + spinlock_t mbox_lock; + + u32 *vf_mbx_old_rand_id; + u32 *vf_mbx_rand_id; + bool support_vf_random; + enum hinic_mbox_send_mod send_ack_mod; +}; + +struct hinic_mbox_work { + struct work_struct work; + u16 src_func_idx; + struct hinic_mbox_func_to_func *func_to_func; + struct hinic_recv_mbox *recv_mbox; +}; + +struct vf_cmd_check_handle { + u8 cmd; + bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx, + void *buf_in, u16 in_size); +}; + +int hinic_register_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_ppf_mbox_cb callback); + +int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_mbox_cb callback); + +int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_vf_mbox_cb callback); + +int hinic_register_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, + hinic_pf_recv_from_ppf_mbox_cb callback); + +void hinic_unregister_ppf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_unregister_ppf_to_pf_mbox_cb(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod); + +void hinic_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); + +void hinic_mbox_self_aeqe_handler(void *handle, u8 *header, u8 size); + +int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev); + +bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size); + +bool hinic_mbox_check_func_id_10B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size); + +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, u16 in_size, + u8 size); + +int hinic_func_to_func_init(struct hinic_hwdev *hwdev); + +void hinic_func_to_func_free(struct hinic_hwdev *hwdev); + +int hinic_mbox_to_host(struct hinic_hwdev *hwdev, u16 dest_host_ppf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +int hinic_mbox_to_ppf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_mbox_to_func_no_ack(struct hinic_hwdev *hwdev, u16 func_idx, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size); + +int hinic_mbox_to_pf_no_ack(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +int hinic_mbox_ppf_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u16 dst_pf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); +int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, + enum hinic_mod_type mod, u16 cmd, u16 dst_func, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int __hinic_mbox_to_vf(void *hwdev, + enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout); + +int vf_to_pf_handler(void *handle, u16 vf_id, u8 
cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +void hinic_set_mbox_seg_ack_mod(struct hinic_hwdev *hwdev, + enum hinic_mbox_send_mod mod); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..2a74fa00b308011c8d81f0a52d213b2ae51869d2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.c @@ -0,0 +1,1449 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" + +#include "hinic_hwif.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_eqs.h" + +#define BUF_OUT_DEFAULT_SIZE 1 +#define SEGMENT_LEN 48 + +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define MGMT_MSG_LAST_SEG_MAX_LEN (MAX_PF_MGMT_BUF_SIZE - \ + SEGMENT_LEN * MGMT_MSG_MAX_SEQ_ID) + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ + +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 + +#define MSG_NO_RESP 0xFFFF + +#define MAX_MSG_SZ 2016 + +#define MAX_CMD_BUF_SIZE 2048ULL + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock_bh(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock_bh(&pf_to_mgmt->sync_event_lock); +} + +/** + * hinic_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: pri handle function + * @callback: the handler for a sync message that will handle messages + * Return: 0 - success, negative - failure + */ +int hinic_register_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod, + void *pri_handle, hinic_mgmt_msg_cb callback) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + if (mod >= HINIC_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + 
pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic_register_mgmt_msg_cb); + +/** + * hinic_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + */ +void hinic_unregister_mgmt_msg_cb(void *hwdev, enum hinic_mod_type mod) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + if (!hwdev || mod >= HINIC_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HINIC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HINIC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} +EXPORT_SYMBOL(hinic_unregister_mgmt_msg_cb); + +void hinic_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev || !proc) + return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Register recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + pf_to_mgmt->proc.info[cmd_idx].cmd = cmd; + pf_to_mgmt->proc.info[cmd_idx].proc = proc; + + pf_to_mgmt->proc.cmd_num++; +} + +void hinic_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev) + return; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Unregister recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + pf_to_mgmt->proc.info[cmd_idx].cmd = 0; + pf_to_mgmt->proc.info[cmd_idx].proc = NULL; + pf_to_mgmt->proc.cmd_num--; + } + } +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + */ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: message ack type + * @direction: the direction of the original message + * @cmd: cmd type + * @msg_id: message id + */ +static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, enum hinic_mod_type mod, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + enum hinic_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + 
HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hinic_hwdev *hwdev, + u64 *header, u16 msg_len, + enum hinic_mod_type mod, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + enum hinic_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic_hwif *hwif = hwdev->hwif; + + *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + */ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, + int msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + */ +static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (cmd_size > MAX_MSG_SZ) + return -EINVAL; + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl; + int err; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + ASYNC_MSG_ID_INC(pf_to_mgmt); + + 
err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_DIRECT_SEND, MSG_NO_RESP); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +/** + * send_msg_to_mgmt_sync - send sync message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @ack_type: message ack type + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + */ +static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hinic_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (cmd_size > MAX_MSG_SZ) + return -EINVAL; + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + + if (ack_type == HINIC_MSG_ACK) + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl; + struct hinic_recv_msg *recv_msg; + struct hinic_msg_head *msg_head; + struct completion *recv_done; + ulong timeo; + int err; + ulong ret; + + /* the aeq fixed num is set to 3, need to ensure the response aeq id < 3 */ + if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC) { + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= HINIC_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + sdk_err(dev, "Failed to send sync msg mod %d cmd 0x%x to mgmt, sync_msg_id: %d\n", + mod, cmd, pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ?
timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (!ret) { + sdk_err(dev, "Mgmt response sync msg mod %d cmd 0x%x timeout, sync_msg_id: %d\n", + mod, cmd, pf_to_mgmt->sync_msg_id); + hinic_dump_aeq_info((struct hinic_hwdev *)hwdev); + err = -ETIMEDOUT; + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) { + up(&pf_to_mgmt->sync_msg_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %d for mod %d cmd 0x%x from mgmt, should less than: %d\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +static int __get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + struct hinic_hwdev *dev = hwdev; + u32 offset; + + offset = HINIC_CLP_REG_GAP * hinic_pcie_itf_id(dev); + + switch (reg_type) { + case HINIC_CLP_BA_HOST: + *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ? + HINIC_CLP_REG(REQ_SRAM_BA) : + HINIC_CLP_REG(RSP_SRAM_BA); + break; + + case HINIC_CLP_SIZE_HOST: + *reg_addr = HINIC_CLP_REG(SRAM_SIZE); + break; + + case HINIC_CLP_LEN_HOST: + *reg_addr = (data_type == HINIC_CLP_REQ_HOST) ? + HINIC_CLP_REG(REQ) : HINIC_CLP_REG(RSP); + break; + + case HINIC_CLP_START_REQ_HOST: + *reg_addr = HINIC_CLP_REG(REQ); + break; + + case HINIC_CLP_READY_RSP_HOST: + *reg_addr = HINIC_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + *reg_addr += offset; + + return 0; +} + +static int hinic_read_clp_reg(struct hinic_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + int err; + u32 reg_addr, reg_value; + + if (data_type == HINIC_CLP_REQ_HOST && + reg_type == HINIC_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == HINIC_CLP_RSP_HOST && + reg_type == HINIC_CLP_START_REQ_HOST) + return -EINVAL; + + err = __get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (err) + return err; + + reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC_CLP_BA_HOST: + reg_value = ((reg_value >> + HINIC_CLP_OFFSET(SRAM_BASE)) & + HINIC_CLP_MASK(SRAM_BASE)); + break; + + case HINIC_CLP_SIZE_HOST: + reg_value = ((reg_value >> + HINIC_CLP_OFFSET(SRAM_SIZE)) & + HINIC_CLP_MASK(SRAM_SIZE)); + break; + + case HINIC_CLP_LEN_HOST: + reg_value = ((reg_value >> HINIC_CLP_OFFSET(LEN)) & + HINIC_CLP_MASK(LEN)); + break; + + case HINIC_CLP_START_REQ_HOST: + reg_value = ((reg_value >> HINIC_CLP_OFFSET(START)) & + HINIC_CLP_MASK(START)); + break; + + case HINIC_CLP_READY_RSP_HOST: + reg_value = ((reg_value >> HINIC_CLP_OFFSET(READY)) & + HINIC_CLP_MASK(READY)); + break; + + default: + break; + } + + *read_value = reg_value; + return 0; +} + +static int __check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == HINIC_CLP_BA_HOST && + value > HINIC_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC_CLP_SIZE_HOST && + value > HINIC_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC_CLP_LEN_HOST && + value > HINIC_CLP_LEN_REG_MAX) + return -EINVAL; + + if (reg_type == 
HINIC_CLP_START_REQ_HOST && + value > HINIC_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC_CLP_READY_RSP_HOST && + value > HINIC_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static void hinic_write_clp_reg(struct hinic_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (data_type == HINIC_CLP_REQ_HOST && + reg_type == HINIC_CLP_READY_RSP_HOST) + return; + if (data_type == HINIC_CLP_RSP_HOST && + reg_type == HINIC_CLP_START_REQ_HOST) + return; + + if (__check_reg_value(reg_type, value)) + return; + + if (__get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = hinic_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC_CLP_LEN_HOST: + reg_value = reg_value & + (~(HINIC_CLP_MASK(LEN) << HINIC_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << HINIC_CLP_OFFSET(LEN)); + break; + + case HINIC_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(HINIC_CLP_MASK(START) << + HINIC_CLP_OFFSET(START))); + reg_value = reg_value | (value << HINIC_CLP_OFFSET(START)); + break; + + case HINIC_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(HINIC_CLP_MASK(READY) << + HINIC_CLP_OFFSET(READY))); + reg_value = reg_value | (value << HINIC_CLP_OFFSET(READY)); + break; + + default: + return; + } + + hinic_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int hinic_read_clp_data(struct hinic_hwdev *hwdev, + void *buf_out, u16 *out_size) +{ + int err; + u32 reg = HINIC_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_READY_RSP_HOST, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); + delay_cnt++; + err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_READY_RSP_HOST, &ready); + if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "Timeout with delay_cnt: %d\n", + delay_cnt); + return -EINVAL; + } + } + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_LEN_HOST, &temp_out_size); + if (err) + return err; + + if (temp_out_size > HINIC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + sdk_err(hwdev->dev_hdl, "Invalid temp_out_size: %d\n", + temp_out_size); + return -EINVAL; + } + + *out_size = (u16)(temp_out_size & 0xffff); + for (; temp_out_size > 0; temp_out_size--) { + *ptr = hinic_hwif_read_reg(hwdev->hwif, reg); + ptr++; + reg = reg + 4; + } + + hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_READY_RSP_HOST, (u32)0x0); + hinic_write_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_LEN_HOST, (u32)0x0); + + return 0; +} + +static int hinic_write_clp_data(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size) +{ + int err; + u32 reg = HINIC_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_START_REQ_HOST, &start); + if (err) + return err; + + while (start == 1) { + usleep_range(9000, 10000); + delay_cnt++; + err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_START_REQ_HOST, &start); + if (err || delay_cnt > HINIC_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_LEN_HOST, in_size); + hinic_write_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_START_REQ_HOST, (u32)0x1); + + for (; in_size > 0; in_size--) { + hinic_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + 
reg = reg + 4; + } + + return 0; +} + +static int hinic_check_clp_init_status(struct hinic_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_REQ_HOST, + HINIC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = hinic_read_clp_reg(hwdev, HINIC_CLP_RSP_HOST, + HINIC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void hinic_clear_clp_data(struct hinic_hwdev *hwdev, + enum clp_data_type data_type) +{ + u32 reg = (data_type == HINIC_CLP_REQ_HOST) ? + HINIC_CLP_DATA(REQ) : HINIC_CLP_DATA(RSP); + u32 count = HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + hinic_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + 4; + } +} + +int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hinic_hwdev *dev = hwdev; + u64 header; + u16 real_size; + u8 *clp_msg_buf; + int err; + + clp_pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->clp_pf_to_mgmt; + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /* 4 bytes alignment */ + if (in_size % HINIC_CLP_DATA_UNIT_HOST) + real_size = (in_size + (u16)sizeof(header) + + HINIC_CLP_DATA_UNIT_HOST); + else + real_size = in_size + (u16)sizeof(header); + real_size = real_size / HINIC_CLP_DATA_UNIT_HOST; + + if (real_size > + (HINIC_CLP_INPUT_BUFFER_LEN_HOST / HINIC_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size: %d\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = hinic_check_clp_init_status(dev); + if (err) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; + } + + hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST); + hinic_write_clp_reg(dev, HINIC_CLP_RSP_HOST, + HINIC_CLP_READY_RSP_HOST, 0x0); + + /* Send request */ + memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0); + + memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + hinic_clear_clp_data(dev, HINIC_CLP_REQ_HOST); + err = hinic_write_clp_data(hwdev, + clp_pf_to_mgmt->clp_msg_buf, real_size); + if (err) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + /* Get response */ + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + memset(clp_msg_buf, 0x0, HINIC_CLP_INPUT_BUFFER_LEN_HOST); + err = hinic_read_clp_data(hwdev, clp_msg_buf, &real_size); + hinic_clear_clp_data(dev, HINIC_CLP_RSP_HOST); + if (err) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + real_size = (u16)((real_size * HINIC_CLP_DATA_UNIT_HOST) & 0xffff); + if (real_size <= sizeof(header) || + real_size > 
HINIC_CLP_INPUT_BUFFER_LEN_HOST) { + sdk_err(dev->dev_hdl, "Invalid response size: %d", real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size: %d, out_size: %d\n", + real_size, *out_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + + return 0; +} + +int hinic_msg_to_mgmt_poll_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + return 0; +} + +/* This function is only used by txrx flush */ +int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hinic_hwdev *)hwdev)->dev_hdl; + int err = -EINVAL; + + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) { + sdk_err(dev, "Mgmt module not initialized\n"); + return -EINVAL; + } + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + + if (!MSG_SZ_IS_VALID(in_size)) { + sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n", + in_size); + return -EINVAL; + } + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +/** + * api cmd write or read bypass defaut use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + */ +int hinic_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic_api_cmd_chain *chain; + + if (!hwdev || !size || !cmd) + return -EINVAL; + + if (size > MAX_CMD_BUF_SIZE) + return -EINVAL; + + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) || + hinic_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_WRITE]; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hinic_api_cmd_write(chain, dest, cmd, size); +} +EXPORT_SYMBOL(hinic_api_cmd_write_nack); + +int hinic_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack, + u16 ack_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic_api_cmd_chain *chain; + + if (!hwdev || !cmd || (ack_size && !ack)) + return -EINVAL; + + if (size > MAX_CMD_BUF_SIZE) + return -EINVAL; + + if (!hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED) || + hinic_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_POLL_READ]; + + if (!(((struct hinic_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hinic_api_cmd_read(chain, dest, cmd, size, ack, ack_size); +} +EXPORT_SYMBOL(hinic_api_cmd_read_ack); + +static void __send_mgmt_ack(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, u16 msg_id) +{ + u16 buf_size; + + if (!in_size) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + /* MGMT sent sync msg, send the response */ + send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, + buf_in, 
buf_size, HINIC_MSG_RESPONSE,
+ msg_id);
+ spin_unlock_bh(&pf_to_mgmt->async_msg_lock);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @buf_in: the buffer of recv msg
+ * @in_size: the size of buffer
+ * @msg_id: message id
+ * @need_resp: whether a response is needed
+ */
+static void mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, u16 msg_id, int need_resp)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+ void *buf_out = pf_to_mgmt->mgmt_ack_buf;
+ enum hinic_mod_type tmp_mod = mod;
+ bool ack_first = false;
+ u16 out_size = 0;
+
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mod >= HINIC_MOD_HW_MAX) {
+ sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n",
+ mod);
+ goto resp;
+ }
+
+ set_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+ if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] ||
+ !test_bit(HINIC_MGMT_MSG_CB_REG,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) {
+ sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n",
+ mod);
+ clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+ goto resp;
+ }
+
+ ack_first = hinic_mgmt_event_ack_first(mod, cmd);
+ if (ack_first && need_resp) {
+ /* send the ack to mgmt first to avoid a command timeout in
+ * mgmt (100ms timeout in mgmt);
+ * mgmt-to-host commands don't need any response data from the
+ * host, just an ack from the host
+ */
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id);
+ }
+
+ pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev,
+ pf_to_mgmt->recv_mgmt_msg_data[tmp_mod],
+ cmd, buf_in, in_size,
+ buf_out, &out_size);
+
+ clear_bit(HINIC_MGMT_MSG_CB_RUNNING,
+ &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]);
+
+resp:
+ if (!ack_first && need_resp)
+ __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size,
+ msg_id);
+}
+
+/**
+ * mgmt_resp_msg_handler - handler for response message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ */
+static void mgmt_resp_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ void *dev = pf_to_mgmt->hwdev->dev_hdl;
+
+ /* discard async msg */
+ if (recv_msg->msg_id & ASYNC_MSG_FLAG)
+ return;
+
+ spin_lock_bh(&pf_to_mgmt->sync_event_lock);
+ if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id &&
+ pf_to_mgmt->event_flag == SEND_EVENT_START) {
+ complete(&recv_msg->recv_done);
+ } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+ sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) mismatch, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else {
+ sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ spin_unlock_bh(&pf_to_mgmt->sync_event_lock);
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hinic_mgmt_msg_handle_work, work);
+
+ mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod,
+ mgmt_work->cmd, mgmt_work->msg,
+ mgmt_work->msg_len, mgmt_work->msg_id,
+ !mgmt_work->async_mgmt_to_pf);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > 
MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN)
+ return false;
+ else if (seq_id == MGMT_MSG_MAX_SEQ_ID &&
+ seg_len > MGMT_MSG_LAST_SEG_MAX_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->seq_id + 1)
+ return false;
+ recv_msg->seq_id = seq_id;
+ }
+
+ return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ */
+static void recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work;
+ u64 mbox_header = *((u64 *)header);
+ void *msg_body = header + sizeof(mbox_header);
+ u8 seq_id, seq_len;
+ u32 offset;
+ u64 dir;
+
+ /* Don't need to get anything from hw when cmd is async */
+ dir = HINIC_MSG_HEADER_GET(mbox_header, DIRECTION);
+ if (dir == HINIC_MSG_RESPONSE &&
+ HINIC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG)
+ return;
+
+ seq_len = HINIC_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ seq_id = HINIC_MSG_HEADER_GET(mbox_header, SEQID);
+
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl,
+ "Mgmt msg sequence id and segment length check failed, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n",
+ recv_msg->seq_id, seq_id, seq_len);
+ /* set seq_id to invalid seq_id */
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+ return;
+ }
+
+ offset = seq_id * SEGMENT_LEN;
+ memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len);
+
+ if (!HINIC_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(mbox_header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(mbox_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(mbox_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(mbox_header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(mbox_header, MSG_ID);
+ recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID;
+
+ if (HINIC_MSG_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_MSG_RESPONSE) {
+ mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ return;
+ }
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt work memory failed\n");
+ return;
+ }
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n");
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to hw device
+ * @header: the header of the message
+ * @size: unused
+ */
+void hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size)
+{
+ struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+ bool is_send_dir = false;
+
+ pf_to_mgmt = dev->pf_to_mgmt;
+
+ is_send_dir = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HINIC_MSG_DIRECT_SEND) ? 
true : false; + + recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + */ +static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + */ +static void free_recv_msg(struct hinic_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + */ +static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + */ +static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + */ +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(HINIC_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg buffers\n"); + goto alloc_msg_buf_err; + } + + err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + 
sdk_err(dev, "Failed to init the api cmd chains\n"); + goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + */ +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + kfree(pf_to_mgmt); +} + +void hinic_flush_mgmt_workq(void *hwdev) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (hinic_func_type(dev) != TYPE_VF && + hinic_is_hwdev_mod_inited(hwdev, HINIC_HWDEV_MGMT_INITED)) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = kzalloc(HINIC_CLP_INPUT_BUFFER_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..23ac9d2362a124b5b574ba4e8ab2b274da7a93d6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_MGMT_H_ +#define HINIC_MGMT_H_ + +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +#define HINIC_MGMT_WQ_NAME "hinic_mgmt" + +enum clp_data_type { + HINIC_CLP_REQ_HOST = 0, + HINIC_CLP_RSP_HOST = 1 +}; + +enum clp_reg_type { + HINIC_CLP_BA_HOST = 0, + HINIC_CLP_SIZE_HOST = 1, + HINIC_CLP_LEN_HOST = 2, + HINIC_CLP_START_REQ_HOST = 3, + HINIC_CLP_READY_RSP_HOST = 4 +}; + +#define HINIC_CLP_REG_GAP 0x20 +#define HINIC_CLP_INPUT_BUFFER_LEN_HOST 4096UL +#define HINIC_CLP_DATA_UNIT_HOST 4UL + +#define HINIC_BAR01_GLOABAL_CTL_OFFSET 0x4000 +#define HINIC_BAR01_CLP_OFFSET 0x5000 + +#define HINIC_CLP_SRAM_SIZE_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x220) +#define HINIC_CLP_REQ_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x224) +#define HINIC_CLP_RSP_SRAM_BA_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x228) +#define HINIC_CLP_REQ_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x22c) +#define HINIC_CLP_RSP_REG (HINIC_BAR01_GLOABAL_CTL_OFFSET + 0x230) +#define HINIC_CLP_REG(member) (HINIC_CLP_##member##_REG) + +#define HINIC_CLP_REQ_DATA (HINIC_BAR01_CLP_OFFSET) +#define HINIC_CLP_RSP_DATA (HINIC_BAR01_CLP_OFFSET + 0x1000) +#define HINIC_CLP_DATA(member) (HINIC_CLP_##member##_DATA) + +#define HINIC_CLP_SRAM_SIZE_OFFSET 16 +#define HINIC_CLP_SRAM_BASE_OFFSET 0 +#define HINIC_CLP_LEN_OFFSET 0 +#define HINIC_CLP_START_OFFSET 31 +#define HINIC_CLP_READY_OFFSET 31 +#define HINIC_CLP_OFFSET(member) (HINIC_CLP_##member##_OFFSET) + +#define HINIC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL +#define HINIC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL +#define HINIC_CLP_LEN_BIT_LEN 0x7ffUL +#define HINIC_CLP_START_BIT_LEN 0x1UL +#define HINIC_CLP_READY_BIT_LEN 0x1UL +#define HINIC_CLP_MASK(member) (HINIC_CLP_##member##_BIT_LEN) + +#define HINIC_CLP_DELAY_CNT_MAX 200UL +#define HINIC_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HINIC_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HINIC_CLP_LEN_REG_MAX 0x3ff +#define HINIC_CLP_START_OR_READY_REG_MAX 0x1 + +enum hinic_msg_direction_type { + HINIC_MSG_DIRECT_SEND = 0, + HINIC_MSG_RESPONSE = 1 +}; + +enum hinic_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic_mgmt_msg_type { + ASYNC_MGMT_MSG = 0, + SYNC_MGMT_MSG = 1, +}; + +enum hinic_msg_ack_type { + HINIC_MSG_ACK = 0, + HINIC_MSG_NO_ACK = 1, +}; + +struct 
hinic_recv_msg { + void *msg; + + struct completion recv_done; + + u16 msg_len; + enum hinic_mod_type mod; + u8 cmd; + u8 seq_id; + u16 msg_id; + int async_mgmt_to_pf; +}; + +#define HINIC_COMM_SELF_CMD_MAX 8 + +struct comm_up_self_msg_sub_info { + u8 cmd; + comm_up_self_msg_proc proc; +}; + +struct comm_up_self_msg_info { + u8 cmd_num; + struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hinic_mgmt_msg_cb_state { + HINIC_MGMT_MSG_CB_REG = 0, + HINIC_MGMT_MSG_CB_RUNNING, +}; + +struct hinic_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hinic_msg_pf_to_mgmt { + struct hinic_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hinic_recv_msg recv_msg_from_mgmt; + struct hinic_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + hinic_mgmt_msg_cb recv_mgmt_msg_cb[HINIC_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HINIC_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[HINIC_MOD_HW_MAX]; + + void (*async_msg_cb[HINIC_MOD_HW_MAX])(void *handle, + enum hinic_mgmt_cmd cmd, + void *priv_data, u32 msg_id, + void *buf_out, u32 out_size); + + void *async_msg_cb_data[HINIC_MOD_HW_MAX]; + + struct comm_up_self_msg_info proc; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; +}; + +struct hinic_mgmt_msg_handle_work { + struct work_struct work; + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + + int async_mgmt_to_pf; +}; + +int hinic_pf_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +void hinic_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); + +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev); + +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +int hinic_pf_to_mgmt_async(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size); + +int hinic_pf_clp_to_mgmt(void *hwdev, enum hinic_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic_clp_pf_to_mgmt_init(struct hinic_hwdev *hwdev); +void hinic_clp_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..48ef0a4b0e7dba69a80b345e16662b611b70cf7b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_mgmt_interface.h @@ -0,0 +1,987 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_MGMT_INTERFACE_H +#define HINIC_MGMT_INTERFACE_H + +#include +#include + +#include "hinic_port_cmd.h" + +/* up to driver event */ +#define HINIC_PORT_CMD_MGMT_RESET 0x0 + +struct hinic_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +struct hinic_register_vf { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +struct hinic_tx_rate_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 tx_rate; +}; + +struct hinic_tx_rate_cfg_max_min { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 min_rate; + u32 max_rate; + u8 rsvd2[8]; +}; + +struct hinic_port_mac_get { + u16 func_id; + u8 mac[ETH_ALEN]; + int ret; +}; + +struct hinic_function_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rx_wqe_buf_size; + u32 mtu; +}; + +struct hinic_cmd_qpn { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 base_qpn; +}; + +struct hinic_port_mac_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct hinic_port_mac_update { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct hinic_vport_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1; + u16 func_id; +}; + +struct hinic_spoofchk_set { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1; + u16 func_id; +}; + +struct hinic_mtu { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_vlan_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_speed_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 speed; +}; + +struct hinic_link_mode_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u16 supported; /* 0xFFFF represent Invalid value */ + u16 advertised; +}; + +struct hinic_set_autoneg_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; /* 1: enable , 0: disable */ +}; + +struct hinic_set_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; +}; + +struct hinic_get_link { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link_status; + u8 rsvd1; +}; + +struct hinic_link_status_report { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link_status; + u8 port_id; +}; + +struct hinic_port_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 resv2[3]; +}; + +struct hinic_tso_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + +struct hinic_lro_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 resv2[13]; +}; + +struct hinic_lro_timer { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 type; /* 0: set timer value, 1: get timer value */ + u8 
enable; /* when set lro time, enable should be 1 */ + u16 rsvd1; + u32 timer; +}; + +struct hinic_checksum_offload { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; + +struct hinic_vlan_offload { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + +struct hinic_vlan_filter { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_pause_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct hinic_rx_mode_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u8 entry[HINIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u32 ctx; +}; + +struct hinic_rss_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 rss_en; + u8 template_id; + u8 rq_priority_number; + u8 rsvd1[3]; + u8 prio_tc[HINIC_DCB_UP_MAX]; +}; + +struct hinic_rss_template_mgmt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +struct hinic_rss_indir_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 indir[HINIC_RSS_INDIR_SIZE]; +}; + +struct hinic_rss_template_key { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 key[HINIC_RSS_KEY_SIZE]; +}; + +struct hinic_rss_engine_type { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct hinic_rss_context_table { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u32 context; +}; + +struct hinic_up_ets_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 rsvd1[3]; + u8 up_tc[HINIC_DCB_UP_MAX]; + u8 pg_bw[HINIC_DCB_PG_MAX]; + u8 pgid[HINIC_DCB_UP_MAX]; + u8 up_bw[HINIC_DCB_UP_MAX]; + u8 prio[HINIC_DCB_PG_MAX]; +}; + +struct hinic_set_pfc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd1[4]; +}; + +struct hinic_set_micro_pfc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 micro_pfc_en; + u8 rsvd1; + u8 cfg_rq_max; + u8 cfg_rq_depth; + u16 rq_sm_thd; +}; + +struct hinic_cos_up_map { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + /* every bit indicate index of map is valid or not */ + u8 cos_valid_mask; + u16 rsvd1; + + /* user priority in cos(index:cos, value: up) */ + u8 map[HINIC_DCB_COS_MAX]; +}; + +struct hinic_set_rq_iq_mapping { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 map[64]; + u32 num_rqs; + u32 rq_depth; +}; + +#define HINIC_PFC_SET_FUNC_THD 0 +#define HINIC_PFC_SET_GLB_THD 1 +struct hinic_pfc_thd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 op_type; + u16 func_thd; + u16 glb_thd; +}; + +/* set iq enable to ucode */ +struct hinic_cmd_enable_iq { + u8 rq_depth; + u8 num_rq; + u16 glb_rq_id; + + u16 q_id; + u16 lower_thd; + + u16 force_en; /* 1: force unlock, 0: depend on condition */ + u16 prod_idx; +}; + +/* set iq enable to mgmt cpu */ +struct hinic_cmd_enable_iq_mgmt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 rq_depth; + u8 num_rq; + u16 glb_rq_id; + + u16 q_id; + u16 lower_thd; + + 
u16 force_en; /* 1: force unlock, 0: depend on condition */ + u16 prod_idx; +}; + +struct hinic_port_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 link; + u8 port_id; +}; + +struct hinic_cable_plug_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct hinic_link_err_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + +struct hinic_sync_time_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 mstime; +}; + +#define HINIC_PORT_STATS_VERSION 0 + +struct hinic_port_stats_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_port_stats { + u8 status; + u8 version; + u8 rsvd[6]; + + struct hinic_phy_port_stats stats; +}; + +struct hinic_cmd_vport_stats { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_vport_stats stats; +}; + +struct hinic_vf_vlan_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 vlan_id; + u8 qos; + u8 rsvd1[7]; +}; + +struct hinic_port_ipsu_mac { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 index; + u16 func_id; + u16 vlan_id; + u8 mac[ETH_ALEN]; +}; + +/* get or set loopback mode, need to modify by base API */ +#define HINIC_INTERNAL_LP_MODE 5 +#define LOOP_MODE_MIN 1 +#define LOOP_MODE_MAX 6 + +struct hinic_port_loopback { + u8 status; + u8 version; + u8 rsvd[6]; + + u32 mode; + u32 en; +}; + +#define HINIC_COMPILE_TIME_LEN 20 +struct hinic_version_info { + u8 status; + u8 version; + u8 rsvd[6]; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + +#define ANTI_ATTACK_DEFAULT_CIR 500000 +#define ANTI_ATTACK_DEFAULT_XIR 600000 +#define ANTI_ATTACK_DEFAULT_CBS 10000000 +#define ANTI_ATTACK_DEFAULT_XBS 12000000 +/* set physical port Anti-Attack rate */ +struct hinic_port_anti_attack_rate { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */ + u32 cir; /* Committed Information Rate */ + u32 xir; /* eXtended Information Rate */ + u32 cbs; /* Committed Burst Size */ + u32 xbs; /* eXtended Burst Size */ +}; + +struct hinic_clear_sq_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_l2nic_reset { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 reset_flag; +}; + +struct hinic_super_cqe { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 super_cqe_en; +}; + +struct hinic_capture_info { + u8 status; + u8 version; + u8 rsvd[6]; + + u32 op_type; + u32 func_id; + u32 is_en_trx; + u32 offset_cos; + u32 data_vlan; +}; + +struct hinic_port_rt_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 pf_id; + u8 enable; + u8 rsvd1[6]; +}; + +struct fw_support_func { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 flag; + u64 rsvd; +}; + +struct hinic_vf_dcb_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_dcb_state state; +}; + +struct hinic_port_funcs_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; /* pf_id */ + u8 drop_en; + u8 rsvd1; +}; + +struct hinic_reset_link_cfg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_force_pkt_drop { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port; + u8 rsvd1[3]; +}; + +struct hinic_set_link_follow { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 follow_status; + u8 
rsvd2[3]; +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn); + +int hinic_get_fw_support_func(void *hwdev); + +int hinic_vf_func_init(struct hinic_hwdev *hwdev); + +void hinic_vf_func_free(struct hinic_hwdev *hwdev); + +void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id); + +int hinic_set_port_routine_cmd_report(void *hwdev, bool enable); + +int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info); + +int hinic_save_dcb_state(struct hinic_hwdev *hwdev, + struct hinic_dcb_state *dcb_state); + +void hinic_clear_vf_infos(void *hwdev, u16 vf_id); + +/* OVS module interface, for BMGW cpath command */ +enum hinic_hiovs_cmd { + OVS_SET_CPATH_VLAN = 39, + OVS_GET_CPATH_VLAN = 40, + OVS_DEL_CPATH_VLAN = 43, +}; + +struct cmd_cpath_vlan { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 vlan_id; + u16 pf_id; +}; + +/* HILINK module interface */ + +/* cmd of mgmt CPU message for HILINK module */ +enum hinic_hilink_cmd { + HINIC_HILINK_CMD_GET_LINK_INFO = 0x3, + HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +enum hinic_link_port_type { + LINK_PORT_FIBRE = 1, + LINK_PORT_ELECTRIC, + LINK_PORT_COPPER, + LINK_PORT_AOC, + LINK_PORT_BACKPLANE, + LINK_PORT_BASET, + LINK_PORT_MAX_TYPE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_MAX_TYPE, +}; + +struct hi30_ffe_data { + u8 PRE2; + u8 PRE1; + u8 POST1; + u8 POST2; + u8 MAIN; +}; + +struct hi30_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; +}; + +struct hi30_dfe_data { + u8 fix_tap1_cen; + u8 fix_tap1_edg; + u8 dfefxtap[6]; + u8 dfefloattap[6]; +}; + +struct hilink_sfp_power { + u32 rx_power; + u32 tx_power; + u32 rsvd; + u32 is_invalid; +}; + +#define HILINK_MAX_LANE 4 + +struct hilink_lane { + u8 lane_used; + u8 hi30_ffe[5]; + u8 hi30_ctle[19]; + u8 hi30_dfe[14]; + u8 rsvd4; +}; + +struct hinic_link_info { + u8 vendor_name[16]; + /* port type: + * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane; + * 6 - baseT; 0xffff - unknown + * + * port subtype: + * Only when port_type is fiber: + * 1 - SR; 2 - LR + */ + u32 port_type; + u32 port_sub_type; + u32 cable_length; + u8 cable_temp; + u8 cable_max_speed;/* 1(G)/10(G)/25(G)... */ + u8 sfp_type; /* 0 - qsfp; 1 - sfp */ + u8 rsvd0; + u32 power[4]; /* uW; if is sfp, only power[2] is valid */ + + u8 an_state; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u16 speed; /* 1(G)/10(G)/25(G)... 
*/ + + u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */ + u8 alos; /* 0 - yes; 1 - no */ + u8 rx_los; /* 0 - yes; 1 - no */ + u8 pma_status; + u32 pma_dbg_info_reg; /* pma debug info: */ + u32 pma_signal_ok_reg; /* signal ok: */ + + u32 pcs_err_blk_cnt_reg; /* error block counter: */ + u32 rf_lf_status_reg; /* RF/LF status: */ + u8 pcs_link_reg; /* pcs link: */ + u8 mac_link_reg; /* mac link: */ + u8 mac_tx_en; + u8 mac_rx_en; + u32 pcs_err_cnt; + + /* struct hinic_hilink_lane: 40 bytes */ + u8 lane1[40]; /* 25GE lane in old firmware */ + + u8 rsvd1[266]; /* hilink machine state */ + + u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */ + + u8 rsvd2[2]; +}; + +struct hinic_hilink_link_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 port_id; + u8 info_type; /* 1: link up 2: link down 3 cable plugged */ + u8 rsvd1; + + struct hinic_link_info info; + + u8 rsvd2[352]; +}; + +struct hinic_link_ksettings_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + + u32 valid_bitmap; + u32 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u8 rsvd2[18]; /* reserved for duplex, port, etc. */ +}; + +enum hinic_tx_promsic { + HINIC_TX_PROMISC_ENABLE = 0, + HINIC_TX_PROMISC_DISABLE = 1, +}; + +struct hinic_promsic_info { + u8 status; + u8 version; + u8 rsvd0[6]; + u16 func_id; + u8 cfg; + u8 rsvd1; +}; + +struct hinic_netq_cfg_msg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 netq_en; + u8 rsvd; +}; + +/* add/del rxq filter msg */ +struct hinic_rq_filter_msg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 qid; + u8 filter_type; + u8 qflag;/*0:stdq, 1:defq, 2: netq*/ + + u8 mac[6]; + struct { + u8 inner_mac[6]; + u32 vni; + } vxlan; +}; + +int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c new file mode 100644 index 0000000000000000000000000000000000000000..350d10e4997f091c07e7bc228d47e05fe3f9e611 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_csr.h" +#include "hinic_msix_attr.h" + +#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) + +/** + * hinic_msix_attr_set - set message attribute of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer) +{ + u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | + HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | + HINIC_MSIX_ATTR_SET(lli_timer_cfg, LLI_TIMER) | + HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | + HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + + return 0; +} + +/** + * hinic_msix_attr_get - get message attribute of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer_cfg: coalesc period for interrupt (unit 8 us) + * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer_cfg: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer_cfg, u8 *lli_credit_limit, + u8 *resend_timer_cfg) +{ + u32 addr, val; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + val = hinic_hwif_read_reg(hwif, addr); + + *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); + *coalesc_timer_cfg = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); + *lli_timer_cfg = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); + *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); + *resend_timer_cfg = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); + + return 0; +} + +/** + * hinic_msix_attr_cnt_set - set message attribute counters of msix entry + * @hwif: the hardware interface of a pci function device + * @msix_index: msix_index + * @lli_timer_cnt: replenishing period for low latency interrupt (unit 8 us) + * @lli_credit_cnt: maximum credits for low latency msix messages (unit 8) + * @coalesc_timer_cnt: coalesc period for interrupt (unit 8 us) + * @pending_cnt: the maximum pending interrupt events (unit 8) + * @resend_timer_cnt: maximum wait for resending msix message + * (unit coalesc period) + * Return: 0 - success, negative - failure + */ +int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index, + u8 lli_timer_cnt, u8 lli_credit_cnt, + u8 coalesc_timer_cnt, u8 pending_cnt, + u8 resend_timer_cnt) +{ + 
u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_CNT_SET(lli_timer_cnt, LLI_TIMER) | + HINIC_MSIX_CNT_SET(lli_credit_cnt, LLI_CREDIT) | + HINIC_MSIX_CNT_SET(coalesc_timer_cnt, COALESC_TIMER) | + HINIC_MSIX_CNT_SET(pending_cnt, PENDING) | + HINIC_MSIX_CNT_SET(resend_timer_cnt, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h new file mode 100644 index 0000000000000000000000000000000000000000..0397dfb41fca3aa7518bc560c06c726face2bf27 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_msix_attr.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_MSIX_ATTR_H_ +#define HINIC_MSIX_ATTR_H_ + +#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 +#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_LLI_TIMER_SHIFT 16 +#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 +#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFFU +#define HINIC_MSIX_COALESC_TIMER_MASK 0xFFU +#define HINIC_MSIX_LLI_TIMER_MASK 0xFFU +#define HINIC_MSIX_LLI_CREDIT_MASK 0x1FU +#define HINIC_MSIX_RESEND_TIMER_MASK 0x7U + +#define HINIC_MSIX_ATTR_GET(val, member) \ + (((val) >> HINIC_MSIX_##member##_SHIFT) \ + & HINIC_MSIX_##member##_MASK) + +#define HINIC_MSIX_ATTR_SET(val, member) \ + (((val) & HINIC_MSIX_##member##_MASK) \ + << HINIC_MSIX_##member##_SHIFT) + +#define HINIC_MSIX_CNT_LLI_TIMER_SHIFT 0 +#define HINIC_MSIX_CNT_LLI_CREDIT_SHIFT 8 +#define HINIC_MSIX_CNT_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_CNT_PENDING_SHIFT 8 +#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_CNT_LLI_TIMER_MASK 0xFFU +#define HINIC_MSIX_CNT_LLI_CREDIT_MASK 0xFFU +#define HINIC_MSIX_CNT_COALESC_TIMER_MASK 0xFFU +#define HINIC_MSIX_CNT_PENDING_MASK 0x1FU +#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HINIC_MSIX_CNT_SET(val, member) \ + (((val) & HINIC_MSIX_CNT_##member##_MASK) << \ + HINIC_MSIX_CNT_##member##_SHIFT) + +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer_cfg, u8 *lli_credit_limit, + u8 *resend_timer_cfg); + +int hinic_msix_attr_cnt_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c new file mode 100644 index 0000000000000000000000000000000000000000..408b813dd3746ba72592be746bafb54e2f150518 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.c @@ -0,0 
+1,974 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwdev.h" +#include "hinic_csr.h" +#include "hinic_hwif.h" +#include "hinic_nic_io.h" +#include "hinic_api_cmd.h" +#include "hinic_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_nic_cfg.h" +#include "hinic_hwif.h" +#include "hinic_mgmt_interface.h" +#include "hinic_multi_host_mgmt.h" + +#define SLAVE_HOST_STATUS_CLEAR(host_id, val) \ + ((val) & (~(1U << (host_id)))) +#define SLAVE_HOST_STATUS_SET(host_id, enable) \ + (((u8)(enable) & 1U) << (host_id)) +#define SLAVE_HOST_STATUS_GET(host_id, val) (!!((val) & (1U << (host_id)))) + +#define MULTI_HOST_PPF_GET(host_id, val) (((val) >> ((host_id) * 4 + 16)) & 0xf) + +static inline u8 get_master_host_ppf_idx(struct hinic_hwdev *hwdev) +{ + u32 reg_val; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + /* master host sets host_id to 0 */ + return MULTI_HOST_PPF_GET(0, reg_val); +} + +void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable) +{ + u32 reg_val; + + if (HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) + return; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + + reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable); + hinic_hwif_write_reg(hwdev->hwif, HINIC_MULT_HOST_SLAVE_STATUS_ADDR, + reg_val); + + sdk_info(hwdev->dev_hdl, "Set slave host %d status %d, reg value: 0x%x\n", + host_id, enable, reg_val); +} + +bool hinic_get_slave_host_enable(void *hwdev, u8 host_id) +{ + u32 reg_val; + struct hinic_hwdev *dev = hwdev; + + if (HINIC_FUNC_TYPE(dev) != TYPE_PPF) + return false; + + reg_val = hinic_hwif_read_reg(dev->hwif, + HINIC_MULT_HOST_SLAVE_STATUS_ADDR); + + return SLAVE_HOST_STATUS_GET(host_id, reg_val); +} +EXPORT_SYMBOL(hinic_get_slave_host_enable); + +void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable) +{ + u32 reg_val; + + if (!IS_MASTER_HOST(hwdev) || HINIC_FUNC_TYPE(hwdev) != TYPE_PPF) + return; + + reg_val = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR); + reg_val = MULTI_HOST_REG_CLEAR(reg_val, MASTER_MBX_STS); + reg_val |= MULTI_HOST_REG_SET((u8)enable, MASTER_MBX_STS); + hinic_hwif_write_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR, reg_val); + + sdk_info(hwdev->dev_hdl, "multi-host status %d, reg value: 0x%x\n", + enable, reg_val); +} + +bool hinic_get_master_host_mbox_enable(void *hwdev) +{ + u32 reg_val; + struct hinic_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_SLAVE_HOST(dev) || HINIC_FUNC_TYPE(dev) == TYPE_VF) + return true; + + reg_val = hinic_hwif_read_reg(dev->hwif, HINIC_HOST_MODE_ADDR); + + return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS); +} + +void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode) 
+{ + switch (mode) { + case FUNC_MOD_MULTI_BM_MASTER: + sdk_info(hwdev->dev_hdl, "Detect multi-host BM master host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER; + hwdev->feature_cap = HINIC_MULTI_BM_MASTER; + break; + case FUNC_MOD_MULTI_BM_SLAVE: + sdk_info(hwdev->dev_hdl, "Detect multi-host BM slave host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE; + hwdev->feature_cap = HINIC_MULTI_BM_SLAVE; + break; + case FUNC_MOD_MULTI_VM_MASTER: + sdk_info(hwdev->dev_hdl, "Detect multi-host VM master host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER; + hwdev->feature_cap = HINIC_MULTI_VM_MASTER; + break; + case FUNC_MOD_MULTI_VM_SLAVE: + sdk_info(hwdev->dev_hdl, "Detect multi-host VM slave host\n"); + hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE; + hwdev->feature_cap = HINIC_MULTI_VM_SLAVE; + break; + default: + hwdev->func_mode = FUNC_MOD_NORMAL_HOST; + hwdev->feature_cap = HINIC_NORMAL_HOST_CAP; + break; + } +} + +bool is_multi_vm_slave(void *hwdev) +{ + struct hinic_hwdev *hw_dev = hwdev; + + if (!hwdev) + return false; + + return (hw_dev->func_mode == FUNC_MOD_MULTI_VM_SLAVE) ? true : false; +} + +bool is_multi_bm_slave(void *hwdev) +{ + struct hinic_hwdev *hw_dev = hwdev; + + if (!hwdev) + return false; + + return (hw_dev->func_mode == FUNC_MOD_MULTI_BM_SLAVE) ? true : false; +} + +int rectify_host_mode(struct hinic_hwdev *hwdev) +{ + u16 cur_sdi_mode; + int err; + + if (hwdev->board_info.board_type != + HINIC_BOARD_TYPE_MULTI_HOST_ETH_25GE) + return 0; + + sdk_info(hwdev->dev_hdl, "Rectify host mode, host_id: %d\n", + hinic_pcie_itf_id(hwdev)); + + err = hinic_get_sdi_mode(hwdev, &cur_sdi_mode); + if (err == HINIC_MGMT_CMD_UNSUPPORTED) + cur_sdi_mode = HINIC_SDI_MODE_BM; + else if (err) + return err; + + switch (cur_sdi_mode) { + case HINIC_SDI_MODE_BM: + if (hinic_pcie_itf_id(hwdev) == 0) + set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_MASTER); + else + set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE); + break; + case HINIC_SDI_MODE_VM: + if (hinic_pcie_itf_id(hwdev) == 0) + set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_MASTER); + else + set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE); + break; + default: + sdk_warn(hwdev->dev_hdl, "Unknown sdi mode %d\n", cur_sdi_mode); + break; + } + + return 0; +} + +void detect_host_mode_pre(struct hinic_hwdev *hwdev) +{ + enum hinic_chip_mode chip_mode; + + /* all pf can set HOST_MODE REG, so don't trust HOST_MODE REG for host0, + * get chip mode from mgmt cpu for host0 + * VF have not right to read HOST_MODE REG, detect mode from board info + */ + if (hinic_pcie_itf_id(hwdev) == 0 || + HINIC_FUNC_TYPE(hwdev) == TYPE_VF) { + set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST); + return; + } + + chip_mode = hinic_hwif_read_reg(hwdev->hwif, HINIC_HOST_MODE_ADDR); + switch (MULTI_HOST_REG_GET(chip_mode, CHIP_MODE)) { + case CHIP_MODE_VMGW: + set_func_host_mode(hwdev, FUNC_MOD_MULTI_VM_SLAVE); + /* mbox has not initialized, set slave host disable */ + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); + break; + case CHIP_MODE_BMGW: + set_func_host_mode(hwdev, FUNC_MOD_MULTI_BM_SLAVE); + /* mbox has not initialized, set slave host disable */ + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); + break; + + default: + set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST); + break; + } +} + +int __mbox_to_host(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, + enum hinic_mbox_ack_type ack_type) +{ + struct hinic_hwdev *mbox_hwdev = hwdev; + u8 
dst_host_func_idx; + int err; + + if (!IS_MULTI_HOST(hwdev) || HINIC_IS_VF(hwdev)) + return -EPERM; + + if (hinic_func_type(hwdev) == TYPE_PF) { + down(&hwdev->ppf_sem); + mbox_hwdev = hwdev->ppf_hwdev; + if (!mbox_hwdev) { + err = -EINVAL; + goto release_lock; + } + + if (!hinic_is_hwdev_mod_inited(mbox_hwdev, + HINIC_HWDEV_MBOX_INITED)) { + err = -EPERM; + goto release_lock; + } + } + + if (!mbox_hwdev->chip_present_flag) { + err = -EPERM; + goto release_lock; + } + + if (!hinic_get_master_host_mbox_enable(hwdev)) { + sdk_err(hwdev->dev_hdl, "Master host not initialized\n"); + err = -EFAULT; + goto release_lock; + } + + if (!mbox_hwdev->mhost_mgmt) { + /* send to master host in default */ + dst_host_func_idx = get_master_host_ppf_idx(hwdev); + } else { + dst_host_func_idx = IS_MASTER_HOST(hwdev) ? + mbox_hwdev->mhost_mgmt->shost_ppf_idx : + mbox_hwdev->mhost_mgmt->mhost_ppf_idx; + } + + if (ack_type == MBOX_ACK) + err = hinic_mbox_to_host(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size, + buf_out, out_size, + timeout); + else + err = hinic_mbox_to_func_no_ack(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size); + +release_lock: + if (hinic_func_type(hwdev) == TYPE_PF) + up(&hwdev->ppf_sem); + + return err; +} + +int hinic_mbox_to_host_sync(void *hwdev, enum hinic_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + return __mbox_to_host((struct hinic_hwdev *)hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, timeout, MBOX_ACK); +} +EXPORT_SYMBOL(hinic_mbox_to_host_sync); + +int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size) +{ + return __mbox_to_host(hwdev, mod, cmd, buf_in, in_size, NULL, NULL, + 0, MBOX_NO_ACK); +} + +static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev, + u16 glb_func_idx, u8 *en); + +int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + struct hinic_slave_func_nic_state *nic_state, *out_state; + int err; + + switch (cmd) { + case HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE: + nic_state = buf_in; + out_state = buf_out; + *out_size = sizeof(*nic_state); + + /* find nic state in ppf func_nic_en bitmap */ + err = __get_func_nic_state_from_pf(hwdev, nic_state->func_idx, + &out_state->enable); + if (err) + out_state->status = 1; + else + out_state->status = 0; + + break; + default: + break; + } + + return 0; +} + +static int __master_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + struct register_slave_host *slave_host, *out_shost; + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + + switch (cmd) { + case HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER: + slave_host = buf_in; + out_shost = buf_out; + *out_size = sizeof(*slave_host); + mhost_mgmt->shost_registered = true; + mhost_mgmt->shost_host_idx = slave_host->host_id; + mhost_mgmt->shost_ppf_idx = slave_host->ppf_idx; + + bitmap_copy((ulong *)out_shost->funcs_nic_en, + mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS); + sdk_info(hwdev->dev_hdl, "slave host register ppf, host_id: %d, ppf_idx: %d\n", + slave_host->host_id, slave_host->ppf_idx); + + out_shost->status = 0; + break; + case HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER: + slave_host = buf_in; + mhost_mgmt->shost_registered = false; + 
sdk_info(hwdev->dev_hdl, "slave host unregister ppf, host_id: %d, ppf_idx: %d\n", + slave_host->host_id, slave_host->ppf_idx); + + *out_size = sizeof(*slave_host); + ((struct register_slave_host *)buf_out)->status = 0; + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int __event_set_func_nic_state(struct hinic_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_event_info event_info = {0}; + struct hinic_mhost_nic_func_state nic_state = {0}; + struct hinic_slave_func_nic_state *out_state, *func_nic_state = buf_in; + + event_info.type = HINIC_EVENT_MULTI_HOST_MGMT; + event_info.mhost_mgmt.sub_cmd = HINIC_MHOST_NIC_STATE_CHANGE; + event_info.mhost_mgmt.data = &nic_state; + + nic_state.func_idx = func_nic_state->func_idx; + nic_state.enable = func_nic_state->enable; + + if (!hwdev->event_callback) + return -EFAULT; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*out_state); + out_state = buf_out; + out_state->status = nic_state.status; + + return nic_state.status; +} + +static int multi_host_event_handler(struct hinic_hwdev *hwdev, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + int err; + + switch (cmd) { + case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + err = __event_set_func_nic_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int sw_fwd_msg_to_vf(struct hinic_hwdev *hwdev, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_host_fwd_head *fwd_head; + u16 fwd_head_len; + void *msg; + int err; + + fwd_head = buf_in; + fwd_head_len = sizeof(struct hinic_host_fwd_head); + msg = (void *)((u8 *)buf_in + fwd_head_len); + err = hinic_mbox_ppf_to_vf(hwdev, fwd_head->mod, + fwd_head->dst_glb_func_idx, fwd_head->cmd, + msg, in_size - fwd_head_len, + buf_out, out_size, 0); + if (err) + nic_err(hwdev->dev_hdl, + "Fwd msg to func %u failed, err: %d\n", + fwd_head->dst_glb_func_idx, err); + + return err; +} + +static int __slave_host_sw_func_handler(struct hinic_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + struct hinic_slave_func_nic_state *nic_state; + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + + switch (cmd) { + case HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + nic_state = buf_in; + + *out_size = sizeof(*nic_state); + ((struct hinic_slave_func_nic_state *)buf_out)->status = 0; + + sdk_info(hwdev->dev_hdl, "slave func %d %s nic\n", + nic_state->func_idx, + nic_state->enable ? 
"register" : "unregister"); + + if (nic_state->enable) + set_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + else + clear_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + + multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out, + out_size); + + break; + + case HINIC_SW_CMD_SEND_MSG_TO_VF: + err = sw_fwd_msg_to_vf(hwdev, buf_in, in_size, + buf_out, out_size); + break; + + case HINIC_SW_CMD_MIGRATE_READY: + hinic_migrate_report(hwdev); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +int sw_func_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + int err; + + if (IS_MASTER_HOST(hwdev)) + err = __master_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in, + in_size, buf_out, out_size); + else if (IS_SLAVE_HOST(hwdev)) + err = __slave_host_sw_func_handler(hwdev, pf_idx, cmd, buf_in, + in_size, buf_out, out_size); + else + err = -EINVAL; + + if (err) + sdk_err(hwdev->dev_hdl, "PPF process sw funcs cmd %d failed, err: %d\n", + cmd, err); + + return err; +} + +int __ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + int err; + + if (IS_SLAVE_HOST(hwdev)) { + err = hinic_mbox_to_host_sync(hwdev, mod, cmd, + buf_in, in_size, buf_out, + out_size, 0); + if (err) + sdk_err(hwdev->dev_hdl, "send to mpf failed, err: %d\n", + err); + } else if (IS_MASTER_HOST(hwdev)) { + if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_START_FLR) + err = hinic_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size); + else + err = hinic_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size, 0U); + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + sdk_err(hwdev->dev_hdl, "PF mbox common callback handler err: %d\n", + err); + } else { + /* not support */ + err = -EFAULT; + } + + return err; +} + +int hinic_ppf_process_mbox_msg(struct hinic_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + bool same_host = false; + int err = -EFAULT; + + /* modify same_host according to hinic_get_hw_pf_infos */ + + switch (hwdev->func_mode) { + case FUNC_MOD_MULTI_VM_MASTER: + case FUNC_MOD_MULTI_BM_MASTER: + if (!same_host) + err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id, + mod, cmd, buf_in, in_size, + buf_out, out_size); + else + sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message in BM master\n"); + + break; + case FUNC_MOD_MULTI_VM_SLAVE: + case FUNC_MOD_MULTI_BM_SLAVE: + same_host = true; + if (same_host) + err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id, + mod, cmd, buf_in, in_size, + buf_out, out_size); + else + sdk_warn(hwdev->dev_hdl, "Receive control message from BM master, don't support for now\n"); + + break; + default: + sdk_warn(hwdev->dev_hdl, "Don't support ppf mbox message\n"); + + break; + } + + return err; +} + +int comm_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id, HINIC_MOD_COMM, + cmd, buf_in, in_size, buf_out, + out_size); +} + +void comm_ppf_to_pf_handler(void *handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + + sdk_err(hwdev->dev_hdl, "pf receive ppf common mbox msg, don't supported for 
now\n"); +} + +int hilink_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id, + HINIC_MOD_HILINK, cmd, buf_in, + in_size, buf_out, out_size); +} + +int hinic_nic_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + return hinic_ppf_process_mbox_msg(handle, pf_idx, vf_id, + HINIC_MOD_L2NIC, cmd, buf_in, in_size, + buf_out, out_size); +} + +void hinic_nic_ppf_to_pf_handler(void *handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hwdev = handle; + + sdk_err(hwdev->dev_hdl, "ppf receive other pf l2nic mbox msg, don't supported for now\n"); +} + +int hinic_register_slave_ppf(struct hinic_hwdev *hwdev, bool registered) +{ + struct register_slave_host host_info = {0}; + u16 out_size = sizeof(host_info); + u8 cmd; + int err; + + if (!IS_SLAVE_HOST(hwdev)) + return -EINVAL; + + cmd = registered ? HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER : + HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER; + + host_info.host_id = hinic_pcie_itf_id(hwdev); + host_info.ppf_idx = hinic_ppf_idx(hwdev); + + err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, cmd, + &host_info, sizeof(host_info), &host_info, + &out_size, 0); + if (err || !out_size || host_info.status) { + sdk_err(hwdev->dev_hdl, "Failed to %s slave host, err: %d, out_size: 0x%x, status: 0x%x\n", + registered ? "register" : "unregister", err, out_size, + host_info.status); + return -EFAULT; + } + bitmap_copy(hwdev->mhost_mgmt->func_nic_en, + (ulong *)host_info.funcs_nic_en, + HINIC_MAX_FUNCTIONS); + return 0; +} + +static int get_host_id_by_func_id(struct hinic_hwdev *hwdev, u16 func_idx, + u8 *host_id) +{ + struct hinic_hw_pf_infos *pf_infos; + u16 vf_id_start, vf_id_end; + int i; + + if (!hwdev || !host_id || !hwdev->mhost_mgmt) + return -EINVAL; + + pf_infos = &hwdev->mhost_mgmt->pf_infos; + + for (i = 0; i < pf_infos->num_pfs; i++) { + if (func_idx == pf_infos->infos[i].glb_func_idx) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + + vf_id_start = pf_infos->infos[i].glb_pf_vf_offset + 1; + vf_id_end = pf_infos->infos[i].glb_pf_vf_offset + + pf_infos->infos[i].max_vfs; + if (func_idx >= vf_id_start && func_idx <= vf_id_end) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + } + + return -EFAULT; +} + +int set_slave_func_nic_state(struct hinic_hwdev *hwdev, u16 func_idx, u8 en) +{ + struct hinic_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + int err; + + nic_state.func_idx = func_idx; + nic_state.enable = en; + + err = hinic_mbox_to_host_sync(hwdev, HINIC_MOD_SW_FUNC, + HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE, + &nic_state, sizeof(nic_state), &nic_state, + &out_size, 0); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + sdk_warn(hwdev->dev_hdl, "Can not notify func %d nic state because slave host not initialized\n", + func_idx); + } else if (err || !out_size || nic_state.status) { + sdk_err(hwdev->dev_hdl, "Failed to set slave host functions nic state, err: %d, out_size: 0x%x, status: 0x%x\n", + err, out_size, nic_state.status); + return -EFAULT; + } + + return 0; +} + +int hinic_set_func_nic_state(void *hwdev, struct hinic_func_nic_state *state) +{ + struct hinic_hwdev *ppf_hwdev = hwdev; + struct hinic_multi_host_mgmt *mhost_mgmt; + u8 host_id = 0; + bool host_enable; + int err; + int old_state; + + if (!hwdev || !state) + return -EINVAL; + + if 
(hinic_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !IS_MASTER_HOST(ppf_hwdev)) + return -EINVAL; + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + if (!mhost_mgmt || state->func_idx >= HINIC_MAX_FUNCTIONS) + return -EINVAL; + + old_state = test_bit(state->func_idx, mhost_mgmt->func_nic_en) ? 1 : 0; + if (state->state == HINIC_FUNC_NIC_DEL) + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + else if (state->state == HINIC_FUNC_NIC_ADD) + set_bit(state->func_idx, mhost_mgmt->func_nic_en); + else + return -EINVAL; + + err = get_host_id_by_func_id(ppf_hwdev, state->func_idx, &host_id); + if (err) { + sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %d host id, err: %d\n", + state->func_idx, err); + old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) : + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + return -EFAULT; + } + + host_enable = hinic_get_slave_host_enable(hwdev, host_id); + sdk_info(ppf_hwdev->dev_hdl, "Set slave host %d(status: %d) func %d %s nic\n", + host_id, host_enable, + state->func_idx, state->state ? "enable" : "disable"); + + if (!host_enable) + return 0; + + /* notify slave host */ + err = set_slave_func_nic_state(hwdev, state->func_idx, state->state); + if (err) { + old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) : + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + return err; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_func_nic_state); + +static int __get_func_nic_state_from_pf(struct hinic_hwdev *hwdev, + u16 glb_func_idx, u8 *en) +{ + struct hinic_multi_host_mgmt *mhost_mgmt; + struct hinic_hwdev *ppf_hwdev = hwdev; + + if (hinic_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt) + return -EFAULT; + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + *en = !!(test_bit(glb_func_idx, mhost_mgmt->func_nic_en)); + + sdk_info(ppf_hwdev->dev_hdl, "slave host func %d nic %d\n", + glb_func_idx, *en); + + return 0; +} + +int hinic_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en) +{ + struct hinic_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + u8 nic_en; + int err; + + if (!hwdev || !en) + return -EINVAL; + /* if card mode is OVS, VFs do not need attach_uld, so return false. */ + if (!IS_SLAVE_HOST((struct hinic_hwdev *)hwdev)) { + if (hinic_func_type(hwdev) == TYPE_VF && + hinic_support_ovs(hwdev, NULL)) { + *en = false; + } else { + *en = true; + } + return 0; + } + + if (hinic_func_type(hwdev) == TYPE_VF) { + nic_state.func_idx = glb_func_idx; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_SW_FUNC, + HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE, + &nic_state, sizeof(nic_state), + &nic_state, &out_size, 0); + if (err || !out_size || nic_state.status) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Failed to get func %d nic state, err: %d, out_size: 0x%x, status: 0x%x\n", + glb_func_idx, err, out_size, nic_state.status); + return -EFAULT; + } + + *en = !!nic_state.enable; + + return 0; + } + + /* pf in slave host should be probed in CHIP_MODE_VMGW + * mode for pxe install + */ + if (IS_VM_SLAVE_HOST((struct hinic_hwdev *)hwdev)) { + *en = true; + return 0; + } + + /* pf/ppf get function nic state in sdk directly */ + err = __get_func_nic_state_from_pf(hwdev, glb_func_idx, &nic_en); + if (err) + return err; + + *en = !!nic_en; + + return 0; +} + +int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev) +{ + int err; + + if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev)) +
return 0; + + hwdev->mhost_mgmt = kzalloc(sizeof(*hwdev->mhost_mgmt), GFP_KERNEL); + if (!hwdev->mhost_mgmt) { + sdk_err(hwdev->dev_hdl, "Failed to alloc multi-host mgmt memory\n"); + return -ENOMEM; + } + + hwdev->mhost_mgmt->mhost_ppf_idx = get_master_host_ppf_idx(hwdev); + hwdev->mhost_mgmt->shost_ppf_idx = 0; + hwdev->mhost_mgmt->shost_host_idx = 2; + + err = hinic_get_hw_pf_infos(hwdev, &hwdev->mhost_mgmt->pf_infos); + if (err) + goto out_free_mhost_mgmt; + + hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_COMM, + comm_ppf_mbox_handler); + hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC, + hinic_nic_ppf_mbox_handler); + hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK, + hilink_ppf_mbox_handler); + hinic_register_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC, + sw_func_ppf_mbox_handler); + + bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC_MAX_FUNCTIONS); + + /* Slave host: + * register slave host ppf functions + * Get function's nic state + */ + if (IS_SLAVE_HOST(hwdev)) { + /* PXE don't support to receive mbox from master host */ + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), true); + if ((IS_VM_SLAVE_HOST(hwdev) && + hinic_get_master_host_mbox_enable(hwdev)) || + IS_BMGW_SLAVE_HOST(hwdev)) { + err = hinic_register_slave_ppf(hwdev, true); + if (err) { + set_slave_host_enable(hwdev, + hinic_pcie_itf_id(hwdev), + false); + goto out_free_mhost_mgmt; + } + } + } else { + /* slave host can send message to mgmt cpu after setup master + * mbox + */ + set_master_host_mbox_enable(hwdev, true); + } + + return 0; + +out_free_mhost_mgmt: + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return err; +} + +int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev) +{ + if (!IS_MULTI_HOST(hwdev) || !HINIC_IS_PPF(hwdev)) + return 0; + + if (IS_SLAVE_HOST(hwdev)) { + hinic_register_slave_ppf(hwdev, false); + + set_slave_host_enable(hwdev, hinic_pcie_itf_id(hwdev), false); + } else { + set_master_host_mbox_enable(hwdev, false); + } + + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_COMM); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_L2NIC); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_HILINK); + hinic_unregister_ppf_mbox_cb(hwdev, HINIC_MOD_SW_FUNC); + + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..f35b2a1f79892ee15bcef70fb937b6df1f1a9b6d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_multi_host_mgmt.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __HINIC_MULTI_HOST_MGMT_H_ +#define __HINIC_MULTI_HOST_MGMT_H_ + +#define IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER) +#define IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE) +#define IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER) +#define IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE) + +#define IS_MASTER_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev)) + +#define IS_SLAVE_HOST(hwdev) \ + (IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define IS_MULTI_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \ + IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define NEED_MBOX_FORWARD(hwdev) IS_BMGW_SLAVE_HOST(hwdev) + +struct hinic_multi_host_mgmt { + struct hinic_hwdev *hwdev; + + /* slave host registered */ + bool shost_registered; + u8 shost_host_idx; + u8 shost_ppf_idx; + + /* slave host functios support nic enable */ + DECLARE_BITMAP(func_nic_en, HINIC_MAX_FUNCTIONS); + + u8 mhost_ppf_idx; + + struct hinic_hw_pf_infos pf_infos; +}; + +struct hinic_host_fwd_head { + unsigned short dst_glb_func_idx; + unsigned char dst_itf_idx; + unsigned char mod; + + unsigned char cmd; + unsigned char rsv[3]; +}; + +int hinic_multi_host_mgmt_init(struct hinic_hwdev *hwdev); +int hinic_multi_host_mgmt_free(struct hinic_hwdev *hwdev); +int hinic_mbox_to_host_no_ack(struct hinic_hwdev *hwdev, + enum hinic_mod_type mod, u8 cmd, void *buf_in, + u16 in_size); + +struct register_slave_host { + u8 status; + u8 version; + u8 rsvd[6]; + + u8 host_id; + u8 ppf_idx; + u8 rsvd2[6]; + + /* for max 512 functions */ + u64 funcs_nic_en[8]; + + u64 rsvd3[8]; +}; + +struct hinic_slave_func_nic_state { + u8 status; + u8 version; + u8 rsvd[6]; + + u16 func_idx; + u8 enable; + u8 rsvd1; + + u32 rsvd2[2]; +}; + +void set_master_host_mbox_enable(struct hinic_hwdev *hwdev, bool enable); +void set_slave_host_enable(struct hinic_hwdev *hwdev, u8 host_id, bool enable); +void set_func_host_mode(struct hinic_hwdev *hwdev, enum hinic_func_mode mode); +int rectify_host_mode(struct hinic_hwdev *hwdev); +void detect_host_mode_pre(struct hinic_hwdev *hwdev); + +int sw_func_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic.h b/drivers/net/ethernet/huawei/hinic/hinic_nic.h new file mode 100644 index 0000000000000000000000000000000000000000..f2fb93773232f5dd901507b5b388200b1dc12bec --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_NIC_H_ +#define HINIC_NIC_H_ + +#include "hinic_wq.h" + +#define SET_VPORT_MBOX_TIMEOUT (30 * 1000) +#define SET_VPORT_MGMT_TIMEOUT (25 * 1000) +struct hinic_sq { + struct hinic_wq *wq; + + u16 q_id; + + u8 owner; + + void *cons_idx_addr; + + u8 __iomem *db_addr; + u16 msix_entry_idx; +}; + +struct hinic_rq { + struct hinic_wq *wq; + + u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; + + u16 q_id; + + u32 irq_id; + u16 msix_entry_idx; + + dma_addr_t cqe_dma_addr; +}; + +struct hinic_qp { + struct hinic_sq sq; + struct hinic_rq rq; +}; + +struct vf_data_storage { + u8 vf_mac_addr[ETH_ALEN]; + bool registered; + bool pf_set_mac; + u16 pf_vlan; + u8 pf_qos; + u32 max_rate; + u32 min_rate; + + bool link_forced; + bool link_up; /* only valid if VF link is forced */ + bool spoofchk; + bool trust; +}; + +struct hinic_nic_cfg { + struct semaphore cfg_lock; + + /* Valid when pfc is disable */ + bool pause_set; + struct nic_pause_config nic_pause; + + u8 pfc_en; + u8 pfc_bitmap; + + struct nic_port_info port_info; + + /* percentage of pf link bandwidth */ + u32 pf_bw_limit; +}; + +struct hinic_nic_io { + struct hinic_hwdev *hwdev; + + u16 global_qpn; + u8 link_status; + + struct hinic_wqs wqs; + + struct hinic_wq *sq_wq; + struct hinic_wq *rq_wq; + + u16 max_qps; + u16 num_qps; + u16 sq_depth; + u16 rq_depth; + struct hinic_qp *qps; + + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + u16 max_vfs; + struct vf_data_storage *vf_infos; + + struct hinic_dcb_state dcb_state; + + struct hinic_nic_cfg nic_cfg; + u16 rx_buff_len; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..463b119c42e425650d3fae68461abcb109ff0481 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.c @@ -0,0 +1,4004 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_mbox.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic.h" +#include "hinic_mgmt_interface.h" +#include "hinic_hwif.h" + +static unsigned char set_vf_link_state; +module_param(set_vf_link_state, byte, 0444); +MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. 
- default is 0."); + +#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\ + hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd, \ + buf_in, in_size, \ + buf_out, out_size, 0) + +#define l2nic_msg_to_mgmt_async(hwdev, cmd, buf_in, in_size) \ + hinic_msg_to_mgmt_async(hwdev, HINIC_MOD_L2NIC, cmd, buf_in, in_size) + +#define CPATH_FUNC_ID_VALID_LIMIT 2 +#define CHECK_IPSU_15BIT 0X8000 + +static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value); + +static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_function_table *function_table; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + function_table = (struct hinic_function_table *)buf_in; + + if (!function_table->rx_wqe_buf_size) + return false; + + return true; +} + +static bool check_rxcsum_setting(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_checksum_offload *rx_csum_cfg = NULL; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + rx_csum_cfg = buf_in; + if (rx_csum_cfg->rx_csum_offload != HINIC_RX_CSUM_OFFLOAD_EN) + return false; + + return true; +} + +static bool check_force_pkt_drop(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_force_pkt_drop *pkt_drop = buf_in; + + if (pkt_drop->port != hinic_physical_port_id(hwdev)) + return false; + + return true; +} + +struct vf_cmd_check_handle nic_cmd_support_vf[] = { + {HINIC_PORT_CMD_VF_REGISTER, NULL}, + {HINIC_PORT_CMD_VF_UNREGISTER, NULL}, + + {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_CSUM, check_rxcsum_setting}, + {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL, + hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_INIT_FUNC, check_func_table}, + {HINIC_PORT_CMD_SET_LLI_PRI, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL}, + {HINIC_PORT_CMD_GET_BOOT_VERSION, NULL}, + {HINIC_PORT_CMD_GET_MICROCODE_VERSION, NULL}, + + {HINIC_PORT_CMD_GET_VPORT_ENABLE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_VPORT_ENABLE, hinic_mbox_check_func_id_8B}, + + 
{HINIC_PORT_CMD_GET_LRO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_PORT_INFO, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_IPSU_MAC, hinic_mbox_check_func_id_10B}, + {HINIC_PORT_CMD_GET_IPSU_MAC, hinic_mbox_check_func_id_10B}, + + {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_CLEAR_SQ_RES, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_SUPER_CQE, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_GET_VF_COS, NULL}, + {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B}, + + {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_FORCE_PKT_DROP, check_force_pkt_drop}, + {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_TCAM_FILTER, NULL}, + {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL}, + {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL}, + {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B}, +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz) +{ + struct hinic_function_table function_table = {0}; + u16 out_size = sizeof(function_table); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &function_table.func_id); + if (err) + return err; + + function_table.version = HINIC_CMD_VER_FUNC_ID; + function_table.mtu = 0x3FFF; /* default, max mtu */ + function_table.rx_wqe_buf_size = rx_buf_sz; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_INIT_FUNC, + &function_table, sizeof(function_table), + &function_table, &out_size, 0); + if (err || function_table.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to init func table, err: %d, status: 0x%x, out size: 0x%x\n", + err, function_table.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn) +{ + struct hinic_cmd_qpn cmd_qpn = {0}; + u16 out_size = sizeof(cmd_qpn); + int err; + + if (!hwdev || !global_qpn) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &cmd_qpn.func_id); + if (err) + return err; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_GLOBAL_QPN, + &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn, + &out_size, 0); + if (err || !out_size || cmd_qpn.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n", + err, cmd_qpn.status, out_size); + return -EINVAL; + } + + *global_qpn = cmd_qpn.base_qpn; + + return 0; +} + +int hinic_get_fw_support_func(void *hwdev) +{ + struct fw_support_func support_flag = {0}; + struct hinic_hwdev *dev = hwdev; + u16 out_size = sizeof(support_flag); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG, + &support_flag, sizeof(support_flag), + &support_flag, &out_size, 0); + if (support_flag.status == HINIC_MGMT_CMD_UNSUPPORTED) { + nic_info(dev->dev_hdl, "Current firmware doesn't support to get function capability\n"); + support_flag.flag = 0; + } else if (support_flag.status || err || !out_size) { + nic_err(dev->dev_hdl, 
"Failed to get function capability, err: %d, status: 0x%x, out size: 0x%x\n", + err, support_flag.status, out_size); + return -EFAULT; + } + + dev->fw_support_func_flag = support_flag.flag; + + return 0; +} + +#define HINIC_ADD_VLAN_IN_MAC 0x8000 +#define HINIC_VLAN_ID_MASK 0x7FFF +#define PF_SET_VF_MAC(hwdev, status) \ + (HINIC_FUNC_TYPE(hwdev) == TYPE_VF && \ + (status) == HINIC_PF_SET_VF_ALREADY) + +static int hinic_check_mac_status(struct hinic_hwdev *hwdev, u8 status, + u16 vlan_id) +{ + if ((status && status != HINIC_MGMT_STATUS_EXIST) || + (vlan_id & CHECK_IPSU_15BIT && status == HINIC_MGMT_STATUS_EXIST)) { + if (PF_SET_VF_MAC(hwdev, status)) + return 0; + + return -EINVAL; + } + + return 0; +} + +int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || + hinic_check_mac_status(hwdev, mac_info.status, mac_info.vlan_id)) { + nic_err(nic_hwdev->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EIO; + } + + if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + if (mac_info.status == HINIC_MGMT_STATUS_EXIST) { + nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. 
Ignore update operation\n"); + return 0; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_mac); + +int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info, + sizeof(mac_info), &mac_info, &out_size); + if (err || !out_size || + (mac_info.status && !PF_SET_VF_MAC(nic_hwdev, mac_info.status))) { + nic_err(nic_hwdev->dev_hdl, + "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EIO; + } + if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF mac, Ignore delete operation\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + return 0; +} +EXPORT_SYMBOL(hinic_del_mac); + +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_update mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !old_mac || !new_mac) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_hwdev->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.old_mac, old_mac, ETH_ALEN); + memcpy(mac_info.new_mac, new_mac, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || + hinic_check_mac_status(hwdev, mac_info.status, mac_info.vlan_id)) { + nic_err(nic_hwdev->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EIO; + } + + if (PF_SET_VF_MAC(nic_hwdev, mac_info.status)) { + nic_warn(nic_hwdev->dev_hdl, "PF has already set VF MAC. Ignore update operation\n"); + return HINIC_PF_SET_VF_ALREADY; + } + + if (mac_info.status == HINIC_MGMT_STATUS_EXIST) { + nic_warn(nic_hwdev->dev_hdl, "MAC is repeated. 
Ignore update operation\n"); + return 0; + } + + return 0; +} + +int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct hinic_hwdev *dev = hwdev; + struct vf_data_storage *vf_info; + u16 func_id, vlan_id; + int err; + + if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + vf_info = dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (!vf_info->pf_set_mac) + return 0; + + if (!FW_SUPPORT_MAC_REUSE_FUNC(dev)) { + nic_info(dev->dev_hdl, "Current firmware doesn't support mac reuse\n"); + return 0; + } + + func_id = hinic_glb_pf_vf_offset(dev) + (u16)vf_id; + vlan_id = old_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + err = hinic_del_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(dev->dev_hdl, "Failed to delete VF %d MAC %pM vlan %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id); + return err; + } + + vlan_id = new_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + err = hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(dev->dev_hdl, "Failed to add VF %d MAC %pM vlan %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, vlan_id); + goto out; + } + + return 0; + +out: + vlan_id = old_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + hinic_set_mac(dev, vf_info->vf_mac_addr, vlan_id, + func_id); + + return err; +} + +int hinic_get_default_mac(void *hwdev, u8 *mac_addr) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_mac_set mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &mac_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err || !out_size || mac_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EINVAL; + } + + memcpy(mac_addr, mac_info.mac, ETH_ALEN); + + return 0; +} + +int hinic_set_port_mtu(void *hwdev, u32 new_mtu) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_mtu mtu_info = {0}; + u16 out_size = sizeof(mtu_info); + int err; + + if (!hwdev) + return -EINVAL; + + if (new_mtu < HINIC_MIN_MTU_SIZE) { + nic_err(nic_hwdev->dev_hdl, + "Invalid mtu size, mtu size < %dbytes\n", + HINIC_MIN_MTU_SIZE); + return -EINVAL; + } + + if (new_mtu > HINIC_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_hwdev->dev_hdl, "Invalid mtu size, mtu size > %dbytes\n", + HINIC_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + err = hinic_global_func_id_get(hwdev, &mtu_info.func_id); + if (err) + return err; + + mtu_info.mtu = new_mtu; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU, + &mtu_info, sizeof(mtu_info), + &mtu_info, &out_size); + if (err || !out_size || mtu_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n", + err, mtu_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_hiovs_set_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct cmd_cpath_vlan cpath_vlan_info = {0}; + u16 out_size = sizeof(cpath_vlan_info); + int err; + + if (!hwdev) + return -EINVAL; + + cpath_vlan_info.pf_id = pf_id; + cpath_vlan_info.vlan_id = vlan_id; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, 
OVS_SET_CPATH_VLAN, + &cpath_vlan_info, sizeof(cpath_vlan_info), + &cpath_vlan_info, &out_size, 0); + + if (err || !out_size || cpath_vlan_info.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set cpath vlan, err: %d, status: 0x%x, out_size: 0x%0x\n", + err, cpath_vlan_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_hiovs_del_cpath_vlan(void *hwdev, u16 vlan_id, u16 pf_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct cmd_cpath_vlan cpath_vlan_info = {0}; + u16 out_size = sizeof(cpath_vlan_info); + int err; + + if (!hwdev) + return -EINVAL; + + cpath_vlan_info.pf_id = pf_id; + cpath_vlan_info.vlan_id = vlan_id; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_OVS, OVS_DEL_CPATH_VLAN, + &cpath_vlan_info, sizeof(cpath_vlan_info), + &cpath_vlan_info, &out_size, 0); + + if (err || !out_size || cpath_vlan_info.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to delte cpath vlan, err: %d, status: 0x%x, out_size: 0x%0x\n", + err, cpath_vlan_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_enable_netq(void *hwdev, u8 en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_netq_cfg_msg netq_cfg = {0}; + u16 out_size = sizeof(netq_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &netq_cfg.func_id); + if (err) + return err; + + netq_cfg.netq_en = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_NETQ, + &netq_cfg, sizeof(netq_cfg), + &netq_cfg, &out_size); + if (netq_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + nic_warn(nic_hwdev->dev_hdl, "Not support enable netq\n"); + } else if (err || !out_size || netq_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to enable netq, err: %d, status: 0x%x, out size: 0x%x\n", + err, netq_cfg.status, out_size); + } + + return err; +} + +int hinic_add_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rq_filter_msg filter_msg = {0}; + u16 out_size = sizeof(filter_msg); + int err; + + if (!hwdev || !filter_info) + return -EINVAL; + + switch (filter_info->filter_type) { + case HINIC_RQ_FILTER_TYPE_MAC_ONLY: + memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN); + break; + case HINIC_RQ_FILTER_TYPE_VXLAN: + memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN); + memcpy(filter_msg.vxlan.inner_mac, + filter_info->vxlan.inner_mac, ETH_ALEN); + filter_msg.vxlan.vni = filter_info->vxlan.vni; + break; + default: + nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n", + filter_info->filter_type); + return -EINVAL; + } + + err = hinic_global_func_id_get(hwdev, &filter_msg.func_id); + if (err) + return err; + + filter_msg.filter_type = filter_info->filter_type; + filter_msg.qid = filter_info->qid; + filter_msg.qflag = filter_info->qflag; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_RQ_FILTER, + &filter_msg, sizeof(filter_msg), + &filter_msg, &out_size); + if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + nic_warn(nic_hwdev->dev_hdl, "Not support add rxq filter\n"); + } else if (err || !out_size || filter_msg.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to add RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n", + err, filter_msg.status, out_size); + return -EINVAL; + } + + return err; +} + +int hinic_del_hw_rqfilter(void *hwdev, struct hinic_rq_filter_info *filter_info) +{ + struct hinic_hwdev *nic_hwdev = 
(struct hinic_hwdev *)hwdev; + struct hinic_rq_filter_msg filter_msg = {0}; + u16 out_size = sizeof(filter_msg); + int err; + + if (!hwdev || !filter_info) + return -EINVAL; + + switch (filter_info->filter_type) { + case HINIC_RQ_FILTER_TYPE_MAC_ONLY: + memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN); + break; + case HINIC_RQ_FILTER_TYPE_VXLAN: + memcpy(filter_msg.mac, filter_info->mac, ETH_ALEN); + memcpy(filter_msg.vxlan.inner_mac, + filter_info->vxlan.inner_mac, ETH_ALEN); + filter_msg.vxlan.vni = filter_info->vxlan.vni; + break; + default: + nic_warn(nic_hwdev->dev_hdl, "No support filter type: 0x%x\n", + filter_info->filter_type); + return -EINVAL; + } + + err = hinic_global_func_id_get(hwdev, &filter_msg.func_id); + if (err) + return err; + + filter_msg.filter_type = filter_info->filter_type; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_RQ_FILTER, + &filter_msg, sizeof(filter_msg), + &filter_msg, &out_size); + if (filter_msg.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + nic_warn(nic_hwdev->dev_hdl, "Not support del rxq filter\n"); + } else if (err || !out_size || filter_msg.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to delete RX qfilter, err: %d, status: 0x%x, out size: 0x%x\n", + err, filter_msg.status, out_size); + return -EINVAL; + } + + return err; +} + +int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_config vlan_info = {0}; + u16 out_size = sizeof(vlan_info); + int err; + + if (!hwdev) + return -EINVAL; + + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_ADD_VLAN, + &vlan_info, sizeof(vlan_info), + &vlan_info, &out_size); + if (err || !out_size || vlan_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to add vlan, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_config vlan_info = {0}; + u16 out_size = sizeof(vlan_info); + int err; + + if (!hwdev) + return -EINVAL; + + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_VLAN, + &vlan_info, sizeof(vlan_info), + &vlan_info, &out_size); + if (err || !out_size || vlan_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to delete vlan, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_filter vlan_filter = {0}; + u16 out_size = sizeof(vlan_filter); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &vlan_filter.func_id); + if (err) + return err; + vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER, + &vlan_filter, sizeof(vlan_filter), + &vlan_filter, &out_size); + if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && + HINIC_IS_VF(nic_hwdev)) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if (err || !out_size || vlan_filter.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", +
err, vlan_filter.status, out_size); + err = -EINVAL; + } + + return err; +} + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_info port_msg = {0}; + u16 out_size = sizeof(port_msg); + int err; + + if (!hwdev || !port_info) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &port_msg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO, + &port_msg, sizeof(port_msg), + &port_msg, &out_size); + if (err || !out_size || port_msg.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_msg.status, out_size); + return -EINVAL; + } + + port_info->autoneg_cap = port_msg.autoneg_cap; + port_info->autoneg_state = port_msg.autoneg_state; + port_info->duplex = port_msg.duplex; + port_info->port_type = port_msg.port_type; + port_info->speed = port_msg.speed; + + return 0; +} +EXPORT_SYMBOL(hinic_get_port_info); + +int hinic_set_autoneg(void *hwdev, bool enable) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_autoneg_cmd autoneg = {0}; + u16 out_size = sizeof(autoneg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &autoneg.func_id); + if (err) + return err; + + autoneg.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_AUTONEG, + &autoneg, sizeof(autoneg), + &autoneg, &out_size); + if (err || !out_size || autoneg.status) { + nic_err(dev->dev_hdl, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "enable" : "disable", err, autoneg.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_force_port_relink(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + int err; + + /* Force port link down and link up */ + err = hinic_set_port_link_status(hwdev, false); + if (err) { + nic_err(dev->dev_hdl, "Failed to set port link down\n"); + return -EFAULT; + } + + err = hinic_set_port_link_status(hwdev, true); + if (err) { + nic_err(dev->dev_hdl, "Failed to set port link up\n"); + return -EFAULT; + } + + return 0; +} + +int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported, + enum hinic_link_mode *advertised) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_link_mode_cmd link_mode = {0}; + u16 out_size = sizeof(link_mode); + int err; + + if (!hwdev || !supported || !advertised) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &link_mode.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE, + &link_mode, sizeof(link_mode), + &link_mode, &out_size); + if (err || !out_size || link_mode.status) { + nic_err(dev->dev_hdl, + "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, link_mode.status, out_size); + return -EINVAL; + } + + *supported = link_mode.supported; + *advertised = link_mode.advertised; + + return 0; +} + +int hinic_set_port_link_status(void *hwdev, bool enable) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_link_status link_status = {0}; + u16 out_size = sizeof(link_status); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &link_status.func_id); + if (err) + return err; + + link_status.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_LINK_STATUS, + &link_status, sizeof(link_status), + &link_status, &out_size); + if (err || !out_size || link_status.status) { + nic_err(dev->dev_hdl, 
"Failed to %s port link status, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "Enable" : "Disable", err, link_status.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_speed(void *hwdev, enum nic_speed_level speed) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_speed_cmd speed_info = {0}; + u16 out_size = sizeof(speed_info); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &speed_info.func_id); + if (err) + return err; + + speed_info.speed = speed; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SPEED, + &speed_info, sizeof(speed_info), + &speed_info, &out_size); + if (err || !out_size || speed_info.status) { + nic_err(dev->dev_hdl, + "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n", + err, speed_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_speed(void *hwdev, enum nic_speed_level *speed) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_speed_cmd speed_info = {0}; + u16 out_size = sizeof(speed_info); + int err; + + if (!hwdev || !speed) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &speed_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SPEED, + &speed_info, sizeof(speed_info), + &speed_info, &out_size); + if (err || !out_size || speed_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get speed, err: %d, status: 0x%x, out size: 0x%x\n", + err, speed_info.status, out_size); + return -EINVAL; + } + + *speed = speed_info.speed; + + return 0; +} +EXPORT_SYMBOL(hinic_get_speed); + +int hinic_get_link_state(void *hwdev, u8 *link_state) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_get_link get_link = {0}; + u16 out_size = sizeof(get_link); + int err; + + if (!hwdev || !link_state) + return -EINVAL; + + if (FUNC_FORCE_LINK_UP(hwdev)) { + *link_state = 1; + return 0; + } + + err = hinic_global_func_id_get(hwdev, &get_link.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, + &get_link, sizeof(get_link), + &get_link, &out_size); + if (err || !out_size || get_link.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_link.status, out_size); + return -EINVAL; + } + + *link_state = get_link.link_status; + + return 0; +} + +static int hinic_set_hw_pause_info(void *hwdev, + struct nic_pause_config nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_pause_config pause_info = {0}; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &pause_info.func_id); + if (err) + return err; + + pause_info.auto_neg = nic_pause.auto_neg; + pause_info.rx_pause = nic_pause.rx_pause; + pause_info.tx_pause = nic_pause.tx_pause; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n", + err, pause_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_cfg *nic_cfg; + int err; + + if (!hwdev) + return -EINVAL; + + nic_cfg = 
&nic_hwdev->nic_io->nic_cfg; + if (nic_cfg->pfc_en) { + nic_err(nic_hwdev->dev_hdl, "Failed to set pause, please disable pfc first\n"); + return -EPERM; + } + + down(&nic_cfg->cfg_lock); + + err = hinic_set_hw_pause_info(hwdev, nic_pause); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = 0; + nic_cfg->pfc_bitmap = 0; + nic_cfg->pause_set = true; + nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; + nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; + nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_pause_config pause_info = {0}; + u16 out_size = sizeof(pause_info); + int err; + + if (!hwdev || !nic_pause) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &pause_info.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n", + err, pause_info.status, out_size); + return -EINVAL; + } + + nic_pause->auto_neg = pause_info.auto_neg; + nic_pause->rx_pause = pause_info.rx_pause; + nic_pause->tx_pause = pause_info.tx_pause; + + return 0; +} + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_cfg *nic_cfg = &nic_hwdev->nic_io->nic_cfg; + int err = 0; + + err = hinic_get_hw_pause_info(hwdev, nic_pause); + if (err) + return err; + + if (nic_cfg->pause_set || !nic_pause->auto_neg) { + nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; + nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; + } + + return 0; +} + +int hinic_set_rx_mode(void *hwdev, u32 enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rx_mode_config rx_mode_cfg = {0}; + u16 out_size = sizeof(rx_mode_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rx_mode_cfg.func_id); + if (err) + return err; + + rx_mode_cfg.rx_mode = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE, + &rx_mode_cfg, sizeof(rx_mode_cfg), + &rx_mode_cfg, &out_size); + if (err || !out_size || rx_mode_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_mode_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +/* offload feature */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vlan_offload vlan_cfg = {0}; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &vlan_cfg.func_id); + if (err) + return err; + + vlan_cfg.vlan_rx_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err || !out_size || vlan_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_csum_offload(void *hwdev, u32 en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_checksum_offload 
rx_csum_cfg = {0}; + u16 out_size = sizeof(rx_csum_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rx_csum_cfg.func_id); + if (err) + return err; + + rx_csum_cfg.rx_csum_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, + &rx_csum_cfg, sizeof(rx_csum_cfg), + &rx_csum_cfg, &out_size); + if (err || !out_size || rx_csum_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_csum_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_tx_tso(void *hwdev, u8 tso_en) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_tso_config tso_cfg = {0}; + u16 out_size = sizeof(tso_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &tso_cfg.func_id); + if (err) + return err; + + tso_cfg.tso_en = tso_en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO, + &tso_cfg, sizeof(tso_cfg), + &tso_cfg, &out_size); + if (err || !out_size || tso_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set tso, err: %d, status: 0x%x, out size: 0x%x\n", + err, tso_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num) +{ + struct hinic_hwdev *nic_hwdev = hwdev; + u8 ipv4_en = 0, ipv6_en = 0; + int err; + + if (!hwdev) + return -EINVAL; + + ipv4_en = lro_en ? 1 : 0; + ipv6_en = lro_en ? 1 : 0; + + nic_info(nic_hwdev->dev_hdl, "Set LRO max wqe number to %u\n", wqe_num); + + err = hinic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)wqe_num); + if (err) + return err; + + /* we don't set LRO timer for VF */ + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + nic_info(nic_hwdev->dev_hdl, "Set LRO timer to %u\n", lro_timer); + + return hinic_set_rx_lro_timer(hwdev, lro_timer); +} + +static int hinic_set_rx_lro_timer(void *hwdev, u32 timer_value) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_timer lro_timer = {0}; + u16 out_size = sizeof(lro_timer); + int err; + + if (!hwdev) + return -EINVAL; + + lro_timer.status = 0; + lro_timer.type = 0; + lro_timer.enable = 1; + lro_timer.timer = timer_value; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER, + &lro_timer, sizeof(lro_timer), + &lro_timer, &out_size); + if (lro_timer.status == 0xFF) { + /* For this case, we think status (0xFF) is OK */ + lro_timer.status = 0; + nic_err(nic_hwdev->dev_hdl, "Set lro timer not supported by the current FW version, it will be 1ms default\n"); + } + + if (err || !out_size || lro_timer.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_timer.status, out_size); + + return -EINVAL; + } + + return 0; +} + +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_config lro_cfg = {0}; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id); + if (err) + return err; + + lro_cfg.lro_ipv4_en = ipv4_en; + lro_cfg.lro_ipv6_en = ipv6_en; + lro_cfg.lro_max_wqe_num = max_wqe_num; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err || !out_size || lro_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, 
out size: 0x%x\n", + err, lro_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +static int hinic_dcb_set_hw_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_pfc pfc = {0}; + u16 out_size = sizeof(pfc); + int err; + + err = hinic_global_func_id_get(hwdev, &pfc.func_id); + if (err) + return err; + + pfc.pfc_bitmap = pfc_bitmap; + pfc.pfc_en = pfc_en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC, + &pfc, sizeof(pfc), &pfc, &out_size); + if (err || pfc.status || !out_size) { + nic_err(dev->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n", + err, pfc.status, out_size); + return -EINVAL; + } + + return 0; +} + +/* dcbtool */ +int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg; + int err; + + down(&nic_cfg->cfg_lock); + + err = hinic_dcb_set_hw_pfc(hwdev, pfc_en, pfc_bitmap); + if (err) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = pfc_en; + nic_cfg->pfc_bitmap = pfc_bitmap; + + /* pause settings is opposite from pfc */ + nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1; + nic_cfg->nic_pause.tx_pause = pfc_en ? 0 : 1; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap) +{ + return 0; +} + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw, + u8 *prio) +{ + struct hinic_up_ets_cfg ets = {0}; + u16 out_size = sizeof(ets); + u16 up_bw_t = 0; + u8 pg_bw_t = 0; + int i, err; + + for (i = 0; i < HINIC_DCB_TC_MAX; i++) { + up_bw_t += *(up_bw + i); + pg_bw_t += *(pg_bw + i); + + if (*(up_tc + i) > HINIC_DCB_TC_MAX) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid up %d mapping tc: %d\n", + i, *(up_tc + i)); + return -EINVAL; + } + } + + if (pg_bw_t != 100 || (up_bw_t % 100) != 0) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid pg_bw: %d or up_bw: %d\n", pg_bw_t, up_bw_t); + return -EINVAL; + } + + ets.port_id = 0; /* reserved */ + memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX); + memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX); + memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX); + memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX); + memcpy(ets.prio, prio, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS, + &ets, sizeof(ets), &ets, &out_size); + if (err || ets.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n", + err, ets.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw, + u8 *prio) +{ + return 0; +} + +int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up) +{ + struct hinic_cos_up_map map = {0}; + u16 out_size = sizeof(map); + int err; + + if (!hwdev || !cos_up) + return -EINVAL; + + map.port_id = hinic_physical_port_id(hwdev); + map.cos_valid_mask = cos_valid_bitmap; + memcpy(map.map, cos_up, sizeof(map.map)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_COS_UP_MAP, + &map, sizeof(map), &map, &out_size); + if (err || map.status || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n", + err, map.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map) +{ + struct hinic_hwdev *dev; + struct hinic_nic_io *nic_io; + 
struct hinic_set_rq_iq_mapping rq_iq_mapping = {0}; + u16 out_size = sizeof(rq_iq_mapping); + int err; + + if (!hwdev || num_rqs > HINIC_MAX_NUM_RQ) + return -EINVAL; + + dev = hwdev; + nic_io = dev->nic_io; + + hinic_qps_num_set(dev, nic_io->num_qps); + + err = hinic_global_func_id_get(hwdev, &rq_iq_mapping.func_id); + if (err) + return err; + + rq_iq_mapping.num_rqs = num_rqs; + rq_iq_mapping.rq_depth = (u16)ilog2(nic_io->rq_depth); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP, + &rq_iq_mapping, sizeof(rq_iq_mapping), + &rq_iq_mapping, &out_size); + if (err || !out_size || rq_iq_mapping.status) { + nic_err(dev->dev_hdl, "Failed to set rq cos mapping, err: %d, status: 0x%x, out size: 0x%x\n", + err, rq_iq_mapping.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_dcb_set_rq_iq_mapping); + +/* nictool */ +int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period) +{ + return 0; +} + +int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *cfg) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_lro_config lro_cfg = {0}; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev || !cfg) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &lro_cfg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err || !out_size || lro_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.status, out_size); + return -EINVAL; + } + + cfg->func_id = lro_cfg.func_id; + cfg->lro_ipv4_en = lro_cfg.lro_ipv4_en; + cfg->lro_ipv6_en = lro_cfg.lro_ipv6_en; + cfg->lro_max_wqe_num = lro_cfg.lro_max_wqe_num; + return 0; +} + +int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size) +{ + return 0; +} + +int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size) +{ + return 0; +} + +int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable) +{ + struct hinic_port_loopback lb = {0}; + u16 out_size = sizeof(lb); + int err; + + lb.mode = mode; + lb.en = enable; + + if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Invalid loopback mode %d to set\n", mode); + return -EINVAL; + } + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE, + &lb, sizeof(lb), &lb, &out_size); + if (err || !out_size || lb.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n", + mode, enable, err, lb.status, out_size); + return -EINVAL; + } + + nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Set loopback mode %d en %d succeed\n", mode, enable); + + return 0; +} + +int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable) +{ + struct hinic_port_loopback lb = {0}; + u16 out_size = sizeof(lb); + int err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LOOPBACK_MODE, + &lb, sizeof(lb), &lb, &out_size); + if (err || !out_size || lb.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, lb.status, out_size); + return -EINVAL; + } + + *mode = lb.mode; + *enable = lb.en; + return 0; +} + +int hinic_set_loopback_mode(void *hwdev, bool enable) +{ + return hinic_set_loopback_mode_ex(hwdev, HINIC_INTERNAL_LP_MODE, + enable); +} + +int hinic_get_port_enable_state(void *hwdev, bool *enable) +{ + return 0; +} + 
+int hinic_get_vport_enable_state(void *hwdev, bool *enable) +{ + return 0; +} + +int hinic_set_lli_state(void *hwdev, u8 lli_state) +{ + return 0; +} + +int hinic_set_vport_enable(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_vport_state en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + u32 timeout; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.state = enable ? 1 : 0; + + if (HINIC_IS_VF(nic_hwdev)) + timeout = SET_VPORT_MBOX_TIMEOUT; + else + timeout = SET_VPORT_MGMT_TIMEOUT; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), &en_state, + &out_size, timeout); + + if (err || !out_size || en_state.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_vport_enable); + +#define NIC_PORT_DISABLE 0x0 +#define NIC_PORT_ENABLE 0x3 +int hinic_set_port_enable(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_state en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) + return -EINVAL; + + if (HINIC_IS_VF(nic_hwdev)) + return 0; + + err = hinic_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.version = HINIC_CMD_VER_FUNC_ID; + en_state.state = enable ? NIC_PORT_ENABLE : NIC_PORT_DISABLE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE, + &en_state, sizeof(en_state), &en_state, + &out_size); + if (err || !out_size || en_state.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_port_enable); + +/* rss */ +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct nic_rss_context_tbl *ctx_tbl; + struct hinic_cmd_buf *cmd_buf; + u32 ctx = 0; + u64 out_param; + int err; + + if (!hwdev) + return -EINVAL; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx |= HINIC_RSS_TYPE_SET(1, VALID) | + HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + + cmd_buf->size = sizeof(struct nic_rss_context_tbl); + + ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; + ctx_tbl->group_index = cpu_to_be32(tmpl_idx); + ctx_tbl->offset = 0; + ctx_tbl->size = sizeof(u32); + ctx_tbl->size = cpu_to_be32(ctx_tbl->size); + ctx_tbl->rsvd = 0; + ctx_tbl->ctx = cpu_to_be32(ctx); + + /* cfg the rss context table by command queue */ + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + cmd_buf, &out_param, 0); + + hinic_free_cmd_buf(hwdev, cmd_buf); + + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss context table, err: 
%d\n", + err); + return -EFAULT; + } + + return 0; +} + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_context_table ctx_tbl = {0}; + u16 out_size = sizeof(ctx_tbl); + int err; + + if (!hwdev || !rss_type) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &ctx_tbl.func_id); + if (err) + return err; + + ctx_tbl.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + if (err || !out_size || ctx_tbl.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", + err, ctx_tbl.status, out_size); + return -EINVAL; + } + + rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, + TCP_IPV6_EXT); + rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_key temp_key = {0}; + u16 out_size = sizeof(temp_key); + int err; + + if (!hwdev || !temp) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &temp_key.func_id); + if (err) + return err; + + temp_key.template_id = (u8)tmpl_idx; + memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n", + err, temp_key.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_template_key temp_key = {0}; + u16 out_size = sizeof(temp_key); + int err; + + if (!hwdev || !temp) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &temp_key.func_id); + if (err) + return err; + + temp_key.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + &temp_key, sizeof(temp_key), + &temp_key, &out_size); + if (err || !out_size || temp_key.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\n", + err, temp_key.status, out_size); + return -EINVAL; + } + + memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE); + + return 0; +} + +int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_engine_type hash_type = {0}; + u16 out_size = sizeof(hash_type); + int err; + + if (!hwdev || !type) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &hash_type.func_id); + if (err) + return err; + + hash_type.template_id = tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.status) 
{ + nic_err(nic_hwdev->dev_hdl, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + err, hash_type.status, out_size); + return -EINVAL; + } + + *type = hash_type.hash_engine; + return 0; +} + +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_engine_type hash_type = {0}; + u16 out_size = sizeof(hash_type); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &hash_type.func_id); + if (err) + return err; + + hash_type.hash_engine = type; + hash_type.template_id = tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err || !out_size || hash_type.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + err, hash_type.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct nic_rss_indirect_tbl *indir_tbl; + struct hinic_cmd_buf *cmd_buf; + u32 i; + u32 *temp; + u32 indir_size; + u64 out_param; + int err; + + if (!hwdev || !indir_table) + return -EINVAL; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + + indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; + indir_tbl->group_index = cpu_to_be32(tmpl_idx); + + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + indir_tbl->entry[i] = (u8)(*(indir_table + i)); + + if (0x3 == (i & 0x3)) { + temp = (u32 *)&indir_tbl->entry[i - 3]; + *temp = cpu_to_be32(*temp); + } + } + + /* cfg the rss indirect table by command queue */ + indir_size = HINIC_RSS_INDIR_SIZE / 2; + indir_tbl->offset = 0; + indir_tbl->size = cpu_to_be32(indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + goto free_buf; + } + + indir_tbl->offset = cpu_to_be32(indir_size); + indir_tbl->size = cpu_to_be32(indir_size); + memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + } + +free_buf: + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_indir_table rss_cfg = {0}; + u16 out_size = sizeof(rss_cfg); + int err = 0, i; + + err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id); + if (err) + return err; + + rss_cfg.template_id = (u8)tmpl_idx; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + &rss_cfg, sizeof(rss_cfg), &rss_cfg, + &out_size); + if (err || !out_size || rss_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.status, out_size); + return -EINVAL; + } + + hinic_be32_to_cpu(rss_cfg.indir, 
HINIC_RSS_INDIR_SIZE); + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir_table[i] = rss_cfg.indir[i]; + + return 0; +} + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_rss_config rss_cfg = {0}; + u16 out_size = sizeof(rss_cfg); + int err; + + /* micro code required: number of TC should be power of 2 */ + if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rss_cfg.func_id); + if (err) + return err; + + rss_cfg.rss_en = rss_en; + rss_cfg.template_id = tmpl_idx; + rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0; + + memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG, + &rss_cfg, sizeof(rss_cfg), + &rss_cfg, &out_size); + if (err || !out_size || rss_cfg.status) { + nic_err(nic_hwdev->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats) +{ + struct hinic_port_stats_info stats_info = {0}; + struct hinic_cmd_vport_stats vport_stats = {0}; + u16 out_size = sizeof(vport_stats); + int err; + + err = hinic_global_func_id_get(hwdev, &stats_info.func_id); + if (err) + return err; + + stats_info.stats_version = HINIC_PORT_STATS_VERSION; + stats_info.stats_size = sizeof(vport_stats); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT, + &stats_info, sizeof(stats_info), + &vport_stats, &out_size); + if (err || !out_size || vport_stats.status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, vport_stats.status, out_size); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats) +{ + struct hinic_port_stats *port_stats; + struct hinic_port_stats_info stats_info = {0}; + u16 out_size = sizeof(*port_stats); + int err; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) + return -ENOMEM; + + stats_info.stats_version = HINIC_PORT_STATS_VERSION; + stats_info.stats_size = sizeof(*port_stats); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS, + &stats_info, sizeof(stats_info), + port_stats, &out_size); + if (err || !out_size || port_stats->status) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_stats->status, out_size); + err = -EINVAL; + goto out; + } + + memcpy(stats, &port_stats->stats, sizeof(*stats)); + +out: + kfree(port_stats); + + return err; +} + +int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_version_info up_ver = {0}; + u16 out_size; + int err; + + out_size = sizeof(up_ver); + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, + &up_ver, sizeof(up_ver), &up_ver, + &out_size); + if (err || !out_size || up_ver.status) { + nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", + err, up_ver.status, out_size); + return -EINVAL; + } + + err = snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver); + if (err <= 0 || err >= HINIC_MGMT_VERSION_MAX_LEN) { + nic_err(dev->dev_hdl, + "Failed to snprintf fw 
version, function return(%d) and dest_len(%d)\n",
+			err, HINIC_MGMT_VERSION_MAX_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver)
+{
+	struct hinic_hwdev *dev = hwdev;
+	struct hinic_version_info ver_info = {0};
+	u16 out_size = sizeof(ver_info);
+	int err;
+
+	if (!hwdev || !fw_ver)
+		return -EINVAL;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	out_size = sizeof(ver_info);
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get boot version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	out_size = sizeof(ver_info);
+	err = l2nic_msg_to_mgmt_sync(hwdev,
+				     HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+				     &ver_info, sizeof(ver_info), &ver_info,
+				     &out_size);
+	if (err || !out_size || ver_info.status) {
+		nic_err(dev->dev_hdl, "Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, ver_info.status, out_size);
+		return -EINVAL;
+	}
+
+	memcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_fw_version);
+
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_mgmt template_mgmt = {0};
+	u16 out_size = sizeof(template_mgmt);
+	int err;
+
+	if (!hwdev || !tmpl_idx)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id);
+	if (err)
+		return err;
+
+	template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+				     &template_mgmt, sizeof(template_mgmt),
+				     &template_mgmt, &out_size);
+	if (err || !out_size || template_mgmt.status) {
+		if (template_mgmt.status == HINIC_MGMT_STATUS_ERR_FULL) {
+			nic_warn(nic_hwdev->dev_hdl, "Failed to alloc rss template, table is full\n");
+			return -ENOSPC;
+		}
+		nic_err(nic_hwdev->dev_hdl, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, template_mgmt.status, out_size);
+		return -EINVAL;
+	}
+
+	*tmpl_idx = template_mgmt.template_id;
+
+	return 0;
+}
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_rss_template_mgmt template_mgmt = {0};
+	u16 out_size = sizeof(template_mgmt);
+	int err;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	err = hinic_global_func_id_get(hwdev, &template_mgmt.func_id);
+	if (err)
+		return err;
+
+	template_mgmt.template_id = tmpl_idx;
+	template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+				     &template_mgmt, sizeof(template_mgmt),
+				     &template_mgmt, &out_size);
+	if (err || !out_size || template_mgmt.status) {
+		nic_err(nic_hwdev->dev_hdl, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, template_mgmt.status, out_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_set_port_funcs_state(void *hwdev, bool enable)
+{
+	
struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_port_funcs_state state = {0}; + u16 out_size = sizeof(state); + int err = 0; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &state.func_id); + if (err) + return err; + + state.drop_en = enable ? 0 : 1; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_FUNCS_STATE, + &state, sizeof(state), &state, &out_size); + if (err || !out_size || state.status) { + nic_err(dev->dev_hdl, "Failed to %s all functions in port, err: %d, status: 0x%x, out size: 0x%x\n", + enable ? "enable" : "disable", err, state.status, + out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_reset_port_link_cfg(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_reset_link_cfg reset_cfg = {0}; + u16 out_size = sizeof(reset_cfg); + int err; + + err = hinic_global_func_id_get(hwdev, &reset_cfg.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG, + &reset_cfg, sizeof(reset_cfg), + &reset_cfg, &out_size); + if (err || !out_size || reset_cfg.status) { + nic_err(dev->dev_hdl, "Failed to reset port link configure, err: %d, status: 0x%x, out size: 0x%x\n", + err, reset_cfg.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev || !mac) + return -EINVAL; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + memcpy(nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].vf_mac_addr, mac, + ETH_ALEN); + + return 0; +} + +static int hinic_change_vf_mtu_msg_handler(struct hinic_hwdev *hwdev, u16 vf_id, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int err; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_CHANGE_MTU, buf_in, in_size, + buf_out, out_size, 0); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %u mtu\n", vf_id); + return err; + } + + return 0; +} + +static bool is_ether_addr_zero(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} + +static int hinic_get_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_info = buf_out; + int err; + + if (nic_io->hwdev->func_mode == FUNC_MOD_MULTI_BM_SLAVE || + nic_io->hwdev->func_mode == FUNC_MOD_MULTI_VM_SLAVE || + (hinic_support_ovs(nic_io->hwdev, NULL))) { + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_GET_MAC, buf_in, + in_size, buf_out, out_size, 0); + + if (!err) { + if (is_ether_addr_zero(&mac_info->mac[0])) + memcpy(mac_info->mac, + vf_info->vf_mac_addr, ETH_ALEN); + } + return err; + } + + memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN); + mac_info->status = 0; + *out_size = sizeof(*mac_info); + + return 0; +} + +static int hinic_set_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_in = buf_in; + struct hinic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->pf_set_mac && !(vf_info->trust) && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF %d MAC address\n", + HW_VF_ID_TO_OS(vf)); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 
0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_MAC, buf_in, in_size, + buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +static int hinic_del_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_set *mac_in = buf_in; + struct hinic_port_mac_set *mac_out = buf_out; + int err; + + if (vf_info->pf_set_mac && !(vf_info->trust) && + is_valid_ether_addr(mac_in->mac) && + !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac\n"); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_DEL_MAC, buf_in, in_size, + buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +static int hinic_update_vf_mac_msg_handler(struct hinic_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + struct hinic_port_mac_update *mac_in = buf_in; + struct hinic_port_mac_update *mac_out = buf_out; + int err; + + if (!is_valid_ether_addr(mac_in->new_mac)) { + nic_err(nic_io->hwdev->dev_hdl, "Update VF MAC is invalid\n"); + return -EINVAL; + } + + if (vf_info->pf_set_mac && !(vf_info->trust)) { + nic_warn(nic_io->hwdev->dev_hdl, "PF has already set VF mac\n"); + mac_out->status = HINIC_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_UPDATE_MAC, buf_in, + in_size, buf_out, out_size, 0); + if ((err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { + nic_warn(nic_io->hwdev->dev_hdl, "Failed to update VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->status, *out_size); + return -EFAULT; + } + + return err; +} + +/*lint -save -e734*/ +static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid, + u8 qos, int vf_id) +{ + struct hinic_vf_vlan_config vf_vlan = {0}; + u8 cmd; + u16 out_size = sizeof(vf_vlan); + int err; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + vf_vlan.vlan_id = vid; + vf_vlan.qos = qos; + + if (add) + cmd = HINIC_PORT_CMD_SET_VF_VLAN; + else + cmd = HINIC_PORT_CMD_CLR_VF_VLAN; + + err = l2nic_msg_to_mgmt_sync(hwdev, cmd, &vf_vlan, sizeof(vf_vlan), + &vf_vlan, &out_size); + if (err || !out_size || vf_vlan.status) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size); + return -EFAULT; + } + + return 0; +} + +/*lint -restore*/ +static int 
hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id) +{ + struct vf_data_storage *vf_info; + u16 func_id, vlan_id; + int err = 0; + + vf_info = hwdev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (vf_info->pf_set_mac) { + func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + if (FW_SUPPORT_MAC_REUSE_FUNC(hwdev)) { + vlan_id = vf_info->pf_vlan; + if (vlan_id) + vlan_id |= HINIC_ADD_VLAN_IN_MAC; + } else { + vlan_id = 0; + } + + err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id, + func_id); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d MAC\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + if (hinic_vf_info_vlanprio(hwdev, vf_id)) { + err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan, + vf_info->pf_qos, vf_id); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to add VF %d VLAN_QOS\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + + if (vf_info->max_rate) { + err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate, + vf_info->min_rate); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set VF %d max rate %d, min rate %d\n", + HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, + vf_info->min_rate); + return err; + } + } + + return 0; +} + +static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id, + void *buf_out, u16 *out_size) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct hinic_register_vf *register_info = buf_out; + int err; + + if (vf_id > nic_io->max_vfs) { + nic_err(hw_dev->dev_hdl, "Register VF id %d exceed limit[0-%d]\n", + HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs)); + register_info->status = EFAULT; + return -EFAULT; + } + + *out_size = sizeof(*register_info); + err = hinic_init_vf_config(hw_dev, vf_id); + if (err) { + register_info->status = EFAULT; + return err; + } + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; + + return 0; +} + +void hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + + if (vf_id > nic_io->max_vfs) + return; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false; +} + +static void hinic_get_vf_link_status_msg_handler(struct hinic_nic_io *nic_io, + u16 vf_id, void *buf_out, + u16 *out_size) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + struct hinic_get_link *get_link = buf_out; + bool link_forced, link_up; + + link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; + link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; + + if (link_forced) + get_link->link_status = link_up ? 
+ HINIC_LINK_UP : HINIC_LINK_DOWN; + else + get_link->link_status = nic_io->link_status; + + get_link->status = 0; + *out_size = sizeof(*get_link); +} + +static void hinic_get_vf_cos_msg_handler(struct hinic_nic_io *nic_io, + u16 vf_id, void *buf_out, + u16 *out_size) +{ + struct hinic_vf_dcb_state *dcb_state = buf_out; + + memcpy(&dcb_state->state, &nic_io->dcb_state, + sizeof(nic_io->dcb_state)); + + dcb_state->status = 0; + *out_size = sizeof(*dcb_state); +} + +/* pf receive message from vf */ +int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + u8 size = ARRAY_SIZE(nic_cmd_support_vf); + struct hinic_nic_io *nic_io; + int err = 0; + u32 timeout = 0; + + if (!hwdev) + return -EFAULT; + + if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd, + buf_in, in_size, size)) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "PF Receive VF nic cmd(0x%x) or mbox len(0x%x) is invalid\n", + cmd, in_size); + err = HINIC_MBOX_VF_CMD_ERROR; + return err; + } + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + switch (cmd) { + case HINIC_PORT_CMD_VF_REGISTER: + err = hinic_register_vf_msg_handler(hwdev, vf_id, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_VF_UNREGISTER: + *out_size = 0; + hinic_unregister_vf_msg_handler(hwdev, vf_id); + break; + + case HINIC_PORT_CMD_CHANGE_MTU: + err = hinic_change_vf_mtu_msg_handler(hwdev, vf_id, buf_in, + in_size, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_MAC: + hinic_get_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_SET_MAC: + err = hinic_set_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_DEL_MAC: + err = hinic_del_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, out_size); + break; + + case HINIC_PORT_CMD_UPDATE_MAC: + err = hinic_update_vf_mac_msg_handler(nic_io, vf_id, buf_in, + in_size, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_LINK_STATE: + hinic_get_vf_link_status_msg_handler(nic_io, vf_id, buf_out, + out_size); + break; + + case HINIC_PORT_CMD_GET_VF_COS: + hinic_get_vf_cos_msg_handler(nic_io, vf_id, buf_out, out_size); + break; + + default: + /* pass through */ + if (cmd == HINIC_PORT_CMD_SET_VPORT_ENABLE) + timeout = SET_VPORT_MGMT_TIMEOUT; + + err = hinic_pf_msg_to_mgmt_sync(nic_io->hwdev, HINIC_MOD_L2NIC, + cmd, buf_in, in_size, + buf_out, out_size, timeout); + + break; + } + + if (err && err != HINIC_DEV_BUSY_ACTIVE_FW && + err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) + nic_err(nic_io->hwdev->dev_hdl, "PF receive VF L2NIC cmd: %d process error, err: %d\n", + cmd, err); + return err; +} + +static int hinic_init_vf_infos(struct hinic_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u8 vf_link_state; + + if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) { + nic_warn(nic_io->hwdev->dev_hdl, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n", + set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO); + set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO; + } + + vf_link_state = hinic_support_ovs(nic_io->hwdev, NULL) ? 
+			HINIC_IFLA_VF_LINK_STATE_ENABLE : set_vf_link_state;
+
+	if (FUNC_FORCE_LINK_UP(nic_io->hwdev))
+		vf_link_state = HINIC_IFLA_VF_LINK_STATE_ENABLE;
+
+	switch (vf_link_state) {
+	case HINIC_IFLA_VF_LINK_STATE_AUTO:
+		vf_infos[vf_id].link_forced = false;
+		break;
+	case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+		vf_infos[vf_id].link_forced = true;
+		vf_infos[vf_id].link_up = true;
+		break;
+	case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+		vf_infos[vf_id].link_forced = true;
+		vf_infos[vf_id].link_up = false;
+		break;
+	default:
+		nic_err(nic_io->hwdev->dev_hdl, "Input parameter set_vf_link_state error: %d\n",
+			vf_link_state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+	u16 i, func_idx;
+	int err;
+
+	/* vf use 256K as default wq page size, and can't change it */
+	for (i = start_vf_id; i <= end_vf_id; i++) {
+		func_idx = hinic_glb_pf_vf_offset(hwdev) + i;
+		err = hinic_set_wq_page_size(hwdev, func_idx,
+					     HINIC_DEFAULT_WQ_PAGE_SIZE);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id)
+{
+	u16 func_idx, idx;
+
+	for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+		func_idx = hinic_glb_pf_vf_offset(hwdev) + idx;
+		hinic_set_wq_page_size(hwdev, func_idx, HINIC_HW_WQ_PAGE_SIZE);
+
+		hinic_clear_vf_infos(hwdev, idx);
+	}
+
+	return 0;
+}
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+	struct hinic_nic_io *nic_io;
+	int err = 0;
+	struct hinic_register_vf register_info = {0};
+	u32 size;
+	u16 i, out_size = sizeof(register_info);
+
+	hwdev->nic_io = kzalloc(sizeof(*hwdev->nic_io), GFP_KERNEL);
+	if (!hwdev->nic_io)
+		return -ENOMEM;
+
+	nic_io = hwdev->nic_io;
+	nic_io->hwdev = hwdev;
+
+	sema_init(&nic_io->nic_cfg.cfg_lock, 1);
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+				       HINIC_PORT_CMD_VF_REGISTER,
+				       &register_info, sizeof(register_info),
+				       &register_info, &out_size, 0);
+		if (err || register_info.status || !out_size) {
+			nic_err(hwdev->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+				err, register_info.status, out_size);
+			hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+			err = -EIO;
+			goto out_free_nic_io;
+		}
+	} else {
+		nic_io->max_vfs = hinic_func_max_vf(hwdev);
+		size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+		if (size != 0) {
+			nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+			if (!nic_io->vf_infos) {
+				err = -ENOMEM;
+				goto out_free_nic_io;
+			}
+
+			for (i = 0; i < nic_io->max_vfs; i++) {
+				err = hinic_init_vf_infos(nic_io, i);
+				if (err)
+					goto init_vf_infos_err;
+			}
+
+			err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+							nic_pf_mbox_handler);
+			if (err)
+				goto register_pf_mbox_cb_err;
+		}
+	}
+
+	return 0;
+
+register_pf_mbox_cb_err:
+init_vf_infos_err:
+	kfree(nic_io->vf_infos);
+
+out_free_nic_io:
+	kfree(hwdev->nic_io);
+	hwdev->nic_io = NULL;
+
+	return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_register_vf unregister = {0};
+	u16 out_size = sizeof(unregister);
+	int err;
+
+	if (hinic_func_type(hwdev) == TYPE_VF) {
+		err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+				       HINIC_PORT_CMD_VF_UNREGISTER,
+				       &unregister, sizeof(unregister),
+				       &unregister, &out_size, 0);
+		if (err || !out_size || unregister.status)
+			nic_err(hwdev->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+				err, unregister.status, out_size);
+	} else {
+		if (hwdev->nic_io->vf_infos) {
+			hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->nic_io->vf_infos); + } + } + + kfree(hwdev->nic_io); + hwdev->nic_io = NULL; +} + +/*lint -save -e734*/ +/* this function just be called by hinic_ndo_set_vf_mac, others are + * not permitted + */ +int hinic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct vf_data_storage *vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); + int del_vf_mac = is_zero_ether_addr(mac_addr); + u16 func_id; + int err; + + /* duplicate request, so just return success */ + if (!memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN)) + return 0; + + func_id = hinic_glb_pf_vf_offset(hw_dev) + vf; + if (del_vf_mac) + err = hinic_del_mac(hwdev, vf_info->vf_mac_addr, 0, func_id); + else + err = hinic_update_mac(hw_dev, vf_info->vf_mac_addr, + mac_addr, 0, func_id); + if (err) + return err; + + memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN); + vf_info->pf_set_mac = !del_vf_mac; + + return 0; +} + +int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + int err; + + err = hinic_set_vf_vlan(hw_dev, true, vlan, qos, vf_id); + if (err) + return err; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; + + nic_info(hw_dev->dev_hdl, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan, qos, HW_VF_ID_TO_OS(vf_id)); + return 0; +} + +int hinic_kill_vf_vlan(void *hwdev, int vf_id) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + int err; + + err = hinic_set_vf_vlan(hw_dev, false, + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, + vf_id); + if (err) + return err; + + nic_info(hw_dev->dev_hdl, "Remove VLAN %d on VF %d\n", + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, + HW_VF_ID_TO_OS(vf_id)); + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0; + + return 0; +} + +u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + u16 pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; + u8 pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; + u16 vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT; + + return vlanprio; +} + +/*lint -restore*/ + +bool hinic_vf_is_registered(void *hwdev, u16 vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered; +} + +void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev *)hwdev; + struct vf_data_storage *vfinfo; + + vfinfo = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + + ivi->vf = HW_VF_ID_TO_OS(vf_id); + memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN); + ivi->vlan = vfinfo->pf_vlan; + ivi->qos = vfinfo->pf_qos; + ivi->spoofchk = vfinfo->spoofchk; + ivi->trusted = vfinfo->trust; + + ivi->max_tx_rate = vfinfo->max_rate; + ivi->min_tx_rate = vfinfo->min_rate; + + if (!vfinfo->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vfinfo->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +} + +void hinic_clear_vf_infos(void *hwdev, u16 vf_id) +{ + struct hinic_hwdev *hw_dev = (struct hinic_hwdev 
*)hwdev; + struct vf_data_storage *vf_infos; + u16 func_id; + + func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + vf_infos = hw_dev->nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); + if (vf_infos->pf_set_mac) + hinic_del_mac(hwdev, vf_infos->vf_mac_addr, 0, func_id); + + if (hinic_vf_info_vlanprio(hwdev, vf_id)) + hinic_kill_vf_vlan(hwdev, vf_id); + + if (vf_infos->max_rate) + hinic_set_vf_tx_rate(hwdev, vf_id, 0, 0); + + if (vf_infos->spoofchk) + hinic_set_vf_spoofchk(hwdev, vf_id, false); + + if (vf_infos->trust) + hinic_set_vf_trust(hwdev, vf_id, false); + + memset(vf_infos, 0, sizeof(*vf_infos)); + /* set vf_infos to default */ + hinic_init_vf_infos(hw_dev->nic_io, HW_VF_ID_TO_OS(vf_id)); +} + +static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id, + u8 link_status) +{ + struct hinic_port_link_status link = {0}; + struct vf_data_storage *vf_infos = hwdev->nic_io->vf_infos; + u16 out_size = sizeof(link); + int err; + + if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { + link.link = link_status; + link.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC, + vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT, + &link, sizeof(link), + &link, &out_size, 0); + if (err || !out_size || link.status) + nic_err(hwdev->dev_hdl, + "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + link.status, out_size); + } +} + +/* send link change event mbox msg to active vfs under the pf */ +void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link_status) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + u16 i; + + nic_io->link_status = link_status; + for (i = 1; i <= nic_io->max_vfs; i++) { + if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) + hinic_notify_vf_link_status(nic_io->hwdev, i, + link_status); + } +} + +void hinic_save_pf_link_status(void *hwdev, u8 link_status) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->link_status = link_status; +} + +int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u8 link_status = 0; + + switch (link) { + case HINIC_IFLA_VF_LINK_STATE_AUTO: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ? + true : false; + link_status = nic_io->link_status; + break; + case HINIC_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; + link_status = HINIC_LINK_UP; + break; + case HINIC_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; + link_status = HINIC_LINK_DOWN; + break; + default: + return -EINVAL; + } + + /* Notify the VF of its new link state */ + hinic_notify_vf_link_status(hwdev, vf_id, link_status); + + return 0; +} + +int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = NULL; + struct hinic_spoofchk_set spoofchk_cfg = {0}; + struct vf_data_storage *vf_infos = NULL; + u16 out_size = sizeof(spoofchk_cfg); + int err = 0; + + if (!hwdev) + return -EINVAL; + + nic_io = hw_dev->nic_io; + vf_infos = nic_io->vf_infos; + + spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + spoofchk_cfg.state = spoofchk ? 
1 : 0; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_ENABLE_SPOOFCHK, + &spoofchk_cfg, + sizeof(spoofchk_cfg), &spoofchk_cfg, + &out_size, 0); + if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) { + err = HINIC_MGMT_CMD_UNSUPPORTED; + } else if (err || !out_size || spoofchk_cfg.status) { + nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status, + out_size); + err = -EINVAL; + } + + vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; + + return err; +} + +int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = NULL; + struct vf_data_storage *vf_infos = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hw_dev->nic_io; + vf_infos = nic_io->vf_infos; + vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; + + return 0; +} + +bool hinic_vf_info_spoofchk(void *hwdev, int vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + bool spoofchk = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; + + return spoofchk; +} + +bool hinic_vf_info_trust(void *hwdev, int vf_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + bool trust = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; + + return trust; +} + +static int hinic_set_vf_rate_limit(void *hwdev, u16 vf_id, u32 tx_rate) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct hinic_tx_rate_cfg rate_cfg = {0}; + u16 out_size = sizeof(rate_cfg); + int err; + + rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + rate_cfg.tx_rate = tx_rate; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg, + sizeof(rate_cfg), &rate_cfg, + &out_size, 0); + if (err || !out_size || rate_cfg.status) { + nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status, + out_size); + if (rate_cfg.status) + return rate_cfg.status; + + return -EIO; + } + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0; + + return 0; +} + +static int hinic_set_vf_tx_rate_max_min(void *hwdev, u16 vf_id, + u32 max_rate, u32 min_rate) +{ + struct hinic_hwdev *hw_dev = hwdev; + struct hinic_nic_io *nic_io = hw_dev->nic_io; + struct hinic_tx_rate_cfg_max_min rate_cfg = {0}; + u16 out_size = sizeof(rate_cfg); + int err; + + rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev) + vf_id; + rate_cfg.max_rate = max_rate; + rate_cfg.min_rate = min_rate; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg, + &out_size, 0); + if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED && + rate_cfg.status) || err || !out_size) { + nic_err(hw_dev->dev_hdl, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, + rate_cfg.status, out_size); + return -EIO; + } + + if (!rate_cfg.status) { + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; + } + + return rate_cfg.status; +} + +int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) +{ + struct hinic_hwdev *hw_dev = hwdev; + int err; + + err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate); + if (err != 
HINIC_MGMT_CMD_UNSUPPORTED)
+		return err;
+
+	if (min_rate) {
+		nic_err(hw_dev->dev_hdl, "Current firmware doesn't support setting the min tx rate\n");
+		return -EINVAL;
+	}
+
+	nic_info(hw_dev->dev_hdl, "Current firmware doesn't support setting the min tx rate, forcing min_tx_rate = max_tx_rate\n");
+
+	return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io;
+	struct vf_data_storage *vf_infos;
+	struct hinic_vf_dcb_state vf_dcb = {0};
+	u16 vf_id, out_size = 0;
+	int err;
+
+	if (!hwdev || !dcb_state || !hw_dev->nic_io)
+		return -EINVAL;
+
+	nic_io = hw_dev->nic_io;
+	if (!memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state)))
+		return 0;
+
+	memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state));
+	/* save in sdk, vf will get dcb state when probing */
+	hinic_save_dcb_state(hwdev, dcb_state);
+
+	/* notify stateful modules in the pf, then notify all vfs */
+	hinic_notify_dcb_state_event(hwdev, dcb_state);
+
+	/* vf not supported, no need to notify vfs */
+	if (!nic_io->vf_infos)
+		return 0;
+
+	vf_infos = nic_io->vf_infos;
+	for (vf_id = 0; vf_id < nic_io->max_vfs; vf_id++) {
+		if (vf_infos[vf_id].registered) {
+			vf_dcb.status = 0;
+			out_size = sizeof(vf_dcb);
+			err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+					       OS_VF_ID_TO_HW(vf_id),
+					       HINIC_PORT_CMD_SET_VF_COS,
+					       &vf_dcb, sizeof(vf_dcb), &vf_dcb,
+					       &out_size, 0);
+			if (err || vf_dcb.status || !out_size)
+				nic_err(hw_dev->dev_hdl,
+					"Failed to notify dcb state to VF %d, err: %d, status: 0x%x, out size: 0x%x\n",
+					vf_id, err, vf_dcb.status, out_size);
+		}
+	}
+
+	return 0;
+}
+
+int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev || !dcb_state)
+		return -EINVAL;
+
+	nic_io = hw_dev->nic_io;
+	memcpy(dcb_state, &nic_io->dcb_state, sizeof(*dcb_state));
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_dcb_state);
+
+int hinic_save_dcb_state(struct hinic_hwdev *hwdev,
+			 struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_nic_io *nic_io;
+
+	if (!hwdev || !dcb_state)
+		return -EINVAL;
+
+	if (!hwdev->nic_io)
+		return -EINVAL;
+
+	nic_io = hwdev->nic_io;
+	memcpy(&nic_io->dcb_state, dcb_state, sizeof(*dcb_state));
+
+	return 0;
+}
+
+int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state)
+{
+	struct hinic_hwdev *hw_dev = hwdev;
+	struct hinic_vf_dcb_state vf_dcb = {0};
+	u16 out_size = sizeof(vf_dcb);
+	int err;
+
+	if (!hwdev || !dcb_state)
+		return -EINVAL;
+
+	if (hinic_func_type(hwdev) != TYPE_VF) {
+		nic_err(hw_dev->dev_hdl, "Only a vf needs to get the pf dcb state\n");
+		return -EINVAL;
+	}
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+				     HINIC_PORT_CMD_GET_VF_COS, &vf_dcb,
+				     sizeof(vf_dcb), &vf_dcb,
+				     &out_size, 0);
+	if (err || !out_size || vf_dcb.status) {
+		nic_err(hw_dev->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, vf_dcb.status, out_size);
+		return -EFAULT;
+	}
+
+	memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state));
+	/* Save dcb_state in hw for stateful modules */
+	hinic_save_dcb_state(hwdev, dcb_state);
+
+	return 0;
+}
+EXPORT_SYMBOL(hinic_get_pf_dcb_state);
+
+int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id,
+		       u16 func_id)
+{
+	struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+	struct hinic_port_ipsu_mac mac_info = {0};
+	u16 out_size = sizeof(mac_info);
+	int err;
+
+	if (!hwdev ||
!mac_addr) + return -EINVAL; + + mac_info.index = index; + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + memcpy(mac_info.mac, mac_addr, ETH_ALEN); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_IPSU_MAC, + &mac_info, sizeof(mac_info), &mac_info, + &out_size); + if (err || !out_size || mac_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to set IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n", + index, err, mac_info.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id, + u16 *func_id) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_ipsu_mac mac_info = {0}; + u16 out_size = sizeof(mac_info); + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + mac_info.index = index; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_IPSU_MAC, + &mac_info, sizeof(mac_info), &mac_info, + &out_size); + if (err || !out_size || mac_info.status) { + nic_err(nic_hwdev->dev_hdl, + "Failed to get IPSU MAC(index %d), err: %d, status: 0x%x, out size: 0x%x\n", + index, err, mac_info.status, out_size); + return -EINVAL; + } + *func_id = mac_info.func_id; + *vlan_id = mac_info.vlan_id; + memcpy(mac_addr, mac_info.mac, ETH_ALEN); + + return 0; +} + +int hinic_set_anti_attack(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_port_anti_attack_rate rate = {0}; + u16 out_size = sizeof(rate); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &rate.func_id); + if (err) + return err; + + rate.enable = enable; + rate.cir = ANTI_ATTACK_DEFAULT_CIR; + rate.xir = ANTI_ATTACK_DEFAULT_XIR; + rate.cbs = ANTI_ATTACK_DEFAULT_CBS; + rate.xbs = ANTI_ATTACK_DEFAULT_XBS; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, + &rate, sizeof(rate), &rate, + &out_size); + if (err || !out_size || rate.status) { + nic_err(nic_hwdev->dev_hdl, "Can't %s port Anti-Attack rate limit err: %d, status: 0x%x, out size: 0x%x\n", + (enable ? "enable" : "disable"), err, rate.status, + out_size); + return -EINVAL; + } + + nic_info(nic_hwdev->dev_hdl, "%s port Anti-Attack rate limit succeed\n", + (enable ? 
"Enable" : "Disable")); + + return 0; +} + +int hinic_flush_sq_res(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_clear_sq_resource sq_res = {0}; + u16 out_size = sizeof(sq_res); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &sq_res.func_id); + if (err) + return err; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_SQ_RES, + &sq_res, sizeof(sq_res), &sq_res, + &out_size); + if (err || !out_size || sq_res.status) { + nic_err(dev->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", + err, sq_res.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_flush_sq_res); + +static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level); + +int hinic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg = &dev->nic_io->nic_cfg; + int err = 0; + + down(&nic_cfg->cfg_lock); + + /* Enable PFC will disable pause */ + if (nic_cfg->pfc_en) { + err = hinic_dcb_set_hw_pfc(hwdev, nic_cfg->pfc_en, + nic_cfg->pfc_bitmap); + if (err) + nic_err(dev->dev_hdl, "Failed to set pfc\n"); + + } else if (!port_info->autoneg_state || nic_cfg->pause_set) { + nic_cfg->nic_pause.auto_neg = port_info->autoneg_state; + err = hinic_set_hw_pause_info(hwdev, nic_cfg->nic_pause); + if (err) + nic_err(dev->dev_hdl, "Failed to set pause\n"); + } + + if (FUNC_SUPPORT_RATE_LIMIT(hwdev)) { + err = __set_pf_bw(hwdev, port_info->speed); + if (err) + nic_err(dev->dev_hdl, "Failed to set pf bandwidth limit\n"); + } + + up(&nic_cfg->cfg_lock); + + return err; +} + +int hinic_set_super_cqe_state(void *hwdev, bool enable) +{ + struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev; + struct hinic_super_cqe super_cqe = {0}; + u16 out_size = sizeof(super_cqe); + int err; + + if (!hwdev) + return -EINVAL; + + err = hinic_global_func_id_get(hwdev, &super_cqe.func_id); + if (err) + return err; + + super_cqe.super_cqe_en = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_SUPER_CQE, + &super_cqe, sizeof(super_cqe), &super_cqe, + &out_size); + if (err || !out_size || super_cqe.status) { + nic_err(nic_hwdev->dev_hdl, "Can't %s surper cqe, err: %d, status: 0x%x, out size: 0x%x\n", + (enable ? "enable" : "disable"), err, super_cqe.status, + out_size); + return -EINVAL; + } + + nic_info(nic_hwdev->dev_hdl, "%s super cqe succeed\n", + (enable ? 
"Enable" : "Disable")); + + return 0; +} + +int hinic_set_port_routine_cmd_report(void *hwdev, bool enable) +{ + struct hinic_port_rt_cmd rt_cmd = { 0 }; + struct hinic_hwdev *dev = hwdev; + u16 out_size = sizeof(rt_cmd); + int err; + + if (!hwdev) + return -EINVAL; + + rt_cmd.pf_id = (u8)hinic_global_func_id(hwdev); + rt_cmd.enable = enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, + HINIC_PORT_CMD_SET_PORT_REPORT, + &rt_cmd, sizeof(rt_cmd), &rt_cmd, + &out_size); + if (rt_cmd.status == HINIC_MGMT_CMD_UNSUPPORTED) { + nic_info(dev->dev_hdl, "Current firmware doesn't support to set port routine command report\n"); + } else if (rt_cmd.status || err || !out_size) { + nic_err(dev->dev_hdl, + "Failed to set port routine command report, err: %d, status: 0x%x, out size: 0x%x\n", + err, rt_cmd.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_capture_info cap_info = {0}; + u16 out_size = sizeof(cap_info); + int err; + + if (!hwdev) + return -EINVAL; + + cap_info.op_type = 2; /* function capture */ + cap_info.is_en_trx = cap_en; + cap_info.func_id = func_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_UCAPTURE_OPT, + &cap_info, sizeof(cap_info), + &cap_info, &out_size); + if (err || !out_size || cap_info.status) { + nic_err(dev->dev_hdl, + "Failed to set function capture attr, err: %d, status: 0x%x, out size: 0x%x\n", + err, cap_info.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic_set_func_capture_en); + +int hinic_force_drop_tx_pkt(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_force_pkt_drop pkt_drop = {0}; + u16 out_size = sizeof(pkt_drop); + int err; + + if (!hwdev) + return -EINVAL; + + pkt_drop.port = hinic_physical_port_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_FORCE_PKT_DROP, + &pkt_drop, sizeof(pkt_drop), + &pkt_drop, &out_size); + if ((pkt_drop.status != HINIC_MGMT_CMD_UNSUPPORTED && + pkt_drop.status) || err || !out_size) { + nic_err(dev->dev_hdl, + "Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n", + err, pkt_drop.status, out_size); + return -EFAULT; + } + + return pkt_drop.status; +} + +u32 hw_speed_convert[LINK_SPEED_LEVELS] = { + 10, 100, 1000, 10000, + 25000, 40000, 100000 +}; + +static int __set_pf_bw(struct hinic_hwdev *hwdev, u8 speed_level) +{ + struct hinic_nic_cfg *nic_cfg = &hwdev->nic_io->nic_cfg; + struct hinic_tx_rate_cfg rate_cfg = {0}; + u32 pf_bw = 0; + u16 out_size = sizeof(rate_cfg); + int err; + + if (speed_level >= LINK_SPEED_LEVELS) { + nic_err(hwdev->dev_hdl, "Invalid speed level: %d\n", + speed_level); + return -EINVAL; + } + + if (nic_cfg->pf_bw_limit == 100) { + pf_bw = 0; /* unlimit bandwidth */ + } else { + pf_bw = (hw_speed_convert[speed_level] / 100) * + nic_cfg->pf_bw_limit; + /* bandwidth limit is very small but not unlimit in this case */ + if (pf_bw == 0) + pf_bw = 1; + } + + err = hinic_global_func_id_get(hwdev, &rate_cfg.func_id); + if (err) + return err; + + rate_cfg.tx_rate = pf_bw; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg, + sizeof(rate_cfg), &rate_cfg, + &out_size, 0); + if (err || !out_size || rate_cfg.status) { + nic_err(hwdev->dev_hdl, "Failed to set rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", + pf_bw, err, rate_cfg.status, out_size); + if (rate_cfg.status) + return rate_cfg.status; + + return -EIO; + } + + return 0; +} + +int 
hinic_update_pf_bw(void *hwdev) +{ + struct hinic_hwdev *dev = hwdev; + struct nic_port_info port_info = {0}; + int err; + + if (hinic_func_type(hwdev) == TYPE_VF || + !(FUNC_SUPPORT_RATE_LIMIT(hwdev))) + return 0; + + err = hinic_get_port_info(hwdev, &port_info); + if (err) { + nic_err(dev->dev_hdl, "Failed to get port info\n"); + return -EIO; + } + + err = __set_pf_bw(hwdev, port_info.speed); + if (err) { + nic_err(dev->dev_hdl, "Failed to set pf bandwidth\n"); + return err; + } + + return 0; +} + +int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg; + u32 old_bw_limit; + u8 link_state = 0; + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic_func_type(hwdev) == TYPE_VF) + return 0; + + if (bw_limit > 100) { + nic_err(dev->dev_hdl, "Invalid bandwidth: %d\n", bw_limit); + return -EINVAL; + } + + err = hinic_get_link_state(hwdev, &link_state); + if (err) { + nic_err(dev->dev_hdl, "Failed to get link state\n"); + return -EIO; + } + + if (!link_state) { + nic_err(dev->dev_hdl, "Link status must be up when set pf tx rate\n"); + return -EINVAL; + } + + nic_cfg = &dev->nic_io->nic_cfg; + old_bw_limit = nic_cfg->pf_bw_limit; + nic_cfg->pf_bw_limit = bw_limit; + + err = hinic_update_pf_bw(hwdev); + if (err) { + nic_cfg->pf_bw_limit = old_bw_limit; + return err; + } + + return 0; +} + +/* Set link status follow port status */ +int hinic_set_link_status_follow(void *hwdev, + enum hinic_link_follow_status status) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_set_link_follow follow = {0}; + u16 out_size = sizeof(follow); + int err; + + if (!hwdev) + return -EINVAL; + + if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) { + nic_err(dev->dev_hdl, + "Invalid link follow status: %d\n", status); + return -EINVAL; + } + + err = hinic_global_func_id_get(hwdev, &follow.func_id); + if (err) + return err; + + follow.follow_status = status; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW, + &follow, sizeof(follow), &follow, + &out_size); + if ((follow.status != HINIC_MGMT_CMD_UNSUPPORTED && + follow.status) || err || !out_size) { + nic_err(dev->dev_hdl, + "Failed to set link status follow port status, err: %d, status: 0x%x, out size: 0x%x\n", + err, follow.status, out_size); + return -EFAULT; + } + + return follow.status; +} +EXPORT_SYMBOL(hinic_set_link_status_follow); + +/* HILINK module */ + +#define HINIC_MGMT_DEFAULT_SIZE 1 + +static int __hilink_msg_to_mgmt_sync(void *hwdev, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + int err; + + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_HILINK, cmd, buf_in, + in_size, buf_out, out_size, timeout); + if (err) + return err; + + if (*out_size == HINIC_MGMT_DEFAULT_SIZE && buf_out) + *((u8 *)(buf_out)) = HINIC_MGMT_CMD_UNSUPPORTED; + + return 0; +} + +int hinic_get_hilink_link_info(void *hwdev, struct hinic_link_info *info) +{ + struct hinic_hilink_link_info link_info = {0}; + u16 out_size = sizeof(link_info); + int err; + + link_info.port_id = hinic_physical_port_id(hwdev); + + err = __hilink_msg_to_mgmt_sync(hwdev, HINIC_HILINK_CMD_GET_LINK_INFO, + &link_info, sizeof(link_info), + &link_info, &out_size, 0); + if ((link_info.status != HINIC_MGMT_CMD_UNSUPPORTED && + link_info.status) || err || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get hilink info, err: %d, status: 0x%x, out size: 0x%x\n", + err, link_info.status, out_size); + return -EFAULT; + } + + if (!link_info.status) + memcpy(info, 
&link_info.info, sizeof(*info)); + else if (link_info.status == HINIC_MGMT_CMD_UNSUPPORTED) + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupported command: mod: %d, cmd: %d\n", + HINIC_MOD_HILINK, HINIC_HILINK_CMD_GET_LINK_INFO); + + return link_info.status; +} + +int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings) +{ + struct hinic_link_ksettings_info info = {0}; + u16 out_size = sizeof(info); + int err; + + err = hinic_global_func_id_get(hwdev, &info.func_id); + if (err) + return err; + + info.valid_bitmap = settings->valid_bitmap; + info.autoneg = settings->autoneg; + info.speed = settings->speed; + info.fec = settings->fec; + + err = __hilink_msg_to_mgmt_sync(hwdev, + HINIC_HILINK_CMD_SET_LINK_SETTINGS, + &info, sizeof(info), + &info, &out_size, 0); + if ((info.status != HINIC_MGMT_CMD_UNSUPPORTED && + info.status) || err || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", + err, info.status, out_size); + return -EFAULT; + } + + return info.status; +} + +int hinic_disable_tx_promisc(void *hwdev) +{ + struct hinic_promsic_info info = {0}; + u16 out_size = sizeof(info); + int err; + + err = hinic_global_func_id_get(hwdev, &info.func_id); + if (err) + return err; + + info.cfg = HINIC_TX_PROMISC_DISABLE; + err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, + HINIC_PORT_CMD_DISABLE_PROMISC, &info, + sizeof(info), &info, &out_size, 0); + if (err || !out_size || info.status) { + if (info.status == HINIC_MGMT_CMD_UNSUPPORTED) { + nic_info(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupported to disable TX promisc\n"); + return 0; + } + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to disable multihost promisc, err: %d, status: 0x%x, out size: 0x%x\n", + err, info.status, out_size); + return -EFAULT; + } + return 0; +} + +static bool hinic_if_sfp_absent(void *hwdev) +{ + struct card_node *chip_node = ((struct hinic_hwdev *)hwdev)->chip_node; + struct hinic_port_routine_cmd *rt_cmd; + struct hinic_cmd_get_light_module_abs sfp_abs = {0}; + u8 port_id = hinic_physical_port_id(hwdev); + u16 out_size = sizeof(sfp_abs); + int err; + bool sfp_abs_valid; + bool sfp_abs_status; + + rt_cmd = &chip_node->rt_cmd[port_id]; + mutex_lock(&chip_node->sfp_mutex); + sfp_abs_valid = rt_cmd->up_send_sfp_abs; + sfp_abs_status = (bool)rt_cmd->abs.abs_status; + if (sfp_abs_valid) { + mutex_unlock(&chip_node->sfp_mutex); + return sfp_abs_status; + } + mutex_unlock(&chip_node->sfp_mutex); + + sfp_abs.port_id = port_id; + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_SFP_ABS, + &sfp_abs, sizeof(sfp_abs), &sfp_abs, + &out_size); + if (sfp_abs.status || err || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n", + port_id, err, sfp_abs.status, out_size); + return true; + } + + return ((sfp_abs.abs_status == 0) ? 
false : true); +} + +int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len) +{ + struct hinic_cmd_get_std_sfp_info sfp_info = {0}; + u8 port_id; + u16 out_size = sizeof(sfp_info); + int err; + + if (!hwdev || !data || !len) + return -EINVAL; + + port_id = hinic_physical_port_id(hwdev); + if (port_id >= HINIC_MAX_PORT_ID) + return -EINVAL; + + if (hinic_if_sfp_absent(hwdev)) + return -ENXIO; + + sfp_info.port_id = port_id; + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO, + &sfp_info, sizeof(sfp_info), &sfp_info, + &out_size); + if (sfp_info.status || err || !out_size) { + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", + port_id, err, sfp_info.status, out_size); + return -EIO; + } + + *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE); + memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE); + + return 0; +} + +int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1) +{ + struct card_node *chip_node = NULL; + struct hinic_port_routine_cmd *rt_cmd; + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + u16 len; + u8 port_id; + int err; + + if (!hwdev || !data0 || !data1) + return -EINVAL; + + port_id = hinic_physical_port_id(hwdev); + if (port_id >= HINIC_MAX_PORT_ID) + return -EINVAL; + + if (hinic_if_sfp_absent(hwdev)) + return -ENXIO; + + chip_node = ((struct hinic_hwdev *)hwdev)->chip_node; + rt_cmd = &chip_node->rt_cmd[port_id]; + mutex_lock(&chip_node->sfp_mutex); + if (rt_cmd->up_send_sfp_info) { + *data0 = rt_cmd->sfp_info.sfp_qsfp_info[0]; + *data1 = rt_cmd->sfp_info.sfp_qsfp_info[1]; + mutex_unlock(&chip_node->sfp_mutex); + return 0; + } + mutex_unlock(&chip_node->sfp_mutex); + + err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len); + if (err) + return err; + + *data0 = sfp_data[0]; + *data1 = sfp_data[1]; + + return 0; +} + diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..08bb4404e7cfa3c2dfddd0a8207cbabb9b7975ed --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_cfg.h @@ -0,0 +1,638 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_CFG_H +#define HINIC_CFG_H + +#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define FW_SUPPORT_MAC_REUSE 0x1 +#define FW_SUPPORT_MAC_REUSE_FUNC(hwdev) \ + ((hwdev)->fw_support_func_flag & FW_SUPPORT_MAC_REUSE) + +#define HINIC_VLAN_PRIORITY_SHIFT 13 + +#define HINIC_RSS_INDIR_SIZE 256 +#define HINIC_DCB_TC_MAX 0x8 +#define HINIC_DCB_UP_MAX 0x8 +#define HINIC_DCB_COS_MAX 0x8 +#define HINIC_DCB_PG_MAX 0x8 + +#define HINIC_DCB_TSA_TC_SP 2 +#define HINIC_DCB_TSA_TC_DWRR 0 + +#define HINIC_RSS_KEY_SIZE 40 + +#define HINIC_MAX_NUM_RQ 128 + +#define HINIC_MIN_MTU_SIZE 256 +#define HINIC_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC_LRO_MAX_WQE_NUM_UPPER 32 +#define HINIC_LRO_MAX_WQE_NUM_LOWER 1 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_ARM 4 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT_X86 8 +#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8 +#define HINIC_LRO_WQE_NUM_PANGEA_DEFAULT 32 + +#define HINIC_LRO_RX_TIMER_UPPER 1024 +#define HINIC_LRO_RX_TIMER_LOWER 1 +#define HINIC_LRO_RX_TIMER_DEFAULT 16 +#define HINIC_LRO_RX_TIMER_DEFAULT_25GE 16 +#define HINIC_LRO_RX_TIMER_DEFAULT_100GE 64 +#define HINIC_LRO_RX_TIMER_DEFAULT_PG_10GE 10 +#define HINIC_LRO_RX_TIMER_DEFAULT_PG_100GE 8 + +#if defined(__aarch64__) +#define HINIC_LOWEST_LATENCY 1 +#define HINIC_RX_RATE_LOW 400000 +#define HINIC_RX_COAL_TIME_LOW 20 +#define HINIC_RX_PENDING_LIMIT_LOW 2 +#define HINIC_RX_RATE_HIGH 1000000 +#define HINIC_RX_COAL_TIME_HIGH 225 +#define HINIC_RX_PENDING_LIMIT_HIGH 50 +#define HINIC_RX_RATE_THRESH 35000 +#define HINIC_TX_RATE_THRESH 35000 +#define HINIC_RX_RATE_LOW_VM 400000 +#define HINIC_RX_PENDING_LIMIT_HIGH_VM 50 +#else +#define HINIC_LOWEST_LATENCY 1 +#define HINIC_RX_RATE_LOW 400000 +#define HINIC_RX_COAL_TIME_LOW 16 +#define HINIC_RX_PENDING_LIMIT_LOW 2 +#define HINIC_RX_RATE_HIGH 1000000 +#define HINIC_RX_COAL_TIME_HIGH 225 +#define HINIC_RX_PENDING_LIMIT_HIGH 8 +#define HINIC_RX_RATE_THRESH 50000 +#define HINIC_TX_RATE_THRESH 50000 +#define HINIC_RX_RATE_LOW_VM 100000 +#define HINIC_RX_PENDING_LIMIT_HIGH_VM 87 +#endif + +enum hinic_board_type { + HINIC_BOARD_UNKNOWN = 0, + HINIC_BOARD_10GE = 1, + HINIC_BOARD_25GE = 2, + HINIC_BOARD_40GE = 3, + HINIC_BOARD_100GE = 4, + HINIC_BOARD_PG_TP_10GE = 5, + HINIC_BOARD_PG_SM_25GE = 6, + HINIC_BOARD_PG_100GE = 7, +}; + +enum hinic_os_type { + HINIC_OS_UNKNOWN = 0, + HINIC_OS_HUAWEI = 1, + HINIC_OS_NON_HUAWEI = 2, +}; + +enum hinic_cpu_type { + HINIC_CPU_UNKNOWN = 0, + HINIC_CPU_X86_GENERIC = 1, + HINIC_CPU_ARM_GENERIC = 2, +}; + +struct hinic_adaptive_rx_cfg { + u32 lowest_lat; + u32 rate_low; + u32 coal_time_low; + u32 pending_limit_low; + u32 rate_high; + u32 coal_time_high; + u32 pending_limit_high; + u32 rate_thresh; +}; + +struct hinic_lro_cfg { + u32 enable; + u32 timer; + u32 buffer_size; +}; + +struct hinic_environment_info { + enum hinic_board_type board; + enum hinic_os_type os; + enum hinic_cpu_type cpu; +}; + +struct hinic_adaptive_cfg { + struct hinic_adaptive_rx_cfg adaptive_rx; + struct hinic_lro_cfg lro; +}; + +enum hinic_rss_hash_type { + HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC_RSS_HASH_ENGINE_TYPE_TOEP, + + HINIC_RSS_HASH_ENGINE_TYPE_MAX, +}; + +struct ifla_vf_info; +struct hinic_dcb_state; + +struct nic_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +enum nic_media_type { + MEDIA_UNKNOWN = -1, + MEDIA_FIBRE = 0, + MEDIA_COPPER, + MEDIA_BACKPLANE +}; + +enum nic_speed_level { + LINK_SPEED_10MB = 0, + LINK_SPEED_100MB, + 
LINK_SPEED_1GB, + LINK_SPEED_10GB, + LINK_SPEED_25GB, + LINK_SPEED_40GB, + LINK_SPEED_100GB, + LINK_SPEED_LEVELS, +}; + +enum hinic_link_mode { + HINIC_10GE_BASE_KR = 0, + HINIC_40GE_BASE_KR4 = 1, + HINIC_40GE_BASE_CR4 = 2, + HINIC_100GE_BASE_KR4 = 3, + HINIC_100GE_BASE_CR4 = 4, + HINIC_25GE_BASE_KR_S = 5, + HINIC_25GE_BASE_CR_S = 6, + HINIC_25GE_BASE_KR = 7, + HINIC_25GE_BASE_CR = 8, + HINIC_GE_BASE_KX = 9, + HINIC_LINK_MODE_NUMBERS, + + HINIC_SUPPORTED_UNKNOWN = 0xFFFF, +}; + +enum hinic_port_type { + HINIC_PORT_TP, /* BASET */ + HINIC_PORT_AUI, + HINIC_PORT_MII, + HINIC_PORT_FIBRE, /* OPTICAL */ + HINIC_PORT_BNC, + HINIC_PORT_ELEC, + HINIC_PORT_COPPER, /* PORT_DA */ + HINIC_PORT_AOC, + HINIC_PORT_BACKPLANE, + HINIC_PORT_NONE = 0xEF, + HINIC_PORT_OTHER = 0xFF, +}; + +enum hinic_link_status { + HINIC_LINK_DOWN = 0, + HINIC_LINK_UP +}; + +struct nic_pause_config { + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct nic_lro_info { + u16 func_id; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 lro_timer_en; + u32 lro_period; +}; + +struct nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +struct hinic_vport_stats { + u64 tx_unicast_pkts_vport; + u64 tx_unicast_bytes_vport; + u64 tx_multicast_pkts_vport; + u64 tx_multicast_bytes_vport; + u64 tx_broadcast_pkts_vport; + u64 tx_broadcast_bytes_vport; + + u64 rx_unicast_pkts_vport; + u64 rx_unicast_bytes_vport; + u64 rx_multicast_pkts_vport; + u64 rx_multicast_bytes_vport; + u64 rx_broadcast_pkts_vport; + u64 rx_broadcast_bytes_vport; + + u64 tx_discard_vport; + u64 rx_discard_vport; + u64 tx_err_vport; + u64 rx_err_vport; +}; + +struct hinic_phy_port_stats { + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + + u64 mac_rx_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_control_pkt_num; + u64 mac_rx_y1731_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 
mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_jabber_pkt_num; + + u64 mac_tx_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_control_pkt_num; + u64 mac_tx_y1731_pkt_num; + u64 mac_tx_1588_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 mac_rx_higig2_ext_pkt_num; + u64 mac_rx_higig2_message_pkt_num; + u64 mac_rx_higig2_error_pkt_num; + u64 mac_rx_higig2_cpu_ctrl_pkt_num; + u64 mac_rx_higig2_unicast_pkt_num; + u64 mac_rx_higig2_broadcast_pkt_num; + u64 mac_rx_higig2_l2_multicast_pkt_num; + u64 mac_rx_higig2_l3_multicast_pkt_num; + + u64 mac_tx_higig2_message_pkt_num; + u64 mac_tx_higig2_ext_pkt_num; + u64 mac_tx_higig2_cpu_ctrl_pkt_num; + u64 mac_tx_higig2_unicast_pkt_num; + u64 mac_tx_higig2_broadcast_pkt_num; + u64 mac_tx_higig2_l2_multicast_pkt_num; + u64 mac_tx_higig2_l3_multicast_pkt_num; +}; + +enum hinic_rq_filter_type { + HINIC_RQ_FILTER_TYPE_NONE = 0x0, + HINIC_RQ_FILTER_TYPE_MAC_ONLY = (1 << 0), + HINIC_RQ_FILTER_TYPE_VLAN_ONLY = (1 << 1), + HINIC_RQ_FILTER_TYPE_VLANMAC = (1 << 2), + HINIC_RQ_FILTER_TYPE_VXLAN = (1 << 3), + HINIC_RQ_FILTER_TYPE_GENEVE = (1 << 4), +}; + +struct hinic_rq_filter_info { + u16 qid; + u8 filter_type;/* 1: mac, 8: vxlan */ + u8 qflag;/*0:stdq, 1:defq, 2: netq*/ + + u8 mac[ETH_ALEN]; + struct { + u8 inner_mac[ETH_ALEN]; + u32 vni; + } vxlan; +}; + +#define HINIC_MGMT_VERSION_MAX_LEN 32 + +#define HINIC_FW_VERSION_NAME 16 +#define HINIC_FW_VERSION_SECTION_CNT 4 +#define HINIC_FW_VERSION_SECTION_BORDER 0xFF +struct hinic_fw_version { + u8 mgmt_ver[HINIC_FW_VERSION_NAME]; + u8 microcode_ver[HINIC_FW_VERSION_NAME]; + u8 boot_ver[HINIC_FW_VERSION_NAME]; +}; + +enum hinic_valid_link_settings { + HILINK_LINK_SET_SPEED = 0x1, + HILINK_LINK_SET_AUTONEG = 0x2, + HILINK_LINK_SET_FEC = 0x4, +}; + +struct hinic_link_ksettings { + u32 valid_bitmap; + u32 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +enum hinic_link_follow_status { + HINIC_LINK_FOLLOW_DEFAULT, + HINIC_LINK_FOLLOW_PORT, + HINIC_LINK_FOLLOW_SEPARATE, + HINIC_LINK_FOLLOW_STATUS_MAX, +}; + +enum hinic_lro_en_status { + HINIC_LRO_STATUS_DISABLE, + HINIC_LRO_STATUS_ENABLE, + HINIC_LRO_STATUS_UNSET, +}; + +#define HINIC_VLAN_FILTER_EN (1U << 0) +#define HINIC_BROADCAST_FILTER_EX_EN (1U << 1) + +#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF + +/* Set mac_vlan table */ +int hinic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id); + +int hinic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id); + +int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, + u16 vlan_id, u16 func_id); +int hinic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); +/* Obtaining the permanent mac */ +int hinic_get_default_mac(void *hwdev, u8 *mac_addr); +/* Check whether the current 
solution is using this interface, + * the current code does not invoke the sdk interface to set mtu + */ +int hinic_set_port_mtu(void *hwdev, u32 new_mtu); +/* Set vlan leaf table */ +int hinic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +int hinic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); + +int hinic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info); + +int hinic_set_autoneg(void *hwdev, bool enable); + +int hinic_force_port_relink(void *hwdev); + +int hinic_get_link_mode(void *hwdev, enum hinic_link_mode *supported, + enum hinic_link_mode *advertised); + +int hinic_set_port_link_status(void *hwdev, bool enable); + +int hinic_set_speed(void *hwdev, enum nic_speed_level speed); +/* SPEED_UNKNOWN = -1,SPEED_10MB_LINK = 0 */ +int hinic_get_speed(void *hwdev, enum nic_speed_level *speed); + +int hinic_get_link_state(void *hwdev, u8 *link_state); + +int hinic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); + +int hinic_get_hw_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic_set_rx_mode(void *hwdev, u32 enable); + +/* offload feature */ +int hinic_set_rx_vlan_offload(void *hwdev, u8 en); + +int hinic_set_rx_csum_offload(void *hwdev, u32 en); + +int hinic_set_tx_tso(void *hwdev, u8 tso_en); + +/* Linux NIC used */ +int hinic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 wqe_num); + +/* Win NIC used */ +int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num); + +/* Related command dcbtool */ +int hinic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); + +int hinic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); + +int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, + u8 *up_bw, u8 *prio); + +int hinic_dcb_get_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, + u8 *up_bw, u8 *prio); + +int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up); + +int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map); + +/* nictool adaptation interface*/ +int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period); +/* There should be output parameters, add the + * output parameter struct nic_up_offload *cfg + */ +int hinic_get_rx_lro(void *hwdev, struct nic_lro_info *lro_info); + +int hinic_get_jumbo_frame_size(void *hwdev, u32 *jumbo_size); + +int hinic_set_jumbo_frame_size(void *hwdev, u32 jumbo_size); + +int hinic_set_loopback_mode(void *hwdev, bool enable); +int hinic_set_loopback_mode_ex(void *hwdev, u32 mode, u32 enable); +int hinic_get_loopback_mode_ex(void *hwdev, u32 *mode, u32 *enable); + +int hinic_get_port_enable_state(void *hwdev, bool *enable); + +int hinic_get_vport_enable_state(void *hwdev, bool *enable); + +int hinic_set_lli_state(void *hwdev, u8 lli_state); + +int hinic_set_vport_enable(void *hwdev, bool enable); + +int hinic_set_port_enable(void *hwdev, bool enable); + +/* rss */ +int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type); + +int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, + struct nic_rss_type *rss_type); + +int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, const u8 *temp); + +int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp); + +int hinic_rss_get_hash_engine(void *hwdev, u8 tmpl_idx, u8 *type); + +int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type); + +int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table); + 
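+/* The RSS indirection table has HINIC_RSS_INDIR_SIZE entries; each entry + * selects the receive queue used for one hash result, so writing the table + * with hinic_rss_set_indir_tbl() controls how flows are spread across RQs. + */ 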
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, const u32 *indir_table); + +int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc); + +int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx); + +int hinic_rss_template_free(void *hwdev, u8 tmpl_idx); + +/* disable or enable traffic of all functions in the same port */ +int hinic_set_port_funcs_state(void *hwdev, bool enable); + +int hinic_reset_port_link_cfg(void *hwdev); + +int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats); + +int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats); + +int hinic_get_mgmt_version(void *hwdev, u8 *mgmt_ver); + +int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver); + +int hinic_save_vf_mac(void *hwdev, u16 vf_id, u8 *mac); + +int hinic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); + +int hinic_kill_vf_vlan(void *hwdev, int vf_id); + +int hinic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); + +u16 hinic_vf_info_vlanprio(void *hwdev, int vf_id); + +bool hinic_vf_is_registered(void *hwdev, u16 vf_id); + +void hinic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); + +void hinic_notify_all_vfs_link_changed(void *hwdev, u8 link); + +void hinic_save_pf_link_status(void *hwdev, u8 link); + +int hinic_set_vf_link_state(void *hwdev, u16 vf_id, int link); + +int hinic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); + +bool hinic_vf_info_spoofchk(void *hwdev, int vf_id); + +int hinic_set_vf_trust(void *hwdev, u16 vf_id, bool trust); +bool hinic_vf_info_trust(void *hwdev, int vf_id); + +int hinic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); + +int hinic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id); + +int hinic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id); + +int hinic_set_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_get_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_get_pf_dcb_state(void *hwdev, struct hinic_dcb_state *dcb_state); + +int hinic_set_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic_get_ipsu_mac(void *hwdev, u16 index, u8 *mac_addr, u16 *vlan_id, + u16 *func_id); +int hinic_set_anti_attack(void *hwdev, bool enable); + +int hinic_flush_sq_res(void *hwdev); + +int hinic_set_super_cqe_state(void *hwdev, bool enable); + +int hinic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); + +int hinic_force_drop_tx_pkt(void *hwdev); + +int hinic_update_pf_bw(void *hwdev); + +int hinic_set_pf_bw_limit(void *hwdev, u32 bw_limit); + +int hinic_set_link_status_follow(void *hwdev, + enum hinic_link_follow_status status); +int hinic_disable_tx_promisc(void *hwdev); + +/* HILINK module */ +int hinic_set_link_settings(void *hwdev, struct hinic_link_ksettings *settings); + +int hinic_enable_netq(void *hwdev, u8 en); +int hinic_add_hw_rqfilter(void *hwdev, + struct hinic_rq_filter_info *filter_info); +int hinic_del_hw_rqfilter(void *hwdev, + struct hinic_rq_filter_info *filter_info); +int hinic_get_sfp_eeprom(void *hwdev, u8 *data, u16 *len); +int hinic_get_sfp_type(void *hwdev, u8 *data0, u8 *data1); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c new file mode 100644 index 0000000000000000000000000000000000000000..7407241216f6f256ba3dbad4e17dcce8682a29f2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hwif.h" +#include "hinic_wq.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_nic_io.h" +#include "hinic_nic.h" +#include "hinic_dbg.h" + +#define INVALID_PI 0xFFFF + +u16 hinic_dbg_get_qp_num(void *hwdev) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev) + return 0; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + if (!nic_io) + return 0; + + return nic_io->num_qps; +} + +void *hinic_dbg_get_qp_handle(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io; + + if (!hwdev) + return NULL; + + nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + if (!nic_io) + return NULL; + + if (q_id >= nic_io->num_qps) + return NULL; + + return &nic_io->qps[q_id]; +} + +void *hinic_dbg_get_sq_wq_handle(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->sq.wq; +} + +void *hinic_dbg_get_rq_wq_handle(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->rq.wq; +} + +u16 hinic_dbg_get_sq_pi(void *hwdev, u16 q_id) +{ + struct hinic_wq *wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id); + + if (!wq) + return 0; + + return ((u16)wq->prod_idx) & wq->mask; +} + +u16 hinic_dbg_get_rq_hw_pi(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (qp) + return cpu_to_be16(*qp->rq.pi_virt_addr); + + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, "Get rq hw pi failed\n"); + + return INVALID_PI; +} + +u16 hinic_dbg_get_rq_sw_pi(void *hwdev, u16 q_id) +{ + struct hinic_wq *wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id); + + if (!wq) + return 0; + + return ((u16)wq->prod_idx) & wq->mask; +} + +void *hinic_dbg_get_sq_ci_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return NULL; + + return qp->sq.cons_idx_addr; +} + +u64 hinic_dbg_get_sq_cla_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return 0; + + return qp->sq.wq->block_paddr; +} + +u64 hinic_dbg_get_rq_cla_addr(void *hwdev, u16 q_id) +{ + struct hinic_qp *qp = hinic_dbg_get_qp_handle(hwdev, q_id); + + if (!qp) + return 0; + + return qp->rq.wq->block_paddr; +} + +int hinic_dbg_get_sq_db_addr(void *hwdev, u16 q_id, u64 **map_addr, + u64 *phy_addr, u32 *pg_idx) +{ + struct hinic_qp *qp; + struct hinic_hwif *hwif; + + qp = hinic_dbg_get_qp_handle(hwdev, q_id); + if (!qp) + return -EFAULT; + + hwif = ((struct hinic_hwdev *)hwdev)->hwif; + + *map_addr = (u64 *)qp->sq.db_addr; + *pg_idx = DB_IDX(qp->sq.db_addr, hwif->db_base); + *phy_addr = hwif->db_base_phy + (*pg_idx) * HINIC_DB_PAGE_SIZE; + + return 0; +} + +u16 hinic_dbg_get_global_qpn(const void *hwdev) +{ + if 
(!hwdev) + return 0; + + return ((struct hinic_hwdev *)hwdev)->nic_io->global_qpn; +} + +static int get_wqe_info(struct hinic_wq *wq, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + void *src_wqe; + u32 offset; + u16 i; + + if (idx + wqebb_cnt > wq->q_depth) + return -EFAULT; + + if (*wqe_size != (u16)(wq->wqebb_size * wqebb_cnt)) { + pr_err("Unexpect out buf size from user: %d, expect: %d\n", + *wqe_size, (u16)(wq->wqebb_size * wqebb_cnt)); + return -EFAULT; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqe = (void *)hinic_slq_get_addr(wq, idx + i); + offset = i * wq->wqebb_size; + memcpy(wqe + offset, src_wqe, wq->wqebb_size); + } + + return 0; +} + +int hinic_dbg_get_sq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + struct hinic_wq *wq; + int err; + + wq = hinic_dbg_get_sq_wq_handle(hwdev, q_id); + if (!wq) + return -EFAULT; + + err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size); + + return err; +} + +int hinic_dbg_get_rq_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, u16 *wqe_size) +{ + struct hinic_wq *wq; + int err; + + wq = hinic_dbg_get_rq_wq_handle(hwdev, q_id); + if (!wq) + return -EFAULT; + + err = get_wqe_info(wq, idx, wqebb_cnt, wqe, wqe_size); + + return err; +} + +int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) +{ + if (!hw_stats || *out_size != sizeof(struct hinic_hw_stats)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(struct hinic_hw_stats)); + return -EFAULT; + } + + memcpy(hw_stats, &((struct hinic_hwdev *)hwdev)->hw_stats, + sizeof(struct hinic_hw_stats)); + return 0; +} + +u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size) +{ + if (*out_size != sizeof(struct hinic_hw_stats)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct hinic_hw_stats)); + return -EFAULT; + } + + memset((void *)&((struct hinic_hwdev *)hwdev)->hw_stats, 0, + sizeof(struct hinic_hw_stats)); + memset((void *)((struct hinic_hwdev *)hwdev)->chip_fault_stats, 0, + HINIC_CHIP_FAULT_SIZE); + + return 0; +} + +void hinic_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset) +{ + if (offset < 0 || offset > HINIC_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %d\n", offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HINIC_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset, + MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct hinic_hwdev *)hwdev)->chip_fault_stats + offset, + HINIC_CHIP_FAULT_SIZE - offset); +} + +int hinic_dbg_get_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_cfg *nic_cfg; + + if (!hwdev) + return -EINVAL; + + if (!dev->nic_io) + return -EINVAL; + + nic_cfg = &dev->nic_io->nic_cfg; + + *pf_bw_limit = nic_cfg->pf_bw_limit; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3f5558086c3aeb767b28d16abfa4f520d33198 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by 
the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_NIC_DEV_H +#define HINIC_NIC_DEV_H + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_tx.h" +#include "hinic_rx.h" + +#define HINIC_DRV_NAME "hinic" +#define HINIC_CHIP_NAME "hinic" + +#define HINIC_DRV_VERSION "2.3.2.18" +struct vf_data_storage; + +#define HINIC_FUNC_IS_VF(hwdev) (hinic_func_type(hwdev) == TYPE_VF) + +enum hinic_flags { + HINIC_INTF_UP, + HINIC_MAC_FILTER_CHANGED, + HINIC_LP_TEST, + HINIC_RSS_ENABLE, + HINIC_DCB_ENABLE, + HINIC_SAME_RXTX, + HINIC_INTR_ADAPT, + HINIC_UPDATE_MAC_FILTER, + HINIC_ETS_ENABLE, +}; + +#define RX_BUFF_NUM_PER_PAGE 2 +#define HINIC_MAX_MAC_NUM 3 +#define LP_PKT_CNT 64 + +struct hinic_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; +}; + +enum hinic_rx_mode_state { + HINIC_HW_PROMISC_ON, + HINIC_HW_ALLMULTI_ON, + HINIC_PROMISC_FORCE_ON, + HINIC_ALLMULTI_FORCE_ON, +}; + +enum mac_filter_state { + HINIC_MAC_WAIT_HW_SYNC, + HINIC_MAC_HW_SYNCED, + HINIC_MAC_WAIT_HW_UNSYNC, + HINIC_MAC_HW_UNSYNCED, +}; + +struct hinic_mac_filter { + struct list_head list; + u8 addr[ETH_ALEN]; + unsigned long state; +}; + +/* TC bandwidth allocation per direction */ +struct hinic_tc_attr { + u8 pg_id; /* Priority Group(PG) ID */ + u8 bw_pct; /* % of PG's bandwidth */ + u8 up_map; /* User Priority to Traffic Class mapping */ + u8 prio_type; +}; + +/* User priority configuration */ +struct hinic_tc_cfg { + struct hinic_tc_attr path[2]; /* One each for Tx/Rx */ + + bool pfc_en; +}; + +struct hinic_dcb_config { + u8 pg_tcs; + u8 pfc_tcs; + + bool pfc_state; + + struct hinic_tc_cfg tc_cfg[HINIC_DCB_TC_MAX]; + u8 bw_pct[2][HINIC_DCB_PG_MAX]; /* One each for Tx/Rx */ +}; + +enum hinic_intr_flags { + HINIC_INTR_ON, + HINIC_RESEND_ON, +}; + +struct hinic_irq { + struct net_device *netdev; + /* IRQ corresponding index number */ + u16 msix_entry_idx; + u32 irq_id; /* The IRQ number from OS */ + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic_txq *txq; + struct hinic_rxq *rxq; + unsigned long intr_flag; +}; + +struct hinic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer_cfg; + u8 resend_timer_cfg; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; + + u8 user_set_intr_coal_flag; +}; + +#define HINIC_NIC_STATS_INC(nic_dev, field) \ +{ \ + u64_stats_update_begin(&(nic_dev)->stats.syncp); \ + (nic_dev)->stats.field++; \ + u64_stats_update_end(&(nic_dev)->stats.syncp); \ +} + +struct hinic_nic_stats { + u64 netdev_tx_timeout; + + /* Subdivision statistics show in private tool */ + u64 tx_carrier_off_drop; + u64 tx_invalid_qid; + + struct u64_stats_sync syncp; +}; + +struct hinic_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + void *hwdev; + + int poll_weight; + + unsigned long *vlan_bitmap; + + u16 num_qps; + u16 max_qps; + + u32 msg_enable; + unsigned long flags; + + u16 sq_depth; + u16 rq_depth; + + /* mapping from priority */ + u8 sq_cos_mapping[HINIC_DCB_UP_MAX]; + u8 default_cos_id; + struct hinic_txq *txqs; + struct hinic_rxq *rxqs; + + struct nic_service_cap nic_cap; + + struct irq_info *qps_irq_info; + struct hinic_irq *irq_cfg; + struct 
work_struct rx_mode_work; + struct delayed_work moderation_task; + struct workqueue_struct *workq; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + unsigned long rx_mod_state; + int netdev_uc_cnt; + int netdev_mc_cnt; + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; + + u8 rss_tmpl_idx; + u16 num_rss; + u16 rss_limit; + u8 rss_hash_engine; + struct nic_rss_type rss_type; + u8 *rss_hkey_user; + /* hkey in big endian */ + u32 *rss_hkey_user_be; + u32 *rss_indir_user; + + u8 dcbx_cap; + u32 dcb_changes; + u8 max_cos; + u8 up_valid_bitmap; + u8 up_cos[HINIC_DCB_UP_MAX]; + struct ieee_ets hinic_ieee_ets_default; + struct ieee_ets hinic_ieee_ets; + struct ieee_pfc hinic_ieee_pfc; + struct hinic_dcb_config dcb_cfg; + struct hinic_dcb_config tmp_dcb_cfg; + struct hinic_dcb_config save_dcb_cfg; + unsigned long dcb_flags; + int disable_port_cnt; + /* lock for disable or enable traffic flow */ + struct semaphore dcb_sem; + + bool heart_status; + + struct hinic_intr_coal_info *intr_coalesce; + unsigned long last_moder_jiffies; + u32 adaptive_rx_coal; + u8 intr_coal_set_flag; + u32 his_link_speed; + /* interrupt coalesce must be different in virtual machine */ + bool in_vm; + bool is_vm_slave; + int is_bm_slave; + struct hinic_nic_stats stats; + /* lock for nic resource */ + struct mutex nic_mutex; + bool force_port_disable; + struct semaphore port_state_sem; + u8 link_status; + + struct hinic_environment_info env_info; + struct hinic_adaptive_cfg adaptive_cfg; + + /* pangea cpu affinity setting */ + bool force_affinity; + cpumask_t affinity_mask; + + u32 lro_replenish_thld; + u16 rx_buff_len; + u32 page_order; + + struct bpf_prog *xdp_prog; +}; + +extern struct hinic_uld_info nic_uld_info; + +int hinic_open(struct net_device *netdev); +int hinic_close(struct net_device *netdev); +void hinic_set_ethtool_ops(struct net_device *netdev); +void hinicvf_set_ethtool_ops(struct net_device *netdev); +void hinic_update_num_qps(struct net_device *netdev); +int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int hinic_force_port_disable(struct hinic_nic_dev *nic_dev); +int hinic_force_set_port_state(struct hinic_nic_dev *nic_dev, bool enable); +int hinic_maybe_set_port_state(struct hinic_nic_dev *nic_dev, bool enable); +void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status); + +int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev); +int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev); + +bool hinic_is_xdp_enable(struct hinic_nic_dev *nic_dev); +int hinic_xdp_max_mtu(struct hinic_nic_dev *nic_dev); + +#define hinic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level(&(nic_dev)->pdev->dev, \ + format, ## arg); \ +} while (0) + +#define hinic_info(nic_dev, msglvl, format, arg...) \ + hinic_msg(info, nic_dev, msglvl, format, ## arg) + +#define hinic_warn(nic_dev, msglvl, format, arg...) \ + hinic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define hinic_err(nic_dev, msglvl, format, arg...) 
\ + hinic_msg(err, nic_dev, msglvl, format, ## arg) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c new file mode 100644 index 0000000000000000000000000000000000000000..cd57651657739a8cad064de58d3c6bbd49a5c239 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.c @@ -0,0 +1,1049 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_wq.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_nic_cfg.h" +#include "hinic_mgmt_interface.h" +#include "hinic_nic_io.h" +#include "hinic_nic.h" +#include "hinic_ctx_def.h" +#include "hinic_wq.h" +#include "hinic_cmdq.h" + +#define HINIC_DEAULT_TX_CI_PENDING_LIMIT 0 +#define HINIC_DEAULT_TX_CI_COALESCING_TIME 0 + +static unsigned char tx_pending_limit = HINIC_DEAULT_TX_CI_PENDING_LIMIT; +module_param(tx_pending_limit, byte, 0444); +MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)"); + +static unsigned char tx_coalescing_time = HINIC_DEAULT_TX_CI_COALESCING_TIME; +module_param(tx_coalescing_time, byte, 0444); +MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)"); + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +struct hinic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +struct hinic_sq_ctxt { + u32 ceq_attr; + + u32 ci_owner; + + u32 wq_pfn_hi; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_rq_ctxt { + u32 ceq_attr; + + u32 pi_intr_attr; + + u32 wq_pfn_hi_ci; + u32 wq_pfn_lo; + + u32 pref_cache; + u32 pref_owner; + + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic_sq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_rq_ctxt_block { + struct hinic_qp_ctxt_header cmdq_hdr; + struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_sq_db { + u32 db_info; +}; + +struct hinic_addr { + u32 addr_hi; + u32 addr_lo; +}; + +struct hinic_clean_queue_ctxt { + struct hinic_qp_ctxt_header cmdq_hdr; + u32 ctxt_size; + struct hinic_addr cqe_dma_addr[HINIC_RQ_CQ_MAX]; +}; + +static int init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id, + u16 sq_msix_idx, void *cons_idx_addr, void __iomem *db_addr) +{ + sq->wq = wq; + sq->q_id = q_id; + sq->owner = 1; + sq->msix_entry_idx = sq_msix_idx; + + sq->cons_idx_addr = cons_idx_addr; + sq->db_addr = db_addr; + + return 0; +} + +static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq, + u16 q_id, u16 rq_msix_idx) 
+{ + rq->wq = wq; + rq->q_id = q_id; + rq->cqe_dma_addr = 0; + + rq->msix_entry_idx = rq_msix_idx; + + rq->pi_virt_addr = dma_zalloc_coherent(dev_hdl, PAGE_SIZE, + &rq->pi_dma_addr, GFP_KERNEL); + if (!rq->pi_virt_addr) { + nic_err(dev_hdl, "Failed to allocate rq pi virtual addr\n"); + return -ENOMEM; + } + + return 0; +} + +void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr) +{ + struct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev; + struct hinic_nic_io *nic_io; + + nic_io = dev->nic_io; + nic_io->qps[qid].rq.cqe_dma_addr = cqe_dma_ddr; +} + +static void clean_rq(struct hinic_rq *rq, void *dev_hdl) +{ + dma_free_coherent(dev_hdl, PAGE_SIZE, rq->pi_virt_addr, + rq->pi_dma_addr); +} + +static int create_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp, + u16 q_id, u16 qp_msix_idx, int max_sq_sge) +{ + struct hinic_sq *sq = &qp->sq; + struct hinic_rq *rq = &qp->rq; + void __iomem *db_addr; + int err; + + err = hinic_wq_allocate(&nic_io->wqs, &nic_io->sq_wq[q_id], + HINIC_SQ_WQEBB_SIZE, + nic_io->hwdev->wq_page_size, nic_io->sq_depth, + MAX_WQE_SIZE(max_sq_sge, HINIC_SQ_WQEBB_SIZE)); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for SQ\n"); + return err; + } + + err = hinic_wq_allocate(&nic_io->wqs, &nic_io->rq_wq[q_id], + HINIC_RQ_WQE_SIZE, nic_io->hwdev->wq_page_size, + nic_io->rq_depth, HINIC_RQ_WQE_SIZE); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to allocate WQ for RQ\n"); + goto rq_alloc_err; + } + + /* we don't use direct wqe for sq */ + err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to alloc sq doorbell addr\n"); + goto alloc_db_err; + } + + err = init_sq(sq, &nic_io->sq_wq[q_id], q_id, qp_msix_idx, + HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id), db_addr); + if (err != 0) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to init sq\n"); + goto sq_init_err; + } + + err = init_rq(rq, nic_io->hwdev->dev_hdl, &nic_io->rq_wq[q_id], + q_id, qp_msix_idx); + if (err) { + nic_err(nic_io->hwdev->dev_hdl, "Failed to init rq\n"); + goto rq_init_err; + } + + return 0; + +rq_init_err: +sq_init_err: + hinic_free_db_addr(nic_io->hwdev, db_addr, NULL); + +alloc_db_err: + hinic_wq_free(&nic_io->wqs, &nic_io->rq_wq[q_id]); + +rq_alloc_err: + hinic_wq_free(&nic_io->wqs, &nic_io->sq_wq[q_id]); + + return err; +} + +static void destroy_qp(struct hinic_nic_io *nic_io, struct hinic_qp *qp) +{ + clean_rq(&qp->rq, nic_io->hwdev->dev_hdl); + + hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL); + + hinic_wq_free(&nic_io->wqs, qp->sq.wq); + hinic_wq_free(&nic_io->wqs, qp->rq.wq); +} + +/* alloc qps and init qps ctxt */ +int hinic_create_qps(void *dev, u16 num_qp, u16 sq_depth, u16 rq_depth, + struct irq_info *qps_msix_arry, int max_sq_sge) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + u16 q_id, i, max_qps; + int err; + + if (!hwdev || !qps_msix_arry) + return -EFAULT; + + max_qps = hinic_func_max_qnum(hwdev); + if (num_qp > max_qps) { + nic_err(hwdev->dev_hdl, "Create number of qps: %d > max number of qps: %d\n", + num_qp, max_qps); + return -EINVAL; + } + + nic_io = hwdev->nic_io; + + nic_io->max_qps = max_qps; + nic_io->num_qps = num_qp; + nic_io->sq_depth = sq_depth; + nic_io->rq_depth = rq_depth; + + err = hinic_wqs_alloc(&nic_io->wqs, 2 * num_qp, hwdev->dev_hdl); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to allocate WQS for IO\n"); + return err; + } + + nic_io->qps = kcalloc(num_qp, sizeof(*nic_io->qps), GFP_KERNEL); + if (!nic_io->qps) { + err = 
-ENOMEM; + goto alloc_qps_err; + } + + nic_io->ci_vaddr_base = + dma_zalloc_coherent(hwdev->dev_hdl, + CI_TABLE_SIZE(num_qp, PAGE_SIZE), + &nic_io->ci_dma_base, GFP_KERNEL); + if (!nic_io->ci_vaddr_base) { + err = -ENOMEM; + goto ci_base_err; + } + + nic_io->sq_wq = kcalloc(num_qp, sizeof(*nic_io->sq_wq), GFP_KERNEL); + if (!nic_io->sq_wq) { + err = -ENOMEM; + goto sq_wq_err; + } + + nic_io->rq_wq = kcalloc(num_qp, sizeof(*nic_io->rq_wq), GFP_KERNEL); + if (!nic_io->rq_wq) { + err = -ENOMEM; + goto rq_wq_err; + } + + for (q_id = 0; q_id < num_qp; q_id++) { + err = create_qp(nic_io, &nic_io->qps[q_id], q_id, + qps_msix_arry[q_id].msix_entry_idx, max_sq_sge); + if (err) { + nic_err(hwdev->dev_hdl, + "Failed to allocate qp %d, err: %d\n", + q_id, err); + goto create_qp_err; + } + } + + return 0; + +create_qp_err: + for (i = 0; i < q_id; i++) + destroy_qp(nic_io, &nic_io->qps[i]); + + kfree(nic_io->rq_wq); + +rq_wq_err: + kfree(nic_io->sq_wq); + +sq_wq_err: + dma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + +ci_base_err: + kfree(nic_io->qps); + +alloc_qps_err: + hinic_wqs_free(&nic_io->wqs); + + return err; +} +EXPORT_SYMBOL(hinic_create_qps); + +void hinic_free_qps(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + u16 i; + + if (!hwdev) + return; + + nic_io = hwdev->nic_io; + + for (i = 0; i < nic_io->num_qps; i++) + destroy_qp(nic_io, &nic_io->qps[i]); + + kfree(nic_io->rq_wq); + kfree(nic_io->sq_wq); + + dma_free_coherent(hwdev->dev_hdl, + CI_TABLE_SIZE(nic_io->num_qps, PAGE_SIZE), + nic_io->ci_vaddr_base, nic_io->ci_dma_base); + + kfree(nic_io->qps); + + hinic_wqs_free(&nic_io->wqs); +} +EXPORT_SYMBOL(hinic_free_qps); + +void hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues, u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + + if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) + qp_ctxt_hdr->addr_offset = + SQ_CTXT_OFFSET(max_queues, max_queues, q_id); + else + qp_ctxt_hdr->addr_offset = + RQ_CTXT_OFFSET(max_queues, max_queues, q_id); + + qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); + + hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn, + struct hinic_sq_ctxt *sq_ctxt) +{ + struct hinic_wq *wq = sq->wq; + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)wq->cons_idx; + pi_start = (u16)wq->prod_idx; + + /* read the first page from the HW table */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + /* If only one page, use 0-level CLA */ + if (wq->num_q_pages == 1) + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + else + wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr); + + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) | + SQ_CTXT_CEQ_ATTR_SET(0, EN); + + sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) | + SQ_CTXT_CI_SET(1, OWNER); + + sq_ctxt->wq_pfn_hi = + SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(pi_start, PI); + + sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->pref_cache = 
+ SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_owner = 1; + + sq_ctxt->pref_wq_pfn_hi_ci = + SQ_CTXT_PREF_SET(ci_start, CI) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); + + sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->wq_block_pfn_hi = + SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +void hinic_rq_prepare_ctxt(struct hinic_rq *rq, struct hinic_rq_ctxt *rq_ctxt) +{ + struct hinic_wq *wq = rq->wq; + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + ci_start = (u16)wq->cons_idx; + pi_start = (u16)wq->prod_idx; + pi_start = pi_start & wq->mask; + + /* read the first page from the HW table */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + if (wq->num_q_pages == 1) + wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr); + else + wq_block_pfn = WQ_BLOCK_PFN(wq->block_paddr); + + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(1, OWNER); + + rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) | + RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR); + + rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(ci_start, CI); + + rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_owner = 1; + + rq_ctxt->pref_wq_pfn_hi_ci = + RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI); + + rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); + + rq_ctxt->wq_block_pfn_hi = + RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +static int init_sq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_sq_ctxt_block *sq_ctxt_block; + struct hinic_sq_ctxt *sq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = 0; + u16 q_id, curr_id, global_qpn, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + sq_ctxt_block = cmd_buf->buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ? 
+ HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_SQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + global_qpn = nic_io->global_qpn + curr_id; + + hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]); + } + + cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + if (err || out_param != 0) { + nic_err(hwdev->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct hinic_nic_io *nic_io) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_rq_ctxt_block *rq_ctxt_block; + struct hinic_rq_ctxt *rq_ctxt; + struct hinic_cmd_buf *cmd_buf; + struct hinic_qp *qp; + u64 out_param = 0; + u16 q_id, curr_id, max_ctxts, i; + int err = 0; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + rq_ctxt_block = cmd_buf->buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + max_ctxts = (nic_io->num_qps - q_id) > HINIC_Q_CTXT_MAX ? + HINIC_Q_CTXT_MAX : (nic_io->num_qps - q_id); + + hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, + HINIC_QP_CTXT_TYPE_RQ, max_ctxts, + nic_io->max_qps, q_id); + + for (i = 0; i < max_ctxts; i++) { + curr_id = q_id + i; + qp = &nic_io->qps[curr_id]; + + hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]); + } + + cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); + + err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + + if (err || out_param != 0) { + nic_err(hwdev->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct hinic_nic_io *nic_io) +{ + int err; + + err = init_sq_ctxts(nic_io); + if (err) + return err; + + err = init_rq_ctxts(nic_io); + if (err) + return err; + + return 0; +} + +static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io, + enum hinic_qp_ctxt_type ctxt_type) +{ + struct hinic_hwdev *hwdev = nic_io->hwdev; + struct hinic_clean_queue_ctxt *ctxt_block; + struct hinic_cmd_buf *cmd_buf; + dma_addr_t cqe_dma_addr; + struct hinic_addr *addr; + u64 out_param = 0; + int i, err; + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(hwdev->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.addr_offset = 0; + + /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */ + ctxt_block->ctxt_size = 0x3; + if ((hinic_func_type(hwdev) == TYPE_VF) && + ctxt_type == HINIC_QP_CTXT_TYPE_RQ) { + addr = ctxt_block->cqe_dma_addr; + for (i = 0; i < nic_io->max_qps; i++) { + cqe_dma_addr = nic_io->qps[i].rq.cqe_dma_addr; + addr[i].addr_hi = upper_32_bits(cqe_dma_addr); + addr[i].addr_lo = lower_32_bits(cqe_dma_addr); + } + } + + hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + + err = 
hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ, + HINIC_MOD_L2NIC, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + cmd_buf, &out_param, 0); + + if ((err) || (out_param)) { + nic_err(hwdev->dev_hdl, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + err = -EFAULT; + } + + hinic_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io) +{ + /* clean LRO/TSO context space */ + return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) || + clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ)); +} + +/* init qps ctxt and set sq ci attr and arm all sq */ +int hinic_init_qp_ctxts(void *dev) +{ + struct hinic_hwdev *hwdev = dev; + struct hinic_nic_io *nic_io; + struct hinic_sq_attr sq_attr; + u16 q_id; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hwdev->nic_io; + + err = init_qp_ctxts(nic_io); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to init QP ctxts\n"); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_io); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to clean qp offload ctxts\n"); + return err; + } + + err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth, + nic_io->sq_depth, nic_io->rx_buff_len); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set root context\n"); + return err; + } + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + sq_attr.ci_dma_base = + HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2; + sq_attr.pending_limit = tx_pending_limit; + sq_attr.coalescing_time = tx_coalescing_time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->qps[q_id].sq.msix_entry_idx; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic_set_ci_table(hwdev, q_id, &sq_attr); + if (err) { + nic_err(hwdev->dev_hdl, "Failed to set ci table\n"); + goto set_cons_idx_table_err; + } + } + + return 0; + +set_cons_idx_table_err: + hinic_clean_root_ctxt(hwdev); + + return err; +} +EXPORT_SYMBOL(hinic_init_qp_ctxts); + +void hinic_free_qp_ctxts(void *hwdev) +{ + int err; + + if (!hwdev) + return; + + hinic_qps_num_set(hwdev, 0); + + err = hinic_clean_root_ctxt(hwdev); + if (err) + nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Failed to clean root ctxt\n"); +} +EXPORT_SYMBOL(hinic_free_qp_ctxts); + +int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_nic_io *nic_io; + u16 global_qpn; + int err; + + if (!hwdev) + return -EINVAL; + + if (is_multi_bm_slave(hwdev) && hinic_support_dynamic_q(hwdev)) { + err = hinic_reinit_cmdq_ctxts(dev); + if (err) { + nic_err(dev->dev_hdl, "Failed to reinit cmdq\n"); + return err; + } + } + + nic_io = dev->nic_io; + + err = hinic_get_base_qpn(hwdev, &global_qpn); + if (err) { + nic_err(dev->dev_hdl, "Failed to get base qpn\n"); + return err; + } + + nic_io->global_qpn = global_qpn; + nic_io->rx_buff_len = rx_buff_len; + err = hinic_init_function_table(hwdev, nic_io->rx_buff_len); + if (err) { + nic_err(dev->dev_hdl, "Failed to init function table\n"); + return err; + } + + err = hinic_enable_fast_recycle(hwdev, false); + if (err) { + nic_err(dev->dev_hdl, "Failed to disable fast recycle\n"); + return err; + } + + /* get default pf bandwidth from firmware witch setted by bios */ + err = hinic_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit); + if (err) { + nic_err(dev->dev_hdl, "Failed to get pf bandwidth limit\n"); + return err; + } + + if (dev->func_mode == FUNC_MOD_MULTI_BM_MASTER || + dev->func_mode == FUNC_MOD_MULTI_VM_MASTER) { + if 
(hinic_func_type(dev) != TYPE_VF) { + err = hinic_disable_tx_promisc(dev); + if (err) { + nic_err(dev->dev_hdl, "Failed to set tx promisc\n"); + return err; + } + } + } + + /* VFs don't set port routine command report */ + if (hinic_func_type(dev) != TYPE_VF) { + /* Get the fw support mac reuse flag */ + err = hinic_get_fw_support_func(hwdev); + if (err) { + nic_err(dev->dev_hdl, "Failed to get function capability\n"); + return err; + } + + /* Inform mgmt to send sfp's information to driver */ + err = hinic_set_port_routine_cmd_report(hwdev, true); + } + + return err; +} +EXPORT_SYMBOL(hinic_init_nic_hwdev); + +void hinic_free_nic_hwdev(void *hwdev) +{ + if (hinic_func_type(hwdev) != TYPE_VF) + hinic_set_port_routine_cmd_report(hwdev, false); +} +EXPORT_SYMBOL(hinic_free_nic_hwdev); + +int hinic_enable_tx_irq(void *hwdev, u16 q_id) +{ + return hinic_set_arm_bit(hwdev, HINIC_SET_ARM_SQ, q_id); +} +EXPORT_SYMBOL(hinic_enable_tx_irq); + +int hinic_rx_tx_flush(void *hwdev) +{ + return hinic_func_rx_tx_flush(hwdev); +} +EXPORT_SYMBOL(hinic_rx_tx_flush); + +int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_wq *wq = &nic_io->sq_wq[q_id]; + + return atomic_read(&wq->delta) - 1; +} +EXPORT_SYMBOL(hinic_get_sq_free_wqebbs); + +int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return nic_io->rq_wq[q_id].delta.counter - 1; +} +EXPORT_SYMBOL(hinic_get_rq_free_wqebbs); + +u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return (u16)(nic_io->sq_wq[q_id].cons_idx & nic_io->sq_wq[q_id].mask); +} +EXPORT_SYMBOL(hinic_get_sq_local_ci); + +u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + return MASKED_SQ_IDX(sq, be16_to_cpu(*(u16 *)(sq->cons_idx_addr))); +} +EXPORT_SYMBOL(hinic_get_sq_hw_ci); + +void *hinic_get_sq_wqe(void *hwdev, u16 q_id, int wqebb_cnt, u16 *pi, u8 *owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + void *wqe; + + wqe = hinic_get_wqe(sq->wq, wqebb_cnt, pi); + if (wqe) { + *owner = sq->owner; + if ((*pi + wqebb_cnt) >= nic_io->sq_depth) + sq->owner = !sq->owner; + } + + return wqe; +} +EXPORT_SYMBOL(hinic_get_sq_wqe); + +void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + if (owner != sq->owner) + sq->owner = owner; + + atomic_add(num_wqebbs, &sq->wq->delta); + sq->wq->prod_idx -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_return_sq_wqe); + +void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs, u16 *pi, + u8 *owner) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + *pi = MASKED_WQE_IDX(sq->wq, sq->wq->prod_idx); + + atomic_sub(num_wqebbs, &sq->wq->delta); + sq->wq->prod_idx += num_wqebbs; + + *owner = sq->owner; + if ((*pi + num_wqebbs) >= nic_io->sq_depth) + sq->owner = !sq->owner; +} +EXPORT_SYMBOL(hinic_update_sq_pi); + +static void sq_prepare_db(struct hinic_sq *sq, struct hinic_sq_db *db, + u16 prod_idx, int cos) +{ + u32 hi_prod_idx = SQ_DB_PI_HIGH(MASKED_SQ_IDX(sq, prod_idx)); + + db->db_info = SQ_DB_INFO_SET(hi_prod_idx, 
HI_PI) | + SQ_DB_INFO_SET(SQ_DB, TYPE) | + SQ_DB_INFO_SET(CFLAG_DATA_PATH, CFLAG) | + SQ_DB_INFO_SET(cos, COS) | + SQ_DB_INFO_SET(sq->q_id, QID); +} + +static void sq_write_db(struct hinic_sq *sq, u16 prod_idx, int cos) +{ + struct hinic_sq_db sq_db; + + sq_prepare_db(sq, &sq_db, prod_idx, cos); + + /* Data should be written to HW in Big Endian Format */ + sq_db.db_info = cpu_to_be32(sq_db.db_info); + + wmb(); /* Write all before the doorbell */ + + writel(sq_db.db_info, SQ_DB_ADDR(sq, prod_idx)); +} + +void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe, int wqebb_cnt, int cos) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + if (wqebb_cnt != 1) + hinic_write_wqe(sq->wq, wqe, wqebb_cnt); + + sq_write_db(sq, MASKED_SQ_IDX(sq, sq->wq->prod_idx), cos); +} +EXPORT_SYMBOL(hinic_send_sq_wqe); + +void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_sq *sq = &nic_io->qps[q_id].sq; + + sq->wq->cons_idx += wqebb_cnt; + atomic_add(wqebb_cnt, &sq->wq->delta); +} +EXPORT_SYMBOL(hinic_update_sq_local_ci); + +void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + return hinic_get_wqe(rq->wq, 1, pi); +} +EXPORT_SYMBOL(hinic_get_rq_wqe); + +void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + atomic_add(num_wqebbs, &rq->wq->delta); + rq->wq->prod_idx -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_return_rq_wqe); + +void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->qps[q_id].rq.wq->delta.counter -= num_wqebbs; +} +EXPORT_SYMBOL(hinic_update_rq_delta); + +void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + struct hinic_rq *rq = &nic_io->qps[q_id].rq; + + *rq->pi_virt_addr = cpu_to_be16(pi & rq->wq->mask); +} +EXPORT_SYMBOL(hinic_update_rq_hw_pi); + +u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + return (u16)(nic_io->rq_wq[q_id].cons_idx & nic_io->rq_wq[q_id].mask); +} +EXPORT_SYMBOL(hinic_get_rq_local_ci); + +void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt) +{ + struct hinic_nic_io *nic_io = ((struct hinic_hwdev *)hwdev)->nic_io; + + nic_io->qps[q_id].rq.wq->cons_idx += wqe_cnt; + nic_io->qps[q_id].rq.wq->delta.counter += wqe_cnt; +} +EXPORT_SYMBOL(hinic_update_rq_local_ci); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h new file mode 100644 index 0000000000000000000000000000000000000000..c38e356bb7cb62cce08b048f06e6562d6301cd78 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_io.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef HINIC_HW_NIC_IO_H_
+#define HINIC_HW_NIC_IO_H_
+
+#include "hinic_hw_mgmt.h"
+#include "hinic_qe_def.h"
+
+#define HINIC_RX_BUF_SHIFT 11
+#define HINIC_RX_BUF_LEN 2048 /* buffer len must be 2^n */
+
+#define SQ_CTRL_SET(val, member) ((u32)(val) << SQ_CTRL_##member##_SHIFT)
+
+int hinic_init_nic_hwdev(void *hwdev, u16 rx_buff_len);
+void hinic_free_nic_hwdev(void *hwdev);
+
+/* alloc qp resources */
+int hinic_create_qps(void *hwdev, u16 qp_num, u16 sq_depth, u16 rq_depth,
+                     struct irq_info *rq_msix_arry, int max_sq_sge);
+void hinic_free_qps(void *hwdev);
+
+/* init qps ctxt and set sq ci attr and arm all sq */
+int hinic_init_qp_ctxts(void *hwdev);
+void hinic_free_qp_ctxts(void *hwdev);
+
+/* function table and root context set */
+int hinic_set_parameters(void *hwdev, u8 *mac, u16 rx_buf_size, u32 mtu);
+void hinic_clear_parameters(void *hwdev);
+
+/* Invoked internally by the driver; wraps the set_arm_bit operation */
+int hinic_enable_tx_irq(void *hwdev, u16 q_id);
+
+int hinic_rx_tx_flush(void *hwdev);
+
+/* Obtain the number of idle wqebbs in the sq/rq */
+int hinic_get_sq_free_wqebbs(void *hwdev, u16 q_id);
+int hinic_get_rq_free_wqebbs(void *hwdev, u16 q_id);
+
+u16 hinic_get_sq_local_ci(void *hwdev, u16 q_id);
+u16 hinic_get_sq_hw_ci(void *hwdev, u16 q_id);
+
+void *hinic_get_sq_wqe(void *hwdev, u16 q_id,
+                       int wqebb_cnt, u16 *pi, u8 *owner);
+
+void hinic_return_sq_wqe(void *hwdev, u16 q_id, int num_wqebbs, u8 owner);
+
+void hinic_update_sq_pi(void *hwdev, u16 q_id, int num_wqebbs,
+                        u16 *pi, u8 *owner);
+
+/* Handles cross-page wqe writes and presses the doorbell */
+void hinic_send_sq_wqe(void *hwdev, u16 q_id, void *wqe,
+                       int wqebb_cnt, int cos);
+
+void hinic_update_sq_local_ci(void *hwdev, u16 q_id, int wqebb_cnt);
+
+/* Refreshes the rq buffer */
+void *hinic_get_rq_wqe(void *hwdev, u16 q_id, u16 *pi);
+/* update rq pi; it is already the latest pi, the function does not need to calculate it */
+void hinic_return_rq_wqe(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_delta(void *hwdev, u16 q_id, int num_wqebbs);
+
+void hinic_update_rq_hw_pi(void *hwdev, u16 q_id, u16 pi);
+
+u16 hinic_get_rq_local_ci(void *hwdev, u16 q_id);
+
+/* Clearing of rx done is not performed */
+void hinic_update_rq_local_ci(void *hwdev, u16 q_id, int wqe_cnt);
+
+struct hinic_sge {
+        u32 hi_addr;
+        u32 lo_addr;
+        u32 len;
+};
+
+void hinic_cpu_to_be32(void *data, int len);
+
+void hinic_be32_to_cpu(void *data, int len);
+
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
+
+dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
+
+void hinic_rq_cqe_addr_set(void *hwdev, u16 qid, dma_addr_t cqe_dma_ddr);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
new file mode 100644
index 0000000000000000000000000000000000000000..a99436549e98b5c641db5dc0e0ca196682f96d86
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -0,0 +1,2503 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software
Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_lld.h" +#include "hinic_nic_dev.h" +#include "hinic_dbg.h" +#include "hinic_nictool.h" +#include "hinic_qp.h" +#include "hinic_dcb.h" +#include "hinic_dbgtool_knl.h" + +#define HIADM_DEV_PATH "/dev/nictool_dev" +#define HIADM_DEV_CLASS "nictool_class" +#define HIADM_DEV_NAME "nictool_dev" + +#define HINIC_CMDQ_BUF_MAX_SIZE 2048U +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) + +static dev_t g_dev_id = {0}; +/*lint -save -e104 -e808*/ +static struct class *g_nictool_class; +/*lint -restore*/ +static struct cdev g_nictool_cdev; + +static int g_nictool_init_flag; +static int g_nictool_ref_cnt; + +typedef int (*nic_driv_module)(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct nic_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + nic_driv_module driv_func; +}; + +typedef int (*hw_driv_module)(void *hwdev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +struct hw_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + hw_driv_module driv_func; +}; + +static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_UCODE) + hinic_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf; + + if (!in_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE) { + struct hinic_cmd_buf *cmd_buf; + + if (in_size > HINIC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void *)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +static void free_buff_out(void *hwdev, struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + hinic_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) { + struct hinic_cmd_buf *cmd_buf; + + if (out_size > HINIC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hinic_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > 
MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) more than 2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int copy_buf_out_to_user(struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + msg_out = ((struct hinic_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_sq_info *sq_info, + u32 *msg_size); +static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_rq_info *rq_info, + u32 *msg_size); + +static int get_tx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 q_id; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out || in_size != sizeof(int)) + return -EINVAL; + + q_id = *((u16 *)buf_in); + + err = hinic_dbg_get_sq_info(nic_dev, q_id, buf_out, out_size); + + return err; +} + +static int get_q_num(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 num_qp; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get queue number\n"); + return -EFAULT; + } + + if (!buf_out) + return -EFAULT; + + num_qp = hinic_dbg_get_qp_num(nic_dev->hwdev); + if (!num_qp) + return -EFAULT; + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *((u16 *)buf_out) = num_qp; + + return 0; +} + +static int get_tx_wqe_info(struct hinic_nic_dev *nic_dev, + void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 0, wqebb_cnt = 1; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx wqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + err = hinic_dbg_get_sq_wqe_info(nic_dev->hwdev, q_id, + idx, wqebb_cnt, + buf_out, (u16 *)out_size); + + return err; +} + +static int get_rx_info(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 q_id; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out || in_size != sizeof(int)) + return -EINVAL; + + q_id = *((u16 *)buf_in); + + err = hinic_dbg_get_rq_info(nic_dev, q_id, buf_out, out_size); + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + nicif_info(nic_dev, drv, nic_dev->netdev, + "qid: %u, coalesc_timer:0x%x, pending_limit: 0x%x\n", + q_id, nic_dev->rxqs[q_id].last_coalesc_timer_cfg, + nic_dev->rxqs[q_id].last_pending_limt); + } + + return err; +} + +static int get_rx_wqe_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info 
*info = buf_in; + u16 q_id = 0; + u16 idx = 0, wqebb_cnt = 1; + int err; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx wqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + err = hinic_dbg_get_rq_wqe_info(nic_dev->hwdev, q_id, + idx, wqebb_cnt, + buf_out, (u16 *)out_size); + + return err; +} + +static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u16 intr_num; + + intr_num = hinic_intr_num(nic_dev->hwdev); + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = intr_num; + + return 0; +} + +static void clean_nicdev_stats(struct hinic_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->stats.syncp); + nic_dev->stats.netdev_tx_timeout = 0; + nic_dev->stats.tx_carrier_off_drop = 0; + nic_dev->stats.tx_invalid_qid = 0; + u64_stats_update_end(&nic_dev->stats.syncp); +} + +static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int i; + + if (*out_size != 0) { + pr_err("Unexpect out buf size from user: %d, expect: 0\n", + *out_size); + return -EINVAL; + } + + clean_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qps; i++) { + hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); + hinic_txq_clean_stats(&nic_dev->txqs[i].txq_stats); + } + + return 0; +} + +static int get_num_cos(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *num_cos = buf_out; + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*num_cos)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*num_cos)); + return -EFAULT; + } + + return hinic_get_num_cos(nic_dev, num_cos); +} + +static int get_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_cos_up_map *map = buf_out; + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*map)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*map)); + return -EFAULT; + } + + return hinic_get_cos_up_map(nic_dev, &map->num_cos, map->cos_up); +} + +static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_cos_up_map *map = buf_in; + + if (!buf_in || !out_size || in_size != sizeof(*map)) + return -EINVAL; + + if (*out_size != sizeof(*map)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*map)); + return -EINVAL; + } + + return hinic_set_cos_up_map(nic_dev, map->cos_up); +} + +static int get_rx_cqe_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 0; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx cqe info\n"); + return -EFAULT; + } + + if (!info || !buf_out || in_size != sizeof(*info)) + return -EFAULT; + + if (*out_size != sizeof(struct hinic_rq_cqe)) { + 
nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(struct hinic_rq_cqe)); + return -EFAULT; + } + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + if (q_id >= nic_dev->num_qps || idx >= nic_dev->rxqs[q_id].q_depth) + return -EFAULT; + + memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe, + sizeof(struct hinic_rq_cqe)); + + return 0; +} + +static int hinic_dbg_get_sq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_sq_info *sq_info, + u32 *msg_size) +{ + int err; + + if (!nic_dev) + return -EINVAL; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get sq info\n"); + return -EFAULT; + } + + if (q_id >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Input queue id is larger than the actual queue number\n"); + return -EINVAL; + } + + if (*msg_size != sizeof(*sq_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *msg_size, sizeof(*sq_info)); + return -EFAULT; + } + sq_info->q_id = q_id; + sq_info->pi = hinic_dbg_get_sq_pi(nic_dev->hwdev, q_id); + sq_info->ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id); + sq_info->fi = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id); + + sq_info->q_depth = nic_dev->txqs[q_id].q_depth; + /* pi_reverse */ + + sq_info->weqbb_size = HINIC_SQ_WQEBB_SIZE; + /* priority */ + + sq_info->ci_addr = hinic_dbg_get_sq_ci_addr(nic_dev->hwdev, q_id); + + sq_info->cla_addr = hinic_dbg_get_sq_cla_addr(nic_dev->hwdev, q_id); + sq_info->slq_handle = hinic_dbg_get_sq_wq_handle(nic_dev->hwdev, q_id); + + /* direct wqe */ + + err = hinic_dbg_get_sq_db_addr(nic_dev->hwdev, + q_id, &sq_info->db_addr.map_addr, + &sq_info->db_addr.phy_addr, + &sq_info->pg_idx); + + sq_info->glb_sq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id; + + return err; +} + +static int hinic_dbg_get_rq_info(struct hinic_nic_dev *nic_dev, u16 q_id, + struct hinic_dbg_rq_info *rq_info, + u32 *msg_size) +{ + if (!nic_dev) + return -EINVAL; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rq info\n"); + return -EFAULT; + } + + if (q_id >= nic_dev->num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Input queue id is larger than the actual queue number\n"); + return -EINVAL; + } + if (*msg_size != sizeof(*rq_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *msg_size, sizeof(*rq_info)); + return -EFAULT; + } + + rq_info->q_id = q_id; + rq_info->glb_rq_id = hinic_dbg_get_global_qpn(nic_dev->hwdev) + q_id; + + rq_info->hw_pi = hinic_dbg_get_rq_hw_pi(nic_dev->hwdev, q_id); + rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx & + nic_dev->rxqs[q_id].q_mask; + + rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; + + rq_info->wqebb_size = HINIC_RQ_WQE_SIZE; + rq_info->q_depth = nic_dev->rxqs[q_id].q_depth; + + rq_info->buf_len = nic_dev->rxqs[q_id].buf_len; + + rq_info->slq_handle = hinic_dbg_get_rq_wq_handle(nic_dev->hwdev, q_id); + if (!rq_info->slq_handle) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get rq slq handle null\n"); + return -EFAULT; + } + rq_info->ci_wqe_page_addr = + hinic_slq_get_first_pageaddr(rq_info->slq_handle); + rq_info->ci_cla_tbl_addr = + hinic_dbg_get_rq_cla_addr(nic_dev->hwdev, q_id); + + rq_info->msix_idx = nic_dev->rxqs[q_id].msix_entry_idx; + rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; + + return 0; +} + 
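+/* Descriptive note (added for clarity, not in the original submission): the
+ * nictool handlers below share one calling contract. buf_in/buf_out are
+ * kernel-side copies of the user buffers (filled by alloc_buff_in() and
+ * written back by copy_buf_out_to_user() above), while in_size and *out_size
+ * carry their lengths, so every handler validates both before using the
+ * payload. get_loopback_mode()/set_loopback_mode() below simply forward a
+ * struct hinic_nic_loop_mode (loop_mode plus loop_ctrl) to
+ * hinic_get_loopback_mode_ex()/hinic_set_loopback_mode_ex().
+ */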
+static int get_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_loop_mode *mode = buf_out; + int err; + + if (!out_size || !mode) + return -EFAULT; + + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EFAULT; + } + err = hinic_get_loopback_mode_ex(nic_dev->hwdev, &mode->loop_mode, + &mode->loop_ctrl); + return err; +} + +static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_loop_mode *mode = buf_in; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set loopback mode\n"); + return -EFAULT; + } + + if (!mode || !out_size || in_size != sizeof(*mode)) + return -EFAULT; + + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EINVAL; + } + + return hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode, + mode->loop_ctrl); +} + +static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hinic_nic_link_mode *link = buf_in; + u8 link_status; + + if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set link mode\n"); + return -EFAULT; + } + + if (!link || !out_size || in_size != sizeof(*link)) + return -EFAULT; + + if (*out_size != sizeof(*link)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*link)); + return -EINVAL; + } + + switch (*link) { + case HINIC_LINK_MODE_AUTO: + if (hinic_get_link_state(nic_dev->hwdev, &link_status)) + link_status = false; + hinic_link_status_change(nic_dev, (bool)link_status); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: auto succeed, now is link %s\n", + (link_status ? 
"up" : "down")); + break; + case HINIC_LINK_MODE_UP: + hinic_link_status_change(nic_dev, true); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: up succeed\n"); + break; + case HINIC_LINK_MODE_DOWN: + hinic_link_status_change(nic_dev, false); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: down succeed\n"); + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid link mode %d to set\n", *link); + return -EINVAL; + } + + return 0; +} + +static int set_dcb_cfg(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + union _dcb_ctl dcb_ctl = {.data = 0}; + int err; + + if (!buf_in || !buf_out || *out_size != sizeof(u32) || + in_size != sizeof(u32)) + return -EINVAL; + + dcb_ctl.data = *((u32 *)buf_in); + + err = hinic_setup_dcb_tool(nic_dev->netdev, + &dcb_ctl.dcb_data.dcb_en, + !!dcb_ctl.dcb_data.wr_flag); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup dcb state to %d\n", + !!dcb_ctl.dcb_data.dcb_en); + err = EINVAL; + } + dcb_ctl.dcb_data.err = (u8)err; + *((u32 *)buf_out) = (u32)dcb_ctl.data; + + return 0; +} + +int get_pfc_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + union _pfc pfc = {.data = 0}; + + if (!buf_in || !buf_out || *out_size != sizeof(u32) || + in_size != sizeof(u32)) + return -EINVAL; + + pfc.data = *((u32 *)buf_in); + + hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, + &pfc.pfc_data.pfc_en, false); + hinic_dcbnl_get_pfc_cfg_tool(nic_dev->netdev, + &pfc.pfc_data.pfc_priority); + hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, + &pfc.pfc_data.num_of_tc); + *((u32 *)buf_out) = (u32)pfc.data; + + return 0; +} + +int set_pfc_control(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 pfc_en = 0; + u8 err = 0; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(u8)) + return -EINVAL; + + pfc_en = *((u8 *)buf_in); + if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable dcb first\n"); + err = 0xff; + goto exit; + } + + hinic_dcbnl_set_pfc_en_tool(nic_dev->netdev, &pfc_en, true); + err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pfc to %s\n", + pfc_en ? 
"enable" : "disable"); + } + +exit: + *((u8 *)buf_out) = (u8)err; + + return 0; +} + +int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct _ets ets = {0}; + u8 err = 0; + u8 i; + u8 support_tc = nic_dev->max_cos; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(struct _ets)) + return -EINVAL; + + memcpy(&ets, buf_in, sizeof(struct _ets)); + + if (!(test_bit(HINIC_DCB_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable dcb first\n"); + err = 0xff; + goto exit; + } + if (ets.flag_com.ets_flag.flag_ets_enable) { + hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets.ets_en, true); + + if (!ets.ets_en) + goto exit; + } + + if (!(test_bit(HINIC_ETS_ENABLE, &nic_dev->flags))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable ets first\n"); + err = 0xff; + goto exit; + } + + if (ets.flag_com.ets_flag.flag_ets_cos) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + if (ets.tc[i] >= HINIC_DCB_TC_MAX) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "ETS tc id %d out of range\n", + ets.tc[i]); + err = 0xFF; + goto exit; + } + } + hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets.tc, true); + } + + if (ets.flag_com.ets_flag.flag_ets_percent) { + for (i = support_tc; i < HINIC_DCB_TC_MAX; i++) { + if (ets.ets_percent[i]) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "ETS setting out of range\n"); + break; + } + } + + hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev, + ets.ets_percent, true); + } + + if (ets.flag_com.ets_flag.flag_ets_strict) + hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, + &ets.strict, true); + + err = hinic_dcbnl_set_ets_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set ets [%d]\n", err); + } +exit: + *((u8 *)buf_out) = err; + + return 0; +} + +int get_support_up(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *up_num = buf_out; + u8 support_up = 0; + u8 i; + u8 up_valid_bitmap = nic_dev->up_valid_bitmap; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*up_num)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*up_num)); + return -EFAULT; + } + + for (i = 0; i < HINIC_DCB_UP_MAX; i++) { + if (up_valid_bitmap & BIT(i)) + support_up++; + } + + *up_num = support_up; + + return 0; +} + +int get_support_tc(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u8 *tc_num = buf_out; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*tc_num)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*tc_num)); + return -EFAULT; + } + + hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, tc_num); + + return 0; +} + +int get_ets_info(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct _ets *ets = buf_out; + + if (!buf_in || !buf_out || *out_size != sizeof(*ets)) + return -EINVAL; + + hinic_dcbnl_set_ets_pecent_tool(nic_dev->netdev, + ets->ets_percent, false); + hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets->tc, false); + hinic_dcbnl_set_ets_en_tool(nic_dev->netdev, &ets->ets_en, false); + hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, &ets->strict, false); + ets->err = 0; + + return 0; +} + +int set_pfc_priority(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 
in_size, void *buf_out, u32 *out_size) +{ + u8 pfc_prority = 0; + u8 err = 0; + + if (!buf_in || !buf_out || *out_size != sizeof(u8) || + in_size != sizeof(u8)) + return -EINVAL; + + pfc_prority = *((u8 *)buf_in); + if (!((test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) && + nic_dev->tmp_dcb_cfg.pfc_state)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Need to enable pfc first\n"); + err = 0xff; + goto exit; + } + + hinic_dcbnl_set_pfc_cfg_tool(nic_dev->netdev, pfc_prority); + + err = hinic_dcbnl_set_pfc_tool(nic_dev->netdev); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pfc to %x priority\n", + pfc_prority); + } +exit: + *((u8 *)buf_out) = (u8)err; + + return 0; +} + +static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 pf_bw_limit = 0; + int err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "To set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + *out_size != sizeof(u8)) + return -EINVAL; + + pf_bw_limit = *((u32 *)buf_in); + + err = hinic_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit); + if (err) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set pf bandwidth limit to %d%%\n", + pf_bw_limit); + if (err < 0) + return err; + } + + *((u8 *)buf_out) = (u8)err; + + return 0; +} + +static int get_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 pf_bw_limit = 0; + int err; + + if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "To get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_out || *out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + err = hinic_dbg_get_pf_bw_limit(nic_dev->hwdev, &pf_bw_limit); + if (err) + return err; + + *((u32 *)buf_out) = pf_bw_limit; + + return 0; +} + +static int get_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_poll_weight *weight_info = buf_out; + + if (!buf_out || *out_size != sizeof(*weight_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*weight_info)); + return -EFAULT; + } + weight_info->poll_weight = nic_dev->poll_weight; + return 0; +} + +static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_nic_poll_weight *weight_info = buf_in; + + if (!buf_in || in_size != sizeof(*weight_info) || + *out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in buf size: %u or out buf size: %d from user, expect: %lu\n", + in_size, *out_size, sizeof(*weight_info)); + return -EFAULT; + } + + nic_dev->poll_weight = weight_info->poll_weight; + + return 0; +} + +static int get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_homologues *homo = buf_out; + + if (!buf_out || *out_size != sizeof(*homo)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*homo)); + return -EFAULT; + } + + if (test_bit(HINIC_SAME_RXTX, &nic_dev->flags)) + homo->homo_state = HINIC_HOMOLOGUES_ON; + else + 
homo->homo_state = HINIC_HOMOLOGUES_OFF; + + return 0; +} + +static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_homologues *homo = buf_in; + + if (!buf_in || in_size != sizeof(*homo) || + *out_size != sizeof(*homo)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in buf size: %d or out buf size: %d from user, expect: %lu\n", + in_size, *out_size, sizeof(*homo)); + return -EFAULT; + } + + if (homo->homo_state == HINIC_HOMOLOGUES_ON) { + set_bit(HINIC_SAME_RXTX, &nic_dev->flags); + } else if (homo->homo_state == HINIC_HOMOLOGUES_OFF) { + clear_bit(HINIC_SAME_RXTX, &nic_dev->flags); + } else { + pr_err("Invalid parameters\n"); + return -EFAULT; + } + + return 0; +} + +static int get_sset_count(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 count; + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + *out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid parameters\n"); + return -EINVAL; + } + + switch (*((u32 *)buf_in)) { + case HINIC_SHOW_SSET_IO_STATS: + count = hinic_get_io_stats_size(nic_dev); + break; + + default: + count = 0; + break; + } + + *((u32 *)buf_out) = count; + + return 0; +} + +static int get_sset_stats(struct hinic_nic_dev *nic_dev, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct hinic_show_item *items = buf_out; + u32 sset, count, size; + int err; + + if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) + return -EINVAL; + + size = sizeof(u32); + err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); + if (err) + return -EINVAL; + + if (count * sizeof(*items) != *out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, count * sizeof(*items)); + return -EINVAL; + } + + sset = *((u32 *)buf_in); + + switch (sset) { + case HINIC_SHOW_SSET_IO_STATS: + hinic_get_io_stats(nic_dev, items); + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %d to get stats\n", + sset); + err = -EINVAL; + break; + } + + return err; +} + +static int get_func_type(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_typ; + + func_typ = hinic_func_type(hwdev); + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = func_typ; + return 0; +} + +static int get_func_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_id; + + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hinic_global_func_id_hw(hwdev); + *(u16 *)buf_out = func_id; + + return 0; +} + +static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int offset = 0; + struct chip_fault_stats *fault_info; + + if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct chip_fault_stats *)buf_in; + offset = fault_info->offset; + fault_info = (struct chip_fault_stats *)buf_out; + hinic_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset); + + return 0; +} + +static int get_hw_stats(void *hwdev, void 
*buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return hinic_dbg_get_hw_stats(hwdev, buf_out, (u16 *)out_size); +} + +static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return hinic_dbg_clear_hw_stats(hwdev, out_size); +} + +static int get_drv_version(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info; + char ver_str[MAX_VER_INFO_LEN] = {0}; + int err; + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EFAULT; + } + err = snprintf(ver_str, sizeof(ver_str), + "%s [compiled with the kernel]", HINIC_DRV_VERSION); + if (err <= 0 || err >= MAX_VER_INFO_LEN) { + pr_err("Failed snprintf driver version, function return(%d) and dest_len(%d)\n", + err, MAX_VER_INFO_LEN); + return -EFAULT; + } + ver_info = (struct drv_version_info *)buf_out; + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + return 0; +} + +static int get_self_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_in || !buf_out || in_size != sizeof(struct card_info) || + *out_size != sizeof(struct card_info)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(struct card_info)); + return -EFAULT; + } + + hinic_get_card_info(hwdev, buf_out); + + return 0; +} + +static int get_device_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 dev_id; + int err; + + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + err = hinic_get_device_id(hwdev, &dev_id); + if (err) + return err; + + *((u32 *)buf_out) = dev_id; + + return 0; +} + +static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host; + + if (!buf_out || (*out_size != sizeof(u8))) + return -EINVAL; + + in_host = hinic_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_pf_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic_pf_info *pf_info; + u32 port_id = 0; + int err; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || + !buf_in || in_size != sizeof(u32)) + return -EINVAL; + + port_id = *((u32 *)buf_in); + pf_info = (struct hinic_pf_info *)buf_out; + err = hinic_get_pf_id(hwdev, port_id, &pf_info->pf_id, + &pf_info->isvalid); + if (err) + return err; + + return 0; +} + +static int __get_card_usr_api_chain_mem(int card_idx) +{ + unsigned char *tmp; + int i; + + mutex_lock(&g_hinic_addr_lock); + g_hinic_card_id = card_idx; + if (!g_hinic_card_vir_addr[card_idx]) { + g_hinic_card_vir_addr[card_idx] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_hinic_card_vir_addr[card_idx]) { + pr_err("Alloc api chain memory fail for card %d\n", + card_idx); + mutex_unlock(&g_hinic_addr_lock); + return -EFAULT; + } + + memset(g_hinic_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_hinic_card_phy_addr[card_idx] = + virt_to_phys(g_hinic_card_vir_addr[card_idx]); + if (!g_hinic_card_phy_addr[card_idx]) { + pr_err("phy 
addr for card %d is 0\n", card_idx); + free_pages((unsigned long)g_hinic_card_vir_addr + [card_idx], DBGTOOL_PAGE_ORDER); + g_hinic_card_vir_addr[card_idx] = NULL; + mutex_unlock(&g_hinic_addr_lock); + return -EFAULT; + } + + tmp = g_hinic_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_hinic_addr_lock); + + return 0; +} + +static int get_pf_dev_info(char *dev_name, struct msg_module *nt_msg) +{ + struct pf_dev_info dev_info[16] = { {0} }; + struct card_node *card_info = NULL; + int i; + int err; + + if (nt_msg->len_info.out_buff_len != sizeof(dev_info) || + nt_msg->len_info.in_buff_len != sizeof(dev_info)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->len_info.out_buff_len, + nt_msg->len_info.in_buff_len, (sizeof(dev_info) * 16)); + return -EINVAL; + } + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_hinic_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(i); + if (err) { + pr_err("Faile to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + hinic_chipif_get_all_pf_dev_info(dev_info, i, + card_info->func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(nt_msg->out_buf, dev_info, sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +static int knl_free_mem(char *dev_name, struct msg_module *nt_msg) +{ + struct card_node *card_info = NULL; + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_hinic_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(dev_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + pr_err("Can't find this card %s\n", dev_name); + return -EFAULT; + } + + hinic_dbgtool_knl_free_mem(i); + + return 0; +} + +extern void hinic_get_card_func_info_by_card_name(const char *chip_name, + struct hinic_card_func_info + *card_func); + +static int get_card_func_info(char *dev_name, struct msg_module *nt_msg) +{ + struct hinic_card_func_info card_func_info = {0}; + int id, err; + + if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) || + nt_msg->len_info.in_buff_len != sizeof(card_func_info)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->len_info.out_buff_len, + nt_msg->len_info.in_buff_len, sizeof(card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HINIC_CHIP_NAME, strlen(HINIC_CHIP_NAME)); + if (err) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HINIC_CHIP_NAME "%d", &id); + if (err <= 0) { + pr_err("Failed to get hinic id\n"); + return err; + } + + if (id >= MAX_CARD_NUM) { + pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + hinic_get_card_func_info_by_card_name(dev_name, &card_func_info); + + if (!card_func_info.num_pf) { + pr_err("None function found for %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Faile to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + card_func_info.usr_api_phy_addr = g_hinic_card_phy_addr[id]; + + /* Copy the dev_info to 
user mode */ + if (copy_to_user(nt_msg->out_buf, &card_func_info, + sizeof(card_func_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30 +static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 loop_cnt = 0; + + if (*out_size != 0) { + pr_err("Unexpect out buf size from user: %d, expect: 0\n", + *out_size); + return -EINVAL; + } + + while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) { + if (!hinic_get_mgmt_channel_status(hwdev)) + return 0; + + msleep(1000); + loop_cnt++; + } + if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { + {TX_INFO, get_tx_info}, + {Q_NUM, get_q_num}, + {TX_WQE_INFO, get_tx_wqe_info}, + {RX_INFO, get_rx_info}, + {RX_WQE_INFO, get_rx_wqe_info}, + {RX_CQE_INFO, get_rx_cqe_info}, + {GET_INTER_NUM, get_inter_num}, + {CLEAR_FUNC_STASTIC, clear_func_static}, + {GET_NUM_COS, get_num_cos}, + {GET_COS_UP_MAP, get_dcb_cos_up_map}, + {SET_COS_UP_MAP, set_dcb_cos_up_map}, + {GET_LOOPBACK_MODE, get_loopback_mode}, + {SET_LOOPBACK_MODE, set_loopback_mode}, + {SET_LINK_MODE, set_link_mode}, + {SET_PF_BW_LIMIT, set_pf_bw_limit}, + {GET_PF_BW_LIMIT, get_pf_bw_limit}, + {GET_POLL_WEIGHT, get_poll_weight}, + {SET_POLL_WEIGHT, set_poll_weight}, + {GET_HOMOLOGUE, get_homologue}, + {SET_HOMOLOGUE, set_homologue}, + {GET_SSET_COUNT, get_sset_count}, + {GET_SSET_ITEMS, get_sset_stats}, + {SET_PFC_CONTROL, set_pfc_control}, + {SET_ETS, set_ets}, + {GET_ETS_INFO, get_ets_info}, + {SET_PFC_PRIORITY, set_pfc_priority}, + {SET_DCB_CFG, set_dcb_cfg}, + {GET_PFC_INFO, get_pfc_info}, + {GET_SUPPORT_UP, get_support_up}, + {GET_SUPPORT_TC, get_support_tc}, +}; + +struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_DRV_VERSION, get_drv_version}, + {GET_HW_STATS, get_hw_stats}, + {CLEAR_HW_STATS, clear_hw_stats}, + {GET_SELF_TEST_RES, get_self_test}, + {GET_CHIP_FAULT_STATS, get_chip_faults_stats}, + {GET_CHIP_ID, get_chip_id_test}, + {GET_SINGLE_CARD_INFO, get_single_card_info}, + {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status}, + {GET_DEVICE_ID, get_device_id}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_PF_ID, get_pf_id}, +}; + +static int send_to_nic_driver(struct hinic_nic_dev *nic_dev, + u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = sizeof(nic_driv_module_cmd_handle) / + sizeof(nic_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; + int err = 0; + + mutex_lock(&nic_dev->nic_mutex); + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + nic_driv_module_cmd_handle[index].driv_cmd_name) { + err = nic_driv_module_cmd_handle[index].driv_func + (nic_dev, buf_in, + in_size, buf_out, out_size); + break; + } + } + mutex_unlock(&nic_dev->nic_mutex); + + if (index == num_cmds) + return -EINVAL; + return err; +} + +static int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int index, num_cmds = sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type = + (enum driver_cmd_type)(nt_msg->msg_formate); + int err = 0; + + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + hw_driv_module_cmd_handle[index].driv_cmd_name) { + err = 
hw_driv_module_cmd_handle[index].driv_func + (hwdev, buf_in, + in_size, buf_out, out_size); + break; + } + } + if (index == num_cmds) + return -EINVAL; + return err; +} + +static int send_to_ucode(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret = 0; + + if (nt_msg->ucode_cmd.ucode_db.ucode_imm) { + ret = hinic_cmdq_direct_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send direct cmdq err: %d\n", ret); + } else { + ret = hinic_cmdq_detail_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send detail cmdq err: %d\n", ret); + } + + return ret; +} + +enum api_csr_op_width { + OP_WIDTH_4B, + OP_WIDTH_8B, + OP_WIDTH_UNKNOWN, +}; + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size, + enum api_csr_op_width width) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + u32 op_bytes = (width == OP_WIDTH_4B ? sizeof(u32) : sizeof(u64)); + int ret = 0; + u32 rd_len, rd_addr, rd_cnt; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || + *out_size != up_log_msg->rd_len || width >= OP_WIDTH_UNKNOWN) + return -EINVAL; + + rd_len = up_log_msg->rd_len; + rd_addr = up_log_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + rd_cnt = rd_len / op_bytes; + + if (rd_len % op_bytes) + rd_cnt++; + + for (i = 0; i < rd_cnt; i++) { + if (width == OP_WIDTH_4B) + ret = hinic_api_csr_rd32(hwdev, node_id, + rd_addr + offset, + (u32 *)(((u8 *)buf_out) + + offset)); + else + ret = hinic_api_csr_rd64(hwdev, node_id, + rd_addr + offset, + (u64 *)(((u8 *)buf_out) + + offset)); + if (ret) { + pr_err("Read csr failed, err: %d, node_id: %d, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += op_bytes; + } + *out_size = rd_len; + + return ret; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size, + enum api_csr_op_width width) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + u32 op_bytes = (width == OP_WIDTH_4B ? 
sizeof(u32) : sizeof(u64)); + int ret = 0; + u32 rd_len, rd_addr, rd_cnt; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data = NULL; + + if (!buf_in || in_size != sizeof(*csr_write_msg) || + width >= OP_WIDTH_UNKNOWN) + return -EINVAL; + + rd_len = csr_write_msg->rd_len; + rd_addr = csr_write_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + if (rd_len % op_bytes) { + pr_err("Csr length must be a multiple of %d\n", op_bytes); + return -EFAULT; + } + + rd_cnt = rd_len / op_bytes; + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) { + pr_err("No more memory\n"); + return -EFAULT; + } + if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + if (width == OP_WIDTH_4B) + ret = hinic_api_csr_wr32(hwdev, node_id, + rd_addr + offset, + *((u32 *)(data + offset))); + else + ret = hinic_api_csr_wr64(hwdev, node_id, + rd_addr + offset, + *((u64 *)(data + offset))); + if (ret) { + pr_err("Write csr failed, ret: %d, node_id: %d, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += op_bytes; + } + + *out_size = 0; + kfree(data); + return ret; +} + +static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd) +{ + if (mod == HINIC_MOD_L2NIC && cmd == HINIC_PORT_CMD_UPDATE_FW) + return UP_UPDATEFW_TIME_OUT_VAL; + else + return UP_COMP_TIME_OUT_VAL; +} + +static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + u32 rd_len = csr_write_msg->rd_len; + + if (rd_len > TOOL_COUNTER_MAX_LEN) { + pr_err("Csr read or write len is invalid\n"); + return -EINVAL; + } + + return 0; +} + +static int send_to_up(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if (nt_msg->up_cmd.up_db.up_api_type == API_CMD || + nt_msg->up_cmd.up_db.up_api_type == API_CLP) { + enum hinic_mod_type mod; + u8 cmd; + u32 timeout; + + mod = (enum hinic_mod_type)nt_msg->up_cmd.up_db.comm_mod_type; + cmd = nt_msg->up_cmd.up_db.chipif_cmd; + + timeout = get_up_timeout_val(mod, cmd); + + if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) + ret = hinic_msg_to_mgmt_sync(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size, + timeout); + else + ret = hinic_clp_to_mgmt(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + if (ret) { + pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n", + mod, cmd); + return ret; + } + + } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) { + if (check_useparam_valid(nt_msg, buf_in)) + return -EINVAL; + + switch (nt_msg->up_cmd.up_db.chipif_cmd) { + case API_CSR_WRITE: + ret = api_csr_write(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size, OP_WIDTH_4B); + break; + case API_CSR_READ: + ret = api_csr_read(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size, OP_WIDTH_4B); + break; + case API_CSR_WRITE_8B: + ret = api_csr_write(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size, OP_WIDTH_8B); + break; + case API_CSR_READ_8B: + ret = api_csr_read(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size, OP_WIDTH_8B); + break; + default: + pr_err("Unsupported chipif cmd: %d\n", + nt_msg->up_cmd.up_db.chipif_cmd); + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic_sm_ctr_rd32(hwdev, node, 
instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (32 bits) failed\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hinic_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret) { + pr_err("Get sm ctr information (64 bits pair) failed\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic_sm_ctr_rd64(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (64 bits) failed\n"); + val1 = 0xffffffff; + } + buf_out->val1 = val1; + + return ret; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +struct sm_module_handle { + enum sm_cmd_type sm_cmd_name; + sm_module sm_func; +}; + +static struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64} +}; + +static int send_to_sm(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate = nt_msg->msg_formate; + int index, num_cmds = sizeof(sm_module_cmd_handle) / + sizeof(sm_module_cmd_handle[0]); + int ret = 0; + + if (!buf_in || !buf_out || in_size != sizeof(*sm_in) || + *out_size != sizeof(*sm_out)) + return -EINVAL; + + for (index = 0; index < num_cmds; index++) { + if (msg_formate == sm_module_cmd_handle[index].sm_cmd_name) + ret = sm_module_cmd_handle[index].sm_func(hwdev, + (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + } + + if (ret) + pr_err("Get sm information failed\n"); + + *out_size = sizeof(struct sm_out_st); + + return ret; +} + +static bool is_hwdev_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + void *hwdev; + + hwdev = hinic_get_hwdev_by_ifname(ifname); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", ifname); + return false; + } + + switch (mod) { + case SEND_TO_UP: + case SEND_TO_SM: + if (FUNC_SUPPORT_MGMT(hwdev)) { + if (up_api_type == API_CLP) { + if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_CLP_INITED)) { + pr_err("CLP has not been initialized\n"); + return false; + } + } else if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_MGMT_INITED)) { + pr_err("MGMT has not been initialized\n"); + return false; + } + } else if (!hinic_is_hwdev_mod_inited + (hwdev, HINIC_HWDEV_MBOX_INITED)) { + pr_err("MBOX has not been initialized\n"); + return false; + } + + if (mod == SEND_TO_SM && + ((hinic_func_type(hwdev) == TYPE_VF) || + (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_MGMT_INITED)))) { + pr_err("Current function does not support this cmd\n"); + return false; + } + break; + + case SEND_TO_UCODE: + if (!hinic_is_hwdev_mod_inited(hwdev, + HINIC_HWDEV_CMDQ_INITED)) { + pr_err("CMDQ has not been initialized\n"); + return false; + } + break; + + default: + return false; + } + + return true; +} + +static bool nictool_k_is_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + enum hinic_init_state init_state = + hinic_get_init_state_by_ifname(ifname); + bool support = true; + + if (init_state == HINIC_INIT_STATE_NONE) + return false; + + if (mod == SEND_TO_NIC_DRIVER) { + if (init_state <
HINIC_INIT_STATE_NIC_INITED) { + pr_err("NIC driver has not been initialized\n"); + return false; + } + } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) { + return is_hwdev_cmd_support(mod, ifname, up_api_type); + } else if ((mod >= HINICADM_OVS_DRIVER && + mod <= HINICADM_FCOE_DRIVER) || + mod == SEND_TO_HW_DRIVER) { + if (init_state < HINIC_INIT_STATE_HWDEV_INITED) { + pr_err("Hwdev has not been initialized\n"); + return false; + } + } else { + pr_err("Unsupported mod %d\n", mod); + support = false; + } + + return support; +} + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +} + +static int get_self_test_cmd(struct msg_module *nt_msg) +{ + int ret; + u32 res = 0; + + ret = hinic_get_self_test_result(nt_msg->device_name, &res); + if (ret) { + pr_err("Get self test result failed\n"); + return -EFAULT; + } + + ret = copy_buf_out_to_user(nt_msg, sizeof(res), &res); + if (ret) + pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__); + + return ret; +} + +static int get_all_chip_id_cmd(struct msg_module *nt_msg) +{ + struct nic_card_id card_id; + + memset(&card_id, 0, sizeof(card_id)); + + hinic_get_all_chip_id((void *)&card_id); + + if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) { + pr_err("Copy chip id to user failed\n"); + return -EFAULT; + } + + return 0; +} + +int nic_ioctl(void *uld_dev, u32 cmd, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + return send_to_nic_driver(uld_dev, cmd, buf_in, + in_size, buf_out, out_size); +} + +static void *__get_dev_support_nic_cmd(struct msg_module *nt_msg, + enum hinic_service_type type) +{ + void *uld_dev = NULL; + + /* set/get qos must use chip_name(hinic0) */ + switch (nt_msg->msg_formate) { + case GET_COS_UP_MAP: + case SET_COS_UP_MAP: + case GET_NUM_COS: + uld_dev = hinic_get_uld_by_chip_name(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Get/set cos_up must use chip_name(hinic0)\n"); + + return uld_dev; + + default: + break; + } + + uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Can not get the uld dev correctly: %s, nic driver may not be registered\n", + nt_msg->device_name); + + return uld_dev; +} + +static void *get_support_uld_dev(struct msg_module *nt_msg, + enum hinic_service_type type) +{ + char *service_name[SERVICE_T_MAX] = {"NIC", "OVS", "ROCE", "TOE", + "IWARP", "FC", "FCOE"}; + void *hwdev = NULL; + void *uld_dev = NULL; + + switch (nt_msg->module) { + case SEND_TO_NIC_DRIVER: + hwdev = hinic_get_hwdev_by_ifname(nt_msg->device_name); + if (!hinic_support_nic(hwdev, NULL)) { + pr_err("Current function doesn't support NIC\n"); + return NULL; + } + return __get_dev_support_nic_cmd(nt_msg, type); + default: + break; + } + + uld_dev = hinic_get_uld_dev_by_ifname(nt_msg->device_name, type); + if (!uld_dev) + pr_err("Can not get the uld dev correctly: %s, %s driver may not be registered\n", + nt_msg->device_name,
service_name[type]); + + return uld_dev; +} + +static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + enum hinic_service_type type; + int ret = 0; + + type = nt_msg->module - SEND_TO_SM; + *out_size = sizeof(struct drv_version_info); + + if (!g_uld_info[type].ioctl) + return ret; + + ret = g_uld_info[type].ioctl(NULL, nt_msg->msg_formate, buf_in, in_size, + buf_out, out_size); + if (ret) + return ret; + + if (copy_to_user(nt_msg->out_buf, buf_out, *out_size)) + return -EFAULT; + + return ret; +} + +static int send_to_service_driver(struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hinic_service_type type; + void *uld_dev; + int ret = -EINVAL; + + if (nt_msg->module == SEND_TO_NIC_DRIVER) + type = SERVICE_T_NIC; + else + type = nt_msg->module - SEND_TO_SM; + + if (type < SERVICE_T_MAX) { + uld_dev = get_support_uld_dev(nt_msg, type); + if (!uld_dev) + return -EINVAL; + + if (g_uld_info[type].ioctl) + ret = g_uld_info[type].ioctl(uld_dev, + nt_msg->msg_formate, + buf_in, in_size, buf_out, + out_size); + } else { + pr_err("Ioctl input module id: %d is incorrect\n", + nt_msg->module); + } + + return ret; +} + +static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UP: + ret = send_to_up(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UCODE: + ret = send_to_ucode(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out, + out_size); + break; + } + + return ret; +} + +static int get_nictool_drv_cap(struct msg_module *nt_msg) +{ + int ret; + u64 support = 0; + + if (nt_msg->len_info.out_buff_len != sizeof(u64)) { + pr_err("Unexpected out buf size from user: %u, expected: %zu\n", + nt_msg->len_info.out_buff_len, sizeof(u64)); + return -EINVAL; + } + + support |= NICTOOL_SUPPORT_API_CSR; + + ret = copy_buf_out_to_user(nt_msg, sizeof(support), &support); + if (ret) + pr_err("%s:%d:: Copy to user failed\n", __func__, __LINE__); + + return ret; +} + +static bool hinic_is_special_handling_cmd(struct msg_module *nt_msg, int *ret) +{ + bool handled = true; + + if (nt_msg->module != SEND_TO_HW_DRIVER) + return false; + + switch (nt_msg->msg_formate) { + case GET_SELF_TEST_RES: + *ret = get_self_test_cmd(nt_msg); + break; + case GET_CHIP_ID: + *ret = get_all_chip_id_cmd(nt_msg); + break; + case GET_PF_DEV_INFO: + *ret = get_pf_dev_info(nt_msg->device_name, nt_msg); + break; + case CMD_FREE_MEM: + *ret = knl_free_mem(nt_msg->device_name, nt_msg); + break; + case GET_CHIP_INFO: + *ret = get_card_func_info(nt_msg->device_name, nt_msg); + break; + case GET_NICTOOL_CAP: + *ret = get_nictool_drv_cap(nt_msg); + break; + default: + handled = false; + break; + } + + return handled; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + void *hwdev; + struct msg_module nt_msg; + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect = 0; + u32 out_size = 0; + u32 in_size = 0; + unsigned int cmd_raw = 0; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + + if
(copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + + /* end with '\0' */ + nt_msg.device_name[IFNAMSIZ - 1] = '\0'; + + cmd_raw = nt_msg.module; + + out_size_expect = nt_msg.len_info.out_buff_len; + in_size = nt_msg.len_info.in_buff_len; + + hinic_tool_cnt_inc(); + + if (hinic_is_special_handling_cmd(&nt_msg, &ret)) + goto out_free_lock; + + if (cmd_raw == HINICADM_FC_DRIVER && + nt_msg.msg_formate == GET_CHIP_ID) + hinic_get_fc_devname(nt_msg.device_name); + + if (!nictool_k_is_cmd_support(cmd_raw, nt_msg.device_name, + nt_msg.up_cmd.up_db.up_api_type)) { + ret = -EFAULT; + goto out_free_lock; + } + + /* get the netdevice */ + hwdev = hinic_get_hwdev_by_ifname(nt_msg.device_name); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", + nt_msg.device_name); + ret = -ENODEV; + goto out_free_lock; + } + + ret = alloc_tmp_buf(hwdev, &nt_msg, in_size, + &buf_in, out_size_expect, &buf_out); + if (ret) { + pr_err("Alloc tmp buff failed\n"); + goto out_free_lock; + } + + out_size = out_size_expect; + + if (nt_msg.msg_formate == GET_DRV_VERSION && + (cmd_raw == HINICADM_FC_DRIVER || cmd_raw == HINICADM_TOE_DRIVER)) { + ret = get_service_drv_version(hwdev, &nt_msg, buf_in, + in_size, buf_out, &out_size); + goto out_free_buf; + } + + ret = nictool_exec_cmd(hwdev, &nt_msg, buf_in, + in_size, buf_out, &out_size); + if (ret) + goto out_free_buf; + + ret = copy_buf_out_to_user(&nt_msg, out_size_expect, buf_out); + if (ret) + pr_err("Copy information to user failed\n"); + +out_free_buf: + free_tmp_buf(hwdev, &nt_msg, buf_in, buf_out); + +out_free_lock: + hinic_tool_cnt_dec(); + + return (long)ret; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hinic_mem_mmap, +}; + +static int if_nictool_exist(void) +{ + struct file *fp = NULL; + int exist = 0; + + fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0); + if (IS_ERR(fp)) { + exist = 0; + } else { + (void)filp_close(fp, NULL); + exist = 1; + } + + return exist; +} + +/** + * hinic_tool_k_init - initialize the hw interface + */ +int hinic_tool_k_init(void) +{ + int ret; + struct device *pdevice; + + if (g_nictool_init_flag) { + g_nictool_ref_cnt++; + /* already initialized */ + return 0; + } + + if (if_nictool_exist()) { + pr_err("Nictool device exists\n"); + return 0; + } + + ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); + if (ret < 0) { + pr_err("Register nictool_dev fail(0x%x)\n", ret); + return ret; + } + + /* Create equipment */ + /*lint -save -e160*/ + g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS); + /*lint -restore*/ + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + ret = -EFAULT; + goto class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret); + goto cdev_add_err; + } + + /* Export device 
information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + ret = -EFAULT; + goto device_create_err; + } + + g_nictool_init_flag = 1; + g_nictool_ref_cnt = 1; + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +void hinic_tool_k_uninit(void) +{ + if (g_nictool_init_flag) { + if ((--g_nictool_ref_cnt)) + return; + } + + g_nictool_init_flag = 0; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) + return; + + cdev_del(&g_nictool_cdev); + device_destroy(g_nictool_class, g_dev_id); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h new file mode 100644 index 0000000000000000000000000000000000000000..4d2fd351b99e46ed831c7dc65ace383dbef67fe9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_NICTOOL_H_ +#define HINIC_NICTOOL_H_ + +#include "hinic_dfx_def.h" +/* completion timeout interval, unit is jiffies*/ +#define UP_COMP_TIME_OUT_VAL 10000U + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +struct ipsurx_stats_info { + u32 addr; + u32 rd_cnt; +}; + +struct ucode_cmd_st { + union { + struct { + u32 comm_mod_type : 8; + u32 ucode_cmd_type : 4; + u32 cmdq_ack_type : 3; + u32 ucode_imm : 1; + u32 len : 16; + } ucode_db; + u32 value; + }; +}; + +struct up_cmd_st { + union { + struct { + u32 comm_mod_type : 8; + u32 chipif_cmd : 8; + u32 up_api_type : 16; + } up_db; + u32 value; + }; +}; + +struct _dcb_data { + u8 wr_flag; + u8 dcb_en; + u8 err; + u8 rsvd; +}; + +union _dcb_ctl { + struct _dcb_data dcb_data; + u32 data; +}; + +struct _pfc_data { + u8 pfc_en; + u8 pfc_priority; + u8 num_of_tc; + u8 err; +}; + +union _pfc { + struct _pfc_data pfc_data; + u32 data; +}; + +union _flag_com { + struct _ets_flag { + u8 flag_ets_enable : 1; + u8 flag_ets_percent : 1; + u8 flag_ets_cos : 1; + u8 flag_ets_strict : 1; + u8 rev : 4; + } ets_flag; + u8 data; +}; + +struct _ets { + u8 ets_en; + u8 err; + u8 strict; + u8 tc[8]; + u8 ets_percent[8]; + union _flag_com flag_com; +}; + +#define API_CMD 0x1 +#define API_CHAIN 0x2 +#define API_CLP 0x3 + +struct msg_module { + char device_name[IFNAMSIZ]; + unsigned int module; + union { + u32 msg_formate; + struct ucode_cmd_st ucode_cmd; + struct up_cmd_st up_cmd; + }; + + struct { + u32 in_buff_len; + u32 out_buff_len; + } len_info; + u32 res; + void *in_buff; + void *out_buf; +}; + +#define MAX_VER_INFO_LEN 128 +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; +}; + +struct chip_fault_stats { + int offset; + u8 chip_faults[MAX_DRV_BUF_SIZE]; +}; + +struct hinic_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct hinic_cos_up_map { + u8 cos_up[HINIC_DCB_UP_MAX]; + u8 num_cos; +}; + +struct hinic_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct hinic_dbg_sq_info { + u16 q_id; + u16 pi; + u16 ci; /* sw_ci */ + u16 fi; /* hw_ci */ + + u32 q_depth; + u16 pi_reverse; + u16 weqbb_size; + + u8 priority; + u16 *ci_addr; + u64 cla_addr; + + void *slq_handle; + + struct hinic_tx_hw_page direct_wqe; + struct hinic_tx_hw_page db_addr; + u32 pg_idx; + + u32 glb_sq_id; +}; + +struct hinic_dbg_rq_info { + u16 q_id; + u16 glb_rq_id; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u16 msix_idx; + u32 msix_vector; +}; + +#define BUSINFO_LEN 32 +struct pf_info { + char name[IFNAMSIZ]; + char bus_info[BUSINFO_LEN]; + u32 pf_type; +}; + +#define MAX_SIZE 16 +struct card_info { + struct pf_info pf[MAX_SIZE]; + u32 pf_num; +}; + +struct nic_card_id { + u32 id[MAX_SIZE]; + u32 num; +}; + +struct func_pdev_info { + u64 bar0_phy_addr; + u64 bar0_size; + u64 rsvd1[4]; +}; + +struct hinic_card_func_info { + u32 num_pf; + u32 rsvd0; + u64 usr_api_phy_addr; + struct func_pdev_info pdev_info[MAX_SIZE]; +}; + +#define MAX_CARD_NUM 64 +extern void *g_hinic_card_node_array[MAX_CARD_NUM]; +extern void *g_hinic_card_vir_addr[MAX_CARD_NUM]; +extern u64 g_hinic_card_phy_addr[MAX_CARD_NUM]; +extern struct mutex g_hinic_addr_lock; +extern int g_hinic_card_id; + +struct hinic_nic_loop_mode { + u32 loop_mode; + u32 
loop_ctrl; +}; + +struct hinic_nic_poll_weight { + int poll_weight; +}; + +enum hinic_homologues_state { + HINIC_HOMOLOGUES_OFF = 0, + HINIC_HOMOLOGUES_ON = 1, +}; + +struct hinic_homologues { + enum hinic_homologues_state homo_state; +}; + +struct hinic_pf_info { + u32 isvalid; + u32 pf_id; +}; + +int hinic_tool_k_init(void); +void hinic_tool_k_uninit(void); + +u32 hinic_get_io_stats_size(struct hinic_nic_dev *nic_dev); +void hinic_get_io_stats(struct hinic_nic_dev *nic_dev, + struct hinic_show_item *items); + +#define TOOL_COUNTER_MAX_LEN 512 + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h new file mode 100644 index 0000000000000000000000000000000000000000..d225e543f68a04facf251293e875b79bdf5b5bf5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_pci_id_tbl.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_PCI_ID_TBL_H +#define HINIC_PCI_ID_TBL_H + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HINIC_DEV_ID_1822_PF 0x1822 +#define HINIC_DEV_ID_1822_VF 0x375E +#define HINIC_DEV_ID_1822_VF_HV 0x379E +#define HINIC_DEV_ID_1822_SMTIO 0x020B +#define HINIC_DEV_ID_1822_PANGEA_100GE 0x0208 +#define HINIC_DEV_ID_1822_PANGEA_TP_10GE 0x0204 +#define HINIC_DEV_ID_1822_KR_40GE 0x020D +#define HINIC_DEV_ID_1822_KR_100GE 0x0205 +#define HINIC_DEV_ID_1822_DUAL_25GE 0x0206 +#define HINIC_DEV_ID_1822_KR_25GE 0x0210 +#define HINIC_DEV_ID_1822_MULTI_HOST 0x0211 +#define HINIC_DEV_ID_1822_100GE 0x0200 +#define HINIC_DEV_ID_1822_100GE_MULTI_HOST 0x0201 + +#define HIFC_DEV_ID_1822_8G 0x0212 +#define HIFC_DEV_ID_1822_16G 0x0203 +#define HIFC_DEV_ID_1822_32G 0x0202 + +#define HIFC_DEV_ID_1822_SMTIO 0x020C + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c deleted file mode 100644 index 4d4e3f05fb5fbff66fc4a9b857c9ff5a3adfca72..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "hinic_hw_if.h" -#include "hinic_hw_dev.h" -#include "hinic_port.h" -#include "hinic_dev.h" - -#define HINIC_MIN_MTU_SIZE 256 -#define HINIC_MAX_JUMBO_FRAME_SIZE 15872 - -enum mac_op { - MAC_DEL, - MAC_SET, -}; - -/** - * change_mac - change(add or delete) mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number to set with the mac - * @op: add or delete the mac - * - * Return 0 - Success, negative - Failure - **/ -static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id, enum mac_op op) -{ - struct net_device *netdev = nic_dev->netdev; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mac_cmd port_mac_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - enum hinic_port_cmd cmd; - u16 out_size; - int err; - - if (vlan_id >= VLAN_N_VID) { - netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n"); - return -EINVAL; - } - - if (op == MAC_SET) - cmd = HINIC_PORT_CMD_SET_MAC; - else - cmd = HINIC_PORT_CMD_DEL_MAC; - - port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - port_mac_cmd.vlan_id = vlan_id; - memcpy(port_mac_cmd.mac, addr, ETH_ALEN); - - err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd, - sizeof(port_mac_cmd), - &port_mac_cmd, &out_size); - if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { - dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n", - port_mac_cmd.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_add_mac - add mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number to set with the mac - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_add_mac(struct hinic_dev *nic_dev, - const u8 *addr, u16 vlan_id) -{ - return change_mac(nic_dev, addr, vlan_id, MAC_SET); -} - -/** - * hinic_port_del_mac - remove mac address - * @nic_dev: nic device - * @addr: mac address - * @vlan_id: vlan number that is connected to the mac - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id) -{ - return change_mac(nic_dev, addr, vlan_id, MAC_DEL); -} - -/** - * hinic_port_get_mac - get the mac address of the nic device - * @nic_dev: nic device - * @addr: returned mac address - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mac_cmd port_mac_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC, - &port_mac_cmd, sizeof(port_mac_cmd), - &port_mac_cmd, &out_size); - if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { - dev_err(&pdev->dev, "Failed to get mac, ret = %d\n", - port_mac_cmd.status); - return -EFAULT; - } - - memcpy(addr, port_mac_cmd.mac, ETH_ALEN); - return 0; -} - -/** - * hinic_port_set_mtu - set mtu - * @nic_dev: nic device - * @new_mtu: new mtu - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) -{ - struct net_device *netdev = nic_dev->netdev; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_mtu_cmd port_mtu_cmd; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - int err, max_frame; - u16 out_size; - 
- if (new_mtu < HINIC_MIN_MTU_SIZE) { - netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size"); - return -EINVAL; - } - - max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) { - netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size"); - return -EINVAL; - } - - port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - port_mtu_cmd.mtu = new_mtu; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, - &port_mtu_cmd, sizeof(port_mtu_cmd), - &port_mtu_cmd, &out_size); - if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) { - dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n", - port_mtu_cmd.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_add_vlan - add vlan to the nic device - * @nic_dev: nic device - * @vlan_id: the vlan number to add - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_vlan_cmd port_vlan_cmd; - - port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - port_vlan_cmd.vlan_id = vlan_id; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN, - &port_vlan_cmd, sizeof(port_vlan_cmd), - NULL, NULL); -} - -/** - * hinic_port_del_vlan - delete vlan from the nic device - * @nic_dev: nic device - * @vlan_id: the vlan number to delete - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_vlan_cmd port_vlan_cmd; - - port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - port_vlan_cmd.vlan_id = vlan_id; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN, - &port_vlan_cmd, sizeof(port_vlan_cmd), - NULL, NULL); -} - -/** - * hinic_port_set_rx_mode - set rx mode in the nic device - * @nic_dev: nic device - * @rx_mode: the rx mode to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_rx_mode_cmd rx_mode_cmd; - - rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); - rx_mode_cmd.rx_mode = rx_mode; - - return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE, - &rx_mode_cmd, sizeof(rx_mode_cmd), - NULL, NULL); -} - -/** - * hinic_port_link_state - get the link state - * @nic_dev: nic device - * @link_state: the returned link state - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_link_state(struct hinic_dev *nic_dev, - enum hinic_port_link_state *link_state) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct hinic_port_link_cmd link_cmd; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, - &link_cmd, sizeof(link_cmd), - &link_cmd, &out_size); - if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) { - dev_err(&pdev->dev, "Failed to get link state, ret = %d\n", - link_cmd.status); - return -EINVAL; - } - - *link_state = link_cmd.state; - return 0; -} - -/** - * hinic_port_set_state - set port state - * @nic_dev: nic device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_state(struct hinic_dev 
*nic_dev, enum hinic_port_state state) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_port_state_cmd port_state; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "unsupported PCI Function type\n"); - return -EINVAL; - } - - port_state.state = state; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE, - &port_state, sizeof(port_state), - &port_state, &out_size); - if (err || (out_size != sizeof(port_state)) || port_state.status) { - dev_err(&pdev->dev, "Failed to set port state, ret = %d\n", - port_state.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_set_func_state- set func device state - * @nic_dev: nic device - * @state: the state to set - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_set_func_state(struct hinic_dev *nic_dev, - enum hinic_func_port_state state) -{ - struct hinic_port_func_state_cmd func_state; - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - func_state.state = state; - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE, - &func_state, sizeof(func_state), - &func_state, &out_size); - if (err || (out_size != sizeof(func_state)) || func_state.status) { - dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n", - func_state.status); - return -EFAULT; - } - - return 0; -} - -/** - * hinic_port_get_cap - get port capabilities - * @nic_dev: nic device - * @port_cap: returned port capabilities - * - * Return 0 - Success, negative - Failure - **/ -int hinic_port_get_cap(struct hinic_dev *nic_dev, - struct hinic_port_cap *port_cap) -{ - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - u16 out_size; - int err; - - port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP, - port_cap, sizeof(*port_cap), - port_cap, &out_size); - if (err || (out_size != sizeof(*port_cap)) || port_cap->status) { - dev_err(&pdev->dev, - "Failed to get port capabilities, ret = %d\n", - port_cap->status); - return -EINVAL; - } - - return 0; -} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h deleted file mode 100644 index 9404365195ddff0679fea6c94d2cc41ddcd90482..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - */ - -#ifndef HINIC_PORT_H -#define HINIC_PORT_H - -#include -#include -#include - -#include "hinic_dev.h" - -enum hinic_rx_mode { - HINIC_RX_MODE_UC = BIT(0), - HINIC_RX_MODE_MC = BIT(1), - HINIC_RX_MODE_BC = BIT(2), - HINIC_RX_MODE_MC_ALL = BIT(3), - HINIC_RX_MODE_PROMISC = BIT(4), -}; - -enum hinic_port_link_state { - HINIC_LINK_STATE_DOWN, - HINIC_LINK_STATE_UP, -}; - -enum hinic_port_state { - HINIC_PORT_DISABLE = 0, - HINIC_PORT_ENABLE = 3, -}; - -enum hinic_func_port_state { - HINIC_FUNC_PORT_DISABLE = 0, - HINIC_FUNC_PORT_ENABLE = 2, -}; - -enum hinic_autoneg_cap { - HINIC_AUTONEG_UNSUPPORTED, - HINIC_AUTONEG_SUPPORTED, -}; - -enum hinic_autoneg_state { - HINIC_AUTONEG_DISABLED, - HINIC_AUTONEG_ACTIVE, -}; - -enum hinic_duplex { - HINIC_DUPLEX_HALF, - HINIC_DUPLEX_FULL, -}; - -enum hinic_speed { - HINIC_SPEED_10MB_LINK = 0, - HINIC_SPEED_100MB_LINK, - HINIC_SPEED_1000MB_LINK, - HINIC_SPEED_10GB_LINK, - HINIC_SPEED_25GB_LINK, - HINIC_SPEED_40GB_LINK, - HINIC_SPEED_100GB_LINK, - - HINIC_SPEED_UNKNOWN = 0xFF, -}; - -struct hinic_port_mac_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 vlan_id; - u16 rsvd1; - unsigned char mac[ETH_ALEN]; -}; - -struct hinic_port_mtu_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd1; - u32 mtu; -}; - -struct hinic_port_vlan_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 vlan_id; -}; - -struct hinic_port_rx_mode_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd; - u32 rx_mode; -}; - -struct hinic_port_link_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u8 state; - u8 rsvd1; -}; - -struct hinic_port_state_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u8 state; - u8 rsvd1[3]; -}; - -struct hinic_port_link_status { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 rsvd1; - u8 link; - u8 rsvd2; -}; - -struct hinic_port_func_state_cmd { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd1; - u8 state; - u8 rsvd2[3]; -}; - -struct hinic_port_cap { - u8 status; - u8 version; - u8 rsvd0[6]; - - u16 func_idx; - u16 rsvd1; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 rsvd2[3]; -}; - -int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id); - -int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, - u16 vlan_id); - -int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr); - -int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu); - -int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id); - -int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id); - -int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode); - -int hinic_port_link_state(struct hinic_dev *nic_dev, - enum hinic_port_link_state *link_state); - -int hinic_port_set_state(struct hinic_dev *nic_dev, - enum hinic_port_state state); - -int hinic_port_set_func_state(struct hinic_dev *nic_dev, - enum hinic_func_port_state state); - -int hinic_port_get_cap(struct hinic_dev *nic_dev, - struct hinic_port_cap *port_cap); - -#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..d720974926a7d50a7eb42f843ac640d8c1722089 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port_cmd.h @@ -0,0 +1,548 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei 
Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __HINIC_PORT_CMD_H__ +#define __HINIC_PORT_CMD_H__ + +/* cmd of mgmt CPU message for NIC module */ +enum hinic_port_cmd { + HINIC_PORT_CMD_VF_REGISTER = 0x0, + /* not defined in base line, only for PFD and VFD */ + HINIC_PORT_CMD_VF_UNREGISTER = 0x1, + /* not defined in base line, only for PFD and VFD */ + + HINIC_PORT_CMD_CHANGE_MTU = 0x2, + + HINIC_PORT_CMD_ADD_VLAN = 0x3, + HINIC_PORT_CMD_DEL_VLAN, + + HINIC_PORT_CMD_SET_PFC = 0x5, + HINIC_PORT_CMD_GET_PFC, + HINIC_PORT_CMD_SET_ETS, + HINIC_PORT_CMD_GET_ETS, + + HINIC_PORT_CMD_SET_MAC = 0x9, + HINIC_PORT_CMD_GET_MAC, + HINIC_PORT_CMD_DEL_MAC, + + HINIC_PORT_CMD_SET_RX_MODE = 0xc, + HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xd, + + HINIC_PORT_CMD_GET_AUTONEG_CAP = 0xf, + /* not defined in base line */ + HINIC_PORT_CMD_GET_AUTONET_STATE, + /* not defined in base line */ + HINIC_PORT_CMD_GET_SPEED, + /* not defined in base line */ + HINIC_PORT_CMD_GET_DUPLEX, + /* not defined in base line */ + HINIC_PORT_CMD_GET_MEDIA_TYPE, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14, + HINIC_PORT_CMD_SET_PAUSE_INFO, + + HINIC_PORT_CMD_GET_LINK_STATE = 0x18, + HINIC_PORT_CMD_SET_LRO = 0x19, + HINIC_PORT_CMD_SET_RX_CSUM = 0x1a, + HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1b, + + HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1c, + HINIC_PORT_CMD_CLEAR_PORT_STATISTICS, + HINIC_PORT_CMD_GET_VPORT_STAT, + HINIC_PORT_CMD_CLEAN_VPORT_STAT, + + HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25, + HINIC_PORT_CMD_SET_RSS_TEMPLATE_INDIR_TBL, + + HINIC_PORT_CMD_SET_PORT_ENABLE = 0x29, + HINIC_PORT_CMD_GET_PORT_ENABLE, + + HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2b, + HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, + HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, + HINIC_PORT_CMD_GET_RSS_CTX_TBL, + HINIC_PORT_CMD_SET_RSS_CTX_TBL, + HINIC_PORT_CMD_RSS_TEMP_MGR, + + /* 0x36 ~ 0x40 have defined in base line */ + + HINIC_PORT_CMD_RSS_CFG = 0x42, + + HINIC_PORT_CMD_GET_PHY_TYPE = 0x44, + HINIC_PORT_CMD_INIT_FUNC = 0x45, + HINIC_PORT_CMD_SET_LLI_PRI = 0x46, + + HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48, + HINIC_PORT_CMD_SET_LOOPBACK_MODE, + + HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4a, + HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE, + + /* 0x4c ~ 0x57 have defined in base line */ + HINIC_PORT_CMD_DISABLE_PROMISC = 0x4c, + HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4e, + HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58, + HINIC_PORT_CMD_GET_BOOT_VERSION, + HINIC_PORT_CMD_GET_MICROCODE_VERSION, + + HINIC_PORT_CMD_GET_PORT_TYPE = 0x5b, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_VPORT_ENABLE = 0x5c, + HINIC_PORT_CMD_SET_VPORT_ENABLE, + + HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5e, + + HINIC_PORT_CMD_SET_LED_TEST = 0x5f, + + HINIC_PORT_CMD_SET_LLI_STATE = 0x60, + HINIC_PORT_CMD_SET_LLI_TYPE, + HINIC_PORT_CMD_GET_LLI_CFG, + + HINIC_PORT_CMD_GET_LRO = 0x63, + + HINIC_PORT_CMD_GET_DMA_CS = 0x64, + HINIC_PORT_CMD_SET_DMA_CS, + + HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66, + + HINIC_PORT_CMD_SET_PFC_MISC = 0x67, + HINIC_PORT_CMD_GET_PFC_MISC, + + HINIC_PORT_CMD_SET_VF_RATE = 0x69, 
+ HINIC_PORT_CMD_SET_VF_VLAN, + HINIC_PORT_CMD_CLR_VF_VLAN, + + /* 0x6c,0x6e have defined in base line */ + HINIC_PORT_CMD_SET_UCAPTURE_OPT = 0x6F, + + HINIC_PORT_CMD_SET_TSO = 0x70, + HINIC_PORT_CMD_SET_PHY_POWER = 0x71, + HINIC_PORT_CMD_UPDATE_FW = 0x72, + HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73, + /* not defined in base line */ + HINIC_PORT_CMD_SET_PFC_THD = 0x75, + /* not defined in base line */ + HINIC_PORT_CMD_SET_PORT_LINK_STATUS = 0x76, + HINIC_PORT_CMD_SET_CGE_PAUSE_TIME_CFG = 0x77, + + HINIC_PORT_CMD_GET_FW_SUPPORT_FLAG = 0x79, + + HINIC_PORT_CMD_SET_PORT_REPORT = 0x7B, + + HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xa0, + + HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xa3, + HINIC_PORT_CMD_UPDATE_MAC = 0xa4, + + HINIC_PORT_CMD_GET_UART_LOG = 0xa5, + HINIC_PORT_CMD_SET_UART_LOG, + + HINIC_PORT_CMD_GET_PORT_INFO = 0xaa, + + HINIC_MISC_SET_FUNC_SF_ENBITS = 0xab, + /* not defined in base line */ + HINIC_MISC_GET_FUNC_SF_ENBITS, + /* not defined in base line */ + + HINIC_PORT_CMD_GET_SFP_INFO = 0xad, + HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xAF, + HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xB0, + HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xB1, + HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xB2, + HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xB3, + HINIC_PORT_CMD_UP_TC_ENABLE = 0xB4, + HINIC_PORT_CMD_UP_TC_GET_TCAM_BLOCK = 0xB5, + + HINIC_PORT_CMD_SET_NETQ = 0xc1, + HINIC_PORT_CMD_ADD_RQ_FILTER = 0xc2, + HINIC_PORT_CMD_DEL_RQ_FILTER = 0xc3, + + HINIC_PORT_CMD_GET_FW_LOG = 0xca, + HINIC_PORT_CMD_SET_IPSU_MAC = 0xcb, + HINIC_PORT_CMD_GET_IPSU_MAC = 0xcc, + + HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4, + + HINIC_PORT_CMD_SET_IQ_ENABLE = 0xd6, + + HINIC_PORT_CMD_GET_LINK_MODE = 0xD9, + HINIC_PORT_CMD_SET_SPEED = 0xDA, + HINIC_PORT_CMD_SET_AUTONEG = 0xDB, + + HINIC_PORT_CMD_CLEAR_SQ_RES = 0xDD, + HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE, + HINIC_PORT_CMD_SET_VF_COS = 0xDF, + HINIC_PORT_CMD_GET_VF_COS = 0xE1, + + HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5, + HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6, + + HINIC_PORT_CMD_SET_PORT_FUNCS_STATE = 0xE7, + HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8, + + HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB, + HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0, + + HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3, + HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4, + + HINIC_PORT_CMD_SET_VHD_CFG = 0xF7, + HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8, + HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9, + HINIC_PORT_CMD_SET_RXQ_LRO_ADPT = 0xFA, + HINIC_PORT_CMD_GET_SFP_ABS = 0xFB, + HINIC_PORT_CMD_Q_FILTER = 0xFC, + HINIC_PORT_CMD_TCAM_FILTER = 0xFE, + HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF, +}; + +/* cmd of mgmt CPU message for HW module */ +enum hinic_mgmt_cmd { + HINIC_MGMT_CMD_RESET_MGMT = 0x0, + HINIC_MGMT_CMD_START_FLR = 0x1, + HINIC_MGMT_CMD_FLUSH_DOORBELL = 0x2, + HINIC_MGMT_CMD_GET_IO_STATUS = 0x3, + HINIC_MGMT_CMD_DMA_ATTR_SET = 0x4, + + HINIC_MGMT_CMD_CMDQ_CTXT_SET = 0x10, + HINIC_MGMT_CMD_CMDQ_CTXT_GET, + + HINIC_MGMT_CMD_VAT_SET = 0x12, + HINIC_MGMT_CMD_VAT_GET, + + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14, + HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_GET, + + HINIC_MGMT_CMD_MQM_FIX_INFO_GET = 0x16, + HINIC_MGMT_CMD_MQM_CFG_INFO_SET = 0x18, + HINIC_MGMT_CMD_MQM_SRCH_GPA_SET = 0x20, + HINIC_MGMT_CMD_PPF_TMR_SET = 0x22, + HINIC_MGMT_CMD_PPF_HT_GPA_SET = 0x23, + HINIC_MGMT_CMD_RES_STATE_SET = 0x24, + HINIC_MGMT_CMD_FUNC_CACHE_OUT = 0x25, + HINIC_MGMT_CMD_FFM_SET = 0x26, + HINIC_MGMT_CMD_SMF_TMR_CLEAR = 0x27, + /* 0x29 not defined in base line, + * only used in open source driver + */ + HINIC_MGMT_CMD_FUNC_RES_CLEAR = 0x29, + + HINIC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32, + + 
HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + + HINIC_MGMT_CMD_VF_RANDOM_ID_SET = 0x36, + HINIC_MGMT_CMD_FAULT_REPORT = 0x37, + HINIC_MGMT_CMD_HEART_LOST_REPORT = 0x38, + + HINIC_MGMT_CMD_VPD_SET = 0x40, + HINIC_MGMT_CMD_VPD_GET, + HINIC_MGMT_CMD_LABEL_SET, + HINIC_MGMT_CMD_LABEL_GET, + HINIC_MGMT_CMD_SATIC_MAC_SET, + HINIC_MGMT_CMD_SATIC_MAC_GET, + HINIC_MGMT_CMD_SYNC_TIME = 0x46, + + HINIC_MGMT_CMD_REG_READ = 0x48, + + HINIC_MGMT_CMD_SET_LED_STATUS = 0x4A, + HINIC_MGMT_CMD_L2NIC_RESET = 0x4b, + HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET = 0x4d, + HINIC_MGMT_CMD_BIOS_NV_DATA_MGMT = 0x4E, + HINIC_MGMT_CMD_ACTIVATE_FW = 0x4F, + HINIC_MGMT_CMD_PAGESIZE_SET = 0x50, + HINIC_MGMT_CMD_PAGESIZE_GET = 0x51, + HINIC_MGMT_CMD_GET_BOARD_INFO = 0x52, + HINIC_MGMT_CMD_WATCHDOG_INFO = 0x56, + HINIC_MGMT_CMD_FMW_ACT_NTC = 0x57, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61, + HINIC_MGMT_CMD_GET_PPF_STATE = 0x63, + HINIC_MGMT_CMD_PCIE_DFX_NTC = 0x65, + HINIC_MGMT_CMD_PCIE_DFX_GET = 0x66, + + HINIC_MGMT_CMD_GET_HOST_INFO = 0x67, + + HINIC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A, + HINIC_MGMT_CMD_HEARTBEAT_SUPPORTED = 0x6B, + HINIC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C, + HINIC_MGMT_CMD_GET_HW_PF_INFOS = 0x6D, + HINIC_MGMT_CMD_GET_SDI_MODE = 0x6E, + + HINIC_MGMT_CMD_ENABLE_MIGRATE = 0x6F, +}; + +/* uCode relates commands */ +enum hinic_ucode_cmd { + HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0, + HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, + HINIC_UCODE_CMD_ARM_SQ, + HINIC_UCODE_CMD_ARM_RQ, + HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE, + HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, + HINIC_UCODE_CMD_SET_IQ_ENABLE, + HINIC_UCODE_CMD_SET_RQ_FLUSH = 10 +}; + +/* software cmds, vf->pf and multi-host */ +enum hinic_sw_funcs_cmd { + HINIC_SW_CMD_SLAVE_HOST_PPF_REGISTER = 0x0, + HINIC_SW_CMD_SLAVE_HOST_PPF_UNREGISTER = 0x1, + HINIC_SW_CMD_GET_SLAVE_FUNC_NIC_STATE = 0x2, + HINIC_SW_CMD_SET_SLAVE_FUNC_NIC_STATE = 0x3, + HINIC_SW_CMD_SEND_MSG_TO_VF = 0x4, + HINIC_SW_CMD_MIGRATE_READY = 0x5, +}; + +enum sq_l4offload_type { + OFFLOAD_DISABLE = 0, + TCP_OFFLOAD_ENABLE = 1, + SCTP_OFFLOAD_ENABLE = 2, + UDP_OFFLOAD_ENABLE = 3, +}; + +enum sq_vlan_offload_flag { + VLAN_OFFLOAD_DISABLE = 0, + VLAN_OFFLOAD_ENABLE = 1, +}; + +enum sq_pkt_parsed_flag { + PKT_NOT_PARSED = 0, + PKT_PARSED = 1, +}; + +enum sq_l3_type { + UNKNOWN_L3TYPE = 0, + IPV6_PKT = 1, + IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, + IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, +}; + +enum sq_md_type { + UNKNOWN_MD_TYPE = 0, +}; + +enum sq_l2type { + ETHERNET = 0, +}; + +enum sq_tunnel_l4_type { + NOT_TUNNEL, + TUNNEL_UDP_NO_CSUM, + TUNNEL_UDP_CSUM, +}; + +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +#define HINIC_RSS_TYPE_VALID_SHIFT 23 +#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define HINIC_RSS_TYPE_IPV6_SHIFT 27 +#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define HINIC_RSS_TYPE_IPV4_SHIFT 29 +#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define HINIC_RSS_TYPE_SET(val, member) \ + (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT) + +#define HINIC_RSS_TYPE_GET(val, member) \ + (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1) + +enum hinic_speed { + HINIC_SPEED_10MB_LINK = 0, + HINIC_SPEED_100MB_LINK, + HINIC_SPEED_1000MB_LINK, + HINIC_SPEED_10GB_LINK, + HINIC_SPEED_25GB_LINK, + 
HINIC_SPEED_40GB_LINK, + HINIC_SPEED_100GB_LINK, + HINIC_SPEED_UNKNOWN = 0xFF, +}; + +/* In order to adapt different linux version */ +enum { + HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ +}; + +#define HINIC_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HINIC_AF0_P2P_IDX_SHIFT 10 +#define HINIC_AF0_PCI_INTF_IDX_SHIFT 14 +#define HINIC_AF0_VF_IN_PF_SHIFT 16 +#define HINIC_AF0_FUNC_TYPE_SHIFT 24 + +#define HINIC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF +#define HINIC_AF0_P2P_IDX_MASK 0xF +#define HINIC_AF0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_AF0_VF_IN_PF_MASK 0xFF +#define HINIC_AF0_FUNC_TYPE_MASK 0x1 + +#define HINIC_AF0_GET(val, member) \ + (((val) >> HINIC_AF0_##member##_SHIFT) & HINIC_AF0_##member##_MASK) + +#define HINIC_AF1_PPF_IDX_SHIFT 0 +#define HINIC_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HINIC_AF1_CEQS_PER_FUNC_SHIFT 12 +#define HINIC_AF1_IRQS_PER_FUNC_SHIFT 20 +#define HINIC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24 +#define HINIC_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HINIC_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HINIC_AF1_PPF_IDX_MASK 0x1F +#define HINIC_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC_AF1_CEQS_PER_FUNC_MASK 0x7 +#define HINIC_AF1_IRQS_PER_FUNC_MASK 0xF +#define HINIC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HINIC_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HINIC_AF1_GET(val, member) \ + (((val) >> HINIC_AF1_##member##_SHIFT) & HINIC_AF1_##member##_MASK) + +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_SHIFT 16 +#define HINIC_AF2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF + +#define HINIC_AF2_GET(val, member) \ + (((val) >> HINIC_AF2_##member##_SHIFT) & HINIC_AF2_##member##_MASK) + +#define HINIC_AF4_OUTBOUND_CTRL_SHIFT 0 +#define HINIC_AF4_DOORBELL_CTRL_SHIFT 1 +#define HINIC_AF4_OUTBOUND_CTRL_MASK 0x1 +#define HINIC_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HINIC_AF4_GET(val, member) \ + (((val) >> HINIC_AF4_##member##_SHIFT) & HINIC_AF4_##member##_MASK) + +#define HINIC_AF4_SET(val, member) \ + (((val) & HINIC_AF4_##member##_MASK) << HINIC_AF4_##member##_SHIFT) + +#define HINIC_AF4_CLEAR(val, member) \ + ((val) & (~(HINIC_AF4_##member##_MASK << \ + HINIC_AF4_##member##_SHIFT))) + +#define HINIC_AF5_PF_STATUS_SHIFT 0 +#define HINIC_AF5_PF_STATUS_MASK 0xFFFF + +#define HINIC_AF5_SET(val, member) \ + (((val) & HINIC_AF5_##member##_MASK) << HINIC_AF5_##member##_SHIFT) + +#define HINIC_AF5_GET(val, member) \ + (((val) >> HINIC_AF5_##member##_SHIFT) & HINIC_AF5_##member##_MASK) + +#define HINIC_AF5_CLEAR(val, member) \ + ((val) & (~(HINIC_AF5_##member##_MASK << \ + HINIC_AF5_##member##_SHIFT))) + +#define HINIC_PPF_ELECTION_IDX_SHIFT 0 + +#define HINIC_PPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_PPF_ELECTION_SET(val, member) \ + (((val) & HINIC_PPF_ELECTION_##member##_MASK) << \ + HINIC_PPF_ELECTION_##member##_SHIFT) + +#define HINIC_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ + HINIC_PPF_ELECTION_##member##_MASK) + +#define HINIC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ + << HINIC_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC_MPF_ELECTION_IDX_SHIFT 0 + +#define HINIC_MPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_MPF_ELECTION_SET(val, member) \ + (((val) & HINIC_MPF_ELECTION_##member##_MASK) << \ + HINIC_MPF_ELECTION_##member##_SHIFT) + +#define HINIC_MPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_MPF_ELECTION_##member##_SHIFT) & \ + HINIC_MPF_ELECTION_##member##_MASK) 
+ +#define HINIC_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_MPF_ELECTION_##member##_MASK \ + << HINIC_MPF_ELECTION_##member##_SHIFT))) + +#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define HINIC_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HINIC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HINIC_IS_PF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PF) +#define HINIC_IS_VF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_VF) +#define HINIC_IS_PPF(dev) (HINIC_FUNC_TYPE(dev) == TYPE_PPF) + +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HINIC_DB_PAGE_SIZE)) + +enum hinic_pcie_nosnoop { + HINIC_PCIE_SNOOP = 0, + HINIC_PCIE_NO_SNOOP = 1, +}; + +enum hinic_pcie_tph { + HINIC_PCIE_TPH_DISABLE = 0, + HINIC_PCIE_TPH_ENABLE = 1, +}; + +enum hinic_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hinic_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +enum hinic_pf_status { + HINIC_PF_STATUS_INIT = 0x0, + HINIC_PF_STATUS_ACTIVE_FLAG = 0x11, + HINIC_PF_STATUS_FLR_START_FLAG = 0x12, + HINIC_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128 */ +#define HINIC_DB_DWQE_SIZE 0x00080000 +/* BMGW & VMGW VF db size 256k, have no dwqe space */ +#define HINIC_GW_VF_DB_SIZE 0x00040000 + +/* db/dwqe page size: 4K */ +#define HINIC_DB_PAGE_SIZE 0x00001000ULL + +#define HINIC_DB_MAX_AREAS (HINIC_DB_DWQE_SIZE / HINIC_DB_PAGE_SIZE) + +#define HINIC_PCI_MSIX_ENTRY_SIZE 16 +#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +#endif /* __HINIC_PORT_CMD_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h new file mode 100644 index 0000000000000000000000000000000000000000..00863a77ef4d5e19d63eccd41ec3f36927613bd5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_qe_def.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __HINIC_QE_DEF_H__ +#define __HINIC_QE_DEF_H__ + +#define HINIC_SQ_WQEBB_SIZE 64 +#define HINIC_RQ_WQE_SIZE 32 +#define HINIC_SQ_WQEBB_SHIFT 6 +#define HINIC_RQ_WQEBB_SHIFT 5 + +#define HINIC_MAX_QUEUE_DEPTH 4096 +#define HINIC_MIN_QUEUE_DEPTH 128 +#define HINIC_TXD_ALIGN 1 +#define HINIC_RXD_ALIGN 1 + +#define HINIC_SQ_DEPTH 1024 +#define HINIC_RQ_DEPTH 1024 + +#define HINIC_RQ_WQE_MAX_SIZE 32 + +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8) >> 3)//lint !e767 + +/************** SQ_CTRL ***************/ +#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define SQ_CTRL_LEN_SHIFT 29 +#define SQ_CTRL_OWNER_SHIFT 31 + +#define SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define SQ_CTRL_TASKSECT_LEN_MASK 0x1FU +#define SQ_CTRL_DATA_FORMAT_MASK 0x1U +#define SQ_CTRL_LEN_MASK 0x3U +#define SQ_CTRL_OWNER_MASK 0x1U + +#define SQ_CTRL_GET(val, member) (((val) >> SQ_CTRL_##member##_SHIFT) \ + & SQ_CTRL_##member##_MASK) + +#define SQ_CTRL_CLEAR(val, member) ((val) & \ + (~(SQ_CTRL_##member##_MASK << \ + SQ_CTRL_##member##_SHIFT))) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 +#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU +#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) \ + << SQ_CTRL_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) \ + & SQ_CTRL_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) + +#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 +#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT 8 +#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 +#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 +#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 +#define SQ_TASK_INFO0_UFO_AVD_SHIFT 14 +#define SQ_TASK_INFO0_TSO_UFO_SHIFT 15 +#define SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 + +#define SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFFU +#define SQ_TASK_INFO0_L4OFFLOAD_MASK 0x3U +#define SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3U +#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1U +#define SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1U +#define SQ_TASK_INFO0_UFO_AVD_MASK 0x1U +#define SQ_TASK_INFO0_TSO_UFO_MASK 0x1U +#define SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFFU + +#define SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ + SQ_TASK_INFO0_##member##_SHIFT) +#define SQ_TASK_INFO0_GET(val, member) \ + (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ + SQ_TASK_INFO0_##member##_MASK) + +#define SQ_TASK_INFO1_MD_TYPE_SHIFT 8 +#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16 +#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24 + +#define SQ_TASK_INFO1_MD_TYPE_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFFU + +#define SQ_TASK_INFO1_SET(val, member) \ + (((val) & SQ_TASK_INFO1_##member##_MASK) << \ + 
SQ_TASK_INFO1_##member##_SHIFT) +#define SQ_TASK_INFO1_GET(val, member) \ + (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ + SQ_TASK_INFO1_##member##_MASK) + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0 +#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8 +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16 +#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24 + +#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFFU +#define SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFFU +#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7U +#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3U + +#define SQ_TASK_INFO2_SET(val, member) \ + (((val) & SQ_TASK_INFO2_##member##_MASK) << \ + SQ_TASK_INFO2_##member##_SHIFT) +#define SQ_TASK_INFO2_GET(val, member) \ + (((val) >> SQ_TASK_INFO2_##member##_SHIFT) & \ + SQ_TASK_INFO2_##member##_MASK) + +#define SQ_TASK_INFO4_L2TYPE_SHIFT 31 + +#define SQ_TASK_INFO4_L2TYPE_MASK 0x1U + +#define SQ_TASK_INFO4_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO4_##member##_MASK) << \ + SQ_TASK_INFO4_##member##_SHIFT) + +/********************* SQ_DB *********************/ +#define SQ_DB_OFF 0x00000800 +#define SQ_DB_INFO_HI_PI_SHIFT 0 +#define SQ_DB_INFO_QID_SHIFT 8 +#define SQ_DB_INFO_CFLAG_SHIFT 23 +#define SQ_DB_INFO_COS_SHIFT 24 +#define SQ_DB_INFO_TYPE_SHIFT 27 +#define SQ_DB_INFO_HI_PI_MASK 0xFFU +#define SQ_DB_INFO_QID_MASK 0x3FFU +#define SQ_DB_INFO_CFLAG_MASK 0x1U +#define SQ_DB_INFO_COS_MASK 0x7U +#define SQ_DB_INFO_TYPE_MASK 0x1FU +#define SQ_DB_INFO_SET(val, member) \ + (((u32)(val) & SQ_DB_INFO_##member##_MASK) << \ + SQ_DB_INFO_##member##_SHIFT) + +#define SQ_DB_PI_LOW_MASK 0xFF +#define SQ_DB_PI_LOW(pi) ((pi) & SQ_DB_PI_LOW_MASK) +#define SQ_DB_PI_HI_SHIFT 8 +#define SQ_DB_PI_HIGH(pi) ((pi) >> SQ_DB_PI_HI_SHIFT) +#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_addr + SQ_DB_OFF) + \ + SQ_DB_PI_LOW(pi)) +#define SQ_DB 1 +#define SQ_CFLAG_DP 0 /* CFLAG_DATA_PATH */ + +/*********************** RQ_CTRL ******************/ +#define RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 +#define RQ_CTRL_COMPLETE_LEN_SHIFT 27 +#define RQ_CTRL_LEN_SHIFT 29 + +#define RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFFU +#define RQ_CTRL_COMPLETE_FORMAT_MASK 0x1U +#define RQ_CTRL_COMPLETE_LEN_MASK 0x3U +#define RQ_CTRL_LEN_MASK 0x3U + +#define RQ_CTRL_SET(val, member) \ + (((val) & RQ_CTRL_##member##_MASK) << \ + RQ_CTRL_##member##_SHIFT) + +#define RQ_CTRL_GET(val, member) \ + (((val) >> RQ_CTRL_##member##_SHIFT) & \ + RQ_CTRL_##member##_MASK) + +#define RQ_CTRL_CLEAR(val, member) \ + ((val) & (~(RQ_CTRL_##member##_MASK << \ + RQ_CTRL_##member##_SHIFT))) + +#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_CQE_STATUS_FLUSH_MASK 0x1U + +#define RQ_CQE_STATUS_GET(val, member) \ + (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ + RQ_CQE_STATUS_##member##_MASK) + +#define RQ_CQE_STATUS_CLEAR(val, member) \ + ((val) & (~(RQ_CQE_STATUS_##member##_MASK << \ + RQ_CQE_STATUS_##member##_SHIFT))) + +#define RQ_CQE_SGE_VLAN_SHIFT 0 +#define RQ_CQE_SGE_LEN_SHIFT 16 + +#define 
RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define RQ_CQE_SGE_GET(val, member) \ + (((val) >> RQ_CQE_SGE_##member##_SHIFT) & \ + RQ_CQE_SGE_##member##_MASK) + +#define RQ_CQE_PKT_NUM_SHIFT 1 +#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 +#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 + +#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_NUM_MASK 0x1FU +#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 + +#define RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & \ + RQ_CQE_PKT_##member##_MASK) +#define HINIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> RQ_CQE_##member##_SHIFT) & \ + RQ_CQE_##member##_MASK) +#define HINIC_GET_SUPER_CQE_EN(pkt_info) \ + RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +#define HINIC_GET_SUPER_CQE_EN_BE(pkt_info) ((pkt_info) & 0x1000000U) +#define RQ_CQE_PKT_LEN_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & \ + RQ_CQE_PKT_##member##_MASK) + +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU + +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U + +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define RQ_CQE_PKT_TYPES_NON_L2_MASK 0x800U +#define RQ_CQE_PKT_TYPES_L2_MASK 0x7FU + +#define RQ_CQE_STATUS_CSUM_BYPASS_VAL 0x80 +#define RQ_CQE_STATUS_CSUM_ERR_IP_MASK 0x31U +#define RQ_CQE_STATUS_CSUM_ERR_L4_MASK 0x4EU + +#define SECT_SIZE_BYTES(size) ((size) << 3) + +#define HINIC_PF_SET_VF_ALREADY 0x4 +#define HINIC_MGMT_STATUS_EXIST 0x6 + +#define WQS_BLOCKS_PER_PAGE 4 + +#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \ + ((wq)->num_q_pages - 1)) + +#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \ + ((idx) & ((wq)->num_wqebbs_per_page - 1))) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_PAGE_ADDR_SIZE_SHIFT 3 +#define WQ_PAGE_ADDR(wq, idx) \ + (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \ + (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))) + +#define WQ_BLOCK_SIZE 4096UL +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT) + +#define CMDQ_BLOCKS_PER_PAGE 8 +#define CMDQ_BLOCK_SIZE 512UL +#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \ + CMDQ_BLOCK_SIZE), PAGE_SIZE) + +#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff)) +#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff)) + +#define WQ_BASE_VADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \ + + (u64)(wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + (((u64)((cmdq_pages)->cmdq_page_paddr)) \ + + 
(u64)(wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQE_SHADOW_PAGE(wq, wqe) \ + (u16)(((ulong)(wqe) - (ulong)(wq)->shadow_wqe) \ + / (wq)->max_wqe_size) + +#define WQE_IN_RANGE(wqe, start, end) \ + (((ulong)(wqe) >= (ulong)(start)) && \ + ((ulong)(wqe) < (ulong)(end))) + +#define WQ_NUM_PAGES(num_wqs) \ + (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE) + +/* Qe buffer relates define */ +enum hinic_rx_buf_size { + HINIC_RX_BUF_SIZE_32B = 0x20, + HINIC_RX_BUF_SIZE_64B = 0x40, + HINIC_RX_BUF_SIZE_96B = 0x60, + HINIC_RX_BUF_SIZE_128B = 0x80, + HINIC_RX_BUF_SIZE_192B = 0xC0, + HINIC_RX_BUF_SIZE_256B = 0x100, + HINIC_RX_BUF_SIZE_384B = 0x180, + HINIC_RX_BUF_SIZE_512B = 0x200, + HINIC_RX_BUF_SIZE_768B = 0x300, + HINIC_RX_BUF_SIZE_1K = 0x400, + HINIC_RX_BUF_SIZE_1_5K = 0x600, + HINIC_RX_BUF_SIZE_2K = 0x800, + HINIC_RX_BUF_SIZE_3K = 0xC00, + HINIC_RX_BUF_SIZE_4K = 0x1000, + HINIC_RX_BUF_SIZE_8K = 0x2000, + HINIC_RX_BUF_SIZE_16K = 0x4000, +}; + +enum ppf_tmr_status { + HINIC_PPF_TMR_FLAG_STOP, + HINIC_PPF_TMR_FLAG_START, +}; + +enum hinic_res_state { + HINIC_RES_CLEAN = 0, + HINIC_RES_ACTIVE = 1, +}; + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + +#define BUF_DESC_SIZE_SHIFT 4 + +#define HINIC_SQ_WQE_SIZE(num_sge) \ + (sizeof(struct hinic_sq_ctrl) + \ + sizeof(struct hinic_sq_task) + \ + (u32)((num_sge) << BUF_DESC_SIZE_SHIFT)) + +#define HINIC_SQ_WQEBB_CNT(num_sge) \ + (int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \ + HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT) + +#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define HINIC_GET_RSS_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define HINIC_GET_PKT_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) + +#define HINIC_GET_RX_PKT_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) + +#define HINIC_GET_RX_PKT_UMBCAST(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define HINIC_GET_RX_VLAN_TAG(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define HINIC_GET_RX_PKT_LEN(vlan_len) \ + RQ_CQE_SGE_GET(vlan_len, LEN) + +#define HINIC_GET_RX_CSUM_ERR(status) \ + RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define HINIC_GET_RX_DONE(status) \ + RQ_CQE_STATUS_GET(status, RXDONE) + +#define HINIC_GET_RX_FLUSH(status) \ + RQ_CQE_STATUS_GET(status, FLUSH) + +#define HINIC_GET_RX_BP_EN(status) \ + RQ_CQE_STATUS_GET(status, BP_EN) + +#define HINIC_GET_RX_NUM_LRO(status) \ + RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define HINIC_PKT_TYPES_UNKNOWN(pkt_types) \ + ((pkt_types) & RQ_CQE_PKT_TYPES_NON_L2_MASK) + +#define HINIC_PKT_TYPES_L2(pkt_types) \ + ((pkt_types) & RQ_CQE_PKT_TYPES_L2_MASK) + +#define HINIC_CSUM_ERR_BYPASSED(csum_err) \ + ((csum_err) == RQ_CQE_STATUS_CSUM_BYPASS_VAL) + +#define HINIC_CSUM_ERR_IP(csum_err) \ + ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_IP_MASK) + +#define HINIC_CSUM_ERR_L4(csum_err) \ + ((csum_err) & RQ_CQE_STATUS_CSUM_ERR_L4_MASK) + +#define TX_MSS_DEFAULT 0x3E00 +#define TX_MSS_MIN 0x50 + +enum sq_wqe_type { + SQ_NORMAL_WQE = 0, +}; + +enum rq_completion_fmt { + RQ_COMPLETE_SGE = 1 +}; + +#endif /* __HINIC_QE_DEF_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_qp.c new file mode 100644 index 
0000000000000000000000000000000000000000..c61df2f96283e80359f1ccbba22f8eadb039a915 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_nic_io.h" +#include "hinic_qp.h" + +#define BUF_DESC_SHIFT 1 +#define BUF_DESC_SIZE(nr_descs) (((u32)nr_descs) << BUF_DESC_SHIFT) + +void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info, + int nr_descs, u8 owner) +{ + u32 ctrl_size, task_size, bufdesc_size; + + ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); + task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); + bufdesc_size = BUF_DESC_SIZE(nr_descs); + + ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | + SQ_CTRL_SET(task_size, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(ctrl_size, LEN) | + SQ_CTRL_SET(owner, OWNER); + + ctrl->ctrl_fmt = be32_to_cpu(ctrl->ctrl_fmt); + + ctrl->queue_info = queue_info; + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) { + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) { + /* mss should not less than 80 */ + ctrl->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info, + MSS); + ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); + } + ctrl->queue_info = be32_to_cpu(ctrl->queue_info); +} + +int hinic_get_rx_done(struct hinic_rq_cqe *cqe) +{ + u32 status; + int rx_done; + + status = be32_to_cpu(cqe->status); + + rx_done = RQ_CQE_STATUS_GET(status, RXDONE); + if (!rx_done) + return 0; + + return 1; +} + +void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old) +{ + u32 status; + + status = RQ_CQE_STATUS_CLEAR(status_old, RXDONE); + + cqe->status = cpu_to_be32(status); + + /* Make sure Rxdone has been set */ + wmb(); +} + +int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe) +{ + u32 pkt_info; + int super_cqe_en; + + pkt_info = be32_to_cpu(cqe->pkt_info); + + super_cqe_en = RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN); + if (!super_cqe_en) + return 0; + + return 1; +} + +u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe) +{ + u32 vlan_len = be32_to_cpu(cqe->vlan_len); + + return RQ_CQE_SGE_GET(vlan_len, LEN); +} + +u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe) +{ + u32 pkt_num = be32_to_cpu(cqe->pkt_info); + + return RQ_CQE_PKT_NUM_GET(pkt_num, NUM); +} + +u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, + bool last) +{ + u32 pkt_len = be32_to_cpu(cqe->pkt_info); + + if (!last) + return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN); + else + return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN); +} + +void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr, + dma_addr_t cqe_dma) +{ + struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe; + struct 
hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; + struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; + struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; + u32 rq_ceq_len = sizeof(struct hinic_rq_cqe); + + ctrl->ctrl_fmt = + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) | + RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) | + RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); + + hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len); + + buf_desc->addr_high = upper_32_bits(buf_addr); + buf_desc->addr_low = lower_32_bits(buf_addr); +} + +void hinic_set_cs_inner_l4(struct hinic_sq_task *task, + u32 *queue_info, + enum sq_l4offload_type l4_offload, + u32 l4_len, u32 offset) +{ + u32 tcp_udp_cs = 0, sctp = 0; + u32 mss = TX_MSS_DEFAULT; + + /* tcp_udp_cs should be setted to calculate outter checksum when vxlan + * packets without inner l3 and l4 + */ + if (unlikely(l4_offload == SCTP_OFFLOAD_ENABLE)) + sctp = 1; + else + tcp_udp_cs = 1; + + task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD); + task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) | + SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) | + SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP); + + *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS); + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); +} + +void hinic_set_tso_inner_l4(struct hinic_sq_task *task, + u32 *queue_info, + enum sq_l4offload_type l4_offload, + u32 l4_len, + u32 offset, u32 ip_ident, u32 mss) +{ + u32 tso = 0, ufo = 0; + + if (l4_offload == TCP_OFFLOAD_ENABLE) + tso = 1; + else if (l4_offload == UDP_OFFLOAD_ENABLE) + ufo = 1; + + task->ufo_v6_identify = be32_to_cpu(ip_ident); + /* just keep the same code style here */ + + task->pkt_info0 |= SQ_TASK_INFO0_SET(l4_offload, L4OFFLOAD); + task->pkt_info0 |= SQ_TASK_INFO0_SET(tso || ufo, TSO_UFO); + task->pkt_info1 |= SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset, PLDOFF) | + SQ_CTRL_QUEUE_INFO_SET(tso, TSO) | + SQ_CTRL_QUEUE_INFO_SET(ufo, UFO) | + SQ_CTRL_QUEUE_INFO_SET(!!l4_offload, TCPUDP_CS); + /* cs must be calculate by hw if tso is enable */ + + *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS); + /* qsf was initialized in prepare_sq_wqe */ + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); +} + +void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, + u32 *queue_info, + u16 vlan_tag, u16 vlan_pri) +{ + task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI); +} + +void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len) +{ + task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_qp.h new file mode 100644 index 0000000000000000000000000000000000000000..005aaf1304403052843a0de8b0a75dc2dd29fdd6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_qp.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_QP_H +#define HINIC_QP_H + +#include "hinic_qe_def.h" +#include "hinic_port_cmd.h" + +/* frags and linner */ +#define HINIC_MAX_SQ_BUFDESCS (MAX_SKB_FRAGS + 1) +#define HINIC_MAX_SQ_SGE 17 +#define HINIC_MAX_SKB_NR_FRAGE (HINIC_MAX_SQ_SGE - 1) +#define HINIC_GSO_MAX_SIZE 65536 + +struct hinic_sq_ctrl { + u32 ctrl_fmt; + u32 queue_info; +}; + +struct hinic_sq_task { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 ufo_v6_identify; + u32 pkt_info4; + u32 rsvd5; +}; + +struct hinic_sq_bufdesc { + u32 hi_addr; + u32 lo_addr; + u32 len; + u32 rsvd; +}; + +struct hinic_sq_wqe { + struct hinic_sq_ctrl ctrl; + struct hinic_sq_task task; + struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS]; +}; + +struct hinic_rq_ctrl { + u32 ctrl_fmt; +}; + +struct hinic_rq_cqe { + u32 status; + u32 vlan_len; + + u32 offload_type; + u32 hash_val; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 pkt_info; +}; + +struct hinic_rq_cqe_sect { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_rq_bufdesc { + u32 addr_high; + u32 addr_low; +}; + +struct hinic_rq_wqe { + struct hinic_rq_ctrl ctrl; + u32 rsvd; + struct hinic_rq_cqe_sect cqe_sect; + struct hinic_rq_bufdesc buf_desc; +}; + +void hinic_prepare_sq_ctrl(struct hinic_sq_ctrl *ctrl, u32 queue_info, + int nr_descs, u8 owner); + +u32 hinic_get_pkt_len(struct hinic_rq_cqe *cqe); + +int hinic_get_super_cqe_en(struct hinic_rq_cqe *cqe); + +u32 hinic_get_pkt_len_for_super_cqe(struct hinic_rq_cqe *cqe, bool last); + +u32 hinic_get_pkt_num(struct hinic_rq_cqe *cqe); + +int hinic_get_rx_done(struct hinic_rq_cqe *cqe); + +void hinic_clear_rx_done(struct hinic_rq_cqe *cqe, u32 status_old); + +void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr, + dma_addr_t cqe_dma); + +static inline void hinic_task_set_outter_l3(struct hinic_sq_task *task, + enum sq_l3_type l3_type, + u32 network_len) +{ + task->pkt_info2 |= SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) | + SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN); +} + +static inline void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, + enum sq_tunnel_l4_type l4_type, + u32 tunnel_len) +{ + task->pkt_info2 |= SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) | + SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN); +} + +static inline void hinic_task_set_inner_l3(struct hinic_sq_task *task, + enum sq_l3_type l3_type, + u32 network_len) +{ + task->pkt_info0 |= SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE); + task->pkt_info1 |= SQ_TASK_INFO1_SET(network_len, INNER_L3LEN); +} + +void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info, + enum sq_l4offload_type l4_offload, + u32 l4_len, u32 offset); + +void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info, + enum sq_l4offload_type l4_offload, u32 l4_len, + u32 offset, u32 ip_ident, u32 mss); + +void hinic_set_vlan_tx_offload(struct hinic_sq_task *task, u32 *queue_info, + u16 vlan_tag, u16 vlan_pri); + +void hinic_task_set_tx_offload_valid(struct hinic_sq_task *task, u32 l2hdr_len); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 4c0f7eda1166c5df202c3b9a71cc2e43516531fb..019cd439ce696ee64535083cdb2f4d66febc2df7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,57 +13,362 @@ * */ -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_io.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_qp.h" #include "hinic_rx.h" -#include "hinic_dev.h" -#define RX_IRQ_NO_PENDING 0 -#define RX_IRQ_NO_COALESC 0 -#define RX_IRQ_NO_LLI_TIMER 0 -#define RX_IRQ_NO_CREDIT 0 -#define RX_IRQ_NO_RESEND_TIMER 0 +static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev); + +#define HINIC_RX_HDR_SIZE 256 +#define HINIC_RX_IPV6_PKT 7 +#define HINIC_RX_VXLAN_PKT 0xb -/** - * hinic_rxq_clean_stats - Clean the statistics of specific queue - * @rxq: Logical Rx Queue - **/ -void hinic_rxq_clean_stats(struct hinic_rxq *rxq) +#define RXQ_STATS_INC(rxq, field) \ +{ \ + u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \ + (rxq)->rxq_stats.field++; \ + u64_stats_update_end(&(rxq)->rxq_stats.syncp); \ +} + +static bool rx_alloc_mapped_page(struct hinic_rxq *rxq, + struct hinic_rx_info *rx_info) { - struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->pkts = 0; - rxq_stats->bytes = 0; - u64_stats_update_end(&rxq_stats->syncp); + struct page *page = rx_info->page; + dma_addr_t dma = rx_info->buf_dma_addr; + + if (likely(dma)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(nic_dev->page_order); + if (unlikely(!page)) { + RXQ_STATS_INC(rxq, alloc_rx_buf_err); + return false; + } + + /* map page for use */ + dma = dma_map_page(&pdev->dev, page, 0, rxq->dma_rx_buff_size, + DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (unlikely(dma_mapping_error(&pdev->dev, dma))) { + RXQ_STATS_INC(rxq, map_rx_buf_err); + __free_pages(page, nic_dev->page_order); + return false; + } + + rx_info->page = page; + rx_info->buf_dma_addr = dma; + rx_info->page_offset = 0; + + return true; +} + +static int hinic_rx_fill_wqe(struct hinic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rq_wqe *rq_wqe; + struct hinic_rx_info *rx_info; + dma_addr_t dma_addr = 0; + u16 pi = 0; + int rq_wqe_len; + int i; + + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + + rq_wqe = hinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi); + if (!rq_wqe) { + nicif_err(nic_dev, drv, netdev, "Failed to get rq wqe, rxq id: %d, wqe id: %d\n", + rxq->q_id, i); + break; + } + + hinic_prepare_rq_wqe(rq_wqe, pi, dma_addr, rx_info->cqe_dma); + + rq_wqe_len = sizeof(struct 
hinic_rq_wqe); + hinic_cpu_to_be32(rq_wqe, rq_wqe_len); + rx_info->rq_wqe = rq_wqe; + } + + hinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, rxq->q_depth); + + return i; } -/** - * hinic_rxq_get_stats - get statistics of Rx Queue - * @rxq: Logical Rx Queue - * @stats: return updated stats here - **/ -void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) +static int hinic_rx_fill_buffers(struct hinic_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rq_wqe *rq_wqe; + struct hinic_rx_info *rx_info; + dma_addr_t dma_addr; + int i; + int free_wqebbs = rxq->delta - 1; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + if (unlikely(!rx_alloc_mapped_page(rxq, rx_info))) + break; + + dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; + + rq_wqe = rx_info->rq_wqe; + + rq_wqe->buf_desc.addr_high = + cpu_to_be32(upper_32_bits(dma_addr)); + rq_wqe->buf_desc.addr_low = + cpu_to_be32(lower_32_bits(dma_addr)); + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + /* Write all the wqes before pi update */ + wmb(); + + hinic_update_rq_hw_pi(nic_dev->hwdev, rxq->q_id, + rxq->next_to_update); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } else if (free_wqebbs == rxq->q_depth - 1) { + RXQ_STATS_INC(rxq, rx_buf_empty); + } + + return i; +} + +void hinic_rx_free_buffers(struct hinic_rxq *rxq) +{ + u16 i; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_rx_info *rx_info; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + + if (rx_info->buf_dma_addr) { + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, + DMA_FROM_DEVICE); + rx_info->buf_dma_addr = 0; + } + + if (rx_info->page) { + __free_pages(rx_info->page, nic_dev->page_order); + rx_info->page = NULL; + } + } +} + +static void hinic_reuse_rx_page(struct hinic_rxq *rxq, + struct hinic_rx_info *old_rx_info) +{ + struct hinic_rx_info *new_rx_info; + u16 nta = rxq->next_to_alloc; + + new_rx_info = &rxq->rx_info[nta]; + + /* update, and store next to alloc */ + nta++; + rxq->next_to_alloc = (nta < rxq->q_depth) ? 
nta : 0; + + new_rx_info->page = old_rx_info->page; + new_rx_info->page_offset = old_rx_info->page_offset; + new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, + new_rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); +} + +static bool hinic_add_rx_frag(struct hinic_rxq *rxq, + struct hinic_rx_info *rx_info, + struct sk_buff *skb, u32 size) +{ + struct page *page; + u8 *va; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + dma_sync_single_range_for_cpu(rxq->dev, + rx_info->buf_dma_addr, + rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); + + if (size <= HINIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + memcpy(__skb_put(skb, size), va, + ALIGN(size, sizeof(long))); /*lint !e666*/ + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (int)rx_info->page_offset, (int)size, rxq->buf_len); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_info->page_offset ^= rxq->buf_len; + + page_ref_inc(page); + + return true; +} + +static void __packaging_skb(struct hinic_rxq *rxq, struct sk_buff *head_skb, + u8 sge_num, u32 pkt_len) +{ + struct hinic_rx_info *rx_info; + struct sk_buff *skb; + u8 frag_num = 0; + u32 size; + u16 sw_ci; + + sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask; + skb = head_skb; + while (sge_num) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(pkt_len > rxq->buf_len)) { + size = rxq->buf_len; + pkt_len -= rxq->buf_len; + } else { + size = pkt_len; + } + + if (unlikely(frag_num == MAX_SKB_FRAGS)) { + frag_num = 0; + if (skb == head_skb) + skb = skb_shinfo(skb)->frag_list; + else + skb = skb->next; + } + + if (unlikely(skb != head_skb)) { + head_skb->len += size; + head_skb->data_len += size; + head_skb->truesize += rxq->buf_len; + } + + if (likely(hinic_add_rx_frag(rxq, rx_info, skb, size))) { + hinic_reuse_rx_page(rxq, rx_info); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + } + /* clear contents of buffer_info */ + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + sge_num--; + frag_num++; + } +} + +static struct sk_buff *hinic_fetch_rx_buffer(struct hinic_rxq *rxq, u32 pkt_len) +{ + struct sk_buff *head_skb, *cur_skb, *skb = NULL; + struct net_device *netdev = rxq->netdev; + u8 sge_num, skb_num; + u16 wqebb_cnt = 0; + + head_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE); + if (unlikely(!head_skb)) + return NULL; + + sge_num = (u8)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + if (likely(sge_num <= MAX_SKB_FRAGS)) + skb_num = 1; + else + skb_num = (sge_num / MAX_SKB_FRAGS) + + ((sge_num % MAX_SKB_FRAGS) ? 
1 : 0); + + while (unlikely(skb_num > 1)) { + cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC_RX_HDR_SIZE); + if (unlikely(!cur_skb)) + goto alloc_skb_fail; + + if (!skb) { + skb_shinfo(head_skb)->frag_list = cur_skb; + skb = cur_skb; + } else { + skb->next = cur_skb; + skb = cur_skb; + } + + skb_num--; + } + + prefetchw(head_skb->data); + wqebb_cnt = sge_num; + + __packaging_skb(rxq, head_skb, sge_num, pkt_len); + + rxq->cons_idx += wqebb_cnt; + rxq->delta += wqebb_cnt; + + return head_skb; + +alloc_skb_fail: + dev_kfree_skb_any(head_skb); + return NULL; +} + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, + struct hinic_rxq_stats *stats) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; unsigned int start; @@ -71,440 +376,829 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) u64_stats_update_begin(&stats->syncp); do { start = u64_stats_fetch_begin(&rxq_stats->syncp); - stats->pkts = rxq_stats->pkts; stats->bytes = rxq_stats->bytes; + stats->packets = rxq_stats->packets; + stats->errors = rxq_stats->csum_errors + + rxq_stats->other_errors; + stats->csum_errors = rxq_stats->csum_errors; + stats->other_errors = rxq_stats->other_errors; + stats->dropped = rxq_stats->dropped; + stats->xdp_dropped = rxq_stats->xdp_dropped; + stats->rx_buf_empty = rxq_stats->rx_buf_empty; } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); u64_stats_update_end(&stats->syncp); } -/** - * rxq_stats_init - Initialize the statistics of specific queue - * @rxq: Logical Rx Queue - **/ +void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats) +{ + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->bytes = 0; + rxq_stats->packets = 0; + rxq_stats->errors = 0; + rxq_stats->csum_errors = 0; + rxq_stats->other_errors = 0; + rxq_stats->dropped = 0; + rxq_stats->xdp_dropped = 0; + + rxq_stats->alloc_skb_err = 0; + rxq_stats->alloc_rx_buf_err = 0; + rxq_stats->map_rx_buf_err = 0; + rxq_stats->rx_buf_empty = 0; + rxq_stats->xdp_large_pkt = 0; + u64_stats_update_end(&rxq_stats->syncp); +} + static void rxq_stats_init(struct hinic_rxq *rxq) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; u64_stats_init(&rxq_stats->syncp); - hinic_rxq_clean_stats(rxq); + hinic_rxq_clean_stats(rxq_stats); } -/** - * rx_alloc_skb - allocate skb and map it to dma address - * @rxq: rx queue - * @dma_addr: returned dma address for the skb - * - * Return skb - **/ -static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, - dma_addr_t *dma_addr) -{ - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct sk_buff *skb; - dma_addr_t addr; - int err; +static void hinic_pull_tail(struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, HINIC_RX_HDR_SIZE); + + /* update all of the pointers */ + skb_frag_size_sub(frag, HINIC_RX_HDR_SIZE); + frag->page_offset += HINIC_RX_HDR_SIZE; + skb->data_len -= HINIC_RX_HDR_SIZE; + skb->tail += HINIC_RX_HDR_SIZE; +} - skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); - if (!skb) { - netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); - return NULL; +static void 
hinic_rx_csum(struct hinic_rxq *rxq, u32 status, + struct sk_buff *skb) +{ + struct net_device *netdev = rxq->netdev; + u32 csum_err; + + csum_err = HINIC_GET_RX_CSUM_ERR(status); + + if (unlikely(csum_err == HINIC_RX_CSUM_IPSU_OTHER_ERR)) + rxq->rxq_stats.other_errors++; + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (!csum_err) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* pkt type is recognized by HW, and csum is err */ + if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE | + HINIC_RX_CSUM_IPSU_OTHER_ERR))) + rxq->rxq_stats.csum_errors++; + + skb->ip_summed = CHECKSUM_NONE; } +} - addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, - DMA_FROM_DEVICE); - err = dma_mapping_error(&pdev->dev, addr); - if (err) { - dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err); - goto err_rx_map; +static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type, + struct sk_buff *skb) +{ + struct net_device *netdev = rxq->netdev; + bool l2_tunnel; + + if (!(netdev->features & NETIF_F_GRO)) + return; + + l2_tunnel = HINIC_GET_RX_PKT_TYPE(offload_type) == HINIC_RX_VXLAN_PKT ? + 1 : 0; + + if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY) + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; +} + +static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_buf = nic_dev->lb_test_rx_buf; + void *frag_data; + int lb_len = nic_dev->lb_pkt_len; + int pkt_offset, frag_len, i; + + if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { + nic_dev->lb_test_rx_idx = 0; + nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, recive too more test pkt\n"); } - *dma_addr = addr; - return skb; + if (skb->len != nic_dev->lb_pkt_len) { + nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n"); + nic_dev->lb_test_rx_idx++; + return; + } -err_rx_map: - dev_kfree_skb_any(skb); - return NULL; + pkt_offset = nic_dev->lb_test_rx_idx * lb_len; + frag_len = (int)skb_headlen(skb); + memcpy((lb_buf + pkt_offset), skb->data, frag_len); + pkt_offset += frag_len; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy((lb_buf + pkt_offset), frag_data, frag_len); + pkt_offset += frag_len; + } + nic_dev->lb_test_rx_idx++; } -/** - * rx_unmap_skb - unmap the dma address of the skb - * @rxq: rx queue - * @dma_addr: dma address of the skb - **/ -static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr) +enum hinic_xdp_pkt { + HINIC_XDP_PKT_PASS, + HINIC_XDP_PKT_DROP, +}; + +static inline void update_drop_rx_info(struct hinic_rxq *rxq, u16 weqbb_num) { - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; + struct hinic_rx_info *rx_info = NULL; + + while (weqbb_num) { + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + if (likely(page_to_nid(rx_info->page) == numa_node_id())) + hinic_reuse_rx_page(rxq, rx_info); - dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz, - DMA_FROM_DEVICE); + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + rxq->cons_idx++; + rxq->delta++; + + weqbb_num--; + } } -/** - * rx_free_skb - unmap and free skb - * @rxq: rx queue - * @skb: skb to free - * @dma_addr: dma address of the skb - **/ -static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb, - dma_addr_t dma_addr) +int 
hinic_run_xdp(struct hinic_rxq *rxq, u32 pkt_len) { - rx_unmap_skb(rxq, dma_addr); - dev_kfree_skb_any(skb); + struct bpf_prog *xdp_prog = NULL; + struct hinic_rx_info *rx_info = NULL; + struct xdp_buff xdp; + int result = HINIC_XDP_PKT_PASS; + u16 weqbb_num = 1; /* xdp can only use one rx_buff */ + u8 *va = NULL; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rxq->xdp_prog); + if (!xdp_prog) + goto unlock_rcu; + + if (unlikely(pkt_len > rxq->buf_len)) { + RXQ_STATS_INC(rxq, xdp_large_pkt); + weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + result = HINIC_XDP_PKT_DROP; + goto xdp_out; + } + + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; + prefetch(va); + dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, + rx_info->page_offset, rxq->buf_len, + DMA_FROM_DEVICE); + xdp.data = va; + xdp.data_hard_start = xdp.data; + xdp.data_end = xdp.data + pkt_len; + xdp_set_data_meta_invalid(&xdp); + prefetchw(xdp.data_hard_start); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_DROP: + result = HINIC_XDP_PKT_DROP; + break; + } + +xdp_out: + if (result == HINIC_XDP_PKT_DROP) { + RXQ_STATS_INC(rxq, xdp_dropped); + update_drop_rx_info(rxq, weqbb_num); + } + +unlock_rcu: + rcu_read_unlock(); + + return result; } -/** - * rx_alloc_pkts - allocate pkts in rx queue - * @rxq: rx queue - * - * Return number of skbs allocated - **/ -static int rx_alloc_pkts(struct hinic_rxq *rxq) +int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) { - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_rq_wqe *rq_wqe; - unsigned int free_wqebbs; - struct hinic_sge sge; - dma_addr_t dma_addr; struct sk_buff *skb; - u16 prod_idx; - int i; + struct net_device *netdev = rxq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u32 offload_type; + u32 xdp_status; - free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); + xdp_status = hinic_run_xdp(rxq, pkt_len); + if (xdp_status == HINIC_XDP_PKT_DROP) + return 0; - /* Limit the allocation chunks */ - if (free_wqebbs > nic_dev->rx_weight) - free_wqebbs = nic_dev->rx_weight; + skb = hinic_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) { + RXQ_STATS_INC(rxq, alloc_skb_err); + return -ENOMEM; + } - for (i = 0; i < free_wqebbs; i++) { - skb = rx_alloc_skb(rxq, &dma_addr); - if (!skb) { - netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); - goto skb_out; - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + hinic_pull_tail(skb); - hinic_set_sge(&sge, dma_addr, skb->len); + hinic_rx_csum(rxq, status, skb); - rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, - &prod_idx); - if (!rq_wqe) { - rx_free_skb(rxq, skb, dma_addr); - goto skb_out; - } + offload_type = be32_to_cpu(rx_cqe->offload_type); + hinic_rx_gro(rxq, offload_type, skb); - hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge); + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { + u16 vid = HINIC_GET_RX_VLAN_TAG(vlan_len); - hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb); + /* if the packet is a vlan pkt, the vid may be 0 */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } -skb_out: - if (i) { - wmb(); /* write all the wqes before update PI */ + if (unlikely(test_bit(HINIC_LP_TEST, 
&nic_dev->flags))) + hinic_copy_lp_data(nic_dev, skb); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); - hinic_rq_update(rxq->rq, prod_idx); + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); } - tasklet_schedule(&rxq->rx_task); - return i; + return 0; } -/** - * free_all_rx_skbs - free all skbs in rx queue - * @rxq: rx queue - **/ -static void free_all_rx_skbs(struct hinic_rxq *rxq) +void rx_pass_super_cqe(struct hinic_rxq *rxq, u32 index, u32 pkt_num, + struct hinic_rq_cqe *cqe) { - struct hinic_rq *rq = rxq->rq; - struct hinic_hw_wqe *hw_wqe; - struct hinic_sge sge; - u16 ci; - - while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { - if (IS_ERR(hw_wqe)) - break; + u8 sge_num = 0; + u32 pkt_len; + + while (index < pkt_num) { + pkt_len = hinic_get_pkt_len_for_super_cqe + (cqe, index == (pkt_num - 1)); + sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + index++; + } - hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge); + rxq->cons_idx += sge_num; + rxq->delta += sge_num; +} - hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE); +static inline int __recv_supper_cqe(struct hinic_rxq *rxq, + struct hinic_rq_cqe *rx_cqe, u32 pkt_info, + u32 vlan_len, u32 status, int *pkts, + u64 *rx_bytes, u32 *dropped) +{ + u32 pkt_len; + int i, pkt_num = 0; + + pkt_num = HINIC_GET_RQ_CQE_PKT_NUM(pkt_info); + i = 0; + while (i < pkt_num) { + pkt_len = ((i == (pkt_num - 1)) ? + RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) : + RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN)); + if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len, + vlan_len, status))) { + if (i) { + rx_pass_super_cqe(rxq, i, + pkt_num, + rx_cqe); + *dropped += (pkt_num - i); + } + break; + } - rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge)); + *rx_bytes += pkt_len; + (*pkts)++; + i++; } -} -/** - * rx_alloc_task - tasklet for queue allocation - * @data: rx queue - **/ -static void rx_alloc_task(unsigned long data) -{ - struct hinic_rxq *rxq = (struct hinic_rxq *)data; + if (!i) + return -EFAULT; - (void)rx_alloc_pkts(rxq); + return 0; } -/** - * rx_recv_jumbo_pkt - Rx handler for jumbo pkt - * @rxq: rx queue - * @head_skb: the first skb in the list - * @left_pkt_len: left size of the pkt exclude head skb - * @ci: consumer index - * - * Return number of wqes that used for the left of the pkt - **/ -static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, - unsigned int left_pkt_len, u16 ci) +#define LRO_PKT_HDR_LEN_IPV4 66 +#define LRO_PKT_HDR_LEN_IPV6 86 +#define LRO_PKT_HDR_LEN(cqe) \ + (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \ + HINIC_RX_IPV6_PKT ? 
LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) + +int hinic_rx_poll(struct hinic_rxq *rxq, int budget) { - struct sk_buff *skb, *curr_skb = head_skb; - struct hinic_rq_wqe *rq_wqe; - unsigned int curr_len; - struct hinic_sge sge; - int num_wqes = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 status, pkt_len, vlan_len, pkt_info, dropped = 0; + struct hinic_rq_cqe *rx_cqe; + u64 rx_bytes = 0; + u16 sw_ci, num_lro; + int pkts = 0, nr_pkts = 0; + u16 num_wqe = 0; + + while (likely(pkts < budget)) { + sw_ci = ((u32)rxq->cons_idx) & rxq->q_mask; + rx_cqe = rxq->rx_info[sw_ci].cqe; + status = be32_to_cpu(rx_cqe->status); + + if (!HINIC_GET_RX_DONE(status)) + break; - while (left_pkt_len > 0) { - rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, - &skb, &ci); + /* make sure we read rx_done before packet length */ + rmb(); - num_wqes++; + vlan_len = be32_to_cpu(rx_cqe->vlan_len); + pkt_info = be32_to_cpu(rx_cqe->pkt_info); + pkt_len = HINIC_GET_RX_PKT_LEN(vlan_len); - hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + if (unlikely(HINIC_GET_SUPER_CQE_EN(pkt_info))) { + if (unlikely(__recv_supper_cqe(rxq, rx_cqe, pkt_info, + vlan_len, status, &pkts, + &rx_bytes, &dropped))) + break; + nr_pkts += (int)HINIC_GET_RQ_CQE_PKT_NUM(pkt_info); + } else { + if (recv_one_pkt(rxq, rx_cqe, pkt_len, + vlan_len, status)) + break; + rx_bytes += pkt_len; + pkts++; + nr_pkts++; + + num_lro = HINIC_GET_RX_NUM_LRO(status); + if (num_lro) { + rx_bytes += ((num_lro - 1) * + LRO_PKT_HDR_LEN(rx_cqe)); + + num_wqe += + (u16)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); + } + } - rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + rx_cqe->status = 0; - prefetch(skb->data); + if (num_wqe >= nic_dev->lro_replenish_thld) + break; + } - curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? 
HINIC_RX_BUF_SZ : - left_pkt_len; + if (rxq->delta >= HINIC_RX_BUFFER_WRITE) + hinic_rx_fill_buffers(rxq); - left_pkt_len -= curr_len; + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.packets += nr_pkts; + rxq->rxq_stats.bytes += rx_bytes; + rxq->rxq_stats.dropped += dropped; + u64_stats_update_end(&rxq->rxq_stats.syncp); + return pkts; +} - __skb_put(skb, curr_len); +static int rx_alloc_cqe(struct hinic_rxq *rxq) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_rx_info *rx_info; + struct hinic_rq_cqe *cqe_va; + dma_addr_t cqe_pa; + u32 cqe_mem_size; + int idx; + + cqe_mem_size = sizeof(*rx_info->cqe) * rxq->q_depth; + rxq->cqe_start_vaddr = dma_zalloc_coherent(&pdev->dev, cqe_mem_size, + &rxq->cqe_start_paddr, + GFP_KERNEL); + if (!rxq->cqe_start_vaddr) { + nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate cqe dma\n"); + return -ENOMEM; + } - if (curr_skb == head_skb) - skb_shinfo(head_skb)->frag_list = skb; - else - curr_skb->next = skb; + cqe_va = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr; + cqe_pa = rxq->cqe_start_paddr; - head_skb->len += skb->len; - head_skb->data_len += skb->len; - head_skb->truesize += skb->truesize; + for (idx = 0; idx < rxq->q_depth; idx++) { + rx_info = &rxq->rx_info[idx]; + rx_info->cqe = cqe_va; + rx_info->cqe_dma = cqe_pa; - curr_skb = skb; + cqe_va++; + cqe_pa += sizeof(*rx_info->cqe); } - return num_wqes; + hinic_rq_cqe_addr_set(nic_dev->hwdev, rxq->q_id, rxq->cqe_start_paddr); + return 0; } -/** - * rxq_recv - Rx handler - * @rxq: rx queue - * @budget: maximum pkts to process - * - * Return number of pkts received - **/ -static int rxq_recv(struct hinic_rxq *rxq, int budget) +static void rx_free_cqe(struct hinic_rxq *rxq) { - struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); - u64 pkt_len = 0, rx_bytes = 0; - struct hinic_rq_wqe *rq_wqe; - int num_wqes, pkts = 0; - struct hinic_sge sge; - struct sk_buff *skb; - u16 ci; + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + struct pci_dev *pdev = nic_dev->pdev; + u32 cqe_mem_size; - while (pkts < budget) { - num_wqes = 0; + cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth; - rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, - &ci); - if (!rq_wqe) - break; + dma_free_coherent(&pdev->dev, cqe_mem_size, + rxq->cqe_start_vaddr, rxq->cqe_start_paddr); +} - hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); +static int hinic_setup_rx_resources(struct hinic_rxq *rxq, + struct net_device *netdev, + struct irq_info *entry) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u64 rx_info_sz; + int err, pkts; + + rxq->irq_id = entry->irq_id; + rxq->msix_entry_idx = entry->msix_entry_idx; + rxq->next_to_alloc = 0; + rxq->next_to_update = 0; + rxq->delta = rxq->q_depth; + rxq->q_mask = rxq->q_depth - 1; + rxq->cons_idx = 0; + + rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info); + if (!rx_info_sz) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size rx info\n"); + return -EINVAL; + } - rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + rxq->rx_info = kzalloc(rx_info_sz, GFP_KERNEL); + if (!rxq->rx_info) + return -ENOMEM; - prefetch(skb->data); + err = rx_alloc_cqe(rxq); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx cqe\n"); + goto rx_cqe_err; + } - pkt_len = sge.len; + pkts = hinic_rx_fill_wqe(rxq); + if (pkts != rxq->q_depth) { + nicif_err(nic_dev, drv, netdev, "Failed to fill rx wqe\n"); + err = -ENOMEM; + goto rx_pkts_err; + } + pkts = 
hinic_rx_fill_buffers(rxq); + if (!pkts) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate Rx buffer\n"); + err = -ENOMEM; + goto rx_pkts_err; + } - if (pkt_len <= HINIC_RX_BUF_SZ) { - __skb_put(skb, pkt_len); - } else { - __skb_put(skb, HINIC_RX_BUF_SZ); - num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len - - HINIC_RX_BUF_SZ, ci); - } + return 0; - hinic_rq_put_wqe(rxq->rq, ci, - (num_wqes + 1) * HINIC_RQ_WQE_SIZE); +rx_pkts_err: + rx_free_cqe(rxq); - skb_record_rx_queue(skb, qp->q_id); - skb->protocol = eth_type_trans(skb, rxq->netdev); +rx_cqe_err: + kfree(rxq->rx_info); - napi_gro_receive(&rxq->napi, skb); + return err; +} - pkts++; - rx_bytes += pkt_len; +static void hinic_free_rx_resources(struct hinic_rxq *rxq) +{ + hinic_rx_free_buffers(rxq); + rx_free_cqe(rxq); + kfree(rxq->rx_info); +} + +int hinic_setup_all_rx_resources(struct net_device *netdev, + struct irq_info *msix_entires) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 i, q_id; + int err; + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + err = hinic_setup_rx_resources(&nic_dev->rxqs[q_id], + nic_dev->netdev, + &msix_entires[q_id]); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to set up rxq resource\n"); + goto init_rxq_err; + } } - if (pkts) - tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */ + return 0; - u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.pkts += pkts; - rxq->rxq_stats.bytes += rx_bytes; - u64_stats_update_end(&rxq->rxq_stats.syncp); +init_rxq_err: + for (i = 0; i < q_id; i++) + hinic_free_rx_resources(&nic_dev->rxqs[i]); - return pkts; + return err; +} + +void hinic_free_all_rx_resources(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id; + + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) + hinic_free_rx_resources(&nic_dev->rxqs[q_id]); } -static int rx_poll(struct napi_struct *napi, int budget) +int hinic_alloc_rxqs(struct net_device *netdev) { - struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); - struct hinic_rq *rq = rxq->rq; - int pkts; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_rxq *rxq; + u16 num_rxqs = nic_dev->max_qps; + u16 q_id; + u64 rxq_size; + + rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); + if (!rxq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n"); + return -EINVAL; + } + + nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) { + nic_err(&pdev->dev, "Failed to allocate rxqs\n"); + return -ENOMEM; + } - pkts = rxq_recv(rxq, budget); - if (pkts >= budget) - return budget; + for (q_id = 0; q_id < num_rxqs; q_id++) { + rxq = &nic_dev->rxqs[q_id]; + rxq->netdev = netdev; + rxq->dev = &pdev->dev; + rxq->q_id = q_id; + rxq->buf_len = nic_dev->rx_buff_len; + rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len); + rxq->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * + nic_dev->rx_buff_len; + rxq->q_depth = nic_dev->rq_depth; + rxq->q_mask = nic_dev->rq_depth - 1; + + rxq_stats_init(rxq); + } - napi_complete(napi); - enable_irq(rq->irq); - return pkts; + return 0; } -static void rx_add_napi(struct hinic_rxq *rxq) +void hinic_free_rxqs(struct net_device *netdev) { - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); - netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); - napi_enable(&rxq->napi); + hinic_clear_rss_config_user(nic_dev); + kfree(nic_dev->rxqs); } -static void rx_del_napi(struct hinic_rxq *rxq) +void 
hinic_init_rss_parameters(struct net_device *netdev) { - napi_disable(&rxq->napi); - netif_napi_del(&rxq->napi); + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR; + + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_type.udp_ipv4 = 1; } -static irqreturn_t rx_irq(int irq, void *data) +void hinic_set_default_rss_indir(struct net_device *netdev) { - struct hinic_rxq *rxq = (struct hinic_rxq *)data; - struct hinic_rq *rq = rxq->rq; - struct hinic_dev *nic_dev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); - /* Disable the interrupt until napi will be completed */ - disable_irq_nosync(rq->irq); + if (!nic_dev->rss_indir_user) + return; - nic_dev = netdev_priv(rxq->netdev); - hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); + nicif_info(nic_dev, drv, netdev, + "Discard user configured Rx flow hash indirection\n"); - napi_schedule(&rxq->napi); - return IRQ_HANDLED; + kfree(nic_dev->rss_indir_user); + nic_dev->rss_indir_user = NULL; } -static int rx_request_irq(struct hinic_rxq *rxq) +static void hinic_maybe_reconfig_rss_indir(struct net_device *netdev) { - struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_rq *rq = rxq->rq; - struct hinic_qp *qp; - struct cpumask mask; - int err; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int i; - rx_add_napi(rxq); + if (!nic_dev->rss_indir_user) + return; - hinic_hwdev_msix_set(hwdev, rq->msix_entry, - RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC, - RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, - RX_IRQ_NO_RESEND_TIMER); + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + goto discard_user_rss_indir; - err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); - if (err) { - rx_del_napi(rxq); - return err; + for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { + if (nic_dev->rss_indir_user[i] >= nic_dev->num_qps) + goto discard_user_rss_indir; } - qp = container_of(rq, struct hinic_qp, rq); - cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); - return irq_set_affinity_hint(rq->irq, &mask); + return; + +discard_user_rss_indir: + hinic_set_default_rss_indir(netdev); } -static void rx_free_irq(struct hinic_rxq *rxq) +static void hinic_clear_rss_config_user(struct hinic_nic_dev *nic_dev) { - struct hinic_rq *rq = rxq->rq; + kfree(nic_dev->rss_hkey_user); - irq_set_affinity_hint(rq->irq, NULL); - free_irq(rq->irq, rxq); - rx_del_napi(rxq); + nic_dev->rss_hkey_user_be = NULL; + nic_dev->rss_hkey_user = NULL; + + kfree(nic_dev->rss_indir_user); + nic_dev->rss_indir_user = NULL; } -/** - * hinic_init_rxq - Initialize the Rx Queue - * @rxq: Logical Rx Queue - * @rq: Hardware Rx Queue to connect the Logical queue with - * @netdev: network device to connect the Logical queue with - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, - struct net_device *netdev) +static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, + u8 num_tcs, u32 *indir) { - struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq); - int err, pkts, irqname_len; + u16 num_rss, tc_group_size; + int i; - rxq->netdev = netdev; - rxq->rq = rq; + if (num_tcs) + tc_group_size = HINIC_RSS_INDIR_SIZE / num_tcs; + else + tc_group_size = HINIC_RSS_INDIR_SIZE; + + num_rss = nic_dev->num_rss; + for (i 
= 0; i < HINIC_RSS_INDIR_SIZE; i++) + indir[i] = (i / tc_group_size) * num_rss + i % num_rss; +} - rxq_stats_init(rxq); +static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev) +{ + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + + hinic_rss_cfg(nic_dev->hwdev, 0, nic_dev->rss_tmpl_idx, 0, prio_tc); +} + +int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, + u8 *prio_tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 tmpl_idx = 0xFF; + u8 default_rss_key[HINIC_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; + u32 *indir_tbl; + u8 *hkey; + int err; + + tmpl_idx = nic_dev->rss_tmpl_idx; + + /* RSS key */ + if (nic_dev->rss_hkey_user) + hkey = nic_dev->rss_hkey_user; + else + hkey = default_rss_key; + err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hkey); + if (err) + return err; - irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1; - rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); - if (!rxq->irq_name) + hinic_maybe_reconfig_rss_indir(netdev); + indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!indir_tbl) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate set hw rss indir_tbl\n"); return -ENOMEM; + } - sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id); + if (nic_dev->rss_indir_user) + memcpy(indir_tbl, nic_dev->rss_indir_user, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + else + hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl); - tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq); + err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indir_tbl); + if (err) + goto out; - pkts = rx_alloc_pkts(rxq); - if (!pkts) { - err = -ENOMEM; - goto err_rx_pkts; + err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, nic_dev->rss_type); + if (err) + goto out; + + err = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx, + nic_dev->rss_hash_engine); + if (err) + goto out; + + err = hinic_rss_cfg(nic_dev->hwdev, rss_en, tmpl_idx, num_tc, prio_tc); + if (err) + goto out; + + kfree(indir_tbl); + return 0; + +out: + kfree(indir_tbl); + return err; +} + +static int hinic_rss_init(struct hinic_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u32 *indir_tbl; + u8 cos, num_tc = 0; + u8 prio_tc[HINIC_DCB_UP_MAX] = {0}; + int err; + + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) { + num_tc = nic_dev->max_cos; + for (cos = 0; cos < HINIC_DCB_COS_MAX; cos++) { + if (cos < HINIC_DCB_COS_MAX - nic_dev->max_cos) + prio_tc[cos] = nic_dev->max_cos - 1; + else + prio_tc[cos] = (HINIC_DCB_COS_MAX - 1) - cos; + } + } else { + num_tc = 0; + } + + indir_tbl = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (!indir_tbl) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate rss init indir_tbl\n"); + return -ENOMEM; + } + + if (nic_dev->rss_indir_user) + memcpy(indir_tbl, nic_dev->rss_indir_user, + sizeof(u32) * HINIC_RSS_INDIR_SIZE); + else + hinic_fillout_indir_tbl(nic_dev, num_tc, indir_tbl); + err = hinic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc); + if (err) { + kfree(indir_tbl); + return err; } - err = rx_request_irq(rxq); + kfree(indir_tbl); + return 0; +} + +int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u8 tmpl_idx = nic_dev->rss_tmpl_idx; + + /* RSS 
must be enable when dcb is enabled */ + return hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc); +} + +int hinic_rx_configure(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) { + err = hinic_rss_init(nic_dev); + if (err) { + nicif_err(nic_dev, drv, netdev, "Failed to init rss\n"); + return -EFAULT; + } + } + + err = hinic_dcb_set_rq_iq_mapping(nic_dev->hwdev, + hinic_func_max_qnum(nic_dev->hwdev), + NULL); if (err) { - netdev_err(netdev, "Failed to request Rx irq\n"); - goto err_req_rx_irq; + nicif_err(nic_dev, drv, netdev, "Failed to set rq_iq mapping\n"); + goto set_rq_cos_mapping_err; } return 0; -err_req_rx_irq: -err_rx_pkts: - tasklet_kill(&rxq->rx_task); - free_all_rx_skbs(rxq); - devm_kfree(&netdev->dev, rxq->irq_name); +set_rq_cos_mapping_err: + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_deinit(nic_dev); + return err; } -/** - * hinic_clean_rxq - Clean the Rx Queue - * @rxq: Logical Rx Queue - **/ -void hinic_clean_rxq(struct hinic_rxq *rxq) +void hinic_rx_remove_configure(struct net_device *netdev) { - struct net_device *netdev = rxq->netdev; - - rx_free_irq(rxq); + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); - tasklet_kill(&rxq->rx_task); - free_all_rx_skbs(rxq); - devm_kfree(&netdev->dev, rxq->irq_name); + if (test_bit(HINIC_RSS_ENABLE, &nic_dev->flags)) + hinic_rss_deinit(nic_dev); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index 27c9af4b1c12a388f08baab8500a3c11d3fbe672..827904f7839bc548c4fec8584dd781c8dab9530e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -16,40 +16,111 @@ #ifndef HINIC_RX_H #define HINIC_RX_H -#include -#include -#include -#include +/* rx cqe checksum err */ +#define HINIC_RX_CSUM_IP_CSUM_ERR BIT(0) +#define HINIC_RX_CSUM_TCP_CSUM_ERR BIT(1) +#define HINIC_RX_CSUM_UDP_CSUM_ERR BIT(2) +#define HINIC_RX_CSUM_IGMP_CSUM_ERR BIT(3) +#define HINIC_RX_CSUM_ICMPv4_CSUM_ERR BIT(4) +#define HINIC_RX_CSUM_ICMPv6_CSUM_ERR BIT(5) +#define HINIC_RX_CSUM_SCTP_CRC_ERR BIT(6) +#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) -#include "hinic_hw_qp.h" +#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16 +#define HINIC_RX_BUFFER_WRITE 16 struct hinic_rxq_stats { - u64 pkts; - u64 bytes; + u64 packets; + u64 bytes; + u64 errors; + u64 csum_errors; + u64 other_errors; + u64 dropped; + u64 xdp_dropped; + u64 rx_buf_empty; - struct u64_stats_sync syncp; + u64 alloc_skb_err; + u64 alloc_rx_buf_err; + u64 map_rx_buf_err; + u64 xdp_large_pkt; + + struct u64_stats_sync syncp; +}; + +struct hinic_rx_info { + dma_addr_t buf_dma_addr; + + struct hinic_rq_cqe *cqe; + dma_addr_t cqe_dma; + struct page *page; + u32 page_offset; + struct hinic_rq_wqe *rq_wqe; }; struct hinic_rxq { - struct net_device *netdev; - struct hinic_rq *rq; + struct net_device *netdev; + + u16 q_id; + u16 q_depth; + u16 q_mask; - struct hinic_rxq_stats rxq_stats; + u16 buf_len; + u32 rx_buff_shift; + u32 dma_rx_buff_size; - char *irq_name; + struct hinic_rxq_stats rxq_stats; + u16 cons_idx; + u16 delta; - struct tasklet_struct rx_task; + u32 irq_id; + u16 
msix_entry_idx; + + struct hinic_rx_info *rx_info; + struct bpf_prog *xdp_prog; + + struct hinic_irq *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + unsigned long status; + dma_addr_t cqe_start_paddr; + void *cqe_start_vaddr; + u64 last_moder_packets; + u64 last_moder_bytes; + u8 last_coalesc_timer_cfg; + u8 last_pending_limt; - struct napi_struct napi; }; -void hinic_rxq_clean_stats(struct hinic_rxq *rxq); +void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats); + +void hinic_rxq_get_stats(struct hinic_rxq *rxq, + struct hinic_rxq_stats *stats); + +int hinic_alloc_rxqs(struct net_device *netdev); + +void hinic_free_rxqs(struct net_device *netdev); + +void hinic_init_rss_parameters(struct net_device *netdev); + +void hinic_set_default_rss_indir(struct net_device *netdev); + +int hinic_setup_all_rx_resources(struct net_device *netdev, + struct irq_info *msix_entires); + +void hinic_free_all_rx_resources(struct net_device *netdev); + +void hinic_rx_remove_configure(struct net_device *netdev); + +int hinic_rx_configure(struct net_device *netdev); -void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats); +int hinic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, + u8 *prio_tc); -int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, - struct net_device *netdev); +int hinic_update_hw_tc_map(struct net_device *netdev, u8 num_tc, u8 *prio_tc); -void hinic_clean_rxq(struct hinic_rxq *rxq); +int hinic_rx_poll(struct hinic_rxq *rxq, int budget); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h new file mode 100644 index 0000000000000000000000000000000000000000..e88e556274fc8933ea89316a63d42e95800794b1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sm_lt.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef __CHIPIF_SM_LT_H__ +#define __CHIPIF_SM_LT_H__ + +#define SM_LT_LOAD 0x12 +#define SM_LT_STORE 0x14 + +#define SM_LT_NUM_OFFSET 13 +#define SM_LT_ABUF_FLG_OFFSET 12 +#define SM_LT_BC_OFFSET 11 + +#define SM_LT_ENTRY_16B 16 +#define SM_LT_ENTRY_32B 32 +#define SM_LT_ENTRY_48B 48 +#define SM_LT_ENTRY_64B 64 + +#define TBL_LT_OFFSET_DEFAULT 0 + +#define SM_CACHE_LINE_SHFT 4 /* log2(16) */ +#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */ + +#define MAX_SM_LT_READ_LINE_NUM 4 +#define MAX_SM_LT_WRITE_LINE_NUM 3 + +#define SM_LT_FULL_BYTEENB 0xFFFF + +#define TBL_GET_ENB3_MASK(bitmask) (u16)(((bitmask) >> 32) & 0xFFFF) +#define TBL_GET_ENB2_MASK(bitmask) (u16)(((bitmask) >> 16) & 0xFFFF) +#define TBL_GET_ENB1_MASK(bitmask) (u16)((bitmask) & 0xFFFF) + +enum { + SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */ + SM_LT_NUM_1, /* lt num = 1, load/store 32B */ + SM_LT_NUM_2, /* lt num = 2, load/store 48B */ + SM_LT_NUM_3 /* lt num = 3, load 64B */ +}; + +/* lt load request */ +typedef union { + struct { + u32 offset : 8; + u32 pad : 3; + u32 bc : 1; + u32 abuf_flg : 1; + u32 num : 2; + u32 ack : 1; + u32 op_id : 5; + u32 instance : 6; + u32 src : 5; + } bs; + + u32 value; +} sml_lt_req_head_u; + +typedef struct { + u32 extra; + sml_lt_req_head_u head; + u32 index; + u32 pad0; + u32 pad1; +} sml_lt_load_req_s; + +typedef struct { + u32 extra; + sml_lt_req_head_u head; + u32 index; + u32 byte_enb[2]; + u8 write_data[48]; +} sml_lt_store_req_s; + +enum { + SM_LT_OFFSET_1 = 1, + SM_LT_OFFSET_2, + SM_LT_OFFSET_3, + SM_LT_OFFSET_4, + SM_LT_OFFSET_5, + SM_LT_OFFSET_6, + SM_LT_OFFSET_7, + SM_LT_OFFSET_8, + SM_LT_OFFSET_9, + SM_LT_OFFSET_10, + SM_LT_OFFSET_11, + SM_LT_OFFSET_12, + SM_LT_OFFSET_13, + SM_LT_OFFSET_14, + SM_LT_OFFSET_15 +}; + +static inline void sml_lt_store_memcpy(u32 *dst, u32 *src, u8 num) +{ + switch (num) { + case SM_LT_NUM_2: + *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11); + *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10); + *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9); + *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8); + /*lint -fallthrough*/ + case SM_LT_NUM_1: + *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7); + *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6); + *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5); + *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4); + /*lint -fallthrough*/ + case SM_LT_NUM_0: + *(dst + SM_LT_OFFSET_3) = *(src + SM_LT_OFFSET_3); + *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2); + *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1); + *dst = *src; + break; + default: + break; + } +} + +static inline void sml_lt_load_memcpy(u32 *dst, u32 *src, u8 num) +{ + switch (num) { + case SM_LT_NUM_3: + *(dst + SM_LT_OFFSET_15) = *(src + SM_LT_OFFSET_15); + *(dst + SM_LT_OFFSET_14) = *(src + SM_LT_OFFSET_14); + *(dst + SM_LT_OFFSET_13) = *(src + SM_LT_OFFSET_13); + *(dst + SM_LT_OFFSET_12) = *(src + SM_LT_OFFSET_12); + /*lint -fallthrough*/ + case SM_LT_NUM_2: + *(dst + SM_LT_OFFSET_11) = *(src + SM_LT_OFFSET_11); + *(dst + SM_LT_OFFSET_10) = *(src + SM_LT_OFFSET_10); + *(dst + SM_LT_OFFSET_9) = *(src + SM_LT_OFFSET_9); + *(dst + SM_LT_OFFSET_8) = *(src + SM_LT_OFFSET_8); + /*lint -fallthrough*/ + case SM_LT_NUM_1: + *(dst + SM_LT_OFFSET_7) = *(src + SM_LT_OFFSET_7); + *(dst + SM_LT_OFFSET_6) = *(src + SM_LT_OFFSET_6); + *(dst + SM_LT_OFFSET_5) = *(src + SM_LT_OFFSET_5); + *(dst + SM_LT_OFFSET_4) = *(src + SM_LT_OFFSET_4); + /*lint -fallthrough*/ + case SM_LT_NUM_0: + *(dst + SM_LT_OFFSET_3) = *(src + 
SM_LT_OFFSET_3); + *(dst + SM_LT_OFFSET_2) = *(src + SM_LT_OFFSET_2); + *(dst + SM_LT_OFFSET_1) = *(src + SM_LT_OFFSET_1); + *dst = *src; + break; + default: + break; + } +} + +enum HINIC_CSR_API_DATA_OPERATION_ID { + HINIC_CSR_OPERATION_WRITE_CSR = 0x1E, + HINIC_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA { + HINIC_CSR_NO_RESP_DATA = 0, + HINIC_CSR_NEED_RESP_DATA = 1 +}; + +enum HINIC_CSR_API_DATA_DATA_SIZE { + HINIC_CSR_DATA_SZ_32 = 0, + HINIC_CSR_DATA_SZ_64 = 1 +}; + +struct hinic_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1 : 13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size : 2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response : 1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id : 5; + u32 reserved2 : 6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id : 5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr : 26; + u32 reserved3 : 6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. */ + u32 csr_write_data_l; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c new file mode 100644 index 0000000000000000000000000000000000000000..a790d945734228afcdfe06711e4d4f56a0fd12f0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_sml_counter.h" + +static void sml_ctr_htonl_n(u32 *node, u32 ulLen) +{ + u32 i; + + for (i = 0; i < ulLen; i++) { + *node = cpu_to_be32(*node); + node++; + } +} + +static void hinic_sml_ctr_read_build_req(chipif_sml_ctr_rd_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = cpu_to_be32(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = cpu_to_be32(msg->ctr_id); + + msg->initial = init_val; +} + +static void hinic_sml_ctr_write_build_req(chipif_sml_ctr_wr_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, + u64 val1, u64 val2) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = cpu_to_be32(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = cpu_to_be32(msg->ctr_id); + + msg->value1_h = val1 >> 32; + msg->value1_l = val1 & 0xFFFFFFFF; + + msg->value2_h = val2 >> 32; + msg->value2_l = val2 & 0xFFFFFFFF; +} + +/** + * hinic_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} +EXPORT_SYMBOL(hinic_sm_ctr_rd32); + +/** + * hinic_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + */ +int hinic_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} +EXPORT_SYMBOL(hinic_sm_ctr_rd32_clear); + +/** + * hinic_sm_ctr_wr32 - small single 32 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_wr32(void *hwdev, u8 node, u8 
instance, u32 ctr_id, u32 value) +{ + chipif_sml_ctr_wr_req_s req; + chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, (u64)value, 0ULL); + + return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hinic_sm_ctr_rd64 - big counter 64 read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +/** + * hinic_sm_ctr_wr64 - big single 64 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value) +{ + chipif_sml_ctr_wr_req_s req; + chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value, 0ULL); + + return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hinic_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + chipif_sml_ctr_rd_req_s req; + ctr_rd_rsp_u rsp; + int ret; + + if (!value1) { + pr_err("value1 is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!value2) { + pr_err("value2 is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!hwdev || (0 != (ctr_id & 0x1))) { + pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + hinic_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic_sm_ctr_wr64_pair - big pair 128 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value1: write 
counter value + * @value2: write counter value + * Return: 0 - success, negative - failure + */ +int hinic_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 value1, u64 value2) +{ + chipif_sml_ctr_wr_req_s req; + chipif_sml_ctr_wr_rsp_s rsp; + + /* pair pattern ctr_id must be even number */ + if (!hwdev || (0 != (ctr_id & 0x1))) { + pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + hinic_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value1, value2); + return hinic_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h new file mode 100644 index 0000000000000000000000000000000000000000..a2e772ee727b8822c5e2377a25bacfdacd753755 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __CHIPIF_SML_COUNTER_H__ +#define __CHIPIF_SML_COUNTER_H__ + +#define CHIPIF_FUNC_PF 0 +#define CHIPIF_FUNC_VF 1 +#define CHIPIF_FUNC_PPF 2 + +#define CHIPIF_ACK 1 +#define CHIPIF_NOACK 0 + +#define CHIPIF_SM_CTR_OP_READ 0x2 +#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6 +#define CHIPIF_SM_CTR_OP_WRITE 0x3 + +#define SMALL_CNT_READ_RSP_SIZE 16 + +/* request head */ +typedef union { + struct { + u32 pad : 15; + u32 ack : 1; + u32 op_id : 5; + u32 instance : 6; + u32 src : 5; + } bs; + + u32 value; +} chipif_sml_ctr_req_head_u; +/* counter read request struct */ +typedef struct { + u32 extra; + chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 initial; + u32 pad; +} chipif_sml_ctr_rd_req_s; + +/* counter read response union */ +typedef union { + struct { + u32 value1 : 16; + u32 pad0 : 16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1 : 20; + u32 pad0 : 12; + u32 value2 : 12; + u32 pad1 : 20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; + +} ctr_rd_rsp_u; + +/* resopnse head */ +typedef union { + struct { + u32 pad : 30; /* reserve */ + u32 code : 2; /* error code */ + } bs; + + u32 value; +} sml_ctr_rsp_head_u; + +/* counter write request struct */ +typedef struct { + u32 extra; + chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 rsv1; + u32 rsv2; + u32 value1_h; + u32 value1_l; + u32 value2_h; + u32 value2_l; +} chipif_sml_ctr_wr_req_s; + +/*counter write response struct */ +typedef struct { + sml_ctr_rsp_head_u head; + u32 pad[3]; +} chipif_sml_ctr_wr_rsp_s; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c new file mode 100644 index 
0000000000000000000000000000000000000000..5ed2ea4a3531a24596b88bb306c02c5e8c42f049 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_lt.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_sm_lt.h" +#include "hinic_hw.h" +#include "hinic_hwdev.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_dbg.h" + +#define ACK 1 +#define NOACK 0 + +#define LT_LOAD16_API_SIZE (16 + 4) +#define LT_STORE16_API_SIZE (32 + 4) + +#define HINIC_API_RD_8B 8 +#define HINIC_API_RD_4B 4 + +static inline void sm_lt_build_head(sml_lt_req_head_u *head, + u8 instance_id, + u8 op_id, u8 ack, + u8 offset, u8 num) +{ + head->value = 0; + head->bs.instance = instance_id; + head->bs.op_id = op_id; + head->bs.ack = ack; + head->bs.num = num; + head->bs.abuf_flg = 0; + head->bs.bc = 1; + head->bs.offset = offset; + head->value = cpu_to_be32(head->value); +} + +static inline void sm_lt_load_build_req(sml_lt_load_req_s *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, u8 num) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->extra = 0; + req->index = lt_index; + req->index = cpu_to_be32(req->index); +} + +static inline void sm_lt_store_build_req(sml_lt_store_req_s *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u8 offset, + u8 num, + u16 byte_enb3, + u16 byte_enb2, + u16 byte_enb1, + u8 *data) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack, offset, num); + req->index = lt_index; + req->index = cpu_to_be32(req->index); + req->extra = 0; + req->byte_enb[0] = (u32)(byte_enb3); + req->byte_enb[0] = cpu_to_be32(req->byte_enb[0]); + req->byte_enb[1] = cpu_to_be32((((u32)byte_enb2) << 16) | byte_enb1); + sml_lt_store_memcpy((u32 *)req->write_data, (u32 *)(void *)data, num); +} + +int hinic_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data) +{ + sml_lt_load_req_s req; + int ret; + + if (!hwdev) + return -EFAULT; + + sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index, 0, 0); + + ret = hinic_api_cmd_read_ack(hwdev, dest, &req, + LT_LOAD16_API_SIZE, (void *)data, 16); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic_dbg_lt_rd_16byte); + +int hinic_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask) +{ + sml_lt_store_req_s req; + int ret; + + if (!hwdev || !data) + return -EFAULT; + + sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index, + 0, 0, 0, 0, mask, data); + + ret = hinic_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Write linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} 
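+/*
+ * Illustrative call sketch for the masked 16-byte linear-table store above
+ * (hypothetical caller, names assumed to be in scope): the @mask argument
+ * appears to carry one write-enable bit per byte of the 16-byte entry
+ * (SM_LT_FULL_BYTEENB, 0xFFFF, selects the whole entry), so e.g. 0x000F
+ * would be expected to update only four bytes, assuming enable bit 0 maps
+ * to byte 0:
+ *
+ *	u8 entry[SM_LT_ENTRY_16B] = { 0 };
+ *	int err;
+ *
+ *	err = hinic_dbg_lt_wr_16byte_mask(hwdev, dest, instance, lt_index,
+ *					  entry, SM_LT_FULL_BYTEENB);
+ */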
+EXPORT_SYMBOL(hinic_dbg_lt_wr_16byte_mask); + +int hinic_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_csr_request_api_data api_data = { 0 }; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (dest == HINIC_NODE_ID_CPI) { + *val = readl(dev->hwif->cfg_regs_base + addr); + return 0; + } + + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, HINIC_API_RD_4B); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr failed, dest %d addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_rd32); + +int hinic_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hinic_hwdev *dev = hwdev; + struct hinic_csr_request_api_data api_data = { 0 }; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + if (dest == HINIC_NODE_ID_CPI) { + writel(val, dev->hwif->cfg_regs_base + addr); + return 0; + } + + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr failed, dest %d addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_wr32); + +int hinic_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val) +{ + struct hinic_csr_request_api_data api_data = { 0 }; + u64 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (dest == HINIC_NODE_ID_CPI) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport to read 64 bit csr from cpi\n"); + return -EOPNOTSUPP; + } + + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, HINIC_API_RD_8B); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Read 64 bit csr failed, dest %d addr 0x%x\n", + dest, addr); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic_api_csr_rd64); + +int hinic_api_csr_wr64(void *hwdev, u8 dest, u32 addr, u64 val) +{ + struct hinic_csr_request_api_data api_data = { 0 }; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (dest == HINIC_NODE_ID_CPI) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Unsupport to write 64 bit csr from cpi\n"); + 
return -EOPNOTSUPP; + } + + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HINIC_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HINIC_CSR_DATA_SZ_64; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = cpu_to_be32(upper_32_bits(val)); + api_data.csr_write_data_l = cpu_to_be32(lower_32_bits(val)); + + ret = hinic_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size); + if (ret) { + sdk_err(((struct hinic_hwdev *)hwdev)->dev_hdl, + "Write 64 bit csr failed, dest %d addr 0x%x val 0x%llx\n", + dest, addr, val); + return ret; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..d987a4671f3619379630f263000de338e13af65e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_nic_cfg.h" +#include "hinic_nic_dev.h" +#include "hinic_sriov.h" +#include "hinic_lld.h" + +int hinic_pci_sriov_disable(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + struct hinic_sriov_info *sriov_info; + u16 tmp_vfs; + + sriov_info = hinic_get_sriov_info_by_pcidev(dev); + /* if SR-IOV is already disabled then nothing will be done */ + if (!sriov_info->sriov_enabled) + return 0; + + if (test_and_set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) { + nic_err(&sriov_info->pdev->dev, + "SR-IOV disable in process, please wait\n"); + return -EPERM; + } + + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(sriov_info->pdev)) { + clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state); + nic_warn(&sriov_info->pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + sriov_info->sriov_enabled = false; + + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(sriov_info->pdev); + + tmp_vfs = (u16)sriov_info->num_vfs; + sriov_info->num_vfs = 0; + hinic_deinit_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0), + OS_VF_ID_TO_HW(tmp_vfs - 1)); + + clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state); + +#endif + + return 0; +} + +int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct hinic_sriov_info *sriov_info; + int err = 0; + int pre_existing_vfs = 0; + + sriov_info = hinic_get_sriov_info_by_pcidev(dev); + + if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) { + nic_err(&sriov_info->pdev->dev, + "SR-IOV enable in process, please wait, num_vfs %d\n", + num_vfs); + return -EPERM; + } + + 
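+	/*
+	 * This path is normally reached through the PCI core's
+	 * sriov_configure callback, e.g. after
+	 * "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" lands in
+	 * hinic_pci_sriov_configure(). Read back how many VFs already exist
+	 * so that an unchanged request can return early and a changed one
+	 * first disables the old VF set before re-enabling with the new
+	 * count.
+	 */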
pre_existing_vfs = pci_num_vf(sriov_info->pdev); + + if (num_vfs > pci_sriov_get_totalvfs(sriov_info->pdev)) { + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + return -ERANGE; + } + if (pre_existing_vfs && pre_existing_vfs != num_vfs) { + err = hinic_pci_sriov_disable(sriov_info->pdev); + if (err) { + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } else if (pre_existing_vfs == num_vfs) { + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + return num_vfs; + } + + err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0), + OS_VF_ID_TO_HW((u16)num_vfs - 1)); + if (err) { + nic_err(&sriov_info->pdev->dev, + "Failed to init vf in hardware before enable sriov, error %d\n", + err); + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + err = pci_enable_sriov(sriov_info->pdev, num_vfs); + if (err) { + nic_err(&sriov_info->pdev->dev, + "Failed to enable SR-IOV, error %d\n", err); + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + sriov_info->sriov_enabled = true; + sriov_info->num_vfs = num_vfs; + clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); + + return num_vfs; +#else + + return 0; +#endif +} + +static bool hinic_is_support_sriov_configure(struct pci_dev *pdev) +{ + enum hinic_init_state state = hinic_get_init_state(pdev); + struct hinic_sriov_info *sriov_info; + + if (state < HINIC_INIT_STATE_NIC_INITED) { + nic_err(&pdev->dev, "NIC device not initialized, don't support to configure sriov\n"); + return false; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(pdev); + if (FUNC_SRIOV_FIX_NUM_VF(sriov_info->hwdev)) { + nic_err(&pdev->dev, "Don't support to changed sriov configuration\n"); + return false; + } + + return true; +} + +int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct hinic_sriov_info *sriov_info; + + if (!hinic_is_support_sriov_configure(dev)) + return -EFAULT; + + sriov_info = hinic_get_sriov_info_by_pcidev(dev); + + if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state)) + return -EFAULT; + + if (!num_vfs) + return hinic_pci_sriov_disable(dev); + else + return hinic_pci_sriov_enable(dev, num_vfs); +} + +int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + int err; + + if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf mac\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (is_multicast_ether_addr(mac) || /*lint !e574*/ + vf >= sriov_info->num_vfs) /*lint !e574*/ + return -EINVAL; + + err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac); + if (err) { + nicif_info(adapter, drv, netdev, "Failed to set MAC %pM on VF %d\n", + mac, vf); + return err; + } + + if (is_zero_ether_addr(mac)) + nicif_info(adapter, drv, netdev, "Removing MAC on VF %d\n", vf); + else + nicif_info(adapter, drv, netdev, "Setting MAC %pM on VF %d\n", + mac, vf); + nicif_info(adapter, drv, netdev, "Reload the VF driver to make this change effective\n"); + + return 0; +} + +/*lint -save -e574 -e734*/ +static int set_hw_vf_vlan(struct hinic_sriov_info *sriov_info, + u16 cur_vlanprio, int vf, u16 vlan, u8 qos) +{ + int err = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan || qos) { + if (cur_vlanprio) { + err = hinic_kill_vf_vlan(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf)); + if (err) { + nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d old 
vlan %d\n", + vf, old_vlan); + return err; + } + } + err = hinic_add_vf_vlan(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf), vlan, qos); + if (err) { + nic_err(&sriov_info->pdev->dev, "Failed to add vf %d new vlan %d\n", + vf, vlan); + return err; + } + } else { + err = hinic_kill_vf_vlan(sriov_info->hwdev, OS_VF_ID_TO_HW(vf)); + if (err) { + nic_err(&sriov_info->pdev->dev, "Failed to delete vf %d vlan %d\n", + vf, old_vlan); + return err; + } + } + + return hinic_update_mac_vlan(sriov_info->hwdev, old_vlan, vlan, + OS_VF_ID_TO_HW(vf)); +} + +int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + u16 vlanprio, cur_vlanprio; + + if (!FUNC_SUPPORT_SET_VF_MAC_VLAN(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf vlan\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT; + cur_vlanprio = hinic_vf_info_vlanprio(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf)); + /* duplicate request, so just return success */ + if (vlanprio == cur_vlanprio) + return 0; + + return set_hw_vf_vlan(sriov_info, cur_vlanprio, vf, vlan, qos); +} + +int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + int err = 0; + bool cur_spoofchk; + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (vf >= sriov_info->num_vfs) + return -EINVAL; + + cur_spoofchk = hinic_vf_info_spoofchk(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk)) + return 0; + + err = hinic_set_vf_spoofchk(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf), setting); + + if (!err) { + nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n", + vf, setting ? "on" : "off"); + } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) { + nicif_err(adapter, drv, netdev, + "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n"); + err = -EOPNOTSUPP; + } + + return err; +} + +int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + int err = 0; + bool cur_trust; + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (vf >= sriov_info->num_vfs) + return -EINVAL; + + cur_trust = hinic_vf_info_trust(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_trust) || (!setting && !cur_trust)) + return 0; + + err = hinic_set_vf_trust(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf), setting); + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d trusted %s succeed\n", + vf, setting ? "on" : "off"); + else + nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n", + vf, setting ? 
"on" : "off"); + + return err; +} + +int hinic_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + if (vf >= sriov_info->num_vfs) + return -EINVAL; + + hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi); + + return 0; +} + +/** + * hinic_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link: required link state + * Return: 0 - success, negative - failure + * Set the link state of a specified VF, regardless of physical link state + */ +int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct hinic_sriov_info *sriov_info; + const char *vf_link[] = {"auto", "enable", "disable"}; + int err; + + if (FUNC_FORCE_LINK_UP(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf link state\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + /* validate the request */ + if (vf_id >= sriov_info->num_vfs) { + nicif_err(adapter, drv, netdev, + "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + err = hinic_set_vf_link_state(sriov_info->hwdev, + OS_VF_ID_TO_HW(vf_id), link); + + if (!err) + nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", + vf_id, vf_link[link]); + + return err; +} + +#define HINIC_TX_RATE_TABLE_FULL 12 + +int hinic_ndo_set_vf_bw(struct net_device *netdev, + int vf, int min_tx_rate, int max_tx_rate) +{ + struct hinic_nic_dev *adapter = netdev_priv(netdev); + struct nic_port_info port_info = {0}; + struct hinic_sriov_info *sriov_info; + u8 link_status = 0; + u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_100000}; + int err = 0; + + if (!FUNC_SUPPORT_RATE_LIMIT(adapter->hwdev)) { + nicif_err(adapter, drv, netdev, + "Current function don't support to set vf rate limit\n"); + return -EOPNOTSUPP; + } + + sriov_info = hinic_get_sriov_info_by_pcidev(adapter->pdev); + + /* verify VF is active */ + if (vf >= sriov_info->num_vfs) { + nicif_err(adapter, drv, netdev, "VF number must be less than %d\n", + sriov_info->num_vfs); + return -EINVAL; + } + + if (max_tx_rate < min_tx_rate) { + nicif_err(adapter, drv, netdev, "Invalid rate, max rate %d must greater than min rate %d\n", + max_tx_rate, min_tx_rate); + return -EINVAL; + } + + err = hinic_get_link_state(adapter->hwdev, &link_status); + if (err) { + nicif_err(adapter, drv, netdev, + "Get link status failed when set vf tx rate\n"); + return -EIO; + } + + if (!link_status) { + nicif_err(adapter, drv, netdev, + "Link status must be up when set vf tx rate\n"); + return -EINVAL; + } + + err = hinic_get_port_info(adapter->hwdev, &port_info); + if (err || port_info.speed > LINK_SPEED_100GB) + return -EIO; + + /* rate limit cannot be less than 0 and greater than link speed */ + if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { + nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %d]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + err = hinic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf), + max_tx_rate, min_tx_rate); + if (err) { + nicif_err(adapter, drv, netdev, + "Unable to set VF %d max rate %d min rate %d%s\n", + vf, max_tx_rate, min_tx_rate, + err == HINIC_TX_RATE_TABLE_FULL ? 
+ ", tx rate profile is full" : ""); + return -EIO; + } + + nicif_info(adapter, drv, netdev, + "Set VF %d max tx rate %d min tx rate %d successfully\n", + vf, max_tx_rate, min_tx_rate); + + return 0; +} + +/*lint -restore*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h similarity index 32% rename from drivers/net/ethernet/huawei/hinic/hinic_dev.h rename to drivers/net/ethernet/huawei/hinic/hinic_sriov.h index 5186cc9023aae513caa35c39ae56dbeaf89789f9..67b3174f09f04d0754b5d8697eaad3793af542b1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,52 +13,39 @@ * */ -#ifndef HINIC_DEV_H -#define HINIC_DEV_H +#ifndef HINIC_SRIOV_H +#define HINIC_SRIOV_H -#include -#include -#include -#include -#include - -#include "hinic_hw_dev.h" -#include "hinic_tx.h" -#include "hinic_rx.h" - -#define HINIC_DRV_NAME "hinic" - -enum hinic_flags { - HINIC_LINK_UP = BIT(0), - HINIC_INTF_UP = BIT(1), +enum hinic_sriov_state { + HINIC_SRIOV_DISABLE, + HINIC_SRIOV_ENABLE, + HINIC_FUNC_REMOVE, }; -struct hinic_rx_mode_work { - struct work_struct work; - u32 rx_mode; +struct hinic_sriov_info { + struct pci_dev *pdev; + void *hwdev; + bool sriov_enabled; + unsigned int num_vfs; + unsigned long state; }; -struct hinic_dev { - struct net_device *netdev; - struct hinic_hwdev *hwdev; +int hinic_pci_sriov_disable(struct pci_dev *dev); +int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs); +int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto); - u32 msg_enable; - unsigned int tx_weight; - unsigned int rx_weight; +int hinic_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); - unsigned int flags; +int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); - struct semaphore mgmt_lock; - unsigned long *vlan_bitmap; +int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); - struct hinic_rx_mode_work rx_mode_work; - struct workqueue_struct *workq; - - struct hinic_txq *txqs; - struct hinic_rxq *rxqs; - - struct hinic_txq_stats tx_stats; - struct hinic_rxq_stats rx_stats; -}; +int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); +int hinic_ndo_set_vf_bw(struct net_device *netdev, + int vf, int min_tx_rate, int max_tx_rate); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index c5fca0356c9c966207ca1b649dd094f4cfc7a55b..bc0a7e77de34ce8195d6c2019cfed7b0f01cb925 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,65 +13,48 @@ * */ -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + #include -#include -#include -#include -#include +#include +#include +#include 
#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_if.h" -#include "hinic_hw_wqe.h" -#include "hinic_hw_wq.h" -#include "hinic_hw_qp.h" -#include "hinic_hw_dev.h" -#include "hinic_dev.h" -#include "hinic_tx.h" - -#define TX_IRQ_NO_PENDING 0 -#define TX_IRQ_NO_COALESC 0 -#define TX_IRQ_NO_LLI_TIMER 0 -#define TX_IRQ_NO_CREDIT 0 -#define TX_IRQ_NO_RESEND_TIMER 0 - -#define CI_UPDATE_NO_PENDING 0 -#define CI_UPDATE_NO_COALESC 0 +#include +#include -#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_nic_io.h" +#include "hinic_nic_dev.h" +#include "hinic_qp.h" +#include "hinic_tx.h" +#include "hinic_dbg.h" -#define MIN_SKB_LEN 64 +#define MIN_SKB_LEN 32 +#define MAX_PAYLOAD_OFFSET 221 -/** - * hinic_txq_clean_stats - Clean the statistics of specific queue - * @txq: Logical Tx Queue - **/ -void hinic_txq_clean_stats(struct hinic_txq *txq) -{ - struct hinic_txq_stats *txq_stats = &txq->txq_stats; +#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->pkts = 0; - txq_stats->bytes = 0; - txq_stats->tx_busy = 0; - txq_stats->tx_wake = 0; - txq_stats->tx_dropped = 0; - u64_stats_update_end(&txq_stats->syncp); +#define TXQ_STATS_INC(txq, field) \ +{ \ + u64_stats_update_begin(&(txq)->txq_stats.syncp); \ + (txq)->txq_stats.field++; \ + u64_stats_update_end(&(txq)->txq_stats.syncp); \ } -/** - * hinic_txq_get_stats - get statistics of Tx Queue - * @txq: Logical Tx Queue - * @stats: return updated stats here - **/ -void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) +void hinic_txq_get_stats(struct hinic_txq *txq, + struct hinic_txq_stats *stats) { struct hinic_txq_stats *txq_stats = &txq->txq_stats; unsigned int start; @@ -79,451 +62,1087 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) u64_stats_update_begin(&stats->syncp); do { start = u64_stats_fetch_begin(&txq_stats->syncp); - stats->pkts = txq_stats->pkts; - stats->bytes = txq_stats->bytes; - stats->tx_busy = txq_stats->tx_busy; - stats->tx_wake = txq_stats->tx_wake; - stats->tx_dropped = txq_stats->tx_dropped; + stats->bytes = txq_stats->bytes; + stats->packets = txq_stats->packets; + stats->busy = txq_stats->busy; + stats->wake = txq_stats->wake; + stats->dropped = txq_stats->dropped; + stats->big_frags_pkts = txq_stats->big_frags_pkts; + stats->big_udp_pkts = txq_stats->big_udp_pkts; } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); u64_stats_update_end(&stats->syncp); } -/** - * txq_stats_init - Initialize the statistics of specific queue - * @txq: Logical Tx Queue - **/ +void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats) +{ + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->bytes = 0; + txq_stats->packets = 0; + txq_stats->busy = 0; + txq_stats->wake = 0; + txq_stats->dropped = 0; + txq_stats->big_frags_pkts = 0; + txq_stats->big_udp_pkts = 0; + + txq_stats->ufo_pkt_unsupport = 0; + txq_stats->ufo_linearize_err = 0; + txq_stats->ufo_alloc_skb_err = 0; + txq_stats->skb_pad_err = 0; + txq_stats->frag_len_overflow = 0; + txq_stats->offload_cow_skb_err = 0; + txq_stats->alloc_cpy_frag_err = 0; + txq_stats->map_cpy_frag_err = 0; + txq_stats->map_frag_err = 0; + txq_stats->frag_size_err = 0; + txq_stats->unknown_tunnel_pkt = 0; + 
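+	/*
+	 * All counters are zeroed inside a single u64_stats_update_begin()/
+	 * u64_stats_update_end() section so that a concurrent reader using
+	 * u64_stats_fetch_begin()/u64_stats_fetch_retry() (as in
+	 * hinic_txq_get_stats() above) retries instead of observing a
+	 * half-cleared snapshot.
+	 */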
u64_stats_update_end(&txq_stats->syncp); +} + static void txq_stats_init(struct hinic_txq *txq) { struct hinic_txq_stats *txq_stats = &txq->txq_stats; u64_stats_init(&txq_stats->syncp); - hinic_txq_clean_stats(txq); + hinic_txq_clean_stats(txq_stats); } -/** - * tx_map_skb - dma mapping for skb and return sges - * @nic_dev: nic device - * @skb: the skb - * @sges: returned sges - * - * Return 0 - Success, negative - Failure - **/ -static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, - struct hinic_sge *sges) +inline void hinic_set_buf_desc(struct hinic_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = cpu_to_be32(upper_32_bits(addr)); + buf_descs->lo_addr = cpu_to_be32(lower_32_bits(addr)); + buf_descs->len = cpu_to_be32(len); +} + +static int tx_map_skb(struct hinic_nic_dev *nic_dev, struct sk_buff *skb, + struct hinic_txq *txq, struct hinic_tx_info *tx_info, + struct hinic_sq_bufdesc *buf_descs, u16 skb_nr_frags) { - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct skb_frag_struct *frag; - dma_addr_t dma_addr; - int i, j; - - dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_addr)) { - dev_err(&pdev->dev, "Failed to map Tx skb data\n"); - return -EFAULT; - } - - hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb)); - - for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - - dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_addr)) { - dev_err(&pdev->dev, "Failed to map Tx skb frag\n"); - goto err_tx_map; + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_dma_len *dma_len = tx_info->dma_len; + struct skb_frag_struct *frag = NULL; + u16 base_nr_frags; + int j, i = 0; + int node, err = 0; + u32 nsize, cpy_nsize = 0; + u8 *vaddr, *cpy_buff = NULL; + + if (unlikely(skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE)) { + for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) + cpy_nsize += + skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); + if (!cpy_nsize) { + TXQ_STATS_INC(txq, alloc_cpy_frag_err); + return -EINVAL; + } + + node = dev_to_node(&nic_dev->pdev->dev); + if (node == NUMA_NO_NODE) + cpy_buff = kzalloc(cpy_nsize, + GFP_ATOMIC | __GFP_NOWARN); + else + cpy_buff = kzalloc_node(cpy_nsize, + GFP_ATOMIC | __GFP_NOWARN, + node); + + if (!cpy_buff) { + TXQ_STATS_INC(txq, alloc_cpy_frag_err); + return -ENOMEM; } - hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag)); + tx_info->cpy_buff = cpy_buff; + + for (i = HINIC_MAX_SKB_NR_FRAGE; i <= skb_nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + nsize = skb_frag_size(frag); + + vaddr = kmap_atomic(skb_frag_page(frag)); + memcpy(cpy_buff, vaddr + frag->page_offset, nsize); + kunmap_atomic(vaddr); + cpy_buff += nsize; + } } - return 0; + dma_len[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_len[0].dma)) { + TXQ_STATS_INC(txq, map_frag_err); + err = -EFAULT; + goto map_single_err; + } + dma_len[0].len = skb_headlen(skb); + hinic_set_buf_desc(&buf_descs[0], dma_len[0].dma, + dma_len[0].len); + + if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) + base_nr_frags = HINIC_MAX_SKB_NR_FRAGE - 1; + else + base_nr_frags = skb_nr_frags; + + for (i = 0; i < base_nr_frags; ) { + frag = &(skb_shinfo(skb)->frags[i]); + nsize = skb_frag_size(frag); + i++; + dma_len[i].dma = 
skb_frag_dma_map(&pdev->dev, frag, 0, + nsize, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_len[i].dma)) { + TXQ_STATS_INC(txq, map_frag_err); + i--; + err = -EFAULT; + goto frag_map_err; + } + dma_len[i].len = nsize; + + hinic_set_buf_desc(&buf_descs[i], dma_len[i].dma, + dma_len[i].len); + } -err_tx_map: - for (j = 0; j < i; j++) - dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]), - sges[j + 1].len, DMA_TO_DEVICE); + if (skb_nr_frags > HINIC_MAX_SKB_NR_FRAGE) { + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma = + dma_map_single(&pdev->dev, tx_info->cpy_buff, + cpy_nsize, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma)) { + TXQ_STATS_INC(txq, map_cpy_frag_err); + err = -EFAULT; + goto fusion_map_err; + } + + dma_len[HINIC_MAX_SKB_NR_FRAGE].len = cpy_nsize; + hinic_set_buf_desc(&buf_descs[HINIC_MAX_SKB_NR_FRAGE], + dma_len[HINIC_MAX_SKB_NR_FRAGE].dma, + dma_len[HINIC_MAX_SKB_NR_FRAGE].len); + } + + return 0; - dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, +fusion_map_err: +frag_map_err: + for (j = 0; j < i;) { + j++; + dma_unmap_page(&pdev->dev, dma_len[j].dma, + dma_len[j].len, DMA_TO_DEVICE); + } + dma_unmap_single(&pdev->dev, dma_len[0].dma, dma_len[0].len, DMA_TO_DEVICE); - return -EFAULT; + +map_single_err: + kfree(tx_info->cpy_buff); + tx_info->cpy_buff = NULL; + + return err; } -/** - * tx_unmap_skb - unmap the dma address of the skb - * @nic_dev: nic device - * @skb: the skb - * @sges: the sges that are connected to the skb - **/ -static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, - struct hinic_sge *sges) +static inline void tx_unmap_skb(struct hinic_nic_dev *nic_dev, + struct sk_buff *skb, + struct hinic_dma_len *dma_len, + u16 valid_nr_frags) { - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; + struct pci_dev *pdev = nic_dev->pdev; int i; + u16 nr_frags = valid_nr_frags; - for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++) - dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]), - sges[i + 1].len, DMA_TO_DEVICE); + if (nr_frags > HINIC_MAX_SKB_NR_FRAGE) + nr_frags = HINIC_MAX_SKB_NR_FRAGE; - dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, - DMA_TO_DEVICE); + for (i = 0; i < nr_frags; ) { + i++; + dma_unmap_page(&pdev->dev, + dma_len[i].dma, + dma_len[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(&pdev->dev, dma_len[0].dma, + dma_len[0].len, DMA_TO_DEVICE); } -netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +union hinic_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union hinic_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) + +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_ip *ip, + union hinic_l4 *l4, + enum tx_offload_type offload_type, + enum sq_l3_type *l3_type, u8 *l4_proto) { - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct netdev_queue *netdev_txq; - int nr_sges, err = NETDEV_TX_OK; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - struct hinic_txq *txq; - struct hinic_qp *qp; - u16 prod_idx; + unsigned char *exthdr; + + if (ip->v4->version == 4) { + *l3_type = (offload_type == TX_OFFLOAD_CSUM) ? 
+ IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD; + *l4_proto = ip->v4->protocol; + } else if (ip->v4->version == 6) { + *l3_type = IPV6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + __be16 frag_off = 0; + + ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), + l4_proto, &frag_off); + } + } else { + *l3_type = UNKNOWN_L3TYPE; + *l4_proto = 0; + } +} - txq = &nic_dev->txqs[skb->queue_mapping]; - qp = container_of(txq->sq, struct hinic_qp, sq); +static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4, + enum tx_offload_type offload_type, u8 l4_proto, + enum sq_l4offload_type *l4_offload, + u32 *l4_len, u32 *offset) +{ + *offset = 0; + *l4_len = 0; + *l4_offload = OFFLOAD_DISABLE; + + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = TCP_OFFLOAD_ENABLE; + *l4_len = l4->tcp->doff * 4; /* doff is in units of 4B */ + /* To keep the same behavior as TSO, the payload offset begins at the payload */ + *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = UDP_OFFLOAD_ENABLE; + *l4_len = sizeof(struct udphdr); + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_SCTP: + /* only csum offload supports SCTP */ + if (offload_type != TX_OFFLOAD_CSUM) + break; - if (skb->len < MIN_SKB_LEN) { - if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { - netdev_err(netdev, "Failed to pad skb\n"); - goto update_error_stats; + *l4_offload = SCTP_OFFLOAD_ENABLE; + *l4_len = sizeof(struct sctphdr); + /* To keep the same behavior as UFO, the payload offset + * begins at the L4 header + */ + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + + default: + break; + } +} + +static int hinic_tx_csum(struct hinic_txq *txq, struct hinic_sq_task *task, + u32 *queue_info, struct sk_buff *skb) +{ + union hinic_ip ip; + union hinic_l4 l4; + enum sq_l3_type l3_type; + enum sq_l4offload_type l4_offload; + u32 network_hdr_len; + u32 offset, l4_len; + u8 l4_proto; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->encapsulation) { + u32 l4_tunnel_len; + u32 tunnel_type = TUNNEL_UDP_NO_CSUM; + + ip.hdr = skb_network_header(skb); + + if (ip.v4->version == 4) { + l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; + l4_proto = ip.v4->protocol; + } else if (ip.v4->version == 6) { + unsigned char *exthdr; + __be16 frag_off; + + l3_type = IPV6_PKT; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4.hdr = skb_transport_header(skb); + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + l3_type = UNKNOWN_L3TYPE; + l4_proto = IPPROTO_RAW; } - skb->len = MIN_SKB_LEN; + hinic_task_set_outter_l3(task, l3_type, + skb_network_header_len(skb)); + + switch (l4_proto) { + case IPPROTO_UDP: + l4_tunnel_len = skb_inner_network_offset(skb) - + skb_transport_offset(skb); + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + network_hdr_len = skb_inner_network_header_len(skb); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunnel_type = NOT_TUNNEL; + l4_tunnel_len = 0; + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_network_header_len(skb); + break; + default: + TXQ_STATS_INC(txq, unknown_tunnel_pkt); + /* Unsupported tunnel packet, disable csum offload */ + skb_checksum_help(skb); + return 0; + } + + hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = 
skb_network_header_len(skb); } + get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, + &l3_type, &l4_proto); + + get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, + &l4_offload, &l4_len, &offset); + + hinic_task_set_inner_l3(task, l3_type, network_hdr_len); + + hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset); + + return 1; +} - nr_sges = skb_shinfo(skb)->nr_frags + 1; - if (nr_sges > txq->max_sges) { - netdev_err(netdev, "Too many Tx sges\n"); - goto skb_error; +static __sum16 csum_magic(union hinic_ip *ip, unsigned short proto) +{ + return (ip->v4->version == 4) ? + csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + union hinic_ip ip; + union hinic_l4 l4; + enum sq_l3_type l3_type; + enum sq_l4offload_type l4_offload; + u32 network_hdr_len; + u32 offset, l4_len; + u32 ip_identify = 0; + u8 l4_proto; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (skb->encapsulation) { + u32 l4_tunnel_len; + u32 tunnel_type = 0; + u32 gso_type = skb_shinfo(skb)->gso_type; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_inner_network_header_len(skb); + + if (ip.v4->version == 4) + l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + else if (ip.v4->version == 6) + l3_type = IPV6_PKT; + else + l3_type = 0; + + hinic_task_set_outter_l3(task, l3_type, + skb_network_header_len(skb)); + + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + tunnel_type = TUNNEL_UDP_CSUM; + } else if (gso_type & SKB_GSO_UDP_TUNNEL) { + tunnel_type = TUNNEL_UDP_NO_CSUM; + } + + l4_tunnel_len = skb_inner_network_offset(skb) - + skb_transport_offset(skb); + hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + network_hdr_len = skb_network_header_len(skb); } - err = tx_map_skb(nic_dev, skb, txq->sges); + get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, + &l3_type, &l4_proto); + + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); + + get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, + &l4_offload, &l4_len, &offset); + + hinic_task_set_inner_l3(task, l3_type, network_hdr_len); + + hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, + offset, ip_identify, skb_shinfo(skb)->gso_size); + + return 1; +} + +static enum tx_offload_type hinic_tx_offload(struct hinic_txq *txq, + struct sk_buff *skb, + struct hinic_sq_task *task, + u32 *queue_info, u8 avd_flag) +{ + enum tx_offload_type offload = 0; + int tso_cs_en; + u16 vlan_tag; + + task->pkt_info0 = 0; + task->pkt_info1 = 0; + task->pkt_info2 = 0; + + tso_cs_en = hinic_tso(task, queue_info, skb); + if (tso_cs_en < 0) { + offload = TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en) { + offload |= TX_OFFLOAD_TSO; + } else { + tso_cs_en = hinic_tx_csum(txq, task, queue_info, skb); + if (tso_cs_en) + offload |= TX_OFFLOAD_CSUM; + } + + if (unlikely(skb_vlan_tag_present(skb))) { + vlan_tag = skb_vlan_tag_get(skb); + hinic_set_vlan_tx_offload(task, queue_info, vlan_tag, + vlan_tag >> VLAN_PRIO_SHIFT); + offload |= TX_OFFLOAD_VLAN; + } + + if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > + MAX_PAYLOAD_OFFSET)) 
{ + offload = TX_OFFLOAD_INVALID; + return offload; + } + + if (avd_flag == HINIC_TX_UFO_AVD) + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, UFO_AVD); + + if (offload) { + hinic_task_set_tx_offload_valid(task, skb_network_offset(skb)); + task->pkt_info0 = be32_to_cpu(task->pkt_info0); + task->pkt_info1 = be32_to_cpu(task->pkt_info1); + task->pkt_info2 = be32_to_cpu(task->pkt_info2); + } + + return offload; +} + +static inline void __get_pkt_stats(struct hinic_tx_info *tx_info, + struct sk_buff *skb) +{ + u32 ihs, hdr_len; + + if (skb_is_gso(skb)) { +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + if (skb->encapsulation) { +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET + ihs = skb_inner_transport_offset(skb) + + inner_tcp_hdrlen(skb); +#else + ihs = (skb_inner_transport_header(skb) - skb->data) + + inner_tcp_hdrlen(skb); +#endif + } else { +#endif + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + } +#endif + hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; + tx_info->num_bytes = skb->len + (u64)hdr_len; + + } else { + tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; + } + + tx_info->num_pkts = 1; +} + +inline u8 hinic_get_vlan_pri(struct sk_buff *skb) +{ + u16 vlan_tci = 0; + int err; + + err = vlan_get_tag(skb, &vlan_tci); if (err) - goto skb_error; + return 0; - wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); + return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +} - sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); - if (!sq_wqe) { - netif_stop_subqueue(netdev, qp->q_id); +static void *__try_to_get_wqe(struct net_device *netdev, u16 q_id, + int wqebb_cnt, u16 *pi, u8 *owner) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + void *wqe = NULL; + + netif_stop_subqueue(netdev, q_id); + /* We need to check again in a case another CPU has just + * made room available. + */ + if (unlikely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= + wqebb_cnt)) { + netif_start_subqueue(netdev, q_id); + /* there have enough wqebbs after queue is wake up */ + wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id, + wqebb_cnt, pi, owner); + } - /* Check for the case free_tx_poll is called in another cpu - * and we stopped the subqueue after free_tx_poll check. 
- */ - sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); - if (sq_wqe) { - netif_wake_subqueue(nic_dev->netdev, qp->q_id); - goto process_sq_wqe; + return wqe; +} + +#define HINIC_FRAG_STATUS_OK 0 +#define HINIC_FRAG_STATUS_IGNORE 1 + +static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic_txq *txq, + u8 *flag, u8 avd_flag) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_tx_info *tx_info; + struct hinic_sq_wqe *wqe = NULL; + enum tx_offload_type offload = 0; + u16 q_id = txq->q_id; + u32 queue_info = 0; + u8 owner = 0; + u16 pi = 0; + int err, wqebb_cnt; + u16 num_sge = 0; + u16 original_nr_frags; + u16 new_nr_frags; + u16 i; + int frag_err = HINIC_FRAG_STATUS_OK; + + /* skb->dev will not be initialized when calling netdev_alloc_skb_ip_align + * with a length parameter larger than PAGE_SIZE (under redhat7.3), + * but skb->dev will be used in vlan_get_tag or elsewhere + */ + if (unlikely(!skb->dev)) + skb->dev = netdev; + + if (unlikely(skb->len < MIN_SKB_LEN)) { + if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) { + TXQ_STATS_INC(txq, skb_pad_err); + goto tx_skb_pad_err; + } + + skb->len = MIN_SKB_LEN; + } + + original_nr_frags = skb_shinfo(skb)->nr_frags; + new_nr_frags = original_nr_frags; + + /* If the sizes of the last frags are all zero, ignore those frags. + * If the size of some frag in the middle is zero, drop this skb. + */ + for (i = 0; i < original_nr_frags; i++) { + if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_OK) + continue; + + if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_OK) { + frag_err = HINIC_FRAG_STATUS_IGNORE; + new_nr_frags = i + 1; + continue; + } + + if ((!skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_IGNORE) + continue; + + if ((skb_frag_size(&skb_shinfo(skb)->frags[i])) && + frag_err == HINIC_FRAG_STATUS_IGNORE) { + TXQ_STATS_INC(txq, frag_size_err); + goto tx_drop_pkts; } + } - tx_unmap_skb(nic_dev, skb, txq->sges); + num_sge = new_nr_frags + 1; - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_busy++; - u64_stats_update_end(&txq->txq_stats.syncp); - err = NETDEV_TX_BUSY; - wqe_size = 0; - goto flush_skbs; + /* if skb->len is more than 65536B but num_sge is 1, + * the driver will drop it + */ + if (unlikely(skb->len > HINIC_GSO_MAX_SIZE && num_sge == 1)) { + TXQ_STATS_INC(txq, frag_len_overflow); + goto tx_drop_pkts; } -process_sq_wqe: - hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + /* if the sge number is more than 17, the driver caps it at 17 sges */ + if (unlikely(num_sge > HINIC_MAX_SQ_SGE)) { + TXQ_STATS_INC(txq, big_frags_pkts); + num_sge = HINIC_MAX_SQ_SGE; + } - hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); + wqebb_cnt = HINIC_SQ_WQEBB_CNT(num_sge); + if (likely(hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= + wqebb_cnt)) { + if (likely(wqebb_cnt == 1)) { + hinic_update_sq_pi(nic_dev->hwdev, q_id, + wqebb_cnt, &pi, &owner); + wqe = txq->tx_info[pi].wqe; + } else { + wqe = hinic_get_sq_wqe(nic_dev->hwdev, q_id, + wqebb_cnt, &pi, &owner); + } -flush_skbs: - netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping); - if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq))) - hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); + } else { + wqe = __try_to_get_wqe(netdev, q_id, wqebb_cnt, &pi, &owner); + if (likely(!wqe)) { + TXQ_STATS_INC(txq, busy); + return NETDEV_TX_BUSY; + } + } - return err; + tx_info = 
&txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + tx_info->valid_nr_frags = new_nr_frags; -skb_error: + __get_pkt_stats(tx_info, skb); + + offload = hinic_tx_offload(txq, skb, &wqe->task, &queue_info, avd_flag); + if (unlikely(offload == TX_OFFLOAD_INVALID)) { + hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner); + TXQ_STATS_INC(txq, offload_cow_skb_err); + goto tx_drop_pkts; + } + + err = tx_map_skb(nic_dev, skb, txq, tx_info, wqe->buf_descs, + new_nr_frags); + if (err) { + hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner); + goto tx_drop_pkts; + } + + hinic_prepare_sq_ctrl(&wqe->ctrl, queue_info, num_sge, owner); + + hinic_send_sq_wqe(nic_dev->hwdev, q_id, wqe, wqebb_cnt, + nic_dev->sq_cos_mapping[hinic_get_vlan_pri(skb)]); + + return NETDEV_TX_OK; + +tx_drop_pkts: dev_kfree_skb_any(skb); -update_error_stats: +tx_skb_pad_err: + TXQ_STATS_INC(txq, dropped); + + *flag = HINIC_TX_DROPED; + return NETDEV_TX_OK; +} + +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic_txq *txq; + u8 flag = 0; + + if (unlikely(!nic_dev->heart_status)) { + dev_kfree_skb_any(skb); + HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_OK; + } + + txq = &nic_dev->txqs[q_id]; + + return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD); +} + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic_txq *txq; + u8 flag = 0; + + if (unlikely(!netif_carrier_ok(netdev) || + !nic_dev->heart_status)) { + dev_kfree_skb_any(skb); + HINIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_OK; + } + + if (unlikely(q_id >= nic_dev->num_qps)) { + txq = &nic_dev->txqs[0]; + HINIC_NIC_STATS_INC(nic_dev, tx_invalid_qid); + goto tx_drop_pkts; + } + txq = &nic_dev->txqs[q_id]; + + return hinic_send_one_skb(skb, netdev, txq, &flag, HINIC_TX_NON_AVD); + +tx_drop_pkts: + dev_kfree_skb_any(skb); u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_dropped++; + txq->txq_stats.dropped++; u64_stats_update_end(&txq->txq_stats.syncp); - return err; + + return NETDEV_TX_OK; } -/** - * tx_free_skb - unmap and free skb - * @nic_dev: nic device - * @skb: the skb - * @sges: the sges that are connected to the skb - **/ -static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, - struct hinic_sge *sges) +static inline void tx_free_skb(struct hinic_nic_dev *nic_dev, + struct sk_buff *skb, + struct hinic_tx_info *tx_info) { - tx_unmap_skb(nic_dev, skb, sges); + tx_unmap_skb(nic_dev, skb, tx_info->dma_len, tx_info->valid_nr_frags); + kfree(tx_info->cpy_buff); + tx_info->cpy_buff = NULL; dev_kfree_skb_any(skb); } -/** - * free_all_rx_skbs - free all skbs in tx queue - * @txq: tx queue - **/ static void free_all_tx_skbs(struct hinic_txq *txq) { - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct hinic_sq *sq = txq->sq; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - struct sk_buff *skb; - int nr_sges; + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_tx_info *tx_info; u16 ci; + int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev, + txq->q_id) + 1; - while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { - sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci); - if (!sq_wqe) - break; + while 
(free_wqebbs < txq->q_depth) { + ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id); - nr_sges = skb_shinfo(skb)->nr_frags + 1; + tx_info = &txq->tx_info[ci]; - hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + tx_free_skb(nic_dev, tx_info->skb, tx_info); - hinic_sq_put_wqe(sq, wqe_size); + hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id, + tx_info->wqebb_cnt); - tx_free_skb(nic_dev, skb, txq->free_sges); + free_wqebbs += tx_info->wqebb_cnt; } } -/** - * free_tx_poll - free finished tx skbs in tx queue that connected to napi - * @napi: napi - * @budget: number of tx - * - * Return 0 - Success, negative - Failure - **/ -static int free_tx_poll(struct napi_struct *napi, int budget) +int hinic_tx_poll(struct hinic_txq *txq, int budget) { - struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); - struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct netdev_queue *netdev_txq; - struct hinic_sq *sq = txq->sq; - struct hinic_wq *wq = sq->wq; - struct hinic_sq_wqe *sq_wqe; - unsigned int wqe_size; - int nr_sges, pkts = 0; + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); struct sk_buff *skb; - u64 tx_bytes = 0; - u16 hw_ci, sw_ci; + struct hinic_tx_info *tx_info; + u64 tx_bytes = 0, wake = 0; + int pkts = 0, nr_pkts = 0, wqebb_cnt = 0; + u16 hw_ci, sw_ci = 0, q_id = txq->q_id; + + hw_ci = hinic_get_sq_hw_ci(nic_dev->hwdev, q_id); + dma_rmb(); + sw_ci = hinic_get_sq_local_ci(nic_dev->hwdev, q_id); do { - hw_ci = HW_CONS_IDX(sq) & wq->mask; + tx_info = &txq->tx_info[sw_ci]; - /* Reading a WQEBB to get real WQE size and consumer index. */ - sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); - if ((!sq_wqe) || - (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) + /* Whether all of the wqebb of this wqe is completed */ + if (hw_ci == sw_ci || ((hw_ci - sw_ci) & + txq->q_mask) < tx_info->wqebb_cnt) { break; - - /* If this WQE have multiple WQEBBs, we will read again to get - * full size WQE. 
- */ - if (wqe_size > wq->wqebb_size) { - sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci); - if (unlikely(!sq_wqe)) - break; } - tx_bytes += skb->len; - pkts++; - - nr_sges = skb_shinfo(skb)->nr_frags + 1; + sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + prefetch(&txq->tx_info[sw_ci]); - hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + wqebb_cnt += tx_info->wqebb_cnt; - hinic_sq_put_wqe(sq, wqe_size); + skb = tx_info->skb; + tx_bytes += tx_info->num_bytes; + nr_pkts += tx_info->num_pkts; + pkts++; - tx_free_skb(nic_dev, skb, txq->free_sges); - } while (pkts < budget); + tx_free_skb(nic_dev, skb, tx_info); - if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && - hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { - netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); + } while (likely(pkts < budget)); - __netif_tx_lock(netdev_txq, smp_processor_id()); + hinic_update_sq_local_ci(nic_dev->hwdev, q_id, wqebb_cnt); - netif_wake_subqueue(nic_dev->netdev, qp->q_id); + if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) && + hinic_get_sq_free_wqebbs(nic_dev->hwdev, q_id) >= 1 && + test_bit(HINIC_INTF_UP, &nic_dev->flags))) { + struct netdev_queue *netdev_txq = + netdev_get_tx_queue(txq->netdev, q_id); + __netif_tx_lock(netdev_txq, smp_processor_id()); + /* To avoid re-waking subqueue with xmit_frame */ + if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) { + netif_wake_subqueue(nic_dev->netdev, q_id); + wake++; + } __netif_tx_unlock(netdev_txq); - - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.tx_wake++; - u64_stats_update_end(&txq->txq_stats.syncp); } u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.bytes += tx_bytes; - txq->txq_stats.pkts += pkts; + txq->txq_stats.packets += nr_pkts; + txq->txq_stats.wake += wake; u64_stats_update_end(&txq->txq_stats.syncp); - if (pkts < budget) { - napi_complete(napi); - enable_irq(sq->irq); - return pkts; - } - - return budget; + return pkts; } -static void tx_napi_add(struct hinic_txq *txq, int weight) +int hinic_setup_tx_wqe(struct hinic_txq *txq) { - netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight); - napi_enable(&txq->napi); -} - -static void tx_napi_del(struct hinic_txq *txq) -{ - napi_disable(&txq->napi); - netif_napi_del(&txq->napi); -} + struct net_device *netdev = txq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_sq_wqe *wqe; + struct hinic_tx_info *tx_info; + u16 pi = 0; + int i; + u8 owner = 0; -static irqreturn_t tx_irq(int irq, void *data) -{ - struct hinic_txq *txq = data; - struct hinic_dev *nic_dev; + for (i = 0; i < txq->q_depth; i++) { + tx_info = &txq->tx_info[i]; - nic_dev = netdev_priv(txq->netdev); + wqe = hinic_get_sq_wqe(nic_dev->hwdev, txq->q_id, + 1, &pi, &owner); + if (!wqe) { + nicif_err(nic_dev, drv, netdev, "Failed to get SQ wqe\n"); + break; + } - /* Disable the interrupt until napi will be completed */ - disable_irq_nosync(txq->sq->irq); + tx_info->wqe = wqe; + } - hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); + hinic_return_sq_wqe(nic_dev->hwdev, txq->q_id, txq->q_depth, owner); - napi_schedule(&txq->napi); - return IRQ_HANDLED; + return i; } -static int tx_request_irq(struct hinic_txq *txq) +int hinic_setup_all_tx_resources(struct net_device *netdev) { - struct hinic_dev *nic_dev = netdev_priv(txq->netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_sq *sq = txq->sq; + struct 
hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_txq *txq; + u64 tx_info_sz; + u16 i, q_id; int err; - tx_napi_add(txq, nic_dev->tx_weight); + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + txq = &nic_dev->txqs[q_id]; + tx_info_sz = txq->q_depth * sizeof(*txq->tx_info); + if (!tx_info_sz) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size txq%d info\n", + q_id); + err = -EINVAL; + goto init_txq_err; + } - hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, - TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, - TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, - TX_IRQ_NO_RESEND_TIMER); + txq->tx_info = kzalloc(tx_info_sz, GFP_KERNEL); + if (!txq->tx_info) { + nicif_err(nic_dev, drv, netdev, "Failed to allocate Tx:%d info\n", + q_id); + err = -ENOMEM; + goto init_txq_err; + } - err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); - if (err) { - dev_err(&pdev->dev, "Failed to request Tx irq\n"); - tx_napi_del(txq); - return err; + err = hinic_setup_tx_wqe(txq); + if (err != txq->q_depth) { + nicif_err(nic_dev, drv, netdev, "Failed to setup Tx: %d wqe\n", + q_id); + q_id++; + goto init_txq_err; + } } return 0; + +init_txq_err: + for (i = 0; i < q_id; i++) { + txq = &nic_dev->txqs[i]; + kfree(txq->tx_info); + } + + return err; } -static void tx_free_irq(struct hinic_txq *txq) +void hinic_free_all_tx_resources(struct net_device *netdev) { - struct hinic_sq *sq = txq->sq; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_txq *txq; + u16 q_id; - free_irq(sq->irq, txq); - tx_napi_del(txq); + for (q_id = 0; q_id < nic_dev->num_qps; q_id++) { + txq = &nic_dev->txqs[q_id]; + free_all_tx_skbs(txq); + kfree(txq->tx_info); + } } -/** - * hinic_init_txq - Initialize the Tx Queue - * @txq: Logical Tx Queue - * @sq: Hardware Tx Queue to connect the Logical queue with - * @netdev: network device to connect the Logical queue with - * - * Return 0 - Success, negative - Failure - **/ -int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, - struct net_device *netdev) +void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id) { - struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); - struct hinic_dev *nic_dev = netdev_priv(netdev); - struct hinic_hwdev *hwdev = nic_dev->hwdev; - int err, irqname_len; - size_t sges_size; - - txq->netdev = netdev; - txq->sq = sq; + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + int up; - txq_stats_init(txq); + for (up = HINIC_DCB_UP_MAX - 1; up >= 0; up--) + nic_dev->sq_cos_mapping[up] = nic_dev->default_cos_id; +} - txq->max_sges = HINIC_MAX_SQ_BUFDESCS; +int hinic_sq_cos_mapping(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic_dcb_state dcb_state = {0}; + u8 default_cos = 0; + int err; - sges_size = txq->max_sges * sizeof(*txq->sges); - txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); - if (!txq->sges) - return -ENOMEM; + if (HINIC_FUNC_IS_VF(nic_dev->hwdev)) { + err = hinic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state); + if (err) { + hinic_info(nic_dev, drv, "Failed to get vf default cos\n"); + return err; + } - sges_size = txq->max_sges * sizeof(*txq->free_sges); - txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); - if (!txq->free_sges) { - err = -ENOMEM; - goto err_alloc_free_sges; + default_cos = dcb_state.default_cos; + nic_dev->default_cos_id = default_cos; + hinic_set_sq_default_cos(nic_dev->netdev, default_cos); + } else { + default_cos = nic_dev->default_cos_id; + if (test_bit(HINIC_DCB_ENABLE, &nic_dev->flags)) + 
memcpy(nic_dev->sq_cos_mapping, nic_dev->up_cos, + sizeof(nic_dev->sq_cos_mapping)); + else + hinic_set_sq_default_cos(nic_dev->netdev, default_cos); + + dcb_state.dcb_on = !!test_bit(HINIC_DCB_ENABLE, + &nic_dev->flags); + dcb_state.default_cos = default_cos; + memcpy(dcb_state.up_cos, nic_dev->sq_cos_mapping, + sizeof(dcb_state.up_cos)); + + err = hinic_set_dcb_state(nic_dev->hwdev, &dcb_state); + if (err) + hinic_info(nic_dev, drv, "Failed to set vf default cos\n"); } - irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1; - txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); - if (!txq->irq_name) { - err = -ENOMEM; - goto err_alloc_irqname; + return err; +} + +int hinic_alloc_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + struct hinic_txq *txq; + u16 q_id, num_txqs = nic_dev->max_qps; + u64 txq_size; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + if (!txq_size) { + nic_err(&pdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; } - sprintf(txq->irq_name, "hinic_txq%d", qp->q_id); + nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); - err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, - CI_UPDATE_NO_COALESC); - if (err) - goto err_hw_ci; + if (!nic_dev->txqs) { + nic_err(&pdev->dev, "Failed to allocate txqs\n"); + return -ENOMEM; + } - err = tx_request_irq(txq); - if (err) { - netdev_err(netdev, "Failed to request Tx irq\n"); - goto err_req_tx_irq; + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->sq_depth; + txq->q_mask = nic_dev->sq_depth - 1; + + txq_stats_init(txq); } return 0; +} -err_req_tx_irq: -err_hw_ci: - devm_kfree(&netdev->dev, txq->irq_name); +void hinic_free_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); -err_alloc_irqname: - devm_kfree(&netdev->dev, txq->free_sges); + kfree(nic_dev->txqs); +} -err_alloc_free_sges: - devm_kfree(&netdev->dev, txq->sges); - return err; +/* should stop transmit any packets before calling this function */ +#define HINIC_FLUSH_QUEUE_TIMEOUT 1000 + +static bool hinic_get_hw_handle_status(void *hwdev, u16 q_id) +{ + u16 sw_pi = 0, hw_ci = 0; + + sw_pi = hinic_dbg_get_sq_pi(hwdev, q_id); + hw_ci = hinic_get_sq_hw_ci(hwdev, q_id); + + return sw_pi == hw_ci; } -/** - * hinic_clean_txq - Clean the Tx Queue - * @txq: Logical Tx Queue - **/ -void hinic_clean_txq(struct hinic_txq *txq) +int hinic_stop_sq(struct hinic_txq *txq) { - struct net_device *netdev = txq->netdev; + struct hinic_nic_dev *nic_dev = netdev_priv(txq->netdev); + unsigned long timeout; + int err; + + timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id)) + return 0; - tx_free_irq(txq); + usleep_range(900, 1000); + } while (time_before(jiffies, timeout)); + + /* force hardware to drop packets */ + timeout = msecs_to_jiffies(HINIC_FLUSH_QUEUE_TIMEOUT) + jiffies; + do { + if (hinic_get_hw_handle_status(nic_dev->hwdev, txq->q_id)) + return 0; + + err = hinic_force_drop_tx_pkt(nic_dev->hwdev); + if (err) + break; - free_all_tx_skbs(txq); + usleep_range(9900, 10000); + } while (time_before(jiffies, timeout)); - devm_kfree(&netdev->dev, txq->irq_name); - devm_kfree(&netdev->dev, txq->free_sges); - devm_kfree(&netdev->dev, txq->sges); + /* Avoid msleep takes too long and get a fake result */ + if (hinic_get_hw_handle_status(nic_dev->hwdev, 
txq->q_id)) + return 0; + + return -EFAULT; } + +void hinic_flush_txqs(struct net_device *netdev) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->num_qps; qid++) { + err = hinic_stop_sq(&nic_dev->txqs[qid]); + if (err) + nicif_err(nic_dev, drv, netdev, + "Failed to stop sq%d\n", qid); + } +} /*lint -e766*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index 1fa55dce5aa7b06b95cca3b229aac3145e27eeac..3e7ff1538b9985d89b7e6dfe9fd76328258b03c5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -16,47 +16,108 @@ #ifndef HINIC_TX_H #define HINIC_TX_H -#include -#include -#include -#include - -#include "hinic_common.h" -#include "hinic_hw_qp.h" +enum tx_offload_type { + TX_OFFLOAD_TSO = BIT(0), + TX_OFFLOAD_CSUM = BIT(1), + TX_OFFLOAD_VLAN = BIT(2), + TX_OFFLOAD_INVALID = BIT(3), +}; struct hinic_txq_stats { - u64 pkts; - u64 bytes; - u64 tx_busy; - u64 tx_wake; - u64 tx_dropped; + u64 packets; + u64 bytes; + u64 busy; + u64 wake; + u64 dropped; + u64 big_frags_pkts; + u64 big_udp_pkts; + + /* Subdivision statistics show in private tool */ + u64 ufo_pkt_unsupport; + u64 ufo_linearize_err; + u64 ufo_alloc_skb_err; + u64 skb_pad_err; + u64 frag_len_overflow; + u64 offload_cow_skb_err; + u64 alloc_cpy_frag_err; + u64 map_cpy_frag_err; + u64 map_frag_err; + u64 frag_size_err; + u64 unknown_tunnel_pkt; + + struct u64_stats_sync syncp; +}; + +struct hinic_dma_len { + dma_addr_t dma; + u32 len; +}; + +#define MAX_SGE_NUM_PER_WQE 17 + +struct hinic_tx_info { + struct sk_buff *skb; + + int wqebb_cnt; - struct u64_stats_sync syncp; + int num_sge; + void *wqe; + u8 *cpy_buff; + u16 valid_nr_frags; + u16 num_pkts; + u64 num_bytes; + struct hinic_dma_len dma_len[MAX_SGE_NUM_PER_WQE]; }; struct hinic_txq { - struct net_device *netdev; - struct hinic_sq *sq; + struct net_device *netdev; - struct hinic_txq_stats txq_stats; + u16 q_id; + u16 q_depth; + u16 q_mask; + struct hinic_txq_stats txq_stats; + u64 last_moder_packets; + u64 last_moder_bytes; + struct hinic_tx_info *tx_info; +}; - int max_sges; - struct hinic_sge *sges; - struct hinic_sge *free_sges; +enum hinic_tx_xmit_status { + HINIC_TX_OK = 0, + HINIC_TX_DROPED = 1, + HINIC_TX_BUSY = 2, +}; - char *irq_name; - struct napi_struct napi; +enum hinic_tx_avd_type { + HINIC_TX_NON_AVD = 0, + HINIC_TX_UFO_AVD = 1, }; -void hinic_txq_clean_stats(struct hinic_txq *txq); +void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats); + +void hinic_txq_get_stats(struct hinic_txq *txq, + struct hinic_txq_stats *stats); -void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, - struct net_device *netdev); +int hinic_setup_all_tx_resources(struct net_device *netdev); + +void hinic_free_all_tx_resources(struct net_device *netdev); + +void hinic_set_sq_default_cos(struct net_device *netdev, u8 cos_id); + +int hinic_sq_cos_mapping(struct net_device *netdev); + +int 
hinic_alloc_txqs(struct net_device *netdev); + +void hinic_free_txqs(struct net_device *netdev); + +int hinic_tx_poll(struct hinic_txq *txq, int budget); + +u8 hinic_get_vlan_pri(struct sk_buff *skb); -void hinic_clean_txq(struct hinic_txq *txq); +void hinic_flush_txqs(struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_wq.c new file mode 100644 index 0000000000000000000000000000000000000000..86588539a60b831959040bef5abef826e0e8b911 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ossl_knl.h" +#include "hinic_hw.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hwif.h" +#include "hinic_wq.h" +#include "hinic_qe_def.h" + +#define WQS_MAX_NUM_BLOCKS 256 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx, + u32 *block_idx); + +static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx, + u32 block_idx); + +static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr, + u64 **shadow_vaddr, u64 page_sz) +{ + dma_addr_t dma_addr = 0; + + *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + sdk_err(handle, "Failed to allocate dma to wqs page\n"); + return -ENOMEM; + } + + if (!ADDR_4K_ALIGNED(dma_addr)) { + sdk_err(handle, "Cla is not 4k aligned\n"); + goto shadow_vaddr_err; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem, shadow_vaddr only used at initialization */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) { + sdk_err(handle, "Failed to allocate shadow page vaddr\n"); + goto shadow_vaddr_err; + } + + return 0; + +shadow_vaddr_err: + dma_free_coherent(handle, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +static int wqs_allocate_page(struct hinic_wqs *wqs, u32 page_idx) +{ + return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); +} + +static void wqs_free_page(struct hinic_wqs *wqs, u32 page_idx) +{ + dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->dev_hdl, + &cmdq_pages->cmdq_page_vaddr, + &cmdq_pages->cmdq_page_paddr, + &cmdq_pages->cmdq_shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) +{ + dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE, + cmdq_pages->cmdq_page_vaddr, + (dma_addr_t)cmdq_pages->cmdq_page_paddr); + vfree(cmdq_pages->cmdq_shadow_page_vaddr); +} + 
+static int alloc_wqes_shadow(struct hinic_wq *wq) +{ + u64 size; + + /* if wq->max_wqe_size == 0, we don't need to alloc shadow */ + if (wq->max_wqe_size <= wq->wqebb_size) + return 0; + + size = (u64)wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_wqe) { + pr_err("Failed to allocate shadow wqe\n"); + return -ENOMEM; + } + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_idx) { + pr_err("Failed to allocate shadow index\n"); + goto shadow_idx_err; + } + + return 0; + +shadow_idx_err: + kfree(wq->shadow_wqe); + return -ENOMEM; +} + +static void free_wqes_shadow(struct hinic_wq *wq) +{ + if (wq->max_wqe_size <= wq->wqebb_size) + return; + + kfree(wq->shadow_idx); + kfree(wq->shadow_wqe); +} + +static void free_wq_pages(void *handle, struct hinic_wq *wq, + u32 num_q_pages) +{ + u32 i; + + for (i = 0; i < num_q_pages; i++) + hinic_dma_free_coherent_align(handle, &wq->mem_align[i]); + + free_wqes_shadow(wq); + + wq->block_vaddr = NULL; + wq->shadow_block_vaddr = NULL; + + kfree(wq->mem_align); +} + +static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq) +{ + struct hinic_dma_addr_align *mem_align; + u64 *vaddr, *paddr; + u32 i, num_q_pages; + int err; + + vaddr = wq->shadow_block_vaddr; + paddr = wq->block_vaddr; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > WQ_MAX_PAGES) { + sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n", + num_q_pages); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n", + num_q_pages); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wqe shadow\n"); + return err; + } + + wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align), + GFP_KERNEL); + if (!wq->mem_align) { + sdk_err(dev_hdl, "Failed to allocate mem_align\n"); + free_wqes_shadow(wq); + return -ENOMEM; + } + + for (i = 0; i < num_q_pages; i++) { + mem_align = &wq->mem_align[i]; + err = hinic_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size, + wq->wq_page_size, + GFP_KERNEL, mem_align); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wq page\n"); + goto alloc_wq_pages_err; + } + + *paddr = cpu_to_be64(mem_align->align_paddr); + *vaddr = (u64)mem_align->align_vaddr; + + paddr++; + vaddr++; + } + + return 0; + +alloc_wq_pages_err: + free_wq_pages(dev_hdl, wq, i); + + return -ENOMEM; +} + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size) +{ + u32 num_wqebbs_per_page; + int err; + + if (wqebb_size == 0) { + sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + if (wq_page_size & (wq_page_size - 1)) { + sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n", + wq_page_size); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n", + num_wqebbs_per_page); + return -EINVAL; + } + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; 
+ wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page); + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wqs->dev_hdl, wq); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n"); + goto alloc_wq_pages_err; + } + + atomic_set(&wq->delta, q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + wq->mask = q_depth - 1; + + return 0; + +alloc_wq_pages_err: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) +{ + free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} + +static int wqs_next_block(struct hinic_wqs *wqs, u32 *page_idx, + u32 *block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + if (wqs->num_free_blks <= 0) { + spin_unlock(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + wqs->num_free_blks--; + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = 0xFFFFFFFF; + wqs->free_blocks[pos].block_idx = 0xFFFFFFFF; + + spin_unlock(&wqs->alloc_blocks_lock); + + return 0; +} + +static void wqs_return_block(struct hinic_wqs *wqs, u32 page_idx, + u32 block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + wqs->num_free_blks++; + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + spin_unlock(&wqs->alloc_blocks_lock); +} + +static void init_wqs_blocks_arr(struct hinic_wqs *wqs) +{ + u32 page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = 0; + wqs->num_free_blks = WQS_MAX_NUM_BLOCKS; + spin_lock_init(&wqs->alloc_blocks_lock); +} + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq) +{ + u64 *block_vaddr; + u32 pg_idx; + + block_vaddr = wq->shadow_block_vaddr; + + atomic_set(&wq->delta, wq->q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++) + memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size); +} + +int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size) +{ + int i, j, err = -ENOMEM; + + if (q_depth & (q_depth - 1)) { + sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + cmdq_pages->dev_hdl = dev_hdl; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + sdk_err(dev_hdl, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].page_idx = 0; + wq[i].block_idx = (u32)i; + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = + ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + wq[i].wqebbs_per_page_shift = + (u32)ilog2(wq[i].num_wqebbs_per_page); + + wq[i].block_vaddr = 
CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]); + if (err) { + sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n"); + goto cmdq_block_err; + } + + atomic_set(&wq[i].delta, q_depth); + wq[i].cons_idx = 0; + wq[i].prod_idx = 0; + wq[i].mask = q_depth - 1; + } + + return 0; + +cmdq_block_err: + for (j = 0; j < i; j++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} + +static int alloc_page_addr(struct hinic_wqs *wqs) +{ + u64 size = wqs->num_pages * sizeof(*wqs->page_paddr); + + wqs->page_paddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto page_vaddr_err; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto page_shadow_vaddr_err; + + return 0; + +page_shadow_vaddr_err: + kfree(wqs->page_vaddr); + +page_vaddr_err: + kfree(wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_addr(struct hinic_wqs *wqs) +{ + kfree(wqs->shadow_page_vaddr); + kfree(wqs->page_vaddr); + kfree(wqs->page_paddr); +} + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl) +{ + u32 i, page_idx; + int err; + + wqs->dev_hdl = dev_hdl; + wqs->num_pages = WQ_NUM_PAGES(num_wqs); + + if (alloc_page_addr(wqs)) { + sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + sdk_err(dev_hdl, "Failed wq page allocation\n"); + goto wq_allocate_page_err; + } + } + + wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto alloc_blocks_err; + } + + init_wqs_blocks_arr(wqs); + return 0; + +alloc_blocks_err: +wq_allocate_page_err: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_addr(wqs); + return err; +} + +void hinic_wqs_free(struct hinic_wqs *wqs) +{ + u32 page_idx; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_addr(wqs); + kfree(wqs->free_blocks); +} + +static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size); + } +} + +static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + 
offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(wqebb_addr, shadow_wqebb_addr, wq->wqebb_size); + } +} + +void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index) +{ + return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index); +} + +u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq) +{ + return be64_to_cpu(*wq->block_vaddr); +} + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx) +{ + u32 curr_pg, end_pg; + u16 curr_prod_idx, end_prod_idx; + + if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) { + atomic_add(num_wqebbs, &wq->delta); + return NULL; + } + + /* use the original cur_pi and end_pi, no need for the queue depth mask as + * WQE_PAGE_NUM will apply the num_queue_pages mask + */ + curr_prod_idx = (u16)wq->prod_idx; + wq->prod_idx += num_wqebbs; + + /* the end prod index should point to the last wqebb of the wqe, + * therefore minus 1 + */ + end_prod_idx = (u16)wq->prod_idx - 1; + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* If we only have one page, we still need to get the shadow wqe when + * the wqe rolls over the page + */ + if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs) +{ + atomic_add(num_wqebbs, &wq->delta); + wq->cons_idx += num_wqebbs; +} + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx) +{ + u32 curr_pg, end_pg; + u16 curr_cons_idx, end_cons_idx; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return NULL; + + curr_cons_idx = (u16)wq->cons_idx; + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); +} + +static inline int wqe_shadow(struct hinic_wq *wq, const void *wqe) +{ + void *end_wqe_shadow_addr; + u32 wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; + + end_wqe_shadow_addr = &wq->shadow_wqe[wqe_shadow_size]; + + return WQE_IN_RANGE(wqe, wq->shadow_wqe, end_wqe_shadow_addr); +} + +void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs) +{ + u16 curr_pg; + u16 prod_idx; + + if (wqe_shadow(wq, wqe)) { + curr_pg = WQE_SHADOW_PAGE(wq, wqe); + prod_idx = wq->shadow_idx[curr_pg]; + + copy_wqe_from_shadow(wq, wqe, num_wqebbs, prod_idx); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_wq.h new file mode 100644 index 0000000000000000000000000000000000000000..c2a408c3396d25c119d4fdc5e1a8d70b2d37b63e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_wq.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it 
and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_WQ_H +#define HINIC_WQ_H + +struct hinic_free_block { + u32 page_idx; + u32 block_idx; +}; + +struct hinic_wq { + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + u64 *shadow_block_vaddr; + u64 *block_vaddr; + + u32 wqebb_size; + u32 wq_page_size; + u16 q_depth; + u32 max_wqe_size; + u32 num_wqebbs_per_page; + + /* performance: replace mul/div as shift; + * num_wqebbs_per_page must be power of 2 + */ + u32 wqebbs_per_page_shift; + u32 page_idx; + u32 block_idx; + + u32 num_q_pages; + + struct hinic_dma_addr_align *mem_align; + + int cons_idx; + int prod_idx; + + atomic_t delta; + u16 mask; + + u8 *shadow_wqe; + u16 *shadow_idx; +}; + +struct hinic_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 cmdq_page_paddr; + u64 *cmdq_page_vaddr; + u64 *cmdq_shadow_page_vaddr; + + void *dev_hdl; +}; + +struct hinic_wqs { + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + u64 **shadow_page_vaddr; + + struct hinic_free_block *free_blocks; + u32 alloc_blk_pos; + u32 return_blk_pos; + int num_free_blks; + + /* for allocate blocks */ + spinlock_t alloc_blocks_lock; + + u32 num_pages; + + void *dev_hdl; +}; + +void hinic_wq_wqe_pg_clear(struct hinic_wq *wq); + +int hinic_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size); + +void hinic_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks); + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, void *dev_hdl); + +void hinic_wqs_free(struct hinic_wqs *wqs); + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size); + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); + +void *hinic_get_wqebb_addr(struct hinic_wq *wq, u16 index); + +u64 hinic_get_first_wqe_page_addr(struct hinic_wq *wq); + +void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx); + +void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs); + +void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx); + +void hinic_write_wqe(struct hinic_wq *wq, void *wqe, int num_wqebbs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl.h b/drivers/net/ethernet/huawei/hinic/ossl_knl.h new file mode 100644 index 0000000000000000000000000000000000000000..c596cdc7231870e44b84db6d269a1ae301c12f60 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#ifndef OSSL_KNL_H +#define OSSL_KNL_H + +#include "ossl_knl_linux.h" + +#define sdk_err(dev, format, ...) \ + dev_err(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) \ + dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) \ + dev_notice(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) \ + dev_info(dev, "[COMM]" format, ##__VA_ARGS__) + +#define nic_err(dev, format, ...) \ + dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) \ + dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) \ + dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) \ + dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#endif /* OSSL_KNL_H */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c similarity index 48% rename from drivers/net/ethernet/huawei/hinic/hinic_common.h rename to drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c index 2c06b76e94a167f37449782e29ec5e84d10f8b4a..1aa09e55c64e6c69da8921eee1dc7c3452786948 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.h +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.c @@ -1,5 +1,5 @@ -/* - * Huawei HiNIC PCI Express Linux driver +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it @@ -13,26 +13,19 @@ * */ -#ifndef HINIC_COMMON_H -#define HINIC_COMMON_H - -#include - -#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) -#define LOWER_8_BITS(data) ((data) & 0xFF) - -struct hinic_sge { - u32 hi_addr; - u32 lo_addr; - u32 len; -}; - -void hinic_cpu_to_be32(void *data, int len); - -void hinic_be32_to_cpu(void *data, int len); - -void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len); - -dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge); - -#endif +#include "ossl_knl_linux.h" + +int local_atoi(const char *name) +{ + int val = 0; + + for (;; name++) { + switch (*name) { + case '0' ... '9': + val = 10 * val + (*name - '0'); + break; + default: + return val; + } + } +} diff --git a/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h new file mode 100644 index 0000000000000000000000000000000000000000..24c0bbc8f3f9ce9a341589565aba76930c5ef9a2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/ossl_knl_linux.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef OSSL_KNL_LINUX_H_ +#define OSSL_KNL_LINUX_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef SUPPORTED_100000baseKR4_Full +#define SUPPORTED_100000baseKR4_Full 0 +#define ADVERTISED_100000baseKR4_Full 0 +#endif +#ifndef SUPPORTED_100000baseCR4_Full +#define SUPPORTED_100000baseCR4_Full 0 +#define ADVERTISED_100000baseCR4_Full 0 +#endif + +#ifndef SUPPORTED_40000baseKR4_Full +#define SUPPORTED_40000baseKR4_Full 0 +#define ADVERTISED_40000baseKR4_Full 0 +#endif +#ifndef SUPPORTED_40000baseCR4_Full +#define SUPPORTED_40000baseCR4_Full 0 +#define ADVERTISED_40000baseCR4_Full 0 +#endif + +#ifndef SUPPORTED_25000baseKR_Full +#define SUPPORTED_25000baseKR_Full 0 +#define ADVERTISED_25000baseKR_Full 0 +#endif +#ifndef SUPPORTED_25000baseCR_Full +#define SUPPORTED_25000baseCR_Full 0 +#define ADVERTISED_25000baseCR_Full 0 +#endif + +int local_atoi(const char *name); + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +#define tasklet_state(tasklet) ((tasklet)->state) + +#endif diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c index b69c622ba8b2d027fff91468a15c88349edae3d7..6f0e4019adefa27b2cd50f80271b0b70f79b72df 100644 --- a/drivers/net/ethernet/i825xx/lasi_82596.c +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -96,6 +96,8 @@ #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ +#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT + #define DMA_WBACK(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) @@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev) unregister_netdev (dev); dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma, - lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + lp->dma_addr, LIB82596_DMA_ATTR); free_netdev (dev); return 0; } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 2f7ae118217fe881a53614fb436ed119bf56919f..d0e8193ca4708a6dbb4a06cdb3b847760fc65622 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev) dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL, - DMA_ATTR_NON_CONSISTENT); + LIB82596_DMA_ATTR); if (!dma) { printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); return -ENOMEM; @@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev) i = register_netdev(dev); if (i) { dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma), - dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + dma, lp->dma_addr, LIB82596_DMA_ATTR); return i; } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index b2c04a789744fc770b798d79b96604910077dfd3..43c1fd18670b00373e099adcab94062a0ea93fdb 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ 
-23,6 +23,8 @@ static const char sni_82596_string[] = "snirm_82596"; +#define LIB82596_DMA_ATTR 0 + #define DMA_WBACK(priv, addr, len) do { } while (0) #define DMA_INV(priv, addr, len) do { } while (0) #define DMA_WBACK_INV(priv, addr, len) do { } while (0) @@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev) unregister_netdev(dev); dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma, - lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + lp->dma_addr, LIB82596_DMA_ATTR); iounmap(lp->ca); iounmap(lp->mpu_port); free_netdev (dev); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 1a86184d44c0ae8cc2f96b0d942c70711630c4df..0f0304043070ba0dd61f0b29e89f43bbc1c59a67 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -1015,6 +1015,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) if(skb->len > XMIT_BUFF_SIZE) { printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); + dev_kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 03f64f40b2a3e0a3d9f3432cb6fbdd7bcd264a4c..e8ee69d4e4d34898f4cb490c32b3d0f2370bd710 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2027,7 +2027,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, dev_consume_skb_any(skb); } -static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_swqe *swqe; @@ -3161,6 +3161,7 @@ static ssize_t ehea_probe_port(struct device *dev, if (ehea_add_adapter_mr(adapter)) { pr_err("creating MR failed\n"); + of_node_put(eth_dn); return -EIO; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 129f4e9f38dac01f424ea7337002add11a2751e5..a96f501813ff7fab86ddb766e9bb8825fb25dd1b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1409,7 +1409,7 @@ static inline u16 emac_tx_csum(struct emac_instance *dev, return 0; } -static inline int emac_xmit_finish(struct emac_instance *dev, int len) +static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len) { struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; @@ -1436,7 +1436,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) } /* Tx lock BH */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; @@ -1494,7 +1494,8 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot, } /* Tx lock BH disabled (SG version for TAH equipped EMACs) */ -static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h index e2f80cca9bed432e88221d89d54fe337e09e16d0..0d2de6f676764d73729e859ecfcceaf0a1f47ac9 100644 --- a/drivers/net/ethernet/ibm/emac/emac.h +++ 
b/drivers/net/ethernet/ibm/emac/emac.h @@ -231,7 +231,7 @@ struct emac_regs { #define EMAC_STACR_PHYE 0x00004000 #define EMAC_STACR_STAC_MASK 0x00003000 #define EMAC_STACR_STAC_READ 0x00001000 -#define EMAC_STACR_STAC_WRITE 0x00000800 +#define EMAC_STACR_STAC_WRITE 0x00002000 #define EMAC_STACR_OPBC_MASK 0x00000C00 #define EMAC_STACR_OPBC_50 0x00000000 #define EMAC_STACR_OPBC_66 0x00000400 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 525d8b89187b9b92eec16f2c33bc33c7524a3e4e..40ad1e5032553b9a833b0a7697f7cc9316fee382 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1172,11 +1172,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, map_failed_frags: last = i+1; - for (i = 0; i < last; i++) + for (i = 1; i < last; i++) dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, DMA_TO_DEVICE); + dma_unmap_single(&adapter->vdev->dev, + descs[0].fields.address, + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); map_failed: if (!firmware_has_feature(FW_FEATURE_CMO)) netdev_err(netdev, "tx: unable to map xmit buffer\n"); @@ -1310,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) unsigned long lpar_rc; u16 mss = 0; -restart_poll: while (frames_processed < budget) { if (!ibmveth_rxq_pending_buffer(adapter)) break; @@ -1398,7 +1401,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - goto restart_poll; } } @@ -1616,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; - unsigned int *mcastFilterSize_p; + __be32 *mcastFilterSize_p; long ret; unsigned long ret_attr; @@ -1638,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) return -EINVAL; } - mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, - VETH_MCAST_FILTER_SIZE, NULL); + mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, + VETH_MCAST_FILTER_SIZE, + NULL); if (!mcastFilterSize_p) { dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " "attribute\n"); @@ -1656,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; - adapter->mcastFilterSize = *mcastFilterSize_p; + adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); adapter->pool_config = 0; netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 699ef942b615c3a22053ba1419399317a6642cfa..1e9193b8ad9429444e3f1ff8b6f9041c1ae91a46 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -438,9 +438,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { free_long_term_buff(adapter, &rx_pool->long_term_buff); rx_pool->buff_size = be64_to_cpu(size_array[i]); - alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * - rx_pool->buff_size); + rc = alloc_long_term_buff(adapter, + &rx_pool->long_term_buff, + rx_pool->size * + rx_pool->buff_size); } else { rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); @@ -485,8 +486,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) for (j = 0; j < rx_pool->size; j++) { if 
(rx_pool->rx_buff[j].skb) { - dev_kfree_skb_any(rx_pool->rx_buff[i].skb); - rx_pool->rx_buff[i].skb = NULL; + dev_kfree_skb_any(rx_pool->rx_buff[j].skb); + rx_pool->rx_buff[j].skb = NULL; } } @@ -706,9 +707,9 @@ static int init_tx_pools(struct net_device *netdev) return rc; } - init_one_tx_pool(netdev, &adapter->tso_pool[i], - IBMVNIC_TSO_BUFS, - IBMVNIC_TSO_BUF_SZ); + rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], + IBMVNIC_TSO_BUFS, + IBMVNIC_TSO_BUF_SZ); if (rc) { release_tx_pools(adapter); return rc; @@ -1103,20 +1104,15 @@ static int ibmvnic_open(struct net_device *netdev) return 0; } - mutex_lock(&adapter->reset_lock); - if (adapter->state != VNIC_CLOSED) { rc = ibmvnic_login(netdev); - if (rc) { - mutex_unlock(&adapter->reset_lock); + if (rc) return rc; - } rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); - mutex_unlock(&adapter->reset_lock); return rc; } } @@ -1124,8 +1120,6 @@ static int ibmvnic_open(struct net_device *netdev) rc = __ibmvnic_open(netdev); netif_carrier_on(netdev); - mutex_unlock(&adapter->reset_lock); - return rc; } @@ -1269,10 +1263,8 @@ static int ibmvnic_close(struct net_device *netdev) return 0; } - mutex_lock(&adapter->reset_lock); rc = __ibmvnic_close(netdev); ibmvnic_cleanup(netdev); - mutex_unlock(&adapter->reset_lock); return rc; } @@ -1428,7 +1420,7 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb, return 0; } -static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); @@ -1452,7 +1444,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) u64 *handle_array; int index = 0; u8 proto = 0; - int ret = 0; + netdev_tx_t ret = NETDEV_TX_OK; if (adapter->resetting) { if (!netif_subqueue_stopped(netdev, skb)) @@ -1545,7 +1537,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); - if (adapter->vlan_header_insertion) { + if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); } @@ -1594,6 +1586,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); + dma_unmap_single(dev, tx_buff->indir_dma, + sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], @@ -1746,6 +1740,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { u64 old_num_rx_queues, old_num_tx_queues; + u64 old_num_rx_slots, old_num_tx_slots; struct net_device *netdev = adapter->netdev; int i, rc; @@ -1757,10 +1752,13 @@ static int do_reset(struct ibmvnic_adapter *adapter, old_num_rx_queues = adapter->req_rx_queues; old_num_tx_queues = adapter->req_tx_queues; + old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; + old_num_tx_slots = adapter->req_tx_entries_per_subcrq; ibmvnic_cleanup(netdev); - if (adapter->reset_reason != VNIC_RESET_MOBILITY && + if (reset_state == VNIC_OPEN && + adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_FAILOVER) { rc = __ibmvnic_close(netdev); if (rc) @@ -1819,21 
+1817,20 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) return rc; } else if (adapter->req_rx_queues != old_num_rx_queues || - adapter->req_tx_queues != old_num_tx_queues) { - adapter->map_id = 1; + adapter->req_tx_queues != old_num_tx_queues || + adapter->req_rx_add_entries_per_subcrq != + old_num_rx_slots || + adapter->req_tx_entries_per_subcrq != + old_num_tx_slots) { release_rx_pools(adapter); release_tx_pools(adapter); - rc = init_rx_pools(netdev); - if (rc) - return rc; - rc = init_tx_pools(netdev); - if (rc) - return rc; - release_napi(adapter); - rc = init_napi(adapter); + release_vpd_data(adapter); + + rc = init_resources(adapter); if (rc) return rc; + } else { rc = reset_tx_pools(adapter); if (rc) @@ -1860,13 +1857,16 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } + /* refresh device's multicast list */ + ibmvnic_set_multi(netdev); + /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); if (adapter->reset_reason != VNIC_RESET_FAILOVER && adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) - netdev_notify_peers(netdev); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); netif_carrier_on(netdev); @@ -1895,6 +1895,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; + reinit_completion(&adapter->init_done); rc = init_crq_queue(adapter); if (rc) { netdev_err(adapter->netdev, @@ -1917,17 +1918,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, adapter->state = VNIC_PROBED; return 0; } - /* netif_set_real_num_xx_queues needs to take rtnl lock here - * unless wait_for_reset is set, in which case the rtnl lock - * has already been taken before initializing the reset - */ - if (!adapter->wait_for_reset) { - rtnl_lock(); - rc = init_resources(adapter); - rtnl_unlock(); - } else { - rc = init_resources(adapter); - } + + rc = init_resources(adapter); if (rc) return rc; @@ -1955,8 +1947,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) { struct ibmvnic_rwi *rwi; + unsigned long flags; - mutex_lock(&adapter->rwi_lock); + spin_lock_irqsave(&adapter->rwi_lock, flags); if (!list_empty(&adapter->rwi_list)) { rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, @@ -1966,7 +1959,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) rwi = NULL; } - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); return rwi; } @@ -1986,17 +1979,32 @@ static void __ibmvnic_reset(struct work_struct *work) struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; struct net_device *netdev; + bool we_lock_rtnl = false; u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); netdev = adapter->netdev; - mutex_lock(&adapter->reset_lock); + /* netif_set_real_num_xx_queues needs to take rtnl lock here + * unless wait_for_reset is set, in which case the rtnl lock + * has already been taken before initializing the reset + */ + if (!adapter->wait_for_reset) { + rtnl_lock(); + we_lock_rtnl = true; + } reset_state = adapter->state; rwi = get_next_rwi(adapter); while (rwi) { + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) { + kfree(rwi); + rc = EBUSY; + break; + } + if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; rc = do_hard_reset(adapter, rwi, reset_state); @@ -2020,12 +2028,11 @@ static void __ibmvnic_reset(struct work_struct *work) if (rc) { 
netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); - mutex_unlock(&adapter->reset_lock); - return; } adapter->resetting = false; - mutex_unlock(&adapter->reset_lock); + if (we_lock_rtnl) + rtnl_unlock(); } static int ibmvnic_reset(struct ibmvnic_adapter *adapter, @@ -2034,6 +2041,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, struct list_head *entry, *tmp_entry; struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; + unsigned long flags; int ret; if (adapter->state == VNIC_REMOVING || @@ -2050,21 +2058,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, goto err; } - mutex_lock(&adapter->rwi_lock); + spin_lock_irqsave(&adapter->rwi_lock, flags); list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { netdev_dbg(netdev, "Skipping matching reset\n"); - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); ret = EBUSY; goto err; } } - rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); + rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); if (!rwi) { - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); ibmvnic_close(netdev); ret = ENOMEM; goto err; @@ -2073,12 +2081,14 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, * flush reset queue and process this reset */ if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); - mutex_unlock(&adapter->rwi_lock); + spin_unlock_irqrestore(&adapter->rwi_lock, flags); adapter->resetting = true; netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); @@ -2723,12 +2733,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, if (adapter->resetting && adapter->reset_reason == VNIC_RESET_MOBILITY) { - u64 val = (0xff000000) | scrq->hw_irq; + struct irq_desc *desc = irq_to_desc(scrq->irq); + struct irq_chip *chip = irq_desc_get_chip(desc); - rc = plpar_hcall_norets(H_EOI, val); - if (rc) - dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", - val, rc); + chip->irq_eoi(&desc->irq_data); } rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, @@ -2748,7 +2756,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; - u8 *first; restart_loop: while (pending_scrq(adapter, scrq)) { @@ -2778,14 +2785,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, txbuff->data_dma[j] = 0; } - /* if sub_crq was sent indirectly */ - first = &txbuff->indir_arr[0].generic.first; - if (*first == IBMVNIC_CRQ_CMD) { - dma_unmap_single(dev, txbuff->indir_dma, - sizeof(txbuff->indir_arr), - DMA_TO_DEVICE); - *first = 0; - } if (txbuff->last_frag) { dev_kfree_skb_any(txbuff->skb); @@ -4576,7 +4575,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) old_num_rx_queues = adapter->req_rx_queues; old_num_tx_queues = adapter->req_tx_queues; - init_completion(&adapter->init_done); + reinit_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4631,7 +4630,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) adapter->from_passive_init = false; - init_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4709,8 +4707,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); INIT_LIST_HEAD(&adapter->rwi_list); - mutex_init(&adapter->reset_lock); - mutex_init(&adapter->rwi_lock); + spin_lock_init(&adapter->rwi_lock); + init_completion(&adapter->init_done); adapter->resetting = false; adapter->mac_change_pending = false; @@ -4781,8 +4779,8 @@ static int ibmvnic_remove(struct vio_dev *dev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); adapter->state = VNIC_REMOVING; - unregister_netdev(netdev); - mutex_lock(&adapter->reset_lock); + rtnl_lock(); + unregister_netdevice(netdev); release_resources(adapter); release_sub_crqs(adapter, 1); @@ -4793,7 +4791,7 @@ static int ibmvnic_remove(struct vio_dev *dev) adapter->state = VNIC_REMOVED; - mutex_unlock(&adapter->reset_lock); + rtnl_unlock(); device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f06eec145ca60689bef26f119ed18f372867924a..09465397b7ff48f1837e94c501c289dd7135907b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -1068,7 +1068,7 @@ struct ibmvnic_adapter { struct tasklet_struct tasklet; enum vnic_state state; enum ibmvnic_reset_reason reset_reason; - struct mutex reset_lock, rwi_lock; + spinlock_t rwi_lock; struct list_head rwi_list; struct work_struct ibmvnic_reset; bool resetting; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 27d5f27163d2cd04f8583f9defd888c3ee1ee8bc..049add0a2ba2ce8eb3e3f49ca5188ad49e72a66d 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1345,8 +1345,8 @@ static inline int e100_load_ucode_wait(struct nic *nic) fw = e100_request_firmware(nic); /* If it's NULL, then no ucode is required */ - if (!fw || IS_ERR(fw)) - return PTR_ERR(fw); + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) netif_err(nic, probe, nic->netdev, @@ -2795,7 +2795,7 @@ static int 
e100_set_features(struct net_device *netdev, netdev->features = features; e100_exec_cb(nic, NULL, e100_configure); - return 0; + return 1; } static const struct net_device_ops e100_netdev_ops = { diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 2569a168334cbc6785f9e2909f5a68ac6450c9d6..903b0a902cb959dbb564ec6793b5de21316446de 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) rxdr[i].count = rxdr->count; + err = 0; if (netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ err = e1000_setup_all_rx_resources(adapter); @@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->rx_ring = rxdr; adapter->tx_ring = txdr; err = e1000_up(adapter); - if (err) - goto err_setup; } kfree(tx_old); kfree(rx_old); clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; + return err; + err_setup_tx: e1000_free_all_rx_resources(adapter); err_setup_rx: @@ -646,7 +646,6 @@ static int e1000_set_ringparam(struct net_device *netdev, err_alloc_tx: if (netif_running(adapter->netdev)) e1000_up(adapter); -err_setup: clear_bit(__E1000_RESETTING, &adapter->flags); return err; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 2110d5f2da19037d11be071566c8ce882b8644e6..18b61be9e0b93d5508a6bc117656c167c006425a 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); } @@ -820,7 +825,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000_netdev_ops = { @@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT; - while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000); - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter); @@ -3144,8 +3154,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (skb->data_len && hdr_len == len) { switch (hw->mac_type) { + case e1000_82544: { unsigned int pull_size; - case e1000_82544: + /* Make sure we have room to chop off 4 bytes, * and that the end alignment will work out to * this hardware's requirements @@ -3166,6 +3177,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } len = skb_headlen(skb); break; + } default: /* do nothing */ break; diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c 
b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 257bd59bc9c6ff58446f9525b7230281a6fe7d2a..f86d55657959147b4cafc1d81ccb9f723847ed85 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -696,11 +696,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); - if (ret_val) - return ret_val; - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) @@ -754,11 +759,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) return ret_val; /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index c760dc72c52007def58701558ef72730ba94a69b..c5a119daa7f3c5df6903ddb47b333c1be0e95200 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -574,7 +574,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) #define er32(reg) __er32(hw, E1000_##reg) -s32 __ew32_prepare(struct e1000_hw *hw); void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index cdae0efde8e6415a89afeaec45de49acab763164..7998a73b6a0fa175bab2e986a98116af752b4ab1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) else phy_reg |= 0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } } hw->phy.ops.release(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index eb09c755fa172314ffa8500b110d0c481d14e680..1502895eb45ddd996c9f75aefc4ac29ea61e16a5 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -210,7 +210,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_CLK_REQ 0x200 #define 
HV_PM_CTRL_K1_ENABLE 0x4000 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3ba0c90e7055b14a666566279fb7a58728da7427..7583f893ef6e6e6e071af595420e103478d1411f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. **/ -s32 __ew32_prepare(struct e1000_hw *hw) +static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); - - return i; } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) @@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, rx_ring->tail); - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); @@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, tx_ring->tail); - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); @@ -2106,7 +2104,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->rx_ring->name, sizeof(adapter->rx_ring->name) - 1, - "%s-rx-0", netdev->name); + "%.14s-rx-0", netdev->name); else memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -2122,7 +2120,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->tx_ring->name, sizeof(adapter->tx_ring->name) - 1, - "%s-tx-0", netdev->name); + "%.14s-tx-0", netdev->name); else memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -4208,7 +4206,7 @@ void e1000e_up(struct e1000_adapter *adapter) e1000_configure_msix(adapter); e1000_irq_enable(adapter); - netif_start_queue(adapter->netdev); + /* Tx queue started by watchdog timer when link is up */ e1000e_trigger_lsc(adapter); } @@ -4584,6 +4582,7 @@ int e1000e_open(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); netif_carrier_off(netdev); + netif_stop_queue(netdev); /* allocate transmit descriptors */ err = e1000e_setup_tx_resources(adapter->tx_ring); @@ -4644,7 +4643,6 @@ int e1000e_open(struct net_device *netdev) e1000_irq_enable(adapter); adapter->tx_hang_recheck = false; - netif_start_queue(netdev); hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); @@ -5251,6 +5249,10 @@ static void e1000_watchdog_task(struct work_struct *work) /* oops */ break; } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } } /* enable transmits in the hardware, need to do this @@ -5266,6 +5268,7 @@ static void e1000_watchdog_task(struct work_struct *work) if (phy->ops.cfg_on_link_up) 
phy->ops.cfg_on_link_up(hw); + netif_wake_queue(netdev); netif_carrier_on(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -5279,6 +5282,7 @@ static void e1000_watchdog_task(struct work_struct *work) /* Link status message must follow this format */ pr_info("%s NIC Link is Down\n", adapter->netdev->name); netif_carrier_off(netdev); + netif_stop_queue(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); @@ -6304,11 +6308,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) + wufc = E1000_WUFC_LNKC; + else if (device_may_wakeup(&pdev->dev)) + wufc = adapter->wol; + else + wufc = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6365,7 +6375,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. */ @@ -6985,7 +6995,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000e_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000e_netdev_ops = { @@ -7330,7 +7340,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_print_device_info(adapter); - if (pci_dev_run_wake(pdev)) + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + + if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 37c76945ad9baaf8c433669e9ef4310148255caa..e1f821edbc21c3d2f8e83ea01b1e4827cf0c0b1e 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u64 ns; + u64 cycles, ns; spin_lock_irqsave(&adapter->systim_lock, flags); - ns = timecounter_read(&adapter->tc); + + /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */ + cycles = adapter->cc.read(&adapter->cc); + ns = timecounter_cyc2time(&adapter->tc, cycles); + spin_unlock_irqrestore(&adapter->systim_lock, flags); *ts = ns_to_timespec64(ns); @@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; + u64 ns; - adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); + /* Update the timecounter */ + ns = timecounter_read(&adapter->tc); + ts = ns_to_timespec64(ns); e_dbg("SYSTIM overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c 
b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index e707d717012faa997a127687ce45d54b27b9e3eb..618032612f52d8c365050d7a9c89d78f6caea06f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -302,6 +302,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev) } } +static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev) +{ + u32 err_mask; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + /* Mask the completion abort bit in the ERR_UNCOR_MASK register, + * preventing the device from reporting these errors to the upstream + * PCIe root device. This avoids bringing down platforms which upgrade + * non-fatal completer aborts into machine check exceptions. Completer + * aborts can occur whenever a VF reads a queue it doesn't own. + */ + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask); + err_mask |= PCI_ERR_UNC_COMP_ABORT; + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask); + + mmiowb(); +} + int fm10k_iov_resume(struct pci_dev *pdev) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); @@ -317,6 +339,12 @@ int fm10k_iov_resume(struct pci_dev *pdev) if (!iov_data) return -ENOMEM; + /* Lower severity of completer abort error reporting as + * the VFs can trigger this any time they read a queue + * that they don't own. + */ + fm10k_mask_aer_comp_abort(pdev); + /* allocate hardware resources for the VFs */ hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); @@ -460,20 +488,6 @@ void fm10k_iov_disable(struct pci_dev *pdev) fm10k_iov_free_data(pdev); } -static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) -{ - u32 err_sev; - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return; - - pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); - err_sev &= ~PCI_ERR_UNC_COMP_ABORT; - pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); -} - int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) { int current_vfs = pci_num_vf(pdev); @@ -495,12 +509,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) /* allocate VFs if not already allocated */ if (num_vfs && num_vfs != current_vfs) { - /* Disable completer abort error reporting as - * the VFs can trigger this any time they read a queue - * that they don't own. 
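/*
 * [Editor's note, not part of the patch] Minimal sketch of the masking
 * pattern the fm10k hunk above introduces: rather than lowering the
 * severity of completer aborts (PCI_ERR_UNCOR_SEVER, the old approach
 * being removed below), the error is masked outright in
 * PCI_ERR_UNCOR_MASK so the device never reports it upstream.  The
 * function name here is a generic placeholder.
 */
#include <linux/pci.h>

static void example_mask_comp_abort(struct pci_dev *pdev)
{
	u32 mask;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;		/* device has no AER capability */

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &mask);
	mask |= PCI_ERR_UNC_COMP_ABORT;	/* stop reporting completer aborts */
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, mask);
}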
- */ - fm10k_disable_aer_comp_abort(pdev); - err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 3f536541f45f170ab9a2c0513bc6f311501990a6..78a43d688cb138e22d5a13b762496781efbf2bf2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) /* create driver workqueue */ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name); + if (!fm10k_workqueue) + return -ENOMEM; fm10k_dbg_init(); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a80652e25008503ae17fbafa158337266c00f90..3bccf23135da42a7cd66ee7881677f29f7e29997 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -122,10 +122,12 @@ enum i40e_state_t { __I40E_MDD_EVENT_PENDING, __I40E_VFLR_EVENT_PENDING, __I40E_RESET_RECOVERY_PENDING, + __I40E_TIMEOUT_RECOVERY_PENDING, __I40E_MISC_IRQ_REQUESTED, __I40E_RESET_INTR_RECEIVED, __I40E_REINIT_REQUESTED, __I40E_PF_RESET_REQUESTED, + __I40E_PF_RESET_AND_REBUILD_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, @@ -146,11 +148,15 @@ enum i40e_state_t { __I40E_CLIENT_SERVICE_REQUESTED, __I40E_CLIENT_L2_CHANGE, __I40E_CLIENT_RESET, + __I40E_VIRTCHNL_OP_PENDING, + __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */ /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; #define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED) +#define I40E_PF_RESET_AND_REBUILD_FLAG \ + BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED) /* VSI state flags */ enum i40e_vsi_state_t { @@ -160,6 +166,7 @@ enum i40e_vsi_state_t { __I40E_VSI_OVERFLOW_PROMISC, __I40E_VSI_REINIT_REQUESTED, __I40E_VSI_DOWN_REQUESTED, + __I40E_VSI_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_VSI_STATE_SIZE__, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 80e3eec6134ee82f0ab69725e5eb08695685b11b..a5e5e7e14e6c59c27512f374326e4246928bb2e4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 5f3b8b9ff511d49a13e24f30f0d2a589915e40fe..2fa4becdaee925ba7120e9ea19de5266715d5250 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) "Cannot locate client instance close routine\n"); return; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); + return; + } cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); i40e_client_release_qvlist(&cdev->lan_info); @@ -376,7 +380,7 @@ 
void i40e_client_subtask(struct i40e_pf *pf) /* Remove failed client instance */ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - i40e_client_del_instance(pf); + return; } } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 85f75b5978fca572f776f7eab2ea7f6365cf78d3..e75b4c4872c0947e0b0df93eec1d13d51171b24b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1668,25 +1668,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -/** - * i40e_set_fc - * @hw: pointer to the hw struct - * @aq_failures: buffer to return AdminQ failure information - * @atomic_restart: whether to enable atomic link restart - * - * Set the requested flow control mode using set_phy_config. - **/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) +static noinline_for_stack enum i40e_status_code +i40e_set_fc_status(struct i40e_hw *hw, + struct i40e_aq_get_phy_abilities_resp *abilities, + bool atomic_restart) { - enum i40e_fc_mode fc_mode = hw->fc.requested_mode; - struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; - enum i40e_status_code status; + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; u8 pause_mask = 0x0; - *aq_failures = 0x0; - switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; @@ -1702,6 +1692,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, break; } + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities == abilities->abilities) + return 0; + + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities->phy_type; + config.phy_type_ext = abilities->phy_type_ext; + config.link_speed = abilities->link_speed; + config.eee_capability = abilities->eee_capability; + config.eeer = abilities->eeer_val; + config.low_power_ctrl = abilities->d3_lpan; + config.fec_config = abilities->fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + return i40e_aq_set_phy_config(hw, &config, NULL); +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. 
+ **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + enum i40e_status_code status; + + *aq_failures = 0x0; + /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); @@ -1710,31 +1742,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, return status; } - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - /* clear the old pause settings */ - config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); - /* set the new abilities */ - config.abilities |= pause_mask; - /* If the abilities have changed, then set the new config */ - if (config.abilities != abilities.abilities) { - /* Auto restart link so settings take effect */ - if (atomic_restart) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - /* Copy over all the old settings */ - config.phy_type = abilities.phy_type; - config.phy_type_ext = abilities.phy_type_ext; - config.link_speed = abilities.link_speed; - config.eee_capability = abilities.eee_capability; - config.eeer = abilities.eeer_val; - config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - status = i40e_aq_set_phy_config(hw, &config, NULL); + status = i40e_set_fc_status(hw, &abilities, atomic_restart); + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - if (status) - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { @@ -1959,6 +1970,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, return status; } +/** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Assert whether current HW API version is greater/equal than provided. 
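/*
 * [Editor's note, not part of the patch] The i40e_is_aq_api_ver_ge()
 * helper documented here replaces repeated open-coded major/minor
 * AdminQ API checks with a single predicate.  A generic sketch of the
 * same "version >= (maj, min)" test follows, using hypothetical type
 * and field names.
 */
#include <linux/types.h>

struct example_api_ver {
	u16 maj;
	u16 min;
};

static bool example_ver_ge(const struct example_api_ver *v, u16 maj, u16 min)
{
	/* true when the reported version is at least maj.min */
	return v->maj > maj || (v->maj == maj && v->min >= min);
}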
+ **/ +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, + u16 min) +{ + return (aq->api_maj_ver > maj || + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2084,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1))) - flags |= I40E_AQC_SET_VSI_PROMISC_TX; + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2192,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) + if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); @@ -2563,7 +2593,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_updatelink_status - update status of the HW network link * @hw: pointer to the hw struct **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; i40e_status status = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 56b911a5dd8be669ec0fcd231eeb01987cd7afc6..3e6c6585012f96dfe9f388bdd0faefaef0fe76bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -506,6 +506,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (vsi->type != I40E_VSI_MAIN && + vsi->type != I40E_VSI_FDIR && + vsi->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "vsi %d type %d descriptor rings not available\n", + vsi_seid, vsi->type); + return; + } if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5ff6caa83948c2c14a6b85070ddaba54e0c50c3a..a6b0f605a7d8b64edd4dc39ebb346bc7bc3a29c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1136,6 +1136,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, i40e_status status; u8 aq_failures; int err = 0; + u32 is_an; /* Changing the port's flow control is not supported if this isn't the * port's 
controlling PF @@ -1148,15 +1149,14 @@ static int i40e_set_pauseparam(struct net_device *netdev, if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; - if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE)) { + is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; + if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s autoneg \n"); return -EOPNOTSUPP; } /* If we have link and don't have autoneg */ - if (!test_bit(__I40E_DOWN, pf->state) && - !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { + if (!test_bit(__I40E_DOWN, pf->state) && !is_an) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } @@ -1207,7 +1207,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } - if (!test_bit(__I40E_DOWN, pf->state)) { + if (!test_bit(__I40E_DOWN, pf->state) && is_an) { /* Give it a little more time to try to come back */ msleep(75); if (!test_bit(__I40E_DOWN, pf->state)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac685ad4d8773125b059f1209d2b60747996b39b..bfa5e286c489ddd94ea8211b7ca8bb34c7ebd6a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -42,6 +42,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired); +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, + bool lock_acquired); static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static void i40e_fdir_sb_setup(struct i40e_pf *pf); @@ -191,6 +193,20 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, return -EINVAL; } + /* Allocate last queue in the pile for FDIR VSI queue + * so it doesn't fragment the qp_pile + */ + if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { + if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) { + dev_err(&pf->pdev->dev, + "Cannot allocate queue %d for I40E_VSI_FDIR\n", + pile->num_entries - 1); + return -ENOMEM; + } + pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT; + return pile->num_entries - 1; + } + /* start the linear search with an imperfect hint */ i = pile->search_hint; while (i < pile->num_entries) { @@ -336,6 +352,10 @@ static void i40e_tx_timeout(struct net_device *netdev) (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */ + /* don't kick off another recovery if one is already pending */ + if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ @@ -420,9 +440,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) @@ -436,24 +456,30 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int 
start; - tx_ring = READ_ONCE(vsi->tx_rings[i]); - if (!tx_ring) + ring = READ_ONCE(vsi->tx_rings[i]); + if (!ring) continue; - i40e_get_netdev_stats_struct_tx(tx_ring, stats); + i40e_get_netdev_stats_struct_tx(ring, stats); - rx_ring = &tx_ring[1]; + if (i40e_enabled_xdp_vsi(vsi)) { + ring = READ_ONCE(vsi->xdp_rings[i]); + if (!ring) + continue; + i40e_get_netdev_stats_struct_tx(ring, stats); + } + ring = READ_ONCE(vsi->rx_rings[i]); + if (!ring) + continue; do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - packets = rx_ring->stats.packets; - bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; - if (i40e_enabled_xdp_vsi(vsi)) - i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats); } rcu_read_unlock(); @@ -787,6 +813,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); + if (!p) + continue; do { start = u64_stats_fetch_begin_irq(&p->syncp); @@ -800,8 +828,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; - /* Rx queue is part of the same block as Tx queue */ - p = &p[1]; + /* locate Rx ring */ + p = READ_ONCE(vsi->rx_rings[q]); + if (!p) + continue; + do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; @@ -1409,7 +1440,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) } vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state); + set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } /** @@ -1539,17 +1570,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p) netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); /* Copy the address first, so that we avoid a possible race with - * .set_rx_mode(). If we copy after changing the address in the filter - * list, we might open ourselves to a narrow race window where - * .set_rx_mode could delete our dev_addr filter and prevent traffic - * from passing. + * .set_rx_mode(). 
+ * - Remove old address from MAC filter + * - Copy new address + * - Add new address to MAC filter */ - ether_addr_copy(netdev->dev_addr, addr->sa_data); - spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); - i40e_add_mac_filter(vsi, addr->sa_data); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + i40e_add_mac_filter(vsi, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); + if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -2560,10 +2591,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); + return; + } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && + !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { @@ -2574,6 +2610,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -2648,6 +2685,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) @@ -2678,6 +2719,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == @@ -3422,14 +3467,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), - q_vector->rx.target_itr); + q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[i]->itr_setting); wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), - q_vector->tx.target_itr); + q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; wr32(hw, I40E_PFINT_RATEN(vector - 1), @@ -3534,11 +3579,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) /* set the ITR configuration */ q_vector->rx.next_update = jiffies + 1; q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->tx.next_update = jiffies + 1; q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); - wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr); + wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; i40e_enable_misc_int_causes(pf); @@ -3867,8 +3912,16 @@ static irqreturn_t i40e_intr(int irq, void *data) } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { - ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; - set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); + /* disable any further VFLR event notifications */ 
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { + u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA); + + reg &= ~I40E_PFINT_ICR0_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + } else { + ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; + set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); + } } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { @@ -4677,7 +4730,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; - i40e_free_misc_vector(pf); + if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) + i40e_free_misc_vector(pf); i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, I40E_IWARP_IRQ_PILE_ID); @@ -6568,6 +6622,24 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; + u8 speed; + + /* Card might've been put in an unstable state by other drivers + * and applications, which causes incorrect speed values being + * set on startup. In order to clear speed registers, we call + * get_phy_capabilities twice, once to get initial state of + * available speeds, and once to get current PHY config. + */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -6581,9 +6653,9 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) } /* If link needs to go up, but was not forced to go down, - * no need for a flap + * and its speed values are OK, no need for a flap */ - if (is_up && abilities.phy_type != 0) + if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, @@ -6595,7 +6667,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - config.link_speed = abilities.link_speed; + if (abilities.link_speed != 0) + config.link_speed = abilities.link_speed; + else + config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; @@ -6744,10 +6819,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; + int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; + old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; @@ -6848,6 +6925,7 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) } ret = i40e_configure_queue_channels(vsi); if (ret) { + vsi->num_queue_pairs = old_queue_pairs; netdev_info(netdev, "Failed configuring queue channels\n"); need_reset = true; @@ -7869,6 +7947,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); + } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) { + /* Request a PF Reset + * + * Resets PF and reinitializes PFs VSI. 
+ */ + i40e_prep_for_reset(pf, lock_acquired); + i40e_reset_and_rebuild(pf, true, lock_acquired); + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -9566,6 +9652,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); } /** @@ -10152,10 +10239,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); + WRITE_ONCE(vsi->rx_rings[i], NULL); if (vsi->xdp_rings) - vsi->xdp_rings[i] = NULL; + WRITE_ONCE(vsi->xdp_rings[i], NULL); } } } @@ -10189,7 +10276,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; - vsi->tx_rings[i] = ring++; + WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; @@ -10207,7 +10294,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; - vsi->xdp_rings[i] = ring++; + WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: ring->queue_index = i; @@ -10220,7 +10307,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; @@ -10691,7 +10778,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) /* associate no queues to the misc vector */ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1); i40e_flush(hw); @@ -11926,6 +12013,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | @@ -12009,6 +12098,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); + /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ + netdev->neigh_priv_len = sizeof(u32) * 4; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ @@ -12349,7 +12441,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } - + set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { @@ -14094,6 +14186,14 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + usleep_range(1000, 2000); + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + set_bit(__I40E_VF_RESETS_DISABLED, pf->state); + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + } /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); @@ -14107,11 +14207,6 @@ static void i40e_remove(struct pci_dev *pdev) */ 
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; - } - i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. @@ -14159,6 +14254,7 @@ static void i40e_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { @@ -14167,6 +14263,7 @@ static void i40e_remove(struct pci_dev *pdev) pf->vsi[i] = NULL; } } + rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); @@ -14378,7 +14475,13 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); @@ -14522,7 +14625,7 @@ static int __init i40e_init_module(void) * since we need to be able to guarantee forward progress even under * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); + i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 35f2866b38c6b921044d851f3898e342e048813f..1199f0502d6d5169fa211beb15f3b86331428582 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -694,7 +694,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + strncpy(pf->ptp_caps.name, i40e_driver_name, + sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c6d24eaede184fdd9ad2fde19724af1b9406b9da..9154abe13b93f6994c41dd3893f3b69596a812d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) * check for the valid queue id **/ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, - u8 qid) + u16 qid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); @@ -196,7 +196,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, * * check for the valid vector id **/ -static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) +static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) { struct i40e_pf *pf = vf->pf; @@ -441,14 +441,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf, size; + int ret = 0; + + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + if (qvlist_info->num_vectors > msix_vf) { + dev_warn(&pf->pdev->dev, + "Incorrect number of iwarp vectors %u. 
Maximum %u allowed.\n", + qvlist_info->num_vectors, + msix_vf); + ret = -EINVAL; + goto err_out; + } size = sizeof(struct virtchnl_iwarp_qvlist_info) + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); + kfree(vf->qvlist_info); vf->qvlist_info = kzalloc(size, GFP_KERNEL); - if (!vf->qvlist_info) - return -ENOMEM; - + if (!vf->qvlist_info) { + ret = -ENOMEM; + goto err_out; + } vf->qvlist_info->num_vectors = qvlist_info->num_vectors; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -459,8 +473,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, v_idx = qv_info->v_idx; /* Validate vector id belongs to this vf */ - if (!i40e_vc_isvalid_vector_id(vf, v_idx)) - goto err; + if (!i40e_vc_isvalid_vector_id(vf, v_idx)) { + ret = -EINVAL; + goto err_free; + } vf->qvlist_info->qv_info[i] = *qv_info; @@ -502,10 +518,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, } return 0; -err: +err_free: kfree(vf->qvlist_info); vf->qvlist_info = NULL; - return -EINVAL; +err_out: + return ret; } /** @@ -1187,7 +1204,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * Returns true if the VF is reset, false otherwise. + * Returns true if the VF is in reset, resets successfully, or resets + * are disabled and false otherwise. **/ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) { @@ -1197,11 +1215,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) u32 reg; int i; + if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) + return true; + /* If the VFs have been disabled, this means something else is * resetting the VF, so we shouldn't continue. */ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) - return false; + return true; i40e_trigger_vf_reset(vf, flr); @@ -1365,6 +1386,15 @@ void i40e_free_vfs(struct i40e_pf *pf) i40e_notify_client_of_vf_enable(pf, 0); + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + /* Amortize wait time by stopping all VFs at the same time */ for (i = 0; i < pf->num_alloc_vfs; i++) { if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) @@ -1380,15 +1410,6 @@ void i40e_free_vfs(struct i40e_pf *pf) i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); } - /* Disable IOV before freeing resources. This lets any VF drivers - * running in the host get themselves cleaned up before we yank - * the carpet out from underneath their feet. 
- */ - if (!pci_vfs_assigned(pf->pdev)) - pci_disable_sriov(pf->pdev); - else - dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); - /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; @@ -1542,24 +1563,34 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); + int ret = 0; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } if (num_vfs) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); + i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } - return i40e_pci_sriov_enable(pdev, num_vfs); + ret = i40e_pci_sriov_enable(pdev, num_vfs); + goto sriov_configure_out; } if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); + i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); - return -EINVAL; + ret = -EINVAL; + goto sriov_configure_out; } - return 0; +sriov_configure_out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; } /***********************virtual channel routines******************/ @@ -2014,6 +2045,11 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } + if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; @@ -2399,8 +2435,10 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) (u8 *)&stats, sizeof(stats)); } -/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ -#define I40E_VC_MAX_MAC_ADDR_PER_VF 12 +/* If the VF is not trusted restrict the number of MAC/VLAN it can program + * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast + */ +#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) #define I40E_VC_MAX_VLAN_PER_VF 8 /** @@ -2569,6 +2607,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } + + if (vf->pf_set_mac && + ether_addr_equal(al->list[i].addr, + vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", + vf->default_lan_addr.addr, vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } } vsi = pf->vsi[vf->lan_vsi_idx]; @@ -3323,7 +3371,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (!vf->adq_enabled) { @@ -3331,15 +3379,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: ADq is not enabled, can't apply cloud filter\n", vf->vf_id); aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; - goto err; + aq_ret = I40E_ERR_PARAM; + goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); @@ -3400,13 +3448,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: Failed to add 
cloud filter, err %s aq_err %s\n", vf->vf_id, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto err; + goto err_free; } INIT_HLIST_NODE(&cfilter->cloud_node); hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); + /* release the pointer passing it to the collection */ + cfilter = NULL; vf->num_cloud_filters++; -err: +err_free: + kfree(cfilter); +err_out: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, aq_ret); } @@ -3610,7 +3662,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, int ret; pf->vf_aq_requests++; - if (local_vf_id >= pf->num_alloc_vfs) + if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) return -EINVAL; vf = &(pf->vf[local_vf_id]); @@ -3832,6 +3884,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); @@ -3878,6 +3935,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); error_param: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -3929,6 +3987,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4046,6 +4109,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ret = 0; error_pvid: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4067,6 +4131,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); @@ -4095,6 +4164,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf->tx_rate = max_tx_rate; error: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4115,6 +4185,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4152,6 +4227,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ret = 0; error_param: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4173,6 +4249,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) int abs_vf_id; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id 
>= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4199,7 +4280,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf->link_forced = true; vf->link_up = true; pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; @@ -4216,6 +4297,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 0, (u8 *)&pfe, sizeof(pfe), NULL); error_out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4237,6 +4319,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4270,6 +4357,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) ret = -EIO; } out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4288,15 +4376,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - return -EINVAL; + ret = -EINVAL; + goto out; } if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } vf = &pf->vf[vf_id]; @@ -4319,5 +4414,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) } out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index a9730711e2579da0ed1621f57d99597868ca2c83..b56d22b530a7079ff740c56a490e0e79a2c27822 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1291,7 +1291,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, unsigned int size) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else @@ -1301,6 +1301,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + rx_buffer->page_offset; prefetch(va); #if L1_CACHE_BYTES < 128 prefetch(va + L1_CACHE_BYTES); @@ -1355,7 +1356,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, unsigned int size) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else @@ -1365,6 +1366,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + 
rx_buffer->page_offset; prefetch(va); #if L1_CACHE_BYTES < 128 prefetch(va + L1_CACHE_BYTES); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index fef6d892ed4cfe5ae293aa2fe01ef38a257c47bf..29d5f851f55e75b95feb466c3e8fe5936af78ff6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1797,8 +1797,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); - i40evf_free_queues(adapter); i40evf_free_q_vectors(adapter); + i40evf_free_queues(adapter); kfree(adapter->vf_res); i40evf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; @@ -3097,18 +3097,19 @@ static int i40evf_set_features(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when VLAN is set for VF - * and return an error in this case + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload */ - if (VLAN_ALLOWED(adapter)) { + if (!VLAN_ALLOWED(adapter)) { + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - return -EINVAL; } return 0; @@ -3332,6 +3333,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ @@ -3881,6 +3884,8 @@ static void i40evf_remove(struct pci_dev *pdev) if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer); + cancel_work_sync(&adapter->adminq_task); + i40evf_free_rss(adapter); if (hw->aq.asq.count) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 565677de5ba376184d5acbe32644f13207d2767e..94dabc9d89f731f436c90ed7bf56eebd56297e63 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -153,6 +153,32 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) NULL, 0); } +/** + * i40evf_validate_num_queues + * @adapter: adapter structure + * + * Validate that the number of queues the PF has sent in + * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
+ **/ +static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) +{ + if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) { + struct virtchnl_vsi_resource *vsi_res; + int i; + + dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", + adapter->vf_res->num_queue_pairs, + I40EVF_MAX_REQ_QUEUES); + dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", + I40EVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + vsi_res = &adapter->vf_res->vsi_res[i]; + vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + } + } +} + /** * i40evf_get_vf_config * @adapter: private adapter structure @@ -195,6 +221,11 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) err = (i40e_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); + /* some PFs send more queues than we should have so validate that + * we aren't getting too many queues + */ + if (!err) + i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: kfree(event.msg_buf); @@ -1329,6 +1360,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); + i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); /* restore current mac address */ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 868f4a1d0f724379534791f74127261bba0c9682..67591722c625e56b0f2a5726951b5edaa87b3fcf 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -39,9 +39,9 @@ extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_DFLT_NUM_DESC 128 -#define ICE_MIN_NUM_DESC 8 -#define ICE_MAX_NUM_DESC 8160 #define ICE_REQ_DESC_MULTIPLE 32 +#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE +#define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index a0614f472658ac5305c6aff1e589d99dbdd1634a..328d293bc3ff522560c1b164532a4703b43aceb5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1056,10 +1056,10 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 -#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S) -#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) -#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) __le16 module_typeid; __le16 length; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 661beea6af795cd72abf3e609347c89b21d9902d..f8d00263d90198bc0824ba3703cb7a0d41f4f2aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -904,7 
+904,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * - * requests common resource using the admin queue commands (0x0008) + * Requests common resource using the admin queue commands (0x0008). + * When attempting to acquire the Global Config Lock, the driver can + * learn of three states: + * 1) ICE_SUCCESS - acquired lock, and can perform download package + * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load + * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading + * + * Note that if the caller is in an acquire lock, perform action, release lock + * phase of operation, it is possible that the FW may detect a timeout and issue + * a CORER. In this case, the driver will receive a CORER interrupt and will + * have to determine its cause. The calling thread that is handling this flow + * will likely get an error propagated back to it indicating the Download + * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, @@ -922,13 +937,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, cmd_resp->res_id = cpu_to_le16(res); cmd_resp->access_type = cpu_to_le16(access); cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. - * If the resource is held by someone else, the command completes with - * busy return value and the timeout field indicates the maximum time - * the current owner of the resource has to free it. + */ + + /* Global config lock response utilizes an additional status field. + * + * If the Global config lock resource is held by some other driver, the + * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field + * and the timeout field indicates the maximum time the current owner + * of the resource has to free it. + */ + if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { + if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return 0; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_IN_PROG) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return ICE_ERR_AQ_ERROR; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_DONE) { + return ICE_ERR_AQ_NO_WORK; + } + + /* invalid FW response, force a timeout immediately */ + *timeout = 0; + return ICE_ERR_AQ_ERROR; + } + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = le32_to_cpu(cmd_resp->timeout); @@ -967,30 +1012,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * @hw: pointer to the HW structure * @res: resource id * @access: access type (read or write) + * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. 
*/ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access) + enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; + u32 time_left = timeout; enum ice_status status; - u32 time_left = 0; - u32 timeout; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* An admin queue return code of ICE_AQ_RC_EEXIST means that another - * driver has previously acquired the resource and performed any - * necessary updates; in this case the caller does not obtain the - * resource and has no further work to do. + /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. */ - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { - status = ICE_ERR_AQ_NO_WORK; + if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; - } if (status) ice_debug(hw, ICE_DBG_RES, @@ -1003,11 +1046,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { + if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ - status = ICE_ERR_AQ_NO_WORK; break; - } if (!status) /* lock acquired */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9a5519130af13bd83c91e6347170ce44f65d4d3c..6455b6952ec8e4e44c20d895dcba07b95e236794 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -23,7 +23,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access); + enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 62be72fdc8f30c283b2385216f41ff15b87dc628..921cc0c9a30d7088f7aaf33bbcce65561a1a9542 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -518,22 +518,31 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) /** * ice_aq_ver_check - Check the reported AQ API version. - * @fw_branch: The "branch" of FW, typically describes the device type - * @fw_major: The major version of the FW API - * @fw_minor: The minor version increment of the FW API + * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. */ -static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor) +static bool ice_aq_ver_check(struct ice_hw *hw) { - if (fw_branch != EXP_FW_API_VER_BRANCH) - return false; - if (fw_major != EXP_FW_API_VER_MAJOR) - return false; - if (fw_minor != EXP_FW_API_VER_MINOR) + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + dev_warn(ice_hw_to_dev(hw), + "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + dev_info(ice_hw_to_dev(hw), + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } return true; } @@ -588,8 +597,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) if (status) goto init_ctrlq_free_rq; - if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver, - hw->api_min_ver)) { + if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } @@ -806,6 +814,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 retval = 0; u32 val = 0; + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; @@ -900,7 +911,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ice_sq_done(hw, cq)) break; - mdelay(1); + udelay(ICE_CTL_Q_SQ_CMD_USEC); total_delay++; } while (total_delay < cq->sq_cmd_timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index ea02b89243e2ceded547fa7be26ca61a24a1b865..0f2cdb06e6efafe9156a3a6f26d01baa74304277 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -30,8 +30,9 @@ enum ice_ctl_q { ICE_CTL_Q_ADMIN, }; -/* Control Queue default settings */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ +/* Control Queue timeout settings - max delay 250ms */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to dma head */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c71a9b528d6d558eca4a97a4511e445b00aa5bf1..4c5c87b158f5593a7e4264443e5ac57828f2ee7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -478,9 +478,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_max_pending = ICE_MAX_NUM_DESC; ring->rx_pending = vsi->rx_rings[0]->count; ring->tx_pending = vsi->tx_rings[0]->count; - ring->rx_mini_pending = ICE_MIN_NUM_DESC; + + /* Rx mini and jumbo rings are not supported */ ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; + ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } @@ -498,14 +500,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ring->tx_pending < ICE_MIN_NUM_DESC || ring->rx_pending > ICE_MAX_NUM_DESC || ring->rx_pending < ICE_MIN_NUM_DESC) { - netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", ring->tx_pending, ring->rx_pending, - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC); + 
ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, + ICE_REQ_DESC_MULTIPLE); return -EINVAL; } new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_tx_cnt != ring->tx_pending) + netdev_info(netdev, + "Requested Tx descriptor count rounded up to %d\n", + new_tx_cnt); new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); + if (new_rx_cnt != ring->rx_pending) + netdev_info(netdev, + "Requested Rx descriptor count rounded up to %d\n", + new_rx_cnt); /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && @@ -786,10 +797,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } if (!test_bit(__ICE_DOWN, pf->state)) { - /* Give it a little more time to try to come back */ + /* Give it a little more time to try to come back. If still + * down, restart autoneg link or reinitialize the interface. + */ msleep(75); if (!test_bit(__ICE_DOWN, pf->state)) return ice_nway_reset(netdev); + + ice_down(vsi); + ice_up(vsi); } return err; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 3f047bb43348881c592adf3b236518ee21bc2fdd..00c833cd2b3ae3bed0784399b19d1e5d5853c39a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7,7 +7,7 @@ #include "ice.h" -#define DRV_VERSION "ice-0.7.0-k" +#define DRV_VERSION "0.7.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -535,10 +535,13 @@ static void ice_reset_subtask(struct ice_pf *pf) ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset. start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); + } clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); goto unlock; } @@ -652,6 +655,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) case ICE_FC_RX_PAUSE: fc = "RX"; break; + case ICE_FC_NONE: + fc = "None"; + break; default: fc = "Unknown"; break; @@ -1754,7 +1760,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, + pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@ -1762,7 +1769,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else set_bit(__ICE_EMPR_RECV, pf->state); - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* There are couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_RECOVERY_PENDING in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. 
+ */ + hw->reset_ongoing = true; } } @@ -4185,7 +4205,14 @@ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) } status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, NULL); - if (status) { + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&pf->pdev->dev, + "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", status); @@ -4333,8 +4360,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_enable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_enable(&q_vector->napi); + } } /** @@ -4817,8 +4848,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_disable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_disable(&q_vector->napi); + } } /** diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 295a8cd87fc16565148bf6cbb7713c02a71454f9..3274c543283c688ef2ecdb018dd247453a16fce9 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) if (hw->nvm.blank_nvm_mode) return 0; - return ice_acquire_res(hw, ICE_NVM_RES_ID, access); + return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index 9a95c4ffd7d795b03eebea76f6953e0255c2eea2..d2dae913d81e0bac21f3b978b1cfdacf8785d0f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -20,6 +20,7 @@ enum ice_status { ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_DOES_NOT_EXIST = -15, ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_RESET_ONGOING = -18, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, ICE_ERR_AQ_ERROR = -100, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 6b7ec2ae5ad6798818a9d5947e8071aba410ffff..1bfc59dff51f7eaf61a5226f06b253079595ca8b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -468,6 +468,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, void *daddr = NULL; u32 act = 0; __be16 *off; + u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { s_rule->pdata.lkup_tx_rx.act = 0; @@ -503,14 +504,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; case ICE_FWD_TO_QGRP: + q_rgn = f_info->qgrp_size > 0 ? 
+ (u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & ICE_SINGLE_ACT_Q_REGION_M; break; - case ICE_DROP_PACKET: - act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP; - break; default: return; } @@ -1017,6 +1023,9 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id = new_fltr->fwd_id.vsi_id; enum ice_adminq_opc opcode; + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + /* A rule already exists with the new VSI being added */ if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map)) return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6481e3d863749837ff215621d6e1d21a33d574c3..1d84fedf1f649b921d15c238e981f323ecea59cc 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1106,7 +1106,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget) napi_complete_done(napi, work_done); if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); - return 0; + + return min(work_done, budget - 1); } /* helper function for building cmd/type/offset */ @@ -1519,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) /* update gso_segs and bytecount */ first->gso_segs = skb_shinfo(skb)->gso_segs; - first->bytecount = (first->gso_segs - 1) * off->header_len; + first->bytecount += (first->gso_segs - 1) * off->header_len; cd_tso_len = skb->len - off->header_len; cd_mss = skb_shinfo(skb)->gso_size; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 97c366e0ca596facaec70c7f1bb4ddc8b81e8774..5ca9d684429d13138379327effcd288e5b0aa73c 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -34,10 +34,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, - ICE_GLOBAL_CFG_LOCK_RES_ID, - ICE_CHANGE_LOCK_RES_ID + ICE_CHANGE_LOCK_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID }; +/* FW update timeout definitions are in milliseconds */ +#define ICE_NVM_TIMEOUT 180000 +#define ICE_CHANGE_LOCK_TIMEOUT 1000 +#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 + enum ice_aq_res_access_type { ICE_RES_READ = 1, ICE_RES_WRITE @@ -83,12 +88,12 @@ struct ice_link_status { u64 phy_type_low; u16 max_frame_size; u16 link_speed; + u16 req_speeds; u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; u8 ext_info; u8 pacing; - u8 req_speeds; /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of * ice_aqc_get_phy_caps structure */ @@ -288,6 +293,7 @@ struct ice_hw { u8 sw_entry_point_layer; u8 evb_veb; /* true for VEB, false for VEPA */ + u8 reset_ongoing; /* true if hw is in reset, false otherwise */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index bafdcf70a353d534b2c92fcc898b435f87ffa951..fdab974b245b76acff56b5b10afd27e86e4eba2b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) dev_spec->module_plugged = true; if (eth_flags->e1000_base_lx || 
eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; - } else if (eth_flags->e100_base_fx) { + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { dev_spec->sgmii_active = true; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { @@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* do not change link mode for 100BaseFX */ - if (dev_spec->eth_flags.e100_base_fx) - break; - /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; - if (hw->phy.media_type == e1000_media_type_copper) + if (dev_spec->sgmii_active) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 8a28f3388f699bf30df581af4f9aa08ed5f2b567..dca671591ef65f7894dbab7e434b04af7f981ad4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -194,6 +194,8 @@ /* enable link status from external LINK_0 and LINK_1 pins */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index c54ebedca6da9a3ddaeff1b43cde2993438c4714..c393cb2c0f1681f702a8b648f21ef49c3c8a565d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 5acf3b743876a485f61002658dccba47a0ca3d59..2e17625e6c35cc7f9e2d3bed4468f44e4820c69f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, u32 speed; u32 supported, advertising; - status = rd32(E1000_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? 
+ 0 : rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | @@ -181,7 +182,7 @@ static int igb_get_link_ksettings(struct net_device *netdev, advertising &= ~ADVERTISED_1000baseKX_Full; } } - if (eth_flags->e100_base_fx) { + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { supported |= SUPPORTED_100baseT_Full; advertising |= ADVERTISED_100baseT_Full; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0796cef96fa335ee1907d9aa5d4d56f4fd366ca3..ef5d11723d562c0d00f0ba24443f0516d4f342b9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2064,7 +2064,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter) if ((hw->phy.media_type == e1000_media_type_copper) && (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { swap_now = true; - } else if (!(connsw & E1000_CONNSW_SERDESD)) { + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { /* copper signal takes time to appear */ if (adapter->copper_tries < 4) { adapter->copper_tries++; @@ -2481,7 +2482,7 @@ static int igb_set_features(struct net_device *netdev, else igb_reset(adapter); - return 0; + return 1; } static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -3468,6 +3469,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + pm_runtime_put_noidle(&pdev->dev); return 0; @@ -4557,6 +4561,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, static void igb_set_rx_buffer_len(struct igb_adapter *adapter, struct igb_ring *rx_ring) { +#if (PAGE_SIZE < 8192) + struct e1000_hw *hw = &adapter->hw; +#endif + /* set build_skb and buffer size flags */ clear_ring_build_skb_enabled(rx_ring); clear_ring_uses_large_buffer(rx_ring); @@ -4567,10 +4575,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, set_ring_build_skb_enabled(rx_ring); #if (PAGE_SIZE < 8192) - if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) - return; - - set_ring_uses_large_buffer(rx_ring); + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || + rd32(E1000_RCTL) & E1000_RCTL_SBP) + set_ring_uses_large_buffer(rx_ring); #endif } @@ -4680,6 +4687,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -5700,6 +5709,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, */ if (tx_ring->launchtime_enable) { ts = ns_to_timespec64(first->skb->tstamp); + first->skb->tstamp = 0; context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; @@ -6221,9 +6231,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -7132,7 +7151,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 
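The igb_reset_task() hunk above serializes the reset against netdev state under the RTNL lock; here is a minimal sketch of that guard, assuming kernel context, with my_adapter, __MY_DOWN, __MY_RESETTING and my_reinit_locked() as made-up names.

struct my_adapter {
	struct work_struct reset_task;
	unsigned long state;		/* __MY_DOWN / __MY_RESETTING bits */
};

static void my_reset_task(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, reset_task);

	rtnl_lock();
	/* If the interface is already down or mid-reset, this work item has
	 * nothing left to do; resetting now would only race with that path.
	 */
	if (test_bit(__MY_DOWN, &adapter->state) ||
	    test_bit(__MY_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	my_reinit_locked(adapter);	/* full down/up while holding RTNL */
	rtnl_unlock();
}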
*addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ @@ -8754,9 +8773,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif + bool wake; rtnl_lock(); netif_device_detach(netdev); @@ -8769,12 +8786,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -8791,10 +8802,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); @@ -8808,12 +8815,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, wr32(E1000_WUFC, 0); } - *enable_wake = wufc || adapter->en_mng_pt; - if (!*enable_wake) + wake = wufc || adapter->en_mng_pt; + if (!wake) igb_power_down_link(adapter); else igb_power_up_link(adapter); + if (enable_wake) + *enable_wake = wake; + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ @@ -8856,22 +8866,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) static int __maybe_unused igb_suspend(struct device *dev) { - int retval; - bool wake; - struct pci_dev *pdev = to_pci_dev(dev); - - retval = __igb_shutdown(pdev, &wake, 0); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 0); } static int __maybe_unused igb_resume(struct device *dev) @@ -8942,22 +8937,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) static int __maybe_unused igb_runtime_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake, 1); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 1); } static int __maybe_unused igb_runtime_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 9f4d700e09df33cb5d3e17576859a563f9c6c52c..29ced6b74d364632113e9674f8d005257e548411 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -51,9 +51,15 @@ * * The 40 bit 82580 SYSTIM overflows every * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter + * needs to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, so we aim for 8 + * minutes to be sure the actual interval is shorter than 9.16 minutes. 
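A quick standalone check of the arithmetic in the igb_ptp.c comment above; this is a hypothetical plain C program, not driver code. A 40-bit nanosecond counter wraps in roughly 18.3 minutes, so half the wrap interval is about 9.16 minutes and the 8 minute refresh period stays inside it.

#include <stdio.h>

int main(void)
{
	double wrap_s = (double)(1ULL << 40) * 1e-9;	/* ~1099.5 s */
	double wrap_min = wrap_s / 60.0;		/* ~18.3 min */
	double half_min = wrap_min / 2.0;		/* ~9.16 min */

	printf("wrap: %.1f min, half: %.2f min, chosen period: 8 min\n",
	       wrap_min, half_min);
	return 8.0 < half_min ? 0 : 1;	/* 0: 8 minutes is safely short */
}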
*/ -#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) #define IGB_PTP_TX_TIMEOUT (HZ * 15) #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e0c989ffb2b3edf9ff2ea65a1d4a58341f624d41..df827c2541628ab82caa11691efa44d448e9bd96 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2888,6 +2888,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_hw_init: + netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4fc906c6166b34c790ebe60dc228d52756cf7eb4..074e23b4534b718f6af1df1f0d2169e447cadcbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -179,11 +179,14 @@ struct vf_data_storage { u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_qos; u16 tx_rate; + int link_enable; + int link_state; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; int xcast_mode; unsigned int vf_api; + u8 primary_abort_count; }; enum ixgbevf_xcast_modes { @@ -546,6 +549,8 @@ struct ixgbe_mac_addr { #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) #define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ +#define IXGBE_PRIMARY_ABORT_LIMIT 5 + /* board specific private data structure */ struct ixgbe_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -605,6 +610,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_EEE_ENABLED BIT(15) #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) +#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19) /* Tx fast path data */ int num_tx_queues; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0bd1294ba51737240d510f31bbd255faceffeb11..39c5e6fdb72c5b46179eca14680d171bbd86697d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; + reg = hw->fc.pause_time * 0x00010001U; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5a8461fe6a99bfbf8ab20b85e38c0f0c24e0bb5..73e769212c65b9f6e22c9566dc0110d1cc4d93d6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", +#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2) + "mdd-disable-vf", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3223,7 +3225,8 @@ static int ixgbe_get_module_info(struct net_device *dev, page_swap = true; } - if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) { /* We have a SFP, but it does not support SFF-8472 */ 
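The ixgbe_fc_enable_generic() change a little above adds a U suffix to the 0x00010001 multiplier; a small runnable illustration of why. FCTTV carries the pause time for two traffic classes per register, so a 16-bit value is replicated into both halves, and with a signed constant the 0xffff case overflows int.

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pause_time(uint16_t pause_time)
{
	/* replicate the 16-bit pause time into the low and high halves;
	 * the U suffix keeps 0xffff * 0x00010001 in unsigned arithmetic
	 */
	return pause_time * 0x00010001U;
}

int main(void)
{
	printf("0x%08x\n", pack_pause_time(0xffff));	/* 0xffffffff */
	printf("0x%08x\n", pack_pause_time(0x0680));	/* 0x06800680 */
	return 0;
}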
modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; @@ -3409,6 +3412,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) + priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF; + return priv_flags; } @@ -3416,11 +3422,27 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbe_adapter *adapter = netdev_priv(netdev); unsigned int flags2 = adapter->flags2; + unsigned int i; flags2 &= ~IXGBE_FLAG2_RX_LEGACY; if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; + flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF; + if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) { + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + /* Reset primary abort counter */ + for (i = 0; i < adapter->num_vfs; i++) + adapter->vfinfo[i].primary_abort_count = 0; + + flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + } else { + e_info(probe, + "Cannot set private flags: Operation not supported\n"); + return -EOPNOTSUPP; + } + } + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4b109ff26ea40e7587024f8b0d198..d50c5b55da1806083c6c36b2f8ac279585d8aed6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index da4322e4daed5de4fb44f06d8cdb488bc41f6432..49e6d66ccf8027a179201fac32a62f690f9ea171 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -4,6 +4,7 @@ #include "ixgbe.h" #include #include +#include /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers @@ -113,7 +114,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) **/ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) { - struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 buf[4] = {0, 0, 0, 0}; u16 idx; @@ -132,9 +132,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); } - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; } /** @@ -676,6 +673,10 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; + if (adapter->num_vfs && + adapter->bridge_mode != BRIDGE_MODE_VEPA) + return -EOPNOTSUPP; + /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index d361f570ca37be6a8df4cd2e2c7a806cee8b1a54..952630cb882c2e8786f44b377f0346312f811ccf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - 
adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; napi_hash_del(&q_vector->napi); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6cdd58d9d461bd34ab62e5f68a43141f5cd70323..9455c619169485d317b1b4408cc1fdd23ee1bea6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "ixgbe.h" #include "ixgbe_common.h" @@ -1821,13 +1822,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } else if (ring_uses_build_skb(rx_ring)) { + if (ring_uses_build_skb(rx_ring)) { unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1844,6 +1839,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_frag_size(frag), DMA_FROM_DEVICE); } + + /* If the page was released, just unmap it. 
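The WRITE_ONCE() conversions above pair with READ_ONCE() loads in the statistics path later in this patch; a minimal sketch of that pairing, assuming kernel headers (<linux/compiler.h>, <linux/types.h>) and using made-up my_ring/my_adapter types.

struct my_ring { u64 packets; };

struct my_adapter {
	struct my_ring *tx_ring[64];
	int num_tx_queues;
};

static void my_free_ring(struct my_adapter *adapter, int i)
{
	WRITE_ONCE(adapter->tx_ring[i], NULL);	/* publish the removal */
}

static u64 my_sum_packets(struct my_adapter *adapter)
{
	u64 total = 0;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		/* the ring may be torn down concurrently; load the pointer
		 * exactly once and tolerate it being NULL
		 */
		struct my_ring *ring = READ_ONCE(adapter->tx_ring[i]);

		if (!ring)
			continue;
		total += ring->packets;
	}
	return total;
}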
*/ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + } } /** @@ -1940,7 +1943,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } -static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -1951,7 +1955,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) return false; #else /* The last offset is a bit aggressive in that we assume the @@ -2016,11 +2020,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, - const unsigned int size) + const unsigned int size, + int *rx_buffer_pgcnt) { struct ixgbe_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); *skb = rx_buffer->skb; @@ -2050,9 +2061,10 @@ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb) + struct sk_buff *skb, + int rx_buffer_pgcnt) { - if (ixgbe_can_reuse_rx_page(rx_buffer)) { + if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -2255,7 +2267,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, rx_buffer->page_offset ^= truesize; #else unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); rx_buffer->page_offset += truesize; @@ -2295,6 +2308,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + int rx_buffer_pgcnt; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2314,7 +2328,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); - rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -2356,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, break; } - ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); cleaned_count++; /* place incomplete frames back on ring for completion */ @@ -2625,7 +2639,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, /* 16K ints/sec to 9.2K ints/sec */ avg_wire_size *= 15; avg_wire_size += 11452; - } else if (avg_wire_size <= 1980) { + } else if (avg_wire_size < 1968) { /* 9.2K ints/sec to 8K ints/sec */ avg_wire_size *= 5; avg_wire_size += 22420; @@ -2658,6 +2672,8 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, case IXGBE_LINK_SPEED_2_5GB_FULL: case IXGBE_LINK_SPEED_1GB_FULL: case IXGBE_LINK_SPEED_10_FULL: + if (avg_wire_size > 8064) + avg_wire_size = 8064; itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * IXGBE_ITR_ADAPTIVE_MIN_INC; @@ -3579,12 +3595,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) else mtqc |= IXGBE_MTQC_64VF; } else { - if (tcs > 4) + if (tcs > 4) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) + } else if (tcs > 1) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - mtqc = IXGBE_MTQC_64Q_1PB; + } else { + u8 max_txq = adapter->num_tx_queues + + adapter->num_xdp_queues; + if (max_txq > 63) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } } IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); @@ -3924,8 +3946,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_VMDQRSS64EN; - /* Enable L3/L4 for Tx Switched packets */ - mrqc |= IXGBE_MRQC_L3L4TXSWEN; + /* Enable L3/L4 for Tx Switched packets only for X550, + * older devices do not support this feature + */ + if (hw->mac.type >= ixgbe_mac_X550) + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; @@ -5175,6 +5200,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; + u8 queue; spin_lock(&adapter->fdir_perfect_lock); @@ -5183,12 +5209,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && (ring >= adapter->num_rx_queues)) { + e_err(drv, "FDIR restore failed without VF, ring: %u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, "FDIR 
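The ixgbe_fdir_filter_restore() hunk above validates the ring and VF taken from a stored filter before programming it, then maps a (VF, ring) pair onto an absolute queue index. Below is a simplified, runnable model of that mapping; it omits the driver's num_vfs bound and the PF reg_idx translation, and all numbers are made-up examples.

#include <stdio.h>

static int action_to_queue(unsigned int vf, unsigned int ring,
			   unsigned int num_rx_queues,
			   unsigned int num_rx_queues_per_pool)
{
	if (!vf) {
		if (ring >= num_rx_queues)
			return -1;		/* stale PF ring index */
		return ring;			/* PF case: ring as-is */
	}
	if (ring >= num_rx_queues_per_pool)
		return -1;			/* stale VF ring index */
	return (vf - 1) * num_rx_queues_per_pool + ring;
}

int main(void)
{
	/* e.g. 4 queues per VF pool: VF 3, ring 1 -> absolute queue 9 */
	printf("%d\n", action_to_queue(3, 1, 16, 4));
	printf("%d\n", action_to_queue(0, 20, 16, 4));	/* rejected: -1 */
	return 0;
}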
restore failed with VF, vf: %hhu, ring: %u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } + ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + &filter->filter, filter->sw_idx, queue); } spin_unlock(&adapter->fdir_perfect_lock); @@ -5574,6 +5622,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) @@ -6026,11 +6077,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) for (i = 0 ; i < adapter->num_vfs; i++) adapter->vfinfo[i].clear_to_send = false; - /* ping all the active vfs to let them know we are going down */ - ixgbe_ping_all_vfs(adapter); - - /* Disable all VFTE/VFRE TX/RX */ - ixgbe_disable_tx_rx(adapter); + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } /* disable transmits in the hardware now that interrupts are off */ @@ -6967,7 +7015,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; @@ -6988,15 +7039,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; @@ -7509,6 +7565,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } #ifdef CONFIG_PCI_IOV +static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { + adapter->vfinfo[vf].primary_abort_count++; + if (adapter->vfinfo[vf].primary_abort_count == + IXGBE_PRIMARY_ABORT_LIMIT) { + ixgbe_set_vf_link_state(adapter, vf, + IFLA_VF_LINK_STATE_DISABLE); + adapter->vfinfo[vf].primary_abort_count = 0; + + e_info(drv, + "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on", + hw->bus.func, vf, + adapter->vfinfo[vf].vf_mac_addresses); + } + } +} + static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -7540,8 +7617,10 @@ static void ixgbe_check_for_bad_vf(struct 
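ixgbe_bad_vf_abort() above counts PCI master aborts per VF and administratively disables the VF link once a limit is reached; this is a small standalone model of that counter, with my_disable_vf_link() standing in for the driver's link-state call and the limit mirroring IXGBE_PRIMARY_ABORT_LIMIT.

#include <stdio.h>

#define ABORT_LIMIT 5

struct vf_info { unsigned int abort_count; int link_disabled; };

static void my_disable_vf_link(struct vf_info *vf)
{
	vf->link_disabled = 1;
}

static void note_master_abort(struct vf_info *vf)
{
	if (++vf->abort_count == ABORT_LIMIT) {
		my_disable_vf_link(vf);
		vf->abort_count = 0;	/* start counting again from zero */
	}
}

int main(void)
{
	struct vf_info vf = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		note_master_abort(&vf);
	printf("disabled=%d count=%u\n", vf.link_disabled, vf.abort_count);
	return 0;
}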
ixgbe_adapter *adapter) continue; pci_read_config_word(vfdev, PCI_STATUS, &status_reg); if (status_reg != IXGBE_FAILED_READ_CFG_WORD && - status_reg & PCI_STATUS_REC_MASTER_ABORT) + status_reg & PCI_STATUS_REC_MASTER_ABORT) { + ixgbe_bad_vf_abort(adapter, vf); pcie_flr(vfdev); + } } } @@ -8533,7 +8612,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock) { - if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; @@ -8596,7 +8676,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ #ifdef CONFIG_XFRM_OFFLOAD - if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (xfrm_offload(skb) && + !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); @@ -9367,6 +9448,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, jump->mat = nexthdr[i].jump; adapter->jump_tables[link_uhtid] = jump; break; + } else { + kfree(mask); + kfree(input); + kfree(jump); } } return 0; @@ -9671,7 +9756,7 @@ static int ixgbe_set_features(struct net_device *netdev, NETIF_F_HW_VLAN_CTAG_FILTER)) ixgbe_set_rx_mode(netdev); - return 0; + return 1; } /** @@ -10226,6 +10311,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, @@ -10539,6 +10625,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + /* Make sure the SWFW semaphore is in a valid state */ if (hw->mac.ops.init_swfw_sync) hw->mac.ops.init_swfw_sync(hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index e085b6520dac8871ebe4f117024aecd38c55b650..2246b07beae462a74fe6d59258daa414f3e5addf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -80,6 +80,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 64e44e01c973fc4c047f04432c4694ac06271a25..c56baad04ee615067649c521aef12bedb99c150c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -45,6 +45,7 @@ #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_DDM_IMPLEMENTED 0x40 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 3c6f01c41b788eb45730e49083f2025c7c0683f1..1f0e84120b32df2d9c2d0071478bd2181701a3b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, for (i = 0; i < num_vfs; i++) { /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; /* We support VF RSS querying only for 82599 and x540 * devices at the moment. These devices share RSS @@ -467,12 +468,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return err; } -static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - int max_frame = msgbuf[1]; u32 max_frs; + if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return -EINVAL; + } + /* * For 82599EB we have to keep all PFs and VFs operating with * the same max_frame value in order to avoid sending an oversize @@ -532,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) } } - /* MTU < 68 is an error and causes problems on some kernels */ - if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { - e_err(drv, "VF max_frame %d out of range\n", max_frame); - return -EINVAL; - } - /* pull current max frame size from hardware */ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); max_frs &= IXGBE_MHADD_MFS_MASK; @@ -699,7 +698,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) u8 num_tcs = adapter->hw_tcs; u32 reg_val; u32 queue; - u32 word; /* remove VLAN filters beloning to this VF */ ixgbe_clear_vf_vlans(adapter, vf); @@ -721,8 +719,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, adapter->default_up, vf); - if (vfinfo->spoofchk_enabled) + if (vfinfo->spoofchk_enabled) { hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); + } } /* reset multicast table array for vf */ @@ -752,6 +752,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) } } + IXGBE_WRITE_FLUSH(hw); +} + +static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 word; + /* Clear VF's mailbox memory */ for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); @@ -809,6 +817,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, } } +/** + * ixgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or reset correct transmit and receive for vf + **/ +static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) +{ + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + } else { + reg_req_tx = reg_cur_tx & ~(1 << vf_shift); + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + 
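ixgbe_set_vf_lpe() above now range-checks the VF-supplied frame size before touching MAXFRS, since it arrives over the mailbox and is untrusted. The sketch below models that flow under the assumption, taken from the surrounding driver code, that the PF only ever raises the shared limit; the constants mirror ETH_MIN_MTU (68) and IXGBE_MAX_JUMBO_FRAME_SIZE (9728).

#include <stdio.h>

#define MIN_MTU			68
#define MAX_JUMBO_FRAME_SIZE	9728

static int vf_set_lpe(unsigned int *maxfrs, unsigned int max_frame)
{
	if (max_frame < MIN_MTU || max_frame > MAX_JUMBO_FRAME_SIZE)
		return -1;		/* reject a bogus VF request */

	if (max_frame > *maxfrs)	/* only ever grow the shared limit */
		*maxfrs = max_frame;
	return 0;
}

int main(void)
{
	unsigned int maxfrs = 1518;

	printf("%d maxfrs=%u\n", vf_set_lpe(&maxfrs, 9000), maxfrs);
	printf("%d maxfrs=%u\n", vf_set_lpe(&maxfrs, 70000), maxfrs);
	return 0;
}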
} + + /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. + * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + } + + /* Enable/Disable particular VF */ + if (reg_cur_tx != reg_req_tx) + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); +} + static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; @@ -825,6 +884,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reset the filters for the device */ ixgbe_vf_reset_event(adapter, vf); + ixgbe_vf_clear_mbx(adapter, vf); + /* set vf mac address */ if (!is_zero_ether_addr(vf_mac)) ixgbe_set_vf_mac(adapter, vf, vf_mac); @@ -832,11 +893,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) vf_shift = vf % 32; reg_offset = vf / 32; - /* enable transmit for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= BIT(vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); - /* force drop enable for all VF Rx queues */ reg = IXGBE_QDE_ENABLE; if (adapter->vfinfo[vf].pf_vlan) @@ -844,27 +900,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ixgbe_write_qde(adapter, vf, reg); - /* enable receive for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= BIT(vf_shift); - /* - * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
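ixgbe_set_vf_rx_tx() above computes reg_offset = vf / 32 and vf_shift = vf % 32 to reach the right VFRE/VFTE bit; a tiny runnable model of that indexing follows, with regs[] standing in for the two hardware registers that cover up to 64 VFs.

#include <stdint.h>
#include <stdio.h>

static void set_vf_enable(uint32_t regs[2], unsigned int vf, int enable)
{
	unsigned int reg_offset = vf / 32;	/* which register */
	unsigned int vf_shift = vf % 32;	/* which bit inside it */

	if (enable)
		regs[reg_offset] |= 1U << vf_shift;
	else
		regs[reg_offset] &= ~(1U << vf_shift);
}

int main(void)
{
	uint32_t vfre[2] = { 0, 0 };

	set_vf_enable(vfre, 3, 1);	/* VF 3  -> reg 0, bit 3  */
	set_vf_enable(vfre, 40, 1);	/* VF 40 -> reg 1, bit 8  */
	printf("VFRE0=0x%08x VFRE1=0x%08x\n", vfre[0], vfre[1]);
	return 0;
}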
- * For more info take a look at ixgbe_set_vf_lpe - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; - -#ifdef CONFIG_FCOE - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); - -#endif /* CONFIG_FCOE */ - if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~BIT(vf_shift); - } - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + ixgbe_set_vf_rx_tx(adapter, vf); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; @@ -1184,6 +1220,25 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } + + *link_state = adapter->vfinfo[vf].link_enable; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1229,7 +1284,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); break; case IXGBE_VF_SET_LPE: - retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); + retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); break; case IXGBE_VF_SET_MACVLAN: retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); @@ -1249,6 +1304,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_LINK_STATE: + retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1298,18 +1356,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) } } -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - /* disable transmit and receive for all vfs */ - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); -} - static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -1335,6 +1381,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_set_all_vfs - update vfs queues + * @adapter: Pointer to adapter struct + * + * Update setting transmit and receive queues for all vfs + **/ +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) + ixgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); +} + int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -1632,6 +1693,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +/** + * ixgbe_set_vf_link_state - Set link state + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * @state: required link state + * + * Set a link force state on/off a single vf + **/ +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) +{ + adapter->vfinfo[vf].link_state = state; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (test_bit(__IXGBE_DOWN, &adapter->state)) + 
adapter->vfinfo[vf].link_enable = false; + else + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_ENABLE: + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + adapter->vfinfo[vf].link_enable = false; + break; + } + + ixgbe_set_vf_rx_tx(adapter, vf); + + /* restart the VF */ + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); +} + +/** + * ixgbe_ndo_set_vf_link_state - Set link state + * @netdev: network interface device structure + * @vf: VF identifier + * @state: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(&adapter->pdev->dev, + "NDO set VF link - invalid VF identifier %d\n", vf); + return -EINVAL; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state %d - not supported\n", + vf, state); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state disable\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state auto\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + default: + dev_err(&adapter->pdev->dev, + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } + + return ret; +} + int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 3ec21923c89cd9e38cd39327445a184f68f661c9..0690ecb8dfa348e9493fe9e9003bc3b8225a928e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); #endif void ixgbe_msg_task(struct ixgbe_adapter *adapter); int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos, __be16 vlan_proto); @@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state); int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a8148c7126e51d9757dde2f230901b9437adf116..9772016222c306f0aa9295669e744ef4f04c091c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2248,7 +2248,9 @@ static s32 
ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, *autoneg = false; if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 56a1031dcc074deccaa65eb6e2eaac3800016175..469df8a242e82d726ad1c1fd6f3638c8b998639d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -383,6 +383,8 @@ struct ixgbevf_adapter { u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; + bool link_state; + #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5a228582423b74726de2bd040a388eb8e59df764..450c0a95d36d81f5521f46b710f975cec565cd59 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2066,11 +2066,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int count = 0; - if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; @@ -2268,7 +2263,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; + bool state; ixgbevf_configure_msix(adapter); @@ -2281,6 +2278,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); + state = adapter->link_state; + hw->mac.ops.get_link_state(hw, &adapter->link_state); + if (state && state != adapter->link_state) + dev_info(&pdev->dev, "VF is administratively disabled\n"); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -2501,6 +2503,7 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) msleep(1); ixgbevf_down(adapter); + pci_set_master(adapter->pdev); ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); @@ -3049,6 +3052,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + adapter->link_state = true; + set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; @@ -3281,7 +3286,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) ixgbevf_watchdog_update_link(adapter); - if (adapter->link_up) + if (adapter->link_up && adapter->link_state) ixgbevf_watchdog_link_is_up(adapter); else ixgbevf_watchdog_link_is_down(adapter); @@ -3849,6 +3854,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index bfd9ae150808810947e661907cae8e7e26cd5531..464841e2131576b12cb6a30d07c9477ea9c1675a 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -92,6 +92,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bf0577e819e1c247811f9e1f9c0380e21584a219..c96a4c3ca0754467c430743ce604bf881dee8e9a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -506,9 +506,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } - ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE); - - return 0; + return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + IXGBE_VFMAILBOX_SIZE); } /** @@ -571,6 +570,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) return -EOPNOTSUPP; } +/** + * ixgbevf_get_link_state_vf - Get VF link state from PF + * @hw: pointer to the HW structure + * @link_state: link state storage + * + * Returns state of the operation error or success. + */ +static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + u32 msgbuf[2]; + s32 ret_val; + s32 err; + + msgbuf[0] = IXGBE_VF_GET_LINK_STATE; + msgbuf[1] = 0x0; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + + if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) { + ret_val = IXGBE_ERR_MBX; + } else { + ret_val = 0; + *link_state = msgbuf[1]; + } + + return ret_val; +} + +/** + * ixgbevf_hv_get_link_state_vf - * Hyper-V variant - just a stub. + * @hw: unused + * @link_state: unused + * + * Hyper-V variant; there is no mailbox communication. 
+ */ +static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure @@ -947,6 +986,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, + .get_link_state = ixgbevf_get_link_state_vf, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, .set_rlpml = ixgbevf_set_rlpml_vf, @@ -964,6 +1004,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { .set_rar = ixgbevf_hv_set_rar_vf, .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_hv_update_xcast_mode, + .get_link_state = ixgbevf_hv_get_link_state_vf, .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, .set_vfta = ixgbevf_hv_set_vfta_vf, .set_rlpml = ixgbevf_hv_set_rlpml_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d1e9e306653b87ca9ee6e73543ca918f8a3787af..45d9269218db60bee69b23a32e6fef5eb95e7175 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -42,6 +42,7 @@ struct ixgbe_mac_operations { s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*update_xcast_mode)(struct ixgbe_hw *, int); + s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index e08301d833e2ed91bda977109404175f556f563f..3f4f2882ac26ea2782dc22ed189ebdd977e84378 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -221,8 +221,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) if (ch->dma.irq) free_irq(ch->dma.irq, priv); if (IS_RX(ch->idx)) { - int desc; - for (desc = 0; desc < LTQ_DESC_NUM; desc++) + struct ltq_dma_channel *dma = &ch->dma; + + for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++) dev_kfree_skb_any(ch->skb[ch->dma.desc]); } } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 62f204f3231693807231c269183a49961020c7c8..59007d6cd36d99b6bba508b6eb0458901e26a161 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2886,7 +2886,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - return ret; + goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 
@@ -2894,6 +2894,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index c5dac6bd2be4d31b988df0f572deedff15ce18fc..ee7857298361ded94b87e7e715c87610a2a05924 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -64,7 +64,7 @@ struct orion_mdio_dev { void __iomem *regs; - struct clk *clk[3]; + struct clk *clk[4]; /* * If we have access to the error interrupt pin (which is * somewhat misnamed as it not only reflects internal errors @@ -321,6 +321,10 @@ static int orion_mdio_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { dev->clk[i] = of_clk_get(pdev->dev.of_node, i); + if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk; + } if (IS_ERR(dev->clk[i])) break; clk_prepare_enable(dev->clk[i]); @@ -362,6 +366,7 @@ static int orion_mdio_probe(struct platform_device *pdev) if (dev->err_interrupt > 0) writel(0, dev->regs + MVMDIO_ERR_INT_MASK); +out_clk: for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { if (IS_ERR(dev->clk[i])) break; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b4ed7d394d079e0acb05a89d8866e8e97d0e252d..337919fbaefe02e70baab500237261dc64f839d9 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -406,7 +406,6 @@ struct mvneta_port { struct mvneta_pcpu_stats __percpu *stats; int pkt_size; - unsigned int frag_size; void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; @@ -2148,7 +2147,7 @@ static int mvneta_rx_hwbm(struct napi_struct *napi, if (unlikely(!skb)) goto err_drop_frame_ret_pool; - dma_sync_single_range_for_cpu(dev->dev.parent, + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, rx_desc->buf_phys_addr, MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes, @@ -2395,7 +2394,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, } /* Main tx processing */ -static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); @@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp, if (!pp->bm_priv) { /* Set Offset */ mvneta_rxq_offset_set(pp, rxq, 0); - mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size); + mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? + PAGE_SIZE : + MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); } else { @@ -3603,7 +3604,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) /* Use the cpu associated to the rxq when it is online, in all * the other cases, use the cpu 0 which can't be offline. 
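The mv643xx_eth_shared_probe() fix above adds an err_put_clk label so a failure after the clock was enabled still disables it; a minimal sketch of that unwind pattern, assuming kernel context (<linux/clk.h>, <linux/platform_device.h>), with my_setup_from_of() as a hypothetical later probe step.

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);	/* clock is optional here */

	ret = my_setup_from_of(pdev);		/* any later step may fail */
	if (ret)
		goto err_put_clk;

	return 0;

err_put_clk:
	/* undo the enable so the prepare/enable count does not leak */
	if (!IS_ERR(clk))
		clk_disable_unprepare(clk);
	return ret;
}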
*/ - if (cpu_online(pp->rxq_def)) + if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) elected_cpu = pp->rxq_def; max_cpu = num_present_cpus(); @@ -3749,7 +3750,6 @@ static int mvneta_open(struct net_device *dev) int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); - pp->frag_size = PAGE_SIZE; ret = mvneta_setup_rxqs(pp); if (ret) @@ -4611,7 +4611,7 @@ static int mvneta_probe(struct platform_device *pdev) err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); - goto err_free_stats; + goto err_netdev; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, @@ -4622,14 +4622,12 @@ static int mvneta_probe(struct platform_device *pdev) return 0; err_netdev: - unregister_netdev(dev); if (pp->bm_priv) { mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); mvneta_bm_put(pp->bm_priv); } -err_free_stats: free_percpu(pp->stats); err_free_ports: free_percpu(pp->ports); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 67b9e81b7c0246435c26680e06939ef2e061bfd7..23f60bc5d48f5e2d3b6e0ad3db2e7e7166d7d697 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -253,7 +253,8 @@ #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \ + ((version) == MVPP21 ? 0xffff : 0xff) #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) @@ -1106,5 +1107,6 @@ void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index f9744a61e5dd6fc6282eddca38b2718dac899e12..d77414e1e704b25eb013e0ba590be60d98e53e95 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -660,6 +660,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, return 0; } +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); @@ -667,10 +674,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { - struct dentry *mvpp2_dir, *mvpp2_root; + struct dentry *mvpp2_dir; int ret, i; - mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); if (!mvpp2_root) { mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); if (IS_ERR(mvpp2_root)) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a74002b43b5183baa4d829b489a1c757e60ae209..beb79455adc7e7248cb7965bf55890dd021e24cc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -908,7 +908,7 @@ static void mvpp2_interrupts_unmask(void *arg) u32 val; val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if 
(port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; @@ -928,7 +928,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@ -1310,8 +1310,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, int i; for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - memcpy(data + i * ETH_GSTRING_LEN, - &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + strscpy(data + i * ETH_GSTRING_LEN, + mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); } } @@ -1372,13 +1372,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - ~MVPP2_GMAC_PORT_RESET_MASK; + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | + MVPP2_GMAC_PORT_RESET_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; } /* Change maximum receive size of the port */ @@ -1408,7 +1404,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) /* Set defaults to the MVPP2 port */ static void mvpp2_defaults_set(struct mvpp2_port *port) { - int tx_port_num, val, queue, ptxq, lrxq; + int tx_port_num, val, queue, lrxq; if (port->priv->hw_version == MVPP21) { /* Update TX FIFO MIN Threshold */ @@ -1426,11 +1422,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); /* Close bandwidth for all queues */ - for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { - ptxq = mvpp2_txq_phys(port->id, queue); + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); - } + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); /* Set refill period to 1 usec, refill tokens * and bucket size to maximum @@ -2275,7 +2269,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, txq->descs_dma = 0; /* Set minimum bandwidth for disabled TXQs */ - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); /* Set Tx descriptors queue starting address and size */ cpu = get_cpu(); @@ -2907,7 +2901,7 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, } /* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; @@ -3065,7 +3059,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) } /* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@ -3346,7 +3341,7 @@ static int mvpp2_open(struct net_device *dev) valid = true; } - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + if (priv->hw_version == MVPP22 && port->link_irq) { err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, dev->name, port); if (err) { @@ -3507,6 +3502,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) static int 
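The memcpy() to strscpy() switch in mvpp2_ethtool_get_strings() above presumably guards against copying a fixed ETH_GSTRING_LEN bytes out of a shorter source string; the sketch below shows the safe pattern with made-up stat names, assuming kernel headers for strscpy(), ARRAY_SIZE() and ETH_GSTRING_LEN.

static const char * const my_stat_names[] = {
	"rx_packets",		/* much shorter than ETH_GSTRING_LEN (32) */
	"tx_packets",
};

static void my_get_strings(char *data)
{
	int i;

	/* strscpy() stops at the source NUL and always terminates the
	 * destination, so short literals never cause an over-read.
	 */
	for (i = 0; i < ARRAY_SIZE(my_stat_names); i++)
		strscpy(data + i * ETH_GSTRING_LEN, my_stat_names[i],
			ETH_GSTRING_LEN);
}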
mvpp2_change_mtu(struct net_device *dev, int mtu) { struct mvpp2_port *port = netdev_priv(dev); + bool running = netif_running(dev); int err; if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { @@ -3515,40 +3511,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } - if (!netif_running(dev)) { - err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - return 0; - } - - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - } - - mvpp2_stop_dev(port); + if (running) + mvpp2_stop_dev(port); err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { + if (err) { + netdev_err(dev, "failed to change MTU\n"); + /* Reconfigure BM to the original MTU */ + mvpp2_bm_update_mtu(dev, dev->mtu); + } else { port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - goto out_start; } - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - -out_start: - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); + if (running) { + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + } - return 0; -log_error: - netdev_err(dev, "failed to change MTU\n"); return err; } @@ -4262,8 +4242,27 @@ static void mvpp2_phylink_validate(struct net_device *dev, unsigned long *supported, struct phylink_link_state *state) { + struct mvpp2_port *port = netdev_priv(dev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + /* Invalid combinations */ + switch (state->interface) { + case PHY_INTERFACE_MODE_10GKR: + case PHY_INTERFACE_MODE_XAUI: + if (port->gop_id != 0) + goto empty_set; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) + goto empty_set; + break; + default: + break; + } + phylink_set(mask, Autoneg); phylink_set_port_modes(mask); phylink_set(mask, Pause); @@ -4271,30 +4270,45 @@ static void mvpp2_phylink_validate(struct net_device *dev, switch (state->interface) { case PHY_INTERFACE_MODE_10GKR: - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); - phylink_set(mask, 10000baseKR_Full); + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_NA: + if (port->gop_id == 0) { + phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + } /* Fall-through */ - default: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_SGMII: phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); - phylink_set(mask, 10000baseT_Full); /* Fall-through */ case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); phylink_set(mask, 2500baseX_Full); + break; + default: + goto empty_set; } bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(state->advertising, state->advertising, mask, 
__ETHTOOL_LINK_MODE_MASK_NBITS); + return; + +empty_set: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); } static void mvpp22_xlg_link_state(struct mvpp2_port *port, @@ -4399,9 +4413,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, if (state->pause & MLO_PAUSE_RX) ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; - ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | - MVPP22_XLG_CTRL4_EN_IDLE_CHECK; + ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK); + ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); @@ -4411,12 +4425,15 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { u32 an, ctrl0, ctrl2, ctrl4; + u32 old_ctrl2; an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + old_ctrl2 = ctrl2; + /* Force link down */ an &= ~MVPP2_GMAC_FORCE_LINK_PASS; an |= MVPP2_GMAC_FORCE_LINK_DOWN; @@ -4489,6 +4506,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; + } } static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, @@ -5109,6 +5132,8 @@ static int mvpp2_probe(struct platform_device *pdev) if (has_acpi_companion(&pdev->dev)) { acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (!acpi_id) + return -EINVAL; priv->hw_version = (unsigned long)acpi_id->driver_data; } else { priv->hw_version = @@ -5321,9 +5346,6 @@ static int mvpp2_remove(struct platform_device *pdev) mvpp2_dbgfs_cleanup(priv); - flush_workqueue(priv->stats_queue); - destroy_workqueue(priv->stats_queue); - fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) { mutex_destroy(&priv->port_list[i]->gather_stats_lock); @@ -5332,6 +5354,8 @@ static int mvpp2_remove(struct platform_device *pdev) i++; } + destroy_workqueue(priv->stats_queue); + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; @@ -5388,7 +5412,18 @@ static struct platform_driver mvpp2_driver = { }, }; -module_platform_driver(mvpp2_driver); +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas "); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 392fd895f27826e81153f230603fa37b8e921fcc..5692c6087bbb0781ef473ea5dfe8f6148c4ae4f7 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, } /* Set value */ - 
pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK; + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |= + shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, @@ -1905,8 +1906,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) } /* Find tcam entry with matched pair */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) { unsigned char byte[2], enable[2]; struct mvpp2_prs_entry pe; @@ -1914,13 +1914,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, int tid; /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (!port->priv->prs_shadow[tid].valid || + port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1950,7 +1950,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + tid = mvpp2_prs_vid_range_find(port, vid, mask); reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) @@ -2008,7 +2008,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) int tid; /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); /* No such entry */ if (tid < 0) @@ -2026,8 +2026,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } } } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 3a9730612a704d318b5ab2297a31822775dad1a4..ff2fea0f8b75181eba81fd62d08194537116391e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1260,7 +1260,8 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget) return work_done; } -static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9c08c3650c02cdc28cc83f682447dfece1327c65..d6f8a41c3e35cd3c2e555b4cabd2e1f90a15973a 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, memset(p, 0, regs->len); memcpy_fromio(p, io, B3_RAM_ADDR); - memcpy_fromio(p + 
B3_RI_WTO_R1, io + B3_RI_WTO_R1, - regs->len - B3_RI_WTO_R1); + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } } /* Wake on Lan only supported on Yukon chips with rev 1 or above */ @@ -3120,7 +3122,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, skb_put(skb, len); if (dev->features & NETIF_F_RXCSUM) { - skb->csum = csum; + skb->csum = le16_to_cpu(csum); skb->ip_summed = CHECKSUM_COMPLETE; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 697d9b374f5e12d2c4df19e720f893ec0a5a96a7..d013f30019b69f61617157304974649de6359ae1 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -46,6 +46,7 @@ #include #include #include +#include #include @@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); -static int disable_msi = 0; +static int disable_msi = -1; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); @@ -4931,6 +4932,38 @@ static const char *sky2_name(u8 chipid, char *buf, int sz) return buf; } +static const struct dmi_system_id msi_blacklist[] = { + { + .ident = "Dell Inspiron 1545", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"), + }, + }, + { + .ident = "Gateway P-79", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), + DMI_MATCH(DMI_PRODUCT_NAME, "P-79"), + }, + }, + { + .ident = "ASUS P6T", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6T"), + }, + }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, + {} +}; + static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev, *dev1; @@ -5042,6 +5075,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_pci; } + if (disable_msi == -1) + disable_msi = !!dmi_check_system(msi_blacklist); + if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); if (err) { @@ -5087,7 +5123,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 200; + pdev->d3_delay = 300; return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e6abdc399deb3c1dc4662faa082e170e2a212f6..1d55f014725efd75bd94dfbe2484dfae0792f857 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1784,6 +1784,7 @@ static void mtk_poll_controller(struct net_device *dev) static int mtk_start_dma(struct mtk_eth *eth) { + u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? 
MTK_RX_2B_OFFSET : 0; int err; err = mtk_dma_init(eth); @@ -1800,7 +1801,7 @@ static int mtk_start_dma(struct mtk_eth *eth) MTK_QDMA_GLO_CFG); mtk_w32(eth, - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, MTK_PDMA_GLO_CFG); @@ -2304,13 +2305,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { cmd->data = MTK_MAX_RX_RING_NUM; ret = 0; } break; case ETHTOOL_GRXCLSRLCNT: - if (dev->features & NETIF_F_LRO) { + if (dev->hw_features & NETIF_F_LRO) { struct mtk_mac *mac = netdev_priv(dev); cmd->rule_cnt = mac->hwlro_ip_cnt; @@ -2318,11 +2319,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } break; case ETHTOOL_GRXCLSRULE: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_entry(dev, cmd); break; case ETHTOOL_GRXCLSRLALL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_get_fdir_all(dev, cmd, rule_locs); break; @@ -2339,11 +2340,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_add_ipaddr(dev, cmd); break; case ETHTOOL_SRXCLSRLDEL: - if (dev->features & NETIF_F_LRO) + if (dev->hw_features & NETIF_F_LRO) ret = mtk_hwlro_del_ipaddr(dev, cmd); break; default: diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 36054e6fb9d34840cd15f9c45296c9258df9a276..f200b8c420d5738e5bc5c67b124c71cc27f21fd6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -5,7 +5,7 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on MAY_USE_DEVLINK - depends on PCI + depends on PCI && NETDEVICES && ETHERNET && INET select MLX4_CORE imply PTP_1588_CLOCK ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 4bdf2505954271071007a748be8b901fee12fcef..21788d4f988142d696aa6390c9e4fa796766c81c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, int align, u32 skip_mask, u32 *puid) { - u32 uid; + u32 uid = 0; u32 res; struct mlx4_zone_allocator *zone_alloc = zone->allocator; struct mlx4_zone_entry *curr_node; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e65bc3c95630a184309e3fad5ce77080d457b750..857588e2488d2878b843f5753f91ffb3e4ac0508 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) if (!priv->cmd.context) return -ENOMEM; + if (mlx4_is_mfunc(dev)) + mutex_lock(&priv->cmd.slave_cmd_mutex); down_write(&priv->cmd.switch_sem); for (i = 0; i < priv->cmd.max_cmds; ++i) { priv->cmd.context[i].token = i; @@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) down(&priv->cmd.poll_sem); priv->cmd.use_events = 1; up_write(&priv->cmd.switch_sem); + if (mlx4_is_mfunc(dev)) + mutex_unlock(&priv->cmd.slave_cmd_mutex); return err; } @@ -2682,6 +2686,8 @@ 
void mlx4_cmd_use_polling(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i; + if (mlx4_is_mfunc(dev)) + mutex_lock(&priv->cmd.slave_cmd_mutex); down_write(&priv->cmd.switch_sem); priv->cmd.use_events = 0; @@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev) down(&priv->cmd.event_sem); kfree(priv->cmd.context); + priv->cmd.context = NULL; up(&priv->cmd.poll_sem); up_write(&priv->cmd.switch_sem); + if (mlx4_is_mfunc(dev)) + mutex_unlock(&priv->cmd.slave_cmd_mutex); } struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index f11b45001cad8c5635684e820a03f183e12d6ef5..e639a365ac2d4333a667c1be4b69c6b459c0df3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, tx_pause = !!(pause->tx_pause); rx_pause = !!(pause->rx_pause); - rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); - tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); + rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp; + tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, @@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); break; case ETHTOOL_GRXCLSRLALL: + cmd->data = MAX_NUM_OF_FS_RULES; while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { err = mlx4_en_get_flow(dev, cmd, i); if (!err) @@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_port_profile new_prof; struct mlx4_en_priv *tmp; + int total_tx_count; int port_up = 0; int xdp_count; int err = 0; @@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev, mutex_lock(&mdev->state_lock); xdp_count = priv->tx_ring_num[TX_XDP] ? 
channel->rx_count : 0; - if (channel->tx_count * priv->prof->num_up + xdp_count > - priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) { + total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count; + if (total_tx_count > MAX_TX_RINGS) { err = -EINVAL; en_err(priv, "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", - channel->tx_count * priv->prof->num_up + xdp_count, - MAX_TX_RINGS); + total_tx_count, MAX_TX_RINGS); goto out; } @@ -2010,6 +2011,8 @@ static int mlx4_en_set_tunable(struct net_device *dev, return ret; } +#define MLX4_EEPROM_PAGE_LEN 256 + static int mlx4_en_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { @@ -2044,7 +2047,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, break; case MLX4_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fe49384eba48cb3f75bd33f5bfb9cf1fa15af791..e60ca4c8648210c3d7072879b8b82e595a68301a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -92,6 +92,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_port_profile new_prof; struct mlx4_en_priv *tmp; + int total_count; int port_up = 0; int err = 0; @@ -105,6 +106,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) MLX4_EN_NUM_UP_HIGH; new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up * new_prof.num_up; + total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP]; + if (total_count > MAX_TX_RINGS) { + err = -EINVAL; + en_err(priv, + "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", + total_count, MAX_TX_RINGS); + goto out; + } err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) goto out; @@ -1375,8 +1384,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } priv->port_stats.tx_timeout++; - en_dbg(DRV, priv, "Scheduling watchdog\n"); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { + en_dbg(DRV, priv, "Scheduling port restart\n"); + queue_work(mdev->workqueue, &priv->restart_task); + } } @@ -1730,6 +1741,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_deactivate_cq(priv, cq); goto tx_err; } + clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state); if (t != TX_XDP) { tx_ring->tx_queue = netdev_get_tx_queue(dev, i); tx_ring->recycle_ring = NULL; @@ -1826,6 +1838,7 @@ int mlx4_en_start_port(struct net_device *dev) local_bh_enable(); } + clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -1996,7 +2009,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - watchdog_task); + restart_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; @@ -2266,9 +2279,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, bool carry_xdp_prog) { struct bpf_prog *xdp_prog; - int i, t; + int i, t, ret; - mlx4_en_copy_priv(tmp, priv, prof); + ret = mlx4_en_copy_priv(tmp, priv, prof); + if (ret) { + en_warn(priv, "%s: mlx4_en_copy_priv() failed, 
return\n", + __func__); + return ret; + } if (mlx4_en_alloc_resources(tmp)) { en_warn(priv, @@ -2378,7 +2396,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { mutex_lock(&mdev->state_lock); if (!mdev->device_up) { - /* NIC is probably restarting - let watchdog task reset + /* NIC is probably restarting - let restart task reset * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { @@ -2387,7 +2405,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, + &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } mutex_unlock(&mdev->state_lock); @@ -2873,7 +2893,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (err) { en_err(priv, "Failed starting port %d for XDP change\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } @@ -3271,7 +3292,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->restart_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); @@ -3494,8 +3515,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } - /* MTU range: 46 - hw-specific max */ - dev->min_mtu = MLX4_EN_MIN_MTU; + /* MTU range: 68 - hw-specific max */ + dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = priv->max_mtu; mdev->pndev[port] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a1aeeb8094c376f9fac9610b7a4606e92860d4fc..f509a6ce31db7594763dce20cd70a33c5a1f3fdb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, } #endif +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + /* We reach this function only after checking that any of * the (IPv4 | IPv6) bits are set in cqe->status. */ @@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { __wsum hw_checksum = 0; + void *hdr; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to skip + * checksum complete. 
+ */ + if (short_frame(skb->len)) + return -EINVAL; - void *hdr = (u8 *)va + sizeof(struct ethhdr); - + hdr = (u8 *)va + sizeof(struct ethhdr); hw_checksum = csum_unfold((__force __sum16)cqe->checksum); if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && @@ -822,6 +835,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb_record_rx_queue(skb, cq_ring); if (likely(dev->features & NETIF_F_RXCSUM)) { + /* TODO: For IP non TCP/UDP packets when csum complete is + * not an option (not supported or any other reason) we can + * actually check cqe IPOK status bit and report + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE + */ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) && (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && @@ -927,6 +945,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) bool clean_complete = true; int done; + if (!budget) + return 0; + if (priv->tx_ring_num[TX_XDP]) { xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; if (xdp_tx_cq->xdp_busy) { @@ -1172,7 +1193,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); - goto rss_err; + goto qp_alloc_err; } rss_map->indir_qp->event = mlx4_en_sqp_event; @@ -1226,6 +1247,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); mlx4_qp_remove(mdev->dev, rss_map->indir_qp); mlx4_qp_free(mdev->dev, rss_map->indir_qp); +qp_alloc_err: kfree(rss_map->indir_qp); rss_map->indir_qp = NULL; rss_err: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1857ee0f0871d48285a6d3711f7c3e9a1e08a05f..29041d4a3f28ea33c1389564b78bb9a6d64b27ad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, .dma = tx_info->map0_dma, }; - if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { + if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { dma_unmap_page(priv->ddev, tx_info->map0_dma, PAGE_SIZE, priv->dma_dir); put_page(tx_info->page); @@ -385,6 +385,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } +static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe, + u16 cqe_index, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info; + struct mlx4_en_tx_desc *tx_desc; + u16 wqe_index; + int desc_size; + + en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n", + ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), + false); + + wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; + tx_info = &ring->tx_info[wqe_index]; + desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE; + en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, + wqe_index, desc_size); + tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); + + if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + return; + + en_err(priv, "Scheduling port restart\n"); + queue_work(mdev->workqueue, &priv->restart_task); +} + bool 
mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int napi_budget) { @@ -431,13 +460,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, dma_rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == - MLX4_CQE_OPCODE_ERROR)) { - struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; - - en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", - cqe_err->vendor_err_syndrome, - cqe_err->syndrome); - } + MLX4_CQE_OPCODE_ERROR)) + if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) + mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index, + ring); /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index babcfd9c0571fc6ffac47bd222304b62c65824c1..04ebce738db91e9877ab798ab7d8faeee68d296f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2064,9 +2064,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, { struct mlx4_cmd_mailbox *mailbox; __be32 *outbox; + u64 qword_field; u32 dword_field; - int err; + u16 word_field; u8 byte_field; + int err; static const u8 a0_dmfs_query_hw_steering[] = { [0] = MLX4_STEERING_DMFS_A0_DEFAULT, [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, @@ -2094,19 +2096,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* QPC/EEC/CQC/EQC/RDMARC attributes */ - MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); - MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); - MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); - MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); - MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); - MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); - MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); - MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); - MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); - MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); - MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); - MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); - MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET); + param->qpc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET); + param->log_num_qps = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET); + param->srqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET); + param->log_num_srqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET); + param->cqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET); + param->log_num_cqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET); + param->altc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET); + param->auxc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET); + param->eqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET); + param->log_num_eqs = byte_field & 0x1f; + MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + param->num_sys_eqs = word_field & 0xfff; + MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + param->rdmarc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, 
INIT_HCA_LOG_RD_OFFSET); + param->log_rd_per_qp = byte_field & 0x7; MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { @@ -2125,22 +2140,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); - MLX4_GET(byte_field, outbox, - INIT_HCA_FS_A0_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET); param->dmfs_high_steer_mode = a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + param->log_mc_hash_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; } /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ @@ -2164,15 +2178,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); - MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); - MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET); + param->mw_enabled = byte_field >> 7; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + param->log_mpt_sz = byte_field & 0x3f; MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); - MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + param->log_uar_sz = byte_field & 0xf; /* phv_check enable */ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); @@ -2714,7 +2731,7 @@ void mlx4_opreq_action(struct work_struct *work) if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); - return; + goto out; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 7262c6310650e0516bb8dbe08c8dd6038814f503..288fca826a55c41251a8f8d60fca3b3dfc90748b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu int i; if (chunk->nsg > 0) - pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, + pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages, PCI_DMA_BIDIRECTIONAL); for (i = 0; i < chunk->npages; ++i) - __free_pages(sg_page(&chunk->mem[i]), - 
get_order(chunk->mem[i].length)); + __free_pages(sg_page(&chunk->sg[i]), + get_order(chunk->sg[i].length)); } static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) @@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk * for (i = 0; i < chunk->npages; ++i) dma_free_coherent(&dev->persist->pdev->dev, - chunk->mem[i].length, - lowmem_page_address(sg_page(&chunk->mem[i])), - sg_dma_address(&chunk->mem[i])); + chunk->buf[i].size, + chunk->buf[i].addr, + chunk->buf[i].dma_addr); } void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) @@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, return 0; } -static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, - int order, gfp_t gfp_mask) +static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, + int order, gfp_t gfp_mask) { - void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, - &sg_dma_address(mem), gfp_mask); - if (!buf) + buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order, + &buf->dma_addr, gfp_mask); + if (!buf->addr) return -ENOMEM; - if (offset_in_page(buf)) { - dma_free_coherent(dev, PAGE_SIZE << order, - buf, sg_dma_address(mem)); + if (offset_in_page(buf->addr)) { + dma_free_coherent(dev, PAGE_SIZE << order, buf->addr, + buf->dma_addr); return -ENOMEM; } - sg_set_buf(mem, buf, PAGE_SIZE << order); - sg_dma_len(mem) = PAGE_SIZE << order; + buf->size = PAGE_SIZE << order; return 0; } @@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (npages > 0) { if (!chunk) { - chunk = kmalloc_node(sizeof(*chunk), + chunk = kzalloc_node(sizeof(*chunk), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), dev->numa_node); if (!chunk) { - chunk = kmalloc(sizeof(*chunk), + chunk = kzalloc(sizeof(*chunk), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!chunk) goto fail; } + chunk->coherent = coherent; - sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); - chunk->npages = 0; - chunk->nsg = 0; + if (!coherent) + sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN); list_add_tail(&chunk->list, &icm->chunk_list); } @@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, if (coherent) ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, - &chunk->mem[chunk->npages], - cur_order, mask); + &chunk->buf[chunk->npages], + cur_order, mask); else - ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], + ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], cur_order, mask, dev->numa_node); @@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, if (coherent) ++chunk->nsg; else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg, chunk->npages, PCI_DMA_BIDIRECTIONAL); @@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, } if (!coherent && chunk) { - chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg, chunk->npages, PCI_DMA_BIDIRECTIONAL); @@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, u64 idx; struct mlx4_icm_chunk *chunk; struct mlx4_icm *icm; - struct page *page = NULL; + void *addr = NULL; if (!table->lowmem) return NULL; @@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, list_for_each_entry(chunk, &icm->chunk_list, list) { for (i = 
0; i < chunk->npages; ++i) { + dma_addr_t dma_addr; + size_t len; + + if (table->coherent) { + len = chunk->buf[i].size; + dma_addr = chunk->buf[i].dma_addr; + addr = chunk->buf[i].addr; + } else { + struct page *page; + + len = sg_dma_len(&chunk->sg[i]); + dma_addr = sg_dma_address(&chunk->sg[i]); + + /* XXX: we should never do this for highmem + * allocation. This function either needs + * to be split, or the kernel virtual address + * return needs to be made optional. + */ + page = sg_page(&chunk->sg[i]); + addr = lowmem_page_address(page); + } + if (dma_handle && dma_offset >= 0) { - if (sg_dma_len(&chunk->mem[i]) > dma_offset) - *dma_handle = sg_dma_address(&chunk->mem[i]) + - dma_offset; - dma_offset -= sg_dma_len(&chunk->mem[i]); + if (len > dma_offset) + *dma_handle = dma_addr + dma_offset; + dma_offset -= len; } + /* * DMA mapping can merge pages but not split them, * so if we found the page, dma_handle has already * been assigned to. */ - if (chunk->mem[i].length > offset) { - page = sg_page(&chunk->mem[i]); + if (len > offset) goto out; - } - offset -= chunk->mem[i].length; + offset -= len; } } + addr = NULL; out: mutex_unlock(&table->mutex); - return page ? lowmem_page_address(page) + offset : NULL; + return addr ? addr + offset : NULL; } int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index c9169a490557cc28a865ef98cbe0676fd3999d33..d199874b1c074cafd7a740e6806761c279e476cd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -47,11 +47,21 @@ enum { MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, }; +struct mlx4_icm_buf { + void *addr; + size_t size; + dma_addr_t dma_addr; +}; + struct mlx4_icm_chunk { struct list_head list; int npages; int nsg; - struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; + bool coherent; + union { + struct scatterlist sg[MLX4_ICM_CHUNK_LEN]; + struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN]; + }; }; struct mlx4_icm { @@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) { - return sg_dma_address(&iter->chunk->mem[iter->page_idx]); + if (iter->chunk->coherent) + return iter->chunk->buf[iter->page_idx].dma_addr; + else + return sg_dma_address(&iter->chunk->sg[iter->page_idx]); } static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) { - return sg_dma_len(&iter->chunk->mem[iter->page_idx]); + if (iter->chunk->coherent) + return iter->chunk->buf[iter->page_idx].size; + else + return sg_dma_len(&iter->chunk->sg[iter->page_idx]); } int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6a046030e8734a8542ff8ea67560f980d9ffb26c..8d7bb9a8896770ea1e7c32a80bf3b292f5da12c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -313,7 +313,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev, for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); - return -EINVAL; + return -EOPNOTSUPP; } } } @@ -322,7 +322,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev, if (!(port_type[i] & dev->caps.supported_type[i+1])) { mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", i + 1); - return -EINVAL; + return 
-EOPNOTSUPP; } } return 0; @@ -1188,8 +1188,7 @@ static int __set_port_type(struct mlx4_port_info *info, mlx4_err(mdev, "Requested port type for port %d is not supported on this HCA\n", info->port); - err = -EINVAL; - goto err_sup; + return -EOPNOTSUPP; } mlx4_stop_sense(mdev); @@ -1211,7 +1210,7 @@ static int __set_port_type(struct mlx4_port_info *info, for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; - err = -EINVAL; + err = -EOPNOTSUPP; } } } @@ -1237,7 +1236,7 @@ static int __set_port_type(struct mlx4_port_info *info, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); -err_sup: + return err; } @@ -2540,6 +2539,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2590,7 +2590,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); @@ -4310,12 +4311,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev) static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ffed2d4c9403022df2f73859e33fa5f2158fe4f2..9c481823b3e86f052e09dcb6da5d51ede00131aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, rule.port = port; rule.qpn = qpn; INIT_LIST_HEAD(&rule.list); - mlx4_err(dev, "going promisc on %x\n", port); + mlx4_info(dev, "going promisc on %x\n", port); return mlx4_flow_attach(dev, &rule, regid_p); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ebcd2778eeb3e1f22524064ff2db7762e1f852ba..23f1b5b512c2198cb664167e42fb91ff9c549f13 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -540,8 +540,8 @@ struct slave_list { struct resource_allocator { spinlock_t alloc_lock; /* protect quotas */ union { - int res_reserved; - int res_port_rsvd[MLX4_MAX_PORTS]; + unsigned int res_reserved; + unsigned int res_port_rsvd[MLX4_MAX_PORTS]; }; union { int res_free; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c3228b89df463597de1cb546754ea1b8aa4d876d..1a57ea9a7ea592b47dd4b3c082bdbdd8d3031299 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -161,7 +161,6 @@ #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ ETH_HLEN + PREAMBLE_LEN) -#define MLX4_EN_MIN_MTU 46 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). 
*/ @@ -272,6 +271,10 @@ struct mlx4_en_page_cache { } buf[MLX4_EN_CACHE_SIZE]; }; +enum { + MLX4_EN_TX_RING_STATE_RECOVERING, +}; + struct mlx4_en_priv; struct mlx4_en_tx_ring { @@ -318,6 +321,7 @@ struct mlx4_en_tx_ring { * Only queue_stopped might be used if BQL is not properly working. */ unsigned long queue_stopped; + unsigned long state; struct mlx4_hwq_resources sp_wqres; struct mlx4_qp sp_qp; struct mlx4_qp_context sp_context; @@ -531,6 +535,10 @@ struct mlx4_en_stats_bitmap { struct mutex mutex; /* for mutual access to stats bitmap */ }; +enum { + MLX4_EN_STATE_FLAG_RESTARTING, +}; + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -596,7 +604,7 @@ struct mlx4_en_priv { struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct watchdog_task; + struct work_struct restart_task; struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; @@ -644,6 +652,7 @@ struct mlx4_en_priv { u32 pflags; u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; u8 rss_hash_fn; + unsigned long state; }; enum mlx4_en_wol { diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 2e84f10f59ba9ca0a69d980b87368f7310e5bb13..cfa0bba3940fb7c57f473053189b5077763fd4c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; @@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, buf); + (*mpt_entry)->lkey = 0; err = mlx4_SW2HW_MPT(dev, mailbox, key); } diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 10fcc22f45908b6a2f21ef25998df4f711b89bc3..ba6ac31a339dc3c6c84ff30eea9d8126ab2279b9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -2077,11 +2077,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, size -= offset + size - I2C_PAGE_SIZE; i2c_addr = I2C_ADDR_LOW; - if (offset >= I2C_PAGE_SIZE) { - /* Reset offset to high page */ - i2c_addr = I2C_ADDR_HIGH; - offset -= I2C_PAGE_SIZE; - } cable_info = (struct mlx4_cable_info *)inmad->data; cable_info->dev_mem_address = cpu_to_be16(offset); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 31bd56727022fe7a3bacc3a09e11691f79f1a974..a4c1ed65f620c035c1f26340ed29c4f5b9bc5708 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev) priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; } -static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) +static int +mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + int vf) { - /* reduce the sink counter */ - return (dev->caps.max_counters - 1 - - (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) - / MLX4_MAX_PORTS; + struct mlx4_active_ports actv_ports; + int ports, counters_guaranteed; + + /* For master, 
only allocate according to the number of phys ports */ + if (vf == mlx4_master_func_num(dev)) + return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports; + + /* calculate real number of ports for the VF */ + actv_ports = mlx4_get_active_ports(dev, vf); + ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT; + + /* If we do not have enough counters for this VF, do not + * allocate any for it. '-1' to reduce the sink counter. + */ + if ((res_alloc->res_reserved + counters_guaranteed) > + (dev->caps.max_counters - 1)) + return 0; + + return counters_guaranteed; } int mlx4_init_resource_tracker(struct mlx4_dev *dev) @@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i, j; int t; - int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); priv->mfunc.master.res_tracker.slave_list = kcalloc(dev->num_slaves, sizeof(struct slave_list), @@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_COUNTER: res_alloc->quota[t] = dev->caps.max_counters; - if (t == mlx4_master_func_num(dev)) - res_alloc->guaranteed[t] = - MLX4_PF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else if (t <= max_vfs_guarantee_counter) - res_alloc->guaranteed[t] = - MLX4_VF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else - res_alloc->guaranteed[t] = 0; + res_alloc->guaranteed[t] = + mlx4_calc_res_counter_guaranteed(dev, res_alloc, t); break; default: break; @@ -2719,13 +2729,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) int total_pages; int total_mem; int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; + int tot; sq_size = 1 << (log_sq_size + log_sq_sride + 4); rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); total_mem = sq_size + rq_size; - total_pages = - roundup_pow_of_two((total_mem + (page_offset << 6)) >> - page_shift); + tot = (total_mem + (page_offset << 6)) >> page_shift; + total_pages = !tot ? 1 : roundup_pow_of_two(tot); return total_pages; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 37a551436e4ab7a5ac36752dd237ef9e6d931ad8..b7e3b8902e7e540627d247a349cc998bc3b88717 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -8,6 +8,7 @@ config MLX5_CORE depends on PCI imply PTP_1588_CLOCK imply VXLAN + imply MLXFW default n ---help--- Core driver for low level functionality of the ConnectX-4 and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a53736c26c0cec416b119878356e2beffe17b37e..a686082762df7e03142bfef30619f0c33253b2ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -835,6 +835,7 @@ static void cmd_work_handler(struct work_struct *work) int alloc_ret; int cmd_mode; + complete(&ent->handling); sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { @@ -862,7 +863,6 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -884,6 +884,7 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); /* Skip sending command to fw if internal error */ if (pci_channel_offline(dev->pdev) || @@ -896,6 +897,10 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + /* no doorbell, no need to keep the entry */ + free_ent(cmd, ent->idx); + if (ent->callback) + free_cmd(ent); return; } @@ -949,6 +954,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) struct mlx5_cmd *cmd = &dev->cmd; int err; + if (!wait_for_completion_timeout(&ent->handling, timeout) && + cancel_work_sync(&ent->work)) { + ent->ret = -ECANCELED; + goto out_err; + } if (cmd->mode == CMD_MODE_POLLING || ent->polling) { wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { @@ -956,12 +966,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } +out_err: err = ent->ret; if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + } else if (err == -ECANCELED) { + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -997,6 +1012,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->token = token; ent->polling = force_polling; + init_completion(&ent->handling); if (!callback) init_completion(&ent->done); @@ -1016,6 +1032,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, err = wait_func(dev, ent); if (err == -ETIMEDOUT) goto out; + if (err == -ECANCELED) + goto out_free; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index a4179122a2796bafc87a429e7ad54abbfc347ed9..65ff00bcc2fb6e41dd31b01bae7240c9a950b740 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -155,6 +155,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; int err; + mlx5_debug_cq_remove(dev, cq); err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq); if (err) return err; @@ -171,7 +172,6 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) synchronize_irq(cq->irqn); - mlx5_debug_cq_remove(dev, cq); mlx5_cq_put(cq); wait_for_completion(&cq->free); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 90fabd612b6cd84f1420afa151cc6c3b0103acfb..20c36bb9024cdb999d4fb0bc01fc2767398d5b1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -588,6 +588,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return; - if (cq->dbg) + if (cq->dbg) { rem_res_tree(cq->dbg); + cq->dbg = NULL; + } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 37ba7c78859db17aa7ecfa76648ca54adb71790b..3692d6a1cce8d6acebda01fa84bf0ce8d5a1fcb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev) struct mlx5_interface *intf; mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(intf, &intf_list, list) + list_for_each_entry_reverse(intf, &intf_list, list) mlx5_remove_device(intf, priv); list_del(&priv->dev_list); mutex_unlock(&mlx5_intf_mutex); @@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +/* Must be called with intf_mutex held */ +static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_interface *intf; + bool found = false; + + list_for_each_entry(intf, &intf_list, list) { + if (intf->protocol == protocol) { + dev_ctx = mlx5_get_device(intf, &mdev->priv); + if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) + found = true; + break; + } + } + + return found; +} + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) { mutex_lock(&mlx5_intf_mutex); - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + } mutex_unlock(&mlx5_intf_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index d4ec93bde4dedbaeca4bb5976c705a3ea6b83f82..2266c09b741a26b134858ea9c56c86234bf190c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -796,7 +796,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) return NULL; } - tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL); if (!tracer) return ERR_PTR(-ENOMEM); @@ -842,7 +842,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) tracer->dev = NULL; destroy_workqueue(tracer->work_queue); free_tracer: - kfree(tracer); + kvfree(tracer); return ERR_PTR(err); } @@ -919,7 +919,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); - kfree(tracer); + kvfree(tracer); } void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0f189f87385923226966a57537b7fa643e540a98..153b018f14252f0ad185dc51a68f9a1deaf73df3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -184,7 +184,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; + struct mlx5_wqe_data_seg data[]; }; 
struct mlx5e_rx_wqe_ll { @@ -200,7 +200,7 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - struct mlx5_mtt inline_mtts[0]; + struct mlx5_mtt inline_mtts[]; }; extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; @@ -210,6 +210,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "tx_cqe_moder", "rx_cqe_compress", "rx_striding_rq", + "rx_no_csum_complete", }; enum mlx5e_priv_flag { @@ -217,6 +218,7 @@ enum mlx5e_priv_flag { MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp { enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_AM, + MLX5E_RQ_STATE_NO_CSUM_COMPLETE, }; struct mlx5e_cq { @@ -511,7 +514,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); enum mlx5e_rq_flag { - MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), + MLX5E_RQ_FLAG_XDP_XMIT, }; struct mlx5e_rq_frag_info { @@ -566,6 +569,7 @@ struct mlx5e_rq { unsigned long state; int ix; + unsigned int hw_mtu; struct net_dim dim; /* Dynamic Interrupt Moderation */ @@ -632,6 +636,7 @@ enum { MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, + MLX5E_STATE_XDP_TX_ENABLED, }; struct mlx5e_rqt { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1431232c9a09ef1ddecdf1e497142f82544e4de0..5fc903fa9573b12773d4fe3e0f6719c64bb633e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -149,6 +149,7 @@ struct mlx5e_arfs_tables { struct list_head rules; int last_filter_id; struct workqueue_struct *wq; + unsigned long state; }; int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 24e3b564964ffecf6a37e16898e25af57a1319be..12e1682f940b8866ca32d66b4b44a86e953f251f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); *speed = mlx5e_port_ptys2speed(eth_proto_oper); - if (!(*speed)) { - mlx5_core_warn(mdev, "cannot get port speed\n"); + if (!(*speed)) err = -EINVAL; - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index c047da8752daa80bf856373037ead5ba26adbefc..28d56e44ed9d81c308cbeb6967cf34dab451a713 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -122,7 +122,9 @@ static int port_set_buffer(struct mlx5e_priv *priv, return err; } -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ +/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) + * minimum speed value is 40Gbps + */ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) { u32 speed; @@ -131,7 +133,8 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) err = mlx5e_port_linkspeed(priv->mdev, &speed); if (err) - return 0; + speed = SPEED_40000; + speed = max_t(u32, speed, SPEED_40000); xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 
1000 + 272 * mtu / 100; @@ -140,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int mtu) + u32 xoff, unsigned int max_mtu) { int i; @@ -152,11 +155,15 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", + i, port_buffer->buffer[i].size); return -ENOMEM; + } port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; - port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; + port_buffer->buffer[i].xon = + port_buffer->buffer[i].xoff - max_mtu; } return 0; @@ -164,7 +171,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, /** * update_buffer_lossy() - * mtu: device's MTU + * max_mtu: netdev's max_mtu * pfc_en: current pfc configuration * buffer: current prio to buffer mapping * xoff: xoff value @@ -181,7 +188,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * Return 0 if no error. * Set change to true if buffer configuration is modified. */ -static int update_buffer_lossy(unsigned int mtu, +static int update_buffer_lossy(unsigned int max_mtu, u8 pfc_en, u8 *buffer, u32 xoff, struct mlx5e_port_buffer *port_buffer, bool *change) @@ -218,7 +225,7 @@ static int update_buffer_lossy(unsigned int mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu); if (err) return err; @@ -228,6 +235,27 @@ static int update_buffer_lossy(unsigned int mtu, return 0; } +static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en) +{ + u32 g_rx_pause, g_tx_pause; + int err; + + err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause); + if (err) + return err; + + /* If global pause enabled, set all active buffers to lossless. + * Otherwise, check PFC setting. 
+ */ + if (g_rx_pause || g_tx_pause) + *pfc_en = 0xff; + else + err = mlx5_query_port_pfc(mdev, pfc_en, NULL); + + return err; +} + +#define MINIMUM_MAX_MTU 9216 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, @@ -239,12 +267,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, bool update_prio2buffer = false; u8 buffer[MLX5E_MAX_PRIORITY]; bool update_buffer = false; + unsigned int max_mtu; u32 total_used = 0; u8 curr_pfc_en; int err; int i; mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); + max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); err = mlx5e_port_query_buffer(priv, &port_buffer); if (err) @@ -252,7 +282,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -262,7 +292,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -270,12 +300,12 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; - err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); + err = fill_pfc_en(priv->mdev, &curr_pfc_en); if (err) return err; - err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, - &port_buffer, &update_buffer); + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + xoff, &port_buffer, &update_buffer); if (err) return err; } @@ -299,7 +329,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -307,7 +337,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index ad6d471d00dd47c6c4b802ad79fd6dd1e157cf21..12f3787b3048f5211f6a15e2d43f4098188effdb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -33,6 +33,26 @@ #include #include "en/xdp.h" +int mlx5e_xdp_max_mtu(struct mlx5e_params *params) +{ + int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + + /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). + * The condition checked in mlx5e_rx_is_linear_skb is: + * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) + * (Note that hw_mtu == sw_mtu + hard_mtu.) + * What is returned from this function is: + * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) + * After assigning sw_mtu := max_mtu, the left side of (1) turns to + * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, + * because both PAGE_SIZE and S are already aligned. Any number greater + * than max_mtu would make the left side of (1) greater than PAGE_SIZE, + * so max_mtu is the maximum MTU allowed. 
+ */ + + return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); +} + static inline bool mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, struct xdp_buff *xdp) @@ -139,8 +159,10 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) /* copy the inline part if required */ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr.start, xdpf->data, sizeof(eseg->inline_hdr.start)); eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + memcpy(dseg, xdpf->data + sizeof(eseg->inline_hdr.start), + MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start)); dma_len -= MLX5E_XDP_MIN_INLINE; dma_addr += MLX5E_XDP_MIN_INLINE; dseg++; @@ -207,9 +229,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) sqcc++; if (is_redirect) { - xdp_return_frame(xdpi->xdpf); dma_unmap_single(sq->pdev, xdpi->dma_addr, xdpi->xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi->xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi->di, true); @@ -243,9 +265,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) sq->cc++; if (is_redirect) { - xdp_return_frame(xdpi->xdpf); dma_unmap_single(sq->pdev, xdpi->dma_addr, xdpi->xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi->xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi->di, false); @@ -262,7 +284,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int sq_num; int i; - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + /* this flag is sufficient, no need to test internal sq state */ + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -275,9 +298,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, sq = &priv->channels.c[sq_num]->xdpsq; - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return -ENETDOWN; - for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; struct mlx5e_xdp_info xdpi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 6dfab045925f01564b50e47c0340cdc04b4a602f..827ceef5fa93a95b5b2a73e166a8b2d9ecb335ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -34,12 +34,11 @@ #include "en.h" -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) #define MLX5E_XDP_TX_DS_COUNT \ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) +int mlx5e_xdp_max_mtu(struct mlx5e_params *params); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len); bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); @@ -49,6 +48,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) +{ + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + /* let other device's napi(s) see our new state */ + synchronize_rcu(); +} + +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) +{ + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + static inline void 
mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index be137d4a91692026acf2a2bce299667fc7228dcc..22acd462856c0a7ed7b3be74c3ab44522469bf91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -265,6 +265,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_tls_offload_context_tx *context; + struct net_device *tls_netdev; struct tls_context *tls_ctx; u32 expected_seq; int datalen; @@ -278,7 +279,8 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, goto out; tls_ctx = tls_get_ctx(skb->sk); - if (unlikely(tls_ctx->netdev != netdev)) + tls_netdev = rcu_dereference_bh(tls_ctx->netdev); + if (unlikely(tls_netdev != netdev)) goto out; skb_seq = ntohl(tcp_hdr(skb)->seq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 45cdde694d20049af85391ffcc376438fadb921e..8a4fbb89820cfb75010e49422ee19d44113ccfcc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -36,6 +36,10 @@ #include #include "en.h" +enum { + MLX5E_ARFS_STATE_ENABLED, +}; + struct arfs_tuple { __be16 etype; u8 ip_proto; @@ -144,6 +148,8 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) return err; } } + set_bit(MLX5E_ARFS_STATE_ENABLED, &priv->fs.arfs.state); + return 0; } @@ -227,11 +233,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in || !ft->g) { - kvfree(ft->g); - kvfree(in); + if (!ft->g) return -ENOMEM; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free_g; } mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); @@ -251,7 +259,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, break; default: err = -EINVAL; - goto out; + goto err_free_in; } switch (type) { @@ -273,7 +281,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, break; default: err = -EINVAL; - goto out; + goto err_free_in; } MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); @@ -282,7 +290,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) - goto err; + goto err_clean_group; ft->num_groups++; memset(in, 0, inlen); @@ -291,18 +299,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) - goto err; + goto err_clean_group; ft->num_groups++; kvfree(in); return 0; -err: +err_clean_group: err = PTR_ERR(ft->g[ft->num_groups]); ft->g[ft->num_groups] = NULL; -out: +err_free_in: kvfree(in); - +err_free_g: + kfree(ft->g); + ft->g = NULL; return err; } @@ -406,6 +416,8 @@ static void arfs_del_rules(struct mlx5e_priv *priv) int j; HLIST_HEAD(del_list); + clear_bit(MLX5E_ARFS_STATE_ENABLED, &priv->fs.arfs.state); + spin_lock_bh(&priv->fs.arfs.arfs_lock); mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) { hlist_del_init(&rule->hlist); @@ -437,12 +449,6 @@ arfs_hash_bucket(struct 
arfs_table *arfs_t, __be16 src_port, return &arfs_t->rules_hash[bucket_idx]; } -static u8 arfs_get_ip_proto(const struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) ? - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; -} - static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, u8 ip_proto, __be16 etype) { @@ -574,17 +580,8 @@ static void arfs_handle_work(struct work_struct *work) struct mlx5e_priv *priv = arfs_rule->priv; struct mlx5_flow_handle *rule; - mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - spin_lock_bh(&priv->fs.arfs.arfs_lock); - hlist_del(&arfs_rule->hlist); - spin_unlock_bh(&priv->fs.arfs.arfs_lock); - - mutex_unlock(&priv->state_lock); - kfree(arfs_rule); - goto out; - } - mutex_unlock(&priv->state_lock); + if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &priv->fs.arfs.state)) + return; if (!arfs_rule->rule) { rule = arfs_add_rule(priv, arfs_rule); @@ -599,31 +596,9 @@ static void arfs_handle_work(struct work_struct *work) arfs_may_expire_flow(priv); } -/* return L4 destination port from ip4/6 packets */ -static __be16 arfs_get_dst_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->dest; - return ((struct udphdr *)transport_header)->dest; -} - -/* return L4 source port from ip4/6 packets */ -static __be16 arfs_get_src_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->source; - return ((struct udphdr *)transport_header)->source; -} - static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, struct arfs_table *arfs_t, - const struct sk_buff *skb, + const struct flow_keys *fk, u16 rxq, u32 flow_id) { struct arfs_rule *rule; @@ -638,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, INIT_WORK(&rule->arfs_work, arfs_handle_work); tuple = &rule->tuple; - tuple->etype = skb->protocol; + tuple->etype = fk->basic.n_proto; + tuple->ip_proto = fk->basic.ip_proto; if (tuple->etype == htons(ETH_P_IP)) { - tuple->src_ipv4 = ip_hdr(skb)->saddr; - tuple->dst_ipv4 = ip_hdr(skb)->daddr; + tuple->src_ipv4 = fk->addrs.v4addrs.src; + tuple->dst_ipv4 = fk->addrs.v4addrs.dst; } else { - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src, sizeof(struct in6_addr)); - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, sizeof(struct in6_addr)); } - tuple->ip_proto = arfs_get_ip_proto(skb); - tuple->src_port = arfs_get_src_port(skb); - tuple->dst_port = arfs_get_dst_port(skb); + tuple->src_port = fk->ports.src; + tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; @@ -661,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, return rule; } -static bool arfs_cmp_ips(struct arfs_tuple *tuple, - const struct sk_buff *skb) +static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk) { - if (tuple->etype == htons(ETH_P_IP) && - tuple->src_ipv4 == ip_hdr(skb)->saddr && - tuple->dst_ipv4 == ip_hdr(skb)->daddr) - return true; - if (tuple->etype == htons(ETH_P_IPV6) && - (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, - sizeof(struct in6_addr))) && - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, - 
sizeof(struct in6_addr)))) - return true; + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst) + return false; + if (tuple->etype != fk->basic.n_proto) + return false; + if (tuple->etype == htons(ETH_P_IP)) + return tuple->src_ipv4 == fk->addrs.v4addrs.src && + tuple->dst_ipv4 == fk->addrs.v4addrs.dst; + if (tuple->etype == htons(ETH_P_IPV6)) + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); return false; } static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, - const struct sk_buff *skb) + const struct flow_keys *fk) { struct arfs_rule *arfs_rule; struct hlist_head *head; - __be16 src_port = arfs_get_src_port(skb); - __be16 dst_port = arfs_get_dst_port(skb); - head = arfs_hash_bucket(arfs_t, src_port, dst_port); + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); hlist_for_each_entry(arfs_rule, head, hlist) { - if (arfs_rule->tuple.src_port == src_port && - arfs_rule->tuple.dst_port == dst_port && - arfs_cmp_ips(&arfs_rule->tuple, skb)) { + if (arfs_cmp(&arfs_rule->tuple, fk)) return arfs_rule; - } } return NULL; @@ -704,20 +675,29 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; + struct flow_keys fk; - if (skb->protocol != htons(ETH_P_IP) && - skb->protocol != htons(ETH_P_IPV6)) + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; if (skb->encapsulation) return -EPROTONOSUPPORT; - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto); if (!arfs_t) return -EPROTONOSUPPORT; spin_lock_bh(&arfs->arfs_lock); - arfs_rule = arfs_find_rule(arfs_t, skb); + if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) { + spin_unlock_bh(&arfs->arfs_lock); + return -EPERM; + } + + arfs_rule = arfs_find_rule(arfs_t, &fk); if (arfs_rule) { if (arfs_rule->rxq == rxq_index) { spin_unlock_bh(&arfs->arfs_lock); @@ -725,8 +705,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } arfs_rule->rxq = rxq_index; } else { - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, - rxq_index, flow_id); + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id); if (!arfs_rule) { spin_unlock_bh(&arfs->arfs_lock); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index db3278cc052ba3639fb8e74cdf4cf610637c8201..124e4567a4ee6cbfd890af7f5821c0218af5e0bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, if (err) return err; + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return 0; } @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) { + mutex_lock(&mdev->mlx5e_res.td.list_lock); mlx5_core_destroy_tir(mdev, tir->tirn); list_del(&tir->list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, @@ -114,6 +118,7 @@ int 
mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) } INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); + mutex_init(&mdev->mlx5e_res.td.list_lock); return 0; @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_tir *tir; - int err = -ENOMEM; + int err = 0; u32 tirn = 0; int inlen; void *in; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + if (!in) { + err = -ENOMEM; goto out; + } if (enable_uc_lb) MLX5_SET(modify_tir_in, in, ctx.self_lb_block, @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in, inlen); @@ -168,6 +176,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 98dd3e0ada72bf02640b1b3eb5c69b949c1940b2..a383276eb816aa3edbcae378a824542748f30bfd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return -EOPNOTSUPP; + if (pauseparam->autoneg) return -EINVAL; @@ -1101,11 +1104,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info) { struct mlx5_core_dev *mdev = priv->mdev; - int ret; - - ret = ethtool_op_get_ts_info(priv->netdev, info); - if (ret) - return ret; info->phc_index = mlx5_clock_get_ptp_index(mdev); @@ -1113,9 +1111,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, info->phc_index == -1) return 0; - info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); @@ -1512,6 +1510,28 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return 0; } +static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_channels *channels = &priv->channels; + struct mlx5e_channel *c; + int i; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || + priv->channels.params.xdp_prog) + return 0; + + for (i = 0; i < channels->num; i++) { + c = channels->c[i]; + if (enable) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + else + __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + } + + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1563,6 +1583,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_STRIDING_RQ, set_pflag_rx_striding_rq); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, + set_pflag_rx_no_csum_complete); out: 
mutex_unlock(&priv->state_lock); @@ -1614,6 +1640,22 @@ static int mlx5e_flash_device(struct net_device *dev, return mlx5e_ethtool_flash_device(priv, flash); } +#ifndef CONFIG_MLX5_EN_RXNFC +/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS + * otherwise this function will be defined from en_fs_ethtool.c + */ +static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (info->cmd != ETHTOOL_GRXRINGS) + return -EOPNOTSUPP; + /* ring_count is needed by ethtool -x */ + info->data = priv->channels.params.num_channels; + return 0; +} +#endif + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1632,8 +1674,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, -#ifdef CONFIG_MLX5_EN_RXNFC .get_rxnfc = mlx5e_get_rxnfc, +#ifdef CONFIG_MLX5_EN_RXNFC .set_rxnfc = mlx5e_set_rxnfc, #endif .flash_device = mlx5e_flash_device, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 76cc10e44080b012d50da503cd6ed60520aa6154..c6eea6b6b1bbc640629a6ef6c36ca8653e807486 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; } + if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type)) + return 0; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { @@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } @@ -415,8 +417,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + + /* must be called after DESTROY bit is set and + * set_rx_mode is called and flushed + */ + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); } @@ -887,6 +893,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); + ft->g = NULL; return -ENOMEM; } @@ -1027,6 +1034,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) in = kvzalloc(inlen, GFP_KERNEL); if (!in) { kfree(ft->g); + ft->g = NULL; return -ENOMEM; } @@ -1306,6 +1314,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) ft->g[ft->num_groups] = NULL; mlx5e_destroy_groups(ft); kvfree(in); + kfree(ft->g); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f291d1bf15586b9bff40a3cc2ea8c62c71e62f05..51edc507b7b5dddcb6b9af11511ea7000ba016d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, return 
!params->lro_en && frag_sz <= PAGE_SIZE; } +#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ + MLX5_MPWQE_LOG_STRIDE_SZ_BASE) static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { @@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, if (!mlx5e_rx_is_linear_skb(mdev, params)) return false; + if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) + return false; + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) return true; @@ -415,12 +420,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) { - struct mlx5e_wqe_frag_info next_frag, *prev; + struct mlx5e_wqe_frag_info next_frag = {}; + struct mlx5e_wqe_frag_info *prev = NULL; int i; next_frag.di = &rq->wqe.di[0]; - next_frag.offset = 0; - prev = NULL; for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; @@ -492,6 +496,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; @@ -514,7 +519,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -559,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -929,6 +934,13 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + /* We disable csum_complete when XDP is enabled since + * XDP programs might manipulate packets which will render + * skb->checksum incorrect. 
+ */ + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + return 0; err_destroy_rq: @@ -1382,6 +1394,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_core_dev *mdev = c->mdev; struct mlx5_rate_limit rl = {0}; + cancel_work_sync(&sq->dim.work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { rl.rate = sq->rate_limit; @@ -1610,13 +1623,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, int err; u32 i; + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + if (err) + return err; + err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) return err; - mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); - mcq->cqe_sz = 64; mcq->set_ci_db = cq->wq_ctrl.db.db; mcq->arm_db = cq->wq_ctrl.db.db + 1; @@ -1674,6 +1689,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int eqn; int err; + err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + if (err) + return err; + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); @@ -1687,8 +1706,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); - mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); - MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); @@ -1747,7 +1764,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq) static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) { - return cpumask_first(priv->mdev->priv.irq_info[ix].mask); + return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask); } static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, @@ -1908,6 +1925,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; + err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); + if (err) + return err; + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1924,7 +1945,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; - mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); c->irq_desc = irq_to_desc(irq); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); @@ -2876,6 +2896,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); + mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); if (MLX5_ESWITCH_MANAGER(priv->mdev)) @@ -2897,6 +2918,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + mlx5e_xdp_tx_disable(priv); mlx5e_deactivate_channels(&priv->channels); } @@ -3566,6 +3588,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) return 0; } +#ifdef CONFIG_MLX5_ESWITCH static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3578,6 +3601,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) return 0; } +#endif static int set_feature_rx_all(struct net_device *netdev, bool enable) { @@ -3676,7 +3700,9 @@ static int mlx5e_set_features(struct net_device *netdev, err |= 
MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); +#ifdef CONFIG_MLX5_ESWITCH err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); +#endif err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); @@ -3714,6 +3740,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); } + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + features &= ~NETIF_F_RXHASH; + if (netdev->features & NETIF_F_RXHASH) + netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); + } + mutex_unlock(&priv->state_lock); return features; @@ -3741,16 +3773,17 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (params->xdp_prog && !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", - new_mtu, MLX5E_XDP_MAX_MTU); + new_mtu, mlx5e_xdp_max_mtu(params)); err = -EINVAL; goto out; } if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params); u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); - reset = reset && (ppw_old != ppw_new); + reset = reset && (is_linear || (ppw_old != ppw_new)); } if (!reset) { @@ -3839,6 +3872,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) memcpy(&priv->tstamp, &config, sizeof(config)); mutex_unlock(&priv->state_lock); + /* might need to fix some features */ + netdev_update_features(priv->netdev); + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } @@ -4206,7 +4242,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", - new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); + new_channels.params.sw_mtu, + mlx5e_xdp_max_mtu(&new_channels.params)); return -EINVAL; } @@ -4503,6 +4540,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->rx_cqe_compress_def = slow_pci_heuristic(mdev); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false); /* RQ */ /* Prefer Striding RQ, unless any of the following holds: @@ -4680,12 +4718,18 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (!priv->channels.params.scatter_fcs_en) netdev->features &= ~NETIF_F_RXFCS; + /* prefere CQE compression over rxhash */ + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) + netdev->features &= ~NETIF_F_RXHASH; + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && FT_CAP(identified_miss_table_mode) && FT_CAP(flow_table_modify)) { +#ifdef CONFIG_MLX5_ESWITCH netdev->hw_features |= NETIF_F_HW_TC; +#endif #ifdef CONFIG_MLX5_EN_ARFS netdev->hw_features |= NETIF_F_NTUPLE; #endif @@ -4958,11 +5002,21 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; + int max_nch; int err; profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); + /* max number of channels may have changed */ + max_nch = mlx5e_get_max_num_channels(priv->mdev); + if (priv->channels.params.num_channels > max_nch) { + mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); + priv->channels.params.num_channels = max_nch; + mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, max_nch); + } + err = profile->init_tx(priv); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c9cc9747d21d187810c9f93efaf0937bb9c1ebe2..1ab40d622ae1e5805c9db30b9d2cd3471c2f1e16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; + s->tx_queue_dropped += sq_stats->dropped; } } } @@ -197,7 +198,7 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw->mode == SRIOV_NONE) + if (esw->mode != SRIOV_OFFLOADS) return -EOPNOTSUPP; switch (attr->id) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 00172dee5339c42eebb6222cacd3ac9945073dd0..8a2f8189669d7618addd88823d5b14296fdaed08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -688,51 +689,108 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_last_ethertype_ip(struct sk_buff *skb, 
int *network_depth) +static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, + __be16 *proto) { - __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + *proto = ((struct ethhdr *)skb->data)->h_proto; + *proto = __vlan_get_protocol(skb, *proto, network_depth); - ethertype = __vlan_get_protocol(skb, ethertype, network_depth); - return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); + if (*proto == htons(ETH_P_IP)) + return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); + + if (*proto == htons(ETH_P_IPV6)) + return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); + + return false; } -static __be32 mlx5e_get_fcs(struct sk_buff *skb) +static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) { - int last_frag_sz, bytes_in_prev, nr_frags; - u8 *fcs_p1, *fcs_p2; - skb_frag_t *last_frag; - __be32 fcs_bytes; + int network_depth = 0; + __be16 proto; + void *ip; + int rc; + + if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) + return; - if (!skb_is_nonlinear(skb)) - return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); + ip = skb->data + network_depth; + rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) : + IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); - nr_frags = skb_shinfo(skb)->nr_frags; - last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; - last_frag_sz = skb_frag_size(last_frag); + rq->stats->ecn_mark += !!rc; +} - /* If all FCS data is in last frag */ - if (last_frag_sz >= ETH_FCS_LEN) - return *(__be32 *)(skb_frag_address(last_frag) + - last_frag_sz - ETH_FCS_LEN); +static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) +{ + void *ip_p = skb->data + network_depth; - fcs_p2 = (u8 *)skb_frag_address(last_frag); - bytes_in_prev = ETH_FCS_LEN - last_frag_sz; + return (proto == htons(ETH_P_IP)) ? 
((struct iphdr *)ip_p)->protocol : + ((struct ipv6hdr *)ip_p)->nexthdr; +} - /* Find where the other part of the FCS is - Linear or another frag */ - if (nr_frags == 1) { - fcs_p1 = skb_tail_pointer(skb); - } else { - skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) - fcs_p1 = skb_frag_address(prev_frag) + - skb_frag_size(prev_frag); +#define MAX_PADDING 8 + +static void +tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, + struct mlx5e_rq_stats *stats) +{ + stats->csum_complete_tail_slow++; + skb->csum = csum_block_add(skb->csum, + skb_checksum(skb, offset, len, 0), + offset); +} + +static void +tail_padding_csum(struct sk_buff *skb, int offset, + struct mlx5e_rq_stats *stats) +{ + u8 tail_padding[MAX_PADDING]; + int len = skb->len - offset; + void *tail; + + if (unlikely(len > MAX_PADDING)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; } - fcs_p1 -= bytes_in_prev; - memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); - memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); + tail = skb_header_pointer(skb, offset, len, tail_padding); + if (unlikely(!tail)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } - return fcs_bytes; + stats->csum_complete_tail++; + skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); +} + +static void +mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) +{ + struct ipv6hdr *ip6; + struct iphdr *ip4; + int pkt_len; + + switch (proto) { + case htons(ETH_P_IP): + ip4 = (struct iphdr *)(skb->data + network_depth); + pkt_len = network_depth + ntohs(ip4->tot_len); + break; + case htons(ETH_P_IPV6): + ip6 = (struct ipv6hdr *)(skb->data + network_depth); + pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); + break; + default: + return; + } + + if (likely(pkt_len >= skb->len)) + return; + + tail_padding_csum(skb, pkt_len, stats); } static inline void mlx5e_handle_csum(struct net_device *netdev, @@ -743,6 +801,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, { struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; + __be16 proto; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -753,7 +812,25 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (likely(is_last_ethertype_ip(skb, &network_depth))) { + /* True when explicitly set via priv flag, or XDP prog is loaded */ + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + goto csum_unnecessary; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to be + * CHECKSUM_UNNECESSARY even if they are not padded. 
+ */ + if (short_frame(skb->len)) + goto csum_unnecessary; + + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { + if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) + goto csum_unnecessary; + skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); if (network_depth > ETH_HLEN) @@ -764,13 +841,13 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); - if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb)); + + mlx5e_skb_padding_csum(skb, network_depth, proto, stats); stats->csum_complete++; return; } +csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && (cqe->hds_ip_ext & CQE_L4_OK))) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -788,6 +865,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, stats->csum_none++; } +#define MLX5E_CE_BIT_MASK 0x80 + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, @@ -832,6 +911,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + /* checking CE bit in cqe - MSB in ml_path field */ + if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) + mlx5e_enable_ecn(rq, skb); + skb->protocol = eth_type_trans(skb, netdev); } @@ -1091,6 +1174,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u32 frag_size; bool consumed; + /* Check packet size. Note LRO doesn't use linear SKB */ + if (unlikely(cqe_bcnt > rq->hw_mtu)) { + rq->stats->oversize_pkts_sw_drop++; + return NULL; + } + va = page_address(di->page) + head_offset; data = va + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); @@ -1171,21 +1260,25 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - struct mlx5e_xdpsq *xdpsq; + struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; struct mlx5_cqe64 *cqe; int work_done = 0; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; - if (cq->decmprs_left) + if (cq->decmprs_left) { work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); + if (cq->decmprs_left || work_done >= budget) + goto out; + } cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) + if (!cqe) { + if (unlikely(work_done)) + goto out; return 0; - - xdpsq = &rq->xdpsq; + } do { if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { @@ -1200,6 +1293,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) rq->handle_rx_cqe(rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); +out: if (xdpsq->doorbell) { mlx5e_xmit_xdp_doorbell(xdpsq); xdpsq->doorbell = false; @@ -1220,6 +1314,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) #ifdef CONFIG_MLX5_CORE_IPOIB +#define MLX5_IB_GRH_SGID_OFFSET 8 #define MLX5_IB_GRH_DGID_OFFSET 24 #define MLX5_GID_SIZE 16 @@ -1228,11 +1323,12 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; + struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; + u32 flags_rqpn; u32 qpn; u8 *dgid; u8 g; @@ -1252,8 +1348,10 @@ static inline void 
mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, priv = mlx5i_epriv(netdev); tstamp = &priv->tstamp; + stats = &priv->channel_stats[rq->ix].rq; - g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + flags_rqpn = be32_to_cpu(cqe->flags_rqpn); + g = (flags_rqpn >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) skb->pkt_type = PACKET_HOST; @@ -1262,16 +1360,28 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, else skb->pkt_type = PACKET_MULTICAST; - /* TODO: IB/ipoib: Allow mcast packets from other VFs - * 68996a6e760e5c74654723eeb57bf65628ae87f4 + /* Drop packets that this interface sent, ie multicast packets + * that the HCA has replicated. */ + if (g && (qpn == (flags_rqpn & 0xffffff)) && + (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET, + MLX5_GID_SIZE) == 0)) { + skb->dev = NULL; + return; + } skb_pull(skb, MLX5_IB_GRH_BYTES); skb->protocol = *((__be16 *)(skb->data)); - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + if (netdev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + stats->csum_complete++; + } else { + skb->ip_summed = CHECKSUM_NONE; + stats->csum_none++; + } if (unlikely(mlx5e_rx_hw_stamp(tstamp))) skb_hwtstamps(skb)->hwtstamp = @@ -1290,7 +1400,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->dev = netdev; - stats->csum_complete++; stats->packets++; stats->bytes += cqe_bcnt; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 35ded91203f52984dfa451c3fcebaad1d4c07664..5fb088b54e665e65629fec6a43950939a4a64b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -35,6 +35,7 @@ #include #include #include "en.h" +#include "en/port.h" enum { MLX5E_ST_LINK_STATE, @@ -80,36 +81,25 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv) static int mlx5e_test_link_speed(struct mlx5e_priv *priv) { - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - u32 eth_proto_oper; - int i; + u32 speed; if (!netif_carrier_ok(priv->netdev)) return 1; - if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) - return 1; - - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) { - if (eth_proto_oper & MLX5E_PROT_MASK(i)) - return 0; - } - return 1; + return mlx5e_port_linkspeed(priv->mdev, &speed); } -#ifdef CONFIG_INET -/* loopback test */ -#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) -static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; -#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL - struct mlx5ehdr { __be32 version; __be64 magic; - char text[ETH_GSTRING_LEN]; }; +#ifdef CONFIG_INET +/* loopback test */ +#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\ + sizeof(struct udphdr) + sizeof(struct mlx5ehdr)) +#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL + static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) { struct sk_buff *skb = NULL; @@ -117,10 +107,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) struct ethhdr *ethh; struct udphdr *udph; struct iphdr *iph; - int datalen, iplen; - - datalen = MLX5E_TEST_PKT_SIZE - - (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph)); + int iplen; skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); if (!skb) { @@ -149,7 +136,7 @@ static struct 
sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) /* Fill UDP header */ udph->source = htons(9); udph->dest = htons(9); /* Discard Protocol */ - udph->len = htons(datalen + sizeof(struct udphdr)); + udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr)); udph->check = 0; /* Fill IP header */ @@ -157,7 +144,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) iph->ttl = 32; iph->version = 4; iph->protocol = IPPROTO_UDP; - iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; + iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + + sizeof(struct mlx5ehdr); iph->tot_len = htons(iplen); iph->frag_off = 0; iph->saddr = 0; @@ -170,9 +158,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) mlxh = skb_put(skb, sizeof(*mlxh)); mlxh->version = 0; mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); - strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text)); - datalen -= sizeof(*mlxh); - skb_put_zero(skb, datalen); skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6839481f76974c7f39b2c99ccb8fce4ba89953ea..9a68dee588c1af41921ba8a17bf5547934d2fc05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -53,10 +53,13 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, @@ -73,7 +76,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, @@ -82,6 +84,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -144,9 +147,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += 
rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_complete_tail += rq_stats->csum_complete_tail; + s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; @@ -158,6 +164,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -192,7 +199,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; - s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; @@ -205,6 +211,9 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; #endif s->tx_cqes += sq_stats->cqes; + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); } } @@ -1137,6 +1146,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, @@ -1144,10 +1155,12 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a4c035aedd46cfe2c64e24f02f64bf90bf809e46..3ea8033ed6bddac0b473c2c24e11db576547b189 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -66,10 +66,13 @@ struct mlx5e_sw_stats { u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_ecn_mark; u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; u64 rx_csum_complete; + u64 rx_csum_complete_tail; + u64 rx_csum_complete_tail_slow; u64 
rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_redirect; @@ -86,7 +89,6 @@ struct mlx5e_sw_stats { u64 tx_recover; u64 tx_cqes; u64 tx_queue_wake; - u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 tx_xdp_xmit; u64 tx_xdp_full; @@ -95,6 +97,7 @@ struct mlx5e_sw_stats { u64 rx_wqe_err; u64 rx_mpwqe_filler_cqes; u64 rx_mpwqe_filler_strides; + u64 rx_oversize_pkts_sw_drop; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -179,17 +182,21 @@ struct mlx5e_rq_stats { u64 packets; u64 bytes; u64 csum_complete; + u64 csum_complete_tail; + u64 csum_complete_tail_slow; u64 csum_unnecessary; u64 csum_unnecessary_inner; u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 ecn_mark; u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_redirect; u64 wqe_err; u64 mpwqe_filler_cqes; u64 mpwqe_filler_strides; + u64 oversize_pkts_sw_drop; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; @@ -215,7 +222,6 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; - u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 85796727093eec1ddfc3873cc345f2b8d861ea01..72708263686cfcbe8e805e78398cf91ce829580c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr { struct ip_tunnel_info tun_info; struct mlx5_flow_spec spec; int num_mod_hdr_actions; + int max_mod_hdr_actions; void *mod_hdr_actions; int mirred_ifindex; }; @@ -991,13 +992,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) { struct mlx5e_neigh *m_neigh = &nhe->m_neigh; - u64 bytes, packets, lastuse = 0; struct mlx5e_tc_flow *flow; struct mlx5e_encap_entry *e; struct mlx5_fc *counter; struct neigh_table *tbl; bool neigh_used = false; struct neighbour *n; + u64 lastuse; if (m_neigh->family == AF_INET) tbl = &arp_tbl; @@ -1014,7 +1015,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { counter = mlx5_flow_rule_counter(flow->rule[0]); - mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + lastuse = mlx5_fc_query_lastuse(counter); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { neigh_used = true; break; @@ -1310,31 +1311,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_BASIC, f->key); - struct flow_dissector_key_eth_addrs *mask = + struct flow_dissector_key_basic *mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_BASIC, f->mask); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - dmac_47_16), - mask->dst); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - dmac_47_16), - key->dst); - - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - 
smac_47_16), - mask->src); - ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - smac_47_16), - key->src); - - if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + if (mask->n_proto) *match_level = MLX5_MATCH_L2; } @@ -1368,9 +1359,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } - } else { + } else if (*match_level != MLX5_MATCH_NONE) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); + *match_level = MLX5_MATCH_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { @@ -1408,21 +1400,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_ETH_ADDRS, f->key); - struct flow_dissector_key_basic *mask = + struct flow_dissector_key_eth_addrs *mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_ETH_ADDRS, f->mask); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(key->n_proto)); - if (mask->n_proto) + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dmac_47_16), + mask->dst); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dmac_47_16), + key->dst); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + smac_47_16), + mask->src); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + smac_47_16), + key->src); + + if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) *match_level = MLX5_MATCH_L2; } @@ -1449,10 +1451,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, /* the HW doesn't need L3 inline to match on frag=no */ if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) - *match_level = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_L2; /* *** L2 attributes parsing up to here *** */ else - *match_level = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } } @@ -1741,9 +1743,9 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 2, udp.dest, 0), }; -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at - * max from the SW pedit action. On success, it says how many HW actions were - * actually parsed. +/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at + * max from the SW pedit action. On success, attr->num_mod_hdr_actions + * says how many HW actions were actually parsed. 
*/ static int offload_pedit_fields(struct pedit_headers *masks, struct pedit_headers *vals, @@ -1766,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); - action = parse_attr->mod_hdr_actions; - max_actions = parse_attr->num_mod_hdr_actions; - nactions = 0; + action = parse_attr->mod_hdr_actions + + parse_attr->num_mod_hdr_actions * action_size; + + max_actions = parse_attr->max_mod_hdr_actions; + nactions = parse_attr->num_mod_hdr_actions; for (i = 0; i < ARRAY_SIZE(fields); i++) { f = &fields[i]; @@ -1873,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, if (!parse_attr->mod_hdr_actions) return -ENOMEM; - parse_attr->num_mod_hdr_actions = max_actions; + parse_attr->max_mod_hdr_actions = max_actions; return 0; } @@ -1917,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, goto out_err; } - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); - if (err) - goto out_err; + if (!parse_attr->mod_hdr_actions) { + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); + if (err) + goto out_err; + } err = offload_pedit_fields(masks, vals, parse_attr); if (err < 0) @@ -2213,10 +2219,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); if (!(*out_ttl)) *out_ttl = ip6_dst_hoplimit(dst); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 6dacaeba2fbff85e5091a1151f7ee731e70cf0cd..52d3989bb8e2a2e6b0b17e966b6a9b30224a910d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); if (unlikely(contig_wqebbs_room < num_wqebbs)) { +#ifdef CONFIG_MLX5_EN_IPSEC + struct mlx5_wqe_eth_seg cur_eth = wqe->eth; +#endif mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); +#ifdef CONFIG_MLX5_EN_IPSEC + wqe->eth = cur_eth; +#endif } /* fill wqe */ @@ -456,7 +462,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, struct mlx5_err_cqe *err_cqe) { - u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); + struct mlx5_cqwq *wq = &sq->cq.wq; + u32 ci; + + ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); netdev_err(sq->channel->netdev, "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", @@ -586,8 +595,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; + u32 nbytes = 0; + u16 ci, npkts = 0; struct sk_buff *skb; - u16 ci; int i; while (sq->cc != sq->pc) { @@ -608,8 +618,11 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } dev_kfree_skb_any(skb); + npkts++; + nbytes += wi->num_bytes; sq->cc += wi->num_wqebbs; } + netdev_tx_completed_queue(sq->txq, npkts, nbytes); } #ifdef CONFIG_MLX5_CORE_IPOIB diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c 
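The pedit hunks above split the buffer capacity (max_mod_hdr_actions) from the number of entries already written (num_mod_hdr_actions), so repeated pedit actions on one flow append into a single, lazily allocated modify-header buffer instead of overwriting it. A rough user-space model of that append discipline; the struct, the capacity, and the 16-byte action size are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE 16          /* stand-in for one HW modify-header action */

struct mod_hdr_buf {
	unsigned char *actions;  /* capacity: max * ACTION_SIZE bytes        */
	int max;                 /* like max_mod_hdr_actions                 */
	int num;                 /* like num_mod_hdr_actions (already used)  */
};

static int mod_hdr_append(struct mod_hdr_buf *buf, const void *one_action)
{
	if (!buf->actions) {                 /* allocate once, on first use */
		buf->max = 8;                /* arbitrary capacity for the demo */
		buf->actions = calloc(buf->max, ACTION_SIZE);
		if (!buf->actions)
			return -1;
	}
	if (buf->num >= buf->max)            /* no room left for more actions */
		return -1;
	/* append after what previous calls already wrote */
	memcpy(buf->actions + buf->num * ACTION_SIZE, one_action, ACTION_SIZE);
	buf->num++;
	return 0;
}

int main(void)
{
	struct mod_hdr_buf buf = { 0 };
	unsigned char a[ACTION_SIZE] = { 1 }, b[ACTION_SIZE] = { 2 };

	mod_hdr_append(&buf, a);   /* first pedit action */
	mod_hdr_append(&buf, b);   /* second one lands after it, not over it */
	printf("capacity %d, used %d\n", buf.max, buf.num);
	free(buf.actions);
	return 0;
}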
b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c1e1a16a9b07d4335bb4cdc3b29bdea3673b8fa2..aeab0c4f60f43a0a9bfd61f1d7dc8c531531bd90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -356,8 +356,9 @@ static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name) spin_lock_init(&pf_ctx->lock); INIT_WORK(&pf_ctx->work, eq_pf_action); - pf_ctx->wq = alloc_ordered_workqueue(name, - WQ_MEM_RECLAIM); + pf_ctx->wq = alloc_workqueue(name, + WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, + MLX5_NUM_CMD_EQE); if (!pf_ctx->wq) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ea7dedc2d5adfc48081387619222c8e07da43bd4..2190daace8735c785afc003355827d2a071ec717 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -80,8 +80,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); @@ -109,8 +108,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } @@ -1133,13 +1131,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, int err = 0; u8 *smac_v; - if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { - mlx5_core_warn(esw->dev, - "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", - vport->vport); - return -EPERM; - } - esw_vport_cleanup_ingress_rules(esw, vport); if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { @@ -1696,7 +1687,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, @@ -1765,7 +1756,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); @@ -1804,7 +1795,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u64 node_guid; int err = 0; - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) return -EINVAL; @@ -1812,13 +1803,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, - "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", + "Set invalid MAC while spoofchk is on, vport(%d)\n", vport); - err = -EPERM; - goto unlock; - } err = 
mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); if (err) { @@ -1873,7 +1861,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, unlock: mutex_unlock(&esw->state_lock); - return 0; + return err; } int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, @@ -1881,7 +1869,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, { struct mlx5_vport *evport; - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; @@ -1964,6 +1952,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, evport = &esw->vports[vport]; pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; + if (pschk && !is_valid_ether_addr(evport->info.mac)) + mlx5_core_warn(esw->dev, + "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); if (err) @@ -2007,12 +1999,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) max_guarantee = evport->info.min_rate; } - return max_t(u32, max_guarantee / fw_max_bw_share, 1); + if (max_guarantee) + return max_t(u32, max_guarantee / fw_max_bw_share, 1); + return 0; } -static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) +static int normalize_vports_min_rate(struct mlx5_eswitch *esw) { u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + u32 divider = calculate_vports_min_rate_divider(esw); struct mlx5_vport *evport; u32 vport_max_rate; u32 vport_min_rate; @@ -2026,9 +2021,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) continue; vport_min_rate = evport->info.min_rate; vport_max_rate = evport->info.max_rate; - bw_share = MLX5_MIN_BW_SHARE; + bw_share = 0; - if (vport_min_rate) + if (divider) bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, divider, fw_max_bw_share); @@ -2050,19 +2045,23 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, u32 max_rate, u32 min_rate) { - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); struct mlx5_vport *evport; + u32 fw_max_bw_share; u32 previous_min_rate; - u32 divider; + bool min_rate_supported; + bool max_rate_supported; int err = 0; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + + fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) return -EOPNOTSUPP; @@ -2074,8 +2073,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, previous_min_rate = evport->info.min_rate; evport->info.min_rate = min_rate; - divider = calculate_vports_min_rate_divider(esw); - err = normalize_vports_min_rate(esw, divider); + err = normalize_vports_min_rate(esw); if (err) { evport->info.min_rate = previous_min_rate; goto unlock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 
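The eswitch min-rate hunks above recompute the divider from the largest configured guarantee and, when no vport has a minimum rate at all, leave every bw_share at 0 instead of forcing the firmware minimum. A rough user-space rendering of that arithmetic: the clamp in rate_to_bw_share() is an assumption about what MLX5_RATE_TO_BW_SHARE does (it is not shown in this hunk), and the rates and firmware limit are invented:

#include <stdio.h>

#define FW_MAX_BW_SHARE 100u   /* illustrative firmware limit */
#define MIN_BW_SHARE    1u

static unsigned int calc_divider(const unsigned int *min_rate, int n)
{
	unsigned int max_guarantee = 0, div;
	int i;

	for (i = 0; i < n; i++)
		if (min_rate[i] > max_guarantee)
			max_guarantee = min_rate[i];

	if (!max_guarantee)
		return 0;                       /* no vport asked for a guarantee */
	div = max_guarantee / FW_MAX_BW_SHARE;
	return div > 1 ? div : 1;
}

static unsigned int rate_to_bw_share(unsigned int rate, unsigned int div)
{
	unsigned int share = rate / div;        /* assumed clamp, see lead-in */

	if (share < MIN_BW_SHARE)
		share = MIN_BW_SHARE;
	if (share > FW_MAX_BW_SHARE)
		share = FW_MAX_BW_SHARE;
	return share;
}

int main(void)
{
	unsigned int min_rate[] = { 0, 500000, 1000000 };   /* Kbps, made up */
	unsigned int div = calc_divider(min_rate, 3);
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int share = div ? rate_to_bw_share(min_rate[i], div) : 0;
		printf("vport %d: min_rate %u -> bw_share %u\n",
		       i, min_rate[i], share);
	}
	return 0;
}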
8ca1d1949d930d46d0cf7c386383e060640ff792..d8d0b6bd5c5ae192b1a78c21f9b9359a5e1b5325 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -462,8 +462,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) } err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); - if (err) + if (err) { + kvfree(in); goto err_cqwq; + } cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 436a8136f26ff5f8b879eb02313beabb717f779e..310f9e7d8320045093b35ab42651d465d7cc3465 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -289,7 +289,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) const char *event_name; bool teardown = false; unsigned long flags; - u32 fpga_qpn; u8 syndrome; switch (event) { @@ -300,7 +299,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) case MLX5_EVENT_TYPE_FPGA_QP_ERROR: syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); - fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); break; default: mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index b8ee9101c5066fba94c27d5600aa3a632cff33ca..715ccafc92cd3b3e0058512c8cd3a9b6d8dc4e6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule { }; static const struct rhashtable_params rhash_sa = { - .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), - .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + /* Keep out "cmd" field from the key as it's + * value is not constant during the lifetime + * of the key object. 
+ */ + .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - + FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + + FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), .automatic_shrinking = true, .min_size = 1, @@ -842,6 +848,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context) mutex_lock(&fpga_xfrm->lock); if (!--fpga_xfrm->num_rules) { mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); + kfree(fpga_xfrm->sa_ctx); fpga_xfrm->sa_ctx = NULL; } mutex_unlock(&fpga_xfrm->lock); @@ -1466,7 +1473,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) return 0; - if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51fec724f4fac709e29e40f4110d5f7..22a2ef11151441c3abcfc07e7a6e66e292563cae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, return ret; } -static void mlx5_fpga_tls_release_swid(struct idr *idr, - spinlock_t *idr_spinlock, u32 swid) +static void *mlx5_fpga_tls_release_swid(struct idr *idr, + spinlock_t *idr_spinlock, u32 swid) { unsigned long flags; + void *ptr; spin_lock_irqsave(idr_spinlock, flags); - idr_remove(idr, swid); + ptr = idr_remove(idr, swid); spin_unlock_irqrestore(idr_spinlock, flags); + return ptr; } static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, @@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, kfree(buf); } -struct mlx5_teardown_stream_context { - struct mlx5_fpga_tls_command_context cmd; - u32 swid; -}; - static void mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, struct mlx5_fpga_tls_command_context *cmd, struct mlx5_fpga_dma_buf *resp) { - struct mlx5_teardown_stream_context *ctx = - container_of(cmd, struct mlx5_teardown_stream_context, cmd); - if (resp) { u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); @@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) - mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->tx_idr_spinlock, - ctx->swid); - else - mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, - &fdev->tls->rx_idr_spinlock, - ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); } @@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, rcu_read_lock(); flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); - rcu_read_unlock(); + if (unlikely(!flow)) { + rcu_read_unlock(); + WARN_ONCE(1, "Received NULL pointer for handle\n"); + kfree(buf); + return -EINVAL; + } mlx5_fpga_tls_flow_to_cmd(flow, cmd); + rcu_read_unlock(); MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); @@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, buf->complete = mlx_tls_kfree_complete; ret = 
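The rhashtable parameters above shrink and shift the key so the mutable cmd word at the start of the hardware SA never participates in hashing or lookup. A self-contained illustration of that key-window technique, with an invented struct layout standing in for the firmware SA:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct hw_sa {
	unsigned int cmd;         /* mutable command word: keep out of the key */
	unsigned char addr[16];   /* stable identity fields                    */
	unsigned short port;
};

struct sa_ctx {
	struct hw_sa hw_sa;
	int refcount;             /* other per-object state, never hashed */
};

/* Key window starts right after cmd and covers the rest of hw_sa. */
#define KEY_OFFSET (offsetof(struct sa_ctx, hw_sa) + sizeof(unsigned int))
#define KEY_LEN    (sizeof(struct hw_sa) - sizeof(unsigned int))

static int same_key(const struct sa_ctx *a, const struct sa_ctx *b)
{
	return !memcmp((const char *)a + KEY_OFFSET,
		       (const char *)b + KEY_OFFSET, KEY_LEN);
}

int main(void)
{
	struct sa_ctx x, y;

	memset(&x, 0, sizeof(x));    /* zero padding so memcmp is well defined */
	x.hw_sa.cmd = 1;
	memcpy(x.hw_sa.addr, "\x0a\x00\x00\x01", 4);
	x.hw_sa.port = 4500;

	y = x;
	y.hw_sa.cmd = 2;             /* command changed, identity did not */

	printf("same key after cmd change: %s\n", same_key(&x, &y) ? "yes" : "no");
	return 0;
}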
mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + if (ret < 0) + kfree(buf); return ret; } @@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { - struct mlx5_teardown_stream_context *ctx; + struct mlx5_fpga_tls_command_context *ctx; struct mlx5_fpga_dma_buf *buf; void *cmd; @@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, if (!ctx) return; - buf = &ctx->cmd.buf; + buf = &ctx->buf; cmd = (ctx + 1); MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); MLX5_SET(tls_cmd, cmd, swid, swid); @@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, buf->sg[0].data = cmd; buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - ctx->swid = swid; - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, mlx5_fpga_tls_teardown_completion); } @@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; - rcu_read_lock(); if (direction_sx) - flow = idr_find(&tls->tx_idr, swid); + flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, + swid); else - flow = idr_find(&tls->rx_idr, swid); - - rcu_read_unlock(); + flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, + swid); if (!flow) { mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", @@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, return; } + synchronize_rcu(); /* before kfree(flow) */ mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 37d114c668b7ba70ca968f76c88c42af84967f25..a23323f717a35009ffca0df67fbab129908ae7ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -432,7 +432,7 @@ static void del_sw_hw_rule(struct fs_node *node) if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); update_fte = true; } out: @@ -469,6 +469,7 @@ static void del_hw_fte(struct fs_node *node) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", fte->index, fg->id); + node->active = 0; } } @@ -520,7 +521,7 @@ static void del_sw_flow_group(struct fs_node *node) rhashtable_destroy(&fg->ftes_hash); ida_destroy(&fg->fte_allocator); - if (ft->autogroup.active) + if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size) ft->autogroup.num_groups--; err = rhltable_remove(&ft->fgs_hash, &fg->hash, @@ -1003,6 +1004,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa destroy_ft: root->cmds->destroy_flow_table(root->dev, ft); free_ft: + rhltable_destroy(&ft->fgs_hash); kfree(ft); unlock_root: mutex_unlock(&root->chain_lock); @@ -1065,6 +1067,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, ft->autogroup.active = true; ft->autogroup.required_groups = max_num_groups; + /* We save place for flow groups in addition to max types */ + ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1); return ft; } @@ -1270,8 +1274,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct 
mlx5_flow_table *ft return ERR_PTR(-ENOENT); if (ft->autogroup.num_groups < ft->autogroup.required_groups) - /* We save place for flow groups in addition to max types */ - group_size = ft->max_fte / (ft->autogroup.required_groups + 1); + group_size = ft->autogroup.group_size; /* ft->max_fte == ft->autogroup.max_types */ if (group_size == 0) @@ -1298,7 +1301,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft if (IS_ERR(fg)) goto out; - ft->autogroup.num_groups++; + if (group_size == ft->autogroup.group_size) + ft->autogroup.num_groups++; out: return fg; @@ -1446,8 +1450,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, } trace_mlx5_fs_set_fte(fte, false); + /* Link newly added rules into the tree. */ for (i = 0; i < handle->num_rules; i++) { - if (refcount_read(&handle->rule[i]->node.refcount) == 1) { + if (!handle->rule[i]->node.parent) { tree_add_node(&handle->rule[i]->node, &fte->node); trace_mlx5_fs_add_rule(handle->rule[i]); } @@ -1595,6 +1600,11 @@ lookup_fte_locked(struct mlx5_flow_group *g, fte_tmp = NULL; goto out; } + if (!fte_tmp->node.active) { + tree_put_node(&fte_tmp->node); + fte_tmp = NULL; + goto out; + } nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); out: @@ -2220,7 +2230,7 @@ static struct mlx5_flow_root_namespace cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); /* Create the root namespace */ - root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); + root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) return NULL; @@ -2363,6 +2373,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_egress_root_ns[i]); kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; } static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) @@ -2377,6 +2388,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_ingress_root_ns[i]); kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; } void mlx5_cleanup_fs(struct mlx5_core_dev *dev) @@ -2505,6 +2517,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) for (i--; i >= 0; i--) cleanup_root_ns(steering->esw_egress_root_ns[i]); kfree(steering->esw_egress_root_ns); + steering->esw_egress_root_ns = NULL; return err; } @@ -2532,6 +2545,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) for (i--; i >= 0; i--) cleanup_root_ns(steering->esw_ingress_root_ns[i]); kfree(steering->esw_ingress_root_ns); + steering->esw_ingress_root_ns = NULL; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 32070e5d993d856d0c560e9adde78b3e86d928bf..ba62fbce23a2100ce4144e5b59ad2dba68c0325a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -121,6 +121,7 @@ struct mlx5_flow_table { struct { bool active; unsigned int required_groups; + unsigned int group_size; unsigned int num_groups; } autogroup; /* Protect fwd_rules */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 58af6be13dfa88a2e08d5d85dd54b3fdaeb4e4f6..808ddd732e04473b218dea25c5def432038896d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -321,6 +321,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, } 
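The autogrouped flow-table changes above compute the per-group size once at table creation, group_size = max_fte / (max_num_groups + 1), so the equal-sized autogroups deliberately leave a slice of the table unclaimed, and only groups of exactly that size now count toward num_groups. A two-line worked example of the arithmetic, with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int max_fte = 128, max_num_groups = 3;
	unsigned int group_size = max_fte / (max_num_groups + 1);   /* 32 */

	printf("group_size=%u, left for other groups=%u\n",
	       group_size, max_fte - max_num_groups * group_size);  /* 32, 32 */
	return 0;
}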
EXPORT_SYMBOL(mlx5_fc_query); +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter) +{ + return counter->cache.lastuse; +} + void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index e3797a44e07439a2e6ac620791a333d7f0b266b2..56d916c2f1537ef32b3fc37ff894775e7e7d755a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, @@ -83,6 +84,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; + priv->max_opened_tc = 1; mutex_init(&priv->state_lock); mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ -114,6 +116,47 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv) /* Do nothing .. */ } +void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_sw_stats s = { 0 }; + int i, j; + + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *rq_stats; + + channel_stats = &priv->channel_stats[i]; + rq_stats = &channel_stats->rq; + + s.rx_packets += rq_stats->packets; + s.rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; + + s.tx_packets += sq_stats->packets; + s.tx_bytes += sq_stats->bytes; + s.tx_queue_dropped += sq_stats->dropped; + } + } + + memcpy(&priv->stats.sw, &s, sizeof(s)); +} + +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; + + mlx5i_grp_sw_update_stats(priv); + + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; +} + int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -502,9 +545,9 @@ static int mlx5i_close(struct net_device *netdev) netif_carrier_off(epriv->netdev); mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); - mlx5i_uninit_underlay_qp(epriv); mlx5e_deactivate_priv_channels(epriv); mlx5e_close_channels(&epriv->channels); + mlx5i_uninit_underlay_qp(epriv); unlock: mutex_unlock(&epriv->state_lock); return 0; @@ -662,7 +705,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, profile->init(mdev, netdev, profile, ipriv); - mlx5e_attach_netdev(epriv); + err = mlx5e_attach_netdev(epriv); + if (err) + goto detach; netif_carrier_off(netdev); /* set rdma_netdev func pointers */ @@ -678,6 +723,11 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, return netdev; +detach: + profile->cleanup(epriv); + if (ipriv->sub_interface) + return NULL; + mlx5e_destroy_mdev_resources(mdev); destroy_ht: mlx5i_pkey_qpn_ht_cleanup(netdev); destroy_wq: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 
0982c579ec740f0b8ecd467c2666bc678037b083..445a1f7a7de4e537ac7bb2b95808bd815bbc2a9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -120,6 +120,7 @@ static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 54a188f41f90ca329f1f50e591a6e8fca85731fe..e3e8a5f1ac9b1daadc9970f22c5eb0dd0faea203 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -146,6 +146,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, .ndo_stop = mlx5i_pkey_close, .ndo_init = mlx5i_pkey_dev_init, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, .ndo_do_ioctl = mlx5i_pkey_ioctl, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 3f767cde4c1d50cbcd50d2eb670164fc20802983..0fd62510fb277f3aff408435c35705caef34c2db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -366,10 +366,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { @@ -454,8 +475,9 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, switch (clock->ptp_info.pin_config[pin].func) { case PTP_PF_EXTTS: ptp_event.index = pin; - ptp_event.timestamp = timecounter_cyc2time(&clock->tc, - be64_to_cpu(eqe->data.pps.time_stamp)); + ptp_event.timestamp = + mlx5_timecounter_cyc2time(clock, + be64_to_cpu(eqe->data.pps.time_stamp)); if (clock->pps_info.enabled) { ptp_event.type = PTP_CLOCK_PPSUSR; ptp_event.pps_times.ts_real = @@ -511,14 +533,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) ktime_to_ns(ktime_get_real())); /* Calculate period in seconds to call the overflow watchdog - to make - * sure counter is checked at least once every wrap around. + * sure counter is checked at least twice every wrap around. * The period is calculated as the minimum between max HW cycles count * (The clock source mask) and max amount of cycles that can be * multiplied by clock multiplier where the result doesn't exceed * 64bits. 
*/ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); - overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1); + overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, frac, &frac); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b5e9f664fc66758d5642b18e2396503baf351415..04c815a48e921a1ac327cf72ce13978b245d82b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -51,6 +51,7 @@ #ifdef CONFIG_RFS_ACCEL #include #endif +#include #include #include "mlx5_core.h" #include "fs_core.h" @@ -162,26 +163,6 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, - .mr_cache[16] = { - .size = 8, - .limit = 4 - }, - .mr_cache[17] = { - .size = 8, - .limit = 4 - }, - .mr_cache[18] = { - .size = 8, - .limit = 4 - }, - .mr_cache[19] = { - .size = 4, - .limit = 2 - }, - .mr_cache[20] = { - .size = 4, - .limit = 2 - }, }, }; @@ -231,7 +212,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_VERSION, remaining_size); + + snprintf(string + strlen(string), remaining_size, "%u.%u.%u", + (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), + (u16)(LINUX_VERSION_CODE & 0xffff)); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -640,18 +624,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) { struct mlx5_priv *priv = &mdev->priv; - int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + int vecidx = MLX5_EQ_VEC_COMP_BASE + i; + int irq = pci_irq_vector(mdev->pdev, vecidx); - if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) { mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); return -ENOMEM; } cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), - priv->irq_info[i].mask); + priv->irq_info[vecidx].mask); if (IS_ENABLED(CONFIG_SMP) && - irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask)) mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); return 0; @@ -659,11 +644,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) { + int vecidx = MLX5_EQ_VEC_COMP_BASE + i; struct mlx5_priv *priv = &mdev->priv; - int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + int irq = pci_irq_vector(mdev->pdev, vecidx); irq_set_affinity_hint(irq, NULL); - free_cpumask_var(priv->irq_info[i].mask); + free_cpumask_var(priv->irq_info[vecidx].mask); } static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) @@ -877,11 +863,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) priv->numa_node = dev_to_node(&dev->pdev->dev); - priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); - if (!priv->dbg_root) { - dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n"); - return -ENOMEM; - } + if (mlx5_debugfs_root) + priv->dbg_root = + debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root); err = mlx5_pci_enable_device(dev); if (err) { @@ -1658,8 +1642,12 @@ static const struct pci_device_id 
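The clock hunk above tightens the overflow-watchdog bound: the re-read interval is the largest cycle count whose product with the multiplier still fits in 64 bits, capped at a third of the counter mask so the counter is observed more than twice per wrap-around. A small stand-alone calculation of that bound; the mask, mult and shift values are invented for the demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = (1ULL << 32) - 1;  /* e.g. a 32-bit free-running counter */
	uint32_t mult = 16777216;          /* cycles -> ns multiplier            */
	uint32_t shift = 24;               /* ns = (cycles * mult) >> shift      */

	uint64_t overflow_cycles = (~0ULL >> 1) / mult;   /* no 64-bit overflow  */
	uint64_t third_of_wrap = mask / 3;                /* <= 1/3 of the wrap  */

	if (overflow_cycles > third_of_wrap)
		overflow_cycles = third_of_wrap;

	uint64_t period_ns = (overflow_cycles * mult) >> shift;

	printf("re-read the counter at least every %llu ns (%.2f s)\n",
	       (unsigned long long)period_ns, period_ns / 1e9);
	return 0;
}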
mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */ { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index e36d3e3675f963c44ff76f6c69a7ac6c72155554..9c3653e06886a679eca755f00f6459b2b4068e8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -331,6 +331,24 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, return err; } +static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, + u32 npages) +{ + u32 pages_set = 0; + unsigned int n; + + for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) { + MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set, + fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE)); + pages_set++; + + if (!--npages) + break; + } + + return pages_set; +} + static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { @@ -354,8 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, if (fwp->func_id != func_id) continue; - MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); - i++; + i += fwp_fill_manage_pages_out(fwp, out, i, npages - i); } MLX5_SET(manage_pages_out, out, output_num_entries, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13..09b6b1bfbfa8b412634d22dd371d17d2423381c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; i2c_addr = MLX5_I2C_ADDR_LOW; - if (offset >= MLX5_EEPROM_PAGE_LENGTH) { - i2c_addr = MLX5_I2C_ADDR_HIGH; - offset -= MLX5_EEPROM_PAGE_LENGTH; - } MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 4ca07bfb6b14f75760928e5df47ce911f8639f10..479ac21cdbc6940701f1029ba31c699449293235 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, { struct mlx5_qp_table *table = &dev->priv.qp_table; struct mlx5_core_rsc_common *common; + unsigned long flags; - spin_lock(&table->lock); + spin_lock_irqsave(&table->lock, flags); common = radix_tree_lookup(&table->tree, rsn); if (common) atomic_inc(&common->refcount); - spin_unlock(&table->lock); + spin_unlock_irqrestore(&table->lock, flags); if (!common) { mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", @@ -132,7 +133,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int 
event_type) if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) { mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n", event_type, rsn); - return; + goto out; } switch (common->res) { @@ -150,7 +151,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) default: mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); } - +out: mlx5_core_put_rsc(common); } diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 2cf89126fb23ba13c7f47f59be40e23e638d11fe..d765e7a69d6b18dcff6acc30c4005b2023aadf44 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -86,6 +86,8 @@ static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, return err; if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) { + fsm_state_err = min_t(enum mlxfw_fsm_state_err, + fsm_state_err, MLXFW_FSM_STATE_ERR_MAX); pr_err("Firmware flash failed: %s\n", mlxfw_fsm_state_err_str[fsm_state_err]); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c index 993cb5ba934ecfc4635d056fb7259d5427943936..b99169a386ebc5e131757821f4c2e7f65e8ac9b9 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "mlxfw_mfa2.h" #include "mlxfw_mfa2_file.h" @@ -579,7 +580,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_size = be32_to_cpu(comp->size); comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len; - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL); + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size); if (!comp_data) return ERR_PTR(-ENOMEM); comp_data->comp.data_size = comp_size; @@ -601,7 +602,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len; return &comp_data->comp; err_out: - kfree(comp_data); + vfree(comp_data); return ERR_PTR(err); } @@ -610,7 +611,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp) const struct mlxfw_mfa2_comp_data *comp_data; comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp); - kfree(comp_data); + vfree(comp_data); } void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 937d0ace699a7eeb4e04af3bf54eebde5dd5d459..049ca4ba49deb12a46f8b9e5b9142c90105e5efc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -81,6 +81,7 @@ struct mlxsw_core { struct mlxsw_core_port *ports; unsigned int max_ports; bool reload_fail; + bool fw_flash_in_progress; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -428,13 +429,18 @@ struct mlxsw_reg_trans { struct rcu_head rcu; }; -#define MLXSW_EMAD_TIMEOUT_MS 200 +#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000 +#define MLXSW_EMAD_TIMEOUT_MS 200 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) { unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); - queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); + if (trans->core->fw_flash_in_progress) + timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS); + + queue_delayed_work(trans->core->emad_wq, 
&trans->timeout_dw, + timeout << trans->retries); } static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, @@ -483,6 +489,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_transmit(trans->core, trans); if (err == 0) return; + + if (!atomic_dec_and_test(&trans->active)) + return; } else { err = -EIO; } @@ -563,7 +572,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; - emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); + emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); if (!emad_wq) return -ENOMEM; mlxsw_core->emad_wq = emad_wq; @@ -582,7 +591,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -594,6 +603,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -943,8 +953,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, mlxsw_core->bus, mlxsw_core->bus_priv, true, devlink); - if (err) - mlxsw_core->reload_fail = true; + mlxsw_core->reload_fail = !!err; + return err; } @@ -1083,8 +1093,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (mlxsw_core->reload_fail) - goto reload_fail; + if (mlxsw_core->reload_fail) { + if (!reload) + /* Only the parts that were not de-initialized in the + * failed reload attempt need to be de-initialized. 
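The EMAD changes above keep the 200 ms base timeout but stretch it to 3000 ms while a firmware flash is in progress, and shift it left by the retry count so later retransmissions wait exponentially longer. The same policy in plain C (the two base values are taken from the patch, the rest is illustrative):

#include <stdio.h>
#include <stdbool.h>

#define EMAD_TIMEOUT_MS            200
#define EMAD_TIMEOUT_FW_FLASH_MS  3000

static unsigned int emad_timeout_ms(bool fw_flash_in_progress, unsigned int retries)
{
	unsigned int base = fw_flash_in_progress ? EMAD_TIMEOUT_FW_FLASH_MS
						 : EMAD_TIMEOUT_MS;
	return base << retries;    /* 200, 400, 800, ... or 3000, 6000, ... */
}

int main(void)
{
	unsigned int retry;

	for (retry = 0; retry < 5; retry++)
		printf("retry %u: normal %u ms, during flash %u ms\n",
		       retry, emad_timeout_ms(false, retry),
		       emad_timeout_ms(true, retry));
	return 0;
}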
+ */ + goto reload_fail_deinit; + else + return; + } if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); @@ -1098,9 +1115,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, if (!reload) devlink_resources_unregister(devlink, NULL); mlxsw_core->bus->fini(mlxsw_core->bus_priv); - if (reload) - return; -reload_fail: + if (!reload) + devlink_free(devlink); + + return; + +reload_fail_deinit: + devlink_unregister(devlink); + devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); @@ -1368,7 +1390,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -1589,11 +1611,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: @@ -1844,14 +1868,26 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get); +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core) +{ + mlxsw_core->fw_flash_in_progress = true; +} +EXPORT_SYMBOL(mlxsw_core_fw_flash_start); + +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core) +{ + mlxsw_core->fw_flash_in_progress = false; +} +EXPORT_SYMBOL(mlxsw_core_fw_flash_end); + static int __init mlxsw_core_module_init(void) { int err; - mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); + mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); if (!mlxsw_wq) return -ENOMEM; - mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, + mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, mlxsw_core_driver_name); if (!mlxsw_owq) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c35be477856f18d6493c4a8c1c6d14e0ef2f2d1b..c4e4971764e54efc101130c87af817f16c7b6106 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core, u64 *p_single_size, u64 *p_double_size, u64 *p_linear_size); +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core); +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core); + bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index c51b2adfc1e19fcd739b4f7a2115d3842b9423b0..2cbfa5cfefabc8522386aa3d127a948245949cf1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&block->resource_list); block->afa = mlxsw_afa; @@ -344,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); - return NULL; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(mlxsw_afa_block_create); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c 
b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 5890fdfd62c377d9444d04589f0bf455d4ef6229..a903e97793f9ae67b633c245a719261a401ce4d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; + + memcpy(ncqe, cqe, q->elem_size); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); if (sendq) { struct mlxsw_pci_queue *sdq; sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, cqe); + wqe_counter, ncqe); q->u.cq.comp_sdq_count++; } else { struct mlxsw_pci_queue *rdq; rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, q->u.cq.v, cqe); + wqe_counter, q->u.cq.v, ncqe); q->u.cq.comp_rdq_count++; } if (++items == credits) break; } - if (items) { - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + if (items) mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - } } static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) @@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) - break; + return 0; cond_resched(); } while (time_before(jiffies, end)); - return 0; + return -EBUSY; } static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 83f452b7ccbbdee2e689fcf4a6e835a955580811..100618531021ea18e76f23d64992c2ff72b7a9b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -27,7 +27,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF @@ -53,6 +53,7 @@ #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6e8b619b769b4ef39fc3ddbd44e067d3f09189b1..c9895876a23177e13cd7dbf22be02b032eb9cef4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -877,7 +877,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, MLXSW_REG_ZERO(spaft, payload); mlxsw_reg_spaft_local_port_set(payload, local_port); mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged); - mlxsw_reg_spaft_allow_prio_tagged_set(payload, true); + mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged); mlxsw_reg_spaft_allow_tagged_set(payload, true); } @@ -3215,7 +3215,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, * Configures the ETS elements. 
*/ #define MLXSW_REG_QEEC_ID 0x400D -#define MLXSW_REG_QEEC_LEN 0x1C +#define MLXSW_REG_QEEC_LEN 0x20 MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN); @@ -3257,6 +3257,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8); */ MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8); +/* reg_qeec_mise + * Min shaper configuration enable. Enables configuration of the min + * shaper on this ETS element + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1); + enum { MLXSW_REG_QEEC_BYTES_MODE, MLXSW_REG_QEEC_PACKETS_MODE, @@ -3273,6 +3282,17 @@ enum { */ MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1); +/* The smallest permitted min shaper rate. */ +#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */ + +/* reg_qeec_min_shaper_rate + * Min shaper information rate. + * For CPU port, can only be configured for port hierarchy. + * When in bytes mode, value is specified in units of 1000bps. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28); + /* reg_qeec_mase * Max shaper configuration enable. Enables configuration of the max * shaper on this ETS element. diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 30bb2c533cecc42c5c75b3f21b36d2150baac083..1019c9efedea44bdf13135a183acfbad4585d6e5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -308,8 +308,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, }, .mlxsw_sp = mlxsw_sp }; + int err; + + mlxsw_core_fw_flash_start(mlxsw_sp->core); + err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); + mlxsw_core_fw_flash_end(mlxsw_sp->core); - return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware); + return err; } static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) @@ -331,7 +336,10 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) return -EINVAL; } if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && + (rev->minor > req_rev->minor || + (rev->minor == req_rev->minor && + rev->subminor >= req_rev->subminor))) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", @@ -836,8 +844,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + u16 thres_cells; + u16 delay_cells; bool lossy; - u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -851,10 +860,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, continue; lossy = !(pfc || pause_en); - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, - pause_en); - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, + pfc, pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, + thres_cells, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -1051,6 +1061,9 @@ static void update_stats_cache(struct work_struct *work) periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes 
down. + */ goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@ -1981,7 +1994,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) int i; for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", mlxsw_sp_port_hw_prio_stats[i].str, prio); *p += ETH_GSTRING_LEN; } @@ -1992,7 +2005,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) int i; for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", mlxsw_sp_port_hw_tc_stats[i].str, tc); *p += ETH_GSTRING_LEN; } @@ -2481,6 +2494,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + if (!autoneg && cmd->base.speed == SPEED_56000) { + netdev_err(dev, "56G not supported with autoneg off\n"); + return -EINVAL; + } eth_proto_new = autoneg ? mlxsw_sp_to_ptys_advert_link(cmd) : mlxsw_sp_to_ptys_speed(cmd->base.speed); @@ -2497,11 +2514,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, if (err) return err; + mlxsw_sp_port->link.autoneg = autoneg; + if (!netif_running(dev)) return 0; - mlxsw_sp_port->link.autoneg = autoneg; - mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); @@ -2736,6 +2753,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); } +static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 minrate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_mise_set(qeec_pl, true); + mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 switch_prio, u8 tclass) { @@ -2776,7 +2808,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) err = mlxsw_sp_port_ets_set(mlxsw_sp_port, MLXSW_REG_QEEC_HIERARCY_TC, i + 8, i, - false, 0); + true, 100); if (err) return err; } @@ -2804,6 +2836,23 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + } + + /* Configure the min shaper for multicast TCs. */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MIS_MIN); + if (err) + return err; } /* Map all priorities to traffic class 0. 
*/ @@ -3077,6 +3126,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_port_remove(mlxsw_sp, i); kfree(mlxsw_sp->port_to_module); kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; } static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) @@ -3125,6 +3175,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->port_to_module); err_port_to_module_alloc: kfree(mlxsw_sp->ports); + mlxsw_sp->ports = NULL; return err; } @@ -3179,6 +3230,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } } +static struct mlxsw_sp_port * +mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) + return mlxsw_sp->ports[local_port]; + return NULL; +} + static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count, struct netlink_ext_ack *extack) @@ -3189,7 +3248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, int i; int err; - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); @@ -3256,7 +3315,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count; int i; - mlxsw_sp_port = mlxsw_sp->ports[local_port]; + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); @@ -3288,6 +3347,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; } +static void +mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -3308,6 +3376,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } } @@ -3519,7 +3588,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - is_bytes = true; rate = 4 * 1024; burst_size = 4; break; @@ -4231,6 +4299,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } +static void +mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); + struct net_device *upper_dev; + struct list_head *iter; + + if (netif_is_bridge_port(lag_dev)) + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!netif_is_bridge_port(upper_dev)) + continue; + br_dev = netdev_master_upper_dev_get(upper_dev); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); + } +} + static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) { char sldr_pl[MLXSW_REG_SLDR_LEN]; @@ -4381,9 +4468,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); if (err) goto err_col_port_add; - err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); - if (err) - 
goto err_col_port_enable; mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, mlxsw_sp_port->local_port); @@ -4398,8 +4482,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, return 0; -err_col_port_enable: - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -4418,11 +4500,14 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0); - mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); /* Any VLANs configured on the port are no longer valid */ mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + /* Make the LAG and its directly linked uppers leave bridges they + * are memeber in + */ + mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); if (lag->ref_count == 1) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -4459,21 +4544,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); } -static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool lag_tx_enabled) +static int +mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) { - if (lag_tx_enabled) - return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, - mlxsw_sp_port->lag_id); - else - return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, - mlxsw_sp_port->lag_id); + int err; + + err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + if (err) + goto err_dist_port_add; + + return 0; + +err_dist_port_add: + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; +} + +static int +mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + goto err_col_port_disable; + + return 0; + +err_col_port_disable: + mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; } static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, struct netdev_lag_lower_state_info *info) { - return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); + if (info->tx_enabled) + return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); + else + return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); } static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -4631,12 +4751,14 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, lower_dev, upper_dev); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) + if (info->linking) { err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - else + } else { + mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); + } } else if (netif_is_ovs_master(upper_dev)) { if (info->linking) err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); @@ -4645,6 +4767,16 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, } else if (netif_is_macvlan(upper_dev)) { if (!info->linking) mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); + } else if (is_vlan_dev(upper_dev)) { + struct net_device *br_dev; + + if (!netif_is_bridge_port(upper_dev)) + 
break; + if (info->linking) + break; + br_dev = netdev_master_upper_dev_get(upper_dev); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, + br_dev); } break; } @@ -4986,7 +5118,7 @@ static int __init mlxsw_sp_module_init(void) return 0; err_sp2_pci_driver_register: - mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); err_sp1_pci_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp2_driver); err_sp2_core_driver_register: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3cdb7aca90b72492f02fd4bce2c35ad6de76eaa9..7d2978223c16fa66892de2555e24138118058a70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -491,6 +491,7 @@ enum mlxsw_sp_acl_profile { }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); +struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl); struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block); unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block); void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 8ca77f3e8f279f1ada24240042c793c460ffabe0..ffd4b055fead73d5d7c4dc5519ae739074631173 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, * to be written using PEFA register to all indexes for all regions. */ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) { - err = -ENOMEM; + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); goto err_afa_block; } err = mlxsw_afa_block_continue(afa_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index c4f9238591e6ef1c338a7434dfaa4f1701ea4106..22ba0600ccb5431c38f234e6d64914f84ed9ce6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -38,6 +38,11 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) return acl->afk; } +struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl) +{ + return &acl->tcam; +} + struct mlxsw_sp_acl_block_binding { struct list_head list; struct net_device *dev; @@ -442,7 +447,8 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) - return NULL; + return ERR_PTR(-ENOMEM); + rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa); if (IS_ERR(rulei->act_block)) { err = PTR_ERR(rulei->act_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index e3c6fe8b1d4065e0dd1ce60c8a817138e9e54f74..1dcf152b281384b4a5df06ca7407264f4e9275fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, act_set = mlxsw_afa_block_first_set(rulei->act_block); mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + 
goto err_ptce2_write; + + return 0; + +err_ptce2_write: + cregion->ops->entry_remove(cregion, centry); + return err; } static void diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index e171513bb32a6c5d2b93582d548c78d0b3f710ea..ab03fd136ceffed73c7a387ef8dd489b8b729647 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -57,6 +57,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, tcam->max_groups = max_groups; tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUP_SIZE); + tcam->max_group_size = min_t(unsigned int, tcam->max_group_size, + MLXSW_REG_PAGT_ACL_MAX_NUM); err = ops->init(mlxsw_sp, tcam->priv, tcam); if (err) @@ -95,8 +97,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE)) return -EIO; - max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE); - if (rulei->priority > max_priority) + /* Priority range is 1..cap_kvd_size-1. */ + max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1; + if (rulei->priority >= max_priority) return -EINVAL; /* Unlike in TC, in HW, higher number means higher priority. */ @@ -570,12 +573,13 @@ static void mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { + struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl); const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); mlxsw_afk_key_info_put(region->key_info); kfree(region); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index b25048c6c7618e4b2e9fa210bcdfe7516bcb41d2..21296fa7f7fbf52060d51740e9c2247085c4e8d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port) have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port, &prio_map); - if (!have_dscp) { - err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, - MLXSW_REG_QPTS_TRUST_STATE_PCP); - if (err) - netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n"); - return err; - } - mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio, &dscp_map); err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port, @@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port) return err; } + if (!have_dscp) { + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, + MLXSW_REG_QPTS_TRUST_STATE_PCP); + if (err) + netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n"); + return err; + } + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, MLXSW_REG_QPTS_TRUST_STATE_DSCP); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 41e607a14846db0a791f721c15c2171caf736881..4fe193c4fa55da6e0f1d9c55a1f4aa1e966eaf68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool 
counters_enabled, start_again: err = devlink_dpipe_entry_ctx_prepare(dump_ctx); if (err) - return err; + goto err_ctx_prepare; j = 0; for (; i < rif_count; i++) { struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); @@ -247,6 +247,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled, return 0; err_entry_append: err_entry_get: +err_ctx_prepare: rtnl_unlock(); devlink_dpipe_entry_clear(&entry); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 715d24ff937e90bbe96a15bde6afa18184ea6897..562c4429eec71f6a8c7571270f56ce149b3a4cee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -696,8 +696,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { .type = MLXSW_SP_FID_TYPE_DUMMY, .fid_size = sizeof(struct mlxsw_sp_fid), - .start_index = MLXSW_SP_RFID_BASE - 1, - .end_index = MLXSW_SP_RFID_BASE - 1, + .start_index = VLAN_N_VID - 1, + .end_index = VLAN_N_VID - 1, .ops = &mlxsw_sp_fid_dummy_ops, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8d211972c5e90fbe1a24dc32edf6f371ff018bd4..9f4eb3cde93e293df83f8e356d5a1c02c76dd946 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -98,9 +98,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, u8 prio = tcf_vlan_push_prio(a); u16 vid = tcf_vlan_push_vid(a); - return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - action, vid, - proto, prio, extack); + err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + action, vid, + proto, prio, extack); + if (err) + return err; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 54275624718baca0111d2dfd3bf168fd063e06cf..336e5ecc68f8659c9121948574a59988241a5edf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table, return 0; err_erif_unresolve: - list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list, - vif_node) + list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list, + vif_node) mlxsw_sp_mr_route_evif_unresolve(mr_table, erve); err_irif_unresolve: - list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list, - vif_node) + list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list, + vif_node) mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve); mr_vif->rif = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 346f4a5fe053bc186008ebbf789f5d8f0919e1fa..221aa6a474eb104ce5be1591e91d134dc4aeb68b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, int err; afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) - return ERR_PTR(-ENOMEM); + if (IS_ERR(afa_block)) + return afa_block; err = mlxsw_afa_block_append_allocated_counter(afa_block, 
counter_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index bdf53cf350f60874a70153eb27e75e7bcf8ad768..dc63583c49483375ff8d62d69a2733cd3032c946 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static u64 +mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; +} + +static u64 +mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; +} + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@ -369,7 +383,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; @@ -402,9 +417,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num); _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); stats_ptr->qstats->overlimits += overlimits; @@ -575,9 +591,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, tx_packets = stats->tx_packets - stats_base->tx_packets; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops; @@ -613,7 +629,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; } @@ -650,6 +666,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) return 0; + if (!p->child_handle) { + /* This is an invisible FIFO replacing the original Qdisc. + * Ignore it--the original Qdisc's destroy will follow. + */ + return 0; + } + /* See if the grafted qdisc is already offloaded on any tclass. If so, * unoffload it. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2ab9cf25a08ae19788d28ffddaa8698ba2213152..091d812ececa7193940b8f683fb0fc7c91ae2d46 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -970,7 +970,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) if (d) return l3mdev_fib_table(d) ? : RT_TABLE_MAIN; else - return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN; + return RT_TABLE_MAIN; } static struct mlxsw_sp_rif * @@ -1215,15 +1215,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp, { u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN; enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt; - struct net_device *ipip_ul_dev; if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto) return false; - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev); return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip, - ul_tb_id, ipip_entry) && - (!ipip_ul_dev || ipip_ul_dev == ul_dev); + ul_tb_id, ipip_entry); } /* Given decap parameters, find the corresponding IPIP entry. */ @@ -1532,27 +1529,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_ipip_entry *ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); - enum mlxsw_sp_l3proto ul_proto; - union mlxsw_sp_l3addr saddr; - u32 ul_tb_id; if (!ipip_entry) return 0; - /* For flat configuration cases, moving overlay to a different VRF might - * cause local address conflict, and the conflicting tunnels need to be - * demoted. - */ - ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); - ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; - saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); - if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, - saddr, ul_tb_id, - ipip_entry)) { - mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); - return 0; - } - return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, true, false, false, extack); } @@ -2248,7 +2228,7 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) static void mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, - bool removing); + bool removing, bool dead); static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) { @@ -2379,7 +2359,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) memcpy(neigh_entry->ha, ha, ETH_ALEN); mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected); - mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected); + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected, + dead); if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); @@ -3343,13 +3324,79 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, nh->update = 1; } +static int +mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + struct neighbour *n, *old_n = neigh_entry->key.n; + struct mlxsw_sp_nexthop *nh; + bool entry_connected; + u8 nud_state, dead; + int err; + + nh = list_first_entry(&neigh_entry->nexthop_list, + struct mlxsw_sp_nexthop, neigh_list_node); + + n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); + if (!n) { + n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, + 
nh->rif->dev); + if (IS_ERR(n)) + return PTR_ERR(n); + neigh_event_send(n, NULL); + } + + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + neigh_entry->key.n = n; + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + if (err) + goto err_neigh_entry_insert; + + read_lock_bh(&n->lock); + nud_state = n->nud_state; + dead = n->dead; + read_unlock_bh(&n->lock); + entry_connected = nud_state & NUD_VALID && !dead; + + list_for_each_entry(nh, &neigh_entry->nexthop_list, + neigh_list_node) { + neigh_release(old_n); + neigh_clone(n); + __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } + + neigh_release(n); + + return 0; + +err_neigh_entry_insert: + neigh_entry->key.n = old_n; + mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + neigh_release(n); + return err; +} + static void mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, - bool removing) + bool removing, bool dead) { struct mlxsw_sp_nexthop *nh; + if (list_empty(&neigh_entry->nexthop_list)) + return; + + if (dead) { + int err; + + err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp, + neigh_entry); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n"); + return; + } + list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { __mlxsw_sp_nexthop_neigh_update(nh, removing); @@ -5935,7 +5982,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; @@ -7382,14 +7429,15 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { - bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; + bool usp; int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + usp = READ_ONCE(init_net.ipv4.sysctl_ip_fwd_update_priority); mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db715da7bab7746c58ed23b48048d8a4917ca10d..8d556eb37b7aa42a36c1f57579193bdef9d39da9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -282,24 +282,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) kfree(bridge_port); } -static bool -mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * - bridge_port) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev); - - /* In case ports were pulled from out of a bridged LAG, then - * it's possible the reference count isn't zero, yet the bridge - * port should be destroyed, as it's no longer an upper of ours. 
- */ - if (!mlxsw_sp && list_empty(&bridge_port->vlans_list)) - return true; - else if (bridge_port->ref_count == 0) - return true; - else - return false; -} - static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, struct net_device *brport_dev) @@ -337,8 +319,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, { struct mlxsw_sp_bridge_device *bridge_device; - bridge_port->ref_count--; - if (!mlxsw_sp_bridge_port_should_destroy(bridge_port)) + if (--bridge_port->ref_count != 0) return; bridge_device = bridge_port->bridge_device; mlxsw_sp_bridge_port_destroy(bridge_port); @@ -1228,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) { return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG; } static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) @@ -1240,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, const char *mac, u16 fid, bool adding, enum mlxsw_reg_sfd_rec_action action, - bool dynamic) + enum mlxsw_reg_sfd_rec_policy policy) { char *sfd_pl; u8 num_rec; @@ -1251,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), - mac, fid, action, local_port); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1271,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, - MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); + MLXSW_REG_SFD_REC_ACTION_NOP, + mlxsw_sp_sfd_rec_policy(dynamic)); } int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, @@ -1279,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, { return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, - false); + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); } static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, @@ -1604,7 +1585,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid_index; int err = 0; - if (switchdev_trans_ph_prepare(trans)) + if (switchdev_trans_ph_commit(trans)) return 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); @@ -1755,7 +1736,7 @@ static void mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port, u16 vid) { - u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; + u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); @@ -2020,7 +2001,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, vid = is_vlan_dev(dev) ? 
vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (WARN_ON(!mlxsw_sp_port_vlan)) + if (!mlxsw_sp_port_vlan) return; mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); @@ -2317,8 +2298,6 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) - break; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ @@ -2345,8 +2324,15 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct mlxsw_sp_switchdev_event_work *switchdev_work; struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct net_device *br_dev; - if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + /* Tunnel devices are not our uppers, so check their master instead */ + br_dev = netdev_master_upper_dev_get_rcu(dev); + if (!br_dev) + return NOTIFY_DONE; + if (!netif_is_bridge_master(br_dev)) + return NOTIFY_DONE; + if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev)) return NOTIFY_DONE; switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2d4f213e154d8ca72deb092efd36c638e434c3b7..b22c190e001d912ce7b0d433a2ce8f62126bdbd4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1289,6 +1289,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; } static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) @@ -1323,6 +1324,7 @@ static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) if (mlxsw_sx_port_created(mlxsw_sx, i)) mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); + mlxsw_sx->ports = NULL; return err; } @@ -1406,6 +1408,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port, u8 module, width; int err; + if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { + dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + if (new_type == DEVLINK_PORT_TYPE_AUTO) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index bd51e057e915063e401c957c6f606b28fee2084a..b881f5d4a7f9efd7769c99eea86105453635f4fc 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1164,7 +1164,7 @@ ks8695_timeout(struct net_device *ndev) * sk_buff and adds it to the TX ring. It then kicks the TX DMA * engine to ensure transmission begins. 
*/ -static int +static netdev_tx_t ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index bd6e9014bc74794b9a8a7e680f5b59ea7048382f..b83b070a9eec842e7af24933224a54cbf234f0da 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -535,9 +535,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) /* set dma read address */ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); - /* start the packet dma process, and set auto-dequeue rx */ - ks8851_wrreg16(ks, KS_RXQCR, - ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); + /* start DMA access */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); if (rxlen > 4) { unsigned int rxalign; @@ -568,7 +567,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) } } - ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + /* end DMA access and dequeue packet */ + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF); } } @@ -785,6 +785,15 @@ static void ks8851_tx_work(struct work_struct *work) static int ks8851_net_open(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); + int ret; + + ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + dev->name, ks); + if (ret < 0) { + netdev_err(dev, "failed to get irq\n"); + return ret; + } /* lock the card, even if we may not actually be doing anything * else at the moment */ @@ -849,6 +858,7 @@ static int ks8851_net_open(struct net_device *dev) netif_dbg(ks, ifup, ks->netdev, "network device up\n"); mutex_unlock(&ks->lock); + mii_check_link(&ks->mii); return 0; } @@ -899,6 +909,8 @@ static int ks8851_net_stop(struct net_device *dev) dev_kfree_skb(txb); } + free_irq(dev->irq, ks); + return 0; } @@ -1508,6 +1520,7 @@ static int ks8851_probe(struct spi_device *spi) spi_set_drvdata(spi, ks); + netif_carrier_off(ks->netdev); ndev->if_port = IF_PORT_100BASET; ndev->netdev_ops = &ks8851_netdev_ops; ndev->irq = spi->irq; @@ -1529,14 +1542,6 @@ static int ks8851_probe(struct spi_device *spi) ks8851_read_selftest(ks); ks8851_init_mac(ks); - ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - ndev->name, ks); - if (ret < 0) { - dev_err(&spi->dev, "failed to get irq\n"); - goto err_irq; - } - ret = register_netdev(ndev); if (ret) { dev_err(&spi->dev, "failed to register network device\n"); @@ -1549,14 +1554,10 @@ static int ks8851_probe(struct spi_device *spi) return 0; - err_netdev: - free_irq(ndev->irq, ks); - -err_irq: +err_id: if (gpio_is_valid(gpio)) gpio_set_value(gpio, 0); -err_id: regulator_disable(ks->vdd_reg); err_reg: regulator_disable(ks->vdd_io); @@ -1574,7 +1575,6 @@ static int ks8851_remove(struct spi_device *spi) dev_info(&spi->dev, "remove\n"); unregister_netdev(priv->netdev); - free_irq(spi->irq, priv); if (gpio_is_valid(priv->gpio)) gpio_set_value(priv->gpio, 0); regulator_disable(priv->vdd_reg); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 0e9719fbc624382ff9ee63be06698e4515c8a922..35f8c9ef204d91cd4c17591d84ebab597cff33b4 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1021,9 +1021,9 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) * spin_lock_irqsave is required because tx and rx should be mutual exclusive. * So while tx is in-progress, prevent IRQ interrupt from happenning. 
*/ -static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - int retv = NETDEV_TX_OK; + netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); disable_irq(netdev->irq); diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 44bb04d4d21b58cd878e7bfb4c24a690e959157d..46181559d1f1b2acf386e6a77fb5caea0f2aef72 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -505,13 +505,19 @@ static struct regmap_bus phymap_encx24j600 = { .reg_read = regmap_encx24j600_phy_reg_read, }; -void devm_regmap_init_encx24j600(struct device *dev, - struct encx24j600_context *ctx) +int devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx) { mutex_init(&ctx->mutex); regcfg.lock_arg = ctx; ctx->regmap = devm_regmap_init(dev, ®map_encx24j600, ctx, ®cfg); + if (IS_ERR(ctx->regmap)) + return PTR_ERR(ctx->regmap); ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg); + if (IS_ERR(ctx->phymap)) + return PTR_ERR(ctx->phymap); + + return 0; } EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index f831238d9793abe8b90b36b1e521cb3943230e2c..965814a343b4084bd52455746c2b4aba21b6dee0 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1032,10 +1032,13 @@ static int encx24j600_spi_probe(struct spi_device *spi) priv->speed = SPEED_100; priv->ctx.spi = spi; - devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); ndev->irq = spi->irq; ndev->netdev_ops = &encx24j600_netdev_ops; + ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); + if (ret) + goto out_free; + mutex_init(&priv->lock); /* Reset device and check if it is connected */ diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h index f604a260ede798b93d9235087cb850250f40de2e..711147a159aa991f39b65268c5a6773c90be3303 100644 --- a/drivers/net/ethernet/microchip/encx24j600_hw.h +++ b/drivers/net/ethernet/microchip/encx24j600_hw.h @@ -15,8 +15,8 @@ struct encx24j600_context { int bank; }; -void devm_regmap_init_encx24j600(struct device *dev, - struct encx24j600_context *ctx); +int devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx); /* Single-byte instructions */ #define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1)) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 001b5f714c1b767e80835aa62fb4afa8be5e06d7..208341541087ea4a4658261f5f844f85aa47f089 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { - flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; @@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) /* map TX interrupt to vector */ int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, 
vector); lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); - if (flags & - LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { - int_vec_en_auto_clr |= INT_VEC_EN_(vector); - lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, - int_vec_en_auto_clr); - } /* Remove TX interrupt from shared mask */ intr->vector_list[0].int_mask &= ~int_bit; @@ -802,14 +795,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) u32 mac_addr_hi = 0; u32 mac_addr_lo = 0; u32 data; - int ret; netdev = adapter->netdev; - lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_); - ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_, - 0, 1000, 20000, 100); - if (ret) - return ret; /* setup auto duplex, and speed detection */ data = lan743x_csr_read(adapter, MAC_CR); @@ -968,13 +955,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) memset(&ksettings, 0, sizeof(ksettings)); phy_ethtool_get_link_ksettings(netdev, &ksettings); - local_advertisement = phy_read(phydev, MII_ADVERTISE); - if (local_advertisement < 0) - return; - - remote_advertisement = phy_read(phydev, MII_LPA); - if (remote_advertisement < 0) - return; + local_advertisement = + ethtool_adv_to_mii_adv_t(phydev->advertising); + remote_advertisement = + ethtool_adv_to_mii_adv_t(phydev->lp_advertising); lan743x_phy_update_flowcontrol(adapter, ksettings.base.duplex, @@ -1412,7 +1396,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx, } static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, - unsigned int frame_length) + unsigned int frame_length, + int nr_frags) { /* called only from within lan743x_tx_xmit_frame. * assuming tx->ring_lock has already been acquired. @@ -1422,6 +1407,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, /* wrap up previous descriptor */ tx->frame_data0 |= TX_DESC_DATA0_EXT_; + if (nr_frags <= 0) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = tx->frame_data0; @@ -1526,8 +1515,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, u32 tx_tail_flags = 0; /* wrap up previous descriptor */ - tx->frame_data0 |= TX_DESC_DATA0_LS_; - tx->frame_data0 |= TX_DESC_DATA0_IOC_; + if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) == + TX_DESC_DATA0_DTYPE_DATA_) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; buffer_info = &tx->buffer_info[tx->frame_tail]; @@ -1612,7 +1604,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, } if (gso) - lan743x_tx_frame_add_lso(tx, frame_length); + lan743x_tx_frame_add_lso(tx, frame_length, nr_frags); if (nr_frags <= 0) goto finish; @@ -1675,7 +1667,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) netif_wake_queue(adapter->netdev); } - if (!napi_complete_done(napi, weight)) + if (!napi_complete(napi)) goto done; /* enable isr */ @@ -1684,7 +1676,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) lan743x_csr_read(adapter, INT_STS); done: - return weight; + return 0; } static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) @@ -1873,9 +1865,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx) tx->vector_flags = lan743x_intr_get_vector_flags(adapter, INT_BIT_DMA_TX_ (tx->channel_number)); - netif_napi_add(adapter->netdev, - &tx->napi, lan743x_tx_napi_poll, - tx->ring_size - 1); + netif_tx_napi_add(adapter->netdev, + &tx->napi, lan743x_tx_napi_poll, + tx->ring_size - 1); 
napi_enable(&tx->napi); data = 0; @@ -1906,7 +1898,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) return ((++index) % rx->ring_size); } -static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) +static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx) +{ + int length = 0; + + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); + return __netdev_alloc_skb(rx->adapter->netdev, + length, GFP_ATOMIC | GFP_DMA); +} + +static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, + struct sk_buff *skb) { struct lan743x_rx_buffer_info *buffer_info; struct lan743x_rx_descriptor *descriptor; @@ -1915,9 +1917,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); descriptor = &rx->ring_cpu_ptr[index]; buffer_info = &rx->buffer_info[index]; - buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev, - length, - GFP_ATOMIC | GFP_DMA); + buffer_info->skb = skb; if (!(buffer_info->skb)) return -ENOMEM; buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev, @@ -2064,8 +2064,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) /* packet is available */ if (first_index == last_index) { /* single buffer packet */ + struct sk_buff *new_skb = NULL; int packet_length; + new_skb = lan743x_rx_allocate_skb(rx); + if (!new_skb) { + /* failed to allocate next skb. + * Memory is very low. + * Drop this packet and reuse buffer. + */ + lan743x_rx_reuse_ring_element(rx, first_index); + goto process_extension; + } + buffer_info = &rx->buffer_info[first_index]; skb = buffer_info->skb; descriptor = &rx->ring_cpu_ptr[first_index]; @@ -2085,7 +2096,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) skb_put(skb, packet_length - 4); skb->protocol = eth_type_trans(skb, rx->adapter->netdev); - lan743x_rx_allocate_ring_element(rx, first_index); + lan743x_rx_init_ring_element(rx, first_index, new_skb); } else { int index = first_index; @@ -2098,26 +2109,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) if (first_index <= last_index) { while ((index >= first_index) && (index <= last_index)) { - lan743x_rx_release_ring_element(rx, - index); - lan743x_rx_allocate_ring_element(rx, - index); + lan743x_rx_reuse_ring_element(rx, + index); index = lan743x_rx_next_index(rx, index); } } else { while ((index >= first_index) || (index <= last_index)) { - lan743x_rx_release_ring_element(rx, - index); - lan743x_rx_allocate_ring_element(rx, - index); + lan743x_rx_reuse_ring_element(rx, + index); index = lan743x_rx_next_index(rx, index); } } } +process_extension: if (extension_index >= 0) { descriptor = &rx->ring_cpu_ptr[extension_index]; buffer_info = &rx->buffer_info[extension_index]; @@ -2294,7 +2302,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) rx->last_head = 0; for (index = 0; index < rx->ring_size; index++) { - ret = lan743x_rx_allocate_ring_element(rx, index); + struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx); + + ret = lan743x_rx_init_ring_element(rx, index, new_skb); if (ret) goto cleanup; } @@ -2722,8 +2732,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "pci-%s", pci_name(adapter->pdev)); - /* set to internal PHY id */ - adapter->mdiobus->phy_mask = ~(u32)BIT(1); + if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_) + /* LAN7430 uses internal phy at address 1 */ + adapter->mdiobus->phy_mask = 
~(u32)BIT(1); /* register mdiobus */ ret = mdiobus_register(adapter->mdiobus); @@ -3020,6 +3031,7 @@ static const struct dev_pm_ops lan743x_pm_ops = { static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) }, { 0, } }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 0e82b6368798a2cf02cfef922b4feffa2ff779d1..2d6eea18973e8f4c8b5c733d825a5a0cc8492c39 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -548,6 +548,7 @@ struct lan743x_adapter; /* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */ #define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR #define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430) +#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431) #define PCI_CONFIG_LENGTH (0x1000) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ed4e298cd823977c663c4cd63be8286a3d55ea6e..a29a6a618110e0a2f63b3f3edaa14cc1881e43b1 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -253,8 +253,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, port->pvid = vid; /* Untagged egress vlan clasification */ - if (untagged) + if (untagged && port->vid != vid) { + if (port->vid) { + dev_err(ocelot->dev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } port->vid = vid; + } ocelot_vlan_port_apply(ocelot, port); @@ -605,7 +612,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port, struct netdev_hw_addr *hw_addr) { struct ocelot *ocelot = port->ocelot; - struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); + struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC); if (!ha) return -ENOMEM; @@ -733,7 +740,7 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, - ENTRYTYPE_NORMAL); + ENTRYTYPE_LOCKED); } static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], @@ -886,7 +893,7 @@ static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { - return ocelot_vlan_vid_add(dev, vid, false, true); + return ocelot_vlan_vid_add(dev, vid, false, false); } static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, @@ -1506,9 +1513,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev, struct ocelot_port *ocelot_port = netdev_priv(dev); int err = 0; - if (!ocelot_netdevice_dev_check(dev)) - return 0; - switch (event) { case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(info->upper_dev)) { @@ -1545,12 +1549,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (!ocelot_netdevice_dev_check(dev)) + return 0; + if (event == NETDEV_PRECHANGEUPPER && netif_is_lag_master(info->upper_dev)) { struct netdev_lag_upper_info *lag_upper_info = info->upper_info; struct netlink_ext_ack *extack; - if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + if (lag_upper_info && + lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { extack = netdev_notifier_info_to_extack(&info->info); NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); @@ -1767,6 +1775,7 @@ EXPORT_SYMBOL(ocelot_init); void ocelot_deinit(struct ocelot *ocelot) { + 
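	/* Editorial aside on the cancellation added below: ocelot's statistics
	 * collection runs as self-rearming delayed work, so it is presumably
	 * cancelled here first to ensure no stats_work instance is still
	 * pending when the workqueue is destroyed and the stats lock is torn
	 * down immediately afterwards.
	 */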
cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); } diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 616bec30dfa3fe4b31a1295ef2239a57ac2cc36d..3d8c6f38e76b9e997f3c5a8c4f3f807f13a96ab0 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -541,7 +541,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); #define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) #define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) -void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, u32 offset); #define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) #define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index b2d2ec8c11e2d15e0562ca89c2f76d58bc4e69c3..6789eed78ff70a29e96a2179f066bd028b066711 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * setup (if available). */ status = myri10ge_request_irq(mgp); if (status != 0) - goto abort_with_firmware; + goto abort_with_slices; myri10ge_free_irq(mgp); /* Save configuration space to be restored if the diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 398011c87643c7f15a1da8fc2c72175213c006b0..28f76566470276b9e43a5711f6d9e8e6fa995d9a 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -807,7 +807,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_date *fw_date = &hw_info->fw_date; struct vxge_hw_device_version *flash_version = &hw_info->flash_version; struct vxge_hw_device_date *flash_date = &hw_info->flash_date; - u64 data0, data1 = 0, steer_ctrl = 0; + u64 data0 = 0, data1 = 0, steer_ctrl = 0; enum vxge_hw_status status; status = vxge_hw_vpath_fw_api(vpath, @@ -2365,6 +2365,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + memblock = NULL; goto exit; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index eff57f7d056a44ccb51caed4d1bd617ff7fc3319..4e18d95e548f1680f27bac98b8f1fa206607e546 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1288,15 +1288,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum alu_op alu_op, bool skip) + enum alu_op alu_op) { const struct bpf_insn *insn = &meta->insn; - if (skip) { - meta->skip = true; - return 0; - } - wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); @@ -2306,7 +2301,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta 
*meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); } static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2316,7 +2311,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); } static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2326,7 +2321,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); } static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2336,7 +2331,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); } static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2346,7 +2341,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); } static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index dbd00982fd2b698bfa5bedc1572583890e75fe63..2134045e14c36a88c51717b7f4b36f07bbaa5c83 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -206,6 +206,11 @@ enum nfp_bpf_map_use { NFP_MAP_USE_ATOMIC_CNT, }; +struct nfp_bpf_map_word { + unsigned char type :4; + unsigned char non_zero_update :1; +}; + /** * struct nfp_bpf_map - private per-map data attached to BPF maps for offload * @offmap: pointer to the offloaded BPF map @@ -219,7 +224,7 @@ struct nfp_bpf_map { struct nfp_app_bpf *bpf; u32 tid; struct list_head l; - enum nfp_bpf_map_use use_map[]; + struct nfp_bpf_map_word use_map[]; }; struct nfp_bpf_neutral_map { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 1ccd6371a15b5c3c0a142909d63b4e0796418fd4..6140e4650b71cc249d82f84fefc1fda1efe26b56 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -299,10 +299,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value) unsigned int i; for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) - if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT) + if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT) word[i] = (__force u32)cpu_to_be32(word[i]); } +/* Mark value as unsafely initialized in case it becomes atomic later + * and we didn't byte swap something non-byte swap neutral. 
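 * (Editorial aside, not part of the patch: "byte swap neutral" here means the
 *  word reads the same in CPU and big-endian order, i.e. word == cpu_to_be32(word).
 *  On a little-endian host 0x00000000 is neutral, while 0x00000001 byte swaps to
 *  0x01000000, so the check below flags that word by setting non_zero_update.)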
+ */ +static void +nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value) +{ + u32 *word = value; + unsigned int i; + + for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) + if (nfp_map->use_map[i].type == NFP_MAP_UNUSED && + word[i] != (__force u32)cpu_to_be32(word[i])) + nfp_map->use_map[i].non_zero_update = 1; +} + static int nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap, void *key, void *value) @@ -322,6 +337,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap, void *key, void *value, u64 flags) { nfp_map_bpf_byte_swap(offmap->dev_priv, value); + nfp_map_bpf_byte_swap_record(offmap->dev_priv, value); return nfp_bpf_ctrl_update_entry(offmap, key, value, flags); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a6e9248669e141d4d19f0536eed52adf6ef78d90..db7e186dae56d4c0dda0d3ba59c5408867ef7371 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -108,6 +108,46 @@ nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, nfp_prog->adjust_head_location = location; } +static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; + const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3; + struct bpf_offloaded_map *offmap; + struct bpf_func_state *state; + struct nfp_bpf_map *nfp_map; + int off, i; + + state = env->cur_state->frame[reg3->frameno]; + + /* We need to record each time update happens with non-zero words, + * in case such word is used in atomic operations. + * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before. + */ + + offmap = map_to_offmap(reg1->map_ptr); + nfp_map = offmap->dev_priv; + off = reg3->off + reg3->var_off.value; + + for (i = 0; i < offmap->map.value_size; i++) { + struct bpf_stack_state *stack_entry; + unsigned int soff; + + soff = -(off + i) - 1; + stack_entry = &state->stack[soff / BPF_REG_SIZE]; + if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO) + continue; + + if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) { + pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n", + i, soff); + return false; + } + nfp_map->use_map[i / 4].non_zero_update = 1; + } + + return true; +} + static int nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env, const struct bpf_reg_state *reg, @@ -198,7 +238,8 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, bpf->helpers.map_update, reg1) || !nfp_bpf_stack_arg_ok("map_update", env, reg2, meta->func_id ? 
&meta->arg2 : NULL) || - !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL)) + !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) || + !nfp_bpf_map_update_value_ok(env)) return -EOPNOTSUPP; break; @@ -376,15 +417,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env, struct nfp_bpf_map *nfp_map, unsigned int off, enum nfp_bpf_map_use use) { - if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED && - nfp_map->use_map[off / 4] != use) { + if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED && + nfp_map->use_map[off / 4].type != use) { pr_vlog(env, "map value use type conflict %s vs %s off: %u\n", - nfp_bpf_map_use_name(nfp_map->use_map[off / 4]), + nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type), nfp_bpf_map_use_name(use), off); return -EOPNOTSUPP; } - nfp_map->use_map[off / 4] = use; + if (nfp_map->use_map[off / 4].non_zero_update && + use == NFP_MAP_USE_ATOMIC_CNT) { + pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n", + off); + return -EOPNOTSUPP; + } + + nfp_map->use_map[off / 4].type = use; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 7a1e9cd9cc62cd539c3833866750efe4bdea0ad4..777b99416062a01a9254736cbf2aaf293244fad3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -80,8 +80,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, tmp_push_vlan_tci = FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | - NFP_FL_PUSH_VLAN_CFI; + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)); push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 325954b829c8429ad9d83b4c1aa12ea74e258afb..9b018321e24e4d1af89aeefe0c0b30634466dfc2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -55,7 +55,7 @@ #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) -#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) +#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) @@ -109,7 +109,6 @@ #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) -#define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) /* LAG ports */ diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index bf10598f66ae056a488074592ec4551fada4c319..1b5e0cef977134f8ec8fbfebdadf22ceffa2ac13 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -336,6 +336,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work) acti_netdevs = kmalloc_array(entry->slave_cnt, sizeof(*acti_netdevs), GFP_KERNEL); + if (!acti_netdevs) { + schedule_delayed_work(&lag->work, + NFP_FL_LAG_DELAY); + continue; + } /* Include sanity check in the loop. 
It may be that a bond has * changed between processing the last notification and the diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e57d23746585f7abe1d7d52e0045fde2b2839852..c19e88efe958df2fce544a7995e5f9690d78e958 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -259,6 +259,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -271,6 +272,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } @@ -291,6 +293,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -373,6 +376,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -382,11 +386,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -399,6 +405,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 17acb8cc60440ee4272b642ccf62b62df90ef019..b99d55cf81f122de883a171759e350e3bcbe99ff 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -56,14 +56,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, FLOW_DISSECTOR_KEY_VLAN, target); /* Populate the tci field. */ - if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - flow_vlan->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - flow_vlan->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; - frame->tci = cpu_to_be16(tmp_tci); - } + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + flow_vlan->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + flow_vlan->vlan_id); + frame->tci = cpu_to_be16(tmp_tci); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bd19624f10cf48e3d7187f6721ec6b3eecac98da..90148dbb261b68854aad0fe462d84135a27abb26 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -375,13 +375,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) return -EOPNOTSUPP; - /* We need to store TCP flags in the IPv4 key space, thus - * we need to ensure we include a IPv4 key layer if we have - * not done so already. 
+ /* We need to store TCP flags in the either the IPv4 or IPv6 key + * space, thus we need to ensure we include a IPv4/IPv6 key + * layer if we have not done so already. */ - if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) { - key_layer |= NFP_FLOWER_LAYER_IPV4; - key_size += sizeof(struct nfp_flower_ipv4); + if (!key_basic) + return -EOPNOTSUPP; + + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { + switch (key_basic->n_proto) { + case cpu_to_be16(ETH_P_IP): + key_layer |= NFP_FLOWER_LAYER_IPV4; + key_size += sizeof(struct nfp_flower_ipv4); + break; + + case cpu_to_be16(ETH_P_IPV6): + key_layer |= NFP_FLOWER_LAYER_IPV6; + key_size += sizeof(struct nfp_flower_ipv6); + break; + + default: + return -EOPNOTSUPP; + } } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 382bb93cb0900f7e83e0cc34277379c3b36468bd..ff5c74120c123bb66f3a790a2bb33c7be7350586 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -194,6 +194,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) return; } + rcu_read_lock(); for (i = 0; i < count; i++) { ipv4_addr = payload->tun_info[i].ipv4; port = be32_to_cpu(payload->tun_info[i].egress_port); @@ -209,6 +210,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) neigh_event_send(n, NULL); neigh_release(n); } + rcu_read_unlock(); } static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) @@ -404,9 +406,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) payload = nfp_flower_cmsg_get_data(skb); + rcu_read_lock(); netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port)); if (!netdev) - goto route_fail_warning; + goto fail_rcu_unlock; flow.daddr = payload->ipv4_addr; flow.flowi4_proto = IPPROTO_UDP; @@ -416,21 +419,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) rt = ip_route_output_key(dev_net(netdev), &flow); err = PTR_ERR_OR_ZERO(rt); if (err) - goto route_fail_warning; + goto fail_rcu_unlock; #else - goto route_fail_warning; + goto fail_rcu_unlock; #endif /* Get the neighbour entry for the lookup */ n = dst_neigh_lookup(&rt->dst, &flow.daddr); ip_rt_put(rt); if (!n) - goto route_fail_warning; - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL); + goto fail_rcu_unlock; + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); neigh_release(n); + rcu_read_unlock(); return; -route_fail_warning: +fail_rcu_unlock: + rcu_read_unlock(); nfp_flower_cmsg_warn(app, "Requested route not found.\n"); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index db463e20a876cd1bb820f8e7335cbd7fbb24246e..e9a4179e7e4867cf1f47a46fe861d1a327f3228f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -96,6 +96,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; if (count < 2) @@ -114,8 +115,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, - eth_port.port_lanes / count); + /* Special case the 100G CXP -> 2x40G split */ + lanes = eth_port.port_lanes / count; + if (eth_port.lanes == 10 && count == 2) + lanes = 8 / count; + + ret = 
nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); @@ -128,6 +133,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; + unsigned int lanes; int ret; mutex_lock(&pf->lock); @@ -143,7 +149,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, goto out; } - ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes); + /* Special case the 100G CXP -> 2x40G unsplit */ + lanes = eth_port.port_lanes; + if (eth_port.port_lanes == 8) + lanes = 10; + + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes); out: mutex_unlock(&pf->lock); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c6d29fdbb880f1964847e674dc512d2f0311f0b7..d288c7eebacd8af046f23ef3fd3c68a28c34f998 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2187,9 +2187,13 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->txds) + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!tx_ring->txds) { + netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + tx_ring->cnt); goto err_alloc; + } tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), GFP_KERNEL); @@ -2341,9 +2345,13 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->rxds) + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rx_ring->rxds) { + netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + rx_ring->cnt); goto err_alloc; + } rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), GFP_KERNEL); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 18a09cdcd9c6ff0247a625d71f46395f6a55aa6b..aa5869eb2e3f4a2d12c39d34af5cf0aaf61031b6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -225,7 +225,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) ret = dev_queue_xmit(skb); nfp_repr_inc_tx_stats(netdev, len, ret); - return ret; + return NETDEV_TX_OK; } static int nfp_repr_stop(struct net_device *netdev) @@ -329,6 +329,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); + netdev->priv_flags |= IFF_DISABLE_NETPOLL; + if (nfp_app_has_tc(app)) { netdev->features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_HW_TC; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 73de57a09800d7d0c482e64d22995a3aa6c06613..f7b354e796cb65886644ad92c615a0f6a9f76c25 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -803,8 +803,10 @@ int 
nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) return -ENOMEM; cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) + if (!cache) { + nfp_cpp_area_free(area); return -ENOMEM; + } cache->id = 0; cache->addr = 0; @@ -872,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, } /* Adjust the start address to be cache size aligned */ - cache->id = id; cache->addr = addr & ~(u64)(cache->size - 1); /* Re-init to the new ID and address */ @@ -892,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, return NULL; } + cache->id = id; + exit: /* Adjust offset */ *offset = addr - cache->addr; diff --git a/drivers/net/ethernet/netswift/Kconfig b/drivers/net/ethernet/netswift/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..58b0cfa917c63ee6de516951e67fa1e1133407db --- /dev/null +++ b/drivers/net/ethernet/netswift/Kconfig @@ -0,0 +1,86 @@ +# +# Netswift network device configuration +# + +config NET_VENDOR_NETSWIFT + bool "netswift devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Netswift NICs. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_NETSWIFT + +source "drivers/net/ethernet/netswift/txgbe/Kconfig" + +config NGBE + tristate "Netswift PCI-Express Gigabit Ethernet support" + depends on PCI + imply PTP_1588_CLOCK + ---help--- + This driver supports Netswift gigabit ethernet adapters. + For more information on how to identify your adapter, go + to + + To compile this driver as a module, choose M here. The module + will be called ngbe. + +config NGBE_HWMON + bool "Netswift PCI-Express Gigabit adapters HWMON support" + default n + depends on NGBE && HWMON && !(NGBE=y && HWMON=m) + ---help--- + Say Y if you want to expose thermal sensor data on these devices. + + If unsure, say N. + +config NGBE_PROCFS + bool "Netswift PCI-Express Gigabit adapters procfs support" + default n + depends on NGBE && !NGBE_SYSFS + ---help--- + Say Y if you want to setup procfs for these devices. + + If unsure, say N. + +config NGBE_NO_LLI + bool "Netswift PCI-Express Gigabit adapters NO Low Latency Interrupt support" + default n + depends on NGBE + ---help--- + Say N if you want to enable LLI for these devices. + + If unsure, say Y. + +config NGBE_DEBUG_FS + bool "Netswift PCI-Express Gigabit adapters debugfs support" + default n + depends on NGBE + ---help--- + Say Y if you want to setup debugfs for these devices. + + If unsure, say N. + +config NGBE_POLL_LINK_STATUS + bool "Netswift PCI-Express Gigabit adapters poll mode support" + default n + depends on NGBE + ---help--- + Say Y if you want to turn these devices to poll mode instead of interrupt-trigged TX/RX. + + If unsure, say N. + +config NGBE_SYSFS + bool "Netswift PCI-Express Gigabit adapters sysfs support" + default n + depends on NGBE + ---help--- + Say Y if you want to setup sysfs for these devices. + + If unsure, say N. + +endif # NET_VENDOR_NETSWIFT diff --git a/drivers/net/ethernet/netswift/Makefile b/drivers/net/ethernet/netswift/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5690b6392ce2f19e5db4c1799d0b0d71dde00a68 --- /dev/null +++ b/drivers/net/ethernet/netswift/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Netswift network device drivers. 
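As a usage note for the Kconfig block above: these options are selected like any other vendor Ethernet block. A hypothetical .config fragment using only the symbols introduced above (values purely illustrative; NGBE_HWMON additionally assumes CONFIG_HWMON is enabled) might look like:

CONFIG_NET_VENDOR_NETSWIFT=y
CONFIG_NGBE=m
CONFIG_NGBE_HWMON=y
# CONFIG_NGBE_PROCFS is not set
# CONFIG_NGBE_DEBUG_FS is not set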
+# + +obj-$(CONFIG_TXGBE) += txgbe/ +obj-$(CONFIG_NGBE) += ngbe/ diff --git a/drivers/net/ethernet/netswift/ngbe/Makefile b/drivers/net/ethernet/netswift/ngbe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..dd6615eee4ede360fef598a8987d414883d47784 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. +# +# Makefile for the Netswift Gigabit PCI Express ethernet driver +# + +obj-$(CONFIG_NGBE) += ngbe.o + +ngbe-objs := ngbe_main.o ngbe_ethtool.o \ + ngbe_hw.o ngbe_phy.o ngbe_sriov.o \ + ngbe_mbx.o ngbe_pcierr.o ngbe_param.o ngbe_lib.o ngbe_ptp.o + +ngbe-$(CONFIG_NGBE_HWMON) += ngbe_sysfs.o +ngbe-$(CONFIG_NGBE_DEBUG_FS) += ngbe_debugfs.o +ngbe-$(CONFIG_NGBE_PROCFS) += ngbe_procfs.o +ngbe-$(CONFIG_NGBE_SYSFS) += ngbe_sysfs.o diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe.h b/drivers/net/ethernet/netswift/ngbe/ngbe.h new file mode 100644 index 0000000000000000000000000000000000000000..3c7d3ec96abd7a8423ddf8cce4b20ac06bcd628c --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe.h @@ -0,0 +1,1109 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _NGBE_H_ +#define _NGBE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe_type.h" + +/* Ether Types */ +#define NGBE_ETH_P_LLDP 0x88CC +#define NGBE_ETH_P_CNM 0x22E7 + +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 + +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 + +#define NGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define NGBE_MIN_FCRTL 0x40 +#define NGBE_MAX_FCRTL 0x7FF80 +#define NGBE_MIN_FCRTH 0x600 +#define NGBE_MAX_FCRTH 0x7FFF0 +#define NGBE_DEFAULT_FCPAUSE 0xFFFF +#define NGBE_MIN_FCPAUSE 0 +#define NGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define NGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define NGBE_RXBUFFER_2K 2048 +#define NGBE_RXBUFFER_3K 3072 +#define NGBE_RXBUFFER_4K 4096 +#define NGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. 
+ */ +#define NGBE_RX_HDR_SIZE NGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define NGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define NGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +enum ngbe_tx_flags { + /* cmd_type flags */ + NGBE_TX_FLAGS_HW_VLAN = 0x01, + NGBE_TX_FLAGS_TSO = 0x02, + NGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + NGBE_TX_FLAGS_CC = 0x08, + NGBE_TX_FLAGS_IPV4 = 0x10, + NGBE_TX_FLAGS_CSUM = 0x20, + NGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + NGBE_TX_FLAGS_LINKSEC = 0x200, + NGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + NGBE_TX_FLAGS_SW_VLAN = 0x40, + NGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define NGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define NGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define NGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define NGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define NGBE_MAX_RX_DESC_POLL 10 + +#define NGBE_MAX_VF_MC_ENTRIES 30 +#define NGBE_MAX_VF_FUNCTIONS 8 +#define MAX_EMULATION_MAC_ADDRS 16 +#define NGBE_MAX_PF_MACVLANS 15 +#define NGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. */ +#ifdef CONFIG_PCI_IOV +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#else +#define VMDQ_P(p) (p) +#endif + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = rd32(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = rd32(hw, reg_lsb); \ + u64 current_counter_msb = rd32(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 __iomem *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[NGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define NGBE_MAX_TXD_PWR 14 +#define NGBE_MAX_DATA_PER_TXD (1 << NGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), NGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ngbe_tx_buffer { + union ngbe_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ngbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +}; + +struct ngbe_queue_stats { + u64 packets; + u64 bytes; +}; + +struct ngbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ngbe_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define NGBE_TS_HDR_LEN 8 +enum ngbe_ring_state_t { + __NGBE_RX_3K_BUFFER, + __NGBE_RX_BUILD_SKB_ENABLED, + __NGBE_TX_XPS_INIT_DONE, + __NGBE_TX_DETECT_HANG, + __NGBE_HANG_CHECK_ARMED, + __NGBE_RX_HS_ENABLED, +}; + +struct ngbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct ngbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__NGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + + +#define ring_is_hs_enabled(ring) \ + test_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__NGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__NGBE_TX_DETECT_HANG, &(ring)->state) + +struct ngbe_ring { + struct ngbe_ring *next; /* pointer to next ring in q_vector */ + struct ngbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct ngbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct ngbe_tx_buffer *tx_buffer_info; + struct ngbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + + unsigned long last_rx_timestamp; + + u16 rx_buf_len; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ngbe_queue_stats stats; + struct u64_stats_sync syncp; + + union { + struct ngbe_tx_queue_stats tx_stats; + struct ngbe_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +enum ngbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TGB_MAX_RX_QUEUES 16 +#define NGBE_MAX_TX_QUEUES 16 + +#define NGBE_MAX_RSS_INDICES 8 +#define NGBE_MAX_VMDQ_INDICES 8 +#define NGBE_MAX_FDIR_INDICES 8 +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES 8 +#define NGBE_MAX_L2A_QUEUES 4 +#define NGBE_BAD_L2A_QUEUE 3 + +#define NGBE_MAX_MACVLANS 8 + +struct ngbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int ngbe_rx_bufsz(struct ngbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(NGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + return NGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int ngbe_rx_pg_order(struct ngbe_ring __maybe_unused *ring) +{ + return 0; +} +#define ngbe_rx_pg_size(_ring) (PAGE_SIZE << ngbe_rx_pg_order(_ring)) + +struct ngbe_ring_container { + struct ngbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ngbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & NGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ +struct ngbe_q_vector { + struct ngbe_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ngbe_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + + /* for dynamic allocation of rings associated with this q_vector */ + struct ngbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef CONFIG_NGBE_HWMON + +#define NGBE_HWMON_TYPE_TEMP 0 +#define NGBE_HWMON_TYPE_ALARMTHRESH 1 +#define NGBE_HWMON_TYPE_DALARMTHRESH 2 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor; + char name[19]; +}; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; +}; +#endif /* CONFIG_NGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define NGBE_70K_ITR 57 +#define NGBE_20K_ITR 200 +#define NGBE_4K_ITR 1024 +#define NGBE_7K_ITR 595 + +/* ngbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ngbe_test_staterr(union ngbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* ngbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 ngbe_desc_unused(struct ngbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define NGBE_RX_DESC(R, i) \ + (&(((union ngbe_rx_desc *)((R)->desc))[i])) +#define NGBE_TX_DESC(R, i) \ + (&(((union ngbe_tx_desc *)((R)->desc))[i])) +#define NGBE_TX_CTXTDESC(R, i) \ + (&(((struct ngbe_tx_context_desc *)((R)->desc))[i])) + +#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define NGBE_MAX_MSIX_Q_VECTORS_EMERALD 9 + +struct ngbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define NGBE_MAC_STATE_DEFAULT 0x1 +#define NGBE_MAC_STATE_MODIFIED 0x2 +#define NGBE_MAC_STATE_IN_USE 0x4 + +#ifdef CONFIG_NGBE_PROCFS +struct ngbe_therm_proc_data { + struct ngbe_hw *hw; + struct ngbe_thermal_diode_data *sensor_data; +}; +#endif + +/* + * Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
+ */ +#define MAX_MSIX_Q_VECTORS NGBE_MAX_MSIX_Q_VECTORS_EMERALD +#define MAX_MSIX_COUNT NGBE_MAX_MSIX_VECTORS_EMERALD + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define NGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define NGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/** + * ngbe_adapter.flag + **/ +#define NGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define NGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define NGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define NGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#ifndef CONFIG_NGBE_NO_LLI +#define NGBE_FLAG_LLI_PUSH (u32)(1 << 4) +#endif + +#define NGBE_FLAG_TPH_ENABLED (u32)(1 << 6) +#define NGBE_FLAG_TPH_CAPABLE (u32)(1 << 7) +#define NGBE_FLAG_TPH_ENABLED_DATA (u32)(1 << 8) + +#define NGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define NGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define NGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define NGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define NGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define NGBE_FLAG_NEED_ANC_CHECK (u32)(1 << 14) +#define NGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define NGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#define NGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define NGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define NGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define NGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define NGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define NGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define NGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define NGBE_FLAG_NEED_ETH_PHY_RESET (u32)(1 << 28) +#define NGBE_FLAG_RX_HS_ENABLED (u32)(1 << 30) +#define NGBE_FLAG_LINKSEC_ENABLED (u32)(1 << 31) +#define NGBE_FLAG_IPSEC_ENABLED (u32)(1 << 5) + +/* preset defaults */ +#define NGBE_FLAGS_SP_INIT (NGBE_FLAG_MSI_CAPABLE \ + | NGBE_FLAG_MSIX_CAPABLE \ + | NGBE_FLAG_MQ_CAPABLE \ + | NGBE_FLAG_SRIOV_CAPABLE) + +/** + * ngbe_adapter.flag2 + **/ +#define NGBE_FLAG2_RSC_CAPABLE (1U << 0) +#define NGBE_FLAG2_RSC_ENABLED (1U << 1) +#define NGBE_FLAG2_TEMP_SENSOR_CAPABLE (1U << 3) +#define NGBE_FLAG2_TEMP_SENSOR_EVENT (1U << 4) +#define NGBE_FLAG2_SEARCH_FOR_SFP (1U << 5) +#define NGBE_FLAG2_SFP_NEEDS_RESET (1U << 6) +#define NGBE_FLAG2_PF_RESET_REQUESTED (1U << 7) +#define NGBE_FLAG2_FDIR_REQUIRES_REINIT (1U << 8) +#define NGBE_FLAG2_RSS_FIELD_IPV4_UDP (1U << 9) +#define NGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) +#define NGBE_FLAG2_RSS_ENABLED (1U << 12) +#define NGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) +#define NGBE_FLAG2_EEE_CAPABLE (1U << 14) +#define NGBE_FLAG2_EEE_ENABLED (1U << 15) +#define NGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) +#define NGBE_FLAG2_DEV_RESET_REQUESTED (1U << 18) +#define NGBE_FLAG2_RESET_INTR_RECEIVED (1U << 19) +#define NGBE_FLAG2_GLOBAL_RESET_REQUESTED (1U << 20) +#define NGBE_FLAG2_MNG_REG_ACCESS_DISABLED (1U << 22) +#define NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP (1U << 23) +#define NGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) + +#define NGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +enum ngbe_isb_idx { + NGBE_ISB_HEADER, + NGBE_ISB_MISC, + NGBE_ISB_VEC0, + NGBE_ISB_VEC1, + NGBE_ISB_MAX +}; + +/* board specific private data structure */ +struct ngbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; + u32 flags2; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + /* TX */ + struct ngbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct ngbe_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct ngbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +#ifdef HAVE_DCBNL_IEEE + struct ieee_pfc *ngbe_ieee_pfc; + struct ieee_ets *ngbe_ieee_ets; +#endif + enum ngbe_fc_mode last_lfc_mode; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct ngbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u64 test_icr; + struct ngbe_ring test_tx_ring; + struct ngbe_ring test_rx_ring; + + /* structs defined in ngbe_hw.h */ + struct ngbe_hw hw; + u16 msg_enable; + struct ngbe_hw_stats stats; +#ifndef CONFIG_NGBE_NO_LLI + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; +#endif /* CONFIG_NGBE_NO_LLI */ + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; +#ifdef CONFIG_NGBE_POLL_LINK_STATUS + struct timer_list link_check_timer; +#endif + u32 atr_sample_rate; + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; + u16 bridge_mode; + + char eeprom_id[32]; + u16 eeprom_cap; + bool netdev_registered; + u32 interrupt_event; + u32 led_reg; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp) (struct ngbe_adapter *); + + DECLARE_BITMAP(active_vfs, NGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; +#endif + struct ngbe_mac_addr *mac_table; + + __le16 vxlan_port; + __le16 geneve_port; + +#ifdef CONFIG_NGBE_SYSFS +#ifdef CONFIG_NGBE_HWMON + struct hwmon_buff ngbe_hwmon_buff; +#endif /* CONFIG_NGBE_HWMON */ +#else /* CONFIG_NGBE_SYSFS */ +#ifdef 
CONFIG_NGBE_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + u64 old_lsc; + struct proc_dir_entry *therm_dir; + struct ngbe_therm_proc_data therm_data; +#endif /* CONFIG_NGBE_PROCFS */ +#endif /* CONFIG_NGBE_SYSFS */ + +#ifdef CONFIG_NGBE_DEBUG_FS + struct dentry *ngbe_dbg_adapter; +#endif /* CONFIG_NGBE_DEBUG_FS */ + u8 default_up; + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define NGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; +#define NGBE_RSS_KEY_SIZE 40 + u32 rss_key[NGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[NGBE_ISB_MAX]; + + u32 hang_cnt; +}; + +static inline u32 ngbe_misc_isb(struct ngbe_adapter *adapter, + enum ngbe_isb_idx idx) +{ + u32 cur_tag = 0; + u32 cur_diff = 0; + + cur_tag = adapter->isb_mem[NGBE_ISB_HEADER]; + cur_diff = cur_tag - adapter->isb_tag[idx]; + + adapter->isb_tag[idx] = cur_tag; + + return cpu_to_le32(adapter->isb_mem[idx]); +} + +static inline u8 ngbe_max_rss_indices(struct ngbe_adapter *adapter) +{ + return NGBE_MAX_RSS_INDICES; +} + +enum ngbe_state_t { + __NGBE_TESTING, + __NGBE_RESETTING, + __NGBE_DOWN, + __NGBE_HANGING, + __NGBE_DISABLED, + __NGBE_REMOVING, + __NGBE_SERVICE_SCHED, + __NGBE_SERVICE_INITED, + __NGBE_IN_SFP_INIT, + __NGBE_PTP_RUNNING, + __NGBE_PTP_TX_IN_PROGRESS, +}; + +struct ngbe_cb { + dma_addr_t dma; + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; +#define NGBE_CB(skb) ((struct ngbe_cb *)(skb)->cb) + +/* ESX ngbe CIM IOCTL definition */ + +#ifdef CONFIG_NGBE_SYSFS +void ngbe_sysfs_exit(struct ngbe_adapter *adapter); +int ngbe_sysfs_init(struct ngbe_adapter *adapter); +#endif /* CONFIG_NGBE_SYSFS */ +#ifdef CONFIG_NGBE_PROCFS +void ngbe_procfs_exit(struct ngbe_adapter *adapter); +int ngbe_procfs_init(struct ngbe_adapter *adapter); +int ngbe_procfs_topdir_init(void); +void ngbe_procfs_topdir_exit(void); +#endif /* CONFIG_NGBE_PROCFS */ + +/* needed by ngbe_main.c */ +int ngbe_validate_mac_addr(u8 *mc_addr); +void ngbe_check_options(struct ngbe_adapter *adapter); +void ngbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by ngbe_ethtool.c */ +extern char ngbe_driver_name[]; +extern const char ngbe_driver_version[]; + +void ngbe_irq_disable(struct ngbe_adapter *adapter); +void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush); +int ngbe_open(struct net_device *netdev); +int ngbe_close(struct net_device *netdev); +void ngbe_up(struct ngbe_adapter *adapter); +void ngbe_down(struct ngbe_adapter *adapter); +void ngbe_reinit_locked(struct ngbe_adapter *adapter); +void ngbe_reset(struct ngbe_adapter *adapter); +void ngbe_set_ethtool_ops(struct net_device *netdev); +int ngbe_setup_rx_resources(struct ngbe_ring *); +int ngbe_setup_tx_resources(struct ngbe_ring *); +void ngbe_free_rx_resources(struct ngbe_ring *); +void ngbe_free_tx_resources(struct ngbe_ring *); +void ngbe_configure_rx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_configure_tx_ring(struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_update_stats(struct ngbe_adapter *adapter); +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter); +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter); +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter); 
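The struct ngbe_cb / NGBE_CB() pair defined above follows the standard skb control-buffer idiom: per-packet driver state is stashed in the 48-byte skb->cb scratch area rather than in a separate lookup structure. A minimal sketch of how such a macro is typically used; the example_* helper is hypothetical and not part of this driver.

static void example_stash_rx_mapping(struct sk_buff *skb, dma_addr_t dma)
{
	/* skb->cb is scratch space owned by whichever layer currently holds the skb */
	BUILD_BUG_ON(sizeof(struct ngbe_cb) > sizeof(skb->cb));

	NGBE_CB(skb)->dma = dma;
	NGBE_CB(skb)->page_released = false;
}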
+netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *, + struct ngbe_adapter *, + struct ngbe_ring *); +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *, + struct ngbe_tx_buffer *); +void ngbe_alloc_rx_buffers(struct ngbe_ring *, u16); + +void ngbe_set_rx_mode(struct net_device *netdev); +int ngbe_write_mc_addr_list(struct net_device *netdev); +int ngbe_setup_tc(struct net_device *dev, u8 tc); +void ngbe_tx_ctxtdesc(struct ngbe_ring *, u32, u32, u32, u32); +void ngbe_do_reset(struct net_device *netdev); +void ngbe_write_eitr(struct ngbe_q_vector *q_vector); +int ngbe_poll(struct napi_struct *napi, int budget); +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *); +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter); +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter); + +#ifdef CONFIG_NGBE_DEBUG_FS +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter); +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter); +void ngbe_dbg_init(void); +void ngbe_dbg_exit(void); +void ngbe_dump(struct ngbe_adapter *adapter); +#endif /* CONFIG_NGBE_DEBUG_FS */ + +static inline struct netdev_queue *txring_txq(const struct ngbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ngbe_wol_supported(struct ngbe_adapter *adapter); +int ngbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool); +void ngbe_full_sync_mac_table(struct ngbe_adapter *adapter); +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, + const u8 *addr, u16 pool); +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, + const u8 *addr, u16 pool); +int ngbe_available_rars(struct ngbe_adapter *adapter); +void ngbe_vlan_mode(struct net_device *, u32); + +void ngbe_ptp_init(struct ngbe_adapter *adapter); +void ngbe_ptp_stop(struct ngbe_adapter *adapter); +void ngbe_ptp_suspend(struct ngbe_adapter *adapter); +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter); +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb); +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr); +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter); +void ngbe_ptp_reset(struct ngbe_adapter *adapter); +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter); +#endif + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter); + +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter); +void ngbe_store_reta(struct ngbe_adapter *adapter); + +/** + * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. 
+ * disable an interrupt by writing to PX_IMS with the corresponding bit=1
+ * enable an interrupt by writing to PX_IMC with the corresponding bit=1
+ * trigger an interrupt by writing to PX_ICS with the corresponding bit=1
+ **/
+//#define NGBE_INTR_ALL (~0ULL)
+#define NGBE_INTR_ALL 0x1FF
+#define NGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors)
+#define NGBE_INTR_MISC_VMDQ(A) (1ULL << ((A)->num_q_vectors + (A)->ring_feature[RING_F_VMDQ].offset))
+#define NGBE_INTR_QALL(A) (NGBE_INTR_MISC(A) - 1)
+#define NGBE_INTR_Q(i) (1ULL << (i))
+static inline void ngbe_intr_enable(struct ngbe_hw *hw, u64 qmask)
+{
+	u32 mask;
+
+	mask = (qmask & 0xFFFFFFFF);
+	if (mask) {
+		wr32(hw, NGBE_PX_IMC, mask);
+	}
+}
+
+static inline void ngbe_intr_disable(struct ngbe_hw *hw, u64 qmask)
+{
+	u32 mask;
+
+	mask = (qmask & 0xFFFFFFFF);
+	if (mask)
+		wr32(hw, NGBE_PX_IMS, mask);
+}
+
+static inline void ngbe_intr_trigger(struct ngbe_hw *hw, u64 qmask)
+{
+	u32 mask;
+
+	mask = (qmask & 0xFFFFFFFF);
+	if (mask)
+		wr32(hw, NGBE_PX_ICS, mask);
+}
+
+#define NGBE_RING_SIZE(R) ((R)->count < NGBE_MAX_TXD ? (R)->count / 128 : 0)
+
+
+#define NGBE_CPU_TO_BE16(_x) cpu_to_be16(_x)
+#define NGBE_BE16_TO_CPU(_x) be16_to_cpu(_x)
+#define NGBE_CPU_TO_BE32(_x) cpu_to_be32(_x)
+#define NGBE_BE32_TO_CPU(_x) be32_to_cpu(_x)
+
+#define msec_delay(_x) msleep(_x)
+
+#define usec_delay(_x) udelay(_x)
+
+#define STATIC static
+
+#define NGBE_NAME "ngbe"
+
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel NGBE_NAME ": %s: %s: " fmt, \
+		adapter->netdev->name, \
+		__func__, ## args)))
+
+#define ngbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__)
+#define ngbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__)
+#define ngbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__)
+#define ngbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__)
+#define ngbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__)
+#define ngbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__)
+#define ngbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__)
+#define ngbe_print(fmt, ...) printk(KERN_DEBUG fmt, ## __VA_ARGS__)
+#define ngbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__)
+
+#define ngbe_debug(fmt, ...) do {} while (0)
+
+#define ASSERT(_x) do {} while (0)
+#define DEBUGOUT(S) do {} while (0)
+#define DEBUGOUT1(S, A...) do {} while (0)
+#define DEBUGOUT2(S, A...) do {} while (0)
+#define DEBUGOUT3(S, A...) do {} while (0)
+#define DEBUGOUT4(S, A...) do {} while (0)
+#define DEBUGOUT5(S, A...) do {} while (0)
+#define DEBUGOUT6(S, A...) do {} while (0)
+#define DEBUGFUNC(fmt, ...) do {} while (0)
+
+#define NGBE_SFP_DETECT_RETRIES 2
+
+struct ngbe_hw;
+struct ngbe_msg {
+	u16 msg_enable;
+};
+struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw);
+struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw);
+
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+
+#define hw_dbg(hw, format, arg...) \
+	netdev_dbg(ngbe_hw_to_netdev(hw), format, ## arg)
+#define hw_err(hw, format, arg...) \
+	netdev_err(ngbe_hw_to_netdev(hw), format, ## arg)
+#define e_dev_info(format, arg...) \
+	dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_warn(format, arg...) \
+	dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_err(format, arg...) \
+	dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_notice(format, arg...) \
+	dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dbg(msglvl, format, arg...) \
+	netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+	netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+	netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+	netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+
+#define NGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define NGBE_FAILED_READ_CFG_WORD 0xffffU
+#define NGBE_FAILED_READ_CFG_BYTE 0xffU
+
+extern u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet);
+extern u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg);
+extern void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value);
+
+#define NGBE_READ_PCIE_WORD ngbe_read_pci_cfg_word
+#define NGBE_WRITE_PCIE_WORD ngbe_write_pci_cfg_word
+#define NGBE_R32_Q(h, r) ngbe_read_reg(h, r, true)
+
+#ifndef writeq
+#define writeq(val, addr) do { writel((u32) (val), addr); \
+		writel((u32) (val >> 32), (addr + 4)); \
+	} while (0)
+#endif
+
+#define NGBE_EEPROM_GRANT_ATTEMPS 100
+#define NGBE_HTONL(_i) htonl(_i)
+#define NGBE_NTOHL(_i) ntohl(_i)
+#define NGBE_NTOHS(_i) ntohs(_i)
+#define NGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
+#define NGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
+
+enum {
+	NGBE_ERROR_SOFTWARE,
+	NGBE_ERROR_POLLING,
+	NGBE_ERROR_INVALID_STATE,
+	NGBE_ERROR_UNSUPPORTED,
+	NGBE_ERROR_ARGUMENT,
+	NGBE_ERROR_CAUTION,
+};
+
+#define ERROR_REPORT(level, format, arg...) do { \
+	switch (level) { \
+	case NGBE_ERROR_SOFTWARE: \
+	case NGBE_ERROR_CAUTION: \
+	case NGBE_ERROR_POLLING: \
+		netif_warn(ngbe_hw_to_msg(hw), drv, ngbe_hw_to_netdev(hw), \
+			   format, ## arg); \
+		break; \
+	case NGBE_ERROR_INVALID_STATE: \
+	case NGBE_ERROR_UNSUPPORTED: \
+	case NGBE_ERROR_ARGUMENT: \
+		netif_err(ngbe_hw_to_msg(hw), hw, ngbe_hw_to_netdev(hw), \
+			  format, ## arg); \
+		break; \
+	default: \
+		break; \
+	} \
+} while (0)
+
+#define ERROR_REPORT1 ERROR_REPORT
+#define ERROR_REPORT2 ERROR_REPORT
+#define ERROR_REPORT3 ERROR_REPORT
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) do { \
+	uninitialized_var(_p); \
+} while (0)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { \
+	uninitialized_var(_p); \
+	uninitialized_var(_q); \
+} while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
+	uninitialized_var(_p); \
+	uninitialized_var(_q); \
+	uninitialized_var(_r); \
+} while (0)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
+	uninitialized_var(_p); \
+	uninitialized_var(_q); \
+	uninitialized_var(_r); \
+	uninitialized_var(_s); \
+} while (0)
+#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p)
+
+#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_debugfs.c b/drivers/net/ethernet/netswift/ngbe/ngbe_debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..6710dff494796e04e829d9694326a1010b0ba406
--- /dev/null
+++ b/drivers/net/ethernet/netswift/ngbe/ngbe_debugfs.c
@@ -0,0 +1,764 @@
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include "ngbe.h" + +#ifdef CONFIG_NGBE_DEBUG_FS +#include +#include + +static struct dentry *ngbe_dbg_root; +static int ngbe_data_mode; + +#define NGBE_DATA_FUNC(dm) ((dm) & ~0xFFFF) +#define NGBE_DATA_ARGS(dm) ((dm) & 0xFFFF) +enum ngbe_data_func { + NGBE_FUNC_NONE = (0 << 16), + NGBE_FUNC_DUMP_BAR = (1 << 16), + NGBE_FUNC_DUMP_RDESC = (2 << 16), + NGBE_FUNC_DUMP_TDESC = (3 << 16), + NGBE_FUNC_FLASH_READ = (4 << 16), + NGBE_FUNC_FLASH_WRITE = (5 << 16), +}; + +/** + * data operation + **/ +ssize_t +ngbe_simple_read_from_pcibar(struct ngbe_adapter *adapter, int res, + void __user *buf, size_t size, loff_t *ppos) +{ + loff_t pos = *ppos; + u32 miss, len, limit = pci_resource_len(adapter->pdev, res); + + if (pos < 0) + return 0; + + limit = (pos + size <= limit ? pos + size : limit); + for (miss = 0; pos < limit && !miss; buf += len, pos += len) { + u32 val = 0, reg = round_down(pos, 4); + u32 off = pos - reg; + + len = (reg + 4 <= limit ? 4 - off : 4 - off - (limit - reg - 4)); + val = ngbe_rd32(adapter->io_addr + reg); + miss = copy_to_user(buf, &val + off, len); + } + + size = pos - *ppos - miss; + *ppos += size; + + return size; +} + +ssize_t +ngbe_simple_read_from_flash(struct ngbe_adapter *adapter, + void __user *buf, size_t size, loff_t *ppos) +{ + struct ngbe_hw *hw = &adapter->hw; + loff_t pos = *ppos; + size_t ret = 0; + loff_t rpos, rtail; + void __user *to = buf; + size_t available = adapter->hw.flash.dword_size << 2; + + if (pos < 0) + return -EINVAL; + if (pos >= available || !size) + return 0; + if (size > available - pos) + size = available - pos; + + rpos = round_up(pos, 4); + rtail = round_down(pos + size, 4); + if (rtail < rpos) + return 0; + + to += rpos - pos; + while (rpos <= rtail) { + u32 value = ngbe_rd32(adapter->io_addr + rpos); + if (TCALL(hw, flash.ops.write_buffer, rpos>>2, 1, &value)) { + ret = size; + break; + } + if (4 == copy_to_user(to, &value, 4)) { + ret = size; + break; + } + to += 4; + rpos += 4; + } + + if (ret == size) + return -EFAULT; + size -= ret; + *ppos = pos + size; + return size; +} + +ssize_t +ngbe_simple_write_to_flash(struct ngbe_adapter *adapter, + const void __user *from, size_t size, loff_t *ppos, size_t available) +{ + return size; +} + +static ssize_t +ngbe_dbg_data_ops_read(struct file *filp, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + rmb(); + + switch (func) { + case NGBE_FUNC_DUMP_BAR: { + u32 bar = NGBE_DATA_ARGS(ngbe_data_mode); + + return ngbe_simple_read_from_pcibar(adapter, bar, buffer, size, + ppos); + break; + } + case NGBE_FUNC_FLASH_READ: { + return ngbe_simple_read_from_flash(adapter, buffer, size, ppos); + break; + } + case NGBE_FUNC_DUMP_RDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); + + if (queue >= adapter->num_rx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->rx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + break; + } + case NGBE_FUNC_DUMP_TDESC: { + struct ngbe_ring *ring; + u32 queue = NGBE_DATA_ARGS(ngbe_data_mode); 
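+		/* NGBE_DATA_ARGS() extracts the queue index from the low 16
+		 * bits of ngbe_data_mode, as written by the "dump tdesc <n>"
+		 * command parsed in ngbe_dbg_reg_ops_write().
+		 */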
+ + if (queue >= adapter->num_tx_queues) + return 0; + queue += VMDQ_P(0) * adapter->queues_per_pool; + ring = adapter->tx_ring[queue]; + + return simple_read_from_buffer(buffer, size, ppos, + ring->desc, ring->size); + break; + } + default: + break; + } + + return 0; +} + +static ssize_t +ngbe_dbg_data_ops_write(struct file *filp, + const char __user *buffer, + size_t size, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + u32 func = NGBE_DATA_FUNC(ngbe_data_mode); + + rmb(); + + switch (func) { + case NGBE_FUNC_FLASH_WRITE: { + u32 size = NGBE_DATA_ARGS(ngbe_data_mode); + + if (size > adapter->hw.flash.dword_size << 2) + size = adapter->hw.flash.dword_size << 2; + + return ngbe_simple_write_to_flash(adapter, buffer, size, ppos, size); + break; + } + default: + break; + } + + return size; +} +static struct file_operations ngbe_dbg_data_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_data_ops_read, + .write = ngbe_dbg_data_ops_write, +}; + +/** + * reg_ops operation + **/ +static char ngbe_dbg_reg_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *pc = ngbe_dbg_reg_ops_buf; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_reg_ops_buf, + sizeof(ngbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + pc[len] = '\0'; + + if (strncmp(pc, "dump", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 4; + pc += strspn(pc, " \t"); + + if (!strncmp(pc, "bar", 3)) { + pc += 3; + mode = NGBE_FUNC_DUMP_BAR; + } else if (!strncmp(pc, "rdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_RDESC; + } else if (!strncmp(pc, "tdesc", 5)) { + pc += 5; + mode = NGBE_FUNC_DUMP_TDESC; + } else { + ngbe_dump(adapter); + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(pc, "flash", 4) == 0) { + u32 mode = 0; + u16 args; + + pc += 5; + pc += strspn(pc, " \t"); + if (!strncmp(pc, "read", 3)) { + pc += 4; + mode = NGBE_FUNC_FLASH_READ; + } else if (!strncmp(pc, "write", 5)) { + pc += 5; + mode = NGBE_FUNC_FLASH_WRITE; + } + + if (mode && 1 == sscanf(pc, "%hu", &args)) { + mode |= args; + } + + ngbe_data_mode = mode; + } else if (strncmp(ngbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + wr32(&adapter->hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write \n"); + } + } else if (strncmp(ngbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ngbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = rd32(&adapter->hw, reg); + e_dev_info("read 
0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", ngbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations ngbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_reg_ops_read, + .write = ngbe_dbg_reg_ops_write, +}; + +/** + * netdev_ops operation + **/ +static char ngbe_dbg_netdev_ops_buf[256] = ""; +static ssize_t +ngbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: mode=0x%08x\n%s\n", + adapter->netdev->name, ngbe_data_mode, + ngbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static ssize_t +ngbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ngbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ngbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ngbe_dbg_netdev_ops_buf, + sizeof(ngbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ngbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ngbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ngbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static struct file_operations ngbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ngbe_dbg_netdev_ops_read, + .write = ngbe_dbg_netdev_ops_write, +}; + +/** + * ngbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ngbe_dbg_adapter_init(struct ngbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + + adapter->ngbe_dbg_adapter = debugfs_create_dir(name, ngbe_dbg_root); + if (!adapter->ngbe_dbg_adapter) { + e_dev_err("debugfs entry for %s failed\n", name); + return; + } + + pfile = debugfs_create_file("data", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_data_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ngbe_dbg_adapter, adapter, + &ngbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); +} + +/** + * ngbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ngbe_dbg_adapter_exit(struct ngbe_adapter *adapter) +{ + if (adapter->ngbe_dbg_adapter) + debugfs_remove_recursive(adapter->ngbe_dbg_adapter); + adapter->ngbe_dbg_adapter = NULL; +} + +/** + * ngbe_dbg_init - start up debugfs for the driver + **/ +void ngbe_dbg_init(void) +{ + 
ngbe_dbg_root = debugfs_create_dir(ngbe_driver_name, NULL); + if (ngbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ngbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ngbe_dbg_exit(void) +{ + debugfs_remove_recursive(ngbe_dbg_root); +} + +#endif /* CONFIG_NGBE_DEBUG_FS */ + +struct ngbe_reg_info { + u32 offset; + u32 length; + char *name; +}; + +static struct ngbe_reg_info ngbe_reg_info_tbl[] = { + + /* General Registers */ + {NGBE_CFG_PORT_CTL, 1, "CTRL"}, + {NGBE_CFG_PORT_ST, 1, "STATUS"}, + + /* RX Registers */ + {NGBE_PX_RR_CFG(0), 1, "SRRCTL"}, + {NGBE_PX_RR_RP(0), 1, "RDH"}, + {NGBE_PX_RR_WP(0), 1, "RDT"}, + {NGBE_PX_RR_CFG(0), 1, "RXDCTL"}, + {NGBE_PX_RR_BAL(0), 1, "RDBAL"}, + {NGBE_PX_RR_BAH(0), 1, "RDBAH"}, + + /* TX Registers */ + {NGBE_PX_TR_BAL(0), 1, "TDBAL"}, + {NGBE_PX_TR_BAH(0), 1, "TDBAH"}, + {NGBE_PX_TR_RP(0), 1, "TDH"}, + {NGBE_PX_TR_WP(0), 1, "TDT"}, + {NGBE_PX_TR_CFG(0), 1, "TXDCTL"}, + + /* MACVLAN */ + {NGBE_PSR_MAC_SWC_VM, 128, "PSR_MAC_SWC_VM"}, + {NGBE_PSR_MAC_SWC_AD_L, 32, "PSR_MAC_SWC_AD"}, + {NGBE_PSR_VLAN_TBL(0), 128, "PSR_VLAN_TBL"}, + + /* List Terminator */ + { .name = NULL } +}; + +/** + * ngbe_regdump - register printout routine + **/ +static void +ngbe_regdump(struct ngbe_hw *hw, struct ngbe_reg_info *reg_info) +{ + int i, n = 0; + u32 buffer[32*8]; + + switch (reg_info->offset) { + case NGBE_PSR_MAC_SWC_AD_L: + for (i = 0; i < reg_info->length; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + buffer[n++] = + rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + } + break; + default: + for (i = 0; i < reg_info->length; i++) { + buffer[n++] = rd32(hw, + reg_info->offset + 4*i); + } + break; + } + BUG_ON(n); +} + +/** + * ngbe_dump - Print registers, tx-rings and rx-rings + **/ +void ngbe_dump(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_reg_info *reg_info; + int n = 0; + struct ngbe_ring *tx_ring; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ngbe_ring *rx_ring; + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reg_info = ngbe_reg_info_tbl; reg_info->name; reg_info++) { + ngbe_regdump(hw, reg_info); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Transmit Descriptor (Read) + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD 
|DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * Transmit Descriptor (Write-Back) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = NGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX " + "%08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * Receive Descriptor (Read) + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Receive Descriptor (Write-Back) + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- 
Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = NGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & NGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->page_dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->page_dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ngbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/netswift/ngbe/ngbe_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..ca389a7ec4adee172e56b1aae786c58e2374f4d0 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_ethtool.c @@ -0,0 +1,2756 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +/* ethtool support for ngbe */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" + +#define NGBE_ALL_RAR_ENTRIES 16 + +struct ngbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct ngbe_stats ngbe_gstrings_net_stats[] = { + NGBE_NETDEV_STAT(rx_packets), + NGBE_NETDEV_STAT(tx_packets), + NGBE_NETDEV_STAT(rx_bytes), + NGBE_NETDEV_STAT(tx_bytes), + NGBE_NETDEV_STAT(rx_errors), + NGBE_NETDEV_STAT(tx_errors), + NGBE_NETDEV_STAT(rx_dropped), + NGBE_NETDEV_STAT(tx_dropped), + NGBE_NETDEV_STAT(multicast), + NGBE_NETDEV_STAT(collisions), + NGBE_NETDEV_STAT(rx_over_errors), + NGBE_NETDEV_STAT(rx_crc_errors), + NGBE_NETDEV_STAT(rx_frame_errors), + NGBE_NETDEV_STAT(rx_fifo_errors), + NGBE_NETDEV_STAT(rx_missed_errors), + NGBE_NETDEV_STAT(tx_aborted_errors), + NGBE_NETDEV_STAT(tx_carrier_errors), + NGBE_NETDEV_STAT(tx_fifo_errors), + NGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define NGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct ngbe_adapter, _stat), \ + .stat_offset = offsetof(struct ngbe_adapter, _stat) \ +} +static struct ngbe_stats ngbe_gstrings_stats[] = { + NGBE_STAT("rx_pkts_nic", stats.gprc), + NGBE_STAT("tx_pkts_nic", stats.gptc), + NGBE_STAT("rx_bytes_nic", stats.gorc), + NGBE_STAT("tx_bytes_nic", stats.gotc), + NGBE_STAT("lsc_int", lsc_int), + NGBE_STAT("tx_busy", tx_busy), + NGBE_STAT("non_eop_descs", non_eop_descs), + NGBE_STAT("broadcast", stats.bprc), + NGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + NGBE_STAT("tx_timeout_count", tx_timeout_count), + NGBE_STAT("tx_restart_queue", restart_queue), + NGBE_STAT("rx_long_length_count", stats.roc), + NGBE_STAT("rx_short_length_count", stats.ruc), + NGBE_STAT("tx_flow_control_xon", stats.lxontxc), + NGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + NGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + NGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + NGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + NGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + NGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + NGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + NGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + NGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + NGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + NGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + NGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), + NGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + NGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +/* ngbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
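+ * As a result the ethtool stats/strings arrays are sized as if there were
+ * as many Rx queues as Tx queues; ngbe_get_sset_count() and
+ * ngbe_get_drvinfo() subtract the unused queues' share when
+ * adapter->num_tx_queues is smaller than NGBE_NUM_RX_QUEUES.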
+ */ +#define NGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define NGBE_NUM_TX_QUEUES netdev->num_tx_queues + +#define NGBE_QUEUE_STATS_LEN ( \ + (NGBE_NUM_TX_QUEUES + NGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ngbe_queue_stats) / sizeof(u64))) +#define NGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ngbe_gstrings_stats) +#define NGBE_NETDEV_STATS_LEN ARRAY_SIZE(ngbe_gstrings_net_stats) +#define NGBE_PB_STATS_LEN ( \ + (sizeof(((struct ngbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ngbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define NGBE_VF_STATS_LEN \ + ((((struct ngbe_adapter *)netdev_priv(netdev))->num_vfs) * \ + (sizeof(struct vf_stats) / sizeof(u64))) +#define NGBE_STATS_LEN (NGBE_GLOBAL_STATS_LEN + \ + NGBE_NETDEV_STATS_LEN + \ + NGBE_PB_STATS_LEN + \ + NGBE_QUEUE_STATS_LEN + \ + NGBE_VF_STATS_LEN) + +static const char ngbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define NGBE_TEST_LEN (sizeof(ngbe_gstrings_test) / ETH_GSTRING_LEN) + +#define ngbe_isbackplane(type) \ + ((type == ngbe_media_type_backplane) ? true : false) + +int ngbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 supported_link = 0; + u32 link_speed = 0; + bool autoneg = false; + u32 supported, advertising; + bool link_up = 0; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + supported |= (ngbe_isbackplane(hw->phy.media_type)) ? + SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; + if (supported_link & NGBE_LINK_SPEED_100_FULL) + supported |= SUPPORTED_100baseT_Full; + if (supported_link & NGBE_LINK_SPEED_10_FULL) + supported |= SUPPORTED_10baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + advertising = supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + advertising = 0; + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_100_FULL) + advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_1GB_FULL) { + if (supported & SUPPORTED_1000baseKX_Full) + advertising |= ADVERTISED_1000baseKX_Full; + else + advertising |= ADVERTISED_1000baseT_Full; + } + if (hw->phy.autoneg_advertised & NGBE_LINK_SPEED_10_FULL) + advertising |= ADVERTISED_10baseT_Full; + } else { + /* default modes in case phy.autoneg_advertised isn't set */ + if (supported_link & NGBE_LINK_SPEED_1GB_FULL) + advertising |= ADVERTISED_1000baseT_Full; + if (supported_link & NGBE_LINK_SPEED_100_FULL) + advertising |= ADVERTISED_100baseT_Full; + if (supported_link & NGBE_LINK_SPEED_10_FULL) + advertising |= ADVERTISED_10baseT_Full; + } + supported |= SUPPORTED_Autoneg; + if (autoneg) { + advertising |= ADVERTISED_Autoneg; + autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; + } else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* Determine the remaining settings based on the PHY type. 
*/ + switch (adapter->hw.phy.type) { + case ngbe_phy_internal: + case ngbe_phy_m88e1512: + case ngbe_phy_zte: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case ngbe_phy_sfp_passive_tyco: + case ngbe_phy_sfp_passive_unknown: + case ngbe_phy_sfp_ftl: + case ngbe_phy_sfp_avago: + case ngbe_phy_sfp_intel: + case ngbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ngbe_sfp_type_da_cu: + case ngbe_sfp_type_da_cu_core0: + case ngbe_sfp_type_da_cu_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_DA; + break; + case ngbe_sfp_type_sr: + case ngbe_sfp_type_lr: + case ngbe_sfp_type_srlr_core0: + case ngbe_sfp_type_srlr_core1: + case ngbe_sfp_type_1g_sx_core0: + case ngbe_sfp_type_1g_sx_core1: + case ngbe_sfp_type_1g_lx_core0: + case ngbe_sfp_type_1g_lx_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case ngbe_sfp_type_not_present: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; + break; + case ngbe_sfp_type_1g_cu_core0: + case ngbe_sfp_type_1g_cu_core1: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case ngbe_sfp_type_unknown: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + break; + case ngbe_phy_unknown: + case ngbe_phy_generic: + case ngbe_phy_sfp_unsupported: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + + if (!in_interrupt()) { + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case ngbe_fc_full: + advertising |= ADVERTISED_Pause; + break; + case ngbe_fc_rx_pause: + advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + break; + case ngbe_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case NGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case NGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; +} + +static int ngbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + u32 supported, advertising; + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if ((hw->phy.media_type == ngbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the 
advertising of the adapter to the specified speed + */ + if (advertising & ~supported) { + return -EINVAL; + } + old = hw->phy.autoneg_advertised; + advertised = 0; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = true; + if (advertising & ADVERTISED_1000baseT_Full) + advertised |= NGBE_LINK_SPEED_1GB_FULL; + + if (advertising & ADVERTISED_100baseT_Full) + advertised |= NGBE_LINK_SPEED_100_FULL; + + if (advertising & ADVERTISED_10baseT_Full) + advertised |= NGBE_LINK_SPEED_10_FULL; + + if (old == advertised) { + return err; + } + } else { + if (cmd->base.duplex == DUPLEX_HALF) { + e_err(probe, "unsupported duplex\n"); + return -EINVAL; + } + + switch (cmd->base.speed) { + case SPEED_10: + advertised = NGBE_LINK_SPEED_10_FULL; + break; + case SPEED_100: + advertised = NGBE_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + advertised = NGBE_LINK_SPEED_1GB_FULL; + break; + default: + e_err(probe, "unsupported speed\n"); + return -EINVAL; + } + hw->mac.autoneg = false; + } + + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } else { + hw->phy.autoneg_advertised = advertised; + } + } else { + /* in this case we currently only support 1Gb/FULL */ + u32 speed = cmd->base.speed; + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (advertising != ADVERTISED_10000baseT_Full) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void ngbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + if (!hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ngbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ngbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ngbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_fc_info fc = hw->fc; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ngbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = ngbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = ngbe_fc_tx_pause; + else + fc.requested_mode = ngbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ngbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} + +static u32 ngbe_get_msglevel(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ngbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ngbe_get_regs_len(struct net_device __always_unused *netdev) +{ +#define NGBE_REGS_LEN 4096 + return NGBE_REGS_LEN * sizeof(u32); +} + +#define NGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +static void ngbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ngbe_adapter 
*adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, NGBE_REGS_LEN * sizeof(u32)); + regs_buff[NGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PWR);//0 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_CTL);//1 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_PF_SM);//2 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST);//3 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_ST);//4 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_SWSM);//5 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_CTL);//7 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_EN);//8 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ST);//9 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_INT_EN);//12 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMD);//14 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_DATA);//15 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_STATUS);//16 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_USR_CMD);//17 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_PORT_ST);//23 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TCP_TIME);//25 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_LED_CTL);//26 + /* GPIO */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DR);//27 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_DDR);//28 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_CTL);//29 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTEN);//30 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTMASK);//31 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_GPIO_INTSTATUS);//32 + /* TX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_TDESC);//33 + /* RX TPH */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RDESC);//34 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RHDR);//35 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_CFG_TPH_RPL);//36 + + /* TDMA */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_CTL);//37 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_POOL_TE);//38 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PB_THRE);//39 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_LLQ);//40 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_LB_L);//41 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETYPE_AS_L);//42 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_MAC_AS_L);//43 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_AS_L);//44 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_L);//45 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_TCP_FLG_H);//46 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_VLAN_INS(i));//47-54 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_ETAG_INS(i));//55-62 + } + /* Transmit QOS */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PBWARB_CTL);//63 + + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_DRP_CNT);//64 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_SEC_DRP);//65 + 
regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_PKT_CNT);//66 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_L);//67 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_BYTE_CNT_H);//68 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDM_OS2BMC_CNT);//69 + + /* RDMA */ + /* receive control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_ARB_CTL);//70 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_POOL_RE);//71 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_QDE);//72 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PF_HIDE);//73 + /* static */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_DRP_PKT);//74 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_PKT_CNT);//75 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_L);//76 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BYTE_CNT_H);//77 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDM_BMC2OS_CNT);//78 + + /* RDB */ + /*flow control */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCV);//79 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCL);//80 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCH);//81 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCRT);//82 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RFCC);//83 + /* receive packet buffer */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_CTL);//84 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PB_SZ);//85 + + /* lli interrupt */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LLI_THRE);//86 + /* ring assignment */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PL_CFG(i));//87-94 + } + for (i = 0; i < 32; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSTBL(i));//95-126 + } + for (i = 0; i < 10; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RSSRK(i));//127-136 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_RA_CTL);//137 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_SDP(i));//138-145 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL0(i));//146-153 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_5T_CTL1(i));//154-161 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_SYN_CLS);//162 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_ETYPE_CLS(i));//163-170 + } + /* statistics */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_MPCNT);//171 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PKT_CNT);//172 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_REPLI_CNT);//173 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_DRP_CNT);//174 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXONTXC);//175 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_LXOFFTXC);//176 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAL);//177 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_PFCMACDAH);//178 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RDB_TXSWERR);//179 + + /* PSR */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_CTL);//180 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAX_SZ);//181 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_CTL);//182 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_CTL);//183 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_PKT_CNT);//184 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_PKT_CNT);//185 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_DBG_DOP_CNT);//186 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MNG_DOP_CNT);//187 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_FLP_L);//188 + + /* vm l2 control */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VM_L2CTL(i));//189-196 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_ETYPE_SWC(i));//197-204 + } + for (i = 0; i < 128; i++) { + 
regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MC_TBL(i));//205-332 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_UC_TBL(i));///333-460 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_TBL(i));//461-588 + } + /* mac switcher */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_L);//589 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_AD_H);//590 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_VM);//591 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MAC_SWC_IDX);//592 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC);//593 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_VM_L);//594 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_VLAN_SWC_IDX);//595 + + /* mirror */ + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_CTL(i));//596-599 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VLAN_L(i));//600-603 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_MR_VM_L(i));//604-607 + } + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_CTL);//608 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPL);//609 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_STMPH);//610 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRL);//611 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_ATTRH);//612 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_1588_MSGTYPE);//613 + /* wake up */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_CTL);//614 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IPV);//615 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP4TBL(i));//616-619 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_WKUP_IP6TBL(i));//620-623 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_L(i));//624-639 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_DW_H(i));//640-655 + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_MSK(i));//656-671 + } + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PSR_LAN_FLEX_CTL);//672 + + /* TDB */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_RFCS);//673 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PB_SZ);//674 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_PBRARB_CTL);//675 + /* statistic */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_OUT_PKT_CNT);//676 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_PKT_CNT);//677 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_LB_PKT_CNT);//678 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TDB_MNG_LARGE_DOP_CNT);//679 + + /* TSEC */ + /* general tsec */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_CTL);//680 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_ST);//681 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AF);//682 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_BUF_AE);//683 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_MIN_IFG);//684 + /* 1588 */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_CTL);//685 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPL);//686 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_STMPH);//687 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIML);//688 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SYSTIMH);//689 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INC);//690 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJL);//691 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_ADJH);//692 + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_INT_ST);//693 + regs_buff[id++] = NGBE_R32_Q(hw, 
NGBE_TSEC_1588_INT_EN);//694 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_AUX_CTL);//695 + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TSEC_1588_SDP(i));//696-699 + } + + /* RSEC */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_CTL);//700 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RSEC_ST);//701 + /* mac wrapper */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_CFG);//702 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_CFG);//703 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_PKT_FLT);//704 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_WDG_TIMEOUT);//705 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_TX_FLOW_CTRL);//706 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_RX_FLOW_CTRL);//707 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_ST);//708 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_MAC_INT_EN);//709 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW);//710 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW);//711 + + /* BAR register */ + /* pf interrupt register */ + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IC);//712 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_ICS);//713 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IEN);//714 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_GPIE);//715 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IC);//716 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ICS);//717 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMS);//718 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IMC);//719 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_L);//720 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ISB_ADDR_H);//721 + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITRSEL);//722 + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_ITR(i));//723-730 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_IVAR(i));//731-734 + } + + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_MISC_IVAR);//735 + /* pf receive ring register */ + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAL(i));//736-743 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_BAH(i));//744-751 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_WP(i));//752-759 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_RP(i));//760-767 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_RR_CFG(i));//768-775 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAL(i));//776-783 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_BAH(i));//784-791 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_WP(i));//792-709 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_RP(i));//800-807 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = NGBE_R32_Q(hw, NGBE_PX_TR_CFG(i));//808-815 + } +} + +static int ngbe_get_eeprom_len(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ngbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = 
last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = TCALL(hw, eeprom.ops.read_buffer, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ngbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = TCALL(hw, eeprom.ops.write_buffer, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + TCALL(hw, eeprom.ops.update_checksum); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ngbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ngbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ngbe_driver_version, + sizeof(drvinfo->version) - 1); + strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + drvinfo->n_stats = NGBE_STATS_LEN - + (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct ngbe_queue_stats) / sizeof(u64))*2; + } else { + drvinfo->n_stats = NGBE_STATS_LEN; + } + drvinfo->testinfo_len = NGBE_TEST_LEN; + drvinfo->regdump_len = ngbe_get_regs_len(netdev); +} + +static void ngbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NGBE_MAX_RXD; + ring->tx_max_pending = NGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ngbe_set_ringparam(struct net_device 
*netdev, + struct ethtool_ringparam *ring) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + NGBE_MIN_TXD, NGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, NGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + NGBE_MIN_RXD, NGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, NGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ngbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ngbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ngbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ngbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ngbe_ring)); + + temp_ring[i].count = new_rx_count; + err = ngbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ngbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ngbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ngbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + ngbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__NGBE_RESETTING, &adapter->state); + return err; +} + +static int ngbe_get_sset_count(struct net_device *netdev, int sset) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return NGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= NGBE_NUM_RX_QUEUES) { + return NGBE_STATS_LEN - (NGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct ngbe_queue_stats) / sizeof(u64))*2; + } else { + return NGBE_STATS_LEN; + } + default: + return -EOPNOTSUPP; + } +} + +static void ngbe_get_ethtool_stats(struct 
net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + + u64 *queue_stat; + int stat_count, k; + unsigned int start; + struct ngbe_ring *ring; + int i, j; + char *p; + + ngbe_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + ngbe_gstrings_net_stats[i].stat_offset; + data[i] = (ngbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < NGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + ngbe_gstrings_stats[j].stat_offset; + data[i] = (ngbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < NGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } + + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } +} + +static void ngbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ngbe_gstrings_test, + NGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < NGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ngbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { /*temp setting2*/ + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { /*temp setting2*/ + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < NGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_vfs; i++) { + 
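+		/*
+		 * Note (descriptive comment, not in the original patch): the five
+		 * strings emitted per VF below are assumed to line up, in count and
+		 * order, with the per-VF counters copied out of struct vf_stats by
+		 * ngbe_get_ethtool_stats(); if either side changes, every stat that
+		 * follows shifts in "ethtool -S" output.
+		 */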
sprintf(p, "VF %d Rx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d MC Packets", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != NGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int ngbe_link_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + + if (NGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ngbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default sapphire register test */ +static struct ngbe_reg_test reg_test_sapphire[] = { + { NGBE_RDB_RFCL, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { NGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { NGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, NGBE_PX_RR_CFG_RR_EN }, + { NGBE_RDB_RFCH, 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFE0 }, + { NGBE_RDB_RFCV, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { NGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { NGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { NGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + + +static bool reg_pattern_test(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ngbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: 
got 0x%08X expected" + "0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + +static bool ngbe_reg_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_reg_test *test; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool ngbe_eeprom_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 devcap; + + if (TCALL(hw, eeprom.ops.eeprom_chksum_cap_st, NGBE_CALSUM_COMMAND, &devcap)) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + +static irqreturn_t ngbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ngbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = ngbe_misc_isb(adapter, NGBE_ISB_VEC1); + icr <<= 32; + icr |= ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + +static int ngbe_intr_test(struct ngbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &ngbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &ngbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &ngbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + ngbe_irq_disable(adapter); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, ~mask); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + ngbe_intr_trigger(&adapter->hw, mask); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 0; + break; + } + } + + /* Disable all the interrupts */ + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + NGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ngbe_free_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + TCALL(hw, mac.ops.disable_rx); + ngbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, NGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); + + ngbe_reset(adapter); + + ngbe_free_tx_resources(&adapter->test_tx_ring); + ngbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ngbe_setup_desc_rings(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + struct ngbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = NGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ngbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + wr32m(hw, NGBE_RSEC_CTL, 0x2, 0); + ngbe_configure_tx_ring(adapter, tx_ring); + + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_MASK, + NGBE_MAC_TX_CFG_TE | NGBE_MAC_TX_CFG_SPEED_1G); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = NGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + + err = ngbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + TCALL(hw, mac.ops.disable_rx); + + ngbe_configure_rx_ring(adapter, rx_ring); + + TCALL(hw, mac.ops.enable_rx); + + return 0; + +err_nomem: + ngbe_free_desc_rings(adapter); + return 
ret_val; +} + +static int ngbe_setup_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup MAC loopback */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, NGBE_MAC_RX_CFG_LM); + + reg_data = rd32(hw, NGBE_PSR_CTL); + reg_data |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_UPE | + NGBE_PSR_CTL_MPE | NGBE_PSR_CTL_TPE; + wr32(hw, NGBE_PSR_CTL, reg_data); + + wr32(hw, 0x17000, + ((rd32(hw, 0x17000) | + 0x00000040U) & ~0x1U)); + + wr32(hw, 0x17204, 0x4); + wr32(hw, NGBE_PSR_VLAN_CTL, + rd32(hw, NGBE_PSR_VLAN_CTL) & + ~NGBE_PSR_VLAN_CTL_VFE); + + NGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static void ngbe_loopback_cleanup(struct ngbe_adapter *adapter) +{ + wr32m(&adapter->hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_LM, ~NGBE_MAC_RX_CFG_LM); +} + + +static void ngbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool ngbe_check_lbtest_frame(struct ngbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static u16 ngbe_clean_test_rings(struct ngbe_ring *rx_ring, + struct ngbe_ring *tx_ring, + unsigned int size) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *rx_buffer; + struct ngbe_tx_buffer *tx_buffer; + const int bufsz = ngbe_rx_bufsz(rx_ring); + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + + while (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (ngbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = NGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + ngbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ngbe_run_loopback_test(struct ngbe_adapter *adapter) +{ + struct ngbe_ring *tx_ring = &adapter->test_tx_ring; + struct ngbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~NGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 
11; + + /* place data into test skb */ + ngbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ngbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + msleep(10); + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = ngbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + e_dev_err("ngbe_run_loopback_test: recv_cnt = %d\n", good_cnt); + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int ngbe_loopback_test(struct ngbe_adapter *adapter, u64 *data) +{ + *data = ngbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ngbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ngbe_run_loopback_test(adapter); + if (*data) + e_info(hw, "mac loopback testing failed\n"); + ngbe_loopback_cleanup(adapter); + +err_loopback: + ngbe_free_desc_rings(adapter); +out: + return *data; +} + +static void ngbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct ngbe_hw *hw = &adapter->hw; + + e_dev_info("ngbe_diag_test: start test\n"); + + if (NGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__NGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + e_warn(drv, "Please take active VFS " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__NGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + ngbe_close(netdev); + else { + msleep(20); + ngbe_reset(adapter); + } + + e_info(hw, "register testing starting\n"); + + if (ngbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + msleep(20); + ngbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ngbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + msleep(20); + + ngbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ngbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (!(((hw->subsystem_device_id & OEM_MASK) == OCP_CARD) || + 
((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP))) { + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. */ + if (adapter->flags & (NGBE_FLAG_SRIOV_ENABLED | + NGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + e_info(hw, "loopback testing starting\n"); + ngbe_loopback_test(adapter, &data[3]); + } + + data[3] = 0; + +skip_loopback: + msleep(20); + ngbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__NGBE_TESTING, &adapter->state); + if (if_running) + ngbe_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ngbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__NGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static int ngbe_wol_exclusion(struct ngbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + int retval = 0; + + /* WOL not supported for all devices */ + if (!ngbe_wol_supported(adapter)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void ngbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + struct ngbe_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ngbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if (adapter->wol & NGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & NGBE_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; + + if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + wol->wolopts = 0; +} + +static int ngbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 slot = hw->bus.lan_id; + u16 value; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ngbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + if (!((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP)) + return -EOPNOTSUPP; + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= NGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC) { + adapter->wol |= NGBE_PSR_WKUP_CTL_MAG; + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*enable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value | (1 << slot)); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + return 0; + } + + ngbe_read_ee_hostif(hw, 0x7FE, &value); + /*disable wol in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, value & ~(1 << slot)); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + return 0; +} + +static int ngbe_nway_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +static int ngbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = rd32(hw, NGBE_CFG_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + TCALL(hw, mac.ops.led_on, NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_OFF: + TCALL(hw, mac.ops.led_off, NGBE_LED_LINK_100M | NGBE_LED_LINK_1G); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + wr32(&adapter->hw, NGBE_CFG_LED_CTL, + adapter->led_reg); + break; + } + + return 0; +} + +static int ngbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ngbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (NGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (NGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + 
rx_itr_param = NGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = NGBE_20K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < NGBE_70K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= NGBE_70K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < NGBE_70K_ITR)) + need_reset = true; + } + + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling " + "is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + TCALL(hw, mac.ops.dmac_config); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ngbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static int ngbe_get_rss_hash_opts(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ngbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ngbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + break; + case ETHTOOL_GRXCLSRLALL: + break; + case ETHTOOL_GRXFH: + ret = ngbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (NGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + NGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ngbe_set_rss_hash_opt(struct ngbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything 
other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= NGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ngbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, NGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4 + | NGBE_RDB_RA_CTL_RSS_IPV4_TCP + | NGBE_RDB_RA_CTL_RSS_IPV6 + | NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(NGBE_RDB_RA_CTL_RSS_IPV4_UDP | + NGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, NGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int ngbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + break; + case ETHTOOL_SRXCLSRLDEL: + break; + case ETHTOOL_SRXFH: + ret = ngbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int ngbe_rss_indir_tbl_max(struct ngbe_adapter *adapter) +{ + return 64; +} + +static u32 ngbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ngbe_rss_indir_size(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + return ngbe_rss_indir_tbl_entries(adapter); +} + +static void ngbe_get_reta(struct ngbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ngbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ngbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + 
ngbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ngbe_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ngbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + ngbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, ngbe_get_rxfh_key_size(netdev)); + + ngbe_store_reta(adapter); + + return 0; +} + +static int ngbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +static unsigned int ngbe_max_channels(struct ngbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = NGBE_MAX_FDIR_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = ngbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void ngbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ngbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; 
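+	/*
+	 * Note (descriptive comment, not in the original patch): combined_count
+	 * is what "ethtool -l" reports as the active channel count; requests made
+	 * through ngbe_set_channels() ("ethtool -L") are rejected above
+	 * ngbe_max_channels() and capped to the RSS indices limit.
+	 */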
+ + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + +} + +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ngbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ngbe_max_channels(adapter)) + return -EINVAL; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ngbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} + +static int ngbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & NGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = ngbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not " + "supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not " + "supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) + adapter->flags2 |= NGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~NGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); + } + + return 0; +} + +static int ngbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (ef->region == 0) { + ret = ngbe_upgrade_flash(&adapter->hw, ef->region, + fw->data, fw->size); + } else { + if (ngbe_mng_present(&adapter->hw)) { + ret = ngbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + } else + ret = -EOPNOTSUPP; + } + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reboot to make firmware work\n", ef->data); + return ret; +} + + +static struct ethtool_ops ngbe_ethtool_ops = { + .get_link_ksettings = ngbe_get_link_ksettings, + .set_link_ksettings = ngbe_set_link_ksettings, + .get_drvinfo = ngbe_get_drvinfo, + .get_regs_len = ngbe_get_regs_len, + .get_regs = ngbe_get_regs, + .get_wol = 
ngbe_get_wol, + .set_wol = ngbe_set_wol, + .nway_reset = ngbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ngbe_get_eeprom_len, + .get_eeprom = ngbe_get_eeprom, + .set_eeprom = ngbe_set_eeprom, + .get_ringparam = ngbe_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_pauseparam = ngbe_get_pauseparam, + .set_pauseparam = ngbe_set_pauseparam, + .get_msglevel = ngbe_get_msglevel, + .set_msglevel = ngbe_set_msglevel, + .self_test = ngbe_diag_test, + .get_strings = ngbe_get_strings, + .set_phys_id = ngbe_set_phys_id, + .get_sset_count = ngbe_get_sset_count, + .get_ethtool_stats = ngbe_get_ethtool_stats, + .get_coalesce = ngbe_get_coalesce, + .set_coalesce = ngbe_set_coalesce, + .get_rxnfc = ngbe_get_rxnfc, + .set_rxnfc = ngbe_set_rxnfc, + .get_eee = ngbe_get_eee, + .set_eee = ngbe_set_eee, + .get_channels = ngbe_get_channels, + .set_channels = ngbe_set_channels, + .get_ts_info = ngbe_get_ts_info, + .get_rxfh_indir_size = ngbe_rss_indir_size, + .get_rxfh_key_size = ngbe_get_rxfh_key_size, + .get_rxfh = ngbe_get_rxfh, + .set_rxfh = ngbe_set_rxfh, + .flash_device = ngbe_set_flash, +}; + +void ngbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ngbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_hw.c b/drivers/net/ethernet/netswift/ngbe/ngbe_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..73b8a328c267d2b64e3423a30c5fdec695d841a6 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_hw.c @@ -0,0 +1,5047 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + + +#include "ngbe_type.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#include "ngbe.h" + +#define NGBE_SP_MAX_TX_QUEUES 8 +#define NGBE_SP_MAX_RX_QUEUES 8 +#define NGBE_SP_RAR_ENTRIES 32 +#define NGBE_SP_MC_TBL_SIZE 128 +#define NGBE_SP_VFT_TBL_SIZE 128 +#define NGBE_SP_RX_PB_SIZE 42 + +STATIC s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw); +STATIC void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw); +STATIC s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr); + +STATIC s32 ngbe_setup_copper_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN); +s32 ngbe_check_mac_link(struct ngbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ngbe_check_mac_link_mdi(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete); +s32 ngbe_check_mac_link_yt8521s(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete); + +u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_XPCS_IDA_DATA; + data = rd32(hw, portRegOffset); + + return data; +} + +void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_ETHPHY_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = NGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = NGBE_XPCS_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +/** + * ngbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + DEBUGFUNC("\n"); + + /* ??? 
max_msix_count for emerald */ + max_msix_count = NGBE_MAX_MSIX_VECTORS_EMERALD; + pos = pci_find_capability(((struct ngbe_adapter *)hw->back)->pdev, + PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct ngbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (NGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= NGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ngbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ngbe_init_hw(struct ngbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("\n"); + + /* Reset the hardware */ + status = TCALL(hw, mac.ops.reset_hw); + + if (status == 0) { + /* Start the HW */ + status = TCALL(hw, mac.ops.start_hw); + } + + return status; +} + + +/** + * ngbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw) +{ + u16 i = 0; + + DEBUGFUNC("\n"); + + rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, NGBE_RDB_LXONTXC); + rd32(hw, NGBE_RDB_LXOFFTXC); + /* ??? 1e0c not found */ + /* rd32(hw, NGBE_MAC_LXONRXC); */ + rd32(hw, NGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + /* ??? move 16? */ + wr32m(hw, NGBE_MMC_CONTROL, NGBE_MMC_CONTROL_UP, i<<16); + rd32(hw, NGBE_MAC_PXOFFRXC); + } + + for (i = 0; i < 8; i++) { + wr32(hw, NGBE_PX_MPRC(i), 0); + } + /* BPRC */ + + rd32(hw, NGBE_PX_GPRC); + rd32(hw, NGBE_PX_GPTC); + rd32(hw, NGBE_PX_GORC_MSB); + rd32(hw, NGBE_PX_GOTC_MSB); + + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, NGBE_RDM_DRP_PKT); + return 0; +} + +/** + * ngbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ngbe_setup_fc(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u16 pcap_backplane = 0; + + DEBUGFUNC("\n"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ngbe_fc_rx_pause) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "ngbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ngbe_fc_default) + hw->fc.requested_mode = ngbe_fc_full; + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). 
+ * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ngbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case ngbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x100; + break; + case ngbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + if (hw->phy.type != ngbe_phy_m88e1512_sfi && + hw->phy.type != ngbe_phy_yt8521s_sfi) + pcap_backplane |= NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + else + pcap_backplane |= 0x80; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + break; + } + + /* + * AUTOC restart handles negotiation of 1G on backplane + * and copper. + */ + if ((hw->phy.media_type == ngbe_media_type_copper)) { + ret_val = TCALL(hw, phy.ops.set_adv_pause, pcap_backplane); + } + +out: + return ret_val; +} + + +/** + * ngbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + DEBUGFUNC("\n"); + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/** + * ngbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ngbe_hw structure + **/ +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status) +{ + if (hw->bus.type == ngbe_bus_type_unknown) + hw->bus.type = ngbe_bus_type_pci_express; + + switch (link_status & NGBE_PCI_LINK_WIDTH) { + case NGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ngbe_bus_width_pcie_x1; + break; + case NGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ngbe_bus_width_pcie_x2; + break; + case NGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ngbe_bus_width_pcie_x4; + break; + case NGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ngbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ngbe_bus_width_unknown; + break; + } + + switch (link_status & NGBE_PCI_LINK_SPEED) { + case NGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ngbe_bus_speed_2500; + break; + case NGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ngbe_bus_speed_5000; + break; + case NGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = 
ngbe_bus_speed_8000; + break; + default: + hw->bus.speed = ngbe_bus_speed_unknown; + break; + } +} + +/** + * ngbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ngbe_hw structure. + **/ +s32 ngbe_get_bus_info(struct ngbe_hw *hw) +{ + u16 link_status; + + DEBUGFUNC("\n"); + + /* Get the negotiated link width and speed from PCI config space */ + link_status = NGBE_READ_PCIE_WORD(hw, NGBE_PCI_LINK_STATUS); + + ngbe_set_pci_config_data(hw, link_status); + + return 0; +} + +/** + * ngbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw) +{ + struct ngbe_bus_info *bus = &hw->bus; + u32 reg = 0; + + DEBUGFUNC("\n"); + + reg = rd32(hw, NGBE_CFG_PORT_ST); + bus->lan_id = NGBE_CFG_PORT_ST_LAN_ID(reg); + bus->func = bus->lan_id; +} + +/** + * ngbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ngbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ngbe_stop_adapter(struct ngbe_hw *hw) +{ + u16 i; + + DEBUGFUNC("\n"); + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + TCALL(hw, mac.ops.disable_rx); + + /* Set interrupt mask to stop interrupts from being generated */ + ngbe_intr_disable(hw, NGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, NGBE_PX_MISC_IC, 0xffffffff); + + /* ??? 0bit RW->RO */ + wr32(hw, NGBE_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, NGBE_PX_TR_CFG(i), + NGBE_PX_TR_CFG_SWFLSH | NGBE_PX_TR_CFG_ENABLE, + NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, NGBE_PX_RR_CFG(i), + NGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + NGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ngbe_disable_pcie_master(hw); +} + +/** + * ngbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ngbe_led_on(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + DEBUGFUNC("\n"); + + /* ??? */ + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ngbe_led_off(struct ngbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, NGBE_CFG_LED_CTL); + + DEBUGFUNC("\n"); + + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~(index << NGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, NGBE_CFG_LED_CTL, led_reg); + NGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * ngbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +STATIC s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) +{ + s32 status = NGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ngbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, NGBE_MIS_SWSM); + if (!(swsm & NGBE_MIS_SWSM_SMBI)) + status = 0; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + if (ngbe_check_mng_access(hw)) { + /* Set the SW EEPROM semaphore bit to request access */ + wr32m(hw, NGBE_MNG_SW_SM, + NGBE_MNG_SW_SM_SM, NGBE_MNG_SW_SM_SM); + + /* + * If we set the bit successfully then we got + * semaphore. + */ + swsm = rd32(hw, NGBE_MNG_SW_SM); + if (swsm & NGBE_MNG_SW_SM_SM) + break; + } + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + ngbe_release_eeprom_semaphore(hw); + status = NGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ngbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw) +{ + if (ngbe_check_mng_access(hw)) { + wr32m(hw, NGBE_MNG_SW_SM, + NGBE_MNG_SW_SM_SM, 0); + wr32m(hw, NGBE_MIS_SWSM, + NGBE_MIS_SWSM_SMBI, 0); + NGBE_WRITE_FLUSH(hw); + } +} + +/** + * ngbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. 
+ * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ngbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + /* Make sure it is not a multicast address */ + if (NGBE_IS_MULTICAST(mac_addr)) { + DEBUGOUT("MAC address is multicast\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (NGBE_IS_BROADCAST(mac_addr)) { + DEBUGOUT("MAC address is broadcast\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + DEBUGOUT("MAC address is all zeros\n"); + status = NGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ngbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return NGBE_ERR_INVALID_ARGUMENT; + } + + /* select the MAC address */ + wr32(hw, NGBE_PSR_MAC_SWC_IDX, index); + + /* setup VMDq pool mapping */ + wr32(hw, NGBE_PSR_MAC_SWC_VM, pools & 0xFFFFFFFF); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= NGBE_PSR_MAC_SWC_AD_H_AV; + + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, rar_low); + wr32m(hw, NGBE_PSR_MAC_SWC_AD_H, + (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | + NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + NGBE_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * ngbe_clear_rar - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return NGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + wr32(hw, NGBE_PSR_MAC_SWC_IDX, index); + + wr32(hw, NGBE_PSR_MAC_SWC_VM, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32m(hw, NGBE_PSR_MAC_SWC_AD_H, + (NGBE_PSR_MAC_SWC_AD_H_AD(~0) | + NGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + NGBE_PSR_MAC_SWC_AD_H_AV), + 0); + + return 0; +} + +/** + * ngbe_init_rx_addrs - Initializes receive address filters. 
+ * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ngbe_init_rx_addrs(struct ngbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + DEBUGFUNC("\n"); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ngbe_validate_mac_addr(hw->mac.addr) == + NGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. */ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + TCALL(hw, mac.ops.set_rar, 0, hw->mac.addr, 0, + NGBE_PSR_MAC_SWC_AD_H_AV); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, NGBE_PSR_MC_TBL(i), 0); + + TCALL(hw, mac.ops.init_uta_tables); + + return 0; +} + +/** + * ngbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ngbe_add_uc_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("\n"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ngbe_add_uc_addr Complete\n"); +} + +/** + * ngbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. 
+ * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. + **/ +s32 ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list, + u32 addr_count, ngbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 vmdq; + + DEBUGFUNC("\n"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use + 1); + for (i = 0; i < uc_addr_in_use; i++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, 1 + i); + wr32(hw, NGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, NGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ngbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, NGBE_PSR_CTL_UPE); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_UPE, 0); + } + } + + DEBUGOUT("ngbe_update_uc_addr_list Complete\n"); + return 0; +} + +/** + * ngbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + DEBUGFUNC("\n"); + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ngbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. 
+ **/ +void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + DEBUGFUNC("\n"); + + hw->addr_ctrl.mta_in_use++; + + vector = ngbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ngbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ngbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + u32 psrctl; + + DEBUGFUNC("\n"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ngbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, NGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + DEBUGOUT("ngbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * ngbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ngbe_enable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + DEBUGFUNC("\n"); + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= NGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT); + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. 
+ **/ +s32 ngbe_disable_mc(struct ngbe_hw *hw) +{ + struct ngbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + DEBUGFUNC("\n"); + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, NGBE_PSR_CTL); + psrctl &= ~(NGBE_PSR_CTL_MO | NGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << NGBE_PSR_CTL_MO_SHIFT; + wr32(hw, NGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * ngbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 ngbe_fc_enable(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + + DEBUGFUNC("\n"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + + /* Low water mark of zero causes XOFF floods */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && hw->fc.high_water) { + if (!hw->fc.low_water || hw->fc.low_water >= hw->fc.high_water) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = NGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + + /* Negotiate the fc mode to use */ + ngbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, NGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~NGBE_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(hw, NGBE_RDB_RFCC); + fccfg_reg &= ~NGBE_RDB_RFCC_RFCE_802_3X; + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ngbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ngbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case ngbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + case ngbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= NGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= NGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = NGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, NGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, NGBE_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if ((hw->fc.current_mode & ngbe_fc_tx_pause) && + hw->fc.high_water) { + /* 32Byte granularity */ + fcrtl = (hw->fc.low_water << 10) | + NGBE_RDB_RFCL_XONE; + wr32(hw, NGBE_RDB_RFCL, fcrtl); + fcrth = (hw->fc.high_water << 10) | + NGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, NGBE_RDB_RFCL, 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. 
This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(hw, NGBE_RDB_PB_SZ) - 24576; + } + + wr32(hw, NGBE_RDB_RFCH, fcrth); + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010000; + wr32(hw, NGBE_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, NGBE_RDB_RFCRT, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ngbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +STATIC s32 ngbe_negotiate_fc(struct ngbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(NGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return NGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ngbe_fc_full) { + hw->fc.current_mode = ngbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ngbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ngbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ngbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return 0; +} + +/** + * ngbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 ngbe_fc_autoneg_copper(struct ngbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + TCALL(hw, phy.ops.get_adv_pause, &technology_ability_reg); + TCALL(hw, phy.ops.get_lp_adv_pause, &lp_technology_ability_reg); + + return ngbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE, + NGBE_TAF_SYM_PAUSE, NGBE_TAF_ASM_PAUSE); +} + +/** + * ngbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ngbe_fc_autoneg(struct ngbe_hw *hw) +{ + s32 ret_val = NGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up; + + DEBUGFUNC("\n"); + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. 
Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(NGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ngbe_media_type_fiber: + break; + + /* Autoneg flow control on copper adapters */ + case ngbe_media_type_copper: + ret_val = ngbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == NGBE_OK) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ngbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. NGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +s32 ngbe_disable_pcie_master(struct ngbe_hw *hw) +{ + s32 status = 0; + u32 i; + + DEBUGFUNC("\n"); + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(((struct ngbe_adapter *)hw->back)->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING)) || + NGBE_REMOVED(hw->hw_addr)) + goto out; + + + /* Poll for master request bit to clear */ + for (i = 0; i < NGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(rd32(hw, NGBE_PX_TRANSACTION_PENDING))) + goto out; + } + + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = NGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; +} + +/** + * ngbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ngbe_get_eeprom_semaphore(hw)) + return NGBE_ERR_SWFW_SYNC; + + if (ngbe_check_mng_access(hw)) { + gssr = rd32(hw, NGBE_MNG_SWFW_SYNC); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + wr32(hw, NGBE_MNG_SWFW_SYNC, gssr); + ngbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + ngbe_release_eeprom_semaphore(hw); + msec_delay(5); + } + } + } + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "ngbe_acquire_swfw_sync: i = %u, gssr = %u\n", i, gssr); + + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ngbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return NGBE_ERR_SWFW_SYNC; +} + +/** + * ngbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask) 
+{ + ngbe_get_eeprom_semaphore(hw); + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_SWFW_SYNC, mask, 0); + + ngbe_release_eeprom_semaphore(hw); +} + +/** + * ngbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw) +{ +#define NGBE_MAX_SECRX_POLL 40 + + int i; + int secrxreg; + + DEBUGFUNC("\n"); + + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, NGBE_RSEC_CTL_RX_DIS); + for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) { + secrxreg = rd32(hw, NGBE_RSEC_ST); + if (secrxreg & NGBE_RSEC_ST_RSEC_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= NGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; +} + +/** + * ngbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw) +{ + DEBUGFUNC("\n"); + + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_RX_DIS, 0); + NGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ngbe_insert_mac_addr - Find a RAR for this MAC address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an Ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("\n"); + + /* swap bytes for HW little endian */ + addr_low = addr[5] | (addr[4] << 8) + | (addr[3] << 16) + | (addr[2] << 24); + addr_high = addr[1] | (addr[0] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. 
 */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + rar_high = rd32(hw, NGBE_PSR_MAC_SWC_AD_H); + + if (((NGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = rd32(hw, NGBE_PSR_MAC_SWC_AD_L); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + NGBE_PSR_MAC_SWC_AD_H_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return NGBE_ERR_INVALID_MAC_ADDR; + } + + return rar; +} + +/** + * ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return NGBE_ERR_INVALID_ARGUMENT; + } + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(hw, NGBE_PSR_MAC_SWC_VM); + + if (NGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo) + goto done; + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && rar != 0) + TCALL(hw, mac.ops.clear_rar, rar); +done: + return 0; +} + +/** + * ngbe_set_vmdq - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 pool) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(pool); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return NGBE_ERR_INVALID_ARGUMENT; + } + + return 0; +} + +/** + * ngbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + * + * This function should only be invoked in IOV mode. In IOV mode, the default + * pool is the next pool after the number of VFs advertised, not 0. + * The MPSAR table needs to be updated for the SAN_MAC RAR + * [hw->mac.san_mac_rar_index]. + **/ +s32 ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + DEBUGFUNC("\n"); + /* ??? 
*/ + if (vmdq > 32) + return -1; + + wr32(hw, NGBE_PSR_MAC_SWC_IDX, rar); + wr32(hw, NGBE_PSR_MAC_SWC_VM, 1 << vmdq); + + return 0; +} + +/** + * ngbe_init_uta_tables - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ngbe_init_uta_tables(struct ngbe_hw *hw) +{ + int i; + + DEBUGFUNC("\n"); + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + wr32(hw, NGBE_PSR_UC_TBL(i), 0); + + return 0; +} + +/** + * ngbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(hw, NGBE_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than NGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + ERROR_REPORT1(NGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = NGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ngbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = 0; + bool vfta_changed = false; + + DEBUGFUNC("\n"); + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call ngbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = ngbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, NGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * ngbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + DEBUGFUNC("\n"); + + if (vlan > 4095) + return NGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, NGBE_CFG_PORT_CTL); + if (vt & NGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits = 0; + + vlvf_index = ngbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + NGBE_PSR_VLAN_SWC_VM_L, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + NGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits |= rd32(hw, + NGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, NGBE_PSR_VLAN_SWC, + (NGBE_PSR_VLAN_SWC_VIEN | vlan)); + if ((!vlan_on) && (vfta_changed != NULL)) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + *vfta_changed = false; + } + } else + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + } + + return 0; +} + +/** + * ngbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ngbe_clear_vfta(struct ngbe_hw *hw) +{ + u32 offset; + + DEBUGFUNC("\n"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) { + wr32(hw, NGBE_PSR_VLAN_TBL(offset), 0); + /* errata 5 */ + hw->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < NGBE_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, offset); + wr32(hw, NGBE_PSR_VLAN_SWC, 0); + wr32(hw, NGBE_PSR_VLAN_SWC_VM_L, 0); + } + + return 0; +} + + +/** + * ngbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf) +{ + u64 pfvfspoof = 0; + + DEBUGFUNC("\n"); + + if (enable) { + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. + */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, NGBE_TDM_MAC_AS_L, + pfvfspoof & 0xff); + } else { + wr32(hw, NGBE_TDM_MAC_AS_L, 0); + } +} + +/** + * ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + DEBUGFUNC("\n"); + + if (vf > 8) + return; + + pfvfspoof = rd32(hw, NGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_VLAN_AS_L, pfvfspoof); + +} + +/** + * ngbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, + bool enable, int vf) +{ + u32 pfvfspoof; + + DEBUGFUNC("\n"); + + if (vf <= 8) { + pfvfspoof = rd32(hw, NGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, NGBE_TDM_ETYPE_AS_L, pfvfspoof); + } +} + +/** + * ngbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps) +{ + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ngbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
+ **/ +u8 ngbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("\n"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + + +s32 ngbe_host_interface_pass_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 i; + u32 dword_len; + s32 status = 0; + + DEBUGFUNC("\n"); + + if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return NGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return NGBE_ERR_SWFW_SYNC; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + status = NGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) { + if (ngbe_check_mng_access(hw)) + wr32a(hw, NGBE_MNG_MBOX, + i, NGBE_CPU_TO_LE32(buffer[i])); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_MBOX_CTL, + NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + +rel_out: + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * ngbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return NGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hicr, i, bi; + u32 hdr_size = sizeof(struct ngbe_hic_hdr); + u16 buf_len; + u32 dword_len; + s32 status = 0; + u32 buf[64] = {}; + + DEBUGFUNC("\n"); + + if (length == 0 || length > NGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return NGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return NGBE_ERR_SWFW_SYNC; + } + + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + status = NGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + /*read to clean all status*/ + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "fwrdy is set before command.\n"); + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. 
+ */ + for (i = 0; i < dword_len; i++) { + if (ngbe_check_mng_access(hw)) + wr32a(hw, NGBE_MNG_MBOX, + i, NGBE_CPU_TO_LE32(buffer[i])); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (ngbe_check_mng_access(hw)) + wr32m(hw, NGBE_MNG_MBOX_CTL, + NGBE_MNG_MBOX_CTL_SWRDY, NGBE_MNG_MBOX_CTL_SWRDY); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (ngbe_check_mng_access(hw)) { + hicr = rd32(hw, NGBE_MNG_MBOX_CTL); + if ((hicr & NGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + buf[0] = rd32(hw, NGBE_MNG_MBOX); + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + printk("===%x= %x=\n", buffer[0] & 0xff, (~buf[0] >> 24)); + printk("===%08x\n", rd32(hw, 0x1e100)); + printk("===%08x\n", rd32(hw, 0x1e104)); + printk("===%08x\n", rd32(hw, 0x1e108)); + printk("===%08x\n", rd32(hw, 0x1e10c)); + printk("===%08x\n", rd32(hw, 0x1e044)); + printk("===%08x\n", rd32(hw, 0x10000)); + if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ngbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (ngbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, NGBE_MNG_MBOX, + bi); + NGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + +rel_out: + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * ngbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
+ **/ +s32 ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ngbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + DEBUGFUNC("\n"); + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ngbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * ngbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns NGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or NGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ngbe_reset_hostif(struct ngbe_hw *hw) +{ + struct ngbe_hic_reset reset_cmd; + int i; + s32 status = 0; + + DEBUGFUNC("\n"); + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + reset_cmd.hdr.checksum = 0; + reset_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + + /* send reset request to FW and wait for response */ + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = ngbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + msleep(1); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +s32 ngbe_setup_mac_link_hostif(struct ngbe_hw *hw, u32 speed) +{ + struct ngbe_hic_phy_cfg cmd; + int i; + s32 status = 0; + + DEBUGFUNC("\n"); + + cmd.hdr.cmd = FW_SETUP_MAC_LINK_CMD; + cmd.hdr.buf_len = FW_SETUP_MAC_LINK_LEN; + cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + cmd.lan_id = hw->bus.lan_id; + cmd.phy_mode = 0; + cmd.phy_speed = (u16)speed; + cmd.hdr.checksum = 0; + cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&cmd, + (FW_CEM_HDR_LEN + cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = ngbe_host_interface_command(hw, (u32 *)&cmd, + sizeof(cmd), + NGBE_HI_COMMAND_TIMEOUT, + true); + if (status != 0) + continue; + + if (cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +u16 ngbe_crc16_ccitt(const u8 *buf, int size) +{ + u16 crc = 0; + int i; + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +s32 ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct ngbe_hic_upg_start start_cmd; + struct ngbe_hic_upg_write write_cmd; + 
struct ngbe_hic_upg_verify verify_cmd; + u32 offset; + s32 status = 0; + + DEBUGFUNC("\n"); + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + start_cmd.hdr.checksum = 0; + start_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + NGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = ngbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = ngbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + NGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case NGBE_MODULE_EEPROM: + verify_cmd.action_flag = NGBE_RELOAD_EEPROM; + break; + case NGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = NGBE_RESET_FIRMWARE; + break; + case NGBE_MODULE_HARDWARE: + verify_cmd.action_flag = NGBE_RESET_LAN; + break; + default: + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_upgrade_flash_hostif: region err %x\n", region); + return status; + } + + verify_cmd.hdr.checksum = ngbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + + status = ngbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + NGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = NGBE_ERR_HOST_INTERFACE_COMMAND; + } + return status; +} + +/* cmd_addr is used for some special command: +** 1. to be sector address, when implemented erase sector command +** 2. 
to be flash address when implemented read, write flash address +*/ +u8 fmgr_cmd_op(struct ngbe_hw *hw, u32 cmd, u32 cmd_addr) +{ + u32 cmd_val = 0; + u32 time_out = 0; + + cmd_val = (cmd << SPI_CLK_CMD_OFFSET) | (SPI_CLK_DIV << SPI_CLK_DIV_OFFSET) | cmd_addr; + wr32(hw, SPI_H_CMD_REG_ADDR, cmd_val); + while (1) { + if (rd32(hw, SPI_H_STA_REG_ADDR) & 0x1) + break; + + if (time_out == SPI_TIME_OUT_VALUE) + return 1; + + time_out = time_out + 1; + udelay(10); + } + + return 0; +} + +u8 fmgr_usr_cmd_op(struct ngbe_hw *hw, u32 usr_cmd) +{ + u8 status = 0; + + wr32(hw, SPI_H_USR_CMD_REG_ADDR, usr_cmd); + status = fmgr_cmd_op(hw, SPI_CMD_USER_CMD, 0); + + return status; +} + +u8 flash_erase_chip(struct ngbe_hw *hw) +{ + u8 status = fmgr_cmd_op(hw, SPI_CMD_ERASE_CHIP, 0); + return status; +} + +u8 flash_erase_sector(struct ngbe_hw *hw, u32 sec_addr) +{ + u8 status = fmgr_cmd_op(hw, SPI_CMD_ERASE_SECTOR, sec_addr); + return status; +} + +u32 flash_read_dword(struct ngbe_hw *hw, u32 addr) +{ + u8 status = fmgr_cmd_op(hw, SPI_CMD_READ_DWORD, addr); + if (status) + return (u32)status; + + return rd32(hw, SPI_H_DAT_REG_ADDR); +} + +u8 flash_write_dword(struct ngbe_hw *hw, u32 addr, u32 dword) +{ + u8 status = 0; + + wr32(hw, SPI_H_DAT_REG_ADDR, dword); + status = fmgr_cmd_op(hw, SPI_CMD_WRITE_DWORD, addr); + if (status) + return status; + + if (dword != flash_read_dword(hw, addr)) { + return 1; + } + + return 0; +} + +int ngbe_flash_write_cab(struct ngbe_hw *hw, u32 addr, u32 value, u16 lan_id) +{ + int status; + struct ngbe_hic_read_cab buffer; + + buffer.hdr.req.cmd = 0xE2; + buffer.hdr.req.buf_lenh = 0x6; + buffer.hdr.req.buf_lenl = 0x0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.dbuf.d16[0] = cpu_to_le16(lan_id); + /* one word */ + buffer.dbuf.d32[0] = htonl(addr); + buffer.dbuf.d32[1] = htonl(value); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + printk("0x1e100 :%08x\n", rd32(hw, 0x1e100)); + printk("0x1e104 :%08x\n", rd32(hw, 0x1e104)); + printk("0x1e108 :%08x\n", rd32(hw, 0x1e108)); + printk("0x1e10c :%08x\n", rd32(hw, 0x1e10c)); + + return status; +} + +int ngbe_flash_read_cab(struct ngbe_hw *hw, u32 addr, u16 lan_id) +{ + int status; + struct ngbe_hic_read_cab buffer; + u16 *data = NULL; + + buffer.hdr.req.cmd = 0xE1; + buffer.hdr.req.buf_lenh = 0xaa; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.dbuf.d16[0] = cpu_to_le16(lan_id); + /* one word */ + buffer.dbuf.d32[0] = htonl(addr); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, true); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) { + *data = (u16)rd32a(hw, 0x1e100, 3); + printk("0x1e100 :%08x\n", rd32(hw, 0x1e100)); + printk("0x1e104 :%08x\n", rd32(hw, 0x1e104)); + printk("0x1e108 :%08x\n", rd32(hw, 0x1e108)); + printk("0x1e10c :%08x\n", rd32(hw, 0x1e10c)); + } else { + status = -147; + return status; + } + + return rd32(hw, 0x1e108); +} + +int ngbe_flash_write_unlock(struct ngbe_hw *hw) +{ + int status; + struct ngbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = 0x40; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = 0xFF; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), 5000, false); + if (status) + return status; + + return status; +} + +int 
ngbe_flash_write_lock(struct ngbe_hw *hw)
+{
+	int status;
+	struct ngbe_hic_read_shadow_ram buffer;
+
+	buffer.hdr.req.cmd = 0x39;
+	buffer.hdr.req.buf_lenh = 0;
+	buffer.hdr.req.buf_lenl = 0;
+	buffer.hdr.req.checksum = 0xFF;
+
+	/* convert offset from words to bytes */
+	buffer.address = 0;
+	/* one word */
+	buffer.length = 0;
+
+	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
+					     sizeof(buffer), 5000, false);
+	if (status)
+		return status;
+
+	return status;
+}
+
+int ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region,
+		       const u8 *data, u32 size)
+{
+	u32 sector_num = 0;
+	u32 read_data = 0;
+	u8 status = 0;
+	u8 skip = 0;
+	u32 i = 0, k = 0, n = 0;
+	u8 flash_vendor = 0;
+	u32 num[256] = {0};
+	u32 mac_addr0_dword0_t, mac_addr0_dword1_t;
+	u32 mac_addr1_dword0_t, mac_addr1_dword1_t;
+	u32 mac_addr2_dword0_t, mac_addr2_dword1_t;
+	u32 mac_addr3_dword0_t, mac_addr3_dword1_t;
+	u32 serial_num_dword0_t, serial_num_dword1_t, serial_num_dword2_t;
+
+	/* check sub_id */
+	printk("Checking sub_id .......\n");
+	printk("The card's sub_id : %04x\n", hw->subsystem_device_id);
+	printk("The image's sub_id : %04x\n", data[0xfffdc] << 8 | data[0xfffdd]);
+	if ((hw->subsystem_device_id & 0xffff) ==
+	    ((data[0xfffdc] << 8 | data[0xfffdd]) & 0xffff)) {
+		printk("The image matches this card\n");
+	} else if (hw->subsystem_device_id == 0xffff) {
+		printk("updating anyway\n");
+	} else {
+		printk("====The Gigabit image does not match the Gigabit card====\n");
+		printk("====Please check your image====\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* check dev_id */
+	printk("Checking dev_id .......\n");
+	printk("The image's dev_id : %04x\n", data[0xfffde] << 8 | data[0xfffdf]);
+	printk("The card's dev_id : %04x\n", hw->device_id);
+	if (!((hw->device_id & 0xffff) ==
+	      ((data[0xfffde] << 8 | data[0xfffdf]) & 0xffff))
+	    && !(hw->device_id == 0xffff)) {
+		printk("====The Gigabit image does not match the Gigabit card====\n");
+		printk("====Please check your image====\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* unlock flash write protect */
+	ngbe_release_eeprom_semaphore(hw);
+	ngbe_flash_write_unlock(hw);
+
+	wr32(hw, 0x10114, 0x9f050206);
+	wr32(hw, 0x10194, 0x9f050206);
+
+	ngbe_flash_write_cab(hw, 0x188, 0, 0);
+	ngbe_flash_write_cab(hw, 0x184, 0x60000000, 0);
+	msleep(1000);
+
+	mac_addr0_dword0_t = flash_read_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G);
+	mac_addr0_dword1_t = flash_read_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G) & 0xffff;
+	mac_addr1_dword0_t = flash_read_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G);
+	mac_addr1_dword1_t = flash_read_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G) & 0xffff;
+	mac_addr2_dword0_t = flash_read_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G);
+	mac_addr2_dword1_t = flash_read_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G) & 0xffff;
+	mac_addr3_dword0_t = flash_read_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G);
+	mac_addr3_dword1_t = flash_read_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G) & 0xffff;
+
+	serial_num_dword0_t = flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G);
+	serial_num_dword1_t = flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 4);
+	serial_num_dword2_t = flash_read_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G + 8);
+	printk("Old: MAC Address0 is: 0x%04x%08x\n", mac_addr0_dword1_t,
+	       mac_addr0_dword0_t);
+	printk("     MAC Address1 is: 0x%04x%08x\n", mac_addr1_dword1_t,
+	       mac_addr1_dword0_t);
+	printk("     MAC Address2 is: 0x%04x%08x\n", mac_addr2_dword1_t,
+	       mac_addr2_dword0_t);
+	printk("     MAC Address3 is: 0x%04x%08x\n", mac_addr3_dword1_t,
+	       mac_addr3_dword0_t);
+
+	for (k = 0; k < (1024 / 4); k++) {
+		num[k] = flash_read_dword(hw, 0xfe000 + k * 4);
+	}
+
+	status = fmgr_usr_cmd_op(hw, 0x6);  /* write enable */
+	status = fmgr_usr_cmd_op(hw, 0x98); /* global protection un-lock */
+	msleep(1000); /* 1 s */
+
+	/* Note: for Spansion flash, the first 8 sectors (4KB) in sector0 (64KB)
+	 * need to use a special erase command (4K sector erase)
+	 */
+	if (flash_vendor == 1) {
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720);
+		for (i = 0; i < 8; i++) {
+			flash_erase_sector(hw, i*128);
+			msleep(20); /* 20 ms */
+		}
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8);
+	}
+
+	/* Winbond flash: the erase chip command is okay, but erase sector doesn't work */
+	sector_num = size / SPI_SECTOR_SIZE;
+	if (flash_vendor == 2) {
+		status = flash_erase_chip(hw);
+		printk("Erase chip command, return status = %0d\n", status);
+		msleep(1000); /* 1 s */
+	} else {
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c720);
+		for (i = 0; i < sector_num; i++) {
+			status = flash_erase_sector(hw, i * SPI_SECTOR_SIZE);
+			printk("Erase sector[%2d] command, return status = %0d\n", i, status);
+			msleep(50); /* 50 ms */
+		}
+		wr32(hw, SPI_CMD_CFG1_ADDR, 0x0103c7d8);
+	}
+
+	/* Program the image file in dwords */
+	for (i = 0; i < size / 4; i++) {
+		read_data = data[4 * i + 3] << 24 | data[4 * i + 2] << 16 |
+			    data[4 * i + 1] << 8 | data[4 * i];
+		read_data = __le32_to_cpu(read_data);
+		skip = ((i * 4 == MAC_ADDR0_WORD0_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR0_WORD1_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR1_WORD0_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR1_WORD1_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR2_WORD0_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR2_WORD1_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR3_WORD0_OFFSET_1G) ||
+			(i * 4 == MAC_ADDR3_WORD1_OFFSET_1G) ||
+			(i * 4 >= PRODUCT_SERIAL_NUM_OFFSET_1G &&
+			 i * 4 <= PRODUCT_SERIAL_NUM_OFFSET_1G + 8));
+		if (read_data != 0xffffffff && !skip) {
+			status = flash_write_dword(hw, i * 4, read_data);
+			if (status) {
+				printk("ERROR: Programming 0x%08x @addr: 0x%08x failed !!\n",
+				       read_data, i * 4);
+				read_data = flash_read_dword(hw, i * 4);
+				printk("       Read data from Flash is: 0x%08x\n", read_data);
+				return 1;
+			}
+		}
+		if (i % 1024 == 0) {
+			printk("\b\b\b\b%3d%%", (int)(i * 4 * 100 / size));
+		}
+	}
+
+	flash_write_dword(hw, MAC_ADDR0_WORD0_OFFSET_1G,
+			  mac_addr0_dword0_t);
+	flash_write_dword(hw, MAC_ADDR0_WORD1_OFFSET_1G,
+			  (mac_addr0_dword1_t | 0x80000000)); /* lan0 */
+	flash_write_dword(hw, MAC_ADDR1_WORD0_OFFSET_1G,
+			  mac_addr1_dword0_t);
+	flash_write_dword(hw, MAC_ADDR1_WORD1_OFFSET_1G,
+			  (mac_addr1_dword1_t | 0x80000000)); /* lan1 */
+	flash_write_dword(hw, MAC_ADDR2_WORD0_OFFSET_1G,
+			  mac_addr2_dword0_t);
+	flash_write_dword(hw, MAC_ADDR2_WORD1_OFFSET_1G,
+			  (mac_addr2_dword1_t | 0x80000000)); /* lan2 */
+	flash_write_dword(hw, MAC_ADDR3_WORD0_OFFSET_1G,
+			  mac_addr3_dword0_t);
+	flash_write_dword(hw, MAC_ADDR3_WORD1_OFFSET_1G,
+			  (mac_addr3_dword1_t | 0x80000000)); /* lan3 */
+	flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G, serial_num_dword0_t);
+	flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+4, serial_num_dword1_t);
+	flash_write_dword(hw, PRODUCT_SERIAL_NUM_OFFSET_1G+8, serial_num_dword2_t);
+
+	for (n = 0; n < 1024/4; n++) {
+		if (!(num[n] == 0xffffffff))
+			flash_write_dword(hw, 0xfe000 + n * 4, num[n]);
+	}
+
+	return 0;
+}
+
+/**
+ * ngbe_set_rxpba - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom,
+		    int strategy)
+{
+	u32 pbsize = hw->mac.rx_pb_size;
+	u32 rxpktsize, txpktsize, txpbthresh;
+
+	
DEBUGFUNC("\n"); + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / num_pb) << NGBE_RDB_PB_SZ_SHIFT; + wr32(hw, NGBE_RDB_PB_SZ, rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = NGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / NGBE_KB_TO_B) - NGBE_TXPKT_SIZE_MAX; + + wr32(hw, NGBE_TDB_PB_SZ, txpktsize); + wr32(hw, NGBE_TDM_PB_THRE, txpbthresh); +} + +STATIC const u8 ngbe_emc_temp_data[4] = { + NGBE_EMC_INTERNAL_DATA, + NGBE_EMC_DIODE1_DATA, + NGBE_EMC_DIODE2_DATA, + NGBE_EMC_DIODE3_DATA +}; + +STATIC const u8 ngbe_emc_therm_limit[4] = { + NGBE_EMC_INTERNAL_THERM_LIMIT, + NGBE_EMC_DIODE1_THERM_LIMIT, + NGBE_EMC_DIODE2_THERM_LIMIT, + NGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * ngbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +s32 ngbe_get_thermal_sensor_data(struct ngbe_hw *hw) +{ + s64 tsv; + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("\n"); + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + tsv = (s64)(rd32(hw, NGBE_TS_ST) & + NGBE_TS_ST_DATA_OUT_MASK); + /* 216 < tsv < 876 */ + + tsv = tsv < 876 ? 
tsv : 876; + tsv = tsv - 216; + tsv = tsv/4; + tsv = tsv - 40; + data->sensor.temp = (s16)tsv; + + return 0; +} + +/** + * ngbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw) +{ + s32 status = 0; + + struct ngbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("\n"); + + memset(data, 0, sizeof(struct ngbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return NGBE_NOT_IMPLEMENTED; + + wr32(hw, NGBE_TS_INT_EN, NGBE_TS_INT_EN_DALARM_INT_EN | NGBE_TS_INT_EN_ALARM_INT_EN); + + wr32(hw, NGBE_TS_EN, NGBE_TS_EN_ENA); + + data->sensor.alarm_thresh = 115; + wr32(hw, NGBE_TS_ALARM_THRE, 0x344);/* magic num */ + data->sensor.dalarm_thresh = 110; + wr32(hw, NGBE_TS_DALARM_THRE, 0x330);/* magic num */ + + return status; +} + +void ngbe_disable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + DEBUGFUNC("\n"); + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + if (rxctrl & NGBE_RDB_PB_CTL_PBEN) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + if (pfdtxgswc & NGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~NGBE_RDB_PB_CTL_PBEN; + wr32(hw, NGBE_RDB_PB_CTL, rxctrl); + + /*OCP NCSI BMC need it*/ + if (!(((hw->subsystem_device_id & OEM_MASK) == OCP_CARD) || + ((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP) || + ((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP))) { + /* disable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, 0); + } + } +} + + +void ngbe_enable_rx(struct ngbe_hw *hw) +{ + u32 pfdtxgswc; + + DEBUGFUNC("\n"); + + /* enable mac receiver */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_RE, NGBE_MAC_RX_CFG_RE); + + wr32m(hw, NGBE_RSEC_CTL, + 0x2, 0); + + wr32m(hw, NGBE_RDB_PB_CTL, + NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, NGBE_PSR_CTL); + pfdtxgswc |= NGBE_PSR_CTL_SW_EN; + wr32(hw, NGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +/** + * ngbe_mng_present - returns true when manangbeent capability is present + * @hw: pointer to hardware structure + */ +bool ngbe_mng_present(struct ngbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, NGBE_MIS_ST); + return fwsm & NGBE_MIS_ST_MNG_INIT_DN; +} + +bool ngbe_check_mng_access(struct ngbe_hw *hw) +{ + if (!ngbe_mng_present(hw)) + return false; + return true; +} + +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < NGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, NGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) { + /* done */ + break; + } + msleep(200); + } + if (i == NGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = NGBE_ERR_FLASH_LOADING_FAILED; + ERROR_REPORT1(NGBE_ERROR_POLLING, + "HW Loading Flash failed: %d\n", err); + } + } + return err; +} + +/* The ngbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. 
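+ *
+ * For example, the 0x24 entry below decodes to a plain (non-tunneled)
+ * IPv4 + TCP frame with payload layer PAY4.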
+ * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT ngbe_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF ngbe_ptype_lookup[ptype].mac == NGBE_DEC_PTYPE_MAC_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum ngbe_l2_ptypes to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define NGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ NGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ NGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ NGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ NGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ NGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ NGBE_DEC_PTYPE_LAYER_##layer } + +#define NGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +ngbe_dptype ngbe_ptype_lookup[256] = { + NGBE_UKN(0x00), + NGBE_UKN(0x01), + NGBE_UKN(0x02), + NGBE_UKN(0x03), + NGBE_UKN(0x04), + NGBE_UKN(0x05), + NGBE_UKN(0x06), + NGBE_UKN(0x07), + NGBE_UKN(0x08), + NGBE_UKN(0x09), + NGBE_UKN(0x0A), + NGBE_UKN(0x0B), + NGBE_UKN(0x0C), + NGBE_UKN(0x0D), + NGBE_UKN(0x0E), + NGBE_UKN(0x0F), + + /* L2: mac */ + NGBE_UKN(0x10), + NGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + NGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + NGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + NGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1D, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + NGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + NGBE_UKN(0x20), + NGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + NGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + NGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x26), + NGBE_UKN(0x27), + NGBE_UKN(0x28), + NGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + NGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + NGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + NGBE_UKN(0x2E), + NGBE_UKN(0x2F), + + /* L2: fcoe */ + NGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x35), + NGBE_UKN(0x36), + NGBE_UKN(0x37), + NGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3B, FCOE, 
NONE, NONE, NONE, NONE, PAY3), + NGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + NGBE_UKN(0x3D), + NGBE_UKN(0x3E), + NGBE_UKN(0x3F), + + NGBE_UKN(0x40), + NGBE_UKN(0x41), + NGBE_UKN(0x42), + NGBE_UKN(0x43), + NGBE_UKN(0x44), + NGBE_UKN(0x45), + NGBE_UKN(0x46), + NGBE_UKN(0x47), + NGBE_UKN(0x48), + NGBE_UKN(0x49), + NGBE_UKN(0x4A), + NGBE_UKN(0x4B), + NGBE_UKN(0x4C), + NGBE_UKN(0x4D), + NGBE_UKN(0x4E), + NGBE_UKN(0x4F), + NGBE_UKN(0x50), + NGBE_UKN(0x51), + NGBE_UKN(0x52), + NGBE_UKN(0x53), + NGBE_UKN(0x54), + NGBE_UKN(0x55), + NGBE_UKN(0x56), + NGBE_UKN(0x57), + NGBE_UKN(0x58), + NGBE_UKN(0x59), + NGBE_UKN(0x5A), + NGBE_UKN(0x5B), + NGBE_UKN(0x5C), + NGBE_UKN(0x5D), + NGBE_UKN(0x5E), + NGBE_UKN(0x5F), + NGBE_UKN(0x60), + NGBE_UKN(0x61), + NGBE_UKN(0x62), + NGBE_UKN(0x63), + NGBE_UKN(0x64), + NGBE_UKN(0x65), + NGBE_UKN(0x66), + NGBE_UKN(0x67), + NGBE_UKN(0x68), + NGBE_UKN(0x69), + NGBE_UKN(0x6A), + NGBE_UKN(0x6B), + NGBE_UKN(0x6C), + NGBE_UKN(0x6D), + NGBE_UKN(0x6E), + NGBE_UKN(0x6F), + NGBE_UKN(0x70), + NGBE_UKN(0x71), + NGBE_UKN(0x72), + NGBE_UKN(0x73), + NGBE_UKN(0x74), + NGBE_UKN(0x75), + NGBE_UKN(0x76), + NGBE_UKN(0x77), + NGBE_UKN(0x78), + NGBE_UKN(0x79), + NGBE_UKN(0x7A), + NGBE_UKN(0x7B), + NGBE_UKN(0x7C), + NGBE_UKN(0x7D), + NGBE_UKN(0x7E), + NGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + NGBE_UKN(0x80), + NGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0x86), + NGBE_UKN(0x87), + NGBE_UKN(0x88), + NGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0x8E), + NGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + NGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + NGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + NGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, PAY3), + NGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + NGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + NGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0x96), + NGBE_UKN(0x97), + NGBE_UKN(0x98), + NGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + NGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + NGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + NGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + NGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0x9E), + NGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xA6), + NGBE_UKN(0xA7), + NGBE_UKN(0xA8), + NGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + NGBE_UKN(0xAE), + NGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + NGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + NGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + 
NGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xB6), + NGBE_UKN(0xB7), + NGBE_UKN(0xB8), + NGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xBE), + NGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + NGBE_UKN(0xC0), + NGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + NGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + NGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + NGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + NGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + NGBE_UKN(0xC6), + NGBE_UKN(0xC7), + NGBE_UKN(0xC8), + NGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + NGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + NGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + NGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + NGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + NGBE_UKN(0xCE), + NGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + NGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + NGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + NGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + NGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + NGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + NGBE_UKN(0xD6), + NGBE_UKN(0xD7), + NGBE_UKN(0xD8), + NGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + NGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + NGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + NGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + NGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + NGBE_UKN(0xDE), + NGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + NGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + NGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + NGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + NGBE_PTT(0xE3, IP, IPV6, IGM, IPV4, UDP, PAY4), + NGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + NGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + NGBE_UKN(0xE6), + NGBE_UKN(0xE7), + NGBE_UKN(0xE8), + NGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + NGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + NGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + NGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + NGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + NGBE_UKN(0xEE), + NGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + NGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + NGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + NGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + NGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + NGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + NGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + NGBE_UKN(0xF6), + NGBE_UKN(0xF7), + NGBE_UKN(0xF8), + NGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + NGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + NGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + NGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + NGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + NGBE_UKN(0xFE), + NGBE_UKN(0xFF), +}; + + +void ngbe_init_mac_link_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("\n"); + + mac->ops.setup_link = ngbe_setup_mac_link; +} + +/** + * ngbe_init_ops - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign 
the MAC type for emerald. + * Does not touch the hardware. + **/ + +s32 ngbe_init_ops(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ngbe_init_ops"); + + ngbe_init_phy_ops_common(hw); + ngbe_init_ops_common(hw); + + if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_m88e1512; + phy->ops.reset = ngbe_phy_reset_m88e1512; + phy->ops.check_event = ngbe_phy_check_event_m88e1512; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_m88e1512; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_m88e1512; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_m88e1512; + + mac->ops.check_link = ngbe_check_mac_link_mdi; + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + phy->ops.read_reg_mdi = ngbe_phy_read_reg_mdi; + phy->ops.write_reg_mdi = ngbe_phy_write_reg_mdi; + phy->ops.setup_link = ngbe_phy_setup_link_yt8521s; + phy->ops.reset = ngbe_phy_reset_yt8521s; + phy->ops.check_event = ngbe_phy_check_event_yt8521s; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause_yt8521s; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause_yt8521s; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement_yt8521s; + + mac->ops.check_link = ngbe_check_mac_link_yt8521s; + } + + return NGBE_OK; +} + +/** + * ngbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (hw->device_id == NGBE_DEV_ID_EM_TEST || + hw->device_id == NGBE_DEV_ID_EM_WX1860A2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860A4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL2S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL4S || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL_W || + hw->device_id == NGBE_DEV_ID_EM_WX1860A1 || + hw->device_id == NGBE_DEV_ID_EM_WX1860AL1 || + + hw->device_id == 0x10c || + hw->device_id == NGBE_DEV_ID_EM_WX1860NCSI) { + *speed = NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = NGBE_PHYSICAL_LAYER_1000BASE_T | + NGBE_PHYSICAL_LAYER_100BASE_TX; + } + + return status; +} + +/** + * ngbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
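+ *
+ * For this device family the reported set is effectively fixed at
+ * 10/100/1000 Mb/s full duplex (see the assignment below); only *autoneg
+ * varies, following hw->mac.autoneg.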
+**/ +s32 ngbe_get_copper_link_capabilities(struct ngbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + *speed = 0; + + if (hw->mac.autoneg) + *autoneg = true; + else + *autoneg = false; + + if (status == 0) { + *speed = NGBE_LINK_SPEED_10_FULL | + NGBE_LINK_SPEED_100_FULL | + NGBE_LINK_SPEED_1GB_FULL; + } + + return status; +} + +/** + * ngbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw) +{ + enum ngbe_media_type media_type; + + DEBUGFUNC("\n"); + + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, + "ngbe_get_media_type: hw->device_id = %u/n", hw->device_id); + + media_type = ngbe_media_type_copper; + + return media_type; +} + +/** + * ngbe_stop_mac_link_on_d3 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw) +{ + UNREFERENCED_PARAMETER(hw); + return; +} + +/** + * ngbe_setup_mac_link - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ngbe_setup_mac_link(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = 0; + u32 link_capabilities = NGBE_LINK_SPEED_UNKNOWN; + u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; + u32 lan_speed = 0; + bool link_up = false; + + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + DEBUGFUNC("\n"); + + if (!(((hw->subsystem_device_id & OEM_MASK) == OCP_CARD) || + ((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP) || + ((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP))) { + /* Check to see if speed passed in is supported. */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == NGBE_LINK_SPEED_UNKNOWN) { + status = NGBE_ERR_LINK_SETUP; + goto out; + } + } + + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + goto out; + if ((link_speed == speed) && link_up) { + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + } + +out: + return status; +} + + +/** + * ngbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
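+ *
+ * Note: the wait flag is forwarded unchanged to phy.ops.setup_link as
+ * need_restart_AN.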
+ **/ +STATIC s32 ngbe_setup_copper_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + s32 status; + struct ngbe_adapter *adapter = hw->back; + + DEBUGFUNC("\n"); + + /* Setup the PHY according to input speed */ + status = TCALL(hw, phy.ops.setup_link, speed, + need_restart_AN); + + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + + return status; +} + +int ngbe_reset_misc(struct ngbe_hw *hw) +{ + int i; + + /* receive packets that size > 2048 */ + wr32m(hw, NGBE_MAC_RX_CFG, + NGBE_MAC_RX_CFG_JE, NGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, NGBE_MMC_CONTROL, + NGBE_MMC_CONTROL_RSTONRD, NGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, NGBE_MAC_RX_FLOW_CTRL, + NGBE_MAC_RX_FLOW_CTRL_RFE, NGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, NGBE_MAC_PKT_FLT, + NGBE_MAC_PKT_FLT_PR); + + wr32m(hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_RST_INIT, 0x1E00); + + /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ + wr32(hw, NGBE_PSR_MNG_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, NGBE_PSR_MNG_FLEX_DW_L(i), 0); + wr32(hw, NGBE_PSR_MNG_FLEX_DW_H(i), 0); + wr32(hw, NGBE_PSR_MNG_FLEX_MSK(i), 0); + } + wr32(hw, NGBE_PSR_LAN_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, NGBE_PSR_LAN_FLEX_DW_L(i), 0); + wr32(hw, NGBE_PSR_LAN_FLEX_DW_H(i), 0); + wr32(hw, NGBE_PSR_LAN_FLEX_MSK(i), 0); + } + + /* set pause frame dst mac addr */ + wr32(hw, NGBE_RDB_PFCMACDAL, 0xC2000001); + wr32(hw, NGBE_RDB_PFCMACDAH, 0x0180); + + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + + if (((hw->subsystem_device_id & OEM_MASK) == LY_M88E1512_SFP) || + (hw->subsystem_device_id & OEM_MASK) == LY_YT8521S_SFP) { + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DDR, 0x1); + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + } + + ngbe_init_thermal_sensor_thresh(hw); + + return 0; +} + +/** + * ngbe_reset_hw - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ngbe_reset_hw(struct ngbe_hw *hw) +{ + s32 status; + u32 reset = 0; + u32 i; + struct ngbe_mac_info *mac = &hw->mac; + + u32 sr_pcs_ctl = 0, sr_pma_mmd_ctl1 = 0, sr_an_mmd_ctl = 0; + u32 sr_an_mmd_adv_reg2 = 0; + u32 vr_xs_or_pcs_mmd_digi_ctl1 = 0, curr_vr_xs_or_pcs_mmd_digi_ctl1 = 0; + u32 curr_sr_pcs_ctl = 0, curr_sr_pma_mmd_ctl1 = 0; + u32 curr_sr_an_mmd_ctl = 0, curr_sr_an_mmd_adv_reg2 = 0; + + u32 reset_status = 0; + u32 rst_delay = 0; + + struct ngbe_adapter *adapter = NULL; + + DEBUGFUNC("\n"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = TCALL(hw, mac.ops.stop_adapter); + if (status != 0) + goto reset_hw_out; + + /* Identify PHY and related function pointers */ + status = TCALL(hw, phy.ops.init); + + if (status) + goto reset_hw_out; + + if (ngbe_get_media_type(hw) == ngbe_media_type_copper) { + mac->ops.setup_link = ngbe_setup_copper_link; + mac->ops.get_link_capabilities = + ngbe_get_copper_link_capabilities; + } + + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
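+	 *
+	 * Otherwise only this port's LANx reset bit in NGBE_MIS_RST is
+	 * asserted (see the else branch below).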
+ */ + if (hw->force_full_reset) { + rst_delay = (rd32(hw, NGBE_MIS_RST_ST) & + NGBE_MIS_RST_ST_RST_INIT) >> + NGBE_MIS_RST_ST_RST_INI_SHIFT; + if (hw->reset_type == NGBE_SW_RESET) { + for (i = 0; i < rst_delay + 20; i++) { + reset_status = + rd32(hw, NGBE_MIS_RST_ST); + if (!(reset_status & + NGBE_MIS_RST_ST_DEV_RST_ST_MASK)) + break; + msleep(100); + } + + if (reset_status & NGBE_MIS_RST_ST_DEV_RST_ST_MASK) { + status = NGBE_ERR_RESET_FAILED; + DEBUGOUT("software reset polling failed to " + "complete.\n"); + goto reset_hw_out; + } + status = ngbe_check_flash_load(hw, + NGBE_SPI_ILDR_STATUS_SW_RESET); + if (status != 0) + goto reset_hw_out; + + } else if (hw->reset_type == NGBE_GLOBAL_RESET) { + adapter = (struct ngbe_adapter *)hw->back; + msleep(100 * rst_delay + 2000); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, false); + } + } else { + + if (hw->bus.lan_id == 0) { + reset = NGBE_MIS_RST_LAN0_RST; + } else if (hw->bus.lan_id == 1) { + reset = NGBE_MIS_RST_LAN1_RST; + } else if (hw->bus.lan_id == 2) { + reset = NGBE_MIS_RST_LAN2_RST; + } else if (hw->bus.lan_id == 3) { + reset = NGBE_MIS_RST_LAN3_RST; + } + + wr32(hw, NGBE_MIS_RST, + reset | rd32(hw, NGBE_MIS_RST)); + NGBE_WRITE_FLUSH(hw); + + msleep(15); + } + + status = ngbe_reset_misc(hw); + if (status != 0) + goto reset_hw_out; + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + vr_xs_or_pcs_mmd_digi_ctl1; + hw->mac.orig_link_settings_stored = true; + } else { + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. + */ + hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = + curr_sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + curr_vr_xs_or_pcs_mmd_digi_ctl1; + } + + /* Store the permanent mac address */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. 
+ */ + hw->mac.num_rar_entries = NGBE_SP_RAR_ENTRIES; + TCALL(hw, mac.ops.init_rx_addrs); + + pci_set_master(((struct ngbe_adapter *)hw->back)->pdev); + +reset_hw_out: + return status; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out NGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define NGBE_ATR_COMMON_HASH_KEY \ + (NGBE_ATR_BUCKET_HASH_KEY & NGBE_ATR_SIGNATURE_HASH_KEY) +#define NGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (NGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (NGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +#define NGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (NGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define NGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define NGBE_WRITE_REG_BE32(a, reg, value) \ + wr32((a), (reg), NGBE_STORE_AS_BE32(NGBE_NTOHL(value))) + +#define NGBE_STORE_AS_BE16(_value) \ + NGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +/** + * ngbe_start_hw - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ngbe_start_hw(struct ngbe_hw *hw) +{ + int ret_val = 0; + + DEBUGFUNC("\n"); + + /* Set the media type */ + hw->phy.media_type = TCALL(hw, mac.ops.get_media_type); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + TCALL(hw, mac.ops.clear_vfta); + + /* Clear statistics registers */ + TCALL(hw, mac.ops.clear_hw_cntrs); + + NGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = TCALL(hw, mac.ops.setup_fc); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ret_val; +} + +/** + * ngbe_enable_rx_dma - Enable the Rx DMA unit on emerald + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for emerald + **/ +s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval) +{ + DEBUGFUNC("\n"); + + /* + * Workaround for emerald silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. 
Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + + TCALL(hw, mac.ops.disable_sec_rx_path); + + if (regval & NGBE_RDB_PB_CTL_PBEN) + TCALL(hw, mac.ops.enable_rx); + else + TCALL(hw, mac.ops.disable_rx); + + TCALL(hw, mac.ops.enable_sec_rx_path); + + return 0; +} + +/** + * ngbe_init_flash_params - Initialize flash params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. + **/ +s32 ngbe_init_flash_params(struct ngbe_hw *hw) +{ + struct ngbe_flash_info *flash = &hw->flash; + u32 eec; + + DEBUGFUNC("\n"); + + eec = 0x1000000; + flash->semaphore_delay = 10; + flash->dword_size = (eec >> 2); + flash->address_bits = 24; + DEBUGOUT3("FLASH params: size = %d, address bits: %d\n", + flash->dword_size, + flash->address_bits); + + return 0; +} + +/** + * ngbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +s32 ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_DATA, data[i]); + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x0)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status) { + DEBUGOUT("FLASH read timed out\n"); + break; + } + } + + return status; +} + +/** + * ngbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +s32 ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = NGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(NGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, NGBE_SPI_CMD, + NGBE_SPI_CMD_ADDR(offset + i) | + NGBE_SPI_CMD_CMD(0x1)); + + status = po32m(hw, NGBE_SPI_STATUS, + NGBE_SPI_STATUS_OPDONE, NGBE_SPI_STATUS_OPDONE, + NGBE_SPI_TIMEOUT, 0); + if (status != 0) { + DEBUGOUT("FLASH write timed out\n"); + break; + } + data[i] = rd32(hw, NGBE_SPI_DATA); + } + + return status; +} + +/** + * ngbe_init_eeprom_params - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ngbe_eeprom_info within the + * ngbe_hw struct in order to set up EEPROM access. 
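+ *
+ * With the 4096-byte region used below, word_size works out to
+ * 2048 16-bit words.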
+ **/ +s32 ngbe_init_eeprom_params(struct ngbe_hw *hw) +{ + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + s32 status = 0; + + DEBUGFUNC("\n"); + + if (eeprom->type == ngbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ngbe_eeprom_none; + + if (!(rd32(hw, NGBE_SPI_STATUS) & + NGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = ngbe_flash; + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + } + + eeprom->sw_region_offset = 0x80; + + return status; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ngbe_read_ee_hostif_data(struct ngbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("\n"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u16)); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) + *data = (u16)rd32a(hw, NGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return 0; +} + +s32 ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + int tmp; + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("\n"); + buffer.hdr.req.cmd = FW_EEPROM_CHECK_STATUS; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (ngbe_check_mng_access(hw)) { + tmp = (u32)rd32a(hw, NGBE_MNG_MBOX, 1); + if (tmp == NGBE_CHECKSUM_CAP_ST_PASS) { + status = 0; + } else + status = NGBE_ERR_EEPROM_CHECKSUM; + } else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return status; +} + +s32 ngbe_phy_signal_set(struct ngbe_hw *hw) +{ + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("\n"); + buffer.hdr.req.cmd = FW_PHY_SIGNAL; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = 0; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = 0; + /* one word */ + buffer.length = 0; + + status = ngbe_host_interface_pass_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + + return status; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
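+ *
+ * This wrapper only adds locking: it takes NGBE_MNG_SWFW_SYNC_SW_FLASH
+ * around ngbe_read_ee_hostif_data() and releases it afterwards.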
+ **/ +s32 ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ngbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ngbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + u32 value = 0; + + DEBUGFUNC("\n"); + + /* Take semaphore for the entire operation. */ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = NGBE_CPU_TO_BE16(words_to_read * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, + false); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = NGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + if (ngbe_check_mng_access(hw)) + value = rd32(hw, reg); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + goto out; + } + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + TCALL(hw, mac.ops.release_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 32 bit word from the EEPROM using the hostif. 
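+ *
+ * (This is the 32-bit, semaphore-already-held variant,
+ * ngbe_read_ee_hostif_data32(), defined below.)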
+ **/ +s32 ngbe_read_ee_hostif_data32(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + s32 status; + struct ngbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("\n"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u32)); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + if (status) + return status; + if (ngbe_check_mng_access(hw)) + *data = (u32)rd32a(hw, NGBE_MNG_MBOX, FW_NVM_DATA_OFFSET); + else { + status = NGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + return 0; +} + +/** + * ngbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 32 bit word from the EEPROM using the hostif. + **/ +s32 ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 *data) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_read_ee_hostif_data32(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ngbe_write_ee_hostif_data(struct ngbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ngbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("\n"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = NGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = NGBE_CPU_TO_BE32(offset * 2); + + status = ngbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + NGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = ngbe_write_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = NGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ngbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
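+ *
+ * (This is the 32-bit variant, ngbe_write_ee_hostif_data32(); the payload
+ * length below is sizeof(u32).)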
+ **/
+s32 ngbe_write_ee_hostif_data32(struct ngbe_hw *hw, u16 offset,
+				u32 data)
+{
+	s32 status;
+	struct ngbe_hic_write_shadow_ram buffer;
+
+	DEBUGFUNC("\n");
+
+	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+	buffer.hdr.req.buf_lenh = 0;
+	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+	/* one dword */
+	buffer.length = NGBE_CPU_TO_BE16(sizeof(u32));
+	buffer.data = data;
+	buffer.address = NGBE_CPU_TO_BE32(offset * 2);
+
+	status = ngbe_host_interface_command(hw, (u32 *)&buffer,
+					     sizeof(buffer),
+					     NGBE_HI_COMMAND_TIMEOUT, false);
+
+	return status;
+}
+
+/**
+ * ngbe_write_ee_hostif32 - Write EEPROM dword using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: dword to write to the EEPROM
+ *
+ * Write a 32 bit word to the EEPROM using the hostif.
+ **/
+s32 ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset,
+			   u32 data)
+{
+	s32 status = 0;
+
+	DEBUGFUNC("\n");
+
+	if (TCALL(hw, mac.ops.acquire_swfw_sync,
+		  NGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) {
+		status = ngbe_write_ee_hostif_data32(hw, offset, data);
+		TCALL(hw, mac.ops.release_swfw_sync,
+		      NGBE_MNG_SWFW_SYNC_SW_FLASH);
+	} else {
+		DEBUGOUT("write ee hostif failed to get semaphore");
+		status = NGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+
+/**
+ * ngbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Write a 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw,
+				u16 offset, u16 words, u16 *data)
+{
+	s32 status = 0;
+	u16 i = 0;
+
+	DEBUGFUNC("\n");
+
+	/* Take semaphore for the entire operation. 
*/ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + NGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ngbe_write_ee_hostif_data(hw, offset + i, + data[i]); + + if (status != 0) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + + + +/** + * ngbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ngbe_calc_eeprom_checksum(struct ngbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!buffer) { + eeprom_ptrs = (u16 *)vmalloc(NGBE_EEPROM_LAST_WORD * + sizeof(u16)); + if (!eeprom_ptrs) + return NGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = ngbe_read_ee_hostif_buffer(hw, 0, + NGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < NGBE_EEPROM_LAST_WORD) + return NGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < NGBE_EEPROM_LAST_WORD; i++) + if (i != hw->eeprom.sw_region_offset + NGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + checksum = (u16)NGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (s32)checksum; +} + +/** + * ngbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ngbe_update_eeprom_checksum(struct ngbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("\n"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ngbe_read_ee_hostif(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ngbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_write_ee_hostif(hw, NGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + return status; +} + +/** + * ngbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("\n"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = TCALL(hw, eeprom.ops.read, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = TCALL(hw, eeprom.ops.calc_checksum); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ngbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + NGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = NGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(NGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ngbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ngbe_check_mac_link(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + DEBUGFUNC("ngbe_check_mac_link"); + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = TCALL(hw, phy.ops.read_reg, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = TCALL(hw, phy.ops.read_reg, 0x1A, 0xA43, &value); + if (!status && (value & 0x4)) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0x38; + if (*link_up) { + if (speed_sta == 0x28) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if (speed_sta == 0x18) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if (speed_sta == 0x8) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + if (*speed == NGBE_LINK_SPEED_1GB_FULL) { + status = TCALL(hw, phy.ops.read_reg, 0xA, 0x0, &value); + if (!status && !(value & 0x2000)) { + *link_up = false; + } + } + return status; +} + +s32 ngbe_check_mac_link_mdi(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + DEBUGFUNC("ngbe_check_mac_link_mdi"); + + if (hw->phy.type == ngbe_phy_m88e1512) + /* select page 0 */ + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + /* select page 1 */ + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0xC000; + if (*link_up) { + if (speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if (speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if (speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + + return status; +} + 
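+/*
+ * Illustrative sketch only (not part of this patch): callers normally reach
+ * the check_link handlers above through the mac ops table, as
+ * ngbe_setup_mac_link() does, e.g.:
+ *
+ *	u32 speed = NGBE_LINK_SPEED_UNKNOWN;
+ *	bool link_up = false;
+ *
+ *	TCALL(hw, mac.ops.check_link, &speed, &link_up, false);
+ *	if (link_up && speed == NGBE_LINK_SPEED_1GB_FULL)
+ *		... configure for gigabit ...
+ *
+ * In the MDI variant above, bits 15:14 of PHY register 17 encode the copper
+ * speed (0x8000 = 1000 Mb/s, 0x4000 = 100 Mb/s, 0x0000 = 10 Mb/s) and bit 10
+ * reports link-up; the YT8521S variant below uses the same layout on its
+ * status register 0x11.
+ */
+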
+s32 ngbe_check_mac_link_yt8521s(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + DEBUGFUNC("ngbe_check_mac_link_yt"); + + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + if (value & 0x400) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x11, 0, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + ngbe_phy_read_reg_mdi(hw, 0x11, 0, &value); + if (value & 0x400) { + *link_up = true; + //printk("yt8521: diankou link is up\n"); + } else { + *link_up = false; + //printk("yt8521: diankou link is down\n"); + } + } + } + + speed_sta = value & 0xC000; + if (*link_up) { + if (speed_sta == 0x8000) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if (speed_sta == 0x4000) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if (speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else + *speed = NGBE_LINK_SPEED_UNKNOWN; + return status; +} + +s32 ngbe_check_mac_link_zte(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 i; + u16 value = 0; + s32 status = 0; + u16 speed_sta = 0; + + DEBUGFUNC("ngbe_check_mac_link_zte"); + + /* PHY status register */ + status = TCALL(hw, phy.ops.read_reg_mdi, 0x1a, 0, &value); + + if (link_up_wait_to_complete) { + for (i = 0; i < NGBE_LINK_UP_TIME; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 0x1a, 0, &value); + /*bit 6->0x0040*/ + if (value & 0x40) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + } + } else { + status = TCALL(hw, phy.ops.read_reg_mdi, 0x1a, 0, &value); + if (value & 0x40) { + *link_up = true; + } else { + *link_up = false; + } + } + + speed_sta = value & 0xC000; + if (*link_up) { + if (speed_sta == 0x0200) { + *speed = NGBE_LINK_SPEED_1GB_FULL; + } else if (speed_sta == 0x0100) { + *speed = NGBE_LINK_SPEED_100_FULL; + } else if (speed_sta == 0x0000) { + *speed = NGBE_LINK_SPEED_10_FULL; + } + } else { + *speed = NGBE_LINK_SPEED_UNKNOWN; + } + return status; +} + +/** + * ngbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. 
+ * + **/ +s32 ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee) +{ + /* fix eee */ + UNREFERENCED_PARAMETER(hw); + UNREFERENCED_PARAMETER(enable_eee); + DEBUGFUNC("\n"); + + return 0; +} + +s32 ngbe_init_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_mac_info *mac = &hw->mac; + struct ngbe_eeprom_info *eeprom = &hw->eeprom; + struct ngbe_flash_info *flash = &hw->flash; + + /* MAC */ + mac->ops.init_hw = ngbe_init_hw; + mac->ops.clear_hw_cntrs = ngbe_clear_hw_cntrs; + mac->ops.get_mac_addr = ngbe_get_mac_addr; + mac->ops.stop_adapter = ngbe_stop_adapter; + mac->ops.get_bus_info = ngbe_get_bus_info; + mac->ops.set_lan_id = ngbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ngbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ngbe_release_swfw_sync; + mac->ops.reset_hw = ngbe_reset_hw; + mac->ops.get_media_type = ngbe_get_media_type; + mac->ops.disable_sec_rx_path = ngbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = ngbe_enable_sec_rx_path; + mac->ops.enable_rx_dma = ngbe_enable_rx_dma; + mac->ops.start_hw = ngbe_start_hw; + mac->ops.get_device_caps = ngbe_get_device_caps; + mac->ops.setup_eee = ngbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = ngbe_led_on; + mac->ops.led_off = ngbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ngbe_set_rar; + mac->ops.clear_rar = ngbe_clear_rar; + mac->ops.init_rx_addrs = ngbe_init_rx_addrs; + mac->ops.update_uc_addr_list = ngbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = ngbe_update_mc_addr_list; + mac->ops.enable_mc = ngbe_enable_mc; + mac->ops.disable_mc = ngbe_disable_mc; + mac->ops.enable_rx = ngbe_enable_rx; + mac->ops.disable_rx = ngbe_disable_rx; + mac->ops.set_vmdq_san_mac = ngbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = ngbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta = ngbe_set_vfta; + mac->ops.set_vlvf = ngbe_set_vlvf; + mac->ops.clear_vfta = ngbe_clear_vfta; + mac->ops.init_uta_tables = ngbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = ngbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ngbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + ngbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = ngbe_fc_enable; + mac->ops.setup_fc = ngbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = ngbe_get_link_capabilities; + mac->ops.check_link = ngbe_check_mac_link; + mac->ops.setup_rxpba = ngbe_set_rxpba; + + mac->mcft_size = NGBE_SP_MC_TBL_SIZE; + mac->vft_size = NGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = NGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = NGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = NGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = NGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = ngbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, NGBE_MIS_ST) & + NGBE_MIS_ST_MNG_INIT_DN) ? 
true : false; + + hw->mbx.ops.init_params = ngbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = ngbe_init_eeprom_params; + eeprom->ops.calc_checksum = ngbe_calc_eeprom_checksum; + eeprom->ops.read = ngbe_read_ee_hostif; + eeprom->ops.read_buffer = ngbe_read_ee_hostif_buffer; + eeprom->ops.read32 = ngbe_read_ee_hostif32; + eeprom->ops.write = ngbe_write_ee_hostif; + eeprom->ops.write_buffer = ngbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = ngbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = ngbe_validate_eeprom_checksum; + eeprom->ops.eeprom_chksum_cap_st = ngbe_eepromcheck_cap; + eeprom->ops.phy_signal_set = ngbe_phy_signal_set; + + /* FLASH */ + flash->ops.init_params = ngbe_init_flash_params; + flash->ops.read_buffer = ngbe_read_flash_buffer; + flash->ops.write_buffer = ngbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ngbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + ngbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + ngbe_init_thermal_sensor_thresh; + + return NGBE_OK; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_hw.h b/drivers/net/ethernet/netswift/ngbe/ngbe_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..d7f06643f258acab28d71e750751a37dc4ee6687 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_hw.h @@ -0,0 +1,280 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +#ifndef _NGBE_HW_H_ +#define _NGBE_HW_H_ + +#define NGBE_EMC_INTERNAL_DATA 0x00 +#define NGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define NGBE_EMC_DIODE1_DATA 0x01 +#define NGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define NGBE_EMC_DIODE2_DATA 0x23 +#define NGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define NGBE_EMC_DIODE3_DATA 0x2A +#define NGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +#define SPI_CLK_DIV 3 + +#define SPI_CMD_ERASE_CHIP 4 // SPI erase chip command +#define SPI_CMD_ERASE_SECTOR 3 // SPI erase sector command +#define SPI_CMD_WRITE_DWORD 0 // SPI write a dword command +#define SPI_CMD_READ_DWORD 1 // SPI read a dword command +#define SPI_CMD_USER_CMD 5 // SPI user command + +#define SPI_CLK_CMD_OFFSET 28 // SPI command field offset in Command register +#define SPI_CLK_DIV_OFFSET 25 // SPI clock divide field offset in Command register + +#define SPI_TIME_OUT_VALUE 10000 +#define SPI_SECTOR_SIZE (4 * 1024) // FLASH sector size is 4KB +#define SPI_H_CMD_REG_ADDR 0x10104 // SPI Command register address +#define SPI_H_DAT_REG_ADDR 0x10108 // SPI Data register address +#define SPI_H_STA_REG_ADDR 0x1010c // SPI Status register address +#define SPI_H_USR_CMD_REG_ADDR 0x10110 // SPI User Command register address +#define SPI_CMD_CFG1_ADDR 0x10118 // Flash command configuration register 1 +#define MISC_RST_REG_ADDR 0x1000c // Misc reset register address +#define MGR_FLASH_RELOAD_REG_ADDR 0x101a0 // MGR reload flash read + +#define MAC_ADDR0_WORD0_OFFSET_1G 0x006000c // MAC Address for LAN0, stored in external FLASH +#define MAC_ADDR0_WORD1_OFFSET_1G 0x0060014 +#define MAC_ADDR1_WORD0_OFFSET_1G 0x006800c // MAC Address for LAN1, stored in external FLASH +#define MAC_ADDR1_WORD1_OFFSET_1G 0x0068014 +#define MAC_ADDR2_WORD0_OFFSET_1G 0x007000c // MAC Address for LAN2, stored in external FLASH +#define MAC_ADDR2_WORD1_OFFSET_1G 0x0070014 +#define MAC_ADDR3_WORD0_OFFSET_1G 0x007800c // MAC Address for LAN3, stored in external FLASH +#define MAC_ADDR3_WORD1_OFFSET_1G 0x0078014 +#define PRODUCT_SERIAL_NUM_OFFSET_1G 0x00f0000 // Product Serial Number, stored in external FLASH last sector + +struct ngbe_hic_read_cab { + union ngbe_hic_hdr2 hdr; + union { + u8 d8[252]; + u16 d16[126]; + u32 d32[63]; + } dbuf; +}; + + +/** + * Packet Type decoding + **/ +/* ngbe_dec_ptype.mac: outer mac */ +enum ngbe_dec_ptype_mac { + NGBE_DEC_PTYPE_MAC_IP = 0, + NGBE_DEC_PTYPE_MAC_L2 = 2, + NGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* ngbe_dec_ptype.[e]ip: outer&encaped ip */ +#define NGBE_DEC_PTYPE_IP_FRAG (0x4) +enum ngbe_dec_ptype_ip { + NGBE_DEC_PTYPE_IP_NONE = 0, + NGBE_DEC_PTYPE_IP_IPV4 = 1, + NGBE_DEC_PTYPE_IP_IPV6 = 2, + NGBE_DEC_PTYPE_IP_FGV4 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV4), + NGBE_DEC_PTYPE_IP_FGV6 = + (NGBE_DEC_PTYPE_IP_FRAG | NGBE_DEC_PTYPE_IP_IPV6), +}; + +/* ngbe_dec_ptype.etype: encaped type */ +enum ngbe_dec_ptype_etype { + NGBE_DEC_PTYPE_ETYPE_NONE = 0, + NGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + NGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + NGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + NGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* ngbe_dec_ptype.proto: payload proto */ +enum ngbe_dec_ptype_prot { + NGBE_DEC_PTYPE_PROT_NONE = 0, + NGBE_DEC_PTYPE_PROT_UDP = 1, + NGBE_DEC_PTYPE_PROT_TCP = 2, + NGBE_DEC_PTYPE_PROT_SCTP = 3, + NGBE_DEC_PTYPE_PROT_ICMP = 4, + NGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* ngbe_dec_ptype.layer: payload layer */ +enum ngbe_dec_ptype_layer { + NGBE_DEC_PTYPE_LAYER_NONE = 0, + NGBE_DEC_PTYPE_LAYER_PAY2 = 1, + NGBE_DEC_PTYPE_LAYER_PAY3 = 2, + 
NGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct ngbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; +typedef struct ngbe_dec_ptype ngbe_dptype; + + +u16 ngbe_get_pcie_msix_count(struct ngbe_hw *hw); +s32 ngbe_init_hw(struct ngbe_hw *hw); +s32 ngbe_start_hw(struct ngbe_hw *hw); +s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw); +s32 ngbe_read_pba_string(struct ngbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr); +s32 ngbe_get_bus_info(struct ngbe_hw *hw); +void ngbe_set_pci_config_data(struct ngbe_hw *hw, u16 link_status); +void ngbe_set_lan_id_multi_port_pcie(struct ngbe_hw *hw); +s32 ngbe_stop_adapter(struct ngbe_hw *hw); + +s32 ngbe_led_on(struct ngbe_hw *hw, u32 index); +s32 ngbe_led_off(struct ngbe_hw *hw, u32 index); + +s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index); +s32 ngbe_init_rx_addrs(struct ngbe_hw *hw); +s32 ngbe_update_mc_addr_list(struct ngbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ngbe_mc_addr_itr func, bool clear); +s32 ngbe_update_uc_addr_list(struct ngbe_hw *hw, u8 *addr_list, + u32 addr_count, ngbe_mc_addr_itr func); +s32 ngbe_enable_mc(struct ngbe_hw *hw); +s32 ngbe_disable_mc(struct ngbe_hw *hw); +s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw); +s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw); + +s32 ngbe_fc_enable(struct ngbe_hw *hw); +void ngbe_fc_autoneg(struct ngbe_hw *hw); +s32 ngbe_setup_fc(struct ngbe_hw *hw); + +s32 ngbe_validate_mac_addr(u8 *mac_addr); +s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask); +void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask); +s32 ngbe_disable_pcie_master(struct ngbe_hw *hw); + +s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +s32 ngbe_set_vmdq_san_mac(struct ngbe_hw *hw, u32 vmdq); +s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq); +s32 ngbe_insert_mac_addr(struct ngbe_hw *hw, u8 *addr, u32 vmdq); +s32 ngbe_init_uta_tables(struct ngbe_hw *hw); +s32 ngbe_set_vfta(struct ngbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ngbe_set_vlvf(struct ngbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 ngbe_clear_vfta(struct ngbe_hw *hw); +s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan); + +void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int pf); +void ngbe_set_vlan_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf); +void ngbe_set_ethertype_anti_spoofing(struct ngbe_hw *hw, + bool enable, int vf); +s32 ngbe_get_device_caps(struct ngbe_hw *hw, u16 *device_caps); +void ngbe_set_rxpba(struct ngbe_hw *hw, int num_pb, u32 headroom, + int strategy); +s32 ngbe_set_fw_drv_ver(struct ngbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +s32 ngbe_reset_hostif(struct ngbe_hw *hw); +u8 ngbe_calculate_checksum(u8 *buffer, u32 length); +s32 ngbe_host_interface_command(struct ngbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void ngbe_clear_tx_pending(struct ngbe_hw *hw); +void ngbe_stop_mac_link_on_d3(struct ngbe_hw *hw); +bool ngbe_mng_present(struct ngbe_hw *hw); +bool ngbe_check_mng_access(struct ngbe_hw *hw); + +s32 ngbe_get_thermal_sensor_data(struct ngbe_hw *hw); +s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw); +void ngbe_enable_rx(struct ngbe_hw *hw); +void ngbe_disable_rx(struct ngbe_hw *hw); +s32 
ngbe_setup_mac_link_multispeed_fiber(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int ngbe_check_flash_load(struct ngbe_hw *hw, u32 check_bit); + +/* @ngbe_api.h */ +void ngbe_atr_compute_perfect_hash(union ngbe_atr_input *input, + union ngbe_atr_input *mask); +u32 ngbe_atr_compute_sig_hash(union ngbe_atr_hash_dword input, + union ngbe_atr_hash_dword common); + +s32 ngbe_get_link_capabilities(struct ngbe_hw *hw, + u32 *speed, bool *autoneg); +enum ngbe_media_type ngbe_get_media_type(struct ngbe_hw *hw); +void ngbe_disable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_enable_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_flap_tx_laser_multispeed_fiber(struct ngbe_hw *hw); +void ngbe_set_hard_rate_select_speed(struct ngbe_hw *hw, + u32 speed); +s32 ngbe_setup_mac_link(struct ngbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void ngbe_init_mac_link_ops(struct ngbe_hw *hw); +s32 ngbe_reset_hw(struct ngbe_hw *hw); +s32 ngbe_identify_phy(struct ngbe_hw *hw); +s32 ngbe_init_ops_common(struct ngbe_hw *hw); +s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval); +s32 ngbe_init_ops(struct ngbe_hw *hw); +s32 ngbe_setup_eee(struct ngbe_hw *hw, bool enable_eee); + +s32 ngbe_init_flash_params(struct ngbe_hw *hw); +s32 ngbe_read_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +s32 ngbe_write_flash_buffer(struct ngbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +s32 ngbe_read_eeprom(struct ngbe_hw *hw, + u16 offset, u16 *data); +s32 ngbe_read_eeprom_buffer(struct ngbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ngbe_init_eeprom_params(struct ngbe_hw *hw); +s32 ngbe_update_eeprom_checksum(struct ngbe_hw *hw); +s32 ngbe_calc_eeprom_checksum(struct ngbe_hw *hw); +s32 ngbe_validate_eeprom_checksum(struct ngbe_hw *hw, + u16 *checksum_val); +s32 ngbe_upgrade_flash(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); +s32 ngbe_write_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ngbe_write_ee_hostif(struct ngbe_hw *hw, u16 offset, + u16 data); +s32 ngbe_write_ee_hostif32(struct ngbe_hw *hw, u16 offset, + u32 data); + +s32 ngbe_read_ee_hostif_buffer(struct ngbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ngbe_read_ee_hostif(struct ngbe_hw *hw, u16 offset, u16 *data); + +s32 ngbe_read_ee_hostif32(struct ngbe_hw *hw, u16 offset, u32 *data); + +u32 ngbe_rd32_epcs(struct ngbe_hw *hw, u32 addr); +void ngbe_wr32_epcs(struct ngbe_hw *hw, u32 addr, u32 data); +void ngbe_wr32_ephy(struct ngbe_hw *hw, u32 addr, u32 data); +s32 ngbe_upgrade_flash_hostif(struct ngbe_hw *hw, u32 region, + const u8 *data, u32 size); + +s32 ngbe_check_mac_link_zte(struct ngbe_hw *hw, + u32 *speed, + bool *link_up, + bool link_up_wait_to_complete); + +s32 ngbe_eepromcheck_cap(struct ngbe_hw *hw, u16 offset, + u32 *data); +s32 ngbe_phy_signal_set(struct ngbe_hw *hw); + +#endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_lib.c b/drivers/net/ethernet/netswift/ngbe/ngbe_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..200fc34e9fa894f749d580c19d7224fa4ac72363 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_lib.c @@ -0,0 +1,701 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe.h" +#include "ngbe_sriov.h" + +/** + * ngbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. + * + **/ +static bool ngbe_cache_ring_vmdq(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + + for (i = 0; i < adapter->num_rx_queues; i++) { + + /* If we are greater than indices move to next pool */ + adapter->rx_ring[i]->reg_idx = reg_idx + i; + } + + reg_idx = vmdq->offset; + for (i = 0; i < adapter->num_tx_queues; i++) { + + /* If we are greater than indices move to next pool */ + adapter->tx_ring[i]->reg_idx = reg_idx + i; + } + + return true; +} + +/** + * ngbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static bool ngbe_cache_ring_rss(struct ngbe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * ngbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ngbe_cache_ring_register(struct ngbe_adapter *adapter) +{ + if (ngbe_cache_ring_vmdq(adapter)) + return; + + ngbe_cache_ring_rss(adapter); +} + +#define NGBE_RSS_64Q_MASK 0x3F +#define NGBE_RSS_16Q_MASK 0xF +#define NGBE_RSS_8Q_MASK 0x7 +#define NGBE_RSS_4Q_MASK 0x3 +#define NGBE_RSS_2Q_MASK 0x1 +#define NGBE_RSS_DISABLED_MASK 0x0 + +/** + * ngbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
+ * + **/ +static bool ngbe_set_vmdq_queues(struct ngbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = NGBE_RSS_DISABLED_MASK; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, NGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* when VMDQ on, disable RSS */ + rss_i = 1; + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i; + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + return true; +} + +/** + * ngbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool ngbe_set_rss_queues(struct ngbe_adapter *adapter) +{ + struct ngbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = NGBE_RSS_8Q_MASK; + + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; +} + +/* + * ngbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ngbe_set_num_queues(struct ngbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->queues_per_pool = 1; + + if (ngbe_set_vmdq_queues(adapter)) + return; + + ngbe_set_rss_queues(adapter); + +} + +/** + * ngbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ngbe_acquire_msix_vectors(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & NGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. 
+ */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + /* we need to alloc (7vfs+1pf+1misc) or (8vfs+1misc) msix entries */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + vectors += adapter->ring_feature[RING_F_VMDQ].offset; + } + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + if (vectors < 9) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + e_dev_warn("Remain available irqs < 9. Disable MISC IRQ REMAP.\n"); + } else + vectors -= adapter->ring_feature[RING_F_VMDQ].offset; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= NGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ngbe_add_ring(struct ngbe_ring *ring, + struct ngbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ngbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int ngbe_alloc_q_vector(struct ngbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct ngbe_q_vector *q_vector; + struct ngbe_ring *ring; + int node = -1; + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + int ring_count, size; + + /* note this will allocate space for the ring structure as well! 
*/ + ring_count = txr_count + rxr_count; + size = sizeof(struct ngbe_q_vector) + + (sizeof(struct ngbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ngbe_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = NGBE_7K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ngbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ngbe_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ngbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void ngbe_free_q_vector(struct ngbe_adapter *adapter, int v_idx) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ngbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * ngbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ngbe_alloc_q_vectors(struct ngbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = ngbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ngbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ngbe_free_q_vectors(struct ngbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ngbe_free_q_vector(adapter, v_idx); +} + +void ngbe_reset_interrupt_capability(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~NGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ngbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +void ngbe_set_interrupt_capability(struct ngbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ngbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. 
+ */ + /* Disable VMDq support */ + e_dev_warn("Disabling VMDq support\n"); + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + +#ifdef CONFIG_PCI_IOV + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + ngbe_disable_sriov(adapter); + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; +#endif /* CONFIG_PCI_IOV */ + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + ngbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + if (!(adapter->flags & NGBE_FLAG_MSI_CAPABLE)) + return; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to " + "legacy. Error: %d\n", + err); + else + adapter->flags |= NGBE_FLAG_MSI_ENABLED; +} + +/** + * ngbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ngbe_init_interrupt_scheme(struct ngbe_adapter *adapter) +{ + int err; + + /* if assigned vfs >= 7, the PF queue IRQs remain at seq 0 and the misc IRQ + * moves from seq 1 to seq 8, which needs extra handling. + */ + if (adapter->num_vfs >= NGBE_MAX_VF_FUNCTIONS - 1) { + adapter->flags2 |= NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + } + + /* Number of supported queues */ + ngbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ngbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = ngbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + ngbe_reset_interrupt_capability(adapter); + return err; + } + + ngbe_cache_ring_register(adapter); + + set_bit(__NGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ngbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ngbe_clear_interrupt_scheme(struct ngbe_adapter *adapter) +{ + ngbe_free_q_vectors(adapter); + ngbe_reset_interrupt_capability(adapter); + + /* clear the misc IRQ remap flag */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + adapter->flags2 &= ~NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP; + } +} + +void ngbe_tx_ctxtdesc(struct ngbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ngbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = NGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= NGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_main.c b/drivers/net/ethernet/netswift/ngbe/ngbe_main.c new file mode 100644 index 0000000000000000000000000000000000000000..e0565ee67543b60043ea8fbf205440298e028626 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_main.c @@ -0,0 +1,7160 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_sriov.h" +#include "ngbe_hw.h" +#include "ngbe_phy.h" +#include "ngbe_pcierr.h" + +char ngbe_driver_name[32] = NGBE_NAME; +static const char ngbe_driver_string[] = + "WangXun Gigabit PCI Express Network Driver"; + +#define DRV_VERSION __stringify(1.1.0oe) + +const char ngbe_driver_version[32] = DRV_VERSION; +static const char ngbe_copyright[] = + "Copyright (c) 2018 -2019 Beijing WangXun Technology Co., Ltd"; +static const char ngbe_overheat_msg[] = + "Network adapter has been stopped because it has over heated. 
" + "If the problem persists, restart the computer, or " + "power off the system and replace the adapter"; +static const char ngbe_underheat_msg[] = + "Network adapter has been started again since the temperature " + "has been back to normal state"; + +/* ngbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ngbe_pci_tbl[] = { + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_TEST), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL2S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL4S), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL_W), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860NCSI), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860AL1), 0}, + { PCI_VDEVICE(TRUSTNETIC, NGBE_DEV_ID_EM_WX1860A1), 0}, + { PCI_VDEVICE(TRUSTNETIC, 0x10c), 0}, + /* required last entry */ + { .device = 0 } +}; +MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl); + +MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, "); +MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static struct workqueue_struct *ngbe_wq; + +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev); +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring); +static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring); + +extern ngbe_dptype ngbe_ptype_lookup[256]; + +static inline ngbe_dptype ngbe_decode_ptype(const u8 ptype) +{ + return ngbe_ptype_lookup[ptype]; +} + +static inline ngbe_dptype +decode_rx_desc_ptype(const union ngbe_rx_desc *rx_desc) +{ + return ngbe_decode_ptype(NGBE_RXD_PKTTYPE(rx_desc)); +} + +static void ngbe_check_minimum_link(struct ngbe_adapter *adapter, + int expected_gts) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev; + + /* Some devices are not connected over PCIe and thus do not negotiate + * speed. These devices do not have valid bus info, and thus any report + * we generate may not be correct. + */ + if (hw->bus.type == ngbe_bus_type_internal) + return; + + pdev = adapter->pdev; + + pcie_print_link_status(pdev); +} + +/** + * ngbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ngbe_enumerate_functions(struct ngbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { +#ifdef CONFIG_PCI_IOV + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; +#endif + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. 
This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. + */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + + return physfns; +} + +void ngbe_service_event_schedule(struct ngbe_adapter *adapter) +{ + if (!test_bit(__NGBE_DOWN, &adapter->state) && + !test_bit(__NGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__NGBE_SERVICE_SCHED, &adapter->state)) + queue_work(ngbe_wq, &adapter->service_task); +} + +static void ngbe_service_event_complete(struct ngbe_adapter *adapter) +{ + BUG_ON(!test_bit(__NGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); +} + +static void ngbe_remove_adapter(struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__NGBE_SERVICE_INITED, &adapter->state)) + ngbe_service_event_schedule(adapter); +} + +static void ngbe_check_remove(struct ngbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned NGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == NGBE_CFG_PORT_ST) { + ngbe_remove_adapter(hw); + return; + } + value = rd32(hw, NGBE_CFG_PORT_ST); + if (value == NGBE_FAILED_READ_REG) + ngbe_remove_adapter(hw); +} + +static u32 ngbe_validate_register_read(struct ngbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct ngbe_adapter *adapter = hw->back; + + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + for (i = 0; i < NGBE_DEAD_READ_RETRIES; ++i) { + value = ngbe_rd32(reg_addr + reg); + if (value != NGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == NGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +/** + * ngbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or NGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns NGBE_FAILED_READ_REG (all ones). 
+ */ +u32 ngbe_read_reg(struct ngbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (NGBE_REMOVED(reg_addr)) + return NGBE_FAILED_READ_REG; + value = ngbe_rd32(reg_addr + reg); + if (unlikely(value == NGBE_FAILED_READ_REG)) + ngbe_check_remove(hw, reg); + if (unlikely(value == NGBE_DEAD_READ_REG)) + value = ngbe_validate_register_read(hw, reg, quiet); + return value; +} + +static void ngbe_release_hw_control(struct ngbe_adapter *adapter) +{ + /* Let firmware take over control of h/w */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, 0); +} + +static void ngbe_get_hw_control(struct ngbe_adapter *adapter) +{ + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_DRV_LOAD, NGBE_CFG_PORT_CTL_DRV_LOAD); +} + +/** + * ngbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void ngbe_set_ivar(struct ngbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + struct ngbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, NGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + /* if assigned VFs >= 7, the pf misc irq shall be remapped to 0x88. */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + ivar = msix_vector; + wr32(&adapter->hw, NGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= NGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, NGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, NGBE_PX_IVAR(queue >> 1), ivar); + } +} + +void ngbe_unmap_and_free_tx_resource(struct ngbe_ring *ring, + struct ngbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static void ngbe_update_xoff_rx_lfc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if ((hw->fc.current_mode != ngbe_fc_full) && + (hw->fc.current_mode != ngbe_fc_rx_pause)) + return; + + data = rd32(hw, NGBE_MAC_LXOFFRXC); + + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__NGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); +} + +static u64 ngbe_get_tx_completed(struct ngbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 ngbe_get_tx_pending(struct ngbe_ring *ring) +{ + struct ngbe_adapter *adapter; + struct ngbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = 
ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, NGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, NGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool ngbe_check_tx_hang(struct ngbe_ring *tx_ring) +{ + u64 tx_done = ngbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = ngbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if (tx_done_old == tx_done && tx_pending) { + + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__NGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__NGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +/** + * ngbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ngbe_tx_timeout(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + bool real_tx_hang = false; + int i; + u16 value = 0; + u32 value2 = 0; + u32 head, tail; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + if (check_for_tx_hang(tx_ring) && ngbe_check_tx_hang(tx_ring)) { + real_tx_hang = true; + e_info(drv, "&&ngbe_tx_timeout:i=%d&&", i); + } + } + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", value); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", value); + + value2 = rd32(&adapter->hw, 0x10000); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x10000 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw, 0x180d0); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x180d0 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw, 0x180d4); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x180d4 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw, 0x180d8); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x180d8 value is 0x%08x\n", value2); + value2 = rd32(&adapter->hw, 0x180dc); + ERROR_REPORT1(NGBE_ERROR_POLLING, "reg 0x180dc value is 0x%08x\n", value2); + + for (i = 0; i < adapter->num_tx_queues; i++) { + head = rd32(&adapter->hw, NGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); + tail = rd32(&adapter->hw, NGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx)); + + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail); + } + + value2 = rd32(&adapter->hw, NGBE_PX_IMS); + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PX_IMS value is 0x%08x\n", value2); + + if (value2) { + ERROR_REPORT1(NGBE_ERROR_POLLING, "clear interrupt 
mask.\n"); + wr32(&adapter->hw, NGBE_PX_ICS, value2); + wr32(&adapter->hw, NGBE_PX_IMC, value2); + } + + if (adapter->hw.bus.lan_id == 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, "tx timeout. do pcie recovery.\n"); + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + ngbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); +} + +/** + * ngbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ngbe_clean_tx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *tx_ring) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = NGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ngbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(NGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = NGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring)) { + if (!ngbe_check_tx_hang(tx_ring)) { + adapter->hang_cnt = 0; + } else + adapter->hang_cnt++; + + if (adapter->hang_cnt >= 5) { + /* schedule immediate reset if we believe we hung */ + struct ngbe_hw *hw = &adapter->hw; + u16 value = 0; + + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + 
"tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + rd32(hw, NGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, NGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + if (value == NGBE_FAILED_READ_CFG_WORD) { + e_info(hw, "pcie link has been lost.\n"); + } + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + e_info(hw, "real tx hang. do pcie recovery.\n"); + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + ngbe_service_event_schedule(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + } + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ngbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__NGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +#define NGBE_RSS_L4_TYPES_MASK \ + ((1ul << NGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << NGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void ngbe_rx_hash(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (NGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * ngbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ngbe_rx_checksum(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ngbe_dptype dptype = decode_rx_desc_ptype(rx_desc); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_IPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_IPE)) || + (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_OUTERIPCS) && + ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != NGBE_DEC_PTYPE_PROT_SCTP && NGBE_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (ngbe_test_staterr(rx_desc, NGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. + */ + if (dptype.etype >= NGBE_DEC_PTYPE_ETYPE_IG) { + skb->csum_level = 1; + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +static bool ngbe_alloc_mapped_skb(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +static bool ngbe_alloc_mapped_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ngbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ngbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ngbe_alloc_rx_buffers - Replace used receive buffers + * 
@rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ngbe_alloc_rx_buffers(struct ngbe_ring *rx_ring, u16 cleaned_count) +{ + union ngbe_rx_desc *rx_desc; + struct ngbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = NGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (ring_is_hs_enabled(rx_ring)) { + if (!ngbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!ngbe_alloc_mapped_page(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = NGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static inline u16 ngbe_get_hlen(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & NGBE_RXD_HDRBUFLEN_MASK; + + UNREFERENCED_PARAMETER(rx_ring); + + if (hlen > (NGBE_RX_HDR_SIZE << NGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= NGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +static void ngbe_rx_vlan(struct ngbe_ring *ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u8 idx = 0; + u16 ethertype; + + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + NGBE_RXD_TPID_MASK) >> NGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + +/** + * ngbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
+ **/ +static void ngbe_process_skb_fields(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 flags = rx_ring->q_vector->adapter->flags; + + ngbe_rx_hash(rx_ring, rx_desc, skb); + ngbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(flags & NGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_TS))) { + ngbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } + + ngbe_rx_vlan(rx_ring, rx_desc, skb); + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ngbe_rx_skb(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * ngbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ngbe_is_non_eop(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ngbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NGBE_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; + + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * ngbe_pull_tail - ngbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an ngbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ngbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, NGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ngbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ +static void ngbe_dma_sync_frag(struct ngbe_ring *rx_ring, + struct sk_buff *skb) +{ + if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + NGBE_CB(skb)->dma, + frag->page_offset, + skb_frag_size(frag), + DMA_FROM_DEVICE); + } + + /* if the page was released unmap it */ + if (unlikely(NGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, NGBE_CB(skb)->dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + NGBE_RX_DMA_ATTR); + } +} + +/** + * ngbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool ngbe_cleanup_headers(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (unlikely(ngbe_test_staterr(rx_desc, + NGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + ngbe_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ngbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ngbe_reuse_rx_page(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *old_buff) +{ + struct ngbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, + new_buff->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ngbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * ngbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool ngbe_add_rx_frag(struct ngbe_ring *rx_ring, + struct ngbe_rx_buffer *rx_buffer, + union ngbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ngbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ngbe_rx_pg_size(rx_ring) - + ngbe_rx_bufsz(rx_ring); +#endif + + if ((size <= NGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb) && + !ring_is_hs_enabled(rx_ring)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ngbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ngbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ngbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *ngbe_fetch_rx_buffer(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + NGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. Only unmap it when EOP is + * reached + */ + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + + NGBE_CB(skb)->dma = rx_buffer->page_dma; + } else { + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) + ngbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static struct sk_buff *ngbe_fetch_rx_buffer_hs(struct ngbe_ring *rx_ring, + union ngbe_rx_desc *rx_desc) +{ + struct ngbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + int hdr_len = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + rx_buffer->skb = NULL; + prefetchw(skb->data); + + if (!skb_is_nonlinear(skb)) { + hdr_len = ngbe_get_hlen(rx_ring, rx_desc); + if (hdr_len > 0) { + __skb_put(skb, hdr_len); + NGBE_CB(skb)->dma_released = true; + NGBE_CB(skb)->dma = rx_buffer->dma; + rx_buffer->dma = 0; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (likely(ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP))) + goto dma_sync; + NGBE_CB(skb)->dma = rx_buffer->page_dma; + goto add_frag; + } + } + + if (ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_EOP)) { + if (skb_headlen(skb)) { + if (NGBE_CB(skb)->dma_released == true) { + dma_unmap_single(rx_ring->dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + } else + ngbe_dma_sync_frag(rx_ring, skb); + } + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + 
dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +add_frag: + /* pull page into skb */ + if (ngbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ngbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (NGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + NGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * ngbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns amount of work completed. + **/ +static int ngbe_clean_rx_irq(struct ngbe_q_vector *q_vector, + struct ngbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ngbe_desc_unused(rx_ring); + + do { + union ngbe_rx_desc *rx_desc; + struct sk_buff *skb; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= NGBE_RX_BUFFER_WRITE) { + ngbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = NGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!ngbe_test_staterr(rx_desc, NGBE_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + if (ring_is_hs_enabled(rx_ring)) + skb = ngbe_fetch_rx_buffer_hs(rx_ring, rx_desc); + else + skb = ngbe_fetch_rx_buffer(rx_ring, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ngbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ngbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ngbe_process_skb_fields(rx_ring, rx_desc, skb); + + ngbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * ngbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ngbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. 
+ **/ +static void ngbe_configure_msix(struct ngbe_adapter *adapter) +{ + u16 v_idx; + u32 i; + u32 eitrsel = 0; + + /* Populate MSIX to EITR Select */ + if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED)) + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + else { + for (i = 0; i < adapter->num_vfs; i++) { + eitrsel |= 1 << i; + } + wr32(&adapter->hw, NGBE_PX_ITRSEL, eitrsel); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ngbe_ring *ring; + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ngbe_write_eitr(q_vector); + } + + /* misc ivar from seq 1 to seq 8 */ + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + v_idx += adapter->ring_feature[RING_F_VMDQ].offset; + + ngbe_set_ivar(adapter, -1, 0, v_idx); + wr32(&adapter->hw, NGBE_PX_ITR(v_idx), 1950); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ngbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. + */ +void ngbe_write_eitr(struct ngbe_q_vector *q_vector) +{ + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & NGBE_MAX_EITR; + + itr_reg |= NGBE_PX_ITR_CNT_WDIS; + + wr32(hw, NGBE_PX_ITR(v_idx), itr_reg); +} + +/** + * ngbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ngbe_check_overtemp_subtask(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + s32 temp_state; + + if (test_bit(__NGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. 
+ * - We may have missed the interrupt so always have to
+ * check if we got an LSC
+ */
+ if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT))
+ return;
+
+ temp_state = ngbe_phy_check_overtemp(hw);
+ if (!temp_state || temp_state == NGBE_NOT_IMPLEMENTED)
+ return;
+
+ if (temp_state == NGBE_ERR_UNDERTEMP &&
+ test_bit(__NGBE_HANGING, &adapter->state)) {
+ e_crit(drv, "%s\n", ngbe_underheat_msg);
+ wr32m(&adapter->hw, NGBE_RDB_PB_CTL,
+ NGBE_RDB_PB_CTL_PBEN, NGBE_RDB_PB_CTL_PBEN);
+ netif_carrier_on(adapter->netdev);
+ clear_bit(__NGBE_HANGING, &adapter->state);
+ } else if (temp_state == NGBE_ERR_OVERTEMP &&
+ !test_and_set_bit(__NGBE_HANGING, &adapter->state)) {
+ e_crit(drv, "%s\n", ngbe_overheat_msg);
+ netif_carrier_off(adapter->netdev);
+ wr32m(&adapter->hw, NGBE_RDB_PB_CTL,
+ NGBE_RDB_PB_CTL_PBEN, 0);
+ }
+
+ adapter->interrupt_event = 0;
+}
+
+static void ngbe_check_overtemp_event(struct ngbe_adapter *adapter, u32 eicr)
+{
+ if (!(adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ return;
+
+ if (!(eicr & NGBE_PX_MISC_IC_OVER_HEAT))
+ return;
+ if (!test_bit(__NGBE_DOWN, &adapter->state)) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ngbe_service_event_schedule(adapter);
+ }
+}
+
+
+static void ngbe_handle_phy_event(struct ngbe_hw *hw)
+{
+ struct ngbe_adapter *adapter = hw->back;
+ u32 reg;
+
+ reg = rd32(hw, NGBE_GPIO_INTSTATUS);
+ wr32(hw, NGBE_GPIO_EOI, reg);
+ TCALL(hw, phy.ops.check_event);
+ adapter->lsc_int++;
+ adapter->link_check_timeout = jiffies;
+ if (!test_bit(__NGBE_DOWN, &adapter->state)) {
+ ngbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ngbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+void ngbe_irq_enable(struct ngbe_adapter *adapter, bool queues, bool flush)
+{
+ u32 mask = 0;
+
+ /* enable misc interrupt */
+ mask = NGBE_PX_MISC_IEN_MASK;
+
+ if (adapter->flags2 & NGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ mask |= NGBE_PX_MISC_IEN_OVER_HEAT;
+
+ mask |= NGBE_PX_MISC_IEN_TIMESYNC;
+
+ wr32(&adapter->hw, NGBE_GPIO_DDR, 0x1);
+ wr32(&adapter->hw, NGBE_GPIO_INTEN, 0x3);
+ wr32(&adapter->hw, NGBE_GPIO_INTTYPE_LEVEL, 0x0);
+ if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi)
+ wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x0);
+ else
+ wr32(&adapter->hw, NGBE_GPIO_POLARITY, 0x3);
+
+ if (adapter->hw.phy.type == ngbe_phy_yt8521s_sfi)
+ mask |= NGBE_PX_MISC_IEN_GPIO;
+
+ wr32(&adapter->hw, NGBE_PX_MISC_IEN, mask);
+
+ /* unmask interrupt */
+ if (queues)
+ ngbe_intr_enable(&adapter->hw, NGBE_INTR_ALL);
+ else {
+ if (!(adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP))
+ ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC(adapter));
+ else
+ ngbe_intr_enable(&adapter->hw, NGBE_INTR_MISC_VMDQ(adapter));
+ }
+
+ /* flush configuration */
+ if (flush)
+ NGBE_WRITE_FLUSH(&adapter->hw);
+}
+
+static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+{
+ struct ngbe_adapter *adapter = data;
+ struct ngbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 ecc;
+ u16 pci_val = 0;
+
+ eicr = ngbe_misc_isb(adapter, NGBE_ISB_MISC);
+ if (eicr & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO))
+ ngbe_handle_phy_event(hw);
+
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ ngbe_msg_task(adapter);
+
+ if (eicr & NGBE_PX_MISC_IC_PCIE_REQ_ERR) {
+ ERROR_REPORT1(NGBE_ERROR_POLLING,
+ "lan id %d, PCIe request error found.\n", hw->bus.lan_id);
+
+ pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &pci_val);
+ ERROR_REPORT1(NGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", pci_val);
+
+
pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_val); + ERROR_REPORT1(NGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", pci_val); + + if (hw->bus.lan_id == 0) { + adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER; + ngbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, NGBE_MIS_PF_SM, 1); + } + + if (eicr & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + if (((ecc & NGBE_MIS_ST_LAN0_ECC) && (hw->bus.lan_id == 0)) || + ((ecc & NGBE_MIS_ST_LAN1_ECC) && (hw->bus.lan_id == 1))) + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + + ngbe_service_event_schedule(adapter); + } + if (eicr & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + if ((eicr & NGBE_PX_MISC_IC_STALL) || + (eicr & NGBE_PX_MISC_IC_ETH_EVENT)) { + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + ngbe_check_overtemp_event(adapter, eicr); + + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ngbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct ngbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ngbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + **/ +int ngbe_poll(struct napi_struct *napi, int budget) +{ + struct ngbe_q_vector *q_vector = + container_of(napi, struct ngbe_q_vector, napi); + struct ngbe_adapter *adapter = q_vector->adapter; + struct ngbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ngbe_for_each_ring(ring, q_vector->tx) { + if (!ngbe_clean_tx_irq(q_vector, ring)) + clean_complete = false; + } + + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ngbe_for_each_ring(ring, q_vector->rx) { + int cleaned = ngbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_intr_enable(&adapter->hw, + NGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +/** + * ngbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ngbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. 
+ **/ +static int ngbe_request_msix_irqs(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ngbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt" + " '%s' Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector += adapter->ring_feature[RING_F_VMDQ].offset; + + err = request_irq(adapter->msix_entries[vector].vector, + ngbe_msix_other, 0, netdev->name, adapter); + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) + vector -= adapter->ring_feature[RING_F_VMDQ].offset; + + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); + + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~NGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ngbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ngbe_intr(int __always_unused irq, void *data) +{ + struct ngbe_adapter *adapter = data; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + u32 eicr_misc; + u32 ecc = 0; + + eicr = ngbe_misc_isb(adapter, NGBE_ISB_VEC0); + if (!eicr) { + /* + * shared interrupt alert! + * the interrupt that we masked before the EICR read. 
+ */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + adapter->isb_mem[NGBE_ISB_VEC0] = 0; + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED)) + wr32(&(adapter->hw), NGBE_PX_INTA, 1); + + eicr_misc = ngbe_misc_isb(adapter, NGBE_ISB_MISC); + if (eicr_misc & (NGBE_PX_MISC_IC_PHY | NGBE_PX_MISC_IC_GPIO)) + ngbe_handle_phy_event(hw); + + if (eicr_misc & NGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, NGBE_MIS_ST); + e_info(link, "ecc error status is 0x%08x\n", ecc); + adapter->flags2 |= NGBE_FLAG2_DEV_RESET_REQUESTED; + ngbe_service_event_schedule(adapter); + } + + if (eicr_misc & NGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= NGBE_FLAG2_RESET_INTR_RECEIVED; + ngbe_service_event_schedule(adapter); + } + ngbe_check_overtemp_event(adapter, eicr_misc); + + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + ngbe_ptp_check_pps_event(adapter); + + adapter->isb_mem[NGBE_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ngbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__NGBE_DOWN, &adapter->state)) + ngbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ngbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ngbe_request_irq(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + err = ngbe_request_msix_irqs(adapter); + else if (adapter->flags & NGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &ngbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &ngbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ngbe_free_irq(struct ngbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ngbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, q_vector); + } + + if (adapter->flags2 & NGBE_FLAG2_SRIOV_MISC_IRQ_REMAP) { + free_irq( + adapter->msix_entries[vector + adapter->ring_feature[RING_F_VMDQ].offset].vector, + adapter); + } else + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * ngbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +void ngbe_irq_disable(struct ngbe_adapter *adapter) +{ + wr32(&adapter->hw, NGBE_PX_MISC_IEN, 0); + ngbe_intr_disable(&adapter->hw, NGBE_INTR_ALL); + + NGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } 
else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ngbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ngbe_configure_msi_and_legacy(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector = adapter->q_vector[0]; + struct ngbe_ring *ring; + + ngbe_write_eitr(q_vector); + + ngbe_for_each_ring(ring, q_vector->rx) + ngbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + ngbe_for_each_ring(ring, q_vector->tx) + ngbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + ngbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ngbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. + **/ +void ngbe_configure_tx_ring(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = NGBE_PX_TR_CFG_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), NGBE_PX_TR_CFG_SWFLSH); + NGBE_WRITE_FLUSH(hw); + + wr32(hw, NGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_TR_BAH(reg_idx), tdba >> 32); + + /* reset head and tail pointers */ + wr32(hw, NGBE_PX_TR_RP(reg_idx), 0); + wr32(hw, NGBE_PX_TR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + NGBE_PX_TR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + txdctl |= NGBE_RING_SIZE(ring) << NGBE_PX_TR_CFG_TR_SIZE_SHIFT; + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + txdctl |= 0x20 << NGBE_PX_TR_CFG_WTHRESH_SHIFT; + /* + * Setting PTHRESH to 32 both improves performance + * and avoids a TX hang with DFP enabled + */ + + /* initialize XPS */ + if (!test_and_set_bit(__NGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct ngbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__NGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + wr32(hw, NGBE_PX_TR_CFG(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + msleep(1); + txdctl = rd32(hw, NGBE_PX_TR_CFG(reg_idx)); + } while (--wait_loop && !(txdctl & NGBE_PX_TR_CFG_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * ngbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void ngbe_configure_tx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, NGBE_TDM_CTL, + NGBE_TDM_CTL_TE, NGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + + wr32m(hw, NGBE_TSEC_BUF_AE, 0x3FF, 0x10); + wr32m(hw, NGBE_TSEC_CTL, 0x2, 0); + + wr32m(hw, NGBE_TSEC_CTL, 0x1, 1); + + /* enable mac transmitter */ + wr32m(hw, NGBE_MAC_TX_CFG, + NGBE_MAC_TX_CFG_TE, NGBE_MAC_TX_CFG_TE); +} + +static void ngbe_enable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl |= NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void ngbe_disable_rx_drop(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~NGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void ngbe_set_rx_drop_en(struct ngbe_adapter *adapter) +{ + int i; + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ngbe_fc_tx_pause))) { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +static void ngbe_configure_srrctl(struct ngbe_adapter *adapter, + struct ngbe_ring *rx_ring) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, NGBE_PX_RR_CFG(reg_idx), + ~(NGBE_PX_RR_CFG_RR_HDR_SZ | + NGBE_PX_RR_CFG_RR_BUF_SZ | + NGBE_PX_RR_CFG_SPLIT_MODE)); + + /* configure header buffer length, needed for RSC */ + srrctl |= NGBE_RX_HDR_SIZE << NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= ngbe_rx_bufsz(rx_ring) >> NGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (ring_is_hs_enabled(rx_ring)) + srrctl |= NGBE_PX_RR_CFG_SPLIT_MODE; + + wr32(hw, NGBE_PX_RR_CFG(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + */ +u32 ngbe_rss_indir_tbl_entries(struct ngbe_adapter *adapter) +{ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 128; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
+ */ +void ngbe_store_reta(struct ngbe_adapter *adapter) +{ + u32 i, reta_entries = ngbe_rss_indir_tbl_entries(adapter); + struct ngbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, NGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +static void ngbe_setup_reta(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ngbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* + * Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 1; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, NGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ngbe_store_reta(adapter); +} + +static void ngbe_setup_mrqc(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, and RSS do not coexist at the same time */ + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + return; + } + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_PCSD, NGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = NGBE_RDB_RA_CTL_RSS_IPV4 | + NGBE_RDB_RA_CTL_RSS_IPV4_TCP | + NGBE_RDB_RA_CTL_RSS_IPV6 | + NGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & NGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= NGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + ngbe_setup_reta(adapter); + + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + rss_field |= NGBE_RDB_RA_CTL_RSS_EN; + wr32(hw, NGBE_RDB_RA_CTL, rss_field); +} + +static void ngbe_rx_desc_queue_enable(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + do { + msleep(1); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); + } +} + +/* disable the specified rx ring/queue */ +void ngbe_disable_rx_queue(struct ngbe_adapter *adapter, + struct ngbe_ring *ring) +{ + struct ngbe_hw *hw = &adapter->hw; + int wait_loop = NGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, NGBE_PX_RR_CFG(reg_idx), + NGBE_PX_RR_CFG_RR_EN, 0); + + /* hardware may take up to 100us to actually disable rx queue */ + do { + udelay(10); + rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & NGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", 
reg_idx);
+ }
+}
+
+void ngbe_configure_rx_ring(struct ngbe_adapter *adapter,
+ struct ngbe_ring *ring)
+{
+ struct ngbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u16 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(hw, NGBE_PX_RR_CFG(reg_idx));
+ ngbe_disable_rx_queue(adapter, ring);
+
+ wr32(hw, NGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(hw, NGBE_PX_RR_BAH(reg_idx), rdba >> 32);
+
+ if (ring->count == NGBE_MAX_RXD)
+ rxdctl |= 0 << NGBE_PX_RR_CFG_RR_SIZE_SHIFT;
+ else
+ rxdctl |= (ring->count / 128) << NGBE_PX_RR_CFG_RR_SIZE_SHIFT;
+
+ rxdctl |= 0x1 << NGBE_PX_RR_CFG_RR_THER_SHIFT;
+ wr32(hw, NGBE_PX_RR_CFG(reg_idx), rxdctl);
+
+ /* reset head and tail pointers */
+ wr32(hw, NGBE_PX_RR_RP(reg_idx), 0);
+ wr32(hw, NGBE_PX_RR_WP(reg_idx), 0);
+ ring->tail = adapter->io_addr + NGBE_PX_RR_WP(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
+
+ ngbe_configure_srrctl(adapter, ring);
+
+ /* enable receive descriptor ring */
+ wr32m(hw, NGBE_PX_RR_CFG(reg_idx),
+ NGBE_PX_RR_CFG_RR_EN, NGBE_PX_RR_CFG_RR_EN);
+
+ ngbe_rx_desc_queue_enable(adapter, ring);
+ ngbe_alloc_rx_buffers(ring, ngbe_desc_unused(ring));
+}
+
+static void ngbe_setup_psrtype(struct ngbe_adapter *adapter)
+{
+ struct ngbe_hw *hw = &adapter->hw;
+ int pool;
+
+ /* PSRTYPE must be initialized in adapters */
+ u32 psrtype = NGBE_RDB_PL_CFG_L4HDR |
+ NGBE_RDB_PL_CFG_L3HDR |
+ NGBE_RDB_PL_CFG_L2HDR |
+ NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR |
+ NGBE_RDB_PL_CFG_TUN_TUNHDR;
+
+ for_each_set_bit(pool, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) {
+ wr32(hw, NGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
+/**
+ * ngbe_configure_bridge_mode - common settings for configuring bridge mode
+ * @adapter: the private structure
+ *
+ * This function's purpose is to remove code duplication and configure some
+ * settings required to switch bridge modes.
+ **/
+static void ngbe_configure_bridge_mode(struct ngbe_adapter *adapter)
+{
+ struct ngbe_hw *hw = &adapter->hw;
+
+ if (adapter->flags & NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) {
+ /* disable Tx loopback, rely on switch hairpin mode */
+ wr32m(hw, NGBE_PSR_CTL,
+ NGBE_PSR_CTL_SW_EN, 0);
+ } else {
+ /* enable Tx loopback for internal VF/PF communication */
+ wr32m(hw, NGBE_PSR_CTL,
+ NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN);
+ }
+}
+
+static void ngbe_configure_virtualization(struct ngbe_adapter *adapter)
+{
+ struct ngbe_hw *hw = &adapter->hw;
+ u32 i;
+ u8 vfe = 0;
+
+ if (!(adapter->flags & NGBE_FLAG_VMDQ_ENABLED))
+ return;
+
+ wr32m(hw, NGBE_PSR_VM_CTL,
+ NGBE_PSR_VM_CTL_POOL_MASK |
+ NGBE_PSR_VM_CTL_REPLEN,
+ VMDQ_P(0) << NGBE_PSR_VM_CTL_POOL_SHIFT |
+ NGBE_PSR_VM_CTL_REPLEN);
+
+ for_each_set_bit(i, &adapter->fwd_bitmask, NGBE_MAX_MACVLANS) {
+ /* accept untagged packets until a vlan tag is
+ * specifically set for the VMDQ queue/pool
+ */
+ wr32m(hw, NGBE_PSR_VM_L2CTL(i),
+ NGBE_PSR_VM_L2CTL_AUPE, NGBE_PSR_VM_L2CTL_AUPE);
+ }
+
+ vfe = 1 << (VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(hw, NGBE_RDM_POOL_RE, vfe);
+ wr32(hw, NGBE_TDM_POOL_TE, vfe);
+
+ if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ /* configure default bridge settings */
+ ngbe_configure_bridge_mode(adapter);
+
+ /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
+ * calling set_ethertype_anti_spoofing for each VF in the loop below.
+ */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + NGBE_ETH_P_LLDP)); /* LLDP eth procotol type */ + + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_FC), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | + NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vfinfo[i].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); + /* enable ethertype anti spoofing if hw supports it */ + TCALL(hw, mac.ops.set_ethertype_anti_spoofing, true, i); + } +} + +static void ngbe_set_rx_buffer_len(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ngbe_ring *rx_ring; + int i; + u32 mhadd; + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, NGBE_PSR_MAX_SZ); + if (max_frame != mhadd) { + wr32(hw, NGBE_PSR_MAX_SZ, max_frame); + } + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + + if (adapter->flags & NGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = NGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else + clear_ring_hs_enabled(rx_ring); + } +} + +/** + * ngbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ngbe_configure_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + TCALL(hw, mac.ops.disable_rx); + + ngbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_CRC_STRIP, NGBE_RSEC_CTL_CRC_STRIP); + + /* Program registers for the distribution of queues */ + ngbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ngbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, NGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= NGBE_RDB_PB_CTL_PBEN; + TCALL(hw, mac.ops.enable_rx_dma, rxctrl); +} + +static int ngbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); + + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, true); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), true); + } + } + + return 0; +} + +static int ngbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + int pool_ndx 
= VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) + return 0; + + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, false); + if (adapter->flags & NGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + NGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), false); + } + } + + clear_bit(vid, adapter->active_vlans); + return 0; +} + +/** + * ngbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_disable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, 0); + } +} + +/** + * ngbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void ngbe_vlan_strip_enable(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, NGBE_PX_RR_CFG(j), + NGBE_PX_RR_CFG_VLAN, NGBE_PX_RR_CFG_VLAN); + } +} + +void ngbe_vlan_mode(struct net_device *netdev, u32 features) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool enable; + + enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX)); + if (enable) + /* enable VLAN tag insert/strip */ + ngbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + ngbe_vlan_strip_disable(adapter); +} + +static void ngbe_restore_vlan(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid; + + ngbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ngbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +} + +static u8 *ngbe_addr_list_itr(struct ngbe_hw *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + struct ngbe_adapter *adapter = hw->back; + + /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * ngbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int ngbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + if (netdev_mc_empty(netdev)) { + TCALL(hw, mac.ops.update_mc_addr_list, NULL, 0, + ngbe_addr_list_itr, true); + } else { + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + addr_count = netdev_mc_count(netdev); + TCALL(hw, mac.ops.update_mc_addr_list, addr_list, addr_count, + ngbe_addr_list_itr, true); + } + +#ifdef CONFIG_PCI_IOV + ngbe_restore_vf_multicasts(adapter); +#endif + return addr_count; +} + +void ngbe_full_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= ~(NGBE_MAC_STATE_MODIFIED); + } +} + +static void ngbe_sync_mac_table(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + NGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= + ~(NGBE_MAC_STATE_MODIFIED); + } + } +} + +int ngbe_available_rars(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, + u8 *addr) +{ + struct ngbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | + NGBE_MAC_STATE_IN_USE); + TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + NGBE_PSR_MAC_SWC_AD_H_AV); +} + +int ngbe_add_mac_filter(struct ngbe_adapter *adapter, const u8 *addr, u16 pool) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & NGBE_MAC_STATE_IN_USE) { + continue; + } + adapter->mac_table[i].state |= (NGBE_MAC_STATE_MODIFIED | + NGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools = (1ULL << pool); + ngbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void ngbe_flush_sw_mac_table(struct ngbe_adapter *adapter) +{ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + 
adapter->mac_table[i].pools = 0; + } + ngbe_sync_mac_table(adapter); +} + +int ngbe_del_mac_filter(struct ngbe_adapter *adapter, const u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct ngbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].pools | (1ULL << pool)) { + adapter->mac_table[i].state |= NGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~NGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + ngbe_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; +} + +static int ngbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = ngbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int ngbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ngbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + +/** + * ngbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +int ngbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ngbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { + ngbe_del_mac_filter(adapter, ha->addr, pool); + ngbe_add_mac_filter(adapter, ha->addr, pool); + count++; + } + } + return count; +} + +/** + * ngbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
+ **/ +void ngbe_set_rx_mode(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, NGBE_PSR_CTL, + ~(NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_ROPE | + NGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, NGBE_PSR_VLAN_CTL, + ~(NGBE_PSR_VLAN_CTL_VFE | + NGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= NGBE_PSR_CTL_BAM | NGBE_PSR_CTL_MFE; + vmolr |= NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_AUPE | + NGBE_PSR_VM_L2CTL_VACC; + vlnctrl |= NGBE_PSR_VLAN_CTL_VFE; + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (NGBE_PSR_CTL_UPE | NGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= NGBE_PSR_CTL_MPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. */ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (NGBE_PSR_VM_L2CTL_UPE | NGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~NGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, NGBE_RSEC_CTL, + NGBE_RSEC_CTL_SAVE_MAC_ERR, + NGBE_RSEC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= NGBE_PSR_VM_L2CTL_ROPE | NGBE_PSR_VM_L2CTL_ROMPE; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, ngbe_uc_sync, ngbe_uc_unsync)) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROPE; + fctrl |= NGBE_PSR_CTL_UPE; + } + + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ngbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= NGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, NGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, NGBE_PSR_CTL, fctrl); + wr32(hw, NGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); +} + +static void ngbe_napi_enable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} + +static void ngbe_napi_disable_all(struct ngbe_adapter *adapter) +{ + struct ngbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ +#define NGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static inline unsigned long ngbe_tso_features(void) +{ + unsigned long features = 0; + + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL | NGBE_GSO_PARTIAL_FEATURES; + + return features; +} + +#ifndef CONFIG_NGBE_NO_LLI +static void ngbe_configure_lli(struct ngbe_adapter 
*adapter) +{ + /* LLI should only be enabled with MSI-X or MSI */ + if (!(adapter->flags & NGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & NGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, NGBE_RDB_ETYPE_CLS(0), + NGBE_RDB_ETYPE_CLS_LLI); + wr32(&adapter->hw, NGBE_PSR_ETYPE_SWC(0), + (adapter->lli_etype | + NGBE_PSR_ETYPE_SWC_FILTER_EN)); + } + + if (adapter->lli_port) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + (NGBE_RDB_5T_CTL1_LLI | + NGBE_RDB_5T_CTL1_SIZE_BP)); + + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (NGBE_RDB_5T_CTL0_DEST_PORT_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + + wr32(&adapter->hw, NGBE_RDB_5T_SDP(0), + (adapter->lli_port << 16)); + } + + if (adapter->lli_size) { + wr32(&adapter->hw, NGBE_RDB_5T_CTL1(0), + NGBE_RDB_5T_CTL1_LLI); + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size); + wr32(&adapter->hw, NGBE_RDB_5T_CTL0(0), + (NGBE_RDB_5T_CTL0_POOL_MASK_EN | + (NGBE_RDB_5T_CTL0_PRIORITY_MASK << + NGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK << + NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + } + + if (adapter->lli_vlan_pri) { + wr32m(&adapter->hw, NGBE_RDB_LLI_THRE, + NGBE_RDB_LLI_THRE_PRIORITY_EN | + NGBE_RDB_LLI_THRE_UP(~0), + NGBE_RDB_LLI_THRE_PRIORITY_EN | + (adapter->lli_vlan_pri << NGBE_RDB_LLI_THRE_UP_SHIFT)); + } +} + +#endif /* CONFIG_NGBE_NO_LLI */ +/* Additional bittime to account for NGBE framing */ +#define NGBE_ETH_FRAMING 20 + +/* + * ngbe_hpbthresh - calculate high water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_hpbthresh(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NGBE_ETH_FRAMING; + + /* Calculate delay value for device */ + dv_id = NGBE_DV(link, tc); + + /* Loopback switch introduces additional latency */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + dv_id += NGBE_B2BT(tc); + + /* Delay value is calculated in bit times, convert to KB */ + kb = NGBE_BT2KB(dv_id); + rx_pba = rd32(hw, NGBE_RDB_PB_SZ) + >> NGBE_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case warn the + * user and do the best we can. + */ + if (marker < 0) { + e_warn(drv, "Packet Buffer cannot provide enough " + "headroom to support flow control. " 
+ "Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/* + * ngbe_lpbthresh - calculate low water mark for for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int ngbe_lpbthresh(struct ngbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = NGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return NGBE_BT2KB(dv_id); +} + +/* + * ngbe_pbthresh_setup - calculate and setup high low water marks + */ + +static void ngbe_pbthresh_setup(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + + if (!num_tc) + num_tc = 1; + + hw->fc.high_water = ngbe_hpbthresh(adapter); + hw->fc.low_water = ngbe_lpbthresh(adapter); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water > hw->fc.high_water) + hw->fc.low_water = 0; +} + +static void ngbe_configure_pb(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int hdrm = 0; + int tc = netdev_get_num_tc(adapter->netdev); + + TCALL(hw, mac.ops.setup_rxpba, tc, hdrm, PBA_STRATEGY_EQUAL); + ngbe_pbthresh_setup(adapter); +} + +void ngbe_configure_isb(struct ngbe_adapter *adapter) +{ + /* set ISB Address */ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, NGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +void ngbe_configure_port(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 value, i; + + if (adapter->num_vfs == 0) { + value = NGBE_CFG_PORT_CTL_NUM_VT_NONE; + } else + value = NGBE_CFG_PORT_CTL_NUM_VT_8; + + /* enable double vlan and qinq, NONE VT at default */ + value |= NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_D_VLAN | + NGBE_CFG_PORT_CTL_QINQ | + NGBE_CFG_PORT_CTL_NUM_VT_MASK, + value); + + wr32(hw, NGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, NGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +static void ngbe_configure(struct ngbe_adapter *adapter) +{ + ngbe_configure_pb(adapter); + + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ngbe_configure_virtualization(adapter); + /* configure Double Vlan */ + ngbe_configure_port(adapter); + + ngbe_set_rx_mode(adapter->netdev); + ngbe_restore_vlan(adapter); + + ngbe_configure_tx(adapter); + ngbe_configure_rx(adapter); + ngbe_configure_isb(adapter); +} + + +/** + * ngbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_non_sfp_link_config(struct ngbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + u32 ret = NGBE_ERR_LINK_SETUP; + + ret = TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + + speed = hw->phy.autoneg_advertised; + if (!speed) + ret = TCALL(hw, mac.ops.get_link_capabilities, &speed, + &autoneg); + + if ((hw->subsystem_device_id & OEM_MASK) == OCP_CARD || + ((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP)) { + + } else { + 
msleep(50); + if (hw->phy.type == ngbe_phy_internal) { + TCALL(hw, eeprom.ops.phy_signal_set); + TCALL(hw, phy.ops.setup_once); + } + } + + ret = TCALL(hw, mac.ops.setup_link, speed, false); + + return ret; +} + +static void ngbe_setup_gpie(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) { + gpie = NGBE_PX_GPIE_MODEL; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + } + + wr32(hw, NGBE_PX_GPIE, gpie); +} + +static void ngbe_up_complete(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + ngbe_get_hw_control(adapter); + ngbe_setup_gpie(adapter); + + if (adapter->flags & NGBE_FLAG_MSIX_ENABLED) + ngbe_configure_msix(adapter); + else + ngbe_configure_msi_and_legacy(adapter); + + smp_mb__before_atomic(); + clear_bit(__NGBE_DOWN, &adapter->state); + ngbe_napi_enable_all(adapter); +#ifndef CONFIG_NGBE_NO_LLI + ngbe_configure_lli(adapter); +#endif + + err = ngbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + + /* sellect GMII */ + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & ~NGBE_MAC_TX_CFG_SPEED_MASK) | + NGBE_MAC_TX_CFG_SPEED_1G); + + /* clear any pending interrupts, may auto mask */ + rd32(hw, NGBE_PX_IC); + rd32(hw, NGBE_PX_MISC_IC); + ngbe_irq_enable(adapter, true, true); + + if (((hw->subsystem_device_id & OEM_MASK) == LY_M88E1512_SFP) || + (hw->subsystem_device_id & OEM_MASK) == LY_YT8521S_SFP) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, 0); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; +#ifdef CONFIG_NGBE_POLL_LINK_STATUS + mod_timer(&adapter->link_check_timer, jiffies); +#endif + mod_timer(&adapter->service_timer, jiffies); + /* ngbe_clear_vf_stats_counters(adapter); */ + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_PFRSTD, NGBE_CFG_PORT_CTL_PFRSTD); +} + +void ngbe_reinit_locked(struct ngbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + netif_trans_update(adapter->netdev); + + while (test_and_set_bit(__NGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ngbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ngbe_up(adapter); + clear_bit(__NGBE_RESETTING, &adapter->state); +} + +void ngbe_up(struct ngbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ngbe_configure(adapter); + ngbe_up_complete(adapter); +} + +void ngbe_reset(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + + err = TCALL(hw, mac.ops.init_hw); + switch (err) { + case 0: + break; + case NGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case NGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ngbe_flush_sw_mac_table(adapter); + ngbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_reset(adapter); +} + +/** + * ngbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ngbe_clean_rx_ring(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ngbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (NGBE_CB(skb)->dma_released) { + dma_unmap_single(dev, + NGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + NGBE_CB(skb)->dma = 0; + NGBE_CB(skb)->dma_released = false; + } + + if (NGBE_CB(skb)->page_released) + dma_unmap_page(dev, + NGBE_CB(skb)->dma, + ngbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->page_dma, + ngbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + + __free_pages(rx_buffer->page, + ngbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; + } + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * ngbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ngbe_clean_tx_ring(struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if 
(!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ngbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * ngbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_rx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ngbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ngbe_clean_all_tx_rings(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_clean_tx_ring(adapter->tx_ring[i]); +} + +void ngbe_disable_device(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__NGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + ngbe_disable_pcie_master(hw); + /* disable receives */ + TCALL(hw, mac.ops.disable_rx); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ngbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + if ((hw->subsystem_device_id & OEM_MASK) == LY_M88E1512_SFP || + (hw->subsystem_device_id & OEM_MASK) == LY_YT8521S_SFP) + /* gpio0 is used to power on/off control*/ + wr32(hw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + + ngbe_irq_disable(adapter); + + ngbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(NGBE_FLAG2_PF_RESET_REQUESTED | + NGBE_FLAG2_DEV_RESET_REQUESTED | + NGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); +#ifdef CONFIG_NGBE_POLL_LINK_STATUS + del_timer_sync(&adapter->link_check_timer); +#endif + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, NGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + ngbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + ngbe_disable_tx_rx(adapter); + } + + /*OCP NCSI need it*/ + if (!(((hw->subsystem_device_id & OEM_MASK) == OCP_CARD) || + ((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP) || + ((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP))) { + /* disable mac transmiter */ + wr32m(hw, NGBE_MAC_TX_CFG, NGBE_MAC_TX_CFG_TE, 0); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + wr32(hw, NGBE_PX_TR_CFG(reg_idx), + NGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(hw, NGBE_TDM_CTL, NGBE_TDM_CTL_TE, 0); +} + + +void ngbe_down(struct ngbe_adapter *adapter) +{ + ngbe_disable_device(adapter); + + if (!pci_channel_offline(adapter->pdev)) + ngbe_reset(adapter); + 
+ ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); +} + +/** + * ngbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ngbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ngbe_init_shared_code(struct ngbe_hw *hw) +{ + DEBUGFUNC("\n"); + + if ((hw->subsystem_device_id & INTERNAL_SFP_MASK) == INTERNAL_SFP || + (hw->subsystem_device_id & OEM_MASK) == LY_M88E1512_SFP) + hw->phy.type = ngbe_phy_m88e1512_sfi; + else if (hw->subsystem_device_id == NGBE_WX1860AL_M88E1512_RJ45) + hw->phy.type = ngbe_phy_m88e1512; + else if ((hw->subsystem_device_id & OEM_MASK) == YT8521S_SFP || + (hw->subsystem_device_id & OEM_MASK) == LY_YT8521S_SFP) + hw->phy.type = ngbe_phy_yt8521s_sfi; + else + hw->phy.type = ngbe_phy_internal; + +/* select claus22 */ + wr32(hw, NGBE_MDIO_CLAUSE_SELECT, 0xF); + + return ngbe_init_ops(hw); +} + +/** + * ngbe_sw_init - Initialize general software structures (struct ngbe_adapter) + * @adapter: board private structure to initialize + * + * ngbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int ngbe_sw_init(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == NGBE_FAILED_READ_CFG_BYTE && + ngbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* phy type, phy ops, mac ops */ + err = ngbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + + adapter->mac_table = kzalloc(sizeof(struct ngbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = NGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + + /* Set common capability flags and settings */ + adapter->max_q_vectors = NGBE_MAX_MSIX_Q_VECTORS_EMERALD; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= NGBE_FLAGS_SP_INIT; + adapter->flags2 |= NGBE_FLAG2_TEMP_SENSOR_CAPABLE; + adapter->flags2 |= NGBE_FLAG2_EEE_CAPABLE; + + /* init mailbox params */ + TCALL(hw, mbx.ops.init_params); + + /* default flow control settings */ + hw->fc.requested_mode = ngbe_fc_full; + hw->fc.current_mode = ngbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = NGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + 
adapter->tx_ring_count = NGBE_DEFAULT_TXD; + adapter->rx_ring_count = NGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__NGBE_DOWN, &adapter->state); +out: + return err; +} + +/** + * ngbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ngbe_setup_tx_resources(struct ngbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ngbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ngbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + ngbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * ngbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ngbe_setup_rx_resources(struct ngbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct ngbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ngbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ngbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ngbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) { + continue; + } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ngbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * ngbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ngbe_setup_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * NGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) { + e_err(probe, "ngbe_setup_isb_resources: alloc isb_mem failed\n"); + return -ENOMEM; + } + memset(adapter->isb_mem, 0, sizeof(u32) * NGBE_ISB_MAX); + return 0; +} + +/** + * ngbe_free_isb_resources - free interrupt status resources + * @adapter: board private structure + **/ +static void ngbe_free_isb_resources(struct ngbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * NGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +/** + * ngbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ngbe_free_tx_resources(struct ngbe_ring *tx_ring) +{ + ngbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * ngbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ngbe_free_all_tx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ngbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ngbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ngbe_free_rx_resources(struct ngbe_ring *rx_ring) +{ + ngbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ngbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ngbe_free_all_rx_resources(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ngbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ngbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ngbe_adapter *adapter = 
netdev_priv(netdev); + + if ((new_mtu < 68) || (new_mtu > 9414)) + return -EINVAL; + + /* + * we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if ((adapter->flags & NGBE_FLAG_SRIOV_ENABLED) && + (new_mtu > ETH_DATA_LEN)) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + + return 0; +} + +/** + * ngbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int ngbe_open(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__NGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ngbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ngbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ngbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + ngbe_configure(adapter); + + err = ngbe_request_irq(adapter); + if (err) + goto err_req_irq; + + if (adapter->num_tx_queues) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + } + + if (adapter->num_rx_queues) { + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + ngbe_ptp_init(adapter); + ngbe_up_complete(adapter); + + return 0; + +err_set_queues: + ngbe_free_irq(adapter); +err_req_irq: + ngbe_free_isb_resources(adapter); +err_req_isb: + ngbe_free_all_rx_resources(adapter); + +err_setup_rx: + ngbe_free_all_tx_resources(adapter); +err_setup_tx: + ngbe_reset(adapter); + return err; +} + +/** + * ngbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. + */ +static void ngbe_close_suspend(struct ngbe_adapter *adapter) +{ + ngbe_ptp_suspend(adapter); + ngbe_disable_device(adapter); + + ngbe_clean_all_tx_rings(adapter); + ngbe_clean_all_rx_rings(adapter); + + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); +} + +/** + * ngbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. 
A global MAC reset is issued to stop the + hardware, and all transmit and receive resources are freed. + **/ +int ngbe_close(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + ngbe_ptp_stop(adapter); + ngbe_down(adapter); + ngbe_free_irq(adapter); + + ngbe_free_isb_resources(adapter); + ngbe_free_all_rx_resources(adapter); + ngbe_free_all_tx_resources(adapter); + + ngbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int ngbe_resume(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter; + struct net_device *netdev; + u32 err; + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + wr32(&adapter->hw, NGBE_PSR_WKUP_CTL, adapter->wol); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + rtnl_lock(); + + err = ngbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ngbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +/* + * __ngbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12), where it would cause a compile + * warning/error because it is defined and not used. + */ +static int __ngbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) + ngbe_close_suspend(adapter); + rtnl_unlock(); + + ngbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + /* this won't stop the link if manageability or WoL is enabled */ + ngbe_stop_mac_link_on_d3(hw); + + if (wufc) { + ngbe_set_rx_mode(netdev); + ngbe_configure_rx(adapter); + /* enable the optics for SFP+ fiber so we can WoL */ + TCALL(hw, mac.ops.enable_tx_laser); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & NGBE_PSR_WKUP_CTL_MC) { + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_MPE, NGBE_PSR_CTL_MPE); + } + + pci_clear_master(adapter->pdev); + wr32(hw, NGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(hw, NGBE_PSR_WKUP_CTL, 0); + } + + pci_wake_from_d3(pdev, !!wufc); + + *enable_wake = !!wufc; + ngbe_release_hw_control(adapter); + + if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ngbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +{ + int retval; + bool wake; + + retval = __ngbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void ngbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __ngbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + 
pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * ngbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces ngbe_get_stats for kernels which support it. + */ +static void ngbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ngbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +} + +/** + * ngbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ngbe_update_stats(struct ngbe_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->netdev->stats; + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ngbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, NGBE_RX_CRC_ERROR_FRAMES_LOW); + + hwstats->gprc += rd32(hw, NGBE_PX_GPRC); + + ngbe_update_xoff_rx_lfc(adapter); + + hwstats->o2bgptc += rd32(hw, NGBE_TDM_OS2BMC_CNT); + if (ngbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, NGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, NGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, NGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, NGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, NGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, NGBE_PX_GOTC_LSB); + hwstats->gotc += (u64)rd32(hw, NGBE_PX_GOTC_MSB) << 32; + + + adapter->hw_rx_no_dma_resources += + rd32(hw, NGBE_RDM_DRP_PKT); + bprc = rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + + for (i = 0; i < 8; i++) + hwstats->mprc += rd32(hw, NGBE_PX_MPRC(i)); + + hwstats->roc += rd32(hw, NGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, NGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, NGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, NGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, NGBE_PX_GPTC); + hwstats->mptc += rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, NGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, NGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = 
hwstats->crcerrs; + total_mpc = rd32(hw, NGBE_RDB_MPCNT); + net_stats->rx_missed_errors = total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. + */ + if (!test_bit(__NGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPRC, \ + adapter->vfinfo->last_vfstats.gprc, \ + adapter->vfinfo->vfstats.gprc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_GPTC, \ + adapter->vfinfo->last_vfstats.gptc, \ + adapter->vfinfo->vfstats.gptc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GORC_LSB, \ + NGBE_VX_GORC_MSB, \ + adapter->vfinfo->last_vfstats.gorc, \ + adapter->vfinfo->vfstats.gorc); + UPDATE_VF_COUNTER_36bit(NGBE_VX_GOTC_LSB, \ + NGBE_VX_GOTC_MSB, \ + adapter->vfinfo->last_vfstats.gotc, \ + adapter->vfinfo->vfstats.gotc); + UPDATE_VF_COUNTER_32bit(NGBE_VX_MPRC, \ + adapter->vfinfo->last_vfstats.mprc, \ + adapter->vfinfo->vfstats.mprc); + } + } +} + +/** + * ngbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void ngbe_check_hang_subtask(struct ngbe_adapter *adapter) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__NGBE_DOWN, &adapter->state) || + test_bit(__NGBE_REMOVING, &adapter->state) || + test_bit(__NGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } +} + +static void ngbe_watchdog_an_complete(struct ngbe_adapter *adapter) +{ + u32 link_speed = 0; + u32 lan_speed = 0; + bool link_up = true; + struct ngbe_hw *hw = &adapter->hw; + + if (!(adapter->flags & NGBE_FLAG_NEED_ANC_CHECK)) + return; + + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + adapter->flags &= ~NGBE_FLAG_NEED_ANC_CHECK; + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + + return; +} + +/** + * ngbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void ngbe_watchdog_update_link_status(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + u32 lan_speed = 0; + u32 reg; + +#ifndef CONFIG_NGBE_POLL_LINK_STATUS + if (!(adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE)) + return; +#endif + link_speed = NGBE_LINK_SPEED_1GB_FULL; + link_up = true; + + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); +#ifndef CONFIG_NGBE_POLL_LINK_STATUS + if (link_up || 
time_after(jiffies, (adapter->link_check_timeout + + NGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE; + } +#else + if (adapter->link_up == link_up && + adapter->link_speed == link_speed) + return; +#endif + + adapter->link_speed = link_speed; + switch (link_speed) { + case NGBE_LINK_SPEED_100_FULL: + lan_speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + lan_speed = 2; + break; + case NGBE_LINK_SPEED_10_FULL: + lan_speed = 0; + break; + default: + break; + } + wr32m(hw, NGBE_CFG_LAN_SPEED, + 0x3, lan_speed); + + if (link_up) { + TCALL(hw, mac.ops.fc_enable); + ngbe_set_rx_drop_en(adapter); + } + + if (link_up) { + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + + if (link_speed & (NGBE_LINK_SPEED_1GB_FULL | + NGBE_LINK_SPEED_100_FULL | NGBE_LINK_SPEED_10_FULL)) { + wr32(hw, NGBE_MAC_TX_CFG, + (rd32(hw, NGBE_MAC_TX_CFG) & + ~NGBE_MAC_TX_CFG_SPEED_MASK) | NGBE_MAC_TX_CFG_TE | + NGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, NGBE_MAC_RX_CFG); + wr32(hw, NGBE_MAC_RX_CFG, reg); + wr32(hw, NGBE_MAC_PKT_FLT, NGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); + wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); + } + + adapter->link_up = link_up; + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.dmac_config.link_speed != link_speed || + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; + TCALL(hw, mac.ops.dmac_config); + } + } + return; +} + +static void ngbe_update_default_up(struct ngbe_adapter *adapter) +{ + u8 up = 0; + adapter->default_up = up; +} + +/** + * ngbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_link_is_up(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ngbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, NGBE_MAC_RX_FLOW_CTRL) & 0x101) == 0x1; + flow_tx = !!(NGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, NGBE_RDB_RFCC)); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == NGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == NGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == NGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? 
"TX" : "None")))); + + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + + /* update the default user priority for VFs */ + ngbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs(adapter); +} + +/** + * ngbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void ngbe_watchdog_link_is_down(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) + ngbe_ptp_start_cyclecounter(adapter); + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ngbe_ping_all_vfs(adapter); +} + +static bool ngbe_ring_tx_pending(struct ngbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ngbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ngbe_vf_tx_pending(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + NGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + NGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * ngbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ngbe_watchdog_flush_tx(struct ngbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (ngbe_ring_tx_pending(adapter) || + ngbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset due to lost link with " + "pending Tx work\n"); + adapter->flags2 |= NGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void ngbe_issue_vf_flr(struct ngbe_adapter *adapter, + struct pci_dev *vfdev) +{ + int pos, i; + u16 status; + + /* wait for pending transactions on the bus */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + e_dev_warn("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); + if (!pos) + return; + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} + + +static void ngbe_spoof_check(struct ngbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check if in non-IOV mode */ + if (adapter->num_vfs == 0) + return; + ssvpc = rd32(&adapter->hw, NGBE_TDM_SEC_DRP); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
+     */
+    if (!ssvpc)
+        return;
+
+    e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+}
+#endif /* CONFIG_PCI_IOV */
+
+/**
+ * ngbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ngbe_watchdog_subtask(struct ngbe_adapter *adapter)
+{
+    /* if interface is down do nothing */
+    if (test_bit(__NGBE_DOWN, &adapter->state) ||
+        test_bit(__NGBE_REMOVING, &adapter->state) ||
+        test_bit(__NGBE_RESETTING, &adapter->state))
+        return;
+
+    ngbe_watchdog_an_complete(adapter);
+#ifndef CONFIG_NGBE_POLL_LINK_STATUS
+    ngbe_watchdog_update_link_status(adapter);
+
+    if (adapter->link_up)
+        ngbe_watchdog_link_is_up(adapter);
+    else
+        ngbe_watchdog_link_is_down(adapter);
+#endif
+#ifdef CONFIG_PCI_IOV
+    ngbe_spoof_check(adapter);
+#endif /* CONFIG_PCI_IOV */
+
+    ngbe_update_stats(adapter);
+    ngbe_watchdog_flush_tx(adapter);
+}
+
+/**
+ * ngbe_service_timer - Timer callback
+ * @t: pointer to the service_timer embedded in the adapter structure
+ **/
+static void ngbe_service_timer(struct timer_list *t)
+{
+    struct ngbe_adapter *adapter = from_timer(adapter, t, service_timer);
+    unsigned long next_event_offset;
+    struct ngbe_hw *hw = &adapter->hw;
+
+    /* poll faster when waiting for link */
+    if ((adapter->flags & NGBE_FLAG_NEED_LINK_UPDATE) ||
+        (adapter->flags & NGBE_FLAG_NEED_ANC_CHECK))
+        next_event_offset = HZ / 10;
+    else
+        next_event_offset = HZ * 2;
+
+    if ((rd32(&adapter->hw, NGBE_MIS_PF_SM) == 1) && (hw->bus.lan_id)) {
+        adapter->flags2 |= NGBE_FLAG2_PCIE_NEED_RECOVER;
+    }
+
+    /* Reset the timer */
+    mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+    ngbe_service_event_schedule(adapter);
+}
+
+#ifdef CONFIG_NGBE_POLL_LINK_STATUS
+static void ngbe_link_check_timer(struct timer_list *t)
+{
+    struct ngbe_adapter *adapter = from_timer(adapter, t, link_check_timer);
+    unsigned long next_event_offset = HZ / 1000;
+
+    mod_timer(&adapter->link_check_timer, next_event_offset + jiffies);
+    /* if interface is down do nothing */
+    if (test_bit(__NGBE_DOWN, &adapter->state) ||
+        test_bit(__NGBE_REMOVING, &adapter->state) ||
+        test_bit(__NGBE_RESETTING, &adapter->state))
+        return;
+
+    ngbe_watchdog_update_link_status(adapter);
+
+    if (adapter->link_up)
+        ngbe_watchdog_link_is_up(adapter);
+    else
+        ngbe_watchdog_link_is_down(adapter);
+}
+#endif
+
+static void ngbe_reset_subtask(struct ngbe_adapter *adapter)
+{
+    u32 reset_flag = 0;
+    u32 value = 0;
+
+    if (!(adapter->flags2 & (NGBE_FLAG2_PF_RESET_REQUESTED |
+          NGBE_FLAG2_DEV_RESET_REQUESTED |
+          NGBE_FLAG2_GLOBAL_RESET_REQUESTED |
+          NGBE_FLAG2_RESET_INTR_RECEIVED)))
+        return;
+
+    /* If we're already down, just bail */
+    if (test_bit(__NGBE_DOWN, &adapter->state) ||
+        test_bit(__NGBE_REMOVING, &adapter->state))
+        return;
+
+    netdev_err(adapter->netdev, "Reset adapter\n");
+    adapter->tx_timeout_count++;
+
+    rtnl_lock();
+    if (adapter->flags2 & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) {
+        reset_flag |= NGBE_FLAG2_GLOBAL_RESET_REQUESTED;
+        adapter->flags2 &= ~NGBE_FLAG2_GLOBAL_RESET_REQUESTED;
+    }
+    if (adapter->flags2 & NGBE_FLAG2_DEV_RESET_REQUESTED) {
+        reset_flag |= NGBE_FLAG2_DEV_RESET_REQUESTED;
+        adapter->flags2 &= ~NGBE_FLAG2_DEV_RESET_REQUESTED;
+    }
+    if (adapter->flags2 & NGBE_FLAG2_PF_RESET_REQUESTED) {
+        reset_flag |= NGBE_FLAG2_PF_RESET_REQUESTED;
+        adapter->flags2 &= ~NGBE_FLAG2_PF_RESET_REQUESTED;
+    }
+
+    if (adapter->flags2 & NGBE_FLAG2_RESET_INTR_RECEIVED) {
+        /* If there's a recovery already waiting, it takes
+         * precedence before starting a new reset sequence.
+ */ + adapter->flags2 &= ~NGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, NGBE_MIS_RST_ST, + NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = NGBE_SW_RESET; + + } else if (value == NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = NGBE_GLOBAL_RESET; + adapter->hw.force_full_reset = true; + ngbe_reinit_locked(adapter); + adapter->hw.force_full_reset = false; + goto unlock; + } + + if (reset_flag & NGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_SW_RST, NGBE_MIS_RST_SW_RST); + e_info(drv, "ngbe_reset_subtask: sw reset\n"); + + } else if (reset_flag & NGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to open*/ + /*ngbe_dump(adapter);*/ + ngbe_reinit_locked(adapter); + } else if (reset_flag & NGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to open*/ + /*ngbe_dump(adapter);*/ + pci_save_state(adapter->pdev); + if (ngbe_mng_present(&adapter->hw)) { + ngbe_reset_hostif(&adapter->hw); + e_info(drv, "ngbe_reset_subtask: lan reset\n"); + + } else { + wr32m(&adapter->hw, NGBE_MIS_RST, + NGBE_MIS_RST_GLOBAL_RST, + NGBE_MIS_RST_GLOBAL_RST); + e_info(drv, "ngbe_reset_subtask: global reset\n"); + } + } + +unlock: + rtnl_unlock(); +} + +static void ngbe_check_pcie_subtask(struct ngbe_adapter *adapter) +{ + if (!(adapter->flags2 & NGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + e_info(probe, "do recovery\n"); + ngbe_pcie_do_recovery(adapter->pdev); + wr32m(&adapter->hw, NGBE_MIS_PF_SM, + NGBE_MIS_PF_SM_SM, 0); + adapter->flags2 &= ~NGBE_FLAG2_PCIE_NEED_RECOVER; +} + + +/** + * ngbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ngbe_service_task(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, + struct ngbe_adapter, + service_task); + if (NGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__NGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ngbe_down(adapter); + rtnl_unlock(); + } + ngbe_service_event_complete(adapter); + return; + } + + ngbe_check_pcie_subtask(adapter); + ngbe_reset_subtask(adapter); + ngbe_check_overtemp_subtask(adapter); + ngbe_watchdog_subtask(adapter); + ngbe_check_hang_subtask(adapter); + + if (test_bit(__NGBE_PTP_RUNNING, &adapter->state)) { + ngbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + ngbe_ptp_rx_hang(adapter); + } + + ngbe_service_event_complete(adapter); +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static ngbe_dptype encode_tx_desc_ptype(const struct ngbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + u8 l4_prot = 0; + u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + u32 len = 0; + + if 
(skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = NGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP || + tun_prot == IPPROTO_IPV6) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= NGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + + ptype |= NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = NGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + + ptype = NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= NGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* NETIF_F_IPV6_CSUM */ + case __constant_htons(ETH_P_1588): + ptype = NGBE_PTYPE_L2_TS; + goto exit; + case __constant_htons(ETH_P_FIP): + ptype = NGBE_PTYPE_L2_FIP; + goto exit; + case __constant_htons(NGBE_ETH_P_LLDP): + ptype = NGBE_PTYPE_L2_LLDP; + goto exit; + case __constant_htons(NGBE_ETH_P_CNM): + ptype = NGBE_PTYPE_L2_CNM; + goto exit; + case __constant_htons(ETH_P_PAE): + ptype = NGBE_PTYPE_L2_EAPOL; + goto exit; + case __constant_htons(ETH_P_ARP): + ptype = NGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = NGBE_PTYPE_L2_MAC; + goto exit; + } + } + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= NGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= NGBE_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= NGBE_PTYPE_TYP_SCTP; + break; + default: + ptype |= NGBE_PTYPE_TYP_IP; + break; + } + +exit: + return ngbe_decode_ptype(ptype); +} + +static int ngbe_tso(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + u8 *hdr_len, ngbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + bool enc = skb->encapsulation; + + struct ipv6hdr *ipv6h; + + + if 
(skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_IPV4 | + NGBE_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= NGBE_TX_FLAGS_TSO | + NGBE_TX_FLAGS_CSUM | + NGBE_TX_FLAGS_CC; + + } + + /* compute header lengths */ + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << NGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << NGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + if (enc) { + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= NGBE_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else + vlan_macip_lens = skb_network_header_len(skb) >> 1; + + vlan_macip_lens |= skb_network_offset(skb) << NGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void ngbe_tx_csum(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, ngbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + u32 type_tucmd; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & NGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & 
NGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, + exthdr - skb->data, + &tun_prot, &frag_off); + + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = NGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + NGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + NGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + NGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); + l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + NGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + NGBE_TXD_L4LEN_SHIFT; + break; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= NGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= NGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & NGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + ngbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +static u32 ngbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = NGBE_TXD_DTYP_DATA | + NGBE_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_HW_VLAN, + 
NGBE_TXD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSO, + NGBE_TXD_TSE); + + /* set timestamp bit if present */ + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_TSTAMP, + NGBE_TXD_MAC_TSTAMP); + + cmd_type |= NGBE_SET_FLAG(tx_flags, NGBE_TX_FLAGS_LINKSEC, + NGBE_TXD_LINKSEC); + + return cmd_type; +} + +static void ngbe_tx_olinfo_status(union ngbe_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << NGBE_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CSUM, + NGBE_TXD_L4CS); + + /* enble IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPV4, + NGBE_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_OUTER_IPV4, + NGBE_TXD_EIPCS); + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_CC, + NGBE_TXD_CC); + + olinfo_status |= NGBE_SET_FLAG(tx_flags, + NGBE_TX_FLAGS_IPSEC, + NGBE_TXD_IPSEC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ngbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ngbe_maybe_stop_tx(struct ngbe_ring *tx_ring, u16 size) +{ + if (likely(ngbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ngbe_maybe_stop_tx(tx_ring, size); +} + +#define NGBE_TXD_CMD (NGBE_TXD_EOP | \ + NGBE_TXD_RS) + +static int ngbe_tx_map(struct ngbe_ring *tx_ring, + struct ngbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ngbe_tx_buffer *tx_buffer; + union ngbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ngbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = NGBE_TX_DESC(tx_ring, i); + + ngbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > NGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ NGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = NGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += NGBE_MAX_DATA_PER_TXD; + size -= NGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + 
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+        i++;
+        tx_desc++;
+        if (i == tx_ring->count) {
+            tx_desc = NGBE_TX_DESC(tx_ring, 0);
+            i = 0;
+        }
+        tx_desc->read.olinfo_status = 0;
+
+        size = skb_frag_size(frag);
+
+        data_len -= size;
+
+        dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                               DMA_TO_DEVICE);
+
+        tx_buffer = &tx_ring->tx_buffer_info[i];
+    }
+
+    /* write last descriptor with RS and EOP bits */
+    cmd_type |= size | NGBE_TXD_CMD;
+    tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+    /* set the timestamp */
+    first->time_stamp = jiffies;
+
+    /*
+     * Force memory writes to complete before letting h/w know there
+     * are new descriptors to fetch.  (Only applicable for weak-ordered
+     * memory model archs, such as IA-64).
+     *
+     * We also need this memory barrier to make certain all of the
+     * status bits have been updated before next_to_watch is written.
+     */
+    wmb();
+
+    /* set next_to_watch value indicating a packet is present */
+    first->next_to_watch = tx_desc;
+
+    i++;
+    if (i == tx_ring->count)
+        i = 0;
+
+    tx_ring->next_to_use = i;
+
+    ngbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+    skb_tx_timestamp(skb);
+
+    if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+        writel(i, tx_ring->tail);
+        /* The following mmiowb() is required on certain
+         * architectures (IA64/Altix in particular) in order to
+         * synchronize the I/O calls with respect to a spin lock. This
+         * is because the wmb() on those architectures does not
+         * guarantee anything for posted I/O writes.
+         *
+         * Note that the associated spin_unlock() is not within the
+         * driver code, but in the networking core stack.
+         */
+        mmiowb();
+    }
+
+    return 0;
+dma_error:
+    dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+    /* clear dma mappings for failed tx_buffer_info map */
+    for (;;) {
+        tx_buffer = &tx_ring->tx_buffer_info[i];
+        if (dma_unmap_len(tx_buffer, len))
+            dma_unmap_page(tx_ring->dev,
+                           dma_unmap_addr(tx_buffer, dma),
+                           dma_unmap_len(tx_buffer, len),
+                           DMA_TO_DEVICE);
+        dma_unmap_len_set(tx_buffer, len, 0);
+        if (tx_buffer == first)
+            break;
+        if (i == 0)
+            i += tx_ring->count;
+        i--;
+    }
+
+    dev_kfree_skb_any(first->skb);
+    first->skb = NULL;
+
+    tx_ring->next_to_use = i;
+
+    return -1;
+}
+
+/**
+ * ngbe_skb_pad_nonzero - pad the tail of an skb with a non-zero byte
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area filled with the
+ * non-zero byte 0x1. Used by network drivers which may DMA or transfer
+ * data beyond the buffer end onto the wire.
+ *
+ * May return error in out of memory cases. The skb is freed on error.
+ */
+int ngbe_skb_pad_nonzero(struct sk_buff *skb, int pad)
+{
+    int err;
+    int ntail;
+
+    /* If the skbuff is non-linear, tailroom is always zero. */
+    if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+        memset(skb->data + skb->len, 0x1, pad);
+        return 0;
+    }
+
+    ntail = skb->data_len + pad - (skb->end - skb->tail);
+    if (likely(skb_cloned(skb) || ntail > 0)) {
+        err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
+        if (unlikely(err))
+            goto free_skb;
+    }
+
+    /* FIXME: The use of this function with non-linear skb's really needs
+     * to be audited.
+ */ + err = skb_linearize(skb); + if (unlikely(err)) + goto free_skb; + + memset(skb->data + skb->len, 0x1, pad); + return 0; + +free_skb: + kfree_skb(skb); + return err; +} + +netdev_tx_t ngbe_xmit_frame_ring(struct sk_buff *skb, + struct ngbe_adapter *adapter, + struct ngbe_ring *tx_ring) +{ + struct ngbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + ngbe_dptype dptype; + + /* + * need: 1 descriptor per page * PAGE_SIZE/NGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/NGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (ngbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << NGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= NGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + NGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= NGBE_TX_FLAGS_SW_VLAN; + } + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__NGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= NGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. 
+ */ + if (adapter->flags & NGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= NGBE_TX_FLAGS_CC; + +#endif + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + + tso = ngbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + ngbe_tx_csum(tx_ring, first, dptype); + + if (ngbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + +cleanup_tx_tstamp: + if (unlikely(tx_flags & NGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__NGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_ring *tx_ring; + unsigned int r_idx = skb->queue_mapping; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; + + return ngbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * ngbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ngbe_set_mac(struct net_device *netdev, void *p) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ngbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ngbe_mac_set_default_filter(adapter, hw->mac.addr); + e_info(drv, "The mac has been set to %02X:%02X:%02X:%02X:%02X:%02X\n", + hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2], + hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); + + return 0; +} + +static int ngbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret = 0; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + return ret; +} + +static int ngbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + switch (cmd) { + case SIOCGHWTSTAMP: + return ngbe_ptp_get_ts_config(adapter, ifr); + case SIOCSHWTSTAMP: + return ngbe_ptp_set_ts_config(adapter, ifr); + case SIOCGMIIREG: + case SIOCSMIIREG: + return ngbe_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. 
Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ngbe_close(dev); + else + ngbe_reset(adapter); + + ngbe_clear_interrupt_scheme(adapter); + + if (tc) { + netdev_set_num_tc(dev, tc); + } else { + netdev_reset_tc(dev); + } + + ngbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ngbe_sriov_reinit(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ngbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} +#endif + +void ngbe_do_reset(struct net_device *netdev) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ngbe_reinit_locked(adapter); + else + ngbe_reset(adapter); +} + +static netdev_features_t ngbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + features &= ~NETIF_F_LRO; + + return features; +} + +static int ngbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ngbe_vlan_strip_enable(adapter); + else + ngbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_RXHASH) { + if (!(adapter->flags2 & NGBE_FLAG2_RSS_ENABLED)) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + } else { + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) { + wr32m(&adapter->hw, NGBE_RDB_RA_CTL, + NGBE_RDB_RA_CTL_RSS_EN, ~NGBE_RDB_RA_CTL_RSS_EN); + adapter->flags2 &= ~NGBE_FLAG2_RSS_ENABLED; + } + } + + if (need_reset) + ngbe_do_reset(netdev); + + return 0; +} + +static int ngbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (NGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +static int ngbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } + + adapter->bridge_mode = mode; + + /* re-configure settings related to bridge mode */ + ngbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +static int ngbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +{ + struct ngbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} + +#define NGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t ngbe_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) { + vlan_num++; + } + + if (vlan_depth) { + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + NGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + return features; +} + +static const struct net_device_ops ngbe_netdev_ops = { + .ndo_open = ngbe_open, + .ndo_stop = ngbe_close, + .ndo_start_xmit = ngbe_xmit_frame, + .ndo_set_rx_mode = ngbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ngbe_set_mac, + .ndo_change_mtu = ngbe_change_mtu, + .ndo_tx_timeout = ngbe_tx_timeout, + .ndo_vlan_rx_add_vid = ngbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ngbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ngbe_ioctl, + + .ndo_set_vf_mac = ngbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ngbe_ndo_set_vf_vlan, + /* set_vf_rate not support by emerald */ + .ndo_set_vf_rate = ngbe_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = ngbe_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = ngbe_ndo_set_vf_trust, + .ndo_get_vf_config = ngbe_ndo_get_vf_config, + .ndo_get_stats64 = ngbe_get_stats64, + + .ndo_fdb_add = ngbe_ndo_fdb_add, + + .ndo_bridge_setlink = ngbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ngbe_ndo_bridge_getlink, + + .ndo_features_check = ngbe_features_check, + .ndo_set_features = ngbe_set_features, + .ndo_fix_features = ngbe_fix_features, +}; + +void ngbe_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &ngbe_netdev_ops; + ngbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ngbe_wol_supported - Check whether device supports WoL + * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ngbe_wol_supported(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* check eeprom to see if WOL is enabled */ + if ((hw->bus.func == 0) || + (hw->bus.func == 1) || + (hw->bus.func == 2) || + (hw->bus.func == 3)) + return true; + else + return false; +} + + +/** + * ngbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ngbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ngbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ +static int ngbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct net_device *netdev; + struct ngbe_adapter *adapter = NULL; + struct ngbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac, expected_gts; + u32 eeprom_verl = 0; + u32 etrack_id = 0; + char *info_string, *i_s_var; + u32 eeprom_cksum_devcap = 0; + u32 saved_version = 0; + u32 devcap; + + bool disable_dev = false; + + netdev_features_t hw_features; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + ngbe_driver_name); + if (err) { + dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + /* errata 16 */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x1000); + + netdev = alloc_etherdev_mq(sizeof(struct ngbe_adapter), NGBE_MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + /* autoneg default on */ + hw->mac.autoneg = true; + + /* assign netdev ops and ethtool ops */ + ngbe_assign_netdev_ops(netdev); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = ngbe_sw_init(adapter); + if (err) + goto err_sw_init; + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + ngbe_check_options(adapter); + + TCALL(hw, mac.ops.set_lan_id); + + /* check if flash load is done after hw power up */ + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PERST); + if (err) + goto err_sw_init; + err = ngbe_check_flash_load(hw, NGBE_SPI_ILDR_STATUS_PWRRST); + if (err) + goto err_sw_init; + + /* reset_hw fills in the perm_addr as well */ + + hw->phy.reset_if_overtemp = true; + err = TCALL(hw, mac.ops.reset_hw); + hw->phy.reset_if_overtemp = false; + if (err) { + e_dev_err("HW reset failed: %d\n", err); + goto err_sw_init; + } + +#ifdef CONFIG_PCI_IOV + if (adapter->num_vfs > 0) { + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module " + "parameter is deprecated.\n"); + e_dev_warn("Please use the pci sysfs interface instead. 
Ex:\n"); + e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x" + "/sriov_numvfs\n", + adapter->num_vfs, + pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + } + + if (adapter->flags & NGBE_FLAG_SRIOV_CAPABLE) { + pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT); + ngbe_enable_sriov(adapter); + } +#endif /* CONFIG_PCI_IOV */ + + netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= ngbe_tso_features(); + + if (adapter->flags2 & NGBE_FLAG2_RSS_ENABLED) + netdev->features |= NETIF_F_RXHASH; + + netdev->features |= NETIF_F_RXCSUM; + + /* copy netdev features into list of user selectable features */ + hw_features = netdev->hw_features; + hw_features |= netdev->features; + + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= NETIF_F_NTUPLE; + + hw_features |= NETIF_F_NTUPLE; + netdev->hw_features = hw_features; + + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + /* MTU range: 68 - 9414 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= NETIF_F_HIGHDMA; + + } + + if (hw->bus.lan_id == 0) { + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + } else { + eeprom_cksum_devcap = rd32(hw, NGBE_CALSUM_CAP_STATUS); + saved_version = rd32(hw, NGBE_EEPROM_VERSION_STORE_REG); + } + + TCALL(hw, eeprom.ops.init_params); + TCALL(hw, mac.ops.release_swfw_sync, NGBE_MNG_SWFW_SYNC_SW_MB); + if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + /* make sure the EEPROM is good */ + if (TCALL(hw, eeprom.ops.eeprom_chksum_cap_st, NGBE_CALSUM_COMMAND, &devcap)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + ngbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, ngbe_service_timer, 0); +#ifdef CONFIG_NGBE_POLL_LINK_STATUS + timer_setup(&adapter->link_check_timer, ngbe_link_check_timer, 0); +#endif + if (NGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, ngbe_service_task); + set_bit(__NGBE_SERVICE_INITED, &adapter->state); + clear_bit(__NGBE_SERVICE_SCHED, &adapter->state); + + err = ngbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; + if (hw->bus.lan_id == 0 || eeprom_cksum_devcap == 0) { + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + NGBE_DEVICE_CAPS, + &adapter->eeprom_cap); + /*only support in LAN0*/ + adapter->eeprom_cap = NGBE_DEVICE_CAPS_WOL_PORT0; + } else { + adapter->eeprom_cap = eeprom_cksum_devcap & 0xffff; + } + if (ngbe_wol_supported(adapter)) + adapter->wol = NGBE_PSR_WKUP_CTL_MAG; + if ((hw->subsystem_device_id & WOL_SUP_MASK) == WOL_SUP) { + /*enable wol first 
in shadow ram*/ + ngbe_write_ee_hostif(hw, 0x7FE, 0xa50F); + ngbe_write_ee_hostif(hw, 0x7FF, 0x5a5a); + } + hw->wol_enabled = !!(adapter->wol); + wr32(hw, NGBE_PSR_WKUP_CTL, adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + /* + * Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + if (hw->bus.lan_id == 0 || saved_version == 0) { + TCALL(hw, eeprom.ops.read32, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; + wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + wr32(hw, NGBE_CALSUM_CAP_STATUS, 0x10000 | (u32)adapter->eeprom_cap); + } else if (eeprom_cksum_devcap) { + etrack_id = saved_version; + } else { + TCALL(hw, eeprom.ops.read32, + hw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = eeprom_verl; + } + + /* Make sure offset to SCSI block is valid */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", etrack_id); + + /* reset the hardware with the new settings */ + err = TCALL(hw, mac.ops.start_hw); + if (err == NGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + } else if (err) { + e_dev_err("HW init failed, err = %d\n", err); + goto err_register; + } + + /* pick up the PCI bus settings for reporting later */ + TCALL(hw, mac.ops.get_bus_info); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, adapter); + adapter->netdev_registered = true; + + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + /* keep stopping all the transmit queues for older kernels */ + netif_tx_stop_all_queues(netdev); + + /* print all messages at the end so that we use our eth%d name */ + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure that no warning is displayed, as this could confuse + * users otherwise. 
*/ + + expected_gts = ngbe_enumerate_functions(adapter) * 10; + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ngbe_check_minimum_link(adapter, expected_gts); + + TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF); + + if (((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP) || + ((hw->subsystem_device_id & OEM_MASK) == OCP_CARD)) + e_info(probe, "NCSI : support"); + else + e_info(probe, "NCSI : unsupported"); + + e_info(probe, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", + hw->phy.type == ngbe_phy_internal?"Internal":"External"); + + e_info(probe, "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); + if (adapter->flags & NGBE_FLAG_TPH_ENABLED) + i_s_var += sprintf(i_s_var, "TPH "); + + BUG_ON(i_s_var > (info_string + INFO_STRING_LEN)); + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: + +#ifdef CONFIG_PCI_IOV + if (adapter->flags & NGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) + ngbe_vf_configuration(pdev, (i | 0x10000000)); + } +#endif + + e_info(probe, "WangXun(R) Gigabit Network Connection\n"); + cards_found++; + +#ifdef CONFIG_NGBE_SYSFS + if (ngbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +#else +#ifdef CONFIG_NGBE_PROCFS + if (ngbe_procfs_init(adapter)) + e_err(probe, "failed to allocate procfs resources\n"); +#endif /* CONFIG_NGBE_PROCFS */ +#endif /* CONFIG_NGBE_SYSFS */ + + +#ifdef CONFIG_NGBE_DEBUG_FS + ngbe_dbg_adapter_init(adapter); +#endif /* CONFIG_NGBE_DEBUG_FS */ + + return 0; + +err_register: + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); +err_sw_init: +#ifdef CONFIG_PCI_IOV + ngbe_disable_sriov(adapter); +#endif /* CONFIG_PCI_IOV */ + adapter->flags2 &= ~NGBE_FLAG2_SEARCH_FOR_SFP; + kfree(adapter->mac_table); + iounmap(adapter->io_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + + return err; +} + +/** + * ngbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ngbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void ngbe_remove(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; +#ifdef CONFIG_NGBE_DEBUG_FS + ngbe_dbg_adapter_exit(adapter); +#endif + + set_bit(__NGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + +#ifdef CONFIG_NGBE_SYSFS + ngbe_sysfs_exit(adapter); +#else +#ifdef CONFIG_NGBE_PROCFS + ngbe_procfs_exit(adapter); +#endif +#endif /* CONFIG_NGBE_SYSFS */ + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + +#ifdef CONFIG_PCI_IOV + ngbe_disable_sriov(adapter); +#endif + + ngbe_clear_interrupt_scheme(adapter); + ngbe_release_hw_control(adapter); + + iounmap(adapter->io_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__NGBE_DISABLED, &adapter->state); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +static bool ngbe_check_cfg_remove(struct ngbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == NGBE_FAILED_READ_CFG_WORD) { + ngbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ngbe_read_pci_cfg_word(struct ngbe_hw *hw, u32 reg) +{ + struct ngbe_adapter *adapter = hw->back; + u16 value; + + if (NGBE_REMOVED(hw->hw_addr)) + return NGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == NGBE_FAILED_READ_CFG_WORD && + ngbe_check_cfg_remove(hw, adapter->pdev)) + return NGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef CONFIG_PCI_IOV +static u32 ngbe_read_pci_cfg_dword(struct ngbe_hw *hw, u32 reg) +{ + struct ngbe_adapter *adapter = hw->back; + u32 value; + + if (NGBE_REMOVED(hw->hw_addr)) + return NGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == NGBE_FAILED_READ_CFG_DWORD && + ngbe_check_cfg_remove(hw, adapter->pdev)) + return NGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ + +void ngbe_write_pci_cfg_word(struct ngbe_hw *hw, u32 reg, u16 value) +{ + struct ngbe_adapter *adapter = hw->back; + + if (NGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +/** + * ngbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t ngbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct ngbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = ngbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ngbe_read_pci_cfg_dword(hw, + pos + PCI_ERR_HEADER_LOG + 12); + if (NGBE_REMOVED(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; + /* if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + NGBE_VF_DEVICE_ID, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_TRUSTNETIC, + NGBE_VF_DEVICE_ID, vfdev); + } + /* + * There's a slim chance the VF could have been hot + * plugged, so if it is no longer present we don't need + * to issue the VFLR.Just clean up the AER in that case. + */ + if (vfdev) { + ngbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. + */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + + skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + + if (!test_bit(__NGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + ngbe_close(netdev); + + if (!test_and_set_bit(__NGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ngbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
+ */ +static pci_ers_result_t ngbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__NGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + /* + * After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ngbe_reset(adapter); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * ngbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void ngbe_io_resume(struct pci_dev *pdev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } +#endif + rtnl_lock(); + if (netif_running(netdev)) + ngbe_open(netdev); + + netif_device_attach(netdev); + rtnl_unlock(); +} + +static const struct pci_error_handlers ngbe_err_handler = { + .error_detected = ngbe_io_error_detected, + .slot_reset = ngbe_io_slot_reset, + .resume = ngbe_io_resume, +}; + +struct net_device *ngbe_hw_to_netdev(const struct ngbe_hw *hw) +{ + return ((struct ngbe_adapter *)hw->back)->netdev; +} +struct ngbe_msg *ngbe_hw_to_msg(const struct ngbe_hw *hw) +{ + struct ngbe_adapter *adapter = + container_of(hw, struct ngbe_adapter, hw); + return (struct ngbe_msg *)&adapter->msg_enable; +} + +static struct pci_driver ngbe_driver = { + .name = ngbe_driver_name, + .id_table = ngbe_pci_tbl, + .probe = ngbe_probe, + .remove = ngbe_remove, +#ifdef CONFIG_PM + .suspend = ngbe_suspend, + .resume = ngbe_resume, +#endif + .shutdown = ngbe_shutdown, + .sriov_configure = ngbe_pci_sriov_configure, + .err_handler = &ngbe_err_handler +}; + +/** + * ngbe_init_module - Driver Registration Routine + * + * ngbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ngbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ngbe_driver_string, ngbe_driver_version); + pr_info("%s\n", ngbe_copyright); + + ngbe_wq = create_singlethread_workqueue(ngbe_driver_name); + if (!ngbe_wq) { + pr_err("%s: Failed to create workqueue\n", ngbe_driver_name); + return -ENOMEM; + } + +#ifdef CONFIG_NGBE_PROCFS + if (ngbe_procfs_topdir_init()) + pr_info("Procfs failed to initialize topdir\n"); +#endif + +#ifdef CONFIG_NGBE_DEBUG_FS + ngbe_dbg_init(); +#endif + + ret = pci_register_driver(&ngbe_driver); + return ret; +} + +module_init(ngbe_init_module); + +/** + * ngbe_exit_module - Driver Exit Cleanup Routine + * + * ngbe_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit ngbe_exit_module(void) +{ + pci_unregister_driver(&ngbe_driver); +#ifdef CONFIG_NGBE_PROCFS + ngbe_procfs_topdir_exit(); +#endif + destroy_workqueue(ngbe_wq); +#ifdef CONFIG_NGBE_DEBUG_FS + ngbe_dbg_exit(); +#endif /* CONFIG_NGBE_DEBUG_FS */ +} + +module_exit(ngbe_exit_module); + +/* ngbe_main.c */ diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.c b/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..34167f78c207fd817dac9b39946c6a6760f2a924 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.c @@ -0,0 +1,687 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe_type.h" +#include "ngbe.h" +#include "ngbe_mbx.h" + + +/** + * ngbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int ngbe_read_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int ngbe_write_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = 0; + + if (size > mbx->size) { + err = NGBE_ERR_MBX; + ERROR_REPORT2(NGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + return err; +} + +/** + * ngbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_msg, mbx_id); + + return err; +} + +/** + * ngbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_ack, mbx_id); + + return err; +} + +/** + * ngbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int ngbe_check_for_rst(struct 
ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * ngbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +int ngbe_poll_for_msg(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_msg, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? 0 : NGBE_ERR_MBX; +} + +/** + * ngbe_poll_for_ack - Wait for message acknowledngbeent + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledngbeent + **/ +int ngbe_poll_for_ack(struct ngbe_hw *hw, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_ack, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? 0 : NGBE_ERR_MBX; +} + +/** + * ngbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
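[Annotation, not part of the patch] As a usage illustration of the posted helpers defined just below: a VF-side request/reply exchange might look like the sketch here. The function name is hypothetical; the message ID, ACK flag and error code come from ngbe_mbx.h in this patch.

	static int ngbe_vf_request_reset(struct ngbe_hw *hw)
	{
		u32 msg[2] = { NGBE_VF_RESET, 0 };
		int err;

		/* post the request; returns once the PF has ACKed the write */
		err = ngbe_write_posted_mbx(hw, msg, 2, 0);
		if (err)
			return err;

		/* wait for the PF's reply and reject a NACKed command */
		err = ngbe_read_posted_mbx(hw, msg, 2, 0);
		if (err)
			return err;

		return (msg[0] & NGBE_VT_MSGTYPE_ACK) ? 0 : NGBE_ERR_MBX;
	}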
+ **/ +int ngbe_read_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err = NGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + err = ngbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!err) + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); +out: + return err; +} + +/** + * ngbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +int ngbe_write_posted_mbx(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + int err; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->timeout) + return NGBE_ERR_MBX; + + /* send msg */ + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!err) + err = ngbe_poll_for_ack(hw, mbx_id); + + return err; +} + +/** + * ngbe_init_mbx_ops - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ngbe_init_mbx_ops(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; +} + +/** + * ngbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +u32 ngbe_read_v2p_mailbox(struct ngbe_hw *hw) +{ + u32 v2p_mailbox = rd32(hw, NGBE_VXMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & NGBE_VXMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ngbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +int ngbe_check_for_bit_vf(struct ngbe_hw *hw, u32 mask) +{ + u32 mailbox = ngbe_read_v2p_mailbox(hw); + + hw->mbx.v2p_mailbox &= ~mask; + + return (mailbox & mask ? 
0 : NGBE_ERR_MBX); +} + +/** + * ngbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +int ngbe_check_for_msg_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf sts bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFSTS)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +int ngbe_check_for_ack_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + + /* read clear the pf ack bit */ + if (!ngbe_check_for_bit_vf(hw, NGBE_VXMAILBOX_PFACK)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +int ngbe_check_for_rst_vf(struct ngbe_hw *hw, u16 mbx_id) +{ + int err = NGBE_ERR_MBX; + + UNREFERENCED_PARAMETER(mbx_id); + if (!ngbe_check_for_bit_vf(hw, (NGBE_VXMAILBOX_RSTD | + NGBE_VXMAILBOX_RSTI))) { + err = 0; + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +int ngbe_obtain_mbx_lock_vf(struct ngbe_hw *hw) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_VFU); + + /* reserve mailbox for vf use */ + mailbox = ngbe_read_v2p_mailbox(hw); + if (mailbox & NGBE_VXMAILBOX_VFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF"); + + return err; +} + +/** + * ngbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int ngbe_write_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err; + u16 i; + + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_vf(hw, 0); + ngbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_VXMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * ngbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int ngbe_read_mbx_vf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + int err = 0; + u16 i; + UNREFERENCED_PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition 
*/ + err = ngbe_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_VXMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + wr32(hw, NGBE_VXMAILBOX, NGBE_VXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * ngbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ngbe_init_mbx_params_vf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = NGBE_VF_MBX_INIT_DELAY; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_vf; + mbx->ops.write = ngbe_write_mbx_vf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_vf; + mbx->ops.check_for_ack = ngbe_check_for_ack_vf; + mbx->ops.check_for_rst = ngbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +int ngbe_check_for_bit_pf(struct ngbe_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(hw, NGBE_MBVFICR); + int err = NGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, NGBE_MBVFICR, mask); + } + + return err; +} + +/** + * ngbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int ngbe_check_for_msg_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFREQ_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * ngbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int ngbe_check_for_ack_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 vf_bit = vf; + + if (!ngbe_check_for_bit_pf(hw, NGBE_MBVFICR_VFACK_VF1 << vf_bit)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * ngbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int ngbe_check_for_rst_pf(struct ngbe_hw *hw, u16 vf) +{ + u32 vflre = 0; + int err = NGBE_ERR_MBX; + + vflre = rd32(hw, NGBE_VFLRE); + + if (vflre & (1 << vf)) { + err = 0; + wr32(hw, NGBE_VFLREC, (1 << vf)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * ngbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +int ngbe_obtain_mbx_lock_pf(struct ngbe_hw *hw, u16 vf) +{ + int err = NGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, NGBE_PXMAILBOX(vf)); + if (mailbox & NGBE_PXMAILBOX_PFU) + err = 0; + else + ERROR_REPORT2(NGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); + + return err; +} + +/** + * ngbe_write_mbx_pf - Places a message 
in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int ngbe_write_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ngbe_check_for_msg_pf(hw, vf); + ngbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, NGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; +} + +/** + * ngbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +int ngbe_read_mbx_pf(struct ngbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = ngbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, NGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + wr32(hw, NGBE_PXMAILBOX(vf), NGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * ngbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ngbe_init_mbx_params_pf(struct ngbe_hw *hw) +{ + struct ngbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->udelay = 0; + + mbx->size = NGBE_VXMAILBOX_SIZE; + + mbx->ops.read = ngbe_read_mbx_pf; + mbx->ops.write = ngbe_write_mbx_pf; + mbx->ops.read_posted = ngbe_read_posted_mbx; + mbx->ops.write_posted = ngbe_write_posted_mbx; + mbx->ops.check_for_msg = ngbe_check_for_msg_pf; + mbx->ops.check_for_ack = ngbe_check_for_ack_pf; + mbx->ops.check_for_rst = ngbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.h b/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..5e89fa180f968f350e33a6d6387d9e0edfb1ceb7 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_mbx.h @@ -0,0 +1,167 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef _NGBE_MBX_H_ +#define _NGBE_MBX_H_ + +#define NGBE_VXMAILBOX_SIZE (16) + +/** + * VF Registers + **/ +#define NGBE_VXMAILBOX 0x00600 +#define NGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define NGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define NGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define NGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define NGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define NGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define NGBE_VXMAILBOX_R2C_BITS (NGBE_VXMAILBOX_RSTD | \ + NGBE_VXMAILBOX_PFSTS | NGBE_VXMAILBOX_PFACK) + +#define NGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define NGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,7] */ +#define NGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define NGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define NGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define NGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define NGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,7] */ + +#define NGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define NGBE_VFLRE 0x004A0 +#define NGBE_VFLREC 0x004A8 + +/* SR-IOV specific macros */ +#define NGBE_MBVFICR 0x00480 +#define NGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define NGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define NGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define NGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define NGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a NGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is NGBE_PF_*. 
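[Annotation, not part of the patch] A hedged illustration of how the fields defined below combine into one mailbox word; the filter-slot interpretation of the info bits is an assumption made for the example, not something this header states.

	u32 request;

	/* command ID in the low bits, per-command extra info in bits 23:16
	 * (here: a hypothetical unicast filter slot 2) */
	request = NGBE_VF_SET_MACVLAN | (2 << NGBE_VT_MSGINFO_SHIFT);

	/* a successful PF reply echoes the command with NGBE_VT_MSGTYPE_ACK
	 * set; NGBE_VT_MSGTYPE_NACK means the request was refused */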
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define NGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define NGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define NGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define NGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define NGBE_VT_MSGINFO_MASK (0xFF << NGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ngbe_pfvf_api_rev { + ngbe_mbox_api_null, + ngbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ngbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ngbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + ngbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ngbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ngbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define NGBE_VF_RESET 0x01 /* VF requests reset */ +#define NGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define NGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define NGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define NGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define NGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define NGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define NGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define NGBE_VF_UPDATE_XCAST_MODE 0x0c +#define NGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +#define NGBE_VF_GET_LINK_STATUS 0x20 /* VF get link status from PF */ + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum ngbevf_xcast_modes { + NGBEVF_XCAST_MODE_NONE = 0, + NGBEVF_XCAST_MODE_MULTI, + NGBEVF_XCAST_MODE_ALLMULTI, + NGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define NGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define NGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define NGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define NGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define NGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define NGBE_VF_MC_TYPE_WORD 3 + +#define NGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define NGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define NGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define NGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define NGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define NGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define NGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define NGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define NGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define NGBE_PF_TRANSPARENT_VLAN 0x0101 /* 
enable transparent vlan */ + +#define NGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define NGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +int ngbe_read_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_read_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_write_posted_mbx(struct ngbe_hw *, u32 *, u16, u16); +int ngbe_check_for_msg(struct ngbe_hw *, u16); +int ngbe_check_for_ack(struct ngbe_hw *, u16); +int ngbe_check_for_rst(struct ngbe_hw *, u16); +void ngbe_init_mbx_ops(struct ngbe_hw *hw); +void ngbe_init_mbx_params_vf(struct ngbe_hw *); +void ngbe_init_mbx_params_pf(struct ngbe_hw *); + +#endif /* _NGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_param.c b/drivers/net/ethernet/netswift/ngbe/ngbe_param.c new file mode 100644 index 0000000000000000000000000000000000000000..92f0dd0f32734b75e38c6332939b0d97069b9364 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_param.c @@ -0,0 +1,839 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + + +#include +#include + +#include "ngbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define NGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define NGBE_PARAM_INIT { [0 ... NGBE_MAX_NIC] = OPTION_UNSET } + +#define NGBE_PARAM(X, desc) \ + static int X[NGBE_MAX_NIC + 1] = NGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array(X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +NGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); +NGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define NGBE_INT_LEGACY 0 +#define NGBE_INT_MSI 1 +#define NGBE_INT_MSIX 2 +#define NGBE_DEFAULT_INT NGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +NGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. 
+ * + * Default Value: 0 + */ + +NGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. + * + * Default Value: 1 + */ + +#define NGBE_DEFAULT_NUM_VMDQ 8 + +NGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable, " + "2-16 enable (default=" XSTRINGIFY(NGBE_DEFAULT_NUM_VMDQ) ")"); + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 + * - 0 Disables SR-IOV + * - 1-63 - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +#define MAX_SRIOV_VFS 8 + +NGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* VEPA - Set internal bridge to VEPA mode + * + * Valid Range: 0-1 + * - 0 Set bridge to VEB mode + * - 1 Set bridge to VEPA mode + * + * Default Value: 0 + */ +/* + *Note: + *===== + * This provides ability to ensure VEPA mode on the internal bridge even if + * the kernel does not support the netdev bridge setting operations. +*/ +NGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); +#endif + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +NGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " + "(0,1,980-500000), default 1"); +#define MAX_ITR NGBE_MAX_INT_RATE +#define MIN_ITR NGBE_MIN_INT_RATE + +#ifndef CONFIG_NGBE_NO_LLI + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ +NGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +#endif /* CONFIG_NGBE_NO_LLI */ + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ +NGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); + +#define NGBE_MAX_ATR_SAMPLE_RATE 255 +#define NGBE_MIN_ATR_SAMPLE_RATE 1 +#define NGBE_ATR_SAMPLE_RATE_OFF 0 +#define NGBE_DEFAULT_ATR_SAMPLE_RATE 20 + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ +NGBE_PARAM(LRO, "Large Receive Offload (0,1), default 1 = on"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * Default Value: 0 + */ +NGBE_PARAM(dmac_watchdog, + "DMA coalescing 
watchdog in microseconds (0,41-10000)," + "default 0 = off"); + +/* Rx buffer mode + * + * Valid Range: 0-1 0 = no header split, 1 = hdr split + * + * Default Value: 0 + */ +NGBE_PARAM(RxBufferMode, "0=(default)no header split\n" + "\t\t\t1=hdr split for recognized packet\n"); + +#define NGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define NGBE_RXBUFMODE_HEADER_SPLIT 1 +#define NGBE_DEFAULT_RXBUFMODE NGBE_RXBUFMODE_NO_HEADER_SPLIT + +struct ngbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ngbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int ngbe_validate_option(u32 *value, + struct ngbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + ngbe_info("ngbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + ngbe_info("ngbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + if (opt->msg) + ngbe_info("ngbe: %s set to %d, %s\n", + opt->name, val, opt->msg); + else + ngbe_info("ngbe: %s set to %d\n", + opt->name, val); + return 0; + } + break; + case list_option: { + int i; + const struct ngbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) { + if (ent->str[0] != '\0') + ngbe_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG_ON(1); + } + + ngbe_info("ngbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return -1; +} + +/** + * ngbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
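[Annotation, not part of the patch] A sketch of how the validator above is meant to be used; the "Foo" knob and the snippet are hypothetical. An out-of-range value is reported, replaced with the option's default, and flagged with a negative return.

	{
		static struct ngbe_option opt = {
			.type = range_option,
			.name = "Foo",
			.err  = "using default of 0",
			.def  = 0,
			.arg  = { .r = { .min = 0, .max = 7 } }
		};
		u32 foo = 42;	/* pretend this came from a module parameter array */

		ngbe_validate_option(&foo, &opt);
		/* 42 is out of range and not the default, so an "Invalid Foo
		 * specified" message is logged, foo now holds opt.def (0), and
		 * the call returned -1 */
	}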
+ **/ +void ngbe_check_options(struct ngbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct ngbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + if (bd >= NGBE_MAX_NIC) { + ngbe_notice("Warning: no configuration for board #%d\n", bd); + ngbe_notice("Using defaults for all values\n"); + } + + { /* Interrupt Mode */ + u32 int_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of "__MODULE_STRING(NGBE_DEFAULT_INT), + .def = NGBE_DEFAULT_INT, + .arg = { .r = { .min = NGBE_INT_LEGACY, + .max = NGBE_INT_MSIX} } + }; + + if (num_IntMode > bd || num_InterruptType > bd) { + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + ngbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case NGBE_INT_MSIX: + if (!(*aflags & NGBE_FLAG_MSIX_CAPABLE)) + ngbe_info( + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case NGBE_INT_MSI: + if (!(*aflags & NGBE_FLAG_MSI_CAPABLE)) { + ngbe_info( + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + } + break; + case NGBE_INT_LEGACY: + default: + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + break; + } + } else { + /* default settings */ + if (opt.def == NGBE_INT_MSIX && + *aflags & NGBE_FLAG_MSIX_CAPABLE) { + *aflags |= NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == NGBE_INT_MSI && + *aflags & NGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags |= NGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~NGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~NGBE_FLAG_MSI_CAPABLE; + } + } + } + { /* Multiple Queue Support */ + static struct ngbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_MQ > bd) { + u32 mq = MQ[bd]; + ngbe_validate_option(&mq, &opt); + if (mq) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= NGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } + /* Check Interoperability */ + if ((*aflags & NGBE_FLAG_MQ_CAPABLE) && + !(*aflags & NGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. Disabling Multiple Queues.\n"); + *aflags &= ~NGBE_FLAG_MQ_CAPABLE; + } + } + + { /* Receive-Side Scaling (RSS) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = ngbe_max_rss_indices(adapter); + + if (num_RSS > bd) { + ngbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + + feature[RING_F_RSS].limit = (u16)rss; + } else if (opt.def == 0) { + rss = min_t(int, ngbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. 
" + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + adapter->flags2 |= NGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = NGBE_MAX_VMDQ_INDICES + } } + }; + + if (num_VMDQ > bd) { + vmdq = VMDQ[bd]; + + ngbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= NGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } + + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. " + "Disabling VMDQ.\n"); + *aflags &= ~NGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + static struct ngbe_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = MAX_SRIOV_VFS} } + }; + + if (num_max_vfs > bd) { + u32 vfs = max_vfs[bd]; + if (ngbe_validate_option(&vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + else + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= NGBE_FLAG_SRIOV_ENABLED; + } + } + + /* Check Interoperability */ + if (*aflags & NGBE_FLAG_SRIOV_ENABLED) { + if (!(*aflags & NGBE_FLAG_SRIOV_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported on this " + "hardware. Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } else if (!(*aflags & NGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "IOV is not supported while multiple " + "queues are disabled. 
" + "Disabling IOV.\n"); + *aflags &= ~NGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; + } + } + } + { /* VEPA Bridge Mode enable for SR-IOV mode */ + static struct ngbe_option opt = { + .type = range_option, + .name = "VEPA Bridge Mode Enable", + .err = "defaulting to disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_ENABLED} } + }; + + if (num_VEPA > bd) { + u32 vepa = VEPA[bd]; + ngbe_validate_option(&vepa, &opt); + if (vepa) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags |= + NGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } + } +#endif /* CONFIG_PCI_IOV */ + { /* Interrupt Throttling Rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + ngbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000/itr) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } + } +#ifndef CONFIG_NGBE_NO_LLI + { /* Low Latency Interrupt TCP Port*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + + if (num_LLIPort > bd) { + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + ngbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_port = opt.def; + } + } + { /* Low Latency Interrupt on Packet Size */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + + if (num_LLISize > bd) { + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + ngbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_size = opt.def; + } + } + { /* Low Latency Interrupt EtherType*/ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + + if (num_LLIEType > bd) { + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + ngbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_etype = opt.def; + } + } + { /* LLI VLAN Priority */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + 
__MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + + if (num_LLIVLANP > bd) { + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + ngbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_vlan_pri = opt.def; + } + } +#endif /* CONFIG_NGBE_NO_LLI */ + + { /* Flow Director ATR Tx sample packet rate */ + static struct ngbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = NGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = NGBE_ATR_SAMPLE_RATE_OFF, + .max = NGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + ngbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } + + { /* LRO - Set Large Receive Offload */ + struct ngbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + struct net_device *netdev = adapter->netdev; + opt.def = OPTION_DISABLED; + + if (num_LRO > bd) { + u32 lro = LRO[bd]; + ngbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } + + if ((netdev->features & NETIF_F_LRO)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. 
Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } + } + { /* DMA Coalescing */ + struct ngbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this " + "hardware"; + + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + + if (num_dmac_watchdog > bd) { + u32 dmac_wd = dmac_watchdog[bd]; + + ngbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = (u16)dmac_wd; + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } + } + + { /* Rx buffer mode */ + u32 rx_buf_mode; + static struct ngbe_option opt = { + .type = range_option, + .name = "Rx buffer mode", + .err = "using default of " + __MODULE_STRING(NGBE_DEFAULT_RXBUFMODE), + .def = NGBE_DEFAULT_RXBUFMODE, + .arg = {.r = {.min = NGBE_RXBUFMODE_NO_HEADER_SPLIT, + .max = NGBE_RXBUFMODE_HEADER_SPLIT} } + + }; + + if (num_RxBufferMode > bd) { + rx_buf_mode = RxBufferMode[bd]; + ngbe_validate_option(&rx_buf_mode, &opt); + switch (rx_buf_mode) { + case NGBE_RXBUFMODE_NO_HEADER_SPLIT: + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + break; + case NGBE_RXBUFMODE_HEADER_SPLIT: + *aflags |= NGBE_FLAG_RX_HS_ENABLED; + break; + default: + break; + } + } else { + *aflags &= ~NGBE_FLAG_RX_HS_ENABLED; + } + } +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.c b/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.c new file mode 100644 index 0000000000000000000000000000000000000000..8d47bfabd6ad8a8a3b99d6e57982a220e9b1064a --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.c @@ -0,0 +1,257 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include +#include +#include "ngbe_pcierr.h" +#include "ngbe.h" + +#define NGBE_ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \ + PCI_ERR_ROOT_CMD_NONFATAL_EN| \ + PCI_ERR_ROOT_CMD_FATAL_EN) + +#ifndef PCI_ERS_RESULT_NO_AER_DRIVER +/* No AER capabilities registered for the driver */ +#define PCI_ERS_RESULT_NO_AER_DRIVER ((__force pci_ers_result_t) 6) +#endif + +static pci_ers_result_t merge_result(enum pci_ers_result orig, + enum pci_ers_result new) +{ + if (new == PCI_ERS_RESULT_NO_AER_DRIVER) + return PCI_ERS_RESULT_NO_AER_DRIVER; + if (new == PCI_ERS_RESULT_NONE) + return orig; + switch (orig) { + case PCI_ERS_RESULT_CAN_RECOVER: + case PCI_ERS_RESULT_RECOVERED: + orig = new; + break; + case PCI_ERS_RESULT_DISCONNECT: + if (new == PCI_ERS_RESULT_NEED_RESET) + orig = PCI_ERS_RESULT_NEED_RESET; + break; + default: + break; + } + return orig; +} + +static int ngbe_report_error_detected(struct pci_dev *dev, + enum pci_channel_state state, + enum pci_ers_result *result) +{ + pci_ers_result_t vote; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->error_detected) { + /* + * If any device in the subtree does not have an error_detected + * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent + * error callbacks of "any" device in the subtree, and will + * exit in the disconnected error state. + */ + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + vote = PCI_ERS_RESULT_NO_AER_DRIVER; + else + vote = PCI_ERS_RESULT_NONE; + } else { + err_handler = dev->driver->err_handler; + vote = err_handler->error_detected(dev, state); + } + + *result = merge_result(*result, vote); + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_frozen_detected(struct pci_dev *dev, void *data) +{ + return ngbe_report_error_detected(dev, pci_channel_io_frozen, data); +} + +static int ngbe_report_mmio_enabled(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->mmio_enabled) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->mmio_enabled(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_slot_reset(struct pci_dev *dev, void *data) +{ + pci_ers_result_t vote, *result = data; + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + if (!dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->slot_reset) + goto out; + + err_handler = dev->driver->err_handler; + vote = err_handler->slot_reset(dev); + *result = merge_result(*result, vote); +out: + device_unlock(&dev->dev); + return 0; +} + +static int ngbe_report_resume(struct pci_dev *dev, void *data) +{ + const struct pci_error_handlers *err_handler; + + device_lock(&dev->dev); + dev->error_state = pci_channel_io_normal; + if ( + !dev->driver || + !dev->driver->err_handler || + !dev->driver->err_handler->resume) + goto out; + + err_handler = dev->driver->err_handler; + err_handler->resume(dev); +out: + device_unlock(&dev->dev); + return 0; +} + +void ngbe_pcie_do_recovery(struct pci_dev *dev) +{ + pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; + struct pci_bus *bus; + u32 reg32; + int pos; + int delay = 1; + u32 id; + u16 ctrl; + /* + * Error recovery runs on all subordinates of the first downstream port. 
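[Annotation, not part of the patch] A worked trace of the vote merging done by merge_result() above, since the precedence is easy to misread:

	CAN_RECOVER + RECOVERED     -> RECOVERED
	RECOVERED   + NONE          -> RECOVERED   (NONE never changes the result)
	RECOVERED   + NEED_RESET    -> NEED_RESET
	NEED_RESET  + DISCONNECT    -> NEED_RESET  (only CAN_RECOVER/RECOVERED adopt any new vote;
	                                            DISCONNECT is only ever upgraded to NEED_RESET)
	anything    + NO_AER_DRIVER -> NO_AER_DRIVER (always wins)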
+ * If the downstream port detected the error, it is cleared at the end. + */ + if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) + dev = dev->bus->self; + bus = dev->subordinate; + + pci_walk_bus(bus, ngbe_report_frozen_detected, &status); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 &= ~NGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); + ctrl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double + * * this to 2ms to ensure that we meet the minimum requirement. + * */ + + msleep(2); + ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + + /* + * * Trhfa for conventional PCI is 2^25 clock cycles. + * * Assuming a minimum 33MHz clock this results in a 1s + * * delay before we can consider subordinate devices to + * * be re-initialized. PCIe has some ways to shorten this, + * * but we don't make use of them yet. + * */ + ssleep(1); + + pci_read_config_dword(dev, PCI_COMMAND, &id); + while (id == ~0) { + if (delay > 60000) { + pci_warn(dev, "not ready %dms after %s; giving up\n", + delay - 1, "bus_reset"); + return; + } + + if (delay > 1000) + pci_info(dev, "not ready %dms after %s; waiting\n", + delay - 1, "bus_reset"); + + msleep(delay); + delay *= 2; + pci_read_config_dword(dev, PCI_COMMAND, &id); + } + + if (delay > 1000) + pci_info(dev, "ready %dms after %s\n", delay - 1, + "bus_reset"); + + pci_info(dev, "Root Port link has been reset\n"); + + if (pos) { + /* Clear Root Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); + + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, ®32); + reg32 |= NGBE_ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); + } + + if (status == PCI_ERS_RESULT_CAN_RECOVER) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast mmio_enabled message\n"); + pci_walk_bus(bus, ngbe_report_mmio_enabled, &status); + } + + if (status == PCI_ERS_RESULT_NEED_RESET) { + /* + * TODO: Should call platform-specific + * functions to reset slot before calling + * drivers' slot_reset callbacks? + */ + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast slot_reset message\n"); + pci_walk_bus(bus, ngbe_report_slot_reset, &status); + } + + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; + + pci_dbg(dev, "broadcast resume message\n"); + pci_walk_bus(bus, ngbe_report_resume, &status); + +failed: + return; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.h b/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.h new file mode 100644 index 0000000000000000000000000000000000000000..f92def4d2166767543ff60db838066fe7b20528b --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_pcierr.h @@ -0,0 +1,23 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef _NGBE_PCIERR_H_ +#define _NGBE_PCIERR_H_ + +void ngbe_pcie_do_recovery(struct pci_dev *dev); +#endif diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_phy.c b/drivers/net/ethernet/netswift/ngbe/ngbe_phy.c new file mode 100644 index 0000000000000000000000000000000000000000..2f9013c291a114a27c6208da7258b7019e3f9fc2 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_phy.c @@ -0,0 +1,1243 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe_phy.h" + +/** + * ngbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return faluse since the link can not be blocked + * via this method. 
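[Annotation, not part of the patch] A usage sketch for the paged accessor ngbe_phy_read_reg() defined further down in this file. The helper is hypothetical and assumes the internal PHY exposes the standard MII status register at page 0, register 1 (MII_BMSR and BMSR_LSTATUS come from linux/mii.h).

	static bool ngbe_example_internal_phy_link_up(struct ngbe_hw *hw)
	{
		u16 bmsr = 0;

		/* page 0, register 1: standard MII basic status word */
		if (ngbe_phy_read_reg(hw, MII_BMSR, 0, &bmsr))
			return false;

		return !!(bmsr & BMSR_LSTATUS);
	}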
+ **/ +bool ngbe_check_reset_blocked(struct ngbe_hw *hw) +{ + u32 mmngc; + + DEBUGFUNC("ngbe_check_reset_blocked"); + + mmngc = rd32(hw, NGBE_MIS_ST); + if (mmngc & NGBE_MIS_ST_MNG_VETO) { + ERROR_REPORT1(NGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + +/* For internal phy only */ +s32 ngbe_phy_read_reg(struct ngbe_hw *hw, + u32 reg_offset, + u32 page, + u16 *phy_data) +{ + /* clear input */ + *phy_data = 0; + + wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), + page); + + if (reg_offset >= NGBE_INTERNAL_PHY_OFFSET_MAX) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "input reg offset %d exceed maximum 31.\n", reg_offset); + return NGBE_ERR_INVALID_ARGUMENT; + } + + *phy_data = 0xFFFF & rd32(hw, NGBE_PHY_CONFIG(reg_offset)); + + return NGBE_OK; +} + +/* For internal phy only */ +s32 ngbe_phy_write_reg(struct ngbe_hw *hw, + u32 reg_offset, + u32 page, + u16 phy_data) +{ + + wr32(hw, NGBE_PHY_CONFIG(NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET), + page); + + if (reg_offset >= NGBE_INTERNAL_PHY_OFFSET_MAX) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "input reg offset %d exceed maximum 31.\n", reg_offset); + return NGBE_ERR_INVALID_ARGUMENT; + } + wr32(hw, NGBE_PHY_CONFIG(reg_offset), phy_data); + + return NGBE_OK; +} + +s32 ngbe_check_internal_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u16 phy_id = 0; + + DEBUGFUNC("ngbe_check_internal_phy_id"); + + ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + ngbe_phy_read_reg(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (NGBE_INTERNAL_PHY_ID != phy_id) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "internal phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } else + hw->phy.id = (u32)phy_id; + + return NGBE_OK; +} + +/** + * ngbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ngbe_phy_read_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return NGBE_ERR_PHY; + } + + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, NGBE_MSCC); + + return 0; +} + +/** + * ngbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ngbe_phy_write_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(reg_addr) | + NGBE_MSCA_PA(hw->phy.addr) | + NGBE_MSCA_DA(device_type); + wr32(hw, NGBE_MSCA, command); + + command = phy_data | 
NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | NGBE_MDIO_CLK(6); + wr32(hw, NGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, NGBE_MSCC, + NGBE_MSCC_BUSY, ~NGBE_MSCC_BUSY, + NGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return NGBE_ERR_PHY; + } + + return 0; +} + +s32 ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr); + if (!status) + status = ngbe_phy_read_reg_mdi(hw, 0x1f, device_type, phy_data); + return status; +} + +s32 ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_mdi(hw, 0x1e, device_type, reg_addr); + if (!status) + status = ngbe_phy_write_reg_mdi(hw, 0x1f, device_type, phy_data); + return status; +} + +s32 ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_read_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + return status; +} + +s32 ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_write_reg_ext_yt8521s(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + return status; +} + +s32 ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_read_reg_mdi(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + return status; +} + +s32 ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) +{ + s32 status = 0; + status = ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x02); + if (!status) + status = ngbe_phy_write_reg_mdi(hw, reg_addr, device_type, phy_data); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, device_type, 0x00); + return status; +} + +s32 ngbe_check_mdi_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u32 phy_id = 0; + + DEBUGFUNC("ngbe_check_mdi_phy_id"); + + if (hw->phy.type == ngbe_phy_m88e1512) { + /* select page 0 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 0); + } else { + /* select page 1 */ + ngbe_phy_write_reg_mdi(hw, 22, 0, 1); + } + + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (NGBE_M88E1512_PHY_ID != phy_id) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } else + hw->phy.id = phy_id; + + return NGBE_OK; +} + +bool ngbe_validate_phy_addr(struct ngbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + DEBUGFUNC("ngbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x3, 0, &phy_id); + if (phy_id != 
0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +s32 ngbe_check_yt_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id = 0; + bool valid = false; + u32 phy_addr; + DEBUGFUNC("ngbe_check_yt_phy_id"); + + for (phy_addr = 0; phy_addr < 32; phy_addr++) { + valid = ngbe_validate_phy_addr(hw, phy_addr); + if (valid) { + hw->phy.addr = phy_addr; + printk("valid phy addr is 0x%x\n", phy_addr); + break; + } + } + if (!valid) { + printk("cannot find valid phy address.\n"); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x3, 0, &phy_id); + if (NGBE_YT8521S_PHY_ID != phy_id) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + printk("phy id is 0x%x\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } else + hw->phy.id = phy_id; + return NGBE_OK; +} + +s32 ngbe_check_zte_phy_id(struct ngbe_hw *hw) +{ + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u16 phy_id = 0; + + DEBUGFUNC("ngbe_check_zte_phy_id"); + + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID1_OFFSET, 0, &phy_id_high); + phy_id = phy_id_high << 6; + ngbe_phy_read_reg_mdi(hw, NGBE_MDI_PHY_ID2_OFFSET, 0, &phy_id_low); + phy_id |= (phy_id_low & NGBE_MDI_PHY_ID_MASK) >> 10; + + if (NGBE_INTERNAL_PHY_ID != phy_id) { + ERROR_REPORT1(NGBE_ERROR_UNSUPPORTED, + "MDI phy id 0x%x not supported.\n", phy_id); + return NGBE_ERR_DEVICE_NOT_SUPPORTED; + } else + hw->phy.id = (u32)phy_id; + + return NGBE_OK; +} + +/** + * ngbe_phy_init - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * +**/ +s32 ngbe_phy_init(struct ngbe_hw *hw) +{ + s32 ret_val = 0; + u16 value = 0; + int i; + + DEBUGFUNC("\n"); + + /* set fwsw semaphore mask for phy first */ + if (!hw->phy.phy_semaphore_mask) { + hw->phy.phy_semaphore_mask = NGBE_MNG_SWFW_SYNC_SW_PHY; + } + + /* init phy.addr according to HW design */ + + hw->phy.addr = 0; + + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == NGBE_ERR_SFP_NOT_SUPPORTED) + return ret_val; + + /* enable interrupts, only link status change and AN done are allowed */ + if (hw->phy.type == ngbe_phy_internal) { + value = NGBE_INTPHY_INT_LSC | NGBE_INTPHY_INT_ANC; + TCALL(hw, phy.ops.write_reg, 0x12, 0xa42, value); + } else if (hw->phy.type == ngbe_phy_m88e1512 || + hw->phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 2); + TCALL(hw, phy.ops.read_reg_mdi, 21, 0, &value); + value &= ~NGBE_M88E1512_RGM_TTC; + value |= NGBE_M88E1512_RGM_RTC; + TCALL(hw, phy.ops.write_reg_mdi, 21, 0, value); + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, NGBE_MDI_PHY_RESET); + for (i = 0; i < 15; i++) { + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); + if (value & NGBE_MDI_PHY_RESET) + msleep(1); + else + break; + } + + if (i == 15) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "phy reset exceeds maximum waiting period.\n"); + return NGBE_ERR_PHY_TIMEOUT; + } + + ret_val = TCALL(hw, phy.ops.reset); + if (ret_val) { + return ret_val; + } + + /* set LED2 to interrupt output and INTn active low */ + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.read_reg_mdi, 18, 0, &value); + value |= NGBE_M88E1512_INT_EN; + value &= ~(NGBE_M88E1512_INT_POL); + TCALL(hw, phy.ops.write_reg_mdi,
18, 0, value); + + if (hw->phy.type == ngbe_phy_m88e1512_sfi) { + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 16, 0, &value); + value &= ~0x4; + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, value); + } + + /* enable link status change and AN complete interrupts */ + value = NGBE_M88E1512_INT_ANC | NGBE_M88E1512_INT_LSC; + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.write_reg_mdi, 18, 0, value); + + /* LED control */ + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 3); + TCALL(hw, phy.ops.read_reg_mdi, 16, 0, &value); + value &= ~0x00FF; + value |= (NGBE_M88E1512_LED1_CONF << 4) | NGBE_M88E1512_LED0_CONF; + TCALL(hw, phy.ops.write_reg_mdi, 16, 0, value); + TCALL(hw, phy.ops.read_reg_mdi, 17, 0, &value); + value &= ~0x000F; + + TCALL(hw, phy.ops.write_reg_mdi, 17, 0, value); + } else if (hw->phy.type == ngbe_phy_yt8521s_sfi) { + + /*enable yt8521s interrupt*/ + #if 1 + /* select sds area register */ + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, 0, 0x00); + + /* enable interrupt */ + value = 0x000C; + TCALL(hw, phy.ops.write_reg_mdi, 0x12, 0, value); + #endif + + /* select fiber_to_rgmii first */ + ngbe_phy_read_reg_ext_yt8521s(hw, 0xa006, 0, &value); + value &= ~0x100; + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa006, 0, value); + + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value |= 0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + } + + return ret_val; +} + +/** + * ngbe_identify_module - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 ngbe_phy_identify(struct ngbe_hw *hw) +{ + s32 status = 0; + + DEBUGFUNC("ngbe_phy_identify"); + + switch (hw->phy.type) { + case ngbe_phy_internal: + status = ngbe_check_internal_phy_id(hw); + break; + case ngbe_phy_m88e1512: + case ngbe_phy_m88e1512_sfi: + status = ngbe_check_mdi_phy_id(hw); + break; + case ngbe_phy_zte: + status = ngbe_check_zte_phy_id(hw); + break; + case ngbe_phy_yt8521s_sfi: + status = ngbe_check_yt_phy_id(hw); + break; + default: + status = NGBE_ERR_PHY_TYPE; + } + + return status; +} + +s32 ngbe_phy_reset(struct ngbe_hw *hw) +{ + s32 status = 0; + + u16 value = 0; + int i; + + DEBUGFUNC("ngbe_phy_reset"); + + /* only support internal phy */ + if (hw->phy.type != ngbe_phy_internal) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (NGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + value |= NGBE_MDI_PHY_RESET; + status = TCALL(hw, phy.ops.write_reg, 0, 0, value); + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = TCALL(hw, phy.ops.read_reg, 0, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "PHY MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN) +{ + u16 value = 0; + + DEBUGFUNC("ngbe_phy_setup_link"); + + /* disable 10/100M Half Duplex */ + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFF5F; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + + /* set advertise enable according to input speed */ + if (!(speed & NGBE_LINK_SPEED_1GB_FULL)) { + TCALL(hw, phy.ops.read_reg, 9, 0, &value); + value &= 0xFDFF; + TCALL(hw, phy.ops.write_reg, 9, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 9, 0, &value); + value |= 0x200; + TCALL(hw, phy.ops.write_reg, 9, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_100_FULL)) { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFEFF; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value |= 0x100; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } + + if (!(speed & NGBE_LINK_SPEED_10_FULL)) { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= 0xFFBF; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } else { + TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value |= 0x40; + TCALL(hw, phy.ops.write_reg, 4, 0, value); + } + + /* restart AN and wait AN done interrupt */ + if (((hw->subsystem_device_id & NCSI_SUP_MASK) == NCSI_SUP) || + ((hw->subsystem_device_id & OEM_MASK) == OCP_CARD)) { + if (need_restart_AN) + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + else + value = NGBE_MDI_PHY_ANE; + } else { + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + } + TCALL(hw, phy.ops.write_reg, 0, 0, value); + + value = 0x205B; + TCALL(hw, phy.ops.write_reg, 16, 0xd04, value); + TCALL(hw, phy.ops.write_reg, 17, 0xd04, 0); + + TCALL(hw, phy.ops.read_reg, 18, 0xd04, &value); + + value = value & 0xFF8C; + /*act led blinking mode set to 60ms*/ + value |= 0x2; + TCALL(hw, phy.ops.write_reg, 18, 0xd04, value); + + TCALL(hw, phy.ops.check_event); + + return NGBE_OK; +} + +s32 ngbe_phy_reset_m88e1512(struct ngbe_hw *hw) +{ + s32 status = 0; + + u16 value = 0; + int i; + + DEBUGFUNC("ngbe_phy_reset_m88e1512"); + + if (hw->phy.type != ngbe_phy_m88e1512 && + hw->phy.type != ngbe_phy_m88e1512_sfi) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (NGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* select page 18 reg 20 */ + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 18); + + if (hw->phy.type == ngbe_phy_m88e1512) + /* mode select to RGMII-to-copper */ + value = 0; + else + /* mode select to RGMII-to-sfi */ + value = 2; + status = TCALL(hw, phy.ops.write_reg_mdi, 20, 0, value); + /* mode reset */ + value |= NGBE_MDI_PHY_RESET; + status = TCALL(hw, phy.ops.write_reg_mdi, 20, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 20, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "M88E1512 MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +s32 ngbe_phy_reset_yt8521s(struct ngbe_hw *hw) +{ + s32 status = 0; + + u16 value = 0; + int i; + + DEBUGFUNC("ngbe_phy_reset_yt8521s"); + + if (hw->phy.type != ngbe_phy_yt8521s_sfi) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (NGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0, 0, &value); + /* sds software reset */ + value |= 0x8000; + status = ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0, 0, &value); + if (!(value & 0x8000)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "YT8521S Software RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +u32 ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u16 value_r4 = 0; + u16 value_r9 = 0; + u16 value; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + + hw->phy.autoneg_advertised = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + value_r9 |= NGBE_M88E1512_1000BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + } + + if (speed & NGBE_LINK_SPEED_100_FULL) { + value_r4 |= NGBE_M88E1512_100BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + } + + if (speed & NGBE_LINK_SPEED_10_FULL) { + value_r4 |= NGBE_M88E1512_10BASET_FULL; + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + } + + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~(NGBE_M88E1512_100BASET_FULL | + NGBE_M88E1512_100BASET_HALF | + NGBE_M88E1512_10BASET_FULL | + NGBE_M88E1512_10BASET_HALF); + value_r4 |= value; + TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value_r4); + + TCALL(hw, phy.ops.read_reg_mdi, 9, 0, &value); + value &= ~(NGBE_M88E1512_1000BASET_FULL | + NGBE_M88E1512_1000BASET_HALF); + value_r9 |= value; + TCALL(hw, phy.ops.write_reg_mdi, 9, 0, value_r9); + + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + } else { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0x60; + value 
|= 0x20; + TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + + value = NGBE_MDI_PHY_RESTART_AN | NGBE_MDI_PHY_ANE; + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + } + + TCALL(hw, phy.ops.check_event); + + return NGBE_OK; +} + +u32 ngbe_phy_setup_link_yt8521s(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val = 0; + u16 value; + u16 value_r4 = 0; + u16 value_r9 = 0; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + UNREFERENCED_PARAMETER(speed); + + hw->phy.autoneg_advertised = 0; + + if (hw->phy.type == ngbe_phy_yt8521s) { + value_r4 = 0x140; + value_r9 = 0x200; + /*disable 100/10base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value &= ~value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /*disable 1000base-T Self-negotiation ability*/ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value &= ~value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (speed & NGBE_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + /* enable 1000base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x9, 0, &value); + value |= value_r9; + ngbe_phy_write_reg_mdi(hw, 0x9, 0, value); + + /* enable 100/10base-T Self-negotiation ability */ + ngbe_phy_read_reg_mdi(hw, 0x4, 0, &value); + value |= value_r4; + ngbe_phy_write_reg_mdi(hw, 0x4, 0, value); + + /* software reset to make the above configuration take effect*/ + ngbe_phy_read_reg_mdi(hw, 0x0, 0, &value); + value |= 0x8000; + ngbe_phy_write_reg_mdi(hw, 0x0, 0, value); + } else { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + + /* RGMII_Config1 : Config rx and tx training delay */ + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA003, 0, 0x3cf1); + ngbe_phy_write_reg_ext_yt8521s(hw, 0xA001, 0, 0x8041); + + /* software reset */ + ngbe_phy_write_reg_sds_ext_yt8521s(hw, 0x0, 0, 0x9140); + + /* power on phy */ + ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x0, 0, &value); + value &= ~0x800; + ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x0, 0, value); + } + + TCALL(hw, phy.ops.check_event); + + return ret_val; +} + +s32 ngbe_phy_reset_zte(struct ngbe_hw *hw) +{ + s32 status = 0; + u16 value = 0; + int i; + + DEBUGFUNC("ngbe_phy_reset_zte"); + + if (hw->phy.type != ngbe_phy_zte) + return NGBE_ERR_PHY_TYPE; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (NGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) { + ERROR_REPORT1(NGBE_ERROR_CAUTION, + "OVERTEMP! 
Skip PHY reset.\n"); + return NGBE_ERR_OVERTEMP; + } + + /* Blocked by MNG FW so bail */ + if (ngbe_check_reset_blocked(hw)) + return status; + + /* zte phy */ + /* set control register[0x0] to reset mode */ + value = 1; + /* mode reset */ + value |= NGBE_MDI_PHY_RESET; + status = TCALL(hw, phy.ops.write_reg_mdi, 0, 0, value); + + for (i = 0; i < NGBE_PHY_RST_WAIT_PERIOD; i++) { + status = TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &value); + if (!(value & NGBE_MDI_PHY_RESET)) + break; + msleep(1); + } + + if (i == NGBE_PHY_RST_WAIT_PERIOD) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "ZTE MODE RESET did not complete.\n"); + return NGBE_ERR_RESET_FAILED; + } + + return status; +} + +u32 ngbe_phy_setup_link_zte(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u16 ngbe_phy_ccr = 0; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + TCALL(hw, phy.ops.read_reg_mdi, 0, 0, &ngbe_phy_ccr); + + if (speed & NGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_1GB_FULL; + ngbe_phy_ccr |= NGBE_MDI_PHY_SPEED_SELECT1;/*bit 6*/ + } else if (speed & NGBE_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_100_FULL; + ngbe_phy_ccr |= NGBE_MDI_PHY_SPEED_SELECT0;/*bit 13*/ + } else if (speed & NGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= NGBE_LINK_SPEED_10_FULL; + else + return NGBE_LINK_SPEED_UNKNOWN; + + ngbe_phy_ccr |= NGBE_MDI_PHY_DUPLEX;/*restart autonegotiation*/ + TCALL(hw, phy.ops.write_reg_mdi, 0, 0, ngbe_phy_ccr); + + return speed; +} + +/** + * ngbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ngbe_phy_check_overtemp(struct ngbe_hw *hw) +{ + s32 status = 0; + u32 ts_state; + + DEBUGFUNC("ngbe_phy_check_overtemp"); + + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, NGBE_TS_ALARM_ST); + + if (ts_state & NGBE_TS_ALARM_ST_DALARM) + status = NGBE_ERR_UNDERTEMP; + else if (ts_state & NGBE_TS_ALARM_ST_ALARM) + status = NGBE_ERR_OVERTEMP; + + return status; +} + +s32 ngbe_phy_check_event(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + TCALL(hw, phy.ops.read_reg, 0x1d, 0xa43, &value); + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + if (value & 0x10) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } else if (value & 0x08) { + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + } + + return NGBE_OK; +} + +s32 ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + if (hw->phy.type == ngbe_phy_m88e1512) + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + else + TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + TCALL(hw, phy.ops.read_reg_mdi, 19, 0, &value); + + if (value & NGBE_M88E1512_LSC) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + if (value & NGBE_M88E1512_ANC) { + adapter->flags |= NGBE_FLAG_NEED_ANC_CHECK; + } + + return NGBE_OK; +} + +s32 ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw) +{ + u16 value = 0; + struct ngbe_adapter *adapter = hw->back; + + ngbe_phy_write_reg_ext_yt8521s(hw, 0xa000, 0, 0x0); + TCALL(hw, phy.ops.read_reg_mdi, 0x13, 0, &value); + + if (value & (NGBE_YT8521S_SDS_LINK_UP | NGBE_YT8521S_SDS_LINK_DOWN)) { + adapter->flags |= NGBE_FLAG_NEED_LINK_UPDATE; + } + + return NGBE_OK; +} + +s32 
ngbe_phy_get_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +s32 ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +s32 ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + return status; +} + +s32 ngbe_phy_get_lp_advertised_pause(struct ngbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 0x1d, 0xa43, &value); + + status = TCALL(hw, phy.ops.read_reg, 0x1, 0, &value); + value = (value >> 5) & 0x1; + + /* if AN complete then check lp adv pause */ + status = TCALL(hw, phy.ops.read_reg, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + return status; +} + +s32 ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, + u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 5, 0, &value); + *pause_bit = (u8)((value >> 10) & 0x3); + } else { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 5, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + } + return status; +} + +s32 ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, + u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x05, 0, &value); + *pause_bit = (u8)((value >> 7) & 0x3); + return status; +} + +s32 ngbe_phy_set_pause_advertisement(struct ngbe_hw *hw, u16 pause_bit) +{ + u16 value; + s32 status = 0; + + status = TCALL(hw, phy.ops.read_reg, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg, 4, 0, value); + return status; +} + +s32 ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value; + s32 status = 0; + if (hw->phy.type == ngbe_phy_m88e1512) { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 0); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0xC00; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + } else { + status = TCALL(hw, phy.ops.write_reg_mdi, 22, 0, 1); + status = TCALL(hw, phy.ops.read_reg_mdi, 4, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = TCALL(hw, phy.ops.write_reg_mdi, 4, 0, value); + } + + return status; +} + +s32 ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit) +{ + u16 value; + s32 status = 0; + + status = ngbe_phy_read_reg_sds_mii_yt8521s(hw, 0x04, 0, &value); + value &= ~0x180; + value |= pause_bit; + status = ngbe_phy_write_reg_sds_mii_yt8521s(hw, 0x04, 0, value); + + return status; +} + +s32 ngbe_phy_setup(struct ngbe_hw *hw) +{ + int i; + u16 value = 0; + + for (i = 0; i < 15; i++) { + if (!rd32m(hw, NGBE_MIS_ST, NGBE_MIS_ST_GPHY_IN_RST(hw->bus.lan_id))) { + break; 
+ } + msleep(1); + } + + if (i == 15) { + ERROR_REPORT1(NGBE_ERROR_POLLING, + "GPhy reset exceeds maximum times.\n"); + return NGBE_ERR_PHY_TIMEOUT; + } + + for (i = 0; i < 1000; i++) { + TCALL(hw, phy.ops.read_reg, 29, 0xa43, &value); + if (value & 0x20) + break; + } + + TCALL(hw, phy.ops.write_reg, 20, 0xa46, 1); + for (i = 0; i < 1000; i++) { + TCALL(hw, phy.ops.read_reg, 29, 0xa43, &value); + if (value & 0x20) + break; + } + if (i == 1000) { + return NGBE_ERR_PHY_TIMEOUT; + } + + TCALL(hw, phy.ops.write_reg, 20, 0xa46, 2); + for (i = 0; i < 1000; i++) { + TCALL(hw, phy.ops.read_reg, 29, 0xa43, &value); + if (value & 0x20) + break; + } + + if (i == 1000) { + return NGBE_ERR_PHY_TIMEOUT; + } + + for (i = 0; i < 1000; i++) { + TCALL(hw, phy.ops.read_reg, 16, 0xa42, &value); + if ((value & 0x7) == 3) + break; + } + + if (i == 1000) { + return NGBE_ERR_PHY_TIMEOUT; + } + + return NGBE_OK; +} + +s32 ngbe_init_phy_ops_common(struct ngbe_hw *hw) +{ + struct ngbe_phy_info *phy = &hw->phy; + + phy->ops.reset = ngbe_phy_reset; + phy->ops.read_reg = ngbe_phy_read_reg; + phy->ops.write_reg = ngbe_phy_write_reg; + phy->ops.setup_link = ngbe_phy_setup_link; + phy->ops.check_overtemp = ngbe_phy_check_overtemp; + phy->ops.identify = ngbe_phy_identify; + phy->ops.init = ngbe_phy_init; + phy->ops.check_event = ngbe_phy_check_event; + phy->ops.get_adv_pause = ngbe_phy_get_advertised_pause; + phy->ops.get_lp_adv_pause = ngbe_phy_get_lp_advertised_pause; + phy->ops.set_adv_pause = ngbe_phy_set_pause_advertisement; + phy->ops.setup_once = ngbe_phy_setup; + + return NGBE_OK; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_phy.h b/drivers/net/ethernet/netswift/ngbe/ngbe_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..c6568018b20c73219202cdea4d315f85be4ab218 --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_phy.h @@ -0,0 +1,201 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +#ifndef _NGBE_PHY_H_ +#define _NGBE_PHY_H_ + +#include "ngbe_type.h" +#include "ngbe.h" + +/* EEPROM byte offsets */ +#define NGBE_SFF_IDENTIFIER 0x0 +#define NGBE_SFF_IDENTIFIER_SFP 0x3 +#define NGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define NGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define NGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define NGBE_SFF_1GBE_COMP_CODES 0x6 +#define NGBE_SFF_10GBE_COMP_CODES 0x3 +#define NGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define NGBE_SFF_CABLE_SPEC_COMP 0x3C +#define NGBE_SFF_SFF_8472_SWAP 0x5C +#define NGBE_SFF_SFF_8472_COMP 0x5E +#define NGBE_SFF_SFF_8472_OSCB 0x6E +#define NGBE_SFF_SFF_8472_ESCB 0x76 +#define NGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define NGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define NGBE_SFF_QSFP_CONNECTOR 0x82 +#define NGBE_SFF_QSFP_10GBE_COMP 0x83 +#define NGBE_SFF_QSFP_1GBE_COMP 0x86 +#define NGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define NGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define NGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define NGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define NGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define NGBE_SFF_1GBASESX_CAPABLE 0x1 +#define NGBE_SFF_1GBASELX_CAPABLE 0x2 +#define NGBE_SFF_1GBASET_CAPABLE 0x8 +#define NGBE_SFF_10GBASESR_CAPABLE 0x10 +#define NGBE_SFF_10GBASELR_CAPABLE 0x20 +#define NGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define NGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define NGBE_SFF_ADDRESSING_MODE 0x4 +#define NGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define NGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define NGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define NGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define NGBE_I2C_EEPROM_READ_MASK 0x100 +#define NGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define NGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define NGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define NGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define NGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define NGBE_CS4227 0xBE /* CS4227 address */ +#define NGBE_CS4227_GLOBAL_ID_LSB 0 +#define NGBE_CS4227_SCRATCH 2 +#define NGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define NGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define NGBE_CS4227_RETRIES 5 +#define NGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define NGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define NGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define NGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define NGBE_CS4227_EDC_MODE_CX1 0x0002 +#define NGBE_CS4227_EDC_MODE_SR 0x0004 +#define NGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define NGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define NGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define NGBE_PE 0xE0 /* Port expander address */ +#define NGBE_PE_OUTPUT 1 /* Output register offset */ +#define NGBE_PE_CONFIG 3 /* Config register offset */ +#define NGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define NGBE_TAF_SYM_PAUSE (0x1) +#define NGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define NGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define NGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define NGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define NGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define NGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define NGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define NGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define NGBE_I2C_T_HD_STA 4 +#define NGBE_I2C_T_LOW 5 +#define NGBE_I2C_T_HIGH 4 +#define 
NGBE_I2C_T_SU_STA 5 +#define NGBE_I2C_T_HD_DATA 5 +#define NGBE_I2C_T_SU_DATA 1 +#define NGBE_I2C_T_RISE 1 +#define NGBE_I2C_T_FALL 1 +#define NGBE_I2C_T_SU_STO 4 +#define NGBE_I2C_T_BUF 5 + +#ifndef NGBE_SFP_DETECT_RETRIES +#define NGBE_SFP_DETECT_RETRIES 10 +#endif /* NGBE_SFP_DETECT_RETRIES */ + +/* SFP+ SFF-8472 Compliance */ +#define NGBE_SFF_SFF_8472_UNSUP 0x00 + +enum ngbe_phy_type ngbe_get_phy_type_from_id(struct ngbe_hw *hw); +s32 ngbe_init_phy_ops_common(struct ngbe_hw *hw); +s32 ngbe_phy_read_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +s32 ngbe_phy_write_reg_mdi(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +s32 ngbe_phy_read_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +s32 ngbe_phy_write_reg_sds_mii_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +s32 ngbe_phy_read_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +s32 ngbe_phy_write_reg_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +s32 ngbe_phy_read_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data); +s32 ngbe_phy_write_reg_sds_ext_yt8521s(struct ngbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data); + +s32 ngbe_phy_init(struct ngbe_hw *hw); +s32 ngbe_phy_identify(struct ngbe_hw *hw); +s32 ngbe_phy_reset(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link(struct ngbe_hw *hw, + u32 speed, + bool need_restart_AN); +s32 ngbe_phy_reset_m88e1512(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_m88e1512(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 ngbe_phy_check_overtemp(struct ngbe_hw *hw); + +s32 ngbe_check_zte_phy_id(struct ngbe_hw *hw); +s32 ngbe_phy_reset_zte(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_zte(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 ngbe_phy_check_event(struct ngbe_hw *hw); +s32 ngbe_phy_check_event_m88e1512(struct ngbe_hw *hw); +s32 ngbe_phy_get_advertised_pause_m88e1512(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_get_lp_advertised_pause_m88e1512(struct ngbe_hw *hw, + u8 *pause_bit); +s32 ngbe_phy_set_pause_advertisement_m88e1512(struct ngbe_hw *hw, + u16 pause_bit); + +s32 ngbe_phy_reset_yt8521s(struct ngbe_hw *hw); +u32 ngbe_phy_setup_link_yt8521s(struct ngbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); + +s32 ngbe_phy_check_event_yt8521s(struct ngbe_hw *hw); +s32 ngbe_phy_get_advertised_pause_yt8521s(struct ngbe_hw *hw, u8 *pause_bit); +s32 ngbe_phy_get_lp_advertised_pause_yt8521s(struct ngbe_hw *hw, + u8 *pause_bit); +s32 ngbe_phy_set_pause_advertisement_yt8521s(struct ngbe_hw *hw, + u16 pause_bit); + +#endif /* _NGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_procfs.c b/drivers/net/ethernet/netswift/ngbe/ngbe_procfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f7ef1da9fd4efb85fe80263dd260feb9e837e84a --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_procfs.c @@ -0,0 +1,908 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef CONFIG_NGBE_PROCFS + +#include +#include +#include +#include +#include + +static struct proc_dir_entry *ngbe_top_dir; + +static struct net_device_stats *procfs_get_stats(struct net_device *netdev) +{ + if (netdev == NULL) + return NULL; + + /* only return the current stats */ + return &netdev->stats; +} + +static int ngbe_fwbanner(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%s\n", adapter->eeprom_id); +} + +static int ngbe_porttype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + return snprintf(page, count, "%d\n", + test_bit(__NGBE_DOWN, &adapter->state)); +} + +static int ngbe_portspeed(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int speed = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + switch (adapter->link_speed) { + case NGBE_LINK_SPEED_100_FULL: + speed = 1; + break; + case NGBE_LINK_SPEED_1GB_FULL: + speed = 10; + break; + case NGBE_LINK_SPEED_10GB_FULL: + speed = 100; + break; + default: + break; + } + return snprintf(page, count, "%d\n", speed); +} + +static int ngbe_wqlflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->wol); +} + +static int ngbe_xflowctl(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", hw->fc.current_mode); +} + +static int ngbe_rxdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_dropped); +} + +static int ngbe_rxerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + 
int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", net_stats->rx_errors); +} + +static int ngbe_rxupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", rd32(hw, NGBE_TPR)); +} + +static int ngbe_rxmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int i, mprc = 0; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + for (i = 0; i < 8; i++) + mprc += rd32(hw, NGBE_PX_MPRC(i)); + return snprintf(page, count, "%d\n", mprc); +} + +static int ngbe_rxbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_RX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txupacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_FRAME_CNT_GOOD_BAD_LOW)); +} + +static int ngbe_txmpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_MC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txbpacks(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "%d\n", + rd32(hw, NGBE_TX_BC_FRAMES_GOOD_LOW)); +} + +static int ngbe_txerrors(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int 
__always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_errors); +} + +static int ngbe_txdrops(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_dropped); +} + +static int ngbe_rxframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_packets); +} + +static int ngbe_rxbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->rx_bytes); +} + +static int ngbe_txframes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_packets); +} + +static int ngbe_txbytes(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device_stats *net_stats; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + net_stats = procfs_get_stats(adapter->netdev); + if (net_stats == NULL) + return snprintf(page, count, "error: no net stats\n"); + + return snprintf(page, count, "%lu\n", + net_stats->tx_bytes); +} + +static int ngbe_linkstat(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + int bitmask = 0; + u32 link_speed; + bool link_up = false; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw 
data\n"); + + if (!test_bit(__NGBE_DOWN, &adapter->state)) + bitmask |= 1; + + /* always assume link is up, if no check link function */ + link_up = true; + if (link_up) + bitmask |= 2; + + if (adapter->old_lsc != adapter->lsc_int) { + bitmask |= 4; + adapter->old_lsc = adapter->lsc_int; + } + + return snprintf(page, count, "0x%X\n", bitmask); +} + +static int ngbe_funcid(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%X\n", hw->bus.func); +} + +static int ngbe_funcvers(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%s\n", ngbe_driver_version); +} + +static int ngbe_macburn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.perm_addr[0], + (unsigned int)hw->mac.perm_addr[1], + (unsigned int)hw->mac.perm_addr[2], + (unsigned int)hw->mac.perm_addr[3], + (unsigned int)hw->mac.perm_addr[4], + (unsigned int)hw->mac.perm_addr[5]); +} + +static int ngbe_macadmn(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_hw *hw; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", + (unsigned int)hw->mac.addr[0], + (unsigned int)hw->mac.addr[1], + (unsigned int)hw->mac.addr[2], + (unsigned int)hw->mac.addr[3], + (unsigned int)hw->mac.addr[4], + (unsigned int)hw->mac.addr[5]); +} + +static int ngbe_maclla1(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct ngbe_hw *hw; + int rc; + u16 eeprom_buff[6]; + u16 first_word = 0x37; + const u16 word_count = ARRAY_SIZE(eeprom_buff); + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + hw = &adapter->hw; + if (hw == NULL) + return snprintf(page, count, "error: no hw data\n"); + + rc = TCALL(hw, eeprom.ops.read_buffer, first_word, 1, &first_word); + if (rc != 0) + return snprintf(page, count, + "error: reading pointer to the EEPROM\n"); + + if (first_word != 0x0000 && first_word != 0xFFFF) { + rc = TCALL(hw, eeprom.ops.read_buffer, first_word, word_count, + eeprom_buff); + if (rc != 0) + return snprintf(page, count, "error: reading buffer\n"); + } else { + memset(eeprom_buff, 0, sizeof(eeprom_buff)); + } + + switch (hw->bus.func) { + case 0: + return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[0], + eeprom_buff[1], + eeprom_buff[2]); + case 1: + 
return snprintf(page, count, "0x%04X%04X%04X\n", + eeprom_buff[3], + eeprom_buff[4], + eeprom_buff[5]); + default: + return snprintf(page, count, "unexpected port %d\n", hw->bus.func); + } +} + +static int ngbe_mtusize(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", netdev->mtu); +} + +static int ngbe_featflag(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int bitmask = 0; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + if (adapter->netdev->features & NETIF_F_RXCSUM) + bitmask |= 1; + return snprintf(page, count, "%d\n", bitmask); +} + +static int ngbe_lsominct(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "%d\n", 1); +} + +static int ngbe_prommode(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + struct net_device *netdev; + + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + netdev = adapter->netdev; + if (netdev == NULL) + return snprintf(page, count, "error: no net device\n"); + + return snprintf(page, count, "%d\n", + netdev->flags & IFF_PROMISC); +} + +static int ngbe_txdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); +} + +static int ngbe_rxdscqsz(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); +} + +static int ngbe_rxqavg(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_rx_queues; index++) { + ntc = adapter->rx_ring[index]->next_to_clean; + ntu = adapter->rx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->rx_ring[index]->count - ntu + ntc); + } + if (adapter->num_rx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_rx_queues); + return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); +} + +static int ngbe_txqavg(char 
*page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + int index; + int diff = 0; + u16 ntc; + u16 ntu; + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + for (index = 0; index < adapter->num_tx_queues; index++) { + ntc = adapter->tx_ring[index]->next_to_clean; + ntu = adapter->tx_ring[index]->next_to_use; + + if (ntc >= ntu) + diff += (ntc - ntu); + else + diff += (adapter->tx_ring[index]->count - ntu + ntc); + } + if (adapter->num_tx_queues <= 0) + return snprintf(page, count, + "can't calculate, number of queues %d\n", + adapter->num_tx_queues); + return snprintf(page, count, "%d\n", + diff/adapter->num_tx_queues); +} + +static int ngbe_iovotype(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void __always_unused *data) +{ + return snprintf(page, count, "2\n"); +} + +static int ngbe_funcnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->num_vfs); +} + +static int ngbe_pciebnbr(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_adapter *adapter = (struct ngbe_adapter *)data; + if (adapter == NULL) + return snprintf(page, count, "error: no adapter\n"); + + return snprintf(page, count, "%d\n", adapter->pdev->bus->number); +} + +static int ngbe_therm_dealarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->dalarm_thresh); +} + +static int ngbe_therm_alarmthresh(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + return snprintf(page, count, "%d\n", + therm_data->sensor_data->alarm_thresh); +} + +static int ngbe_therm_temp(char *page, char __always_unused **start, + off_t __always_unused off, int count, + int __always_unused *eof, void *data) +{ + s32 status; + struct ngbe_therm_proc_data *therm_data = + (struct ngbe_therm_proc_data *)data; + + if (therm_data == NULL) + return snprintf(page, count, "error: no therm_data\n"); + + status = ngbe_get_thermal_sensor_data(therm_data->hw); + if (status != 0) + snprintf(page, count, "error: status %d returned\n", status); + + return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); +} + +struct ngbe_proc_type { + char name[32]; + int (*read)(char*, char**, off_t, int, int*, void*); +}; + +struct ngbe_proc_type ngbe_proc_entries[] = { + {"fwbanner", &ngbe_fwbanner}, + {"porttype", &ngbe_porttype}, + {"portspeed", &ngbe_portspeed}, + {"wqlflag", &ngbe_wqlflag}, + {"xflowctl", &ngbe_xflowctl}, + {"rxdrops", &ngbe_rxdrops}, + {"rxerrors", &ngbe_rxerrors}, + {"rxupacks", &ngbe_rxupacks}, + {"rxmpacks", &ngbe_rxmpacks}, + 
{"rxbpacks", &ngbe_rxbpacks}, + {"txdrops", &ngbe_txdrops}, + {"txerrors", &ngbe_txerrors}, + {"txupacks", &ngbe_txupacks}, + {"txmpacks", &ngbe_txmpacks}, + {"txbpacks", &ngbe_txbpacks}, + {"rxframes", &ngbe_rxframes}, + {"rxbytes", &ngbe_rxbytes}, + {"txframes", &ngbe_txframes}, + {"txbytes", &ngbe_txbytes}, + {"linkstat", &ngbe_linkstat}, + {"funcid", &ngbe_funcid}, + {"funcvers", &ngbe_funcvers}, + {"macburn", &ngbe_macburn}, + {"macadmn", &ngbe_macadmn}, + {"maclla1", &ngbe_maclla1}, + {"mtusize", &ngbe_mtusize}, + {"featflag", &ngbe_featflag}, + {"lsominct", &ngbe_lsominct}, + {"prommode", &ngbe_prommode}, + {"txdscqsz", &ngbe_txdscqsz}, + {"rxdscqsz", &ngbe_rxdscqsz}, + {"txqavg", &ngbe_txqavg}, + {"rxqavg", &ngbe_rxqavg}, + {"iovotype", &ngbe_iovotype}, + {"funcnbr", &ngbe_funcnbr}, + {"pciebnbr", &ngbe_pciebnbr}, + {"", NULL} +}; + +struct ngbe_proc_type ngbe_internal_entries[] = { + {"temp", &ngbe_therm_temp}, + {"alarmthresh", &ngbe_therm_alarmthresh}, + {"dealarmthresh", &ngbe_therm_dealarmthresh}, + {"", NULL} +}; + +void ngbe_del_proc_entries(struct ngbe_adapter *adapter) +{ + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + if (ngbe_top_dir == NULL) + return; + + for (i = 0; i < NGBE_MAX_SENSORS; i++) { + if (adapter->therm_dir[i] == NULL) + continue; + + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + + remove_proc_entry(ngbe_internal_entries[index].name, + adapter->therm_dir[i]); + } + snprintf(buf, sizeof(buf), "sensor_%d", i); + remove_proc_entry(buf, adapter->info_dir); + } + + if (adapter->info_dir != NULL) { + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + remove_proc_entry(ngbe_proc_entries[index].name, + adapter->info_dir); + } + remove_proc_entry("info", adapter->eth_dir); + } + + if (adapter->eth_dir != NULL) + remove_proc_entry(pci_name(adapter->pdev), ngbe_top_dir); +} + +/* called from ngbe_main.c */ +void ngbe_procfs_exit(struct ngbe_adapter *adapter) +{ + ngbe_del_proc_entries(adapter); +} + +int ngbe_procfs_topdir_init(void) +{ + ngbe_top_dir = proc_mkdir("driver/ngbe", NULL); + if (ngbe_top_dir == NULL) + return -ENOMEM; + + return 0; +} + +void ngbe_procfs_topdir_exit(void) +{ + remove_proc_entry("driver/ngbe", NULL); +} + +/* called from ngbe_main.c */ +int ngbe_procfs_init(struct ngbe_adapter *adapter) +{ + int rc = 0; + int index; + int i; + char buf[16]; /* much larger than the sensor number will ever be */ + + adapter->eth_dir = NULL; + adapter->info_dir = NULL; + adapter->therm_dir = NULL; + + if (ngbe_top_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ngbe_top_dir); + if (adapter->eth_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + + adapter->info_dir = proc_mkdir("info", adapter->eth_dir); + if (adapter->info_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_proc_entries[index].read == NULL) + break; + if (!(create_proc_read_entry(ngbe_proc_entries[index].name, + 0444, + adapter->info_dir, + ngbe_proc_entries[index].read, + adapter))) { + + rc = -ENOMEM; + goto fail; + } + } + if (!TCALL(&(adapter->hw), ops.init_thermal_sensor_thresh)) + goto exit; + + snprintf(buf, sizeof(buf), "sensor"); + adapter->therm_dir = proc_mkdir(buf, adapter->info_dir); + if (adapter->therm_dir == NULL) { + rc = -ENOMEM; + goto fail; + } + for (index = 0; ; index++) { + if (ngbe_internal_entries[index].read == NULL) + break; + /* + * therm_data 
struct contains pointer the read func + * will be needing + */ + adapter->therm_data.hw = &adapter->hw; + adapter->therm_data.sensor_data = + &adapter->hw.mac.thermal_sensor_data.sensor; + + if (!(create_proc_read_entry( + ngbe_internal_entries[index].name, + 0444, + adapter->therm_dir, + ngbe_internal_entries[index].read, + &adapter->therm_data))) { + rc = -ENOMEM; + goto fail; + } + } + + goto exit; + +fail: + ngbe_del_proc_entries(adapter); +exit: + return rc; +} + +#endif /* CONFIG_NGBE_PROCFS */ diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_ptp.c b/drivers/net/ethernet/netswift/ngbe/ngbe_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..87e7d5dc11a43bdfe51d0b7707dcc802701b6cdd --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_ptp.c @@ -0,0 +1,858 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#include "ngbe.h" +#include + +/* + * SYSTIME is defined by a fixed point system which allows the user to + * define the scale counter increment value at every level change of + * the oscillator driving SYSTIME value. The time unit is determined by + * the clock frequency of the oscillator and TIMINCA register. + * The cyclecounter and timecounter structures are used to to convert + * the scale counter into nanoseconds. SYSTIME registers need to be converted + * to ns values by use of only a right shift. + * The following math determines the largest incvalue that will fit into + * the available bits in the TIMINCA register: + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator, which changes based on the link + * speed: + * At 10Gb link or no link, the period is 6.4 ns. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * round(): discard the fractional portion of the calculation + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. 
+ * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define NGBE_INCVAL_10GB 0xCCCCCC +#define NGBE_INCVAL_1GB 0x2000000/*in Emerald all speed is same*/ +#define NGBE_INCVAL_100 0xA00000 +#define NGBE_INCVAL_10 0xC7F380 +#define NGBE_INCVAL_FPGA 0x800000 + +#define NGBE_INCVAL_SHIFT_10GB 20 +#define NGBE_INCVAL_SHIFT_1GB 22/*in Emerald all speed is same*/ +#define NGBE_INCVAL_SHIFT_100 15 +#define NGBE_INCVAL_SHIFT_10 12 +#define NGBE_INCVAL_SHIFT_FPGA 17 + +#define NGBE_OVERFLOW_PERIOD (HZ * 30) +#define NGBE_PTP_TX_TIMEOUT (HZ) + +/** + * ngbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 ngbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct ngbe_adapter *adapter = + container_of(hw_cc, struct ngbe_adapter, hw_cc); + struct ngbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIML); + stamp |= (u64)rd32(hw, NGBE_TSEC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * ngbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void ngbe_ptp_convert_to_hwtstamp(struct ngbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * ngbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. 
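+ *
+ * In other words, the code below derives the new increment value from the
+ * base as roughly:
+ *
+ *   incval = base_incval +/- (base_incval * |ppb|) / 10^9
+ *
+ * e.g. a request of +1000 ppb scales the increment up by one part per
+ * million before it is written back to the NGBE_TSEC_1588_INC register.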
+ */ +static int ngbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + struct ngbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + /* temp setting*/ + + if (incval > NGBE_TSEC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + return 0; +} + +/** + * ngbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by ns + * + * adjust the timer by resetting the timecounter structure. + */ +static int ngbe_ptp_adjtime(struct ptp_clock_info *ptp, + s64 delta) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * ngbe_ptp_gettime64 + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec64. + */ +static int ngbe_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * ngbe_ptp_settime64 + * @ptp: the ptp clock structure + * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int ngbe_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ngbe_adapter *adapter = + container_of(ptp, struct ngbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * ngbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int ngbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -ENOTSUPP; +} + +/** + * ngbe_ptp_check_pps_event + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ngbe_ptp_check_pps_event(struct ngbe_adapter *adapter) +{ + struct ptp_clock_event event; + + event.type = PTP_CLOCK_PPS; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). 
Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + + /* we don't config PPS on SDP yet, so just return. + * ptp_clock_event(adapter->ptp_clock, &event); + */ +} + +/** + * ngbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +void ngbe_ptp_overflow_check(struct ngbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + NGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ngbe_ptp_gettime64(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ngbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void ngbe_ptp_rx_hang(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct ngbe_ring *rx_ring; + u32 tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + unsigned long rx_event; + int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, NGBE_PSR_1588_STMPH); + adapter->last_rx_ptp_check = jiffies; + + adapter->rx_hwtstamp_cleared++; + e_warn(drv, "clearing RX Timestamp hang"); + } +} + +/** + * ngbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state + * @adapter: the private adapter structure + * + * This function should be called whenever the state related to a Tx timestamp + * needs to be cleared. This helps ensure that all related bits are reset for + * the next Tx timestamp event. 
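+ *
+ * Reading NGBE_TSEC_1588_STMPH below presumably also releases the hardware
+ * latch (mirroring the Rx path, where reading the high RXSTMP register
+ * clears the lock), so a later packet can be timestamped again.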
+ */ +static void ngbe_ptp_clear_tx_timestamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + rd32(hw, NGBE_TSEC_1588_STMPH); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + clear_bit_unlock(__NGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ngbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ngbe_ptp_tx_hwtstamp(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0; + + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_TSEC_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necesary, because the + * descriptor's "done" bit does not correlate with the timestamp event. + */ +static void ngbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ngbe_adapter *adapter = container_of(work, struct ngbe_adapter, + ptp_tx_work); + struct ngbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + NGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!adapter->ptp_tx_skb) { + ngbe_ptp_clear_tx_timestamp(adapter); + return; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32(hw, NGBE_TSEC_1588_CTL); + if (tsynctxctl & NGBE_TSEC_1588_CTL_VALID) { + ngbe_ptp_tx_hwtstamp(adapter); + return; + } + + /* check timeout last in case timestamp event just occurred */ + if (timeout) { + ngbe_ptp_clear_tx_timestamp(adapter); + adapter->tx_hwtstamp_timeouts++; + e_warn(drv, "clearing Tx Timestamp hang"); + } else { + /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); + } +} + +/** + * ngbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp + * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ngbe_ptp_rx_hwtstamp(struct ngbe_adapter *adapter, struct sk_buff *skb) +{ + struct ngbe_hw *hw = &adapter->hw; + u64 regval = 0; + u32 tsyncrxctl; + + /* + * Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ + tsyncrxctl = rd32(hw, NGBE_PSR_1588_CTL); + if (!(tsyncrxctl & NGBE_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPL); + regval |= (u64)rd32(hw, NGBE_PSR_1588_STMPH) << 32; + + ngbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * ngbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. 
Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int ngbe_ptp_get_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ngbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ngbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int ngbe_ptp_set_timestamp_mode(struct ngbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = NGBE_TSEC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = NGBE_PSR_1588_CTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= NGBE_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags |= (NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* register RXMTRL must be set in order to do 
V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + adapter->flags &= ~(NGBE_FLAG_RX_HWTSTAMP_ENABLED | + NGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + (NGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + NGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(hw, + NGBE_PSR_ETYPE_SWC(NGBE_PSR_ETYPE_SWC_FILTER_1588), + 0); + + /* enable/disable TX */ + regval = rd32(hw, NGBE_TSEC_1588_CTL); + regval &= ~NGBE_TSEC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(hw, NGBE_TSEC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(hw, NGBE_PSR_1588_CTL); + regval &= ~(NGBE_PSR_1588_CTL_ENABLED | NGBE_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(hw, NGBE_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(hw, NGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl); + + NGBE_WRITE_FLUSH(hw); + + /* clear TX/RX timestamp state, just to be sure */ + ngbe_ptp_clear_tx_timestamp(adapter); + rd32(hw, NGBE_PSR_1588_STMPH); + + return 0; +} + +/** + * ngbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int ngbe_ptp_set_ts_config(struct ngbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ngbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void ngbe_ptp_link_speed_adjust(struct ngbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + + *shift = NGBE_INCVAL_SHIFT_1GB; + *incval = NGBE_INCVAL_1GB; + + return; +} + +/** + * ngbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. 
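+ *
+ * Once the cyclecounter is set up, the timecounter converts a raw SYSTIME
+ * delta to nanoseconds as roughly (delta * cc.mult) >> cc.shift; since
+ * cc.mult is left at 1 here, the conversion reduces to the right shift
+ * selected by ngbe_ptp_link_speed_adjust().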
+ */ +void ngbe_ptp_start_cyclecounter(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = ngbe_ptp_read; + ngbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + wr32(hw, NGBE_TSEC_1588_INC, NGBE_TSEC_1588_INC_IV(incval)); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(adapter->base_incval, incval); + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ngbe_ptp_reset + * @adapter: the ngbe private board structure + * + * When the MAC resets, all of the hardware configuration for timesync is + * reset. This function should be called to re-enable the device for PTP, + * using the last known settings. However, we do lose the current clock time, + * so we fallback to resetting it based on the kernel's realtime clock. + * + * This function will maintain the hwtstamp_config settings, and it retriggers + * the SDP output if it's enabled. + */ +void ngbe_ptp_reset(struct ngbe_adapter *adapter) +{ + unsigned long flags; + + /* reset the hardware timestamping mode */ + ngbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + ngbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + adapter->last_overflow_check = jiffies; +} + +/** + * ngbe_ptp_create_clock + * @adapter: the ngbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initalizes the PTP clock device used by userspace to access the clock-like + * features of the PTP core. It will be called by ngbe_ptp_init, and may + * re-use a previously initialized clock (such as during a suspend/resume + * cycle). 
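+ *
+ * On success the clock is exposed to userspace as a standard PHC device
+ * (/dev/ptpN, with a matching entry under /sys/class/ptp/), which tools
+ * such as ptp4l or phc2sys can then discover and synchronize against.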
+ */ +static long ngbe_ptp_create_clock(struct ngbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 500000000; /* 10^-9s */ + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ngbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ngbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ngbe_ptp_gettime64; + adapter->ptp_caps.settime64 = ngbe_ptp_settime64; + adapter->ptp_caps.enable = ngbe_ptp_feature_enable; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } else + e_dev_info("registered PHC device on %s\n", netdev->name); + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * ngbe_ptp_init + * @adapter: the ngbe private adapter structure + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ +void ngbe_ptp_init(struct ngbe_adapter *adapter) +{ + /* initialize the spin lock first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (ngbe_ptp_create_clock(adapter)) + return; + + /* we have a clock, so we can intialize work for timestamps now */ + INIT_WORK(&adapter->ptp_tx_work, ngbe_ptp_tx_hwtstamp_work); + + /* reset the ptp related hardware bits */ + ngbe_ptp_reset(adapter); + + /* enter the NGBE_PTP_RUNNING state */ + set_bit(__NGBE_PTP_RUNNING, &adapter->state); + + return; +} + +/** + * ngbe_ptp_suspend - stop ptp work items + * @adapter: pointer to adapter struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. + */ +void ngbe_ptp_suspend(struct ngbe_adapter *adapter) +{ + /* leave the NGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__NGBE_PTP_RUNNING, &adapter->state)) + return; + + adapter->flags2 &= ~NGBE_FLAG2_PTP_PPS_ENABLED; + + cancel_work_sync(&adapter->ptp_tx_work); + ngbe_ptp_clear_tx_timestamp(adapter); +} + +/** + * ngbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. 
+ */ +void ngbe_ptp_stop(struct ngbe_adapter *adapter) +{ + /* first, suspend ptp activity */ + ngbe_ptp_suspend(adapter); + + /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.c b/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..785e25287ad39c1442002a27805c260ed3d9ca3e --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.c @@ -0,0 +1,1461 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngbe.h" +#include "ngbe_type.h" +#include "ngbe_sriov.h" + +#ifdef CONFIG_PCI_IOV +static int __ngbe_enable_sriov(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + adapter->flags |= NGBE_FLAG_SRIOV_ENABLED; + e_dev_info("SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= NGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (NGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + wr32m(hw, NGBE_PSR_CTL, + NGBE_PSR_CTL_SW_EN, NGBE_PSR_CTL_SW_EN); + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
+ */ + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (!adapter->vfinfo) { + adapter->num_vfs = 0; + e_dev_info("failed to allocate memory for VF Data Storage\n"); + return -ENOMEM; + } + + /* enable L2 switch and replication */ + adapter->flags |= NGBE_FLAG_SRIOV_L2SWITCH_ENABLE | + NGBE_FLAG_SRIOV_REPLICATION_ENABLE; + + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* enable spoof checking for all VFs */ + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; + + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = NGBEVF_XCAST_MODE_NONE; + } + + wr32m(hw, NGBE_CFG_PORT_CTL, + NGBE_CFG_PORT_CTL_NUM_VT_MASK, NGBE_CFG_PORT_CTL_NUM_VT_8); + + return 0; +} + +#define NGBE_BA4_ADDR(vfinfo, reg) \ + ((u8 __iomem *)((u8 *)(vfinfo)->b4_addr + (reg))) + +/** + * ngbe_get_vfs - Find and take references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ngbe_get_vfs(struct ngbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 vendor = pdev->vendor; + struct pci_dev *vfdev; + int vf = 0; + u16 vf_id; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + vfdev = pci_get_device(vendor, vf_id, NULL); + for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { + struct vf_data_storage *vfinfo; + if (!vfdev->is_virtfn) + continue; + if (vfdev->physfn != pdev) + continue; + if (vf >= adapter->num_vfs) + continue; + + /*pci_dev_get(vfdev);*/ + vfinfo = &adapter->vfinfo[vf]; + vfinfo->vfdev = vfdev; + vfinfo->b4_addr = ioremap(pci_resource_start(vfdev, 4), 64); + + ++vf; + } +} + +/** + * ngbe_pet_vfs - Release references to all vf devices + * @adapter: Pointer to adapter struct + */ +static void ngbe_put_vfs(struct ngbe_adapter *adapter) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + + /* put the reference to all of the vf devices */ + for (vf = 0; vf < num_vfs; ++vf) { + struct vf_data_storage *vfinfo; + struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; + + if (!vfdev) + continue; + + vfinfo = &adapter->vfinfo[vf]; + iounmap(vfinfo->b4_addr); + vfinfo->b4_addr = NULL; + vfinfo->vfdev = NULL; + /*pci_dev_put(vfdev);*/ + } +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void ngbe_enable_sriov(struct ngbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device -" + "Please reload all VF drivers to avoid spoofed packet " + "errors\n"); + } else { + int err; + /* + * The sapphire supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. 
If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, + NGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__ngbe_enable_sriov(adapter)) { + ngbe_get_vfs(adapter); + return; + } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. + */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + ngbe_disable_sriov(adapter); +} +#endif /* CONFIG_PCI_IOV */ + +int ngbe_disable_sriov(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* put the reference to all of the vf devices */ +#ifdef CONFIG_PCI_IOV + ngbe_put_vfs(adapter); +#endif + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & NGBE_FLAG_SRIOV_ENABLED)) + return 0; + +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned -" + "VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* set default pool back to 0 */ + wr32m(hw, NGBE_PSR_VM_CTL, + NGBE_PSR_VM_CTL_POOL_MASK, 0); + NGBE_WRITE_FLUSH(hw); + + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + /* take a breather then clean up driver data */ + msleep(100); + + adapter->flags &= ~NGBE_FLAG_SRIOV_ENABLED; + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~NGBE_FLAG_VMDQ_ENABLED; + } + + return 0; +} + +static int ngbe_set_vf_multicasts(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u16 entries = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) + >> NGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ngbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + + /* only so many hash values supported */ + entries = min(entries, (u16)NGBE_MAX_VF_MC_ENTRIES); + + /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* VFs are limited to using the MTA hash table for their multicast + * addresses */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + /* errata 5: maintain a copy of the register table conf */ + mta_reg = hw->mac.mta_shadow[vector_reg]; + mta_reg |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] = mta_reg; + wr32(hw, NGBE_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + return 0; +} + 
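+/*
+ * Note on the MTA indexing used above and in ngbe_restore_vf_multicasts()
+ * below: bits 11:5 of each VF-supplied hash select one of the 128 32-bit
+ * NGBE_PSR_MC_TBL registers and bits 4:0 select the bit within it. As an
+ * illustrative example, a hash value of 0x0B5F would set bit 31 of
+ * NGBE_PSR_MC_TBL(0x5A).
+ */
+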
+void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + u32 i, j; + u32 vector_bit; + u32 vector_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + wr32m(hw, NGBE_PSR_MC_TBL(vector_reg), + 1 << vector_bit, 1 << vector_bit); + /* errata 5: maintain a copy of the reg table conf */ + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= NGBE_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_ROMPE; + wr32(hw, NGBE_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + ngbe_full_sync_mac_table(adapter); +} + +int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return TCALL(hw, mac.ops.set_vfta, vid, vf, (bool)add); +} + +static int ngbe_set_vf_lpe(struct ngbe_adapter *adapter, u32 max_frame, + u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 max_frs, reg_val; + + /* + * For sapphire we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 vf_shift, vfre; + s32 err = 0; + + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + /* fall through */ + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf; + + /* enable or disable receive depending on error */ + vfre = rd32(hw, NGBE_RDM_POOL_RE); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + wr32(hw, NGBE_RDM_POOL_RE, vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(hw, NGBE_MAC_WDG_TIMEOUT) & + NGBE_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + NGBE_MAC_WDG_TIMEOUT_WTO_DELTA)) { + wr32(hw, NGBE_MAC_WDG_TIMEOUT, + max_frs - NGBE_MAC_WDG_TIMEOUT_WTO_DELTA); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe) +{ + u32 vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr |= NGBE_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= NGBE_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~NGBE_PSR_VM_L2CTL_AUPE; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); +} + +static void ngbe_set_vmvir(struct ngbe_adapter *adapter, + u16 vid, u16 qos, u16 vf) +{ + struct ngbe_hw *hw = 
&adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + NGBE_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), vmvir); +} + +static void ngbe_clear_vmvir(struct ngbe_adapter *adapter, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + + wr32(hw, NGBE_TDM_VLAN_INS(vf), 0); +} + +static inline void ngbe_vf_reset_event(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + ngbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ngbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ngbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ngbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ngbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ngbe_set_rx_mode(adapter->netdev); + + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ngbe_mbox_api_10; +} + +int ngbe_set_vf_mac(struct ngbe_adapter *adapter, + u16 vf, unsigned char *mac_addr) +{ + s32 retval = 0; + ngbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + else + memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + + return retval; +} + +static int ngbe_negotiate_vf_api(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ngbe_mbox_api_10: + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ngbe_get_vf_queues(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_20: + case ngbe_mbox_api_11: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[NGBE_VF_TX_QUEUES] = 1; + msgbuf[NGBE_VF_RX_QUEUES] = 1; + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[NGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[NGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[NGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[NGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int ngbe_get_vf_link_status(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_11: + case ngbe_mbox_api_12: + case ngbe_mbox_api_13: + break; + default: + return -1; + } + + if (adapter->link_up) + msgbuf[1] = NGBE_VF_STATUS_LINKUP; + else + msgbuf[1] = 0; + + return 0; +} + +static int 
ngbe_set_vf_macvlan(struct ngbe_adapter *adapter, + u16 vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + ngbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list manangbeent required. + */ + if (!entry || !entry->free) + return -ENOSPC; + + retval = ngbe_add_mac_filter(adapter, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +#ifdef CONFIG_PCI_IOV +int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x7); + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + memset(vf_mac_addr, 0, ETH_ALEN); + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +static inline void ngbe_write_qde(struct ngbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + + reg = rd32(hw, NGBE_RDM_PF_QDE); + reg |= qde << i; + + wr32(hw, NGBE_RDM_PF_QDE, reg); + +} + +static inline void ngbe_write_hide_vlan(struct ngbe_adapter *adapter, u32 vf, + u32 hide_vlan) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 q_per_pool = 1; + u32 reg = 0; + u32 i = vf * q_per_pool; + reg = rd32(hw, NGBE_RDM_PF_HIDE); + + if (hide_vlan == 1) + reg |= hide_vlan << i; + else + reg &= hide_vlan << i; + + wr32(hw, NGBE_RDM_PF_HIDE, reg); +} + +static int ngbe_vf_reset_msg(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + struct net_device *dev = adapter->netdev; + int pf_max_frame; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ngbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + ngbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf; + + /* enable transmit for vf */ + wr32m(hw, NGBE_TDM_POOL_TE, + 1 << vf, 1 << vf); + + /* force drop enable for all VF Rx queues */ + ngbe_write_qde(adapter, vf, 1); + + /* enable receive for vf */ + reg = rd32(hw, NGBE_RDM_POOL_RE); + reg |= 1 << vf_shift; + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + wr32(hw, NGBE_RDM_POOL_RE, reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* reply to 
reset with ack and vf mac address */ + msgbuf[0] = NGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to " + "assign one manually\n", vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ngbe_write_mbx(hw, msgbuf, NGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ngbe_set_vf_mac_addr(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + e_warn(drv, + "VF %d attempted to set a new MAC address but it already " + "has an administratively set MAC address " + "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); + e_warn(drv, "Check the VF driver and if it is not using the " + "correct MAC address you may need to reload the VF " + "driver\n"); + return -1; + } + return ngbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +#ifdef CONFIG_PCI_IOV +static int ngbe_find_vlvf_entry(struct ngbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < NGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= NGBE_PSR_VLAN_SWC_ENTRIES) + regindex = -1; + + return regindex; +} +#endif /* CONFIG_PCI_IOV */ + +static int ngbe_set_vf_vlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> NGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & NGBE_PSR_VLAN_SWC_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = ngbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + +#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + u32 bits = 0, vlvf; + s32 reg_ndx; + + reg_ndx = ngbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + wr32(hw, NGBE_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(hw, NGBE_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. 
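+		 * Pool membership for pools 0-31 appears to live in
+		 * NGBE_PSR_VLAN_SWC_VM_L (hence the VMDQ_P(0) < 32 split);
+		 * the PF's own pool bit is masked out before checking whether
+		 * any other pool still references this entry.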
+ */ + if (VMDQ_P(0) < 32) { + bits = rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << VMDQ_P(0)); + } else { + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= rd32(hw, NGBE_PSR_VLAN_SWC_VM_L); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && + !bits) + ngbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: +#endif + return err; +} + +static int ngbe_set_vf_macvlan_msg(struct ngbe_adapter *adapter, + u32 *msgbuf, u16 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & NGBE_VT_MSGINFO_MASK) >> + NGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. + */ + if (adapter->vfinfo[vf].spoofchk_enabled) + ngbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + } + + err = ngbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no " + "space for it\n", + vf); + + return err < 0; +} + +static int ngbe_update_vf_xcast_mode(struct ngbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int xcast_mode = msgbuf[1]; + u32 vmolr, fctrl, disable, enable; + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ngbe_mbox_api_12: + /* promisc introduced in 1.3 version */ + if (xcast_mode == NGBEVF_XCAST_MODE_PROMISC) + return -EOPNOTSUPP; + /* Fall threw */ + case ngbe_mbox_api_13: + break; + default: + return -EOPNOTSUPP; + } + + if (adapter->vfinfo[vf].xcast_mode == xcast_mode) + goto out; + + switch (xcast_mode) { + case NGBEVF_XCAST_MODE_NONE: + disable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case NGBEVF_XCAST_MODE_MULTI: + disable = NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE; + break; + case NGBEVF_XCAST_MODE_ALLMULTI: + disable = NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE; + break; + case NGBEVF_XCAST_MODE_PROMISC: + fctrl = rd32(hw, NGBE_PSR_CTL); + if (!(fctrl & NGBE_PSR_CTL_UPE)) { + /* VF promisc requires PF in promisc */ + e_warn(drv, + "Enabling VF promisc requires PF in promisc\n"); + return -EPERM; + } + disable = 0; + enable = NGBE_PSR_VM_L2CTL_BAM | + NGBE_PSR_VM_L2CTL_ROMPE | + NGBE_PSR_VM_L2CTL_MPE | + NGBE_PSR_VM_L2CTL_UPE | + NGBE_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(hw, NGBE_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(hw, NGBE_PSR_VM_L2CTL(vf), vmolr); + + adapter->vfinfo[vf].xcast_mode = xcast_mode; + +out: + msgbuf[1] = xcast_mode; + + return 0; +} + +static int ngbe_rcv_msg_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + u16 mbx_size = NGBE_VXMAILBOX_SIZE; + u32 msgbuf[NGBE_VXMAILBOX_SIZE]; + struct ngbe_hw *hw 
= &adapter->hw; + s32 retval; + + retval = ngbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (NGBE_VT_MSGTYPE_ACK | NGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + NGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == NGBE_VF_RESET) + return ngbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + ngbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case NGBE_VF_SET_MAC_ADDR: + retval = ngbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_MULTICAST: + retval = ngbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_VLAN: + retval = ngbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_SET_LPE: + if (msgbuf[1] > NGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d exceed MAX_JUMBO_FRAME_SIZE\n", msgbuf[1]); + return -EINVAL; + } + retval = ngbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case NGBE_VF_SET_MACVLAN: + retval = ngbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case NGBE_VF_API_NEGOTIATE: + retval = ngbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_QUEUES: + retval = ngbe_get_vf_queues(adapter, msgbuf, vf); + break; + case NGBE_VF_UPDATE_XCAST_MODE: + retval = ngbe_update_vf_xcast_mode(adapter, msgbuf, vf); + break; + case NGBE_VF_GET_LINK_STATUS: + retval = ngbe_get_vf_link_status(adapter, msgbuf, vf); + break; + case NGBE_VF_BACKUP: + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = NGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= NGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= NGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= NGBE_VT_MSGTYPE_CTS; + + ngbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void ngbe_rcv_ack_from_vf(struct ngbe_adapter *adapter, u16 vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 msg = NGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ngbe_write_mbx(hw, &msg, 1, vf); +} + +void ngbe_msg_task(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u16 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ngbe_check_for_rst(hw, vf)) + ngbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ngbe_check_for_msg(hw, vf)) + ngbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ngbe_check_for_ack(hw, vf)) + ngbe_rcv_ack_from_vf(adapter, vf); + } +} + +void ngbe_disable_tx_rx(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + wr32(hw, NGBE_TDM_POOL_TE, 0); + wr32(hw, NGBE_RDM_POOL_RE, 0); +} + +static inline void ngbe_ping_vf(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[vf].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, vf); +} + +void ngbe_ping_all_vfs(struct ngbe_adapter *adapter) +{ + struct ngbe_hw *hw = &adapter->hw; + u32 ping; + u16 i; + + for (i = 0 ; i < adapter->num_vfs; i++) 
{ + ping = NGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= NGBE_VT_MSGTYPE_CTS; + ngbe_write_mbx(hw, &ping, 1, i); + } +} + +int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + adapter->vfinfo[vf].clear_to_send = false; + ngbe_ping_vf(adapter, vf); + + e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +static int ngbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (!(adapter->flags & NGBE_FLAG_SRIOV_CAPABLE)) { + e_dev_warn("SRIOV not supported on this device\n"); + return -EOPNOTSUPP; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ngbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 8 we limit the actual number that can be allocated to 7 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. + */ + if ((num_vfs + adapter->num_vmdqs) > NGBE_MAX_VF_FUNCTIONS) { + err = -EPERM; + goto err_out; + } + + adapter->num_vfs = num_vfs; + + err = __ngbe_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ngbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + ngbe_get_vfs(adapter); + msleep(100); + ngbe_sriov_reinit(adapter); +out: + return num_vfs; +err_out: + return err; +} + +static int ngbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ngbe_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = ngbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) + ngbe_sriov_reinit(adapter); + + return err; +} +#endif + +int ngbe_pci_sriov_configure(struct pci_dev __maybe_unused *dev, + int __maybe_unused num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return ngbe_pci_sriov_disable(dev); + else + return ngbe_pci_sriov_enable(dev, num_vfs); +#endif + return 0; +} + +int ngbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + s32 retval = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + + dev_info(pci_dev_to_dev(adapter->pdev), + "setting MAC %pM on VF %d\n", mac, vf); + dev_info(pci_dev_to_dev(adapter->pdev), + "Reload the VF driver to make this change effective.\n"); + retval = ngbe_set_vf_mac(adapter, vf, mac); + if (retval >= 0) { + adapter->vfinfo[vf].pf_set_mac = true; + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address has been set, but the PF " + "device is not up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to " + "use the VF device.\n"); + } + } else { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF MAC address was NOT set due to invalid 
or " + "duplicate MAC address.\n"); + } + + return retval; +} + +static int ngbe_enable_port_vlan(struct ngbe_adapter *adapter, + int vf, u16 vlan, u8 qos) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + ngbe_set_vmvir(adapter, vlan, qos, vf); + ngbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + TCALL(hw, mac.ops.set_vlan_anti_spoofing, true, vf); + adapter->vfinfo[vf].vlan_count++; + /* enable hide vlan */ + ngbe_write_qde(adapter, vf, 1); + ngbe_write_hide_vlan(adapter, vf, 1); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(pci_dev_to_dev(adapter->pdev), + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__NGBE_DOWN, &adapter->state)) { + dev_warn(pci_dev_to_dev(adapter->pdev), + "The VF VLAN has been set, but the PF device is not " + "up.\n"); + dev_warn(pci_dev_to_dev(adapter->pdev), + "Bring the PF device up before attempting to use the VF " + "device.\n"); + } + +out: + return err; +} + +static int ngbe_disable_port_vlan(struct ngbe_adapter *adapter, int vf) +{ + struct ngbe_hw *hw = &adapter->hw; + int err; + + err = ngbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ngbe_clear_vmvir(adapter, vf); + ngbe_set_vmolr(hw, vf, true); + TCALL(hw, mac.ops.set_vlan_anti_spoofing, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + /* disable hide vlan */ + ngbe_write_hide_vlan(adapter, vf, 0); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + +int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + int err = 0; + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
+ */ + if (adapter->vfinfo[vf].pf_vlan) + err = ngbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = ngbe_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + err = ngbe_disable_port_vlan(adapter, vf); + } +out: + return err; +} + +/* no effect */ +int ngbe_ndo_set_vf_bw(struct net_device *netdev, + int vf, + int min_tx_rate, + int max_tx_rate) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 1 or 10 Gbps */ + if (adapter->link_speed < NGBE_LINK_SPEED_1GB_FULL) + return -EINVAL; + + /* store values */ + adapter->vfinfo[vf].min_tx_rate = min_tx_rate; + adapter->vfinfo[vf].max_tx_rate = max_tx_rate; + + return 0; +} + +int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + struct ngbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + if (vf < 32) { + regval = (setting << vf); + wr32m(hw, NGBE_TDM_MAC_AS_L, + regval | (1 << vf), regval); + + if (adapter->vfinfo[vf].vlan_count) { + wr32m(hw, NGBE_TDM_VLAN_AS_L, + regval | (1 << vf), regval); + } + } + + return 0; +} + +int ngbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ngbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + + ivi->max_tx_rate = adapter->vfinfo[vf].max_tx_rate; + ivi->min_tx_rate = adapter->vfinfo[vf].min_tx_rate; + + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; + + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->trusted = adapter->vfinfo[vf].trusted; + + return 0; +} diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.h b/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..958c5303f72ad939b1909c89a65f0feb337b980c --- /dev/null +++ b/drivers/net/ethernet/netswift/ngbe/ngbe_sriov.h @@ -0,0 +1,63 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */
+
+
+#ifndef _NGBE_SRIOV_H_
+#define _NGBE_SRIOV_H_
+
+/* The ngbe driver limits the max number of VFs that can be enabled to
+ * 7 (NGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define NGBE_MAX_VFS_DRV_LIMIT (NGBE_MAX_VF_FUNCTIONS - 1)
+
+void ngbe_restore_vf_multicasts(struct ngbe_adapter *adapter);
+int ngbe_set_vf_vlan(struct ngbe_adapter *adapter, int add, int vid, u16 vf);
+void ngbe_set_vmolr(struct ngbe_hw *hw, u16 vf, bool aupe);
+void ngbe_msg_task(struct ngbe_adapter *adapter);
+int ngbe_set_vf_mac(struct ngbe_adapter *adapter,
+ u16 vf, unsigned char *mac_addr);
+void ngbe_disable_tx_rx(struct ngbe_adapter *adapter);
+void ngbe_ping_all_vfs(struct ngbe_adapter *adapter);
+
+int ngbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+
+int ngbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos, __be16 vlan_proto);
+
+int ngbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
+
+int ngbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int ngbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+int ngbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+
+int ngbe_disable_sriov(struct ngbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+int ngbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ngbe_enable_sriov(struct ngbe_adapter *adapter);
+#endif
+int ngbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+
+#define NGBE_VF_STATUS_LINKUP 0x1
+
+/*
+ * These are defined in ngbe_type.h on behalf of the VF driver
+ * but we need them here unwrapped for the PF driver.
+ */
+//#define NGBE_DEV_ID_SP_VF 0x1000
+#endif /* _NGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_sysfs.c b/drivers/net/ethernet/netswift/ngbe/ngbe_sysfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..559d02b2feeb2a64138d1527f3f2f627dfdb5ece
--- /dev/null
+++ b/drivers/net/ethernet/netswift/ngbe/ngbe_sysfs.c
@@ -0,0 +1,222 @@
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ * + */ + +#include "ngbe.h" +#include "ngbe_hw.h" +#include "ngbe_type.h" + +#ifdef CONFIG_NGBE_SYSFS + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_NGBE_HWMON +#include +#endif + +#ifdef CONFIG_NGBE_HWMON +/* hwmon callback functions */ +static ssize_t ngbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + TCALL(ngbe_attr->hw, mac.ops.get_thermal_sensor_data); + + value = ngbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_alarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->alarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ngbe_hwmon_show_dalarmthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ngbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ngbe_attr->sensor->dalarm_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ngbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */
+static int ngbe_add_hwmon_attr(struct ngbe_adapter *adapter, int type)
+{
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *ngbe_attr;
+
+ n_attr = adapter->ngbe_hwmon_buff.n_hwmon;
+ ngbe_attr = &adapter->ngbe_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case NGBE_HWMON_TYPE_TEMP:
+ ngbe_attr->dev_attr.show = ngbe_hwmon_show_temp;
+ snprintf(ngbe_attr->name, sizeof(ngbe_attr->name),
+ "temp%u_input", 0);
+ break;
+ case NGBE_HWMON_TYPE_ALARMTHRESH:
+ ngbe_attr->dev_attr.show = ngbe_hwmon_show_alarmthresh;
+ snprintf(ngbe_attr->name, sizeof(ngbe_attr->name),
+ "temp%u_alarmthresh", 0);
+ break;
+ case NGBE_HWMON_TYPE_DALARMTHRESH:
+ ngbe_attr->dev_attr.show = ngbe_hwmon_show_dalarmthresh;
+ snprintf(ngbe_attr->name, sizeof(ngbe_attr->name),
+ "temp%u_dalarmthresh", 0);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* These are always the same regardless of type */
+ ngbe_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor;
+ ngbe_attr->hw = &adapter->hw;
+ ngbe_attr->dev_attr.store = NULL;
+ ngbe_attr->dev_attr.attr.mode = S_IRUGO;
+ ngbe_attr->dev_attr.attr.name = ngbe_attr->name;
+
+ rc = device_create_file(pci_dev_to_dev(adapter->pdev),
+ &ngbe_attr->dev_attr);
+
+ if (rc == 0)
+ ++adapter->ngbe_hwmon_buff.n_hwmon;
+
+ return rc;
+}
+#endif /* CONFIG_NGBE_HWMON */
+
+static void ngbe_sysfs_del_adapter(
+ struct ngbe_adapter __maybe_unused *adapter)
+{
+#ifdef CONFIG_NGBE_HWMON
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->ngbe_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(pci_dev_to_dev(adapter->pdev),
+ &adapter->ngbe_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->ngbe_hwmon_buff.hwmon_list);
+
+ if (adapter->ngbe_hwmon_buff.device)
+ hwmon_device_unregister(adapter->ngbe_hwmon_buff.device);
+#endif /* CONFIG_NGBE_HWMON */
+}
+
+/* called from ngbe_main.c */
+void ngbe_sysfs_exit(struct ngbe_adapter *adapter)
+{
+ ngbe_sysfs_del_adapter(adapter);
+}
+
+/* called from ngbe_main.c */
+int ngbe_sysfs_init(struct ngbe_adapter *adapter)
+{
+ int rc = 0;
+#ifdef CONFIG_NGBE_HWMON
+ struct hwmon_buff *ngbe_hwmon = &adapter->ngbe_hwmon_buff;
+ int n_attrs;
+
+#endif /* CONFIG_NGBE_HWMON */
+ if (adapter == NULL)
+ goto err;
+
+#ifdef CONFIG_NGBE_HWMON
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ if (TCALL(&adapter->hw, mac.ops.init_thermal_sensor_thresh))
+ goto no_thermal;
+
+ /*
+ * Allocate space for max attributes:
+ * max num sensors * values (temp, alarmthresh, dalarmthresh)
+ */
+ n_attrs = 3;
+ ngbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!ngbe_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ngbe_hwmon->device =
+ hwmon_device_register(pci_dev_to_dev(adapter->pdev));
+ if (IS_ERR(ngbe_hwmon->device)) {
+ rc = PTR_ERR(ngbe_hwmon->device);
+ goto err;
+ }
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_TEMP);
+ rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_ALARMTHRESH);
+ rc |= ngbe_add_hwmon_attr(adapter, NGBE_HWMON_TYPE_DALARMTHRESH);
+ if (rc)
+ goto err;
+
+no_thermal:
+#endif /* CONFIG_NGBE_HWMON */
+ goto exit;
+
+err:
+ ngbe_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif /* CONFIG_NGBE_SYSFS */
diff --git a/drivers/net/ethernet/netswift/ngbe/ngbe_type.h b/drivers/net/ethernet/netswift/ngbe/ngbe_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e7f627edbbce8f04209e75c245a777152b01e6e
--- /dev/null
+++
b/drivers/net/ethernet/netswift/ngbe/ngbe_type.h @@ -0,0 +1,2941 @@ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef _NGBE_TYPE_H_ +#define _NGBE_TYPE_H_ + +#include +#include +#include + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - NGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - NGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - NGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - NGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - NGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - NGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) 
+ */ + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif + +/************ ngbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif + +/* Device IDs */ +/* copper */ +#define NGBE_DEV_ID_EM_TEST 0x0000 +#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100 +#define NGBE_DEV_ID_EM_WX1860A2 0x0101 +#define NGBE_DEV_ID_EM_WX1860A2S 0x0102 +#define NGBE_DEV_ID_EM_WX1860A4 0x0103 +#define NGBE_DEV_ID_EM_WX1860A4S 0x0104 +#define NGBE_DEV_ID_EM_WX1860AL2 0x0105 +#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106 +#define NGBE_DEV_ID_EM_WX1860AL4 0x0107 +#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108 +#define NGBE_DEV_ID_EM_WX1860NCSI 0x0109 +#define NGBE_DEV_ID_EM_WX1860A1 0x010a +#define NGBE_DEV_ID_EM_WX1860AL1 0x010b + +/* transfer units */ +#define NGBE_KB_TO_B 1024 + +/* Subsystem ID */ +#define NGBE_WX1860AL_INTERNAL 0x0410 +#define NGBE_WX1860AL_ZTE5201_RJ45 0x0100 +#define NGBE_WX1860AL_M88E1512_RJ45 0x0200 +#define NGBE_WX1860AL_M88E1512_SFP 0x0403 +#define NGBE_WX1860AL_YT8521S_SFP 0x0460 + +#define NGBE_SUBSYSTEM_ID_EM_SF100F_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF100HF_LP 0x0103 +#define NGBE_SUBSYSTEM_ID_EM_SF200T 0x0201 +#define NGBE_SUBSYSTEM_ID_EM_SF200T_S 0x0210 +#define NGBE_SUBSYSTEM_ID_EM_SF400T 0x0401 +#define NGBE_SUBSYSTEM_ID_EM_SF400T_S 0x0410 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT 0x0202 +#define NGBE_SUBSYSTEM_ID_EM_SF200HT_S 0x0220 +#define NGBE_SUBSYSTEM_ID_EM_SF400HT 0x0402 +#define NGBE_SUBSYSTEM_ID_EM_SF400HT_S 0x0420 +#define NGBE_SUBSYSTEM_ID_EM_SF200HXT 0x0230 +#define NGBE_SUBSYSTEM_ID_EM_SF400HXT 0x0430 +#define NGBE_SUBSYSTEM_ID_EM_SF400_OCP 0x0440 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY 0x0450 +#define NGBE_SUBSYSTEM_ID_EM_SF400_LY_YT 0x0470 + +#define INTERNAL_SFP 0x0003 +#define OCP_CARD 0x0040 +#define LY_M88E1512_SFP 0x0050 +#define YT8521S_SFP 0x0060 +#define LY_YT8521S_SFP 0x0070 + +#define OEM_MASK 0x00F0 +#define INTERNAL_SFP_MASK 0x000F + +#define NCSI_SUP 0x8000 +#define NCSI_SUP_MASK 0x8000 + +#define WOL_SUP 0x4000 +#define WOL_SUP_MASK 0x4000 + +/* MDIO Manageable Devices (MMDs). 
*/ +#define NGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define NGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define NGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define NGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define NGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* AUTO_NEG_DEV regs */ +#define NGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define NGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define NGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define NGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status + * Reg */ +#define NGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define NGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define NGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ + +#define NGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define NGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define NGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 + +#define NGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define NGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define NGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define NGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define NGBE_MII_RESTART 0x200 +#define NGBE_MII_AUTONEG_COMPLETE 0x20 +#define NGBE_MII_AUTONEG_LINK_UP 0x04 +#define NGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define NGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define NGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. 
*/ +#define NGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define NGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define NGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define NGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define NGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define NGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define NGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define NGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define NGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define NGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define NGBE_MAX_PHY_ADDR 32 + +#define NGBE_MDIO_CLAUSE_SELECT 0x11220 + +/* INTERNAL PHY CONTROL */ +#define NGBE_INTERNAL_PHY_PAGE_SELECT_OFFSET 31 +#define NGBE_INTERNAL_PHY_OFFSET_MAX 32 +#define NGBE_INTERNAL_PHY_ID 0x000732 + +#define NGBE_INTPHY_LED0 0x0010 +#define NGBE_INTPHY_LED1 0x0040 +#define NGBE_INTPHY_LED2 0x2000 + +#define NGBE_INTPHY_INT_LSC 0x0010 +#define NGBE_INTPHY_INT_ANC 0x0008 + +/* PHY MDI STANDARD CONFIG */ +#define NGBE_MDI_PHY_ID1_OFFSET 2 +#define NGBE_MDI_PHY_ID2_OFFSET 3 +#define NGBE_MDI_PHY_ID_MASK 0xFFFFFC00U +#define NGBE_MDI_PHY_SPEED_SELECT1 0x0040 +#define NGBE_MDI_PHY_DUPLEX 0x0100 +#define NGBE_MDI_PHY_RESTART_AN 0x0200 +#define NGBE_MDI_PHY_ANE 0x1000 +#define NGBE_MDI_PHY_SPEED_SELECT0 0x2000 +#define NGBE_MDI_PHY_RESET 0x8000 + +#define NGBE_PHY_RST_WAIT_PERIOD 5 + +#define NGBE_M88E1512_PHY_ID 0x005043 +/* reg 18_0 */ +#define NGBE_M88E1512_INT_LSC 0x0400 +#define NGBE_M88E1512_INT_ANC 0x0800 +/* reg 18_3 */ +#define NGBE_M88E1512_INT_EN 0x0080 +#define NGBE_M88E1512_INT_POL 0x0800 + +/* reg 21_2 */ +#define NGBE_M88E1512_RGM_TTC 0x0010 +#define NGBE_M88E1512_RGM_RTC 0x0020 + +/* LED control */ +#define NGBE_M88E1512_LED1_CONF 0x6 +#define NGBE_M88E1512_LED0_CONF 0x1 + +/* LED polarity */ +#define NGBE_M88E1512_LED1_POL 0x1 +#define NGBE_M88E1512_LED0_POL 0x1 + +/* reg 4_0 ADV REG*/ +#define NGBE_M88E1512_10BASET_HALF 0x0020 +#define NGBE_M88E1512_10BASET_FULL 0x0040 +#define NGBE_M88E1512_100BASET_HALF 0x0080 +#define NGBE_M88E1512_100BASET_FULL 0x0100 + +/* reg 9_0 ADV REG*/ +#define NGBE_M88E1512_1000BASET_HALF 0x0100 +#define NGBE_M88E1512_1000BASET_FULL 0x0200 + +/* reg 19_0 INT status*/ +#define NGBE_M88E1512_ANC 0x0800 +#define NGBE_M88E1512_LSC 0x0400 + +/* yt8521s reg */ +#define NGBE_YT8521S_PHY_ID 0x011a + +#define NGBE_YT8521S_SDS_LINK_UP 0x4 +#define NGBE_YT8521S_SDS_LINK_DOWN 0x8 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* ETH PHY Registers */ +#define NGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define NGBE_SR_PCS_CTL2 0x30007 +#define NGBE_SR_PMA_MMD_CTL1 0x10000 +#define NGBE_SR_MII_MMD_CTL 0x1F0000 +#define NGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define NGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define NGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define NGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define NGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define NGBE_SR_AN_MMD_CTL 0x70000 +#define NGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define NGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define NGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define NGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define NGBE_VR_AN_KR_MODE_CL 0x78003 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 + +#define 
NGBE_PHY_MPLLA_CTL0 0x18071 +#define NGBE_PHY_MPLLA_CTL3 0x18077 +#define NGBE_PHY_MISC_CTL0 0x18090 +#define NGBE_PHY_VCO_CAL_LD0 0x18092 +#define NGBE_PHY_VCO_CAL_LD1 0x18093 +#define NGBE_PHY_VCO_CAL_LD2 0x18094 +#define NGBE_PHY_VCO_CAL_LD3 0x18095 +#define NGBE_PHY_VCO_CAL_REF0 0x18096 +#define NGBE_PHY_VCO_CAL_REF1 0x18097 +#define NGBE_PHY_RX_AD_ACK 0x18098 +#define NGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define NGBE_PHY_DFE_TAP_CTL0 0x1805E +#define NGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define NGBE_PHY_RX_EQ_CTL0 0x18058 +#define NGBE_PHY_RX_EQ_CTL 0x1805C +#define NGBE_PHY_TX_EQ_CTL0 0x18036 +#define NGBE_PHY_TX_EQ_CTL1 0x18037 +#define NGBE_PHY_TX_RATE_CTL 0x18034 +#define NGBE_PHY_RX_RATE_CTL 0x18054 +#define NGBE_PHY_TX_GEN_CTL2 0x18032 +#define NGBE_PHY_RX_GEN_CTL2 0x18052 +#define NGBE_PHY_RX_GEN_CTL3 0x18053 +#define NGBE_PHY_MPLLA_CTL2 0x18073 +#define NGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define NGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define NGBE_PHY_TX_GENCTRL1 0x18031 + +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define NGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define NGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define NGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define NGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define NGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 +#define NGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define NGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define NGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define NGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define NGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x46 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define NGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define NGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define NGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define NGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define NGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define NGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define NGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define NGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define NGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define NGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define NGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define NGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define NGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define NGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 +#define NGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 +#define NGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define NGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define 
NGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define NGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define NGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define NGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define NGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define NGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define NGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define NGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define NGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define NGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define NGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define NGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define NGBE_MIS_RST 0x1000C +#define NGBE_MIS_PWR 0x10000 +#define NGBE_MIS_CTL 0x10004 +#define NGBE_MIS_PF_SM 0x10008 +#define NGBE_MIS_ST 0x10028 +#define NGBE_MIS_SWSM 0x1002C +#define NGBE_MIS_RST_ST 0x10030 + +#define NGBE_MIS_RST_SW_RST 0x00000001U +#define NGBE_MIS_RST_LAN0_RST 0x00000002U +#define NGBE_MIS_RST_LAN1_RST 0x00000004U +#define NGBE_MIS_RST_LAN2_RST 0x00000008U +#define NGBE_MIS_RST_LAN3_RST 0x00000010U +#define NGBE_MIS_RST_FW_RST 0x00000020U + +#define NGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define NGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U +#define NGBE_MIS_RST_GLOBAL_RST 0x80000000U + +#define NGBE_MIS_PWR_LAN_ID(_r) ((0xF0000000U & (_r)) >> 28) +#define NGBE_MIS_PWR_LAN_ID_0 (1) +#define NGBE_MIS_PWR_LAN_ID_1 (2) +#define NGBE_MIS_PWR_LAN_ID_2 (3) +#define NGBE_MIS_PWR_LAN_ID_3 (4) + +#define NGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define NGBE_MIS_ST_MNG_VETO 0x00000100U +#define NGBE_MIS_ST_LAN0_ECC 0x00010000U +#define NGBE_MIS_ST_LAN1_ECC 0x00020000U +#define NGBE_MIS_ST_LAN2_ECC 0x00040000U +#define NGBE_MIS_ST_LAN3_ECC 0x00080000U +#define NGBE_MIS_ST_MNG_ECC 0x00100000U +#define NGBE_MIS_ST_PCORE_ECC 0x00200000U +#define NGBE_MIS_ST_PCIWRP_ECC 0x00400000U +#define NGBE_MIS_ST_PCIEPHY_ECC 0x00800000U +#define NGBE_MIS_ST_FMGR_ECC 0x01000000U +#define NGBE_MIS_ST_GPHY_IN_RST(_r) (0x00000200U << (_r)) + +#define NGBE_MIS_SWSM_SMBI 1 +#define NGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define NGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define NGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define NGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define NGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define NGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define NGBE_MIS_PF_SM_SM 1 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define 
NGBE_TS_CTL 0x10300 +#define NGBE_TS_EN 0x10304 +#define NGBE_TS_ST 0x10308 +#define NGBE_TS_ALARM_THRE 0x1030C +#define NGBE_TS_DALARM_THRE 0x10310 +#define NGBE_TS_INT_EN 0x10314 +#define NGBE_TS_ALARM_ST 0x10318 +#define NGBE_TS_ALARM_ST_DALARM 0x00000002U +#define NGBE_TS_ALARM_ST_ALARM 0x00000001U + +#define NGBE_EFUSE_WDATA0 0x10320 +#define NGBE_EFUSE_WDATA1 0x10324 +#define NGBE_EFUSE_RDATA0 0x10328 +#define NGBE_EFUSE_RDATA1 0x1032C +#define NGBE_EFUSE_STATUS 0x10330 + +#define NGBE_TS_CTL_CALI_DONE 0x80000000U +#define NGBE_TS_EN_ENA 0x00000001U +#define NGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define NGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define NGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define NGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U + +struct ngbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct ngbe_thermal_sensor_data { + struct ngbe_thermal_diode_data sensor; +}; + +/* FMGR Registers */ +#define NGBE_SPI_ILDR_STATUS 0x10120 +#define NGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define NGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset done */ +#define NGBE_SPI_ILDR_STATUS_SW_RESET 0x00000800U /* software reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00002000U /* lan0 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00004000U /* lan1 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN2_SW_RST 0x00008000U /* lan2 soft reset done */ +#define NGBE_SPI_ILDR_STATUS_LAN3_SW_RST 0x00010000U /* lan3 soft reset done */ + +#define NGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define NGBE_SPI_CMD 0x10104 +#define NGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define NGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define NGBE_SPI_CMD_ADDR(_v) (((_v) & 0x7FFFFF)) + +#define NGBE_SPI_DATA 0x10108 +#define NGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define NGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define NGBE_SPI_DATA_OP_DONE ((0x1)) + +#define NGBE_SPI_STATUS 0x1010C +#define NGBE_SPI_STATUS_OPDONE ((0x1)) +#define NGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define NGBE_SPI_USR_CMD 0x10110 +#define NGBE_SPI_CMDCFG0 0x10114 +#define NGBE_SPI_CMDCFG1 0x10118 +#define NGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ + +/* port cfg Registers */ +#define NGBE_CFG_PORT_CTL 0x14400 +#define NGBE_CFG_PORT_ST 0x14404 +#define NGBE_CFG_EX_VTYPE 0x14408 +#define NGBE_CFG_LED_CTL 0x14424 + +/* internal phy reg_offset [0,31] */ +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) + +#define NGBE_CFG_TCP_TIME 0x14420 +#define NGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) /* [0,3] */ +#define NGBE_CFG_LAN_SPEED 0x14440 + +/* port cfg bit */ +#define NGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define NGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define NGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define NGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define NGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define NGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00001000U /* number of TVs */ +#define NGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define NGBE_CFG_PORT_CTL_NUM_VT_8 0x00001000U +/* Status Bit */ +#define NGBE_CFG_PORT_ST_LINK_1000M 0x00000002U +#define NGBE_CFG_PORT_ST_LINK_100M 0x00000004U +#define NGBE_CFG_PORT_ST_LINK_10M 0x00000008U +#define NGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000300U & (_r)) >> 8) +#define NGBE_LINK_UP_TIME 90 + +/* LED CTL Bit */ + +#define NGBE_CFG_LED_CTL_LINK_10M_SEL 
0x00000008U +#define NGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000004U +#define NGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000002U +#define NGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 +/* LED modes */ +#define NGBE_LED_LINK_10M NGBE_CFG_LED_CTL_LINK_10M_SEL +#define NGBE_LED_LINK_1G NGBE_CFG_LED_CTL_LINK_1G_SEL +#define NGBE_LED_LINK_100M NGBE_CFG_LED_CTL_LINK_100M_SEL + +/* GPIO Registers */ +#define NGBE_GPIO_DR 0x14800 +#define NGBE_GPIO_DDR 0x14804 +#define NGBE_GPIO_CTL 0x14808 +#define NGBE_GPIO_INTEN 0x14830 +#define NGBE_GPIO_INTMASK 0x14834 +#define NGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define NGBE_GPIO_POLARITY 0x1483C +#define NGBE_GPIO_INTSTATUS 0x14840 +#define NGBE_GPIO_EOI 0x1484C +/*GPIO bit */ +#define NGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ +#define NGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ +#define NGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */ +#define NGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */ +#define NGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */ +#define NGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ +#define NGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ +#define NGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define NGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ +#define NGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ +#define NGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ +#define NGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define NGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ +#define NGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ +#define NGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define NGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ +#define NGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ +#define NGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ +#define NGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define NGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ +#define NGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ + +/* TPH registers */ +#define NGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define NGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define NGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header */ +#define NGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ +/* TPH bit */ +#define NGBE_CFG_TPH_TDESC_EN 0x80000000U +#define NGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RDESC_EN 0x80000000U +#define NGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define NGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define NGBE_CFG_TPH_RHDR_EN 0x00008000U +#define NGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define NGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define NGBE_CFG_TPH_RPL_EN 0x80000000U +#define NGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define NGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define NGBE_TDM_CTL 0x18000 +#define NGBE_TDM_POOL_TE 0x18004 +#define NGBE_TDM_PB_THRE 0x18020 + +#define NGBE_TDM_LLQ 0x18040 +#define NGBE_TDM_ETYPE_LB_L 0x18050 + +#define NGBE_TDM_ETYPE_AS_L 0x18058 +#define NGBE_TDM_MAC_AS_L 0x18060 + +#define NGBE_TDM_VLAN_AS_L 0x18070 + +#define NGBE_TDM_TCP_FLG_L 0x18078 +#define NGBE_TDM_TCP_FLG_H 0x1807C +#define 
NGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 8 of these 0 - 7 */ +/* TDM CTL BIT */ +#define NGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define NGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define NGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define NGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define NGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ + +#define NGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define NGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define NGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define NGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define NGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +/* qos */ +#define NGBE_TDM_PBWARB_CTL 0x18200 +#define NGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) + +/* etag */ +#define NGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 8 of these 0 - 7 */ +/* statistic */ +#define NGBE_TDM_DRP_CNT 0x18300 +#define NGBE_TDM_SEC_DRP 0x18304 +#define NGBE_TDM_PKT_CNT 0x18308 +#define NGBE_TDM_BYTE_CNT_L 0x1830C +#define NGBE_TDM_BYTE_CNT_H 0x18310 +#define NGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define NGBE_RDM_ARB_CTL 0x12000 +#define NGBE_RDM_POOL_RE 0x12004 + +#define NGBE_RDM_PF_QDE 0x12080 +#define NGBE_RDM_PF_HIDE 0x12090 +/* VFRE bitmask */ +#define NGBE_RDM_POOL_RE_ENABLE_ALL 0xFFFFFFFFU + +/* statistic */ +#define NGBE_RDM_DRP_PKT 0x12500 +#define NGBE_RDM_PKT_CNT 0x12504 +#define NGBE_RDM_BYTE_CNT_L 0x12508 +#define NGBE_RDM_BYTE_CNT_H 0x1250C +#define NGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define NGBE_RDB_RFCV 0x19200 +#define NGBE_RDB_RFCL 0x19220 +#define NGBE_RDB_RFCH 0x19260 +#define NGBE_RDB_RFCRT 0x192A0 +#define NGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define NGBE_RDB_PB_WRAP 0x19004 +#define NGBE_RDB_PB_SZ 0x19020 + +#define NGBE_RDB_PB_CTL 0x19000 +#define NGBE_RDB_PB_SZ_SHIFT 10 +#define NGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define NGBE_RDB_LLI_THRE 0x19080 +#define NGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define NGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define NGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define NGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) /* [0,7] */ +#define NGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) /* [0,31] */ +#define NGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) /* [0,9] */ +#define NGBE_RDB_RA_CTL 0x194F4 +#define NGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define NGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define NGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define NGBE_RDB_SYN_CLS 0x19130 +#define NGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*8 of these (0-7)*/ +/* VM RSS */ +#define NGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define NGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* statistic */ +#define NGBE_RDB_MPCNT 0x19040 +#define NGBE_RDB_PKT_CNT 0x19060 +#define NGBE_RDB_REPLI_CNT 0x19064 +#define NGBE_RDB_DRP_CNT 0x19068 +#define NGBE_RDB_LXONTXC 0x1921C +#define NGBE_RDB_LXOFFTXC 0x19218 +#define NGBE_RDB_PFCMACDAL 0x19210 +#define NGBE_RDB_PFCMACDAH 0x19214 +#define NGBE_RDB_TXSWERR 0x1906C +#define NGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define NGBE_RDB_PL_CFG_L4HDR 0x2 +#define NGBE_RDB_PL_CFG_L3HDR 0x4 +#define NGBE_RDB_PL_CFG_L2HDR 
0x8 +#define NGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define NGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +/* RQTC Bit Masks and Shifts */ +#define NGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define NGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define NGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define NGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define NGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define NGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define NGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define NGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define NGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define NGBE_MAX_PACKET_BUFFERS 8 +#define NGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define NGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* FCRTL Bit Masks */ +#define NGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define NGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define NGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define NGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ +#define NGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define NGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ +#define NGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ + +#define NGBE_MAX_RDB_5T_CTL0_FILTERS 128 +#define NGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U +#define NGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U +#define NGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U +#define NGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2 +#define NGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2 +#define NGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU +#define NGBE_RDB_5T_CTL0_POOL_SHIFT 8 +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x00000007U +#define NGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 27 +#define NGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B +#define NGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x05 +#define NGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F +#define NGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U +#define NGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U + +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */ +#define NGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16 +#define NGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */ +#define NGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */ + +/* Receive Config masks */ +#define NGBE_RDB_PB_CTL_PBEN (0x80000000) /* Enable Receiver */ +#define NGBE_RDB_PB_CTL_DISABLED 0x1 + +#define NGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define NGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define NGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define NGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define NGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U +#define NGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +/******************************* PSR Registers *******************************/ +/* psr control */ +#define NGBE_PSR_CTL 0x15000 +#define NGBE_PSR_VLAN_CTL 0x15088 +#define 
NGBE_PSR_VM_CTL 0x151B0 +#define NGBE_PSR_PKT_CNT 0x151B8 +#define NGBE_PSR_MNG_PKT_CNT 0x151BC +#define NGBE_PSR_DBG_DOP_CNT 0x151C0 +#define NGBE_PSR_MNG_DOP_CNT 0x151C4 +#define NGBE_PSR_VM_FLP_L 0x151C8 + +/* Header split receive */ +#define NGBE_PSR_CTL_SW_EN 0x00040000U +#define NGBE_PSR_CTL_PCSD 0x00002000U +#define NGBE_PSR_CTL_IPPCSE 0x00001000U +#define NGBE_PSR_CTL_BAM 0x00000400U +#define NGBE_PSR_CTL_UPE 0x00000200U +#define NGBE_PSR_CTL_MPE 0x00000100U +#define NGBE_PSR_CTL_MFE 0x00000080U +#define NGBE_PSR_CTL_MO 0x00000060U +#define NGBE_PSR_CTL_TPE 0x00000010U +#define NGBE_PSR_CTL_MO_SHIFT 5 +/* VT_CTL bitmasks */ +#define NGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */ +#define NGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */ +#define NGBE_PSR_VM_CTL_POOL_SHIFT 7 +#define NGBE_PSR_VM_CTL_POOL_MASK (0x7 << NGBE_PSR_VM_CTL_POOL_SHIFT) +/* VLAN Control Bit Masks */ +#define NGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */ +#define NGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define NGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define NGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 contorl */ +#define NGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +/* VMOLR bitmasks */ +#define NGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define NGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define NGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define NGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define NGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define NGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define NGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define NGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define NGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define NGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define NGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* etype switcher 1st stage */ +#define NGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define NGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define NGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define NGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define NGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define NGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define NGBE_PSR_ETYPE_SWC_POOL_ENABLE (1 << 26) /* bit 26 */ +#define NGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
+ */
+#define NGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0
+#define NGBE_PSR_ETYPE_SWC_FILTER_FCOE 2
+#define NGBE_PSR_ETYPE_SWC_FILTER_1588 3
+#define NGBE_PSR_ETYPE_SWC_FILTER_FIP 4
+#define NGBE_PSR_ETYPE_SWC_FILTER_LLDP 5
+#define NGBE_PSR_ETYPE_SWC_FILTER_LACP 6
+#define NGBE_PSR_ETYPE_SWC_FILTER_FC 7
+
+/* mcast/ucast overflow tbl */
+#define NGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define NGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+
+/* vlan tbl */
+#define NGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4))
+
+/* mac switcher */
+#define NGBE_PSR_MAC_SWC_AD_L 0x16200
+#define NGBE_PSR_MAC_SWC_AD_H 0x16204
+#define NGBE_PSR_MAC_SWC_VM 0x16208
+#define NGBE_PSR_MAC_SWC_IDX 0x16210
+/* RAH */
+#define NGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF))
+#define NGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30)
+#define NGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U
+#define NGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+
+/* vlan switch */
+#define NGBE_PSR_VLAN_SWC 0x16220
+#define NGBE_PSR_VLAN_SWC_VM_L 0x16224
+#define NGBE_PSR_VLAN_SWC_IDX 0x16230 /* 32 vlan entries */
+/* VLAN pool filtering masks */
+#define NGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */
+#define NGBE_PSR_VLAN_SWC_ENTRIES 32
+#define NGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU
+#define NGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* Management */
+#define NGBE_PSR_MNG_FIT_CTL 0x15820
+/* Management Bit Fields and Masks */
+#define NGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Management Proxy Enable*/
+#define NGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */
+#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC
+ *traffic */
+#define NGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28
+
+#define NGBE_PSR_MNG_FLEX_SEL 0x1582C
+#define NGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) /* [0,15] */
+#define NGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16))
+#define NGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16))
+
+/* mirror */
+#define NGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) /* [0,3] */
+#define NGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8))
+#define NGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8))
+
+/* 1588 */
+#define NGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */
+#define NGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */
+#define NGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */
+#define NGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */
+#define NGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */
+#define NGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */
+/* 1588 CTL Bit */
+#define NGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */
+#define NGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */
+#define NGBE_PSR_1588_CTL_TYPE_L2_V2 0x00
+#define NGBE_PSR_1588_CTL_TYPE_L4_V1 0x02
+#define NGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04
+#define NGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A
+#define NGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/
+/* 1588 msg type bit */
+#define NGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU
+#define NGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00
+#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01
+#define NGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02
+#define NGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03
+#define NGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04
+#define NGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define NGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define NGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define NGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define NGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define NGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define NGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define NGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define NGBE_PSR_WKUP_CTL 0x15B80 +#define NGBE_PSR_WKUP_IPV 0x15B84 +#define NGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define NGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) /* [0,3] */ +#define NGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define NGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) /* [0,15] */ +#define NGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define NGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define NGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define NGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define NGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define NGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset + * assertion */ +/* Mask for Ext. 
flex filters */ +#define NGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define NGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define NGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define NGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define NGBE_TDB_RFCS 0x1CE00 +#define NGBE_TDB_PB_SZ 0x1CC00 + +#define NGBE_TDB_PRB_CTL 0x17010 +#define NGBE_TDB_PBRARB_CTL 0x1CD00 + +#define NGBE_TDB_PB_SZ_MAX 0x00005000U /* 20KB Packet Buffer */ +#define NGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define NGBE_MAX_PB 8 +/* statistic */ +#define NGBE_TDB_OUT_PKT_CNT 0x1CF00 +#define NGBE_TDB_MNG_PKT_CNT 0x1CF04 +#define NGBE_TDB_LB_PKT_CNT 0x1CF08 +#define NGBE_TDB_MNG_LARGE_DOP_CNT 0x1CF0C + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define NGBE_TSEC_CTL 0x1D000 +#define NGBE_TSEC_ST 0x1D004 +#define NGBE_TSEC_BUF_AF 0x1D008 +#define NGBE_TSEC_BUF_AE 0x1D00C +#define NGBE_TSEC_MIN_IFG 0x1D020 + +/* 1588 */ +#define NGBE_TSEC_1588_CTL 0x11F00 /* Tx Time Sync Control reg */ +#define NGBE_TSEC_1588_STMPL 0x11F04 /* Tx timestamp value Low */ +#define NGBE_TSEC_1588_STMPH 0x11F08 /* Tx timestamp value High */ +#define NGBE_TSEC_1588_SYSTIML 0x11F0C /* System time register Low */ +#define NGBE_TSEC_1588_SYSTIMH 0x11F10 /* System time register High */ +#define NGBE_TSEC_1588_INC 0x11F14 /* Increment attributes reg */ +#define NGBE_TSEC_1588_INC_IV(v) ((v) & 0x7FFFFFF) + +#define NGBE_TSEC_1588_ADJL 0x11F18 /* Time Adjustment Offset reg Low */ +#define NGBE_TSEC_1588_ADJH 0x11F1C /* Time Adjustment Offset reg High*/ + +#define NGBE_TSEC_1588_INT_ST 0x11F20 +#define NGBE_TSEC_1588_INT_EN 0x11F24 + +/* 1588 fields */ +#define NGBE_TSEC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define NGBE_TSEC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + +#define NGBE_TSEC_1588_AUX_CTL 0x11F28 +#define NGBE_TSEC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_L(i) (0x11F3C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_FREQ_CLK_H(i) (0x11F40 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_L(i) (0x11F4C + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_AUX_STMP_H(i) (0x11F50 + ((i) * 8)) /* [0,1] */ +#define NGBE_TSEC_1588_SDP(n) (0x11F5C + ((n) * 4)) /* [0,3] */ + +/********************************* RSEC **************************************/ +/* general rsec */ +#define NGBE_RSEC_CTL 0x17000 +#define NGBE_RSEC_ST 0x17004 +/* general rsec fields */ +#define NGBE_RSEC_CTL_SECRX_DIS 0x00000001U +#define NGBE_RSEC_CTL_RX_DIS 0x00000002U +#define NGBE_RSEC_CTL_CRC_STRIP 0x00000004U +#define NGBE_RSEC_CTL_SAVE_MAC_ERR 0x00000040U +#define NGBE_RSEC_ST_RSEC_RDY 0x00000001U +#define NGBE_RSEC_ST_RSEC_OFLD_DIS 0x00000002U +#define NGBE_RSEC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define NGBE_RSEC_LSEC_CAP 0x17200 +#define NGBE_RSEC_LSEC_CTL 0x17204 +#define NGBE_RSEC_LSEC_SCI_L 0x17208 +#define NGBE_RSEC_LSEC_SCI_H 0x1720C +#define NGBE_RSEC_LSEC_SA0 0x17210 +#define NGBE_RSEC_LSEC_SA1 0x17214 +#define NGBE_RSEC_LSEC_PKNUM0 0x17218 +#define NGBE_RSEC_LSEC_PKNUM1 0x1721C +#define NGBE_RSEC_LSEC_KEY0(_n) 0x17220 +#define NGBE_RSEC_LSEC_KEY1(_n) 
0x17230 +#define NGBE_RSEC_LSEC_UNTAG_PKT 0x17240 +#define NGBE_RSEC_LSEC_DEC_OCTET 0x17244 +#define NGBE_RSEC_LSEC_VLD_OCTET 0x17248 +#define NGBE_RSEC_LSEC_BAD_PKT 0x1724C +#define NGBE_RSEC_LSEC_NOSCI_PKT 0x17250 +#define NGBE_RSEC_LSEC_UNSCI_PKT 0x17254 +#define NGBE_RSEC_LSEC_UNCHK_PKT 0x17258 +#define NGBE_RSEC_LSEC_DLY_PKT 0x1725C +#define NGBE_RSEC_LSEC_LATE_PKT 0x17260 +#define NGBE_RSEC_LSEC_OK_PKT(_n) 0x17264 +#define NGBE_RSEC_LSEC_INV_PKT(_n) 0x17274 +#define NGBE_RSEC_LSEC_BADSA_PKT 0x1727C +#define NGBE_RSEC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define NGBE_RSEC_IPS_IDX 0x17100 +#define NGBE_RSEC_IPS_IDX_WT 0x80000000U +#define NGBE_RSEC_IPS_IDX_RD 0x40000000U +#define NGBE_RSEC_IPS_IDX_TB_IDX 0x0U /* */ +#define NGBE_RSEC_IPS_IDX_TB_IP 0x00000002U +#define NGBE_RSEC_IPS_IDX_TB_SPI 0x00000004U +#define NGBE_RSEC_IPS_IDX_TB_KEY 0x00000006U +#define NGBE_RSEC_IPS_IDX_EN 0x00000001U +#define NGBE_RSEC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define NGBE_RSEC_IPS_SPI 0x17114 +#define NGBE_RSEC_IPS_IP_IDX 0x17118 +#define NGBE_RSEC_IPS_KEY(i) (0x1711C + ((i) * 4)) +#define NGBE_RSEC_IPS_SALT 0x1712C +#define NGBE_RSEC_IPS_MODE 0x17130 +#define NGBE_RSEC_IPS_MODE_IPV6 0x00000010 +#define NGBE_RSEC_IPS_MODE_DEC 0x00000008 +#define NGBE_RSEC_IPS_MODE_ESP 0x00000004 +#define NGBE_RSEC_IPS_MODE_AH 0x00000002 +#define NGBE_RSEC_IPS_MODE_VALID 0x00000001 + +/************************************** ETH PHY ******************************/ +#define NGBE_XPCS_IDA_ADDR 0x13000 +#define NGBE_XPCS_IDA_DATA 0x13004 +#define NGBE_ETHPHY_IDA_ADDR 0x13008 +#define NGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define NGBE_MNG_FW_SM 0x1E000 +#define NGBE_MNG_SW_SM 0x1E004 +#define NGBE_MNG_SWFW_SYNC 0x1E008 +#define NGBE_MNG_MBOX 0x1E100 +#define NGBE_MNG_MBOX_CTL 0x1E044 + +#define NGBE_MNG_OS2BMC_CNT 0x1E094 +#define NGBE_MNG_BMC2OS_CNT 0x1E090 + +/* Firmware Semaphore Register */ +#define NGBE_MNG_FW_SM_MODE_MASK 0xE +#define NGBE_MNG_FW_SM_TS_ENABLED 0x1 +/* SW Semaphore Register bitmasks */ +#define NGBE_MNG_SW_SM_SM 0x00000001U /* software Semaphore */ + +/* SW_FW_SYNC definitions */ +#define NGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define NGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define NGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define NGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define NGBE_MNG_MBOX_CTL_SWACK 0x2 +#define NGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define NGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define NGBE_MAC_TX_CFG 0x11000 +#define NGBE_MAC_RX_CFG 0x11004 +#define NGBE_MAC_PKT_FLT 0x11008 +#define NGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define NGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define NGBE_MAC_WDG_TIMEOUT 0x1100C +#define NGBE_MAC_TX_FLOW_CTRL 0x11070 +#define NGBE_MAC_RX_FLOW_CTRL 0x11090 +#define NGBE_MAC_INT_ST 0x110B0 +#define NGBE_MAC_INT_EN 0x110B4 +#define NGBE_MAC_ADDRESS0_HIGH 0x11300 +#define NGBE_MAC_ADDRESS0_LOW 0x11304 + +#define NGBE_MAC_TX_CFG_TE 0x00000001U +#define NGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define NGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define NGBE_MAC_RX_CFG_RE 0x00000001U +#define NGBE_MAC_RX_CFG_JE 0x00000100U +#define NGBE_MAC_RX_CFG_LM 0x00000400U +#define NGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define NGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define NGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 + +#define NGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ + +#define NGBE_MSCA 0x11200 +#define NGBE_MSCA_RA(v) ((0xFFFF & (v))) 
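The MAC datapath enables and the packet-filter bits above are plain read-modify-write controls. A short sketch, again assuming hypothetical rd32()/wr32() accessors::

  #include <linux/io.h>
  #include "ngbe_type.h"

  /* Hypothetical MMIO accessors, as in the earlier sketch. */
  static inline u32 rd32(struct ngbe_hw *hw, u32 reg)
  {
      return readl(hw->hw_addr + reg);
  }

  static inline void wr32(struct ngbe_hw *hw, u32 reg, u32 val)
  {
      writel(val, hw->hw_addr + reg);
  }

  /* Turn the MAC transmit and receive paths on. */
  static void ngbe_example_mac_enable(struct ngbe_hw *hw)
  {
      wr32(hw, NGBE_MAC_TX_CFG,
           rd32(hw, NGBE_MAC_TX_CFG) | NGBE_MAC_TX_CFG_TE);
      wr32(hw, NGBE_MAC_RX_CFG,
           rd32(hw, NGBE_MAC_RX_CFG) | NGBE_MAC_RX_CFG_RE);
  }

  /* Accept every frame by setting promiscuous mode in the packet filter. */
  static void ngbe_example_set_promisc(struct ngbe_hw *hw)
  {
      wr32(hw, NGBE_MAC_PKT_FLT,
           rd32(hw, NGBE_MAC_PKT_FLT) | NGBE_MAC_PKT_FLT_PR);
  }
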
+#define NGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define NGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define NGBE_MSCC 0x11204 +#define NGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define NGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; +#define NGBE_MSCC_SADDR ((0x1U) << 18) +#define NGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define NGBE_MSCC_BUSY ((0x1U) << 22) +#define NGBE_MDIO_CLK(v) ((0x7 & (v)) << 19) + +/* EEE registers */ + +/* statistic */ +#define NGBE_MAC_LXOFFRXC 0x11988 +#define NGBE_MAC_PXOFFRXC 0x119DC +#define NGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define NGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define NGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define NGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define NGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define NGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define NGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define NGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define NGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define NGBE_MMC_CONTROL 0x11800 +#define NGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define NGBE_MMC_CONTROL_UP 0x700 + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define NGBE_BME_CTL 0x12020 +#define NGBE_PX_MISC_IC 0x100 +#define NGBE_PX_MISC_ICS 0x104 +#define NGBE_PX_MISC_IEN 0x108 +#define NGBE_PX_MISC_IVAR 0x4FC +#define NGBE_PX_GPIE 0x118 +#define NGBE_PX_ISB_ADDR_L 0x160 +#define NGBE_PX_ISB_ADDR_H 0x164 +#define NGBE_PX_TCP_TIMER 0x170 +#define NGBE_PX_ITRSEL 0x180 +#define NGBE_PX_IC 0x120 +#define NGBE_PX_ICS 0x130 +#define NGBE_PX_IMS 0x140 +#define NGBE_PX_IMC 0x150 +#define NGBE_PX_IVAR(_i) (0x500 + (_i) * 4) /* [0,3] */ +#define NGBE_PX_ITR(_i) (0x200 + (_i) * 4) /* [0,8] */ +#define NGBE_PX_TRANSACTION_PENDING 0x168 +#define NGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define NGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define NGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is + * stalled */ +#define NGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key + * exchange */ +#define NGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define NGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define NGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except + * eth link down */ +#define NGBE_PX_MISC_IC_PHY 0x00040000U /* link up */ +#define NGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define NGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define NGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define NGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define NGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define NGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define NGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +#define NGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define NGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +/* Extended Interrupt Cause Set */ +#define NGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define NGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_ICS_STALL 0x00001000U +#define NGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define NGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define NGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define NGBE_PX_MISC_ICS_I2C 0x00010000U 
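The MSCA/MSCC macros above compose an MDIO transaction field by field. One plausible read sequence, assuming hypothetical rd32()/wr32() accessors and that BUSY is written to start the transfer and then polled until it clears::

  #include <linux/delay.h>
  #include <linux/io.h>
  #include "ngbe_type.h"

  /* Hypothetical MMIO accessors, as in the earlier sketches. */
  static inline u32 rd32(struct ngbe_hw *hw, u32 reg)
  {
      return readl(hw->hw_addr + reg);
  }

  static inline void wr32(struct ngbe_hw *hw, u32 reg, u32 val)
  {
      writel(val, hw->hw_addr + reg);
  }

  static int ngbe_example_mdio_read(struct ngbe_hw *hw, u32 phy_addr,
                                    u32 dev_addr, u32 reg_addr, u16 *val)
  {
      u32 command = 0;
      int i;

      /* Register, PHY and device address of the target. */
      wr32(hw, NGBE_MSCA, NGBE_MSCA_RA(reg_addr) |
                          NGBE_MSCA_PA(phy_addr) |
                          NGBE_MSCA_DA(dev_addr));

      /* Kick off the read and wait for the controller to go idle. */
      wr32(hw, NGBE_MSCC, NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | NGBE_MSCC_BUSY);

      for (i = 0; i < 1000; i++) {
          command = rd32(hw, NGBE_MSCC);
          if (!(command & NGBE_MSCC_BUSY))
              break;
          udelay(10);
      }
      if (command & NGBE_MSCC_BUSY)
          return NGBE_ERR_PHY_TIMEOUT;

      *val = (u16)NGBE_MSCC_DATA(command);
      return 0;
  }
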
+#define NGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define NGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define NGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define NGBE_PX_MISC_ICS_SPI 0x00200000U +#define NGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_ICS_GPIO 0x04000000U +#define NGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_ICS_TIMER 0x80000000U + +/* Extended Interrupt Enable Set */ +#define NGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define NGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define NGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define NGBE_PX_MISC_IEN_STALL 0x00001000U +#define NGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define NGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define NGBE_PX_MISC_IEN_I2C 0x00010000U +#define NGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define NGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define NGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define NGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define NGBE_PX_MISC_IEN_SPI 0x00200000U +#define NGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define NGBE_PX_MISC_IEN_GPIO 0x04000000U +#define NGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define NGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define NGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +#define NGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define NGBE_PX_MISC_IEN_TIMER 0x80000000U + +#define NGBE_PX_MISC_IEN_MASK ( \ + NGBE_PX_MISC_IEN_ETH_LKDN| \ + NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_ETH_EVENT | \ + NGBE_PX_MISC_IEN_ETH_LK | \ + NGBE_PX_MISC_IEN_ETH_AN | \ + NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IEN_VF_MBOX | \ + NGBE_PX_MISC_IEN_GPIO | \ + NGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ + NGBE_PX_MISC_IEN_STALL | \ + NGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + NGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define NGBE_PX_GPIE_MODEL 0x00000001U +#define NGBE_PX_GPIE_IMEN 0x00000002U +#define NGBE_PX_GPIE_LL_INTERVAL 0x000000F0U + +/* Interrupt Vector Allocation Registers */ +#define NGBE_PX_IVAR_REG_NUM 64 +#define NGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define NGBE_MAX_INT_RATE 500000 +#define NGBE_MIN_INT_RATE 980 +#define NGBE_MAX_EITR 0x00007FFCU +#define NGBE_MIN_EITR 4 +#define NGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define NGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define NGBE_PX_ITR_LLI_MOD 0x00008000U +#define NGBE_PX_ITR_CNT_WDIS 0x80000000U +#define NGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define NGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define NGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define NGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define NGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +/* Transmit Config masks */ +#define NGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define NGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define NGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. 
wr-bk flushing */ +#define NGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define NGBE_PX_TR_CFG_THRE_SHIFT 8 + +#define NGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_RP((q_per_pool)*(vf_number) + (vf_q_index))) + +#define NGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (NGBE_PX_TR_WP((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define NGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) /* [0, 7] */ +#define NGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define NGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define NGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define NGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +/* PX_RR_CFG bit definitions */ +#define NGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define NGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define NGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define NGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define NGBE_PX_RR_CFG_VLAN 0x80000000U +#define NGBE_PX_RR_CFG_RSC 0x20000000U +#define NGBE_PX_RR_CFG_CNTAG 0x10000000U +#define NGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define NGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define NGBE_PX_RR_CFG_STALL 0x02000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define NGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define NGBE_PX_RR_CFG_RR_THER 0x00070000U +#define NGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define NGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define NGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define NGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define NGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* statistic */ +#define NGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPRC(_i) (0x1024 + ((_i) * 64)) + + +#define NGBE_PX_MPTC(_i) (0x3020 + ((_i) * 64)) /* [0,7] */ +#define NGBE_PX_BPTC(_i) (0x3024 + ((_i) * 64)) + +#define NGBE_VX_GPRC 0x01014 +#define NGBE_VX_GORC_LSB 0x01018 +#define NGBE_VX_GORC_MSB 0x0101C +#define NGBE_VX_MPRC 0x01020 +#define NGBE_VX_BPRC 0x01024 + +#define NGBE_VX_GPTC 0x03014 +#define NGBE_VX_GOTC_LSB 0x03018 +#define NGBE_VX_GOTC_MSB 0x0301C +#define NGBE_VX_MPTC 0x03020 +#define NGBE_VX_BPTC 0x03024 + +#define NGBE_PX_GPRC 0x12504 + +#define NGBE_PX_GPTC 0x18308 + +#define NGBE_PX_GORC_LSB 0x12508 +#define NGBE_PX_GORC_MSB 0x1250C + +#define NGBE_PX_GOTC_LSB 0x1830C +#define NGBE_PX_GOTC_MSB 0x18310 + +/*************************** Flash region definition *************************/ +/* EEC Register */ +#define NGBE_EEC_SK 0x00000001U /* EEPROM Clock */ +#define NGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */ +#define NGBE_EEC_DI 0x00000004U /* EEPROM Data In */ +#define NGBE_EEC_DO 0x00000008U /* EEPROM Data Out */ +#define NGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */ +#define NGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */ +#define NGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */ +#define NGBE_EEC_FWE_SHIFT 4 +#define NGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */ +#define NGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */ +#define NGBE_EEC_PRES 0x00000100U /* EEPROM Present */ +#define NGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */ +#define NGBE_EEC_FLUP 0x00800000U /* Flash update command */ +#define NGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */ +#define NGBE_EEC_FLUDONE 0x04000000U /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define NGBE_EEC_ADDR_SIZE 0x00000400U +#define 
NGBE_EEC_SIZE 0x00007800U /* EEPROM Size */ +#define NGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD alows 14 bits for addr. */ + +#define NGBE_EEC_SIZE_SHIFT 11 +#define NGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define NGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define NGBE_FLA_LOCKED 0x00000040U + +/* Part Number String Length */ +#define NGBE_PBANUM_LENGTH 32 + +/* Checksum and EEPROM pointers */ +#define NGBE_PBANUM_PTR_GUARD 0xFAFA +#define NGBE_CHECKSUM_CAP_ST_PASS 0x80658383 +#define NGBE_CHECKSUM_CAP_ST_FAIL 0x70657376 +#define NGBE_EEPROM_CHECKSUM 0x2F +#define NGBE_EEPROM_SUM 0xBABA +#define NGBE_OPTION_ROM_PTR 0x05 +#define NGBE_SHADOW_RAM_SIZE 0x4000 +#define NGBE_PCIE_CONFIG_SIZE 0x08 +#define NGBE_EEPROM_LAST_WORD 0x800 +#define NGBE_FW_PTR 0x0F +#define NGBE_SW_REGION_PTR 0x28 + +#define NGBE_CALSUM_COMMAND 0xE9 +#define NGBE_CALSUM_CAP_STATUS 0x10224 +#define NGBE_EEPROM_VERSION_STORE_REG 0x1022C +#define NGBE_SAN_MAC_ADDR_PTR 0x18 +#define NGBE_DEVICE_CAPS 0x1C +#define NGBE_EEPROM_VERSION_L 0x1D +#define NGBE_EEPROM_VERSION_H 0x1E + +#define NGBE_MAX_MSIX_VECTORS_EMERALD 0x09 + +/* MSI-X capability fields masks */ +#define NGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* EEPROM Commands - SPI */ +#define NGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define NGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define NGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define NGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define NGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define NGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define NGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define NGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define NGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define NGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define NGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define NGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define NGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define NGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define NGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define NGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define NGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define NGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 + +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define NGBE_EEPROM_PAGE_SIZE_MAX 128 +#define NGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define NGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define NGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define NGBE_EEPROM_CCD_BIT 2 + +#ifndef NGBE_EEPROM_GRANT_ATTEMPTS +#define NGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +#ifndef NGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define NGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef NGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define NGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define NGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define NGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define NGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define NGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define 
NGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define NGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define NGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define NGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define NGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define NGBE_FW_LESM_STATE_1 0x1 +#define NGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define NGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define NGBE_FW_PATCH_VERSION_4 0x7 +#define NGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define NGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define NGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define NGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define NGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. SAN MAC block */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define NGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define NGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define NGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define NGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define NGBE_PCI_DEVICE_STATUS 0xAA +#define NGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define NGBE_PCI_LINK_STATUS 0xB2 +#define NGBE_PCI_DEVICE_CONTROL2 0xC8 +#define NGBE_PCI_LINK_WIDTH 0x3F0 +#define NGBE_PCI_LINK_WIDTH_1 0x10 +#define NGBE_PCI_LINK_WIDTH_2 0x20 +#define NGBE_PCI_LINK_WIDTH_4 0x40 +#define NGBE_PCI_LINK_WIDTH_8 0x80 +#define NGBE_PCI_LINK_SPEED 0xF +#define NGBE_PCI_LINK_SPEED_2500 0x1 +#define NGBE_PCI_LINK_SPEED_5000 0x2 +#define NGBE_PCI_LINK_SPEED_8000 0x3 +#define NGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define NGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define NGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define NGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << NGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define NGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define NGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define NGBE_PCIDEVCTRL2_50_100us 0x1 +#define NGBE_PCIDEVCTRL2_1_2ms 0x2 +#define NGBE_PCIDEVCTRL2_16_32ms 0x5 +#define NGBE_PCIDEVCTRL2_65_130ms 0x6 +#define NGBE_PCIDEVCTRL2_260_520ms 0x9 +#define NGBE_PCIDEVCTRL2_1_2s 0xa +#define NGBE_PCIDEVCTRL2_4_8s 0xd +#define NGBE_PCIDEVCTRL2_17_34s 0xe + +/******************* Receive Descriptor bit definitions **********************/ +#define NGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define NGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define NGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define NGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define NGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define NGBE_RXD_NEXTP_SHIFT 0x00000004U +#define NGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define NGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define NGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define NGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define 
NGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define NGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define NGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define NGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define NGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define NGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define NGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define NGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define NGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define NGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define NGBE_RXD_STAT_VEXT 0x00000800U /* 1st VLAN found */ +#define NGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency + * Int */ +#define NGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define NGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define NGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define NGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define NGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define NGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define NGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define NGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define NGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define NGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define NGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define NGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define NGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define NGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define NGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define NGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define NGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define NGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define NGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define NGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define NGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define NGBE_RXD_TPID_MASK 0x000001C0U +#define NGBE_RXD_TPID_SHIFT 6 +#define NGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define NGBE_RXD_RSCCNT_MASK 0x001E0000U +#define NGBE_RXD_RSCCNT_SHIFT 17 +#define NGBE_RXD_HDRBUFLEN_SHIFT 5 +#define NGBE_RXD_SPLITHEADER_EN 0x00001000U +#define NGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define NGBE_RXD_RSSTYPE_NONE 0x00000000U +#define NGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define NGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define NGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define NGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define NGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define NGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define NGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define NGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define NGBE_PTYPE_TUN_IPV4 (0x80) +#define NGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define NGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define NGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define NGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define NGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define NGBE_PTYPE_PKT_MAC (0x10) +#define NGBE_PTYPE_PKT_IP (0x20) +#define NGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define NGBE_PTYPE_TYP_MAC (0x01) +#define NGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define NGBE_PTYPE_TYP_FIP (0x03) +#define NGBE_PTYPE_TYP_LLDP (0x04) +#define NGBE_PTYPE_TYP_CNM (0x05) +#define NGBE_PTYPE_TYP_EAPOL (0x06) +#define 
NGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define NGBE_PTYPE_PKT_IPV6 (0x08) +#define NGBE_PTYPE_TYP_IPFRAG (0x01) +#define NGBE_PTYPE_TYP_IP (0x02) +#define NGBE_PTYPE_TYP_UDP (0x03) +#define NGBE_PTYPE_TYP_TCP (0x04) +#define NGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define NGBE_PTYPE_PKT_VFT (0x08) +#define NGBE_PTYPE_TYP_FCOE (0x00) +#define NGBE_PTYPE_TYP_FCDATA (0x01) +#define NGBE_PTYPE_TYP_FCRDY (0x02) +#define NGBE_PTYPE_TYP_FCRSP (0x03) +#define NGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum ngbe_l2_ptypes { + NGBE_PTYPE_L2_ABORTED = (NGBE_PTYPE_PKT_MAC), + NGBE_PTYPE_L2_MAC = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_MAC), + NGBE_PTYPE_L2_TS = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_TS), + NGBE_PTYPE_L2_FIP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_FIP), + NGBE_PTYPE_L2_LLDP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_LLDP), + NGBE_PTYPE_L2_CNM = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_CNM), + NGBE_PTYPE_L2_EAPOL = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_EAPOL), + NGBE_PTYPE_L2_ARP = (NGBE_PTYPE_PKT_MAC | NGBE_PTYPE_TYP_ARP), + + NGBE_PTYPE_L2_IPV4_FRAG = (NGBE_PTYPE_PKT_IP | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV4 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV4_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV4_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV4_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_TYP_SCTP), + NGBE_PTYPE_L2_IPV6_FRAG = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IPFRAG), + NGBE_PTYPE_L2_IPV6 = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_IP), + NGBE_PTYPE_L2_IPV6_UDP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_UDP), + NGBE_PTYPE_L2_IPV6_TCP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_TCP), + NGBE_PTYPE_L2_IPV6_SCTP = (NGBE_PTYPE_PKT_IP | NGBE_PTYPE_PKT_IPV6 | + NGBE_PTYPE_TYP_SCTP), + + NGBE_PTYPE_L2_FCOE = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_TYP_FCOE), + NGBE_PTYPE_L2_FCOE_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_TYP_FCOTHER), + NGBE_PTYPE_L2_FCOE_VFT = (NGBE_PTYPE_PKT_FCOE | NGBE_PTYPE_PKT_VFT), + NGBE_PTYPE_L2_FCOE_VFT_FCDATA = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCDATA), + NGBE_PTYPE_L2_FCOE_VFT_FCRDY = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRDY), + NGBE_PTYPE_L2_FCOE_VFT_FCRSP = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCRSP), + NGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (NGBE_PTYPE_PKT_FCOE | + NGBE_PTYPE_PKT_VFT | NGBE_PTYPE_TYP_FCOTHER), + + NGBE_PTYPE_L2_TUN4_MAC = (NGBE_PTYPE_TUN_IPV4 | NGBE_PTYPE_PKT_IGM), + NGBE_PTYPE_L2_TUN6_MAC = (NGBE_PTYPE_TUN_IPV6 | NGBE_PTYPE_PKT_IGM), +}; + +#define NGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define NGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define NGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define NGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define NGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +#define NGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/* Security Processing bit Indication */ +#define NGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define NGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define NGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define NGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define 
NGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define NGBE_RXD_ERR_FRAME_ERR_MASK NGBE_RXD_ERR_RXE + +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define NGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define NGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define NGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define NGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define NGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define NGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define NGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define NGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define NGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define NGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define NGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define NGBE_TXD_RS 0x08000000U /* Report Status */ +#define NGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define NGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define NGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define NGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define NGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define NGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define NGBE_TXD_CC 0x00000080U /* Check Context */ +#define NGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define NGBE_TXD_IIPCS 0x00000400U +#define NGBE_TXD_EIPCS 0x00000800U +#define NGBE_TXD_L4CS 0x00000200U +#define NGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define NGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define NGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define NGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define NGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define NGBE_TXD_ENC_SHIFT 15 + +#define NGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define NGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define NGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define NGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define NGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define NGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define NGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define NGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define NGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define NGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define NGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define NGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define NGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define NGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define NGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define NGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define NGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define NGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define NGBE_TXD_TUNNEL_UDP (0x0ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) +#define NGBE_TXD_TUNNEL_GRE (0x1ULL << NGBE_TXD_TUNNEL_TYPE_SHIFT) + +/************ ngbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define NGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define NGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define NGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define NGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ 
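The transmit command and offload bits above combine into the cmd_type_len/olinfo_status words of the ngbe_tx_desc union declared just below. A minimal single-buffer sketch; the exact bit combination a production ring programs is not spelled out by this hunk::

  #include <asm/byteorder.h>
  #include <linux/types.h>
  #include "ngbe_type.h"

  /* Describe one complete frame held in a single DMA buffer. */
  static void ngbe_example_fill_tx_desc(union ngbe_tx_desc *txd,
                                        dma_addr_t dma, u32 len)
  {
      u32 cmd_type = len | NGBE_TXD_DTYP_DATA | NGBE_TXD_EOP |
                     NGBE_TXD_IFCS | NGBE_TXD_RS;
      u32 olinfo = len << NGBE_TXD_PAYLEN_SHIFT;

      txd->read.buffer_addr = cpu_to_le64(dma);
      txd->read.cmd_type_len = cpu_to_le32(cmd_type);
      txd->read.olinfo_status = cpu_to_le32(olinfo);
  }

  /* After completion the hardware writes NGBE_TXD_STAT_DD back into wb.status. */
  static bool ngbe_example_tx_done(const union ngbe_tx_desc *txd)
  {
      return !!(le32_to_cpu(txd->wb.status) & NGBE_TXD_STAT_DD);
  }
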
+#define NGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define NGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define NGBE_TX_DESC_SPECIAL_PRI_SHIFT NGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ +union ngbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union ngbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ngbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define NGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define NGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define NGBE_ATR_HASH_MASK 0x7fff +#define NGBE_ATR_L4TYPE_MASK 0x3 +#define NGBE_ATR_L4TYPE_UDP 0x1 +#define NGBE_ATR_L4TYPE_TCP 0x2 +#define NGBE_ATR_L4TYPE_SCTP 0x3 +#define NGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define NGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ngbe_atr_flow_type { + NGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + NGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + NGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + NGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + NGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + NGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + NGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + NGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + NGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + NGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + NGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + NGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
*/ +union ngbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ngbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +/****************** Manageablility Host Interface defines ********************/ +#define NGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ +#define NGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */ +#define NGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */ +#define NGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */ +#define NGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define NGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */ +#define NGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 244 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_RESET_CMD 0xDF +#define FW_RESET_LEN 0x2 +#define FW_SETUP_MAC_LINK_CMD 0xE0 +#define FW_SETUP_MAC_LINK_LEN 0x2 +#define FW_FLASH_UPGRADE_START_CMD 0xE3 +#define FW_FLASH_UPGRADE_START_LEN 0x1 +#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4 +#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5 +#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4 +#define FW_EEPROM_CHECK_STATUS 0xE9 +#define FW_PHY_SIGNAL 0xF0 + +/* Host Interface Command Structures */ +struct ngbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ngbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ngbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ngbe_hic_hdr2 { + struct ngbe_hic_hdr2_req req; + struct ngbe_hic_hdr2_rsp rsp; +}; + +struct ngbe_hic_drv_info { + struct ngbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct ngbe_hic_read_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_write_shadow_ram { + union ngbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ngbe_hic_disable_rxen { + struct ngbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_reset { + struct ngbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct ngbe_hic_phy_cfg { + struct ngbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum ngbe_module_id { + NGBE_MODULE_EEPROM = 0, + NGBE_MODULE_FIRMWARE, + NGBE_MODULE_HARDWARE, + NGBE_MODULE_PCIE +}; + +struct ngbe_hic_upg_start { + struct ngbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct ngbe_hic_upg_write { + struct ngbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum ngbe_upg_flag { + NGBE_RESET_NONE = 0, + NGBE_RESET_FIRMWARE, + NGBE_RELOAD_EEPROM, + NGBE_RESET_LAN +}; + +struct ngbe_hic_upg_verify { + struct ngbe_hic_hdr hdr; + u32 action_flag; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define NGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define NGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define NGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define NGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Manangbeent */ +/* DMA Coalescing configuration */ +struct ngbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* Autonegotiation advertised speeds */ +typedef u32 ngbe_autoneg_advertised; +/* Link speed */ +#define NGBE_LINK_SPEED_UNKNOWN 0 +#define NGBE_LINK_SPEED_100_FULL 1 +#define NGBE_LINK_SPEED_1GB_FULL 2 +#define NGBE_LINK_SPEED_10_FULL 8 +#define NGBE_LINK_SPEED_AUTONEG (NGBE_LINK_SPEED_100_FULL | \ + NGBE_LINK_SPEED_1GB_FULL | \ + NGBE_LINK_SPEED_10_FULL) + +/* Physical layer type */ +typedef u32 ngbe_physical_layer; +#define NGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define NGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define NGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define NGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define NGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define NGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define NGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define NGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + +/* Special PHY Init Routine */ +#define NGBE_PHY_INIT_OFFSET_NL 0x002B +#define NGBE_PHY_INIT_END_NL 0xFFFF +#define NGBE_CONTROL_MASK_NL 0xF000 +#define NGBE_DATA_MASK_NL 0x0FFF +#define NGBE_CONTROL_SHIFT_NL 12 +#define NGBE_DELAY_NL 0 +#define NGBE_DATA_NL 1 +#define NGBE_CONTROL_NL 0x000F +#define NGBE_CONTROL_EOL_NL 0x0FFF +#define NGBE_CONTROL_SOL_NL 0x0000 + +/* ethtool */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define NGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define NGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define NGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define NGBE_CABLE_DC 5556 /* Delay Copper */ +#define NGBE_CABLE_DO 5000 /* Delay Optical 
*/ + +/* Calculate Interface Delay X540 */ +#define NGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define NGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define NGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define NGBE_ID_X540 (NGBE_MAC_DC + NGBE_XAUI_DC + NGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define NGBE_PHY_D 12800 +#define NGBE_MAC_D 4096 +#define NGBE_XAUI_D (2 * 1024) + +#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define NGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define NGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define NGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID_X540) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + + +/* Calculate delay value in bit times */ +#define NGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (NGBE_B2BT(_max_frame_link) + \ + NGBE_PFC_D + \ + (2 * NGBE_CABLE_DC) + \ + (2 * NGBE_ID) + \ + NGBE_HD) / 25 + 1) + \ + 2 * NGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define NGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * NGBE_B2BT(_max_frame_tc) + \ + (36 * NGBE_PCI_DELAY / 25) + 1) + +#define NGBE_LOW_DV(_max_frame_tc) \ + (2 * NGBE_LOW_DV_X540(_max_frame_tc)) + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum ngbe_fcoe_boot_status { + ngbe_fcoe_bootstatus_disabled = 0, + ngbe_fcoe_bootstatus_enabled = 1, + ngbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ngbe_eeprom_type { + ngbe_eeprom_uninitialized = 0, + ngbe_eeprom_spi, + ngbe_flash, + ngbe_eeprom_none /* No NVM support */ +}; + +enum ngbe_phy_type { + ngbe_phy_unknown = 0, + ngbe_phy_none, + ngbe_phy_internal, + ngbe_phy_m88e1512, + ngbe_phy_m88e1512_sfi, + ngbe_phy_yt8521s, + ngbe_phy_yt8521s_sfi, + ngbe_phy_zte, + ngbe_phy_sfp_passive_tyco, + ngbe_phy_sfp_passive_unknown, + ngbe_phy_sfp_active_unknown, + ngbe_phy_sfp_avago, + ngbe_phy_sfp_ftl, + ngbe_phy_sfp_ftl_active, + ngbe_phy_sfp_unknown, + ngbe_phy_sfp_intel, + ngbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ngbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum ngbe_sfp_type { + ngbe_sfp_type_da_cu = 0, + ngbe_sfp_type_sr = 1, + ngbe_sfp_type_lr = 2, + ngbe_sfp_type_da_cu_core0 = 3, + ngbe_sfp_type_da_cu_core1 = 4, + ngbe_sfp_type_srlr_core0 = 5, + ngbe_sfp_type_srlr_core1 = 6, + ngbe_sfp_type_da_act_lmt_core0 = 7, + ngbe_sfp_type_da_act_lmt_core1 = 8, + ngbe_sfp_type_1g_cu_core0 = 9, + ngbe_sfp_type_1g_cu_core1 = 10, + ngbe_sfp_type_1g_sx_core0 = 11, + ngbe_sfp_type_1g_sx_core1 = 12, + ngbe_sfp_type_1g_lx_core0 = 13, + ngbe_sfp_type_1g_lx_core1 = 14, + ngbe_sfp_type_not_present = 0xFFFE, + ngbe_sfp_type_unknown = 0xFFFF +}; + +enum ngbe_media_type { + ngbe_media_type_unknown = 0, + ngbe_media_type_fiber, + ngbe_media_type_copper, + ngbe_media_type_backplane, + ngbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ngbe_fc_mode { + ngbe_fc_none = 0, + ngbe_fc_rx_pause, + ngbe_fc_tx_pause, + ngbe_fc_full, + ngbe_fc_default +}; + +/* Smart Speed Settings */ +#define 
NGBE_SMARTSPEED_MAX_RETRIES 3 +enum ngbe_smart_speed { + ngbe_smart_speed_auto = 0, + ngbe_smart_speed_on, + ngbe_smart_speed_off +}; + +/* PCI bus types */ +enum ngbe_bus_type { + ngbe_bus_type_unknown = 0, + ngbe_bus_type_pci, + ngbe_bus_type_pcix, + ngbe_bus_type_pci_express, + ngbe_bus_type_internal, + ngbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ngbe_bus_speed { + ngbe_bus_speed_unknown = 0, + ngbe_bus_speed_33 = 33, + ngbe_bus_speed_66 = 66, + ngbe_bus_speed_100 = 100, + ngbe_bus_speed_120 = 120, + ngbe_bus_speed_133 = 133, + ngbe_bus_speed_2500 = 2500, + ngbe_bus_speed_5000 = 5000, + ngbe_bus_speed_8000 = 8000, + ngbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ngbe_bus_width { + ngbe_bus_width_unknown = 0, + ngbe_bus_width_pcie_x1 = 1, + ngbe_bus_width_pcie_x2 = 2, + ngbe_bus_width_pcie_x4 = 4, + ngbe_bus_width_pcie_x8 = 8, + ngbe_bus_width_32 = 32, + ngbe_bus_width_64 = 64, + ngbe_bus_width_reserved +}; + +struct ngbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ngbe_bus_info { + enum ngbe_bus_speed speed; + enum ngbe_bus_width width; + enum ngbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ngbe_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ngbe_fc_mode current_mode; /* FC mode in effect */ + enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ngbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ngbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ngbe_mc_addr_itr) (struct ngbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ngbe_eeprom_operations { + s32 (*init_params)(struct ngbe_hw *); + s32 (*read)(struct ngbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ngbe_hw *, u16, u16, u16 *); + s32 (*read32)(struct ngbe_hw *, u16, u32 *); + s32 (*write)(struct ngbe_hw *, u16, u16); + s32 (*write_buffer)(struct ngbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ngbe_hw *, u16 *); + s32 (*update_checksum)(struct ngbe_hw *); + s32 
(*calc_checksum)(struct ngbe_hw *); + s32 (*eeprom_chksum_cap_st)(struct ngbe_hw *, u16, u32 *); + s32 (*phy_signal_set)(struct ngbe_hw *); +}; + +struct ngbe_flash_operations { + s32 (*init_params)(struct ngbe_hw *); + s32 (*read_buffer)(struct ngbe_hw *, u32, u32, u32 *); + s32 (*write_buffer)(struct ngbe_hw *, u32, u32, u32 *); +}; + +struct ngbe_mac_operations { + s32 (*init_hw)(struct ngbe_hw *); + s32 (*reset_hw)(struct ngbe_hw *); + s32 (*start_hw)(struct ngbe_hw *); + s32 (*clear_hw_cntrs)(struct ngbe_hw *); + enum ngbe_media_type (*get_media_type)(struct ngbe_hw *); + s32 (*get_mac_addr)(struct ngbe_hw *, u8 *); + s32 (*get_device_caps)(struct ngbe_hw *, u16 *); + s32 (*stop_adapter)(struct ngbe_hw *); + s32 (*get_bus_info)(struct ngbe_hw *); + void (*set_lan_id)(struct ngbe_hw *); + s32 (*enable_rx_dma)(struct ngbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct ngbe_hw *); + s32 (*enable_sec_rx_path)(struct ngbe_hw *); + s32 (*acquire_swfw_sync)(struct ngbe_hw *, u32); + void (*release_swfw_sync)(struct ngbe_hw *, u32); + + /* Link */ + void (*disable_tx_laser)(struct ngbe_hw *); + void (*enable_tx_laser)(struct ngbe_hw *); + void (*flap_tx_laser)(struct ngbe_hw *); + s32 (*setup_link)(struct ngbe_hw *, u32, bool); + s32 (*setup_mac_link)(struct ngbe_hw *, u32, bool); + s32 (*check_link)(struct ngbe_hw *, u32 *, bool *, bool); + s32 (*get_link_capabilities)(struct ngbe_hw *, u32 *, + bool *); + void (*set_rate_select_speed)(struct ngbe_hw *, u32); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ngbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ngbe_hw *, u32); + s32 (*led_off)(struct ngbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ngbe_hw *, u32, u8 *, u64, u32); + s32 (*clear_rar)(struct ngbe_hw *, u32); + s32 (*insert_mac_addr)(struct ngbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct ngbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ngbe_hw *, u32); + s32 (*clear_vmdq)(struct ngbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ngbe_hw *); + s32 (*update_uc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ngbe_hw *, u8 *, u32, + ngbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct ngbe_hw *); + s32 (*disable_mc)(struct ngbe_hw *); + s32 (*clear_vfta)(struct ngbe_hw *); + s32 (*set_vfta)(struct ngbe_hw *, u32, u32, bool); + s32 (*set_vlvf)(struct ngbe_hw *, u32, u32, bool, bool *); + s32 (*init_uta_tables)(struct ngbe_hw *); + void (*set_mac_anti_spoofing)(struct ngbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ngbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ngbe_hw *); + s32 (*setup_fc)(struct ngbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ngbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct ngbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ngbe_hw *hw); + void (*get_rtrup2tc)(struct ngbe_hw *hw, u8 *map); + void (*disable_rx)(struct ngbe_hw *hw); + void (*enable_rx)(struct ngbe_hw *hw); + void (*set_source_address_pruning)(struct ngbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ngbe_hw *, bool, int); + s32 (*dmac_config)(struct ngbe_hw *hw); + s32 (*setup_eee)(struct ngbe_hw *hw, bool enable_eee); +}; + +struct ngbe_phy_operations { + s32 (*identify)(struct ngbe_hw *); + s32 (*identify_sfp)(struct ngbe_hw *); + s32 (*init)(struct ngbe_hw *); + s32 (*reset)(struct ngbe_hw *); + s32 (*read_reg)(struct ngbe_hw *, u32, u32, u16 *); + s32 
(*write_reg)(struct ngbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ngbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ngbe_hw *, u32, u32, u16); + u32 (*setup_link)(struct ngbe_hw *, u32, bool); + s32 (*setup_internal_link)(struct ngbe_hw *); + u32 (*setup_link_speed)(struct ngbe_hw *, u32, bool); + s32 (*check_link)(struct ngbe_hw *, u32 *, bool *); + s32 (*check_overtemp)(struct ngbe_hw *); + s32 (*check_event)(struct ngbe_hw *); + s32 (*get_adv_pause)(struct ngbe_hw *, u8 *); + s32 (*get_lp_adv_pause)(struct ngbe_hw *, u8 *); + s32 (*set_adv_pause)(struct ngbe_hw *, u16); + s32 (*setup_once)(struct ngbe_hw *); +}; + +struct ngbe_eeprom_info { + struct ngbe_eeprom_operations ops; + enum ngbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +struct ngbe_flash_info { + struct ngbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + +#define NGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ngbe_mac_info { + struct ngbe_mac_operations ops; + u8 addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[NGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define NGBE_MAX_MTA 128 +#define NGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[NGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_shadow[NGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct ngbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct ngbe_dmac_config dmac_config; + bool set_lben; + bool autoneg; +}; + +struct ngbe_phy_info { + struct ngbe_phy_operations ops; + enum ngbe_phy_type type; + u32 addr; + u32 id; + enum ngbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ngbe_media_type media_type; + u32 phy_semaphore_mask; + u8 lan_id; /* to be delete */ + ngbe_autoneg_advertised autoneg_advertised; + enum ngbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + ngbe_physical_layer link_mode; +}; + +#include "ngbe_mbx.h" + +struct ngbe_mbx_operations { + void (*init_params)(struct ngbe_hw *hw); + s32 (*read)(struct ngbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ngbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ngbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ngbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ngbe_hw *, u16); + s32 (*check_for_ack)(struct ngbe_hw *, u16); + s32 (*check_for_rst)(struct ngbe_hw *, u16); +}; + +struct ngbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ngbe_mbx_info { + struct ngbe_mbx_operations ops; + struct ngbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +enum ngbe_reset_type { + NGBE_LAN_RESET = 0, + NGBE_SW_RESET, + NGBE_GLOBAL_RESET +}; + +enum ngbe_link_status { + NGBE_LINK_STATUS_NONE = 0, + NGBE_LINK_STATUS_KX, + NGBE_LINK_STATUS_KX4 +}; + +struct ngbe_hw { + u8 __iomem 
*hw_addr; + void *back; + struct ngbe_mac_info mac; + struct ngbe_addr_filter_info addr_ctrl; + struct ngbe_fc_info fc; + struct ngbe_phy_info phy; + struct ngbe_eeprom_info eeprom; + struct ngbe_flash_info flash; + struct ngbe_bus_info bus; + struct ngbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + enum ngbe_reset_type reset_type; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; + enum ngbe_link_status link_status; + u16 tpid[8]; +}; + +#define TCALL(hw, func, args...) (((hw)->func != NULL) \ + ? (hw)->func((hw), ##args) : NGBE_NOT_IMPLEMENTED) + +/* Error Codes */ +#define NGBE_OK 0 +#define NGBE_ERR 100 +#define NGBE_NOT_IMPLEMENTED 0x7FFFFFFF +/* (-NGBE_ERR, NGBE_ERR): reserved for non-ngbe defined error code */ +#define NGBE_ERR_NOSUPP -(NGBE_ERR+0) +#define NGBE_ERR_EEPROM -(NGBE_ERR+1) +#define NGBE_ERR_EEPROM_CHECKSUM -(NGBE_ERR+2) +#define NGBE_ERR_PHY -(NGBE_ERR+3) +#define NGBE_ERR_CONFIG -(NGBE_ERR+4) +#define NGBE_ERR_PARAM -(NGBE_ERR+5) +#define NGBE_ERR_MAC_TYPE -(NGBE_ERR+6) +#define NGBE_ERR_UNKNOWN_PHY -(NGBE_ERR+7) +#define NGBE_ERR_LINK_SETUP -(NGBE_ERR+8) +#define NGBE_ERR_ADAPTER_STOPPED -(NGBE_ERR+9) +#define NGBE_ERR_INVALID_MAC_ADDR -(NGBE_ERR+10) +#define NGBE_ERR_DEVICE_NOT_SUPPORTED -(NGBE_ERR+11) +#define NGBE_ERR_MASTER_REQUESTS_PENDING -(NGBE_ERR+12) +#define NGBE_ERR_INVALID_LINK_SETTINGS -(NGBE_ERR+13) +#define NGBE_ERR_AUTONEG_NOT_COMPLETE -(NGBE_ERR+14) +#define NGBE_ERR_RESET_FAILED -(NGBE_ERR+15) +#define NGBE_ERR_SWFW_SYNC -(NGBE_ERR+16) +#define NGBE_ERR_PHY_ADDR_INVALID -(NGBE_ERR+17) +#define NGBE_ERR_I2C -(NGBE_ERR+18) +#define NGBE_ERR_SFP_NOT_SUPPORTED -(NGBE_ERR+19) +#define NGBE_ERR_SFP_NOT_PRESENT -(NGBE_ERR+20) +#define NGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(NGBE_ERR+21) +#define NGBE_ERR_NO_SAN_ADDR_PTR -(NGBE_ERR+22) +#define NGBE_ERR_FDIR_REINIT_FAILED -(NGBE_ERR+23) +#define NGBE_ERR_EEPROM_VERSION -(NGBE_ERR+24) +#define NGBE_ERR_NO_SPACE -(NGBE_ERR+25) +#define NGBE_ERR_OVERTEMP -(NGBE_ERR+26) +#define NGBE_ERR_UNDERTEMP -(NGBE_ERR+27) +#define NGBE_ERR_FC_NOT_NEGOTIATED -(NGBE_ERR+28) +#define NGBE_ERR_FC_NOT_SUPPORTED -(NGBE_ERR+29) +#define NGBE_ERR_SFP_SETUP_NOT_COMPLETE -(NGBE_ERR+30) +#define NGBE_ERR_PBA_SECTION -(NGBE_ERR+31) +#define NGBE_ERR_INVALID_ARGUMENT -(NGBE_ERR+32) +#define NGBE_ERR_HOST_INTERFACE_COMMAND -(NGBE_ERR+33) +#define NGBE_ERR_OUT_OF_MEM -(NGBE_ERR+34) +#define NGBE_ERR_FEATURE_NOT_SUPPORTED -(NGBE_ERR+36) +#define NGBE_ERR_EEPROM_PROTECTED_REGION -(NGBE_ERR+37) +#define NGBE_ERR_FDIR_CMD_INCOMPLETE -(NGBE_ERR+38) +#define NGBE_ERR_FLASH_LOADING_FAILED -(NGBE_ERR+39) +#define NGBE_ERR_XPCS_POWER_UP_FAILED -(NGBE_ERR+40) +#define NGBE_ERR_FW_RESP_INVALID -(NGBE_ERR+41) +#define NGBE_ERR_PHY_INIT_NOT_DONE -(NGBE_ERR+42) +#define NGBE_ERR_TIMEOUT -(NGBE_ERR+43) +#define NGBE_ERR_TOKEN_RETRY -(NGBE_ERR+44) +#define NGBE_ERR_REGISTER -(NGBE_ERR+45) +#define NGBE_ERR_MBX -(NGBE_ERR+46) +#define NGBE_ERR_MNG_ACCESS_FAILED -(NGBE_ERR+47) +#define NGBE_ERR_PHY_TYPE -(NGBE_ERR+48) +#define NGBE_ERR_PHY_TIMEOUT -(NGBE_ERR+49) + +/** + * register operations + **/ +/* read register */ +#define NGBE_DEAD_READ_RETRIES 10 +#define NGBE_DEAD_READ_REG 0xdeadbeefU +#define NGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL + +#define NGBE_FAILED_READ_REG 0xffffffffU +#define NGBE_FAILED_READ_REG64 0xffffffffffffffffULL + +static inline bool NGBE_REMOVED(void __iomem *addr) +{ + return 
unlikely(!addr); +} + +static inline u32 +ngbe_rd32(u8 __iomem *base) +{ + return readl(base); +} + +static inline u32 +rd32(struct ngbe_hw *hw, u32 reg) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + + return val; +} +#define rd32a(a, reg, offset) ( \ + rd32((a), (reg) + ((offset) << 2))) + +static inline u32 +rd32m(struct ngbe_hw *hw, u32 reg, u32 mask) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val = NGBE_FAILED_READ_REG; + + if (unlikely(!base)) + return val; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +ngbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct ngbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + ngbe_wr32(base + reg, val); +} +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct ngbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = ngbe_rd32(base + reg); + if (unlikely(val == NGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + ngbe_wr32(base + reg, val); +} + +/* poll register */ +#define NGBE_MDIO_TIMEOUT 1000 +#define NGBE_I2C_TIMEOUT 1000 +#define NGBE_SPI_TIMEOUT 1000 +static inline s32 +po32m(struct ngbe_hw *hw, u32 reg, + u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + + udelay(usecs); + } while (true); + + return (count - loop <= count ? 0 : NGBE_ERR_TIMEOUT); +} + +#define NGBE_WRITE_FLUSH(H) rd32(H, NGBE_MIS_PWR) + +#endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/Kconfig b/drivers/net/ethernet/netswift/txgbe/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..5aba1985d83f870c68f40615c9b40699a48d808d --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/Kconfig @@ -0,0 +1,13 @@ +# +# Netswift driver configuration +# + +config TXGBE + tristate "Netswift 10G Network Interface Card" + default n + depends on PCI_MSI && NUMA && PCI_IOV && DCB + ---help--- + This driver supports Netswift 10G Ethernet cards. + To compile this driver as part of the kernel, choose Y here. + If unsure, choose N. + The default is N. diff --git a/drivers/net/ethernet/netswift/txgbe/Makefile b/drivers/net/ethernet/netswift/txgbe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f8531f3356a85ce22fbcdc45542825b9e03fb07a --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+# +# Makefile for the Netswift 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_TXGBE) += txgbe.o + +txgbe-objs := txgbe_main.o txgbe_ethtool.o \ + txgbe_hw.o txgbe_phy.o txgbe_bp.o \ + txgbe_mbx.o txgbe_mtd.o txgbe_param.o txgbe_lib.o txgbe_ptp.o diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe.h b/drivers/net/ethernet/netswift/txgbe/txgbe.h new file mode 100644 index 0000000000000000000000000000000000000000..ada52e2457c5ab1fdad987924fd48d63e2b8897d --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe.h @@ -0,0 +1,1260 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#ifndef _TXGBE_H_ +#define _TXGBE_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "txgbe_type.h" + +#ifndef KR_POLLING +#define KR_POLLING 0 +#endif + +#ifndef KR_MODE +#define KR_MODE 0 +#endif + +#ifndef AUTO +#define AUTO 1 +#endif + +#ifndef DEFAULT_FCPAUSE +#define DEFAULT_FCPAUSE 0xFFFF /* kylinft/kylinlx : 0x3FFF default to 0xFFFF*/ +#endif + +#ifndef MAX_REQUEST_SIZE +#define MAX_REQUEST_SIZE 256 /* kylinft : 512 default to 256*/ +#endif + +#ifndef DEFAULT_TXD +#define DEFAULT_TXD 512 /*deepinsw : 1024 default to 512*/ +#endif + +#ifndef DEFAULT_TX_WORK +#define DEFAULT_TX_WORK 256 /*deepinsw : 512 default to 256*/ +#endif + +#ifndef CL72_KRTR_PRBS_MODE_EN +#define CL72_KRTR_PRBS_MODE_EN 0x2fff /*deepinsw : 512 default to 256*/ +#endif + +#ifndef SFI_SET +#define SFI_SET 0 +#define SFI_MAIN 24 +#define SFI_PRE 4 +#define SFI_POST 16 +#endif + +#ifndef KR_SET +#define KR_SET 0 +#define KR_MAIN 27 +#define KR_PRE 8 +#define KR_POST 44 +#endif + +#ifndef KX4_SET +#define KX4_SET 0 +#define KX4_MAIN 40 +#define KX4_PRE 0 +#define KX4_POST 0 +#endif + +#ifndef KX_SET +#define KX_SET 0 +#define KX_MAIN 24 +#define KX_PRE 4 +#define KX_POST 16 +#endif + + +#ifndef KX4_TXRX_PIN +#define KX4_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif +#ifndef KR_TXRX_PIN +#define KR_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif +#ifndef SFI_TXRX_PIN +#define SFI_TXRX_PIN 0 /*rx : 0xf tx : 0xf0 */ +#endif + +#ifndef KX_SGMII +#define KX_SGMII 0 /* 1 0x18090 :0xcf00 */ +#endif + +#ifndef KR_NORESET +#define KR_NORESET 0 +#endif + +#ifndef KR_CL72_TRAINING +#define KR_CL72_TRAINING 1 +#endif + +#ifndef KR_REINITED +#define KR_REINITED 1 +#endif + +#ifndef KR_AN73_PRESET +#define KR_AN73_PRESET 1 +#endif + +#ifndef BOND_CHECK_LINK_MODE +#define BOND_CHECK_LINK_MODE 0 +#endif + +/* Ether Types */ +#define TXGBE_ETH_P_LLDP 0x88CC +#define TXGBE_ETH_P_CNM 0x22E7 + +/* TX/RX descriptor defines */ +#if defined(DEFAULT_TXD) || defined(DEFAULT_TX_WORK) +#define TXGBE_DEFAULT_TXD DEFAULT_TXD +#define TXGBE_DEFAULT_TX_WORK DEFAULT_TX_WORK +#else +#define TXGBE_DEFAULT_TXD 512 +#define TXGBE_DEFAULT_TX_WORK 256 +#endif +#define TXGBE_MAX_TXD 8192 +#define TXGBE_MIN_TXD 128 + +#if (PAGE_SIZE < 8192) +#define 
TXGBE_DEFAULT_RXD 512 +#define TXGBE_DEFAULT_RX_WORK 256 +#else +#define TXGBE_DEFAULT_RXD 256 +#define TXGBE_DEFAULT_RX_WORK 128 +#endif + +#define TXGBE_MAX_RXD 8192 +#define TXGBE_MIN_RXD 128 + +#define TXGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define TXGBE_MIN_FCRTL 0x40 +#define TXGBE_MAX_FCRTL 0x7FF80 +#define TXGBE_MIN_FCRTH 0x600 +#define TXGBE_MAX_FCRTH 0x7FFF0 +#if defined(DEFAULT_FCPAUSE) +#define TXGBE_DEFAULT_FCPAUSE DEFAULT_FCPAUSE /*0x3800*/ +#else +#define TXGBE_DEFAULT_FCPAUSE 0xFFFF +#endif +#define TXGBE_MIN_FCPAUSE 0 +#define TXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define TXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define TXGBE_RXBUFFER_2K 2048 +#define TXGBE_RXBUFFER_3K 3072 +#define TXGBE_RXBUFFER_4K 4096 +#define TXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#define TXGBE_BP_M_NULL 0 +#define TXGBE_BP_M_SFI 1 +#define TXGBE_BP_M_KR 2 +#define TXGBE_BP_M_KX4 3 +#define TXGBE_BP_M_KX 4 +#define TXGBE_BP_M_NAUTO 0 +#define TXGBE_BP_M_AUTO 1 + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define TXGBE_RX_HDR_SIZE TXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define TXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define TXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +/* assume the kernel supports 8021p to avoid stripping vlan tags */ +#ifndef HAVE_8021P_SUPPORT +#define HAVE_8021P_SUPPORT +#endif + +enum txgbe_tx_flags { + /* cmd_type flags */ + TXGBE_TX_FLAGS_HW_VLAN = 0x01, + TXGBE_TX_FLAGS_TSO = 0x02, + TXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + TXGBE_TX_FLAGS_CC = 0x08, + TXGBE_TX_FLAGS_IPV4 = 0x10, + TXGBE_TX_FLAGS_CSUM = 0x20, + TXGBE_TX_FLAGS_OUTER_IPV4 = 0x100, + TXGBE_TX_FLAGS_LINKSEC = 0x200, + TXGBE_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + TXGBE_TX_FLAGS_SW_VLAN = 0x40, + TXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define TXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define TXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define TXGBE_MAX_RX_DESC_POLL 10 + +#define TXGBE_MAX_VF_MC_ENTRIES 30 +#define TXGBE_MAX_VF_FUNCTIONS 64 +#define MAX_EMULATION_MAC_ADDRS 16 +#define TXGBE_MAX_PF_MACVLANS 15 +#define TXGBE_VF_DEVICE_ID 0x1000 + +/* must account for pools assigned to VFs. 
*/ +#define VMDQ_P(p) (p) + + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = rd32(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = rd32(hw, reg_lsb); \ + u64 current_counter_msb = rd32(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } + +struct vf_stats { + u64 gprc; + u64 gorc; + u64 gptc; + u64 gotc; + u64 mprc; +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + u8 __iomem *b4_addr; + u32 b4_buf[16]; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + struct vf_stats vfstats; + struct vf_stats last_vfstats; + struct vf_stats saved_rst_vfstats; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + u8 trusted; + int xcast_mode; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define TXGBE_MAX_TXD_PWR 14 +#define TXGBE_MAX_DATA_PER_TXD (1 << TXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), TXGBE_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct txgbe_tx_buffer { + union txgbe_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct txgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + dma_addr_t page_dma; + struct page *page; + unsigned int page_offset; +}; + +struct txgbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct txgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct txgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_good_cnt; + u64 csum_err; +}; + +#define TXGBE_TS_HDR_LEN 8 +enum txgbe_ring_state_t { + __TXGBE_RX_3K_BUFFER, + __TXGBE_RX_BUILD_SKB_ENABLED, + __TXGBE_TX_FDIR_INIT_DONE, + __TXGBE_TX_XPS_INIT_DONE, + __TXGBE_TX_DETECT_HANG, + __TXGBE_HANG_CHECK_ARMED, + __TXGBE_RX_HS_ENABLED, + __TXGBE_RX_RSC_ENABLED, +}; + +struct txgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *vdev; + struct txgbe_adapter *adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int index; /* pool index on PF */ +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__TXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) 
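+
+/* The ring state helpers above and below are thin wrappers around
+ * test_bit()/set_bit()/clear_bit() on &ring->state, using the __TXGBE_*
+ * enum values as bit indices.  A minimal usage sketch (illustrative only;
+ * rx_ring is a hypothetical struct txgbe_ring * for the example):
+ *
+ *	if (ring_is_rsc_enabled(rx_ring))
+ *		clear_ring_rsc_enabled(rx_ring);
+ */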
+ +#define ring_is_hs_enabled(ring) \ + test_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define set_ring_hs_enabled(ring) \ + set_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define clear_ring_hs_enabled(ring) \ + clear_bit(__TXGBE_RX_HS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__TXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__TXGBE_RX_RSC_ENABLED, &(ring)->state) + +struct txgbe_ring { + struct txgbe_ring *next; /* pointer to next ring in q_vector */ + struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct txgbe_fwd_adapter *accel; + void *desc; /* descriptor ring memory */ + union { + struct txgbe_tx_buffer *tx_buffer_info; + struct txgbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + unsigned long last_rx_timestamp; + u16 rx_buf_len; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct txgbe_queue_stats stats; + struct u64_stats_sync syncp; + + union { + struct txgbe_tx_queue_stats tx_stats; + struct txgbe_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +enum txgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define TXGBE_MAX_DCB_INDICES 8 +#define TXGBE_MAX_RSS_INDICES 63 +#define TXGBE_MAX_VMDQ_INDICES 64 +#define TXGBE_MAX_FDIR_INDICES 63 + +#define MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) + +#define TXGBE_MAX_L2A_QUEUES 4 +#define TXGBE_BAD_L2A_QUEUE 3 + +#define TXGBE_MAX_MACVLANS 32 +#define TXGBE_MAX_DCBMACVLANS 8 + +struct txgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +#define TXGBE_VMDQ_8Q_MASK 0x78 +#define TXGBE_VMDQ_4Q_MASK 0x7C +#define TXGBE_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. 
+ */ +static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring) +{ +#if MAX_SKB_FRAGS < 8 + return ALIGN(TXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); +#else + return TXGBE_RXBUFFER_2K; +#endif +} + +static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring) +{ + return 0; +} +#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring)) + +struct txgbe_ring_container { + struct txgbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define txgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct txgbe_q_vector { + struct txgbe_adapter *adapter; + int cpu; /* CPU for DCA */ + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct txgbe_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 17]; + bool netpoll_rx; + + /* for dynamic allocation of rings associated with this q_vector */ + struct txgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define TXGBE_MIN_RSC_ITR 24 +#define TXGBE_100K_ITR 40 +#define TXGBE_20K_ITR 200 +#define TXGBE_16K_ITR 248 +#define TXGBE_12K_ITR 336 + +/* txgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 txgbe_test_staterr(union txgbe_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* txgbe_desc_unused - calculate if we have unused descriptors */ +static inline u16 txgbe_desc_unused(struct txgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define TXGBE_RX_DESC(R, i) \ + (&(((union txgbe_rx_desc *)((R)->desc))[i])) +#define TXGBE_TX_DESC(R, i) \ + (&(((union txgbe_tx_desc *)((R)->desc))[i])) +#define TXGBE_TX_CTXTDESC(R, i) \ + (&(((struct txgbe_tx_context_desc *)((R)->desc))[i])) + +#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ + +#define TCP_TIMER_VECTOR 0 +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +#define TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE 64 + +struct txgbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + +#define TXGBE_MAC_STATE_DEFAULT 0x1 +#define TXGBE_MAC_STATE_MODIFIED 0x2 +#define TXGBE_MAC_STATE_IN_USE 0x4 + +/* + * Only for array allocations in our adapter struct. + * we can actually assign 64 queue vectors based on our extended-extended + * interrupt registers. 
+ */ +#define MAX_MSIX_Q_VECTORS TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE +#define MAX_MSIX_COUNT TXGBE_MAX_MSIX_VECTORS_SAPPHIRE + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define TXGBE_TRY_LINK_TIMEOUT (4 * HZ) +#define TXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + +/** + * txgbe_adapter.flag + **/ +#define TXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define TXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define TXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define TXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#define TXGBE_FLAG_LLI_PUSH (u32)(1 << 4) + +#define TXGBE_FLAG_TPH_ENABLED (u32)(1 << 6) +#define TXGBE_FLAG_TPH_CAPABLE (u32)(1 << 7) +#define TXGBE_FLAG_TPH_ENABLED_DATA (u32)(1 << 8) + +#define TXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) +#define TXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) +#define TXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) +#define TXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) +#define TXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) +#define TXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) +#define TXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) +#define TXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) +#define TXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) +#define TXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) +#define TXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) +#define TXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) +#define TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) +#define TXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) +#define TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) +#define TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) +#define TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) +#define TXGBE_FLAG_NEED_ETH_PHY_RESET (u32)(1 << 28) +#define TXGBE_FLAG_RX_HS_ENABLED (u32)(1 << 30) +#define TXGBE_FLAG_LINKSEC_ENABLED (u32)(1 << 31) +#define TXGBE_FLAG_IPSEC_ENABLED (u32)(1 << 5) + +/* preset defaults */ +#define TXGBE_FLAGS_SP_INIT (TXGBE_FLAG_MSI_CAPABLE \ + | TXGBE_FLAG_MSIX_CAPABLE \ + | TXGBE_FLAG_MQ_CAPABLE \ + | TXGBE_FLAG_SRIOV_CAPABLE) + +/** + * txgbe_adapter.flag2 + **/ +#define TXGBE_FLAG2_RSC_CAPABLE (1U << 0) +#define TXGBE_FLAG2_RSC_ENABLED (1U << 1) +#define TXGBE_FLAG2_TEMP_SENSOR_CAPABLE (1U << 3) +#define TXGBE_FLAG2_TEMP_SENSOR_EVENT (1U << 4) +#define TXGBE_FLAG2_SEARCH_FOR_SFP (1U << 5) +#define TXGBE_FLAG2_SFP_NEEDS_RESET (1U << 6) +#define TXGBE_FLAG2_PF_RESET_REQUESTED (1U << 7) +#define TXGBE_FLAG2_FDIR_REQUIRES_REINIT (1U << 8) +#define TXGBE_FLAG2_RSS_FIELD_IPV4_UDP (1U << 9) +#define TXGBE_FLAG2_RSS_FIELD_IPV6_UDP (1U << 10) +#define TXGBE_FLAG2_RSS_ENABLED (1U << 12) +#define TXGBE_FLAG2_PTP_PPS_ENABLED (1U << 11) +#define TXGBE_FLAG2_EEE_CAPABLE (1U << 14) +#define TXGBE_FLAG2_EEE_ENABLED (1U << 15) +#define TXGBE_FLAG2_VXLAN_REREG_NEEDED (1U << 16) +#define TXGBE_FLAG2_DEV_RESET_REQUESTED (1U << 18) +#define TXGBE_FLAG2_RESET_INTR_RECEIVED (1U << 19) +#define TXGBE_FLAG2_GLOBAL_RESET_REQUESTED (1U << 20) +#define TXGBE_FLAG2_CLOUD_SWITCH_ENABLED (1U << 21) +#define TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED (1U << 22) +#define KR (1U << 23) +#define TXGBE_FLAG2_KR_TRAINING (1U << 24) +#define TXGBE_FLAG2_KR_AUTO (1U << 25) +#define TXGBE_FLAG2_LINK_DOWN (1U << 26) +#define TXGBE_FLAG2_KR_PRO_DOWN (1U << 27) +#define TXGBE_FLAG2_KR_PRO_REINIT (1U << 28) +#define TXGBE_FLAG2_PCIE_NEED_RECOVER (1U << 31) + + +#define TXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +enum txgbe_isb_idx { + TXGBE_ISB_HEADER, + TXGBE_ISB_MISC, + TXGBE_ISB_VEC0, + TXGBE_ISB_VEC1, + TXGBE_ISB_MAX +}; + +/* board specific private data structure */ +struct txgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; + u32 flags2; + u32 vf_mode; + u32 backplane_an; + u32 an73; + u32 an37; + u32 ffe_main; + u32 ffe_pre; + u32 ffe_post; + u32 ffe_set; + u32 backplane_mode; + u32 backplane_auto; + + bool cloud_mode; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + unsigned int num_vmdqs; /* does not include pools assigned to VFs */ + unsigned int queues_per_pool; + + /* TX */ + struct txgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct txgbe_ring *rx_ring[MAX_RX_QUEUES]; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct txgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + u8 dcb_set_bitmap; + u8 dcbx_cap; + enum txgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + struct txgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u64 test_icr; + struct txgbe_ring test_tx_ring; + struct txgbe_ring test_rx_ring; + + /* structs defined in txgbe_hw.h */ + struct txgbe_hw hw; + u16 msg_enable; + struct txgbe_hw_stats stats; + u32 lli_port; + u32 lli_size; + u32 lli_etype; + u32 lli_vlan_pri; + + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union txgbe_atr_input fdir_mask; + int fdir_filter_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; + u16 bridge_mode; + + char eeprom_id[32]; + u16 eeprom_cap; + bool netdev_registered; + u32 interrupt_event; + u32 led_reg; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; + struct cyclecounter hw_cc; + struct timecounter hw_tc; + u32 base_incval; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + void (*ptp_setup_sdp) (struct txgbe_adapter *); + + DECLARE_BITMAP(active_vfs, TXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + struct txgbe_mac_addr *mac_table; + + __le16 vxlan_port; + __le16 
geneve_port; + + u8 default_up; + + unsigned long fwd_bitmask; /* bitmask indicating in use pools */ + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; + +#define TXGBE_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[TXGBE_MAX_RETA_ENTRIES]; +#define TXGBE_RSS_KEY_SIZE 40 + u32 rss_key[TXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + void *ipsec; + + /* misc interrupt status block */ + dma_addr_t isb_dma; + u32 *isb_mem; + u32 isb_tag[TXGBE_ISB_MAX]; +}; + +static inline u32 txgbe_misc_isb(struct txgbe_adapter *adapter, + enum txgbe_isb_idx idx) +{ + u32 cur_tag = 0; + u32 cur_diff = 0; + + cur_tag = adapter->isb_mem[TXGBE_ISB_HEADER]; + cur_diff = cur_tag - adapter->isb_tag[idx]; + + adapter->isb_tag[idx] = cur_tag; + + return adapter->isb_mem[idx]; +} + +static inline u8 txgbe_max_rss_indices(struct txgbe_adapter *adapter) +{ + return TXGBE_MAX_RSS_INDICES; +} + +struct txgbe_fdir_filter { + struct hlist_node fdir_node; + union txgbe_atr_input filter; + u16 sw_idx; + u16 action; +}; + +enum txgbe_state_t { + __TXGBE_TESTING, + __TXGBE_RESETTING, + __TXGBE_DOWN, + __TXGBE_HANGING, + __TXGBE_DISABLED, + __TXGBE_REMOVING, + __TXGBE_SERVICE_SCHED, + __TXGBE_SERVICE_INITED, + __TXGBE_IN_SFP_INIT, + __TXGBE_PTP_RUNNING, + __TXGBE_PTP_TX_IN_PROGRESS, +}; + +struct txgbe_cb { + dma_addr_t dma; + u16 append_cnt; /* number of skb's appended */ + bool page_released; + bool dma_released; +}; +#define TXGBE_CB(skb) ((struct txgbe_cb *)(skb)->cb) + +/* ESX txgbe CIM IOCTL definition */ + +extern struct dcbnl_rtnl_ops dcbnl_ops; +int txgbe_copy_dcb_cfg(struct txgbe_adapter *adapter, int tc_max); + +u8 txgbe_dcb_txq_to_tc(struct txgbe_adapter *adapter, u8 index); + +/* needed by txgbe_main.c */ +int txgbe_validate_mac_addr(u8 *mc_addr); +void txgbe_check_options(struct txgbe_adapter *adapter); +void txgbe_assign_netdev_ops(struct net_device *netdev); + +/* needed by txgbe_ethtool.c */ +extern char txgbe_driver_name[]; +extern const char txgbe_driver_version[]; + +void txgbe_irq_disable(struct txgbe_adapter *adapter); +void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush); +int txgbe_open(struct net_device *netdev); +int txgbe_close(struct net_device *netdev); +void txgbe_up(struct txgbe_adapter *adapter); +void txgbe_down(struct txgbe_adapter *adapter); +void txgbe_reinit_locked(struct txgbe_adapter *adapter); +void txgbe_reset(struct txgbe_adapter *adapter); +void txgbe_set_ethtool_ops(struct net_device *netdev); +int txgbe_setup_rx_resources(struct txgbe_ring *); +int txgbe_setup_tx_resources(struct txgbe_ring *); +void txgbe_free_rx_resources(struct txgbe_ring *); +void txgbe_free_tx_resources(struct txgbe_ring *); +void txgbe_configure_rx_ring(struct txgbe_adapter *, + struct txgbe_ring *); +void txgbe_configure_tx_ring(struct txgbe_adapter *, + struct txgbe_ring *); +void txgbe_update_stats(struct txgbe_adapter *adapter); +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter); +void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter); +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter); +bool txgbe_is_txgbe(struct pci_dev *pcidev); +netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *, + struct txgbe_adapter *, + struct txgbe_ring *); +void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *, + struct txgbe_tx_buffer *); +void txgbe_alloc_rx_buffers(struct txgbe_ring *, u16); +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void 
txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void txgbe_clear_vxlan_port(struct txgbe_adapter *); +void txgbe_set_rx_mode(struct net_device *netdev); +int txgbe_write_mc_addr_list(struct net_device *netdev); +int txgbe_setup_tc(struct net_device *dev, u8 tc); +void txgbe_tx_ctxtdesc(struct txgbe_ring *, u32, u32, u32, u32); +void txgbe_do_reset(struct net_device *netdev); +void txgbe_write_eitr(struct txgbe_q_vector *q_vector); +int txgbe_poll(struct napi_struct *napi, int budget); +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *); +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter); +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter); + +void txgbe_dump(struct txgbe_adapter *adapter); + +static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int txgbe_wol_supported(struct txgbe_adapter *adapter); +int txgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +int txgbe_write_uc_addr_list(struct net_device *netdev, int pool); +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter); +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool); +int txgbe_available_rars(struct txgbe_adapter *adapter); +void txgbe_vlan_mode(struct net_device *, u32); + +void txgbe_ptp_init(struct txgbe_adapter *adapter); +void txgbe_ptp_stop(struct txgbe_adapter *adapter); +void txgbe_ptp_suspend(struct txgbe_adapter *adapter); +void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter); +void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb); +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr); +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter); +void txgbe_ptp_reset(struct txgbe_adapter *adapter); +void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter); + +void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter); + +u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter); +void txgbe_store_reta(struct txgbe_adapter *adapter); + +/** + * interrupt masking operations. each bit in PX_ICn correspond to a interrupt. 
+ * disable a interrupt by writing to PX_IMS with the corresponding bit=1 + * enable a interrupt by writing to PX_IMC with the corresponding bit=1 + * trigger a interrupt by writing to PX_ICS with the corresponding bit=1 + **/ +#define TXGBE_INTR_ALL (~0ULL) +#define TXGBE_INTR_MISC(A) (1ULL << (A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#define TXGBE_INTR_Q(i) (1ULL << (i)) +static inline void txgbe_intr_enable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMC(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMC(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_disable(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_IMS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_IMS(1), mask); + + /* skip the flush */ +} + +static inline void txgbe_intr_trigger(struct txgbe_hw *hw, u64 qmask) +{ + u32 mask; + + mask = (qmask & 0xFFFFFFFF); + if (mask) + wr32(hw, TXGBE_PX_ICS(0), mask); + mask = (qmask >> 32); + if (mask) + wr32(hw, TXGBE_PX_ICS(1), mask); + + /* skip the flush */ +} + +#define TXGBE_RING_SIZE(R) ((R)->count < TXGBE_MAX_TXD ? (R)->count / 128 : 0) + +/* move from txgbe_osdep.h */ +#define TXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) +#define TXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) +#define TXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) +#define TXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) + +#define msec_delay(_x) msleep(_x) + +#define usec_delay(_x) udelay(_x) + +#define STATIC static + +#define TXGBE_NAME "txgbe" + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel TXGBE_NAME ": %s: %s: " fmt, \ + adapter->netdev->name, \ + __func__, ## args))) + +#ifndef _WIN32 +#define txgbe_emerg(fmt, ...) printk(KERN_EMERG fmt, ## __VA_ARGS__) +#define txgbe_alert(fmt, ...) printk(KERN_ALERT fmt, ## __VA_ARGS__) +#define txgbe_crit(fmt, ...) printk(KERN_CRIT fmt, ## __VA_ARGS__) +#define txgbe_error(fmt, ...) printk(KERN_ERR fmt, ## __VA_ARGS__) +#define txgbe_warn(fmt, ...) printk(KERN_WARNING fmt, ## __VA_ARGS__) +#define txgbe_notice(fmt, ...) printk(KERN_NOTICE fmt, ## __VA_ARGS__) +#define txgbe_info(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#define txgbe_print(fmt, ...) printk(KERN_DEBUG fmt, ## __VA_ARGS__) +#define txgbe_trace(fmt, ...) printk(KERN_INFO fmt, ## __VA_ARGS__) +#else /* _WIN32 */ +#define txgbe_error(lvl, fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-error: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* !_WIN32 */ + +#ifdef DBG +#ifndef _WIN32 +#define txgbe_debug(fmt, ...) \ + printk(KERN_DEBUG \ + "%s-debug: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#else /* _WIN32 */ +#define txgbe_debug(fmt, ...) \ + DbgPrintEx(DPFLTR_IHVNETWORK_ID, DPFLTR_ERROR_LEVEL, \ + "%s-debug: %s@%d, " fmt, \ + "txgbe", __FUNCTION__, __LINE__, ## __VA_ARGS__) +#endif /* _WIN32 */ +#else /* DBG */ +#define txgbe_debug(fmt, ...) do {} while (0) +#endif /* DBG */ + + +#ifdef DBG +#define ASSERT(_x) BUG_ON(!(_x)) +#define DEBUGOUT(S) printk(KERN_DEBUG S) +#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT3(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGOUT5(S, A...) 
printk(KERN_DEBUG S, ## A) +#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) +#define DEBUGFUNC(fmt, ...) txgbe_debug(fmt, ## __VA_ARGS__) +#else +#define ASSERT(_x) do {} while (0) +#define DEBUGOUT(S) do {} while (0) +#define DEBUGOUT1(S, A...) do {} while (0) +#define DEBUGOUT2(S, A...) do {} while (0) +#define DEBUGOUT3(S, A...) do {} while (0) +#define DEBUGOUT4(S, A...) do {} while (0) +#define DEBUGOUT5(S, A...) do {} while (0) +#define DEBUGOUT6(S, A...) do {} while (0) +#define DEBUGFUNC(fmt, ...) do {} while (0) +#endif + + +struct txgbe_msg { + u16 msg_enable; +}; + +__attribute__((unused)) static struct net_device *txgbe_hw_to_netdev(const struct txgbe_hw *hw) +{ + return ((struct txgbe_adapter *)hw->back)->netdev; +} + +__attribute__((unused)) static struct txgbe_msg *txgbe_hw_to_msg(const struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = + container_of(hw, struct txgbe_adapter, hw); + return (struct txgbe_msg *)&adapter->msg_enable; +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(txgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(txgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) + +#define TXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define TXGBE_FAILED_READ_CFG_WORD 0xffffU +#define TXGBE_FAILED_READ_CFG_BYTE 0xffU + +extern u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet); +extern u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg); +extern void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value); + +#define TXGBE_R32_Q(h, r) txgbe_read_reg(h, r, true) + +#define TXGBE_EEPROM_GRANT_ATTEMPS 100 +#define TXGBE_HTONL(_i) htonl(_i) +#define TXGBE_NTOHL(_i) ntohl(_i) +#define TXGBE_NTOHS(_i) ntohs(_i) +#define TXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) +#define TXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) + +enum { + TXGBE_ERROR_SOFTWARE, + TXGBE_ERROR_POLLING, + TXGBE_ERROR_INVALID_STATE, + TXGBE_ERROR_UNSUPPORTED, + TXGBE_ERROR_ARGUMENT, + TXGBE_ERROR_CAUTION, +}; + +#define ERROR_REPORT(level, format, arg...) 
do { \ + switch (level) { \ + case TXGBE_ERROR_SOFTWARE: \ + case TXGBE_ERROR_CAUTION: \ + case TXGBE_ERROR_POLLING: \ + netif_warn(txgbe_hw_to_msg(hw), drv, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + case TXGBE_ERROR_INVALID_STATE: \ + case TXGBE_ERROR_UNSUPPORTED: \ + case TXGBE_ERROR_ARGUMENT: \ + netif_err(txgbe_hw_to_msg(hw), hw, txgbe_hw_to_netdev(hw), \ + format, ## arg); \ + break; \ + default: \ + break; \ + } \ +} while (0) + +#define ERROR_REPORT1 ERROR_REPORT +#define ERROR_REPORT2 ERROR_REPORT +#define ERROR_REPORT3 ERROR_REPORT + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) do { \ + uninitialized_var(_p); \ +} while (0) +#define UNREFERENCED_2PARAMETER(_p, _q) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ +} while (0) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ +} while (0) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ + uninitialized_var(_p); \ + uninitialized_var(_q); \ + uninitialized_var(_r); \ + uninitialized_var(_s); \ +} while (0) +#define UNREFERENCED_PARAMETER(_p) UNREFERENCED_1PARAMETER(_p) + +/* end of txgbe_osdep.h */ + +#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_bp.c b/drivers/net/ethernet/netswift/txgbe/txgbe_bp.c new file mode 100644 index 0000000000000000000000000000000000000000..68d465da2eee89aa904198e57bfb5bd98b07a65e --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_bp.c @@ -0,0 +1,875 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#include "txgbe_bp.h" + +int Handle_bkp_an73_flow(unsigned char byLinkMode, struct txgbe_adapter *adapter); +int WaitBkpAn73XnpDone(struct txgbe_adapter *adapter); +int GetBkpAn73Ability(bkpan73ability *ptBkpAn73Ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter); +int Get_bkp_an73_ability(bkpan73ability *ptBkpAn73Ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter); +int ClearBkpAn73Interrupt(unsigned int intIndex, unsigned int intIndexHi, struct txgbe_adapter *adapter); +int CheckBkpAn73Interrupt(unsigned int intIndex, struct txgbe_adapter *adapter); +int Check_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability, + struct txgbe_adapter *adapter); + +void txgbe_bp_close_protect(struct txgbe_adapter *adapter) +{ + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_DOWN; + if (adapter->flags2 & TXGBE_FLAG2_KR_PRO_REINIT) { + msleep(100); + printk("wait to reinited ok..%x\n", adapter->flags2); + } +} + +int txgbe_bp_mode_setting(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + /*default to open an73*/ + + adapter->backplane_an = AUTO ? 1 : 0; + adapter->an37 = AUTO ? 
1 : 0; + + if (adapter->backplane_mode == TXGBE_BP_M_KR) { + hw->subsystem_device_id = TXGBE_ID_WX1820_KR_KX_KX4; + hw->subsystem_id = TXGBE_ID_WX1820_KR_KX_KX4; + } else if (adapter->backplane_mode == TXGBE_BP_M_KX4) { + hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_XAUI; + hw->subsystem_id = TXGBE_ID_WX1820_MAC_XAUI; + } else if (adapter->backplane_mode == TXGBE_BP_M_KX) { + hw->subsystem_device_id = TXGBE_ID_WX1820_MAC_SGMII; + hw->subsystem_id = TXGBE_ID_WX1820_MAC_SGMII; + } else if (adapter->backplane_mode == TXGBE_BP_M_SFI) { + hw->subsystem_device_id = TXGBE_ID_WX1820_SFP; + hw->subsystem_id = TXGBE_ID_WX1820_SFP; + } + + if (adapter->backplane_auto == TXGBE_BP_M_AUTO) { + adapter->backplane_an = 1; + adapter->an37 = 1; + } else if (adapter->backplane_auto == TXGBE_BP_M_NAUTO) { + adapter->backplane_an = 0; + adapter->an37 = 0; + } + + if (adapter->ffe_set == TXGBE_BP_M_KR || + adapter->ffe_set == TXGBE_BP_M_KX4 || + adapter->ffe_set == TXGBE_BP_M_KX || + adapter->ffe_set == TXGBE_BP_M_SFI) { + goto out; + } + + if (KR_SET == 1) { + adapter->ffe_main = KR_MAIN; + adapter->ffe_pre = KR_PRE; + adapter->ffe_post = KR_POST; + } else if (KX4_SET == 1) { + adapter->ffe_main = KX4_MAIN; + adapter->ffe_pre = KX4_PRE; + adapter->ffe_post = KX4_POST; + } else if (KX_SET == 1) { + adapter->ffe_main = KX_MAIN; + adapter->ffe_pre = KX_PRE; + adapter->ffe_post = KX_POST; + } else if (SFI_SET == 1) { + adapter->ffe_main = SFI_MAIN; + adapter->ffe_pre = SFI_PRE; + adapter->ffe_post = SFI_POST; + } +out: + return 0; +} + +static int txgbe_kr_subtask(struct txgbe_adapter *adapter) +{ + Handle_bkp_an73_flow(0, adapter); + return 0; +} + +void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter) +{ + u32 value = 0; + struct txgbe_hw *hw = &adapter->hw; + + if (KR_POLLING == 1) { + value = txgbe_rd32_epcs(hw, 0x78002); + value = value & 0x4; + if (value == 0x4) { + e_dev_info("Enter training\n"); + txgbe_kr_subtask(adapter); + } + } else { + if (adapter->flags2 & TXGBE_FLAG2_KR_TRAINING) { + e_dev_info("Enter training\n"); + txgbe_kr_subtask(adapter); + adapter->flags2 &= ~TXGBE_FLAG2_KR_TRAINING; + } + } +} + +void txgbe_bp_down_event(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + if (adapter->backplane_an == 1) { + if (KR_NORESET == 1) { + txgbe_wr32_epcs(hw, 0x78003, 0x0000); + txgbe_wr32_epcs(hw, 0x70000, 0x0000); + txgbe_wr32_epcs(hw, 0x78001, 0x0000); + msleep(1050); + txgbe_set_link_to_kr(hw, 1); + } else if (KR_REINITED == 1) { + txgbe_wr32_epcs(hw, 0x78003, 0x0000); + txgbe_wr32_epcs(hw, 0x70000, 0x0000); + txgbe_wr32_epcs(hw, 0x78001, 0x0000); + txgbe_wr32_epcs(hw, 0x18035, 0x00FF); + txgbe_wr32_epcs(hw, 0x18055, 0x00FF); + msleep(1050); + txgbe_wr32_epcs(hw, 0x78003, 0x0001); + txgbe_wr32_epcs(hw, 0x70000, 0x3200); + txgbe_wr32_epcs(hw, 0x78001, 0x0007); + txgbe_wr32_epcs(hw, 0x18035, 0x00FC); + txgbe_wr32_epcs(hw, 0x18055, 0x00FC); + } else { + msleep(1000); + if (!(adapter->flags2&TXGBE_FLAG2_KR_PRO_DOWN)) { + adapter->flags2 |= TXGBE_FLAG2_KR_PRO_REINIT; + txgbe_reinit_locked(adapter); + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_REINIT; + } + } + } +} + +int txgbe_kr_intr_handle(struct txgbe_adapter *adapter) +{ + bkpan73ability tBkpAn73Ability, tLpBkpAn73Ability; + tBkpAn73Ability.currentLinkMode = 0; + + if (KR_MODE) { + e_dev_info("HandleBkpAn73Flow() \n"); + e_dev_info("---------------------------------\n"); + } + + /*1. Get the local AN73 Base Page Ability*/ + if (KR_MODE) + e_dev_info("<1>. 
Get the local AN73 Base Page Ability ...\n"); + GetBkpAn73Ability(&tBkpAn73Ability, 0, adapter); + + /*2. Check the AN73 Interrupt Status*/ + if (KR_MODE) + e_dev_info("<2>. Check the AN73 Interrupt Status ...\n"); + /*3.Clear the AN_PG_RCV interrupt*/ + ClearBkpAn73Interrupt(2, 0x0, adapter); + + /*3.1. Get the link partner AN73 Base Page Ability*/ + if (KR_MODE) + e_dev_info("<3.1>. Get the link partner AN73 Base Page Ability ...\n"); + Get_bkp_an73_ability(&tLpBkpAn73Ability, 1, adapter); + + /*3.2. Check the AN73 Link Ability with Link Partner*/ + if (KR_MODE) { + e_dev_info("<3.2>. Check the AN73 Link Ability with Link Partner ...\n"); + e_dev_info(" Local Link Ability: 0x%x\n", tBkpAn73Ability.linkAbility); + e_dev_info(" Link Partner Link Ability: 0x%x\n", tLpBkpAn73Ability.linkAbility); + } + Check_bkp_an73_ability(tBkpAn73Ability, tLpBkpAn73Ability, adapter); + + return 0; +} + +/*Check Ethernet Backplane AN73 Base Page Ability +**return value: +** -1 : none link mode matched, exit +** 0 : current link mode matched, wait AN73 to be completed +** 1 : current link mode not matched, set to matched link mode, re-start AN73 external +*/ +int Check_bkp_an73_ability(bkpan73ability tBkpAn73Ability, bkpan73ability tLpBkpAn73Ability, + struct txgbe_adapter *adapter) +{ + unsigned int comLinkAbility; + struct txgbe_hw *hw = &adapter->hw; + + if (KR_MODE) { + e_dev_info("CheckBkpAn73Ability():\n"); + e_dev_info("------------------------\n"); + } + + /*-- Check the common link ability and take action based on the result*/ + comLinkAbility = tBkpAn73Ability.linkAbility & tLpBkpAn73Ability.linkAbility; + if (KR_MODE) + e_dev_info("comLinkAbility= 0x%x, linkAbility= 0x%x, lpLinkAbility= 0x%x\n", + comLinkAbility, tBkpAn73Ability.linkAbility, tLpBkpAn73Ability.linkAbility); + + if (comLinkAbility == 0) { + if (KR_MODE) + e_dev_info("WARNING: The Link Partner does not support any compatible speed mode!!!\n\n"); + return -1; + } else if (comLinkAbility & 0x80) { + if (tBkpAn73Ability.currentLinkMode == 0) { + if (KR_MODE) + e_dev_info("Link mode is matched with Link Partner: [LINK_KR].\n"); + return 0; + } else { + if (KR_MODE) { + e_dev_info("Link mode is not matched with Link Partner: [LINK_KR].\n"); + e_dev_info("Set the local link mode to [LINK_KR] ...\n"); + } + txgbe_set_link_to_kr(hw, 1); + return 1; + } + } else if (comLinkAbility & 0x40) { + if (tBkpAn73Ability.currentLinkMode == 0x10) { + if (KR_MODE) + e_dev_info("Link mode is matched with Link Partner: [LINK_KX4].\n"); + return 0; + } else { + if (KR_MODE) { + e_dev_info("Link mode is not matched with Link Partner: [LINK_KX4].\n"); + e_dev_info("Set the local link mode to [LINK_KX4] ...\n"); + } + txgbe_set_link_to_kx4(hw, 1); + return 1; + } + } else if (comLinkAbility & 0x20) { + if (tBkpAn73Ability.currentLinkMode == 0x1) { + if (KR_MODE) + e_dev_info("Link mode is matched with Link Partner: [LINK_KX].\n"); + return 0; + } else { + if (KR_MODE) { + e_dev_info("Link mode is not matched with Link Partner: [LINK_KX].\n"); + e_dev_info("Set the local link mode to [LINK_KX] ...\n"); + } + txgbe_set_link_to_kx(hw, 1, 1); + return 1; + } + } + return 0; +} + + +/*Get Ethernet Backplane AN73 Base Page Ability +**byLinkPartner: +**- 1: Get Link Partner Base Page +**- 2: Get Link Partner Next Page (only get NXP Ability Register 1 at the moment) +**- 0: Get Local Device Base Page +*/ +int Get_bkp_an73_ability(bkpan73ability *ptBkpAn73Ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int rdata; 
+ struct txgbe_hw *hw = &adapter->hw; + + if (KR_MODE) { + e_dev_info("GetBkpAn73Ability(): byLinkPartner = %d\n", byLinkPartner); + e_dev_info("----------------------------------------\n"); + } + + if (byLinkPartner == 1) { /*Link Partner Base Page*/ + /*Read the link partner AN73 Base Page Ability Registers*/ + if (KR_MODE) + e_dev_info("Read the link partner AN73 Base Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70013); + if (KR_MODE) + e_dev_info("SR AN MMD LP Base Page Ability Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70014); + if (KR_MODE) + e_dev_info("SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata); + ptBkpAn73Ability->linkAbility = rdata & 0xE0; + if (KR_MODE) { + e_dev_info(" Link Ability (bit[15:0]): 0x%x\n", ptBkpAn73Ability->linkAbility); + e_dev_info(" (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + e_dev_info(" 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + } + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70015); + if (KR_MODE) { + e_dev_info("SR AN MMD LP Base Page Ability Register 3: 0x%x\n", rdata); + e_dev_info(" FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + e_dev_info(" FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + } + ptBkpAn73Ability->fecAbility = (rdata >> 14) & 0x03; + } else if (byLinkPartner == 2) {/*Link Partner Next Page*/ + /*Read the link partner AN73 Next Page Ability Registers*/ + if (KR_MODE) + e_dev_info("\nRead the link partner AN73 Next Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70019); + if (KR_MODE) + e_dev_info(" SR AN MMD LP XNP Ability Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + } else { + /*Read the local AN73 Base Page Ability Registers*/ + if (KR_MODE) + e_dev_info("\nRead the local AN73 Base Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70010); + if (KR_MODE) + e_dev_info("SR AN MMD Advertisement Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70011); + if (KR_MODE) + e_dev_info("SR AN MMD Advertisement Register 2: 0x%x\n", rdata); + ptBkpAn73Ability->linkAbility = rdata & 0xE0; + if (KR_MODE) { + e_dev_info(" Link Ability (bit[15:0]): 0x%x\n", ptBkpAn73Ability->linkAbility); + e_dev_info(" (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + e_dev_info(" 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + } + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70012); + if (KR_MODE) { + e_dev_info("SR AN MMD Advertisement Register 3: 0x%x\n", rdata); + e_dev_info(" FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + e_dev_info(" FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + } + ptBkpAn73Ability->fecAbility = (rdata >> 14) & 0x03; + } /*if (byLinkPartner == 1) Link Partner Base Page*/ + + if (KR_MODE) + e_dev_info("GetBkpAn73Ability() done.\n"); + + return status; +} + + +/*Get Ethernet Backplane AN73 Base Page Ability +**byLinkPartner: +**- 1: Get Link Partner Base Page +**- 2: Get Link Partner Next Page (only get NXP Ability Register 1 at the moment) +**- 0: Get Local Device Base Page +*/ +int GetBkpAn73Ability(bkpan73ability 
*ptBkpAn73Ability, unsigned char byLinkPartner, + struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int rdata; + struct txgbe_hw *hw = &adapter->hw; + + if (KR_MODE) { + e_dev_info("GetBkpAn73Ability(): byLinkPartner = %d\n", byLinkPartner); + e_dev_info("----------------------------------------\n"); + } + + if (byLinkPartner == 1) { //Link Partner Base Page + //Read the link partner AN73 Base Page Ability Registers + if (KR_MODE) + e_dev_info("Read the link partner AN73 Base Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70013); + if (KR_MODE) + e_dev_info("SR AN MMD LP Base Page Ability Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70014); + if (KR_MODE) + e_dev_info("SR AN MMD LP Base Page Ability Register 2: 0x%x\n", rdata); + ptBkpAn73Ability->linkAbility = rdata & 0xE0; + if (KR_MODE) { + e_dev_info(" Link Ability (bit[15:0]): 0x%x\n", ptBkpAn73Ability->linkAbility); + e_dev_info(" (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + e_dev_info(" 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + } + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70015); + printk("SR AN MMD LP Base Page Ability Register 3: 0x%x\n", rdata); + printk(" FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + printk(" FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + ptBkpAn73Ability->fecAbility = (rdata >> 14) & 0x03; + } else if (byLinkPartner == 2) { //Link Partner Next Page + //Read the link partner AN73 Next Page Ability Registers + if (KR_MODE) + e_dev_info("Read the link partner AN73 Next Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70019); + if (KR_MODE) + e_dev_info(" SR AN MMD LP XNP Ability Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + } else { + //Read the local AN73 Base Page Ability Registers + if (KR_MODE) + e_dev_info("Read the local AN73 Base Page Ability Registers...\n"); + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70010); + if (KR_MODE) + e_dev_info("SR AN MMD Advertisement Register 1: 0x%x\n", rdata); + ptBkpAn73Ability->nextPage = (rdata >> 15) & 0x01; + if (KR_MODE) + e_dev_info(" Next Page (bit15): %d\n", ptBkpAn73Ability->nextPage); + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70011); + if (KR_MODE) + e_dev_info("SR AN MMD Advertisement Register 2: 0x%x\n", rdata); + ptBkpAn73Ability->linkAbility = rdata & 0xE0; + if (KR_MODE) { + e_dev_info(" Link Ability (bit[15:0]): 0x%x\n", ptBkpAn73Ability->linkAbility); + e_dev_info(" (0x20- KX_ONLY, 0x40- KX4_ONLY, 0x60- KX4_KX\n"); + e_dev_info(" 0x80- KR_ONLY, 0xA0- KR_KX, 0xC0- KR_KX4, 0xE0- KR_KX4_KX)\n"); + } + + rdata = 0; + rdata = txgbe_rd32_epcs(hw, 0x70012); + if (KR_MODE) { + e_dev_info("SR AN MMD Advertisement Register 3: 0x%x\n", rdata); + e_dev_info(" FEC Request (bit15): %d\n", ((rdata >> 15) & 0x01)); + e_dev_info(" FEC Enable (bit14): %d\n", ((rdata >> 14) & 0x01)); + } + ptBkpAn73Ability->fecAbility = (rdata >> 14) & 0x03; + } + + if (KR_MODE) + e_dev_info("GetBkpAn73Ability() done.\n"); + + return status; +} + +/* DESCRIPTION: Set the source data fields[bitHigh:bitLow] with setValue +** INPUTS: *pSrcData: Source data pointer +** bitHigh: High bit position of the fields +** bitLow : Low bit position of the fields +** setValue: Set value of the 
fields +** OUTPUTS: return the updated source data +*/ +static void SetFields( + unsigned int *pSrcData, + unsigned int bitHigh, + unsigned int bitLow, + unsigned int setValue) +{ + int i; + + if (bitHigh == bitLow) { + if (setValue == 0) { + *pSrcData &= ~(1 << bitLow); + } else { + *pSrcData |= (1 << bitLow); + } + } else { + for (i = bitLow; i <= bitHigh; i++) { + *pSrcData &= ~(1 << i); + } + *pSrcData |= (setValue << bitLow); + } +} + +/*Check Ethernet Backplane AN73 Interrupt status +**- return the value of select interrupt index +*/ +int CheckBkpAn73Interrupt(unsigned int intIndex, struct txgbe_adapter *adapter) +{ + unsigned int rdata; + struct txgbe_hw *hw = &adapter->hw; + + if (KR_MODE) { + e_dev_info("CheckBkpAn73Interrupt(): intIndex = %d\n", intIndex); + e_dev_info("----------------------------------------\n"); + } + + rdata = 0x0000; + rdata = txgbe_rd32_epcs(hw, 0x78002); + if (KR_MODE) { + e_dev_info("Read VR AN MMD Interrupt Register: 0x%x\n", rdata); + e_dev_info("Interrupt: 0- AN_INT_CMPLT, 1- AN_INC_LINK, 2- AN_PG_RCV\n\n"); + } + + return ((rdata >> intIndex) & 0x01); +} + +/*Clear Ethernet Backplane AN73 Interrupt status +**- intIndexHi =0, only intIndex bit will be cleared +**- intIndexHi !=0, the [intIndexHi, intIndex] range will be cleared +*/ +int ClearBkpAn73Interrupt(unsigned int intIndex, unsigned int intIndexHi, struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int rdata, wdata; + struct txgbe_hw *hw = &adapter->hw; + + if (KR_MODE) { + e_dev_info("ClearBkpAn73Interrupt(): intIndex = %d\n", intIndex); + e_dev_info("----------------------------------------\n"); + } + + rdata = 0x0000; + rdata = txgbe_rd32_epcs(hw, 0x78002); + if (KR_MODE) + e_dev_info("[Before clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata); + + wdata = rdata; + if (intIndexHi) { + SetFields(&wdata, intIndexHi, intIndex, 0); + } else { + SetFields(&wdata, intIndex, intIndex, 0); + } + txgbe_wr32_epcs(hw, 0x78002, wdata); + + rdata = 0x0000; + rdata = txgbe_rd32_epcs(hw, 0x78002); + if (KR_MODE) { + e_dev_info("[After clear] Read VR AN MMD Interrupt Register: 0x%x\n", rdata); + e_dev_info("\n"); + } + + return status; +} + +int WaitBkpAn73XnpDone(struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int timer = 0; + bkpan73ability tLpBkpAn73Ability; + + /*while(timer++ < BKPAN73_TIMEOUT)*/ + while (timer++ < 20) { + if (CheckBkpAn73Interrupt(2, adapter)) { + /*Clear the AN_PG_RCV interrupt*/ + ClearBkpAn73Interrupt(2, 0, adapter); + + /*Get the link partner AN73 Next Page Ability*/ + Get_bkp_an73_ability(&tLpBkpAn73Ability, 2, adapter); + + /*Return when AN_LP_XNP_NP == 0, (bit[15]: Next Page)*/ + if (tLpBkpAn73Ability.nextPage == 0) { + return status; + } + } + msleep(200); + } /*while(timer++ < BKPAN73_TIMEOUT)*/ + if (KR_MODE) + e_dev_info("ERROR: Wait all the AN73 next pages to be exchanged Timeout!!!\n"); + + return -1; +} + +int ReadPhyLaneTxEq(unsigned short lane, struct txgbe_adapter *adapter, int post_t, int mode) +{ + int status = 0; + unsigned int addr, rdata; + struct txgbe_hw *hw = &adapter->hw; + u32 pre; + u32 post; + u32 lmain; + + /*LANEN_DIG_ASIC_TX_ASIC_IN_1[11:6]: TX_MAIN_CURSOR*/ + rdata = 0; + addr = 0x100E | (lane << 8); + rdata = rd32_ephy(hw, addr); + if (KR_MODE) { + e_dev_info("PHY LANE%0d TX EQ Read Value:\n", lane); + e_dev_info(" TX_MAIN_CURSOR: %d\n", ((rdata >> 6) & 0x3F)); + } + + /*LANEN_DIG_ASIC_TX_ASIC_IN_2[5 :0]: TX_PRE_CURSOR*/ + /*LANEN_DIG_ASIC_TX_ASIC_IN_2[11:6]: TX_POST_CURSOR*/ + rdata = 0; + addr = 0x100F | (lane << 8); 
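+	/* When mode is non-zero, the block further below rewrites the lane TX
+	 * EQ coefficients: the main cursor is recomputed so that
+	 * pre + main + post stays within the 160-step budget (with a floor of
+	 * 88 on the main cursor), and a non-zero post_t overrides the
+	 * post-cursor value read back from ASIC_IN_2.
+	 */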
+ rdata = rd32_ephy(hw, addr); + if (KR_MODE) { + e_dev_info(" TX_PRE_CURSOR : %d\n", (rdata & 0x3F)); + e_dev_info(" TX_POST_CURSOR: %d\n", ((rdata >> 6) & 0x3F)); + e_dev_info("**********************************************\n"); + } + + if (mode == 1) { + pre = (rdata & 0x3F); + post = ((rdata >> 6) & 0x3F); + if ((160 - pre -post) < 88) + lmain = 88; + else + lmain = 160 - pre - post; + if (post_t != 0) + post = post_t; + txgbe_wr32_epcs(hw, 0x1803b, post); + txgbe_wr32_epcs(hw, 0x1803a, pre | (lmain << 8)); + txgbe_wr32_epcs(hw, 0x18037, txgbe_rd32_epcs(hw, 0x18037) & 0xff7f); + } + if (KR_MODE) + e_dev_info("**********************************************\n"); + + return status; +} + + +/*Enable Clause 72 KR training +** +**Note: +**<1>. The Clause 72 start-up protocol should be initiated when all pages are exchanged during Clause 73 auto- +**negotiation and when the auto-negotiation process is waiting for link status to be UP for 500 ms after +**exchanging all the pages. +** +**<2>. The local device and link partner should be enabled the CL72 KR training +**with in 500ms +** +**enable: +**- bits[1:0] =2'b11: Enable the CL72 KR training +**- bits[1:0] =2'b01: Disable the CL72 KR training +*/ +int EnableCl72KrTr(unsigned int enable, struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int wdata = 0; + struct txgbe_hw *hw = &adapter->hw; + + if (enable == 1) { + if (KR_MODE) + e_dev_info("\nDisable Clause 72 KR Training ...\n"); + status |= ReadPhyLaneTxEq(0, adapter, 0, 0); + } else if (enable == 4) { + status |= ReadPhyLaneTxEq(0, adapter, 20, 1); + } else if (enable == 8) { + status |= ReadPhyLaneTxEq(0, adapter, 16, 1); + } else if (enable == 12) { + status |= ReadPhyLaneTxEq(0, adapter, 24, 1); + } else if (enable == 5) { + status |= ReadPhyLaneTxEq(0, adapter, 0, 1); + } else if (enable == 3) { + if (KR_MODE) + e_dev_info("\nEnable Clause 72 KR Training ...\n"); + + if (CL72_KRTR_PRBS_MODE_EN != 0xffff) { + /*Set PRBS Timer Duration Control to maximum 6.7ms in VR_PMA_KRTR_PRBS_CTRL1 Register*/ + wdata = CL72_KRTR_PRBS_MODE_EN; + txgbe_wr32_epcs(hw, 0x18005, wdata); + /*Set PRBS Timer Duration Control to maximum 6.7ms in VR_PMA_KRTR_PRBS_CTRL1 Register*/ + wdata = 0xFFFF; + txgbe_wr32_epcs(hw, 0x18004, wdata); + + /*Enable PRBS Mode to determine KR Training Status by setting Bit 0 of VR_PMA_KRTR_PRBS_CTRL0 Register*/ + wdata = 0; + SetFields(&wdata, 0, 0, 1); + } + +#ifdef CL72_KRTR_PRBS31_EN + /*Enable PRBS31 as the KR Training Pattern by setting Bit 1 of VR_PMA_KRTR_PRBS_CTRL0 Register*/ + SetFields(&wdata, 1, 1, 1); +#endif /*#ifdef CL72_KRTR_PRBS31_EN*/ + txgbe_wr32_epcs(hw, 0x18003, wdata); + status |= ReadPhyLaneTxEq(0, adapter, 0, 0); + } else { + if (KR_MODE) + e_dev_info("\nInvalid setting for Clause 72 KR Training!!!\n"); + return -1; + } + + /*Enable the Clause 72 start-up protocol by setting Bit 1 of SR_PMA_KR_PMD_CTRL Register. + **Restart the Clause 72 start-up protocol by setting Bit 0 of SR_PMA_KR_PMD_CTRL Register*/ + wdata = enable; + txgbe_wr32_epcs(hw, 0x10096, wdata); + return status; +} + +int CheckCl72KrTrStatus(struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int addr, rdata, rdata1; + unsigned int timer = 0, times = 0; + struct txgbe_hw *hw = &adapter->hw; + + times = KR_POLLING ? 
35 : 20; + + /*While loop to check clause 72 KR training status*/ + while (timer++ < times) { + //Get the latest received coefficient update or status + rdata = 0; + addr = 0x010098; + rdata = txgbe_rd32_epcs(hw, addr); + if (KR_MODE) + e_dev_info("SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n", rdata); + + rdata = 0; + addr = 0x010099; + rdata = txgbe_rd32_epcs(hw, addr); + if (KR_MODE) + e_dev_info("SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n", rdata); + + rdata = 0; + addr = 0x01009a; + rdata = txgbe_rd32_epcs(hw, addr); + if (KR_MODE) + e_dev_info("SR PMA MMD 10GBASE-KR LD Coefficient Update: 0x%x\n", rdata); + + rdata = 0; + addr = 0x01009b; + rdata = txgbe_rd32_epcs(hw, addr); + if (KR_MODE) + e_dev_info(" SR PMA MMD 10GBASE-KR LD Coefficient Status: 0x%x\n", rdata); + + rdata = 0; + addr = 0x010097; + rdata = txgbe_rd32_epcs(hw, addr); + if (KR_MODE) { + e_dev_info("SR PMA MMD 10GBASE-KR Status Register: 0x%x\n", rdata); + e_dev_info(" Training Failure (bit3): %d\n", ((rdata >> 3) & 0x01)); + e_dev_info(" Start-Up Protocol Status (bit2): %d\n", ((rdata >> 2) & 0x01)); + e_dev_info(" Frame Lock (bit1): %d\n", ((rdata >> 1) & 0x01)); + e_dev_info(" Receiver Status (bit0): %d\n", ((rdata >> 0) & 0x01)); + } + + rdata1 = txgbe_rd32_epcs(hw, 0x10099) & 0x8000; + if (rdata1 == 0x8000) { + adapter->flags2 |= KR; + if (KR_MODE) + e_dev_info("TEST Coefficient Status Register: 0x%x\n", rdata); + } + /*If bit3 is set, Training is completed with failure*/ + if ((rdata >> 3) & 0x01) { + if (KR_MODE) + e_dev_info("Training is completed with failure!!!\n"); + status |= ReadPhyLaneTxEq(0, adapter, 0, 0); + return status; + } + + /*If bit0 is set, Receiver trained and ready to receive data*/ + if ((rdata >> 0) & 0x01) { + if (KR_MODE) + e_dev_info("Receiver trained and ready to receive data ^_^\n"); + status |= ReadPhyLaneTxEq(0, adapter, 0, 0); + return status; + } + + msleep(20); + } + + if (KR_MODE) + e_dev_info("ERROR: Check Clause 72 KR Training Complete Timeout!!!\n"); + + return status; +} + +int Handle_bkp_an73_flow(unsigned char byLinkMode, struct txgbe_adapter *adapter) +{ + int status = 0; + unsigned int timer = 0; + unsigned int addr, data; + bkpan73ability tBkpAn73Ability, tLpBkpAn73Ability; + u32 i = 0; + u32 rdata = 0; + u32 rdata1 = 0; + struct txgbe_hw *hw = &adapter->hw; + tBkpAn73Ability.currentLinkMode = byLinkMode; + + if (KR_MODE) { + e_dev_info("HandleBkpAn73Flow() \n"); + e_dev_info("---------------------------------\n"); + } + + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + txgbe_wr32_epcs(hw, 0x78003, 0x0); + + /*Check the FEC and KR Training for KR mode*/ + if (1) { + //FEC handling + if (KR_MODE) + e_dev_info("<3.3>. Check the FEC for KR mode ...\n"); + tBkpAn73Ability.fecAbility = 0x03; + tLpBkpAn73Ability.fecAbility = 0x0; + if ((tBkpAn73Ability.fecAbility & tLpBkpAn73Ability.fecAbility) == 0x03) { + if (KR_MODE) + e_dev_info("Enable the Backplane KR FEC ...\n"); + //Write 1 to SR_PMA_KR_FEC_CTRL bit0 to enable the FEC + data = 1; + addr = 0x100ab; //SR_PMA_KR_FEC_CTRL + txgbe_wr32_epcs(hw, addr, data); + } else { + if (KR_MODE) + e_dev_info("Backplane KR FEC is disabled.\n"); + } +#ifdef CL72_KR_TRAINING_ON + for (i = 0; i < 2; i++) { + if (KR_MODE) { + e_dev_info("\n<3.4>. 
Check the CL72 KR Training for KR mode ...\n"); + printk("===================%d=======================\n", i); + } + + status |= EnableCl72KrTr(3, adapter); + + if (KR_MODE) + e_dev_info("\nCheck the Clause 72 KR Training status ...\n"); + status |= CheckCl72KrTrStatus(adapter); + + rdata = txgbe_rd32_epcs(hw, 0x10099) & 0x8000; + if (KR_MODE) + e_dev_info("SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n", rdata); + rdata1 = txgbe_rd32_epcs(hw, 0x1009b) & 0x8000; + if (KR_MODE) + e_dev_info("SR PMA MMD 10GBASE-KR LP Coefficient Status Register: 0x%x\n", rdata1); + if (KR_POLLING == 0) { + if (adapter->flags2 & KR) { + rdata = 0x8000; + adapter->flags2 &= ~KR; + } + } + if ((rdata == 0x8000) & (rdata1 == 0x8000)) { + if (KR_MODE) + e_dev_info("====================out===========================\n"); + status |= EnableCl72KrTr(1, adapter); + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000); + ClearBkpAn73Interrupt(2, 0, adapter); + ClearBkpAn73Interrupt(1, 0, adapter); + ClearBkpAn73Interrupt(0, 0, adapter); + while (timer++ < 10) { + rdata = txgbe_rd32_epcs(hw, 0x30020); + rdata = rdata & 0x1000; + if (rdata == 0x1000) { + if (KR_MODE) + e_dev_info("\nINT_AN_INT_CMPLT =1, AN73 Done Success.\n"); + e_dev_info("AN73 Done Success.\n"); + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000); + return 0; + } + msleep(10); + } + msleep(1000); + txgbe_set_link_to_kr(hw, 1); + + return 0; + } + + status |= EnableCl72KrTr(1, adapter); + } +#endif + } + ClearBkpAn73Interrupt(0, 0, adapter); + ClearBkpAn73Interrupt(1, 0, adapter); + ClearBkpAn73Interrupt(2, 0, adapter); + + return status; +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_bp.h b/drivers/net/ethernet/netswift/txgbe/txgbe_bp.h new file mode 100644 index 0000000000000000000000000000000000000000..c5f0dc507216442d3bcd18947c0a7890263ebb0c --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_bp.h @@ -0,0 +1,41 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + + +#ifndef _TXGBE_BP_H_ +#define _TXGBE_BP_H_ + +#include "txgbe.h" +#include "txgbe_hw.h" + +#define CL72_KR_TRAINING_ON + +/* Backplane AN73 Base Page Ability struct*/ +typedef struct TBKPAN73ABILITY { + unsigned int nextPage; //Next Page (bit0) + unsigned int linkAbility; //Link Ability (bit[7:0]) + unsigned int fecAbility; //FEC Request (bit1), FEC Enable (bit0) + unsigned int currentLinkMode; //current link mode for local device +} bkpan73ability; + +int txgbe_kr_intr_handle(struct txgbe_adapter *adapter); +void txgbe_bp_down_event(struct txgbe_adapter *adapter); +void txgbe_bp_watchdog_event(struct txgbe_adapter *adapter); +int txgbe_bp_mode_setting(struct txgbe_adapter *adapter); +void txgbe_bp_close_protect(struct txgbe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_dcb.h b/drivers/net/ethernet/netswift/txgbe/txgbe_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..495460e1db8c7a8d0d430f36c6934befbc0590cd --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_dcb.h @@ -0,0 +1,30 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_dcb.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#ifndef _TXGBE_DCB_H_ +#define _TXGBE_DCB_H_ + +#include "txgbe_type.h" + +#endif /* _TXGBE_DCB_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/netswift/txgbe/txgbe_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..5cb8ef61e04b33ff303fe2cbba30c1b95ba0c9ec --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_ethtool.c @@ -0,0 +1,3381 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_ethtool.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* ethtool support for txgbe */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "txgbe.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" + +#define TXGBE_ALL_RAR_ENTRIES 16 + +struct txgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define TXGBE_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct txgbe_stats txgbe_gstrings_net_stats[] = { + TXGBE_NETDEV_STAT(rx_packets), + TXGBE_NETDEV_STAT(tx_packets), + TXGBE_NETDEV_STAT(rx_bytes), + TXGBE_NETDEV_STAT(tx_bytes), + TXGBE_NETDEV_STAT(rx_errors), + TXGBE_NETDEV_STAT(tx_errors), + TXGBE_NETDEV_STAT(rx_dropped), + TXGBE_NETDEV_STAT(tx_dropped), + TXGBE_NETDEV_STAT(multicast), + TXGBE_NETDEV_STAT(collisions), + TXGBE_NETDEV_STAT(rx_over_errors), + TXGBE_NETDEV_STAT(rx_crc_errors), + TXGBE_NETDEV_STAT(rx_frame_errors), + TXGBE_NETDEV_STAT(rx_fifo_errors), + TXGBE_NETDEV_STAT(rx_missed_errors), + TXGBE_NETDEV_STAT(tx_aborted_errors), + TXGBE_NETDEV_STAT(tx_carrier_errors), + TXGBE_NETDEV_STAT(tx_fifo_errors), + TXGBE_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define TXGBE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct txgbe_adapter, _stat), \ + .stat_offset = offsetof(struct txgbe_adapter, _stat) \ +} +static struct txgbe_stats txgbe_gstrings_stats[] = { + TXGBE_STAT("rx_pkts_nic", stats.gprc), + TXGBE_STAT("tx_pkts_nic", stats.gptc), + TXGBE_STAT("rx_bytes_nic", stats.gorc), + TXGBE_STAT("tx_bytes_nic", stats.gotc), + TXGBE_STAT("lsc_int", lsc_int), + TXGBE_STAT("tx_busy", tx_busy), + TXGBE_STAT("non_eop_descs", non_eop_descs), + TXGBE_STAT("broadcast", stats.bprc), + TXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]), + TXGBE_STAT("tx_timeout_count", tx_timeout_count), + TXGBE_STAT("tx_restart_queue", restart_queue), + TXGBE_STAT("rx_long_length_count", stats.roc), + TXGBE_STAT("rx_short_length_count", stats.ruc), + TXGBE_STAT("tx_flow_control_xon", stats.lxontxc), + TXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), + TXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), + TXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), + TXGBE_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + TXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), + TXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), + TXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + TXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), + TXGBE_STAT("hw_rsc_aggregated", rsc_total_count), + TXGBE_STAT("hw_rsc_flushed", rsc_total_flush), + TXGBE_STAT("fdir_match", stats.fdirmatch), + TXGBE_STAT("fdir_miss", stats.fdirmiss), + TXGBE_STAT("fdir_overflow", fdir_overflow), + TXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + TXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + TXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), + TXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), + TXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + TXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +/* txgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
+ */ +#define TXGBE_NUM_RX_QUEUES netdev->num_tx_queues +#define TXGBE_NUM_TX_QUEUES netdev->num_tx_queues + +#define TXGBE_QUEUE_STATS_LEN ( \ + (TXGBE_NUM_TX_QUEUES + TXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct txgbe_queue_stats) / sizeof(u64))) +#define TXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(txgbe_gstrings_stats) +#define TXGBE_NETDEV_STATS_LEN ARRAY_SIZE(txgbe_gstrings_net_stats) +#define TXGBE_PB_STATS_LEN ( \ + (sizeof(((struct txgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct txgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define TXGBE_VF_STATS_LEN \ + ((((struct txgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ + (sizeof(struct vf_stats) / sizeof(u64))) +#define TXGBE_STATS_LEN (TXGBE_GLOBAL_STATS_LEN + \ + TXGBE_NETDEV_STATS_LEN + \ + TXGBE_PB_STATS_LEN + \ + TXGBE_QUEUE_STATS_LEN + \ + TXGBE_VF_STATS_LEN) + +static const char txgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define TXGBE_TEST_LEN (sizeof(txgbe_gstrings_test) / ETH_GSTRING_LEN) + +/* currently supported speeds for 10G */ +#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | \ + SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define txgbe_isbackplane(type) \ + ((type == txgbe_media_type_backplane) ? true : false) + +static __u32 txgbe_backplane_type(struct txgbe_hw *hw) +{ + __u32 mode = 0x00; + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + mode = SUPPORTED_10000baseKX4_Full; + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + mode = SUPPORTED_10000baseKR_Full; + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + mode = SUPPORTED_1000baseKX_Full; + break; + default: + mode = (SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_1000baseKX_Full); + break; + } + return mode; +} + +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 supported_link; + u32 link_speed = 0; + bool autoneg = false; + u32 supported, advertising; + bool link_up; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + TCALL(hw, mac.ops.get_link_capabilities, &supported_link, &autoneg); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + autoneg = adapter->backplane_an ? 1:0; + else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) + autoneg = adapter->an37?1:0; + + /* set the supported link speeds */ + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + supported |= (txgbe_isbackplane(hw->phy.media_type)) ? + txgbe_backplane_type(hw) : SUPPORTED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + supported |= (txgbe_isbackplane(hw->phy.media_type)) ? 
+ SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + supported |= SUPPORTED_100baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + supported |= SUPPORTED_10baseT_Full; + + /* default advertised speed if phy.autoneg_advertised isn't set */ + advertising = supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) + advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) + advertising |= (supported & ADVERTISED_MASK_10G); + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) { + if (supported & SUPPORTED_1000baseKX_Full) + advertising |= ADVERTISED_1000baseKX_Full; + else + advertising |= ADVERTISED_1000baseT_Full; + } + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) + advertising |= ADVERTISED_10baseT_Full; + } else { + /* default modes in case phy.autoneg_advertised isn't set */ + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + advertising |= ADVERTISED_10000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_1GB_FULL) + advertising |= ADVERTISED_1000baseT_Full; + if (supported_link & TXGBE_LINK_SPEED_100_FULL) + advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & TXGBE_LINK_SPEED_10GB_FULL) + advertising = ADVERTISED_10000baseT_Full; + } + if (supported_link & TXGBE_LINK_SPEED_10_FULL) + advertising |= ADVERTISED_10baseT_Full; + } + + if (autoneg) { + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + } else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* Determine the remaining settings based on the PHY type. */ + switch (adapter->hw.phy.type) { + case txgbe_phy_tn: + case txgbe_phy_aq: + case txgbe_phy_cu_unknown: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case txgbe_phy_qt: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case txgbe_phy_nl: + case txgbe_phy_sfp_passive_tyco: + case txgbe_phy_sfp_passive_unknown: + case txgbe_phy_sfp_ftl: + case txgbe_phy_sfp_avago: + case txgbe_phy_sfp_intel: + case txgbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case txgbe_sfp_type_da_cu: + case txgbe_sfp_type_da_cu_core0: + case txgbe_sfp_type_da_cu_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_DA; + break; + case txgbe_sfp_type_sr: + case txgbe_sfp_type_lr: + case txgbe_sfp_type_srlr_core0: + case txgbe_sfp_type_srlr_core1: + case txgbe_sfp_type_1g_sx_core0: + case txgbe_sfp_type_1g_sx_core1: + case txgbe_sfp_type_1g_lx_core0: + case txgbe_sfp_type_1g_lx_core1: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case txgbe_sfp_type_not_present: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; + break; + case txgbe_sfp_type_1g_cu_core0: + case txgbe_sfp_type_1g_cu_core1: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case txgbe_sfp_type_unknown: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + break; + case txgbe_phy_xaui: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + 
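		/* like the copper PHY types above, XAUI is exposed to ethtool as a TP port */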
break; + case txgbe_phy_unknown: + case txgbe_phy_generic: + case txgbe_phy_sfp_unsupported: + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + + if (!in_interrupt()) { + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + } else { + /* + * this case is a special workaround for RHEL5 bonding + * that calls this routine from interrupt context + */ + link_speed = adapter->link_speed; + link_up = adapter->link_up; + } + + supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case txgbe_fc_full: + advertising |= ADVERTISED_Pause; + break; + case txgbe_fc_rx_pause: + advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + break; + case txgbe_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (link_up) { + switch (link_speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + cmd->base.speed = SPEED_10000; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + break; + case TXGBE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + break; + case TXGBE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = -1; + cmd->base.duplex = -1; + } + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; +} + +static int txgbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + u32 supported, advertising; + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) { + adapter->backplane_an = cmd->base.autoneg ? 1 : 0; + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + adapter->an37 = cmd->base.autoneg ? 
1 : 0; + } + + if ((hw->phy.media_type == txgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (advertising & ~supported) + return -EINVAL; + + /* only allow one speed at a time if no autoneg */ + if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { + if (advertising == + (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + old = hw->phy.autoneg_advertised; + advertised = 0; + if (advertising & ADVERTISED_10000baseT_Full) + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (advertising & ADVERTISED_1000baseT_Full) + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (advertising & ADVERTISED_100baseT_Full) + advertised |= TXGBE_LINK_SPEED_100_FULL; + + if (advertising & ADVERTISED_10baseT_Full) + advertised |= TXGBE_LINK_SPEED_10_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = TCALL(hw, mac.ops.setup_link, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + TCALL(hw, mac.ops.setup_link, old, true); + } + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + TCALL(hw, mac.ops.flap_tx_laser); + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + } else if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4 || + (hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + if (!cmd->base.autoneg) { + if (advertising == + (ADVERTISED_10000baseKR_Full | + ADVERTISED_1000baseKX_Full | + ADVERTISED_10000baseKX4_Full)) + return -EINVAL; + } else { + err = txgbe_set_link_to_kr(hw, 1); + return err; + } + advertised = 0; + if (advertising & ADVERTISED_10000baseKR_Full) { + err = txgbe_set_link_to_kr(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + return err; + } else if (advertising & ADVERTISED_10000baseKX4_Full) { + err = txgbe_set_link_to_kx4(hw, 1); + advertised |= TXGBE_LINK_SPEED_10GB_FULL; + return err; + } else if (advertising & ADVERTISED_1000baseKX_Full) { + advertised |= TXGBE_LINK_SPEED_1GB_FULL; + err = txgbe_set_link_to_kx(hw, TXGBE_LINK_SPEED_1GB_FULL, 0); + return err; + } + return err; + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = cmd->base.speed; + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (advertising != ADVERTISED_10000baseT_Full) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void txgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (txgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == txgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == txgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int txgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_fc_info fc = hw->fc; + + /* some devices do not support autoneg of flow control */ + if ((pause->autoneg == 
AUTONEG_ENABLE) && + !txgbe_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = txgbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = txgbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = txgbe_fc_tx_pause; + else + fc.requested_mode = txgbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct txgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} + +static u32 txgbe_get_msglevel(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void txgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +#define TXGBE_REGS_LEN 4096 +static int txgbe_get_regs_len(struct net_device __always_unused *netdev) +{ + return TXGBE_REGS_LEN * sizeof(u32); +} + +#define TXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +static void txgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 i; + u32 id = 0; + + memset(p, 0, TXGBE_REGS_LEN * sizeof(u32)); + regs_buff[TXGBE_REGS_LEN - 1] = 0x55555555; + + regs->version = hw->revision_id << 16 | + hw->device_id; + + /* Global Registers */ + /* chip control */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PWR);//0 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_CTL);//1 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_PF_SM);//2 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST);//3 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_ST);//4 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_SWSM);//5 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_MIS_RST_ST);//6 + /* pvt sensor */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_CTL);//7 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_EN);//8 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ST);//9 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_THRE);//10 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_DALARM_THRE);//11 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_INT_EN);//12 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TS_ALARM_ST);//13 + /* Fmgr Register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMD);//14 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_DATA);//15 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_STATUS);//16 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_USR_CMD);//17 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG0);//18 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_CMDCFG1);//19 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_STATUS);//20 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_SPI_ILDR_SWPTR);//21 + + /* Port Registers */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_CTL);//22 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_PORT_ST);//23 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_EX_VTYPE);//24 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN);//25 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_VXLAN_GPE);//26 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_GENEVE);//27 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TEREDO);//28 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TCP_TIME);//29 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_LED_CTL);//30 + /* GPIO */ + regs_buff[id++] = 
TXGBE_R32_Q(hw, TXGBE_GPIO_DR);//31 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_DDR);//32 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_CTL);//33 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTEN);//34 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTMASK);//35 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_GPIO_INTSTATUS);//36 + /* I2C */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CON);//37 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TAR);//38 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_DATA_CMD);//39 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_HCNT);//40 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SS_SCL_LCNT);//41 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_HCNT);//42 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SCL_LCNT);//43 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SCL_HCNT);//44 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_STAT);//45 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_INTR_MASK);//46 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RAW_INTR_STAT);//47 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RX_TL);//48 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_TL);//49 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_INTR);//50 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_UNDER);//51 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_OVER);//52 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_OVER);//53 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RD_REQ);//54 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_TX_ABRT);//55 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_RX_DONE);//56 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_ACTIVITY);//57 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_STOP_DET);//58 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_START_DET);//59 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_GEN_CALL);//60 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE);//61 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_STATUS);//62 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TXFLR);//63 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_RXFLR);//64 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_HOLD);//65 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_TX_ABRT_SOURCE);//66 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_SETUP);//67 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_ENABLE_STATUS);//68 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_FS_SPKLEN);//69 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_HS_SPKLEN);//70 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT);//71 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT);//72 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_CLR_SCL_STUCK_DET);//73 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_DEVICE_ID);//74 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_PARAM_1);//75 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_VERSION);//76 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_I2C_COMP_TYPE);//77 + /* TX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_TDESC);//78 + /* RX TPH */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RDESC);//79 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RHDR);//80 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_CFG_TPH_RPL);//81 + + /* TDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_CTL);//82 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(0));//83 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VF_TE(1));//84 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PB_THRE(i));//85-92 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = 
TXGBE_R32_Q(hw, TXGBE_TDM_LLQ(i));//93-96 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_L);//97 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_LB_H);//98 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_L);//99 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETYPE_AS_H);//100 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_L);//101 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MAC_AS_H);//102 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_L);//103 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_AS_H);//104 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_L);//105 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_TCP_FLG_H);//106 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VLAN_INS(i));//107-234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_ETAG_INS(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CTL);//235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_MMW);//236 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_PBWARB_CFG(i));//237-244 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_VM_CREDIT(i));//245-372 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_EOF);//373 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDM_FC_SOF);//374 + + /* RDMA */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CTL);//375 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(0));//376 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_VF_RE(1));//377 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_RSC_CTL);//378 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_ARB_CFG(i));//379-386 + } + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_QDE(i));//387-394 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDM_PF_HIDE(i)); + } + + /* RDB */ + /*flow control */ + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCV(i));//395-398 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCL(i));//399-414 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCH(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCRT);//415 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RFCC);//416 + /* receive packet buffer */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_CTL);//417 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_WRAP);//418 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_UP2TC);//419 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PB_SZ(i));//420-435 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_MPCNT(i)); + } + /* lli interrupt */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_LLI_THRE);//436 + /* ring assignment */ + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_PL_CFG(i));//437-500 + } + for (i = 0; i < 32; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSTBL(i));//501-532 + } + for (i = 0; i < 10; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSSRK(i));//533-542 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RSS_TC);//543 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_RA_CTL);//544 + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_SA(i));//545-1184 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_DA(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_SDP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL0(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_5T_CTL1(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_SYN_CLS);//1185 + for (i = 0; i < 8; i++) { + regs_buff[id++] = 
TXGBE_R32_Q(hw, TXGBE_RDB_ETYPE_CLS(i));//1186-1193 + } + /* fcoe redirection table */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_CTL);//1194 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FCRE_TBL(i));//1195-1202 + } + /*flow director */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CTL);//1203 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HKEY);//1204 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SKEY);//1205 + for (i = 0; i < 16; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX_CFG(i));//1206-1221 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FREE);//1222 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_LEN);//1223 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_USE_ST);//1224 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FAIL_ST);//1225 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MATCH);//1226 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_MISS);//1227 + for (i = 0; i < 3; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6(i));//1228-1230 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA);//1231 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA);//1232 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_PORT);//1233 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_FLEX);//1234 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_HASH);//1235 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_CMD);//1236 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_DA4_MSK);//1237 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SA4_MSK);//1238 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_TCP_MSK);//1239 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_UDP_MSK);//1240 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_SCTP_MSK);//1241 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_IP6_MSK);//1242 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RDB_FDIR_OTHER_MSK);//1243 + + /* PSR */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_CTL);//1244 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_CTL);//1245 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_CTL);//1246 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VM_L2CTL(i));//1247-1310 + } + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_ETYPE_SWC(i));//1311-1318 + } + for (i = 0; i < 128; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MC_TBL(i));//1319-1702 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_UC_TBL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_TBL(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_L);//1703 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_AD_H);//1704 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_VM_L);//1705 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_VM_H);//1706 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MAC_SWC_IDX);//1707 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC);//1708 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_L);//1709 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_VM_H);//1710 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_VLAN_SWC_IDX);//1711 + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_CTL(i));//1712-1731 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VLAN_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_L(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_MR_VM_H(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_CTL);//1732 + 
regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPL);//1733 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_STMPH);//1734 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRL);//1735 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_ATTRH);//1736 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_1588_MSGTYPE);//1737 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_CTL);//1738 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IPV);//1739 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1740 + for (i = 0; i < 4; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP4TBL(i));//1741-1748 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_WKUP_IP6TBL(i)); + } + for (i = 0; i < 16; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_L(i));//1749-1796 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_DW_H(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_MSK(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PSR_LAN_FLEX_CTL);//1797 + + /* TDB */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_RFCS);//1798 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PB_SZ(0));//1799 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_UP2TC);//1800 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CTL);//1801 + for (i = 0; i < 8; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_PBRARB_CFG(i));//1802-1809 + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TDB_MNG_TC);//1810 + + /* tsec */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_CTL);//1811 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_ST);//1812 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AF);//1813 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_BUF_AE);//1814 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_MIN_IFG);//1815 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_CTL);//1816 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPL);//1817 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_STMPH);//1818 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIML);//1819 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_SYSTIMH);//1820 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_INC);//1821 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJL);//1822 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_TSC_1588_ADJH);//1823 + + /* RSEC */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_CTL);//1824 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_RSC_ST);//1825 + + /* BAR register */ + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IC);//1826 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_ICS);//1827 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IEN);//1828 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_GPIE);//1829 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(0));//1830 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IC(1));//1831 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(0));//1832 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ICS(1));//1833 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(0));//1834 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMS(1));//1835 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(0));//1836 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IMC(1));//1837 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ISB_ADDR_L);//1838 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ISB_ADDR_H);//1839 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITRSEL);//1840 + for (i = 0; i < 64; i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_ITR(i));//1841-1968 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_IVAR(i)); + } + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_MISC_IVAR);//1969 + for (i = 0; i < 128; 
i++) { + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAL(i));//1970-3249 + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_RR_CFG(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAL(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_BAH(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_WP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_RP(i)); + regs_buff[id++] = TXGBE_R32_Q(hw, TXGBE_PX_TR_CFG(i)); + } +} + +static int txgbe_get_eeprom_len(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int txgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = TCALL(hw, eeprom.ops.read_buffer, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int txgbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, first_word, + &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = TCALL(hw, eeprom.ops.read, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = TCALL(hw, eeprom.ops.write_buffer, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + TCALL(hw, eeprom.ops.update_checksum); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void txgbe_get_drvinfo(struct net_device *netdev, + struct 
ethtool_drvinfo *drvinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, txgbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, txgbe_driver_version, + sizeof(drvinfo->version) - 1); + strncpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) { + drvinfo->n_stats = TXGBE_STATS_LEN - + (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues)* + (sizeof(struct txgbe_queue_stats) / sizeof(u64))*2; + } else { + drvinfo->n_stats = TXGBE_STATS_LEN; + } + drvinfo->testinfo_len = TXGBE_TEST_LEN; + drvinfo->regdump_len = txgbe_get_regs_len(netdev); +} + +static void txgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = TXGBE_MAX_RXD; + ring->tx_max_pending = TXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + TXGBE_MIN_TXD, TXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + TXGBE_MIN_RXD, TXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct txgbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + txgbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct txgbe_ring)); + + temp_ring[i].count = new_tx_count; + err = txgbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + txgbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct txgbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct txgbe_ring)); + + temp_ring[i].count = new_rx_count; + err = txgbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + txgbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + txgbe_free_rx_resources(adapter->rx_ring[i]); + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct txgbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + txgbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__TXGBE_RESETTING, &adapter->state); + return err; +} + +static int txgbe_get_sset_count(struct net_device *netdev, int sset) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return TXGBE_TEST_LEN; + case ETH_SS_STATS: + if (adapter->num_tx_queues <= TXGBE_NUM_RX_QUEUES) { + return TXGBE_STATS_LEN - (TXGBE_NUM_RX_QUEUES - adapter->num_tx_queues) * + (sizeof(struct txgbe_queue_stats) / sizeof(u64)) * 2; + } else { + return TXGBE_STATS_LEN; + } + default: + return -EOPNOTSUPP; + } +} + +static void txgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + + u64 *queue_stat; + int stat_count, k; + unsigned int start; + struct txgbe_ring *ring; + int i, j; + char *p; + + txgbe_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + txgbe_gstrings_net_stats[i].stat_offset; + data[i] = (txgbe_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < TXGBE_GLOBAL_STATS_LEN; j++, i++) { + p = (char *)adapter + txgbe_gstrings_stats[j].stat_offset; + data[i] = (txgbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < TXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } + + stat_count = sizeof(struct vf_stats) / sizeof(u64); + for (j = 0; j < adapter->num_vfs; j++) { + queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; + for (k = 0; k < stat_count; k++) + data[i + k] += queue_stat[k]; + i += k; + } +} + +static void txgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *txgbe_gstrings_test, + TXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < TXGBE_NETDEV_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, txgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < TXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_vfs; i++) { + sprintf(p, "VF %d Rx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Rx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d Tx Bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "VF %d MC Packets", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != TXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int txgbe_link_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + + if (TXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; 
+} + +/* ethtool register test data */ +struct txgbe_reg_test { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default sapphire register test */ +static struct txgbe_reg_test reg_test_sapphire[] = { + { TXGBE_RDB_RFCL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_PSR_VLAN_CTL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { TXGBE_PX_RR_BAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { TXGBE_PX_RR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_RR_CFG(0), 4, WRITE_NO_TEST, 0, TXGBE_PX_RR_CFG_RR_EN }, + { TXGBE_RDB_RFCH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { TXGBE_RDB_RFCV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAL(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_PX_TR_BAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { TXGBE_RDB_PB_CTL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { TXGBE_PSR_MC_TBL(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +static bool reg_pattern_test(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, test_pattern[pat] & write); + val = rd32(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct txgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = rd32(&adapter->hw, reg); + wr32(&adapter->hw, reg, write & mask); + val = rd32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected" + "0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(&adapter->hw, reg, before); + return true; + } + wr32(&adapter->hw, reg, before); + return false; +} + +static bool txgbe_reg_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_reg_test *test; + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (TXGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_sapphire; + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static bool txgbe_eeprom_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct txgbe_hw *hw = &adapter->hw; + + if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { + *data = 1; + return true; + } else { + *data = 0; + return false; + } +} + +static irqreturn_t txgbe_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct txgbe_adapter *adapter = netdev_priv(netdev); + u64 icr; + + /* get misc interrupt, as cannot get ring interrupt status */ + icr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC1); + icr <<= 32; + icr |= txgbe_misc_isb(adapter, TXGBE_ISB_VEC0); + + adapter->test_icr = icr; + + return IRQ_HANDLED; +} + +static int txgbe_intr_test(struct txgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u64 mask; + u32 i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return -1; + } + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &txgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &txgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &txgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + txgbe_irq_disable(adapter); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 1; i++) { + /* Interrupt to test */ + mask = 1ULL << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + txgbe_intr_disable(&adapter->hw, ~mask); + txgbe_intr_trigger(&adapter->hw, mask); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. 
+ */ + adapter->test_icr = 0; + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL); + txgbe_intr_trigger(&adapter->hw, mask); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + } + + /* Disable all the interrupts */ + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL); + TXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void txgbe_free_desc_rings(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + struct txgbe_hw *hw = &adapter->hw; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + TCALL(hw, mac.ops.disable_rx); + txgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + wr32(hw, TXGBE_PX_TR_CFG(tx_ring->reg_idx), 0); + + wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); + + txgbe_reset(adapter); + + txgbe_free_tx_resources(&adapter->test_tx_ring); + txgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int txgbe_setup_desc_rings(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + struct txgbe_hw *hw = &adapter->hw; + int ret_val; + int err; + + TCALL(hw, mac.ops.setup_rxpba, 0, 0, PBA_STRATEGY_EQUAL); + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = TXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = txgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + wr32m(&adapter->hw, TXGBE_TDM_CTL, + TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + + txgbe_configure_tx_ring(adapter, tx_ring); + + /* enable mac transmitter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_MASK, + TXGBE_MAC_TX_CFG_TE | TXGBE_MAC_TX_CFG_SPEED_10G); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = TXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + + err = txgbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + TCALL(hw, mac.ops.disable_rx); + + txgbe_configure_rx_ring(adapter, rx_ring); + + TCALL(hw, mac.ops.enable_rx); + + return 0; + +err_nomem: + txgbe_free_desc_rings(adapter); + return ret_val; +} + +static int txgbe_setup_config(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* Setup traffic loopback */ + reg_data = rd32(hw, TXGBE_PSR_CTL); + reg_data |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_UPE | + TXGBE_PSR_CTL_MPE | TXGBE_PSR_CTL_TPE; + wr32(hw, TXGBE_PSR_CTL, reg_data); + + wr32(hw, TXGBE_RSC_CTL, + (rd32(hw, TXGBE_RSC_CTL) | + TXGBE_RSC_CTL_SAVE_MAC_ERR) & ~TXGBE_RSC_CTL_SECRX_DIS); + + wr32(hw, TXGBE_RSC_LSEC_CTL, 0x4); + + wr32(hw, TXGBE_PSR_VLAN_CTL, + rd32(hw, TXGBE_PSR_VLAN_CTL) & + ~TXGBE_PSR_VLAN_CTL_VFE); + + wr32m(&adapter->hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_LM, ~TXGBE_MAC_RX_CFG_LM); + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_FORCE_LKUP, ~TXGBE_CFG_PORT_CTL_FORCE_LKUP); + + + TXGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + return 0; +} + +static int txgbe_setup_phy_loopback_test(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = 
&adapter->hw; + u32 value; + /* setup phy loopback */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0); + value |= TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 | + TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1; + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value); + + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + value | TXGBE_SR_PMA_MMD_CTL1_LB_EN); + return 0; +} + +static void txgbe_phy_loopback_cleanup(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_MISC_CTL0); + value &= ~(TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 | + TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1); + + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, value); + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + value & ~TXGBE_SR_PMA_MMD_CTL1_LB_EN); +} + + +static void txgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool txgbe_check_lbtest_frame(struct txgbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + return match; +} + +static u16 txgbe_clean_test_rings(struct txgbe_ring *rx_ring, + struct txgbe_ring *tx_ring, + unsigned int size) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *rx_buffer; + struct txgbe_tx_buffer *tx_buffer; + const int bufsz = txgbe_rx_bufsz(rx_ring); + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc); + + while (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) { + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (txgbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->page_dma, + bufsz, + DMA_FROM_DEVICE); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ntc); + } + + /* re-map buffers to ring, store next to clean values */ + txgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int txgbe_run_loopback_test(struct txgbe_adapter *adapter) +{ + struct txgbe_ring *tx_ring = &adapter->test_tx_ring; + struct txgbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~TXGBE_FLAG_DCB_ENABLED; + + /* allocate test 
skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + txgbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = txgbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = txgbe_clean_test_rings(rx_ring, tx_ring, size); + if (j == 0) + continue; + else if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int txgbe_loopback_test(struct txgbe_adapter *adapter, u64 *data) +{ + *data = txgbe_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = txgbe_setup_config(adapter); + if (*data) + goto err_loopback; + + *data = txgbe_setup_phy_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = txgbe_run_loopback_test(adapter); + if (*data) + e_info(hw, "phy loopback testing failed\n"); + txgbe_phy_loopback_cleanup(adapter); + +err_loopback: + txgbe_free_desc_rings(adapter); +out: + return *data; +} + +static void txgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (TXGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + set_bit(__TXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + e_warn(drv, "Please take active VFS " + "offline and restart the " + "adapter before running NIC " + "diagnostics\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__TXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (txgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + txgbe_close(netdev); + else + txgbe_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (txgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + txgbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (txgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + txgbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (txgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (!(((hw->subsystem_device_id & 
TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. */ + if (adapter->flags & (TXGBE_FLAG_SRIOV_ENABLED | + TXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + txgbe_reset(adapter); + e_info(hw, "loopback testing starting\n"); + if (txgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[3] = 0; +skip_loopback: + txgbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__TXGBE_TESTING, &adapter->state); + if (if_running) + txgbe_open(netdev); + else + TCALL(hw, mac.ops.disable_tx_laser); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (txgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__TXGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + + +static int txgbe_wol_exclusion(struct txgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + int retval = 0; + + /* WOL not supported for all devices */ + if (!txgbe_wol_supported(adapter)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void txgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (txgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) + return; + + if (adapter->wol & TXGBE_PSR_WKUP_CTL_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & TXGBE_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int txgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (txgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + if ((hw->subsystem_device_id & TXGBE_WOL_MASK) != TXGBE_WOL_SUP) + return -EOPNOTSUPP; + + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= TXGBE_PSR_WKUP_CTL_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= TXGBE_PSR_WKUP_CTL_MAG; + + hw->wol_enabled = !!(adapter->wol); + wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + return 0; +} + +static int txgbe_nway_reset(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + + return 0; +} + +static int txgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + return 2; + + case ETHTOOL_ID_ON: + TCALL(hw, mac.ops.led_on, TXGBE_LED_LINK_UP); + break; + + case ETHTOOL_ID_OFF: + TCALL(hw, mac.ops.led_off, TXGBE_LED_LINK_UP); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + wr32(&adapter->hw, TXGBE_CFG_LED_CTL, + adapter->led_reg); + break; + } + + return 0; +} + +static int txgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool txgbe_update_rsc(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* nothing to do if LRO or RSC are not enabled */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) + return false; + + /* check the feature flag value and enable RSC if necessary */ + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs value high enough " + "to re-enable RSC\n"); + return true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs set too low, disabling RSC\n"); + return true; + } + return false; +} + +static int txgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param; + u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { + /* reject 
Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs > (TXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (TXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = TXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = TXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < TXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= TXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < TXGBE_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ + need_reset |= txgbe_update_rsc(adapter); + + if (adapter->hw.mac.dmac_config.watchdog_timer && + (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { + e_info(probe, + "Disabling DMA coalescing because interrupt throttling " + "is disabled\n"); + adapter->hw.mac.dmac_config.watchdog_timer = 0; + TCALL(hw, mac.ops.dmac_config); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + txgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + txgbe_do_reset(netdev); + + return 0; +} + +static int txgbe_get_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union txgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node; + struct txgbe_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + 
fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_all(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *node; + struct txgbe_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int txgbe_get_rss_hash_opts(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on txgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = txgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = txgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = txgbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int txgbe_update_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct txgbe_fdir_filter *input, + u16 sw_idx) +{ + struct txgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *parent; + struct txgbe_fdir_filter *rule; + bool deleted = false; + s32 err; + + parent = NULL; + rule = NULL; + + 
hlist_for_each_entry_safe(rule, node, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && + (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash))) { + err = txgbe_fdir_erase_perfect_filter(hw, + &rule->filter, + sw_idx); + if (err) + return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, parent); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + /* fall through */ + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + +static int txgbe_add_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_fdir_filter *input; + union txgbe_atr_input mask; + int err; + u16 ptype = 0; + + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) + return -EINVAL; + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union txgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!txgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK | + TXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; +#if 0 + /* need fix */ + input->filter.formatted.tunnel_type = + (unsigned char)ntohl(fsp->h_ext.data[0]); + mask.formatted.tunnel_type = + (unsigned char)ntohl(fsp->m_ext.data[0]); +#endif + } + + switch (input->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + ptype = TXGBE_PTYPE_L2_IPV4_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + ptype = TXGBE_PTYPE_L2_IPV4_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + ptype = TXGBE_PTYPE_L2_IPV4_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + ptype = TXGBE_PTYPE_L2_IPV4; + break; + case TXGBE_ATR_FLOW_TYPE_TCPV6: + ptype = TXGBE_PTYPE_L2_IPV6_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV6: + ptype = TXGBE_PTYPE_L2_IPV6_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + ptype = TXGBE_PTYPE_L2_IPV6_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV6: + ptype = TXGBE_PTYPE_L2_IPV6; + break; + default: + break; + } + + input->filter.formatted.vlan_id = htons(ptype); + if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) + mask.formatted.vlan_id = 0xFFFF; + else + mask.formatted.vlan_id = htons(0xFFF8); + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = TXGBE_RDB_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = txgbe_fdir_set_input_mask(hw, &mask, + adapter->cloud_mode); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Hardware only supports one mask per port. 
To change" + "the mask you must first delete all the rules.\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + txgbe_atr_compute_perfect_hash(&input->filter, &mask); + + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(adapter->netdev)) { + err = txgbe_fdir_write_perfect_filter(hw, + &input->filter, input->sw_idx, + (input->action == TXGBE_RDB_FDIR_DROP_QUEUE) ? + TXGBE_RDB_FDIR_DROP_QUEUE : + adapter->rx_ring[input->action]->reg_idx, + adapter->cloud_mode); + if (err) + goto err_out_w_lock; + } + + txgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int txgbe_del_ethtool_fdir_entry(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = txgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +#define UDP_RSS_FLAGS (TXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int txgbe_set_rss_hash_opt(struct txgbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~TXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= TXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct txgbe_hw *hw = &adapter->hw; + u32 mrqc; + + mrqc = rd32(hw, TXGBE_RDB_RA_CTL); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4 + | TXGBE_RDB_RA_CTL_RSS_IPV4_TCP + | TXGBE_RDB_RA_CTL_RSS_IPV6 + | 
TXGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + mrqc &= ~(TXGBE_RDB_RA_CTL_RSS_IPV4_UDP | + TXGBE_RDB_RA_CTL_RSS_IPV6_UDP); + + if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP; + + if (flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + wr32(hw, TXGBE_RDB_RA_CTL, mrqc); + } + + return 0; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = txgbe_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = txgbe_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = txgbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int txgbe_rss_indir_tbl_max(struct txgbe_adapter *adapter) +{ + return 64; +} + + +static u32 txgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 txgbe_rss_indir_size(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + return txgbe_rss_indir_tbl_entries(adapter); +} + +static void txgbe_get_reta(struct txgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = txgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int txgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + txgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, txgbe_get_rxfh_key_size(netdev)); + + return 0; +} + +static int txgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + txgbe_rss_indir_tbl_max(adapter)); + + /*Allow at least 2 queues w/ SR-IOV.*/ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && + (max_queues < 2)) + max_queues = 2; + + /* Verify user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(adapter->rss_key, key, txgbe_get_rxfh_key_size(netdev)); + + txgbe_store_reta(adapter); + + return 0; +} + +static int txgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + /* we always support timestamping disabled */ + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +static unsigned int txgbe_max_channels(struct txgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if (adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = TXGBE_MAX_FDIR_INDICES; + } else { + /* support up to max allowed queues with RSS */ + max_combined = txgbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void txgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = txgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int txgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + unsigned int 
count = ch->combined_count; + u8 max_rss_indices = txgbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > txgbe_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int txgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, " + "but not supported. Please report the module type to the " + "driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int txgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + u32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return -EBUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = TCALL(hw, phy.ops.read_i2c_eeprom, i, + &databyte); + else + status = TCALL(hw, phy.ops.read_i2c_sff8472, i, + &databyte); + + if (status != 0) + return -EIO; + + data[i - ee->offset] = databyte; + } + + return 0; +} + +static int txgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} + +static int txgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct ethtool_eee eee_data; + s32 ret_val; + + if (!(hw->mac.ops.setup_eee && + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE))) + return -EOPNOTSUPP; + + memset(&eee_data, 0, sizeof(struct ethtool_eee)); + + ret_val = txgbe_get_eee(netdev, &eee_data); + if (ret_val) + return ret_val; + + if (eee_data.eee_enabled && !edata->eee_enabled) { + if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_dev_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { + e_dev_err("Setting EEE Tx LPI timer is not " + "supported\n"); + return -EINVAL; + } + + if (eee_data.advertised != edata->advertised) { + e_dev_err("Setting EEE advertised speeds is not " + "supported\n"); + return -EINVAL; + } + + } + + if (eee_data.eee_enabled != edata->eee_enabled) { + + if (edata->eee_enabled) + adapter->flags2 |= TXGBE_FLAG2_EEE_ENABLED; + else + adapter->flags2 &= ~TXGBE_FLAG2_EEE_ENABLED; + + /* reset link */ + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); + } + + return 0; +} + +static int txgbe_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct txgbe_adapter *adapter = netdev_priv(netdev); + + ret = request_firmware(&fw, ef->data, &netdev->dev); + if (ret < 0) + return ret; + + if (txgbe_mng_present(&adapter->hw)) { + ret = txgbe_upgrade_flash_hostif(&adapter->hw, ef->region, + fw->data, fw->size); + } else + ret = -EOPNOTSUPP; + + release_firmware(fw); + if (!ret) + dev_info(&netdev->dev, + "loaded firmware %s, reload txgbe driver\n", ef->data); + return ret; +} + +static struct ethtool_ops txgbe_ethtool_ops = { + .get_link_ksettings = txgbe_get_link_ksettings, + .set_link_ksettings = txgbe_set_link_ksettings, + .get_drvinfo = txgbe_get_drvinfo, + .get_regs_len = txgbe_get_regs_len, + .get_regs = txgbe_get_regs, + .get_wol = txgbe_get_wol, + .set_wol = txgbe_set_wol, + .nway_reset = txgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = txgbe_get_eeprom_len, + .get_eeprom = txgbe_get_eeprom, + .set_eeprom = txgbe_set_eeprom, + .get_ringparam = txgbe_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_pauseparam = txgbe_get_pauseparam, + .set_pauseparam = txgbe_set_pauseparam, + .get_msglevel = txgbe_get_msglevel, + .set_msglevel = txgbe_set_msglevel, + .self_test = txgbe_diag_test, + 
.get_strings = txgbe_get_strings, + .set_phys_id = txgbe_set_phys_id, + .get_sset_count = txgbe_get_sset_count, + .get_ethtool_stats = txgbe_get_ethtool_stats, + .get_coalesce = txgbe_get_coalesce, + .set_coalesce = txgbe_set_coalesce, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, + .get_eee = txgbe_get_eee, + .set_eee = txgbe_set_eee, + .get_channels = txgbe_get_channels, + .set_channels = txgbe_set_channels, + .get_module_info = txgbe_get_module_info, + .get_module_eeprom = txgbe_get_module_eeprom, + .get_ts_info = txgbe_get_ts_info, + .get_rxfh_indir_size = txgbe_rss_indir_size, + .get_rxfh_key_size = txgbe_get_rxfh_key_size, + .get_rxfh = txgbe_get_rxfh, + .set_rxfh = txgbe_set_rxfh, + .flash_device = txgbe_set_flash, +}; + +void txgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &txgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_hw.c b/drivers/net/ethernet/netswift/txgbe/txgbe_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..17e366ebd6fe4b0b6eda50c39d0abadef28e3b8d --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_hw.c @@ -0,0 +1,7072 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_82599.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe_type.h" +#include "txgbe_hw.h" +#include "txgbe_phy.h" +#include "txgbe.h" + + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 +#define TXGBE_SP_RAR_ENTRIES 128 +#define TXGBE_SP_MC_TBL_SIZE 128 +#define TXGBE_SP_VFT_TBL_SIZE 128 +#define TXGBE_SP_RX_PB_SIZE 512 + +STATIC s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw); +STATIC void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw); +STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr); +STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, + u16 *san_mac_offset); + +STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete); + + +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_ETHPHY_IDA_DATA; + data = rd32(hw, portRegOffset); + return data; +} + + +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr) +{ + unsigned int portRegOffset; + u32 data; + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, read the data from IDA_DATA register */ + portRegOffset = TXGBE_XPCS_IDA_DATA; + data = rd32(hw, portRegOffset); + + return data; +} + + +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_ETHPHY_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, write the data to IDA_DATA register */ + portRegOffset = TXGBE_ETHPHY_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data) +{ + unsigned int portRegOffset; + + /* Set the LAN port indicator to portRegOffset[1] */ + /* 1st, write the regOffset to IDA_ADDR register */ + portRegOffset = TXGBE_XPCS_IDA_ADDR; + wr32(hw, portRegOffset, addr); + + /* 2nd, write the data to IDA_DATA register */ + portRegOffset = TXGBE_XPCS_IDA_DATA; + wr32(hw, portRegOffset, data); +} + +/** + * txgbe_get_pcie_msix_count - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table.
+ **/ +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u32 pos; + + DEBUGFUNC("\n"); + + max_msix_count = TXGBE_MAX_MSIX_VECTORS_SAPPHIRE; + pos = pci_find_capability(((struct txgbe_adapter *)hw->back)->pdev, PCI_CAP_ID_MSIX); + if (!pos) + return msix_count; + pci_read_config_word(((struct txgbe_adapter *)hw->back)->pdev, + pos + PCI_MSIX_FLAGS, &msix_count); + + if (TXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= TXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * txgbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 txgbe_init_hw(struct txgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("\n"); + + /* Reset the hardware */ + status = TCALL(hw, mac.ops.reset_hw); + + if (status == 0) { + /* Start the HW */ + status = TCALL(hw, mac.ops.start_hw); + } + + return status; +} + + +/** + * txgbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw) +{ + u16 i = 0; + + DEBUGFUNC("\n"); + + rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_MPCNT(i)); + + rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + rd32(hw, TXGBE_RDB_LXONTXC); + rd32(hw, TXGBE_RDB_LXOFFTXC); + rd32(hw, TXGBE_MAC_LXONRXC); + rd32(hw, TXGBE_MAC_LXOFFRXC); + + for (i = 0; i < 8; i++) { + rd32(hw, TXGBE_RDB_PXONTXC(i)); + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + rd32(hw, TXGBE_MAC_PXONRXC(i)); + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i<<16); + rd32(hw, TXGBE_MAC_PXOFFRXC); + } + for (i = 0; i < 8; i++) + rd32(hw, TXGBE_RDB_PXON2OFFCNT(i)); + for (i = 0; i < 128; i++) { + wr32(hw, TXGBE_PX_MPRC(i), 0); + } + + rd32(hw, TXGBE_PX_GPRC); + rd32(hw, TXGBE_PX_GPTC); + rd32(hw, TXGBE_PX_GORC_MSB); + rd32(hw, TXGBE_PX_GOTC_MSB); + + rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW); + rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + rd32(hw, TXGBE_RDM_DRP_PKT); + return 0; +} + +/** + * txgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. + * + **/ +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) +{ + bool supported = false; + u32 speed; + bool link_up; + u8 device_type = hw->subsystem_id & 0xF0; + + DEBUGFUNC("\n"); + + switch (hw->phy.media_type) { + case txgbe_media_type_fiber: + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == TXGBE_LINK_SPEED_1GB_FULL ? 
+ true : false; + else + supported = true; + break; + case txgbe_media_type_backplane: + supported = (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII); + break; + case txgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + if (!supported) + ERROR_REPORT2(TXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); + return supported; +} + +/** + * txgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 txgbe_setup_fc(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + u32 pcap = 0; + u32 value = 0; + u32 pcap_backplane = 0; + + DEBUGFUNC("\n"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "txgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == txgbe_fc_default) + hw->fc.requested_mode = txgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case txgbe_fc_none: + /* Flow control completely disabled by software override. */ + break; + case txgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + case txgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + pcap |= TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM; + pcap_backplane |= TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + break; + } + + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV); + value = (value & ~(TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM | + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM)) | pcap; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV, value); + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper.
+ */ + if (hw->phy.media_type == txgbe_media_type_backplane) { + value = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + value = (value & ~(TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM | + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM)) | + pcap_backplane; + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1, value); + + } else if ((hw->phy.media_type == txgbe_media_type_copper) && + (txgbe_device_supports_autoneg_fc(hw))) { + ret_val = txgbe_set_phy_pause_advertisement(hw, pcap_backplane); + } +out: + return ret_val; +} + +/** + * txgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("\n"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != TXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); + return TXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = TCALL(hw, eeprom.ops.read, pba_ptr, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return TXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return TXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = TCALL(hw, eeprom.ops.read, pba_ptr + offset, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * txgbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC 
address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + DEBUGFUNC("\n"); + + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); + + for (i = 0; i < 2; i++) + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); + + for (i = 0; i < 4; i++) + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); + + return 0; +} + +/** + * txgbe_set_pci_config_data - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the txgbe_hw structure + **/ +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status) +{ + if (hw->bus.type == txgbe_bus_type_unknown) + hw->bus.type = txgbe_bus_type_pci_express; + + switch (link_status & TXGBE_PCI_LINK_WIDTH) { + case TXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = txgbe_bus_width_pcie_x1; + break; + case TXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = txgbe_bus_width_pcie_x2; + break; + case TXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = txgbe_bus_width_pcie_x4; + break; + case TXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = txgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = txgbe_bus_width_unknown; + break; + } + + switch (link_status & TXGBE_PCI_LINK_SPEED) { + case TXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = txgbe_bus_speed_2500; + break; + case TXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = txgbe_bus_speed_5000; + break; + case TXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = txgbe_bus_speed_8000; + break; + default: + hw->bus.speed = txgbe_bus_speed_unknown; + break; + } + +} + +/** + * txgbe_get_bus_info - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the txgbe_hw structure. + **/ +s32 txgbe_get_bus_info(struct txgbe_hw *hw) +{ + u16 link_status; + + DEBUGFUNC("\n"); + + /* Get the negotiated link width and speed from PCI config space */ + link_status = txgbe_read_pci_cfg_word(hw, TXGBE_PCI_LINK_STATUS); + + txgbe_set_pci_config_data(hw, link_status); + + return 0; +} + +/** + * txgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw) +{ + struct txgbe_bus_info *bus = &hw->bus; + u32 reg; + + DEBUGFUNC("\n"); + + reg = rd32(hw, TXGBE_CFG_PORT_ST); + bus->lan_id = TXGBE_CFG_PORT_ST_LAN_ID(reg); + + /* check for a port swap */ + reg = rd32(hw, TXGBE_MIS_PWR); + if (TXGBE_MIS_PWR_LAN_ID_1 == TXGBE_MIS_PWR_LAN_ID(reg)) + bus->func = 0; + else + bus->func = bus->lan_id; +} + +/** + * txgbe_stop_adapter - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. 
+ **/ +s32 txgbe_stop_adapter(struct txgbe_hw *hw) +{ + u16 i; + + DEBUGFUNC("\n"); + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + TCALL(hw, mac.ops.disable_rx); + + /* Set interrupt mask to stop interrupts from being generated */ + txgbe_intr_disable(hw, TXGBE_INTR_ALL); + + /* Clear any pending interrupts, flush previous writes */ + wr32(hw, TXGBE_PX_MISC_IC, 0xffffffff); + wr32(hw, TXGBE_BME_CTL, 0x3); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32m(hw, TXGBE_PX_TR_CFG(i), + TXGBE_PX_TR_CFG_SWFLSH | TXGBE_PX_TR_CFG_ENABLE, + TXGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + wr32m(hw, TXGBE_PX_RR_CFG(i), + TXGBE_PX_RR_CFG_RR_EN, 0); + } + + /* flush all queues disables */ + TXGBE_WRITE_FLUSH(hw); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return txgbe_disable_pcie_master(hw); +} + +/** + * txgbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + DEBUGFUNC("\n"); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, value | 0x3); + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF022, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF022, value | 0x3); + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF023, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF023, value | 0x3); + } + /* To turn on the LED, set mode to ON. */ + led_reg |= index | (index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * txgbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index) +{ + u32 led_reg = rd32(hw, TXGBE_CFG_LED_CTL); + u16 value = 0; + DEBUGFUNC("\n"); + + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF021, value & 0xFFFC); + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF022, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF022, value & 0xFFFC); + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF023, &value); + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xF023, value & 0xFFFC); + } + + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~(index << TXGBE_CFG_LED_CTL_LINK_OD_SHIFT); + led_reg |= index; + wr32(hw, TXGBE_CFG_LED_CTL, led_reg); + TXGBE_WRITE_FLUSH(hw); + return 0; +} + +/** + * txgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +STATIC s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) { + status = 0; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + txgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = rd32(hw, TXGBE_MIS_SWSM); + if (!(swsm & TXGBE_MIS_SWSM_SMBI)) + status = 0; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + /* Set the SW EEPROM semaphore bit to request access */ + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, TXGBE_MNG_SW_SM_SM); + + /* + * If we set the bit successfully then we got + * semaphore. + */ + swsm = rd32(hw, TXGBE_MNG_SW_SM); + if (swsm & TXGBE_MNG_SW_SM_SM) + break; + } + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + txgbe_release_eeprom_semaphore(hw); + status = TXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * txgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw) +{ + if (txgbe_check_mng_access(hw)) { + wr32m(hw, TXGBE_MNG_SW_SM, + TXGBE_MNG_SW_SM_SM, 0); + wr32m(hw, TXGBE_MIS_SWSM, + TXGBE_MIS_SWSM_SMBI, 0); + TXGBE_WRITE_FLUSH(hw); + } +} + +/** + * txgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. 
+ * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 txgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + /* Make sure it is not a multicast address */ + if (TXGBE_IS_MULTICAST(mac_addr)) { + DEBUGOUT("MAC address is multicast\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (TXGBE_IS_BROADCAST(mac_addr)) { + DEBUGOUT("MAC address is broadcast\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + DEBUGOUT("MAC address is all zeros\n"); + status = TXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * txgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + /* select the MAC address */ + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); + + /* setup VMDq pool mapping */ + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, pools >> 32); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + * + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_low = ((u32)addr[5] | + ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high = ((u32)addr[1] | + ((u32)addr[0] << 8)); + if (enable_addr != 0) + rar_high |= TXGBE_PSR_MAC_SWC_AD_H_AV; + + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, rar_low); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + rar_high); + + return 0; +} + +/** + * txgbe_clear_rar - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
+ */ + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, index); + + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0); + + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32m(hw, TXGBE_PSR_MAC_SWC_AD_H, + (TXGBE_PSR_MAC_SWC_AD_H_AD(~0) | + TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + TXGBE_PSR_MAC_SWC_AD_H_AV), + 0); + + return 0; +} + +/** + * txgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + u32 psrctl; + + DEBUGFUNC("\n"); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (txgbe_validate_mac_addr(hw->mac.addr) == + TXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. */ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2], hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + TCALL(hw, mac.ops.set_rar, 0, hw->mac.addr, 0, + TXGBE_PSR_MAC_SWC_AD_H_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + TCALL(hw, mac.ops.clear_vmdq, 0, TXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, i); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + wr32(hw, TXGBE_PSR_MC_TBL(i), 0); + + TCALL(hw, mac.ops.init_uta_tables); + + return 0; +} + +/** + * txgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. 
+ **/ +void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("\n"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("txgbe_add_uc_addr Complete\n"); +} + +/** + * txgbe_update_uc_addr_list - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. + **/ +s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 vmdq; + + DEBUGFUNC("\n"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, 1+i); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_AD_H, 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + txgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_UPE, TXGBE_PSR_CTL_UPE); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_UPE, 0); + } + } + + DEBUGOUT("txgbe_update_uc_addr_list Complete\n"); + return 0; +} + +/** + * txgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. 
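+ * + * For example, with mc_filter_type 0 (case 0 below) the multicast address + * 01:00:5e:00:00:fb yields vector = (0x00 >> 4) | (0xfb << 4) = 0xfb0; + * txgbe_set_mta() then sets bit 16 (0xfb0 & 0x1f) of MTA register + * 0x7d (0xfb0 >> 5).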
+ **/ +STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + DEBUGFUNC("\n"); + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * txgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + DEBUGFUNC("\n"); + + hw->addr_ctrl.mta_in_use++; + + vector = txgbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * txgbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, txgbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + u32 psrctl; + + DEBUGFUNC("\n"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. 
+ */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + wr32a(hw, TXGBE_PSR_MC_TBL(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + DEBUGOUT("txgbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * txgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_enable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + + DEBUGFUNC("\n"); + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= TXGBE_PSR_CTL_MFE | + (hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT); + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 txgbe_disable_mc(struct txgbe_hw *hw) +{ + struct txgbe_addr_filter_info *a = &hw->addr_ctrl; + u32 psrctl; + DEBUGFUNC("\n"); + + if (a->mta_in_use > 0) { + psrctl = rd32(hw, TXGBE_PSR_CTL); + psrctl &= ~(TXGBE_PSR_CTL_MO | TXGBE_PSR_CTL_MFE); + psrctl |= hw->mac.mc_filter_type << TXGBE_PSR_CTL_MO_SHIFT; + wr32(hw, TXGBE_PSR_CTL, psrctl); + } + + return 0; +} + +/** + * txgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 txgbe_fc_enable(struct txgbe_hw *hw) +{ + s32 ret_val = 0; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + int i; + + DEBUGFUNC("\n"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* Negotiate the fc mode to use */ + txgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(hw, TXGBE_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~(TXGBE_MAC_RX_FLOW_CTRL_PFCE | + TXGBE_MAC_RX_FLOW_CTRL_RFE); + + fccfg_reg = rd32(hw, TXGBE_RDB_RFCC); + fccfg_reg &= ~(TXGBE_RDB_RFCC_RFCE_802_3X | + TXGBE_RDB_RFCC_RFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. 
+ * other: Invalid. + */ + switch (hw->fc.current_mode) { + case txgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case txgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + break; + case txgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + case txgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= TXGBE_MAC_RX_FLOW_CTRL_RFE; + fccfg_reg |= TXGBE_RDB_RFCC_RFCE_802_3X; + break; + default: + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = TXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + wr32(hw, TXGBE_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(hw, TXGBE_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & txgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | + TXGBE_RDB_RFCL_XONE; + wr32(hw, TXGBE_RDB_RFCL(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | + TXGBE_RDB_RFCH_XOFFE; + } else { + wr32(hw, TXGBE_RDB_RFCL(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(hw, TXGBE_RDB_PB_SZ(i)) - 24576; + } + + wr32(hw, TXGBE_RDB_RFCH(i), fcrth); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (TXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + wr32(hw, TXGBE_RDB_RFCV(i), reg); + + /* Configure flow control refresh threshold value */ + wr32(hw, TXGBE_RDB_RFCRT, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * txgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +STATIC s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(TXGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return TXGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. 
Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == txgbe_fc_full) { + hw->fc.current_mode = txgbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = txgbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = txgbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = txgbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = txgbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return 0; +} + +/** + * txgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. + **/ +STATIC s32 txgbe_fc_autoneg_fiber(struct txgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg; + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + + pcs_anadv_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_AN_ADV); + pcs_lpab_reg = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_LP_BABL); + + ret_val = txgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM, + TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM); + + return ret_val; +} + +/** + * txgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 txgbe_fc_autoneg_backplane(struct txgbe_hw *hw) +{ + u32 anlp1_reg, autoc_reg; + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG1); + anlp1_reg = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_LP_ABL1); + + ret_val = txgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM, + TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM); + + return ret_val; +} + +/** + * txgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 txgbe_fc_autoneg_copper(struct txgbe_hw *hw) +{ + u8 technology_ability_reg = 0; + u8 lp_technology_ability_reg = 0; + + txgbe_get_phy_advertised_pause(hw, &technology_ability_reg); + txgbe_get_lp_advertised_pause(hw, &lp_technology_ability_reg); + + return txgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE, + TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE); +} + +/** + * txgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void txgbe_fc_autoneg(struct txgbe_hw *hw) +{ + s32 ret_val = TXGBE_ERR_FC_NOT_NEGOTIATED; + u32 speed; + bool link_up; + + DEBUGFUNC("\n"); + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(TXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(TXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case txgbe_media_type_fiber: + if (speed == TXGBE_LINK_SPEED_1GB_FULL) + ret_val = txgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case txgbe_media_type_backplane: + ret_val = txgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case txgbe_media_type_copper: + if (txgbe_device_supports_autoneg_fc(hw)) + ret_val = txgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * txgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. TXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw) +{ + s32 status = 0; + u32 i; + struct txgbe_adapter *adapter = hw->back; + unsigned int num_vfs = adapter->num_vfs; + u16 dev_ctl; + u32 vf_bme_clear = 0; + + DEBUGFUNC("\n"); + + /* Always set this bit to ensure any future transactions are blocked */ + pci_clear_master(((struct txgbe_adapter *)hw->back)->pdev); + + /* Exit if master requests are blocked */ + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING)) || + TXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* BME disable handshake will not be finished if any VF BME is 0 */ + for (i = 0; i < num_vfs; i++) { + struct pci_dev *vfdev = adapter->vfinfo[i].vfdev; + if (!vfdev) + continue; + pci_read_config_word(vfdev, 0x4, &dev_ctl); + if ((dev_ctl & 0x4) == 0) { + vf_bme_clear = 1; + break; + } + } + + /* Poll for master request bit to clear */ + for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(rd32(hw, TXGBE_PX_TRANSACTION_PENDING))) + goto out; + } + + if (!vf_bme_clear) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PCIe transaction pending bit did not clear.\n"); + status = TXGBE_ERR_MASTER_REQUESTS_PENDING; + } + +out: + return status; +} + + +/** + * txgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 16; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (txgbe_get_eeprom_semaphore(hw)) + return TXGBE_ERR_SWFW_SYNC; + + if (txgbe_check_mng_access(hw)) { + gssr = rd32(hw, TXGBE_MNG_SWFW_SYNC); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + wr32(hw, TXGBE_MNG_SWFW_SYNC, gssr); + txgbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + txgbe_release_eeprom_semaphore(hw); + 
msec_delay(5); + } + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + txgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return TXGBE_ERR_SWFW_SYNC; +} + +/** + * txgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask) +{ + txgbe_get_eeprom_semaphore(hw); + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_SWFW_SYNC, mask, 0); + + txgbe_release_eeprom_semaphore(hw); +} + +/** + * txgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw) +{ +#define TXGBE_MAX_SECRX_POLL 40 + + int i; + int secrxreg; + + DEBUGFUNC("\n"); + + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, TXGBE_RSC_CTL_RX_DIS); + for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) { + secrxreg = rd32(hw, TXGBE_RSC_ST); + if (secrxreg & TXGBE_RSC_ST_RSEC_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= TXGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; +} + +/** + * txgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw) +{ + DEBUGFUNC("\n"); + + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_RX_DIS, 0); + TXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + DEBUGFUNC("\n"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + TXGBE_SAN_MAC_ADDR_PTR); + } + + return ret_val; +} + +/** + * txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + DEBUGFUNC("\n"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. 
+ */ + ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + goto san_mac_addr_out; + + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = TCALL(hw, eeprom.ops.read, san_mac_offset, + &san_mac_data); + if (ret_val) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return 0; + +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return 0; +} + +/** + * txgbe_set_san_mac_addr - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. + **/ +s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("\n"); + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return TXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + TCALL(hw, eeprom.ops.write, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return 0; +} + +/** + * txgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("\n"); + + /* swap bytes for HW little endian */ + addr_low = addr[5] | (addr[4] << 8) + | (addr[3] << 16) + | (addr[2] << 24); + addr_high = addr[1] | (addr[0] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top.
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar); + rar_high = rd32(hw, TXGBE_PSR_MAC_SWC_AD_H); + + if (((TXGBE_PSR_MAC_SWC_AD_H_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = rd32(hw, TXGBE_PSR_MAC_SWC_AD_L); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + TCALL(hw, mac.ops.set_vmdq, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + TCALL(hw, mac.ops.set_rar, rar, addr, vmdq, + TXGBE_PSR_MAC_SWC_AD_H_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return TXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + TCALL(hw, mac.ops.clear_vmdq, rar, 0); + + return rar; +} + +/** + * txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(hw, TXGBE_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(hw, TXGBE_PSR_MAC_SWC_VM_H); + + if (TXGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo && !mpsar_hi) + goto done; + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + TCALL(hw, mac.ops.clear_rar, rar); +done: + return 0; +} + +/** + * txgbe_set_vmdq - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 pool) +{ + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("\n"); + UNREFERENCED_PARAMETER(pool); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return TXGBE_ERR_INVALID_ARGUMENT; + } + + return 0; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. 
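+ * For example (an assumed configuration, not a requirement): with 7 VFs
+ * advertised the default pool is pool 7, so bit 7 of
+ * TXGBE_PSR_MAC_SWC_VM_L is set for the SAN MAC RAR and
+ * TXGBE_PSR_MAC_SWC_VM_H is cleared.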
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * txgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + DEBUGFUNC("\n"); + + wr32(hw, TXGBE_PSR_MAC_SWC_IDX, rar); + if (vmdq < 32) { + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 1 << vmdq); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 0); + } else { + wr32(hw, TXGBE_PSR_MAC_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_MAC_SWC_VM_H, 1 << (vmdq - 32)); + } + + return 0; +} + +/** + * txgbe_init_uta_tables - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 txgbe_init_uta_tables(struct txgbe_hw *hw) +{ + int i; + + DEBUGFUNC("\n"); + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + wr32(hw, TXGBE_PSR_UC_TBL(i), 0); + + return 0; +} + +/** + * txgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < TXGBE_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(hw, TXGBE_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than TXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= TXGBE_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + ERROR_REPORT1(TXGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = TXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * txgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = 0; + bool vfta_changed = false; + + DEBUGFUNC("\n"); + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
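+	 * Worked example (purely illustrative): VLAN 100 selects VFTA register
+	 * (100 >> 5) & 0x7F = 3, bit 100 & 0x1F = 4 within that register.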
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = hw->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call txgbe_set_vlvf to set VLVFB and VLVF + */ + ret_val = txgbe_set_vlvf(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != 0) + return ret_val; + + if (vfta_changed) + wr32(hw, TXGBE_PSR_VLAN_TBL(regindex), vfta); + /* errata 5 */ + hw->mac.vft_shadow[regindex] = vfta; + return 0; +} + +/** + * txgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + DEBUGFUNC("\n"); + + if (vlan > 4095) + return TXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(hw, TXGBE_CFG_PORT_CTL); + if (vt & TXGBE_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits; + + vlvf_index = txgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_L, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(hw, + TXGBE_PSR_VLAN_SWC_VM_H, + bits); + bits |= rd32(hw, + TXGBE_PSR_VLAN_SWC_VM_L); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + wr32(hw, TXGBE_PSR_VLAN_SWC, + (TXGBE_PSR_VLAN_SWC_VIEN | vlan)); + if ((!vlan_on) && (vfta_changed != NULL)) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + *vfta_changed = false; + } + } else + wr32(hw, TXGBE_PSR_VLAN_SWC, 0); + } + + return 0; +} + +/** + * txgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 txgbe_clear_vfta(struct txgbe_hw *hw) +{ + u32 offset; + + DEBUGFUNC("\n"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) { + wr32(hw, TXGBE_PSR_VLAN_TBL(offset), 0); + /* errata 5 */ + hw->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < TXGBE_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(hw, TXGBE_PSR_VLAN_SWC_IDX, offset); + wr32(hw, TXGBE_PSR_VLAN_SWC, 0); + wr32(hw, TXGBE_PSR_VLAN_SWC_VM_L, 0); + wr32(hw, TXGBE_PSR_VLAN_SWC_VM_H, 0); + } + + return 0; +} + +/** + * txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. + **/ +s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + DEBUGFUNC("\n"); + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = hw->eeprom.sw_region_offset + TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (TCALL(hw, eeprom.ops.read, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, wwnn_prefix)) { + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } + + offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (TCALL(hw, eeprom.ops.read, offset, wwpn_prefix)) + goto wwn_prefix_err; + +wwn_prefix_out: + return 0; + +wwn_prefix_err: + ERROR_REPORT2(TXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return 0; +} + + +/** + * txgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf) +{ + u64 pfvfspoof = 0; + + DEBUGFUNC("\n"); + + if (enable) { + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. 
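+		 * For example, pf = 8 (an assumed value) gives
+		 * (1 << 8) - 1 = 0xFF, i.e. anti-spoofing is enabled for
+		 * pools 0-7 only.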
+ */ + pfvfspoof = (1 << pf) - 1; + wr32(hw, TXGBE_TDM_MAC_AS_L, + pfvfspoof & 0xffffffff); + wr32(hw, TXGBE_TDM_MAC_AS_H, pfvfspoof >> 32); + } else { + wr32(hw, TXGBE_TDM_MAC_AS_L, 0); + wr32(hw, TXGBE_TDM_MAC_AS_H, 0); + } +} + +/** + * txgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf) +{ + u32 pfvfspoof; + + DEBUGFUNC("\n"); + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_VLAN_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_VLAN_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_VLAN_AS_H, pfvfspoof); + } +} + +/** + * txgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf) +{ + u32 pfvfspoof; + + DEBUGFUNC("\n"); + + if (vf < 32) { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_L); + if (enable) + pfvfspoof |= (1 << vf); + else + pfvfspoof &= ~(1 << vf); + wr32(hw, TXGBE_TDM_ETYPE_AS_L, pfvfspoof); + } else { + pfvfspoof = rd32(hw, TXGBE_TDM_ETYPE_AS_H); + if (enable) + pfvfspoof |= (1 << (vf - 32)); + else + pfvfspoof &= ~(1 << (vf - 32)); + wr32(hw, TXGBE_TDM_ETYPE_AS_H, pfvfspoof); + } +} + +/** + * txgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps) +{ + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * txgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 txgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("\n"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * txgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. 
On success return 0 + * else return TXGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hicr, i, bi; + u32 hdr_size = sizeof(struct txgbe_hic_hdr); + u16 buf_len; + u32 dword_len; + s32 status = 0; + u32 buf[64] = {}; + + DEBUGFUNC("\n"); + + if (length == 0 || length > TXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (TCALL(hw, mac.ops.acquire_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB) + != 0) { + return TXGBE_ERR_SWFW_SYNC; + } + + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + status = TXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) { + if (txgbe_check_mng_access(hw)) { + wr32a(hw, TXGBE_MNG_MBOX, + i, TXGBE_CPU_TO_LE32(buffer[i])); + /* write flush */ + buf[i] = rd32a(hw, TXGBE_MNG_MBOX, i); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + /* Setting this bit tells the ARC that a new command is pending. */ + if (txgbe_check_mng_access(hw)) + wr32m(hw, TXGBE_MNG_MBOX_CTL, + TXGBE_MNG_MBOX_CTL_SWRDY, TXGBE_MNG_MBOX_CTL_SWRDY); + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + + for (i = 0; i < timeout; i++) { + if (txgbe_check_mng_access(hw)) { + hicr = rd32(hw, TXGBE_MNG_MBOX_CTL); + if ((hicr & TXGBE_MNG_MBOX_CTL_FWRDY)) + break; + } + msec_delay(1); + } + + /* Check command completion */ + if (timeout != 0 && i == timeout) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "write value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buffer[i]); + } + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "read value:\n"); + for (i = 0; i < dword_len; i++) { + ERROR_REPORT1(TXGBE_ERROR_CAUTION, "%x ", buf[i]); + } + + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + if (!return_data) + goto rel_out; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct txgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto rel_out; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + if (txgbe_check_mng_access(hw)) { + buffer[bi] = rd32a(hw, TXGBE_MNG_MBOX, + bi); + TXGBE_LE32_TO_CPUS(&buffer[bi]); + } else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + goto rel_out; + } + } + +rel_out: + TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_MB); + return status; +} + +/** + * txgbe_set_fw_drv_ver - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver 
version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct txgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + DEBUGFUNC("\n"); + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return ret_val; +} + +/** + * txgbe_reset_hostif - send reset cmd to fw + * @hw: pointer to hardware structure + * + * Sends reset cmd to firmware through the manageability + * block. On success return 0 + * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 txgbe_reset_hostif(struct txgbe_hw *hw) +{ + struct txgbe_hic_reset reset_cmd; + int i; + s32 status = 0; + + DEBUGFUNC("\n"); + + reset_cmd.hdr.cmd = FW_RESET_CMD; + reset_cmd.hdr.buf_len = FW_RESET_LEN; + reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + reset_cmd.lan_id = hw->bus.lan_id; + reset_cmd.reset_type = (u16)hw->reset_type; + reset_cmd.hdr.checksum = 0; + reset_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd, + (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&reset_cmd, + sizeof(reset_cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (status != 0) + continue; + + if (reset_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) { + status = 0; + hw->link_status = TXGBE_LINK_STATUS_NONE; + } else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; +} + +s32 txgbe_setup_mac_link_hostif(struct txgbe_hw *hw, u32 speed) +{ + struct txgbe_hic_phy_cfg cmd; + int i; + s32 status = 0; + + DEBUGFUNC("\n"); + + cmd.hdr.cmd = FW_SETUP_MAC_LINK_CMD; + cmd.hdr.buf_len = FW_SETUP_MAC_LINK_LEN; + cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + cmd.lan_id = hw->bus.lan_id; + cmd.phy_mode = 0; + cmd.phy_speed = (u16)speed; + cmd.hdr.checksum = 0; + cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&cmd, + (FW_CEM_HDR_LEN + cmd.hdr.buf_len)); + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + status = txgbe_host_interface_command(hw, (u32 *)&cmd, + sizeof(cmd), + TXGBE_HI_COMMAND_TIMEOUT, + true); + if (status != 0) + continue; + + if (cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + return status; + +} + +u16 txgbe_crc16_ccitt(const 
u8 *buf, int size) +{ + u16 crc = 0; + int i; + while (--size >= 0) { + crc ^= (u16)*buf++ << 8; + for (i = 0; i < 8; i++) { + if (crc & 0x8000) + crc = crc << 1 ^ 0x1021; + else + crc <<= 1; + } + } + return crc; +} + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size) +{ + struct txgbe_hic_upg_start start_cmd; + struct txgbe_hic_upg_write write_cmd; + struct txgbe_hic_upg_verify verify_cmd; + u32 offset; + s32 status = 0; + + DEBUGFUNC("\n"); + + start_cmd.hdr.cmd = FW_FLASH_UPGRADE_START_CMD; + start_cmd.hdr.buf_len = FW_FLASH_UPGRADE_START_LEN; + start_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + start_cmd.module_id = (u8)region; + start_cmd.hdr.checksum = 0; + start_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&start_cmd, + (FW_CEM_HDR_LEN + start_cmd.hdr.buf_len)); + start_cmd.pad2 = 0; + start_cmd.pad3 = 0; + + status = txgbe_host_interface_command(hw, (u32 *)&start_cmd, + sizeof(start_cmd), + TXGBE_HI_FLASH_ERASE_TIMEOUT, + true); + + if (start_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + + for (offset = 0; offset < size;) { + write_cmd.hdr.cmd = FW_FLASH_UPGRADE_WRITE_CMD; + if (size - offset > 248) { + write_cmd.data_len = 248 / 4; + write_cmd.eof_flag = 0; + } else { + write_cmd.data_len = (u8)((size - offset) / 4); + write_cmd.eof_flag = 1; + } + memcpy((u8 *)write_cmd.data, &data[offset], write_cmd.data_len * 4); + write_cmd.hdr.buf_len = (write_cmd.data_len + 1) * 4; + write_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + write_cmd.check_sum = txgbe_crc16_ccitt((u8 *)write_cmd.data, + write_cmd.data_len * 4); + + status = txgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), + TXGBE_HI_FLASH_UPDATE_TIMEOUT, + true); + if (start_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + return status; + } + offset += write_cmd.data_len * 4; + } + + verify_cmd.hdr.cmd = FW_FLASH_UPGRADE_VERIFY_CMD; + verify_cmd.hdr.buf_len = FW_FLASH_UPGRADE_VERIFY_LEN; + verify_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + switch (region) { + case TXGBE_MODULE_EEPROM: + verify_cmd.action_flag = TXGBE_RELOAD_EEPROM; + break; + case TXGBE_MODULE_FIRMWARE: + verify_cmd.action_flag = TXGBE_RESET_FIRMWARE; + break; + case TXGBE_MODULE_HARDWARE: + verify_cmd.action_flag = TXGBE_RESET_LAN; + break; + default: + return status; + } + + verify_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&verify_cmd, + (FW_CEM_HDR_LEN + verify_cmd.hdr.buf_len)); + + status = txgbe_host_interface_command(hw, (u32 *)&verify_cmd, + sizeof(verify_cmd), + TXGBE_HI_FLASH_VERIFY_TIMEOUT, + true); + + if (verify_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) + status = 0; + else { + status = TXGBE_ERR_HOST_INTERFACE_COMMAND; + } + return status; +} + +/** + * txgbe_set_rxpba - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + DEBUGFUNC("\n"); + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers 
requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* txgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= TXGBE_RDB_PB_SZ_SHIFT; + for (; i < (num_pb / 2); i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + /* fall through */ + /* Fall through to configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << TXGBE_RDB_PB_SZ_SHIFT; + for (; i < num_pb; i++) + wr32(hw, TXGBE_RDB_PB_SZ(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = TXGBE_TDB_PB_SZ_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + wr32(hw, TXGBE_TDB_PB_SZ(i), txpktsize); + wr32(hw, TXGBE_TDM_PB_THRE(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < TXGBE_MAX_PB; i++) { + wr32(hw, TXGBE_RDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDB_PB_SZ(i), 0); + wr32(hw, TXGBE_TDM_PB_THRE(i), 0); + } +} + +STATIC const u8 txgbe_emc_temp_data[4] = { + TXGBE_EMC_INTERNAL_DATA, + TXGBE_EMC_DIODE1_DATA, + TXGBE_EMC_DIODE2_DATA, + TXGBE_EMC_DIODE3_DATA +}; +STATIC const u8 txgbe_emc_therm_limit[4] = { + TXGBE_EMC_INTERNAL_THERM_LIMIT, + TXGBE_EMC_DIODE1_THERM_LIMIT, + TXGBE_EMC_DIODE2_THERM_LIMIT, + TXGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * txgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * algorithm: + * T = (-4.8380E+01)N^0 + (3.1020E-01)N^1 + (-1.8201E-04)N^2 + + (8.1542E-08)N^3 + (-1.6743E-11)N^4 + * algorithm with 5% more deviation, easy for implementation + * T = (-50)N^0 + (0.31)N^1 + (-0.0002)N^2 + (0.0000001)N^3 + * + * Returns the thermal sensor data structure + **/ +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw) +{ + s64 tsv; + int i = 0; + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("\n"); + + /* Only support thermal sensors attached to physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + tsv = (s64)(rd32(hw, TXGBE_TS_ST) & + TXGBE_TS_ST_DATA_OUT_MASK); + + tsv = tsv < 1200 ? tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + + for (i = 0; i < 100 ; i++) { + tsv = (s64)rd32(hw, TXGBE_TS_ST); + if (tsv >> 16 == 0x1) { + tsv = tsv & TXGBE_TS_ST_DATA_OUT_MASK; + tsv = tsv < 1200 ? 
tsv : 1200; + tsv = -(48380 << 8) / 1000 + + tsv * (31020 << 8) / 100000 + - tsv * tsv * (18201 << 8) / 100000000 + + tsv * tsv * tsv * (81542 << 8) / 1000000000000 + - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000; + tsv >>= 8; + + data->sensor.temp = (s16)tsv; + break; + } else { + msleep(1); + continue; + } + } + + return 0; +} + +/** + * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +{ + s32 status = 0; + + struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("\n"); + + memset(data, 0, sizeof(struct txgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to SP physical port 0 */ + if (hw->bus.lan_id) + return TXGBE_NOT_IMPLEMENTED; + + wr32(hw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(hw, TXGBE_TS_INT_EN, + TXGBE_TS_INT_EN_ALARM_INT_EN | TXGBE_TS_INT_EN_DALARM_INT_EN); + wr32(hw, TXGBE_TS_EN, TXGBE_TS_EN_ENA); + + + data->sensor.alarm_thresh = 100; + wr32(hw, TXGBE_TS_ALARM_THRE, 677); + data->sensor.dalarm_thresh = 90; + wr32(hw, TXGBE_TS_DALARM_THRE, 614); + + return status; +} + +void txgbe_disable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + DEBUGFUNC("\n"); + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + if (rxctrl & TXGBE_RDB_PB_CTL_RXEN) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + if (pfdtxgswc & TXGBE_PSR_CTL_SW_EN) { + pfdtxgswc &= ~TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~TXGBE_RDB_PB_CTL_RXEN; + wr32(hw, TXGBE_RDB_PB_CTL, rxctrl); + /* errata 14 */ + if (hw->revision_id == TXGBE_SP_MPW) { + do { + do { + if (rd32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_DISABLED) == 1) + break; + msleep(10); + } while (1); + if (rd32m(hw, TXGBE_RDB_TXSWERR, + TXGBE_RDB_TXSWERR_TB_FREE) == 0x143) + break; + else { + wr32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, + TXGBE_RDB_PB_CTL_RXEN); + wr32m(hw, + TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, + ~TXGBE_RDB_PB_CTL_RXEN); + + } + } while (1); + } + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + /* disable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, 0); + } + } +} + +void txgbe_enable_rx(struct txgbe_hw *hw) +{ + u32 pfdtxgswc; + + DEBUGFUNC("\n"); + + /* enable mac receiver */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_RE, TXGBE_MAC_RX_CFG_RE); + + wr32m(hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); + + if (hw->mac.set_lben) { + pfdtxgswc = rd32(hw, TXGBE_PSR_CTL); + pfdtxgswc |= TXGBE_PSR_CTL_SW_EN; + wr32(hw, TXGBE_PSR_CTL, pfdtxgswc); + hw->mac.set_lben = false; + } +} + +/** + * txgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + */ +bool txgbe_mng_present(struct txgbe_hw *hw) +{ + u32 fwsm; + + fwsm = rd32(hw, TXGBE_MIS_ST); + return fwsm & TXGBE_MIS_ST_MNG_INIT_DN; +} + +bool txgbe_check_mng_access(struct txgbe_hw *hw) +{ + bool ret = false; + u32 rst_delay; + u32 i; + + struct txgbe_adapter *adapter = hw->back; + if (!txgbe_mng_present(hw)) + return false; + if (adapter->hw.revision_id != TXGBE_SP_MPW) + return true; + if (!(adapter->flags2 & 
TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) + return true; + + rst_delay = (rd32(&adapter->hw, TXGBE_MIS_RST_ST) & + TXGBE_MIS_RST_ST_RST_INIT) >> + TXGBE_MIS_RST_ST_RST_INI_SHIFT; + for (i = 0; i < rst_delay + 2; i++) { + if (!(adapter->flags2 & TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED)) { + ret = true; + break; + } + msleep(100); + } + return ret; +} + +/** + * txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN; + s32 status = 0; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + DEBUGFUNC("\n"); + + /* Mask off requested but non-supported speeds */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_speed, &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & TXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if ((link_speed == TXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. sapphire uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if ((link_speed == TXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = TCALL(hw, mac.ops.setup_mac_link, + TXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the Tx laser if it has not already been done */ + TCALL(hw, mac.ops.flap_tx_laser); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). 
We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = txgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit) +{ + u32 i = 0; + u32 reg = 0; + int err = 0; + /* if there's flash existing */ + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + /* wait hw load flash done */ + for (i = 0; i < TXGBE_MAX_FLASH_LOAD_POLL_TIME; i++) { + reg = rd32(hw, TXGBE_SPI_ILDR_STATUS); + if (!(reg & check_bit)) { + /* done */ + break; + } + msleep(200); + } + if (i == TXGBE_MAX_FLASH_LOAD_POLL_TIME) { + err = TXGBE_ERR_FLASH_LOADING_FAILED; + } + } + return err; +} + +/* The txgbe_ptype_lookup is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT txgbe_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF txgbe_ptype_lookup[ptype].mac == TXGBE_DEC_PTYPE_MAC_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum txgbe_l2_ptypes to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define TXGBE_PTT(ptype, mac, ip, etype, eip, proto, layer)\ + { ptype, \ + 1, \ + /* mac */ TXGBE_DEC_PTYPE_MAC_##mac, \ + /* ip */ TXGBE_DEC_PTYPE_IP_##ip, \ + /* etype */ TXGBE_DEC_PTYPE_ETYPE_##etype, \ + /* eip */ TXGBE_DEC_PTYPE_IP_##eip, \ + /* proto */ TXGBE_DEC_PTYPE_PROT_##proto, \ + /* layer */ TXGBE_DEC_PTYPE_LAYER_##layer } + +#define TXGBE_UKN(ptype) \ + { ptype, 0, 0, 0, 0, 0, 0, 0 } + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +/* for ((pt=0;pt<256;pt++)); do printf "macro(0x%02X),\n" $pt; done */ +txgbe_dptype txgbe_ptype_lookup[256] = { + TXGBE_UKN(0x00), + TXGBE_UKN(0x01), + TXGBE_UKN(0x02), + TXGBE_UKN(0x03), + TXGBE_UKN(0x04), + TXGBE_UKN(0x05), + TXGBE_UKN(0x06), + TXGBE_UKN(0x07), + TXGBE_UKN(0x08), + TXGBE_UKN(0x09), + TXGBE_UKN(0x0A), + TXGBE_UKN(0x0B), + TXGBE_UKN(0x0C), + TXGBE_UKN(0x0D), + TXGBE_UKN(0x0E), + TXGBE_UKN(0x0F), + + /* L2: mac */ + TXGBE_UKN(0x10), + TXGBE_PTT(0x11, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x12, L2, NONE, NONE, NONE, TS, PAY2), + TXGBE_PTT(0x13, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x14, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x15, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x16, L2, NONE, NONE, NONE, NONE, PAY2), + TXGBE_PTT(0x17, L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + TXGBE_PTT(0x18, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x19, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1A, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1B, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1C, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1D, L2, NONE, NONE, NONE, 
NONE, NONE), + TXGBE_PTT(0x1E, L2, NONE, NONE, NONE, NONE, NONE), + TXGBE_PTT(0x1F, L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + TXGBE_UKN(0x20), + TXGBE_PTT(0x21, IP, FGV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x22, IP, IPV4, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x23, IP, IPV4, NONE, NONE, UDP, PAY4), + TXGBE_PTT(0x24, IP, IPV4, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x25, IP, IPV4, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x26), + TXGBE_UKN(0x27), + TXGBE_UKN(0x28), + TXGBE_PTT(0x29, IP, FGV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2A, IP, IPV6, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x2B, IP, IPV6, NONE, NONE, UDP, PAY3), + TXGBE_PTT(0x2C, IP, IPV6, NONE, NONE, TCP, PAY4), + TXGBE_PTT(0x2D, IP, IPV6, NONE, NONE, SCTP, PAY4), + TXGBE_UKN(0x2E), + TXGBE_UKN(0x2F), + + /* L2: fcoe */ + TXGBE_PTT(0x30, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x31, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x32, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x33, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x34, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x35), + TXGBE_UKN(0x36), + TXGBE_UKN(0x37), + TXGBE_PTT(0x38, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x39, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3A, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3B, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_PTT(0x3C, FCOE, NONE, NONE, NONE, NONE, PAY3), + TXGBE_UKN(0x3D), + TXGBE_UKN(0x3E), + TXGBE_UKN(0x3F), + + TXGBE_UKN(0x40), + TXGBE_UKN(0x41), + TXGBE_UKN(0x42), + TXGBE_UKN(0x43), + TXGBE_UKN(0x44), + TXGBE_UKN(0x45), + TXGBE_UKN(0x46), + TXGBE_UKN(0x47), + TXGBE_UKN(0x48), + TXGBE_UKN(0x49), + TXGBE_UKN(0x4A), + TXGBE_UKN(0x4B), + TXGBE_UKN(0x4C), + TXGBE_UKN(0x4D), + TXGBE_UKN(0x4E), + TXGBE_UKN(0x4F), + TXGBE_UKN(0x50), + TXGBE_UKN(0x51), + TXGBE_UKN(0x52), + TXGBE_UKN(0x53), + TXGBE_UKN(0x54), + TXGBE_UKN(0x55), + TXGBE_UKN(0x56), + TXGBE_UKN(0x57), + TXGBE_UKN(0x58), + TXGBE_UKN(0x59), + TXGBE_UKN(0x5A), + TXGBE_UKN(0x5B), + TXGBE_UKN(0x5C), + TXGBE_UKN(0x5D), + TXGBE_UKN(0x5E), + TXGBE_UKN(0x5F), + TXGBE_UKN(0x60), + TXGBE_UKN(0x61), + TXGBE_UKN(0x62), + TXGBE_UKN(0x63), + TXGBE_UKN(0x64), + TXGBE_UKN(0x65), + TXGBE_UKN(0x66), + TXGBE_UKN(0x67), + TXGBE_UKN(0x68), + TXGBE_UKN(0x69), + TXGBE_UKN(0x6A), + TXGBE_UKN(0x6B), + TXGBE_UKN(0x6C), + TXGBE_UKN(0x6D), + TXGBE_UKN(0x6E), + TXGBE_UKN(0x6F), + TXGBE_UKN(0x70), + TXGBE_UKN(0x71), + TXGBE_UKN(0x72), + TXGBE_UKN(0x73), + TXGBE_UKN(0x74), + TXGBE_UKN(0x75), + TXGBE_UKN(0x76), + TXGBE_UKN(0x77), + TXGBE_UKN(0x78), + TXGBE_UKN(0x79), + TXGBE_UKN(0x7A), + TXGBE_UKN(0x7B), + TXGBE_UKN(0x7C), + TXGBE_UKN(0x7D), + TXGBE_UKN(0x7E), + TXGBE_UKN(0x7F), + + /* IPv4 --> IPv4/IPv6 */ + TXGBE_UKN(0x80), + TXGBE_PTT(0x81, IP, IPV4, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0x82, IP, IPV4, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0x83, IP, IPV4, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0x84, IP, IPV4, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0x85, IP, IPV4, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0x86), + TXGBE_UKN(0x87), + TXGBE_UKN(0x88), + TXGBE_PTT(0x89, IP, IPV4, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0x8A, IP, IPV4, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0x8B, IP, IPV4, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0x8C, IP, IPV4, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0x8D, IP, IPV4, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0x8E), + TXGBE_UKN(0x8F), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0x90, IP, IPV4, IG, NONE, NONE, PAY3), + TXGBE_PTT(0x91, IP, IPV4, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0x92, IP, IPV4, IG, IPV4, NONE, 
PAY3), + TXGBE_PTT(0x93, IP, IPV4, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0x94, IP, IPV4, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0x95, IP, IPV4, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0x96), + TXGBE_UKN(0x97), + TXGBE_UKN(0x98), + TXGBE_PTT(0x99, IP, IPV4, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0x9A, IP, IPV4, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0x9B, IP, IPV4, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0x9C, IP, IPV4, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0x9D, IP, IPV4, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0x9E), + TXGBE_UKN(0x9F), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xA0, IP, IPV4, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xA1, IP, IPV4, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xA2, IP, IPV4, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xA3, IP, IPV4, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xA4, IP, IPV4, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xA5, IP, IPV4, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xA6), + TXGBE_UKN(0xA7), + TXGBE_UKN(0xA8), + TXGBE_PTT(0xA9, IP, IPV4, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xAA, IP, IPV4, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xAB, IP, IPV4, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xAC, IP, IPV4, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xAD, IP, IPV4, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xAE), + TXGBE_UKN(0xAF), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xB0, IP, IPV4, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xB1, IP, IPV4, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xB2, IP, IPV4, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xB3, IP, IPV4, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xB4, IP, IPV4, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xB5, IP, IPV4, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xB6), + TXGBE_UKN(0xB7), + TXGBE_UKN(0xB8), + TXGBE_PTT(0xB9, IP, IPV4, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xBA, IP, IPV4, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xBB, IP, IPV4, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xBC, IP, IPV4, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xBD, IP, IPV4, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xBE), + TXGBE_UKN(0xBF), + + /* IPv6 --> IPv4/IPv6 */ + TXGBE_UKN(0xC0), + TXGBE_PTT(0xC1, IP, IPV6, IPIP, FGV4, NONE, PAY3), + TXGBE_PTT(0xC2, IP, IPV6, IPIP, IPV4, NONE, PAY3), + TXGBE_PTT(0xC3, IP, IPV6, IPIP, IPV4, UDP, PAY4), + TXGBE_PTT(0xC4, IP, IPV6, IPIP, IPV4, TCP, PAY4), + TXGBE_PTT(0xC5, IP, IPV6, IPIP, IPV4, SCTP, PAY4), + TXGBE_UKN(0xC6), + TXGBE_UKN(0xC7), + TXGBE_UKN(0xC8), + TXGBE_PTT(0xC9, IP, IPV6, IPIP, FGV6, NONE, PAY3), + TXGBE_PTT(0xCA, IP, IPV6, IPIP, IPV6, NONE, PAY3), + TXGBE_PTT(0xCB, IP, IPV6, IPIP, IPV6, UDP, PAY4), + TXGBE_PTT(0xCC, IP, IPV6, IPIP, IPV6, TCP, PAY4), + TXGBE_PTT(0xCD, IP, IPV6, IPIP, IPV6, SCTP, PAY4), + TXGBE_UKN(0xCE), + TXGBE_UKN(0xCF), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xD0, IP, IPV6, IG, NONE, NONE, PAY3), + TXGBE_PTT(0xD1, IP, IPV6, IG, FGV4, NONE, PAY3), + TXGBE_PTT(0xD2, IP, IPV6, IG, IPV4, NONE, PAY3), + TXGBE_PTT(0xD3, IP, IPV6, IG, IPV4, UDP, PAY4), + TXGBE_PTT(0xD4, IP, IPV6, IG, IPV4, TCP, PAY4), + TXGBE_PTT(0xD5, IP, IPV6, IG, IPV4, SCTP, PAY4), + TXGBE_UKN(0xD6), + TXGBE_UKN(0xD7), + TXGBE_UKN(0xD8), + TXGBE_PTT(0xD9, IP, IPV6, IG, FGV6, NONE, PAY3), + TXGBE_PTT(0xDA, IP, IPV6, IG, IPV6, NONE, PAY3), + TXGBE_PTT(0xDB, IP, IPV6, IG, IPV6, UDP, PAY4), + TXGBE_PTT(0xDC, IP, IPV6, IG, IPV6, TCP, PAY4), + TXGBE_PTT(0xDD, IP, IPV6, IG, IPV6, SCTP, PAY4), + TXGBE_UKN(0xDE), + TXGBE_UKN(0xDF), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + TXGBE_PTT(0xE0, IP, IPV6, IGM, NONE, NONE, PAY3), + TXGBE_PTT(0xE1, IP, IPV6, IGM, FGV4, NONE, PAY3), + TXGBE_PTT(0xE2, IP, IPV6, IGM, IPV4, NONE, PAY3), + TXGBE_PTT(0xE3, IP, 
IPV6, IGM, IPV4, UDP, PAY4), + TXGBE_PTT(0xE4, IP, IPV6, IGM, IPV4, TCP, PAY4), + TXGBE_PTT(0xE5, IP, IPV6, IGM, IPV4, SCTP, PAY4), + TXGBE_UKN(0xE6), + TXGBE_UKN(0xE7), + TXGBE_UKN(0xE8), + TXGBE_PTT(0xE9, IP, IPV6, IGM, FGV6, NONE, PAY3), + TXGBE_PTT(0xEA, IP, IPV6, IGM, IPV6, NONE, PAY3), + TXGBE_PTT(0xEB, IP, IPV6, IGM, IPV6, UDP, PAY4), + TXGBE_PTT(0xEC, IP, IPV6, IGM, IPV6, TCP, PAY4), + TXGBE_PTT(0xED, IP, IPV6, IGM, IPV6, SCTP, PAY4), + TXGBE_UKN(0xEE), + TXGBE_UKN(0xEF), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + TXGBE_PTT(0xF0, IP, IPV6, IGMV, NONE, NONE, PAY3), + TXGBE_PTT(0xF1, IP, IPV6, IGMV, FGV4, NONE, PAY3), + TXGBE_PTT(0xF2, IP, IPV6, IGMV, IPV4, NONE, PAY3), + TXGBE_PTT(0xF3, IP, IPV6, IGMV, IPV4, UDP, PAY4), + TXGBE_PTT(0xF4, IP, IPV6, IGMV, IPV4, TCP, PAY4), + TXGBE_PTT(0xF5, IP, IPV6, IGMV, IPV4, SCTP, PAY4), + TXGBE_UKN(0xF6), + TXGBE_UKN(0xF7), + TXGBE_UKN(0xF8), + TXGBE_PTT(0xF9, IP, IPV6, IGMV, FGV6, NONE, PAY3), + TXGBE_PTT(0xFA, IP, IPV6, IGMV, IPV6, NONE, PAY3), + TXGBE_PTT(0xFB, IP, IPV6, IGMV, IPV6, UDP, PAY4), + TXGBE_PTT(0xFC, IP, IPV6, IGMV, IPV6, TCP, PAY4), + TXGBE_PTT(0xFD, IP, IPV6, IGMV, IPV6, SCTP, PAY4), + TXGBE_UKN(0xFE), + TXGBE_UKN(0xFF), +}; + + +void txgbe_init_mac_link_ops(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("\n"); + + /* + * enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_fiber) && + !txgbe_mng_present(hw)) { + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + + } else { + mac->ops.disable_tx_laser = + txgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + txgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = txgbe_flap_tx_laser_multispeed_fiber; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = txgbe_setup_mac_link; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } +} + +/** + * txgbe_init_phy_ops - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
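+ * If the module turns out to be unsupported, phy.ops.identify returns
+ * TXGBE_ERR_SFP_NOT_SUPPORTED and the link-ops setup below is skipped.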
+ * + **/ +s32 txgbe_init_phy_ops(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + DEBUGFUNC("\n"); + + txgbe_init_i2c(hw); + /* Identify the PHY or SFP module */ + ret_val = TCALL(hw, phy.ops.identify); + if (ret_val == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + txgbe_init_mac_link_ops(hw); + if (hw->phy.sfp_type != txgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper) { + hw->phy.type = txgbe_phy_xaui; + if ((hw->subsystem_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + mac->ops.setup_link = txgbe_setup_copper_link; + mac->ops.get_link_capabilities = + txgbe_get_copper_link_capabilities; + } + } + +init_phy_ops_out: + return ret_val; +} + + +/** + * txgbe_init_ops - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for sapphire. + * Does not touch the hardware. + **/ + +s32 txgbe_init_ops(struct txgbe_hw *hw) +{ + struct txgbe_mac_info *mac = &hw->mac; + struct txgbe_phy_info *phy = &hw->phy; + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + struct txgbe_flash_info *flash = &hw->flash; + s32 ret_val = 0; + + DEBUGFUNC("\n"); + + /* PHY */ + phy->ops.reset = txgbe_reset_phy; + phy->ops.read_reg = txgbe_read_phy_reg; + phy->ops.write_reg = txgbe_write_phy_reg; + phy->ops.read_reg_mdi = txgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = txgbe_write_phy_reg_mdi; + phy->ops.setup_link = txgbe_setup_phy_link; + phy->ops.setup_link_speed = txgbe_setup_phy_link_speed; + phy->ops.read_i2c_byte = txgbe_read_i2c_byte; + phy->ops.write_i2c_byte = txgbe_write_i2c_byte; + phy->ops.read_i2c_sff8472 = txgbe_read_i2c_sff8472; + phy->ops.read_i2c_eeprom = txgbe_read_i2c_eeprom; + phy->ops.write_i2c_eeprom = txgbe_write_i2c_eeprom; + phy->ops.identify_sfp = txgbe_identify_module; + phy->sfp_type = txgbe_sfp_type_unknown; + phy->ops.check_overtemp = txgbe_tn_check_overtemp; + phy->ops.identify = txgbe_identify_phy; + phy->ops.init = txgbe_init_phy_ops; + + /* MAC */ + mac->ops.init_hw = txgbe_init_hw; + mac->ops.clear_hw_cntrs = txgbe_clear_hw_cntrs; + mac->ops.get_mac_addr = txgbe_get_mac_addr; + mac->ops.stop_adapter = txgbe_stop_adapter; + mac->ops.get_bus_info = txgbe_get_bus_info; + mac->ops.set_lan_id = txgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = txgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = txgbe_release_swfw_sync; + mac->ops.reset_hw = txgbe_reset_hw; + mac->ops.get_media_type = txgbe_get_media_type; + mac->ops.disable_sec_rx_path = txgbe_disable_sec_rx_path; + mac->ops.enable_sec_rx_path = txgbe_enable_sec_rx_path; + mac->ops.enable_rx_dma = txgbe_enable_rx_dma; + mac->ops.start_hw = txgbe_start_hw; + mac->ops.get_san_mac_addr = txgbe_get_san_mac_addr; + mac->ops.set_san_mac_addr = txgbe_set_san_mac_addr; + mac->ops.get_device_caps = txgbe_get_device_caps; + mac->ops.get_wwn_prefix = txgbe_get_wwn_prefix; + mac->ops.setup_eee = txgbe_setup_eee; + + /* LEDs */ + mac->ops.led_on = txgbe_led_on; + mac->ops.led_off = txgbe_led_off; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = txgbe_set_rar; + mac->ops.clear_rar = txgbe_clear_rar; + mac->ops.init_rx_addrs = txgbe_init_rx_addrs; + mac->ops.update_uc_addr_list = txgbe_update_uc_addr_list; + mac->ops.update_mc_addr_list = txgbe_update_mc_addr_list; + mac->ops.enable_mc = txgbe_enable_mc; + 
mac->ops.disable_mc = txgbe_disable_mc; + mac->ops.enable_rx = txgbe_enable_rx; + mac->ops.disable_rx = txgbe_disable_rx; + mac->ops.set_vmdq_san_mac = txgbe_set_vmdq_san_mac; + mac->ops.insert_mac_addr = txgbe_insert_mac_addr; + mac->rar_highwater = 1; + mac->ops.set_vfta = txgbe_set_vfta; + mac->ops.set_vlvf = txgbe_set_vlvf; + mac->ops.clear_vfta = txgbe_clear_vfta; + mac->ops.init_uta_tables = txgbe_init_uta_tables; + mac->ops.set_mac_anti_spoofing = txgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = txgbe_set_vlan_anti_spoofing; + mac->ops.set_ethertype_anti_spoofing = + txgbe_set_ethertype_anti_spoofing; + + /* Flow Control */ + mac->ops.fc_enable = txgbe_fc_enable; + mac->ops.setup_fc = txgbe_setup_fc; + + /* Link */ + mac->ops.get_link_capabilities = txgbe_get_link_capabilities; + mac->ops.check_link = txgbe_check_mac_link; + mac->ops.setup_rxpba = txgbe_set_rxpba; + mac->mcft_size = TXGBE_SP_MC_TBL_SIZE; + mac->vft_size = TXGBE_SP_VFT_TBL_SIZE; + mac->num_rar_entries = TXGBE_SP_RAR_ENTRIES; + mac->rx_pb_size = TXGBE_SP_RX_PB_SIZE; + mac->max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + mac->max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + mac->max_msix_vectors = txgbe_get_pcie_msix_count(hw); + + mac->arc_subsystem_valid = (rd32(hw, TXGBE_MIS_ST) & + TXGBE_MIS_ST_MNG_INIT_DN) ? true : false; + + hw->mbx.ops.init_params = txgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.init_params = txgbe_init_eeprom_params; + eeprom->ops.calc_checksum = txgbe_calc_eeprom_checksum; + eeprom->ops.read = txgbe_read_ee_hostif; + eeprom->ops.read_buffer = txgbe_read_ee_hostif_buffer; + eeprom->ops.write = txgbe_write_ee_hostif; + eeprom->ops.write_buffer = txgbe_write_ee_hostif_buffer; + eeprom->ops.update_checksum = txgbe_update_eeprom_checksum; + eeprom->ops.validate_checksum = txgbe_validate_eeprom_checksum; + + /* FLASH */ + flash->ops.init_params = txgbe_init_flash_params; + flash->ops.read_buffer = txgbe_read_flash_buffer; + flash->ops.write_buffer = txgbe_write_flash_buffer; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = txgbe_set_fw_drv_ver; + + mac->ops.get_thermal_sensor_data = + txgbe_get_thermal_sensor_data; + mac->ops.init_thermal_sensor_thresh = + txgbe_init_thermal_sensor_thresh; + + return ret_val; +} + +/** + * txgbe_get_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status = 0; + u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl; + u32 sr_an_mmd_adv_reg2; + + DEBUGFUNC("\n"); + + /* Check if 1G SFP module. 
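+	 * 1G modules are reported as fixed 1 Gb/s full duplex with
+	 * autonegotiation disabled.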
*/ + if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) { + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + } else if (hw->phy.multispeed_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL | + TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + /* SFP */ + else if (txgbe_get_media_type(hw) == txgbe_media_type_fiber) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + } + /* XAUI */ + else if ((txgbe_get_media_type(hw) == txgbe_media_type_copper) && + ((hw->subsystem_id & 0xF0) == TXGBE_ID_XAUI || + (hw->subsystem_id & 0xF0) == TXGBE_ID_SFI_XAUI)) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_T; + } + /* SGMII */ + else if ((hw->subsystem_id & 0xF0) == TXGBE_ID_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | + TXGBE_LINK_SPEED_10_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_T | + TXGBE_PHYSICAL_LAYER_100BASE_TX; + /* MAC XAUI */ + } else if ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_XAUI) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + /* MAC SGMII */ + } else if ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } + /* KR KX KX4 */ + else { + /* + * Determine link capabilities based on the stored value, + * which represents EEPROM defaults. If value has not + * been stored, use the current register values. 
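+		 * As an illustration of the decoding below: PCS type "X"
+		 * plus a 1G PMA speed selection with auto-negotiation
+		 * disabled is reported as fixed 1000BASE-KX.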
+ */ + if (hw->mac.orig_link_settings_stored) { + sr_pcs_ctl = hw->mac.orig_sr_pcs_ctl2; + sr_pma_mmd_ctl1 = hw->mac.orig_sr_pma_mmd_ctl1; + sr_an_mmd_ctl = hw->mac.orig_sr_an_mmd_ctl; + sr_an_mmd_adv_reg2 = hw->mac.orig_sr_an_mmd_adv_reg2; + } else { + sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, + TXGBE_SR_PMA_MMD_CTL1); + sr_an_mmd_ctl = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_CTL); + sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + } + + if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 1G or KX - no backplane auto-negotiation */ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X && + (sr_pma_mmd_ctl1 & TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK) + == TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4; + } else if ((sr_pcs_ctl & TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK) == + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R && + (sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE) == 0) { + /* 10 GbE serial link (KR -no backplane auto-negotiation) */ + *speed = TXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR; + } else if ((sr_an_mmd_ctl & TXGBE_SR_AN_MMD_CTL_ENABLE)) { + /* KX/KX4/KR backplane auto-negotiation enable */ + *speed = TXGBE_LINK_SPEED_UNKNOWN; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (sr_an_mmd_adv_reg2 & + TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR | + TXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + TXGBE_PHYSICAL_LAYER_1000BASE_KX; + } else { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + } + +out: + return status; +} + +/** + * txgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum txgbe_media_type txgbe_get_media_type(struct txgbe_hw *hw) +{ + enum txgbe_media_type media_type; + u8 device_type = hw->subsystem_id & 0xF0; + + DEBUGFUNC("\n"); + + /* Detect if there is a copper PHY attached. 
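+ * Otherwise the device-type field (subsystem_id & 0xF0) selects the
+ * media in the switch below: SFP maps to fiber; KR_KX_KX4, MAC_XAUI and
+ * MAC_SGMII map to backplane; XAUI and SGMII map to copper; SFI_XAUI
+ * picks fiber on LAN0 and copper on LAN1.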
*/ + switch (hw->phy.type) { + case txgbe_phy_cu_unknown: + case txgbe_phy_tn: + media_type = txgbe_media_type_copper; + goto out; + default: + break; + } + + switch (device_type) { + case TXGBE_ID_MAC_XAUI: + case TXGBE_ID_MAC_SGMII: + case TXGBE_ID_KR_KX_KX4: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = txgbe_media_type_backplane; + break; + case TXGBE_ID_SFP: + media_type = txgbe_media_type_fiber; + break; + case TXGBE_ID_XAUI: + case TXGBE_ID_SGMII: + media_type = txgbe_media_type_copper; + break; + case TXGBE_ID_SFI_XAUI: + if (hw->bus.lan_id == 0) + media_type = txgbe_media_type_fiber; + else + media_type = txgbe_media_type_copper; + break; + default: + media_type = txgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * txgbe_stop_mac_link_on_d3 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void txgbe_stop_mac_link_on_d3(struct txgbe_hw *hw) +{ + /* fix autoc2 */ + UNREFERENCED_PARAMETER(hw); + return; +} + + +/** + * txgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); + + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + + /* Disable Tx laser; allow 100us to go dark per spec */ + esdp_reg |= TXGBE_GPIO_DR_1 | TXGBE_GPIO_DR_0; + wr32(hw, TXGBE_GPIO_DR, esdp_reg); + TXGBE_WRITE_FLUSH(hw); + usec_delay(100); +} + +/** + * txgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + /* Enable Tx laser; allow 100ms to light up */ + wr32m(hw, TXGBE_GPIO_DR, + TXGBE_GPIO_DR_0 | TXGBE_GPIO_DR_1, 0); + TXGBE_WRITE_FLUSH(hw); + msec_delay(100); +} + +/** + * txgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + **/ +void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +{ + DEBUGFUNC("\n"); + + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + + if (hw->mac.autotry_restart) { + txgbe_disable_tx_laser_multispeed_fiber(hw); + txgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * txgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via RS0/RS1 rate select pins. 
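+ * The pins are driven through TXGBE_GPIO_DR bits 4 and 5.
+ *
+ * Rough usage sketch (hypothetical caller; only the two speeds below
+ * are accepted, anything else just logs a debug message):
+ *
+ *    txgbe_set_hard_rate_select_speed(hw, TXGBE_LINK_SPEED_1GB_FULL);
+ *    ... wait for link or time out, then retry the faster rate ...
+ *    txgbe_set_hard_rate_select_speed(hw, TXGBE_LINK_SPEED_10GB_FULL);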
+ */ +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed) +{ + u32 esdp_reg = rd32(hw, TXGBE_GPIO_DR); + + switch (speed) { + case TXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~(TXGBE_GPIO_DR_5 | TXGBE_GPIO_DR_4); + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + wr32(hw, TXGBE_GPIO_DDR, + TXGBE_GPIO_DDR_5 | TXGBE_GPIO_DDR_4 | + TXGBE_GPIO_DDR_1 | TXGBE_GPIO_DDR_0); + + wr32(hw, TXGBE_GPIO_DR, esdp_reg); + + TXGBE_WRITE_FLUSH(hw); +} + +s32 txgbe_enable_rx_adapter(struct txgbe_hw *hw) +{ + u32 value; + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value |= 1 << 12; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + value = 0; + while (!(value >> 11)) { + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_AD_ACK); + msleep(1); + } + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value &= ~(1 << 12); + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + + return 0; +} + +s32 txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) +{ + u32 value; + + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002); + /* for sgmii + external phy, set to 0x0105 (mac sgmii mode) */ + if ((hw->subsystem_id & 0xF0) == TXGBE_ID_SGMII) { + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0105); + } + /* for sgmii direct link, set to 0x010c (phy sgmii mode) */ + if ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_SGMII) { + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x010c); + } + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0200); + value = txgbe_rd32_epcs(hw, TXGBE_SR_MII_MMD_CTL); + value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9); + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, value); + return 0; +} + + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + e_dev_info("It is set to kr.\n"); + + txgbe_wr32_epcs(hw, 0x78001, 0x7); + txgbe_wr32_epcs(hw, 0x18035, 0x00FC); + txgbe_wr32_epcs(hw, 0x18055, 0x00FC); + + if (1) { + /* 2. Disable xpcs AN-73 */ + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + +#if 0 + if (autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); +#endif + txgbe_wr32_epcs(hw, 0x78003, 0x1); + if (!(adapter->backplane_an == 1)) { + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0000); + txgbe_wr32_epcs(hw, 0x78003, 0x0); + } + + if (KR_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KR) { + e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + + if (KR_AN73_PRESET == 1) { + txgbe_wr32_epcs(hw, 0x18037, 0x80); + } + + if (KR_POLLING == 1) { + txgbe_wr32_epcs(hw, 0x18006, 0xffff); + txgbe_wr32_epcs(hw, 0x18008, 0xA697); + } + + /* 3. 
Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register */ + /* Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR); + + /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register */ + /* Bit[12:8](RX_VREF_CTRL) = 5'hF (default: 5'h11) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, + 0xCF00); + + /* 5. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register */ + /* Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5](CTLE_POLE_0) = 3'h2 + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, + 0x774A); + + /* 6. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register */ + /* Bit[2:0](LOS_TRSHLD_0) = 3'h4 (default: 3) */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, + 0x0004); + /* 7. Initialize the mode by setting VR XS or PCS MMD Digital */ + /* Control1 Register Bit[15](VR_RST) */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, + 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + } else { + txgbe_wr32_epcs(hw, TXGBE_VR_AN_KR_MODE_CL, + 0x1); + } +out: + return status; +} + +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) +{ + u32 i; + s32 status = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX4) { + goto out; + } + e_dev_info("It is set to kx4.\n"); + + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + + /* 2. 
Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2501); + /* Reset rx lane0-3 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + txgbe_wr32_ephy(hw, 0x1105, 0x4001); + txgbe_wr32_ephy(hw, 0x1205, 0x4001); + txgbe_wr32_ephy(hw, 0x1305, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x250A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b1 SS13: 10G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G); + + value = (0xf5f0 & ~0x7F0) | (0x5 << 8) | (0x7 << 5) | 0xF0; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + if ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_XAUI) + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + else + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00); + + if (KX4_SET == 1 || adapter->ffe_set) { + e_dev_info("Set KX4 TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else { + value = (0x1804 & ~0x3F3F); + value |= 40 << 8 ; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + + } + for (i = 0; i < 4; i++) { + if (i == 0) + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + else + value = (0xff06 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7777; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0xFFF) | (0x1 << 9) | (0x1 << 6) | (0x1 << 3) | 0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 0 Register Bit[7:0] = 8'd40 MPLLA_MULTIPLIER */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA */ + /* Control 3 Register Bit[10:0] = 11'd86 MPLLA_BANDWIDTH */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 0 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_0 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 1 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 2 Register Bit[12:0] = 13'd1360 
VCO_LD_VAL_2 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Load 3 Register Bit[12:0] = 13'd1360 VCO_LD_VAL_3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, + TXGBE_PHY_VCO_CAL_LD0_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Reference 0 Register Bit[5:0] = 6'd34 VCO_REF_LD_0/1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO */ + /* Calibration Reference 1 Register Bit[5:0] = 6'd34 VCO_REF_LD_2/3 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE */ + /* Enable Register Bit[7:0] = 8'd0 AFE_EN_0/3_1, DFE_EN_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx */ + /* Equalization Control 4 Register Bit[3:0] = 4'd0 CONT_ADAPT_0/3_1 */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x00F0); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate */ + /* Control Register Bit[14:12], Bit[10:8], Bit[6:4], Bit[2:0], + * all rates to 3'b010 TX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate */ + /* Control Register Bit[13:12], Bit[9:8], Bit[5:4], Bit[1:0], + * all rates to 2'b10 RX0/1/2/3_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + 0x2222); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General */ + /* Control 2 Register Bit[15:8] = 2'b01 TX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + 0x5500); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General */ + /* Control 2 Register Bit[15:8] = 2'b01 RX0/1/2/3_WIDTH: 10bits */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + 0x5500); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 + * MPLLA_DIV16P5_CLK_EN=0, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + + txgbe_wr32_epcs(hw, 0x1f0000, 0x0); + txgbe_wr32_epcs(hw, 0x1f8001, 0x0); + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_DIGI_CTL, 0x0); + + if (KX4_TXRX_PIN == 1) + txgbe_wr32_epcs(hw, 0x38001, 0xff); + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX4; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + +out: + return status; +} + + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg) +{ + u32 i; + s32 status = 0; + u32 wdata = 0; + u32 value; + struct txgbe_adapter *adapter = hw->back; + + /* check link status, if already set, skip setting it again */ + if (hw->link_status == TXGBE_LINK_STATUS_KX) { + goto out; + } + e_dev_info("It is set to kx. speed =0x%x\n", speed); + + txgbe_wr32_epcs(hw, 0x18035, 0x00FC); + txgbe_wr32_epcs(hw, 0x18055, 0x00FC); + + /* 1. 
Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + else + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x3000); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } else { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x240A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + + /* Set the eth change_mode bit first in mis_rst register */ + /* for corresponding LAN port */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + + /* Set SR PCS Control2 Register Bits[1:0] = 2'b01 PCS_TYPE_SEL: non KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, + TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X); + + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, + TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G); + + /* Set SR MII MMD Control Register to corresponding speed: {Bit[6], + * Bit[13]}=[2'b00,2'b01,2'b10]->[10M,100M,1G] + */ + if (speed == TXGBE_LINK_SPEED_100_FULL) + wdata = 0x2100; + else if (speed == TXGBE_LINK_SPEED_1GB_FULL) + wdata = 0x0140; + else if (speed == TXGBE_LINK_SPEED_10_FULL) + wdata = 0x0100; + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, + wdata); + + value = (0xf5f0 & ~0x710) | (0x5 << 8)| 0x10; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + + if (KX_SGMII == 1) + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00); + else + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + + if (KX_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KX) { + e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. 
Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else { + value = (0x1804 & ~0x3F3F) | (24 << 8) | 4; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | 16 | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + + for (i = 0; i < 4; i++) { + if (i) { + value = 0xff06; + } else { + value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6; + } + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value); + } + + value = 0x0 & ~0x7; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + + value = (0x6db & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, + TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX); + + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control 3 + * Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, + TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, + TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, 0x549); + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, 0x549); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, + TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX); + + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2929); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE Enable + * Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, + 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, + 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, + TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, + TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX); + + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, + TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, + TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, + TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10); + /* VR MII MMD AN 
Control Register Bit[8] = 1'b1 MII_CTRL */ + /* Set to 8bit MII (required in 10M/100M SGMII) */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, + 0x0100); + + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + + /* if success, set link status */ + hw->link_status = TXGBE_LINK_STATUS_KX; + + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + +out: + return status; +} + +s32 txgbe_set_link_to_sfi(struct txgbe_hw *hw, + u32 speed) +{ + u32 i; + s32 status = 0; + u32 value = 0; + struct txgbe_adapter *adapter = hw->back; + + /* Set the module link speed */ + TCALL(hw, mac.ops.set_rate_select_speed, + speed); + + e_dev_info("It is set to sfi.\n"); + /* 1. Wait xpcs power-up good */ + for (i = 0; i < TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) == + TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD) + break; + msleep(10); + } + if (i == TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME) { + status = TXGBE_ERR_XPCS_POWER_UP_FAILED; + goto out; + } + + wr32m(hw, TXGBE_MAC_TX_CFG, TXGBE_MAC_TX_CFG_TE, + ~TXGBE_MAC_TX_CFG_TE); + + /* 2. Disable xpcs AN-73 */ + txgbe_wr32_epcs(hw, TXGBE_SR_AN_MMD_CTL, 0x0); + + if (hw->revision_id != TXGBE_SP_MPW) { + /* Disable PHY MPLLA for eth mode change(after ECO) */ + txgbe_wr32_ephy(hw, 0x4, 0x243A); + TXGBE_WRITE_FLUSH(hw); + msleep(1); + /* Set the eth change_mode bit first in mis_rst register + * for corresponding LAN port + */ + if (hw->bus.lan_id == 0) + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN0_CHG_ETH_MODE); + else + wr32(hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_LAN1_CHG_ETH_MODE); + } + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0); + value = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + value = value | 0x2000; + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, value); + /* @. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL0 Register Bit[7:0] = 8'd33 + * MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0021); + /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register + * Bit[10:0](MPLLA_BANDWIDTH) = 11'd0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x700) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + /* 4.Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register Bit[12:8](RX_VREF_CTRL) + * = 5'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_LD0 Register Bit[12:0] = 13'd1353 + * VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0549); + /* @. Set VR_XS_PMA_Gen5_12G_VCO_CAL_REF0 Register Bit[5:0] = 6'd41 + * VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x0029); + /* @. Set VR_XS_PMA_Gen5_12G_TX_RATE_CTRL Register Bit[2:0] = 3'b000 + * TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0); + /* @. Set VR_XS_PMA_Gen5_12G_RX_RATE_CTRL Register Bit[2:0] = 3'b000 + * RX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0); + /* @. 
Set VR_XS_PMA_Gen5_12G_TX_GENCTRL2 Register Bit[9:8] = 2'b11 + * TX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0300); + /* @. Set VR_XS_PMA_Gen5_12G_RX_GENCTRL2 Register Bit[9:8] = 2'b11 + * RX0_WIDTH: 20bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0300); + /* @. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL2 Register Bit[10:8] = 3'b110 + * MPLLA_DIV16P5_CLK_EN=1, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0600); + if (SFI_SET == 1 || adapter->ffe_set) { + e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else { + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (24 << 8) | 4; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | 16 | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register + * Bit[15:8](VGA1/2_GAIN_0) = 8'h77, Bit[7:5] + * (CTLE_POLE_0) = 3'h2, Bit[4:0](CTLE_BOOST_0) = 4'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + + } else { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | (2 << 5) | 0x05; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0014); + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x11; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } else { + /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd20 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0xBE); + /* 9. 
Set VR_MII_Gen5_12G_AFE_DFE_EN_CTRL Register Bit[4](DFE_EN_0) = + * 1'b0, Bit[0](AFE_EN_0) = 1'b0 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE); + value = (value & ~0x11) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL); + value = value & ~0x1; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value); + } else { + if (hw->revision_id == TXGBE_SP_MPW) { + /* Disable PHY MPLLA */ + txgbe_wr32_ephy(hw, 0x4, 0x2401); + /* Reset rx lane0 clock */ + txgbe_wr32_ephy(hw, 0x1005, 0x4001); + } + /* @. Set SR PCS Control2 Register Bits[1:0] = 2'b00 PCS_TYPE_SEL: KR */ + txgbe_wr32_epcs(hw, TXGBE_SR_PCS_CTL2, 0x1); + /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0 SS13: 1G speed */ + txgbe_wr32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1, 0x0000); + /* Set SR MII MMD Control Register to corresponding speed: */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_CTL, 0x0140); + + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1); + value = (value & ~0x710) | 0x500; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value); + /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register Bit[12:8](RX_VREF_CTRL) + * = 5'hF + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (24 << 8) | 4; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | 16 | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) { + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F); + } else { + /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register Bit[15:8] + * (VGA1/2_GAIN_0) = 8'h00, Bit[7:5](CTLE_POLE_0) = 3'h2, + * Bit[4:0](CTLE_BOOST_0) = 4'hA + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0); + value = (value & ~0xFFFF) | 0x7706; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value); + } + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0); + value = (value & ~0x7) | 0x0; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value); + /* 8. 
Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register Bit[7:0](DFE_TAP1_0) + * = 8'd00 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0); + /* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL3 Register Bit[2:0] LOS_TRSHLD_0 = 4 */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3); + value = (value & ~0x7) | 0x4; + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY + * MPLLA Control 0 Register Bit[7:0] = 8'd32 MPLLA_MULTIPLIER + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0020); + /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control + * 3 Register Bit[10:0] = 11'd70 MPLLA_BANDWIDTH + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0x0046); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Load 0 Register Bit[12:0] = 13'd1344 VCO_LD_VAL_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0540); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO + * Calibration Reference 0 Register Bit[5:0] = 6'd42 VCO_REF_LD_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x002A); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE + * Enable Register Bit[4], Bit[0] = 1'b0 AFE_EN_0, DFE_EN_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx + * Equalization Control 4 Register Bit[0] = 1'b0 CONT_ADAPT_0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate + * Control Register Bit[2:0] = 3'b011 TX0_RATE + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate + * Control Register Bit[2:0] = 3'b011 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x0003); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General + * Control 2 Register Bit[9:8] = 2'b01 TX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General + * Control 2 Register Bit[9:8] = 2'b01 RX0_WIDTH: 10bits + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0100); + /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA + * Control 2 Register Bit[10:8] = 3'b010 MPLLA_DIV16P5_CLK_EN=0, + * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0 + */ + txgbe_wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0200); + /* VR MII MMD AN Control Register Bit[8] = 1'b1 MII_CTRL */ + txgbe_wr32_epcs(hw, TXGBE_SR_MII_MMD_AN_CTL, 0x0100); + } + /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1 + * Register Bit[15](VR_RST) + */ + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + if (i == TXGBE_PHY_INIT_DONE_POLLING_TIME) { + status = TXGBE_ERR_PHY_INIT_NOT_DONE; + goto out; + } + +out: + return status; +} + + +/** + * txgbe_setup_mac_link - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
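+ *
+ * Minimal (hypothetical) usage sketch:
+ *
+ *    s32 err = txgbe_setup_mac_link(hw, TXGBE_LINK_SPEED_10GB_FULL, false);
+ *    if (err)
+ *        DEBUGOUT("link setup failed\n");
+ *
+ * The requested speed is first masked against
+ * mac.ops.get_link_capabilities(), so asking for a speed the device
+ * cannot do returns TXGBE_ERR_LINK_SETUP before any link
+ * reconfiguration is attempted.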
+ **/ +s32 txgbe_setup_mac_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = 0; + u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; + struct txgbe_adapter *adapter = hw->back; + u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; + bool link_up = false; + + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + DEBUGFUNC("\n"); + + /* Check to see if speed passed in is supported. */ + status = TCALL(hw, mac.ops.get_link_capabilities, + &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == TXGBE_LINK_SPEED_UNKNOWN) { + status = TXGBE_ERR_LINK_SETUP; + goto out; + } + + if (!(((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) || + ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_XAUI) || + ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_MAC_SGMII))) { + status = TCALL(hw, mac.ops.check_link, + &link_speed, &link_up, false); + if (status != 0) + goto out; + if ((link_speed == speed) && link_up) + goto out; + } + + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + goto out; + + if ((hw->subsystem_id & 0xF0) == TXGBE_ID_KR_KX_KX4) { + if (!autoneg) { + switch (hw->phy.link_mode) { + case TXGBE_PHYSICAL_LAYER_10GBASE_KR: + txgbe_set_link_to_kr(hw, autoneg); + break; + case TXGBE_PHYSICAL_LAYER_10GBASE_KX4: + txgbe_set_link_to_kx4(hw, autoneg); + break; + case TXGBE_PHYSICAL_LAYER_1000BASE_KX: + txgbe_set_link_to_kx(hw, speed, autoneg); + break; + default: + status = TXGBE_ERR_PHY; + goto out; + } + } else { + txgbe_set_link_to_kr(hw, autoneg); + } + } else if ((hw->subsystem_id & 0xF0) == TXGBE_ID_XAUI || + ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_XAUI) || + (hw->subsystem_id & 0xF0) == TXGBE_ID_SGMII || + ((hw->subsystem_id & 0xF0) == TXGBE_ID_MAC_SGMII) || + (txgbe_get_media_type(hw) == txgbe_media_type_copper && + (hw->subsystem_id & 0xF0) == TXGBE_ID_SFI_XAUI)) { + if (speed == TXGBE_LINK_SPEED_10GB_FULL) { + txgbe_set_link_to_kx4(hw, autoneg); + } else { + txgbe_set_link_to_kx(hw, speed, 0); + if (adapter->an37 || + (hw->subsystem_id & 0xF0) == TXGBE_ID_SGMII || + (hw->subsystem_id & 0xF0) == TXGBE_ID_XAUI) + txgbe_set_sgmii_an37_ability(hw); + } + } else if (txgbe_get_media_type(hw) == txgbe_media_type_fiber) { + txgbe_set_link_to_sfi(hw, speed); + } + +out: + return status; +} + +/** + * txgbe_setup_copper_link - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
+ **/ +STATIC s32 txgbe_setup_copper_link(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + s32 status; + u32 link_speed; + + DEBUGFUNC("\n"); + + /* Setup the PHY according to input speed */ + link_speed = TCALL(hw, phy.ops.setup_link_speed, speed, + autoneg_wait_to_complete); + + if (link_speed != TXGBE_LINK_SPEED_UNKNOWN) + /* Set up MAC */ + status = txgbe_setup_mac_link(hw, link_speed, autoneg_wait_to_complete); + else { + status = 0; + } + return status; +} + +int txgbe_reset_misc(struct txgbe_hw *hw) +{ + int i; + u32 value; + + txgbe_init_i2c(hw); + + value = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + if ((value & 0x3) != TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X) { + hw->link_status = TXGBE_LINK_STATUS_NONE; + } + + /* receive packets that size > 2048 */ + wr32m(hw, TXGBE_MAC_RX_CFG, + TXGBE_MAC_RX_CFG_JE, TXGBE_MAC_RX_CFG_JE); + + /* clear counters on read */ + wr32m(hw, TXGBE_MMC_CONTROL, + TXGBE_MMC_CONTROL_RSTONRD, TXGBE_MMC_CONTROL_RSTONRD); + + wr32m(hw, TXGBE_MAC_RX_FLOW_CTRL, + TXGBE_MAC_RX_FLOW_CTRL_RFE, TXGBE_MAC_RX_FLOW_CTRL_RFE); + + wr32(hw, TXGBE_MAC_PKT_FLT, + TXGBE_MAC_PKT_FLT_PR); + + wr32m(hw, TXGBE_MIS_RST_ST, + TXGBE_MIS_RST_ST_RST_INIT, 0x1E00); + + /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ + wr32(hw, TXGBE_PSR_MNG_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, TXGBE_PSR_MNG_FLEX_DW_L(i), 0); + wr32(hw, TXGBE_PSR_MNG_FLEX_DW_H(i), 0); + wr32(hw, TXGBE_PSR_MNG_FLEX_MSK(i), 0); + } + wr32(hw, TXGBE_PSR_LAN_FLEX_SEL, 0); + for (i = 0; i < 16; i++) { + wr32(hw, TXGBE_PSR_LAN_FLEX_DW_L(i), 0); + wr32(hw, TXGBE_PSR_LAN_FLEX_DW_H(i), 0); + wr32(hw, TXGBE_PSR_LAN_FLEX_MSK(i), 0); + } + + /* set pause frame dst mac addr */ + wr32(hw, TXGBE_RDB_PFCMACDAL, 0xC2000001); + wr32(hw, TXGBE_RDB_PFCMACDAH, 0x0180); + + txgbe_init_thermal_sensor_thresh(hw); + + return 0; +} + +/** + * txgbe_reset_hw - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 txgbe_reset_hw(struct txgbe_hw *hw) +{ + s32 status; + u32 reset = 0; + u32 i; + + u32 sr_pcs_ctl, sr_pma_mmd_ctl1, sr_an_mmd_ctl, sr_an_mmd_adv_reg2; + u32 vr_xs_or_pcs_mmd_digi_ctl1, curr_vr_xs_or_pcs_mmd_digi_ctl1; + u32 curr_sr_pcs_ctl, curr_sr_pma_mmd_ctl1; + u32 curr_sr_an_mmd_ctl, curr_sr_an_mmd_adv_reg2; + + u32 reset_status = 0; + u32 rst_delay = 0; + struct txgbe_adapter *adapter = hw->back; + u32 value; + + DEBUGFUNC("\n"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = TCALL(hw, mac.ops.stop_adapter); + if (status != 0) + goto reset_hw_out; + + /* Identify PHY and related function pointers */ + status = TCALL(hw, phy.ops.init); + + if (status == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Reset PHY */ + if (txgbe_get_media_type(hw) == txgbe_media_type_copper) + TCALL(hw, phy.ops.reset); + + /* remember internel phy regs from before we reset */ + curr_sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + curr_sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + curr_sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + curr_sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, + TXGBE_SR_AN_MMD_ADV_REG2); + curr_vr_xs_or_pcs_mmd_digi_ctl1 = + txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); + + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. 
+ * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + if (hw->force_full_reset) { + rst_delay = (rd32(hw, TXGBE_MIS_RST_ST) & + TXGBE_MIS_RST_ST_RST_INIT) >> + TXGBE_MIS_RST_ST_RST_INI_SHIFT; + if (hw->reset_type == TXGBE_SW_RESET) { + for (i = 0; i < rst_delay + 20; i++) { + reset_status = + rd32(hw, TXGBE_MIS_RST_ST); + if (!(reset_status & + TXGBE_MIS_RST_ST_DEV_RST_ST_MASK)) + break; + msleep(100); + } + + if (reset_status & TXGBE_MIS_RST_ST_DEV_RST_ST_MASK) { + status = TXGBE_ERR_RESET_FAILED; + DEBUGOUT("Global reset polling failed to " + "complete.\n"); + goto reset_hw_out; + } + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_SW_RESET); + if (status != 0) + goto reset_hw_out; + /* errata 7 */ + if (txgbe_mng_present(hw) && + hw->revision_id == TXGBE_SP_MPW) { + struct txgbe_adapter *adapter = + (struct txgbe_adapter *)hw->back; + adapter->flags2 &= + ~TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED; + } + } else if (hw->reset_type == TXGBE_GLOBAL_RESET) { +#ifndef _WIN32 + struct txgbe_adapter *adapter = + (struct txgbe_adapter *)hw->back; + msleep(100 * rst_delay + 2000); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, false); +#endif /*_WIN32*/ + } + } else { + if (txgbe_mng_present(hw)) { + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + txgbe_reset_hostif(hw); + } + } else { + + if (hw->bus.lan_id == 0) { + reset = TXGBE_MIS_RST_LAN0_RST; + } else { + reset = TXGBE_MIS_RST_LAN1_RST; + } + + wr32(hw, TXGBE_MIS_RST, + reset | rd32(hw, TXGBE_MIS_RST)); + TXGBE_WRITE_FLUSH(hw); + } + usec_delay(10); + + if (hw->bus.lan_id == 0) { + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST); + } else { + status = txgbe_check_flash_load(hw, + TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST); + } + if (status != 0) + goto reset_hw_out; + } + + status = txgbe_reset_misc(hw); + if (status != 0) + goto reset_hw_out; + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. + */ + sr_pcs_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_PCS_CTL2); + sr_pma_mmd_ctl1 = txgbe_rd32_epcs(hw, TXGBE_SR_PMA_MMD_CTL1); + sr_an_mmd_ctl = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_CTL); + sr_an_mmd_adv_reg2 = txgbe_rd32_epcs(hw, TXGBE_SR_AN_MMD_ADV_REG2); + vr_xs_or_pcs_mmd_digi_ctl1 = + txgbe_rd32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1); + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_sr_pcs_ctl2 = sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + vr_xs_or_pcs_mmd_digi_ctl1; + hw->mac.orig_link_settings_stored = true; + } else { + + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. 
+ */ + + hw->mac.orig_sr_pcs_ctl2 = curr_sr_pcs_ctl; + hw->mac.orig_sr_pma_mmd_ctl1 = curr_sr_pma_mmd_ctl1; + hw->mac.orig_sr_an_mmd_ctl = curr_sr_an_mmd_ctl; + hw->mac.orig_sr_an_mmd_adv_reg2 = + curr_sr_an_mmd_adv_reg2; + hw->mac.orig_vr_xs_or_pcs_mmd_digi_ctl1 = + curr_vr_xs_or_pcs_mmd_digi_ctl1; + + } + + /*A temporary solution for set to sfi*/ + if (SFI_SET == 1 || adapter->ffe_set == TXGBE_BP_M_SFI) { + e_dev_info("Set SFI TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + + if (KR_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KR) { + e_dev_info("Set KR TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + value = (0x1804 & ~0x3F3F); + value |= adapter->ffe_main << 8 | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + + value = (0x50 & ~0x7F) | (1 << 6)| adapter->ffe_post; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + txgbe_wr32_epcs(hw, 0x18035, 0x00FF); + txgbe_wr32_epcs(hw, 0x18055, 0x00FF); + } + + if (KX_SET == 1 || adapter->ffe_set == TXGBE_BP_M_KX) { + e_dev_info("Set KX TX_EQ MAIN:%d PRE:%d POST:%d\n", + adapter->ffe_main, adapter->ffe_pre, adapter->ffe_post); + /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register Bit[13:8](TX_EQ_MAIN) + * = 6'd30, Bit[5:0](TX_EQ_PRE) = 6'd4 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0); + value = (value & ~0x3F3F) | (adapter->ffe_main << 8) | adapter->ffe_pre; + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6](TX_EQ_OVR_RIDE) + * = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd36 + */ + value = txgbe_rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); + value = (value & ~0x7F) | adapter->ffe_post | (1 << 6); + txgbe_wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + + txgbe_wr32_epcs(hw, 0x18035, 0x00FF); + txgbe_wr32_epcs(hw, 0x18055, 0x00FF); + } + + /* Store the permanent mac address */ + TCALL(hw, mac.ops.get_mac_addr, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. 
+ */ + hw->mac.num_rar_entries = 128; + TCALL(hw, mac.ops.init_rx_addrs); + + /* Store the permanent SAN mac address */ + TCALL(hw, mac.ops.get_san_mac_addr, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (txgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + TCALL(hw, mac.ops.set_rar, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, TXGBE_PSR_MAC_SWC_AD_H_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + TCALL(hw, mac.ops.get_wwn_prefix, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + pci_set_master(((struct txgbe_adapter *)hw->back)->pdev); + +reset_hw_out: + return status; +} + +/** + * txgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +STATIC s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < TXGBE_RDB_FDIR_CMD_CMD_POLL; i++) { + *fdircmd = rd32(hw, TXGBE_RDB_FDIR_CMD); + if (!(*fdircmd & TXGBE_RDB_FDIR_CMD_CMD_MASK)) + return 0; + usec_delay(10); + } + + return TXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** + * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +{ + s32 err; + int i; + u32 fdirctrl = rd32(hw, TXGBE_RDB_FDIR_CTL); + u32 fdircmd; + fdirctrl &= ~TXGBE_RDB_FDIR_CTL_INIT_DONE; + + DEBUGFUNC("\n"); + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, " + "aborting table re-initialization.\n"); + return err; + } + + wr32(hw, TXGBE_RDB_FDIR_FREE, 0); + TXGBE_WRITE_FLUSH(hw); + /* + * sapphire adapters flow director init flow cannot be restarted, + * Workaround sapphire silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + wr32m(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CLEARHT, TXGBE_RDB_FDIR_CMD_CLEARHT); + TXGBE_WRITE_FLUSH(hw); + wr32m(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CLEARHT, 0); + TXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. 
+ */ + wr32(hw, TXGBE_RDB_FDIR_HASH, 0x00); + TXGBE_WRITE_FLUSH(hw); + + wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl); + TXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_RDB_FDIR_CTL) & + TXGBE_RDB_FDIR_CTL_INIT_DONE) + break; + msec_delay(1); + } + if (i >= TXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return TXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + rd32(hw, TXGBE_RDB_FDIR_USE_ST); + rd32(hw, TXGBE_RDB_FDIR_FAIL_ST); + rd32(hw, TXGBE_RDB_FDIR_MATCH); + rd32(hw, TXGBE_RDB_FDIR_MISS); + rd32(hw, TXGBE_RDB_FDIR_LEN); + + return 0; +} + +/** + * txgbe_fdir_enable - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +STATIC void txgbe_fdir_enable(struct txgbe_hw *hw, u32 fdirctrl) +{ + int i; + + DEBUGFUNC("\n"); + + /* Prime the keys for hashing */ + wr32(hw, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(hw, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. + */ + wr32(hw, TXGBE_RDB_FDIR_CTL, fdirctrl); + TXGBE_WRITE_FLUSH(hw); + for (i = 0; i < TXGBE_RDB_FDIR_INIT_DONE_POLL; i++) { + if (rd32(hw, TXGBE_RDB_FDIR_CTL) & + TXGBE_RDB_FDIR_CTL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= TXGBE_RDB_FDIR_INIT_DONE_POLL) + DEBUGOUT("Flow Director poll time exceeded!\n"); +} + +/** + * txgbe_init_fdir_signature -Initialize Flow Director sig filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl) +{ + struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back; + int i = VMDQ_P(0) / 4; + int j = VMDQ_P(0) % 4; + u32 flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), + ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK | + TXGBE_RDB_FDIR_FLEX_CFG_MSK | + TXGBE_RDB_FDIR_FLEX_CFG_OFST) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j))); + + UNREFERENCED_PARAMETER(adapter); + + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) << + (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j); + wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex); + + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) | + (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) | + (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(hw, fdirctrl); + + if (hw->revision_id == TXGBE_SP_MPW) { + /* errata 1: disable RSC of drop ring 0 */ + wr32m(hw, TXGBE_PX_RR_CFG(0), + TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC); + } + return 0; +} + +/** + * txgbe_init_fdir_perfect - 
Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode + **/ +s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) +{ + UNREFERENCED_PARAMETER(cloud_mode); + DEBUGFUNC("\n"); + + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | + (TXGBE_RDB_FDIR_DROP_QUEUE << + TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT) | + (0xF << TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT) | + (0xA << TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT) | + (4 << TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(hw, fdirctrl); + + if (hw->revision_id == TXGBE_SP_MPW) { + if (((struct txgbe_adapter *)hw->back)->num_rx_queues > + TXGBE_RDB_FDIR_DROP_QUEUE) + /* errata 1: disable RSC of drop ring */ + wr32m(hw, + TXGBE_PX_RR_CFG(TXGBE_RDB_FDIR_DROP_QUEUE), + TXGBE_PX_RR_CFG_RSC, ~TXGBE_PX_RR_CFG_RSC); + } + return 0; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define TXGBE_ATR_COMMON_HASH_KEY \ + (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY) +#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * txgbe_atr_compute_sig_hash - Compute the signature hash + * @stream: input bitstream to compute the hash on + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. 
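+ *
+ * Rough usage sketch (the remaining union fields are filled by the
+ * caller from the packet headers; values here are hypothetical):
+ *
+ *    union txgbe_atr_hash_dword input = { .dword = 0 };
+ *    union txgbe_atr_hash_dword common = { .dword = 0 };
+ *    u32 sig;
+ *
+ *    input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ *    ... fill the rest of input/common from the flow ...
+ *    sig = txgbe_atr_compute_sig_hash(input, common);
+ *
+ * In this driver the result feeds txgbe_fdir_add_signature_filter(),
+ * which ORs it into TXGBE_RDB_FDIR_HASH along with the bucket-valid
+ * bit.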
+ **/
+u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = TXGBE_NTOHL(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = TXGBE_NTOHL(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process remaining 30 bits of the key */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= TXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= TXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
+
+/**
+ * txgbe_fdir_add_signature_filter - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw,
+ union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u32 fdirhashcmd = 0;
+ u8 flow_type;
+ u32 fdircmd;
+ s32 err;
+
+ DEBUGFUNC("\n");
+
+ /*
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+ * fifth is FDIRCMD.TUNNEL_FILTER
+ */
+ flow_type = input.formatted.flow_type;
+ switch (flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case TXGBE_ATR_FLOW_TYPE_TCPV6:
+ case TXGBE_ATR_FLOW_TYPE_UDPV6:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+ TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+ TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+ fdircmd |= (u32)flow_type << TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT;
+
+ fdirhashcmd |= txgbe_atr_compute_sig_hash(input, common);
+ fdirhashcmd |= 0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT;
+ wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
+
+ wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not 
complete!\n"); + return err; + } + + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return 0; +} + +#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 11; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = TXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = TXGBE_NTOHL(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * txgbe_get_fdirtcpm - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +STATIC u32 txgbe_get_fdirtcpm(union txgbe_atr_input *input_mask) +{ + u32 mask = TXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT; + mask |= TXGBE_NTOHS(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. 
As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define TXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define TXGBE_WRITE_REG_BE32(a, reg, value) \
+ wr32((a), (reg), TXGBE_STORE_AS_BE32(TXGBE_NTOHL(value)))
+
+#define TXGBE_STORE_AS_BE16(_value) \
+ TXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw,
+ union txgbe_atr_input *input_mask,
+ bool cloud_mode)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = 0;
+ u32 fdirtcpm;
+ u32 flex = 0;
+ int i, j;
+ struct txgbe_adapter *adapter = (struct txgbe_adapter *)hw->back;
+
+ UNREFERENCED_PARAMETER(cloud_mode);
+ UNREFERENCED_PARAMETER(adapter);
+
+ DEBUGFUNC("\n");
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
+ /* fall through */
+ case 0x7F:
+ break;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ /* fall through */
+ case TXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ wr32(hw, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
+
+ i = VMDQ_P(0) / 4;
+ j = VMDQ_P(0) % 4;
+ flex = rd32m(hw, TXGBE_RDB_FDIR_FLEX_CFG(i),
+ ~((TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK |
+ TXGBE_RDB_FDIR_FLEX_CFG_MSK |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST) <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j)));
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ 0x6 << TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT) <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK <<
+ (TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT * j);
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ wr32(hw, TXGBE_RDB_FDIR_FLEX_CFG(i), flex);
+
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = txgbe_get_fdirtcpm(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ wr32(hw, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
+ wr32(hw, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
+ wr32(hw, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);
+
+ /* store source and destination IP masks (little-endian) */
+ wr32(hw, TXGBE_RDB_FDIR_SA4_MSK,
+ TXGBE_NTOHL(~input_mask->formatted.src_ip[0]));
+ wr32(hw, TXGBE_RDB_FDIR_DA4_MSK,
+ TXGBE_NTOHL(~input_mask->formatted.dst_ip[0]));
+ return 
0; +} + +s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id, u8 queue, + bool cloud_mode) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + s32 err; + + DEBUGFUNC("\n"); + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + wr32(hw, TXGBE_RDB_FDIR_IP6(2), + TXGBE_NTOHL(input->formatted.src_ip[0])); + wr32(hw, TXGBE_RDB_FDIR_IP6(1), + TXGBE_NTOHL(input->formatted.src_ip[1])); + wr32(hw, TXGBE_RDB_FDIR_IP6(0), + TXGBE_NTOHL(input->formatted.src_ip[2])); + + /* record the source address (little-endian) */ + wr32(hw, TXGBE_RDB_FDIR_SA, + TXGBE_NTOHL(input->formatted.src_ip[0])); + + /* record the first 32 bits of the destination address + * (little-endian) */ + wr32(hw, TXGBE_RDB_FDIR_DA, + TXGBE_NTOHL(input->formatted.dst_ip[0])); + + /* record source and destination port (little-endian)*/ + fdirport = TXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirport |= TXGBE_NTOHS(input->formatted.src_port); + wr32(hw, TXGBE_RDB_FDIR_PORT, fdirport); + } + + /* record packet type and flex_bytes(little-endian) */ + fdirvlan = TXGBE_NTOHS(input->formatted.flex_bytes); + fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT; + + fdirvlan |= TXGBE_NTOHS(input->formatted.vlan_id); + wr32(hw, TXGBE_RDB_FDIR_FLEX, fdirvlan); + + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash | + 0x1 << TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT; + fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT; + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + TXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + if (queue == TXGBE_RDB_FDIR_DROP_QUEUE) + fdircmd |= TXGBE_RDB_FDIR_CMD_DROP; + fdircmd |= input->formatted.flow_type << + TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << + TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT; + + wr32(hw, TXGBE_RDB_FDIR_CMD, fdircmd); + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + return 0; +} + +s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd; + s32 err; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT; + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush hash to HW */ + TXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + wr32(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT); + + err = txgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) { + wr32(hw, TXGBE_RDB_FDIR_HASH, fdirhash); + TXGBE_WRITE_FLUSH(hw); + wr32(hw, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW); + } + + return 0; +} + + +/** + * txgbe_start_hw - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. 
+ * Then performs revision-specific operations, if any. + **/ +s32 txgbe_start_hw(struct txgbe_hw *hw) +{ + int ret_val = 0; + u32 i; + + DEBUGFUNC("\n"); + + /* Set the media type */ + hw->phy.media_type = TCALL(hw, mac.ops.get_media_type); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + TCALL(hw, mac.ops.clear_vfta); + + /* Clear statistics registers */ + TCALL(hw, mac.ops.clear_hw_cntrs); + + TXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = TCALL(hw, mac.ops.setup_fc); + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + wr32(hw, TXGBE_TDM_RP_IDX, i); + wr32(hw, TXGBE_TDM_RP_RATE, 0); + } + TXGBE_WRITE_FLUSH(hw); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + return ret_val; +} + +/** + * txgbe_identify_phy - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 txgbe_identify_phy(struct txgbe_hw *hw) +{ + /* Detect PHY if not unknown - returns success if already detected. */ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + enum txgbe_media_type media_type; + + DEBUGFUNC("\n"); + + if (!hw->phy.phy_semaphore_mask) { + hw->phy.phy_semaphore_mask = TXGBE_MNG_SWFW_SYNC_SW_PHY; + } + + media_type = TCALL(hw, mac.ops.get_media_type); + if (media_type == txgbe_media_type_copper) { + status = txgbe_init_external_phy(hw); + if (status != 0) { + return status; + } + txgbe_get_phy_id(hw); + hw->phy.type = txgbe_get_phy_type_from_id(hw); + status = 0; + } else if (media_type == txgbe_media_type_fiber) { + status = txgbe_identify_module(hw); + } else { + hw->phy.type = txgbe_phy_none; + status = 0; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == txgbe_phy_sfp_unsupported) + return TXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + + +/** + * txgbe_enable_rx_dma - Enable the Rx DMA unit on sapphire + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for sapphire + **/ +s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval) +{ + + DEBUGFUNC("\n"); + + /* + * Workaround for sapphire silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + + TCALL(hw, mac.ops.disable_sec_rx_path); + + if (regval & TXGBE_RDB_PB_CTL_RXEN) + TCALL(hw, mac.ops.enable_rx); + else + TCALL(hw, mac.ops.disable_rx); + + TCALL(hw, mac.ops.enable_sec_rx_path); + + return 0; +} + +/** + * txgbe_init_flash_params - Initialize flash params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters txgbe_eeprom_info within the + * txgbe_hw struct in order to set up EEPROM access. 
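+ * The flash geometry is currently hard-coded below: a 16 MB (0x1000000
+ * byte) part, i.e. 0x400000 32-bit dwords, addressed with 24 bits.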
+ **/ +s32 txgbe_init_flash_params(struct txgbe_hw *hw) +{ + struct txgbe_flash_info *flash = &hw->flash; + u32 eec; + + DEBUGFUNC("\n"); + + eec = 0x1000000; + flash->semaphore_delay = 10; + flash->dword_size = (eec >> 2); + flash->address_bits = 24; + DEBUGOUT3("FLASH params: size = %d, address bits: %d\n", + flash->dword_size, + flash->address_bits); + + return 0; +} + +/** + * txgbe_read_flash_buffer - Read FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to read + * @dwords: number of dwords + * @data: dword(s) read from the EEPROM + * + * Retrieves 32 bit dword(s) read from EEPROM + **/ +s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = TXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, TXGBE_SPI_DATA, data[i]); + wr32(hw, TXGBE_SPI_CMD, + TXGBE_SPI_CMD_ADDR(offset + i) | + TXGBE_SPI_CMD_CMD(0x0)); + + status = po32m(hw, TXGBE_SPI_STATUS, + TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE, + TXGBE_SPI_TIMEOUT, 0); + if (status) { + DEBUGOUT("FLASH read timed out\n"); + break; + } + } + + return status; +} + +/** + * txgbe_write_flash_buffer - Write FLASH dword(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of dword in EEPROM to write + * @dwords: number of dwords + * @data: dword(s) write from to EEPROM + * + **/ +s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data) +{ + s32 status = 0; + u32 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!dwords || offset + dwords >= hw->flash.dword_size) { + status = TXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(TXGBE_ERROR_ARGUMENT, "Invalid FLASH arguments"); + return status; + } + + for (i = 0; i < dwords; i++) { + wr32(hw, TXGBE_SPI_CMD, + TXGBE_SPI_CMD_ADDR(offset + i) | + TXGBE_SPI_CMD_CMD(0x1)); + + status = po32m(hw, TXGBE_SPI_STATUS, + TXGBE_SPI_STATUS_OPDONE, TXGBE_SPI_STATUS_OPDONE, + TXGBE_SPI_TIMEOUT, 0); + if (status != 0) { + DEBUGOUT("FLASH write timed out\n"); + break; + } + data[i] = rd32(hw, TXGBE_SPI_DATA); + } + + return status; +} + +/** + * txgbe_init_eeprom_params - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters txgbe_eeprom_info within the + * txgbe_hw struct in order to set up EEPROM access. 
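+ * When the part is not strapped for flash bypass, the EEPROM is treated
+ * as a flash-backed 4096 byte (2048 word) shadow. The software region
+ * offset is read from word TXGBE_SW_REGION_PTR.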
+ **/ +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw) +{ + struct txgbe_eeprom_info *eeprom = &hw->eeprom; + u16 eeprom_size; + s32 status = 0; + u16 data; + + DEBUGFUNC("\n"); + + if (eeprom->type == txgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = txgbe_eeprom_none; + + if (!(rd32(hw, TXGBE_SPI_STATUS) & + TXGBE_SPI_STATUS_FLASH_BYPASS)) { + eeprom->type = txgbe_flash; + + eeprom_size = 4096; + eeprom->word_size = eeprom_size >> 1; + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + } + + status = TCALL(hw, eeprom.ops.read, TXGBE_SW_REGION_PTR, + &data); + if (status) { + DEBUGOUT("NVM Read Error\n"); + return status; + } + eeprom->sw_region_offset = data >> 1; + + return status; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct txgbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("\n"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + if (txgbe_check_mng_access(hw)) + *data = (u16)rd32a(hw, TXGBE_MNG_MBOX, + FW_NVM_DATA_OFFSET); + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + + return 0; +} + +/** + * txgbe_read_ee_hostif - Read EEPROM word using a host interface cmd + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_read_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_read_ee_hostif_buffer- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct txgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + u32 value = 0; + + DEBUGFUNC("\n"); + + /* Take semaphore for the entire operation. 
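+ * Requests larger than FW_MAX_READ_BUFFER_SIZE / 2 words are split into
+ * multiple host interface commands while the semaphore stays held.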
*/ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = TXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = TXGBE_CPU_TO_BE16(words_to_read * 2); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, + false); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = TXGBE_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + if (txgbe_check_mng_access(hw)) + value = rd32(hw, reg); + else { + status = TXGBE_ERR_MNG_ACCESS_FAILED; + return status; + } + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + TCALL(hw, mac.ops.release_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + return status; +} + +/** + * txgbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif_data(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct txgbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("\n"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = TXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = TXGBE_CPU_TO_BE32(offset * 2); + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * txgbe_write_ee_hostif - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH) == 0) { + status = txgbe_write_ee_hostif_data(hw, offset, data); + TCALL(hw, mac.ops.release_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_write_ee_hostif_buffer - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + u16 i = 0; + + DEBUGFUNC("\n"); + + /* Take semaphore for the entire operation. 
*/ + status = TCALL(hw, mac.ops.acquire_swfw_sync, + TXGBE_MNG_SWFW_SYNC_SW_FLASH); + if (status != 0) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = txgbe_write_ee_hostif_data(hw, offset + i, + data[i]); + + if (status != 0) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + TCALL(hw, mac.ops.release_swfw_sync, TXGBE_MNG_SWFW_SYNC_SW_FLASH); +out: + + return status; +} + + + +/** + * txgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) +{ + u16 *buffer = NULL; + u32 buffer_size = 0; + + u16 *eeprom_ptrs = NULL; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 i; + + DEBUGFUNC("\n"); + + TCALL(hw, eeprom.ops.init_params); + + if (!buffer) { + eeprom_ptrs = (u16 *)vmalloc(TXGBE_EEPROM_LAST_WORD * + sizeof(u16)); + if (!eeprom_ptrs) + return TXGBE_ERR_NO_SPACE; + /* Read pointer area */ + status = txgbe_read_ee_hostif_buffer(hw, 0, + TXGBE_EEPROM_LAST_WORD, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < TXGBE_EEPROM_LAST_WORD) + return TXGBE_ERR_PARAM; + local_buffer = buffer; + } + + for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) + if (i != hw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + checksum = (u16)TXGBE_EEPROM_SUM - checksum; + if (eeprom_ptrs) + vfree(eeprom_ptrs); + + return (s32)checksum; +} + +/** + * txgbe_update_eeprom_checksum - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("\n"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = txgbe_read_ee_hostif(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = txgbe_calc_eeprom_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_write_ee_hostif(hw, TXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + return status; +} + +/** + * txgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("\n"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = TCALL(hw, eeprom.ops.read, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = TCALL(hw, eeprom.ops.calc_checksum); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = txgbe_read_ee_hostif(hw, hw->eeprom.sw_region_offset + + TXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = TXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum\n"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * txgbe_update_flash - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 txgbe_update_flash(struct txgbe_hw *hw) +{ + s32 status = 0; + union txgbe_hic_hdr2 buffer; + + DEBUGFUNC("\n"); + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = txgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + TXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + + +/** + * txgbe_check_mac_link - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg = 0; + u32 i; + u16 value; + + DEBUGFUNC("\n"); + + if (link_up_wait_to_complete) { + for (i = 0; i < TXGBE_LINK_UP_TIME; i++) { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + } + } else { + *link_up = true; + } + if (*link_up) { + links_reg = rd32(hw, + TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + } + msleep(100); + } + } else { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + /* read ext phy link status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8008, &value); + if (value & 0x400) { + *link_up = true; + } else { + *link_up = false; + } + } else { + *link_up = true; + } + if (*link_up) { + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + *link_up = true; + } else { + *link_up = false; + } + } + } + + if (*link_up) { + if (TCALL(hw, mac.ops.get_media_type) == txgbe_media_type_copper && + ((hw->subsystem_id & 0xF0) != TXGBE_ID_SFI_XAUI)) { + if ((value & 0xc000) == 0xc000) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if ((value & 0xc000) == 0x8000) { + *speed = TXGBE_LINK_SPEED_1GB_FULL; + } else if ((value & 0xc000) == 0x4000) { + *speed = TXGBE_LINK_SPEED_100_FULL; + } else if ((value & 
0xc000) == 0x0000) { + *speed = TXGBE_LINK_SPEED_10_FULL; + } + } else { + if ((links_reg & TXGBE_CFG_PORT_ST_LINK_10G) == + TXGBE_CFG_PORT_ST_LINK_10G) { + *speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_1G) == + TXGBE_CFG_PORT_ST_LINK_1G){ + *speed = TXGBE_LINK_SPEED_1GB_FULL; + } else if ((links_reg & TXGBE_CFG_PORT_ST_LINK_100M) == + TXGBE_CFG_PORT_ST_LINK_100M){ + *speed = TXGBE_LINK_SPEED_100_FULL; + } else + *speed = TXGBE_LINK_SPEED_10_FULL; + } + } else + *speed = TXGBE_LINK_SPEED_UNKNOWN; + + return 0; +} + +/** + * txgbe_setup_eee - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee) +{ + /* fix eee */ + UNREFERENCED_PARAMETER(hw); + UNREFERENCED_PARAMETER(enable_eee); + DEBUGFUNC("\n"); + + return 0; +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_hw.h b/drivers/net/ethernet/netswift/txgbe/txgbe_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..97ce62a2cd26ac2b0a3e7b178f7a10af778c0a2a --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_hw.h @@ -0,0 +1,264 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ */ + +#ifndef _TXGBE_HW_H_ +#define _TXGBE_HW_H_ + +#define TXGBE_EMC_INTERNAL_DATA 0x00 +#define TXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define TXGBE_EMC_DIODE1_DATA 0x01 +#define TXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define TXGBE_EMC_DIODE2_DATA 0x23 +#define TXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define TXGBE_EMC_DIODE3_DATA 0x2A +#define TXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +/** + * Packet Type decoding + **/ +/* txgbe_dec_ptype.mac: outer mac */ +enum txgbe_dec_ptype_mac { + TXGBE_DEC_PTYPE_MAC_IP = 0, + TXGBE_DEC_PTYPE_MAC_L2 = 2, + TXGBE_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* txgbe_dec_ptype.[e]ip: outer&encaped ip */ +#define TXGBE_DEC_PTYPE_IP_FRAG (0x4) +enum txgbe_dec_ptype_ip { + TXGBE_DEC_PTYPE_IP_NONE = 0, + TXGBE_DEC_PTYPE_IP_IPV4 = 1, + TXGBE_DEC_PTYPE_IP_IPV6 = 2, + TXGBE_DEC_PTYPE_IP_FGV4 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV4), + TXGBE_DEC_PTYPE_IP_FGV6 = + (TXGBE_DEC_PTYPE_IP_FRAG | TXGBE_DEC_PTYPE_IP_IPV6), +}; + +/* txgbe_dec_ptype.etype: encaped type */ +enum txgbe_dec_ptype_etype { + TXGBE_DEC_PTYPE_ETYPE_NONE = 0, + TXGBE_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + TXGBE_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + TXGBE_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + TXGBE_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* txgbe_dec_ptype.proto: payload proto */ +enum txgbe_dec_ptype_prot { + TXGBE_DEC_PTYPE_PROT_NONE = 0, + TXGBE_DEC_PTYPE_PROT_UDP = 1, + TXGBE_DEC_PTYPE_PROT_TCP = 2, + TXGBE_DEC_PTYPE_PROT_SCTP = 3, + TXGBE_DEC_PTYPE_PROT_ICMP = 4, + TXGBE_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* txgbe_dec_ptype.layer: payload layer */ +enum txgbe_dec_ptype_layer { + TXGBE_DEC_PTYPE_LAYER_NONE = 0, + TXGBE_DEC_PTYPE_LAYER_PAY2 = 1, + TXGBE_DEC_PTYPE_LAYER_PAY3 = 2, + TXGBE_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct txgbe_dec_ptype { + u32 ptype:8; + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; +typedef struct txgbe_dec_ptype txgbe_dptype; + + +void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map); +u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw); +s32 txgbe_init_hw(struct txgbe_hw *hw); +s32 txgbe_start_hw(struct txgbe_hw *hw); +s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw); +s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr); +s32 txgbe_get_bus_info(struct txgbe_hw *hw); +void txgbe_set_pci_config_data(struct txgbe_hw *hw, u16 link_status); +void txgbe_set_lan_id_multi_port_pcie(struct txgbe_hw *hw); +s32 txgbe_stop_adapter(struct txgbe_hw *hw); + +s32 txgbe_led_on(struct txgbe_hw *hw, u32 index); +s32 txgbe_led_off(struct txgbe_hw *hw, u32 index); + +s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u64 pools, + u32 enable_addr); +s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index); +s32 txgbe_init_rx_addrs(struct txgbe_hw *hw); +s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + txgbe_mc_addr_itr func, bool clear); +s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list, + u32 addr_count, txgbe_mc_addr_itr func); +s32 txgbe_enable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_mc(struct txgbe_hw *hw); +s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw); +s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw); + +s32 txgbe_fc_enable(struct txgbe_hw *hw); +bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw); +void txgbe_fc_autoneg(struct txgbe_hw 
*hw); +s32 txgbe_setup_fc(struct txgbe_hw *hw); + +s32 txgbe_validate_mac_addr(u8 *mac_addr); +s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask); +void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask); +s32 txgbe_disable_pcie_master(struct txgbe_hw *hw); + + +s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); +s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); + +s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq); +s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq); +s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq); +s32 txgbe_init_uta_tables(struct txgbe_hw *hw); +s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 txgbe_clear_vfta(struct txgbe_hw *hw); +s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan); + +s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int pf); +void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf); +void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw, + bool enable, int vf); +s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps); +void txgbe_set_rxpba(struct txgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +s32 txgbe_set_fw_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +s32 txgbe_reset_hostif(struct txgbe_hw *hw); +u8 txgbe_calculate_checksum(u8 *buffer, u32 length); +s32 txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void txgbe_clear_tx_pending(struct txgbe_hw *hw); +void txgbe_stop_mac_link_on_d3(struct txgbe_hw *hw); +bool txgbe_mng_present(struct txgbe_hw *hw); +bool txgbe_check_mng_access(struct txgbe_hw *hw); + +s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw); +s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw); +void txgbe_enable_rx(struct txgbe_hw *hw); +void txgbe_disable_rx(struct txgbe_hw *hw); +s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +int txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit); + +/* @txgbe_api.h */ +s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw); +s32 txgbe_init_fdir_signature(struct txgbe_hw *hw, u32 fdirctrl); +s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +s32 txgbe_fdir_add_signature_filter(struct txgbe_hw *hw, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue); +s32 txgbe_fdir_set_input_mask(struct txgbe_hw *hw, + union txgbe_atr_input *input_mask, bool cloud_mode); +s32 txgbe_fdir_write_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 txgbe_fdir_erase_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + u16 soft_id); +s32 txgbe_fdir_add_perfect_filter(struct txgbe_hw *hw, + union txgbe_atr_input *input, + union txgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *mask); +u32 txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common); + +s32 txgbe_get_link_capabilities(struct txgbe_hw *hw, + u32 *speed, bool *autoneg); +enum txgbe_media_type 
txgbe_get_media_type(struct txgbe_hw *hw); +void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw); +void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, + u32 speed); +s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u32 speed, + bool autoneg_wait_to_complete); +void txgbe_init_mac_link_ops(struct txgbe_hw *hw); +s32 txgbe_reset_hw(struct txgbe_hw *hw); +s32 txgbe_identify_phy(struct txgbe_hw *hw); +s32 txgbe_init_phy_ops(struct txgbe_hw *hw); +s32 txgbe_enable_rx_dma(struct txgbe_hw *hw, u32 regval); +s32 txgbe_init_ops(struct txgbe_hw *hw); +s32 txgbe_setup_eee(struct txgbe_hw *hw, bool enable_eee); + +s32 txgbe_init_flash_params(struct txgbe_hw *hw); +s32 txgbe_read_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); +s32 txgbe_write_flash_buffer(struct txgbe_hw *hw, u32 offset, + u32 dwords, u32 *data); + +s32 txgbe_read_eeprom(struct txgbe_hw *hw, + u16 offset, u16 *data); +s32 txgbe_read_eeprom_buffer(struct txgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 txgbe_init_eeprom_params(struct txgbe_hw *hw); +s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw); +s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, + u16 *checksum_val); +s32 txgbe_update_flash(struct txgbe_hw *hw); +s32 txgbe_write_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_write_ee_hostif(struct txgbe_hw *hw, u16 offset, + u16 data); +s32 txgbe_read_ee_hostif_buffer(struct txgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 txgbe_read_ee_hostif(struct txgbe_hw *hw, u16 offset, u16 *data); +u32 txgbe_rd32_epcs(struct txgbe_hw *hw, u32 addr); +void txgbe_wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data); +void txgbe_wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data); +u32 rd32_ephy(struct txgbe_hw *hw, u32 addr); + +s32 txgbe_upgrade_flash_hostif(struct txgbe_hw *hw, u32 region, + const u8 *data, u32 size); + +s32 txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg); +s32 txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg); + +s32 txgbe_set_link_to_kx(struct txgbe_hw *hw, + u32 speed, + bool autoneg); + + +#endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_lib.c b/drivers/net/ethernet/netswift/txgbe/txgbe_lib.c new file mode 100644 index 0000000000000000000000000000000000000000..bb402e45557eb75c8a58979954f8816cf64059c3 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_lib.c @@ -0,0 +1,959 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_lib.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" + +/** + * txgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool txgbe_cache_ring_dcb_vmdq(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + return true; +} + +/* txgbe_get_first_reg_idx - Return first register index associated with ring */ +static void txgbe_get_first_reg_idx(struct txgbe_adapter *adapter, u8 tc, + u16 *tx, u16 *rx) +{ + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } + +} + +/** + * txgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static bool txgbe_cache_ring_dcb(struct txgbe_adapter *adapter) +{ + int tc, offset, rss_i, i; + u16 tx_idx, rx_idx; + struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + txgbe_get_first_reg_idx(adapter, (u8)tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = (u8)tc; + adapter->rx_ring[offset + i]->dcb_tc = (u8)tc; + } + } + + return true; +} + +/** + * txgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along + * with VMDq. 
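+ * Roughly: each ring gets the next register index inside its pool, and
+ * once that index would spill past the queues belonging to the pool it
+ * is aligned up to the base of the next pool.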
+ * + **/ +static bool txgbe_cache_ring_vmdq(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct txgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + + return true; +} + +/** + * txgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ +static bool txgbe_cache_ring_rss(struct txgbe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * txgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void txgbe_cache_ring_register(struct txgbe_adapter *adapter) +{ + if (txgbe_cache_ring_dcb_vmdq(adapter)) + return; + + if (txgbe_cache_ring_dcb(adapter)) + return; + + if (txgbe_cache_ring_vmdq(adapter)) + return; + + txgbe_cache_ring_rss(adapter); +} + +#define TXGBE_RSS_64Q_MASK 0x3F +#define TXGBE_RSS_16Q_MASK 0xF +#define TXGBE_RSS_8Q_MASK 0x7 +#define TXGBE_RSS_4Q_MASK 0x3 +#define TXGBE_RSS_2Q_MASK 0x1 +#define TXGBE_RSS_DISABLED_MASK 0x0 + +/** + * txgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. 
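+ * For example, with more than 4 traffic classes the pool count is capped
+ * at 16 (8 TCs, and hence queues, per pool); with 4 or fewer it is capped
+ * at 32 (4 per pool). RSS is disabled since DCB, VMDq and RSS are not
+ * supported simultaneously.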
+ * + **/ +static bool txgbe_set_dcb_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = TXGBE_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = TXGBE_VMDQ_4Q_MASK; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = TXGBE_RSS_DISABLED_MASK; + + adapter->queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, (u8)i, 1, i); + + return true; +} + +/** + * txgbe_set_dcb_queues: Allocate queues for a DCB-enabled device + * @adapter: board private structure to initialize + * + * When DCB (Data Center Bridging) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort DCB + * initialization. + * + * This function handles all combinations of DCB, RSS, and FCoE. + * + **/ +static bool txgbe_set_dcb_queues(struct txgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct txgbe_ring_feature *f; + u16 rss_i, rss_m, i; + u16 tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + + if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = TXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = TXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when DCB is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, (u8)i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +/** + * txgbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * + * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
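+ * For example, more than 32 pools (or fewer than 4 RSS queues) limits
+ * each pool to at most 2 queues; otherwise each pool gets 4 queues.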
+ * + **/ +static bool txgbe_set_vmdq_queues(struct txgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = TXGBE_RSS_DISABLED_MASK; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, TXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool */ + if ((vmdq_i > 32) || (rss_i < 4)) { + vmdq_m = TXGBE_VMDQ_2Q_MASK; + rss_m = TXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = TXGBE_VMDQ_4Q_MASK; + rss_m = TXGBE_RSS_4Q_MASK; + rss_i = 4; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + return true; +} + +/** + * txgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool txgbe_set_rss_queues(struct txgbe_adapter *adapter) +{ + struct txgbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = TXGBE_RSS_64Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* + * Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (rss_i > 1 && adapter->atr_sample_rate) { + f = &adapter->ring_feature[RING_F_FDIR]; + + rss_i = f->indices = f->limit; + + if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE; + } + + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; +} + +/* + * txgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
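+ * The order tried here is DCB with VMDq, then DCB alone, then VMDq alone,
+ * and finally plain RSS as the base case.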
+ * + **/ +static void txgbe_set_num_queues(struct txgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->queues_per_pool = 1; + + if (txgbe_set_dcb_vmdq_queues(adapter)) + return; + + if (txgbe_set_dcb_queues(adapter)) + return; + + if (txgbe_set_vmdq_queues(adapter)) + return; + + txgbe_set_rss_queues(adapter); +} + +/** + * txgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int txgbe_acquire_msix_vectors(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + if (!(adapter->flags & TXGBE_FLAG_MSIX_CAPABLE)) + return -EOPNOTSUPP; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= TXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. 
+ */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void txgbe_add_ring(struct txgbe_ring *ring, + struct txgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * txgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct txgbe_q_vector *q_vector; + struct txgbe_ring *ring; + int node = -1; + int cpu = -1; + u8 tcs = netdev_get_num_tc(adapter->netdev); + int ring_count, size; + + /* note this will allocate space for the ring structure as well! */ + ring_count = txr_count + rxr_count; + size = sizeof(struct txgbe_q_vector) + + (sizeof(struct txgbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + + /* initialize CPU for DCA */ + q_vector->cpu = -1; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + txgbe_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = TXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = TXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + txgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + txr_idx % adapter->queues_per_pool; + else + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while 
(rxr_count) { + /* assign generic ring traits */ + ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + txgbe_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_vmdqs > 1) + ring->queue_index = + rxr_idx % adapter->queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * txgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + txgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * txgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int txgbe_alloc_q_vectors(struct txgbe_adapter *adapter) +{ + unsigned int q_vectors = adapter->num_q_vectors; + unsigned int rxr_remaining = adapter->num_rx_queues; + unsigned int txr_remaining = adapter->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = txgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + txgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * txgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/
+static void txgbe_free_q_vectors(struct txgbe_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		txgbe_free_q_vector(adapter, v_idx);
+}
+
+void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter)
+{
+	if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) {
+		adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED;
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) {
+		adapter->flags &= ~TXGBE_FLAG_MSI_ENABLED;
+		pci_disable_msi(adapter->pdev);
+	}
+}
+
+/**
+ * txgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter)
+{
+	int err;
+
+	/* We will try to get MSI-X interrupts first */
+	if (!txgbe_acquire_msix_vectors(adapter))
+		return;
+
+	/* At this point, we do not have MSI-X capabilities. We need to
+	 * reconfigure or disable various features which require MSI-X
+	 * capability.
+	 */
+
+	/* Disable DCB unless we only have a single traffic class */
+	if (netdev_get_num_tc(adapter->netdev) > 1) {
+		e_dev_warn("Number of DCB TCs exceeds number of available "
+			   "queues. Disabling DCB support.\n");
+		netdev_reset_tc(adapter->netdev);
+	}
+
+	/* Disable VMDq support */
+	e_dev_warn("Disabling VMDq support\n");
+	adapter->flags &= ~TXGBE_FLAG_VMDQ_ENABLED;
+
+	/* Disable RSS */
+	e_dev_warn("Disabling RSS support\n");
+	adapter->ring_feature[RING_F_RSS].limit = 1;
+
+	/* recalculate number of queues now that many features have been
+	 * changed or disabled.
+	 */
+	txgbe_set_num_queues(adapter);
+	adapter->num_q_vectors = 1;
+
+	if (!(adapter->flags & TXGBE_FLAG_MSI_CAPABLE))
+		return;
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		e_dev_warn("Failed to allocate MSI interrupt, falling back to "
+			   "legacy. Error: %d\n",
+			   err);
+	else
+		adapter->flags |= TXGBE_FLAG_MSI_ENABLED;
+}
+
+/**
+ * txgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
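+ *
+ * On success the ring-to-register mapping is cached and the adapter is
+ * marked __TXGBE_DOWN.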
+ **/ +int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + txgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + txgbe_set_interrupt_capability(adapter); + + /* Allocate memory for queues */ + err = txgbe_alloc_q_vectors(adapter); + if (err) { + e_err(probe, "Unable to allocate memory for queue vectors\n"); + txgbe_reset_interrupt_capability(adapter); + return err; + } + + txgbe_cache_ring_register(adapter); + + set_bit(__TXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * txgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter) +{ + txgbe_free_q_vectors(adapter); + txgbe_reset_interrupt_capability(adapter); +} + +void txgbe_tx_ctxtdesc(struct txgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct txgbe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = TXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= TXGBE_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_main.c b/drivers/net/ethernet/netswift/txgbe/txgbe_main.c new file mode 100644 index 0000000000000000000000000000000000000000..d72946d838b120db0c0bc6736369a8d2d1f32861 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_main.c @@ -0,0 +1,8077 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_main.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "txgbe.h"
+#include "txgbe_hw.h"
+#include "txgbe_phy.h"
+#include "txgbe_bp.h"
+
+char txgbe_driver_name[32] = TXGBE_NAME;
+static const char txgbe_driver_string[] =
+	"WangXun 10 Gigabit PCI Express Network Driver";
+
+#define DRV_HW_PERF
+
+#define FPGA
+
+#define DRIVERIOV
+
+#define BYPASS_TAG
+
+#define RELEASE_TAG
+
+#define DRV_VERSION __stringify(1.1.17oe)
+
+const char txgbe_driver_version[32] = DRV_VERSION;
+static const char txgbe_copyright[] =
+	"Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd";
+static const char txgbe_overheat_msg[] =
+	"Network adapter has been stopped because it has overheated. "
+	"If the problem persists, restart the computer, or "
+	"power off the system and replace the adapter";
+static const char txgbe_underheat_msg[] =
+	"Network adapter has been started again since the temperature "
+	"has returned to the normal state";
+
+/* txgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id txgbe_pci_tbl[] = {
+	{ PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_SP1000), 0},
+	{ PCI_VDEVICE(TRUSTNETIC, TXGBE_DEV_ID_WX1820), 0},
+	/* required last entry */
+	{ .device = 0 }
+};
+MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
+
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, ");
+MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static struct workqueue_struct *txgbe_wq;
+
+static bool txgbe_is_sfp(struct txgbe_hw *hw);
+static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev);
+static void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring);
+static void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring);
+static void txgbe_napi_enable_all(struct txgbe_adapter *adapter);
+static void txgbe_napi_disable_all(struct txgbe_adapter *adapter);
+
+extern txgbe_dptype txgbe_ptype_lookup[256];
+
+static inline txgbe_dptype txgbe_decode_ptype(const u8 ptype)
+{
+	return txgbe_ptype_lookup[ptype];
+}
+
+static inline txgbe_dptype
+decode_rx_desc_ptype(const union txgbe_rx_desc *rx_desc)
+{
+	return txgbe_decode_ptype(TXGBE_RXD_PKTTYPE(rx_desc));
+}
+
+static void txgbe_check_minimum_link(struct txgbe_adapter *adapter,
+				     int expected_gts)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev;
+
+	/* Some devices are not connected over PCIe and thus do not negotiate
+	 * speed. These devices do not have valid bus info, and thus any report
+	 * we generate may not be correct.
+	 */
+	if (hw->bus.type == txgbe_bus_type_internal)
+		return;
+
+	pdev = adapter->pdev;
+	pcie_print_link_status(pdev);
+}
+
+/**
+ * txgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
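+ *
+ * Returns the number of matching physical functions, or -1 when the
+ * devices on the bus do not all share this adapter's vendor and device ID
+ * (for example when a function has been direct assigned to a virtual
+ * machine via VT-d), since the port count cannot then be determined
+ * reliably.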
+ **/ +static inline int txgbe_enumerate_functions(struct txgbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. + */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + + return physfns; +} + +void txgbe_service_event_schedule(struct txgbe_adapter *adapter) +{ + if (!test_bit(__TXGBE_DOWN, &adapter->state) && + !test_bit(__TXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__TXGBE_SERVICE_SCHED, &adapter->state)) + queue_work(txgbe_wq, &adapter->service_task); +} + +static void txgbe_service_event_complete(struct txgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__TXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); +} + +static void txgbe_remove_adapter(struct txgbe_hw *hw) +{ + struct txgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__TXGBE_SERVICE_INITED, &adapter->state)) + txgbe_service_event_schedule(adapter); +} + +static void txgbe_check_remove(struct txgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned TXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == TXGBE_CFG_PORT_ST) { + txgbe_remove_adapter(hw); + return; + } + value = rd32(hw, TXGBE_CFG_PORT_ST); + if (value == TXGBE_FAILED_READ_REG) + txgbe_remove_adapter(hw); +} + +static u32 txgbe_validate_register_read(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + int i; + u32 value; + u8 __iomem *reg_addr; + struct txgbe_adapter *adapter = hw->back; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + for (i = 0; i < TXGBE_DEAD_READ_RETRIES; ++i) { + value = txgbe_rd32(reg_addr + reg); + if (value != TXGBE_DEAD_READ_REG) + break; + } + if (quiet) + return value; + if (value == TXGBE_DEAD_READ_REG) + e_err(drv, "%s: register %x read unchanged\n", __func__, reg); + else + e_warn(hw, "%s: register %x read recovered after %d retries\n", + __func__, reg, i + 1); + return value; +} + +/** + * txgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or TXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns TXGBE_FAILED_READ_REG (all ones). 
+ */ +u32 txgbe_read_reg(struct txgbe_hw *hw, u32 reg, bool quiet) +{ + u32 value; + u8 __iomem *reg_addr; + + reg_addr = READ_ONCE(hw->hw_addr); + if (TXGBE_REMOVED(reg_addr)) + return TXGBE_FAILED_READ_REG; + value = txgbe_rd32(reg_addr + reg); + if (unlikely(value == TXGBE_FAILED_READ_REG)) + txgbe_check_remove(hw, reg); + if (unlikely(value == TXGBE_DEAD_READ_REG)) + value = txgbe_validate_register_read(hw, reg, quiet); + return value; +} + +static void txgbe_release_hw_control(struct txgbe_adapter *adapter) +{ + /* Let firmware take over control of hw */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, 0); +} + +static void txgbe_get_hw_control(struct txgbe_adapter *adapter) +{ + /* Let firmware know the driver has taken over */ + wr32m(&adapter->hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_DRV_LOAD, TXGBE_CFG_PORT_CTL_DRV_LOAD); +} + +/** + * txgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + **/ +static void txgbe_set_ivar(struct txgbe_adapter *adapter, s8 direction, + u16 queue, u16 msix_vector) +{ + u32 ivar, index; + struct txgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = 0; + ivar = rd32(&adapter->hw, TXGBE_PX_MISC_IVAR); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(&adapter->hw, TXGBE_PX_MISC_IVAR, ivar); + } else { + /* tx or rx causes */ + msix_vector |= TXGBE_PX_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = rd32(hw, TXGBE_PX_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + wr32(hw, TXGBE_PX_IVAR(queue >> 1), ivar); + } +} + +void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring, + struct txgbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static void txgbe_update_xoff_received(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; + int tc; + int i; + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + wr32m(hw, TXGBE_MMC_CONTROL, TXGBE_MMC_CONTROL_UP, i<<16); + pxoffrxc = rd32(hw, TXGBE_MAC_PXOFFRXC); + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + tc = tx_ring->dcb_tc; + if ((tc <= 7) && (xoff[tc])) + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } +} + +static u64 txgbe_get_tx_completed(struct txgbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 txgbe_get_tx_pending(struct 
txgbe_ring *ring) +{ + struct txgbe_adapter *adapter; + struct txgbe_hw *hw; + u32 head, tail; + + if (ring->accel) + adapter = ring->accel->adapter; + else + adapter = ring->q_vector->adapter; + + hw = &adapter->hw; + head = rd32(hw, TXGBE_PX_TR_RP(ring->reg_idx)); + tail = rd32(hw, TXGBE_PX_TR_WP(ring->reg_idx)); + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +static inline bool txgbe_check_tx_hang(struct txgbe_ring *tx_ring) +{ + u64 tx_done = txgbe_get_tx_completed(tx_ring); + u64 tx_done_old = tx_ring->tx_stats.tx_done_old; + u64 tx_pending = txgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if (tx_done_old == tx_done && tx_pending) + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__TXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__TXGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +/** + * txgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void txgbe_tx_timeout(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + bool real_tx_hang = false; + int i; + u16 value = 0; + u32 value2 = 0, value3 = 0; + u32 head, tail; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", value); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", value); + + for (i = 0; i < adapter->num_tx_queues; i++) { + head = rd32(&adapter->hw, TXGBE_PX_TR_RP(adapter->tx_ring[i]->reg_idx)); + tail = rd32(&adapter->hw, TXGBE_PX_TR_WP(adapter->tx_ring[i]->reg_idx)); + + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "tx ring %d next_to_use is %d, next_to_clean is %d\n", + i, adapter->tx_ring[i]->next_to_use, adapter->tx_ring[i]->next_to_clean); + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "tx ring %d hw rp is 0x%x, wp is 0x%x\n", i, head, tail); + } + + value2 = rd32(&adapter->hw, TXGBE_PX_IMS(0)); + value3 = rd32(&adapter->hw, TXGBE_PX_IMS(1)); + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PX_IMS0 value is 0x%08x, PX_IMS1 value is 0x%08x\n", value2, value3); + + if (value2 || value3) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, "clear interrupt mask.\n"); + wr32(&adapter->hw, TXGBE_PX_ICS(0), value2); + wr32(&adapter->hw, TXGBE_PX_IMC(0), value2); + wr32(&adapter->hw, TXGBE_PX_ICS(1), value3); + wr32(&adapter->hw, TXGBE_PX_IMC(1), value3); + } + + if (adapter->hw.bus.lan_id == 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, "tx timeout. 
do pcie recovery.\n"); + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); +} + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + +/** + * txgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool txgbe_clean_tx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *tx_ring) +{ + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = TXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union txgbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(TXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && txgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct txgbe_hw *hw = &adapter->hw; + u16 value = 0; + + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + rd32(hw, TXGBE_PX_TR_RP(tx_ring->reg_idx)), + rd32(hw, TXGBE_PX_TR_WP(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + 
tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD) { + e_info(hw, "pcie link has been lost.\n"); + } + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + e_info(hw, "real tx hang. do pcie recovery.\n"); + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (txgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__TXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +#define TXGBE_RSS_L4_TYPES_MASK \ + ((1ul << TXGBE_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << TXGBE_RXD_RSSTYPE_IPV6_SCTP)) + +static inline void txgbe_rx_hash(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (TXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * txgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void txgbe_rx_checksum(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + txgbe_dptype dptype = decode_rx_desc_ptype(rx_desc); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_IPCS) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_IPE)) || + (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_OUTERIPCS) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != TXGBE_DEC_PTYPE_PROT_SCTP && TXGBE_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (txgbe_test_staterr(rx_desc, TXGBE_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (dptype.etype >= TXGBE_DEC_PTYPE_ETYPE_IG) { + skb->csum_level = 1; + /* FIXME :does skb->csum_level skb->encapsulation can both set ? */ + skb->encapsulation = 1; + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +static bool txgbe_alloc_mapped_skb(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ + struct sk_buff *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (unlikely(dma)) + return true; + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->skb = skb; + + } + + dma = dma_map_single(rx_ring->dev, skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + dev_kfree_skb_any(skb); + bi->skb = NULL; + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + return true; +} + +static bool txgbe_alloc_mapped_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(txgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + txgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, txgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * txgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void txgbe_alloc_rx_buffers(struct txgbe_ring *rx_ring, u16 cleaned_count) +{ + union txgbe_rx_desc *rx_desc; + struct txgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = TXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (ring_is_hs_enabled(rx_ring)) { + if (!txgbe_alloc_mapped_skb(rx_ring, bi)) + break; + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } + + if (!txgbe_alloc_mapped_page(rx_ring, bi)) + break; + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = TXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static inline u16 txgbe_get_hlen(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + u16 hlen = le16_to_cpu(hdr_info) & TXGBE_RXD_HDRBUFLEN_MASK; + + UNREFERENCED_PARAMETER(rx_ring); + + if (hlen > (TXGBE_RX_HDR_SIZE << TXGBE_RXD_HDRBUFLEN_SHIFT)) + hlen = 0; + else + hlen >>= TXGBE_RXD_HDRBUFLEN_SHIFT; + + return hlen; +} + +static void txgbe_set_rsc_gso_size(struct txgbe_ring __maybe_unused *ring, + struct sk_buff *skb) +{ + u16 hdr_len = eth_get_headlen(skb->data, skb_headlen(skb)); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + TXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void txgbe_update_rsc_stats(struct txgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!TXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += TXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + txgbe_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + TXGBE_CB(skb)->append_cnt = 0; +} + +static void txgbe_rx_vlan(struct txgbe_ring *ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u8 idx = 0; + u16 ethertype; + + if ((ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + TXGBE_RXD_TPID_MASK) >> TXGBE_RXD_TPID_SHIFT; + ethertype = ring->q_vector->adapter->hw.tpid[idx]; + __vlan_hwaccel_put_tag(skb, + htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + +/** + * txgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void txgbe_process_skb_fields(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 flags = rx_ring->q_vector->adapter->flags; + + txgbe_update_rsc_stats(rx_ring, skb); + txgbe_rx_hash(rx_ring, rx_desc, skb); + txgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(flags & TXGBE_FLAG_RX_HWTSTAMP_ENABLED) && + unlikely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_TS))) { + txgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + rx_ring->last_rx_timestamp = jiffies; + } + + txgbe_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void txgbe_rx_skb(struct txgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * txgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. 
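+ *
+ * When RSC is enabled and the descriptor carries an RSC count, the skb is
+ * stashed at the NEXTP index reported by hardware rather than at the next
+ * sequential entry, and the append count is accumulated for the later
+ * gso_size fix-up.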
+ **/ +static bool txgbe_is_non_eop(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct txgbe_rx_buffer *rx_buffer = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(TXGBE_RX_DESC(rx_ring, ntc)); + + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(TXGBE_RXD_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= TXGBE_RXD_RSCCNT_SHIFT; + TXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= TXGBE_RXD_NEXTP_MASK; + ntc >>= TXGBE_RXD_NEXTP_SHIFT; + } + } + + /* if we are the last buffer then there is nothing else to do */ + if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + if (ring_is_hs_enabled(rx_ring)) { + rx_buffer->skb = rx_ring->rx_buffer_info[ntc].skb; + rx_buffer->dma = rx_ring->rx_buffer_info[ntc].dma; + rx_ring->rx_buffer_info[ntc].dma = 0; + } + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * txgbe_pull_tail - txgbe specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an txgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void txgbe_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, TXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * txgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
+ */ +static void txgbe_dma_sync_frag(struct txgbe_ring *rx_ring, + struct sk_buff *skb) +{ + if (ring_uses_build_skb(rx_ring)) { + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + dma_sync_single_range_for_cpu(rx_ring->dev, + TXGBE_CB(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + dma_sync_single_range_for_cpu(rx_ring->dev, + TXGBE_CB(skb)->dma, + frag->page_offset, + skb_frag_size(frag), + DMA_FROM_DEVICE); + } + + /* If the page was released, just unmap it. */ + if (unlikely(TXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, TXGBE_CB(skb)->dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + TXGBE_RX_DMA_ATTR); + } +} + +/** + * txgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool txgbe_cleanup_headers(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (unlikely(txgbe_test_staterr(rx_desc, + TXGBE_RXD_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb) && !skb_headlen(skb)) + txgbe_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * txgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void txgbe_reuse_rx_page(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *old_buff) +{ + struct txgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page_dma = old_buff->page_dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->page_dma, + new_buff->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool txgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * txgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool txgbe_add_rx_frag(struct txgbe_ring *rx_ring, + struct txgbe_rx_buffer *rx_buffer, + union txgbe_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = txgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = txgbe_rx_pg_size(rx_ring) - + txgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= TXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb) && + !ring_is_hs_enabled(rx_ring)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!txgbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, txgbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(txgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
+ */ + page_ref_inc(page); + + return true; +} + +static struct sk_buff *txgbe_fetch_rx_buffer(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + struct txgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + TXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. Only unmap it when EOP is + * reached + */ + if (likely(txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP))) + goto dma_sync; + + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + } else { + if (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) + txgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (txgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + txgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (TXGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + TXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +static struct sk_buff *txgbe_fetch_rx_buffer_hs(struct txgbe_ring *rx_ring, + union txgbe_rx_desc *rx_desc) +{ + struct txgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + int hdr_len = 0; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + rx_buffer->skb = NULL; + prefetchw(skb->data); + + if (!skb_is_nonlinear(skb)) { + hdr_len = txgbe_get_hlen(rx_ring, rx_desc); + if (hdr_len > 0) { + __skb_put(skb, hdr_len); + TXGBE_CB(skb)->dma_released = true; + TXGBE_CB(skb)->dma = rx_buffer->dma; + rx_buffer->dma = 0; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (likely(txgbe_test_staterr(rx_desc, + TXGBE_RXD_STAT_EOP))) + goto dma_sync; + TXGBE_CB(skb)->dma = rx_buffer->page_dma; + goto add_frag; + } + } + + if (txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_EOP)) { + if (skb_headlen(skb)) { + if (TXGBE_CB(skb)->dma_released == true) { + dma_unmap_single(rx_ring->dev, + TXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + TXGBE_CB(skb)->dma = 0; + TXGBE_CB(skb)->dma_released = false; + } + } else + txgbe_dma_sync_frag(rx_ring, skb); + } + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ 
+ dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->page_dma, + rx_buffer->page_offset, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +add_frag: + /* pull page into skb */ + if (txgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + txgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (TXGBE_CB(skb)->dma == rx_buffer->page_dma) { + /* the page has been released from the ring */ + TXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * txgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns amount of work completed. + **/ +static int txgbe_clean_rx_irq(struct txgbe_q_vector *q_vector, + struct txgbe_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = txgbe_desc_unused(rx_ring); + + do { + union txgbe_rx_desc *rx_desc; + struct sk_buff *skb; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= TXGBE_RX_BUFFER_WRITE) { + txgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = TXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!txgbe_test_staterr(rx_desc, TXGBE_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + if (ring_is_hs_enabled(rx_ring)) + skb = txgbe_fetch_rx_buffer_hs(rx_ring, rx_desc); + else + skb = txgbe_fetch_rx_buffer(rx_ring, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (txgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (txgbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + txgbe_process_skb_fields(rx_ring, rx_desc, skb); + + txgbe_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * txgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * txgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. 
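+ *
+ * Each Rx and Tx ring is mapped to its queue vector through the IVAR
+ * tables, the per-vector ITR value is programmed, and the vector after
+ * the queue vectors is mapped to the other (link status change, etc.)
+ * causes.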
+ **/ +static void txgbe_configure_msix(struct txgbe_adapter *adapter) +{ + u16 v_idx; + + /* Populate MSIX to EITR Select */ + if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + wr32(&adapter->hw, TXGBE_PX_ITRSEL, eitrsel); + } else { + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct txgbe_ring *ring; + + txgbe_for_each_ring(ring, q_vector->rx) + txgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + txgbe_for_each_ring(ring, q_vector->tx) + txgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + txgbe_write_eitr(q_vector); + } + + txgbe_set_ivar(adapter, -1, 0, v_idx); + + wr32(&adapter->hw, TXGBE_PX_ITR(v_idx), 1950); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * txgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see txgbe_param.c) + **/ +static void txgbe_update_itr(struct txgbe_q_vector *q_vector, + struct txgbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) { + itr_setting = low_latency; + } + break; + case low_latency: + if (bytes_perint > 20) { + itr_setting = bulk_latency; + } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; + } + break; + case bulk_latency: + if (bytes_perint <= 20) { + itr_setting = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * txgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
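+ *
+ * Note that the interval is written together with TXGBE_PX_ITR_CNT_WDIS,
+ * presumably so that re-writing EITR does not also reset the hardware
+ * interrupt counter.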
+ */ +void txgbe_write_eitr(struct txgbe_q_vector *q_vector) +{ + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & TXGBE_MAX_EITR; + + itr_reg |= TXGBE_PX_ITR_CNT_WDIS; + + wr32(hw, TXGBE_PX_ITR(v_idx), itr_reg); +} + +static void txgbe_set_itr(struct txgbe_q_vector *q_vector) +{ + u16 new_itr = q_vector->itr; + u8 current_itr; + + txgbe_update_itr(q_vector, &q_vector->tx); + txgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = TXGBE_100K_ITR; + break; + case low_latency: + new_itr = TXGBE_20K_ITR; + break; + case bulk_latency: + new_itr = TXGBE_12K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + txgbe_write_eitr(q_vector); + } +} + +/** + * txgbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void txgbe_check_overtemp_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + s32 temp_state; + + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_TEMP_SENSOR_EVENT; + + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. 
+ * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) + return; + + temp_state = TCALL(hw, phy.ops.check_overtemp); + if (!temp_state || temp_state == TXGBE_NOT_IMPLEMENTED) + return; + + if (temp_state == TXGBE_ERR_UNDERTEMP && + test_bit(__TXGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", txgbe_underheat_msg); + wr32m(&adapter->hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, TXGBE_RDB_PB_CTL_RXEN); + netif_carrier_on(adapter->netdev); + + clear_bit(__TXGBE_HANGING, &adapter->state); + } else if (temp_state == TXGBE_ERR_OVERTEMP && + !test_and_set_bit(__TXGBE_HANGING, &adapter->state)) { + e_crit(drv, "%s\n", txgbe_overheat_msg); + netif_carrier_off(adapter->netdev); + + wr32m(&adapter->hw, TXGBE_RDB_PB_CTL, + TXGBE_RDB_PB_CTL_RXEN, 0); + } + + adapter->interrupt_event = 0; +} + +static void txgbe_check_overtemp_event(struct txgbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + if (!(eicr & TXGBE_PX_MISC_IC_OVER_HEAT)) + return; + + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_EVENT; + txgbe_service_event_schedule(adapter); + } +} + +static void txgbe_check_sfp_event(struct txgbe_adapter *adapter, u32 eicr) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 eicr_mask = TXGBE_PX_MISC_IC_GPIO; + u32 reg; + + if (eicr & eicr_mask) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + wr32(hw, TXGBE_GPIO_INTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIO_INTSTATUS); + if (reg & TXGBE_GPIO_INTSTATUS_2) { + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_2); + adapter->sfp_poll_time = 0; + txgbe_service_event_schedule(adapter); + } + if (reg & TXGBE_GPIO_INTSTATUS_3) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_3); + txgbe_service_event_schedule(adapter); + } + + if (reg & TXGBE_GPIO_INTSTATUS_6) { + wr32(hw, TXGBE_GPIO_EOI, + TXGBE_GPIO_EOI_6); + adapter->flags |= + TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } + wr32(hw, TXGBE_GPIO_INTMASK, 0x0); + } + } +} + +static void txgbe_check_lsc(struct txgbe_adapter *adapter) +{ + adapter->lsc_int++; + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + txgbe_service_event_schedule(adapter); + } +} + +/** + * txgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void txgbe_irq_enable(struct txgbe_adapter *adapter, bool queues, bool flush) +{ + u32 mask = 0; + struct txgbe_hw *hw = &adapter->hw; + u8 device_type = hw->subsystem_id & 0xF0; + + /* enable gpio interrupt */ + if (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII) { + mask |= TXGBE_GPIO_INTEN_2; + mask |= TXGBE_GPIO_INTEN_3; + mask |= TXGBE_GPIO_INTEN_6; + } + wr32(&adapter->hw, TXGBE_GPIO_INTEN, mask); + + if (device_type != TXGBE_ID_MAC_XAUI && + device_type != TXGBE_ID_MAC_SGMII) { + mask = TXGBE_GPIO_INTTYPE_LEVEL_2 | TXGBE_GPIO_INTTYPE_LEVEL_3 | + TXGBE_GPIO_INTTYPE_LEVEL_6; + } + wr32(&adapter->hw, TXGBE_GPIO_INTTYPE_LEVEL, mask); + + /* enable misc interrupt */ + mask = TXGBE_PX_MISC_IEN_MASK; + + if (adapter->flags2 & TXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= TXGBE_PX_MISC_IEN_OVER_HEAT; + + if ((adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) && + !(adapter->flags2 & 
TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + mask |= TXGBE_PX_MISC_IEN_FLOW_DIR; + + mask |= TXGBE_PX_MISC_IEN_TIMESYNC; + + wr32(&adapter->hw, TXGBE_PX_MISC_IEN, mask); + + /* unmask interrupt */ + txgbe_intr_enable(&adapter->hw, TXGBE_INTR_MISC(adapter)); + if (queues) + txgbe_intr_enable(&adapter->hw, TXGBE_INTR_QALL(adapter)); + + /* flush configuration */ + if (flush) + TXGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data) +{ + struct txgbe_adapter *adapter = data; + struct txgbe_hw *hw = &adapter->hw; + u32 eicr; + u32 ecc; + u32 value = 0; + u16 pci_val = 0; + + eicr = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); + + if (BOND_CHECK_LINK_MODE == 1) { + if (eicr & (TXGBE_PX_MISC_IC_ETH_LKDN)) { + value = rd32(hw, 0x14404); + value = value & 0x1; + if (value == 0) { + adapter->link_up = false; + adapter->flags2 |= TXGBE_FLAG2_LINK_DOWN; + txgbe_service_event_schedule(adapter); + } + } + } else { + if (eicr & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN)) + txgbe_check_lsc(adapter); + } + if (eicr & TXGBE_PX_MISC_IC_ETH_AN) { + if (adapter->backplane_an == 1 && (KR_POLLING == 0)) { + value = txgbe_rd32_epcs(hw, 0x78002); + value = value & 0x4; + if (value == 0x4) { + txgbe_kr_intr_handle(adapter); + adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; + txgbe_service_event_schedule(adapter); + } + } + } + + if (eicr & TXGBE_PX_MISC_IC_PCIE_REQ_ERR) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "lan id %d,PCIe request error founded.\n", hw->bus.lan_id); + + pci_read_config_word(adapter->pdev, PCI_VENDOR_ID, &pci_val); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci vendor id is 0x%x\n", pci_val); + + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_val); + ERROR_REPORT1(TXGBE_ERROR_POLLING, "pci command reg is 0x%x.\n", pci_val); + + if (hw->bus.lan_id == 0) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + txgbe_service_event_schedule(adapter); + } else + wr32(&adapter->hw, TXGBE_MIS_PF_SM, 1); + } + + if (eicr & TXGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + ecc = rd32(hw, TXGBE_MIS_ST); + if (((ecc & TXGBE_MIS_ST_LAN0_ECC) && (hw->bus.lan_id == 0)) || + ((ecc & TXGBE_MIS_ST_LAN1_ECC) && (hw->bus.lan_id == 1))) + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + + txgbe_service_event_schedule(adapter); + } + if (eicr & TXGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } + if ((eicr & TXGBE_PX_MISC_IC_STALL) || + (eicr & TXGBE_PX_MISC_IC_ETH_EVENT)) { + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + txgbe_service_event_schedule(adapter); + } + + /* Handle Flow Director Full threshold interrupt */ + if (eicr & TXGBE_PX_MISC_IC_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__TXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + wr32m(hw, TXGBE_PX_MISC_IEN, + TXGBE_PX_MISC_IEN_FLOW_DIR, 0); + adapter->flags2 |= + TXGBE_FLAG2_FDIR_REQUIRES_REINIT; + txgbe_service_event_schedule(adapter); + } + } + + txgbe_check_sfp_event(adapter, eicr); + txgbe_check_overtemp_event(adapter, eicr); + + if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) + txgbe_ptp_check_pps_event(adapter); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + 
txgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t txgbe_msix_clean_rings(int __always_unused irq, void *data) +{ + struct txgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * txgbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + **/ +int txgbe_poll(struct napi_struct *napi, int budget) +{ + struct txgbe_q_vector *q_vector = + container_of(napi, struct txgbe_q_vector, napi); + struct txgbe_adapter *adapter = q_vector->adapter; + struct txgbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + txgbe_for_each_ring(ring, q_vector->tx) { + if (!txgbe_clean_tx_irq(q_vector, ring)) + clean_complete = false; + } + + /* Exit if we are called by netpoll */ + if (budget <= 0) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + txgbe_for_each_ring(ring, q_vector->rx) { + int cleaned = txgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting == 1) + txgbe_set_itr(q_vector); + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_intr_enable(&adapter->hw, + TXGBE_INTR_Q(q_vector->v_idx)); + + return 0; +} + +/** + * txgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * txgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. 
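+ *
+ * One interrupt is requested per populated q_vector; the MSI-X entry that
+ * follows the queue vectors is requested separately for txgbe_msix_other().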
+ **/ +static int txgbe_request_msix_irqs(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", netdev->name, ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &txgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt" + " '%s' Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } + + /* If Flow Director is enabled, set interrupt affinity */ + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, + &q_vector->affinity_mask); + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + txgbe_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~TXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * txgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t txgbe_intr(int __always_unused irq, void *data) +{ + struct txgbe_adapter *adapter = data; + struct txgbe_q_vector *q_vector = adapter->q_vector[0]; + struct txgbe_hw *hw = &adapter->hw; + u32 eicr; + u32 eicr_misc; + u32 value ; + + eicr = txgbe_misc_isb(adapter, TXGBE_ISB_VEC0); + if (!eicr) { + /* + * shared interrupt alert! + * the interrupt that we masked before the EICR read. 
+ */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + adapter->isb_mem[TXGBE_ISB_VEC0] = 0; + if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED)) + wr32(&(adapter->hw), TXGBE_PX_INTA, 1); + + eicr_misc = txgbe_misc_isb(adapter, TXGBE_ISB_MISC); + if (eicr_misc & (TXGBE_PX_MISC_IC_ETH_LK | TXGBE_PX_MISC_IC_ETH_LKDN)) + txgbe_check_lsc(adapter); + + if (eicr_misc & TXGBE_PX_MISC_IC_ETH_AN) { + if (adapter->backplane_an == 1 && (KR_POLLING == 0)) { + value = txgbe_rd32_epcs(hw, 0x78002); + value = value & 0x4; + if (value == 0x4) { + txgbe_kr_intr_handle(adapter); + adapter->flags2 |= TXGBE_FLAG2_KR_TRAINING; + txgbe_service_event_schedule(adapter); + } + } + } + + if (eicr_misc & TXGBE_PX_MISC_IC_INT_ERR) { + e_info(link, "Received unrecoverable ECC Err," + "initiating reset.\n"); + adapter->flags2 |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + txgbe_service_event_schedule(adapter); + } + + if (eicr_misc & TXGBE_PX_MISC_IC_DEV_RST) { + adapter->flags2 |= TXGBE_FLAG2_RESET_INTR_RECEIVED; + txgbe_service_event_schedule(adapter); + } + txgbe_check_sfp_event(adapter, eicr_misc); + txgbe_check_overtemp_event(adapter, eicr_misc); + + if (unlikely(eicr_misc & TXGBE_PX_MISC_IC_TIMESYNC)) + txgbe_ptp_check_pps_event(adapter); + + adapter->isb_mem[TXGBE_ISB_MISC] = 0; + /* would disable interrupts here but it is auto disabled */ + napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * txgbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__TXGBE_DOWN, &adapter->state)) + txgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * txgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
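+ *
+ * The preference order is MSI-X, then MSI, then a shared legacy interrupt.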
+ **/ +static int txgbe_request_irq(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + err = txgbe_request_msix_irqs(adapter); + else if (adapter->flags & TXGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, &txgbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, &txgbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void txgbe_free_irq(struct txgbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct txgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * txgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +void txgbe_irq_disable(struct txgbe_adapter *adapter) +{ + wr32(&adapter->hw, TXGBE_PX_MISC_IEN, 0); + txgbe_intr_disable(&adapter->hw, TXGBE_INTR_ALL); + + TXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * txgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void txgbe_configure_msi_and_legacy(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector = adapter->q_vector[0]; + struct txgbe_ring *ring; + + txgbe_write_eitr(q_vector); + + txgbe_for_each_ring(ring, q_vector->rx) + txgbe_set_ivar(adapter, 0, ring->reg_idx, 0); + + txgbe_for_each_ring(ring, q_vector->tx) + txgbe_set_ivar(adapter, 1, ring->reg_idx, 0); + + txgbe_set_ivar(adapter, -1, 0, 1); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * txgbe_configure_tx_ring - Configure Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
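+ *
+ * The queue is disabled while the base address, length and thresholds are
+ * programmed, then re-enabled and polled until the enable bit reads back set.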
+ **/
+void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
+			     struct txgbe_ring *ring)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = TXGBE_PX_TR_CFG_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), TXGBE_PX_TR_CFG_SWFLSH);
+	TXGBE_WRITE_FLUSH(hw);
+
+	wr32(hw, TXGBE_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	wr32(hw, TXGBE_PX_TR_BAH(reg_idx), tdba >> 32);
+
+	/* reset head and tail pointers */
+	wr32(hw, TXGBE_PX_TR_RP(reg_idx), 0);
+	wr32(hw, TXGBE_PX_TR_WP(reg_idx), 0);
+	ring->tail = adapter->io_addr + TXGBE_PX_TR_WP(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	txdctl |= TXGBE_RING_SIZE(ring) << TXGBE_PX_TR_CFG_TR_SIZE_SHIFT;
+
+	/*
+	 * set WTHRESH to encourage burst writeback, it should not be set
+	 * higher than 1 when:
+	 * - ITR is 0 as it could cause false TX hangs
+	 * - ITR is set to > 100k int/sec and BQL is enabled
+	 *
+	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+
+	txdctl |= 0x20 << TXGBE_PX_TR_CFG_WTHRESH_SHIFT;
+
+	/* reinitialize flowdirector state */
+	if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__TXGBE_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	/* initialize XPS */
+	if (!test_and_set_bit(__TXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+		struct txgbe_q_vector *q_vector = ring->q_vector;
+
+		if (q_vector)
+			netif_set_xps_queue(adapter->netdev,
+					    &q_vector->affinity_mask,
+					    ring->queue_index);
+	}
+
+	clear_bit(__TXGBE_HANG_CHECK_ARMED, &ring->state);
+
+	/* enable queue */
+	wr32(hw, TXGBE_PX_TR_CFG(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		msleep(1);
+		txdctl = rd32(hw, TXGBE_PX_TR_CFG(reg_idx));
+	} while (--wait_loop && !(txdctl & TXGBE_PX_TR_CFG_ENABLE));
+	if (!wait_loop)
+		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
+ * txgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/ +static void txgbe_configure_tx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + /* TDM_CTL.TE must be before Tx queues are enabled */ + wr32m(hw, TXGBE_TDM_CTL, + TXGBE_TDM_CTL_TE, TXGBE_TDM_CTL_TE); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + + wr32m(hw, TXGBE_TSC_BUF_AE, 0x3FF, 0x10); + /* enable mac transmitter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE, TXGBE_MAC_TX_CFG_TE); +} + +static void txgbe_enable_rx_drop(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + srrctl |= TXGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +static void txgbe_disable_rx_drop(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 reg_idx = ring->reg_idx; + + u32 srrctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + + srrctl &= ~TXGBE_PX_RR_CFG_DROP_EN; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +void txgbe_set_rx_drop_en(struct txgbe_adapter *adapter) +{ + int i; + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & txgbe_fc_tx_pause))) { + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +static void txgbe_configure_srrctl(struct txgbe_adapter *adapter, + struct txgbe_ring *rx_ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 srrctl; + u16 reg_idx = rx_ring->reg_idx; + + srrctl = rd32m(hw, TXGBE_PX_RR_CFG(reg_idx), + ~(TXGBE_PX_RR_CFG_RR_HDR_SZ | + TXGBE_PX_RR_CFG_RR_BUF_SZ | + TXGBE_PX_RR_CFG_SPLIT_MODE)); + /* configure header buffer length, needed for RSC */ + srrctl |= TXGBE_RX_HDR_SIZE << TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= txgbe_rx_bufsz(rx_ring) >> TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT; + if (ring_is_hs_enabled(rx_ring)) + srrctl |= TXGBE_PX_RR_CFG_SPLIT_MODE; + + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + */ +u32 txgbe_rss_indir_tbl_entries(struct txgbe_adapter *adapter) +{ + return 128; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
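+ *
+ * Each 32-bit TXGBE_RDB_RSSTBL register holds four 8-bit table entries, so
+ * entries are packed four at a time before each register write.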
+ */ +void txgbe_store_reta(struct txgbe_adapter *adapter) +{ + u32 i, reta_entries = txgbe_rss_indir_tbl_entries(adapter); + struct txgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(hw, TXGBE_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +/** + * Write the RETA table to HW (for devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +//static void txgbe_store_vfreta(struct txgbe_adapter *adapter) + +static void txgbe_setup_reta(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = txgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* + * Program table for at least 4 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + wr32(hw, TXGBE_RDB_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + txgbe_store_reta(adapter); +} + +static void txgbe_setup_mrqc(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rss_field = 0; + + /* VT, DCB and RSS do not coexist at the same time */ + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED && + adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_PCSD, TXGBE_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = TXGBE_RDB_RA_CTL_RSS_IPV4 | + TXGBE_RDB_RA_CTL_RSS_IPV4_TCP | + TXGBE_RDB_RA_CTL_RSS_IPV6 | + TXGBE_RDB_RA_CTL_RSS_IPV6_TCP; + + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV4_UDP; + if (adapter->flags2 & TXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= TXGBE_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) { + /*wait to fix txgbe_setup_vfreta(adapter);*/ + txgbe_setup_reta(adapter); + } else { + txgbe_setup_reta(adapter); + } + + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) + rss_field |= TXGBE_RDB_RA_CTL_RSS_EN; + + wr32(hw, TXGBE_RDB_RA_CTL, rss_field); +} + +/** + * txgbe_clear_rscctl - disable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_clear_rscctl(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RSC, 0); + + clear_ring_rsc_enabled(ring); +} + +/** + * txgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void txgbe_configure_rscctl(struct txgbe_adapter *adapter, + struct 
txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rscctrl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + rscctrl |= TXGBE_PX_RR_CFG_RSC; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65536 + */ +#if (MAX_SKB_FRAGS >= 16) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_16; +#elif (MAX_SKB_FRAGS >= 8) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_8; +#elif (MAX_SKB_FRAGS >= 4) + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_4; +#else + rscctrl |= TXGBE_PX_RR_CFG_MAX_RSCBUF_1; +#endif + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rscctrl); +} + +static void txgbe_rx_desc_queue_enable(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + do { + msleep(1); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && !(rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", reg_idx); + } +} + +/* disable the specified tx ring/queue */ +void txgbe_disable_tx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl, reg_offset, enable_mask; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + reg_offset = TXGBE_PX_TR_CFG(reg_idx); + enable_mask = TXGBE_PX_TR_CFG_ENABLE; + + /* write value back with TDCFG.ENABLE bit cleared */ + wr32m(hw, reg_offset, enable_mask, 0); + + /* the hardware may take up to 100us to really disable the tx queue */ + do { + udelay(10); + rxdctl = rd32(hw, reg_offset); + } while (--wait_loop && (rxdctl & enable_mask)); + + if (!wait_loop) { + e_err(drv, "TDCFG.ENABLE on Tx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +/* disable the specified rx ring/queue */ +void txgbe_disable_rx_queue(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + int wait_loop = TXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + + /* write value back with RXDCTL.ENABLE bit cleared */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, 0); + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + } while (--wait_loop && (rxdctl & TXGBE_PX_RR_CFG_RR_EN)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void txgbe_configure_rx_ring(struct txgbe_adapter *adapter, + struct txgbe_ring *ring) +{ + struct txgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = rd32(hw, TXGBE_PX_RR_CFG(reg_idx)); + txgbe_disable_rx_queue(adapter, ring); + + wr32(hw, TXGBE_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_RR_BAH(reg_idx), rdba >> 32); + + if (ring->count == TXGBE_MAX_RXD) + rxdctl |= 0 << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + else + rxdctl |= (ring->count / 128) << TXGBE_PX_RR_CFG_RR_SIZE_SHIFT; + + rxdctl |= 0x1 << TXGBE_PX_RR_CFG_RR_THER_SHIFT; + wr32(hw, TXGBE_PX_RR_CFG(reg_idx), rxdctl); + + /* reset head and tail 
pointers */ + wr32(hw, TXGBE_PX_RR_RP(reg_idx), 0); + wr32(hw, TXGBE_PX_RR_WP(reg_idx), 0); + ring->tail = adapter->io_addr + TXGBE_PX_RR_WP(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + txgbe_configure_srrctl(adapter, ring); + /* In ESX, RSCCTL configuration is done by on demand */ + txgbe_configure_rscctl(adapter, ring); + + /* enable receive descriptor ring */ + wr32m(hw, TXGBE_PX_RR_CFG(reg_idx), + TXGBE_PX_RR_CFG_RR_EN, TXGBE_PX_RR_CFG_RR_EN); + + txgbe_rx_desc_queue_enable(adapter, ring); + txgbe_alloc_rx_buffers(ring, txgbe_desc_unused(ring)); +} + +static void txgbe_setup_psrtype(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + int pool; + + /* PSRTYPE must be initialized in adapters */ + u32 psrtype = TXGBE_RDB_PL_CFG_L4HDR | + TXGBE_RDB_PL_CFG_L3HDR | + TXGBE_RDB_PL_CFG_L2HDR | + TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR | + TXGBE_RDB_PL_CFG_TUN_TUNHDR; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + for_each_set_bit(pool, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) + wr32(hw, TXGBE_RDB_PL_CFG(VMDQ_P(pool)), psrtype); +} + +/** + * txgbe_configure_bridge_mode - common settings for configuring bridge mode + * @adapter - the private structure + * + * This function's purpose is to remove code duplication and configure some + * settings require to switch bridge modes. + **/ +static void txgbe_configure_bridge_mode(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned int p; + + if (adapter->flags & TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { + /* disable Tx loopback, rely on switch hairpin mode */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, 0); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + for (p = 0; p < adapter->num_vfs; p++) { + TCALL(hw, mac.ops.set_source_address_pruning, true, p); + } + + for_each_set_bit(p, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + TCALL(hw, mac.ops.set_source_address_pruning, true, VMDQ_P(p)); + } + } else { + /* enable Tx loopback for internal VF/PF communication */ + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_SW_EN, TXGBE_PSR_CTL_SW_EN); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + for (p = 0; p < adapter->num_vfs; p++) { + TCALL(hw, mac.ops.set_source_address_pruning, false, p); + } + + for_each_set_bit(p, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + TCALL(hw, mac.ops.set_source_address_pruning, false, VMDQ_P(p)); + } + } +} + +static void txgbe_configure_virtualization(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 i; + + if (!(adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)) + return; + + wr32m(hw, TXGBE_PSR_VM_CTL, + TXGBE_PSR_VM_CTL_POOL_MASK | + TXGBE_PSR_VM_CTL_REPLEN, + VMDQ_P(0) << TXGBE_PSR_VM_CTL_POOL_SHIFT | + TXGBE_PSR_VM_CTL_REPLEN); + + for_each_set_bit(i, &adapter->fwd_bitmask, TXGBE_MAX_MACVLANS) { + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + wr32m(hw, TXGBE_PSR_VM_L2CTL(i), + TXGBE_PSR_VM_L2CTL_AUPE, TXGBE_PSR_VM_L2CTL_AUPE); + } + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; + + /* Enable only the PF pools for Tx/Rx */ + wr32(hw, TXGBE_RDM_VF_RE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset), (~0) << vf_shift); + wr32(hw, TXGBE_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return; + + /* configure default bridge settings */ + txgbe_configure_bridge_mode(adapter); + + /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below. + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_LLDP), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + TXGBE_ETH_P_LLDP)); /* LLDP eth procotol type */ + + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_FC), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | + TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF | + ETH_P_PAUSE)); + } + + for (i = 0; i < adapter->num_vfs; i++) { + /* enable ethertype anti spoofing if hw supports it */ + TCALL(hw, mac.ops.set_ethertype_anti_spoofing, true, i); + } +} + +static void txgbe_set_rx_buffer_len(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct txgbe_ring *rx_ring; + int i; + u32 mhadd; + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = rd32(hw, TXGBE_PSR_MAX_SZ); + if (max_frame != mhadd) { + wr32(hw, TXGBE_PSR_MAX_SZ, max_frame); + } + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + + if (adapter->flags & TXGBE_FLAG_RX_HS_ENABLED) { + rx_ring->rx_buf_len = TXGBE_RX_HDR_SIZE; + set_ring_hs_enabled(rx_ring); + } else + clear_ring_hs_enabled(rx_ring); + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + } +} + +/** + * txgbe_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
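+ *
+ * Receives are disabled while PSRTYPE, RSC, RSS and the individual rings
+ * are programmed, and the receive path is re-enabled at the end.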
+ **/ +static void txgbe_configure_rx(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl, psrctl; + + /* disable receives while setting up the descriptors */ + TCALL(hw, mac.ops.disable_rx); + + txgbe_setup_psrtype(adapter); + + /* enable hw crc stripping */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_CRC_STRIP, TXGBE_RSC_CTL_CRC_STRIP); + + /* RSC Setup */ + psrctl = rd32m(hw, TXGBE_PSR_CTL, ~TXGBE_PSR_CTL_RSC_DIS); + psrctl |= TXGBE_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) + psrctl |= TXGBE_PSR_CTL_RSC_DIS; + wr32(hw, TXGBE_PSR_CTL, psrctl); + + /* Program registers for the distribution of queues */ + txgbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + txgbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = rd32(hw, TXGBE_RDB_PB_CTL); + + /* enable all receives */ + rxctrl |= TXGBE_RDB_PB_CTL_RXEN; + TCALL(hw, mac.ops.enable_rx_dma, rxctrl); +} + +static int txgbe_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) { + if (vid < VLAN_N_VID) + set_bit(vid, adapter->active_vlans); + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, true); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* enable vlan id for all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), true); + } + } + + return 0; +} + +static int txgbe_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + int pool_ndx = VMDQ_P(0); + + /* User is not allowed to remove vlan ID 0 */ + if (!vid) + return 0; + + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) { + TCALL(hw, mac.ops.set_vfta, vid, pool_ndx, false); + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + int i; + /* remove vlan id from all pools */ + for_each_set_bit(i, &adapter->fwd_bitmask, + TXGBE_MAX_MACVLANS) + TCALL(hw, mac.ops.set_vfta, vid, + VMDQ_P(i), false); + } + } + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +#ifdef HAVE_8021P_SUPPORT +/** + * txgbe_vlan_strip_disable - helper to disable vlan tag stripping + * @adapter: driver data + */ +void txgbe_vlan_strip_disable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + /* leave vlan tag stripping enabled for DCB */ + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + return; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, 0); + } +} + +#endif +/** + * txgbe_vlan_strip_enable - helper to enable vlan tag stripping + * @adapter: driver data + */ +void txgbe_vlan_strip_enable(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = adapter->rx_ring[i]; + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(hw, 
TXGBE_PX_RR_CFG(j), + TXGBE_PX_RR_CFG_VLAN, TXGBE_PX_RR_CFG_VLAN); + } +} + +void txgbe_vlan_mode(struct net_device *netdev, u32 features) +{ +#if defined(HAVE_8021P_SUPPORT) + struct txgbe_adapter *adapter = netdev_priv(netdev); +#endif +#ifdef HAVE_8021P_SUPPORT + bool enable; +#endif + +#ifdef HAVE_8021P_SUPPORT + enable = !!(features & (NETIF_F_HW_VLAN_CTAG_RX)); + + if (enable) + /* enable VLAN tag insert/strip */ + txgbe_vlan_strip_enable(adapter); + else + /* disable VLAN tag insert/strip */ + txgbe_vlan_strip_disable(adapter); + +#endif /* HAVE_8021P_SUPPORT */ +} + +static void txgbe_restore_vlan(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid; + + txgbe_vlan_mode(netdev, netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + txgbe_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); +} + +static u8 *txgbe_addr_list_itr(struct txgbe_hw __maybe_unused *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + + /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is + * defined, so we have to wrap the pointer above correctly to prevent + * a warning. + */ + *vmdq = VMDQ_P(0); + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * txgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +int txgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *addr_list = NULL; + int addr_count = 0; + + if (!hw->mac.ops.update_mc_addr_list) + return -ENOMEM; + + if (!netif_running(netdev)) + return 0; + + + if (netdev_mc_empty(netdev)) { + TCALL(hw, mac.ops.update_mc_addr_list, NULL, 0, + txgbe_addr_list_itr, true); + } else { + ha = list_first_entry(&netdev->mc.list, + struct netdev_hw_addr, list); + addr_list = ha->addr; + addr_count = netdev_mc_count(netdev); + + TCALL(hw, mac.ops.update_mc_addr_list, addr_list, addr_count, + txgbe_addr_list_itr, true); + } + + return addr_count; +} + + +void txgbe_full_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); + } +} + +static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + TXGBE_MAC_STATE_IN_USE) { + TCALL(hw, mac.ops.set_rar, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); + } else { + TCALL(hw, mac.ops.clear_rar, i); + } + adapter->mac_table[i].state &= + ~(TXGBE_MAC_STATE_MODIFIED); + } + } +} + +int txgbe_available_rars(struct 
txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function destroys the first RAR entry */ +static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, + u8 *addr) +{ + struct txgbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].pools = 1ULL << VMDQ_P(0); + adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | + TXGBE_MAC_STATE_IN_USE); + TCALL(hw, mac.ops.set_rar, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].pools, + TXGBE_PSR_MAC_SWC_AD_H_AV); +} + +int txgbe_add_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { + continue; + } + adapter->mac_table[i].state |= (TXGBE_MAC_STATE_MODIFIED | + TXGBE_MAC_STATE_IN_USE); + memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN); + adapter->mac_table[i].pools = (1ULL << pool); + txgbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) +{ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + } + txgbe_sync_mac_table(adapter); +} + +int txgbe_del_mac_filter(struct txgbe_adapter *adapter, + const u8 *addr, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + u32 i; + struct txgbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].pools | (1ULL << pool)) { + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].pools = 0; + txgbe_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; +} + +static int txgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = txgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); + + return min_t(int, ret, 0); +} + +static int txgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + txgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); + + return 0; +} + +/** + * txgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +int txgbe_write_uc_addr_list(struct net_device *netdev, int pool) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > txgbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + txgbe_del_mac_filter(adapter, ha->addr, pool); + txgbe_add_mac_filter(adapter, ha->addr, pool); + count++; + } + } + return count; +} + +int txgbe_add_cloud_switcher(struct txgbe_adapter *adapter, u32 key, u16 pool) +{ + struct txgbe_hw *hw = &adapter->hw; + + UNREFERENCED_PARAMETER(pool); + + wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); + wr32(hw, TXGBE_PSR_CL_SWC_KEY, key); + wr32(hw, TXGBE_PSR_CL_SWC_CTL, + TXGBE_PSR_CL_SWC_CTL_VLD | TXGBE_PSR_CL_SWC_CTL_DST_MSK); + wr32(hw, TXGBE_PSR_CL_SWC_VM_L, 0x1); + wr32(hw, TXGBE_PSR_CL_SWC_VM_H, 0x0); + + return 0; +} + +int txgbe_del_cloud_switcher(struct txgbe_adapter *adapter, u32 key, u16 pool) +{ + /* search table for addr, if found, set to 0 and sync */ + struct txgbe_hw *hw = &adapter->hw; + + UNREFERENCED_PARAMETER(key); + UNREFERENCED_PARAMETER(pool); + + wr32(hw, TXGBE_PSR_CL_SWC_IDX, 0); + wr32(hw, TXGBE_PSR_CL_SWC_CTL, 0); + + return 0; +} + +/** + * txgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. + **/ +void txgbe_set_rx_mode(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr, vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = rd32m(hw, TXGBE_PSR_CTL, + ~(TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE)); + vmolr = rd32m(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), + ~(TXGBE_PSR_VM_L2CTL_UPE | + TXGBE_PSR_VM_L2CTL_MPE | + TXGBE_PSR_VM_L2CTL_ROPE | + TXGBE_PSR_VM_L2CTL_ROMPE)); + vlnctrl = rd32m(hw, TXGBE_PSR_VLAN_CTL, + ~(TXGBE_PSR_VLAN_CTL_VFE | + TXGBE_PSR_VLAN_CTL_CFIEN)); + + /* set all bits that we expect to always be set */ + fctrl |= TXGBE_PSR_CTL_BAM | TXGBE_PSR_CTL_MFE; + vmolr |= TXGBE_PSR_VM_L2CTL_BAM | + TXGBE_PSR_VM_L2CTL_AUPE | + TXGBE_PSR_VM_L2CTL_VACC; + vlnctrl |= TXGBE_PSR_VLAN_CTL_VFE; + + hw->addr_ctrl.user_set_promisc = false; + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (TXGBE_PSR_CTL_UPE | TXGBE_PSR_CTL_MPE); + /* pf don't want packets routing to vf, so clear UPE */ + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + } + + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= TXGBE_PSR_CTL_MPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + + /* This is useful for sniffing bad packets. 
*/ + if (netdev->features & NETIF_F_RXALL) { + vmolr |= (TXGBE_PSR_VM_L2CTL_UPE | TXGBE_PSR_VM_L2CTL_MPE); + vlnctrl &= ~TXGBE_PSR_VLAN_CTL_VFE; + /* receive bad packets */ + wr32m(hw, TXGBE_RSC_CTL, + TXGBE_RSC_CTL_SAVE_MAC_ERR, + TXGBE_RSC_CTL_SAVE_MAC_ERR); + } else { + vmolr |= TXGBE_PSR_VM_L2CTL_ROPE | TXGBE_PSR_VM_L2CTL_ROMPE; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, txgbe_uc_sync, txgbe_uc_unsync)) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROPE; + fctrl |= TXGBE_PSR_CTL_UPE; + } + + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = txgbe_write_mc_addr_list(netdev); + if (count < 0) { + vmolr &= ~TXGBE_PSR_VM_L2CTL_ROMPE; + vmolr |= TXGBE_PSR_VM_L2CTL_MPE; + } + + wr32(hw, TXGBE_PSR_VLAN_CTL, vlnctrl); + wr32(hw, TXGBE_PSR_CTL, fctrl); + wr32(hw, TXGBE_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + txgbe_vlan_strip_enable(adapter); + else + txgbe_vlan_strip_disable(adapter); + + /* enable cloud switch */ + if (adapter->flags2 & TXGBE_FLAG2_CLOUD_SWITCH_ENABLED) { + txgbe_add_cloud_switcher(adapter, 0x10, 0); + } +} + +static void txgbe_napi_enable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} + +static void txgbe_napi_disable_all(struct txgbe_adapter *adapter) +{ + struct txgbe_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +void txgbe_clear_vxlan_port(struct txgbe_adapter *adapter) +{ + adapter->vxlan_port = 0; + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + wr32(&adapter->hw, TXGBE_CFG_VXLAN, 0); +} + +#define TXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static inline unsigned long txgbe_tso_features(void) +{ + unsigned long features = 0; + + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL | TXGBE_GSO_PARTIAL_FEATURES; + + return features; +} + +static void txgbe_configure_lli(struct txgbe_adapter *adapter) +{ + /* lli should only be enabled with MSI-X and MSI */ + if (!(adapter->flags & TXGBE_FLAG_MSI_ENABLED) && + !(adapter->flags & TXGBE_FLAG_MSIX_ENABLED)) + return; + + if (adapter->lli_etype) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + (TXGBE_RDB_5T_CTL1_LLI | + TXGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, TXGBE_RDB_ETYPE_CLS(0), + TXGBE_RDB_ETYPE_CLS_LLI); + wr32(&adapter->hw, TXGBE_PSR_ETYPE_SWC(0), + (adapter->lli_etype | + TXGBE_PSR_ETYPE_SWC_FILTER_EN)); + } + + if (adapter->lli_port) { + wr32(&adapter->hw, TXGBE_RDB_5T_CTL1(0), + (TXGBE_RDB_5T_CTL1_LLI | + TXGBE_RDB_5T_CTL1_SIZE_BP)); + wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0), + (TXGBE_RDB_5T_CTL0_POOL_MASK_EN | + (TXGBE_RDB_5T_CTL0_PRIORITY_MASK << + TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) | + (TXGBE_RDB_5T_CTL0_DEST_PORT_MASK << + TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT))); + + wr32(&adapter->hw, TXGBE_RDB_5T_SDP(0), + (adapter->lli_port << 16)); + } + + if (adapter->lli_size) { + wr32(&adapter->hw, 
TXGBE_RDB_5T_CTL1(0),
+		     TXGBE_RDB_5T_CTL1_LLI);
+		wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE,
+		      TXGBE_RDB_LLI_THRE_SZ(~0), adapter->lli_size);
+		wr32(&adapter->hw, TXGBE_RDB_5T_CTL0(0),
+		     (TXGBE_RDB_5T_CTL0_POOL_MASK_EN |
+		      (TXGBE_RDB_5T_CTL0_PRIORITY_MASK <<
+		       TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT) |
+		      (TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK <<
+		       TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT)));
+	}
+
+	if (adapter->lli_vlan_pri) {
+		wr32m(&adapter->hw, TXGBE_RDB_LLI_THRE,
+		      TXGBE_RDB_LLI_THRE_PRIORITY_EN |
+		      TXGBE_RDB_LLI_THRE_UP(~0),
+		      TXGBE_RDB_LLI_THRE_PRIORITY_EN |
+		      (adapter->lli_vlan_pri << TXGBE_RDB_LLI_THRE_UP_SHIFT));
+	}
+}
+
+/* Additional bittime to account for TXGBE framing */
+#define TXGBE_ETH_FRAMING 20
+
+/*
+ * txgbe_hpbthresh - calculate high water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb - packet buffer to calculate
+ */
+static int txgbe_hpbthresh(struct txgbe_adapter *adapter, int pb)
+{
+	struct txgbe_hw *hw = &adapter->hw;
+	struct net_device *dev = adapter->netdev;
+	int link, tc, kb, marker;
+	u32 dv_id, rx_pba;
+
+	/* Calculate max LAN frame size */
+	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + TXGBE_ETH_FRAMING;
+
+	/* Calculate delay value for device */
+	dv_id = TXGBE_DV(link, tc);
+
+	/* Loopback switch introduces additional latency */
+	if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)
+		dv_id += TXGBE_B2BT(tc);
+
+	/* Delay value is calculated in bit times convert to KB */
+	kb = TXGBE_BT2KB(dv_id);
+	rx_pba = rd32(hw, TXGBE_RDB_PB_SZ(pb))
+		 >> TXGBE_RDB_PB_SZ_SHIFT;
+
+	marker = rx_pba - kb;
+
+	/* It is possible that the packet buffer is not large enough
+	 * to provide required headroom. In this case throw an error
+	 * to the user and do the best we can.
+	 */
+	if (marker < 0) {
+		e_warn(drv, "Packet Buffer(%i) can not provide enough "
+		       "headroom to support flow control. "
+ "Decrease MTU or number of traffic classes\n", pb); + marker = tc + 1; + } + + return marker; +} + +/* + * txgbe_lpbthresh - calculate low water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb - packet buffer to calculate + */ +static int txgbe_lpbthresh(struct txgbe_adapter *adapter, int __maybe_unused pb) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = TXGBE_LOW_DV(tc); + + /* Delay value is calculated in bit times, convert to KB */ + return TXGBE_BT2KB(dv_id); +} + +/* + * txgbe_pbthresh_setup - calculate and set up high and low water marks + */ +static void txgbe_pbthresh_setup(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; + + if (!num_tc) + num_tc = 1; + + + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = txgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = txgbe_lpbthresh(adapter, i); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < TXGBE_DCB_MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; +} + +static void txgbe_configure_pb(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int hdrm; + int tc = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 32 << adapter->fdir_pballoc; + else + hdrm = 0; + + TCALL(hw, mac.ops.setup_rxpba, tc, hdrm, PBA_STRATEGY_EQUAL); + txgbe_pbthresh_setup(adapter); +} + +static void txgbe_fdir_filter_restore(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + txgbe_fdir_set_input_mask(hw, &adapter->fdir_mask, + adapter->cloud_mode); + + hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + txgbe_fdir_write_perfect_filter(hw, + &filter->filter, + filter->sw_idx, + (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) ? 
+ TXGBE_RDB_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action]->reg_idx, + adapter->cloud_mode); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void txgbe_configure_isb(struct txgbe_adapter *adapter) +{ + /* set ISB Address */ + struct txgbe_hw *hw = &adapter->hw; + + wr32(hw, TXGBE_PX_ISB_ADDR_L, + adapter->isb_dma & DMA_BIT_MASK(32)); + wr32(hw, TXGBE_PX_ISB_ADDR_H, adapter->isb_dma >> 32); +} + +void txgbe_configure_port(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 value, i; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED) { + if (tcs > 4) + /* 8 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_NUM_VT_16 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + /* 4 TCs */ + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_NUM_VT_32 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + value = TXGBE_CFG_PORT_CTL_NUM_VT_32; + else /* adapter->ring_feature[RING_F_RSS].indices <= 2 */ + value = TXGBE_CFG_PORT_CTL_NUM_VT_64; + } else { + if (tcs > 4) + value = TXGBE_CFG_PORT_CTL_NUM_TC_8 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else if (tcs > 1) + value = TXGBE_CFG_PORT_CTL_NUM_TC_4 | + TXGBE_CFG_PORT_CTL_DCB_EN; + else + value = 0; + } + + value |= TXGBE_CFG_PORT_CTL_D_VLAN | TXGBE_CFG_PORT_CTL_QINQ; + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_NUM_TC_MASK | + TXGBE_CFG_PORT_CTL_NUM_VT_MASK | + TXGBE_CFG_PORT_CTL_DCB_EN | + TXGBE_CFG_PORT_CTL_D_VLAN | + TXGBE_CFG_PORT_CTL_QINQ, + value); + + wr32(hw, TXGBE_CFG_TAG_TPID(0), + ETH_P_8021Q | ETH_P_8021AD << 16); + adapter->hw.tpid[0] = ETH_P_8021Q; + adapter->hw.tpid[1] = ETH_P_8021AD; + for (i = 1; i < 4; i++) + wr32(hw, TXGBE_CFG_TAG_TPID(i), + ETH_P_8021Q | ETH_P_8021Q << 16); + for (i = 2; i < 8; i++) + adapter->hw.tpid[i] = ETH_P_8021Q; +} + +static void txgbe_configure(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_configure_pb(adapter); + + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + txgbe_configure_virtualization(adapter); + txgbe_configure_port(adapter); + + txgbe_set_rx_mode(adapter->netdev); + txgbe_restore_vlan(adapter); + + TCALL(hw, mac.ops.disable_sec_rx_path); + + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) { + txgbe_init_fdir_signature(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + txgbe_init_fdir_perfect(&adapter->hw, + adapter->fdir_pballoc, + adapter->cloud_mode); + txgbe_fdir_filter_restore(adapter); + } + + TCALL(hw, mac.ops.enable_sec_rx_path); + + TCALL(hw, mac.ops.setup_eee, + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && + (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); + + txgbe_configure_tx(adapter); + txgbe_configure_rx(adapter); + txgbe_configure_isb(adapter); +} + +static bool txgbe_is_sfp(struct txgbe_hw *hw) +{ + switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber: + return true; + default: + return false; + } +} + +static bool txgbe_is_backplane(struct txgbe_hw *hw) +{ + switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_backplane: + return true; + default: + return false; + } +} + +/** + * txgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void txgbe_sfp_link_config(struct txgbe_adapter *adapter) +{ + /* + * We are assuming the worst case scenerio here, and that + * is that an SFP was inserted/removed 
after the reset + * but before SFP detection was enabled. As such the best + * solution is to just start searching as soon as we start + */ + + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + adapter->sfp_poll_time = 0; +} + +/** + * txgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_non_sfp_link_config(struct txgbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + u32 ret = TXGBE_ERR_LINK_SETUP; + + ret = TCALL(hw, mac.ops.check_link, &speed, &link_up, false); + + if (ret) + goto link_cfg_out; + + if (link_up) + return 0; + + if ((hw->subsystem_id & 0xF0) != TXGBE_ID_SFI_XAUI) { + /* setup external PHY Mac Interface */ + mtdSetMacInterfaceControl(&hw->phy_dev, hw->phy.addr, MTD_MAC_TYPE_XAUI, + MTD_FALSE, MTD_MAC_SNOOP_OFF, + 0, MTD_MAC_SPEED_1000_MBPS, + MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED, + MTD_TRUE, MTD_TRUE); + + speed = hw->phy.autoneg_advertised; + if (!speed) + ret = TCALL(hw, mac.ops.get_link_capabilities, &speed, + &autoneg); + if (ret) + goto link_cfg_out; + } else { + speed = TXGBE_LINK_SPEED_10GB_FULL; + autoneg = false; + } + + ret = TCALL(hw, mac.ops.setup_link, speed, autoneg); + +link_cfg_out: + return ret; +} + +/** + * txgbe_clear_vf_stats_counters - Clear out VF stats after reset + * @adapter: board private structure + * + * On a reset we need to clear out the VF stats or accounting gets + * messed up because they're not clear on read. + **/ +static void txgbe_clear_vf_stats_counters(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + adapter->vfinfo[i].last_vfstats.gprc = + rd32(hw, TXGBE_VX_GPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gprc += + adapter->vfinfo[i].vfstats.gprc; + adapter->vfinfo[i].vfstats.gprc = 0; + adapter->vfinfo[i].last_vfstats.gptc = + rd32(hw, TXGBE_VX_GPTC(i)); + adapter->vfinfo[i].saved_rst_vfstats.gptc += + adapter->vfinfo[i].vfstats.gptc; + adapter->vfinfo[i].vfstats.gptc = 0; + adapter->vfinfo[i].last_vfstats.gorc = + rd32(hw, TXGBE_VX_GORC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gorc += + adapter->vfinfo[i].vfstats.gorc; + adapter->vfinfo[i].vfstats.gorc = 0; + adapter->vfinfo[i].last_vfstats.gotc = + rd32(hw, TXGBE_VX_GOTC_LSB(i)); + adapter->vfinfo[i].saved_rst_vfstats.gotc += + adapter->vfinfo[i].vfstats.gotc; + adapter->vfinfo[i].vfstats.gotc = 0; + adapter->vfinfo[i].last_vfstats.mprc = + rd32(hw, TXGBE_VX_MPRC(i)); + adapter->vfinfo[i].saved_rst_vfstats.mprc += + adapter->vfinfo[i].vfstats.mprc; + adapter->vfinfo[i].vfstats.mprc = 0; + } +} + +static void txgbe_setup_gpie(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) { + gpie = TXGBE_PX_GPIE_MODEL; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + } + + wr32(hw, TXGBE_PX_GPIE, gpie); +} + +static void txgbe_up_complete(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int err; + u32 links_reg; + u16 value; + + txgbe_get_hw_control(adapter); + txgbe_setup_gpie(adapter); + + if (adapter->flags & TXGBE_FLAG_MSIX_ENABLED) + txgbe_configure_msix(adapter); + else + txgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for SFP+ fiber */ + TCALL(hw, 
mac.ops.enable_tx_laser); + + smp_mb__before_atomic(); + clear_bit(__TXGBE_DOWN, &adapter->state); + txgbe_napi_enable_all(adapter); + + txgbe_configure_lli(adapter); + + if (txgbe_is_sfp(hw)) { + txgbe_sfp_link_config(adapter); + } else if (txgbe_is_backplane(hw)) { + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + txgbe_service_event_schedule(adapter); + } else { + err = txgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + links_reg = rd32(hw, TXGBE_CFG_PORT_ST); + if (links_reg & TXGBE_CFG_PORT_ST_LINK_UP) { + if (links_reg & TXGBE_CFG_PORT_ST_LINK_10G) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (links_reg & (TXGBE_CFG_PORT_ST_LINK_1G | TXGBE_CFG_PORT_ST_LINK_100M)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + } + + /* clear any pending interrupts, may auto mask */ + rd32(hw, TXGBE_PX_IC(0)); + rd32(hw, TXGBE_PX_IC(1)); + rd32(hw, TXGBE_PX_MISC_IC); + txgbe_irq_enable(adapter, true, true); + + /* enable external PHY interrupt */ + if ((hw->subsystem_id & 0xF0) == TXGBE_ID_XAUI) { + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); + /* only enable T unit int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf043, 0x1); + /* active high */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 31, 0xf041, 0x0); + /* enable AN complete and link status change int */ + txgbe_write_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8010, 0xc00); + } + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + + mod_timer(&adapter->service_timer, jiffies); + txgbe_clear_vf_stats_counters(adapter); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(hw, TXGBE_CFG_PORT_CTL, + TXGBE_CFG_PORT_CTL_PFRSTD, TXGBE_CFG_PORT_CTL_PFRSTD); +} + +void txgbe_reinit_locked(struct txgbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + netif_trans_update(adapter->netdev); + + while (test_and_set_bit(__TXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + txgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + txgbe_up(adapter); + clear_bit(__TXGBE_RESETTING, &adapter->state); +} + +void txgbe_up(struct txgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + txgbe_configure(adapter); + + txgbe_up_complete(adapter); +} + +void txgbe_reset(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(TXGBE_FLAG2_SEARCH_FOR_SFP | + TXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + err = TCALL(hw, mac.ops.init_hw); + switch (err) { + case 0: + case TXGBE_ERR_SFP_NOT_PRESENT: + case TXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case TXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case TXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + txgbe_flush_sw_mac_table(adapter); + txgbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + + /* Clear saved DMA coalescing values except for watchdog_timer */ + hw->mac.dmac_config.fcoe_en = false; + hw->mac.dmac_config.link_speed = 0; + hw->mac.dmac_config.fcoe_tc = 0; + hw->mac.dmac_config.num_tcs = 0; + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_reset(adapter); +} + +/** + * txgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void txgbe_clean_rx_ring(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct txgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) { + dma_unmap_single(dev, + rx_buffer->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + } + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (TXGBE_CB(skb)->dma_released) { + dma_unmap_single(dev, + TXGBE_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + TXGBE_CB(skb)->dma = 0; + TXGBE_CB(skb)->dma_released = false; + } + + if (TXGBE_CB(skb)->page_released) + dma_unmap_page(dev, + TXGBE_CB(skb)->dma, + txgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->page_dma, + txgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + + __free_pages(rx_buffer->page, + txgbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; + } + + size = sizeof(struct txgbe_rx_buffer) * 
rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * txgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void txgbe_clean_tx_ring(struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + txgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * txgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_rx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * txgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void txgbe_clean_all_tx_rings(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void txgbe_fdir_filter_exit(struct txgbe_adapter *adapter) +{ + struct hlist_node *node; + struct txgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void txgbe_disable_device(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + + u32 i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__TXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + txgbe_disable_pcie_master(hw); + /* disable receives */ + TCALL(hw, mac.ops.disable_rx); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + txgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + txgbe_irq_disable(adapter); + + txgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(TXGBE_FLAG2_FDIR_REQUIRES_REINIT | + TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED); + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + wr32(&adapter->hw, TXGBE_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + + /* Disable all VFTE/VFRE TX/RX */ + } + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP))) { + /* 
disable mac transmitter */ + wr32m(hw, TXGBE_MAC_TX_CFG, + TXGBE_MAC_TX_CFG_TE, 0); + } + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + wr32(hw, TXGBE_PX_TR_CFG(reg_idx), + TXGBE_PX_TR_CFG_SWFLSH); + } + + /* Disable the Tx DMA engine */ + wr32m(hw, TXGBE_TDM_CTL, TXGBE_TDM_CTL_TE, 0); +} + + +void txgbe_down(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + txgbe_disable_device(adapter); + + txgbe_reset(adapter); + + if (!(((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))) + /* power down the optics for SFP+ fiber */ + TCALL(&adapter->hw, mac.ops.disable_tx_laser); + + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); +} + +/** + * txgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The txgbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 txgbe_init_shared_code(struct txgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("\n"); + + status = txgbe_init_ops(hw); + return status; +} + +/** + * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) + * @adapter: board private structure to initialize + * + * txgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static const u32 def_rss_key[10] = { + 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D +}; + +static int txgbe_sw_init(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + unsigned int fdir; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + if (hw->revision_id == TXGBE_FAILED_READ_CFG_BYTE && + txgbe_check_cfg_remove(hw, pdev)) { + e_err(probe, "read of revision id failed\n"); + err = -ENODEV; + goto out; + } + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &hw->subsystem_id); + if (hw->subsystem_id == TXGBE_FAILED_READ_CFG_WORD) { + e_err(probe, "read of subsystem id failed\n"); + err = -ENODEV; + goto out; + } + + err = txgbe_init_shared_code(hw); + if (err) { + e_err(probe, "init_shared_code failed: %d\n", err); + goto out; + } + adapter->mac_table = kzalloc(sizeof(struct txgbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + if (!adapter->mac_table) { + err = TXGBE_ERR_OUT_OF_MEM; + e_err(probe, "mac_table allocation failed: %d\n", err); + goto out; + } + + memcpy(adapter->rss_key, def_rss_key, sizeof(def_rss_key)); + + /* Set common capability flags and settings */ + adapter->flags2 |= TXGBE_FLAG2_RSC_CAPABLE; + fdir = min_t(int, TXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->max_q_vectors = TXGBE_MAX_MSIX_Q_VECTORS_SAPPHIRE; + + /* Set MAC specific capability flags and exceptions */ + adapter->flags |= TXGBE_FLAGS_SP_INIT; + adapter->flags2 |= TXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + hw->phy.smart_speed = txgbe_smart_speed_off; + adapter->flags2 |= TXGBE_FLAG2_EEE_CAPABLE; + + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + + TCALL(hw, mbx.ops.init_params); + + /* default flow control settings */ + hw->fc.requested_mode = txgbe_fc_full; + hw->fc.current_mode = txgbe_fc_full; /* init for ethtool output */ + + adapter->last_lfc_mode = hw->fc.current_mode; + hw->fc.pause_time = TXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = TXGBE_DEFAULT_TXD; + adapter->rx_ring_count = TXGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK; + adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + + adapter->tx_timeout_recovery_level = 0; + + /* PF holds first pool slot */ + adapter->num_vmdqs = 1; + set_bit(0, &adapter->fwd_bitmask); + set_bit(__TXGBE_DOWN, &adapter->state); +out: + return err; +} + +/** + * txgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct txgbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K 
*/ + tx_ring->size = tx_ring->count * sizeof(union txgbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = txgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * txgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; + int size; + + size = sizeof(struct txgbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union txgbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * txgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = txgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) { + continue; + } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + txgbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * txgbe_setup_isb_resources - allocate interrupt status resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int txgbe_setup_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + adapter->isb_mem = dma_alloc_coherent(dev, + sizeof(u32) * TXGBE_ISB_MAX, + &adapter->isb_dma, + GFP_KERNEL); + if (!adapter->isb_mem) + return -ENOMEM; + memset(adapter->isb_mem, 0, sizeof(u32) * TXGBE_ISB_MAX); + return 0; +} + +/** + * txgbe_free_isb_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static void txgbe_free_isb_resources(struct txgbe_adapter *adapter) +{ + struct device *dev = pci_dev_to_dev(adapter->pdev); + + dma_free_coherent(dev, sizeof(u32) * TXGBE_ISB_MAX, + adapter->isb_mem, adapter->isb_dma); + adapter->isb_mem = NULL; +} + +/** + * txgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void txgbe_free_tx_resources(struct txgbe_ring *tx_ring) +{ + txgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * txgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void txgbe_free_all_tx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + txgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * txgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void txgbe_free_rx_resources(struct txgbe_ring *rx_ring) +{ + txgbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * txgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void txgbe_free_all_rx_resources(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + txgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * txgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if ((new_mtu < 68) || (new_mtu > 
9414)) + return -EINVAL; + + /* + * we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. + */ + if ((adapter->flags & TXGBE_FLAG_SRIOV_ENABLED) && + (new_mtu > ETH_DATA_LEN)) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + + return 0; +} + +/** + * txgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int txgbe_open(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int err; + + /*special for backplane flow*/ + adapter->flags2 &= ~TXGBE_FLAG2_KR_PRO_DOWN; + + /* disallow open during test */ + if (test_bit(__TXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = txgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = txgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = txgbe_setup_isb_resources(adapter); + if (err) + goto err_req_isb; + + txgbe_configure(adapter); + + err = txgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_vmdqs > 1 + ? adapter->queues_per_pool + : adapter->num_rx_queues); + if (err) + goto err_set_queues; + + txgbe_ptp_init(adapter); + + txgbe_up_complete(adapter); + + txgbe_clear_vxlan_port(adapter); + udp_tunnel_get_rx_info(netdev); + + return 0; + +err_set_queues: + txgbe_free_irq(adapter); +err_req_irq: + txgbe_free_isb_resources(adapter); +err_req_isb: + txgbe_free_all_rx_resources(adapter); + +err_setup_rx: + txgbe_free_all_tx_resources(adapter); +err_setup_tx: + txgbe_reset(adapter); + + return err; +} + +/** + * txgbe_close_suspend - actions necessary to both suspend and close flows + * @adapter: the private adapter struct + * + * This function should contain the necessary work common to both suspending + * and closing of the device. 
+ */ +static void txgbe_close_suspend(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + + txgbe_ptp_suspend(adapter); + + txgbe_disable_device(adapter); + if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP)) + TCALL(hw, mac.ops.disable_tx_laser); + txgbe_clean_all_tx_rings(adapter); + txgbe_clean_all_rx_rings(adapter); + + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); +} + +/** + * txgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int txgbe_close(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4) { + txgbe_bp_close_protect(adapter); + } + + txgbe_ptp_stop(adapter); + + txgbe_down(adapter); + txgbe_free_irq(adapter); + + txgbe_free_isb_resources(adapter); + txgbe_free_all_rx_resources(adapter); + txgbe_free_all_tx_resources(adapter); + + txgbe_fdir_filter_exit(adapter); + + txgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int txgbe_resume(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter; + struct net_device *netdev; + u32 err; + + adapter = pci_get_drvdata(pdev); + netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__TXGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + txgbe_reset(adapter); + + rtnl_lock(); + + err = txgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = txgbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ +/* + * __txgbe_shutdown is not used when power management + * is disabled on older kernels (<2.6.12). causes a compile + * warning/error, because it is defined and not used. 
+ */ +static int __txgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) + txgbe_close_suspend(adapter); + rtnl_unlock(); + + txgbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + /* this won't stop the link if manageability or WoL is enabled */ + txgbe_stop_mac_link_on_d3(hw); + + if (wufc) { + txgbe_set_rx_mode(netdev); + txgbe_configure_rx(adapter); + /* enable the optics for SFP+ fiber as we can WoL */ + TCALL(hw, mac.ops.enable_tx_laser); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & TXGBE_PSR_WKUP_CTL_MC) { + wr32m(hw, TXGBE_PSR_CTL, + TXGBE_PSR_CTL_MPE, TXGBE_PSR_CTL_MPE); + } + + pci_clear_master(adapter->pdev); + wr32(hw, TXGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(hw, TXGBE_PSR_WKUP_CTL, 0); + } + + pci_wake_from_d3(pdev, !!wufc); + + *enable_wake = !!wufc; + txgbe_release_hw_control(adapter); + + if (!test_and_set_bit(__TXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int txgbe_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +{ + int retval; + bool wake; + + retval = __txgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void txgbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __txgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * txgbe_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces txgbe_get_stats for kernels which support it. 
+ */ +static void txgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, + start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by txgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; +} + +/** + * txgbe_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void txgbe_update_stats(struct txgbe_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->netdev->stats; + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct txgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += 
tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + hwstats->crcerrs += rd32(hw, TXGBE_RX_CRC_ERROR_FRAMES_LOW); + + /* 8 register reads */ + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = rd32(hw, TXGBE_RDB_MPCNT(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + hwstats->pxontxc[i] += rd32(hw, TXGBE_RDB_PXONTXC(i)); + hwstats->pxofftxc[i] += + rd32(hw, TXGBE_RDB_PXOFFTXC(i)); + hwstats->pxonrxc[i] += rd32(hw, TXGBE_MAC_PXONRXC(i)); + } + + hwstats->gprc += rd32(hw, TXGBE_PX_GPRC); + + txgbe_update_xoff_received(adapter); + + hwstats->o2bgptc += rd32(hw, TXGBE_TDM_OS2BMC_CNT); + if (txgbe_check_mng_access(&adapter->hw)) { + hwstats->o2bspc += rd32(hw, TXGBE_MNG_OS2BMC_CNT); + hwstats->b2ospc += rd32(hw, TXGBE_MNG_BMC2OS_CNT); + } + hwstats->b2ogprc += rd32(hw, TXGBE_RDM_BMC2OS_CNT); + hwstats->gorc += rd32(hw, TXGBE_PX_GORC_LSB); + hwstats->gorc += (u64)rd32(hw, TXGBE_PX_GORC_MSB) << 32; + + hwstats->gotc += rd32(hw, TXGBE_PX_GOTC_LSB); + hwstats->gotc += (u64)rd32(hw, TXGBE_PX_GOTC_MSB) << 32; + + + adapter->hw_rx_no_dma_resources += + rd32(hw, TXGBE_RDM_DRP_PKT); + hwstats->lxonrxc += rd32(hw, TXGBE_MAC_LXONRXC); + + hwstats->fdirmatch += rd32(hw, TXGBE_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(hw, TXGBE_RDB_FDIR_MISS); + + bprc = rd32(hw, TXGBE_RX_BC_FRAMES_GOOD_LOW); + hwstats->bprc += bprc; + hwstats->mprc = 0; + + for (i = 0; i < 128; i++) + hwstats->mprc += rd32(hw, TXGBE_PX_MPRC(i)); + + + hwstats->roc += rd32(hw, TXGBE_RX_OVERSIZE_FRAMES_GOOD); + hwstats->rlec += rd32(hw, TXGBE_RX_LEN_ERROR_FRAMES_LOW); + lxon = rd32(hw, TXGBE_RDB_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = rd32(hw, TXGBE_RDB_LXOFFTXC); + hwstats->lxofftxc += lxoff; + + hwstats->gptc += rd32(hw, TXGBE_PX_GPTC); + hwstats->mptc += rd32(hw, TXGBE_TX_MC_FRAMES_GOOD_LOW); + hwstats->ruc += rd32(hw, TXGBE_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->tpr += rd32(hw, TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW); + hwstats->bptc += rd32(hw, TXGBE_TX_BC_FRAMES_GOOD_LOW); + /* Fill out the OS statistics structure */ + net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ + net_stats->rx_errors = hwstats->crcerrs + + hwstats->rlec; + net_stats->rx_dropped = 0; + net_stats->rx_length_errors = hwstats->rlec; + net_stats->rx_crc_errors = hwstats->crcerrs; + net_stats->rx_missed_errors = total_mpc; + + /* + * VF Stats Collection - skip while resetting because these + * are not clear on read and otherwise you'll sometimes get + * crazy values. 
+ */ + if (!test_bit(__TXGBE_RESETTING, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + UPDATE_VF_COUNTER_32bit(TXGBE_VX_GPRC(i), \ + adapter->vfinfo[i].last_vfstats.gprc, \ + adapter->vfinfo[i].vfstats.gprc); + UPDATE_VF_COUNTER_32bit(TXGBE_VX_GPTC(i), \ + adapter->vfinfo[i].last_vfstats.gptc, \ + adapter->vfinfo[i].vfstats.gptc); + UPDATE_VF_COUNTER_36bit(TXGBE_VX_GORC_LSB(i), \ + TXGBE_VX_GORC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gorc, \ + adapter->vfinfo[i].vfstats.gorc); + UPDATE_VF_COUNTER_36bit(TXGBE_VX_GOTC_LSB(i), \ + TXGBE_VX_GOTC_MSB(i), \ + adapter->vfinfo[i].last_vfstats.gotc, \ + adapter->vfinfo[i].vfstats.gotc); + UPDATE_VF_COUNTER_32bit(TXGBE_VX_MPRC(i), \ + adapter->vfinfo[i].last_vfstats.mprc, \ + adapter->vfinfo[i].vfstats.mprc); + } + } +} + +/** + * txgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_fdir_reinit_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & TXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (txgbe_reinit_fdir_tables(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__TXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); + /* re-enable flow director interrupts */ + wr32m(hw, TXGBE_PX_MISC_IEN, + TXGBE_PX_MISC_IEN_FLOW_DIR, TXGBE_PX_MISC_IEN_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); + } +} + +/** + * txgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ */ +static void txgbe_check_hang_subtask(struct txgbe_adapter *adapter) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + +} + +/** + * txgbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void txgbe_watchdog_update_link(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; +// bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + u32 reg; + u32 i = 1; + + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + link_speed = TXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + TXGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; + } + + for (i = 0; i < 3; i++) { + TCALL(hw, mac.ops.check_link, &link_speed, &link_up, false); + msleep(1); + } + + if (link_up && !((adapter->flags & TXGBE_FLAG_DCB_ENABLED))) { + TCALL(hw, mac.ops.fc_enable); + txgbe_set_rx_drop_en(adapter); + } + + if (link_up) { + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + + if (link_speed & TXGBE_LINK_SPEED_10GB_FULL) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_10G); + } else if (link_speed & (TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_100_FULL | TXGBE_LINK_SPEED_10_FULL)) { + wr32(hw, TXGBE_MAC_TX_CFG, + (rd32(hw, TXGBE_MAC_TX_CFG) & + ~TXGBE_MAC_TX_CFG_SPEED_MASK) | TXGBE_MAC_TX_CFG_TE | + TXGBE_MAC_TX_CFG_SPEED_1G); + } + + /* Re configure MAC RX */ + reg = rd32(hw, TXGBE_MAC_RX_CFG); + wr32(hw, TXGBE_MAC_RX_CFG, reg); + wr32(hw, TXGBE_MAC_PKT_FLT, TXGBE_MAC_PKT_FLT_PR); + reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); + wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; + if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.dmac_config.link_speed != link_speed || + hw->mac.dmac_config.num_tcs != num_tcs) { + hw->mac.dmac_config.link_speed = link_speed; + hw->mac.dmac_config.num_tcs = num_tcs; + TCALL(hw, mac.ops.dmac_config); + } + } +} + +static void txgbe_update_default_up(struct txgbe_adapter *adapter) +{ + u8 up = 0; + + adapter->default_up = up; +} + +/** + * txgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_link_is_up(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP; + + /* flow_rx, flow_tx report link flow control status */ + flow_rx = (rd32(hw, TXGBE_MAC_RX_FLOW_CTRL) & 
0x101) == 0x1; + flow_tx = !!(TXGBE_RDB_RFCC_RFCE_802_3X & + rd32(hw, TXGBE_RDB_RFCC)); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == TXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == TXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == TXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == TXGBE_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + + /* update the default user priority for VFs */ + txgbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ +} + +/** + * txgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void txgbe_watchdog_link_is_down(struct txgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct txgbe_hw *hw = &adapter->hw; + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4) { + txgbe_bp_down_event(adapter); + } + + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) + txgbe_ptp_start_cyclecounter(adapter); + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + +} + +static bool txgbe_ring_tx_pending(struct txgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct txgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool txgbe_vf_tx_pending(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + u32 i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = rd32(hw, + TXGBE_PX_TR_RPn(q_per_pool, i, j)); + t = rd32(hw, + TXGBE_PX_TR_WPn(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * txgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_flush_tx(struct txgbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (txgbe_ring_tx_pending(adapter) || + txgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + e_warn(drv, "initiating reset due to lost link with " + "pending Tx work\n"); + adapter->flags2 |= TXGBE_FLAG2_PF_RESET_REQUESTED; + } + } +} + +/** + * txgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void txgbe_watchdog_subtask(struct txgbe_adapter *adapter) +{ + u32 value = 0; + struct txgbe_hw *hw = &adapter->hw; + + /* if interface is down do nothing */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state) || + test_bit(__TXGBE_RESETTING, &adapter->state)) + return; + + if (hw->subsystem_device_id == TXGBE_ID_WX1820_KR_KX_KX4 || + hw->subsystem_device_id == TXGBE_ID_SP1000_KR_KX_KX4) { + txgbe_bp_watchdog_event(adapter); + } + + if (BOND_CHECK_LINK_MODE == 1) { + value = rd32(hw, 0x14404); + value = value & 0x1; + if (value == 1) + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + } + if (!(adapter->flags2 & TXGBE_FLAG2_LINK_DOWN)) + txgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + txgbe_watchdog_link_is_up(adapter); + else + txgbe_watchdog_link_is_down(adapter); + + txgbe_update_stats(adapter); + + txgbe_watchdog_flush_tx(adapter); +} + +/** + * txgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_detection_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + struct txgbe_mac_info *mac = &hw->mac; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & TXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + if (adapter->sfp_poll_time && + time_after(adapter->sfp_poll_time, jiffies)) + return; /* If not yet time to poll for SFP */ + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->sfp_poll_time = jiffies + TXGBE_SFP_POLL_JIFFIES - 1; + + err = TCALL(hw, phy.ops.identify_sfp); + if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == TXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. 
*/ + adapter->flags2 |= TXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & TXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~TXGBE_FLAG2_SFP_NEEDS_RESET; + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = txgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = txgbe_setup_mac_link; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + } else { + mac->ops.setup_link = txgbe_setup_mac_link; + mac->ops.set_rate_select_speed = + txgbe_set_hard_rate_select_speed; + hw->phy.autoneg_advertised = 0; + } + + adapter->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == TXGBE_ERR_SFP_NOT_SUPPORTED) && + adapter->netdev_registered) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + } +} + +/** + * txgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the txgbe adapter structure + **/ +static void txgbe_sfp_link_config_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 speed; + bool autoneg = false; + u16 value; + u8 device_type = hw->subsystem_id & 0xF0; + + if (!(adapter->flags & TXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__TXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; + + if (device_type == TXGBE_ID_XAUI) { + /* clear ext phy int status */ + txgbe_read_mdio(&hw->phy_dev, hw->phy.addr, 0x03, 0x8011, &value); + if (value & 0x400) + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + if (!(value & 0x800)) { + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); + return; + } + } + + if (device_type == TXGBE_ID_MAC_XAUI || + (txgbe_get_media_type(hw) == txgbe_media_type_copper && + device_type == TXGBE_ID_SFI_XAUI)) { + speed = TXGBE_LINK_SPEED_10GB_FULL; + } else if (device_type == TXGBE_ID_MAC_SGMII) { + speed = TXGBE_LINK_SPEED_1GB_FULL; + } else { + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + TCALL(hw, mac.ops.get_link_capabilities, &speed, &autoneg); + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + speed = TXGBE_LINK_SPEED_10GB_FULL; + } + } + } + + TCALL(hw, mac.ops.setup_link, speed, txgbe_is_sfp(hw)); + + adapter->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + clear_bit(__TXGBE_IN_SFP_INIT, &adapter->state); +} + +static void txgbe_sfp_reset_eth_phy_subtask(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 speed; + bool linkup = true; + u32 i = 0; + + if (!(adapter->flags2 & TXGBE_FLAG_NEED_ETH_PHY_RESET)) + return; + + adapter->flags2 &= ~TXGBE_FLAG_NEED_ETH_PHY_RESET; + + TCALL(hw, mac.ops.check_link, &speed, &linkup, false); + if (!linkup) { + txgbe_wr32_epcs(hw, TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1, + 0xA000); + /* wait phy initialization done */ + for (i = 0; i < TXGBE_PHY_INIT_DONE_POLLING_TIME; i++) { + if ((txgbe_rd32_epcs(hw, + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1) & + TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0) + break; + msleep(100); + } + } +} + +/** + * txgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void 
txgbe_service_timer(struct timer_list *t) +{ + struct txgbe_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + struct txgbe_hw *hw = &adapter->hw; + + /* poll faster when waiting for link */ + if (adapter->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { + if ((hw->subsystem_device_id & 0xF0) == TXGBE_ID_KR_KX_KX4) + next_event_offset = HZ; + else if (BOND_CHECK_LINK_MODE == 1) + next_event_offset = HZ / 100; + else + next_event_offset = HZ / 10; + } else + next_event_offset = HZ * 2; + + if ((rd32(&adapter->hw, TXGBE_MIS_PF_SM) == 1) && (hw->bus.lan_id)) { + adapter->flags2 |= TXGBE_FLAG2_PCIE_NEED_RECOVER; + } + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + txgbe_service_event_schedule(adapter); +} + +static void txgbe_reset_subtask(struct txgbe_adapter *adapter) +{ + u32 reset_flag = 0; + u32 value = 0; + + if (!(adapter->flags2 & (TXGBE_FLAG2_PF_RESET_REQUESTED | + TXGBE_FLAG2_DEV_RESET_REQUESTED | + TXGBE_FLAG2_GLOBAL_RESET_REQUESTED | + TXGBE_FLAG2_RESET_INTR_RECEIVED))) + return; + + /* If we're already down, just bail */ + if (test_bit(__TXGBE_DOWN, &adapter->state) || + test_bit(__TXGBE_REMOVING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + if (adapter->flags2 & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_GLOBAL_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_DEV_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_DEV_RESET_REQUESTED; + } + if (adapter->flags2 & TXGBE_FLAG2_PF_RESET_REQUESTED) { + reset_flag |= TXGBE_FLAG2_PF_RESET_REQUESTED; + adapter->flags2 &= ~TXGBE_FLAG2_PF_RESET_REQUESTED; + } + + if (adapter->flags2 & TXGBE_FLAG2_RESET_INTR_RECEIVED) { + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + adapter->flags2 &= ~TXGBE_FLAG2_RESET_INTR_RECEIVED; + value = rd32m(&adapter->hw, TXGBE_MIS_RST_ST, + TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK) >> + TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT; + if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST) { + adapter->hw.reset_type = TXGBE_SW_RESET; + /* errata 7 */ + if (txgbe_mng_present(&adapter->hw) && + adapter->hw.revision_id == TXGBE_SP_MPW) + adapter->flags2 |= + TXGBE_FLAG2_MNG_REG_ACCESS_DISABLED; + } else if (value == TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST) + adapter->hw.reset_type = TXGBE_GLOBAL_RESET; + adapter->hw.force_full_reset = true; + txgbe_reinit_locked(adapter); + adapter->hw.force_full_reset = false; + goto unlock; + } + + if (reset_flag & TXGBE_FLAG2_DEV_RESET_REQUESTED) { + /* Request a Device Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. 
+ */ + /*debug to up*/ + /*txgbe_dump(adapter);*/ + if (txgbe_mng_present(&adapter->hw)) { + txgbe_reset_hostif(&adapter->hw); + } else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_SW_RST, TXGBE_MIS_RST_SW_RST); + + } else if (reset_flag & TXGBE_FLAG2_PF_RESET_REQUESTED) { + /*debug to up*/ + txgbe_reinit_locked(adapter); + } else if (reset_flag & TXGBE_FLAG2_GLOBAL_RESET_REQUESTED) { + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + /*debug to up*/ + pci_save_state(adapter->pdev); + if (txgbe_mng_present(&adapter->hw)) { + txgbe_reset_hostif(&adapter->hw); + } else + wr32m(&adapter->hw, TXGBE_MIS_RST, + TXGBE_MIS_RST_GLOBAL_RST, + TXGBE_MIS_RST_GLOBAL_RST); + + } + +unlock: + rtnl_unlock(); +} + +static void txgbe_check_pcie_subtask(struct txgbe_adapter *adapter) +{ + if (!(adapter->flags2 & TXGBE_FLAG2_PCIE_NEED_RECOVER)) + return; + + e_info(probe, "do recovery\n"); + wr32m(&adapter->hw, TXGBE_MIS_PF_SM, + TXGBE_MIS_PF_SM_SM, 0); + adapter->flags2 &= ~TXGBE_FLAG2_PCIE_NEED_RECOVER; +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct txgbe_adapter *adapter = container_of(work, + struct txgbe_adapter, + service_task); + if (TXGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__TXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + txgbe_down(adapter); + rtnl_unlock(); + } + txgbe_service_event_complete(adapter); + return; + } + + if (adapter->flags2 & TXGBE_FLAG2_VXLAN_REREG_NEEDED) { + adapter->flags2 &= ~TXGBE_FLAG2_VXLAN_REREG_NEEDED; + udp_tunnel_get_rx_info(adapter->netdev); + } + + txgbe_check_pcie_subtask(adapter); + txgbe_reset_subtask(adapter); + txgbe_sfp_detection_subtask(adapter); + txgbe_sfp_link_config_subtask(adapter); + txgbe_sfp_reset_eth_phy_subtask(adapter); + txgbe_check_overtemp_subtask(adapter); + txgbe_watchdog_subtask(adapter); + txgbe_fdir_reinit_subtask(adapter); + txgbe_check_hang_subtask(adapter); + if (test_bit(__TXGBE_PTP_RUNNING, &adapter->state)) { + txgbe_ptp_overflow_check(adapter); + if (unlikely(adapter->flags & + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) + txgbe_ptp_rx_hang(adapter); + } + + txgbe_service_event_complete(adapter); +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static txgbe_dptype encode_tx_desc_ptype(const struct txgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + u8 l4_prot = 0; + u8 ptype = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + if (tun_prot == NEXTHDR_FRAGMENT) + goto encap_frag; + ptype = TXGBE_PTYPE_TUN_IPV6; + break; + default: + goto exit; + } + + if (tun_prot == IPPROTO_IPIP || + tun_prot == 
IPPROTO_IPV6) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= TXGBE_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + /* fixme: VXLAN-GPE neither ETHER nor IP */ + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } + + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= TXGBE_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *) + skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) { + ptype |= TXGBE_PTYPE_PKT_IGMV; + } else { + ptype |= TXGBE_PTYPE_PKT_IGM; + } + } + } else { + goto exit; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (hdr.ipv4->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + case 6: + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + ptype |= TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; + default: + goto exit; + } + } else { +encap_frag: + + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = TXGBE_PTYPE_PKT_IP; + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#ifdef NETIF_F_IPV6_CSUM + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + + ptype = TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= TXGBE_PTYPE_TYP_IPFRAG; + goto exit; + } + break; +#endif /* NETIF_F_IPV6_CSUM */ + case __constant_htons(ETH_P_1588): + ptype = TXGBE_PTYPE_L2_TS; + goto exit; + case __constant_htons(ETH_P_FIP): + ptype = TXGBE_PTYPE_L2_FIP; + goto exit; + case __constant_htons(TXGBE_ETH_P_LLDP): + ptype = TXGBE_PTYPE_L2_LLDP; + goto exit; + case __constant_htons(TXGBE_ETH_P_CNM): + ptype = TXGBE_PTYPE_L2_CNM; + goto exit; + case __constant_htons(ETH_P_PAE): + ptype = TXGBE_PTYPE_L2_EAPOL; + goto exit; + case __constant_htons(ETH_P_ARP): + ptype = TXGBE_PTYPE_L2_ARP; + goto exit; + default: + ptype = TXGBE_PTYPE_L2_MAC; + goto exit; + } + + } + + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= TXGBE_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= TXGBE_PTYPE_TYP_UDP; + break; + case IPPROTO_SCTP: + ptype |= TXGBE_PTYPE_TYP_SCTP; + break; + default: + ptype |= TXGBE_PTYPE_TYP_IP; + break; + } + +exit: + return txgbe_decode_ptype(ptype); +} + +static int txgbe_tso(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + u8 *hdr_len, txgbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + struct tcphdr *tcph; + struct iphdr *iph; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + 
__be16 frag_off; + bool enc = skb->encapsulation; + + struct ipv6hdr *ipv6h; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + + if (iph->version == 4) { + + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_IPV4 | + TXGBE_TX_FLAGS_CC; + + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + + ipv6h->payload_len = 0; + tcph->check = + ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= TXGBE_TX_FLAGS_TSO | + TXGBE_TX_FLAGS_CSUM | + TXGBE_TX_FLAGS_CC; + } + + /* compute header lengths */ + + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << TXGBE_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << TXGBE_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + + if (enc) { + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= TXGBE_TX_FLAGS_OUTER_IPV4; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else + vlan_macip_lens = skb_network_header_len(skb) >> 1; + + vlan_macip_lens |= skb_network_offset(skb) << TXGBE_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void txgbe_tx_csum(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, txgbe_dptype dptype) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 tunhdr_eiplen_tunlen = 0; + u8 tun_prot = 0; + u32 type_tucmd; + + if 
(skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & TXGBE_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + unsigned char *exthdr; + unsigned char *l4_hdr; + __be16 frag_off; + + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case __constant_htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, + exthdr - skb->data, + &tun_prot, &frag_off); + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but version=%d\n", + network_hdr.ipv4->version); + } + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = TXGBE_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + TXGBE_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + tunhdr_eiplen_tunlen = + (((char *)inner_ip_hdr(skb)- + (char *)ip_hdr(skb)) >> 2) << + TXGBE_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + TXGBE_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= + (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); + l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_prot, &frag_off); + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + TXGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + TXGBE_TXD_L4LEN_SHIFT; + break; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= TXGBE_TX_FLAGS_CSUM; + } + first->tx_flags |= TXGBE_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & TXGBE_TX_FLAGS_VLAN_MASK; + + type_tucmd = dptype.ptype << 24; + txgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + +static u32 txgbe_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 
cmd_type = TXGBE_TXD_DTYP_DATA |
+		       TXGBE_TXD_IFCS;
+
+	/* set HW vlan bit if vlan is present */
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_HW_VLAN,
+				   TXGBE_TXD_VLE);
+
+	/* set segmentation enable bits for TSO/FSO */
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSO,
+				   TXGBE_TXD_TSE);
+
+	/* set timestamp bit if present */
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_TSTAMP,
+				   TXGBE_TXD_MAC_TSTAMP);
+
+	cmd_type |= TXGBE_SET_FLAG(tx_flags, TXGBE_TX_FLAGS_LINKSEC,
+				   TXGBE_TXD_LINKSEC);
+
+	return cmd_type;
+}
+
+static void txgbe_tx_olinfo_status(union txgbe_tx_desc *tx_desc,
+				   u32 tx_flags, unsigned int paylen)
+{
+	u32 olinfo_status = paylen << TXGBE_TXD_PAYLEN_SHIFT;
+
+	/* enable L4 checksum for TSO and TX checksum offload */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_CSUM,
+					TXGBE_TXD_L4CS);
+
+	/* enable IPv4 checksum for TSO */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_IPV4,
+					TXGBE_TXD_IIPCS);
+	/* enable outer IPv4 checksum for TSO */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_OUTER_IPV4,
+					TXGBE_TXD_EIPCS);
+	/*
+	 * Check Context must be set if Tx switch is enabled, which it
+	 * always is for the case where virtual functions are running
+	 */
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_CC,
+					TXGBE_TXD_CC);
+
+	olinfo_status |= TXGBE_SET_FLAG(tx_flags,
+					TXGBE_TX_FLAGS_IPSEC,
+					TXGBE_TXD_IPSEC);
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int __txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+	/* Herbert's original patch had:
+	 * smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available.
+	 */
+	if (likely(txgbe_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int txgbe_maybe_stop_tx(struct txgbe_ring *tx_ring, u16 size) +{ + if (likely(txgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __txgbe_maybe_stop_tx(tx_ring, size); +} + +#define TXGBE_TXD_CMD (TXGBE_TXD_EOP | \ + TXGBE_TXD_RS) + +static int txgbe_tx_map(struct txgbe_ring *tx_ring, + struct txgbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct txgbe_tx_buffer *tx_buffer; + union txgbe_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = txgbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = TXGBE_TX_DESC(tx_ring, i); + + txgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > TXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ TXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += TXGBE_MAX_DATA_PER_TXD; + size -= TXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = TXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | TXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + txgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + /* The following mmiowb() is required on certain + * architechtures (IA64/Altix in particular) in order to + * synchronize the I/O calls with respect to a spin lock. This + * is because the wmb() on those architectures does not + * guarantee anything for posted I/O writes. + * + * Note that the associated spin_unlock() is not within the + * driver code, but in the networking core stack. 
+ */ + mmiowb(); + } + + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static void txgbe_atr(struct txgbe_ring *ring, + struct txgbe_tx_buffer *first, + txgbe_dptype dptype) +{ + struct txgbe_q_vector *q_vector = ring->q_vector; + union txgbe_atr_hash_dword input = { .dword = 0 }; + union txgbe_atr_hash_dword common = { .dword = 0 }; + union network_header hdr; + struct tcphdr *th; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; + + if (dptype.etype) { + if (TXGBE_PTYPE_TYP_TCP != TXGBE_PTYPE_TYPL4(dptype.ptype)) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else + + { + if (TXGBE_PTYPE_PKT_IP != TXGBE_PTYPE_PKT(dptype.ptype) || + TXGBE_PTYPE_TYP_TCP != TXGBE_PTYPE_TYPL4(dptype.ptype)) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. + */ + input.formatted.vlan_id = htons((u16)dptype.ptype); + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & TXGBE_TX_FLAGS_SW_VLAN) + common.port.src ^= th->dest ^ first->skb->protocol; + else if (first->tx_flags & TXGBE_TX_FLAGS_HW_VLAN) + common.port.src ^= th->dest ^ first->skb->vlan_proto; + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (TXGBE_PTYPE_PKT_IPV6 & TXGBE_PTYPE_PKT(dptype.ptype)) { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } else { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + txgbe_fdir_add_signature_filter(&q_vector->adapter->hw, + input, common, ring->queue_index); +} + +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. 
Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return error in out of memory cases. The skb is freed on error. + */ + +int txgbe_skb_pad_nonzero(struct sk_buff *skb, int pad) +{ + int err; + int ntail; + + /* If the skbuff is non linear tailroom is always zero.. */ + if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { + memset(skb->data+skb->len, 0x1, pad); + return 0; + } + + ntail = skb->data_len + pad - (skb->end - skb->tail); + if (likely(skb_cloned(skb) || ntail > 0)) { + err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); + if (unlikely(err)) + goto free_skb; + } + + /* FIXME: The use of this function with non-linear skb's really needs + * to be audited. + */ + err = skb_linearize(skb); + if (unlikely(err)) + goto free_skb; + + memset(skb->data + skb->len, 0x1, pad); + return 0; + +free_skb: + kfree_skb(skb); + return err; +} + +netdev_tx_t txgbe_xmit_frame_ring(struct sk_buff *skb, + struct txgbe_adapter *adapter, + struct txgbe_ring *tx_ring) +{ + struct txgbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + txgbe_dptype dptype; + + /* work around hw errata 3 */ + u16 _llcLen, *llcLen; + llcLen = skb_header_pointer(skb, ETH_HLEN - 2, sizeof(u16), &_llcLen); + if (*llcLen == 0x3 || *llcLen == 0x4 || *llcLen == 0x5) { + if (txgbe_skb_pad_nonzero(skb, ETH_ZLEN - skb->len)) + return -ENOMEM; + __skb_put(skb, ETH_ZLEN - skb->len); + } + + /* + * need: 1 descriptor per page * PAGE_SIZE/TXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/TXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> + frags[f])); + + if (txgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << TXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= TXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + TXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= TXGBE_TX_FLAGS_SW_VLAN; + } + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock) { + if (!test_and_set_bit_lock(__TXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= TXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if ((adapter->flags & TXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (TXGBE_TX_FLAGS_HW_VLAN | TXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~TXGBE_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= skb->priority << + 
TXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & TXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + TXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= TXGBE_TX_FLAGS_HW_VLAN; + } + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + dptype = encode_tx_desc_ptype(first); + + tso = txgbe_tso(tx_ring, first, &hdr_len, dptype); + if (tso < 0) + goto out_drop; + else if (!tso) + txgbe_tx_csum(tx_ring, first, dptype); + + /* add the ATR filter if ATR is on */ + if (test_bit(__TXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + txgbe_atr(tx_ring, first, dptype); + + if (txgbe_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + +cleanup_tx_tstamp: + if (unlikely(tx_flags & TXGBE_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_ring *tx_ring; + unsigned int r_idx = skb->queue_mapping; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + tx_ring = adapter->tx_ring[r_idx]; + + return txgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +/** + * txgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int txgbe_set_mac(struct net_device *netdev, void *p) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + txgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + txgbe_mac_set_default_filter(adapter, hw->mac.addr); + + return 0; +} + +/** + * txgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int txgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, hw->mac.san_addr, + NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + TCALL(hw, mac.ops.set_vmdq_san_mac, VMDQ_P(0)); + } + return err; +} + +/** + * txgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int txgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + 
struct txgbe_adapter *adapter = netdev_priv(dev); + struct txgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +static int txgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; + int prtad, devad, ret; + struct txgbe_adapter *adapter = netdev_priv(netdev); + struct txgbe_hw *hw = &adapter->hw; + u16 value = 0; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = txgbe_read_mdio(&hw->phy_dev, prtad, devad, mii->reg_num, + &value); + if (ret < 0) + return ret; + mii->val_out = value; + return 0; + } else { + return txgbe_write_mdio(&hw->phy_dev, prtad, devad, + mii->reg_num, mii->val_in); + } +} + +static int txgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + + struct txgbe_adapter *adapter = netdev_priv(netdev); + + switch (cmd) { + case SIOCGHWTSTAMP: + return txgbe_ptp_get_ts_config(adapter, ifr); + case SIOCSHWTSTAMP: + return txgbe_ptp_set_ts_config(adapter, ifr); + case SIOCGMIIREG: + case SIOCSMIIREG: + return txgbe_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/* txgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * @adapter: pointer to txgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void txgbe_validate_rtr(struct txgbe_adapter *adapter, u8 tc) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + + reg = rd32(hw, TXGBE_RDB_UP2TC); + rsave = reg; + if (reg != rsave) + wr32(hw, TXGBE_RDB_UP2TC, reg); + + return; +} + +/** + * txgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void txgbe_set_prio_tc_map(struct txgbe_adapter __maybe_unused *adapter) +{ + UNREFERENCED_PARAMETER(adapter); +} + +/** + * txgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int txgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + + if (tc && adapter->num_vmdqs > TXGBE_MAX_DCBMACVLANS) + return -EBUSY; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(adapter); + + txgbe_clear_interrupt_scheme(adapter); + + if (tc) { + netdev_set_num_tc(dev, tc); + txgbe_set_prio_tc_map(adapter); + } else { + netdev_reset_tc(dev); + } + + txgbe_validate_rtr(adapter, tc); + + txgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + txgbe_open(dev); + + return 0; +} + +static int txgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return txgbe_setup_tc(dev, mqprio->num_tc); +} + +static int __txgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return txgbe_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +void txgbe_do_reset(struct net_device *netdev) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + txgbe_reinit_locked(adapter); + else + txgbe_reset(adapter); +} + +static netdev_features_t txgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int txgbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct txgbe_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~TXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if ((netdev->features ^ features) & NETIF_F_LRO) { + + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } + } + + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. 
+	 */
+	switch (features & NETIF_F_NTUPLE) {
+	case NETIF_F_NTUPLE:
+		/* turn off ATR, enable perfect filters and reset */
+		if (!(adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+			need_reset = true;
+
+		adapter->flags &= ~TXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+		break;
+	default:
+		/* turn off perfect filters, enable ATR and reset */
+		if (adapter->flags & TXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+			need_reset = true;
+
+		adapter->flags &= ~TXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+		/* We cannot enable ATR if VMDq is enabled */
+		if (adapter->flags & TXGBE_FLAG_VMDQ_ENABLED)
+			break;
+
+		/* We cannot enable ATR if we have 2 or more traffic classes */
+		if (netdev_get_num_tc(netdev) > 1)
+			break;
+
+		/* We cannot enable ATR if RSS is disabled */
+		if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+			break;
+
+		/* A sample rate of 0 indicates ATR disabled */
+		if (!adapter->atr_sample_rate)
+			break;
+
+		adapter->flags |= TXGBE_FLAG_FDIR_HASH_CAPABLE;
+		break;
+	}
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		txgbe_vlan_strip_enable(adapter);
+	else
+		txgbe_vlan_strip_disable(adapter);
+
+	if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE &&
+	    features & NETIF_F_RXCSUM) {
+		if (!need_reset)
+			adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED;
+	} else {
+		txgbe_clear_vxlan_port(adapter);
+	}
+
+	if (features & NETIF_F_RXHASH) {
+		if (!(adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED)) {
+			wr32m(&adapter->hw, TXGBE_RDB_RA_CTL,
+			      TXGBE_RDB_RA_CTL_RSS_EN, TXGBE_RDB_RA_CTL_RSS_EN);
+			adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED;
+		}
+	} else {
+		if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) {
+			wr32m(&adapter->hw, TXGBE_RDB_RA_CTL,
+			      TXGBE_RDB_RA_CTL_RSS_EN, ~TXGBE_RDB_RA_CTL_RSS_EN);
+			adapter->flags2 &= ~TXGBE_FLAG2_RSS_ENABLED;
+		}
+	}
+
+	if (need_reset)
+		txgbe_do_reset(netdev);
+
+	return 0;
+}
+
+/**
+ * txgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
+ * @dev: The port's netdev
+ * @ti: Tunnel endpoint information
+ **/
+static void txgbe_add_udp_tunnel_port(struct net_device *dev,
+				      struct udp_tunnel_info *ti)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+	struct txgbe_hw *hw = &adapter->hw;
+	__be16 port = ti->port;
+
+	if (ti->sa_family != AF_INET)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+			return;
+
+		if (adapter->vxlan_port == port)
+			return;
+
+		if (adapter->vxlan_port) {
+			netdev_info(dev,
+				    "VXLAN port %d set, not adding port %d\n",
+				    ntohs(adapter->vxlan_port),
+				    ntohs(port));
+			return;
+		}
+
+		adapter->vxlan_port = port;
+		wr32(hw, TXGBE_CFG_VXLAN, port);
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		if (adapter->geneve_port == port)
+			return;
+
+		if (adapter->geneve_port) {
+			netdev_info(dev,
+				    "GENEVE port %d set, not adding port %d\n",
+				    ntohs(adapter->geneve_port),
+				    ntohs(port));
+			return;
+		}
+
+		adapter->geneve_port = port;
+		wr32(hw, TXGBE_CFG_GENEVE, port);
+		break;
+	default:
+		return;
+	}
+}
+
+/**
+ * txgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
+ * @dev: The port's netdev
+ * @ti: Tunnel endpoint information
+ **/
+static void txgbe_del_udp_tunnel_port(struct net_device *dev,
+				      struct udp_tunnel_info *ti)
+{
+	struct txgbe_adapter *adapter = netdev_priv(dev);
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
+	    ti->type != UDP_TUNNEL_TYPE_GENEVE)
+		return;
+
+	if (ti->sa_family != AF_INET)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!(adapter->flags & 
TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + txgbe_clear_vxlan_port(adapter); + adapter->flags2 |= TXGBE_FLAG2_VXLAN_REREG_NEEDED; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (adapter->geneve_port != ti->port) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + adapter->geneve_port = 0; + wr32(&adapter->hw, TXGBE_CFG_GENEVE, 0); + break; + default: + return; + } +} + +static int txgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (TXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +static int txgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, + __always_unused u16 flags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags |= TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags &= ~TXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else { + return -EINVAL; + } + + adapter->bridge_mode = mode; + + /* re-configure settings related to bridge mode */ + txgbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +static int txgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, + int nlflags) +{ + struct txgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & TXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + mode = adapter->bridge_mode; + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} + +#define TXGBE_MAX_TUNNEL_HDR_LEN 80 +static netdev_features_t +txgbe_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u32 vlan_num = 0; + u16 vlan_depth = skb->mac_len; + __be16 type = skb->protocol; + struct vlan_hdr *vh; + + if (skb_vlan_tag_present(skb)) { + vlan_num++; + } + + if (vlan_depth) { + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + + while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + vlan_num++; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + + } + + if (vlan_num > 2) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + if (skb->encapsulation) { + if (unlikely(skb_inner_mac_header(skb) - + skb_transport_header(skb) > + TXGBE_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + } + return features; +} + +static const struct net_device_ops txgbe_netdev_ops = { + .ndo_open = txgbe_open, + .ndo_stop = txgbe_close, + .ndo_start_xmit = txgbe_xmit_frame, + .ndo_set_rx_mode = txgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = txgbe_set_mac, + .ndo_change_mtu = txgbe_change_mtu, + .ndo_tx_timeout = txgbe_tx_timeout, + .ndo_vlan_rx_add_vid = txgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = txgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = txgbe_ioctl, + .ndo_get_stats64 = txgbe_get_stats64, + .ndo_setup_tc = __txgbe_setup_tc, + .ndo_fdb_add = txgbe_ndo_fdb_add, + .ndo_bridge_setlink = txgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = txgbe_ndo_bridge_getlink, + .ndo_udp_tunnel_add = txgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = txgbe_del_udp_tunnel_port, + .ndo_features_check = txgbe_features_check, + .ndo_set_features = txgbe_set_features, + .ndo_fix_features = txgbe_fix_features, +}; + +void txgbe_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &txgbe_netdev_ops; + txgbe_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * txgbe_wol_supported - Check whether device supports WoL + * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int txgbe_wol_supported(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & TXGBE_DEVICE_CAPS_WOL_MASK; + + /* check eeprom to see if WOL is enabled */ + if ((wol_cap == TXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == TXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) + return true; + else + return false; +} + +/** + * txgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in txgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * txgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ +static int txgbe_probe(struct pci_dev *pdev, + const struct pci_device_id __always_unused *ent) +{ + struct net_device *netdev; + struct txgbe_adapter *adapter = NULL; + struct txgbe_hw *hw = NULL; + static int cards_found; + int err, pci_using_dac, expected_gts; + u16 offset = 0; + u16 eeprom_verh = 0, eeprom_verl = 0; + u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; + u32 etrack_id = 0; + u16 build = 0, major = 0, patch = 0; + char *info_string, *i_s_var; + u8 part_str[TXGBE_PBANUM_LENGTH]; + unsigned int indices = MAX_TX_QUEUES; + + bool disable_dev = false; +/* #ifndef NETIF_F_GSO_PARTIA */ + netdev_features_t hw_features; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(pci_dev_to_dev(pdev), + DMA_BIT_MASK(32)); + if (err) { + dev_err(pci_dev_to_dev(pdev), "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), txgbe_driver_name); + if (err) { + dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + hw = vmalloc(sizeof(struct txgbe_hw)); + if (!hw) { + pr_info("Unable to allocate memory for early mac check\n"); + } else { + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + vfree(hw); + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + /* errata 16 */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, + 0x1000); + + netdev = alloc_etherdev_mq(sizeof(struct txgbe_adapter), indices); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + txgbe_assign_netdev_ops(netdev); + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = txgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + /* + * check_options must be called before setup_link to set up + * hw->fc completely + */ + txgbe_check_options(adapter); + txgbe_bp_mode_setting(adapter); + TCALL(hw, mac.ops.set_lan_id); + + /* check if flash load is done after hw power up */ + err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PERST); + if (err) + goto err_sw_init; + err = txgbe_check_flash_load(hw, TXGBE_SPI_ILDR_STATUS_PWRRST); + if (err) + goto err_sw_init; + + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + err = TCALL(hw, mac.ops.reset_hw); + hw->phy.reset_if_overtemp = false; + if (err == TXGBE_ERR_SFP_NOT_PRESENT) { + err = 0; + } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) { + e_dev_err("failed to load because an unsupported SFP+ " + "module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported " + "module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto 
err_sw_init; + } + + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM; + +#ifdef NETIF_F_IPV6_CSUM + netdev->features |= NETIF_F_IPV6_CSUM; +#endif + + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= txgbe_tso_features(); + + if (adapter->flags2 & TXGBE_FLAG2_RSS_ENABLED) + netdev->features |= NETIF_F_RXHASH; + + netdev->features |= NETIF_F_RXCSUM; + + /* copy netdev features into list of user selectable features */ + hw_features = netdev->hw_features; + hw_features |= netdev->features; + + /* give us the option of enabling RSC/LRO later */ + if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) + hw_features |= NETIF_F_LRO; + + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features |= NETIF_F_NTUPLE; + + adapter->flags |= TXGBE_FLAG_FDIR_PERFECT_CAPABLE; + hw_features |= NETIF_F_NTUPLE; + netdev->hw_features = hw_features; + + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; + + netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + TXGBE_GSO_PARTIAL_FEATURES | NETIF_F_TSO; + if (netdev->features & NETIF_F_LRO) { + if ((adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) && + ((adapter->rx_itr_setting == 1) || + (adapter->rx_itr_setting > TXGBE_MIN_RSC_ITR))) { + adapter->flags2 |= TXGBE_FLAG2_RSC_ENABLED; + } else if (adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE) { + e_dev_info("InterruptThrottleRate set too high, " + "disabling RSC\n"); + } + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + /* make sure the EEPROM is good */ + if (TCALL(hw, eeprom.ops.validate_checksum, NULL)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + txgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, txgbe_service_timer, 0); + + if (TXGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, txgbe_service_task); + set_bit(__TXGBE_SERVICE_INITED, &adapter->state); + clear_bit(__TXGBE_SERVICE_SCHED, &adapter->state); + + err = txgbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_DEVICE_CAPS, + &adapter->eeprom_cap); + + if ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP && + hw->bus.lan_id == 0) { + adapter->wol = TXGBE_PSR_WKUP_CTL_MAG; + wr32(hw, TXGBE_PSR_WKUP_CTL, adapter->wol); + } + hw->wol_enabled = !!(adapter->wol); + + device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + /* + * Save off EEPROM version number and Option Rom version which + * together make a unique identify for the eeprom + */ + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, + &eeprom_verh); + TCALL(hw, eeprom.ops.read, + hw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, + &eeprom_verl); + etrack_id = (eeprom_verh << 16) | eeprom_verl; + + TCALL(hw, 
eeprom.ops.read,
+	      hw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, &offset);
+
+	/* Make sure offset to iSCSI block is valid */
+	if (!(offset == 0x0) && !(offset == 0xffff)) {
+		TCALL(hw, eeprom.ops.read, offset + 0x84, &eeprom_cfg_blkh);
+		TCALL(hw, eeprom.ops.read, offset + 0x83, &eeprom_cfg_blkl);
+
+		/* Only display Option ROM if it exists */
+		if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+			major = eeprom_cfg_blkl >> 8;
+			build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+			patch = eeprom_cfg_blkh & 0x00ff;
+
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x, %d.%d.%d", etrack_id, major, build,
+				 patch);
+		} else {
+			snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+				 "0x%08x", etrack_id);
+		}
+	} else {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "0x%08x", etrack_id);
+	}
+
+	/* reset the hardware with the new settings */
+	err = TCALL(hw, mac.ops.start_hw);
+	if (err == TXGBE_ERR_EEPROM_VERSION) {
+		/* We are running on a pre-production device, log a warning */
+		e_dev_warn("This device is a pre-production adapter/LOM. "
+			   "Please be aware there may be issues associated "
+			   "with your hardware. If you are experiencing "
+			   "problems please contact your hardware "
+			   "representative who provided you with this "
+			   "hardware.\n");
+	} else if (err) {
+		e_dev_err("HW init failed\n");
+		goto err_register;
+	}
+
+	/* pick up the PCI bus settings for reporting later */
+	TCALL(hw, mac.ops.get_bus_info);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	pci_set_drvdata(pdev, adapter);
+	adapter->netdev_registered = true;
+
+	if (!((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP))
+		/* power down the optics for SFP+ fiber */
+		TCALL(hw, mac.ops.disable_tx_laser);
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+	/* keep stopping all the transmit queues for older kernels */
+	netif_tx_stop_all_queues(netdev);
+
+	/* print all messages at the end so that we use our eth%d name */
+
+	/* calculate the expected PCIe bandwidth required for optimal
+	 * performance. Note that some older parts will never have enough
+	 * bandwidth due to being older generation PCIe parts. We clamp these
+	 * parts to ensure that no warning is displayed, as this could confuse
+	 * users otherwise. 
*/ + + expected_gts = txgbe_enumerate_functions(adapter) * 10; + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + txgbe_check_minimum_link(adapter, expected_gts); + + if ((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) + e_info(probe, "NCSI : support"); + else + e_info(probe, "NCSI : unsupported"); + + /* First try to read PBA as a string */ + err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH); + if (err) + + strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); + if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present) + e_info(probe, "PHY: %d, SFP+: %d, PBA No: %s\n", + hw->phy.type, hw->phy.sfp_type, part_str); + else + e_info(probe, "PHY: %d, PBA No: %s\n", + hw->phy.type, part_str); + + e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + +#define INFO_STRING_LEN 255 + info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!info_string) { + e_err(probe, "allocation for info string failed\n"); + goto no_info_string; + } + i_s_var = info_string; + i_s_var += sprintf(info_string, "Enabled Features: "); + i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", + adapter->num_rx_queues, adapter->num_tx_queues); + if (adapter->flags & TXGBE_FLAG_FDIR_HASH_CAPABLE) + i_s_var += sprintf(i_s_var, "FdirHash "); + if (adapter->flags & TXGBE_FLAG_DCB_ENABLED) + i_s_var += sprintf(i_s_var, "DCB "); + if (adapter->flags & TXGBE_FLAG_TPH_ENABLED) + i_s_var += sprintf(i_s_var, "TPH "); + if (adapter->flags2 & TXGBE_FLAG2_RSC_ENABLED) + i_s_var += sprintf(i_s_var, "RSC "); + if (adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) + i_s_var += sprintf(i_s_var, "vxlan_rx "); + + BUG_ON(i_s_var > (info_string + INFO_STRING_LEN)); + /* end features printing */ + e_info(probe, "%s\n", info_string); + kfree(info_string); +no_info_string: + /* firmware requires blank driver version */ + TCALL(hw, mac.ops.set_fw_drv_ver, 0xFF, 0xFF, 0xFF, 0xFF); + + /* add san mac addr to netdev */ + txgbe_add_sanmac_netdev(netdev); + + e_info(probe, "WangXun(R) 10 Gigabit Network Connection\n"); + cards_found++; + + /* setup link for SFP devices with MNG FW, else wait for TXGBE_UP */ + if (txgbe_mng_present(hw) && txgbe_is_sfp(hw)) + TCALL(hw, mac.ops.setup_link, + TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL, + true); + + TCALL(hw, mac.ops.setup_eee, + (adapter->flags2 & TXGBE_FLAG2_EEE_CAPABLE) && + (adapter->flags2 & TXGBE_FLAG2_EEE_ENABLED)); + + return 0; + +err_register: + txgbe_clear_interrupt_scheme(adapter); + txgbe_release_hw_control(adapter); +err_sw_init: + adapter->flags2 &= ~TXGBE_FLAG2_SEARCH_FOR_SFP; + kfree(adapter->mac_table); + iounmap(adapter->io_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * txgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * txgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void txgbe_remove(struct pci_dev *pdev) +{ + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + set_bit(__TXGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + /* remove the added san mac */ + txgbe_del_sanmac_netdev(netdev); + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + txgbe_clear_interrupt_scheme(adapter); + txgbe_release_hw_control(adapter); + + iounmap(adapter->io_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__TXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +static bool txgbe_check_cfg_remove(struct txgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD) { + txgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 txgbe_read_pci_cfg_word(struct txgbe_hw *hw, u32 reg) +{ + struct txgbe_adapter *adapter = hw->back; + u16 value; + + if (TXGBE_REMOVED(hw->hw_addr)) + return TXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == TXGBE_FAILED_READ_CFG_WORD && + txgbe_check_cfg_remove(hw, adapter->pdev)) + return TXGBE_FAILED_READ_CFG_WORD; + return value; +} + +void txgbe_write_pci_cfg_word(struct txgbe_hw *hw, u32 reg, u16 value) +{ + struct txgbe_adapter *adapter = hw->back; + + if (TXGBE_REMOVED(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +static struct pci_driver txgbe_driver = { + .name = txgbe_driver_name, + .id_table = txgbe_pci_tbl, + .probe = txgbe_probe, + .remove = txgbe_remove, +#ifdef CONFIG_PM + .suspend = txgbe_suspend, + .resume = txgbe_resume, +#endif + .shutdown = txgbe_shutdown, +}; + +/** + * txgbe_init_module - Driver Registration Routine + * + * txgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init txgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", txgbe_driver_string, txgbe_driver_version); + pr_info("%s\n", txgbe_copyright); + + txgbe_wq = create_singlethread_workqueue(txgbe_driver_name); + if (!txgbe_wq) { + pr_err("%s: Failed to create workqueue\n", txgbe_driver_name); + return -ENOMEM; + } + + ret = pci_register_driver(&txgbe_driver); + return ret; +} + +module_init(txgbe_init_module); + +/** + * txgbe_exit_module - Driver Exit Cleanup Routine + * + * txgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit txgbe_exit_module(void) +{ + pci_unregister_driver(&txgbe_driver); + if (txgbe_wq) { + destroy_workqueue(txgbe_wq); + } +} + +module_exit(txgbe_exit_module); + +/* txgbe_main.c */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.c b/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..08c67fdccc161dd0a6a913831f2885ac982276f9 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.c @@ -0,0 +1,399 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_mbx.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include "txgbe_mbx.h" + +/** + * txgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +int txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + err = TCALL(hw, mbx.ops.read, msg, size, mbx_id); + + return err; +} + +/** + * txgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = 0; + + if (size > mbx->size) { + err = TXGBE_ERR_MBX; + ERROR_REPORT2(TXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else + err = TCALL(hw, mbx.ops.write, msg, size, mbx_id); + + return err; +} + +/** + * txgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_msg, mbx_id); + + return err; +} + +/** + * txgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + int err = TXGBE_ERR_MBX; + + err = TCALL(hw, mbx.ops.check_for_ack, mbx_id); + + return err; +} + +/** + * txgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +int txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int err = TXGBE_ERR_MBX; + + if (mbx->ops.check_for_rst) + err = mbx->ops.check_for_rst(hw, mbx_id); + + return err; +} + +/** + * txgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +int txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = 
mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_msg, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? 0 : TXGBE_ERR_MBX; +} + +/** + * txgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +int txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && TCALL(hw, mbx.ops.check_for_ack, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->udelay); + } + + if (countdown == 0) + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? 0 : TXGBE_ERR_MBX; +} + +int txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, int index) +{ + u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index)); + int err = TXGBE_ERR_MBX; + + if (mbvficr & mask) { + err = 0; + wr32(hw, TXGBE_MBVFICR(index), mask); + } + + return err; +} + +/** + * txgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.reqs++; + } + + return err; +} + +/** + * txgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + int index = TXGBE_MBVFICR_INDEX(vf); + u32 vf_bit = vf % 16; + + if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + err = 0; + hw->mbx.stats.acks++; + } + + return err; +} + +/** + * txgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +int txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf) +{ + u32 reg_offset = (vf < 32) ? 
0 : 1; + u32 vf_shift = vf % 32; + u32 vflre = 0; + int err = TXGBE_ERR_MBX; + + vflre = rd32(hw, TXGBE_VFLRE(reg_offset)); + + if (vflre & (1 << vf_shift)) { + err = 0; + wr32(hw, TXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return err; +} + +/** + * txgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +int txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf) +{ + int err = TXGBE_ERR_MBX; + u32 mailbox; + + /* Take ownership of the buffer */ + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(hw, TXGBE_PXMAILBOX(vf)); + if (mailbox & TXGBE_PXMAILBOX_PFU) + err = 0; + else + ERROR_REPORT2(TXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for PF%d", vf); + + + return err; +} + +/** + * txgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +int txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + txgbe_check_for_msg_pf(hw, vf); + txgbe_check_for_ack_pf(hw, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(hw, TXGBE_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_STS); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return err; + +} + +/** + * txgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +int txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, + u16 vf) +{ + int err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = txgbe_obtain_mbx_lock_pf(hw, vf); + if (err) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = rd32a(hw, TXGBE_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(hw, TXGBE_PXMBMEM(vf), TXGBE_VXMAILBOX_SIZE, TXGBE_PXMAILBOX_ACK); + wr32(hw, TXGBE_PXMAILBOX(vf), TXGBE_PXMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * txgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void txgbe_init_mbx_params_pf(struct txgbe_hw *hw) +{ + struct txgbe_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->udelay = 0; + + mbx->size = TXGBE_VXMAILBOX_SIZE; + + mbx->ops.read = txgbe_read_mbx_pf; + mbx->ops.write = txgbe_write_mbx_pf; + mbx->ops.check_for_msg = txgbe_check_for_msg_pf; + mbx->ops.check_for_ack = txgbe_check_for_ack_pf; + mbx->ops.check_for_rst = txgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.h b/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..e412a5e546e10c1e6ccd1d3ac4fbb7378a1ea971 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_mbx.h @@ -0,0 +1,171 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_mbx.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _TXGBE_MBX_H_ +#define _TXGBE_MBX_H_ + +#define TXGBE_VXMAILBOX_SIZE (16 - 1) + +/** + * VF Registers + **/ +#define TXGBE_VXMAILBOX 0x00600 +#define TXGBE_VXMAILBOX_REQ ((0x1) << 0) /* Request for PF Ready bit */ +#define TXGBE_VXMAILBOX_ACK ((0x1) << 1) /* Ack PF message received */ +#define TXGBE_VXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_VXMAILBOX_PFSTS ((0x1) << 4) /* PF wrote a message in the MB */ +#define TXGBE_VXMAILBOX_PFACK ((0x1) << 5) /* PF ack the previous VF msg */ +#define TXGBE_VXMAILBOX_RSTI ((0x1) << 6) /* PF has reset indication */ +#define TXGBE_VXMAILBOX_RSTD ((0x1) << 7) /* PF has indicated reset done */ +#define TXGBE_VXMAILBOX_R2C_BITS (TXGBE_VXMAILBOX_RSTD | \ + TXGBE_VXMAILBOX_PFSTS | TXGBE_VXMAILBOX_PFACK) + +#define TXGBE_VXMBMEM 0x00C00 /* 16*4B */ + +/** + * PF Registers + **/ +#define TXGBE_PXMAILBOX(i) (0x00600 + (4 * (i))) /* i=[0,63] */ +#define TXGBE_PXMAILBOX_STS ((0x1) << 0) /* Initiate message send to VF */ +#define TXGBE_PXMAILBOX_ACK ((0x1) << 1) /* Ack message recv'd from VF */ +#define TXGBE_PXMAILBOX_VFU ((0x1) << 2) /* VF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_PFU ((0x1) << 3) /* PF owns the mailbox buffer */ +#define TXGBE_PXMAILBOX_RVFU ((0x1) << 4) /* Reset VFU - used when VF stuck*/ + +#define TXGBE_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define TXGBE_VFLRP(i) (0x00490 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLRE(i) (0x004A0 + (4 * (i))) /* i=[0,1] */ +#define TXGBE_VFLREC(i) (0x004A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define TXGBE_MBVFICR(i) (0x00480 + (4 * (i))) /* i=[0,3] */ +#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4) +#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */ +#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */ +#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */ +#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */ + +/** + * Messages + **/ +/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is TXGBE_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define TXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define TXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define TXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum txgbe_pfvf_api_rev { + txgbe_mbox_api_null, + txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + txgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define TXGBE_VF_RESET 0x01 /* VF requests reset */ +#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c +#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */ + +/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ +enum txgbevf_xcast_modes { + TXGBEVF_XCAST_MODE_NONE = 0, + TXGBEVF_XCAST_MODE_MULTI, + TXGBEVF_XCAST_MODE_ALLMULTI, + TXGBEVF_XCAST_MODE_PROMISC, +}; + +/* GET_QUEUES return data indices within the mailbox */ +#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define TXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define TXGBE_VF_MC_TYPE_WORD 3 + +#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + 
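+/*
+ * Illustrative sketch (assumed usage, following the ixgbe mailbox
+ * convention this header is based on): the request ID occupies the low
+ * bits of the first message word, per-request data goes in the MSGINFO
+ * field (bits 23:16), and the PF reply echoes the request word or'd with
+ * the ACK or NACK bit. The names msgbuf, reply and index here are
+ * hypothetical:
+ *
+ * msgbuf[0] = TXGBE_VF_SET_MACVLAN | (index << TXGBE_VT_MSGINFO_SHIFT);
+ * reply[0] == (TXGBE_VF_SET_MACVLAN | TXGBE_VT_MSGTYPE_ACK) on success
+ * reply[0] == (TXGBE_VF_SET_MACVLAN | TXGBE_VT_MSGTYPE_NACK) on failure
+ */
+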
+#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +int txgbe_read_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_write_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_read_posted_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_write_posted_mbx(struct txgbe_hw *, u32 *, u16, u16); +int txgbe_check_for_msg(struct txgbe_hw *, u16); +int txgbe_check_for_ack(struct txgbe_hw *, u16); +int txgbe_check_for_rst(struct txgbe_hw *, u16); +void txgbe_init_mbx_ops(struct txgbe_hw *hw); +void txgbe_init_mbx_params_vf(struct txgbe_hw *); +void txgbe_init_mbx_params_pf(struct txgbe_hw *); + +#endif /* _TXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.c b/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.c new file mode 100644 index 0000000000000000000000000000000000000000..5c29a28af0754aa15a4262b098fb6f044cf0ae06 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.c @@ -0,0 +1,1366 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + + +#include "txgbe.h" + +MTD_STATUS mtdHwXmdioWrite( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + IN MTD_U16 value) +{ + MTD_STATUS result = MTD_OK; + + if (devPtr->fmtdWriteMdio != NULL) { + if (devPtr->fmtdWriteMdio(devPtr, port, dev, reg, value) == MTD_FAIL) { + result = MTD_FAIL; + MTD_DBG_INFO("fmtdWriteMdio 0x%04X failed to port=%d, dev=%d, reg=0x%04X\n", + (unsigned)(value), (unsigned)port, (unsigned)dev, (unsigned)reg); + } + } else + result = MTD_FAIL; + + return result; +} + +MTD_STATUS mtdHwXmdioRead( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 * data) +{ + MTD_STATUS result = MTD_OK; + + if (devPtr->fmtdReadMdio != NULL) { + if (devPtr->fmtdReadMdio(devPtr, port, dev, reg, data) == MTD_FAIL) { + result = MTD_FAIL; + MTD_DBG_INFO("fmtdReadMdio failed from port=%d, dev=%d, reg=0x%04X\n", + (unsigned)port, (unsigned)dev, (unsigned)reg); + } + } else + result = MTD_FAIL; + + return result; +} + +/* + This macro calculates the mask for partial read/write of register's data. 
+*/ +#define MTD_CALC_MASK(fieldOffset, fieldLen, mask) do {\ + if ((fieldLen + fieldOffset) >= 16) \ + mask = (0 - (1 << fieldOffset)); \ + else \ + mask = (((1 << (fieldLen + fieldOffset))) - (1 << fieldOffset));\ + } while (0) + +MTD_STATUS mtdHwGetPhyRegField( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 * data) +{ + MTD_U16 tmpData; + MTD_STATUS retVal; + + retVal = mtdHwXmdioRead(devPtr, port, dev, regAddr, &tmpData); + + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to read register \n"); + return MTD_FAIL; + } + + mtdHwGetRegFieldFromWord(tmpData, fieldOffset, fieldLength, data); + + MTD_DBG_INFO("fOff %d, fLen %d, data 0x%04X.\n", (int)fieldOffset, + (int)fieldLength, (int)*data); + + return MTD_OK; +} + +MTD_STATUS mtdHwSetPhyRegField( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data) +{ + MTD_U16 tmpData, newData; + MTD_STATUS retVal; + + retVal = mtdHwXmdioRead(devPtr, port, dev, regAddr, &tmpData); + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to read register \n"); + return MTD_FAIL; + } + + mtdHwSetRegFieldToWord(tmpData, data, fieldOffset, fieldLength, &newData); + + retVal = mtdHwXmdioWrite(devPtr, port, dev, regAddr, newData); + + if (retVal != MTD_OK) { + MTD_DBG_ERROR("Failed to write register \n"); + return MTD_FAIL; + } + + MTD_DBG_INFO("fieldOff %d, fieldLen %d, data 0x%x.\n", fieldOffset, + fieldLength, data); + + return MTD_OK; +} + +MTD_STATUS mtdHwGetRegFieldFromWord( + IN MTD_U16 regData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data) +{ + /* Bits mask to be read */ + MTD_U16 mask; + + MTD_CALC_MASK(fieldOffset, fieldLength, mask); + + *data = (regData & mask) >> fieldOffset; + + return MTD_OK; +} + +MTD_STATUS mtdHwSetRegFieldToWord( + IN MTD_U16 regData, + IN MTD_U16 bitFieldData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data) +{ + /* Bits mask to be read */ + MTD_U16 mask; + + MTD_CALC_MASK(fieldOffset, fieldLength, mask); + + /* Set the desired bits to 0. 
*/
+ regData &= ~mask;
+ /* Set the given data into the above reset bits.*/
+ regData |= ((bitFieldData << fieldOffset) & mask);
+
+ *data = regData;
+
+ return MTD_OK;
+}
+
+MTD_STATUS mtdWait(IN MTD_UINT x)
+{
+ msleep(x);
+ return MTD_OK;
+}
+
+/* internal device registers */
+MTD_STATUS mtdCheckDeviceCapabilities(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ OUT MTD_BOOL * phyHasMacsec,
+ OUT MTD_BOOL * phyHasCopperInterface,
+ OUT MTD_BOOL * isE20X0Device)
+{
+ MTD_U8 major, minor, inc, test;
+ MTD_U16 abilities;
+
+ *phyHasMacsec = MTD_TRUE;
+ *phyHasCopperInterface = MTD_TRUE;
+ *isE20X0Device = MTD_FALSE;
+
+ if (mtdGetFirmwareVersion(devPtr, port, &major, &minor, &inc, &test) == MTD_FAIL) {
+ /* firmware not running will produce this case */
+ major = minor = inc = test = 0;
+ }
+
+ if (major == 0 && minor == 0 && inc == 0 && test == 0) {
+ /* no code loaded into internal processor */
+ /* have to read it from the device itself the hard way */
+ MTD_U16 reg2, reg3;
+ MTD_U16 index, index2;
+ MTD_U16 temp;
+ MTD_U16 bit16thru23[8];
+
+ /* save these registers */
+ /* ATTEMPT(mtdHwXmdioRead(devPtr,port,MTD_REG_CCCR9,&reg1)); some revs can't read this register reliably */
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 31, 0xF0F0, &reg2));
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 31, 0xF0F5, &reg3));
+
+ /* clear these bit indications */
+ for (index = 0; index < 8; index++) {
+ bit16thru23[index] = 0;
+ }
+
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF05E, 0x0300)); /* force clock on */
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F0, 0x0102)); /* set access */
+ mtdWait(1);
+
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x06D3)); /* sequence needed */
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0593));
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0513));
+ mtdWait(1);
+
+ index = 0;
+ index2 = 0;
+ while (index < 24) {
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0413));
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0513));
+ mtdWait(1);
+
+ if (index >= 16) {
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 31, 0xF0F5, &bit16thru23[index2++]));
+ } else {
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 31, 0xF0F5, &temp));
+ }
+ mtdWait(1);
+ index++;
+ }
+
+ if (((bit16thru23[0] >> 11) & 1) | ((bit16thru23[1] >> 11) & 1)) {
+ *phyHasMacsec = MTD_FALSE;
+ }
+ if (((bit16thru23[4] >> 11) & 1) | ((bit16thru23[5] >> 11) & 1)) {
+ *phyHasCopperInterface = MTD_FALSE;
+ }
+
+ if (((bit16thru23[6] >> 11) & 1) | ((bit16thru23[7] >> 11) & 1)) {
+ *isE20X0Device = MTD_TRUE;
+ }
+
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0413));
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0493));
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0413));
+ mtdWait(1);
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, 0x0513));
+ mtdWait(1);
+
+ /* restore the registers */
+ /* ATTEMPT(mtdHwXmdioWrite(devPtr,port,MTD_REG_CCCR9,reg1)); Some revs can't read this register reliably */
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF05E, 0x5440)); /* set back to reset value */
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F0, reg2));
+ ATTEMPT(mtdHwXmdioWrite(devPtr, port, 31, 0xF0F5, reg3));
+
+ } else {
+ /* should just read it from the firmware status register */
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_XG_EXT_STATUS, &abilities));
+ if (abilities & (1 << 12)) {
+ *phyHasMacsec = MTD_FALSE;
+ }
+
+ if (abilities & (1 << 13)) {
+ *phyHasCopperInterface =
MTD_FALSE; + } + + if (abilities & (1 << 14)) { + *isE20X0Device = MTD_TRUE; + } + + } + + return MTD_OK; +} + +MTD_STATUS mtdIsPhyReadyAfterReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL * phyReady) +{ + MTD_U16 val; + + *phyReady = MTD_FALSE; + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 15, 1, &val)); + + if (val) { + /* if still in reset return '0' (could be coming up, or disabled by download mode) */ + *phyReady = MTD_FALSE; + } else { + /* if Phy is in normal operation */ + *phyReady = MTD_TRUE; + } + + return MTD_OK; +} + +MTD_STATUS mtdSoftwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs) +{ + MTD_U16 counter; + MTD_BOOL phyReady; + /* bit self clears when done */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 15, 1, 1)); + + if (timeoutMs) { + counter = 0; + ATTEMPT(mtdIsPhyReadyAfterReset(devPtr, port, &phyReady)); + while (phyReady == MTD_FALSE && counter <= timeoutMs) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdIsPhyReadyAfterReset(devPtr, port, &phyReady)); + counter++; + } + + if (counter < timeoutMs) { + return MTD_OK; + } else { + /* timed out without becoming ready */ + return MTD_FAIL; + } + } else { + return MTD_OK; + } +} + +MTD_STATUS mtdIsPhyReadyAfterHardwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *phyReady) +{ + MTD_U16 val; + + *phyReady = MTD_FALSE; + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, 14, 1, &val)); + + if (val) { + /* if still in reset return '0' (could be coming up, or disabled by download mode) */ + *phyReady = MTD_FALSE; + } else { + /* if Phy is in normal operation */ + *phyReady = MTD_TRUE; + } + return MTD_OK; +} + +MTD_STATUS mtdHardwareReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs) +{ + MTD_U16 counter; + MTD_BOOL phyReady; + + /* bit self clears when done */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, 14, 1, 1)); + + if (timeoutMs) { + counter = 0; + ATTEMPT(mtdIsPhyReadyAfterHardwareReset(devPtr, port, &phyReady)); + while (phyReady == MTD_FALSE && counter <= timeoutMs) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdIsPhyReadyAfterHardwareReset(devPtr, port, &phyReady)); + counter++; + } + if (counter < timeoutMs) + return MTD_OK; + else + return MTD_FAIL; /* timed out without becoming ready */ + } else { + return MTD_OK; + } +} + +/****************************************************************************/ + +/****************************************************************************/ +/******************************************************************* + 802.3 Clause 28 and Clause 45 + Autoneg Related Control & Status + *******************************************************************/ +/******************************************************************* + Enabling speeds for autonegotiation + Reading speeds enabled for autonegotation + Set/get pause advertisement for autonegotiation + Other Autoneg-related Control and Status (restart,disable/enable, + force master/slave/auto, checking for autoneg resolution, etc.) 
+ *******************************************************************/ + +#define MTD_7_0010_SPEED_BIT_LENGTH 4 +#define MTD_7_0010_SPEED_BIT_POS 5 +#define MTD_7_8000_SPEED_BIT_LENGTH 2 +#define MTD_7_8000_SPEED_BIT_POS 8 +#define MTD_7_0020_SPEED_BIT_LENGTH 1 /* for 88X32X0 family and 88X33X0 family */ +#define MTD_7_0020_SPEED_BIT_POS 12 +#define MTD_7_0020_SPEED_BIT_LENGTH2 2 /* for 88X33X0 family A0 revision 2.5/5G */ +#define MTD_7_0020_SPEED_BIT_POS2 7 + +/* Bit defines for speed bits */ +#define MTD_FORCED_SPEEDS_BIT_MASK (MTD_SPEED_10M_HD_AN_DIS | MTD_SPEED_10M_FD_AN_DIS | \ + MTD_SPEED_100M_HD_AN_DIS | MTD_SPEED_100M_FD_AN_DIS) +#define MTD_LOWER_BITS_MASK 0x000F /* bits in base page */ +#define MTD_GIG_SPEED_POS 4 +#define MTD_XGIG_SPEED_POS 6 +#define MTD_2P5G_SPEED_POS 11 +#define MTD_5G_SPEED_POS 12 +#define MTD_GET_1000BT_BITS(_speedBits) ((_speedBits & (MTD_SPEED_1GIG_HD | MTD_SPEED_1GIG_FD)) \ + >> MTD_GIG_SPEED_POS) /* 1000BT bits */ +#define MTD_GET_10GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_10GIG_FD) \ + >> MTD_XGIG_SPEED_POS) /* 10GBT bit setting */ +#define MTD_GET_2P5GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_2P5GIG_FD) \ + >> MTD_2P5G_SPEED_POS) /* 2.5GBT bit setting */ +#define MTD_GET_5GBT_BIT(_speedBits) ((_speedBits & MTD_SPEED_5GIG_FD) \ + >> MTD_5G_SPEED_POS) /* 5GBT bit setting */ + +MTD_STATUS mtdEnableSpeeds( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart) +{ + MTD_BOOL speedForced; + MTD_U16 dummy; + MTD_U16 tempRegValue; + + if (speed_bits & MTD_FORCED_SPEEDS_BIT_MASK) { + /* tried to force the speed, this function is for autonegotiation control */ + return MTD_FAIL; + } + + if (MTD_IS_X32X0_BASE(devPtr->deviceId) && ((speed_bits & MTD_SPEED_2P5GIG_FD) || + (speed_bits & MTD_SPEED_5GIG_FD))) { + return MTD_FAIL; /* tried to advertise 2.5G/5G on a 88X32X0 chipset */ + } + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + const MTD_U16 chipRev = (devPtr->deviceId & 0xf); /* get the chip revision */ + + if (chipRev == 9 || chipRev == 5 || chipRev == 1 || /* Z2 chip revisions */ + chipRev == 8 || chipRev == 4 || chipRev == 0) /* Z1 chip revisions */ { + /* this is an X33X0 or E20X0 Z2/Z1 device and not supported (not compatible with A0) */ + return MTD_FAIL; + } + } + + /* Enable AN and set speed back to power-on default in case previously forced + Only do it if forced, to avoid an extra/unnecessary soft reset */ + ATTEMPT(mtdGetForcedSpeed(devPtr, port, &speedForced, &dummy)); + if (speedForced) { + ATTEMPT(mtdUndoForcedSpeed(devPtr, port, MTD_FALSE)); + } + + if (speed_bits == MTD_ADV_NONE) { + /* Set all speeds to be disabled + Take care of bits in 7.0010 (advertisement register, 10BT and 100BT bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010,\ + MTD_7_0010_SPEED_BIT_POS, MTD_7_0010_SPEED_BIT_LENGTH, \ + 0)); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x8000,\ + MTD_7_8000_SPEED_BIT_POS, MTD_7_8000_SPEED_BIT_LENGTH, \ + 0)); + + /* Now take care of bit in 7.0020 (10GBASE-T) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0020,\ + MTD_7_0020_SPEED_BIT_POS, MTD_7_0020_SPEED_BIT_LENGTH, 0)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + /* Now take care of bits in 7.0020 (2.5G, 5G speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0020,\ + MTD_7_0020_SPEED_BIT_POS2, MTD_7_0020_SPEED_BIT_LENGTH2, 0)); + } + } else { + /* Take care of bits in 7.0010 (advertisement register, 10BT and 100BT bits) */ + 
ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010,\ + MTD_7_0010_SPEED_BIT_POS, MTD_7_0010_SPEED_BIT_LENGTH, \ + (speed_bits & MTD_LOWER_BITS_MASK))); + + /* Take care of speed bits in 7.8000 (1000BASE-T speed bits) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x8000,\ + MTD_7_8000_SPEED_BIT_POS, MTD_7_8000_SPEED_BIT_LENGTH, \ + MTD_GET_1000BT_BITS(speed_bits))); + + + /* Now take care of bits in 7.0020 (10GBASE-T first) */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, 7, 0x0020, &tempRegValue)); + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_10GBT_BIT(speed_bits),\ + MTD_7_0020_SPEED_BIT_POS, MTD_7_0020_SPEED_BIT_LENGTH, \ + &tempRegValue)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + /* Now take care of 2.5G bit in 7.0020 */ + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_2P5GBT_BIT(speed_bits),\ + 7, 1, \ + &tempRegValue)); + + /* Now take care of 5G bit in 7.0020 */ + ATTEMPT(mtdHwSetRegFieldToWord(tempRegValue, MTD_GET_5GBT_BIT(speed_bits),\ + 8, 1, \ + &tempRegValue)); + } + + /* Now write result back to 7.0020 */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, 7, 0x0020, tempRegValue)); + + if (MTD_GET_10GBT_BIT(speed_bits) || + MTD_GET_2P5GBT_BIT(speed_bits) || + MTD_GET_5GBT_BIT(speed_bits)) { + /* Set XNP on if any bit that required it was set */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0, 13, 1, 1)); + } + } + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + +MTD_STATUS mtdUndoForcedSpeed( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart) +{ + + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 13, 1, 1)); + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 6, 1, 1)); + + /* when speed bits are changed, T unit sw reset is required, wait until phy is ready */ + ATTEMPT(mtdSoftwareReset(devPtr, port, 1000)); + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + + +MTD_STATUS mtdGetForcedSpeed( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed) +{ + MTD_U16 val, bit0, bit1, forcedSpeedBits, duplexBit; + MTD_BOOL anDisabled; + + *speedIsForced = MTD_FALSE; + *forcedSpeed = MTD_ADV_NONE; + + /* check if 7.0.12 is 0 or 1 (disabled or enabled) */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 7, 0, 12, 1, &val)); + + (val) ? 
(anDisabled = MTD_FALSE) : (anDisabled = MTD_TRUE); + + if (anDisabled) { + /* autoneg is disabled, see if it's forced to one of the speeds that work without AN */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 6, 1, &bit0)); + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_IEEE_PMA_CTRL1, 13, 1, &bit1)); + + /* now read the duplex bit setting */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 7, 0x8000, 4, 1, &duplexBit)); + + forcedSpeedBits = 0; + forcedSpeedBits = bit0 | (bit1 << 1); + + if (forcedSpeedBits == 0) { + /* it's set to 10BT */ + if (duplexBit) { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_10M_FD_AN_DIS; + } else { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_10M_HD_AN_DIS; + } + } else if (forcedSpeedBits == 2) { + /* it's set to 100BT */ + if (duplexBit) { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_100M_FD_AN_DIS; + } else { + *speedIsForced = MTD_TRUE; + *forcedSpeed = MTD_SPEED_100M_HD_AN_DIS; + } + } + /* else it's set to 1000BT or 10GBT which require AN to work */ + } + + return MTD_OK; +} + +MTD_STATUS mtdAutonegRestart( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + /* set 7.0.9, restart AN */ + return (mtdHwSetPhyRegField(devPtr, port, 7, 0, + 9, 1, 1)); +} + + +MTD_STATUS mtdAutonegEnable( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + /* set 7.0.12=1, enable AN */ + return (mtdHwSetPhyRegField(devPtr, port, 7, 0, + 12, 1, 1)); +} + +/****************************************************************************** + MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + anSpeedResolutionDone - one of the following + MTD_TRUE if speed/duplex is resolved + MTD_FALSE if speed/duplex is not resolved + + Returns: + MTD_OK or MTD_FAIL, if query was successful or not + + Description: + Queries register 3.8008.11 Speed/Duplex resolved to see if autonegotiation + is resolved or in progress. See note below. This function is only to be + called if autonegotation is enabled and speed is not forced. + + anSpeedResolutionDone being MTD_TRUE, only indicates if AN has determined + the speed and duplex bits in 3.8008, which will indicate what registers + to read later for AN resolution after AN has completed. + + Side effects: + None + + Notes/Warnings: + If autonegotiation is disabled or speed is forced, this function returns + MTD_TRUE. + +******************************************************************************/ +MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone) +{ + MTD_U16 val; + + /* read speed/duplex resolution done bit in 3.8008 bit 11 */ + if (mtdHwGetPhyRegField(devPtr, port, + 3, 0x8008, 11, 1, &val) == MTD_FAIL) { + *anSpeedResolutionDone = MTD_FALSE; + return MTD_FAIL; + } + + (val) ? 
(*anSpeedResolutionDone = MTD_TRUE) : (*anSpeedResolutionDone = MTD_FALSE); + + return MTD_OK; +} + + +MTD_STATUS mtdGetAutonegSpeedDuplexResolution( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speedResolution) +{ + MTD_U16 val, speed, speed2, duplex; + MTD_BOOL resDone; + + *speedResolution = MTD_ADV_NONE; + + /* check if AN is enabled */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 7, 0, 12, 1, &val)); + + if (val) { + /* an is enabled, check if speed is resolved */ + ATTEMPT(mtdAutonegIsSpeedDuplexResolutionDone(devPtr, port, &resDone)); + + if (resDone) { + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 14, 2, &speed)); + + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 13, 1, &duplex)); + + switch (speed) { + case MTD_CU_SPEED_10_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_10M_FD; + } else { + *speedResolution = MTD_SPEED_10M_HD; + } + break; + case MTD_CU_SPEED_100_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_100M_FD; + } else { + *speedResolution = MTD_SPEED_100M_HD; + } + break; + case MTD_CU_SPEED_1000_MBPS: + if (duplex) { + *speedResolution = MTD_SPEED_1GIG_FD; + } else { + *speedResolution = MTD_SPEED_1GIG_HD; + } + break; + case MTD_CU_SPEED_10_GBPS: /* also MTD_CU_SPEED_NBT */ + if (MTD_IS_X32X0_BASE(devPtr->deviceId)) { + *speedResolution = MTD_SPEED_10GIG_FD; /* 10G has only full duplex, ignore duplex bit */ + } else { + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, \ + 3, 0x8008, 2, 2, &speed2)); + + switch (speed2) { + case MTD_CU_SPEED_NBT_10G: + *speedResolution = MTD_SPEED_10GIG_FD; + break; + + case MTD_CU_SPEED_NBT_5G: + *speedResolution = MTD_SPEED_5GIG_FD; + break; + + case MTD_CU_SPEED_NBT_2P5G: + *speedResolution = MTD_SPEED_2P5GIG_FD; + break; + + default: + /* this is an error */ + return MTD_FAIL; + break; + } + } + break; + default: + /* this is an error */ + return MTD_FAIL; + break; + } + + } + + } + + return MTD_OK; +} + +MTD_STATUS mtdSetPauseAdvertisement( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U32 pauseType, + IN MTD_BOOL anRestart) +{ + /* sets/clears bits 11, 10 (A6,A5 in the tech bit field of 7.16) */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 7, 0x0010, \ + 10, 2, (MTD_U16)pauseType)); + + if (anRestart) { + return ((MTD_STATUS)(mtdAutonegEnable(devPtr, port) || + mtdAutonegRestart(devPtr, port))); + } + + return MTD_OK; +} + + +/****************************************************************************** + MTD_STATUS mtdAutonegIsCompleted + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anStatusReady + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + anStatusReady - one of the following + MTD_TRUE if AN status registers are available to be read (7.1, 7.33, 7.32769, etc.) + MTD_FALSE if AN is not completed and AN status registers may contain old data + + Returns: + MTD_OK or MTD_FAIL, if query was successful or not + + Description: + Checks 7.1.5 for 1. If 1, returns MTD_TRUE. If not, returns MTD_FALSE. Many + autonegotiation status registers are not valid unless AN has completed + meaning 7.1.5 = 1. + + Side effects: + None + + Notes/Warnings: + Call this function before reading 7.33 or 7.32769 to check for master/slave + resolution or other negotiated parameters which are negotiated during + autonegotiation like fast retrain, fast retrain type, etc. 
+
+******************************************************************************/
+MTD_STATUS mtdAutonegIsCompleted(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ OUT MTD_BOOL *anStatusReady)
+{
+ MTD_U16 val;
+
+ /* read an completed, 7.1.5 bit */
+ if (mtdHwGetPhyRegField(devPtr, port,
+ 7, 1, 5, 1, &val) == MTD_FAIL) {
+ *anStatusReady = MTD_FALSE;
+ return MTD_FAIL;
+ }
+
+ (val) ? (*anStatusReady = MTD_TRUE) : (*anStatusReady = MTD_FALSE);
+
+ return MTD_OK;
+}
+
+
+MTD_STATUS mtdGetLPAdvertisedPause(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ OUT MTD_U8 *pauseBits)
+{
+ MTD_U16 val;
+ MTD_BOOL anStatusReady;
+
+ /* Make sure AN is complete */
+ ATTEMPT(mtdAutonegIsCompleted(devPtr, port, &anStatusReady));
+
+ if (anStatusReady == MTD_FALSE) {
+ *pauseBits = MTD_CLEAR_PAUSE;
+ return MTD_FAIL;
+ }
+
+ /* get bits 11, 10 (A6,A5 in the tech bit field of 7.19) */
+ if (mtdHwGetPhyRegField(devPtr, port, 7, 19,
+ 10, 2, &val) == MTD_FAIL) {
+ *pauseBits = MTD_CLEAR_PAUSE;
+ return MTD_FAIL;
+ }
+
+ *pauseBits = (MTD_U8)val;
+
+ return MTD_OK;
+}
+
+/*******************************************************************
+ Firmware Version
+ *******************************************************************/
+/****************************************************************************/
+MTD_STATUS mtdGetFirmwareVersion(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ OUT MTD_U8 *major,
+ OUT MTD_U8 *minor,
+ OUT MTD_U8 *inc,
+ OUT MTD_U8 *test)
+{
+ MTD_U16 reg_49169, reg_49170;
+
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 1, 49169, &reg_49169));
+
+ *major = (reg_49169 & 0xFF00) >> 8;
+ *minor = (reg_49169 & 0x00FF);
+
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 1, 49170, &reg_49170));
+
+ *inc = (reg_49170 & 0xFF00) >> 8;
+ *test = (reg_49170 & 0x00FF);
+
+ /* firmware is not running if all 0's */
+ if (!(*major || *minor || *inc || *test)) {
+ return MTD_FAIL;
+ }
+ return MTD_OK;
+}
+
+
+MTD_STATUS mtdGetPhyRevision(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ OUT MTD_DEVICE_ID * phyRev,
+ OUT MTD_U8 *numPorts,
+ OUT MTD_U8 *thisPort)
+{
+ MTD_U16 temp = 0, tryCounter, temp2, baseType, reportedHwRev;
+ MTD_U16 revision = 0, numports, thisport, readyBit, fwNumports, fwThisport;
+ MTD_BOOL registerExists, regReady, hasMacsec, hasCopper, isE20X0Device;
+ MTD_U8 major, minor, inc, test;
+
+ *phyRev = MTD_REV_UNKNOWN; /* in case we have any failed ATTEMPT below, will return unknown */
+ *numPorts = 0;
+ *thisPort = 0;
+
+ /* first check base type of device, get reported rev and port info */
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0xD00D, &temp));
+ baseType = ((temp & 0xFC00) >> 6);
+ reportedHwRev = (temp & 0x000F);
+ numports = ((temp & 0x0380) >> 7) + 1;
+ thisport = ((temp & 0x0070) >> 4);
+
+ /* find out if device has macsec/ptp, copper unit or is an E20X0-type device */
+ ATTEMPT(mtdCheckDeviceCapabilities(devPtr, port, &hasMacsec, &hasCopper, &isE20X0Device));
+
+ /* check if internal processor firmware is up and running, and if so, easier to get info */
+ if (mtdGetFirmwareVersion(devPtr, port, &major, &minor, &inc, &test) == MTD_FAIL) {
+ major = minor = inc = test = 0; /* this is expected if firmware is not loaded/running */
+ }
+
+ if (major == 0 && minor == 0 && inc == 0 && test == 0) {
+ /* no firmware running, have to verify device revision */
+ if (MTD_IS_X32X0_BASE(baseType)) {
+ /* A0 and Z2 report the same revision, need to check which is which */
+ if (reportedHwRev == 1) {
+ /* need to figure out if it's A0 or Z2 */
+ /* remove internal reset */
+ ATTEMPT(mtdHwSetPhyRegField(devPtr,
port, 3, 0xD801, 5, 1, 1)); + + /* wait until it's ready */ + regReady = MTD_FALSE; + tryCounter = 0; + while (regReady == MTD_FALSE && tryCounter++ < 10) { + ATTEMPT(mtdWait(1)); /* timeout is set to 10 ms */ + ATTEMPT(mtdHwGetPhyRegField(devPtr, port, 3, 0xD007, 6, 1, &readyBit)); + if (readyBit == 1) { + regReady = MTD_TRUE; + } + } + + if (regReady == MTD_FALSE) { + /* timed out, can't tell for sure what rev this is */ + *numPorts = 0; + *thisPort = 0; + *phyRev = MTD_REV_UNKNOWN; + return MTD_FAIL; + } + + /* perform test */ + registerExists = MTD_FALSE; + ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0x8EC6, &temp)); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, 3, 0x8EC6, 0xA5A5)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, 3, 0x8EC6, &temp2)); + + /* put back internal reset */ + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 3, 0xD801, 5, 1, 0)); + + if (temp == 0 && temp2 == 0xA5A5) { + registerExists = MTD_TRUE; + } + + if (registerExists == MTD_TRUE) { + revision = 2; /* this is actually QA0 */ + } else { + revision = reportedHwRev; /* this is a QZ2 */ + } + + } else { + /* it's not A0 or Z2, use what's reported by the hardware */ + revision = reportedHwRev; + } + } else if (MTD_IS_X33X0_BASE(baseType)) { + /* all 33X0 devices report correct revision */ + revision = reportedHwRev; + } + + /* have to use what's reported by the hardware */ + *numPorts = (MTD_U8)numports; + *thisPort = (MTD_U8)thisport; + } else { + /* there is firmware loaded/running in internal processor */ + /* can get device revision reported by firmware */ + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_T_UNIT_PMA_PMD, MTD_TUNIT_PHY_REV_INFO_REG, &temp)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 0, 4, &revision)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 4, 3, &fwNumports)); + ATTEMPT(mtdHwGetRegFieldFromWord(temp, 7, 3, &fwThisport)); + if (fwNumports == numports && fwThisport == thisport) { + *numPorts = (MTD_U8)numports; + *thisPort = (MTD_U8)thisport; + } else { + *phyRev = MTD_REV_UNKNOWN; + *numPorts = 0; + *thisPort = 0; + return MTD_FAIL; /* firmware and hardware are reporting different values */ + } + } + + /* now have correct information to build up the MTD_DEVICE_ID */ + if (MTD_IS_X32X0_BASE(baseType)) { + temp = MTD_X32X0_BASE; + } else if (MTD_IS_X33X0_BASE(baseType)) { + temp = MTD_X33X0_BASE; + } else { + *phyRev = MTD_REV_UNKNOWN; + *numPorts = 0; + *thisPort = 0; + return MTD_FAIL; + } + + if (hasMacsec) { + temp |= MTD_MACSEC_CAPABLE; + } + + if (hasCopper) { + temp |= MTD_COPPER_CAPABLE; + } + + if (MTD_IS_X33X0_BASE(baseType) && isE20X0Device) { + temp |= MTD_E20X0_DEVICE; + } + + temp |= (revision & 0xF); + + *phyRev = (MTD_DEVICE_ID)temp; + + /* make sure we got a good one */ + if (mtdIsPhyRevisionValid(*phyRev) == MTD_OK) { + return MTD_OK; + } else { + return MTD_FAIL; /* unknown or unsupported, if recognized but unsupported, value is still valid */ + } +} + +MTD_STATUS mtdIsPhyRevisionValid(IN MTD_DEVICE_ID phyRev) +{ + switch (phyRev) { + /* list must match MTD_DEVICE_ID */ + case MTD_REV_3240P_Z2: + case MTD_REV_3240P_A0: + case MTD_REV_3240P_A1: + case MTD_REV_3220P_Z2: + case MTD_REV_3220P_A0: + + case MTD_REV_3240_Z2: + case MTD_REV_3240_A0: + case MTD_REV_3240_A1: + case MTD_REV_3220_Z2: + case MTD_REV_3220_A0: + + case MTD_REV_3310P_A0: + case MTD_REV_3320P_A0: + case MTD_REV_3340P_A0: + case MTD_REV_3310_A0: + case MTD_REV_3320_A0: + case MTD_REV_3340_A0: + + case MTD_REV_E2010P_A0: + case MTD_REV_E2020P_A0: + case MTD_REV_E2040P_A0: + case MTD_REV_E2010_A0: + case MTD_REV_E2020_A0: + case 
MTD_REV_E2040_A0: + + case MTD_REV_2340P_A1: + case MTD_REV_2320P_A0: + case MTD_REV_2340_A1: + case MTD_REV_2320_A0: + return MTD_OK; + break; + + /* unsupported PHYs */ + case MTD_REV_3310P_Z1: + case MTD_REV_3320P_Z1: + case MTD_REV_3340P_Z1: + case MTD_REV_3310_Z1: + case MTD_REV_3320_Z1: + case MTD_REV_3340_Z1: + + case MTD_REV_3310P_Z2: + case MTD_REV_3320P_Z2: + case MTD_REV_3340P_Z2: + case MTD_REV_3310_Z2: + case MTD_REV_3320_Z2: + case MTD_REV_3340_Z2: + + + case MTD_REV_E2010P_Z2: + case MTD_REV_E2020P_Z2: + case MTD_REV_E2040P_Z2: + case MTD_REV_E2010_Z2: + case MTD_REV_E2020_Z2: + case MTD_REV_E2040_Z2: + default: + return MTD_FAIL; /* is either MTD_REV_UNKNOWN or not in the above list */ + break; + } +} + +/* mtdCunit.c */ +MTD_STATUS mtdCunitSwReset( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + return mtdHwSetPhyRegField(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, 15, 1, 1); +} + +/* mtdHxunit.c */ +MTD_STATUS mtdRerunSerdesAutoInitializationUseAutoMode( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port) +{ + MTD_U16 temp, temp2, temp3; + MTD_U16 waitCounter; + + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp)); + + ATTEMPT(mtdHwSetRegFieldToWord(temp, 3, 14, 2, &temp2)); /* execute bits and disable bits set */ + + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, temp2)); + + /* wait for it to be done */ + waitCounter = 0; + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp3)); + while ((temp3 & 0x8000) && (waitCounter < 100)) { + ATTEMPT(mtdWait(1)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_T_UNIT_AN, MTD_SERDES_CTRL_STATUS, &temp3)); + waitCounter++; + } + + /* if speed changed, let it stay. that's the speed that it ended up changing to/serdes was initialied to */ + if (waitCounter >= 100) { + return MTD_FAIL; /* execute timed out */ + } + + return MTD_OK; +} + + +/* mtdHunit.c */ +/****************************************************************************** + Mac Interface functions +******************************************************************************/ + +MTD_STATUS mtdSetMacInterfaceControl( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, /* 33X0/E20X0 devices only */ + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization) +{ + MTD_U16 cunitPortCtrl, cunitModeConfig; + + /* do range checking on parameters */ + if ((macType > MTD_MAC_LEAVE_UNCHANGED)) { + return MTD_FAIL; + } + + if ((macIfSnoopSel > MTD_MAC_SNOOP_LEAVE_UNCHANGED) || + (macIfSnoopSel == 1)) { + return MTD_FAIL; + } + + if (macIfActiveLaneSelect > 1) { + return MTD_FAIL; + } + + if (macLinkDownSpeed > MTD_MAC_SPEED_LEAVE_UNCHANGED) { + return MTD_FAIL; + } + + if (!(macMaxIfSpeed == MTD_MAX_MAC_SPEED_10G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_5G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_2P5G || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED || + macMaxIfSpeed == MTD_MAX_MAC_SPEED_NOT_APPLICABLE)) { + return MTD_FAIL; + } + + + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, &cunitPortCtrl)); + ATTEMPT(mtdHwXmdioRead(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_MODE_CONFIG, &cunitModeConfig)); + + /* Because writes of some of these bits don't show up in the register on a read + * until after the software reset, we can't do repeated read-modify-writes + * to the same register or we 
will lose those changes. + + * This approach also cuts down on IO and speeds up the code + */ + + if (macType < MTD_MAC_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitPortCtrl, macType, 0, 3, &cunitPortCtrl)); + } + + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, (MTD_U16)macIfPowerDown, 3, 1, &cunitModeConfig)); + + if (macIfSnoopSel < MTD_MAC_SNOOP_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macIfSnoopSel, 8, 2, &cunitModeConfig)); + } + + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macIfActiveLaneSelect, 10, 1, &cunitModeConfig)); + + if (macLinkDownSpeed < MTD_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetRegFieldToWord(cunitModeConfig, macLinkDownSpeed, 6, 2, &cunitModeConfig)); + } + + /* Now write changed values */ + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_PORT_CTRL, cunitPortCtrl)); + ATTEMPT(mtdHwXmdioWrite(devPtr, port, MTD_C_UNIT_GENERAL, MTD_CUNIT_MODE_CONFIG, cunitModeConfig)); + + if (MTD_IS_X33X0_BASE(devPtr->deviceId)) { + if (macMaxIfSpeed != MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdHwSetPhyRegField(devPtr, port, 31, 0xF0A8, 0, 2, macMaxIfSpeed)); + } + } + + if (doSwReset == MTD_TRUE) { + ATTEMPT(mtdCunitSwReset(devPtr, port)); + + if (macLinkDownSpeed < MTD_MAC_SPEED_LEAVE_UNCHANGED) { + ATTEMPT(mtdCunitSwReset(devPtr, port)); /* need 2x for changes to macLinkDownSpeed */ + } + + if (rerunSerdesInitialization == MTD_TRUE) { + ATTEMPT(mtdRerunSerdesAutoInitializationUseAutoMode(devPtr, port)); + } + } + + return MTD_OK; +} + + +/******************************************************************************* +* mtdSemCreate +* +* DESCRIPTION: +* Create semaphore. +* +* INPUTS: +* state - beginning state of the semaphore, either MTD_SEM_EMPTY or MTD_SEM_FULL +* +* OUTPUTS: +* None +* +* RETURNS: +* MTD_SEM if success. Otherwise, NULL +* +* COMMENTS: +* None +* +*******************************************************************************/ +MTD_SEM mtdSemCreate( + IN MTD_DEV * dev, + IN MTD_SEM_BEGIN_STATE state) +{ + if (dev->semCreate) + return dev->semCreate(state); + + return 1; /* should return any value other than 0 to let it keep going */ +} + +MTD_STATUS mtdLoadDriver( + IN FMTD_READ_MDIO readMdio, + IN FMTD_WRITE_MDIO writeMdio, + IN MTD_BOOL macsecIndirectAccess, + IN FMTD_SEM_CREATE semCreate, + IN FMTD_SEM_DELETE semDelete, + IN FMTD_SEM_TAKE semTake, + IN FMTD_SEM_GIVE semGive, + IN MTD_U16 anyPort, + OUT MTD_DEV * dev) +{ + MTD_U16 data; + + MTD_DBG_INFO("mtdLoadDriver Called.\n"); + + /* Check for parameters validity */ + if (dev == NULL) { + MTD_DBG_ERROR("MTD_DEV pointer is NULL.\n"); + return MTD_API_ERR_DEV; + } + + /* The initialization was already done. */ + if (dev->devEnabled) { + MTD_DBG_ERROR("Device Driver already loaded.\n"); + return MTD_API_ERR_DEV_ALREADY_EXIST; + } + + /* Make sure mtdWait() was implemented */ + if (mtdWait(1) == MTD_FAIL) { + MTD_DBG_ERROR("mtdWait() not implemented.\n"); + return MTD_FAIL; + } + + dev->fmtdReadMdio = readMdio; + dev->fmtdWriteMdio = writeMdio; + + dev->semCreate = semCreate; + dev->semDelete = semDelete; + dev->semTake = semTake; + dev->semGive = semGive; + dev->macsecIndirectAccess = macsecIndirectAccess; /* 88X33X0 and later force direct access */ + + /* try to read 1.0 */ + if ((mtdHwXmdioRead(dev, anyPort, 1, 0, &data)) != MTD_OK) { + MTD_DBG_ERROR("Reading to reg %x failed.\n", 0); + return MTD_API_FAIL_READ_REG; + } + + MTD_DBG_INFO("mtdLoadDriver successful.\n"); + + /* Initialize the MACsec Register Access semaphore. 
*/ + dev->multiAddrSem = mtdSemCreate(dev, MTD_SEM_FULL); + if (dev->multiAddrSem == 0) { + MTD_DBG_ERROR("semCreate Failed.\n"); + return MTD_API_FAIL_SEM_CREATE; + } + + if (dev->msec_ctrl.msec_rev == MTD_MSEC_REV_FPGA) { + dev->deviceId = MTD_REV_3310P_Z2; /* verification: change if needed */ + dev->numPorts = 1; /* verification: change if needed */ + dev->thisPort = 0; + } else { + /* After everything else is done, can fill in the device id */ + if ((mtdGetPhyRevision(dev, anyPort, + &(dev->deviceId), + &(dev->numPorts), + &(dev->thisPort))) != MTD_OK) { + MTD_DBG_ERROR("mtdGetPhyRevision Failed.\n"); + return MTD_FAIL; + } + } + + if (MTD_IS_X33X0_BASE(dev->deviceId)) { + dev->macsecIndirectAccess = MTD_FALSE; /* bug was fixed in 88X33X0 and later revisions, go direct */ + } + + dev->devEnabled = MTD_TRUE; + + MTD_DBG_INFO("mtdLoadDriver successful !!!.\n"); + + return MTD_OK; +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.h b/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.h new file mode 100644 index 0000000000000000000000000000000000000000..1c5daae94a547ca1e2f183f240bf5089bc49fb3a --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_mtd.h @@ -0,0 +1,1540 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef _TXGBE_MTD_H_ +#define _TXGBE_MTD_H_ + +#define C_LINKAGE 1 /* set to 1 if C compile/linkage on C files is desired with C++ */ + +#if C_LINKAGE +#if defined __cplusplus + extern "C" { +#endif +#endif + +/* general */ + +#undef IN +#define IN +#undef OUT +#define OUT +#undef INOUT +#define INOUT + +#ifndef NULL +#define NULL ((void *)0) +#endif + +typedef void MTD_VOID; +typedef char MTD_8; +typedef short MTD_16; +typedef long MTD_32; +typedef long long MTD_64; + +typedef unsigned char MTD_U8; +typedef unsigned short MTD_U16; +typedef unsigned long MTD_U32; +typedef unsigned int MTD_UINT; +typedef int MTD_INT; +typedef signed short MTD_S16; + +typedef unsigned long long MTD_U64; + +typedef enum { + MTD_FALSE = 0, + MTD_TRUE = 1 +} MTD_BOOL; + +#define MTD_CONVERT_BOOL_TO_UINT(boolVar, uintVar) \ + {(boolVar) ? (uintVar = 1) : (uintVar = 0); } +#define MTD_CONVERT_UINT_TO_BOOL(uintVar, boolVar) \ + {(uintVar) ? (boolVar = MTD_TRUE) : (boolVar = MTD_FALSE); } +#define MTD_GET_BOOL_AS_BIT(boolVar) ((boolVar) ? 1 : 0) +#define MTD_GET_BIT_AS_BOOL(uintVar) ((uintVar) ? 
MTD_TRUE : MTD_FALSE) + +typedef void (*MTD_VOIDFUNCPTR) (void); /* ptr to function returning void */ +typedef MTD_U32 (*MTD_INTFUNCPTR) (void); /* ptr to function returning int */ + +typedef MTD_U32 MTD_STATUS; + +/* Defines for semaphore support */ +typedef MTD_U32 MTD_SEM; + +typedef enum { + MTD_SEM_EMPTY, + MTD_SEM_FULL +} MTD_SEM_BEGIN_STATE; + +typedef MTD_SEM (*FMTD_SEM_CREATE)(MTD_SEM_BEGIN_STATE state); +typedef MTD_STATUS (*FMTD_SEM_DELETE)(MTD_SEM semId); +typedef MTD_STATUS (*FMTD_SEM_TAKE)(MTD_SEM semId, MTD_U32 timOut); +typedef MTD_STATUS (*FMTD_SEM_GIVE)(MTD_SEM semId); + +/* Defines for mtdLoadDriver() mtdUnloadDriver() and all API functions which need MTD_DEV */ +typedef struct _MTD_DEV MTD_DEV; +typedef MTD_DEV * MTD_DEV_PTR; + +typedef MTD_STATUS (*FMTD_READ_MDIO)( + MTD_DEV *dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 *value); +typedef MTD_STATUS (*FMTD_WRITE_MDIO)( + MTD_DEV *dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value); + +/* MTD_DEVICE_ID format: */ +/* Bits 15:13 reserved */ +/* Bit 12: 1-> E20X0 device with max speed of 5G and no fiber interface */ +/* Bit 11: 1-> Macsec Capable (Macsec/PTP module included */ +/* Bit 10: 1-> Copper Capable (T unit interface included) */ +/* Bits 9:4 0x18 -> X32X0 base, 0x1A 0x33X0 base */ +/* Bits 3:0 revision/number of ports indication, see list */ +/* Following defines are for building MTD_DEVICE_ID */ +#define MTD_E20X0_DEVICE (1<<12) /* whether this is an E20X0 device group */ +#define MTD_MACSEC_CAPABLE (1<<11) /* whether the device has a Macsec/PTP module */ +#define MTD_COPPER_CAPABLE (1<<10) /* whether the device has a copper (T unit) module */ +#define MTD_X32X0_BASE (0x18<<4) /* whether the device uses X32X0 firmware base */ +#define MTD_X33X0_BASE (0x1A<<4) /* whether the device uses X33X0 firmware base */ + +/* Following macros are to test MTD_DEVICE_ID for various features */ +#define MTD_IS_E20X0_DEVICE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_E20X0_DEVICE)) +#define MTD_IS_MACSEC_CAPABLE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_MACSEC_CAPABLE)) +#define MTD_IS_COPPER_CAPABLE(mTdrevId) ((MTD_BOOL)(mTdrevId & MTD_COPPER_CAPABLE)) +#define MTD_IS_X32X0_BASE(mTdrevId) ((MTD_BOOL)((mTdrevId & (0x3F<<4)) == MTD_X32X0_BASE)) +#define MTD_IS_X33X0_BASE(mTdrevId) ((MTD_BOOL)((mTdrevId & (0x3F<<4)) == MTD_X33X0_BASE)) + +#define MTD_X33X0BASE_SINGLE_PORTA0 0xA +#define MTD_X33X0BASE_DUAL_PORTA0 0x6 +#define MTD_X33X0BASE_QUAD_PORTA0 0x2 + +/* WARNING: If you add/modify this list, you must also modify mtdIsPhyRevisionValid() */ +typedef enum { + MTD_REV_UNKNOWN = 0, + MTD_REV_3240P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1), + MTD_REV_3240P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2), + MTD_REV_3240P_A1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_3220P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4), + MTD_REV_3220P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5), + MTD_REV_3240_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x1), + MTD_REV_3240_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x2), + MTD_REV_3240_A1 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_3220_Z2 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x4), + MTD_REV_3220_A0 = (MTD_COPPER_CAPABLE | MTD_X32X0_BASE | 0x5), + + MTD_REV_3310P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8), /* 88X33X0 Z1 not supported starting with version 1.2 of API */ + MTD_REV_3320P_Z1 = 
(MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4), + MTD_REV_3340P_Z1 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0), + MTD_REV_3310_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x8), + MTD_REV_3320_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x4), + MTD_REV_3340_Z1 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x0), + + MTD_REV_3310P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), /* 88X33X0 Z2 not supported starting with version 1.2 of API */ + MTD_REV_3320P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_3340P_Z2 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + MTD_REV_3310_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), + MTD_REV_3320_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_3340_Z2 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + + MTD_REV_E2010P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), /* E20X0 Z2 not supported starting with version 1.2 of API */ + MTD_REV_E2020P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_E2040P_Z2 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + MTD_REV_E2010_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x9), + MTD_REV_E2020_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x5), + MTD_REV_E2040_Z2 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | 0x1), + + + MTD_REV_3310P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_3320P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_3340P_A0 = (MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + MTD_REV_3310_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_3320_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_3340_A0 = (MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + + MTD_REV_E2010P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_E2020P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_E2040P_A0 = (MTD_E20X0_DEVICE | MTD_MACSEC_CAPABLE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + MTD_REV_E2010_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_SINGLE_PORTA0), + MTD_REV_E2020_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_DUAL_PORTA0), + MTD_REV_E2040_A0 = (MTD_E20X0_DEVICE | MTD_COPPER_CAPABLE | MTD_X33X0_BASE | MTD_X33X0BASE_QUAD_PORTA0), + + MTD_REV_2340P_A1 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x3), + MTD_REV_2320P_A0 = (MTD_MACSEC_CAPABLE | MTD_X32X0_BASE | 0x5), + MTD_REV_2340_A1 = (MTD_X32X0_BASE | 0x3), + MTD_REV_2320_A0 = (MTD_X32X0_BASE | 0x5) +} MTD_DEVICE_ID; + +typedef enum { + MTD_MSEC_REV_Z0A, + MTD_MSEC_REV_Y0A, + MTD_MSEC_REV_A0B, + MTD_MSEC_REV_FPGA, + MTD_MSEC_REV_UNKNOWN = -1 +} MTD_MSEC_REV; + +/* compatible for USB test */ +typedef struct _MTD_MSEC_CTRL { + MTD_32 dev_num; /* indicates the device number (0 if only one) when multiple devices are present on SVB.*/ + MTD_32 port_num; /* Indicates which port (0 to 4) is requesting CPU */ + MTD_U16 prev_addr; /* < Prev write address */ + MTD_U16 prev_dataL; /* < Prev dataL value */ + 
MTD_MSEC_REV msec_rev; /* revision */ +} MTD_MSEC_CTRL; + +struct _MTD_DEV { + MTD_DEVICE_ID deviceId; /* type of device and capabilities */ + MTD_BOOL devEnabled; /* whether mtdLoadDriver() called successfully */ + MTD_U8 numPorts; /* number of ports per device */ + MTD_U8 thisPort; /* relative port number on this device starting with 0 (not MDIO address) */ + MTD_SEM multiAddrSem; + + FMTD_READ_MDIO fmtdReadMdio; + FMTD_WRITE_MDIO fmtdWriteMdio; + + FMTD_SEM_CREATE semCreate; /* create semapore */ + FMTD_SEM_DELETE semDelete; /* delete the semapore */ + FMTD_SEM_TAKE semTake; /* try to get a semapore */ + FMTD_SEM_GIVE semGive; /* return semaphore */ + + MTD_U8 macsecIndirectAccess; /* if MTD_TRUE use internal processor to access Macsec */ + MTD_MSEC_CTRL msec_ctrl; /* structure use for internal verification */ + + void *appData; /* application specific data, anything the host wants to pass to the low layer */ +}; + +#define MTD_OK 0 /* Operation succeeded */ +#define MTD_FAIL 1 /* Operation failed */ +#define MTD_PENDING 2 /* Pending */ + +/* bit definition */ +#define MTD_BIT_0 0x0001 +#define MTD_BIT_1 0x0002 +#define MTD_BIT_2 0x0004 +#define MTD_BIT_3 0x0008 +#define MTD_BIT_4 0x0010 +#define MTD_BIT_5 0x0020 +#define MTD_BIT_6 0x0040 +#define MTD_BIT_7 0x0080 +#define MTD_BIT_8 0x0100 +#define MTD_BIT_9 0x0200 +#define MTD_BIT_10 0x0400 +#define MTD_BIT_11 0x0800 +#define MTD_BIT_12 0x1000 +#define MTD_BIT_13 0x2000 +#define MTD_BIT_14 0x4000 +#define MTD_BIT_15 0x8000 + +#define MTD_DBG_ERROR(...) +#define MTD_DBG_INFO(...) +#define MTD_DBG_CRITIC_INFO(...) + + +#define MTD_API_MAJOR_VERSION 2 +#define MTD_API_MINOR_VERSION 0 + +/* This macro is handy for calling a function when you want to test the + return value and return MTD_FAIL, if the function returned MTD_FAIL, + otherwise continue */ +#define ATTEMPT(xFuncToTry) do {if (xFuncToTry == MTD_FAIL) { return MTD_FAIL; } } while (0) + +/* These defines are used for some registers which represent the copper + speed as a 2-bit binary number */ +#define MTD_CU_SPEED_10_MBPS 0 /* copper is 10BASE-T */ +#define MTD_CU_SPEED_100_MBPS 1 /* copper is 100BASE-TX */ +#define MTD_CU_SPEED_1000_MBPS 2 /* copper is 1000BASE-T */ +#define MTD_CU_SPEED_10_GBPS 3 /* copper is 10GBASE-T */ + +/* for 88X33X0 family: */ +#define MTD_CU_SPEED_NBT 3 /* copper is NBASE-T */ +#define MTD_CU_SPEED_NBT_10G 0 /* copper is 10GBASE-T */ +#define MTD_CU_SPEED_NBT_5G 2 /* copper is 5GBASE-T */ +#define MTD_CU_SPEED_NBT_2P5G 1 /* copper is 2.5GBASE-T */ + +#define MTD_ADV_NONE 0x0000 /* No speeds to be advertised */ +#define MTD_SPEED_10M_HD 0x0001 /* 10BT half-duplex */ +#define MTD_SPEED_10M_FD 0x0002 /* 10BT full-duplex */ +#define MTD_SPEED_100M_HD 0x0004 /* 100BASE-TX half-duplex */ +#define MTD_SPEED_100M_FD 0x0008 /* 100BASE-TX full-duplex */ +#define MTD_SPEED_1GIG_HD 0x0010 /* 1000BASE-T half-duplex */ +#define MTD_SPEED_1GIG_FD 0x0020 /* 1000BASE-T full-duplex */ +#define MTD_SPEED_10GIG_FD 0x0040 /* 10GBASE-T full-duplex */ +#define MTD_SPEED_2P5GIG_FD 0x0800 /* 2.5GBASE-T full-duplex, 88X33X0/88E20X0 family only */ +#define MTD_SPEED_5GIG_FD 0x1000 /* 5GBASE-T full-duplex, 88X33X0/88E20X0 family only */ +#define MTD_SPEED_ALL (MTD_SPEED_10M_HD | \ + MTD_SPEED_10M_FD | \ + MTD_SPEED_100M_HD | \ + MTD_SPEED_100M_FD | \ + MTD_SPEED_1GIG_HD | \ + MTD_SPEED_1GIG_FD | \ + MTD_SPEED_10GIG_FD) +#define MTD_SPEED_ALL_33X0 (MTD_SPEED_10M_HD | \ + MTD_SPEED_10M_FD | \ + MTD_SPEED_100M_HD | \ + MTD_SPEED_100M_FD | \ + MTD_SPEED_1GIG_HD | \ + MTD_SPEED_1GIG_FD 
| \ + MTD_SPEED_10GIG_FD | \ + MTD_SPEED_2P5GIG_FD |\ + MTD_SPEED_5GIG_FD) + +/* these bits are for forcing the speed and disabling autonegotiation */ +#define MTD_SPEED_10M_HD_AN_DIS 0x0080 /* Speed forced to 10BT half-duplex */ +#define MTD_SPEED_10M_FD_AN_DIS 0x0100 /* Speed forced to 10BT full-duplex */ +#define MTD_SPEED_100M_HD_AN_DIS 0x0200 /* Speed forced to 100BT half-duplex */ +#define MTD_SPEED_100M_FD_AN_DIS 0x0400 /* Speed forced to 100BT full-duplex */ + +/* this value is returned for the speed when the link status is checked and the speed has been */ +/* forced to one speed but the link is up at a different speed. it indicates an error. */ +#define MTD_SPEED_MISMATCH 0x8000 /* Speed is forced to one speed, but status indicates another */ + + +/* for macType */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_EN (0x0) /* X32X0/X33x0, but not E20x0 */ +#define MTD_MAC_TYPE_RXAUI_SGMII_AN_DIS (0x1) /* X32x0/X3340/X3320, but not X3310/E20x0 */ +#define MTD_MAC_TYPE_XAUI_RATE_ADAPT (0x1) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_RXAUI_RATE_ADAPT (0x2) +#define MTD_MAC_TYPE_XAUI (0x3) /* X3310,E2010 only */ +#define MTD_MAC_TYPE_XFI_SGMII_AN_EN (0x4) /* XFI at 10G, X33x0/E20x0 also use 5GBASE-R/2500BASE-X */ +#define MTD_MAC_TYPE_XFI_SGMII_AN_DIS (0x5) /* XFI at 10G, X33x0/E20x0 also use 5GBASE-R/2500BASE-X */ +#define MTD_MAC_TYPE_XFI_RATE_ADAPT (0x6) +#define MTD_MAC_TYPE_USXGMII (0x7) /* X33x0 only */ +#define MTD_MAC_LEAVE_UNCHANGED (0x8) /* use this option to not touch these bits */ + +/* for macIfSnoopSel */ +#define MTD_MAC_SNOOP_FROM_NETWORK (0x2) +#define MTD_MAC_SNOOP_FROM_HOST (0x3) +#define MTD_MAC_SNOOP_OFF (0x0) +#define MTD_MAC_SNOOP_LEAVE_UNCHANGED (0x4) /* use this option to not touch these bits */ +/* for macLinkDownSpeed */ +#define MTD_MAC_SPEED_10_MBPS MTD_CU_SPEED_10_MBPS +#define MTD_MAC_SPEED_100_MBPS MTD_CU_SPEED_100_MBPS +#define MTD_MAC_SPEED_1000_MBPS MTD_CU_SPEED_1000_MBPS +#define MTD_MAC_SPEED_10_GBPS MTD_CU_SPEED_10_GBPS +#define MTD_MAC_SPEED_LEAVE_UNCHANGED (0x4) +/* X33X0/E20X0 devices only for macMaxIfSpeed */ +#define MTD_MAX_MAC_SPEED_10G (0) +#define MTD_MAX_MAC_SPEED_5G (2) +#define MTD_MAX_MAC_SPEED_2P5G (3) +#define MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED (4) +#define MTD_MAX_MAC_SPEED_NOT_APPLICABLE (4) /* 32X0 devices can pass this */ + +/* 88X3240/3220 Device Number Definitions */ +#define MTD_T_UNIT_PMA_PMD 1 +#define MTD_T_UNIT_PCS_CU 3 +#define MTD_X_UNIT 3 +#define MTD_H_UNIT 4 +#define MTD_T_UNIT_AN 7 +#define MTD_XFI_DSP 30 +#define MTD_C_UNIT_GENERAL 31 +#define MTD_M_UNIT 31 + +/* 88X3240/3220 Device Number Definitions Host Redundant Mode */ +#define MTD_BASER_LANE_0 MTD_H_UNIT +#define MTD_BASER_LANE_1 MTD_X_UNIT + +/* 88X3240/3220 T Unit Registers MMD 1 */ +#define MTD_TUNIT_IEEE_PMA_CTRL1 0x0000 /* do not enclose in parentheses */ +#define MTD_TUNIT_XG_EXT_STATUS 0xC001 /* do not enclose in parentheses */ +#define MTD_TUNIT_PHY_REV_INFO_REG 0xC04E /* do not enclose in parentheses */ + +/* control/status for serdes initialization */ +#define MTD_SERDES_CTRL_STATUS 0x800F /* do not enclose in parentheses */ +/* 88X3240/3220 C Unit Registers MMD 31 */ +#define MTD_CUNIT_MODE_CONFIG 0xF000 /* do not enclose in parentheses */ +#define MTD_CUNIT_PORT_CTRL 0xF001 /* do not enclose in parentheses */ + +#define MTD_API_FAIL_SEM_CREATE (0x18<<24) /*semCreate Failed. */ +#define MTD_API_FAIL_SEM_DELETE (0x19<<24) /*semDelete Failed. */ +#define MTD_API_FAIL_READ_REG (0x16<<16) /*Reading from phy reg failed. 
*/
+#define MTD_API_ERR_DEV (0x3c<<16) /*driver structure is NULL. */
+#define MTD_API_ERR_DEV_ALREADY_EXIST (0x3e<<16) /*Device Driver already loaded. */
+
+
+#define MTD_CLEAR_PAUSE 0 /* clears both pause bits */
+#define MTD_SYM_PAUSE 1 /* for symmetric pause only */
+#define MTD_ASYM_PAUSE 2 /* for asymmetric pause only */
+#define MTD_SYM_ASYM_PAUSE 3 /* for both */
+
+
+/*******************************************************************************
+ mtdLoadDriver
+
+ DESCRIPTION:
+ Marvell X32X0 Driver Initialization Routine.
+ This is the first routine that needs to be called by system software.
+ It takes parameters from system software and returns a pointer (*dev)
+ to a data structure which includes information related to this Marvell PHY
+ device. This pointer (*dev) is then used for all the API functions.
+ The following is the job performed by this routine:
+ 1. store MDIO read/write function into the given MTD_DEV structure
+ 2. run any device specific initialization routine
+ 3. create semaphore if required
+ 4. Initialize the deviceId
+
+
+ INPUTS:
+ readMdio - pointer to host's function to do MDIO read
+ writeMdio - pointer to host's function to do MDIO write
+ macsecIndirectAccess - MTD_TRUE to access MacSec through T-unit processor
+ MTD_FALSE to do direct register access
+ semCreate - pointer to host's function to create a semaphore, NULL
+ if not used
+ semDelete - pointer to host's function to delete a semaphore, NULL
+ if not used
+ semTake - pointer to host's function to take a semaphore, NULL
+ if not used
+ semGive - pointer to host's function to give a semaphore, NULL
+ if not used
+ anyPort - port address of any port for this device
+
+ OUTPUTS:
+ dev - pointer to MTD_DEV that holds device information to be used for each API call.
+
+ RETURNS:
+ MTD_OK - on success
+ MTD_FAIL - on error
+
+ COMMENTS:
+ mtdUnloadDriver is also provided to do driver cleanup.
+
+ An MTD_DEV is required for each type of X32X0 device in the system. For
+ example, if there are 16 ports of X3240 and 4 ports of X3220,
+ two MTD_DEV are required, and one call to mtdLoadDriver() must
+ be made with one of the X3240 ports, and one with one of the X3220
+ ports.
+*******************************************************************************/
+MTD_STATUS mtdLoadDriver
+(
+ IN FMTD_READ_MDIO readMdio,
+ IN FMTD_WRITE_MDIO writeMdio,
+ IN MTD_BOOL macsecIndirectAccess,
+ IN FMTD_SEM_CREATE semCreate,
+ IN FMTD_SEM_DELETE semDelete,
+ IN FMTD_SEM_TAKE semTake,
+ IN FMTD_SEM_GIVE semGive,
+ IN MTD_U16 anyPort,
+ OUT MTD_DEV * dev
+);
+
+/******************************************************************************
+MTD_STATUS mtdHwXmdioWrite
+(
+ IN MTD_DEV_PTR devPtr,
+ IN MTD_U16 port,
+ IN MTD_U16 dev,
+ IN MTD_U16 reg,
+ IN MTD_U16 value
+);
+
+ Inputs:
+ devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call
+ port - MDIO port address, 0-31
+ dev - MMD device address, 0-31
+ reg - MMD register address
+ value - data to write
+
+ Outputs:
+ None
+
+ Returns:
+ MTD_OK - wrote successfully
+ MTD_FAIL - an error occurred
+
+ Description:
+ Writes a 16-bit word to the MDIO.
+ Address is in format X.Y.Z, where X selects the MDIO port (0-31), Y selects
+ the MMD/Device (0-31), and Z selects the register.
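+
+ For example (the register and value below are purely illustrative, not
+ taken from a datasheet), writing 0x1234 to register 0x8000 in MMD 3 of
+ the PHY at MDIO port address 2 is:
+
+ ATTEMPT(mtdHwXmdioWrite(devPtr, 2, 3, 0x8000, 0x1234)); /* address 2.3.0x8000 */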
+ + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdHwXmdioWrite +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + IN MTD_U16 value +); + +/****************************************************************************** + MTD_STATUS mtdHwXmdioRead + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 *data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + dev - MMD device address, 0-31 + reg - MMD register address + + Outputs: + data - Returns 16 bit word from the MDIO + + Returns: + MTD_OK - read successful + MTD_FAIL - read was unsuccessful + + Description: + Reads a 16-bit word from the MDIO + Address is in format X.Y.Z, where X selects the MDIO port (0-31), Y selects the + MMD/Device (0-31), and Z selects the register. + + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdHwXmdioRead +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 reg, + OUT MTD_U16 *data +); + + +/******************************************************************************* + MTD_STATUS mtdHwGetPhyRegField + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - The port number, 0-31 + dev - The MMD device, 0-31 + regAddr - The register's address + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to read + + Outputs: + data - The read register field + + Returns: + MTD_OK on success, or + MTD_FAIL - on error + + Description: + This function reads a specified field from a port's phy register. + It first reads the register, then returns the specified bit + field from what was read. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16 + + Reading a register with latched bits may clear the latched bits. + Use with caution for registers with latched bits. + + To operate on several bits within a register which has latched bits + before reading the register again, first read the register with + mtdHwXmdioRead() to get the register value, then operate on the + register data repeatedly using mtdHwGetRegFieldFromWord() to + take apart the bit fields without re-reading the register again. + + This approach should also be used to reduce IO to the PHY when reading + multiple bit fields (do a single read, then grab different fields + from the register by using mtdHwGetRegFieldFromWord() repeatedly). 
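+
+ A brief sketch of that pattern (the MMD, register and bit positions below
+ are illustrative placeholders only):
+
+ MTD_U16 word, fieldA, fieldB;
+
+ ATTEMPT(mtdHwXmdioRead(devPtr, port, 1, 0x0000, &word)); /* one MDIO read */
+ mtdHwGetRegFieldFromWord(word, 0, 4, &fieldA); /* bits 3:0 */
+ mtdHwGetRegFieldFromWord(word, 10, 2, &fieldB); /* bits 11:10 */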
+ +*******************************************************************************/ +MTD_STATUS mtdHwGetPhyRegField +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data +); + +/******************************************************************************* + MTD_STATUS mtdHwSetPhyRegField + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - The port number, 0-31 + dev - The MMD device, 0-31 + regAddr - The register's address + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to write + data - Data to be written. + + Outputs: + None. + + Returns: + MTD_OK on success, or + MTD_FAIL - on error + + Description: + This function writes to specified field in a port's phy register. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16. + +*******************************************************************************/ +MTD_STATUS mtdHwSetPhyRegField +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 dev, + IN MTD_U16 regAddr, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + IN MTD_U16 data +); + +/******************************************************************************* + MTD_STATUS mtdHwGetRegFieldFromWord + ( + IN MTD_U16 regData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data + ); + + Inputs: + regData - The data previously read from the register + fieldOffset - The field start bit index. (0 - 15) + fieldLength - Number of bits to read + + Outputs: + data - The data from the associated bit field + + Returns: + MTD_OK always + + Description: + This function grabs a value from a bitfield within a word. It could + be used to get the value of a bitfield within a word which was previously + read from the PHY. + + Side effects: + None + + Notes/Warnings: + The sum of fieldOffset & fieldLength parameters must be smaller- + equal to 16 + + This register acts on data passed in. It does no hardware access. + + This function is useful if you want to do 1 register access and then + get different bit fields without doing another register access either + because there are latched bits in the register to avoid another read, + or to keep hardware IO down to improve performance/throughput. + + Example: + + MTD_U16 aword, nibble1, nibble2; + + mtdHwXmdioRead(devPtr,0,MTD_TUNIT_IEEE_PCS_CTRL1,&aword); // Read 3.0 from port 0 + mtdHwGetRegFieldFromWord(aword,0,4,&nibble1); // grab first nibble + mtdHwGetRegFieldFromWord(aword,4,4,&nibble2); // grab second nibble + +*******************************************************************************/ +MTD_STATUS mtdHwGetRegFieldFromWord +( + IN MTD_U16 regData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data +); + +/******************************************************************************* + MTD_STATUS mtdHwSetRegFieldToWord + ( + IN MTD_U16 regData, + IN MTD_U16 bitFieldData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data + ); + + Inputs: + regData - original word to modify + bitFieldData - The data to set the register field to + (must be <= largest value for that bit field, + no range checking is done by this function) + fieldOffset - The field start bit index. 
(0 - 15)
+ fieldLength - Number of bits to write to regData
+
+ Outputs:
+ data - The new/modified regData with the bitfield changed
+
+ Returns:
+ MTD_OK always
+
+ Description:
+ This function writes a value to a bitfield within a word.
+
+ Side effects:
+ None
+
+ Notes/Warnings:
+ The sum of the fieldOffset and fieldLength parameters must be less than or
+ equal to 16.
+
+ This function acts on data passed in. It does no hardware access.
+
+ This function is useful to reduce IO when several bit fields of a register
+ that has been read are to be changed before writing it back.
+ + MTD_U16 aword; + + mtdHwXmdioRead(devPtr,0,MTD_TUNIT_IEEE_PCS_CTRL1,&aword); // Read 3.0 from port 0 + mtdHwSetRegFieldToWord(aword,2,0,4,&aword); // Change first nibble to 2 + mtdHwSetRegFieldToWord(aword,3,4,4,&aword); // Change second nibble to 3 + +*******************************************************************************/ +MTD_STATUS mtdHwSetRegFieldToWord +( + IN MTD_U16 regData, + IN MTD_U16 bitFieldData, + IN MTD_U8 fieldOffset, + IN MTD_U8 fieldLength, + OUT MTD_U16 *data +); + + +/****************************************************************************** +MTD_STATUS mtdWait +( + IN MTD_DEV_PTR devPtr, + IN unsigned x +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + x - number of milliseconds to wait + + Outputs: + None + + Returns: + MTD_OK if wait was successful, MTD_FAIL otherwise + + Description: + Waits X milliseconds + + Side effects: + None + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdWait +( + IN MTD_UINT x +); + +/****************************************************************************** +MTD_STATUS mtdSoftwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + timeoutMs - 0 will not wait for reset to complete, otherwise + waits 'timeout' milliseconds for reset to complete + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL if IO error or timed out + + Description: + Issues a software reset (1.0.15 <= 1) command. Resets firmware and + hardware state machines and returns non-retain bits to their hardware + reset values and retain bits keep their values through the reset. + + If timeoutMs is 0, returns immediately. If timeoutMs is non-zero, + waits up to 'timeoutMs' milliseconds looking for the reset to complete + before returning. Returns MTD_FAIL if times out. + + Side effects: + All "retain" bits keep their values through this reset. Non-"retain"-type + bits are returned to their hardware reset values following this reset. + See the Datasheet for a list of retain bits. + + Notes/Warnings: + Use mtdIsPhyReadyAfterReset() to see if the software reset is complete + before issuing any other MDIO commands following this reset or pass + in non-zero timeoutMs to have this function do it for you. + + This is a T unit software reset only. It may only be issued if the T + unit is ready (1.0.15 is 0) and the T unit is not in low power mode. + +******************************************************************************/ +MTD_STATUS mtdSoftwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + +MTD_STATUS mtdHardwareReset +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 timeoutMs +); + +/****************************************************************************** + MTD_STATUS mtdSetMacInterfaceControl + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, - 33X0/E20X0 devices only - + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - port number, 0-31 + macType - the type of MAC interface being used (the hardware interface). 
One of the following: + MTD_MAC_TYPE_RXAUI_SGMII_AN_EN - selects RXAUI with SGMII AN enabled + MTD_MAC_TYPE_RXAUI_SGMII_AN_DIS - selects RXAUI with SGMII AN disabled (not valid on X3310) + MTD_MAC_TYPE_XAUI_RATE_ADAPT - selects XAUI with rate matching (only valid on X3310) + MTD_MAC_TYPE_RXAUI_RATE_ADAPT - selects RXAUI with rate matching + MTD_MAC_TYPE_XAUI - selects XAUI (only valid on X3310) + MTD_MAC_TYPE_XFI_SGMII_AN_EN - selects XFI with SGMII AN enabled + MTD_MAC_TYPE_XFI_SGMII_AN_DIS - selects XFI with SGMII AN disabled + MTD_MAC_TYPE_XFI_RATE_ADAPT - selects XFI with rate matching + MTD_MAC_TYPE_USXGMII - selects USXGMII + MTD_MAC_LEAVE_UNCHANGED - option to leave this parameter unchanged/as it is + macIfPowerDown - MTD_TRUE if the host interface is always to be powered up + MTD_FALSE if the host interface can be powered down under + certain circumstances (see datasheet) + macIfSnoopSel - If snooping is requested on the other lane, selects the source + MTD_MAC_SNOOP_FROM_NETWORK - source of snooped data is to come from the network + MTD_MAC_SNOOP_FROM_HOST - source of snooped data is to come from the host + MTD_MAC_SNOOP_OFF - snooping is to be turned off + MTD_MAC_SNOOP_LEAVE_UNCHANGED - option to leave this parameter unchanged/as it is + macIfActiveLaneSelect - For redundant host mode, this selects the active lane. 0 or 1 + only. 0 selects 0 as the active lane and 1 as the standby. 1 selects the other way. + macLinkDownSpeed - The speed the mac interface should run when the media side is + link down. One of the following: + MTD_MAC_SPEED_10_MBPS + MTD_MAC_SPEED_100_MBPS + MTD_MAC_SPEED_1000_MBPS + MTD_MAC_SPEED_10_GBPS + MTD_MAC_SPEED_LEAVE_UNCHANGED + macMaxIfSpeed - For X33X0/E20X0 devices only. Can be used to limit the Mac interface speed + MTD_MAX_MAC_SPEED_10G + MTD_MAX_MAC_SPEED_5G + MTD_MAX_MAC_SPEED_2P5G + MTD_MAX_MAC_SPEED_LEAVE_UNCHANGED + MTD_MAX_MAC_SPEED_NOT_APPLICABLE (for 32X0 devices pass this) + doSwReset - MTD_TRUE if a software reset (31.F001.15) should be done after these changes + have been made, or MTD_FALSE otherwise. See note below. + rerunSerdesInitialization - MTD_TRUE if any parameter that is likely to change the speed + of the serdes interface was performed like macLinkDownSpeed or macType will attempt + to reset the H unit serdes (this needs to be done AFTER the soft reset, so if doSwReset + is passed as MTD_FALSE, host must later call + mtdRerunSerdesAutoInitializationUseAutoMode() eventually to re-init the serdes). + + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL if a bad parameter was passed, or an IO error occurs. + + Description: + Changes the above parameters as indicated in 31.F000 and 31.F001 and + optionally does a software reset afterwards for those bits which require a + software reset to take effect. + + Side effects: + None + + Notes/Warnings: + These bits are actually in the C unit, but pertain to the host interface + control so the API called was placed here. + + Changes to the MAC type (31.F001.2:0) do not take effect until a software + reset is performed on the port. + + Changes to macLinkDownSpeed (31.F001.7:6) require 2 software resets to + take effect. This function will do 2 resets if doSwReset is MTD_TRUE + and macLinkDownSpeed is being changed. + + IMPORTANT: the readback reads back the last written value following + a software reset. Writes followed by reads without an intervening + software reset will read back the old bit value for all those bits + requiring a software. 
+ + Because of this, read-modify-writes to different bitfields must have an + intervening software reset to pick up the latest value before doing + another read-modify-write to the register, otherwise the bitfield + may lose the value. + + Suggest always setting doSwReset to MTD_TRUE to avoid problems of + possibly losing changes. + +******************************************************************************/ +MTD_STATUS mtdSetMacInterfaceControl +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 macType, + IN MTD_BOOL macIfPowerDown, + IN MTD_U16 macIfSnoopSel, + IN MTD_U16 macIfActiveLaneSelect, + IN MTD_U16 macLinkDownSpeed, + IN MTD_U16 macMaxIfSpeed, /* 33X0/E20X0 devices only */ + IN MTD_BOOL doSwReset, + IN MTD_BOOL rerunSerdesInitialization +); + +/****************************************************************************** + MTD_STATUS mtdEnableSpeeds + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart + ); + + Inputs: 2 + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + speed_bits - speeds to be advertised during auto-negotiation. One or more + of the following (bits logically OR together): + MTD_ADV_NONE (no bits set) + MTD_SPEED_10M_HD + MTD_SPEED_10M_FD + MTD_SPEED_100M_HD + MTD_SPEED_100M_FD + MTD_SPEED_1GIG_HD + MTD_SPEED_1GIG_FD + MTD_SPEED_10GIG_FD + MTD_SPEED_2P5GIG_FD (88X33X0/88E20X0 family only) + MTD_SPEED_5GIG_FD (88X33X0/88E20X0 family only) + MTD_SPEED_ALL + MTD_SPEED_ALL_33X0 (88X33X0/88E20X0 family only) + + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK if action was successfully taken, MTD_FAIL if not. Also returns + MTD_FAIL if try to force the speed or try to advertise a speed not supported + on this PHY. + + Description: + This function allows the user to select the speeds to be advertised to the + link partner during auto-negotiation. + + First, this function enables auto-negotiation and XNPs by calling + mtdUndoForcedSpeed(). + + The function takes in a 16 bit value and sets the appropriate bits in MMD + 7 to have those speeds advertised. + + The function also checks if the input parameter is MTD_ADV_NONE, in which case + all speeds are disabled effectively disabling the phy from training + (but not disabling auto-negotiation). + + If anRestart is MTD_TRUE, an auto-negotiation restart is issued making the change + immediate. If anRestart is MTD_FALSE, the change will not take effect until the + next time auto-negotiation restarts. + + Side effects: + Setting speed in 1.0 to 10GBASE-T has the effect of enabling XNPs in 7.0 and + enabling auto-negotiation in 7.0. 
+ + Notes/Warnings: + + Example: + To train the highest speed matching the far end among + either 1000BASE-T Full-duplex or 10GBASE-T: + mtdEnableSpeeds(devPtr,port,MTD_SPEED_1GIG_FD | MTD_SPEED_10GIG_FD, MTD_TRUE); + + To allow only 10GBASE-T to train: + mtdEnableSpeeds(devPtr,port,MTD_SPEED_10GIG_FD, MTD_TRUE); + + To disable all speeds (but AN will still be running, just advertising no + speeds) + mtdEnableSpeeds(devPtr,port,MTD_ADV_NONE, MTD_TRUE); + + This function is not to be used to disable autonegotiation and force the speed + to 10BASE-T or 100BASE-TX. Use mtdForceSpeed() for this. + + 88X33X0 Z1/Z2 and E20X0 Z2 are not supported starting with API version 1.2. + Version 1.2 and later require A0 revision of these devices. + +******************************************************************************/ +MTD_STATUS mtdEnableSpeeds +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U16 speed_bits, + IN MTD_BOOL anRestart +); + +MTD_STATUS mtdGetAutonegSpeedDuplexResolution +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U16 *speedResolution +); + +MTD_STATUS mtdAutonegIsSpeedDuplexResolutionDone +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *anSpeedResolutionDone +); + +/****************************************************************************/ +/******************************************************************* + Firmware Version + *******************************************************************/ +/****************************************************************************/ + +/****************************************************************************** +MTD_STATUS mtdGetFirmwareVersion +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *major, + OUT MTD_U8 *minor, + OUT MTD_U8 *inc, + OUT MTD_U8 *test +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + major - major version, X.Y.Z.W, the X + minor - minor version, X.Y.Z.W, the Y + inc - incremental version, X.Y.Z.W, the Z + test - test version, X.Y.Z.W, the W, should be 0 for released code, + non-zero indicates this is a non-released code + + Returns: + MTD_FAIL if version can't be queried or firmware is in download mode + (meaning all version numbers are 0), MTD_OK otherwise + + Description: + This function reads the firmware version number and stores it in the + pointers passed in by the user. + + Side effects: + None + + Notes/Warnings: + This function returns all 0's if the phy is in download mode. The phy + application code must have started and be ready before issuing this + command. + +******************************************************************************/ +MTD_STATUS mtdGetFirmwareVersion +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *major, + OUT MTD_U8 *minor, + OUT MTD_U8 *inc, + OUT MTD_U8 *test +); + +/****************************************************************************** +MTD_STATUS mtdSetPauseAdvertisement +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U8 pauseType, + IN MTD_BOOL anRestart +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + pauseType - one of the following: + MTD_SYM_PAUSE, + MTD_ASYM_PAUSE, + MTD_SYM_ASYM_PAUSE or + MTD_CLEAR_PAUSE. + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. 
If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, if action was successful or failed + + Description: + This function sets the asymmetric and symmetric pause bits in the technology + ability field in the AN Advertisement register and optionally restarts + auto-negotiation to use the new values. This selects what type of pause + is to be advertised to the far end MAC during auto-negotiation. If + auto-negotiation is restarted, it is enabled first. + + Sets entire 2-bit field to the value passed in pauseType. + + To clear both bits, pass in MTD_CLEAR_PAUSE. + + Side effects: + None + + Notes/Warnings: + This function will not take effect unless the auto-negotiation is restarted. + +******************************************************************************/ +MTD_STATUS mtdSetPauseAdvertisement +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_U32 pauseType, + IN MTD_BOOL anRestart +); + + +/****************************************************************************** +MTD_STATUS mtdGetLPAdvertisedPause +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *pauseBits +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + pauseBits - setting of link partner's pause bits based on bit definitions above in + mtdmtdSetPauseAdvertisement() + + Returns: + MTD_OK or MTD_FAIL, based on whether the query succeeded or failed. Returns + MTD_FAIL and MTD_CLEAR_PAUSE if AN is not complete. + + Description: + This function reads 7.19 (LP Base page ability) and returns the advertised + pause setting that was received from the link partner. + + Side effects: + None + + Notes/Warnings: + The user must make sure auto-negotiation has completed by calling + mtdAutonegIsCompleted() prior to calling this function. + +******************************************************************************/ +MTD_STATUS mtdGetLPAdvertisedPause +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_U8 *pauseBits +); + + + +/****************************************************************************** +MTD_STATUS mtdGetPhyRevision +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_DEVICE_ID *phyRev, + OUT MTD_U8 *numPorts, + OUT MTD_U8 *thisPort +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + phyRev - revision of this chip, see MTD_DEVICE_ID definition for + a list of chip revisions with different options + numPorts - number of ports on this chip (see note below) + thisPort - this port number 0-1, or 0-4 + + Returns: + MTD_OK if query was successful, MTD_FAIL if not. + + Will return MTD_FAIL on an unsupported PHY (but will attempt to + return correct version). See below for a list of unsupported PHYs. + + Description: + Determines the PHY revision and returns the value in phyRev. + See definition of MTD_DEVICE_ID for a list of available + devices and capabilities. + + Side effects: + None. + + Notes/Warnings: + The phyRev can be used to determine number PHY revision, + number of ports, which port this is from PHY perspective + (0-based indexing 0...3 or 0..2) and what capabilities + the PHY has. 
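+
+ For example (a sketch only; port is assumed to hold a valid MDIO address),
+ the capability macros defined earlier in this header can be applied to the
+ returned revision:
+
+ MTD_DEVICE_ID phyRev;
+ MTD_U8 numPorts, thisPort;
+
+ if (mtdGetPhyRevision(devPtr, port, &phyRev, &numPorts, &thisPort) == MTD_OK) {
+     if (MTD_IS_X33X0_BASE(phyRev) && MTD_IS_MACSEC_CAPABLE(phyRev)) {
+         /* an 88X33X0-based part with the MacSec/PTP module present */
+     }
+ }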
+ + If phyRev is MTD_REV_UNKNOWN, numPorts and thisPort will be returned + as 0 and the function will return MTD_FAIL. + + If T-unit is in download mode, thisPort will be returned as 0. + + 88X33X0 Z1/Z2 is not supported starting with version 1.2 of API. + E20X0 Z2 is not supported starting with version 1.2 of API. + +******************************************************************************/ +MTD_STATUS mtdGetPhyRevision +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_DEVICE_ID *phyRev, + OUT MTD_U8 *numPorts, + OUT MTD_U8 *thisPort +); + + + +/***************************************************************************** +MTD_STATUS mtdGetForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + speedIsForced - MTD_TRUE if an is disabled (1.0.12 == 0) AND + the speed in 1.0.13/6 is set to 10BT or 100BT (speeds which do + not require an to train). + forcedSpeed - one of the following if speedIsForced is MTD_TRUE + MTD_SPEED_10M_HD_AN_DIS - speed forced to 10BT half-duplex + MTD_SPEED_10M_FD_AN_DIS - speed forced to 10BT full-duplex + MTD_SPEED_100M_HD_AN_DIS - speed forced to 100BT half-duplex + MTD_SPEED_100M_FD_AN_DIS - speed forced to 100BT full-duplex + + Returns: + MTD_OK if the query was successful, or MTD_FAIL if not + + Description: + Checks if AN is disabled (7.0.12=0) and if the speed select in + register 1.0.13 and 1.0.6 is set to either 10BT or 100BT speeds. If + all of this is true, returns MTD_TRUE in speedIsForced along with + the speed/duplex setting in forcedSpeedBits. If any of this is + false (AN is enabled, or the speed is set to 1000BT or 10GBT), then + speedIsForced is returned MTD_FALSE and the forcedSpeedBit value + is invalid. + + Notes/Warnings: + None. + +******************************************************************************/ +MTD_STATUS mtdGetForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + OUT MTD_BOOL *speedIsForced, + OUT MTD_U16 *forcedSpeed +); + + +/***************************************************************************** +MTD_STATUS mtdUndoForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart +); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + anRestart - this takes the value of MTD_TRUE or MTD_FALSE and indicates + if auto-negotiation should be restarted following the speed + enable change. If this is MTD_FALSE, the change will not + take effect until AN is restarted in some other way (link + drop, toggle low power, toggle AN enable, toggle soft reset). + + If this is MTD_TRUE and AN has been disabled, it will be + enabled before being restarted. + + Outputs: + None + + Returns: + MTD_OK if the change was successful, or MTD_FAIL if not + + Description: + Sets the speed bits in 1.0 back to the power-on default of 11b + (10GBASE-T). Enables auto-negotiation. + + Does a software reset of the T unit and wait until it is complete before + enabling AN and returning. + + Notes/Warnings: + None. 
+ +******************************************************************************/ +MTD_STATUS mtdUndoForcedSpeed +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port, + IN MTD_BOOL anRestart +); + + +/****************************************************************************** + MTD_STATUS mtdAutonegEnable + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, if action was successful or not + + Description: + Re-enables auto-negotiation. + + Side effects: + + Notes/Warnings: + Restart autonegation will not take effect if AN is disabled. + +******************************************************************************/ +MTD_STATUS mtdAutonegEnable +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port +); + + + +/****************************************************************************** + MTD_STATUS mtdAutonegRestart + ( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port + ); + + Inputs: + devPtr - pointer to MTD_DEV initialized by mtdLoadDriver() call + port - MDIO port address, 0-31 + + Outputs: + None + + Returns: + MTD_OK or MTD_FAIL, depending on if action was successful + + Description: + Restarts auto-negotiation. The bit is self-clearing. If the link is up, + the link will drop and auto-negotiation will start again. + + Side effects: + None. + + Notes/Warnings: + Restarting auto-negotiation will have no effect if auto-negotiation is + disabled. + + This function is important as it is necessary to restart auto-negotiation + after changing many auto-negotiation settings before the changes will take + effect. + +******************************************************************************/ +MTD_STATUS mtdAutonegRestart +( + IN MTD_DEV_PTR devPtr, + IN MTD_U16 port +); + + + +/****************************************************************************** +MTD_STATUS mtdIsPhyRevisionValid +( + IN MTD_DEVICE_ID phyRev +); + + + Inputs: + phyRev - a revision id to be checked against MTD_DEVICE_ID type + + Outputs: + None + + Returns: + MTD_OK if phyRev is a valid revision, MTD_FAIL otherwise + + Description: + Takes phyRev and returns MTD_OK if it is one of the MTD_DEVICE_ID + type, otherwise returns MTD_FAIL. + + Side effects: + None. + + Notes/Warnings: + None + +******************************************************************************/ +MTD_STATUS mtdIsPhyRevisionValid +( + IN MTD_DEVICE_ID phyRev +); + +#if C_LINKAGE +#if defined __cplusplus +} +#endif +#endif + +#endif /* _TXGBE_MTD_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_param.c b/drivers/net/ethernet/netswift/txgbe/txgbe_param.c new file mode 100644 index 0000000000000000000000000000000000000000..214993fb1a9b91852812ecf69c67f2f7f06524ae --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_param.c @@ -0,0 +1,1191 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_param.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include +#include + +#include "txgbe.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define TXGBE_MAX_NIC 32 +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +#define TXGBE_PARAM_INIT { [0 ... TXGBE_MAX_NIC] = OPTION_UNSET } + +#define TXGBE_PARAM(X, desc) \ + static int X[TXGBE_MAX_NIC+1] = TXGBE_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array(X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* ffe_main (KR/KX4/KX/SFI) + * + * Valid Range: 0-60 + * + * Default Value: 27 + */ +TXGBE_PARAM(FFE_MAIN, + "TX_EQ MAIN (0 - 40)"); +#define TXGBE_DEFAULT_FFE_MAIN 27 + +/* ffe_pre + * + * Valid Range: 0-60 + * + * Default Value: 8 + */ + +TXGBE_PARAM(FFE_PRE, + "TX_EQ PRE (0 - 40)"); +#define TXGBE_DEFAULT_FFE_PRE 8 + +/* ffe_post (VF Alloc Mode) + * + * Valid Range: 0-60 + * + * Default Value: 44 + */ + +TXGBE_PARAM(FFE_POST, + "TX_EQ POST (0 - 40)"); +#define TXGBE_DEFAULT_FFE_POST 44 + +/* ffe_set + * + * Valid Range: 0-4 + * + * Default Value: 0 + */ + +TXGBE_PARAM(FFE_SET, + "TX_EQ SET must choose to take effect (0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); +#define TXGBE_DEFAULT_FFE_SET 0 + +/* backplane_mode + * + * Valid Range: 0-4 + * - 0 - NULL + * - 1 - sfi + * - 2 - kr + * - 3 - kx4 + * - 4 - kx + * + * Default Value: 0 + */ + +TXGBE_PARAM(backplane_mode, + "Backplane Mode Support(0 = NULL, 1 = sfi, 2 = kr, 3 = kx4, 4 = kx)"); + +#define TXGBE_BP_NULL 0 +#define TXGBE_BP_SFI 1 +#define TXGBE_BP_KR 2 +#define TXGBE_BP_KX4 3 +#define TXGBE_BP_KX 4 +#define TXGBE_DEFAULT_BP_MODE TXGBE_BP_NULL + +/* backplane_auto + * + * Valid Range: 0-1 + * - 0 - NO AUTO + * - 1 - AUTO + * Default Value: 0 + */ + +TXGBE_PARAM(backplane_auto, + "Backplane AUTO mode (0 = NO AUTO, 1 = AUTO)"); + +#define TXGBE_BP_NAUTO 0 +#define TXGBE_BP_AUTO 1 +#define TXGBE_DEFAULT_BP_AUTO -1 + +/* VF_alloc_mode (VF Alloc Mode) + * + * Valid Range: 0-1 + * - 0 - 2 * 64 + * - 1 - 4 * 32 + * - 2 - 8 * 16 + * + * Default Value: 2 + */ + +TXGBE_PARAM(vf_alloc_mode, + "Change VF Alloc Mode (0 = 2*64, 1 = 4*32, 2 = 8*16)"); + +#define TXGBE_2Q 0 +#define TXGBE_4Q 1 +#define TXGBE_8Q 2 +#define TXGBE_DEFAULT_NUMQ TXGBE_2Q + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ + +TXGBE_PARAM(InterruptType, + "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default IntMode (deprecated)"); + +TXGBE_PARAM(IntMode, + "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); + +#define TXGBE_INT_LEGACY 0 +#define TXGBE_INT_MSI 1 +#define TXGBE_INT_MSIX 2 +#define TXGBE_DEFAULT_INT TXGBE_INT_MSIX + +/* MQ - Multiple Queue enable/disable + * + * Valid Range: 0, 1 + * - 0 - disables MQ + * - 1 - enables MQ + * + * Default Value: 1 + */ + +TXGBE_PARAM(MQ, + "Disable or enable Multiple Queues, default 1"); + +/* RSS - Receive-Side Scaling (RSS) Descriptor Queues + * + * Valid Range: 0-64 + * - 0 - enables RSS and sets 
the Desc. Q's to min(64, num_online_cpus()). + * - 1-64 - enables RSS and sets the Desc. Q's to the specified value. + * + * Default Value: 0 + */ + +TXGBE_PARAM(RSS, + "Number of Receive-Side Scaling Descriptor Queues, " + "default 0=number of cpus"); + +/* VMDQ - Virtual Machine Device Queues (VMDQ) + * + * Valid Range: 1-16 + * - 1 Disables VMDQ by allocating only a single queue. + * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. + * + * Default Value: 1 + */ + +#define TXGBE_DEFAULT_NUM_VMDQ 8 + +TXGBE_PARAM(VMDQ, + "Number of Virtual Machine Device Queues: 0/1 = disable, " + "2-16 enable (default=" XSTRINGIFY(TXGBE_DEFAULT_NUM_VMDQ) ")"); + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 980-500000 (0=off, 1=dynamic) + * + * Default Value: 1 + */ +#define DEFAULT_ITR 1 +TXGBE_PARAM(InterruptThrottleRate, + "Maximum interrupts per second, per vector, " + "(0,1,980-500000), default 1"); + +#define MAX_ITR TXGBE_MAX_INT_RATE +#define MIN_ITR TXGBE_MIN_INT_RATE + +/* LLIPort (Low Latency Interrupt TCP Port) + * + * Valid Range: 0 - 65535 + * + * Default Value: 0 (disabled) + */ +TXGBE_PARAM(LLIPort, + "Low Latency Interrupt TCP Port (0-65535)"); + +#define DEFAULT_LLIPORT 0 +#define MAX_LLIPORT 0xFFFF +#define MIN_LLIPORT 0 + +/* LLISize (Low Latency Interrupt on Packet Size) + * + * Valid Range: 0 - 1500 + * + * Default Value: 0 (disabled) + */ + +TXGBE_PARAM(LLISize, + "Low Latency Interrupt on Packet Size (0-1500)"); + +#define DEFAULT_LLISIZE 0 +#define MAX_LLISIZE 1500 +#define MIN_LLISIZE 0 + +/* LLIEType (Low Latency Interrupt Ethernet Type) + * + * Valid Range: 0 - 0x8fff + * + * Default Value: 0 (disabled) + */ + +TXGBE_PARAM(LLIEType, + "Low Latency Interrupt Ethernet Protocol Type"); + +#define DEFAULT_LLIETYPE 0 +#define MAX_LLIETYPE 0x8fff +#define MIN_LLIETYPE 0 + +/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) + * + * Valid Range: 0 - 7 + * + * Default Value: 0 (disabled) + */ + +TXGBE_PARAM(LLIVLANP, + "Low Latency Interrupt on VLAN priority threshold"); + +#define DEFAULT_LLIVLANP 0 +#define MAX_LLIVLANP 7 +#define MIN_LLIVLANP 0 + +/* Flow Director packet buffer allocation level + * + * Valid Range: 1-3 + * 1 = 8k hash/2k perfect, + * 2 = 16k hash/4k perfect, + * 3 = 32k hash/8k perfect + * + * Default Value: 0 + */ + +TXGBE_PARAM(FdirPballoc, + "Flow Director packet buffer allocation level:\n" + "\t\t\t1 = 8k hash filters or 2k perfect filters\n" + "\t\t\t2 = 16k hash filters or 4k perfect filters\n" + "\t\t\t3 = 32k hash filters or 8k perfect filters"); + +#define TXGBE_DEFAULT_FDIR_PBALLOC TXGBE_FDIR_PBALLOC_64K + +/* Software ATR packet sample rate + * + * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection + * + * Default Value: 20 + */ + +TXGBE_PARAM(AtrSampleRate, + "Software ATR Tx packet sample rate"); + +#define TXGBE_MAX_ATR_SAMPLE_RATE 255 +#define TXGBE_MIN_ATR_SAMPLE_RATE 1 +#define TXGBE_ATR_SAMPLE_RATE_OFF 0 +#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 + +/* Enable/disable Large Receive Offload + * + * Valid Values: 0(off), 1(on) + * + * Default Value: 1 + */ + +TXGBE_PARAM(LRO, + "Large Receive Offload (0,1), default 1 = on"); + +/* Enable/disable support for untested SFP+ modules on adapters + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 0 + */ + +TXGBE_PARAM(allow_unsupported_sfp, + "Allow unsupported and untested " + "SFP+ modules on adapters, default 0 = Disable"); + +/* Enable/disable support for DMA coalescing + * + * Valid Values: 0(off), 41 - 10000(on) + * + * 
Default Value: 0 + */ + +TXGBE_PARAM(dmac_watchdog, + "DMA coalescing watchdog in microseconds (0,41-10000)," + "default 0 = off"); + +/* Enable/disable support for VXLAN rx checksum offload + * + * Valid Values: 0(Disable), 1(Enable) + * + * Default Value: 1 on hardware that supports it + */ + +TXGBE_PARAM(vxlan_rx, + "VXLAN receive checksum offload (0,1), default 1 = Enable"); + +/* Rx buffer mode + * + * Valid Range: 0-1 0 = no header split, 1 = hdr split + * + * Default Value: 0 + */ + +TXGBE_PARAM(RxBufferMode, + "0=(default)no header split\n" + "\t\t\t1=hdr split for recognized packet\n"); + +#define TXGBE_RXBUFMODE_NO_HEADER_SPLIT 0 +#define TXGBE_RXBUFMODE_HEADER_SPLIT 1 +#define TXGBE_DEFAULT_RXBUFMODE TXGBE_RXBUFMODE_NO_HEADER_SPLIT + +/* Cloud Switch mode + * + * Valid Range: 0-1 0 = disable Cloud Switch, 1 = enable Cloud Switch + * + * Default Value: 0 + */ + +TXGBE_PARAM(CloudSwitch, + "Cloud Switch (0,1), default 0 = disable, 1 = enable"); + +struct txgbe_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct txgbe_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int txgbe_validate_option(u32 *value, + struct txgbe_option *opt) +{ + int val = (int)*value; + + if (val == OPTION_UNSET) { + txgbe_info("txgbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (val) { + case OPTION_ENABLED: + txgbe_info("txgbe: %s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + txgbe_info("txgbe: %s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((val >= opt->arg.r.min && val <= opt->arg.r.max) || + val == opt->def) { + if (opt->msg) + txgbe_info("txgbe: %s set to %d, %s\n", + opt->name, val, opt->msg); + else + txgbe_info("txgbe: %s set to %d\n", + opt->name, val); + return 0; + } + break; + case list_option: { + int i; + const struct txgbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (val == ent->i) { + if (ent->str[0] != '\0') + txgbe_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG_ON(1); + } + + txgbe_info("txgbe: Invalid %s specified (%d), %s\n", + opt->name, val, opt->err); + *value = (u32)opt->def; + return -1; +} + +/** + * txgbe_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
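 *
 * For reference, each TXGBE_PARAM(X, desc) use above expands, per its
 * definition, to a per-board array plus a count of user-supplied values:
 *
 *	static int X[TXGBE_MAX_NIC + 1] = { [0 ... TXGBE_MAX_NIC] = OPTION_UNSET };
 *	static unsigned int num_X;
 *	module_param_array(X, int, &num_X, 0);
 *	MODULE_PARM_DESC(X, desc);
 *
 * so X[bd] holds the value supplied for board number bd and num_X is the
 * number of comma-separated values passed on the module command line
 * (e.g. "modprobe txgbe RSS=4,8" sets RSS only for the first two boards).
 * Boards with bd >= num_X fall back to the option's default in the blocks
 * below.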
+ **/ +void txgbe_check_options(struct txgbe_adapter *adapter) +{ + u32 bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + struct txgbe_ring_feature *feature = adapter->ring_feature; + u32 vmdq; + + if (bd >= TXGBE_MAX_NIC) { + txgbe_notice( + "Warning: no configuration for board #%d\n", bd); + txgbe_notice("Using defaults for all values\n"); + } + { /* MAIN */ + u32 ffe_main; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_MAIN", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_MAIN), + .def = TXGBE_DEFAULT_FFE_MAIN, + .arg = { .r = { .min = 0, + .max = 60} } + }; + + if (num_FFE_MAIN > bd) { + ffe_main = FFE_MAIN[bd]; + if (ffe_main == OPTION_UNSET) + ffe_main = FFE_MAIN[bd]; + txgbe_validate_option(&ffe_main, &opt); + adapter->ffe_main = ffe_main; + } else { + adapter->ffe_main = 27; + } + } + + { /* PRE */ + u32 ffe_pre; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_PRE", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_PRE), + .def = TXGBE_DEFAULT_FFE_PRE, + .arg = { .r = { .min = 0, + .max = 60} } + }; + + if (num_FFE_PRE > bd) { + ffe_pre = FFE_PRE[bd]; + if (ffe_pre == OPTION_UNSET) + ffe_pre = FFE_PRE[bd]; + txgbe_validate_option(&ffe_pre, &opt); + adapter->ffe_pre = ffe_pre; + } else { + adapter->ffe_pre = 8; + } + } + + { /* POST */ + u32 ffe_post; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_POST", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_POST), + .def = TXGBE_DEFAULT_FFE_POST, + .arg = { .r = { .min = 0, + .max = 60} } + }; + + if (num_FFE_POST > bd) { + ffe_post = FFE_POST[bd]; + if (ffe_post == OPTION_UNSET) + ffe_post = FFE_POST[bd]; + txgbe_validate_option(&ffe_post, &opt); + adapter->ffe_post = ffe_post; + } else { + adapter->ffe_post = 44; + } + } + + { /* ffe_set */ + u32 ffe_set; + static struct txgbe_option opt = { + .type = range_option, + .name = "FFE_SET", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_FFE_SET), + .def = TXGBE_DEFAULT_FFE_SET, + .arg = { .r = { .min = 0, + .max = 4} } + }; + + if (num_FFE_SET > bd) { + ffe_set = FFE_SET[bd]; + if (ffe_set == OPTION_UNSET) + ffe_set = FFE_SET[bd]; + txgbe_validate_option(&ffe_set, &opt); + adapter->ffe_set = ffe_set; + } else { + adapter->ffe_set = 0; + } + } + + { /* backplane_mode */ + u32 bp_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "backplane_mode", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_BP_MODE), + .def = TXGBE_DEFAULT_BP_MODE, + .arg = { .r = { .min = 0, + .max = 4} } + }; + + if (num_backplane_mode > bd) { + bp_mode = backplane_mode[bd]; + if (bp_mode == OPTION_UNSET) + bp_mode = backplane_mode[bd]; + txgbe_validate_option(&bp_mode, &opt); + adapter->backplane_mode = bp_mode; + } else { + adapter->backplane_mode = 0; + } + } + + { /* auto mode */ + u32 bp_auto; + static struct txgbe_option opt = { + .type = range_option, + .name = "bp_auto", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_BP_AUTO), + .def = TXGBE_DEFAULT_BP_AUTO, + .arg = { .r = { .min = 0, + .max = 2} } + }; + + if (num_backplane_auto > bd) { + bp_auto = backplane_auto[bd]; + if (bp_auto == OPTION_UNSET) + bp_auto = backplane_auto[bd]; + txgbe_validate_option(&bp_auto, &opt); + adapter->backplane_auto = bp_auto; + } else { + adapter->backplane_auto = -1; + } + } + + { /* VF_alloc_mode */ + u32 vf_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "vf_alloc_mode", + .err = + "using default 
of "__MODULE_STRING(TXGBE_DEFAULT_NUMQ), + .def = TXGBE_DEFAULT_NUMQ, + .arg = { .r = { .min = TXGBE_2Q, + .max = TXGBE_8Q} } + }; + + if (num_vf_alloc_mode > bd) { + vf_mode = vf_alloc_mode[bd]; + if (vf_mode == OPTION_UNSET) + vf_mode = vf_alloc_mode[bd]; + txgbe_validate_option(&vf_mode, &opt); + switch (vf_mode) { + case TXGBE_8Q: + adapter->vf_mode = 15; + break; + case TXGBE_4Q: + adapter->vf_mode = 31; + break; + case TXGBE_2Q: + default: + adapter->vf_mode = 63; + break; + } + } else { + adapter->vf_mode = 63; + } + } + { /* Interrupt Mode */ + u32 int_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = + "using default of "__MODULE_STRING(TXGBE_DEFAULT_INT), + .def = TXGBE_DEFAULT_INT, + .arg = { .r = { .min = TXGBE_INT_LEGACY, + .max = TXGBE_INT_MSIX} } + }; + + if (num_IntMode > bd || num_InterruptType > bd) { + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = InterruptType[bd]; + txgbe_validate_option(&int_mode, &opt); + switch (int_mode) { + case TXGBE_INT_MSIX: + if (!(*aflags & TXGBE_FLAG_MSIX_CAPABLE)) + txgbe_info( + "Ignoring MSI-X setting; " + "support unavailable\n"); + break; + case TXGBE_INT_MSI: + if (!(*aflags & TXGBE_FLAG_MSI_CAPABLE)) { + txgbe_info( + "Ignoring MSI setting; " + "support unavailable\n"); + } else { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + } + break; + case TXGBE_INT_LEGACY: + default: + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + break; + } + } else { + /* default settings */ + if (opt.def == TXGBE_INT_MSIX && + *aflags & TXGBE_FLAG_MSIX_CAPABLE) { + *aflags |= TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else if (opt.def == TXGBE_INT_MSI && + *aflags & TXGBE_FLAG_MSI_CAPABLE) { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags |= TXGBE_FLAG_MSI_CAPABLE; + } else { + *aflags &= ~TXGBE_FLAG_MSIX_CAPABLE; + *aflags &= ~TXGBE_FLAG_MSI_CAPABLE; + } + } + } + { /* Multiple Queue Support */ + static struct txgbe_option opt = { + .type = enable_option, + .name = "Multiple Queue Support", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_MQ > bd) { + u32 mq = MQ[bd]; + txgbe_validate_option(&mq, &opt); + if (mq) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } else { + if (opt.def == OPTION_ENABLED) + *aflags |= TXGBE_FLAG_MQ_CAPABLE; + else + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } + /* Check Interoperability */ + if ((*aflags & TXGBE_FLAG_MQ_CAPABLE) && + !(*aflags & TXGBE_FLAG_MSIX_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiple queues are not supported while MSI-X " + "is disabled. 
Disabling Multiple Queues.\n"); + *aflags &= ~TXGBE_FLAG_MQ_CAPABLE; + } + } + + { /* Receive-Side Scaling (RSS) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Receive-Side Scaling (RSS)", + .err = "using default.", + .def = 0, + .arg = { .r = { .min = 0, + .max = 1} } + }; + u32 rss = RSS[bd]; + /* adjust Max allowed RSS queues based on MAC type */ + opt.arg.r.max = txgbe_max_rss_indices(adapter); + + if (num_RSS > bd) { + txgbe_validate_option(&rss, &opt); + /* base it off num_online_cpus() with hardware limit */ + if (!rss) + rss = min_t(int, opt.arg.r.max, + num_online_cpus()); + else + feature[RING_F_FDIR].limit = (u16)rss; + + feature[RING_F_RSS].limit = (u16)rss; + } else if (opt.def == 0) { + rss = min_t(int, txgbe_max_rss_indices(adapter), + num_online_cpus()); + feature[RING_F_RSS].limit = rss; + } + /* Check Interoperability */ + if (rss > 1) { + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "Multiqueue is disabled. " + "Limiting RSS.\n"); + feature[RING_F_RSS].limit = 1; + } + } + adapter->flags2 |= TXGBE_FLAG2_RSS_ENABLED; + } + { /* Virtual Machine Device Queues (VMDQ) */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Virtual Machine Device Queues (VMDQ)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = TXGBE_MAX_VMDQ_INDICES + } } + }; + + if (num_VMDQ > bd) { + vmdq = VMDQ[bd]; + + txgbe_validate_option(&vmdq, &opt); + + /* zero or one both mean disabled from our driver's + * perspective */ + if (vmdq > 1) { + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + } else + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = (u16)vmdq; + } else { + if (opt.def == OPTION_DISABLED) + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + else + *aflags |= TXGBE_FLAG_VMDQ_ENABLED; + + feature[RING_F_VMDQ].limit = opt.def; + } + /* Check Interoperability */ + if (*aflags & TXGBE_FLAG_VMDQ_ENABLED) { + if (!(*aflags & TXGBE_FLAG_MQ_CAPABLE)) { + DPRINTK(PROBE, INFO, + "VMDQ is not supported while multiple " + "queues are disabled. 
" + "Disabling VMDQ.\n"); + *aflags &= ~TXGBE_FLAG_VMDQ_ENABLED; + feature[RING_F_VMDQ].limit = 0; + } + } + } + + { /* Interrupt Throttling Rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of "__MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + u32 itr = InterruptThrottleRate[bd]; + switch (itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + adapter->rx_itr_setting = 0; + break; + case 1: + DPRINTK(PROBE, INFO, "dynamic interrupt " + "throttling enabled\n"); + adapter->rx_itr_setting = 1; + break; + default: + txgbe_validate_option(&itr, &opt); + /* the first bit is used as control */ + adapter->rx_itr_setting = (u16)((1000000/itr) << 2); + break; + } + adapter->tx_itr_setting = adapter->rx_itr_setting; + } else { + adapter->rx_itr_setting = opt.def; + adapter->tx_itr_setting = opt.def; + } + } + + { /* Low Latency Interrupt TCP Port*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt TCP Port", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIPORT), + .def = DEFAULT_LLIPORT, + .arg = { .r = { .min = MIN_LLIPORT, + .max = MAX_LLIPORT } } + }; + + if (num_LLIPort > bd) { + adapter->lli_port = LLIPort[bd]; + if (adapter->lli_port) { + txgbe_validate_option(&adapter->lli_port, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_port = opt.def; + } + } + { /* Low Latency Interrupt on Packet Size */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Packet Size", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLISIZE), + .def = DEFAULT_LLISIZE, + .arg = { .r = { .min = MIN_LLISIZE, + .max = MAX_LLISIZE } } + }; + + if (num_LLISize > bd) { + adapter->lli_size = LLISize[bd]; + if (adapter->lli_size) { + txgbe_validate_option(&adapter->lli_size, &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_size = opt.def; + } + } + { /* Low Latency Interrupt EtherType*/ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on Ethernet Protocol " + "Type", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIETYPE), + .def = DEFAULT_LLIETYPE, + .arg = { .r = { .min = MIN_LLIETYPE, + .max = MAX_LLIETYPE } } + }; + + if (num_LLIEType > bd) { + adapter->lli_etype = LLIEType[bd]; + if (adapter->lli_etype) { + txgbe_validate_option(&adapter->lli_etype, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_etype = opt.def; + } + } + { /* LLI VLAN Priority */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Low Latency Interrupt on VLAN priority " + "threshold", + .err = "using default of " + __MODULE_STRING(DEFAULT_LLIVLANP), + .def = DEFAULT_LLIVLANP, + .arg = { .r = { .min = MIN_LLIVLANP, + .max = MAX_LLIVLANP } } + }; + + if (num_LLIVLANP > bd) { + adapter->lli_vlan_pri = LLIVLANP[bd]; + if (adapter->lli_vlan_pri) { + txgbe_validate_option(&adapter->lli_vlan_pri, + &opt); + } else { + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + } + } else { + adapter->lli_vlan_pri = opt.def; + } + } + + { /* Flow Director packet buffer allocation */ + u32 fdir_pballoc_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Flow Director packet buffer 
allocation", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_FDIR_PBALLOC), + .def = TXGBE_DEFAULT_FDIR_PBALLOC, + .arg = {.r = {.min = TXGBE_FDIR_PBALLOC_64K, + .max = TXGBE_FDIR_PBALLOC_256K} } + }; + const char *pstring; + + if (num_FdirPballoc > bd) { + fdir_pballoc_mode = FdirPballoc[bd]; + txgbe_validate_option(&fdir_pballoc_mode, &opt); + switch (fdir_pballoc_mode) { + case TXGBE_FDIR_PBALLOC_256K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_256K; + pstring = "256kB"; + break; + case TXGBE_FDIR_PBALLOC_128K: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_128K; + pstring = "128kB"; + break; + case TXGBE_FDIR_PBALLOC_64K: + default: + adapter->fdir_pballoc = TXGBE_FDIR_PBALLOC_64K; + pstring = "64kB"; + break; + } + DPRINTK(PROBE, INFO, "Flow Director will be allocated " + "%s of packet buffer\n", pstring); + } else { + adapter->fdir_pballoc = opt.def; + } + + } + { /* Flow Director ATR Tx sample packet rate */ + static struct txgbe_option opt = { + .type = range_option, + .name = "Software ATR Tx packet sample rate", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_ATR_SAMPLE_RATE), + .def = TXGBE_DEFAULT_ATR_SAMPLE_RATE, + .arg = {.r = {.min = TXGBE_ATR_SAMPLE_RATE_OFF, + .max = TXGBE_MAX_ATR_SAMPLE_RATE} } + }; + static const char atr_string[] = + "ATR Tx Packet sample rate set to"; + + if (num_AtrSampleRate > bd) { + adapter->atr_sample_rate = AtrSampleRate[bd]; + + if (adapter->atr_sample_rate) { + txgbe_validate_option(&adapter->atr_sample_rate, + &opt); + DPRINTK(PROBE, INFO, "%s %d\n", atr_string, + adapter->atr_sample_rate); + } + } else { + adapter->atr_sample_rate = opt.def; + } + } + + { /* LRO - Set Large Receive Offload */ + struct txgbe_option opt = { + .type = enable_option, + .name = "LRO - Large Receive Offload", + .err = "defaulting to Disabled", + .def = OPTION_ENABLED + }; + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) + opt.def = OPTION_DISABLED; + if (num_LRO > bd) { + u32 lro = LRO[bd]; + txgbe_validate_option(&lro, &opt); + if (lro) + netdev->features |= NETIF_F_LRO; + else + netdev->features &= ~NETIF_F_LRO; + } else if (opt.def == OPTION_ENABLED) { + netdev->features |= NETIF_F_LRO; + } else { + netdev->features &= ~NETIF_F_LRO; + } + + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & TXGBE_FLAG2_RSC_CAPABLE)) { + DPRINTK(PROBE, INFO, + "RSC is not supported on this " + "hardware. Disabling RSC.\n"); + netdev->features &= ~NETIF_F_LRO; + } + } + { /* + * allow_unsupported_sfp - Enable/Disable support for unsupported + * and untested SFP+ modules. 
+ */ + struct txgbe_option opt = { + .type = enable_option, + .name = "allow_unsupported_sfp", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + if (num_allow_unsupported_sfp > bd) { + u32 enable_unsupported_sfp = + allow_unsupported_sfp[bd]; + txgbe_validate_option(&enable_unsupported_sfp, &opt); + if (enable_unsupported_sfp) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } + } else if (opt.def == OPTION_ENABLED) { + adapter->hw.allow_unsupported_sfp = true; + } else { + adapter->hw.allow_unsupported_sfp = false; + } + } + + { /* DMA Coalescing */ + struct txgbe_option opt = { + .type = range_option, + .name = "dmac_watchdog", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 41, .max = 10000 } }, + }; + const char *cmsg = "DMA coalescing not supported on this " + "hardware"; + + opt.err = cmsg; + opt.msg = cmsg; + opt.arg.r.min = 0; + opt.arg.r.max = 0; + + if (num_dmac_watchdog > bd) { + u32 dmac_wd = dmac_watchdog[bd]; + + txgbe_validate_option(&dmac_wd, &opt); + adapter->hw.mac.dmac_config.watchdog_timer = (u16)dmac_wd; + } else { + adapter->hw.mac.dmac_config.watchdog_timer = opt.def; + } + } + { /* VXLAN rx offload */ + struct txgbe_option opt = { + .type = range_option, + .name = "vxlan_rx", + .err = "defaulting to 1 (enabled)", + .def = 1, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + const char *cmsg = "VXLAN rx offload not supported on this " + "hardware"; + const u32 flag = TXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; + + if (!(adapter->flags & TXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { + opt.err = cmsg; + opt.msg = cmsg; + opt.def = 0; + opt.arg.r.max = 0; + } + if (num_vxlan_rx > bd) { + u32 enable_vxlan_rx = vxlan_rx[bd]; + + txgbe_validate_option(&enable_vxlan_rx, &opt); + if (enable_vxlan_rx) + adapter->flags |= flag; + else + adapter->flags &= ~flag; + } else if (opt.def) { + adapter->flags |= flag; + } else { + adapter->flags &= ~flag; + } + } + + { /* Rx buffer mode */ + u32 rx_buf_mode; + static struct txgbe_option opt = { + .type = range_option, + .name = "Rx buffer mode", + .err = "using default of " + __MODULE_STRING(TXGBE_DEFAULT_RXBUFMODE), + .def = TXGBE_DEFAULT_RXBUFMODE, + .arg = {.r = {.min = TXGBE_RXBUFMODE_NO_HEADER_SPLIT, + .max = TXGBE_RXBUFMODE_HEADER_SPLIT} } + + }; + + if (num_RxBufferMode > bd) { + rx_buf_mode = RxBufferMode[bd]; + txgbe_validate_option(&rx_buf_mode, &opt); + switch (rx_buf_mode) { + case TXGBE_RXBUFMODE_NO_HEADER_SPLIT: + *aflags &= ~TXGBE_FLAG_RX_HS_ENABLED; + break; + case TXGBE_RXBUFMODE_HEADER_SPLIT: + *aflags |= TXGBE_FLAG_RX_HS_ENABLED; + break; + default: + break; + } + } else { + *aflags &= ~TXGBE_FLAG_RX_HS_ENABLED; + } + + } + { /* Cloud Switch */ + struct txgbe_option opt = { + .type = range_option, + .name = "CloudSwitch", + .err = "defaulting to 0 (disabled)", + .def = 0, + .arg = { .r = { .min = 0, .max = 1 } }, + }; + + if (num_CloudSwitch > bd) { + u32 enable_cloudswitch = CloudSwitch[bd]; + + txgbe_validate_option(&enable_cloudswitch, &opt); + if (enable_cloudswitch) + adapter->flags |= + TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + else + adapter->flags &= + ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } else if (opt.def) { + adapter->flags |= TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } else { + adapter->flags &= ~TXGBE_FLAG2_CLOUD_SWITCH_ENABLED; + } + } +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_phy.c b/drivers/net/ethernet/netswift/txgbe/txgbe_phy.c new file mode 100644 index 
0000000000000000000000000000000000000000..2db6541f95a18c819bf5746d4c989b5a87396ea8 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_phy.c @@ -0,0 +1,1014 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_phy.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "txgbe_phy.h" +#include "txgbe_mtd.h" + +/** + * txgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return faluse since the link can not be blocked + * via this method. + **/ +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw) +{ + u32 mmngc; + + DEBUGFUNC("\n"); + + mmngc = rd32(hw, TXGBE_MIS_ST); + if (mmngc & TXGBE_MIS_ST_MNG_VETO) { + ERROR_REPORT1(TXGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + + +/** + * txgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 txgbe_get_phy_id(struct txgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + u8 numport, thisport; + DEBUGFUNC("\n"); + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_HIGH, &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_ID_LOW, &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & TXGBE_PHY_REVISION_MASK); + } + + if (status == 0) { + status = mtdGetPhyRevision(&hw->phy_dev, hw->phy.addr, + (MTD_DEVICE_ID *)&hw->phy.revision, &numport, &thisport); + if (status == MTD_FAIL) { + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "Error in mtdGetPhyRevision()\n"); + } + } + return status; +} + +/** + * txgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information + * + **/ +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw) +{ + enum txgbe_phy_type phy_type; + u16 ext_ability = 0; + + DEBUGFUNC("\n"); + + switch (hw->phy.id) { + case TN1010_PHY_ID: + phy_type = txgbe_phy_tn; + break; + case QT2022_PHY_ID: + phy_type = txgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = txgbe_phy_nl; + break; + default: + phy_type = txgbe_phy_unknown; + break; + } + if (phy_type == txgbe_phy_unknown) { + mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_EXT_ABILITY, &ext_ability); + + if (ext_ability & (TXGBE_MDIO_PHY_10GBASET_ABILITY | + TXGBE_MDIO_PHY_1000BASET_ABILITY)) + phy_type = txgbe_phy_cu_unknown; + else + phy_type = txgbe_phy_generic; + } + return phy_type; +} + +/** + * txgbe_reset_phy - 
Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 txgbe_reset_phy(struct txgbe_hw *hw) +{ + s32 status = 0; + + DEBUGFUNC("\n"); + + + if (status != 0 || hw->phy.type == txgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (TXGBE_ERR_OVERTEMP == TCALL(hw, phy.ops.check_overtemp))) + goto out; + + /* Blocked by MNG FW so bail */ + txgbe_check_reset_blocked(hw); + if (((hw->subsystem_device_id & TXGBE_NCSI_MASK) == TXGBE_NCSI_SUP) || + ((hw->subsystem_device_id & TXGBE_WOL_MASK) == TXGBE_WOL_SUP)) + goto out; + + status = mtdHardwareReset(&hw->phy_dev, hw->phy.addr, 1000); + +out: + return status; +} + +/** + * txgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); + + command = TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_READ) | TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } + + /* read data from MSCC */ + *phy_data = 0xFFFF & rd32(hw, TXGBE_MSCC); + + return 0; +} + +/** + * txgbe_read_phy_reg - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("\n"); + + if (0 == TCALL(hw, mac.ops.acquire_swfw_sync, gssr)) { + status = txgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + TCALL(hw, mac.ops.release_swfw_sync, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * txgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + s32 status = 0; + + /* setup and write the address cycle command */ + command = TXGBE_MSCA_RA(reg_addr) | + TXGBE_MSCA_PA(hw->phy.addr) | + TXGBE_MSCA_DA(device_type); + wr32(hw, TXGBE_MSCA, command); + + command = phy_data | TXGBE_MSCC_CMD(TXGBE_MSCA_CMD_WRITE) | + TXGBE_MSCC_BUSY; + wr32(hw, TXGBE_MSCC, command); + + /* wait to complete */ + status = po32m(hw, TXGBE_MSCC, + TXGBE_MSCC_BUSY, ~TXGBE_MSCC_BUSY, + TXGBE_MDIO_TIMEOUT, 10); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_POLLING, + "PHY address command did not complete.\n"); + return TXGBE_ERR_PHY; + } + + return 0; +} + +/** + * txgbe_write_phy_reg - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * 
@hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.acquire_swfw_sync, gssr) == 0) { + status = txgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + TCALL(hw, mac.ops.release_swfw_sync, gssr); + } else { + status = TXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +MTD_STATUS txgbe_read_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 *value) +{ + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->appData); + + if (hw->phy.addr != port) + return MTD_FAIL; + return txgbe_read_phy_reg(hw, reg, mmd, value); +} + +MTD_STATUS txgbe_write_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value) +{ + struct txgbe_hw *hw = (struct txgbe_hw *)(dev->appData); + + if (hw->phy.addr != port) + return MTD_FAIL; + + return txgbe_write_phy_reg(hw, reg, mmd, value); +} + +/** + * txgbe_setup_phy_link - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. + **/ +u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete) +{ + u16 speed = MTD_ADV_NONE; + MTD_DEV_PTR devptr = &hw->phy_dev; + MTD_BOOL anDone = MTD_FALSE; + u16 port = hw->phy.addr; + + UNREFERENCED_PARAMETER(speed_set); + DEBUGFUNC("\n"); + + if (!autoneg_wait_to_complete) { + mtdAutonegIsSpeedDuplexResolutionDone(devptr, port, &anDone); + if (anDone) { + mtdGetAutonegSpeedDuplexResolution(devptr, port, &speed); + } + } else { + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) + speed |= MTD_SPEED_10GIG_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) + speed |= MTD_SPEED_1GIG_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100_FULL) + speed |= MTD_SPEED_100M_FD; + if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10_FULL) + speed |= MTD_SPEED_10M_FD; + mtdEnableSpeeds(devptr, port, speed, MTD_TRUE); + + /* wait autoneg to be done */ + speed = MTD_ADV_NONE; + } + + switch (speed) { + case MTD_SPEED_10GIG_FD: + return TXGBE_LINK_SPEED_10GB_FULL; + case MTD_SPEED_1GIG_FD: + return TXGBE_LINK_SPEED_1GB_FULL; + case MTD_SPEED_100M_FD: + return TXGBE_LINK_SPEED_100_FULL; + case MTD_SPEED_10M_FD: + return TXGBE_LINK_SPEED_10_FULL; + default: + return TXGBE_LINK_SPEED_UNKNOWN; + } + +} + +/** + * txgbe_setup_phy_link_speed - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + **/ +u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete) +{ + + DEBUGFUNC("\n"); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
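 * For example, when autoneg_wait_to_complete is set, a caller requesting
 * TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL here causes
 * txgbe_setup_phy_link() above to advertise MTD_SPEED_10GIG_FD |
 * MTD_SPEED_1GIG_FD through mtdEnableSpeeds().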
+ */ + hw->phy.autoneg_advertised = 0; + + if (speed & TXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL; + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL; + + if (speed & TXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_100_FULL; + + if (speed & TXGBE_LINK_SPEED_10_FULL) + hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10_FULL; + + /* Setup link based on the new speed settings */ + return txgbe_setup_phy_link(hw, speed, autoneg_wait_to_complete); +} + +/** + * txgbe_get_copper_link_capabilities - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. + **/ +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg) +{ + s32 status; + u16 speed_ability; + + DEBUGFUNC("\n"); + + *speed = 0; + *autoneg = true; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_PMA_PMD_DEV_TYPE, + TXGBE_MDIO_PHY_SPEED_ABILITY, &speed_ability); + + if (status == 0) { + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10G) + *speed |= TXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_1G) + *speed |= TXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_100M) + *speed |= TXGBE_LINK_SPEED_100_FULL; + if (speed_ability & TXGBE_MDIO_PHY_SPEED_10M) + *speed |= TXGBE_LINK_SPEED_10_FULL; + } + + return status; +} + +/** + * txgbe_identify_module - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 txgbe_identify_module(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_SFP_NOT_PRESENT; + + DEBUGFUNC("\n"); + + switch (TCALL(hw, mac.ops.get_media_type)) { + case txgbe_media_type_fiber: + status = txgbe_identify_sfp_module(hw); + break; + + default: + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + break; + } + + return status; +} + +/** + * txgbe_identify_sfp_module - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
+ **/ +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw) +{ + s32 status = TXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum txgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + + DEBUGFUNC("\n"); + + if (TCALL(hw, mac.ops.get_media_type) != txgbe_media_type_fiber) { + hw->phy.sfp_type = txgbe_sfp_type_not_present; + status = TXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + txgbe_init_i2c(hw); + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier != TXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status != 0) + goto err_read_i2c_eeprom; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status != 0) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 + * 4 SFP_DA_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + * 7 SFP_act_lmt_DA_CORE0 + * 8 SFP_act_lmt_DA_CORE1 + * 9 SFP_1g_cu_CORE0 + * 10 SFP_1g_cu_CORE1 + * 11 SFP_1g_sx_CORE0 + * 12 SFP_1g_sx_CORE1 + */ + { + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_cu_core1; + } else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) { + TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + txgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (TXGBE_SFF_10GBASESR_CAPABLE | + TXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + txgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = txgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. 
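 * (As a concrete case of the mapping above: a module whose 10G compliance
 * codes report 10GBASE-SR or 10GBASE-LR, with no DA cable technology bits
 * set, is classified as txgbe_sfp_type_srlr_core0 on LAN port 0 and
 * txgbe_sfp_type_srlr_core1 on LAN port 1, i.e. entries 5 and 6 of the ID
 * table.)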
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != txgbe_phy_nl) { + hw->phy.id = identifier; + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = TCALL(hw, phy.ops.read_i2c_eeprom, + TXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case TXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_tyco; + break; + case TXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = txgbe_phy_sfp_ftl_active; + else + hw->phy.type = txgbe_phy_sfp_ftl; + break; + case TXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = txgbe_phy_sfp_avago; + break; + case TXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = txgbe_phy_sfp_intel; + break; + default: + if (cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_passive_unknown; + else if (cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + txgbe_phy_sfp_active_unknown; + else + hw->phy.type = txgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (TXGBE_SFF_DA_PASSIVE_CABLE | + TXGBE_SFF_DA_ACTIVE_CABLE)) { + status = 0; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = txgbe_phy_sfp_unsupported; + status = TXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = txgbe_sfp_type_not_present; + if (hw->phy.type != txgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = txgbe_phy_unknown; + } + return TXGBE_ERR_SFP_NOT_PRESENT; +} + +s32 txgbe_init_i2c(struct txgbe_hw *hw) +{ + + wr32(hw, TXGBE_I2C_ENABLE, 0); + + wr32(hw, TXGBE_I2C_CON, + (TXGBE_I2C_CON_MASTER_MODE | + TXGBE_I2C_CON_SPEED(1) | + TXGBE_I2C_CON_RESTART_EN | + TXGBE_I2C_CON_SLAVE_DISABLE)); + /* Default addr is 0xA0 ,bit 0 is configure for read/write! 
*/ + wr32(hw, TXGBE_I2C_TAR, TXGBE_I2C_SLAVE_ADDR); + wr32(hw, TXGBE_I2C_SS_SCL_HCNT, 600); + wr32(hw, TXGBE_I2C_SS_SCL_LCNT, 600); + wr32(hw, TXGBE_I2C_RX_TL, 0); /* 1byte for rx full signal */ + wr32(hw, TXGBE_I2C_TX_TL, 4); + wr32(hw, TXGBE_I2C_SCL_STUCK_TIMEOUT, 0xFFFFFF); + wr32(hw, TXGBE_I2C_SDA_STUCK_TIMEOUT, 0xFFFFFF); + + wr32(hw, TXGBE_I2C_INTR_MASK, 0); + wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; +} + +s32 txgbe_clear_i2c(struct txgbe_hw *hw) +{ + s32 status = 0; + + /* wait for completion */ + status = po32m(hw, TXGBE_I2C_STATUS, + TXGBE_I2C_STATUS_MST_ACTIVITY, ~TXGBE_I2C_STATUS_MST_ACTIVITY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + wr32(hw, TXGBE_I2C_ENABLE, 0); + +out: + return status; +} + +/** + * txgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + DEBUGFUNC("\n"); + + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * txgbe_read_i2c_sff8472 - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return TCALL(hw, phy.ops.read_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** + * txgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + DEBUGFUNC("\n"); + + return TCALL(hw, phy.ops.write_i2c_byte, byte_offset, + TXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * txgbe_read_i2c_byte_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
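 *
 * A minimal usage sketch of the wrappers above, assuming phy.ops.read_i2c_byte
 * is wired to txgbe_read_i2c_byte() and with a hypothetical caller action:
 *
 *	u8 id;
 *
 *	txgbe_init_i2c(hw);
 *	if (txgbe_read_i2c_eeprom(hw, TXGBE_SFF_IDENTIFIER, &id) == 0 &&
 *	    id == TXGBE_SFF_IDENTIFIER_SFP)
 *		handle_sfp_module(hw);	// hypothetical follow-up on a detected SFP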
+ **/ +STATIC s32 txgbe_read_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + UNREFERENCED_PARAMETER(dev_addr); + + if (lock && 0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return TXGBE_ERR_SWFW_SYNC; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + /* read data */ + wr32(hw, TXGBE_I2C_DATA_CMD, + byte_offset | TXGBE_I2C_DATA_CMD_STOP); + wr32(hw, TXGBE_I2C_DATA_CMD, TXGBE_I2C_DATA_CMD_READ); + + /* wait for read complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + *data = 0xFF & rd32(hw, TXGBE_I2C_DATA_CMD); + +out: + if (lock) + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + return status; +} + +/** + * txgbe_switch_i2c_slave_addr - Switch I2C slave address + * @hw: pointer to hardware structure + * @dev_addr: slave addr to switch + * + **/ +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr) +{ + wr32(hw, TXGBE_I2C_ENABLE, 0); + wr32(hw, TXGBE_I2C_TAR, dev_addr >> 1); + wr32(hw, TXGBE_I2C_ENABLE, 1); + return 0; +} + + +/** + * txgbe_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + txgbe_switch_i2c_slave_addr(hw, dev_addr); + + return txgbe_read_i2c_byte_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * txgbe_write_i2c_byte_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 txgbe_write_i2c_byte_int(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) +{ + s32 status = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + UNREFERENCED_PARAMETER(dev_addr); + + if (lock && 0 != TCALL(hw, mac.ops.acquire_swfw_sync, swfw_mask)) + return TXGBE_ERR_SWFW_SYNC; + + /* wait tx empty */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_TX_EMPTY, TXGBE_I2C_INTR_STAT_TX_EMPTY, + TXGBE_I2C_TIMEOUT, 10); + if (status != 0) + goto out; + + wr32(hw, TXGBE_I2C_DATA_CMD, + byte_offset | TXGBE_I2C_DATA_CMD_STOP); + wr32(hw, TXGBE_I2C_DATA_CMD, + data | TXGBE_I2C_DATA_CMD_WRITE); + + /* wait for write complete */ + status = po32m(hw, TXGBE_I2C_RAW_INTR_STAT, + TXGBE_I2C_INTR_STAT_RX_FULL, TXGBE_I2C_INTR_STAT_RX_FULL, + TXGBE_I2C_TIMEOUT, 10); + +out: + if (lock) + TCALL(hw, mac.ops.release_swfw_sync, swfw_mask); + + return status; +} + +/** + * txgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
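 *
 * Note on addressing: callers pass the conventional 8-bit SFP device
 * addresses (TXGBE_I2C_EEPROM_DEV_ADDR 0xA0 for the EEPROM page,
 * TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 for the SFF-8472 diagnostics page);
 * txgbe_switch_i2c_slave_addr() above shifts the value right by one so the
 * I2C target register is programmed with the 7-bit slave address
 * (0x50 / 0x51).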
+ **/ +s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return txgbe_write_i2c_byte_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * txgbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw) +{ + s32 status = 0; + u32 ts_state; + + DEBUGFUNC("\n"); + + /* Check that the LASI temp alarm status was triggered */ + ts_state = rd32(hw, TXGBE_TS_ALARM_ST); + + if (ts_state & TXGBE_TS_ALARM_ST_DALARM) + status = TXGBE_ERR_UNDERTEMP; + else if (ts_state & TXGBE_TS_ALARM_ST_ALARM) + status = TXGBE_ERR_OVERTEMP; + + return status; +} + + +s32 txgbe_init_external_phy(struct txgbe_hw *hw) +{ + s32 status = 0; + + MTD_DEV_PTR devptr = &(hw->phy_dev); + + hw->phy.addr = 0; + + devptr->appData = hw; + status = mtdLoadDriver(txgbe_read_mdio, + txgbe_write_mdio, + MTD_FALSE, + NULL, + NULL, + NULL, + NULL, + hw->phy.addr, + devptr); + if (status != 0) { + ERROR_REPORT1(TXGBE_ERROR_INVALID_STATE, + "External PHY initilization failed.\n"); + return TXGBE_ERR_PHY; + } + + return status; +} + +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit) +{ + return mtdSetPauseAdvertisement(&hw->phy_dev, hw->phy.addr, + (pause_bit>>10)&0x3, MTD_FALSE); +} + +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + u16 value; + s32 status = 0; + + status = mtdHwXmdioRead(&hw->phy_dev, hw->phy.addr, + TXGBE_MDIO_AUTO_NEG_DEV_TYPE, + TXGBE_MDIO_AUTO_NEG_ADVT, &value); + *pause_bit = (u8)((value>>10)&0x3); + return status; + +} + +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit) +{ + return mtdGetLPAdvertisedPause(&hw->phy_dev, hw->phy.addr, pause_bit); +} diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_phy.h b/drivers/net/ethernet/netswift/txgbe/txgbe_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..f033b43cf4fe01e9d54d6d641288cd67fc886c11 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_phy.h @@ -0,0 +1,190 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_phy.h, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#ifndef _TXGBE_PHY_H_ +#define _TXGBE_PHY_H_ + +#include "txgbe.h" + +#define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define TXGBE_I2C_EEPROM_BANK_LEN 0xFF + +/* EEPROM byte offsets */ +#define TXGBE_SFF_IDENTIFIER 0x0 +#define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define TXGBE_SFF_1GBE_COMP_CODES 0x6 +#define TXGBE_SFF_10GBE_COMP_CODES 0x3 +#define TXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define TXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define TXGBE_SFF_SFF_8472_SWAP 0x5C +#define TXGBE_SFF_SFF_8472_COMP 0x5E +#define TXGBE_SFF_SFF_8472_OSCB 0x6E +#define TXGBE_SFF_SFF_8472_ESCB 0x76 +#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define TXGBE_SFF_QSFP_CONNECTOR 0x82 +#define TXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define TXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define TXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define TXGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define TXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define TXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define TXGBE_SFF_1GBASET_CAPABLE 0x8 +#define TXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define TXGBE_SFF_ADDRESSING_MODE 0x4 +#define TXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define TXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define TXGBE_I2C_EEPROM_READ_MASK 0x100 +#define TXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define TXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define TXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define TXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define TXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define TXGBE_CS4227 0xBE /* CS4227 address */ +#define TXGBE_CS4227_GLOBAL_ID_LSB 0 +#define TXGBE_CS4227_SCRATCH 2 +#define TXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define TXGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define TXGBE_CS4227_RETRIES 5 +#define TXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define TXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define TXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define TXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define TXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define TXGBE_CS4227_EDC_MODE_SR 0x0004 +#define TXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define TXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define TXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define TXGBE_PE 0xE0 /* Port expander address */ +#define TXGBE_PE_OUTPUT 1 /* Output register offset */ +#define TXGBE_PE_CONFIG 3 /* Config register offset */ +#define TXGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define TXGBE_TAF_SYM_PAUSE (0x1) +#define TXGBE_TAF_ASM_PAUSE (0x2) + +/* Bit-shift macros */ +#define TXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define TXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define TXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define TXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define TXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define 
TXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define TXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define TXGBE_I2C_T_HD_STA 4 +#define TXGBE_I2C_T_LOW 5 +#define TXGBE_I2C_T_HIGH 4 +#define TXGBE_I2C_T_SU_STA 5 +#define TXGBE_I2C_T_HD_DATA 5 +#define TXGBE_I2C_T_SU_DATA 1 +#define TXGBE_I2C_T_RISE 1 +#define TXGBE_I2C_T_FALL 1 +#define TXGBE_I2C_T_SU_STO 4 +#define TXGBE_I2C_T_BUF 5 + +/* SFP+ SFF-8472 Compliance */ +#define TXGBE_SFF_SFF_8472_UNSUP 0x00 + + +enum txgbe_phy_type txgbe_get_phy_type_from_id(struct txgbe_hw *hw); +s32 txgbe_get_phy_id(struct txgbe_hw *hw); +s32 txgbe_reset_phy(struct txgbe_hw *hw); +s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +u32 txgbe_setup_phy_link(struct txgbe_hw *hw, u32 speed_set, bool autoneg_wait_to_complete); +u32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw, + u32 speed, + bool autoneg_wait_to_complete); +s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw, + u32 *speed, + bool *autoneg); +s32 txgbe_check_reset_blocked(struct txgbe_hw *hw); + +s32 txgbe_identify_module(struct txgbe_hw *hw); +s32 txgbe_identify_sfp_module(struct txgbe_hw *hw); +s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw); +s32 txgbe_init_i2c(struct txgbe_hw *hw); +s32 txgbe_clear_i2c(struct txgbe_hw *hw); +s32 txgbe_switch_i2c_slave_addr(struct txgbe_hw *hw, u8 dev_addr); +s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); + +s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 txgbe_init_external_phy(struct txgbe_hw *hw); +s32 txgbe_set_phy_pause_advertisement(struct txgbe_hw *hw, u32 pause_bit); +s32 txgbe_get_phy_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); +s32 txgbe_get_lp_advertised_pause(struct txgbe_hw *hw, u8 *pause_bit); + +MTD_STATUS txgbe_read_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 *value); + +MTD_STATUS txgbe_write_mdio( + MTD_DEV * dev, + MTD_U16 port, + MTD_U16 mmd, + MTD_U16 reg, + MTD_U16 value); + + +#endif /* _TXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_ptp.c b/drivers/net/ethernet/netswift/txgbe/txgbe_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..4a614a550e47a6d854a58f671e5f8c86b88dbb49 --- /dev/null +++ b/drivers/net/ethernet/netswift/txgbe/txgbe_ptp.c @@ -0,0 +1,884 @@ +/* + * WangXun 10 Gigabit PCI Express Linux driver + * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * based on ixgbe_ptp.c, Copyright(c) 1999 - 2017 Intel Corporation. + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#include "txgbe.h" +#include + +/* + * SYSTIME is defined by a fixed point system which allows the user to + * define the scale counter increment value at every level change of + * the oscillator driving SYSTIME value. The time unit is determined by + * the clock frequency of the oscillator and TIMINCA register. + * The cyclecounter and timecounter structures are used to to convert + * the scale counter into nanoseconds. SYSTIME registers need to be converted + * to ns values by use of only a right shift. + * The following math determines the largest incvalue that will fit into + * the available bits in the TIMINCA register: + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator, which changes based on the link + * speed: + * At 10Gb link or no link, the period is 6.4 ns. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * round(): discard the fractional portion of the calculation + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. + * + * LinkSpeed ClockFreq ClockPeriod TIMINCA:IV + * 10000Mbps 156.25MHz 6.4*10^-9 0xCCCCCC(0xFFFFF/ns) + * 1000 Mbps 62.5 MHz 16 *10^-9 0x800000(0x7FFFF/ns) + * 100 Mbps 6.25 MHz 160*10^-9 0xA00000(0xFFFF/ns) + * 10 Mbps 0.625 MHz 1600*10^-9 0xC7F380(0xFFF/ns) + * FPGA 31.25 MHz 32 *10^-9 0x800000(0x3FFFF/ns) + * + * These diagrams are only for the 10Gb link period + * + * +--------------+ +--------------+ + * | 32 | | 8 | 3 | 20 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 43 bit SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define TXGBE_INCVAL_10GB 0xCCCCCC +#define TXGBE_INCVAL_1GB 0x800000 +#define TXGBE_INCVAL_100 0xA00000 +#define TXGBE_INCVAL_10 0xC7F380 +#define TXGBE_INCVAL_FPGA 0x800000 + +#define TXGBE_INCVAL_SHIFT_10GB 20 +#define TXGBE_INCVAL_SHIFT_1GB 18 +#define TXGBE_INCVAL_SHIFT_100 15 +#define TXGBE_INCVAL_SHIFT_10 12 +#define TXGBE_INCVAL_SHIFT_FPGA 17 + +#define TXGBE_OVERFLOW_PERIOD (HZ * 30) +#define TXGBE_PTP_TX_TIMEOUT (HZ) + +/** + * txgbe_ptp_read - read raw cycle counter (to be used by time counter) + * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 txgbe_ptp_read(const struct cyclecounter *hw_cc) +{ + struct txgbe_adapter *adapter = + container_of(hw_cc, struct txgbe_adapter, hw_cc); + struct txgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIML); + stamp |= (u64)rd32(hw, TXGBE_TSC_1588_SYSTIMH) << 32; + + return stamp; +} + +/** + * txgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @adapter: private adapter structure + * @hwtstamp: stack timestamp structure + * @systim: unsigned 64bit system time value + * + * We 
need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void txgbe_ptp_convert_to_hwtstamp(struct txgbe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * txgbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int txgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + struct txgbe_hw *hw = &adapter->hw; + u64 freq, incval; + u32 diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = READ_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + + if (incval > TXGBE_TSC_1588_INC_IV(~0)) + e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + wr32(hw, TXGBE_TSC_1588_INC, + TXGBE_TSC_1588_INC_IVP(incval, 2)); + + return 0; +} + + +/** + * txgbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by ns + * + * adjust the timer by resetting the timecounter structure. + */ +static int txgbe_ptp_adjtime(struct ptp_clock_info *ptp, + s64 delta) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +/** + * txgbe_ptp_gettime64 + * @ptp: the ptp clock structure + * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec64. + */ +static int txgbe_ptp_gettime64(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct txgbe_adapter *adapter = + container_of(ptp, struct txgbe_adapter, ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * txgbe_ptp_settime64 + * @ptp: the ptp clock structure + * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. 
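+ * This callback is typically reached when userspace sets the PHC time,
+ * for example via clock_settime() on the registered PTP clock device.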
+ */
+static int txgbe_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct txgbe_adapter *adapter =
+ container_of(ptp, struct txgbe_adapter, ptp_caps);
+ u64 ns;
+ unsigned long flags;
+
+ ns = timespec64_to_ns(ts);
+
+ /* reset the timecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * txgbe_ptp_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ * enable (or disable) ancillary features of the phc subsystem.
+ * This driver does not currently support any ancillary features.
+ */
+static int txgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * txgbe_ptp_check_pps_event
+ * @adapter: the private adapter structure
+ *
+ * This function is called by the interrupt routine when checking for
+ * interrupts. It will check and handle a PPS event.
+ */
+void txgbe_ptp_check_pps_event(struct txgbe_adapter *adapter)
+{
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_PPS;
+
+ /* this check is necessary in case the interrupt was enabled via some
+ * alternative means (ex. debug_fs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!adapter->ptp_clock)
+ return;
+
+ /* we don't config PPS on SDP yet, so just return.
+ * ptp_clock_event(adapter->ptp_clock, &event);
+ */
+}
+
+/**
+ * txgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @adapter: private adapter struct
+ *
+ * this watchdog task periodically reads the timecounter
+ * in order to avoid missing a wrap of the system time registers.
+ * This needs to be run approximately twice a minute for the fastest
+ * overflowing hardware. We run it for all hardware since it shouldn't have a
+ * large impact.
+ */
+void txgbe_ptp_overflow_check(struct txgbe_adapter *adapter)
+{
+ bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
+ TXGBE_OVERFLOW_PERIOD);
+ struct timespec64 ts;
+
+ if (timeout) {
+ txgbe_ptp_gettime64(&adapter->ptp_caps, &ts);
+ adapter->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * txgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
+ *
+ * this watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state where it is
+ * unable to timestamp any future packets.
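+ * Reading the high RXSTMP register (done below) unlatches the timestamp
+ * logic so that subsequent packets can be timestamped again.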
+ */
+void txgbe_ptp_rx_hang(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct txgbe_ring *rx_ring;
+ u32 tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL);
+ unsigned long rx_event;
+ int n;
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5*HZ)) {
+ rd32(hw, TXGBE_PSR_1588_STMPH);
+ adapter->last_rx_ptp_check = jiffies;
+
+ adapter->rx_hwtstamp_cleared++;
+ e_warn(drv, "clearing RX Timestamp hang");
+ }
+}
+
+/**
+ * txgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @adapter: the private adapter structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void txgbe_ptp_clear_tx_timestamp(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+
+ rd32(hw, TXGBE_TSC_1588_STMPH);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(__TXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+}
+
+/**
+ * txgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: the private adapter struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void txgbe_ptp_tx_hwtstamp(struct txgbe_adapter *adapter)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval = 0;
+
+ regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPL);
+ regval |= (u64)rd32(hw, TXGBE_TSC_1588_STMPH) << 32;
+
+ txgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+
+ txgbe_ptp_clear_tx_timestamp(adapter);
+}
+
+/**
+ * txgbe_ptp_tx_hwtstamp_work
+ * @work: pointer to the work struct
+ *
+ * This work item polls the TSYNCTXCTL valid bit to determine when a Tx
+ * hardware timestamp has been taken for the current skb. It is necessary
+ * because the descriptor's "done" bit does not correlate with the
+ * timestamp event.
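+ * The work item reschedules itself until either the valid bit is set or
+ * the TXGBE_PTP_TX_TIMEOUT interval expires, at which point the pending
+ * skb is dropped and the timeout counter is incremented.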
+ */
+static void txgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct txgbe_adapter *adapter = container_of(work, struct txgbe_adapter,
+ ptp_tx_work);
+ struct txgbe_hw *hw = &adapter->hw;
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ TXGBE_PTP_TX_TIMEOUT);
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!adapter->ptp_tx_skb) {
+ txgbe_ptp_clear_tx_timestamp(adapter);
+ return;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32(hw, TXGBE_TSC_1588_CTL);
+ if (tsynctxctl & TXGBE_TSC_1588_CTL_VALID) {
+ txgbe_ptp_tx_hwtstamp(adapter);
+ return;
+ }
+
+ /* check timeout last in case timestamp event just occurred */
+ if (timeout) {
+ txgbe_ptp_clear_tx_timestamp(adapter);
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn(drv, "clearing Tx Timestamp hang");
+ } else {
+ /* reschedule to keep checking until we timeout */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+}
+
+/**
+ * txgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: the private adapter structure
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void txgbe_ptp_rx_hwtstamp(struct txgbe_adapter *adapter, struct sk_buff *skb)
+{
+ struct txgbe_hw *hw = &adapter->hw;
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+ /*
+ * Read the tsyncrxctl register first; if no Rx timestamp is latched,
+ * return early so we do not take an I/O hit on the timestamp
+ * registers for every packet.
+ */
+ tsyncrxctl = rd32(hw, TXGBE_PSR_1588_CTL);
+ if (!(tsyncrxctl & TXGBE_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPL);
+ regval |= (u64)rd32(hw, TXGBE_PSR_1588_STMPH) << 32;
+
+ txgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * txgbe_ptp_get_ts_config - get current hardware timestamping configuration
+ * @adapter: pointer to adapter structure
+ * @ifreq: ioctl data
+ *
+ * This function returns the current timestamping settings. Rather than
+ * attempt to deconstruct registers to fill in the values, simply keep a copy
+ * of the old settings around, and return a copy when requested.
+ */
+int txgbe_ptp_get_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
+/**
+ * txgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private txgbe adapter structure
+ * @config: the hwtstamp configuration requested
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode.
This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. + */ +static int txgbe_ptp_set_timestamp_mode(struct txgbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct txgbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = TXGBE_TSC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = TXGBE_PSR_1588_CTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= TXGBE_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + adapter->flags |= (TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + adapter->flags &= ~(TXGBE_FLAG_RX_HWTSTAMP_ENABLED | + TXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588), + (TXGBE_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + TXGBE_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(hw, + TXGBE_PSR_ETYPE_SWC(TXGBE_PSR_ETYPE_SWC_FILTER_1588), + 0); + + /* enable/disable TX */ + regval = rd32(hw, TXGBE_TSC_1588_CTL); + regval &= ~TXGBE_TSC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(hw, TXGBE_TSC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(hw, TXGBE_PSR_1588_CTL); + regval &= ~(TXGBE_PSR_1588_CTL_ENABLED | TXGBE_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(hw, TXGBE_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(hw, TXGBE_PSR_1588_MSGTYPE, tsync_rx_mtrl); + + TXGBE_WRITE_FLUSH(hw); + + /* clear TX/RX timestamp state, just to be sure */ + txgbe_ptp_clear_tx_timestamp(adapter); + rd32(hw, TXGBE_PSR_1588_STMPH); + + 
return 0; +} + +/** + * txgbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. + */ +int txgbe_ptp_set_ts_config(struct txgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = txgbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static void txgbe_ptp_link_speed_adjust(struct txgbe_adapter *adapter, + u32 *shift, u32 *incval) +{ + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + switch (adapter->link_speed) { + case TXGBE_LINK_SPEED_10_FULL: + *shift = TXGBE_INCVAL_SHIFT_10; + *incval = TXGBE_INCVAL_10; + break; + case TXGBE_LINK_SPEED_100_FULL: + *shift = TXGBE_INCVAL_SHIFT_100; + *incval = TXGBE_INCVAL_100; + break; + case TXGBE_LINK_SPEED_1GB_FULL: + *shift = TXGBE_INCVAL_SHIFT_FPGA; + *incval = TXGBE_INCVAL_FPGA; + break; + case TXGBE_LINK_SPEED_10GB_FULL: + default: /* TXGBE_LINK_SPEED_10GB_FULL */ + *shift = TXGBE_INCVAL_SHIFT_10GB; + *incval = TXGBE_INCVAL_10GB; + break; + } + + return; +} + +/** + * txgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void txgbe_ptp_start_cyclecounter(struct txgbe_adapter *adapter) +{ + struct txgbe_hw *hw = &adapter->hw; + unsigned long flags; + struct cyclecounter cc; + u32 incval = 0; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. 
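+ * For now, simply use the full 64-bit mask; in practice the SYSTIME value
+ * never reaches the wrap point.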
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = txgbe_ptp_read;
+ txgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval);
+ wr32(hw, TXGBE_TSC_1588_INC,
+ TXGBE_TSC_1588_INC_IVP(incval, 2));
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(adapter->base_incval, incval);
+ smp_mb();
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc));
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * txgbe_ptp_reset
+ * @adapter: the txgbe private board structure
+ *
+ * When the MAC resets, all of the hardware configuration for timesync is
+ * reset. This function should be called to re-enable the device for PTP,
+ * using the last known settings. However, we do lose the current clock time,
+ * so we fall back to resetting it based on the kernel's realtime clock.
+ *
+ * This function will maintain the hwtstamp_config settings, and it retriggers
+ * the SDP output if it's enabled.
+ */
+void txgbe_ptp_reset(struct txgbe_adapter *adapter)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ txgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ txgbe_ptp_start_cyclecounter(adapter);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ adapter->last_overflow_check = jiffies;
+}
+
+/**
+ * txgbe_ptp_create_clock
+ * @adapter: the txgbe private adapter structure
+ *
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device used by userspace to access the clock-like
+ * features of the PTP core. It will be called by txgbe_ptp_init, and may
+ * re-use a previously initialized clock (such as during a suspend/resume
+ * cycle).
+ */
+
+static long txgbe_ptp_create_clock(struct txgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return 0;
+
+ snprintf(adapter->ptp_caps.name, sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 250000000; /* 10^-9s */
+ adapter->ptp_caps.n_alarm = 0;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.n_per_out = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = txgbe_ptp_adjfreq;
+ adapter->ptp_caps.adjtime = txgbe_ptp_adjtime;
+ adapter->ptp_caps.gettime64 = txgbe_ptp_gettime64;
+ adapter->ptp_caps.settime64 = txgbe_ptp_settime64;
+ adapter->ptp_caps.enable = txgbe_ptp_feature_enable;
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ pci_dev_to_dev(adapter->pdev));
+ if (IS_ERR(adapter->ptp_clock)) {
+ err = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_dev_err("ptp_clock_register failed\n");
+ return err;
+ } else
+ e_dev_info("registered PHC device on %s\n", netdev->name);
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
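+ * txgbe_ptp_reset() re-applies whatever mode is stored in tstamp_config,
+ * so preserving it here keeps the user's configuration across a reset.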
+ */
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+/**
+ * txgbe_ptp_init
+ * @adapter: the txgbe private adapter structure
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void txgbe_ptp_init(struct txgbe_adapter *adapter)
+{
+ /* initialize the spin lock first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ spin_lock_init(&adapter->tmreg_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (txgbe_ptp_create_clock(adapter))
+ return;
+
+ /* we have a clock, so we can initialize work for timestamps now */
+ INIT_WORK(&adapter->ptp_tx_work, txgbe_ptp_tx_hwtstamp_work);
+
+ /* reset the ptp related hardware bits */
+ txgbe_ptp_reset(adapter);
+
+ /* enter the TXGBE_PTP_RUNNING state */
+ set_bit(__TXGBE_PTP_RUNNING, &adapter->state);
+
+ return;
+}
+
+/**
+ * txgbe_ptp_suspend - stop ptp work items
+ * @adapter: pointer to adapter struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void txgbe_ptp_suspend(struct txgbe_adapter *adapter)
+{
+ /* leave the TXGBE_PTP_RUNNING state */
+ if (!test_and_clear_bit(__TXGBE_PTP_RUNNING, &adapter->state))
+ return;
+
+ adapter->flags2 &= ~TXGBE_FLAG2_PTP_PPS_ENABLED;
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ txgbe_ptp_clear_tx_timestamp(adapter);
+}
+
+/**
+ * txgbe_ptp_stop - destroy the ptp_clock device
+ * @adapter: pointer to adapter struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void txgbe_ptp_stop(struct txgbe_adapter *adapter)
+{
+ /* first, suspend ptp activity */
+ txgbe_ptp_suspend(adapter);
+
+ /* now destroy the ptp clock device */
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_dev_info("removed PHC on %s\n",
+ adapter->netdev->name);
+ }
+}
diff --git a/drivers/net/ethernet/netswift/txgbe/txgbe_type.h b/drivers/net/ethernet/netswift/txgbe/txgbe_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f62819a848adf685b42e9002ff59be8e46f02f2
--- /dev/null
+++ b/drivers/net/ethernet/netswift/txgbe/txgbe_type.h
@@ -0,0 +1,3213 @@
+/*
+ * WangXun 10 Gigabit PCI Express Linux driver
+ * Copyright (c) 2015 - 2017 Beijing WangXun Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * based on ixgbe_type.h, Copyright(c) 1999 - 2017 Intel Corporation.
+ * Contact Information:
+ * Linux NICS
+ * e1000-devel Mailing List
+ * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + + +#ifndef _TXGBE_TYPE_H_ +#define _TXGBE_TYPE_H_ + +#include +#include +#include + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - TXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - TXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - TXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - TXGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - TXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - TXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) + */ + +#include "txgbe_mtd.h" + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif + +/************ txgbe_register.h ************/ +/* Vendor ID */ +#ifndef PCI_VENDOR_ID_TRUSTNETIC +#define PCI_VENDOR_ID_TRUSTNETIC 0x8088 +#endif + +/* Device IDs */ +#define TXGBE_DEV_ID_SP1000 0x1001 +#define TXGBE_DEV_ID_WX1820 0x2001 + +/* Subsystem IDs */ +/* SFP */ +#define TXGBE_ID_SP1000_SFP 0x0000 +#define TXGBE_ID_WX1820_SFP 0x2000 +#define TXGBE_ID_SFP 0x00 + +/* copper */ +#define TXGBE_ID_SP1000_XAUI 0x1010 +#define TXGBE_ID_WX1820_XAUI 0x2010 +#define TXGBE_ID_XAUI 0x10 +#define TXGBE_ID_SP1000_SGMII 0x1020 +#define TXGBE_ID_WX1820_SGMII 0x2020 +#define TXGBE_ID_SGMII 0x20 +/* backplane */ +#define TXGBE_ID_SP1000_KR_KX_KX4 0x1030 +#define TXGBE_ID_WX1820_KR_KX_KX4 0x2030 +#define TXGBE_ID_KR_KX_KX4 0x30 +/* MAC Interface */ +#define TXGBE_ID_SP1000_MAC_XAUI 0x1040 +#define TXGBE_ID_WX1820_MAC_XAUI 0x2040 +#define TXGBE_ID_MAC_XAUI 0x40 +#define TXGBE_ID_SP1000_MAC_SGMII 0x1060 +#define TXGBE_ID_WX1820_MAC_SGMII 0x2060 +#define TXGBE_ID_MAC_SGMII 0x60 + +#define TXGBE_NCSI_SUP 0x8000 +#define TXGBE_NCSI_MASK 0x8000 +#define TXGBE_WOL_SUP 0x4000 +#define TXGBE_WOL_MASK 0x4000 + + +/* Combined interface*/ +#define TXGBE_ID_SFI_XAUI 0x50 + +/* Revision ID */ +#define TXGBE_SP_MPW 1 + +/* MDIO Manageable Devices (MMDs). 
*/ +#define TXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 /* PMA and PMD */ +#define TXGBE_MDIO_PCS_DEV_TYPE 0x3 /* Physical Coding Sublayer*/ +#define TXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 /* PHY Extender Sublayer */ +#define TXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 /* Auto-Negotiation */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Vendor specific 1 */ + +/* phy register definitions */ +/* VENDOR_SPECIFIC_1_DEV regs */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define TXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ + +/* AUTO_NEG_DEV regs */ +#define TXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define TXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Reg */ +#define TXGBE_MDIO_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG RX LP Status + * Reg */ +#define TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define TXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define TXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ + + +#define TXGBE_MDIO_AUTO_NEG_10GBASE_EEE_ADVT 0x8 +#define TXGBE_MDIO_AUTO_NEG_1000BASE_EEE_ADVT 0x4 +#define TXGBE_MDIO_AUTO_NEG_100BASE_EEE_ADVT 0x2 +#define TXGBE_MDIO_AUTO_NEG_LP_1000BASE_CAP 0x8000 +#define TXGBE_MDIO_AUTO_NEG_LP_10GBASE_CAP 0x0800 +#define TXGBE_MDIO_AUTO_NEG_10GBASET_STAT 0x0021 + +#define TXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define TXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define TXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define TXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define TXGBE_MII_RESTART 0x200 +#define TXGBE_MII_AUTONEG_COMPLETE 0x20 +#define TXGBE_MII_AUTONEG_LINK_UP 0x04 +#define TXGBE_MII_AUTONEG_REG 0x0 + +/* PHY_XS_DEV regs */ +#define TXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define TXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ + +/* Media-dependent registers. 
*/ +#define TXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define TXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define TXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define TXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ + +#define TXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define TXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define TXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define TXGBE_MDIO_PHY_SPEED_10M 0x0040 /* 10M capable */ + +#define TXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define TXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define TXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ + +#define TXGBE_PHY_REVISION_MASK 0xFFFFFFF0U +#define TXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410U +#define QT2022_PHY_ID 0x0043A400U +#define ATH_PHY_ID 0x03429050U +/* PHY FW revision */ +#define TNX_FW_REV 0xB +#define AQ_FW_REV 0x20 + +/* ETH PHY Registers */ +#define TXGBE_SR_XS_PCS_MMD_STATUS1 0x30001 +#define TXGBE_SR_PCS_CTL2 0x30007 +#define TXGBE_SR_PMA_MMD_CTL1 0x10000 +#define TXGBE_SR_MII_MMD_CTL 0x1F0000 +#define TXGBE_SR_MII_MMD_DIGI_CTL 0x1F8000 +#define TXGBE_SR_MII_MMD_AN_CTL 0x1F8001 +#define TXGBE_SR_MII_MMD_AN_ADV 0x1F0004 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE(_v) ((0x3 & (_v)) << 7) +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_ASM 0x80 +#define TXGBE_SR_MII_MMD_AN_ADV_PAUSE_SYM 0x100 +#define TXGBE_SR_MII_MMD_LP_BABL 0x1F0005 +#define TXGBE_SR_AN_MMD_CTL 0x70000 +#define TXGBE_SR_AN_MMD_ADV_REG1 0x70010 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE(_v) ((0x3 & (_v)) << 10) +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400 +#define TXGBE_SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800 +#define TXGBE_SR_AN_MMD_ADV_REG2 0x70011 +#define TXGBE_SR_AN_MMD_LP_ABL1 0x70013 +#define TXGBE_VR_AN_KR_MODE_CL 0x78003 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1 0x38000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS 0x38010 +#define TXGBE_PHY_MPLLA_CTL0 0x18071 +#define TXGBE_PHY_MPLLA_CTL3 0x18077 +#define TXGBE_PHY_MISC_CTL0 0x18090 +#define TXGBE_PHY_VCO_CAL_LD0 0x18092 +#define TXGBE_PHY_VCO_CAL_LD1 0x18093 +#define TXGBE_PHY_VCO_CAL_LD2 0x18094 +#define TXGBE_PHY_VCO_CAL_LD3 0x18095 +#define TXGBE_PHY_VCO_CAL_REF0 0x18096 +#define TXGBE_PHY_VCO_CAL_REF1 0x18097 +#define TXGBE_PHY_RX_AD_ACK 0x18098 +#define TXGBE_PHY_AFE_DFE_ENABLE 0x1805D +#define TXGBE_PHY_DFE_TAP_CTL0 0x1805E +#define TXGBE_PHY_RX_EQ_ATT_LVL0 0x18057 +#define TXGBE_PHY_RX_EQ_CTL0 0x18058 +#define TXGBE_PHY_RX_EQ_CTL 0x1805C +#define TXGBE_PHY_TX_EQ_CTL0 0x18036 +#define TXGBE_PHY_TX_EQ_CTL1 0x18037 +#define TXGBE_PHY_TX_RATE_CTL 0x18034 +#define TXGBE_PHY_RX_RATE_CTL 0x18054 +#define TXGBE_PHY_TX_GEN_CTL2 0x18032 +#define TXGBE_PHY_RX_GEN_CTL2 0x18052 +#define TXGBE_PHY_RX_GEN_CTL3 0x18053 +#define TXGBE_PHY_MPLLA_CTL2 0x18073 +#define TXGBE_PHY_RX_POWER_ST_CTL 0x18055 +#define TXGBE_PHY_TX_POWER_ST_CTL 0x18035 +#define TXGBE_PHY_TX_GENCTRL1 0x18031 + +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_R 0x0 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_X 0x1 +#define TXGBE_SR_PCS_CTL2_PCS_TYPE_SEL_MASK 0x3 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_1G 0x0 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_10G 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_SPEED_SEL_MASK 0x2000 +#define TXGBE_SR_PMA_MMD_CTL1_LB_EN 0x1 +#define TXGBE_SR_MII_MMD_CTL_AN_EN 0x1000 +#define TXGBE_SR_MII_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_RESTART_AN 0x0200 +#define TXGBE_SR_AN_MMD_CTL_ENABLE 0x1000 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40 +#define 
TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80 +#define TXGBE_SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000 +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C +#define TXGBE_VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10 + +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_10GBASER_KR 33 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40 +#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR 0x7B +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56 +#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1 +#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE +#define TXGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00 +#define TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344 +#define TXGBE_PHY_VCO_CAL_LD0_10GBASER_KR 1353 +#define TXGBE_PHY_VCO_CAL_LD0_OTHER 1360 +#define TXGBE_PHY_VCO_CAL_LD0_MASK 0x1000 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_10GBASER_KR 41 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34 +#define TXGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F +#define TXGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10 +#define TXGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1 +#define TXGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1 +#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_10GBASER_KR 0x0 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2 +#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7 +#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70 +#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700 +#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_10GBASER_KR 0x0 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000 +#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7 +#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70 +#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700 +#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400 +#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR 0x200 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR_RXAUI 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100 +#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300 +#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400 +#define 
TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000 +#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000 +#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000 + +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400 +#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700 + +#define TXGBE_XPCS_POWER_GOOD_MAX_POLLING_TIME 100 +#define TXGBE_PHY_INIT_DONE_POLLING_TIME 100 + +/**************** Global Registers ****************************/ +/* chip control Registers */ +#define TXGBE_MIS_RST 0x1000C +#define TXGBE_MIS_PWR 0x10000 +#define TXGBE_MIS_CTL 0x10004 +#define TXGBE_MIS_PF_SM 0x10008 +#define TXGBE_MIS_ST 0x10028 +#define TXGBE_MIS_SWSM 0x1002C +#define TXGBE_MIS_RST_ST 0x10030 + +#define TXGBE_MIS_RST_SW_RST 0x00000001U +#define TXGBE_MIS_RST_LAN0_RST 0x00000002U +#define TXGBE_MIS_RST_LAN1_RST 0x00000004U +#define TXGBE_MIS_RST_LAN0_CHG_ETH_MODE 0x20000000U +#define TXGBE_MIS_RST_LAN1_CHG_ETH_MODE 0x40000000U +#define TXGBE_MIS_RST_GLOBAL_RST 0x80000000U +#define TXGBE_MIS_RST_MASK (TXGBE_MIS_RST_SW_RST | \ + TXGBE_MIS_RST_LAN0_RST | \ + TXGBE_MIS_RST_LAN1_RST) +#define TXGBE_MIS_PWR_LAN_ID(_r) ((0xC0000000U & (_r)) >> 30) +#define TXGBE_MIS_PWR_LAN_ID_0 (1) +#define TXGBE_MIS_PWR_LAN_ID_1 (2) +#define TXGBE_MIS_PWR_LAN_ID_A (3) +#define TXGBE_MIS_ST_MNG_INIT_DN 0x00000001U +#define TXGBE_MIS_ST_MNG_VETO 0x00000100U +#define TXGBE_MIS_ST_LAN0_ECC 0x00010000U +#define TXGBE_MIS_ST_LAN1_ECC 0x00020000U +#define TXGBE_MIS_ST_MNG_ECC 0x00040000U +#define TXGBE_MIS_ST_PCORE_ECC 0x00080000U +#define TXGBE_MIS_ST_PCIWRP_ECC 0x00100000U +#define TXGBE_MIS_SWSM_SMBI 1 +#define TXGBE_MIS_RST_ST_DEV_RST_ST_DONE 0x00000000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_REQ 0x00080000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_INPROGRESS 0x00100000U +#define TXGBE_MIS_RST_ST_DEV_RST_ST_MASK 0x00180000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_MASK 0x00070000U +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SHIFT 16 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_SW_RST 0x3 +#define TXGBE_MIS_RST_ST_DEV_RST_TYPE_GLOBAL_RST 0x5 +#define TXGBE_MIS_RST_ST_RST_INIT 0x0000FF00U +#define TXGBE_MIS_RST_ST_RST_INI_SHIFT 8 +#define TXGBE_MIS_RST_ST_RST_TIM 0x000000FFU +#define TXGBE_MIS_PF_SM_SM 1 + +/* Sensors for PVT(Process Voltage Temperature) */ +#define TXGBE_TS_CTL 0x10300 +#define TXGBE_TS_EN 0x10304 +#define TXGBE_TS_ST 0x10308 +#define TXGBE_TS_ALARM_THRE 0x1030C +#define TXGBE_TS_DALARM_THRE 0x10310 +#define TXGBE_TS_INT_EN 0x10314 +#define TXGBE_TS_ALARM_ST 0x10318 +#define TXGBE_TS_ALARM_ST_DALARM 0x00000002U +#define TXGBE_TS_ALARM_ST_ALARM 0x00000001U + +#define TXGBE_TS_CTL_EVAL_MD 0x80000000U +#define TXGBE_TS_EN_ENA 0x00000001U +#define TXGBE_TS_ST_DATA_OUT_MASK 0x000003FFU +#define TXGBE_TS_ALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_DALARM_THRE_MASK 0x000003FFU +#define TXGBE_TS_INT_EN_DALARM_INT_EN 0x00000002U +#define TXGBE_TS_INT_EN_ALARM_INT_EN 0x00000001U + +struct txgbe_thermal_diode_data { + s16 temp; + s16 alarm_thresh; + s16 dalarm_thresh; +}; + +struct txgbe_thermal_sensor_data { + struct txgbe_thermal_diode_data sensor; +}; + + +/* FMGR Registers */ +#define TXGBE_SPI_ILDR_STATUS 0x10120 +#define TXGBE_SPI_ILDR_STATUS_PERST 0x00000001U /* PCIE_PERST is done */ +#define TXGBE_SPI_ILDR_STATUS_PWRRST 0x00000002U /* Power on reset is done */ +#define TXGBE_SPI_ILDR_STATUS_SW_RESET 0x00000080U /* software reset is done */ 
+#define TXGBE_SPI_ILDR_STATUS_LAN0_SW_RST 0x00000200U /* lan0 soft reset done */ +#define TXGBE_SPI_ILDR_STATUS_LAN1_SW_RST 0x00000400U /* lan1 soft reset done */ + +#define TXGBE_MAX_FLASH_LOAD_POLL_TIME 10 + +#define TXGBE_SPI_CMD 0x10104 +#define TXGBE_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) +#define TXGBE_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) +#define TXGBE_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define TXGBE_SPI_DATA 0x10108 +#define TXGBE_SPI_DATA_BYPASS ((0x1) << 31) +#define TXGBE_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) +#define TXGBE_SPI_DATA_OP_DONE ((0x1)) + +#define TXGBE_SPI_STATUS 0x1010C +#define TXGBE_SPI_STATUS_OPDONE ((0x1)) +#define TXGBE_SPI_STATUS_FLASH_BYPASS ((0x1) << 31) + +#define TXGBE_SPI_USR_CMD 0x10110 +#define TXGBE_SPI_CMDCFG0 0x10114 +#define TXGBE_SPI_CMDCFG1 0x10118 +#define TXGBE_SPI_ECC_CTL 0x10130 +#define TXGBE_SPI_ECC_INJ 0x10134 +#define TXGBE_SPI_ECC_ST 0x10138 +#define TXGBE_SPI_ILDR_SWPTR 0x10124 + +/************************* Port Registers ************************************/ +/* I2C registers */ +#define TXGBE_I2C_CON 0x14900 /* I2C Control */ +#define TXGBE_I2C_CON_SLAVE_DISABLE ((1 << 6)) +#define TXGBE_I2C_CON_RESTART_EN ((1 << 5)) +#define TXGBE_I2C_CON_10BITADDR_MASTER ((1 << 4)) +#define TXGBE_I2C_CON_10BITADDR_SLAVE ((1 << 3)) +#define TXGBE_I2C_CON_SPEED(_v) (((_v) & 0x3) << 1) +#define TXGBE_I2C_CON_MASTER_MODE ((1 << 0)) +#define TXGBE_I2C_TAR 0x14904 /* I2C Target Address */ +#define TXGBE_I2C_DATA_CMD 0x14910 /* I2C Rx/Tx Data Buf and Cmd */ +#define TXGBE_I2C_DATA_CMD_STOP ((1 << 9)) +#define TXGBE_I2C_DATA_CMD_READ ((1 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_DATA_CMD_WRITE ((0 << 8) | TXGBE_I2C_DATA_CMD_STOP) +#define TXGBE_I2C_SS_SCL_HCNT 0x14914 /* Standard speed I2C Clock SCL + * High Count */ +#define TXGBE_I2C_SS_SCL_LCNT 0x14918 /* Standard speed I2C Clock SCL + * Low Count */ +#define TXGBE_I2C_FS_SCL_HCNT 0x1491C /* Fast Mode and Fast Mode Plus + * I2C Clock SCL High Count */ +#define TXGBE_I2C_FS_SCL_LCNT 0x14920 /* Fast Mode and Fast Mode Plus + * I2C Clock SCL Low Count */ +#define TXGBE_I2C_HS_SCL_HCNT 0x14924 /* High speed I2C Clock SCL + * High Count */ +#define TXGBE_I2C_HS_SCL_LCNT 0x14928 /* High speed I2C Clock SCL Low + * Count */ +#define TXGBE_I2C_INTR_STAT 0x1492C /* I2C Interrupt Status */ +#define TXGBE_I2C_RAW_INTR_STAT 0x14934 /* I2C Raw Interrupt Status */ +#define TXGBE_I2C_INTR_STAT_RX_FULL ((0x1) << 2) +#define TXGBE_I2C_INTR_STAT_TX_EMPTY ((0x1) << 4) +#define TXGBE_I2C_INTR_MASK 0x14930 /* I2C Interrupt Mask */ +#define TXGBE_I2C_RX_TL 0x14938 /* I2C Receive FIFO Threshold */ +#define TXGBE_I2C_TX_TL 0x1493C /* I2C TX FIFO Threshold */ +#define TXGBE_I2C_CLR_INTR 0x14940 /* Clear Combined and Individual + * Int */ +#define TXGBE_I2C_CLR_RX_UNDER 0x14944 /* Clear RX_UNDER Interrupt */ +#define TXGBE_I2C_CLR_RX_OVER 0x14948 /* Clear RX_OVER Interrupt */ +#define TXGBE_I2C_CLR_TX_OVER 0x1494C /* Clear TX_OVER Interrupt */ +#define TXGBE_I2C_CLR_RD_REQ 0x14950 /* Clear RD_REQ Interrupt */ +#define TXGBE_I2C_CLR_TX_ABRT 0x14954 /* Clear TX_ABRT Interrupt */ +#define TXGBE_I2C_CLR_RX_DONE 0x14958 /* Clear RX_DONE Interrupt */ +#define TXGBE_I2C_CLR_ACTIVITY 0x1495C /* Clear ACTIVITY Interrupt */ +#define TXGBE_I2C_CLR_STOP_DET 0x14960 /* Clear STOP_DET Interrupt */ +#define TXGBE_I2C_CLR_START_DET 0x14964 /* Clear START_DET Interrupt */ +#define TXGBE_I2C_CLR_GEN_CALL 0x14968 /* Clear GEN_CALL Interrupt */ +#define TXGBE_I2C_ENABLE 0x1496C /* I2C Enable */ +#define TXGBE_I2C_STATUS 0x14970 /* 
I2C Status register */ +#define TXGBE_I2C_STATUS_MST_ACTIVITY ((1U << 5)) +#define TXGBE_I2C_TXFLR 0x14974 /* Transmit FIFO Level Reg */ +#define TXGBE_I2C_RXFLR 0x14978 /* Receive FIFO Level Reg */ +#define TXGBE_I2C_SDA_HOLD 0x1497C /* SDA hold time length reg */ +#define TXGBE_I2C_TX_ABRT_SOURCE 0x14980 /* I2C TX Abort Status Reg */ +#define TXGBE_I2C_SDA_SETUP 0x14994 /* I2C SDA Setup Register */ +#define TXGBE_I2C_ENABLE_STATUS 0x1499C /* I2C Enable Status Register */ +#define TXGBE_I2C_FS_SPKLEN 0x149A0 /* ISS and FS spike suppression + * limit */ +#define TXGBE_I2C_HS_SPKLEN 0x149A4 /* HS spike suppression limit */ +#define TXGBE_I2C_SCL_STUCK_TIMEOUT 0x149AC /* I2C SCL stuck at low timeout + * register */ +#define TXGBE_I2C_SDA_STUCK_TIMEOUT 0x149B0 /*I2C SDA Stuck at Low Timeout*/ +#define TXGBE_I2C_CLR_SCL_STUCK_DET 0x149B4 /* Clear SCL Stuck at Low Detect + * Interrupt */ +#define TXGBE_I2C_DEVICE_ID 0x149b8 /* I2C Device ID */ +#define TXGBE_I2C_COMP_PARAM_1 0x149f4 /* Component Parameter Reg */ +#define TXGBE_I2C_COMP_VERSION 0x149f8 /* Component Version ID */ +#define TXGBE_I2C_COMP_TYPE 0x149fc /* DesignWare Component Type + * Reg */ + +#define TXGBE_I2C_SLAVE_ADDR (0xA0 >> 1) +#define TXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 + + +/* port cfg Registers */ +#define TXGBE_CFG_PORT_CTL 0x14400 +#define TXGBE_CFG_PORT_ST 0x14404 +#define TXGBE_CFG_EX_VTYPE 0x14408 +#define TXGBE_CFG_LED_CTL 0x14424 +#define TXGBE_CFG_VXLAN 0x14410 +#define TXGBE_CFG_VXLAN_GPE 0x14414 +#define TXGBE_CFG_GENEVE 0x14418 +#define TXGBE_CFG_TEREDO 0x1441C +#define TXGBE_CFG_TCP_TIME 0x14420 +#define TXGBE_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) +/* port cfg bit */ +#define TXGBE_CFG_PORT_CTL_PFRSTD 0x00004000U /* Phy Function Reset Done */ +#define TXGBE_CFG_PORT_CTL_D_VLAN 0x00000001U /* double vlan*/ +#define TXGBE_CFG_PORT_CTL_ETAG_ETYPE_VLD 0x00000002U +#define TXGBE_CFG_PORT_CTL_QINQ 0x00000004U +#define TXGBE_CFG_PORT_CTL_DRV_LOAD 0x00000008U +#define TXGBE_CFG_PORT_CTL_FORCE_LKUP 0x00000010U /* force link up */ +#define TXGBE_CFG_PORT_CTL_DCB_EN 0x00000400U /* dcb enabled */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_MASK 0x00000800U /* number of TCs */ +#define TXGBE_CFG_PORT_CTL_NUM_TC_4 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_TC_8 0x00000800U +#define TXGBE_CFG_PORT_CTL_NUM_VT_MASK 0x00003000U /* number of TVs */ +#define TXGBE_CFG_PORT_CTL_NUM_VT_NONE 0x00000000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_16 0x00001000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_32 0x00002000U +#define TXGBE_CFG_PORT_CTL_NUM_VT_64 0x00003000U +/* Status Bit */ +#define TXGBE_CFG_PORT_ST_LINK_UP 0x00000001U +#define TXGBE_CFG_PORT_ST_LINK_10G 0x00000002U +#define TXGBE_CFG_PORT_ST_LINK_1G 0x00000004U +#define TXGBE_CFG_PORT_ST_LINK_100M 0x00000008U +#define TXGBE_CFG_PORT_ST_LAN_ID(_r) ((0x00000100U & (_r)) >> 8) +#define TXGBE_LINK_UP_TIME 90 +/* LED CTL Bit */ +#define TXGBE_CFG_LED_CTL_LINK_BSY_SEL 0x00000010U +#define TXGBE_CFG_LED_CTL_LINK_100M_SEL 0x00000008U +#define TXGBE_CFG_LED_CTL_LINK_1G_SEL 0x00000004U +#define TXGBE_CFG_LED_CTL_LINK_10G_SEL 0x00000002U +#define TXGBE_CFG_LED_CTL_LINK_UP_SEL 0x00000001U +#define TXGBE_CFG_LED_CTL_LINK_OD_SHIFT 16 +/* LED modes */ +#define TXGBE_LED_LINK_UP TXGBE_CFG_LED_CTL_LINK_UP_SEL +#define TXGBE_LED_LINK_10G TXGBE_CFG_LED_CTL_LINK_10G_SEL +#define TXGBE_LED_LINK_ACTIVE TXGBE_CFG_LED_CTL_LINK_BSY_SEL +#define TXGBE_LED_LINK_1G TXGBE_CFG_LED_CTL_LINK_1G_SEL +#define TXGBE_LED_LINK_100M TXGBE_CFG_LED_CTL_LINK_100M_SEL + +/* GPIO Registers */ +#define TXGBE_GPIO_DR 0x14800 +#define 
TXGBE_GPIO_DDR 0x14804 +#define TXGBE_GPIO_CTL 0x14808 +#define TXGBE_GPIO_INTEN 0x14830 +#define TXGBE_GPIO_INTMASK 0x14834 +#define TXGBE_GPIO_INTTYPE_LEVEL 0x14838 +#define TXGBE_GPIO_INTSTATUS 0x14844 +#define TXGBE_GPIO_EOI 0x1484C +/*GPIO bit */ +#define TXGBE_GPIO_DR_0 0x00000001U /* SDP0 Data Value */ +#define TXGBE_GPIO_DR_1 0x00000002U /* SDP1 Data Value */ +#define TXGBE_GPIO_DR_2 0x00000004U /* SDP2 Data Value */ +#define TXGBE_GPIO_DR_3 0x00000008U /* SDP3 Data Value */ +#define TXGBE_GPIO_DR_4 0x00000010U /* SDP4 Data Value */ +#define TXGBE_GPIO_DR_5 0x00000020U /* SDP5 Data Value */ +#define TXGBE_GPIO_DR_6 0x00000040U /* SDP6 Data Value */ +#define TXGBE_GPIO_DR_7 0x00000080U /* SDP7 Data Value */ +#define TXGBE_GPIO_DDR_0 0x00000001U /* SDP0 IO direction */ +#define TXGBE_GPIO_DDR_1 0x00000002U /* SDP1 IO direction */ +#define TXGBE_GPIO_DDR_2 0x00000004U /* SDP1 IO direction */ +#define TXGBE_GPIO_DDR_3 0x00000008U /* SDP3 IO direction */ +#define TXGBE_GPIO_DDR_4 0x00000010U /* SDP4 IO direction */ +#define TXGBE_GPIO_DDR_5 0x00000020U /* SDP5 IO direction */ +#define TXGBE_GPIO_DDR_6 0x00000040U /* SDP6 IO direction */ +#define TXGBE_GPIO_DDR_7 0x00000080U /* SDP7 IO direction */ +#define TXGBE_GPIO_CTL_SW_MODE 0x00000000U /* SDP software mode */ +#define TXGBE_GPIO_INTEN_1 0x00000002U /* SDP1 interrupt enable */ +#define TXGBE_GPIO_INTEN_2 0x00000004U /* SDP2 interrupt enable */ +#define TXGBE_GPIO_INTEN_3 0x00000008U /* SDP3 interrupt enable */ +#define TXGBE_GPIO_INTEN_5 0x00000020U /* SDP5 interrupt enable */ +#define TXGBE_GPIO_INTEN_6 0x00000040U /* SDP6 interrupt enable */ +#define TXGBE_GPIO_INTTYPE_LEVEL_2 0x00000004U /* SDP2 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_3 0x00000008U /* SDP3 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_5 0x00000020U /* SDP5 interrupt type level */ +#define TXGBE_GPIO_INTTYPE_LEVEL_6 0x00000040U /* SDP6 interrupt type level */ +#define TXGBE_GPIO_INTSTATUS_1 0x00000002U /* SDP1 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_2 0x00000004U /* SDP2 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_3 0x00000008U /* SDP3 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_5 0x00000020U /* SDP5 interrupt status */ +#define TXGBE_GPIO_INTSTATUS_6 0x00000040U /* SDP6 interrupt status */ +#define TXGBE_GPIO_EOI_2 0x00000004U /* SDP2 interrupt clear */ +#define TXGBE_GPIO_EOI_3 0x00000008U /* SDP3 interrupt clear */ +#define TXGBE_GPIO_EOI_5 0x00000020U /* SDP5 interrupt clear */ +#define TXGBE_GPIO_EOI_6 0x00000040U /* SDP6 interrupt clear */ + +/* TPH registers */ +#define TXGBE_CFG_TPH_TDESC 0x14F00 /* TPH conf for Tx desc write back */ +#define TXGBE_CFG_TPH_RDESC 0x14F04 /* TPH conf for Rx desc write back */ +#define TXGBE_CFG_TPH_RHDR 0x14F08 /* TPH conf for writing Rx pkt header */ +#define TXGBE_CFG_TPH_RPL 0x14F0C /* TPH conf for payload write access */ +/* TPH bit */ +#define TXGBE_CFG_TPH_TDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_TDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_TDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RDESC_EN 0x80000000U +#define TXGBE_CFG_TPH_RDESC_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RDESC_ST_SHIFT 16 +#define TXGBE_CFG_TPH_RHDR_EN 0x00008000U +#define TXGBE_CFG_TPH_RHDR_PH_SHIFT 13 +#define TXGBE_CFG_TPH_RHDR_ST_SHIFT 0 +#define TXGBE_CFG_TPH_RPL_EN 0x80000000U +#define TXGBE_CFG_TPH_RPL_PH_SHIFT 29 +#define TXGBE_CFG_TPH_RPL_ST_SHIFT 16 + +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define TXGBE_TDM_CTL 0x18000 +#define 
TXGBE_TDM_VF_TE(_i) (0x18004 + ((_i) * 4)) +#define TXGBE_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define TXGBE_TDM_LLQ(_i) (0x18040 + ((_i) * 4)) /* 4 of these (0-3) */ +#define TXGBE_TDM_ETYPE_LB_L 0x18050 +#define TXGBE_TDM_ETYPE_LB_H 0x18054 +#define TXGBE_TDM_ETYPE_AS_L 0x18058 +#define TXGBE_TDM_ETYPE_AS_H 0x1805C +#define TXGBE_TDM_MAC_AS_L 0x18060 +#define TXGBE_TDM_MAC_AS_H 0x18064 +#define TXGBE_TDM_VLAN_AS_L 0x18070 +#define TXGBE_TDM_VLAN_AS_H 0x18074 +#define TXGBE_TDM_TCP_FLG_L 0x18078 +#define TXGBE_TDM_TCP_FLG_H 0x1807C +#define TXGBE_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) /* 64 of these 0 - 63 */ +/* TDM CTL BIT */ +#define TXGBE_TDM_CTL_TE 0x1 /* Transmit Enable */ +#define TXGBE_TDM_CTL_PADDING 0x2 /* Padding byte number for ipsec ESP */ +#define TXGBE_TDM_CTL_VT_SHIFT 16 /* VLAN EtherType */ +/* Per VF Port VLAN insertion rules */ +#define TXGBE_TDM_VLAN_INS_VLANA_DEFAULT 0x40000000U /*Always use default VLAN*/ +#define TXGBE_TDM_VLAN_INS_VLANA_NEVER 0x80000000U /* Never insert VLAN tag */ + +#define TXGBE_TDM_RP_CTL 0x18400 +#define TXGBE_TDM_RP_CTL_RST ((0x1) << 0) +#define TXGBE_TDM_RP_CTL_RPEN ((0x1) << 2) +#define TXGBE_TDM_RP_CTL_RLEN ((0x1) << 3) +#define TXGBE_TDM_RP_IDX 0x1820C +#define TXGBE_TDM_RP_RATE 0x18404 +#define TXGBE_TDM_RP_RATE_MIN(v) ((0x3FFF & (v))) +#define TXGBE_TDM_RP_RATE_MAX(v) ((0x3FFF & (v)) << 16) + +/* qos */ +#define TXGBE_TDM_PBWARB_CTL 0x18200 +#define TXGBE_TDM_PBWARB_CFG(_i) (0x18220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_TDM_MMW 0x18208 +#define TXGBE_TDM_VM_CREDIT(_i) (0x18500 + ((_i) * 4)) +#define TXGBE_TDM_VM_CREDIT_VAL(v) (0x3FF & (v)) +/* fcoe */ +#define TXGBE_TDM_FC_EOF 0x18384 +#define TXGBE_TDM_FC_SOF 0x18380 +/* etag */ +#define TXGBE_TDM_ETAG_INS(_i) (0x18700 + ((_i) * 4)) /* 64 of these 0 - 63 */ +/* statistic */ +#define TXGBE_TDM_SEC_DRP 0x18304 +#define TXGBE_TDM_PKT_CNT 0x18308 +#define TXGBE_TDM_OS2BMC_CNT 0x18314 + +/**************************** Receive DMA registers **************************/ +/* receive control */ +#define TXGBE_RDM_ARB_CTL 0x12000 +#define TXGBE_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define TXGBE_RDM_RSC_CTL 0x1200C +#define TXGBE_RDM_ARB_CFG(_i) (0x12040 + ((_i) * 4)) /* 8 of these (0-7) */ +#define TXGBE_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) +#define TXGBE_RDM_PF_HIDE(_i) (0x12090 + ((_i) * 4)) +/* VFRE bitmask */ +#define TXGBE_RDM_VF_RE_ENABLE_ALL 0xFFFFFFFFU + +/* FCoE DMA Context Registers */ +#define TXGBE_RDM_FCPTRL 0x12410 +#define TXGBE_RDM_FCPTRH 0x12414 +#define TXGBE_RDM_FCBUF 0x12418 +#define TXGBE_RDM_FCBUF_VALID ((0x1)) /* DMA Context Valid */ +#define TXGBE_RDM_FCBUF_SIZE(_v) (((_v) & 0x3) << 3) /* User Buffer Size */ +#define TXGBE_RDM_FCBUF_COUNT(_v) (((_v) & 0xFF) << 8) /* Num of User Buf */ +#define TXGBE_RDM_FCBUF_OFFSET(_v) (((_v) & 0xFFFF) << 16) /* User Buf Offset*/ +#define TXGBE_RDM_FCRW 0x12420 +#define TXGBE_RDM_FCRW_FCSEL(_v) (((_v) & 0x1FF)) /* FC X_ID: 11 bits */ +#define TXGBE_RDM_FCRW_WE ((0x1) << 14) /* Write enable */ +#define TXGBE_RDM_FCRW_RE ((0x1) << 15) /* Read enable */ +#define TXGBE_RDM_FCRW_LASTSIZE(_v) (((_v) & 0xFFFF) << 16) + +/* statistic */ +#define TXGBE_RDM_DRP_PKT 0x12500 +#define TXGBE_RDM_BMC2OS_CNT 0x12510 + +/***************************** RDB registers *********************************/ +/* Flow Control Registers */ +#define TXGBE_RDB_RFCV(_i) (0x19200 + ((_i) * 4)) /* 4 of these (0-3)*/ +#define TXGBE_RDB_RFCL(_i) (0x19220 + ((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCH(_i) (0x19260 + 
((_i) * 4)) /* 8 of these (0-7)*/ +#define TXGBE_RDB_RFCRT 0x192A0 +#define TXGBE_RDB_RFCC 0x192A4 +/* receive packet buffer */ +#define TXGBE_RDB_PB_WRAP 0x19004 +#define TXGBE_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4)) +#define TXGBE_RDB_PB_CTL 0x19000 +#define TXGBE_RDB_UP2TC 0x19008 +#define TXGBE_RDB_PB_SZ_SHIFT 10 +#define TXGBE_RDB_PB_SZ_MASK 0x000FFC00U +/* lli interrupt */ +#define TXGBE_RDB_LLI_THRE 0x19080 +#define TXGBE_RDB_LLI_THRE_SZ(_v) ((0xFFF & (_v))) +#define TXGBE_RDB_LLI_THRE_UP(_v) ((0x7 & (_v)) << 16) +#define TXGBE_RDB_LLI_THRE_UP_SHIFT 16 + +/* ring assignment */ +#define TXGBE_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) +#define TXGBE_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define TXGBE_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) +#define TXGBE_RDB_RSS_TC 0x194F0 +#define TXGBE_RDB_RA_CTL 0x194F4 +#define TXGBE_RDB_5T_SA(_i) (0x19600 + ((_i) * 4)) /* Src Addr Q Filter */ +#define TXGBE_RDB_5T_DA(_i) (0x19800 + ((_i) * 4)) /* Dst Addr Q Filter */ +#define TXGBE_RDB_5T_SDP(_i) (0x19A00 + ((_i) * 4)) /*Src Dst Addr Q Filter*/ +#define TXGBE_RDB_5T_CTL0(_i) (0x19C00 + ((_i) * 4)) /* Five Tuple Q Filter */ +#define TXGBE_RDB_ETYPE_CLS(_i) (0x19100 + ((_i) * 4)) /* EType Q Select */ +#define TXGBE_RDB_SYN_CLS 0x19130 +#define TXGBE_RDB_5T_CTL1(_i) (0x19E00 + ((_i) * 4)) /*128 of these (0-127)*/ +/* Flow Director registers */ +#define TXGBE_RDB_FDIR_CTL 0x19500 +#define TXGBE_RDB_FDIR_HKEY 0x19568 +#define TXGBE_RDB_FDIR_SKEY 0x1956C +#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C +#define TXGBE_RDB_FDIR_SA4_MSK 0x19540 +#define TXGBE_RDB_FDIR_TCP_MSK 0x19544 +#define TXGBE_RDB_FDIR_UDP_MSK 0x19548 +#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560 +#define TXGBE_RDB_FDIR_IP6_MSK 0x19574 +#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570 +#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4)) +/* Flow Director Stats registers */ +#define TXGBE_RDB_FDIR_FREE 0x19538 +#define TXGBE_RDB_FDIR_LEN 0x1954C +#define TXGBE_RDB_FDIR_USE_ST 0x19550 +#define TXGBE_RDB_FDIR_FAIL_ST 0x19554 +#define TXGBE_RDB_FDIR_MATCH 0x19558 +#define TXGBE_RDB_FDIR_MISS 0x1955C +/* Flow Director Programming registers */ +#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 3 of these (0-2)*/ +#define TXGBE_RDB_FDIR_SA 0x19518 +#define TXGBE_RDB_FDIR_DA 0x1951C +#define TXGBE_RDB_FDIR_PORT 0x19520 +#define TXGBE_RDB_FDIR_FLEX 0x19524 +#define TXGBE_RDB_FDIR_HASH 0x19528 +#define TXGBE_RDB_FDIR_CMD 0x1952C +/* VM RSS */ +#define TXGBE_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40)) +#define TXGBE_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40)) +/* FCoE Redirection */ +#define TXGBE_RDB_FCRE_TBL_SIZE (8) /* Max entries in FCRETA */ +#define TXGBE_RDB_FCRE_CTL 0x19140 +#define TXGBE_RDB_FCRE_CTL_ENA ((0x1)) /* FCoE Redir Table Enable */ +#define TXGBE_RDB_FCRE_TBL(_i) (0x19160 + ((_i) * 4)) +#define TXGBE_RDB_FCRE_TBL_RING(_v) (((_v) & 0x7F)) /* output queue number */ +/* statistic */ +#define TXGBE_RDB_MPCNT(_i) (0x19040 + ((_i) * 4)) /* 8 of 3FA0-3FBC*/ +#define TXGBE_RDB_LXONTXC 0x1921C +#define TXGBE_RDB_LXOFFTXC 0x19218 +#define TXGBE_RDB_PXON2OFFCNT(_i) (0x19280 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_RDB_PXONTXC(_i) (0x192E0 + ((_i) * 4)) /* 8 of 3F00-3F1C*/ +#define TXGBE_RDB_PXOFFTXC(_i) (0x192C0 + ((_i) * 4)) /* 8 of 3F20-3F3C*/ +#define TXGBE_RDB_PFCMACDAL 0x19210 +#define TXGBE_RDB_PFCMACDAH 0x19214 +#define TXGBE_RDB_TXSWERR 0x1906C +#define TXGBE_RDB_TXSWERR_TB_FREE 0x3FF +/* rdb_pl_cfg reg mask */ +#define TXGBE_RDB_PL_CFG_L4HDR 0x2 +#define TXGBE_RDB_PL_CFG_L3HDR 0x4 +#define 
TXGBE_RDB_PL_CFG_L2HDR 0x8 +#define TXGBE_RDB_PL_CFG_TUN_OUTER_L2HDR 0x20 +#define TXGBE_RDB_PL_CFG_TUN_TUNHDR 0x10 +#define TXGBE_RDB_PL_CFG_RSS_PL_MASK 0x7 +#define TXGBE_RDB_PL_CFG_RSS_PL_SHIFT 29 +/* RQTC Bit Masks and Shifts */ +#define TXGBE_RDB_RSS_TC_SHIFT_TC(_i) ((_i) * 4) +#define TXGBE_RDB_RSS_TC_TC0_MASK (0x7 << 0) +#define TXGBE_RDB_RSS_TC_TC1_MASK (0x7 << 4) +#define TXGBE_RDB_RSS_TC_TC2_MASK (0x7 << 8) +#define TXGBE_RDB_RSS_TC_TC3_MASK (0x7 << 12) +#define TXGBE_RDB_RSS_TC_TC4_MASK (0x7 << 16) +#define TXGBE_RDB_RSS_TC_TC5_MASK (0x7 << 20) +#define TXGBE_RDB_RSS_TC_TC6_MASK (0x7 << 24) +#define TXGBE_RDB_RSS_TC_TC7_MASK (0x7 << 28) +/* Packet Buffer Initialization */ +#define TXGBE_MAX_PACKET_BUFFERS 8 +#define TXGBE_RDB_PB_SZ_48KB 0x00000030U /* 48KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_64KB 0x00000040U /* 64KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_80KB 0x00000050U /* 80KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_128KB 0x00000080U /* 128KB Packet Buffer */ +#define TXGBE_RDB_PB_SZ_MAX 0x00000200U /* 512KB Packet Buffer */ + + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + + +/* FCRTL Bit Masks */ +#define TXGBE_RDB_RFCL_XONE 0x80000000U /* XON enable */ +#define TXGBE_RDB_RFCH_XOFFE 0x80000000U /* Packet buffer fc enable */ +/* FCCFG Bit Masks */ +#define TXGBE_RDB_RFCC_RFCE_802_3X 0x00000008U /* Tx link FC enable */ +#define TXGBE_RDB_RFCC_RFCE_PRIORITY 0x00000010U /* Tx priority FC enable */ + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define TXGBE_RDB_5T_CTL1_SIZE_BP 0x00001000U /* Packet size bypass */ +#define TXGBE_RDB_5T_CTL1_LLI 0x00100000U /* Enables low latency Int */ +#define TXGBE_RDB_LLI_THRE_PRIORITY_MASK 0x00070000U /* VLAN priority mask */ +#define TXGBE_RDB_LLI_THRE_PRIORITY_EN 0x00080000U /* VLAN priority enable */ +#define TXGBE_RDB_LLI_THRE_CMN_EN 0x00100000U /* cmn packet receiveed */ + +#define TXGBE_MAX_RDB_5T_CTL0_FILTERS 128 +#define TXGBE_RDB_5T_CTL0_PROTOCOL_MASK 0x00000003U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_TCP 0x00000000U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_UDP 0x00000001U +#define TXGBE_RDB_5T_CTL0_PROTOCOL_SCTP 2 +#define TXGBE_RDB_5T_CTL0_PRIORITY_MASK 0x00000007U +#define TXGBE_RDB_5T_CTL0_PRIORITY_SHIFT 2 +#define TXGBE_RDB_5T_CTL0_POOL_MASK 0x0000003FU +#define TXGBE_RDB_5T_CTL0_POOL_SHIFT 8 +#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_MASK 0x0000001FU +#define TXGBE_RDB_5T_CTL0_5TUPLE_MASK_SHIFT 25 +#define TXGBE_RDB_5T_CTL0_SOURCE_ADDR_MASK 0x1E +#define TXGBE_RDB_5T_CTL0_DEST_ADDR_MASK 0x1D +#define TXGBE_RDB_5T_CTL0_SOURCE_PORT_MASK 0x1B +#define TXGBE_RDB_5T_CTL0_DEST_PORT_MASK 0x17 +#define TXGBE_RDB_5T_CTL0_PROTOCOL_COMP_MASK 0x0F +#define TXGBE_RDB_5T_CTL0_POOL_MASK_EN 0x40000000U +#define TXGBE_RDB_5T_CTL0_QUEUE_ENABLE 0x80000000U + +#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE 0x007F0000U /* bits 22:16 */ +#define TXGBE_RDB_ETYPE_CLS_RX_QUEUE_SHIFT 16 +#define TXGBE_RDB_ETYPE_CLS_LLI 0x20000000U /* bit 29 */ +#define TXGBE_RDB_ETYPE_CLS_QUEUE_EN 0x80000000U /* bit 31 */ + +/* Receive Config masks */ +#define TXGBE_RDB_PB_CTL_RXEN (0x80000000) /* Enable Receiver */ +#define TXGBE_RDB_PB_CTL_DISABLED 0x1 + +#define TXGBE_RDB_RA_CTL_RSS_EN 0x00000004U /* RSS Enable */ +#define TXGBE_RDB_RA_CTL_RSS_MASK 0xFFFF0000U +#define TXGBE_RDB_RA_CTL_RSS_IPV4_TCP 0x00010000U +#define 
TXGBE_RDB_RA_CTL_RSS_IPV4 0x00020000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6 0x00100000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6_TCP 0x00200000U +#define TXGBE_RDB_RA_CTL_RSS_IPV4_UDP 0x00400000U +#define TXGBE_RDB_RA_CTL_RSS_IPV6_UDP 0x00800000U + +enum txgbe_fdir_pballoc_type { + TXGBE_FDIR_PBALLOC_NONE = 0, + TXGBE_FDIR_PBALLOC_64K = 1, + TXGBE_FDIR_PBALLOC_128K = 2, + TXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define TXGBE_RDB_FDIR_CTL_PBALLOC_64K 0x00000001U +#define TXGBE_RDB_FDIR_CTL_PBALLOC_128K 0x00000002U +#define TXGBE_RDB_FDIR_CTL_PBALLOC_256K 0x00000003U +#define TXGBE_RDB_FDIR_CTL_INIT_DONE 0x00000008U +#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH 0x00000010U +#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS 0x00000020U +#define TXGBE_RDB_FDIR_CTL_REPORT_STATUS_ALWAYS 0x00000080U +#define TXGBE_RDB_FDIR_CTL_DROP_Q_SHIFT 8 +#define TXGBE_RDB_FDIR_CTL_FILTERMODE_SHIFT 21 +#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH_SHIFT 24 +#define TXGBE_RDB_FDIR_CTL_HASH_BITS_SHIFT 20 +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_MASK 0xF0000000U +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH_SHIFT 28 + + +#define TXGBE_RDB_FDIR_TCP_MSK_DPORTM_SHIFT 16 +#define TXGBE_RDB_FDIR_UDP_MSK_DPORTM_SHIFT 16 +#define TXGBE_RDB_FDIR_IP6_MSK_DIPM_SHIFT 16 +#define TXGBE_RDB_FDIR_OTHER_MSK_POOL 0x00000004U +#define TXGBE_RDB_FDIR_OTHER_MSK_L4P 0x00000008U +#define TXGBE_RDB_FDIR_OTHER_MSK_L3P 0x00000010U +#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_TYPE 0x00000020U +#define TXGBE_RDB_FDIR_OTHER_MSK_TUN_OUTIP 0x00000040U +#define TXGBE_RDB_FDIR_OTHER_MSK_TUN 0x00000080U + +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC 0x00000000U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_IP 0x00000001U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_HDR 0x00000002U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_L4_PAYLOAD 0x00000003U +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MSK 0x00000003U +#define TXGBE_RDB_FDIR_FLEX_CFG_MSK 0x00000004U +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST 0x000000F8U +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST_SHIFT 3 +#define TXGBE_RDB_FDIR_FLEX_CFG_VM_SHIFT 8 + +#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16 +#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16 +#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID_SHIFT 15 +#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX_SHIFT 16 + +#define TXGBE_RDB_FDIR_CMD_CMD_MASK 0x00000003U +#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW 0x00000001U +#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW 0x00000002U +#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT 0x00000003U +#define TXGBE_RDB_FDIR_CMD_FILTER_VALID 0x00000004U +#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE 0x00000008U +#define TXGBE_RDB_FDIR_CMD_IPv6DMATCH 0x00000010U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_UDP 0x00000020U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_TCP 0x00000040U +#define TXGBE_RDB_FDIR_CMD_L4TYPE_SCTP 0x00000060U +#define TXGBE_RDB_FDIR_CMD_IPV6 0x00000080U +#define TXGBE_RDB_FDIR_CMD_CLEARHT 0x00000100U +#define TXGBE_RDB_FDIR_CMD_DROP 0x00000200U +#define TXGBE_RDB_FDIR_CMD_INT 0x00000400U +#define TXGBE_RDB_FDIR_CMD_LAST 0x00000800U +#define TXGBE_RDB_FDIR_CMD_COLLISION 0x00001000U +#define TXGBE_RDB_FDIR_CMD_QUEUE_EN 0x00008000U +#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE_SHIFT 5 +#define TXGBE_RDB_FDIR_CMD_RX_QUEUE_SHIFT 16 +#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER_SHIFT 23 +#define TXGBE_RDB_FDIR_CMD_VT_POOL_SHIFT 24 +#define TXGBE_RDB_FDIR_INIT_DONE_POLL 10 +#define TXGBE_RDB_FDIR_CMD_CMD_POLL 10 +#define TXGBE_RDB_FDIR_CMD_TUNNEL_FILTER 0x00800000U +#define TXGBE_RDB_FDIR_DROP_QUEUE 127 +#define TXGBE_FDIR_INIT_DONE_POLL 10 + +/******************************* PSR Registers 
*******************************/ +/* psr control */ +#define TXGBE_PSR_CTL 0x15000 +#define TXGBE_PSR_VLAN_CTL 0x15088 +#define TXGBE_PSR_VM_CTL 0x151B0 +/* Header split receive */ +#define TXGBE_PSR_CTL_SW_EN 0x00040000U +#define TXGBE_PSR_CTL_RSC_DIS 0x00010000U +#define TXGBE_PSR_CTL_RSC_ACK 0x00020000U +#define TXGBE_PSR_CTL_PCSD 0x00002000U +#define TXGBE_PSR_CTL_IPPCSE 0x00001000U +#define TXGBE_PSR_CTL_BAM 0x00000400U +#define TXGBE_PSR_CTL_UPE 0x00000200U +#define TXGBE_PSR_CTL_MPE 0x00000100U +#define TXGBE_PSR_CTL_MFE 0x00000080U +#define TXGBE_PSR_CTL_MO 0x00000060U +#define TXGBE_PSR_CTL_TPE 0x00000010U +#define TXGBE_PSR_CTL_MO_SHIFT 5 +/* VT_CTL bitmasks */ +#define TXGBE_PSR_VM_CTL_DIS_DEFPL 0x20000000U /* disable default pool */ +#define TXGBE_PSR_VM_CTL_REPLEN 0x40000000U /* replication enabled */ +#define TXGBE_PSR_VM_CTL_POOL_SHIFT 7 +#define TXGBE_PSR_VM_CTL_POOL_MASK (0x3F << TXGBE_PSR_VM_CTL_POOL_SHIFT) +/* VLAN Control Bit Masks */ +#define TXGBE_PSR_VLAN_CTL_VET 0x0000FFFFU /* bits 0-15 */ +#define TXGBE_PSR_VLAN_CTL_CFI 0x10000000U /* bit 28 */ +#define TXGBE_PSR_VLAN_CTL_CFIEN 0x20000000U /* bit 29 */ +#define TXGBE_PSR_VLAN_CTL_VFE 0x40000000U /* bit 30 */ + +/* vm L2 contorl */ +#define TXGBE_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) +/* VMOLR bitmasks */ +#define TXGBE_PSR_VM_L2CTL_LBDIS 0x00000002U /* disable loopback */ +#define TXGBE_PSR_VM_L2CTL_LLB 0x00000004U /* local pool loopback */ +#define TXGBE_PSR_VM_L2CTL_UPE 0x00000010U /* unicast promiscuous */ +#define TXGBE_PSR_VM_L2CTL_TPE 0x00000020U /* ETAG promiscuous */ +#define TXGBE_PSR_VM_L2CTL_VACC 0x00000040U /* accept nomatched vlan */ +#define TXGBE_PSR_VM_L2CTL_VPE 0x00000080U /* vlan promiscuous mode */ +#define TXGBE_PSR_VM_L2CTL_AUPE 0x00000100U /* accept untagged packets */ +#define TXGBE_PSR_VM_L2CTL_ROMPE 0x00000200U /*accept packets in MTA tbl*/ +#define TXGBE_PSR_VM_L2CTL_ROPE 0x00000400U /* accept packets in UC tbl*/ +#define TXGBE_PSR_VM_L2CTL_BAM 0x00000800U /* accept broadcast packets*/ +#define TXGBE_PSR_VM_L2CTL_MPE 0x00001000U /* multicast promiscuous */ + +/* etype switcher 1st stage */ +#define TXGBE_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) /* EType Queue Filter */ +/* ETYPE Queue Filter/Select Bit Masks */ +#define TXGBE_MAX_PSR_ETYPE_SWC_FILTERS 8 +#define TXGBE_PSR_ETYPE_SWC_FCOE 0x08000000U /* bit 27 */ +#define TXGBE_PSR_ETYPE_SWC_TX_ANTISPOOF 0x20000000U /* bit 29 */ +#define TXGBE_PSR_ETYPE_SWC_1588 0x40000000U /* bit 30 */ +#define TXGBE_PSR_ETYPE_SWC_FILTER_EN 0x80000000U /* bit 31 */ +#define TXGBE_PSR_ETYPE_SWC_POOL_ENABLE (1 << 26) /* bit 26 */ +#define TXGBE_PSR_ETYPE_SWC_POOL_SHIFT 20 +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define TXGBE_PSR_ETYPE_SWC_FILTER_EAPOL 0 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FCOE 2 +#define TXGBE_PSR_ETYPE_SWC_FILTER_1588 3 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FIP 4 +#define TXGBE_PSR_ETYPE_SWC_FILTER_LLDP 5 +#define TXGBE_PSR_ETYPE_SWC_FILTER_LACP 6 +#define TXGBE_PSR_ETYPE_SWC_FILTER_FC 7 + +/* mcasst/ucast overflow tbl */ +#define TXGBE_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define TXGBE_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) + +/* vlan tbl */ +#define TXGBE_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + +/* mac switcher */ +#define TXGBE_PSR_MAC_SWC_AD_L 0x16200 +#define TXGBE_PSR_MAC_SWC_AD_H 0x16204 +#define TXGBE_PSR_MAC_SWC_VM_L 0x16208 +#define TXGBE_PSR_MAC_SWC_VM_H 0x1620C +#define TXGBE_PSR_MAC_SWC_IDX 0x16210 +/* RAH */ +#define TXGBE_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) +#define TXGBE_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define TXGBE_PSR_MAC_SWC_AD_H_AV 0x80000000U +#define TXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFFU + +/* vlan switch */ +#define TXGBE_PSR_VLAN_SWC 0x16220 +#define TXGBE_PSR_VLAN_SWC_VM_L 0x16224 +#define TXGBE_PSR_VLAN_SWC_VM_H 0x16228 +#define TXGBE_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ +/* VLAN pool filtering masks */ +#define TXGBE_PSR_VLAN_SWC_VIEN 0x80000000U /* filter is valid */ +#define TXGBE_PSR_VLAN_SWC_ENTRIES 64 +#define TXGBE_PSR_VLAN_SWC_VLANID_MASK 0x00000FFFU +#define TXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* cloud switch */ +#define TXGBE_PSR_CL_SWC_DST0 0x16240 +#define TXGBE_PSR_CL_SWC_DST1 0x16244 +#define TXGBE_PSR_CL_SWC_DST2 0x16248 +#define TXGBE_PSR_CL_SWC_DST3 0x1624c +#define TXGBE_PSR_CL_SWC_KEY 0x16250 +#define TXGBE_PSR_CL_SWC_CTL 0x16254 +#define TXGBE_PSR_CL_SWC_VM_L 0x16258 +#define TXGBE_PSR_CL_SWC_VM_H 0x1625c +#define TXGBE_PSR_CL_SWC_IDX 0x16260 + +#define TXGBE_PSR_CL_SWC_CTL_VLD 0x80000000U +#define TXGBE_PSR_CL_SWC_CTL_DST_MSK 0x00000002U +#define TXGBE_PSR_CL_SWC_CTL_KEY_MSK 0x00000001U + + +/* FCoE SOF/EOF */ +#define TXGBE_PSR_FC_EOF 0x15158 +#define TXGBE_PSR_FC_SOF 0x151F8 +/* FCoE Filter Context Registers */ +#define TXGBE_PSR_FC_FLT_CTXT 0x15108 +#define TXGBE_PSR_FC_FLT_CTXT_VALID ((0x1)) /* Filter Context Valid */ +#define TXGBE_PSR_FC_FLT_CTXT_FIRST ((0x1) << 1) /* Filter First */ +#define TXGBE_PSR_FC_FLT_CTXT_WR ((0x1) << 2) /* Write/Read Context */ +#define TXGBE_PSR_FC_FLT_CTXT_SEQID(_v) (((_v) & 0xFF) << 8) /* Sequence ID */ +#define TXGBE_PSR_FC_FLT_CTXT_SEQCNT(_v) (((_v) & 0xFFFF) << 16) /* Seq Count */ + +#define TXGBE_PSR_FC_FLT_RW 0x15110 +#define TXGBE_PSR_FC_FLT_RW_FCSEL(_v) (((_v) & 0x1FF)) /* FC OX_ID: 11 bits */ +#define TXGBE_PSR_FC_FLT_RW_RVALDT ((0x1) << 13) /* Fast Re-Validation */ +#define TXGBE_PSR_FC_FLT_RW_WE ((0x1) << 14) /* Write Enable */ +#define TXGBE_PSR_FC_FLT_RW_RE ((0x1) << 15) /* Read Enable */ + +#define TXGBE_PSR_FC_PARAM 0x151D8 + +/* FCoE Receive Control */ +#define TXGBE_PSR_FC_CTL 0x15100 +#define TXGBE_PSR_FC_CTL_FCOELLI ((0x1)) /* Low latency interrupt */ +#define TXGBE_PSR_FC_CTL_SAVBAD ((0x1) << 1) /* Save Bad Frames */ +#define TXGBE_PSR_FC_CTL_FRSTRDH ((0x1) << 2) /* EN 1st Read Header */ +#define TXGBE_PSR_FC_CTL_LASTSEQH ((0x1) << 3) /* EN Last Header in Seq */ +#define TXGBE_PSR_FC_CTL_ALLH ((0x1) << 4) /* EN All Headers */ +#define TXGBE_PSR_FC_CTL_FRSTSEQH ((0x1) << 5) /* EN 1st Seq. 
Header */ +#define TXGBE_PSR_FC_CTL_ICRC ((0x1) << 6) /* Ignore Bad FC CRC */ +#define TXGBE_PSR_FC_CTL_FCCRCBO ((0x1) << 7) /* FC CRC Byte Ordering */ +#define TXGBE_PSR_FC_CTL_FCOEVER(_v) (((_v) & 0xF) << 8) /* FCoE Version */ + +/* Management */ +#define TXGBE_PSR_MNG_FIT_CTL 0x15820 +/* Management Bit Fields and Masks */ +#define TXGBE_PSR_MNG_FIT_CTL_MPROXYE 0x40000000U /* Management Proxy Enable*/ +#define TXGBE_PSR_MNG_FIT_CTL_RCV_TCO_EN 0x00020000U /* Rcv TCO packet enable */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS 0x10000000U /* Ena BMC2OS and OS2BMC + *traffic */ +#define TXGBE_PSR_MNG_FIT_CTL_EN_BMC2OS_SHIFT 28 + +#define TXGBE_PSR_MNG_FLEX_SEL 0x1582C +#define TXGBE_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) +#define TXGBE_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) + +/* mirror */ +#define TXGBE_PSR_MR_CTL(_i) (0x15B00 + ((_i) * 4)) +#define TXGBE_PSR_MR_VLAN_L(_i) (0x15B10 + ((_i) * 8)) +#define TXGBE_PSR_MR_VLAN_H(_i) (0x15B14 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_L(_i) (0x15B30 + ((_i) * 8)) +#define TXGBE_PSR_MR_VM_H(_i) (0x15B34 + ((_i) * 8)) + +/* 1588 */ +#define TXGBE_PSR_1588_CTL 0x15188 /* Rx Time Sync Control register - RW */ +#define TXGBE_PSR_1588_STMPL 0x151E8 /* Rx timestamp Low - RO */ +#define TXGBE_PSR_1588_STMPH 0x151A4 /* Rx timestamp High - RO */ +#define TXGBE_PSR_1588_ATTRL 0x151A0 /* Rx timestamp attribute low - RO */ +#define TXGBE_PSR_1588_ATTRH 0x151A8 /* Rx timestamp attribute high - RO */ +#define TXGBE_PSR_1588_MSGTYPE 0x15120 /* RX message type register low - RW */ +/* 1588 CTL Bit */ +#define TXGBE_PSR_1588_CTL_VALID 0x00000001U /* Rx timestamp valid */ +#define TXGBE_PSR_1588_CTL_TYPE_MASK 0x0000000EU /* Rx type mask */ +#define TXGBE_PSR_1588_CTL_TYPE_L2_V2 0x00 +#define TXGBE_PSR_1588_CTL_TYPE_L4_V1 0x02 +#define TXGBE_PSR_1588_CTL_TYPE_L2_L4_V2 0x04 +#define TXGBE_PSR_1588_CTL_TYPE_EVENT_V2 0x0A +#define TXGBE_PSR_1588_CTL_ENABLED 0x00000010U /* Rx Timestamp enabled*/ +/* 1588 msg type bit */ +#define TXGBE_PSR_1588_MSGTYPE_V1_CTRLT_MASK 0x000000FFU +#define TXGBE_PSR_1588_MSGTYPE_V1_SYNC_MSG 0x00 +#define TXGBE_PSR_1588_MSGTYPE_V1_DELAY_REQ_MSG 0x01 +#define TXGBE_PSR_1588_MSGTYPE_V1_FOLLOWUP_MSG 0x02 +#define TXGBE_PSR_1588_MSGTYPE_V1_DELAY_RESP_MSG 0x03 +#define TXGBE_PSR_1588_MSGTYPE_V1_MGMT_MSG 0x04 +#define TXGBE_PSR_1588_MSGTYPE_V2_MSGID_MASK 0x0000FF00U +#define TXGBE_PSR_1588_MSGTYPE_V2_SYNC_MSG 0x0000 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_REQ_MSG 0x0100 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_REQ_MSG 0x0200 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_RESP_MSG 0x0300 +#define TXGBE_PSR_1588_MSGTYPE_V2_FOLLOWUP_MSG 0x0800 +#define TXGBE_PSR_1588_MSGTYPE_V2_DELAY_RESP_MSG 0x0900 +#define TXGBE_PSR_1588_MSGTYPE_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define TXGBE_PSR_1588_MSGTYPE_V2_ANNOUNCE_MSG 0x0B00 +#define TXGBE_PSR_1588_MSGTYPE_V2_SIGNALLING_MSG 0x0C00 +#define TXGBE_PSR_1588_MSGTYPE_V2_MGMT_MSG 0x0D00 + +/* Wake up registers */ +#define TXGBE_PSR_WKUP_CTL 0x15B80 +#define TXGBE_PSR_WKUP_IPV 0x15B84 +#define TXGBE_PSR_LAN_FLEX_SEL 0x15B8C +#define TXGBE_PSR_WKUP_IP4TBL(_i) (0x15BC0 + ((_i) * 4)) +#define TXGBE_PSR_WKUP_IP6TBL(_i) (0x15BE0 + ((_i) * 4)) +#define TXGBE_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define TXGBE_PSR_LAN_FLEX_CTL 0x15CFC +/* Wake Up Filter Control Bit */ +#define TXGBE_PSR_WKUP_CTL_LNKC 0x00000001U /* Link Status Change 
Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_MAG 0x00000002U /* Magic Packet Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_EX 0x00000004U /* Directed Exact Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_MC 0x00000008U /* Directed Multicast Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_BC 0x00000010U /* Broadcast Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_ARP 0x00000020U /* ARP Request Packet Wakeup Enable*/ +#define TXGBE_PSR_WKUP_CTL_IPV4 0x00000040U /* Directed IPv4 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IPV6 0x00000080U /* Directed IPv6 Pkt Wakeup Enable */ +#define TXGBE_PSR_WKUP_CTL_IGNORE_TCO 0x00008000U /* Ignore WakeOn TCO pkts */ +#define TXGBE_PSR_WKUP_CTL_FLX0 0x00010000U /* Flexible Filter 0 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX1 0x00020000U /* Flexible Filter 1 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX2 0x00040000U /* Flexible Filter 2 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX3 0x00080000U /* Flexible Filter 3 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX4 0x00100000U /* Flexible Filter 4 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX5 0x00200000U /* Flexible Filter 5 Ena */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS 0x000F0000U /* Mask for 4 flex filters */ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_6 0x003F0000U /* Mask for 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_FILTERS_8 0x00FF0000U /* Mask for 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FW_RST_WK 0x80000000U /* Ena wake on FW reset + * assertion */ +/* Mask for Ext. flex filters */ +#define TXGBE_PSR_WKUP_CTL_EXT_FLX_FILTERS 0x00300000U +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS 0x000F00FFU /* Mask all 4 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_6 0x003F00FFU /* Mask all 6 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_ALL_FILTERS_8 0x00FF00FFU /* Mask all 8 flex filters*/ +#define TXGBE_PSR_WKUP_CTL_FLX_OFFSET 16 /* Offset to the Flex Filters bits*/ + +#define TXGBE_PSR_MAX_SZ 0x15020 + +/****************************** TDB ******************************************/ +#define TXGBE_TDB_RFCS 0x1CE00 +#define TXGBE_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_TDB_MNG_TC 0x1CD10 +#define TXGBE_TDB_PRB_CTL 0x17010 +#define TXGBE_TDB_PBRARB_CTL 0x1CD00 +#define TXGBE_TDB_UP2TC 0x1C800 +#define TXGBE_TDB_PBRARB_CFG(_i) (0x1CD20 + ((_i) * 4)) /* 8 of (0-7) */ + +#define TXGBE_TDB_PB_SZ_20KB 0x00005000U /* 20KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_40KB 0x0000A000U /* 40KB Packet Buffer */ +#define TXGBE_TDB_PB_SZ_MAX 0x00028000U /* 160KB Packet Buffer */ +#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define TXGBE_MAX_PB 8 + +/****************************** TSEC *****************************************/ +/* Security Control Registers */ +#define TXGBE_TSC_CTL 0x1D000 +#define TXGBE_TSC_ST 0x1D004 +#define TXGBE_TSC_BUF_AF 0x1D008 +#define TXGBE_TSC_BUF_AE 0x1D00C +#define TXGBE_TSC_PRB_CTL 0x1D010 +#define TXGBE_TSC_MIN_IFG 0x1D020 +/* Security Bit Fields and Masks */ +#define TXGBE_TSC_CTL_SECTX_DIS 0x00000001U +#define TXGBE_TSC_CTL_TX_DIS 0x00000002U +#define TXGBE_TSC_CTL_STORE_FORWARD 0x00000004U +#define TXGBE_TSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_TSC_ST_SECTX_RDY 0x00000001U +#define TXGBE_TSC_ST_OFF_DIS 0x00000002U +#define TXGBE_TSC_ST_ECC_TXERR 0x00000004U + +/* LinkSec (MacSec) Registers */ +#define TXGBE_TSC_LSEC_CAP 0x1D200 +#define TXGBE_TSC_LSEC_CTL 0x1D204 +#define TXGBE_TSC_LSEC_SCI_L 0x1D208 +#define TXGBE_TSC_LSEC_SCI_H 0x1D20C +#define TXGBE_TSC_LSEC_SA 0x1D210 +#define TXGBE_TSC_LSEC_PKTNUM0 0x1D214 +#define TXGBE_TSC_LSEC_PKTNUM1 0x1D218 +#define 
TXGBE_TSC_LSEC_KEY0(_n) 0x1D21C +#define TXGBE_TSC_LSEC_KEY1(_n) 0x1D22C +#define TXGBE_TSC_LSEC_UNTAG_PKT 0x1D23C +#define TXGBE_TSC_LSEC_ENC_PKT 0x1D240 +#define TXGBE_TSC_LSEC_PROT_PKT 0x1D244 +#define TXGBE_TSC_LSEC_ENC_OCTET 0x1D248 +#define TXGBE_TSC_LSEC_PROT_OCTET 0x1D24C + +/* IpSec Registers */ +#define TXGBE_TSC_IPS_IDX 0x1D100 +#define TXGBE_TSC_IPS_IDX_WT 0x80000000U +#define TXGBE_TSC_IPS_IDX_RD 0x40000000U +#define TXGBE_TSC_IPS_IDX_SD_IDX 0x0U /* */ +#define TXGBE_TSC_IPS_IDX_EN 0x00000001U +#define TXGBE_TSC_IPS_SALT 0x1D104 +#define TXGBE_TSC_IPS_KEY(i) (0x1D108 + ((i) * 4)) + +/* 1588 */ +#define TXGBE_TSC_1588_CTL 0x1D400 /* Tx Time Sync Control reg */ +#define TXGBE_TSC_1588_STMPL 0x1D404 /* Tx timestamp value Low */ +#define TXGBE_TSC_1588_STMPH 0x1D408 /* Tx timestamp value High */ +#define TXGBE_TSC_1588_SYSTIML 0x1D40C /* System time register Low */ +#define TXGBE_TSC_1588_SYSTIMH 0x1D410 /* System time register High */ +#define TXGBE_TSC_1588_INC 0x1D414 /* Increment attributes reg */ +#define TXGBE_TSC_1588_INC_IV(v) (((v) & 0xFFFFFF)) +#define TXGBE_TSC_1588_INC_IP(v) (((v) & 0xFF) << 24) +#define TXGBE_TSC_1588_INC_IVP(v, p) \ + (((v) & 0xFFFFFF) | TXGBE_TSC_1588_INC_IP(p)) + +#define TXGBE_TSC_1588_ADJL 0x1D418 /* Time Adjustment Offset reg Low */ +#define TXGBE_TSC_1588_ADJH 0x1D41C /* Time Adjustment Offset reg High*/ +/* 1588 fields */ +#define TXGBE_TSC_1588_CTL_VALID 0x00000001U /* Tx timestamp valid */ +#define TXGBE_TSC_1588_CTL_ENABLED 0x00000010U /* Tx timestamping enabled */ + + +/********************************* RSEC **************************************/ +/* general rsec */ +#define TXGBE_RSC_CTL 0x17000 +#define TXGBE_RSC_ST 0x17004 +/* general rsec fields */ +#define TXGBE_RSC_CTL_SECRX_DIS 0x00000001U +#define TXGBE_RSC_CTL_RX_DIS 0x00000002U +#define TXGBE_RSC_CTL_CRC_STRIP 0x00000004U +#define TXGBE_RSC_CTL_IV_MSK_EN 0x00000008U +#define TXGBE_RSC_CTL_SAVE_MAC_ERR 0x00000040U +#define TXGBE_RSC_ST_RSEC_RDY 0x00000001U +#define TXGBE_RSC_ST_RSEC_OFLD_DIS 0x00000002U +#define TXGBE_RSC_ST_ECC_RXERR 0x00000004U + +/* link sec */ +#define TXGBE_RSC_LSEC_CAP 0x17200 +#define TXGBE_RSC_LSEC_CTL 0x17204 +#define TXGBE_RSC_LSEC_SCI_L 0x17208 +#define TXGBE_RSC_LSEC_SCI_H 0x1720C +#define TXGBE_RSC_LSEC_SA0 0x17210 +#define TXGBE_RSC_LSEC_SA1 0x17214 +#define TXGBE_RSC_LSEC_PKNUM0 0x17218 +#define TXGBE_RSC_LSEC_PKNUM1 0x1721C +#define TXGBE_RSC_LSEC_KEY0(_n) 0x17220 +#define TXGBE_RSC_LSEC_KEY1(_n) 0x17230 +#define TXGBE_RSC_LSEC_UNTAG_PKT 0x17240 +#define TXGBE_RSC_LSEC_DEC_OCTET 0x17244 +#define TXGBE_RSC_LSEC_VLD_OCTET 0x17248 +#define TXGBE_RSC_LSEC_BAD_PKT 0x1724C +#define TXGBE_RSC_LSEC_NOSCI_PKT 0x17250 +#define TXGBE_RSC_LSEC_UNSCI_PKT 0x17254 +#define TXGBE_RSC_LSEC_UNCHK_PKT 0x17258 +#define TXGBE_RSC_LSEC_DLY_PKT 0x1725C +#define TXGBE_RSC_LSEC_LATE_PKT 0x17260 +#define TXGBE_RSC_LSEC_OK_PKT(_n) 0x17264 +#define TXGBE_RSC_LSEC_INV_PKT(_n) 0x17274 +#define TXGBE_RSC_LSEC_BADSA_PKT 0x1727C +#define TXGBE_RSC_LSEC_INVSA_PKT 0x17280 + +/* ipsec */ +#define TXGBE_RSC_IPS_IDX 0x17100 +#define TXGBE_RSC_IPS_IDX_WT 0x80000000U +#define TXGBE_RSC_IPS_IDX_RD 0x40000000U +#define TXGBE_RSC_IPS_IDX_TB_IDX 0x0U /* */ +#define TXGBE_RSC_IPS_IDX_TB_IP 0x00000002U +#define TXGBE_RSC_IPS_IDX_TB_SPI 0x00000004U +#define TXGBE_RSC_IPS_IDX_TB_KEY 0x00000006U +#define TXGBE_RSC_IPS_IDX_EN 0x00000001U +#define TXGBE_RSC_IPS_IP(i) (0x17104 + ((i) * 4)) +#define TXGBE_RSC_IPS_SPI 0x17114 +#define TXGBE_RSC_IPS_IP_IDX 0x17118 +#define TXGBE_RSC_IPS_KEY(i) 
(0x1711C + ((i) * 4)) +#define TXGBE_RSC_IPS_SALT 0x1712C +#define TXGBE_RSC_IPS_MODE 0x17130 +#define TXGBE_RSC_IPS_MODE_IPV6 0x00000010 +#define TXGBE_RSC_IPS_MODE_DEC 0x00000008 +#define TXGBE_RSC_IPS_MODE_ESP 0x00000004 +#define TXGBE_RSC_IPS_MODE_AH 0x00000002 +#define TXGBE_RSC_IPS_MODE_VALID 0x00000001 + +/************************************** ETH PHY ******************************/ +#define TXGBE_XPCS_IDA_ADDR 0x13000 +#define TXGBE_XPCS_IDA_DATA 0x13004 +#define TXGBE_ETHPHY_IDA_ADDR 0x13008 +#define TXGBE_ETHPHY_IDA_DATA 0x1300C + +/************************************** MNG ********************************/ +#define TXGBE_MNG_FW_SM 0x1E000 +#define TXGBE_MNG_SW_SM 0x1E004 +#define TXGBE_MNG_SWFW_SYNC 0x1E008 +#define TXGBE_MNG_MBOX 0x1E100 +#define TXGBE_MNG_MBOX_CTL 0x1E044 +#define TXGBE_MNG_OS2BMC_CNT 0x1E094 +#define TXGBE_MNG_BMC2OS_CNT 0x1E090 + +/* Firmware Semaphore Register */ +#define TXGBE_MNG_FW_SM_MODE_MASK 0xE +#define TXGBE_MNG_FW_SM_TS_ENABLED 0x1 +/* SW Semaphore Register bitmasks */ +#define TXGBE_MNG_SW_SM_SM 0x00000001U /* software Semaphore */ + +/* SW_FW_SYNC definitions */ +#define TXGBE_MNG_SWFW_SYNC_SW_PHY 0x0001 +#define TXGBE_MNG_SWFW_SYNC_SW_FLASH 0x0008 +#define TXGBE_MNG_SWFW_SYNC_SW_MB 0x0004 + +#define TXGBE_MNG_MBOX_CTL_SWRDY 0x1 +#define TXGBE_MNG_MBOX_CTL_SWACK 0x2 +#define TXGBE_MNG_MBOX_CTL_FWRDY 0x4 +#define TXGBE_MNG_MBOX_CTL_FWACK 0x8 + +/************************************* ETH MAC *****************************/ +#define TXGBE_MAC_TX_CFG 0x11000 +#define TXGBE_MAC_RX_CFG 0x11004 +#define TXGBE_MAC_PKT_FLT 0x11008 +#define TXGBE_MAC_PKT_FLT_PR (0x1) /* promiscuous mode */ +#define TXGBE_MAC_PKT_FLT_RA (0x80000000) /* receive all */ +#define TXGBE_MAC_WDG_TIMEOUT 0x1100C +#define TXGBE_MAC_RX_FLOW_CTRL 0x11090 +#define TXGBE_MAC_ADDRESS0_HIGH 0x11300 +#define TXGBE_MAC_ADDRESS0_LOW 0x11304 + +#define TXGBE_MAC_TX_CFG_TE 0x00000001U +#define TXGBE_MAC_TX_CFG_SPEED_MASK 0x60000000U +#define TXGBE_MAC_TX_CFG_SPEED_10G 0x00000000U +#define TXGBE_MAC_TX_CFG_SPEED_1G 0x60000000U +#define TXGBE_MAC_RX_CFG_RE 0x00000001U +#define TXGBE_MAC_RX_CFG_JE 0x00000100U +#define TXGBE_MAC_RX_CFG_LM 0x00000400U +#define TXGBE_MAC_WDG_TIMEOUT_PWE 0x00000100U +#define TXGBE_MAC_WDG_TIMEOUT_WTO_MASK 0x0000000FU +#define TXGBE_MAC_WDG_TIMEOUT_WTO_DELTA 2 + +#define TXGBE_MAC_RX_FLOW_CTRL_RFE 0x00000001U /* receive fc enable */ +#define TXGBE_MAC_RX_FLOW_CTRL_PFCE 0x00000100U /* pfc enable */ + +#define TXGBE_MSCA 0x11200 +#define TXGBE_MSCA_RA(v) ((0xFFFF & (v))) +#define TXGBE_MSCA_PA(v) ((0x1F & (v)) << 16) +#define TXGBE_MSCA_DA(v) ((0x1F & (v)) << 21) +#define TXGBE_MSCC 0x11204 +#define TXGBE_MSCC_DATA(v) ((0xFFFF & (v))) +#define TXGBE_MSCC_CMD(v) ((0x3 & (v)) << 16) +enum TXGBE_MSCA_CMD_value { + TXGBE_MSCA_CMD_RSV = 0, + TXGBE_MSCA_CMD_WRITE, + TXGBE_MSCA_CMD_POST_READ, + TXGBE_MSCA_CMD_READ, +}; +#define TXGBE_MSCC_SADDR ((0x1U) << 18) +#define TXGBE_MSCC_CR(v) ((0x8U & (v)) << 19) +#define TXGBE_MSCC_BUSY ((0x1U) << 22) + +/* EEE registers */ + +/* statistic */ +#define TXGBE_MAC_LXONRXC 0x11E0C +#define TXGBE_MAC_LXOFFRXC 0x11988 +#define TXGBE_MAC_PXONRXC(_i) (0x11E30 + ((_i) * 4)) /* 8 of these */ +#define TXGBE_MAC_PXOFFRXC 0x119DC +#define TXGBE_RX_BC_FRAMES_GOOD_LOW 0x11918 +#define TXGBE_RX_CRC_ERROR_FRAMES_LOW 0x11928 +#define TXGBE_RX_LEN_ERROR_FRAMES_LOW 0x11978 +#define TXGBE_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define TXGBE_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define TXGBE_RX_FRAME_CNT_GOOD_BAD_LOW 0x11900 +#define 
TXGBE_TX_FRAME_CNT_GOOD_BAD_LOW 0x1181C +#define TXGBE_TX_MC_FRAMES_GOOD_LOW 0x1182C +#define TXGBE_TX_BC_FRAMES_GOOD_LOW 0x11824 +#define TXGBE_MMC_CONTROL 0x11800 +#define TXGBE_MMC_CONTROL_RSTONRD 0x4 /* reset on read */ +#define TXGBE_MMC_CONTROL_UP 0x700 + + +/********************************* BAR registers ***************************/ +/* Interrupt Registers */ +#define TXGBE_BME_CTL 0x12020 +#define TXGBE_PX_MISC_IC 0x100 +#define TXGBE_PX_MISC_ICS 0x104 +#define TXGBE_PX_MISC_IEN 0x108 +#define TXGBE_PX_MISC_IVAR 0x4FC +#define TXGBE_PX_GPIE 0x118 +#define TXGBE_PX_ISB_ADDR_L 0x160 +#define TXGBE_PX_ISB_ADDR_H 0x164 +#define TXGBE_PX_TCP_TIMER 0x170 +#define TXGBE_PX_ITRSEL 0x180 +#define TXGBE_PX_IC(_i) (0x120 + (_i) * 4) +#define TXGBE_PX_ICS(_i) (0x130 + (_i) * 4) +#define TXGBE_PX_IMS(_i) (0x140 + (_i) * 4) +#define TXGBE_PX_IMC(_i) (0x150 + (_i) * 4) +#define TXGBE_PX_IVAR(_i) (0x500 + (_i) * 4) +#define TXGBE_PX_ITR(_i) (0x200 + (_i) * 4) +#define TXGBE_PX_TRANSACTION_PENDING 0x168 +#define TXGBE_PX_INTA 0x110 + +/* Interrupt register bitmasks */ +/* Extended Interrupt Cause Read */ +#define TXGBE_PX_MISC_IC_ETH_LKDN 0x00000100U /* eth link down */ +#define TXGBE_PX_MISC_IC_DEV_RST 0x00000400U /* device reset event */ +#define TXGBE_PX_MISC_IC_TIMESYNC 0x00000800U /* time sync */ +#define TXGBE_PX_MISC_IC_STALL 0x00001000U /* trans or recv path is + * stalled */ +#define TXGBE_PX_MISC_IC_LINKSEC 0x00002000U /* Tx LinkSec require key + * exchange */ +#define TXGBE_PX_MISC_IC_RX_MISS 0x00004000U /* Packet Buffer Overrun */ +#define TXGBE_PX_MISC_IC_FLOW_DIR 0x00008000U /* FDir Exception */ +#define TXGBE_PX_MISC_IC_I2C 0x00010000U /* I2C interrupt */ +#define TXGBE_PX_MISC_IC_ETH_EVENT 0x00020000U /* err reported by MAC except + * eth link down */ +#define TXGBE_PX_MISC_IC_ETH_LK 0x00040000U /* link up */ +#define TXGBE_PX_MISC_IC_ETH_AN 0x00080000U /* link auto-nego done */ +#define TXGBE_PX_MISC_IC_INT_ERR 0x00100000U /* integrity error */ +#define TXGBE_PX_MISC_IC_SPI 0x00200000U /* SPI interface */ +#define TXGBE_PX_MISC_IC_VF_MBOX 0x00800000U /* VF-PF message box */ +#define TXGBE_PX_MISC_IC_GPIO 0x04000000U /* GPIO interrupt */ +#define TXGBE_PX_MISC_IC_PCIE_REQ_ERR 0x08000000U /* pcie request error int */ +#define TXGBE_PX_MISC_IC_OVER_HEAT 0x10000000U /* overheat detection */ +#define TXGBE_PX_MISC_IC_PROBE_MATCH 0x20000000U /* probe match */ +#define TXGBE_PX_MISC_IC_MNG_HOST_MBOX 0x40000000U /* mng mailbox */ +#define TXGBE_PX_MISC_IC_TIMER 0x80000000U /* tcp timer */ + +/* Extended Interrupt Cause Set */ +#define TXGBE_PX_MISC_ICS_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_ICS_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_ICS_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_ICS_STALL 0x00001000U +#define TXGBE_PX_MISC_ICS_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_ICS_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_ICS_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_ICS_I2C 0x00010000U +#define TXGBE_PX_MISC_ICS_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_ICS_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_ICS_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_ICS_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_ICS_SPI 0x00200000U +#define TXGBE_PX_MISC_ICS_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_ICS_GPIO 0x04000000U +#define TXGBE_PX_MISC_ICS_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_ICS_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_ICS_PROBE_MATCH 0x20000000U +#define TXGBE_PX_MISC_ICS_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_ICS_TIMER 0x80000000U + +/* Extended Interrupt Enable Set 
*/ +#define TXGBE_PX_MISC_IEN_ETH_LKDN 0x00000100U +#define TXGBE_PX_MISC_IEN_DEV_RST 0x00000400U +#define TXGBE_PX_MISC_IEN_TIMESYNC 0x00000800U +#define TXGBE_PX_MISC_IEN_STALL 0x00001000U +#define TXGBE_PX_MISC_IEN_LINKSEC 0x00002000U +#define TXGBE_PX_MISC_IEN_RX_MISS 0x00004000U +#define TXGBE_PX_MISC_IEN_FLOW_DIR 0x00008000U +#define TXGBE_PX_MISC_IEN_I2C 0x00010000U +#define TXGBE_PX_MISC_IEN_ETH_EVENT 0x00020000U +#define TXGBE_PX_MISC_IEN_ETH_LK 0x00040000U +#define TXGBE_PX_MISC_IEN_ETH_AN 0x00080000U +#define TXGBE_PX_MISC_IEN_INT_ERR 0x00100000U +#define TXGBE_PX_MISC_IEN_SPI 0x00200000U +#define TXGBE_PX_MISC_IEN_VF_MBOX 0x00800000U +#define TXGBE_PX_MISC_IEN_GPIO 0x04000000U +#define TXGBE_PX_MISC_IEN_PCIE_REQ_ERR 0x08000000U +#define TXGBE_PX_MISC_IEN_OVER_HEAT 0x10000000U +#define TXGBE_PX_MISC_IEN_PROBE_MATCH 0x20000000U +#define TXGBE_PX_MISC_IEN_MNG_HOST_MBOX 0x40000000U +#define TXGBE_PX_MISC_IEN_TIMER 0x80000000U + +#define TXGBE_PX_MISC_IEN_MASK ( \ + TXGBE_PX_MISC_IEN_ETH_LKDN| \ + TXGBE_PX_MISC_IEN_DEV_RST | \ + TXGBE_PX_MISC_IEN_ETH_EVENT | \ + TXGBE_PX_MISC_IEN_ETH_LK | \ + TXGBE_PX_MISC_IEN_ETH_AN | \ + TXGBE_PX_MISC_IEN_INT_ERR | \ + TXGBE_PX_MISC_IEN_VF_MBOX | \ + TXGBE_PX_MISC_IEN_GPIO | \ + TXGBE_PX_MISC_IEN_MNG_HOST_MBOX | \ + TXGBE_PX_MISC_IEN_STALL | \ + TXGBE_PX_MISC_IEN_PCIE_REQ_ERR | \ + TXGBE_PX_MISC_IEN_TIMER) + +/* General purpose Interrupt Enable */ +#define TXGBE_PX_GPIE_MODEL 0x00000001U +#define TXGBE_PX_GPIE_IMEN 0x00000002U +#define TXGBE_PX_GPIE_LL_INTERVAL 0x000000F0U +#define TXGBE_PX_GPIE_RSC_DELAY 0x00000700U + +/* Interrupt Vector Allocation Registers */ +#define TXGBE_PX_IVAR_REG_NUM 64 +#define TXGBE_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define TXGBE_MAX_INT_RATE 500000 +#define TXGBE_MIN_INT_RATE 980 +#define TXGBE_MAX_EITR 0x00000FF8U +#define TXGBE_MIN_EITR 8 +#define TXGBE_PX_ITR_ITR_INT_MASK 0x00000FF8U +#define TXGBE_PX_ITR_LLI_CREDIT 0x001f0000U +#define TXGBE_PX_ITR_LLI_MOD 0x00008000U +#define TXGBE_PX_ITR_CNT_WDIS 0x80000000U +#define TXGBE_PX_ITR_ITR_CNT 0x0FE00000U + +/* transmit DMA Registers */ +#define TXGBE_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40)) +#define TXGBE_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40)) +#define TXGBE_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) +#define TXGBE_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) +#define TXGBE_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +/* Transmit Config masks */ +#define TXGBE_PX_TR_CFG_ENABLE (1) /* Ena specific Tx Queue */ +#define TXGBE_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ +#define TXGBE_PX_TR_CFG_SWFLSH (1 << 26) /* Tx Desc. 
wr-bk flushing */ +#define TXGBE_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +#define TXGBE_PX_TR_CFG_THRE_SHIFT 8 + + +#define TXGBE_PX_TR_RPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_RP((q_per_pool)*(vf_number) + (vf_q_index))) +#define TXGBE_PX_TR_WPn(q_per_pool, vf_number, vf_q_index) \ + (TXGBE_PX_TR_WP((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Receive DMA Registers */ +#define TXGBE_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40)) +#define TXGBE_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40)) +#define TXGBE_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) +#define TXGBE_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) +#define TXGBE_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +/* PX_RR_CFG bit definitions */ +#define TXGBE_PX_RR_CFG_RR_SIZE_SHIFT 1 +#define TXGBE_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */ +#define TXGBE_PX_RR_CFG_BSIZEHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 12) + * = (<< 6) + */ +#define TXGBE_PX_RR_CFG_DROP_EN 0x40000000U +#define TXGBE_PX_RR_CFG_VLAN 0x80000000U +#define TXGBE_PX_RR_CFG_RSC 0x20000000U +#define TXGBE_PX_RR_CFG_CNTAG 0x10000000U +#define TXGBE_PX_RR_CFG_RSC_CNT_MD 0x08000000U +#define TXGBE_PX_RR_CFG_SPLIT_MODE 0x04000000U +#define TXGBE_PX_RR_CFG_STALL 0x02000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_1 0x00000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_4 0x00800000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_8 0x01000000U +#define TXGBE_PX_RR_CFG_MAX_RSCBUF_16 0x01800000U +#define TXGBE_PX_RR_CFG_RR_THER 0x00070000U +#define TXGBE_PX_RR_CFG_RR_THER_SHIFT 16 + +#define TXGBE_PX_RR_CFG_RR_HDR_SZ 0x0000F000U +#define TXGBE_PX_RR_CFG_RR_BUF_SZ 0x00000F00U +#define TXGBE_PX_RR_CFG_RR_SZ 0x0000007EU +#define TXGBE_PX_RR_CFG_RR_EN 0x00000001U + +/* statistic */ +#define TXGBE_PX_MPRC(_i) (0x1020 + ((_i) * 64)) +#define TXGBE_VX_GPRC(_i) (0x01014 + (0x40 * (_i))) +#define TXGBE_VX_GPTC(_i) (0x03014 + (0x40 * (_i))) +#define TXGBE_VX_GORC_LSB(_i) (0x01018 + (0x40 * (_i))) +#define TXGBE_VX_GORC_MSB(_i) (0x0101C + (0x40 * (_i))) +#define TXGBE_VX_GOTC_LSB(_i) (0x03018 + (0x40 * (_i))) +#define TXGBE_VX_GOTC_MSB(_i) (0x0301C + (0x40 * (_i))) +#define TXGBE_VX_MPRC(_i) (0x01020 + (0x40 * (_i))) + +#define TXGBE_PX_GPRC 0x12504 +#define TXGBE_PX_GPTC 0x18308 + +#define TXGBE_PX_GORC_LSB 0x12508 +#define TXGBE_PX_GORC_MSB 0x1250C + +#define TXGBE_PX_GOTC_LSB 0x1830C +#define TXGBE_PX_GOTC_MSB 0x18310 + +/************************************* Stats registers ************************/ +#define TXGBE_FCCRC 0x15160 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define TXGBE_FCOERPDC 0x12514 /* FCoE Rx Packets Dropped Count */ +#define TXGBE_FCLAST 0x12518 /* FCoE Last Error Count */ +#define TXGBE_FCOEPRC 0x15164 /* Number of FCoE Packets Received */ +#define TXGBE_FCOEDWRC 0x15168 /* Number of FCoE DWords Received */ +#define TXGBE_FCOEPTC 0x18318 /* Number of FCoE Packets Transmitted */ +#define TXGBE_FCOEDWTC 0x1831C /* Number of FCoE DWords Transmitted */ + +/*************************** Flash region definition *************************/ +/* EEC Register */ +#define TXGBE_EEC_SK 0x00000001U /* EEPROM Clock */ +#define TXGBE_EEC_CS 0x00000002U /* EEPROM Chip Select */ +#define TXGBE_EEC_DI 0x00000004U /* EEPROM Data In */ +#define TXGBE_EEC_DO 0x00000008U /* EEPROM Data Out */ +#define TXGBE_EEC_FWE_MASK 0x00000030U /* FLASH Write Enable */ +#define TXGBE_EEC_FWE_DIS 0x00000010U /* Disable FLASH writes */ +#define TXGBE_EEC_FWE_EN 0x00000020U /* Enable FLASH writes */ +#define TXGBE_EEC_FWE_SHIFT 4 +#define TXGBE_EEC_REQ 0x00000040U /* EEPROM Access Request */ 
+#define TXGBE_EEC_GNT 0x00000080U /* EEPROM Access Grant */ +#define TXGBE_EEC_PRES 0x00000100U /* EEPROM Present */ +#define TXGBE_EEC_ARD 0x00000200U /* EEPROM Auto Read Done */ +#define TXGBE_EEC_FLUP 0x00800000U /* Flash update command */ +#define TXGBE_EEC_SEC1VAL 0x02000000U /* Sector 1 Valid */ +#define TXGBE_EEC_FLUDONE 0x04000000U /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define TXGBE_EEC_ADDR_SIZE 0x00000400U +#define TXGBE_EEC_SIZE 0x00007800U /* EEPROM Size */ +#define TXGBE_EERD_MAX_ADDR 0x00003FFFU /* EERD alows 14 bits for addr. */ + +#define TXGBE_EEC_SIZE_SHIFT 11 +#define TXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define TXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define TXGBE_FLA_LOCKED 0x00000040U + +/* Part Number String Length */ +#define TXGBE_PBANUM_LENGTH 32 + +/* Checksum and EEPROM pointers */ +#define TXGBE_PBANUM_PTR_GUARD 0xFAFA +#define TXGBE_EEPROM_CHECKSUM 0x2F +#define TXGBE_EEPROM_SUM 0xBABA +#define TXGBE_ATLAS0_CONFIG_PTR 0x04 +#define TXGBE_PHY_PTR 0x04 +#define TXGBE_ATLAS1_CONFIG_PTR 0x05 +#define TXGBE_OPTION_ROM_PTR 0x05 +#define TXGBE_PCIE_GENERAL_PTR 0x06 +#define TXGBE_PCIE_CONFIG0_PTR 0x07 +#define TXGBE_PCIE_CONFIG1_PTR 0x08 +#define TXGBE_CORE0_PTR 0x09 +#define TXGBE_CORE1_PTR 0x0A +#define TXGBE_MAC0_PTR 0x0B +#define TXGBE_MAC1_PTR 0x0C +#define TXGBE_CSR0_CONFIG_PTR 0x0D +#define TXGBE_CSR1_CONFIG_PTR 0x0E +#define TXGBE_PCIE_ANALOG_PTR 0x02 +#define TXGBE_SHADOW_RAM_SIZE 0x4000 +#define TXGBE_TXGBE_PCIE_GENERAL_SIZE 0x24 +#define TXGBE_PCIE_CONFIG_SIZE 0x08 +#define TXGBE_EEPROM_LAST_WORD 0x800 +#define TXGBE_FW_PTR 0x0F +#define TXGBE_PBANUM0_PTR 0x05 +#define TXGBE_PBANUM1_PTR 0x06 +#define TXGBE_ALT_MAC_ADDR_PTR 0x37 +#define TXGBE_FREE_SPACE_PTR 0x3E +#define TXGBE_SW_REGION_PTR 0x1C + +#define TXGBE_SAN_MAC_ADDR_PTR 0x18 +#define TXGBE_DEVICE_CAPS 0x1C +#define TXGBE_EEPROM_VERSION_L 0x1D +#define TXGBE_EEPROM_VERSION_H 0x1E +#define TXGBE_ISCSI_BOOT_CONFIG 0x07 + +#define TXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define TXGBE_MAX_MSIX_VECTORS_SAPPHIRE 0x40 + +/* MSI-X capability fields masks */ +#define TXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define TXGBE_ISCSI_BOOT_CAPS 0x0033 +#define TXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define TXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define TXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define TXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define TXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define TXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define TXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define TXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define TXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define TXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define TXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define TXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define TXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define TXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define TXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define TXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define TXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define TXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define TXGBE_NVM_POLL_WRITE 1 /* Flag 
for polling for wr complete */ +#define TXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define TXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define TXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define TXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define TXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define TXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define TXGBE_EEPROM_CCD_BIT 2 + +#ifndef TXGBE_EEPROM_GRANT_ATTEMPTS +#define TXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +#ifndef TXGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define TXGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef TXGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define TXGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define TXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define TXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define TXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define TXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define TXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define TXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define TXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define TXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define TXGBE_FW_LESM_STATE_1 0x1 +#define TXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define TXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define TXGBE_FW_PATCH_VERSION_4 0x7 +#define TXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define TXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define TXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define TXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x17 /* Alt. 
SAN MAC block */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define TXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define TXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/******************************** PCI Bus Info *******************************/ +#define TXGBE_PCI_DEVICE_STATUS 0xAA +#define TXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define TXGBE_PCI_LINK_STATUS 0xB2 +#define TXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define TXGBE_PCI_LINK_WIDTH 0x3F0 +#define TXGBE_PCI_LINK_WIDTH_1 0x10 +#define TXGBE_PCI_LINK_WIDTH_2 0x20 +#define TXGBE_PCI_LINK_WIDTH_4 0x40 +#define TXGBE_PCI_LINK_WIDTH_8 0x80 +#define TXGBE_PCI_LINK_SPEED 0xF +#define TXGBE_PCI_LINK_SPEED_2500 0x1 +#define TXGBE_PCI_LINK_SPEED_5000 0x2 +#define TXGBE_PCI_LINK_SPEED_8000 0x3 +#define TXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define TXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define TXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET 4 +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_MASK \ + (0x0001 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) +#define TXGBE_PCIDEVCTRL2_RELAX_ORDER_ENABLE \ + (0x01 << TXGBE_PCIDEVCTRL2_RELAX_ORDER_OFFSET) + +#define TXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define TXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define TXGBE_PCIDEVCTRL2_50_100us 0x1 +#define TXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define TXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define TXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define TXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define TXGBE_PCIDEVCTRL2_1_2s 0xa +#define TXGBE_PCIDEVCTRL2_4_8s 0xd +#define TXGBE_PCIDEVCTRL2_17_34s 0xe + + +/******************* Receive Descriptor bit definitions **********************/ +#define TXGBE_RXD_IPSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000U +#define TXGBE_RXD_IPSEC_ERROR_INVALID_LENGTH 0x10000000U +#define TXGBE_RXD_IPSEC_ERROR_AUTH_FAILED 0x18000000U +#define TXGBE_RXD_IPSEC_ERROR_BIT_MASK 0x18000000U + +#define TXGBE_RXD_NEXTP_MASK 0x000FFFF0U /* Next Descriptor Index */ +#define TXGBE_RXD_NEXTP_SHIFT 0x00000004U +#define TXGBE_RXD_STAT_MASK 0x000fffffU /* Stat/NEXTP: bit 0-19 */ +#define TXGBE_RXD_STAT_DD 0x00000001U /* Done */ +#define TXGBE_RXD_STAT_EOP 0x00000002U /* End of Packet */ +#define TXGBE_RXD_STAT_CLASS_ID_MASK 0x0000001CU +#define TXGBE_RXD_STAT_CLASS_ID_TC_RSS 0x00000000U +#define TXGBE_RXD_STAT_CLASS_ID_FLM 0x00000004U /* FDir Match */ +#define TXGBE_RXD_STAT_CLASS_ID_SYN 0x00000008U +#define TXGBE_RXD_STAT_CLASS_ID_5_TUPLE 0x0000000CU +#define TXGBE_RXD_STAT_CLASS_ID_L2_ETYPE 0x00000010U +#define TXGBE_RXD_STAT_VP 0x00000020U /* IEEE VLAN Pkt */ +#define TXGBE_RXD_STAT_UDPCS 0x00000040U /* UDP xsum calculated */ +#define TXGBE_RXD_STAT_L4CS 0x00000080U /* L4 xsum calculated */ +#define TXGBE_RXD_STAT_IPCS 0x00000100U /* IP xsum calculated */ +#define TXGBE_RXD_STAT_PIF 0x00000200U /* passed in-exact filter */ +#define TXGBE_RXD_STAT_OUTERIPCS 0x00000400U /* Cloud IP xsum calculated*/ +#define TXGBE_RXD_STAT_VEXT 
0x00000800U /* 1st VLAN found */ +#define TXGBE_RXD_STAT_LLINT 0x00002000U /* Pkt caused Low Latency + * Int */ +#define TXGBE_RXD_STAT_TS 0x00004000U /* IEEE1588 Time Stamp */ +#define TXGBE_RXD_STAT_SECP 0x00008000U /* Security Processing */ +#define TXGBE_RXD_STAT_LB 0x00010000U /* Loopback Status */ +#define TXGBE_RXD_STAT_FCEOFS 0x00020000U /* FCoE EOF/SOF Stat */ +#define TXGBE_RXD_STAT_FCSTAT 0x000C0000U /* FCoE Pkt Stat */ +#define TXGBE_RXD_STAT_FCSTAT_NOMTCH 0x00000000U /* 00: No Ctxt Match */ +#define TXGBE_RXD_STAT_FCSTAT_NODDP 0x00040000U /* 01: Ctxt w/o DDP */ +#define TXGBE_RXD_STAT_FCSTAT_FCPRSP 0x00080000U /* 10: Recv. FCP_RSP */ +#define TXGBE_RXD_STAT_FCSTAT_DDP 0x000C0000U /* 11: Ctxt w/ DDP */ + +#define TXGBE_RXD_ERR_MASK 0xfff00000U /* RDESC.ERRORS mask */ +#define TXGBE_RXD_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define TXGBE_RXD_ERR_FCEOFE 0x80000000U /* FCEOFe/IPE */ +#define TXGBE_RXD_ERR_FCERR 0x00700000U /* FCERR/FDIRERR */ +#define TXGBE_RXD_ERR_FDIR_LEN 0x00100000U /* FDIR Length error */ +#define TXGBE_RXD_ERR_FDIR_DROP 0x00200000U /* FDIR Drop error */ +#define TXGBE_RXD_ERR_FDIR_COLL 0x00400000U /* FDIR Collision error */ +#define TXGBE_RXD_ERR_HBO 0x00800000U /*Header Buffer Overflow */ +#define TXGBE_RXD_ERR_OUTERIPER 0x04000000U /* CRC IP Header error */ +#define TXGBE_RXD_ERR_SECERR_MASK 0x18000000U +#define TXGBE_RXD_ERR_RXE 0x20000000U /* Any MAC Error */ +#define TXGBE_RXD_ERR_TCPE 0x40000000U /* TCP/UDP Checksum Error */ +#define TXGBE_RXD_ERR_IPE 0x80000000U /* IP Checksum Error */ + +#define TXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000U +#define TXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FFU + +#define TXGBE_RXD_RSSTYPE_MASK 0x0000000FU +#define TXGBE_RXD_TPID_MASK 0x000001C0U +#define TXGBE_RXD_TPID_SHIFT 6 +#define TXGBE_RXD_HDRBUFLEN_MASK 0x00007FE0U +#define TXGBE_RXD_RSCCNT_MASK 0x001E0000U +#define TXGBE_RXD_RSCCNT_SHIFT 17 +#define TXGBE_RXD_HDRBUFLEN_SHIFT 5 +#define TXGBE_RXD_SPLITHEADER_EN 0x00001000U +#define TXGBE_RXD_SPH 0x8000 + +/* RSS Hash results */ +#define TXGBE_RXD_RSSTYPE_NONE 0x00000000U +#define TXGBE_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define TXGBE_RXD_RSSTYPE_IPV4 0x00000002U +#define TXGBE_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define TXGBE_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define TXGBE_RXD_RSSTYPE_IPV6 0x00000005U +#define TXGBE_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define TXGBE_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define TXGBE_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define TXGBE_PTYPE_TUN_IPV4 (0x80) +#define TXGBE_PTYPE_TUN_IPV6 (0xC0) + +/* PKT for TUN */ +#define TXGBE_PTYPE_PKT_IPIP (0x00) /* IP+IP */ +#define TXGBE_PTYPE_PKT_IG (0x10) /* IP+GRE */ +#define TXGBE_PTYPE_PKT_IGM (0x20) /* IP+GRE+MAC */ +#define TXGBE_PTYPE_PKT_IGMV (0x30) /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define TXGBE_PTYPE_PKT_MAC (0x10) +#define TXGBE_PTYPE_PKT_IP (0x20) +#define TXGBE_PTYPE_PKT_FCOE (0x30) + +/* TYP for PKT=mac */ +#define TXGBE_PTYPE_TYP_MAC (0x01) +#define TXGBE_PTYPE_TYP_TS (0x02) /* time sync */ +#define TXGBE_PTYPE_TYP_FIP (0x03) +#define TXGBE_PTYPE_TYP_LLDP (0x04) +#define TXGBE_PTYPE_TYP_CNM (0x05) +#define TXGBE_PTYPE_TYP_EAPOL (0x06) +#define TXGBE_PTYPE_TYP_ARP (0x07) +/* TYP for PKT=ip */ +#define TXGBE_PTYPE_PKT_IPV6 (0x08) +#define TXGBE_PTYPE_TYP_IPFRAG (0x01) +#define TXGBE_PTYPE_TYP_IP (0x02) +#define TXGBE_PTYPE_TYP_UDP (0x03) +#define TXGBE_PTYPE_TYP_TCP (0x04) +#define TXGBE_PTYPE_TYP_SCTP (0x05) +/* TYP for PKT=fcoe */ +#define 
TXGBE_PTYPE_PKT_VFT (0x08) +#define TXGBE_PTYPE_TYP_FCOE (0x00) +#define TXGBE_PTYPE_TYP_FCDATA (0x01) +#define TXGBE_PTYPE_TYP_FCRDY (0x02) +#define TXGBE_PTYPE_TYP_FCRSP (0x03) +#define TXGBE_PTYPE_TYP_FCOTHER (0x04) + +/* Packet type non-ip values */ +enum txgbe_l2_ptypes { + TXGBE_PTYPE_L2_ABORTED = (TXGBE_PTYPE_PKT_MAC), + TXGBE_PTYPE_L2_MAC = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_MAC), + TXGBE_PTYPE_L2_TS = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_TS), + TXGBE_PTYPE_L2_FIP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_FIP), + TXGBE_PTYPE_L2_LLDP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_LLDP), + TXGBE_PTYPE_L2_CNM = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_CNM), + TXGBE_PTYPE_L2_EAPOL = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_EAPOL), + TXGBE_PTYPE_L2_ARP = (TXGBE_PTYPE_PKT_MAC | TXGBE_PTYPE_TYP_ARP), + + TXGBE_PTYPE_L2_IPV4_FRAG = (TXGBE_PTYPE_PKT_IP | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV4 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV4_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV4_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV4_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_TYP_SCTP), + TXGBE_PTYPE_L2_IPV6_FRAG = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IPFRAG), + TXGBE_PTYPE_L2_IPV6 = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_IP), + TXGBE_PTYPE_L2_IPV6_UDP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_UDP), + TXGBE_PTYPE_L2_IPV6_TCP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_TCP), + TXGBE_PTYPE_L2_IPV6_SCTP = (TXGBE_PTYPE_PKT_IP | TXGBE_PTYPE_PKT_IPV6 | + TXGBE_PTYPE_TYP_SCTP), + + TXGBE_PTYPE_L2_FCOE = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_TYP_FCOE), + TXGBE_PTYPE_L2_FCOE_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_TYP_FCOTHER), + TXGBE_PTYPE_L2_FCOE_VFT = (TXGBE_PTYPE_PKT_FCOE | TXGBE_PTYPE_PKT_VFT), + TXGBE_PTYPE_L2_FCOE_VFT_FCDATA = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCDATA), + TXGBE_PTYPE_L2_FCOE_VFT_FCRDY = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRDY), + TXGBE_PTYPE_L2_FCOE_VFT_FCRSP = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCRSP), + TXGBE_PTYPE_L2_FCOE_VFT_FCOTHER = (TXGBE_PTYPE_PKT_FCOE | + TXGBE_PTYPE_PKT_VFT | TXGBE_PTYPE_TYP_FCOTHER), + + TXGBE_PTYPE_L2_TUN4_MAC = (TXGBE_PTYPE_TUN_IPV4 | TXGBE_PTYPE_PKT_IGM), + TXGBE_PTYPE_L2_TUN6_MAC = (TXGBE_PTYPE_TUN_IPV6 | TXGBE_PTYPE_PKT_IGM), +}; + +#define TXGBE_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define TXGBE_PTYPE_TUN(_pt) ((_pt) & 0xC0) +#define TXGBE_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define TXGBE_PTYPE_TYP(_pt) ((_pt) & 0x0F) +#define TXGBE_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + +#define TXGBE_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) + +/* Security Processing bit Indication */ +#define TXGBE_RXD_LNKSEC_STATUS_SECP 0x00020000U +#define TXGBE_RXD_LNKSEC_ERROR_NO_SA_MATCH 0x08000000U +#define TXGBE_RXD_LNKSEC_ERROR_REPLAY_ERROR 0x10000000U +#define TXGBE_RXD_LNKSEC_ERROR_BIT_MASK 0x18000000U +#define TXGBE_RXD_LNKSEC_ERROR_BAD_SIG 0x18000000U + +/* Masks to determine if packets should be dropped due to frame errors */ +#define TXGBE_RXD_ERR_FRAME_ERR_MASK TXGBE_RXD_ERR_RXE + 
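+/*
+ * Editor's note: the write-back Rx descriptor packs completion status, error
+ * flags and the 8-bit PTYPE field into the dwords masked above (PTYPE being
+ * the value TXGBE_RXD_PKTTYPE() pulls out of lo_dword).  A minimal sketch of
+ * how these masks are meant to combine -- not code from this patch; the
+ * helper name and the raw u32/u8 parameters are illustrative only and assume
+ * the usual kernel types:
+ *
+ *	static bool txgbe_example_rx_frame_usable(u32 status_error, u8 ptype)
+ *	{
+ *		// Descriptor written back and carrying the end of the packet?
+ *		if (!(status_error & TXGBE_RXD_STAT_DD) ||
+ *		    !(status_error & TXGBE_RXD_STAT_EOP))
+ *			return false;
+ *
+ *		// Any MAC error (RXE) means the frame must be dropped.
+ *		if (status_error & TXGBE_RXD_ERR_FRAME_ERR_MASK)
+ *			return false;
+ *
+ *		// PTYPE = TUN:2 + PKT:2 + TYP:4; accept plain IP/TCP here.
+ *		return TXGBE_PTYPE_PKT(ptype) == TXGBE_PTYPE_PKT_IP &&
+ *		       TXGBE_PTYPE_TYPL4(ptype) == TXGBE_PTYPE_TYP_TCP;
+ *	}
+ */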
+/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define TXGBE_TXD_DTALEN_MASK 0x0000FFFFU /* Data buf length(bytes) */ +#define TXGBE_TXD_MAC_LINKSEC 0x00040000U /* Insert LinkSec */ +#define TXGBE_TXD_MAC_TSTAMP 0x00080000U /* IEEE1588 time stamp */ +#define TXGBE_TXD_IPSEC_SA_INDEX_MASK 0x000003FFU /* IPSec SA index */ +#define TXGBE_TXD_IPSEC_ESP_LEN_MASK 0x000001FFU /* IPSec ESP length */ +#define TXGBE_TXD_DTYP_MASK 0x00F00000U /* DTYP mask */ +#define TXGBE_TXD_DTYP_CTXT 0x00100000U /* Adv Context Desc */ +#define TXGBE_TXD_DTYP_DATA 0x00000000U /* Adv Data Descriptor */ +#define TXGBE_TXD_EOP 0x01000000U /* End of Packet */ +#define TXGBE_TXD_IFCS 0x02000000U /* Insert FCS */ +#define TXGBE_TXD_LINKSEC 0x04000000U /* enable linksec */ +#define TXGBE_TXD_RS 0x08000000U /* Report Status */ +#define TXGBE_TXD_ECU 0x10000000U /* DDP hdr type or iSCSI */ +#define TXGBE_TXD_QCN 0x20000000U /* cntag insertion enable */ +#define TXGBE_TXD_VLE 0x40000000U /* VLAN pkt enable */ +#define TXGBE_TXD_TSE 0x80000000U /* TCP Seg enable */ +#define TXGBE_TXD_STAT_DD 0x00000001U /* Descriptor Done */ +#define TXGBE_TXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define TXGBE_TXD_CC 0x00000080U /* Check Context */ +#define TXGBE_TXD_IPSEC 0x00000100U /* enable ipsec esp */ +#define TXGBE_TXD_IIPCS 0x00000400U +#define TXGBE_TXD_EIPCS 0x00000800U +#define TXGBE_TXD_L4CS 0x00000200U +#define TXGBE_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define TXGBE_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define TXGBE_TXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define TXGBE_TXD_TAG_TPID_SEL_SHIFT 11 +#define TXGBE_TXD_IPSEC_TYPE_SHIFT 14 +#define TXGBE_TXD_ENC_SHIFT 15 + +#define TXGBE_TXD_TUCMD_IPSEC_TYPE_ESP 0x00004000U /* IPSec Type ESP */ +#define TXGBE_TXD_TUCMD_IPSEC_ENCRYPT_EN 0x00008000/* ESP Encrypt Enable */ +#define TXGBE_TXD_TUCMD_FCOE 0x00010000U /* FCoE Frame Type */ +#define TXGBE_TXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define TXGBE_TXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define TXGBE_TXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define TXGBE_TXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define TXGBE_TXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define TXGBE_TXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define TXGBE_TXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define TXGBE_TXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define TXGBE_TXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define TXGBE_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define TXGBE_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define TXGBE_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define TXGBE_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ +#define TXGBE_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define TXGBE_TXD_TUNNEL_DECTTL_SHIFT 27 /* Adv ctxt DECTTL shift */ +#define TXGBE_TXD_TUNNEL_UDP (0x0ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) +#define TXGBE_TXD_TUNNEL_GRE (0x1ULL << TXGBE_TXD_TUNNEL_TYPE_SHIFT) + +/************ txgbe_type.h ************/ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define TXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define TXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define TXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define TXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define TXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D 
/* Priority in upper 3 of 16 */ +#define TXGBE_TX_DESC_SPECIAL_PRI_SHIFT TXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor */ +union txgbe_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor */ +union txgbe_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct txgbe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/************************* Flow Directory HASH *******************************/ +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
*/
+union txgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * dst_ip - 16 bytes
+ * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+
+/****************** Manageability Host Interface defines *********************/
+#define TXGBE_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
+#define TXGBE_HI_MAX_BLOCK_DWORD_LENGTH 64 /* Num of dwords in range */
+#define TXGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */
+#define TXGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */
+#define TXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define TXGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */
+#define TXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 244
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_RESET_CMD 0xDF
+#define FW_RESET_LEN 0x2
+#define FW_SETUP_MAC_LINK_CMD 0xE0
+#define FW_SETUP_MAC_LINK_LEN 0x2
+#define FW_FLASH_UPGRADE_START_CMD 0xE3
+#define FW_FLASH_UPGRADE_START_LEN 0x1
+#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4
+#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5
+#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4
+
+/* Host Interface Command Structures */
+struct txgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct txgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct txgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union txgbe_hic_hdr2 {
+ struct txgbe_hic_hdr2_req req;
+ struct txgbe_hic_hdr2_rsp rsp;
+};
+
+struct txgbe_hic_drv_info {
+ struct txgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult.
of dword2 */ +}; + +/* These need to be dword aligned */ +struct txgbe_hic_read_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_write_shadow_ram { + union txgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct txgbe_hic_disable_rxen { + struct txgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_reset { + struct txgbe_hic_hdr hdr; + u16 lan_id; + u16 reset_type; +}; + +struct txgbe_hic_phy_cfg { + struct txgbe_hic_hdr hdr; + u8 lan_id; + u8 phy_mode; + u16 phy_speed; +}; + +enum txgbe_module_id { + TXGBE_MODULE_EEPROM = 0, + TXGBE_MODULE_FIRMWARE, + TXGBE_MODULE_HARDWARE, + TXGBE_MODULE_PCIE +}; + +struct txgbe_hic_upg_start { + struct txgbe_hic_hdr hdr; + u8 module_id; + u8 pad2; + u16 pad3; +}; + +struct txgbe_hic_upg_write { + struct txgbe_hic_hdr hdr; + u8 data_len; + u8 eof_flag; + u16 check_sum; + u32 data[62]; +}; + +enum txgbe_upg_flag { + TXGBE_RESET_NONE = 0, + TXGBE_RESET_FIRMWARE, + TXGBE_RELOAD_EEPROM, + TXGBE_RESET_LAN +}; + +struct txgbe_hic_upg_verify { + struct txgbe_hic_hdr hdr; + u32 action_flag; +}; + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define TXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define TXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* DCB registers */ +#define TXGBE_DCB_MAX_TRAFFIC_CLASS 8 + +/* Power Management */ +/* DMA Coalescing configuration */ +struct txgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + + +/* Autonegotiation advertised speeds */ +typedef u32 txgbe_autoneg_advertised; +/* Link speed */ +#define TXGBE_LINK_SPEED_UNKNOWN 0 +#define TXGBE_LINK_SPEED_100_FULL 1 +#define TXGBE_LINK_SPEED_1GB_FULL 2 +#define TXGBE_LINK_SPEED_10GB_FULL 4 +#define TXGBE_LINK_SPEED_10_FULL 8 +#define TXGBE_LINK_SPEED_AUTONEG (TXGBE_LINK_SPEED_100_FULL | \ + TXGBE_LINK_SPEED_1GB_FULL | \ + TXGBE_LINK_SPEED_10GB_FULL | \ + TXGBE_LINK_SPEED_10_FULL) + +/* Physical layer type */ +typedef u32 txgbe_physical_layer; +#define TXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define TXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define TXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define TXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define TXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define TXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define TXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define TXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define TXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define TXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define TXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + + +/* Special PHY Init Routine */ +#define TXGBE_PHY_INIT_OFFSET_NL 0x002B +#define TXGBE_PHY_INIT_END_NL 0xFFFF +#define TXGBE_CONTROL_MASK_NL 0xF000 +#define TXGBE_DATA_MASK_NL 0x0FFF +#define TXGBE_CONTROL_SHIFT_NL 12 +#define TXGBE_DELAY_NL 0 +#define TXGBE_DATA_NL 1 +#define TXGBE_CONTROL_NL 0x000F +#define TXGBE_CONTROL_EOL_NL 0x0FFF +#define TXGBE_CONTROL_SOL_NL 0x0000 + +/* Flow Control Data Sheet 
defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define TXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define TXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define TXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define TXGBE_CABLE_DC 5556 /* Delay Copper */ +#define TXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define TXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define TXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define TXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define TXGBE_ID_X540 (TXGBE_MAC_DC + TXGBE_XAUI_DC + TXGBE_PHY_DC) + +/* Calculate Interface Delay */ +#define TXGBE_PHY_D 12800 +#define TXGBE_MAC_D 4096 +#define TXGBE_XAUI_D (2 * 1024) + +#define TXGBE_ID (TXGBE_MAC_D + TXGBE_XAUI_D + TXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define TXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define TXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define TXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID_X540) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + + +/* Calculate delay value in bit times */ +#define TXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (TXGBE_B2BT(_max_frame_link) + \ + TXGBE_PFC_D + \ + (2 * TXGBE_CABLE_DC) + \ + (2 * TXGBE_ID) + \ + TXGBE_HD) / 25 + 1) + \ + 2 * TXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define TXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * TXGBE_B2BT(_max_frame_tc) + \ + (36 * TXGBE_PCI_DELAY / 25) + 1) + +#define TXGBE_LOW_DV(_max_frame_tc) \ + (2 * TXGBE_LOW_DV_X540(_max_frame_tc)) + + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. 
+ */ +enum txgbe_fcoe_boot_status { + txgbe_fcoe_bootstatus_disabled = 0, + txgbe_fcoe_bootstatus_enabled = 1, + txgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum txgbe_eeprom_type { + txgbe_eeprom_uninitialized = 0, + txgbe_eeprom_spi, + txgbe_flash, + txgbe_eeprom_none /* No NVM support */ +}; + +enum txgbe_phy_type { + txgbe_phy_unknown = 0, + txgbe_phy_none, + txgbe_phy_tn, + txgbe_phy_aq, + txgbe_phy_cu_unknown, + txgbe_phy_qt, + txgbe_phy_xaui, + txgbe_phy_nl, + txgbe_phy_sfp_passive_tyco, + txgbe_phy_sfp_passive_unknown, + txgbe_phy_sfp_active_unknown, + txgbe_phy_sfp_avago, + txgbe_phy_sfp_ftl, + txgbe_phy_sfp_ftl_active, + txgbe_phy_sfp_unknown, + txgbe_phy_sfp_intel, + txgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + txgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 + * 4 SFP_DA_CU_CORE1 + * 5 SFP_SR/LR_CORE0 + * 6 SFP_SR/LR_CORE1 + */ +enum txgbe_sfp_type { + txgbe_sfp_type_da_cu = 0, + txgbe_sfp_type_sr = 1, + txgbe_sfp_type_lr = 2, + txgbe_sfp_type_da_cu_core0 = 3, + txgbe_sfp_type_da_cu_core1 = 4, + txgbe_sfp_type_srlr_core0 = 5, + txgbe_sfp_type_srlr_core1 = 6, + txgbe_sfp_type_da_act_lmt_core0 = 7, + txgbe_sfp_type_da_act_lmt_core1 = 8, + txgbe_sfp_type_1g_cu_core0 = 9, + txgbe_sfp_type_1g_cu_core1 = 10, + txgbe_sfp_type_1g_sx_core0 = 11, + txgbe_sfp_type_1g_sx_core1 = 12, + txgbe_sfp_type_1g_lx_core0 = 13, + txgbe_sfp_type_1g_lx_core1 = 14, + txgbe_sfp_type_not_present = 0xFFFE, + txgbe_sfp_type_unknown = 0xFFFF +}; + +enum txgbe_media_type { + txgbe_media_type_unknown = 0, + txgbe_media_type_fiber, + txgbe_media_type_copper, + txgbe_media_type_backplane, + txgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum txgbe_fc_mode { + txgbe_fc_none = 0, + txgbe_fc_rx_pause, + txgbe_fc_tx_pause, + txgbe_fc_full, + txgbe_fc_default +}; + +/* Smart Speed Settings */ +#define TXGBE_SMARTSPEED_MAX_RETRIES 3 +enum txgbe_smart_speed { + txgbe_smart_speed_auto = 0, + txgbe_smart_speed_on, + txgbe_smart_speed_off +}; + +/* PCI bus types */ +enum txgbe_bus_type { + txgbe_bus_type_unknown = 0, + txgbe_bus_type_pci, + txgbe_bus_type_pcix, + txgbe_bus_type_pci_express, + txgbe_bus_type_internal, + txgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum txgbe_bus_speed { + txgbe_bus_speed_unknown = 0, + txgbe_bus_speed_33 = 33, + txgbe_bus_speed_66 = 66, + txgbe_bus_speed_100 = 100, + txgbe_bus_speed_120 = 120, + txgbe_bus_speed_133 = 133, + txgbe_bus_speed_2500 = 2500, + txgbe_bus_speed_5000 = 5000, + txgbe_bus_speed_8000 = 8000, + txgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum txgbe_bus_width { + txgbe_bus_width_unknown = 0, + txgbe_bus_width_pcie_x1 = 1, + txgbe_bus_width_pcie_x2 = 2, + txgbe_bus_width_pcie_x4 = 4, + txgbe_bus_width_pcie_x8 = 8, + txgbe_bus_width_32 = 32, + txgbe_bus_width_64 = 64, + txgbe_bus_width_reserved +}; + +struct txgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct txgbe_bus_info { + enum txgbe_bus_speed speed; + enum txgbe_bus_width width; + enum txgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct txgbe_fc_info { + u32 high_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[TXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON 
*/ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum txgbe_fc_mode current_mode; /* FC mode in effect */ + enum txgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct txgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct txgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct txgbe_eeprom_operations { + s32 (*init_params)(struct txgbe_hw *); + s32 (*read)(struct txgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct txgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct txgbe_hw *, u16, u16); + s32 (*write_buffer)(struct txgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct txgbe_hw *, u16 *); + s32 (*update_checksum)(struct txgbe_hw *); + s32 (*calc_checksum)(struct txgbe_hw *); +}; + +struct txgbe_flash_operations { + s32 (*init_params)(struct txgbe_hw *); + s32 (*read_buffer)(struct txgbe_hw *, u32, u32, u32 *); + s32 (*write_buffer)(struct txgbe_hw *, u32, u32, u32 *); +}; + +struct txgbe_mac_operations { + s32 (*init_hw)(struct txgbe_hw *); + s32 (*reset_hw)(struct txgbe_hw *); + s32 (*start_hw)(struct txgbe_hw *); + s32 (*clear_hw_cntrs)(struct txgbe_hw *); + enum txgbe_media_type (*get_media_type)(struct txgbe_hw *); + s32 (*get_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct txgbe_hw *, u8 *); + s32 (*get_device_caps)(struct txgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct txgbe_hw *, u16 *, u16 *); + s32 (*stop_adapter)(struct txgbe_hw *); + s32 (*get_bus_info)(struct txgbe_hw *); + void (*set_lan_id)(struct txgbe_hw *); + s32 (*enable_rx_dma)(struct txgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct txgbe_hw *); + s32 (*enable_sec_rx_path)(struct txgbe_hw *); + s32 (*acquire_swfw_sync)(struct txgbe_hw *, u32); + void (*release_swfw_sync)(struct txgbe_hw *, u32); + + /* Link */ + void (*disable_tx_laser)(struct txgbe_hw *); + void (*enable_tx_laser)(struct txgbe_hw *); + void (*flap_tx_laser)(struct txgbe_hw *); + s32 (*setup_link)(struct txgbe_hw *, u32, bool); + s32 (*setup_mac_link)(struct txgbe_hw *, u32, bool); + s32 (*check_link)(struct 
txgbe_hw *, u32 *, bool *, bool); + s32 (*get_link_capabilities)(struct txgbe_hw *, u32 *, + bool *); + void (*set_rate_select_speed)(struct txgbe_hw *, u32); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct txgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct txgbe_hw *, u32); + s32 (*led_off)(struct txgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct txgbe_hw *, u32, u8 *, u64, u32); + s32 (*clear_rar)(struct txgbe_hw *, u32); + s32 (*insert_mac_addr)(struct txgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct txgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct txgbe_hw *, u32); + s32 (*clear_vmdq)(struct txgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct txgbe_hw *); + s32 (*update_uc_addr_list)(struct txgbe_hw *, u8 *, u32, + txgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct txgbe_hw *, u8 *, u32, + txgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct txgbe_hw *); + s32 (*disable_mc)(struct txgbe_hw *); + s32 (*clear_vfta)(struct txgbe_hw *); + s32 (*set_vfta)(struct txgbe_hw *, u32, u32, bool); + s32 (*set_vlvf)(struct txgbe_hw *, u32, u32, bool, bool *); + s32 (*init_uta_tables)(struct txgbe_hw *); + void (*set_mac_anti_spoofing)(struct txgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct txgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct txgbe_hw *); + s32 (*setup_fc)(struct txgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct txgbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct txgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw); + void (*get_rtrup2tc)(struct txgbe_hw *hw, u8 *map); + void (*disable_rx)(struct txgbe_hw *hw); + void (*enable_rx)(struct txgbe_hw *hw); + void (*set_source_address_pruning)(struct txgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct txgbe_hw *, bool, int); + s32 (*dmac_config)(struct txgbe_hw *hw); + s32 (*setup_eee)(struct txgbe_hw *hw, bool enable_eee); +}; + +struct txgbe_phy_operations { + s32 (*identify)(struct txgbe_hw *); + s32 (*identify_sfp)(struct txgbe_hw *); + s32 (*init)(struct txgbe_hw *); + s32 (*reset)(struct txgbe_hw *); + s32 (*read_reg)(struct txgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct txgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct txgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct txgbe_hw *, u32, u32, u16); + u32 (*setup_link)(struct txgbe_hw *, u32, bool); + s32 (*setup_internal_link)(struct txgbe_hw *); + u32 (*setup_link_speed)(struct txgbe_hw *, u32, bool); + s32 (*check_link)(struct txgbe_hw *, u32 *, bool *); + s32 (*get_firmware_version)(struct txgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct txgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct txgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct txgbe_hw *, u8, u8 *); + s32 (*read_i2c_eeprom)(struct txgbe_hw *, u8, u8 *); + s32 (*write_i2c_eeprom)(struct txgbe_hw *, u8, u8); + s32 (*check_overtemp)(struct txgbe_hw *); +}; + +struct txgbe_eeprom_info { + struct txgbe_eeprom_operations ops; + enum txgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; + u16 sw_region_offset; +}; + +struct txgbe_flash_info { + struct txgbe_flash_operations ops; + u32 semaphore_delay; + u32 dword_size; + u16 address_bits; +}; + + +#define TXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct txgbe_mac_info { + struct txgbe_mac_operations ops; + u8 addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 
perm_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[TXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define TXGBE_MAX_MTA 128 +#define TXGBE_MAX_VFTA_ENTRIES 128 + u32 mta_shadow[TXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_shadow[TXGBE_MAX_VFTA_ENTRIES]; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_sr_pcs_ctl2; + u32 orig_sr_pma_mmd_ctl1; + u32 orig_sr_an_mmd_ctl; + u32 orig_sr_an_mmd_adv_reg2; + u32 orig_vr_xs_or_pcs_mmd_digi_ctl1; + u8 san_mac_rar_index; + bool get_link_status; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct txgbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct txgbe_dmac_config dmac_config; + bool set_lben; +}; + +struct txgbe_phy_info { + struct txgbe_phy_operations ops; + enum txgbe_phy_type type; + u32 addr; + u32 id; + enum txgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum txgbe_media_type media_type; + u32 phy_semaphore_mask; + u8 lan_id; /* to be delete */ + txgbe_autoneg_advertised autoneg_advertised; + enum txgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + txgbe_physical_layer link_mode; +}; + +#include "txgbe_mbx.h" + +struct txgbe_mbx_operations { + void (*init_params)(struct txgbe_hw *hw); + s32 (*read)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct txgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct txgbe_hw *, u16); + s32 (*check_for_ack)(struct txgbe_hw *, u16); + s32 (*check_for_rst)(struct txgbe_hw *, u16); +}; + +struct txgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct txgbe_mbx_info { + struct txgbe_mbx_operations ops; + struct txgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +enum txgbe_reset_type { + TXGBE_LAN_RESET = 0, + TXGBE_SW_RESET, + TXGBE_GLOBAL_RESET +}; + +enum txgbe_link_status { + TXGBE_LINK_STATUS_NONE = 0, + TXGBE_LINK_STATUS_KX, + TXGBE_LINK_STATUS_KX4 +}; + +struct txgbe_hw { + u8 __iomem *hw_addr; + void *back; + struct txgbe_mac_info mac; + struct txgbe_addr_filter_info addr_ctrl; + struct txgbe_fc_info fc; + struct txgbe_phy_info phy; + struct txgbe_eeprom_info eeprom; + struct txgbe_flash_info flash; + struct txgbe_bus_info bus; + struct txgbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + enum txgbe_reset_type reset_type; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; +#if defined(TXGBE_SUPPORT_KYLIN_FT) + bool Fdir_enabled; +#endif + MTD_DEV phy_dev; + enum txgbe_link_status link_status; + u16 subsystem_id; + u16 tpid[8]; +}; + +#define TCALL(hw, func, args...) (((hw)->func != NULL) \ + ? 
(hw)->func((hw), ##args) : TXGBE_NOT_IMPLEMENTED)
+
+/* Error Codes */
+#define TXGBE_ERR 100
+#define TXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+/* (-TXGBE_ERR, TXGBE_ERR): reserved for non-txgbe defined error code */
+#define TXGBE_ERR_NOSUPP -(TXGBE_ERR+0)
+#define TXGBE_ERR_EEPROM -(TXGBE_ERR+1)
+#define TXGBE_ERR_EEPROM_CHECKSUM -(TXGBE_ERR+2)
+#define TXGBE_ERR_PHY -(TXGBE_ERR+3)
+#define TXGBE_ERR_CONFIG -(TXGBE_ERR+4)
+#define TXGBE_ERR_PARAM -(TXGBE_ERR+5)
+#define TXGBE_ERR_MAC_TYPE -(TXGBE_ERR+6)
+#define TXGBE_ERR_UNKNOWN_PHY -(TXGBE_ERR+7)
+#define TXGBE_ERR_LINK_SETUP -(TXGBE_ERR+8)
+#define TXGBE_ERR_ADAPTER_STOPPED -(TXGBE_ERR+9)
+#define TXGBE_ERR_INVALID_MAC_ADDR -(TXGBE_ERR+10)
+#define TXGBE_ERR_DEVICE_NOT_SUPPORTED -(TXGBE_ERR+11)
+#define TXGBE_ERR_MASTER_REQUESTS_PENDING -(TXGBE_ERR+12)
+#define TXGBE_ERR_INVALID_LINK_SETTINGS -(TXGBE_ERR+13)
+#define TXGBE_ERR_AUTONEG_NOT_COMPLETE -(TXGBE_ERR+14)
+#define TXGBE_ERR_RESET_FAILED -(TXGBE_ERR+15)
+#define TXGBE_ERR_SWFW_SYNC -(TXGBE_ERR+16)
+#define TXGBE_ERR_PHY_ADDR_INVALID -(TXGBE_ERR+17)
+#define TXGBE_ERR_I2C -(TXGBE_ERR+18)
+#define TXGBE_ERR_SFP_NOT_SUPPORTED -(TXGBE_ERR+19)
+#define TXGBE_ERR_SFP_NOT_PRESENT -(TXGBE_ERR+20)
+#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(TXGBE_ERR+21)
+#define TXGBE_ERR_NO_SAN_ADDR_PTR -(TXGBE_ERR+22)
+#define TXGBE_ERR_FDIR_REINIT_FAILED -(TXGBE_ERR+23)
+#define TXGBE_ERR_EEPROM_VERSION -(TXGBE_ERR+24)
+#define TXGBE_ERR_NO_SPACE -(TXGBE_ERR+25)
+#define TXGBE_ERR_OVERTEMP -(TXGBE_ERR+26)
+#define TXGBE_ERR_UNDERTEMP -(TXGBE_ERR+27)
+#define TXGBE_ERR_FC_NOT_NEGOTIATED -(TXGBE_ERR+28)
+#define TXGBE_ERR_FC_NOT_SUPPORTED -(TXGBE_ERR+29)
+#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE -(TXGBE_ERR+30)
+#define TXGBE_ERR_PBA_SECTION -(TXGBE_ERR+31)
+#define TXGBE_ERR_INVALID_ARGUMENT -(TXGBE_ERR+32)
+#define TXGBE_ERR_HOST_INTERFACE_COMMAND -(TXGBE_ERR+33)
+#define TXGBE_ERR_OUT_OF_MEM -(TXGBE_ERR+34)
+#define TXGBE_ERR_FEATURE_NOT_SUPPORTED -(TXGBE_ERR+36)
+#define TXGBE_ERR_EEPROM_PROTECTED_REGION -(TXGBE_ERR+37)
+#define TXGBE_ERR_FDIR_CMD_INCOMPLETE -(TXGBE_ERR+38)
+#define TXGBE_ERR_FLASH_LOADING_FAILED -(TXGBE_ERR+39)
+#define TXGBE_ERR_XPCS_POWER_UP_FAILED -(TXGBE_ERR+40)
+#define TXGBE_ERR_FW_RESP_INVALID -(TXGBE_ERR+41)
+#define TXGBE_ERR_PHY_INIT_NOT_DONE -(TXGBE_ERR+42)
+#define TXGBE_ERR_TIMEOUT -(TXGBE_ERR+43)
+#define TXGBE_ERR_TOKEN_RETRY -(TXGBE_ERR+44)
+#define TXGBE_ERR_REGISTER -(TXGBE_ERR+45)
+#define TXGBE_ERR_MBX -(TXGBE_ERR+46)
+#define TXGBE_ERR_MNG_ACCESS_FAILED -(TXGBE_ERR+47)
+
+/**
+ * register operations
+ **/
+/* read register */
+#define TXGBE_DEAD_READ_RETRIES 10
+#define TXGBE_DEAD_READ_REG 0xdeadbeefU
+#define TXGBE_DEAD_READ_REG64 0xdeadbeefdeadbeefULL
+#define TXGBE_FAILED_READ_REG 0xffffffffU
+#define TXGBE_FAILED_READ_REG64 0xffffffffffffffffULL
+
+static inline bool TXGBE_REMOVED(void __iomem *addr)
+{
+ return unlikely(!addr);
+}
+
+static inline u32
+txgbe_rd32(u8 __iomem *base)
+{
+ return readl(base);
+}
+
+static inline u32
+rd32(struct txgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *base = READ_ONCE(hw->hw_addr);
+ u32 val = TXGBE_FAILED_READ_REG;
+
+ if (unlikely(!base))
+ return val;
+
+ val = txgbe_rd32(base + reg);
+
+ return val;
+}
+#define rd32a(a, reg, offset) ( \
+ rd32((a), (reg) + ((offset) << 2)))
+
+static inline u32
+rd32m(struct txgbe_hw *hw, u32 reg, u32 mask)
+{
+ u8 __iomem *base = READ_ONCE(hw->hw_addr);
+ u32 val = TXGBE_FAILED_READ_REG;
+
+ if (unlikely(!base))
+ return val;
+
+ val = txgbe_rd32(base + reg);
+ if (unlikely(val ==
TXGBE_FAILED_READ_REG)) + return val; + + return val & mask; +} + +/* write register */ +static inline void +txgbe_wr32(u8 __iomem *base, u32 val) +{ + writel(val, base); +} + +static inline void +wr32(struct txgbe_hw *hw, u32 reg, u32 val) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + + if (unlikely(!base)) + return; + + txgbe_wr32(base + reg, val); +} +#define wr32a(a, reg, off, val) \ + wr32((a), (reg) + ((off) << 2), (val)) + +static inline void +wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field) +{ + u8 __iomem *base = READ_ONCE(hw->hw_addr); + u32 val; + + if (unlikely(!base)) + return; + + val = txgbe_rd32(base + reg); + if (unlikely(val == TXGBE_FAILED_READ_REG)) + return; + + val = ((val & ~mask) | (field & mask)); + txgbe_wr32(base + reg, val); +} + +/* poll register */ +#define TXGBE_MDIO_TIMEOUT 1000 +#define TXGBE_I2C_TIMEOUT 1000 +#define TXGBE_SPI_TIMEOUT 1000 +static inline s32 +po32m(struct txgbe_hw *hw, u32 reg, + u32 mask, u32 field, int usecs, int count) +{ + int loop; + + loop = (count ? count : (usecs + 9) / 10); + usecs = (loop ? (usecs + loop - 1) / loop : 0); + + count = loop; + do { + u32 value = rd32(hw, reg); + if ((value & mask) == (field & mask)) { + break; + } + + if (loop-- <= 0) + break; + + udelay(usecs); + } while (true); + + return (count - loop <= count ? 0 : TXGBE_ERR_TIMEOUT); +} + +#define TXGBE_WRITE_FLUSH(H) rd32(H, TXGBE_MIS_PWR) + +#endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 052b3d2c07a1222b7f902477f30d8eef5a75ab22..c662c6f5bee340f4a0b73e09ae36c7eb4bc878d3 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static void __init get_mac_address(struct net_device *dev) +static void get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 08381ef8bdb481df81eb448b2f8ceda61c13ba8e..41d30f55c946be682dd6d790f5ab5ca9f6c88cea 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1371,13 +1371,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->dma_buff_base_p = dma_handle; netdev_dbg(ndev, "IO address space :%pR\n", res); - netdev_dbg(ndev, "IO address size :%d\n", resource_size(res)); + netdev_dbg(ndev, "IO address size :%zd\n", + (size_t)resource_size(res)); netdev_dbg(ndev, "IO address (mapped) :0x%p\n", pldat->net_base); netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); - netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); - netdev_dbg(ndev, "DMA buffer P address :0x%08x\n", - pldat->dma_buff_base_p); + netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size); + netdev_dbg(ndev, "DMA buffer P address :%pad\n", + &pldat->dma_buff_base_p); netdev_dbg(ndev, "DMA buffer V address :0x%p\n", pldat->dma_buff_base_v); @@ -1424,8 +1425,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) if (ret) goto err_out_unregister_netdev; - netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", - res->start, ndev->irq); + netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n", + (unsigned long)res->start, ndev->irq); phydev = ndev->phydev; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 
index 0ea141ece19ea14c02e79300d8aefe249b0db3ee..6547a9dd59355459251e92cd724f8f879fcf8bc7 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter) return -EINVAL; } val = nx_get_bios_version(adapter); - netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); + if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios)) + return -EIO; if ((__force u32)val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a60e1c8d470a08734e1a5c02816fd112d2ec0a70..32e786a3952b1327a83fe310a98b72438e011d77 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -914,7 +914,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); -void qed_link_update(struct qed_hwfn *hwfn); +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a6abe4d2fec817b429dc5897d74ba..5900a506bf8df06f89a9edf3bcaa607b134eaa33 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, p_data->arr[type].dont_add_vlan0 = true; /* QM reconf data */ - if (p_hwfn->hw_info.personality == personality) + if (app_tlv && p_hwfn->hw_info.personality == personality) qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); /* Configure dcbx vlan priority in doorbell block for roce EDPM */ @@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type) { enum qed_pci_personality personality; @@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, personality = qed_dcbx_app_update[i].personality; - qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable, prio, tc, type, personality); } } @@ -318,8 +318,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enable = true; } - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, - priority, tc, type); + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true, + enable, priority, tc, type); } } @@ -340,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, continue; enable = (type == DCBX_PROTOCOL_ETH) ? 
false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable, priority, tc, type); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725d7bbc8481a1f8cfd748087a55bef..a6a9688db307f50750ef5103a3fa612f48b2e94f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -179,6 +179,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_iscsi_free(p_hwfn); qed_ooo_free(p_hwfn); } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + qed_rdma_info_free(p_hwfn); + qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); @@ -469,13 +473,21 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, /* get pq index according to PQ_FLAGS */ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, - u32 pq_flags) + unsigned long pq_flags) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ - if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) + if (bitmap_weight(&pq_flags, + sizeof(pq_flags) * BITS_PER_BYTE) > 1) { + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); goto err; + } + + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); + goto err; + } switch (pq_flags) { case PQ_FLAGS_RLS: @@ -499,8 +511,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, } err: - DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); - return NULL; + return &qm_info->start_pq; } /* save pq index in qm info */ @@ -524,20 +535,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) { u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); + if (max_tc == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_MCOS); + return p_hwfn->qm_info.start_pq; + } + if (tc > max_tc) DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); } u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) { u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); + if (max_vf == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_VFS); + return p_hwfn->qm_info.start_pq; + } + if (vf > max_vf) DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); } u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) @@ -1074,6 +1097,12 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; } + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + rc = qed_rdma_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + /* DMA info initialization */ rc = qed_dmae_info_alloc(p_hwfn); if (rc) @@ -2091,11 +2120,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) if (!p_ptt) return -EAGAIN; - /* If roce info is allocated it means roce is initialized and should - * be enabled in searcher. 
- */ if (p_hwfn->p_rdma_info && - p_hwfn->b_rdma_enabled_in_prs) + p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); /* Re-open incoming traffic */ @@ -3070,6 +3096,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, + u64 db_phys_addr, enum qed_pci_personality personality) { int rc = 0; @@ -3077,6 +3104,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* Split PCI bars evenly between hwfns */ p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; + p_hwfn->db_phys_addr = db_phys_addr; if (IS_VF(p_hwfn->cdev)) return qed_vf_hw_prepare(p_hwfn); @@ -3172,7 +3200,9 @@ int qed_hw_prepare(struct qed_dev *cdev, /* Initialize the first hwfn - will learn number of hwfns */ rc = qed_hw_prepare_single(p_hwfn, cdev->regview, - cdev->doorbells, personality); + cdev->doorbells, + cdev->db_phys_addr, + personality); if (rc) return rc; @@ -3181,22 +3211,25 @@ int qed_hw_prepare(struct qed_dev *cdev, /* Initialize the rest of the hwfns */ if (cdev->num_hwfns > 1) { void __iomem *p_regview, *p_doorbell; - u8 __iomem *addr; + u64 db_phys_addr; + u32 offset; /* adjust bar offset for second engine */ - addr = cdev->regview + - qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, - BAR_ID_0) / 2; - p_regview = addr; + offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_0) / 2; + p_regview = cdev->regview + offset; + + offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_1) / 2; + + p_doorbell = cdev->doorbells + offset; - addr = cdev->doorbells + - qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, - BAR_ID_1) / 2; - p_doorbell = addr; + db_phys_addr = cdev->db_phys_addr + offset; /* prepare second hw function */ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, - p_doorbell, personality); + p_doorbell, db_phys_addr, + personality); /* in case of error, need to free the previously * initiliazed hwfn 0. diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cc1b373c0ace56e08564d3527de9f5da3f87b4e4..46dc93d3b9b53db6586b791bc6ffcf65b756daba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", fcoe_pf_params->num_cqs, p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); - return -EINVAL; + rc = -EINVAL; + goto err; } p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); @@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); if (rc) - return rc; + goto err; cxt_info.iid = dummy_cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc) { DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", dummy_cid); - return rc; + goto err; } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->tstorm_ag_context.flags3, @@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); return rc; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a71382687ef2bedca91adcd50d8c16dcbd1cd3c7..bed8f48e029ac239291a738ff22fe4b97e2d8ce7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12669,8 +12669,9 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, - MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_RESERVED, MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, MFW_DRV_MSG_MAX }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0f0aba793352c406404b53306f4bfb454b70a8b6..f9e475075d3ea249225b47538344403c59294db0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, snprintf(bit_name, 30, p_aeu->bit_name, num); else - strncpy(bit_name, + strlcpy(bit_name, p_aeu->bit_name, 30); /* We now need to pass bitmask in its @@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) */ do { index = p_sb_attn->sb_index; + /* finish reading index before the loop condition */ + dma_rmb(); attn_bits = le32_to_cpu(p_sb_attn->atten_bits); attn_acks = le32_to_cpu(p_sb_attn->atten_ack); } while (index != p_sb_attn->sb_index); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 1135387bd99d704f517679c4716760e39acce52c..4f8a685d1a55febcf78e3213a1c56130c8535213 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", p_params->num_queues, p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index e860bdf0f7524195afce3607fafe8075f733bd0a..7002a660b6b4c278d130388e4ac59fb7e7cddb93 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1689,6 +1689,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? 
sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2606,7 +2615,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2633,7 +2642,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2655,9 +2664,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2699,6 +2709,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, data.input.rx_num_desc = n_ooo_bufs * 2; data.input.tx_num_desc = data.input.rx_num_desc; data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; + data.input.tx_tc = PKT_LB_TC; + data.input.tx_dest = QED_LL2_TX_DEST_LB; data.p_connection_handle = &iwarp_info->ll2_mpa_handle; data.input.secondary_queue = true; data.cbs = &cbs; @@ -2711,10 +2723,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2727,7 +2738,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d002419ea751c098e722e5574b855b8f9a..7ac959038324ef6f74a131f7844d7db1117d4892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 82a1bd1f8a8ce3fd66acc6b0cc0c9e7bf6a57305..64ac95ca4df21e605c65046c85c86cd13c88cea6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + SET_FIELD(state, 
ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); @@ -740,11 +744,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { - /* Return spq entry which is taken in qed_sp_init_request()*/ - qed_spq_return_entry(p_hwfn, p_ent); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + /* Update mcast bins for VFs, PF doesn't use this functionality */ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); @@ -1355,6 +1363,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "%d is not supported yet\n", p_filter_cmd->opcode); + qed_sp_destroy_request(p_hwfn, *pp_ent); return -EINVAL; } @@ -2056,13 +2065,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } else { rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) - return rc; + goto err; if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); if (rc) - return rc; + goto err; p_ramrod->rx_qid_valid = 1; p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); @@ -2083,6 +2092,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; } int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, @@ -2203,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 num_queues = 0; /* Since the feature controls only queue-zones, - * make sure we have the contexts [rx, tx, xdp] to + * make sure we have the contexts [rx, xdp, tcs] to * match. 
*/ for_each_hwfn(cdev, i) { @@ -2213,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; - num_queues += min_t(u16, l2_queues, cids / 3); + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' @@ -2684,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8d80f1095d171c85b7d010bb5297a58a1961808d..7127d5aaac4223de0a2758478bb54e8e642c9430 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -219,6 +219,9 @@ struct qed_sp_vport_update_params { struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; }; int qed_sp_vport_update(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 14ac9cab265341b9a7d2d1c10fa037ea6e4dd20f..2847509a183d043a19cf26ef909df382b13f26a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; - tx_pkt.tx_dest = p_ll2_conn->tx_dest; + switch (p_ll2_conn->tx_dest) { + case CORE_TX_DEST_NW: + tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; + break; + case CORE_TX_DEST_LB: + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + break; + case CORE_TX_DEST_DROP: + default: + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + break; + } tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; @@ -1592,6 +1603,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); rx_prod.bd_prod = cpu_to_le16(bd_prod); rx_prod.cqe_prod = cpu_to_le16(cq_prod); + + /* Make sure chain element is updated before ringing the doorbell */ + dma_wmb(); + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); } @@ -2426,19 +2441,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; + u8 flags = 0, nr_frags; int rc = -EINVAL, i; dma_addr_t mapping; u16 vlan = 0; - u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } - if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + /* Cache number of fragments from SKB since SKB may be freed by + * the completion routine after calling qed_ll2_prepare_tx_packet() + */ + nr_frags = skb_shinfo(skb)->nr_frags; + + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", - 1 + skb_shinfo(skb)->nr_frags); + 1 + nr_frags); return -EINVAL; } 
@@ -2460,7 +2480,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, } memset(&pkt, 0, sizeof(pkt)); - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.num_of_bds = 1 + nr_frags; pkt.vlan = vlan; pkt.bd_flags = flags; pkt.tx_dest = QED_LL2_TX_DEST_NW; @@ -2471,12 +2491,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) pkt.remove_stag = true; + /* qed_ll2_prepare_tx_packet() may actually send the packet if + * there are no fragments in the skb and subsequently the completion + * routine may run and free the SKB, so no dereferencing the SKB + * beyond this point unless skb has any fragments. + */ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); if (rc) goto err; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, @@ -2485,6 +2510,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; goto err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2094d86a7a087dac2eed0fe77bd71f66e7d975c7..049a83b40e46925ddc31906bcdfecddf58d5dae7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1150,7 +1150,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + goto err4; } } @@ -1158,6 +1158,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, return 0; +err4: + qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: @@ -1460,6 +1462,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn, } static void qed_fill_link(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, struct qed_link_output *if_link) { struct qed_mcp_link_params params; @@ -1540,7 +1543,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, /* TODO - fill duplex properly */ if_link->duplex = DUPLEX_FULL; - qed_mcp_get_media_type(hwfn->cdev, &media_type); + qed_mcp_get_media_type(hwfn, ptt, &media_type); if_link->port = qed_get_port_type(media_type); if_link->autoneg = params.speed.autoneg; @@ -1596,21 +1599,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn, static void qed_get_current_link(struct qed_dev *cdev, struct qed_link_output *if_link) { + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i; - qed_fill_link(&cdev->hwfns[0], if_link); + hwfn = &cdev->hwfns[0]; + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(hwfn); + if (ptt) { + qed_fill_link(hwfn, ptt, if_link); + qed_ptt_release(hwfn, ptt); + } else { + DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); + } + } else { + qed_fill_link(hwfn, NULL, if_link); + } for_each_hwfn(cdev, i) qed_inform_vf_link_state(&cdev->hwfns[i]); } -void qed_link_update(struct qed_hwfn *hwfn) +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) { void *cookie = hwfn->cdev->ops_cookie; struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; struct qed_link_output if_link; - qed_fill_link(hwfn, &if_link); + qed_fill_link(hwfn, ptt, &if_link); qed_inform_vf_link_state(hwfn); if (IS_LEAD_HWFN(hwfn) && cookie) @@ -1634,9 +1650,9 @@ static int qed_drain(struct qed_dev *cdev) return -EBUSY; } rc = qed_mcp_drain(hwfn, ptt); + qed_ptt_release(hwfn, ptt); if (rc) 
return rc; - qed_ptt_release(hwfn, ptt); } return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 58c7eb9d8e1b85893ea33c7de48426e5f555bdc5..938ace333af10ca38da2c253d7911fbe1a29e764 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1382,7 +1382,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); - qed_link_update(p_hwfn); + qed_link_update(p_hwfn, p_ptt); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); } @@ -1849,12 +1849,10 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_media_type) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; - struct qed_ptt *p_ptt; - - if (IS_VF(cdev)) + if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { @@ -1862,16 +1860,15 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) return -EBUSY; } - *p_media_type = MEDIA_UNSPECIFIED; - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; - - *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, media_type)); + if (!p_ptt) { + *p_media_type = MEDIA_UNSPECIFIED; + return -EINVAL; + } - qed_ptt_release(p_hwfn, p_ptt); + *p_media_type = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + media_type)); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 85e6b3989e7a913c7157f27ff27469e6cbf1f3aa..80a6b5d1ff3386b35b9e816e5b7b30b7427cb8e7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -322,14 +322,15 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, * @brief Get media type value of the port. * * @param cdev - qed dev pointer + * @param p_ptt * @param mfw_ver - media type value * * @return int - * 0 - Operation was successul. 
* -EBUSY - Operation failed */ -int qed_mcp_get_media_type(struct qed_dev *cdev, - u32 *media_type); +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *media_type); /** * @brief General function for sending commands to the MCP diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index c71391b9c757a1b03f55f21cc641c4718bbce719..909422d9390330c6c133a4d47af5ee2b7a8d5dac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; } -static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_rdma_start_in_params *params) +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info; - u32 num_cons, num_tasks; - int rc = -ENOMEM; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); - - /* Allocate a struct with current pf rdma info */ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); if (!p_rdma_info) - return rc; + return -ENOMEM; + + spin_lock_init(&p_rdma_info->lock); p_hwfn->p_rdma_info = p_rdma_info; + return 0; +} + +void qed_rdma_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_rdma_info); + p_hwfn->p_rdma_info = NULL; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) p_rdma_info->proto = PROTOCOLID_IWARP; else @@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate a struct with device params and fill it */ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); if (!p_rdma_info->dev) - goto free_rdma_info; + return rc; /* Allocate a struct with port params and fill it */ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); @@ -298,8 +310,6 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, kfree(p_rdma_info->port); free_rdma_dev: kfree(p_rdma_info->dev); -free_rdma_info: - kfree(p_rdma_info); return rc; } @@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) kfree(p_rdma_info->port); kfree(p_rdma_info->dev); - - kfree(p_rdma_info); } static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) @@ -434,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, /* Vendor specific information */ dev->vendor_id = cdev->vendor_id; dev->vendor_part_id = cdev->device_id; - dev->hw_ver = 0; + dev->hw_ver = cdev->chip_rev; dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); @@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); - spin_lock_init(&p_hwfn->p_rdma_info->lock); - qed_rdma_init_devinfo(p_hwfn, params); qed_rdma_init_port(p_hwfn); qed_rdma_init_events(p_hwfn, params); @@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt) /* Disable RoCE search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); p_hwfn->b_rdma_enabled_in_prs = false; - + p_hwfn->p_rdma_info->active = 0; qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); @@ -797,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt, dpi_start_offset + ((out_params->dpi) * 
p_hwfn->dpi_size)); - out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + + out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset + ((out_params->dpi) * p_hwfn->dpi_size); @@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt, u8 max_stats_queues; int rc; - if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { + if (!rdma_cxt || !in_params || !out_params || + !p_hwfn->p_rdma_info->active) { DP_ERR(p_hwfn->cdev, "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", rdma_cxt, in_params, out_params); @@ -1514,6 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt, default: rc = -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } SET_FIELD(p_ramrod->flags1, @@ -1801,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; - /* if rdma info has not been allocated, naturally there are no qps */ - if (!p_hwfn->p_rdma_info) + /* if rdma wasn't activated yet, naturally there are no qps */ + if (!p_hwfn->p_rdma_info->active) return false; spin_lock_bh(&p_hwfn->p_rdma_info->lock); @@ -1848,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt, if (!p_ptt) goto err; - rc = qed_rdma_alloc(p_hwfn, p_ptt, params); + rc = qed_rdma_alloc(p_hwfn); if (rc) goto err1; @@ -1857,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt, goto err2; qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_rdma_info->active = 1; return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6f722ee8ee945b13ee6f33df82d1692c0ae18304..3689fe3e593542fc487167aae73da99156add030 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -102,6 +102,7 @@ struct qed_rdma_info { u16 max_queue_zones; enum protocol_type proto; struct qed_iwarp_info iwarp; + u8 active:1; }; struct qed_rdma_qp { @@ -176,10 +177,14 @@ struct qed_rdma_qp { #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); +void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} +static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif int diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index f9167d1354bbef3ccf2e972e8c002e64bbc24cce..e49fada854108718bf1dc5ea45fda2d4d264ded2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "qed destroy responder failed: cannot allocate memory (ramrod). 
rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index e95431f6acd46fb6ace4c20cfe227388c890cdea..dae2896e1d8e4ed8e7f2c2d547a9fd984aff9fe5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -167,6 +167,9 @@ struct qed_spq_entry { enum spq_mode comp_mode; struct qed_spq_comp_cb comp_cb; struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ + + /* Posted entry for unlimited list entry in EBLOCK mode */ + struct qed_spq_entry *post_ent; }; struct qed_eq { @@ -377,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); * @param p_hwfn */ void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); /** * @file @@ -396,6 +400,17 @@ struct qed_sp_init_data { struct qed_spq_comp_cb *p_comp_data; }; +/** + * @brief Returns a SPQ entry to the pool / frees the entry if allocated. + * Should be called on in error flows after initializing the SPQ entry + * and before posting it. + * + * @param p_hwfn + * @param p_ent + */ +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 77b6248ad3b97d3a45caf27825faddabf9695a5b..888274fa208bc768b2ab9db2514407573bfab2e1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -47,6 +47,19 @@ #include "qed_sp.h" #include "qed_sriov.h" +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + /* qed_spq_get_entry() can either get an entry from the free_pool, + * or, if no entries are left, allocate a new entry and add it to + * the unlimited_pending list. 
+ */ + if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) + kfree(p_ent); + else + qed_spq_return_entry(p_hwfn, p_ent); +} + int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) @@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, case QED_SPQ_MODE_BLOCK: if (!p_data->p_comp_data) - return -EINVAL; + goto err; p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; @@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, default: DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", p_ent->comp_mode); - return -EINVAL; + goto err; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, @@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + + return -EINVAL; } static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 1673fc90027f8e538a1aaf4aee92b0e33b9b09ce..a0ee847f379bbbe3f34a19430cf0b65ca9a10371 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); goto err; @@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, /* Retry after drain */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) - goto out; + return 0; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - if (comp_done->done == 1) + if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; -out: - qed_ptt_release(p_hwfn, p_ptt); - return 0; - + return 0; + } err: - qed_ptt_release(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), @@ -404,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + return rc; } @@ -685,6 +688,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, /* EBLOCK responsible to free the allocated p_ent */ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) kfree(p_ent); + else + p_ent->post_ent = p_en2; p_ent = p_en2; } @@ -745,7 +750,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, return 0; } -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +int qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -768,6 +773,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +/* Avoid overriding of SPQ entries when getting out-of-order completions, by + * marking the completions in a bitmap and increasing the chain consumer only + * for the first successive completed entries. 
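/* Illustrative aside (not part of the patch): the completion-bitmap scheme
 * described in the comment above can be modelled in plain userspace C.
 * The names and the ring size below are invented for the demo and are not
 * the driver's types; the point is only that completions may arrive out of
 * order, so each one is marked in a bitmap and the ring consumer advances
 * only over the leading run of completed slots.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* assumed small ring for the demo */

static bool completed[RING_SIZE];	/* stands in for p_comp_bitmap */
static unsigned int consumer;		/* stands in for comp_bitmap_idx */

static void complete_entry(uint16_t echo)
{
	completed[echo % RING_SIZE] = true;

	/* Advance the consumer only across consecutive completed slots. */
	while (completed[consumer % RING_SIZE]) {
		completed[consumer % RING_SIZE] = false;
		consumer++;	/* a real ring would return a slot here */
	}
}

int main(void)
{
	/* Completions 0, 2, 1 arrive; the consumer stays at 1 until the
	 * missing echo 1 shows up, then jumps to 3 in one go.
	 */
	complete_entry(0);
	complete_entry(2);
	printf("consumer after 0,2: %u\n", consumer);	/* prints 1 */
	complete_entry(1);
	printf("consumer after 1:   %u\n", consumer);	/* prints 3 */
	return 0;
}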
+ */ +static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) +{ + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + struct qed_spq *p_spq = p_hwfn->p_spq; + + __set_bit(pos, p_spq->p_comp_bitmap); + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + __clear_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } +} + int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code) { @@ -825,11 +849,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { - /* This is an allocated p_ent which does not need to - * return to pool. - */ + struct qed_spq_entry *p_post_ent = p_ent->post_ent; + kfree(p_ent); - return rc; + + /* Return the entry which was actually posted */ + p_ent = p_post_ent; } if (rc) @@ -843,7 +868,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, spq_post_fail2: spin_lock_bh(&p_spq->lock); list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); spq_post_fail: /* return to the free pool */ @@ -863,7 +888,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; - int rc; if (!p_hwfn) return -EINVAL; @@ -875,25 +899,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, spin_lock_bh(&p_spq->lock); list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { - u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; - list_del(&p_ent->list); - - /* Avoid overriding of SPQ entries when getting - * out-of-order completions, by marking the completions - * in a bitmap and increasing the chain consumer only - * for the first successive completed entries. - */ - __set_bit(pos, p_spq->p_comp_bitmap); - - while (test_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap)) { - __clear_bit(p_spq->comp_bitmap_idx, - p_spq->p_comp_bitmap); - p_spq->comp_bitmap_idx++; - qed_chain_return_produced(&p_spq->chain); - } - + qed_spq_comp_bmap_update(p_hwfn, echo); p_spq->comp_count++; found = p_ent; break; @@ -932,20 +939,13 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, QED_MSG_SPQ, "Got a completion without a callback function\n"); - if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || - (found->queue == &p_spq->unlimited_pending)) + if (found->comp_mode != QED_SPQ_MODE_EBLOCK) /* EBLOCK is responsible for returning its own entry into the - * free list, unless it originally added the entry into the - * unlimited pending list. + * free list. 
*/ qed_spq_return_entry(p_hwfn, found); - /* Attempt to post pending requests */ - spin_lock_bh(&p_spq->lock); - rc = qed_spq_pend_post(p_hwfn); - spin_unlock_bh(&p_spq->lock); - - return rc; + return 0; } int qed_consq_alloc(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9b08a9d9e15130f0518b1f7608bbaa36e6eb15b0..71a7af134dd8e74622a8cecb99d04628a7683069 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) default: DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", p_hwfn->hw_info.personality); + qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } @@ -1968,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; - params.check_mac = true; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { @@ -5129,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; + params.update_ctl_frame_check = 1; + params.mac_chk_en = !vf_info->is_trusted_configured; + if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; flags->rx_accept_filter = vf_info->rx_accept_mode; @@ -5146,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) } if (flags->update_rx_mode_config || - flags->update_tx_mode_config) + flags->update_tx_mode_config || + params.update_ctl_frame_check) qed_sp_vport_update(hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index be118d057b92c5ad494690b7c80c98140dbb8e7a..5dda547772c1363e9373f2d81ef72c6644936b85 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vf_pf_resc_request *p_resc; + u8 retry_cnt = VF_ACQUIRE_THRESH; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; @@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + /* Re-try acquire in case of vf-pf hw channel timeout */ + if (retry_cnt && rc == -EBUSY) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + if (rc) goto exit; @@ -1688,7 +1698,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) ops->ports_update(cookie, vxlan_port, geneve_port); /* Always update link configuration according to bulletin */ - qed_link_update(hwfn); + qed_link_update(hwfn, NULL); } void qed_iov_vf_task(struct work_struct *work) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 6a4d266fb8e2126f724cd4dbcfed3e02433c5d65..d242a5724069129fa0a87f8ce6f1c9efcf4c27d7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -489,6 +489,9 @@ struct qede_reload_args { /* Datapath functions 
definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b16ce7d93caff5802e41b79ecb8e8b8e5fc78ed5..c3d5d40afec078d277e279cc10256b883c476fee 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev, netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { + if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1a78027de071f1bc2286f0a6d31288feb8b84584..a96da16f340492e0758474936ec74f2281020d3e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct qede_dev *edev = netdev_priv(dev); + int total_txq; + + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; + + return QEDE_TSS_COUNT(edev) ? + fallback(dev, skb, NULL) % total_txq : 0; +} + /* 8B udp header + 8B base tunnel header + 32B option length */ #define QEDE_MAX_TUN_HDR_LEN 48 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 46d0f2eaa0c094c19ae47161f84320caeba2c567..0d8e39ffbcd1a05fe7f80c649918943c549c3d76 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -1167,8 +1170,16 @@ enum qede_remove_mode { static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); - struct qede_dev *edev = netdev_priv(ndev); - struct qed_dev *cdev = edev->cdev; + struct qede_dev *edev; + struct qed_dev *cdev; + + if (!ndev) { + dev_info(&pdev->dev, "Device has already been removed\n"); + return; + } + + edev = netdev_priv(ndev); + cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); @@ -1351,6 +1362,7 @@ static 
int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; + edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ @@ -1395,6 +1407,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: @@ -1595,8 +1608,6 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } - - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 013ff567283c738f342ca5d6f5358e30ca6daa72..5e574c3b625e513429bc5d674b8cbc87493075d8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { - rc = -EINVAL; DP_ERR(edev, "PTP clock registration failed\n"); + qede_ptp_disable(edev); + rc = -EINVAL; goto err2; } return 0; -err2: - qede_ptp_disable(edev); - ptp->clock = NULL; err1: kfree(ptp); +err2: edev->ptp = NULL; return rc; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 10b075bc595966ac405751ade7cda6b78ed930d7..1b5e098b2367e9f9012e1935735ae07154bc6bb2 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2757,6 +2757,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) int err; for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { @@ -2767,11 +2770,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) ql_free_large_buffers(qdev); return -ENOMEM; } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer @@ -2788,10 +2787,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); + dev_kfree_skb_irq(skb); ql_free_large_buffers(qdev); return -ENOMEM; } + lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index a79d84f9910229515acf900e8286f71b8a010ae1..40df02590f8a8e5cb8be17d5080e76f5e6ec4f48 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1079,8 +1079,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; - ahw->hw_ops->alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_ADD_RCV_RINGS); + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to alloc mbx args %d\n", err); 
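/* Illustrative aside (not part of the patch): several hunks in this series,
 * including the new err4 label in qed_slowpath_start and the return-value
 * check added just above, follow the usual kernel pattern of unwinding
 * earlier setup steps with goto labels when a later step fails. A minimal,
 * self-contained sketch of that pattern with made-up resource names:
 */
#include <stdlib.h>

struct demo_ctx {
	void *ring;
	void *stats;
};

static int demo_setup(struct demo_ctx *ctx)
{
	int rc = -1;

	ctx->ring = malloc(64);
	if (!ctx->ring)
		goto err0;

	ctx->stats = malloc(64);
	if (!ctx->stats)
		goto err1;

	return 0;			/* every step succeeded */

	/* Unwind in reverse order of setup; later labels fall through. */
err1:
	free(ctx->ring);
	ctx->ring = NULL;
err0:
	return rc;
}

int main(void)
{
	struct demo_ctx ctx = { 0 };
	int rc = demo_setup(&ctx);

	free(ctx.ring);
	free(ctx.stats);
	return rc ? 1 : 0;
}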
+ return err; + } + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 4b76c69fe86d2aaa8609456277b767f1993bffe4..834208e55f7b8a7d1e2ac535096e64b263505178 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) - return 0; + return 1; switch (capid) { case DCB_CAP_ATTR_PG: diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 031f6e6ee9c17af99c91cc62f12af3e5e3e097be..351a906980103982b28c14746f9661a11e5d4b1a 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1449,6 +1449,7 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, { struct emac_tpd tpd; u32 prod_idx; + int len; memset(&tpd, 0, sizeof(tpd)); @@ -1468,9 +1469,10 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, if (skb_network_offset(skb) != ETH_HLEN) TPD_TYP_SET(&tpd, 1); + len = skb->len; emac_tx_fill_tpd(adpt, tx_q, skb, &tpd); - netdev_sent_queue(adpt->netdev, skb->len); + netdev_sent_queue(adpt->netdev, len); /* Make sure the are enough free descriptors to hold one * maximum-sized SKB. We need one desc for each fragment, diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 2a0cbc535a2ed5d527f6a622093d653766c1082d..938602864c24381f7badf5a2fd5cba929f742f0b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -741,19 +741,26 @@ static int emac_remove(struct platform_device *pdev) struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + unregister_netdev(netdev); netif_napi_del(&adpt->rx_q.napi); + free_irq(adpt->irq.irq, &adpt->irq); + cancel_work_sync(&adpt->work_thread); + emac_clks_teardown(adpt); put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); - free_netdev(netdev); if (adpt->phy.digital) iounmap(adpt->phy.digital); iounmap(adpt->phy.base); + free_netdev(netdev); + return 0; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 5f4e447c5dce2d2cd4e4f8f981bde86d0335e10a..a7360fb3e45cff4bda3302d4e259b169d4494682 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -66,10 +66,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; - kfree(port); - netdev_rx_handler_unregister(real_dev); + kfree(port); + /* release reference on real_dev */ dev_put(real_dev); @@ -301,10 +301,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], struct rmnet_port *port; u16 mux_id; + if (!dev) + return -ENODEV; + real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); - if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev)) + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) return -ENODEV; port = rmnet_get_port_rtnl(real_dev); @@ -373,7 +376,7 @@ static int 
rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) struct rtnl_link_ops rmnet_link_ops __read_mostly = { .kind = "rmnet", - .maxtype = __IFLA_RMNET_MAX, + .maxtype = IFLA_RMNET_MAX, .priv_size = sizeof(struct rmnet_priv), .setup = rmnet_vnd_setup, .validate = rmnet_rtnl_validate, diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 884f1f52dcc25e88713a28978bb9bdaa4bc3a320..70879a3ab567c57004da73656e6c8e49237000de 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -59,7 +59,7 @@ struct rmnet_map_dl_csum_trailer { struct rmnet_map_ul_csum_header { __be16 csum_start_offset; u16 csum_insert_offset:14; - u16 udp_ip4_ind:1; + u16 udp_ind:1; u16 csum_enabled:1; } __aligned(1); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 57a9c314a665fc8aae9ce94073b9a256fa4973ee..b2090cedd2e965aa1dc901b97b05dae6a537ef30 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -215,9 +215,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr, ul_header->csum_insert_offset = skb->csum_offset; ul_header->csum_enabled = 1; if (ip4h->protocol == IPPROTO_UDP) - ul_header->udp_ip4_ind = 1; + ul_header->udp_ind = 1; else - ul_header->udp_ip4_ind = 0; + ul_header->udp_ind = 0; /* Changing remaining fields to network order */ hdr++; @@ -248,6 +248,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, struct rmnet_map_ul_csum_header *ul_header, struct sk_buff *skb) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; __be16 *hdr = (__be16 *)ul_header, offset; offset = htons((__force u16)(skb_transport_header(skb) - @@ -255,7 +256,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, ul_header->csum_start_offset = offset; ul_header->csum_insert_offset = skb->csum_offset; ul_header->csum_enabled = 1; - ul_header->udp_ip4_ind = 0; + + if (ip6h->nexthdr == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; /* Changing remaining fields to network order */ hdr++; @@ -428,7 +433,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, ul_header->csum_start_offset = 0; ul_header->csum_insert_offset = 0; ul_header->csum_enabled = 0; - ul_header->udp_ip4_ind = 0; + ul_header->udp_ind = 0; priv->stats.csum_sw++; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 0afc3d335d562d24466b9192aea291b910ebcdfe..d11c16aeb19ad45759c44e1dac2bb259cf976054 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct net_device *real_dev, struct rmnet_endpoint *ep) { - struct rmnet_priv *priv; + struct rmnet_priv *priv = netdev_priv(rmnet_dev); int rc; if (ep->egress_dev) @@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; rmnet_dev->hw_features |= NETIF_F_SG; + priv->real_dev = real_dev; + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; @@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, rmnet_dev->rtnl_link_ops = &rmnet_link_ops; - priv = netdev_priv(rmnet_dev); priv->mux_id = id; - priv->real_dev = real_dev; netdev_dbg(rmnet_dev, "rmnet dev created\n"); } diff --git 
a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 81045dfa1cd898726da2e2e37520be720e0e8842..44f6e4873aadd16b35ae34c3561393fc010a698b 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) struct cp_private *cp; int handled = 0; u16 status; + u16 mask; if (unlikely(dev == NULL)) return IRQ_NONE; @@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) spin_lock(&cp->lock); + mask = cpr16(IntrMask); + if (!mask) + goto out_unlock; + status = cpr16(IntrStatus); if (!status || (status == 0xFFFF)) goto out_unlock; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2c350099b83cff30ded1fa7deda5de8fb2f87e34..4ab87fe845427672a346f3d5aa38b447b3d9a968 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -214,6 +215,8 @@ enum cfg_version { }; static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, @@ -717,6 +720,7 @@ module_param(use_dac, int, 0); MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_SOFTDEP("pre: realtek"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_8168D_1); MODULE_FIRMWARE(FIRMWARE_8168D_2); @@ -1006,6 +1010,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) { int value; + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + r8168dp_2_mdio_start(tp); value = r8169_mdio_read(tp, reg); @@ -1528,6 +1536,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) } RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1549,8 +1559,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) rtl_unlock_work(tp); - device_set_wakeup_enable(d, tp->saved_wolopts); - pm_runtime_put_noidle(d); return 0; @@ -1730,11 +1738,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp) static bool rtl8169_update_counters(struct rtl8169_private *tp) { + u8 val = RTL_R8(tp, ChipCmd); + /* * Some chips are unable to dump tally counters when the receiver - * is disabled. + * is disabled. If 0xff chip may be in a PCI power-save state. 
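/* Illustrative aside (not part of the patch): on PCI, reads from a device
 * that is powered down or has dropped off the bus typically return all-ones,
 * so an 0xff value from an 8-bit register is a strong hint that the hardware
 * cannot be touched right now. A hedged, userspace-style sketch of the check;
 * read_reg8() and DEMO_RX_ENABLED are stand-ins, not real driver helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RX_ENABLED 0x08	/* assumed bit meaning "receiver enabled" */

static uint8_t read_reg8(void)
{
	/* Pretend the device is absent or asleep: the bus returns all-ones. */
	return 0xff;
}

static bool counters_unavailable(void)
{
	uint8_t val = read_reg8();

	/* Skip the dump if RX is off or the read looks like a dead device. */
	return !(val & DEMO_RX_ENABLED) || val == 0xff;
}

int main(void)
{
	printf("skip counter dump: %d\n", counters_unavailable());
	return 0;
}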
*/ - if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) + if (!(val & CmdRxEnb) || val == 0xff) return true; return rtl8169_do_counters(tp, CounterDump); @@ -4175,10 +4185,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) + struct phy_device *phydev; + + if (!__rtl8169_get_wol(tp)) return false; - phy_speed_down(tp->dev->phydev, false); + /* phydev may not be attached to netdevice */ + phydev = mdiobus_get_phy(tp->mii_bus, 0); + + phy_speed_down(phydev, false); rtl_wol_suspend_quirk(tp); return true; @@ -5191,6 +5206,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) /* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 
0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + rtl_hw_aspm_clkreq_enable(tp, true); } @@ -5407,7 +5559,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - RTL_W16(tp, IntrMitigate, 0x5151); + RTL_W16(tp, IntrMitigate, 0x5100); /* Work around for RxFIFO overflow. */ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { @@ -7091,13 +7243,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) { unsigned int flags; - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... 
RTL_GIGA_MAC_VER_06: RTL_W8(tp, Cfg9346, Cfg9346_Unlock); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); RTL_W8(tp, Cfg9346, Cfg9346_Lock); + /* fall through */ + case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24: flags = PCI_IRQ_LEGACY; - } else { + break; + default: flags = PCI_IRQ_ALL_TYPES; + break; } return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); @@ -7314,6 +7471,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9b6bf557a2f5ffde5fe405f17e3307014f0c6a05..e04af9546e52632f9e26320f8ad28169b769951a 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1029,7 +1029,6 @@ struct ravb_private { phy_interface_t phy_interface; int msg_enable; int speed; - int duplex; int emac_irq; enum ravb_chip_id chip_id; int rx_irqs[NUM_RX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index d6f753925352d41758945aca8d5ef6340cceee9a..bf5e95ecec3ed84a71030dd5fb56fc6c8a829c80 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Renesas Ethernet AVB device driver * - * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2014-2019 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. * Copyright (C) 2015-2016 Cogent Embedded, Inc. * @@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev) return error; } -static void ravb_set_duplex(struct net_device *ndev) -{ - struct ravb_private *priv = netdev_priv(ndev); - - ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); -} - static void ravb_set_rate(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -344,7 +337,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) int i; priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + - ETH_HLEN + VLAN_HLEN; + ETH_HLEN + VLAN_HLEN + sizeof(__sum16); /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -398,13 +391,11 @@ static int ravb_ring_init(struct net_device *ndev, int q) /* E-MAC init function */ static void ravb_emac_init(struct net_device *ndev) { - struct ravb_private *priv = netdev_priv(ndev); - /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ - ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | + ravb_write(ndev, ECMR_ZPF | ECMR_DM | (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); @@ -459,7 +450,7 @@ static int ravb_dmac_init(struct net_device *ndev) RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); /* Set FIFO size */ - ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); /* Timestamp enable */ ravb_write(ndev, TCCR_TFEN, TCCR); @@ -514,7 +505,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) kfree(ts_skb); if (tag == tfa_tag) { skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); break; + } else { + dev_kfree_skb_any(skb); } } ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); @@ -525,13 +519,15 @@ static void ravb_rx_csum(struct sk_buff *skb) { u8 *hw_csum; - /* The hardware checksum is 2 bytes appended to packet data */ - if (unlikely(skb->len < 2)) + /* The hardware checksum is contained in sizeof(__sum16) (2) bytes + * appended to packet data + */ + if (unlikely(skb->len < sizeof(__sum16))) return; - hw_csum = skb_tail_pointer(skb) - 2; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); skb->ip_summed = CHECKSUM_COMPLETE; - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - sizeof(__sum16)); } /* Packet receive function for Ethernet AVB */ @@ -987,12 +983,6 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_rcv_snd_disable(ndev); if (phydev->link) { - if (phydev->duplex != priv->duplex) { - new_state = true; - priv->duplex = phydev->duplex; - ravb_set_duplex(ndev); - } - if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; @@ -1007,7 +997,6 @@ static void ravb_adjust_link(struct net_device *ndev) new_state = true; priv->link = 0; priv->speed = 0; - priv->duplex = -1; } /* Enable TX and RX right over here, if E-MAC change is ignored */ @@ -1037,7 +1026,6 @@ static int ravb_phy_init(struct net_device *ndev) priv->link = 0; priv->speed = 0; - priv->duplex = -1; /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); @@ -1458,6 +1446,12 @@ static void ravb_tx_timeout_work(struct work_struct *work) work); struct net_device *ndev = priv->ndev; + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + schedule_work(&priv->work); + return; + } + netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ @@ -1479,6 +1473,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); + + rtnl_unlock(); } /* Packet transmit function for Ethernet AVB */ @@ -1554,7 +1550,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) DMA_TO_DEVICE); goto unmap; } - ts_skb->skb = skb; + ts_skb->skb = skb_get(skb); ts_skb->tag = priv->ts_skb_tag++; priv->ts_skb_tag &= 0x3ff; list_add_tail(&ts_skb->list, &priv->ts_skb_list); @@ -1683,6 +1679,7 @@ static int ravb_close(struct net_device *ndev) /* Clear the timestamp list */ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); kfree(ts_skb); } @@ -1694,6 +1691,8 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } + cancel_work_sync(&priv->work); + if (priv->chip_id != RCAR_GEN2) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f27a0dc8c56331db3f7063c251333b04716f0365..5e3e6e262ba3749a2771946c5e15fc681b757c5e 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c 
+++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev) sh_eth_get_stats(ndev); mdp->cd->soft_reset(ndev); + /* Set the RMII mode again if required */ + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* Set MAC address again */ update_mac_address(ndev); } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 70cce63a6081698169f5737d7eda9c4d3fc069fe..ad557f457b2ce21bed258d73c37625fbc9939e45 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -735,6 +735,7 @@ static int sgiseeq_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); sp = netdev_priv(dev); /* Make private data page aligned */ @@ -792,15 +793,16 @@ static int sgiseeq_probe(struct platform_device *pdev) printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; - goto err_out_free_page; + goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; -err_out_free_page: - free_page((unsigned long) sp->srings); +err_out_free_attrs: + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); err_out_free_dev: free_netdev(dev); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7eeac3d6cfe898a9a4ef6df9378d8c6d29383ce1..1f971d31ec302c95b8a01a2b1b889383c4cf03d7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6042,22 +6042,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, }; +#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, struct efx_mcdi_mtd_partition *part, - unsigned int type) + unsigned int type, + unsigned long *found) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); const struct efx_ef10_nvram_type_info *info; size_t size, erase_size, outlen; + int type_idx = 0; bool protected; int rc; - for (info = efx_ef10_nvram_types; ; info++) { - if (info == - efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) + for (type_idx = 0; ; type_idx++) { + if (type_idx == EF10_NVRAM_PARTITION_COUNT) return -ENODEV; + info = efx_ef10_nvram_types + type_idx; if ((type & ~info->type_mask) == info->type) break; } @@ -6070,6 +6073,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, if (protected) return -ENODEV; /* hide it */ + /* If we've already exposed a partition of this type, hide this + * duplicate. All operations on MTDs are keyed by the type anyway, + * so we can't act on the duplicate. 
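/* Illustrative aside (not part of the patch): the duplicate handling added
 * above boils down to "remember which types have already been exposed and
 * skip repeats". A simplified userspace model, with an invented type list
 * and a plain array standing in for the kernel bitmap helpers:
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_TYPE_COUNT 4

static bool seen[DEMO_TYPE_COUNT];

/* Returns true only the first time a given type index is offered. */
static bool claim_type(unsigned int type_idx)
{
	if (seen[type_idx])
		return false;		/* duplicate: hide it, like -EEXIST */
	seen[type_idx] = true;
	return true;
}

int main(void)
{
	unsigned int enumerated[] = { 0, 2, 2, 3, 0 };
	unsigned int i, exposed = 0;

	for (i = 0; i < sizeof(enumerated) / sizeof(enumerated[0]); i++)
		if (claim_type(enumerated[i]))
			exposed++;

	printf("exposed %u of 5 enumerated partitions\n", exposed); /* 3 */
	return 0;
}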
+ */ + if (__test_and_set_bit(type_idx, found)) + return -EEXIST; + part->nvram_type = type; MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); @@ -6098,6 +6108,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; @@ -6126,11 +6137,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx) for (i = 0; i < n_parts_total; i++) { type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, i); - rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); - if (rc == 0) - n_parts++; - else if (rc != -ENODEV) + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, + found); + if (rc == -EEXIST || rc == -ENODEV) + continue; + if (rc) goto fail; + n_parts++; } rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index f21661532ed395565552ef4e95694a453d7358b8..cc8fbf398c0d7ad4bd5f91eec028f10f6f148612 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1534,7 +1534,8 @@ void efx_ptp_remove(struct efx_nic *efx) (void)efx_ptp_disable(efx); cancel_work_sync(&efx->ptp_data->work); - cancel_work_sync(&efx->ptp_data->pps_work); + if (efx->ptp_data->pps_workwq) + cancel_work_sync(&efx->ptp_data->pps_work); skb_queue_purge(&efx->ptp_data->rxq); skb_queue_purge(&efx->ptp_data->txq); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 4bb89f74742c908386410c23640ddbe6462c9cfc..d5bcbc40a55fc1239b20b1c88b044ffdcedcf927 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. */ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); sw32(cr, RxENA | sr32(cr)); sw32(ier, IE); @@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) sw32(txdp, sis_priv->tx_ring_dma); /* Enable all known interrupts by setting the interrupt mask. 
*/ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); } /** @@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) spin_unlock_irqrestore(&sis_priv->lock, flags); return NETDEV_TX_OK; } - sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); + sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len); sw32(cr, TxENA | sr32(cr)); sis_priv->cur_tx ++; @@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) do { status = sr32(isr); - if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) + if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0) /* nothing intresting happened */ break; handled = 1; @@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) /* Rx interrupt */ sis900_rx(net_dev); - if (status & (TxURN | TxERR | TxIDLE)) + if (status & (TxURN | TxERR | TxIDLE | TxDESC)) /* Tx interrupt */ sis900_finish_xmit(net_dev); @@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev) if (tx_status & OWN) { /* The packet is not transmitted yet (owned by hardware) ! - * Note: the interrupt is generated only when Tx Machine - * is idle, so this is an almost impossible case */ + * Note: this is an almost impossible condition + * in case of TxDESC ('descriptor interrupt') */ break; } @@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. */ - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); sw32(cr, RxENA | sr32(cr)); sw32(ier, IE); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index b1b53f6c452f5267f8af304bf42bf68b0238703a..8355dfbb8ec3c71d01d6da5d6baad00829edf13a 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -513,7 +513,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index b944828f9ea3ddfba9849ba0b1c6e66243cd8b78..8d6cff8bd16229655e84b65c9651838efa26b78b 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -638,7 +638,8 @@ done: if (!THROTTLE_TX_PKTS) * now, or set the card to generates an interrupt when ready * for the packet. 
*/ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index f0afb88d7bc2b02de3dc1054ec2ec5803f452a35..ce4bfecc26c7aadfada60b1c2dd7b628c89a9075 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1786,7 +1786,8 @@ static int smsc911x_stop(struct net_device *dev) } /* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int freespace; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 4289ccb26e4ec3045aee380144196150ca198f08..28d582c18afb9f5b9846428747fc5568c672e5e0 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -274,6 +274,7 @@ struct netsec_priv { struct clk *clk; u32 msg_enable; u32 freq; + u32 phy_addr; bool rx_cksum_offload_flag; }; @@ -940,6 +941,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dring->head = 0; dring->tail = 0; dring->pkt_cnt = 0; + + if (id == NETSEC_RING_TX) + netdev_reset_queue(priv->ndev); } static void netsec_free_dring(struct netsec_priv *priv, int id) @@ -1343,11 +1347,11 @@ static int netsec_netdev_stop(struct net_device *ndev) netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); - ret = netsec_reset_hardware(priv, false); - phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); + ret = netsec_reset_hardware(priv, false); + pm_runtime_put_sync(priv->dev); return ret; @@ -1357,6 +1361,7 @@ static int netsec_netdev_init(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); int ret; + u16 data; ret = netsec_alloc_dring(priv, NETSEC_RING_TX); if (ret) @@ -1366,6 +1371,11 @@ static int netsec_netdev_init(struct net_device *ndev) if (ret) goto err1; + /* set phy power down */ + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | + BMCR_PDOWN; + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); + ret = netsec_reset_hardware(priv, true); if (ret) goto err2; @@ -1415,7 +1425,7 @@ static const struct net_device_ops netsec_netdev_ops = { }; static int netsec_of_probe(struct platform_device *pdev, - struct netsec_priv *priv) + struct netsec_priv *priv, u32 *phy_addr) { priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { @@ -1423,6 +1433,8 @@ static int netsec_of_probe(struct platform_device *pdev, return -EINVAL; } + *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "phy_ref_clk not found\n"); @@ -1623,12 +1635,14 @@ static int netsec_probe(struct platform_device *pdev) } if (dev_of_node(&pdev->dev)) - ret = netsec_of_probe(pdev, priv); + ret = netsec_of_probe(pdev, priv, &phy_addr); else ret = netsec_acpi_probe(pdev, priv, &phy_addr); if (ret) goto free_ndev; + priv->phy_addr = phy_addr; + if (!priv->freq) { dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); ret = -ENODEV; diff --git a/drivers/net/ethernet/socionext/sni_ave.c 
b/drivers/net/ethernet/socionext/sni_ave.c index f7ecceeb1e280fcb71b48dfc282ba42f3800fb06..09d25b87cf7c0ee09be03e785c76bc4a09bb7170 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -194,6 +194,7 @@ /* Parameter for ethernet frame */ #define AVE_MAX_ETHFRAME 1518 +#define AVE_FRAME_HEADROOM 2 /* Parameter for interrupt */ #define AVE_INTM_COUNT 20 @@ -585,12 +586,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) skb = priv->rx.desc[entry].skbs; if (!skb) { - skb = netdev_alloc_skb_ip_align(ndev, - AVE_MAX_ETHFRAME); + skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME); if (!skb) { netdev_err(ndev, "can't allocate skb for Rx\n"); return -ENOMEM; } + skb->data += AVE_FRAME_HEADROOM; + skb->tail += AVE_FRAME_HEADROOM; } /* set disable to cmdsts */ @@ -603,12 +605,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) * - Rx buffer begins with 2 byte headroom, and data will be put from * (buffer + 2). * To satisfy this, specify the address to put back the buffer - * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(), - * and expand the map size by NET_IP_ALIGN. + * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size + * by AVE_FRAME_HEADROOM. */ ret = ave_dma_map(ndev, &priv->rx.desc[entry], - skb->data - NET_IP_ALIGN, - AVE_MAX_ETHFRAME + NET_IP_ALIGN, + skb->data - AVE_FRAME_HEADROOM, + AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM, DMA_FROM_DEVICE, &paddr); if (ret) { netdev_err(ndev, "can't map skb for Rx\n"); @@ -904,11 +906,11 @@ static void ave_rxfifo_reset(struct net_device *ndev) /* assert reset */ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); - usleep_range(40, 50); + udelay(50); /* negate reset */ writel(0, priv->base + AVE_GRR); - usleep_range(10, 20); + udelay(20); /* negate interrupt status */ writel(AVE_GI_RXOVF, priv->base + AVE_GISR); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 324049eebb9b140a1ca9a2c65de1eaf82048d263..97be6a0850758efbd3dc6e3b4f5c18e9a2e00b42 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -108,6 +108,15 @@ config DWMAC_ROCKCHIP This selects the Rockchip RK3288 SoC glue layer support for the stmmac device driver. +config DWMAC_PHYTIUM + tristate "Phytium dwmac support" + depends on (OF || ACPI) && (ARCH_PHYTIUM || COMPILE_TEST) + help + Support for GMAC controller on Phytium SoCs. + + This selects the Phytium GMAC glue layer support for the + stmmac device driver. 
+ config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default (ARCH_SOCFPGA || ARCH_STRATIX10) diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 99967a80a8c810e23e68377a7412713b10ae0859..9bfb3a1036bb38017262e858c29ae1c7312fe244 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o +obj-$(CONFIG_DWMAC_PHYTIUM) += dwmac-phytium.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b1b305f8f4143626fc664445182c5e2afc38b87c..b069b3a2453be2330b8790445e88ae32a6e26399 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -261,7 +261,7 @@ struct stmmac_safety_stats { #define STMMAC_COAL_TX_TIMER 1000 #define STMMAC_MAX_COAL_TX_TICK 100000 #define STMMAC_TX_MAX_FRAMES 256 -#define STMMAC_TX_FRAMES 25 +#define STMMAC_TX_FRAMES 1 /* Packets types */ enum packets_types { @@ -365,7 +365,8 @@ struct dma_features { /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ #define BUF_SIZE_16KiB 16384 -#define BUF_SIZE_8KiB 8192 +/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +#define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index ca9d7e48034ceb33f5f4eb4db5b99691ed1a278f..3dfb07a78952533420da7cb1cb6b87b171d91661 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -29,11 +29,13 @@ /* Specific functions used for Ring mode */ /* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) { - p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) - << ERDES1_BUFFER2_SIZE_SHIFT) - & ERDES1_BUFFER2_SIZE_MASK); + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); if (end) p->des1 |= cpu_to_le32(ERDES1_END_RING); @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) } /* Normal descriptors */ -static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) { - p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) - << RDES1_BUFFER2_SIZE_SHIFT) - & RDES1_BUFFER2_SIZE_MASK); + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + } if (end) p->des1 |= cpu_to_le32(RDES1_END_RING); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index c5979569fd60f28fbf67b72f0c03cd613355d9cc..94b46258e8ff82b5efe5afa7cbe1c672a762a6a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -118,6 +118,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) struct device *dev = dwmac->dev; 
const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; struct meson8b_dwmac_clk_configs *clk_configs; + static const struct clk_div_table div_table[] = { + { .div = 2, .val = 2, }, + { .div = 3, .val = 3, }, + { .div = 4, .val = 4, }, + { .div = 5, .val = 5, }, + { .div = 6, .val = 6, }, + { .div = 7, .val = 7, }, + }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); if (!clk_configs) @@ -152,9 +160,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ALLOW_ZERO | - CLK_DIVIDER_ROUND_CLOSEST; + clk_configs->m250_div.table = div_table; + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, &clk_divider_ops, &clk_configs->m250_div.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..c95a884c1c122d5aea5bdb94a3f521ef2f98e675 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium DWMAC specific glue layer + * + * Copyright (C) 2022, Phytium Technology Co., Ltd. + * + * Chen Baozi + */ + +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +static int phytium_dwmac_acpi_phy(struct plat_stmmacenet_data *plat, + struct fwnode_handle *np, + struct device *dev) +{ + plat->mdio_bus_data = devm_kzalloc(dev, + sizeof(struct stmmac_mdio_bus_data), + GFP_KERNEL); + + if (!plat->mdio_bus_data) + return -ENOMEM; + + return 0; +} + +static int phytium_dwmac_probe(struct platform_device *pdev) +{ + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + struct plat_stmmacenet_data *plat; + struct stmmac_resources stmmac_res; + struct device_node *np = pdev->dev.of_node; + struct resource *res; + u64 clk_freq; + char clk_name[20]; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->interface = device_get_phy_mode(&pdev->dev); + if (plat->interface < 0) + return plat->interface; + + /* Configure PHY if using device-tree */ + if (pdev->dev.of_node) + plat->phy_node = of_parse_phandle(np, "phy-handle", 0); + + if (pdev->dev.of_node) { + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + } else if (fwnode_property_read_u32(fwnode, "bus_id", &plat->bus_id)) { + plat->bus_id = 2; + } + + plat->phy_addr = -1; + plat->clk_csr = -1; + plat->has_gmac = 1; + plat->enh_desc = 1; + plat->bugged_jumbo = 1; + plat->pmt = 1; + plat->force_sf_dma_mode = 1; + + if (fwnode_property_read_u32(fwnode, "max-speed", &plat->max_speed)) + plat->max_speed = -1; + + if (fwnode_property_read_u32(fwnode, "max-frame-size", &plat->maxmtu)) + plat->maxmtu = JUMBO_LEN; + + if (fwnode_property_read_u32(fwnode, "snps,multicast-filter-bins", + &plat->multicast_filter_bins)) + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + if 
(fwnode_property_read_u32(fwnode, "snps,perfect-filter-entries", + &plat->unicast_filter_entries)) + plat->unicast_filter_entries = 1; + + if (fwnode_property_read_u32(fwnode, "tx-fifo-depth", + &plat->tx_fifo_size)) + plat->tx_fifo_size = 0x1000; + + if (fwnode_property_read_u32(fwnode, "rx-fifo-depth", + &plat->rx_fifo_size)) + plat->rx_fifo_size = 0x1000; + + if (phytium_dwmac_acpi_phy(plat, fwnode, &pdev->dev)) + return -ENODEV; + + plat->rx_queues_to_use = 1; + plat->tx_queues_to_use = 1; + plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + + if (fwnode_property_read_u64(fwnode, "clock-frequency", &clk_freq)) + clk_freq = 125000000; + + /* Set system clock */ + snprintf(clk_name, sizeof(clk_name), "%s-%d", "stmmaceth", + plat->bus_id); + + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, clk_name, + NULL, 0, clk_freq); + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Fail to register stmmac-clk\n"); + plat->stmmac_clk = NULL; + } + + ret = clk_prepare_enable(plat->stmmac_clk); + if (ret) { + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; + } + + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + + if (fwnode_property_read_u32(fwnode, "snps,pbl", &plat->dma_cfg->pbl)) + plat->dma_cfg->pbl = 16; + + fwnode_property_read_u32(fwnode, "snps,txpbl", &plat->dma_cfg->txpbl); + fwnode_property_read_u32(fwnode, "snps,rxpbl", &plat->dma_cfg->rxpbl); + + plat->dma_cfg->pblx8 = !fwnode_property_read_bool(fwnode, + "snps,no-pbl-x8"); + plat->dma_cfg->aal = fwnode_property_read_bool(fwnode, "snps,aal"); + plat->dma_cfg->fixed_burst = fwnode_property_read_bool(fwnode, + "snps,fixed-burst"); + plat->dma_cfg->mixed_burst = fwnode_property_read_bool(fwnode, + "snps,mixed-burst"); + + plat->axi->axi_lpi_en = false; + plat->axi->axi_xit_frm = false; + plat->axi->axi_wr_osr_lmt = 7; + plat->axi->axi_rd_osr_lmt = 7; + plat->axi->axi_blen[0] = 16; + + memset(&stmmac_res, 0, sizeof(stmmac_res)); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res); + if (stmmac_res.addr < 0) { + dev_err(&pdev->dev, "resource map failed.\n"); + return stmmac_res.addr; + } + stmmac_res.irq = platform_get_irq(pdev, 0); + if (stmmac_res.irq < 0) { + dev_err(&pdev->dev, "IRQ not found.\n"); + return -ENXIO; + } + stmmac_res.wol_irq = stmmac_res.irq; + stmmac_res.lpi_irq = -1; + + return stmmac_dvr_probe(&pdev->dev, plat, &stmmac_res); +} + +int phytium_dwmac_remove(struct platform_device *pdev) +{ + int ret; + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct plat_stmmacenet_data *plat = priv->plat; + + ret = stmmac_pltfr_remove(pdev); + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; +} + +#ifdef CONFIG_OF +static const struct of_device_id phytium_dwmac_of_match[] = { + { .compatible = "phytium,gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_dwmac_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_dwmac_acpi_ids[] = { + { .id = "PHYT0004" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytium_dwmac_acpi_ids); +#endif + +static struct platform_driver phytium_dwmac_driver = { + .probe = phytium_dwmac_probe, + .remove = phytium_dwmac_remove, + .driver = { + .name = "phytium-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(phytium_dwmac_of_match), + .acpi_match_table = ACPI_PTR(phytium_dwmac_acpi_ids), + }, +}; 
+module_platform_driver(phytium_dwmac_driver); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium DWMAC specific glue layer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 7b923362ee5509d6fdedc9c15d09cd760182fab2..f45df6df69328bfd359c9fa053dae5869f190fd3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1203,10 +1203,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) int ret; struct device *dev = &bsp_priv->pdev->dev; - if (!ldo) { - dev_err(dev, "no regulator found\n"); - return -1; - } + if (!ldo) + return 0; if (enable) { ret = regulator_enable(ldo); @@ -1342,8 +1340,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) } ret = phy_power_on(bsp_priv, true); - if (ret) + if (ret) { + gmac_clk_enable(bsp_priv, false); return ret; + } pm_runtime_enable(dev); pm_runtime_get_sync(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index f9a61f90cfbc6acb269d4e8320bb9a078ae04239..ef13a462c36df8a982d7848531f9016b3515dcc4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv) return -ENODEV; } - mdio_internal = of_find_compatible_node(mdio_mux, NULL, + mdio_internal = of_get_compatible_child(mdio_mux, "allwinner,sun8i-h3-mdio-internal"); + of_node_put(mdio_mux); if (!mdio_internal) { dev_err(priv->device, "Cannot get internal_mdio node\n"); return -ENODEV; @@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv) gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL); if (IS_ERR(gmac->rst_ephy)) { ret = PTR_ERR(gmac->rst_ephy); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + of_node_put(iphynode); + of_node_put(mdio_internal); return ret; + } continue; } dev_info(priv->device, "Found internal PHY node\n"); + of_node_put(iphynode); + of_node_put(mdio_internal); return 0; } + + of_node_put(mdio_internal); return -ENODEV; } @@ -885,6 +893,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) * address. No need to mask it again. */ reg |= 1 << H3_EPHY_ADDR_SHIFT; + } else { + /* For SoCs without internal PHY the PHY selection bit should be + * set to 0 (external PHY). 
+ */ + reg &= ~H3_EPHY_SELECT; } if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) { @@ -933,6 +946,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) /* default */ break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; break; case PHY_INTERFACE_MODE_RMII: @@ -1007,6 +1023,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) mac->mac = &sun8i_dwmac_ops; mac->dma = &sun8i_dwmac_dma_ops; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + /* The loopback bit seems to be re-set when link change * Simply mask it each time * Speed 10/100/1000 are set in BIT(2)/BIT(3) @@ -1184,7 +1202,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) dwmac_mux: sun8i_dwmac_unset_syscon(gmac); dwmac_exit: - sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); + stmmac_pltfr_remove(pdev); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index d07520fb969e687aa6ee5c384c7e23ecf3a8e38a..ee5c0c6263516f57988bbd6ef5656171c82d03fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) * rate, which then uses the auto-reparenting feature of the * clock driver, and enabling/disabling the clock. */ - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(gmac->interface)) { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); clk_prepare_enable(gmac->tx_clk); gmac->clk_enabled = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 0877bde6e860b24a4f6003e817ee82a0726f0f11..21d131347e2effb5f94d777a264a72ed79ef813d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -216,6 +216,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, GMAC_ADDR_LOW(reg)); reg++; } + + while (reg <= perfect_addr_number) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } } #ifdef FRAME_FILTER_DEBUG diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 7e5d5db0d5165b5ff9b65d9e82f229c4ea5c5888..48cf5e2b24417f282fd6be1c7991c9a3a5dccacb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw, u32 value; base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; + if (queue >= 4) + queue -= 4; value = readl(ioaddr + base_register); @@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw, u32 value; base_register = (queue < 4) ? 
GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; + if (queue >= 4) + queue -= 4; value = readl(ioaddr + base_register); @@ -444,14 +448,20 @@ static void dwmac4_set_filter(struct mac_device_info *hw, * are required */ value |= GMAC_PACKET_FILTER_PR; - } else if (!netdev_uc_empty(dev)) { - int reg = 1; + } else { struct netdev_hw_addr *ha; + int reg = 1; netdev_for_each_uc_addr(ha, dev) { dwmac4_set_umac_addr(hw, ha->addr, reg); reg++; } + + while (reg <= GMAC_MAX_PERFECT_ADDRESSES) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } } writel(value, ioaddr + GMAC_PACKET_FILTER); @@ -469,8 +479,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, if (fc & FLOW_RX) { pr_debug("\tReceive Flow-Control ON\n"); flow |= GMAC_RX_FLOW_CTRL_RFE; - writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); } + writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); + if (fc & FLOW_TX) { pr_debug("\tTransmit Flow-Control ON\n"); @@ -478,7 +489,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, pr_debug("\tduplex mode: PAUSE %d\n", pause_time); for (queue = 0; queue < tx_cnt; queue++) { - flow |= GMAC_TX_FLOW_CTRL_TFE; + flow = GMAC_TX_FLOW_CTRL_TFE; if (duplex) flow |= @@ -486,6 +497,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); } + } else { + for (queue = 0; queue < tx_cnt; queue++) + writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fce13d7deccacf5d27ba50cc858b46..313a58b68feec31d9faaf2a2e3daeb3cf55f7f09 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); u32 own, ctxt; int ret = 1; - own = p->des3 & RDES3_OWN; - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + own = rdes3 & RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); if (likely(!own && ctxt)) { - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */ ret = -EINVAL; else @@ -293,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwmac4_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0a80fa25afe3eaad70e39d0b1de5288456d9ba06..20974529475102d93e2391253cc76ef9e6774eed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -169,6 +169,8 @@ #define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) #define XGMAC_RxPBL GENMASK(21, 16) #define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 #define XGMAC_RXST BIT(0) #define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) #define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x))) diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index d182f82f7b58608f0aa6e335bf6071562dc8cb6f..870302a7177e2334d19056dd4abc2df066851d16 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, u32 value, reg; reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; + if (queue >= 4) + queue -= 4; value = readl(ioaddr + reg); value &= ~XGMAC_PSRQ(queue); @@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, u32 value, reg; reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; + if (queue >= 4) + queue -= 4; value = readl(ioaddr + reg); value &= ~XGMAC_QxMDMACH(queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec99718ec63a5fa1064fe9dd99670d2e0..98fa471da7c0f2764729f98c7044f52e068c1db9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, } static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwxgmac2_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 20909036e002800fb39f1a5e66a90b4f7c60dcfa..27942c53b5673d1713f405bd0f4a56649e3bb915 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan) { u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; /* ABNORMAL interrupts */ @@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & XGMAC_RI)) { - u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); - if (likely(value & XGMAC_RIE)) { + if (likely(intr_en & XGMAC_RIE)) { x->rx_normal_irq_n++; ret |= handle_rx; } @@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, } /* Clear interrupts */ - writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); return ret; } @@ -379,7 +379,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) u32 value; value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); - value |= bfsize << 1; + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 77914c89d7497de6f9a251196fe079f49364ee13..5202d6ad79194b0ed9134a7905d0aaa4309c6822 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(rdes0 & RDES0_OWN)) return dma_own; + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & 
RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; @@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. */ - ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), - !!(rdes0 & RDES0_FRAME_TYPE), - !!(rdes0 & ERDES0_RX_MAC_ADDR)); + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; @@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); else - ehn_desc_rx_set_on_ring(p, end); + ehn_desc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3c8566d9e68de4820d06231ca10bd..5bb00234d961c6a5a2385c90bc3fdf54ff96e4ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -33,7 +33,7 @@ struct dma_extended_desc; struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, - int end); + int end, int bfsize); /* DMA TX descriptor ring initialization */ void (*init_tx_desc)(struct dma_desc *p, int mode, int end); /* Invoked by the xmit function to prepare the tx descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba967cc7a0d6ff3184998f359dde34..6d690678c20e11bf8594729524fafa238bb1c4bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, return dma_own; if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - pr_warn("%s: Oversized frame spanned multiple buffers\n", - __func__); stats->rx_length_errors++; return discard_frame; } @@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, - int end) + int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); else - ndesc_rx_set_on_ring(p, end); + ndesc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a7ffc73fffe82b057d2d5d036dc3c52a6e7e53e7..c0c75c111abba43da8f7365120ffef78d04f24c3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, - STMMAC_RING_MODE, 0, false, skb->len); + STMMAC_RING_MODE, 1, false, skb->len); tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 0, true, skb->len); + STMMAC_RING_MODE, 1, true, skb->len); } tx_q->cur_tx = entry; @@ -111,10 +111,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc) static void refill_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + struct stmmac_rx_queue *rx_q = priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + if (priv->dma_buf_sz == BUF_SIZE_16KiB) p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } @@ -140,7 +141,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) static int set_16kib_bfsize(int mtu) { int ret = 0; - if (unlikely(mtu >= BUF_SIZE_8KiB)) + if (unlikely(mtu > BUF_SIZE_8KiB)) ret = BUF_SIZE_16KiB; return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5710864fa80903102a82115aaa2bec9ac6d31aba..4d5fb4b51cc4fda15fce1dd54d40498662e8c743 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -692,33 +692,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct ethtool_eee *edata) { struct stmmac_priv *priv = netdev_priv(dev); + int ret; - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) + if (!edata->eee_enabled) { stmmac_disable_eee_mode(priv); - else { + } else { /* We are asking for enabling the EEE but it is safe * to verify all by invoking the eee_init function. * In case of failure it will return an error. 
*/ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) + edata->eee_enabled = stmmac_eee_init(priv); + if (!edata->eee_enabled) return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(dev->phydev, edata); + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + + priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (usec * (clk / 1000000)) / 256; } @@ -727,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (riwt * 256) / (clk / 1000000); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 8d9cc2157afd103c4d0282c8b74973d6c8f6256e..7423262ce5907f9d05ac6e44025c144c86e89618 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -122,7 +122,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, * programmed with (2^32 – ) */ if (gmac4) - sec = (100000000ULL - sec); + sec = -sec; value = readl(ioaddr + PTP_TCR); if (value & PTP_TCR_TSCTRLSSR) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2b8e5990df01f7c3ff9d7abe96137..7ee0e46539c01e61f78a81c3e90ec479d1b59039 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -54,7 +54,7 @@ #include "dwxgmac2.h" #include "hwif.h" -#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -474,7 +474,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; - u64 ns; + u64 ns = 0; if (!priv->hwts_tx_en) return; @@ -513,7 +513,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; - u64 ns; + u64 ns = 0; if (!priv->hwts_rx_en) return; @@ -558,8 +558,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; + u32 sec_inc = 0; u32 value = 0; - u32 sec_inc; bool xmac; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; @@ -1111,11 +1111,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); } /** @@ -2193,6 +2195,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + /* DMA CSR Channel configuration */ + for (chan 
= 0; chan < dma_csr_ch; chan++) + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { rx_q = &priv->rx_queue[chan]; @@ -2218,10 +2224,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) tx_q->tx_tail_addr, chan); } - /* DMA CSR Channel configuration */ - for (chan = 0; chan < dma_csr_ch; chan++) - stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); - return ret; } @@ -2547,12 +2549,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) netdev_warn(priv->dev, "PTP init failed\n"); } -#ifdef CONFIG_DEBUG_FS - ret = stmmac_init_fs(dev); - if (ret < 0) - netdev_warn(priv->dev, "%s: failed debugFS registration\n", - __func__); -#endif priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if (priv->use_riwt) { @@ -2601,8 +2597,6 @@ static int stmmac_open(struct net_device *dev) u32 chan; int ret; - stmmac_check_ether_addr(priv); - if (priv->hw->pcs != STMMAC_PCS_RGMII && priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { @@ -2753,10 +2747,6 @@ static int stmmac_release(struct net_device *dev) netif_carrier_off(dev); -#ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(dev); -#endif - stmmac_release_ptp(priv); return 0; @@ -2948,12 +2938,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* Manage tx mitigation */ tx_q->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames <= tx_q->tx_count_frames) { + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !(priv->synopsys_id >= DWMAC_CORE_4_00 && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - tx_q->tx_count_frames = 0; - } else { - stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); @@ -3004,6 +2997,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3038,6 +3032,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q = &priv->tx_queue[queue]; + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) @@ -3056,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (priv->tx_path_in_lpi_mode) - stmmac_disable_eee_mode(priv); - entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -3158,12 +3152,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * element in case of no SG. 
*/ tx_q->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames <= tx_q->tx_count_frames) { + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !(priv->synopsys_id >= DWMAC_CORE_4_00 && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - tx_q->tx_count_frames = 0; - } else { - stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); @@ -3198,20 +3195,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, csum_insertion, priv->mode, 1, last_segment, skb->len); - - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. - */ - wmb(); + } else { + stmmac_set_tx_owner(priv, first); } + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + wmb(); + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr); tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3318,6 +3318,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } rx_q->dirty_rx = entry; + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } /** @@ -3332,9 +3333,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; - unsigned int entry = rx_q->cur_rx; + unsigned int next_entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; - unsigned int next_entry; unsigned int count = 0; bool xmac; @@ -3352,10 +3352,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { - int status; + int entry, status; struct dma_desc *p; struct dma_desc *np; + entry = next_entry; + if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else @@ -3411,11 +3413,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * ignored */ if (frame_len > priv->dma_buf_sz) { - netdev_err(priv->dev, - "len %d larger than size (%d)\n", - frame_len, priv->dma_buf_sz); + if (net_ratelimit()) + netdev_err(priv->dev, + "len %d larger than size (%d)\n", + frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; - break; + continue; } /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 @@ -3450,7 +3453,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dev_warn(priv->device, "packet dropped\n"); priv->dev->stats.rx_dropped++; - break; + continue; } dma_sync_single_for_cpu(priv->device, @@ -3470,11 +3473,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else { skb = rx_q->rx_skbuff[entry]; if (unlikely(!skb)) { - netdev_err(priv->dev, - "%s: Inconsistent Rx chain\n", - priv->dev->name); + if (net_ratelimit()) + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", + priv->dev->name); priv->dev->stats.rx_dropped++; - break; + continue; } prefetch(skb->data - NET_IP_ALIGN); 
rx_q->rx_skbuff[entry] = NULL; @@ -3509,7 +3513,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; } - entry = next_entry; } stmmac_rx_refill(priv, queue); @@ -3532,27 +3535,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, napi); struct stmmac_priv *priv = ch->priv_data; - int work_done = 0, work_rem = budget; + int work_done, rx_done = 0, tx_done = 0; u32 chan = ch->index; priv->xstats.napi_poll++; - if (ch->has_tx) { - int done = stmmac_tx_clean(priv, work_rem, chan); + if (ch->has_tx) + tx_done = stmmac_tx_clean(priv, budget, chan); + if (ch->has_rx) + rx_done = stmmac_rx(priv, budget, chan); - work_done += done; - work_rem -= done; - } + work_done = max(rx_done, tx_done); + work_done = min(work_done, budget); - if (ch->has_rx) { - int done = stmmac_rx(priv, work_rem, chan); + if (work_done < budget && napi_complete_done(napi, work_done)) { + int stat; - work_done += done; - work_rem -= done; - } - - if (work_done < budget && napi_complete_done(napi, work_done)) stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + if (stat && napi_reschedule(napi)) + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + } return work_done; } @@ -3602,12 +3606,24 @@ static void stmmac_set_rx_mode(struct net_device *dev) static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; if (netif_running(dev)) { netdev_err(priv->dev, "must be stopped to change its MTU\n"); return -EBUSY; } + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + dev->mtu = new_mtu; netdev_update_features(dev); @@ -3844,6 +3860,23 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, } } +static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + /* + * There is no way to determine the number of TSO + * capable Queues. Let's use always the Queue 0 + * because if TSO is supported then at least this + * one will be capable. + */ + return 0; + } + + return fallback(dev, skb, NULL) % dev->real_num_tx_queues; +} + static int stmmac_set_mac_address(struct net_device *ndev, void *addr) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -3896,6 +3929,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; + if ((dev->flags & IFF_UP) == 0) + return 0; + for (queue = 0; queue < rx_count; queue++) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; @@ -4083,6 +4119,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_tx_timeout = stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, .ndo_setup_tc = stmmac_setup_tc, + .ndo_select_queue = stmmac_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = stmmac_poll_controller, #endif @@ -4198,6 +4235,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv) return ret; } + /* Rx Watchdog is available in the COREs newer than the 3.40. 
+ * In some case, for example on bugged HW this feature + * has to be disable and this can be done by passing the + * riwt_off field from the platform. + */ + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + priv->use_riwt = 1; + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); + } + return 0; } @@ -4254,6 +4303,7 @@ int stmmac_dvr_probe(struct device *device, priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { dev_err(priv->device, "failed to create workqueue\n"); + ret = -ENOMEM; goto error_wq; } @@ -4280,6 +4330,8 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_hw_init; + stmmac_check_ether_addr(priv); + /* Configure real RX and TX queues */ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); @@ -4329,18 +4381,6 @@ int stmmac_dvr_probe(struct device *device, if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ - /* Rx Watchdog is available in the COREs newer than the 3.40. - * In some case, for example on bugged HW this feature - * has to be disable and this can be done by passing the - * riwt_off field from the platform. - */ - if (((priv->synopsys_id >= DWMAC_CORE_3_50) || - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { - priv->use_riwt = 1; - dev_info(priv->device, - "Enable RX Mitigation via HW Watchdog Timer\n"); - } - /* Setup channels NAPI */ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); @@ -4394,6 +4434,13 @@ int stmmac_dvr_probe(struct device *device, goto error_netdev_register; } +#ifdef CONFIG_DEBUG_FS + ret = stmmac_init_fs(ndev); + if (ret < 0) + netdev_warn(priv->dev, "%s: failed debugFS registration\n", + __func__); +#endif + return ret; error_netdev_register: @@ -4429,6 +4476,9 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); @@ -4486,8 +4536,10 @@ int stmmac_suspend(struct device *dev) stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable(priv->plat->pclk); - clk_disable(priv->plat->stmmac_clk); + if (priv->plat->clk_ptp_ref) + clk_disable_unprepare(priv->plat->clk_ptp_ref); + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); } mutex_unlock(&priv->lock); @@ -4552,8 +4604,10 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ - clk_enable(priv->plat->stmmac_clk); - clk_enable(priv->plat->pclk); + clk_prepare_enable(priv->plat->stmmac_clk); + clk_prepare_enable(priv->plat->pclk); + if (priv->plat->clk_ptp_ref) + clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b72ef171477e0ec6dfba85c09815226a584ec30b..093a223fe40882cfdc8dae34f1a603034991a112 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, */ int stmmac_mdio_reset(struct mii_bus *bus) { -#if defined(CONFIG_STMMAC_PLATFORM) +#if 
IS_ENABLED(CONFIG_STMMAC_PLATFORM) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; @@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus) of_property_read_u32_array(np, "snps,reset-delays-us", data->delays, 3); - if (gpio_request(data->reset_gpio, "mdio-reset")) + if (devm_gpio_request(priv->device, data->reset_gpio, + "mdio-reset")) return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index c54a50dbd5ac264faa439550f949a981cb0440ae..cc1e887e47b50f31bba7a53e8f146d9ac7fb4fa7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = { }, .driver_data = (void *)&galileo_stmmac_dmi_data, }, + /* + * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which + * has only one pci network device while other asset tags are + * for IOT2040 which has two. + */ { .matches = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), @@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = { { .matches = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), - DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, - "6ES7647-0AA00-1YA2"), }, .driver_data = (void *)&iot2040_stmmac_dmi_data, }, @@ -299,7 +303,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev, */ static void stmmac_pci_remove(struct pci_dev *pdev) { + int i; + stmmac_dvr_remove(&pdev->dev); + + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + pcim_iounmap_regions(pdev, BIT(i)); + break; + } + pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 2293e21f789f6c5e98e542d7dd2384d0a8f7a7f2..cc60b3fb0892792d61cad4e46620a20913a30b54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; - u64 ns; + u64 ns = 0; spin_lock_irqsave(&priv->ptp_lock, flags); stmmac_get_systime(priv, priv->ptpaddr, &ns); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 531294f4978bc42bbb0e3cb0177b1312a33bef61..37c0bc699cd9ca80dbfdb77e7893abaad150fe62 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, entry = &priv->tc_entries[i]; if (!entry->in_use && !first && free) first = entry; - if (entry->handle == loc && !free) + if ((entry->handle == loc) && !free && !entry->is_frag) dup = entry; } @@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv, /* Queue 0 is not AVB capable */ if (queue <= 0 || queue >= tx_queues_count) return -EINVAL; + if (!priv->dma_cap.av) + return -EOPNOTSUPP; if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9020b084b953880466cdf9c0354ee8208b987873..7ec4eb74fe2160b53af40e08e4bc95feda125100 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ 
b/drivers/net/ethernet/sun/cassini.c @@ -1,22 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0+ /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * * This driver uses the sungem driver (c) David Miller * (davem@redhat.com) as its basis. * diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 13f3860496a861d3d2f9716fabd93add43b74f6c..ae5f05f03f8804e14616cc1c040b462d43b7351f 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h @@ -1,23 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * * vendor id: 0x108E (Sun Microsystems, Inc.) * device id: 0xabba (Cassini) * revision ids: 0x01 = Cassini diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index d42f47f6c632fe8618348d40fc609bfed5deef4a..644e42c181ee6030dee0222479daf314a290a1f5 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -113,7 +113,7 @@ static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index f047b27971564ec06f59810a8092c5d068503edb..720b7ac77f3b3c08e428faaf16ba4cbae1b76275 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -950,7 +950,8 @@ static void bigmac_tx_timeout(struct net_device *dev) } /* Put a packet on the wire. 
*/ -static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); int len, entry; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index b9221fc1674dfa0ef17a43f8ff86d700a1ae514f..de8ce1651989f6c0dca06a169a20450a504fd86c 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -959,17 +959,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void gem_poll_controller(struct net_device *dev) -{ - struct gem *gp = netdev_priv(dev); - - disable_irq(gp->pdev->irq); - gem_interrupt(gp->pdev->irq, dev); - enable_irq(gp->pdev->irq); -} -#endif - static void gem_tx_timeout(struct net_device *dev) { struct gem *gp = netdev_priv(dev); @@ -2835,9 +2824,6 @@ static const struct net_device_ops gem_netdev_ops = { .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = gem_poll_controller, -#endif }; static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 7fe0d5e3392218ebea76ff524142a857e6b5e003..1468fa0a54e9b755884f808a3cd772b610a62b84 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -570,7 +570,7 @@ static void qe_tx_timeout(struct net_device *dev) } /* Get a packet queued to go onto the wire. */ -static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 12539b357a78402dfc80a4a654761051a2fa6409..590172818b922f069a82765ff5042cd852093c27 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -247,7 +247,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, } /* Wrappers to common functions */ -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index d8f4c3f281505810620a3c2f03a22bb657a8cb2f..59fc14b58c8389638f2749a487e85bc78f495e86 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1216,9 +1216,10 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) return skb; } -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +static netdev_tx_t +vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; @@ -1321,9 +1322,10 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, return NETDEV_TX_OK; } -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - 
struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)) +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { struct vnet_port *port = NULL; struct vio_dring_state *dr; @@ -1351,27 +1353,12 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (vio_version_after_eq(&port->vio, 1, 3)) localmtu -= VLAN_HLEN; - if (skb->protocol == htons(ETH_P_IP)) { - struct flowi4 fl4; - struct rtable *rt = NULL; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = dev->ifindex; - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); - fl4.daddr = ip_hdr(skb)->daddr; - fl4.saddr = ip_hdr(skb)->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - if (!IS_ERR(rt)) { - skb_dst_set(skb, &rt->dst); - icmp_send(skb, ICMP_DEST_UNREACH, - ICMP_FRAG_NEEDED, - htonl(localmtu)); - } - } + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(localmtu)); #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); #endif goto out_dropped; } diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index 1ea0b016580a40a904b0da23cc49f7c2a159e75e..2b808d2482d60e740176b0bb9007cc8bef747e82 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -136,9 +136,10 @@ int sunvnet_close_common(struct net_device *dev); void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); void sunvnet_tx_timeout_common(struct net_device *dev); -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, - struct vnet_port *(*vnet_tx_port) - (struct sk_buff *, struct net_device *)); +netdev_tx_t +sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)); #ifdef CONFIG_NET_POLL_CONTROLLER void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp); #endif diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 832bce07c38519fff846f650dc32a0ea2056b735..0b7a3eb06a651410802519ee37e0228c7be95d5b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -642,6 +642,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); + __dev_mc_unsync(ndev, NULL); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -953,8 +954,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) { struct cpsw_common *cpsw = dev_id; - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); writel(0, &cpsw->wr_regs->rx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); if (cpsw->quirk_irq) { disable_irq_nosync(cpsw->irqs_table[0]); @@ -2978,7 +2979,7 @@ static void cpsw_get_ringparam(struct net_device *ndev, struct cpsw_common *cpsw = priv->cpsw; /* not supported */ - ering->tx_max_pending = 0; + ering->tx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c 
b/drivers/net/ethernet/ti/cpsw_ale.c index 5766225a4ce117957cdd77fe2a9d4ad4abc8f150..c245629a38c7675ae273139abb968beb786fbc90 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -793,6 +793,7 @@ EXPORT_SYMBOL_GPL(cpsw_ale_start); void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } EXPORT_SYMBOL_GPL(cpsw_ale_stop); @@ -877,6 +878,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; } + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); return ale; } EXPORT_SYMBOL_GPL(cpsw_ale_create); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index b96b93c686bf15581b89c05d347a2179ae9883cc..d7543811dfae2dfe528ba6f7f25394cde10b88e7 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -119,9 +119,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) if (time_after(jiffies, skb_cb->tmo)) { /* timeout any expired skbs over 1s */ - dev_dbg(cpts->dev, - "expiring tx timestamp mtype %u seqid %04x\n", - mtype, seqid); + dev_dbg(cpts->dev, "expiring tx timestamp from txq\n"); __skb_unlink(skb, &cpts->txq); dev_consume_skb_any(skb); } @@ -572,7 +570,9 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, return ERR_CAST(cpts->refclk); } - clk_prepare(cpts->refclk); + ret = clk_prepare(cpts->refclk); + if (ret) + return ERR_PTR(ret); cpts->cc.read = cpts_systim_read; cpts->cc.mask = CLOCKSOURCE_MASK(32); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 72b98e27c9920b3c4342e0e8a5f3eef8476439f4..d177dfd1df892240021424a0c826630631537140 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3655,12 +3655,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } ret = netcp_txpipe_open(&gbe_dev->tx_pipe); - if (ret) + if (ret) { + of_node_put(interfaces); return ret; + } /* Create network interfaces */ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 93d142867c2af1fd54efa6d1253d4b109b0fbad8..c6595b4ff55a5c6596e4fd4538daba3584e7d3f7 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -312,9 +312,8 @@ static void tlan_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); #endif - free_netdev(dev); - cancel_work_sync(&priv->tlan_tqueue); + free_netdev(dev); } static void tlan_start(struct net_device *dev) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 88d74aef218a2b7480a1e84f61ea9f8f63ad3e49..75237c81c63d65dda6951137bcd69ce43b5d65cd 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card, * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, <0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gelic_card *card = 
netdev_card(netdev); struct gelic_descr *descr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 003d0452d9cb157b1b7b2418341d2b9099342ceb..fbbf9b54b173bfc3d14c61253e83e51d3f4088db 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card); void gelic_card_down(struct gelic_card *card); int gelic_net_open(struct net_device *netdev); int gelic_net_stop(struct net_device *netdev); -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); void gelic_net_set_multi(struct net_device *netdev); void gelic_net_tx_timeout(struct net_device *netdev); int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index d925b8203996691f1c380d70ff28de9f9c0b0a49..23417266b7ecc24aca2a0e562cd17985399c9cc3 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -880,9 +880,9 @@ spider_net_kick_tx_dma(struct spider_net_card *card) * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, !0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -static int +static netdev_tx_t spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) { int cnt; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index cce9c9ed46aa9a8462080c5949bdb6621e247f0d..03afc4d8c3ec1585b4d1e92fee653eff63414a83 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_ /* Index to functions, as function prototypes. */ static int tc35815_open(struct net_device *dev); -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t tc35815_send_packet(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); static int tc35815_rx(struct net_device *dev, int limit); static int tc35815_poll(struct napi_struct *napi, int budget); @@ -1248,7 +1249,8 @@ tc35815_open(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
*/ -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; @@ -1497,7 +1499,7 @@ tc35815_rx(struct net_device *dev, int limit) pci_unmap_single(lp->pci_dev, lp->rx_skbs[cur_bd].skb_dma, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) + if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0) memmove(skb->data, skb->data - NET_IP_ALIGN, pkt_len); data = skb_put(skb, pkt_len); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index edcd1e60b30d17b729329a5d7558db854b233559..f076050c8ad37fe11c4a5bb37a2481c4c7561859 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -383,9 +383,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift, static void tsi108_stat_carry(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; u32 carry1, carry2; - spin_lock_irq(&data->misclock); + spin_lock_irqsave(&data->misclock, flags); carry1 = TSI_READ(TSI108_STAT_CARRY1); carry2 = TSI_READ(TSI108_STAT_CARRY2); @@ -453,7 +454,7 @@ static void tsi108_stat_carry(struct net_device *dev) TSI108_STAT_TXPAUSEDROP_CARRY, &data->tx_pause_drop); - spin_unlock_irq(&data->misclock); + spin_unlock_irqrestore(&data->misclock, flags); } /* Read a stat counter atomically with respect to carries. diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60abc9250f56a7eac7025447626799e3b046c8f5..88e71aa890920394c4b7c79a6cb02e8b39cf9591 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -674,7 +674,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) return 0; } -static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; @@ -688,7 +689,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (temac_check_tx_bd_space(lp, num_frag)) { + if (temac_check_tx_bd_space(lp, num_frag + 1)) { if (!netif_queue_stopped(ndev)) netif_stop_queue(ndev); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index f24f48f3380204d102b5c7a06825d57bc7a16b22..28764268a44f87d61bc9d6b1f8976fd6fb3fbecc 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -614,6 +614,10 @@ static void axienet_start_xmit_done(struct net_device *ndev) ndev->stats.tx_packets += packets; ndev->stats.tx_bytes += size; + + /* Matches barrier in axienet_start_xmit */ + smp_mb(); + netif_wake_queue(ndev); } @@ -653,7 +657,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. 
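The sun and toshiba hunks above, and the xilinx hunks that follow, all make the same change: ndo_start_xmit handlers stop returning a plain int and return netdev_tx_t, so a driver that leaks an errno value into a path where the core only understands NETDEV_TX_OK and NETDEV_TX_BUSY gets caught by the type system. A minimal sketch of the shape these handlers converge on is below; example_priv and its ring_full flag are hypothetical stand-ins, not code from any driver in this series.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct example_priv {
		bool ring_full;		/* stand-in for real descriptor accounting */
	};

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		struct example_priv *ep = netdev_priv(dev);

		if (ep->ring_full) {
			/* No descriptors: stop the queue and let the stack
			 * requeue the skb later.
			 */
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}

		/* ... hand the skb to the hardware here ... */
		dev_kfree_skb_any(skb);	/* placeholder for the real TX path */
		return NETDEV_TX_OK;
	}

Returning NETDEV_TX_BUSY without freeing the skb tells the stack to retry, which is why the converted drivers stop the queue first; once the skb has been handed to hardware the only valid return is NETDEV_TX_OK.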
*/ -static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; @@ -668,9 +673,19 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; if (axienet_check_tx_bd_space(lp, num_frag)) { - if (!netif_queue_stopped(ndev)) - netif_stop_queue(ndev); - return NETDEV_TX_BUSY; + if (netif_queue_stopped(ndev)) + return NETDEV_TX_BUSY; + + netif_stop_queue(ndev); + + /* Matches barrier in axienet_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (axienet_check_tx_bd_space(lp, num_frag)) + return NETDEV_TX_BUSY; + + netif_wake_queue(ndev); } if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -1574,12 +1589,14 @@ static int axienet_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &dmares); if (ret) { dev_err(&pdev->dev, "unable to get DMA resource\n"); + of_node_put(np); goto free_netdev; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); + of_node_put(np); goto free_netdev; } lp->rx_irq = irq_of_parse_and_map(np, 1); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 42f1f518dad6939300905f8b6218f4f31fabd212..23a4f9061072b86f91f7fe1343b8671ab01332a0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1020,9 +1020,10 @@ static int xemaclite_close(struct net_device *dev) * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * - * Return: 0, always. + * Return: NETDEV_TX_OK, always. */ -static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +static netdev_tx_t +xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; @@ -1044,7 +1045,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) /* Take the time stamp now, since we can't do this in an ISR. 
*/ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); - return 0; + return NETDEV_TX_OK; } spin_unlock_irqrestore(&lp->reset_lock, flags); @@ -1053,7 +1054,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); - return 0; + return NETDEV_TX_OK; } /** @@ -1172,15 +1173,16 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, - "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", - (unsigned int __force)ndev->mem_start, - (unsigned int __force)lp->base_addr, ndev->irq); + "Xilinx EmacLite at 0x%08X mapped to 0x%p, irq=%d\n", + (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index fd5288ff53b5303271f20137dda57f125d879901..e3438cef5f9c66a68434461ae8bfe5edef443300 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -503,6 +503,11 @@ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + + netif_carrier_off(dev); + netif_tx_disable(dev); + cancel_work_sync(&local->tx_timeout_task); dev_dbg(&link->dev, "detach\n"); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index d3eae123904575aba430883ab7a0ba4d28b8b07c..1979f8f8dac721e549170ae1076519d2a71e29a6 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -181,6 +181,9 @@ static int fjes_acpi_add(struct acpi_device *device) /* create platform_device */ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, ARRAY_SIZE(fjes_resource)); + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + device->driver_data = plat_dev; return 0; @@ -1252,8 +1255,17 @@ static int fjes_probe(struct platform_device *plat_dev) adapter->open_guard = false; adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->txrx_wq)) { + err = -ENOMEM; + goto err_free_netdev; + } + adapter->control_wq = alloc_workqueue(DRV_NAME "/control", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->control_wq)) { + err = -ENOMEM; + goto err_free_txrx_wq; + } INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); INIT_WORK(&adapter->raise_intr_rxdata_task, @@ -1270,7 +1282,7 @@ static int fjes_probe(struct platform_device *plat_dev) hw->hw_res.irq = platform_get_irq(plat_dev, 0); err = fjes_hw_init(&adapter->hw); if (err) - goto err_free_netdev; + goto err_free_control_wq; /* setup MAC address (02:00:00:00:00:[epid])*/ netdev->dev_addr[0] = 2; @@ -1292,6 +1304,10 @@ static int fjes_probe(struct platform_device *plat_dev) err_hw_exit: fjes_hw_exit(&adapter->hw); +err_free_control_wq: + destroy_workqueue(adapter->control_wq); +err_free_txrx_wq: + destroy_workqueue(adapter->txrx_wq); err_free_netdev: free_netdev(netdev); err_out: diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 493cd382b8aa0f187fcaf1dc520da765f9595fe0..386d8a86bbdbd1b8c3cf4ae99f8708fb305c6a90 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -217,14 +217,13 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, struct metadata_dst *tun_dst = NULL; struct pcpu_sw_netstats *stats; unsigned int len; - int err = 0; 
+ int nh, err = 0; void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; - flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT | - (gnvh->oam ? TUNNEL_OAM : 0) | + flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? TUNNEL_CRIT_OPT : 0); tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, @@ -262,9 +261,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, goto drop; } - oiph = skb_network_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_reset_network_header(skb); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(geneve->dev, rx_length_errors); + DEV_STATS_INC(geneve->dev, rx_errors); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (geneve_get_sk_family(gs) == AF_INET) err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) @@ -636,15 +649,20 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6); bool metadata = geneve->collect_md; + bool ipv4, ipv6; int ret = 0; + ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata; + ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) - if (ipv6 || metadata) + if (ipv6) { ret = geneve_sock_add(geneve, true); + if (ret < 0 && ret != -EAFNOSUPPORT) + ipv4 = false; + } #endif - if (!ret && (!ipv6 || metadata)) + if (ipv4) ret = geneve_sock_add(geneve, false); if (ret < 0) geneve_sock_release(geneve); @@ -716,7 +734,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -732,6 +751,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; tos = info->key.tos; if ((tos == 1) && !geneve->collect_md) { @@ -766,7 +787,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -782,21 +804,25 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; + fl6->fl6_dport = dport; + fl6->fl6_sport = sport; + prio = info->key.tos; if ((prio == 1) && !geneve->collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); use_cache = false; } - fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio), - info->key.label); + fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); dst_cache = (struct dst_cache *)&info->dst_cache; if (use_cache) { dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); if (dst) return dst; } - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, + NULL); + if (IS_ERR(dst)) 
{ netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } @@ -826,14 +852,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -868,13 +895,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -950,13 +978,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); + __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -966,9 +999,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -979,8 +1016,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; } - info->key.tp_src = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = geneve->info.key.tp_dst; return 0; } @@ -1406,9 +1442,13 @@ static void geneve_link_config(struct net_device *dev, } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { - struct rt6_info *rt = rt6_lookup(geneve->net, - &info->key.u.ipv6.dst, NULL, 0, - NULL, 0); + struct rt6_info *rt; + + if (!__in6_dev_get(dev)) + break; + + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7a145172d50385f56f54e4b114ab6194fd672ca5..b53d2d32385a471aa58468f00b9b75fa3ac43b7e 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -42,7 +42,6 @@ struct pdp_ctx { struct hlist_node hlist_addr; union { - u64 tid; struct { u64 tid; u16 flow; @@ -289,16 +288,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) return gtp_rx(pctx, skb, hdrlen, gtp->role); } 
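The geneve hunks above rework the transmit path so the UDP source port is picked with udp_flow_src_port() before the route lookup, and both ports are copied into the flow key (fl4_sport/fl4_dport, fl6_sport/fl6_dport), so the routing decision and the emitted packet agree on the full 4-tuple. A minimal sketch of that ordering for the IPv4 case, assuming hypothetical names (example_tunnel_route is not a function in this series):

	#include <linux/in.h>
	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <net/route.h>
	#include <net/udp.h>

	static struct rtable *example_tunnel_route(struct net *net,
						   struct sk_buff *skb,
						   __be32 daddr, __be16 dport,
						   __be16 *sport)
	{
		struct flowi4 fl4;

		/* Pick the source port first so it can be part of the lookup key. */
		*sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);

		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.daddr = daddr;
		fl4.fl4_dport = dport;
		fl4.fl4_sport = *sport;

		return ip_route_output_key(net, &fl4);
	}

The same idea shows up in geneve_fill_metadata_dst() above, where the precomputed sport is also stored in info->key.tp_src instead of being derived a second time.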
-static void gtp_encap_destroy(struct sock *sk) +static void __gtp_encap_destroy(struct sock *sk) { struct gtp_dev *gtp; - gtp = rcu_dereference_sk_user_data(sk); + lock_sock(sk); + gtp = sk->sk_user_data; if (gtp) { + if (gtp->sk0 == sk) + gtp->sk0 = NULL; + else + gtp->sk1u = NULL; udp_sk(sk)->encap_type = 0; rcu_assign_sk_user_data(sk, NULL); sock_put(sk); } + release_sock(sk); +} + +static void gtp_encap_destroy(struct sock *sk) +{ + rtnl_lock(); + __gtp_encap_destroy(sk); + rtnl_unlock(); } static void gtp_encap_disable_sock(struct sock *sk) @@ -306,7 +318,7 @@ static void gtp_encap_disable_sock(struct sock *sk) if (!sk) return; - gtp_encap_destroy(sk); + __gtp_encap_destroy(sk); } static void gtp_encap_disable(struct gtp_dev *gtp) @@ -532,14 +544,13 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(&rt->dst); } - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); goto err_rt; } @@ -563,6 +574,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) if (skb_cow_head(skb, dev->needed_headroom)) goto tx_err; + if (!pskb_inet_may_pull(skb)) + goto tx_err; + skb_reset_inner_headers(skb); /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */ @@ -632,9 +646,16 @@ static void gtp_link_setup(struct net_device *dev) } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); -static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); +static void gtp_destructor(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); +} + static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -652,10 +673,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - if (!data[IFLA_GTP_PDP_HASHSIZE]) + if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; - else + } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); + if (!hashsize) + hashsize = 1024; + } err = gtp_hashtable_new(gtp, hashsize); if (err < 0) @@ -669,13 +693,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_hashtable: - gtp_hashtable_free(gtp); + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); out_encap: gtp_encap_disable(gtp); return err; @@ -684,9 +710,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); + struct hlist_node *next; + struct pdp_ctx *pctx; + int i; + + for (i = 0; i < gtp->hash_size; i++) + hlist_for_each_entry_safe(pctx, next, >p->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); gtp_encap_disable(gtp); - gtp_hashtable_free(gtp); list_del_rcu(>p->list); unregister_netdevice_queue(dev, head); } @@ -764,20 +796,6 @@ static int 
gtp_hashtable_new(struct gtp_dev *gtp, int hsize) return -ENOMEM; } -static void gtp_hashtable_free(struct gtp_dev *gtp) -{ - struct pdp_ctx *pctx; - int i; - - for (i = 0; i < gtp->hash_size; i++) - hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) - pdp_context_delete(pctx); - - synchronize_rcu(); - kfree(gtp->addr_hash); - kfree(gtp->tid_hash); -} - static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { @@ -791,21 +809,24 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, sock = sockfd_lookup(fd, &err); if (!sock) { pr_debug("gtp socket fd=%d not found\n", fd); - return NULL; + return ERR_PTR(err); } - if (sock->sk->sk_protocol != IPPROTO_UDP) { + sk = sock->sk; + if (sk->sk_protocol != IPPROTO_UDP || + sk->sk_type != SOCK_DGRAM || + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } - if (rcu_dereference_sk_user_data(sock->sk)) { + lock_sock(sk); + if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); - goto out_sock; + goto out_rel_sock; } - sk = sock->sk; sock_hold(sk); tuncfg.sk_user_data = gtp; @@ -815,6 +836,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); +out_rel_sock: + release_sock(sock->sk); out_sock: sockfd_put(sock); return sk; @@ -847,8 +870,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) if (data[IFLA_GTP_ROLE]) { role = nla_get_u32(data[IFLA_GTP_ROLE]); - if (role > GTP_ROLE_SGSN) + if (role > GTP_ROLE_SGSN) { + if (sk0) + gtp_encap_disable_sock(sk0); + if (sk1u) + gtp_encap_disable_sock(sk1u); return -EINVAL; + } } gtp->sk0 = sk0; @@ -911,24 +939,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { + struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; - struct pdp_ctx *pctx; + unsigned int version; bool found = false; __be32 ms_addr; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + version = nla_get_u32(info->attrs[GTPA_VERSION]); - hlist_for_each_entry_rcu(pctx, >p->addr_hash[hash_ms], hlist_addr) { - if (pctx->ms_addr_ip4.s_addr == ms_addr) { - found = true; - break; - } - } + pctx = ipv4_pdp_find(gtp, ms_addr); + if (pctx) + found = true; + if (version == GTP_V0) + pctx_tid = gtp0_pdp_find(gtp, + nla_get_u64(info->attrs[GTPA_TID])); + else if (version == GTP_V1) + pctx_tid = gtp1_pdp_find(gtp, + nla_get_u32(info->attrs[GTPA_I_TEI])); + if (pctx_tid) + found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) @@ -936,6 +971,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; + if (pctx && pctx_tid) + return -EEXIST; + if (!pctx) + pctx = pctx_tid; + ipv4_pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) @@ -949,7 +989,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, } - pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL); + pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); if (pctx == NULL) return -ENOMEM; @@ -1038,6 +1078,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + rtnl_lock(); rcu_read_lock(); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); @@ -1058,10 
+1099,11 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } - err = ipv4_pdp_add(gtp, sk, info); + err = gtp_pdp_add(gtp, sk, info); out_unlock: rcu_read_unlock(); + rtnl_unlock(); return err; } @@ -1215,43 +1257,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); - struct gtp_net *gn = net_generic(net, gtp_net_id); - unsigned long tid = cb->args[1]; - int i, k = cb->args[0], ret; struct pdp_ctx *pctx; + struct gtp_net *gn; + + gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; + rcu_read_lock(); list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; - for (i = k; i < gtp->hash_size; i++) { - hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], hlist_tid) { - if (tid && tid != pctx->u.tid) - continue; - else - tid = 0; - - ret = gtp_genl_fill_info(skb, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - cb->nlh->nlmsg_type, pctx); - if (ret < 0) { + for (i = bucket; i < gtp->hash_size; i++) { + j = 0; + hlist_for_each_entry_rcu(pctx, >p->tid_hash[i], + hlist_tid) { + if (j >= skip && + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; - cb->args[1] = pctx->u.tid; + cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } + j++; } + skip = 0; } + bucket = 0; } cb->args[4] = 1; out: + rcu_read_unlock(); return skb->len; } @@ -1335,26 +1380,26 @@ static int __init gtp_init(void) get_random_bytes(>p_h_initval, sizeof(gtp_h_initval)); - err = rtnl_link_register(>p_link_ops); + err = register_pernet_subsys(>p_net_ops); if (err < 0) goto error_out; - err = genl_register_family(>p_genl_family); + err = rtnl_link_register(>p_link_ops); if (err < 0) - goto unreg_rtnl_link; + goto unreg_pernet_subsys; - err = register_pernet_subsys(>p_net_ops); + err = genl_register_family(>p_genl_family); if (err < 0) - goto unreg_genl_family; + goto unreg_rtnl_link; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; -unreg_genl_family: - genl_unregister_family(>p_genl_family); unreg_rtnl_link: rtnl_link_unregister(>p_link_ops); +unreg_pernet_subsys: + unregister_pernet_subsys(>p_net_ops); error_out: pr_err("error loading GTP module loaded\n"); return err; @@ -1363,9 +1408,9 @@ late_initcall(gtp_init); static void __exit gtp_fini(void) { - unregister_pernet_subsys(>p_net_ops); genl_unregister_family(>p_genl_family); rtnl_link_unregister(>p_link_ops); + unregister_pernet_subsys(>p_net_ops); pr_info("GTP module unloaded\n"); } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index d79a69dd2146d347b3f44892e2534fe06290ca4a..6f7e1598106f88958cc2743954092ed36877e5ba 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -311,7 +311,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; @@ -524,10 +523,7 @@ static void resync_tnc(struct timer_list *t) /* Start resync timer again -- the TNC might be still absent */ - - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); } static inline int tnc_init(struct sixpack *sp) @@ -538,9 +534,7 @@ static inline int tnc_init(struct sixpack *sp) sp->tty->ops->write(sp->tty, &inbyte, 1); - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); return 0; } @@ -659,10 +653,10 @@ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!sp) return; @@ -679,14 +673,16 @@ static void sixpack_close(struct tty_struct *tty) */ netif_stop_queue(sp->dev); + unregister_netdev(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - /* Free all 6pack frame buffers. */ + /* Free all 6pack frame buffers after unreg. */ kfree(sp->rbuff); kfree(sp->xbuff); - unregister_netdev(sp->dev); + free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ @@ -864,6 +860,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) return; } + if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { + pr_err("6pack: cooked buffer overrun, data loss\n"); + sp->rx_count = 0; + return; + } + buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); @@ -918,11 +920,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) /* if the state byte has been received, the TNC is present, so the resync timer can be reset. */ - if (sp->tnc_state == TNC_IN_SYNC) { - del_timer(&sp->resync_t); - sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT; - add_timer(&sp->resync_t); - } + if (sp->tnc_state == TNC_IN_SYNC) + mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT); sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 13e4c1eff3536c6ab1bbc578d2a92d76c6e9027f..561f1f67f32ea42656cdf18cafd7fe51ebf4d79f 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -783,10 +783,10 @@ static void mkiss_close(struct tty_struct *tty) { struct mkiss *ax; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); ax = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!ax) return; @@ -803,13 +803,15 @@ static void mkiss_close(struct tty_struct *tty) */ netif_stop_queue(ax->dev); + unregister_netdev(ax->dev); + /* Free all AX25 frame buffers. */ kfree(ax->rbuff); kfree(ax->xbuff); ax->tty = NULL; - unregister_netdev(ax->dev); + free_netdev(ax->dev); } /* Perform I/O control on an active ax25 channel. 
*/ diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index ba9df430fca6e45ea10db1dc9d762ce700102121..f124ceab5f5e166ff40cc7b0248a94c8bbc12b37 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -966,9 +966,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) sizeof(struct yamdrv_ioctl_mcs)); if (IS_ERR(ym)) return PTR_ERR(ym); - if (ym->cmd != SIOCYAMSMCS) - return -EINVAL; - if (ym->bitrate > YAM_MAXBITRATE) { + if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) { kfree(ym); return -EINVAL; } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a32ded5b4f416f662e2a820356f16c9bfbef00db..50709c76b6725aa55aa1b35b31040e6831764f7b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -144,6 +144,8 @@ struct hv_netvsc_packet { u32 total_data_buflen; }; +#define NETVSC_HASH_KEYLEN 40 + struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; u32 num_chn; @@ -151,6 +153,8 @@ struct netvsc_device_info { u32 recv_sections; u32 send_section_size; u32 recv_section_size; + + u8 rss_key[NETVSC_HASH_KEYLEN]; }; enum rndis_device_state { @@ -160,8 +164,6 @@ enum rndis_device_state { RNDIS_DEV_DATAINITIALIZED, }; -#define NETVSC_HASH_KEYLEN 40 - struct rndis_device { struct net_device *ndev; @@ -179,7 +181,6 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 rx_table[ITAB_NUM]; }; @@ -210,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -929,6 +932,8 @@ struct net_device_context { u32 tx_table[VRSS_SEND_TAB_SIZE]; + u16 rx_table[ITAB_NUM]; + /* Ethtool settings */ u8 duplex; u32 speed; @@ -966,6 +971,7 @@ struct netvsc_device { wait_queue_head_t wait_drain; bool destroy; + bool tx_disable; /* if true, do not wake up queue again */ /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index fe01e141c8f87d50e42a5cb2670ff5ba4921744a..35413041dcf8174252f7db5956a923a245faa031 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w) rdev = nvdev->extension; if (rdev) { - ret = rndis_set_subchannel(rdev->ndev, nvdev); + ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL); if (ret == 0) { netif_device_attach(rdev->ndev); } else { @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; + net_device->tx_disable = false; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, } else { struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(txq) && + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && (hv_get_avail_to_write_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { netif_tx_wake_queue(txq); @@ 
-871,11 +872,6 @@ static inline int netvsc_send_pkt( } else if (ret == -EAGAIN) { netif_tx_stop_queue(txq); ndev_ctx->eth_stats.stop_queue++; - if (atomic_read(&nvchan->queue_sends) < 1) { - netif_tx_wake_queue(txq); - ndev_ctx->eth_stats.wake_queue++; - ret = -ENOSPC; - } } else { netdev_err(ndev, "Unable to send packet pages %u len %u, ret %d\n", @@ -883,6 +879,15 @@ static inline int netvsc_send_pkt( ret); } + if (netif_tx_queue_stopped(txq) && + atomic_read(&nvchan->queue_sends) < 1 && + !net_device->tx_disable) { + netif_tx_wake_queue(txq); + ndev_ctx->eth_stats.wake_queue++; + if (ret == -EAGAIN) + ret = -ENOSPC; + } + return ret; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3af6d8d15233756e411500a8753d2789d544f076..0de42e8ec2bb5935b06d3211e88c4f60edd642c0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) rcu_read_unlock(); } +static void netvsc_tx_enable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + nvscdev->tx_disable = false; + virt_wmb(); /* ensure queue wake up mechanism is on */ + + netif_tx_wake_all_queues(ndev); +} + static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) rdev = nvdev->extension; if (!rdev->link_state) { netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(nvdev, net); } if (vf_netdev) { @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) } } +static void netvsc_tx_disable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + if (nvscdev) { + nvscdev->tx_disable = true; + virt_wmb(); /* ensure txq will not wake up after stop */ + } + + netif_tx_disable(ndev); +} + static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; - netif_tx_disable(net); + netvsc_tx_disable(nvdev, net); /* No need to close rndis filter if it is removed already */ if (!nvdev) @@ -743,6 +763,14 @@ void netvsc_linkstatus_callback(struct net_device *net, schedule_delayed_work(&ndev_ctx->dwork, 0); } +static void netvsc_comp_ipcsum(struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); +} + static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct napi_struct *napi, const struct ndis_tcp_ip_checksum_info *csum_info, @@ -766,9 +794,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, /* skb is already created with CHECKSUM_NONE */ skb_checksum_none_assert(skb); - /* - * In Linux, the IP checksum is always checked. - * Do L4 checksum offload if enabled and present. + /* Incoming packets may have IP header checksum verified by the host. + * They may not have IP header checksum computed after coalescing. + * We compute it here if the flags are set, because on Linux, the IP + * checksum is always checked. + */ + if (csum_info && csum_info->receive.ip_checksum_value_invalid && + csum_info->receive.ip_checksum_succeeded && + skb->protocol == htons(ETH_P_IP)) + netvsc_comp_ipcsum(skb); + + /* Do L4 checksum offload if enabled and present. 
*/ if (csum_info && (net->features & NETIF_F_RXCSUM)) { if (csum_info->receive.tcp_checksum_succeeded || @@ -811,7 +847,6 @@ int netvsc_recv_callback(struct net_device *net, csum_info, vlan, data, len); if (unlikely(!skb)) { ++net_device_ctx->eth_stats.rx_no_memory; - rcu_read_unlock(); return NVSP_STAT_FAIL; } @@ -856,6 +891,39 @@ static void netvsc_get_channels(struct net_device *net, } } +/* Alloc struct netvsc_device_info, and initialize it from either existing + * struct netvsc_device, or from default values. + */ +static struct netvsc_device_info *netvsc_devinfo_get + (struct netvsc_device *nvdev) +{ + struct netvsc_device_info *dev_info; + + dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); + + if (!dev_info) + return NULL; + + if (nvdev) { + dev_info->num_chn = nvdev->num_chn; + dev_info->send_sections = nvdev->send_section_cnt; + dev_info->send_section_size = nvdev->send_section_size; + dev_info->recv_sections = nvdev->recv_section_cnt; + dev_info->recv_section_size = nvdev->recv_section_size; + + memcpy(dev_info->rss_key, nvdev->extension->rss_key, + NETVSC_HASH_KEYLEN); + } else { + dev_info->num_chn = VRSS_CHANNEL_DEFAULT; + dev_info->send_sections = NETVSC_DEFAULT_TX; + dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; + dev_info->recv_sections = NETVSC_DEFAULT_RX; + dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; + } + + return dev_info; +} + static int netvsc_detach(struct net_device *ndev, struct netvsc_device *nvdev) { @@ -869,7 +937,7 @@ static int netvsc_detach(struct net_device *ndev, /* If device was up (receiving) then shutdown */ if (netif_running(ndev)) { - netif_tx_disable(ndev); + netvsc_tx_disable(nvdev, ndev); ret = rndis_filter_close(nvdev); if (ret) { @@ -907,7 +975,7 @@ static int netvsc_attach(struct net_device *ndev, return PTR_ERR(nvdev); if (nvdev->num_chn > 1) { - ret = rndis_set_subchannel(ndev, nvdev); + ret = rndis_set_subchannel(ndev, nvdev, dev_info); /* if unavailable, just proceed with one queue */ if (ret) { @@ -925,7 +993,7 @@ static int netvsc_attach(struct net_device *ndev, if (netif_running(ndev)) { ret = rndis_filter_open(nvdev); if (ret) - return ret; + goto err; rdev = nvdev->extension; if (!rdev->link_state) @@ -933,6 +1001,13 @@ static int netvsc_attach(struct net_device *ndev, } return 0; + +err: + netif_device_detach(ndev); + + rndis_filter_device_remove(hdev, nvdev); + + return ret; } static int netvsc_set_channels(struct net_device *net, @@ -941,7 +1016,7 @@ static int netvsc_set_channels(struct net_device *net, struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret; /* We do not support separate count for rx, tx, or other */ @@ -960,24 +1035,26 @@ static int netvsc_set_channels(struct net_device *net, orig = nvdev->num_chn; - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = count; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->num_chn = count; ret = netvsc_detach(net, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(net, &device_info); + ret = netvsc_attach(net, device_info); if (ret) { 
- device_info.num_chn = orig; - if (netvsc_attach(net, &device_info)) + device_info->num_chn = orig; + if (netvsc_attach(net, device_info)) netdev_err(net, "restoring channel setting failed\n"); } +out: + kfree(device_info); return ret; } @@ -1044,48 +1121,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); int orig_mtu = ndev->mtu; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + /* Change MTU of underlying VF netdev first. */ if (vf_netdev) { ret = dev_set_mtu(vf_netdev, mtu); if (ret) - return ret; + goto out; } - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = nvdev->send_section_cnt; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = nvdev->recv_section_cnt; - device_info.recv_section_size = nvdev->recv_section_size; - ret = netvsc_detach(ndev, nvdev); if (ret) goto rollback_vf; ndev->mtu = mtu; - ret = netvsc_attach(ndev, &device_info); - if (ret) - goto rollback; - - return 0; + ret = netvsc_attach(ndev, device_info); + if (!ret) + goto out; -rollback: /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring mtu failed\n"); rollback_vf: if (vf_netdev) dev_set_mtu(vf_netdev, orig_mtu); +out: + kfree(device_info); return ret; } @@ -1181,12 +1255,15 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + struct netvsc_device *nvdev; struct netvsc_vf_pcpu_stats vf_tot; int i; + rcu_read_lock(); + + nvdev = rcu_dereference(ndev_ctx->nvdev); if (!nvdev) - return; + goto out; netdev_stats_to_stats64(t, &net->stats); @@ -1225,6 +1302,8 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } +out: + rcu_read_unlock(); } static int netvsc_set_mac_addr(struct net_device *ndev, void *p) @@ -1374,6 +1453,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, pcpu_sum = kvmalloc_array(num_possible_cpus(), sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); + if (!pcpu_sum) + return; + netvsc_get_pcpu_stats(dev, pcpu_sum); for_each_present_cpu(cpu) { struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; @@ -1609,7 +1691,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->rx_table[i]; + indir[i] = ndc->rx_table[i]; } if (key) @@ -1639,7 +1721,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->rx_table[i] = indir[i]; + ndc->rx_table[i] = indir[i]; } if (!key) { @@ -1690,7 +1772,7 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; int ret = 0; @@ -1710,26 +1792,29 @@ 
static int netvsc_set_ringparam(struct net_device *ndev, new_rx == orig.rx_pending) return 0; /* no change */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = nvdev->num_chn; - device_info.send_sections = new_tx; - device_info.send_section_size = nvdev->send_section_size; - device_info.recv_sections = new_rx; - device_info.recv_section_size = nvdev->recv_section_size; + device_info = netvsc_devinfo_get(nvdev); + + if (!device_info) + return -ENOMEM; + + device_info->send_sections = new_tx; + device_info->recv_sections = new_rx; ret = netvsc_detach(ndev, nvdev); if (ret) - return ret; + goto out; - ret = netvsc_attach(ndev, &device_info); + ret = netvsc_attach(ndev, device_info); if (ret) { - device_info.send_sections = orig.tx_pending; - device_info.recv_sections = orig.rx_pending; + device_info->send_sections = orig.tx_pending; + device_info->recv_sections = orig.rx_pending; - if (netvsc_attach(ndev, &device_info)) + if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring ringparam failed"); } +out: + kfree(device_info); return ret; } @@ -1848,7 +1933,7 @@ static void netvsc_link_change(struct work_struct *w) if (rdev->link_state) { rdev->link_state = false; netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(net_device, net); } else { notify = true; } @@ -1858,7 +1943,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); } kfree(event); break; @@ -1867,7 +1952,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); event->event = RNDIS_STATUS_MEDIA_CONNECT; spin_lock_irqsave(&ndev_ctx->lock, flags); list_add(&event->list, &ndev_ctx->reconfig_events); @@ -2022,14 +2107,15 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } -/* Find netvsc by VMBus serial number. - * The PCI hyperv controller records the serial number as the slot. +/* Find netvsc by VF serial number. + * The PCI hyperv controller records the serial number as the slot kobj name. 
*/ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) { struct device *parent = vf_netdev->dev.parent; struct net_device_context *ndev_ctx; struct pci_dev *pdev; + u32 serial; if (!parent || !dev_is_pci(parent)) return NULL; /* not a PCI device */ @@ -2040,16 +2126,22 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return NULL; } + if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) { + netdev_notice(vf_netdev, "Invalid vf serial:%s\n", + pci_slot_name(pdev->slot)); + return NULL; + } + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { if (!ndev_ctx->vf_alloc) continue; - if (ndev_ctx->vf_serial == pdev->slot->number) + if (ndev_ctx->vf_serial == serial) return hv_get_drvdata(ndev_ctx->device_ctx); } netdev_notice(vf_netdev, - "no netdev found for slot %u\n", pdev->slot->number); + "no netdev found for vf serial:%u\n", serial); return NULL; } @@ -2151,7 +2243,7 @@ static int netvsc_probe(struct hv_device *dev, { struct net_device *net = NULL; struct net_device_context *net_device_ctx; - struct netvsc_device_info device_info; + struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; int ret = -ENOMEM; @@ -2198,21 +2290,21 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_rx_queues(net, 1); /* Notify the netvsc driver of the new device */ - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = VRSS_CHANNEL_DEFAULT; - device_info.send_sections = NETVSC_DEFAULT_TX; - device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; - device_info.recv_sections = NETVSC_DEFAULT_RX; - device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; - - nvdev = rndis_filter_device_add(dev, &device_info); + device_info = netvsc_devinfo_get(NULL); + + if (!device_info) { + ret = -ENOMEM; + goto devinfo_failed; + } + + nvdev = rndis_filter_device_add(dev, device_info); if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); goto rndis_failed; } - memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); /* We must get rtnl lock before scheduling nvdev->subchan_work, * otherwise netvsc_subchan_work() can get rtnl lock first and wait @@ -2250,12 +2342,16 @@ static int netvsc_probe(struct hv_device *dev, list_add(&net_device_ctx->list, &netvsc_dev_list); rtnl_unlock(); + + kfree(device_info); return 0; register_failed: rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: + kfree(device_info); +devinfo_failed: free_percpu(net_device_ctx->vf_stats); no_stats: hv_set_drvdata(dev, NULL); @@ -2323,7 +2419,7 @@ static struct hv_driver netvsc_drv = { .probe = netvsc_probe, .remove = netvsc_remove, .driver = { - .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2a5209f23f296a8036c8aafd8248cc71a1582d23..f47e36ac42a7f8aa6b0a2a01302c884b5116fe63 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -715,10 +715,11 @@ rndis_filter_set_offload_params(struct net_device *ndev, return ret; } -int rndis_filter_set_rss_param(struct rndis_device *rdev, - const u8 *rss_key) +static int rndis_set_rss_param_msg(struct rndis_device *rdev, + const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; + struct net_device_context *ndc = netdev_priv(ndev); struct rndis_request *request; struct rndis_set_request *set; struct 
rndis_set_complete *set_complete; @@ -745,7 +746,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(struct ndis_recv_scale_param); - rssp->flag = 0; + rssp->flag = flag; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6; @@ -758,7 +759,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->rx_table[i]; + itab[i] = ndc->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); @@ -770,9 +771,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status == RNDIS_STATUS_SUCCESS) - memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); - else { + if (set_complete->status == RNDIS_STATUS_SUCCESS) { + if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) && + !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) + memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); + + } else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; @@ -783,6 +787,16 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, return ret; } +int rndis_filter_set_rss_param(struct rndis_device *rdev, + const u8 *rss_key) +{ + /* Disable RSS before change */ + rndis_set_rss_param_msg(rdev, rss_key, + NDIS_RSS_PARAM_FLAG_DISABLE_RSS); + + return rndis_set_rss_param_msg(rdev, rss_key, 0); +} + static int rndis_filter_query_device_link_status(struct rndis_device *dev, struct netvsc_device *net_device) { @@ -1062,7 +1076,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) +int rndis_set_subchannel(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_device_info *dev_info) { struct nvsp_message *init_packet = &nvdev->channel_init_pkt; struct net_device_context *ndev_ctx = netdev_priv(ndev); @@ -1103,7 +1119,10 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) atomic_read(&nvdev->open_chn) == nvdev->num_chn); /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rdev, netvsc_hash_key); + if (dev_info) + rndis_filter_set_rss_param(rdev, dev_info->rss_key); + else + rndis_filter_set_rss_param(rdev, netvsc_hash_key); netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); @@ -1226,6 +1245,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); + struct net_device_context *ndc = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; struct ndis_recv_scale_cap rsscap; @@ -1312,9 +1332,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* We will use the given number of channels if available. 
*/ net_device->num_chn = min(net_device->max_chn, device_info->num_chn); - for (i = 0; i < ITAB_NUM; i++) - rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + if (!netif_is_rxfh_configured(net)) { + for (i = 0; i < ITAB_NUM; i++) + ndc->rx_table[i] = ethtool_rxfh_indir_default( i, net_device->num_chn); + } atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index cd1d8faccca5fb36b488312d734d5e42cebb7b1a..cd6b95e673a58319a2f0ea0ed15445cc1782435f 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi) INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), WQ_MEM_RECLAIM); + if (unlikely(!lp->wqueue)) { + ret = -ENOMEM; + goto err_hw_init; + } ret = adf7242_hw_init(lp); if (ret) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 3d9e91579866826e476ceb2374b0d286e70c07fd..1bc09b6c308f855de8ab86badb469290550b0bdf 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -108,6 +108,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -351,7 +352,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -360,7 +365,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 4f684cbcdc57e4ce382f1b5e57f7885ded2f2af2..078027bbe0025e1d6f40f734f17edd2b6c06b162 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -1140,10 +1140,11 @@ static void atusb_disconnect(struct usb_interface *interface) ieee802154_unregister_hw(atusb->hw); + usb_put_dev(atusb->usb_dev); + ieee802154_free_hw(atusb->hw); usb_set_intfdata(interface, NULL); - usb_put_dev(atusb->usb_dev); pr_debug("%s done\n", __func__); } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 0ff5a403a8dc356a359fb085be26379ca011b67b..3f581dcc7e6b9db112cc3dea46aa8615c826472c 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work) static void ca8210_rx_done(struct cas_control *cas_ctl) { u8 *buf; - u8 len; + unsigned int len; struct work_priv_container *mlme_reset_wpc; struct ca8210_priv *priv = cas_ctl->priv; @@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) if (len > CA8210_SPI_BUF_SIZE) { dev_crit( &priv->spi->dev, - "Received packet len (%d) erroneously long\n", + "Received packet len (%u) erroneously long\n", len ); goto finish; @@ -1769,6 +1769,7 @@ static int ca8210_async_xmit_complete( status ); if (status != MAC_TRANSACTION_OVERFLOW) { + dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; } @@ -2779,7 +2780,6 @@ static int 
ca8210_register_ext_clock(struct spi_device *spi) struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; - int ret = 0; if (!np) return -EFAULT; @@ -2796,18 +2796,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi) dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } - ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); - if (ret) { - clk_unregister(priv->clk); - dev_crit( - &spi->dev, - "Failed to register external clock as clock provider\n" - ); - } else { - dev_info(&spi->dev, "External clock set as clock provider\n"); - } - return ret; + return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); } /** @@ -2819,8 +2809,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); - if (!priv->clk) - return + if (IS_ERR_OR_NULL(priv->clk)) + return; of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); @@ -3151,12 +3141,12 @@ static int ca8210_probe(struct spi_device *spi_device) goto error; } + priv->spi->dev.platform_data = pdata; ret = ca8210_get_platform_data(priv->spi, pdata); if (ret) { dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n"); goto error; } - priv->spi->dev.platform_data = pdata; ret = ca8210_dev_com_init(priv); if (ret) { diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index bf70ab892e697865eeffd69a01674da634127885..be1f1a86bcd61d2bbbe21376e6caeff3c1d64076 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -332,7 +332,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } @@ -500,7 +500,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; @@ -550,7 +550,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1, + if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; @@ -821,7 +821,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, err = hwsim_subscribe_all_others(phy); if (err < 0) { mutex_unlock(&hwsim_phys_lock); - goto err_reg; + goto err_subscribe; } } list_add_tail(&phy->list, &hwsim_phys); @@ -831,6 +831,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, return idx; +err_subscribe: + ieee802154_unregister_hw(phy->hw); err_reg: kfree(pib); err_pib: @@ -920,9 +922,9 @@ static __init int hwsim_init_module(void) return 0; platform_drv: - genl_unregister_family(&hwsim_genl_family); -platform_dev: platform_device_unregister(mac802154hwsim_dev); +platform_dev: + genl_unregister_family(&hwsim_genl_family); return rc; } diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 04891429a55423e4ea4a3f5f5025631c01cda13a..fe4057fca83d86fea99ffb3b7609f1935dcceaea 100644 --- 
a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -539,6 +539,8 @@ mcr20a_start(struct ieee802154_hw *hw) dev_dbg(printdev(lp), "no slotted operation\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_SLOTTED, 0x0); + if (ret < 0) + return ret; /* enable irq */ enable_irq(lp->spi->irq); @@ -546,11 +548,15 @@ mcr20a_start(struct ieee802154_hw *hw) /* Unmask SEQ interrupt */ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, DAR_PHY_CTRL2_SEQMSK, 0x0); + if (ret < 0) + return ret; /* Start the RX sequence */ dev_dbg(printdev(lp), "start the RX sequence\n"); ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); + if (ret < 0) + return ret; return 0; } diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index adb826f55e604c2d766c44bc260d1ac9aa96bc07..e66c3bed6f18c9c7f08a7b60b04f9e965f5ef8f6 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -44,6 +44,9 @@ #define IPVLAN_QBACKLOG_LIMIT 1000 +extern int sysctl_ipvlan_loop_qlen; +extern int sysctl_ipvlan_loop_delay; + typedef enum { IPVL_IPV6 = 0, IPVL_ICMPV6, @@ -75,6 +78,10 @@ struct ipvl_dev { netdev_features_t sfeatures; u32 msg_enable; spinlock_t addrs_lock; + int local_packets_cached; + unsigned long local_timeout; + struct timer_list local_free_timer; + struct sk_buff_head local_xmit_queue; }; struct ipvl_addr { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1a8132eb2a3ec150fb1563a3d24ab03892b8aeab..0a82752b28b5452c31885ed52bd6c69de21bf34b 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -298,6 +298,7 @@ void ipvlan_process_multicast(struct work_struct *work) } if (dev) dev_put(dev); + cond_resched(); } } @@ -417,7 +418,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, return addr; } -static int ipvlan_process_v4_outbound(struct sk_buff *skb) +static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; @@ -442,7 +443,10 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) goto err; } skb_dst_set(skb, &rt->dst); - err = ip_local_out(net, skb->sk, skb); + + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + + err = ip_local_out(net, NULL, skb); if (unlikely(net_xmit_eval(err))) dev->stats.tx_errors++; else @@ -456,7 +460,139 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) } #if IS_ENABLED(CONFIG_IPV6) + +static noinline_for_stack int +ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct flowi6 fl6 = { + .flowi6_oif = dev->ifindex, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + .flowi6_flags = FLOWI_FLAG_ANYSRC, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_mark = skb->mark, + .flowi6_proto = ip6h->nexthdr, + }; + struct dst_entry *dst; + int err; + + dst = ip6_route_output(dev_net(dev), NULL, &fl6); + err = dst->error; + if (err) { + dst_release(dst); + return err; + } + skb_dst_set(skb, dst); + return 0; +} + static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + int err, ret = NET_XMIT_DROP; + + err = ipvlan_route_v6_outbound(dev, skb); + if (unlikely(err)) { + dev->stats.tx_errors++; + kfree_skb(skb); + return err; + } + + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + + err = ip6_local_out(dev_net(dev), NULL, skb); + if (unlikely(net_xmit_eval(err))) + 
dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + return ret; +} +#else +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + return NET_XMIT_DROP; +} +#endif + +static int ipvlan_process_outbound(struct sk_buff *skb) +{ + int ret = NET_XMIT_DROP; + + /* The ipvlan is a pseudo-L2 device, so the packets that we receive + * will have L2; which need to discarded and processed further + * in the net-ns of the main-device. + */ + if (skb_mac_header_was_set(skb)) { + /* In this mode we dont care about + * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + + if (is_multicast_ether_addr(ethh->h_dest)) { + pr_debug_ratelimited( + "Dropped {multi|broad}cast of type=[%x]\n", + ntohs(skb->protocol)); + kfree_skb(skb); + goto out; + } + + skb_pull(skb, sizeof(*ethh)); + skb->mac_header = (typeof(skb->mac_header))~0U; + skb_reset_network_header(skb); + } + + if (skb->protocol == htons(ETH_P_IPV6)) + ret = ipvlan_process_v6_outbound(skb); + else if (skb->protocol == htons(ETH_P_IP)) + ret = ipvlan_process_v4_outbound(skb); + else { + pr_warn_ratelimited("Dropped outbound packet type=%x\n", + ntohs(skb->protocol)); + kfree_skb(skb); + } +out: + return ret; +} + +static int ipvlan_process_v4_forward(struct sk_buff *skb) +{ + const struct iphdr *ip4h = ip_hdr(skb); + struct net_device *dev = skb->dev; + struct net *net = dev_net(dev); + struct rtable *rt; + int err, ret = NET_XMIT_DROP; + struct flowi4 fl4 = { + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_flags = FLOWI_FLAG_ANYSRC, + .flowi4_mark = skb->mark, + .daddr = ip4h->daddr, + .saddr = ip4h->saddr, + }; + + rt = ip_route_output_flow(net, &fl4, NULL); + if (IS_ERR(rt)) + goto err; + + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto err; + } + skb_dst_set(skb, &rt->dst); + err = ip_local_out(net, skb->sk, skb); + if (unlikely(net_xmit_eval(err))) + dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + goto out; +err: + dev->stats.tx_errors++; + kfree_skb(skb); +out: + return ret; +} + +#if IS_ENABLED(CONFIG_IPV6) +static int ipvlan_process_v6_forward(struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net_device *dev = skb->dev; @@ -464,7 +600,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, @@ -493,15 +628,15 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) return ret; } #else -static int ipvlan_process_v6_outbound(struct sk_buff *skb) +static int ipvlan_process_v6_forward(struct sk_buff *skb) { return NET_XMIT_DROP; } #endif -static int ipvlan_process_outbound(struct sk_buff *skb) +static int ipvlan_process_forward(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); + struct ethhdr *ethh = skb_eth_hdr(skb); int ret = NET_XMIT_DROP; /* In this mode we dont care about multicast and broadcast traffic */ @@ -522,11 +657,11 @@ static int ipvlan_process_outbound(struct sk_buff *skb) skb_reset_network_header(skb); } - if (skb->protocol == htons(ETH_P_IPV6)) - ret = ipvlan_process_v6_outbound(skb); - else if (skb->protocol == htons(ETH_P_IP)) - ret = ipvlan_process_v4_outbound(skb); - else { + if (skb->protocol == htons(ETH_P_IPV6)) { + ret = ipvlan_process_v6_forward(skb); + } else if (skb->protocol == htons(ETH_P_IP)) { + ret = ipvlan_process_v4_forward(skb); + } else { pr_warn_ratelimited("Dropped outbound packet type=%x\n", 
ntohs(skb->protocol)); kfree_skb(skb); @@ -582,7 +717,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } out: @@ -593,7 +729,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -608,7 +744,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } skb = skb_share_check(skb, GFP_ATOMIC); @@ -620,9 +757,85 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) * the skb for the main-dev. At the RX side we just return * RX_PASS for it to be processed further on the stack. */ - return dev_forward_skb(ipvlan->phy_dev, skb); + dev_forward_skb(ipvlan->phy_dev, skb); + return NET_XMIT_SUCCESS; } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); + ipvlan_skb_crossing_ns(skb, NULL); + ipvlan_multicast_enqueue(ipvlan->port, skb, true); + return NET_XMIT_SUCCESS; + } + + skb->dev = ipvlan->phy_dev; + return dev_queue_xmit(skb); +} + +static int ipvlan_l2e_local_xmit_event(struct ipvl_dev *ipvlan, + struct sk_buff **pskb) +{ + struct sk_buff *nskb, *tskb; + + while ((ipvlan->local_packets_cached >= sysctl_ipvlan_loop_qlen) && + (tskb = skb_dequeue(&ipvlan->local_xmit_queue))) { + ipvlan->local_packets_cached -= tskb->truesize; + if (ipvlan->local_packets_cached < 0 || + skb_queue_empty(&ipvlan->local_xmit_queue)) + ipvlan->local_packets_cached = 0; + kfree_skb(tskb); + } + + nskb = skb_clone(*pskb, GFP_ATOMIC); + if (!nskb) + return NET_XMIT_DROP; + + ipvlan->local_timeout = jiffies + + (sysctl_ipvlan_loop_delay * HZ) / 1000; + mod_timer(&ipvlan->local_free_timer, ipvlan->local_timeout); + skb_queue_tail(&ipvlan->local_xmit_queue, *pskb); + ipvlan->local_packets_cached += (*pskb)->truesize; + *pskb = nskb; + + return 0; +} + +static int ipvlan_xmit_mode_l2e(struct sk_buff *skb, struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ethhdr *eth = skb_eth_hdr(skb); + struct ipvl_addr *addr; + void *lyr3h; + int addr_type; + + if (!ipvlan_is_vepa(ipvlan->port) && + ether_addr_equal(eth->h_dest, eth->h_source)) { + lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type); + if (lyr3h) { + addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, + addr_type, true); + if (addr) { + if (ipvlan_is_private(ipvlan->port)) { + consume_skb(skb); + return NET_XMIT_DROP; + } + + if (unlikely((dev->features & + (NETIF_F_GSO | NETIF_F_TSO)) && + ipvlan_l2e_local_xmit_event(ipvlan, &skb))) + return NET_XMIT_DROP; + return ipvlan_rcv_frame(addr, &skb, true); + } + } + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + + /* maybe the packet need been forward */ + skb->dev = ipvlan->phy_dev; + ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev); + return ipvlan_process_forward(skb); + } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; @@ -649,6 +862,8 @@ 
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) case IPVLAN_MODE_L3: case IPVLAN_MODE_L3S: return ipvlan_xmit_mode_l3(skb, dev); + case IPVLAN_MODE_L2E: + return ipvlan_xmit_mode_l2e(skb, dev); } /* Should not reach here */ @@ -730,6 +945,36 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, return ret; } +static rx_handler_result_t ipvlan_handle_mode_l2e(struct sk_buff **pskb, + struct ipvl_port *port) +{ + struct sk_buff *skb = *pskb; + struct ethhdr *eth = eth_hdr(skb); + rx_handler_result_t ret = RX_HANDLER_PASS; + + if (is_multicast_ether_addr(eth->h_dest)) { + if (ipvlan_external_frame(skb, port)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + + /* External frames are queued for device local + * distribution, but a copy is given to master + * straight away to avoid sending duplicates later + * when work-queue processes this frame. This is + * achieved by returning RX_HANDLER_PASS. + */ + if (nskb) { + ipvlan_skb_crossing_ns(nskb, NULL); + ipvlan_multicast_enqueue(port, nskb, false); + } + } + } else { + /* Perform like l3 mode for non-multicast packet */ + ret = ipvlan_handle_mode_l3(pskb, port); + } + + return ret; +} + rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; @@ -743,6 +988,8 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) return ipvlan_handle_mode_l2(pskb, port); case IPVLAN_MODE_L3: return ipvlan_handle_mode_l3(pskb, port); + case IPVLAN_MODE_L2E: + return ipvlan_handle_mode_l2e(pskb, port); case IPVLAN_MODE_L3S: return RX_HANDLER_PASS; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4a949569ec4c51668fe7b795caef7ece5d61854b..9057cdf9b2162d7caedce059b53aaa97d0d80ed1 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -9,6 +9,53 @@ #include "ipvlan.h" +static int one = 1; +static int delay_max = 100; +/* set loop queue length from 0 to 10 big packets(65536) */ +static int qlen_min; +static int qlen_max = 655360; + +int sysctl_ipvlan_loop_qlen = 131072; +int sysctl_ipvlan_loop_delay = 10; +static int ipvlan_default_mode = IPVLAN_MODE_L3; +module_param(ipvlan_default_mode, int, 0400); +MODULE_PARM_DESC(ipvlan_default_mode, "set ipvlan default mode: 0 for l2, 1 for l3, 2 for l2e, 3 for l3s, others invalid now"); + +static struct ctl_table_header *ipvlan_table_hrd; +static struct ctl_table ipvlan_table[] = { + { + .procname = "loop_delay", + .data = &sysctl_ipvlan_loop_delay, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &delay_max, + }, + { + .procname = "loop_qlen", + .data = &sysctl_ipvlan_loop_qlen, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &qlen_min, + .extra2 = &qlen_max, + }, + { } +}; + +static int ipvlan_sysctl_init(void) +{ + ipvlan_table_hrd = register_net_sysctl(&init_net, + "net/ipvlan", ipvlan_table); + return !ipvlan_table_hrd ? 
-ENOMEM : 0; +} + +static void ipvlan_sysctl_exit(void) +{ + unregister_net_sysctl_table(ipvlan_table_hrd); +} + static unsigned int ipvlan_netid __read_mostly; struct ipvlan_netns { @@ -97,12 +144,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); if (!err) { mdev->l3mdev_ops = &ipvl_l3mdev_ops; - mdev->priv_flags |= IFF_L3MDEV_MASTER; + mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; } else goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ - mdev->priv_flags &= ~IFF_L3MDEV_MASTER; + mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } @@ -162,7 +209,7 @@ static void ipvlan_port_destroy(struct net_device *dev) struct sk_buff *skb; if (port->mode == IPVLAN_MODE_L3S) { - dev->priv_flags &= ~IFF_L3MDEV_MASTER; + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; ipvlan_unregister_nf_hook(dev_net(dev)); dev->l3mdev_ops = NULL; } @@ -177,12 +224,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) @@ -196,7 +252,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; @@ -219,6 +277,32 @@ static int ipvlan_init(struct net_device *dev) return 0; } +static void ipvlan_local_free_handler(struct timer_list *t) +{ + struct ipvl_dev *ipvlan = from_timer(ipvlan, t, local_free_timer); + + skb_queue_purge(&ipvlan->local_xmit_queue); + ipvlan->local_packets_cached = 0; +} + +static inline void ipvlan_local_init(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + ipvlan->local_packets_cached = 0; + skb_queue_head_init(&ipvlan->local_xmit_queue); + timer_setup(&ipvlan->local_free_timer, + ipvlan_local_free_handler, 0); +} + +static inline void ipvlan_local_uninit(struct net_device *dev) +{ + struct ipvl_dev *ipvlan = netdev_priv(dev); + + del_timer(&ipvlan->local_free_timer); + skb_queue_purge(&ipvlan->local_xmit_queue); +} + static void ipvlan_uninit(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); @@ -236,7 +320,6 @@ static void ipvlan_uninit(struct net_device *dev) static int ipvlan_open(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct net_device *phy_dev = ipvlan->phy_dev; struct ipvl_addr *addr; if (ipvlan->port->mode == IPVLAN_MODE_L3 || @@ -245,12 +328,13 @@ static int ipvlan_open(struct net_device *dev) else 
dev->flags &= ~IFF_NOARP; + ipvlan_local_init(dev); rcu_read_lock(); list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_add(ipvlan, addr); rcu_read_unlock(); - return dev_uc_add(phy_dev, phy_dev->dev_addr); + return 0; } static int ipvlan_stop(struct net_device *dev) @@ -262,8 +346,7 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_unsync(phy_dev, dev); dev_mc_unsync(phy_dev, dev); - dev_uc_del(phy_dev, phy_dev->dev_addr); - + ipvlan_local_uninit(dev); rcu_read_lock(); list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) ipvlan_ht_addr_del(addr); @@ -300,7 +383,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -494,6 +584,8 @@ static int ipvlan_nl_changelink(struct net_device *dev, if (!data) return 0; + if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; if (data[IFLA_IPVLAN_MODE]) { u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); @@ -583,7 +675,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_port *port; struct net_device *phy_dev; int err; - u16 mode = IPVLAN_MODE_L3; + u16 mode = ipvlan_default_mode; if (!tb[IFLA_LINK]) return -EINVAL; @@ -596,6 +688,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_dev *tmp = netdev_priv(phy_dev); phy_dev = tmp->phy_dev; + if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; } else if (!netif_is_ipvlan_port(phy_dev)) { /* Exit early if the underlying link is invalid or busy */ if (phy_dev->type != ARPHRD_ETHER || @@ -781,12 +875,14 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - old_vnet = net_generic(oldnet, ipvlan_netid); - if (!old_vnet->ipvl_nf_hook_refcnt) - break; + if (port->mode == IPVLAN_MODE_L3S) { + old_vnet = net_generic(oldnet, ipvlan_netid); + if (!old_vnet->ipvl_nf_hook_refcnt) + break; - ipvlan_register_nf_hook(newnet); - ipvlan_unregister_nf_hook(oldnet); + ipvlan_register_nf_hook(newnet); + ipvlan_unregister_nf_hook(oldnet); + } break; } case NETDEV_UNREGISTER: @@ -801,10 +897,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; @@ -1070,6 +1165,10 @@ static int __init ipvlan_init_module(void) { int err; + if (ipvlan_default_mode >= IPVLAN_MODE_MAX || + ipvlan_default_mode < IPVLAN_MODE_L2) + return -EINVAL; + ipvlan_init_secret(); register_netdevice_notifier(&ipvlan_notifier_block); #if IS_ENABLED(CONFIG_IPV6) @@ -1090,6 +1189,9 @@ static int __init ipvlan_init_module(void) goto error; } + err = ipvlan_sysctl_init(); + if (err < 0) + pr_err("ipvlan proc init failed, continue\n"); return 0; error: unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); @@ -1117,6 +1219,7 @@ static void __exit ipvlan_cleanup_module(void) 
unregister_inet6addr_validator_notifier( &ipvlan_addr6_vtor_notifier_block); #endif + ipvlan_sysctl_exit(); } module_init(ipvlan_init_module); diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 0bcc07f346c3ecb26e5d8354adb1fcd547733e84..2e517e30c5ac1cfae9872043e250ee4d9bf834e4 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -193,7 +193,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = { .notifier_call = ipvtap_device_event, }; -static int ipvtap_init(void) +static int __init ipvtap_init(void) { int err; @@ -227,7 +227,7 @@ static int ipvtap_init(void) } module_init(ipvtap_init); -static void ipvtap_exit(void) +static void __exit ipvtap_exit(void) { rtnl_link_unregister(&ipvtap_link_ops); unregister_netdevice_notifier(&ipvtap_notifier_block); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 30612497643c08caa8a3bf352b13f784f729f725..d192936b76cff50522c8564eb84a84204094a964 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -75,6 +75,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, int len; skb_tx_timestamp(skb); + + /* do not fool net_timestamp_check() with various clock bases */ + skb->tstamp = 0; + skb_orphan(skb); /* Before queueing this packet to netif_rx(), diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 7de88b33d5b96d7f18a5f7c242a54c935b587086..10a8ef2d025a16ad855f0978d96536c8ee44b7ff 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -869,6 +869,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) { + skb->ip_summed = CHECKSUM_NONE; memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); skb_pull(skb, hdr_len); pskb_trim_unique(skb, skb->len - icv_len); @@ -1103,10 +1104,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) } skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) { - *pskb = NULL; + *pskb = skb; + if (!skb) return RX_HANDLER_CONSUMED; - } pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); if (!pulled_sci) { @@ -1238,6 +1238,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); + skb_orphan(skb); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, skb->len); @@ -2812,9 +2813,6 @@ static int macsec_dev_open(struct net_device *dev) struct net_device *real_dev = macsec->real_dev; int err; - if (!(real_dev->flags & IFF_UP)) - return -ENETDOWN; - err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) return err; @@ -3007,12 +3005,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); - struct net_device *real_dev = macsec->real_dev; free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); - dev_put(real_dev); } static void macsec_setup(struct net_device *dev) @@ -3267,8 +3263,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) return err; - dev_hold(real_dev); - macsec->nest_level = dev_get_nest_level(real_dev) + 1; netdev_lockdep_set_classes(dev); lockdep_set_class_and_subclass(&dev->addr_list_lock, @@ -3308,6 +3302,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) goto del_dev; + netif_stacked_transfer_operstate(real_dev, dev); + linkwatch_fire_event(dev); + macsec_generation++; return 0; @@ 
-3492,6 +3489,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, return NOTIFY_DONE; switch (event) { + case NETDEV_DOWN: + case NETDEV_UP: + case NETDEV_CHANGE: { + struct macsec_dev *m, *n; + struct macsec_rxh_data *rxd; + + rxd = macsec_data_rtnl(real_dev); + list_for_each_entry_safe(m, n, &rxd->secys, secys) { + struct net_device *dev = m->secy.netdev; + + netif_stacked_transfer_operstate(real_dev, dev); + } + break; + } case NETDEV_UNREGISTER: { struct macsec_dev *m, *n; struct macsec_rxh_data *rxd; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cfda146f3b3bbb799532a48e2705c2fd4b5f2661..8c60e0fc52a6b57252205ec26971fb164a879f0e 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -338,6 +338,8 @@ static void macvlan_process_broadcast(struct work_struct *w) if (src) dev_put(src->dev); kfree_skb(skb); + + cond_resched(); } } @@ -363,10 +365,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, } spin_unlock(&port->bc_queue.lock); + schedule_work(&port->bc_work); + if (err) goto free_nskb; - schedule_work(&port->bc_work); return; free_nskb: @@ -448,6 +451,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) int ret; rx_handler_result_t handle_res; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; @@ -516,10 +523,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } @@ -541,12 +549,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) { #ifdef CONFIG_NET_POLL_CONTROLLER - if (vlan->netpoll) - netpoll_send_skb(vlan->netpoll, skb); + return netpoll_send_skb(vlan->netpoll, skb); #else BUG(); -#endif return NETDEV_TX_OK; +#endif } static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, @@ -608,7 +615,7 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } - err = -EBUSY; + err = -EADDRINUSE; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; @@ -706,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr)) - return -EBUSY; + return -EADDRINUSE; if (!macvlan_passthru(port)) { err = dev_uc_add(lowerdev, addr); @@ -747,6 +754,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) return dev_set_mac_address(vlan->lowerdev, addr); } + if (macvlan_addr_busy(vlan->port, addr->sa_data)) + return -EADDRINUSE; + return macvlan_sync_address(dev, addr->sa_data); } @@ -758,7 +768,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) + if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC) dev_set_promiscuity(lowerdev, dev->flags & IFF_PROMISC ? 
1 : -1); @@ -1126,7 +1136,7 @@ void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); - dev->min_mtu = 0; + /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); @@ -1219,6 +1229,9 @@ static void macvlan_port_destroy(struct net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1266,6 +1279,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1322,10 +1349,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) @@ -1447,8 +1470,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(lowerdev)) + if (create && macvlan_port_get_rtnl(lowerdev)) { + macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); + } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); @@ -1669,7 +1694,7 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_dev, list); - if (macvlan_sync_address(vlan->dev, dev->dev_addr)) + if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9a10029caf83fb4d2befdb9dc10c3566c9ca2cbd..085f1648a8a672fa6d5f813fb738f6499ac50147 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -132,11 +132,17 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } +static struct net *macvtap_link_net(const struct net_device *dev) +{ + return dev_net(macvlan_dev_real_dev(dev)); +} + static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 7ae1856d1f185207222fdc22907a8056b68ca057..beeb7eb76ca32d0469a8edc16639d6ff02442f1b 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -603,6 +603,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev, primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev)) + return -ENODEV; + vlan_vids_del_by_dev(slave_dev, failover_dev); dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); @@ -762,8 +765,10 @@ struct failover 
*net_failover_create(struct net_device *standby_dev) netif_carrier_off(failover_dev); failover = failover_register(failover_dev, &net_failover_ops); - if (IS_ERR(failover)) + if (IS_ERR(failover)) { + err = PTR_ERR(failover); goto err_failover_register; + } return failover; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 81444208b2162126d515dc0948db09e936079a3f..12f100392ed1159d53fd7ed308465e9f33191708 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -493,6 +493,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) goto err_free; key = nmap->entry[i].key; *key = i; + memset(nmap->entry[i].value, 0, offmap->map.value_size); } } diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index b12023bc2cab5feb15ceedbe2fc357dfcf37627e..df8d49ad48c38ad9aaf5767878d96342f96d9b04 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t) struct net_device *ndev = dev->ndev; if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { - mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time)); + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 82070792edbb08c6af08fd79a256c53e59d01da4..1f5fd24cd749e34606537600736a89090e1c7468 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -227,7 +227,7 @@ config AQUANTIA_PHY ---help--- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 -config ASIX_PHY +config AX88796B_PHY tristate "Asix PHYs" help Currently supports the Asix Electronics PHY found in the X-Surf 100 diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 5805c0b7d60e31eb5ab3f973c580df15632c4d70..f21cda9d865edee42ed74f1e1afb05e423b224e5 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -46,7 +46,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o -obj-$(CONFIG_ASIX_PHY) += asix.o +obj-$(CONFIG_AX88796B_PHY) += ax88796b.o obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/ax88796b.c similarity index 100% rename from drivers/net/phy/asix.c rename to drivers/net/phy/ax88796b.c diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b2b6307d64a4de95d5ef29ba4d79dcbc12b5e1f1..acaf072bb4b0f3d2cdf807ecef963bcadbbd0d02 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -643,6 +643,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .name = _name, \ .features = PHY_BASIC_FEATURES, \ .flags = PHY_IS_INTERNAL, \ + .soft_reset = genphy_soft_reset, \ .config_init = bcm7xxx_config_init, \ .suspend = bcm7xxx_suspend, \ .resume = bcm7xxx_config_init, \ diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 29aa8d772b0c1050b145a8d5fddcfa132bda9f97..59b3f1fbabd4f1d95e84e84bf208e7516bc98498 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -896,14 +896,14 @@ static void decode_txts(struct dp83640_private *dp83640, struct phy_txts *phy_txts) { struct skb_shared_hwtstamps shhwtstamps; + struct dp83640_skb_info *skb_info; struct sk_buff *skb; - u64 ns; u8 overflow; + u64 ns; /* We must already have the skb that triggered 
this. */ - +again: skb = skb_dequeue(&dp83640->tx_queue); - if (!skb) { pr_debug("have timestamp but tx_queue empty\n"); return; @@ -918,6 +918,11 @@ static void decode_txts(struct dp83640_private *dp83640, } return; } + skb_info = (struct dp83640_skb_info *)skb->cb; + if (time_after(jiffies, skb_info->tmo)) { + kfree_skb(skb); + goto again; + } ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -1470,6 +1475,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, static void dp83640_txtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { + struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; struct dp83640_private *dp83640 = phydev->priv; switch (dp83640->hwts_tx_en) { @@ -1482,6 +1488,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, /* fall through */ case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; skb_queue_tail(&dp83640->tx_queue, skb); break; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index b3935778b19fee0f2ff5134c72c051d6ee2aa483..879096d3ff412a55f65fedae82a5e0b75d2a13bf 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -33,10 +33,18 @@ /* Extended Registers */ #define DP83867_CFG4 0x0031 +#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6)) +#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5) +#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5) +#define DP83867_CFG4_SGMII_ANEG_TIMER_2US (1 << 5) +#define DP83867_CFG4_SGMII_ANEG_TIMER_16MS (0 << 5) + #define DP83867_RGMIICTL 0x0032 #define DP83867_STRAP_STS1 0x006E #define DP83867_RGMIIDCTL 0x0086 #define DP83867_IO_MUX_CFG 0x0170 +#define DP83867_10M_SGMII_CFG 0x016F +#define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7) #define DP83867_SW_RESET BIT(15) #define DP83867_SW_RESTART BIT(14) @@ -78,6 +86,10 @@ #define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8) #define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8 +/* CFG3 bits */ +#define DP83867_CFG3_INT_OE BIT(7) +#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9) + /* CFG4 bits */ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0) @@ -260,10 +272,8 @@ static int dp83867_config_init(struct phy_device *phydev) ret = phy_write(phydev, MII_DP83867_PHYCTRL, val); if (ret) return ret; - } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && - (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { + /* Set up RGMII delays */ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) @@ -296,13 +306,43 @@ static int dp83867_config_init(struct phy_device *phydev) } } - /* Enable Interrupt output INT_OE in CFG3 register */ - if (phy_interrupt_is_valid(phydev)) { - val = phy_read(phydev, DP83867_CFG3); - val |= BIT(7); - phy_write(phydev, DP83867_CFG3, val); + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* For support SPEED_10 in SGMII mode + * DP83867_10M_SGMII_RATE_ADAPT bit + * has to be cleared by software. That + * does not affect SPEED_100 and + * SPEED_1000. + */ + val = phy_read_mmd(phydev, DP83867_DEVADDR, + DP83867_10M_SGMII_CFG); + val &= ~DP83867_10M_SGMII_RATE_ADAPT_MASK; + ret = phy_write_mmd(phydev, DP83867_DEVADDR, + DP83867_10M_SGMII_CFG, val); + + if (ret) + return ret; + + /* After reset SGMII Autoneg timer is set to 2us (bits 6 and 5 + * are 01). That is not enough to finalize autoneg on some + * devices. Increase this timer duration to maximum 16ms. 
+ */ + val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); + val &= ~DP83867_CFG4_SGMII_ANEG_MASK; + val |= DP83867_CFG4_SGMII_ANEG_TIMER_16MS; + ret = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); + + if (ret) + return ret; } + val = phy_read(phydev, DP83867_CFG3); + /* Enable Interrupt output INT_OE in CFG3 register */ + if (phy_interrupt_is_valid(phydev)) + val |= DP83867_CFG3_INT_OE; + + val |= DP83867_CFG3_ROBUST_AUTO_MDIX; + phy_write(phydev, DP83867_CFG3, val); + if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP) dp83867_config_port_mirroring(phydev); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 67b260877f305a33c8a8b4aa37d5862478fe7835..59820164502eba50479301ae2813505956a28c76 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -67,11 +67,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) do { s = read_seqcount_begin(&fp->seqcount); /* Issue callback if user registered it. */ - if (fp->link_update) { + if (fp->link_update) fp->link_update(fp->phydev->attached_dev, &fp->status); - fixed_phy_update(fp); - } + /* Check the GPIO for change in status */ + fixed_phy_update(fp); state = fp->status; } while (read_seqcount_retry(&fp->seqcount, s)); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f7c69ca34056e0e9846301b2637e6547688a91de..fd6c66fd32fb9306ca5e5e21af0d0e566d316a17 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -133,6 +133,7 @@ #define MII_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 #define MII_88E1510_PHY_LED_DEF 0x1177 +#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -650,7 +651,10 @@ static void marvell_config_led(struct phy_device *phydev) * LED[2] .. Blink, Activity */ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510): - def_config = MII_88E1510_PHY_LED_DEF; + if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE) + def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE; + else + def_config = MII_88E1510_PHY_LED_DEF; break; default: return; @@ -868,8 +872,6 @@ static int m88e1510_config_init(struct phy_device *phydev) /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - u32 pause; - /* Select page 18 */ err = marvell_set_page(phydev, 18); if (err < 0) @@ -892,16 +894,6 @@ static int m88e1510_config_init(struct phy_device *phydev) err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) return err; - - /* There appears to be a bug in the 88e1512 when used in - * SGMII to copper mode, where the AN advertisement register - * clears the pause bits each time a negotiation occurs. - * This means we can never be truely sure what was advertised, - * so disable Pause support. - */ - pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->supported &= ~pause; - phydev->advertising &= ~pause; } return m88e1318_config_init(phydev); @@ -1063,6 +1055,39 @@ static int m88e1145_config_init(struct phy_device *phydev) return 0; } +/* The VOD can be out of specification on link up. Poke an + * undocumented register, in an undocumented page, with a magic value + * to fix this. 
+ */ +static int m88e6390_errata(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, MII_BMCR, + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); + if (err) + return err; + + usleep_range(300, 400); + + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); + if (err) + return err; + + return genphy_soft_reset(phydev); +} + +static int m88e6390_config_aneg(struct phy_device *phydev) +{ + int err; + + err = m88e6390_errata(phydev); + if (err) + return err; + + return m88e1510_config_aneg(phydev); +} + /** * fiber_lpa_to_ethtool_lpa_t * @lpa: value of the MII_LPA register for fiber link @@ -1418,7 +1443,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, * before enabling it if !phy_interrupt_is_valid() */ if (!phy_interrupt_is_valid(phydev)) - phy_read(phydev, MII_M1011_IEVENT); + __phy_read(phydev, MII_M1011_IEVENT); /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, @@ -1492,9 +1517,10 @@ static int marvell_get_sset_count(struct phy_device *phydev) static void marvell_get_strings(struct phy_device *phydev, u8 *data) { + int count = marvell_get_sset_count(phydev); int i; - for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { + for (i = 0; i < count; i++) { strlcpy(data + i * ETH_GSTRING_LEN, marvell_hw_stats[i].string, ETH_GSTRING_LEN); } @@ -1522,9 +1548,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) static void marvell_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data) { + int count = marvell_get_sset_count(phydev); int i; - for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) + for (i = 0; i < count; i++) data[i] = marvell_get_stat(phydev, i); } @@ -2313,7 +2340,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = m88e6390_probe, .config_init = &marvell_config_init, - .config_aneg = &m88e1510_config_aneg, + .config_aneg = &m88e6390_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index f77a2d9e7f9d85b9be5c78c961632119c018c26c..456a1f882b097888bfbaa814cc61f69fa0d86971 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -27,6 +27,9 @@ #include enum { + MV_PMA_BOOT = 0xc050, + MV_PMA_BOOT_FATAL = BIT(0), + MV_PCS_BASE_T = 0x0000, MV_PCS_BASE_R = 0x1000, MV_PCS_1000BASEX = 0x2000, @@ -226,6 +229,16 @@ static int mv3310_probe(struct phy_device *phydev) (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) return -ENODEV; + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT); + if (ret < 0) + return ret; + + if (ret & MV_PMA_BOOT_FATAL) { + dev_warn(&phydev->mdio.dev, + "PHY failed to boot firmware, status=%04x\n", ret); + return -ENODEV; + } + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 8d370667fa1b3e5ada10b299ee35e35294bca798..df75efa96a7d95cecd6e4678f5dd5dfbaa9f4095 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -45,6 +46,8 @@ struct unimac_mdio_priv { void __iomem *base; int (*wait_func) (void *wait_func_data); void *wait_func_data; + struct clk *clk; + u32 clk_freq; }; static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset) @@ -189,6 +192,35 @@ static int unimac_mdio_reset(struct mii_bus *bus) 
        return 0;
}

+static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+{
+        unsigned long rate;
+        u32 reg, div;
+
+        /* Keep the hardware default values */
+        if (!priv->clk_freq)
+                return;
+
+        if (!priv->clk)
+                rate = 250000000;
+        else
+                rate = clk_get_rate(priv->clk);
+
+        div = (rate / (2 * priv->clk_freq)) - 1;
+        if (div & ~MDIO_CLK_DIV_MASK) {
+                pr_warn("Incorrect MDIO clock frequency, ignoring\n");
+                return;
+        }
+
+        /* The MDIO clock is the reference clock (typically 250MHz) divided by
+         * 2 x (MDIO_CLK_DIV + 1)
+         */
+        reg = unimac_mdio_readl(priv, MDIO_CFG);
+        reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
+        reg |= div << MDIO_CLK_DIV_SHIFT;
+        unimac_mdio_writel(priv, reg, MDIO_CFG);
+}
+
 static int unimac_mdio_probe(struct platform_device *pdev)
 {
        struct unimac_mdio_pdata *pdata = pdev->dev.platform_data;
@@ -217,9 +249,26 @@ static int unimac_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }

+        priv->clk = devm_clk_get(&pdev->dev, NULL);
+        if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+                return PTR_ERR(priv->clk);
+        else if (IS_ERR(priv->clk))
+                priv->clk = NULL;
+
+        ret = clk_prepare_enable(priv->clk);
+        if (ret)
+                return ret;
+
+        if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+                priv->clk_freq = 0;
+
+        unimac_mdio_clk_set(priv);
+
        priv->mii_bus = mdiobus_alloc();
-        if (!priv->mii_bus)
-                return -ENOMEM;
+        if (!priv->mii_bus) {
+                ret = -ENOMEM;
+                goto out_clk_disable;
+        }

        bus = priv->mii_bus;
        bus->priv = priv;
@@ -253,6 +302,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)

 out_mdio_free:
        mdiobus_free(bus);
+out_clk_disable:
+        clk_disable_unprepare(priv->clk);
        return ret;
 }
@@ -262,10 +313,37 @@ static int unimac_mdio_remove(struct platform_device *pdev)
        mdiobus_unregister(priv->mii_bus);
        mdiobus_free(priv->mii_bus);
+        clk_disable_unprepare(priv->clk);
+
+        return 0;
+}
+
+static int __maybe_unused unimac_mdio_suspend(struct device *d)
+{
+        struct unimac_mdio_priv *priv = dev_get_drvdata(d);
+
+        clk_disable_unprepare(priv->clk);
+
+        return 0;
+}
+
+static int __maybe_unused unimac_mdio_resume(struct device *d)
+{
+        struct unimac_mdio_priv *priv = dev_get_drvdata(d);
+        int ret;
+
+        ret = clk_prepare_enable(priv->clk);
+        if (ret)
+                return ret;
+
+        unimac_mdio_clk_set(priv);

        return 0;
 }

+static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
+                         unimac_mdio_suspend, unimac_mdio_resume);
+
 static const struct of_device_id unimac_mdio_ids[] = {
        { .compatible = "brcm,genet-mdio-v5", },
        { .compatible = "brcm,genet-mdio-v4", },
@@ -281,6 +359,7 @@ static struct platform_driver unimac_mdio_driver = {
        .driver = {
                .name = UNIMAC_MDIO_DRV_NAME,
                .of_match_table = unimac_mdio_ids,
+                .pm = &unimac_mdio_pm_ops,
        },
        .probe = unimac_mdio_probe,
        .remove = unimac_mdio_remove,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33265747bf3994c668cfcb2a8f7f3d9f768d370a..0fbcedcdf6e2ae5b6d9d8ecca66937532db263cb 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
                 * assume the pin serves as pull-up. If direction is
                 * output, the default value is high.
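/*
 * Illustrative sketch, not part of the patch: the divider arithmetic behind
 * unimac_mdio_clk_set() above.  With MDC = rate / (2 * (div + 1)), the
 * divider is rate / (2 * target) - 1 and must fit inside the divider field.
 * The 0x3f field width is an assumed stand-in for MDIO_CLK_DIV_MASK.
 */
#include <stdio.h>

#define EXAMPLE_CLK_DIV_MASK 0x3f       /* assumed field width */

static int example_mdio_div(unsigned long rate, unsigned long target)
{
        unsigned long div = rate / (2 * target) - 1;

        if (div & ~(unsigned long)EXAMPLE_CLK_DIV_MASK)
                return -1;      /* requested MDC frequency not reachable */
        return (int)div;
}

int main(void)
{
        /* 250 MHz reference, 2.5 MHz MDC target -> div = 49 */
        printf("div = %d\n", example_mdio_div(250000000UL, 2500000UL));
        return 0;
}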
*/ - gpiod_set_value(bitbang->mdo, 1); + gpiod_set_value_cansleep(bitbang->mdo, 1); return; } @@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value(bitbang->mdio); + return gpiod_get_value_cansleep(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value(bitbang->mdo, what); + gpiod_set_value_cansleep(bitbang->mdo, what); else - gpiod_set_value(bitbang->mdio, what); + gpiod_set_value_cansleep(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value(bitbang->mdc, what); + gpiod_set_value_cansleep(bitbang->mdc, what); } static const struct mdiobb_ops mdio_gpio_ops = { diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 98f4b1f706df427f31be8cd031e12dfa525552e2..82cc1983c09aa002c96b5463f530e1943c299c74 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -377,10 +377,16 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) bus->dev.groups = NULL; dev_set_name(&bus->dev, "%s", bus->id); + /* We need to set state to MDIOBUS_UNREGISTERED to correctly release + * the device in mdiobus_free() + * + * State will be updated later in this function in case of success + */ + bus->state = MDIOBUS_UNREGISTERED; + err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); - put_device(&bus->dev); return -EINVAL; } @@ -391,6 +397,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) if (IS_ERR(gpiod)) { dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", bus->id); + device_del(&bus->dev); return PTR_ERR(gpiod); } else if (gpiod) { bus->reset_gpiod = gpiod; @@ -445,7 +452,8 @@ void mdiobus_unregister(struct mii_bus *bus) struct mdio_device *mdiodev; int i; - BUG_ON(bus->state != MDIOBUS_REGISTERED); + if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED)) + return; bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index ddc2c5ea3787348d0672fb49639563be0b01fb78..19322d76ec00eb3a6d88fa90e1d9cae898d3260c 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -211,6 +211,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev) static int meson_gxl_config_intr(struct phy_device *phydev) { u16 val; + int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { val = INTSRC_ANEG_PR @@ -223,6 +224,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev) val = 0; } + /* Ack any pending IRQ */ + ret = meson_gxl_ack_interrupt(phydev); + if (ret) + return ret; + return phy_write(phydev, INTSRC_MASK, val); } @@ -233,6 +239,7 @@ static struct phy_driver meson_gxl_phy[] = { .name = "Meson GXL Internal PHY", .features = PHY_BASIC_FEATURES, .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT, + .soft_reset = genphy_soft_reset, .config_init = meson_gxl_config_init, .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3db06b40580d319a34d94ae54baf6ee61bce3ef5..b4c67c3a928b5515f4bb22dda357c42feb4507cd 100644 --- 
a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -339,6 +339,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int ksz8061_config_init(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + return kszphy_config_init(phydev); +} + static int ksz9021_load_values_from_of(struct phy_device *phydev, const struct device_node *of_node, u16 reg, @@ -934,7 +945,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init, + .config_init = ksz8061_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, @@ -966,6 +977,7 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9031_config_init, + .soft_reset = genphy_soft_reset, .read_status = ksz9031_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 84ca9ff40ae0b0bedb3758f4618e6652db845eb4..36647b70b9a36a09e8ee52fc5dab4d19cd3680ec 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -111,8 +111,8 @@ struct vsc8531_private { #ifdef CONFIG_OF_MDIO struct vsc8531_edge_rate_table { - u16 vddmac; - u8 slowdown[8]; + u32 vddmac; + u32 slowdown[8]; }; static const struct vsc8531_edge_rate_table edge_table[] = { @@ -375,8 +375,7 @@ static void vsc85xx_wol_get(struct phy_device *phydev, #ifdef CONFIG_OF_MDIO static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) { - u8 sd; - u16 vdd; + u32 vdd, sd; int rc, i, j; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; @@ -385,11 +384,11 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) if (!of_node) return -ENODEV; - rc = of_property_read_u16(of_node, "vsc8531,vddmac", &vdd); + rc = of_property_read_u32(of_node, "vsc8531,vddmac", &vdd); if (rc != 0) vdd = MSCC_VDDMAC_3300; - rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown", &sd); + rc = of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd); if (rc != 0) sd = 0; diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 2b1e336961f9ce3034a5268c8a20d4535bc7744b..bf4070ef6b84f841823d354e99a91e0eb5cc7fea 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable) { + u16 lb_dis = BIT(1); + if (disable) - ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1); + ns_exp_write(phydev, 0x1c0, + ns_exp_read(phydev, 0x1c0) | lb_dis); else ns_exp_write(phydev, 0x1c0, - ns_exp_read(phydev, 0x1c0) & 0xfffe); + ns_exp_read(phydev, 0x1c0) & ~lb_dis); pr_debug("10BASE-T HDX loopback %s\n", - (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); + (ns_exp_read(phydev, 0x1c0) & lb_dis) ? 
"off" : "on"); } static int ns_config_init(struct phy_device *phydev) diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index e1225545362d513351451f388d3120f783441594..0ba3607585bdc34c7eb7111f054a7f02d0d989dc 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -147,9 +147,15 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask) mmd_mask &= ~BIT(devad); /* The link state is latched low so that momentary link - * drops can be detected. Do not double-read the status - * register if the link is down. + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops. */ + if (!phy_polling_mode(phydev)) { + val = phy_read_mmd(phydev, devad, MDIO_STAT1); + if (val < 0) + return val; + } + val = phy_read_mmd(phydev, devad, MDIO_STAT1); if (val < 0) return val; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1ee25877c4d163a15fb2d8c5e9f1c7f0d25389d2..51e40a91db520fd98ebb96e5663fc081f91eec48 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -861,6 +861,9 @@ void phy_stop(struct phy_device *phydev) out_unlock: mutex_unlock(&phydev->lock); + phy_state_machine(&phydev->state_queue.work); + phy_stop_machine(phydev); + /* Cannot call flush_scheduled_work() here as desired because * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() * will not reenable interrupts. @@ -1060,8 +1063,8 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->link = 0; phy_link_down(phydev, true); - do_suspend = true; } + do_suspend = true; break; case PHY_RESUMING: if (AUTONEG_ENABLE == phydev->autoneg) { @@ -1121,9 +1124,13 @@ void phy_state_machine(struct work_struct *work) /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving - * between states from phy_mac_interrupt() + * between states from phy_mac_interrupt(). + * + * In state PHY_HALTED the PHY gets suspended, so rescheduling the + * state machine would be pointless and possibly error prone when + * called from phy_disconnect() synchronously. */ - if (phy_polling_mode(phydev)) + if (phy_polling_mode(phydev) && old_state != PHY_HALTED) queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, PHY_STATE_TIME * HZ); } @@ -1302,9 +1309,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) /* Restart autonegotiation so the new modes get sent to the * link partner. */ - ret = phy_restart_aneg(phydev); - if (ret < 0) - return ret; + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = phy_restart_aneg(phydev); + if (ret < 0) + return ret; + } } return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 19ab8a7d1e4863dc5b0a5208c4b7e674ba2ff6de..4f3624818c303940d160c542ebd1dbcb0f67f694 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -164,11 +164,8 @@ static int mdio_bus_phy_restore(struct device *dev) if (ret < 0) return ret; - /* The PHY needs to renegotiate. 
*/ - phydev->link = 0; - phydev->state = PHY_UP; - - phy_start_machine(phydev); + if (phydev->attached_dev && phydev->adjust_link) + phy_start_machine(phydev); return 0; } @@ -423,8 +420,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mdiodev->device_free = phy_mdio_device_free; mdiodev->device_remove = phy_mdio_device_remove; - dev->speed = 0; - dev->duplex = -1; + dev->speed = SPEED_UNKNOWN; + dev->duplex = DUPLEX_UNKNOWN; dev->pause = 0; dev->asym_pause = 0; dev->link = 0; @@ -760,6 +757,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, { int rc; + if (!dev) + return -EINVAL; + rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); if (rc) return rc; @@ -825,8 +825,6 @@ void phy_disconnect(struct phy_device *phydev) if (phydev->irq > 0) phy_stop_interrupts(phydev); - phy_stop_machine(phydev); - phydev->adjust_link = NULL; phy_detach(phydev); @@ -885,8 +883,6 @@ int phy_init_hw(struct phy_device *phydev) if (phydev->drv->soft_reset) ret = phydev->drv->soft_reset(phydev); - else - ret = genphy_soft_reset(phydev); if (ret < 0) return ret; @@ -1076,6 +1072,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, error_module_put: module_put(d->driver->owner); + d->driver = NULL; error_put_device: put_device(d); if (ndev_owner != bus->owner) @@ -1101,6 +1098,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, struct device *d; int rc; + if (!dev) + return ERR_PTR(-EINVAL); + /* Search the list of PHY devices on the mdio bus for the * PHY with the requested name */ @@ -1155,6 +1155,9 @@ void phy_detach(struct phy_device *phydev) phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver) device_release_driver(&phydev->mdio.dev); + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + /* * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. @@ -1164,9 +1167,6 @@ void phy_detach(struct phy_device *phydev) put_device(&phydev->mdio.dev); if (ndev_owner != bus->owner) module_put(bus->owner); - - /* Assert the reset signal */ - phy_device_reset(phydev, 1); } EXPORT_SYMBOL(phy_detach); @@ -1506,10 +1506,15 @@ int genphy_update_link(struct phy_device *phydev) { int status; - /* Do a fake read */ - status = phy_read(phydev, MII_BMSR); - if (status < 0) - return status; + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops. 
+ */ + if (!phy_polling_mode(phydev)) { + status = phy_read(phydev, MII_BMSR); + if (status < 0) + return status; + } /* Read link and autonegotiation status */ status = phy_read(phydev, MII_BMSR); @@ -1738,20 +1743,17 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | - PHY_10BT_FEATURES); - switch (max_speed) { - default: - return -ENOTSUPP; - case SPEED_1000: - phydev->supported |= PHY_1000BT_FEATURES; + case SPEED_10: + phydev->supported &= ~PHY_100BT_FEATURES; /* fall through */ case SPEED_100: - phydev->supported |= PHY_100BT_FEATURES; - /* fall through */ - case SPEED_10: - phydev->supported |= PHY_10BT_FEATURES; + phydev->supported &= ~PHY_1000BT_FEATURES; + break; + case SPEED_1000: + break; + default: + return -ENOTSUPP; } return 0; @@ -1930,6 +1932,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.remove = phy_remove; new_driver->mdiodrv.driver.owner = owner; + /* The following works around an issue where the PHY driver doesn't bind + * to the device, resulting in the genphy driver being used instead of + * the dedicated driver. The root cause of the issue isn't known yet + * and seems to be in the base driver core. Once this is fixed we may + * remove this workaround. + */ + new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; + retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index 491efc1bf5c4894a3c3a8b8d67d236f68205efa4..7278eca70f9f36db2c837f9640526df6b6bf53db 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy) if (!phy->last_triggered) led_trigger_event(&phy->led_link_trigger->trigger, LED_FULL); + else + led_trigger_event(&phy->last_triggered->trigger, LED_OFF); - led_trigger_event(&phy->last_triggered->trigger, LED_OFF); led_trigger_event(&plt->trigger, LED_FULL); phy->last_triggered = plt; } diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7abca86c3aa9bf367a6e2da8125e203a6f853ef7..723611ac910275069d4a882f6740d39bfe948a71 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -54,6 +54,10 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; + + /* The current settings */ + phy_interface_t cur_interface; + struct gpio_desc *link_gpio; struct timer_list link_poll; void (*get_fixed_state)(struct net_device *dev, @@ -222,6 +226,8 @@ static int phylink_parse_fixedlink(struct phylink *pl, __ETHTOOL_LINK_MODE_MASK_NBITS, true); linkmode_zero(pl->supported); phylink_set(pl->supported, MII); + phylink_set(pl->supported, Pause); + phylink_set(pl->supported, Asym_Pause); if (s) { __set_bit(s->bit, pl->supported); } else { @@ -348,6 +354,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state * linkmode_zero(state->lp_advertising); state->interface = pl->link_config.interface; state->an_enabled = pl->link_config.an_enabled; + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = MLO_PAUSE_NONE; + state->an_complete = 0; state->link = 1; return pl->ops->mac_link_state(ndev, state); @@ -370,8 +380,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat * Local 
device Link partner * Pause AsymDir Pause AsymDir Result * 1 X 1 X TX+RX - * 0 1 1 1 RX - * 1 1 0 1 TX + * 0 1 1 1 TX + * 1 1 0 1 RX */ static void phylink_resolve_flow(struct phylink *pl, struct phylink_link_state *state) @@ -392,7 +402,7 @@ static void phylink_resolve_flow(struct phylink *pl, new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; else if (pause & MLO_PAUSE_ASYM) new_pause = state->pause & MLO_PAUSE_SYM ? - MLO_PAUSE_RX : MLO_PAUSE_TX; + MLO_PAUSE_TX : MLO_PAUSE_RX; } else { new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; } @@ -473,12 +483,12 @@ static void phylink_resolve(struct work_struct *w) if (!link_state.link) { netif_carrier_off(ndev); pl->ops->mac_link_down(ndev, pl->link_an_mode, - pl->phy_state.interface); + pl->cur_interface); netdev_info(ndev, "Link is Down\n"); } else { + pl->cur_interface = link_state.interface; pl->ops->mac_link_up(ndev, pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); netif_carrier_on(ndev); @@ -502,6 +512,17 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) +{ + unsigned long state = pl->phylink_disable_state; + + set_bit(bit, &pl->phylink_disable_state); + if (state == 0) { + queue_work(system_power_efficient_wq, &pl->resolve); + flush_work(&pl->resolve); + } +} + static void phylink_fixed_poll(struct timer_list *t) { struct phylink *pl = container_of(t, struct phylink, link_poll); @@ -907,6 +928,9 @@ void phylink_start(struct phylink *pl) phylink_an_mode_str(pl->link_an_mode), phy_modes(pl->link_config.interface)); + /* Always set the carrier off */ + netif_carrier_off(pl->netdev); + /* Apply the link configuration to the MAC when starting. This allows * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. 
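/*
 * Illustrative sketch, not part of the patch: the corrected flow-control
 * resolution table from the phylink_resolve_flow() hunk above.  A side that
 * advertises AsymDir without Pause can only end up *sending* pause frames
 * (TX); the side advertising Pause+AsymDir against it ends up honouring them
 * (RX).  PAUSE_TX/PAUSE_RX below are local stand-ins for MLO_PAUSE_TX/RX.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAUSE_TX 1
#define PAUSE_RX 2

static int resolve_pause(bool l_pause, bool l_asym, bool p_pause, bool p_asym)
{
        if (l_pause && p_pause)
                return PAUSE_TX | PAUSE_RX;
        if (l_asym && p_asym) {
                if (!l_pause && p_pause)
                        return PAUSE_TX;        /* we send pause, partner obeys */
                if (l_pause && !p_pause)
                        return PAUSE_RX;        /* partner sends pause, we obey */
        }
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               resolve_pause(true, false, true, false),   /* 3: TX+RX */
               resolve_pause(false, true, true, true),    /* 1: TX */
               resolve_pause(true, true, false, true));   /* 2: RX */
        return 0;
}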
@@ -952,9 +976,7 @@ void phylink_stop(struct phylink *pl) if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) del_timer_sync(&pl->link_poll); - set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -1661,9 +1683,7 @@ static void phylink_sfp_link_down(void *upstream) ASSERT_RTNL(); - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7fc8508b5231d94beab4c45bf7666d15d4ef786f..b47c696d9b2cf3caec688925900db788a8f98498 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = { .flags = PHY_HAS_INTERRUPT, }, { .phy_id = 0x001cc816, - .name = "RTL8201F 10/100Mbps Ethernet", + .name = "RTL8201F Fast Ethernet", .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -291,6 +291,7 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + .set_loopback = genphy_loopback, }, { .phy_id = 0x001cc961, .name = "RTL8366RB Gigabit Ethernet", diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 83060fb349f4d5d458e762eb540afe0de2b935d6..fef701bfad62e67e3349d8f1cca24b440af1aceb 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, /* 1000Base-PX or 1000Base-BX10 */ if ((id->base.e_base_px || id->base.e_base_bx10) && br_min <= 1300 && br_max >= 1200) - phylink_set(support, 1000baseX_Full); + phylink_set(modes, 1000baseX_Full); /* For active or passive cables, select the link modes * based on the bit rates and the cable compliance bytes. @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) return ret; } } + bus->socket_ops->attach(bus->sfp); if (bus->started) bus->socket_ops->start(bus->sfp); bus->netdev->sfp_bus = bus; @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->registered) { if (bus->started) bus->socket_ops->stop(bus->sfp); + bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52d946ca5b29172a4553176addc726..998d08ae7431aef493ae70063268e7d2a0ebc53a 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -184,10 +184,12 @@ struct sfp { struct gpio_desc *gpio[GPIO_MAX]; + bool attached; + struct mutex st_mutex; /* Protects state */ unsigned int state; struct delayed_work poll; struct delayed_work timeout; - struct mutex sm_mutex; + struct mutex sm_mutex; /* Protects state machine */ unsigned char sm_mod_state; unsigned char sm_dev_state; unsigned short sm_state; @@ -279,6 +281,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, { struct i2c_msg msgs[2]; u8 bus_addr = a2 ? 
0x51 : 0x50; + size_t this_len; int ret; msgs[0].addr = bus_addr; @@ -290,11 +293,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, msgs[1].len = len; msgs[1].buf = buf; - ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - return ret; + while (len) { + this_len = len; + if (this_len > 16) + this_len = 16; - return ret == ARRAY_SIZE(msgs) ? len : 0; + msgs[1].len = this_len; + + ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + if (ret != ARRAY_SIZE(msgs)) + break; + + msgs[1].buf += this_len; + dev_addr += this_len; + len -= this_len; + } + + return msgs[1].buf - (u8 *)buf; } static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, @@ -496,7 +514,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) static void sfp_hwmon_to_rx_power(long *value) { - *value = DIV_ROUND_CLOSEST(*value, 100); + *value = DIV_ROUND_CLOSEST(*value, 10); } static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, @@ -1475,7 +1493,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) */ switch (sfp->sm_mod_state) { default: - if (event == SFP_E_INSERT) { + if (event == SFP_E_INSERT && sfp->attached) { sfp_module_tx_disable(sfp); sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); } @@ -1607,6 +1625,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) mutex_unlock(&sfp->sm_mutex); } +static void sfp_attach(struct sfp *sfp) +{ + sfp->attached = true; + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); +} + +static void sfp_detach(struct sfp *sfp) +{ + sfp->attached = false; + sfp_sm_event(sfp, SFP_E_REMOVE); +} + static void sfp_start(struct sfp *sfp) { sfp_sm_event(sfp, SFP_E_DEV_UP); @@ -1667,6 +1698,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, } static const struct sfp_socket_ops sfp_module_ops = { + .attach = sfp_attach, + .detach = sfp_detach, .start = sfp_start, .stop = sfp_stop, .module_info = sfp_module_info, @@ -1686,6 +1719,7 @@ static void sfp_check_state(struct sfp *sfp) { unsigned int state, i, changed; + mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; @@ -1711,6 +1745,7 @@ static void sfp_check_state(struct sfp *sfp) sfp_sm_event(sfp, state & SFP_F_LOS ? SFP_E_LOS_HIGH : SFP_E_LOS_LOW); rtnl_unlock(); + mutex_unlock(&sfp->st_mutex); } static irqreturn_t sfp_irq(int irq, void *data) @@ -1741,6 +1776,7 @@ static struct sfp *sfp_alloc(struct device *dev) sfp->dev = dev; mutex_init(&sfp->sm_mutex); + mutex_init(&sfp->st_mutex); INIT_DELAYED_WORK(&sfp->poll, sfp_poll); INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout); @@ -1834,10 +1870,6 @@ static int sfp_probe(struct platform_device *pdev) dev_info(sfp->dev, "Host maximum power %u.%uW\n", sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); - if (!sfp->sfp_bus) - return -ENOMEM; - /* Get the initial state, and always signal TX disable, * since the network interface will not be up. 
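/*
 * Illustrative sketch, not part of the patch: the chunking pattern from the
 * sfp_i2c_read() hunk above - split one large read into transfers of at most
 * 16 bytes, advance the device offset and destination after every chunk, stop
 * on a short transfer and report how many bytes actually arrived.
 * read_chunk() here is a made-up stand-in for the single i2c_transfer() call.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static unsigned char eeprom[256];       /* pretend module EEPROM */

/* pretend single transfer: copies up to len bytes from "device" offset */
static int read_chunk(size_t dev_addr, unsigned char *buf, size_t len)
{
        if (dev_addr >= sizeof(eeprom))
                return 0;
        if (len > sizeof(eeprom) - dev_addr)
                len = sizeof(eeprom) - dev_addr;
        memcpy(buf, &eeprom[dev_addr], len);
        return (int)len;
}

static int read_all(size_t dev_addr, unsigned char *buf, size_t len)
{
        unsigned char *p = buf;

        while (len) {
                size_t this_len = len > 16 ? 16 : len;
                int ret = read_chunk(dev_addr, p, this_len);

                if (ret < 0)
                        return ret;
                p += ret;
                dev_addr += ret;
                len -= ret;
                if ((size_t)ret < this_len)
                        break;  /* short transfer: stop early */
        }
        return (int)(p - buf);
}

int main(void)
{
        unsigned char buf[100];

        printf("read %d bytes\n", read_all(200, buf, sizeof(buf)));    /* 56 */
        return 0;
}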
*/ @@ -1848,10 +1880,6 @@ static int sfp_probe(struct platform_device *pdev) sfp->state |= SFP_F_RATE_SELECT; sfp_set_state(sfp, sfp->state); sfp_module_tx_disable(sfp); - rtnl_lock(); - if (sfp->state & SFP_F_PRESENT) - sfp_sm_event(sfp, SFP_E_INSERT); - rtnl_unlock(); for (i = 0; i < GPIO_MAX; i++) { if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) @@ -1884,6 +1912,10 @@ static int sfp_probe(struct platform_device *pdev) dev_warn(sfp->dev, "No tx_disable pin: SFP modules will always be emitting.\n"); + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + return 0; } diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e27f985e4d3d44800bdb4652bb31f7..64f54b0bbd8c4d2518898ce211aca5ea794479e4 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -7,6 +7,8 @@ struct sfp; struct sfp_socket_ops { + void (*attach)(struct sfp *sfp); + void (*detach)(struct sfp *sfp); void (*start)(struct sfp *sfp); void (*stop)(struct sfp *sfp); int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index f17b3441779bfd10d7c8f9e80a375f77cfa8e0d9..d8ea4147dfe782249da0305ac6d21365092757fb 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -162,6 +162,14 @@ static const struct spi_device_id ks8995_id[] = { }; MODULE_DEVICE_TABLE(spi, ks8995_id); +static const struct of_device_id ks8895_spi_of_match[] = { + { .compatible = "micrel,ks8995" }, + { .compatible = "micrel,ksz8864" }, + { .compatible = "micrel,ksz8795" }, + { }, + }; +MODULE_DEVICE_TABLE(of, ks8895_spi_of_match); + static inline u8 get_chip_id(u8 val) { return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; @@ -529,6 +537,7 @@ static int ks8995_remove(struct spi_device *spi) static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", + .of_match_table = of_match_ptr(ks8895_spi_of_match), }, .probe = ks8995_probe, .remove = ks8995_remove, diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf5b0319a7f3bad936926c4f4481da..bd6084e315de282b9f92558864cc392727898d7b 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) u16 val = 0; int err; - err = priv->phy_drv->read_status(phydev); + if (priv->phy_drv->read_status) + err = priv->phy_drv->read_status(phydev); + else + err = genphy_read_status(phydev); if (err < 0) return err; diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index bdc4d23627c54aee483158dab31ded6434df9da9..fa3690cfed737cdb3b336c2920a5d3466121097a 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -474,6 +474,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, p)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; @@ -551,7 +555,7 @@ ppp_async_encode(struct asyncppp *ap) * and 7 (code-reject) must be sent as though no options * had been negotiated. 
*/ - islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7; + islcp = proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7; if (i == 0) { if (islcp) diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index b5edc7f96a392d0080400ed4285cfb84e86d9e5c..685e875f51643aa35ab3573e1a9bd932e1c83444 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c @@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = { static int __init deflate_init(void) { - int answer = ppp_register_compressor(&ppp_deflate); - if (answer == 0) - printk(KERN_INFO - "PPP Deflate Compression module registered\n"); - ppp_register_compressor(&ppp_deflate_draft); - return answer; + int rc; + + rc = ppp_register_compressor(&ppp_deflate); + if (rc) + return rc; + + rc = ppp_register_compressor(&ppp_deflate_draft); + if (rc) { + ppp_unregister_compressor(&ppp_deflate); + return rc; + } + + pr_info("PPP Deflate Compression module registered\n"); + return 0; } static void __exit deflate_cleanup(void) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 02ad03a2fab773cd36707e50434cc559fc639f7c..5cd81bdf6d893bb15a4dcb9daaea58016f386467 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -73,6 +73,9 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ +#define PPP_PROTO_LEN 2 +#define PPP_LCP_HDRLEN 4 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -493,6 +496,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf, return ret; } +static bool ppp_check_packet(struct sk_buff *skb, size_t count) +{ + /* LCP packets must include LCP header which 4 bytes long: + * 1-byte code, 1-byte identifier, and 2-byte length. 
+ */ + return get_unaligned_be16(skb->data) != PPP_LCP || + count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN; +} + static ssize_t ppp_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -502,6 +514,9 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, if (!pf) return -ENXIO; + /* All PPP packets should start with the 2-byte protocol */ + if (count < PPP_PROTO_LEN) + return -EINVAL; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) @@ -512,6 +527,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, kfree_skb(skb); goto out; } + ret = -EINVAL; + if (unlikely(!ppp_check_packet(skb, count))) { + kfree_skb(skb); + goto out; + } switch (pf->kind) { case INTERFACE: @@ -1419,6 +1439,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) netif_wake_queue(ppp->dev); else netif_stop_queue(ppp->dev); + } else { + kfree_skb(skb); } ppp_xmit_unlock(ppp); } @@ -1537,7 +1559,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - 2; + ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; switch (proto) { case PPP_IP: diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index a205750b431ba5565df39a3d7d94ac409513fd9e..8609c1a0777b21040f0a8f0127b2bde65fe299ad 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack "); MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); +MODULE_SOFTDEP("pre: arc4"); MODULE_VERSION("1.0.2"); static unsigned int diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 047f6c68a4419ee6ea2ddee4b332049e83b97897..7a1b903d3fb8929e02dbce7c62525a6a278f6164 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -467,6 +467,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, (int __user *) argp)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; @@ -702,7 +706,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf, /* strip address/control field if present */ p = skb->data; - if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { + if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { /* chop off address/control */ if (skb->len < 3) goto err; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 62dc564b251d5e0c2019035355aa17426471cc8e..c04f3dc17d76f1c6d476faae5542975ef1c2707b 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (pskb_trim_rcsum(skb, len)) goto drop; + ph = pppoe_hdr(skb); pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) @@ -1119,6 +1120,9 @@ static const struct proto_ops pppoe_ops = { .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, .ioctl = pppox_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = pppox_compat_ioctl, +#endif }; static const struct pppox_proto pppoe_proto = { diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c index c0599b3b23c06b179c843562dc7026f3e7d2f8df..9128e42e33e74f5f744fa4b9564cb31d2394d71b 100644 --- a/drivers/net/ppp/pppox.c +++ b/drivers/net/ppp/pppox.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include 
#include @@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) EXPORT_SYMBOL(pppox_ioctl); +#ifdef CONFIG_COMPAT +int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + if (cmd == PPPOEIOCSFWD32) + cmd = PPPOEIOCSFWD; + + return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); +} + +EXPORT_SYMBOL(pppox_compat_ioctl); +#endif + static int pppox_create(struct net *net, struct socket *sock, int protocol, int kern) { diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 67ffe74747a15720613295ab544aa62c071989f2..9ad3ff40a563f2428ac710895f8a5b24824b15a2 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -537,6 +537,7 @@ static void pptp_sock_destruct(struct sock *sk) pppox_unbind_sock(sk); } skb_queue_purge(&sk->sk_receive_queue); + dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); } static int pptp_create(struct net *net, struct socket *sock, int kern) @@ -632,6 +633,9 @@ static const struct proto_ops pptp_ops = { .recvmsg = sock_no_recvmsg, .mmap = sock_no_mmap, .ioctl = pppox_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = pppox_compat_ioctl, +#endif }; static const struct pppox_proto pppox_pptp_proto = { diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index e9f101c9bae2ce1d9bde5dbe0d473119ead760e6..bfbb39f935545794c151c18de29655a574fd8bcd 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * it just report sending a packet to the target * (without actual packet transfer). */ - dev_kfree_skb_any(skb); ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index f4e93f5fc2043ebb29c5b36e94afe49ec0c7d7ba..fc7c363ae7b0f549efa83efa97d935ea37bbda02 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -153,7 +153,7 @@ slhc_init(int rslots, int tslots) void slhc_free(struct slcompress *comp) { - if ( comp == NULLSLCOMPR ) + if ( IS_ERR_OR_NULL(comp) ) return; if ( comp->tstate != NULLSLSTATE ) @@ -637,46 +637,57 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) { - register struct cstate *cs; - unsigned ihl; - + const struct tcphdr *th; unsigned char index; + struct iphdr *iph; + register struct cstate *cs; + unsigned int ihl; - if(isize < 20) { - /* The packet is shorter than a legal IP header */ + /* The packet is shorter than a legal IP header. + * Also make sure isize is positive. + */ + if (isize < (int)sizeof(struct iphdr)) { +runt: comp->sls_i_runt++; - return slhc_toss( comp ); + return slhc_toss(comp); } + iph = (struct iphdr *)icp; /* Peek at the IP header's IHL field to find its length */ - ihl = icp[0] & 0xf; - if(ihl < 20 / 4){ - /* The IP header length field is too small */ - comp->sls_i_runt++; - return slhc_toss( comp ); - } - index = icp[9]; - icp[9] = IPPROTO_TCP; + ihl = iph->ihl; + /* The IP header length field is too small, + * or packet is shorter than the IP header followed + * by minimal tcp header. 
+ */ + if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr)) + goto runt; + + index = iph->protocol; + iph->protocol = IPPROTO_TCP; if (ip_fast_csum(icp, ihl)) { /* Bad IP header checksum; discard */ comp->sls_i_badcheck++; - return slhc_toss( comp ); + return slhc_toss(comp); } - if(index > comp->rslot_limit) { + if (index > comp->rslot_limit) { comp->sls_i_error++; return slhc_toss(comp); } - + th = (struct tcphdr *)(icp + ihl * 4); + if (th->doff < sizeof(struct tcphdr) / 4) + goto runt; + if (isize < ihl * 4 + th->doff * 4) + goto runt; /* Update local state */ cs = &comp->rstate[comp->recv_current = index]; comp->flags &=~ SLF_TOSS; - memcpy(&cs->cs_ip,icp,20); - memcpy(&cs->cs_tcp,icp + ihl*4,20); + memcpy(&cs->cs_ip, iph, sizeof(*iph)); + memcpy(&cs->cs_tcp, th, sizeof(*th)); if (ihl > 5) - memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4); - if (cs->cs_tcp.doff > 5) - memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); - cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; + memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4); + if (th->doff > 5) + memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4); + cs->cs_hsize = ihl*2 + th->doff*2; cs->initialized = true; /* Put headers back on packet * Neither header checksum is recalculated diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index b008266e91eab6cfc8fb8e009f1940ef97211e55..a959e01610b1aed88e4770f931bce113304b7a06 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) */ static void slip_write_wakeup(struct tty_struct *tty) { - struct slip *sl = tty->disc_data; + struct slip *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev) @@ -464,7 +471,7 @@ static void sl_tx_timeout(struct net_device *dev) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? 
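/*
 * Illustrative sketch, not part of the patch: the length checks added to
 * slhc_remember() above.  Both the IP header length (ihl) and the TCP data
 * offset (doff) are counted in 32-bit words, both have a minimum of 5, and
 * every offset derived from them must still lie inside the received packet.
 * MIN_IP_HDR/MIN_TCP_HDR mirror sizeof(struct iphdr)/sizeof(struct tcphdr).
 */
#include <stdbool.h>
#include <stdio.h>

#define MIN_IP_HDR  20
#define MIN_TCP_HDR 20

static bool headers_fit(int isize, unsigned int ihl, unsigned int doff)
{
        if (isize < MIN_IP_HDR)
                return false;   /* runt: no room for an IP header at all */
        if (ihl < 5 || isize < (int)(ihl * 4 + MIN_TCP_HDR))
                return false;   /* bad IHL, or TCP header would be truncated */
        if (doff < 5 || isize < (int)(ihl * 4 + doff * 4))
                return false;   /* bad data offset, or TCP options truncated */
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               headers_fit(40, 5, 5),   /* 1: minimal IP + TCP just fits */
               headers_fit(40, 5, 8),   /* 0: TCP options do not fit */
               headers_fit(39, 5, 5));  /* 0: one byte short */
        return 0;
}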
@@ -855,6 +862,8 @@ static int slip_open(struct tty_struct *tty) sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + sl_free_netdev(sl->dev); + free_netdev(sl->dev); err_exit: rtnl_unlock(); @@ -880,10 +889,11 @@ static void slip_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f0f7cd9776671fb6d3f3dd6498611afe593b41c3..9ba0eef69842198f4cab78a8ca00bbbf30cf15b1 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -525,7 +525,7 @@ static int tap_open(struct inode *inode, struct file *file) q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data(&q->sock, &q->sk); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; @@ -715,9 +715,8 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, skb_probe_transport_header(skb, ETH_HLEN); /* Move network header to the right position for VLAN tagged packets */ - if ((skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) && - __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + if (eth_type_vlan(skb->protocol) && + vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) skb_set_network_header(skb, depth); rcu_read_lock(); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d887016e54b68dc06a1bdae7d1a72391020baf0d..44263bd823f6d80eb18ad2a5b4b380ef2e7d63d1 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } -static bool __team_option_inst_tmp_find(const struct list_head *opts, - const struct team_option_inst *needle) -{ - struct team_option_inst *opt_inst; - - list_for_each_entry(opt_inst, opts, tmp_list) - if (opt_inst == needle) - return true; - return false; -} - static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -302,8 +291,10 @@ static int __team_options_register(struct team *team, return 0; inst_rollback: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { __team_option_inst_del_option(team, dst_opts[i]); + list_del(&dst_opts[i]->list); + } i = option_count - 1; alloc_rollback: @@ -985,8 +976,6 @@ static void team_port_disable(struct team *team, team->en_port_count--; team_queue_override_port_del(team, port); team_adjust_ops(team); - team_notify_peers(team); - team_mcast_rejoin(team); team_lower_state_changed(port); } @@ -1024,6 +1013,8 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; @@ -1173,6 +1164,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return -EINVAL; } + if (netdev_has_upper_dev(dev, port_dev)) { + NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface"); + netdev_err(dev, "Device %s is already an upper device of the team interface\n", + portname); + return -EBUSY; + } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) 
{ NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); @@ -1263,22 +1261,44 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_option_port_add; } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + /* set promiscuity level to new slave */ + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(port_dev, 1); + if (err) + goto err_set_slave_promisc; + } + + /* set allmulti level to new slave */ + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(port_dev, 1); + if (err) { + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + goto err_set_slave_promisc; + } + } + + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + } port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); __team_compute_features(team); - __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_oper_up(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); return 0; +err_set_slave_promisc: + __team_option_inst_del_port(team, port); + err_option_port_add: team_upper_dev_unlink(team, port); @@ -1324,12 +1344,20 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); + + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(port_dev, -1); + team_upper_dev_unlink(team, port); netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port); @@ -1677,6 +1705,14 @@ static int team_open(struct net_device *dev) static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; } @@ -2057,13 +2093,27 @@ static const struct ethtool_ops team_ethtool_ops = { static void team_setup_by_port(struct net_device *dev, struct net_device *port_dev) { - dev->header_ops = port_dev->header_ops; + struct team *team = netdev_priv(dev); + + if (port_dev->type == ARPHRD_ETHER) + dev->header_ops = team->header_ops_cache; + else + dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; dev->addr_len = port_dev->addr_len; dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); eth_hw_addr_inherit(dev, port_dev); + + if (port_dev->flags & IFF_POINTOPOINT) { + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) == + (IFF_BROADCAST | IFF_MULTICAST)) { + dev->flags |= (IFF_BROADCAST | IFF_MULTICAST); + dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP); + } } static int team_dev_type_check_change(struct net_device *dev, @@ -2094,8 +2144,11 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { + struct team 
*team = netdev_priv(dev); + ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + team->header_ops_cache = dev->header_ops; dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; @@ -2119,12 +2172,14 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; dev->hw_features = TEAM_VLAN_FEATURES | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } static int team_newlink(struct net *src_net, struct net_device *dev, @@ -2465,7 +2520,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) int err = 0; int i; struct nlattr *nl_option; - LIST_HEAD(opt_inst_list); rtnl_lock(); @@ -2485,6 +2539,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; struct nlattr *attr; struct nlattr *attr_data; + LIST_HEAD(opt_inst_list); enum team_option_type opt_type; int opt_port_ifindex = 0; /* != 0 for per-port options */ u32 opt_array_index = 0; @@ -2589,23 +2644,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; - - /* dumb/evil user-space can send us duplicate opt, - * keep only the last one - */ - if (__team_option_inst_tmp_find(&opt_inst_list, - opt_inst)) - continue; - list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } - } - err = team_nl_send_event_options_get(team, &opt_inst_list); + err = team_nl_send_event_options_get(team, &opt_inst_list); + if (err) + break; + } team_put: team_nl_team_put(team); @@ -2937,7 +2986,7 @@ static int team_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: - if (netif_carrier_ok(dev)) + if (netif_oper_up(dev)) team_port_change_check(port, true); break; case NETDEV_DOWN: diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index a5ef97010eb344c1f5c9cc09d4c63d2103a8c719..5541e1c19936c926bf0f8c7fe91c17265c8333a7 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) return 0; } +static void lb_bpf_func_free(struct team *team) +{ + struct lb_priv *lb_priv = get_lb_priv(team); + struct bpf_prog *fp; + + if (!lb_priv->ex->orig_fprog) + return; + + __fprog_destroy(lb_priv->ex->orig_fprog); + fp = rcu_dereference_protected(lb_priv->fp, + lockdep_is_held(&team->lock)); + bpf_prog_destroy(fp); +} + static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); @@ -639,6 +653,7 @@ static void lb_exit(struct team *team) team_options_unregister(team, lb_options, ARRAY_SIZE(lb_options)); + lb_bpf_func_free(team); cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); free_percpu(lb_priv->pcpu_stats); kfree(lb_priv->ex); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index e0d6760f321951da99e38dd6dc8eb3749848e115..4b5af24139703a474da45227fd86e854da80f22b 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1285,6 +1285,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) 
tbnet_tear_down(net, true); } + tb_unregister_protocol_handler(&net->handler); return 0; } @@ -1293,6 +1294,8 @@ static int __maybe_unused tbnet_resume(struct device *dev) struct tb_service *svc = tb_to_service(dev); struct tbnet *net = tb_service_get_drvdata(svc); + tb_register_protocol_handler(&net->handler); + netif_carrier_off(net->dev); if (netif_running(net->dev)) { netif_device_attach(net->dev); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50e9cc19023a701bad861ac117665a024ba776b1..0341daa43e7904612d1c5006faabbfc356501df5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -248,6 +248,9 @@ struct tun_struct { struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; + /* init args */ + struct file *file; + struct ifreq *ifr; }; struct veth { @@ -273,6 +276,9 @@ void *tun_ptr_to_xdp(void *ptr) } EXPORT_SYMBOL(tun_ptr_to_xdp); +static void tun_flow_init(struct tun_struct *tun); +static void tun_flow_uninit(struct tun_struct *tun); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -319,12 +325,18 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { - netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll, + NAPI_POLL_WEIGHT); napi_enable(&tfile->napi); } } +static void tun_napi_enable(struct tun_file *tfile) +{ + if (tfile->napi_enabled) + napi_enable(&tfile->napi); +} + static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) @@ -599,13 +611,18 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_prog *prog; + u32 numqueues; u16 ret = 0; + numqueues = READ_ONCE(tun->numqueues); + if (!numqueues) + return 0; + prog = rcu_dereference(tun->steering_prog); if (prog) ret = bpf_prog_run_clear_cb(prog->prog, skb); - return ret % tun->numqueues; + return ret % numqueues; } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, @@ -691,7 +708,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun); if (tun && clean) { - tun_napi_disable(tfile); + if (!tfile->detached) + tun_napi_disable(tfile); tun_napi_del(tfile); } @@ -703,13 +721,18 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; + ntfile->xdp_rxq.queue_index = index; + rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], + NULL); --tun->numqueues; if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); - } else + } else { tun_disable_queue(tun, tfile); + tun_napi_disable(tfile); + } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); @@ -732,7 +755,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); - sock_put(&tfile->sk); } } @@ -748,6 +770,9 @@ static void tun_detach(struct tun_file *tfile, bool clean) if (dev) netdev_state_change(dev); rtnl_unlock(); + + if (clean) + sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) @@ -782,6 +807,7 @@ static void tun_detach_all(struct net_device *dev) 
sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { + tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); @@ -794,7 +820,8 @@ static void tun_detach_all(struct net_device *dev) } static int tun_attach(struct tun_struct *tun, struct file *file, - bool skip_filter, bool napi, bool napi_frags) + bool skip_filter, bool napi, bool napi_frags, + bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; @@ -859,23 +886,27 @@ static int tun_attach(struct tun_struct *tun, struct file *file, err = 0; } - rcu_assign_pointer(tfile->tun, tun); - rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); - tun->numqueues++; - if (tfile->detached) { tun_enable_queue(tfile); + tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); } - tun_set_real_num_queues(tun); - /* device is allowed to go away first, so no need to hold extra * refcnt. */ + /* Publish tfile->tun and tun->tfiles only after we've fully + * initialized tfile; otherwise we risk using half-initialized + * object. + */ + if (publish_tun) + rcu_assign_pointer(tfile->tun, tun); + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); + tun->numqueues++; + tun_set_real_num_queues(tun); out: return err; } @@ -1006,6 +1037,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) static const struct ethtool_ops tun_ethtool_ops; +static int tun_net_init(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct ifreq *ifr = tun->ifr; + int err; + + tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); + if (!tun->pcpu_stats) + return -ENOMEM; + + spin_lock_init(&tun->lock); + + err = security_tun_dev_alloc_security(&tun->security); + if (err < 0) { + free_percpu(tun->pcpu_stats); + return err; + } + + tun_flow_init(tun); + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + dev->features = dev->hw_features | NETIF_F_LLTX; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + tun->flags = (tun->flags & ~TUN_FEATURES) | + (ifr->ifr_flags & TUN_FEATURES); + + INIT_LIST_HEAD(&tun->disabled); + err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS, false); + if (err < 0) { + tun_flow_uninit(tun); + security_tun_dev_free_security(tun->security); + free_percpu(tun->pcpu_stats); + return err; + } + return 0; +} + /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { @@ -1015,18 +1089,8 @@ static void tun_net_uninit(struct net_device *dev) /* Net device open. 
*/ static int tun_net_open(struct net_device *dev) { - struct tun_struct *tun = netdev_priv(dev); - int i; - netif_tx_start_all_queues(dev); - for (i = 0; i < tun->numqueues; i++) { - struct tun_file *tfile; - - tfile = rtnl_dereference(tun->tfiles[i]); - tfile->socket.sk->sk_write_space(tfile->socket.sk); - } - return 0; } @@ -1076,6 +1140,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; + struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; @@ -1083,7 +1148,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ - if (txq >= tun->numqueues) + if (!tfile) goto drop; if (!rcu_dereference(tun->steering_prog)) @@ -1122,6 +1187,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; + /* NETIF_F_LLTX requires to do our own update of trans_start */ + queue = netdev_get_tx_queue(dev, txq); + queue->trans_start = jiffies; + /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); @@ -1241,6 +1310,7 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops tun_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1274,6 +1344,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n, rcu_read_lock(); +resample: numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { rcu_read_unlock(); @@ -1282,6 +1353,8 @@ static int tun_xdp_xmit(struct net_device *dev, int n, tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); + if (unlikely(!tfile)) + goto resample; spin_lock(&tfile->tx_ring.producer_lock); for (i = 0; i < n; i++) { @@ -1317,6 +1390,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) } static const struct net_device_ops tap_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1356,7 +1430,7 @@ static void tun_flow_uninit(struct tun_struct *tun) #define MAX_MTU 65535 /* Initialize net device. 
*/ -static void tun_net_init(struct net_device *dev) +static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -1445,7 +1519,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, int err; int i; - if (it->nr_segs > MAX_SKB_FRAGS + 1) + if (it->nr_segs > MAX_SKB_FRAGS + 1 || + len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-ENOMEM); local_bh_disable(); @@ -1464,23 +1539,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { - struct page_frag *pfrag = &current->task_frag; size_t fragsz = it->iov[i].iov_len; + struct page *page; + void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } - - if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { + frag = netdev_alloc_frag(fragsz); + if (!frag) { err = -ENOMEM; goto free; } - - skb_fill_page_desc(skb, i - 1, pfrag->page, - pfrag->offset, fragsz); - page_ref_inc(pfrag->page); - pfrag->offset += fragsz; + page = virt_to_head_page(frag); + skb_fill_page_desc(skb, i - 1, page, + frag - page_address(page), fragsz); } return skb; @@ -1527,6 +1601,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; @@ -1546,8 +1621,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *nskb; local_bh_disable(); - while ((nskb = __skb_dequeue(&process_queue))) + while ((nskb = __skb_dequeue(&process_queue))) { + skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); + } + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } @@ -1568,7 +1646,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) return false; - if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; @@ -1676,6 +1754,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb_reserve(skb, pad - delta); skb_put(skb, len); + skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); alloc_frag->offset += buflen; @@ -1712,9 +1791,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); - if (!(tun->dev->flags & IFF_UP)) - return -EIO; - if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; @@ -1816,6 +1892,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { + err = -EFAULT; +drop: this_cpu_inc(tun->pcpu_stats->rx_dropped); kfree_skb(skb); if (frags) { @@ -1823,7 +1901,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, mutex_unlock(&tfile->napi_mutex); } - return -EFAULT; + return err; } } @@ -1862,8 +1940,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb->dev = tun->dev; break; case IFF_TAP: - if (!frags) - skb->protocol = eth_type_trans(skb, tun->dev); + if (frags && !pskb_may_pull(skb, ETH_HLEN)) { + err = -ENOMEM; + goto drop; + } + skb->protocol = eth_type_trans(skb, tun->dev); break; } @@ -1892,6 +1973,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if
(ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); + if (frags) { + tfile->napi.skb = NULL; + mutex_unlock(&tfile->napi_mutex); + } return total_len; } } @@ -1907,13 +1992,24 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); + rcu_read_lock(); + if (unlikely(!(tun->dev->flags & IFF_UP))) { + err = -EIO; + rcu_read_unlock(); + goto drop; + } + if (frags) { + u32 headlen; + /* Exercise flow dissector code path. */ - u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); + skb_push(skb, ETH_HLEN); + headlen = eth_get_headlen(skb->data, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { this_cpu_inc(tun->pcpu_stats->rx_dropped); napi_free_frags(&tfile->napi); + rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); WARN_ON(1); return -ENOMEM; @@ -1941,6 +2037,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } else { netif_rx_ni(skb); } + rcu_read_unlock(); stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); @@ -2051,14 +2148,16 @@ static ssize_t tun_put_user(struct tun_struct *tun, tun_is_little_endian(tun), true, vlan_hlen)) { struct skb_shared_info *sinfo = skb_shinfo(skb); - pr_err("unexpected GSO type: " - "0x%x, gso_size %d, hdr_len %d\n", - sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), - tun16_to_cpu(tun, gso.hdr_len)); - print_hex_dump(KERN_ERR, "tun: ", - DUMP_PREFIX_NONE, - 16, 1, skb->head, - min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); + + if (net_ratelimit()) { + netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n", + sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), + tun16_to_cpu(tun, gso.hdr_len)); + print_hex_dump(KERN_ERR, "tun: ", + DUMP_PREFIX_NONE, + 16, 1, skb->head, + min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); + } WARN_ON_ONCE(1); return -EINVAL; } @@ -2116,9 +2215,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) } add_wait_queue(&tfile->wq.wait, &wait); - current->state = TASK_INTERRUPTIBLE; while (1) { + set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; @@ -2134,7 +2233,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) schedule(); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->wq.wait, &wait); out: @@ -2264,7 +2363,9 @@ static void tun_setup(struct net_device *dev) static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - return -EINVAL; + NL_SET_ERR_MSG(extack, + "tun/tap creation via rtnetlink is not supported."); + return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) @@ -2536,7 +2637,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); + ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; @@ -2586,9 +2687,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; - err = dev_get_valid_name(net, dev, name); - if (err < 0) - goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; @@ -2607,41 +2705,20 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - tun->pcpu_stats = netdev_alloc_pcpu_stats(struct 
tun_pcpu_stats); - if (!tun->pcpu_stats) { - err = -ENOMEM; - goto err_free_dev; - } + tun->ifr = ifr; + tun->file = file; - spin_lock_init(&tun->lock); - - err = security_tun_dev_alloc_security(&tun->security); - if (err < 0) - goto err_free_stat; - - tun_net_init(dev); - tun_flow_init(tun); - - dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; - dev->vlan_features = dev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); - - tun->flags = (tun->flags & ~TUN_FEATURES) | - (ifr->ifr_flags & TUN_FEATURES); - - INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS); - if (err < 0) - goto err_free_flow; + tun_net_initialize(dev); err = register_netdevice(tun->dev); - if (err < 0) - goto err_detach; + if (err < 0) { + free_netdev(dev); + return err; + } + /* free_netdev() won't check refcnt, to avoid race + * with dev_put() we need publish tun after registration. + */ + rcu_assign_pointer(tfile->tun, tun); } netif_carrier_on(tun->dev); @@ -2656,20 +2733,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) strcpy(ifr->ifr_name, tun->dev->name); return 0; - -err_detach: - tun_detach_all(dev); - /* register_netdevice() already called tun_free_netdev() */ - goto err_free_dev; - -err_free_flow: - tun_flow_uninit(tun); - security_tun_dev_free_security(tun->security); -err_free_stat: - free_percpu(tun->pcpu_stats); -err_free_dev: - free_netdev(dev); - return err; } static void tun_get_iff(struct net *net, struct tun_struct *tun, @@ -2785,7 +2848,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, - tun->flags & IFF_NAPI_FRAGS); + tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) @@ -3214,7 +3277,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data(&tfile->socket, &tfile->sk); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; @@ -3417,6 +3480,7 @@ static int tun_device_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tun_struct *tun = netdev_priv(dev); + int i; if (dev->rtnl_link_ops != &tun_link_ops) return NOTIFY_DONE; @@ -3426,6 +3490,14 @@ static int tun_device_event(struct notifier_block *unused, if (tun_queue_resize(tun)) return NOTIFY_BAD; break; + case NETDEV_UP: + for (i = 0; i < tun->numqueues; i++) { + struct tun_file *tfile; + + tfile = rtnl_dereference(tun->tfiles[i]); + tfile->socket.sk->sk_write_space(tfile->socket.sk); + } + break; default: break; } diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 023b8d0bf1754e833e08514b9cf6165ce3240984..1e6ed483fdd8a7f71036e8fa0e54654ef584f71b 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -21,6 +21,8 @@ #include "asix.h" +#define AX_HOST_EN_RETRIES 30 + int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data, int in_pm) { @@ -75,6 +77,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, value, index, 
data, size); } +static int asix_check_host_enable(struct usbnet *dev, int in_pm) +{ + int i, ret; + u8 smsr; + + for (i = 0; i < AX_HOST_EN_RETRIES; ++i) { + ret = asix_set_sw_mii(dev, in_pm); + if (ret == -ENODEV || ret == -ETIMEDOUT) + break; + usleep_range(1000, 1100); + ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, + 0, 0, 1, &smsr, in_pm); + if (ret == -ENODEV) + break; + else if (ret < sizeof(smsr)) + continue; + else if (smsr & AX_HOST_EN) + break; + } + + return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret; +} + static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) { /* Reset the variables that have a lifetime outside of @@ -458,19 +483,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); __le16 res; - u8 smsr; - int i = 0; int ret; mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 0); - if (ret == -ENODEV || ret == -ETIMEDOUT) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 0); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 0); if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; @@ -491,22 +508,14 @@ void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); - u8 smsr; - int i = 0; int ret; netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", phy_id, loc, val); mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 0); - if (ret == -ENODEV) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 0); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 0); if (ret == -ENODEV) { mutex_unlock(&dev->phy_mutex); return; @@ -522,19 +531,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); __le16 res; - u8 smsr; - int i = 0; int ret; mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 1); - if (ret == -ENODEV || ret == -ETIMEDOUT) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 1); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 1); if (ret == -ENODEV || ret == -ETIMEDOUT) { mutex_unlock(&dev->phy_mutex); return ret; @@ -556,22 +557,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); - u8 smsr; - int i = 0; int ret; netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", phy_id, loc, val); mutex_lock(&dev->phy_mutex); - do { - ret = asix_set_sw_mii(dev, 1); - if (ret == -ENODEV) - break; - usleep_range(1000, 1100); - ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, - 0, 0, 1, &smsr, 1); - } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); + + ret = asix_check_host_enable(dev, 1); if (ret == -ENODEV) { mutex_unlock(&dev->phy_mutex); return; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b654f05b2ccd0b85c88cd42d52dc7c4fea44b868..2eca4168af2f0a658fcb2b1b0584653ac82e9efe 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -238,7 +238,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits) static 
int ax88172_bind(struct usbnet *dev, struct usb_interface *intf) { int ret = 0; - u8 buf[ETH_ALEN]; + u8 buf[ETH_ALEN] = {0}; int i; unsigned long gpio_bits = dev->driver_info->data; @@ -689,7 +689,7 @@ static int asix_resume(struct usb_interface *intf) static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) { int ret, i; - u8 buf[ETH_ALEN], chipcode = 0; + u8 buf[ETH_ALEN] = {0}, chipcode = 0; u32 phyid; struct asix_common_private *priv; @@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK; - (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : - ax88772a_hw_reset(dev, 0); + ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : + ax88772a_hw_reset(dev, 0); + + if (ret < 0) { + netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret); + return ret; + } /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); @@ -1068,7 +1073,7 @@ static const struct net_device_ops ax88178_netdev_ops = { static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; - u8 buf[ETH_ALEN]; + u8 buf[ETH_ALEN] = {0}; usbnet_get_endpoints(dev,intf); diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 501576f538546392381471da43f0d2897df243bd..914cac55a7ae702b5a3a4dc7178effdac1d0eedb 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -208,7 +208,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); - if (ret < 0) { + if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); goto free; } diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 2207f7a7d1ffbb3fe6c4fefa101c4bb2ae01384e..b47b91ca23a10c685c6973145637bd939b2add58 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1373,58 +1373,120 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) + /* At the end of the SKB, there's a header telling us how many packets + * are bundled into this buffer and where we can find an array of + * per-packet metadata (which contains elements encoded into u16). + */ + + /* SKB contents for current firmware: + * + * ... + * + * + * ... + * + * + * + * where: + * contains pkt_len bytes: + * 2 bytes of IP alignment pseudo header + * packet received + * contains 4 bytes: + * pkt_len and fields AX_RXHDR_* + * 0-7 bytes to terminate at + * 8 bytes boundary (64-bit). + * 4 bytes to make rx_hdr terminate at + * 8 bytes boundary (64-bit) + * contains 4 bytes: + * pkt_len=0 and AX_RXHDR_DROP_ERR + * contains 4 bytes: + * pkt_cnt and hdr_off (offset of + * ) + * + * pkt_cnt is the number of entries in the per-packet metadata. + * In the current firmware there are 2 entries per packet. + * The first points to the packet and the + * second is a dummy header. + * This was probably done to align fields in 64-bit and + * maintain compatibility with old firmware. + * This code assumes that and are + * optional.
+ */ + + if (skb->len < 4) return 0; - skb_trim(skb, skb->len - 4); memcpy(&rx_hdr, skb_tail_pointer(skb), 4); le32_to_cpus(&rx_hdr); - pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); + + if (pkt_cnt == 0) + return 0; + + /* Make sure that the bounds of the metadata array are inside the SKB + * (and in front of the counter at the end). + */ + if (pkt_cnt * 4 + hdr_off > skb->len) + return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - while (pkt_cnt--) { + /* Packets must not overlap the metadata array */ + skb_trim(skb, hdr_off); + + for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) { + u16 pkt_len_plus_padd; u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; + pkt_len_plus_padd = (pkt_len + 7) & 0xfff8; + + /* Skip dummy header used for alignment + */ + if (pkt_len == 0) + continue; + + if (pkt_len_plus_padd > skb->len) + return 0; /* Check CRC or runt packet */ - if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || - (*pkt_hdr & AX_RXHDR_DROP_ERR)) { - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) || + pkt_len < 2 + ETH_HLEN) { + dev->net->stats.rx_errors++; + skb_pull(skb, pkt_len_plus_padd); continue; } - if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ + /* last packet */ + if (pkt_len_plus_padd == skb->len) { + skb_trim(skb, pkt_len); + + /* Skip IP alignment pseudo header */ skb_pull(skb, 2); - skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); - skb->truesize = pkt_len + sizeof(struct sk_buff); + + skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd); ax88179_rx_checksum(skb, pkt_hdr); return 1; } ax_skb = skb_clone(skb, GFP_ATOMIC); - if (ax_skb) { - ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); - ax_skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(ax_skb, pkt_hdr); - usbnet_skb_return(dev, ax_skb); - } else { + if (!ax_skb) return 0; - } + skb_trim(ax_skb, pkt_len); - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + + skb->truesize = pkt_len_plus_padd + + SKB_DATA_ALIGN(sizeof(struct sk_buff)); + ax88179_rx_checksum(ax_skb, pkt_hdr); + usbnet_skb_return(dev, ax_skb); + + skb_pull(skb, pkt_len_plus_padd); } - return 1; + + return 0; } static struct sk_buff * diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 61ea4eaace5d0ae78a9613555562c248de0de977..e3f108080af12e85753af2a380ab9d29140493e9 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -135,10 +135,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, } skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); + dev_kfree_skb_any(skb); if (!skb2) return NULL; - dev_kfree_skb_any(skb); skb = skb2; done: diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5c42cf81a08b2c250bb98c792012436d88df9562..c3cf9ae6d1df4a79a7793ec876025fdac4e6e6ed 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -221,9 +221,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) goto bad_desc; } skip: - if ( rndis && - header.usb_cdc_acm_descriptor && - header.usb_cdc_acm_descriptor->bmCapabilities) { + /* Communication class functions with bmCapabilities are not + * RNDIS. But some Wireless class RNDIS functions use + * bmCapabilities for their own purpose. The failsafe is + * therefore applied only to Communication class RNDIS + * functions.
The rndis test is redundant, but a cheap + * optimization. + */ + if (rndis && is_rndis(&intf->cur_altsetting->desc) && + header.usb_cdc_acm_descriptor && + header.usb_cdc_acm_descriptor->bmCapabilities) { dev_dbg(&intf->dev, "ACM capabilities %02x, not really RNDIS?\n", header.usb_cdc_acm_descriptor->bmCapabilities); @@ -793,6 +800,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 1eaec648bd1f716db3d06622cdfb7834e64e4e38..1f57a6a2b8a259fcebab2a51595abf559612a6f9 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) /* read current mtu value from device */ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); - if (err < 0) { + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); + if (err != sizeof(max_datagram_size)) { dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); goto out; } @@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) max_datagram_size = cpu_to_le16(ctx->max_datagram_size); err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); if (err < 0) dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); @@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf) u8 ep; for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) { - e = intf->cur_altsetting->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(&e->desc)) { diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 947bea81d924124c3827e87f75e732e35adb2acd..dfbdea22fbad9b7f6ae0d2c3938af9565e4855e7 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) } if (!timeout) { dev_err(&udev->dev, "firmware not ready in time\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto err; } /* enable ethernet mode (?) 
*/ diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 184c24baca1527333d92ec927c48e958ad6c95f0..160e3721fb4842d03805b2e34973e75dc6916cd7 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -625,7 +625,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; } -static int get_free_serial_index(void) +static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; @@ -633,8 +633,10 @@ static int get_free_serial_index(void) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { + serial_table[index] = serial->parent; + serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return index; + return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -643,15 +645,12 @@ static int get_free_serial_index(void) return -1; } -static void set_serial_by_index(unsigned index, struct hso_serial *serial) +static void release_minor(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); - if (serial) - serial_table[index] = serial->parent; - else - serial_table[index] = NULL; + serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } @@ -2243,6 +2242,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); + release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) @@ -2266,22 +2266,22 @@ static void hso_serial_common_free(struct hso_serial *serial) static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { - int minor; int i; tty_port_init(&serial->port); - minor = get_free_serial_index(); - if (minor < 0) - goto exit; + if (obtain_minor(serial)) + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, minor, &serial->parent->interface->dev, + tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) { + release_minor(serial); + goto exit2; + } - /* fill in specific data for later use */ - serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2323,6 +2323,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } @@ -2366,7 +2367,7 @@ static int remove_net_device(struct hso_device *hso_dev) } /* Frees our network device */ -static void hso_free_net_device(struct hso_device *hso_dev) +static void hso_free_net_device(struct hso_device *hso_dev, bool bailout) { int i; struct hso_net *hso_net = dev2net(hso_dev); @@ -2389,7 +2390,7 @@ static void hso_free_net_device(struct hso_device *hso_dev) kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; - if (hso_net->net) + if (hso_net->net && !bailout) free_netdev(hso_net->net); kfree(hso_dev); @@ -2509,7 +2510,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); - goto exit; + goto err_hso_dev; } hso_net = netdev_priv(net); @@ -2522,13 +2523,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, USB_DIR_IN); if 
(!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); - goto exit; + goto err_net; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); - goto exit; + goto err_net; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); @@ -2537,18 +2538,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) - goto exit; + goto err_mux_bulk_rx; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) - goto exit; + goto err_free_tx_urb; add_net_device(hso_dev); @@ -2556,7 +2557,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); - goto exit; + goto err_free_tx_buf; } hso_log_port(hso_dev); @@ -2564,8 +2565,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_create_rfkill(hso_dev, interface); return hso_dev; -exit: - hso_free_net_device(hso_dev); + +err_free_tx_buf: + remove_net_device(hso_dev); + kfree(hso_net->mux_bulk_tx_buf); +err_free_tx_urb: + usb_free_urb(hso_net->mux_bulk_tx_urb); +err_mux_bulk_rx: + for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { + usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); + kfree(hso_net->mux_bulk_rx_buf_pool[i]); + } +err_net: + free_netdev(net); +err_hso_dev: + kfree(hso_dev); return NULL; } @@ -2634,14 +2648,18 @@ static struct hso_device *hso_create_bulk_serial_device( */ if (serial->tiocmget) { tiocmget = serial->tiocmget; + tiocmget->endp = hso_get_ep(interface, + USB_ENDPOINT_XFER_INT, + USB_DIR_IN); + if (!tiocmget->endp) { + dev_err(&interface->dev, "Failed to find INT IN ep\n"); + goto exit; + } + tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); if (tiocmget->urb) { mutex_init(&tiocmget->mutex); init_waitqueue_head(&tiocmget->waitq); - tiocmget->endp = hso_get_ep( - interface, - USB_ENDPOINT_XFER_INT, - USB_DIR_IN); } else hso_free_tiomget(serial); } @@ -2669,9 +2687,6 @@ static struct hso_device *hso_create_bulk_serial_device( serial->write_data = hso_std_serial_write_data; - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2728,9 +2743,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); - /* and record this serial */ - set_serial_by_index(serial->minor, serial); - /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2807,6 +2819,12 @@ static int hso_get_config_data(struct usb_interface *interface) return -EIO; } + /* check if we have a valid interface */ + if (if_num > 16) { + kfree(config_data); + return -EINVAL; + } + switch (config_data[if_num]) { case 0x0: result = 0; @@ -2877,10 +2895,18 @@ static int hso_probe(struct usb_interface *interface, /* Get the interface/port specification from either 
driver_info or from * the device itself */ - if (id->driver_info) + if (id->driver_info) { + /* if_num is controlled by the device, driver_info is a 0-terminated + * array. Make sure the access is in bounds! */ + for (i = 0; i <= if_num; ++i) + if (((u32 *)(id->driver_info))[i] == 0) + goto exit; port_spec = ((u32 *)(id->driver_info))[if_num]; - else + } else { port_spec = hso_get_config_data(interface); + if (port_spec < 0) + goto exit; + } /* Check if we need to switch to alt interfaces prior to port * configuration */ @@ -3100,8 +3126,7 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); - kref_put(&serial_table[i]->ref, hso_serial_ref_free); - set_serial_by_index(i, NULL); + kref_put(&serial->parent->ref, hso_serial_ref_free); } } @@ -3119,7 +3144,7 @@ static void hso_free_interface(struct usb_interface *interface) rfkill_unregister(rfk); rfkill_destroy(rfk); } - hso_free_net_device(network_table[i]); + hso_free_net_device(network_table[i], false); } } } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7275761a1177ca9cda569bfc734bb6de3e1558e1..3d71f17163902af71bb7485de78f5a3ed7af304c 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -140,7 +140,6 @@ struct ipheth_device { struct usb_device *udev; struct usb_interface *intf; struct net_device *net; - struct sk_buff *tx_skb; struct urb *tx_urb; struct urb *rx_urb; unsigned char *tx_buf; @@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb) case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: + case -EPROTO: return; case 0: break; @@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb) dev_err(&dev->intf->dev, "%s: urb status: %d\n", __func__, status); - dev_kfree_skb_irq(dev->tx_skb); if (status == 0) netif_wake_queue(dev->net); else @@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) if (skb->len > IPHETH_BUF_SIZE) { WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len); dev->net->stats.tx_dropped++; - dev_kfree_skb_irq(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -438,18 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net) dev); dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + netif_stop_queue(net); retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n", __func__, retval); dev->net->stats.tx_errors++; - dev_kfree_skb_irq(skb); + dev_kfree_skb_any(skb); + netif_wake_queue(net); } else { - dev->tx_skb = skb; - dev->net->stats.tx_packets++; dev->net->stats.tx_bytes += skb->len; - netif_stop_queue(net); + dev_consume_skb_any(skb); } return NETDEV_TX_OK; diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index bd2ba365902883f1b4d66c8d83650d81f1813652..93ee909c79395fa54103f42c0c543ab31aa5bd04 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -69,8 +69,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT); if (status != 0) { netdev_err(dev->net, - "Error sending init packet. Status %i, length %i\n", - status, act_len); + "Error sending init packet.
Status %i\n", + status); return status; } else if (act_len != init_msg_len) { @@ -87,8 +87,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, if (status != 0) netdev_err(dev->net, - "Error receiving init result. Status %i, length %i\n", - status, act_len); + "Error receiving init result. Status %i\n", + status); else if (act_len != expected_len) netdev_err(dev->net, "Unexpected init result length: %i\n", act_len); @@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), usb_buf, 24); if (status != 0) - return status; + goto out; memcpy(usb_buf, init_msg_2, 12); status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), usb_buf, 28); if (status != 0) - return status; + goto out; memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); - +out: kfree(usb_buf); return status; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index c3c9ba44e2a12a038e012a3374977b5a6189e3f1..42715520c070614c308d58b535277aba77a85c83 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -522,7 +522,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, } } else { netdev_warn(dev->net, - "Failed to read stat ret = 0x%x", ret); + "Failed to read stat ret = %d", ret); } kfree(stats); @@ -1278,8 +1278,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); lan78xx_defer_kevent(dev, EVENT_LINK_RESET); - if (dev->domain_data.phyirq > 0) + if (dev->domain_data.phyirq > 0) { + local_irq_disable(); generic_handle_irq(dev->domain_data.phyirq); + local_irq_enable(); + } } else netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); @@ -1820,6 +1823,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) dev->mdiobus->read = lan78xx_mdiobus_read; dev->mdiobus->write = lan78xx_mdiobus_write; dev->mdiobus->name = "lan78xx-mdiobus"; + dev->mdiobus->parent = &dev->udev->dev; snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); @@ -2335,6 +2339,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + /* Added to support MAC address changes */ + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + return 0; } @@ -2728,11 +2736,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static int lan78xx_linearize(struct sk_buff *skb) -{ - return skb_linearize(skb); -} - static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { @@ -2743,8 +2746,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, return NULL; } - if (lan78xx_linearize(skb) < 0) + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); return NULL; + } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; @@ -2943,6 +2948,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) int i; ret = lan78xx_get_endpoints(dev, intf); + if (ret) { + netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", + ret); + return ret; + } dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -3792,10 +3802,14 @@ static int lan78xx_probe(struct usb_interface *intf, /* driver requires remote-wakeup capability during autosuspend. 
*/ intf->needs_remote_wakeup = 1; + ret = lan78xx_phy_init(dev); + if (ret < 0) + goto out4; + ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out3; + goto out5; } usb_set_intfdata(intf, dev); @@ -3808,14 +3822,12 @@ static int lan78xx_probe(struct usb_interface *intf, pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); - ret = lan78xx_phy_init(dev); - if (ret < 0) - goto out4; - return 0; +out5: + phy_disconnect(netdev->phydev); out4: - unregister_netdev(netdev); + usb_free_urb(dev->urb_intr); out3: lan78xx_unbind(dev, intf); out2: diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 5a47e5510ca8243eed8f12cc86a4ec70ac42658e..c0f52a622964f66934313ec027660abd57d8f9e8 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -121,8 +121,16 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { - return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, - 0x0000, index, data, size); + int ret; + + ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, + 0x0000, index, data, size); + if (ret < 0) + return ret; + else if (ret < size) + return -ENODATA; + + return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index f4247b275e0901a54ebf0b36d67adaf708bf6950..b7a0df95d4b0fe760b2f8a4976b5e4fc06003668 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { int i; - __u8 tmp; + __u8 tmp = 0; __le16 retdatai; int ret; diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 6fe59373cba9b8bd1afce514265171dbbd43aa9e..8bab7306e5a68785bfcc4c4803ca4de2e369d27d 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -69,9 +69,7 @@ static inline int pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) { - return usbnet_read_cmd(dev, req, - USB_DIR_IN | USB_TYPE_VENDOR | - USB_RECIP_DEVICE, + return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 72a55b6b421184c4fb69411ba3d0150e6c337a88..ad5feb20dd0f0041fa515a3725922f29384db240 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -63,6 +63,7 @@ enum qmi_wwan_flags { enum qmi_wwan_quirks { QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ + QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. 
endpoints */ }; struct qmimux_hdr { @@ -123,6 +124,7 @@ static void qmimux_setup(struct net_device *dev) dev->addr_len = 0; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &qmimux_netdev_ops; + dev->mtu = 1500; dev->needs_free_netdev = true; } @@ -151,32 +153,39 @@ static bool qmimux_has_slaves(struct usbnet *dev) static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { - unsigned int len, offset = sizeof(struct qmimux_hdr); + unsigned int len, offset = 0, pad_len, pkt_len; struct qmimux_hdr *hdr; struct net_device *net; struct sk_buff *skbn; + u8 qmimux_hdr_sz = sizeof(*hdr); - while (offset < skb->len) { - hdr = (struct qmimux_hdr *)skb->data; + while (offset + qmimux_hdr_sz < skb->len) { + hdr = (struct qmimux_hdr *)(skb->data + offset); len = be16_to_cpu(hdr->pkt_len); /* drop the packet, bogus length */ - if (offset + len > skb->len) + if (offset + len + qmimux_hdr_sz > skb->len) return 0; /* control packet, we do not know what to do */ if (hdr->pad & 0x80) goto skip; + /* extract padding length and check for valid length info */ + pad_len = hdr->pad & 0x3f; + if (len == 0 || pad_len >= len) + goto skip; + pkt_len = len - pad_len; + net = qmimux_find_dev(dev, hdr->mux_id); if (!net) goto skip; - skbn = netdev_alloc_skb(net, len); + skbn = netdev_alloc_skb(net, pkt_len); if (!skbn) return 0; skbn->dev = net; - switch (skb->data[offset] & 0xf0) { + switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { case 0x40: skbn->protocol = htons(ETH_P_IP); break; @@ -185,15 +194,16 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) break; default: /* not ip - do not know what to do */ + kfree_skb(skbn); goto skip; } - skb_put_data(skbn, skb->data + offset, len); + skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len); if (netif_rx(skbn) != NET_RX_SUCCESS) return 0; skip: - offset += len + sizeof(struct qmimux_hdr); + offset += len + qmimux_hdr_sz; } return 1; } @@ -238,13 +248,14 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) return err; } -static void qmimux_unregister_device(struct net_device *dev) +static void qmimux_unregister_device(struct net_device *dev, + struct list_head *head) { struct qmimux_priv *priv = netdev_priv(dev); struct net_device *real_dev = priv->real_dev; netdev_upper_dev_unlink(real_dev, dev); - unregister_netdevice(dev); + unregister_netdevice_queue(dev, head); /* Get rid of the reference to real_dev */ dev_put(real_dev); @@ -353,8 +364,8 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c if (kstrtou8(buf, 0, &mux_id)) return -EINVAL; - /* mux_id [1 - 0x7f] range empirically found */ - if (mux_id < 1 || mux_id > 0x7f) + /* mux_id [1 - 254] for compatibility with ip(8) and the rmnet driver */ + if (mux_id < 1 || mux_id > 254) return -EINVAL; if (!rtnl_trylock()) @@ -415,7 +426,7 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c ret = -EINVAL; goto err; } - qmimux_unregister_device(del_dev); + qmimux_unregister_device(del_dev, NULL); if (!qmimux_has_slaves(dev)) info->flags &= ~QMI_WWAN_FLAG_MUX; @@ -843,6 +854,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { .data = QMI_WWAN_QUIRK_DTR, }; +static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN | FLAG_SEND_ZLP, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, + .data = QMI_WWAN_QUIRK_DTR | 
QMI_WWAN_QUIRK_QUECTEL_DYNCFG, +}; + #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ @@ -863,6 +884,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { #define QMI_GOBI_DEVICE(vend, prod) \ QMI_FIXED_INTF(vend, prod, 0) +/* Quectel does not use fixed interface numbers on at least some of their + * devices. We need to check the number of endpoints to ensure that we bind to + * the correct interface. + */ +#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \ + USB_SUBCLASS_VENDOR_SPEC, 0xff), \ + .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg + static const struct usb_device_id products[] = { /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ @@ -967,13 +997,9 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Quectel EP06/EG06/EM06 */ - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, - 0xff), - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, - }, + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1113,10 +1139,18 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */ + {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */ + {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */ + {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ + {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1170,6 +1204,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */ {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ @@ -1190,16 +1225,20 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ + 
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ + {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ @@ -1229,6 +1268,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ @@ -1245,6 +1287,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -1255,14 +1298,17 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* 
Fibocom NL678 series */ + {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ + {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ @@ -1338,24 +1384,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf) return false; } -static bool quectel_ep06_diag_detected(struct usb_interface *intf) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; - - if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && - le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && - intf_desc.bNumEndpoints == 2) - return true; - - return false; -} - static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod) { struct usb_device_id *id = (struct usb_device_id *)prod; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; + const struct driver_info *info; /* Workaround to enable dynamic IDs. This disables usbnet * blacklisting functionality. Which, if required, can be @@ -1385,14 +1419,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, return -ENODEV; } - /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so + /* Several Quectel modems supports dynamic interface configuration, so * we need to match on class/subclass/protocol. These values are * identical for the diagnostic- and QMI-interface, but bNumEndpoints is * different. Ignore the current interface if the number of endpoints - * the number for the diag interface (two). + * equals the number for the diag interface (two). */ - if (quectel_ep06_diag_detected(intf)) - return -ENODEV; + info = (void *)id->driver_info; + + if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { + if (desc->bNumEndpoints == 2) + return -ENODEV; + } return usbnet_probe(intf, id); } @@ -1403,6 +1441,7 @@ static void qmi_wwan_disconnect(struct usb_interface *intf) struct qmi_wwan_state *info; struct list_head *iter; struct net_device *ldev; + LIST_HEAD(list); /* called twice if separate control and data intf */ if (!dev) @@ -1415,8 +1454,9 @@ static void qmi_wwan_disconnect(struct usb_interface *intf) } rcu_read_lock(); netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) - qmimux_unregister_device(ldev); + qmimux_unregister_device(ldev, &list); rcu_read_unlock(); + unregister_netdevice_many(&list); rtnl_unlock(); info->flags &= ~QMI_WWAN_FLAG_MUX; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f1b5201cc32075da27cf14d94b781c9f58c16189..f28fc023c8b74b65c64cce3037af2afd044fb9f8 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -788,8 +788,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, 500); + if (ret < 0) + memset(data, 0xff, size); + else + memcpy(data, tmp, size); - memcpy(data, tmp, size); kfree(tmp); return ret; @@ -1383,7 +1386,9 @@ static void intr_callback(struct urb *urb) "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: - netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + if (net_ratelimit()) + netif_info(tp, intr, tp->netdev, + "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: @@ -4471,10 +4476,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf) struct r8152 *tp = usb_get_intfdata(intf); 
clear_bit(SELECTIVE_SUSPEND, &tp->flags); - mutex_lock(&tp->control); tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - mutex_unlock(&tp->control); + set_ethernet_addr(tp); return rtl8152_resume(intf); } @@ -5337,6 +5341,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index ec287c9741e833eb2af7b2878ee08ff1941227b0..dea72fa0495857175e1c5b1c556677c44f0b3438 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -102,7 +102,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { + if (unlikely(ret < 4)) { + ret = ret < 0 ? ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; @@ -1495,7 +1497,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); - return ret; + goto free_pdata; } smsc75xx_init_mac_address(dev); @@ -1504,7 +1506,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) ret = smsc75xx_reset(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); - return ret; + goto cancel_work; } dev->net->netdev_ops = &smsc75xx_netdev_ops; @@ -1514,6 +1516,13 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE; return 0; + +cancel_work: + cancel_work_sync(&pdata->set_multicast); +free_pdata: + kfree(pdata); + dev->data[0] = 0; + return ret; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1523,7 +1532,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) cancel_work_sync(&pdata->set_multicast); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); - pdata = NULL; dev->data[0] = 0; } } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 262e7a3c23cb67fbfd66b81ed0d26af0f0480d84..6e971628bb50a6d5018d0ba1bb2295dd25a6d3b1 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1295,6 +1295,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); smsc95xx_init_mac_address(dev); @@ -1321,6 +1322,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc95xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; + dev->net->min_mtu = ETH_MIN_MTU; + dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; pdata->dev = dev; @@ -1598,6 +1601,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) return ret; } + cancel_delayed_work_sync(&pdata->carrier_check); + if (pdata->suspend_flags) { 
netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; @@ -1840,6 +1845,11 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); + + if (ret) + schedule_delayed_work(&pdata->carrier_check, + CARRIER_CHECK_DELAY); + return ret; } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 6ac232e52bf7c683832aee40dc1d0fabcd9a6202..83640628c47dd7a201adda20de572c3698dbbeee 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 35f39f23d88144195b8f007035f207d38b48c1fd..a5ff7df10505b312cb97e5bd4e49bc0a3a3d3797 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net) static int sr_mdio_read(struct net_device *net, int phy_id, int loc) { struct usbnet *dev = netdev_priv(net); - __le16 res; + __le16 res = 0; mutex_lock(&dev->phy_mutex); sr_set_sw_mii(dev); @@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) data->eeprom_len = SR9800_EEPROM_LEN; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + goto out; /* LED Setting Rule : * AABB:CCDD diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 770aa624147f170abd330d7820bc995ef9a712e6..e5c7a5c05109014a3c6beda1b02fcb8dc63978e5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -112,6 +112,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) int intr = 0; e = alt->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: if (!usb_endpoint_dir_in(&e->desc)) @@ -351,6 +356,8 @@ void usbnet_update_max_qlen(struct usbnet *dev) { enum usb_device_speed speed = dev->udev->speed; + if (!dev->rx_urb_size || !dev->hard_mtu) + goto insanity; switch (speed) { case USB_SPEED_HIGH: dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; @@ -367,6 +374,7 @@ void usbnet_update_max_qlen(struct usbnet *dev) dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; break; default: +insanity: dev->rx_qlen = dev->tx_qlen = 4; } } @@ -506,6 +514,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) if (netif_running (dev->net) && netif_device_present (dev->net) && + test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { @@ -1431,6 +1440,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, spin_unlock_irqrestore(&dev->txq.lock, flags); goto drop; } + if (netif_queue_stopped(net)) { + usb_autopm_put_interface_async(dev->intf); + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } #ifdef CONFIG_PM /* if this triggers the device is still a sleep */ @@ -1770,6 +1784,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (!dev->rx_urb_size) dev->rx_urb_size = dev->hard_mtu; dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); + if (dev->maxpacket == 0) { + /* that is a broken 
device */ + status = -ENODEV; + goto out4; + } /* let userspace know we have a random address */ if (ether_addr_equal(net->dev_addr, node_id)) @@ -1979,7 +1998,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (size) { - buf = kmalloc(size, GFP_KERNEL); + buf = kmalloc(size, GFP_NOIO); if (!buf) goto out; } @@ -2011,7 +2030,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (data) { - buf = kmemdup(data, size, GFP_KERNEL); + buf = kmemdup(data, size, GFP_NOIO); if (!buf) goto out; } else { @@ -2112,7 +2131,7 @@ static void usbnet_async_cmd_cb(struct urb *urb) int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size) { - struct usb_ctrlrequest *req = NULL; + struct usb_ctrlrequest *req; struct urb *urb; int err = -ENOMEM; void *buf = NULL; @@ -2130,7 +2149,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (!buf) { netdev_err(dev->net, "Error allocating buffer" " in %s!\n", __func__); - goto fail_free; + goto fail_free_urb; } } @@ -2154,14 +2173,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (err < 0) { netdev_err(dev->net, "Error submitting the control" " message: status=%d\n", err); - goto fail_free; + goto fail_free_all; } return 0; +fail_free_all: + kfree(req); fail_free_buf: kfree(buf); -fail_free: - kfree(req); + /* + * avoid a double free + * needed because the flag can be set only + * after filling the URB + */ + urb->transfer_flags = 0; +fail_free_urb: usb_free_urb(urb); fail: return err; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 41a00cd76955bf39047fafc0a1641eb347c6ee21..fe9a75a6128bd8a8f592c909b1c40504fe3ff126 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -152,9 +152,10 @@ static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); - if (!rq->rx_notify_masked) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (!READ_ONCE(rq->rx_notify_masked) && + napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); } } @@ -187,7 +188,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } @@ -197,8 +198,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - if (rcv_xdp) - skb_record_rx_queue(skb, rxq); } if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { @@ -625,8 +624,10 @@ static int veth_poll(struct napi_struct *napi, int budget) /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); + } } } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ddfa3f24204c71e66f3d9bfeea5b257404ffc577..5b1622fef57b030798a6c4bcc8ee53c0aa413e4b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); #define VIRTIO_XDP_TX BIT(0) #define 
VIRTIO_XDP_REDIR BIT(1) +#define VIRTIO_XDP_FLAG BIT(0) + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -70,7 +72,8 @@ static const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, - VIRTIO_NET_F_GUEST_UFO + VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_GUEST_CSUM }; struct virtnet_stat_desc { @@ -250,6 +253,21 @@ struct padded_vnet_hdr { char padding[4]; }; +static bool is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VIRTIO_XDP_FLAG; +} + +static void *xdp_to_ptr(struct xdp_frame *ptr) +{ + return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); +} + +static struct xdp_frame *ptr_to_xdp(void *ptr) +{ + return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); +} + /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ @@ -364,7 +382,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, - unsigned int len, unsigned int truesize) + unsigned int len, unsigned int truesize, + bool hdr_valid, unsigned int metasize) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -386,17 +405,28 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - memcpy(hdr, p, hdr_len); + /* hdr_valid means no XDP, so we can copy the vnet header */ + if (hdr_valid) + memcpy(hdr, p, hdr_len); len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; - copy = len; - if (copy > skb_tailroom(skb)) - copy = skb_tailroom(skb); + /* Copy all frame if it fits skb->head, otherwise + * we let virtio_net_hdr_to_skb() and GRO pull headers as needed. + */ + if (len <= skb_tailroom(skb)) + copy = len; + else + copy = ETH_HLEN + metasize; skb_put_data(skb, p, copy); + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + len -= copy; offset += copy; @@ -442,10 +472,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct virtio_net_hdr_mrg_rxbuf *hdr; int err; - /* virtqueue want to use data area in-front of packet */ - if (unlikely(xdpf->metasize > 0)) - return -EOPNOTSUPP; - if (unlikely(xdpf->headroom < vi->hdr_len)) return -EOVERFLOW; @@ -458,7 +484,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -478,36 +505,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; - struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; + int packets = 0; + int bytes = 0; int drops = 0; int kicks = 0; int ret, err; + void *ptr; int i; - sq = virtnet_xdp_sq(vi); - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { - ret = -EINVAL; - drops = n; - goto out; - } - /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. 
*/ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) { - ret = -ENXIO; + if (!xdp_prog) + return -ENXIO; + + sq = virtnet_xdp_sq(vi); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; drops = n; goto out; } /* Free up any pending old buffers before queueing new ones. */ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(is_xdp_frame(ptr))) { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } else { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, false); + } + packets++; + } for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -526,6 +564,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += drops; sq->stats.kicks += kicks; @@ -622,6 +662,7 @@ static struct sk_buff *receive_small(struct net_device *dev, unsigned int delta = 0; struct page *xdp_page; int err; + unsigned int metasize = 0; len -= vi->hdr_len; stats->bytes += len; @@ -661,8 +702,8 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; xdp.data = xdp.data_hard_start + xdp_headroom; - xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -673,6 +714,7 @@ static struct sk_buff *receive_small(struct net_device *dev, /* Recalculate length in case bpf program changed it */ delta = orig_data - xdp.data; len = xdp.data_end - xdp.data; + metasize = xdp.data - xdp.data_meta; break; case XDP_TX: stats->xdp_tx++; @@ -718,6 +760,9 @@ static struct sk_buff *receive_small(struct net_device *dev, memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); } /* keep zeroed vnet hdr since packet was changed by bpf */ + if (metasize) + skb_metadata_set(skb, metasize); + err: return skb; @@ -738,7 +783,8 @@ static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_rq_stats *stats) { struct page *page = buf; - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + struct sk_buff *skb = + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -770,6 +816,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); int err; + unsigned int metasize = 0; head_skb = NULL; stats->bytes += len - vi->hdr_len; @@ -816,8 +863,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, data = page_address(xdp_page) + offset; xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; xdp.data = data + vi->hdr_len; - xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + (len - vi->hdr_len); + xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -825,23 +872,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, switch (act) { case XDP_PASS: + metasize = xdp.data - xdp.data_meta; + /* recalculate offset to account for any header - * adjustments. Note other cases do not build an - * skb and avoid using offset + * adjustments and minus the metasize to copy the + * metadata in page_to_skb(). 
Note other cases do not + * build an skb and avoid using offset */ - offset = xdp.data - - page_address(xdp_page) - vi->hdr_len; + offset = xdp.data - page_address(xdp_page) - + vi->hdr_len - metasize; - /* recalculate len if xdp.data or xdp.data_end were - * adjusted + /* recalculate len if xdp.data, xdp.data_end or + * xdp.data_meta were adjusted */ - len = xdp.data_end - xdp.data + vi->hdr_len; + len = xdp.data_end - xdp.data + vi->hdr_len + metasize; /* We can only create skb based on xdp_page. */ if (unlikely(xdp_page != page)) { rcu_read_unlock(); put_page(page); - head_skb = page_to_skb(vi, rq, xdp_page, - offset, len, PAGE_SIZE); + head_skb = page_to_skb(vi, rq, xdp_page, offset, + len, PAGE_SIZE, false, + metasize); return head_skb; } break; @@ -897,7 +948,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_skb; } - head_skb = page_to_skb(vi, rq, page, offset, len, truesize); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, + metasize); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1324,20 +1376,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return stats.packets; } -static void free_old_xmit_skbs(struct send_queue *sq) +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { - struct sk_buff *skb; unsigned int len; unsigned int packets = 0; unsigned int bytes = 0; + void *ptr; - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; - bytes += skb->len; - packets++; + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); - dev_consume_skb_any(skb); + bytes += frame->len; + xdp_return_frame(frame); + } + packets++; } /* Avoid overhead when no packets have been processed @@ -1352,6 +1412,16 @@ static void free_old_xmit_skbs(struct send_queue *sq) u64_stats_update_end(&sq->stats.syncp); } +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1359,11 +1429,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) struct send_queue *sq = &vi->sq[index]; struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); - if (!sq->napi.weight) + if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) return; if (__netif_tx_trylock(txq)) { - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); } @@ -1436,13 +1506,41 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) { struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); + unsigned int index = vq2txq(sq->vq); + struct netdev_queue *txq; + int opaque; + bool done; + + if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { + /* We don't need to enable cb for XDP */ + napi_complete_done(napi, 0); + return 0; + } + txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); - free_old_xmit_skbs(sq); + virtqueue_disable_cb(sq->vq); + free_old_xmit_skbs(sq, true); + + opaque = virtqueue_enable_cb_prepare(sq->vq); + + 
done = napi_complete_done(napi, 0); + + if (!done) + virtqueue_disable_cb(sq->vq); + __netif_tx_unlock(txq); - virtqueue_napi_complete(napi, sq->vq, 0); + if (done) { + if (unlikely(virtqueue_poll(sq->vq, opaque))) { + if (napi_schedule_prep(napi)) { + __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); + __netif_tx_unlock(txq); + __napi_schedule(napi); + } + } + } if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) netif_tx_wake_queue(txq); @@ -1474,7 +1572,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (virtio_net_hdr_from_skb(skb, &hdr->hdr, virtio_is_little_endian(vi->vdev), false, 0)) - BUG(); + return -EPROTO; if (vi->mergeable_rx_bufs) hdr->num_buffers = 0; @@ -1508,7 +1606,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (use_napi && kick) virtqueue_enable_cb_delayed(sq->vq); @@ -1551,7 +1649,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) if (!use_napi && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ - free_old_xmit_skbs(sq); + free_old_xmit_skbs(sq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); @@ -2013,14 +2111,16 @@ static int virtnet_set_channels(struct net_device *dev, get_online_cpus(); err = _virtnet_set_queues(vi, queue_pairs); - if (!err) { - netif_set_real_num_tx_queues(dev, queue_pairs); - netif_set_real_num_rx_queues(dev, queue_pairs); - - virtnet_set_affinity(vi); + if (err) { + put_online_cpus(); + goto err; } + virtnet_set_affinity(vi); put_online_cpus(); + netif_set_real_num_tx_queues(dev, queue_pairs); + netif_set_real_num_rx_queues(dev, queue_pairs); + err: return err; } @@ -2213,7 +2313,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = { static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); @@ -2221,14 +2320,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - napi_disable(&vi->rq[i].napi); - virtnet_napi_tx_disable(&vi->sq[i].napi); - } - } + if (netif_running(vi->dev)) + virtnet_close(vi->dev); } static int init_vqs(struct virtnet_info *vi); @@ -2236,7 +2329,7 @@ static int init_vqs(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; err = init_vqs(vi); if (err) @@ -2245,15 +2338,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); - } + err = virtnet_open(vi->dev); + if (err) + return err; } netif_tx_lock_bh(vi->dev); @@ -2285,9 +2372,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi) if (!vi->guest_offloads) return 0; - if 
(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) - offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; - return virtnet_set_guest_offloads(vi, offloads); } @@ -2297,8 +2381,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) if (!vi->guest_offloads) return 0; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) - offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; return virtnet_set_guest_offloads(vi, offloads); } @@ -2316,8 +2398,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { - NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); return -EOPNOTSUPP; } @@ -2344,6 +2427,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return -ENOMEM; } + old_prog = rtnl_dereference(vi->rq[0].xdp_prog); + if (!prog && !old_prog) + return 0; + if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); if (IS_ERR(prog)) @@ -2351,36 +2438,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. */ - if (netif_running(dev)) - for (i = 0; i < vi->max_queue_pairs; i++) + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } + } + + if (!prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) + virtnet_restore_guest_offloads(vi); + } + synchronize_net(); + } - netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; + netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); vi->xdp_queue_pairs = xdp_qp; - for (i = 0; i < vi->max_queue_pairs; i++) { - old_prog = rtnl_dereference(vi->rq[i].xdp_prog); - rcu_assign_pointer(vi->rq[i].xdp_prog, prog); - if (i == 0) { - if (!old_prog) + if (prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); - if (!prog) - virtnet_restore_guest_offloads(vi); } + } + + for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev)) + if (netif_running(dev)) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } } return 0; err: - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (!prog) { + virtnet_clear_guest_offloads(vi); + for (i = 0; i < vi->max_queue_pairs; i++) + rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); + } + + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } + } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2536,16 +2649,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) put_page(vi->rq[i].alloc_frag.page); } -static bool is_xdp_raw_buffer_queue(struct 
virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -2554,10 +2657,10 @@ static void free_unused_bufs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (!is_xdp_raw_buffer_queue(vi, i)) + if (!is_xdp_frame(buf)) dev_kfree_skb(buf); else - put_page(virt_to_head_page(buf)); + xdp_return_frame(ptr_to_xdp(buf)); } } @@ -2607,10 +2710,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; - int ret = -ENOMEM; - int i, total_vqs; const char **names; + int ret = -ENOMEM; + int total_vqs; bool *ctx; + u16 i; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by @@ -2647,8 +2751,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; - sprintf(vi->rq[i].name, "input.%d", i); - sprintf(vi->sq[i].name, "output.%d", i); + sprintf(vi->rq[i].name, "input.%u", i); + sprintf(vi->sq[i].name, "output.%u", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; if (ctx) @@ -3000,22 +3104,28 @@ static int virtnet_probe(struct virtio_device *vdev) } } - err = register_netdev(dev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + + err = register_netdevice(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); + rtnl_unlock(); goto free_failover; } virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + + rtnl_unlock(); + err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_unregister_netdev; } - virtnet_set_queues(vi, vi->curr_queue_pairs); - /* Assume link up if device can't report link status, otherwise get link status from config. 
*/ netif_carrier_off(dev); @@ -3105,8 +3215,11 @@ static __maybe_unused int virtnet_restore(struct virtio_device *vdev) virtnet_set_queues(vi, vi->curr_queue_pairs); err = virtnet_cpu_notif_add(vi); - if (err) + if (err) { + virtnet_freeze_down(vdev); + remove_vq_common(vi); return err; + } return 0; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e454dfc9ad8f2151ddeaf2a2621033ccdce0e1c6..f0a7e10d4e29b5c00cf1bbace73f3e869b500f5c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -595,6 +595,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { dev_kfree_skb_any(rbi->skb); + rbi->skb = NULL; rq->stats.rx_buf_alloc_failure++; break; } @@ -619,6 +620,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { put_page(rbi->page); + rbi->page = NULL; rq->stats.rx_buf_alloc_failure++; break; } @@ -1584,6 +1586,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, u32 i, ring_idx; struct Vmxnet3_RxDesc *rxd; + /* ring has already been cleaned up */ + if (!rq->rx_ring[0].base) + return; + for (ring_idx = 0; ring_idx < 2; ring_idx++) { for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { #ifdef __BIG_ENDIAN_BITFIELD diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index f93547f257fbbe22ccb8da8d7eca6c6230b28d40..6501ab4990d387fcdbd2285b5b49b9eff71d5fa6 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -169,25 +169,31 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) { - const struct ipv6hdr *iph = ipv6_hdr(skb); + const struct ipv6hdr *iph; struct net *net = dev_net(skb->dev); - struct flowi6 fl6 = { - /* needed to match OIF rule */ - .flowi6_oif = dev->ifindex, - .flowi6_iif = LOOPBACK_IFINDEX, - .daddr = iph->daddr, - .saddr = iph->saddr, - .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF, - }; + struct flowi6 fl6; int ret = NET_XMIT_DROP; struct dst_entry *dst; struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; - dst = ip6_route_output(net, NULL, &fl6); - if (dst == dst_null) + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + goto err; + + iph = ipv6_hdr(skb); + + memset(&fl6, 0, sizeof(fl6)); + /* needed to match OIF rule */ + fl6.flowi6_oif = dev->ifindex; + fl6.flowi6_iif = LOOPBACK_IFINDEX; + fl6.daddr = iph->daddr; + fl6.saddr = iph->saddr; + fl6.flowlabel = ip6_flowinfo(iph); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = iph->nexthdr; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; + + dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst) || dst == dst_null) goto err; skb_dst_drop(skb); @@ -204,6 +210,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); ret = vrf_ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@ -241,21 +248,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk, static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { - struct iphdr *ip4h = ip_hdr(skb); + struct iphdr *ip4h; int ret = NET_XMIT_DROP; - struct flowi4 fl4 = { - /* needed to match OIF rule 
*/ - .flowi4_oif = vrf_dev->ifindex, - .flowi4_iif = LOOPBACK_IFINDEX, - .flowi4_tos = RT_TOS(ip4h->tos), - .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, - .flowi4_proto = ip4h->protocol, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, - }; + struct flowi4 fl4; struct net *net = dev_net(vrf_dev); struct rtable *rt; + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + + memset(&fl4, 0, sizeof(fl4)); + /* needed to match OIF rule */ + fl4.flowi4_oif = vrf_dev->ifindex; + fl4.flowi4_iif = LOOPBACK_IFINDEX; + fl4.flowi4_tos = RT_TOS(ip4h->tos); + fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF; + fl4.flowi4_proto = ip4h->protocol; + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; @@ -279,6 +292,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, RT_SCOPE_LINK); } + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(net_xmit_eval(ret))) vrf_dev->stats.tx_errors++; @@ -324,8 +338,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -static int vrf_finish_direct(struct net *net, struct sock *sk, - struct sk_buff *skb) +static void vrf_finish_direct(struct sk_buff *skb) { struct net_device *vrf_dev = skb->dev; @@ -344,7 +357,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk, skb_pull(skb, ETH_HLEN); } - return 1; + /* reset skb device */ + nf_reset(skb); } #if IS_ENABLED(CONFIG_IPV6) @@ -423,15 +437,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, return skb; } +static int vrf_output6_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + vrf_finish_direct(skb); + + return vrf_ip6_local_out(net, sk, skb); +} + static int vrf_output6_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { + int err = 1; + skb->protocol = htons(ETH_P_IPV6); - return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb->dev, - vrf_finish_direct, - !(IPCB(skb)->flags & IPSKB_REROUTED)); + if (!(IPCB(skb)->flags & IPSKB_REROUTED)) + err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb, + NULL, skb->dev, vrf_output6_direct_finish); + + if (likely(err == 1)) + vrf_finish_direct(skb); + + return err; +} + +static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + int err; + + err = vrf_output6_direct(net, sk, skb); + if (likely(err == 1)) + err = vrf_ip6_local_out(net, sk, skb); + + return err; } static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, @@ -444,18 +484,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, - skb, NULL, vrf_dev, vrf_output6_direct); + skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); if (likely(err == 1)) err = vrf_output6_direct(net, sk, skb); - /* reset skb device */ if (likely(err == 1)) - nf_reset(skb); - else - skb = NULL; + return skb; - return skb; + return NULL; } static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, @@ -466,7 +503,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); return vrf_ip6_out_redirect(vrf_dev, 
skb); @@ -636,15 +674,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, return skb; } +static int vrf_output_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + vrf_finish_direct(skb); + + return vrf_ip_local_out(net, sk, skb); +} + static int vrf_output_direct(struct net *net, struct sock *sk, struct sk_buff *skb) { + int err = 1; + skb->protocol = htons(ETH_P_IP); - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb->dev, - vrf_finish_direct, - !(IPCB(skb)->flags & IPSKB_REROUTED)); + if (!(IPCB(skb)->flags & IPSKB_REROUTED)) + err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, + NULL, skb->dev, vrf_output_direct_finish); + + if (likely(err == 1)) + vrf_finish_direct(skb); + + return err; +} + +static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + int err; + + err = vrf_output_direct(net, sk, skb); + if (likely(err == 1)) + err = vrf_ip_local_out(net, sk, skb); + + return err; } static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, @@ -657,18 +721,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, - skb, NULL, vrf_dev, vrf_output_direct); + skb, NULL, vrf_dev, vrf_ip_out_direct_finish); if (likely(err == 1)) err = vrf_output_direct(net, sk, skb); - /* reset skb device */ if (likely(err == 1)) - nf_reset(skb); - else - skb = NULL; + return skb; - return skb; + return NULL; } static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, @@ -680,7 +741,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); return vrf_ip_out_redirect(vrf_dev, skb); @@ -874,6 +936,7 @@ static const struct net_device_ops vrf_netdev_ops = { .ndo_init = vrf_dev_init, .ndo_uninit = vrf_dev_uninit, .ndo_start_xmit = vrf_xmit, + .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = vrf_get_stats64, .ndo_add_slave = vrf_add_slave, .ndo_del_slave = vrf_del_slave, @@ -1146,7 +1209,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) struct sk_buff *skb; int err; - if (family == AF_INET6 && !ipv6_mod_enabled()) + if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) && + !ipv6_mod_enabled()) return 0; skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); @@ -1215,8 +1279,19 @@ static int vrf_add_fib_rules(const struct net_device *dev) goto ipmr_err; #endif +#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) + err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true); + if (err < 0) + goto ip6mr_err; +#endif + return 0; +#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) +ip6mr_err: + vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false); +#endif + #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) ipmr_err: vrf_fib_rule(dev, AF_INET6, false); @@ -1262,6 +1337,15 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_RX_HANDLER; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* VRF devices do not care about MTU, but if the MTU is set + * too low then the ipv4 and ipv6 protocols are disabled + * which breaks networking. 
+ */ + dev->min_mtu = IPV6_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 27bd586b94b0a01f1f99e0c01d8df85c1757b960..7ea96096dbd15c92bf5c6476ee03c68b7610ae27 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -975,6 +975,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -987,12 +988,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -1010,6 +1014,10 @@ static bool vxlan_snoop(struct net_device *dev, struct vxlan_fdb *f; u32 ifindex = 0; + /* Ignore packets from invalid src-address */ + if (!is_valid_ether_addr(src_mac)) + return true; + #if IS_ENABLED(CONFIG_IPV6) if (src_ip->sa.sa_family == AF_INET6 && (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)) @@ -1469,6 +1477,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } + rcu_read_lock(); + + if (unlikely(!(vxlan->dev->flags & IFF_UP))) { + rcu_read_unlock(); + atomic_long_inc(&vxlan->dev->rx_dropped); + goto drop; + } + stats = this_cpu_ptr(vxlan->dev->tstats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; @@ -1476,6 +1492,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) u64_stats_update_end(&stats->syncp); gro_cells_receive(&vxlan->gro_cells, skb); + + rcu_read_unlock(); + return 0; drop: @@ -1600,6 +1619,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, ns_olen = request->len - skb_network_offset(request) - sizeof(struct ipv6hdr) - sizeof(*ns); for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { + if (!ns->opt[i + 1]) { + kfree_skb(reply); + return NULL; + } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; @@ -1663,6 +1686,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) struct neighbour *n; struct nd_msg *msg; + rcu_read_lock(); in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; @@ -1714,6 +1738,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) } out: + rcu_read_unlock(); consume_skb(skb); return NETDEV_TX_OK; } @@ -1952,7 +1977,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; - int err; if (!sock6) return ERR_PTR(-EIO); @@ -1975,10 +1999,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.fl6_dport = dport; fl6.fl6_sport = sport; - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - sock6->sock->sk, - &ndst, &fl6); - if (unlikely(err < 0)) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, + &fl6, NULL); + if (unlikely(IS_ERR(ndst))) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } @@ -2003,7 +2026,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; - struct net_device *dev = skb->dev; + struct net_device *dev; int len = skb->len; 
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); @@ -2023,9 +2046,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, #endif } + rcu_read_lock(); + dev = skb->dev; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto drop; + } + if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, - vni); + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -2038,8 +2067,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { +drop: dev->stats.rx_dropped++; } + rcu_read_unlock(); } static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, @@ -2155,9 +2186,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len && - info->key.tun_flags & TUNNEL_VXLAN_OPT) + if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { + if (info->options_len < sizeof(*md)) + goto drop; md = ip_tunnel_info_opts(info); + } ttl = info->key.ttl; tos = info->key.tos; label = info->key.label; @@ -2430,10 +2463,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&vxlan->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + return 0; } @@ -2452,6 +2494,8 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + gro_cells_destroy(&vxlan->gro_cells); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); free_percpu(dev->tstats); @@ -2689,8 +2733,6 @@ static void vxlan_setup(struct net_device *dev) vxlan->dev = dev; - gro_cells_init(&vxlan->gro_cells, dev); - for (h = 0; h < FDB_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vxlan->fdb_head[h]); } @@ -3144,6 +3186,9 @@ static void vxlan_config_apply(struct net_device *dev, dev->gso_max_segs = lowerdev->gso_max_segs; needed_headroom = lowerdev->hard_header_len; + needed_headroom += lowerdev->needed_headroom; + + dev->needed_tailroom = lowerdev->needed_tailroom; max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); @@ -3190,6 +3235,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f = NULL; + bool unregister = false; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3215,12 +3261,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, err = register_netdevice(dev); if (err) goto errout; + unregister = true; err = rtnl_configure_link(dev, NULL); - if (err) { - unregister_netdevice(dev); + if (err) goto errout; - } /* notify default fdb entry */ if (f) @@ -3228,9 +3273,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, list_add(&vxlan->next, &vn->vxlan_list); return 0; + errout: + /* unregister_netdevice() destroys the default FDB entry with deletion + * notification. But the addition notification was not sent yet, so + * destroy the entry by hand here. 
+ */ if (f) vxlan_fdb_destroy(vxlan, f, false); + if (unregister) + unregister_netdevice(dev); return err; } @@ -3466,7 +3518,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct vxlan_rdst *dst = &vxlan->default_dst; struct vxlan_rdst old_dst; struct vxlan_config conf; - struct vxlan_fdb *f = NULL; int err; err = vxlan_nl2conf(tb, data, @@ -3492,19 +3543,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], old_dst.remote_ifindex, 0); if (!vxlan_addr_any(&dst->remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, + err = vxlan_fdb_update(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, + NLM_F_APPEND | NLM_F_CREATE, vxlan->cfg.dst_port, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF, &f); + NTF_SELF); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); } spin_unlock_bh(&vxlan->hash_lock); } @@ -3518,7 +3569,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) vxlan_flush(vxlan, true); - gro_cells_destroy(&vxlan->gro_cells); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); } @@ -3778,10 +3828,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) /* If vxlan->dev is in the same netns, it has already been added * to the list by the previous loop. */ - if (!net_eq(dev_net(vxlan->dev), net)) { - gro_cells_destroy(&vxlan->gro_cells); + if (!net_eq(dev_net(vxlan->dev), net)) unregister_netdevice_queue(vxlan->dev, head); - } } for (h = 0; h < PORT_HASH_SIZE; ++h) diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5f0366a125e2605b8c78f54c11004cd5ea37a533..0212f576a838ca2b4e798dc001345a449fa9ef80 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1113,7 +1113,6 @@ static int ucc_hdlc_probe(struct platform_device *pdev) if (register_hdlc_device(dev)) { ret = -ENOBUFS; pr_err("ucc_hdlc: unable to register hdlc device\n"); - free_netdev(dev); goto free_dev; } diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index ab8b3cbbb205cc42f2bea15259bee0b77c2ff055..85844f26547dd2443842c360f12c141e1a9a798e 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -386,11 +386,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } for (opt = data; len; len -= opt[1], opt += opt[1]) { - if (len < 2 || len < opt[1]) { - dev->stats.rx_errors++; - kfree(out); - return; /* bad packet, drop silently */ - } + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; if (pid == PID_LCP) switch (opt[0]) { @@ -398,6 +395,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, continue; /* MRU always OK and > 1500 bytes? 
*/ case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; @@ -409,6 +408,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } break; case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ @@ -427,6 +428,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); } static int ppp_rx(struct sk_buff *skb) diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 74c06a5f586f7d529a0ece30a54d884e75cb903c..4f25c2d8fff065e11e0410aea94bb8b45c4c21ea 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -486,8 +486,10 @@ static int x25_asy_open(struct net_device *dev) /* Cleanup */ kfree(sl->xbuff); + sl->xbuff = NULL; noxbuff: kfree(sl->rbuff); + sl->rbuff = NULL; norbuff: return -ENOMEM; } diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index e9fc168bb734504e535be77067dd250a8197bffe..489cba9b284d1b3bf633853c2f1f5de57e1533c2 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options) } result = i2400m_barker_db_add(barker); if (result < 0) - goto error_add; + goto error_parse_add; } kfree(options_orig); } return 0; +error_parse_add: error_parse: + kfree(options_orig); error_add: kfree(i2400m_barker_db); return result; diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index b0dba35a8ad2ae94e4cf89d78767156616fee59c..dc6fe93ce71f65e9e60491fbf4cc45ecde6174a2 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -147,6 +147,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, error_alloc: d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n", wimax_dev, state, result); + kfree(cmd); return result; } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index b94759daeaccf68c84d71b9938627bb6a2b1c793..91b524ca32435b1a687c1f1de047024ac3aa62f0 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, if (flags & AR5523_CMD_FLAG_MAGIC) hdr->magic = cpu_to_be32(1 << 24); - memcpy(hdr + 1, idata, ilen); + if (ilen) + memcpy(hdr + 1, idata, ilen); cmd->odata = odata; cmd->olen = olen; @@ -1579,6 +1580,20 @@ static int ar5523_probe(struct usb_interface *intf, struct ar5523 *ar; int error = -ENOMEM; + static const u8 bulk_ep_addr[] = { + AR5523_CMD_TX_PIPE | USB_DIR_OUT, + AR5523_DATA_TX_PIPE | USB_DIR_OUT, + AR5523_CMD_RX_PIPE | USB_DIR_IN, + AR5523_DATA_RX_PIPE | USB_DIR_IN, + 0}; + + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) { + dev_err(&dev->dev, + "Could not find all expected endpoints\n"); + error = -ENODEV; + goto out; + } + /* * Load firmware if the device requires it. 
This will return * -ENXIO on success and we'll get called back afer the usb diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index 7a364eca46d642e25c69fc2275fce4077a113faa..f083fb9038c36384629631d0292a05152a6b17ce 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr); void ath_hw_setbssidmask(struct ath_common *common); -void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key); +void ath_key_delete(struct ath_common *common, u8 hw_key_idx); int ath_key_config(struct ath_common *common, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); bool ath_hw_keyreset(struct ath_common *common, u16 entry); +bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac); void ath_hw_cycle_counters_update(struct ath_common *common); int32_t ath_hw_get_listen_time(struct ath_common *common); diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index c9bd0e2b5db7ed5a062fbb0a5d1abbdffc4e50ec..be90c9e9e5bc133697e6cf47f65b7d4ae6b48d0e 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -655,10 +655,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar) ath10k_ahb_irq_disable(ar); synchronize_irq(ar_ahb->irq); - ath10k_pci_flush(ar); - napi_synchronize(&ar->napi); napi_disable(&ar->napi); + + ath10k_pci_flush(ar); } static int ath10k_ahb_hif_power_up(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 18c709c484e738cd02a0c7cd373c0f485b5170f9..f761d651c16e7d2e4dde004fdd599770412083f4 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -500,14 +500,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) { - if (ar->hw_params.shadow_reg_support) - ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, - write_index); - else - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, - write_index); - } + if (!(flags & CE_SEND_FLAG_GATHER)) + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); src_ring->write_index = write_index; exit: @@ -581,8 +575,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, /* Update Source Ring Write Index */ write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -1394,12 +1394,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, u32 nentries) { src_ring->shadow_base_unaligned = kcalloc(nentries, - sizeof(struct ce_desc), + sizeof(struct ce_desc_64), GFP_KERNEL); if (!src_ring->shadow_base_unaligned) return -ENOMEM; - src_ring->shadow_base = (struct ce_desc *) + src_ring->shadow_base = (struct ce_desc_64 *) PTR_ALIGN(src_ring->shadow_base_unaligned, CE_DESC_RING_ALIGN); return 0; @@ -1453,7 +1453,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ret = 
ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); if (ret) { dma_free_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + + (nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), src_ring->base_addr_owner_space_unaligned, base_addr); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index b8fb5382dedeb9830142f83d5b674f8d445c8e98..8088f7a66426171c780ba26a6f0861b1c33791e9 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -118,7 +118,7 @@ struct ath10k_ce_ring { u32 base_addr_ce_space; char *shadow_base_unaligned; - struct ce_desc *shadow_base; + struct ce_desc_64 *shadow_base; /* keep last */ void *per_transfer_context[0]; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c40cd129afe7b2c006fff3d8b270add0a3843d32..436eac342b62276aa7e39eaefa6a29521ef4f2f4 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -91,6 +91,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA988X_HW_2_0_VERSION, @@ -124,6 +125,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9887_HW_1_0_VERSION, @@ -157,6 +159,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_2_1_VERSION, @@ -189,6 +192,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_2_1_VERSION, @@ -221,6 +225,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_3_0_VERSION, @@ -253,6 +258,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA6174_HW_3_2_VERSION, @@ -288,6 +294,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -326,6 +333,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -369,6 +377,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -411,6 +420,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -443,6 +453,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ 
-477,6 +488,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -516,6 +528,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = false, .shadow_reg_support = false, .rri_on_ddr = false, + .hw_filter_reset_required = true, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -532,6 +545,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &wcn3990_ops, .decap_align_bytes = 1, .num_peers = TARGET_HL_10_TLV_NUM_PEERS, + .n_cipher_suites = 11, .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT, .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, .target_64bit = true, @@ -539,6 +553,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .per_ce_irq = true, .shadow_reg_support = true, .rri_on_ddr = true, + .hw_filter_reset_required = false, }, }; @@ -2405,7 +2420,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, * possible to implicitly make it correct by creating a dummy vdev and * then deleting it. */ - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + if (ar->hw_params.hw_filter_reset_required && + mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_core_reset_rx_filter(ar); if (status) { ath10k_err(ar, diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 9feea02e7d3730c9350c6c6cfda722acb2512c19..3cd49d29ac23f806824321f80da42298922472ba 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -414,6 +414,14 @@ struct ath10k_peer { /* protected by ar->data_lock */ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; + union htt_rx_pn_t tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS]; + bool tids_last_pn_valid[ATH10K_TXRX_NUM_EXT_TIDS]; + union htt_rx_pn_t frag_tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS]; + u32 frag_tids_seq[ATH10K_TXRX_NUM_EXT_TIDS]; + struct { + enum htt_security_types sec_type; + int pn_len; + } rx_pn[ATH10K_HTT_TXRX_PEER_SECURITY_MAX]; }; struct ath10k_txq { @@ -1003,6 +1011,7 @@ struct ath10k { struct completion install_key_done; + int last_wmi_vdev_start_status; struct completion vdev_setup_done; struct workqueue_struct *workqueue; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 4d28063052fec3f47c6eba3bf5c1eb752ec59525..385b84f24322d102ad79f527e06092ef0493e429 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1105,9 +1105,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len); - memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, - crash_data->ramdump_buf_len); - sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + if (crash_data->ramdump_buf_len) { + memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, + crash_data->ramdump_buf_len); + sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; + } } spin_unlock_bh(&ar->data_lock); @@ -1154,6 +1156,9 @@ int ath10k_coredump_register(struct ath10k *ar) if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); + if (!crash_data->ramdump_buf_len) + return 0; + crash_data->ramdump_buf = 
vzalloc(crash_data->ramdump_buf_len); if (!crash_data->ramdump_buf) return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index a63c97e2c50c5d29915b334d2937eca5b3892601..6f10331e986bd9069bff8bed1d51467cf5b919ee 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) + if (!peer || !peer->sta) goto out; arsta = (struct ath10k_sta *)peer->sta->drv_priv; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 5d3ff80f3a1f9dd8a27019e85e0345b269998b19..95ca591abec7f70d6878808ce9b901418c57ec95 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -719,6 +719,20 @@ struct htt_rx_indication { struct htt_rx_indication_mpdu_range mpdu_ranges[0]; } __packed; +struct htt_hl_rx_desc { + __le32 info; + __le32 pn_31_0; + union { + struct { + __le16 pn_47_32; + __le16 pn_63_48; + } pn16; + __le32 pn_63_32; + } u0; + __le32 pn_95_64; + __le32 pn_127_96; +} __packed; + static inline struct htt_rx_indication_mpdu_range * htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind) { @@ -764,6 +778,21 @@ struct htt_rx_peer_unmap { __le16 peer_id; } __packed; +enum htt_txrx_sec_cast_type { + HTT_TXRX_SEC_MCAST = 0, + HTT_TXRX_SEC_UCAST +}; + +enum htt_rx_pn_check_type { + HTT_RX_NON_PN_CHECK = 0, + HTT_RX_PN_CHECK +}; + +enum htt_rx_tkip_demic_type { + HTT_RX_NON_TKIP_MIC = 0, + HTT_RX_TKIP_MIC +}; + enum htt_security_types { HTT_SECURITY_NONE, HTT_SECURITY_WEP128, @@ -777,6 +806,10 @@ enum htt_security_types { HTT_NUM_SECURITY_TYPES /* keep this last! 
*/ }; +#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2 +#define ATH10K_TXRX_NUM_EXT_TIDS 19 +#define ATH10K_TXRX_NON_QOS_TID 16 + enum htt_security_flags { #define HTT_SECURITY_TYPE_MASK 0x7F #define HTT_SECURITY_TYPE_LSB 0 @@ -887,6 +920,11 @@ struct htt_rx_fragment_indication { u8 fw_msdu_rx_desc[0]; } __packed; +#define ATH10K_IEEE80211_EXTIV BIT(5) +#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */ + +#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16 + #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0 #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20 @@ -1994,6 +2032,9 @@ struct htt_rx_desc { u8 msdu_payload[0]; }; +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000 +#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16 + #define HTT_RX_DESC_ALIGN 8 #define HTT_MAC_ADDR_LEN 6 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 4d1cd90d6d27c3d299095626efd80888eb9e840c..43658659ca83bacaedbef3ca9a7c600fdd687351 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1510,16 +1510,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); } +static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, + u16 offset, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ieee80211_hdr *hdr; + u64 pn = 0; + u8 *ehdr; + + hdr = (struct ieee80211_hdr *)(skb->data + offset); + ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control); + + if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { + pn = ehdr[0]; + pn |= (u64)ehdr[1] << 8; + pn |= (u64)ehdr[4] << 16; + pn |= (u64)ehdr[5] << 24; + pn |= (u64)ehdr[6] << 32; + pn |= (u64)ehdr[7] << 40; + } + return pn; +} + +static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, + struct sk_buff *skb, + u16 offset) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)(skb->data + offset); + return !is_multicast_ether_addr(hdr->addr1); +} + +static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, + struct sk_buff *skb, + u16 peer_id, + u16 offset, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ath10k_peer *peer; + union htt_rx_pn_t *last_pn, new_pn = {0}; + struct ieee80211_hdr *hdr; + bool more_frags; + u8 tid, frag_number; + u32 seq; + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); + return false; + } + + hdr = (struct ieee80211_hdr *)(skb->data + offset); + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = ieee80211_get_tid(hdr); + else + tid = ATH10K_TXRX_NON_QOS_TID; + + last_pn = &peer->frag_tids_last_pn[tid]; + new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype); + more_frags = ieee80211_has_morefrags(hdr->frame_control); + frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; + seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + + if (frag_number == 0) { + last_pn->pn48 = new_pn.pn48; + peer->frag_tids_seq[tid] = seq; + } else { + if (seq != peer->frag_tids_seq[tid]) + return false; + + if (new_pn.pn48 != last_pn->pn48 + 1) + return false; + + last_pn->pn48 = new_pn.pn48; + } + + return true; +} + static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *status, bool fill_crypt_header, u8 *rx_hdr, - enum ath10k_pkt_rx_err *err) + enum ath10k_pkt_rx_err *err, + u16 peer_id, + bool frag) { struct sk_buff *first; struct sk_buff 
*last; - struct sk_buff *msdu; + struct sk_buff *msdu, *temp; struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; enum htt_rx_mpdu_encrypt_type enctype; @@ -1532,6 +1613,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, bool is_decrypted; bool is_mgmt; u32 attention; + bool frag_pn_check = true, multicast_check = true; if (skb_queue_empty(amsdu)) return; @@ -1630,7 +1712,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, } skb_queue_walk(amsdu, msdu) { + if (frag && !fill_crypt_header && is_decrypted && + enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) + frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, + msdu, + peer_id, + 0, + enctype); + + if (frag) + multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, + msdu, + 0); + + if (!frag_pn_check || !multicast_check) { + /* Discard the fragment with invalid PN or multicast DA + */ + temp = msdu->prev; + __skb_unlink(msdu, amsdu); + dev_kfree_skb_any(msdu); + msdu = temp; + frag_pn_check = true; + multicast_check = true; + continue; + } + ath10k_htt_rx_h_csum_offload(msdu); + + if (frag && !fill_crypt_header && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + status->flag &= ~RX_FLAG_MMIC_STRIPPED; + ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, is_decrypted); @@ -1648,6 +1760,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, hdr = (void *)msdu->data; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + if (frag && !fill_crypt_header && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + status->flag &= ~RX_FLAG_IV_STRIPPED & + ~RX_FLAG_MMIC_STRIPPED; } } @@ -1755,14 +1872,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar, ath10k_unchain_msdu(amsdu, unchain_cnt); } +static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, + struct sk_buff_head *amsdu) +{ + u8 *subframe_hdr; + struct sk_buff *first; + bool is_first, is_last; + struct htt_rx_desc *rxd; + struct ieee80211_hdr *hdr; + size_t hdr_len, crypto_len; + enum htt_rx_mpdu_encrypt_type enctype; + int bytes_aligned = ar->hw_params.decap_align_bytes; + + first = skb_peek(amsdu); + + rxd = (void *)first->data - sizeof(*rxd); + hdr = (void *)rxd->rx_hdr_status; + + is_first = !!(rxd->msdu_end.common.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd->msdu_end.common.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + + /* Return in case of non-aggregated msdu */ + if (is_first && is_last) + return true; + + /* First msdu flag is not set for the first msdu of the list */ + if (!is_first) + return false; + + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + + crypto_len; + + /* Validate if the amsdu has a proper first subframe. + * There are chances a single msdu can be received as amsdu when + * the unauthenticated amsdu flag of a QoS header + * gets flipped in non-SPP AMSDU's, in such cases the first + * subframe has llc/snap header in place of a valid da. + * return false if the da matches rfc1042 pattern + */ + if (ether_addr_equal(subframe_hdr, rfc1042_header)) + return false; + + return true; +} + static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *rx_status) { - /* FIXME: It might be a good idea to do some fuzzy-testing to drop - * invalid/dangerous frames. 
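The htt_rx changes above add three defensive checks: the 48-bit CCMP packet number is rebuilt from the extended IV, later fragments are only accepted when they keep the initial sequence number and advance the PN by exactly one, and an A-MSDU whose first subframe "destination address" is really an LLC/SNAP header is rejected. A rough stand-alone sketch of those checks, with simplified types that are not the driver's own::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* CCMP header layout: PN0 PN1 rsvd keyid/ExtIV PN2 PN3 PN4 PN5 */
    static uint64_t ccmp_pn(const uint8_t iv[8])
    {
        return (uint64_t)iv[0]       | (uint64_t)iv[1] << 8  |
               (uint64_t)iv[4] << 16 | (uint64_t)iv[5] << 24 |
               (uint64_t)iv[6] << 32 | (uint64_t)iv[7] << 40;
    }

    struct frag_state {            /* per peer, per TID */
        uint64_t last_pn;
        uint16_t seq;
    };

    /* Fragment 0 records PN and sequence number; every later fragment
     * must carry the same sequence number and a PN of exactly last+1.
     */
    static bool frag_ok(struct frag_state *st, unsigned int frag_no,
                        uint16_t seq, uint64_t pn)
    {
        if (frag_no == 0) {
            st->last_pn = pn;
            st->seq = seq;
            return true;
        }
        if (seq != st->seq || pn != st->last_pn + 1)
            return false;
        st->last_pn = pn;
        return true;
    }

    /* Reject an A-MSDU whose first subframe DA is the RFC 1042 header. */
    static const uint8_t rfc1042[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

    static bool first_subframe_ok(const uint8_t *da)
    {
        return memcmp(da, rfc1042, sizeof(rfc1042)) != 0;
    }

    int main(void)
    {
        struct frag_state st = { 0 };
        uint8_t iv[8] = { 0x05, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 };

        printf("pn=%llu\n", (unsigned long long)ccmp_pn(iv));
        printf("frag0=%d frag1=%d\n",
               frag_ok(&st, 0, 100, 5), frag_ok(&st, 1, 100, 6));
        printf("amsdu first subframe ok: %d\n", first_subframe_ok(rfc1042));
        return 0;
    }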
- */ - if (!rx_status->freq) { ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); return false; @@ -1773,6 +1938,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, return false; } + if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); + return false; + } + return true; } @@ -1835,7 +2005,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); - ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, + false); msdus_to_queue = skb_queue_len(&amsdu); ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); @@ -2186,7 +2357,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, - NULL); + NULL, peer_id, frag); ath10k_htt_rx_h_enqueue(ar, &amsdu, status); break; case -EAGAIN: @@ -2589,7 +2760,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", peer_id); goto out; @@ -2642,7 +2813,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", peer_id); goto out; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 677535b3d2070eea5d20983b89c9aba44be4a829..476e0535f06f0ca6745b0f43bd6c3a5fd78247fe 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -168,7 +168,7 @@ const struct ath10k_hw_values qca6174_values = { }; const struct ath10k_hw_values qca99x0_values = { - .rtc_state_val_on = 5, + .rtc_state_val_on = 7, .ce_count = 12, .msi_assign_ce_max = 12, .num_target_ce_config_wlan = 10, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 977f79ebb4fd5911bd8910d415e1ba6d492359ff..fac58c3c576a2054a4ef65a2b8d3657896a9037f 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -589,6 +589,11 @@ struct ath10k_hw_params { /* Number of bytes to be the offset for each FFT sample */ int spectral_bin_offset; + + /* targets which require hw filter reset during boot up, + * to avoid it sending spurious acks. 
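The new hw_filter_reset_required flag lets the per-chip parameter table decide whether the boot-time RX-filter reset workaround (the dummy-vdev create/delete seen in the core.c hunk earlier) runs at all; WCN3990 opts out. A trimmed illustration of that capability-flag pattern, with made-up names::

    #include <stdbool.h>
    #include <stdio.h>

    struct hw_params {
        const char *name;
        bool hw_filter_reset_required;   /* per-target quirk flag */
    };

    static void maybe_reset_rx_filter(const struct hw_params *hw,
                                      bool normal_boot)
    {
        /* Only targets that need the workaround pay for it. */
        if (hw->hw_filter_reset_required && normal_boot)
            printf("%s: resetting hw rx filter\n", hw->name);
        else
            printf("%s: skipping rx filter reset\n", hw->name);
    }

    int main(void)
    {
        struct hw_params qca988x = { "qca988x", true };
        struct hw_params wcn3990 = { "wcn3990", false };

        maybe_reset_rx_filter(&qca988x, true);
        maybe_reset_rx_filter(&wcn3990, true);
        return 0;
    }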
+ */ + bool hw_filter_reset_required; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 90f9372dec2548f4eb3fc671fd63d8e12dc4c238..9ac16be41841dd478a800e6f65aecf9ced8329c9 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -18,6 +18,7 @@ #include "mac.h" +#include #include #include #include @@ -967,7 +968,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) if (time_left == 0) return -ETIMEDOUT; - return 0; + return ar->last_wmi_vdev_start_status; } static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) @@ -1624,6 +1625,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) if (arvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; + /* For mesh, probe response and beacon share the same template */ + if (ieee80211_vif_is_mesh(vif)) + return 0; + prb = ieee80211_proberesp_get(hw, vif); if (!prb) { ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); @@ -3646,7 +3651,7 @@ static int ath10k_mac_tx(struct ath10k *ar, struct ieee80211_vif *vif, enum ath10k_hw_txrx_mode txmode, enum ath10k_mac_tx_path txpath, - struct sk_buff *skb) + struct sk_buff *skb, bool noque_offchan) { struct ieee80211_hw *hw = ar->hw; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -3674,10 +3679,10 @@ static int ath10k_mac_tx(struct ath10k *ar, } } - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { if (!ath10k_mac_tx_frm_has_freq(ar)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", - skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n", + skb, skb->len); skb_queue_tail(&ar->offchan_tx_queue, skb); ieee80211_queue_work(hw, &ar->offchan_tx_work); @@ -3739,8 +3744,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", - skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n", + skb, skb->len); hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); @@ -3786,7 +3791,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true); if (ret) { ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", ret); @@ -3796,8 +3801,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) time_left = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); if (time_left == 0) - ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", - skb); + ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n", + skb, skb->len); if (!peer && tmp_peer_created) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); @@ -3839,12 +3844,17 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) ar->running_fw->fw_file.fw_features)) { paddr = dma_map_single(ar->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (!paddr) + if (dma_mapping_error(ar->dev, paddr)) { + ieee80211_free_txskb(ar->hw, skb); continue; + } ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); if (ret) { ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", ret); + /* remove this msdu from idr tracking */ + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); + dma_unmap_single(ar->dev, paddr, skb->len, DMA_FROM_DEVICE); 
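The mgmt-over-WMI hunk above replaces the bogus "if (!paddr)" test with dma_mapping_error(), frees the skb when the mapping fails, and on a failed send unmaps the buffer and drops the IDR bookkeeping before freeing. A compressed kernel-context sketch of that shape; it is not compilable on its own, and the helpers other than the DMA API calls are placeholders, not driver functions::

    static int send_mgmt_frame(struct device *dev, struct sk_buff *skb)
    {
        dma_addr_t paddr;
        int ret;

        paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, paddr)) {  /* never compare paddr to 0 */
            free_frame(skb);                  /* placeholder free path */
            return -ENOMEM;
        }

        ret = firmware_send(skb, paddr);      /* placeholder for the WMI send */
        if (ret) {
            drop_tx_cookie(skb);              /* undo the idr tracking */
            dma_unmap_single(dev, paddr, skb->len, DMA_TO_DEVICE);
            free_frame(skb);
        }
        return ret;
    }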
ieee80211_free_txskb(ar->hw, skb); @@ -3993,7 +4003,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); if (unlikely(ret)) { ath10k_warn(ar, "failed to push frame: %d\n", ret); @@ -4275,7 +4285,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, spin_unlock_bh(&ar->htt.tx_lock); } - ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); + ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false); if (ret) { ath10k_warn(ar, "failed to transmit frame: %d\n", ret); if (is_htt) { @@ -4681,6 +4691,14 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err_core_stop; } + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { + ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); + if (ret) { + ath10k_err(ar, "failed to set prob req oui: %i\n", ret); + goto err_core_stop; + } + } + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { ret = ath10k_wmi_adaptive_qcs(ar, true); if (ret) { @@ -5622,7 +5640,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_MCAST_RATE && - !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) { + !ath10k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; rateidx = vif->bss_conf.mcast_rate[band] - 1; @@ -8359,6 +8377,7 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; } + wiphy_read_of_freq_limits(ar->hw->wiphy); ath10k_mac_setup_ht_vht_cap(ar); ar->hw->wiphy->interface_modes = @@ -8545,12 +8564,6 @@ int ath10k_mac_register(struct ath10k *ar) } if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { - ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); - if (ret) { - ath10k_err(ar, "failed to set prob req oui: %i\n", ret); - goto err_dfs_detector_exit; - } - ar->hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index af2cf55c4c1e631ea075e5baa5742651b77435c1..2a503aacf0c645b6477d10fc452c355b5f996637 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1054,10 +1054,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; - u32 ce_data; /* Host buffer address in CE space */ dma_addr_t ce_data_base = 0; int i; @@ -1071,9 +1070,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, - orig_nbytes, + alloc_nbytes, &ce_data_base, GFP_ATOMIC); if (!data_buf) { @@ -1081,9 +1081,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, goto done; } - /* Copy caller's data to allocated DMA buf */ - memcpy(data_buf, data, orig_nbytes); - /* * The address supplied by the caller is in the * Target CPU virtual address space. 
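The diag-write hunks here stop allocating a coherent buffer as large as the whole request: at most DIAG_TRANSFER_LIMIT bytes are allocated, the caller's data is copied into the bounce buffer one chunk per loop iteration, and the source pointer is advanced instead of a CE-space cursor. A user-space analogue of the chunked copy, with an arbitrary limit value::

    #include <stdlib.h>
    #include <string.h>

    #define TRANSFER_LIMIT 2048   /* stand-in for DIAG_TRANSFER_LIMIT */

    static int diag_write(const unsigned char *data, size_t nbytes)
    {
        size_t alloc = nbytes < TRANSFER_LIMIT ? nbytes : TRANSFER_LIMIT;
        unsigned char *bounce = malloc(alloc);   /* small, fixed-cap buffer */
        size_t remaining = nbytes;

        if (!bounce)
            return -1;

        while (remaining) {
            size_t chunk = remaining < TRANSFER_LIMIT ? remaining
                                                      : TRANSFER_LIMIT;

            memcpy(bounce, data, chunk);   /* copy per chunk, not up front */
            /* ... hand 'bounce' to the device for 'chunk' bytes ... */
            data += chunk;                 /* advance the caller's buffer */
            remaining -= chunk;
        }

        free(bounce);
        return 0;
    }

    int main(void)
    {
        unsigned char buf[10000] = { 0 };

        return diag_write(buf, sizeof(buf));
    }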
@@ -1096,12 +1093,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, */ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); - remaining_bytes = orig_nbytes; - ce_data = ce_data_base; + remaining_bytes = nbytes; while (remaining_bytes) { /* FIXME: check cast */ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); + /* Copy caller's data to allocated DMA buf */ + memcpy(data_buf, data, nbytes); + /* Set up to receive directly into Target(!) address */ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) @@ -1111,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, + ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done; @@ -1152,12 +1151,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, remaining_bytes -= nbytes; address += nbytes; - ce_data += nbytes; + data += nbytes; } done: if (data_buf) { - dma_free_coherent(ar->dev, orig_nbytes, data_buf, + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); } @@ -2053,6 +2052,11 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); + ath10k_pci_irq_disable(ar); + ath10k_pci_irq_sync(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); + /* Most likely the device has HTT Rx ring configured. The only way to * prevent the device from accessing (and possible corrupting) host * memory is to reset the chip now. @@ -2066,11 +2070,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) */ ath10k_pci_safe_chip_reset(ar); - ath10k_pci_irq_disable(ar); - ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); - napi_synchronize(&ar->napi); - napi_disable(&ar->napi); spin_lock_irqsave(&ar_pci->ps_lock, flags); WARN_ON(ar_pci->ps_wake_refcount > 0); diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 7f61591ce0de6b5c904fde8261c9a6689a587f5a..686759b5613f2c7f05685a8911c1f3a882e55732 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -613,6 +613,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, full_len, last_in_bundle, last_in_bundle); + if (ret) { + ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret); + goto err; + } } ar_sdio->n_rx_pkts = i; @@ -2069,6 +2073,9 @@ static void ath10k_sdio_remove(struct sdio_func *func) cancel_work_sync(&ar_sdio->wr_async_work); ath10k_core_unregister(ar); ath10k_core_destroy(ar); + + flush_workqueue(ar_sdio->workqueue); + destroy_workqueue(ar_sdio->workqueue); } static const struct sdio_device_id ath10k_sdio_devices[] = { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index fa1843a7e0fdaaec3e74a96d9a8b359a921c9746..e2d78f77edb70cc681e83418228e5eb7bd1619be 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1190,7 +1190,7 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar) return 0; err_clock_config: - for (; i >= 0; i--) { + for (i = i - 1; i >= 0; i--) { clk_info = &ar_snoc->clk[i]; if (!clk_info->handle) diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index cda164f6e9f62f87e36c40c96f5fb7586d87a94c..6c47e4b6aa6cdb537d6b19cfd465d72f73b34f1b 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ 
b/drivers/net/wireless/ath/ath10k/txrx.c @@ -101,6 +101,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); + info->status.rates[0].idx = -1; + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) @@ -156,6 +158,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id) { struct ath10k_peer *peer; + if (peer_id >= BITS_PER_TYPE(peer->peer_ids)) + return NULL; + lockdep_assert_held(&ar->data_lock); list_for_each_entry(peer, &ar->peers, list) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index d4803ff5a78a75eb7e2d63bc77a1623f146e283e..a8c8ecc11b773c0784744aa0e3827728919b95e5 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -49,6 +49,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) struct ath10k_urb_context *urb_context = NULL; unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, @@ -66,6 +70,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, { unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; @@ -446,6 +454,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk transmit failed: %d\n", ret); usb_unanchor_urb(urb); + usb_free_urb(urb); ret = -EINVAL; goto err_free_urb_to_pipe; } @@ -1025,7 +1034,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, } /* TODO: remove this once USB support is fully implemented */ - ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n"); + ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n"); return 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 7fd63bbf8e2408372526924b66d9f825ea899e33..b6cd33fa79f87e07fd76fdbe6ec861d3077ee947 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -139,6 +139,7 @@ struct wmi_ops { struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr); + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); @@ -431,6 +432,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) return ar->wmi.ops->get_txbf_conf_scheme(ar); } +static inline int +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) +{ + if (!ar->wmi.ops->cleanup_mgmt_tx_send) + return -EOPNOTSUPP; + + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); +} + static inline int ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, dma_addr_t paddr) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index cdc1e64d52ad50e30dd5fe59a0e300635eda3f9d..930d1e51a1040249dbf1014879fb7ce2f4bfa015 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -678,6 +678,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct 
ath10k *ar, struct sk_buff *skb, } ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } arg->desc_id = ev->desc_id; arg->status = ev->status; @@ -2638,6 +2642,23 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) return skb; } +static int +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, + struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; + struct ath10k_wmi *wmi = &ar->wmi; + + spin_lock_bh(&ar->data_lock); + pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + spin_unlock_bh(&ar->data_lock); + + kfree(pkt_addr); + + return 0; +} + static int ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr) @@ -2710,6 +2731,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if (desc_id < 0) goto err_free_skb; + cb->msdu_id = desc_id; + ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); @@ -3949,6 +3972,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index fd612d2905b055f5654019a5ee6b3d4820ad1086..0208868673024e875510d92a4c4dfcd81ae14f3a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1869,6 +1869,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) if (ret) dev_kfree_skb_any(skb); + if (ret == -EAGAIN) { + ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n", + cmd_id); + queue_work(ar->workqueue, &ar->restart_work); + } + return ret; } @@ -2336,7 +2342,13 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_FROM_DEVICE); info = IEEE80211_SKB_CB(msdu); - info->flags |= status; + kfree(pkt_addr); + + if (status) + info->flags &= ~IEEE80211_TX_STAT_ACK; + else + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(ar->hw, msdu); ret = 0; @@ -2476,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) status->freq, status->band, status->signal, status->rate_idx); - ieee80211_rx(ar->hw, skb); + ieee80211_rx_ni(ar->hw, skb); + return 0; } @@ -3236,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) { struct wmi_vdev_start_ev_arg arg = {}; int ret; + u32 status; ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + ar->last_wmi_vdev_start_status = 0; + ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); if (ret) { ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); - return; + ar->last_wmi_vdev_start_status = ret; + goto out; } - if (WARN_ON(__le32_to_cpu(arg.status))) - return; + status = __le32_to_cpu(arg.status); + if (WARN_ON_ONCE(status)) { + ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n", + status, (status == WMI_VDEV_START_CHAN_INVALID) ? 
+ "chan-invalid" : "unknown"); + /* Setup is done one way or another though, so we should still + * do the completion, so don't return here. + */ + ar->last_wmi_vdev_start_status = -EINVAL; + } +out: complete(&ar->vdev_setup_done); } @@ -4774,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar, } } + if (pream == -1) { + ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n", + pream_idx, __le32_to_cpu(ev->chan_freq)); + tpc = 0; + goto out; + } + if (pream == 4) tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]); @@ -9163,6 +9196,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_FROM_DEVICE); ieee80211_free_txskb(ar->hw, msdu); + kfree(pkt_addr); return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 36220258e3c7e686af0ffb5a2ab82869b8be5b01..e341cfb3fcc26845bb579ee1f4dbfa9088b1ae4f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6642,11 +6642,17 @@ struct wmi_ch_info_ev_arg { __le32 rx_frame_count; }; +/* From 10.4 firmware, not sure all have the same values. */ +enum wmi_vdev_start_status { + WMI_VDEV_START_OK = 0, + WMI_VDEV_START_CHAN_INVALID, +}; + struct wmi_vdev_start_ev_arg { __le32 vdev_id; __le32 req_id; __le32 resp_type; /* %WMI_VDEV_RESP_ */ - __le32 status; + __le32 status; /* See wmi_vdev_start_status enum above */ }; struct wmi_peer_kick_ev_arg { diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 16e052d02c94088006e380dd95bc9bec1ba28f88..0f4836fc3b7c1f74efe11f838cabc60633c74724 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -522,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } break; case DISABLE_KEY: - ath_key_delete(common, key); + ath_key_delete(common, key->hw_key_idx); break; default: ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index e121187f371ff5e023b5efda39af6847b5a2c47a..d7c626d9594e153f8777427a21c24fb34fc48d2c 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -939,7 +939,7 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar, else ssid_list[i].flag = ANY_SSID_FLAG; - if (n_match_ssid == 0) + if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0) ssid_list[i].flag |= MATCH_SSID_FLAG; } @@ -1093,7 +1093,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) { for (i = 0; i < vif->scan_req->n_ssids; i++) { ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i + 1, DISABLE_SSID_FLAG, + i, DISABLE_SSID_FLAG, 0, NULL); } } diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 4defb7a0330f430c181c5873fee7376401e92611..53b66e9434c99845dbbaa8b31aaa0e74e0a67fc8 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) struct ath6kl_urb_context *urb_context = NULL; unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = @@ -150,6 +154,10 @@ static void 
ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, { unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 777acc564ac9917d331de8f2ef87657199ce482a..bc7916f2add0971adb18b7fa3501b43a6a8bd7bf 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap, return -EINVAL; ev = (struct wmi_pstream_timeout_event *) datap; + if (ev->traffic_class >= WMM_NUM_AC) { + ath6kl_err("invalid traffic class: %d\n", ev->traffic_class); + return -EINVAL; + } /* * When the pstream (fat pipe == AC) timesout, it means there were @@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len, return -EINVAL; reply = (struct wmi_cac_event *) datap; + if (reply->ac >= WMM_NUM_AC) { + ath6kl_err("invalid AC: %d\n", reply->ac); + return -EINVAL; + } if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) && (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) { @@ -2635,7 +2643,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, u16 active_tsids = 0; int ret; - if (traffic_class > 3) { + if (traffic_class >= WMM_NUM_AC) { ath6kl_err("invalid traffic class: %d\n", traffic_class); return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index f019a20e5a1f8b01996d89f9bd8f7d85a1ae0b5a..983e1abbd9e43d26178f18c15c63b109642f33eb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) { - u32 data, ko, kg; + u32 data = 0, ko, kg; if (!AR_SREV_9462_20_OR_LATER(ah)) return; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 21ba20981a80bfd4b96133c0b0a9ecd6c51f6c41..0fca44e91a71207ea03f9453d14c413a39d5b23c 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -272,7 +272,7 @@ struct ath_node { #endif u8 key_idx[4]; - u32 ackto; + int ackto; struct list_head list; }; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 440e16e641e4a6b774b0d6b715f1b059c5727198..f75eb068e6cfc4345d0746a4ba930f95d11f1a31 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -411,7 +411,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs, ath_dbg(common, SPECTRAL_SCAN, "Calculated new upper max 0x%X at %i\n", - tmp_mag, i); + tmp_mag, fft_sample_40.upper_max_index); } else for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) { if (fft_sample_40.data[i] == (upper_mag >> max_exp)) diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index 7334c9b09e82ce7aedd0f15c09d0f113537b8cf9..6e236a4854311cc972b97ccda8e782392bf9a2e3 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -29,9 +29,13 @@ * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation * */ -static inline u32 ath_dynack_ewma(u32 old, u32 new) +static inline int 
ath_dynack_ewma(int old, int new) { - return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV; + if (old > 0) + return (new * (EWMA_DIV - EWMA_LEVEL) + + old * EWMA_LEVEL) / EWMA_DIV; + else + return new; } /** @@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac) */ static void ath_dynack_compute_ackto(struct ath_hw *ah) { - struct ath_node *an; - u32 to = 0; - struct ath_dynack *da = &ah->dynack; struct ath_common *common = ath9k_hw_common(ah); + struct ath_dynack *da = &ah->dynack; + struct ath_node *an; + int to = 0; list_for_each_entry(an, &da->nodes, list) if (an->ackto > to) @@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah) an->ackto = ath_dynack_ewma(an->ackto, ackto); ath_dbg(ath9k_hw_common(ah), DYNACK, - "%pM to %u\n", dst, an->ackto); + "%pM to %d [%u]\n", dst, + an->ackto, ackto); if (time_is_before_jiffies(da->lto)) { ath_dynack_compute_ackto(ah); da->lto = jiffies + COMPUTE_TO; @@ -166,10 +171,12 @@ static void ath_dynack_compute_to(struct ath_hw *ah) * @ah: ath hw * @skb: socket buffer * @ts: tx status info + * @sta: station pointer * */ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts) + struct ath_tx_status *ts, + struct ieee80211_sta *sta) { u8 ridx; struct ieee80211_hdr *hdr; @@ -177,7 +184,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled) + if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK)) return; spin_lock_bh(&da->qlock); @@ -187,11 +194,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, /* late ACK */ if (ts->ts_status & ATH9K_TXERR_XRETRY) { if (ieee80211_is_assoc_req(hdr->frame_control) || - ieee80211_is_assoc_resp(hdr->frame_control)) { + ieee80211_is_assoc_resp(hdr->frame_control) || + ieee80211_is_auth(hdr->frame_control)) { ath_dbg(common, DYNACK, "late ack\n"); + ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2); ath9k_hw_set_ack_timeout(ah, LATEACK_TO); ath9k_hw_set_cts_timeout(ah, LATEACK_TO); + if (sta) { + struct ath_node *an; + + an = (struct ath_node *)sta->drv_priv; + an->ackto = -1; + } da->lto = jiffies + LATEACK_DELAY; } @@ -251,7 +266,7 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled) + if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1)) return; spin_lock_bh(&da->qlock); diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h index 6d7bef976742c1a87b498c35927e27476c314853..cf60224d40dff336986e3ed6bcc94b66e55d7b6c 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.h +++ b/drivers/net/wireless/ath/ath9k/dynack.h @@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an); void ath_dynack_init(struct ath_hw *ah); void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts); void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts); + struct ath_tx_status *ts, + struct ieee80211_sta *sta); #else static inline void ath_dynack_init(struct ath_hw *ah) {} static inline void ath_dynack_node_init(struct ath_hw *ah, @@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah, 
struct sk_buff *skb, u32 ts) {} static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts) {} + struct ath_tx_status *ts, + struct ieee80211_sta *sta) {} #endif #endif /* DYNACK_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 9f64e32381f9451cc2cb2949c03a697172758685..5190cc859e44c66cf06c5de0e1a8d30fd59a8adb 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -325,11 +325,11 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb) } #ifdef CONFIG_ATH9K_HTC_DEBUGFS - -#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) -#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) -#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) -#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) +#define __STAT_SAFE(expr) (hif_dev->htc_handle->drv_priv ? (expr) : 0) +#define TX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) +#define TX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) +#define RX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) +#define RX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) #define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ #define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) @@ -509,6 +509,7 @@ struct ath9k_htc_priv { unsigned long ps_usecount; bool ps_enabled; bool ps_idle; + bool initialized; #ifdef CONFIG_MAC80211_LEDS enum led_brightness brightness; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 214c68269a69f794c32163e6fcf0f8acfc2ced3f..590ba96c855dbad18d3fe6a4a5ad5780a1fb5b9d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -945,7 +945,6 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, priv->hw = hw; priv->htc = htc_handle; priv->dev = dev; - htc_handle->drv_priv = priv; SET_IEEE80211_DEV(hw, priv->dev); ret = ath9k_htc_wait_for_target(priv); @@ -966,6 +965,12 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, if (ret) goto err_init; + htc_handle->drv_priv = priv; + + /* Allow ath9k_wmi_event_tasklet() to operate. 
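Returning briefly to the ath9k dynack hunks just above: ackto becomes a signed value so a station can be flagged with -1 after a late ACK, and the EWMA now seeds itself with the first sample instead of averaging against zero. A worked stand-alone example; the 96/128 weights are an assumption and may differ from the driver's EWMA_LEVEL/EWMA_DIV constants::

    #include <stdio.h>

    #define EWMA_LEVEL 96     /* weight of the old value (assumed) */
    #define EWMA_DIV   128

    /* Seed with the first sample; afterwards blend 96/128 old + 32/128 new. */
    static int ewma(int old, int sample)
    {
        if (old > 0)
            return (sample * (EWMA_DIV - EWMA_LEVEL) +
                    old * EWMA_LEVEL) / EWMA_DIV;
        return sample;
    }

    int main(void)
    {
        int ackto = -1;                 /* reset marker, as after a late ACK */
        int samples[] = { 400, 420, 380, 500 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            ackto = ewma(ackto, samples[i]);
            printf("sample %d -> ackto %d\n", samples[i], ackto);
        }
        return 0;
    }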
*/ + smp_wmb(); + priv->initialized = true; + return 0; err_init: diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index a82ad739ab80653efd17b2eae0929fb4c224432f..16a7bae62b7d36b1c7a774adc0275ef60aeca559 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1460,7 +1460,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, } break; case DISABLE_KEY: - ath_key_delete(common, key); + ath_key_delete(common, key->hw_key_idx); break; default: ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 799010ed04e0eedbde2be23824a9cb6b513984bf..2bca38e39721c38a5cc07f2ff19cb5a0b7cfe9b3 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -647,9 +647,10 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event) struct ath9k_htc_tx_event *tx_pend; int i; - for (i = 0; i < txs->cnt; i++) { - WARN_ON(txs->cnt > HTC_MAX_TX_STATUS); + if (WARN_ON_ONCE(txs->cnt > HTC_MAX_TX_STATUS)) + return; + for (i = 0; i < txs->cnt; i++) { __txs = &txs->txstatus[i]; skb = ath9k_htc_tx_get_packet(priv, __txs); @@ -808,6 +809,7 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv) skb_queue_head_init(&priv->tx.data_vi_queue); skb_queue_head_init(&priv->tx.data_vo_queue); skb_queue_head_init(&priv->tx.tx_failed); + return 0; } @@ -973,6 +975,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, struct ath_htc_rx_status *rxstatus; struct ath_rx_status rx_stats; bool decrypt_error = false; + __be16 rs_datalen; + bool is_phyerr; if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", @@ -982,11 +986,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, rxstatus = (struct ath_htc_rx_status *)skb->data; - if (be16_to_cpu(rxstatus->rs_datalen) - - (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) { + rs_datalen = be16_to_cpu(rxstatus->rs_datalen); + if (unlikely(rs_datalen - + (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) { ath_err(common, "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n", - rxstatus->rs_datalen, skb->len); + rs_datalen, skb->len); + goto rx_next; + } + + is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY; + /* + * Discard zero-length packets and packets smaller than an ACK + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { + ath_warn(common, + "Short RX data len, dropping (dlen: %d)\n", + rs_datalen); goto rx_next; } @@ -1011,7 +1028,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, * Process PHY errors and return so that the packet * can be dropped. */ - if (rx_stats.rs_status & ATH9K_RXERR_PHY) { + if (unlikely(is_phyerr)) { /* TODO: Not using DFS processing now. 
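The htc_drv_txrx hunk above tightens RX validation: the length the hardware reports must match the skb payload, and zero-length or shorter-than-ACK frames are dropped unless they are PHY-error reports (short radar pulses legitimately carry only a few bytes). A stand-alone sketch of the same checks with a simplified header layout::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RX_HDR_SIZE 8        /* simplified stand-in for the HTC RX header */
    #define MIN_FRAME_LEN 10     /* roughly the size of an ACK frame */

    struct rx_status {
        uint16_t datalen;        /* length the hardware claims */
        bool phy_error;
    };

    static bool rx_frame_ok(const struct rx_status *rs, size_t skb_len)
    {
        if (skb_len < RX_HDR_SIZE)
            return false;                      /* truncated header */

        if (rs->datalen != skb_len - RX_HDR_SIZE)
            return false;                      /* corrupted length field */

        /* Zero-length or sub-ACK frames are bogus unless they report a
         * PHY error (e.g. a short radar pulse).
         */
        if (!rs->datalen || (rs->datalen < MIN_FRAME_LEN && !rs->phy_error))
            return false;

        return true;
    }

    int main(void)
    {
        struct rx_status radar = { .datalen = 3, .phy_error = true };
        struct rx_status runt  = { .datalen = 3, .phy_error = false };

        printf("radar ok: %d, runt ok: %d\n",
               rx_frame_ok(&radar, 3 + RX_HDR_SIZE),
               rx_frame_ok(&runt, 3 + RX_HDR_SIZE));
        return 0;
    }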
*/ if (ath_cmn_process_fft(&priv->spec_priv, hdr, &rx_stats, rx_status->mactime)) { diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 1bf63a4efb4c84f09fe0ae6b213df849202bade7..d091c8ebdcf08244796021150d9d261ee43d2564 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -170,6 +170,7 @@ static int htc_config_pipe_credits(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -205,6 +206,7 @@ static int htc_setup_complete(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -277,6 +279,7 @@ int htc_connect_service(struct htc_target *target, if (!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index bb319f22761fbe46379837ed21dee4f343475dc5..b4f7ee423d4072f1a3baaf6ac11b548745de7dc8 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah, /* Chip Revisions */ /******************/ -static void ath9k_hw_read_revisions(struct ath_hw *ah) +static bool ath9k_hw_read_revisions(struct ath_hw *ah) { + u32 srev; u32 val; if (ah->get_mac_revision) @@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) val = REG_READ(ah, AR_SREV); ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); } - return; + return true; case AR9300_DEVID_AR9340: ah->hw_version.macVersion = AR_SREV_VERSION_9340; - return; + return true; case AR9300_DEVID_QCA955X: ah->hw_version.macVersion = AR_SREV_VERSION_9550; - return; + return true; case AR9300_DEVID_AR953X: ah->hw_version.macVersion = AR_SREV_VERSION_9531; - return; + return true; case AR9300_DEVID_QCA956X: ah->hw_version.macVersion = AR_SREV_VERSION_9561; - return; + return true; } - val = REG_READ(ah, AR_SREV) & AR_SREV_ID; + srev = REG_READ(ah, AR_SREV); + + if (srev == -EIO) { + ath_err(ath9k_hw_common(ah), + "Failed to read SREV register"); + return false; + } + + val = srev & AR_SREV_ID; if (val == 0xFF) { - val = REG_READ(ah, AR_SREV); + val = srev; ah->hw_version.macVersion = (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); @@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ah->is_pciexpress = true; } + + return true; } /************************************/ @@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah) struct ath_common *common = ath9k_hw_common(ah); int r = 0; - ath9k_hw_read_revisions(ah); + if (!ath9k_hw_read_revisions(ah)) { + ath_err(common, "Could not read hardware revisions"); + return -EOPNOTSUPP; + } switch (ah->hw_version.macVersion) { case AR_SREV_VERSION_5416_PCI: diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 68956cdc8c9ae21c0d974466ced9c57b85294ce9..4b5687b6c0c9a4faa957ac031b68ad3d4e8303d2 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -818,6 +818,7 @@ struct ath_hw { struct ath9k_pacal_info pacal_info; struct ar5416Stats 
stats; struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; + DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX); enum ath9k_int imask; u32 imrs2_reg; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index c070a9e51ebf503dd8afbd4a8c92033ee0290a99..fae572b384169cb27598a271056d418669e4c2b3 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc) ret = ath9k_eeprom_request(sc, eeprom_name); if (ret) return ret; + + ah->ah_flags &= ~AH_USE_EEPROM; + ah->ah_flags |= AH_NO_EEP_SWAP; } mac = of_get_mac_address(np); if (mac) ether_addr_copy(common->macaddr, mac); - ah->ah_flags &= ~AH_USE_EEPROM; - ah->ah_flags |= AH_NO_EEP_SWAP; - return 0; } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1049773378f274e2f8c4ccf1050cb58f50858366..577f5f79b44b8cd76aaf8bd5188d25364e3f2e65 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -818,12 +818,80 @@ static void ath9k_tx(struct ieee80211_hw *hw, ieee80211_free_txskb(hw, skb); } +static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) +{ + struct ath_buf *bf; + struct ieee80211_tx_info *txinfo; + struct ath_frame_info *fi; + + list_for_each_entry(bf, txq_list, list) { + if (bf->bf_state.stale || !bf->bf_mpdu) + continue; + + txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); + fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + if (fi->keyix == keyix) + return true; + } + + return false; +} + +static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix) +{ + struct ath_hw *ah = sc->sc_ah; + int i; + struct ath_txq *txq; + bool key_in_use = false; + + for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) { + if (!ATH_TXQ_SETUP(sc, i)) + continue; + txq = &sc->tx.txq[i]; + if (!txq->axq_depth) + continue; + if (!ath9k_hw_numtxpending(ah, txq->axq_qnum)) + continue; + + ath_txq_lock(sc, txq); + key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix); + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { + int idx = txq->txq_tailidx; + + while (!key_in_use && + !list_empty(&txq->txq_fifo[idx])) { + key_in_use = ath9k_txq_list_has_key( + &txq->txq_fifo[idx], keyix); + INCR(idx, ATH_TXFIFO_DEPTH); + } + } + ath_txq_unlock(sc, txq); + } + + return key_in_use; +} + +static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if (!test_bit(keyix, ah->pending_del_keymap) || + ath9k_txq_has_key(sc, keyix)) + return; + + /* No more TXQ frames point to this key cache entry, so delete it. */ + clear_bit(keyix, ah->pending_del_keymap); + ath_key_delete(common, keyix); +} + static void ath9k_stop(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); bool prev_idle; + int i; ath9k_deinit_channel_context(sc); @@ -891,6 +959,14 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); + for (i = 0; i < ATH_KEYMAX; i++) + ath9k_pending_key_del(sc, i); + + /* Clear key cache entries explicitly to get rid of any potentially + * remaining keys. 
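The ath9k key-cache changes around here defer DISABLE_KEY: if any queued TX frame still references the key-cache slot, the index is parked in pending_del_keymap (with the entry's MAC cleared) and the slot is only deleted later, once the TX queues no longer point at it or at stop time. A simplified model of that bookkeeping, with frame tracking reduced to a per-key counter rather than the driver's TXQ walk::

    #include <stdbool.h>
    #include <stdio.h>

    #define KEYMAX 128

    static bool pending_del[KEYMAX];   /* stands in for pending_del_keymap */
    static int  frames_in_flight[KEYMAX];

    static void hw_key_delete(int idx) { printf("deleting key %d\n", idx); }

    /* DISABLE_KEY: delete now if idle, otherwise park it for later. */
    static void disable_key(int idx)
    {
        if (frames_in_flight[idx]) {
            pending_del[idx] = true;   /* and clear the entry's MAC in hw */
            printf("key %d busy, deferring delete\n", idx);
        } else {
            hw_key_delete(idx);
        }
    }

    /* Called when it is safe to re-check, e.g. before installing new keys. */
    static void reap_pending_keys(void)
    {
        for (int idx = 0; idx < KEYMAX; idx++) {
            if (pending_del[idx] && !frames_in_flight[idx]) {
                pending_del[idx] = false;
                hw_key_delete(idx);
            }
        }
    }

    int main(void)
    {
        frames_in_flight[4] = 2;
        disable_key(4);            /* deferred */
        frames_in_flight[4] = 0;   /* queues drained */
        reap_pending_keys();       /* now deleted */
        return 0;
    }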
+ */ + ath9k_cmn_init_crypto(sc->sc_ah); + ath9k_ps_restore(sc); sc->ps_idle = prev_idle; @@ -1252,7 +1328,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, struct ath_node *an = &avp->mcast_node; mutex_lock(&sc->mutex); - if (IS_ENABLED(CONFIG_ATH9K_TX99)) { if (sc->cur_chan->nvifs >= 1) { mutex_unlock(&sc->mutex); @@ -1534,12 +1609,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc, { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; - struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key }; if (!an->ps_key) return; - ath_key_delete(common, &ps_key); + ath_key_delete(common, an->ps_key); an->ps_key = 0; an->key_idx[0] = 0; } @@ -1701,6 +1775,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw, if (sta) an = (struct ath_node *)sta->drv_priv; + /* Delete pending key cache entries if no more frames are pointing to + * them in TXQs. + */ + for (i = 0; i < ATH_KEYMAX; i++) + ath9k_pending_key_del(sc, i); + switch (cmd) { case SET_KEY: if (sta) @@ -1730,7 +1810,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw, } break; case DISABLE_KEY: - ath_key_delete(common, key); + if (ath9k_txq_has_key(sc, key->hw_key_idx)) { + /* Delay key cache entry deletion until there are no + * remaining TXQ frames pointing to this entry. + */ + set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap); + ath_hw_keysetmac(common, key->hw_key_idx, NULL); + } else { + ath_key_delete(common, key->hw_key_idx); + } if (an) { for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) { if (an->key_idx[i] != key->hw_key_idx) diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index ce50d8f5835e03cf22cbb7f19c2bb1bc0b61296c..95544ce05acf9d35f7dc64b8f6dd5fb7c1e32b25 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) struct sk_buff *skb; struct ath_vif *avp; - if (!sc->tx99_vif) - return NULL; - - avp = (struct ath_vif *)sc->tx99_vif->drv_priv; - skb = alloc_skb(len, GFP_KERNEL); if (!skb) return NULL; @@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); - hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + if (sc->tx99_vif) { + avp = (struct ath_vif *) sc->tx99_vif->drv_priv; + hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + } tx_info = IEEE80211_SKB_CB(skb); memset(tx_info, 0, sizeof(*tx_info)); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index d1f6710ca63bdab330f09634ea9bdb3f3b4b02d8..f054c7d1c83c1ae5d8619d5495b9ad4036658199 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -153,6 +153,12 @@ void ath9k_wmi_event_tasklet(unsigned long data) } spin_unlock_irqrestore(&wmi->wmi_lock, flags); + /* Check if ath9k_htc_probe_device() completed. 
*/ + if (!priv->initialized) { + kfree_skb(skb); + continue; + } + hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr)); @@ -336,6 +342,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); + kfree_skb(skb); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 43b6c8508e4938f0dc95d0ac345044824d2fb7b2..4b7a7fc2a0fe0d36717e2bff6a73b0f9c3e7da01 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (bf == bf->bf_lastbf) ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, - ts); + ts, sta); } ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts, @@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, memcpy(info->control.rates, bf->rates, sizeof(info->control.rates)); ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); - ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts); + ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts, + sta); } ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok); } else diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index e7c3f3b8457dfda3d03652b3e2b838c8133e5306..738f43b17e9599286aea3262c33bc7e38c3ee55f 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -128,6 +128,8 @@ static const struct usb_device_id carl9170_usb_ids[] = { }; MODULE_DEVICE_TABLE(usb, carl9170_usb_ids); +static struct usb_driver carl9170_driver; + static void carl9170_usb_submit_data_urb(struct ar9170 *ar) { struct urb *urb; @@ -966,32 +968,28 @@ static int carl9170_usb_init_device(struct ar9170 *ar) static void carl9170_usb_firmware_failed(struct ar9170 *ar) { - struct device *parent = ar->udev->dev.parent; - struct usb_device *udev; - - /* - * Store a copy of the usb_device pointer locally. - * This is because device_release_driver initiates - * carl9170_usb_disconnect, which in turn frees our - * driver context (ar). + /* Store a copies of the usb_interface and usb_device pointer locally. + * This is because release_driver initiates carl9170_usb_disconnect, + * which in turn frees our driver context (ar). */ - udev = ar->udev; + struct usb_interface *intf = ar->intf; + struct usb_device *udev = ar->udev; complete(&ar->fw_load_wait); + /* at this point 'ar' could be already freed. 
Don't use it anymore */ + ar = NULL; /* unbind anything failed */ - if (parent) - device_lock(parent); - - device_release_driver(&udev->dev); - if (parent) - device_unlock(parent); + usb_lock_device(udev); + usb_driver_release_interface(&carl9170_driver, intf); + usb_unlock_device(udev); - usb_put_dev(udev); + usb_put_intf(intf); } static void carl9170_usb_firmware_finish(struct ar9170 *ar) { + struct usb_interface *intf = ar->intf; int err; err = carl9170_parse_firmware(ar); @@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar) goto err_unrx; complete(&ar->fw_load_wait); - usb_put_dev(ar->udev); + usb_put_intf(intf); return; err_unrx: @@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf, return PTR_ERR(ar); udev = interface_to_usbdev(intf); - usb_get_dev(udev); ar->udev = udev; ar->intf = intf; ar->features = id->driver_info; @@ -1072,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf, ar->usb_ep_cmd_is_bulk = true; } + /* Verify that all expected endpoints are present */ + if (ar->usb_ep_cmd_is_bulk) { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } else { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } + + if (err) { + carl9170_free(ar); + return err; + } + usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); @@ -1094,15 +1123,14 @@ static int carl9170_usb_probe(struct usb_interface *intf, atomic_set(&ar->rx_anch_urbs, 0); atomic_set(&ar->rx_pool_urbs, 0); - usb_get_dev(ar->udev); + usb_get_intf(intf); carl9170_set_state(ar, CARL9170_STOPPED); err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME, &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2); if (err) { - usb_put_dev(udev); - usb_put_dev(udev); + usb_put_intf(intf); carl9170_free(ar); } return err; @@ -1131,7 +1159,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf) carl9170_release_firmware(ar); carl9170_free(ar); - usb_put_dev(udev); } #ifdef CONFIG_PM diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index d52b31b45df7d1fd20f301573e7e5f0379df02b5..a274eb0d19688f8c8604647d03c38483ce07d923 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = { JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false), JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false), JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false), - JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false), + JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false), JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false), JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false), JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false), diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index 1816b4e7dc26409352da12b166aac3108c4acd76..61b59a804e308d8358903d3f81950a85c97afdf0 100644 --- a/drivers/net/wireless/ath/key.c +++ 
b/drivers/net/wireless/ath/key.c @@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry) } EXPORT_SYMBOL(ath_hw_keyreset); -static bool ath_hw_keysetmac(struct ath_common *common, - u16 entry, const u8 *mac) +bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) { u32 macHi, macLo; u32 unicast_flag = AR_KEYTABLE_VALID; @@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common, return true; } +EXPORT_SYMBOL(ath_hw_keysetmac); static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, const struct ath_keyval *k, @@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config); /* * Delete Key. */ -void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key) +void ath_key_delete(struct ath_common *common, u8 hw_key_idx) { - ath_hw_keyreset(common, key->hw_key_idx); - if (key->hw_key_idx < IEEE80211_WEP_NKID) + /* Leave CCMP and TKIP (main key) configured to avoid disabling + * encryption for potentially pending frames already in a TXQ with the + * keyix pointing to this key entry. Instead, only clear the MAC address + * to prevent RX processing from using this key cache entry. + */ + if (test_bit(hw_key_idx, common->ccmp_keymap) || + test_bit(hw_key_idx, common->tkip_keymap)) + ath_hw_keysetmac(common, hw_key_idx, NULL); + else + ath_hw_keyreset(common, hw_key_idx); + if (hw_key_idx < IEEE80211_WEP_NKID) return; - clear_bit(key->hw_key_idx, common->keymap); - clear_bit(key->hw_key_idx, common->ccmp_keymap); - if (key->cipher != WLAN_CIPHER_SUITE_TKIP) + clear_bit(hw_key_idx, common->keymap); + clear_bit(hw_key_idx, common->ccmp_keymap); + if (!test_bit(hw_key_idx, common->tkip_keymap)) return; - clear_bit(key->hw_key_idx + 64, common->keymap); + clear_bit(hw_key_idx + 64, common->keymap); - clear_bit(key->hw_key_idx, common->tkip_keymap); - clear_bit(key->hw_key_idx + 64, common->tkip_keymap); + clear_bit(hw_key_idx, common->tkip_keymap); + clear_bit(hw_key_idx + 64, common->tkip_keymap); if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) { - ath_hw_keyreset(common, key->hw_key_idx + 32); - clear_bit(key->hw_key_idx + 32, common->keymap); - clear_bit(key->hw_key_idx + 64 + 32, common->keymap); + ath_hw_keyreset(common, hw_key_idx + 32); + clear_bit(hw_key_idx + 32, common->keymap); + clear_bit(hw_key_idx + 64 + 32, common->keymap); - clear_bit(key->hw_key_idx + 32, common->tkip_keymap); - clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap); + clear_bit(hw_key_idx + 32, common->tkip_keymap); + clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap); } } EXPORT_SYMBOL(ath_key_delete); diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index f79c337105cb465f6895f39e7d37d7d1d624db74..1fc2bf66845c7789610ad859de6d722d930c887a 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1131,7 +1131,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, params->wait); out: + /* when the sent packet was not acked by receiver(ACK=0), rc will + * be -EAGAIN. In this case this function needs to return success, + * the ACK=0 will be reflected in tx_status. + */ tx_status = (rc == 0); + rc = (rc == -EAGAIN) ? 0 : rc; cfg80211_mgmt_tx_status(wdev, cookie ? 
*cookie : 0, buf, len, tx_status, GFP_KERNEL); @@ -1420,6 +1425,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, u8 *buf, *dpos; const u8 *spos; + if (!ies1) + ies1_len = 0; + + if (!ies2) + ies2_len = 0; + if (ies1_len == 0 && ies2_len == 0) { *merged_ies = NULL; *merged_len = 0; @@ -1429,17 +1440,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); if (!buf) return -ENOMEM; - memcpy(buf, ies1, ies1_len); + if (ies1) + memcpy(buf, ies1, ies1_len); dpos = buf + ies1_len; spos = ies2; - while (spos + 1 < ies2 + ies2_len) { + while (spos && (spos + 1 < ies2 + ies2_len)) { /* IE tag at offset 0, length at offset 1 */ u16 ielen = 2 + spos[1]; if (spos + ielen > ies2 + ies2_len) break; if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && - !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { + (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len, + spos, ielen))) { memcpy(dpos, spos, ielen); dpos += ielen; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 51c3330bc316f8c47ff005a55e16a46ec234ef10..44296c015925200864bb91b3ebe361802015a70b 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -662,10 +662,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, enum { max_count = 4096 }; struct wil_blob_wrapper *wil_blob = file->private_data; struct wil6210_priv *wil = wil_blob->wil; - loff_t pos = *ppos; + loff_t aligned_pos, pos = *ppos; size_t available = wil_blob->blob.size; void *buf; - size_t ret; + size_t unaligned_bytes, aligned_count, ret; int rc; if (test_bit(wil_status_suspending, wil_blob->wil->status) || @@ -683,7 +683,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, if (count > max_count) count = max_count; - buf = kmalloc(count, GFP_KERNEL); + /* set pos to 4 bytes aligned */ + unaligned_bytes = pos % 4; + aligned_pos = pos - unaligned_bytes; + aligned_count = count + unaligned_bytes; + + buf = kmalloc(aligned_count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -694,9 +699,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, } wil_memcpy_fromio_32(buf, (const void __iomem *) - wil_blob->blob.data + pos, count); + wil_blob->blob.data + aligned_pos, aligned_count); - ret = copy_to_user(user_buf, buf, count); + ret = copy_to_user(user_buf, buf + unaligned_bytes, count); wil_pm_runtime_put(wil); @@ -1263,6 +1268,9 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) int num_active; int num_free; + if (!rbm->buff_arr) + return -EINVAL; + seq_printf(s, " size = %zu\n", rbm->size); seq_printf(s, " free_list_empty_cnt = %lu\n", rbm->free_list_empty_cnt); diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 5d287a8e1b458a8aca674275a55c007df944e4c3..0655cd8845142c244a260252d01b3d74e003d062 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil) static irqreturn_t wil6210_irq_rx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - u32 isr = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, ICR)); + u32 isr; bool need_unmask = true; + wil6210_mask_irq_rx(wil); + + isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_RX_ICR) + + 
offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (unlikely(!isr)) { wil_err_ratelimited(wil, "spurious IRQ: RX\n"); + wil6210_unmask_irq_rx(wil); return IRQ_NONE; } - wil6210_mask_irq_rx(wil); - /* RX_DONE and RX_HTRSH interrupts are the same if interrupt * moderation is not used. Interrupt moderation may cause RX * buffer overflow while RX_DONE is delayed. The required @@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - u32 isr = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_INT_GEN_RX_ICR) + - offsetof(struct RGF_ICR, ICR)); + u32 isr; bool need_unmask = true; + wil6210_mask_irq_rx_edma(wil); + + isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_RX_ICR) + + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (unlikely(!isr)) { wil_err(wil, "spurious IRQ: RX\n"); + wil6210_unmask_irq_rx_edma(wil); return IRQ_NONE; } - wil6210_mask_irq_rx_edma(wil); - if (likely(isr & BIT_RX_STATUS_IRQ)) { wil_dbg_irq(wil, "RX status ring\n"); isr &= ~BIT_RX_STATUS_IRQ; @@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie) static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - u32 isr = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_INT_GEN_TX_ICR) + - offsetof(struct RGF_ICR, ICR)); + u32 isr; bool need_unmask = true; + wil6210_mask_irq_tx_edma(wil); + + isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_INT_GEN_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (unlikely(!isr)) { wil_err(wil, "spurious IRQ: TX\n"); + wil6210_unmask_irq_tx_edma(wil); return IRQ_NONE; } - wil6210_mask_irq_tx_edma(wil); - if (likely(isr & BIT_TX_STATUS_IRQ)) { wil_dbg_irq(wil, "TX status ring\n"); isr &= ~BIT_TX_STATUS_IRQ; @@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie) static irqreturn_t wil6210_irq_tx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - u32 isr = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, ICR)); + u32 isr; bool need_unmask = true; + wil6210_mask_irq_tx(wil); + + isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_TX_ICR) + + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (unlikely(!isr)) { wil_err_ratelimited(wil, "spurious IRQ: TX\n"); + wil6210_unmask_irq_tx(wil); return IRQ_NONE; } - wil6210_mask_irq_tx(wil); - if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) { wil_dbg_irq(wil, "TX done\n"); isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; @@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil) static irqreturn_t wil6210_irq_misc(int irq, void *cookie) { struct wil6210_priv *wil = cookie; - u32 isr = wil_ioread32_and_clear(wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, ICR)); + u32 isr; + + wil6210_mask_irq_misc(wil, false); + + isr = wil_ioread32_and_clear(wil->csr + + HOSTADDR(RGF_DMA_EP_MISC_ICR) + + offsetof(struct RGF_ICR, ICR)); trace_wil6210_irq_misc(isr); wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); if (!isr) { wil_err(wil, "spurious IRQ: MISC\n"); + wil6210_unmask_irq_misc(wil, false); return IRQ_NONE; } - wil6210_mask_irq_misc(wil, false); - if (isr & ISR_MISC_FW_ERROR) { u32 fw_assert_code = 
wil_r(wil, wil->rgf_fw_assert_code_addr); u32 ucode_assert_code = diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 7debed6bec06b4dcbbd66dc6e91a9392c9857387..10673fa9388ec0b266d4963f8e008fe7dc27c8ee 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -223,6 +223,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct net_device *ndev = vif_to_ndev(vif); struct wireless_dev *wdev = vif_to_wdev(vif); struct wil_sta_info *sta = &wil->sta[cid]; + int min_ring_id = wil_get_min_tx_ring_id(wil); might_sleep(); wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n", @@ -273,7 +274,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); /* release vrings */ - for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) { + for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) { if (wil->ring2cid_tid[i][0] == cid) wil_ring_fini_tx(wil, i); } @@ -604,8 +605,10 @@ int wil_priv_init(struct wil6210_priv *wil) wil->sta[i].mid = U8_MAX; } - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { spin_lock_init(&wil->ring_tx_data[i].lock); + wil->ring2cid_tid[i][0] = WIL6210_MAX_CID; + } mutex_init(&wil->mutex); mutex_init(&wil->vif_mutex); @@ -653,8 +656,6 @@ int wil_priv_init(struct wil6210_priv *wil) /* edma configuration can be updated via debugfs before allocation */ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS; - wil->use_compressed_rx_status = true; - wil->use_rx_hw_reordering = true; wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT; /* Rx status ring size should be bigger than the number of RX buffers @@ -995,10 +996,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); - /* Clear MAC link up */ - wil_s(wil, RGF_HP_CTRL, BIT(15)); - wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); - wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + if (wil->hw_version < HW_VER_TALYN) { + /* Clear MAC link up */ + wil_s(wil, RGF_HP_CTRL, BIT(15)); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, + BIT_HPAL_PERST_FROM_PAD); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + } wil_halt_cpu(wil); @@ -1393,8 +1397,15 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) wil6210_clear_irq(wil); /* CAF_ICR - clear and mask */ /* it is W1C, clear by writing back same value */ - wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + if (wil->hw_version < HW_VER_TALYN_MB) { + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + } else { + wil_s(wil, + RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR_TALYN_MB + + offsetof(struct RGF_ICR, IMV), ~0); + } /* clear PAL_UNIT_ICR (potential D0->D3 leftover) * In Talyn-MB host cannot access this register due to * access control, hence PAL_UNIT_ICR is cleared by the FW diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 89119e7facd00c661600bdd4d7648570b733621c..c8c6613371d1bcbd2d0a8e079053bc08b2c41d89 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -108,6 +108,7 
@@ int wil_set_capabilities(struct wil6210_priv *wil) set_bit(hw_capa_no_flash, wil->hw_capa); wil->use_enhanced_dma_hw = true; wil->use_rx_hw_reordering = true; + wil->use_compressed_rx_status = true; wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : WIL_FW_NAME_TALYN; if (wil_fw_verify_file_exists(wil, wil_fw_name)) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 6a7943e487fb11ba0fc62966da01d5de66826d0a..73cdf54521f9bb8dad9c7506acc9df70b121ca05 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -77,8 +77,9 @@ bool wil_is_tx_idle(struct wil6210_priv *wil) { int i; unsigned long data_comp_to; + int min_ring_id = wil_get_min_tx_ring_id(wil); - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { struct wil_ring *vring = &wil->ring_tx[i]; int vring_index = vring - wil->ring_tx; struct wil_ring_tx_data *txdata = @@ -736,6 +737,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_HELD] = "GRO_HELD", [GRO_NORMAL] = "GRO_NORMAL", [GRO_DROP] = "GRO_DROP", + [GRO_CONSUMED] = "GRO_CONSUMED", }; wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); @@ -765,7 +767,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) return; } - if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { + if (wdev->iftype == NL80211_IFTYPE_STATION) { + if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) { + /* mcast packet looped back to us */ + rc = GRO_DROP; + dev_kfree_skb(skb); + goto stats; + } + } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { /* send multicast frames both to higher layers in * local net stack and back to the wireless medium @@ -1313,6 +1322,8 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); wil_set_da_for_vring(wil, skb2, i); wil_tx_ring(wil, vif, v2, skb2); + /* successful call to wil_tx_ring takes skb2 ref */ + dev_kfree_skb_any(skb2); } else { wil_err(wil, "skb_copy failed\n"); } @@ -1935,6 +1946,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, bool check_stop) { int i; + int min_ring_id = wil_get_min_tx_ring_id(wil); if (unlikely(!vif)) return; @@ -1967,7 +1979,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, return; /* check wake */ - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) { struct wil_ring *cur_ring = &wil->ring_tx[i]; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index bca61cb44c37542ca43ddc58f1d1b9a7fbfac103..5fa8d6ad66482641be406201d091e4564bd22b4b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -234,9 +234,10 @@ static int wil_rx_refill_edma(struct wil6210_priv *wil) struct wil_ring *ring = &wil->ring_rx; u32 next_head; int rc = 0; - u32 swtail = *ring->edma_rx_swtail.va; + ring->swtail = *ring->edma_rx_swtail.va; - for (; next_head = wil_ring_next_head(ring), (next_head != swtail); + for (; next_head = wil_ring_next_head(ring), + (next_head != ring->swtail); ring->swhead = next_head) { rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead); if (unlikely(rc)) { @@ -264,41 +265,26 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, struct wil_ring *ring) { struct 
device *dev = wil_to_dev(wil); - u32 next_tail; - u32 swhead = (ring->swhead + 1) % ring->size; + struct list_head *active = &wil->rx_buff_mgmt.active; dma_addr_t pa; - u16 dmalen; - for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead); - ring->swtail = next_tail) { - struct wil_rx_enhanced_desc dd, *d = ⅆ - struct wil_rx_enhanced_desc *_d = - (struct wil_rx_enhanced_desc *) - &ring->va[ring->swtail].rx.enhanced; - struct sk_buff *skb; - u16 buff_id; + while (!list_empty(active)) { + struct wil_rx_buff *rx_buff = + list_first_entry(active, struct wil_rx_buff, list); + struct sk_buff *skb = rx_buff->skb; - *d = *_d; - pa = wil_rx_desc_get_addr_edma(&d->dma); - dmalen = le16_to_cpu(d->dma.length); - dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); - - /* Extract the SKB from the rx_buff management array */ - buff_id = __le16_to_cpu(d->mac.buff_id); - if (buff_id >= wil->rx_buff_mgmt.size) { - wil_err(wil, "invalid buff_id %d\n", buff_id); - continue; - } - skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; - wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; - if (unlikely(!skb)) - wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); - else + if (unlikely(!skb)) { + wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id); + } else { + rx_buff->skb = NULL; + memcpy(&pa, skb->cb, sizeof(pa)); + dma_unmap_single(dev, pa, wil->rx_buf_len, + DMA_FROM_DEVICE); kfree_skb(skb); + } /* Move the buffer from the active to the free list */ - list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, - &wil->rx_buff_mgmt.free); + list_move(&rx_buff->list, &wil->rx_buff_mgmt.free); } } @@ -822,23 +808,24 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil, wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n", l2_rx_status); /* Due to HW issue, KEY error will trigger a MIC error */ - if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) { - wil_dbg_txrx(wil, - "L2 MIC/KEY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) { + wil_err_ratelimited(wil, + "L2 MIC/KEY error, dropping packet\n"); stats->rx_mic_error++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) { - wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) { + wil_err_ratelimited(wil, + "L2 KEY error, dropping packet\n"); stats->rx_key_error++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) { - wil_dbg_txrx(wil, - "L2 REPLAY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) { + wil_err_ratelimited(wil, + "L2 REPLAY error, dropping packet\n"); stats->rx_replay++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) { - wil_dbg_txrx(wil, - "L2 AMSDU error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) { + wil_err_ratelimited(wil, + "L2 AMSDU error, dropping packet\n"); stats->rx_amsdu_error++; } return -EFAULT; @@ -906,6 +893,9 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; if (!skb) { wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + /* Move the buffer from the active list to the free list */ + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, + &wil->rx_buff_mgmt.free); goto again; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 17c294b1ead13b3bfdca6328481a0a1a7db1a5ae..75fe1a3b70466165db6865957880bb7ccc613b6f 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -319,6 +319,7 @@ struct RGF_ICR { /* MAC timer, usec, for packet lifetime */ 
#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8) +#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */ #define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */ #define RGF_CAF_OSC_CONTROL (0x88afa4) #define BIT_CAF_OSC_XTAL_EN BIT(0) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 42c02a20ec97cafa5336c63ccae019c5735021ae..8a603432f531786abdfbaeed583fa25facda79fb 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1639,16 +1639,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, { int rc; unsigned long remain; + ulong flags; mutex_lock(&wil->wmi_mutex); - spin_lock(&wil->wmi_ev_lock); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); wil->reply_id = reply_id; wil->reply_mid = mid; wil->reply_buf = reply; wil->reply_size = reply_size; reinit_completion(&wil->wmi_call); - spin_unlock(&wil->wmi_ev_lock); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); rc = __wmi_send(wil, cmdid, mid, buf, len); if (rc) @@ -1668,12 +1669,12 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, } out: - spin_lock(&wil->wmi_ev_lock); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); wil->reply_id = 0; wil->reply_mid = U8_MAX; wil->reply_buf = NULL; wil->reply_size = 0; - spin_unlock(&wil->wmi_ev_lock); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); mutex_unlock(&wil->wmi_mutex); @@ -2816,7 +2817,18 @@ static void wmi_event_handle(struct wil6210_priv *wil, /* check if someone waits for this event */ if (wil->reply_id && wil->reply_id == id && wil->reply_mid == mid) { - WARN_ON(wil->reply_buf); + if (wil->reply_buf) { + /* event received while wmi_call is waiting + * with a buffer. Such event should be handled + * in wmi_recv_cmd function. Handling the event + * here means a previous wmi_call was timeout. + * Drop the event and do not handle it. + */ + wil_err(wil, + "Old event (%d, %s) while wmi_call is waiting. 
Drop it and Continue waiting\n", + id, eventid2name(id)); + return; + } wmi_evt_call_handler(vif, id, evt_data, len - sizeof(*wmi)); @@ -3107,8 +3119,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { - wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status); - rc = -EINVAL; + wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n", + evt.evt.status); + rc = -EAGAIN; } kfree(cmd); @@ -3160,9 +3173,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len, rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total, WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { - wil_err(wil, "mgmt_tx_ext failed with status %d\n", - evt.evt.status); - rc = -EINVAL; + wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n", + evt.evt.status); + rc = -EAGAIN; } kfree(cmd); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index e99e766a302851e36b34f338cd819b6eaf616637..1cabae424839ebba77ee239c6ecd85a2b4b2c5bd 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void) if (result < 0) printk(KERN_ERR DRIVER_NAME ": usb_register failed (status %d)\n", result); - - led_trigger_register_simple("at76_usb-tx", &ledtrig_tx); + else + led_trigger_register_simple("at76_usb-tx", &ledtrig_tx); return result; } diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index b77d1a904f7e68f29fc73f34ef8defdf4fb71c56..316811e1043203806e03c8841dd00a06e7c016d2 100644 --- a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev) return dev->__using_pio_transfers; } +static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_wake_queue(dev->wl->hw, queue_prio); + else + ieee80211_wake_queue(dev->wl->hw, 0); +} + +static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_stop_queue(dev->wl->hw, queue_prio); + else + ieee80211_stop_queue(dev->wl->hw, 0); +} + /* Message printing */ __printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...); __printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...); diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index d46d57b989aec0d1fa869128b62022b7be401892..357d14d27101842ea8e328b3f4aa965690b43643 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1461,8 +1461,8 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) should_inject_overflow(ring)) { /* This TX ring is full. 
*/ unsigned int skb_mapping = skb_get_queue_mapping(skb); - ieee80211_stop_queue(dev->wl->hw, skb_mapping); - dev->wl->tx_queue_stopped[skb_mapping] = 1; + b43_stop_queue(dev, skb_mapping); + dev->wl->tx_queue_stopped[skb_mapping] = true; ring->stopped = true; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); @@ -1628,11 +1628,11 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } if (dev->wl->tx_queue_stopped[ring->queue_prio]) { - dev->wl->tx_queue_stopped[ring->queue_prio] = 0; + dev->wl->tx_queue_stopped[ring->queue_prio] = false; } else { /* If the driver queue is running wake the corresponding * mac80211 queue. */ - ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + b43_wake_queue(dev, ring->queue_prio); if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index 85f2ca98956567bd28599fbe4d1499e549a1f425..ef3ffa5ad4668d3a4b3e630aab2eb8ba8d717fbe 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c @@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta) u8 i; s32 tmp; s8 signx = 1; - u32 angle = 0; + s32 angle = 0; struct b43_c32 ret = { .i = 39797, .q = 0, }; while (theta > (180 << 16)) diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 6922cbb99a044e253c3d474efde06e07fc94e956..5a0699fb4b9ab31d2310f0b8ec908e143ec937e3 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -1834,7 +1834,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct lpphy_tx_gains gains, oldgains; + struct lpphy_tx_gains oldgains; int old_txpctl, old_afe_ovr, old_rf, old_bbmult; lpphy_read_tx_pctl_mode_from_hardware(dev); @@ -1848,9 +1848,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0) - lpphy_papd_cal(dev, gains, 0, 1, 30); + lpphy_papd_cal(dev, oldgains, 0, 1, 30); else - lpphy_papd_cal(dev, gains, 0, 1, 65); + lpphy_papd_cal(dev, oldgains, 0, 1, 65); if (old_afe_ovr) lpphy_set_tx_gains(dev, oldgains); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 1068a2a4494c46f46f39fbe70a82ece3e33fb74b..144e0b83b24b809d79b4d0bde12e6636856faba4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -490,11 +490,18 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) return -ENOMEM; } -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) +{ + struct brcmf_bcdc *bcdc = drvr->proto->pd; + + brcmf_fws_detach_pre_delif(bcdc->fws); +} + +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc = drvr->proto->pd; drvr->proto->pd = NULL; - brcmf_fws_detach(bcdc->fws); + brcmf_fws_detach_post_delif(bcdc->fws); kfree(bcdc); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 3b0e9eff21b5826883bd2a0377c3baeb023490c5..4bc52240ccea2e4b0c3a944170cfc4d0613e0f6f 100644 
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h @@ -18,14 +18,16 @@ #ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr); void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state); void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, bool success); struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr); #else static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } -static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} +static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}; +static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {} #endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5444e6213d4592e6ac35dd1889c82e4a8d62db97..8f2cc54836cc1fb9db366d88cf0cc419ba585240 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -690,8 +690,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, scan_request = cfg->scan_request; cfg->scan_request = NULL; - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); + del_timer_sync(&cfg->escan_timeout); if (fw_abort) { /* Do a scan abort to stop the driver's scan engine */ @@ -3466,6 +3465,8 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, } netinfo = brcmf_get_netinfo_array(pfn_result); + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; cfg->wowl.nd->n_channels = 1; @@ -5188,10 +5189,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .del_pmk = brcmf_cfg80211_del_pmk, }; -struct cfg80211_ops *brcmf_cfg80211_get_ops(void) +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings) { - return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), + struct cfg80211_ops *ops; + + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), GFP_KERNEL); + + if (ops && settings->roamoff) + ops->update_connect_params = NULL; + + return ops; } struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, @@ -5347,6 +5355,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf; req_len = le32_to_cpu(assoc_info->req_len); resp_len = le32_to_cpu(assoc_info->resp_len); + if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) { + brcmf_err("invalid lengths in assoc info: req %u resp %u\n", + req_len, resp_len); + return -EINVAL; + } if (req_len) { err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies", cfg->extra_buf, @@ -5359,6 +5372,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->req_ie = kmemdup(cfg->extra_buf, conn_info->req_ie_len, GFP_KERNEL); + if (!conn_info->req_ie) + conn_info->req_ie_len = 0; } else { conn_info->req_ie_len = 0; conn_info->req_ie = NULL; @@ -5375,6 +5390,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->resp_ie = 
kmemdup(cfg->extra_buf, conn_info->resp_ie_len, GFP_KERNEL); + if (!conn_info->resp_ie) + conn_info->resp_ie_len = 0; } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; @@ -5997,7 +6014,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, * for subsequent chanspecs. */ channel->flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + IEEE80211_CHAN_NO_80MHZ | + IEEE80211_CHAN_NO_160MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; @@ -6300,6 +6318,16 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) } }; @@ -7057,6 +7085,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) wiphy_unregister(cfg->wiphy); kfree(cfg->ops); wl_deinit_priv(cfg); + cancel_work_sync(&cfg->escan_timeout_work); brcmf_free_wiphy(cfg->wiphy); kfree(cfg); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index a4aec0004e4f1858cda1502eef3af16f523b39a7..9a6287f084a928324ebf04e663f0d309f0c42361 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev); -struct cfg80211_ops *brcmf_cfg80211_get_ops(void); +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index cd3651069d0c4e5bbe1810b0a91a9e433f27217d..8510d207ee87d1f5b2f65dfae112a19201d6ba5e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) return err; } - err = request_firmware(&clm, clm_name, bus->dev); + err = firmware_request_nowarn(&clm, clm_name, bus->dev); if (err) { brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", err); @@ -296,9 +296,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* Replace all newline/linefeed characters with space * character */ - ptr = clmver; - while ((ptr = strnchr(ptr, '\n', sizeof(buf))) != NULL) - *ptr = ' '; + strreplace(clmver, '\n', ' '); brcmf_dbg(INFO, "CLM version = %s\n", clmver); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b1f702faff4fba45cf4a86d778661eb277d54ae9..584e05fdca6adaecdd5120e24c98b73b204e500f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -464,7 +464,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event) } else { /* 
Process special event packets */ if (handle_event) - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, + BCMILCP_SUBTYPE_VENDOR_LONG); brcmf_netif_rx(ifp, skb); } @@ -481,7 +482,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb) if (brcmf_rx_hdrpull(drvr, skb, &ifp)) return; - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); brcmu_pkt_buf_free_skb(skb); } @@ -783,17 +784,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, bool rtnl_locked) { struct brcmf_if *ifp; + int ifidx; ifp = drvr->iflist[bsscfgidx]; - drvr->iflist[bsscfgidx] = NULL; if (!ifp) { brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx); return; } brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, ifp->ifidx); - if (drvr->if2bss[ifp->ifidx] == bsscfgidx) - drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID; + ifidx = ifp->ifidx; + if (ifp->ndev) { if (bsscfgidx == 0) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { @@ -821,6 +822,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, brcmf_p2p_ifp_removed(ifp, rtnl_locked); kfree(ifp); } + + drvr->iflist[bsscfgidx] = NULL; + if (drvr->if2bss[ifidx] == bsscfgidx) + drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID; } void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked) @@ -1130,7 +1135,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) brcmf_dbg(TRACE, "Enter\n"); - ops = brcmf_cfg80211_get_ops(); + ops = brcmf_cfg80211_get_ops(settings); if (!ops) return -ENOMEM; @@ -1239,6 +1244,13 @@ void brcmf_detach(struct device *dev) brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN); + brcmf_proto_detach_pre_delif(drvr); + + if (drvr->mon_if) { + brcmf_net_detach(drvr->mon_if->ndev, false); + drvr->mon_if = NULL; + } + /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS-1; i > -1; i--) brcmf_remove_interface(drvr->iflist[i], false); @@ -1248,7 +1260,7 @@ void brcmf_detach(struct device *dev) brcmf_bus_stop(drvr->bus_if); - brcmf_proto_detach(drvr); + brcmf_proto_detach_post_delif(drvr); bus_if->drvr = NULL; wiphy_free(drvr->wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 8347da632a5b0de85f9a68d9eedfd0436d2da915..4c5a3995dc352282e3243bd8a3ba608c55e17bf2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -178,7 +178,7 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, ifp->fwil_fwerr = false; } -#define MAX_CAPS_BUFFER_SIZE 512 +#define MAX_CAPS_BUFFER_SIZE 768 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) { char caps[MAX_CAPS_BUFFER_SIZE]; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9095b830ae4d7a8146d9d77b4f818c825d541635..9927079a9ace43365fc3cf306f757697f919fc4d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, struct brcmf_fw_request *fwreq; char chipname[12]; const char *mp_path; + size_t mp_path_len; u32 i, j; - char end; + char end = '\0'; size_t reqsz; for (i = 0; i < table_size; i++) { @@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, mapping_table[i].fw_base, chipname); mp_path = brcmf_mp_global.firmware_path; - end = 
mp_path[strlen(mp_path) - 1]; + mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN); + if (mp_path_len) + end = mp_path[mp_path_len - 1]; + fwreq->n_items = n_fwnames; for (j = 0; j < n_fwnames; j++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index e7eaa57d11d98f61280bd99013e649aacec7d0d6..4a900d8d98b83f054d922b58f5488b992c55e77a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -237,6 +237,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work) brcmf_fweh_event_name(event->code), event->code, event->emsg.ifidx, event->emsg.bsscfgidx, event->emsg.addr); + if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) { + brcmf_err("invalid bsscfg index: %u\n", event->emsg.bsscfgidx); + goto event_free; + } /* convert event message */ emsg_be = &event->emsg; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index 816f80ea925b110d79d0845617a70a1ac7e92ba8..ebd66fe0d949c96b91707c61029a81c285a3839d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -211,7 +211,7 @@ enum brcmf_fweh_event_code { */ #define BRCM_OUI "\x00\x10\x18" #define BCMILCP_BCM_SUBTYPE_EVENT 1 - +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 /** * struct brcm_ethhdr - broadcom specific ether header. @@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing); static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, - struct sk_buff *skb) + struct sk_buff *skb, u16 stype) { struct brcmf_event *event_packet; - u16 usr_stype; + u16 subtype, usr_stype; /* only process events when protocol matches */ if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL)) @@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, if ((skb->len + ETH_HLEN) < sizeof(*event_packet)) return; - /* check for BRCM oui match */ event_packet = (struct brcmf_event *)skb_mac_header(skb); + + /* check subtype if needed */ + if (unlikely(stype)) { + subtype = get_unaligned_be16(&event_packet->hdr.subtype); + if (subtype != stype) + return; + } + + /* check for BRCM oui match */ if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0], sizeof(event_packet->hdr.oui))) return; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index f3cbf78c8899ca0567a1cd14e6497c2ae14b4677..1de8497d92b8a9506d93b7b7b23cc250ae18065c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -579,24 +579,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg) return ifidx == *(int *)arg; } -static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, - int ifidx) -{ - bool (*matchfn)(struct sk_buff *, void *) = NULL; - struct sk_buff *skb; - int prec; - - if (ifidx != -1) - matchfn = brcmf_fws_ifidx_match; - for (prec = 0; prec < q->num_prec; prec++) { - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - while (skb) { - brcmu_pkt_buf_free_skb(skb); - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - } - } -} - static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger) { int i; @@ -668,6 +650,28 @@ static inline int brcmf_fws_hanger_poppkt(struct 
brcmf_fws_hanger *h, return 0; } +static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, + int ifidx) +{ + bool (*matchfn)(struct sk_buff *, void *) = NULL; + struct sk_buff *skb; + int prec; + u32 hslot; + + if (ifidx != -1) + matchfn = brcmf_fws_ifidx_match; + for (prec = 0; prec < q->num_prec; prec++) { + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + while (skb) { + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, + true); + brcmu_pkt_buf_free_skb(skb); + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + } + } +} + static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, u32 slot_id) { @@ -2168,6 +2172,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) brcmf_fws_lock(fws); ifp->fws_desc = NULL; brcmf_dbg(TRACE, "deleting %s\n", entry->name); + brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx], + ifp->ifidx); brcmf_fws_macdesc_deinit(entry); brcmf_fws_cleanup(fws, ifp->ifidx); brcmf_fws_unlock(fws); @@ -2404,17 +2410,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) return fws; fail: - brcmf_fws_detach(fws); + brcmf_fws_detach_pre_delif(fws); + brcmf_fws_detach_post_delif(fws); return ERR_PTR(rc); } -void brcmf_fws_detach(struct brcmf_fws_info *fws) +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws) { if (!fws) return; - - if (fws->fws_wq) + if (fws->fws_wq) { destroy_workqueue(fws->fws_wq); + fws->fws_wq = NULL; + } +} + +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws) +{ + if (!fws) + return; /* cleanup */ brcmf_fws_lock(fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 4e6835766d5d8fb287560ea224d50d1fb3df4442..749c06dcdc17ddf958daccd6fda42bbaeed55e1f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -19,7 +19,8 @@ #define FWSIGNAL_H_ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); -void brcmf_fws_detach(struct brcmf_fws_info *fws); +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws); +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws); void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 4e8397a0cbc8e891a739dad2a2a4d67ffd574237..ee922b0525610e9b355eeb7acb68d9122888de4e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -1116,7 +1116,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) skb->protocol = eth_type_trans(skb, ifp->ndev); - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); exit: brcmu_pkt_buf_free_skb(skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 3e9c4f2f5dd12673e8c96e6dcacbea50cf7bc45e..456a1bf008b3d62242b386f9547dc1899e555b34 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -74,7 +74,7 @@ #define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) #define P2P_INVALID_CHANNEL -1 #define P2P_CHANNEL_SYNC_RETRY 5 -#define 
P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500) +#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450) #define P2P_DEFAULT_SLEEP_TIME_VSDB 200 /* WiFi P2P Public Action Frame OUI Subtypes */ @@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) { struct afx_hdl *afx_hdl = &p2p->afx_hdl; struct brcmf_cfg80211_vif *pri_vif; - unsigned long duration; s32 retry; brcmf_dbg(TRACE, "Enter\n"); @@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) * pending action frame tx is cancelled. */ retry = 0; - duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT); while ((retry < P2P_CHANNEL_SYNC_RETRY) && (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) { afx_hdl->is_listen = false; @@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) retry); /* search peer on peer's listen channel */ schedule_work(&afx_hdl->afx_work); - wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))) @@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) afx_hdl->is_listen = true; schedule_work(&afx_hdl->afx_work); wait_for_completion_timeout(&afx_hdl->act_frm_scan, - duration); + P2P_AF_FRM_SCAN_MAX_WAIT); } if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, @@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, return 0; if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) { - if (e->status == BRCMF_E_STATUS_SUCCESS) + if (e->status == BRCMF_E_STATUS_SUCCESS) { set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); - else { + if (!p2p->wait_for_offchan_complete) + complete(&p2p->send_af_done); + } else { set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); /* If there is no ack, we don't need to wait for * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event @@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, p2p->af_sent_channel = le32_to_cpu(af_params->channel); p2p->af_tx_sent_jiffies = jiffies; + if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) && + p2p->af_sent_channel == + ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq)) + p2p->wait_for_offchan_complete = false; + else + p2p->wait_for_offchan_complete = true; + + brcmf_dbg(TRACE, "Waiting for %s tx completion event\n", + (p2p->wait_for_offchan_complete) ? + "off-channel" : "on-channel"); + timeout = wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index 0e8b34d2d85cb1b3dbc0ab9716e93c78628b132c..39f0d0218088236f20cb64eec06875836c1a4f96 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -124,6 +124,7 @@ struct afx_hdl { * @gon_req_action: about to send go negotiation requets frame. * @block_gon_req_tx: drop tx go negotiation requets frame. * @p2pdev_dynamically: is p2p device if created by module param or supplicant. + * @wait_for_offchan_complete: wait for off-channel tx completion event. 
*/ struct brcmf_p2p_info { struct brcmf_cfg80211_info *cfg; @@ -144,6 +145,7 @@ struct brcmf_p2p_info { bool gon_req_action; bool block_gon_req_tx; bool p2pdev_dynamically; + bool wait_for_offchan_complete; }; s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index c5ff551ec659e976fe5c73d182bc7589d575337a..74e6fdbd3a2b8db43a6deb9791fdf13b706ea916 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -67,16 +67,22 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) return -ENOMEM; } -void brcmf_proto_detach(struct brcmf_pub *drvr) +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, "Enter\n"); if (drvr->proto) { if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) - brcmf_proto_bcdc_detach(drvr); + brcmf_proto_bcdc_detach_post_delif(drvr); else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) brcmf_proto_msgbuf_detach(drvr); kfree(drvr->proto); drvr->proto = NULL; } } + +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr) +{ + if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) + brcmf_proto_bcdc_detach_pre_delif(drvr); +} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index d3c3b9a815ad7f5377b7caa4a278538b28b5095b..72355aea902879533eb05f23bf272a717902aad5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -54,7 +54,8 @@ struct brcmf_proto { int brcmf_proto_attach(struct brcmf_pub *drvr); -void brcmf_proto_detach(struct brcmf_pub *drvr); +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr); static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, struct sk_buff *skb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index a907d7b065fa8e0b7ab6a35dc2c265a6385d9c75..18e9e52f8ee70dd1c57cca27b3c08f6d22856a4e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -49,6 +49,10 @@ #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) +/* watermark expressed in number of words */ +#define DEFAULT_F2_WATERMARK 0x8 +#define CY_4373_F2_WATERMARK 0x40 + #ifdef DEBUG #define BRCMF_TRAP_INFO_SIZE 80 @@ -138,6 +142,8 @@ struct rte_console { /* 1: isolate internal sdio signals, put external pads in tri-state; requires * sdio bus power cycle to clear (rev 9) */ #define SBSDIO_DEVCTL_PADS_ISO 0x08 +/* 1: enable F2 Watermark */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Force SD->SB reset mapping (rev 11) */ #define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Determined by CoreControl bit */ @@ -615,6 +621,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio"); /* Note the names are not postfixed with a1 for backward compatibility */ BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio"); BRCMF_FW_DEF(43455, "brcmfmac43455-sdio"); +BRCMF_FW_DEF(43456, "brcmfmac43456-sdio"); BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); BRCMF_FW_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); @@ -634,7 +641,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 
0xFFFFFFFF, 4339), BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), - BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), + BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456), + BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455), BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) @@ -667,6 +675,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) brcmf_dbg(TRACE, "Enter: on=%d\n", on); + sdio_retune_crc_disable(bus->sdiodev->func1); + + /* Cannot re-tune if device is asleep; defer till we're awake */ + if (on) + sdio_retune_hold_now(bus->sdiodev->func1); + wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); /* 1st KSO write goes to AOS wake up core if device is asleep */ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); @@ -719,6 +733,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) if (try_cnt > MAX_KSO_ATTEMPTS) brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err); + if (on) + sdio_retune_release(bus->sdiodev->func1); + + sdio_retune_crc_enable(bus->sdiodev->func1); + return err; } @@ -4049,6 +4068,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, void *nvram; u32 nvram_len; u8 saveclk; + u8 devctl; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); @@ -4104,8 +4124,26 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask), bus->hostintmask, NULL); - - brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, 8, &err); + switch (sdiod->func1->device) { + case SDIO_DEVICE_ID_CYPRESS_4373: + brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + CY_4373_F2_WATERMARK); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + CY_4373_F2_WATERMARK, &err); + devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + CY_4373_F2_WATERMARK | + SBSDIO_MESBUSYCTRL_ENAB, &err); + break; + default: + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + DEFAULT_F2_WATERMARK, &err); + break; + } } else { /* Disable F2 again */ sdio_disable_func(sdiod->func2); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 7faed831f07d5f59e5a485c4cb9e0af4b0f72338..34b031154da938a4de2cb56bb272165a760c0f3b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -77,7 +77,7 @@ #define SBSDIO_GPIO_OUT 0x10006 /* gpio enable */ #define SBSDIO_GPIO_EN 0x10007 -/* rev < 7, watermark for sdio device */ +/* rev < 7, watermark for sdio device TX path */ #define SBSDIO_WATERMARK 0x10008 /* control busy signal generation */ #define SBSDIO_DEVICE_CTL 0x10009 @@ -104,6 +104,13 @@ #define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* MesBusyCtl (rev 11) */ #define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D +/* Watermark for sdio device RX path */ +#define SBSDIO_MESBUSY_RXFIFO_WM_MASK 0x7F +#define SBSDIO_MESBUSY_RXFIFO_WM_SHIFT 0 +/* Enable busy capability for MES access */ +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 +#define SBSDIO_MESBUSYCTRL_ENAB_SHIFT 7 + /* Sdio Core Rev 12 */ #define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E #define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1 diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a4308c6e72d79559a43b1ddad868072c4f8c25df..44ead0fea7c61d55bb49ecfcfb51b0d18892f0cf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -160,7 +160,7 @@ struct brcmf_usbdev_info { struct usb_device *usbdev; struct device *dev; - struct mutex dev_init_lock; + struct completion dev_init_done; int ctl_in_pipe, ctl_out_pipe; struct urb *ctl_urb; /* URB for control endpoint */ @@ -684,12 +684,18 @@ static int brcmf_usb_up(struct device *dev) static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo) { + int i; + if (devinfo->ctl_urb) usb_kill_urb(devinfo->ctl_urb); if (devinfo->bulk_urb) usb_kill_urb(devinfo->bulk_urb); - brcmf_usb_free_q(&devinfo->tx_postq, true); - brcmf_usb_free_q(&devinfo->rx_postq, true); + if (devinfo->tx_reqs) + for (i = 0; i < devinfo->bus_pub.ntxq; i++) + usb_kill_urb(devinfo->tx_reqs[i].urb); + if (devinfo->rx_reqs) + for (i = 0; i < devinfo->bus_pub.nrxq; i++) + usb_kill_urb(devinfo->rx_reqs[i].urb); } static void brcmf_usb_down(struct device *dev) @@ -1195,11 +1201,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, if (ret) goto error; - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return; error: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret); - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); device_release_driver(dev); } @@ -1267,7 +1273,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; /* we are done */ - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return 0; } bus->chip = bus_pub->devid; @@ -1327,11 +1333,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->usbdev = usb; devinfo->dev = &usb->dev; - /* Take an init lock, to protect for disconnect while still loading. + /* Init completion, to protect for disconnect while still loading. * Necessary because of the asynchronous firmware load construction */ - mutex_init(&devinfo->dev_init_lock); - mutex_lock(&devinfo->dev_init_lock); + init_completion(&devinfo->dev_init_done); usb_set_intfdata(intf, devinfo); @@ -1409,7 +1414,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) return 0; fail: - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); kfree(devinfo); usb_set_intfdata(intf, NULL); return ret; @@ -1424,7 +1429,7 @@ brcmf_usb_disconnect(struct usb_interface *intf) devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); if (devinfo) { - mutex_lock(&devinfo->dev_init_lock); + wait_for_completion(&devinfo->dev_init_done); /* Make sure that devinfo still exists. Firmware probe routines * may have released the device and cleared the intfdata. 
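Editor's note on the usb.c hunks above: the dev_init_lock mutex is replaced with a completion that the asynchronous firmware-probe path signals on both its success and error exits, while disconnect simply waits for it. A kernel-style sketch of the pattern, with hypothetical structure and function names:

#include <linux/completion.h>

struct my_devinfo {
	struct completion dev_init_done;
	/* ... */
};

static int my_probe(struct my_devinfo *di)
{
	init_completion(&di->dev_init_done);
	/* Kick off the asynchronous firmware request here; its callback
	 * ends with complete(&di->dev_init_done) on success and on error.
	 */
	return 0;
}

static void my_disconnect(struct my_devinfo *di)
{
	/* Block until the async init has finished (either way), so the
	 * teardown below never races with the firmware callback.
	 */
	wait_for_completion(&di->dev_init_done);
	/* ... release resources ... */
}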
*/ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c index 8eff2753abadeb2704f87f88f7c5eabd75bf091b..d493021f6031852b478706022418293ab8939090 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c @@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, struct brcmf_if *ifp; const struct brcmf_vndr_dcmd_hdr *cmdhdr = data; struct sk_buff *reply; - int ret, payload, ret_len; + unsigned int payload, ret_len; void *dcmd_buf = NULL, *wr_pointer; u16 msglen, maxmsglen = PAGE_SIZE - 0x100; + int ret; if (len < sizeof(*cmdhdr)) { brcmf_err("vendor command too short: %d\n", len); @@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, brcmf_err("oversize return buffer %d\n", ret_len); ret_len = BRCMF_DCMD_MAXLEN; } - payload = max(ret_len, len) + 1; + payload = max_t(unsigned int, ret_len, len) + 1; dcmd_buf = vzalloc(payload); if (NULL == dcmd_buf) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index ecc89e718b9c146865b6e17917e3fb51023614aa..6188275b17e5aee8df963ee5c848a4d5d4921687 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } spin_lock_bh(&wl->lock); + wl->wlc->vif = vif; wl->mute_tx = false; brcms_c_mute(wl->wlc, false); if (vif->type == NL80211_IFTYPE_STATION) @@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static void brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct brcms_info *wl = hw->priv; + + spin_lock_bh(&wl->lock); + wl->wlc->vif = NULL; + spin_unlock_bh(&wl->lock); } static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) @@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, status = brcms_c_aggregatable(wl->wlc, tid); spin_unlock_bh(&wl->lock); if (!status) { - brcms_err(wl->wlc->hw->d11core, - "START: tid %d is not agg\'able\n", tid); + brcms_dbg_ht(wl->wlc->hw->d11core, + "START: tid %d is not agg\'able\n", tid); return -EINVAL; } ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); @@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw, spin_unlock_bh(&wl->lock); } +static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct brcms_info *wl = hw->priv; + struct sk_buff *beacon = NULL; + u16 tim_offset = 0; + + spin_lock_bh(&wl->lock); + if (wl->wlc->vif) + beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif, + &tim_offset, NULL); + if (beacon) + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, + wl->wlc->vif->bss_conf.dtim_period); + spin_unlock_bh(&wl->lock); + + return 0; +} + static const struct ieee80211_ops brcms_ops = { .tx = brcms_ops_tx, .start = brcms_ops_start, @@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = { .flush = brcms_ops_flush, .get_tsf = brcms_ops_get_tsf, .set_tsf = brcms_ops_set_tsf, + .set_tim = brcms_ops_beacon_set_tim, }; void brcms_dpc(unsigned long data) @@ -1578,10 +1604,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx) if (le32_to_cpu(hdr->idx) == idx) { pdata = wl->fw.fw_bin[i]->data + 
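Editor's note on the vendor.c hunk above (payload/ret_len made unsigned, max() replaced by max_t(unsigned int, ...)): the kernel's max() warns on mismatched operand types, and mixing signed and unsigned operands lets the usual arithmetic conversions turn a negative value into a huge unsigned one. A tiny userspace illustration of that pitfall (the exact motivation for the hunk is my reading, not stated in the patch):

#include <stdio.h>

int main(void)
{
	int a = -1;
	unsigned int b = 16;

	/* -1 is converted to UINT_MAX before the comparison, so this
	 * quietly prints 1; max_t(unsigned int, a, b) at least makes the
	 * common type explicit, and the hunk above also keeps all the
	 * length variables unsigned so the types agree throughout.
	 */
	printf("(a > b) is %d\n", a > b);
	return 0;
}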
le32_to_cpu(hdr->offset); - *pbuf = kmemdup(pdata, len, GFP_KERNEL); + *pbuf = kvmalloc(len, GFP_KERNEL); if (*pbuf == NULL) goto fail; - + memcpy(*pbuf, pdata, len); return 0; } } @@ -1629,7 +1655,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx) */ void brcms_ucode_free_buf(void *p) { - kfree(p); + kvfree(p); } /* diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h index c4d135cff04ad2f7883c783fb96244bbbabfa370..9f76b880814e8201744a83256f8a89af8e4f0ba7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h @@ -563,6 +563,7 @@ struct brcms_c_info { struct wiphy *wiphy; struct scb pri_scb; + struct ieee80211_vif *vif; struct sk_buff *beacon; u16 beacon_tim_offset; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index d8b79cb72b58d3153c095c5e93896a0ba99f539a..eb5db94f57453f2aed93e89a510fdd210bef7a34 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw) return BRCMU_CHSPEC_D11AC_BW_40; case BRCMU_CHAN_BW_80: return BRCMU_CHSPEC_D11AC_BW_80; + case BRCMU_CHAN_BW_160: + return BRCMU_CHSPEC_D11AC_BW_160; default: WARN_ON(1); } @@ -190,8 +192,41 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) break; } break; - case BRCMU_CHSPEC_D11AC_BW_8080: case BRCMU_CHSPEC_D11AC_BW_160: + ch->bw = BRCMU_CHAN_BW_160; + ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, + BRCMU_CHSPEC_D11AC_SB_SHIFT); + switch (ch->sb) { + case BRCMU_CHAN_SB_LLL: + ch->control_ch_num -= CH_70MHZ_APART; + break; + case BRCMU_CHAN_SB_LLU: + ch->control_ch_num -= CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_LUL: + ch->control_ch_num -= CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_LUU: + ch->control_ch_num -= CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULL: + ch->control_ch_num += CH_10MHZ_APART; + break; + case BRCMU_CHAN_SB_ULU: + ch->control_ch_num += CH_30MHZ_APART; + break; + case BRCMU_CHAN_SB_UUL: + ch->control_ch_num += CH_50MHZ_APART; + break; + case BRCMU_CHAN_SB_UUU: + ch->control_ch_num += CH_70MHZ_APART; + break; + default: + WARN_ON_ONCE(1); + break; + } + break; + case BRCMU_CHSPEC_D11AC_BW_8080: default: WARN_ON_ONCE(1); break; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h index 7b9a77981df16bd7f78dbb7ef2abdf2b1dfedf8c..75b2a0438cfa782af3fdb660219d292bb5dfa23e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h @@ -29,6 +29,8 @@ #define CH_UPPER_SB 0x01 #define CH_LOWER_SB 0x02 #define CH_EWA_VALID 0x04 +#define CH_70MHZ_APART 14 +#define CH_50MHZ_APART 10 #define CH_30MHZ_APART 6 #define CH_20MHZ_APART 4 #define CH_10MHZ_APART 2 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 04dd7a936593826928cf11559b3b362d0ed43383..5512c7f73fce89ec231f42357d7ee257c3a6f7d7 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { we have to add a spin lock... 
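Editor's note on the d11.c hunk above: a 160 MHz chanspec stores the band center, and the eight 20 MHz sub-band indexes (LLL..UUU) sit at -70/-50/-30/-10/+10/+30/+50/+70 MHz from that center; since 5 GHz channel numbers advance by one per 5 MHz, those offsets become the CH_70/50/30/10MHZ_APART constants (14, 10, 6, 2). A small, self-contained illustration:

#include <stdio.h>

static int ctl_chan_160(int center_chan, unsigned int sb /* 0..7, LLL..UUU */)
{
	static const int offset[8] = { -14, -10, -6, -2, 2, 6, 10, 14 };

	return center_chan + offset[sb];
}

int main(void)
{
	/* Channel 50 is the center of the 36..64 block; sideband 0 (LLL)
	 * should yield control channel 36.
	 */
	printf("%d\n", ctl_chan_160(50, 0));
	return 0;
}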
*/ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { - ptr += sprintf(ptr, "%pM %*s rssi = %d", + ptr += sprintf(ptr, "%pM %.*s rssi = %d", BSSList_rid.bssid, (int)BSSList_rid.ssidLen, BSSList_rid.ssid, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 91ca77c7571cebb2a6dd3265b1713fb6d3202bda..d7335fabd929454eb404b136eb096a87ea7eb412 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -77,10 +77,13 @@ #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" #define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" +#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" @@ -88,7 +91,11 @@ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ +#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \ + IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" @@ -96,6 +103,8 @@ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" +#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ + IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_22000 10 @@ -134,7 +143,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ - .non_shared_ant = ANT_A, \ + .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ @@ -190,7 +199,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_FW_PRE, + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +/* + * All JF radio modules are part of the 9000 series, but the MAC part + * looks more like 22000. That's why this device is here, but called + * 9560 nevertheless. 
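Editor's note on the airo.c one-liner above ("%*s" -> "%.*s"): the asterisk as a field width only pads the output and still reads the string until a NUL, whereas the asterisk as a precision caps the number of bytes printed, which is what a non-NUL-terminated SSID buffer needs. A userspace demo:

#include <stdio.h>

int main(void)
{
	char ssid[8] = { 'h', 'o', 'm', 'e', 'X', 'X', 'X', 'X' }; /* no NUL */
	int ssid_len = 4;

	/* Precision: prints at most ssid_len bytes - safe. */
	printf("[%.*s]\n", ssid_len, ssid);

	/* Width: pads to ssid_len columns but keeps reading until a NUL,
	 * running off the end of the buffer - the bug being fixed.
	 */
	/* printf("[%*s]\n", ssid_len, ssid); */
	return 0;
}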
+ */ +const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9461", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9462", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_jf = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -264,7 +320,9 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 36151e61a26f018f6c0ee1bc13a5931b08a6cffb..c04d934b23b403c583b73dabf6a791b4253df665 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -140,6 +140,7 @@ const struct iwl_cfg iwl5350_agn_cfg = { .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, .internal_wimax_coex = true, + .csr = &iwl_csr_v1, }; #define IWL_DEVICE_5150 \ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 1bbd17ada974723623acc6b1da53b77131f6121b..20e16c423990162798874847678af389e043fb05 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -185,6 +185,9 @@ void iwl_leds_init(struct iwl_priv *priv) priv->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(priv->hw->wiphy)); + if (!priv->led.name) + return; + priv->led.brightness_set = iwl_led_brightness_set; priv->led.blink_set = iwl_led_blink_set; priv->led.max_brightness = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 030482b357a3c3038bcf456f2c782b04747fa70f..06dd4e81b73745c3bfb6eebff4f0178658d7f61c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1227,6 +1227,23 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) return 0; } +static int iwl_nvm_check_version(struct iwl_nvm_data *data, + struct iwl_trans *trans) +{ + if (data->nvm_version >= trans->cfg->nvm_ver || + data->calib_version >= trans->cfg->nvm_calib_ver) { + IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", + data->nvm_version, data->calib_version); + return 0; + } + + IWL_ERR(trans, + "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", + data->nvm_version, trans->cfg->nvm_ver, + data->calib_version, trans->cfg->nvm_calib_ver); + return -EINVAL; +} + static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index cb5f32c1d7057a4e54d8a9de0088c7b542c4280e..0b3b1223cff7e488061e05e15f65cde704d9e0f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,6 +30,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
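Editor's note on the two led.c hunks in this series (dvm and, later, mvm): kasprintf() allocates and can return NULL, and registering an LED class device with a NULL name would blow up later. A kernel-style sketch of the check being added (names are illustrative, not the driver's):

#include <linux/slab.h>
#include <linux/leds.h>

static int example_leds_init(struct led_classdev *led, struct device *dev,
			     const char *wiphy_name)
{
	led->name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name);
	if (!led->name)
		return -ENOMEM;	/* bail out instead of oopsing later */

	return led_classdev_register(dev, led);
}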
* * Redistribution and use in source and binary forms, with or without @@ -84,7 +86,7 @@ #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_TABLE_SIZE + 3) -#define ACPI_WGDS_WIFI_DATA_SIZE 18 +#define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 55594c93b014cfe9c94ed689dd7482d835217bfe..47dbd2d3e3b4f86b2de651a059e052bd277ff1f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -442,7 +442,7 @@ struct iwl_he_backoff_conf { * Support for Nss x BW (or RU) matrix: * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) * Each entry contains 2 QAM thresholds for 8us and 16us: - * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES + * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx: * QAM_tx < QAM_th1 --> PPE=0us * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 2f599353c8856b4c9604e4953722771c7fef4837..2ba1401e5c0d526c3336b9090d9c911b9b9b131d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -574,6 +574,69 @@ struct iwl_rx_mpdu_desc { #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) +#define IWL_CD_STTS_OPTIMIZED_POS 0 +#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 +#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 +#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E +#define IWL_CD_STTS_WIFI_STATUS_POS 4 +#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 + +/** + * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) + * @IWL_CD_STTS_UNUSED: unused + * @IWL_CD_STTS_UNUSED_2: unused + * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. + * In sniffer mode, when split is used, set in last CD completion. (RX) + * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for + * all CD completion. (RX) + * @IWL_CD_STTS_ABORTED: CR abort / close flow. 
(RX) + * @IWL_CD_STTS_ERROR: general error (RX) + */ +enum iwl_completion_desc_transfer_status { + IWL_CD_STTS_UNUSED, + IWL_CD_STTS_UNUSED_2, + IWL_CD_STTS_END_TRANSFER, + IWL_CD_STTS_OVERFLOW, + IWL_CD_STTS_ABORTED, + IWL_CD_STTS_ERROR, +}; + +/** + * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) + * @IWL_CD_STTS_VALID: the packet is valid (RX) + * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) + * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) + * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) + * @IWL_CD_STTS_DUP: duplicate packet (RX) + * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) + * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) + * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) + * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) + * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) + * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) + * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) + * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) + * @IWL_CD_STTS_NOT_USED: completed but not used (RX) + * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) + */ +enum iwl_completion_desc_wifi_status { + IWL_CD_STTS_VALID, + IWL_CD_STTS_FCS_ERR, + IWL_CD_STTS_SEC_KEY_ERR, + IWL_CD_STTS_DECRYPTION_ERR, + IWL_CD_STTS_DUP, + IWL_CD_STTS_ICV_MIC_ERR, + IWL_CD_STTS_INTERNAL_SNAP_ERR, + IWL_CD_STTS_SEC_PORT_FAIL, + IWL_CD_STTS_BA_OLD_SN, + IWL_CD_STTS_QOS_NULL, + IWL_CD_STTS_MAC_HDR_ERR, + IWL_CD_STTS_MAX_RETRANS, + IWL_CD_STTS_EX_LIFETIME, + IWL_CD_STTS_NOT_USED, + IWL_CD_STTS_REPLAY_ERR, +}; + struct iwl_frame_release { u8 baid; u8 reserved; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 514b86123d3d366fd368e9d49f7d10222f77bae5..80853f6cbd6d225aa71f16bba67254d770455f5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -747,9 +747,9 @@ enum iwl_mvm_ba_resp_flags { * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd - * for details. + * for details. Length in @tfd_cnt. * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See - * &iwl_mvm_compressed_ba_ratid for more details. + * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt. 
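Editor's note on the rx.h hunk above: the new IWL_CD_STTS_* position/mask pairs pack three fields into one completion-status byte (bit 0 "optimized", bits 1-3 the transfer status, bits 4-7 the wifi status). A small illustrative decoder using exactly those constants (the helper itself is not part of the patch):

#include <stdint.h>

#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS	4
#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0

static inline unsigned int cd_transfer_status(uint8_t status)
{
	return (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
		IWL_CD_STTS_TRANSFER_STATUS_POS;
}

static inline unsigned int cd_wifi_status(uint8_t status)
{
	return (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
		IWL_CD_STTS_WIFI_STATUS_POS;
}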
*/ struct iwl_mvm_compressed_ba_notif { __le32 flags; @@ -766,7 +766,7 @@ struct iwl_mvm_compressed_ba_notif { __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; - struct iwl_mvm_compressed_ba_tfd tfd[1]; + struct iwl_mvm_compressed_ba_tfd tfd[0]; struct iwl_mvm_compressed_ba_ratid ra_tid[0]; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index a31a42e673c46fffec58ac95acbda96a442d803c..d6fca00ecb885601d26faa35a945228399fc4fb6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -547,6 +547,7 @@ static struct scatterlist *alloc_sgtable(int size) if (new_page) __free_page(new_page); } + kfree(table); return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); @@ -824,7 +825,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + if (fifo_data_len) { iwl_fw_dump_fifos(fwrt, &dump_data); if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); @@ -1016,7 +1017,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, * If the loading of the FW completed successfully, the next step is to * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non * zero, the FW was already loaded successully. If the state is "NO_FW" - * in such a case - WARN and exit, since FW may be dead. Otherwise, we + * in such a case - exit, since FW may be dead. Otherwise, we * can try to collect the data, since FW might just not be fully * loaded (no "ALIVE" yet), and the debug data is accessible. * @@ -1024,9 +1025,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, * config. In such a case, due to HW access problems, we might * collect garbage. */ - if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) && - fwrt->smem_cfg.num_lmacs, - "Can't collect dbg data when FW isn't alive\n")) + if (fwrt->trans->state == IWL_TRANS_NO_FW && + fwrt->smem_cfg.num_lmacs) return -EIO; if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index ff85d69c2a8cb3703bdc1bebe3148435a4cf2e43..557ee47bffd8c33baddf500fd13aebc337bcfa62 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
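Editor's note on the tx.h hunk above (tfd[1] -> tfd[0]): with a one-element trailing array, sizeof() of the notification already contains one element, so any length computed as "header size + tfd_cnt elements" overcounts by one. A userspace illustration of the practical difference:

#include <stdio.h>
#include <stdint.h>

struct elem { uint32_t v; };

/* GNU zero-length array, as used by these firmware API headers. */
struct notif_old { uint16_t cnt; struct elem tfd[1]; };	/* pre-patch layout */
struct notif_new { uint16_t cnt; struct elem tfd[0]; };	/* post-patch layout */

int main(void)
{
	unsigned int n = 3;

	printf("old: %zu\n", sizeof(struct notif_old) + n * sizeof(struct elem));
	printf("new: %zu\n", sizeof(struct notif_new) + n * sizeof(struct elem));
	return 0;
}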
* * Redistribution and use in source and binary forms, with or without @@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) .len = { 0, }, }; struct iwl_rx_packet *pkt; + int ret; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) @@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) else cmd.id = SHARED_MEM_CFG; - if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) + ret = iwl_trans_send_cmd(fwrt->trans, &cmd); + + if (ret) { + WARN(ret != -ERFKILL, + "Could not send the SMEM command: %d\n", ret); return; + } pkt = cmd.resp_pkt; if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 12fddcf15bab394521122a2fecaab3d2dcd83579..2e9fd7a303985174970f538dcd0530ee6f7ba41e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -574,11 +574,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; +extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl22000_2ax_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb; -#endif /* CONFIG_IWLMVM */ +#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c0631255aee7ca3dedf6a5168856f34d3a20401d..db6628d390a2a99604066d91c791c4a9d22526b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1547,7 +1547,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto free; out_free_fw: - iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index a4c96215933ba589d2bbbc272728c901c80a4214..a59bab8345f4ea83f4eb659299c1f2706921548f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -928,22 +928,3 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, return NULL; } IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); - -/* helper functions */ -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans) -{ - if (data->nvm_version >= trans->cfg->nvm_ver || - data->calib_version >= trans->cfg->nvm_calib_ver) { - IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", - data->nvm_version, data->calib_version); - return 0; - } - - IWL_ERR(trans, - "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", - data->nvm_version, trans->cfg->nvm_ver, - data->calib_version, trans->cfg->nvm_calib_ver); 
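Editor's note on the iwl-drv.c hunk above, which drops the explicit iwl_dealloc_ucode() from the out_free_fw error path: one plausible reading is that the same buffers are released again by the later teardown path, and freeing in two places risks a double free. A generic kernel-style sketch of the hazard and the usual defence (not iwlwifi code):

#include <linux/slab.h>

struct fw_img { void *data; };

static void fw_img_free(struct fw_img *img)
{
	kfree(img->data);
	img->data = NULL;	/* kfree(NULL) is a no-op, so a second
				 * fw_img_free() call is now harmless */
}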
- return -EINVAL; -} -IWL_EXPORT_SYMBOL(iwl_nvm_check_version); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 8be50ed12300f5b16df34be38c9ad7d7ca5b41eb..c59dd47cf15d332ce7b39ac15f04d91df03322c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -122,9 +124,6 @@ struct iwl_nvm_data * iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size); -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans); - int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 421a869633a32d861919dc80f11f23500f398ace..2e512f6e9ebcde4af31c7f6cf25710d40bd47871 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -8,6 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -399,6 +401,7 @@ enum aux_misc_master1_en { #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 +#define PREG_PRPH_WPROT_0 0xA04CE0 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -425,4 +428,8 @@ enum { #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) + +#define HPM_DEBUG 0xA03440 +#define PERSISTENCE_BIT BIT(12) +#define PREG_WFPM_ACCESS BIT(12) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 279dd7b7a3fb921c615dcab4fe769f926f7e896d..0b8cf7f3af93304116152bf9a6b9a5273507226d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -269,6 +269,7 @@ struct iwl_rx_cmd_buffer { bool _page_stolen; u32 _rx_page_order; unsigned int truesize; + u8 status; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 79bdae994822844bcc07bb8b63b617f59982d042..868cb1195a74b23e7dbf5729e464cd7fe7fa21d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -731,8 +731,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, { struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + bool unified = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); struct wowlan_key_data key_data = { - .configure_keys = !d0i3, + .configure_keys = !d0i3 && !unified, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6bb1a99a197a22981f29962ab487cccb76dd0eb8..a5740f13898159c94468575691c526b7f9de4fe4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -132,13 +132,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) static int iwl_configure_rxq(struct iwl_mvm *mvm) { - int i, num_queues, size; + int i, num_queues, size, ret; struct iwl_rfh_queue_config *cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; /* Do not configure default queue, it is configured via context info */ num_queues = mvm->trans->num_rx_queues - 1; - size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data); + size = struct_size(cmd, data, num_queues); cmd = kzalloc(size, GFP_KERNEL); if (!cmd) @@ -159,10 +163,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); } - return iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(DATA_PATH_GROUP, - RFH_QUEUE_CONFIG_CMD), - 0, size, cmd); + hcmd.data[0] = cmd; + hcmd.len[0] = size; + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + + kfree(cmd); + + return ret; } static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) @@ -187,20 +195,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; - __le32 *dump_data = mfu_dump_notif->data; - int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); - int i; if (mfu_dump_notif->index_num == 0) 
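Editor's note on the fw.c hunk above: the open-coded "sizeof(*cmd) + num_queues * sizeof(element)" is replaced by struct_size(cmd, data, num_queues), which computes the size of a structure with a trailing flexible array and, in the real kernel macro (include/linux/overflow.h), saturates instead of overflowing. Roughly what it computes, with simplified field names:

#include <stddef.h>
#include <stdint.h>

struct queue_data { uint32_t q_num; uint32_t fr_bd_wid; };	/* simplified */
struct queue_cfg  { uint32_t num_queues; struct queue_data data[]; };

static size_t cfg_size(unsigned int num_queues)
{
	/* Equivalent of struct_size(cmd, data, num_queues), minus the
	 * overflow checking the kernel macro adds.
	 */
	return sizeof(struct queue_cfg) +
	       (size_t)num_queues * sizeof(struct queue_data);
}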
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); - - for (i = 0; i < n_words; i++) - IWL_DEBUG_INFO(mvm, - "MFUART assert dump, dword %u: 0x%08x\n", - le16_to_cpu(mfu_dump_notif->index_num) * - n_words + i, - le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, @@ -539,7 +537,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (mvm->nvm_file_name) iwl_mvm_load_nvm_to_nic(mvm); - WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans)); + WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, + "Too old NVM version (0x%0x, required = 0x%0x)", + mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); /* * abort after reading the nvm in case RF Kill is on, we will complete @@ -704,15 +704,19 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; - /* in case of BIOS bug */ - if (n_profiles <= 0) { + /* + * Check the validity of n_profiles. The EWRD profiles start + * from index 1, so the maximum value allowed here is + * ACPI_SAR_PROFILES_NUM - 1. + */ + if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } for (i = 0; i < n_profiles; i++) { /* the tables start at element 3 */ - static int pos = 3; + int pos = 3; /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't @@ -824,6 +828,26 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } +static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm) +{ + /* + * The GEO_TX_POWER_LIMIT command is not supported on earlier + * firmware versions. Unfortunately, we don't have a TLV API + * flag to rely on, so rely on the major version which is in + * the first byte of ucode_ver. This was implemented + * initially on version 38 and then backported to 17. It was + * also backported to 29, but only for 7265D devices. The + * intention was to have it in 36 as well, but not all 8000 + * family got this feature enabled. The 8000 family is the + * only one using version 36, so skip this version entirely. 
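Editor's note on the EWRD hunk above ("static int pos = 3" becomes a plain local): a function-scope static is initialised once per boot, so on any later call the index keeps whatever value it reached the first time and the profile parsing starts at the wrong element; a non-static local restarts at 3 on every call. A userspace demo of the behaviour:

#include <stdio.h>

static int parse_buggy(int n_entries)
{
	static int pos = 3;		/* initialised once, ever */
	int consumed = 0;

	for (int i = 0; i < n_entries; i++)
		consumed = pos++;	/* stand-in for reading element 'pos' */
	return consumed;
}

int main(void)
{
	printf("first  call ends at element %d\n", parse_buggy(4)); /* 6 */
	printf("second call ends at element %d\n", parse_buggy(4)); /* 10, not 6 */
	return 0;
}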
+ */ + return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 || + IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 || + (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 && + ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); +} + int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { struct iwl_geo_tx_power_profiles_resp *resp; @@ -839,6 +863,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) .data = { &geo_cmd }, }; + if (!iwl_mvm_sar_geo_support(mvm)) + return -EOPNOTSUPP; + ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); @@ -864,6 +891,9 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) int ret, i, j; u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); + if (!iwl_mvm_sar_geo_support(mvm)) + return 0; + ret = iwl_mvm_sar_get_wgds_table(mvm); if (ret < 0) { IWL_DEBUG_RADIO(mvm, @@ -876,7 +906,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * - ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); + ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); @@ -911,6 +941,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) return -ENOENT; } +static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; @@ -937,8 +972,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", ret); - /* if not available, don't fail and don't bother with EWRD */ - return 0; + /* + * If not available, don't fail and don't bother with EWRD. + * Return 1 to tell that we can't use WGDS either. + */ + return 1; } ret = iwl_mvm_sar_get_ewrd_table(mvm); @@ -951,9 +989,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) /* choose profile 1 (WRDS) as default for both chains */ ret = iwl_mvm_sar_select_profile(mvm, 1, 1); - /* if we don't have profile 0 from BIOS, just skip it */ + /* + * If we don't have profile 0 from BIOS, just skip it. This + * means that SAR Geo will not be enabled either, even if we + * have other valid profiles. + */ if (ret == -ENOENT) - return 0; + return 1; return ret; } @@ -1151,11 +1193,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); ret = iwl_mvm_sar_init(mvm); - if (ret) - goto error; + if (ret == 0) { + ret = iwl_mvm_sar_geo_init(mvm); + } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { + /* + * If basic SAR is not available, we check for WGDS, + * which should *not* be available either. If it is + * available, issue an error, because we can't use SAR + * Geo without basic SAR. 
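Editor's note on the SAR hunks above: iwl_mvm_sar_init() now distinguishes three outcomes, 0 (tables loaded, geo profiles may be applied), a positive value (no usable WRDS, so geo SAR must be skipped as well), and a negative errno (hard failure); the iwl_mvm_up() hunk below branches on exactly that. A compact, self-contained sketch of consuming such a tri-state result (stubbed placeholder functions, not the driver's):

static int sar_init(void)     { return 1; }	/* stub: "tables not in BIOS" */
static int sar_geo_init(void) { return 0; }	/* stub */

static int bring_up_sar(void)
{
	int ret = sar_init();

	if (ret == 0)
		ret = sar_geo_init();	/* geo SAR only on top of basic SAR */
	else if (ret > 0)
		ret = 0;		/* not available - not an error */

	return ret;			/* <0 would propagate a real failure */
}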
+ */ + IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); + } - ret = iwl_mvm_sar_geo_init(mvm); - if (ret) + if (ret < 0) goto error; iwl_mvm_leds_sync(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index b27269504a622f3fff33b2e3736c13720af53e13..072f80c90ce4a475945095f63a40111c35617330 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -131,6 +131,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(mvm->hw->wiphy)); + if (!mvm->led.name) + return -ENOMEM; + mvm->led.brightness_set = iwl_led_brightness_set; mvm->led.max_brightness = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index b3fd20502abb3c604352fbc9d5aa84256f40a4f6..d90d58309bf0e8d4a27124fd5c47112daab39f42 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -85,6 +85,10 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK, + IWL_GEN2_TRIG_TX_FIFO_VO, + IWL_GEN2_TRIG_TX_FIFO_VI, + IWL_GEN2_TRIG_TX_FIFO_BE, + IWL_GEN2_TRIG_TX_FIFO_BK, }; struct iwl_mvm_mac_iface_iterator_data { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b15b0d84bb7ea18a06933d455e495248c7bcd32c..476c44db0e64b1c5a0325a1e24bbd4aed6d42be0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -306,8 +306,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, goto out; } - if (changed) - *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); + if (changed) { + u32 status = le32_to_cpu(resp->status); + + *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || + status == MCC_RESP_ILLEGAL); + } regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), @@ -816,6 +820,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) sta = NULL; + /* If there is no sta, and it's not offchannel - send through AP */ + if (info->control.vif->type == NL80211_IFTYPE_STATION && + info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); + + if (ap_sta_id < IWL_MVM_STATION_COUNT) { + /* mac80211 holds rcu read lock */ + sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); + if (IS_ERR_OR_NULL(sta)) + goto drop; + } + } + if (sta) { if (iwl_mvm_defer_tx(mvm, sta, skb)) return; @@ -1233,12 +1252,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) iwl_mvm_del_aux_sta(mvm); /* - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() - * won't be called in this case). + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the + * hw (as restart_complete() won't be called in this case) and mac80211 + * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. 
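Editor's note on the mac80211.c regdomain hunk above: resp->status is now passed through le32_to_cpu() before it is compared against the MCC_RESP_* values; comparing a __le32 field directly with a host-order constant only happens to work on little-endian machines. A userspace illustration (the MCC_RESP constant here is a stand-in value for the demo):

#include <stdio.h>
#include <stdint.h>

#define MCC_RESP_NEW_CHAN_PROFILE 1u	/* stand-in value for the demo */

static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t wire_status[4] = { 0x01, 0x00, 0x00, 0x00 }; /* LE "1" */

	printf("changed = %d\n",
	       le32_to_host(wire_status) == MCC_RESP_NEW_CHAN_PROFILE);
	return 0;
}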
*/ - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); @@ -1990,7 +2012,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); - /* If PPE Thresholds exist, parse them into a FW-familiar format */ + /* + * Initialize the PPE thresholds to "None" (7), as described in Table + * 9-262ac of 80211.ax/D3.0. + */ + memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); + + /* If PPE Thresholds exist, parse them into a FW-familiar format. */ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { u8 nss = (sta->he_cap.ppe_thres[0] & @@ -2931,7 +2959,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { @@ -2947,7 +2976,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && @@ -4413,10 +4443,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) - return; - /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b3987a0a70181ad2dc6775d044ccf90a3bbd8cf8..6b65ad6c9b56d232ac047cf2e2a1e19fbd164b6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1685,7 +1685,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index cf48517944ecf25f6876d0f07713f2fed7e8d8bc..f2579c94ffdbcab604c29ee06398cc1871469b77 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -545,9 +545,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, } IWL_DEBUG_LAR(mvm, - "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", - status, mcc, mcc >> 8, mcc & 0xff, - !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); + "MCC response status: 0x%x. 
new MCC: 0x%x ('%c%c') n_chans: %d\n", + status, mcc, mcc >> 8, mcc & 0xff, n_channels); exit: iwl_free_resp(&cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 8169d1450b3b9b3954ff45e4b32d44fd3062788b..5e1e671d200219579058074f97a607671b56fb77 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u8 supp = 0; + if (he_cap && he_cap->has_he) + return 0; + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) @@ -311,7 +315,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, } void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -320,7 +324,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband; struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, - .max_ch_width = rs_fw_bw_from_sta_bw(sta), + .max_ch_width = update ? + rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 30cfd7d50bc939fae1d0111c2c4694f755c9c947..6f4508d62a97e3b070cc983971d146b34a3c2069 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); + if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, + &tx_resp_rate)) { + WARN_ON_ONCE(1); + return; + } #ifdef CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate but @@ -1276,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. 
reinit rs\n"); - iwl_mvm_rs_rate_init(mvm, sta, info->band); + iwl_mvm_rs_rate_init(mvm, sta, info->band, true); return; } lq_sta->last_tx = jiffies; @@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ table = &lq_sta->lq; lq_hwrate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { + WARN_ON_ONCE(1); + return; + } /* Here we actually compare this rate to the latest LQ command */ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { @@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Collect data for each rate used during failed TX attempts */ for (i = 0; i <= retries; ++i) { lq_hwrate = le32_to_cpu(table->rs_table[i]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, - &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, + &lq_rate)) { + WARN_ON_ONCE(1); + return; + } + /* * Only collect stats if retried rate is in the same RS * table as active/search. @@ -2859,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; @@ -2890,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, @@ -3144,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3224,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_initialize_lq(mvm, sta, lq_sta, band); + rs_initialize_lq(mvm, sta, lq_sta, band, update); } static void rs_drv_rate_update(void *mvm_r, @@ -3244,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band); + iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -3262,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; - rs_rate_from_ucode_rate(ucode_rate, band, &rate); + if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) { + WARN_ON_ONCE(1); + return; + } if (is_mimo(&rate)) lq_cmd->mimo_delim = num_rates - 1; @@ -4098,12 +4110,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band) + enum nl80211_band band, bool update) { if (iwl_mvm_has_tlc_offload(mvm)) - rs_fw_rate_init(mvm, sta, band); + rs_fw_rate_init(mvm, sta, band, update); else - rs_drv_rate_init(mvm, sta, band); + 
rs_drv_rate_init(mvm, sta, band, update); } int iwl_mvm_rate_control_register(void) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index d2cf484e2b73be77bd61006d0aeb77683baec58d..d0f47899f2849505eb60d5343f19c216f8e3fb3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -420,7 +420,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band); + enum nl80211_band band, bool init); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, @@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta); void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta); void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band); + enum nl80211_band band, bool update); int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index bfb163419c679c2a98abb4fc09bf85e4c62c4f75..e6a67bc022090a52f9000a43df05cdef6197b971 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -62,6 +62,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include #include #include #include "iwl-trans.h" @@ -360,7 +361,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); - rx_pkt_status = le32_to_cpup((__le32 *) + rx_pkt_status = get_unaligned_le32((__le32 *) (pkt->data + sizeof(*rx_res) + len)); /* Dont use dev_alloc_skb(), we'll have enough headroom once diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index b53148f972a4a4e5c6e04d34ed7c052144235282..036d1d82d93e7eed2d6d08be91245f62d6d902f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, } /* iwl_mvm_create_skb Adds the rxb to a new skb */ -static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, - u16 len, u8 crypt_len, - struct iwl_rx_cmd_buffer *rxb) +static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; @@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, * present before copying packet data. */ hdrlen += crypt_len; + + if (WARN_ONCE(headlen < hdrlen, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len)) { + /* + * We warn and trace because we want to be able to see + * it in trace-cmd as well. 
+ */ + IWL_DEBUG_RX(mvm, + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", + hdrlen, len, crypt_len); + return -EINVAL; + } + skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } + + return 0; } /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ @@ -1425,7 +1441,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->boottime_ns = ktime_get_boot_ns(); } - iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); + if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { + kfree_skb(skb); + goto out; + } + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); out: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 11ecdf63b7325b22f49402b10f2b1ca654c1fb36..fdabc0198b67d39ea7e0113c73f2288c7ca30bb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1229,7 +1229,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 18db1ed92d9b09741e0fec60e66389bd5af18dea..e850aa504b6084c94c55ad7d37ee79e6da309701 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -440,6 +440,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) rcu_read_unlock(); + /* + * The TX path may have been using this TXQ_ID from the tid_data, + * so make sure it's no longer running so that we can safely reuse + * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE + * above, but nothing guarantees we've stopped using them. Thus, + * without this, we could get to iwl_mvm_disable_txq() and remove + * the queue while still sending frames to it. 
+ */ + synchronize_net(); + return disable_agg_tids; } @@ -3133,10 +3143,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_TKIP: - if (vif->type == NL80211_IFTYPE_AP) { - ret = -EINVAL; - break; - } addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ff193dca2020c99987ca91df4a2d07ef6f58dbfd..449e3d32811a67d7e9fbc8a5ab389aece7423b32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -641,6 +641,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) memcpy(&info, skb->cb, sizeof(info)); + if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) + return -1; + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; @@ -668,7 +671,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { - if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (!ieee80211_is_data(hdr->frame_control)) sta_id = mvmvif->bcast_sta.sta_id; else sta_id = mvmvif->mcast_sta.sta_id; @@ -775,6 +778,36 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, return 0; } +static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + unsigned int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; + u8 ac = tid_to_mac80211_ac[tid]; + unsigned int txf; + int lmac = IWL_LMAC_24G_INDEX; + + if (iwl_mvm_is_cdb_supported(mvm) && + band == NL80211_BAND_5GHZ) + lmac = IWL_LMAC_5G_INDEX; + + /* For HE redirect to trigger based fifos */ + if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + ac += 4; + + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + return min_t(unsigned int, mvmsta->max_amsdu_len, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); +} + static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -787,7 +820,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 snap_ip_tcp, pad; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; - u8 tid, txf; + u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); @@ -826,20 +859,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - max_amsdu_len = mvmsta->max_amsdu_len; - - /* the Tx FIFO to which this A-MSDU will be routed */ - txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); - - /* - * Don't send an AMSDU that will be longer than the TXF. - * Add a security margin of 256 for the TX command + headers. - * We also want to have the start of the next packet inside the - * fifo to be able to send bursts. 
- */ - max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] - - 256); + max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -1405,6 +1425,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; @@ -1434,6 +1455,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + /* + * If we are freeing multiple frames, mark all the frames + * but the first one as acked, since they were acknowledged + * before + * */ + if (skb_freed > 1) + info->flags |= IEEE80211_TX_STAT_ACK; + iwl_mvm_tx_status_check_trigger(mvm, status); info->status.rates[0].count = tx_resp->failure_frame + 1; @@ -1449,11 +1478,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; - /* W/A FW bug: seq_ctl is wrong when the status isn't success */ - if (status != TX_STATUS_SUCCESS) { - struct ieee80211_hdr *hdr = (void *)skb->data; + /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ + if (ieee80211_is_back_req(hdr->frame_control)) + seq_ctl = 0; + else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); - } if (unlikely(!seq_ctl)) { struct ieee80211_hdr *hdr = (void *)skb->data; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index b002a7afb5f591d8434b0858c37610392a2ec5ea..00712205c05f2e9df1127842e1a71abde5d9e4bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -900,20 +900,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /** * iwl_mvm_send_lq_cmd() - Send link quality command - * @init: This command is sent as part of station initialization right - * after station has been added. + * @sync: This command can be sent synchronously. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. */ -int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) +int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) { struct iwl_host_cmd cmd = { .id = LQ_CMD, .len = { sizeof(struct iwl_lq_cmd), }, - .flags = init ? 0 : CMD_ASYNC, + .flags = sync ? 0 : CMD_ASYNC, .data = { lq, }, }; @@ -1805,6 +1804,7 @@ void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) { int mac; + bool low_latency = false; spin_lock_bh(&mvm->tcm.lock); mvm->tcm.ts = jiffies; @@ -1816,10 +1816,23 @@ void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + + if (mvm->tcm.result.low_latency[mac]) + low_latency = true; } /* The TCM data needs to be reset before "paused" flag changes */ smp_mb(); mvm->tcm.paused = false; + + /* + * if the current load is not low or low latency is active, force + * re-evaluation to cover the case of no traffic. 
+ */ + if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) + schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); + else if (low_latency) + schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); + spin_unlock_bh(&mvm->tcm.lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 2146fda8da2fdbdece661ceb3e177ab7c5c2b83e..6783b20d9681bb54773f5bc87e0ddd8bfda202c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -102,13 +102,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); - if (ret) { - dma_free_coherent(trans->dev, - sizeof(*prph_scratch), - prph_scratch, - trans_pcie->prph_scratch_dma_addr); - return ret; - } + if (ret) + goto err_free_prph_scratch; + /* Allocate prph information * currently we don't assign to the prph info anything, but it would get @@ -116,16 +112,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr, GFP_KERNEL); - if (!prph_info) - return -ENOMEM; + if (!prph_info) { + ret = -ENOMEM; + goto err_free_prph_scratch; + } /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); - if (!ctxt_info_gen3) - return -ENOMEM; + if (!ctxt_info_gen3) { + ret = -ENOMEM; + goto err_free_prph_info; + } ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); @@ -164,7 +164,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, memcpy(iml_img, trans->iml, trans->iml_len); - iwl_enable_interrupts(trans); + iwl_enable_fw_load_int_ctx_info(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, @@ -176,6 +176,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); return 0; + +err_free_prph_info: + dma_free_coherent(trans->dev, + sizeof(*prph_info), + prph_info, + trans_pcie->prph_info_dma_addr); + +err_free_prph_scratch: + dma_free_coherent(trans->dev, + sizeof(*prph_scratch), + prph_scratch, + trans_pcie->prph_scratch_dma_addr); + return ret; + } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index b2cd7ef5fc3a9ba3b37351745d6fdedfda985cf9..6f25fd1bbd8f40fb30181613715e09b826cd530f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, trans_pcie->ctxt_info = ctxt_info; - iwl_enable_interrupts(trans); + iwl_enable_fw_load_int_ctx_info(trans); /* Configure debug, if exists */ if (trans->dbg_dest_tlv) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b150da4c6721e6bdb087cd3f5ef887ea0792bd49..844a1009484f6b297752d7e35b3178f8335069f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -518,6 +518,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, @@ -551,6 +601,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, @@ -646,34 +697,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x40A4, 
iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0038, 
iwl9560_2ac_cfg_soc)}, @@ -838,7 +888,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index b63d44b7cd7c7be1e6a0a3fe0081edc5ea4820b7..e9d67ba3e56dd8ad66b6ce48d0c7b170f689bb16 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -102,66 +102,6 @@ struct isr_statistics { u32 unhandled; }; -#define IWL_CD_STTS_OPTIMIZED_POS 0 -#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 -#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 -#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E -#define IWL_CD_STTS_WIFI_STATUS_POS 4 -#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 - -/** - * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) - * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. - * In sniffer mode, when split is used, set in last CD completion. (RX) - * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for - * all CD completion. (RX) - * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX) - */ -enum iwl_completion_desc_transfer_status { - IWL_CD_STTS_UNUSED, - IWL_CD_STTS_UNUSED_2, - IWL_CD_STTS_END_TRANSFER, - IWL_CD_STTS_OVERFLOW, - IWL_CD_STTS_ABORTED, - IWL_CD_STTS_ERROR, -}; - -/** - * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) - * @IWL_CD_STTS_VALID: the packet is valid (RX) - * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) - * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) - * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) - * @IWL_CD_STTS_DUP: duplicate packet (RX) - * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) - * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) - * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) - * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) - * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) - * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) - * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) - * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) - * @IWL_CD_STTS_NOT_USED: completed but not used (RX) - * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) - */ -enum iwl_completion_desc_wifi_status { - IWL_CD_STTS_VALID, - IWL_CD_STTS_FCS_ERR, - IWL_CD_STTS_SEC_KEY_ERR, - IWL_CD_STTS_DECRYPTION_ERR, - IWL_CD_STTS_DUP, - IWL_CD_STTS_ICV_MIC_ERR, - IWL_CD_STTS_INTERNAL_SNAP_ERR, - IWL_CD_STTS_SEC_PORT_FAIL, - IWL_CD_STTS_BA_OLD_SN, - IWL_CD_STTS_QOS_NULL, - IWL_CD_STTS_MAC_HDR_ERR, - IWL_CD_STTS_MAX_RETRANS, - IWL_CD_STTS_EX_LIFETIME, - IWL_CD_STTS_NOT_USED, - IWL_CD_STTS_REPLAY_ERR, -}; - #define IWL_RX_TD_TYPE_MSK 0xff000000 #define IWL_RX_TD_SIZE_MSK 0x00ffffff #define IWL_RX_TD_SIZE_2K BIT(11) @@ -896,6 +836,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } +static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt 
only\n"); + + if (!trans_pcie->msix_enabled) { + /* + * When we'll receive the ALIVE interrupt, the ISR will call + * iwl_enable_fw_load_int_ctx_info again to set the ALIVE + * interrupt (which is not really needed anymore) but also the + * RX interrupt which will allow us to receive the ALIVE + * notification (which is Rx) and continue the flow. + */ + trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_enable_hw_int_msk_msix(trans, + MSIX_HW_INT_CAUSES_REG_ALIVE); + /* + * Leave all the FH causes enabled to get the ALIVE + * notification. + */ + iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); + } +} + static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d017aa2a0a8bd7bed6c75f5f20da83460cca1c7a..80a1a50f5da51034982215162326ce3f73d5c58c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -502,7 +502,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; struct list_head local_empty; - int pending = atomic_xchg(&rba->req_pending, 0); + int pending = atomic_read(&rba->req_pending); IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); @@ -557,11 +557,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) i++; } + atomic_dec(&rba->req_pending); pending--; + if (!pending) { - pending = atomic_xchg(&rba->req_pending, 0); + pending = atomic_read(&rba->req_pending); IWL_DEBUG_RX(trans, - "Pending allocation requests = %d\n", + "Got more pending allocation requests = %d\n", pending); } @@ -573,12 +575,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) spin_unlock(&rba->lock); atomic_inc(&rba->req_ready); + } spin_lock(&rba->lock); /* return unused rbds to the allocator empty list */ list_splice_tail(&local_empty, &rba->rbd_empty); spin_unlock(&rba->lock); + + IWL_DEBUG_RX(trans, "%s, exit.\n", __func__); } /* @@ -1144,6 +1149,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) kfree(trans_pcie->rxq); } +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, + struct iwl_rb_allocator *rba) +{ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * @@ -1175,9 +1188,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. 
Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); @@ -1187,7 +1198,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, - bool emergency) + bool emergency, + int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; @@ -1213,6 +1225,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, .truesize = max_len, }; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + rxcb.status = rxq->cd[i].status; + pkt = rxb_addr(&rxcb); if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { @@ -1377,10 +1392,15 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; + struct iwl_rxq *rxq; u32 r, i, count = 0; bool emergency = false; + if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) + return; + + rxq = &trans_pcie->rxq[queue]; + restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx @@ -1396,17 +1416,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; - - if (unlikely(rxq->used_count == rxq->queue_size / 2)) + /* number of RBDs still waiting for page allocation */ + u32 rb_pending_alloc = + atomic_read(&trans_pcie->rba.req_pending) * + RX_CLAIM_REQ_ALLOC; + + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && + !emergency)) { + iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; + } rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) goto out; IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); - iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); i = (i + 1) & (rxq->queue_size - 1); @@ -1421,17 +1449,13 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - /* Add the remaining empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; - if (rxq->used_count < rxq->queue_size / 3) + if (rb_pending_alloc < rxq->queue_size / 3) emergency = false; rxq->read = i; @@ -1758,26 +1782,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) goto out; } - if (iwl_have_debug_level(IWL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(trans, - "Scheduler finished to transmit the frame/frames.\n"); - isr_stats->sch++; - } + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(trans, + "Scheduler finished to transmit the frame/frames.\n"); + 
isr_stats->sch++; + } - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - IWL_DEBUG_ISR(trans, "Alive interrupt\n"); - isr_stats->alive++; - if (trans->cfg->gen2) { - /* - * We can restock, since firmware configured - * the RFH - */ - iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); - } + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + if (trans->cfg->gen2) { + /* + * We can restock, since firmware configured + * the RFH + */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } + + handled |= CSR_INT_BIT_ALIVE; } /* Safely ignore these bits for debug checks below */ @@ -1896,6 +1920,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) /* Re-enable RF_KILL if it occurred */ else if (handled & CSR_INT_BIT_RF_KILL) iwl_enable_rfkill_int(trans); + /* Re-enable the ALIVE / Rx interrupt if it occurred */ + else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) + iwl_enable_fw_load_int_ctx_info(trans); spin_unlock(&trans_pcie->irq_lock); out: @@ -2040,10 +2067,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) return IRQ_NONE; } - if (iwl_have_debug_level(IWL_DL_ISR)) - IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", - inta_fh, + if (iwl_have_debug_level(IWL_DL_ISR)) { + IWL_DEBUG_ISR(trans, + "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + inta_fh, trans_pcie->fh_mask, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); + if (inta_fh & ~trans_pcie->fh_mask) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt (0x%08x)\n", + inta_fh & ~trans_pcie->fh_mask); + } + + inta_fh &= trans_pcie->fh_mask; if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && inta_fh & MSIX_FH_INT_CAUSES_Q0) { @@ -2083,11 +2118,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } /* After checking FH register check HW register */ - if (iwl_have_debug_level(IWL_DL_ISR)) + if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, - "ISR inta_hw 0x%08x, enabled 0x%08x\n", - inta_hw, + "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", + inta_hw, trans_pcie->hw_mask, iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); + if (inta_hw & ~trans_pcie->hw_mask) + IWL_DEBUG_ISR(trans, + "We got a masked interrupt 0x%08x\n", + inta_hw & ~trans_pcie->hw_mask); + } + + inta_hw &= trans_pcie->hw_mask; /* Alive notification via Rx interrupt will do the real work */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 2bc67219ed3efadd09597a28fe18e39ad5f1d736..31e72e1ff1e267ea5a57780b5b503e94d9b17cdd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -289,6 +289,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) * paging memory cannot be freed included since FW will still use it */ iwl_pcie_ctxt_info_free(trans); + + /* + * Re-enable all the interrupts, including the RF-Kill one, now that + * the firmware is alive. 
+ */ + iwl_enable_interrupts(trans); + mutex_lock(&trans_pcie->mutex); + iwl_pcie_check_hw_rf_kill(trans); + mutex_unlock(&trans_pcie->mutex); } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7d319b6863feb2e14e8702907603c3f13b43de53..b3703c4c3663433ce9d7cc37846b624f9aa4edbc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1747,6 +1747,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 hpm; int err; lockdep_assert_held(&trans_pcie->mutex); @@ -1757,6 +1758,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) return err; } + hpm = iwl_trans_read_prph(trans, HPM_DEBUG); + if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { + if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) & + PREG_WFPM_ACCESS) { + IWL_ERR(trans, + "Error, can not clear persistence bit\n"); + return -EPERM; + } + iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); + } + iwl_trans_pcie_sw_reset(trans); err = iwl_pcie_apm_init(trans); @@ -1830,18 +1842,30 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } +static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + return 0x00FFFFFF; + else + return 0x000FFFFF; +} + static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { + u32 mask = iwl_trans_pcie_prph_msk(trans); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, - ((reg & 0x000FFFFF) | (3 << 24))); + ((reg & mask) | (3 << 24))); return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { + u32 mask = iwl_trans_pcie_prph_msk(trans); + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, - ((addr & 0x000FFFFF) | (3 << 24))); + ((addr & mask) | (3 << 24))); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } @@ -3259,6 +3283,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->reg_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); + + trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!trans_pcie->rba.alloc_wq) { + ret = -ENOMEM; + goto out_free_trans; + } + INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans_pcie->tso_hdr_page) { ret = -ENOMEM; @@ -3391,8 +3424,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + if (cfg == &iwl22000_2ax_cfg_hr) { + if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + trans->cfg = &iwl22000_2ax_cfg_hr; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + trans->cfg = &iwl22000_2ax_cfg_jf; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { + 
IWL_ERR(trans, "RF ID HRCDB is not supported\n"); + ret = -EINVAL; + goto out_no_pci; + } else { + IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n", + CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id)); + ret = -EINVAL; + goto out_no_pci; + } + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); @@ -3443,9 +3494,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->inta_mask = CSR_INI_SET_MASK; } - trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); #ifdef CONFIG_IWLWIFI_PCIE_RTPM trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; @@ -3459,6 +3507,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, iwl_pcie_free_ict(trans); out_no_pci: free_percpu(trans_pcie->tso_hdr_page); + destroy_workqueue(trans_pcie->rba.alloc_wq); +out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b99f33ff912306f5638bb95f9578df88b82aadc4..7b1dff92b7094ff10f8e04fb59d4ea54ee07b87c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -242,27 +242,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; - u16 length, iv_len, amsdu_pad; + u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct page **page_ptr; struct tso_t tso; - /* if the packet is protected, then it must be CCMP or GCMP */ - iv_len = ieee80211_has_protected(hdr->frame_control) ? - IEEE80211_CCMP_HDR_LEN : 0; - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, start_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); - total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * - (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room); @@ -273,14 +269,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, start_hdr = hdr_page->pos; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); *page_ptr = hdr_page->page; - memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); - hdr_page->pos += iv_len; /* - * Pull the ieee80211 header + IV to be able to use TSO core, + * Pull the ieee80211 header to be able to use TSO core, * we will restore it for the tx_status flow. 
*/ - skb_pull(skb, hdr_len + iv_len); + skb_pull(skb, hdr_len); /* * Remove the length of all the headers that we don't actually @@ -355,8 +349,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, } } - /* re -add the WiFi header and IV */ - skb_push(skb, hdr_len + iv_len); + /* re -add the WiFi header */ + skb_push(skb, hdr_len); return 0; @@ -526,7 +520,12 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, hdr_len = ieee80211_hdrlen(hdr->frame_control); - if (amsdu) + /* + * Only build A-MSDUs here if doing so by GSO, otherwise it may be + * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been + * built in the higher layers already. + */ + if (amsdu && skb_shinfo(skb)->gso_size) return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, out_meta, hdr_len, len); @@ -555,18 +554,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen3->len); - } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen2->len); - } - if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); @@ -604,6 +591,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, return -1; } + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen2->len); + } + /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, iwl_pcie_gen2_get_num_tbs(trans, tfd)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 93f0d387688a1314a54a8f17f4c02153dadd802d..b73582ec03a085fa2840a1e6259e64931ef6bfb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, DMA_TO_DEVICE); } + meta->tbs = 0; + if (trans->cfg->use_tfh) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; @@ -1101,7 +1103,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (!iwl_queue_used(txq, last_to_free)) { IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); @@ -1245,11 +1247,11 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) if (idx >= trans->cfg->base_params->max_tfd_queue_size || (!iwl_queue_used(txq, idx))) { - IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, - trans->cfg->base_params->max_tfd_queue_size, - txq->write_ptr, txq->read_ptr); + WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), + "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, idx, + trans->cfg->base_params->max_tfd_queue_size, + txq->write_ptr, txq->read_ptr); return; } diff --git a/drivers/net/wireless/intersil/p54/p54pci.c 
b/drivers/net/wireless/intersil/p54/p54pci.c index 27a49068d32d0a71e90b69c3a3b0289be956ff94..57ad56435dda53ed852f9a4935430fa9cdea7e52 100644 --- a/drivers/net/wireless/intersil/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); - return err; + goto err_put; } mem_addr = pci_resource_start(pdev, 0); @@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev, pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); +err_put: pci_dev_put(pdev); return err; } diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index b0b86f7010610d1d89e37530f58ac7ea4a1fa137..15661da6eedc88d1b5c6e02af6af5957e65ae9b9 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -33,6 +33,8 @@ MODULE_ALIAS("prism54usb"); MODULE_FIRMWARE("isl3886usb"); MODULE_FIRMWARE("isl3887usb"); +static struct usb_driver p54u_driver; + /* * Note: * @@ -921,9 +923,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware, { struct p54u_priv *priv = context; struct usb_device *udev = priv->udev; + struct usb_interface *intf = priv->intf; int err; - complete(&priv->fw_wait_load); if (firmware) { priv->fw = firmware; err = p54u_start_ops(priv); @@ -932,26 +934,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware, dev_err(&udev->dev, "Firmware not found.\n"); } - if (err) { - struct device *parent = priv->udev->dev.parent; - - dev_err(&udev->dev, "failed to initialize device (%d)\n", err); - - if (parent) - device_lock(parent); + complete(&priv->fw_wait_load); + /* + * At this point p54u_disconnect may have already freed + * the "priv" context. Do not use it anymore! + */ + priv = NULL; - device_release_driver(&udev->dev); - /* - * At this point p54u_disconnect has already freed - * the "priv" context. Do not use it anymore! 
- */ - priv = NULL; + if (err) { + dev_err(&intf->dev, "failed to initialize device (%d)\n", err); - if (parent) - device_unlock(parent); + usb_lock_device(udev); + usb_driver_release_interface(&p54u_driver, intf); + usb_unlock_device(udev); } - usb_put_dev(udev); + usb_put_intf(intf); } static int p54u_load_firmware(struct ieee80211_hw *dev, @@ -972,14 +970,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev, dev_info(&priv->udev->dev, "Loading firmware file %s\n", p54u_fwlist[i].fw); - usb_get_dev(udev); + usb_get_intf(intf); err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw, device, GFP_KERNEL, priv, p54u_load_firmware_cb); if (err) { dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " "(%d)!\n", p54u_fwlist[i].fw, err); - usb_put_dev(udev); + usb_put_intf(intf); } return err; @@ -1011,8 +1009,6 @@ static int p54u_probe(struct usb_interface *intf, skb_queue_head_init(&priv->rx_queue); init_usb_anchor(&priv->submitted); - usb_get_dev(udev); - /* really lazy and simple way of figuring out if we're a 3887 */ /* TODO: should just stick the identification in the device table */ i = intf->altsetting->desc.bNumEndpoints; @@ -1053,10 +1049,8 @@ static int p54u_probe(struct usb_interface *intf, priv->upload_fw = p54u_upload_firmware_net2280; } err = p54u_load_firmware(dev, intf); - if (err) { - usb_put_dev(udev); + if (err) p54_free_common(dev); - } return err; } @@ -1072,7 +1066,6 @@ static void p54u_disconnect(struct usb_interface *intf) wait_for_completion(&priv->fw_wait_load); p54_unregister_common(dev); - usb_put_dev(interface_to_usbdev(intf)); release_firmware(priv->fw); p54_free_common(dev); } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 07442ada6dd0e419bf4f29fabd3cb67186e47650..ce2dd06af62e8b987a027d7cb097b08a5147e258 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2889,6 +2889,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + tasklet_hrtimer_init(&data->beacon_timer, + mac80211_hwsim_beacon, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + err = ieee80211_register_hw(hw); if (err < 0) { pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", @@ -2913,10 +2917,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, data->debugfs, data, &hwsim_simulate_radar); - tasklet_hrtimer_init(&data->beacon_timer, - mac80211_hwsim_beacon, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - spin_lock_bh(&hwsim_radio_lock); err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); @@ -3454,7 +3454,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } @@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &hwsim_genl_family, NLM_F_MULTI, HWSIM_CMD_GET_RADIO); - if (!hdr) + if (hdr) { + genl_dump_check_consistent(cb, hdr); + genlmsg_end(skb, hdr); + } else { res = -EMSGSIZE; - genl_dump_check_consistent(cb, hdr); - genlmsg_end(skb, hdr); + } } done: @@ -3712,16 +3714,16 @@ static int __init init_mac80211_hwsim(void) if (err) goto out_unregister_pernet; + err = hwsim_init_netlink(); + if (err) + goto out_unregister_driver; + hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); if (IS_ERR(hwsim_class)) { err = PTR_ERR(hwsim_class); - goto out_unregister_driver; + 
goto out_exit_netlink; } - err = hwsim_init_netlink(); - if (err < 0) - goto out_unregister_driver; - for (i = 0; i < radios; i++) { struct hwsim_new_radio_params param = { 0 }; @@ -3827,6 +3829,8 @@ static int __init init_mac80211_hwsim(void) free_netdev(hwsim_mon); out_free_radios: mac80211_hwsim_free(); +out_exit_netlink: + hwsim_exit_netlink(); out_unregister_driver: platform_driver_unregister(&mac80211_hwsim_driver); out_unregister_pernet: diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 57edfada0665fda7ec68b7f60128eef3af9a5219..c9401c121a14e6dfd11d41aca19fb8d4f25b706b 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) int hw, ap, ap_max = ie[1]; u8 hw_rate; + if (ap_max > MAX_RATES) { + lbs_deb_assoc("invalid rates\n"); + return tlv; + } /* Advance past IE header */ ie += 2; @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; + int hw, i; + u8 rates_max; + u8 *rates; /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, preamble, 1); @@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { - int hw, i; - u8 rates_max = rates_eid[1]; - u8 *rates = cmd.bss.rates; + rates_max = rates_eid[1]; + if (rates_max > MAX_RATES) { + lbs_deb_join("invalid rates"); + goto out; + } + rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index c1f42291873783cb8968a6075c158f348bd97b04..d1984f03fdfca2c0e40a43d10fd485cf7ec38826 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv) if (!cmdarray[i].cmdbuf) { lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); ret = -1; - goto done; + goto free_cmd_array; } } @@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv) init_waitqueue_head(&cmdarray[i].cmdwait_q); lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]); } - ret = 0; + return 0; +free_cmd_array: + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + if (cmdarray[i].cmdbuf) { + kfree(cmdarray[i].cmdbuf); + cmdarray[i].cmdbuf = NULL; + } + } + kfree(priv->cmd_array); + priv->cmd_array = NULL; done: return ret; } diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 39bf85d0ade0ed077b5d85c657d72a7f30698c53..282e409da6894a29a3a600a3c406cfc06d76ee5d 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1183,6 +1183,10 @@ static int if_sdio_probe(struct sdio_func *func, spin_lock_init(&card->lock); card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0); + if (!card->workqueue) { + ret = -ENOMEM; + goto free_before_queue; + } INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1234,6 +1238,7 @@ static int if_sdio_probe(struct sdio_func *func, lbs_remove_card(priv); free: destroy_workqueue(card->workqueue); +free_before_queue: while (card->packets) { packet = card->packets; 
card->packets = card->packets->next; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index c67a8e7be31069febfb0b0d15c77b243fe3485dd..9e82ec12564bb6128471ca2618ffa56481da887b 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8388, "libertas/usb8388_v5.bin", NULL }, { MODEL_8388, "libertas/usb8388.bin", NULL }, { MODEL_8388, "usb8388.bin", NULL }, - { MODEL_8682, "libertas/usb8682.bin", NULL } + { MODEL_8682, "libertas/usb8682.bin", NULL }, + { 0, NULL, NULL } }; static const struct usb_device_id if_usb_table[] = { @@ -456,8 +457,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; - lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) { lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index e92fc5001171714e50bfb36bd195d3d8ef421ced..6ede6168bd85a2b98c1fb7b2246571b93f0ad2ce 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, skb_tail_pointer(skb), MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; - lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC); @@ -605,9 +603,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, { unsigned long flags; - if (recvlength > LBS_CMD_BUFFER_SIZE) { + if (recvlength < MESSAGE_HEADER_LEN || + recvlength > LBS_CMD_BUFFER_SIZE) { lbtf_deb_usbd(&cardp->udev->dev, - "The receive buffer is too large\n"); + "The receive buffer is invalid: %d\n", recvlength); kfree_skb(skb); return; } diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index e2addd8b878b290bbcb6656f6baeedb258400964..5d75c971004b4e480737aa0fe13627afb1e58cb7 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid) "Send delba to tid=%d, %pM\n", tid, rx_reor_tbl_ptr->ta); mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); - return; + goto exit; } } +exit: spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 8e63d14c1e1c57b4a93d430f5174c245f6a69b08..5380fba652cc49ff2a2aef2528b35e674b8f1a4a 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload) * There could be holes in the buffer, which are skipped by the function. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. - * - * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ static void mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, @@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, { int pkt_to_send, i; void *rx_tmp_ptr; + unsigned long flags; pkt_to_send = (start_win > tbl->start_win) ? min((start_win - tbl->start_win), tbl->win_size) : tbl->win_size; for (i = 0; i < pkt_to_send; ++i) { + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_tmp_ptr = NULL; if (tbl->rx_reorder_ptr[i]) { rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (rx_tmp_ptr) mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, } tbl->start_win = start_win; + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, * The start window is adjusted automatically when a hole is located. * Since the buffer is linear, the function uses rotation to simulate * circular buffer. - * - * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, @@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, { int i, j, xchg; void *rx_tmp_ptr; + unsigned long flags; for (i = 0; i < tbl->win_size; ++i) { - if (!tbl->rx_reorder_ptr[i]) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + if (!tbl->rx_reorder_ptr[i]) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); break; + } rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); } + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); /* * We don't have a circular buffer, hence use rotation to simulate * circular buffer @@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, } } tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } /* @@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, * * The function stops the associated timer and dispatches all the * pending packets in the Rx reorder table before deletion. - * - * The caller must hold rx_reorder_tbl_lock spinlock. */ static void mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, @@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, del_timer_sync(&tbl->timer_context.timer); tbl->timer_context.timer_is_set = false; + + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_del(&tbl->list); + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); + kfree(tbl->rx_reorder_ptr); kfree(tbl); @@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, /* * This function returns the pointer to an entry in Rx reordering * table which matches the given TA/TID pair. - * - * The caller must hold rx_reorder_tbl_lock spinlock. 
*/ struct mwifiex_rx_reorder_tbl * mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) { struct mwifiex_rx_reorder_tbl *tbl; + unsigned long flags; - list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) - if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { + if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); return tbl; + } + } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return NULL; } @@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) return; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); - list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) - if (!memcmp(tbl->ta, ta, ETH_ALEN)) + list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) { + if (!memcmp(tbl->ta, ta, ETH_ALEN)) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); mwifiex_del_rx_reorder_entry(priv, tbl); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + } + } spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; @@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) /* * This function finds the last sequence number used in the packets * buffered in Rx reordering table. - * - * The caller must hold rx_reorder_tbl_lock spinlock. */ static int mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx) { struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr; + struct mwifiex_private *priv = ctx->priv; + unsigned long flags; int i; - for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) - if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) { + if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); return i; + } + } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return -1; } @@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t) struct reorder_tmr_cnxt *ctx = from_timer(ctx, t, timer); int start_win, seq_num; - unsigned long flags; ctx->timer_is_set = false; - spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags); seq_num = mwifiex_11n_find_last_seq_num(ctx); - if (seq_num < 0) { - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); + if (seq_num < 0) return; - } mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, start_win); - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags); } /* @@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, * If we get a TID, ta pair which is already present dispatch all the * the packets and move the window size until the ssn */ - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (tbl) { mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* if !tbl then create one */ new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); if (!new_node) @@ -551,20 +570,16 @@ int 
mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, int prev_start_win, start_win, end_win, win_size; u16 pkt_index; bool init_window_shift = false; - unsigned long flags; int ret = 0; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (!tbl) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); if (pkt_type != PKT_TYPE_BAR) mwifiex_11n_dispatch_pkt(priv, payload); return ret; } if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_11n_dispatch_pkt(priv, payload); return ret; } @@ -651,8 +666,6 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, if (!tbl->timer_context.timer_is_set || prev_start_win != tbl->start_win) mwifiex_11n_rxreorder_timer_restart(tbl); - - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } @@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); mwifiex_dbg(priv->adapter, EVENT, "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { @@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, int tid, win_size; struct mwifiex_rx_reorder_tbl *tbl; uint16_t block_ack_param_set; - unsigned long flags; block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); @@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", add_ba_rsp->peer_mac_addr, tid); - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) mwifiex_del_rx_reorder_entry(priv, tbl); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return 0; } win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> BLOCKACKPARAM_WINSIZE_POS; - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tbl) { @@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, else tbl->amsdu = false; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_dbg(priv->adapter, CMD, "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", @@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_for_each_entry_safe(del_tbl_ptr, tmp_node, - &priv->rx_reorder_tbl_ptr, list) + &priv->rx_reorder_tbl_ptr, list) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + } INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); @@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, int tlv_buf_left = len; int ret; u8 *tmp; - unsigned long flags; mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", event_buf, len); @@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_rxba->mac, 
tlv_rxba->tid, tlv_seq_num, tlv_bitmap_len); - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid, tlv_rxba->mac); if (!rx_reor_tbl_ptr) { - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, - flags); mwifiex_dbg(priv->adapter, ERROR, "Can not find rx_reorder_tbl!"); return; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); for (i = 0; i < tlv_bitmap_len; i++) { for (j = 0 ; j < 8; j++) { diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index adc88433faa85a006b558c9f0e359327284c8854..7b74ef71bef1d9f09ca8515146038e246da3fa8b 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -376,11 +376,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, struct mwifiex_power_cfg power_cfg; int dbm = MBM_TO_DBM(mbm); - if (type == NL80211_TX_POWER_FIXED) { + switch (type) { + case NL80211_TX_POWER_FIXED: power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 1; power_cfg.power_level = dbm; - } else { + break; + case NL80211_TX_POWER_LIMITED: + power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 0; + power_cfg.power_level = dbm; + break; + case NL80211_TX_POWER_AUTOMATIC: power_cfg.is_power_auto = 1; + break; } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -4045,16 +4054,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) { dev_err(priv->adapter->dev, "Failed to process hostcmd\n"); + kfree(hostcmd); return -EFAULT; } /* process hostcmd response*/ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len); - if (!skb) + if (!skb) { + kfree(hostcmd); return -ENOMEM; + } err = nla_put(skb, MWIFIEX_TM_ATTR_DATA, hostcmd->len, hostcmd->cmd); if (err) { + kfree(hostcmd); kfree_skb(skb); return -EMSGSIZE; } @@ -4282,11 +4295,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->mgmt_stypes = mwifiex_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP); + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz; if (adapter->config_bands & BAND_A) wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz; @@ -4346,11 +4361,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; - wiphy->features |= NL80211_FEATURE_HT_IBSS | - NL80211_FEATURE_INACTIVITY_TIMER | + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_NEED_OBSS_SCAN; + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) + wiphy->features |= NL80211_FEATURE_HT_IBSS; + if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info)) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index bfe84e55df7762349d65657cb5148457500d43cc..f1522fb1c1e878ae385a04648bae9737f698e1f6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -531,5 +531,8 @@ u8 
mwifiex_adjust_data_rate(struct mwifiex_private *priv, rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ? rx_rate - 1 : rx_rate; + if (rate_index >= MWIFIEX_MAX_AC_RX_RATES) + rate_index = MWIFIEX_MAX_AC_RX_RATES - 1; + return rate_index; } diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index cce70252fd96b5b57791f89f5ca902ce5b00bb0b..cbe4493b32664cba530fc97d744e560dc1934224 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -273,15 +273,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, "total samples = %d\n", atomic_read(&phist_data->num_samples)); - p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M"); - p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n"); - p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M"); - p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n"); + p += sprintf(p, + "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n" + "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n"); if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) { - p += sprintf(p, "44-53=MCS0-9(VHT:BW20)"); - p += sprintf(p, "54-63=MCS0-9(VHT:BW40)"); - p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n"); + p += sprintf(p, + "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n"); } else { p += sprintf(p, "\n"); } @@ -310,7 +308,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) { value = atomic_read(&phist_data->noise_flr[i]); if (value) - p += sprintf(p, "noise_flr[-%02ddBm] = %d\n", + p += sprintf(p, "noise_flr[%02ddBm] = %d\n", (int)(i-128), value); } for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) { diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index b73f99dc5a72e0a78478d6613c6a9b2af93a5906..4842942c550e0f15c252e1f922897c3ce2a0f5c3 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -852,7 +852,7 @@ struct mwifiex_ietypes_chanstats { struct mwifiex_ie_types_wildcard_ssid_params { struct mwifiex_ie_types_header header; u8 max_ssid_length; - u8 ssid[1]; + u8 ssid[]; } __packed; #define TSF_DATA_SIZE 8 @@ -1759,9 +1759,10 @@ struct mwifiex_ie_types_wmm_queue_status { struct ieee_types_vendor_header { u8 element_id; u8 len; - u8 oui[4]; /* 0~2: oui, 3: oui_type */ - u8 oui_subtype; - u8 version; + struct { + u8 oui[3]; + u8 oui_type; + } __packed oui; } __packed; struct ieee_types_wmm_parameter { @@ -1775,6 +1776,9 @@ struct ieee_types_wmm_parameter { * Version [1] */ struct ieee_types_vendor_header vend_hdr; + u8 oui_subtype; + u8 version; + u8 qos_info_bitmap; u8 reserved; struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS]; @@ -1792,6 +1796,8 @@ struct ieee_types_wmm_info { * Version [1] */ struct ieee_types_vendor_header vend_hdr; + u8 oui_subtype; + u8 version; u8 qos_info_bitmap; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 75cbd609d60619c9429093083cb8b3d477c26e41..a3f4a5e92105df46d99ccce441d23ddef87f2ba4 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, } vs_ie = (struct ieee_types_header *)vendor_ie; + if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 > + IEEE_MAX_IE_SIZE) + return -EINVAL; memcpy(ie->ie_buffer + 
le16_to_cpu(ie->ie_length), vs_ie, vs_ie->len + 2); le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); @@ -329,6 +332,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, struct ieee80211_vendor_ie *vendorhdr; u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0; int left_len, parsed_len = 0; + unsigned int token_len; + int err = 0; if (!info->tail || !info->tail_len) return 0; @@ -344,6 +349,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, */ while (left_len > sizeof(struct ieee_types_header)) { hdr = (void *)(info->tail + parsed_len); + token_len = hdr->len + sizeof(struct ieee_types_header); + if (token_len > left_len) { + err = -EINVAL; + goto out; + } + switch (hdr->element_id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: @@ -361,16 +372,19 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, (const u8 *)hdr, - hdr->len + sizeof(struct ieee_types_header))) + token_len)) break; default: - memcpy(gen_ie->ie_buffer + ie_len, hdr, - hdr->len + sizeof(struct ieee_types_header)); - ie_len += hdr->len + sizeof(struct ieee_types_header); + if (ie_len + token_len > IEEE_MAX_IE_SIZE) { + err = -EINVAL; + goto out; + } + memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len); + ie_len += token_len; break; } - left_len -= hdr->len + sizeof(struct ieee_types_header); - parsed_len += hdr->len + sizeof(struct ieee_types_header); + left_len -= token_len; + parsed_len += token_len; } /* parse only WPA vendor IE from tail, WMM IE is configured by @@ -380,15 +394,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, WLAN_OUI_TYPE_MICROSOFT_WPA, info->tail, info->tail_len); if (vendorhdr) { - memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, - vendorhdr->len + sizeof(struct ieee_types_header)); - ie_len += vendorhdr->len + sizeof(struct ieee_types_header); + token_len = vendorhdr->len + sizeof(struct ieee_types_header); + if (ie_len + token_len > IEEE_MAX_IE_SIZE) { + err = -EINVAL; + goto out; + } + memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len); + ie_len += token_len; } - if (!ie_len) { - kfree(gen_ie); - return 0; - } + if (!ie_len) + goto out; gen_ie->ie_index = cpu_to_le16(gen_idx); gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON | @@ -398,13 +414,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL, NULL, NULL)) { - kfree(gen_ie); - return -1; + err = -EINVAL; + goto out; } priv->gen_idx = gen_idx; + + out: kfree(gen_ie); - return 0; + return err; } /* This function parses different IEs-head & tail IEs, beacon IEs, diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 48e154e1865df321e12f4f160d51085d599d8721..0dd592ea6e8332a26829483613c047ad5d77f5aa 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -267,6 +267,7 @@ struct mwifiex_ds_encrypt_key { struct mwifiex_power_cfg { u32 is_power_auto; + u32 is_power_fixed; u32 power_level; }; diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index d87aeff70cefb265c07131335a0bed6792794ff1..c2cb1e711c06ec3dabf0dafeaa71fdd09e54bf92 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, 
memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); + if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) + req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 20cee5c397fb6f4d72dc6378401ee9bcf2af6708..e48b47f425540a7319d7d64ce3360b5bbaca807b 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -960,10 +960,10 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, mac_addr = old_mac_addr; - if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) + if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) { mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); - - if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { + mac_addr += priv->bss_num; + } else if (priv->adapter->priv[0] != priv) { /* Set mac address based on bss_type/bss_num */ mac_addr ^= BIT_ULL(priv->bss_type + 8); mac_addr += priv->bss_num; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index b025ba164412813f6b675b9f096969898c940af2..3643b0372f16f2f8fb3657f6d70e6640c6a0a590 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -124,6 +124,7 @@ enum { #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) +#define WPA_GTK_OUI_OFFSET 2 #define RSN_GTK_OUI_OFFSET 2 #define MWIFIEX_OUI_NOT_PRESENT 0 @@ -1318,6 +1319,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { + if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) + continue; + if ((adapter->priv[i]->bss_num == bss_num) && (adapter->priv[i]->bss_type == bss_type)) break; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 3fe81b2a929ac13797d976015adff288683f3566..991b9cc18000636edd0c7d3c335011c8070f0767 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -691,8 +691,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) skb_put(skb, MAX_EVENT_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, - PCI_DMA_FROMDEVICE)) + PCI_DMA_FROMDEVICE)) { + kfree_skb(skb); + kfree(card->evtbd_ring_vbase); return -1; + } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -1033,8 +1036,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) } skb_put(skb, MWIFIEX_UPLD_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, - PCI_DMA_FROMDEVICE)) + PCI_DMA_FROMDEVICE)) { + kfree_skb(skb); return -1; + } card->cmdrsp_buf = skb; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 8e483b0bc3b173e218057adf4952be3cc38ce79d..85d6d5f3dce5b3fa0169900ec6e1f311dc1ff3e7 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) u8 ret = MWIFIEX_OUI_NOT_PRESENT; if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) { - iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; + iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data + + WPA_GTK_OUI_OFFSET); oui = &mwifiex_wpa_oui[cipher][0]; ret = mwifiex_search_oui_in_ie(iebody, oui); if 
(ret) @@ -1247,6 +1248,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, } switch (element_id) { case WLAN_EID_SSID: + if (element_len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; bss_entry->ssid.ssid_len = element_len; memcpy(bss_entry->ssid.ssid, (current_ptr + 2), element_len); @@ -1256,6 +1259,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_SUPP_RATES: + if (element_len > MWIFIEX_SUPPORTED_RATES) + return -EINVAL; memcpy(bss_entry->data_rates, current_ptr + 2, element_len); memcpy(bss_entry->supported_rates, current_ptr + 2, @@ -1265,6 +1270,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_FH_PARAMS: + if (element_len + 2 < sizeof(*fh_param_set)) + return -EINVAL; fh_param_set = (struct ieee_types_fh_param_set *) current_ptr; memcpy(&bss_entry->phy_param_set.fh_param_set, @@ -1273,6 +1280,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_DS_PARAMS: + if (element_len + 2 < sizeof(*ds_param_set)) + return -EINVAL; ds_param_set = (struct ieee_types_ds_param_set *) current_ptr; @@ -1284,6 +1293,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_CF_PARAMS: + if (element_len + 2 < sizeof(*cf_param_set)) + return -EINVAL; cf_param_set = (struct ieee_types_cf_param_set *) current_ptr; memcpy(&bss_entry->ss_param_set.cf_param_set, @@ -1292,6 +1303,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_IBSS_PARAMS: + if (element_len + 2 < sizeof(*ibss_param_set)) + return -EINVAL; ibss_param_set = (struct ieee_types_ibss_param_set *) current_ptr; @@ -1301,10 +1314,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, break; case WLAN_EID_ERP_INFO: + if (!element_len) + return -EINVAL; bss_entry->erp_flags = *(current_ptr + 2); break; case WLAN_EID_PWR_CONSTRAINT: + if (!element_len) + return -EINVAL; bss_entry->local_constraint = *(current_ptr + 2); bss_entry->sensed_11h = true; break; @@ -1348,15 +1365,22 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, vendor_ie = (struct ieee_types_vendor_specific *) current_ptr; - if (!memcmp - (vendor_ie->vend_hdr.oui, wpa_oui, - sizeof(wpa_oui))) { + /* 802.11 requires at least 3-byte OUI. */ + if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui)) + return -EINVAL; + + /* Not long enough for a match? Skip it. 
*/ + if (element_len < sizeof(wpa_oui)) + break; + + if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui, + sizeof(wpa_oui))) { bss_entry->bcn_wpa_ie = (struct ieee_types_vendor_specific *) current_ptr; bss_entry->wpa_offset = (u16) (current_ptr - bss_entry->beacon_buf); - } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui, + } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui, sizeof(wmm_oui))) { if (total_ie_len == sizeof(struct ieee_types_wmm_parameter) || @@ -1882,15 +1906,17 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, ETH_ALEN)) mwifiex_update_curr_bss_params(priv, bss); - cfg80211_put_bss(priv->wdev.wiphy, bss); - } - if ((chan->flags & IEEE80211_CHAN_RADAR) || - (chan->flags & IEEE80211_CHAN_NO_IR)) { - mwifiex_dbg(adapter, INFO, - "radar or passive channel %d\n", - channel); - mwifiex_save_hidden_ssid_channels(priv, bss); + if ((chan->flags & IEEE80211_CHAN_RADAR) || + (chan->flags & IEEE80211_CHAN_NO_IR)) { + mwifiex_dbg(adapter, INFO, + "radar or passive channel %d\n", + channel); + mwifiex_save_hidden_ssid_channels(priv, + bss); + } + + cfg80211_put_bss(priv->wdev.wiphy, bss); } } } else { @@ -2868,6 +2894,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, vs_param_set->header.len = cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) & 0x00FF) + 2); + if (le16_to_cpu(vs_param_set->header.len) > + MWIFIEX_MAX_VSIE_LEN) { + mwifiex_dbg(priv->adapter, ERROR, + "Invalid param length!\n"); + break; + } + memcpy(vs_param_set->ie, priv->vs_ie[id].ie, le16_to_cpu(vs_param_set->header.len)); *buffer += le16_to_cpu(vs_param_set->header.len) + diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index d49fbd58afa7fefa7a740a5efe98893fa82edb22..bfbe3aa058d93c398ea893f39c9f157de9725912 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev) adapter = card->adapter; - if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { + if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { mwifiex_dbg(adapter, WARN, "device already resumed\n"); return 0; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index b454b5f855034be5667985b4037aeb86ff68dea8..6dd835f1efc21ae0d5145056a67244be4e0e6de9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, "11D: skip setting domain info in FW\n"); return 0; } + + if (country_ie_len > + (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: country_ie_len overflow!, deauth AP\n"); + return -EINVAL; + } + memcpy(priv->adapter->country_code, &country_ie[2], 2); domain_info->country_code[0] = country_ie[2]; @@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false; if (bss) { - if (adapter->region_code == 0x00) - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00 && + mwifiex_process_country_ie(priv, bss)) + return -EINVAL; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), @@ -688,6 +697,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET); if 
(!power_cfg->is_power_auto) { + u16 dbm_min = power_cfg->is_power_fixed ? + dbm : priv->min_tx_power_level; + txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); @@ -702,7 +714,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x03; pg->modulation_class = MOD_CLASS_HR_DSSS; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class OFDM */ @@ -710,7 +722,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x07; pg->modulation_class = MOD_CLASS_OFDM; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class HTBW20 */ @@ -718,7 +730,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_20; pg++; @@ -727,7 +739,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_40; } @@ -1348,7 +1360,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, /* Test to see if it is a WPA IE, if not, then * it is a gen IE */ - if (!memcmp(pvendor_ie->oui, wpa_oui, + if (!memcmp(&pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))) { /* IE is a WPA/WPA2 IE so call set_wpa function */ @@ -1358,7 +1370,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, goto next_ie; } - if (!memcmp(pvendor_ie->oui, wps_oui, + if (!memcmp(&pvendor_ie->oui, wps_oui, sizeof(wps_oui))) { /* Test to see if it is a WPS IE, * if so, enable wps session flag diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 27779d7317fd05d461785019a79daa3849c48400..6058c48d56dc6f901d346a4af35cac4f6f3f989e 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -956,59 +956,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, switch (*pos) { case WLAN_EID_SUPP_RATES: + if (pos[1] > 32) + return; sta_ptr->tdls_cap.rates_len = pos[1]; for (i = 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[i] = pos[i + 2]; break; case WLAN_EID_EXT_SUPP_RATES: + if (pos[1] > 32) + return; basic = sta_ptr->tdls_cap.rates_len; + if (pos[1] > 32 - basic) + return; for (i = 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2]; sta_ptr->tdls_cap.rates_len += pos[1]; break; case WLAN_EID_HT_CAPABILITY: - memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos, + if (pos > end - sizeof(struct ieee80211_ht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_cap)) + return; + /* copy the ie's value into ht_capb*/ + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2, sizeof(struct ieee80211_ht_cap)); sta_ptr->is_11n_enabled = 1; break; case WLAN_EID_HT_OPERATION: - memcpy(&sta_ptr->tdls_cap.ht_oper, pos, + if (pos > end - + sizeof(struct ieee80211_ht_operation) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_operation)) + return; + /* copy the ie's value into ht_oper*/ + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2, sizeof(struct ieee80211_ht_operation)); break; case WLAN_EID_BSS_COEX_2040: + if 
(pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.coex_2040 = pos[2]; break; case WLAN_EID_EXT_CAPABILITY: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > 8) + return; memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], 8)); break; case WLAN_EID_RSN: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header)) + return; memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header))); break; case WLAN_EID_QOS_CAPA: + if (pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.qos_info = pos[2]; break; case WLAN_EID_VHT_OPERATION: - if (priv->adapter->is_hw_11ac_capable) - memcpy(&sta_ptr->tdls_cap.vhtoper, pos, + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - + sizeof(struct ieee80211_vht_operation) - 2) + return; + if (pos[1] != + sizeof(struct ieee80211_vht_operation)) + return; + /* copy the ie's value into vhtoper*/ + memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2, sizeof(struct ieee80211_vht_operation)); + } break; case WLAN_EID_VHT_CAPABILITY: if (priv->adapter->is_hw_11ac_capable) { - memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos, + if (pos > end - + sizeof(struct ieee80211_vht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_vht_cap)) + return; + /* copy the ie's value into vhtcap*/ + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2, sizeof(struct ieee80211_vht_cap)); sta_ptr->is_11ac_enabled = 1; } break; case WLAN_EID_AID: - if (priv->adapter->is_hw_11ac_capable) + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - 4) + return; + if (pos[1] != 2) + return; sta_ptr->tdls_cap.aid = get_unaligned_le16((pos + 2)); + } + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 18f7d9bf30b28edc19acef376675b15d726bcc00..0939a8c8f3ab5cf74a55ae08c3feadc1c486f3ca 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES) + return; memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); rate_len = rate_ie->len; } @@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, params->beacon.tail, params->beacon.tail_len); - if (rate_ie) + if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len) + return; memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); + } return; } @@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; + if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) + return; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index a83c5afc256abcb9f3eca164dd84ce323e3b0eaf..5ce85d5727e4b882ebc37372f03bb49003d1a0c9 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ 
b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); if (!priv->ap_11n_enabled || (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) && (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) { ret = mwifiex_handle_uap_rx_forward(priv, skb); - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); return ret; } - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); /* Reorder and send to kernel */ pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 433c6a16870b6b5ef92b782dd13a3cf95f7a520e..22a42eb0cc0ac84dc4ca51304e953edd051d1287 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -130,7 +130,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, default: mwifiex_dbg(adapter, ERROR, "unknown recv_type %#x\n", recv_type); - return -1; + ret = -1; + goto exit_restore_skb; } break; case MWIFIEX_USB_EP_DATA: @@ -298,6 +299,19 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) struct mwifiex_adapter *adapter = ctx->adapter; struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; + if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { + if (card->rx_cmd_ep == ctx->ep) { + mwifiex_dbg(adapter, INFO, "%s: free rx_cmd skb\n", + __func__); + dev_kfree_skb_any(ctx->skb); + ctx->skb = NULL; + } + mwifiex_dbg(adapter, ERROR, + "%s: card removed/suspended, EP %d rx_cmd URB submit skipped\n", + __func__, ctx->ep); + return -1; + } + if (card->rx_cmd_ep != ctx->ep) { ctx->skb = dev_alloc_skb(size); if (!ctx->skb) { diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 407b9932ca4d7a2295de0f8216b053690dcc746f..429ea2752e6aa6cda8f174615e357c3309a60ec8 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, INFO, "info: WMM Parameter IE: version=%d,\t" "qos_info Parameter Set Count=%d, Reserved=%#x\n", - wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & + wmm_ie->version, wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, wmm_ie->reserved); @@ -977,6 +977,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, "WMM Parameter Set Count: %d\n", wmm_param_ie->qos_info_bitmap & mask); + if (wmm_param_ie->vend_hdr.len + 2 > + sizeof(struct ieee_types_wmm_parameter)) + break; + memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. 
wmm_ie, wmm_param_ie, wmm_param_ie->vend_hdr.len + 2); diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 8e4e9b6919e0274cbdbc3a2cfde72b2e40c8fb6f..ffc565ac21924b590481df8c88eefe422de4e463 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -441,6 +441,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = { #define MWL8K_CMD_UPDATE_STADB 0x1123 #define MWL8K_CMD_BASTREAM 0x1125 +#define MWL8K_LEGACY_5G_RATE_OFFSET \ + (ARRAY_SIZE(mwl8k_rates_24) - ARRAY_SIZE(mwl8k_rates_50)) + static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) { u16 command = le16_to_cpu(cmd); @@ -1016,8 +1019,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status, if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->encoding == RX_ENC_HT)) - status->rate_idx -= 5; + if (!(status->encoding == RX_ENC_HT) && + status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET) + status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET; } else { status->band = NL80211_BAND_2GHZ; } @@ -1124,8 +1128,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->encoding == RX_ENC_HT)) - status->rate_idx -= 5; + if (!(status->encoding == RX_ENC_HT) && + status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET) + status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET; } else { status->band = NL80211_BAND_2GHZ; } diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index b6c5f17dca30a57395af65a4a828e95a58f75e3b..27826217ff762fe66f2029883d080e7fd2ea9387 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -1,6 +1,12 @@ config MT76_CORE tristate +config MT76_LEDS + bool + depends on MT76_CORE + depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS + default y + config MT76_USB tristate depends on MT76_CORE diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index c51da2205b938b87d5fb4b5c4ef0c15c1a5adaa4..cc6840377bc27a6ab9caa8532b72b1138c1d1a41 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -396,10 +396,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, struct page *page = virt_to_head_page(data); int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; + struct skb_shared_info *shinfo = skb_shinfo(skb); - offset += q->buf_offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len, - q->buf_size); + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { + offset += q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, + q->buf_size); + } if (more) return; diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 530e5593765c8b9b0aa40b26e75c197114f995b9..a1529920d8776350a26c61a8256bb570a1784cc5 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) part = np->name; mtd = get_mtd_device_nm(part); - if (IS_ERR(mtd)) - return PTR_ERR(mtd); + if (IS_ERR(mtd)) { + ret = PTR_ERR(mtd); + goto out_put_node; + } - if (size <= sizeof(*list)) - return -EINVAL; + if (size <= sizeof(*list)) { + ret = -EINVAL; + goto out_put_node; + } offset = be32_to_cpup(list); ret = mtd_read(mtd, offset, len, &retlen, 
dev->eeprom.data); put_mtd_device(mtd); if (ret) - return ret; + goto out_put_node; - if (retlen < len) - return -EINVAL; + if (retlen < len) { + ret = -EINVAL; + goto out_put_node; + } - return 0; +out_put_node: + of_node_put(np); + return ret; #else return -ENOENT; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 029d54bce9e81e160fc6ad06a1192552ec817eca..1b5abd4816ed79eb7585db17bf98fa6145c04a38 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -342,9 +342,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, mt76_check_sband(dev, NL80211_BAND_2GHZ); mt76_check_sband(dev, NL80211_BAND_5GHZ); - ret = mt76_led_init(dev); - if (ret) - return ret; + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + ret = mt76_led_init(dev); + if (ret) + return ret; + } return ieee80211_register_hw(hw); } @@ -546,6 +548,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) struct mt76_wcid *wcid = status->wcid; bool ps; + if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { + sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL); + if (sta) + wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv; + } + if (!wcid || !wcid->sta) return; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 7cdb3e740522b72001866d4cfe56d3140531a7c2..da2ba51dec352d498422656e784664a894b81a83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -369,7 +369,7 @@ static void mt76x0_stop_hardware(struct mt76x0_dev *dev) mt76x0_chip_onoff(dev, false, false); } -int mt76x0_init_hardware(struct mt76x0_dev *dev) +int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset) { static const u16 beacon_offsets[16] = { /* 512 byte per beacon */ @@ -382,7 +382,7 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev) dev->beacon_offsets = beacon_offsets; - mt76x0_chip_onoff(dev, true, true); + mt76x0_chip_onoff(dev, true, reset); ret = mt76x0_wait_asic_ready(dev); if (ret) @@ -681,6 +681,7 @@ int mt76x0_register_device(struct mt76x0_dev *dev) ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + ieee80211_hw_set(hw, MFP_CAPABLE); hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index fc9857f61771ccbc9d712c94402c455f498b8853..f9dfe5097b099cf27fca0e27c3ae8e0e17e322fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -279,7 +279,7 @@ void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr); /* Init */ struct mt76x0_dev *mt76x0_alloc_device(struct device *dev); -int mt76x0_init_hardware(struct mt76x0_dev *dev); +int mt76x0_init_hardware(struct mt76x0_dev *dev, bool reset); int mt76x0_register_device(struct mt76x0_dev *dev); void mt76x0_cleanup(struct mt76x0_dev *dev); void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 5da7bfbe907ff65c8ac3b871a468ee4e287bba5c..924c761f34fd9fbb856e58dc19a38864bc539d2a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -757,10 +757,10 @@ 
__mt76x0_phy_set_channel(struct mt76x0_dev *dev, /* Vendor driver don't do it */ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ + mt76x0_vco_cal(dev, channel); if (scan) - mt76x0_vco_cal(dev, channel); + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); - mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); mt76x0_phy_set_chan_pwr(dev, channel); dev->mt76.chandef = *chandef; @@ -793,9 +793,8 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) mt76_wr(dev, MT_TX_ALC_CFG_0, 0); usleep_range(500, 700); - reg_val = mt76_rr(dev, 0x2124); - reg_val &= 0xffffff7e; - mt76_wr(dev, 0x2124, reg_val); + reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0); @@ -806,7 +805,7 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz); mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz); - mt76_wr(dev, 0x2124, reg_val); + mt76_wr(dev, MT_BBP(IBI, 9), reg_val); mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); msleep(100); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c index 751b49c28ae53f0fbe255ac6d6a3216e1a4eef1a..c45d05d5aab1d207fb319d6bc43da72e36a2a134 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c @@ -166,7 +166,7 @@ void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, if (sta) { msta = (struct mt76_sta *) sta->drv_priv; wcid = &msta->wcid; - } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) { + } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != 0xff)) { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; wcid = &mvif->group_wcid; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 54ae1f113be23dd51b1ab7fafb79bd3d991af893..5aacb1f6a841d0720b5372112b2f3b325b598597 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -300,7 +300,7 @@ static int mt76x0_probe(struct usb_interface *usb_intf, if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n"); - ret = mt76x0_init_hardware(dev); + ret = mt76x0_init_hardware(dev, true); if (ret) goto err; @@ -354,7 +354,7 @@ static int mt76x0_resume(struct usb_interface *usb_intf) struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); int ret; - ret = mt76x0_init_hardware(dev); + ret = mt76x0_init_hardware(dev, false); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c index 374cc655c11d7a8e246ea0839c0c4db5459503fe..16e6b6970e28491b081a08bac06954d1994ce1cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -799,7 +799,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) /* enable detection*/ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16); - mt76_wr(dev, 0x212c, 0x0c350001); + mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001); } void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) @@ -842,7 +842,11 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(DFS, 0), 0); /* clear detector status */ mt76_wr(dev, MT_BBP(DFS, 1), 0xf); - mt76_wr(dev, 0x212c, 0); + if (mt76_chip(&dev->mt76) == 0x7610 || + mt76_chip(&dev->mt76) == 0x7630) + mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081); + else + mt76_wr(dev, 
MT_BBP(IBI, 11), 0); mt76x2_irq_disable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index b814391f79ac8b5a63d53ab3d7f4a55633785186..03b103c45d69bc8708fc34135d26b097ea437cc1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -581,8 +581,10 @@ int mt76x2_register_device(struct mt76x2_dev *dev) mt76x2_dfs_init_detector(dev); /* init led callbacks */ - dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; - dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; + } ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, ARRAY_SIZE(mt76x2_rates)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c index 324b2a4b8b67cf3bec9c4006a37063f2bad75a31..54a9e1dfaf7a403a2a15e597177edb847d75c1e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c @@ -72,6 +72,9 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) { u32 val; + if (!enable) + goto out; + val = mt76_rr(dev, MT_WLAN_FUN_CTRL); val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; @@ -87,6 +90,7 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) mt76_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); +out: mt76x2_set_wlan_state(dev, enable); } EXPORT_SYMBOL_GPL(mt76x2_reset_wlan); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 23cf437d14f96cbe3d39cd3fcb3a81ba4ae5b9a6..1a49d1be042dbc2245202bfd8efe150ca0712b8e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -128,8 +128,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) if (skb) { ret = mt76_write_beacon(dev, beacon_addr, skb); if (!ret) - dev->beacon_data_mask |= BIT(bcn_idx) & - dev->beacon_mask; + dev->beacon_data_mask |= BIT(bcn_idx); } else { dev->beacon_data_mask &= ~BIT(bcn_idx); for (i = 0; i < beacon_len; i += 4) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c index 6542644bc3259a6e12cff1d05242895eb81adaf3..cec31f0c3017b1b65084c4b9569a73e0514d45e7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c @@ -402,7 +402,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, ccmp_pn[6] = pn >> 32; ccmp_pn[7] = pn >> 40; txwi->iv = *((__le32 *)&ccmp_pn[0]); - txwi->eiv = *((__le32 *)&ccmp_pn[1]); + txwi->eiv = *((__le32 *)&ccmp_pn[4]); } spin_lock_bh(&dev->mt76.lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c index e66f047ea4481b2b4a13b670528360625e010a97..26cfda24ce0854c15ce3c535004449e22c2893cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c @@ -53,6 +53,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + mt76x2_reset_wlan(dev, false); dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(dev->mt76.dev, 
"ASIC revision: %08x\n", dev->mt76.rev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c index 9fd6ab4cbb949e617ee64527a9e2fa6a12f6c3fd..ca68dd184489be9e29362ec878c2fee7f08efc94 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c @@ -232,9 +232,9 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) mt76_wr(dev, MT_TX_PWR_CFG_7, mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8])); mt76_wr(dev, MT_TX_PWR_CFG_8, - mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0)); + mt76x2_tx_power_mask(t.ht[14], 0, t.vht[8], t.vht[8])); mt76_wr(dev, MT_TX_PWR_CFG_9, - mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0)); + mt76x2_tx_power_mask(t.ht[6], 0, t.vht[8], t.vht[8])); } EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c index 36afb166fa3ffd29f24414e462961bca03545a6e..c0ca0df84ed8b86aa029c5edbe554f53888fe4cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c @@ -32,7 +32,7 @@ void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, msta = (struct mt76x2_sta *)control->sta->drv_priv; wcid = &msta->wcid; /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != -1) + if (!info->control.hw_key && wcid->hw_key_idx != 0xff) control->sta = NULL; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c index 1428cfdee5795495c5df667c85abd7ab330080ce..9594433234cc30eba373549fc2e977548fd82a44 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c @@ -107,16 +107,24 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) mt76u_mcu_complete_urb, &usb->mcu.cmpl); if (err < 0) - return err; + goto err; err = mt76u_submit_rx_buffers(&dev->mt76); if (err < 0) - return err; + goto err; tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - return mt76x2u_init_hardware(dev); + err = mt76x2u_init_hardware(dev); + if (err < 0) + goto err; + + return 0; + +err: + mt76x2u_cleanup(dev); + return err; } MODULE_DEVICE_TABLE(usb, mt76x2u_device_table); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index af48d43bb7dca0ee7a104fa71b6c194aab1788c7..227e5ebfe3dc2c1bd0f146bb88387d1d4554b02e 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -148,7 +148,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - if (!ieee80211_is_data_qos(hdr->frame_control)) + if (!ieee80211_is_data_qos(hdr->frame_control) || + !ieee80211_is_data_present(hdr->frame_control)) return; mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; @@ -385,7 +386,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct ieee80211_txq *txq = sta->txq[i]; - struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; + struct mt76_txq *mtxq; + + if (!txq) + continue; + + mtxq = (struct mt76_txq *)txq->drv_priv; spin_lock_bh(&mtxq->hwq->lock); mtxq->send_bar = mtxq->aggr && send_bar; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 
79e59f2379a26e116f2026f43d403dd625318f0a..8d40e92fb6f270abb64bd3571e05bf538c3b1183 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -796,16 +796,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev) err = mt76u_alloc_rx(dev); if (err < 0) - goto err; - - err = mt76u_alloc_tx(dev); - if (err < 0) - goto err; + return err; - return 0; -err: - mt76u_queues_deinit(dev); - return err; + return mt76u_alloc_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_alloc_queues); diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 7f3e3983b781d72af83b0fc47006b1a2ff34078c..47cebb2ec05c5a3939771b2c041e5c74ac8ca3f9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb) struct mt7601u_rx_queue *q = &dev->rx_q; unsigned long flags; - spin_lock_irqsave(&dev->rx_lock, flags); + /* do no schedule rx tasklet if urb has been unlinked + * or the device has been removed + */ + switch (urb->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case -ENOENT: + return; + default: + dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", + urb->status); + /* fall through */ + case 0: + break; + } - if (mt7601u_urb_has_error(urb)) - dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status); + spin_lock_irqsave(&dev->rx_lock, flags); if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) goto out; @@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb) struct sk_buff *skb; unsigned long flags; - spin_lock_irqsave(&dev->tx_lock, flags); + switch (urb->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case -ENOENT: + return; + default: + dev_err_ratelimited(dev->dev, "tx urb failed: %d\n", + urb->status); + /* fall through */ + case 0: + break; + } - if (mt7601u_urb_has_error(urb)) - dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status); + spin_lock_irqsave(&dev->tx_lock, flags); if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) goto out; skb = q->e[q->start].skb; + q->e[q->start].skb = NULL; trace_mt_tx_dma_done(dev, skb); __skb_queue_tail(&dev->tx_skb_done, skb); @@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, static void mt7601u_kill_rx(struct mt7601u_dev *dev) { int i; - unsigned long flags; - - spin_lock_irqsave(&dev->rx_lock, flags); - - for (i = 0; i < dev->rx_q.entries; i++) { - int next = dev->rx_q.end; - spin_unlock_irqrestore(&dev->rx_lock, flags); - usb_poison_urb(dev->rx_q.e[next].urb); - spin_lock_irqsave(&dev->rx_lock, flags); - } - - spin_unlock_irqrestore(&dev->rx_lock, flags); + for (i = 0; i < dev->rx_q.entries; i++) + usb_poison_urb(dev->rx_q.e[i].urb); } static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, @@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q) { int i; - WARN_ON(q->used); - for (i = 0; i < q->entries; i++) { usb_poison_urb(q->e[i].urb); + if (q->e[i].skb) + mt7601u_tx_status(q->dev, q->e[i].skb); usb_free_urb(q->e[i].urb); } } diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h index 662d12703b69cd82832623dfec7838630f1d3cb0..57b503ae63f1695f2d65733436ad4f198d7085b0 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h @@ -17,7 +17,7 @@ struct mt7601u_dev; -#define MT7601U_EE_MAX_VER 0x0c +#define MT7601U_EE_MAX_VER 0x0d #define MT7601U_EEPROM_SIZE 256 
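/*
 * Editor's illustrative sketch -- not part of the patch. The mt7601u
 * dma.c hunks earlier in this section change the RX/TX URB completion
 * handlers to return immediately when the URB was unlinked or the device
 * is gone, and to rate-limit the logging of real errors, before any
 * driver state is touched. A minimal, self-contained form of that
 * pattern is shown below; example_dev, example_rx_complete and rx_lock
 * are hypothetical names, not symbols from the driver.
 */
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/device.h>

struct example_dev {			/* hypothetical per-device state */
	spinlock_t rx_lock;
};

static void example_rx_complete(struct urb *urb)
{
	struct example_dev *dev = urb->context;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:		/* URB was unlinked */
	case -ESHUTDOWN:		/* device has been removed */
	case -ENOENT:			/* URB was killed */
		return;			/* do not reschedule or resubmit */
	default:
		dev_err_ratelimited(&urb->dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	/* hand the completed buffer to the RX path here */
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}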
#define MT7601U_DEFAULT_TX_POWER 6 diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c index 3600e911a63e85d23ecc15f238c42db66865472d..4d81c45722fbb756dc3c91fa8559772dc8738fda 100644 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) info->status.rates[0].idx = -1; info->flags |= IEEE80211_TX_STAT_ACK; - spin_lock(&dev->mac_lock); + spin_lock_bh(&dev->mac_lock); ieee80211_tx_status(dev->hw, skb); - spin_unlock(&dev->mac_lock); + spin_unlock_bh(&dev->mac_lock); } static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 4aa332f4646b1b79396b55ca6c3c2d2b552debdf..ff8a46c9595e1bf1d2d6bded95c40425ab81ce51 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -521,9 +521,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev, int ret; ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr); - if (ret) - pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", - vif->mac->macid, vif->vifid, key_index, pairwise); + if (ret) { + if (ret == -ENOENT) { + pr_debug("VIF%u.%u: key index %d out of bounds\n", + vif->mac->macid, vif->vifid, key_index); + } else { + pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", + vif->mac->macid, vif->vifid, + key_index, pairwise); + } + } return ret; } @@ -1109,6 +1116,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN)) + wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN; + #ifdef CONFIG_PM if (macinfo->wowlan) wiphy->wowlan = macinfo->wowlan; @@ -1123,6 +1133,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } + if (mac->macinfo.extended_capabilities_len) { + wiphy->extended_capabilities = + mac->macinfo.extended_capabilities; + wiphy->extended_capabilities_mask = + mac->macinfo.extended_capabilities_mask; + wiphy->extended_capabilities_len = + mac->macinfo.extended_capabilities_len; + } + strlcpy(wiphy->fw_version, hw_info->fw_version, sizeof(wiphy->fw_version)); wiphy->hw_version = hw_info->hw_version; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index ae9e773005339b5ef22f23d5593297ffcbe04a7a..734844b34c2667fc77a5667906ecf5acea2eea93 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -544,6 +544,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst, rate_dst->flags |= RATE_INFO_FLAGS_MCS; else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS) rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI) + rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI; } static void @@ -1353,8 +1356,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, ext_capa_mask = NULL; } - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); mac->macinfo.extended_capabilities = ext_capa; 
mac->macinfo.extended_capabilities_mask = ext_capa_mask; mac->macinfo.extended_capabilities_len = ext_capa_len; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 19abbc4e23e068498118b562833197c053e999c4..08928d5e252d7a83944658ffd1bbb6540bcd52bc 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac) } } +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac) +{ + if (mac->macinfo.extended_capabilities_len) { + kfree(mac->macinfo.extended_capabilities); + mac->macinfo.extended_capabilities = NULL; + + kfree(mac->macinfo.extended_capabilities_mask); + mac->macinfo.extended_capabilities_mask = NULL; + + mac->macinfo.extended_capabilities_len = 0; + } +} + static void qtnf_vif_reset_handler(struct work_struct *work) { struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); @@ -493,8 +506,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) } qtnf_mac_iface_comb_free(mac); - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); kfree(mac->macinfo.wowlan); wiphy_free(wiphy); bus->mac[macid] = NULL; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a1e338a1f055a7ea906c5fc8ecfd979db84efe03..ecb5c41c8ed7618467d6a4bbdc47fc50182330aa 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -151,6 +151,7 @@ struct qtnf_hw_info { struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac); void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac); +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac); struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus); int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv, const char *name, unsigned char name_assign_type); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 99d37e3efba634e0e649349259eeaf5b5a4a72b9..c5ae4ea9a47a9202a944ffc6c0b4daf997b56c1d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -71,6 +71,7 @@ struct qlink_msg_header { * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address * Randomization in probe requests. + * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning. 
*/ enum qlink_hw_capab { QLINK_HW_CAPAB_REG_UPDATE = BIT(0), @@ -78,6 +79,7 @@ enum qlink_hw_capab { QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), QLINK_HW_CAPAB_PWR_MGMT = BIT(4), + QLINK_HW_CAPAB_OBSS_SCAN = BIT(5), }; enum qlink_iface_type { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index a567bc273ffc6c725473710ecea9c82e5076284e..5c86b657aa9af64a5b0c55cfd7cff11cc41aef37 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1440,14 +1440,20 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev, offset = MAC_IVEIV_ENTRY(key->hw_key_idx); - memset(&iveiv_entry, 0, sizeof(iveiv_entry)); - if ((crypto->cipher == CIPHER_TKIP) || - (crypto->cipher == CIPHER_TKIP_NO_MIC) || - (crypto->cipher == CIPHER_AES)) - iveiv_entry.iv[3] |= 0x20; - iveiv_entry.iv[3] |= key->keyidx << 6; + if (crypto->cmd == SET_KEY) { + rt2800_register_multiread(rt2x00dev, offset, + &iveiv_entry, sizeof(iveiv_entry)); + if ((crypto->cipher == CIPHER_TKIP) || + (crypto->cipher == CIPHER_TKIP_NO_MIC) || + (crypto->cipher == CIPHER_AES)) + iveiv_entry.iv[3] |= 0x20; + iveiv_entry.iv[3] |= key->keyidx << 6; + } else { + memset(&iveiv_entry, 0, sizeof(iveiv_entry)); + } + rt2800_register_multiwrite(rt2x00dev, offset, - &iveiv_entry, sizeof(iveiv_entry)); + &iveiv_entry, sizeof(iveiv_entry)); } int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, @@ -1636,6 +1642,25 @@ int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2800_sta_remove); +void rt2800_pre_reset_hw(struct rt2x00_dev *rt2x00dev) +{ + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + struct data_queue *queue = rt2x00dev->bcn; + struct queue_entry *entry; + int i, wcid; + + for (wcid = WCID_START; wcid < WCID_END; wcid++) { + drv_data->wcid_to_sta[wcid - WCID_START] = NULL; + __clear_bit(wcid - WCID_START, drv_data->sta_ids); + } + + for (i = 0; i < queue->limit; i++) { + entry = &queue->entries[i]; + clear_bit(ENTRY_BCN_ASSIGNED, &entry->flags); + } +} +EXPORT_SYMBOL_GPL(rt2800_pre_reset_hw); + void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { @@ -5546,15 +5571,22 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) * ASIC will keep garbage value after boot, clear encryption keys. */ for (i = 0; i < 4; i++) - rt2800_register_write(rt2x00dev, - SHARED_KEY_MODE_ENTRY(i), 0); + rt2800_register_write(rt2x00dev, SHARED_KEY_MODE_ENTRY(i), 0); for (i = 0; i < 256; i++) { rt2800_config_wcid(rt2x00dev, NULL, i); rt2800_delete_wcid_attr(rt2x00dev, i); - rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); } + /* + * Clear encryption initialization vectors on start, but keep them + * for watchdog reset. Otherwise we will have wrong IVs and not be + * able to keep connections after reset. 
+ */ + if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags)) + for (i = 0; i < 256; i++) + rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); + /* * Clear all beacons */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 51d9c2a932cc4181efc4ac1248da5133a3c68836..18c90253cfb13d1bf7c2a39bc55b848d0edb26c4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -254,5 +254,6 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev, unsigned short *txwi_size, unsigned short *rxwi_size); +void rt2800_pre_reset_hw(struct rt2x00_dev *rt2x00dev); #endif /* RT2800LIB_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 71b1affc38856be9cb6ef8a584b6244463c1a733..39c1d7356428b81e52ae0ca6902d3f7b92263700 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -52,6 +52,11 @@ static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); +void rt2x00lib_pre_reset_hw(struct rt2x00_dev *rt2x00dev) +{ + rt2800_pre_reset_hw(rt2x00dev); +} + static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index a502816214ab0b53cecf749c42c6c1c2440c8812..6343fec502f49205563af657cf16f25293324f96 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -44,6 +44,11 @@ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); +void rt2x00lib_pre_reset_hw(struct rt2x00_dev *rt2x00dev) +{ + rt2800_pre_reset_hw(rt2x00dev); +} + static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 98a7313fea4aeee3bb49d01d871ab309b60f61f3..6dbcd1875faafa61a037520e4c0bb18d2320a683 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -46,6 +46,11 @@ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); +void rt2x00lib_pre_reset_hw(struct rt2x00_dev *rt2x00dev) +{ + rt2800_pre_reset_hw(rt2x00dev); +} + static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index a279a4363bc15a2e0f502dd2d0428291df59eba3..8aba6ea7d4e910f64f107fbdd11a48fa6348ede4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -665,6 +665,7 @@ enum rt2x00_state_flags { DEVICE_STATE_STARTED, DEVICE_STATE_ENABLED_RADIO, DEVICE_STATE_SCANNING, + DEVICE_STATE_RESET, /* * Driver configuration @@ -672,7 +673,6 @@ enum rt2x00_state_flags { CONFIG_CHANNEL_HT40, CONFIG_POWERSAVING, CONFIG_HT_DISABLED, - CONFIG_QOS_DISABLED, CONFIG_MONITORING, /* @@ -1416,6 +1416,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, u32 
rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif); void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); +void rt2x00lib_pre_reset_hw(struct rt2x00_dev *rt2x00dev); /* * Interrupt context handlers. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 357c0941aaad4748fd5b69871359f5cb0475216f..dc5317328b801f864c3b1f934905431689e2f1d8 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -112,6 +112,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); + rt2x00queue_stop_queue(rt2x00dev->bcn); /* * Disable radio. @@ -1265,12 +1266,23 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) return 0; } +void __weak rt2x00lib_pre_reset_hw(struct rt2x00_dev *rt2x00dev) +{ +} + int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) { - int retval; + int retval = 0; - if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) - return 0; + if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { + /* + * This is special case for ieee80211_restart_hw(), otherwise + * mac80211 never call start() two times in row without stop(); + */ + set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); + rt2x00lib_pre_reset_hw(rt2x00dev); + rt2x00lib_stop(rt2x00dev); + } /* * If this is the first interface which is added, @@ -1278,27 +1290,30 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) */ retval = rt2x00lib_load_firmware(rt2x00dev); if (retval) - return retval; + goto out; /* * Initialize the device. */ retval = rt2x00lib_initialize(rt2x00dev); if (retval) - return retval; + goto out; rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; + rt2x00dev->intf_beaconing = 0; /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); if (retval) - return retval; + goto out; set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); - return 0; +out: + clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); + return retval; } void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) @@ -1315,6 +1330,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; + rt2x00dev->intf_beaconing = 0; } static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index fa2fd64084ac91c497c0b32f9f465af30d5ad076..99c02fd077d22312059ce4a00e9059bb53d134cb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -589,6 +589,17 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, */ if (changes & BSS_CHANGED_BEACON_ENABLED) { mutex_lock(&intf->beacon_skb_mutex); + + /* + * Clear the 'enable_beacon' flag and clear beacon because + * the beacon queue has been stopped after hardware reset. 
+ */ + if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) && + intf->enable_beacon) { + intf->enable_beacon = false; + rt2x00queue_clear_beacon(rt2x00dev, vif); + } + if (!bss_conf->enable_beacon && intf->enable_beacon) { rt2x00dev->intf_beaconing--; intf->enable_beacon = false; @@ -642,18 +653,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, rt2x00dev->intf_associated--; rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); - - clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); } - /* - * Check for access point which do not support 802.11e . We have to - * generate data frames sequence number in S/W for such AP, because - * of H/W bug. - */ - if (changes & BSS_CHANGED_QOS && !bss_conf->qos) - set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); - /* * When the erp information has changed, we should perform * additional configuration steps. For all other changes we are done. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 710e9641552e8ed65f5229f7883242a4b92970b1..85e320178a0e29c9aef05835caa77a889181cf75 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { /* * rt2800 has a H/W (or F/W) bug, device incorrectly increase - * seqno on retransmited data (non-QOS) frames. To workaround - * the problem let's generate seqno in software if QOS is - * disabled. + * seqno on retransmitted data (non-QOS) and management frames. + * To workaround the problem let's generate seqno in software. + * Except for beacons which are transmitted periodically by H/W + * hence hardware has to assign seqno for them. 
*/ - if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) - __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); - else + if (ieee80211_is_beacon(hdr->frame_control)) { + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); /* H/W will generate sequence number */ return; + } + + __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } /* diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 9a1d15b3ce4535540184d34a5600be131ba329dd..518caaaf8a98765647b81cbc4f099f29d7a1534b 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -444,12 +444,13 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev) skb_queue_tail(&priv->rx_queue, skb); usb_anchor_urb(entry, &priv->anchored); ret = usb_submit_urb(entry, GFP_KERNEL); - usb_put_urb(entry); if (ret) { skb_unlink(skb, &priv->rx_queue); usb_unanchor_urb(entry); + usb_put_urb(entry); goto err; } + usb_put_urb(entry); } return ret; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c index c2d5b495c179a1021dd4cd4221c0032f3a99e34a..c089540116fa72e6952c5a2670aaf32cb99a53e8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c @@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev, led->dev = dev; led->ledpin = ledpin; led->is_radio = is_radio; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 8828baf26e7b88f0e367788a00e99345921cc84d..47c2bfe06d030bc2c5225c61e8c021d91427220f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1349,6 +1349,7 @@ struct rtl8xxxu_fileops { u8 has_s0s1:1; u8 has_tx_report:1; u8 gen2_thermal_meter:1; + u8 needs_full_init:1; u32 adda_1t_init; u32 adda_1t_path_on; u32 adda_2t_path_on_a; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 26b674aca1258bedcb8cec90b943d46cac664aea..14e207f2466caba92967186481dc417dabbf8be7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1673,6 +1673,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = { .has_s0s1 = 1, .has_tx_report = 1, .gen2_thermal_meter = 1, + .needs_full_init = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 505ab1b055ff43cecd95dabf6634bfbc4f7c6dad..b2e1523b4dc14a5c8d57bbfe33c4c7c4f57d3ad2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3905,6 +3905,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) else macpower = true; + if (fops->needs_full_init) + macpower = false; + ret = fops->power_on(priv); if (ret < 0) { dev_warn(dev, "%s: Failed power on\n", __func__); @@ -5450,6 +5453,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); + usb_free_urb(urb); goto error; } @@ -5691,6 
+5695,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index f4122c8fdd9777e852ac1bc0f01d9cedcde6c84a..ef9b502ce576b04bbcc562313957bd3619f14451 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) if (rtl_c2h_fast_cmd(hw, skb)) { rtl_c2h_content_parsing(hw, skb); + kfree_skb(skb); return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b026e80940a4dc6fa57d790ca9787b59a57d7c06..6fbf8845a2ab6d03220df3d3735ab2113dc06fcc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -1324,13 +1324,13 @@ bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv) switch (rtlpriv->rtlhal.interface) { case INTF_PCI: - wifionly_cfg->chip_interface = BTC_INTF_PCI; + wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI; break; case INTF_USB: - wifionly_cfg->chip_interface = BTC_INTF_USB; + wifionly_cfg->chip_interface = WIFIONLY_INTF_USB; break; default: - wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN; + wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 479a4cfc245d349e105457845719ca79b7ebb10d..5f998ea2d5a67db2fb74d0216a3d0100c49ccc61 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -775,6 +775,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == @@ -869,6 +872,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 63874512598bb7cccacff2c5850a584db2de58a5..b5f91c994c7989782ebe3c838b8f57db555931a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -622,6 +622,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index f3bff66e85d0c391bba7e19809ce776be535914c..81ec0e6e07c1fff99b0e7574874763f292a94ddf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -646,6 +646,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); if (cmd_send_packet) 
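/*
 * Editor's illustrative sketch -- not part of the patch. The rtl8188ee
 * and rtl8192c hunks just above (and the matching rtl8192ee, rtl8723ae,
 * rtl8723be and rtl8821ae hunks further down) all add the same guard:
 * dev_alloc_skb() can fail under memory pressure, and without the check
 * the very next skb_put_data() would dereference a NULL pointer. The
 * helper below shows the minimal shape of the fix; the function name and
 * payload are hypothetical.
 */
#include <linux/skbuff.h>

static void example_send_reserved_page(const void *pkt, unsigned int len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len);
	if (!skb)			/* allocation failed: give up quietly */
		return;

	skb_put_data(skb, pkt, len);

	/*
	 * In the drivers the skb is handed to rtl_cmd_send_packet(), which
	 * takes ownership; this sketch simply frees it again.
	 */
	kfree_skb(skb);
}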
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 1e60f70481f58c1c9b8aa262424cb9b56f470c05..8c60a84941d550e0f8d59fb8a2ef8b485f1874dc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1556,6 +1556,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) * This is maybe necessary: * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb); */ + dev_kfree_skb(skb); + return true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 85cedd083d2b89cd8870485e45b6ac285fa1e2cb..75bfa9dfef4aa2d5f90eab8331807216b7652afc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw) rtl_read_byte(rtlpriv, FW_MAC1_READY)); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n", + "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", rtl_read_dword(rtlpriv, REG_MCUFWDL)); return -1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 80123fd9722198afd5ce68d816ac0283626f2fce..ee5ff7255090eef986b93753da38c90e0c9a2e64 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1198,6 +1198,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; } void rtl92de_disable_interrupt(struct ieee80211_hw *hw) @@ -1207,7 +1208,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw) @@ -1373,7 +1374,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw) bcn_interval = mac->beacon_interval; atim_window = 2; - /*rtl92de_disable_interrupt(hw); */ + rtl92de_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); @@ -1393,9 +1394,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n", bcn_interval); - /* rtl92de_disable_interrupt(hw); */ + rtl92de_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); - /* rtl92de_enable_interrupt(hw); */ + rtl92de_enable_interrupt(hw); } void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index d5ba2bace79bb480bd9285b8cf0d146b01c82c24..2b0c0308d2817ba0951527d50e1534a5f22cfa4f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -238,6 +238,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .led_control = rtl92de_led_control, .set_desc = rtl92de_set_desc, .get_desc = rtl92de_get_desc, + .is_tx_desc_closed = rtl92de_is_tx_desc_closed, .tx_polling = rtl92de_tx_polling, .enable_hw_sec = 
rtl92de_enable_hw_security_config, .set_key = rtl92de_set_key, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index d7b023cf74001db24145a5454950074e0ff97100..76f12247184aa4ddd94d229f7ad2815cb2120a0c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -840,13 +840,15 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw, break; } } else { - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; switch (desc_name) { case HW_DESC_OWN: - ret = GET_RX_DESC_OWN(pdesc); + ret = GET_RX_DESC_OWN(p_desc); break; case HW_DESC_RXPKT_LEN: - ret = GET_RX_DESC_PKT_LEN(pdesc); + ret = GET_RX_DESC_PKT_LEN(p_desc); + break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(p_desc); break; default: WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", @@ -857,6 +859,23 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw, return ret; } +bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN); + + /* a beacon packet will only use the first + * descriptor by defaut, and the own bit may not + * be cleared by the hardware + */ + if (own) + return false; + return true; +} + void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index f7f776539438942e50a2f2e2b3dcbd8601818f27..3d026e518c02572cdb1f82978046124d7962812a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -737,6 +737,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val); u64 rtl92de_get_desc(struct ieee80211_hw *hw, u8 *p_desc, bool istx, u8 desc_name); +bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 84a0d0eb72e1e404a746d0957ea36d4d289bd026..a933490928ba91df0d530150eda75b1ea56d7c9d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -766,6 +766,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index bf9859f74b6f5e9bd3349c5dfef799c03c3d3f6e..52f108744e9693b9ed0743a0296062a962086539 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -470,6 +470,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = 
rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index 545115db507e71c9876b66e53ceee5486b76e1a1..4dc9f4e96263b89e0509e2261f0f3d74c138e255 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1699,6 +1699,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, rtlhal->oem_id = RT_CID_819X_LENOVO; break; } + break; case 0x1025: rtlhal->oem_id = RT_CID_819X_ACER; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index f2441fbb92f1e7fc1283650024f4ae9aa56a1dba..307c2bd77f0600926bde64542eaac82e7c6af9c4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -584,6 +584,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u1rsvdpageloc, sizeof(u1rsvdpageloc)); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index d868a034659fb88660cc2104a915296022f6ce3f..d7235f6165fdffabc1c5535dfaccf01b82d28577 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1645,6 +1645,8 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, &reserved_page_packet_8812[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); @@ -1781,6 +1783,8 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, &reserved_page_packet_8821[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2ac5004d7a401ab5d1255126c5c0a00a5e233705..1181b725f5033b6e8d7cffeb03c6dc6ac5633f83 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1050,8 +1050,10 @@ int rtl_usb_probe(struct usb_interface *intf, rtlpriv->hw = hw; rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32), GFP_KERNEL); - if (!rtlpriv->usb_data) + if (!rtlpriv->usb_data) { + ieee80211_free_hw(hw); return -ENOMEM; + } /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); @@ -1081,13 +1083,13 @@ int rtl_usb_probe(struct usb_interface *intf, rtlpriv->cfg->ops->read_eeprom_info(hw); err = _rtl_usb_init(hw); if (err) - goto error_out; + goto error_out2; rtl_usb_init_sw(hw); /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { pr_err("Can't allocate sw for mac80211\n"); - goto error_out; + goto error_out2; } if (rtlpriv->cfg->ops->init_sw_vars(hw)) { pr_err("Can't init_sw_vars\n"); @@ -1108,9 +1110,11 @@ int rtl_usb_probe(struct usb_interface *intf, error_out: rtl_deinit_core(hw); +error_out2: _rtl_usb_io_handler_release(hw); usb_put_dev(udev); complete(&rtlpriv->firmware_loading_complete); + kfree(rtlpriv->usb_data); return -ENODEV; } EXPORT_SYMBOL(rtl_usb_probe); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 
51e4e92d95a0d314c600771f06cb938e6731091b..0bbeb61ec3a38e417698eb47eb9906ed30505d7b 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -712,8 +712,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) struct rndis_query *get; struct rndis_query_c *get_c; } u; - int ret, buflen; - int resplen, respoffs, copylen; + int ret; + size_t buflen, resplen, respoffs, copylen; buflen = *len + sizeof(*u.get); if (buflen < CONTROL_BUFFER_SIZE) @@ -748,22 +748,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) if (respoffs > buflen) { /* Device returned data offset outside buffer, error. */ - netdev_dbg(dev->net, "%s(%s): received invalid " - "data offset: %d > %d\n", __func__, - oid_to_string(oid), respoffs, buflen); + netdev_dbg(dev->net, + "%s(%s): received invalid data offset: %zu > %zu\n", + __func__, oid_to_string(oid), respoffs, buflen); ret = -EINVAL; goto exit_unlock; } - if ((resplen + respoffs) > buflen) { - /* Device would have returned more data if buffer would - * have been big enough. Copy just the bits that we got. - */ - copylen = buflen - respoffs; - } else { - copylen = resplen; - } + copylen = min(resplen, buflen - respoffs); if (copylen > *len) copylen = *len; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 4e510cbe0a89f011d6f5af6db22846146702c1e8..be59d66585d6d275e2d7d37bc54781769f350ef1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common) * @adapter: Pointer to the adapter structure. * @band: Operating band to be set. * - * Return: None. + * Return: int - 0 on success, negative error on failure. 
*/ -static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) +static int rsi_register_rates_channels(struct rsi_hw *adapter, int band) { struct ieee80211_supported_band *sbands = &adapter->sbands[band]; void *channels = NULL; if (band == NL80211_BAND_2GHZ) { - channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL); - memcpy(channels, - rsi_2ghz_channels, - sizeof(rsi_2ghz_channels)); + channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; sbands->band = NL80211_BAND_2GHZ; sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels); sbands->bitrates = rsi_rates; sbands->n_bitrates = ARRAY_SIZE(rsi_rates); } else { - channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL); - memcpy(channels, - rsi_5ghz_channels, - sizeof(rsi_5ghz_channels)); + channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels), + GFP_KERNEL); + if (!channels) + return -ENOMEM; sbands->band = NL80211_BAND_5GHZ; sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels); sbands->bitrates = &rsi_rates[4]; @@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) sbands->ht_cap.mcs.rx_mask[0] = 0xff; sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; /* sbands->ht_cap.mcs.rx_highest = 0x82; */ + return 0; } /** @@ -1985,11 +1986,16 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->available_antennas_rx = 1; wiphy->available_antennas_tx = 1; - rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); + status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); + if (status) + return status; wiphy->bands[NL80211_BAND_2GHZ] = &adapter->sbands[NL80211_BAND_2GHZ]; if (common->num_supp_bands > 1) { - rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); + status = rsi_register_rates_channels(adapter, + NL80211_BAND_5GHZ); + if (status) + return status; wiphy->bands[NL80211_BAND_5GHZ] = &adapter->sbands[NL80211_BAND_5GHZ]; } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 1095df7d957350f20b218bd32c8dadb30d3d9153..1a3a5235cfb8d4ae0c86a7f6959431f886efc126 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1583,6 +1583,7 @@ static int rsi_send_beacon(struct rsi_common *common) skb_pull(skb, (64 - dword_align_bytes)); if (rsi_prepare_beacon(common, skb)) { rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n"); + dev_kfree_skb(skb); return -EINVAL; } skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index c0a163e404029ce6f3cbb10100d2cf02ba7c0492..14e56bee05484c33bc88db5f9ecfdf533684487a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb) if (urb->status) goto out; - if (urb->actual_length <= 0) { - rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__); + if (urb->actual_length <= 0 || + urb->actual_length > rx_cb->rx_skb->len) { + rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n", + __func__, urb->actual_length); goto out; } if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) { rsi_dbg(INFO_ZONE, "Max RX packets reached\n"); goto out; } - skb_put(rx_cb->rx_skb, urb->actual_length); + skb_trim(rx_cb->rx_skb, urb->actual_length); skb_queue_tail(&dev->rx_q, rx_cb->rx_skb); rsi_set_event(&dev->rx_thread.event); @@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 
ep_num) if (!skb) return -ENOMEM; skb_reserve(skb, MAX_DWORD_ALIGN_BYTES); + skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES); dword_align_bytes = (unsigned long)skb->data & 0x3f; if (dword_align_bytes > 0) skb_push(skb, dword_align_bytes); @@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num) usb_rcvbulkpipe(dev->usbdev, dev->bulkin_endpoint_addr[ep_num - 1]), urb->transfer_buffer, - RSI_MAX_RX_USB_PKT_SIZE, + skb->len, rsi_rx_done_handler, rx_cb); @@ -640,7 +643,6 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter, kfree(rsi_dev->tx_buffer); fail_eps: - kfree(rsi_dev); return status; } diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index d9ff3b8be86ee19156ed3bb7c4d322a0aeea8515..60f1f286b030121cb38424b652bb840ed7661f26 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle) atomic_inc(&handle->thread_done); rsi_set_event(&handle->event); - wait_for_completion(&handle->completion); return kthread_stop(handle->task); } diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index 90dc979f260b600c2aad6683122c1b9ba5aa6984..c1608f0bf6d017d9fc5be100dfd5a25ce3054599 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, mutex_init(&priv->wsm_cmd_mux); mutex_init(&priv->conf_mutex); priv->workqueue = create_singlethread_workqueue("cw1200_wq"); + if (!priv->workqueue) { + ieee80211_free_hw(hw); + return NULL; + } + sema_init(&priv->scan.lock, 1); INIT_WORK(&priv->scan.work, cw1200_scan_work); INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index 67213f11acbd2e11f0ed85fee5d77003c20fae55..71e9b91cf15bcea42ab10ade66578a591dd65e2d 100644 --- a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -78,27 +78,30 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) return -EINVAL; + /* will be unlocked in cw1200_scan_work() */ + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); - if (!frame.skb) + if (!frame.skb) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); return -ENOMEM; + } if (req->ie_len) skb_put_data(frame.skb, req->ie, req->ie_len); - /* will be unlocked in cw1200_scan_work() */ - down(&priv->scan.lock); - mutex_lock(&priv->conf_mutex); - ret = wsm_set_template_frame(priv, &frame); if (!ret) { /* Host want to be the probe responder. 
*/ ret = wsm_set_probe_responder(priv, true); } if (ret) { + dev_kfree_skb(frame.skb); mutex_unlock(&priv->conf_mutex); up(&priv->scan.lock); - dev_kfree_skb(frame.skb); return ret; } @@ -120,10 +123,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, ++priv->scan.n_ssids; } - mutex_unlock(&priv->conf_mutex); - if (frame.skb) dev_kfree_skb(frame.skb); + mutex_unlock(&priv->conf_mutex); queue_work(priv->workqueue, &priv->scan.work); return 0; } diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c index 9547aea01b0fbd3be677a7b524d287f444af826f..ea0215246c5c835239f3af77334b98b06fda102f 100644 --- a/drivers/net/wireless/ti/wl1251/cmd.c +++ b/drivers/net/wireless/ti/wl1251/cmd.c @@ -466,9 +466,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len, cmd->channels[i].channel = channels[i]->hw_value; } - cmd->params.ssid_len = ssid_len; - if (ssid) - memcpy(cmd->params.ssid, ssid, ssid_len); + if (ssid) { + int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN); + + cmd->params.ssid_len = len; + memcpy(cmd->params.ssid, ssid, len); + } ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 89b0d0fade9f2bcf9594d2a892b155089d4425ab..2ca5658bbc2abef5a3699b0d191ed3dd05df3777 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -957,6 +957,8 @@ static void wl1271_recovery_work(struct work_struct *work) BUG_ON(wl->conf.recovery.bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + if (wl->conf.recovery.no_recovery) { wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); goto out_unlock; @@ -1082,8 +1084,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) goto out; ret = wl12xx_fetch_firmware(wl, plt); - if (ret < 0) - goto out; + if (ret < 0) { + kfree(wl->fw_status); + kfree(wl->raw_fw_status); + kfree(wl->tx_res_if); + } out: return ret; @@ -6710,6 +6715,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) int ret; unsigned long start_time = jiffies; bool pending = false; + bool recovery = false; /* Nothing to do if no ELP mode requested */ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) @@ -6726,7 +6732,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (ret < 0) { - wl12xx_queue_recovery_work(wl); + recovery = true; goto err; } @@ -6734,11 +6740,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wait_for_completion_timeout(&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); if (ret == 0) { - wl1271_error("ELP wakeup timeout!"); - wl12xx_queue_recovery_work(wl); + wl1271_warning("ELP wakeup timeout!"); /* Return no error for runtime PM for recovery */ - return 0; + ret = 0; + recovery = true; + goto err; } } @@ -6753,6 +6760,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) spin_lock_irqsave(&wl->wl_lock, flags); wl->elp_compl = NULL; spin_unlock_irqrestore(&wl->wl_lock, flags); + + if (recovery) { + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + wl12xx_queue_recovery_work(wl); + } + return ret; } diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 750bea3574ee4e994b01f9e49a8046e1afa33211..627df164b7b6d3f718b4be28c5c933e97fbbaecd 100644 --- 
a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) } sdio_claim_host(func); + /* + * To guarantee that the SDIO card is power cycled, as required to make + * the FW programming to succeed, let's do a brute force HW reset. + */ + mmc_hw_reset(card->host); + sdio_enable_func(func); sdio_release_host(func); @@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; - int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); /* Let runtime PM know the card is powered off */ - error = pm_runtime_put(&card->dev); - if (error < 0 && error != -EBUSY) { - dev_err(&card->dev, "%s failed: %i\n", __func__, error); - - return error; - } - + pm_runtime_put(&card->dev); return 0; } diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index dbe78d8491effa32a3356d8a4cd238de5ae79d88..7f34ec077ee57000e49fc094042ceb834c680ff2 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -70,7 +70,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, out: mutex_unlock(&wl->mutex); - return 0; + return ret; } static int diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6..4d9bf2efc88ed894ffd34d70e238a5309d63406d 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct xenvif *vif; /* Parent VIF */ + /* + * TX/RX common EOI handling. + * When feature-split-event-channels = 0, interrupt handler sets + * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set + * by the RX and TX interrupt handlers. + * RX and TX handler threads will issue an EOI when either + * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or + * NETBK_TX_EOI) are set and they will reset those bits. + */ + atomic_t eoi_pending; +#define NETBK_RX_EOI 0x01 +#define NETBK_TX_EOI 0x02 +#define NETBK_COMMON_EOI 0x04 + /* Use NAPI for guest TX */ struct napi_struct napi; /* When feature-split-event-channels = 0, tx_irq = rx_irq. 
*/ @@ -189,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ unsigned int rx_queue_max; unsigned int rx_queue_len; unsigned long last_rx_time; + unsigned int rx_slots_needed; bool stalled; struct xenvif_copy_state rx_copy; @@ -357,8 +372,9 @@ int xenvif_dealloc_kthread(void *data); irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); void xenvif_rx_action(struct xenvif_queue *queue); -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 0ccb021f1e78687d7c7a9814a05369aafe7c6508..bb7545ab9cd1522568939becdaf4239097a21e61 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -94,7 +94,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data, static void xenvif_flush_hash(struct xenvif *vif) { - struct xenvif_hash_cache_entry *entry; + struct xenvif_hash_cache_entry *entry, *n; unsigned long flags; if (xenvif_hash_cache_size == 0) @@ -102,7 +102,7 @@ static void xenvif_flush_hash(struct xenvif *vif) spin_lock_irqsave(&vif->hash.cache.lock, flags); - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) { list_del_rcu(&entry->link); vif->hash.cache.count--; kfree_rcu(entry, rcu); @@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif) if (xenvif_hash_cache_size == 0) return; + BUG_ON(vif->hash.cache.count); + spin_lock_init(&vif->hash.cache.lock); INIT_LIST_HEAD(&vif->hash.cache.list); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f6ae23fc3f6b086e60149befd9a3ca9500a48bf1..058c132f24889b819056632a58b96e9c177dddec 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif) !vif->disabled; } +static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) +{ + bool rc; + + rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); + if (rc) + napi_schedule(&queue->napi); + return rc; +} + static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; + int old; - if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) - napi_schedule(&queue->napi); + old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); + WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n"); + + if (!xenvif_handle_tx_interrupt(queue)) { + atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } @@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget) return work_done; } +static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) +{ + bool rc; + + rc = xenvif_have_rx_work(queue, false); + if (rc) + xenvif_kick_thread(queue); + return rc; +} + static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; + int old; - xenvif_kick_thread(queue); + old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); + WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n"); + + if (!xenvif_handle_rx_interrupt(queue)) { + atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } irqreturn_t xenvif_interrupt(int irq, void *dev_id) { - 
xenvif_tx_interrupt(irq, dev_id); - xenvif_rx_interrupt(irq, dev_id); + struct xenvif_queue *queue = dev_id; + int old; + + old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); + WARN(old, "Interrupt while EOI pending\n"); + + /* Use bitwise or as we need to call both functions. */ + if ((!xenvif_handle_tx_interrupt(queue) | + !xenvif_handle_rx_interrupt(queue))) { + atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); + } return IRQ_HANDLED; } @@ -153,6 +196,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, { struct xenvif *vif = netdev_priv(dev); unsigned int size = vif->hash.size; + unsigned int num_queues; + + /* If queues are not set up internally - always return 0 + * as the packet is going to be dropped anyway */ + num_queues = READ_ONCE(vif->num_queues); + if (num_queues < 1) + return 0; if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) return fallback(dev, skb, NULL) % dev->real_num_tx_queues; @@ -166,7 +216,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, [skb_get_hash_raw(skb) % size]; } -static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; @@ -216,14 +267,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) skb_clear_hash(skb); - xenvif_rx_queue_tail(queue, skb); + if (!xenvif_rx_queue_tail(queue, skb)) + goto drop; + xenvif_kick_thread(queue); return NETDEV_TX_OK; drop: vif->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -587,7 +640,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, shared = (struct xen_netif_ctrl_sring *)addr; BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); - err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn); + err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn); if (err < 0) goto err_unmap; @@ -645,7 +698,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, if (tx_evtchn == rx_evtchn) { /* feature-split-event-channels == 0 */ - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, queue->name, queue); if (err < 0) @@ -656,7 +709,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, /* feature-split-event-channels == 1 */ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, queue->tx_irq_name, queue); if (err < 0) @@ -666,7 +719,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); - err = bind_interdomain_evtchn_to_irqhandler( + err = bind_interdomain_evtchn_to_irqhandler_lateeoi( queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, queue->rx_irq_name, queue); if (err < 0) @@ -711,7 +764,6 @@ int xenvif_connect_data(struct xenvif_queue *queue, xenvif_unmap_frontend_data_rings(queue); netif_napi_del(&queue->napi); err: - module_put(THIS_MODULE); return err; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 
3621e05a7494c43a94084fa79a6959fa428f64ec..850e8fdef34d1ef807a83b6d0210223dc1b9f3f7 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) if (more_to_do) napi_schedule(&queue->napi); + else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI, + &queue->eoi_pending) & + (NETBK_TX_EOI | NETBK_COMMON_EOI)) + xen_irq_lateeoi(queue->tx_irq, 0); } static void tx_add_credit(struct xenvif_queue *queue) @@ -319,10 +323,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue, struct xenvif_tx_cb { - u16 pending_idx; + u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1]; + u8 copy_count; }; #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) +#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i]) +#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count) static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, u16 pending_idx, @@ -357,52 +364,152 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) return skb; } -static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop, - unsigned int frag_overflow, - struct sk_buff *nskb) +static void xenvif_get_requests(struct xenvif_queue *queue, + struct sk_buff *skb, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txfrags, + unsigned *copy_ops, + unsigned *map_ops, + unsigned int frag_overflow, + struct sk_buff *nskb, + unsigned int extra_count, + unsigned int data_len) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; - int start; + u16 pending_idx; pending_ring_idx_t index; unsigned int nr_slots; + struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; + struct xen_netif_tx_request *txp = first; + + nr_slots = shinfo->nr_frags + frag_overflow + 1; + + copy_count(skb) = 0; + + /* Create copy ops for exactly data_len bytes into the skb head. */ + __skb_put(skb, data_len); + while (data_len > 0) { + int amount = data_len > txp->size ? txp->size : data_len; + + cop->source.u.ref = txp->gref; + cop->source.domid = queue->vif->domid; + cop->source.offset = txp->offset; + + cop->dest.domid = DOMID_SELF; + cop->dest.offset = (offset_in_page(skb->data + + skb_headlen(skb) - + data_len)) & ~XEN_PAGE_MASK; + cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) + - data_len); + + cop->len = amount; + cop->flags = GNTCOPY_source_gref; + + index = pending_index(queue->pending_cons); + pending_idx = queue->pending_ring[index]; + callback_param(queue, pending_idx).ctx = NULL; + copy_pending_idx(skb, copy_count(skb)) = pending_idx; + copy_count(skb)++; + + cop++; + data_len -= amount; + + if (amount == txp->size) { + /* The copy op covered the full tx_request */ + + memcpy(&queue->pending_tx_info[pending_idx].req, + txp, sizeof(*txp)); + queue->pending_tx_info[pending_idx].extra_count = + (txp == first) ? extra_count : 0; + + if (txp == first) + txp = txfrags; + else + txp++; + queue->pending_cons++; + nr_slots--; + } else { + /* The copy op partially covered the tx_request. + * The remainder will be mapped. 
+ */ + txp->offset += amount; + txp->size -= amount; + } + } - nr_slots = shinfo->nr_frags; + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + nr_slots--) { + if (unlikely(!txp->size)) { + unsigned long flags; - /* Skip first skb fragment if it is on same page as header fragment. */ - start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, flags); + ++txp; + continue; + } - for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; - xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); + xenvif_tx_create_map_op(queue, pending_idx, txp, + txp == first ? extra_count : 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + ++shinfo->nr_frags; + ++gop; + + if (txp == first) + txp = txfrags; + else + txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; - shinfo->nr_frags++, txp++, gop++) { + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { + if (unlikely(!txp->size)) { + unsigned long flags; + + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, + XEN_NETIF_RSP_OKAY); + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, + flags); + continue; + } + index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + ++shinfo->nr_frags; + ++gop; } - skb_shinfo(skb)->frag_list = nskb; + if (shinfo->nr_frags) { + skb_shinfo(skb)->frag_list = nskb; + nskb = NULL; + } } - return gop; + if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above or some + * were empty. + */ + kfree_skb(nskb); + } + + (*copy_ops) = cop - queue->tx_copy_ops; + (*map_ops) = gop - queue->tx_map_ops; } static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, @@ -438,7 +545,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct gnttab_copy **gopp_copy) { struct gnttab_map_grant_ref *gop_map = *gopp_map; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + u16 pending_idx; /* This always points to the shinfo of the skb being checked, which * could be either the first or the one on the frag_list */ @@ -449,24 +556,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; const bool sharedslot = nr_frags && - frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; - int i, err; + frag_get_pending_idx(&shinfo->frags[0]) == + copy_pending_idx(skb, copy_count(skb) - 1); + int i, err = 0; - /* Check status of header. */ - err = (*gopp_copy)->status; - if (unlikely(err)) { - if (net_ratelimit()) - netdev_dbg(queue->vif->dev, - "Grant copy of header failed! 
status: %d pending_idx: %u ref: %u\n", - (*gopp_copy)->status, - pending_idx, - (*gopp_copy)->source.u.ref); - /* The first frag might still have this slot mapped */ - if (!sharedslot) - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_ERROR); + for (i = 0; i < copy_count(skb); i++) { + int newerr; + + /* Check status of header. */ + pending_idx = copy_pending_idx(skb, i); + + newerr = (*gopp_copy)->status; + if (likely(!newerr)) { + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } else { + err = newerr; + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", + (*gopp_copy)->status, + pending_idx, + (*gopp_copy)->source.u.ref); + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + } + (*gopp_copy)++; } - (*gopp_copy)++; check_frags: for (i = 0; i < nr_frags; i++, gop_map++) { @@ -513,14 +633,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, if (err) continue; - /* First error: if the header haven't shared a slot with the - * first frag, release it as well. - */ - if (!sharedslot) - xenvif_idx_release(queue, - XENVIF_TX_CB(skb)->pending_idx, - XEN_NETIF_RSP_OKAY); - /* Invalidate preceding fragments of this skb. */ for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); @@ -790,7 +902,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops; struct sk_buff *skb, *nskb; int ret; unsigned int frag_overflow; @@ -872,8 +983,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, continue; } + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ? + XEN_NETBACK_TX_COPY_LEN : txreq.size; + ret = xenvif_count_requests(queue, &txreq, extra_count, txfrags, work_to_do); + if (unlikely(ret < 0)) break; @@ -899,9 +1014,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, index = pending_index(queue->pending_cons); pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && - ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? - XEN_NETBACK_TX_COPY_LEN : txreq.size; + if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) + data_len = txreq.size; skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { @@ -912,8 +1026,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, } skb_shinfo(skb)->nr_frags = ret; - if (data_len < txreq.size) - skb_shinfo(skb)->nr_frags++; /* At this point shinfo->nr_frags is in fact the number of * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. */ @@ -925,6 +1037,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) @@ -940,6 +1053,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. 
*/ + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); kfree_skb(nskb); break; @@ -973,54 +1087,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, type); } - XENVIF_TX_CB(skb)->pending_idx = pending_idx; - - __skb_put(skb, data_len); - queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; - queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; - queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; - - queue->tx_copy_ops[*copy_ops].dest.u.gmfn = - virt_to_gfn(skb->data); - queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; - queue->tx_copy_ops[*copy_ops].dest.offset = - offset_in_page(skb->data) & ~XEN_PAGE_MASK; - - queue->tx_copy_ops[*copy_ops].len = data_len; - queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; - - (*copy_ops)++; - - if (data_len < txreq.size) { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - pending_idx); - xenvif_tx_create_map_op(queue, pending_idx, &txreq, - extra_count, gop); - gop++; - } else { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - INVALID_PENDING_IDX); - memcpy(&queue->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); - queue->pending_tx_info[pending_idx].extra_count = - extra_count; - } - - queue->pending_cons++; - - gop = xenvif_get_requests(queue, skb, txfrags, gop, - frag_overflow, nskb); + xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops, + map_ops, frag_overflow, nskb, extra_count, + data_len); __skb_queue_tail(&queue->tx_queue, skb); queue->tx.req_cons = idx; - if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || + if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) break; } - (*map_ops) = gop - queue->tx_map_ops; return; } @@ -1072,11 +1151,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s skb_frag_size_set(&frags[i], len); } - /* Copied all the bits from the frag list -- free it. */ - skb_frag_list_init(skb); - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); - /* Release all the original (foreign) frags. */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) skb_frag_unref(skb, f); @@ -1104,9 +1178,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; - unsigned data_len; - pending_idx = XENVIF_TX_CB(skb)->pending_idx; + pending_idx = copy_pending_idx(skb, 0); txp = &queue->pending_tx_info[pending_idx].req; /* Check the remap error code. */ @@ -1125,18 +1198,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) continue; } - data_len = skb->len; - callback_param(queue, pending_idx).ctx = NULL; - if (data_len < txp->size) { - /* Append the packet payload as a fragment. */ - txp->offset += data_len; - txp->size -= data_len; - } else { - /* Schedule a response immediately. 
*/ - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_OKAY); - } - if (txp->flags & XEN_NETTXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (txp->flags & XEN_NETTXF_data_validated) @@ -1145,6 +1206,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) xenvif_fill_frags(queue, skb); if (unlikely(skb_has_frag_list(skb))) { + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + xenvif_skb_zerocopy_prepare(queue, nskb); if (xenvif_handle_frag_list(queue, skb)) { if (net_ratelimit()) netdev_err(queue->vif->dev, @@ -1153,6 +1216,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) kfree_skb(skb); continue; } + /* Copied all the bits from the frag list -- free it. */ + skb_frag_list_init(skb); + kfree_skb(nskb); } skb->dev = queue->vif->dev; @@ -1308,7 +1374,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) /* Called after netfront has transmitted */ int xenvif_tx_action(struct xenvif_queue *queue, int budget) { - unsigned nr_mops, nr_cops = 0; + unsigned nr_mops = 0, nr_cops = 0; int work_done, ret; if (unlikely(!tx_work_todo(queue))) @@ -1325,7 +1391,15 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) NULL, queue->pages_to_map, nr_mops); - BUG_ON(ret); + if (ret) { + unsigned int i; + + netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", + nr_mops, ret); + for (i = 0; i < nr_mops; ++i) + WARN_ON_ONCE(queue->tx_map_ops[i].status == + GNTST_okay); + } } work_done = xenvif_tx_submit(queue); @@ -1611,9 +1685,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif) irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data) { struct xenvif *vif = data; + unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS; - while (xenvif_ctrl_work_todo(vif)) + while (xenvif_ctrl_work_todo(vif)) { xenvif_ctrl_action(vif); + eoi_flag = 0; + } + + xen_irq_lateeoi(irq, eoi_flag); return IRQ_HANDLED; } diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index ef5887037b225251cfa77ccac587f79ad338dd9d..57f1b1a72d35c2a5eb648ae3180c69aeb0e49335 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -33,22 +33,37 @@ #include #include +/* + * Update the needed ring page slots for the first SKB queued. + * Note that any call sequence outside the RX thread calling this function + * needs to wake up the RX thread via a call of xenvif_kick_thread() + * afterwards in order to avoid a race with putting the thread to sleep. 
+ */ +static void xenvif_update_needed_slots(struct xenvif_queue *queue, + const struct sk_buff *skb) +{ + unsigned int needed = 0; + + if (skb) { + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); + if (skb_is_gso(skb)) + needed++; + if (skb->sw_hash) + needed++; + } + + WRITE_ONCE(queue->rx_slots_needed, needed); +} + static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) { RING_IDX prod, cons; - struct sk_buff *skb; - int needed; + unsigned int needed; - skb = skb_peek(&queue->rx_queue); - if (!skb) + needed = READ_ONCE(queue->rx_slots_needed); + if (!needed) return false; - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); - if (skb_is_gso(skb)) - needed++; - if (skb->sw_hash) - needed++; - do { prod = queue->rx.sring->req_prod; cons = queue->rx.req_cons; @@ -67,22 +82,30 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) return false; } -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) { unsigned long flags; + bool ret = true; spin_lock_irqsave(&queue->rx_queue.lock, flags); - __skb_queue_tail(&queue->rx_queue, skb); - - queue->rx_queue_len += skb->len; - if (queue->rx_queue_len > queue->rx_queue_max) { + if (queue->rx_queue_len >= queue->rx_queue_max) { struct net_device *dev = queue->vif->dev; netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); + ret = false; + } else { + if (skb_queue_empty(&queue->rx_queue)) + xenvif_update_needed_slots(queue, skb); + + __skb_queue_tail(&queue->rx_queue, skb); + + queue->rx_queue_len += skb->len; } spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + + return ret; } static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) @@ -93,6 +116,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) skb = __skb_dequeue(&queue->rx_queue); if (skb) { + xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue)); + queue->rx_queue_len -= skb->len; if (queue->rx_queue_len < queue->rx_queue_max) { struct netdev_queue *txq; @@ -127,6 +152,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) break; xenvif_rx_dequeue(queue); kfree_skb(skb); + queue->vif->dev->stats.rx_dropped++; } } @@ -467,36 +493,40 @@ void xenvif_rx_action(struct xenvif_queue *queue) xenvif_rx_copy_flush(queue); } -static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) +static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue) { RING_IDX prod, cons; prod = queue->rx.sring->req_prod; cons = queue->rx.req_cons; + return prod - cons; +} + +static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue) +{ + unsigned int needed = READ_ONCE(queue->rx_slots_needed); + return !queue->stalled && - prod - cons < 1 && + xenvif_rx_queue_slots(queue) < needed && time_after(jiffies, queue->last_rx_time + queue->vif->stall_timeout); } static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) { - RING_IDX prod, cons; + unsigned int needed = READ_ONCE(queue->rx_slots_needed); - prod = queue->rx.sring->req_prod; - cons = queue->rx.req_cons; - - return queue->stalled && prod - cons >= 1; + return queue->stalled && xenvif_rx_queue_slots(queue) >= needed; } -static bool xenvif_have_rx_work(struct xenvif_queue *queue) +bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) { return xenvif_rx_ring_slots_available(queue) || (queue->vif->stall_timeout && (xenvif_rx_queue_stalled(queue) || xenvif_rx_queue_ready(queue))) || - kthread_should_stop() || + (test_kthread && 
kthread_should_stop()) || queue->vif->disabled; } @@ -527,15 +557,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) { DEFINE_WAIT(wait); - if (xenvif_have_rx_work(queue)) + if (xenvif_have_rx_work(queue, true)) return; for (;;) { long ret; prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); - if (xenvif_have_rx_work(queue)) + if (xenvif_have_rx_work(queue, true)) break; + if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI, + &queue->eoi_pending) & + (NETBK_RX_EOI | NETBK_COMMON_EOI)) + xen_irq_lateeoi(queue->rx_irq, 0); + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); if (!ret) break; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index cd51492ae6c2dfae7bf9cef66b3206b8d126dbee..107bbd4ae825e1379e4be2d2f8e3e749b1b03460 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -777,12 +777,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev, return -ENOMEM; snprintf(node, maxlen, "%s/rate", dev->nodename); vif->credit_watch.node = node; + vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = xen_net_rate_changed; err = register_xenbus_watch(&vif->credit_watch); if (err) { pr_err("Failed to set watcher %s\n", vif->credit_watch.node); kfree(node); vif->credit_watch.node = NULL; + vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = NULL; } return err; @@ -829,6 +831,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, snprintf(node, maxlen, "%s/request-multicast-control", dev->otherend); vif->mcast_ctrl_watch.node = node; + vif->mcast_ctrl_watch.will_handle = NULL; vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed; err = register_xenbus_watch(&vif->mcast_ctrl_watch); if (err) { @@ -836,6 +839,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, vif->mcast_ctrl_watch.node); kfree(node); vif->mcast_ctrl_watch.node = NULL; + vif->mcast_ctrl_watch.will_handle = NULL; vif->mcast_ctrl_watch.callback = NULL; } return err; @@ -1039,7 +1043,7 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, hotplug_status_changed, "%s/%s", dev->nodename, "hotplug-status"); if (!err) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f17f602e6171203acd39ee448c305cba719ff1de..bade7377b82d827dd5d674a72dc9d70d313bb31d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,12 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +static bool __read_mostly xennet_trusted = true; +module_param_named(trusted, xennet_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -119,21 +125,17 @@ struct netfront_queue { /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries - * are linked from tx_skb_freelist through skb_entry.link. - * - * NB. Freelist index entries are always going to be less than - * PAGE_OFFSET, whereas pointers to skbs will always be equal or - * greater than PAGE_OFFSET: we use this property to distinguish - * them. + * are linked from tx_skb_freelist through tx_link. 
*/ - union skb_entry { - struct sk_buff *skb; - unsigned long link; - } tx_skbs[NET_TX_RING_SIZE]; + struct sk_buff *tx_skbs[NET_TX_RING_SIZE]; + unsigned short tx_link[NET_TX_RING_SIZE]; +#define TX_LINK_NONE 0xffff +#define TX_PENDING 0xfffe grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; + unsigned int tx_pend_queue; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; @@ -144,6 +146,9 @@ struct netfront_queue { struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + + unsigned int rx_rsp_unconsumed; + spinlock_t rx_cons_lock; }; struct netfront_info { @@ -159,6 +164,12 @@ struct netfront_info { struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; + /* Is device behaving sane? */ + bool broken; + + /* Should skbs be bounced into a zeroed buffer? */ + bool bounce; + atomic_t rx_gso_checksum_fixup; }; @@ -167,33 +178,25 @@ struct netfront_rx_info { struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; -static void skb_entry_set_link(union skb_entry *list, unsigned short id) -{ - list->link = id; -} - -static int skb_entry_is_link(const union skb_entry *list) -{ - BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); - return (unsigned long)list->skb < PAGE_OFFSET; -} - /* * Access macros for acquiring freeing slots in tx_skbs[]. */ -static void add_id_to_freelist(unsigned *head, union skb_entry *list, - unsigned short id) +static void add_id_to_list(unsigned *head, unsigned short *list, + unsigned short id) { - skb_entry_set_link(&list[id], *head); + list[id] = *head; *head = id; } -static unsigned short get_id_from_freelist(unsigned *head, - union skb_entry *list) +static unsigned short get_id_from_list(unsigned *head, unsigned short *list) { unsigned int id = *head; - *head = list[id].link; + + if (id != TX_LINK_NONE) { + *head = list[id]; + list[id] = TX_LINK_NONE; + } return id; } @@ -265,7 +268,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL; - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (!page) { kfree_skb(skb); return NULL; @@ -351,7 +354,7 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL; - if (!np->queues) + if (!np->queues || np->broken) return -ENODEV; for (i = 0; i < num_queues; ++i) { @@ -373,41 +376,62 @@ static int xennet_open(struct net_device *dev) return 0; } -static void xennet_tx_buf_gc(struct netfront_queue *queue) +static bool xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; bool more_to_do; + bool work_done = false; + const struct device *dev = &queue->info->netdev->dev; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; + if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { + dev_alert(dev, "Illegal number of responses %u\n", + prod - queue->tx.rsp_cons); + goto err; + } rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = queue->tx.rsp_cons; cons != prod; cons++) { - struct xen_netif_tx_response *txrsp; + struct xen_netif_tx_response txrsp; + + work_done = true; - txrsp = RING_GET_RESPONSE(&queue->tx, cons); - if (txrsp->status == XEN_NETIF_RSP_NULL) + RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); + if (txrsp.status == XEN_NETIF_RSP_NULL) continue; - id = txrsp->id; - skb = queue->tx_skbs[id].skb; - if (unlikely(gnttab_query_foreign_access( - queue->grant_tx_ref[id]) != 0)) { - pr_alert("%s: warning -- grant still in use by backend domain\n", - __func__); - BUG(); + id = txrsp.id; + if (id >= RING_SIZE(&queue->tx)) { + dev_alert(dev, + "Response has incorrect id (%u)\n", + id); + goto err; + } + if (queue->tx_link[id] != TX_PENDING) { + dev_alert(dev, + "Response for inactive request\n"); + goto err; + } + + queue->tx_link[id] = TX_LINK_NONE; + skb = queue->tx_skbs[id]; + queue->tx_skbs[id] = NULL; + if (unlikely(!gnttab_end_foreign_access_ref( + queue->grant_tx_ref[id], GNTMAP_readonly))) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + goto err; } - gnttab_end_foreign_access_ref( - queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; queue->grant_tx_page[id] = NULL; - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); dev_kfree_skb_irq(skb); } @@ -417,13 +441,22 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) } while (more_to_do); xennet_maybe_wake_tx(queue); + + return work_done; + + err: + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + + return work_done; } struct xennet_gnttab_make_txreq { struct netfront_queue *queue; struct sk_buff *skb; struct page *page; - struct xen_netif_tx_request *tx; /* Last request */ + struct xen_netif_tx_request *tx; /* Last request on ring page */ + struct xen_netif_tx_request tx_local; /* Last request local copy*/ unsigned int size; }; @@ -439,7 +472,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, struct netfront_queue *queue = info->queue; struct sk_buff *skb = info->skb; - id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); + id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); @@ -447,34 +480,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); - queue->tx_skbs[id].skb = skb; + queue->tx_skbs[id] = skb; queue->grant_tx_page[id] = page; queue->grant_tx_ref[id] = ref; - tx->id = id; - tx->gref = ref; - tx->offset = offset; - tx->size = len; - tx->flags = 0; + info->tx_local.id = id; + info->tx_local.gref = ref; + info->tx_local.offset = offset; + info->tx_local.size = len; + info->tx_local.flags = 0; + + *tx = info->tx_local; + + /* + * Put the request in the pending queue, it will be set to be pending + * when the producer index is about to be raised. 
+ */ + add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); info->tx = tx; - info->size += tx->size; + info->size += info->tx_local.size; } static struct xen_netif_tx_request *xennet_make_first_txreq( - struct netfront_queue *queue, struct sk_buff *skb, - struct page *page, unsigned int offset, unsigned int len) + struct xennet_gnttab_make_txreq *info, + unsigned int offset, unsigned int len) { - struct xennet_gnttab_make_txreq info = { - .queue = queue, - .skb = skb, - .page = page, - .size = 0, - }; + info->size = 0; - gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info); + gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info); - return info.tx; + return info->tx; } static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, @@ -487,35 +523,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, xennet_tx_setup_grant(gfn, offset, len, data); } -static struct xen_netif_tx_request *xennet_make_txreqs( - struct netfront_queue *queue, struct xen_netif_tx_request *tx, - struct sk_buff *skb, struct page *page, +static void xennet_make_txreqs( + struct xennet_gnttab_make_txreq *info, + struct page *page, unsigned int offset, unsigned int len) { - struct xennet_gnttab_make_txreq info = { - .queue = queue, - .skb = skb, - .tx = tx, - }; - /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (len) { - info.page = page; - info.size = 0; + info->page = page; + info->size = 0; gnttab_foreach_grant_in_range(page, offset, len, xennet_make_one_txreq, - &info); + info); page++; offset = 0; - len -= info.size; + len -= info->size; } - - return info.tx; } /* @@ -563,13 +591,50 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, return queue_idx; } +static void xennet_mark_tx_pending(struct netfront_queue *queue) +{ + unsigned int i; + + while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != + TX_LINK_NONE) + queue->tx_link[i] = TX_PENDING; +} + +struct sk_buff *bounce_skb(const struct sk_buff *skb) +{ + unsigned int headerlen = skb_headroom(skb); + /* Align size to allocate full pages and avoid contiguous data leaks */ + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, + XEN_PAGE_SIZE); + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); + + if (!n) + return NULL; + + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { + WARN_ONCE(1, "misaligned skb allocated\n"); + kfree_skb(n); + return NULL; + } + + /* Set the data pointer */ + skb_reserve(n, headerlen); + /* Set the tail pointer and length */ + skb_put(n, skb->len); + + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); + + skb_copy_header(n, skb); + return n; +} + #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); - struct xen_netif_tx_request *tx, *first_tx; + struct xen_netif_tx_request *first_tx; unsigned int i; int notify; int slots; @@ -578,6 +643,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev unsigned int len; unsigned long flags; struct netfront_queue *queue = NULL; + struct xennet_gnttab_make_txreq info = { }; unsigned int num_queues = dev->real_num_tx_queues; u16 queue_index; struct sk_buff *nskb; @@ -585,6 +651,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device 
*dev /* Drop the packet if no queues are set up */ if (num_queues < 1) goto drop; + if (unlikely(np->broken)) + goto drop; /* Determine which queue to transmit this SKB on */ queue_index = skb_get_queue_mapping(skb); queue = &np->queues[queue_index]; @@ -612,9 +680,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* The first req should be at least ETH_HLEN size or the packet will be * dropped by netback. + * + * If the backend is not trusted bounce all data to zeroed pages to + * avoid exposing contiguous data on the granted page not belonging to + * the skb. */ - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { - nskb = skb_copy(skb, GFP_ATOMIC); + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { + nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); @@ -635,21 +707,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev } /* First request for the linear area. */ - first_tx = tx = xennet_make_first_txreq(queue, skb, - page, offset, len); - offset += tx->size; + info.queue = queue; + info.skb = skb; + info.page = page; + first_tx = xennet_make_first_txreq(&info, offset, len); + offset += info.tx_local.size; if (offset == PAGE_SIZE) { page++; offset = 0; } - len -= tx->size; + len -= info.tx_local.size; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ - tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; + first_tx->flags |= XEN_NETTXF_csum_blank | + XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ - tx->flags |= XEN_NETTXF_data_validated; + first_tx->flags |= XEN_NETTXF_data_validated; /* Optional extra info after the first request. */ if (skb_shinfo(skb)->gso_size) { @@ -658,7 +733,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); - tx->flags |= XEN_NETTXF_extra_info; + first_tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? @@ -672,19 +747,21 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev } /* Requests for the rest of the linear area. */ - tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); + xennet_make_txreqs(&info, page, offset, len); /* Requests for all the frags. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - tx = xennet_make_txreqs(queue, tx, skb, - skb_frag_page(frag), frag->page_offset, + xennet_make_txreqs(&info, skb_frag_page(frag), + frag->page_offset, skb_frag_size(frag)); } /* First request has the packet length. 
*/ first_tx->size = skb->len; + xennet_mark_tx_pending(queue); + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); @@ -724,6 +801,38 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + +static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->rx_cons_lock, flags); + queue->rx.rsp_cons = val; + queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); +} + static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { @@ -742,7 +851,7 @@ static int xennet_get_extras(struct netfront_queue *queue, RING_IDX rp) { - struct xen_netif_extra_info *extra; + struct xen_netif_extra_info extra; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; int err = 0; @@ -758,26 +867,24 @@ static int xennet_get_extras(struct netfront_queue *queue, break; } - extra = (struct xen_netif_extra_info *) - RING_GET_RESPONSE(&queue->rx, ++cons); + RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); - if (unlikely(!extra->type || - extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + if (unlikely(!extra.type || + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", - extra->type); + extra.type); err = -EINVAL; } else { - memcpy(&extras[extra->type - 1], extra, - sizeof(*extra)); + extras[extra.type - 1] = extra; } skb = xennet_get_rx_skb(queue, cons); ref = xennet_get_rx_ref(queue, cons); xennet_move_rx_slot(queue, skb, ref); - } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); + } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); - queue->rx.rsp_cons = cons; + xennet_set_rx_rsp_cons(queue, cons); return err; } @@ -785,7 +892,7 @@ static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { - struct xen_netif_rx_response *rx = &rinfo->rx; + struct xen_netif_rx_response *rx = &rinfo->rx, rx_local; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; @@ -794,7 +901,6 @@ static int xennet_get_responses(struct netfront_queue *queue, int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); int slots = 1; int err = 0; - unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(queue, extras, rp); @@ -825,8 +931,13 @@ static int xennet_get_responses(struct netfront_queue *queue, goto next; } - ret = gnttab_end_foreign_access_ref(ref, 0); - BUG_ON(!ret); + if (!gnttab_end_foreign_access_ref(ref, 0)) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + return -EINVAL; + } gnttab_release_grant_reference(&queue->gref_rx_head, ref); @@ -843,7 +954,8 @@ static int xennet_get_responses(struct netfront_queue *queue, break; } - rx = 
RING_GET_RESPONSE(&queue->rx, cons + slots); + RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); + rx = &rx_local; skb = xennet_get_rx_skb(queue, cons + slots); ref = xennet_get_rx_ref(queue, cons + slots); slots++; @@ -856,7 +968,7 @@ static int xennet_get_responses(struct netfront_queue *queue, } if (unlikely(err)) - queue->rx.rsp_cons = cons + slots; + xennet_set_rx_rsp_cons(queue, cons + slots); return err; } @@ -890,39 +1002,43 @@ static int xennet_set_skb_gso(struct sk_buff *skb, return 0; } -static RING_IDX xennet_fill_frags(struct netfront_queue *queue, - struct sk_buff *skb, - struct sk_buff_head *list) +static int xennet_fill_frags(struct netfront_queue *queue, + struct sk_buff *skb, + struct sk_buff_head *list) { RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { - struct xen_netif_rx_response *rx = - RING_GET_RESPONSE(&queue->rx, ++cons); + struct xen_netif_rx_response rx; skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; + RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; - BUG_ON(pull_to <= skb_headlen(skb)); + BUG_ON(pull_to < skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { - queue->rx.rsp_cons = ++cons; + xennet_set_rx_rsp_cons(queue, + ++cons + skb_queue_len(list)); kfree_skb(nskb); - return ~0U; + return -ENOENT; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), - rx->offset, rx->status, PAGE_SIZE); + rx.offset, rx.status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } - return cons; + xennet_set_rx_rsp_cons(queue, cons); + + return 0; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) @@ -1007,17 +1123,28 @@ static int xennet_poll(struct napi_struct *napi, int budget) skb_queue_head_init(&tmpq); rp = queue->rx.sring->rsp_prod; + if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { + dev_alert(&dev->dev, "Illegal number of responses %u\n", + rp - queue->rx.rsp_cons); + queue->info->broken = true; + spin_unlock(&queue->rx_lock); + return 0; + } rmb(); /* Ensure we see queued responses up to 'rp'. 
*/ i = queue->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { - memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); + RING_COPY_RESPONSE(&queue->rx, i, rx); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(queue, &rinfo, rp, &tmpq); if (unlikely(err)) { + if (queue->info->broken) { + spin_unlock(&queue->rx_lock); + return 0; + } err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); @@ -1034,7 +1161,9 @@ static int xennet_poll(struct napi_struct *napi, int budget) if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); - queue->rx.rsp_cons += skb_queue_len(&tmpq); + xennet_set_rx_rsp_cons(queue, + queue->rx.rsp_cons + + skb_queue_len(&tmpq)); goto err; } } @@ -1048,8 +1177,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) skb->data_len = rx->status; skb->len += rx->status; - i = xennet_fill_frags(queue, skb, &tmpq); - if (unlikely(i == ~0U)) + if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) goto err; if (rx->flags & XEN_NETRXF_csum_blank) @@ -1059,7 +1187,8 @@ static int xennet_poll(struct napi_struct *napi, int budget) __skb_queue_tail(&rxq, skb); - queue->rx.rsp_cons = ++i; + i = queue->rx.rsp_cons + 1; + xennet_set_rx_rsp_cons(queue, i); work_done++; } @@ -1135,17 +1264,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue) for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ - if (skb_entry_is_link(&queue->tx_skbs[i])) + if (!queue->tx_skbs[i]) continue; - skb = queue->tx_skbs[i].skb; + skb = queue->tx_skbs[i]; + queue->tx_skbs[i] = NULL; get_page(queue->grant_tx_page[i]); gnttab_end_foreign_access(queue->grant_tx_ref[i], GNTMAP_readonly, (unsigned long)page_address(queue->grant_tx_page[i])); queue->grant_tx_page[i] = NULL; queue->grant_tx_ref[i] = GRANT_INVALID_REF; - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); dev_kfree_skb_irq(skb); } } @@ -1220,34 +1350,79 @@ static int xennet_set_features(struct net_device *dev, return 0; } -static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) +static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) { - struct netfront_queue *queue = dev_id; unsigned long flags; + if (unlikely(queue->info->broken)) + return false; + spin_lock_irqsave(&queue->tx_lock, flags); - xennet_tx_buf_gc(queue); + if (xennet_tx_buf_gc(queue)) + *eoi = 0; spin_unlock_irqrestore(&queue->tx_lock, flags); + return true; +} + +static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) +{ + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (likely(xennet_handle_tx(dev_id, &eoiflag))) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } -static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) +static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) { - struct netfront_queue *queue = dev_id; - struct net_device *dev = queue->info->netdev; + unsigned int work_queued; + unsigned long flags; + + if (unlikely(queue->info->broken)) + return false; - if (likely(netif_carrier_ok(dev) && - RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) + spin_lock_irqsave(&queue->rx_cons_lock, flags); + work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); + if (work_queued > queue->rx_rsp_unconsumed) { + queue->rx_rsp_unconsumed = work_queued; + *eoi = 0; + } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { + const struct device *dev = &queue->info->netdev->dev; + + 
spin_unlock_irqrestore(&queue->rx_cons_lock, flags); + dev_alert(dev, "RX producer index going backwards\n"); + dev_alert(dev, "Disabled for further use\n"); + queue->info->broken = true; + return false; + } + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); + + if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) napi_schedule(&queue->napi); + return true; +} + +static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) +{ + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (likely(xennet_handle_rx(dev_id, &eoiflag))) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { - xennet_tx_interrupt(irq, dev_id); - xennet_rx_interrupt(irq, dev_id); + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (xennet_handle_tx(dev_id, &eoiflag) && + xennet_handle_rx(dev_id, &eoiflag)) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } @@ -1258,12 +1433,17 @@ static void xennet_poll_controller(struct net_device *dev) struct netfront_info *info = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; + + if (info->broken) + return; + for (i = 0; i < num_queues; ++i) xennet_interrupt(0, &info->queues[i]); } #endif static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -1336,12 +1516,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -1470,9 +1653,10 @@ static int setup_netfront_single(struct netfront_queue *queue) if (err < 0) goto fail; - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, - xennet_interrupt, - 0, queue->info->netdev->name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, + xennet_interrupt, 0, + queue->info->netdev->name, + queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; @@ -1500,18 +1684,18 @@ static int setup_netfront_split(struct netfront_queue *queue) snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, - xennet_tx_interrupt, - 0, queue->tx_irq_name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, + xennet_tx_interrupt, 0, + queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); - err = bind_evtchn_to_irqhandler(queue->rx_evtchn, - xennet_rx_interrupt, - 0, queue->rx_irq_name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, + xennet_rx_interrupt, 0, + queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; @@ -1535,7 +1719,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; - struct xen_netif_rx_sring *rxs; + struct xen_netif_rx_sring *rxs = NULL; grant_ref_t gref; int err; @@ -1555,21 +1739,21 @@ 
static int setup_netfront(struct xenbus_device *dev, err = xenbus_grant_ring(dev, txs, 1, &gref); if (err < 0) - goto grant_tx_ring_fail; + goto fail; queue->tx_ring_ref = gref; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); - goto alloc_rx_ring_fail; + goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); err = xenbus_grant_ring(dev, rxs, 1, &gref); if (err < 0) - goto grant_rx_ring_fail; + goto fail; queue->rx_ring_ref = gref; if (feature_split_evtchn) @@ -1582,22 +1766,28 @@ static int setup_netfront(struct xenbus_device *dev, err = setup_netfront_single(queue); if (err) - goto alloc_evtchn_fail; + goto fail; return 0; /* If we fail to setup netfront, it is safe to just revoke access to * granted pages because backend is not accessing it at this point. */ -alloc_evtchn_fail: - gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); -grant_rx_ring_fail: - free_page((unsigned long)rxs); -alloc_rx_ring_fail: - gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); -grant_tx_ring_fail: - free_page((unsigned long)txs); -fail: + fail: + if (queue->rx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->rx_ring_ref, 0, + (unsigned long)rxs); + queue->rx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)rxs); + } + if (queue->tx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->tx_ring_ref, 0, + (unsigned long)txs); + queue->tx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)txs); + } return err; } @@ -1613,6 +1803,7 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); + spin_lock_init(&queue->rx_cons_lock); timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); @@ -1620,13 +1811,15 @@ static int xennet_init_queue(struct netfront_queue *queue) snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", devid, queue->id); - /* Initialise tx_skbs as a free chain containing every entry. */ + /* Initialise tx_skb_freelist as a free chain containing every entry. */ queue->tx_skb_freelist = 0; + queue->tx_pend_queue = TX_LINK_NONE; for (i = 0; i < NET_TX_RING_SIZE; i++) { - skb_entry_set_link(&queue->tx_skbs[i], i+1); + queue->tx_link[i] = i + 1; queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_page[i] = NULL; } + queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { @@ -1740,22 +1933,6 @@ static int write_queue_xenstore_keys(struct netfront_queue *queue, return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_queues(struct netfront_info *info, unsigned int *num_queues) { @@ -1811,6 +1988,10 @@ static int talk_to_netback(struct xenbus_device *dev, info->netdev->irq = 0; + /* Check if backend is trusted. 
*/ + info->bounce = !xennet_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); @@ -1831,6 +2012,9 @@ static int talk_to_netback(struct xenbus_device *dev, if (info->queues) xennet_destroy_queues(info); + /* For the case of a reconnect reset the "broken" indicator. */ + info->broken = false; + err = xennet_create_queues(info, &num_queues); if (err < 0) { xenbus_dev_fatal(dev, err, "creating queues"); @@ -1961,6 +2145,9 @@ static int xennet_connect(struct net_device *dev) err = talk_to_netback(np->xbdev, np); if (err) return err; + if (np->bounce) + dev_info(&np->xbdev->dev, + "bouncing transmitted data to zeroed pages\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; @@ -2141,28 +2328,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index d8d70dd830b07fe6254e1e5d52e12045a4ea64f4..7f143387b9ffccaff2937ad135483178b135c429 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -267,7 +267,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, *fw_vsc_cfg, len); if (r) { - devm_kfree(dev, fw_vsc_cfg); + devm_kfree(dev, *fw_vsc_cfg); goto vsc_read_err; } } else { diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c index e65d027b91fafbbd752970cff0afdc9e8cfb0d7c..54d228acc0f5d9bac7b617cb2c36376abe1a0a42 100644 --- a/drivers/nfc/nfcmrvl/main.c +++ b/drivers/nfc/nfcmrvl/main.c @@ -194,6 +194,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; + nci_unregister_device(ndev); if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); @@ -202,7 +203,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) if 
(gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); - nci_unregister_device(ndev); nci_free_device(ndev); kfree(priv); } @@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) /* Reset possible fault of previous session */ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); - if (priv->config.reset_n_io) { + if (gpio_is_valid(priv->config.reset_n_io)) { nfc_info(priv->dev, "reset the chip\n"); gpio_set_value(priv->config.reset_n_io, 0); usleep_range(5000, 10000); @@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) { - if (priv->config.reset_n_io) + if (gpio_is_valid(priv->config.reset_n_io)) gpio_set_value(priv->config.reset_n_io, 0); } diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c index 91162f8e0366c87ab7d3e26f2f02344599fb100b..e5a622ce4b9517d299170f20d35bde2bd5e003da 100644 --- a/drivers/nfc/nfcmrvl/uart.c +++ b/drivers/nfc/nfcmrvl/uart.c @@ -26,7 +26,7 @@ static unsigned int hci_muxed; static unsigned int flow_control; static unsigned int break_control; -static unsigned int reset_n_io; +static int reset_n_io = -EINVAL; /* ** NFCMRVL NCI OPS @@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node, struct device_node *matched_node; int ret; - matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart"); + matched_node = of_get_compatible_child(node, "marvell,nfc-uart"); if (!matched_node) { - matched_node = of_find_compatible_node(node, NULL, - "mrvl,nfc-uart"); + matched_node = of_get_compatible_child(node, "mrvl,nfc-uart"); if (!matched_node) return -ENODEV; } @@ -232,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal."); module_param(hci_muxed, uint, 0); MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one."); -module_param(reset_n_io, uint, 0); +module_param(reset_n_io, int, 0); MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal."); diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index 945cc903d8f1123fd63a13c42564a3dfe7465a12..888e298f610b8ed7140c85c5d0355d55007b6cfc 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c @@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf, /* No configuration for USB */ memset(&config, 0, sizeof(config)); + config.reset_n_io = -EINVAL; nfc_info(&udev->dev, "intf %p id %p\n", intf, id); diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index ba695e392c3b7b1c4d85c9e877d1234c23502caf..0df745cad601a03b64ee1f7113fd293058984ee1 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -236,8 +236,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id) if (r == -EREMOTEIO) { phy->hard_fault = r; - skb = NULL; - } else if (r < 0) { + if (info->mode == NXP_NCI_MODE_FW) + nxp_nci_fw_recv_frame(phy->ndev, NULL); + } + if (r < 0) { nfc_err(&client->dev, "Read failed with error %d\n", r); goto exit_irq_handled; } diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 5d823e965883b0f5f23db5ab39afc9f96a128267..95a13830a44035f7bb72e10f1549de58ab7b7342 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -165,10 +165,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) return usb_submit_urb(phy->ack_urb, flags); } +struct pn533_out_arg { + struct pn533_usb_phy *phy; + struct completion done; +}; + static int pn533_usb_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_usb_phy 
*phy = dev->phy; + struct pn533_out_arg arg; + void *cntx; int rc; if (phy->priv == NULL) @@ -180,10 +187,18 @@ static int pn533_usb_send_frame(struct pn533 *dev, print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); + arg.phy = phy; + init_completion(&arg.done); + cntx = phy->out_urb->context; + phy->out_urb->context = &arg; + rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); if (rc) return rc; + wait_for_completion(&arg.done); + phy->out_urb->context = cntx; + if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); @@ -424,7 +439,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) return arg.rc; } -static void pn533_send_complete(struct urb *urb) +static void pn533_out_complete(struct urb *urb) +{ + struct pn533_out_arg *arg = urb->context; + struct pn533_usb_phy *phy = arg->phy; + + switch (urb->status) { + case 0: + break; /* success */ + case -ECONNRESET: + case -ENOENT: + dev_dbg(&phy->udev->dev, + "The urb has been stopped (status %d)\n", + urb->status); + break; + case -ESHUTDOWN: + default: + nfc_err(&phy->udev->dev, + "Urb failure (status %d)\n", + urb->status); + } + + complete(&arg->done); +} + +static void pn533_ack_complete(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; @@ -512,10 +551,10 @@ static int pn533_usb_probe(struct usb_interface *interface, usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), - NULL, 0, pn533_send_complete, phy); + NULL, 0, pn533_out_complete, phy); usb_fill_bulk_urb(phy->ack_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), - NULL, 0, pn533_send_complete, phy); + NULL, 0, pn533_ack_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: @@ -559,18 +598,25 @@ static int pn533_usb_probe(struct usb_interface *interface, rc = pn533_finalize_setup(priv); if (rc) - goto error; + goto err_deregister; usb_set_intfdata(interface, phy); return 0; +err_deregister: + pn533_unregister_device(phy->priv); error: + usb_kill_urb(phy->in_urb); + usb_kill_urb(phy->out_urb); + usb_kill_urb(phy->ack_urb); + usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); + kfree(phy->ack_buffer); return rc; } diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index bb43cebda9dcd39ec3aa3a5a729758f987354882..08da712ad45f8fe6cf9fae058eb93b879ee61eb3 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -792,7 +792,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out, rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); if (rc) - usb_unlink_urb(dev->out_urb); + usb_kill_urb(dev->out_urb); exit: mutex_unlock(&dev->out_urb_lock); @@ -1618,7 +1618,9 @@ static int port100_probe(struct usb_interface *interface, nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index f26d938d240f03dbb27889fc101d552511ea9d92..12d73f9dbe9f37e3ef208971b16dee783d362312 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -297,13 +297,15 @@ EXPORT_SYMBOL(ndlc_probe); void ndlc_remove(struct llt_ndlc *ndlc) { - st_nci_remove(ndlc->ndev); - /* cancel timers */ del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = 
false; + /* cancel work */ + cancel_work_sync(&ndlc->sm_work); + + st_nci_remove(ndlc->ndev); skb_queue_purge(&ndlc->rcv_q); skb_queue_purge(&ndlc->send_q); diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index f55d082ace71558c8bf23d1813d70da18c9c5a0d..5d6e7e931bc6cd06bc162a2672acba679d60d782 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); + if (!transaction) + return -ENOMEM; transaction->aid_len = skb->data[1]; memcpy(transaction->aid, &skb->data[2], transaction->aid_len); diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index e803fdfa918977f6703785253f06a16402a80387..f37069b53b20030ea99d7c5094d45950cef71520 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -719,6 +719,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev, NFC_PROTO_FELICA_MASK; } else { kfree_skb(nfcid_skb); + nfcid_skb = NULL; /* P2P in type A */ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_NFCID1, diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 1b347096422ff2d4c9d0933682c79ef89fe86d01..ebea3b4dd8e1b9072d6ea45eb36d99dc4cec8489 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -544,7 +544,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_ena)) { nfc_err(dev, "Unable to get ENABLE GPIO\n"); - return PTR_ERR(phy->gpiod_ena); + r = PTR_ERR(phy->gpiod_ena); + goto out_free; } phy->se_status.is_ese_present = @@ -555,7 +556,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, r = st21nfca_hci_platform_init(phy); if (r < 0) { nfc_err(&client->dev, "Unable to reboot st21nfca\n"); - return r; + goto out_free; } r = devm_request_threaded_irq(&client->dev, client->irq, NULL, @@ -564,15 +565,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, ST21NFCA_HCI_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); - return r; + goto out_free; } - return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, - ST21NFCA_FRAME_HEADROOM, - ST21NFCA_FRAME_TAILROOM, - ST21NFCA_HCI_LLC_MAX_PAYLOAD, - &phy->hdev, - &phy->se_status); + r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, + ST21NFCA_FRAME_HEADROOM, + ST21NFCA_FRAME_TAILROOM, + ST21NFCA_HCI_LLC_MAX_PAYLOAD, + &phy->hdev, + &phy->se_status); + if (r) + goto out_free; + + return 0; + +out_free: + kfree_skb(phy->pending_skb); + return r; } static int st21nfca_hci_i2c_remove(struct i2c_client *client) @@ -585,6 +594,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client) if (phy->powered) st21nfca_hci_i2c_disable(phy); + if (phy->pending_skb) + kfree_skb(phy->pending_skb); return 0; } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 4bed9e842db38126859d74d4d585dee66ea80d33..ced3c20d645394e3d731ba2ef94ab32eb52b8a3f 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -328,8 +328,15 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); + if (!transaction) + return -ENOMEM; transaction->aid_len = skb->data[1]; + + /* Checking if the length of the AID is valid */ + if 
(transaction->aid_len > sizeof(transaction->aid)) + return -EINVAL; + memcpy(transaction->aid, &skb->data[2], transaction->aid_len); @@ -339,6 +346,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -EPROTO; transaction->params_len = skb->data[transaction->aid_len + 3]; + + /* Total size is allocated (skb->len - 2) minus fixed array members */ + if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) + return -EINVAL; + memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 2b26f762fbc3b3f5f837e27267d0de9fc1b9e5c8..01acb6e533655d6b6041cbbde43af8c1364aec60 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = { }; MODULE_DEVICE_TABLE(spi, st95hf_id); +static const struct of_device_id st95hf_spi_of_match[] = { + { .compatible = "st,st95hf" }, + { }, +}; +MODULE_DEVICE_TABLE(of, st95hf_spi_of_match); + static int st95hf_probe(struct spi_device *nfc_spi_dev) { int ret; @@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = { .driver = { .name = "st95hf", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(st95hf_spi_of_match), }, .id_table = st95hf_id, .probe = st95hf_probe, diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index efb214fc545a231514ac1309033beb9579c6014a..ffc5a963e57409708726cff7f5b480eee08a57a9 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -855,8 +855,8 @@ static int amd_poll_link(struct amd_ntb_dev *ndev) ndev->cntl_sta = reg; - rc = pci_read_config_dword(ndev->ntb.pdev, - AMD_LINK_STATUS_OFFSET, &stat); + rc = pcie_capability_read_dword(ndev->ntb.pdev, + PCI_EXP_LNKCTL, &stat); if (rc) return 0; ndev->lnk_sta = stat; @@ -1125,6 +1125,7 @@ static const struct file_operations amd_ntb_debugfs_info = { static const struct pci_device_id amd_ntb_pci_tbl[] = { {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)}, + {PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_NTB)}, {0} }; MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h index 8f3617a46292dbc31e6d46e2adb8a3d34fb929b8..81d634bb72c832e961895fa684f77fa3004085f3 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.h +++ b/drivers/ntb/hw/amd/ntb_hw_amd.h @@ -54,7 +54,6 @@ #define PCI_DEVICE_ID_AMD_NTB 0x145B #define AMD_LINK_HB_TIMEOUT msecs_to_jiffies(1000) -#define AMD_LINK_STATUS_OFFSET 0x68 #define NTB_LIN_STA_ACTIVE_BIT 0x00000002 #define NTB_LNK_STA_SPEED_MASK 0x000F0000 #define NTB_LNK_STA_WIDTH_MASK 0x03F00000 diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index dbe72f116017ab305a0a1f19276155700f69211e..a67ef23e81bca70127edb92afcb78662ad3dc0e3 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -1105,9 +1105,9 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, } /* Allocate memory for memory window descriptors */ - ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, - sizeof(*ret_mws), GFP_KERNEL); - if (IS_ERR_OR_NULL(ret_mws)) + ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), + GFP_KERNEL); + if (!ret_mws) return ERR_PTR(-ENOMEM); /* Copy the info of detected memory windows */ @@ -2390,7 +2390,7 @@ static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev, /* Allocate memory for the IDT PCIe-device descriptor */ ndev = 
devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL); - if (IS_ERR_OR_NULL(ndev)) { + if (!ndev) { dev_err(&pdev->dev, "Memory allocation failed for descriptor"); return ERR_PTR(-ENOMEM); } diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile index 4ff22af967c658464558c60dd0b9d3e9c8f10285..36c69412dc497ac0598ba401f2161c10c83e162f 100644 --- a/drivers/ntb/hw/intel/Makefile +++ b/drivers/ntb/hw/intel/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o -ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o +ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o ntb_hw_gen4.o diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 6aa57322727916bd5bc1c8e5ab13f286f8fd1b1d..adf7904522090203de4ed920f85f00a87ddb82ac 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -60,6 +60,7 @@ #include "ntb_hw_intel.h" #include "ntb_hw_gen1.h" #include "ntb_hw_gen3.h" +#include "ntb_hw_gen4.h" #define NTB_NAME "ntb_hw_intel" #define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver" @@ -180,7 +181,7 @@ int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) return ndev->reg->mw_bar[idx]; } -static inline int ndev_db_addr(struct intel_ntb_dev *ndev, +void ndev_db_addr(struct intel_ntb_dev *ndev, phys_addr_t *db_addr, resource_size_t *db_size, phys_addr_t reg_addr, unsigned long reg) { @@ -196,8 +197,6 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev, *db_size = ndev->reg->db_size; dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); } - - return 0; } u64 ndev_db_read(struct intel_ntb_dev *ndev, @@ -265,7 +264,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, return 0; } -static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) { u64 shift, mask; @@ -764,6 +763,8 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, return ndev_ntb_debugfs_read(filp, ubuf, count, offp); else if (pdev_is_gen3(ndev->ntb.pdev)) return ndev_ntb3_debugfs_read(filp, ubuf, count, offp); + else if (pdev_is_gen4(ndev->ntb.pdev)) + return ndev_ntb4_debugfs_read(filp, ubuf, count, offp); return -ENXIO; } @@ -1111,13 +1112,28 @@ int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) ndev->self_reg->db_mask); } -int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size) +static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, u64 *db_data, int db_bit) { + u64 db_bits; struct intel_ntb_dev *ndev = ntb_ndev(ntb); - return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + db_bits = BIT_ULL(db_bit); + + if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, ndev->peer_reg->db_bell); + + if (db_data) + *db_data = db_bits; + + + return 0; } static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) @@ -1845,16 +1861,15 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, int rc, node; node = dev_to_node(&pdev->dev); + ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); + if (!ndev) { + rc = -ENOMEM; + goto err_ndev; + } - if (pdev_is_gen1(pdev)) { - ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); - if (!ndev) { - rc = -ENOMEM; - goto err_ndev; - } - - ndev_init_struct(ndev, pdev); + ndev_init_struct(ndev, pdev); + if 
(pdev_is_gen1(pdev)) { rc = intel_ntb_init_pci(ndev, pdev); if (rc) goto err_init_pci; @@ -1862,17 +1877,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, rc = xeon_init_dev(ndev); if (rc) goto err_init_dev; - } else if (pdev_is_gen3(pdev)) { - ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); - if (!ndev) { - rc = -ENOMEM; - goto err_ndev; - } - - ndev_init_struct(ndev, pdev); ndev->ntb.ops = &intel_ntb3_ops; - rc = intel_ntb_init_pci(ndev, pdev); if (rc) goto err_init_pci; @@ -1880,10 +1886,18 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, rc = gen3_init_dev(ndev); if (rc) goto err_init_dev; + } else if (pdev_is_gen4(pdev)) { + ndev->ntb.ops = &intel_ntb4_ops; + rc = intel_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + rc = gen4_init_dev(ndev); + if (rc) + goto err_init_dev; } else { rc = -EINVAL; - goto err_ndev; + goto err_init_pci; } ndev_reset_unsafe_flags(ndev); @@ -1902,7 +1916,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev, err_register: ndev_deinit_debugfs(ndev); - if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev)) + if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev)) xeon_deinit_dev(ndev); err_init_dev: intel_ntb_deinit_pci(ndev); @@ -1918,7 +1932,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev) ntb_unregister_device(&ndev->ntb); ndev_deinit_debugfs(ndev); - if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev)) + if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev)) xeon_deinit_dev(ndev); intel_ntb_deinit_pci(ndev); kfree(ndev); @@ -2023,6 +2037,7 @@ static const struct file_operations intel_ntb_debugfs_info = { }; static const struct pci_device_id intel_ntb_pci_tbl[] = { + /* GEN1 */ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)}, @@ -2038,7 +2053,12 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = { {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)}, + + /* GEN3 */ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)}, + + /* GEN4 */ + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)}, {0} }; MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h index ad8ec1444436e535ecab21e90c6434b79ae87d96..1b759942d8aff4dbd83dacaaa04cbf36c1d507f3 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h @@ -140,6 +140,7 @@ #define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1) #define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2) #define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3) +#define NTB_HWERR_BAR_ALIGN BIT_ULL(4) extern struct intel_b2b_addr xeon_b2b_usd_addr; extern struct intel_b2b_addr xeon_b2b_dsd_addr; @@ -147,6 +148,9 @@ extern struct intel_b2b_addr xeon_b2b_dsd_addr; int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max, int msix_shift, int total_shift); enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); +void ndev_db_addr(struct intel_ntb_dev *ndev, + phys_addr_t *db_addr, resource_size_t *db_size, + phys_addr_t reg_addr, unsigned long reg); u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio); int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, void __iomem *mmio); @@ -166,8 +170,6 @@ int intel_ntb_db_vector_count(struct ntb_dev *ntb); u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector); int intel_ntb_db_set_mask(struct 
ntb_dev *ntb, u64 db_bits); int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits); -int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size); int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb); int intel_ntb_spad_count(struct ntb_dev *ntb); u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index b3fa24778f9406e6b70837162e34e1a3a0db2335..9116333e01a80aad04e0a92c040baa6770ec1396 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -415,9 +415,8 @@ ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, return ret; } -static int intel_ntb3_link_enable(struct ntb_dev *ntb, - enum ntb_speed max_speed, - enum ntb_width max_width) +int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed, + enum ntb_width max_width) { struct intel_ntb_dev *ndev; u32 ntb_ctl; @@ -532,7 +531,38 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, return 0; } -static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, int db_bit) +{ + phys_addr_t db_addr_base; + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr, + ndev->peer_reg->db_bell); + + if (db_addr) { + *db_addr = db_addr_base + (db_bit * 4); + dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n", + *db_addr, db_bit); + } + + if (db_data) { + *db_data = 1; + dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n", + *db_data, db_bit); + } + + return 0; +} + +int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); int bit; @@ -550,7 +580,7 @@ static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) return 0; } -static u64 intel_ntb3_db_read(struct ntb_dev *ntb) +u64 intel_ntb3_db_read(struct ntb_dev *ntb) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); @@ -559,7 +589,7 @@ static u64 intel_ntb3_db_read(struct ntb_dev *ntb) ndev->self_reg->db_clear); } -static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) +int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); @@ -584,7 +614,7 @@ const struct ntb_dev_ops intel_ntb3_ops = { .db_clear = intel_ntb3_db_clear, .db_set_mask = intel_ntb_db_set_mask, .db_clear_mask = intel_ntb_db_clear_mask, - .peer_db_addr = intel_ntb_peer_db_addr, + .peer_db_addr = intel_ntb3_peer_db_addr, .peer_db_set = intel_ntb3_peer_db_set, .spad_is_unsafe = intel_ntb_spad_is_unsafe, .spad_count = intel_ntb_spad_count, diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h index 75fb86ca27bb12dee577c00ae52a3f27e65b208b..2bc5d83560455e33d3884f92ae31e543bc66c63f 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h @@ -104,6 +104,14 @@ static inline void gen3_db_iowrite(u64 bits, void __iomem *mmio) ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp); int gen3_init_dev(struct intel_ntb_dev *ndev); +int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed, + enum ntb_width max_width); +u64 intel_ntb3_db_read(struct ntb_dev *ntb); +int 
intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, int db_bit); extern const struct ntb_dev_ops intel_ntb3_ops; diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c new file mode 100644 index 0000000000000000000000000000000000000000..bc4541cbf8c6e174011851c09627971613a17057 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +/* Copyright(c) 2020 Intel Corporation. All rights reserved. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ntb_hw_intel.h" +#include "ntb_hw_gen1.h" +#include "ntb_hw_gen3.h" +#include "ntb_hw_gen4.h" + +static int gen4_poll_link(struct intel_ntb_dev *ndev); +static int gen4_link_is_up(struct intel_ntb_dev *ndev); + +static const struct intel_ntb_reg gen4_reg = { + .poll_link = gen4_poll_link, + .link_is_up = gen4_link_is_up, + .db_ioread = gen3_db_ioread, + .db_iowrite = gen3_db_iowrite, + .db_size = sizeof(u32), + .ntb_ctl = GEN4_NTBCNTL_OFFSET, + .mw_bar = {2, 4}, +}; + +static const struct intel_ntb_alt_reg gen4_pri_reg = { + .db_clear = GEN4_IM_INT_STATUS_OFFSET, + .db_mask = GEN4_IM_INT_DISABLE_OFFSET, + .spad = GEN4_IM_SPAD_OFFSET, +}; + +static const struct intel_ntb_xlat_reg gen4_sec_xlat = { + .bar2_limit = GEN4_IM23XLMT_OFFSET, + .bar2_xlat = GEN4_IM23XBASE_OFFSET, + .bar2_idx = GEN4_IM23XBASEIDX_OFFSET, +}; + +static const struct intel_ntb_alt_reg gen4_b2b_reg = { + .db_bell = GEN4_IM_DOORBELL_OFFSET, + .spad = GEN4_EM_SPAD_OFFSET, +}; + +static int gen4_poll_link(struct intel_ntb_dev *ndev) +{ + u16 reg_val; + + /* + * We need to write to DLLSCS bit in the SLOTSTS before we + * can clear the hardware link interrupt on ICX NTB. + */ + iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS); + ndev->reg->db_iowrite(ndev->db_link_mask, + ndev->self_mmio + + ndev->self_reg->db_clear); + + reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET); + if (reg_val == ndev->lnk_sta) + return 0; + + ndev->lnk_sta = reg_val; + + return 1; +} + +static int gen4_link_is_up(struct intel_ntb_dev *ndev) +{ + return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); +} + +static int gen4_init_isr(struct intel_ntb_dev *ndev) +{ + int i; + + /* + * The MSIX vectors and the interrupt status bits are not lined up + * on Gen3 (Skylake) and Gen4. By default the link status bit is bit + * 32, however it is by default MSIX vector0. We need to fixup to + * line them up. The vectors at reset is 1-32,0. We need to reprogram + * to 0-32. 
+ */ + for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++) + iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i); + + return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT, + GEN4_DB_MSIX_VECTOR_COUNT, + GEN4_DB_MSIX_VECTOR_SHIFT, + GEN4_DB_TOTAL_SHIFT); +} + +static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev, + const struct intel_b2b_addr *addr, + const struct intel_b2b_addr *peer_addr) +{ + struct pci_dev *pdev; + void __iomem *mmio; + phys_addr_t bar_addr; + + pdev = ndev->ntb.pdev; + mmio = ndev->self_mmio; + + /* setup incoming bar limits == base addrs (zero length windows) */ + bar_addr = addr->bar2_addr64; + iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET); + dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr); + + bar_addr = addr->bar4_addr64; + iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET); + dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr); + + /* zero incoming translation addrs */ + iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET); + iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET); + + ndev->peer_mmio = ndev->self_mmio; + + return 0; +} + +static int gen4_init_ntb(struct intel_ntb_dev *ndev) +{ + int rc; + + + ndev->mw_count = XEON_MW_COUNT; + ndev->spad_count = GEN4_SPAD_COUNT; + ndev->db_count = GEN4_DB_COUNT; + ndev->db_link_mask = GEN4_DB_LINK_BIT; + + ndev->self_reg = &gen4_pri_reg; + ndev->xlat_reg = &gen4_sec_xlat; + ndev->peer_reg = &gen4_b2b_reg; + + if (ndev->ntb.topo == NTB_TOPO_B2B_USD) + rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr, + &xeon_b2b_usd_addr); + else + rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr, + &xeon_b2b_dsd_addr); + if (rc) + return rc; + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + ndev->reg->db_iowrite(ndev->db_valid_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + return 0; +} + +static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & GEN4_PPD_TOPO_MASK) { + case GEN4_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case GEN4_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + +int gen4_init_dev(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + u32 ppd1/*, ppd0*/; + u16 lnkctl; + int rc; + + ndev->reg = &gen4_reg; + + if (pdev_is_ICX(pdev)) + ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN; + + ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, + ntb_topo_string(ndev->ntb.topo)); + if (ndev->ntb.topo == NTB_TOPO_NONE) + return -EINVAL; + + rc = gen4_init_ntb(ndev); + if (rc) + return rc; + + /* init link setup */ + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + return gen4_init_isr(ndev); +} + +ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { u64 v64; u32 v32; u16 v16; } u; + + ndev = filp->private_data; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off = 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + off += 
scnprintf(buf + off, buf_size - off, + "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); + off += scnprintf(buf + off, buf_size - off, + "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta); + + if (!ndev->reg->link_is_up(ndev)) + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t%#llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM23XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM45XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM23XLMT -\t\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM45XLMT -\t\t\t%#018llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Statistics:\n"); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Hardware Errors:\n"); + + if (!pci_read_config_word(ndev->ntb.pdev, + GEN4_DEVSTS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "DEVSTS -\t\t%#06x\n", u.v16); + + u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "LNKSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN4_UNCERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "UNCERRSTS -\t\t%#06x\n", u.v32); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN4_CORERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "CORERRSTS -\t\t%#06x\n", u.v32); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long xlat_reg, limit_reg, idx_reg; + unsigned short base_idx, reg_val16; + resource_size_t bar_size, mw_size; + void __iomem *mmio; + u64 base, limit, reg_val; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + 
return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) { + /* hardware requires that addr is aligned to bar size */ + if (addr & (bar_size - 1)) + return -EINVAL; + } else { + if (addr & (PAGE_SIZE - 1)) + return -EINVAL; + } + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); + base = pci_resource_start(ndev->ntb.pdev, bar); + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) { + limit = base + size; + base_idx = __ilog2_u64(size); + } else { + limit = base + mw_size; + base_idx = __ilog2_u64(mw_size); + } + + + /* set and verify setting the translation address */ + iowrite64(addr, mmio + xlat_reg); + reg_val = ioread64(mmio + xlat_reg); + if (reg_val != addr) { + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val); + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val); + + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) { + idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2); + iowrite16(base_idx, mmio + idx_reg); + reg_val16 = ioread16(mmio + idx_reg); + if (reg_val16 != base_idx) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + iowrite16(0, mmio + idx_reg); + return -EIO; + } + dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16); + } + + + return 0; +} + +static int intel_ntb4_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, enum ntb_width max_width) +{ + struct intel_ntb_dev *ndev; + u32 ntb_ctl, ppd0; + u16 lnkctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(&ntb->pdev->dev, + "Enabling link with max_speed %d max_width %d\n", + max_speed, max_width); + + if (max_speed != NTB_SPEED_AUTO) + dev_dbg(&ntb->pdev->dev, + "ignoring max_speed %d\n", max_speed); + if (max_width != NTB_WIDTH_AUTO) + dev_dbg(&ntb->pdev->dev, + "ignoring max_width %d\n", max_width); + + ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP; + ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + /* start link training in PPD0 */ + ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET); + ppd0 |= GEN4_PPD_LINKTRN; + iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET); + + /* make sure link training has started */ + ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET); + if (!(ppd0 & GEN4_PPD_LINKTRN)) { + dev_warn(&ntb->pdev->dev, "Link is not training\n"); + return -ENXIO; + } + + ndev->dev_up = 1; + + return 0; +} + +static int intel_ntb4_link_disable(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev; + u32 ntb_cntl; + u16 lnkctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(&ntb->pdev->dev, "Disabling link\n"); + + /* clear the snoop bits */ + ntb_cntl = ioread32(ndev->self_mmio + 
ndev->reg->ntb_ctl); + ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP); + ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP); + iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); + + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + ndev->dev_up = 0; + + return 0; +} + +static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + resource_size_t bar_size, mw_size; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (addr_align) { + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) + *addr_align = pci_resource_len(ndev->ntb.pdev, bar); + else + *addr_align = PAGE_SIZE; + } + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = mw_size; + + return 0; +} + +const struct ntb_dev_ops intel_ntb4_ops = { + .mw_count = intel_ntb_mw_count, + .mw_get_align = intel_ntb4_mw_get_align, + .mw_set_trans = intel_ntb4_mw_set_trans, + .peer_mw_count = intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, + .link_is_up = intel_ntb_link_is_up, + .link_enable = intel_ntb4_link_enable, + .link_disable = intel_ntb4_link_disable, + .db_valid_mask = intel_ntb_db_valid_mask, + .db_vector_count = intel_ntb_db_vector_count, + .db_vector_mask = intel_ntb_db_vector_mask, + .db_read = intel_ntb3_db_read, + .db_clear = intel_ntb3_db_clear, + .db_set_mask = intel_ntb_db_set_mask, + .db_clear_mask = intel_ntb_db_clear_mask, + .peer_db_addr = intel_ntb3_peer_db_addr, + .peer_db_set = intel_ntb3_peer_db_set, + .spad_is_unsafe = intel_ntb_spad_is_unsafe, + .spad_count = intel_ntb_spad_count, + .spad_read = intel_ntb_spad_read, + .spad_write = intel_ntb_spad_write, + .peer_spad_addr = intel_ntb_peer_spad_addr, + .peer_spad_read = intel_ntb_peer_spad_read, + .peer_spad_write = intel_ntb_peer_spad_write, +}; + diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h new file mode 100644 index 0000000000000000000000000000000000000000..a868c788de02f3abf37a1ac862814344e1d22f52 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright(c) 2020 Intel Corporation. All rights reserved. 
*/ +#ifndef _NTB_INTEL_GEN4_H_ +#define _NTB_INTEL_GEN4_H_ + +#include "ntb_hw_intel.h" + +/* Supported PCI device revision range for ICX */ +#define PCI_DEVICE_REVISION_ICX_MIN 0x2 +#define PCI_DEVICE_REVISION_ICX_MAX 0xF + +/* Intel Gen4 NTB hardware */ +/* PCIe config space */ +#define GEN4_IMBAR23SZ_OFFSET 0x00c4 +#define GEN4_IMBAR45SZ_OFFSET 0x00c5 +#define GEN4_EMBAR23SZ_OFFSET 0x00c6 +#define GEN4_EMBAR45SZ_OFFSET 0x00c7 +#define GEN4_DEVCTRL_OFFSET 0x0048 +#define GEN4_DEVSTS_OFFSET 0x004a +#define GEN4_UNCERRSTS_OFFSET 0x0104 +#define GEN4_CORERRSTS_OFFSET 0x0110 + +/* BAR0 MMIO */ +#define GEN4_NTBCNTL_OFFSET 0x0000 +#define GEN4_IM23XBASE_OFFSET 0x0010 /* IMBAR1XBASE */ +#define GEN4_IM23XLMT_OFFSET 0x0018 /* IMBAR1XLMT */ +#define GEN4_IM45XBASE_OFFSET 0x0020 /* IMBAR2XBASE */ +#define GEN4_IM45XLMT_OFFSET 0x0028 /* IMBAR2XLMT */ +#define GEN4_IM_INT_STATUS_OFFSET 0x0040 +#define GEN4_IM_INT_DISABLE_OFFSET 0x0048 +#define GEN4_INTVEC_OFFSET 0x0050 /* 0-32 vecs */ +#define GEN4_IM23XBASEIDX_OFFSET 0x0074 +#define GEN4_IM45XBASEIDX_OFFSET 0x0076 +#define GEN4_IM_SPAD_OFFSET 0x0080 /* 0-15 SPADs */ +#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */ +#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */ +#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */ +#define GEN4_EM_SPAD_OFFSET 0x8080 +/* note, link status is now in MMIO and not config space for NTB */ +#define GEN4_LINK_CTRL_OFFSET 0xb050 +#define GEN4_LINK_STATUS_OFFSET 0xb052 +#define GEN4_PPD0_OFFSET 0xb0d4 +#define GEN4_PPD1_OFFSET 0xb4c0 +#define GEN4_LTSSMSTATEJMP 0xf040 + +#define GEN4_PPD_CLEAR_TRN 0x0001 +#define GEN4_PPD_LINKTRN 0x0008 +#define GEN4_PPD_CONN_MASK 0x0300 +#define GEN4_PPD_CONN_B2B 0x0200 +#define GEN4_PPD_DEV_MASK 0x1000 +#define GEN4_PPD_DEV_DSD 0x1000 +#define GEN4_PPD_DEV_USD 0x0000 +#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 + +#define GEN4_SLOTSTS 0xb05a +#define GEN4_SLOTSTS_DLLSCS 0x100 + +#define GEN4_PPD_TOPO_MASK (GEN4_PPD_CONN_MASK | GEN4_PPD_DEV_MASK) +#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) +#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) + +#define GEN4_DB_COUNT 32 +#define GEN4_DB_LINK 32 +#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) +#define GEN4_DB_MSIX_VECTOR_COUNT 33 +#define GEN4_DB_MSIX_VECTOR_SHIFT 1 +#define GEN4_DB_TOTAL_SHIFT 33 +#define GEN4_SPAD_COUNT 16 + +#define NTB_CTL_E2I_BAR23_SNOOP 0x000004 +#define NTB_CTL_E2I_BAR23_NOSNOOP 0x000008 +#define NTB_CTL_I2E_BAR23_SNOOP 0x000010 +#define NTB_CTL_I2E_BAR23_NOSNOOP 0x000020 +#define NTB_CTL_E2I_BAR45_SNOOP 0x000040 +#define NTB_CTL_E2I_BAR45_NOSNOO 0x000080 +#define NTB_CTL_I2E_BAR45_SNOOP 0x000100 +#define NTB_CTL_I2E_BAR45_NOSNOOP 0x000200 +#define NTB_CTL_BUSNO_DIS_INC 0x000400 +#define NTB_CTL_LINK_DOWN 0x010000 + +#define NTB_SJC_FORCEDETECT 0x000004 + +ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp); +int gen4_init_dev(struct intel_ntb_dev *ndev); +ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp); + +extern const struct ntb_dev_ops intel_ntb4_ops; + +static inline int pdev_is_ICX(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision >= PCI_DEVICE_REVISION_ICX_MIN && + pdev->revision <= PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + +#endif diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index c49ff8970ce3dec1e7d87af3cd1c70a33292898c..4a7d8411b807b724f3ba5d1b8ec6d2d3d5b933aa 
100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -71,6 +71,7 @@ #define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E #define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F #define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C +#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e /* Ntb control and link status */ #define NTB_CTL_CFG_LOCK BIT(0) @@ -119,6 +120,7 @@ struct intel_ntb_xlat_reg { unsigned long bar0_base; unsigned long bar2_xlat; unsigned long bar2_limit; + unsigned short bar2_idx; }; struct intel_b2b_addr { @@ -181,6 +183,9 @@ struct intel_ntb_dev { struct dentry *debugfs_dir; struct dentry *debugfs_info; + + /* gen4 entries */ + int dev_up; }; #define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb) @@ -247,4 +252,11 @@ static inline void _iowrite64(u64 val, void __iomem *mmio) #endif #endif +static inline int pdev_is_gen4(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_ICX) + return 1; + + return 0; +} #endif diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 5ee5f40b4dfc3fba0ca2c3c8ddf824f4cd5b0f05..9fa1b991dd52e962c36789ea887ca3cef4c932b4 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -707,11 +707,16 @@ static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size) + resource_size_t *db_size, + u64 *db_data, + int db_bit) { struct switchtec_ntb *sndev = ntb_sndev(ntb); unsigned long offset; + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + offset = (unsigned long)sndev->mmio_peer_dbmsg->odb - (unsigned long)sndev->stdev->mmio; @@ -721,6 +726,8 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, *db_addr = pci_resource_start(ntb->pdev, 0) + offset; if (db_size) *db_size = sizeof(u32); + if (db_data) + *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift; return 0; } diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 2581ab724c34771e05f137947e71c86d8ccfb886..3305a2fbea4e27604890b1ff585b271ddee13f4d 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client); int ntb_register_device(struct ntb_dev *ntb) { + int ret; + if (!ntb) return -EINVAL; if (!ntb->pdev) @@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb) ntb->ctx_ops = NULL; spin_lock_init(&ntb->ctx_lock); - return device_register(&ntb->dev); + ret = device_register(&ntb->dev); + if (ret) + put_device(&ntb->dev); + + return ret; } EXPORT_SYMBOL(ntb_register_device); diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 2a9d6b0d1f193257266acd9b86ecfe68949d1eb9..ce6fa28d4cac630236f7b2e87ffa5f917660058e 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -149,7 +149,8 @@ struct perf_peer { u64 outbuf_xlat; resource_size_t outbuf_size; void __iomem *outbuf; - + phys_addr_t out_phys_addr; + dma_addr_t dma_dst_addr; /* Inbound MW params */ dma_addr_t inbuf_xlat; resource_size_t inbuf_size; @@ -777,6 +778,10 @@ static int perf_copy_chunk(struct perf_thread *pthr, struct dmaengine_unmap_data *unmap; struct device *dma_dev; int try = 0, ret = 0; + struct perf_peer *peer = pthr->perf->test_peer; + void __iomem *vbase; + void __iomem *dst_vaddr; + dma_addr_t dst_dma_addr; if (!use_dma) { memcpy_toio(dst, src, len); @@ -789,6 +794,10 @@ static int perf_copy_chunk(struct perf_thread *pthr, offset_in_page(dst), len)) return -EIO; + 
vbase = peer->outbuf; + dst_vaddr = dst; + dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase); + unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT); if (!unmap) return -ENOMEM; @@ -802,8 +811,7 @@ static int perf_copy_chunk(struct perf_thread *pthr, } unmap->to_cnt = 1; - unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst), - offset_in_page(dst), len, DMA_FROM_DEVICE); + unmap->addr[1] = dst_dma_addr; if (dma_mapping_error(dma_dev, unmap->addr[1])) { ret = -EIO; goto err_free_resource; @@ -860,6 +868,7 @@ static int perf_init_test(struct perf_thread *pthr) { struct perf_ctx *perf = pthr->perf; dma_cap_mask_t dma_mask; + struct perf_peer *peer = pthr->perf->test_peer; pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL, dev_to_node(&perf->ntb->dev)); @@ -877,15 +886,33 @@ static int perf_init_test(struct perf_thread *pthr) if (!pthr->dma_chan) { dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n", pthr->tidx); - atomic_dec(&perf->tsync); - wake_up(&perf->twait); - kfree(pthr->src); - return -ENODEV; + goto err_free; } + peer->dma_dst_addr = + dma_map_resource(pthr->dma_chan->device->dev, + peer->out_phys_addr, peer->outbuf_size, + DMA_FROM_DEVICE, 0); + if (dma_mapping_error(pthr->dma_chan->device->dev, + peer->dma_dst_addr)) { + dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n", + pthr->tidx); + peer->dma_dst_addr = 0; + dma_release_channel(pthr->dma_chan); + goto err_free; + } + dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n", + pthr->tidx, + &peer->out_phys_addr, + &peer->dma_dst_addr); atomic_set(&pthr->dma_sync, 0); - return 0; + +err_free: + atomic_dec(&perf->tsync); + wake_up(&perf->twait); + kfree(pthr->src); + return -ENODEV; } static int perf_run_test(struct perf_thread *pthr) @@ -973,6 +1000,11 @@ static void perf_clear_test(struct perf_thread *pthr) * We call it anyway just to be sure of the transfers completion. 
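	 * dmaengine_terminate_sync() also waits until the engine has
	 * stopped touching any in-flight descriptors, which is what makes
	 * the dma_unmap_resource() call added below safe.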
*/ (void)dmaengine_terminate_sync(pthr->dma_chan); + if (pthr->perf->test_peer->dma_dst_addr) + dma_unmap_resource(pthr->dma_chan->device->dev, + pthr->perf->test_peer->dma_dst_addr, + pthr->perf->test_peer->outbuf_size, + DMA_FROM_DEVICE, 0); dma_release_channel(pthr->dma_chan); @@ -1189,6 +1221,9 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf, pos += scnprintf(buf + pos, buf_size - pos, "\tOut buffer addr 0x%pK\n", peer->outbuf); + pos += scnprintf(buf + pos, buf_size - pos, + "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr); + pos += scnprintf(buf + pos, buf_size - pos, "\tOut buffer size %pa\n", &peer->outbuf_size); @@ -1373,7 +1408,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer) int ret; /* Get outbound MW parameters and map it */ - ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr, + ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr, &peer->outbuf_size); if (ret) return ret; @@ -1383,6 +1418,8 @@ static int perf_setup_peer_mw(struct perf_peer *peer) if (!peer->outbuf) return -ENOMEM; + peer->out_phys_addr = phys_addr; + if (max_mw_size && peer->outbuf_size > max_mw_size) { peer->outbuf_size = max_mw_size; dev_warn(&peer->perf->ntb->dev, diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 0360c015f6580b1cb5fef2667b2844381f0c8696..75ae2c508a044691d85dee5ad7f36381bd034733 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1260,11 +1260,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, ret = btt_data_read(arena, page, off, postmap, cur_len); if (ret) { - int rc; - /* Media error - set the e_flag */ - rc = btt_map_write(arena, premap, postmap, 0, 1, - NVDIMM_IO_ATOMIC); + if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC)) + dev_warn_ratelimited(to_dev(arena), + "Error persistently tracking bad blocks at %#x\n", + premap); goto out_rtt; } diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 795ad4ff35cafdb91ac7c9629aa63ad22a87f1f9..e341498876cadee2b4980b31c297d09dad58ba47 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -190,14 +190,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); - if (nd_btt->id < 0) { - kfree(nd_btt); - return NULL; - } + if (nd_btt->id < 0) + goto out_nd_btt; nd_btt->lbasize = lbasize; - if (uuid) + if (uuid) { uuid = kmemdup(uuid, 16, GFP_KERNEL); + if (!uuid) + goto out_put_id; + } nd_btt->uuid = uuid; dev = &nd_btt->dev; dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id); @@ -212,6 +213,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; } return dev; + +out_put_id: + ida_simple_remove(&nd_region->btt_ida, nd_btt->id); + +out_nd_btt: + kfree(nd_btt); + return NULL; } struct device *nd_btt_create(struct nd_region *nd_region) diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 8aae6dcc839fed90a76e31eaf35a5f47ac04957d..54a633e8cb5d21ef3d2e6afcede959ce1e1e3b06 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -86,7 +86,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) { nvdimm_bus_lock(&nvdimm_bus->dev); if (--nvdimm_bus->probe_active == 0) - wake_up(&nvdimm_bus->probe_wait); + wake_up(&nvdimm_bus->wait); nvdimm_bus_unlock(&nvdimm_bus->dev); } @@ -189,7 +189,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data) sector_t sector; /* make sure device is a region */ - if 
(!is_nd_pmem(dev)) + if (!is_memory(dev)) return 0; nd_region = to_nd_region(dev); @@ -348,7 +348,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, return NULL; INIT_LIST_HEAD(&nvdimm_bus->list); INIT_LIST_HEAD(&nvdimm_bus->mapping_list); - init_waitqueue_head(&nvdimm_bus->probe_wait); + init_waitqueue_head(&nvdimm_bus->wait); nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); mutex_init(&nvdimm_bus->reconfig_mutex); badrange_init(&nvdimm_bus->badrange); @@ -418,6 +418,9 @@ static int nd_bus_remove(struct device *dev) list_del_init(&nvdimm_bus->list); mutex_unlock(&nvdimm_bus_list_mutex); + wait_event(nvdimm_bus->wait, + atomic_read(&nvdimm_bus->ioctl_active) == 0); + nd_synchronize(); device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); @@ -488,6 +491,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie) put_device(dev); } put_device(dev); + if (dev->parent) + put_device(dev->parent); } static void nd_async_device_unregister(void *d, async_cookie_t cookie) @@ -507,6 +512,8 @@ void __nd_device_register(struct device *dev) if (!dev) return; dev->bus = &nvdimm_bus_type; + if (dev->parent) + get_device(dev->parent); get_device(dev); async_schedule_domain(nd_async_device_register, dev, &nd_async_domain); @@ -521,13 +528,38 @@ EXPORT_SYMBOL(nd_device_register); void nd_device_unregister(struct device *dev, enum nd_async_mode mode) { + bool killed; + switch (mode) { case ND_ASYNC: + /* + * In the async case this is being triggered with the + * device lock held and the unregistration work needs to + * be moved out of line iff this is thread has won the + * race to schedule the deletion. + */ + if (!kill_device(dev)) + return; + get_device(dev); async_schedule_domain(nd_async_device_unregister, dev, &nd_async_domain); break; case ND_SYNC: + /* + * In the sync case the device is being unregistered due + * to a state change of the parent. Claim the kill state + * to synchronize against other unregistration requests, + * or otherwise let the async path handle it if the + * unregistration was already queued. 
+ */ + device_lock(dev); + killed = kill_device(dev); + device_unlock(dev); + + if (!killed) + return; + nd_synchronize(); device_unregister(dev); break; @@ -608,7 +640,7 @@ static struct attribute *nd_device_attributes[] = { NULL, }; -/** +/* * nd_device_attribute_group - generic attributes for all devices on an nd bus */ struct attribute_group nd_device_attribute_group = { @@ -637,7 +669,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a, return a->mode; } -/** +/* * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus */ struct attribute_group nd_numa_attribute_group = { @@ -833,10 +865,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev) do { if (nvdimm_bus->probe_active == 0) break; - nvdimm_bus_unlock(&nvdimm_bus->dev); - wait_event(nvdimm_bus->probe_wait, + nvdimm_bus_unlock(dev); + device_unlock(dev); + wait_event(nvdimm_bus->wait, nvdimm_bus->probe_active == 0); - nvdimm_bus_lock(&nvdimm_bus->dev); + device_lock(dev); + nvdimm_bus_lock(dev); } while (true); } @@ -919,20 +953,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, int read_only, unsigned int ioctl_cmd, unsigned long arg) { struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; - static char out_env[ND_CMD_MAX_ENVELOPE]; - static char in_env[ND_CMD_MAX_ENVELOPE]; const struct nd_cmd_desc *desc = NULL; unsigned int cmd = _IOC_NR(ioctl_cmd); struct device *dev = &nvdimm_bus->dev; void __user *p = (void __user *) arg; + char *out_env = NULL, *in_env = NULL; const char *cmd_name, *dimm_name; u32 in_len = 0, out_len = 0; unsigned int func = cmd; unsigned long cmd_mask; struct nd_cmd_pkg pkg; int rc, i, cmd_rc; + void *buf = NULL; u64 buf_len = 0; - void *buf; if (nvdimm) { desc = nd_cmd_dimm_desc(cmd); @@ -963,7 +996,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, case ND_CMD_ARS_START: case ND_CMD_CLEAR_ERROR: case ND_CMD_CALL: - dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n", + dev_dbg(dev, "'%s' command while read-only.\n", nvdimm ? 
nvdimm_cmd_name(cmd) : nvdimm_bus_cmd_name(cmd)); return -EPERM; @@ -972,6 +1005,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an input envelope */ + in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!in_env) + return -ENOMEM; for (i = 0; i < desc->in_num; i++) { u32 in_size, copy; @@ -979,14 +1015,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (in_size == UINT_MAX) { dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); - return -ENXIO; + rc = -ENXIO; + goto out; } - if (in_len < sizeof(in_env)) - copy = min_t(u32, sizeof(in_env) - in_len, in_size); + if (in_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); else copy = 0; - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) - return -EFAULT; + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { + rc = -EFAULT; + goto out; + } in_len += in_size; } @@ -998,6 +1037,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an output envelope */ + out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!out_env) { + rc = -ENOMEM; + goto out; + } + for (i = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, (u32 *) in_env, (u32 *) out_env, 0); @@ -1006,15 +1051,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (out_size == UINT_MAX) { dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", dimm_name, cmd_name, i); - return -EFAULT; + rc = -EFAULT; + goto out; } - if (out_len < sizeof(out_env)) - copy = min_t(u32, sizeof(out_env) - out_len, out_size); + if (out_len < ND_CMD_MAX_ENVELOPE) + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); else copy = 0; if (copy && copy_from_user(&out_env[out_len], - p + in_len + out_len, copy)) - return -EFAULT; + p + in_len + out_len, copy)) { + rc = -EFAULT; + goto out; + } out_len += out_size; } @@ -1022,19 +1070,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (buf_len > ND_IOCTL_MAX_BUFLEN) { dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); - return -EINVAL; + rc = -EINVAL; + goto out; } buf = vmalloc(buf_len); - if (!buf) - return -ENOMEM; + if (!buf) { + rc = -ENOMEM; + goto out; + } if (copy_from_user(buf, p, buf_len)) { rc = -EFAULT; goto out; } - nvdimm_bus_lock(&nvdimm_bus->dev); + device_lock(dev); + nvdimm_bus_lock(dev); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); if (rc) goto out_unlock; @@ -1049,39 +1101,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, clear_err->cleared); } - nvdimm_bus_unlock(&nvdimm_bus->dev); if (copy_to_user(p, buf, buf_len)) rc = -EFAULT; - vfree(buf); - return rc; - - out_unlock: - nvdimm_bus_unlock(&nvdimm_bus->dev); - out: +out_unlock: + nvdimm_bus_unlock(dev); + device_unlock(dev); +out: + kfree(in_env); + kfree(out_env); vfree(buf); return rc; } -static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - long id = (long) file->private_data; - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; - - ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); - mutex_lock(&nvdimm_bus_list_mutex); - list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - if (nvdimm_bus->id == id) { - rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg); - break; - } - } - 
mutex_unlock(&nvdimm_bus_list_mutex); - - return rc; -} +enum nd_ioctl_mode { + BUS_IOCTL, + DIMM_IOCTL, +}; static int match_dimm(struct device *dev, void *data) { @@ -1096,31 +1133,62 @@ static int match_dimm(struct device *dev, void *data) return 0; } -static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg, + enum nd_ioctl_mode mode) + { - int rc = -ENXIO, ro; - struct nvdimm_bus *nvdimm_bus; + struct nvdimm_bus *nvdimm_bus, *found = NULL; + long id = (long) file->private_data; + struct nvdimm *nvdimm = NULL; + int rc, ro; ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); mutex_lock(&nvdimm_bus_list_mutex); list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { - struct device *dev = device_find_child(&nvdimm_bus->dev, - file->private_data, match_dimm); - struct nvdimm *nvdimm; - - if (!dev) - continue; + if (mode == DIMM_IOCTL) { + struct device *dev; + + dev = device_find_child(&nvdimm_bus->dev, + file->private_data, match_dimm); + if (!dev) + continue; + nvdimm = to_nvdimm(dev); + found = nvdimm_bus; + } else if (nvdimm_bus->id == id) { + found = nvdimm_bus; + } - nvdimm = to_nvdimm(dev); - rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); - put_device(dev); - break; + if (found) { + atomic_inc(&nvdimm_bus->ioctl_active); + break; + } } mutex_unlock(&nvdimm_bus_list_mutex); + if (!found) + return -ENXIO; + + nvdimm_bus = found; + rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); + + if (nvdimm) + put_device(&nvdimm->dev); + if (atomic_dec_and_test(&nvdimm_bus->ioctl_active)) + wake_up(&nvdimm_bus->wait); + return rc; } +static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, BUS_IOCTL); +} + +static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return nd_ioctl(file, cmd, arg, DIMM_IOCTL); +} + static int nd_open(struct inode *inode, struct file *file) { long minor = iminor(inode); @@ -1132,16 +1200,16 @@ static int nd_open(struct inode *inode, struct file *file) static const struct file_operations nvdimm_bus_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nd_ioctl, - .compat_ioctl = nd_ioctl, + .unlocked_ioctl = bus_ioctl, + .compat_ioctl = bus_ioctl, .llseek = noop_llseek, }; static const struct file_operations nvdimm_fops = { .owner = THIS_MODULE, .open = nd_open, - .unlocked_ioctl = nvdimm_ioctl, - .compat_ioctl = nvdimm_ioctl, + .unlocked_ioctl = dimm_ioctl, + .compat_ioctl = dimm_ioctl, .llseek = noop_llseek, }; diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 0453f49dc70814f35d2e0988f46304777f3e2559..326f02ffca81f7ce638d60f3935469170c959594 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -126,7 +126,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) nvdimm_bus_unlock(&ndns->dev); if (!dax_dev) return -ENOMEM; - pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); + pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, DAX_SIG); dev_dbg(dev, "dax: %s\n", rc == 0 ? 
dev_name(dax_dev) : ""); diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index 521eaf53a52aada9c99e804971f7041fcc327563..36be9b61918760e2edfa8e87be82e017a773c5df 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -47,6 +47,7 @@ static int e820_register_one(struct resource *res, void *data) ndr_desc.res = res; ndr_desc.attr_groups = e820_pmem_region_attribute_groups; ndr_desc.numa_node = e820_range_to_nid(res->start); + ndr_desc.target_node = ndr_desc.numa_node; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) return -ENXIO; diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 1d28cd656536f4c086967faf97a277ec5d2c9ce8..9f1b7e3153f991866bc70667ad212e5d333097fc 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid; static guid_t nvdimm_pfn_guid; static guid_t nvdimm_dax_guid; +static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; + static u32 best_seq(u32 a, u32 b) { a &= NSINDEX_SEQ_MASK; @@ -623,16 +625,27 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class, return &guid_null; } +static void reap_victim(struct nd_mapping *nd_mapping, + struct nd_label_ent *victim) +{ + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + u32 slot = to_slot(ndd, victim->label); + + dev_dbg(ndd->dev, "free: %d\n", slot); + nd_label_free_slot(ndd, slot); + victim->label = NULL; +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, - int pos) + int pos, unsigned long flags) { struct nd_namespace_common *ndns = &nspm->nsio.common; struct nd_interleave_set *nd_set = nd_region->nd_set; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_label_ent *label_ent, *victim = NULL; struct nd_namespace_label *nd_label; struct nd_namespace_index *nsindex; + struct nd_label_ent *label_ent; struct nd_label_id label_id; struct resource *res; unsigned long *free; @@ -666,7 +679,7 @@ static int __pmem_label_update(struct nd_region *nd_region, memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); if (nspm->alt_name) memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING); + nd_label->flags = __cpu_to_le32(flags); nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); nd_label->position = __cpu_to_le16(pos); nd_label->isetcookie = __cpu_to_le64(cookie); @@ -701,18 +714,10 @@ static int __pmem_label_update(struct nd_region *nd_region, list_for_each_entry(label_ent, &nd_mapping->labels, list) { if (!label_ent->label) continue; - if (memcmp(nspm->uuid, label_ent->label->uuid, - NSLABEL_UUID_LEN) != 0) - continue; - victim = label_ent; - list_move_tail(&victim->list, &nd_mapping->labels); - break; - } - if (victim) { - dev_dbg(ndd->dev, "free: %d\n", slot); - slot = to_slot(ndd, victim->label); - nd_label_free_slot(ndd, slot); - victim->label = NULL; + if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) + || memcmp(nspm->uuid, label_ent->label->uuid, + NSLABEL_UUID_LEN) == 0) + reap_victim(nd_mapping, label_ent); } /* update index */ @@ -1120,13 +1125,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) int nd_pmem_namespace_label_update(struct nd_region *nd_region, struct nd_namespace_pmem *nspm, resource_size_t size) { - int i; + int i, rc; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = 
to_ndd(nd_mapping); struct resource *res; - int rc, count = 0; + int count = 0; if (size == 0) { rc = del_labels(nd_mapping, nspm->uuid); @@ -1144,7 +1149,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, if (rc < 0) return rc; - rc = __pmem_label_update(nd_region, nd_mapping, nspm, i); + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, + NSLABEL_FLAG_UPDATING); + if (rc) + return rc; + } + + if (size == 0) + return 0; + + /* Clear the UPDATING flag per UEFI 2.7 expectations */ + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0); if (rc) return rc; } diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 18bbe183b3a9bde29efe0d8e4940975960d012fd..52f9fcada00a1bcecda7492668c0740ccb50ad27 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -38,8 +38,6 @@ enum { ND_NSINDEX_INIT = 0x1, }; -static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; - /** * struct nd_namespace_index - label set superblock * @sig: NAMESPACE_INDEX\0 diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 4a4266250c28cfde56b08a2aa7ce04a27d6e6527..5dc3b407d7bd001e1f4daab57aa3d057215b5053 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid) bool pmem_should_map_pages(struct device *dev) { struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_common *ndns = to_ndns(dev); struct nd_namespace_io *nsio; if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) @@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev) if (is_nd_pfn(dev) || is_nd_btt(dev)) return false; + if (ndns->force_raw) + return false; + nsio = to_nd_namespace_io(dev); if (region_intersects(nsio->res.start, resource_size(&nsio->res), IORESOURCE_SYSTEM_RAM, @@ -1244,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region, for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nd_label_ent *label_ent; struct resource *res; for_each_dpa_resource(ndd, res) if (strcmp(res->name, old_label_id.id) == 0) sprintf((void *) res->name, "%s", new_label_id.id); + + mutex_lock(&nd_mapping->lock); + list_for_each_entry(label_ent, &nd_mapping->labels, list) { + struct nd_namespace_label *nd_label = label_ent->label; + struct nd_label_id label_id; + + if (!nd_label) + continue; + nd_label_gen_id(&label_id, nd_label->uuid, + __le32_to_cpu(nd_label->flags)); + if (strcmp(old_label_id.id, label_id.id) == 0) + set_bit(ND_LABEL_REAP, &label_ent->flags); + } + mutex_unlock(&nd_mapping->lock); } kfree(*old_uuid); out: @@ -2247,9 +2266,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, if (!nsblk->uuid) goto blk_err; memcpy(name, nd_label->name, NSLABEL_NAME_LEN); - if (name[0]) + if (name[0]) { nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); + if (!nsblk->alt_name) + goto blk_err; + } res = nsblk_add_resource(nd_region, ndd, nsblk, __le64_to_cpu(nd_label->dpa)); if (!res) diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index ac68072fb8cd683c8917c9f061f127315cabb2ef..adf62a6c0fe277cbd4cba1280490259e1031e49d 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -25,10 +25,11 @@ extern int nvdimm_major; struct nvdimm_bus { struct nvdimm_bus_descriptor *nd_desc; 
- wait_queue_head_t probe_wait; + wait_queue_head_t wait; struct list_head list; struct device dev; int id, probe_active; + atomic_t ioctl_active; struct list_head mapping_list; struct mutex reconfig_mutex; struct badrange badrange; @@ -112,6 +113,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t *overlap); resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); resource_size_t nd_region_available_dpa(struct nd_region *nd_region); +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, + resource_size_t size); resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); int alias_dpa_busy(struct device *dev, void *data); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 98317e7ce5b54dbd2e2172a40d114e2734650391..5259b8953cc646960ee64a93b0655170fe5cab30 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -113,8 +113,12 @@ struct nd_percpu_lane { spinlock_t lock; }; +enum nd_label_flags { + ND_LABEL_REAP, +}; struct nd_label_ent { struct list_head list; + unsigned long flags; struct nd_namespace_label *label; }; @@ -153,7 +157,7 @@ struct nd_region { u16 ndr_mappings; u64 ndr_size; u64 ndr_start; - int id, num_lanes, ro, numa_node; + int id, num_lanes, ro, numa_node, target_node; void *provider_data; struct kernfs_node *bb_state; struct badblocks bb; diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 0a701837dfc0b9bd6c011f5ee092feb101a15acb..ecaaa27438e2526ac19653599d7366e05d06125c 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -68,6 +68,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; ndr_desc.numa_node = dev_to_node(&pdev->dev); + ndr_desc.target_node = ndr_desc.numa_node; ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h index dde9853453d3c622c42511772ff807bcf1fd70d0..e901e3a3b04c9920007609f64443a8895bb2dca4 100644 --- a/drivers/nvdimm/pfn.h +++ b/drivers/nvdimm/pfn.h @@ -36,6 +36,7 @@ struct nd_pfn_sb { __le32 end_trunc; /* minor-version-2 record the base alignment of the mapping */ __le32 align; + /* minor-version-3 guarantee the padding and flags are zero */ u8 padding[4000]; __le64 checksum; }; diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 3f7ad5bc443ee80416236143e0a0d58d8b138c04..86ed09b2a1929a52dcd16e0a6d85216331cf9b9f 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -361,6 +361,15 @@ struct device *nd_pfn_create(struct nd_region *nd_region) return dev; } +/** + * nd_pfn_validate - read and validate info-block + * @nd_pfn: fsdax namespace runtime state / properties + * @sig: 'devdax' or 'fsdax' signature + * + * Upon return the info-block buffer contents (->pfn_sb) are + * indeterminate when validation fails, and a coherent info-block + * otherwise. 
+ */ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; @@ -506,7 +515,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) nvdimm_bus_unlock(&ndns->dev); if (!pfn_dev) return -ENOMEM; - pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); + pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); nd_pfn = to_nd_pfn(pfn_dev); nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, PFN_SIG); @@ -534,7 +543,7 @@ static unsigned long init_altmap_base(resource_size_t base) static unsigned long init_altmap_reserve(resource_size_t base) { - unsigned long reserve = PHYS_PFN(SZ_8K); + unsigned long reserve = PFN_UP(SZ_8K); unsigned long base_pfn = PHYS_PFN(base); reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); @@ -590,14 +599,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) ALIGN_DOWN(phys, nd_pfn->align)); } +/* + * Check if pmem collides with 'System RAM', or other regions when + * section aligned. Trim it accordingly. + */ +static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc) +{ + struct nd_namespace_common *ndns = nd_pfn->ndns; + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent); + const resource_size_t start = nsio->res.start; + const resource_size_t end = start + resource_size(&nsio->res); + resource_size_t adjust, size; + + *start_pad = 0; + *end_trunc = 0; + + adjust = start - PHYS_SECTION_ALIGN_DOWN(start); + size = resource_size(&nsio->res) + adjust; + if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM, + IORES_DESC_NONE) == REGION_MIXED + || nd_region_conflict(nd_region, start - adjust, size)) + *start_pad = PHYS_SECTION_ALIGN_UP(start) - start; + + /* Now check that end of the range does not collide. */ + adjust = PHYS_SECTION_ALIGN_UP(end) - end; + size = resource_size(&nsio->res) + adjust; + if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(end, nd_pfn->align) + || nd_region_conflict(nd_region, start, size)) + *end_trunc = end - phys_pmem_align_down(nd_pfn, end); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? 
SZ_128K : 0; struct nd_namespace_common *ndns = nd_pfn->ndns; - u32 start_pad = 0, end_trunc = 0; + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); resource_size_t start, size; - struct nd_namespace_io *nsio; struct nd_region *nd_region; + u32 start_pad, end_trunc; struct nd_pfn_sb *pfn_sb; unsigned long npfns; phys_addr_t offset; @@ -605,7 +647,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) u64 checksum; int rc; - pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL); + pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL); if (!pfn_sb) return -ENOMEM; @@ -614,11 +656,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) sig = DAX_SIG; else sig = PFN_SIG; + rc = nd_pfn_validate(nd_pfn, sig); if (rc != -ENODEV) return rc; /* no info block, do init */; + memset(pfn_sb, 0, sizeof(*pfn_sb)); + nd_region = to_nd_region(nd_pfn->dev.parent); if (nd_region->ro) { dev_info(&nd_pfn->dev, @@ -629,30 +674,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) memset(pfn_sb, 0, sizeof(*pfn_sb)); - /* - * Check if pmem collides with 'System RAM' when section aligned and - * trim it accordingly - */ - nsio = to_nd_namespace_io(&ndns->dev); - start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start); - size = resource_size(&nsio->res); - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { - start = nsio->res.start; - start_pad = PHYS_SECTION_ALIGN_UP(start) - start; - } - - start = nsio->res.start; - size = PHYS_SECTION_ALIGN_UP(start + size) - start; - if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED - || !IS_ALIGNED(start + resource_size(&nsio->res), - nd_pfn->align)) { - size = resource_size(&nsio->res); - end_trunc = start + size - phys_pmem_align_down(nd_pfn, - start + size); - } - + trim_pfn_device(nd_pfn, &start_pad, &end_trunc); if (start_pad + end_trunc) dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); @@ -663,7 +685,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) * implementation will limit the pfns advertised through * ->direct_access() to those that are included in the memmap. 
*/ - start += start_pad; + start = nsio->res.start + start_pad; size = resource_size(&nsio->res); npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) / PAGE_SIZE); @@ -695,7 +717,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) memcpy(pfn_sb->uuid, nd_pfn->uuid, 16); memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); pfn_sb->version_major = cpu_to_le16(1); - pfn_sb->version_minor = cpu_to_le16(2); + pfn_sb->version_minor = cpu_to_le16(3); pfn_sb->start_pad = cpu_to_le32(start_pad); pfn_sb->end_trunc = cpu_to_le32(end_trunc); pfn_sb->align = cpu_to_le32(nd_pfn->align); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 6071e2942053c903564d6f08f278d3735a619308..a7ce2f1761a2c5d5b965e2944aa8cd79a44412f0 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); memcpy_flushcache(pmem_addr, mem + off, chunk); kunmap_atomic(mem); len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } } @@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); kunmap_atomic(mem); if (rem) @@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } return BLK_STS_OK; } @@ -281,16 +281,22 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); } +/* + * Use the 'no check' versions of copy_from_iter_flushcache() and + * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. 
Bounds + * checking, both file offset and device offset, is handled by + * dax_iomap_actor() + */ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { - return copy_from_iter_flushcache(addr, bytes, i); + return _copy_from_iter_flushcache(addr, bytes, i); } static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { - return copy_to_iter_mcsafe(addr, bytes, i); + return _copy_to_iter_mcsafe(addr, bytes, i); } static const struct dax_operations pmem_dax_ops = { @@ -309,8 +315,11 @@ static void pmem_release_queue(void *q) blk_cleanup_queue(q); } -static void pmem_freeze_queue(void *q) +static void pmem_freeze_queue(struct percpu_ref *ref) { + struct request_queue *q; + + q = container_of(ref, typeof(*q), q_usage_counter); blk_freeze_queue_start(q); } @@ -402,6 +411,7 @@ static int pmem_attach_disk(struct device *dev, pmem->pfn_flags = PFN_DEV; pmem->pgmap.ref = &q->q_usage_counter; + pmem->pgmap.kill = pmem_freeze_queue; if (is_nd_pfn(dev)) { if (setup_pagemap_fsdax(dev, &pmem->pgmap)) return -ENOMEM; @@ -421,16 +431,11 @@ static int pmem_attach_disk(struct device *dev, addr = devm_memremap_pages(dev, &pmem->pgmap); pmem->pfn_flags |= PFN_MAP; memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res)); - } else + } else { addr = devm_memremap(dev, pmem->phys_addr, pmem->size, ARCH_MEMREMAP_PMEM); - - /* - * At release time the queue must be frozen before - * devm_memremap_pages is unwound - */ - if (devm_add_action_or_reset(dev, pmem_freeze_queue, q)) - return -ENOMEM; + memcpy(&bb_res, &nsio->res, sizeof(bb_res)); + } if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index b9ca0033cc9996b295fe28e96369531ef87ab20a..22224b21c34df51368a1522c3b132335cc66aa29 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -42,18 +42,7 @@ static int nd_region_probe(struct device *dev) if (rc) return rc; - rc = nd_region_register_namespaces(nd_region, &err); - if (rc < 0) - return rc; - - ndrd = dev_get_drvdata(dev); - ndrd->ns_active = rc; - ndrd->ns_count = rc + err; - - if (rc && err && rc == err) - return -ENODEV; - - if (is_nd_pmem(&nd_region->dev)) { + if (is_memory(&nd_region->dev)) { struct resource ndr_res; if (devm_init_badblocks(dev, &nd_region->bb)) @@ -68,6 +57,17 @@ static int nd_region_probe(struct device *dev) nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); } + rc = nd_region_register_namespaces(nd_region, &err); + if (rc < 0) + return rc; + + ndrd = dev_get_drvdata(dev); + ndrd->ns_active = rc; + ndrd->ns_count = rc + err; + + if (rc && err && rc == err) + return -ENODEV; + nd_region->btt_seed = nd_btt_create(nd_region); nd_region->pfn_seed = nd_pfn_create(nd_region); nd_region->dax_seed = nd_dax_create(nd_region); @@ -131,7 +131,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event) struct nd_region *nd_region = to_nd_region(dev); struct resource res; - if (is_nd_pmem(&nd_region->dev)) { + if (is_memory(&nd_region->dev)) { res.start = nd_region->ndr_start; res.end = nd_region->ndr_start + nd_region->ndr_size - 1; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index fa37afcd43ff8ceb8ac4e8b18a8a7f23218d3e90..a5c80767d81b970910b1449a73216c2c2c77fb16 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -425,10 +425,12 @@ static ssize_t available_size_show(struct device *dev, * memory nvdimm_bus_lock() is 
dropped, but that's userspace's * problem to not race itself. */ + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_available_dpa(nd_region); nvdimm_bus_unlock(dev); + device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -440,10 +442,12 @@ static ssize_t max_available_extent_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); unsigned long long available = 0; + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_allocatable_dpa(nd_region); nvdimm_bus_unlock(dev); + device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -560,10 +564,17 @@ static ssize_t region_badblocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); + ssize_t rc; - return badblocks_show(&nd_region->bb, buf, 0); -} + device_lock(dev); + if (dev->driver) + rc = badblocks_show(&nd_region->bb, buf, 0); + else + rc = -ENXIO; + device_unlock(dev); + return rc; +} static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL); static ssize_t resource_show(struct device *dev, @@ -622,11 +633,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) if (!is_memory(dev) && a == &dev_attr_dax_seed.attr) return 0; - if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) + if (!is_memory(dev) && a == &dev_attr_badblocks.attr) return 0; if (a == &dev_attr_resource.attr) { - if (is_nd_pmem(dev)) + if (is_memory(dev)) return 0400; else return 0; @@ -1053,6 +1064,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, nd_region->flags = ndr_desc->flags; nd_region->ro = ro; nd_region->numa_node = ndr_desc->numa_node; + nd_region->target_node = ndr_desc->target_node; ida_init(&nd_region->ns_ida); ida_init(&nd_region->btt_ida); ida_init(&nd_region->pfn_ida); @@ -1177,6 +1189,47 @@ int nvdimm_has_cache(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(nvdimm_has_cache); +struct conflict_context { + struct nd_region *nd_region; + resource_size_t start, size; +}; + +static int region_conflict(struct device *dev, void *data) +{ + struct nd_region *nd_region; + struct conflict_context *ctx = data; + resource_size_t res_end, region_end, region_start; + + if (!is_memory(dev)) + return 0; + + nd_region = to_nd_region(dev); + if (nd_region == ctx->nd_region) + return 0; + + res_end = ctx->start + ctx->size; + region_start = nd_region->ndr_start; + region_end = region_start + nd_region->ndr_size; + if (ctx->start >= region_start && ctx->start < region_end) + return -EBUSY; + if (res_end > region_start && res_end <= region_end) + return -EBUSY; + return 0; +} + +int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, + resource_size_t size) +{ + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); + struct conflict_context ctx = { + .nd_region = nd_region, + .start = start, + .size = size, + }; + + return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict); +} + void __exit nd_region_devs_exit(void) { ida_destroy(®ion_ida); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6bb9908bf46f1e2906f45af82be11684d18af444..b4d8aa84b74419f16ff736d822164cb34e42afb5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -88,7 +89,6 @@ EXPORT_SYMBOL_GPL(nvme_reset_wq); struct workqueue_struct *nvme_delete_wq; EXPORT_SYMBOL_GPL(nvme_delete_wq); -static 
DEFINE_IDA(nvme_subsystems_ida); static LIST_HEAD(nvme_subsystems); static DEFINE_MUTEX(nvme_subsystems_lock); @@ -111,10 +111,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) */ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - revalidate_disk(ns->disk); blk_set_queue_dying(ns->queue); /* Forcibly unquiesce queues to avoid blocking dispatch */ blk_mq_unquiesce_queue(ns->queue); + /* + * Revalidate after unblocking dispatchers that may be holding bd_butex + */ + revalidate_disk(ns->disk); } static void nvme_queue_scan(struct nvme_ctrl *ctrl) @@ -126,6 +129,44 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->scan_work); } +static void nvme_failfast_work(struct work_struct *work) +{ + struct nvme_ctrl_plus *ctrl_plus = container_of(to_delayed_work(work), + struct nvme_ctrl_plus, failfast_work); + struct nvme_ctrl *ctrl = &ctrl_plus->ctrl; + + if (ctrl->state != NVME_CTRL_CONNECTING) + return; + + set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl_plus->flags); + dev_info(ctrl->device, "failfast expired\n"); + nvme_kick_requeue_lists(ctrl); +} + +static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl) +{ + struct nvmf_ctrl_options_plus *ops_plus = NULL; + + if (!ctrl->opts) + return; + + ops_plus = nvmf_opt_to_plus(ctrl->opts); + if (ops_plus->fast_io_fail_tmo == -1) + return; + + schedule_delayed_work(&nvme_ctrl_to_plus(ctrl)->failfast_work, + ops_plus->fast_io_fail_tmo * HZ); +} + +static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl) +{ + if (!ctrl->opts) + return; + + cancel_delayed_work_sync(&nvme_ctrl_to_plus(ctrl)->failfast_work); + clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &nvme_ctrl_to_plus(ctrl)->flags); +} + int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) @@ -152,11 +193,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); -static void nvme_delete_ctrl_work(struct work_struct *work) +static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) { - struct nvme_ctrl *ctrl = - container_of(work, struct nvme_ctrl, delete_work); - dev_info(ctrl->device, "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); @@ -165,7 +203,14 @@ static void nvme_delete_ctrl_work(struct work_struct *work) nvme_remove_namespaces(ctrl); ctrl->ops->delete_ctrl(ctrl); nvme_uninit_ctrl(ctrl); - nvme_put_ctrl(ctrl); +} + +static void nvme_delete_ctrl_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, delete_work); + + nvme_do_delete_ctrl(ctrl); } int nvme_delete_ctrl(struct nvme_ctrl *ctrl) @@ -187,9 +232,10 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) * can free the controller. 
*/ nvme_get_ctrl(ctrl); - ret = nvme_delete_ctrl(ctrl); + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) + ret = -EBUSY; if (!ret) - flush_work(&ctrl->delete_work); + nvme_do_delete_ctrl(ctrl); nvme_put_ctrl(ctrl); return ret; } @@ -200,9 +246,9 @@ static inline bool nvme_ns_has_pi(struct nvme_ns *ns) return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); } -static blk_status_t nvme_error_status(struct request *req) +static blk_status_t nvme_error_status(u16 status) { - switch (nvme_req(req)->status & 0x7ff) { + switch (status & 0x7ff) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -245,22 +291,34 @@ static inline bool nvme_req_needs_retry(struct request *req) return true; } +static void nvme_retry_req(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + unsigned long delay = 0; + u16 crd; + + /* The mask and shift result must be <= 3 */ + crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + if (ns && crd) + delay = ns->ctrl->crdt[crd - 1] * 100; + + nvme_req(req)->retries++; + blk_mq_requeue_request(req, false); + blk_mq_delay_kick_requeue_list(req->q, delay); +} + void nvme_complete_rq(struct request *req) { - blk_status_t status = nvme_error_status(req); + blk_status_t status = nvme_error_status(nvme_req(req)->status); trace_nvme_complete_rq(req); if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - } if (!blk_queue_dying(req->q)) { - nvme_req(req)->retries++; - blk_mq_requeue_request(req, true); + nvme_retry_req(req); return; } } @@ -273,8 +331,12 @@ void nvme_cancel_request(struct request *req, void *data, bool reserved) dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, "Cancelling I/O %d", req->tag); - nvme_req(req)->status = NVME_SC_ABORT_REQ; - blk_mq_complete_request(req); + /* don't abort one completed request */ + if (blk_mq_request_completed(req)) + return; + + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_force_complete_rq(req); } EXPORT_SYMBOL_GPL(nvme_cancel_request); @@ -360,8 +422,21 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, ctrl->state = new_state; spin_unlock_irqrestore(&ctrl->lock, flags); - if (changed && ctrl->state == NVME_CTRL_LIVE) - nvme_kick_requeue_lists(ctrl); + if (changed) { + switch (ctrl->state) { + case NVME_CTRL_LIVE: + if (old_state == NVME_CTRL_CONNECTING) + nvme_stop_failfast_work(ctrl); + nvme_kick_requeue_lists(ctrl); + break; + case NVME_CTRL_CONNECTING: + if (old_state == NVME_CTRL_RESETTING) + nvme_start_failfast_work(ctrl); + break; + default: + break; + } + } return changed; } EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); @@ -373,7 +448,6 @@ static void nvme_free_ns_head(struct kref *ref) nvme_mpath_remove_disk(head); ida_simple_remove(&head->subsys->ns_ida, head->instance); - list_del_init(&head->entry); cleanup_srcu_struct_quiesced(&head->srcu); nvme_put_subsystem(head->subsys); kfree(head); @@ -493,19 +567,22 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl) ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); if (ret) - return ret; + goto out_disable_stream; ctrl->nssa = le16_to_cpu(s.nssa); if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { dev_info(ctrl->device, "too few streams (%u) available\n", ctrl->nssa); - nvme_disable_streams(ctrl); - return 0; + goto out_disable_stream; } ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); 
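A note on the nvme_retry_req() hunk above: the completion status carries a Command Retry Delay (CRD) index in bits 12:11, which selects one of the three CRDT values reported by Identify Controller (each in units of 100 ms); the resulting delay is handed to blk_mq_delay_kick_requeue_list() in milliseconds. The following stand-alone sketch is illustrative only and not part of the patch; the exact NVME_SC_CRD mask value is an assumption here, inferred from the ">> 11" shift and the "result must be <= 3" comment in the hunk.

/*
 * Stand-alone sketch (not from the patch) of the CRD-to-delay mapping used
 * by nvme_retry_req().  DEMO_SC_CRD is assumed to cover status bits 12:11;
 * crdt[] values come from Identify Controller, in 100 ms units.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SC_CRD	0x1800	/* assumed mask for status bits 12:11 */

static unsigned long crd_delay_ms(uint16_t status, const uint16_t crdt[3])
{
	uint16_t crd = (status & DEMO_SC_CRD) >> 11;	/* 0..3 */

	return crd ? crdt[crd - 1] * 100UL : 0;	/* 0 means retry immediately */
}

int main(void)
{
	const uint16_t crdt[3] = { 1, 5, 20 };	/* per-controller retry delays */

	/* CRD field of 2 selects crdt[1] = 5 -> 500 ms requeue delay */
	printf("%lu ms\n", crd_delay_ms(2 << 11, crdt));
	return 0;
}

A zero CRD keeps the previous behaviour of an immediate requeue, so controllers that do not report CRDT values see no change from this hunk.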
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); return 0; + +out_disable_stream: + nvme_disable_streams(ctrl); + return ret; } /* @@ -533,6 +610,14 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; } +static void nvme_setup_passthrough(struct request *req, + struct nvme_command *cmd) +{ + memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); + /* passthru commands should let the driver set the SGL flags */ + cmd->common.flags &= ~NVME_CMD_SGL_ALL; +} + static inline void nvme_setup_flush(struct nvme_ns *ns, struct nvme_command *cmnd) { @@ -548,9 +633,25 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, struct nvme_dsm_range *range; struct bio *bio; - range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); - if (!range) - return BLK_STS_RESOURCE; + /* + * Some devices do not consider the DSM 'Number of Ranges' field when + * determining how much data to DMA. Always allocate memory for maximum + * number of segments to prevent device reading beyond end of buffer. + */ + static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES; + + range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN); + if (!range) { + /* + * If we fail allocation our range, fallback to the controller + * discard page. If that's also busy, it's safe to return + * busy, as we know we can make progress once that's freed. + */ + if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) + return BLK_STS_RESOURCE; + + range = page_address(ns->ctrl->discard_page); + } __rq_for_each_bio(bio, req) { u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); @@ -565,7 +666,10 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, } if (WARN_ON_ONCE(n != segments)) { - kfree(range); + if (virt_to_page(range) == ns->ctrl->discard_page) + clear_bit_unlock(0, &ns->ctrl->discard_page_busy); + else + kfree(range); return BLK_STS_IOERR; } @@ -577,7 +681,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); - req->special_vec.bv_len = sizeof(*range) * segments; + req->special_vec.bv_len = alloc_size; req->rq_flags |= RQF_SPECIAL_PAYLOAD; return BLK_STS_OK; @@ -650,8 +754,13 @@ void nvme_cleanup_cmd(struct request *req) blk_rq_bytes(req) >> ns->lba_shift); } if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { - kfree(page_address(req->special_vec.bv_page) + - req->special_vec.bv_offset); + struct nvme_ns *ns = req->rq_disk->private_data; + struct page *page = req->special_vec.bv_page; + + if (page == ns->ctrl->discard_page) + clear_bit_unlock(0, &ns->ctrl->discard_page_busy); + else + kfree(page_address(page) + req->special_vec.bv_offset); } } EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); @@ -666,7 +775,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, switch (req_op(req)) { case REQ_OP_DRV_IN: case REQ_OP_DRV_OUT: - memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); + nvme_setup_passthrough(req, cmd); break; case REQ_OP_FLUSH: nvme_setup_flush(ns, cmd); @@ -747,9 +856,13 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, if (!buf) goto out; - ret = -EFAULT; - if (write && copy_from_user(buf, ubuf, len)) - goto out_free_meta; + if (write) { + ret = -EFAULT; + if (copy_from_user(buf, ubuf, len)) + goto out_free_meta; + } else { + memset(buf, 0, len); + } bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); if (IS_ERR(bip)) { @@ -831,6 +944,8 
@@ static int nvme_submit_user_cmd(struct request_queue *q, static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) { struct nvme_ctrl *ctrl = rq->end_io_data; + unsigned long flags; + bool startka = false; blk_mq_free_request(rq); @@ -841,7 +956,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) return; } - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + spin_lock_irqsave(&ctrl->lock, flags); + if (ctrl->state == NVME_CTRL_LIVE || + ctrl->state == NVME_CTRL_CONNECTING) + startka = true; + spin_unlock_irqrestore(&ctrl->lock, flags); + if (startka) + schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } static int nvme_keep_alive(struct nvme_ctrl *ctrl) @@ -920,6 +1041,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -930,8 +1054,11 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, NVME_IDENTIFY_DATA_SIZE); - if (status) + if (status) { + dev_warn(ctrl->device, + "Identify Descriptors failed (%d)\n", status); goto free_data; + } for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { struct nvme_ns_id_desc *cur = data + pos; @@ -994,10 +1121,9 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n NVME_IDENTIFY_DATA_SIZE); } -static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, - unsigned nsid) +static int nvme_identify_ns(struct nvme_ctrl *ctrl, + unsigned nsid, struct nvme_id_ns **id) { - struct nvme_id_ns *id; struct nvme_command c = { }; int error; @@ -1006,18 +1132,17 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS; - id = kmalloc(sizeof(*id), GFP_KERNEL); - if (!id) - return NULL; + *id = kmalloc(sizeof(**id), GFP_KERNEL); + if (!*id) + return -ENOMEM; - error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); + error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); if (error) { dev_warn(ctrl->device, "Identify namespace failed\n"); - kfree(id); - return NULL; + kfree(*id); } - return id; + return error; } static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, @@ -1083,6 +1208,20 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl) if (status) dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", supported_aens); + + queue_work(nvme_wq, &ctrl->async_event_work); +} + +/* + * Convert integer values from ioctl structures to user pointers, silently + * ignoring the upper bits in the compat case to match behaviour of 32-bit + * kernels. + */ +static void __user *nvme_to_user_ptr(uintptr_t ptrval) +{ + if (in_compat_syscall()) + ptrval = (compat_uptr_t)ptrval; + return (void __user *)ptrval; } static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) @@ -1107,8 +1246,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) } length = (io.nblocks + 1) << ns->lba_shift; - meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(uintptr_t)io.metadata; + + if ((io.control & NVME_RW_PRINFO_PRACT) && + ns->ms == sizeof(struct t10_pi_tuple)) { + /* + * Protection information is stripped/inserted by the + * controller. 
+ */ + if (nvme_to_user_ptr(io.metadata)) + return -EINVAL; + meta_len = 0; + metadata = NULL; + } else { + meta_len = (io.nblocks + 1) * ns->ms; + metadata = nvme_to_user_ptr(io.metadata); + } if (ns->ext) { length += meta_len; @@ -1131,8 +1283,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.appmask = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, - (void __user *)(uintptr_t)io.addr, length, - metadata, meta_len, io.slba, NULL, 0); + nvme_to_user_ptr(io.addr), length, + metadata, meta_len, lower_32_bits(io.slba), NULL, 0); } static u32 nvme_known_admin_effects(u8 opcode) @@ -1157,7 +1309,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, if (ns) { if (ctrl->effects) effects = le32_to_cpu(ctrl->effects->iocs[opcode]); - if (effects & ~NVME_CMD_EFFECTS_CSUPP) + if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn(ctrl->device, "IO command:%02x has unhandled effects:%08x\n", opcode, effects); @@ -1174,6 +1326,10 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, * effects say only one namespace is affected. */ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { + mutex_lock(&ctrl->scan_lock); + mutex_lock(&ctrl->subsys->lock); + nvme_mpath_start_freeze(ctrl->subsys); + nvme_mpath_wait_freeze(ctrl->subsys); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } @@ -1189,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl) if (ns->disk && nvme_revalidate_disk(ns->disk)) nvme_set_queue_dying(ns); up_read(&ctrl->namespaces_rwsem); - - nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); } static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) @@ -1202,8 +1356,13 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) */ if (effects & NVME_CMD_EFFECTS_LBCC) nvme_update_formats(ctrl); - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); + nvme_mpath_unfreeze(ctrl->subsys); + mutex_unlock(&ctrl->subsys->lock); + nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); + mutex_unlock(&ctrl->scan_lock); + } if (effects & NVME_CMD_EFFECTS_CCC) nvme_init_identify(ctrl); if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) @@ -1244,8 +1403,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, effects = nvme_passthru_start(ctrl, ns, cmd.opcode); status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, - (void __user *)(uintptr_t)cmd.addr, cmd.data_len, - (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, + nvme_to_user_ptr(cmd.addr), cmd.data_len, + nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, 0, &cmd.result, timeout); nvme_passthru_end(ctrl, effects); @@ -1266,9 +1425,14 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, { #ifdef CONFIG_NVME_MULTIPATH if (disk->fops == &nvme_ns_head_ops) { + struct nvme_ns *ns; + *head = disk->private_data; *srcu_idx = srcu_read_lock(&(*head)->srcu); - return nvme_find_path(*head); + ns = nvme_find_path(*head); + if (!ns) + srcu_read_unlock(&(*head)->srcu, *srcu_idx); + return ns; } #endif *head = NULL; @@ -1282,45 +1446,100 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) srcu_read_unlock(&head->srcu, idx); } -static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg) +static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { + struct nvme_ns_head *head = NULL; + void __user *argp = (void __user *)arg; + struct nvme_ns *ns; + int srcu_idx, ret; + + ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); + if (unlikely(!ns)) + return -EWOULDBLOCK; + + /* + * Handle ioctls that apply to the controller instead of the namespace + * seperately and drop the ns SRCU reference early. This avoids a + * deadlock when deleting namespaces using the passthrough interface. + */ + if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { + struct nvme_ctrl *ctrl = ns->ctrl; + + nvme_get_ctrl(ns->ctrl); + nvme_put_ns_from_disk(head, srcu_idx); + + if (cmd == NVME_IOCTL_ADMIN_CMD) + ret = nvme_user_cmd(ctrl, NULL, argp); + else + ret = sed_ioctl(ctrl->opal_dev, cmd, argp); + + nvme_put_ctrl(ctrl); + return ret; + } + switch (cmd) { case NVME_IOCTL_ID: force_successful_syscall_return(); - return ns->head->ns_id; - case NVME_IOCTL_ADMIN_CMD: - return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg); + ret = ns->head->ns_id; + break; case NVME_IOCTL_IO_CMD: - return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg); + ret = nvme_user_cmd(ns->ctrl, ns, argp); + break; case NVME_IOCTL_SUBMIT_IO: - return nvme_submit_io(ns, (void __user *)arg); + ret = nvme_submit_io(ns, argp); + break; default: -#ifdef CONFIG_NVM if (ns->ndev) - return nvme_nvm_ioctl(ns, cmd, arg); -#endif - if (is_sed_ioctl(cmd)) - return sed_ioctl(ns->ctrl->opal_dev, cmd, - (void __user *) arg); - return -ENOTTY; + ret = nvme_nvm_ioctl(ns, cmd, arg); + else + ret = -ENOTTY; } + + nvme_put_ns_from_disk(head, srcu_idx); + return ret; } -static int nvme_ioctl(struct block_device *bdev, fmode_t mode, +#ifdef CONFIG_COMPAT +struct nvme_user_io32 { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +} __attribute__((__packed__)); + +#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32) + +static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - int srcu_idx, ret; + /* + * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO + * between 32 bit programs and 64 bit kernel. + * The cause is that the results of sizeof(struct nvme_user_io), + * which is used to define NVME_IOCTL_SUBMIT_IO, + * are not same between 32 bit compiler and 64 bit compiler. 
+ * NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling + * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs. + * Other IOCTL numbers are same between 32 bit and 64 bit. + * So there is nothing to do regarding to other IOCTL numbers. + */ + if (cmd == NVME_IOCTL_SUBMIT_IO32) + return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg); - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - ret = -EWOULDBLOCK; - else - ret = nvme_ns_ioctl(ns, cmd, arg); - nvme_put_ns_from_disk(head, srcu_idx); - return ret; + return nvme_ioctl(bdev, mode, cmd, arg); } +#else +#define nvme_compat_ioctl NULL +#endif /* CONFIG_COMPAT */ static int nvme_open(struct block_device *bdev, fmode_t mode) { @@ -1399,10 +1618,10 @@ static void nvme_set_chunk_size(struct nvme_ns *ns) blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); } -static void nvme_config_discard(struct nvme_ns *ns) +static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) { struct nvme_ctrl *ctrl = ns->ctrl; - struct request_queue *queue = ns->queue; + struct request_queue *queue = disk->queue; u32 size = queue_logical_block_size(queue); if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { @@ -1430,7 +1649,7 @@ static void nvme_config_discard(struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } -static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, +static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, struct nvme_id_ns *id, struct nvme_ns_ids *ids) { memset(ids, 0, sizeof(*ids)); @@ -1439,14 +1658,9 @@ static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); if (ctrl->vs >= NVME_VS(1, 2, 0)) memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); - if (ctrl->vs >= NVME_VS(1, 3, 0)) { - /* Don't treat error as fatal we potentially - * already have a NGUID or EUI-64 - */ - if (nvme_identify_ns_descs(ctrl, nsid, ids)) - dev_warn(ctrl->device, - "%s: Identify Descriptors failed\n", __func__); - } + if (ctrl->vs >= NVME_VS(1, 3, 0)) + return nvme_identify_ns_descs(ctrl, nsid, ids); + return 0; } static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) @@ -1469,6 +1683,10 @@ static void nvme_update_disk_info(struct gendisk *disk, sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9); unsigned short bs = 1 << ns->lba_shift; + if (ns->lba_shift > PAGE_SHIFT) { + /* unsupported block size, set capacity to 0 later */ + bs = (1 << 9); + } blk_mq_freeze_queue(disk->queue); blk_integrity_unregister(disk); @@ -1479,16 +1697,15 @@ static void nvme_update_disk_info(struct gendisk *disk, if (ns->ms && !ns->ext && (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) nvme_init_integrity(disk, ns->ms, ns->pi_type); - if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) + if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) || + ns->lba_shift > PAGE_SHIFT) capacity = 0; set_capacity(disk, capacity); - nvme_config_discard(ns); + nvme_config_discard(disk, ns); if (id->nsattr & (1 << 0)) set_disk_ro(disk, true); - else - set_disk_ro(disk, false); blk_mq_unfreeze_queue(disk->queue); } @@ -1519,8 +1736,11 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->ndev) nvme_nvm_update_nvm_info(ns); #ifdef CONFIG_NVME_MULTIPATH - if (ns->head->disk) + if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); + blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + nvme_mpath_update_disk_size(ns->head->disk); + } #endif } 
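Note on the compat ioctl hunk above: _IOW() encodes sizeof() of its argument type into the ioctl number, and sizeof(struct nvme_user_io) differs between 32-bit and 64-bit builds because the i386 ABI only 4-byte-aligns 64-bit members while x86-64 pads the struct tail to 8 bytes (typically 44 vs. 48 bytes). A 32-bit program therefore issues a NVME_IOCTL_SUBMIT_IO value that a 64-bit kernel would not otherwise recognize, which is what the packed nvme_user_io32 definition recovers. The stand-alone sketch below is illustrative only (not part of the patch) and assumes the usual i386/x86-64 alignment rules:

/*
 * Compile once with -m32 and once with -m64: the printed size and ioctl
 * number differ, matching the NVME_IOCTL_SUBMIT_IO32 case handled above.
 */
#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>

struct nvme_user_io_demo {	/* mirrors the uapi struct nvme_user_io layout */
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t control;
	uint16_t nblocks;
	uint16_t rsvd;
	uint64_t metadata;	/* 4-byte aligned on i386, 8-byte on x86-64 */
	uint64_t addr;
	uint64_t slba;
	uint32_t dsmgmt;
	uint32_t reftag;
	uint16_t apptag;
	uint16_t appmask;
};

int main(void)
{
	printf("sizeof=%zu ioctl=%#lx\n",
	       sizeof(struct nvme_user_io_demo),
	       (unsigned long)_IOW('N', 0x42, struct nvme_user_io_demo));
	return 0;
}

Because only the encoded size differs, translating the single ioctl number and reusing nvme_ioctl() is simpler than a field-by-field compat conversion of the structure.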
@@ -1537,25 +1757,38 @@ static int nvme_revalidate_disk(struct gendisk *disk) return -ENODEV; } - id = nvme_identify_ns(ctrl, ns->head->ns_id); - if (!id) - return -ENODEV; + ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); + if (ret) + goto out; if (id->ncap == 0) { ret = -ENODEV; - goto out; + goto free_id; } - __nvme_revalidate_disk(disk, id); - nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); + ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); + if (ret) + goto free_id; + if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { dev_err(ctrl->device, "identifiers changed for nsid %d\n", ns->head->ns_id); ret = -ENODEV; + goto free_id; } -out: + __nvme_revalidate_disk(disk, id); +free_id: kfree(id); +out: + /* + * Only fail the function if we got a fatal error back from the + * device, otherwise ignore the error and just move on. + */ + if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR))) + ret = 0; + else if (ret > 0) + ret = blk_status_to_errno(nvme_error_status(ret)); return ret; } @@ -1641,13 +1874,15 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, static int nvme_pr_clear(struct block_device *bdev, u64 key) { - u32 cdw10 = 1 | (key ? 1 << 3 : 0); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); + u32 cdw10 = 1 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); + u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } @@ -1684,7 +1919,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit); static const struct block_device_operations nvme_fops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, @@ -1712,7 +1947,7 @@ const struct block_device_operations nvme_ns_head_ops = { .open = nvme_ns_head_open, .release = nvme_ns_head_release, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, .getgeo = nvme_getgeo, .pr_ops = &nvme_pr_ops, }; @@ -1871,6 +2106,26 @@ static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) return ret; } +static int nvme_configure_acre(struct nvme_ctrl *ctrl) +{ + struct nvme_feat_host_behavior *host; + int ret; + + /* Don't bother enabling the feature if retry delay is not reported */ + if (!ctrl->crdt[0]) + return 0; + + host = kzalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return 0; + + host->acre = NVME_ENABLE_ACRE; + ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, + host, sizeof(*host), NULL); + kfree(host); + return ret; +} + static int nvme_configure_apst(struct nvme_ctrl *ctrl) { /* @@ -2011,7 +2266,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val) if (ctrl->ps_max_latency_us != latency) { ctrl->ps_max_latency_us = latency; - nvme_configure_apst(ctrl); + if (ctrl->state == NVME_CTRL_LIVE) + nvme_configure_apst(ctrl); } } @@ -2085,7 +2341,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, - "nqn.2014.08.org.nvmexpress:%4x%4x", + "nqn.2014.08.org.nvmexpress:%04x%04x", le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); off += sizeof(id->sn); @@ -2094,15 +2350,14 @@ 
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); } -static void __nvme_release_subsystem(struct nvme_subsystem *subsys) -{ - ida_simple_remove(&nvme_subsystems_ida, subsys->instance); - kfree(subsys); -} - static void nvme_release_subsystem(struct device *dev) { - __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + if (subsys->instance >= 0) + ida_simple_remove(&nvme_instance_ida, subsys->instance); + kfree(subsys); } static void nvme_destroy_subsystem(struct kref *ref) @@ -2212,12 +2467,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); if (!subsys) return -ENOMEM; - ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); - if (ret < 0) { - kfree(subsys); - return ret; - } - subsys->instance = ret; + + subsys->instance = -1; mutex_init(&subsys->lock); kref_init(&subsys->ref); INIT_LIST_HEAD(&subsys->ctrls); @@ -2225,14 +2476,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) nvme_init_subnqn(subsys, ctrl, id); memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); memcpy(subsys->model, id->mn, sizeof(subsys->model)); - memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; subsys->dev.class = nvme_subsys_class; subsys->dev.release = nvme_release_subsystem; subsys->dev.groups = nvme_subsys_attrs_groups; - dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); + dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); device_initialize(&subsys->dev); mutex_lock(&nvme_subsystems_lock); @@ -2252,7 +2502,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) goto out_unlock; } - __nvme_release_subsystem(subsys); + put_device(&subsys->dev); subsys = found; } else { ret = device_add(&subsys->dev); @@ -2265,6 +2515,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) list_add_tail(&subsys->entry, &nvme_subsystems); } + if (!found) + subsys->instance = ctrl->instance; ctrl->subsys = subsys; mutex_unlock(&nvme_subsystems_lock); @@ -2316,7 +2568,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl) if (!ctrl->effects) return 0; - ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, + ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, ctrl->effects, sizeof(*ctrl->effects), 0); if (ret) { kfree(ctrl->effects); @@ -2369,10 +2621,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (!ctrl->identified) { int i; - ret = nvme_init_subsystem(ctrl, id); - if (ret) - goto out_free; - /* * Check for quirks. 
Quirk can depend on firmware version, * so, in principle, the set of quirks present can change @@ -2385,15 +2633,26 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (quirk_matches(id, &core_quirks[i])) ctrl->quirks |= core_quirks[i].quirks; } + + ret = nvme_init_subsystem(ctrl, id); + if (ret) + goto out_free; } + memcpy(ctrl->subsys->firmware_rev, id->fr, + sizeof(ctrl->subsys->firmware_rev)); if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; } + ctrl->crdt[0] = le16_to_cpu(id->crdt1); + ctrl->crdt[1] = le16_to_cpu(id->crdt2); + ctrl->crdt[2] = le16_to_cpu(id->crdt3); + ctrl->oacs = le16_to_cpu(id->oacs); ctrl->oncs = le16_to_cpup(&id->oncs); + ctrl->mtfa = le16_to_cpu(id->mtfa); ctrl->oaes = le32_to_cpu(id->oaes); atomic_set(&ctrl->abort_limit, id->acl + 1); ctrl->vwc = id->vwc; @@ -2468,7 +2727,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } - ret = nvme_mpath_init(ctrl, id); + ret = nvme_mpath_init_identify(ctrl, id); kfree(id); if (ret < 0) @@ -2491,6 +2750,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (ret < 0) return ret; + ret = nvme_configure_acre(ctrl); + if (ret < 0) + return ret; + ctrl->identified = true; return 0; @@ -2514,10 +2777,26 @@ static int nvme_dev_open(struct inode *inode, struct file *file) return -EWOULDBLOCK; } + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) { + nvme_put_ctrl(ctrl); + return -EINVAL; + } + file->private_data = ctrl; return 0; } +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; @@ -2563,11 +2842,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp); case NVME_IOCTL_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; dev_warn(ctrl->device, "resetting controller\n"); return nvme_reset_ctrl_sync(ctrl); case NVME_IOCTL_SUBSYS_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; return nvme_reset_subsystem(ctrl); case NVME_IOCTL_RESCAN: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; nvme_queue_scan(ctrl); return 0; default: @@ -2578,6 +2863,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, + .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = nvme_dev_ioctl, }; @@ -2734,6 +3020,14 @@ const struct attribute_group nvme_ns_id_attr_group = { .is_visible = nvme_ns_id_attrs_are_visible, }; +const struct attribute_group *nvme_ns_id_attr_groups[] = { + &nvme_ns_id_attr_group, +#ifdef CONFIG_NVM + &nvme_nvm_attr_group, +#endif + NULL, +}; + #define nvme_show_str_function(field) \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -2825,6 +3119,37 @@ static ssize_t nvme_sysfs_show_address(struct device *dev, } static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); +static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + int value = 
nvmf_opt_to_plus(ctrl->opts)->fast_io_fail_tmo; + + if (value == -1) + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", value); +} + +static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct nvmf_ctrl_options *opts = ctrl->opts; + int fast_io_fail_tmo, err; + + err = kstrtoint(buf, 10, &fast_io_fail_tmo); + if (err) + return -EINVAL; + + if (fast_io_fail_tmo < 0) + nvmf_opt_to_plus(opts)->fast_io_fail_tmo = -1; + else + nvmf_opt_to_plus(opts)->fast_io_fail_tmo = fast_io_fail_tmo; + return count; +} +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); + static struct attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, &dev_attr_rescan_controller.attr, @@ -2837,6 +3162,7 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_subsysnqn.attr, &dev_attr_address.attr, &dev_attr_state.attr, + &dev_attr_fast_io_fail_tmo.attr, NULL }; @@ -2850,6 +3176,8 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, return 0; if (a == &dev_attr_address.attr && !ctrl->ops->get_address) return 0; + if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts) + return 0; return a->mode; } @@ -2888,7 +3216,6 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, list_for_each_entry(h, &subsys->nsheads, entry) { if (nvme_ns_ids_valid(&new->ids) && - !list_empty(&h->list) && nvme_ns_ids_equal(&new->ids, &h->ids)) return -EINVAL; } @@ -2917,7 +3244,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ns_id = nsid; kref_init(&head->ref); - nvme_report_ns_ids(ctrl, nsid, id, &head->ids); + ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); + if (ret) + goto out_cleanup_srcu; ret = __nvme_check_ids(ctrl->subsys, head); if (ret) { @@ -2942,6 +3271,8 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, out_free_head: kfree(head); out: + if (ret > 0) + ret = blk_status_to_errno(nvme_error_status(ret)); return ERR_PTR(ret); } @@ -2965,21 +3296,29 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, } else { struct nvme_ns_ids ids; - nvme_report_ns_ids(ctrl, nsid, id, &ids); + ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); + if (ret) { + nvme_put_ns_head(head); + goto out_unlock; + } + if (!nvme_ns_ids_equal(&head->ids, &ids)) { dev_err(ctrl->device, "IDs don't match for shared namespace %d\n", nsid); ret = -EINVAL; + nvme_put_ns_head(head); goto out_unlock; } } - list_add_tail(&ns->siblings, &head->list); + list_add_tail_rcu(&ns->siblings, &head->list); ns->head = head; out_unlock: mutex_unlock(&ctrl->subsys->lock); + if (ret > 0) + ret = blk_status_to_errno(nvme_error_status(ret)); return ret; } @@ -3042,7 +3381,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) struct gendisk *disk; struct nvme_id_ns *id; char disk_name[DISK_NAME_LEN]; - int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; + int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT, ret; ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) @@ -3061,8 +3400,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); nvme_set_queue_limits(ctrl, ns->queue); - id = nvme_identify_ns(ctrl, nsid); - if (!id) + ret = nvme_identify_ns(ctrl, nsid, &id); + if (ret) goto out_free_queue; if (id->ncap == 0) @@ -3099,14 +3438,8 @@ static 
void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_get_ctrl(ctrl); + disk_to_dev(ns->disk)->groups = nvme_ns_id_attr_groups; device_add_disk(ctrl->device, ns->disk); - if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, - &nvme_ns_id_attr_group)) - pr_warn("%s: failed to create sysfs group for identification\n", - ns->disk->disk_name); - if (ns->ndev && nvme_nvm_register_sysfs(ns)) - pr_warn("%s: failed to register lightnvm sysfs group for identification\n", - ns->disk->disk_name); nvme_mpath_add_disk(ns, id); nvme_fault_inject_init(ns); @@ -3116,7 +3449,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) out_unlink_ns: mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) + list_del_init(&ns->head->entry); mutex_unlock(&ctrl->subsys->lock); + nvme_put_ns_head(ns->head); out_free_id: kfree(id); out_free_queue: @@ -3131,27 +3467,26 @@ static void nvme_ns_remove(struct nvme_ns *ns) return; nvme_fault_inject_fini(ns); - if (ns->disk && ns->disk->flags & GENHD_FL_UP) { - sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, - &nvme_ns_id_attr_group); - if (ns->ndev) - nvme_nvm_unregister_sysfs(ns); - del_gendisk(ns->disk); - blk_cleanup_queue(ns->queue); - if (blk_get_integrity(ns->disk)) - blk_integrity_unregister(ns->disk); - } mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); - nvme_mpath_clear_current_path(ns); + if (list_empty(&ns->head->list)) + list_del_init(&ns->head->entry); mutex_unlock(&ns->ctrl->subsys->lock); + synchronize_rcu(); /* guarantee not available in head->list */ + nvme_mpath_clear_current_path(ns); + synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ + + if (ns->disk && ns->disk->flags & GENHD_FL_UP) { + del_gendisk(ns->disk); + blk_cleanup_queue(ns->queue); + } + down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); up_write(&ns->ctrl->namespaces_rwsem); - synchronize_srcu(&ns->head->srcu); nvme_mpath_check_last_path(ns); nvme_put_ns(ns); } @@ -3191,7 +3526,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) { struct nvme_ns *ns; __le32 *ns_list; - unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); + unsigned i, j, nsid, prev = 0; + unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024); int ret = 0; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); @@ -3282,6 +3618,7 @@ static void nvme_scan_work(struct work_struct *work) if (nvme_identify_ctrl(ctrl, &id)) return; + mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); if (ctrl->vs >= NVME_VS(1, 1, 0) && !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { @@ -3290,6 +3627,7 @@ static void nvme_scan_work(struct work_struct *work) } nvme_scan_ns_sequential(ctrl, nn); out_free_id: + mutex_unlock(&ctrl->scan_lock); kfree(id); down_write(&ctrl->namespaces_rwsem); list_sort(NULL, &ctrl->namespaces, ns_cmp); @@ -3306,6 +3644,16 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) struct nvme_ns *ns, *next; LIST_HEAD(ns_list); + /* + * make sure to requeue I/O to all namespaces as these + * might result from the scan itself and must complete + * for the scan_work to make progress + */ + nvme_mpath_clear_ctrl_paths(ctrl); + + /* prevent racing with ns scanning */ + flush_work(&ctrl->scan_work); + /* * The dead states indicates the controller was not gracefully * disconnected. 
In that case, we won't be able to flush any data while @@ -3346,7 +3694,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. + */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) @@ -3371,7 +3726,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) if (!log) return; - if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, + if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, sizeof(*log), 0)) dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); kfree(log); @@ -3460,8 +3815,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { nvme_mpath_stop(ctrl); nvme_stop_keep_alive(ctrl); + nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); - flush_work(&ctrl->scan_work); cancel_work_sync(&ctrl->fw_act_work); if (ctrl->ops->stop_ctrl) ctrl->ops->stop_ctrl(ctrl); @@ -3473,10 +3828,10 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) if (ctrl->kato) nvme_start_keep_alive(ctrl); + nvme_enable_aen(ctrl); + if (ctrl->queue_count > 1) { nvme_queue_scan(ctrl); - nvme_enable_aen(ctrl); - queue_work(nvme_wq, &ctrl->async_event_work); nvme_start_queues(ctrl); } } @@ -3484,7 +3839,9 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); + nvme_put_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); @@ -3494,9 +3851,12 @@ static void nvme_free_ctrl(struct device *dev) container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; - ida_simple_remove(&nvme_instance_ida, ctrl->instance); + if (!subsys || ctrl->instance != subsys->instance) + ida_simple_remove(&nvme_instance_ida, ctrl->instance); + kfree(ctrl->effects); nvme_mpath_uninit(ctrl); + __free_page(ctrl->discard_page); if (subsys) { mutex_lock(&subsys->lock); @@ -3522,7 +3882,9 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, int ret; ctrl->state = NVME_CTRL_NEW; + clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &nvme_ctrl_to_plus(ctrl)->flags); spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; @@ -3536,6 +3898,16 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; + INIT_DELAYED_WORK(&nvme_ctrl_to_plus(ctrl)->failfast_work, + nvme_failfast_work); + + BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > + PAGE_SIZE); + ctrl->discard_page = alloc_page(GFP_KERNEL); + if (!ctrl->discard_page) { + ret = -ENOMEM; + goto out; + } ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); if (ret < 0) @@ -3554,6 +3926,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, if (ret) goto out_release_instance; + nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); ctrl->cdev.owner = ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); @@ -3567,13 +3940,17 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, 
ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; dev_pm_qos_update_user_latency_tolerance(ctrl->device, min(default_ps_max_latency_us, (unsigned long)S32_MAX)); + nvme_mpath_init_ctrl(ctrl); return 0; out_free_name: - kfree_const(dev->kobj.name); + nvme_put_ctrl(ctrl); + kfree_const(ctrl->device->kobj.name); out_release_instance: ida_simple_remove(&nvme_instance_ida, ctrl->instance); out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); return ret; } EXPORT_SYMBOL_GPL(nvme_init_ctrl); @@ -3592,7 +3969,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) down_read(&ctrl->namespaces_rwsem); /* Forcibly unquiesce queues to avoid blocking dispatch */ - if (ctrl->admin_q) + if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) blk_mq_unquiesce_queue(ctrl->admin_q); list_for_each_entry(ns, &ctrl->namespaces, list) @@ -3652,11 +4029,15 @@ EXPORT_SYMBOL_GPL(nvme_start_freeze); void nvme_stop_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; + bool rcu = false; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) - blk_mq_quiesce_queue(ns->queue); + rcu = (blk_mq_quiesce_queue_without_rcu(ns->queue) || rcu); up_read(&ctrl->namespaces_rwsem); + + if (rcu) + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nvme_stop_queues); @@ -3671,6 +4052,17 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_start_queues); +void nvme_sync_io_queues(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + blk_sync_queue(ns->queue); + up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_sync_io_queues); + int __init nvme_core_init(void) { int result = -ENOMEM; @@ -3723,13 +4115,13 @@ int __init nvme_core_init(void) void nvme_core_exit(void) { - ida_destroy(&nvme_subsystems_ida); class_destroy(nvme_subsys_class); class_destroy(nvme_class); unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); destroy_workqueue(nvme_delete_wq); destroy_workqueue(nvme_reset_wq); destroy_workqueue(nvme_wq); + ida_destroy(&nvme_instance_ida); } MODULE_LICENSE("GPL"); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 206d63cb1afc841507ab60edb898a35177eb5c22..a1f6648b63c403af557815afd0bef05792f8bab0 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -550,10 +550,15 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl, { if (ctrl->state != NVME_CTRL_DELETING && ctrl->state != NVME_CTRL_DEAD && + !test_bit(NVME_CTRL_FAILFAST_EXPIRED, + &nvme_ctrl_to_plus(ctrl)->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; - nvme_req(rq)->status = NVME_SC_ABORT_REQ; - return BLK_STS_IOERR; + + nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR; + blk_mq_start_request(rq); + nvme_complete_rq(rq); + return BLK_STS_OK; } EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command); @@ -604,6 +609,7 @@ static const match_table_t opt_tokens = { { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, { NVMF_OPT_HOST_ID, "hostid=%s" }, { NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, + { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" }, { NVMF_OPT_ERR, NULL } }; @@ -614,7 +620,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, char *options, *o, *p; int token, ret = 0; size_t nqnlen = 0; - int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO; + int ctrl_loss_tmo = NVMF_DEF_RECONNECT_FOREVER; uuid_t hostid; /* Set defaults */ @@ -623,6 +629,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options 
*opts, opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; opts->kato = NVME_DEFAULT_KATO; opts->duplicate_connect = false; + nvmf_opt_to_plus(opts)->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO; options = o = kstrdup(buf, GFP_KERNEL); if (!options) @@ -747,6 +754,17 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n"); ctrl_loss_tmo = token; break; + case NVMF_OPT_FAIL_FAST_TMO: + if (match_int(args, &token)) { + ret = -EINVAL; + goto out; + } + + if (token >= 0) + pr_warn("I/O will fail on after %d sec reconnect\n", + token); + nvmf_opt_to_plus(opts)->fast_io_fail_tmo = token; + break; case NVMF_OPT_HOSTNQN: if (opts->host) { pr_err("hostnqn already user-assigned: %s\n", @@ -827,11 +845,17 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->nr_io_queues = 0; opts->duplicate_connect = true; } - if (ctrl_loss_tmo < 0) + + if (ctrl_loss_tmo < 0) { opts->max_reconnects = -1; - else + } else { opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, opts->reconnect_delay); + if (ctrl_loss_tmo < nvmf_opt_to_plus(opts)->fast_io_fail_tmo) + pr_warn("failfast tmo (%d) > ctrl_loss_tmo (%d)\n", + nvmf_opt_to_plus(opts)->fast_io_fail_tmo, + ctrl_loss_tmo); + } if (!opts->host) { kref_get(&nvmf_default_host->ref); @@ -893,27 +917,30 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) kfree(opts->trsvcid); kfree(opts->subsysnqn); kfree(opts->host_traddr); - kfree(opts); + kfree(nvmf_opt_to_plus(opts)); } EXPORT_SYMBOL_GPL(nvmf_free_options); #define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN) #define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ - NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT) + NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ + NVMF_OPT_FAIL_FAST_TMO) static struct nvme_ctrl * nvmf_create_ctrl(struct device *dev, const char *buf, size_t count) { + struct nvmf_ctrl_options_plus *opts_plus; struct nvmf_ctrl_options *opts; struct nvmf_transport_ops *ops; struct nvme_ctrl *ctrl; int ret; - opts = kzalloc(sizeof(*opts), GFP_KERNEL); - if (!opts) + opts_plus = kzalloc(sizeof(*opts_plus), GFP_KERNEL); + if (!opts_plus) return ERR_PTR(-ENOMEM); + opts = &opts_plus->ops; ret = nvmf_parse_options(opts, buf); if (ret) goto out_free_opts; diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index aa2fdb2a2e8fc0143b59ff48692284ba50c8225f..b7a47b8687573fee1766445fe53598173dc211e6 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -23,6 +23,9 @@ #define NVMF_DEF_RECONNECT_DELAY 10 /* default to 600 seconds of reconnect attempts before giving up */ #define NVMF_DEF_CTRL_LOSS_TMO 600 +#define NVMF_DEF_RECONNECT_FOREVER -1 +/* default is -1: the fail fast mechanism is disabled */ +#define NVMF_DEF_FAIL_FAST_TMO -1 /* * Define a host as seen by the target. 
We allocate one at boot, but also @@ -58,6 +61,7 @@ enum { NVMF_OPT_CTRL_LOSS_TMO = 1 << 11, NVMF_OPT_HOST_ID = 1 << 12, NVMF_OPT_DUP_CONNECT = 1 << 13, + NVMF_OPT_FAIL_FAST_TMO = 1 << 20, }; /** @@ -85,6 +89,7 @@ enum { * @max_reconnects: maximum number of allowed reconnect attempts before removing * the controller, (-1) means reconnect forever, zero means remove * immediately; + * @fast_io_fail_tmo: Fast I/O fail timeout in seconds */ struct nvmf_ctrl_options { unsigned mask; @@ -103,6 +108,14 @@ struct nvmf_ctrl_options { int max_reconnects; }; +struct nvmf_ctrl_options_plus { + struct nvmf_ctrl_options ops; + int fast_io_fail_tmo; +}; + +#define nvmf_opt_to_plus(ps) \ + container_of(ps, struct nvmf_ctrl_options_plus, ops) + /* * struct nvmf_transport_ops - used to register a specific * fabric implementation of NVMe fabrics. diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 611e70cae7544207e4ad75a36a1feff85f8198bf..59696249f2cfd71b1b23cb3351c0f144e7510942 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -160,7 +160,10 @@ struct nvme_fc_ctrl { struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; - struct nvme_ctrl ctrl; + union { + struct nvme_ctrl ctrl; + struct nvme_ctrl_plus ctrl_plus; + }; }; static inline struct nvme_fc_ctrl * @@ -204,8 +207,6 @@ static LIST_HEAD(nvme_fc_lport_list); static DEFINE_IDA(nvme_fc_local_port_cnt); static DEFINE_IDA(nvme_fc_ctrl_cnt); - - /* * These items are short-term. They will eventually be moved into * a generic FC class. See comments in module init. @@ -340,7 +341,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary) { + !template->max_dif_sgl_segments || !template->dma_boundary || + !template->module) { ret = -EINVAL; goto out_reghost_failed; } @@ -1523,6 +1525,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; int i; + /* ensure we've initialized the ops once */ + if (!(aen_op->flags & FCOP_FLAGS_AEN)) + return; + for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) __nvme_fc_abort_op(ctrl, aen_op); } @@ -1710,7 +1716,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -1720,7 +1726,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); @@ -1785,6 +1791,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (!aen_op->fcp_req.private) @@ -1838,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) memset(queue, 0, sizeof(*queue)); queue->ctrl = ctrl; queue->qnum = idx; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); queue->dev = ctrl->dev; if (idx > 0) @@ -1880,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); } static void @@ -1941,7 +1948,7 @@ 
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) return 0; delete_queues: - for (; i >= 0; i--) + for (; i > 0; i--) __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); return ret; } @@ -1980,6 +1987,7 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); + struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2005,6 +2013,7 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); + module_put(lport->ops->module); } static void @@ -2033,15 +2042,36 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) nvme_fc_ctrl_put(ctrl); } +static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl); + static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) { - /* only proceed if in LIVE state - e.g. on first error */ + /* + * if an error (io timeout, etc) while (re)connecting, + * it's an error on creating the new association. + * Start the error recovery thread if it hasn't already + * been started. It is expected there could be multiple + * ios hitting this path before things are cleaned up. + */ + if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { + __nvme_fc_terminate_io(ctrl); + + /* + * Rescheduling the connection after recovering + * from the io error is left to the reconnect work + * item, which is what should have stalled waiting on + * the io that had the error that scheduled this work. + */ + return; + } + + /* Otherwise, only proceed if in LIVE state - e.g. on first error */ if (ctrl->ctrl.state != NVME_CTRL_LIVE) return; dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: transport association error detected: %s\n", + "NVME-FC{%d}: transport association event: %s\n", ctrl->cnum, errmsg); dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: resetting controller\n", ctrl->cnum); @@ -2158,7 +2188,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, { struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; - u32 csn; int ret, opstate; /* @@ -2173,8 +2202,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, /* format the FC-NVME CMD IU and fcp_req */ cmdiu->connection_id = cpu_to_be64(queue->connection_id); - csn = atomic_inc_return(&queue->csn); - cmdiu->csn = cpu_to_be32(csn); cmdiu->data_len = cpu_to_be32(data_len); switch (io_dir) { case NVMEFC_FCP_WRITE: @@ -2232,11 +2259,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, if (!(op->flags & FCOP_FLAGS_AEN)) blk_mq_start_request(op->rq); + cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, &ctrl->rport->remoteport, queue->lldd_handle, &op->fcp_req); if (ret) { + /* + * If the lld fails to send the command is there an issue with + * the csn value? If the command that fails is the Connect, + * no - as the connection won't be live. If it is a command + * post-connect, it's possible a gap in csn may be created. + * Does this matter? As Linux initiators don't send fused + * commands, no. The gap would exist, but as there's nothing + * that depends on csn order to be delivered on the target + * side, it shouldn't hurt. It would be difficult for a + * target to even detect the csn gap as it has no idea when the + * cmd with the csn was supposed to arrive. 
+ */ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); @@ -2853,6 +2893,33 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) } } +static void +__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) +{ + /* + * if state is connecting - the error occurred as part of a + * reconnect attempt. The create_association error paths will + * clean up any outstanding io. + * + * if it's a different state - ensure all pending io is + * terminated. Given this can delay while waiting for the + * aborted io to return, we recheck adapter state below + * before changing state. + */ + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { + nvme_stop_keep_alive(&ctrl->ctrl); + + /* will block will waiting for io to terminate */ + nvme_fc_delete_association(ctrl); + } + + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && + !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) + dev_err(ctrl->ctrl.device, + "NVME-FC{%d}: error_recovery: Couldn't change state " + "to CONNECTING\n", ctrl->cnum); +} + static void nvme_fc_reset_ctrl_work(struct work_struct *work) { @@ -2860,17 +2927,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); int ret; - nvme_stop_ctrl(&ctrl->ctrl); - - /* will block will waiting for io to terminate */ - nvme_fc_delete_association(ctrl); + __nvme_fc_terminate_io(ctrl); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { - dev_err(ctrl->ctrl.device, - "NVME-FC{%d}: error_recovery: Couldn't change state " - "to CONNECTING\n", ctrl->cnum); - return; - } + nvme_stop_ctrl(&ctrl->ctrl); if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) ret = nvme_fc_create_association(ctrl); @@ -2885,6 +2944,7 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) ctrl->cnum); } + static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .name = "fc", .module = THIS_MODULE, @@ -2980,10 +3040,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } + if (!try_module_get(lport->ops->module)) { + ret = -EUNATCH; + goto out_free_ctrl; + } + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_free_ctrl; + goto out_mod_put; } ctrl->ctrl.opts = opts; @@ -3071,10 +3136,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto fail_ctrl; } - nvme_get_ctrl(&ctrl->ctrl); - if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { - nvme_put_ctrl(&ctrl->ctrl); dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule initial connect\n", ctrl->cnum); @@ -3122,6 +3184,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_mod_put: + module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: @@ -3275,7 +3339,8 @@ static int __init nvme_fc_init_module(void) fc_class = class_create(THIS_MODULE, "fc"); if (IS_ERR(fc_class)) { pr_err("couldn't register class fc\n"); - return PTR_ERR(fc_class); + ret = PTR_ERR(fc_class); + return ret; } /* @@ -3299,19 +3364,39 @@ static int __init nvme_fc_init_module(void) device_destroy(fc_class, MKDEV(0, 0)); out_destroy_class: class_destroy(fc_class); + return ret; } +static void +nvme_fc_delete_controllers(struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + + spin_lock(&rport->lock); + list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: transport 
unloading: deleting ctrl\n", + ctrl->cnum); + nvme_delete_ctrl(&ctrl->ctrl); + } + spin_unlock(&rport->lock); +} + static void __exit nvme_fc_exit_module(void) { - /* sanity check - all lports should be removed */ - if (!list_empty(&nvme_fc_lport_list)) - pr_warn("%s: localport list not empty\n", __func__); + struct nvme_fc_lport *lport; + struct nvme_fc_rport *rport; + unsigned long flags; - nvmf_unregister_transport(&nvme_fc_transport); + spin_lock_irqsave(&nvme_fc_lock, flags); + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) + list_for_each_entry(rport, &lport->endp_list, endp_list) + nvme_fc_delete_controllers(rport); + spin_unlock_irqrestore(&nvme_fc_lock, flags); + flush_workqueue(nvme_delete_wq); - ida_destroy(&nvme_fc_local_port_cnt); - ida_destroy(&nvme_fc_ctrl_cnt); + nvmf_unregister_transport(&nvme_fc_transport); device_destroy(fc_class, MKDEV(0, 0)); class_destroy(fc_class); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 6fe5923c95d4aa1f553c76890e84ffd243b6dcdf..d10257b9c5236e268608967983f6df541d2fc55a 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -968,6 +968,9 @@ void nvme_nvm_update_nvm_info(struct nvme_ns *ns) struct nvm_dev *ndev = ns->ndev; struct nvm_geo *geo = &ndev->geo; + if (geo->version == NVM_OCSSD_SPEC_12) + return; + geo->csecs = 1 << ns->lba_shift; geo->sos = ns->ms; } @@ -1190,10 +1193,29 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes); static NVM_DEV_ATTR_12_RO(media_capabilities); static NVM_DEV_ATTR_12_RO(max_phys_secs); -static struct attribute *nvm_dev_attrs_12[] = { +/* 2.0 values */ +static NVM_DEV_ATTR_20_RO(groups); +static NVM_DEV_ATTR_20_RO(punits); +static NVM_DEV_ATTR_20_RO(chunks); +static NVM_DEV_ATTR_20_RO(clba); +static NVM_DEV_ATTR_20_RO(ws_min); +static NVM_DEV_ATTR_20_RO(ws_opt); +static NVM_DEV_ATTR_20_RO(maxoc); +static NVM_DEV_ATTR_20_RO(maxocpu); +static NVM_DEV_ATTR_20_RO(mw_cunits); +static NVM_DEV_ATTR_20_RO(write_typ); +static NVM_DEV_ATTR_20_RO(write_max); +static NVM_DEV_ATTR_20_RO(reset_typ); +static NVM_DEV_ATTR_20_RO(reset_max); + +static struct attribute *nvm_dev_attrs[] = { + /* version agnostic attrs */ &dev_attr_version.attr, &dev_attr_capabilities.attr, + &dev_attr_read_typ.attr, + &dev_attr_read_max.attr, + /* 1.2 attrs */ &dev_attr_vendor_opcode.attr, &dev_attr_device_mode.attr, &dev_attr_media_manager.attr, @@ -1208,8 +1230,6 @@ static struct attribute *nvm_dev_attrs_12[] = { &dev_attr_page_size.attr, &dev_attr_hw_sector_size.attr, &dev_attr_oob_sector_size.attr, - &dev_attr_read_typ.attr, - &dev_attr_read_max.attr, &dev_attr_prog_typ.attr, &dev_attr_prog_max.attr, &dev_attr_erase_typ.attr, @@ -1218,33 +1238,7 @@ static struct attribute *nvm_dev_attrs_12[] = { &dev_attr_media_capabilities.attr, &dev_attr_max_phys_secs.attr, - NULL, -}; - -static const struct attribute_group nvm_dev_attr_group_12 = { - .name = "lightnvm", - .attrs = nvm_dev_attrs_12, -}; - -/* 2.0 values */ -static NVM_DEV_ATTR_20_RO(groups); -static NVM_DEV_ATTR_20_RO(punits); -static NVM_DEV_ATTR_20_RO(chunks); -static NVM_DEV_ATTR_20_RO(clba); -static NVM_DEV_ATTR_20_RO(ws_min); -static NVM_DEV_ATTR_20_RO(ws_opt); -static NVM_DEV_ATTR_20_RO(maxoc); -static NVM_DEV_ATTR_20_RO(maxocpu); -static NVM_DEV_ATTR_20_RO(mw_cunits); -static NVM_DEV_ATTR_20_RO(write_typ); -static NVM_DEV_ATTR_20_RO(write_max); -static NVM_DEV_ATTR_20_RO(reset_typ); -static NVM_DEV_ATTR_20_RO(reset_max); - -static struct attribute *nvm_dev_attrs_20[] = { - &dev_attr_version.attr, - 
&dev_attr_capabilities.attr, - + /* 2.0 attrs */ &dev_attr_groups.attr, &dev_attr_punits.attr, &dev_attr_chunks.attr, @@ -1255,8 +1249,6 @@ static struct attribute *nvm_dev_attrs_20[] = { &dev_attr_maxocpu.attr, &dev_attr_mw_cunits.attr, - &dev_attr_read_typ.attr, - &dev_attr_read_max.attr, &dev_attr_write_typ.attr, &dev_attr_write_max.attr, &dev_attr_reset_typ.attr, @@ -1265,44 +1257,38 @@ static struct attribute *nvm_dev_attrs_20[] = { NULL, }; -static const struct attribute_group nvm_dev_attr_group_20 = { - .name = "lightnvm", - .attrs = nvm_dev_attrs_20, -}; - -int nvme_nvm_register_sysfs(struct nvme_ns *ns) +static umode_t nvm_dev_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) { + struct device *dev = container_of(kobj, struct device, kobj); + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns *ns = disk->private_data; struct nvm_dev *ndev = ns->ndev; - struct nvm_geo *geo = &ndev->geo; + struct device_attribute *dev_attr = + container_of(attr, typeof(*dev_attr), attr); if (!ndev) - return -EINVAL; - - switch (geo->major_ver_id) { - case 1: - return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group_12); - case 2: - return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group_20); - } - - return -EINVAL; -} + return 0; -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) -{ - struct nvm_dev *ndev = ns->ndev; - struct nvm_geo *geo = &ndev->geo; + if (dev_attr->show == nvm_dev_attr_show) + return attr->mode; - switch (geo->major_ver_id) { + switch (ndev->geo.major_ver_id) { case 1: - sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group_12); + if (dev_attr->show == nvm_dev_attr_show_12) + return attr->mode; break; case 2: - sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group_20); + if (dev_attr->show == nvm_dev_attr_show_20) + return attr->mode; break; } + + return 0; } + +const struct attribute_group nvme_nvm_attr_group = { + .name = "lightnvm", + .attrs = nvm_dev_attrs, + .is_visible = nvm_dev_attrs_visible, +}; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9fe3fff818b8a42281b30bcd3bba83c0e0dd36f8..267395704a3b7771b552287c6111e1f5449a3384 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -20,9 +20,34 @@ module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); -inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) { - return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_unfreeze_queue(h->disk->queue); +} + +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_mq_freeze_queue_wait(h->disk->queue); +} + +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ + struct nvme_ns_head *h; + + lockdep_assert_held(&subsys->lock); + list_for_each_entry(h, &subsys->nsheads, entry) + if (h->disk) + blk_freeze_queue_start(h->disk->queue); } /* @@ -39,7 +64,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } else if (ns->head->disk) { sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, - ctrl->cntlid, 
ns->head->instance); + ctrl->instance, ns->head->instance); *flags = GENHD_FL_HIDDEN; } else { sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, @@ -47,17 +72,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags; - spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -77,16 +97,26 @@ void nvme_failover_req(struct request *req) queue_work(nvme_wq, &ns->ctrl->ana_work); } break; - default: + case NVME_SC_HOST_PATH_ERROR: + case NVME_SC_HOST_ABORTED_CMD: /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. + * Temporary transport disruption in talking to the controller. + * Try to send on a new path. */ - nvme_reset_ctrl(ns->ctrl); + nvme_mpath_clear_current_path(ns); break; + default: + /* This was a non-ANA error so follow the normal error path. */ + return false; } + spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) @@ -110,6 +140,19 @@ static const char *nvme_ana_state_names[] = { [NVME_ANA_CHANGE] = "change", }; +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) +{ + struct nvme_ns *ns; + + mutex_lock(&ctrl->scan_lock); + down_read(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) + if (nvme_mpath_clear_current_path(ns)) + kblockd_schedule_work(&ns->head->requeue_work); + up_read(&ctrl->namespaces_rwsem); + mutex_unlock(&ctrl->scan_lock); +} + static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head) { struct nvme_ns *ns, *fallback = NULL; @@ -150,6 +193,27 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) return ns; } +static bool nvme_available_path(struct nvme_ns_head *head) +{ + struct nvme_ns *ns; + + list_for_each_entry_rcu(ns, &head->list, siblings) { + if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, + &nvme_ctrl_to_plus(ns->ctrl)->flags)) + continue; + switch (ns->ctrl->state) { + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + /* fallthru */ + return true; + default: + break; + } + } + return false; +} + static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, struct bio *bio) { @@ -159,6 +223,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; + /* + * The namespace might be going away and the bio might + * be moved to a different queue via blk_steal_bios(), + * so we need to use the bio_split pool from the original + * queue to allocate the bvecs from. 
+ */ + blk_queue_split(q, &bio); + srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (likely(ns)) { @@ -168,14 +240,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); ret = direct_make_request(bio); - } else if (!list_empty_careful(&head->list)) { - dev_warn_ratelimited(dev, "no path available - requeuing I/O\n"); + } else if (nvme_available_path(head)) { + dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); spin_lock_irq(&head->requeue_lock); bio_list_add(&head->requeue_list, bio); spin_unlock_irq(&head->requeue_lock); } else { - dev_warn_ratelimited(dev, "no path - failing I/O\n"); + dev_warn_ratelimited(dev, "no available path - failing I/O\n"); bio->bi_status = BLK_STS_IOERR; bio_endio(bio); @@ -250,6 +322,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); + blk_set_stacking_limits(&q->limits); /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) @@ -277,20 +350,17 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - lockdep_assert_held(&ns->head->lock); - if (!head->disk) return; - if (!(head->disk->flags & GENHD_FL_UP)) { + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + WARN_ON(disk_to_dev(head->disk)->groups); + disk_to_dev(head->disk)->groups = nvme_ns_id_attr_groups; device_add_disk(&head->subsys->dev, head->disk); - if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, - &nvme_ns_id_attr_group)) - dev_warn(&head->subsys->dev, - "failed to create id group.\n"); } - kblockd_schedule_work(&ns->head->requeue_work); + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, @@ -305,8 +375,14 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) { struct nvme_ana_group_desc *desc = base + offset; - u32 nr_nsids = le32_to_cpu(desc->nnsids); - size_t nsid_buf_size = nr_nsids * sizeof(__le32); + u32 nr_nsids; + size_t nsid_buf_size; + + if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc))) + return -EINVAL; + + nr_nsids = le32_to_cpu(desc->nnsids); + nsid_buf_size = nr_nsids * sizeof(__le32); if (WARN_ON_ONCE(desc->grpid == 0)) return -EINVAL; @@ -326,8 +402,6 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, return error; offset += nsid_buf_size; - if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc))) - return -EINVAL; } return 0; @@ -341,17 +415,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - enum nvme_ana_state old; - - mutex_lock(&ns->head->lock); - old = ns->ana_state; ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); - if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) + if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, @@ -371,16 +440,18 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (!nr_nsids) return 0; - down_write(&ctrl->namespaces_rwsem); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, 
&ctrl->namespaces, list) { - if (ns->head->ns_id != le32_to_cpu(desc->nsids[n])) + unsigned nsid = le32_to_cpu(desc->nsids[n]); + + if (ns->head->ns_id < nsid) continue; - nvme_update_ns_ana_state(desc, ns); + if (ns->head->ns_id == nsid) + nvme_update_ns_ana_state(desc, ns); if (++n == nr_nsids) break; } - up_write(&ctrl->namespaces_rwsem); - WARN_ON_ONCE(n < nr_nsids); + up_read(&ctrl->namespaces_rwsem); return 0; } @@ -462,31 +533,37 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { - struct nvme_ns *ns = data; + struct nvme_ana_group_desc *dst = data; - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { - nvme_update_ns_ana_state(desc, ns); - return -ENXIO; /* just break out of the loop */ - } + if (desc->grpid != dst->grpid) + return 0; - return 0; + *dst = *desc; + return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) { if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = id->anagrpid, + .state = 0, + }; + mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(id->anagrpid); - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } } else { - mutex_lock(&ns->head->lock); ns->ana_state = NVME_ANA_OPTIMIZED; nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } } @@ -494,24 +571,39 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - if (head->disk->flags & GENHD_FL_UP) { - sysfs_remove_group(&disk_to_dev(head->disk)->kobj, - &nvme_ns_id_attr_group); + if (head->disk->flags & GENHD_FL_UP) del_gendisk(head->disk); - } blk_set_queue_dying(head->disk->queue); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); blk_cleanup_queue(head->disk->queue); + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * if device_add_disk wasn't called, prevent + * disk release to put a bogus reference on the + * request queue + */ + head->disk->queue = NULL; + } put_disk(head->disk); } -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) { - int error; + mutex_init(&ctrl->ana_lock); + timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); + INIT_WORK(&ctrl->ana_work, nvme_ana_work); +} - if (!nvme_ctrl_use_ana(ctrl)) +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT; + size_t ana_log_size; + int error = 0; + + /* check if multipath is enabled and we have the capability */ + if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3))) return 0; ctrl->anacap = id->anacap; @@ -519,41 +611,39 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); ctrl->anagrpmax = le32_to_cpu(id->anagrpmax); - mutex_init(&ctrl->ana_lock); - timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); - ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + - ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); - if (!(ctrl->anacap & (1 << 6))) - 
ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); - - if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { + ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + + ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) + + ctrl->max_namespaces * sizeof(__le32); + if (ana_log_size > max_transfer_size) { dev_err(ctrl->device, - "ANA log page size (%zd) larger than MDTS (%d).\n", - ctrl->ana_log_size, - ctrl->max_hw_sectors << SECTOR_SHIFT); + "ANA log page size (%zd) larger than MDTS (%zd).\n", + ana_log_size, max_transfer_size); dev_err(ctrl->device, "disabling ANA support.\n"); - return 0; + goto out_uninit; } - INIT_WORK(&ctrl->ana_work, nvme_ana_work); - ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); - if (!ctrl->ana_log_buf) { - error = -ENOMEM; - goto out; + if (ana_log_size > ctrl->ana_log_size) { + nvme_mpath_stop(ctrl); + kfree(ctrl->ana_log_buf); + ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL); + if (!ctrl->ana_log_buf) + return -ENOMEM; } - error = nvme_read_ana_log(ctrl, true); + ctrl->ana_log_size = ana_log_size; + error = nvme_read_ana_log(ctrl, false); if (error) - goto out_free_ana_log_buf; + goto out_uninit; return 0; -out_free_ana_log_buf: - kfree(ctrl->ana_log_buf); -out: + +out_uninit: + nvme_mpath_uninit(ctrl); return error; } void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { kfree(ctrl->ana_log_buf); + ctrl->ana_log_buf = NULL; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index bb4a2003c0978722230a1e875ade13dc8c28311f..afe212063af257807987a1963df0734ba7e81202 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -90,6 +90,13 @@ enum nvme_quirks { * Set MEDIUM priority on SQ creation */ NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. 
+ */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* @@ -148,6 +155,7 @@ struct nvme_ctrl { enum nvme_ctrl_state state; bool identified; spinlock_t lock; + struct mutex scan_lock; const struct nvme_ctrl_ops *ops; struct request_queue *admin_q; struct request_queue *connect_q; @@ -179,6 +187,7 @@ struct nvme_ctrl { u32 page_size; u32 max_hw_sectors; u32 max_segments; + u16 crdt[3]; u16 oncs; u16 oacs; u16 nssa; @@ -237,8 +246,22 @@ struct nvme_ctrl { u16 maxcmd; int nr_reconnects; struct nvmf_ctrl_options *opts; + + struct page *discard_page; + unsigned long discard_page_busy; }; +#define NVME_CTRL_FAILFAST_EXPIRED 0 +struct nvme_ctrl_plus { + struct nvme_ctrl ctrl; + unsigned long flags; + struct delayed_work failfast_work; +}; + +#define nvme_ctrl_to_plus(t) \ + container_of(t, struct nvme_ctrl_plus, ctrl) + + struct nvme_subsystem { int instance; struct device dev; @@ -284,6 +307,8 @@ struct nvme_ns_head { spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 #endif struct list_head list; struct srcu_struct srcu; @@ -432,6 +457,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); +void nvme_sync_io_queues(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); @@ -459,29 +485,41 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, void *log, size_t size, u64 offset); -extern const struct attribute_group nvme_ns_id_attr_group; +extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH -bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return ctrl->ana_log_buf != NULL; +} + +void nvme_mpath_unfreeze(struct nvme_subsystem *subsys); +void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); +void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); void nvme_mpath_remove_disk(struct nvme_ns_head *head); -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); -static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) +static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - if (head && ns == rcu_access_pointer(head->current_path)) + if (head && ns == rcu_access_pointer(head->current_path)) { rcu_assign_pointer(head->current_path, NULL); + return true; + } + return false; } +void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); static inline void 
nvme_mpath_check_last_path(struct nvme_ns *ns) @@ -492,6 +530,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) kblockd_schedule_work(&head->requeue_work); } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; @@ -510,8 +558,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { @@ -528,15 +577,25 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns, static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) { } -static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) +static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) +{ + return false; +} +static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { } -static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, +static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) +{ +} +static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { + if (ctrl->subsys->cmic & (1 << 3)) + dev_warn(ctrl->device, +"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); return 0; } static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) @@ -545,14 +604,25 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) { } +static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) +{ +} +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM void nvme_nvm_update_nvm_info(struct nvme_ns *ns); int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); -int nvme_nvm_register_sysfs(struct nvme_ns *ns); -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns); +extern const struct attribute_group nvme_nvm_attr_group; int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg); #else static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {}; @@ -563,11 +633,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, } static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}; -static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns) -{ - return 0; -} -static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {}; static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d668682f91dfdb3428e02a44df2c8ade9ccf0042..85d10d69afe8485fb8ba43d3b7907bf52632fc7c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -66,10 +66,10 @@ MODULE_PARM_DESC(sgl_threshold, static int io_queue_depth_set(const char *val, const struct kernel_param *kp); static const struct 
kernel_param_ops io_queue_depth_ops = { .set = io_queue_depth_set, - .get = param_get_int, + .get = param_get_uint, }; -static int io_queue_depth = 1024; +static unsigned int io_queue_depth = 1024; module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); @@ -77,6 +77,7 @@ struct nvme_dev; struct nvme_queue; static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); +static void nvme_disable_io_queues(struct nvme_dev *dev); /* * Represents an NVM Express device. Each nvme_dev is a PCI function. @@ -92,7 +93,7 @@ struct nvme_dev { unsigned online_queues; unsigned max_qid; unsigned int num_vecs; - int q_depth; + u32 q_depth; u32 db_stride; void __iomem *bar; unsigned long bar_mapped_size; @@ -104,7 +105,10 @@ struct nvme_dev { u64 cmb_size; u32 cmbsz; u32 cmbloc; - struct nvme_ctrl ctrl; + union { + struct nvme_ctrl ctrl; + struct nvme_ctrl_plus ctrl_plus; + }; struct completion ioq_wait; mempool_t *iod_mempool; @@ -125,13 +129,14 @@ struct nvme_dev { static int io_queue_depth_set(const char *val, const struct kernel_param *kp) { - int n = 0, ret; + int ret; + u32 n; - ret = kstrtoint(val, 10, &n); + ret = kstrtou32(val, 10, &n); if (ret != 0 || n < 2) return -EINVAL; - return param_set_int(val, kp); + return param_set_uint(val, kp); } static inline unsigned int sq_idx(unsigned int qid, u32 stride) @@ -165,13 +170,15 @@ struct nvme_queue { dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 __iomem *q_db; - u16 q_depth; - s16 cq_vector; + u32 q_depth; + u16 cq_vector; u16 sq_tail; u16 cq_head; u16 last_cq_head; u16 qid; u8 cq_phase; + unsigned long flags; +#define NVMEQ_ENABLED 0 u32 *dbbuf_sq_db; u32 *dbbuf_cq_db; u32 *dbbuf_sq_ei; @@ -276,9 +283,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev, nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; } +static void nvme_dbbuf_free(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return; + + nvmeq->dbbuf_sq_db = NULL; + nvmeq->dbbuf_cq_db = NULL; + nvmeq->dbbuf_sq_ei = NULL; + nvmeq->dbbuf_cq_ei = NULL; +} + static void nvme_dbbuf_set(struct nvme_dev *dev) { struct nvme_command c; + unsigned int i; if (!dev->dbbuf_dbs) return; @@ -292,6 +311,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev) dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); /* Free memory and continue on */ nvme_dbbuf_dma_free(dev); + + for (i = 1; i <= dev->online_queues; i++) + nvme_dbbuf_free(&dev->queues[i]); } } @@ -817,7 +839,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. 
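Editor's note: the nvme_update_cq_head() hunk above switches from "increment, then compare" to "compare, then increment" so the head index never transiently holds the out-of-range value q_depth. A minimal userspace sketch of that wrap-around logic (names and sizes are illustrative, not the driver's types):

#include <assert.h>
#include <stdbool.h>

struct cq_state {
	unsigned int head;
	unsigned int depth;
	bool phase;
};

static void cq_advance_head(struct cq_state *cq)
{
	if (cq->head == cq->depth - 1) {
		cq->head = 0;
		cq->phase = !cq->phase;	/* phase flips on every wrap */
	} else {
		cq->head++;
	}
}

int main(void)
{
	struct cq_state cq = { .head = 0, .depth = 4, .phase = true };
	int i;

	for (i = 0; i < 5; i++)
		cq_advance_head(&cq);
	assert(cq.head == 1 && cq.phase == false);	/* wrapped exactly once */
	return 0;
}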
*/ - if (unlikely(nvmeq->cq_vector < 0)) + if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) return BLK_STS_IOERR; ret = nvme_setup_cmd(ns, req, &cmnd); @@ -873,13 +895,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; struct request *req; - if (unlikely(cqe->command_id >= nvmeq->q_depth)) { - dev_warn(nvmeq->dev->ctrl.device, - "invalid id %d completed on queue %d\n", - cqe->command_id, le16_to_cpu(cqe->sq_id)); - return; - } - /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to @@ -894,6 +909,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) } req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); + if (unlikely(!req)) { + dev_warn(nvmeq->dev->ctrl.device, + "invalid id %d completed on queue %d\n", + cqe->command_id, le16_to_cpu(cqe->sq_id)); + return; + } + nvme_end_request(req, cqe->status, cqe->result); } @@ -908,9 +930,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end) static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - if (++nvmeq->cq_head == nvmeq->q_depth) { + if (nvmeq->cq_head == nvmeq->q_depth - 1) { nvmeq->cq_head = 0; nvmeq->cq_phase = !nvmeq->cq_phase; + } else { + ++nvmeq->cq_head; } } @@ -1167,13 +1191,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) */ switch (dev->ctrl.state) { case NVME_CTRL_CONNECTING: - case NVME_CTRL_RESETTING: + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); + case NVME_CTRL_DELETING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, false); nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, true); return BLK_EH_DONE; + case NVME_CTRL_RESETTING: + return BLK_EH_RESET_TIMER; default: break; } @@ -1187,10 +1214,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, false); nvme_reset_ctrl(&dev->ctrl); - nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; } @@ -1253,30 +1280,25 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) */ static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - int vector; - - spin_lock_irq(&nvmeq->cq_lock); - if (nvmeq->cq_vector == -1) { - spin_unlock_irq(&nvmeq->cq_lock); + if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) return 1; - } - vector = nvmeq->cq_vector; - nvmeq->dev->online_queues--; - nvmeq->cq_vector = -1; - spin_unlock_irq(&nvmeq->cq_lock); - /* - * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without - * having to grab the lock. 
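Editor's note: the hunks above replace the cq_vector == -1 sentinel with an NVMEQ_ENABLED flag bit, and nvme_suspend_queue() now uses test_and_clear_bit() so only one caller ever tears a queue down. A simplified userspace model of that idempotent-disable pattern, using C11 atomics in place of the kernel's set_bit()/test_and_clear_bit() (all names are invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define Q_ENABLED (1u << 0)

struct queue {
	atomic_uint flags;
};

static void queue_enable(struct queue *q)
{
	atomic_fetch_or(&q->flags, Q_ENABLED);
}

/* Returns true only for the caller that actually disabled the queue. */
static bool queue_suspend(struct queue *q)
{
	return atomic_fetch_and(&q->flags, ~Q_ENABLED) & Q_ENABLED;
}

int main(void)
{
	struct queue q = { .flags = 0 };

	queue_enable(&q);
	printf("first suspend: %d\n", queue_suspend(&q));	/* 1: we disabled it */
	printf("second suspend: %d\n", queue_suspend(&q));	/* 0: already disabled */
	return 0;
}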
- */ + /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ mb(); + nvmeq->dev->online_queues--; if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); + pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); + return 0; +} - pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq); +static void nvme_suspend_io_queues(struct nvme_dev *dev) +{ + int i; - return 0; + for (i = dev->ctrl.queue_count - 1; i > 0; i--) + nvme_suspend_queue(&dev->queues[i]); } static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) @@ -1358,7 +1380,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; nvmeq->qid = qid; - nvmeq->cq_vector = -1; dev->ctrl.queue_count++; return 0; @@ -1403,7 +1424,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) { struct nvme_dev *dev = nvmeq->dev; int result; - s16 vector; + u16 vector = 0; if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), @@ -1438,10 +1459,10 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) if (result < 0) goto release_sq; + set_bit(NVMEQ_ENABLED, &nvmeq->flags); return result; release_sq: - nvmeq->cq_vector = -1; dev->online_queues--; adapter_delete_sq(dev, qid); release_cq: @@ -1502,6 +1523,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); if (IS_ERR(dev->ctrl.admin_q)) { blk_mq_free_tag_set(&dev->admin_tagset); + dev->ctrl.admin_q = NULL; return -ENOMEM; } if (!blk_get_queue(dev->ctrl.admin_q)) { @@ -1582,10 +1604,11 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) nvme_init_queue(nvmeq, 0); result = queue_request_irq(nvmeq); if (result) { - nvmeq->cq_vector = -1; + dev->online_queues--; return result; } + set_bit(NVMEQ_ENABLED, &nvmeq->flags); return result; } @@ -1647,6 +1670,9 @@ static void nvme_map_cmb(struct nvme_dev *dev) struct pci_dev *pdev = to_pci_dev(dev->dev); int bar; + if (dev->cmb_size) + return; + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; @@ -1727,8 +1753,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev) struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; - dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], - le64_to_cpu(desc->addr)); + dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], + le64_to_cpu(desc->addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(dev->host_mem_desc_bufs); @@ -1794,8 +1821,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, while (--i >= 0) { size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; - dma_free_coherent(dev->dev, size, bufs[i], - le64_to_cpu(descs[i].addr)); + dma_free_attrs(dev->dev, size, bufs[i], + le64_to_cpu(descs[i].addr), + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); } kfree(bufs); @@ -1889,6 +1917,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (nr_io_queues == 0) return 0; + clear_bit(NVMEQ_ENABLED, &adminq->flags); + if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) { result = nvme_cmb_qdepth(dev, nr_io_queues, sizeof(struct nvme_command)); @@ -1908,6 +1938,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) } while (1); adminq->q_db = dev->dbs; + retry: /* Deregister the admin queue's interrupt */ pci_free_irq(pdev, 0, adminq); @@ 
-1929,13 +1960,23 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) * path to scale better, even if the receive path is limited by the * number of interrupts. */ - result = queue_request_irq(adminq); - if (result) { - adminq->cq_vector = -1; + if (result) + return result; + set_bit(NVMEQ_ENABLED, &adminq->flags); + + result = nvme_create_io_queues(dev); + if (result || dev->online_queues < 2) return result; + + if (dev->online_queues - 1 < dev->max_qid) { + nr_io_queues = dev->online_queues - 1; + nvme_disable_io_queues(dev); + nvme_suspend_io_queues(dev); + goto retry; } - return nvme_create_io_queues(dev); + + return 0; } static void nvme_del_queue_end(struct request *req, blk_status_t error) @@ -2026,8 +2067,8 @@ static int nvme_dev_add(struct nvme_dev *dev) dev->tagset.nr_hw_queues = dev->online_queues - 1; dev->tagset.timeout = NVME_IO_TIMEOUT; dev->tagset.numa_node = dev_to_node(dev->dev); - dev->tagset.queue_depth = - min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; + dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth, + BLK_MQ_MAX_DEPTH) - 1; dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false); if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) { dev->tagset.cmd_size = max(dev->tagset.cmd_size, @@ -2085,7 +2126,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); - dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, + dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, io_queue_depth); dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); dev->dbs = dev->bar + 4096; @@ -2129,7 +2170,6 @@ static void nvme_pci_disable(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); - nvme_release_cmb(dev); pci_free_irq_vectors(pdev); if (pci_is_enabled(pdev)) { @@ -2140,8 +2180,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) { - int i; - bool dead = true; + bool dead = true, freeze = false; struct pci_dev *pdev = to_pci_dev(dev->dev); mutex_lock(&dev->shutdown_lock); @@ -2149,8 +2188,10 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) u32 csts = readl(dev->bar + NVME_REG_CSTS); if (dev->ctrl.state == NVME_CTRL_LIVE || - dev->ctrl.state == NVME_CTRL_RESETTING) + dev->ctrl.state == NVME_CTRL_RESETTING) { + freeze = true; nvme_start_freeze(&dev->ctrl); + } dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || pdev->error_state != pci_channel_io_normal); } @@ -2159,10 +2200,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * Give the controller a chance to complete all entered requests if * doing a safe shutdown. */ - if (!dead) { - if (shutdown) - nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); - } + if (!dead && shutdown && freeze) + nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); nvme_stop_queues(&dev->ctrl); @@ -2170,9 +2209,9 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_disable_io_queues(dev); nvme_disable_admin_queue(dev, shutdown); } - for (i = dev->ctrl.queue_count - 1; i >= 0; i--) - nvme_suspend_queue(&dev->queues[i]); + nvme_suspend_io_queues(dev); + nvme_suspend_queue(&dev->queues[0]); nvme_pci_disable(dev); blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); @@ -2183,8 +2222,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * must flush all entered requests to their failed completion to avoid * deadlocking blk-mq hot-cpu notifier. 
*/ - if (shutdown) + if (shutdown) { nvme_start_queues(&dev->ctrl); + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) + blk_mq_unquiesce_queue(dev->ctrl.admin_q); + } mutex_unlock(&dev->shutdown_lock); } @@ -2243,11 +2285,15 @@ static void nvme_reset_work(struct work_struct *work) struct nvme_dev *dev = container_of(work, struct nvme_dev, ctrl.reset_work); bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); - int result = -ENODEV; + int result; enum nvme_ctrl_state new_state = NVME_CTRL_LIVE; - if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) + if (dev->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", + dev->ctrl.state); + result = -ENODEV; goto out; + } /* * If we're called to reset a live controller first shut it down before @@ -2256,27 +2302,18 @@ static void nvme_reset_work(struct work_struct *work) if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) nvme_dev_disable(dev, false); - /* - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the - * initializing procedure here. - */ - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { - dev_warn(dev->ctrl.device, - "failed to mark controller CONNECTING\n"); - goto out; - } - + mutex_lock(&dev->shutdown_lock); result = nvme_pci_enable(dev); if (result) - goto out; + goto out_unlock; result = nvme_pci_configure_admin_queue(dev); if (result) - goto out; + goto out_unlock; result = nvme_alloc_admin_tags(dev); if (result) - goto out; + goto out_unlock; /* * Limit the max command size to prevent iod->sg allocations going @@ -2284,6 +2321,18 @@ static void nvme_reset_work(struct work_struct *work) */ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; dev->ctrl.max_segments = NVME_MAX_SEGS; + mutex_unlock(&dev->shutdown_lock); + + /* + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the + * initializing procedure here. 
+ */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + result = -EBUSY; + goto out; + } result = nvme_init_identify(&dev->ctrl); if (result) @@ -2342,12 +2391,15 @@ static void nvme_reset_work(struct work_struct *work) if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) { dev_warn(dev->ctrl.device, "failed to mark controller state %d\n", new_state); + result = -ENODEV; goto out; } nvme_start_ctrl(&dev->ctrl); return; + out_unlock: + mutex_unlock(&dev->shutdown_lock); out: nvme_remove_dead_ctrl(dev, result); } @@ -2376,7 +2428,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { - *val = readq(to_nvme_dev(ctrl)->bar + off); + *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); return 0; } @@ -2450,7 +2502,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie) { struct nvme_dev *dev = data; - nvme_reset_ctrl_sync(&dev->ctrl); + flush_work(&dev->ctrl.reset_work); flush_work(&dev->ctrl.scan_work); nvme_put_ctrl(&dev->ctrl); } @@ -2517,7 +2569,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); - nvme_get_ctrl(&dev->ctrl); + nvme_reset_ctrl(&dev->ctrl); async_schedule(nvme_async_probe, dev); return 0; @@ -2564,8 +2616,6 @@ static void nvme_remove(struct pci_dev *pdev) struct nvme_dev *dev = pci_get_drvdata(pdev); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); - - cancel_work_sync(&dev->ctrl.reset_work); pci_set_drvdata(pdev, NULL); if (!pci_device_is_present(pdev)) { @@ -2577,13 +2627,13 @@ static void nvme_remove(struct pci_dev *pdev) nvme_stop_ctrl(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); nvme_dev_disable(dev, true); + nvme_release_cmb(dev); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); - nvme_uninit_ctrl(&dev->ctrl); nvme_release_prp_pools(dev); nvme_dev_unmap(dev); - nvme_put_ctrl(&dev->ctrl); + nvme_uninit_ctrl(&dev->ctrl); } #ifdef CONFIG_PM_SLEEP @@ -2678,6 +2728,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_MEDIUM_PRIO_SQ }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dc042017c293adc77e0517efb80fd13157b8ac23..574c5e0182cf0b8f30a26dc2a3c6de52d384d4a3 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -93,6 +93,7 @@ struct nvme_rdma_queue { struct rdma_cm_id *cm_id; int cm_error; struct completion cm_done; + struct mutex queue_lock; }; struct nvme_rdma_ctrl { @@ -117,7 +118,10 @@ struct nvme_rdma_ctrl { struct sockaddr_storage addr; struct sockaddr_storage src_addr; - struct nvme_ctrl ctrl; + union { + struct nvme_ctrl ctrl; + struct nvme_ctrl_plus ctrl_plus; + }; bool use_inline_data; }; @@ -141,6 +145,10 @@ static bool register_always = true; module_param(register_always, bool, 0444); MODULE_PARM_DESC(register_always, "Use memory registration even for contiguous memory regions"); +static bool enable_inline_data = true; +module_param(enable_inline_data, bool, 0644); 
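Editor's note: the nvme.h, pci.c and rdma.c hunks above wrap struct nvme_ctrl in a larger nvme_ctrl_plus and overlay the two in a union, so existing ->ctrl users keep working while new code reaches the extra fields via a container_of()-style macro. A userspace illustration of why this works only when the base struct stays the first member (struct contents here are stand-ins):

#include <assert.h>
#include <stddef.h>

struct base {
	int state;
};

struct base_plus {
	struct base base;	/* must remain the first member */
	unsigned long flags;
};

#define to_plus(ptr) \
	((struct base_plus *)((char *)(ptr) - offsetof(struct base_plus, base)))

struct dev {
	union {
		struct base base;		/* legacy accessors keep working */
		struct base_plus base_plus;	/* new code sees the extra fields */
	};
};

int main(void)
{
	struct dev d = { .base_plus = { .base = { .state = 3 }, .flags = 1 } };
	struct base *b = &d.base;

	assert(to_plus(b) == &d.base_plus);	/* base pointer maps back to wrapper */
	assert(to_plus(b)->flags == 1);
	return 0;
}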
+MODULE_PARM_DESC(enable_inline_data, + "global switch for inline data when use rdma transport"); static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event); @@ -184,6 +192,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); if (ib_dma_mapping_error(ibdev, qe->dma)) { kfree(qe->data); + qe->data = NULL; return -ENOMEM; } @@ -212,6 +221,11 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev, if (!ring) return NULL; + /* + * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue + * lifetime. It's safe, since any chage in the underlying RDMA device + * will issue error recovery and queue re-creation. + */ for (i = 0; i < ib_queue_size; i++) { if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) goto out_free_ring; @@ -234,7 +248,7 @@ static void nvme_rdma_qp_event(struct ib_event *event, void *context) static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) { wait_for_completion_interruptible_timeout(&queue->cm_done, - msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1); + msecs_to_jiffies(2 * NVME_RDMA_CONNECT_TIMEOUT_MS)); return queue->cm_error; } @@ -266,14 +280,8 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { - struct nvme_rdma_ctrl *ctrl = set->driver_data; struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); - int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; - struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; - struct nvme_rdma_device *dev = queue->device; - - nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command), - DMA_TO_DEVICE); + kfree(req->sqe.data); } static int nvme_rdma_init_request(struct blk_mq_tag_set *set, @@ -284,15 +292,11 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set, struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; - struct nvme_rdma_device *dev = queue->device; - struct ib_device *ibdev = dev->dev; - int ret; nvme_req(rq)->ctrl = &ctrl->ctrl; - ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), - DMA_TO_DEVICE); - if (ret) - return ret; + req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); + if (!req->sqe.data) + return -ENOMEM; req->queue = queue; @@ -503,6 +507,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, int ret; queue = &ctrl->queues[idx]; + mutex_init(&queue->queue_lock); queue->ctrl = ctrl; init_completion(&queue->cm_done); @@ -518,7 +523,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, if (IS_ERR(queue->cm_id)) { dev_info(ctrl->ctrl.device, "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); - return PTR_ERR(queue->cm_id); + ret = PTR_ERR(queue->cm_id); + goto out_destroy_mutex; } if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) @@ -548,25 +554,33 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, out_destroy_cm_id: rdma_destroy_id(queue->cm_id); nvme_rdma_destroy_queue_ib(queue); +out_destroy_mutex: + mutex_destroy(&queue->queue_lock); return ret; } -static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) +static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { - if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) - return; - rdma_disconnect(queue->cm_id); ib_drain_qp(queue->qp); } +static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) +{ + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + __nvme_rdma_stop_queue(queue); + mutex_unlock(&queue->queue_lock); +} + static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) { if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) return; - nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); + mutex_destroy(&queue->queue_lock); } static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) @@ -587,6 +601,7 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) { + struct nvme_rdma_queue *queue = &ctrl->queues[idx]; int ret; if (idx) @@ -594,11 +609,14 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) else ret = nvmf_connect_admin_queue(&ctrl->ctrl); - if (!ret) - set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags); - else + if (!ret) { + set_bit(NVME_RDMA_Q_LIVE, &queue->flags); + } else { + if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) + __nvme_rdma_stop_queue(queue); dev_info(ctrl->ctrl.device, "failed to connect queue: %d ret=%d\n", idx, ret); + } return ret; } @@ -641,10 +659,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) if (ret) return ret; - ctrl->ctrl.queue_count = nr_io_queues + 1; - if (ctrl->ctrl.queue_count < 2) - return 0; + if (nr_io_queues == 0) { + dev_err(ctrl->ctrl.device, + "unable to set any I/O queues\n"); + return -ENOMEM; + } + ctrl->ctrl.queue_count = nr_io_queues + 1; dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); @@ -664,15 +685,6 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) return ret; } -static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl, - struct blk_mq_tag_set *set) -{ - struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); - - blk_mq_free_tag_set(set); - nvme_rdma_dev_put(ctrl->device); -} - static struct 
blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, bool admin) { @@ -710,24 +722,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, ret = blk_mq_alloc_tag_set(set); if (ret) - goto out; - - /* - * We need a reference on the device as long as the tag_set is alive, - * as the MRs in the request structures need a valid ib_device. - */ - ret = nvme_rdma_dev_get(ctrl->device); - if (!ret) { - ret = -EINVAL; - goto out_free_tagset; - } + return ERR_PTR(ret); return set; - -out_free_tagset: - blk_mq_free_tag_set(set); -out: - return ERR_PTR(ret); } static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, @@ -735,9 +732,10 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, { if (remove) { blk_cleanup_queue(ctrl->ctrl.admin_q); - nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); + blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; @@ -757,6 +755,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, ctrl->device = ctrl->queues[0].device; ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); + /* + * Bind the async event SQE DMA mapping to the admin queue lifetime. + * It's safe, since any chage in the underlying RDMA device will issue + * error recovery and queue re-creation. + */ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); @@ -812,10 +815,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_cleanup_queue(ctrl->ctrl.admin_q); out_free_tagset: if (new) - nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); + blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); out_free_async_qe: nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; @@ -826,7 +830,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, { if (remove) { blk_cleanup_queue(ctrl->ctrl.connect_q); - nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); + blk_mq_free_tag_set(ctrl->ctrl.tagset); } nvme_rdma_free_io_queues(ctrl); } @@ -867,7 +871,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) blk_cleanup_queue(ctrl->ctrl.connect_q); out_free_tag_set: if (new) - nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); + blk_mq_free_tag_set(ctrl->ctrl.tagset); out_free_io_queues: nvme_rdma_free_io_queues(ctrl); return ret; @@ -877,9 +881,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); - blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, - &ctrl->ctrl); + if (ctrl->ctrl.admin_tagset) { + blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, + nvme_cancel_request, &ctrl->ctrl); + blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); + } blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); } @@ -889,9 +897,13 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, { if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); + 
nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); - blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, - &ctrl->ctrl); + if (ctrl->ctrl.tagset) { + blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, + nvme_cancel_request, &ctrl->ctrl); + blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); + } if (remove) nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); @@ -1033,6 +1045,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); @@ -1051,6 +1064,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); queue_work(nvme_wq, &ctrl->err_work); } @@ -1257,7 +1271,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, if (count <= dev->num_inline_segments) { if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && - queue->ctrl->use_inline_data && + queue->ctrl->use_inline_data && enable_inline_data && blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue)) { ret = nvme_rdma_map_sg_inline(queue, req, c, count); @@ -1423,10 +1437,11 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, req->result = cqe->result; if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { - if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) { + if (unlikely(!req->mr || + wc->ex.invalidate_rkey != req->mr->rkey)) { dev_err(queue->ctrl->ctrl.device, "Bogus remote invalidation for rkey %#x\n", - req->mr->rkey); + req->mr ? 
req->mr->rkey : 0); nvme_rdma_error_recovery(queue->ctrl); } } else if (req->mr) { @@ -1496,14 +1511,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) for (i = 0; i < queue->queue_size; i++) { ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); if (ret) - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, @@ -1595,14 +1606,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) if (ret) { dev_err(ctrl->ctrl.device, "rdma_connect failed (%d).\n", ret); - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, @@ -1628,14 +1635,11 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, complete(&queue->cm_done); return 0; case RDMA_CM_EVENT_REJECTED: - nvme_rdma_destroy_queue_ib(queue); cm_error = nvme_rdma_conn_rejected(queue, ev); break; case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: - nvme_rdma_destroy_queue_ib(queue); - /* fall through */ case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); @@ -1666,22 +1670,52 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +static void nvme_rdma_complete_timed_out(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + + nvme_rdma_stop_queue(queue); + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } +} + static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq, bool reserved) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; - dev_warn(req->queue->ctrl->ctrl.device, - "I/O %d QID %d timeout, reset controller\n", - rq->tag, nvme_rdma_queue_idx(req->queue)); - - /* queue error recovery */ - nvme_rdma_error_recovery(req->queue->ctrl); + dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", + rq->tag, nvme_rdma_queue_idx(queue)); - /* fail with DNR on cmd timeout */ - nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; + if (ctrl->ctrl.state != NVME_CTRL_LIVE) { + /* + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. + */ + nvme_rdma_complete_timed_out(rq); + return BLK_EH_DONE; + } - return BLK_EH_DONE; + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. 
+ */ + nvme_rdma_error_recovery(ctrl); + return BLK_EH_RESET_TIMER; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1704,12 +1738,20 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); dev = queue->device->dev; + + req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, + sizeof(struct nvme_command), + DMA_TO_DEVICE); + err = ib_dma_mapping_error(dev, req->sqe.dma); + if (unlikely(err)) + return BLK_STS_RESOURCE; + ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); ret = nvme_setup_cmd(ns, rq, c); if (ret) - return ret; + goto unmap_qe; blk_mq_start_request(rq); @@ -1736,8 +1778,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; err: if (err == -ENOMEM || err == -EAGAIN) - return BLK_STS_RESOURCE; - return BLK_STS_IOERR; + ret = BLK_STS_RESOURCE; + else + ret = BLK_STS_IOERR; +unmap_qe: + ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), + DMA_TO_DEVICE); + return ret; } static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) @@ -1764,8 +1811,12 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) static void nvme_rdma_complete_rq(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct ib_device *ibdev = queue->device->dev; - nvme_rdma_unmap_data(req->queue, rq); + nvme_rdma_unmap_data(queue, rq); + ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), + DMA_TO_DEVICE); nvme_complete_rq(rq); } @@ -1997,8 +2048,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); - nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); @@ -2083,8 +2132,16 @@ static int __init nvme_rdma_init_module(void) static void __exit nvme_rdma_cleanup_module(void) { + struct nvme_rdma_ctrl *ctrl; + nvmf_unregister_transport(&nvme_rdma_transport); ib_unregister_client(&nvme_rdma_ib_client); + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) + nvme_delete_ctrl(&ctrl->ctrl); + mutex_unlock(&nvme_rdma_ctrl_mutex); + flush_workqueue(nvme_delete_wq); } module_init(nvme_rdma_init_module); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2008fa62a373bb9ba84775e39b5b8956dd25bca6..a8eb8784e151fb38b0da838199bf19275dcb953e 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -68,9 +68,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, goto out; host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[READ]), 1000); host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); @@ -98,11 +100,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, if (!ns->bdev) continue; host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); - 
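Editor's note: the rdma timeout rework above completes a timed-out request on the spot when the controller is not live (teardown and connect commands cannot wait for error recovery), and otherwise schedules recovery and re-arms the block-layer timer. A condensed userspace model of that decision; states, return codes and helper names are simplified stand-ins:

#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_CONNECTING, CTRL_DELETING };
enum eh_ret { EH_DONE, EH_RESET_TIMER };

static void complete_timed_out(int tag) { printf("req %d force-completed\n", tag); }
static void start_error_recovery(void)  { printf("error recovery queued\n"); }

static enum eh_ret handle_timeout(enum ctrl_state state, int tag)
{
	if (state != CTRL_LIVE) {
		/* setup/teardown path: nothing else will complete this request */
		complete_timed_out(tag);
		return EH_DONE;
	}
	/* live controller: let the normal recovery path finish the request */
	start_error_recovery();
	return EH_RESET_TIMER;
}

int main(void)
{
	handle_timeout(CTRL_CONNECTING, 7);	/* completed immediately */
	handle_timeout(CTRL_LIVE, 8);		/* deferred to recovery */
	return 0;
}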
data_units_read += - part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written += - part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); } rcu_read_unlock(); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b5ec96abd04870209ed7ea97452180cf6cb63038..4a2ab093811e9f92e6905336195e357f842dfade 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -498,6 +498,7 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { u32 old_sqhd, new_sqhd; u16 sqhd; + struct nvmet_ns *ns = req->ns; if (status) nvmet_set_status(req, status); @@ -514,9 +515,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; - if (req->ns) - nvmet_put_namespace(req->ns); req->ops->queue_response(req); + if (ns) + nvmet_put_namespace(ns); } void nvmet_req_complete(struct nvmet_req *req, u16 status) @@ -921,6 +922,15 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys, return __nvmet_host_allowed(subsys, hostnqn); } +static void nvmet_fatal_error_handler(struct work_struct *work) +{ + struct nvmet_ctrl *ctrl = + container_of(work, struct nvmet_ctrl, fatal_err_work); + + pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); + ctrl->ops->delete_ctrl(ctrl); +} + u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) { @@ -962,6 +972,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); @@ -1076,21 +1087,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) kref_put(&ctrl->ref, nvmet_ctrl_free); } -static void nvmet_fatal_error_handler(struct work_struct *work) -{ - struct nvmet_ctrl *ctrl = - container_of(work, struct nvmet_ctrl, fatal_err_work); - - pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); - ctrl->ops->delete_ctrl(ctrl); -} - void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { mutex_lock(&ctrl->lock); if (!(ctrl->csts & NVME_CSTS_CFS)) { ctrl->csts |= NVME_CSTS_CFS; - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); schedule_work(&ctrl->fatal_err_work); } mutex_unlock(&ctrl->lock); diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d84ae004cb85cb96df5e26b9799ffcc047f71aad..637160deb3acab35d03f0fa6ef3c59da23ee9b5c 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -153,6 +153,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) goto out; } + d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; + d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, le32_to_cpu(c->kato), &ctrl); if (status) @@ -203,6 +205,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) goto out; } + d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; + d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req, &ctrl); diff --git 
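Editor's note: the SMART-log hunks above stop reporting raw sector counts and instead report "data units", i.e. thousands of 512-byte blocks rounded up, which appears to be what the rounding to 1000 implements. A small worked example of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t sectors_read = 2500;	/* 2500 * 512 bytes transferred */

	/* old report: 2500; new report: ceil(2500 / 1000) = 3 data units */
	assert(DIV_ROUND_UP(sectors_read, 1000) == 3);

	/* exact multiples are not inflated by the rounding */
	assert(DIV_ROUND_UP((uint64_t)4000, 1000) == 4);
	return 0;
}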
a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 5251689a1d9ac2e5a5852c724f7f54d2ec80801c..f0536d341f2f2a347c54216c971e824d8472648a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -648,6 +648,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, break; /* Fall-Thru to RSP handling */ + /* FALLTHRU */ case NVMET_FCOP_RSP: if (fcpreq) { @@ -824,6 +825,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { + .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 7bc9f624043296c2bd71d625b6a7ec36d9319015..1096dd01ca228813a16cca4ee808e74507a09f77 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -239,6 +239,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_write_zeroes: req->execute = nvmet_bdev_execute_write_zeroes; + req->data_len = 0; return 0; default: pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 81a9dc5290a8744b3f022aec8338098986b23967..ad6263cf7303cf38312b2a03ac59d07e3823f11f 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -38,9 +38,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns) ns->file = filp_open(ns->device_path, flags, 0); if (IS_ERR(ns->file)) { - pr_err("failed to open file %s: (%ld)\n", - ns->device_path, PTR_ERR(ns->file)); - return PTR_ERR(ns->file); + ret = PTR_ERR(ns->file); + pr_err("failed to open file %s: (%d)\n", + ns->device_path, ret); + ns->file = NULL; + return ret; } ret = vfs_getattr(&ns->file->f_path, @@ -246,7 +248,8 @@ static void nvmet_file_execute_discard(struct nvmet_req *req) break; offset = le64_to_cpu(range.slba) << req->ns->blksize_shift; - len = le32_to_cpu(range.nlb) << req->ns->blksize_shift; + len = le32_to_cpu(range.nlb); + len <<= req->ns->blksize_shift; if (offset + len > req->ns->size) { ret = NVME_SC_LBA_RANGE | NVME_SC_DNR; break; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 9908082b32c4b42647085f18bd5dcdf1e0184652..c7dcc5a58b2587e4dd395cf784ae58f54a7425d2 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -42,7 +42,10 @@ struct nvme_loop_ctrl { struct list_head list; struct blk_mq_tag_set tag_set; struct nvme_loop_iod async_event_iod; - struct nvme_ctrl ctrl; + union { + struct nvme_ctrl ctrl; + struct nvme_ctrl_plus ctrl_plus; + }; struct nvmet_ctrl *target_ctrl; struct nvmet_port *port; @@ -274,7 +277,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { - clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); + if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) + return; nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -309,6 +313,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } + ctrl->ctrl.queue_count = 1; } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -417,6 +422,7 @@ static int nvme_loop_configure_admin_queue(struct 
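Editor's note: the io-cmd-file.c discard hunk above splits "len = nlb << shift" into an assignment followed by a shift. A userspace demonstration of why that matters: shifting the 32-bit block count in place can overflow before the value ever reaches the wider variable (values chosen only to make the overflow visible):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t nlb = 0x01000000;		/* 16M blocks */
	unsigned int blksize_shift = 12;	/* 4 KiB blocks */
	uint64_t bad, good;

	bad = nlb << blksize_shift;	/* shift done in 32 bits: wraps to 0 */
	good = nlb;
	good <<= blksize_shift;		/* shift done in 64 bits: correct */

	assert(bad == 0);
	assert(good == (uint64_t)16 * 1024 * 1024 * 4096);
	return 0;
}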
nvme_loop_ctrl *ctrl) return 0; out_cleanup_queue: + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); blk_cleanup_queue(ctrl->ctrl.admin_q); out_free_tagset: blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -506,7 +512,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) out_disable: dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); } static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { @@ -602,8 +607,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops, 0 /* no quirks, we're perfect! */); - if (ret) - goto out_put_ctrl; + if (ret) { + kfree(ctrl); + goto out; + } ret = -ENOMEM; @@ -639,8 +646,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); - nvme_get_ctrl(&ctrl->ctrl); - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); @@ -658,8 +663,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); -out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); +out: if (ret > 0) ret = -EIO; return ERR_PTR(ret); @@ -678,6 +683,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port) mutex_lock(&nvme_loop_ports_mutex); list_del_init(&port->entry); mutex_unlock(&nvme_loop_ports_mutex); + + /* + * Ensure any ctrls that are in the process of being + * deleted are in fact deleted before we return + * and free the port. This is to prevent active + * ctrls from using a port after it's freed. + */ + flush_workqueue(nvme_delete_wq); } static const struct nvmet_fabrics_ops nvme_loop_ops = { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index bfc4da660bb4036c9d53764793824f5befb3edb2..08f997a390d5da6e49c711b53cf3ea560424f1f7 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_rsp *r); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) spin_unlock_irqrestore(&queue->rsps_lock, flags); if (unlikely(!rsp)) { - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); + int ret; + + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + if (unlikely(ret)) { + kfree(rsp); + return NULL; + } + rsp->allocated = true; } @@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { unsigned long flags; - if (rsp->allocated) { + if (unlikely(rsp->allocated)) { + nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } @@ -529,6 +542,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_rsp *rsp = container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); + struct nvmet_rdma_queue *queue = cq->cq_context; nvmet_rdma_release_rsp(rsp); @@ -536,7 +550,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) wc->status != 
IB_WC_WR_FLUSH_ERR)) { pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); - nvmet_rdma_error_comp(rsp->queue); + nvmet_rdma_error_comp(queue); } } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index aa1657831b70c2ca5075d1b580c32564c29b6a42..2e77d49c2657e80d5e5328c83e9b91c843d10519 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -415,10 +415,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem, if (!config->base_dev) return -EINVAL; - if (nvmem->read_only) - nvmem->eeprom = bin_attr_ro_root_nvmem; - else - nvmem->eeprom = bin_attr_rw_root_nvmem; + if (nvmem->read_only) { + if (config->root_only) + nvmem->eeprom = bin_attr_ro_root_nvmem; + else + nvmem->eeprom = bin_attr_ro_nvmem; + } else { + if (config->root_only) + nvmem->eeprom = bin_attr_rw_root_nvmem; + else + nvmem->eeprom = bin_attr_rw_nvmem; + } nvmem->eeprom.attr.name = "eeprom"; nvmem->eeprom.size = nvmem->size; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -516,11 +523,17 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) goto err_device_del; } - if (config->cells) - nvmem_add_cells(nvmem, config->cells, config->ncells); + if (config->cells) { + rval = nvmem_add_cells(nvmem, config->cells, config->ncells); + if (rval) + goto err_teardown_compat; + } return nvmem; +err_teardown_compat: + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); err_device_del: device_del(&nvmem->dev); err_put_device: @@ -679,7 +692,7 @@ static struct nvmem_device *nvmem_find(const char *name) d = bus_find_device_by_name(&nvmem_bus_type, NULL, name); if (!d) - return NULL; + return ERR_PTR(-ENOENT); return to_nvmem_device(d); } @@ -1022,7 +1035,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put); static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) { u8 *p, *b; - int i, bit_offset = cell->bit_offset; + int i, extra, bit_offset = cell->bit_offset; p = b = buf; if (bit_offset) { @@ -1037,13 +1050,19 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) p = b; *b++ >>= bit_offset; } - - /* result fits in less bytes */ - if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) - *p-- = 0; + } else { + /* point to the msb */ + p += cell->bytes - 1; } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + /* clear msb bits if any leftover in the last byte */ - *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); + if (cell->nbits % BITS_PER_BYTE) + *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); } static int __nvmem_cell_read(struct nvmem_device *nvmem, diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index afb429a417fe04e6d23347006a4967e4c01b3184..926d9cc080cf4db3112e6614fcc584a9f581adb5 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -466,6 +466,10 @@ static int imx_ocotp_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + clk_prepare_enable(priv->clk); + imx_ocotp_clr_err_if_set(priv->base); + clk_disable_unprepare(priv->clk); + priv->params = of_device_get_match_data(&pdev->dev); imx_ocotp_nvmem_config.size = 4 * priv->params->nregs; imx_ocotp_nvmem_config.dev = dev; diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index d020f89248fd76a7baca320bb6a4238a71d785c0..69f8e972e29c69c1a989827079d64c57edfe2732 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -235,8 +235,10 @@ 
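Editor's note: the nvmet-rdma get_rsp/put_rsp hunks above fall back to an on-demand allocation when the preallocated response pool is empty and mark it with an "allocated" flag so the put path frees it rather than returning it to the pool. A minimal userspace sketch of that pattern; the pool here is a trivial LIFO counter and all names are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rsp {
	bool allocated;		/* true only for emergency allocations */
};

#define POOL_SIZE 2
static struct rsp pool[POOL_SIZE];
static int pool_free = POOL_SIZE;

static struct rsp *get_rsp(void)
{
	struct rsp *r;

	if (pool_free > 0)
		return &pool[--pool_free];

	r = calloc(1, sizeof(*r));	/* pool exhausted: allocate and mark */
	if (!r)
		return NULL;
	r->allocated = true;
	return r;
}

static void put_rsp(struct rsp *r)
{
	if (r->allocated) {
		free(r);		/* never returned to the pool */
		return;
	}
	pool_free++;
}

int main(void)
{
	struct rsp *a = get_rsp(), *b = get_rsp(), *c = get_rsp();

	printf("c came from the heap: %d\n", c && c->allocated);
	put_rsp(c);
	put_rsp(b);
	put_rsp(a);
	return 0;
}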
static const struct sunxi_sid_cfg sun50i_a64_cfg = { static const struct of_device_id sunxi_sid_of_match[] = { { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); diff --git a/drivers/of/base.c b/drivers/of/base.c index 74eaedd5b860f1c1fd42faa187d5cda802d0ae8e..3925da5690d3eb67207850b13bcfc0d15f55bbba 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name) return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); } +EXPORT_SYMBOL(of_node_name_eq); bool of_node_name_prefix(const struct device_node *np, const char *prefix) { @@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix) return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; } +EXPORT_SYMBOL(of_node_name_prefix); int of_n_addr_cells(struct device_node *np) { @@ -113,9 +115,6 @@ int __weak of_node_to_nid(struct device_node *np) } #endif -static struct device_node **phandle_cache; -static u32 phandle_cache_mask; - /* * Assumptions behind phandle_cache implementation: * - phandle property values are in a contiguous range of 1..n @@ -124,6 +123,66 @@ static u32 phandle_cache_mask; * - the phandle lookup overhead reduction provided by the cache * will likely be less */ + +static struct device_node **phandle_cache; +static u32 phandle_cache_mask; + +/* + * Caller must hold devtree_lock. + */ +static void __of_free_phandle_cache(void) +{ + u32 cache_entries = phandle_cache_mask + 1; + u32 k; + + if (!phandle_cache) + return; + + for (k = 0; k < cache_entries; k++) + of_node_put(phandle_cache[k]); + + kfree(phandle_cache); + phandle_cache = NULL; +} + +int of_free_phandle_cache(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + + __of_free_phandle_cache(); + + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + return 0; +} +#if !defined(CONFIG_MODULES) +late_initcall_sync(of_free_phandle_cache); +#endif + +/* + * Caller must hold devtree_lock. 
+ */ +void __of_free_phandle_cache_entry(phandle handle) +{ + phandle masked_handle; + struct device_node *np; + + if (!handle) + return; + + masked_handle = handle & phandle_cache_mask; + + if (phandle_cache) { + np = phandle_cache[masked_handle]; + if (np && handle == np->phandle) { + of_node_put(np); + phandle_cache[masked_handle] = NULL; + } + } +} + void of_populate_phandle_cache(void) { unsigned long flags; @@ -133,8 +192,7 @@ void of_populate_phandle_cache(void) raw_spin_lock_irqsave(&devtree_lock, flags); - kfree(phandle_cache); - phandle_cache = NULL; + __of_free_phandle_cache(); for_each_of_allnodes(np) if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) @@ -152,30 +210,15 @@ void of_populate_phandle_cache(void) goto out; for_each_of_allnodes(np) - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) { + of_node_get(np); phandle_cache[np->phandle & phandle_cache_mask] = np; + } out: raw_spin_unlock_irqrestore(&devtree_lock, flags); } -int of_free_phandle_cache(void) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); - - kfree(phandle_cache); - phandle_cache = NULL; - - raw_spin_unlock_irqrestore(&devtree_lock, flags); - - return 0; -} -#if !defined(CONFIG_MODULES) -late_initcall_sync(of_free_phandle_cache); -#endif - void __init of_core_init(void) { struct device_node *np; @@ -1148,13 +1191,23 @@ struct device_node *of_find_node_by_phandle(phandle handle) if (phandle_cache[masked_handle] && handle == phandle_cache[masked_handle]->phandle) np = phandle_cache[masked_handle]; + if (np && of_node_check_flag(np, OF_DETACHED)) { + WARN_ON(1); /* did not uncache np on node removal */ + of_node_put(np); + phandle_cache[masked_handle] = NULL; + np = NULL; + } } if (!np) { for_each_of_allnodes(np) - if (np->phandle == handle) { - if (phandle_cache) + if (np->phandle == handle && + !of_node_check_flag(np, OF_DETACHED)) { + if (phandle_cache) { + /* will put when removed from cache */ + of_node_get(np); phandle_cache[masked_handle] = np; + } break; } } @@ -1581,6 +1634,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np, out_args->np = new; of_node_put(cur); cur = new; + new = NULL; } put: of_node_put(cur); @@ -2013,7 +2067,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np) /* OF on pmac has nodes instead of properties named "l2-cache" * beneath CPU nodes. */ - if (!strcmp(np->type, "cpu")) + if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu")) for_each_child_of_node(np, child) if (!strcmp(child->type, "cache")) return child; diff --git a/drivers/of/device.c b/drivers/of/device.c index 5957cd4fa262127dad2d360fc4a8a42d3de96187..65235ff63c5dbd04b13dfd02b3acf74433c493ef 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) * set by the driver. 
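Editor's note: the phandle-cache hunks above make the cache hold its own reference on every cached node (of_node_get() on insert, of_node_put() on eviction or teardown), so a cached entry cannot be freed behind the cache's back. A simplified userspace model of that reference discipline; struct node and the get/put helpers stand in for device_node and of_node_get()/of_node_put():

#include <assert.h>
#include <stddef.h>

struct node {
	int refcount;
};

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

#define CACHE_SIZE 4
static struct node *cache[CACHE_SIZE];

static void cache_insert(unsigned int slot, struct node *n)
{
	node_get(n);			/* cache owns a reference */
	cache[slot % CACHE_SIZE] = n;
}

static void cache_evict(unsigned int slot)
{
	struct node *n = cache[slot % CACHE_SIZE];

	if (n) {
		node_put(n);		/* drop the cache's reference */
		cache[slot % CACHE_SIZE] = NULL;
	}
}

int main(void)
{
	struct node n = { .refcount = 1 };	/* caller's reference */

	cache_insert(7, &n);
	assert(n.refcount == 2);
	cache_evict(7);
	assert(n.refcount == 1);
	return 0;
}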
*/ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); - dev->bus_dma_mask = mask; dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; + /* ...but only set bus mask if we found valid dma-ranges earlier */ + if (!ret) + dev->bus_dma_mask = mask; coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", @@ -219,17 +221,19 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len return -ENODEV; /* Name & Type */ - csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name, + /* %p eats all alphanum characters, so %c must be used here */ + csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T', dev->of_node->type); tsize = csize; + if (csize >= len) + csize = len > 0 ? len - 1 : 0; len -= csize; - if (str) - str += csize; + str += csize; of_property_for_each_string(dev->of_node, "compatible", p, compat) { csize = strlen(compat) + 1; tsize += csize; - if (csize > len) + if (csize >= len) continue; csize = snprintf(str, len, "C%s", compat); @@ -255,12 +259,15 @@ int of_device_request_module(struct device *dev) if (size < 0) return size; - str = kmalloc(size + 1, GFP_KERNEL); + /* Reserve an additional byte for the trailing '\0' */ + size++; + + str = kmalloc(size, GFP_KERNEL); if (!str) return -ENOMEM; of_device_get_modalias(dev, str, size); - str[size] = '\0'; + str[size - 1] = '\0'; ret = request_module(str); kfree(str); @@ -298,7 +305,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) if ((!dev) || (!dev->of_node)) return; - add_uevent_var(env, "OF_NAME=%s", dev->of_node->name); + add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node); add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); if (dev->of_node->type && strcmp("", dev->of_node->type) != 0) add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index f4f8ed9b5454cb91f618572d4155df0f27c667a5..a09c1c3cf831e70f1dc12102558aa1143c69138b 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -205,15 +205,24 @@ static void __of_attach_node(struct device_node *np) const __be32 *phandle; int sz; - np->name = __of_get_property(np, "name", NULL) ? : ""; - np->type = __of_get_property(np, "device_type", NULL) ? : ""; - - phandle = __of_get_property(np, "phandle", &sz); - if (!phandle) - phandle = __of_get_property(np, "linux,phandle", &sz); - if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) - phandle = __of_get_property(np, "ibm,phandle", &sz); - np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; + if (!of_node_check_flag(np, OF_OVERLAY)) { + np->name = __of_get_property(np, "name", NULL); + np->type = __of_get_property(np, "device_type", NULL); + if (!np->name) + np->name = ""; + if (!np->type) + np->type = ""; + + phandle = __of_get_property(np, "phandle", &sz); + if (!phandle) + phandle = __of_get_property(np, "linux,phandle", &sz); + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) + phandle = __of_get_property(np, "ibm,phandle", &sz); + if (phandle && (sz >= 4)) + np->phandle = be32_to_cpup(phandle); + else + np->phandle = 0; + } np->child = NULL; np->sibling = np->parent->child; @@ -268,13 +277,13 @@ void __of_detach_node(struct device_node *np) } of_node_set_flag(np, OF_DETACHED); + + /* race with of_find_node_by_phandle() prevented by devtree_lock */ + __of_free_phandle_cache_entry(np->phandle); } /** * of_detach_node() - "Unplug" a node from the device tree. - * - * The caller must hold a reference to the node. 
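Editor's note: the of/device.c hunks above rely on two snprintf() facts: it returns the length it wanted to write, so the cursor must be clamped before advancing on truncation, and the module-request buffer reserves one extra byte that is explicitly NUL-terminated. A small userspace sketch of the clamping idiom (buffer sizes and strings are arbitrary):

#include <assert.h>
#include <stdio.h>
#include <string.h>

static size_t append(char *dst, size_t len, const char *part)
{
	int csize = snprintf(dst, len, "%s", part);

	if (csize < 0)
		return 0;
	if ((size_t)csize >= len)	/* truncated: clamp the advance */
		csize = len > 0 ? len - 1 : 0;
	return (size_t)csize;
}

int main(void)
{
	char buf[8];
	size_t off = 0;

	off += append(buf + off, sizeof(buf) - off, "of:N");
	off += append(buf + off, sizeof(buf) - off, "serial");	/* truncates */
	buf[sizeof(buf) - 1] = '\0';	/* reserved terminator byte */

	assert(off == sizeof(buf) - 1);
	assert(strcmp(buf, "of:Nser") == 0);
	return 0;
}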
The memory associated with - * the node is not freed until its refcount goes to zero. */ int of_detach_node(struct device_node *np) { @@ -330,6 +339,25 @@ void of_node_release(struct kobject *kobj) if (!of_node_check_flag(node, OF_DYNAMIC)) return; + if (of_node_check_flag(node, OF_OVERLAY)) { + + if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) { + /* premature refcount of zero, do not free memory */ + pr_err("ERROR: memory leak before free overlay changeset, %pOF\n", + node); + return; + } + + /* + * If node->properties non-empty then properties were added + * to this node either by different overlay that has not + * yet been removed, or by a non-overlay mechanism. + */ + if (node->properties) + pr_err("ERROR: %s(), unexpected properties in %pOF\n", + __func__, node); + } + property_list_free(node->properties); property_list_free(node->deadprops); @@ -434,6 +462,16 @@ struct device_node *__of_node_dup(const struct device_node *np, static void __of_changeset_entry_destroy(struct of_changeset_entry *ce) { + if (ce->action == OF_RECONFIG_ATTACH_NODE && + of_node_check_flag(ce->np, OF_OVERLAY)) { + if (kref_read(&ce->np->kobj.kref) > 1) { + pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n", + kref_read(&ce->np->kobj.kref), ce->np); + } else { + of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET); + } + } + of_node_put(ce->np); list_del(&ce->node); kfree(ce); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 800ad252cf9c635935f1a0ab75b14ae0b16bc003..a0265c6f280e340db07102e81fba3c4d76df15ed 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -391,7 +391,7 @@ static int unflatten_dt_nodes(const void *blob, for (offset = 0; offset >= 0 && depth >= initial_depth; offset = fdt_next_node(blob, offset, &depth)) { - if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) + if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) continue; if (!IS_ENABLED(CONFIG_OF_KOBJ) && @@ -923,6 +923,8 @@ static void __init early_init_dt_check_for_initrd(unsigned long node) if (!prop) return; end = of_read_number(prop, len/4); + if (start > end) + return; __early_init_dt_declare_initrd(start, end); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 02ad93a304a46dd252331791285234191f41ec05..f06c9df60e34d240988effa7bf35acfa1a9ae6b2 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -289,7 +289,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar struct device_node *p; const __be32 *addr; u32 intsize; - int i, res; + int i, res, addr_len; + __be32 addr_buf[3] = { 0 }; pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index); @@ -298,13 +299,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar return of_irq_parse_oldworld(device, index, out_irq); /* Get the reg property (if any) */ - addr = of_get_property(device, "reg", NULL); + addr = of_get_property(device, "reg", &addr_len); + + /* Prevent out-of-bounds read in case of longer interrupt parent address size */ + if (addr_len > (3 * sizeof(__be32))) + addr_len = 3 * sizeof(__be32); + if (addr) + memcpy(addr_buf, addr, addr_len); /* Try the new-style interrupts-extended first */ res = of_parse_phandle_with_args(device, "interrupts-extended", "#interrupt-cells", index, out_irq); if (!res) - return of_irq_parse_raw(addr, out_irq); + return of_irq_parse_raw(addr_buf, out_irq); /* Look for the interrupt parent. 
*/ p = of_irq_find_parent(device); @@ -334,7 +341,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Check if there are any interrupt-map translations to process */ - res = of_irq_parse_raw(addr, out_irq); + res = of_irq_parse_raw(addr_buf, out_irq); out: of_node_put(p); return res; diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c index 7a0a18980b98b7688ef5637e4fffc4176f493206..c72eef98804175c9d21fc9043c6582aec04bde13 100644 --- a/drivers/of/kobj.c +++ b/drivers/of/kobj.c @@ -133,6 +133,9 @@ int __of_attach_node_sysfs(struct device_node *np) } if (!name) return -ENOMEM; + + of_node_get(np); + rc = kobject_add(&np->kobj, parent, "%s", name); kfree(name); if (rc) @@ -159,6 +162,5 @@ void __of_detach_node_sysfs(struct device_node *np) kobject_del(&np->kobj); } - /* finally remove the kobj_init ref */ of_node_put(np); } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index e92391d6d1bd6a52de298fc53dedf5902016b005..5ad1342f568252bb8fb6b845f1ae5e0b8e24fb50 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, return rc; } - dev_dbg(&mdio->dev, "registered phy %s at address %i\n", - child->name, addr); + dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n", + child, addr); return 0; } @@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio, return rc; } - dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", - child->name, addr); + dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n", + child, addr); return 0; } @@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) continue; /* be noisy to encourage people to set reg property */ - dev_info(&mdio->dev, "scan phy %s at address %i\n", - child->name, addr); + dev_info(&mdio->dev, "scan phy %pOFn at address %i\n", + child, addr); if (of_mdiobus_child_is_phy(child)) { rc = of_mdiobus_register_phy(mdio, child, addr); diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index 27d9b4bba535c21181324d6652129f84ff174083..f5b4522180928f7d600442ad7c0466f430e8412d 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -115,9 +115,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) distance = of_read_number(matrix, 1); matrix++; + if ((nodea == nodeb && distance != LOCAL_DISTANCE) || + (nodea != nodeb && distance <= LOCAL_DISTANCE)) { + pr_err("Invalid distance[node%d -> node%d] = %d\n", + nodea, nodeb, distance); + return -EINVAL; + } + numa_set_distance(nodea, nodeb, distance); - pr_debug("distance[node%d -> node%d] = %d\n", - nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ if (nodeb > nodea) @@ -163,8 +168,8 @@ int of_node_to_nid(struct device_node *device) np = of_get_next_parent(np); } if (np && r) - pr_warn("Invalid \"numa-node-id\" property in node %s\n", - np->name); + pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n", + np); of_node_put(np); /* diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 216175d11d3dc2ca3fdfaa429306ecf50218a01a..f5da842841e531c219f58e9fd0fcae81e7b078d0 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -76,6 +76,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {} int of_resolve_phandles(struct device_node *tree); #endif +#if defined(CONFIG_OF_DYNAMIC) +void __of_free_phandle_cache_entry(phandle handle); +#endif + #if defined(CONFIG_OF_OVERLAY) void 
of_overlay_mutex_lock(void); void of_overlay_mutex_unlock(void); diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 895c83e0c7b6c4c816b1aad72c67e2a1a5235a88..a3a3180b1561b4b1b5edfad81505201a2d12500e 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -37,22 +37,15 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, * panic()s on allocation failure. */ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end; - base = __memblock_alloc_base(size, align, end); + base = memblock_find_in_range(start, end, size, align); if (!base) return -ENOMEM; - /* - * Check if the allocated region fits in to start..end window - */ - if (base < start) { - memblock_free(base, size); - return -ENOMEM; - } - *res_base = base; if (nomap) return memblock_remove(base, size); - return 0; + + return memblock_reserve(base, size); } #else int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, @@ -191,6 +184,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) { extern const struct of_device_id __reservedmem_of_table[]; const struct of_device_id *i; + int ret = -ENOENT; for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { reservedmem_of_init_fn initfn = i->data; @@ -199,13 +193,14 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem) if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) continue; - if (initfn(rmem) == 0) { + ret = initfn(rmem); + if (ret == 0) { pr_info("initialized node %s, compatible id %s\n", rmem->name, compat); - return 0; + break; } } - return -ENOENT; + return ret; } static int __init __rmem_cmp(const void *a, const void *b) @@ -265,7 +260,9 @@ void __init fdt_init_reserved_mem(void) int len; const __be32 *prop; int err = 0; + int nomap; + nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; prop = of_get_flat_dt_prop(node, "phandle", &len); if (!prop) prop = of_get_flat_dt_prop(node, "linux,phandle", &len); @@ -275,8 +272,16 @@ void __init fdt_init_reserved_mem(void) if (rmem->size == 0) err = __reserved_mem_alloc_size(node, rmem->name, &rmem->base, &rmem->size); - if (err == 0) - __reserved_mem_init_node(rmem); + if (err == 0) { + err = __reserved_mem_init_node(rmem); + if (err != 0 && err != -ENOENT) { + pr_info("node %s compatible matching fail\n", + rmem->name); + memblock_free(rmem->base, rmem->size); + if (nomap) + memblock_add(rmem->base, rmem->size); + } + } } } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index eda57ef12fd057b3d92c750dec983703e85f38e3..b0a765fcef2f16276b08ac775e1375306b2db11f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -23,6 +23,26 @@ #include "of_private.h" +/** + * struct target - info about current target node as recursing through overlay + * @np: node where current level of overlay will be applied + * @in_livetree: @np is a node in the live devicetree + * + * Used in the algorithm to create the portion of a changeset that describes + * an overlay fragment, which is a devicetree subtree. Initially @np is a node + * in the live devicetree where the overlay subtree is targeted to be grafted + * into. When recursing to the next level of the overlay subtree, the target + * also recurses to the next level of the live devicetree, as long as overlay + * subtree node also exists in the live devicetree. 
When a node in the overlay + * subtree does not exist at the same level in the live devicetree, target->np + * points to a newly allocated node, and all subsequent targets in the subtree + * will be newly allocated nodes. + */ +struct target { + struct device_node *np; + bool in_livetree; +}; + /** * struct fragment - info about fragment nodes in overlay expanded device tree * @target: target of the overlay operation @@ -72,8 +92,7 @@ static int devicetree_corrupt(void) } static int build_changeset_next_level(struct overlay_changeset *ovcs, - struct device_node *target_node, - const struct device_node *overlay_node); + struct target *target, const struct device_node *overlay_node); /* * of_resolve_phandles() finds the largest phandle in the live tree. @@ -242,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop( of_property_set_flag(new_prop, OF_DYNAMIC); + kfree(target_path); + return new_prop; err_free_new_prop: @@ -257,15 +278,23 @@ static struct property *dup_and_fixup_symbol_prop( /** * add_changeset_property() - add @overlay_prop to overlay changeset * @ovcs: overlay changeset - * @target_node: where to place @overlay_prop in live tree + * @target: where @overlay_prop will be placed * @overlay_prop: property to add or update, from overlay tree * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__" * - * If @overlay_prop does not already exist in @target_node, add changeset entry - * to add @overlay_prop in @target_node, else add changeset entry to update + * If @overlay_prop does not already exist in live devicetree, add changeset + * entry to add @overlay_prop in @target, else add changeset entry to update * value of @overlay_prop. * - * Some special properties are not updated (no error returned). + * @target may be either in the live devicetree or in a new subtree that + * is contained in the changeset. + * + * Some special properties are not added or updated (no error returned): + * "name", "phandle", "linux,phandle". + * + * Properties "#address-cells" and "#size-cells" are not updated if they + * are already in the live tree, but if present in the live tree, the values + * in the overlay must match the values in the live tree. * * Update of property in symbols node is not allowed. * @@ -273,19 +302,41 @@ static struct property *dup_and_fixup_symbol_prop( * invalid @overlay. 
*/ static int add_changeset_property(struct overlay_changeset *ovcs, - struct device_node *target_node, - struct property *overlay_prop, + struct target *target, struct property *overlay_prop, bool is_symbols_prop) { struct property *new_prop = NULL, *prop; int ret = 0; - prop = of_find_property(target_node, overlay_prop->name, NULL); + if (target->in_livetree) + if (!of_prop_cmp(overlay_prop->name, "name") || + !of_prop_cmp(overlay_prop->name, "phandle") || + !of_prop_cmp(overlay_prop->name, "linux,phandle")) + return 0; - if (!of_prop_cmp(overlay_prop->name, "name") || - !of_prop_cmp(overlay_prop->name, "phandle") || - !of_prop_cmp(overlay_prop->name, "linux,phandle")) - return 0; + if (target->in_livetree) + prop = of_find_property(target->np, overlay_prop->name, NULL); + else + prop = NULL; + + if (prop) { + if (!of_prop_cmp(prop->name, "#address-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + + } else if (!of_prop_cmp(prop->name, "#size-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + } + } if (is_symbols_prop) { if (prop) @@ -298,12 +349,21 @@ static int add_changeset_property(struct overlay_changeset *ovcs, if (!new_prop) return -ENOMEM; - if (!prop) - ret = of_changeset_add_property(&ovcs->cset, target_node, + if (!prop) { + if (!target->in_livetree) { + new_prop->next = target->np->deadprops; + target->np->deadprops = new_prop; + } + ret = of_changeset_add_property(&ovcs->cset, target->np, new_prop); - else - ret = of_changeset_update_property(&ovcs->cset, target_node, + } else { + ret = of_changeset_update_property(&ovcs->cset, target->np, new_prop); + } + + if (!of_node_check_flag(target->np, OF_OVERLAY)) + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", + target->np, new_prop->name); if (ret) { kfree(new_prop->name); @@ -315,14 +375,14 @@ static int add_changeset_property(struct overlay_changeset *ovcs, /** * add_changeset_node() - add @node (and children) to overlay changeset - * @ovcs: overlay changeset - * @target_node: where to place @node in live tree - * @node: node from within overlay device tree fragment + * @ovcs: overlay changeset + * @target: where @node will be placed in live tree or changeset + * @node: node from within overlay device tree fragment * - * If @node does not already exist in @target_node, add changeset entry - * to add @node in @target_node. + * If @node does not already exist in @target, add changeset entry + * to add @node in @target. * - * If @node already exists in @target_node, and the existing node has + * If @node already exists in @target, and the existing node has * a phandle, the overlay node is not allowed to have a phandle. * * If @node has child nodes, add the children recursively via @@ -355,36 +415,60 @@ static int add_changeset_property(struct overlay_changeset *ovcs, * invalid @overlay. 
*/ static int add_changeset_node(struct overlay_changeset *ovcs, - struct device_node *target_node, struct device_node *node) + struct target *target, struct device_node *node) { const char *node_kbasename; + const __be32 *phandle; struct device_node *tchild; - int ret = 0; + struct target target_child; + int ret = 0, size; node_kbasename = kbasename(node->full_name); - for_each_child_of_node(target_node, tchild) + for_each_child_of_node(target->np, tchild) if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name))) break; if (!tchild) { - tchild = __of_node_dup(node, node_kbasename); + tchild = __of_node_dup(NULL, node_kbasename); if (!tchild) return -ENOMEM; - tchild->parent = target_node; + tchild->parent = target->np; + tchild->name = __of_get_property(node, "name", NULL); + tchild->type = __of_get_property(node, "device_type", NULL); + + if (!tchild->name) + tchild->name = ""; + if (!tchild->type) + tchild->type = ""; + + /* ignore obsolete "linux,phandle" */ + phandle = __of_get_property(node, "phandle", &size); + if (phandle && (size == 4)) + tchild->phandle = be32_to_cpup(phandle); + + of_node_set_flag(tchild, OF_OVERLAY); ret = of_changeset_attach_node(&ovcs->cset, tchild); if (ret) return ret; - return build_changeset_next_level(ovcs, tchild, node); + target_child.np = tchild; + target_child.in_livetree = false; + + ret = build_changeset_next_level(ovcs, &target_child, node); + of_node_put(tchild); + return ret; } - if (node->phandle && tchild->phandle) + if (node->phandle && tchild->phandle) { ret = -EINVAL; - else - ret = build_changeset_next_level(ovcs, tchild, node); + } else { + target_child.np = tchild; + target_child.in_livetree = target->in_livetree; + ret = build_changeset_next_level(ovcs, &target_child, node); + } of_node_put(tchild); return ret; @@ -393,7 +477,7 @@ static int add_changeset_node(struct overlay_changeset *ovcs, /** * build_changeset_next_level() - add level of overlay changeset * @ovcs: overlay changeset - * @target_node: where to place @overlay_node in live tree + * @target: where to place @overlay_node in live tree * @overlay_node: node from within an overlay device tree fragment * * Add the properties (if any) and nodes (if any) from @overlay_node to the @@ -406,27 +490,26 @@ static int add_changeset_node(struct overlay_changeset *ovcs, * invalid @overlay_node. */ static int build_changeset_next_level(struct overlay_changeset *ovcs, - struct device_node *target_node, - const struct device_node *overlay_node) + struct target *target, const struct device_node *overlay_node) { struct device_node *child; struct property *prop; int ret; for_each_property_of_node(overlay_node, prop) { - ret = add_changeset_property(ovcs, target_node, prop, 0); + ret = add_changeset_property(ovcs, target, prop, 0); if (ret) { pr_debug("Failed to apply prop @%pOF/%s, err=%d\n", - target_node, prop->name, ret); + target->np, prop->name, ret); return ret; } } for_each_child_of_node(overlay_node, child) { - ret = add_changeset_node(ovcs, target_node, child); + ret = add_changeset_node(ovcs, target, child); if (ret) { - pr_debug("Failed to apply node @%pOF/%s, err=%d\n", - target_node, child->name, ret); + pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n", + target->np, child, ret); of_node_put(child); return ret; } @@ -439,17 +522,17 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs, * Add the properties from __overlay__ node to the @ovcs->cset changeset. 
*/ static int build_changeset_symbols_node(struct overlay_changeset *ovcs, - struct device_node *target_node, + struct target *target, const struct device_node *overlay_symbols_node) { struct property *prop; int ret; for_each_property_of_node(overlay_symbols_node, prop) { - ret = add_changeset_property(ovcs, target_node, prop, 1); + ret = add_changeset_property(ovcs, target, prop, 1); if (ret) { pr_debug("Failed to apply prop @%pOF/%s, err=%d\n", - target_node, prop->name, ret); + target->np, prop->name, ret); return ret; } } @@ -472,6 +555,7 @@ static int build_changeset_symbols_node(struct overlay_changeset *ovcs, static int build_changeset(struct overlay_changeset *ovcs) { struct fragment *fragment; + struct target target; int fragments_count, i, ret; /* @@ -486,7 +570,9 @@ static int build_changeset(struct overlay_changeset *ovcs) for (i = 0; i < fragments_count; i++) { fragment = &ovcs->fragments[i]; - ret = build_changeset_next_level(ovcs, fragment->target, + target.np = fragment->target; + target.in_livetree = true; + ret = build_changeset_next_level(ovcs, &target, fragment->overlay); if (ret) { pr_debug("apply failed '%pOF'\n", fragment->target); @@ -496,7 +582,10 @@ static int build_changeset(struct overlay_changeset *ovcs) if (ovcs->symbols_fragment) { fragment = &ovcs->fragments[ovcs->count - 1]; - ret = build_changeset_symbols_node(ovcs, fragment->target, + + target.np = fragment->target; + target.in_livetree = true; + ret = build_changeset_symbols_node(ovcs, &target, fragment->overlay); if (ret) { pr_debug("apply failed '%pOF'\n", fragment->target); @@ -514,7 +603,7 @@ static int build_changeset(struct overlay_changeset *ovcs) * 1) "target" property containing the phandle of the target * 2) "target-path" property containing the path of the target */ -static struct device_node *find_target_node(struct device_node *info_node) +static struct device_node *find_target(struct device_node *info_node) { struct device_node *node; const char *path; @@ -620,10 +709,11 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs, fragment = &fragments[cnt]; fragment->overlay = overlay_node; - fragment->target = find_target_node(node); + fragment->target = find_target(node); if (!fragment->target) { of_node_put(fragment->overlay); ret = -EINVAL; + of_node_put(node); goto err_free_fragments; } diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 6c59673933e90817dea1023f3e200c486f82fdde..04ad312fd85b9a2af8920fb81092b3ae4ca65a41 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -91,8 +91,8 @@ static void of_device_make_bus_id(struct device *dev) */ reg = of_get_property(node, "reg", NULL); if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { - dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s", - (unsigned long long)addr, node->name, + dev_set_name(dev, dev_name(dev) ? 
"%llx.%pOFn:%s" : "%llx.%pOFn", + (unsigned long long)addr, node, dev_name(dev)); return; } @@ -142,8 +142,8 @@ struct platform_device *of_device_alloc(struct device_node *np, WARN_ON(rc); } if (of_irq_to_resource_table(np, res, num_irq) != num_irq) - pr_debug("not all legacy IRQ resources mapped for %s\n", - np->name); + pr_debug("not all legacy IRQ resources mapped for %pOFn\n", + np); } dev->dev.of_node = of_node_get(np); diff --git a/drivers/of/property.c b/drivers/of/property.c index f46828e3b082b87966d5c5be3d7df1637f0391a9..13c7e55f5cbadd95c898226476f61b4fb50a0a61 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, if (!of_device_is_available(remote)) { pr_debug("not available for remote node\n"); + of_node_put(remote); return NULL; } @@ -917,8 +918,10 @@ of_fwnode_get_reference_args(const struct fwnode_handle *fwnode, nargs, index, &of_args); if (ret < 0) return ret; - if (!args) + if (!args) { + of_node_put(of_args.np); return 0; + } args->nargs = of_args.args_count; args->fwnode = of_fwnode_handle(of_args.np); diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index 7edfac6f1914459f135af8d96823ea338770c6a6..ad5f24c2d2a9b03d18737b5c281d681646987b9d 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, for_each_child_of_node(local_fixups, child) { for_each_child_of_node(overlay, overlay_child) - if (!node_name_cmp(child, overlay_child)) + if (!node_name_cmp(child, overlay_child)) { + of_node_put(overlay_child); break; + } - if (!overlay_child) + if (!overlay_child) { + of_node_put(child); return -EINVAL; + } err = adjust_local_phandle_references(child, overlay_child, phandle_delta); - if (err) + if (err) { + of_node_put(child); return err; + } } return 0; diff --git a/drivers/of/unittest-data/overlay_15.dts b/drivers/of/unittest-data/overlay_15.dts index b98f2514df4b35b42c38bc4b2b7abef697b8ff75..5728490474f6bd2d628a23ec491fe2cf106645b2 100644 --- a/drivers/of/unittest-data/overlay_15.dts +++ b/drivers/of/unittest-data/overlay_15.dts @@ -20,8 +20,8 @@ #size-cells = <0>; reg = <0>; - test-mux-dev { - reg = <32>; + test-mux-dev@20 { + reg = <0x20>; compatible = "unittest-i2c-dev"; status = "okay"; }; diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi index 25cf397b8f6b67f964646d4b310e5daf8b76b10e..4ea024d908ee22d6eae398c2f6ef8f6c59c12431 100644 --- a/drivers/of/unittest-data/tests-overlay.dtsi +++ b/drivers/of/unittest-data/tests-overlay.dtsi @@ -103,8 +103,8 @@ #size-cells = <0>; reg = <0>; - test-mux-dev { - reg = <32>; + test-mux-dev@20 { + reg = <0x20>; compatible = "unittest-i2c-dev"; status = "okay"; }; diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi index 6b33be4c4416ce7bd72d5639a5cfe4b03a79f082..aa0d7027ffa68942889d9cfc0f845b8a469dd19c 100644 --- a/drivers/of/unittest-data/tests-phandle.dtsi +++ b/drivers/of/unittest-data/tests-phandle.dtsi @@ -38,6 +38,13 @@ phandle-map-pass-thru = <0x0 0xf0>; }; + provider5: provider5 { + #phandle-cells = <2>; + phandle-map = <2 7 &provider4 2 3>; + phandle-map-mask = <0xff 0xf>; + phandle-map-pass-thru = <0x0 0xf0>; + }; + consumer-a { phandle-list = <&provider1 1>, <&provider2 2 0>, @@ -64,7 +71,8 @@ <&provider4 4 0x100>, <&provider4 0 0x61>, <&provider0>, - <&provider4 19 0x20>; + <&provider4 19 0x20>, + 
<&provider5 2 7>; phandle-list-bad-phandle = <12345678 0 0>; phandle-list-bad-args = <&provider2 1 0>, <&provider4 0>; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 41b49716ac75f24f2f97d382fb4271f2998217ce..80f5ab19a63c627cf080070fbdb0a9f6447879cc 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -45,6 +45,12 @@ static struct unittest_results { failed; \ }) +#ifdef CONFIG_OF_KOBJ +#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref) +#else +#define OF_KREF_READ(NODE) 1 +#endif + static void __init of_unittest_find_node_by_name(void) { struct device_node *np; @@ -212,8 +218,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np) for_each_child_of_node(np, child) { if (child->parent != np) { - pr_err("Child node %s links to wrong parent %s\n", - child->name, np->name); + pr_err("Child node %pOFn links to wrong parent %pOFn\n", + child, np); rc = -EINVAL; goto put_child; } @@ -375,6 +381,7 @@ static void __init of_unittest_parse_phandle_with_args(void) for (i = 0; i < 8; i++) { bool passed = true; + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args(np, "phandle-list", "#phandle-cells", i, &args); @@ -425,9 +432,13 @@ static void __init of_unittest_parse_phandle_with_args(void) unittest(passed, "index %i - data error on node %pOF rc=%i\n", i, args.np, rc); + + if (rc == 0) + of_node_put(args.np); } /* Check for missing list property */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args(np, "phandle-list-missing", "#phandle-cells", 0, &args); unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); @@ -436,6 +447,7 @@ static void __init of_unittest_parse_phandle_with_args(void) unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); /* Check for missing cells property */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args(np, "phandle-list", "#phandle-cells-missing", 0, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); @@ -444,6 +456,7 @@ static void __init of_unittest_parse_phandle_with_args(void) unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); /* Check for bad phandle in list */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle", "#phandle-cells", 0, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); @@ -452,6 +465,7 @@ static void __init of_unittest_parse_phandle_with_args(void) unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); /* Check for incorrectly formed argument list */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", "#phandle-cells", 1, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); @@ -462,8 +476,9 @@ static void __init of_unittest_parse_phandle_with_args(void) static void __init of_unittest_parse_phandle_with_args_map(void) { - struct device_node *np, *p0, *p1, *p2, *p3; + struct device_node *np, *p[6] = {}; struct of_phandle_args args; + unsigned int prefs[6]; int i, rc; np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b"); @@ -472,36 +487,27 @@ static void __init of_unittest_parse_phandle_with_args_map(void) return; } - p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0"); - if (!p0) { - pr_err("missing testcase data\n"); - return; - } - - p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1"); - if (!p1) { - pr_err("missing testcase data\n"); - return; - } - - p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2"); - if (!p2) { - 
pr_err("missing testcase data\n"); - return; - } - - p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3"); - if (!p3) { - pr_err("missing testcase data\n"); - return; + p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0"); + p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1"); + p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2"); + p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3"); + p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4"); + p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5"); + for (i = 0; i < ARRAY_SIZE(p); ++i) { + if (!p[i]) { + pr_err("missing testcase data\n"); + return; + } + prefs[i] = OF_KREF_READ(p[i]); } rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); - unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc); + unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc); - for (i = 0; i < 8; i++) { + for (i = 0; i < 9; i++) { bool passed = true; + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args_map(np, "phandle-list", "phandle", i, &args); @@ -509,13 +515,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void) switch (i) { case 0: passed &= !rc; - passed &= (args.np == p1); + passed &= (args.np == p[1]); passed &= (args.args_count == 1); passed &= (args.args[0] == 1); break; case 1: passed &= !rc; - passed &= (args.np == p3); + passed &= (args.np == p[3]); passed &= (args.args_count == 3); passed &= (args.args[0] == 2); passed &= (args.args[1] == 5); @@ -526,28 +532,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void) break; case 3: passed &= !rc; - passed &= (args.np == p0); + passed &= (args.np == p[0]); passed &= (args.args_count == 0); break; case 4: passed &= !rc; - passed &= (args.np == p1); + passed &= (args.np == p[1]); passed &= (args.args_count == 1); passed &= (args.args[0] == 3); break; case 5: passed &= !rc; - passed &= (args.np == p0); + passed &= (args.np == p[0]); passed &= (args.args_count == 0); break; case 6: passed &= !rc; - passed &= (args.np == p2); + passed &= (args.np == p[2]); passed &= (args.args_count == 2); passed &= (args.args[0] == 15); passed &= (args.args[1] == 0x20); break; case 7: + passed &= !rc; + passed &= (args.np == p[3]); + passed &= (args.args_count == 3); + passed &= (args.args[0] == 2); + passed &= (args.args[1] == 5); + passed &= (args.args[2] == 3); + break; + case 8: passed &= (rc == -ENOENT); break; default: @@ -556,27 +570,41 @@ static void __init of_unittest_parse_phandle_with_args_map(void) unittest(passed, "index %i - data error on node %s rc=%i\n", i, args.np->full_name, rc); + + if (rc == 0) + of_node_put(args.np); } /* Check for missing list property */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args_map(np, "phandle-list-missing", "phandle", 0, &args); unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); /* Check for missing cells,map,mask property */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args_map(np, "phandle-list", "phandle-missing", 0, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); /* Check for bad phandle in list */ + memset(&args, 0, sizeof(args)); rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", "phandle", 0, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); /* Check for incorrectly formed argument list */ + memset(&args, 0, sizeof(args)); rc = 
of_parse_phandle_with_args_map(np, "phandle-list-bad-args", "phandle", 1, &args); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + for (i = 0; i < ARRAY_SIZE(p); ++i) { + unittest(prefs[i] == OF_KREF_READ(p[i]), + "provider%d: expected:%d got:%d\n", + i, prefs[i], OF_KREF_READ(p[i])); + of_node_put(p[i]); + } } static void __init of_unittest_property_string(void) @@ -783,7 +811,7 @@ static void __init of_unittest_parse_interrupts(void) for (i = 0; i < 4; i++) { bool passed = true; - args.args_count = 0; + memset(&args, 0, sizeof(args)); rc = of_irq_parse_one(np, i, &args); passed &= !rc; @@ -804,7 +832,7 @@ static void __init of_unittest_parse_interrupts(void) for (i = 0; i < 4; i++) { bool passed = true; - args.args_count = 0; + memset(&args, 0, sizeof(args)); rc = of_irq_parse_one(np, i, &args); /* Test the values from tests-phandle.dtsi */ @@ -860,6 +888,7 @@ static void __init of_unittest_parse_interrupts_extended(void) for (i = 0; i < 7; i++) { bool passed = true; + memset(&args, 0, sizeof(args)); rc = of_irq_parse_one(np, i, &args); /* Test the values from tests-phandle.dtsi */ @@ -1029,8 +1058,10 @@ static void __init of_unittest_platform_populate(void) test_bus = platform_device_register_full(&test_bus_info); rc = PTR_ERR_OR_ZERO(test_bus); unittest(!rc, "testbus registration failed; rc=%i\n", rc); - if (rc) + if (rc) { + of_node_put(np); return; + } test_bus->dev.of_node = np; /* @@ -1046,16 +1077,16 @@ static void __init of_unittest_platform_populate(void) for_each_child_of_node(np, child) { for_each_child_of_node(child, grandchild) unittest(of_find_device_by_node(grandchild), - "Could not create device for node '%s'\n", - grandchild->name); + "Could not create device for node '%pOFn'\n", + grandchild); } of_platform_depopulate(&test_bus->dev); for_each_child_of_node(np, child) { for_each_child_of_node(child, grandchild) unittest(!of_find_device_by_node(grandchild), - "device didn't get destroyed '%s'\n", - grandchild->name); + "device didn't get destroyed '%pOFn'\n", + grandchild); } platform_device_unregister(test_bus); @@ -1067,20 +1098,44 @@ static void __init of_unittest_platform_populate(void) * of np into dup node (present in live tree) and * updates parent of children of np to dup. * - * @np: node already present in live tree + * @np: node whose properties are being added to the live tree * @dup: node present in live tree to be updated */ static void update_node_properties(struct device_node *np, struct device_node *dup) { struct property *prop; + struct property *save_next; struct device_node *child; - - for_each_property_of_node(np, prop) - of_add_property(dup, prop); + int ret; for_each_child_of_node(np, child) child->parent = dup; + + /* + * "unittest internal error: unable to add testdata property" + * + * If this message reports a property in node '/__symbols__' then + * the respective unittest overlay contains a label that has the + * same name as a label in the live devicetree. The label will + * be in the live devicetree only if the devicetree source was + * compiled with the '-@' option. If you encounter this error, + * please consider renaming __all__ of the labels in the unittest + * overlay dts files with an odd prefix that is unlikely to be + * used in a real devicetree. 
+ */ + + /* + * open code for_each_property_of_node() because of_add_property() + * sets prop->next to NULL + */ + for (prop = np->properties; prop != NULL; prop = save_next) { + save_next = prop->next; + ret = of_add_property(dup, prop); + if (ret) + pr_err("unittest internal error: unable to add testdata property %pOF/%s", + np, prop->name); + } } /** @@ -1089,18 +1144,25 @@ static void update_node_properties(struct device_node *np, * * @np: Node to attach to live tree */ -static int attach_node_and_children(struct device_node *np) +static void attach_node_and_children(struct device_node *np) { struct device_node *next, *dup, *child; unsigned long flags; const char *full_name; full_name = kasprintf(GFP_KERNEL, "%pOF", np); + + if (!strcmp(full_name, "/__local_fixups__") || + !strcmp(full_name, "/__fixups__")) { + kfree(full_name); + return; + } + dup = of_find_node_by_path(full_name); kfree(full_name); if (dup) { update_node_properties(np, dup); - return 0; + return; } child = np->child; @@ -1121,8 +1183,6 @@ static int attach_node_and_children(struct device_node *np) attach_node_and_children(child); child = next; } - - return 0; } /** @@ -1159,6 +1219,7 @@ static int __init unittest_data_add(void) of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node); if (!unittest_data_node) { pr_warn("%s: No tree to attach; not running tests\n", __func__); + kfree(unittest_data); return -ENODATA; } diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 31ff03dbeb83771be1ba57fde89ea6c32f63c2aa..1e80f9ec1aa6ac27f8c6d3ee58d1ab93204472e8 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -191,12 +191,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) if (IS_ERR(opp_table)) return 0; - count = opp_table->regulator_count; - /* Regulator may not be required for the device */ - if (!count) + if (!opp_table->regulators) goto put_opp_table; + count = opp_table->regulator_count; + uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) goto put_opp_table; @@ -313,7 +313,7 @@ int dev_pm_opp_get_opp_count(struct device *dev) count = PTR_ERR(opp_table); dev_dbg(dev, "%s: OPP table not found (%d)\n", __func__, count); - return 0; + return count; } count = _get_opp_count(opp_table); @@ -976,6 +976,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, struct regulator *reg; int i; + if (!opp_table->regulators) + return true; + for (i = 0; i < opp_table->regulator_count; i++) { reg = opp_table->regulators[i]; @@ -1263,7 +1266,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table) struct dev_pm_set_opp_data *data; int len, count = opp_table->regulator_count; - if (WARN_ON(!count)) + if (WARN_ON(!opp_table->regulators)) return -EINVAL; /* space for set_opp_data */ diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 7af0ddec936bb422c44e0cd2f92b89d6723579ee..20988c42665017bea3402c0b0c67f7d1d1e91608 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -425,6 +425,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); ret = -ENOENT; + _dev_pm_opp_remove_table(opp_table, dev, false); goto put_opp_table; } diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 9e5a9a3112c9cec57abcffab6c9b23640682756c..3f4fb4dbbe33b0ca755dc547707b815a337597e9 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data) int 
ret; vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, - new_supply_vbb->u_volt); + new_supply_vdd->u_volt); + + if (new_supply_vdd->u_volt_min < vdd_uv) + new_supply_vdd->u_volt_min = vdd_uv; /* Scaling up? Scale voltage before frequency */ if (freq > old_freq) { diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 614823617b8b9d8fad74f67112db7c31761c78a2..b7b2e811d547192b232f27399d82b568be9ecc85 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, /* We currently only support kernel addresses */ BUG_ON(sid != KERNEL_SPACE); - mtsp(sid,1); - /* ** WORD 1 - low order word ** "hints" parm includes the VALID bit! @@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, ** Grab virtual index [0:11] ** Deposit virt_idx bits into I/O PDIR word */ - asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); + asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba)); asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci)); asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci)); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 7390fb8ca9d156c485f85aa1c0cc475988765b23..29df6ab29e95cd1fcb948f4e135978645d9deaf4 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -160,6 +160,15 @@ struct dino_device (struct dino_device *)__pdata; }) +/* Check if PCI device is behind a Card-mode Dino. */ +static int pci_dev_is_behind_card_dino(struct pci_dev *dev) +{ + struct dino_device *dino_dev; + + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); + return is_card_dino(&dino_dev->hba.dev->id); +} + /* * Dino Configuration Space Accessor Functions */ @@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev) } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); +#ifdef CONFIG_TULIP +static void pci_fixup_tulip(struct pci_dev *dev) +{ + if (!pci_dev_is_behind_card_dino(dev)) + return; + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM)) + return; + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n", + pci_name(dev)); + /* Disable this card by zeroing the PCI resources */ + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); + memset(&dev->resource[1], 0, sizeof(dev->resource[1])); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip); +#endif /* CONFIG_TULIP */ static void __init dino_bios_init(void) diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 0c6e8b44b4ede70806d2b986c8bba1b216735bf9..c60b465f6fe45e35f52d2843cca285830808cc75 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d break; case DISPLAY_MODEL_LASI: + /* Skip to register LED in QEMU */ + if (running_on_qemu) + return 1; LED_DATA_REG = data_reg; led_func_ptr = led_LASI_driver; printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 11de0eccf96841b65e3e404b2031baf0bc95dd32..41269ad84d169800af81371d168d7d8a4073d5a3 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, pa = virt_to_phys(vba); pa &= IOVP_MASK; - mtsp(sid,1); - asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); + asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba)); pa |= (ci >> 
PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ @@ -1064,7 +1063,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1073,6 +1072,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 380916bff9e05ce4a1cd12fbdbb6a4b787759dec..dee5b9e35ffd6c10ab9c1461ff4ea6f367f09b95 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p) { int i; for (i = 0; i < NR_SUPERIOS; i++) - if (superios[i].io != p->base) + if (superios[i].io == p->base) return &superios[i]; return NULL; } diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 48804049d6972631f3179659b31bf3db3966c149..c5f7e1ff06f1d0f21d60aba61b19c2a423229ed4 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -51,12 +51,12 @@ static int do_active_device(struct ctl_table *table, int write, for (dev = port->devices; dev ; dev = dev->next) { if(dev == port->cad) { - len += sprintf(buffer, "%s\n", dev->name); + len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name); } } if(!len) { - len += sprintf(buffer, "%s\n", "none"); + len += snprintf(buffer, sizeof(buffer), "%s\n", "none"); } if (len > *lenp) @@ -87,19 +87,19 @@ static int do_autoprobe(struct ctl_table *table, int write, } if ((str = info->class_name) != NULL) - len += sprintf (buffer + len, "CLASS:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str); if ((str = info->model) != NULL) - len += sprintf (buffer + len, "MODEL:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str); if ((str = info->mfr) != NULL) - len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str); if ((str = info->description) != NULL) - len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str); if ((str = info->cmdset) != NULL) - len += sprintf (buffer + len, "COMMAND SET:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str); if (len > *lenp) len = *lenp; @@ -117,7 +117,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; - char buffer[20]; + char buffer[64]; int len = 0; if (*ppos) { @@ -128,7 +128,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi); + len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi); if (len > *lenp) len = *lenp; @@ -156,7 +156,7 @@ static int do_hardware_irq(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%d\n", port->irq); + len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq); if 
(len > *lenp) len = *lenp; @@ -184,7 +184,7 @@ static int do_hardware_dma(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%d\n", port->dma); + len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma); if (len > *lenp) len = *lenp; @@ -213,7 +213,13 @@ static int do_hardware_modes(struct ctl_table *table, int write, return -EACCES; { -#define printmode(x) {if(port->modes&PARPORT_MODE_##x){len+=sprintf(buffer+len,"%s%s",f?",":"",#x);f++;}} +#define printmode(x) { \ + if (port->modes & PARPORT_MODE_##x) { \ + len += snprintf(buffer + len, sizeof(buffer) - len, \ + "%s%s", f ? "," : "", #x); \ + f++; \ + } \ +} int f = 0; printmode(PCSPP); printmode(TRISTATE); diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5dc53d420ca8ca805c0c036c23e3c1a3fc42ac00..15c81cffd2de2f50bd8432ac4cedc2e87d53cb41 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv) return 0; } +/* + * Iterates through all the devices connected to the bus and return 1 + * if the device is a parallel port. + */ + +static int port_detect(struct device *dev, void *dev_drv) +{ + if (is_parport(dev)) + return 1; + return 0; +} + /** * parport_register_driver - register a parallel port device driver * @drv: structure describing the driver @@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner, if (ret) return ret; + /* + * check if bus has any parallel port registered, if + * none is found then load the lowlevel driver. + */ + ret = bus_for_each_dev(&parport_bus_type, NULL, NULL, + port_detect); + if (!ret) + get_lowlevel_driver(); + mutex_lock(®istration_lock); if (drv->match_port) bus_for_each_dev(&parport_bus_type, NULL, drv, @@ -895,6 +916,7 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->devmodel = true; ret = device_register(&par_dev->dev); if (ret) { + kfree(par_dev->state); put_device(&par_dev->dev); goto err_put_port; } @@ -912,6 +934,7 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); + kfree(par_dev->state); device_unregister(&par_dev->dev); goto err_put_port; } diff --git a/drivers/pci/access.c b/drivers/pci/access.c index a3ad2fe185b9c517923fc0a26897af0e289dcdd6..dfb2fefbbce109c122fd47b5893cf9fabff980ea 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -207,14 +207,14 @@ static noinline void pci_wait_cfg(struct pci_dev *dev) { DECLARE_WAITQUEUE(wait, current); - __add_wait_queue(&pci_cfg_wait, &wait); do { set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); + add_wait_queue(&pci_cfg_wait, &wait); schedule(); + remove_wait_queue(&pci_cfg_wait, &wait); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. 
*/ @@ -336,15 +336,6 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static bool pcie_downstream_port(const struct pci_dev *dev) -{ - int type = pci_pcie_type(dev); - - return type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_DOWNSTREAM || - type == PCI_EXP_TYPE_PCIE_BRIDGE; -} - bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); @@ -364,7 +355,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT; } -static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) +bool pcie_cap_has_rtctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 5b78f3b1b918a7f5600c4222b89288481ed985c2..4cefdfa40a6daf2140630849e3ae2937e10e5a1c 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -394,3 +394,20 @@ int pci_max_pasids(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(pci_max_pasids); #endif /* CONFIG_PCI_PASID */ + +#if defined(CONFIG_PCI_PASID) && defined(CONFIG_PCI_PRI) +bool pci_prg_resp_requires_prefix(struct pci_dev *pdev) +{ + u16 status; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); + if (!pos) + return false; + + pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); + + return !!(status & PCI_PRI_STATUS_PRPR); +} +EXPORT_SYMBOL_GPL(pci_prg_resp_requires_prefix); +#endif /* CONFIG_PCI_PASID && CONFIG_PCI_PRI */ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5cb40b2518f9376dbe7edd4bf11d303daf97c025..495059d923f7da4e360a7689349c46260e16e797 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -23,7 +23,7 @@ void pci_add_resource_offset(struct list_head *resources, struct resource *res, entry = resource_list_create_entry(res, 0); if (!entry) { - printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); + pr_err("PCI: can't add host bridge window %pR\n", res); return; } @@ -288,8 +288,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) res->end = end; res->flags &= ~IORESOURCE_UNSET; orig_res.flags &= ~IORESOURCE_UNSET; - pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n", - &orig_res, res); + pci_info(dev, "%pR clipped to %pR\n", &orig_res, res); return true; } diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 028b287466fbf9bf69130282c24e7dbe26e12a92..21bda955e66a4d1ad1f8d71780f33067ede6440d 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -279,4 +279,5 @@ config VMD module will be called vmd. 
source "drivers/pci/controller/dwc/Kconfig" +source "drivers/pci/controller/hisi-pcie-customer/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index d56a507495c5edd902a6012420924b9b837203a5..0771b957ce7b52282fc501fdc82612aa400dffc4 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ +obj-y += hisi-pcie-customer/ # The following drivers are for devices that use the generic ACPI diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index ce9224a36f62fd8183d9662bf680e29515928171..412524aa1fdec4e4e97e84d9d0972590d6f95f3d 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "../../pci.h" #include "pcie-designware.h" @@ -542,7 +543,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { }; /* - * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 + * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 * @dra7xx: the dra7xx device where the workaround should be applied * * Access to the PCIe slave port that are not 32-bit aligned will result @@ -552,7 +553,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { * * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. */ -static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) +static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; @@ -704,6 +705,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_RC); + + ret = dra7xx_pcie_unaligned_memaccess(dev); + if (ret) + dev_err(dev, "WA for Errata i870 not applied\n"); + ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) goto err_gpio; @@ -717,7 +723,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_EP); - ret = dra7xx_pcie_ep_unaligned_memaccess(dev); + ret = dra7xx_pcie_unaligned_memaccess(dev); if (ret) goto err_gpio; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index cee5f2f590e2d4c51e9531383225d16e69f0d15d..14a6ba4067fbec47c3c2ea5609e591b8c689faff 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) ep->phy = devm_of_phy_get(dev, np, NULL); if (IS_ERR(ep->phy)) { - if (PTR_ERR(ep->phy) == -EPROBE_DEFER) + if (PTR_ERR(ep->phy) != -ENODEV) return PTR_ERR(ep->phy); ep->phy = NULL; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 4a9a673b47776f00c7177ea009043840d31c7ec4..3b2ceb566728943a012afa6193a90c319d56d4dc 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -66,6 +66,7 @@ struct imx6_pcie { #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 /* PCIe Root Complex registers (memory-mapped) */ +#define PCIE_RC_IMX6_MSI_CAP 0x50 #define PCIE_RC_LCR 0x7c #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 @@ -80,8 +81,6 @@ struct imx6_pcie { #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) #define 
PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) -#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) #define PCIE_PHY_CTRL_DATA_LOC 0 @@ -641,12 +640,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp) return 0; } -static int imx6_pcie_link_up(struct dw_pcie *pci) -{ - return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & - PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; -} - static const struct dw_pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; @@ -679,7 +672,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, } static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = imx6_pcie_link_up, + /* No special ops needed, but pcie-designware still expects this struct */ }; static int imx6_pcie_probe(struct platform_device *pdev) @@ -690,6 +683,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) struct resource *dbi_base; struct device_node *node = dev->of_node; int ret; + u16 val; imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); if (!imx6_pcie) @@ -813,8 +807,8 @@ static int imx6_pcie_probe(struct platform_device *pdev) imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); if (IS_ERR(imx6_pcie->vpcie)) { - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV) + return PTR_ERR(imx6_pcie->vpcie); imx6_pcie->vpcie = NULL; } @@ -824,6 +818,14 @@ static int imx6_pcie_probe(struct platform_device *pdev) if (ret < 0) return ret; + if (pci_msi_enabled()) { + val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP + + PCI_MSI_FLAGS); + val |= PCI_MSI_FLAGS_ENABLE; + dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS, + val); + } + return 0; } diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index e88bd221fffeabc856a67dc06f6a8676e8286858..765357b87ff695624e9feff0c4b47953fcf0ecb9 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -36,6 +36,7 @@ #define PCIE_RC_K2HK 0xb008 #define PCIE_RC_K2E 0xb009 #define PCIE_RC_K2L 0xb00a +#define PCIE_RC_K2G 0xb00b #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) @@ -50,6 +51,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { 0, }, }; @@ -237,6 +240,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) ks_dw_pcie_enable_error_irq(ks_pcie); } +#ifdef CONFIG_ARM /* * When a PCI device does not exist during config cycles, keystone host gets a * bus error instead of returning 0xffffffff. 
This handler always returns 0 @@ -256,6 +260,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } +#endif static int __init ks_pcie_host_init(struct pcie_port *pp) { @@ -279,12 +284,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) val |= BIT(12); writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); +#ifdef CONFIG_ARM /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); +#endif return 0; } diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 3724d3ef7008e88b59e398318fd212ad76b24c3a..7aa9a82b7ebd62043c5c8fd6c64c10b190cd1c9b 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) int i; for (i = 0; i < PCIE_IATU_NUM; i++) - dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); + dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND); } static int ls1021_pcie_link_up(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 1e7b02221eac934ea28b72c3094587c71c4fb96b..a3d07d9c598bff4f101340e04e8044575d25cd80 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, u8 cap_id, next_cap_ptr; u16 reg; + if (!cap_ptr) + return 0; + reg = dw_pcie_readw_dbi(pci, cap_ptr); - next_cap_ptr = (reg & 0xff00) >> 8; cap_id = (reg & 0x00ff); - if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) + if (cap_id > PCI_CAP_ID_MAX) return 0; if (cap_id == cap) return cap_ptr; + next_cap_ptr = (reg & 0xff00) >> 8; return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); } @@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); next_cap_ptr = (reg & 0x00ff); - if (!next_cap_ptr) - return 0; - return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); } @@ -385,6 +385,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; + unsigned int aligned_offset; u16 msg_ctrl, msg_data; u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; @@ -410,13 +411,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + msg_addr = ((u64)msg_addr_upper) << 32 | + (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); if (ret) return ret; - writel(msg_data | (interrupt_num - 1), ep->msi_mem); + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); @@ -440,7 +443,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; - tbl_offset >>= 3; reg = PCI_BASE_ADDRESS_0 + (4 * bir); bar_addr_upper = 0; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c 
b/drivers/pci/controller/dwc/pcie-designware-host.c index 29a05759a29421aab29efb6026ffec9d083abef1..7c181923887f9750b5ace9ace01e21bfcd99d0fd 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) (i * MAX_MSI_IRQS_PER_CTRL) + pos); generic_handle_irq(irq); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + - (i * MSI_REG_CTRL_BLOCK_SIZE), - 4, 1 << pos); pos++; } } @@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] &= ~(1 << bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data) bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; pp->irq_status[ctrl] |= 1 << bit; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, - pp->irq_status[ctrl]); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, + ~pp->irq_status[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data) static void dw_pci_bottom_ack(struct irq_data *d) { - struct msi_desc *msi = irq_data_get_msi_desc(d); - struct pcie_port *pp; + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + unsigned int res, bit, ctrl; + unsigned long flags; - pp = msi_desc_to_pci_sysdata(msi); + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + raw_spin_lock_irqsave(&pp->lock, flags); + + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); if (pp->ops->msi_irq_ack) pp->ops->msi_irq_ack(d->hwirq, pp); + + raw_spin_unlock_irqrestore(&pp->lock, flags); } static struct irq_chip dw_pci_msi_bottom_irq_chip = { @@ -278,6 +284,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) return -ENOMEM; } + irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); + pp->msi_domain = pci_msi_create_irq_domain(fwnode, &dw_pcie_msi_domain_info, pp->irq_domain); @@ -297,20 +305,24 @@ void dw_pcie_free_msi(struct pcie_port *pp) irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); + + if (pp->msi_page) + __free_page(pp->msi_page); } void dw_pcie_msi_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct page *page; u64 msi_target; - page = alloc_page(GFP_KERNEL); - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + pp->msi_page = alloc_page(GFP_KERNEL); + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (dma_mapping_error(dev, pp->msi_data)) { dev_err(dev, "Failed to map MSI data\n"); - __free_page(page); + __free_page(pp->msi_page); + pp->msi_page = NULL; return; } msi_target = (u64)pp->msi_data; @@ -346,7 +358,7 @@ int dw_pcie_host_init(struct pcie_port *pp) dev_err(dev, "Missing *config* reg space\n"); } - bridge = pci_alloc_host_bridge(0); + bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; @@ -357,7 +369,7 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = devm_request_pci_bus_resources(dev, &bridge->windows); if (ret) - goto error; + return ret; /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { @@ -401,8 +413,7 @@ int 
dw_pcie_host_init(struct pcie_port *pp) resource_size(pp->cfg)); if (!pci->dbi_base) { dev_err(dev, "Error with ioremap\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -413,8 +424,7 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg0_base, pp->cfg0_size); if (!pp->va_cfg0_base) { dev_err(dev, "Error with ioremap in function\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -424,8 +434,7 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg1_size); if (!pp->va_cfg1_base) { dev_err(dev, "Error with ioremap\n"); - ret = -ENOMEM; - goto error; + return -ENOMEM; } } @@ -433,7 +442,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pci->num_viewport = 2; - if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (pci_msi_enabled()) { /* * If a specific SoC driver needs to change the * default number of vectors, it needs to implement @@ -448,14 +457,14 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->num_vectors == 0) { dev_err(dev, "Invalid number of vectors\n"); - goto error; + return -EINVAL; } } if (!pp->ops->msi_host_init) { ret = dw_pcie_allocate_domains(pp); if (ret) - goto error; + return ret; if (pp->msi_irq) irq_set_chained_handler_and_data(pp->msi_irq, @@ -464,14 +473,14 @@ int dw_pcie_host_init(struct pcie_port *pp) } else { ret = pp->ops->msi_host_init(pp); if (ret < 0) - goto error; + return ret; } } if (pp->ops->host_init) { ret = pp->ops->host_init(pp); if (ret) - goto error; + goto err_free_msi; } pp->root_bus_nr = pp->busn->start; @@ -485,7 +494,7 @@ int dw_pcie_host_init(struct pcie_port *pp) ret = pci_scan_root_bus_bridge(bridge); if (ret) - goto error; + goto err_free_msi; bus = bridge->bus; @@ -501,8 +510,9 @@ int dw_pcie_host_init(struct pcie_port *pp) pci_bus_add_devices(bus); return 0; -error: - pci_free_host_bridge(bridge); +err_free_msi: + if (pci_msi_enabled() && !pp->ops->msi_host_init) + dw_pcie_free_msi(pp); return ret; } @@ -658,10 +668,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + 4, ~0); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, &pp->irq_status[ctrl]); + 4, ~0); + pp->irq_status[ctrl] = 0; + } /* Setup RC BARs */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 9f1a5e399b7033eba9918a7981bdfdb0ea957a7a..14dcf66466996cc82aab5e841c20b5248a251e37 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -164,6 +164,7 @@ struct pcie_port { struct irq_domain *irq_domain; struct irq_domain *msi_domain; dma_addr_t msi_data; + struct page *msi_page; u32 num_vectors; u32 irq_status[MAX_MSI_CTRLS]; raw_spinlock_t lock; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 7b32e619b959c8697eb16287c9e7d547581d97fd..a3489839a8fc3a06a340c03ed4b3c26f7af902fb 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev) hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); if (IS_ERR(hipcie->vpcie)) { - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) - return -EPROBE_DEFER; 
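/*
 * A minimal, hypothetical sketch (not taken from any hunk in this series) of
 * the error-handling pattern that the imx6, histb and rockchip regulator
 * lookups and the exynos PHY lookup converge on in these patches:
 * devm_regulator_get_optional() returns -ENODEV when the supply is simply
 * not described, which the drivers now treat as "no regulator present";
 * any other error, including -EPROBE_DEFER, is propagated unchanged.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev,
				       struct regulator **supply)
{
	*supply = devm_regulator_get_optional(dev, "vpcie");
	if (IS_ERR(*supply)) {
		if (PTR_ERR(*supply) != -ENODEV)
			/* Real failure, e.g. -EPROBE_DEFER: hand it up. */
			return PTR_ERR(*supply);
		/* Supply not described: treat it as absent. */
		*supply = NULL;
	}

	return 0;
}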
+ if (PTR_ERR(hipcie->vpcie) != -ENODEV) + return PTR_ERR(hipcie->vpcie); hipcie->vpcie = NULL; } diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 5352e0c3be8259e4237e3d6c12c604f1b61746ff..9b599296205dcc5333a23577ee6215bc2ab4d430 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci, return 0; } -static int __init kirin_add_pcie_port(struct dw_pcie *pci, - struct platform_device *pdev) +static int kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) { int ret; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 4352c1cb926d587532fd57d644b911b739026069..e292801fff7fd8ed4e434958cc4c5363457d19dc 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie) static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { + /* Ensure that PERST has been asserted for at least 100 ms */ + msleep(100); gpiod_set_value_cansleep(pcie->reset, 0); usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } @@ -1089,7 +1091,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp) struct qcom_pcie *pcie = to_qcom_pcie(pci); int ret; - pm_runtime_get_sync(pci->dev); qcom_ep_reset_assert(pcie); ret = pcie->ops->init(pcie); @@ -1126,7 +1127,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp) phy_power_off(pcie->phy); err_deinit: pcie->ops->deinit(pcie); - pm_runtime_put(pci->dev); return ret; } @@ -1216,6 +1216,12 @@ static int qcom_pcie_probe(struct platform_device *pdev) return -ENOMEM; pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_disable(dev); + return ret; + } + pci->dev = dev; pci->ops = &dw_pcie_ops; pp = &pci->pp; @@ -1224,45 +1230,57 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->ops = of_device_get_match_data(dev); - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); - if (IS_ERR(pcie->reset)) - return PTR_ERR(pcie->reset); + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset)) { + ret = PTR_ERR(pcie->reset); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); pcie->parf = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->parf)) - return PTR_ERR(pcie->parf); + if (IS_ERR(pcie->parf)) { + ret = PTR_ERR(pcie->parf); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); + if (IS_ERR(pci->dbi_base)) { + ret = PTR_ERR(pci->dbi_base); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); pcie->elbi = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->elbi)) - return PTR_ERR(pcie->elbi); + if (IS_ERR(pcie->elbi)) { + ret = PTR_ERR(pcie->elbi); + goto err_pm_runtime_put; + } pcie->phy = devm_phy_optional_get(dev, "pciephy"); - if (IS_ERR(pcie->phy)) - return PTR_ERR(pcie->phy); + if (IS_ERR(pcie->phy)) { + ret = PTR_ERR(pcie->phy); + goto err_pm_runtime_put; + } ret = pcie->ops->get_resources(pcie); if (ret) - return ret; + goto err_pm_runtime_put; pp->ops = &qcom_pcie_dw_ops; if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) - return 
pp->msi_irq; + if (pp->msi_irq < 0) { + ret = pp->msi_irq; + goto err_pm_runtime_put; + } } ret = phy_init(pcie->phy); if (ret) { pm_runtime_disable(&pdev->dev); - return ret; + goto err_pm_runtime_put; } platform_set_drvdata(pdev, pcie); @@ -1271,10 +1289,16 @@ static int qcom_pcie_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "cannot initialize host\n"); pm_runtime_disable(&pdev->dev); - return ret; + goto err_pm_runtime_put; } return 0; + +err_pm_runtime_put: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; } static const struct of_device_id qcom_pcie_match[] = { diff --git a/drivers/pci/controller/hisi-pcie-customer/Kconfig b/drivers/pci/controller/hisi-pcie-customer/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..4c0203dc508f8e20155cd410fbc3bedf46c05cc0 --- /dev/null +++ b/drivers/pci/controller/hisi-pcie-customer/Kconfig @@ -0,0 +1,3 @@ +config HISILICON_PCIE_CAE + tristate "hisi custom pcie driver for hiarmtool" + default m \ No newline at end of file diff --git a/drivers/pci/controller/hisi-pcie-customer/Makefile b/drivers/pci/controller/hisi-pcie-customer/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9b2c7ef42babedb521e1026fa99eafd7d476ff42 --- /dev/null +++ b/drivers/pci/controller/hisi-pcie-customer/Makefile @@ -0,0 +1,2 @@ +pcie_cae-objs := hisi_pcie_cae.o +obj-$(CONFIG_HISILICON_PCIE_CAE) += pcie_cae.o \ No newline at end of file diff --git a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c new file mode 100644 index 0000000000000000000000000000000000000000..fb7dbca1781ec38fbe07a18b28459a71d93eaeca --- /dev/null +++ b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include +#include +#include + +#define CHIP_OFFSET 0x200000000000UL +#define APB_SUBCTRL_BASE 0x148070000UL +#define NVME_BAR_BASE 0x148800000UL +#define VIRTIO_BAR_BASE 0x148a00000UL +#define CHIP_MMAP_MASK 0xf +#define TYPE_MMAP_MASK 0xf0 +#define SYSCTRL_SC_ECO_RSV1 0x9401ff04 +#define PCIE_REG_SIZE 0X390000UL +#define NVME_BAR_SIZE 0x200000UL +#define VIRTIO_BAR_SIZE 0x200000UL +#define MAX_CHIP_NUM 4 +#define CHIP_INFO_REG_SIZE 4 +#define TYPE_SHIFT 4 +#define BIT_SHIFT_8 8 +#define PCIE_CMD_GET_CHIPNUMS 0x01 +#define HI1620_PCI_VENDOR_ID 0x19e5 +#define HI1620_PCI_DEVICE_ID 0xa120 +#define DEVICE_NAME "pcie_reg_dev" + +enum chip_type_t { + CHIP1620 = 0x13, + CHIP1620s = 0x12, + CHIP1601 = 0x10, + CHIPNONE = 0x0, +}; + +enum { + MMAP_TYPE_APB, + MMAP_TYPE_NVME, + MMAP_TYPE_VIRTIO +}; + +static u32 current_chip_nums; + +static const struct vm_operations_struct mmap_pcie_mem_ops = { +#ifdef CONFIG_HAVE_IOREMAP_PROT + .access = generic_access_phys +#endif +}; + +static int pcie_reg_mmap(struct file *filep, struct vm_area_struct *vma) +{ + u64 size = vma->vm_end - vma->vm_start; + u32 chip_id = (u32)vma->vm_pgoff & CHIP_MMAP_MASK; + u32 type = ((u32)vma->vm_pgoff & TYPE_MMAP_MASK) >> TYPE_SHIFT; + u64 phy_addr; + + if (chip_id >= current_chip_nums) { + pr_err("pcie_cae input chip_id %u is invalid\n", chip_id); + return -EINVAL; + } + + /* It's illegal to wrap around the end of the physical address space. 
*/ + switch (type) { + case MMAP_TYPE_APB: + phy_addr = APB_SUBCTRL_BASE + CHIP_OFFSET * chip_id; + if (size > PCIE_REG_SIZE) { + pr_err("pcie_cae mmap_type_apb map size is invalid\n"); + return -EINVAL; + } + break; + case MMAP_TYPE_NVME: + phy_addr = NVME_BAR_BASE + CHIP_OFFSET * chip_id; + if (size > NVME_BAR_SIZE) { + pr_err("pcie_cae mmap_type_nvme map size is invalid\n"); + return -EINVAL; + } + break; + case MMAP_TYPE_VIRTIO: + phy_addr = VIRTIO_BAR_BASE + CHIP_OFFSET * chip_id; + if (size > VIRTIO_BAR_SIZE) { + pr_err("pcie_cae mmap_type_virtio map size is invalid\n"); + return -EINVAL; + } + break; + default: + pr_err("pcie_cae input addr type %u is invalid\n", type); + return -EINVAL; + } + vma->vm_pgoff = phy_addr >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); + vma->vm_ops = &mmap_pcie_mem_ops; + /* Remap-pfn-range will mark the range VM_IO */ + if (remap_pfn_range(vma, + vma->vm_start, + vma->vm_pgoff, + size, + vma->vm_page_prot)) { + pr_err("pcie_cae map pcie reg zone failed\n"); + return -EAGAIN; + } + + return 0; +} + +u32 pcie_get_chipnums(u32 cpu_info) +{ + int i; + u32 chip_count = 0; + u32 chip_i_info; + + for (i = 0; i < MAX_CHIP_NUM; i++) { + chip_i_info = ((cpu_info & (0xFF << (BIT_SHIFT_8 * i))) >> + (BIT_SHIFT_8 * i)); + if ((chip_i_info == CHIP1620) || + (chip_i_info == CHIP1620s) || + (chip_i_info == CHIP1601)) { + chip_count++; + } + } + + return chip_count; +} + +static int pcie_open(struct inode *inode, struct file *f) +{ + void __iomem *addr_base; + u32 val; + struct pci_dev *dev; + int type; + + dev = pci_get_device(HI1620_PCI_VENDOR_ID, HI1620_PCI_DEVICE_ID, NULL); + if (!dev) { + pr_err("pcie_cae can only work at Hi1620 series chip\n"); + return -EINVAL; + } + + type = pci_pcie_type(dev); + pr_info("pcie_cae detect chip PCIe Vendor ID:0x%x, Device ID:0x%x\n", + dev->vendor, dev->device); + pci_dev_put(dev); + + if (type != PCI_EXP_TYPE_ROOT_PORT) { + pr_err("pcie_cae can not support this chip\n"); + return -EINVAL; + } + + addr_base = ioremap_nocache(SYSCTRL_SC_ECO_RSV1, CHIP_INFO_REG_SIZE); + if (!addr_base) { + pr_err("pcie_cae map chip_info_reg zone failed\n"); + return -EPERM; + } + + val = readl(addr_base); + current_chip_nums = pcie_get_chipnums(val); + + iounmap(addr_base); + + return 0; +} + +static int pcie_release(struct inode *inode, struct file *f) +{ + return 0; +} + +static long pcie_reg_ioctl(struct file *pfile, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case PCIE_CMD_GET_CHIPNUMS: + if ((void *)arg == NULL) { + pr_err("pcie_cae invalid arg address\n"); + ret = -EINVAL; + break; + } + + if (copy_to_user((void *)arg, (void *)¤t_chip_nums, + sizeof(int))) { + pr_err("pcie_cae copy chip_nums to usr failed\n"); + ret = -EINVAL; + } + break; + + default: + pr_err("pcie_cae invalid pcie ioctl cmd:%u\n", cmd); + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct file_operations pcie_cae_fops = { + .owner = THIS_MODULE, + .open = pcie_open, + .release = pcie_release, + .llseek = noop_llseek, + .mmap = pcie_reg_mmap, + .unlocked_ioctl = pcie_reg_ioctl, +}; + +static struct miscdevice pcie_cae_misc = { + .minor = MISC_DYNAMIC_MINOR, + .fops = &pcie_cae_fops, + .name = DEVICE_NAME, +}; + +static int __init misc_dev_init(void) +{ + return misc_register(&pcie_cae_misc); +} + +static void __exit misc_dev_exit(void) +{ + (void)misc_deregister(&pcie_cae_misc); +} + +module_init(misc_dev_init); +module_exit(misc_dev_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Technology 
Company"); +MODULE_DESCRIPTION("PCIe CAE Driver"); +MODULE_VERSION("V1.2"); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 6b4555ff254867eb3a7c50841bfe29d62c8f3df5..cc81417026cf4a3b079c6a6a5250dc9bb784827b 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -166,7 +166,8 @@ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) -#define PIO_TIMEOUT_MS 1 +#define PIO_RETRY_CNT 750000 /* 1.5 s */ +#define PIO_RETRY_DELAY 2 /* 2 us*/ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 @@ -373,20 +374,19 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie) static int advk_pcie_wait_pio(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - unsigned long timeout; + int i; - timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); - - while (time_before(jiffies, timeout)) { + for (i = 0; i < PIO_RETRY_CNT; i++) { u32 start, isr; start = advk_readl(pcie, PIO_START); isr = advk_readl(pcie, PIO_ISR); if (!start && isr) return 0; + udelay(PIO_RETRY_DELAY); } - dev_err(dev, "config read/write timed out\n"); + dev_err(dev, "PIO read/write transfer time out\n"); return -ETIMEDOUT; } @@ -399,6 +399,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, return true; } +static bool advk_pcie_pio_is_running(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + + /* + * Trying to start a new PIO transfer when previous has not completed + * cause External Abort on CPU which results in kernel panic: + * + * SError Interrupt on CPU0, code 0xbf000002 -- SError + * Kernel panic - not syncing: Asynchronous SError Interrupt + * + * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected + * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent + * concurrent calls at the same time. But because PIO transfer may take + * about 1.5s when link is down or card is disconnected, it means that + * advk_pcie_wait_pio() does not always have to wait for completion. + * + * Some versions of ARM Trusted Firmware handles this External Abort at + * EL3 level and mask it to prevent kernel panic. 
Relevant TF-A commit: + * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50 + */ + if (advk_readl(pcie, PIO_START)) { + dev_err(dev, "Previous PIO read/write transfer is still running\n"); + return true; + } + + return false; +} + static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { @@ -411,9 +440,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, return PCIBIOS_DEVICE_NOT_FOUND; } - /* Start PIO */ - advk_writel(pcie, 0, PIO_START); - advk_writel(pcie, 1, PIO_ISR); + if (advk_pcie_pio_is_running(pcie)) { + *val = 0xffffffff; + return PCIBIOS_SET_FAILED; + } /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); @@ -432,7 +462,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, /* Program the data strobe */ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); - /* Start the transfer */ + /* Clear PIO DONE ISR and start the transfer */ + advk_writel(pcie, 1, PIO_ISR); advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); @@ -466,9 +497,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (where % size) return PCIBIOS_SET_FAILED; - /* Start PIO */ - advk_writel(pcie, 0, PIO_START); - advk_writel(pcie, 1, PIO_ISR); + if (advk_pcie_pio_is_running(pcie)) + return PCIBIOS_SET_FAILED; /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); @@ -495,7 +525,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Program the data strobe */ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); - /* Start the transfer */ + /* Clear PIO DONE ISR and start the transfer */ + advk_writel(pcie, 1, PIO_ISR); advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 9ba4d12c179c7551d8a10b59433a33ed5be044cd..2b53976cd9f93736c1d9d2eeb37ae233eddc2386 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -391,14 +391,6 @@ struct hv_interrupt_entry { u32 data; }; -#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ - -struct hv_vp_set { - u64 format; /* 0 (HvGenericSetSparse4k) */ - u64 valid_banks; - u64 masks[HV_VP_SET_BANK_COUNT_MAX]; -}; - /* * flags for hv_device_interrupt_target.flags */ @@ -410,7 +402,7 @@ struct hv_device_interrupt_target { u32 flags; union { u64 vp_mask; - struct hv_vp_set vp_set; + struct hv_vpset vp_set; }; }; @@ -420,7 +412,7 @@ struct retarget_msi_interrupt { struct hv_interrupt_entry int_entry; u64 reserved2; struct hv_device_interrupt_target int_target; -} __packed; +} __packed __aligned(8); /* * Driver specific state. 
@@ -460,12 +452,16 @@ struct hv_pcibus_device { struct msi_controller msi_chip; struct irq_domain *irq_domain; - /* hypercall arg, must not cross page boundary */ - struct retarget_msi_interrupt retarget_msi_interrupt_params; - spinlock_t retarget_msi_interrupt_lock; struct workqueue_struct *wq; + + /* hypercall arg, must not cross page boundary */ + struct retarget_msi_interrupt retarget_msi_interrupt_params; + + /* + * Don't put anything here: retarget_msi_interrupt_params must be last + */ }; /* @@ -910,12 +906,12 @@ static void hv_irq_unmask(struct irq_data *data) struct retarget_msi_interrupt *params; struct hv_pcibus_device *hbus; struct cpumask *dest; + cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; unsigned long flags; u32 var_size = 0; - int cpu_vmbus; - int cpu; + int cpu, nr_bank; u64 res; dest = irq_data_get_effective_affinity_mask(data); @@ -955,28 +951,27 @@ static void hv_irq_unmask(struct irq_data *data) */ params->int_target.flags |= HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; - params->int_target.vp_set.valid_banks = - (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; - - /* - * var-sized hypercall, var-size starts after vp_mask (thus - * vp_set.format does not count, but vp_set.valid_banks does). - */ - var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; - for_each_cpu_and(cpu, dest, cpu_online_mask) { - cpu_vmbus = hv_cpu_number_to_vp_number(cpu); + if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) { + res = 1; + goto exit_unlock; + } - if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { - dev_err(&hbus->hdev->device, - "too high CPU %d", cpu_vmbus); - res = 1; - goto exit_unlock; - } + cpumask_and(tmp, dest, cpu_online_mask); + nr_bank = cpumask_to_vpset(¶ms->int_target.vp_set, tmp); + free_cpumask_var(tmp); - params->int_target.vp_set.masks[cpu_vmbus / 64] |= - (1ULL << (cpu_vmbus & 63)); + if (nr_bank <= 0) { + res = 1; + goto exit_unlock; } + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_bank_mask + * does). + */ + var_size = 1 + nr_bank; } else { for_each_cpu_and(cpu, dest, cpu_online_mask) { params->int_target.vp_mask |= @@ -1491,6 +1486,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) } } +/* + * Remove entries in sysfs pci slot directory. + */ +static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (!hpdev->pci_slot) + continue; + pci_destroy_slot(hpdev->pci_slot); + hpdev->pci_slot = NULL; + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1766,6 +1776,10 @@ static void pci_devices_present_work(struct work_struct *work) hpdev = list_first_entry(&removed, struct hv_pci_dev, list_entry); list_del(&hpdev->list_entry); + + if (hpdev->pci_slot) + pci_destroy_slot(hpdev->pci_slot); + put_pcichild(hpdev); } @@ -1861,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, static void hv_eject_device_work(struct work_struct *work) { struct pci_eject_response *ejct_pkt; + struct hv_pcibus_device *hbus; struct hv_pci_dev *hpdev; struct pci_dev *pdev; unsigned long flags; @@ -1871,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work) } ctxt; hpdev = container_of(work, struct hv_pci_dev, wrk); + hbus = hpdev->hbus; WARN_ON(hpdev->state != hv_pcichild_ejecting); @@ -1881,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work) * because hbus->pci_bus may not exist yet. 
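 *
 * Note also that hbus is cached from hpdev at the top of this function:
 * the final put_pcichild() below may free hpdev, so everything past that
 * point must go through the cached hbus pointer rather than hpdev->hbus.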
*/ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); - pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, - wslot); + pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot); if (pdev) { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); @@ -1890,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work) pci_unlock_rescan_remove(); } - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); + spin_lock_irqsave(&hbus->device_list_lock, flags); list_del(&hpdev->list_entry); - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (hpdev->pci_slot) pci_destroy_slot(hpdev->pci_slot); @@ -1901,13 +1916,18 @@ static void hv_eject_device_work(struct work_struct *work) ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; - vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, + vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); + /* For the get_pcichild() in hv_pci_eject_device() */ + put_pcichild(hpdev); + /* For the two refs got in new_pcichild_device() */ put_pcichild(hpdev); put_pcichild(hpdev); - put_hvpcibus(hpdev->hbus); + /* hpdev has been freed. Do not use it any more. */ + + put_hvpcibus(hbus); } /** @@ -2681,6 +2701,7 @@ static int hv_pci_remove(struct hv_device *hdev) /* Remove the bus from PCI's point of view. */ pci_lock_rescan_remove(); pci_stop_root_bus(hbus->pci_bus); + hv_pci_remove_slots(hbus); pci_remove_root_bus(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_removed; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index f4f53d092e00526cd4dec08e575334fd9f400c1e..58e48735285337292d9ae0c1b78f5654ed6ed6b5 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -545,12 +545,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); -/* Tegra PCIE requires relaxed ordering */ +/* Tegra20 and Tegra30 PCIE requires relaxed ordering */ static void tegra_pcie_relax_enable(struct pci_dev *dev) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); static int tegra_pcie_request_resources(struct tegra_pcie *pcie) { @@ -1975,14 +1978,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_pci_get_devfn(port); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } index = PCI_SLOT(err); if (index < 1 || index > soc->num_ports) { dev_err(dev, "invalid port number: %d\n", index); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } index--; @@ -1991,12 +1995,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) if (err < 0) { dev_err(dev, "failed to parse # of lanes: %d\n", err); - return err; + goto err_node_put; } if (value > 16) { 
dev_err(dev, "invalid # of lanes: %u\n", value); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } lanes |= value << (index << 3); @@ -2010,13 +2015,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) lane += value; rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); - if (!rp) - return -ENOMEM; + if (!rp) { + err = -ENOMEM; + goto err_node_put; + } err = of_address_to_resource(port, 0, &rp->regs); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } INIT_LIST_HEAD(&rp->list); @@ -2043,6 +2050,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) return err; return 0; + +err_node_put: + of_node_put(port); + return err; } /* diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c index 9e87dd7f9ac383ef8291238e0737c0b80db411c2..c3a088910f48d119f67918066be0be39302ed04f 100644 --- a/drivers/pci/controller/pcie-cadence-ep.c +++ b/drivers/pci/controller/pcie-cadence-ep.c @@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; - u32 r = ep->max_regions - 1; u32 offset; u16 status; u8 msg_code; @@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; @@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, + /* First region was reserved for IRQ writes. 
*/ + cdns_pcie_set_outbound_region(pcie, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, @@ -356,7 +355,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); ep->irq_pci_fn = fn; } - writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); return 0; } @@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) goto free_epc_mem; } ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; + /* Reserve region 0 for IRQs */ + set_bit(0, &ep->ob_region_map); return 0; diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c index 975bcdd6b5c0a73e0c75a947574c0b6b9b774e10..cd795f6fc1e231521a3dca7e0da8443839ff39bc 100644 --- a/drivers/pci/controller/pcie-cadence.c +++ b/drivers/pci/controller/pcie-cadence.c @@ -190,14 +190,16 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) for (i = 0; i < phy_count; i++) { of_property_read_string_index(np, "phy-names", i, &name); - phy[i] = devm_phy_optional_get(dev, name); - if (IS_ERR(phy)) - return PTR_ERR(phy); - + phy[i] = devm_phy_get(dev, name); + if (IS_ERR(phy[i])) { + ret = PTR_ERR(phy[i]); + goto err_phy; + } link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); if (!link[i]) { + devm_phy_put(dev, phy[i]); ret = -EINVAL; - goto err_link; + goto err_phy; } } @@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) ret = cdns_pcie_enable_phy(pcie); if (ret) - goto err_link; + goto err_phy; return 0; -err_link: - while (--i >= 0) +err_phy: + while (--i >= 0) { device_link_del(link[i]); + devm_phy_put(dev, phy[i]); + } return ret; } diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 3160e9342a2fb424ded9edea1c43bee4782eb54f..9191fdd8ce0c76c17290896c8a494c2e03711ab6 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -300,7 +300,7 @@ enum iproc_pcie_reg { }; /* iProc PCIe PAXB BCMA registers */ -static const u16 iproc_pcie_reg_paxb_bcma[] = { +static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -311,7 +311,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = { }; /* iProc PCIe PAXB registers */ -static const u16 iproc_pcie_reg_paxb[] = { +static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -327,7 +327,7 @@ static const u16 iproc_pcie_reg_paxb[] = { }; /* iProc PCIe PAXB v2 registers */ -static const u16 iproc_pcie_reg_paxb_v2[] = { +static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x120, [IPROC_PCIE_CFG_IND_DATA] = 0x124, @@ -355,7 +355,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = { }; /* iProc PCIe PAXC v1 registers */ -static const u16 iproc_pcie_reg_paxc[] = { +static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_CLK_CTRL] = 0x000, [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, @@ -364,7 +364,7 @@ static const u16 iproc_pcie_reg_paxc[] = { }; /* iProc PCIe PAXC v2 registers */ -static const u16 iproc_pcie_reg_paxc_v2[] = { +static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = { [IPROC_PCIE_MSI_GIC_MODE] = 0x050, [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, 
[IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 861dda69f3669970163d81bff102d849c2bd191f..1bfbceb9f445867c18fcc896538340fcb1980b29 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -337,6 +337,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, { struct mtk_pcie *pcie = bus->sysdata; struct mtk_pcie_port *port; + struct pci_dev *dev = NULL; + + /* + * Walk the bus hierarchy to get the devfn value + * of the port in the root bus. + */ + while (bus && bus->number) { + dev = bus->self; + bus = dev->bus; + devfn = dev->devfn; + } list_for_each_entry(port, &pcie->ports, list) if (port->slot == PCI_SLOT(devfn)) @@ -383,75 +394,6 @@ static struct pci_ops mtk_pcie_ops_v2 = { .write = mtk_pcie_config_write, }; -static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - struct resource *mem = &pcie->mem; - const struct mtk_pcie_soc *soc = port->pcie->soc; - u32 val; - size_t size; - int err; - - /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ - if (pcie->base) { - val = readl(pcie->base + PCIE_SYS_CFG_V2); - val |= PCIE_CSR_LTSSM_EN(port->slot) | - PCIE_CSR_ASPM_L1_EN(port->slot); - writel(val, pcie->base + PCIE_SYS_CFG_V2); - } - - /* Assert all reset signals */ - writel(0, port->base + PCIE_RST_CTRL); - - /* - * Enable PCIe link down reset, if link status changed from link up to - * link down, this will reset MAC control registers and configuration - * space. - */ - writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); - - /* De-assert PHY, PE, PIPE, MAC and configuration reset */ - val = readl(port->base + PCIE_RST_CTRL); - val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | - PCIE_MAC_SRSTB | PCIE_CRSTB; - writel(val, port->base + PCIE_RST_CTRL); - - /* Set up vendor ID and class code */ - if (soc->need_fix_class_id) { - val = PCI_VENDOR_ID_MEDIATEK; - writew(val, port->base + PCIE_CONF_VEND_ID); - - val = PCI_CLASS_BRIDGE_HOST; - writew(val, port->base + PCIE_CONF_CLASS_ID); - } - - /* 100ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, - !!(val & PCIE_PORT_LINKUP_V2), 20, - 100 * USEC_PER_MSEC); - if (err) - return -ETIMEDOUT; - - /* Set INTx mask */ - val = readl(port->base + PCIE_INT_MASK); - val &= ~INTX_MASK; - writel(val, port->base + PCIE_INT_MASK); - - /* Set AHB to PCIe translation windows */ - size = mem->end - mem->start; - val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); - - val = upper_32_bits(mem->start); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); - - /* Set PCIe to AXI translation memory space.*/ - val = fls(0xffffffff) | WIN_ENABLE; - writel(val, port->base + PCIE_AXI_WINDOW0); - - return 0; -} - static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); @@ -628,8 +570,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, ret = mtk_pcie_allocate_msi_domains(port); if (ret) return ret; - - mtk_pcie_enable_msi(port); } return 0; @@ -696,6 +636,78 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, return 0; } +static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct resource *mem = &pcie->mem; + const struct mtk_pcie_soc *soc = 
port->pcie->soc; + u32 val; + size_t size; + int err; + + /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ + if (pcie->base) { + val = readl(pcie->base + PCIE_SYS_CFG_V2); + val |= PCIE_CSR_LTSSM_EN(port->slot) | + PCIE_CSR_ASPM_L1_EN(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG_V2); + } + + /* Assert all reset signals */ + writel(0, port->base + PCIE_RST_CTRL); + + /* + * Enable PCIe link down reset, if link status changed from link up to + * link down, this will reset MAC control registers and configuration + * space. + */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | + PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + + /* Set up vendor ID and class code */ + if (soc->need_fix_class_id) { + val = PCI_VENDOR_ID_MEDIATEK; + writew(val, port->base + PCIE_CONF_VEND_ID); + + val = PCI_CLASS_BRIDGE_PCI; + writew(val, port->base + PCIE_CONF_CLASS_ID); + } + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, + !!(val & PCIE_PORT_LINKUP_V2), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; + + /* Set INTx mask */ + val = readl(port->base + PCIE_INT_MASK); + val &= ~INTX_MASK; + writel(val, port->base + PCIE_INT_MASK); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + mtk_pcie_enable_msi(port); + + /* Set AHB to PCIe translation windows */ + size = mem->end - mem->start; + val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); + + val = upper_32_bits(mem->start); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); + + /* Set PCIe to AXI translation memory space.*/ + val = fls(0xffffffff) | WIN_ENABLE; + writel(val, port->base + PCIE_AXI_WINDOW0); + + return 0; +} + static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { @@ -1109,7 +1121,9 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie) if (err < 0) return err; - devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); + err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); + if (err) + return err; return 0; } diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index a939e8d31735a7bc0cb56b3fdc231dae50751ab0..a2d1e89d48674842e8e0afb0b711ec6370c602fb 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -508,6 +508,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) return err; } + /* setup bus numbers */ + value = csr_readl(pcie, PCI_PRIMARY_BUS); + value &= 0xff000000; + value |= 0x00ff0100; + csr_writel(pcie, value, PCI_PRIMARY_BUS); + /* * program Bus Master Enable Bit in Command Register in PAB Config * Space @@ -547,7 +553,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) resource_size(pcie->ob_io_res)); /* memory inbound translation window */ - program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); + program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { @@ -559,11 +565,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie) if (type) { /* configure outbound translation window */ program_ob_windows(pcie, pcie->ob_wins_configured, - win->res->start, 0, type, - 
resource_size(win->res)); + win->res->start, + win->res->start - win->offset, + type, resource_size(win->res)); } } + /* fixup for PCIe class register */ + value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); + value &= 0xff; + value |= (PCI_CLASS_BRIDGE_PCI << 16); + csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); + /* setup MSI hardware registers */ mobiveil_pcie_enable_msi(pcie); @@ -804,9 +817,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev) goto error; } - /* fixup for PCIe class register */ - csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); - /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); if (ret) { diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index c8febb009454cdcfc0ed473f4cdb9540af0de59e..333ab6092f174d8463b54b169a5a5c834c49a05c 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -46,6 +46,7 @@ /* Transfer control */ #define PCIETCTLR 0x02000 +#define DL_DOWN BIT(3) #define CFINIT 1 #define PCIETSTR 0x02004 #define DATA_LINK_ACTIVE 1 @@ -92,8 +93,12 @@ #define LINK_SPEED_2_5GTS (1 << 16) #define LINK_SPEED_5_0GTS (2 << 16) #define MACCTLR 0x011058 +#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */ #define SPEED_CHANGE BIT(24) #define SCRAMBLE_DISABLE BIT(27) +#define LTSMDIS BIT(31) +#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK) +#define PMSR 0x01105c #define MACS2R 0x011078 #define MACCGSPSETR 0x011084 #define SPCNGRSN BIT(31) @@ -613,6 +618,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); + rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); + /* Finish initialization - establish a PCI Express link */ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); @@ -890,7 +897,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; struct rcar_msi *msi = &pcie->msi; - unsigned long base; + phys_addr_t base; int err, i; mutex_init(&msi->lock); @@ -929,10 +936,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) /* setup MSI data target */ msi->pages = __get_free_pages(GFP_KERNEL, 0); + if (!msi->pages) { + err = -ENOMEM; + goto err; + } base = virt_to_phys((void *)msi->pages); - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); + rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); /* enable all MSI interrupts */ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); @@ -1130,6 +1141,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; + platform_set_drvdata(pdev, pcie); err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); if (err) @@ -1221,10 +1233,29 @@ static int rcar_pcie_probe(struct platform_device *pdev) return err; } +static int rcar_pcie_resume_noirq(struct device *dev) +{ + struct rcar_pcie *pcie = dev_get_drvdata(dev); + + if (rcar_pci_read_reg(pcie, PMSR) && + !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) + return 0; + + /* Re-establish the PCIe link */ + rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR); + rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); + return rcar_pcie_wait_for_dl(pcie); +} + +static const struct dev_pm_ops rcar_pcie_pm_ops = { + .resume_noirq = rcar_pcie_resume_noirq, +}; + static struct platform_driver rcar_pcie_driver = { .driver = { .name = "rcar-pcie", .of_match_table = 
rcar_pcie_of_match, + .pm = &rcar_pcie_pm_ops, .suppress_bind_attrs = true, }, .probe = rcar_pcie_probe, diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 1372d270764f9e8c863007260191a27ec65b39d7..5ce8e6375687572a3f981d974ab893abe0040353 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); if (IS_ERR(rockchip->vpcie12v)) { - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) + return PTR_ERR(rockchip->vpcie12v); dev_info(dev, "no vpcie12v regulator found\n"); } rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) + return PTR_ERR(rockchip->vpcie3v3); dev_info(dev, "no vpcie3v3 regulator found\n"); } rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); if (IS_ERR(rockchip->vpcie1v8)) { - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV) + return PTR_ERR(rockchip->vpcie1v8); dev_info(dev, "no vpcie1v8 regulator found\n"); } rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); if (IS_ERR(rockchip->vpcie0v9)) { - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV) + return PTR_ERR(rockchip->vpcie0v9); dev_info(dev, "no vpcie0v9 regulator found\n"); } diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index fb32840ce8e66ac75f7c164a402ac9a0e0ae094b..4850a1b8eec127628eb390a3cf5401078dbc3e2d 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -483,15 +483,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, int i; mutex_lock(&msi->lock); - bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, - nr_irqs, 0); - if (bit >= INT_PCI_MSI_NR) { + bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, + get_count_order(nr_irqs)); + if (bit < 0) { mutex_unlock(&msi->lock); return -ENOSPC; } - bitmap_set(msi->bitmap, bit, nr_irqs); - for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, domain->host_data, handle_simple_irq, @@ -509,7 +507,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct nwl_msi *msi = &pcie->msi; mutex_lock(&msi->lock); - bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); + bitmap_release_region(msi->bitmap, data->hwirq, + get_count_order(nr_irqs)); mutex_unlock(&msi->lock); } diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 7b1389d8e2a5711383a4e8d8a6a424a4a33448f4..ea48cba5480b809d5c83207cf6b66cf64f13beff 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = { * xilinx_pcie_enable_msi - Enable MSI support * @port: PCIe port information */ -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) { phys_addr_t msg_addr; port->msi_pages = __get_free_pages(GFP_KERNEL, 0); + if 
(!port->msi_pages) + return -ENOMEM; + msg_addr = virt_to_phys((void *)port->msi_pages); pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + + return 0; } /* INTx Functions */ @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; + int ret; /* Setup INTx */ pcie_intc_node = of_get_next_child(node, NULL); @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) return -ENODEV; } - xilinx_pcie_enable_msi(port); + ret = xilinx_pcie_enable_msi(port); + if (ret) + return ret; } return 0; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index fd2dbd7eed7bca808f44470ba060725acc1ec061..ab36e5ca1aca39ddf643f22d777cf009fe7bd633 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -31,6 +31,9 @@ #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) +#define MB2_SHADOW_OFFSET 0x2000 +#define MB2_SHADOW_SIZE 16 + enum vmd_features { /* * Device may contain registers which hint the physical location of the @@ -94,6 +97,7 @@ struct vmd_dev { struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; + u8 busn_start; #ifdef CONFIG_X86_DEV_DMA_OPS struct dma_map_ops dma_ops; @@ -465,7 +469,8 @@ static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { char __iomem *addr = vmd->cfgbar + - (bus->number << 20) + (devfn << 12) + reg; + ((bus->number - vmd->busn_start) << 20) + + (devfn << 12) + reg; if ((addr - vmd->cfgbar) + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) @@ -588,7 +593,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; - resource_size_t membar2_offset = 0x2000, busn_start = 0; + resource_size_t membar2_offset = 0x2000; /* * Shadow registers may exist in certain VMD device ids which allow @@ -600,7 +605,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) u32 vmlock; int ret; - membar2_offset = 0x2018; + membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); if (ret || vmlock == ~0) return -ENODEV; @@ -612,9 +617,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + 0x2008); + readq(membar2 + MB2_SHADOW_OFFSET); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + 0x2010); + readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(vmd->dev, membar2); } } @@ -630,14 +635,14 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); if (BUS_RESTRICT_CAP(vmcap) && (BUS_RESTRICT_CFG(vmconfig) == 0x1)) - busn_start = 128; + vmd->busn_start = 128; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", - .start = busn_start, - .end = busn_start + (resource_size(res) >> 20) - 1, + .start = vmd->busn_start, + .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; @@ -705,8 +710,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_add_resource_offset(&resources, &vmd->resources[1], 
offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); - vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, - sd, &resources); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, + &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); @@ -813,12 +818,12 @@ static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); - vmd_detach_resources(vmd); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); + vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); } diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 3e86fa3c7da32eed5f432d57126b1748951aa2ff..26a75f58765e4103a5750409d91ddab4bf61a7ec 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -175,7 +175,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test) goto err_map_addr; } - memcpy(buf, src_addr, reg->size); + memcpy_fromio(buf, src_addr, reg->size); crc32 = crc32_le(~0, buf, reg->size); if (crc32 != reg->checksum) @@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) get_random_bytes(buf, reg->size); reg->checksum = crc32_le(~0, buf, reg->size); - memcpy(dst_addr, buf, reg->size); + memcpy_toio(dst_addr, buf, reg->size); /* * wait 1ms inorder for the write to complete. Without this delay L3 @@ -570,6 +570,11 @@ static int __init pci_epf_test_init(void) kpcitest_workqueue = alloc_workqueue("kpcitest", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!kpcitest_workqueue) { + pr_err("Failed to allocate the kpcitest work queue\n"); + return -ENOMEM; + } + ret = pci_epf_register_driver(&test_driver); if (ret) { pr_err("Failed to register pci epf test driver --> %d\n", ret); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 825fa24427a396a711b734ee03643038e1260185..8bfdcd2911960bbb56fd94f3fcd7a7ea584b6305 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -131,7 +131,9 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) epf->bar[bar].phys_addr = phys_addr; epf->bar[bar].size = size; epf->bar[bar].barno = bar; - epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY; + epf->bar[bar].flags |= upper_32_bits(size) ? 
+ PCI_BASE_ADDRESS_MEM_TYPE_64 : + PCI_BASE_ADDRESS_MEM_TYPE_32; return space; } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 12afa7fdf77e9569d78f517a77b01de129151937..a5401f96d9eaace224b58098828d3f050fd32daf 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge) /* Scan non-hotplug bridges that need to be reconfigured */ for_each_pci_bridge(dev, bus) { - if (!hotplug_is_native(dev)) - max = pci_scan_bridge(bus, dev, max, 1); + if (hotplug_is_native(dev)) + continue; + + max = pci_scan_bridge(bus, dev, max, 1); + if (dev->subordinate) { + pcibios_resource_survey_bus(dev->subordinate); + pci_bus_size_bridges(dev->subordinate); + pci_bus_assign_resources(dev->subordinate); + } } } @@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (PCI_SLOT(dev->devfn) == slot->device) acpiphp_native_scan_bridge(dev); } - pci_assign_unassigned_bridge_resources(bus->self); } else { LIST_HEAD(add_list); int max, pass; @@ -526,6 +532,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) slot->flags &= ~SLOT_ENABLED; continue; } + pci_dev_put(dev); } } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 811cf83f956de4db38856abdc79712a19494c1ef..23ee5d82b3e3e00015c0bd10919fa625abcd803b 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -113,6 +113,7 @@ struct slot { struct controller { struct mutex ctrl_lock; struct pcie_device *pcie; + unsigned int inband_presence_disabled:1; struct rw_semaphore reset_lock; struct slot *slot; wait_queue_head_t queue; @@ -181,7 +182,7 @@ void pciehp_handle_button_press(struct slot *slot); void pciehp_handle_disable_request(struct slot *slot); void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events); int pciehp_configure_device(struct slot *p_slot); -void pciehp_unconfigure_device(struct slot *p_slot); +void pciehp_unconfigure_device(struct slot *p_slot, bool presence); void pciehp_queue_pushbutton_work(struct work_struct *work); struct controller *pcie_init(struct pcie_device *dev); int pcie_init_notification(struct controller *ctrl); @@ -194,11 +195,12 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status); void pciehp_set_attention_status(struct slot *slot, u8 status); void pciehp_get_latch_status(struct slot *slot, u8 *status); -void pciehp_get_adapter_status(struct slot *slot, u8 *status); int pciehp_query_power_fault(struct slot *slot); void pciehp_green_led_on(struct slot *slot); void pciehp_green_led_off(struct slot *slot); void pciehp_green_led_blink(struct slot *slot); +bool pciehp_card_present(struct controller *ctrl); +bool pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); bool pciehp_check_link_active(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); @@ -212,4 +214,9 @@ static inline const char *slot_name(struct slot *slot) return hotplug_slot_name(slot->hotplug_slot); } +static inline struct pci_dev *ctrl_dev(struct controller *ctrl) +{ + return ctrl->pcie->port; +} + #endif /* _PCIEHP_H */ diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index ec48c9433ae507cd5e61157adbc0c50551a9f1ee..1e3f3bb1db09c5e061e23825c41006466daf681a 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -188,7 +188,7 @@ static int 
get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) struct pci_dev *pdev = slot->ctrl->pcie->port; pci_config_pm_runtime_get(pdev); - pciehp_get_adapter_status(slot, value); + *value = pciehp_card_present_or_link_active(slot->ctrl); pci_config_pm_runtime_put(pdev); return 0; } @@ -213,12 +213,12 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) static void pciehp_check_presence(struct controller *ctrl) { struct slot *slot = ctrl->slot; - u8 occupied; + bool occupied; down_read(&ctrl->reset_lock); mutex_lock(&slot->lock); - pciehp_get_adapter_status(slot, &occupied); + occupied = pciehp_card_present_or_link_active(ctrl); if ((occupied && (slot->state == OFF_STATE || slot->state == BLINKINGON_STATE)) || (!occupied && (slot->state == ON_STATE || @@ -348,7 +348,7 @@ static struct pcie_port_service_driver hpdriver_portdrv = { #endif /* PM */ }; -static int __init pcied_init(void) +int __init pcie_hp_init(void) { int retval = 0; @@ -359,4 +359,3 @@ static int __init pcied_init(void) return retval; } -device_initcall(pcied_init); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index da7c72372ffcfc15bc706062baa1038c8681a318..29fbda494d4ac51824c74ab218a27daaa8e95386 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -26,6 +26,9 @@ hotplug controller logic */ +#define SAFE_REMOVAL true +#define SURPRISE_REMOVAL false + static void set_slot_off(struct controller *ctrl, struct slot *pslot) { /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ @@ -101,12 +104,13 @@ static int board_added(struct slot *p_slot) /** * remove_board - Turns off slot and LEDs * @p_slot: slot where board is being removed + * @safe_removal: whether the board is safely removed (versus surprise removed) */ -static void remove_board(struct slot *p_slot) +static void remove_board(struct slot *p_slot, bool safe_removal) { struct controller *ctrl = p_slot->ctrl; - pciehp_unconfigure_device(p_slot); + pciehp_unconfigure_device(p_slot, safe_removal); if (POWER_CTRL(ctrl)) { pciehp_power_off_slot(p_slot); @@ -117,6 +121,12 @@ static void remove_board(struct slot *p_slot) * removed from the slot/adapter. 
*/ msleep(1000); + + /* Ignore link or presence changes caused by power off */ + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), + &ctrl->pending_events); + p_slot->work.data = p_slot->work.data & ~(PCI_EXP_SLTSTA_PDC | + PCI_EXP_SLTSTA_DLLSC); } /* turn off Green LED */ @@ -124,7 +134,7 @@ static void remove_board(struct slot *p_slot) } static int pciehp_enable_slot(struct slot *slot); -static int pciehp_disable_slot(struct slot *slot); +static int pciehp_disable_slot(struct slot *slot, bool safe_removal); void pciehp_request(struct controller *ctrl, int action) { @@ -137,6 +147,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) { struct slot *p_slot = container_of(work, struct slot, work.work); struct controller *ctrl = p_slot->ctrl; + int events = p_slot->work.data; + struct pci_dev *rpdev = ctrl_dev(ctrl)->rpdev; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -147,6 +159,15 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); break; default: + if (events) { + atomic_or(events, &ctrl->pending_events); + if (!pciehp_poll_mode) + irq_wake_thread(ctrl->pcie->irq, ctrl); + } else { + if (rpdev) + clear_bit(0, + &rpdev->slot_being_removed_rescanned); + } break; } mutex_unlock(&p_slot->lock); @@ -155,6 +176,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) void pciehp_handle_button_press(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; + struct pci_dev *rpdev = ctrl_dev(ctrl)->rpdev; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -172,6 +194,7 @@ void pciehp_handle_button_press(struct slot *p_slot) /* blink green LED and turn off amber */ pciehp_green_led_blink(p_slot); pciehp_set_attention_status(p_slot, 0); + p_slot->work.data = 0; schedule_delayed_work(&p_slot->work, 5 * HZ); break; case BLINKINGOFF_STATE: @@ -193,10 +216,14 @@ void pciehp_handle_button_press(struct slot *p_slot) pciehp_set_attention_status(p_slot, 0); ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n", slot_name(p_slot)); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); break; default: ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n", slot_name(p_slot), p_slot->state); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); break; } mutex_unlock(&p_slot->lock); @@ -205,6 +232,7 @@ void pciehp_handle_button_press(struct slot *p_slot) void pciehp_handle_disable_request(struct slot *slot) { struct controller *ctrl = slot->ctrl; + struct pci_dev *rpdev = ctrl_dev(ctrl)->rpdev; mutex_lock(&slot->lock); switch (slot->state) { @@ -216,14 +244,17 @@ void pciehp_handle_disable_request(struct slot *slot) slot->state = POWEROFF_STATE; mutex_unlock(&slot->lock); - ctrl->request_result = pciehp_disable_slot(slot); + ctrl->request_result = pciehp_disable_slot(slot, SAFE_REMOVAL); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); } void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events) { struct controller *ctrl = slot->ctrl; - bool link_active; - u8 present; + bool present, link_active; + bool removal = SAFE_REMOVAL; + struct pci_dev *rpdev = ctrl_dev(ctrl)->rpdev; /* * If the slot is on and presence or link has changed, turn it off. 
@@ -243,7 +274,8 @@ void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events) if (events & PCI_EXP_SLTSTA_PDC) ctrl_info(ctrl, "Slot(%s): Card not present\n", slot_name(slot)); - pciehp_disable_slot(slot); + pciehp_disable_slot(slot, SURPRISE_REMOVAL); + removal = SURPRISE_REMOVAL; break; default: mutex_unlock(&slot->lock); @@ -252,10 +284,21 @@ void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events) /* Turn the slot on if it's occupied or link is up */ mutex_lock(&slot->lock); - pciehp_get_adapter_status(slot, &present); + /* + * If this is a surprise removal and a power controller is not + * implemented, wait for at least 1 second before checking card + * presence, as the Data Link Layer State Changed (link down) event is + * reported prior to Presence Detect Changed (card is not present). + */ + if (!removal && !POWER_CTRL(ctrl)) + msleep(1000); + + present = pciehp_card_present(ctrl); link_active = pciehp_check_link_active(ctrl); if (!present && !link_active) { mutex_unlock(&slot->lock); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); return; } @@ -278,6 +321,8 @@ void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events) mutex_unlock(&slot->lock); break; } + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); } static int __pciehp_enable_slot(struct slot *p_slot) @@ -285,11 +330,6 @@ static int __pciehp_enable_slot(struct slot *p_slot) u8 getstatus = 0; struct controller *ctrl = p_slot->ctrl; - pciehp_get_adapter_status(p_slot, &getstatus); - if (!getstatus) { - ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot)); - return -ENODEV; - } if (MRL_SENS(p_slot->ctrl)) { pciehp_get_latch_status(p_slot, &getstatus); if (getstatus) { @@ -329,7 +369,7 @@ static int pciehp_enable_slot(struct slot *slot) return ret; } -static int __pciehp_disable_slot(struct slot *p_slot) +static int __pciehp_disable_slot(struct slot *p_slot, bool safe_removal) { u8 getstatus = 0; struct controller *ctrl = p_slot->ctrl; @@ -343,17 +383,17 @@ static int __pciehp_disable_slot(struct slot *p_slot) } } - remove_board(p_slot); + remove_board(p_slot, safe_removal); return 0; } -static int pciehp_disable_slot(struct slot *slot) +static int pciehp_disable_slot(struct slot *slot, bool safe_removal) { struct controller *ctrl = slot->ctrl; int ret; pm_runtime_get_sync(&ctrl->pcie->port->dev); - ret = __pciehp_disable_slot(slot); + ret = __pciehp_disable_slot(slot, safe_removal); pm_runtime_put(&ctrl->pcie->port->dev); mutex_lock(&slot->lock); @@ -366,6 +406,7 @@ static int pciehp_disable_slot(struct slot *slot) int pciehp_sysfs_enable_slot(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; + struct pci_dev *pdev = ctrl->pcie->port; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -377,9 +418,12 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) * card before the thread wakes up, so initialize to -ENODEV.
*/ ctrl->request_result = -ENODEV; - pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); - wait_event(ctrl->requester, - !atomic_read(&ctrl->pending_events)); + pci_config_pm_runtime_get(pdev); + down_read(&ctrl->reset_lock); + pciehp_handle_presence_or_link_change(p_slot, + PCI_EXP_SLTSTA_PDC); + up_read(&ctrl->reset_lock); + pci_config_pm_runtime_put(pdev); return ctrl->request_result; case POWERON_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering on state\n", @@ -404,15 +448,28 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) int pciehp_sysfs_disable_slot(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; + struct pci_dev *pdev = ctrl->pcie->port; + struct pci_dev *rpdev = pdev->rpdev; + + if (rpdev && test_and_set_bit(0, + &rpdev->slot_being_removed_rescanned)) { + ctrl_info(ctrl, "Slot(%s): Slot is being removed or rescanned, please try later!\n", + slot_name(p_slot)); + return -EINVAL; + } mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: case ON_STATE: mutex_unlock(&p_slot->lock); - pciehp_request(ctrl, DISABLE_SLOT); - wait_event(ctrl->requester, - !atomic_read(&ctrl->pending_events)); + pci_config_pm_runtime_get(pdev); + down_read(&ctrl->reset_lock); + pciehp_handle_disable_request(p_slot); + up_read(&ctrl->reset_lock); + pci_config_pm_runtime_put(pdev); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); return ctrl->request_result; case POWEROFF_STATE: ctrl_info(ctrl, "Slot(%s): Already in powering off state\n", @@ -431,5 +488,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot) } mutex_unlock(&p_slot->lock); + if (rpdev) + clear_bit(0, &rpdev->slot_being_removed_rescanned); + return -ENODEV; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index a938abdb41ceeb575dc62af740d78d278104280c..56137919c0c70e7559d2140598b2fc51e691a1d8 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -12,6 +12,7 @@ * Send feedback to , */ +#include #include #include #include @@ -27,10 +28,23 @@ #include "../pci.h" #include "pciehp.h" -static inline struct pci_dev *ctrl_dev(struct controller *ctrl) -{ - return ctrl->pcie->port; -} +static const struct dmi_system_id inband_presence_disabled_dmi_table[] = { + /* + * Match all Dell systems, as some Dell systems have inband + * presence disabled on NVMe slots (but don't support the bit to + * report it). Setting inband presence disabled should have no + * negative effect, except on broken hotplug slots that never + * assert presence detect--and those will still work, they will + * just have a bit of extra delay before being probed. 
+ */ + { + .ident = "Dell System", + .matches = { + DMI_MATCH(DMI_OEM_STRING, "Dell System"), + }, + }, + {} +}; static irqreturn_t pciehp_isr(int irq, void *dev_id); static irqreturn_t pciehp_ist(int irq, void *dev_id); @@ -80,6 +94,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout) if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + ctrl->cmd_busy = 0; + smp_mb(); return 1; } if (timeout < 0) @@ -159,9 +175,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, slot_ctrl |= (cmd & mask); ctrl->cmd_busy = 1; smp_mb(); + ctrl->slot_ctrl = slot_ctrl; pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); ctrl->cmd_started = jiffies; - ctrl->slot_ctrl = slot_ctrl; /* * Controllers with the Intel CF118 and similar errata advertise @@ -217,13 +233,6 @@ bool pciehp_check_link_active(struct controller *ctrl) return ret; } -static void pcie_wait_link_active(struct controller *ctrl) -{ - struct pci_dev *pdev = ctrl_dev(ctrl); - - pcie_wait_for_link(pdev, true); -} - static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) { u32 l; @@ -250,24 +259,34 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) return found; } +static void pcie_wait_for_presence(struct pci_dev *pdev) +{ + int timeout = 1250; + u16 slot_status; + + do { + pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); + if (slot_status & PCI_EXP_SLTSTA_PDS) + return; + msleep(10); + timeout -= 10; + } while (timeout > 0); + + pci_info(pdev, "Timeout waiting for Presence Detect\n"); +} + int pciehp_check_link_status(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); bool found; u16 lnk_status; - /* - * Data Link Layer Link Active Reporting must be capable for - * hot-plug capable downstream port. But old controller might - * not implement it. In this case, we wait for 1000 ms. - */ - if (ctrl->link_active_reporting) - pcie_wait_link_active(ctrl); - else - msleep(1000); + if (!pcie_wait_for_link(pdev, true)) + return -1; + + if (ctrl->inband_presence_disabled) + pcie_wait_for_presence(pdev); - /* wait 100ms before read pci conf, and try in 1s */ - msleep(100); found = pci_bus_check_dev(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); @@ -389,13 +408,27 @@ void pciehp_get_latch_status(struct slot *slot, u8 *status) *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); } -void pciehp_get_adapter_status(struct slot *slot, u8 *status) +bool pciehp_card_present(struct controller *ctrl) { - struct pci_dev *pdev = ctrl_dev(slot->ctrl); + struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_status; pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); - *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); + return slot_status & PCI_EXP_SLTSTA_PDS; +} + +/** + * pciehp_card_present_or_link_active() - whether given slot is occupied + * @ctrl: PCIe hotplug controller + * + * Unlike pciehp_card_present(), which determines presence solely from the + * Presence Detect State bit, this helper also returns true if the Link Active + * bit is set. This is a concession to broken hotplug ports which hardwire + * Presence Detect State to zero, such as Wilocity's [1ae9:0200]. 
+ */ +bool pciehp_card_present_or_link_active(struct controller *ctrl) +{ + return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl); } int pciehp_query_power_fault(struct slot *slot) @@ -530,12 +563,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; - u16 status, events; + u16 status, events = 0; /* - * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4). + * Interrupts only occur in D3hot or shallower and only if enabled + * in the Slot Control register (PCIe r4.0, sec 6.7.3.4). */ - if (pdev->current_state == PCI_D3cold) + if (pdev->current_state == PCI_D3cold || + (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode)) return IRQ_NONE; /* @@ -553,6 +588,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } } +read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (status == (u16) ~0) { ctrl_info(ctrl, "%s: no response from device\n", __func__); @@ -565,24 +601,39 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. */ - events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | - PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | - PCI_EXP_SLTSTA_DLLSC); + status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | + PCI_EXP_SLTSTA_DLLSC; /* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) - events &= ~PCI_EXP_SLTSTA_PFD; + status &= ~PCI_EXP_SLTSTA_PFD; + else if (status & PCI_EXP_SLTSTA_PFD) + ctrl->power_fault_detected = true; + events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; } - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + if (status) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status); + + /* + * In MSI mode, all event bits must be zero before the port + * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). + * So re-read the Slot Status register in case a bit was set + * between read and write. 
+ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) + goto read_status; + } + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent); @@ -616,6 +667,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); + struct pci_dev *rpdev = pdev->rpdev; struct slot *slot = ctrl->slot; irqreturn_t ret; u32 events; @@ -643,12 +695,22 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) if (events & PCI_EXP_SLTSTA_ABP) { ctrl_info(ctrl, "Slot(%s): Attention button pressed\n", slot_name(slot)); - pciehp_handle_button_press(slot); + if (!rpdev || (rpdev && !test_and_set_bit(0, + &rpdev->slot_being_removed_rescanned))) + pciehp_handle_button_press(slot); + else { + if (slot->state == BLINKINGOFF_STATE || + slot->state == BLINKINGON_STATE) + pciehp_handle_button_press(slot); + else + ctrl_info(ctrl, "Slot(%s): Slot operation failed because a remove or" + " rescan operation is in progress, please try later!", + slot_name(slot)); + } } /* Check Power Fault Detected */ - if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { - ctrl->power_fault_detected = 1; + if (events & PCI_EXP_SLTSTA_PFD) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot)); pciehp_set_attention_status(slot, 1); pciehp_green_led_off(slot); @@ -659,10 +721,59 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) * or Data Link Layer State Changed events. */ down_read(&ctrl->reset_lock); - if (events & DISABLE_SLOT) - pciehp_handle_disable_request(slot); - else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) - pciehp_handle_presence_or_link_change(slot, events); + if (events & DISABLE_SLOT) { + if (!rpdev || (rpdev && !test_and_set_bit(0, + &rpdev->slot_being_removed_rescanned))) + pciehp_handle_disable_request(slot); + else { + if (slot->state == BLINKINGOFF_STATE || + slot->state == BLINKINGON_STATE) + pciehp_handle_disable_request(slot); + else { + ctrl_info(ctrl, "Slot(%s): DISABLE_SLOT event in remove or rescan process!\n", + slot_name(slot)); + /* + * We use the work_struct private data to store + * the event type. + */ + slot->work.data = DISABLE_SLOT; + /* + * If 'work.timer' is pending, scheduling the work + * would trigger a BUG_ON(). + */ + if (!timer_pending(&slot->work.timer)) + schedule_delayed_work(&slot->work, 3 * HZ); + else + ctrl_info(ctrl, "Slot(%s): Didn't schedule delayed_work because timer is pending!\n", + slot_name(slot)); + } + } + } else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) { + if (!rpdev || (rpdev && !test_and_set_bit(0, + &rpdev->slot_being_removed_rescanned))) + pciehp_handle_presence_or_link_change(slot, events); + else { + if (slot->state == BLINKINGOFF_STATE || + slot->state == BLINKINGON_STATE) + pciehp_handle_presence_or_link_change(slot, + events); + else { + /* + * When we are removing or rescanning through + * sysfs, a surprise link down/up happens. So we + * will handle this event 3 seconds later.
+ */ + ctrl_info(ctrl, "Slot(%s): Surprise link down/up in remove or rescan process!\n", + slot_name(slot)); + slot->work.data = events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + if (!timer_pending(&slot->work.timer)) + schedule_delayed_work(&slot->work, 3 * HZ); + else + ctrl_info(ctrl, "Slot(%s): Didn't schedule delayed_work because timer is pending!\n", + slot_name(slot)); + } + } + } up_read(&ctrl->reset_lock); pci_config_pm_runtime_put(pdev); @@ -856,8 +967,8 @@ static inline void dbg_ctrl(struct controller *ctrl) struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; - u32 slot_cap, link_cap; - u8 occupied, poweron; + u32 slot_cap, slot_cap2, link_cap; + u8 poweron; struct pci_dev *pdev = dev->port; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); @@ -884,10 +995,18 @@ struct controller *pcie_init(struct pcie_device *dev) init_waitqueue_head(&ctrl->queue); dbg_ctrl(ctrl); + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2); + if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) { + pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE, + PCI_EXP_SLTCTL_IBPD_DISABLE); + ctrl->inband_presence_disabled = 1; + } + + if (dmi_first_match(inband_presence_disabled_dmi_table)) + ctrl->inband_presence_disabled = 1; + /* Check if Data Link Layer Link Active Reporting is implemented */ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); - if (link_cap & PCI_EXP_LNKCAP_DLLLARC) - ctrl->link_active_reporting = 1; /* Clear all remaining event bits in Slot Status register. */ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, @@ -895,7 +1014,7 @@ struct controller *pcie_init(struct pcie_device *dev) PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC); - ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n", + ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n", (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), @@ -906,6 +1025,7 @@ struct controller *pcie_init(struct pcie_device *dev) FLAG(slot_cap, PCI_EXP_SLTCAP_HPS), FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), + FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD), FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC), pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : ""); @@ -917,9 +1037,8 @@ struct controller *pcie_init(struct pcie_device *dev) * requested yet, so avoid triggering a notification with this command. */ if (POWER_CTRL(ctrl)) { - pciehp_get_adapter_status(ctrl->slot, &occupied); pciehp_get_power_status(ctrl->slot, &poweron); - if (!occupied && poweron) { + if (!pciehp_card_present_or_link_active(ctrl) && poweron) { pcie_disable_notification(ctrl); pciehp_power_off_slot(ctrl->slot); } diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 5c58c22e0c084145115662855c6efadc0dea6268..f410b9d5641ad3725930ff8dcbb05ad066cf46d9 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -20,6 +20,14 @@ #include "../pci.h" #include "pciehp.h" +/** + * pciehp_configure_device() - enumerate PCI devices below a hotplug bridge + * @p_slot: PCIe hotplug slot + * + * Enumerate PCI devices below a hotplug bridge and add them to the system. + * Return 0 on success, %-EEXIST if the devices are already enumerated or + * %-ENODEV if enumeration failed. 
+ */ int pciehp_configure_device(struct slot *p_slot) { struct pci_dev *dev; @@ -55,16 +63,33 @@ int pciehp_configure_device(struct slot *p_slot) pci_assign_unassigned_bridge_resources(bridge); pcie_bus_configure_settings(parent); + + /* + * Release reset_lock during driver binding + * to avoid AB-BA deadlock with device_lock. + */ + up_read(&ctrl->reset_lock); pci_bus_add_devices(parent); + down_read(&ctrl->reset_lock); out: pci_unlock_rescan_remove(); return ret; } -void pciehp_unconfigure_device(struct slot *p_slot) +/** + * pciehp_unconfigure_device() - remove PCI devices below a hotplug bridge + * @p_slot: PCIe hotplug slot + * @presence: whether the card is still present in the slot; + * true for safe removal via sysfs or an Attention Button press, + * false for surprise removal + * + * Unbind PCI devices below a hotplug bridge from their drivers and remove + * them from the system. Safely removed devices are quiesced. Surprise + * removed devices are marked as such to prevent further accesses. + */ +void pciehp_unconfigure_device(struct slot *p_slot, bool presence) { - u8 presence = 0; struct pci_dev *dev, *temp; struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; u16 command; @@ -72,7 +97,9 @@ void pciehp_unconfigure_device(struct slot *p_slot) ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", __func__, pci_domain_nr(parent), parent->number); - pciehp_get_adapter_status(p_slot, &presence); + + if (!presence) + pci_walk_bus(parent, pci_dev_set_disconnected, NULL); pci_lock_rescan_remove(); @@ -85,13 +112,15 @@ void pciehp_unconfigure_device(struct slot *p_slot) list_for_each_entry_safe_reverse(dev, temp, &parent->devices, bus_list) { pci_dev_get(dev); - if (!presence) { - pci_dev_set_disconnected(dev, NULL); - if (pci_has_subordinate(dev)) - pci_walk_bus(dev->subordinate, - pci_dev_set_disconnected, NULL); - } + + /* + * Release reset_lock during driver unbinding + * to avoid AB-BA deadlock with device_lock. + */ + up_read(&ctrl->reset_lock); pci_stop_and_remove_bus_device(dev); + down_read(&ctrl->reset_lock); + /* * Ensure that no new Requests will be generated from * the device. 
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 3276a5e4c430b6b9d4fbc96d1f1522959ffcb7a1..486fad4309584fbeaaf18e0a045b684413836af3 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -35,7 +35,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, bool disable_device) { struct pci_dev *pdev = php_slot->pdev; - int irq = php_slot->irq; u16 ctrl; if (php_slot->irq > 0) { @@ -54,7 +53,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, php_slot->wq = NULL; } - if (disable_device || irq > 0) { + if (disable_device) { if (pdev->msix_enabled) pci_disable_msix(pdev); else if (pdev->msi_enabled) diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e2356a9c7088a4202a41dd76898751947ff6b62f..182f9e3443eef83a4a6b455161f6e00d2029b4fe 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) if (rc == 0) break; } + of_node_put(parent); return dn; } @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, return np; } +/* Returns a device_node with its reference count incremented */ static struct device_node *find_dlpar_node(char *drc_name, int *node_type) { struct device_node *dn; @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name) rc = dlpar_add_phb(drc_name, dn); break; } + of_node_put(dn); printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); exit: @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name) rc = dlpar_remove_pci_slot(drc_name, dn); break; } + of_node_put(dn); vm_unmap_aliases(); printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index cdbfa5df3a51f3d629179bf83f830cf7f4f1095b..dbfa0b55d31a5126f35785653857117dac463d35 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) @@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - memcpy(drc_name, buf, nbytes); + strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); - if (!end) - end = &drc_name[nbytes]; - *end = '\0'; + if (end) + *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 857c358b727b839aa1bca6f67b11c16bee282cb1..a306cad7047055d0aac8f63c3de9c5853b9c7958 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot) return speed; } -static int get_children_props(struct device_node *dn, const int **drc_indexes, - const int **drc_names, const int **drc_types, - const int **drc_power_domains) +static int get_children_props(struct device_node *dn, const __be32 **drc_indexes, + const __be32 **drc_names, const __be32 **drc_types, + const __be32 **drc_power_domains) { - const int *indexes, *names, *types, *domains; + const __be32 *indexes, *names, *types, *domains; indexes = 
of_get_property(dn, "ibm,drc-indexes", NULL); names = of_get_property(dn, "ibm,drc-names", NULL); @@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, char *drc_type, unsigned int my_index) { char *name_tmp, *type_tmp; - const int *indexes, *names; - const int *types, *domains; + const __be32 *indexes, *names; + const __be32 *types, *domains; int i, rc; rc = get_children_props(dn->parent, &indexes, &names, &types, &domains); @@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, /* Iterate through parent properties, looking for my-drc-index */ for (i = 0; i < be32_to_cpu(indexes[0]); i++) { - if ((unsigned int) indexes[i + 1] == my_index) + if (be32_to_cpu(indexes[i + 1]) == my_index) break; name_tmp += (strlen(name_tmp) + 1); @@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, struct of_drc_info drc; const __be32 *value; char cell_drc_name[MAX_DRC_NAME_LEN]; - int j, fndit; + int j; info = of_find_property(dn->parent, "ibm,drc-info", NULL); if (info == NULL) @@ -239,23 +239,22 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, value = of_prop_next_u32(info, NULL, &entries); if (!value) return -EINVAL; + else + value++; for (j = 0; j < entries; j++) { of_read_drc_info_cell(&info, &value, &drc); /* Should now know end of current entry */ - if (my_index > drc.last_drc_index) - continue; - - fndit = 1; - break; + /* Found it */ + if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) { + int index = my_index - drc.drc_index_start; + sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, + drc.drc_name_suffix_start + index); + break; + } } - /* Found it */ - - if (fndit) - sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, - my_index); if (((drc_name == NULL) || (drc_name && !strcmp(drc_name, cell_drc_name))) && @@ -269,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, char *drc_type) { - const unsigned int *my_index; + const __be32 *my_index; my_index = of_get_property(dn, "ibm,my-drc-index", NULL); if (!my_index) { @@ -277,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, return -EINVAL; } - if (firmware_has_feature(FW_FEATURE_DRC_INFO)) + if (of_find_property(dn->parent, "ibm,drc-info", NULL)) return rpaphp_check_drc_props_v2(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); else return rpaphp_check_drc_props_v1(dn, drc_name, drc_type, - *my_index); + be32_to_cpu(*my_index)); } EXPORT_SYMBOL_GPL(rpaphp_check_drc_props); @@ -313,10 +312,11 @@ static int is_php_type(char *drc_type) * for built-in pci slots (even when the built-in slots are * dlparable.) 
*/ -static int is_php_dn(struct device_node *dn, const int **indexes, - const int **names, const int **types, const int **power_domains) +static int is_php_dn(struct device_node *dn, const __be32 **indexes, + const __be32 **names, const __be32 **types, + const __be32 **power_domains) { - const int *drc_types; + const __be32 *drc_types; int rc; rc = get_children_props(dn, indexes, names, &drc_types, power_domains); @@ -351,7 +351,7 @@ int rpaphp_add_slot(struct device_node *dn) struct slot *slot; int retval = 0; int i; - const int *indexes, *names, *types, *power_domains; + const __be32 *indexes, *names, *types, *power_domains; char *name, *type; if (!dn->name || strcmp(dn->name, "pci")) diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index c5f3cd4ed766f8d79798e8e22a8e12f56764333f..8d1f1e436d1a088e75dfa2befbb65ff2eba5eebb 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -18,6 +18,8 @@ #define VIRTFN_ID_LEN 16 +static DEFINE_MUTEX(pci_sriov_numvfs_lock); + int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id) { if (!dev->is_physfn) @@ -212,6 +214,16 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) return rc; } +int pci_iov_add_virtfn_locked(struct pci_dev *dev, int id) +{ + int rc; + + mutex_lock(&pci_sriov_numvfs_lock); + rc = pci_iov_add_virtfn(dev, id); + mutex_unlock(&pci_sriov_numvfs_lock); + return rc; +} + void pci_iov_remove_virtfn(struct pci_dev *dev, int id) { char buf[VIRTFN_ID_LEN]; @@ -241,6 +253,13 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id) pci_dev_put(dev); } +void pci_iov_remove_virtfn_locked(struct pci_dev *dev, int id) +{ + mutex_lock(&pci_sriov_numvfs_lock); + pci_iov_remove_virtfn(dev, id); + mutex_unlock(&pci_sriov_numvfs_lock); +} + int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { return 0; @@ -337,7 +356,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) pci_cfg_access_unlock(dev); for (i = 0; i < initial; i++) { - rc = pci_iov_add_virtfn(dev, i); + if (dev->bus->number != pci_iov_virtfn_bus(dev, i)) + rc = pci_iov_add_virtfn_locked(dev, i); + else + rc = pci_iov_add_virtfn(dev, i); if (rc) goto failed; } @@ -348,8 +370,12 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) return 0; failed: - while (i--) - pci_iov_remove_virtfn(dev, i); + while (i--) { + if (dev->bus->number != pci_iov_virtfn_bus(dev, i)) + pci_iov_remove_virtfn_locked(dev, i); + else + pci_iov_remove_virtfn(dev, i); + } err_pcibios: iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); @@ -375,8 +401,12 @@ static void sriov_disable(struct pci_dev *dev) if (!iov->num_VFs) return; - for (i = 0; i < iov->num_VFs; i++) - pci_iov_remove_virtfn(dev, i); + for (i = 0; i < iov->num_VFs; i++) { + if (dev->bus->number != pci_iov_virtfn_bus(dev, i)) + pci_iov_remove_virtfn_locked(dev, i); + else + pci_iov_remove_virtfn(dev, i); + } iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index f2ef896464b376077462c53315533a555cf0198d..bf560dcf8dd4b018bc911f7ee83dbdaed78b9175 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -170,24 +170,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) * reliably as devices without an INTx disable bit will then generate a * level IRQ which will never be cleared. 
*/ -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - u32 mask_bits = desc->masked; + raw_spinlock_t *lock = &desc->dev->msi_lock; + unsigned long flags; if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) - return 0; + return; - mask_bits &= ~mask; - mask_bits |= flag; + raw_spin_lock_irqsave(lock, flags); + desc->masked &= ~mask; + desc->masked |= flag; pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, - mask_bits); - - return mask_bits; + desc->masked); + raw_spin_unlock_irqrestore(lock, flags); } static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); + __pci_msi_desc_mask_irq(desc, mask, flag); } static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) @@ -211,7 +212,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) return 0; mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; - if (flag) + if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT) mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL); @@ -302,10 +303,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); + bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); + + /* + * The specification mandates that the entry is masked + * when the message is modified: + * + * "If software changes the Address or Data value of an + * entry while the entry is unmasked, the result is + * undefined." + */ + if (unmasked) + __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->data, base + PCI_MSIX_ENTRY_DATA); + + if (unmasked) + __pci_msix_desc_mask_irq(entry, 0); + + /* Ensure that the writes are visible in the device */ + readl(base + PCI_MSIX_ENTRY_DATA); } else { int pos = dev->msi_cap; u16 msgctl; @@ -326,6 +345,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data); } + /* Ensure that the writes are visible in the device */ + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); } entry->msg = *msg; } @@ -351,18 +372,6 @@ static void free_msi_irqs(struct pci_dev *dev) for (i = 0; i < entry->nvec_used; i++) BUG_ON(irq_has_action(entry->irq + i)); - pci_msi_teardown_msi_irqs(dev); - - list_for_each_entry_safe(entry, tmp, msi_list, list) { - if (entry->msi_attrib.is_msix) { - if (list_is_last(&entry->list, msi_list)) - iounmap(entry->mask_base); - } - - list_del(&entry->list); - free_msi_entry(entry); - } - if (dev->msi_irq_groups) { sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups); msi_attrs = dev->msi_irq_groups[0]->attrs; @@ -378,6 +387,18 @@ static void free_msi_irqs(struct pci_dev *dev) kfree(dev->msi_irq_groups); dev->msi_irq_groups = NULL; } + + pci_msi_teardown_msi_irqs(dev); + + list_for_each_entry_safe(entry, tmp, msi_list, list) { + if (entry->msi_attrib.is_msix) { + if (list_is_last(&entry->list, msi_list)) + iounmap(entry->mask_base); + } + + list_del(&entry->list); + free_msi_entry(entry); + } } static void pci_intx_for_msi(struct pci_dev *dev, int enable) @@ -548,6 +569,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) goto out; pci_read_config_word(dev, 
dev->msi_cap + PCI_MSI_FLAGS, &control); + /* Lies, damned lies, and MSIs */ + if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) + control |= PCI_MSI_FLAGS_MASKBIT; entry->msi_attrib.is_msix = 0; entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); @@ -619,21 +643,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, /* Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } ret = msi_verify_entries(dev); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } ret = populate_msi_sysfs(dev); if (ret) { - msi_mask_irq(entry, mask, ~mask); + msi_mask_irq(entry, mask, 0); free_msi_irqs(dev); return ret; } @@ -658,11 +682,22 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, &table_offset); bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); + if (bir >= DEVICE_COUNT_RESOURCE) { + dev_err(&dev->dev, "MSI-X Table BIR is out of range !\n"); + return NULL; + } + flags = pci_resource_flags(dev, bir); if (!flags || (flags & IORESOURCE_UNSET)) return NULL; table_offset &= PCI_MSIX_TABLE_OFFSET; + if (table_offset >= pci_resource_len(dev, bir)) { + dev_err(&dev->dev, + "MSI-X Table offset is out of range of BAR:%d!\n", + bir); + return NULL; + } phys_addr = pci_resource_start(dev, bir) + table_offset; return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); @@ -674,6 +709,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, { struct cpumask *curmsk, *masks = NULL; struct msi_desc *entry; + void __iomem *addr; int ret, i; if (affd) @@ -693,6 +729,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.is_msix = 1; entry->msi_attrib.is_64 = 1; + if (entries) entry->msi_attrib.entry_nr = entries[i].entry; else @@ -700,6 +737,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->msi_attrib.default_irq = dev->irq; entry->mask_base = base; + addr = pci_msix_desc_addr(entry); + if (addr) + entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); if (masks) curmsk++; @@ -710,21 +751,30 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, return ret; } -static void msix_program_entries(struct pci_dev *dev, - struct msix_entry *entries) +static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) { struct msi_desc *entry; - int i = 0; for_each_pci_msi_entry(entry, dev) { - if (entries) - entries[i++].vector = entry->irq; - entry->masked = readl(pci_msix_desc_addr(entry) + - PCI_MSIX_ENTRY_VECTOR_CTRL); - msix_mask_irq(entry, 1); + if (entries) { + entries->vector = entry->irq; + entries++; + } } } +static void msix_mask_all(void __iomem *base, int tsize) +{ + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; + int i; + + if (pci_msi_ignore_mask) + return; + + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); +} + /** * msix_capability_init - configure device's MSI-X capability * @dev: pointer to the pci_dev data structure of MSI-X device function @@ -739,22 +789,33 @@ static void msix_program_entries(struct pci_dev *dev, static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, const struct irq_affinity *affd) { - int 
ret; - u16 control; void __iomem *base; + int ret, tsize; + u16 control; - /* Ensure MSI-X is disabled while it is set up */ - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + /* + * Some devices require MSI-X to be enabled before the MSI-X + * registers can be accessed. Mask all the vectors to prevent + * interrupts coming in before they're fully set up. + */ + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | + PCI_MSIX_FLAGS_ENABLE); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ - base = msix_map_region(dev, msix_table_size(control)); - if (!base) - return -ENOMEM; + tsize = msix_table_size(control); + base = msix_map_region(dev, tsize); + if (!base) { + ret = -ENOMEM; + goto out_disable; + } + + /* Ensure that all table entries are masked. */ + msix_mask_all(base, tsize); ret = msix_setup_entries(dev, base, entries, nvec, affd); if (ret) - return ret; + goto out_disable; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) @@ -765,15 +826,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, if (ret) goto out_free; - /* - * Some devices require MSI-X to be enabled before we can touch the - * MSI-X registers. We need to mask all the vectors to prevent - * interrupts coming in before they're fully set up. - */ - pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); - - msix_program_entries(dev, entries); + msix_update_entries(dev, entries); ret = populate_msi_sysfs(dev); if (ret) @@ -807,6 +860,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, out_free: free_msi_irqs(dev); +out_disable: + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); + return ret; } @@ -894,8 +950,7 @@ static void pci_msi_shutdown(struct pci_dev *dev) /* Return the device with MSI unmasked as initial states */ mask = msi_mask(desc->msi_attrib.multi_cap); - /* Keep cached state to be restored */ - __pci_msi_desc_mask_irq(desc, mask, ~mask); + msi_mask_irq(desc, mask, 0); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = desc->msi_attrib.default_irq; @@ -958,7 +1013,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, } } } - WARN_ON(!!dev->msix_enabled); /* Check whether driver already requested for MSI irq */ if (dev->msi_enabled) { @@ -981,10 +1035,8 @@ static void pci_msix_shutdown(struct pci_dev *dev) } /* Return the device with MSI-X masked as initial states */ - for_each_pci_msi_entry(entry, dev) { - /* Keep cached states to be restored */ + for_each_pci_msi_entry(entry, dev) __pci_msix_desc_mask_irq(entry, 1); - } pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); @@ -1028,8 +1080,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (!pci_msi_supported(dev, minvec)) return -EINVAL; - WARN_ON(!!dev->msi_enabled); - /* Check whether driver already requested MSI-X irqs */ if (dev->msix_enabled) { pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); @@ -1039,6 +1089,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (maxvec < minvec) return -ERANGE; + if (WARN_ON_ONCE(dev->msi_enabled)) + return -EINVAL; + nvec = pci_msi_vec_count(dev); if (nvec < 0) return nvec; @@ -1087,6 +1140,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev, if (maxvec < minvec) return -ERANGE; + if (WARN_ON_ONCE(dev->msix_enabled)) + return 
-EINVAL; + for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); @@ -1152,7 +1208,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, const struct irq_affinity *affd) { static const struct irq_affinity msi_default_affd; - int vecs = -ENOSPC; + int msix_vecs = -ENOSPC; + int msi_vecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) @@ -1163,16 +1220,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } if (flags & PCI_IRQ_MSIX) { - vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, - affd); - if (vecs > 0) - return vecs; + msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, + max_vecs, affd); + if (msix_vecs > 0) + return msix_vecs; } if (flags & PCI_IRQ_MSI) { - vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); - if (vecs > 0) - return vecs; + msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, + affd); + if (msi_vecs > 0) + return msi_vecs; } /* use legacy irq if allowed */ @@ -1183,7 +1241,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } } - return vecs; + if (msix_vecs == -ENOSPC) + return -ENOSPC; + return msi_vecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index c2ab577050434bb602fd8c4c8252547b90a9ff4f..15ba77cd105598178f66e99c12aef62ce54ac734 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -140,8 +140,7 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record, hpx->t0->enable_perr = fields[5].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 0 Revision %d record not supported\n", + pr_warn("%s: Type 0 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } @@ -169,8 +168,7 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record, hpx->t1->tot_max_split = fields[4].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 1 Revision %d record not supported\n", + pr_warn("%s: Type 1 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } @@ -211,8 +209,7 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record, hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; break; default: - printk(KERN_WARNING - "%s: Type 2 Revision %d record not supported\n", + pr_warn("%s: Type 2 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } @@ -272,7 +269,7 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) goto exit; break; default: - printk(KERN_ERR "%s: Type %d record not supported\n", + pr_err("%s: Type %d record not supported\n", __func__, type); status = AE_ERROR; goto exit; @@ -583,6 +580,14 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) return state_conv[state]; } +static void acpi_pci_refresh_power_state(struct pci_dev *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(&dev->dev); + + if (adev && acpi_device_power_manageable(adev)) + acpi_device_update_power(adev, NULL); +} + static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) { while (bus->parent) { @@ -625,7 +630,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) if (!adev || !acpi_device_power_manageable(adev)) return false; - if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) + if (adev->wakeup.flags.valid && + device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) return true; if (acpi_target_system_state() == ACPI_STATE_S0) @@ -638,6 +644,7 @@ static 
const struct pci_platform_pm_ops acpi_pci_platform_pm = { .is_manageable = acpi_pci_power_manageable, .set_state = acpi_pci_set_power_state, .get_state = acpi_pci_get_power_state, + .refresh_state = acpi_pci_refresh_power_state, .choose_state = acpi_pci_choose_state, .set_wakeup = acpi_pci_wakeup, .need_resume = acpi_pci_need_resume, @@ -762,19 +769,33 @@ static void pci_acpi_setup(struct device *dev) return; device_set_wakeup_capable(dev, true); + /* + * For bridges that can do D3 we enable wake automatically (as + * we do for the power management itself in that case). The + * reason is that the bridge may have additional methods such as + * _DSW that need to be called. + */ + if (pci_dev->bridge_d3) + device_wakeup_enable(dev); + acpi_pci_wakeup(pci_dev, false); } static void pci_acpi_cleanup(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); + struct pci_dev *pci_dev = to_pci_dev(dev); if (!adev) return; pci_acpi_remove_pm_notifier(adev); - if (adev->wakeup.flags.valid) + if (adev->wakeup.flags.valid) { + if (pci_dev->bridge_d3) + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + } } static bool pci_acpi_bus_match(struct device *dev) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index bef17c3fca67cca5249a4489905f17d630a58b95..dc33bad9f1061f92bd2b18ffa815bfb799522457 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV static inline bool pci_device_can_probe(struct pci_dev *pdev) { - return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe); + return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe || + pdev->driver_override); } #else static inline bool pci_device_can_probe(struct pci_dev *pdev) @@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = to_pci_driver(dev->driver); + if (!pci_device_can_probe(pci_dev)) + return -ENODEV; + pci_assign_irq(pci_dev); error = pcibios_alloc_irq(pci_dev); @@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev) return error; pci_dev_get(pci_dev); - if (pci_device_can_probe(pci_dev)) { - error = __pci_device_probe(drv, pci_dev); - if (error) { - pcibios_free_irq(pci_dev); - pci_dev_put(pci_dev); - } + error = __pci_device_probe(drv, pci_dev); + if (error) { + pcibios_free_irq(pci_dev); + pci_dev_put(pci_dev); } return error; @@ -440,6 +442,13 @@ static int pci_device_remove(struct device *dev) if (drv) { if (drv->remove) { pm_runtime_get_sync(dev); + /* + * If the driver provides a .runtime_idle() callback and it has + * started to run already, it may continue to run in parallel + * with the code below, so wait until all of the runtime PM + * activity has completed. 
+ */ + pm_runtime_barrier(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } @@ -511,7 +520,11 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) } pci_restore_state(pci_dev); - pci_pme_restore(pci_dev); + if (!(pci_dev->vendor == PCI_VENDOR_ID_ZHAOXIN && + pci_dev->device == 0x3104 && + (pci_dev->revision & 0xf0) == 0x90 && + pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)) + pci_pme_restore(pci_dev); return 0; } @@ -524,7 +537,6 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) pci_power_up(pci_dev); pci_restore_state(pci_dev); pci_pme_restore(pci_dev); - pci_fixup_device(pci_fixup_resume_early, pci_dev); } /* @@ -702,7 +714,14 @@ static void pci_pm_complete(struct device *dev) if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) { pci_power_t pre_sleep_state = pci_dev->current_state; - pci_update_current_state(pci_dev, pci_dev->current_state); + pci_refresh_power_state(pci_dev); + /* + * On platforms with ACPI this check may also trigger for + * devices sharing power resources if one of those power + * resources has been activated as a result of a change of the + * power state of another device sharing it. However, in that + * case it is also better to resume the device, in general. + */ if (pci_dev->current_state < pre_sleep_state) pm_request_resume(dev); } @@ -734,6 +753,8 @@ static int pci_pm_suspend(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + pci_dev->skip_bus_pm = false; + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_SUSPEND); @@ -827,7 +848,18 @@ static int pci_pm_suspend_noirq(struct device *dev) } } - if (!pci_dev->state_saved) { + if (pci_dev->skip_bus_pm) { + /* + * Either the device is a bridge with a child in D0 below it, or + * the function is running for the second time in a row without + * going through full resume, which is possible only during + * suspend-to-idle in a spurious wakeup case. The device should + * be in D0 at this point, but if it is a bridge, it may be + * necessary to save its state. + */ + if (!pci_dev->state_saved) + pci_save_state(pci_dev); + } else if (!pci_dev->state_saved) { pci_save_state(pci_dev); if (pci_power_manageable(pci_dev)) pci_prepare_to_sleep(pci_dev); @@ -836,6 +868,22 @@ static int pci_pm_suspend_noirq(struct device *dev) dev_dbg(dev, "PCI PM: Suspend power state: %s\n", pci_power_name(pci_dev->current_state)); + if (pci_dev->current_state == PCI_D0) { + pci_dev->skip_bus_pm = true; + /* + * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any + * downstream device is in D0, so avoid changing the power state + * of the parent bridge by setting the skip_bus_pm flag for it. + */ + if (pci_dev->bus->self) + pci_dev->bus->self->skip_bus_pm = true; + } + + if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) { + dev_dbg(dev, "PCI PM: Skipped\n"); + goto Fixup; + } + pci_pm_set_unknown_state(pci_dev); /* @@ -883,7 +931,16 @@ static int pci_pm_resume_noirq(struct device *dev) if (dev_pm_smart_suspend_and_suspended(dev)) pm_runtime_set_active(dev); - pci_pm_default_resume_early(pci_dev); + /* + * In the suspend-to-idle case, devices left in D0 during suspend will + * stay in D0, so it is not necessary to restore or update their + * configuration here and attempting to put them into D0 again is + * pointless, so avoid doing that. 
+ */ + if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform())) + pci_pm_default_resume_early(pci_dev); + + pci_fixup_device(pci_fixup_resume_early, pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -957,15 +1014,15 @@ static int pci_pm_freeze(struct device *dev) } /* - * This used to be done in pci_pm_prepare() for all devices and some - * drivers may depend on it, so do it here. Ideally, runtime-suspended - * devices should not be touched during freeze/thaw transitions, - * however. + * Resume all runtime-suspended devices before creating a snapshot + * image of system memory, because the restore kernel generally cannot + * be expected to always handle them consistently and they need to be + * put into the runtime-active metastate during system resume anyway, + * so it is better to ensure that the state saved in the image will be + * always consistent with that. */ - if (!dev_pm_smart_suspend_and_suspended(dev)) { - pm_runtime_resume(dev); - pci_dev->state_saved = false; - } + pm_runtime_resume(dev); + pci_dev->state_saved = false; if (pm->freeze) { int error; @@ -1040,17 +1097,22 @@ static int pci_pm_thaw_noirq(struct device *dev) return error; } - if (pci_has_legacy_pm_support(pci_dev)) - return pci_legacy_resume_early(dev); - /* - * pci_restore_state() requires the device to be in D0 (because of MSI - * restoration among other things), so force it into D0 in case the - * driver's "freeze" callbacks put it into a low-power state directly. + * Both the legacy ->resume_early() and the new pm->thaw_noirq() + * callbacks assume the device has been returned to D0 and its + * config state has been restored. + * + * In addition, pci_restore_state() restores MSI-X state in MMIO + * space, which requires the device to be in D0, so return it to D0 + * in case the driver's "freeze" callbacks put it into a low-power + * state. */ pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) + return pci_legacy_resume_early(dev); + if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); @@ -1179,6 +1241,7 @@ static int pci_pm_restore_noirq(struct device *dev) } pci_pm_default_resume_early(pci_dev); + pci_fixup_device(pci_fixup_resume_early, pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); @@ -1251,30 +1314,29 @@ static int pci_pm_runtime_suspend(struct device *dev) return 0; } - if (!pm || !pm->runtime_suspend) - return -ENOSYS; - pci_dev->state_saved = false; - error = pm->runtime_suspend(dev); - if (error) { + if (pm && pm->runtime_suspend) { + error = pm->runtime_suspend(dev); /* * -EBUSY and -EAGAIN is used to request the runtime PM core * to schedule a new suspend, so log the event only with debug * log level. 
*/ - if (error == -EBUSY || error == -EAGAIN) + if (error == -EBUSY || error == -EAGAIN) { dev_dbg(dev, "can't suspend now (%pf returned %d)\n", pm->runtime_suspend, error); - else + return error; + } else if (error) { dev_err(dev, "can't suspend (%pf returned %d)\n", pm->runtime_suspend, error); - - return error; + return error; + } } pci_fixup_device(pci_fixup_suspend, pci_dev); - if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 + if (pm && pm->runtime_suspend + && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", @@ -1292,7 +1354,7 @@ static int pci_pm_runtime_suspend(struct device *dev) static int pci_pm_runtime_resume(struct device *dev) { - int rc; + int rc = 0; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; @@ -1306,14 +1368,12 @@ static int pci_pm_runtime_resume(struct device *dev) if (!pci_dev->driver) return 0; - if (!pm || !pm->runtime_resume) - return -ENOSYS; - pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); - rc = pm->runtime_resume(dev); + if (pm && pm->runtime_resume) + rc = pm->runtime_resume(dev); pci_dev->runtime_d3cold = false; diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index a5910f9428576672d0127c836e9d08019c86153e..9fb4ef568f405fe86b41408c8551c356092ce54b 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -162,7 +162,7 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, obj->buffer.length, UTF16_LITTLE_ENDIAN, - buf, PAGE_SIZE); + buf, PAGE_SIZE - 1); buf[len] = '\n'; } diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 314e135014dcd14387dc0ca7b8bdfb7198910baa..30fbe2ea6eab48bad6a974b423a4fe1aefe79224 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c @@ -62,8 +62,8 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = { * arch/x86/platform/intel-mid/pwr.c. 
*/ static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_PENWELL), - ICPU(INTEL_FAM6_ATOM_MERRIFIELD), + ICPU(INTEL_FAM6_ATOM_SALTWELL_MID), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID), {} }; diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 66f8a59fadbd90496196f0e6ba4eef3baeee75be..e408099fea5257dfe3218282d138615d966ab2f0 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -66,20 +66,18 @@ static int __init pci_stub_init(void) &class, &class_mask); if (fields < 2) { - printk(KERN_WARNING - "pci-stub: invalid id string \"%s\"\n", id); + pr_warn("pci-stub: invalid ID string \"%s\"\n", id); continue; } - printk(KERN_INFO - "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", + pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", vendor, device, subvendor, subdevice, class, class_mask); rc = pci_add_dynid(&stub_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) - printk(KERN_WARNING - "pci-stub: failed to add dynamic id (%d)\n", rc); + pr_warn("pci-stub: failed to add dynamic ID (%d)\n", + rc); } return 0; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9ecfe13157c09660ec627076b470e118e794c405..48c56cb086524a0e48d0c29fd0238eea7fe957ed 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -156,7 +156,8 @@ static ssize_t max_link_speed_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev))); + return sprintf(buf, "%s\n", + pci_speed_string(pcie_get_speed_cap(pdev))); } static DEVICE_ATTR_RO(max_link_speed); @@ -175,30 +176,15 @@ static ssize_t current_link_speed_show(struct device *dev, struct pci_dev *pci_dev = to_pci_dev(dev); u16 linkstat; int err; - const char *speed; + enum pci_bus_speed speed; err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); if (err) return -EINVAL; - switch (linkstat & PCI_EXP_LNKSTA_CLS) { - case PCI_EXP_LNKSTA_CLS_16_0GB: - speed = "16 GT/s"; - break; - case PCI_EXP_LNKSTA_CLS_8_0GB: - speed = "8 GT/s"; - break; - case PCI_EXP_LNKSTA_CLS_5_0GB: - speed = "5 GT/s"; - break; - case PCI_EXP_LNKSTA_CLS_2_5GB: - speed = "2.5 GT/s"; - break; - default: - speed = "Unknown speed"; - } + speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS]; - return sprintf(buf, "%s\n", speed); + return sprintf(buf, "%s\n", pci_speed_string(speed)); } static DEVICE_ATTR_RO(current_link_speed); @@ -470,15 +456,42 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev *rpdev = pdev->rpdev; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; - if (val && device_remove_file_self(dev, attr)) - pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); + if (rpdev && test_and_set_bit(0, + &rpdev->slot_being_removed_rescanned)) { + pr_info("Slot is being removed or rescanned, please try later!\n"); + return -EINVAL; + } + + /* + * if 'dev' is root port itself, 'pci_stop_and_remove_bus_device()' may + * free the 'rpdev', but we need to clear + * 'rpdev->slot_being_removed_rescanned' in the end. So get 'rpdev' to + * avoid possible 'use-after-free'. 
+ */ + if (rpdev) + pci_dev_get(rpdev); + + if (val) { + pci_dev_get(pdev); + if (device_remove_file_self(dev, attr)) + pci_stop_and_remove_bus_device_locked(pdev); + pci_dev_put(pdev); + } + + if (rpdev) { + clear_bit(0, &rpdev->slot_being_removed_rescanned); + pci_dev_put(rpdev); + } + return count; } -static struct device_attribute dev_remove_attr = __ATTR(remove, +static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove, (S_IWUSR|S_IWGRP), NULL, remove_store); @@ -1112,8 +1125,7 @@ void pci_create_legacy_files(struct pci_bus *b) kfree(b->legacy_io); b->legacy_io = NULL; kzalloc_err: - printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n"); - return; + dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n"); } void pci_remove_legacy_files(struct pci_bus *b) @@ -1289,11 +1301,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) sysfs_bin_attr_init(res_attr); if (write_combine) { - pdev->res_attr_wc[num] = res_attr; sprintf(res_attr_name, "resource%d_wc", num); res_attr->mmap = pci_mmap_resource_wc; } else { - pdev->res_attr[num] = res_attr; sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { res_attr->read = pci_read_resource_io; @@ -1309,10 +1319,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); - if (retval) + if (retval) { kfree(res_attr); + return retval; + } - return retval; + if (write_combine) + pdev->res_attr_wc[num] = res_attr; + else + pdev->res_attr[num] = res_attr; + + return 0; } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 51b6c81671c1e21baba57422cd90d8727992f8e1..b93605616d4e4ee96a2f4790c07c771d06da8af0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -35,6 +35,8 @@ #include #include "pci.h" +DEFINE_MUTEX(pci_slot_mutex); + const char *pci_power_names[] = { "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", }; @@ -222,7 +224,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, *endptr = strchrnul(path, ';'); - wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL); + wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); if (!wpath) return -ENOMEM; @@ -774,6 +776,12 @@ static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN; } +static inline void platform_pci_refresh_power_state(struct pci_dev *dev) +{ + if (pci_platform_pm && pci_platform_pm->refresh_state) + pci_platform_pm->refresh_state(dev); +} + static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) { return pci_platform_pm ? @@ -926,16 +934,18 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) } /** - * pci_power_up - Put the given device into D0 forcibly - * @dev: PCI device to power up + * pci_refresh_power_state - Refresh the given device's power state data + * @dev: Target PCI device. + * + * Ask the platform to refresh the devices power state information and invoke + * pci_update_current_state() to update its current PCI power state. 
*/ -void pci_power_up(struct pci_dev *dev) +void pci_refresh_power_state(struct pci_dev *dev) { if (platform_pci_power_manageable(dev)) - platform_pci_set_power_state(dev, PCI_D0); + platform_pci_refresh_power_state(dev); - pci_raw_set_power_state(dev, PCI_D0); - pci_update_current_state(dev, PCI_D0); + pci_update_current_state(dev, dev->current_state); } /** @@ -999,7 +1009,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) * because have already delayed for the bridge. */ if (dev->runtime_d3cold) { - if (dev->d3cold_delay) + if (dev->d3cold_delay && !dev->imm_ready) msleep(dev->d3cold_delay); /* * When powering on a bridge from D3cold, the @@ -1116,6 +1126,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) } EXPORT_SYMBOL(pci_set_power_state); +/** + * pci_power_up - Put the given device into D0 forcibly + * @dev: PCI device to power up + */ +void pci_power_up(struct pci_dev *dev) +{ + __pci_start_power_transition(dev, PCI_D0); + pci_raw_set_power_state(dev, PCI_D0); + pci_update_current_state(dev, PCI_D0); +} + /** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended @@ -1226,7 +1247,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev) pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } - static int pci_save_pcix_state(struct pci_dev *dev) { int pos; @@ -1263,6 +1283,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } +static void pci_save_ltr_state(struct pci_dev *dev) +{ + int ltr; + struct pci_cap_saved_state *save_state; + u16 *cap; + + if (!pci_is_pcie(dev)) + return; + + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!ltr) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state) { + pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); + return; + } + + cap = (u16 *)&save_state->cap.data[0]; + pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); + pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); +} + +static void pci_restore_ltr_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + int ltr; + u16 *cap; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state || !ltr) + return; + + cap = (u16 *)&save_state->cap.data[0]; + pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); + pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); +} /** * pci_save_state - save the PCI configuration space of a device before suspending @@ -1274,6 +1333,9 @@ int pci_save_state(struct pci_dev *dev) /* XXX: 100% dword access ok here? 
*/ for (i = 0; i < 16; i++) pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + dev->saved_config_space[PCI_BRIDGE_CONTROL / 4] &= + ~(PCI_BRIDGE_CTL_BUS_RESET << 16); dev->state_saved = true; i = pci_save_pcie_state(dev); @@ -1284,6 +1346,9 @@ int pci_save_state(struct pci_dev *dev) if (i != 0) return i; + pci_save_ltr_state(dev); + pci_save_dpc_state(dev); + pci_save_aer_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); @@ -1366,7 +1431,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; res = pdev->resource + bar_idx; - size = order_base_2((resource_size(res) >> 20) | 1) - 1; + size = ilog2(resource_size(res)) - 20; ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); @@ -1382,15 +1447,22 @@ void pci_restore_state(struct pci_dev *dev) if (!dev->state_saved) return; - /* PCI Express register must be restored first */ + /* + * Restore max latencies (in the LTR capability) before enabling + * LTR itself (in the PCIe capability). + */ + pci_restore_ltr_state(dev); + pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); pci_restore_rebar_state(dev); + pci_restore_dpc_state(dev); pci_cleanup_aer_error_status_regs(dev); + pci_restore_aer_state(dev); pci_restore_config_space(dev); @@ -1591,11 +1663,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). */ - if (dev->pm_cap) { - u16 pmcsr; - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - } + pci_update_current_state(dev, dev->current_state); if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ @@ -2004,6 +2072,13 @@ static void pci_pme_list_scan(struct work_struct *work) */ if (bridge && bridge->current_state != PCI_D0) continue; + /* + * If the device is in D3cold it should not be + * polled either. + */ + if (pme_dev->dev->current_state == PCI_D3cold) + continue; + pci_pme_wakeup(pme_dev->dev, NULL); } else { list_del(&pme_dev->list); @@ -2163,7 +2238,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable if (enable) { int error; - if (pci_pme_capable(dev, state)) + /* + * Enable PME signaling if the device can signal PME from + * D3cold regardless of whether or not it can signal PME from + * the current target state, because that will allow it to + * signal PME when the hierarchy above it goes into D3cold and + * the device itself ends up in D3cold as a result of that. + */ + if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) pci_pme_active(dev, true); else ret = 1; @@ -2267,16 +2349,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) if (dev->current_state == PCI_D3cold) target_state = PCI_D3cold; - if (wakeup) { + if (wakeup && dev->pme_support) { + pci_power_t state = target_state; + /* * Find the deepest state from which the device can generate * PME#. 
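+	 * If no state at or below the target can signal PME but D0 can (bit 0
+	 * of dev->pme_support), return D0; otherwise fall back to the original
+	 * target state.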
*/ - if (dev->pme_support) { - while (target_state - && !(dev->pme_support & (1 << target_state))) - target_state--; - } + while (state && !(dev->pme_support & (1 << state))) + state--; + + if (state) + return state; + else if (dev->pme_support & 1) + return PCI_D0; } return target_state; @@ -2489,6 +2575,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) pm_runtime_put_sync(parent); } +static const struct dmi_system_id bridge_d3_blacklist[] = { +#ifdef CONFIG_X86 + { + /* + * Gigabyte X299 root port is not marked as hotplug capable + * which allows Linux to power manage it. However, this + * confuses the BIOS SMI handler so don't power manage root + * ports on that system. + */ + .ident = "X299 DESIGNARE EX-CF", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), + }, + }, +#endif + { } +}; + /** * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check @@ -2530,6 +2635,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) if (bridge->is_hotplug_bridge) return false; + if (dmi_check_system(bridge_d3_blacklist)) + return false; + /* * It should be safe to put PCIe ports from 2015 or newer * to D3. @@ -2655,6 +2763,7 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable); void pci_pm_init(struct pci_dev *dev) { int pm; + u16 status; u16 pmc; pm_runtime_forbid(&dev->dev); @@ -2694,14 +2803,14 @@ void pci_pm_init(struct pci_dev *dev) dev->d2_support = true; if (dev->d1_support || dev->d2_support) - pci_printk(KERN_DEBUG, dev, "supports%s%s\n", + pci_info(dev, "supports%s%s\n", dev->d1_support ? " D1" : "", dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { - pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n", + pci_info(dev, "PME# supported from%s%s%s%s%s\n", (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? 
" D2" : "", @@ -2717,6 +2826,10 @@ void pci_pm_init(struct pci_dev *dev) /* Disable the PME# generation functionality */ pci_pme_active(dev, false); } + + pci_read_config_word(dev, PCI_STATUS, &status); + if (status & PCI_STATUS_IMM_READY) + dev->imm_ready = 1; } static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) @@ -2865,16 +2978,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) - pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); else if (bei == PCI_EA_BEI_ROM) - pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) - pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei - PCI_EA_BEI_VF_BAR0, res, prop); else - pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); out: @@ -2977,6 +3090,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) if (error) pci_err(dev, "unable to preallocate PCI-X save buffer\n"); + error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, + 2 * sizeof(u16)); + if (error) + pci_err(dev, "unable to allocate suspend buffer for LTR\n"); + pci_allocate_vc_save_buffers(dev); } @@ -3440,7 +3558,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) } /* Ensure upstream ports don't block AtomicOps on egress */ - if (!bridge->has_secondary_link) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2); if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) @@ -3781,6 +3899,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, ret = logic_pio_register_range(range); if (ret) kfree(range); + + /* Ignore duplicates due to deferred probing */ + if (ret == -EEXIST) + ret = 0; #endif return ret; @@ -4093,7 +4215,7 @@ int pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n", + pci_info(dev, "cache line size of %d is not supported\n", pci_cache_line_size << 2); return -EINVAL; @@ -4387,6 +4509,9 @@ int pcie_flr(struct pci_dev *dev) pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + if (dev->imm_ready) + return 0; + /* * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within * 100ms, but may silently discard requests while the FLR is in @@ -4428,6 +4553,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe) pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); + if (dev->imm_ready) + return 0; + /* * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, * updated 27 July 2006; a device must complete an FLR within @@ -4496,21 +4624,42 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) bool ret; u16 lnk_status; + /* + * Some controllers might not implement link active reporting. In this + * case, we wait for 1000 + 100 ms. 
+ */ + if (!pdev->link_active_reporting) { + msleep(1100); + return true; + } + + /* + * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms, + * after which we should expect an link active if the reset was + * successful. If so, software must wait a minimum 100ms before sending + * configuration requests to devices downstream this port. + * + * If the link fails to activate, either the device was physically + * removed or the link is permanently failed. + */ + if (active) + msleep(20); for (;;) { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); if (ret == active) - return true; + break; if (timeout <= 0) break; msleep(10); timeout -= 10; } - - pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", - active ? "set" : "cleared"); - - return false; + if (active && ret) + msleep(100); + else if (ret != active) + pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", + active ? "set" : "cleared"); + return ret == active; } void pci_reset_secondary_bus(struct pci_dev *dev) @@ -4610,18 +4759,18 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) static void pci_dev_lock(struct pci_dev *dev) { - pci_cfg_access_lock(dev); /* block PM suspend, driver probe, etc. */ device_lock(&dev->dev); + pci_cfg_access_lock(dev); } /* Return 1 on successful lock, 0 on contention */ static int pci_dev_trylock(struct pci_dev *dev) { - if (pci_cfg_access_trylock(dev)) { - if (device_trylock(&dev->dev)) + if (device_trylock(&dev->dev)) { + if (pci_cfg_access_trylock(dev)) return 1; - pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } return 0; @@ -4629,8 +4778,8 @@ static int pci_dev_trylock(struct pci_dev *dev) static void pci_dev_unlock(struct pci_dev *dev) { - device_unlock(&dev->dev); pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } static void pci_dev_save_and_disable(struct pci_dev *dev) @@ -4886,10 +5035,12 @@ static void pci_bus_lock(struct pci_bus *bus) { struct pci_dev *dev; + pci_dev_lock(bus->self); list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + pci_dev_lock(dev); } } @@ -4901,8 +5052,10 @@ static void pci_bus_unlock(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); } /* Return 1 on successful lock, 0 on contention */ @@ -4910,15 +5063,15 @@ static int pci_bus_trylock(struct pci_bus *bus) { struct pci_dev *dev; + if (!pci_dev_trylock(bus->self)) + return 0; + list_for_each_entry(dev, &bus->devices, bus_list) { - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { - if (!pci_bus_trylock(dev->subordinate)) { - pci_dev_unlock(dev); + if (!pci_bus_trylock(dev->subordinate)) goto unlock; - } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -4926,8 +5079,10 @@ static int pci_bus_trylock(struct pci_bus *bus) list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); return 0; } @@ -4959,9 +5114,10 @@ static void pci_slot_lock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + 
pci_dev_lock(dev); } } @@ -4987,14 +5143,13 @@ static int pci_slot_trylock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -5005,44 +5160,48 @@ static int pci_slot_trylock(struct pci_slot *slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } return 0; } -/* Save and disable devices from the top of the tree down */ -static void pci_bus_save_and_disable(struct pci_bus *bus) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. + */ +static void pci_bus_save_and_disable_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_save_and_disable(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. */ -static void pci_bus_restore(struct pci_bus *bus) +static void pci_bus_restore_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_restore(dev->subordinate); + pci_bus_restore_locked(dev->subordinate); } } -/* Save and disable devices from the top of the tree down */ -static void pci_slot_save_and_disable(struct pci_slot *slot) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. + */ +static void pci_slot_save_and_disable_locked(struct pci_slot *slot) { struct pci_dev *dev; @@ -5051,26 +5210,25 @@ static void pci_slot_save_and_disable(struct pci_slot *slot) continue; pci_dev_save_and_disable(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. 
*/ -static void pci_slot_restore(struct pci_slot *slot) +static void pci_slot_restore_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - pci_dev_lock(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_restore(dev->subordinate); + pci_bus_restore_locked(dev->subordinate); } } @@ -5129,17 +5287,15 @@ static int __pci_reset_slot(struct pci_slot *slot) if (rc) return rc; - pci_slot_save_and_disable(slot); - if (pci_slot_trylock(slot)) { + pci_slot_save_and_disable_locked(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, 0); + pci_slot_restore_locked(slot); pci_slot_unlock(slot); } else rc = -EAGAIN; - pci_slot_restore(slot); - return rc; } @@ -5164,6 +5320,41 @@ static int pci_bus_reset(struct pci_bus *bus, int probe) return ret; } +/** + * pci_bus_error_reset - reset the bridge's subordinate bus + * @bridge: The parent device that connects to the bus to reset + * + * This function will first try to reset the slots on this bus if the method is + * available. If slot reset fails or is not available, this will fall back to a + * secondary bus reset. + */ +int pci_bus_error_reset(struct pci_dev *bridge) +{ + struct pci_bus *bus = bridge->subordinate; + struct pci_slot *slot; + + if (!bus) + return -ENOTTY; + + mutex_lock(&pci_slot_mutex); + if (list_empty(&bus->slots)) + goto bus_reset; + + list_for_each_entry(slot, &bus->slots, list) + if (pci_probe_reset_slot(slot)) + goto bus_reset; + + list_for_each_entry(slot, &bus->slots, list) + if (pci_slot_reset(slot, 0)) + goto bus_reset; + + mutex_unlock(&pci_slot_mutex); + return 0; +bus_reset: + mutex_unlock(&pci_slot_mutex); + return pci_bus_reset(bridge->subordinate, 0); +} + /** * pci_probe_reset_bus - probe whether a PCI bus can be reset * @bus: PCI bus to probe @@ -5190,17 +5381,15 @@ static int __pci_reset_bus(struct pci_bus *bus) if (rc) return rc; - pci_bus_save_and_disable(bus); - if (pci_bus_trylock(bus)) { + pci_bus_save_and_disable_locked(bus); might_sleep(); rc = pci_bridge_secondary_bus_reset(bus->self); + pci_bus_restore_locked(bus); pci_bus_unlock(bus); } else rc = -EAGAIN; - pci_bus_restore(bus); - return rc; } @@ -5473,34 +5662,25 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) u32 lnkcap2, lnkcap; /* - * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link - * Speeds Vector in Link Capabilities 2 when supported, falling - * back to Max Link Speed in Link Capabilities otherwise. + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The + * implementation note there recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported. + * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software + * should use the Supported Link Speeds field in Link Capabilities, + * where only 2.5 GT/s and 5.0 GT/s speeds were defined. 
*/ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); - if (lnkcap2) { /* PCIe r3.0-compliant */ - if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) - return PCIE_SPEED_16_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) - return PCIE_SPEED_8_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) - return PCIE_SPEED_5_0GT; - else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) - return PCIE_SPEED_2_5GT; - return PCI_SPEED_UNKNOWN; - } + + /* PCIe r3.0-compliant */ + if (lnkcap2) + return PCIE_LNKCAP2_SLS2SPEED(lnkcap2); pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); - if (lnkcap) { - if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB) - return PCIE_SPEED_16_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB) - return PCIE_SPEED_8_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) - return PCIE_SPEED_5_0GT; - else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) - return PCIE_SPEED_2_5GT; - } + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; return PCI_SPEED_UNKNOWN; } @@ -5570,14 +5750,14 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose) if (bw_avail >= bw_cap && verbose) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", bw_cap / 1000, bw_cap % 1000, - PCIE_SPEED2STR(speed_cap), width_cap); + pci_speed_string(speed_cap), width_cap); else if (bw_avail < bw_cap) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", bw_avail / 1000, bw_avail % 1000, - PCIE_SPEED2STR(speed), width, + pci_speed_string(speed), width, limiting_dev ? pci_name(limiting_dev) : "", bw_cap / 1000, bw_cap % 1000, - PCIE_SPEED2STR(speed_cap), width_cap); + pci_speed_string(speed_cap), width_cap); } /** @@ -5725,6 +5905,8 @@ bool pci_device_is_present(struct pci_dev *pdev) { u32 v; + /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ + pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); @@ -5780,19 +5962,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, while (*p) { count = 0; if (sscanf(p, "%d%n", &align_order, &count) == 1 && - p[count] == '@') { + p[count] == '@') { p += count + 1; + if (align_order > 63) { + pr_err("PCI: Invalid requested alignment (order %d)\n", + align_order); + align_order = PAGE_SHIFT; + } } else { - align_order = -1; + align_order = PAGE_SHIFT; } ret = pci_dev_str_match(dev, p, &p); if (ret == 1) { *resize = true; - if (align_order == -1) - align = PAGE_SIZE; - else - align = 1 << align_order; + align = 1ULL << align_order; break; } else if (ret < 0) { pr_err("PCI: Can't parse resource_alignment parameter: %s\n", @@ -6117,8 +6301,7 @@ static int __init pci_setup(char *str) } else if (!strncmp(str, "disable_acs_redir=", 18)) { disable_acs_redir_param = str + 18; } else { - printk(KERN_ERR "PCI: Unknown option `%s'\n", - str); + pr_err("PCI: Unknown option `%s'\n", str); } } str = k; @@ -6126,3 +6309,19 @@ static int __init pci_setup(char *str) return 0; } early_param("pci", pci_setup); + +/* + * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point + * to data in the __initdata section which will be freed after the init + * sequence is complete. We can't allocate memory in pci_setup() because some + * architectures do not have any memory allocation service available during + * an early_param() call. 
So we allocate memory and copy the variable here + * before the init section is freed. + */ +static int __init pci_realloc_setup_params(void) +{ + disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); + + return 0; +} +pure_initcall(pci_realloc_setup_params); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6e0d1528d471c7970613680f88333480389b6ef4..ff5dde9e67457138c2ed481073ac519307e66761 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -2,6 +2,8 @@ #ifndef DRIVERS_PCI_H #define DRIVERS_PCI_H +#include + #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ @@ -10,6 +12,7 @@ extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; bool pcie_cap_has_lnkctl(const struct pci_dev *dev); +bool pcie_cap_has_rtctl(const struct pci_dev *dev); /* Functions internal to the PCI core code */ @@ -35,6 +38,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai, int pci_probe_reset_function(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); +int pci_bus_error_reset(struct pci_dev *dev); /** * struct pci_platform_pm_ops - Firmware PM callbacks @@ -46,6 +50,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev); * * @get_state: queries the platform firmware for a device's current power state * + * @refresh_state: asks the platform to refresh the device's power state data + * * @choose_state: returns PCI power state of given device preferred by the * platform; to be used during system-wide transitions from a * sleeping state to the working state and vice versa @@ -63,6 +69,7 @@ struct pci_platform_pm_ops { bool (*is_manageable)(struct pci_dev *dev); int (*set_state)(struct pci_dev *dev, pci_power_t state); pci_power_t (*get_state)(struct pci_dev *dev); + void (*refresh_state)(struct pci_dev *dev); pci_power_t (*choose_state)(struct pci_dev *dev); int (*set_wakeup)(struct pci_dev *dev, bool enable); bool (*need_resume)(struct pci_dev *dev); @@ -70,6 +77,7 @@ struct pci_platform_pm_ops { int pci_set_platform_pm(const struct pci_platform_pm_ops *ops); void pci_update_current_state(struct pci_dev *dev, pci_power_t state); +void pci_refresh_power_state(struct pci_dev *dev); void pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); @@ -107,6 +115,15 @@ static inline bool pci_power_manageable(struct pci_dev *pci_dev) return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3; } +static inline bool pcie_downstream_port(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_PCIE_BRIDGE; +} + int pci_vpd_init(struct pci_dev *dev); void pci_vpd_release(struct pci_dev *dev); void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev); @@ -136,6 +153,7 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; } /* Lock for read/write access to pci device and bus lists */ extern struct rw_semaphore pci_bus_sem; +extern struct mutex pci_slot_mutex; extern raw_spinlock_t pci_lock; @@ -245,22 +263,25 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx); void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); -/* PCIe link information */ -#define PCIE_SPEED2STR(speed) \ - ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ - (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ - (speed) == PCIE_SPEED_5_0GT ? 
"5 GT/s" : \ - (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ - "Unknown speed") +/* PCIe link information from Link Capabilities 2 */ +#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \ + ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ + (lnkcap2) & PCI_EXP_LNKCAP2_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ + (lnkcap2) & PCI_EXP_LNKCAP2_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ + (lnkcap2) & PCI_EXP_LNKCAP2_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ + (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN) /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ - ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ + ((speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \ + (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \ (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \ (speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \ (speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \ 0) +const char *pci_speed_string(enum pci_bus_speed speed); enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, @@ -291,23 +312,82 @@ struct pci_sriov { u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) }; -/* pci_dev priv_flags */ -#define PCI_DEV_DISCONNECTED 0 -#define PCI_DEV_ADDED 1 +/** + * pci_dev_set_io_state - Set the new error state if possible. + * + * @dev - pci device to set new error_state + * @new - the state we want dev to be in + * + * Must be called with device_lock held. + * + * Returns true if state has been changed to the requested state. 
+ */ +static inline bool pci_dev_set_io_state(struct pci_dev *dev, + pci_channel_state_t new) +{ + bool changed = false; + + device_lock_assert(&dev->dev); + switch (new) { + case pci_channel_io_perm_failure: + switch (dev->error_state) { + case pci_channel_io_frozen: + case pci_channel_io_normal: + case pci_channel_io_perm_failure: + changed = true; + break; + } + break; + case pci_channel_io_frozen: + switch (dev->error_state) { + case pci_channel_io_frozen: + case pci_channel_io_normal: + changed = true; + break; + } + break; + case pci_channel_io_normal: + switch (dev->error_state) { + case pci_channel_io_frozen: + case pci_channel_io_normal: + changed = true; + break; + } + break; + } + if (changed) + dev->error_state = new; + return changed; +} static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) { - set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); + device_lock(&dev->dev); + pci_dev_set_io_state(dev, pci_channel_io_perm_failure); + device_unlock(&dev->dev); + return 0; } static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) { - return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); + return dev->error_state == pci_channel_io_perm_failure; } +/* pci_dev priv_flags */ +#define PCI_DEV_ADDED 0 + static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) { assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added); @@ -346,6 +426,14 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info); void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); #endif /* CONFIG_PCIEAER */ +#ifdef CONFIG_PCIE_DPC +void pci_save_dpc_state(struct pci_dev *dev); +void pci_restore_dpc_state(struct pci_dev *dev); +#else +static inline void pci_save_dpc_state(struct pci_dev *dev) {} +static inline void pci_restore_dpc_state(struct pci_dev *dev) {} +#endif + #ifdef CONFIG_PCI_ATS void pci_restore_ats_state(struct pci_dev *dev); #else @@ -423,8 +511,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) #endif /* PCI error reporting and recovery */ -void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service); -void pcie_do_nonfatal_recovery(struct pci_dev *dev); +void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state, + u32 service); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); #ifdef CONFIG_PCIEASPM @@ -530,7 +618,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev); void pci_aer_clear_device_status(struct pci_dev *dev); #else static inline void pci_no_aer(void) { } -static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; } +static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 0a1e9d379bc56e4fa7ed664b140aaaab88928a5a..49b3b7001de8ec4dd7ccd5b1a281f5e1149910c1 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -36,7 +36,7 @@ config PCIEAER config PCIEAER_INJECT tristate "PCI Express error injection support" depends on PCIEAER - default n + select GENERIC_IRQ_INJECTION help This enables PCI Express Root Port Advanced Error Reporting (AER) software error injector. 
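The PCIE_SPEED2MBS_ENC() macro extended in the drivers/pci/pci.h hunk above folds the line-encoding overhead into each per-lane rate: 2.5 and 5.0 GT/s links use 8b/10b encoding (8/10 efficiency), while 8.0 GT/s and faster links use 128b/130b. The following standalone sketch, which is not part of this patch, shows the same arithmetic; the helper name lane_rate_mbps() and the x4 / 8.0 GT/s example link are made up for illustration.

/* Illustrative only: mirrors the arithmetic of PCIE_SPEED2MBS_ENC(). */
#include <stdio.h>

/* Per-lane data rate in Mb/s after encoding overhead; speed given in GT/s * 10. */
static unsigned int lane_rate_mbps(unsigned int gt_per_s_x10)
{
	switch (gt_per_s_x10) {
	case 25:  return 2500 * 8 / 10;		/*  2.5 GT/s, 8b/10b    */
	case 50:  return 5000 * 8 / 10;		/*  5.0 GT/s, 8b/10b    */
	case 80:  return 8000 * 128 / 130;	/*  8.0 GT/s, 128b/130b */
	case 160: return 16000 * 128 / 130;	/* 16.0 GT/s, 128b/130b */
	case 320: return 32000 * 128 / 130;	/* 32.0 GT/s, 128b/130b */
	default:  return 0;
	}
}

int main(void)
{
	unsigned int width = 4;				/* hypothetical x4 link    */
	unsigned int mbps = lane_rate_mbps(80) * width;	/* lanes running at 8 GT/s */

	/* Printed in the same style as __pcie_print_link_status(): 31.504 Gb/s here. */
	printf("%u.%03u Gb/s available PCIe bandwidth\n", mbps / 1000, mbps % 1000);
	return 0;
}

This is roughly the computation pcie_bandwidth_capable() performs, only with the speed and width read from the capability registers instead of hard-coded constants.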
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 83180edd6ed470d5fa57f08833e8178d91a18d92..57584cbaa428984606863756712e6a8e6e2e1dcc 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -12,6 +12,10 @@ * Andrew Patterson */ +#define pr_fmt(fmt) "AER: " fmt +#define dev_fmt pr_fmt + +#include #include #include #include @@ -30,7 +34,7 @@ #include "../pci.h" #include "portdrv.h" -#define AER_ERROR_SOURCES_MAX 100 +#define AER_ERROR_SOURCES_MAX 128 #define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */ #define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/ @@ -42,21 +46,7 @@ struct aer_err_source { struct aer_rpc { struct pci_dev *rpd; /* Root Port device */ - struct work_struct dpc_handler; - struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX]; - struct aer_err_info e_info; - unsigned short prod_idx; /* Error Producer Index */ - unsigned short cons_idx; /* Error Consumer Index */ - int isr; - spinlock_t e_lock; /* - * Lock access to Error Status/ID Regs - * and error producer/consumer index - */ - struct mutex rpc_mutex; /* - * only one thread could do - * recovery on the same - * root port hierarchy - */ + DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX); }; /* AER stats for the device */ @@ -131,7 +121,7 @@ bool pci_aer_available(void) static int ecrc_policy = ECRC_POLICY_DEFAULT; -static const char *ecrc_policy_str[] = { +static const char * const ecrc_policy_str[] = { [ECRC_POLICY_DEFAULT] = "bios", [ECRC_POLICY_OFF] = "off", [ECRC_POLICY_ON] = "on" @@ -212,16 +202,14 @@ void pcie_set_ecrc_checking(struct pci_dev *dev) /** * pcie_ecrc_get_policy - parse kernel command-line ecrc option + * @str: ECRC policy from kernel command line to use */ void pcie_ecrc_get_policy(char *str) { int i; - for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++) - if (!strncmp(str, ecrc_policy_str[i], - strlen(ecrc_policy_str[i]))) - break; - if (i >= ARRAY_SIZE(ecrc_policy_str)) + i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str); + if (i < 0) return; ecrc_policy = i; @@ -462,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) return 0; } +void pci_save_aer_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + u32 *cap; + int pos; + + pos = dev->aer_cap; + if (!pos) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); + if (!save_state) + return; + + cap = &save_state->cap.data[0]; + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++); + pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++); + if (pcie_cap_has_rtctl(dev)) + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++); +} + +void pci_restore_aer_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + u32 *cap; + int pos; + + pos = dev->aer_cap; + if (!pos) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR); + if (!save_state) + return; + + cap = &save_state->cap.data[0]; + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++); + pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++); + if (pcie_cap_has_rtctl(dev)) + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++); +} + void pci_aer_init(struct pci_dev *dev) { + int n; + dev->aer_cap = pci_find_ext_capability(dev, 
PCI_EXT_CAP_ID_ERR); + if (!dev->aer_cap) + return; - if (dev->aer_cap) - dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); + dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); + + /* + * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER, + * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event + * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec + * 7.8.4). + */ + n = pcie_cap_has_rtctl(dev) ? 5 : 4; + pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n); pci_cleanup_aer_error_status_regs(dev); } @@ -671,7 +717,8 @@ const struct attribute_group aer_stats_attr_group = { static void pci_dev_aer_stats_incr(struct pci_dev *pdev, struct aer_err_info *info) { - int status, i, max = -1; + unsigned long status = info->status & ~info->mask; + int i, max = -1; u64 *counter = NULL; struct aer_stats *aer_stats = pdev->aer_stats; @@ -696,10 +743,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev, break; } - status = (info->status & ~info->mask); - for (i = 0; i < max; i++) - if (status & (1 << i)) - counter[i]++; + for_each_set_bit(i, &status, max) + counter[i]++; } static void pci_rootport_aer_stats_incr(struct pci_dev *pdev, @@ -731,14 +776,11 @@ static void __print_tlp_header(struct pci_dev *dev, static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { - int i, status; + unsigned long status = info->status & ~info->mask; const char *errmsg = NULL; - status = (info->status & ~info->mask); - - for (i = 0; i < 32; i++) { - if (!(status & (1 << i))) - continue; + int i; + for_each_set_bit(i, &status, 32) { if (info->severity == AER_CORRECTABLE) errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? aer_correctable_error_string[i] : NULL; @@ -796,10 +838,11 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) u8 bus = info->id >> 8; u8 devfn = info->id & 0xff; - pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n", - info->multi_error_valid ? "Multiple " : "", - aer_error_severity_string[info->severity], - pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n", + info->multi_error_valid ? 
"Multiple " : "", + aer_error_severity_string[info->severity], + pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), + PCI_FUNC(devfn)); } #ifdef CONFIG_ACPI_APEI_PCIEAER @@ -866,7 +909,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity, static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) { if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { - e_info->dev[e_info->error_dev_num] = dev; + e_info->dev[e_info->error_dev_num] = pci_dev_get(dev); e_info->error_dev_num++; return 0; } @@ -1010,9 +1053,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) info->status); pci_aer_clear_device_status(dev); } else if (info->severity == AER_NONFATAL) - pcie_do_nonfatal_recovery(dev); + pcie_do_recovery(dev, pci_channel_io_normal, + PCIE_PORT_SERVICE_AER); else if (info->severity == AER_FATAL) - pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER); + pcie_do_recovery(dev, pci_channel_io_frozen, + PCIE_PORT_SERVICE_AER); + pci_dev_put(dev); } #ifdef CONFIG_ACPI_APEI_PCIEAER @@ -1047,9 +1093,11 @@ static void aer_recover_work_func(struct work_struct *work) } cper_print_aer(pdev, entry.severity, entry.regs); if (entry.severity == AER_NONFATAL) - pcie_do_nonfatal_recovery(pdev); + pcie_do_recovery(pdev, pci_channel_io_normal, + PCIE_PORT_SERVICE_AER); else if (entry.severity == AER_FATAL) - pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER); + pcie_do_recovery(pdev, pci_channel_io_frozen, + PCIE_PORT_SERVICE_AER); pci_dev_put(pdev); } } @@ -1065,7 +1113,6 @@ static DECLARE_WORK(aer_recover_work, aer_recover_work_func); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, int severity, struct aer_capability_regs *aer_regs) { - unsigned long flags; struct aer_recover_entry entry = { .bus = bus, .devfn = devfn, @@ -1074,13 +1121,12 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, .regs = aer_regs, }; - spin_lock_irqsave(&aer_recover_ring_lock, flags); - if (kfifo_put(&aer_recover_ring, entry)) + if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1, + &aer_recover_ring_lock)) schedule_work(&aer_recover_work); else pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n", domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); - spin_unlock_irqrestore(&aer_recover_ring_lock, flags); } EXPORT_SYMBOL_GPL(aer_recover_queue); #endif @@ -1115,8 +1161,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) &info->mask); if (!(info->status & ~info->mask)) return 0; - } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - info->severity == AER_NONFATAL) { + } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || + info->severity == AER_NONFATAL) { /* Link is still healthy for IO reads */ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, @@ -1170,7 +1217,7 @@ static void aer_isr_one_error(struct aer_rpc *rpc, struct aer_err_source *e_src) { struct pci_dev *pdev = rpc->rpd; - struct aer_err_info *e_info = &rpc->e_info; + struct aer_err_info e_info; pci_rootport_aer_stats_incr(pdev, e_src); @@ -1179,83 +1226,58 @@ static void aer_isr_one_error(struct aer_rpc *rpc, * uncorrectable error being logged. Report correctable error first. 
*/ if (e_src->status & PCI_ERR_ROOT_COR_RCV) { - e_info->id = ERR_COR_ID(e_src->id); - e_info->severity = AER_CORRECTABLE; + e_info.id = ERR_COR_ID(e_src->id); + e_info.severity = AER_CORRECTABLE; if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) - e_info->multi_error_valid = 1; + e_info.multi_error_valid = 1; else - e_info->multi_error_valid = 0; - aer_print_port_info(pdev, e_info); + e_info.multi_error_valid = 0; + aer_print_port_info(pdev, &e_info); - if (find_source_device(pdev, e_info)) - aer_process_err_devices(e_info); + if (find_source_device(pdev, &e_info)) + aer_process_err_devices(&e_info); } if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { - e_info->id = ERR_UNCOR_ID(e_src->id); + e_info.id = ERR_UNCOR_ID(e_src->id); if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) - e_info->severity = AER_FATAL; + e_info.severity = AER_FATAL; else - e_info->severity = AER_NONFATAL; + e_info.severity = AER_NONFATAL; if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) - e_info->multi_error_valid = 1; + e_info.multi_error_valid = 1; else - e_info->multi_error_valid = 0; - - aer_print_port_info(pdev, e_info); - - if (find_source_device(pdev, e_info)) - aer_process_err_devices(e_info); - } -} + e_info.multi_error_valid = 0; -/** - * get_e_source - retrieve an error source - * @rpc: pointer to the root port which holds an error - * @e_src: pointer to store retrieved error source - * - * Return 1 if an error source is retrieved, otherwise 0. - * - * Invoked by DPC handler to consume an error. - */ -static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src) -{ - unsigned long flags; + aer_print_port_info(pdev, &e_info); - /* Lock access to Root error producer/consumer index */ - spin_lock_irqsave(&rpc->e_lock, flags); - if (rpc->prod_idx == rpc->cons_idx) { - spin_unlock_irqrestore(&rpc->e_lock, flags); - return 0; + if (find_source_device(pdev, &e_info)) + aer_process_err_devices(&e_info); } - - *e_src = rpc->e_sources[rpc->cons_idx]; - rpc->cons_idx++; - if (rpc->cons_idx == AER_ERROR_SOURCES_MAX) - rpc->cons_idx = 0; - spin_unlock_irqrestore(&rpc->e_lock, flags); - - return 1; } /** * aer_isr - consume errors detected by root port - * @work: definition of this work item + * @irq: IRQ assigned to Root Port + * @context: pointer to Root Port data structure * * Invoked, as DPC, when root port records new detected error */ -static void aer_isr(struct work_struct *work) +static irqreturn_t aer_isr(int irq, void *context) { - struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); + struct pcie_device *dev = (struct pcie_device *)context; + struct aer_rpc *rpc = get_service_data(dev); struct aer_err_source uninitialized_var(e_src); - mutex_lock(&rpc->rpc_mutex); - while (get_e_source(rpc, &e_src)) + if (kfifo_is_empty(&rpc->aer_fifo)) + return IRQ_NONE; + + while (kfifo_get(&rpc->aer_fifo, &e_src)) aer_isr_one_error(rpc, &e_src); - mutex_unlock(&rpc->rpc_mutex); + return IRQ_HANDLED; } /** @@ -1265,56 +1287,26 @@ static void aer_isr(struct work_struct *work) * * Invoked when Root Port detects AER messages. 
*/ -irqreturn_t aer_irq(int irq, void *context) +static irqreturn_t aer_irq(int irq, void *context) { - unsigned int status, id; struct pcie_device *pdev = (struct pcie_device *)context; struct aer_rpc *rpc = get_service_data(pdev); - int next_prod_idx; - unsigned long flags; - int pos; + struct pci_dev *rp = rpc->rpd; + struct aer_err_source e_src = {}; + int pos = rp->aer_cap; - pos = pdev->port->aer_cap; - /* - * Must lock access to Root Error Status Reg, Root Error ID Reg, - * and Root error producer/consumer index - */ - spin_lock_irqsave(&rpc->e_lock, flags); - - /* Read error status */ - pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status); - if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) { - spin_unlock_irqrestore(&rpc->e_lock, flags); + pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status); + if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) return IRQ_NONE; - } - /* Read error source and clear error status */ - pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id); - pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status); + pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id); + pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status); - /* Store error source for later DPC handler */ - next_prod_idx = rpc->prod_idx + 1; - if (next_prod_idx == AER_ERROR_SOURCES_MAX) - next_prod_idx = 0; - if (next_prod_idx == rpc->cons_idx) { - /* - * Error Storm Condition - possibly the same error occurred. - * Drop the error. - */ - spin_unlock_irqrestore(&rpc->e_lock, flags); + if (!kfifo_put(&rpc->aer_fifo, e_src)) return IRQ_HANDLED; - } - rpc->e_sources[rpc->prod_idx].status = status; - rpc->e_sources[rpc->prod_idx].id = id; - rpc->prod_idx = next_prod_idx; - spin_unlock_irqrestore(&rpc->e_lock, flags); - /* Invoke DPC handler */ - schedule_work(&rpc->dpc_handler); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } -EXPORT_SYMBOL_GPL(aer_irq); static int set_device_error_reporting(struct pci_dev *dev, void *data) { @@ -1422,33 +1414,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc) pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); } -/** - * aer_alloc_rpc - allocate Root Port data structure - * @dev: pointer to the pcie_dev data structure - * - * Invoked when Root Port's AER service is loaded. - */ -static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) -{ - struct aer_rpc *rpc; - - rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL); - if (!rpc) - return NULL; - - /* Initialize Root lock access, e_lock, to Root Error Status Reg */ - spin_lock_init(&rpc->e_lock); - - rpc->rpd = dev->port; - INIT_WORK(&rpc->dpc_handler, aer_isr); - mutex_init(&rpc->rpc_mutex); - - /* Use PCIe bus function to store rpc into PCIe device */ - set_service_data(dev, rpc); - - return rpc; -} - /** * aer_remove - clean up resources * @dev: pointer to the pcie_dev data structure @@ -1459,16 +1424,7 @@ static void aer_remove(struct pcie_device *dev) { struct aer_rpc *rpc = get_service_data(dev); - if (rpc) { - /* If register interrupt service, it must be free. 
*/ - if (rpc->isr) - free_irq(dev->irq, dev); - - flush_work(&rpc->dpc_handler); - aer_disable_rootport(rpc); - kfree(rpc); - set_service_data(dev, NULL); - } + aer_disable_rootport(rpc); } /** @@ -1481,29 +1437,28 @@ static int aer_probe(struct pcie_device *dev) { int status; struct aer_rpc *rpc; - struct device *device = &dev->port->dev; + struct device *device = &dev->device; + struct pci_dev *port = dev->port; - /* Alloc rpc data structure */ - rpc = aer_alloc_rpc(dev); + rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); if (!rpc) { dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n"); - aer_remove(dev); return -ENOMEM; } - /* Request IRQ ISR */ - status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev); + rpc->rpd = port; + set_service_data(dev, rpc); + + INIT_KFIFO(rpc->aer_fifo); + status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, + IRQF_SHARED, "aerdrv", dev); if (status) { - dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n", - dev->irq); - aer_remove(dev); + pci_err(port, "request AER IRQ %d failed\n", dev->irq); return status; } - rpc->isr = 1; - aer_enable_rootport(rpc); - dev_info(device, "AER enabled with IRQ %d\n", dev->irq); + pci_info(port, "enabled with IRQ %d\n", dev->irq); return 0; } @@ -1526,7 +1481,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); - rc = pci_bridge_secondary_bus_reset(dev); + rc = pci_bus_error_reset(dev); pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n"); /* Clear Root Error Status */ @@ -1541,18 +1496,6 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } -/** - * aer_error_resume - clean up corresponding error status bits - * @dev: pointer to Root Port's pci_dev data structure - * - * Invoked by Port Bus driver during nonfatal recovery. - */ -static void aer_error_resume(struct pci_dev *dev) -{ - pci_aer_clear_device_status(dev); - pci_cleanup_aer_uncorrect_error_status(dev); -} - static struct pcie_port_service_driver aerdriver = { .name = "aer", .port_type = PCI_EXP_TYPE_ROOT_PORT, @@ -1560,7 +1503,6 @@ static struct pcie_port_service_driver aerdriver = { .probe = aer_probe, .remove = aer_remove, - .error_resume = aer_error_resume, .reset_link = aer_root_reset, }; @@ -1569,10 +1511,9 @@ static struct pcie_port_service_driver aerdriver = { * * Invoked when AER root service driver is loaded. 
*/ -static int __init aer_service_init(void) +int __init pcie_aer_init(void) { if (!pci_aer_available() || aer_acpi_firmware_first()) return -ENXIO; return pcie_port_service_register(&aerdriver); } -device_initcall(aer_service_init); diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 0eb24346cad3057f964d75b7045524bc792df6f3..5a1a82b2a4f682b42b55680b2189c9b7c6162565 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -12,8 +12,11 @@ * Huang Ying */ +#define dev_fmt(fmt) "aer_inject: " fmt + #include #include +#include #include #include #include @@ -175,14 +178,48 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where, return target; } +static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *val) +{ + struct pci_ops *ops, *my_ops; + int rv; + + ops = __find_pci_bus_ops(bus); + if (!ops) + return -1; + + my_ops = bus->ops; + bus->ops = ops; + rv = ops->read(bus, devfn, where, size, val); + bus->ops = my_ops; + + return rv; +} + +static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 val) +{ + struct pci_ops *ops, *my_ops; + int rv; + + ops = __find_pci_bus_ops(bus); + if (!ops) + return -1; + + my_ops = bus->ops; + bus->ops = ops; + rv = ops->write(bus, devfn, where, size, val); + bus->ops = my_ops; + + return rv; +} + static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 *sim; struct aer_error *err; unsigned long flags; - struct pci_ops *ops; - struct pci_ops *my_ops; int domain; int rv; @@ -203,18 +240,7 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn, return 0; } out: - ops = __find_pci_bus_ops(bus); - /* - * pci_lock must already be held, so we can directly - * manipulate bus->ops. Many config access functions, - * including pci_generic_config_read() require the original - * bus->ops be installed to function, so temporarily put them - * back. - */ - my_ops = bus->ops; - bus->ops = ops; - rv = ops->read(bus, devfn, where, size, val); - bus->ops = my_ops; + rv = aer_inj_read(bus, devfn, where, size, val); spin_unlock_irqrestore(&inject_lock, flags); return rv; } @@ -226,8 +252,6 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn, struct aer_error *err; unsigned long flags; int rw1cs; - struct pci_ops *ops; - struct pci_ops *my_ops; int domain; int rv; @@ -251,18 +275,7 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn, return 0; } out: - ops = __find_pci_bus_ops(bus); - /* - * pci_lock must already be held, so we can directly - * manipulate bus->ops. Many config access functions, - * including pci_generic_config_write() require the original - * bus->ops be installed to function, so temporarily put them - * back. - */ - my_ops = bus->ops; - bus->ops = ops; - rv = ops->write(bus, devfn, where, size, val); - bus->ops = my_ops; + rv = aer_inj_write(bus, devfn, where, size, val); spin_unlock_irqrestore(&inject_lock, flags); return rv; } @@ -294,6 +307,13 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus) spin_lock_irqsave(&inject_lock, flags); if (ops == &aer_inj_pci_ops) goto out; + /* + * increments the reference count of the pci bus. Otherwise, when we + * restore the 'pci_ops' in 'aer_inject_exit', the 'pci_bus' may have + * been freed. 
+ */ + pci_bus_get(bus); + pci_bus_ops_init(bus_ops, bus, ops); list_add(&bus_ops->list, &pci_bus_ops_list); bus_ops = NULL; @@ -303,32 +323,13 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus) return 0; } -static int find_aer_device_iter(struct device *device, void *data) -{ - struct pcie_device **result = data; - struct pcie_device *pcie_dev; - - if (device->bus == &pcie_port_bus_type) { - pcie_dev = to_pcie_device(device); - if (pcie_dev->service & PCIE_PORT_SERVICE_AER) { - *result = pcie_dev; - return 1; - } - } - return 0; -} - -static int find_aer_device(struct pci_dev *dev, struct pcie_device **result) -{ - return device_for_each_child(&dev->dev, result, find_aer_device_iter); -} - static int aer_inject(struct aer_error_inj *einj) { struct aer_error *err, *rperr; struct aer_error *err_alloc = NULL, *rperr_alloc = NULL; struct pci_dev *dev, *rpdev; struct pcie_device *edev; + struct device *device; unsigned long flags; unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); int pos_cap_err, rp_pos_cap_err; @@ -340,14 +341,14 @@ static int aer_inject(struct aer_error_inj *einj) return -ENODEV; rpdev = pcie_find_root_port(dev); if (!rpdev) { - pci_err(dev, "aer_inject: Root port not found\n"); + pci_err(dev, "Root port not found\n"); ret = -ENODEV; goto out_put; } pos_cap_err = dev->aer_cap; if (!pos_cap_err) { - pci_err(dev, "aer_inject: Device doesn't support AER\n"); + pci_err(dev, "Device doesn't support AER\n"); ret = -EPROTONOSUPPORT; goto out_put; } @@ -358,7 +359,7 @@ static int aer_inject(struct aer_error_inj *einj) rp_pos_cap_err = rpdev->aer_cap; if (!rp_pos_cap_err) { - pci_err(rpdev, "aer_inject: Root port doesn't support AER\n"); + pci_err(rpdev, "Root port doesn't support AER\n"); ret = -EPROTONOSUPPORT; goto out_put; } @@ -406,14 +407,14 @@ static int aer_inject(struct aer_error_inj *einj) if (!aer_mask_override && einj->cor_status && !(einj->cor_status & ~cor_mask)) { ret = -EINVAL; - pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n"); + pci_warn(dev, "The correctable error(s) is masked by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } if (!aer_mask_override && einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) { ret = -EINVAL; - pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n"); + pci_warn(dev, "The uncorrectable error(s) is masked by device\n"); spin_unlock_irqrestore(&inject_lock, flags); goto out_put; } @@ -464,19 +465,19 @@ static int aer_inject(struct aer_error_inj *einj) if (ret) goto out_put; - if (find_aer_device(rpdev, &edev)) { + device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER); + if (device) { + edev = to_pcie_device(device); if (!get_service_data(edev)) { - dev_warn(&edev->device, - "aer_inject: AER service is not initialized\n"); + pci_warn(edev->port, "AER service is not initialized\n"); ret = -EPROTONOSUPPORT; goto out_put; } - dev_info(&edev->device, - "aer_inject: Injecting errors %08x/%08x into device %s\n", + pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", einj->cor_status, einj->uncor_status, pci_name(dev)); - aer_irq(-1, edev); + ret = irq_inject_interrupt(edev->irq); } else { - pci_err(rpdev, "aer_inject: AER device not found\n"); + pci_err(rpdev, "AER device not found\n"); ret = -ENODEV; } out_put: @@ -533,6 +534,7 @@ static void __exit aer_inject_exit(void) while ((bus_ops = pci_bus_ops_pop())) { pci_bus_set_ops(bus_ops->bus, bus_ops->ops); + pci_bus_put(bus_ops->bus); kfree(bus_ops); } diff --git a/drivers/pci/pcie/aspm.c 
b/drivers/pci/pcie/aspm.c index 5326916715d20663ead1c51c30ccf3cc2d0b9ddc..54fc2856995d55c9a5b8b6e282d060e758ee25a6 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -198,6 +198,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_capable = (blacklist) ? 0 : capable; } +static bool pcie_retrain_link(struct pcie_link_state *link) +{ + struct pci_dev *parent = link->pdev; + unsigned long start_jiffies; + u16 reg16; + + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16); + reg16 |= PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); + if (parent->clear_retrain_link) { + /* + * Due to an erratum in some devices the Retrain Link bit + * needs to be cleared again manually to allow the link + * training to succeed. + */ + reg16 &= ~PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); + } + + /* Wait for link training end. Break out after waiting for timeout */ + start_jiffies = jiffies; + for (;;) { + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + break; + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) + break; + msleep(1); + } + return !(reg16 & PCI_EXP_LNKSTA_LT); +} + /* * pcie_aspm_configure_common_clock: check if the 2 ends of a link * could use common clock. If they are, configure them to use the @@ -207,7 +239,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) { int same_clock = 1; u16 reg16, parent_reg, child_reg[8]; - unsigned long start_jiffies; struct pci_dev *child, *parent = link->pdev; struct pci_bus *linkbus = parent->subordinate; /* @@ -265,21 +296,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) reg16 &= ~PCI_EXP_LNKCTL_CCC; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - /* Retrain link */ - reg16 |= PCI_EXP_LNKCTL_RL; - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - - /* Wait for link training end. Break out after waiting for timeout */ - start_jiffies = jiffies; - for (;;) { - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) - break; - msleep(1); - } - if (!(reg16 & PCI_EXP_LNKSTA_LT)) + if (pcie_retrain_link(link)) return; /* Training failed. Restore common clock configurations */ @@ -730,9 +747,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) /* Enable what we need to enable */ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) @@ -895,7 +912,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) struct pcie_link_state *link; int blacklist = !!pcie_aspm_sanity_check(pdev); - if (!aspm_support_enabled) + if (!aspm_support_enabled || aspm_disabled) return; if (pdev->link_state) @@ -903,10 +920,10 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) /* * We allocate pcie_link_state for the component on the upstream - * end of a Link, so there's nothing to do unless this device has a - * Link on its secondary side. + * end of a Link, so there's nothing to do unless this device is + * downstream port.
*/ - if (!pdev->has_secondary_link) + if (!pcie_downstream_port(pdev)) return; /* VIA has a strange chipset, root port is under a bridge */ @@ -991,7 +1008,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) * All PCIe functions are in one slot, remove one function will remove * the whole slot, so just wait until we are the last function left. */ - if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices)) + if (!list_empty(&parent->subordinate->devices)) goto out; link = parent->link_state; @@ -1061,7 +1078,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) if (!pci_is_pcie(pdev)) return; - if (pdev->has_secondary_link) + if (pcie_downstream_port(pdev)) parent = pdev; if (!parent || !parent->link_state) return; diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index f03279fc87cd5978650b0085ad49019047f8bafd..e06f42f58d3d4d87e71d2310e04beaeb3e4a68f3 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -6,6 +6,8 @@ * Copyright (C) 2016 Intel Corp. */ +#define dev_fmt(fmt) "DPC: " fmt + #include #include #include @@ -44,11 +46,62 @@ static const char * const rp_pio_error_string[] = { "Memory Request Completion Timeout", /* Bit Position 18 */ }; +static struct dpc_dev *to_dpc_dev(struct pci_dev *dev) +{ + struct device *device; + + device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC); + if (!device) + return NULL; + return get_service_data(to_pcie_device(device)); +} + +void pci_save_dpc_state(struct pci_dev *dev) +{ + struct dpc_dev *dpc; + struct pci_cap_saved_state *save_state; + u16 *cap; + + if (!pci_is_pcie(dev)) + return; + + dpc = to_dpc_dev(dev); + if (!dpc) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC); + if (!save_state) + return; + + cap = (u16 *)&save_state->cap.data[0]; + pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap); +} + +void pci_restore_dpc_state(struct pci_dev *dev) +{ + struct dpc_dev *dpc; + struct pci_cap_saved_state *save_state; + u16 *cap; + + if (!pci_is_pcie(dev)) + return; + + dpc = to_dpc_dev(dev); + if (!dpc) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC); + if (!save_state) + return; + + cap = (u16 *)&save_state->cap.data[0]; + pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap); +} + static int dpc_wait_rp_inactive(struct dpc_dev *dpc) { unsigned long timeout = jiffies + HZ; struct pci_dev *pdev = dpc->dev->port; - struct device *dev = &dpc->dev->device; u16 cap = dpc->cap_pos, status; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); @@ -58,7 +111,7 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); } if (status & PCI_EXP_DPC_RP_BUSY) { - dev_warn(dev, "DPC root port still busy\n"); + pci_warn(pdev, "root port still busy\n"); return -EBUSY; } return 0; @@ -67,18 +120,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) { struct dpc_dev *dpc; - struct pcie_device *pciedev; - struct device *devdpc; - u16 cap; /* * DPC disables the Link automatically in hardware, so it has * already been reset by the time we get here. 
*/ - devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC); - pciedev = to_pcie_device(devdpc); - dpc = get_service_data(pciedev); + dpc = to_dpc_dev(pdev); cap = dpc->cap_pos; /* @@ -93,13 +141,14 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS, PCI_EXP_DPC_STATUS_TRIGGER); + if (!pcie_wait_for_link(pdev, true)) + return PCI_ERS_RESULT_DISCONNECT; + return PCI_ERS_RESULT_RECOVERED; } - static void dpc_process_rp_pio_error(struct dpc_dev *dpc) { - struct device *dev = &dpc->dev->device; struct pci_dev *pdev = dpc->dev->port; u16 cap = dpc->cap_pos, dpc_status, first_error; u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; @@ -107,13 +156,13 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask); - dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", + pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", status, mask); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc); - dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", + pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", sev, syserr, exc); /* Get First Error Pointer */ @@ -122,7 +171,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { if ((status & ~mask) & (1 << i)) - dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i], + pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i], first_error == i ? 
" (First)" : ""); } @@ -136,40 +185,61 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) &dw2); pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, &dw3); - dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n", + pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n", dw0, dw1, dw2, dw3); if (dpc->rp_log_size < 5) goto clear_status; pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log); - dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log); + pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log); for (i = 0; i < dpc->rp_log_size - 5; i++) { pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); - dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); + pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); } clear_status: pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); } +static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, + struct aer_err_info *info) +{ + int pos = dev->aer_cap; + u32 status, mask, sev; + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); + status &= ~mask; + if (!status) + return 0; + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev); + status &= sev; + if (status) + info->severity = AER_FATAL; + else + info->severity = AER_NONFATAL; + + return 1; +} + static irqreturn_t dpc_handler(int irq, void *context) { struct aer_err_info info; struct dpc_dev *dpc = context; struct pci_dev *pdev = dpc->dev->port; - struct device *dev = &dpc->dev->device; u16 cap = dpc->cap_pos, status, source, reason, ext_reason; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); - dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n", + pci_info(pdev, "containment event, status:%#06x source:%#06x\n", status, source); reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1; ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5; - dev_warn(dev, "DPC %s detected, remove downstream devices\n", + pci_warn(pdev, "%s detected\n", (reason == 0) ? "unmasked uncorrectable error" : (reason == 1) ? "ERR_NONFATAL" : (reason == 2) ? 
"ERR_FATAL" : @@ -180,13 +250,16 @@ static irqreturn_t dpc_handler(int irq, void *context) /* show RP PIO error detail information */ if (dpc->rp_extensions && reason == 3 && ext_reason == 0) dpc_process_rp_pio_error(dpc); - else if (reason == 0 && aer_get_device_error_info(pdev, &info)) { + else if (reason == 0 && + dpc_get_aer_uncorrect_severity(pdev, &info) && + aer_get_device_error_info(pdev, &info)) { aer_print_error(pdev, &info); pci_cleanup_aer_uncorrect_error_status(pdev); + pci_aer_clear_fatal_status(pdev); } /* We configure DPC so it only triggers on ERR_FATAL */ - pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC); + pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC); return IRQ_HANDLED; } @@ -218,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev) int status; u16 ctl, cap; - if (pcie_aer_get_firmware_first(pdev)) + if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native) return -ENOTSUPP; dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL); @@ -233,7 +306,7 @@ static int dpc_probe(struct pcie_device *dev) dpc_handler, IRQF_SHARED, "pcie-dpc", dpc); if (status) { - dev_warn(device, "request IRQ%d failed: %d\n", dev->irq, + pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq, status); return status; } @@ -245,7 +318,7 @@ static int dpc_probe(struct pcie_device *dev) if (dpc->rp_extensions) { dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) { - dev_err(device, "RP PIO log size %u is invalid\n", + pci_err(pdev, "RP PIO log size %u is invalid\n", dpc->rp_log_size); dpc->rp_log_size = 0; } @@ -254,11 +327,13 @@ static int dpc_probe(struct pcie_device *dev) ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); - dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", - cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), - FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), - FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, - FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); + pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", + cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), + FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), + FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, + FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); + + pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16)); return status; } @@ -282,8 +357,7 @@ static struct pcie_port_service_driver dpcdriver = { .reset_link = dpc_reset_link, }; -static int __init dpc_service_init(void) +int __init pcie_dpc_init(void) { return pcie_port_service_register(&dpcdriver); } -device_initcall(dpc_service_init); diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 708fd3a0d6466063e825383983206ced863adf4a..6d3d5b6a5c444df42c6c9b9d6f68334aa453aa7f 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -12,18 +12,12 @@ #include #include -#include #include #include #include #include "portdrv.h" #include "../pci.h" -struct aer_broadcast_data { - enum pci_channel_state state; - enum pci_ers_result result; -}; - static pci_ers_result_t merge_result(enum pci_ers_result orig, enum pci_ers_result new) { @@ -49,66 +43,54 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig, return orig; } -static int report_error_detected(struct pci_dev *dev, void *data) +static int 
report_error_detected(struct pci_dev *dev, + enum pci_channel_state state, + enum pci_ers_result *result) { pci_ers_result_t vote; const struct pci_error_handlers *err_handler; - struct aer_broadcast_data *result_data; - - result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); - dev->error_state = result_data->state; - - if (!dev->driver || + if (!pci_dev_set_io_state(dev, state) || + !dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->error_detected) { - if (result_data->state == pci_channel_io_frozen && - dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { - /* - * In case of fatal recovery, if one of down- - * stream device has no driver. We might be - * unable to recover because a later insmod - * of a driver for this device is unaware of - * its hw state. - */ - pci_printk(KERN_DEBUG, dev, "device has %s\n", - dev->driver ? - "no AER-aware driver" : "no driver"); - } - /* - * If there's any device in the subtree that does not - * have an error_detected callback, returning - * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of - * the subsequent mmio_enabled/slot_reset/resume - * callbacks of "any" device in the subtree. All the - * devices in the subtree are left in the error state - * without recovery. + * If any device in the subtree does not have an error_detected + * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent + * error callbacks of "any" device in the subtree, and will + * exit in the disconnected error state. */ - - if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { vote = PCI_ERS_RESULT_NO_AER_DRIVER; - else + pci_info(dev, "AER: Can't recover (no error_detected callback)\n"); + } else { vote = PCI_ERS_RESULT_NONE; + } } else { err_handler = dev->driver->err_handler; - vote = err_handler->error_detected(dev, result_data->state); - pci_uevent_ers(dev, PCI_ERS_RESULT_NONE); + vote = err_handler->error_detected(dev, state); } - - result_data->result = merge_result(result_data->result, vote); + pci_uevent_ers(dev, vote); + *result = merge_result(*result, vote); device_unlock(&dev->dev); return 0; } +static int report_frozen_detected(struct pci_dev *dev, void *data) +{ + return report_error_detected(dev, pci_channel_io_frozen, data); +} + +static int report_normal_detected(struct pci_dev *dev, void *data) +{ + return report_error_detected(dev, pci_channel_io_normal, data); +} + static int report_mmio_enabled(struct pci_dev *dev, void *data) { - pci_ers_result_t vote; + pci_ers_result_t vote, *result = data; const struct pci_error_handlers *err_handler; - struct aer_broadcast_data *result_data; - - result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); if (!dev->driver || @@ -118,7 +100,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data) err_handler = dev->driver->err_handler; vote = err_handler->mmio_enabled(dev); - result_data->result = merge_result(result_data->result, vote); + *result = merge_result(*result, vote); out: device_unlock(&dev->dev); return 0; @@ -126,11 +108,8 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data) static int report_slot_reset(struct pci_dev *dev, void *data) { - pci_ers_result_t vote; + pci_ers_result_t vote, *result = data; const struct pci_error_handlers *err_handler; - struct aer_broadcast_data *result_data; - - result_data = (struct aer_broadcast_data *) data; device_lock(&dev->dev); if (!dev->driver || @@ -140,7 +119,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data) err_handler = 
dev->driver->err_handler; vote = err_handler->slot_reset(dev); - result_data->result = merge_result(result_data->result, vote); + *result = merge_result(*result, vote); out: device_unlock(&dev->dev); return 0; @@ -151,17 +130,16 @@ static int report_resume(struct pci_dev *dev, void *data) const struct pci_error_handlers *err_handler; device_lock(&dev->dev); - dev->error_state = pci_channel_io_normal; - - if (!dev->driver || + if (!pci_dev_set_io_state(dev, pci_channel_io_normal) || + !dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->resume) goto out; err_handler = dev->driver->err_handler; err_handler->resume(dev); - pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); out: + pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); device_unlock(&dev->dev); return 0; } @@ -177,207 +155,86 @@ static pci_ers_result_t default_reset_link(struct pci_dev *dev) { int rc; - rc = pci_bridge_secondary_bus_reset(dev); + rc = pci_bus_error_reset(dev); pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n"); return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service) { - struct pci_dev *udev; pci_ers_result_t status; struct pcie_port_service_driver *driver = NULL; - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - /* Reset this port for all subordinates */ - udev = dev; - } else { - /* Reset the upstream component (likely downstream port) */ - udev = dev->bus->self; - } - - /* Use the aer driver of the component firstly */ - driver = pcie_port_find_service(udev, service); - + driver = pcie_port_find_service(dev, service); if (driver && driver->reset_link) { - status = driver->reset_link(udev); - } else if (udev->has_secondary_link) { - status = default_reset_link(udev); + status = driver->reset_link(dev); + } else if (pcie_downstream_port(dev)) { + status = default_reset_link(dev); } else { pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n", - pci_name(udev)); + pci_name(dev)); return PCI_ERS_RESULT_DISCONNECT; } if (status != PCI_ERS_RESULT_RECOVERED) { pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n", - pci_name(udev)); + pci_name(dev)); return PCI_ERS_RESULT_DISCONNECT; } return status; } -/** - * broadcast_error_message - handle message broadcast to downstream drivers - * @dev: pointer to from where in a hierarchy message is broadcasted down - * @state: error state - * @error_mesg: message to print - * @cb: callback to be broadcasted - * - * Invoked during error recovery process. Once being invoked, the content - * of error severity will be broadcasted to all downstream drivers in a - * hierarchy in question. - */ -static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, - enum pci_channel_state state, - char *error_mesg, - int (*cb)(struct pci_dev *, void *)) +void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state, + u32 service) { - struct aer_broadcast_data result_data; - - pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg); - result_data.state = state; - if (cb == report_error_detected) - result_data.result = PCI_ERS_RESULT_CAN_RECOVER; - else - result_data.result = PCI_ERS_RESULT_RECOVERED; - - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - /* - * If the error is reported by a bridge, we think this error - * is related to the downstream link of the bridge, so we - * do error recovery on all subordinates of the bridge instead - * of the bridge and clear the error status of the bridge. 
- */ - if (cb == report_error_detected) - dev->error_state = state; - pci_walk_bus(dev->subordinate, cb, &result_data); - if (cb == report_resume) { - pci_aer_clear_device_status(dev); - pci_cleanup_aer_uncorrect_error_status(dev); - dev->error_state = pci_channel_io_normal; - } + pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; + struct pci_bus *bus; + + /* + * Error recovery runs on all subordinates of the first downstream port. + * If the downstream port detected the error, it is cleared at the end. + */ + if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) + dev = dev->bus->self; + bus = dev->subordinate; + + pci_dbg(dev, "broadcast error_detected message\n"); + if (state == pci_channel_io_frozen) { + pci_walk_bus(bus, report_frozen_detected, &status); + status = reset_link(dev, service); + if (status != PCI_ERS_RESULT_RECOVERED) + goto failed; } else { - /* - * If the error is reported by an end point, we think this - * error is related to the upstream link of the end point. - * The error is non fatal so the bus is ok; just invoke - * the callback for the function that logged the error. - */ - cb(dev, &result_data); + pci_walk_bus(bus, report_normal_detected, &status); } - return result_data.result; -} - -/** - * pcie_do_fatal_recovery - handle fatal error recovery process - * @dev: pointer to a pci_dev data structure of agent detecting an error - * - * Invoked when an error is fatal. Once being invoked, removes the devices - * beneath this AER agent, followed by reset link e.g. secondary bus reset - * followed by re-enumeration of devices. - */ -void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service) -{ - struct pci_dev *udev; - struct pci_bus *parent; - struct pci_dev *pdev, *temp; - pci_ers_result_t result; - - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - udev = dev; - else - udev = dev->bus->self; - - parent = udev->subordinate; - pci_lock_rescan_remove(); - pci_dev_get(dev); - list_for_each_entry_safe_reverse(pdev, temp, &parent->devices, - bus_list) { - pci_dev_get(pdev); - pci_dev_set_disconnected(pdev, NULL); - if (pci_has_subordinate(pdev)) - pci_walk_bus(pdev->subordinate, - pci_dev_set_disconnected, NULL); - pci_stop_and_remove_bus_device(pdev); - pci_dev_put(pdev); + if (status == PCI_ERS_RESULT_CAN_RECOVER) { + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast mmio_enabled message\n"); + pci_walk_bus(bus, report_mmio_enabled, &status); } - result = reset_link(udev, service); - - if ((service == PCIE_PORT_SERVICE_AER) && - (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { - /* - * If the error is reported by a bridge, we think this error - * is related to the downstream link of the bridge, so we - * do error recovery on all subordinates of the bridge instead - * of the bridge and clear the error status of the bridge. - */ - pci_aer_clear_fatal_status(dev); - pci_aer_clear_device_status(dev); - } - - if (result == PCI_ERS_RESULT_RECOVERED) { - if (pcie_wait_for_link(udev, true)) - pci_rescan_bus(udev->bus); - pci_info(dev, "Device recovery from fatal error successful\n"); - } else { - pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); - pci_info(dev, "Device recovery from fatal error failed\n"); - } - - pci_dev_put(dev); - pci_unlock_rescan_remove(); -} - -/** - * pcie_do_nonfatal_recovery - handle nonfatal error recovery process - * @dev: pointer to a pci_dev data structure of agent detecting an error - * - * Invoked when an error is nonfatal/fatal. 
Once being invoked, broadcast - * error detected message to all downstream drivers within a hierarchy in - * question and return the returned code. - */ -void pcie_do_nonfatal_recovery(struct pci_dev *dev) -{ - pci_ers_result_t status; - enum pci_channel_state state; - - state = pci_channel_io_normal; - - status = broadcast_error_message(dev, - state, - "error_detected", - report_error_detected); - - if (status == PCI_ERS_RESULT_CAN_RECOVER) - status = broadcast_error_message(dev, - state, - "mmio_enabled", - report_mmio_enabled); - if (status == PCI_ERS_RESULT_NEED_RESET) { /* * TODO: Should call platform-specific * functions to reset slot before calling * drivers' slot_reset callbacks? */ - status = broadcast_error_message(dev, - state, - "slot_reset", - report_slot_reset); + status = PCI_ERS_RESULT_RECOVERED; + pci_dbg(dev, "broadcast slot_reset message\n"); + pci_walk_bus(bus, report_slot_reset, &status); } if (status != PCI_ERS_RESULT_RECOVERED) goto failed; - broadcast_error_message(dev, - state, - "resume", - report_resume); + pci_dbg(dev, "broadcast resume message\n"); + pci_walk_bus(bus, report_resume, &status); + pci_aer_clear_device_status(dev); + pci_cleanup_aer_uncorrect_error_status(dev); pci_info(dev, "AER: Device recovery successful\n"); return; diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 3ed67676ea2a16d14f41c9710d7b3a8edfd3b157..94479ec041c228440897456538e0eb14cfe46a48 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus) return false; } +static void pcie_pme_disable_interrupt(struct pci_dev *port, + struct pcie_pme_service_data *data) +{ + spin_lock_irq(&data->lock); + pcie_pme_interrupt_enable(port, false); + pcie_clear_root_pme_status(port); + data->noirq = true; + spin_unlock_irq(&data->lock); +} + /** * pcie_pme_suspend - Suspend PCIe PME service device. * @srv: PCIe service device to suspend. @@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) return 0; } - spin_lock_irq(&data->lock); - pcie_pme_interrupt_enable(port, false); - pcie_clear_root_pme_status(port); - data->noirq = true; - spin_unlock_irq(&data->lock); + pcie_pme_disable_interrupt(port, data); synchronize_irq(srv->irq); @@ -400,7 +406,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) /** * pcie_pme_resume - Resume PCIe PME service device. - * @srv - PCIe service device to resume. + * @srv: PCIe service device to resume. */ static int pcie_pme_resume(struct pcie_device *srv) { @@ -423,13 +429,16 @@ static int pcie_pme_resume(struct pcie_device *srv) /** * pcie_pme_remove - Prepare PCIe PME service device for removal. - * @srv - PCIe service device to remove. + * @srv: PCIe service device to remove. */ static void pcie_pme_remove(struct pcie_device *srv) { - pcie_pme_suspend(srv); + struct pcie_pme_service_data *data = get_service_data(srv); + + pcie_pme_disable_interrupt(srv->port, data); free_irq(srv->irq, srv); - kfree(get_service_data(srv)); + cancel_work_sync(&data->work); + kfree(data); } static struct pcie_port_service_driver pcie_pme_driver = { @@ -446,8 +455,7 @@ static struct pcie_port_service_driver pcie_pme_driver = { /** * pcie_pme_service_init - Register the PCIe PME service driver. 
*/ -static int __init pcie_pme_service_init(void) +int __init pcie_pme_init(void) { return pcie_port_service_register(&pcie_pme_driver); } -device_initcall(pcie_pme_service_init); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index d59afa42fc14ba3702b26acb704dc9726442360d..c42cb76fa67fe9648b9345f3a06b143a4571b3f0 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -23,6 +23,32 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 4 +extern bool pcie_ports_dpc_native; + +#ifdef CONFIG_PCIEAER +int pcie_aer_init(void); +#else +static inline int pcie_aer_init(void) { return 0; } +#endif + +#ifdef CONFIG_HOTPLUG_PCI_PCIE +int pcie_hp_init(void); +#else +static inline int pcie_hp_init(void) { return 0; } +#endif + +#ifdef CONFIG_PCIE_PME +int pcie_pme_init(void); +#else +static inline int pcie_pme_init(void) { return 0; } +#endif + +#ifdef CONFIG_PCIE_DPC +int pcie_dpc_init(void); +#else +static inline int pcie_dpc_init(void) { return 0; } +#endif + /* Port Type */ #define PCIE_ANY_PORT (~0) @@ -123,10 +149,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) } #endif -#ifdef CONFIG_PCIEAER -irqreturn_t aer_irq(int irq, void *context); -#endif - struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev, u32 service); struct device *pcie_port_find_device(struct pci_dev *dev, u32 service); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 7c37d815229e9ee20e7732695dda58344fc77a20..54772e5b7f8e49b146b7d9d543b602853ee8a215 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -225,7 +225,9 @@ static int get_port_device_capability(struct pci_dev *dev) * Disable AER on this port in case it's been enabled by the * BIOS (the AER service driver will enable it when necessary). */ - pci_disable_pcie_error_reporting(dev); + if ((pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)) + pci_disable_pcie_error_reporting(dev); } #endif @@ -246,9 +248,14 @@ static int get_port_device_capability(struct pci_dev *dev) pcie_pme_interrupt_enable(dev, false); } - if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) && - pci_aer_available() && services & PCIE_PORT_SERVICE_AER) - services |= PCIE_PORT_SERVICE_DPC; + /* + * With dpc-native, allow Linux to use DPC even if it doesn't have + * permission to use AER. + */ + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) && + pci_aer_available() && + (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER))) + services |= PCIE_PORT_SERVICE_DPC; return services; } @@ -466,6 +473,7 @@ struct device *pcie_port_find_device(struct pci_dev *dev, device = pdrvs.dev; return device; } +EXPORT_SYMBOL_GPL(pcie_port_find_device); /** * pcie_port_device_remove - unregister PCI Express port service devices diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index eef22dc29140cd104318de4f332ab1dbb3e88034..94c2fd71c6ca286eb837185b729c7c00220e347f 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -29,12 +29,20 @@ bool pcie_ports_disabled; */ bool pcie_ports_native; +/* + * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe + * service even if the platform hasn't given us permission. 
+ */ +bool pcie_ports_dpc_native; + static int __init pcie_port_setup(char *str) { if (!strncmp(str, "compat", 6)) pcie_ports_disabled = true; else if (!strncmp(str, "native", 6)) pcie_ports_native = true; + else if (!strncmp(str, "dpc-native", 10)) + pcie_ports_dpc_native = true; return 1; } @@ -146,6 +154,13 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, return PCI_ERS_RESULT_CAN_RECOVER; } +static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev) +{ + pci_restore_state(dev); + pci_save_state(dev); + return PCI_ERS_RESULT_RECOVERED; +} + static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev) { return PCI_ERS_RESULT_RECOVERED; @@ -177,14 +192,17 @@ static void pcie_portdrv_err_resume(struct pci_dev *dev) /* * LINUX Device Driver Model */ -static const struct pci_device_id port_pci_ids[] = { { +static const struct pci_device_id port_pci_ids[] = { /* handle any PCI-Express port */ - PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), - }, { /* end: all zeroes */ } + { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, + /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ + { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, + { }, }; static const struct pci_error_handlers pcie_portdrv_err_handler = { .error_detected = pcie_portdrv_error_detected, + .slot_reset = pcie_portdrv_slot_reset, .mmio_enabled = pcie_portdrv_mmio_enabled, .resume = pcie_portdrv_err_resume, }; @@ -226,11 +244,20 @@ static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { {} }; +static void __init pcie_init_services(void) +{ + pcie_aer_init(); + pcie_pme_init(); + pcie_dpc_init(); + pcie_hp_init(); +} + static int __init pcie_portdrv_init(void) { if (pcie_ports_disabled) return -EACCES; + pcie_init_services(); dmi_check_system(pcie_portdrv_dmi_table); return pci_register_driver(&pcie_portdriver); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 201f9e5ff55c0a97e330d9dcf139f8d2cd96bad1..055a2c47ae3acb19e0beec6e9868b37ca4232d7b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -317,7 +317,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags = 0; out: if (res->flags) - pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res); + pci_info(dev, "reg 0x%x: %pR\n", pos, res); return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; } @@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } +static void pci_read_bridge_windows(struct pci_dev *bridge) +{ + u16 io; + u32 pmem, tmp; + + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) + bridge->io_window = 1; + + /* + * DECchip 21050 pass 2 errata: the bridge may miss an address + * disconnect boundary by one PCI data phase. Workaround: do not + * use prefetching on this device. 
+ */ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (!pmem) + return; + + bridge->pref_window = 1; + + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + + /* + * Bridge claims to have a 64-bit prefetchable memory + * window; verify that the upper bits are actually + * writable. + */ + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); + if (tmp) + bridge->pref_64_window = 1; + } +} + static void pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -384,7 +435,7 @@ static void pci_read_bridge_io(struct pci_bus *child) region.start = base; region.end = limit + io_granularity - 1; pcibios_bus_to_resource(dev->bus, res, &region); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -406,7 +457,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, &region); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -459,7 +510,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, &region); - pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); + pci_info(dev, " bridge window %pR\n", res); } } @@ -489,8 +540,7 @@ void pci_read_bridge_bases(struct pci_bus *child) if (res && res->flags) { pci_bus_add_resource(child, res, PCI_SUBTRACTIVE_DECODE); - pci_printk(KERN_DEBUG, dev, - " bridge window %pR (subtractive decode)\n", + pci_info(dev, " bridge window %pR (subtractive decode)\n", res); } } @@ -535,16 +585,9 @@ static void pci_release_host_bridge_dev(struct device *dev) kfree(to_pci_host_bridge(dev)); } -struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +static void pci_init_host_bridge(struct pci_host_bridge *bridge) { - struct pci_host_bridge *bridge; - - bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); - if (!bridge) - return NULL; - INIT_LIST_HEAD(&bridge->windows); - bridge->dev.release = pci_release_host_bridge_dev; /* * We assume we can manage these PCIe features.
Some systems may @@ -557,6 +600,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) bridge->native_shpc_hotplug = 1; bridge->native_pme = 1; bridge->native_ltr = 1; +} + +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +{ + struct pci_host_bridge *bridge; + + bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); + if (!bridge) + return NULL; + + pci_init_host_bridge(bridge); + bridge->dev.release = pci_release_host_bridge_dev; return bridge; } @@ -571,7 +626,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, if (!bridge) return NULL; - INIT_LIST_HEAD(&bridge->windows); + pci_init_host_bridge(bridge); bridge->dev.release = devm_pci_release_host_bridge_dev; return bridge; @@ -586,6 +641,7 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge) } EXPORT_SYMBOL(pci_free_host_bridge); +/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */ static const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ @@ -605,13 +661,14 @@ static const unsigned char pcix_bus_speed[] = { PCI_SPEED_133MHz_PCIX_533 /* F */ }; +/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */ const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCIE_SPEED_16_0GT, /* 4 */ - PCI_SPEED_UNKNOWN, /* 5 */ + PCIE_SPEED_32_0GT, /* 5 */ PCI_SPEED_UNKNOWN, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ @@ -623,6 +680,44 @@ const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* E */ PCI_SPEED_UNKNOWN /* F */ }; +EXPORT_SYMBOL_GPL(pcie_link_speed); + +const char *pci_speed_string(enum pci_bus_speed speed) +{ + /* Indexed by the pci_bus_speed enum */ + static const char *speed_strings[] = { + "33 MHz PCI", /* 0x00 */ + "66 MHz PCI", /* 0x01 */ + "66 MHz PCI-X", /* 0x02 */ + "100 MHz PCI-X", /* 0x03 */ + "133 MHz PCI-X", /* 0x04 */ + NULL, /* 0x05 */ + NULL, /* 0x06 */ + NULL, /* 0x07 */ + NULL, /* 0x08 */ + "66 MHz PCI-X 266", /* 0x09 */ + "100 MHz PCI-X 266", /* 0x0a */ + "133 MHz PCI-X 266", /* 0x0b */ + "Unknown AGP", /* 0x0c */ + "1x AGP", /* 0x0d */ + "2x AGP", /* 0x0e */ + "4x AGP", /* 0x0f */ + "8x AGP", /* 0x10 */ + "66 MHz PCI-X 533", /* 0x11 */ + "100 MHz PCI-X 533", /* 0x12 */ + "133 MHz PCI-X 533", /* 0x13 */ + "2.5 GT/s PCIe", /* 0x14 */ + "5.0 GT/s PCIe", /* 0x15 */ + "8.0 GT/s PCIe", /* 0x16 */ + "16.0 GT/s PCIe", /* 0x17 */ + "32.0 GT/s PCIe", /* 0x18 */ + }; + + if (speed < ARRAY_SIZE(speed_strings)) + return speed_strings[speed]; + return "Unknown"; +} +EXPORT_SYMBOL_GPL(pci_speed_string); void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) { @@ -713,6 +808,7 @@ static void pci_set_bus_speed(struct pci_bus *bus) pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; + bridge->link_active_reporting = !!(linkcap & PCI_EXP_LNKCAP_DLLLARC); pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); pcie_update_link_speed(bus, linksta); @@ -792,6 +888,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) bus->sysdata = bridge->sysdata; bus->msi = bridge->msi; bus->ops = bridge->ops; + bus->backup_ops = bus->ops; bus->number = bus->busn_res.start = bridge->busnr; #ifdef CONFIG_PCI_DOMAINS_GENERIC bus->domain_nr = pci_bus_find_domain_nr(bus, parent); @@ -813,9 +910,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) goto free; err = device_register(&bridge->dev); - if (err) + 
if (err) { put_device(&bridge->dev); - + goto free; + } bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -937,7 +1035,11 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, return NULL; child->parent = parent; - child->ops = parent->ops; + if (parent->backup_ops) + child->ops = parent->backup_ops; + else + child->ops = parent->ops; + child->backup_ops = child->ops; child->msi = parent->msi; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; @@ -1029,6 +1131,42 @@ static void pci_enable_crs(struct pci_dev *pdev) static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, unsigned int available_buses); +/** + * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus + * numbers from EA capability. + * @dev: Bridge + * @sec: updated with secondary bus number from EA + * @sub: updated with subordinate bus number from EA + * + * If @dev is a bridge with EA capability that specifies valid secondary + * and subordinate bus numbers, return true with the bus numbers in @sec + * and @sub. Otherwise return false. + */ +static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) +{ + int ea, offset; + u32 dw; + u8 ea_sec, ea_sub; + + if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) + return false; + + /* find PCI EA capability in list */ + ea = pci_find_capability(dev, PCI_CAP_ID_EA); + if (!ea) + return false; + + offset = ea + PCI_EA_FIRST_ENT; + pci_read_config_dword(dev, offset, &dw); + ea_sec = dw & PCI_EA_SEC_BUS_MASK; + ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; + if (ea_sec == 0 || ea_sub < ea_sec) + return false; + + *sec = ea_sec; + *sub = ea_sub; + return true; +} /* * pci_scan_bridge_extend() - Scan buses behind a bridge @@ -1063,6 +1201,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, u16 bctl; u8 primary, secondary, subordinate; int broken = 0; + bool fixed_buses; + u8 fixed_sec, fixed_sub; + int next_busnr; /* * Make sure the bridge is powered on to be able to access config @@ -1162,17 +1303,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); + /* Read bus numbers from EA Capability (if present) */ + fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub); + if (fixed_buses) + next_busnr = fixed_sec; + else + next_busnr = max + 1; + /* * Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged, so in this * case we only re-scan this bus. */ - child = pci_find_bus(pci_domain_nr(bus), max+1); + child = pci_find_bus(pci_domain_nr(bus), next_busnr); if (!child) { - child = pci_add_new_bus(bus, dev, max+1); + child = pci_add_new_bus(bus, dev, next_busnr); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, + pci_bus_insert_busn_res(child, next_busnr, bus->busn_res.end); } max++; @@ -1233,7 +1381,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, max += i; } - /* Set subordinate bus number to its real value */ + /* + * Set subordinate bus number to its real value. + * If fixed subordinate bus number exists from EA + * capability then use it. 
+ */ + if (fixed_buses) + max = fixed_sub; pci_bus_update_busn_res_end(child, max); pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } @@ -1327,26 +1481,38 @@ void set_pcie_port_type(struct pci_dev *pdev) pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; + parent = pci_upstream_bridge(pdev); + if (!parent) + return; + /* - * A Root Port or a PCI-to-PCIe bridge is always the upstream end - * of a Link. No PCIe component has two Links. Two Links are - * connected by a Switch that has a Port on each Link and internal - * logic to connect the two Ports. + * Some systems do not identify their upstream/downstream ports + * correctly so detect impossible configurations here and correct + * the port type accordingly. */ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT || - type == PCI_EXP_TYPE_PCIE_BRIDGE) - pdev->has_secondary_link = 1; - else if (type == PCI_EXP_TYPE_UPSTREAM || - type == PCI_EXP_TYPE_DOWNSTREAM) { - parent = pci_upstream_bridge(pdev); - + if (type == PCI_EXP_TYPE_DOWNSTREAM) { + /* + * If pdev claims to be downstream port but the parent + * device is also downstream port assume pdev is actually + * upstream port. + */ + if (pcie_downstream_port(parent)) { + pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM; + } + } else if (type == PCI_EXP_TYPE_UPSTREAM) { /* - * Usually there's an upstream device (Root Port or Switch - * Downstream Port), but we can't assume one exists. + * If pdev claims to be upstream port but the parent + * device is also upstream port assume pdev is actually + * downstream port. */ - if (parent && !parent->has_secondary_link) - pdev->has_secondary_link = 1; + if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) { + pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n"); + pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; + pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM; + } } } @@ -1608,7 +1774,7 @@ int pci_setup_device(struct pci_dev *dev) dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ - pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n", + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", dev->vendor, dev->device, dev->hdr_type, dev->class); if (pci_early_dump) @@ -1629,7 +1795,7 @@ int pci_setup_device(struct pci_dev *dev) /* Device class may be changed after fixup */ class = dev->class >> 8; - if (dev->non_compliant_bars) { + if (dev->non_compliant_bars && !dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); @@ -1706,6 +1872,7 @@ int pci_setup_device(struct pci_dev *dev) pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); + pci_read_bridge_windows(dev); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { @@ -1823,8 +1990,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, hpp->latency_timer); pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); - if (hpp->enable_serr) - pci_bctl |= PCI_BRIDGE_CTL_SERR; if (hpp->enable_perr) pci_bctl |= PCI_BRIDGE_CTL_PARITY; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); @@ -2038,11 +2203,8 @@
static void pci_configure_ltr(struct pci_dev *dev) { #ifdef CONFIG_PCIEASPM struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); - u32 cap; struct pci_dev *bridge; - - if (!host->native_ltr) - return; + u32 cap, ctl; if (!pci_is_pcie(dev)) return; @@ -2051,22 +2213,35 @@ static void pci_configure_ltr(struct pci_dev *dev) if (!(cap & PCI_EXP_DEVCAP2_LTR)) return; - /* - * Software must not enable LTR in an Endpoint unless the Root - * Complex and all intermediate Switches indicate support for LTR. - * PCIe r3.1, sec 6.18. - */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - dev->ltr_path = 1; - else { + pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl); + if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { + dev->ltr_path = 1; + return; + } + bridge = pci_upstream_bridge(dev); if (bridge && bridge->ltr_path) dev->ltr_path = 1; + + return; } - if (dev->ltr_path) + if (!host->native_ltr) + return; + + /* + * Software must not enable LTR in an Endpoint unless the Root + * Complex and all intermediate Switches indicate support for LTR. + * PCIe r4.0, sec 6.18. + */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + ((bridge = pci_upstream_bridge(dev)) && + bridge->ltr_path)) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN); + dev->ltr_path = 1; + } #endif } @@ -2096,6 +2271,24 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) #endif } +static void pci_configure_serr(struct pci_dev *dev) +{ + u16 control; + + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + + /* + * A bridge will not forward ERR_ messages coming from an + * endpoint unless SERR# forwarding is enabled. + */ + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control); + if (!(control & PCI_BRIDGE_CTL_SERR)) { + control |= PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control); + } + } +} + static void pci_configure_device(struct pci_dev *dev) { struct hotplug_params hpp; @@ -2106,6 +2299,7 @@ static void pci_configure_device(struct pci_dev *dev) pci_configure_relaxed_ordering(dev); pci_configure_ltr(dev); pci_configure_eetlp_prefix(dev); + pci_configure_serr(dev); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); @@ -2212,9 +2406,444 @@ static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, return true; } +int skip_bus_flag = 0; +EXPORT_SYMBOL_GPL(skip_bus_flag); + +#define HOSTBRIGE_1620_NUM 48 +struct skip_bus_num { + char module_name[32]; + char label[4]; + int bus_num; + int dev_num; + int skip; +} skip_1620_bus_num[HOSTBRIGE_1620_NUM] = { + /*chip 0*/ + { + .module_name = "chip0_pcie", + .label = "a0", + .bus_num = 0, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip0_pcie_dma", + .label = "a1", + .bus_num = 0x7b, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip0_pcie_sdi", + .label = "a2", + .bus_num = 0x7b, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip0_USB", + .label = "a3", + .bus_num = 0x7a, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip0_hpre", + .label = "a4", + .bus_num = 0x78, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip0_rde", + .label = "a5", + .bus_num = 0x78, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip0_nic", + .label = "aa", + .bus_num = 0x7c, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip0_sas", + .label = "a6", + .bus_num = 0x74, + .dev_num = 0x02, + .skip = 0 + }, + { + .module_name = "chip0_sas1", + .label = "ab", + .bus_num = 0x74, + .dev_num = 
0x04, + .skip = 0 + }, + { + .module_name = "chip0_sata", + .label = "a7", + .bus_num = 0x74, + .dev_num = 0x03, + .skip = 0 + }, + { + .module_name = "chip0_zip", + .label = "a8", + .bus_num = 0x74, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip0_sec", + .label = "a9", + .bus_num = 0x74, + .dev_num = 0x1, + .skip = 0 + }, + /*chip 1*/ + { + .module_name = "chip1_pcie", + .label = "b0", + .bus_num = 0x80, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip1_pcie_dma", + .label = "b1", + .bus_num = 0xbb, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip1_pcie_sdi", + .label = "b2", + .bus_num = 0xbb, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip1_USB", + .label = "b3", + .bus_num = 0xba, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip1_hpre", + .label = "b4", + .bus_num = 0xb8, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip1_rde", + .label = "b5", + .bus_num = 0xb8, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip1_nic", + .label = "ba", + .bus_num = 0xbc, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip1_sas", + .label = "b6", + .bus_num = 0xb4, + .dev_num = 0x02, + .skip = 0 + }, + { + .module_name = "chip1_sas1", + .label = "bb", + .bus_num = 0xb4, + .dev_num = 0x04, + .skip = 0 + }, + { + .module_name = "chip1_sata", + .label = "b7", + .bus_num = 0xb4, + .dev_num = 0x03, + .skip = 0 + }, + { + .module_name = "chip1_zip", + .label = "b8", + .bus_num = 0xb4, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip0_sec", + .label = "b9", + .bus_num = 0xb4, + .dev_num = 0x1, + .skip = 0 + }, + + /*chip 2*/ + { + .module_name = "chip2_pcie", + .label = "c0", + .bus_num = 0xc0, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip2_pcie_dma", + .label = "c1", + .bus_num = 0xdb, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip2_pcie_sdi", + .label = "c2", + .bus_num = 0xdb, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip2_USB", + .label = "c3", + .bus_num = 0xda, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip2_hpre", + .label = "c4", + .bus_num = 0xd8, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip2_rde", + .label = "c5", + .bus_num = 0xd8, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip2_nic", + .label = "ca", + .bus_num = 0xdc, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip2_sas", + .label = "c6", + .bus_num = 0xd4, + .dev_num = 0x02, + .skip = 0 + }, + { + .module_name = "chip2_sas1", + .label = "cb", + .bus_num = 0xd4, + .dev_num = 0x04, + .skip = 0 + }, + { + .module_name = "chip2_sata", + .label = "c7", + .bus_num = 0xd4, + .dev_num = 0x03, + .skip = 0 + }, + { + .module_name = "chip2_zip", + .label = "c8", + .bus_num = 0xd4, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip2_sec", + .label = "c9", + .bus_num = 0xd4, + .dev_num = 0x1, + .skip = 0 + }, + + /*chip 3*/ + { + .module_name = "chip3_pcie", + .label = "d0", + .bus_num = 0xe0, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip3_pcie_dma", + .label = "d1", + .bus_num = 0xfb, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip3_pcie_sdi", + .label = "d2", + .bus_num = 0xfb, + .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip3_USB", + .label = "d3", + .bus_num = 0xfa, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip3_hpre", + .label = "d4", + .bus_num = 0xf8, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip3_rde", + .label = "d5", + .bus_num = 0xf8, 
+ .dev_num = 0x1, + .skip = 0 + }, + { + .module_name = "chip3_nic", + .label = "da", + .bus_num = 0xfc, + .dev_num = 0xff, + .skip = 0 + }, + { + .module_name = "chip3_sas", + .label = "d6", + .bus_num = 0xf4, + .dev_num = 0x02, + .skip = 0 + }, + { + .module_name = "chip3_sas1", + .label = "db", + .bus_num = 0xf4, + .dev_num = 0x04, + .skip = 0 + }, + { + .module_name = "chip3_sata", + .label = "d7", + .bus_num = 0xf4, + .dev_num = 0x03, + .skip = 0 + }, + { + .module_name = "chip3_zip", + .label = "d8", + .bus_num = 0xf4, + .dev_num = 0x0, + .skip = 0 + }, + { + .module_name = "chip3_sec", + .label = "d9", + .bus_num = 0xf4, + .dev_num = 0x1, + .skip = 0 + }, +}; + +static int is_skip_bus(int busno, int devno) +{ + int i; + + /* record whether the busno has been matched in the for loop. */ + int matched = 0; + + + for (i = 0; i < HOSTBRIGE_1620_NUM; i++) { + if (skip_1620_bus_num[i].bus_num == busno) { + char skip_line[16]; + + matched = 1; + + if ((skip_1620_bus_num[i].dev_num == 0xff) + || (skip_1620_bus_num[i].dev_num == (devno >> 3))) { + + if (skip_1620_bus_num[i].skip) { + strcpy(skip_line, "skipped"); + } else { + strcpy(skip_line, "not skip"); + } + + printk(KERN_INFO "qzf===> dev %x:%x(%s): %s\n", + busno, devno, + skip_1620_bus_num[i].module_name, + skip_line); + + return skip_1620_bus_num[i].skip; + } + } + } + + /* this bus matched before; whether to skip was decided in + the for loop above. If we still get here, just skip it. If it never + matched, it is a secondary bus and must not be skipped. */ + if (matched) + pr_info("qzf===> dev %x:%x (unknown): skipped\n", busno, devno); + return matched; +} + +static int set_skip_bus(const char *label) +{ + int i; + + for (i = 0; i < HOSTBRIGE_1620_NUM; i++) { + if (!strncmp(label, skip_1620_bus_num[i].label, 2)) { + skip_1620_bus_num[i].skip = 1; + break; + } + } + + return 0; +} + +static int __init pci_skip_bus(char *str) +{ + int i = 0; + const char *label_skip[] = {"a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "a8", "a9", "aa", "ab", "b0", "b1", "b2", "b3", "b4", + "b5", "b6", "b7", "b8", "b9", "ba", "bb", "c0", "c1", "c2", "c3", + "c4", "c5", "c6", "c7", "c8", "c9", "ca", "cb", "d0", "d1", "d2", + "d3", "d4", "d5", "d6", "d7", "d8", "d9", "da", "db", "ff"}; + + skip_bus_flag = 1; + while (strncmp(label_skip[i], "ff", 2)) { + if (strstr(str, label_skip[i])) + set_skip_bus(label_skip[i]); + + i++; + } + + return 0; +} + +__setup("pci_skip_bus=", pci_skip_bus); + bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, int timeout) { + if (skip_bus_flag && is_skip_bus(bus->number, devfn)) + return false; + if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; @@ -2271,6 +2900,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) pci_set_of_node(dev); if (pci_setup_device(dev)) { + pci_release_of_node(dev); pci_bus_put(dev->bus); kfree(dev); return NULL; @@ -2426,6 +3056,11 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) /* Set up MSI IRQ domain */ pci_set_msi_domain(dev); + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + dev->rpdev = dev; + else + dev->rpdev = pcie_find_root_port(dev); + /* Notifier could use PCI capabilities */ dev->match_driver = false; ret = device_add(&dev->dev); @@ -2495,12 +3130,8 @@ static int only_one_child(struct pci_bus *bus) * A PCIe Downstream Port normally leads to a Link with only Device * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan * only for Device 0 in that situation.
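The hunk below, like the set_pcie_port_type() rework earlier, replaces the has_secondary_link flag with a pcie_downstream_port() test. That helper is not part of this hunk; assuming it is the small inline added to the PCI headers by the same series, it can be sketched roughly as:

	/*
	 * Root Ports, Switch Downstream Ports and PCI/PCI-X to PCIe bridges
	 * are all downstream-facing ports, i.e. the upstream end of a Link.
	 */
	static inline bool pcie_downstream_port(const struct pci_dev *dev)
	{
		int type = pci_pcie_type(dev);

		return type == PCI_EXP_TYPE_ROOT_PORT ||
		       type == PCI_EXP_TYPE_DOWNSTREAM ||
		       type == PCI_EXP_TYPE_PCIE_BRIDGE;
	}

With that in place, only_one_child() below can ask the bridge directly whether it is a downstream-facing port instead of relying on the has_secondary_link hack.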
- * - * Checking has_secondary_link is a hack to identify Downstream - * Ports because sometimes Switches are configured such that the - * PCIe Port Type labels are backwards. */ - if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link) + if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge)) return 1; return 0; @@ -2977,7 +3608,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) conflict = request_resource_conflict(parent_res, res); if (conflict) - dev_printk(KERN_DEBUG, &b->dev, + dev_info(&b->dev, "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", res, pci_is_root_bus(b) ? "domain " : "", parent_res, conflict->name, conflict); @@ -2997,8 +3628,7 @@ int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) size = bus_max - res->start + 1; ret = adjust_resource(res, res->start, size); - dev_printk(KERN_DEBUG, &b->dev, - "busn_res: %pR end %s updated to %02x\n", + dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n", &old_res, ret ? "can not be" : "is", bus_max); if (!ret && !res->parent) @@ -3016,8 +3646,7 @@ void pci_bus_release_busn_res(struct pci_bus *b) return; ret = release_resource(res); - dev_printk(KERN_DEBUG, &b->dev, - "busn_res: %pR %s released\n", + dev_info(&b->dev, "busn_res: %pR %s released\n", res, ret ? "can not be" : "is"); } diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 7ac035af39f0458ac26431a0629e3d0a144939d7..6fa1627ce08d35360bca82b8059ad968423574a1 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -52,7 +52,7 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf, nbytes = size - pos; cnt = nbytes; - if (!access_ok(VERIFY_WRITE, buf, cnt)) + if (!access_ok(buf, cnt)) return -EINVAL; pci_config_pm_runtime_get(dev); @@ -125,7 +125,7 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf, nbytes = size - pos; cnt = nbytes; - if (!access_ok(VERIFY_READ, buf, cnt)) + if (!access_ok(buf, cnt)) return -EINVAL; pci_config_pm_runtime_get(dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6bc27b7fd452ad591626625c69454d85041c1a02..6874c96b36eb3337afe9acca3d24b9ad5bba4cd5 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -159,8 +159,7 @@ static int __init pci_apply_final_quirks(void) u8 tmp; if (pci_cache_line_size) - printk(KERN_DEBUG "PCI: CLS %u bytes\n", - pci_cache_line_size << 2); + pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2); pci_apply_fixup_final_quirks = true; for_each_pci_dev(dev) { @@ -177,16 +176,16 @@ static int __init pci_apply_final_quirks(void) if (!tmp || cls == tmp) continue; - printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n", - cls << 2, tmp << 2, - pci_dfl_cache_line_size << 2); + pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n", + cls << 2, tmp << 2, + pci_dfl_cache_line_size << 2); pci_cache_line_size = pci_dfl_cache_line_size; } } if (!pci_cache_line_size) { - printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", - cls << 2, pci_dfl_cache_line_size << 2); + pr_info("PCI: CLS %u bytes, default %u\n", cls << 2, + pci_dfl_cache_line_size << 2); pci_cache_line_size = cls ? 
cls : pci_dfl_cache_line_size; } @@ -2220,6 +2219,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +/* + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain + * Link bit cleared after starting the link retrain process to allow this + * process to finish. + * + * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the + * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf. + */ +static void quirk_enable_clear_retrain_link(struct pci_dev *dev) +{ + dev->clear_retrain_link = 1; + pci_info(dev, "Enable PCIe Retrain Link quirk\n"); +} +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link); + static void fixup_rev1_53c810(struct pci_dev *dev) { u32 class = dev->class; @@ -2571,7 +2587,7 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) pci_read_config_dword(dev, 0x74, &cfg); if (cfg & ((1 << 2) | (1 << 15))) { - printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n"); + pr_info("Rewriting IRQ routing register on MCP55\n"); cfg &= ~((1 << 2) | (1 << 15)); pci_write_config_dword(dev, 0x74, cfg); } @@ -3190,7 +3206,11 @@ static void disable_igfx_irq(struct pci_dev *dev) pci_iounmap(dev, regs); } +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); @@ -3244,6 +3264,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */ quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2, + quirk_broken_intx_masking); /* * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) @@ -3379,6 +3401,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); /* * Root port on some Cavium CN8xxx chips do not successfully complete a bus @@ -3779,6 +3802,79 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe) return 0; } +#define PCI_DEVICE_ID_HINIC_VF 0x375E +#define HINIC_VF_FLR_TYPE 0x1000 +#define HINIC_VF_FLR_CAP_BIT_SHIFT 6 +#define HINIC_VF_OP 0xE80 +#define HINIC_VF_FLR_PROC_BIT_SHIFT 10 +#define HINIC_OPERATION_TIMEOUT 15000 + +/* Device-specific reset method for Huawei Intelligent NIC virtual functions */ +static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe) +{ + unsigned long timeout; + void __iomem *bar; + u16 old_command; + u32 val; + + if (probe) + return 0; + + bar = pci_iomap(pdev, 0, 0); + if (!bar) + return -ENOTTY; + + 
pci_read_config_word(pdev, PCI_COMMAND, &old_command); + + /* + * FLR cap bit bit30, FLR processing bit: bit18, to avoid big-endian + * conversion the big-endian bit6, bit10 is directly operated here. + * + * Get and check firmware capabilities. + */ + val = readl(bar + HINIC_VF_FLR_TYPE); + if (!(val & (1UL << HINIC_VF_FLR_CAP_BIT_SHIFT))) { + pci_iounmap(pdev, bar); + return -ENOTTY; + } + + /* + * Set the processing bit for the start of FLR, which will be cleared + * by the firmware after FLR is completed. + */ + val = readl(bar + HINIC_VF_OP); + val = val | (1UL << HINIC_VF_FLR_PROC_BIT_SHIFT); + writel(val, bar + HINIC_VF_OP); + + /* Perform the actual device function reset */ + pcie_flr(pdev); + + pci_write_config_word(pdev, PCI_COMMAND, + old_command | PCI_COMMAND_MEMORY); + + /* Waiting for device reset complete */ + timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT); + do { + val = readl(bar + HINIC_VF_OP); + if (!(val & (1UL << HINIC_VF_FLR_PROC_BIT_SHIFT))) + goto reset_complete; + msleep(20); + } while (time_before(jiffies, timeout)); + + val = readl(bar + HINIC_VF_OP); + if (!(val & (1UL << HINIC_VF_FLR_PROC_BIT_SHIFT))) + goto reset_complete; + + pci_warn(pdev, "Reset dev timeout, flr ack reg: %x\n", + be32_to_cpu(val)); + +reset_complete: + pci_write_config_word(pdev, PCI_COMMAND, old_command); + pci_iounmap(pdev, bar); + + return 0; +} + static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF, reset_intel_82599_sfp_virtfn }, @@ -3790,6 +3886,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr }, { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, reset_chelsio_generic_dev }, + { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF, + reset_hinic_vf_dev }, { 0 } }; @@ -3827,6 +3925,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); +/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias); + static void quirk_dma_func1_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 1) @@ -3848,6 +3950,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); @@ -4071,6 +4175,31 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); + /* + * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability. + * But the implementation could block peer-to-peer transactions between them + * and provide ACS-like functionality. + */ +static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) +{ + u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); + int ret = acs_flags & ~flags ? 
0 : 1; + + if (!pci_is_pcie(dev) || + ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) + return -ENOTTY; + + switch (dev->device) { + case 0x0710 ... 0x071e: + case 0x0721: + case 0x0723 ... 0x0732: + return ret; + } + + return false; +} + /* * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex * where Upstream Transaction Layer Packets with the Relaxed Ordering @@ -4195,15 +4324,21 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) { + if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) + return false; + + switch (dev->device) { /* - * Effectively selects all downstream ports for whole ThunderX 1 - * family by 0xf800 mask (which represents 8 SoCs), while the lower - * bits of device ID are used to indicate which subdevice is used - * within the SoC. + * Effectively selects all downstream ports for whole ThunderX1 + * (which represents 8 SoCs). */ - return (pci_is_pcie(dev) && - (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) && - ((dev->device & 0xf800) == 0xa000)); + case 0xa000 ... 0xa7ff: /* ThunderX1 */ + case 0xaf84: /* ThunderX2 */ + case 0xb884: /* ThunderX3 */ + return true; + default: + return false; + } } static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) @@ -4496,6 +4631,8 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ + { 0x1077, 0x2031, pci_quirk_mf_endpoint_acs}, /* QLogic QL2672 */ + { 0x1077, 0x2532, pci_quirk_mf_endpoint_acs}, /* Cavium ThunderX */ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, /* APM X-Gene */ @@ -4509,6 +4646,24 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, + /* Broadcom multi-function device */ + { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin multi-function devices */ + { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, +#ifdef CONFIG_ARCH_PHYTIUM + /* because PLX switch Vendor id is 0x10b5 on phytium cpu */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* because rootcomplex Vendor id is 0x17cd on phytium cpu */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, +#endif + { PCI_VENDOR_ID_TRUSTNETIC, PCI_ANY_ID, pci_quirk_mf_endpoint_acs }, { 0 } }; @@ -4552,7 +4707,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) #define INTEL_BSPR_REG_BPPD (1 << 9) /* Upstream Peer Decode Configuration Register */ -#define INTEL_UPDCR_REG 0x1114 +#define INTEL_UPDCR_REG 0x1014 /* 5:0 Peer Decode Enable bits */ #define INTEL_UPDCR_REG_MASK 0x3f @@ -4872,6 +5027,7 @@ static void quirk_no_ats(struct pci_dev *pdev) /* AMD Stoney platform GPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ @@ -5057,59 +5213,118 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev) pci_iounmap(pdev, mmio); pci_disable_device(pdev); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575, - quirk_switchtec_ntb_dma_alias); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576, - quirk_switchtec_ntb_dma_alias); +#define SWITCHTEC_QUIRK(vid) \ + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \ + PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias) + +SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */ +SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */ +SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */ +SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */ +SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */ +SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */ +SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */ +SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */ +SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */ +SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */ +SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */ +SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */ +SWITCHTEC_QUIRK(0x8551); 
/* PAX 24XG3 */ +SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */ +SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */ +SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */ +SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */ +SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */ +SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */ +SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */ +SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */ +SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */ +SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */ +SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */ +SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */ +SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */ +SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */ +SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ +SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ +SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ + +/* + * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does + * not always reset the secondary Nvidia GPU between reboots if the system + * is configured to use Hybrid Graphics mode. This results in the GPU + * being left in whatever state it was in during the *previous* boot, which + * causes spurious interrupts from the GPU, which in turn causes us to + * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly, + * this also completely breaks nouveau. + * + * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a + * clean state and fixes all these issues. + * + * When the machine is configured in Dedicated display mode, the issue + * doesn't occur. Fortunately the GPU advertises NoReset+ when in this + * mode, so we can detect that and avoid resetting it. + */ +static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) +{ + void __iomem *map; + int ret; + + if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO || + pdev->subsystem_device != 0x222e || + !pdev->reset_fn) + return; + + if (pci_enable_device_mem(pdev)) + return; + + /* + * Based on nvkm_device_ctor() in + * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c + */ + map = pci_iomap(pdev, 0, 0x23000); + if (!map) { + pci_err(pdev, "Can't map MMIO space\n"); + goto out_disable; + } + + /* + * Make sure the GPU looks like it's been POSTed before resetting + * it. 
+ */ + if (ioread32(map + 0x2240c) & 0x2) { + pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); + ret = pci_reset_bus(pdev); + if (ret < 0) + pci_err(pdev, "Failed to reset GPU: %d\n", ret); + } + + iounmap(map); +out_disable: + pci_disable_device(pdev); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, + PCI_CLASS_DISPLAY_VGA, 8, + quirk_reset_lenovo_thinkpad_p50_nvgpu); + +static void pci_quirk_hisi_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_NETWORK_ETHERNET << 8; + pci_info(dev, "force hisi class type to network\n"); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_HUAWEI, PCIE_DEVICE_ID_HISI_5896, + pci_quirk_hisi_fixup_class); + +static void pci_quirk_hisi_fixup_bar(struct pci_dev *dev) +{ + int i, start = 3; + + for (i = start; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + + pci_info(dev, "force disable hisilicon np bar\n"); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, PCIE_DEVICE_ID_HISI_5896, + pci_quirk_hisi_fixup_bar); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 461e7fd2756fb317d147fbebc0ec0ca2bda339c1..e9c6b120cf451331dc294f50a3ac1315cd37c2c3 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev) pci_dev_assign_added(dev, false); } - - if (dev->bus->self) - pcie_aspm_exit_link_state(dev); } static void pci_destroy_dev(struct pci_dev *dev) @@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev) list_del(&dev->bus_list); up_write(&pci_bus_sem); + pcie_aspm_exit_link_state(dev); pci_bridge_d3_update(dev); pci_free_resources(dev); put_device(&dev->dev); diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897cf791f33885f2df5aece15464ee7..d7bae7a804c46ca0793d5aed2498f42e08b4b6fe 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -98,6 +98,12 @@ static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, } /* get the PCI data structure and check its "PCIR" signature */ pds = image + readw(image + 24); + /* The PCIR data structure must begin on a 4-byte boundary */ + if (!IS_ALIGNED((unsigned long)pds, 4)) { + pci_info(pdev, "Invalid PCI ROM header signature: PCIR %#06x\n", + readw(image + 24)); + break; + } if (readl(pds) != 0x52494350) { pci_info(pdev, "Invalid PCI ROM data signature: expecting 0x52494350, got %#010x\n", readl(pds)); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 79b1824e83b47c058a262185668aff946293bf5e..39d19302f3cbe0655ffc8be116391744ba579210 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -255,10 +255,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head, (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) - pci_printk(KERN_DEBUG, add_res->dev, - "failed to add %llx res[%d]=%pR\n", - (unsigned long long)add_size, - idx, res); + pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", + (unsigned long long) add_size, idx, + res); } out: list_del(&add_res->list); @@ -735,58 +734,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) base/limit registers must be read-only and read as 0. 
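The simplified pci_bridge_check_ranges() below consults bridge->io_window, bridge->pref_window and bridge->pref_64_window instead of probing the base/limit registers itself; those bits are presumably filled in by the pci_read_bridge_windows() call added to pci_setup_device() earlier in this patch. A sketch of such a helper, assuming it simply moves the probing logic removed below to enumeration time, might look like:

	static void pci_read_bridge_windows(struct pci_dev *bridge)
	{
		u16 io;
		u32 pmem, tmp;

		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		if (!io) {
			/* Writable base/limit means the window is implemented */
			pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
			pci_read_config_word(bridge, PCI_IO_BASE, &io);
			pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
		}
		if (io)
			bridge->io_window = 1;

		/*
		 * DECchip 21050 pass 2 errata: the bridge may miss an address
		 * disconnect boundary by one PCI data phase; do not use
		 * prefetching on this device.
		 */
		if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
			return;

		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		if (!pmem) {
			pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xffe0fff0);
			pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
			pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
		}
		if (!pmem)
			return;

		bridge->pref_window = 1;

		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
			/* Double-check that the upper 32 bits are writable */
			pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
			pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
			pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
			pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
			if (tmp)
				bridge->pref_64_window = 1;
		}
	}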
*/ static void pci_bridge_check_ranges(struct pci_bus *bus) { - u16 io; - u32 pmem; struct pci_dev *bridge = bus->self; - struct resource *b_res; + struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; - b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; b_res[1].flags |= IORESOURCE_MEM; - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) + if (bridge->io_window) b_res[0].flags |= IORESOURCE_IO; - /* DECchip 21050 pass 2 errata: the bridge may miss an address - disconnect boundary by one PCI data phase. - Workaround: do not use prefetching on this device. */ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (pmem) { + if (bridge->pref_window) { b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == - PCI_PREF_RANGE_TYPE_64) { + if (bridge->pref_64_window) { b_res[2].flags |= IORESOURCE_MEM_64; b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; } } - - /* double check if bridge does support 64 bit pref */ - if (b_res[2].flags & IORESOURCE_MEM_64) { - u32 mem_base_hi, tmp; - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, - &mem_base_hi); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - if (!tmp) - b_res[2].flags &= ~IORESOURCE_MEM_64; - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - mem_base_hi); - } } /* Helper function for sizing routines: find first available @@ -951,9 +913,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); - pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n", - b_res, &bus->busn_res, - (unsigned long long)size1-size0); + pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n", + b_res, &bus->busn_res, + (unsigned long long) size1 - size0); } } @@ -1098,7 +1060,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); - pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", + pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", b_res, &bus->busn_res, (unsigned long long) (size1 - size0), (unsigned long long) add_align); @@ -1566,8 +1528,8 @@ static void pci_bridge_release_resources(struct pci_bus *bus, release_child_resources(r); if (!release_resource(r)) { type = old_flags = r->flags & PCI_RES_TYPE_MASK; - pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n", - PCI_BRIDGE_RESOURCES + idx, r); + pci_info(dev, "resource %d %pR released\n", + PCI_BRIDGE_RESOURCES + idx, r); /* keep the old size */ r->end = resource_size(r) - 1; r->start = 0; @@ -1631,7 +1593,7 @@ static void pci_bus_dump_res(struct pci_bus *bus) if (!res || !res->end || !res->flags) continue; - dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); + dev_info(&bus->dev, "resource %d 
%pR\n", i, res); } } @@ -1724,10 +1686,15 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus, enum enable_type enable_local) { bool unassigned = false; + struct pci_host_bridge *host; if (enable_local != undefined) return enable_local; + host = pci_find_host_bridge(bus); + if (host->preserve_config) + return auto_disabled; + pci_walk_bus(bus, iov_resources_unassigned, &unassigned); if (unassigned) return auto_enabled; @@ -1765,9 +1732,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) int max_depth = pci_bus_get_depth(bus); pci_try_num = max_depth + 1; - dev_printk(KERN_DEBUG, &bus->dev, - "max bus depth: %d pci_try_num: %d\n", - max_depth, pci_try_num); + dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n", + max_depth, pci_try_num); } again: @@ -1801,8 +1767,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) goto dump; } - dev_printk(KERN_DEBUG, &bus->dev, - "No. %d try to assign unassigned res\n", tried_times + 1); + dev_info(&bus->dev, "No. %d try to assign unassigned res\n", + tried_times + 1); /* third times and later will not check if it is leaf */ if ((tried_times + 1) > 2) @@ -1820,12 +1786,18 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; + int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; - if (fail_res->dev->subordinate) - res->flags = 0; + + if (pci_is_bridge(fail_res->dev)) { + idx = res - &fail_res->dev->resource[0]; + if (idx >= PCI_BRIDGE_RESOURCES && + idx <= PCI_BRIDGE_RESOURCE_END) + res->flags = 0; + } } free_list(&fail_head); @@ -2066,12 +2038,18 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; + int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; - if (fail_res->dev->subordinate) - res->flags = 0; + + if (pci_is_bridge(fail_res->dev)) { + idx = res - &fail_res->dev->resource[0]; + if (idx >= PCI_BRIDGE_RESOURCES && + idx <= PCI_BRIDGE_RESOURCE_END) + res->flags = 0; + } } free_list(&fail_head); diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d8ca40a9769347b8268a4f139a15cd922b14c467..d21fa04fa44d2bca1abd87bacbe9428e540901c0 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) res->end = res->start + pci_rebar_size_to_bytes(size) - 1; /* Check if the new config works by trying to assign everything. 
*/ - ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); - if (ret) - goto error_resize; - + if (dev->bus->self) { + ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); + if (ret) + goto error_resize; + } return 0; error_resize: diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index e634229ece8957607b881e4f1e93c6b7028c85fe..ddc0e962285fc72eeced5a744f6068c048689621 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -14,7 +14,6 @@ struct kset *pci_slots_kset; EXPORT_SYMBOL_GPL(pci_slots_kset); -static DEFINE_MUTEX(pci_slot_mutex); static ssize_t pci_slot_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -50,44 +49,9 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) slot->number); } -/* these strings match up with the values in pci_bus_speed */ -static const char *pci_bus_speed_strings[] = { - "33 MHz PCI", /* 0x00 */ - "66 MHz PCI", /* 0x01 */ - "66 MHz PCI-X", /* 0x02 */ - "100 MHz PCI-X", /* 0x03 */ - "133 MHz PCI-X", /* 0x04 */ - NULL, /* 0x05 */ - NULL, /* 0x06 */ - NULL, /* 0x07 */ - NULL, /* 0x08 */ - "66 MHz PCI-X 266", /* 0x09 */ - "100 MHz PCI-X 266", /* 0x0a */ - "133 MHz PCI-X 266", /* 0x0b */ - "Unknown AGP", /* 0x0c */ - "1x AGP", /* 0x0d */ - "2x AGP", /* 0x0e */ - "4x AGP", /* 0x0f */ - "8x AGP", /* 0x10 */ - "66 MHz PCI-X 533", /* 0x11 */ - "100 MHz PCI-X 533", /* 0x12 */ - "133 MHz PCI-X 533", /* 0x13 */ - "2.5 GT/s PCIe", /* 0x14 */ - "5.0 GT/s PCIe", /* 0x15 */ - "8.0 GT/s PCIe", /* 0x16 */ - "16.0 GT/s PCIe", /* 0x17 */ -}; - static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) { - const char *speed_string; - - if (speed < ARRAY_SIZE(pci_bus_speed_strings)) - speed_string = pci_bus_speed_strings[speed]; - else - speed_string = "Unknown"; - - return sprintf(buf, "%s\n", speed_string); + return sprintf(buf, "%s\n", pci_speed_string(speed)); } static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) @@ -115,6 +79,7 @@ static void pci_slot_release(struct kobject *kobj) up_read(&pci_bus_sem); list_del(&slot->list); + pci_bus_put(slot->bus); kfree(slot); } @@ -296,7 +261,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, goto err; } - slot->bus = parent; + slot->bus = pci_bus_get(parent); slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; @@ -304,16 +269,20 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + pci_bus_put(slot->bus); + kfree(slot); goto err; } + INIT_LIST_HEAD(&slot->list); + list_add(&slot->list, &parent->slots); + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; - - INIT_LIST_HEAD(&slot->list); - list_add(&slot->list, &parent->slots); + } down_read(&pci_bus_sem); list_for_each_entry(dev, &parent->devices, bus_list) @@ -329,7 +298,6 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } @@ -404,7 +372,7 @@ static int pci_slot_init(void) pci_slots_kset = kset_create_and_add("slots", NULL, &pci_bus_kset->kobj); if (!pci_slots_kset) { - printk(KERN_ERR "PCI: Slot initialization failure\n"); + pr_err("PCI: Slot initialization failure\n"); return -ENOMEM; } return 0; diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 54a8b30dda38c446c8c4f839ba2c6cb8be504a8d..5aaa4ce04ec3dfbdecb71286104cbba9d26ff881 100644 --- 
a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -13,7 +13,7 @@ #include #include #include - +#include #include MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); @@ -633,7 +633,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev, u32 reg; s.global = ioread32(&stdev->mmio_sw_event->global_summary); - s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); + s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap); s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); for (i = 0; i < stdev->partition_count; i++) { @@ -800,6 +800,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, { int ret; int nr_idxs; + unsigned int event_flags; struct switchtec_ioctl_event_ctl ctl; if (copy_from_user(&ctl, uctl, sizeof(ctl))) @@ -821,7 +822,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, else return -EINVAL; + event_flags = ctl.flags; for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) { + ctl.flags = event_flags; ret = event_ctl(stdev, &ctl); if (ret < 0) return ret; @@ -1113,7 +1116,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) return 0; - if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE || + eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP) return 0; dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index d96626c614f5674b4de24b1c11bcb13a456a6ca5..cdd71126fa468d02d00b451f41793287272cff79 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -21,8 +21,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, long err; long cfg_ret; + err = -EPERM; + dev = NULL; if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + goto error; err = -ENODEV; dev = pci_get_domain_bus_and_slot(0, bus, dfn); diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index 5acd9c02683af78dd96fffa7d8613c7fc9d582fd..9ae9fb9339e80b6136c3281bcd55cd7acbfba429 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c @@ -13,6 +13,8 @@ #include #include +#include "pci.h" + /** * pci_vc_save_restore_dwords - Save or restore a series of dwords * @dev: device @@ -105,7 +107,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res) struct pci_dev *link = NULL; /* Enable VCs from the downstream device */ - if (!dev->has_secondary_link) + if (!pci_is_pcie(dev) || !pcie_downstream_port(dev)) return; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index eba6e33147a2fd355ff5de0efa6654885b36f939..fbd8d9f54d6a4ef187f2eed1ad822c446c1fd933 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -291,8 +291,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, vector[i] = op.msix_entries[i].vector; } } else { - printk(KERN_DEBUG "enable msix get value %x\n", - op.value); + pr_info("enable msix get value %x\n", op.value); err = op.value; } } else { @@ -364,12 +363,12 @@ static void pci_frontend_disable_msi(struct pci_dev *dev) err = do_pci_op(pdev, &op); if (err == XEN_PCI_ERR_dev_not_found) { /* XXX No response from backend, what shall we do? */ - printk(KERN_DEBUG "get no response from backend for disable MSI\n"); + pr_info("get no response from backend for disable MSI\n"); return; } if (err) /* how can pciback notify us fail? 
*/ - printk(KERN_DEBUG "get fake response frombackend\n"); + pr_info("get fake response from backend\n"); } static struct xen_pci_frontend_ops pci_frontend_ops = { diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h index 01098c841f877c81ab1c539a2b19f309bc3749e8..8ac7b138c094865f3f94cc41ba8824f430b76a22 100644 --- a/drivers/pcmcia/ricoh.h +++ b/drivers/pcmcia/ricoh.h @@ -119,6 +119,10 @@ #define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */ #define RL5C4XX_ZV_ENABLE 0x08 +/* Misc Control 3 Register */ +#define RL5C4XX_MISC3 0x00A2 /* 16 bit */ +#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1) + #ifdef __YENTA_H #define rl_misc(socket) ((socket)->private[0]) @@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket) } } +static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet) +{ + u16 misc3; + + /* + * RL5C475II likely has this setting, too, however no datasheet + * is publicly available for this chip + */ + if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 && + socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478) + return; + + if (socket->dev->revision < 0x80) + return; + + misc3 = config_readw(socket, RL5C4XX_MISC3); + if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) { + if (!quiet) + dev_dbg(&socket->dev->dev, + "CLKRUN feature already disabled\n"); + } else if (disable_clkrun) { + if (!quiet) + dev_info(&socket->dev->dev, + "Disabling CLKRUN feature\n"); + misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS; + config_writew(socket, RL5C4XX_MISC3, misc3); + } +} + static void ricoh_save_state(struct yenta_socket *socket) { rl_misc(socket) = config_readw(socket, RL5C4XX_MISC); @@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket) config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket)); config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket)); config_writew(socket, RL5C4XX_CONFIG, rl_config(socket)); + ricoh_set_clkrun(socket, true); } @@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket) config_writew(socket, RL5C4XX_CONFIG, config); ricoh_set_zv(socket); + ricoh_set_clkrun(socket, false); return 0; } diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index ab3da2262f0fc89f7b65fe898d6efa5bd3113d8f..ac6a3f46b1e6c0faaa23d90c57c5b0446d70b0f6 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -26,7 +26,8 @@ static bool disable_clkrun; module_param(disable_clkrun, bool, 0444); -MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option"); +MODULE_PARM_DESC(disable_clkrun, + "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)"); static bool isa_probe = 1; module_param(isa_probe, bool, 0444); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 08ebaf7cca8baf14b4ac19345d38cdabf80c110e..d87337d3be525862d7138e408652be69331cf4f3 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -52,6 +52,15 @@ config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y +config ARM_SMMU_V3_PMU + bool "ARM SMMUv3 Performance Monitors Extension" + depends on ARM64 && ACPI && ARM_SMMU_V3 + help + Provides support for the SMMU version 3 performance monitor unit (PMU) + on ARM-based systems. + Adds the SMMU PMU into the perf events subsystem for + monitoring SMMU performance events. + config ARM_DSU_PMU tristate "ARM DynamIQ Shared Unit (DSU) PMU" depends on ARM64 @@ -61,13 +70,6 @@ config ARM_DSU_PMU system, control logic. The PMU allows counting various events related to DSU. 
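The HISI_PMU entry removed just below is not dropped outright; it is expected to move into the new drivers/perf/hisilicon/Kconfig pulled in by the source line added near the end of this file (the Makefile still builds obj-$(CONFIG_HISI_PMU) += hisilicon/). A minimal version of that file would presumably carry over the same text, roughly:

	config HISI_PMU
		bool "HiSilicon SoC PMU"
		depends on ARM64 && ACPI
		help
		  Support for HiSilicon SoC uncore performance monitoring
		  unit (PMU), such as: L3C, HHA and DDRC.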
-config HISI_PMU - bool "HiSilicon SoC PMU" - depends on ARM64 && ACPI - help - Support for HiSilicon SoC uncore performance monitoring - unit (PMU), such as: L3C, HHA and DDRC. - config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && ACPI @@ -102,4 +104,6 @@ config ARM_SPE_PMU Extension, which provides periodic sampling of operations in the CPU pipeline and reports this via the perf AUX interface. +source "drivers/perf/hisilicon/Kconfig" + endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index b3902bd37d5328b50b5e4df4170544300d3dca96..f10a932b0d79539b42b666b3f90d04dda4d1e4da 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_ARM_CCN) += arm-ccn.o obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o +obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o obj-$(CONFIG_HISI_PMU) += hisilicon/ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 1bfeb160c5b16b841f34415efbfaba97972f747b..3bbc853dc12fd6537d3180be7e11b53701fedde9 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1692,21 +1692,24 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cci_pmu->cpu = get_cpu(); - - ret = cci_pmu_init(cci_pmu, pdev); - if (ret) { - put_cpu(); - return ret; - } + cci_pmu->cpu = raw_smp_processor_id(); + g_cci_pmu = cci_pmu; cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, "perf/arm/cci:online", NULL, cci_pmu_offline_cpu); - put_cpu(); - g_cci_pmu = cci_pmu; + + ret = cci_pmu_init(cci_pmu, pdev); + if (ret) + goto error_pmu_init; + pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; + +error_pmu_init: + cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); + g_cci_pmu = NULL; + return ret; } static int cci_pmu_remove(struct platform_device *pdev) @@ -1725,6 +1728,7 @@ static struct platform_driver cci_pmu_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, .remove = cci_pmu_remove, diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 7dd850e02f192e43fbb9ad3ab6868975d4170abe..e4e06d2fbe294429b9fe1d7983a403c93b47791c 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1553,6 +1553,7 @@ static struct platform_driver arm_ccn_driver = { .driver = { .name = "arm-ccn", .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, .remove = arm_ccn_remove, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 660cb8ac886ac9bb11319c5205f6dfd451264a8d..f1cb7a910394e8ceb65c173eeb408e5d7e6f431b 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -767,6 +767,7 @@ static struct platform_driver dsu_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(dsu_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, .remove = dsu_pmu_device_remove, @@ -823,7 +824,11 @@ static int __init dsu_pmu_init(void) if (ret < 0) return ret; dsu_pmu_cpuhp_state = ret; - return platform_driver_register(&dsu_pmu_driver); + ret = platform_driver_register(&dsu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); + + return ret; } static 
void __exit dsu_pmu_exit(void) diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d0b7dd8fb184b041446707dceccda87588ba45a2..18e23e6c029f6e950c4bb7bb62e578bcbb184582 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -25,8 +25,92 @@ #include +static int armpmu_count_irq_users(const int irq); + +struct pmu_irq_ops { + void (*enable_pmuirq)(unsigned int irq); + void (*disable_pmuirq)(unsigned int irq); + void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid); +}; + +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid) +{ + free_irq(irq, per_cpu_ptr(devid, cpu)); +} + +static const struct pmu_irq_ops pmuirq_ops = { + .enable_pmuirq = enable_irq, + .disable_pmuirq = disable_irq_nosync, + .free_pmuirq = armpmu_free_pmuirq +}; + +bool pmu_nmi_enable; + +static int __init pmu_nmi_enable_setup(char *str) +{ + pmu_nmi_enable = true; + + return 1; +} +__setup("pmu_nmi_enable", pmu_nmi_enable_setup); + +static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid) +{ + free_nmi(irq, per_cpu_ptr(devid, cpu)); +} + +static const struct pmu_irq_ops pmunmi_ops = { + .enable_pmuirq = enable_nmi, + .disable_pmuirq = disable_nmi_nosync, + .free_pmuirq = armpmu_free_pmunmi +}; + +static void armpmu_enable_percpu_pmuirq(unsigned int irq) +{ + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu, + void __percpu *devid) +{ + if (armpmu_count_irq_users(irq) == 1) + free_percpu_irq(irq, devid); +} + +static const struct pmu_irq_ops percpu_pmuirq_ops = { + .enable_pmuirq = armpmu_enable_percpu_pmuirq, + .disable_pmuirq = disable_percpu_irq, + .free_pmuirq = armpmu_free_percpu_pmuirq +}; + +static void armpmu_enable_percpu_pmunmi(unsigned int irq) +{ + if (!prepare_percpu_nmi(irq)) + enable_percpu_nmi(irq, IRQ_TYPE_NONE); +} + +static void armpmu_disable_percpu_pmunmi(unsigned int irq) +{ + disable_percpu_nmi(irq); + teardown_percpu_nmi(irq); +} + +static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu, + void __percpu *devid) +{ + if (armpmu_count_irq_users(irq) == 1) + free_percpu_nmi(irq, devid); +} + +static const struct pmu_irq_ops percpu_pmunmi_ops = { + .enable_pmuirq = armpmu_enable_percpu_pmunmi, + .disable_pmuirq = armpmu_disable_percpu_pmunmi, + .free_pmuirq = armpmu_free_percpu_pmunmi +}; + static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); static DEFINE_PER_CPU(int, cpu_irq); +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops); static inline u64 arm_pmu_event_max_period(struct perf_event *event) { @@ -321,6 +405,9 @@ validate_group(struct perf_event *event) if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; + if (event == leader) + return 0; + for_each_sibling_event(sibling, leader) { if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; @@ -418,12 +505,7 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; + return validate_group(event); } static int armpmu_event_init(struct perf_event *event) @@ -551,6 +633,19 @@ static int armpmu_count_irq_users(const int irq) return count; } +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (per_cpu(cpu_irq, cpu) == irq + && per_cpu(cpu_irq_ops, cpu)) + return per_cpu(cpu_irq_ops, cpu); + } + + return NULL; +} + void armpmu_free_irq(int irq, int 
cpu) { if (per_cpu(cpu_irq, cpu) == 0) @@ -558,48 +653,112 @@ void armpmu_free_irq(int irq, int cpu) if (WARN_ON(irq != per_cpu(cpu_irq, cpu))) return; - if (!irq_is_percpu_devid(irq)) - free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu)); - else if (armpmu_count_irq_users(irq) == 1) - free_percpu_irq(irq, &cpu_armpmu); + if (pmu_nmi_enable) { + per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu); - per_cpu(cpu_irq, cpu) = 0; + per_cpu(cpu_irq, cpu) = 0; + per_cpu(cpu_irq_ops, cpu) = NULL; + } else { + if (!irq_is_percpu_devid(irq)) + free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu)); + else if (armpmu_count_irq_users(irq) == 1) + free_percpu_irq(irq, &cpu_armpmu); + + per_cpu(cpu_irq, cpu) = 0; + } } int armpmu_request_irq(int irq, int cpu) { int err = 0; const irq_handler_t handler = armpmu_dispatch_irq; + const struct pmu_irq_ops *irq_ops; + if (!irq) return 0; - if (!irq_is_percpu_devid(irq)) { - unsigned long irq_flags; - - err = irq_force_affinity(irq, cpumask_of(cpu)); + if (pmu_nmi_enable) { + if (!irq_is_percpu_devid(irq)) { + unsigned long irq_flags; + + err = irq_force_affinity(irq, cpumask_of(cpu)); + + if (err && num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + goto err_out; + } + + irq_flags = IRQF_PERCPU | + IRQF_NOBALANCING | + IRQF_NO_THREAD; + + irq_set_status_flags(irq, IRQ_NOAUTOEN); + + err = request_nmi(irq, handler, irq_flags, "arm-pmu", + per_cpu_ptr(&cpu_armpmu, cpu)); + + /* If cannot get an NMI, get a normal interrupt */ + if (err) { + err = request_irq(irq, handler, irq_flags, "arm-pmu", + per_cpu_ptr(&cpu_armpmu, cpu)); + irq_ops = &pmuirq_ops; + } else { + irq_ops = &pmunmi_ops; + } + } else if (armpmu_count_irq_users(irq) == 0) { + err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu); + + /* If cannot get an NMI, get a normal interrupt */ + if (err) { + err = request_percpu_irq(irq, handler, "arm-pmu", + &cpu_armpmu); + irq_ops = &percpu_pmuirq_ops; + } else { + irq_ops = &percpu_pmunmi_ops; + } + } else { + /* Per cpudevid irq was already requested by another CPU */ + irq_ops = armpmu_find_irq_ops(irq); + + if (WARN_ON(!irq_ops)) + err = -EINVAL; + } - if (err && num_possible_cpus() > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); + if (err) goto err_out; + + per_cpu(cpu_irq, cpu) = irq; + per_cpu(cpu_irq_ops, cpu) = irq_ops; + } else { + if (!irq_is_percpu_devid(irq)) { + unsigned long irq_flags; + + err = irq_force_affinity(irq, cpumask_of(cpu)); + + if (err && num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + goto err_out; + } + + irq_flags = IRQF_PERCPU | + IRQF_NOBALANCING | + IRQF_NO_THREAD; + + irq_set_status_flags(irq, IRQ_NOAUTOEN); + err = request_irq(irq, handler, irq_flags, "arm-pmu", + per_cpu_ptr(&cpu_armpmu, cpu)); + } else if (armpmu_count_irq_users(irq) == 0) { + err = request_percpu_irq(irq, handler, "arm-pmu", + &cpu_armpmu); } - irq_flags = IRQF_PERCPU | - IRQF_NOBALANCING | - IRQF_NO_THREAD; + if (err) + goto err_out; - irq_set_status_flags(irq, IRQ_NOAUTOEN); - err = request_irq(irq, handler, irq_flags, "arm-pmu", - per_cpu_ptr(&cpu_armpmu, cpu)); - } else if (armpmu_count_irq_users(irq) == 0) { - err = request_percpu_irq(irq, handler, "arm-pmu", - &cpu_armpmu); + per_cpu(cpu_irq, cpu) = irq; } - - if (err) - goto err_out; - - per_cpu(cpu_irq, cpu) = irq; return 0; err_out: @@ -633,10 +792,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) irq = 
armpmu_get_cpu_irq(pmu, cpu); if (irq) { - if (irq_is_percpu_devid(irq)) - enable_percpu_irq(irq, IRQ_TYPE_NONE); - else - enable_irq(irq); + if (pmu_nmi_enable) { + per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq); + } else { + if (irq_is_percpu_devid(irq)) + enable_percpu_irq(irq, IRQ_TYPE_NONE); + else + enable_irq(irq); + } } return 0; @@ -652,10 +815,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) irq = armpmu_get_cpu_irq(pmu, cpu); if (irq) { - if (irq_is_percpu_devid(irq)) - disable_percpu_irq(irq); - else - disable_irq_nosync(irq); + if (pmu_nmi_enable) { + per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq); + } else { + if (irq_is_percpu_devid(irq)) + disable_percpu_irq(irq); + else + disable_irq_nosync(irq); + } } per_cpu(cpu_armpmu, cpu) = NULL; @@ -730,8 +897,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, cpu_pm_pmu_setup(armpmu, cmd); break; case CPU_PM_EXIT: - cpu_pm_pmu_setup(armpmu, cmd); case CPU_PM_ENTER_FAILED: + cpu_pm_pmu_setup(armpmu, cmd); armpmu->start(armpmu); break; default: @@ -830,7 +997,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags) struct pmu_hw_events *events; events = per_cpu_ptr(pmu->hw_events, cpu); - raw_spin_lock_init(&events->pmu_lock); + if (!pmu_nmi_enable) + raw_spin_lock_init(&events->pmu_lock); events->percpu_pmu = pmu; } diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 0f197516d7089cf970926501aca996a9ab79a7f8..864d7ebe45e926b0223c2b2df8a8fe6bee7777a2 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -74,6 +74,76 @@ static void arm_pmu_acpi_unregister_irq(int cpu) acpi_unregister_gsi(gsi); } +#if IS_ENABLED(CONFIG_ARM_SPE_PMU) +static struct resource spe_resources[] = { + { + /* irq */ + .flags = IORESOURCE_IRQ, + } +}; + +static struct platform_device spe_dev = { + .name = ARMV8_SPE_PDEV_NAME, + .id = -1, + .resource = spe_resources, + .num_resources = ARRAY_SIZE(spe_resources) +}; + +/* + * For lack of a better place, hook the normal PMU MADT walk + * and create a SPE device if we detect a recent MADT with + * a homogeneous PPI mapping. + */ +static void arm_spe_acpi_register_device(void) +{ + int cpu, hetid, irq, ret; + bool first = true; + u16 gsi = 0; + + /* + * Sanity check all the GICC tables for the same interrupt number. + * For now, we only support homogeneous ACPI/SPE machines. 
+ */ + for_each_possible_cpu(cpu) { + struct acpi_madt_generic_interrupt *gicc; + + gicc = acpi_cpu_get_madt_gicc(cpu); + if (gicc->header.length < ACPI_MADT_GICC_SPE) + return; + + if (first) { + gsi = gicc->spe_interrupt; + if (!gsi) + return; + hetid = find_acpi_cpu_topology_hetero_id(cpu); + first = false; + } else if ((gsi != gicc->spe_interrupt) || + (hetid != find_acpi_cpu_topology_hetero_id(cpu))) { + pr_warn("ACPI: SPE must be homogeneous\n"); + return; + } + } + + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, + ACPI_ACTIVE_HIGH); + if (irq < 0) { + pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi); + return; + } + + spe_resources[0].start = irq; + ret = platform_device_register(&spe_dev); + if (ret < 0) { + pr_warn("ACPI: SPE: Unable to register device\n"); + acpi_unregister_gsi(gsi); + } +} +#else +static inline void arm_spe_acpi_register_device(void) +{ +} +#endif /* CONFIG_ARM_SPE_PMU */ + static int arm_pmu_acpi_parse_irqs(void) { int irq, cpu, irq_cpu, err; @@ -279,6 +349,8 @@ static int arm_pmu_acpi_init(void) if (acpi_disabled) return 0; + arm_spe_acpi_register_device(); + ret = arm_pmu_acpi_parse_irqs(); if (ret) return ret; diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 96075cecb0aecdae90a7ff1fae0c41de0e8c5922..199293450acfc54ae1115e9731eeee2c3a822b39 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -236,7 +236,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, ret = armpmu_register(pmu); if (ret) - goto out_free; + goto out_free_irqs; return 0; diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..bda901e2a5fc3a4d9b5731de3fd10b783b5f4c48 --- /dev/null +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -0,0 +1,903 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * This driver adds support for perf events to use the Performance + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node + * to monitor that node. + * + * SMMUv3 PMCG devices are named as smmuv3_pmcg_ where + * is the physical page address of the SMMU PMCG wrapped + * to 4K boundary. For example, the PMCG at 0xff88840000 is named + * smmuv3_pmcg_ff88840 + * + * Filtering by stream id is done by specifying filtering parameters + * with the event. options are: + * filter_enable - 0 = no filtering, 1 = filtering enabled + * filter_span - 0 = exact match, 1 = pattern match + * filter_stream_id - pattern to filter against + * + * To match a partial StreamID where the X most-significant bits must match + * but the Y least-significant bits might differ, STREAMID is programmed + * with a value that contains: + * STREAMID[Y - 1] == 0. + * STREAMID[Y - 2:0] == 1 (where Y > 1). + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards) + * contain a value to match from the corresponding bits of event StreamID. + * + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1, + * filter_span=1,filter_stream_id=0x42/ -a netperf + * Applies filter pattern 0x42 to transaction events, which means events + * matching stream ids 0x42 and 0x43 are counted. Further filtering + * information is available in the SMMU documentation. + * + * SMMU events are not attributable to a CPU, so task mode and sampling + * are not supported. 
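+ *
+ * A further filtering illustration (added here as a hedged example, not part
+ * of the original text): with filter_span=1 and filter_stream_id=0x41,
+ * STREAMID[1] == 0 and STREAMID[0] == 1, so the two least-significant bits
+ * are wildcarded and events for stream ids 0x40 through 0x43 are counted.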
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SMMU_PMCG_EVCNTR0 0x0 +#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride)) +#define SMMU_PMCG_EVTYPER0 0x400 +#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4) +#define SMMU_PMCG_SID_SPAN_SHIFT 29 +#define SMMU_PMCG_SMR0 0xA00 +#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4) +#define SMMU_PMCG_CNTENSET0 0xC00 +#define SMMU_PMCG_CNTENCLR0 0xC20 +#define SMMU_PMCG_INTENSET0 0xC40 +#define SMMU_PMCG_INTENCLR0 0xC60 +#define SMMU_PMCG_OVSCLR0 0xC80 +#define SMMU_PMCG_OVSSET0 0xCC0 +#define SMMU_PMCG_CFGR 0xE00 +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23) +#define SMMU_PMCG_CFGR_MSI BIT(21) +#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20) +#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8) +#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0) +#define SMMU_PMCG_CR 0xE04 +#define SMMU_PMCG_CR_ENABLE BIT(0) +#define SMMU_PMCG_CEID0 0xE20 +#define SMMU_PMCG_CEID1 0xE28 +#define SMMU_PMCG_IRQ_CTRL 0xE50 +#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0) +#define SMMU_PMCG_IRQ_CFG0 0xE58 +#define SMMU_PMCG_IRQ_CFG1 0xE60 +#define SMMU_PMCG_IRQ_CFG2 0xE64 + +/* MSI config fields */ +#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) +#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1 + +#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1 +#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0) + +#define SMMU_PMCG_MAX_COUNTERS 64 +#define SMMU_PMCG_ARCH_MAX_EVENTS 128 + +#define SMMU_PMCG_PA_SHIFT 12 + +#define SMMU_PMCG_EVCNTR_RDONLY BIT(0) + +static int cpuhp_state_num; + +struct smmu_pmu { + struct hlist_node node; + struct perf_event *events[SMMU_PMCG_MAX_COUNTERS]; + DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS); + DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS); + unsigned int irq; + unsigned int on_cpu; + struct pmu pmu; + unsigned int num_counters; + struct device *dev; + void __iomem *reg_base; + void __iomem *reloc_base; + u32 options; + u64 counter_mask; + bool global_filter; +}; + +#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) + +#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \ + static inline u32 get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, _start), \ + event->attr._config); \ + } \ + +SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33); + +static inline void smmu_pmu_enable(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + + writel(SMMU_PMCG_IRQ_CTRL_IRQEN, + smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); + writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); +} + +static inline void smmu_pmu_disable(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + + writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); + writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); +} + +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu, + u32 idx, u64 value) +{ + if (smmu_pmu->counter_mask & BIT(32)) + writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); + else + writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); +} + +static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx) +{ + u64 value; + + if (smmu_pmu->counter_mask & BIT(32)) + value = 
readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); + else + value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); + + return value; +} + +static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); +} + +static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); +} + +static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); +} + +static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu, + u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); +} + +static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx, + u32 val) +{ + writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); +} + +static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val) +{ + writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); +} + +static void smmu_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + u64 delta, prev, now; + u32 idx = hwc->idx; + + do { + prev = local64_read(&hwc->prev_count); + now = smmu_pmu_counter_get_value(smmu_pmu, idx); + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + /* handle overflow. */ + delta = now - prev; + delta &= smmu_pmu->counter_mask; + + local64_add(delta, &event->count); +} + +static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 new; + + if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { + /* + * On platforms that require this quirk, if the counter starts + * at < half_counter value and wraps, the current logic of + * handling the overflow may not work. It is expected that, + * those platforms will have full 64 counter bits implemented + * so that such a possibility is remote(eg: HiSilicon HIP08). + */ + new = smmu_pmu_counter_get_value(smmu_pmu, idx); + } else { + /* + * We limit the max period to half the max counter value + * of the counter size, so that even in the case of extreme + * interrupt latency the counter will (hopefully) not wrap + * past its initial value. + */ + new = smmu_pmu->counter_mask >> 1; + smmu_pmu_counter_set_value(smmu_pmu, idx, new); + } + + local64_set(&hwc->prev_count, new); +} + +static void smmu_pmu_set_event_filter(struct perf_event *event, + int idx, u32 span, u32 sid) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + u32 evtyper; + + evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT; + smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper); + smmu_pmu_set_smr(smmu_pmu, idx, sid); +} + +static bool smmu_pmu_check_global_filter(struct perf_event *curr, + struct perf_event *new) +{ + if (get_filter_enable(new) != get_filter_enable(curr)) + return false; + + if (!get_filter_enable(new)) + return true; + + return get_filter_span(new) == get_filter_span(curr) && + get_filter_stream_id(new) == get_filter_stream_id(curr); +} + +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, + struct perf_event *event, int idx) +{ + u32 span, sid; + unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; + bool filter_en = !!get_filter_enable(event); + + span = filter_en ? get_filter_span(event) : + SMMU_PMCG_DEFAULT_FILTER_SPAN; + sid = filter_en ? 
get_filter_stream_id(event) : + SMMU_PMCG_DEFAULT_FILTER_SID; + + cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); + /* + * Per-counter filtering, or scheduling the first globally-filtered + * event into an empty PMU so idx == 0 and it works out equivalent. + */ + if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { + smmu_pmu_set_event_filter(event, idx, span, sid); + return 0; + } + + /* Otherwise, must match whatever's currently scheduled */ + if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { + smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event)); + return 0; + } + + return -EAGAIN; +} + +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, + struct perf_event *event) +{ + int idx, err; + unsigned int num_ctrs = smmu_pmu->num_counters; + + idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); + if (idx == num_ctrs) + /* The counters are all in use. */ + return -EAGAIN; + + err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx); + if (err) + return err; + + set_bit(idx, smmu_pmu->used_counters); + + return idx; +} + +static bool smmu_pmu_events_compatible(struct perf_event *curr, + struct perf_event *new) +{ + if (new->pmu != curr->pmu) + return false; + + if (to_smmu_pmu(new->pmu)->global_filter && + !smmu_pmu_check_global_filter(curr, new)) + return false; + + return true; +} + +/* + * Implementation of abstract pmu functionality required by + * the core perf events code. + */ + +static int smmu_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct device *dev = smmu_pmu->dev; + struct perf_event *sibling; + int group_num_events = 1; + u16 event_id; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (hwc->sample_period) { + dev_dbg(dev, "Sampling not supported\n"); + return -EOPNOTSUPP; + } + + if (event->cpu < 0) { + dev_dbg(dev, "Per-task mode not supported\n"); + return -EOPNOTSUPP; + } + + /* We cannot filter accurately so we just don't allow it. */ + if (event->attr.exclude_user || event->attr.exclude_kernel || + event->attr.exclude_hv || event->attr.exclude_idle || + event->attr.exclude_host || event->attr.exclude_guest) { + dev_dbg(dev, "Can't exclude execution levels\n"); + return -EINVAL; + } + + /* Verify specified event is supported on this PMU */ + event_id = get_event(event); + if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS && + (!test_bit(event_id, smmu_pmu->supported_events))) { + dev_dbg(dev, "Invalid event %d for this PMU\n", event_id); + return -EINVAL; + } + + /* Don't allow groups with mixed PMUs, except for s/w events */ + if (!is_software_event(event->group_leader)) { + if (!smmu_pmu_events_compatible(event->group_leader, event)) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) + return -EINVAL; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (!smmu_pmu_events_compatible(sibling, event)) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) + return -EINVAL; + } + + hwc->idx = -1; + + /* + * Ensure all events are on the same cpu so all events are in the + * same cpu context, to avoid races on pmu_enable etc. 
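+ *
+ * (Illustrative note, not in the original comment: userspace tools such as
+ * perf normally read this PMU's "cpumask" sysfs attribute and open events
+ * on that CPU, so rewriting event->cpu below simply keeps manually-targeted
+ * events consistent with that model.)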
+ */ + event->cpu = smmu_pmu->on_cpu; + + return 0; +} + +static void smmu_pmu_event_start(struct perf_event *event, int flags) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + hwc->state = 0; + + smmu_pmu_set_period(smmu_pmu, hwc); + + smmu_pmu_counter_enable(smmu_pmu, idx); +} + +static void smmu_pmu_event_stop(struct perf_event *event, int flags) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (hwc->state & PERF_HES_STOPPED) + return; + + smmu_pmu_counter_disable(smmu_pmu, idx); + /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */ + smmu_pmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int smmu_pmu_event_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + + idx = smmu_pmu_get_event_idx(smmu_pmu, event); + if (idx < 0) + return idx; + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + smmu_pmu->events[idx] = event; + local64_set(&hwc->prev_count, 0); + + smmu_pmu_interrupt_enable(smmu_pmu, idx); + + if (flags & PERF_EF_START) + smmu_pmu_event_start(event, flags); + + /* Propagate changes to the userspace mapping. */ + perf_event_update_userpage(event); + + return 0; +} + +static void smmu_pmu_event_del(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + int idx = hwc->idx; + + smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE); + smmu_pmu_interrupt_disable(smmu_pmu, idx); + smmu_pmu->events[idx] = NULL; + clear_bit(idx, smmu_pmu->used_counters); + + perf_event_update_userpage(event); +} + +static void smmu_pmu_event_read(struct perf_event *event) +{ + smmu_pmu_event_update(event); +} + +/* cpumask */ + +static ssize_t smmu_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); +} + +static struct device_attribute smmu_pmu_cpumask_attr = + __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL); + +static struct attribute *smmu_pmu_cpumask_attrs[] = { + &smmu_pmu_cpumask_attr.attr, + NULL +}; + +static struct attribute_group smmu_pmu_cpumask_group = { + .attrs = smmu_pmu_cpumask_attrs, +}; + +/* Events */ + +ssize_t smmu_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define SMMU_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR(name, smmu_event_attr_##name, \ + config, smmu_pmu_event_show) +SMMU_EVENT_ATTR(cycles, 0); +SMMU_EVENT_ATTR(transaction, 1); +SMMU_EVENT_ATTR(tlb_miss, 2); +SMMU_EVENT_ATTR(config_cache_miss, 3); +SMMU_EVENT_ATTR(trans_table_walk_access, 4); +SMMU_EVENT_ATTR(config_struct_access, 5); +SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6); +SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7); + +static struct attribute *smmu_pmu_events[] = { + &smmu_event_attr_cycles.attr.attr, + &smmu_event_attr_transaction.attr.attr, + &smmu_event_attr_tlb_miss.attr.attr, + &smmu_event_attr_config_cache_miss.attr.attr, + &smmu_event_attr_trans_table_walk_access.attr.attr, + 
&smmu_event_attr_config_struct_access.attr.attr, + &smmu_event_attr_pcie_ats_trans_rq.attr.attr, + &smmu_event_attr_pcie_ats_trans_passed.attr.attr, + NULL +}; + +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); + + if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) + return attr->mode; + + return 0; +} + +static struct attribute_group smmu_pmu_events_group = { + .name = "events", + .attrs = smmu_pmu_events, + .is_visible = smmu_pmu_event_is_visible, +}; + +/* Formats */ +PMU_FORMAT_ATTR(event, "config:0-15"); +PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31"); +PMU_FORMAT_ATTR(filter_span, "config1:32"); +PMU_FORMAT_ATTR(filter_enable, "config1:33"); + +static struct attribute *smmu_pmu_formats[] = { + &format_attr_event.attr, + &format_attr_filter_stream_id.attr, + &format_attr_filter_span.attr, + &format_attr_filter_enable.attr, + NULL +}; + +static struct attribute_group smmu_pmu_format_group = { + .name = "format", + .attrs = smmu_pmu_formats, +}; + +static const struct attribute_group *smmu_pmu_attr_grps[] = { + &smmu_pmu_cpumask_group, + &smmu_pmu_events_group, + &smmu_pmu_format_group, + NULL +}; + +/* + * Generic device handlers + */ + +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct smmu_pmu *smmu_pmu; + unsigned int target; + + smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node); + if (cpu != smmu_pmu->on_cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); + smmu_pmu->on_cpu = target; + WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target))); + + return 0; +} + +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data) +{ + struct smmu_pmu *smmu_pmu = data; + u64 ovsr; + unsigned int idx; + + ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0); + if (!ovsr) + return IRQ_NONE; + + writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); + + for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) { + struct perf_event *event = smmu_pmu->events[idx]; + struct hw_perf_event *hwc; + + if (WARN_ON_ONCE(!event)) + continue; + + smmu_pmu_event_update(event); + hwc = &event->hw; + + smmu_pmu_set_period(smmu_pmu, hwc); + } + + return IRQ_HANDLED; +} + +static void smmu_pmu_free_msis(void *data) +{ + struct device *dev = data; + + platform_msi_domain_free_irqs(dev); +} + +static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + phys_addr_t doorbell; + struct device *dev = msi_desc_to_dev(desc); + struct smmu_pmu *pmu = dev_get_drvdata(dev); + + doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; + doorbell &= MSI_CFG0_ADDR_MASK; + + writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); + writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1); + writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, + pmu->reg_base + SMMU_PMCG_IRQ_CFG2); +} + +static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) +{ + struct msi_desc *desc; + struct device *dev = pmu->dev; + int ret; + + /* Clear MSI address reg */ + writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); + + /* MSI supported or not */ + if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) + return; + + ret = 
platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); + if (ret) { + dev_warn(dev, "failed to allocate MSIs\n"); + return; + } + + desc = first_msi_entry(dev); + if (desc) + pmu->irq = desc->irq; + + /* Add callback to free MSIs on teardown */ + devm_add_action(dev, smmu_pmu_free_msis, dev); +} + +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu) +{ + unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD; + int irq, ret = -ENXIO; + + smmu_pmu_setup_msi(pmu); + + irq = pmu->irq; + if (irq) + ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq, + flags, "smmuv3-pmu", pmu); + return ret; +} + +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu) +{ + u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0); + + smmu_pmu_disable(&smmu_pmu->pmu); + + /* Disable counter and interrupt */ + writeq_relaxed(counter_present_mask, + smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); + writeq_relaxed(counter_present_mask, + smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); + writeq_relaxed(counter_present_mask, + smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); +} + +static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu) +{ + u32 model; + + model = *(u32 *)dev_get_platdata(smmu_pmu->dev); + + switch (model) { + case IORT_SMMU_V3_PMCG_HISI_HIP08: + /* HiSilicon Erratum 162001800 */ + smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY; + break; + } + + dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); +} + +static int smmu_pmu_probe(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu; + struct resource *res_0, *res_1; + u32 cfgr, reg_size; + u64 ceid_64[2]; + int irq, err; + char *name; + struct device *dev = &pdev->dev; + + smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL); + if (!smmu_pmu) + return -ENOMEM; + + smmu_pmu->dev = dev; + platform_set_drvdata(pdev, smmu_pmu); + + smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = smmu_pmu_enable, + .pmu_disable = smmu_pmu_disable, + .event_init = smmu_pmu_event_init, + .add = smmu_pmu_event_add, + .del = smmu_pmu_event_del, + .start = smmu_pmu_event_start, + .stop = smmu_pmu_event_stop, + .read = smmu_pmu_event_read, + .attr_groups = smmu_pmu_attr_grps, + }; + + res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); + smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0); + if (IS_ERR(smmu_pmu->reg_base)) + return PTR_ERR(smmu_pmu->reg_base); + + cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR); + + /* Determine if page 1 is present */ + if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) { + res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1); + if (IS_ERR(smmu_pmu->reloc_base)) + return PTR_ERR(smmu_pmu->reloc_base); + } else { + smmu_pmu->reloc_base = smmu_pmu->reg_base; + } + + irq = platform_get_irq(pdev, 0); + if (irq > 0) + smmu_pmu->irq = irq; + + ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0); + ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1); + bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64, + SMMU_PMCG_ARCH_MAX_EVENTS); + + smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1; + + smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE); + + reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr); + smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0); + + smmu_pmu_reset(smmu_pmu); + + err = smmu_pmu_setup_irq(smmu_pmu); + if (err) { + dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start); + 
return err; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", + (res_0->start) >> SMMU_PMCG_PA_SHIFT); + if (!name) { + dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start); + return -EINVAL; + } + + smmu_pmu_get_acpi_options(smmu_pmu); + + /* Pick one CPU to be the preferred one to use */ + smmu_pmu->on_cpu = raw_smp_processor_id(); + WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu))); + + err = cpuhp_state_add_instance_nocalls(cpuhp_state_num, + &smmu_pmu->node); + if (err) { + dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", + err, &res_0->start); + goto out_cpuhp_err; + } + + err = perf_pmu_register(&smmu_pmu->pmu, name, -1); + if (err) { + dev_err(dev, "Error %d registering PMU @%pa\n", + err, &res_0->start); + goto out_unregister; + } + + dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n", + &res_0->start, smmu_pmu->num_counters, + smmu_pmu->global_filter ? "Global(Counter0)" : + "Individual"); + + return 0; + +out_unregister: + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); +out_cpuhp_err: + irq_set_affinity_hint(smmu_pmu->irq, NULL); + return err; +} + +static int smmu_pmu_remove(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&smmu_pmu->pmu); + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); + irq_set_affinity_hint(smmu_pmu->irq, NULL); + + return 0; +} + +static void smmu_pmu_shutdown(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); + + smmu_pmu_disable(&smmu_pmu->pmu); +} + +static struct platform_driver smmu_pmu_driver = { + .driver = { + .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, + }, + .probe = smmu_pmu_probe, + .remove = smmu_pmu_remove, + .shutdown = smmu_pmu_shutdown, +}; + +static int __init arm_smmu_pmu_init(void) +{ + cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/arm/pmcg:online", + NULL, + smmu_pmu_offline_cpu); + if (cpuhp_state_num < 0) + return cpuhp_state_num; + + return platform_driver_register(&smmu_pmu_driver); +} +module_init(arm_smmu_pmu_init); + +static void __exit arm_smmu_pmu_exit(void) +{ + platform_driver_unregister(&smmu_pmu_driver); + cpuhp_remove_multi_state(cpuhp_state_num); +} + +module_exit(arm_smmu_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension"); +MODULE_AUTHOR("Neil Leeder "); +MODULE_AUTHOR("Shameer Kolothum "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 54ec278d2fc4669ec0c4ccb841b8bd298059d7bb..af84f3a61e96697e3f680aaa85212bf0541787b8 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -824,10 +825,10 @@ static void arm_spe_pmu_read(struct perf_event *event) { } -static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { - int i; + int i, cpu = event->cpu; struct page **pglist; struct arm_spe_pmu_buf *buf; @@ -855,16 +856,8 @@ static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages, if (!pglist) goto out_free_buf; - for (i = 0; i < nr_pages; ++i) { - struct page *page = virt_to_page(pages[i]); - - if (PagePrivate(page)) { - pr_warn("unexpected high-order page for auxbuf!"); - goto out_free_pglist; - } - + 
for (i = 0; i < nr_pages; ++i) pglist[i] = virt_to_page(pages[i]); - } buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); if (!buf->base) @@ -927,6 +920,11 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu) idx = atomic_inc_return(&pmu_idx); name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx); + if (!name) { + dev_err(dev, "failed to allocate name for pmu %d\n", idx); + return -ENOMEM; + } + return perf_pmu_register(&spe_pmu->pmu, name, -1); } @@ -1169,8 +1167,15 @@ static const struct of_device_id arm_spe_pmu_of_match[] = { { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 }, { /* Sentinel */ }, }; +MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match); + +static const struct platform_device_id arm_spe_match[] = { + { ARMV8_SPE_PDEV_NAME, 0}, + { } +}; +MODULE_DEVICE_TABLE(platform, arm_spe_match); -static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev) +static int arm_spe_pmu_device_probe(struct platform_device *pdev) { int ret; struct arm_spe_pmu *spe_pmu; @@ -1230,11 +1235,13 @@ static int arm_spe_pmu_device_remove(struct platform_device *pdev) } static struct platform_driver arm_spe_pmu_driver = { + .id_table = arm_spe_match, .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, }, - .probe = arm_spe_pmu_device_dt_probe, + .probe = arm_spe_pmu_device_probe, .remove = arm_spe_pmu_device_remove, }; diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c5d1b7019fffd8e20face5b9dd7f312b70d70816 --- /dev/null +++ b/drivers/perf/hisilicon/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config HISI_PMU + tristate "HiSilicon SoC PMU drivers" + depends on ARM64 && ACPI + help + Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home + Agent performance monitor and DDR Controller performance monitor. 
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 2621d51ae87a0944954e30c2279647ee828f225f..63942ae6b167e9e1bcc1ff86fbd814fee7913783 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1 +1,6 @@ -obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o \ + hisi_uncore_l3c_pmu.o \ + hisi_uncore_hha_pmu.o \ + hisi_uncore_ddrc_pmu.o \ + hisi_uncore_lpddrc_pmu.o \ + hisi_uncore_l3t_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 1b10ea05a914903ca1a22233356fc7410b28d6b1..8d1c48bc98121e316881cd9174bebdb634c1a025 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -30,8 +31,8 @@ #define DDRC_FLUX_RCMD 0x38c #define DDRC_PRE_CMD 0x3c0 #define DDRC_ACT_CMD 0x3c4 -#define DDRC_BNK_CHG 0x3c8 #define DDRC_RNK_CHG 0x3cc +#define DDRC_RW_CHG 0x3d0 #define DDRC_EVENT_CTRL 0x6C0 #define DDRC_INT_MASK 0x6c8 #define DDRC_INT_STATUS 0x6cc @@ -51,7 +52,7 @@ static const u32 ddrc_reg_off[] = { DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, - DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG }; /* @@ -384,25 +385,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, ddrc_pmu->index_id); - ddrc_pmu->pmu = (struct pmu) { - .name = name, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = hisi_ddrc_pmu_attr_groups, - }; - + HISI_INIT_PMU(&ddrc_pmu->pmu, name, hisi_ddrc_pmu_attr_groups); ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); if (ret) { dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n"); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, - &ddrc_pmu->node); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); + irq_set_affinity_hint(ddrc_pmu->irq, NULL); } return ret; @@ -413,8 +402,9 @@ static int hisi_ddrc_pmu_remove(struct platform_device *pdev) struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&ddrc_pmu->pmu); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, - &ddrc_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + irq_set_affinity_hint(ddrc_pmu->irq, NULL); return 0; } @@ -423,6 +413,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .driver = { .name = "hisi_ddrc_pmu", .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, .remove = hisi_ddrc_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 443906e0aff3edad67ebd2514473b1f11f3dbeea..52286739c8b94d80f5143894047d0b32cd1471b0 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -45,7 +46,7 @@ * 
Select the counter register offset using the counter index * each counter is 48-bits. */ -static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx) +static u32 hisi_hha_pmu_get_counter_offset(u32 cntr_idx) { return (HHA_CNT0_LOWER + (cntr_idx * 8)); } @@ -235,20 +236,34 @@ static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match); -static int hisi_hha_pmu_init_data(struct platform_device *pdev, +#ifdef CONFIG_ACPI +static int hisi_hha_pmu_init_index(struct platform_device *pdev, struct hisi_pmu *hha_pmu) { - unsigned long long id; - struct resource *res; acpi_status status; + unsigned long long id; status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), - "_UID", NULL, &id); + "_UID", NULL, &id); if (ACPI_FAILURE(status)) return -EINVAL; hha_pmu->index_id = id; + return 0; +} +#endif + +static int hisi_hha_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + struct resource *res; + +#ifdef CONFIG_ACPI + if (hisi_hha_pmu_init_index(pdev, hha_pmu)) + dev_info(&pdev->dev, "Can not init index id by acpi!\n"); +#endif + /* * Use SCCL_ID and UID to identify the HHA PMU, while * SCCL_ID is in MPIDR[aff2]. @@ -290,7 +305,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = { HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), - HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), HISI_PMU_EVENT_ATTR(spill_num, 0x20), @@ -395,25 +410,13 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", hha_pmu->sccl_id, hha_pmu->index_id); - hha_pmu->pmu = (struct pmu) { - .name = name, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = hisi_hha_pmu_attr_groups, - }; - + HISI_INIT_PMU(&hha_pmu->pmu, name, hisi_hha_pmu_attr_groups); ret = perf_pmu_register(&hha_pmu->pmu, name, -1); if (ret) { dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, - &hha_pmu->node); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); + irq_set_affinity_hint(hha_pmu->irq, NULL); } return ret; @@ -424,8 +427,9 @@ static int hisi_hha_pmu_remove(struct platform_device *pdev) struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&hha_pmu->pmu); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, - &hha_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + irq_set_affinity_hint(hha_pmu->irq, NULL); return 0; } @@ -434,6 +438,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .driver = { .name = "hisi_hha_pmu", .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, .remove = hisi_hha_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 0bde5d919b2eb49db7c0a8dc3d5f99b028b1930d..0e766cea4a11a5de2894aff778f0fb6dc38c6567 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ 
b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -38,13 +39,13 @@ /* L3C has 8-counters */ #define L3C_NR_COUNTERS 0x8 -#define L3C_PERF_CTRL_EN 0x20000 +#define L3C_PERF_CTRL_EN 0x10000 #define L3C_EVTYPE_NONE 0xff /* * Select the counter register offset using the counter index */ -static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx) +static u32 hisi_l3c_pmu_get_counter_offset(u32 cntr_idx) { return (L3C_CNTR0_LOWER + (cntr_idx * 8)); } @@ -234,20 +235,33 @@ static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match); -static int hisi_l3c_pmu_init_data(struct platform_device *pdev, +#ifdef CONFIG_ACPI +static int hisi_l3c_pmu_init_index(struct platform_device *pdev, struct hisi_pmu *l3c_pmu) { unsigned long long id; - struct resource *res; acpi_status status; status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), - "_UID", NULL, &id); + "_UID", NULL, &id); if (ACPI_FAILURE(status)) return -EINVAL; l3c_pmu->index_id = id; + return 0; +} +#endif + +static int hisi_l3c_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + struct resource *res; + +#ifdef CONFIG_ACPI + if (hisi_l3c_pmu_init_index(pdev, l3c_pmu)) + dev_info(&pdev->dev, "Can not init index id by acpi!"); +#endif /* * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. @@ -385,25 +399,14 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", l3c_pmu->sccl_id, l3c_pmu->index_id); - l3c_pmu->pmu = (struct pmu) { - .name = name, - .task_ctx_nr = perf_invalid_context, - .event_init = hisi_uncore_pmu_event_init, - .pmu_enable = hisi_uncore_pmu_enable, - .pmu_disable = hisi_uncore_pmu_disable, - .add = hisi_uncore_pmu_add, - .del = hisi_uncore_pmu_del, - .start = hisi_uncore_pmu_start, - .stop = hisi_uncore_pmu_stop, - .read = hisi_uncore_pmu_read, - .attr_groups = hisi_l3c_pmu_attr_groups, - }; + HISI_INIT_PMU(&l3c_pmu->pmu, name, hisi_l3c_pmu_attr_groups); ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); if (ret) { dev_err(l3c_pmu->dev, "L3C PMU register failed!\n"); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, - &l3c_pmu->node); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); + irq_set_affinity_hint(l3c_pmu->irq, NULL); } return ret; @@ -414,8 +417,9 @@ static int hisi_l3c_pmu_remove(struct platform_device *pdev) struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&l3c_pmu->pmu); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, - &l3c_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + irq_set_affinity_hint(l3c_pmu->irq, NULL); return 0; } @@ -424,6 +428,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .driver = { .name = "hisi_l3c_pmu", .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_l3c_pmu_probe, .remove = hisi_l3c_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..bd4e600e0c3238edc62776fd35b467e3ad600795 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HiSilicon SoC L3T uncore Hardware event counters support 
+ * + * Copyright (C) 2021 Hisilicon Limited + * Author: Fang Lijun + * Anurup M + * Shaokun Zhang + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* L3T register definition */ +#define L3T_PERF_CTRL 0x0408 +#define L3T_INT_MASK 0x0800 +#define L3T_INT_STATUS 0x0808 +#define L3T_INT_CLEAR 0x080c +#define L3T_EVENT_CTRL 0x1c00 +#define L3T_EVENT_TYPE0 0x1d00 +/* + * Each counter is 48-bits and [48:63] are reserved + * which are Read-As-Zero and Writes-Ignored. + */ +#define L3T_CNTR0_LOWER 0x1e00 + +/* L3T has 8-counters */ +#define L3T_NR_COUNTERS 0x8 + +#define L3T_PERF_CTRL_EN 0x20000 +#define L3T_EVTYPE_NONE 0xff + +/* + * Select the counter register offset using the counter index + */ +static u32 hisi_l3t_pmu_get_counter_offset(u32 cntr_idx) +{ + return (L3T_CNTR0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_l3t_pmu_read_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3t_pmu, idx)) { + dev_err(l3t_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + /* Read 64-bits and the upper 16 bits are RAZ */ + return readq(l3t_pmu->base + hisi_l3t_pmu_get_counter_offset(idx)); +} + +static void hisi_l3t_pmu_write_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3t_pmu, idx)) { + dev_err(l3t_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + /* Write 64-bits and the upper 16 bits are WI */ + writeq(val, l3t_pmu->base + hisi_l3t_pmu_get_counter_offset(idx)); +} + +static void hisi_l3t_pmu_write_evtype(struct hisi_pmu *l3t_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(L3T_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * L3T_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * L3T_EVENT_TYPE1 is chosen. + */ + reg = L3T_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to L3T_EVENT_TYPEx Register */ + val = readl(l3t_pmu->base + reg); + val &= ~(L3T_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, l3t_pmu->base + reg); +} + +static void hisi_l3t_pmu_start_counters(struct hisi_pmu *l3t_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in L3T_PERF_CTRL register to start counting + * for all enabled counters. + */ + val = readl(l3t_pmu->base + L3T_PERF_CTRL); + val |= L3T_PERF_CTRL_EN; + writel(val, l3t_pmu->base + L3T_PERF_CTRL); +} + +static void hisi_l3t_pmu_stop_counters(struct hisi_pmu *l3t_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in L3T_PERF_CTRL register to stop counting + * for all enabled counters. 
+ */ + val = readl(l3t_pmu->base + L3T_PERF_CTRL); + val &= ~(L3T_PERF_CTRL_EN); + writel(val, l3t_pmu->base + L3T_PERF_CTRL); +} + +static void hisi_l3t_pmu_enable_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in L3T_EVENT_CTRL register */ + val = readl(l3t_pmu->base + L3T_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_EVENT_CTRL); +} + +static void hisi_l3t_pmu_disable_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in L3T_EVENT_CTRL register */ + val = readl(l3t_pmu->base + L3T_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_EVENT_CTRL); +} + +static void hisi_l3t_pmu_enable_counter_int(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3t_pmu->base + L3T_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_INT_MASK); +} + +static void hisi_l3t_pmu_disable_counter_int(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3t_pmu->base + L3T_INT_MASK); + /* Write 1 to mask interrupt */ + val |= (1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_INT_MASK); +} + +static irqreturn_t hisi_l3t_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *l3t_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read L3T_INT_STATUS register */ + overflown = readl(l3t_pmu->base + L3T_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it. + */ + for_each_set_bit(idx, &overflown, L3T_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), l3t_pmu->base + L3T_INT_CLEAR); + + /* Get the corresponding event struct */ + event = l3t_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_l3t_pmu_init_irq(struct hisi_pmu *l3t_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "L3T PMU get irq fail; irq:%d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, hisi_l3t_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), l3t_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + l3t_pmu->irq = irq; + + return 0; +} + +static const struct of_device_id l3t_of_match[] = { + { .compatible = "hisilicon,l3t-pmu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, l3t_of_match); + +static int hisi_l3t_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3t_pmu) +{ + struct resource *res; + + /* + * Use the SCCL_ID and CCL_ID to identify the L3T PMU, while + * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. 
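+ *
+ * (Illustrative example with assumed property values, not in the original:
+ * a firmware node with hisilicon,scl-id = <1>, hisilicon,ccl-id = <2> and
+ * hisilicon,index-id = <0> would be registered below as "hisi_sccl1_l3t0",
+ * the name being built from sccl_id and index_id in hisi_l3t_pmu_probe().)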
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &l3t_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read l3t sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &l3t_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read l3t ccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,index-id", + &l3t_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read l3t index-id!\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l3t_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(l3t_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for l3t_pmu resource\n"); + return PTR_ERR(l3t_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_l3t_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_format_group = { + .name = "format", + .attrs = hisi_l3t_pmu_format_attr, +}; + +static struct attribute *hisi_l3t_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00), + HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01), + HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02), + HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03), + HISI_PMU_EVENT_ATTR(victim_num, 0x04), + HISI_PMU_EVENT_ATTR(rd_spipe, 0x20), + HISI_PMU_EVENT_ATTR(wr_spipe, 0x21), + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22), + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23), + HISI_PMU_EVENT_ATTR(back_invalid, 0x29), + HISI_PMU_EVENT_ATTR(retry_cpu, 0x40), + HISI_PMU_EVENT_ATTR(retry_ring, 0x41), + HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42), + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_events_group = { + .name = "events", + .attrs = hisi_l3t_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_l3t_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_cpumask_attr_group = { + .attrs = hisi_l3t_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_l3t_pmu_attr_groups[] = { + &hisi_l3t_pmu_format_group, + &hisi_l3t_pmu_events_group, + &hisi_l3t_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_l3t_ops = { + .write_evtype = hisi_l3t_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_l3t_pmu_start_counters, + .stop_counters = hisi_l3t_pmu_stop_counters, + .enable_counter = hisi_l3t_pmu_enable_counter, + .disable_counter = hisi_l3t_pmu_disable_counter, + .enable_counter_int = hisi_l3t_pmu_enable_counter_int, + .disable_counter_int = hisi_l3t_pmu_disable_counter_int, + .write_counter = hisi_l3t_pmu_write_counter, + .read_counter = hisi_l3t_pmu_read_counter, +}; + +static int hisi_l3t_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *l3t_pmu) +{ + int ret; + + ret = hisi_l3t_pmu_init_data(pdev, l3t_pmu); + if (ret) + return ret; + + ret = hisi_l3t_pmu_init_irq(l3t_pmu, pdev); + if (ret) + return ret; + + l3t_pmu->num_counters = L3T_NR_COUNTERS; + l3t_pmu->counter_bits = 48; + l3t_pmu->ops = &hisi_uncore_l3t_ops; + l3t_pmu->dev = &pdev->dev; + l3t_pmu->on_cpu = -1; + l3t_pmu->check_event = 0x59; + + return 0; +} + +static int hisi_l3t_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *l3t_pmu; + char *name; + int ret; + + l3t_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3t_pmu), GFP_KERNEL); + if (!l3t_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, l3t_pmu); + + ret = hisi_l3t_pmu_dev_probe(pdev, 
l3t_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3t%u", + l3t_pmu->sccl_id, l3t_pmu->index_id); + HISI_INIT_PMU(&l3t_pmu->pmu, name, hisi_l3t_pmu_attr_groups); + + ret = perf_pmu_register(&l3t_pmu->pmu, name, -1); + if (ret) { + dev_err(l3t_pmu->dev, "L3T PMU register failed!\n"); + return ret; + } + + /* Pick one core to use for cpumask attributes */ + cpumask_set_cpu(smp_processor_id(), &l3t_pmu->associated_cpus); + + l3t_pmu->on_cpu = cpumask_first(&l3t_pmu->associated_cpus); + if (l3t_pmu->on_cpu >= nr_cpu_ids) + return -EINVAL; + + return 0; +} + +static int hisi_l3t_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *l3t_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&l3t_pmu->pmu); + + return 0; +} + +static struct platform_driver hisi_l3t_pmu_driver = { + .driver = { + .name = "hisi_l3t_pmu", + .of_match_table = of_match_ptr(l3t_of_match), + }, + .probe = hisi_l3t_pmu_probe, + .remove = hisi_l3t_pmu_remove, +}; + +static int __init hisi_l3t_pmu_module_init(void) +{ + return platform_driver_register(&hisi_l3t_pmu_driver); +} +module_init(hisi_l3t_pmu_module_init); + +static void __exit hisi_l3t_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_l3t_pmu_driver); +} +module_exit(hisi_l3t_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC L3T uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); +MODULE_AUTHOR("Fang Lijun "); diff --git a/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..ca395252ccc3d9b23304733b5121d679b0f61ae4 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HiSilicon SoC LPDDRC uncore Hardware event counters support + * + * Copyright (C) 2021 Hisilicon Limited + * Author: Fang Lijun + * Shaokun Zhang + * Anurup M + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* LPDDRC register definition */ +#define LPDDRC_PERF_CTRL 0x4930 +#define LPDDRC_FLUX_WR 0x4948 +#define LPDDRC_FLUX_RD 0x494c +#define LPDDRC_FLUX_WCMD 0x4950 +#define LPDDRC_FLUX_RCMD 0x4954 +#define LPDDRC_PRE_CMD 0x4984 +#define LPDDRC_ACT_CMD 0x4988 +#define LPDDRC_RNK_CHG 0x4990 +#define LPDDRC_RW_CHG 0x4994 +#define LPDDRC_EVENT_CTRL 0x4d60 +#define LPDDRC_INT_MASK 0x6c8 +#define LPDDRC_INT_STATUS 0x6cc +#define LPDDRC_INT_CLEAR 0x6d0 + +/* LPDDRC has 8-counters */ +#define LPDDRC_NR_COUNTERS 0x8 +#define LPDDRC_PERF_CTRL_EN 0x1 + +/* + * For LPDDRC PMU, there are eight-events and every event has been mapped + * to fixed-purpose counters which register offset is not consistent. + * Therefore there is no write event type and we assume that event + * code (0 to 7) is equal to counter index in PMU driver. + */ +#define GET_LPDDRC_EVENTID(hwc) (hwc->config_base & 0x7) + +static const u32 lpddrc_reg_off[] = { + LPDDRC_FLUX_WR, LPDDRC_FLUX_RD, LPDDRC_FLUX_WCMD, LPDDRC_FLUX_RCMD, + LPDDRC_PRE_CMD, LPDDRC_ACT_CMD, LPDDRC_RNK_CHG, LPDDRC_RW_CHG +}; + +/* + * Select the counter register offset using the counter index. 
+ * In LPDDRC there are no programmable counters, the count + * is read from the statistics counter register itself. + */ +static u32 hisi_lpddrc_pmu_get_counter_offset(int cntr_idx) +{ + return lpddrc_reg_off[cntr_idx]; +} + +static u64 hisi_lpddrc_pmu_read_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + /* Use event code as counter index */ + u32 idx = GET_LPDDRC_EVENTID(hwc); + + if (!hisi_uncore_pmu_counter_valid(lpddrc_pmu, idx)) { + dev_err(lpddrc_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + return readl(lpddrc_pmu->base + hisi_lpddrc_pmu_get_counter_offset(idx)); +} + +/* + * For LPDDRC PMU, the event counter should be reset when counting starts; + * only prev_count is reset by software, because the counter register is RO. + */ +static void hisi_lpddrc_pmu_write_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + local64_set(&hwc->prev_count, 0); +} + +/* + * For LPDDRC PMU, events are mapped to fixed-purpose counters by hardware, + * so there is no need to write the event type. + */ +static void hisi_lpddrc_pmu_write_evtype(struct hisi_pmu *lpddrc_pmu, int idx, + u32 type) +{ +} + +static void hisi_lpddrc_pmu_start_counters(struct hisi_pmu *lpddrc_pmu) +{ + u32 val; + + /* Set perf_enable in LPDDRC_PERF_CTRL to start event counting */ + val = readl(lpddrc_pmu->base + LPDDRC_PERF_CTRL); + val |= LPDDRC_PERF_CTRL_EN; + writel(val, lpddrc_pmu->base + LPDDRC_PERF_CTRL); +} + +static void hisi_lpddrc_pmu_stop_counters(struct hisi_pmu *lpddrc_pmu) +{ + u32 val; + + /* Clear perf_enable in LPDDRC_PERF_CTRL to stop event counting */ + val = readl(lpddrc_pmu->base + LPDDRC_PERF_CTRL); + val &= ~LPDDRC_PERF_CTRL_EN; + writel(val, lpddrc_pmu->base + LPDDRC_PERF_CTRL); +} + +static void hisi_lpddrc_pmu_enable_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Set counter index (event code) in LPDDRC_EVENT_CTRL register */ + val = readl(lpddrc_pmu->base + LPDDRC_EVENT_CTRL); + val |= (1 << GET_LPDDRC_EVENTID(hwc)); + writel(val, lpddrc_pmu->base + LPDDRC_EVENT_CTRL); +} + +static void hisi_lpddrc_pmu_disable_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index (event code) in LPDDRC_EVENT_CTRL register */ + val = readl(lpddrc_pmu->base + LPDDRC_EVENT_CTRL); + val &= ~(1 << GET_LPDDRC_EVENTID(hwc)); + writel(val, lpddrc_pmu->base + LPDDRC_EVENT_CTRL); +} + +static int hisi_lpddrc_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *lpddrc_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = lpddrc_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + /* For LPDDRC PMU, we use event code as counter index */ + int idx = GET_LPDDRC_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void hisi_lpddrc_pmu_enable_counter_int(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(lpddrc_pmu->base + LPDDRC_INT_MASK); + val &= ~(1 << GET_LPDDRC_EVENTID(hwc)); + writel(val, lpddrc_pmu->base + LPDDRC_INT_MASK); +} + +static void hisi_lpddrc_pmu_disable_counter_int(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(lpddrc_pmu->base + LPDDRC_INT_MASK); + val |= (1 << GET_LPDDRC_EVENTID(hwc)); + writel(val, lpddrc_pmu->base + LPDDRC_INT_MASK); +} + +static irqreturn_t hisi_lpddrc_pmu_isr(int irq, void *dev_id) +{ +
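/* Overflow handling: LPDDRC_INT_STATUS has one status bit per fixed-purpose counter; each set bit is acknowledged by writing it back to LPDDRC_INT_CLEAR before the perf count is updated and the sampling period re-armed. */ +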
struct hisi_pmu *lpddrc_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read the LPDDRC_INT_STATUS register */ + overflown = readl(lpddrc_pmu->base + LPDDRC_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it + */ + for_each_set_bit(idx, &overflown, LPDDRC_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), lpddrc_pmu->base + LPDDRC_INT_CLEAR); + + /* Get the corresponding event struct */ + event = lpddrc_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_lpddrc_pmu_init_irq(struct hisi_pmu *lpddrc_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "LPDDRC PMU get irq fail; irq:%d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, hisi_lpddrc_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), lpddrc_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + lpddrc_pmu->irq = irq; + + return 0; +} + +static const struct of_device_id lpddrc_of_match[] = { + { .compatible = "hisilicon,lpddrc-pmu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, lpddrc_of_match); + +static int hisi_lpddrc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *lpddrc_pmu) +{ + struct resource *res; + + /* + * Use the SCCL_ID and LPDDRC channel ID to identify the + * LPDDRC PMU, while SCCL_ID is in MPIDR[aff2]. + */ + if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id", + &lpddrc_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read lpddrc channel-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &lpddrc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read lpddrc sccl-id!\n"); + return -EINVAL; + } + /* LPDDRC PMUs only share the same SCCL */ + lpddrc_pmu->ccl_id = -1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + lpddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(lpddrc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for lpddrc_pmu resource\n"); + return PTR_ERR(lpddrc_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_lpddrc_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-4"), + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_format_group = { + .name = "format", + .attrs = hisi_lpddrc_pmu_format_attr, +}; + +static struct attribute *hisi_lpddrc_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(flux_wr, 0x00), + HISI_PMU_EVENT_ATTR(flux_rd, 0x01), + HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02), + HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03), + HISI_PMU_EVENT_ATTR(pre_cmd, 0x04), + HISI_PMU_EVENT_ATTR(act_cmd, 0x05), + HISI_PMU_EVENT_ATTR(rnk_chg, 0x06), + HISI_PMU_EVENT_ATTR(rw_chg, 0x07), + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_events_group = { + .name = "events", + .attrs = hisi_lpddrc_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_lpddrc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_cpumask_attr_group = { + .attrs = hisi_lpddrc_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_lpddrc_pmu_attr_groups[] = 
{ + &hisi_lpddrc_pmu_format_group, + &hisi_lpddrc_pmu_events_group, + &hisi_lpddrc_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_lpddrc_ops = { + .write_evtype = hisi_lpddrc_pmu_write_evtype, + .get_event_idx = hisi_lpddrc_pmu_get_event_idx, + .start_counters = hisi_lpddrc_pmu_start_counters, + .stop_counters = hisi_lpddrc_pmu_stop_counters, + .enable_counter = hisi_lpddrc_pmu_enable_counter, + .disable_counter = hisi_lpddrc_pmu_disable_counter, + .enable_counter_int = hisi_lpddrc_pmu_enable_counter_int, + .disable_counter_int = hisi_lpddrc_pmu_disable_counter_int, + .write_counter = hisi_lpddrc_pmu_write_counter, + .read_counter = hisi_lpddrc_pmu_read_counter, +}; + +static int hisi_lpddrc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *lpddrc_pmu) +{ + int ret; + + ret = hisi_lpddrc_pmu_init_data(pdev, lpddrc_pmu); + if (ret) + return ret; + + ret = hisi_lpddrc_pmu_init_irq(lpddrc_pmu, pdev); + if (ret) + return ret; + + lpddrc_pmu->num_counters = LPDDRC_NR_COUNTERS; + lpddrc_pmu->counter_bits = 32; + lpddrc_pmu->ops = &hisi_uncore_lpddrc_ops; + lpddrc_pmu->dev = &pdev->dev; + lpddrc_pmu->on_cpu = -1; + lpddrc_pmu->check_event = 7; + + return 0; +} + +static int hisi_lpddrc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *lpddrc_pmu; + char *name; + int ret; + + lpddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*lpddrc_pmu), GFP_KERNEL); + if (!lpddrc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, lpddrc_pmu); + + ret = hisi_lpddrc_pmu_dev_probe(pdev, lpddrc_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_lpddrc%u", + lpddrc_pmu->sccl_id, lpddrc_pmu->index_id); + HISI_INIT_PMU(&lpddrc_pmu->pmu, name, hisi_lpddrc_pmu_attr_groups); + ret = perf_pmu_register(&lpddrc_pmu->pmu, name, -1); + if (ret) { + dev_err(lpddrc_pmu->dev, "LPDDRC PMU register failed!\n"); + return ret; + } + + /* Pick one core to use for cpumask attributes */ + cpumask_set_cpu(smp_processor_id(), &lpddrc_pmu->associated_cpus); + + lpddrc_pmu->on_cpu = cpumask_first(&lpddrc_pmu->associated_cpus); + if (lpddrc_pmu->on_cpu >= nr_cpu_ids) + return -EINVAL; + + return 0; +} + +static int hisi_lpddrc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *lpddrc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&lpddrc_pmu->pmu); + + return 0; +} + +static struct platform_driver hisi_lpddrc_pmu_driver = { + .driver = { + .name = "hisi_lpddrc_pmu", + .of_match_table = lpddrc_of_match, + }, + .probe = hisi_lpddrc_pmu_probe, + .remove = hisi_lpddrc_pmu_remove, +}; + +static int __init hisi_lpddrc_pmu_module_init(void) +{ + return platform_driver_register(&hisi_lpddrc_pmu_driver); +} +module_init(hisi_lpddrc_pmu_module_init); + +static void __exit hisi_lpddrc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_lpddrc_pmu_driver); +} +module_exit(hisi_lpddrc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC LPDDRC uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Fang Lijun "); +MODULE_AUTHOR("HUAWEI TECHNOLOGIES CO., LTD."); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 9efd2413240cb43ceb0471363cbd679ca776748c..53f623ac4cc69b91a1748445ba704016e298a842 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -18,6 +18,7 @@ #include #include +#include #include #include "hisi_uncore_pmu.h" @@ -37,6 +38,7 @@ ssize_t hisi_format_sysfs_show(struct device *dev, return 
sprintf(buf, "%s\n", (char *)eattr->var); } +EXPORT_SYMBOL_GPL(hisi_format_sysfs_show); /* * PMU event attributes @@ -50,6 +52,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev, return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); } +EXPORT_SYMBOL_GPL(hisi_event_sysfs_show); /* * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show @@ -61,6 +64,7 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev, return sprintf(buf, "%d\n", hisi_pmu->on_cpu); } +EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show); static bool hisi_validate_event_group(struct perf_event *event) { @@ -99,6 +103,7 @@ int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx) { return idx >= 0 && idx < hisi_pmu->num_counters; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid); int hisi_uncore_pmu_get_event_idx(struct perf_event *event) { @@ -115,6 +120,7 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event) return idx; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx); static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx) { @@ -184,6 +190,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event) return 0; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init); /* * Set the counter to count the event that we're interested in, @@ -231,6 +238,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event) /* Write start value to the hardware event counter */ hisi_pmu->ops->write_counter(hisi_pmu, hwc, val); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period); void hisi_uncore_pmu_event_update(struct perf_event *event) { @@ -251,6 +259,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event) HISI_MAX_PERIOD(hisi_pmu->counter_bits); local64_add(delta, &event->count); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update); void hisi_uncore_pmu_start(struct perf_event *event, int flags) { @@ -273,6 +282,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags) hisi_uncore_pmu_enable_event(event); perf_event_update_userpage(event); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start); void hisi_uncore_pmu_stop(struct perf_event *event, int flags) { @@ -289,6 +299,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags) hisi_uncore_pmu_event_update(event); hwc->state |= PERF_HES_UPTODATE; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop); int hisi_uncore_pmu_add(struct perf_event *event, int flags) { @@ -311,6 +322,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags) return 0; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add); void hisi_uncore_pmu_del(struct perf_event *event, int flags) { @@ -322,12 +334,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags) perf_event_update_userpage(event); hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del); void hisi_uncore_pmu_read(struct perf_event *event) { /* Read hardware counter and update the perf counter statistics */ hisi_uncore_pmu_event_update(event); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read); void hisi_uncore_pmu_enable(struct pmu *pmu) { @@ -340,6 +354,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu) hisi_pmu->ops->start_counters(hisi_pmu); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable); void hisi_uncore_pmu_disable(struct pmu *pmu) { @@ -347,30 +362,46 @@ void hisi_uncore_pmu_disable(struct pmu *pmu) hisi_pmu->ops->stop_counters(hisi_pmu); } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable); + /* - * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1. 
- * If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2] - * and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID - * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1]. + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] */ -static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) { u64 mpidr = read_cpuid_mpidr(); - - if (mpidr & MPIDR_MT_BITMASK) { - int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (sccl_id) - *sccl_id = aff2 >> 3; - if (ccl_id) - *ccl_id = aff2 & 0x7; + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + sccl = aff2; + ccl = aff1; } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; } /* @@ -410,10 +441,11 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) hisi_pmu->on_cpu = cpu; /* Overflow interrupt also should use the same CPU */ - WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu))); + WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu))); return 0; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu); int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) { @@ -442,7 +474,10 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); /* Use this CPU for event counting */ hisi_pmu->on_cpu = target; - WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target))); + WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target))); return 0; } +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index f21226a0e9c6e6188154e0028d7e27d2d1bb20f8..129896662cf2594a31a9fe542403ac99a20186e9 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -99,4 +100,22 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf); int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node); int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node); + +static inline void HISI_INIT_PMU(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups) +{ + pmu->name = name; + pmu->module = THIS_MODULE; + pmu->task_ctx_nr = perf_invalid_context; + pmu->event_init = hisi_uncore_pmu_event_init; + pmu->pmu_enable = hisi_uncore_pmu_enable; + pmu->pmu_disable = hisi_uncore_pmu_disable; + pmu->add = hisi_uncore_pmu_add; + pmu->del = hisi_uncore_pmu_del; + pmu->start = hisi_uncore_pmu_start; + pmu->stop = hisi_uncore_pmu_stop; + pmu->read = hisi_uncore_pmu_read; + pmu->attr_groups = attr_groups; +} + #endif /* __HISI_UNCORE_PMU_H__ */ diff --git 
a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 842135cf35a3acba6f72b4d27c9ebb2335d40ee9..72cf1c4861f23fba5fe5bfe5992be97cb1ecf889 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -1047,6 +1047,7 @@ static struct platform_driver l2_cache_pmu_driver = { .driver = { .name = "qcom-l2cache-pmu", .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, .remove = l2_cache_pmu_remove, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 2dc63d61f2ea885991977bb3c830b0f9b2f6cc27..b349e7bf4dd90d1a7748030f419b76f911b9e42a 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -828,6 +828,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = { .driver = { .name = "qcom-l3cache-pmu", .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = qcom_l3_cache_pmu_probe, }; diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 0e31f1392a53ca042519bbbe4bc37dab3bfe87c0..76a7799008fa2d8dc89eedb3055694e98f1662c6 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1935,6 +1935,7 @@ static struct platform_driver xgene_pmu_driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index d4dcd39b8d76f0e8b1ee3e99416de6d2ec576c55..1f8809bab002cd56cce290ae1ca0d54a85a9432f 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -126,6 +126,7 @@ struct sun4i_usb_phy_cfg { bool dedicated_clocks; bool enable_pmu_unk1; bool phy0_dual_route; + int missing_phys; }; struct sun4i_usb_phy_data { @@ -480,8 +481,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode) struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); int new_mode; - if (phy->index != 0) + if (phy->index != 0) { + if (mode == PHY_MODE_USB_HOST) + return 0; return -EINVAL; + } switch (mode) { case PHY_MODE_USB_HOST: @@ -546,6 +550,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; + struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; @@ -602,6 +607,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) mutex_unlock(&phy0->mutex); } + /* Enable PHY0 passby for host mode only. 
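(id_det is low when the ID pin is grounded, i.e. an OTG host cable is attached, so passby is switched on for host mode and off for peripheral mode.)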
*/ + sun4i_usb_phy_passby(phy, !id_det); + /* Re-route PHY0 if necessary */ if (data->cfg->phy0_dual_route) sun4i_usb_phy0_reroute(data, id_det); @@ -646,6 +654,9 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev, if (args->args[0] >= data->cfg->num_phys) return ERR_PTR(-ENODEV); + if (data->cfg->missing_phys & BIT(args->args[0])) + return ERR_PTR(-ENODEV); + return data->phys[args->args[0]].phy; } @@ -741,6 +752,9 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) struct sun4i_usb_phy *phy = data->phys + i; char name[16]; + if (data->cfg->missing_phys & BIT(i)) + continue; + snprintf(name, sizeof(name), "usb%d_vbus", i); phy->vbus = devm_regulator_get_optional(dev, name); if (IS_ERR(phy->vbus)) { diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index 8786a9674471de5a892238b7ad063a9aa7b68356..aa917a61071db97bfbc84de84386a6aa4b196d21 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -60,7 +60,8 @@ config PHY_NS2_USB_DRD config PHY_BRCM_SATA tristate "Broadcom SATA PHY driver" - depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST + depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \ + ARCH_BCM_63XX || COMPILE_TEST depends on OF select GENERIC_PHY default ARCH_BCM_IPROC diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c index 986224fca9e913ca521695376bdb818cbec28612..5a180f71d8d4d1ae9843d8747794de75468764cc 100644 --- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c +++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c @@ -156,7 +156,6 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv, { struct device *dev = priv->dev; const __be32 *offset; - int ret; priv->reg_bits = of_device_get_match_data(dev); diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 3eb8e1bd7b78d5a23a335b2d9ca19ff0ae42f1d4..889e7ea68792b83e626d92d7acfa8216b0002e4d 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -924,6 +924,8 @@ static int mtk_phy_init(struct phy *phy) break; default: dev_err(tphy->dev, "incompatible PHY type\n"); + clk_disable_unprepare(instance->ref_clk); + clk_disable_unprepare(tphy->u3phya_ref); return -EINVAL; } diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig index 82651524ffb9ce33424dd5a3eb9851cc14eef11f..718f8729701df1d7c0c2763965e611346d5194fe 100644 --- a/drivers/phy/motorola/Kconfig +++ b/drivers/phy/motorola/Kconfig @@ -13,7 +13,7 @@ config PHY_CPCAP_USB config PHY_MAPPHONE_MDM6600 tristate "Motorola Mapphone MDM6600 modem USB PHY driver" - depends on OF && USB_SUPPORT + depends on OF && USB_SUPPORT && GPIOLIB select GENERIC_PHY help Enable this for MDM6600 USB modem to work on Motorola phones diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c index 6fd6e07ab345f6813c8285038206dad2ff3c1726..09a77e556ecebfa9c7b318e88a5fdaadb6a41747 100644 --- a/drivers/phy/qualcomm/phy-ath79-usb.c +++ b/drivers/phy/qualcomm/phy-ath79-usb.c @@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy) err = reset_control_deassert(priv->reset); if (err && priv->no_suspend_override) - reset_control_assert(priv->no_suspend_override); + reset_control_deassert(priv->no_suspend_override); return err; } @@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); + priv->reset = 
devm_reset_control_get(&pdev->dev, "phy"); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index e70e425f26f50d409fd9fe4bde2f35fc9306f7a9..69c92843eb3b2833affb52af03e8e88f56629fd9 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = { .mask_core_ready = CORE_READY_STATUS, .has_pll_override = true, .autoresume_en = BIT(0), + .update_tune1_with_efuse = true, }; static const char * const qusb2_phy_vreg_names[] = { @@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) /* * Read efuse register having TUNE2/1 parameter's high nibble. - * If efuse register shows value as 0x0, or if we fail to find - * a valid efuse register settings, then use default value - * as 0xB for high nibble that we have already set while - * configuring phy. + * If efuse register shows value as 0x0 (indicating value is not + * fused), or if we fail to find a valid efuse register setting, + * then use default value for high nibble that we have already + * set while configuring the phy. */ val = nvmem_cell_read(qphy->cell, NULL); if (IS_ERR(val) || !val[0]) { @@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) /* Fused TUNE1/2 value is the higher nibble only */ if (cfg->update_tune1_with_efuse) - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], - val[0] << 0x4); + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1], + val[0] << HSTX_TRIM_SHIFT, + HSTX_TRIM_MASK); else - qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], - val[0] << 0x4); - + qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2], + val[0] << HSTX_TRIM_SHIFT, + HSTX_TRIM_MASK); } static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode) diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c index abbbe75070daa0263765262fd6d4395401cdba55..5629d56a6257831e348c341862a935865e585456 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c @@ -160,8 +160,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy) /* setup initial state */ qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state, uphy->vbus_edev); - ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev, - EXTCON_USB, &uphy->vbus_notify); + ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB, + &uphy->vbus_notify); if (ret) goto err_ulpi; } @@ -182,6 +182,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy) { struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy); + if (uphy->vbus_edev) + extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB, + &uphy->vbus_notify); regulator_disable(uphy->v3p3); regulator_disable(uphy->v1p8); clk_disable_unprepare(uphy->sleep_clk); diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c index 97d4dd6ea9247d8fc3ec36ce4021ed17af1d0ac3..aa02b19b7e0e92ea4b42e40bd570d31a70f13071 100644 --- a/drivers/phy/renesas/phy-rcar-gen2.c +++ b/drivers/phy/renesas/phy-rcar-gen2.c @@ -288,6 +288,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev) error = of_property_read_u32(np, "reg", &channel_num); if (error || channel_num > 2) { dev_err(dev, "Invalid \"reg\" property\n"); + of_node_put(np); return error; } channel->select_mask = select_mask[channel_num]; @@ -303,6 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev) 
&rcar_gen2_phy_ops); if (IS_ERR(phy->phy)) { dev_err(dev, "Failed to create PHY\n"); + of_node_put(np); return PTR_ERR(phy->phy); } phy_set_drvdata(phy->phy, phy); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index fb8f05e39cf7f359f5c04c5c12db300517102d92..50cdf2032f1b6f493bd22f200edd4a8e3d1e4c32 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -66,6 +67,7 @@ USB2_OBINT_IDDIGCHG) /* VBCTRL */ +#define USB2_VBCTRL_OCCLREN BIT(16) #define USB2_VBCTRL_DRVVBUSSEL BIT(8) /* LINECTRL1 */ @@ -198,7 +200,7 @@ static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch) val = readl(usb2_base + USB2_OBINTEN); writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); - rcar_gen3_enable_vbus_ctrl(ch, 0); + rcar_gen3_enable_vbus_ctrl(ch, 1); rcar_gen3_init_for_host(ch); writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN); @@ -240,9 +242,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, if (!ch->has_otg_pins || !ch->phy->init_count) return -EIO; - if (!strncmp(buf, "host", strlen("host"))) + if (sysfs_streq(buf, "host")) new_mode = PHY_MODE_USB_HOST; - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + else if (sysfs_streq(buf, "peripheral")) new_mode = PHY_MODE_USB_DEVICE; else return -EINVAL; @@ -289,6 +291,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) u32 val; val = readl(usb2_base + USB2_VBCTRL); + val &= ~USB2_VBCTRL_OCCLREN; writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); val = readl(usb2_base + USB2_OBINTEN); diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index de1b4ebe4de28356053b915458d756ccc87f6102..9e8fa1834a154912944d8a6d6684b0c6f679f440 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -441,7 +441,7 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, name = kasprintf(GFP_KERNEL, "%s-%u", type, index); if (!name) { of_node_put(ports); - return ERR_PTR(-ENOMEM); + return NULL; } np = of_get_child_by_name(ports, name); kfree(name); diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index fe909fd8144f95df33d29ee346ff9eae19325d7c..ae94e1e66bcc3ff5fb2ed1e75bba7ef594b2d886 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -64,7 +64,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); - if (!phy->comparator) + if (!phy->comparator || !phy->comparator->set_vbus) return -ENODEV; return phy->comparator->set_vbus(phy->comparator, enabled); @@ -74,7 +74,7 @@ static int omap_usb_start_srp(struct usb_otg *otg) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); - if (!phy->comparator) + if (!phy->comparator || !phy->comparator->start_srp) return -ENODEV; return phy->comparator->start_srp(phy->comparator); diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 68ce4a082b9b2284535965cdcbd856a3d5e7aeeb..693acc167351cf4ae63a522026f6b9e4f94221bf 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy) val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY); val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV); - val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT); + 
val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT); ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val); val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES); diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index a44680d64f9b479dc62b30520a901fd569af135e..c267afb68f077cd5817059c9a5125cc61996d1d4 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -144,6 +144,7 @@ #define PMBR1 0x0D #define GPIO_USB_4PIN_ULPI_2430C (3 << 0) +static irqreturn_t twl4030_usb_irq(int irq, void *_twl); /* * If VBUS is valid or ID is ground, then we know a * cable is present and we need to be runtime-enabled @@ -395,6 +396,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on) WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); } +static int __maybe_unused twl4030_usb_suspend(struct device *dev) +{ + struct twl4030_usb *twl = dev_get_drvdata(dev); + + /* + * we need enabled runtime on resume, + * so turn irq off here, so we do not get it early + * note: wakeup on usb plug works independently of this + */ + dev_dbg(twl->dev, "%s\n", __func__); + disable_irq(twl->irq); + + return 0; +} + +static int __maybe_unused twl4030_usb_resume(struct device *dev) +{ + struct twl4030_usb *twl = dev_get_drvdata(dev); + + dev_dbg(twl->dev, "%s\n", __func__); + enable_irq(twl->irq); + /* check whether cable status changed */ + twl4030_usb_irq(0, twl); + + return 0; +} + static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev) { struct twl4030_usb *twl = dev_get_drvdata(dev); @@ -655,6 +683,7 @@ static const struct phy_ops ops = { static const struct dev_pm_ops twl4030_usb_pm_ops = { SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend, twl4030_usb_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume) }; static int twl4030_usb_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index fa530913a2c8fc73c37a9b668447f1058d99815a..1bd3c10ce189394ead8d74faa0b543471b79fce3 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -72,10 +72,8 @@ #define GPIO_REG_OFFSET(p) ((p) / 32) #define GPIO_REG_SHIFT(p) ((p) % 32) -enum bcm2835_pinconf_param { - /* argument: bcm2835_pinconf_pull */ - BCM2835_PINCONF_PARAM_PULL = (PIN_CONFIG_END + 1), -}; +/* argument: bcm2835_pinconf_pull */ +#define BCM2835_PINCONF_PARAM_PULL (PIN_CONFIG_END + 1) struct bcm2835_pinctrl { struct device *dev; @@ -90,7 +88,7 @@ struct bcm2835_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; - spinlock_t irq_lock[BCM2835_NUM_BANKS]; + raw_spinlock_t irq_lock[BCM2835_NUM_BANKS]; }; /* pins are just named GPIO0..GPIO53 */ @@ -461,10 +459,10 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data) unsigned bank = GPIO_REG_OFFSET(gpio); unsigned long flags; - spin_lock_irqsave(&pc->irq_lock[bank], flags); + raw_spin_lock_irqsave(&pc->irq_lock[bank], flags); set_bit(offset, &pc->enabled_irq_map[bank]); bcm2835_gpio_irq_config(pc, gpio, true); - spin_unlock_irqrestore(&pc->irq_lock[bank], flags); + raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags); } static void bcm2835_gpio_irq_disable(struct irq_data *data) @@ -476,12 +474,12 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data) unsigned bank = GPIO_REG_OFFSET(gpio); unsigned long flags; - spin_lock_irqsave(&pc->irq_lock[bank], flags); + raw_spin_lock_irqsave(&pc->irq_lock[bank], flags); 
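/* The per-bank lock is a raw spinlock: these irq_chip callbacks can run in hard interrupt context, where a sleeping lock (which spinlock_t becomes under PREEMPT_RT) must not be taken. */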
bcm2835_gpio_irq_config(pc, gpio, false); /* Clear events that were latched prior to clearing event sources */ bcm2835_gpio_set_bit(pc, GPEDS0, gpio); clear_bit(offset, &pc->enabled_irq_map[bank]); - spin_unlock_irqrestore(&pc->irq_lock[bank], flags); + raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags); } static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc, @@ -584,7 +582,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type) unsigned long flags; int ret; - spin_lock_irqsave(&pc->irq_lock[bank], flags); + raw_spin_lock_irqsave(&pc->irq_lock[bank], flags); if (test_bit(offset, &pc->enabled_irq_map[bank])) ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type); @@ -596,7 +594,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type) else irq_set_handler_locked(data, handle_level_irq); - spin_unlock_irqrestore(&pc->irq_lock[bank], flags); + raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags); return ret; } @@ -1047,7 +1045,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) for_each_set_bit(offset, &events, 32) bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset)); - spin_lock_init(&pc->irq_lock[i]); + raw_spin_lock_init(&pc->irq_lock[i]); } err = gpiochip_add_data(&pc->gpio_chip, pc); diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 4b5cf0e0f16e2dbfcea320414f1aa8c9e8a56179..951090faa6a9194d53bc35e8972161b784aa417e 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev, const struct ns2_pin_function *func; const struct ns2_pin_group *grp; - if (grp_select > pinctrl->num_groups || - func_select > pinctrl->num_functions) + if (grp_select >= pinctrl->num_groups || + func_select >= pinctrl->num_functions) return -EINVAL; func = &pinctrl->functions[func_select]; diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index c4f4d904e4a61e4c0d622a08fcb85e1f5e63b6a7..618e04407ac854c9bd2b03f0da34ebd00124b7e8 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -608,7 +608,7 @@ static int madera_mux_set_mux(struct pinctrl_dev *pctldev, unsigned int n_chip_groups = priv->chip->n_pin_groups; const char *func_name = madera_mux_funcs[selector].name; unsigned int reg; - int i, ret; + int i, ret = 0; dev_dbg(priv->dev, "%s selecting %u (%s) for group %u (%s)\n", __func__, selector, func_name, group, diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index a3dd777e3ce85b050dba4ed4495b6414006ac97b..bcd4d29a7e98ab45a82a5d02cd9fec8c331081c0 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -627,7 +627,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev, while (selector < ngroups) { const char *gname = ops->get_group_name(pctldev, selector); - if (!strcmp(function, gname)) + if (gname && !strcmp(function, gname)) return selector; selector++; @@ -743,7 +743,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev, while (group_selector < ngroups) { const char *gname = pctlops->get_group_name(pctldev, group_selector); - if (!strcmp(gname, pin_group)) { + if (gname && !strcmp(gname, pin_group)) { dev_dbg(pctldev->dev, "found group selector %u for %s\n", group_selector, @@ -1078,8 +1078,8 @@ static struct pinctrl *create_pinctrl(struct device *dev, * an -EPROBE_DEFER later, as that is 
the worst case. */ if (ret == -EPROBE_DEFER) { - pinctrl_free(p, false); mutex_unlock(&pinctrl_maps_mutex); + pinctrl_free(p, false); return ERR_PTR(ret); } } @@ -1992,6 +1992,14 @@ pinctrl_init_controller(struct pinctrl_desc *pctldesc, struct device *dev, return ERR_PTR(ret); } +static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc) +{ + pinctrl_free_pindescs(pctldev, pctldesc->pins, + pctldesc->npins); + mutex_destroy(&pctldev->mutex); + kfree(pctldev); +} + static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) { pctldev->p = create_pinctrl(pctldev->dev, pctldev); @@ -2037,11 +2045,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev) error = pinctrl_claim_hogs(pctldev); if (error) { - dev_err(pctldev->dev, "could not claim hogs: %i\n", - error); - mutex_destroy(&pctldev->mutex); - kfree(pctldev); - + dev_err(pctldev->dev, "could not claim hogs: %i\n", error); return error; } @@ -2077,8 +2081,10 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, return pctldev; error = pinctrl_enable(pctldev); - if (error) + if (error) { + pinctrl_uninit_controller(pctldev, pctldesc); return ERR_PTR(error); + } return pctldev; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 2969ff3162c35b17fdce2b516f927b558543e497..01cc09e2bccbefdeab15d2721f278bf4e81297d5 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -40,6 +40,13 @@ struct pinctrl_dt_map { static void dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { + int i; + + for (i = 0; i < num_maps; ++i) { + kfree_const(map[i].dev_name); + map[i].dev_name = NULL; + } + if (pctldev) { const struct pinctrl_ops *ops = pctldev->desc->pctlops; if (ops->dt_free_map) @@ -74,7 +81,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, /* Initialize common mapping table entry fields */ for (i = 0; i < num_maps; i++) { - map[i].dev_name = dev_name(p->dev); + const char *devname; + + devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL); + if (!devname) + goto err_free_map; + + map[i].dev_name = devname; map[i].name = statename; if (pctldev) map[i].ctrl_dev_name = dev_name(pctldev->dev); @@ -82,10 +95,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, /* Remember the converted mapping table entries */ dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL); - if (!dt_map) { - dt_free_map(pctldev, map, num_maps); - return -ENOMEM; - } + if (!dt_map) + goto err_free_map; dt_map->pctldev = pctldev; dt_map->map = map; @@ -93,6 +104,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, list_add_tail(&dt_map->node, &p->dt_maps); return pinctrl_register_map(map, num_maps, false); + +err_free_map: + dt_free_map(pctldev, map, num_maps); + return -ENOMEM; } struct pinctrl_dev *of_pinctrl_get(struct device_node *np) @@ -220,12 +235,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) for (state = 0; ; state++) { /* Retrieve the pinctrl-* property */ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); + if (!propname) { + ret = -ENOMEM; + goto err; + } prop = of_find_property(np, propname, &size); kfree(propname); if (!prop) { if (state == 0) { - of_node_put(np); - return -ENODEV; + ret = -ENODEV; + goto err; } break; } diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index f38d596efa05f57c995d96094acb241cb48c67be..021e28ff1194e6bec64604fc75d1033b7faeba1b 100644 --- 
a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -196,7 +196,6 @@ struct byt_gpio { struct platform_device *pdev; struct pinctrl_dev *pctl_dev; struct pinctrl_desc pctl_desc; - raw_spinlock_t lock; const struct byt_pinctrl_soc_data *soc_data; struct byt_community *communities_copy; struct byt_gpio_pin_context *saved_context; @@ -707,6 +706,8 @@ static const struct byt_pinctrl_soc_data *byt_soc_data[] = { NULL, }; +static DEFINE_RAW_SPINLOCK(byt_lock); + static struct byt_community *byt_get_community(struct byt_gpio *vg, unsigned int pin) { @@ -848,7 +849,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -868,7 +869,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static void byt_set_group_mixed_mux(struct byt_gpio *vg, @@ -878,7 +879,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -898,7 +899,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, @@ -947,11 +948,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset) unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, @@ -963,7 +964,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, u32 value, gpio_mux; unsigned long flags; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); /* * In most cases, func pin mux 000 means GPIO function. 
@@ -985,7 +986,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, "pin %u forcibly re-configured as GPIO\n", offset); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); pm_runtime_get(&vg->pdev->dev); @@ -1013,7 +1014,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(val_reg); value &= ~BYT_DIR_MASK; @@ -1030,7 +1031,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, "Potential Error: Setting GPIO with direct_irq_en to output"); writel(value, val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1099,11 +1100,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, u32 conf, pull, val, debounce; u16 arg = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); pull = conf & BYT_PULL_ASSIGN_MASK; val = readl(val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (param) { case PIN_CONFIG_BIAS_DISABLE: @@ -1130,9 +1131,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, if (!(conf & BYT_DEBOUNCE_EN)) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); debounce = readl(db_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { case BYT_DEBOUNCE_PULSE_375US: @@ -1184,7 +1185,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, u32 conf, val, debounce; int i, ret = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); val = readl(val_reg); @@ -1292,7 +1293,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, if (!ret) writel(conf, conf_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return ret; } @@ -1317,9 +1318,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset) unsigned long flags; u32 val; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return !!(val & BYT_LEVEL); } @@ -1334,13 +1335,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); old_val = readl(reg); if (value) writel(old_val | BYT_LEVEL, reg); else writel(old_val & ~BYT_LEVEL, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) @@ -1353,9 +1354,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (!reg) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); if (!(value & BYT_OUTPUT_EN)) return GPIOF_DIR_OUT; @@ -1398,14 +1399,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) const char *label; unsigned int pin; - 
raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); pin = vg->soc_data->pins[i].number; reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG); if (!reg) { seq_printf(s, "Could not retrieve pin %i conf0 reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } conf0 = readl(reg); @@ -1414,11 +1415,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) if (!reg) { seq_printf(s, "Could not retrieve pin %i val reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); comm = byt_get_community(vg, pin); if (!comm) { @@ -1502,9 +1503,9 @@ static void byt_irq_ack(struct irq_data *d) if (!reg) return; - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); writel(BIT(offset % 32), reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); } static void byt_irq_mask(struct irq_data *d) @@ -1528,7 +1529,7 @@ static void byt_irq_unmask(struct irq_data *d) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); switch (irqd_get_trigger_type(d)) { @@ -1551,7 +1552,7 @@ static void byt_irq_unmask(struct irq_data *d) writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_irq_type(struct irq_data *d, unsigned int type) @@ -1565,7 +1566,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) if (!reg || offset >= vg->chip.ngpio) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); WARN(value & BYT_DIRECT_IRQ_EN, @@ -1587,7 +1588,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1623,9 +1624,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) continue; } - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); pending = readl(reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irq.domain, base + pin); generic_handle_irq(virq); @@ -1828,8 +1829,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(vg->pctl_dev); } - raw_spin_lock_init(&vg->lock); - ret = byt_gpio_probe(vg); if (ret) return ret; @@ -1845,8 +1844,11 @@ static int byt_gpio_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct byt_gpio *vg = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1867,6 +1869,7 @@ static int byt_gpio_suspend(struct device *dev) vg->saved_context[i].val = value; } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1874,8 +1877,11 @@ static int byt_gpio_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct byt_gpio *vg = platform_get_drvdata(pdev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1913,6 +1919,7 @@ static int byt_gpio_resume(struct device *dev) } } + 
raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } #endif diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 6d31ad7999879edf3e32f3371adff4d7830fe919..f16baf9b869623e8fd2eae7e9031d90056aa86e6 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -157,6 +157,7 @@ struct chv_pin_context { * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller + * @irqchip: IRQ chip in this pin controller * @regs: MMIO registers * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO * offset (in GPIO number space) @@ -170,6 +171,7 @@ struct chv_pinctrl { struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; + struct irq_chip irqchip; void __iomem *regs; unsigned intr_lines[16]; const struct chv_community *community; @@ -1477,16 +1479,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) return 0; } -static struct irq_chip chv_gpio_irqchip = { - .name = "chv-gpio", - .irq_startup = chv_gpio_irq_startup, - .irq_ack = chv_gpio_irq_ack, - .irq_mask = chv_gpio_irq_mask, - .irq_unmask = chv_gpio_irq_unmask, - .irq_set_type = chv_gpio_irq_type, - .flags = IRQCHIP_SKIP_SET_WAKE, -}; - static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -1524,7 +1516,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, }, { @@ -1532,7 +1523,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, }, { @@ -1540,7 +1530,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, }, { @@ -1548,7 +1537,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, }, {} @@ -1599,7 +1587,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; if (need_valid_mask && intsel >= community->nirqs) - clear_bit(i, chip->irq.valid_mask); + clear_bit(desc->number, chip->irq.valid_mask); } /* @@ -1630,7 +1618,15 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, + pctrl->irqchip.name = "chv-gpio"; + pctrl->irqchip.irq_startup = chv_gpio_irq_startup; + pctrl->irqchip.irq_ack = chv_gpio_irq_ack; + pctrl->irqchip.irq_mask = chv_gpio_irq_mask; + pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = chv_gpio_irq_type; + pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; + + ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add IRQ chip\n"); @@ -1647,7 +1643,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, + gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq, chv_gpio_irq_handler); return 0; } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 
1ea3438ea67e925aa82b6e57e503efb72689fde3..89ff2795a8b5521ce52ddc12bac4972d8e75a404 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -49,6 +49,7 @@ #define PADCFG0_GPIROUTNMI BIT(17) #define PADCFG0_PMODE_SHIFT 10 #define PADCFG0_PMODE_MASK (0xf << PADCFG0_PMODE_SHIFT) +#define PADCFG0_PMODE_GPIO 0 #define PADCFG0_GPIORXDIS BIT(9) #define PADCFG0_GPIOTXDIS BIT(8) #define PADCFG0_GPIORXSTATE BIT(1) @@ -301,7 +302,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; - if (!mode) + if (mode == PADCFG0_PMODE_GPIO) seq_puts(s, "GPIO "); else seq_printf(s, "mode %d ", mode); @@ -422,6 +423,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static int intel_gpio_get_gpio_mode(void __iomem *padcfg0) +{ + return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; +} + static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) { u32 value; @@ -450,7 +456,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, } padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); + + /* + * If pin is already configured in GPIO mode, we assume that + * firmware provides correct settings. In such case we avoid + * potential glitches on the pin. Otherwise, for the pin in + * alternative mode, consumer has to supply respective flags. + */ + if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) { + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + return 0; + } + intel_gpio_set_gpio_mode(padcfg0); + /* Disable TX buffer and enable RX (this will be input) */ __intel_gpio_set_direction(padcfg0, true); diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index a613e546717a8d52d42d0b9e9872a9d565dc9e96..564cfaee129d2a54f673642e0e406ec90bd089d6 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d) void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->mask_set); + eint->cur_mask[d->hwirq >> 5] &= ~mask; + writel(mask, reg); } @@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d) void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->mask_clr); + eint->cur_mask[d->hwirq >> 5] |= mask; + writel(mask, reg); if (eint->dual_edge[d->hwirq]) @@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint, } } -static void mtk_eint_chip_read_mask(const struct mtk_eint *eint, - void __iomem *base, u32 *buf) -{ - int port; - void __iomem *reg; - - for (port = 0; port < eint->hw->ports; port++) { - reg = base + eint->regs->mask + (port << 2); - buf[port] = ~readl_relaxed(reg); - /* Mask is 0 when irq is enabled, and 1 when disabled. 
*/ - } -} - static int mtk_eint_irq_request_resources(struct irq_data *d) { struct mtk_eint *eint = irq_data_get_irq_chip_data(d); @@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) struct irq_chip *chip = irq_desc_get_chip(desc); struct mtk_eint *eint = irq_desc_get_handler_data(desc); unsigned int status, eint_num; - int offset, index, virq; + int offset, mask_offset, index, virq; void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat); int dual_edge, start_level, curr_level; @@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) status = readl(reg); while (status) { offset = __ffs(status); + mask_offset = eint_num >> 5; index = eint_num + offset; virq = irq_find_mapping(eint->domain, index); status &= ~BIT(offset); + /* + * If we get an interrupt on pin that was only required + * for wake (but no real interrupt requested), mask the + * interrupt (as would mtk_eint_resume do anyway later + * in the resume sequence). + */ + if (eint->wake_mask[mask_offset] & BIT(offset) && + !(eint->cur_mask[mask_offset] & BIT(offset))) { + writel_relaxed(BIT(offset), reg - + eint->regs->stat + + eint->regs->mask_set); + } + dual_edge = eint->dual_edge[index]; if (dual_edge) { /* @@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) int mtk_eint_do_suspend(struct mtk_eint *eint) { - mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask); mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask); return 0; diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 4ceb06f8a33c965aa51cb317b56e8a672b55a090..c4c70dc57dbeec99813a9cfd5542146a6ade1883 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -198,8 +198,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 }; static const unsigned int uart_tx_c_pins[] = { GPIOY_13 }; static const unsigned int uart_rx_c_pins[] = { GPIOY_14 }; -static const unsigned int uart_cts_c_pins[] = { GPIOX_11 }; -static const unsigned int uart_rts_c_pins[] = { GPIOX_12 }; +static const unsigned int uart_cts_c_pins[] = { GPIOY_11 }; +static const unsigned int uart_rts_c_pins[] = { GPIOY_12 }; static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 }; static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 }; @@ -445,10 +445,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = { GROUP(pwm_f_x, 3, 18), /* Bank Y */ - GROUP(uart_cts_c, 1, 19), - GROUP(uart_rts_c, 1, 18), - GROUP(uart_tx_c, 1, 17), - GROUP(uart_rx_c, 1, 16), + GROUP(uart_cts_c, 1, 17), + GROUP(uart_rts_c, 1, 16), + GROUP(uart_tx_c, 1, 19), + GROUP(uart_rx_c, 1, 18), GROUP(pwm_a_y, 1, 21), GROUP(pwm_f_y, 1, 20), GROUP(i2s_out_ch23_y, 1, 5), @@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = { static struct meson_bank meson_gxbb_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 7dae1d7bf6b0a50f75c9d104f43f93972abaf27d..158f618f169570d07dcbd2b9850c72f7334ca495 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = { static struct 
meson_bank meson_gxl_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 29a458da78db15bf26d5245f3fd29c2eaa1e1fa0..c8eff70fdb1c29de4ab3f1f5cd2316590f8329fb 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -191,8 +191,9 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_DISABLE: dev_dbg(pc->dev, "pin %u: disable bias\n", pin); - meson_calc_reg_and_bit(bank, pin, REG_PULL, ®, &bit); - ret = regmap_update_bits(pc->reg_pull, reg, + meson_calc_reg_and_bit(bank, pin, REG_PULLEN, ®, + &bit); + ret = regmap_update_bits(pc->reg_pullen, reg, BIT(bit), 0); if (ret) return ret; diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c index c6d79315218fa69cadcdde9aa49d657d6916f5bb..e482672e833a49a3bbc6b8988403275081763171 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8.c +++ b/drivers/pinctrl/meson/pinctrl-meson8.c @@ -807,7 +807,9 @@ static const char * const gpio_groups[] = { "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9", "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14", "BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18", +}; +static const char * const gpio_aobus_groups[] = { "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11", @@ -1030,6 +1032,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = { }; static struct meson_pmx_func meson8_aobus_functions[] = { + FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(remote), FUNCTION(i2c_slave_ao), @@ -1053,7 +1056,7 @@ static struct meson_bank meson8_cbus_banks[] = { static struct meson_bank meson8_aobus_banks[] = { /* name first last irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson8_cbus_pinctrl_data = { diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index bb2a30964fc69a20bfe488a8904ebd6badaf8e71..036124fd363cbeb4548e4887f26afbef3ad823aa 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] = { DIF_1_P }; static const unsigned int eth_rx_clk_pins[] = { DIF_1_N }; static const unsigned int eth_txd0_1_pins[] = { DIF_2_P }; static const unsigned int eth_txd1_1_pins[] = { DIF_2_N }; +static const unsigned int eth_rxd3_pins[] = { DIF_2_P }; +static const unsigned int eth_rxd2_pins[] = { DIF_2_N }; static const unsigned int eth_tx_en_pins[] = { DIF_3_P }; static const unsigned int eth_ref_clk_pins[] = { DIF_3_N }; static const unsigned int eth_mdc_pins[] = { DIF_4_P }; @@ -571,6 +573,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = { GROUP(eth_ref_clk, 6, 8), GROUP(eth_mdc, 6, 9), GROUP(eth_mdio_en, 6, 10), + GROUP(eth_rxd3, 7, 22), + GROUP(eth_rxd2, 7, 23), }; static struct meson_pmx_group meson8b_aobus_groups[] = { @@ -646,16 +650,18 @@ static const char * const gpio_groups[] = { "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14", "BOOT_15", "BOOT_16", "BOOT_17", 
"BOOT_18", - "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", - "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", - "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11", - "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N", - "DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N", "DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N", "DIF_4_P", "DIF_4_N" }; +static const char * const gpio_aobus_groups[] = { + "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", + "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", + "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11", + "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N" +}; + static const char * const sd_a_groups[] = { "sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a", "sd_cmd_a" @@ -663,7 +669,7 @@ static const char * const sd_a_groups[] = { static const char * const sdxc_a_groups[] = { "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", - "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" + "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" }; static const char * const pcm_a_groups[] = { @@ -718,7 +724,7 @@ static const char * const ethernet_groups[] = { "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1", "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv", "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk", - "eth_txd2", "eth_txd3" + "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2" }; static const char * const i2c_a_groups[] = { @@ -871,6 +877,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = { }; static struct meson_pmx_func meson8b_aobus_functions[] = { + FUNCTION(gpio_aobus), FUNCTION(uart_ao), FUNCTION(uart_ao_b), FUNCTION(i2c_slave_ao), @@ -906,7 +913,7 @@ static struct meson_bank meson8b_cbus_banks[] = { static struct meson_bank meson8b_aobus_banks[] = { /* name first lastc irq pullen pull dir out in */ - BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0), + BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), }; static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = { diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index aa48b3f23c7fd199138a6896328679569d2b8a75..d76ac6b4b40dfc576d13e4cbf059d051184677d1 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = { PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19), BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19), 18, 2, "gpio", "uart"), - PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"), - PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"), - PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"), - PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"), + PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"), + PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"), + PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"), + PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"), }; @@ -218,11 +218,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = { }; static inline void armada_37xx_update_reg(unsigned int *reg, - unsigned int offset) + unsigned int *offset) { /* We never have more than 2 registers */ - if (offset >= GPIO_PER_REG) { - offset -= GPIO_PER_REG; + if (*offset >= GPIO_PER_REG) { + *offset -= GPIO_PER_REG; *reg += sizeof(u32); } } @@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg, { int offset = irqd_to_hwirq(d); - armada_37xx_update_reg(reg, offset); + 
armada_37xx_update_reg(reg, &offset); } static int armada_37xx_gpio_direction_input(struct gpio_chip *chip, @@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); return regmap_update_bits(info->regmap, reg, mask, 0); @@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int val, mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); regmap_read(info->regmap, reg, &val); @@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int mask, val, ret; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); ret = regmap_update_bits(info->regmap, reg, mask, mask); @@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) unsigned int reg = INPUT_VAL; unsigned int val, mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); regmap_read(info->regmap, reg, &val); @@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset, unsigned int reg = OUTPUT_VAL; unsigned int mask, val; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); val = value ? mask : 0; @@ -592,10 +592,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) regmap_read(info->regmap, in_reg, &in_val); /* Set initial polarity based on current input level. */ - if (in_val & d->mask) - val |= d->mask; /* falling */ + if (in_val & BIT(d->hwirq % GPIO_PER_REG)) + val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */ else - val &= ~d->mask; /* rising */ + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */ break; } default: diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index b4f7f8a458eaf0fe9d8c6afc15ce3b70bb6822ad..78fb9a1dc10a190d3ba5d09b9fdfd756bd174f39 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -390,8 +390,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_available_child_of_node(np_config, np) { ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps, type); - if (ret < 0) + if (ret < 0) { + of_node_put(np); goto exit; + } } return 0; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 1425c2874d4028b5140cc74933733a2bf4f8f22b..b1ffdd3f6d076ab31bb94a922f97467d4ccc5817 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -544,7 +544,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) irqreturn_t ret = IRQ_NONE; unsigned int i, irqnr; unsigned long flags; - u32 *regs, regval; + u32 __iomem *regs; + u32 regval; u64 status, mask; /* Read the wake status */ @@ -569,15 +570,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) !(regval & BIT(INTERRUPT_MASK_OFF))) continue; irq = irq_find_mapping(gc->irq.domain, irqnr + i); - generic_handle_irq(irq); + if (irq != 0) + generic_handle_irq(irq); /* Clear interrupt. * We must read the pin register again, in case the * value was changed while executing * generic_handle_irq() above. 
+ * If we didn't find a mapping for the interrupt, + * disable it in order to avoid a system hang caused + * by an interrupt storm. */ raw_spin_lock_irqsave(&gpio_dev->lock, flags); regval = readl(regs + i); + if (irq == 0) { + regval &= ~BIT(INTERRUPT_ENABLE_OFF); + dev_dbg(&gpio_dev->pdev->dev, + "Disabling spurious GPIO IRQ %d\n", + irqnr + i); + } writel(regval, regs + i); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); ret = IRQ_HANDLED; diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index ef7ab208b951eeb307b73191f373b88ecd8462a3..9e2f3738bf3ecdec3314be32863b4fcc89b46739 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -493,7 +493,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, unsigned num_pins, num_configs, reserve; unsigned long *configs; struct property *pins; - bool has_config; u32 pinfunc; int ret, i; @@ -509,9 +508,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, return ret; } - if (num_configs) - has_config = true; - num_pins = pins->length / sizeof(u32); if (!num_pins) { dev_err(pctldev->dev, "no pins found in node %pOF\n", np); @@ -524,7 +520,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, * map for each pin. */ reserve = 1; - if (has_config && num_pins >= 1) + if (num_configs) reserve++; reserve *= num_pins; ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, @@ -547,7 +543,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, group, func); - if (has_config) { + if (num_configs) { ret = pinctrl_utils_add_map_configs(pctldev, map, reserved_maps, num_maps, group, configs, num_configs, diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 50f0ec42c63723528576b4498df84fa147428250..fad0e132ead84b187e8d9294f98d29b0068f577d 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1574,16 +1574,6 @@ void at91_pinctrl_gpio_resume(void) #define gpio_irq_set_wake NULL #endif /* CONFIG_PM */ -static struct irq_chip gpio_irqchip = { - .name = "GPIO", - .irq_ack = gpio_irq_ack, - .irq_disable = gpio_irq_mask, - .irq_mask = gpio_irq_mask, - .irq_unmask = gpio_irq_unmask, - /* .irq_set_type is set dynamically */ - .irq_set_wake = gpio_irq_set_wake, -}; - static void gpio_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -1624,12 +1614,22 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, struct gpio_chip *gpiochip_prev = NULL; struct at91_gpio_chip *prev = NULL; struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); + struct irq_chip *gpio_irqchip; int ret, i; + gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL); + if (!gpio_irqchip) + return -ENOMEM; + at91_gpio->pioc_hwirq = irqd_to_hwirq(d); - /* Setup proper .irq_set_type function */ - gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type; + gpio_irqchip->name = "GPIO"; + gpio_irqchip->irq_ack = gpio_irq_ack; + gpio_irqchip->irq_disable = gpio_irq_mask; + gpio_irqchip->irq_mask = gpio_irq_mask; + gpio_irqchip->irq_unmask = gpio_irq_unmask; + gpio_irqchip->irq_set_wake = gpio_irq_set_wake, + gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type; /* Disable irqs of this PIO controller */ writel_relaxed(~0, at91_gpio->regbase + PIO_IDR); @@ -1640,7 +1640,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, * interrupt. 
*/ ret = gpiochip_irqchip_add(&at91_gpio->chip, - &gpio_irqchip, + gpio_irqchip, 0, handle_edge_irq, IRQ_TYPE_NONE); @@ -1658,7 +1658,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, if (!gpiochip_prev) { /* Then register the chain on the parent IRQ */ gpiochip_set_chained_irqchip(&at91_gpio->chip, - &gpio_irqchip, + gpio_irqchip, at91_gpio->pioc_virq, gpio_irq_handler); return 0; diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c index fa7d998e1d5a86ba1354c6b976acfb32b2beba01..3535f984186152838ed1d8609ee55f30c3831eba 100644 --- a/drivers/pinctrl/pinctrl-gemini.c +++ b/drivers/pinctrl/pinctrl-gemini.c @@ -591,13 +591,16 @@ static const unsigned int tvc_3512_pins[] = { 319, /* TVC_DATA[1] */ 301, /* TVC_DATA[2] */ 283, /* TVC_DATA[3] */ - 265, /* TVC_CLK */ 320, /* TVC_DATA[4] */ 302, /* TVC_DATA[5] */ 284, /* TVC_DATA[6] */ 266, /* TVC_DATA[7] */ }; +static const unsigned int tvc_clk_3512_pins[] = { + 265, /* TVC_CLK */ +}; + /* NAND flash pins */ static const unsigned int nflash_3512_pins[] = { 199, 200, 201, 202, 216, 217, 218, 219, 220, 234, 235, 236, 237, 252, @@ -629,7 +632,7 @@ static const unsigned int pflash_3512_pins_extended[] = { /* Serial flash pins CE0, CE1, DI, DO, CK */ static const unsigned int sflash_3512_pins[] = { 230, 231, 232, 233, 211 }; -/* The GPIO0A (0) pin overlap with TVC and extended parallel flash */ +/* The GPIO0A (0) pin overlap with TVC CLK and extended parallel flash */ static const unsigned int gpio0a_3512_pins[] = { 265 }; /* The GPIO0B (1-4) pins overlap with TVC and ICE */ @@ -823,7 +826,13 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = { .num_pins = ARRAY_SIZE(tvc_3512_pins), /* Conflict with character LCD and ICE */ .mask = LCD_PADS_ENABLE, - .value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE, + .value = TVC_PADS_ENABLE, + }, + { + .name = "tvcclkgrp", + .pins = tvc_clk_3512_pins, + .num_pins = ARRAY_SIZE(tvc_clk_3512_pins), + .value = TVC_CLK_PAD_ENABLE, }, /* * The construction is done such that it is possible to use a serial @@ -860,8 +869,8 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = { .name = "gpio0agrp", .pins = gpio0a_3512_pins, .num_pins = ARRAY_SIZE(gpio0a_3512_pins), - /* Conflict with TVC */ - .mask = TVC_PADS_ENABLE, + /* Conflict with TVC CLK */ + .mask = TVC_CLK_PAD_ENABLE, }, { .name = "gpio0bgrp", @@ -1531,13 +1540,16 @@ static const unsigned int tvc_3516_pins[] = { 311, /* TVC_DATA[1] */ 394, /* TVC_DATA[2] */ 374, /* TVC_DATA[3] */ - 333, /* TVC_CLK */ 354, /* TVC_DATA[4] */ 395, /* TVC_DATA[5] */ 312, /* TVC_DATA[6] */ 334, /* TVC_DATA[7] */ }; +static const unsigned int tvc_clk_3516_pins[] = { + 333, /* TVC_CLK */ +}; + /* NAND flash pins */ static const unsigned int nflash_3516_pins[] = { 243, 260, 261, 224, 280, 262, 281, 264, 300, 263, 282, 301, 320, 283, @@ -1570,7 +1582,7 @@ static const unsigned int pflash_3516_pins_extended[] = { static const unsigned int sflash_3516_pins[] = { 296, 338, 295, 359, 339 }; /* The GPIO0A (0-4) pins overlap with TVC and extended parallel flash */ -static const unsigned int gpio0a_3516_pins[] = { 333, 354, 395, 312, 334 }; +static const unsigned int gpio0a_3516_pins[] = { 354, 395, 312, 334 }; /* The GPIO0B (5-7) pins overlap with ICE */ static const unsigned int gpio0b_3516_pins[] = { 375, 396, 376 }; @@ -1602,6 +1614,9 @@ static const unsigned int gpio0j_3516_pins[] = { 359, 339 }; /* The GPIO0K (30,31) pins overlap with NAND flash */ static const unsigned int gpio0k_3516_pins[] = { 275, 298 }; +/* The 
GPIO0L (0) pins overlap with TVC_CLK */ +static const unsigned int gpio0l_3516_pins[] = { 333 }; + /* The GPIO1A (0-4) pins that overlap with IDE and parallel flash */ static const unsigned int gpio1a_3516_pins[] = { 221, 200, 222, 201, 220 }; @@ -1761,7 +1776,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = { .num_pins = ARRAY_SIZE(tvc_3516_pins), /* Conflict with character LCD */ .mask = LCD_PADS_ENABLE, - .value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE, + .value = TVC_PADS_ENABLE, + }, + { + .name = "tvcclkgrp", + .pins = tvc_clk_3516_pins, + .num_pins = ARRAY_SIZE(tvc_clk_3516_pins), + .value = TVC_CLK_PAD_ENABLE, }, /* * The construction is done such that it is possible to use a serial @@ -1872,6 +1893,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = { /* Conflict with parallel and NAND flash */ .value = PFLASH_PADS_DISABLE | NAND_PADS_DISABLE, }, + { + .name = "gpio0lgrp", + .pins = gpio0l_3516_pins, + .num_pins = ARRAY_SIZE(gpio0l_3516_pins), + /* Conflict with TVE CLK */ + .mask = TVC_CLK_PAD_ENABLE, + }, { .name = "gpio1agrp", .pins = gpio1a_3516_pins, @@ -2184,7 +2212,8 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev, func->name, grp->name); regmap_read(pmx->map, GLOBAL_MISC_CTRL, &before); - regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL, grp->mask, + regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL, + grp->mask | grp->value, grp->value); regmap_read(pmx->map, GLOBAL_MISC_CTRL, &after); diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 628817c40e3bbc79cc1098368a23fc2d75e587e5..a5accffbc8c9115dbcf81e8d8fb35a8ec700cf4b 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -847,4 +847,4 @@ static int __init ingenic_pinctrl_drv_register(void) { return platform_driver_register(&ingenic_pinctrl_driver); } -postcore_initcall(ingenic_pinctrl_drv_register); +subsys_initcall(ingenic_pinctrl_drv_register); diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index 190f17e4bbdafd287b9dd8d9cdeee4011f80c6c5..1d3b88e6ab862e0d468af104881478f7df7db061 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = { LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA), }; -/** - * enum lpc18xx_pin_config_param - possible pin configuration parameters - * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt - * controller. 
- */ -enum lpc18xx_pin_config_param { - PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1, -}; +/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */ +#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1) static const struct pinconf_generic_params lpc18xx_params[] = { {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0}, diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c index a7f37063518ec3217633e2fffdac2f38d2ddce57..3d05bc1937d40bd081ceee657a1c7243f3de7a4f 100644 --- a/drivers/pinctrl/pinctrl-max77620.c +++ b/drivers/pinctrl/pinctrl-max77620.c @@ -34,14 +34,12 @@ enum max77620_pin_ppdrv { MAX77620_PIN_PP_DRV, }; -enum max77620_pinconf_param { - MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1, - MAX77620_ACTIVE_FPS_POWER_ON_SLOTS, - MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS, - MAX77620_SUSPEND_FPS_SOURCE, - MAX77620_SUSPEND_FPS_POWER_ON_SLOTS, - MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS, -}; +#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1) +#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2) +#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3) +#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4) +#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5) +#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6) struct max77620_pin_function { const char *name; diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index cf73a403d22dfc1778d6a893bca04d5b706dce18..33c3eca0ece97f1c9aec1901554bb4dd5682b15b 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; case MCP_TYPE_S18: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); + one_regmap_config); mcp->reg_shift = 1; mcp->chip.ngpio = 16; mcp->chip.label = "mcp23s18"; @@ -884,6 +889,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, if (ret < 0) goto fail; + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->irq_controller = device_property_read_bool(dev, "interrupt-controller"); if (mcp->irq && mcp->irq_controller) { @@ -925,10 +934,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, goto fail; } - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - if (one_regmap_config) { mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL, "mcp23xxx-pinctrl.%d", raw_chip_address); diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 302190d1558d98e2124b48aa35e7ce714ae2a559..0d7d379e9bb80e76536946ab19438e2f0e85e230 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1368,6 +1368,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl) if (!of_find_property(child, "gpio-controller", NULL)) { dev_err(pctl->dev, "No gpio-controller property for bank %u\n", i); + of_node_put(child); ret = -ENODEV; goto err; } @@ -1375,6 +1376,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl) irq = irq_of_parse_and_map(child, 0); if (irq < 0) { dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq); + of_node_put(child); ret = irq; goto err; } diff --git a/drivers/pinctrl/pinctrl-rockchip.c 
b/drivers/pinctrl/pinctrl-rockchip.c index f4a61429e06e7bd28c20a9ff0e54cfbf41cee4fe..8d83817935dae789d8e81dacbb29e0e9f4b89c4c 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3172,6 +3172,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank, base, &rockchip_regmap_config); } + of_node_put(node); } bank->irq = irq_of_parse_and_map(bank->of_node, 0); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 7ec72ff2419a025ce72d86bacad93cd0584a8dfd..4aea8e10aea7af9774e63d6d7ee98e9d72ca3bf8 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -323,6 +323,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin, return -ENOTSUPP; fselector = setting->func; function = pinmux_generic_get_function(pctldev, fselector); + if (!function) + return -EINVAL; *func = function->data; if (!(*func)) { dev_err(pcs->dev, "%s could not find function%i\n", @@ -345,6 +347,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector, if (!pcs->fmask) return 0; function = pinmux_generic_get_function(pctldev, fselector); + if (!function) + return -EINVAL; func = function->data; if (!func) return -EINVAL; diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index cbf58a10113df861fbc12335e68c2158c492a60b..4d87d75b9c6eac47ca1a38f8475ee8fc1af78016 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1166,7 +1166,6 @@ static int sx150x_probe(struct i2c_client *client, } /* Register GPIO controller */ - pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); pctl->gpio.base = -1; pctl->gpio.ngpio = pctl->data->npins; pctl->gpio.get_direction = sx150x_gpio_get_direction; @@ -1180,6 +1179,10 @@ static int sx150x_probe(struct i2c_client *client, pctl->gpio.of_node = dev->of_node; #endif pctl->gpio.can_sleep = true; + pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); + if (!pctl->gpio.label) + return -ENOMEM; + /* * Setting multiple pins is not safe when all pins are not * handled by the same regmap register. 
The oscio pin (present @@ -1200,13 +1203,15 @@ static int sx150x_probe(struct i2c_client *client, /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { - pctl->irq_chip.name = devm_kstrdup(dev, client->name, - GFP_KERNEL); pctl->irq_chip.irq_mask = sx150x_irq_mask; pctl->irq_chip.irq_unmask = sx150x_irq_unmask; pctl->irq_chip.irq_set_type = sx150x_irq_set_type; pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock; pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock; + pctl->irq_chip.name = devm_kstrdup(dev, client->name, + GFP_KERNEL); + if (!pctl->irq_chip.name) + return -ENOMEM; pctl->irq.masked = ~0; pctl->irq.sense = 0; diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index 93f8bd04e7fe69ab92bb48c2715aa24371d315c3..ae74b260b014bddd2700b4edca00f44c99d3e84a 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -1746,14 +1746,6 @@ static int pinmux_xway_probe(struct platform_device *pdev) } xway_pctrl_desc.pins = xway_info.pads; - /* register the gpio chip */ - xway_chip.parent = &pdev->dev; - ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL); - if (ret) { - dev_err(&pdev->dev, "Failed to register gpio chip\n"); - return ret; - } - /* setup the data needed by pinctrl */ xway_pctrl_desc.name = dev_name(&pdev->dev); xway_pctrl_desc.npins = xway_chip.ngpio; @@ -1775,10 +1767,33 @@ static int pinmux_xway_probe(struct platform_device *pdev) return ret; } - /* finish with registering the gpio range in pinctrl */ - xway_gpio_range.npins = xway_chip.ngpio; - xway_gpio_range.base = xway_chip.base; - pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range); + /* register the gpio chip */ + xway_chip.parent = &pdev->dev; + xway_chip.owner = THIS_MODULE; + xway_chip.of_node = pdev->dev.of_node; + ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL); + if (ret) { + dev_err(&pdev->dev, "Failed to register gpio chip\n"); + return ret; + } + + /* + * For DeviceTree-supported systems, the gpio core checks the + * pinctrl's device node for the "gpio-ranges" property. + * If it is present, it takes care of adding the pin ranges + * for the driver. In this case the driver can skip ahead. + * + * In order to remain compatible with older, existing DeviceTree + * files which don't set the "gpio-ranges" property or systems that + * utilize ACPI the driver has to call gpiochip_add_pin_range(). + */ + if (!of_property_read_bool(pdev->dev.of_node, "gpio-ranges")) { + /* finish with registering the gpio range in pinctrl */ + xway_gpio_range.npins = xway_chip.ngpio; + xway_gpio_range.base = xway_chip.base; + pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range); + } + dev_info(&pdev->dev, "Init done\n"); return 0; } diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c index a0daf27042bd0f95555f68ee01722ec0c1260ab1..90fd37e8207bfbabd39b53e45379719c311bad8a 100644 --- a/drivers/pinctrl/pinctrl-zynq.c +++ b/drivers/pinctrl/pinctrl-zynq.c @@ -971,15 +971,12 @@ enum zynq_io_standards { zynq_iostd_max }; -/** - * enum zynq_pin_config_param - possible pin configuration parameters - * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to +/* + * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to * this parameter (on a custom format) tells the driver which alternative * IO standard to use. 
*/ -enum zynq_pin_config_param { - PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1, -}; +#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1) static const struct pinconf_generic_params zynq_dt_params[] = { {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18}, diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 5d72ffad32c299eb2db1e24c5827a11a54a4084e..ce1ade47ea37706bf0cb5070682e8bd3313759b5 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -566,6 +566,42 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #define msm_gpio_dbg_show NULL #endif +static int msm_gpio_init_valid_mask(struct gpio_chip *chip) +{ + struct msm_pinctrl *pctrl = gpiochip_get_data(chip); + int ret; + unsigned int len, i; + unsigned int max_gpios = pctrl->soc->ngpios; + u16 *tmp; + + /* The number of GPIOs in the ACPI tables */ + len = ret = device_property_read_u16_array(pctrl->dev, "gpios", NULL, + 0); + if (ret < 0) + return 0; + + if (ret > max_gpios) + return -EINVAL; + + tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = device_property_read_u16_array(pctrl->dev, "gpios", tmp, len); + if (ret < 0) { + dev_err(pctrl->dev, "could not read list of GPIOs\n"); + goto out; + } + + bitmap_zero(chip->valid_mask, max_gpios); + for (i = 0; i < len; i++) + set_bit(tmp[i], chip->valid_mask); + +out: + kfree(tmp); + return ret; +} + static const struct gpio_chip msm_gpio_template = { .direction_input = msm_gpio_direction_input, .direction_output = msm_gpio_direction_output, @@ -575,6 +611,7 @@ static const struct gpio_chip msm_gpio_template = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, .dbg_show = msm_gpio_dbg_show, + .init_valid_mask = msm_gpio_init_valid_mask, }; /* For dual-edge interrupts in software, since some hardware has no @@ -855,41 +892,6 @@ static void msm_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static int msm_gpio_init_valid_mask(struct gpio_chip *chip, - struct msm_pinctrl *pctrl) -{ - int ret; - unsigned int len, i; - unsigned int max_gpios = pctrl->soc->ngpios; - u16 *tmp; - - /* The number of GPIOs in the ACPI tables */ - len = ret = device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0); - if (ret < 0) - return 0; - - if (ret > max_gpios) - return -EINVAL; - - tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - ret = device_property_read_u16_array(pctrl->dev, "gpios", tmp, len); - if (ret < 0) { - dev_err(pctrl->dev, "could not read list of GPIOs\n"); - goto out; - } - - bitmap_zero(chip->valid_mask, max_gpios); - for (i = 0; i < len; i++) - set_bit(tmp[i], chip->valid_mask); - -out: - kfree(tmp); - return ret; -} - static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl) { return device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0) > 0; @@ -926,13 +928,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return ret; } - ret = msm_gpio_init_valid_mask(chip, pctrl); - if (ret) { - dev_err(pctrl->dev, "Failed to setup irq valid bits\n"); - gpiochip_remove(&pctrl->chip); - return ret; - } - /* * For DeviceTree-supported systems, the gpio core checks the * pinctrl's device node for the "gpio-ranges" property. 
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index cf82db78e69e6f4fe994be61e0f88e2feee61a05..0c30f5eb4c714c27b4296edfa99e93041d367a8f 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1028,10 +1028,23 @@ static int pmic_gpio_probe(struct platform_device *pdev) return ret; } - ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins); - if (ret) { - dev_err(dev, "failed to add pin range\n"); - goto err_range; + /* + * For DeviceTree-supported systems, the gpio core checks the + * pinctrl's device node for the "gpio-ranges" property. + * If it is present, it takes care of adding the pin ranges + * for the driver. In this case the driver can skip ahead. + * + * In order to remain compatible with older, existing DeviceTree + * files which don't set the "gpio-ranges" property or systems that + * utilize ACPI the driver has to call gpiochip_add_pin_range(). + */ + if (!of_property_read_bool(dev->of_node, "gpio-ranges")) { + ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, + npins); + if (ret) { + dev_err(dev, "failed to add pin range\n"); + goto err_range; + } } return 0; diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 6556dbeae65efc18df3d4d2dd879dc87bf3573f3..ac251c62bc666660a404ab25605c6e74e6b5202c 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -319,6 +319,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function, pad->function = function; ret = pmic_mpp_write_mode_ctl(state, pad); + if (ret < 0) + return ret; val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT; @@ -343,13 +345,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN; + if (pad->pullup != PMIC_MPP_PULL_UP_OPEN) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: switch (pad->pullup) { - case PMIC_MPP_PULL_UP_OPEN: - arg = 0; - break; case PMIC_MPP_PULL_UP_0P6KOHM: arg = 600; break; @@ -364,13 +365,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, } break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - arg = !pad->is_enabled; + if (pad->is_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_POWER_SOURCE: arg = pad->power_source; break; case PIN_CONFIG_INPUT_ENABLE: - arg = pad->input_enabled; + if (!pad->input_enabled) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_OUTPUT: arg = pad->out_value; @@ -382,7 +387,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev, arg = pad->amux_input; break; case PMIC_MPP_CONF_PAIRED: - arg = pad->paired; + if (!pad->paired) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_STRENGTH: arg = pad->drive_strength; @@ -455,7 +462,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, pad->dtest = arg; break; case PIN_CONFIG_DRIVE_STRENGTH: - arg = pad->drive_strength; + pad->drive_strength = arg; break; case PMIC_MPP_CONF_AMUX_ROUTE: if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4) @@ -502,6 +509,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin, if (ret < 0) return ret; + ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength); + if (ret < 0) + return ret; + val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT; return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val); diff --git 
a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index f53e32a9d8fcef232ec9ce825e58edd0684c4007..6bed433e5420564f20d091400e3cd26fdae79b82 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -260,22 +260,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_BIAS_DISABLE: - arg = pin->bias == PM8XXX_GPIO_BIAS_NP; + if (pin->bias != PM8XXX_GPIO_BIAS_NP) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_DOWN: - arg = pin->bias == PM8XXX_GPIO_BIAS_PD; + if (pin->bias != PM8XXX_GPIO_BIAS_PD) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_BIAS_PULL_UP: - arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30; + if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30) + return -EINVAL; + arg = 1; break; case PM8XXX_QCOM_PULL_UP_STRENGTH: arg = pin->pull_up_strength; break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - arg = pin->disable; + if (!pin->disable) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_INPUT_ENABLE: - arg = pin->mode == PM8XXX_GPIO_MODE_INPUT; + if (pin->mode != PM8XXX_GPIO_MODE_INPUT) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_OUTPUT: if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT) @@ -290,10 +300,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev, arg = pin->output_strength; break; case PIN_CONFIG_DRIVE_PUSH_PULL: - arg = !pin->open_drain; + if (pin->open_drain) + return -EINVAL; + arg = 1; break; case PIN_CONFIG_DRIVE_OPEN_DRAIN: - arg = pin->open_drain; + if (!pin->open_drain) + return -EINVAL; + arg = 1; break; default: return -EINVAL; @@ -748,12 +762,23 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) return ret; } - ret = gpiochip_add_pin_range(&pctrl->chip, - dev_name(pctrl->dev), - 0, 0, pctrl->chip.ngpio); - if (ret) { - dev_err(pctrl->dev, "failed to add pin range\n"); - goto unregister_gpiochip; + /* + * For DeviceTree-supported systems, the gpio core checks the + * pinctrl's device node for the "gpio-ranges" property. + * If it is present, it takes care of adding the pin ranges + * for the driver. In this case the driver can skip ahead. + * + * In order to remain compatible with older, existing DeviceTree + * files which don't set the "gpio-ranges" property or systems that + * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
+ */ + if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) { + ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), + 0, 0, pctrl->chip.ngpio); + if (ret) { + dev_err(pctrl->dev, "failed to add pin range\n"); + goto unregister_gpiochip; + } } platform_set_drvdata(pdev, pctrl); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 44c6b753f692ad9ce17faf68b3cc80eaeda27c51..85ddf49a5188594081b5adfcd0c890c570ac1fab 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata, } clk_base = of_iomap(np, 0); + of_node_put(np); if (!clk_base) { pr_err("%s: failed to map clock registers\n", __func__); return ERR_PTR(-EINVAL); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index f49ea3d92aa1e3eab716721cf2f5a12ea45a1ccd..e87ee43efa163340e066ad98e7fa92c011984d65 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -494,8 +494,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) if (match) { irq_chip = kmemdup(match->data, sizeof(*irq_chip), GFP_KERNEL); - if (!irq_chip) + if (!irq_chip) { + of_node_put(np); return -ENOMEM; + } wkup_np = np; break; } @@ -512,6 +514,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) bank->nr_pins, &exynos_eint_irqd_ops, bank); if (!bank->irq_domain) { dev_err(dev, "wkup irq domain add failed\n"); + of_node_put(wkup_np); return -ENXIO; } @@ -526,8 +529,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) weint_data = devm_kcalloc(dev, bank->nr_pins, sizeof(*weint_data), GFP_KERNEL); - if (!weint_data) + if (!weint_data) { + of_node_put(wkup_np); return -ENOMEM; + } for (idx = 0; idx < bank->nr_pins; ++idx) { irq = irq_of_parse_and_map(bank->of_node, idx); @@ -544,10 +549,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) } } - if (!muxed_banks) + if (!muxed_banks) { + of_node_put(wkup_np); return 0; + } irq = irq_of_parse_and_map(wkup_np, 0); + of_node_put(wkup_np); if (!irq) { dev_err(dev, "irq number for muxed EINTs not found\n"); return 0; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 7e824e4d20f44e59856f3679b9f466f0b857088d..9bd0a3de101dd61f0808e257e864988494ef5bde 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) return -ENODEV; eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL); - if (!eint_data) + if (!eint_data) { + of_node_put(eint_np); return -ENOMEM; + } eint_data->drvdata = d; @@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) irq = irq_of_parse_and_map(eint_np, i); if (!irq) { dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i); + of_node_put(eint_np); return -ENXIO; } eint_data->parents[i] = irq; irq_set_chained_handler_and_data(irq, handlers[i], eint_data); } + of_node_put(eint_np); bank = d->pin_banks; for (i = 0; i < d->nr_banks; ++i, ++bank) { diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index c399f0932af5e26eb3d9af531bc91975abd9d18c..f97f8179f2b1b591d7f55f940c6e4cf4e723b60c 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -704,8 
+704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) return -ENODEV; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + of_node_put(eint0_np); return -ENOMEM; + } data->drvdata = d; for (i = 0; i < NUM_EINT0_IRQ; ++i) { @@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) irq = irq_of_parse_and_map(eint0_np, i); if (!irq) { dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i); + of_node_put(eint0_np); return -ENXIO; } @@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) s3c64xx_eint0_handlers[i], data); } + of_node_put(eint0_np); bank = d->pin_banks; for (i = 0; i < d->nr_banks; ++i, ++bank) { diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 698c7d8c9a0864e2ad820b5422516a3a5c968462..c05217edcb0e080dea207c411b9868256c416e9f 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, &reserved_maps, num_maps); if (ret < 0) { samsung_dt_free_map(pctldev, *map, *num_maps); + of_node_put(np); return ret; } } @@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( if (!of_get_child_count(cfg_np)) { ret = samsung_pinctrl_create_function(dev, drvdata, cfg_np, func); - if (ret < 0) + if (ret < 0) { + of_node_put(cfg_np); return ERR_PTR(ret); + } if (ret > 0) { ++func; ++func_cnt; @@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions( for_each_child_of_node(cfg_np, func_np) { ret = samsung_pinctrl_create_function(dev, drvdata, func_np, func); - if (ret < 0) + if (ret < 0) { + of_node_put(func_np); + of_node_put(cfg_np); return ERR_PTR(ret); + } if (ret > 0) { ++func; ++func_cnt; diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c index b81c807ac54d52ed2719d83bf883043a81e133c3..d2fcf7f7b966829d78e329327c85029f05dbb805 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c @@ -395,7 +395,7 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1) #define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1) #define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1) -#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3) FM(SEL_I2C1_4) F_(0, 0) F_(0, 0) F_(0, 0) +#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3) #define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0) #define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1) #define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1) diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c index 8070765311dbf7a488fe5edd9d3d7ce92e2f331a..e1c34e19222ee7bcf12d3a98f348b9d749b312fe 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c @@ -1716,6 +1716,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF12MD_000, PF12MD_001, 0, PF12MD_011, PF12MD_100, PF12MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } @@ -1759,8 +1762,10 @@ static const struct pinmux_cfg_reg 
pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0, PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011, PF1MD_100, PF1MD_101, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 - } + 0, 0, 0, 0, 0, 0, 0, 0, + PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011, + PF0MD_100, PF0MD_101, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) { diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index 6502e676d368617927c9cf55be7fea2ea31697e8..3eccc9b3ca84aabe8ce6081355a0a3f7f7dee752 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c @@ -1453,7 +1453,7 @@ static const struct pinmux_func pinmux_func_gpios[] = { GPIO_FN(ET0_ETXD2_A), GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B), GPIO_FN(ET0_ETXD3_A), - GPIO_FN(RD_WR), GPIO_FN(TCLK1_B), + GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4), GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B), GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2), GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A), @@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* IP3_20 [1] */ FN_EX_WAIT0, FN_TCLK1_B, /* IP3_19_18 [2] */ - FN_RD_WR, FN_TCLK1_B, 0, 0, + FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4, /* IP3_17_15 [3] */ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B, FN_ET0_ETXD3_A, 0, 0, 0, @@ -2213,22 +2213,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* IP10_22 [1] */ FN_CAN_CLK_A, FN_RX4_D, /* IP10_21_19 [3] */ - FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B, - FN_LCD_M_DISP_B, 0, 0, 0, + FN_AUDIO_CLKOUT, FN_TX1_E, 0, FN_HRTS0_C, FN_FSE_B, + FN_LCD_M_DISP_B, 0, 0, /* IP10_18_16 [3] */ - FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B, - FN_LCD_VEPWC_B, 0, 0, 0, + FN_AUDIO_CLKC, FN_SCK1_E, 0, FN_HCTS0_C, FN_FRB_B, + FN_LCD_VEPWC_B, 0, 0, /* IP10_15 [1] */ FN_AUDIO_CLKB_A, FN_LCD_CLK_B, /* IP10_14_12 [3] */ FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B, FN_LCD_FLM_B, 0, 0, 0, /* IP10_11_9 [3] */ - FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B, - FN_LCD_CL2_B, 0, 0, 0, + FN_SSI_SDATA3, FN_VI1_7_B, 0, FN_HTX0_C, FN_FWE_B, + FN_LCD_CL2_B, 0, 0, /* IP10_8_6 [3] */ - FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B, - FN_LCD_CL1_B, 0, 0, 0, + FN_SSI_SDATA2, FN_VI1_6_B, 0, FN_HRX0_C, FN_FRE_B, + FN_LCD_CL1_B, 0, 0, /* IP10_5_3 [3] */ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B, FN_LCD_DON_B, 0, 0, 0, diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c index 78c2f548b25f1ef5dc3cbe702f2ef8db10416523..8f3468d9f848dda6309a1416892b24bafc0f3eac 100644 --- a/drivers/pinctrl/sprd/pinctrl-sprd.c +++ b/drivers/pinctrl/sprd/pinctrl-sprd.c @@ -159,10 +159,8 @@ struct sprd_pinctrl { struct sprd_pinctrl_soc_info *info; }; -enum sprd_pinconf_params { - SPRD_PIN_CONFIG_CONTROL = PIN_CONFIG_END + 1, - SPRD_PIN_CONFIG_SLEEP_MODE = PIN_CONFIG_END + 2, -}; +#define SPRD_PIN_CONFIG_CONTROL (PIN_CONFIG_END + 1) +#define SPRD_PIN_CONFIG_SLEEP_MODE (PIN_CONFIG_END + 2) static int sprd_pinctrl_get_id_by_name(struct sprd_pinctrl *sprd_pctl, const char *name) diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index a9bec6e6fdd18820a2847fe02bc98730fe223da2..14dfbbd6c1c37220e751d77e8a123728ef34234d 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -410,7 +410,7 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, unsigned int num_configs; bool has_config = 0; unsigned reserve = 0; - 
int num_pins, num_funcs, maps_per_pin, i, err; + int num_pins, num_funcs, maps_per_pin, i, err = 0; pctl = pinctrl_dev_get_drvdata(pctldev); @@ -437,41 +437,45 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (has_config && num_pins >= 1) maps_per_pin++; - if (!num_pins || !maps_per_pin) - return -EINVAL; + if (!num_pins || !maps_per_pin) { + err = -EINVAL; + goto exit; + } reserve = num_pins * maps_per_pin; err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, reserve); if (err) - return err; + goto exit; for (i = 0; i < num_pins; i++) { err = of_property_read_u32_index(node, "pinmux", i, &pinfunc); if (err) - return err; + goto exit; pin = STM32_GET_PIN_NO(pinfunc); func = STM32_GET_PIN_FUNC(pinfunc); if (!stm32_pctrl_is_function_valid(pctl, pin, func)) { dev_err(pctl->dev, "invalid function.\n"); - return -EINVAL; + err = -EINVAL; + goto exit; } grp = stm32_pctrl_find_group_by_pin(pctl, pin); if (!grp) { dev_err(pctl->dev, "unable to match pin %d to group\n", pin); - return -EINVAL; + err = -EINVAL; + goto exit; } err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map, reserved_maps, num_maps); if (err) - return err; + goto exit; if (has_config) { err = pinctrl_utils_add_map_configs(pctldev, map, @@ -479,11 +483,13 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, configs, num_configs, PIN_MAP_TYPE_CONFIGS_GROUP); if (err) - return err; + goto exit; } } - return 0; +exit: + kfree(configs); + return err; } static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568ddcd90d64a76db5e414920e00873..ef4268cc62275057016846703cca8de58f8386f6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; static const struct sunxi_pinctrl_desc h6_pinctrl_data = { .pins = h6_pins, .npins = ARRAY_SIZE(h6_pins), - .irq_banks = 3, + .irq_banks = 4, .irq_bank_map = h6_irq_bank_map, .irq_read_needs_mux = true, }; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 6624499eae72f5c2ba986c8c54c6f7e583f05f2a..4ada80317a3bd56bab5627b7ae947d1d3a82c352 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c @@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */ }; static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = { diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 4d9bf9b3e9f3e45d8e7d1eeeeedf4cb88181b46f..61aaaf58c59932a5ec45020ef9bf4d97b872240b 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1042,6 +1042,7 @@ static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl, static int sunxi_pinctrl_build_state(struct platform_device *pdev) { struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev); + void *ptr; int i; /* @@ -1079,10 +1080,9 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) * We suppose that we won't have any more functions than pins, * we'll reallocate that later anyway */ - pctl->functions = 
devm_kcalloc(&pdev->dev, - pctl->ngroups, - sizeof(*pctl->functions), - GFP_KERNEL); + pctl->functions = kcalloc(pctl->ngroups, + sizeof(*pctl->functions), + GFP_KERNEL); if (!pctl->functions) return -ENOMEM; @@ -1109,13 +1109,15 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) } /* And now allocated and fill the array for real */ - pctl->functions = krealloc(pctl->functions, - pctl->nfunctions * sizeof(*pctl->functions), - GFP_KERNEL); - if (!pctl->functions) { + ptr = krealloc(pctl->functions, + pctl->nfunctions * sizeof(*pctl->functions), + GFP_KERNEL); + if (!ptr) { kfree(pctl->functions); + pctl->functions = NULL; return -ENOMEM; } + pctl->functions = ptr; for (i = 0; i < pctl->desc->npins; i++) { const struct sunxi_desc_pin *pin = pctl->desc->pins + i; @@ -1133,8 +1135,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) func_item = sunxi_pinctrl_find_function_by_name(pctl, func->name); - if (!func_item) + if (!func_item) { + kfree(pctl->functions); return -EINVAL; + } if (!func_item->groups) { func_item->groups = @@ -1142,8 +1146,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) func_item->ngroups, sizeof(*func_item->groups), GFP_KERNEL); - if (!func_item->groups) + if (!func_item->groups) { + kfree(pctl->functions); return -ENOMEM; + } } func_grp = func_item->groups; diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 1aba75897d1476a3828bad0ffccd19c29f513ac3..26a3f1eb9c6bfdd654174b25ea906b89f1e80fe8 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -40,7 +40,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg) static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg) { - writel(val, pmx->regs[bank] + reg); + writel_relaxed(val, pmx->regs[bank] + reg); + /* make sure pinmux register write completed */ + pmx_readl(pmx, bank, reg); } static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c index caa44dd2880a824c03c1366742287d1398b10f54..3cb69309912bad3bb57a6a2cb1aa08dfc5891a7d 100644 --- a/drivers/pinctrl/zte/pinctrl-zx.c +++ b/drivers/pinctrl/zte/pinctrl-zx.c @@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev, } zpctl->aux_base = of_iomap(np, 0); + of_node_put(np); if (!zpctl->aux_base) return -ENOMEM; diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index b6fd4838f60f3f9c198988072e7dd25d6a01d02a..ac784ac66ac341f754d59bd6a7a70c61d1fd0915 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev, else xfer_fxn = ec_dev->cmd_xfer; + if (!xfer_fxn) { + /* + * This error can happen if a communication error happened and + * the EC is trying to use protocol v2, on an underlying + * communication mechanism that does not support v2. 
+ */ + dev_err_once(ec_dev->dev, + "missing EC transfer API, cannot send command\n"); + return -EIO; + } + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; @@ -575,6 +586,7 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev) int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event) { + u8 event_type; u32 host_event; int ret; @@ -594,11 +606,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event) return ret; if (wake_event) { + event_type = ec_dev->event_data.event_type; host_event = cros_ec_get_host_event(ec_dev); - /* Consider non-host_event as wake event */ - *wake_event = !host_event || - !!(host_event & ec_dev->host_event_wake_mask); + /* + * Sensor events need to be parsed by the sensor sub-device. + * Defer them, and don't report the wakeup here. + */ + if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) + *wake_event = false; + /* Masked host-events should not count as wake events. */ + else if (host_event && + !(host_event & ec_dev->host_event_wake_mask)) + *wake_event = false; + /* Consider all other events as wake events. */ + else + *wake_event = true; } return ret; diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 2da567540c2daeb7815fd9b559b593f1dc0bf03c..07eef853a47ddd258af81b5ac510bc8a9c45a1b4 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -455,8 +455,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, if (unlikely(bufflen == 0)) return 0; /* Check the buffer range for access */ - if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ, - buffer, bufflen))) + if (unlikely(!access_ok(buffer, bufflen))) return -EFAULT; address = (unsigned long)buffer; diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index b6d44550d98cf3fbe95a021b79b23dd1ad54bea2..d52c821b858457b65c8a644248347d18af00fce5 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, struct mlxreg_core_item *item) { struct mlxreg_core_data *data; - u32 asserted, regval, bit; + unsigned long asserted; + u32 regval, bit; int ret; /* @@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, asserted = item->cache ^ regval; item->cache = regval; - for_each_set_bit(bit, (unsigned long *)&asserted, 8) { + for_each_set_bit(bit, &asserted, 8) { data = item->data + bit; if (regval & BIT(bit)) { if (item->inversed) @@ -672,6 +673,7 @@ static int mlxreg_hotplug_remove(struct platform_device *pdev) /* Clean interrupts setup. 
*/ mlxreg_hotplug_unset_irq(priv); + devm_free_irq(&pdev->dev, priv->irq, priv); return 0; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 0c1aa6c314f50a94c55aada1ae314a9edf24ea80..66d4b7faca610b5b9c41aeef3fbe67bb7562003a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -856,6 +856,7 @@ config TOSHIBA_WMI config ACPI_CMPC tristate "CMPC Laptop Extras" depends on ACPI && INPUT + depends on BACKLIGHT_LCD_SUPPORT depends on RFKILL || RFKILL=n select BACKLIGHT_CLASS_DEVICE help @@ -1077,6 +1078,7 @@ config INTEL_OAKTRAIL config SAMSUNG_Q10 tristate "Samsung Q10 Extras" depends on ACPI + depends on BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- This driver provides support for backlight control on Samsung Q10 @@ -1229,6 +1231,20 @@ config I2C_MULTI_INSTANTIATE To compile this driver as a module, choose M here: the module will be called i2c-multi-instantiate. +config INTEL_ATOMISP2_PM + tristate "Intel AtomISP2 dummy / power-management driver" + depends on PCI && IOSF_MBI && PM + help + Power-management driver for Intel's Image Signal Processor found on + Bay and Cherry Trail devices. This dummy driver's sole purpose is to + turn the ISP off (put it in D3) to save power and to allow entering + of S0ix modes. + + To compile this driver as a module, choose M here: the module + will be called intel_atomisp2_pm. + +source "drivers/platform/x86/intel_speed_select_if/Kconfig" + endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e6d1becf81ce8c6bd526e196b8c167576d0f5c6f..fb426466817bf428d665f1c2244572f2709a7038 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -92,3 +92,5 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o +obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o +obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ea22591ee66feb9c08d8a831cbb822d8b9ab7591..53dfe67807e3994551dac12d40665bb2ba840d21 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = { {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0}, + {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0}, /* Packard Bell */ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0}, {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0}, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index db2af09067dbcb4d57da168ebffed7ede9c5fa94..59f3a37a44d7a68298ec81270b86c633244b9e85 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -78,10 +78,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, static struct quirk_entry quirk_asus_unknown = { .wapf = 0, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_q500a = { .i8042_filter = asus_q500a_i8042_filter, + .wmi_backlight_set_devstate = true, }; /* @@ -92,26 +94,32 @@ static struct quirk_entry quirk_asus_q500a = { static struct quirk_entry quirk_asus_x55u = { .wapf 
= 4, .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, .no_display_toggle = true, }; static struct quirk_entry quirk_asus_wapf4 = { .wapf = 4, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_x200ca = { .wapf = 2, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_ux303ub = { .wmi_backlight_native = true, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_x550lb = { + .wmi_backlight_set_devstate = true, .xusb2pr = 0x01D9, }; static struct quirk_entry quirk_asus_forceals = { + .wmi_backlight_set_devstate = true, .wmi_force_als_set = true, }; @@ -442,8 +450,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, - { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */ - { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */ + { KE_KEY, 0x35, { KEY_SCREENLOCK } }, { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, { KE_KEY, 0x41, { KEY_NEXTSONG } }, { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 2d6e272315a82eae024c0fe3f242f097c2787f02..22eac449d3a39019ac522bb03cf8bb01ba0535c3 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2231,7 +2231,8 @@ static int asus_wmi_add(struct platform_device *pdev) err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; - } + } else if (asus->driver->quirks->wmi_backlight_set_devstate) + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); status = wmi_install_notify_handler(asus->driver->event_guid, asus_wmi_notify, asus); diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 6c1311f4b04def9548a62c0f7ca560c2ee8331d7..57a79bddb28611c4b5603553f4a68911f4a526a7 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -44,6 +44,7 @@ struct quirk_entry { bool store_backlight_power; bool wmi_backlight_power; bool wmi_backlight_native; + bool wmi_backlight_set_devstate; bool wmi_force_als_set; int wapf; /* diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 06978c14c83b23c5e35c4cb721863903a736486d..3433986d52200379ff368dbefc6d9e2e63d9b43f 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -532,7 +532,7 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data) return; } - dell_fill_request(&buffer, 0, 0x2, 0, 0); + dell_fill_request(&buffer, 0x2, 0, 0, 0); ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); hwswitch = buffer.output[1]; @@ -563,7 +563,7 @@ static int dell_debugfs_show(struct seq_file *s, void *data) return ret; status = buffer.output[1]; - dell_fill_request(&buffer, 0, 0x2, 0, 0); + dell_fill_request(&buffer, 0x2, 0, 0, 0); hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); if (hwswitch_ret) return hwswitch_ret; @@ -648,7 +648,7 @@ static void dell_update_rfkill(struct work_struct *ignored) if (ret != 0) return; - dell_fill_request(&buffer, 0, 0x2, 0, 0); + dell_fill_request(&buffer, 0x2, 0, 0, 0); ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); if (ret == 0 && (status & BIT(0))) diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index cf2229ece9ff6f11cce316f2e51c8088c30053ec..ccccce9b67efeb018187ac3600afbdbbe9111ef9 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c 
+++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -274,7 +274,8 @@ int init_dell_smbios_wmi(void) void exit_dell_smbios_wmi(void) { - wmi_driver_unregister(&dell_smbios_wmi_driver); + if (wmi_supported) + wmi_driver_unregister(&dell_smbios_wmi_driver); } MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index b4224389febebe4688ea2195d8a3a786ba3c2081..06a3c1ef8eeee65455dc059b619b0580420eec5a 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -78,7 +78,7 @@ struct bios_args { u32 command; u32 commandtype; u32 datasize; - u32 data; + u8 data[128]; }; enum hp_wmi_commandtype { @@ -229,7 +229,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, .command = command, .commandtype = query, .datasize = insize, - .data = 0, + .data = { 0 }, }; struct acpi_buffer input = { sizeof(struct bios_args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -241,7 +241,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, if (WARN_ON(insize > sizeof(args.data))) return -EINVAL; - memcpy(&args.data, buffer, insize); + memcpy(&args.data[0], buffer, insize); wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output); @@ -313,7 +313,7 @@ static int __init hp_wmi_bios_2008_later(void) static int __init hp_wmi_bios_2009_later(void) { - int state = 0; + u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, sizeof(state), sizeof(state)); if (!ret) @@ -393,7 +393,7 @@ static int hp_wmi_rfkill2_refresh(void) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + sizeof(state), sizeof(state)); if (err) return err; @@ -790,7 +790,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + sizeof(state), sizeof(state)); if (err) return err < 0 ? err : -EINVAL; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index d4f1259ff5a233bc22bb0700ddb354ace4b40e58..62d4b94e2531d99bb717fec5a2a7064c99f83959 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { .ident = "Lenovo RESCUER R720-15IKBN", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_BOARD_NAME, "80WW"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"), }, }, { diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 6cf9b7fa5bf0486fcf7e9d0b3ed805cfe07ca19d..3201a83073b5e99a9ca1c70566711b96bcaeca4e 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -373,7 +373,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) * the 5-button array, but still send notifies with power button * event code to this device object on power button actions. * - * Report the power button press; catch and ignore the button release. + * Report the power button press and release. 
*/ if (!priv->array) { if (event == 0xce) { @@ -382,8 +382,11 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) return; } - if (event == 0xcf) + if (event == 0xcf) { + input_report_key(priv->input_dev, KEY_POWER, 0); + input_sync(priv->input_dev); return; + } } /* 0xC0 is for HID events, other values are for 5 button array */ diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 06cd7e818ed5dd8b9c28c5f67289d8dc90b07644..a0d0cecff55fd908a3b3096238c529a2ed657217 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -76,12 +76,24 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) struct platform_device *device = context; struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); unsigned int val = !(event & 1); /* Even=press, Odd=release */ - const struct key_entry *ke_rel; + const struct key_entry *ke, *ke_rel; bool autorelease; if (priv->wakeup_mode) { - if (sparse_keymap_entry_from_scancode(priv->input_dev, event)) { + ke = sparse_keymap_entry_from_scancode(priv->input_dev, event); + if (ke) { pm_wakeup_hard_event(&device->dev); + + /* + * Switch events like tablet mode will wake the device + * and report the new switch position to the input + * subsystem. + */ + if (ke->type == KE_SW) + sparse_keymap_report_event(priv->input_dev, + event, + val, + 0); return; } goto out_unknown; diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c new file mode 100644 index 0000000000000000000000000000000000000000..b0f421fea2a58ed61d0063625c1855d61e575108 --- /dev/null +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry + * Trail devices. The sole purpose of this driver is to allow the ISP to + * be put in D3. + * + * Copyright (C) 2018 Hans de Goede + * + * Based on various non upstream patches for ISP support: + * Copyright (C) 2010-2017 Intel Corporation. All rights reserved. + * Copyright (c) 2010 Silicon Hive www.siliconhive.com. + */ + +#include +#include +#include +#include +#include +#include + +/* PCI configuration regs */ +#define PCI_INTERRUPT_CTRL 0x9c + +#define PCI_CSI_CONTROL 0xe8 +#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7 + +/* IOSF BT_MBI_UNIT_PMC regs */ +#define ISPSSPM0 0x39 +#define ISPSSPM0_ISPSSC_OFFSET 0 +#define ISPSSPM0_ISPSSC_MASK 0x00000003 +#define ISPSSPM0_ISPSSS_OFFSET 24 +#define ISPSSPM0_ISPSSS_MASK 0x03000000 +#define ISPSSPM0_IUNIT_POWER_ON 0x0 +#define ISPSSPM0_IUNIT_POWER_OFF 0x3 + +static int isp_set_power(struct pci_dev *dev, bool enable) +{ + unsigned long timeout; + u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : + ISPSSPM0_IUNIT_POWER_OFF; + + /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */ + iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, + val, ISPSSPM0_ISPSSC_MASK); + + /* + * There should be no IUNIT access while power-down is + * in progress HW sighting: 4567865 + * Wait up to 50 ms for the IUNIT to shut down. + * And we do the same for power on. + */ + timeout = jiffies + msecs_to_jiffies(50); + while (1) { + u32 tmp; + + /* Wait until ISPSSPM0 bit[25:24] shows the right value */ + iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp); + tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; + if (tmp == val) + break; + + if (time_after(jiffies, timeout)) { + dev_err(&dev->dev, "IUNIT power-%s timeout.\n", + enable ? 
"on" : "off"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + return 0; +} + +static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + pm_runtime_allow(&dev->dev); + pm_runtime_put_sync_suspend(&dev->dev); + + return 0; +} + +static void isp_remove(struct pci_dev *dev) +{ + pm_runtime_get_sync(&dev->dev); + pm_runtime_forbid(&dev->dev); +} + +static int isp_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + u32 val; + + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0); + + /* + * MRFLD IUNIT DPHY is located in an always-power-on island + * MRFLD HW design need all CSI ports are disabled before + * powering down the IUNIT. + */ + pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val); + val |= PCI_CSI_CONTROL_PORTS_OFF_MASK; + pci_write_config_dword(pdev, PCI_CSI_CONTROL, val); + + /* + * We lose config space access when punit power gates + * the ISP. Can't use pci_set_power_state() because + * pmcsr won't actually change when we write to it. + */ + pci_save_state(pdev); + pdev->current_state = PCI_D3cold; + isp_set_power(pdev, false); + + return 0; +} + +static int isp_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + isp_set_power(pdev, true); + pdev->current_state = PCI_D0; + pci_restore_state(pdev); + + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend, + isp_pci_resume, NULL); + +static const struct pci_device_id isp_id_table[] = { + { PCI_VDEVICE(INTEL, 0x0f38), }, + { PCI_VDEVICE(INTEL, 0x22b8), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, isp_id_table); + +static struct pci_driver isp_pci_driver = { + .name = "intel_atomisp2_pm", + .id_table = isp_id_table, + .probe = isp_probe, + .remove = isp_remove, + .driver.pm = &isp_pm_ops, +}; + +module_pci_driver(isp_pci_driver); + +MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)"); +MODULE_AUTHOR("Hans de Goede "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c index 39d4100c60a226203a9f18155b825c275d0e6646..f40b1c1921064b734614705c72dc212700f979fc 100644 --- a/drivers/platform/x86/intel_cht_int33fe.c +++ b/drivers/platform/x86/intel_cht_int33fe.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,7 @@ struct cht_int33fe_data { struct i2c_client *fusb302; struct i2c_client *pi3usb30532; /* Contain a list-head must be per device */ - struct device_connection connections[3]; + struct device_connection connections[5]; }; /* @@ -88,9 +89,9 @@ static const struct property_entry fusb302_props[] = { { } }; -static int cht_int33fe_probe(struct i2c_client *client) +static int cht_int33fe_probe(struct platform_device *pdev) { - struct device *dev = &client->dev; + struct device *dev = &pdev->dev; struct i2c_board_info board_info; struct cht_int33fe_data *data; struct i2c_client *max17047; @@ -174,19 +175,20 @@ static int cht_int33fe_probe(struct i2c_client *client) return -EPROBE_DEFER; /* Wait for i2c-adapter to load */ } - data->connections[0].endpoint[0] = "i2c-fusb302"; + data->connections[0].endpoint[0] = "port0"; data->connections[0].endpoint[1] = "i2c-pi3usb30532"; data->connections[0].id = "typec-switch"; - data->connections[1].endpoint[0] = "i2c-fusb302"; + data->connections[1].endpoint[0] = "port0"; data->connections[1].endpoint[1] = "i2c-pi3usb30532"; data->connections[1].id = "typec-mux"; - data->connections[2].endpoint[0] = "i2c-fusb302"; - data->connections[2].endpoint[1] = 
"intel_xhci_usb_sw-role-switch"; - data->connections[2].id = "usb-role-switch"; + data->connections[2].endpoint[0] = "port0"; + data->connections[2].endpoint[1] = "i2c-pi3usb30532"; + data->connections[2].id = "idff01m01"; + data->connections[3].endpoint[0] = "i2c-fusb302"; + data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch"; + data->connections[3].id = "usb-role-switch"; - device_connection_add(&data->connections[0]); - device_connection_add(&data->connections[1]); - device_connection_add(&data->connections[2]); + device_connections_add(data->connections); memset(&board_info, 0, sizeof(board_info)); strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE); @@ -206,7 +208,7 @@ static int cht_int33fe_probe(struct i2c_client *client) if (!data->pi3usb30532) goto out_unregister_fusb302; - i2c_set_clientdata(client, data); + platform_set_drvdata(pdev, data); return 0; @@ -217,52 +219,41 @@ static int cht_int33fe_probe(struct i2c_client *client) if (data->max17047) i2c_unregister_device(data->max17047); - device_connection_remove(&data->connections[2]); - device_connection_remove(&data->connections[1]); - device_connection_remove(&data->connections[0]); + device_connections_remove(data->connections); return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */ } -static int cht_int33fe_remove(struct i2c_client *i2c) +static int cht_int33fe_remove(struct platform_device *pdev) { - struct cht_int33fe_data *data = i2c_get_clientdata(i2c); + struct cht_int33fe_data *data = platform_get_drvdata(pdev); i2c_unregister_device(data->pi3usb30532); i2c_unregister_device(data->fusb302); if (data->max17047) i2c_unregister_device(data->max17047); - device_connection_remove(&data->connections[2]); - device_connection_remove(&data->connections[1]); - device_connection_remove(&data->connections[0]); + device_connections_remove(data->connections); return 0; } -static const struct i2c_device_id cht_int33fe_i2c_id[] = { - { } -}; -MODULE_DEVICE_TABLE(i2c, cht_int33fe_i2c_id); - static const struct acpi_device_id cht_int33fe_acpi_ids[] = { { "INT33FE", }, { } }; MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids); -static struct i2c_driver cht_int33fe_driver = { +static struct platform_driver cht_int33fe_driver = { .driver = { .name = "Intel Cherry Trail ACPI INT33FE driver", .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids), }, - .probe_new = cht_int33fe_probe, + .probe = cht_int33fe_probe, .remove = cht_int33fe_remove, - .id_table = cht_int33fe_i2c_id, - .disable_i2c_core_irq_mapping = true, }; -module_i2c_driver(cht_int33fe_driver); +module_platform_driver(cht_int33fe_driver); MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver"); MODULE_AUTHOR("Hans de Goede "); diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index a473dc51b18d6d76d27d2cde2572cfcc42435bc6..e89ad4964dc139daec390560bfcecb145cdd5505 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_cpu_ids[] = { /* * Limit ourselves to Cherry Trail for now, until testing shows we * need to handle the INT0002 device on Baytrail too. 
- * ICPU(INTEL_FAM6_ATOM_SILVERMONT1), * Valleyview, Bay Trail * + * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail * */ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ {} diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index d79fbf924b136823987011664d385e1bdceb927d..5ad44204a9c3c997bf237aacfb1bbb99ab292ef7 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -125,8 +125,8 @@ static const struct mid_pb_ddata mrfld_ddata = { { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } static const struct x86_cpu_id mid_pb_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata), - ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata), + ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata), + ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata), {} }; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 2d272a3e017621365de7d38dfbfa68bef7d73ab2..36bd2545afb62d6af03a004d56ce71951a929493 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -185,7 +185,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"CNVI", BIT(3)}, {"UFS0", BIT(4)}, {"EMMC", BIT(5)}, - {"Res_6", BIT(6)}, + {"SPF", BIT(6)}, {"SBR6", BIT(7)}, {"SBR7", BIT(0)}, @@ -333,7 +333,8 @@ static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused) index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); - for (index = 0; map[index].name; index++) + for (index = 0; map[index].name && + index < pmcdev->map->ppfear_buckets * 8; index++) pmc_core_display_map(s, index, pf_regs[index / 8], map); return 0; @@ -681,13 +682,17 @@ static int __init pmc_core_probe(void) * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap * in this case. 
*/ - if (!pci_dev_present(pmc_pci_ids)) + if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) pmcdev->map = &cnp_reg_map; - if (lpit_read_residency_count_address(&slp_s0_addr)) + if (lpit_read_residency_count_address(&slp_s0_addr)) { pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT; - else + + if (page_is_ram(PHYS_PFN(pmcdev->base_addr))) + return -ENODEV; + } else { pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset; + } pmcdev->regbase = ioremap(pmcdev->base_addr, pmcdev->map->regmap_length); diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 93a7e99e1f8b8a0a8d241416c1540cc66c779f20..3f9711b03cb4d52932dd73faf3e9be146dd3bab9 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -39,7 +39,7 @@ #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1) #define MTPMC_MASK 0xffff0000 -#define PPFEAR_MAX_NUM_ENTRIES 5 +#define PPFEAR_MAX_NUM_ENTRIES 12 #define SPT_PPFEAR_NUM_ENTRIES 5 #define SPT_PMC_READ_DISABLE_BIT 0x16 #define SPT_PMC_MSG_FULL_STS_BIT 0x18 diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index e7edc8c6393674dfdcfcf53cccb80da50fd255f2..4ad9d127f2f58b93073fe7f9c2de0283049d4fa9 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -776,13 +776,17 @@ static int ipc_create_pmc_devices(void) if (ret) { dev_err(ipcdev.dev, "Failed to add punit platform device\n"); platform_device_unregister(ipcdev.tco_dev); + return ret; } if (!ipcdev.telem_res_inval) { ret = ipc_create_telemetry_device(); - if (ret) + if (ret) { dev_warn(ipcdev.dev, "Failed to add telemetry platform device\n"); + platform_device_unregister(ipcdev.punit_dev); + platform_device_unregister(ipcdev.tco_dev); + } } return ret; diff --git a/drivers/platform/x86/intel_speed_select_if/Kconfig b/drivers/platform/x86/intel_speed_select_if/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..ce3e3dc076d237d31fb98f13f45fe1cd94b4b47f --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/Kconfig @@ -0,0 +1,17 @@ +menu "Intel Speed Select Technology interface support" + depends on PCI + depends on X86_64 || COMPILE_TEST + +config INTEL_SPEED_SELECT_INTERFACE + tristate "Intel(R) Speed Select Technology interface drivers" + help + This config enables the Intel(R) Speed Select Technology interface + drivers. The Intel(R) speed select technology features are non + architectural and only supported on specific Xeon(R) servers. + These drivers provide interface to directly communicate with hardware + via MMIO and Mail boxes to enumerate and control all the speed select + features. + + Enable this config, if there is a need to enable and control the + Intel(R) Speed Select Technology features from the user space. +endmenu diff --git a/drivers/platform/x86/intel_speed_select_if/Makefile b/drivers/platform/x86/intel_speed_select_if/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..856076206f35899ea0e0abdee709d43dc33399f8 --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile - Intel Speed Select Interface drivers +# Copyright (c) 2019, Intel Corporation. 
+# + +obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o +obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o +obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o +obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c new file mode 100644 index 0000000000000000000000000000000000000000..64a1818eabaa82dec3af0c1e9da863c3376cdd27 --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -0,0 +1,675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Speed Select Interface: Common functions + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Author: Srinivas Pandruvada + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "isst_if_common.h" + +#define MSR_THREAD_ID_INFO 0x53 +#define MSR_CPU_BUS_NUMBER 0x128 + +static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX]; + +static int punit_msr_white_list[] = { + MSR_TURBO_RATIO_LIMIT, + MSR_CONFIG_TDP_CONTROL, +}; + +struct isst_valid_cmd_ranges { + u16 cmd; + u16 sub_cmd_beg; + u16 sub_cmd_end; +}; + +struct isst_cmd_set_req_type { + u16 cmd; + u16 sub_cmd; + u16 param; +}; + +static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { + {0xD0, 0x00, 0x03}, + {0x7F, 0x00, 0x0B}, + {0x7F, 0x10, 0x12}, + {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, +}; + +static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { + {0xD0, 0x00, 0x08}, + {0xD0, 0x01, 0x08}, + {0xD0, 0x02, 0x08}, + {0xD0, 0x03, 0x08}, + {0x7F, 0x02, 0x00}, + {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, +}; + +struct isst_cmd { + struct hlist_node hnode; + u64 data; + u32 cmd; + int cpu; + int mbox_cmd_type; + u32 param; +}; + +static DECLARE_HASHTABLE(isst_hash, 8); +static DEFINE_MUTEX(isst_hash_lock); + +static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, + u32 data) +{ + struct isst_cmd *sst_cmd; + + sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL); + if (!sst_cmd) + return -ENOMEM; + + sst_cmd->cpu = cpu; + sst_cmd->cmd = cmd; + sst_cmd->mbox_cmd_type = mbox_cmd_type; + sst_cmd->param = param; + sst_cmd->data = data; + + hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd); + + return 0; +} + +static void isst_delete_hash(void) +{ + struct isst_cmd *sst_cmd; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) { + hash_del(&sst_cmd->hnode); + kfree(sst_cmd); + } +} + +/** + * isst_store_cmd() - Store command to a hash table + * @cmd: Mailbox command. + * @sub_cmd: Mailbox sub-command or MSR id. + * @mbox_cmd_type: Mailbox or MSR command. + * @param: Mailbox parameter. + * @data: Mailbox request data or MSR data. + * + * Stores the command to a hash table if there is no such command already + * stored. If already stored update the latest parameter and data for the + * command. + * + * Return: Return result of store to hash table, 0 for success, others for + * failure. 
+ */ +int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type, + u32 param, u64 data) +{ + struct isst_cmd *sst_cmd; + int full_cmd, ret; + + full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16; + full_cmd |= (sub_cmd & GENMASK_ULL(15, 0)); + mutex_lock(&isst_hash_lock); + hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) { + if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu && + sst_cmd->mbox_cmd_type == mbox_cmd_type) { + sst_cmd->param = param; + sst_cmd->data = data; + mutex_unlock(&isst_hash_lock); + return 0; + } + } + + ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data); + mutex_unlock(&isst_hash_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(isst_store_cmd); + +static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb, + struct isst_cmd *sst_cmd) +{ + struct isst_if_mbox_cmd mbox_cmd; + int wr_only; + + mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16; + mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0); + mbox_cmd.parameter = sst_cmd->param; + mbox_cmd.req_data = sst_cmd->data; + mbox_cmd.logical_cpu = sst_cmd->cpu; + (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1); +} + +/** + * isst_resume_common() - Process Resume request + * + * On resume, replay all mailbox commands and MSRs. + * + * Return: None. + */ +void isst_resume_common(void) +{ + struct isst_cmd *sst_cmd; + int i; + + hash_for_each(isst_hash, i, sst_cmd, hnode) { + struct isst_if_cmd_cb *cb; + + if (sst_cmd->mbox_cmd_type) { + cb = &punit_callbacks[ISST_IF_DEV_MBOX]; + if (cb->registered) + isst_mbox_resume_command(cb, sst_cmd); + } else { + wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, + sst_cmd->data); + } + } +} +EXPORT_SYMBOL_GPL(isst_resume_common); + +static void isst_restore_msr_local(int cpu) +{ + struct isst_cmd *sst_cmd; + int i; + + mutex_lock(&isst_hash_lock); + for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) { + if (!punit_msr_white_list[i]) + break; + + hash_for_each_possible(isst_hash, sst_cmd, hnode, + punit_msr_white_list[i]) { + if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu) + wrmsrl_safe(sst_cmd->cmd, sst_cmd->data); + } + } + mutex_unlock(&isst_hash_lock); +} + +/** + * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands + * @cmd: Pointer to the command structure to verify. + * + * An invalid command to the PUNIT may result in instability of the platform. + * This function has a whitelist of commands, which are allowed. + * + * Return: Return true if the command is invalid, else false. + */ +bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd) +{ + int i; + + if (cmd->logical_cpu >= nr_cpu_ids) + return true; + + for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) { + if (cmd->command == isst_valid_cmds[i].cmd && + (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg && + cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) { + return false; + } + } + + return true; +} +EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid); + +/** + * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request + * @cmd: Pointer to the command structure to verify. + * + * Check if the given mailbox command is a set request and not a get request. + * + * Return: Return true if the command is set_req, else false.
+ */ +bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) { + if (cmd->command == isst_cmd_set_reqs[i].cmd && + cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd && + cmd->parameter == isst_cmd_set_reqs[i].param) { + return true; + } + } + + return false; +} +EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req); + +static int isst_if_get_platform_info(void __user *argp) +{ + struct isst_if_platform_info info; + + info.api_version = ISST_IF_API_VERSION, + info.driver_version = ISST_IF_DRIVER_VERSION, + info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT, + info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered; + info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered; + + if (copy_to_user(argp, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + + +struct isst_if_cpu_info { + /* For BUS 0 and BUS 1 only, which we need for PUNIT interface */ + int bus_info[2]; + int punit_cpu_id; +}; + +static struct isst_if_cpu_info *isst_cpu_info; + +/** + * isst_if_get_pci_dev() - Get the PCI device instance for a CPU + * @cpu: Logical CPU number. + * @bus_number: The bus number assigned by the hardware. + * @dev: The device number assigned by the hardware. + * @fn: The function number assigned by the hardware. + * + * Using cached bus information, find out the PCI device for a bus number, + * device and function. + * + * Return: Return pci_dev pointer or NULL. + */ +struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn) +{ + int bus_number; + + if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids || + cpu >= num_possible_cpus()) + return NULL; + + bus_number = isst_cpu_info[cpu].bus_info[bus_no]; + if (bus_number < 0) + return NULL; + + return pci_get_domain_bus_and_slot(0, bus_number, PCI_DEVFN(dev, fn)); +} +EXPORT_SYMBOL_GPL(isst_if_get_pci_dev); + +static int isst_if_cpu_online(unsigned int cpu) +{ + u64 data; + int ret; + + ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data); + if (ret) { + /* This is not a fatal error on MSR mailbox only I/F */ + isst_cpu_info[cpu].bus_info[0] = -1; + isst_cpu_info[cpu].bus_info[1] = -1; + } else { + isst_cpu_info[cpu].bus_info[0] = data & 0xff; + isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff; + } + + ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data); + if (ret) { + isst_cpu_info[cpu].punit_cpu_id = -1; + return ret; + } + isst_cpu_info[cpu].punit_cpu_id = data; + + isst_restore_msr_local(cpu); + + return 0; +} + +static int isst_if_online_id; + +static int isst_if_cpu_info_init(void) +{ + int ret; + + isst_cpu_info = kcalloc(num_possible_cpus(), + sizeof(*isst_cpu_info), + GFP_KERNEL); + if (!isst_cpu_info) + return -ENOMEM; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/isst-if:online", + isst_if_cpu_online, NULL); + if (ret < 0) { + kfree(isst_cpu_info); + return ret; + } + + isst_if_online_id = ret; + + return 0; +} + +static void isst_if_cpu_info_exit(void) +{ + cpuhp_remove_state(isst_if_online_id); + kfree(isst_cpu_info); +}; + +static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume) +{ + struct isst_if_cpu_map *cpu_map; + + cpu_map = (struct isst_if_cpu_map *)cmd_ptr; + if (cpu_map->logical_cpu >= nr_cpu_ids || + cpu_map->logical_cpu >= num_possible_cpus()) + return -EINVAL; + + *write_only = 0; + cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id; + + return 0; +} + +static bool match_punit_msr_white_list(int msr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) { + 
if (punit_msr_white_list[i] == msr) + return true; + } + + return false; +} + +static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) +{ + struct isst_if_msr_cmd *msr_cmd; + int ret; + + msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr; + + if (!match_punit_msr_white_list(msr_cmd->msr)) + return -EINVAL; + + if (msr_cmd->logical_cpu >= nr_cpu_ids) + return -EINVAL; + + if (msr_cmd->read_write) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu, + msr_cmd->msr, + msr_cmd->data); + *write_only = 1; + if (!ret && !resume) + ret = isst_store_cmd(0, msr_cmd->msr, + msr_cmd->logical_cpu, + 0, 0, msr_cmd->data); + } else { + u64 data; + + ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu, + msr_cmd->msr, &data); + if (!ret) { + msr_cmd->data = data; + *write_only = 0; + } + } + + + return ret; +} + +static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb) +{ + unsigned char __user *ptr; + u32 cmd_count; + u8 *cmd_ptr; + long ret; + int i; + + /* Each multi command has u32 command count as the first field */ + if (copy_from_user(&cmd_count, argp, sizeof(cmd_count))) + return -EFAULT; + + if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT) + return -EINVAL; + + cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL); + if (!cmd_ptr) + return -ENOMEM; + + /* cb->offset points to start of the command after the command count */ + ptr = argp + cb->offset; + + for (i = 0; i < cmd_count; ++i) { + int wr_only; + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) { + ret = -EFAULT; + break; + } + + ret = cb->cmd_callback(cmd_ptr, &wr_only, 0); + if (ret) + break; + + if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) { + ret = -EFAULT; + break; + } + + ptr += cb->cmd_size; + } + + kfree(cmd_ptr); + + return i ? 
i : ret; +} + +static long isst_if_def_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct isst_if_cmd_cb cmd_cb; + struct isst_if_cmd_cb *cb; + long ret = -ENOTTY; + + switch (cmd) { + case ISST_IF_GET_PLATFORM_INFO: + ret = isst_if_get_platform_info(argp); + break; + case ISST_IF_GET_PHY_ID: + cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map); + cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map); + cmd_cb.cmd_callback = isst_if_proc_phyid_req; + ret = isst_if_exec_multi_cmd(argp, &cmd_cb); + break; + case ISST_IF_IO_CMD: + cb = &punit_callbacks[ISST_IF_DEV_MMIO]; + if (cb->registered) + ret = isst_if_exec_multi_cmd(argp, cb); + break; + case ISST_IF_MBOX_COMMAND: + cb = &punit_callbacks[ISST_IF_DEV_MBOX]; + if (cb->registered) + ret = isst_if_exec_multi_cmd(argp, cb); + break; + case ISST_IF_MSR_COMMAND: + cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd); + cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd); + cmd_cb.cmd_callback = isst_if_msr_cmd_req; + ret = isst_if_exec_multi_cmd(argp, &cmd_cb); + break; + default: + break; + } + + return ret; +} + +static DEFINE_MUTEX(punit_misc_dev_lock); +static int misc_usage_count; +static int misc_device_ret; +static int misc_device_open; + +static int isst_if_open(struct inode *inode, struct file *file) +{ + int i, ret = 0; + + /* Fail open, if a module is going away */ + mutex_lock(&punit_misc_dev_lock); + for (i = 0; i < ISST_IF_DEV_MAX; ++i) { + struct isst_if_cmd_cb *cb = &punit_callbacks[i]; + + if (cb->registered && !try_module_get(cb->owner)) { + ret = -ENODEV; + break; + } + } + if (ret) { + int j; + + for (j = 0; j < i; ++j) { + struct isst_if_cmd_cb *cb; + + cb = &punit_callbacks[j]; + if (cb->registered) + module_put(cb->owner); + } + } else { + misc_device_open++; + } + mutex_unlock(&punit_misc_dev_lock); + + return ret; +} + +static int isst_if_relase(struct inode *inode, struct file *f) +{ + int i; + + mutex_lock(&punit_misc_dev_lock); + misc_device_open--; + for (i = 0; i < ISST_IF_DEV_MAX; ++i) { + struct isst_if_cmd_cb *cb = &punit_callbacks[i]; + + if (cb->registered) + module_put(cb->owner); + } + mutex_unlock(&punit_misc_dev_lock); + + return 0; +} + +static const struct file_operations isst_if_char_driver_ops = { + .open = isst_if_open, + .unlocked_ioctl = isst_if_def_ioctl, + .release = isst_if_relase, +}; + +static struct miscdevice isst_if_char_driver = { + .minor = MISC_DYNAMIC_MINOR, + .name = "isst_interface", + .fops = &isst_if_char_driver_ops, +}; + +/** + * isst_if_cdev_register() - Register callback for IOCTL + * @device_type: The device type this callback handling. + * @cb: Callback structure. + * + * This function registers a callback to device type. On very first call + * it will register a misc device, which is used for user kernel interface. + * Other calls simply increment ref count. Registry will fail, if the user + * already opened misc device for operation. Also if the misc device + * creation failed, then it will not try again and all callers will get + * failure code. + * + * Return: Return the return value from the misc creation device or -EINVAL + * for unsupported device type. 
+ */ +int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) +{ + if (misc_device_ret) + return misc_device_ret; + + if (device_type >= ISST_IF_DEV_MAX) + return -EINVAL; + + mutex_lock(&punit_misc_dev_lock); + if (misc_device_open) { + mutex_unlock(&punit_misc_dev_lock); + return -EAGAIN; + } + if (!misc_usage_count) { + int ret; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) + goto unlock_exit; + + ret = isst_if_cpu_info_init(); + if (ret) { + misc_deregister(&isst_if_char_driver); + misc_device_ret = ret; + goto unlock_exit; + } + } + memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); + punit_callbacks[device_type].registered = 1; + misc_usage_count++; +unlock_exit: + mutex_unlock(&punit_misc_dev_lock); + + return misc_device_ret; +} +EXPORT_SYMBOL_GPL(isst_if_cdev_register); + +/** + * isst_if_cdev_unregister() - Unregister callback for IOCTL + * @device_type: The device type to unregister. + * + * This function unregisters the previously registered callback. If this + * is the last callback unregistering, then the misc device is removed. + * + * Return: None. + */ +void isst_if_cdev_unregister(int device_type) +{ + mutex_lock(&punit_misc_dev_lock); + misc_usage_count--; + punit_callbacks[device_type].registered = 0; + if (device_type == ISST_IF_DEV_MBOX) + isst_delete_hash(); + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_lock); +} +EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h new file mode 100644 index 0000000000000000000000000000000000000000..1409a5bb55820100a7362b1ff87fbbe5357eed62 --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel Speed Select Interface: Drivers Internal defines + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Author: Srinivas Pandruvada + */ + +#ifndef __ISST_IF_COMMON_H +#define __ISST_IF_COMMON_H + +#define INTEL_RAPL_PRIO_DEVID_0 0x3451 +#define INTEL_CFG_MBOX_DEVID_0 0x3459 + +/* + * Validate maximum commands in a single request. + * This is enough to handle a command to every core in one ioctl, or all + * possible message ids to one CPU. The limit also helps response time + * per IOCTL request, as the PUNIT may take different times to process each + * request and may stall for long when there are too many commands. + */ +#define ISST_IF_CMD_LIMIT 64 + +#define ISST_IF_API_VERSION 0x01 +#define ISST_IF_DRIVER_VERSION 0x01 +#define ISST_IF_DEV_MBOX 0 +#define ISST_IF_DEV_MMIO 1 +#define ISST_IF_DEV_MAX 2 + +/** + * struct isst_if_cmd_cb - Used to register an IOCTL handler + * @registered: Used by the common code to store registry. Callers don't need + * to touch this field + * @cmd_size: The command size of the individual command in IOCTL + * @offset: Offset to the first valid member in command structure. + * This will be the offset of the start of the command + * after command count field + * @cmd_callback: Callback function to handle IOCTL. The callback has the + * command pointer with data for command. There is a pointer + * called write_only, which when set, will not copy the + * response to user ioctl buffer.
The "resume" argument + * can be used to avoid storing the command for replay + * during system resume + * + * This structure is used to register an handler for IOCTL. To avoid + * code duplication common code handles all the IOCTL command read/write + * including handling multiple command in single IOCTL. The caller just + * need to execute a command via the registered callback. + */ +struct isst_if_cmd_cb { + int registered; + int cmd_size; + int offset; + struct module *owner; + long (*cmd_callback)(u8 *ptr, int *write_only, int resume); +}; + +/* Internal interface functions */ +int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb); +void isst_if_cdev_unregister(int type); +struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn); +bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *mbox_cmd); +bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd); +int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd, + u32 param, u64 data); +void isst_resume_common(void); +#endif diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c new file mode 100644 index 0000000000000000000000000000000000000000..89b042aecef3aa263ce371725b8bb877423d72cd --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Speed Select Interface: Mbox via MSR Interface + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Author: Srinivas Pandruvada + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "isst_if_common.h" + +#define MSR_OS_MAILBOX_INTERFACE 0xB0 +#define MSR_OS_MAILBOX_DATA 0xB1 +#define MSR_OS_MAILBOX_BUSY_BIT 31 + +/* + * Based on experiments count is never more than 1, as the MSR overhead + * is enough to finish the command. So here this is the worst case number. + */ +#define OS_MAILBOX_RETRY_COUNT 3 + +static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, + u32 command_data, u32 *response_data) +{ + u32 retries; + u64 data; + int ret; + + /* Poll for rb bit == 0 */ + retries = OS_MAILBOX_RETRY_COUNT; + do { + rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { + ret = -EBUSY; + continue; + } + ret = 0; + break; + } while (--retries); + + if (ret) + return ret; + + /* Write DATA register */ + wrmsrl(MSR_OS_MAILBOX_DATA, command_data); + + /* Write command register */ + data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) | + (parameter & GENMASK_ULL(13, 0)) << 16 | + (sub_command << 8) | + command; + wrmsrl(MSR_OS_MAILBOX_INTERFACE, data); + + /* Poll for rb bit == 0 */ + retries = OS_MAILBOX_RETRY_COUNT; + do { + rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { + ret = -EBUSY; + continue; + } + + if (data & 0xff) + return -ENXIO; + + if (response_data) { + rdmsrl(MSR_OS_MAILBOX_DATA, data); + *response_data = data; + } + ret = 0; + break; + } while (--retries); + + return ret; +} + +struct msrl_action { + int err; + struct isst_if_mbox_cmd *mbox_cmd; +}; + +/* revisit, smp_call_function_single should be enough for atomic mailbox! 
*/ +static void msrl_update_func(void *info) +{ + struct msrl_action *act = info; + + act->err = isst_if_send_mbox_cmd(act->mbox_cmd->command, + act->mbox_cmd->sub_command, + act->mbox_cmd->parameter, + act->mbox_cmd->req_data, + &act->mbox_cmd->resp_data); +} + +static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) +{ + struct msrl_action action; + int ret; + + action.mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr; + + if (isst_if_mbox_cmd_invalid(action.mbox_cmd)) + return -EINVAL; + + if (isst_if_mbox_cmd_set_req(action.mbox_cmd) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + + /* + * To complete mailbox command, we need to access two MSRs. + * So we don't want race to complete a mailbox transcation. + * Here smp_call ensures that msrl_update_func() has no race + * and also with wait flag, wait for completion. + * smp_call_function_single is using get_cpu() and put_cpu(). + */ + ret = smp_call_function_single(action.mbox_cmd->logical_cpu, + msrl_update_func, &action, 1); + if (ret) + return ret; + + if (!action.err && !resume && isst_if_mbox_cmd_set_req(action.mbox_cmd)) + action.err = isst_store_cmd(action.mbox_cmd->command, + action.mbox_cmd->sub_command, + action.mbox_cmd->logical_cpu, 1, + action.mbox_cmd->parameter, + action.mbox_cmd->req_data); + *write_only = 0; + + return action.err; +} + + +static int isst_pm_notify(struct notifier_block *nb, + unsigned long mode, void *_unused) +{ + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + isst_resume_common(); + break; + default: + break; + } + return 0; +} + +static struct notifier_block isst_pm_nb = { + .notifier_call = isst_pm_notify, +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id isst_if_cpu_ids[] = { + ICPU(INTEL_FAM6_SKYLAKE_X), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids); + +static int __init isst_if_mbox_init(void) +{ + struct isst_if_cmd_cb cb; + const struct x86_cpu_id *id; + u64 data; + int ret; + + id = x86_match_cpu(isst_if_cpu_ids); + if (!id) + return -ENODEV; + + /* Check presence of mailbox MSRs */ + ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data); + if (ret) + return ret; + + ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data); + if (ret) + return ret; + + memset(&cb, 0, sizeof(cb)); + cb.cmd_size = sizeof(struct isst_if_mbox_cmd); + cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd); + cb.cmd_callback = isst_if_mbox_proc_cmd; + cb.owner = THIS_MODULE; + ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb); + if (ret) + return ret; + + ret = register_pm_notifier(&isst_pm_nb); + if (ret) + isst_if_cdev_unregister(ISST_IF_DEV_MBOX); + + return ret; +} +module_init(isst_if_mbox_init) + +static void __exit isst_if_mbox_exit(void) +{ + unregister_pm_notifier(&isst_pm_nb); + isst_if_cdev_unregister(ISST_IF_DEV_MBOX); +} +module_exit(isst_if_mbox_exit) + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel speed select interface mailbox driver"); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..d84e2174cbdebe71d9bd48b547ac4cf9c846e3de --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Speed Select Interface: Mbox via PCI Interface + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. 
+ * + * Author: Srinivas Pandruvada + */ + +#include +#include +#include +#include +#include +#include + +#include "isst_if_common.h" + +#define PUNIT_MAILBOX_DATA 0xA0 +#define PUNIT_MAILBOX_INTERFACE 0xA4 +#define PUNIT_MAILBOX_BUSY_BIT 31 + +/* + * The average time to complete some commands is about 40us. The current + * count is enough to satisfy 40us. But when the firmware is very busy, this + * causes timeout occasionally. So increase to deal with some worst case + * scenarios. Most of the command still complete in few us. + */ +#define OS_MAILBOX_RETRY_COUNT 100 + +struct isst_if_device { + struct mutex mutex; +}; + +static int isst_if_mbox_cmd(struct pci_dev *pdev, + struct isst_if_mbox_cmd *mbox_cmd) +{ + u32 retries, data; + int ret; + + /* Poll for rb bit == 0 */ + retries = OS_MAILBOX_RETRY_COUNT; + do { + ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, + &data); + if (ret) + return ret; + + if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) { + ret = -EBUSY; + continue; + } + ret = 0; + break; + } while (--retries); + + if (ret) + return ret; + + /* Write DATA register */ + ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_DATA, + mbox_cmd->req_data); + if (ret) + return ret; + + /* Write command register */ + data = BIT_ULL(PUNIT_MAILBOX_BUSY_BIT) | + (mbox_cmd->parameter & GENMASK_ULL(13, 0)) << 16 | + (mbox_cmd->sub_command << 8) | + mbox_cmd->command; + + ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, data); + if (ret) + return ret; + + /* Poll for rb bit == 0 */ + retries = OS_MAILBOX_RETRY_COUNT; + do { + ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, + &data); + if (ret) + return ret; + + if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) { + ret = -EBUSY; + continue; + } + + if (data & 0xff) + return -ENXIO; + + ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_DATA, &data); + if (ret) + return ret; + + mbox_cmd->resp_data = data; + ret = 0; + break; + } while (--retries); + + return ret; +} + +static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) +{ + struct isst_if_mbox_cmd *mbox_cmd; + struct isst_if_device *punit_dev; + struct pci_dev *pdev; + int ret; + + mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr; + + if (isst_if_mbox_cmd_invalid(mbox_cmd)) + return -EINVAL; + + if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + pdev = isst_if_get_pci_dev(mbox_cmd->logical_cpu, 1, 30, 1); + if (!pdev) + return -EINVAL; + + punit_dev = pci_get_drvdata(pdev); + if (!punit_dev) + return -EINVAL; + + /* + * Basically we are allowing one complete mailbox transaction on + * a mapped PCI device at a time. 
+ */ + mutex_lock(&punit_dev->mutex); + ret = isst_if_mbox_cmd(pdev, mbox_cmd); + if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd)) + ret = isst_store_cmd(mbox_cmd->command, + mbox_cmd->sub_command, + mbox_cmd->logical_cpu, 1, + mbox_cmd->parameter, + mbox_cmd->req_data); + mutex_unlock(&punit_dev->mutex); + if (ret) + return ret; + + *write_only = 0; + + return 0; +} + +static const struct pci_device_id isst_if_mbox_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids); + +static int isst_if_mbox_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct isst_if_device *punit_dev; + struct isst_if_cmd_cb cb; + int ret; + + punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL); + if (!punit_dev) + return -ENOMEM; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + mutex_init(&punit_dev->mutex); + pci_set_drvdata(pdev, punit_dev); + + memset(&cb, 0, sizeof(cb)); + cb.cmd_size = sizeof(struct isst_if_mbox_cmd); + cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd); + cb.cmd_callback = isst_if_mbox_proc_cmd; + cb.owner = THIS_MODULE; + ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb); + + if (ret) + mutex_destroy(&punit_dev->mutex); + + return ret; +} + +static void isst_if_mbox_remove(struct pci_dev *pdev) +{ + struct isst_if_device *punit_dev; + + punit_dev = pci_get_drvdata(pdev); + isst_if_cdev_unregister(ISST_IF_DEV_MBOX); + mutex_destroy(&punit_dev->mutex); +} + +static int __maybe_unused isst_if_resume(struct device *device) +{ + isst_resume_common(); + return 0; +} + +static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, NULL, isst_if_resume); + +static struct pci_driver isst_if_pci_driver = { + .name = "isst_if_mbox_pci", + .id_table = isst_if_mbox_ids, + .probe = isst_if_mbox_probe, + .remove = isst_if_mbox_remove, + .driver.pm = &isst_if_pm_ops, +}; + +module_pci_driver(isst_if_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel speed select interface pci mailbox driver"); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c new file mode 100644 index 0000000000000000000000000000000000000000..bd3b867a495809dc0d602dc17c37b316181bc265 --- /dev/null +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Speed Select Interface: MMIO Interface + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. 
+ * + * Author: Srinivas Pandruvada + */ + +#include +#include +#include +#include +#include + +#include "isst_if_common.h" + +struct isst_mmio_range { + int beg; + int end; +}; + +struct isst_mmio_range mmio_range[] = { + {0x04, 0x14}, + {0x20, 0xD0}, +}; + +struct isst_if_device { + void __iomem *punit_mmio; + u32 range_0[5]; + u32 range_1[45]; + struct mutex mutex; +}; + +static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume) +{ + struct isst_if_device *punit_dev; + struct isst_if_io_reg *io_reg; + struct pci_dev *pdev; + + io_reg = (struct isst_if_io_reg *)cmd_ptr; + if (io_reg->reg < 0x04 || io_reg->reg > 0xD0) + return -EINVAL; + + if (io_reg->read_write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + pdev = isst_if_get_pci_dev(io_reg->logical_cpu, 0, 0, 1); + if (!pdev) + return -EINVAL; + + punit_dev = pci_get_drvdata(pdev); + if (!punit_dev) + return -EINVAL; + + /* + * Ensure that operation is complete on a PCI device to avoid read + * write race by using per PCI device mutex. + */ + mutex_lock(&punit_dev->mutex); + if (io_reg->read_write) { + writel(io_reg->value, punit_dev->punit_mmio+io_reg->reg); + *write_only = 1; + } else { + io_reg->value = readl(punit_dev->punit_mmio+io_reg->reg); + *write_only = 0; + } + mutex_unlock(&punit_dev->mutex); + + return 0; +} + +static const struct pci_device_id isst_if_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, isst_if_ids); + +static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct isst_if_device *punit_dev; + struct isst_if_cmd_cb cb; + u32 mmio_base, pcu_base; + u64 base_addr; + int ret; + + punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL); + if (!punit_dev) + return -ENOMEM; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pci_read_config_dword(pdev, 0xD0, &mmio_base); + if (ret) + return ret; + + ret = pci_read_config_dword(pdev, 0xFC, &pcu_base); + if (ret) + return ret; + + pcu_base &= GENMASK(10, 0); + base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12; + punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256); + if (!punit_dev->punit_mmio) + return -ENOMEM; + + mutex_init(&punit_dev->mutex); + pci_set_drvdata(pdev, punit_dev); + + memset(&cb, 0, sizeof(cb)); + cb.cmd_size = sizeof(struct isst_if_io_reg); + cb.offset = offsetof(struct isst_if_io_regs, io_reg); + cb.cmd_callback = isst_if_mmio_rd_wr; + cb.owner = THIS_MODULE; + ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb); + if (ret) + mutex_destroy(&punit_dev->mutex); + + return ret; +} + +static void isst_if_remove(struct pci_dev *pdev) +{ + struct isst_if_device *punit_dev; + + punit_dev = pci_get_drvdata(pdev); + isst_if_cdev_unregister(ISST_IF_DEV_MMIO); + mutex_destroy(&punit_dev->mutex); +} + +static int __maybe_unused isst_if_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct isst_if_device *punit_dev; + int i; + + punit_dev = pci_get_drvdata(pdev); + for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i) + punit_dev->range_0[i] = readl(punit_dev->punit_mmio + + mmio_range[0].beg + 4 * i); + for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) + punit_dev->range_1[i] = readl(punit_dev->punit_mmio + + mmio_range[1].beg + 4 * i); + + return 0; +} + +static int __maybe_unused isst_if_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct isst_if_device *punit_dev; + int i; + + punit_dev = pci_get_drvdata(pdev); + for (i = 0; i < 
ARRAY_SIZE(punit_dev->range_0); ++i) + writel(punit_dev->range_0[i], punit_dev->punit_mmio + + mmio_range[0].beg + 4 * i); + for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) + writel(punit_dev->range_1[i], punit_dev->punit_mmio + + mmio_range[1].beg + 4 * i); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, isst_if_suspend, isst_if_resume); + +static struct pci_driver isst_if_pci_driver = { + .name = "isst_if_pci", + .id_table = isst_if_ids, + .probe = isst_if_probe, + .remove = isst_if_remove, + .driver.pm = &isst_if_pm_ops, +}; + +module_pci_driver(isst_if_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel speed select interface mmio driver"); diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ffd0474b0531116a308343514950684a250b0b77..b998d7da97fbdff22c95766711087c861406ba77 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -320,7 +320,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = { TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf), - TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_apl_debugfs_conf), + TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf), {} }; @@ -951,12 +951,16 @@ static int __init telemetry_debugfs_init(void) debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data; err = telemetry_pltconfig_valid(); - if (err < 0) + if (err < 0) { + pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n"); return -ENODEV; + } err = telemetry_debugfs_check_evts(); - if (err < 0) + if (err < 0) { + pr_info("telemetry_debugfs_check_evts failed\n"); return -EINVAL; + } register_pm_notifier(&pm_notifier); diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index 2f889d6c270e85c50fd8296af3636cc216e84ed9..fcc6bee51a422a1c95e205f0d2874fc746ed09e2 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -192,7 +192,7 @@ static struct telemetry_plt_config telem_glk_config = { static const struct x86_cpu_id telemetry_cpu_ids[] = { TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config), - TELEM_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_glk_config), + TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config), {} }; diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index d89936c93ba0fe069900d739e6f8d8ac1e02f902..69e28c12d59157499ab1bb42978c09fc767ba63e 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -83,12 +83,12 @@ #define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7 #define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8 #define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9 -#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea -#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb -#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec -#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed -#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee -#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef +#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb +#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec +#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed +#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee +#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef +#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0 #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define 
MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda @@ -575,7 +575,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = { static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = { - .items = mlxplat_mlxcpld_msn21xx_items, + .items = mlxplat_mlxcpld_msn201x_items, .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items), .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_DEF, @@ -1421,7 +1421,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data; mlxplat_hotplug->deferred_nr = mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; - mlxplat_led = &mlxplat_default_ng_led_data; + mlxplat_led = &mlxplat_msn21xx_led_data; mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data; return 1; @@ -1439,7 +1439,7 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data; mlxplat_hotplug->deferred_nr = mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; - mlxplat_led = &mlxplat_msn21xx_led_data; + mlxplat_led = &mlxplat_default_ng_led_data; mlxplat_fan = &mlxplat_default_fan_data; return 1; @@ -1626,7 +1626,7 @@ static int __init mlxplat_init(void) for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { priv->pdev_mux[i] = platform_device_register_resndata( - &mlxplat_dev->dev, + &priv->pdev_i2c->dev, "i2c-mux-reg", i, NULL, 0, &mlxplat_mux_data[i], sizeof(mlxplat_mux_data[i])); diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 8361ad75389a96f4c8f7eb87ff57ddb48f2a3380..71e55d90f7128332d43948cec9144e45b7035b4c 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -429,6 +429,18 @@ static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL); static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL); static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky); +static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (attr == &dev_attr_mute.attr) + return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0; + + return attr->mode; +} + static struct attribute *pcc_sysfs_entries[] = { &dev_attr_numbatt.attr, &dev_attr_lcdtype.attr, @@ -438,8 +450,9 @@ static struct attribute *pcc_sysfs_entries[] = { }; static const struct attribute_group pcc_attr_group = { - .name = NULL, /* put in device directory */ - .attrs = pcc_sysfs_entries, + .name = NULL, /* put in device directory */ + .attrs = pcc_sysfs_entries, + .is_visible = pcc_sysfs_is_visible, }; @@ -559,8 +572,12 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) num_sifr = acpi_pcc_get_sqty(device); - if (num_sifr < 0 || num_sifr > 255) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range")); + /* + * pcc->sinf is expected to at least have the AC+DC brightness entries. + * Accesses to higher SINF entries are checked against num_sifr. 
+ */ + if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1)); return -ENODEV; } diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 8f018b3f3cd4c42ef40764d39587e7e2e11285d1..9c94ebb251cb5f1f5a6a64312423590ccb1860a5 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -391,11 +392,83 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) } #endif /* CONFIG_DEBUG_FS */ +/* + * Some systems need one or more of their pmc_plt_clks to be + * marked as critical. + */ +static const struct dmi_system_id critclk_systems[] = { + { + /* pmc_plt_clk0 is used for an external HSIC USB HUB */ + .ident = "MPL CEC1x", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), + }, + }, + { + /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */ + .ident = "Lex 3I380D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"), + DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB3163", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB3163"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB4063", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB4063"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6263", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6263"), + }, + }, + { + /* pmc_plt_clk* - are used for ethernet controllers */ + .ident = "Beckhoff CB6363", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"), + DMI_MATCH(DMI_BOARD_NAME, "CB6363"), + }, + }, + { + .ident = "SIMATIC IPC227E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), + DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"), + }, + }, + { + .ident = "CONNECT X300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), + DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"), + }, + }, + + { /*sentinel*/ } +}; + static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, const struct pmc_data *pmc_data) { struct platform_device *clkdev; struct pmc_clk_data *clk_data; + const struct dmi_system_id *d = dmi_first_match(critclk_systems); clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (!clk_data) @@ -403,6 +476,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, clk_data->base = pmc_regmap; /* offset is added by client */ clk_data->clks = pmc_data->clks; + if (d) { + clk_data->critical = true; + pr_info("%s critclks quirk enabled\n", d->ident); + } clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", PLATFORM_DEVID_NONE, diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index b205b037fd61e6cb71eb16d5c0ee019569de71aa..b50f8f73fb4788a61fa46cb4638fba6121af0f6d 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) } return AE_OK; } + + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + default: dprintk("Resource %d isn't an IRQ nor an IO port\n", resource->type); + return AE_CTRL_TERMINATE; - 
case ACPI_RESOURCE_TYPE_END_TAG: - return AE_OK; } - return AE_CTRL_TERMINATE; } static int sony_pic_possible_resources(struct acpi_device *device) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index fde08a997557fed3aeef16b3729ef1b8c85efe75..8f85bb4fe7844112dd4cc3a11377b1aa18defdc2 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -79,7 +79,7 @@ #include #include #include -#include +#include #include #include #include @@ -4496,6 +4496,74 @@ static void bluetooth_exit(void) bluetooth_shutdown(); } +static const struct dmi_system_id bt_fwbug_list[] __initconst = { + { + .ident = "ThinkPad E485", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20KU"), + }, + }, + { + .ident = "ThinkPad E585", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20KV"), + }, + }, + { + .ident = "ThinkPad A285 - 20MW", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20MW"), + }, + }, + { + .ident = "ThinkPad A285 - 20MX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20MX"), + }, + }, + { + .ident = "ThinkPad A485 - 20MU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20MU"), + }, + }, + { + .ident = "ThinkPad A485 - 20MV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_BOARD_NAME, "20MV"), + }, + }, + {} +}; + +static const struct pci_device_id fwbug_cards_ids[] __initconst = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) }, + {} +}; + + +static int __init have_bt_fwbug(void) +{ + /* + * Some AMD based ThinkPads have a firmware bug that calling + * "GBDC" will cause bluetooth on Intel wireless cards blocked + */ + if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) { + vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL, + FW_BUG "disable bluetooth subdriver for Intel cards\n"); + return 1; + } else + return 0; +} + static int __init bluetooth_init(struct ibm_init_struct *iibm) { int res; @@ -4508,7 +4576,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm) /* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, G4x, R30, R31, R40e, R50e, T20-22, X20-21 */ - tp_features.bluetooth = hkey_handle && + tp_features.bluetooth = !have_bt_fwbug() && hkey_handle && acpi_evalf(hkey_handle, &status, "GBDC", "qd"); vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL, diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 04791ea5d97b69f9ed9fc30fb7de46cf2370acbe..eba605b967cbf0edd0d103efd00ccb8e8f0cc80a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -783,21 +783,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver) } static int wmi_char_open(struct inode *inode, struct file *filp) { - const char *driver_name = filp->f_path.dentry->d_iname; - struct wmi_block *wblock = NULL; - struct wmi_block *next = NULL; - - list_for_each_entry_safe(wblock, next, &wmi_block_list, list) { - if (!wblock->dev.dev.driver) - continue; - if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) { - filp->private_data = wblock; - break; - } - } + /* + * The miscdevice already stores a pointer to itself + * inside filp->private_data + */ + struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev); - if (!filp->private_data) - 
return -ENODEV; + filp->private_data = wblock; return nonseekable_open(inode, filp); } diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index 262285e48a09481807940bea4aa4e53e45f9bb3d..0516131408121aa43d7a7284340cb15423015409 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c @@ -47,7 +47,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf, nbytes = size - pos; cnt = nbytes; - if (!access_ok(VERIFY_WRITE, buf, cnt)) + if (!access_ok(buf, cnt)) return -EINVAL; isapnp_cfg_begin(dev->card->number, dev->number); diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c index 1360a7fa542c5dca0a79c4b611f2518e5af556f4..8760477d0e8af2db32ee1435a722c8accc6e0e96 100644 --- a/drivers/power/avs/smartreflex.c +++ b/drivers/power/avs/smartreflex.c @@ -1010,8 +1010,7 @@ static int omap_sr_remove(struct platform_device *pdev) if (sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); - if (sr_info->dbg_dir) - debugfs_remove_recursive(sr_info->dbg_dir); + debugfs_remove_recursive(sr_info->dbg_dir); pm_runtime_disable(&pdev->dev); list_del(&sr_info->node); diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index 0206cce328b3da69feb45917114debc5c849d577..d9493e893d64e2cef36a697a3d6c0fe36c052944 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -246,6 +246,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev) if (!pdev->dev.of_node) return -ENODEV; + if (at91_shdwc) + return -EBUSY; + at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL); if (!at91_shdwc) return -ENOMEM; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 02356f9b5f22a4e89ef74160a8a54e88b117e733..8bb89c697c1ebfe2563e65f4dac2fbb1db97986f 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2433,17 +2433,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf, size_t count) { unsigned long charge_full; - ssize_t ret; + int ret; ret = kstrtoul(buf, 10, &charge_full); + if (ret) + return ret; - dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full); - - if (!ret) { - di->bat_cap.max_mah = (int) charge_full; - ret = count; - } - return ret; + di->bat_cap.max_mah = (int) charge_full; + return count; } static ssize_t charge_now_show(struct ab8500_fg *di, char *buf) @@ -2455,20 +2452,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf, size_t count) { unsigned long charge_now; - ssize_t ret; + int ret; ret = kstrtoul(buf, 10, &charge_now); + if (ret) + return ret; - dev_dbg(di->dev, "Ret %zd charge_now %lu was %d", - ret, charge_now, di->bat_cap.prev_mah); - - if (!ret) { - di->bat_cap.user_mah = (int) charge_now; - di->flags.user_cap = true; - ret = count; - queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); - } - return ret; + di->bat_cap.user_mah = (int) charge_now; + di->flags.user_cap = true; + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); + return count; } static struct ab8500_fg_sysfs_entry charge_full_attr = diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 735658ee1c60f3fd42adb6b11d66ba5f238861f9..c60659fb21deca7f2fd0c53c41e22e23802d2bb0 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev) /* Register charger interrupts 
*/ for (i = 0; i < CHRG_INTR_END; i++) { pirq = platform_get_irq(info->pdev, i); + if (pirq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq); + return pirq; + } info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq); if (info->irq[i] < 0) { dev_warn(&info->pdev->dev, diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 084c8ba9749d7cc45500c4e419fb18855252e44d..ab0b6e78ca02a6811ee2a5dcd7fd1fa5b392908c 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -695,6 +695,26 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info) * detection reports one despite it not being there. */ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { + { + /* ACEPC T8 Cherry Trail Z8350 mini PC */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"), + /* also match on somewhat unique bios-version */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), + }, + }, + { + /* ACEPC T11 Cherry Trail Z8350 mini PC */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"), + /* also match on somewhat unique bios-version */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), + }, + }, { /* Intel Cherry Trail Compute Stick, Windows version */ .matches = { diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index b58df04d03b33804193cec98eaf2da4f3b6f823f..760feee2cad9f1eb6f36925d0da6e7fe052050e3 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1821,6 +1821,7 @@ static int bq24190_remove(struct i2c_client *client) struct bq24190_dev_info *bdi = i2c_get_clientdata(client); int error; + cancel_delayed_work_sync(&bdi->input_current_limit_work); error = pm_runtime_get_sync(bdi->dev); if (error < 0) { dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index faa1a67cf3d2c704bce6ceaea6c9e555b84d8ddb..7ae983e37f6485fb8be94e43b4a6328050185d4e 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -1212,7 +1212,6 @@ static int charger_extcon_init(struct charger_manager *cm, if (ret < 0) { pr_info("Cannot register extcon_dev for %s(cable: %s)\n", cable->extcon_name, cable->name); - ret = -EINVAL; } return ret; @@ -1633,7 +1632,7 @@ static int charger_manager_probe(struct platform_device *pdev) if (IS_ERR(desc)) { dev_err(&pdev->dev, "No platform data (desc) found\n"); - return -ENODEV; + return PTR_ERR(desc); } cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL); diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 98ba07869c3b00afb16d8f4d71d4ece9f855c40d..e183a22de71538685d9e700041d079fd5ee4fda2 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -82,7 +82,7 @@ struct cpcap_battery_config { }; struct cpcap_coulomb_counter_data { - s32 sample; /* 24-bits */ + s32 sample; /* 24 or 32 bits */ s32 accumulator; s16 offset; /* 10-bits */ }; @@ -213,7 +213,7 @@ static int cpcap_battery_get_current(struct cpcap_battery_ddata *ddata) * TI or ST coulomb counter in the PMIC. 
*/ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata, - u32 sample, s32 accumulator, + s32 sample, s32 accumulator, s16 offset, u32 divider) { s64 acc; @@ -221,7 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata, int avg_current; u32 cc_lsb; - sample &= 0xffffff; /* 24-bits, unsigned */ + if (!divider) + return 0; + offset &= 0x7ff; /* 10-bits, signed */ switch (ddata->vendor) { @@ -256,7 +258,7 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata, /* 3600000μAms = 1μAh */ static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata, - u32 sample, s32 accumulator, + s32 sample, s32 accumulator, s16 offset) { return cpcap_battery_cc_raw_div(ddata, sample, @@ -265,7 +267,7 @@ static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata, } static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata, - u32 sample, s32 accumulator, + s32 sample, s32 accumulator, s16 offset) { return cpcap_battery_cc_raw_div(ddata, sample, @@ -309,6 +311,8 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata, /* Sample value CPCAP_REG_CCS1 & 2 */ ccd->sample = (buf[1] & 0x0fff) << 16; ccd->sample |= buf[0]; + if (ddata->vendor == CPCAP_VENDOR_TI) + ccd->sample = sign_extend32(24, ccd->sample); /* Accumulator value CPCAP_REG_CCA1 & 2 */ ccd->accumulator = ((s16)buf[3]) << 16; diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c index e4905bef26637aa567c01ed2cf018dc948a3ecaf..37be541e057d117d50fa631e788a08fdd13e4db7 100644 --- a/drivers/power/supply/cpcap-charger.c +++ b/drivers/power/supply/cpcap-charger.c @@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work) goto out_err; } + power_supply_changed(ddata->usb); return; out_err: diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c index 60099815296e75d86cfcde6a18d14c428214f987..b2d38eb32288a78d873e0e423010a0f74b46240c 100644 --- a/drivers/power/supply/da9150-charger.c +++ b/drivers/power/supply/da9150-charger.c @@ -666,6 +666,7 @@ static int da9150_charger_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(charger->usb_phy)) usb_unregister_notifier(charger->usb_phy, &charger->otg_nb); + cancel_work_sync(&charger->otg_work); power_supply_unregister(charger->battery); power_supply_unregister(charger->usb); diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c index b91b1d2999dc6d11ca4112ed160957947192843d..9e6472834e373010f34b3489895bfb88353b3885 100644 --- a/drivers/power/supply/max14656_charger_detector.c +++ b/drivers/power/supply/max14656_charger_detector.c @@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; +static void stop_irq_work(void *data) +{ + struct max14656_chip *chip = data; + + cancel_delayed_work_sync(&chip->irq_work); +} + + static int max14656_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -278,7 +286,19 @@ static int max14656_probe(struct i2c_client *client, if (ret) return -ENODEV; + chip->detect_psy = devm_power_supply_register(dev, + &chip->psy_desc, &psy_cfg); + if (IS_ERR(chip->detect_psy)) { + dev_err(dev, "power_supply_register failed\n"); + return -EINVAL; + } + INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker); + ret = devm_add_action(dev, stop_irq_work, chip); + if (ret) { + dev_err(dev, "devm_add_action %d failed\n", ret); + return ret; + } ret = devm_request_irq(dev, chip->irq, 
max14656_irq, IRQF_TRIGGER_FALLING, @@ -289,13 +309,6 @@ static int max14656_probe(struct i2c_client *client, } enable_irq_wake(chip->irq); - chip->detect_psy = devm_power_supply_register(dev, - &chip->psy_desc, &psy_cfg); - if (IS_ERR(chip->detect_psy)) { - dev_err(dev, "power_supply_register failed\n"); - return -EINVAL; - } - schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000)); return 0; diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c index cad7d1a8feec7d95ad0ec9399d6cfae68c741063..aa65e6c36c55eb7d3e03629032a817bfe147e804 100644 --- a/drivers/power/supply/max8998_charger.c +++ b/drivers/power/supply/max8998_charger.c @@ -86,7 +86,7 @@ static const struct power_supply_desc max8998_battery_desc = { static int max8998_battery_probe(struct platform_device *pdev) { struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev); + struct max8998_platform_data *pdata = iodev->pdata; struct power_supply_config psy_cfg = {}; struct max8998_battery_data *max8998; struct i2c_client *i2c; diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c index 6da79ae14860113b4d9dcec29c13343d54e2994d..5a97e42a35473d56d5acfb70ee63379a375ef541 100644 --- a/drivers/power/supply/olpc_battery.c +++ b/drivers/power/supply/olpc_battery.c @@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; - val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256; + val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256; break; case POWER_SUPPLY_PROP_TEMP_AMBIENT: ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2); if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 100 / 256; + val->intval = (int)be16_to_cpu(ec_word) * 10 / 256; break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 6170ed8b6854b9d53d6cdf25dd27ded822923152..5358a80d854f99e0157a38bea979eb1b0912f46b 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -131,7 +131,8 @@ static ssize_t power_supply_show_property(struct device *dev, dev_dbg(dev, "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV && ret != -EAGAIN) - dev_err(dev, "driver failed to report `%s' property: %zd\n", + dev_err_ratelimited(dev, + "driver failed to report `%s' property: %zd\n", attr->attr.name, ret); return ret; } @@ -382,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) char *prop_buf; char *attrname; - dev_dbg(dev, "uevent\n"); - if (!psy || !psy->desc) { dev_dbg(dev, "No power supply yet\n"); return ret; } - dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name); - ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name); if (ret) return ret; @@ -426,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) goto out; } - dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); kfree(attrname); if (ret) diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 8ba6abf584de2b6ca5a66573dd8cf2b873f023f7..3958ee03eec1450d3ef1ae32542af835c00c9618 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -323,17 +323,22 @@ static int 
sbs_get_battery_presence_and_health( { int ret; - if (psp == POWER_SUPPLY_PROP_PRESENT) { - /* Dummy command; if it succeeds, battery is present. */ - ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr); - if (ret < 0) - val->intval = 0; /* battery disconnected */ - else - val->intval = 1; /* battery present */ - } else { /* POWER_SUPPLY_PROP_HEALTH */ + /* Dummy command; if it succeeds, battery is present. */ + ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr); + + if (ret < 0) { /* battery not present*/ + if (psp == POWER_SUPPLY_PROP_PRESENT) { + val->intval = 0; + return 0; + } + return ret; + } + + if (psp == POWER_SUPPLY_PROP_PRESENT) + val->intval = 1; /* battery present */ + else /* POWER_SUPPLY_PROP_HEALTH */ /* SBS spec doesn't have a general health command. */ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; - } return 0; } @@ -629,12 +634,14 @@ static int sbs_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_PRESENT: case POWER_SUPPLY_PROP_HEALTH: - if (client->flags & SBS_FLAGS_TI_BQ20Z75) + if (chip->flags & SBS_FLAGS_TI_BQ20Z75) ret = sbs_get_ti_battery_presence_and_health(client, psp, val); else ret = sbs_get_battery_presence_and_health(client, psp, val); + + /* this can only be true if no gpio is used */ if (psp == POWER_SUPPLY_PROP_PRESENT) return 0; break; diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index bbcaee56db9d743b6deb31adce88ee0b5379c56f..0e202d4273fb627f671af379f591bdabbcae297b 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -420,7 +420,8 @@ static void twl4030_current_worker(struct work_struct *data) if (v < USB_MIN_VOLT) { /* Back up and stop adjusting. */ - bci->usb_cur -= USB_CUR_STEP; + if (bci->usb_cur >= USB_CUR_STEP) + bci->usb_cur -= USB_CUR_STEP; bci->usb_cur_target = bci->usb_cur; } else if (bci->usb_cur >= bci->usb_cur_target || bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) { @@ -439,6 +440,7 @@ static void twl4030_current_worker(struct work_struct *data) static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable) { int ret; + u32 reg; if (bci->usb_mode == CHARGE_OFF) enable = false; @@ -452,14 +454,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable) bci->usb_enabled = 1; } - if (bci->usb_mode == CHARGE_AUTO) + if (bci->usb_mode == CHARGE_AUTO) { + /* Enable interrupts now. */ + reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | + TWL4030_TBATOR2 | TWL4030_TBATOR1 | + TWL4030_BATSTS); + ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, + TWL4030_INTERRUPTS_BCIIMR1A); + if (ret < 0) { + dev_err(bci->dev, + "failed to unmask interrupts: %d\n", + ret); + return ret; + } /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB); + } /* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */ ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0, TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4); if (bci->usb_mode == CHARGE_LINEAR) { + /* Enable interrupts now. 
*/ + reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 | + TWL4030_TBATOR1 | TWL4030_BATSTS); + ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, + TWL4030_INTERRUPTS_BCIIMR1A); + if (ret < 0) { + dev_err(bci->dev, + "failed to unmask interrupts: %d\n", + ret); + return ret; + } twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0); /* Watch dog key: WOVF acknowledge */ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33, @@ -996,12 +1022,13 @@ static int twl4030_bci_probe(struct platform_device *pdev) if (bci->dev->of_node) { struct device_node *phynode; - phynode = of_find_compatible_node(bci->dev->of_node->parent, - NULL, "ti,twl4030-usb"); + phynode = of_get_compatible_child(bci->dev->of_node->parent, + "ti,twl4030-usb"); if (phynode) { bci->usb_nb.notifier_call = twl4030_bci_usb_ncb; bci->transceiver = devm_usb_get_phy_by_node( bci->dev, phynode, &bci->usb_nb); + of_node_put(phynode); if (IS_ERR(bci->transceiver)) { ret = PTR_ERR(bci->transceiver); if (ret == -EPROBE_DEFER) diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index 15c0ca15e2aa9d857625061682bfea47bb489fe4..034ddfe3b62c31bdf3fcd8b7c45c79ac5b2cbda5 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -459,6 +459,7 @@ static void free_charger_irq(struct wm8350 *wm8350) wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index 6ac27e5908f592cef46a533cd69b880afe6aad88..8ef2332b58630e674ebd9bb2f089059f67bbc6a8 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -15,14 +15,17 @@ menuconfig POWERCAP if POWERCAP # Client driver configurations go here. +config INTEL_RAPL_CORE + tristate + config INTEL_RAPL - tristate "Intel RAPL Support" + tristate "Intel RAPL Support via MSR Interface" depends on X86 && IOSF_MBI - default n + select INTEL_RAPL_CORE ---help--- This enables support for the Intel Running Average Power Limit (RAPL) - technology which allows power limits to be enforced and monitored on - modern Intel processors (Sandy Bridge and later). + technology via MSR interface, which allows power limits to be enforced + and monitored on modern Intel processors (Sandy Bridge and later). In RAPL, the platform level settings are divided into domains for fine grained control. 
These domains include processor package, DRAM diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index 1b328854b36e4efd5f6e12f60aa7981bc41f60be..7255c94ec61c4562dabbbf61bde4d7c24556a87e 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_POWERCAP) += powercap_sys.o -obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o +obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o +obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o obj-$(CONFIG_IDLE_INJECT) += idle_inject.o diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl_common.c similarity index 61% rename from drivers/powercap/intel_rapl.c rename to drivers/powercap/intel_rapl_common.c index 295d8dcba48cdd781784c7b3d86de63ebf7cca99..d85e8e079184e7690e7bd2e0f691fce249adef93 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl_common.c @@ -1,19 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Intel Running Average Power Limit (RAPL) Driver - * Copyright (c) 2013, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc. - * + * Common code for Intel Running Average Power Limit (RAPL) support. + * Copyright (c) 2019, Intel Corporation. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -30,15 +18,14 @@ #include #include #include -#include +#include +#include +#include -#include +#include #include #include -/* Local defines */ -#define MSR_PLATFORM_POWER_LIMIT 0x0000065C - /* bitmasks for RAPL MSRs, used by primitive access functions */ #define ENERGY_STATUS_MASK 0xffffffff @@ -49,8 +36,8 @@ #define POWER_LIMIT2_MASK (0x7FFFULL<<32) #define POWER_LIMIT2_ENABLE BIT_ULL(47) #define POWER_LIMIT2_CLAMP BIT_ULL(48) -#define POWER_PACKAGE_LOCK BIT_ULL(63) -#define POWER_PP_LOCK BIT(31) +#define POWER_HIGH_LOCK BIT_ULL(63) +#define POWER_LOW_LOCK BIT(31) #define TIME_WINDOW1_MASK (0x7FULL<<17) #define TIME_WINDOW2_MASK (0x7FULL<<49) @@ -73,139 +60,38 @@ #define PP_POLICY_MASK 0x1F /* Non HW constants */ -#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ +#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ #define RAPL_PRIMITIVE_DUMMY BIT(2) #define TIME_WINDOW_MAX_MSEC 40000 #define TIME_WINDOW_MIN_MSEC 250 -#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ +#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ enum unit_type { - ARBITRARY_UNIT, /* no translation */ + ARBITRARY_UNIT, /* no translation */ POWER_UNIT, ENERGY_UNIT, TIME_UNIT, }; -enum rapl_domain_type { - RAPL_DOMAIN_PACKAGE, /* entire package/socket */ - RAPL_DOMAIN_PP0, /* core power plane */ - RAPL_DOMAIN_PP1, /* graphics uncore */ - RAPL_DOMAIN_DRAM,/* DRAM control_type */ - RAPL_DOMAIN_PLATFORM, /* PSys control_type */ - RAPL_DOMAIN_MAX, -}; - -enum rapl_domain_msr_id { - RAPL_DOMAIN_MSR_LIMIT, - RAPL_DOMAIN_MSR_STATUS, - RAPL_DOMAIN_MSR_PERF, - RAPL_DOMAIN_MSR_POLICY, - RAPL_DOMAIN_MSR_INFO, - RAPL_DOMAIN_MSR_MAX, -}; - /* per domain data, some are optional */ -enum rapl_primitives { - ENERGY_COUNTER, - POWER_LIMIT1, - POWER_LIMIT2, - FW_LOCK, - - PL1_ENABLE, /* power limit 1, aka long term */ - PL1_CLAMP, /* allow frequency to go below OS request */ - PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ - PL2_CLAMP, - - TIME_WINDOW1, /* long term */ - TIME_WINDOW2, /* short term */ - THERMAL_SPEC_POWER, - MAX_POWER, - - MIN_POWER, - MAX_TIME_WINDOW, - THROTTLED_TIME, - PRIORITY_LEVEL, - - /* below are not raw primitive data */ - AVERAGE_POWER, - NR_RAPL_PRIMITIVES, -}; - #define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2) -/* Can be expanded to include events, etc.*/ -struct rapl_domain_data { - u64 primitives[NR_RAPL_PRIMITIVES]; - unsigned long timestamp; -}; - -struct msrl_action { - u32 msr_no; - u64 clear_mask; - u64 set_mask; - int err; -}; - #define DOMAIN_STATE_INACTIVE BIT(0) #define DOMAIN_STATE_POWER_LIMIT_SET BIT(1) #define DOMAIN_STATE_BIOS_LOCKED BIT(2) -#define NR_POWER_LIMITS (2) -struct rapl_power_limit { - struct powercap_zone_constraint *constraint; - int prim_id; /* primitive ID used to enable */ - struct rapl_domain *domain; - const char *name; - u64 last_power_limit; -}; - static const char pl1_name[] = "long_term"; static const char pl2_name[] = "short_term"; -struct rapl_package; -struct rapl_domain { - const char *name; - enum rapl_domain_type id; - int msrs[RAPL_DOMAIN_MSR_MAX]; - struct powercap_zone power_zone; - struct rapl_domain_data rdd; - struct rapl_power_limit rpl[NR_POWER_LIMITS]; - u64 attr_map; /* track capabilities */ - unsigned int state; - unsigned int domain_energy_unit; - struct rapl_package *rp; -}; #define power_zone_to_rapl_domain(_zone) \ container_of(_zone, struct rapl_domain, power_zone) - -/* Each physical package contains 
multiple domains, these are the common - * data across RAPL domains within a package. - */ -struct rapl_package { - unsigned int id; /* physical package/socket id */ - unsigned int nr_domains; - unsigned long domain_map; /* bit map of active domains */ - unsigned int power_unit; - unsigned int energy_unit; - unsigned int time_unit; - struct rapl_domain *domains; /* array of domains, sized at runtime */ - struct powercap_zone *power_zone; /* keep track of parent zone */ - unsigned long power_limit_irq; /* keep track of package power limit - * notify interrupt enable status. - */ - struct list_head plist; - int lead_cpu; /* one active cpu per package for access */ - /* Track active cpus */ - struct cpumask cpumask; -}; - struct rapl_defaults { u8 floor_freq_reg_addr; int (*check_unit)(struct rapl_package *rp, int cpu); void (*set_floor_freq)(struct rapl_domain *rd, bool mode); u64 (*compute_time_window)(struct rapl_package *rp, u64 val, - bool to_raw); + bool to_raw); unsigned int dram_domain_energy_unit; }; static struct rapl_defaults *rapl_defaults; @@ -224,7 +110,7 @@ struct rapl_primitive_info { const char *name; u64 mask; int shift; - enum rapl_domain_msr_id id; + enum rapl_domain_reg_id id; enum unit_type unit; u32 flag; }; @@ -240,19 +126,18 @@ struct rapl_primitive_info { static void rapl_init_domains(struct rapl_package *rp); static int rapl_read_data_raw(struct rapl_domain *rd, - enum rapl_primitives prim, - bool xlate, u64 *data); + enum rapl_primitives prim, + bool xlate, u64 *data); static int rapl_write_data_raw(struct rapl_domain *rd, - enum rapl_primitives prim, - unsigned long long value); + enum rapl_primitives prim, + unsigned long long value); static u64 rapl_unit_xlate(struct rapl_domain *rd, - enum unit_type type, u64 value, - int to_raw); + enum unit_type type, u64 value, int to_raw); static void package_power_limit_irq_save(struct rapl_package *rp); -static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */ +static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */ -static const char * const rapl_domain_names[] = { +static const char *const rapl_domain_names[] = { "package", "core", "uncore", @@ -260,23 +145,8 @@ static const char * const rapl_domain_names[] = { "psys", }; -static struct powercap_control_type *control_type; /* PowerCap Controller */ -static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */ - -/* caller to ensure CPU hotplug lock is held */ -static struct rapl_package *find_package_by_id(int id) -{ - struct rapl_package *rp; - - list_for_each_entry(rp, &rapl_packages, plist) { - if (rp->id == id) - return rp; - } - - return NULL; -} - -static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) +static int get_energy_counter(struct powercap_zone *power_zone, + u64 *energy_raw) { struct rapl_domain *rd; u64 energy_now; @@ -375,50 +245,49 @@ static int get_domain_enable(struct powercap_zone *power_zone, bool *mode) static const struct powercap_zone_ops zone_ops[] = { /* RAPL_DOMAIN_PACKAGE */ { - .get_energy_uj = get_energy_counter, - .get_max_energy_range_uj = get_max_energy_counter, - .release = release_zone, - .set_enable = set_domain_enable, - .get_enable = get_domain_enable, - }, + .get_energy_uj = get_energy_counter, + .get_max_energy_range_uj = get_max_energy_counter, + .release = release_zone, + .set_enable = set_domain_enable, + .get_enable = get_domain_enable, + }, /* RAPL_DOMAIN_PP0 */ { - .get_energy_uj = get_energy_counter, - .get_max_energy_range_uj = get_max_energy_counter, - 
.release = release_zone, - .set_enable = set_domain_enable, - .get_enable = get_domain_enable, - }, + .get_energy_uj = get_energy_counter, + .get_max_energy_range_uj = get_max_energy_counter, + .release = release_zone, + .set_enable = set_domain_enable, + .get_enable = get_domain_enable, + }, /* RAPL_DOMAIN_PP1 */ { - .get_energy_uj = get_energy_counter, - .get_max_energy_range_uj = get_max_energy_counter, - .release = release_zone, - .set_enable = set_domain_enable, - .get_enable = get_domain_enable, - }, + .get_energy_uj = get_energy_counter, + .get_max_energy_range_uj = get_max_energy_counter, + .release = release_zone, + .set_enable = set_domain_enable, + .get_enable = get_domain_enable, + }, /* RAPL_DOMAIN_DRAM */ { - .get_energy_uj = get_energy_counter, - .get_max_energy_range_uj = get_max_energy_counter, - .release = release_zone, - .set_enable = set_domain_enable, - .get_enable = get_domain_enable, - }, + .get_energy_uj = get_energy_counter, + .get_max_energy_range_uj = get_max_energy_counter, + .release = release_zone, + .set_enable = set_domain_enable, + .get_enable = get_domain_enable, + }, /* RAPL_DOMAIN_PLATFORM */ { - .get_energy_uj = get_energy_counter, - .get_max_energy_range_uj = get_max_energy_counter, - .release = release_zone, - .set_enable = set_domain_enable, - .get_enable = get_domain_enable, - }, + .get_energy_uj = get_energy_counter, + .get_max_energy_range_uj = get_max_energy_counter, + .release = release_zone, + .set_enable = set_domain_enable, + .get_enable = get_domain_enable, + }, }; - /* * Constraint index used by powercap can be different than power limit (PL) - * index in that some PLs maybe missing due to non-existant MSRs. So we + * index in that some PLs maybe missing due to non-existent MSRs. So we * need to convert here by finding the valid PLs only (name populated). 
*/ static int contraint_to_pl(struct rapl_domain *rd, int cid) @@ -437,7 +306,7 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid) } static int set_power_limit(struct powercap_zone *power_zone, int cid, - u64 power_limit) + u64 power_limit) { struct rapl_domain *rd; struct rapl_package *rp; @@ -455,8 +324,8 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid, rp = rd->rp; if (rd->state & DOMAIN_STATE_BIOS_LOCKED) { - dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n", - rd->name); + dev_warn(&power_zone->dev, + "%s locked by BIOS, monitoring only\n", rd->name); ret = -EACCES; goto set_exit; } @@ -479,7 +348,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid, } static int get_current_power_limit(struct powercap_zone *power_zone, int cid, - u64 *data) + u64 *data) { struct rapl_domain *rd; u64 val; @@ -518,7 +387,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid, } static int set_time_window(struct powercap_zone *power_zone, int cid, - u64 window) + u64 window) { struct rapl_domain *rd; int ret = 0; @@ -548,7 +417,8 @@ static int set_time_window(struct powercap_zone *power_zone, int cid, return ret; } -static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data) +static int get_time_window(struct powercap_zone *power_zone, int cid, + u64 *data) { struct rapl_domain *rd; u64 val; @@ -583,7 +453,8 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data) return ret; } -static const char *get_constraint_name(struct powercap_zone *power_zone, int cid) +static const char *get_constraint_name(struct powercap_zone *power_zone, + int cid) { struct rapl_domain *rd; int id; @@ -596,9 +467,7 @@ static const char *get_constraint_name(struct powercap_zone *power_zone, int cid return NULL; } - -static int get_max_power(struct powercap_zone *power_zone, int id, - u64 *data) +static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data) { struct rapl_domain *rd; u64 val; @@ -640,73 +509,43 @@ static const struct powercap_zone_constraint_ops constraint_ops = { /* called after domain detection and package level data are set */ static void rapl_init_domains(struct rapl_package *rp) { - int i; + enum rapl_domain_type i; + enum rapl_domain_reg_id j; struct rapl_domain *rd = rp->domains; for (i = 0; i < RAPL_DOMAIN_MAX; i++) { unsigned int mask = rp->domain_map & (1 << i); - switch (mask) { - case BIT(RAPL_DOMAIN_PACKAGE): - rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE]; - rd->id = RAPL_DOMAIN_PACKAGE; - rd->msrs[0] = MSR_PKG_POWER_LIMIT; - rd->msrs[1] = MSR_PKG_ENERGY_STATUS; - rd->msrs[2] = MSR_PKG_PERF_STATUS; - rd->msrs[3] = 0; - rd->msrs[4] = MSR_PKG_POWER_INFO; - rd->rpl[0].prim_id = PL1_ENABLE; - rd->rpl[0].name = pl1_name; + + if (!mask) + continue; + + rd->rp = rp; + rd->name = rapl_domain_names[i]; + rd->id = i; + rd->rpl[0].prim_id = PL1_ENABLE; + rd->rpl[0].name = pl1_name; + /* some domain may support two power limits */ + if (rp->priv->limits[i] == 2) { rd->rpl[1].prim_id = PL2_ENABLE; rd->rpl[1].name = pl2_name; - break; - case BIT(RAPL_DOMAIN_PP0): - rd->name = rapl_domain_names[RAPL_DOMAIN_PP0]; - rd->id = RAPL_DOMAIN_PP0; - rd->msrs[0] = MSR_PP0_POWER_LIMIT; - rd->msrs[1] = MSR_PP0_ENERGY_STATUS; - rd->msrs[2] = 0; - rd->msrs[3] = MSR_PP0_POLICY; - rd->msrs[4] = 0; - rd->rpl[0].prim_id = PL1_ENABLE; - rd->rpl[0].name = pl1_name; - break; - case BIT(RAPL_DOMAIN_PP1): - rd->name = rapl_domain_names[RAPL_DOMAIN_PP1]; - rd->id = 
RAPL_DOMAIN_PP1; - rd->msrs[0] = MSR_PP1_POWER_LIMIT; - rd->msrs[1] = MSR_PP1_ENERGY_STATUS; - rd->msrs[2] = 0; - rd->msrs[3] = MSR_PP1_POLICY; - rd->msrs[4] = 0; - rd->rpl[0].prim_id = PL1_ENABLE; - rd->rpl[0].name = pl1_name; - break; - case BIT(RAPL_DOMAIN_DRAM): - rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM]; - rd->id = RAPL_DOMAIN_DRAM; - rd->msrs[0] = MSR_DRAM_POWER_LIMIT; - rd->msrs[1] = MSR_DRAM_ENERGY_STATUS; - rd->msrs[2] = MSR_DRAM_PERF_STATUS; - rd->msrs[3] = 0; - rd->msrs[4] = MSR_DRAM_POWER_INFO; - rd->rpl[0].prim_id = PL1_ENABLE; - rd->rpl[0].name = pl1_name; + } + + for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++) + rd->regs[j] = rp->priv->regs[i][j]; + + if (i == RAPL_DOMAIN_DRAM) { rd->domain_energy_unit = - rapl_defaults->dram_domain_energy_unit; + rapl_defaults->dram_domain_energy_unit; if (rd->domain_energy_unit) pr_info("DRAM domain energy unit %dpj\n", rd->domain_energy_unit); - break; - } - if (mask) { - rd->rp = rp; - rd++; } + rd++; } } static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, - u64 value, int to_raw) + u64 value, int to_raw) { u64 units = 1; struct rapl_package *rp = rd->rp; @@ -743,40 +582,40 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, static struct rapl_primitive_info rpi[] = { /* name, mask, shift, msr index, unit divisor */ PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0, - RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0), + RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0), PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0, - RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32, - RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0), - PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31, - RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), + PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15, - RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16, - RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47, - RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48, - RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17, - RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49, - RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0), + RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK, - 0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), + 0, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32, - RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), + RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16, - RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0), + RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48, - RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0), + RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0), PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0, - RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0), + RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0), 
PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0, - RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0), + RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0), /* non-hardware */ PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT, - RAPL_PRIMITIVE_DERIVED), + RAPL_PRIMITIVE_DERIVED), {NULL, 0, 0, 0}, }; @@ -794,26 +633,25 @@ static struct rapl_primitive_info rpi[] = { * 63-------------------------- 31--------------------------- 0 */ static int rapl_read_data_raw(struct rapl_domain *rd, - enum rapl_primitives prim, - bool xlate, u64 *data) + enum rapl_primitives prim, bool xlate, u64 *data) { - u64 value, final; - u32 msr; + u64 value; struct rapl_primitive_info *rp = &rpi[prim]; + struct reg_action ra; int cpu; if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY) return -EINVAL; - msr = rd->msrs[rp->id]; - if (!msr) + ra.reg = rd->regs[rp->id]; + if (!ra.reg) return -EINVAL; cpu = rd->rp->lead_cpu; - /* special-case package domain, which uses a different bit*/ - if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) { - rp->mask = POWER_PACKAGE_LOCK; + /* domain with 2 limits has different bit */ + if (prim == FW_LOCK && rd->rp->priv->limits[rd->id] == 2) { + rp->mask = POWER_HIGH_LOCK; rp->shift = 63; } /* non-hardware data are collected by the polling thread */ @@ -822,56 +660,32 @@ static int rapl_read_data_raw(struct rapl_domain *rd, return 0; } - if (rdmsrl_safe_on_cpu(cpu, msr, &value)) { - pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu); + ra.mask = rp->mask; + + if (rd->rp->priv->read_raw(cpu, &ra)) { + pr_debug("failed to read reg 0x%llx on cpu %d\n", ra.reg, cpu); return -EIO; } - final = value & rp->mask; - final = final >> rp->shift; + value = ra.value >> rp->shift; + if (xlate) - *data = rapl_unit_xlate(rd, rp->unit, final, 0); + *data = rapl_unit_xlate(rd, rp->unit, value, 0); else - *data = final; + *data = value; return 0; } - -static int msrl_update_safe(u32 msr_no, u64 clear_mask, u64 set_mask) -{ - int err; - u64 val; - - err = rdmsrl_safe(msr_no, &val); - if (err) - goto out; - - val &= ~clear_mask; - val |= set_mask; - - err = wrmsrl_safe(msr_no, val); - -out: - return err; -} - -static void msrl_update_func(void *info) -{ - struct msrl_action *ma = info; - - ma->err = msrl_update_safe(ma->msr_no, ma->clear_mask, ma->set_mask); -} - /* Similar use of primitive info in the read counterpart */ static int rapl_write_data_raw(struct rapl_domain *rd, - enum rapl_primitives prim, - unsigned long long value) + enum rapl_primitives prim, + unsigned long long value) { struct rapl_primitive_info *rp = &rpi[prim]; int cpu; u64 bits; - struct msrl_action ma; + struct reg_action ra; int ret; cpu = rd->rp->lead_cpu; @@ -879,17 +693,13 @@ static int rapl_write_data_raw(struct rapl_domain *rd, bits <<= rp->shift; bits &= rp->mask; - memset(&ma, 0, sizeof(ma)); + memset(&ra, 0, sizeof(ra)); - ma.msr_no = rd->msrs[rp->id]; - ma.clear_mask = rp->mask; - ma.set_mask = bits; + ra.reg = rd->regs[rp->id]; + ra.mask = rp->mask; + ra.value = bits; - ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1); - if (ret) - WARN_ON_ONCE(ret); - else - ret = ma.err; + ret = rd->rp->priv->write_raw(cpu, &ra); return ret; } @@ -907,51 +717,56 @@ static int rapl_write_data_raw(struct rapl_domain *rd, */ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) { - u64 msr_val; + struct reg_action ra; u32 value; - if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) { - pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n", - MSR_RAPL_POWER_UNIT, cpu); + ra.reg = 
rp->priv->reg_unit; + ra.mask = ~0; + if (rp->priv->read_raw(cpu, &ra)) { + pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n", + rp->priv->reg_unit, cpu); return -ENODEV; } - value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; + value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); - value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; + value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit = 1000000 / (1 << value); - value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; + value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; rp->time_unit = 1000000 / (1 << value); - pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n", - rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); + pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n", + rp->name, rp->energy_unit, rp->time_unit, rp->power_unit); return 0; } static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) { - u64 msr_val; + struct reg_action ra; u32 value; - if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) { - pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n", - MSR_RAPL_POWER_UNIT, cpu); + ra.reg = rp->priv->reg_unit; + ra.mask = ~0; + if (rp->priv->read_raw(cpu, &ra)) { + pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n", + rp->priv->reg_unit, cpu); return -ENODEV; } - value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; + + value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; - value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; + value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; rp->power_unit = (1 << value) * 1000; - value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; + value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; rp->time_unit = 1000000 / (1 << value); - pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n", - rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); + pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n", + rp->name, rp->energy_unit, rp->time_unit, rp->power_unit); return 0; } @@ -971,7 +786,6 @@ static void power_limit_irq_save_cpu(void *info) wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); } - /* REVISIT: * When package power limit is set artificially low by RAPL, LVT * thermal interrupt for package power limit should be ignored @@ -1055,9 +869,9 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable) } static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, - bool to_raw) + bool to_raw) { - u64 f, y; /* fraction and exp. used for time unit */ + u64 f, y; /* fraction and exp. used for time unit */ /* * Special processing based on 2^Y*(1+F/4), refer @@ -1077,7 +891,7 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, } static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value, - bool to_raw) + bool to_raw) { /* * Atom time unit encoding is straight forward val * time_unit, @@ -1085,8 +899,8 @@ static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value, */ if (!to_raw) return (value) ? 
value *= rp->time_unit : rp->time_unit; - else - value = div64_u64(value, rp->time_unit); + + value = div64_u64(value, rp->time_unit); return value; } @@ -1133,49 +947,48 @@ static const struct rapl_defaults rapl_defaults_cht = { .compute_time_window = rapl_compute_time_window_atom, }; -#define RAPL_CPU(_model, _ops) { \ - .vendor = X86_VENDOR_INTEL, \ - .family = 6, \ - .model = _model, \ - .driver_data = (kernel_ulong_t)&_ops, \ - } - static const struct x86_cpu_id rapl_ids[] __initconst = { - RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core), - - RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_IVYBRIDGE_X, rapl_defaults_core), - - RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server), - - RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server), - - RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server), - RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_CANNONLAKE_MOBILE, rapl_defaults_core), - - RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt), - RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht), - RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng), - RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann), - RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core), - - RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server), - RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server), + INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core), + INTEL_CPU_FAM6(SANDYBRIDGE_X, rapl_defaults_core), + + INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core), + INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core), + + INTEL_CPU_FAM6(HASWELL_CORE, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL_ULT, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core), + INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server), + + INTEL_CPU_FAM6(BROADWELL_CORE, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL_GT3E, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core), + INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server), + + INTEL_CPU_FAM6(SKYLAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(SKYLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server), + INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE_DESKTOP, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server), + INTEL_CPU_FAM6(ICELAKE_XEON_D, rapl_defaults_hsw_server), + + INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), + INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), + INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, rapl_defaults_tng), + INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann), + 
INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core), + + INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), + INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), {} }; + MODULE_DEVICE_TABLE(x86cpu, rapl_ids); /* Read once for all raw primitive data for domains */ @@ -1185,56 +998,39 @@ static void rapl_update_domain_data(struct rapl_package *rp) u64 val; for (dmn = 0; dmn < rp->nr_domains; dmn++) { - pr_debug("update package %d domain %s data\n", rp->id, + pr_debug("update %s domain %s data\n", rp->name, rp->domains[dmn].name); /* exclude non-raw primitives */ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) { if (!rapl_read_data_raw(&rp->domains[dmn], prim, rpi[prim].unit, &val)) - rp->domains[dmn].rdd.primitives[prim] = val; + rp->domains[dmn].rdd.primitives[prim] = val; } } } -static void rapl_unregister_powercap(void) -{ - if (platform_rapl_domain) { - powercap_unregister_zone(control_type, - &platform_rapl_domain->power_zone); - kfree(platform_rapl_domain); - } - powercap_unregister_control_type(control_type); -} - static int rapl_package_register_powercap(struct rapl_package *rp) { struct rapl_domain *rd; - char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/ struct powercap_zone *power_zone = NULL; int nr_pl, ret; /* Update the domain data of the new package */ rapl_update_domain_data(rp); - /* first we register package domain as the parent zone*/ + /* first we register package domain as the parent zone */ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { if (rd->id == RAPL_DOMAIN_PACKAGE) { nr_pl = find_nr_power_limit(rd); - pr_debug("register socket %d package domain %s\n", - rp->id, rd->name); - memset(dev_name, 0, sizeof(dev_name)); - snprintf(dev_name, sizeof(dev_name), "%s-%d", - rd->name, rp->id); + pr_debug("register package domain %s\n", rp->name); power_zone = powercap_register_zone(&rd->power_zone, - control_type, - dev_name, NULL, - &zone_ops[rd->id], - nr_pl, - &constraint_ops); + rp->priv->control_type, rp->name, + NULL, &zone_ops[rd->id], nr_pl, + &constraint_ops); if (IS_ERR(power_zone)) { - pr_debug("failed to register package, %d\n", - rp->id); + pr_debug("failed to register power zone %s\n", + rp->name); return PTR_ERR(power_zone); } /* track parent zone in per package/socket data */ @@ -1247,21 +1043,21 @@ static int rapl_package_register_powercap(struct rapl_package *rp) pr_err("no package domain found, unknown topology!\n"); return -ENODEV; } - /* now register domains as children of the socket/package*/ + /* now register domains as children of the socket/package */ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { if (rd->id == RAPL_DOMAIN_PACKAGE) continue; /* number of power limits per domain varies */ nr_pl = find_nr_power_limit(rd); power_zone = powercap_register_zone(&rd->power_zone, - control_type, rd->name, - rp->power_zone, - &zone_ops[rd->id], nr_pl, - &constraint_ops); + rp->priv->control_type, + rd->name, rp->power_zone, + &zone_ops[rd->id], nr_pl, + &constraint_ops); if (IS_ERR(power_zone)) { - pr_debug("failed to register power_zone, %d:%s:%s\n", - rp->id, rd->name, dev_name); + pr_debug("failed to register power_zone, %s:%s\n", + rp->name, rd->name); ret = PTR_ERR(power_zone); goto err_cleanup; } @@ -1274,23 +1070,31 @@ static int rapl_package_register_powercap(struct rapl_package *rp) * failed after the first domain setup. 
*/ while (--rd >= rp->domains) { - pr_debug("unregister package %d domain %s\n", rp->id, rd->name); - powercap_unregister_zone(control_type, &rd->power_zone); + pr_debug("unregister %s domain %s\n", rp->name, rd->name); + powercap_unregister_zone(rp->priv->control_type, + &rd->power_zone); } return ret; } -static int __init rapl_register_psys(void) +int rapl_add_platform_domain(struct rapl_if_priv *priv) { struct rapl_domain *rd; struct powercap_zone *power_zone; - u64 val; + struct reg_action ra; + int ret; - if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_ENERGY_STATUS, &val) || !val) + ra.reg = priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS]; + ra.mask = ~0; + ret = priv->read_raw(0, &ra); + if (ret || !ra.value) return -ENODEV; - if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_POWER_LIMIT, &val) || !val) + ra.reg = priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT]; + ra.mask = ~0; + ret = priv->read_raw(0, &ra); + if (ret || !ra.value) return -ENODEV; rd = kzalloc(sizeof(*rd), GFP_KERNEL); @@ -1299,15 +1103,17 @@ static int __init rapl_register_psys(void) rd->name = rapl_domain_names[RAPL_DOMAIN_PLATFORM]; rd->id = RAPL_DOMAIN_PLATFORM; - rd->msrs[0] = MSR_PLATFORM_POWER_LIMIT; - rd->msrs[1] = MSR_PLATFORM_ENERGY_STATUS; + rd->regs[RAPL_DOMAIN_REG_LIMIT] = + priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT]; + rd->regs[RAPL_DOMAIN_REG_STATUS] = + priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS]; rd->rpl[0].prim_id = PL1_ENABLE; rd->rpl[0].name = pl1_name; rd->rpl[1].prim_id = PL2_ENABLE; rd->rpl[1].name = pl2_name; - rd->rp = find_package_by_id(0); + rd->rp = rapl_find_package_domain(0, priv); - power_zone = powercap_register_zone(&rd->power_zone, control_type, + power_zone = powercap_register_zone(&rd->power_zone, priv->control_type, "psys", NULL, &zone_ops[RAPL_DOMAIN_PLATFORM], 2, &constraint_ops); @@ -1317,38 +1123,32 @@ static int __init rapl_register_psys(void) return PTR_ERR(power_zone); } - platform_rapl_domain = rd; + priv->platform_rapl_domain = rd; return 0; } +EXPORT_SYMBOL_GPL(rapl_add_platform_domain); -static int __init rapl_register_powercap(void) +void rapl_remove_platform_domain(struct rapl_if_priv *priv) { - control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); - if (IS_ERR(control_type)) { - pr_debug("failed to register powercap control_type.\n"); - return PTR_ERR(control_type); + if (priv->platform_rapl_domain) { + powercap_unregister_zone(priv->control_type, + &priv->platform_rapl_domain->power_zone); + kfree(priv->platform_rapl_domain); } - return 0; } +EXPORT_SYMBOL_GPL(rapl_remove_platform_domain); -static int rapl_check_domain(int cpu, int domain) +static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp) { - unsigned msr; - u64 val = 0; + struct reg_action ra; switch (domain) { case RAPL_DOMAIN_PACKAGE: - msr = MSR_PKG_ENERGY_STATUS; - break; case RAPL_DOMAIN_PP0: - msr = MSR_PP0_ENERGY_STATUS; - break; case RAPL_DOMAIN_PP1: - msr = MSR_PP1_ENERGY_STATUS; - break; case RAPL_DOMAIN_DRAM: - msr = MSR_DRAM_ENERGY_STATUS; + ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS]; break; case RAPL_DOMAIN_PLATFORM: /* PSYS(PLATFORM) is not a CPU domain, so avoid printng error */ @@ -1360,19 +1160,20 @@ static int rapl_check_domain(int cpu, int domain) /* make sure domain counters are available and contains non-zero * values, otherwise skip it. 
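The hunks above show the core restructuring: the common RAPL code no longer reads or writes MSRs directly, but routes every access through the interface's read_raw/write_raw callbacks, passing a reg_action that bundles the register, the mask of interesting bits, and the value. As a rough, self-contained sketch of that callback pattern (not kernel code: the in-memory register file, the raw_if/mem_read_raw/mem_write_raw/check_domain names, and the simplified reg_action layout are invented for illustration; the real structures carry more fields and take a CPU argument), the following compiles with a plain C compiler:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct reg_action: address, mask and value travel together. */
struct reg_action {
	uint64_t reg;	/* register "address" (here: index into a fake register file) */
	uint64_t mask;	/* bits the caller cares about */
	uint64_t value;	/* masked value read back, or bits to set on write */
};

/* The interface a backend provides, analogous to the read_raw/write_raw callbacks. */
struct raw_if {
	int (*read_raw)(struct reg_action *ra);
	int (*write_raw)(struct reg_action *ra);
};

/* Invented in-memory backend standing in for the MSR (or a future MMIO) implementation. */
static uint64_t regfile[8] = { [1] = 0xa5, [2] = 0x0 };

static int mem_read_raw(struct reg_action *ra)
{
	if (ra->reg >= 8)
		return -1;
	ra->value = regfile[ra->reg] & ra->mask;
	return 0;
}

static int mem_write_raw(struct reg_action *ra)
{
	uint64_t v;

	if (ra->reg >= 8)
		return -1;
	v = regfile[ra->reg];
	v &= ~ra->mask;			/* clear the field... */
	v |= ra->value & ra->mask;	/* ...then set the new bits (read-modify-write) */
	regfile[ra->reg] = v;
	return 0;
}

/* "Domain detection" in the same spirit: a domain exists if its status register is non-zero. */
static int check_domain(const struct raw_if *rif, uint64_t status_reg)
{
	struct reg_action ra = { .reg = status_reg, .mask = ~0ULL };

	if (rif->read_raw(&ra) || !ra.value)
		return -1;
	return 0;
}

int main(void)
{
	struct raw_if rif = { mem_read_raw, mem_write_raw };

	printf("domain 1: %s\n", check_domain(&rif, 1) ? "absent" : "present");
	printf("domain 2: %s\n", check_domain(&rif, 2) ? "absent" : "present");

	/* Set a hypothetical "limit enable" bit (bit 15) on register 1. */
	struct reg_action ra = { .reg = 1, .mask = 1ULL << 15, .value = 1ULL << 15 };
	rif.write_raw(&ra);
	printf("reg 1 now 0x%llx\n", (unsigned long long)regfile[1]);
	return 0;
}

The point of the indirection is that the common module only ever sees reg_action plus two callbacks, so a non-MSR backend can be plugged in without touching the shared code.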
*/ - if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val) + + ra.mask = ~0; + if (rp->priv->read_raw(cpu, &ra) || !ra.value) return -ENODEV; return 0; } - /* * Check if power limits are available. Two cases when they are not available: * 1. Locked by BIOS, in this case we still provide read-only access so that * users can see what limit is set by the BIOS. * 2. Some CPUs make some domains monitoring only which means PLx MSRs may not - * exist at all. In this case, we do not show the contraints in powercap. + * exist at all. In this case, we do not show the constraints in powercap. * * Called after domains are detected and initialized. */ @@ -1384,14 +1185,15 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd) /* check if the domain is locked by BIOS, ignore if MSR doesn't exist */ if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) { if (val64) { - pr_info("RAPL package %d domain %s locked by BIOS\n", - rd->rp->id, rd->name); + pr_info("RAPL %s domain %s locked by BIOS\n", + rd->rp->name, rd->name); rd->state |= DOMAIN_STATE_BIOS_LOCKED; } } - /* check if power limit MSRs exists, otherwise domain is monitoring only */ + /* check if power limit MSR exists, otherwise domain is monitoring only */ for (i = 0; i < NR_POWER_LIMITS; i++) { int prim = rd->rpl[i].prim_id; + if (rapl_read_data_raw(rd, prim, false, &val64)) rd->rpl[i].name = NULL; } @@ -1407,20 +1209,20 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) for (i = 0; i < RAPL_DOMAIN_MAX; i++) { /* use physical package id to read counters */ - if (!rapl_check_domain(cpu, i)) { + if (!rapl_check_domain(cpu, i, rp)) { rp->domain_map |= 1 << i; pr_info("Found RAPL domain %s\n", rapl_domain_names[i]); } } - rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); + rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); if (!rp->nr_domains) { - pr_debug("no valid rapl domains found in package %d\n", rp->id); + pr_debug("no valid rapl domains found in %s\n", rp->name); return -ENODEV; } - pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id); + pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name); rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain), - GFP_KERNEL); + GFP_KERNEL); if (!rp->domains) return -ENOMEM; @@ -1433,7 +1235,7 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) } /* called from CPU hotplug notifier, hotplug lock held */ -static void rapl_remove_package(struct rapl_package *rp) +void rapl_remove_package(struct rapl_package *rp) { struct rapl_domain *rd, *rd_package = NULL; @@ -1450,20 +1252,41 @@ static void rapl_remove_package(struct rapl_package *rp) rd_package = rd; continue; } - pr_debug("remove package, undo power limit on %d: %s\n", - rp->id, rd->name); - powercap_unregister_zone(control_type, &rd->power_zone); + pr_debug("remove package, undo power limit on %s: %s\n", + rp->name, rd->name); + powercap_unregister_zone(rp->priv->control_type, + &rd->power_zone); } /* do parent zone last */ - powercap_unregister_zone(control_type, &rd_package->power_zone); + powercap_unregister_zone(rp->priv->control_type, + &rd_package->power_zone); list_del(&rp->plist); kfree(rp); } +EXPORT_SYMBOL_GPL(rapl_remove_package); + +/* caller to ensure CPU hotplug lock is held */ +struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv) +{ + int id = topology_logical_die_id(cpu); + struct rapl_package *rp; + + list_for_each_entry(rp, &rapl_packages, plist) { + if (rp->id == id + && rp->priv->control_type == 
priv->control_type) + return rp; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(rapl_find_package_domain); /* called from CPU hotplug notifier, hotplug lock held */ -static struct rapl_package *rapl_add_package(int cpu, int pkgid) +struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv) { + int id = topology_logical_die_id(cpu); struct rapl_package *rp; + struct cpuinfo_x86 *c = &cpu_data(cpu); int ret; rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); @@ -1471,12 +1294,19 @@ static struct rapl_package *rapl_add_package(int cpu, int pkgid) return ERR_PTR(-ENOMEM); /* add the new package to the list */ - rp->id = pkgid; + rp->id = id; rp->lead_cpu = cpu; + rp->priv = priv; + + if (topology_max_die_per_package() > 1) + snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, + "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id); + else + snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", + c->phys_proc_id); /* check if the package contains valid domains */ - if (rapl_detect_domains(rp, cpu) || - rapl_defaults->check_unit(rp, cpu)) { + if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) { ret = -ENODEV; goto err_free_package; } @@ -1492,49 +1322,7 @@ static struct rapl_package *rapl_add_package(int cpu, int pkgid) kfree(rp); return ERR_PTR(ret); } - -/* Handles CPU hotplug on multi-socket systems. - * If a CPU goes online as the first CPU of the physical package - * we add the RAPL package to the system. Similarly, when the last - * CPU of the package is removed, we remove the RAPL package and its - * associated domains. Cooling devices are handled accordingly at - * per-domain level. - */ -static int rapl_cpu_online(unsigned int cpu) -{ - int pkgid = topology_physical_package_id(cpu); - struct rapl_package *rp; - - rp = find_package_by_id(pkgid); - if (!rp) { - rp = rapl_add_package(cpu, pkgid); - if (IS_ERR(rp)) - return PTR_ERR(rp); - } - cpumask_set_cpu(cpu, &rp->cpumask); - return 0; -} - -static int rapl_cpu_down_prep(unsigned int cpu) -{ - int pkgid = topology_physical_package_id(cpu); - struct rapl_package *rp; - int lead_cpu; - - rp = find_package_by_id(pkgid); - if (!rp) - return 0; - - cpumask_clear_cpu(cpu, &rp->cpumask); - lead_cpu = cpumask_first(&rp->cpumask); - if (lead_cpu >= nr_cpu_ids) - rapl_remove_package(rp); - else if (rp->lead_cpu == cpu) - rp->lead_cpu = lead_cpu; - return 0; -} - -static enum cpuhp_state pcap_rapl_online; +EXPORT_SYMBOL_GPL(rapl_add_package); static void power_limit_state_save(void) { @@ -1552,17 +1340,15 @@ static void power_limit_state_save(void) switch (rd->rpl[i].prim_id) { case PL1_ENABLE: ret = rapl_read_data_raw(rd, - POWER_LIMIT1, - true, - &rd->rpl[i].last_power_limit); + POWER_LIMIT1, true, + &rd->rpl[i].last_power_limit); if (ret) rd->rpl[i].last_power_limit = 0; break; case PL2_ENABLE: ret = rapl_read_data_raw(rd, - POWER_LIMIT2, - true, - &rd->rpl[i].last_power_limit); + POWER_LIMIT2, true, + &rd->rpl[i].last_power_limit); if (ret) rd->rpl[i].last_power_limit = 0; break; @@ -1588,15 +1374,13 @@ static void power_limit_state_restore(void) switch (rd->rpl[i].prim_id) { case PL1_ENABLE: if (rd->rpl[i].last_power_limit) - rapl_write_data_raw(rd, - POWER_LIMIT1, - rd->rpl[i].last_power_limit); + rapl_write_data_raw(rd, POWER_LIMIT1, + rd->rpl[i].last_power_limit); break; case PL2_ENABLE: if (rd->rpl[i].last_power_limit) - rapl_write_data_raw(rd, - POWER_LIMIT2, - rd->rpl[i].last_power_limit); + rapl_write_data_raw(rd, POWER_LIMIT2, + rd->rpl[i].last_power_limit); break; } } @@ -1605,7 +1389,7 @@ static 
void power_limit_state_restore(void) } static int rapl_pm_callback(struct notifier_block *nb, - unsigned long mode, void *_unused) + unsigned long mode, void *_unused) { switch (mode) { case PM_SUSPEND_PREPARE: @@ -1622,6 +1406,8 @@ static struct notifier_block rapl_pm_notifier = { .notifier_call = rapl_pm_callback, }; +static struct platform_device *rapl_msr_platdev; + static int __init rapl_init(void) { const struct x86_cpu_id *id; @@ -1630,50 +1416,43 @@ static int __init rapl_init(void) id = x86_match_cpu(rapl_ids); if (!id) { pr_err("driver does not support CPU family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); + boot_cpu_data.x86, boot_cpu_data.x86_model); return -ENODEV; } rapl_defaults = (struct rapl_defaults *)id->driver_data; - ret = rapl_register_powercap(); + ret = register_pm_notifier(&rapl_pm_notifier); if (ret) return ret; - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online", - rapl_cpu_online, rapl_cpu_down_prep); - if (ret < 0) - goto err_unreg; - pcap_rapl_online = ret; - - /* Don't bail out if PSys is not supported */ - rapl_register_psys(); + rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0); + if (!rapl_msr_platdev) { + ret = -ENOMEM; + goto end; + } - ret = register_pm_notifier(&rapl_pm_notifier); + ret = platform_device_add(rapl_msr_platdev); if (ret) - goto err_unreg_all; - - return 0; + platform_device_put(rapl_msr_platdev); -err_unreg_all: - cpuhp_remove_state(pcap_rapl_online); +end: + if (ret) + unregister_pm_notifier(&rapl_pm_notifier); -err_unreg: - rapl_unregister_powercap(); return ret; } static void __exit rapl_exit(void) { + platform_device_unregister(rapl_msr_platdev); unregister_pm_notifier(&rapl_pm_notifier); - cpuhp_remove_state(pcap_rapl_online); - rapl_unregister_powercap(); } module_init(rapl_init); module_exit(rapl_exit); -MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)"); +MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); MODULE_AUTHOR("Jacob Pan "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c new file mode 100644 index 0000000000000000000000000000000000000000..d5487965bdfe9d743ab0ab8c26d1e5f29ff0997f --- /dev/null +++ b/drivers/powercap/intel_rapl_msr.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Running Average Power Limit (RAPL) Driver via MSR interface + * Copyright (c) 2019, Intel Corporation. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Local defines */ +#define MSR_PLATFORM_POWER_LIMIT 0x0000065C + +/* private data for RAPL MSR Interface */ +static struct rapl_if_priv rapl_msr_priv = { + .reg_unit = MSR_RAPL_POWER_UNIT, + .regs[RAPL_DOMAIN_PACKAGE] = { + MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO }, + .regs[RAPL_DOMAIN_PP0] = { + MSR_PP0_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, 0, MSR_PP0_POLICY, 0 }, + .regs[RAPL_DOMAIN_PP1] = { + MSR_PP1_POWER_LIMIT, MSR_PP1_ENERGY_STATUS, 0, MSR_PP1_POLICY, 0 }, + .regs[RAPL_DOMAIN_DRAM] = { + MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO }, + .regs[RAPL_DOMAIN_PLATFORM] = { + MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0}, + .limits[RAPL_DOMAIN_PACKAGE] = 2, +}; + +/* Handles CPU hotplug on multi-socket systems. 
+ * If a CPU goes online as the first CPU of the physical package + * we add the RAPL package to the system. Similarly, when the last + * CPU of the package is removed, we remove the RAPL package and its + * associated domains. Cooling devices are handled accordingly at + * per-domain level. + */ +static int rapl_cpu_online(unsigned int cpu) +{ + struct rapl_package *rp; + + rp = rapl_find_package_domain(cpu, &rapl_msr_priv); + if (!rp) { + rp = rapl_add_package(cpu, &rapl_msr_priv); + if (IS_ERR(rp)) + return PTR_ERR(rp); + } + cpumask_set_cpu(cpu, &rp->cpumask); + return 0; +} + +static int rapl_cpu_down_prep(unsigned int cpu) +{ + struct rapl_package *rp; + int lead_cpu; + + rp = rapl_find_package_domain(cpu, &rapl_msr_priv); + if (!rp) + return 0; + + cpumask_clear_cpu(cpu, &rp->cpumask); + lead_cpu = cpumask_first(&rp->cpumask); + if (lead_cpu >= nr_cpu_ids) + rapl_remove_package(rp); + else if (rp->lead_cpu == cpu) + rp->lead_cpu = lead_cpu; + return 0; +} + +static int rapl_msr_read_raw(int cpu, struct reg_action *ra) +{ + u32 msr = (u32)ra->reg; + + if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) { + pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu); + return -EIO; + } + ra->value &= ra->mask; + return 0; +} + +static void rapl_msr_update_func(void *info) +{ + struct reg_action *ra = info; + u32 msr = (u32)ra->reg; + u64 val; + + ra->err = rdmsrl_safe(msr, &val); + if (ra->err) + return; + + val &= ~ra->mask; + val |= ra->value; + + ra->err = wrmsrl_safe(msr, val); +} + +static int rapl_msr_write_raw(int cpu, struct reg_action *ra) +{ + int ret; + + ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1); + if (WARN_ON_ONCE(ret)) + return ret; + + return ra->err; +} + +static int rapl_msr_probe(struct platform_device *pdev) +{ + int ret; + + rapl_msr_priv.read_raw = rapl_msr_read_raw; + rapl_msr_priv.write_raw = rapl_msr_write_raw; + + rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); + if (IS_ERR(rapl_msr_priv.control_type)) { + pr_debug("failed to register powercap control_type.\n"); + return PTR_ERR(rapl_msr_priv.control_type); + } + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online", + rapl_cpu_online, rapl_cpu_down_prep); + if (ret < 0) + goto out; + rapl_msr_priv.pcap_rapl_online = ret; + + /* Don't bail out if PSys is not supported */ + rapl_add_platform_domain(&rapl_msr_priv); + + return 0; + +out: + if (ret) + powercap_unregister_control_type(rapl_msr_priv.control_type); + return ret; +} + +static int rapl_msr_remove(struct platform_device *pdev) +{ + cpuhp_remove_state(rapl_msr_priv.pcap_rapl_online); + rapl_remove_platform_domain(&rapl_msr_priv); + powercap_unregister_control_type(rapl_msr_priv.control_type); + return 0; +} + +static const struct platform_device_id rapl_msr_ids[] = { + { .name = "intel_rapl_msr", }, + {} +}; +MODULE_DEVICE_TABLE(platform, rapl_msr_ids); + +static struct platform_driver intel_rapl_msr_driver = { + .probe = rapl_msr_probe, + .remove = rapl_msr_remove, + .id_table = rapl_msr_ids, + .driver = { + .name = "intel_rapl_msr", + }, +}; + +module_platform_driver(intel_rapl_msr_driver); + +MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit) control via MSR interface"); +MODULE_AUTHOR("Zhang Rui "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 9e2f274bd44f2e3446608cc97951a42329f73956..60c8375c3c816cad88f34c1b46aa0674a9a906d8 100644 --- a/drivers/powercap/powercap_sys.c +++ 
b/drivers/powercap/powercap_sys.c @@ -379,9 +379,9 @@ static void create_power_zone_common_attributes( &dev_attr_max_energy_range_uj.attr; if (power_zone->ops->get_energy_uj) { if (power_zone->ops->reset_energy_uj) - dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; + dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR; else - dev_attr_energy_uj.attr.mode = S_IRUGO; + dev_attr_energy_uj.attr.mode = S_IRUSR; power_zone->zone_dev_attrs[count++] = &dev_attr_energy_uj.attr; } diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 8febacb8fc54df3965cf53b64d040ea705bb68cf..0951564b6830a7bed1f4ebe1b64617f180428c49 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -166,6 +166,14 @@ static long pps_cdev_ioctl(struct file *file, pps->params.mode |= PPS_CANWAIT; pps->params.api_version = PPS_API_VERS; + /* + * Clear unused fields of pps_kparams to avoid leaking + * uninitialized data of the PPS_SETPARAMS caller via + * PPS_GETPARAMS + */ + pps->params.assert_off_tu.flags = 0; + pps->params.clear_off_tu.flags = 0; + spin_unlock_irq(&pps->lock); break; diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 2012551d93e02381cb1136ada745e22da77f188d..796eeffdf93b1240af7be112ed4356881773b525 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -228,7 +228,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) pct->sec = ts.tv_sec; pct->nsec = ts.tv_nsec; pct++; - ptp->info->gettime64(ptp->info, &ts); + err = ptp->info->gettime64(ptp->info, &ts); + if (err) + goto out; pct->sec = ts.tv_sec; pct->nsec = ts.tv_nsec; pct++; @@ -281,6 +283,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; } +out: kfree(sysoff); return err; } diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 7eacc1c4b3b10e1103e6e9c895112eb176245faa..7ef7befc899c8b9638508adb72c981133aaea948 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -175,10 +175,11 @@ static struct posix_clock_operations ptp_clock_ops = { .read = ptp_read, }; -static void delete_ptp_clock(struct posix_clock *pc) +static void ptp_clock_release(struct device *dev) { - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); + ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@ -222,7 +223,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } ptp->clock.ops = ptp_clock_ops; - ptp->clock.release = delete_ptp_clock; ptp->info = info; ptp->devid = MKDEV(major, index); ptp->index = index; @@ -249,13 +249,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, if (err) goto no_pin_groups; - /* Create a new device in our class. */ - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, - ptp, ptp->pin_attr_groups, - "ptp%d", ptp->index); - if (IS_ERR(ptp->dev)) - goto no_device; - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -265,26 +258,40 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pps.owner = info->owner; ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS); if (!ptp->pps_source) { + err = -EINVAL; pr_err("failed to register pps source\n"); goto no_pps; } } - /* Create a posix clock. */ - err = posix_clock_register(&ptp->clock, ptp->devid); + /* Initialize a new device of our class in our clock structure. 
*/ + device_initialize(&ptp->dev); + ptp->dev.devt = ptp->devid; + ptp->dev.class = ptp_class; + ptp->dev.parent = parent; + ptp->dev.groups = ptp->pin_attr_groups; + ptp->dev.release = ptp_clock_release; + dev_set_drvdata(&ptp->dev, ptp); + dev_set_name(&ptp->dev, "ptp%d", ptp->index); + + /* Create a posix clock and link it to the device. */ + err = posix_clock_register(&ptp->clock, &ptp->dev); if (err) { + if (ptp->pps_source) + pps_unregister_source(ptp->pps_source); + + if (ptp->kworker) + kthread_destroy_worker(ptp->kworker); + + put_device(&ptp->dev); + pr_err("failed to create posix clock\n"); - goto no_clock; + return ERR_PTR(err); } return ptp; -no_clock: - if (ptp->pps_source) - pps_unregister_source(ptp->pps_source); no_pps: - device_destroy(ptp_class, ptp->devid); -no_device: ptp_cleanup_pin_groups(ptp); no_pin_groups: if (ptp->kworker) @@ -314,10 +321,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp) if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - device_destroy(ptp_class, ptp->devid); - ptp_cleanup_pin_groups(ptp); - posix_clock_unregister(&ptp->clock); + return 0; } EXPORT_SYMBOL(ptp_clock_unregister); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index c7c62b782cb95e20cfb21c2d98e91f4ae040076b..05f6b6a9bbd580192679b184d5e2ccdc50ce3b12 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -41,7 +41,7 @@ struct timestamp_event_queue { struct ptp_clock { struct posix_clock clock; - struct device *dev; + struct device dev; struct ptp_clock_info *info; dev_t devid; int index; /* index into clocks.map */ diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 1581f6ab1b1f425986cb8693a961cee7fa4f42f8..b1b74cfb157162357b0c59d4f9bc1dfd820cc91d 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, if (IS_ENABLED(CONFIG_OF)) of_pwmchip_add(chip); - pwmchip_sysfs_export(chip); - out: mutex_unlock(&pwm_lock); + + if (!ret) + pwmchip_sysfs_export(chip); + return ret; } EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); @@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip) unsigned int i; int ret = 0; - pwmchip_sysfs_unexport_children(chip); + pwmchip_sysfs_unexport(chip); mutex_lock(&pwm_lock); @@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip) free_pwms(chip); - pwmchip_sysfs_unexport(chip); - out: mutex_unlock(&pwm_lock); return ret; @@ -874,6 +874,7 @@ void pwm_put(struct pwm_device *pwm) if (pwm->chip->ops->free) pwm->chip->ops->free(pwm->chip, pwm); + pwm_set_chip_data(pwm, NULL); pwm->label = NULL; module_put(pwm->chip->ops->owner); diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index d961a8207b1cbe660cfb6f38d21b7383f1834847..31b01035d0ab32e833226726369e2cd7d45aa2aa 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm, static const struct pwm_ops iproc_pwm_ops = { .apply = iproc_pwmc_apply, .get_state = iproc_pwmc_get_state, + .owner = THIS_MODULE, }; static int iproc_pwmc_probe(struct platform_device *pdev) diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 7c8d6a168ceb254c69420ab293f08e3b43fcdd18..b91c477cc84be90fc29d200a3528fdffd20ae1aa 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -84,7 +84,6 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm); - 
pwm_set_chip_data(pwm, NULL); kfree(channel); } diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index 26ec24e457b12414cb9ca3a7cbe049f8a49f70e9..7e16b7def0dcb6ee6f32b4ab9b2a9689081c6cd7 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -48,7 +48,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v) static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v) { /* Duty cycle 0..15 max */ - return DIV_ROUND_CLOSEST(v * 0xf, pwm_get_period(pwm)); + return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period); } static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) @@ -71,7 +71,7 @@ static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, struct clps711x_chip *priv = to_clps711x_chip(chip); unsigned int duty; - if (period_ns != pwm_get_period(pwm)) + if (period_ns != pwm->args.period) return -EINVAL; duty = clps711x_get_duty(pwm, duty_ns); diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 4721a264bac2580cf8d21ee54396e0b494f1c9dc..1e69c1c9ec09635c9dffbb6b4d9cfd157f0c9108 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -97,7 +97,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; - u32 ctrl; + u32 orig_ctrl, ctrl; do_div(freq, period_ns); @@ -114,13 +114,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - ctrl = pwm_lpss_read(pwm); + orig_ctrl = ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); base_unit &= base_unit_range; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; - pwm_lpss_write(pwm, ctrl); + + if (orig_ctrl != ctrl) { + pwm_lpss_write(pwm, ctrl); + pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); + } } static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) @@ -144,7 +148,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); ret = pwm_lpss_wait_for_update(pwm); if (ret) { @@ -157,7 +160,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (ret) return ret; pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); return pwm_lpss_wait_for_update(pwm); } } else if (pwm_is_enabled(pwm)) { diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index c1ed641b3e26622041c0fead2931a13bface7454..f6e738ad7bd92942ee9ea0ee028ae11ae7befb0b 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -111,6 +111,10 @@ struct meson_pwm { const struct meson_pwm_data *data; void __iomem *base; u8 inverter_mask; + /* + * Protects register (write) access to the REG_MISC_AB register + * that is shared between the two PWMs. 
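The comment being added above documents why the meson driver moves its spinlock into the enable/disable paths: both PWM channels read-modify-write the shared REG_MISC_AB register, so the update of one channel's bits must be serialized against the other channel. A minimal userspace analogue of that constraint is sketched below (the pthread mutex stands in for the spinlock, the shared word stands in for the register, and channel_enable/channel_disable/worker are invented names for illustration only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t misc_ab;	/* stands in for the shared REG_MISC_AB register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Each channel owns a different bit; the read-modify-write must not be torn. */
static void channel_enable(int id)
{
	pthread_mutex_lock(&lock);
	uint32_t v = misc_ab;
	v |= 1u << id;
	misc_ab = v;
	pthread_mutex_unlock(&lock);
}

static void channel_disable(int id)
{
	pthread_mutex_lock(&lock);
	uint32_t v = misc_ab;
	v &= ~(1u << id);
	misc_ab = v;
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	int id = (int)(intptr_t)arg;

	for (int i = 0; i < 100000; i++) {
		channel_enable(id);
		channel_disable(id);
	}
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, worker, (void *)(intptr_t)0);
	pthread_create(&t1, NULL, worker, (void *)(intptr_t)1);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	/* With the lock held around each update, no write is lost and both bits end clear. */
	printf("final register value: 0x%x\n", misc_ab);
	return 0;
}

Without the lock, the two read-modify-write sequences can interleave and one channel's bit update can be silently overwritten, which is exactly the hazard the driver comment describes.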
+ */ spinlock_t lock; }; @@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, { u32 value, clk_shift, clk_enable, enable; unsigned int offset; + unsigned long flags; switch (id) { case 0: @@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson, return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~(MISC_CLK_DIV_MASK << clk_shift); value |= channel->pre_div << clk_shift; @@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson, value = readl(meson->base + REG_MISC_AB); value |= enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) { u32 value, enable; + unsigned long flags; switch (id) { case 0: @@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) return; } + spin_lock_irqsave(&meson->lock, flags); + value = readl(meson->base + REG_MISC_AB); value &= ~enable; writel(value, meson->base + REG_MISC_AB); + + spin_unlock_irqrestore(&meson->lock, flags); } static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, @@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); struct meson_pwm *meson = to_meson_pwm(chip); - unsigned long flags; int err = 0; if (!state) return -EINVAL; - spin_lock_irqsave(&meson->lock, flags); - if (!state->enabled) { meson_pwm_disable(meson, pwm->hwpwm); channel->state.enabled = false; - goto unlock; + return 0; } if (state->period != channel->state.period || @@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, err = meson_pwm_calc(meson, channel, pwm->hwpwm, state->duty_cycle, state->period); if (err < 0) - goto unlock; + return err; channel->state.polarity = state->polarity; channel->state.period = state->period; @@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, channel->state.enabled = true; } -unlock: - spin_unlock_irqrestore(&meson->lock, flags); - return err; + return 0; } static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index a7eaf962a95b15725e43426edca3bd0f4d28acf6..567f5e2771c47288488063f49363fd3e0b4eaab7 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -176,7 +176,6 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) pm_runtime_put(pca->chip.dev); mutex_lock(&pca->lock); pwm = &pca->chip.pwms[offset]; - pwm_set_chip_data(pwm, NULL); mutex_unlock(&pca->lock); } diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index 062f2cfc45ec69545d3150c212d346597d76335e..3762432dd6a7fe7c245d6d4b843a942e92a6af4b 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -238,7 +238,6 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm) static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm) { devm_kfree(chip->dev, pwm_get_chip_data(pwm)); - pwm_set_chip_data(pwm, NULL); } static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm) diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 0059b24cfdc3c39579b9e70b11b9f0271399db1a..28e1f64134763486d698183d892a50d2121fc1dd 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -58,6 +58,12 
@@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* Calculate the period and prescaler value */ div = (unsigned long long)clk_get_rate(priv->clk) * state->period; do_div(div, NSEC_PER_SEC); + if (!div) { + /* Clock is too slow to achieve requested period. */ + dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period); + return -EINVAL; + } + prd = div; while (div > STM32_LPTIM_MAX_ARR) { presc++; diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index f7b8a86fa5c5e9570a616ccbcdb61e7d427b8ae3..ad4a40c0f27cf45271e57aee9e8ee4e168118695 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) } /* Update shadow register first before modifying active register */ + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, + AQSFRC_RLDCSF_ZRO); ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); /* * Changes to immediate action on Action Qualifier. This puts diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 7c71cdb8a9d8f92102b5875d120b83a3364fb6d0..72bdda4ccebfd27d27a07205c6d4aa03299a567b 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -263,7 +263,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export->pwm = pwm; mutex_init(&export->lock); - export->child.class = parent->class; export->child.release = pwm_export_release; export->child.parent = parent; export->child.devt = MKDEV(0, 0); @@ -399,19 +398,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip) } void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ - struct device *parent; - - parent = class_find_device(&pwm_class, NULL, chip, - pwmchip_sysfs_match); - if (parent) { - /* for class_find_device() */ - put_device(parent); - device_unregister(parent); - } -} - -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) { struct device *parent; unsigned int i; @@ -429,6 +415,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) } put_device(parent); + device_unregister(parent); } static int __init pwm_sysfs_init(void) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index cbe467ff1aba9d8ca72588f51856f2239dd4d0d9..fa0bbda4b3f2e5fad5e95572fe174ffe631a46cb 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1688,6 +1688,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, dev_info.comptag, dev_info.destid, dev_info.hopcount); @@ -1819,6 +1820,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; mport = priv->md->mport; diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index 2d9ec378a8bc34c92019cadb515315717fd2a7f2..dbea02b9edf4b0fe74fb58ace49a83dc9896e55d 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -2,6 +2,7 @@ #include #include #include +#include #include @@ -123,16 +124,12 @@ static u64 dfs_pfn; /* Amount of errors after which we offline */ static unsigned int count_threshold = COUNT_MASK; -/* - * The timer "decays" element count each timer_interval which is 24hrs by - * default. 
- */ - -#define CEC_TIMER_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ -#define CEC_TIMER_MIN_INTERVAL 1 * 60 * 60 /* 1h */ -#define CEC_TIMER_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ -static struct timer_list cec_timer; -static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL; +/* Each element "decays" each decay_interval which is 24hrs by default. */ +#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */ +#define CEC_DECAY_MIN_INTERVAL 1 * 60 * 60 /* 1h */ +#define CEC_DECAY_MAX_INTERVAL 30 * 24 * 60 * 60 /* one month */ +static struct delayed_work cec_work; +static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL; /* * Decrement decay value. We're using DECAY_BITS bits to denote decay of an @@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca) /* * @interval in seconds */ -static void cec_mod_timer(struct timer_list *t, unsigned long interval) +static void cec_mod_work(unsigned long interval) { unsigned long iv; - iv = interval * HZ + jiffies; - - mod_timer(t, round_jiffies(iv)); + iv = interval * HZ; + mod_delayed_work(system_wq, &cec_work, round_jiffies(iv)); } -static void cec_timer_fn(struct timer_list *unused) +static void cec_work_fn(struct work_struct *work) { + mutex_lock(&ce_mutex); do_spring_cleaning(&ce_arr); + mutex_unlock(&ce_mutex); - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); } /* @@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused) */ static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to) { + int min = 0, max = ca->n - 1; u64 this_pfn; - int min = 0, max = ca->n; - while (min < max) { - int tmp = (max + min) >> 1; + while (min <= max) { + int i = (min + max) >> 1; - this_pfn = PFN(ca->array[tmp]); + this_pfn = PFN(ca->array[i]); if (this_pfn < pfn) - min = tmp + 1; + min = i + 1; else if (this_pfn > pfn) - max = tmp; - else { - min = tmp; - break; + max = i - 1; + else if (this_pfn == pfn) { + if (to) + *to = i; + + return i; } } + /* + * When the loop terminates without finding @pfn, min has the index of + * the element slot where the new @pfn should be inserted. The loop + * terminates when min > max, which means the min index points to the + * bigger element while the max index to the smaller element, in-between + * which the new @pfn belongs to. + * + * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3. 
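The reworked __find_elem() above relies on a standard property of binary search, spelled out in the new comment: when the key is absent, the loop terminates with min pointing at the slot where the key would have to be inserted to keep the array sorted. A small self-contained sketch of that contract follows (plain C; the find_elem name, the array contents, and the main() harness are invented for the example):

#include <stdio.h>

/*
 * Returns the index of key in a[0..n-1] (sorted ascending) and stores it in
 * *to; if key is absent, returns -1 and *to holds the insertion point that
 * keeps the array sorted -- the same contract the CEC lookup uses.
 */
static int find_elem(const unsigned long long *a, int n,
		     unsigned long long key, int *to)
{
	int min = 0, max = n - 1;

	while (min <= max) {
		int i = (min + max) / 2;

		if (a[i] < key)
			min = i + 1;
		else if (a[i] > key)
			max = i - 1;
		else {
			*to = i;
			return i;
		}
	}
	/* Loop ended with min > max: min is where key should be inserted. */
	*to = min;
	return -1;
}

int main(void)
{
	unsigned long long a[] = { 10, 20, 40, 80 };
	int to;

	find_elem(a, 4, 40, &to);	/* found: index 2 */
	printf("40 -> slot %d\n", to);
	find_elem(a, 4, 25, &to);	/* absent: insertion point 2, between 20 and 40 */
	printf("25 -> slot %d\n", to);
	find_elem(a, 4, 100, &to);	/* absent: insertion point is n (append at the end) */
	printf("100 -> slot %d\n", to);
	return 0;
}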
+ */ if (to) *to = min; - this_pfn = PFN(ca->array[min]); - - if (this_pfn == pfn) - return min; - return -ENOKEY; } @@ -365,7 +369,9 @@ static int pfn_set(void *data, u64 val) { *(u64 *)data = val; - return cec_add_elem(val); + cec_add_elem(val); + + return 0; } DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n"); @@ -374,15 +380,15 @@ static int decay_interval_set(void *data, u64 val) { *(u64 *)data = val; - if (val < CEC_TIMER_MIN_INTERVAL) + if (val < CEC_DECAY_MIN_INTERVAL) return -EINVAL; - if (val > CEC_TIMER_MAX_INTERVAL) + if (val > CEC_DECAY_MAX_INTERVAL) return -EINVAL; - timer_interval = val; + decay_interval = val; - cec_mod_timer(&cec_timer, timer_interval); + cec_mod_work(decay_interval); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n"); @@ -426,7 +432,7 @@ static int array_dump(struct seq_file *m, void *v) seq_printf(m, "Flags: 0x%x\n", ca->flags); - seq_printf(m, "Timer interval: %lld seconds\n", timer_interval); + seq_printf(m, "Decay interval: %lld seconds\n", decay_interval); seq_printf(m, "Decays: %lld\n", ca->decays_done); seq_printf(m, "Action threshold: %d\n", count_threshold); @@ -472,7 +478,7 @@ static int __init create_debugfs_nodes(void) } decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d, - &timer_interval, &decay_interval_ops); + &decay_interval, &decay_interval_ops); if (!decay) { pr_warn("Error creating decay_interval debugfs node!\n"); goto err; @@ -505,11 +511,13 @@ void __init cec_init(void) return; } - if (create_debugfs_nodes()) + if (create_debugfs_nodes()) { + free_page((unsigned long)ce_arr.array); return; + } - timer_setup(&cec_timer, cec_timer_fn, 0); - cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL); + INIT_DELAYED_WORK(&cec_work, cec_work_fn); + schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL); pr_info("Correctable Errors collector initialized.\n"); } diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c index 3f38907320dccd963246fb22008f35f8e9f52753..a526f124a5ff83dfcde76d0ffdd4c28e109a3688 100644 --- a/drivers/ras/ras.c +++ b/drivers/ras/ras.c @@ -21,9 +21,51 @@ void log_non_standard_event(const uuid_le *sec_type, const uuid_le *fru_id, trace_non_standard_event(sec_type, fru_id, fru_text, sev, err, len); } -void log_arm_hw_error(struct cper_sec_proc_arm *err) +void log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev) { - trace_arm_event(err); + u32 pei_len; + u32 ctx_len = 0; + u32 vsei_len; + u8 *pei_err; + u8 *ctx_err; + u8 *ven_err_data; + struct cper_arm_err_info *err_info; + struct cper_arm_ctx_info *ctx_info; + int n, sz; + int cpu; + + pei_len = sizeof(struct cper_arm_err_info) * err->err_info_num; + pei_err = (u8 *)err + sizeof(struct cper_sec_proc_arm); + + err_info = (struct cper_arm_err_info *)(err + 1); + ctx_info = (struct cper_arm_ctx_info *)(err_info + err->err_info_num); + ctx_err = (u8 *)ctx_info; + for (n = 0; n < err->context_info_num; n++) { + sz = sizeof(struct cper_arm_ctx_info) + ctx_info->size; + ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + sz); + ctx_len += sz; + } + + vsei_len = err->section_length - (sizeof(struct cper_sec_proc_arm) + + pei_len + ctx_len); + if (vsei_len < 0) { + printk(FW_BUG + "section length: %d\n", err->section_length); + printk(FW_BUG + "section length is too small\n"); + pr_warn(FW_BUG + "firmware-generated error record is incorrect\n"); + vsei_len = 0; + } + ven_err_data = (u8 *)ctx_info; + + cpu = GET_LOGICAL_INDEX(err->mpidr); + /* when the return value is invalid, set cpu index to 
a large integer */ + if (cpu < 0) + cpu = 0xFFFF; + + trace_arm_event(err, pei_err, pei_len, ctx_err, ctx_len, + ven_err_data, vsei_len, sev, cpu); } static int __init ras_init(void) diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800-regulator.c similarity index 100% rename from drivers/regulator/88pm800.c rename to drivers/regulator/88pm800-regulator.c diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 801d9a34a2037c5b7c7bb8e8c93a62ad1a9aebf1..bba9c4851faf30e6fb6d7fd7bc80fbc56dc66ffa 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o -obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o +obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index 83dba3fbfe0cfcded4d3f8624bd59030ca08dcfa..60578820432d5c6e7dbcfa22674af42c0404a30f 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c @@ -956,23 +956,6 @@ static struct ab8500_regulator_info .update_val_idle = 0x82, .update_val_normal = 0x02, }, - [AB8505_LDO_USB] = { - .desc = { - .name = "LDO-USB", - .ops = &ab8500_regulator_mode_ops, - .type = REGULATOR_VOLTAGE, - .id = AB8505_LDO_USB, - .owner = THIS_MODULE, - .n_voltages = 1, - .volt_table = fixed_3300000_voltage, - }, - .update_bank = 0x03, - .update_reg = 0x82, - .update_mask = 0x03, - .update_val = 0x01, - .update_val_idle = 0x03, - .update_val_normal = 0x01, - }, [AB8505_LDO_AUDIO] = { .desc = { .name = "LDO-AUDIO", diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c index 21e20483bd918b83e91cf5d8d14e73b4c208b99a..e0239cf3f56d77ec1e2ee4f0f91a82184929ea42 100644 --- a/drivers/regulator/act8865-regulator.c +++ b/drivers/regulator/act8865-regulator.c @@ -131,7 +131,7 @@ * ACT8865 voltage number */ #define ACT8865_VOLTAGE_NUM 64 -#define ACT8600_SUDCDC_VOLTAGE_NUM 255 +#define ACT8600_SUDCDC_VOLTAGE_NUM 256 struct act8865 { struct regmap *regmap; @@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = { REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0), REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000), REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000), - REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000), + REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000), + REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0), }; static struct regulator_ops act8865_ops = { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9577d89418468a06f1030ff69d2699f71b5710bc..4bab758d14b1abd0e7ecdcb42da3bbcdc082a625 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1724,8 +1724,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id, regulator = create_regulator(rdev, dev, id); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); - put_device(&rdev->dev); module_put(rdev->owner); + put_device(&rdev->dev); return regulator; } @@ -1851,13 +1851,13 @@ static void _regulator_put(struct regulator *regulator) rdev->open_count--; rdev->exclusive = 0; - put_device(&rdev->dev); regulator_unlock(rdev); kfree_const(regulator->supply_name); kfree(regulator); module_put(rdev->owner); + put_device(&rdev->dev); } /** @@ -4789,7 
+4789,7 @@ static int __init regulator_init(void) /* init early to allow our consumers to complete system booting */ core_initcall(regulator_init); -static int __init regulator_late_cleanup(struct device *dev, void *data) +static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); const struct regulator_ops *ops = rdev->desc->ops; @@ -4838,17 +4838,8 @@ static int __init regulator_late_cleanup(struct device *dev, void *data) return 0; } -static int __init regulator_init_complete(void) +static void regulator_init_complete_work_function(struct work_struct *work) { - /* - * Since DT doesn't provide an idiomatic mechanism for - * enabling full constraints and since it's much more natural - * with DT to provide them just assume that a DT enabled - * system has full constraints. - */ - if (of_have_populated_dt()) - has_full_constraints = true; - /* * Regulators may had failed to resolve their input supplies * when were registered, either because the input supply was @@ -4866,6 +4857,35 @@ static int __init regulator_init_complete(void) */ class_for_each_device(®ulator_class, NULL, NULL, regulator_late_cleanup); +} + +static DECLARE_DELAYED_WORK(regulator_init_complete_work, + regulator_init_complete_work_function); + +static int __init regulator_init_complete(void) +{ + /* + * Since DT doesn't provide an idiomatic mechanism for + * enabling full constraints and since it's much more natural + * with DT to provide them just assume that a DT enabled + * system has full constraints. + */ + if (of_have_populated_dt()) + has_full_constraints = true; + + /* + * We punt completion for an arbitrary amount of time since + * systems like distros will load many drivers from userspace + * so consumers might not always be ready yet, this is + * particularly an issue with laptops where this might bounce + * the display off then on. Ideally we'd get a notification + * from userspace when this happens but we don't so just wait + * a bit and hope we waited long enough. It'd be better if + * we'd only do this on systems that need it, and a kernel + * command line option might be useful. 
+ */ + schedule_delayed_work(®ulator_init_complete_work, + msecs_to_jiffies(30000)); class_for_each_device(®ulator_class, NULL, NULL, regulator_register_fill_coupling_array); diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 6c122b3df5d03529e5dc8b8046fbd92df31a2c30..8847d15c2b90f617131caa5cd0d270f6cfc72d36 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -469,6 +469,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c, chip->chip_irq = i2c->irq; + ret = da9211_regulator_init(chip); + if (ret < 0) { + dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret); + return ret; + } + if (chip->chip_irq != 0) { ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL, da9211_irq_handler, @@ -483,11 +489,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c, dev_warn(chip->dev, "No IRQ configured\n"); } - ret = da9211_regulator_init(chip); - - if (ret < 0) - dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret); - return ret; } diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c index b615a413ca9f6ff3f2313349be5edd38dabb0855..27c0a67cfd0e290ad2d3a9ac65605e7ea88112b1 100644 --- a/drivers/regulator/lm363x-regulator.c +++ b/drivers/regulator/lm363x-regulator.c @@ -33,7 +33,7 @@ /* LM3632 */ #define LM3632_BOOST_VSEL_MAX 0x26 -#define LM3632_LDO_VSEL_MAX 0x29 +#define LM3632_LDO_VSEL_MAX 0x28 #define LM3632_VBOOST_MIN 4500000 #define LM3632_VLDO_MIN 4000000 diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c index b94e3a721721b26449be79a49f287411a60d33b0..cd93cf53e23c08c951346b9ff895a4a40a699d01 100644 --- a/drivers/regulator/max77620-regulator.c +++ b/drivers/regulator/max77620-regulator.c @@ -1,7 +1,7 @@ /* * Maxim MAX77620 Regulator driver * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. 
* * Author: Mallikarjun Kasoju * Laxman Dewangan @@ -803,6 +803,14 @@ static int max77620_regulator_probe(struct platform_device *pdev) rdesc = &rinfo[id].desc; pmic->rinfo[id] = &max77620_regs_info[id]; pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL; + pmic->reg_pdata[id].active_fps_src = -1; + pmic->reg_pdata[id].active_fps_pd_slot = -1; + pmic->reg_pdata[id].active_fps_pu_slot = -1; + pmic->reg_pdata[id].suspend_fps_src = -1; + pmic->reg_pdata[id].suspend_fps_pd_slot = -1; + pmic->reg_pdata[id].suspend_fps_pu_slot = -1; + pmic->reg_pdata[id].power_ok = -1; + pmic->reg_pdata[id].ramp_rate_setting = -1; ret = max77620_read_slew_rate(pmic, id); if (ret < 0) diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index 860400d2cd8591522ca3d4ff4366459cbdc2397e..a8f2f07239fb9dddf05503556af4de06ca4ec708 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -299,7 +299,10 @@ static int max8907_regulator_probe(struct platform_device *pdev) memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); /* Backwards compatibility with MAX8907B; SD1 uses different voltages */ - regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); + ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); + if (ret) + return ret; + if ((val & MAX8907_II2RR_VERSION_MASK) == MAX8907_II2RR_VERSION_REV_B) { pmic->desc[MAX8907_SD1].min_uV = 637500; @@ -336,14 +339,20 @@ static int max8907_regulator_probe(struct platform_device *pdev) } if (pmic->desc[i].ops == &max8907_ldo_ops) { - regmap_read(config.regmap, pmic->desc[i].enable_reg, + ret = regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); + if (ret) + return ret; + if ((val & MAX8907_MASK_LDO_SEQ) != MAX8907_MASK_LDO_SEQ) pmic->desc[i].ops = &max8907_ldo_hwctl_ops; } else if (pmic->desc[i].ops == &max8907_out5v_ops) { - regmap_read(config.regmap, pmic->desc[i].enable_reg, + ret = regmap_read(config.regmap, pmic->desc[i].enable_reg, &val); + if (ret) + return ret; + if ((val & (MAX8907_MASK_OUT5V_VINEN | MAX8907_MASK_OUT5V_ENSRC)) != MAX8907_MASK_OUT5V_ENSRC) diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 210fc20f7de7a9cd26dbbee68e24a7c0bc2dc94a..b255590aef36eeb79426553175074d8efede208e 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -214,12 +214,12 @@ static void of_get_regulation_constraints(struct device_node *np, "regulator-off-in-suspend")) suspend_state->enabled = DISABLE_IN_SUSPEND; - if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", - &pval)) + if (!of_property_read_u32(suspend_np, + "regulator-suspend-min-microvolt", &pval)) suspend_state->min_uV = pval; - if (!of_property_read_u32(np, "regulator-suspend-max-microvolt", - &pval)) + if (!of_property_read_u32(suspend_np, + "regulator-suspend-max-microvolt", &pval)) suspend_state->max_uV = pval; if (!of_property_read_u32(suspend_np, diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index bb5ab7d78895b817a9d42e03e6bd890d6b99e33e..c2cc392a27d4075ffc40dc123a78775d0e034a8f 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -443,13 +443,16 @@ static int palmas_ldo_write(struct palmas *palmas, unsigned int reg, static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode) { int id = rdev_get_id(dev); + int ret; struct palmas_pmic *pmic = rdev_get_drvdata(dev); struct palmas_pmic_driver_data *ddata = 
pmic->palmas->pmic_ddata; struct palmas_regs_info *rinfo = &ddata->palmas_regs_info[id]; unsigned int reg; bool rail_enable = true; - palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, ®); + ret = palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, ®); + if (ret) + return ret; reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 31c3a236120a80909ede26f29caa5288011d8cbf..69a377ab260413db25d7d82263f63d37380dc645 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -710,7 +710,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client, /* SW2~SW4 high bit check and modify the voltage value table */ if (i >= sw_check_start && i <= sw_check_end) { - regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); + ret = regmap_read(pfuze_chip->regmap, + desc->vsel_reg, &val); + if (ret) { + dev_err(&client->dev, "Fails to read from the register.\n"); + return ret; + } + if (val & sw_hi) { if (pfuze_chip->chip_id == PFUZE3000 || pfuze_chip->chip_id == PFUZE3001) { diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index 790a4a73ea2c8f94c3fb80c8567827d0e8b0860b..40b74648bd31137782adb6d5015794d173354b90 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c @@ -154,6 +154,7 @@ static struct platform_driver rn5t618_regulator_driver = { module_platform_driver(rn5t618_regulator_driver); +MODULE_ALIAS("platform:rn5t618-regulator"); MODULE_AUTHOR("Beniamino Galvani "); MODULE_DESCRIPTION("RN5T618 regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 095d25f3d2eae1fbc3220e2628f678c4e4498294..58a1fe583a6c939bb9a7e9964ee493dd3e2e2726 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(2, STEP_50_MV), regulator_desc_ldo(3, STEP_50_MV), regulator_desc_ldo(4, STEP_50_MV), - regulator_desc_ldo(5, STEP_50_MV), + regulator_desc_ldo(5, STEP_25_MV), regulator_desc_ldo(6, STEP_25_MV), regulator_desc_ldo(7, STEP_50_MV), regulator_desc_ldo(8, STEP_50_MV), regulator_desc_ldo(9, STEP_50_MV), regulator_desc_ldo(10, STEP_50_MV), - regulator_desc_ldo(11, STEP_25_MV), + regulator_desc_ldo(11, STEP_50_MV), regulator_desc_ldo(12, STEP_50_MV), regulator_desc_ldo(13, STEP_50_MV), regulator_desc_ldo(14, STEP_50_MV), @@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(19, STEP_50_MV), regulator_desc_ldo(20, STEP_50_MV), regulator_desc_ldo(21, STEP_50_MV), - regulator_desc_ldo(22, STEP_25_MV), - regulator_desc_ldo(23, STEP_25_MV), + regulator_desc_ldo(22, STEP_50_MV), + regulator_desc_ldo(23, STEP_50_MV), regulator_desc_ldo(24, STEP_50_MV), regulator_desc_ldo(25, STEP_50_MV), - regulator_desc_ldo(26, STEP_50_MV), + regulator_desc_ldo(26, STEP_25_MV), regulator_desc_buck1_4(1), regulator_desc_buck1_4(2), regulator_desc_buck1_4(3), diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 5bb6f4ca48db8e0abb49b7af9b419829031b41f9..7c598c156d9e17eac95bf06e8e0d9dff2ab33cb3 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -363,7 +363,7 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_ldo(32, STEP_50_MV), regulator_desc_s2mps11_ldo(33, STEP_50_MV), regulator_desc_s2mps11_ldo(34, STEP_50_MV), - 
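The max8907, palmas and pfuze100 hunks above stop ignoring regmap_read() return values. A small sketch of the pattern, assuming a plain regmap-backed device; the register, mask and function name are illustrative only.

#include <linux/regmap.h>

static int read_chip_rev(struct regmap *map, unsigned int reg, unsigned int *rev)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;		/* propagate the bus error, don't use stale data */

	*rev = val & 0xf0;		/* mask is illustrative only */
	return 0;
}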
regulator_desc_s2mps11_ldo(35, STEP_50_MV), + regulator_desc_s2mps11_ldo(35, STEP_25_MV), regulator_desc_s2mps11_ldo(36, STEP_50_MV), regulator_desc_s2mps11_ldo(37, STEP_50_MV), regulator_desc_s2mps11_ldo(38, STEP_50_MV), @@ -373,8 +373,8 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_buck1_4(4), regulator_desc_s2mps11_buck5, regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV), + regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV), regulator_desc_s2mps11_buck9, regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV), }; diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index cced1ffb896c1169dba81bde5c9b83a63890ceab..89b9314d64c9dbe5fe152b06c252b1567600857c 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb) while (timeout++ <= abb->settling_time) { status = ti_abb_check_txdone(abb); if (status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** @@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb) status = ti_abb_check_txdone(abb); if (!status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 02ccdaa226a73f97a5fc812f5173c2bef44d25f4..5ebb6ee73f0770283eff5c15f5c71d0b56fe08bb 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -1102,8 +1102,10 @@ static int tps65910_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pmic); /* Give control of all register to control port */ - tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL, + err = tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL, DEVCTRL_SR_CTL_I2C_SEL_MASK); + if (err < 0) + return err; switch (tps65910_chip_id(tps65910)) { case TPS65910: diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c index e230bef71be1c67abb92e44ca540545bdf3333ff..d200334577f68f79e0a0c990a8d9044b6409d85b 100644 --- a/drivers/remoteproc/da8xx_remoteproc.c +++ b/drivers/remoteproc/da8xx_remoteproc.c @@ -226,7 +226,7 @@ static int da8xx_rproc_get_internal_memories(struct platform_device *pdev, res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK; drproc->mem[i].size = resource_size(res); - dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n", + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n", mem_names[i], &drproc->mem[i].bus_addr, drproc->mem[i].size, drproc->mem[i].cpu_addr, drproc->mem[i].dev_addr); diff --git a/drivers/remoteproc/qcom_q6v5.c 
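The ti-abb rework above flattens the timeout polling: success returns straight from the loop and only the failure path remains after it. A sketch of that shape, assuming a memory-mapped status bit and a microsecond-granularity budget; the names are placeholders.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int poll_done_bit(void __iomem *reg, u32 mask, unsigned int timeout_us)
{
	unsigned int t = 0;

	while (t++ <= timeout_us) {
		if (readl(reg) & mask)
			return 0;	/* success leaves the loop immediately */
		udelay(1);
	}

	return -ETIMEDOUT;		/* only the failure path survives the loop */
}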
b/drivers/remoteproc/qcom_q6v5.c index 61a760ee4aacc9c526defdf73df7806a46258b74..0d33e3079f0dc2abb0cab3bca2a8bf46a885d58f 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -84,6 +84,7 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data) else dev_err(q6v5->dev, "fatal error without message\n"); + q6v5->running = false; rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR); return IRQ_HANDLED; @@ -150,8 +151,6 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; - q6v5->running = false; - qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); @@ -188,6 +187,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, init_completion(&q6v5->stop_done); q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog"); + if (q6v5->wdog_irq < 0) { + if (q6v5->wdog_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to retrieve wdog IRQ: %d\n", + q6v5->wdog_irq); + return q6v5->wdog_irq; + } + ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq, NULL, q6v5_wdog_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -198,6 +205,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal"); + if (q6v5->fatal_irq < 0) { + if (q6v5->fatal_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to retrieve fatal IRQ: %d\n", + q6v5->fatal_irq); + return q6v5->fatal_irq; + } + ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq, NULL, q6v5_fatal_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -208,6 +223,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->ready_irq = platform_get_irq_byname(pdev, "ready"); + if (q6v5->ready_irq < 0) { + if (q6v5->ready_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to retrieve ready IRQ: %d\n", + q6v5->ready_irq); + return q6v5->ready_irq; + } + ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq, NULL, q6v5_ready_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -218,6 +241,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, } q6v5->handover_irq = platform_get_irq_byname(pdev, "handover"); + if (q6v5->handover_irq < 0) { + if (q6v5->handover_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to retrieve handover IRQ: %d\n", + q6v5->handover_irq); + return q6v5->handover_irq; + } + ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq, NULL, q6v5_handover_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, @@ -229,6 +260,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, disable_irq(q6v5->handover_irq); q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack"); + if (q6v5->stop_irq < 0) { + if (q6v5->stop_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to retrieve stop-ack IRQ: %d\n", + q6v5->stop_irq); + return q6v5->stop_irq; + } + ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq, NULL, q6v5_stop_interrupt, IRQF_TRIGGER_RISING | IRQF_ONESHOT, diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index d7a4b9eca5d25bc6d7d37257f23bb424bc45541e..6a84b6372897dfce0b6cb640786e15f60574e8a7 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -1132,6 +1132,9 @@ static int q6v5_probe(struct platform_device *pdev) if (!desc) return -EINVAL; + if (desc->need_mem_protection && !qcom_scm_is_available()) + return -EPROBE_DEFER; + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, desc->hexagon_mba_image, 
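The qcom_q6v5 hunks above validate every platform_get_irq_byname() result before requesting the interrupt, and stay quiet on -EPROBE_DEFER. A condensed sketch of that shape; the helper name is not part of the driver, and the flags simply mirror the hunk.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int request_named_irq(struct platform_device *pdev, const char *name,
			     irq_handler_t thread_fn, void *data)
{
	int irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		/* only report real failures; deferral is expected noise */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve %s IRQ: %d\n",
				name, irq);
		return irq;
	}

	return devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 name, data);
}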
sizeof(*qproc)); if (!rproc) { diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 47be411400e56aed1b48f44d4254e0178a640412..3a4c3d7cafca35f7752c9ddb402a643549be4050 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -48,6 +48,11 @@ static ssize_t firmware_store(struct device *dev, } len = strcspn(buf, "\n"); + if (!len) { + dev_err(dev, "can't provide a NULL firmware\n"); + err = -EINVAL; + goto out; + } p = kstrndup(buf, len, GFP_KERNEL); if (!p) { diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 225e34c56b94a2e315f17a598bd7a74d6dae1932..f7bf20493f23e9d5eedb9c3c6361a8f890272254 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -496,28 +496,29 @@ struct reset_control *__of_reset_control_get(struct device_node *node, break; } } - of_node_put(args.np); if (!rcdev) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(-EPROBE_DEFER); + rstc = ERR_PTR(-EPROBE_DEFER); + goto out; } if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(-EINVAL); + rstc = ERR_PTR(-EINVAL); + goto out; } rstc_id = rcdev->of_xlate(rcdev, &args); if (rstc_id < 0) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(rstc_id); + rstc = ERR_PTR(rstc_id); + goto out; } /* reset_list_mutex also protects the rcdev's reset_control list */ rstc = __reset_control_get_internal(rcdev, rstc_id, shared); +out: mutex_unlock(&reset_list_mutex); + of_node_put(args.np); return rstc; } @@ -606,6 +607,7 @@ static void reset_control_array_put(struct reset_control_array *resets) for (i = 0; i < resets->num_rstcs; i++) __reset_control_put_internal(resets->rstc[i]); mutex_unlock(&reset_list_mutex); + kfree(resets); } /** diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37af33b6241be4d13647775d5444881..c53a2185a0393c689c631d7df7f1b760cb2b72e0 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); arb->rstc.ops = &meson_audio_arb_rstc_ops; arb->rstc.of_node = dev->of_node; + arb->rstc.owner = THIS_MODULE; /* * Enable general : diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index e2ce4e638258b7a64de8e9f41e6167f09aaf8c74..25c394a7077b8a729a8d6c669acd51d65c26448e 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref) { struct glink_channel *channel = container_of(ref, struct glink_channel, refcount); + struct glink_core_rx_intent *intent; + struct glink_core_rx_intent *tmp; unsigned long flags; + int iid; + + /* cancel pending rx_done work */ + cancel_work_sync(&channel->intent_work); spin_lock_irqsave(&channel->intent_lock, flags); + /* Free all non-reuse intents pending rx_done work */ + list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) { + if (!intent->reuse) { + kfree(intent->data); + kfree(intent); + } + } + + idr_for_each_entry(&channel->liids, tmp, iid) { + kfree(tmp->data); + kfree(tmp); + } idr_destroy(&channel->liids); + + idr_for_each_entry(&channel->riids, tmp, iid) + kfree(tmp); idr_destroy(&channel->riids); spin_unlock_irqrestore(&channel->intent_lock, flags); @@ -1097,13 +1118,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink, close_link: 
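The reset/core.c change above funnels every exit through one label so the mutex is always dropped and the OF node reference is put exactly once, after the unlock. A reduced sketch of the single-exit pattern; the lookup itself and all names are placeholders.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/reset.h>

static DEFINE_MUTEX(lookup_lock);

static struct reset_control *lookup_example(struct device_node *np,
					    bool provider_found, int id)
{
	struct reset_control *rstc;

	mutex_lock(&lookup_lock);

	if (!provider_found) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}
	if (id < 0) {
		rstc = ERR_PTR(id);
		goto out;
	}

	rstc = NULL;		/* stands in for the real internal lookup */
out:
	mutex_unlock(&lookup_lock);
	of_node_put(np);	/* put the node only after dropping the lock */
	return rstc;
}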
/* * Send a close request to "undo" our open-ack. The close-ack will - * release the last reference. + * release qcom_glink_send_open_req() reference and the last reference + * will be relesed after receiving remote_close or transport unregister + * by calling qcom_glink_native_remove(). */ qcom_glink_send_close_req(glink, channel); - /* Release qcom_glink_send_open_req() reference */ - kref_put(&channel->refcount, qcom_glink_channel_release); - return ret; } @@ -1418,15 +1438,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, ret = rpmsg_register_device(rpdev); if (ret) - goto free_rpdev; + goto rcid_remove; channel->rpdev = rpdev; } return 0; -free_rpdev: - kfree(rpdev); rcid_remove: spin_lock_irqsave(&glink->idr_lock, flags); idr_remove(&glink->rcids, channel->rcid); @@ -1547,6 +1565,18 @@ static void qcom_glink_work(struct work_struct *work) } } +static void qcom_glink_cancel_rx_work(struct qcom_glink *glink) +{ + struct glink_defer_cmd *dcmd; + struct glink_defer_cmd *tmp; + + /* cancel any pending deferred rx_work */ + cancel_work_sync(&glink->rx_work); + + list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node) + kfree(dcmd); +} + struct qcom_glink *qcom_glink_native_probe(struct device *dev, unsigned long features, struct qcom_glink_pipe *rx, @@ -1622,23 +1652,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink) struct glink_channel *channel; int cid; int ret; - unsigned long flags; disable_irq(glink->irq); - cancel_work_sync(&glink->rx_work); + qcom_glink_cancel_rx_work(glink); ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device); if (ret) dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret); - spin_lock_irqsave(&glink->idr_lock, flags); /* Release any defunct local channels, waiting for close-ack */ idr_for_each_entry(&glink->lcids, channel, cid) kref_put(&channel->refcount, qcom_glink_channel_release); + /* Release any defunct local channels, waiting for close-req */ + idr_for_each_entry(&glink->rcids, channel, cid) + kref_put(&channel->refcount, qcom_glink_channel_release); + idr_destroy(&glink->lcids); idr_destroy(&glink->rcids); - spin_unlock_irqrestore(&glink->idr_lock, flags); mbox_free_channel(glink->mbox_chan); } EXPORT_SYMBOL_GPL(qcom_glink_native_remove); diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c index 2b5cf279095403f9ab54ffac2db194d4445b2503..bbd5759cc38ecff58c7d8190a441fc8e4a2f56fc 100644 --- a/drivers/rpmsg/qcom_glink_smem.c +++ b/drivers/rpmsg/qcom_glink_smem.c @@ -89,15 +89,11 @@ static void glink_smem_rx_peak(struct qcom_glink_pipe *np, tail -= pipe->native.length; len = min_t(size_t, count, pipe->native.length - tail); - if (len) { - __ioread32_copy(data, pipe->fifo + tail, - len / sizeof(u32)); - } + if (len) + memcpy_fromio(data, pipe->fifo + tail, len); - if (len != count) { - __ioread32_copy(data + len, pipe->fifo, - (count - len) / sizeof(u32)); - } + if (len != count) + memcpy_fromio(data + len, pipe->fifo, (count - len)); } static void glink_smem_rx_advance(struct qcom_glink_pipe *np, @@ -109,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np, tail = le32_to_cpu(*pipe->tail); tail += count; - if (tail > pipe->native.length) + if (tail >= pipe->native.length) tail -= pipe->native.length; *pipe->tail = cpu_to_le32(tail); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 8da83a4ebadc32143ec2950f3fda2a922f86812c..b2e5a6abf7d5ca76976e6e39dc200ba6f6e0815d 100644 --- a/drivers/rpmsg/qcom_smd.c +++ 
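qcom_glink_smem above switches the FIFO peek from __ioread32_copy() to memcpy_fromio(), so byte counts no longer have to be multiples of four. A sketch of the wrap-around copy, assuming the tail has already been normalised into the FIFO; names are illustrative.

#include <linux/io.h>
#include <linux/kernel.h>

static void fifo_peek(void *dst, void __iomem *fifo, size_t fifo_len,
		      size_t tail, size_t count)
{
	size_t len = min(count, fifo_len - tail);

	if (len)
		memcpy_fromio(dst, fifo + tail, len);

	if (len != count)		/* wrapped: copy the rest from the start */
		memcpy_fromio(dst + len, fifo, count - len);
}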
b/drivers/rpmsg/qcom_smd.c @@ -1122,8 +1122,10 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed channel->edge = edge; channel->name = kstrdup(name, GFP_KERNEL); - if (!channel->name) - return ERR_PTR(-ENOMEM); + if (!channel->name) { + ret = -ENOMEM; + goto free_channel; + } spin_lock_init(&channel->tx_lock); spin_lock_init(&channel->recv_lock); @@ -1173,6 +1175,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed free_name_and_channel: kfree(channel->name); +free_channel: kfree(channel); return ERR_PTR(ret); diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index a76b963a7e50f31e919c9e476de8d2161fad390f..2995647fb2c209908d2dbcd378850aa2f422428e 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -455,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev) ida_simple_remove(&rpmsg_ctrl_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); - cdev_del(&ctrldev->cdev); kfree(ctrldev); } @@ -490,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev) dev->id = ret; dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret); - ret = cdev_add(&ctrldev->cdev, dev->devt, 1); + ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev); if (ret) goto free_ctrl_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_ctrldev_release_device; - ret = device_add(dev); - if (ret) { - dev_err(&rpdev->dev, "device_add failed: %d\n", ret); - put_device(dev); - } - dev_set_drvdata(&rpdev->dev, ctrldev); return ret; @@ -528,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) if (ret) dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret); - device_del(&ctrldev->dev); + cdev_device_del(&ctrldev->cdev, &ctrldev->dev); put_device(&ctrldev->dev); } diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 664f957012cdee0247484fed2e157a30b7ec1fc9..aab990339051777c46e98e56aab077caa1030a4d 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -381,6 +381,7 @@ static void virtio_rpmsg_release_device(struct device *dev) struct rpmsg_device *rpdev = to_rpmsg_device(dev); struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev); + kfree(rpdev->driver_override); kfree(vch); } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 7d7be60a2413f060fe3f6e86b7ab51a9407f0c8f..eb1b0198b8a0882a73782235c34fe09e9ccbdfae 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1793,6 +1793,15 @@ config RTC_DRV_RTD119X If you say yes here, you get support for the RTD1295 SoC Real Time Clock. +config RTC_DRV_PHYTIUM + tristate "Phytium RTC" + depends on ARCH_PHYTIUM + help + Say yes here to support the Phytium SoC real time clock. + + This driver can also be built as a module, if so, the module + will be called "rtc-phytium". 
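rpmsg_char above replaces the separate cdev_add()/device_add() and device_del() calls with the combined cdev_device_add()/cdev_device_del() helpers, so the char device and its struct device come and go as one unit. A bare-bones sketch with a hypothetical control structure:

#include <linux/cdev.h>
#include <linux/device.h>

struct my_ctrldev {
	struct device dev;
	struct cdev cdev;
};

static int my_ctrldev_add(struct my_ctrldev *ctrl)
{
	/* one call instead of cdev_add() followed by device_add() */
	return cdev_device_add(&ctrl->cdev, &ctrl->dev);
}

static void my_ctrldev_remove(struct my_ctrldev *ctrl)
{
	cdev_device_del(&ctrl->cdev, &ctrl->dev);
	put_device(&ctrl->dev);	/* the release callback frees the structure */
}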
+ comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 5ff2fc0c361a84bcabe19642c9232b42cf0c25dd..289447233c6bac4812737a81ddfdf25eaa86f32a 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -120,6 +120,7 @@ obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o +obj-$(CONFIG_RTC_DRV_PHYTIUM) += rtc-phytium.o obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index e79f2a181ad24217a3e3bc232593184b82d494fd..b9ec4a16db1f6b6fd113c5661a28aa0e9153eeaa 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -50,8 +50,10 @@ static int __init rtc_hctosys(void) tv64.tv_sec = rtc_tm_to_time64(&tm); #if BITS_PER_LONG == 32 - if (tv64.tv_sec > INT_MAX) + if (tv64.tv_sec > INT_MAX) { + err = -ERANGE; goto err_read; + } #endif err = do_settimeofday64(&tv64); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 3d577e259e91b94f6e1b5abbce60bf3746308803..1ab619fb978a40ee24a660319ed45c605f16f5ec 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(rtc_read_time); int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) { - int err; + int err, uie; err = rtc_valid_tm(tm); if (err != 0) @@ -139,6 +139,17 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) rtc_subtract_offset(rtc, tm); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active; +#else + uie = rtc->uie_rtctimer.enabled; +#endif + if (uie) { + err = rtc_update_irq_enable(rtc, 0); + if (err) + return err; + } + err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; @@ -162,6 +173,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) /* A timer might have just expired */ schedule_work(&rtc->irqwork); + if (uie) { + err = rtc_update_irq_enable(rtc, 1); + if (err) + return err; + } + trace_rtc_set_time(rtc_tm_to_time64(tm), err); return err; } @@ -897,13 +914,18 @@ void rtc_timer_do_work(struct work_struct *work) struct timerqueue_node *next; ktime_t now; struct rtc_time tm; + int err; struct rtc_device *rtc = container_of(work, struct rtc_device, irqwork); mutex_lock(&rtc->ops_lock); again: - __rtc_read_time(rtc, &tm); + err = __rtc_read_time(rtc, &tm); + if (err) { + mutex_unlock(&rtc->ops_lock); + return; + } now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { if (next->expires > now) diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 01ffc0ef8033f850b864bbc2b9b499b279426303..fbcf13bbbd8d11b4bbe876c2f5e40f698250f8e8 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -414,7 +414,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev) struct pm860x_rtc_info *info = platform_get_drvdata(pdev); #ifdef VRTC_CALIBRATION - flush_scheduled_work(); + cancel_delayed_work_sync(&info->calib_work); /* disable measurement */ pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); #endif /* VRTC_CALIBRATION */ diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index bde53c8ccee2cbbcac52120dde4cc217e9c10f93..b74338d6dde60fe554b35838a03d9dea4fe7bd82 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -514,7 +514,6 @@ 
MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table); static __init int armada38x_rtc_probe(struct platform_device *pdev) { - const struct rtc_class_ops *ops; struct resource *res; struct armada38x_rtc *rtc; const struct of_device_id *match; @@ -551,6 +550,11 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "no irq\n"); return rtc->irq; } + + rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + if (devm_request_irq(&pdev->dev, rtc->irq, armada38x_rtc_alarm_irq, 0, pdev->name, rtc) < 0) { dev_warn(&pdev->dev, "Interrupt not available.\n"); @@ -560,28 +564,24 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) if (rtc->irq != -1) { device_init_wakeup(&pdev->dev, 1); - ops = &armada38x_rtc_ops; + rtc->rtc_dev->ops = &armada38x_rtc_ops; } else { /* * If there is no interrupt available then we can't * use the alarm */ - ops = &armada38x_rtc_ops_noirq; + rtc->rtc_dev->ops = &armada38x_rtc_ops_noirq; } rtc->data = (struct armada38x_rtc_data *)match->data; - /* Update RTC-MBUS bridge timing parameters */ rtc->data->update_mbus_timing(rtc); - rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, pdev->name, - ops, THIS_MODULE); - if (IS_ERR(rtc->rtc_dev)) { - ret = PTR_ERR(rtc->rtc_dev); + ret = rtc_register_device(rtc->rtc_dev); + if (ret) dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); - return ret; - } - return 0; + + return ret; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index cd3a2411bc2f57b9a4758e2480acef0ac6fe82fc..a5a19ff10535463d91d39d69ced1f13110ff139d 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -50,6 +50,7 @@ /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include +#ifdef CONFIG_ACPI /* * Use ACPI SCI to replace HPET interrupt for RTC Alarm event * @@ -61,6 +62,18 @@ static bool use_acpi_alarm; module_param(use_acpi_alarm, bool, 0444); +static inline int cmos_use_acpi_alarm(void) +{ + return use_acpi_alarm; +} +#else /* !CONFIG_ACPI */ + +static inline int cmos_use_acpi_alarm(void) +{ + return 0; +} +#endif + struct cmos_rtc { struct rtc_device *rtc; struct device *dev; @@ -167,9 +180,9 @@ static inline int hpet_unregister_irq_handler(irq_handler_t handler) #endif /* Don't use HPET for RTC Alarm event if ACPI Fixed event is used */ -static int use_hpet_alarm(void) +static inline int use_hpet_alarm(void) { - return is_hpet_enabled() && !use_acpi_alarm; + return is_hpet_enabled() && !cmos_use_acpi_alarm(); } /*----------------------------------------------------------------*/ @@ -244,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char rtc_control; + /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) return -EIO; @@ -340,7 +354,7 @@ static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask) if (use_hpet_alarm()) hpet_set_rtc_irq_bit(mask); - if ((mask & RTC_AIE) && use_acpi_alarm) { + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) { if (cmos->wake_on) cmos->wake_on(cmos->dev); } @@ -358,7 +372,7 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask) if (use_hpet_alarm()) hpet_mask_rtc_irq_bit(mask); - if ((mask & RTC_AIE) && use_acpi_alarm) { + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) { if (cmos->wake_off) cmos->wake_off(cmos->dev); } @@ -439,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, 
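rtc-cmos above hides its ACPI-only use_acpi_alarm parameter behind cmos_use_acpi_alarm() so non-ACPI builds compile the same call sites down to a constant. A generic sketch of that #ifdef-wrapper pattern; CONFIG_FOO, the variable and the helper name are all placeholders.

#ifdef CONFIG_FOO
static bool use_foo;		/* would be a module_param() on CONFIG_FOO builds */

static inline bool feature_enabled(void)
{
	return use_foo;
}
#else
static inline bool feature_enabled(void)
{
	return false;		/* callers need no #ifdef of their own */
}
#endif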
struct rtc_wkalrm *t) unsigned char mon, mday, hrs, min, sec, rtc_control; int ret; + /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) return -EIO; @@ -503,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned long flags; - if (!is_valid_irq(cmos->irq)) - return -EINVAL; - spin_lock_irqsave(&rtc_lock, flags); if (enabled) @@ -566,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = { .alarm_irq_enable = cmos_alarm_irq_enable, }; +static const struct rtc_class_ops cmos_rtc_ops_no_alarm = { + .read_time = cmos_read_time, + .set_time = cmos_set_time, + .proc = cmos_procfs, +}; + /*----------------------------------------------------------------*/ /* @@ -842,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq); goto cleanup1; } + + cmos_rtc.rtc->ops = &cmos_rtc_ops; + } else { + cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm; } - cmos_rtc.rtc->ops = &cmos_rtc_ops; cmos_rtc.rtc->nvram_old_abi = true; retval = rtc_register_device(cmos_rtc.rtc); if (retval) @@ -980,7 +1001,7 @@ static int cmos_suspend(struct device *dev) } spin_unlock_irq(&rtc_lock); - if ((tmp & RTC_AIE) && !use_acpi_alarm) { + if ((tmp & RTC_AIE) && !cmos_use_acpi_alarm()) { cmos->enabled_wake = 1; if (cmos->wake_on) cmos->wake_on(dev); @@ -1031,7 +1052,7 @@ static void cmos_check_wkalrm(struct device *dev) * ACPI RTC wake event is cleared after resume from STR, * ACK the rtc irq here */ - if (t_now >= cmos->alarm_expires && use_acpi_alarm) { + if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { cmos_interrupt(0, (void *)cmos->rtc); return; } @@ -1053,7 +1074,7 @@ static int __maybe_unused cmos_resume(struct device *dev) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; - if (cmos->enabled_wake && !use_acpi_alarm) { + if (cmos->enabled_wake && !cmos_use_acpi_alarm()) { if (cmos->wake_off) cmos->wake_off(dev); else @@ -1132,7 +1153,7 @@ static u32 rtc_handler(void *context) * Or else, ACPI SCI is enabled during suspend/resume only, * update rtc irq in that case. */ - if (use_acpi_alarm) + if (cmos_use_acpi_alarm()) cmos_interrupt(0, (void *)cmos->rtc); else { /* Fix me: can we use cmos_interrupt() here as well? 
*/ diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075ee147e74c93d35d3eb98e6a2b0c48..4d6bf9304ceb35932dfadbc921b2e658e5e3d2ec 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - enable_irq_wake(cros_ec_rtc->cros_ec->irq); + return enable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - disable_irq_wake(cros_ec_rtc->cros_ec->irq); + return disable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad9e54d23adb3a47da1008224906b3..69b54e5556c06234c5339431f3149bc923ebcf49 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) da9063_data_to_tm(data, &rtc->alarm_time, rtc); rtc->rtc_sync = false; + /* + * TODO: some models have alarms on a minute boundary but still support + * real hardware interrupts. Add this once the core supports it. + */ + if (config->rtc_data_start != RTC_SEC) + rtc->rtc_dev->uie_unsupported = 1; + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, da9063_alarm_event, diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4b2b4627daebfb89b77121d499110d7f553a7394..71396b62dc52b230fe33e87ae4ac235cb8e16bde 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -1384,7 +1384,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307) static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 0x9, }; static int ds1307_probe(struct i2c_client *client, diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c index 2751dba850c614f452d4f297e8bdbb461e9ae15d..3e1abb4554721c496f2c16532e9ed12590946f1b 100644 --- a/drivers/rtc/rtc-hid-sensor-time.c +++ b/drivers/rtc/rtc-hid-sensor-time.c @@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm) /* get a report with all values through requesting one value */ sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev, HID_USAGE_SENSOR_TIME, hid_time_addresses[0], - time_state->info[0].report_id, SENSOR_HUB_SYNC); + time_state->info[0].report_id, SENSOR_HUB_SYNC, false); /* wait for all values (event) */ ret = wait_for_completion_killable_timeout( &time_state->comp_last_time, HZ*6); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index ea18a8f4bce063a91587d4424c8d615788af5dfd..033f65aef5788f225fd6dffd4fa65ebe8b0f403a 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -518,7 +518,7 @@ static ssize_t timestamp0_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct i2c_client *client = dev_get_drvdata(dev); + struct i2c_client *client = to_i2c_client(dev->parent); int sr; sr = isl1208_i2c_get_sr(client); @@ -540,7 +540,7 @@ static ssize_t timestamp0_store(struct device *dev, static ssize_t timestamp0_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct i2c_client *client = dev_get_drvdata(dev); + struct i2c_client *client = to_i2c_client(dev->parent); u8 regs[ISL1219_EVT_SECTION_LEN] = { 
0, }; struct rtc_time tm; int sr; @@ -650,7 +650,7 @@ static ssize_t isl1208_sysfs_show_atrim(struct device *dev, struct device_attribute *attr, char *buf) { - int atr = isl1208_i2c_get_atr(to_i2c_client(dev)); + int atr = isl1208_i2c_get_atr(to_i2c_client(dev->parent)); if (atr < 0) return atr; @@ -663,7 +663,7 @@ static ssize_t isl1208_sysfs_show_dtrim(struct device *dev, struct device_attribute *attr, char *buf) { - int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev)); + int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev->parent)); if (dtr < 0) return dtr; @@ -676,7 +676,7 @@ static ssize_t isl1208_sysfs_show_usr(struct device *dev, struct device_attribute *attr, char *buf) { - int usr = isl1208_i2c_get_usr(to_i2c_client(dev)); + int usr = isl1208_i2c_get_usr(to_i2c_client(dev->parent)); if (usr < 0) return usr; @@ -701,7 +701,10 @@ isl1208_sysfs_store_usr(struct device *dev, if (usr < 0 || usr > 0xffff) return -EINVAL; - return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? -EIO : count; + if (isl1208_i2c_set_usr(to_i2c_client(dev->parent), usr)) + return -EIO; + + return count; } static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, @@ -765,7 +768,6 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) rtc->ops = &isl1208_rtc_ops; i2c_set_clientdata(client, rtc); - dev_set_drvdata(&rtc->dev, client); rc = isl1208_i2c_get_sr(client); if (rc < 0) { @@ -804,7 +806,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) evdet_irq = of_irq_get_byname(np, "evdet"); } - rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); + rc = rtc_add_group(rtc, &isl1208_rtc_sysfs_files); if (rc) return rc; @@ -821,14 +823,6 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) return rtc_register_device(rtc); } -static int -isl1208_remove(struct i2c_client *client) -{ - sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); - - return 0; -} - static const struct i2c_device_id isl1208_id[] = { { "isl1208", TYPE_ISL1208 }, { "isl1218", TYPE_ISL1218 }, @@ -851,7 +845,6 @@ static struct i2c_driver isl1208_driver = { .of_match_table = of_match_ptr(isl1208_of_match), }, .probe = isl1208_probe, - .remove = isl1208_remove, .id_table = isl1208_id, }; diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index 4a3c0f3aab1490feef3111c667d21dcfee4e5e62..48227002bbfd9a09e29b0b22dec04e18f97e95ee 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c @@ -100,7 +100,8 @@ int rtc_valid_tm(struct rtc_time *tm) if (tm->tm_year < 70 || ((unsigned)tm->tm_mon) >= 12 || tm->tm_mday < 1 - || tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year + 1900) + || tm->tm_year > (INT_MAX - 1900) + || tm->tm_mday > rtc_month_days(tm->tm_mon, ((unsigned)tm->tm_year + 1900)) || ((unsigned)tm->tm_hour) >= 24 || ((unsigned)tm->tm_min) >= 60 || ((unsigned)tm->tm_sec) >= 60) @@ -116,8 +117,8 @@ EXPORT_SYMBOL(rtc_valid_tm); */ time64_t rtc_tm_to_time64(struct rtc_time *tm) { - return mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + return mktime64(((unsigned)tm->tm_year + 1900), tm->tm_mon + 1, + tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); } EXPORT_SYMBOL(rtc_tm_to_time64); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index ad03e2f12f5d3abacb16174e9dbffc2ad34d8fc4..5808a1e4c2e9f6ae4b5a4f6ac82cb0bd20bbf4b0 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm 
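After the isl1208 conversion to rtc_add_group() above, the sysfs attributes hang off the RTC class device, so the owning I2C client is reached through dev->parent rather than dev. A minimal show-callback sketch; the attribute name is made up.

#include <linux/device.h>
#include <linux/i2c.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	/* dev is the rtc device; its parent is the i2c client's device */
	struct i2c_client *client = to_i2c_client(dev->parent);

	return sprintf(buf, "%s\n", client->name);
}
static DEVICE_ATTR_RO(example);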
*alrm) alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f); alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f); alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f); - alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f); + alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1; alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE); alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled; diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c index 8a60900d6b8b53af51d348355c065590bc6518a1..4aff349ae301a476ab579b753fa388f83b4a45fd 100644 --- a/drivers/rtc/rtc-max77686.c +++ b/drivers/rtc/rtc-max77686.c @@ -360,7 +360,7 @@ static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm) out: mutex_unlock(&info->lock); - return 0; + return ret; } static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm) diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c index 08c661a332ec0e73b47e08522feb39ad1f8ff5ae..20e50d9fdf88230165a3029e49281afa5e065b51 100644 --- a/drivers/rtc/rtc-max8997.c +++ b/drivers/rtc/rtc-max8997.c @@ -215,7 +215,7 @@ static int max8997_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) out: mutex_unlock(&info->lock); - return 0; + return ret; } static int max8997_rtc_stop_alarm(struct max8997_rtc_info *info) diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 2f1772a358ca50342d4375de3e69f2fba451ceaf..92869bf95abb0b6316bfb4495ae9dde16f32132b 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -7,6 +7,23 @@ #include #endif +#ifdef CONFIG_X86 +static inline bool follow_mc146818_divider_reset(void) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59)) { + return false; + } + return true; +} +#else +static inline bool follow_mc146818_divider_reset(void) +{ + return true; +} +#endif + /* * Returns true if a clock update is in progress */ @@ -98,6 +115,17 @@ unsigned int mc146818_get_time(struct rtc_time *time) } EXPORT_SYMBOL_GPL(mc146818_get_time); +/* AMD systems don't allow access to AltCentury with DV1 */ +static bool apply_amd_register_a_behavior(void) +{ +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return true; +#endif + return false; +} + /* Set the current date and time in the real time clock. 
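The m41t80 fix above accounts for struct rtc_time counting months 0-11 while the alarm register holds BCD 1-12. A short sketch of both conversion directions; the register mask is illustrative.

#include <linux/bcd.h>
#include <linux/rtc.h>

static void month_to_reg(const struct rtc_time *tm, u8 *reg)
{
	*reg = bin2bcd(tm->tm_mon + 1);		/* 0-11 -> BCD 1-12 */
}

static void month_from_reg(u8 reg, struct rtc_time *tm)
{
	tm->tm_mon = bcd2bin(reg & 0x1f) - 1;	/* BCD 1-12 -> 0-11 */
}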
*/ int mc146818_set_time(struct rtc_time *time) { @@ -170,8 +198,13 @@ int mc146818_set_time(struct rtc_time *time) save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) { + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + } #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); @@ -189,7 +222,8 @@ int mc146818_set_time(struct rtc_time *time) #endif CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 385f8303bb412becfbe497ff24d2ebbe8ccfe09a..e9a25ec4d434f9c30c9b6a46c272547374d8a026 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -332,6 +332,10 @@ static int mtk_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rtc); + rtc->rtc_dev = devm_rtc_allocate_device(rtc->dev); + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + ret = request_threaded_irq(rtc->irq, NULL, mtk_rtc_irq_handler_thread, IRQF_ONESHOT | IRQF_TRIGGER_HIGH, @@ -344,11 +348,11 @@ static int mtk_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev, - &mtk_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc->rtc_dev)) { + rtc->rtc_dev->ops = &mtk_rtc_ops; + + ret = rtc_register_device(rtc->rtc_dev); + if (ret) { dev_err(&pdev->dev, "register rtc device failed\n"); - ret = PTR_ERR(rtc->rtc_dev); goto out_free_irq; } @@ -365,7 +369,6 @@ static int mtk_rtc_remove(struct platform_device *pdev) { struct mt6397_rtc *rtc = platform_get_drvdata(pdev); - rtc_device_unregister(rtc->rtc_dev); free_irq(rtc->irq, rtc->rtc_dev); irq_dispose_mapping(rtc->irq); diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 9f99a0966550b5e77672e83cc707eda26b22ede5..7cb786d76e3c1da81bf5055bf01375546a74e9ff 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context, memcpy(buf + 1, val, val_size); ret = i2c_master_send(client, buf, val_size + 1); + + kfree(buf); + if (ret != val_size + 1) return ret < 0 ? 
ret : -EIO; diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 453615f8ac9a012ea0022227f947f5778c477fdf..2e03021f15d136b97c73f461d63c210566c6765b 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -85,23 +85,46 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value) return 0; } -static int pcf8523_select_capacitance(struct i2c_client *client, bool high) +static int pcf8523_voltage_low(struct i2c_client *client) { u8 value; int err; + err = pcf8523_read(client, REG_CONTROL3, &value); + if (err < 0) + return err; + + return !!(value & REG_CONTROL3_BLF); +} + +static int pcf8523_load_capacitance(struct i2c_client *client) +{ + u32 load; + u8 value; + int err; + err = pcf8523_read(client, REG_CONTROL1, &value); if (err < 0) return err; - if (!high) - value &= ~REG_CONTROL1_CAP_SEL; - else + load = 12500; + of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads", + &load); + + switch (load) { + default: + dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", + load); + /* fall through */ + case 12500: value |= REG_CONTROL1_CAP_SEL; + break; + case 7000: + value &= ~REG_CONTROL1_CAP_SEL; + break; + } err = pcf8523_write(client, REG_CONTROL1, value); - if (err < 0) - return err; return err; } @@ -167,6 +190,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) struct i2c_msg msgs[2]; int err; + err = pcf8523_voltage_low(client); + if (err < 0) { + return err; + } else if (err > 0) { + dev_err(dev, "low voltage detected, time is unreliable\n"); + return -EINVAL; + } + msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 1; @@ -251,17 +282,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct i2c_client *client = to_i2c_client(dev); - u8 value; - int ret = 0, err; + int ret; switch (cmd) { case RTC_VL_READ: - err = pcf8523_read(client, REG_CONTROL3, &value); - if (err < 0) - return err; - - if (value & REG_CONTROL3_BLF) - ret = 1; + ret = pcf8523_voltage_low(client); + if (ret < 0) + return ret; if (copy_to_user((void __user *)arg, &ret, sizeof(int))) return -EFAULT; @@ -331,9 +358,10 @@ static int pcf8523_probe(struct i2c_client *client, if (!pcf) return -ENOMEM; - err = pcf8523_select_capacitance(client, true); + err = pcf8523_load_capacitance(client); if (err < 0) - return err; + dev_warn(&client->dev, "failed to set xtal load capacitance: %d", + err); err = pcf8523_set_pm(client, 0); if (err < 0) diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c index c04a1edcd571630f49ccfd26b558e2aba06516f9..c3702684b342678d00f86831503720de5c1ab680 100644 --- a/drivers/rtc/rtc-pcf85363.c +++ b/drivers/rtc/rtc-pcf85363.c @@ -169,7 +169,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm) buf[DT_YEARS] = bin2bcd(tm->tm_year % 100); ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN, - tmp, sizeof(tmp)); + tmp, 2); + if (ret) + return ret; + + ret = regmap_bulk_write(pcf85363->regmap, DT_100THS, + buf, sizeof(tmp) - 2); if (ret) return ret; diff --git a/drivers/rtc/rtc-phytium.c b/drivers/rtc/rtc-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..6c9e06fbd8d94cf8a2c64842134d6bb889f5402d --- /dev/null +++ b/drivers/rtc/rtc-phytium.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Real Time Clock Driver + * + * Copyright (c) 2020 Phytium Corporation. 
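The pcf8523 rework above derives the crystal load capacitance from the optional quartz-load-femtofarads DT property, defaulting to 12.5 pF and only warning on unknown values. A sketch of that selection with the CAP_SEL bit passed in rather than hard-coded; names are placeholders.

#include <linux/device.h>
#include <linux/of.h>

static u8 pick_cap_sel(struct device *dev, u8 ctrl, u8 cap_sel_bit)
{
	u32 load = 12500;	/* femtofarads; used when the property is absent */

	of_property_read_u32(dev->of_node, "quartz-load-femtofarads", &load);

	switch (load) {
	case 7000:
		return ctrl & ~cap_sel_bit;
	default:
		dev_warn(dev, "unknown quartz load %u fF, assuming 12500\n", load);
		/* fall through */
	case 12500:
		return ctrl | cap_sel_bit;
	}
}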
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTC_CMR 0x04 +#define RTC_AES_SEL 0x08 +#define RTC_AES_SEL_COUNTER 0x100 +#define RTC_CCR 0x0C +#define RTC_CCR_IE BIT(0) +#define RTC_CCR_MASK BIT(1) +#define RTC_CCR_EN BIT(2) +#define RTC_CCR_WEN BIT(3) +#define RTC_STAT 0x10 +#define RTC_STAT_BIT BIT(0) +#define RTC_RSTAT 0x14 +#define RTC_EOI 0x18 +#define RTC_VER 0x1C +#define RTC_CDR_LOW 0x20 +#define RTC_CCVR 0x24 +#define RTC_CLR_LOW 0x28 +#define RTC_CLR 0x2c +#define RTC_COUNTER_HB_OFFSET 15 +#define RTC_COUNTER_LB_MASK 0x7fff + +spinlock_t spinlock_phytium_rtc; + +struct phytium_rtc_dev { + struct rtc_device *rtc; + struct device *dev; + unsigned long alarm_time; + void __iomem *csr_base; + struct clk *clk; + unsigned int irq_wake; + unsigned int irq_enabled; +}; + +static int phytium_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + unsigned long counter = 0; + unsigned long tmp = 0; + + spin_lock(&spinlock_phytium_rtc); + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + counter = readl(pdata->csr_base + RTC_CCVR); + tmp = readl(pdata->csr_base + RTC_CDR_LOW); + + dev_info(dev, "%s_%d : counter : 0x%lx\n", + __func__, __LINE__, counter); + + spin_unlock(&spinlock_phytium_rtc); + + rtc_time_to_tm(counter, tm); + return rtc_valid_tm(tm); +} + +static int phytium_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + unsigned long counter = 0; + unsigned long tmp = 0; + + spin_lock(&spinlock_phytium_rtc); + + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + writel(0x00000000, pdata->csr_base + RTC_CLR_LOW); + writel((u32)secs, pdata->csr_base + RTC_CLR); + writel(RTC_AES_SEL_COUNTER, pdata->csr_base + RTC_AES_SEL); + counter = readl(pdata->csr_base + RTC_CLR); + tmp = readl(pdata->csr_base + RTC_CLR_LOW); + + spin_unlock(&spinlock_phytium_rtc); + + return 0; +} + +static int phytium_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + rtc_time_to_tm(pdata->alarm_time, &alrm->time); + alrm->enabled = readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE; + + return 0; +} + +static int phytium_rtc_alarm_irq_enable(struct device *dev, u32 enabled) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + u32 ccr; + + ccr = readl(pdata->csr_base + RTC_CCR); + if (enabled) { + ccr &= ~RTC_CCR_MASK; + ccr |= RTC_CCR_IE; + } else { + ccr &= ~RTC_CCR_IE; + ccr |= RTC_CCR_MASK; + } + writel(ccr, pdata->csr_base + RTC_CCR); + + return 0; +} + +static int phytium_rtc_alarm_irq_enabled(struct device *dev) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + + return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 
1 : 0; +} + +static int phytium_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct phytium_rtc_dev *pdata = dev_get_drvdata(dev); + unsigned long rtc_time; + unsigned long alarm_time; + + rtc_time = readl(pdata->csr_base + RTC_CCVR); + rtc_tm_to_time(&alrm->time, &alarm_time); + + pdata->alarm_time = alarm_time; + writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR); + + phytium_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static const struct rtc_class_ops phytium_rtc_ops = { + .read_time = phytium_rtc_read_time, + .set_mmss = phytium_rtc_set_mmss, + .read_alarm = phytium_rtc_read_alarm, + .set_alarm = phytium_rtc_set_alarm, + .alarm_irq_enable = phytium_rtc_alarm_irq_enable, +}; + +static irqreturn_t phytium_rtc_interrupt(int irq, void *id) +{ + struct phytium_rtc_dev *pdata = (struct phytium_rtc_dev *) id; + + /* Check if interrupt asserted */ + if (!(readl(pdata->csr_base + RTC_STAT) & RTC_STAT_BIT)) + return IRQ_NONE; + + /* Clear interrupt */ + readl(pdata->csr_base + RTC_EOI); + + rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int phytium_rtc_probe(struct platform_device *pdev) +{ + struct phytium_rtc_dev *pdata; + struct resource *res; + int ret; + int irq; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + platform_set_drvdata(pdev, pdata); + pdata->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->csr_base)) + return PTR_ERR(pdata->csr_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return irq; + } + ret = devm_request_irq(&pdev->dev, irq, phytium_rtc_interrupt, 0, + dev_name(&pdev->dev), pdata); + if (ret) { + dev_err(&pdev->dev, "Could not request IRQ\n"); + return ret; + } + +#ifndef CONFIG_ACPI + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "Couldn't get the clock for RTC\n"); + return -ENODEV; + } + + ret = clk_prepare_enable(pdata->clk); + if (ret) + return ret; +#endif + + spin_lock_init(&spinlock_phytium_rtc); + + /* Turn on the clock and the crystal */ + writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR); + + ret = device_init_wakeup(&pdev->dev, 1); + if (ret) { + clk_disable_unprepare(pdata->clk); + return ret; + } + + pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &phytium_rtc_ops, THIS_MODULE); + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); + return PTR_ERR(pdata->rtc); + } + + /* HW does not support update faster than 1 seconds */ + pdata->rtc->uie_unsupported = 1; + + return 0; +} + +static int phytium_rtc_remove(struct platform_device *pdev) +{ + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + + phytium_rtc_alarm_irq_enable(&pdev->dev, 0); + device_init_wakeup(&pdev->dev, 0); + clk_disable_unprepare(pdata->clk); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_rtc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + + /* + * If this RTC alarm will be used for waking the system up, + * don't disable it of course. Else we just disable the alarm + * and await suspension. 
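The new rtc-phytium interrupt handler above checks its status bit, acks by reading the end-of-interrupt register, then notifies the RTC core. A stripped-down sketch of that ISR shape; the structure and register offsets are illustrative, not the driver's.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/rtc.h>

struct my_rtc_priv {
	struct rtc_device *rtc;
	void __iomem *base;
};

static irqreturn_t my_rtc_isr(int irq, void *dev_id)
{
	struct my_rtc_priv *priv = dev_id;

	if (!(readl(priv->base + 0x10) & BIT(0)))	/* status register */
		return IRQ_NONE;			/* not our interrupt */

	readl(priv->base + 0x18);			/* reading EOI clears the line */
	rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}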
+ */ + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (!enable_irq_wake(irq)) + pdata->irq_wake = 1; + } else { + pdata->irq_enabled = phytium_rtc_alarm_irq_enabled(dev); + phytium_rtc_alarm_irq_enable(dev, 0); + clk_disable_unprepare(pdata->clk); + } + + return 0; +} + +static int phytium_rtc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + int rc; + + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (pdata->irq_wake) { + disable_irq_wake(irq); + pdata->irq_wake = 0; + } + } else { + rc = clk_prepare_enable(pdata->clk); + if (rc) { + dev_err(dev, "Unable to enable clock error %d\n", rc); + return rc; + } + phytium_rtc_alarm_irq_enable(dev, pdata->irq_enabled); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_rtc_pm_ops, +phytium_rtc_suspend, +phytium_rtc_resume); + +#ifdef CONFIG_OF +static const struct of_device_id phytium_rtc_of_match[] = { + { .compatible = "phytium,rtc" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_rtc_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_rtc_acpi_match[] = { + { "PHYT0002", 0 }, + { } +}; +#endif + +static struct platform_driver phytium_rtc_driver = { + .probe = phytium_rtc_probe, + .remove = phytium_rtc_remove, + .driver = { + .name = "phytium-rtc", + .pm = &phytium_rtc_pm_ops, + .of_match_table = of_match_ptr(phytium_rtc_of_match), + .acpi_match_table = ACPI_PTR(phytium_rtc_acpi_match), + }, +}; + +module_platform_driver(phytium_rtc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium RTC driver"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_AUTHOR("Wang Nan "); diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index f85a1a93e669f9e14d4f68ab1a7040662eb60804..343bb6ed17839c91e6b003547dceb7b633ef14a0 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -112,6 +112,13 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id) goto err_rtc; } + rtc->rtc = devm_rtc_allocate_device(&dev->dev); + if (IS_ERR(rtc->rtc)) { + ret = PTR_ERR(rtc->rtc); + goto err_rtc; + } + + rtc->rtc->ops = &pl030_ops; rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!rtc->base) { ret = -ENOMEM; @@ -128,12 +135,9 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id) if (ret) goto err_irq; - rtc->rtc = rtc_device_register("pl030", &dev->dev, &pl030_ops, - THIS_MODULE); - if (IS_ERR(rtc->rtc)) { - ret = PTR_ERR(rtc->rtc); + ret = rtc_register_device(rtc->rtc); + if (ret) goto err_reg; - } return 0; @@ -154,7 +158,6 @@ static int pl030_remove(struct amba_device *dev) writel(0, rtc->base + RTC_CR); free_irq(dev->irq[0], rtc); - rtc_device_unregister(rtc->rtc); iounmap(rtc->base); amba_release_regions(dev); diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 29fc3d210392387ec27814500fcc215a3c838682..17ccef5d5db1a2138d80f66be8acfbda2a327ba9 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -623,7 +623,7 @@ MODULE_DEVICE_TABLE(i2c, rv8803_id); static const struct of_device_id rv8803_of_match[] = { { .compatible = "microcrystal,rv8803", - .data = (void *)rx_8900 + .data = (void *)rv_8803 }, { .compatible = "epson,rx8900", diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 77feb603cd4c0b363cf57abad36fb03b30f61cb7..3c64dbb08109acd6951d2b9fb4e6c85252e537e9 100644 --- a/drivers/rtc/rtc-s35390a.c +++ 
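Several conversions in this series (armada38x, mt6397, pl030, and more below) move from devm_rtc_device_register() to the two-step devm_rtc_allocate_device() plus rtc_register_device() flow, so ops and flags are in place before the device goes live. A minimal probe sketch with placeholder names and a stub read_time:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static int my_read_time(struct device *dev, struct rtc_time *tm)
{
	return -EINVAL;			/* placeholder; a real driver reads hardware */
}

static const struct rtc_class_ops my_rtc_ops = {
	.read_time = my_read_time,
};

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc = devm_rtc_allocate_device(&pdev->dev);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;
	rtc->uie_unsupported = 1;	/* example: flags set before registration */

	return rtc_register_device(rtc);
}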
b/drivers/rtc/rtc-s35390a.c @@ -108,7 +108,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) static int s35390a_init(struct s35390a *s35390a) { - char buf; + u8 buf; int ret; unsigned initcount = 0; diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 75c8c5033e0877bc313527491df9a2e9edf21d3c..58e03ac3578b7310fbc3badacc2fff2b55da861e 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -327,7 +327,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) struct rtc_time *tm = &alrm->time; unsigned int alrm_en; int ret; - int year = tm->tm_year - 100; dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", alrm->enabled, @@ -356,11 +355,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) writeb(bin2bcd(tm->tm_hour), info->base + S3C2410_ALMHOUR); } - if (year < 100 && year >= 0) { - alrm_en |= S3C2410_RTCALM_YEAREN; - writeb(bin2bcd(year), info->base + S3C2410_ALMYEAR); - } - if (tm->tm_mon < 12 && tm->tm_mon >= 0) { alrm_en |= S3C2410_RTCALM_MONEN; writeb(bin2bcd(tm->tm_mon + 1), info->base + S3C2410_ALMMON); diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 51ba414798a83f0a2dfd1b6610fbf5dbaf4375d4..3d7414e5ed35fe4630f0608954eee5a0c992a512 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -377,7 +377,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) { unsigned int byte; - int value = 0xff; /* return 0xff for ignored values */ + int value = -1; /* return -1 for ignored values */ byte = readb(rtc->regbase + reg_off); if (byte & AR_ENB) { diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index b2483a749ac45fa58c4f9a7dd507ca19775222ca..3cf011e1205301d6123f1c237809237951369f66 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -273,6 +273,10 @@ static int snvs_rtc_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + data->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(data->rtc)) + return PTR_ERR(data->rtc); + data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap"); if (IS_ERR(data->regmap)) { @@ -335,10 +339,9 @@ static int snvs_rtc_probe(struct platform_device *pdev) goto error_rtc_device_register; } - data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &snvs_rtc_ops, THIS_MODULE); - if (IS_ERR(data->rtc)) { - ret = PTR_ERR(data->rtc); + data->rtc->ops = &snvs_rtc_ops; + ret = rtc_register_device(data->rtc); + if (ret) { dev_err(&pdev->dev, "failed to register rtc: %d\n", ret); goto error_rtc_device_register; } diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c index c5908cfea2340ff3233322e1786aac63b74ec8b8..8e6c9b3bcc29a4d2fec1c52cd2118f4a35cee00e 100644 --- a/drivers/rtc/rtc-stm32.c +++ b/drivers/rtc/rtc-stm32.c @@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev) ret = device_init_wakeup(&pdev->dev, true); if (rtc->data->has_wakeirq) { rtc->wakeirq_alarm = platform_get_irq(pdev, 1); - if (rtc->wakeirq_alarm <= 0) - ret = rtc->wakeirq_alarm; - else + if (rtc->wakeirq_alarm > 0) { ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, rtc->wakeirq_alarm); + } else { + ret = rtc->wakeirq_alarm; + if (rtc->wakeirq_alarm == -EPROBE_DEFER) + goto err; + } } if (ret) dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret); diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 
f1ff30ade5343b07ff54d05bcd82fddbc3bea0d6..9746c32eee2eb64c186e63a30f5f57ee66d1dc4c 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -338,8 +338,8 @@ int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) new_cnt = old_cnt + add_cnt + 1; groups = devm_kcalloc(&rtc->dev, new_cnt, sizeof(*groups), GFP_KERNEL); - if (IS_ERR_OR_NULL(groups)) - return PTR_ERR(groups); + if (!groups) + return -ENOMEM; memcpy(groups, rtc->dev.groups, old_cnt * sizeof(*groups)); memcpy(groups + old_cnt, grps, add_cnt * sizeof(*groups)); groups[old_cnt + add_cnt] = NULL; diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 08dbefc79520e57093cc3fefc9629e2f4200b95a..61c110b2045f836dbfffb54563e8d7057fae3cc1 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c @@ -253,9 +253,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev) struct resource *res; int irq, ret; struct nvmem_config nvmem_cfg = { - .name = "rv8803_nvram", - .word_size = 4, - .stride = 4, + .name = "tx4939_nvram", .size = TX4939_RTC_REG_RAMSIZE, .reg_read = tx4939_nvram_read, .reg_write = tx4939_nvram_write, diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c index 153820876a820033cfebaf9cee45c5ae0162f8a6..2f741f455c30a9909706e25fe2a76d424cf66fb3 100644 --- a/drivers/rtc/rtc-xgene.c +++ b/drivers/rtc/rtc-xgene.c @@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev) if (IS_ERR(pdata->csr_base)) return PTR_ERR(pdata->csr_base); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); @@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev) return ret; } - pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &xgene_rtc_ops, THIS_MODULE); - if (IS_ERR(pdata->rtc)) { - clk_disable_unprepare(pdata->clk); - return PTR_ERR(pdata->rtc); - } - /* HW does not support update faster than 1 seconds */ pdata->rtc->uie_unsupported = 1; + pdata->rtc->ops = &xgene_rtc_ops; + + ret = rtc_register_device(pdata->rtc); + if (ret) { + clk_disable_unprepare(pdata->clk); + return ret; + } return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a23e7d394a0ad1f1a74a241f1676accb9a57901a..180c24cb4538943f5f48f60862d533317c7c95ae 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -725,18 +725,20 @@ static void dasd_profile_start(struct dasd_block *block, * we count each request only once. 
*/ device = cqr->startdev; - if (device->profile.data) { - counter = 1; /* request is not yet queued on the start device */ - list_for_each(l, &device->ccw_queue) - if (++counter >= 31) - break; - } + if (!device->profile.data) + return; + + spin_lock(get_ccwdev_lock(device->cdev)); + counter = 1; /* request is not yet queued on the start device */ + list_for_each(l, &device->ccw_queue) + if (++counter >= 31) + break; + spin_unlock(get_ccwdev_lock(device->cdev)); + spin_lock(&device->profile.lock); - if (device->profile.data) { - device->profile.data->dasd_io_nr_req[counter]++; - if (rq_data_dir(req) == READ) - device->profile.data->dasd_read_nr_req[counter]++; - } + device->profile.data->dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + device->profile.data->dasd_read_nr_req[counter]++; spin_unlock(&device->profile.lock); } @@ -3501,12 +3503,11 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_delete_device(device); return -EINVAL; } + device->base_discipline = base_discipline; if (!try_module_get(discipline->owner)) { - module_put(base_discipline->owner); dasd_delete_device(device); return -EINVAL; } - device->base_discipline = base_discipline; device->discipline = discipline; /* check_device will allocate block device if necessary */ @@ -3514,8 +3515,6 @@ int dasd_generic_set_online(struct ccw_device *cdev, if (rc) { pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", dev_name(&cdev->dev), discipline->name, rc); - module_put(discipline->owner); - module_put(base_discipline->owner); dasd_delete_device(device); return rc; } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index b9ce93e9df89295eb72132fcfc81d0257aaa1723..88530a4952a9654b52fefab30445dd55f071d5ae 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr) char msg_format; char msg_no; + /* + * intrc values ENODEV, ENOLINK and EPERM + * will be obtained from sleep_on to indicate that no + * IO operation can be started + */ + if (cqr->intrc == -ENODEV) + return 1; + + if (cqr->intrc == -ENOLINK) + return 1; + + if (cqr->intrc == -EPERM) + return 1; + sense = dasd_get_sense(&cqr->irb); if (!sense) return 0; @@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device, lcu->flags &= ~NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); - do { - rc = dasd_sleep_on(cqr); - if (rc && suborder_not_supported(cqr)) - return -EOPNOTSUPP; - } while (rc && (cqr->retries > 0)); - if (rc) { + rc = dasd_sleep_on(cqr); + if (rc && !suborder_not_supported(cqr)) { spin_lock_irqsave(&lcu->lock, flags); lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); @@ -647,12 +657,12 @@ int dasd_alias_remove_device(struct dasd_device *device) struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) { struct dasd_eckd_private *alias_priv, *private = base_device->private; - struct alias_pav_group *group = private->pavgroup; struct alias_lcu *lcu = private->lcu; struct dasd_device *alias_device; + struct alias_pav_group *group; unsigned long flags; - if (!group || !lcu) + if (!lcu) return NULL; if (lcu->pav == NO_PAV || lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) @@ -669,6 +679,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) } spin_lock_irqsave(&lcu->lock, flags); + group = private->pavgroup; + if (!group) { + spin_unlock_irqrestore(&lcu->lock, flags); + return NULL; + } 
alias_device = group->next; if (!alias_device) { if (list_empty(&group->aliaslist)) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a46d40920fde48460fa2400c46f84..a2e34c853ca989240122aee42ad590d79d3c54a6 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1135,7 +1135,8 @@ static u32 get_fcx_max_data(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; + unsigned int mdc; + int tpm; if (dasd_nofcx) return 0; @@ -1149,7 +1150,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); return 0; } else { @@ -1160,12 +1161,12 @@ static u32 get_fcx_max_data(struct dasd_device *device) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; - int mdc; + unsigned int mdc; u32 fcx_max_data; if (private->fcx_max_data) { mdc = ccw_device_get_mdc(device->cdev, lpm); - if ((mdc < 0)) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum data size for zHPF " "requests failed (rc=%d) for a new path %x\n", @@ -1769,7 +1770,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) dasd_free_block(device->block); device->block = NULL; out_err1: - kfree(private->conf_data); + dasd_eckd_clear_conf_data(device); kfree(device->private); device->private = NULL; return rc; @@ -1778,7 +1779,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device) static void dasd_eckd_uncheck_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - int i; if (!private) return; @@ -1788,21 +1788,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->sneq = NULL; private->vdsneq = NULL; private->gneq = NULL; - private->conf_len = 0; - for (i = 0; i < 8; i++) { - kfree(device->path[i].conf_data); - if ((__u8 *)device->path[i].conf_data == - private->conf_data) { - private->conf_data = NULL; - private->conf_len = 0; - } - device->path[i].conf_data = NULL; - device->path[i].cssid = 0; - device->path[i].ssid = 0; - device->path[i].chpid = 0; - } - kfree(private->conf_data); - private->conf_data = NULL; + dasd_eckd_clear_conf_data(device); } static struct dasd_ccw_req * @@ -2004,14 +1990,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); raw: - block->blocks = (private->real_cyl * + block->blocks = ((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk); dev_info(&device->cdev->dev, - "DASD with %d KB/block, %d KB total size, %d KB/track, " + "DASD with %u KB/block, %lu KB total size, %u KB/track, " "%s\n", (block->bp_block >> 10), - ((private->real_cyl * + (((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk * (block->bp_block >> 9)) >> 1), ((blk_per_trk * block->bp_block) >> 10), @@ -4469,6 +4455,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) usrparm.psf_data &= 0x7fffffffULL; usrparm.rssd_result &= 0x7fffffffULL; } + /* at least 2 bytes are accessed and should be allocated */ + if (usrparm.psf_data_len < 2) { + DBF_DEV_EVENT(DBF_WARNING, device, + "Symmetrix ioctl invalid data length %d", + usrparm.psf_data_len); + rc = -EINVAL; + goto out; + } /* 
alloc I/O data area */ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c6ab34f94b1b54c96d704abf3f19e6aa16eaca78..3072b89785ddf7329165d6f2c8e678821f79e1a4 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -11,6 +11,7 @@ endif GCOV_PROFILE_sclp_early_core.o := n KCOV_INSTRUMENT_sclp_early_core.o := n UBSAN_SANITIZE_sclp_early_core.o := n +KASAN_SANITIZE_sclp_early_core.o := n CFLAGS_sclp_early_core.o += -D__NO_FORTIFY diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index fd2146bcc0add9aae3b71ba4cc88b788b7702591..e17364e13d2f71ec289a47f6a79f7c56ae85b264 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -629,7 +629,7 @@ con3270_init(void) (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); - raw3270_add_view(&condev->view, &con3270_fn, 1); + raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ); INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) { diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 16a4e8528bbc34198677f1e9f879790192fb4f33..2f9905ee047cdf6bd60ac4c3c0aa2c19f41f13f7 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp) init_waitqueue_head(&fp->wait); fp->fs_pid = get_pid(task_pid(current)); - rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); + rc = raw3270_add_view(&fp->view, &fs3270_fn, minor, + RAW3270_VIEW_LOCK_BH); if (rc) { fs3270_free_view(&fp->view); goto out; diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h index c467589c7f452fdfc2aeabd509aaddd6086d7713..c06d399b9b1f158ebbd6038740323fbde8f734bc 100644 --- a/drivers/s390/char/keyboard.h +++ b/drivers/s390/char/keyboard.h @@ -56,7 +56,7 @@ static inline void kbd_put_queue(struct tty_port *port, int ch) { tty_insert_flip_char(port, ch, 0); - tty_schedule_flip(port); + tty_flip_buffer_push(port); } static inline void @@ -64,5 +64,5 @@ kbd_puts_queue(struct tty_port *port, char *cp) { while (*cp) tty_insert_flip_char(port, *cp++, 0); - tty_schedule_flip(port); + tty_flip_buffer_push(port); } diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index f8cd2935fbfd48c5aef1ad980457cc55433b6db4..63a41b16876102a8f1210396f1970d0d5e77df18 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view) * Add view to device with minor "minor". 
*/ int -raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) +raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass) { unsigned long flags; struct raw3270 *rp; @@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) view->cols = rp->cols; view->ascebc = rp->ascebc; spin_lock_init(&view->lock); + lockdep_set_subclass(&view->lock, subclass); list_add(&view->list, &rp->view_list); rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 114ca7cbf8897dce734e59cb283923e2c160b3bf..3afaa35f73513cba47566e9601b775339e6cdf78 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -150,6 +150,8 @@ struct raw3270_fn { struct raw3270_view { struct list_head list; spinlock_t lock; +#define RAW3270_VIEW_LOCK_IRQ 0 +#define RAW3270_VIEW_LOCK_BH 1 atomic_t ref_count; struct raw3270 *dev; struct raw3270_fn *fn; @@ -158,7 +160,7 @@ struct raw3270_view { unsigned char *ascebc; /* ascii -> ebcdic table */ }; -int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int); +int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); int raw3270_activate_view(struct raw3270_view *); void raw3270_del_view(struct raw3270_view *); void raw3270_deactivate_view(struct raw3270_view *); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index e9aa71cdfc44e2fcf476cefff087605ecd7ba84b..74df353d2244c61c2c96b1047033ba178915f12f 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1206,6 +1206,7 @@ sclp_init(void) fail_unregister_reboot_notifier: unregister_reboot_notifier(&sclp_reboot_notifier); fail_init_state_uninitialized: + list_del(&sclp_state_change_event.list); sclp_init_state = sclp_init_state_uninitialized; fail_unlock: spin_unlock_irqrestore(&sclp_lock, flags); diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 194ffd5c8580401a13eefb3f85e61fdba2d5e7e6..039b2074db7e5d39a88aeed516630e62dfc05e4e 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work) { + lock_device_hotplug(); smp_rescan_cpus(); + unlock_device_hotplug(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 5b8af278228282914a4250ee655aa4f85355ef7f..81067f5bb178eeaff1677d6710e722a3bc548cd1 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return PTR_ERR(tp); rc = raw3270_add_view(&tp->view, &tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + tty->index + RAW3270_FIRSTMINOR, + RAW3270_VIEW_LOCK_BH); if (rc) { tty3270_free_view(tp); return rc; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 93b2862bd3faecbc5702759855db207c43c49f17..674d848e377c8e0e5876c2b663980168950e122f 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, goto error; } /* Check for trailing stuff. 
*/ - if (i == num_devices && strlen(buf) > 0) { + if (i == num_devices && buf && strlen(buf) > 0) { rc = -EINVAL; goto error; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 9811fd8a0c7310b119a1e9c84da2b583ef763485..92eabbb5f18d44a0045b41e7147e487cafe802a4 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -115,7 +115,7 @@ struct subchannel { struct schib_config config; } __attribute__ ((aligned(8))); -DECLARE_PER_CPU(struct irb, cio_irb); +DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index aea50292264629e63f131dfd2dfd87d3ff108799..df09ed53ab45909a0ba3aebcc3c245325bcf38f3 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init); int sch_is_pseudo_sch(struct subchannel *sch) { + if (!sch->dev.parent) + return 0; return sch == to_css(sch->dev.parent)->pseudo_subchannel; } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 4435ae0b3027046cabcb89deb0f0ed806956fcb1..f0cae1973f7850dadf443149766ee5231c25404b 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -624,7 +624,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. */ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 835de44dbbccd0ad5f1fbac5a7001dad8a7cef8e..b98526d3ddfdc8d97920681e92649004d75638a0 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -16,20 +16,21 @@ struct idset { unsigned long bitmap[0]; }; -static inline unsigned long bitmap_size(int num_ssid, int num_id) +static inline unsigned long idset_bitmap_size(int num_ssid, int num_id) { - return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); + return bitmap_size(size_mul(num_ssid, num_id)); } static struct idset *idset_new(int num_ssid, int num_id) { struct idset *set; - set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); + set = vmalloc(sizeof(struct idset) + + idset_bitmap_size(num_ssid, num_id)); if (set) { set->num_ssid = num_ssid; set->num_id = num_id; - memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); + memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id)); } return set; } @@ -41,7 +42,8 @@ void idset_free(struct idset *set) void idset_fill(struct idset *set) { - memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); + memset(set->bitmap, 0xff, + idset_bitmap_size(set->num_ssid, set->num_id)); } static inline void idset_add(struct idset *set, int ssid, int id) diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9c7d9da42ba0829692d0d8dadbbd1f42935962f3..4b7cc8d425b1c64c6b27e4f3308d4152362a0e21 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -749,6 +749,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) switch (state) { case SLSB_P_OUTPUT_EMPTY: + case SLSB_P_OUTPUT_PENDING: /* the adapter got it */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); @@ -1568,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, rc = qdio_kick_outbound_q(q, phys_aob); } else if (need_siga_sync(q)) { rc = 
qdio_siga_sync_q(q); + } else if (count < QDIO_MAX_BUFFERS_PER_Q && + get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && + state == SLSB_CU_OUTPUT_PRIMED) { + /* The previous buffer is not processed yet, tack on. */ + qperf_inc(q, fast_requeue); } else { - /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state, 0); - if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q, 0); - else - qperf_inc(q, fast_requeue); + rc = qdio_kick_outbound_q(q, 0); } /* in case of SIGA errors we must process the error immediately */ diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 78f1be41b05e3fb2cc5c91b31e0ec00877735851..034528a5453ec4b058b86c65f513037371a4e37d 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -151,6 +151,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) return -ENOMEM; } irq_ptr_qs[i] = q; + INIT_LIST_HEAD(&q->entry); } return 0; } @@ -179,6 +180,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q->mask = 1 << (31 - i); q->nr = i; q->handler = handler; + INIT_LIST_HEAD(&q->entry); } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 07dea602205bdf2a18bfe85fad5888cf2f91b4dd..6628e0c9e70e3596cb8d95abbd9e985bae1ad5e7 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) mutex_lock(&tiq_list_lock); list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); mutex_unlock(&tiq_list_lock); - xchg(irq_ptr->dsci, 1 << 7); } void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) @@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) struct qdio_q *q; q = irq_ptr->input_qs[0]; - /* if establish triggered an error */ - if (!q || !q->entry.prev || !q->entry.next) + if (!q) return; mutex_lock(&tiq_list_lock); list_del_rcu(&q->entry); mutex_unlock(&tiq_list_lock); synchronize_rcu(); + INIT_LIST_HEAD(&q->entry); } static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index fd77e46eb3b21520f2bf155612aed0248e773884..4fe06ff7b2c8bcb680a464504d7e85696497645d 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, sizeof(*pa->pa_iova_pfn) + sizeof(*pa->pa_pfn), GFP_KERNEL); - if (unlikely(!pa->pa_iova_pfn)) + if (unlikely(!pa->pa_iova_pfn)) { + pa->pa_nr = 0; return -ENOMEM; + } pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; @@ -387,8 +389,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) * orb specified one of the unsupported formats, we defer * checking for IDAWs in unsupported formats to here. */ - if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) { + kfree(p); return -EOPNOTSUPP; + } if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) break; @@ -528,7 +532,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); if (ret < 0) - goto out_init; + goto out_unpin; /* Translate this direct ccw to a idal ccw. 
*/ idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index f47d16b5810b9154c7b8bd852039d1cdc89b33d3..7a06cdff6572d8549ced8ce4575517b530ababc2 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) if (ret != -EBUSY) goto out_unlock; + iretry = 255; do { - iretry = 255; ret = cio_cancel_halt_clear(sch, &iretry); - while (ret == -EBUSY) { - /* - * Flush all I/O and wait for - * cancel/halt/clear completion. - */ - private->completion = &completion; - spin_unlock_irq(sch->lock); - wait_for_completion_timeout(&completion, 3*HZ); + if (ret == -EIO) { + pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n", + sch->schid.ssid, sch->schid.sch_no); + break; + } + + /* + * Flush all I/O and wait for + * cancel/halt/clear completion. + */ + private->completion = &completion; + spin_unlock_irq(sch->lock); - spin_lock_irq(sch->lock); - private->completion = NULL; - flush_workqueue(vfio_ccw_work_q); - ret = cio_cancel_halt_clear(sch, &iretry); - }; + if (ret == -EBUSY) + wait_for_completion_timeout(&completion, 3*HZ); + private->completion = NULL; + flush_workqueue(vfio_ccw_work_q); + spin_lock_irq(sch->lock); ret = cio_disable_subchannel(sch); } while (ret == -EBUSY); out_unlock: @@ -72,20 +76,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) { struct vfio_ccw_private *private; struct irb *irb; + bool is_final; private = container_of(work, struct vfio_ccw_private, io_work); irb = &private->irb; + is_final = !(scsw_actl(&irb->scsw) & + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - cp_free(&private->cp); + if (is_final) + cp_free(&private->cp); } memcpy(private->io_region->irb_area, irb, sizeof(*irb)); if (private->io_trigger) eventfd_signal(private->io_trigger, 1); - if (private->mdev) + if (private->mdev && is_final) private->state = VFIO_CCW_STATE_IDLE; } diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index f673e106c041535fd0e8b69de44cbddb92a5e6e1..dc5ff47de3feec42ca9aabd2f0cf97c238f612fe 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { - if (!vfio_ccw_mdev_reset(mdev)) + if (!vfio_ccw_sch_quiesce(private->sch)) private->state = VFIO_CCW_STATE_STANDBY; /* The state will be NOT_OPER on error. */ } + cp_free(&private->cp); private->mdev = NULL; atomic_inc(&private->avail); @@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev) struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && + (private->state != VFIO_CCW_STATE_STANDBY)) { + if (!vfio_ccw_mdev_reset(mdev)) + private->state = VFIO_CCW_STATE_STANDBY; + /* The state will be NOT_OPER on error. 
*/ + } + + cp_free(&private->cp); vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &private->nb); } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f039266b275dad7c31559e897f094a056a8c3c2c..91500365edec78a98ade082196126bc6d423e5fa 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -249,7 +249,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) static inline int ap_test_config_card_id(unsigned int id) { if (!ap_configuration) /* QCI not supported */ - return 1; + /* only ids 0...3F may be probed */ + return id < 0x40 ? 1 : 0; return ap_test_config(ap_configuration->apm, id); } @@ -776,6 +777,8 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; + /* (re-)init queue's state machine */ + ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ @@ -808,6 +811,8 @@ static int ap_device_remove(struct device *dev) struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = ap_dev->drv; + if (is_queue_dev(dev)) + ap_queue_remove(to_ap_queue(dev)); if (ap_drv->remove) ap_drv->remove(ap_dev); @@ -911,7 +916,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) */ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) { - int a, i, z; + unsigned long a, i, z; char *np, sign; /* bits needs to be a multiple of 8 */ @@ -1218,11 +1223,10 @@ static struct bus_attribute *const ap_bus_attrs[] = { }; /** - * ap_select_domain(): Select an AP domain. - * - * Pick one of the 16 AP domains. + * ap_select_domain(): Select an AP domain if possible and we haven't + * already done so before. */ -static int ap_select_domain(void) +static void ap_select_domain(void) { int count, max_count, best_domain; struct ap_queue_status status; @@ -1237,7 +1241,7 @@ static int ap_select_domain(void) if (ap_domain_index >= 0) { /* Domain has already been selected. 
*/ spin_unlock_bh(&ap_domain_lock); - return 0; + return; } best_domain = -1; max_count = 0; @@ -1264,11 +1268,8 @@ static int ap_select_domain(void) if (best_domain >= 0) { ap_domain_index = best_domain; AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index); - spin_unlock_bh(&ap_domain_lock); - return 0; } spin_unlock_bh(&ap_domain_lock); - return -ENODEV; } /* @@ -1346,8 +1347,7 @@ static void ap_scan_bus(struct work_struct *unused) AP_DBF(DBF_DEBUG, "%s running\n", __func__); ap_query_configuration(ap_configuration); - if (ap_select_domain() != 0) - goto out; + ap_select_domain(); for (id = 0; id < AP_DEVICES; id++) { /* check if device is registered */ @@ -1445,10 +1445,6 @@ static void ap_scan_bus(struct work_struct *unused) aq->ap_dev.device.parent = &ac->ap_dev.device; dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom); - /* Start with a device reset */ - spin_lock_bh(&aq->lock); - ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); - spin_unlock_bh(&aq->lock); /* Register device */ rc = device_register(&aq->ap_dev.device); if (rc) { @@ -1467,12 +1463,11 @@ static void ap_scan_bus(struct work_struct *unused) } } /* end device loop */ - if (defdomdevs < 1) + if (ap_domain_index >= 0 && defdomdevs < 1) AP_DBF(DBF_INFO, "no queue device with default domain %d available\n", ap_domain_index); -out: mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 5246cd8c16a605f6748884b47bdc59d963c3b578..7e85d238767ba1d42938a24b67381221f7cc0593 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -253,6 +253,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); +void ap_queue_reinit_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 66f7334bcb03214307fb4f3fecebf91dbb3ba341..576ac08777c509ee038a50ec95a892fc79776355 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -14,6 +14,9 @@ #include #include "ap_bus.h" +#include "ap_debug.h" + +static void __ap_flush_queue(struct ap_queue *aq); /** * ap_queue_enable_interruption(): Enable interruption on an AP queue. 
@@ -541,7 +544,25 @@ static ssize_t reset_show(struct device *dev, return rc; } -static DEVICE_ATTR_RO(reset); +static ssize_t reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_queue *aq = to_ap_queue(dev); + + spin_lock_bh(&aq->lock); + __ap_flush_queue(aq); + aq->state = AP_STATE_RESET_START; + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + + AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n", + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + + return count; +} + +static DEVICE_ATTR_RW(reset); static ssize_t interrupt_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -718,5 +739,20 @@ void ap_queue_remove(struct ap_queue *aq) { ap_flush_queue(aq); del_timer_sync(&aq->timeout); + + /* reset with zero, also clears irq registration */ + spin_lock_bh(&aq->lock); + ap_zapq(aq->qid); + aq->state = AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); } EXPORT_SYMBOL(ap_queue_remove); + +void ap_queue_reinit_state(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + aq->state = AP_STATE_RESET_START; + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_reinit_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 1b4001e0285fe0a5979558e2e9e2ce8f60e0808e..fa97e666f19e6075dde8e771a09a462bb822536a 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -45,7 +45,8 @@ static debug_info_t *debug_info; static void __init pkey_debug_init(void) { - debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); + /* 5 arguments per dbf entry (including the format string ptr) */ + debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); debug_register_view(debug_info, &debug_sprintf_view); debug_set_level(debug_info, 3); } @@ -1088,7 +1089,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (rc) break; if (copy_to_user(ucs, &kcs, sizeof(kcs))) - return -EFAULT; + rc = -EFAULT; memzero_explicit(&kcs, sizeof(kcs)); break; } @@ -1119,7 +1120,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, if (rc) break; if (copy_to_user(ucp, &kcp, sizeof(kcp))) - return -EFAULT; + rc = -EFAULT; memzero_explicit(&kcp, sizeof(kcp)); break; } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index e6854127b4343dc5ee73357472b4c97f7f8a377e..4881ee2053f99901a3c944ca9dc837d67933e48b 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -158,6 +158,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, { if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) return NULL; + zcrypt_card_get(zc); zcrypt_queue_get(zq); get_device(&zq->queue->ap_dev.device); atomic_add(weight, &zc->load); @@ -177,6 +178,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc, atomic_sub(weight, &zq->load); put_device(&zq->queue->ap_dev.device); zcrypt_queue_put(zq); + zcrypt_card_put(zc); module_put(mod); } @@ -224,6 +226,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); if (mex->outputdatalength < mex->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -298,6 +301,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) trace_s390_zcrypt_req(crt, TP_ICARSACRT); if (crt->outputdatalength < crt->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -483,6 +487,7 @@ static long 
zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); if (!targets) { + func_code = 0; rc = -ENOMEM; goto out; } @@ -490,6 +495,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) uptr = (struct ep11_target_dev __force __user *) xcrb->targets; if (copy_from_user(targets, uptr, target_num * sizeof(*targets))) { + func_code = 0; rc = -EFAULT; goto out_free; } diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index f4ae5fa30ec970e99a39387b2b0a7a4b13903889..ff17a00273f77d60f63bf3e20c67b3408d96915b 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -198,7 +198,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev) struct ap_queue *aq = to_ap_queue(&ap_dev->device); struct zcrypt_queue *zq = aq->private; - ap_queue_remove(aq); if (zq) zcrypt_queue_unregister(zq); } diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 35d58dbbc4da3dcc5fa47f5d23a8dfcbe08ff9ae..2a42e5962317a1cf797720219ccac09aa2807638 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -273,7 +273,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) struct ap_queue *aq = to_ap_queue(&ap_dev->device); struct zcrypt_queue *zq = aq->private; - ap_queue_remove(aq); if (zq) zcrypt_queue_unregister(zq); } diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 6f7ebc1dbe1033af41b51150295fedc663e8be13..2126f4cc6d3748c264c7d58ef16442a98755d3aa 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -52,6 +52,7 @@ struct error_hdr { #define REP82_ERROR_FORMAT_FIELD 0x29 #define REP82_ERROR_INVALID_COMMAND 0x30 #define REP82_ERROR_MALFORMED_MSG 0x40 +#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41 #define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42 #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ #define REP82_ERROR_WORD_ALIGNMENT 0x60 @@ -61,6 +62,7 @@ struct error_hdr { #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 #define REP82_ERROR_RESERVED_FIELD 0x88 #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A +#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B #define REP82_ERROR_TRANSPORT_FAIL 0x90 #define REP82_ERROR_PACKET_TRUNCATED 0xA0 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 @@ -90,6 +92,8 @@ static inline int convert_error(struct zcrypt_queue *zq, case REP88_ERROR_MESSAGE_MALFORMD: case REP82_ERROR_INVALID_DOMAIN_PRECHECK: case REP82_ERROR_INVALID_DOMAIN_PENDING: + case REP82_ERROR_INVALID_SPECIAL_CMD: + case REP82_ERROR_FILTERED_BY_HYPERVISOR: // REP88_ERROR_INVALID_KEY // '82' CEX2A // REP88_ERROR_OPERAND // '84' CEX2A // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 94d9f7224aea3acbb394c424a88ed2e6ba867c04..baa683c3f5d302f8be3fb7f6d7e6dfd9434f7eb1 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -276,7 +276,6 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev) struct ap_queue *aq = to_ap_queue(&ap_dev->device); struct zcrypt_queue *zq = aq->private; - ap_queue_remove(aq); if (zq) zcrypt_queue_unregister(zq); } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 7617d21cb2960618cbc097bbf85cb8515234aa14..f63c5c871d3ddf48f4a88fe3c2b2db684394c7b3 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1595,6 +1595,7 @@ static int 
ctcm_new_device(struct ccwgroup_device *cgdev) if (priv->channel[direction] == NULL) { if (direction == CTCM_WRITE) channel_free(priv->channel[CTCM_READ]); + result = -ENODEV; goto out_dev; } priv->channel[direction]->netdev = dev; diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index c0631895154e6e7f5b3c7948a66741a302ea669f..00cc96341411c2f36684e2ff0548dc4027bb81a8 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism) static int unregister_sba(struct ism_dev *ism) { + int ret; + if (!ism->sba) return 0; - if (ism_cmd_simple(ism, ISM_UNREG_SBA)) + ret = ism_cmd_simple(ism, ISM_UNREG_SBA); + if (ret && ret != ISM_ERROR) return -EIO; dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, @@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism) static int unregister_ieq(struct ism_dev *ism) { + int ret; + if (!ism->ieq) return 0; - if (ism_cmd_simple(ism, ISM_UNREG_IEQ)) + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ); + if (ret && ret != ISM_ERROR) return -EIO; dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, @@ -288,7 +294,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) cmd.request.dmb_tok = dmb->dmb_tok; ret = ism_cmd(ism, &cmd); - if (ret) + if (ret && ret != ISM_ERROR) goto out; ism_free_dmb(ism, dmb); @@ -415,9 +421,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data) break; clear_bit_inv(bit, bv); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; barrier(); smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); - ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; } if (ism->sba->e) { diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 34e0d476c5c615e2c3421bd47133ca7cd1a6bdda..b2657582cfcfd14ea7233084e3da5ae26fa7582b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -200,6 +201,12 @@ struct qeth_vnicc_info { bool rx_bcast_enabled; }; +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_setadp_cmd func) +{ + return (ipa->supported_funcs & func); +} + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -213,9 +220,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, } #define qeth_adp_supported(c, f) \ - qeth_is_ipa_supported(&c->options.adp, f) -#define qeth_adp_enabled(c, f) \ - qeth_is_ipa_enabled(&c->options.adp, f) + qeth_is_adp_supported(&c->options.adp, f) #define qeth_is_supported(c, f) \ qeth_is_ipa_supported(&c->options.ipa4, f) #define qeth_is_enabled(c, f) \ @@ -826,6 +831,11 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline bool qeth_netdev_is_registered(struct net_device *dev) +{ + return dev->netdev_ops != NULL; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ffce6f39828aa1799c49975df1d85fe2d8ccbe38..81e2c591acb0ba3f416e5d624fae707897d3d344 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -565,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -900,44 +901,6 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, qeth_release_buffer(channel, iob); } -static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) -{ - int cnt; - - QETH_DBF_TEXT(SETUP, 2, "setupch"); - - channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); - if (!channel->ccw) - return -ENOMEM; - channel->state = CH_STATE_DOWN; - atomic_set(&channel->irq_pending, 0); - init_waitqueue_head(&channel->wait_q); - - if (!alloc_buffers) - return 0; - - for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { - channel->iob[cnt].data = - kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); - if (channel->iob[cnt].data == NULL) - break; - channel->iob[cnt].state = BUF_STATE_FREE; - channel->iob[cnt].channel = channel; - channel->iob[cnt].callback = qeth_send_control_data_cb; - channel->iob[cnt].rc = 0; - } - if (cnt < QETH_CMD_BUFFER_NO) { - kfree(channel->ccw); - while (cnt-- > 0) - kfree(channel->iob[cnt].data); - return -ENOMEM; - } - channel->io_buf_no = 0; - spin_lock_init(&channel->iob_lock); - - return 0; -} - static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { @@ -1187,6 +1150,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(cdev, irb); if (rc) { card->read_or_write_problem = 1; + if (iob) + qeth_release_buffer(iob->channel, iob); qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1336,14 +1301,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card) static void qeth_clean_channel(struct qeth_channel *channel) { + struct ccw_device *cdev = channel->ccwdev; int cnt; QETH_DBF_TEXT(SETUP, 2, "freech"); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) kfree(channel->iob[cnt].data); kfree(channel->ccw); } +static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) +{ + struct ccw_device *cdev = channel->ccwdev; + int cnt; + + QETH_DBF_TEXT(SETUP, 2, "setupch"); + + channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!channel->ccw) + return -ENOMEM; + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + init_waitqueue_head(&channel->wait_q); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + if (!alloc_buffers) + return 0; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = + kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + qeth_clean_channel(channel); + return -ENOMEM; + } + channel->io_buf_no = 0; + spin_lock_init(&channel->iob_lock); + + return 0; +} + static void qeth_set_single_write_queues(struct qeth_card *card) { if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && @@ -1367,7 +1379,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card) card->qdio.no_out_queues = 4; } -static void qeth_update_from_chp_desc(struct qeth_card *card) +static int qeth_update_from_chp_desc(struct qeth_card 
*card) { struct ccw_device *ccwdev; struct channel_path_desc_fmt0 *chp_dsc; @@ -1377,7 +1389,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) ccwdev = card->data.ccwdev; chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) - goto out; + return -ENOMEM; card->info.func_level = 0x4100 + chp_dsc->desc; if (card->info.type == QETH_CARD_TYPE_IQD) @@ -1392,6 +1404,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); + return 0; } static void qeth_init_qdio_info(struct qeth_card *card) @@ -1494,7 +1507,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) CARD_BUS_ID(card), card->info.mcl_level); } -static struct qeth_card *qeth_alloc_card(void) +static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) { struct qeth_card *card; @@ -1503,6 +1516,11 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->gdev = gdev; + CARD_RDEV(card) = gdev->cdev[0]; + CARD_WDEV(card) = gdev->cdev[1]; + CARD_DDEV(card) = gdev->cdev[2]; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) @@ -1852,6 +1870,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -1923,6 +1942,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -2110,6 +2130,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply = qeth_alloc_reply(card); if (!reply) { + qeth_release_buffer(channel, iob); return -ENOMEM; } reply->callback = reply_cb; @@ -2448,11 +2469,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) return 0; } -static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) { if (!q) return; + qeth_clear_outq_buffers(q, 1); qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); kfree(q); } @@ -2526,10 +2548,8 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) card->qdio.out_qs[i]->bufs[j] = NULL; } out_freeoutq: - while (i > 0) { - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - } + while (i > 0) + qeth_free_output_queue(card->qdio.out_qs[--i]); kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; out_freepool: @@ -2562,10 +2582,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) qeth_free_buffer_pool(card); /* free outbound qdio_qs */ if (card->qdio.out_qs) { - for (i = 0; i < card->qdio.no_out_queues; ++i) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - qeth_free_qdio_out_buf(card->qdio.out_qs[i]); - } + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_free_output_queue(card->qdio.out_qs[i]); kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; } @@ -4524,8 +4542,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; struct qeth_arp_query_info *qinfo; - struct qeth_snmp_cmd *snmp; unsigned char *data; + void *snmp_data; __u16 data_len; QETH_CARD_TEXT(card, 3, 
"snpcmdcb"); @@ -4533,7 +4551,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card, cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); qinfo = (struct qeth_arp_query_info *) reply->param; - snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); @@ -4546,10 +4563,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card, return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); - if (cmd->data.setadapterparms.hdr.seq_no == 1) - data_len -= (__u16)((char *)&snmp->data - (char *)cmd); - else - data_len -= (__u16)((char *)&snmp->request - (char *)cmd); + if (cmd->data.setadapterparms.hdr.seq_no == 1) { + snmp_data = &cmd->data.setadapterparms.data.snmp; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp); + } else { + snmp_data = &cmd->data.setadapterparms.data.snmp.request; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp.request); + } /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { @@ -4562,16 +4584,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ - if (cmd->data.setadapterparms.hdr.seq_no == 1) { - memcpy(qinfo->udata + qinfo->udata_offset, - (char *)snmp, - data_len + offsetof(struct qeth_snmp_cmd, data)); - qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data); - } else { - memcpy(qinfo->udata + qinfo->udata_offset, - (char *)&snmp->request, data_len); - } + memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); qinfo->udata_offset += data_len; + /* check if all replies received ... 
*/ QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); @@ -5090,7 +5105,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; retry: if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", @@ -5742,7 +5759,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); - card = qeth_alloc_card(); + card = qeth_alloc_card(gdev); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; @@ -5758,17 +5775,11 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) goto err_card; } - card->read.ccwdev = gdev->cdev[0]; - card->write.ccwdev = gdev->cdev[1]; - card->data.ccwdev = gdev->cdev[2]; dev_set_drvdata(&gdev->dev, card); - card->gdev = gdev; - gdev->cdev[0]->handler = qeth_irq; - gdev->cdev[1]->handler = qeth_irq; - gdev->cdev[2]->handler = qeth_irq; - qeth_setup_card(card); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; card->dev = qeth_alloc_netdev(card); if (!card->dev) { @@ -5806,6 +5817,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) qeth_core_free_discipline(card); err_load: free_netdev(card->dev); +err_chp_desc: err_card: qeth_core_free_card(card); err_dev: diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b5e38531733f26e49a90158acf481744106dd42f..95669d47c389eb67c39db0c450a564ece2bc2d83 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -789,7 +789,10 @@ static int __qeth_l2_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -854,7 +857,10 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - unregister_netdev(card->dev); + + cancel_work_sync(&card->close_dev_work); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); } static const struct ethtool_ops qeth_l2_ethtool_ops = { @@ -894,7 +900,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) { int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; card->dev->priv_flags |= IFF_UNICAST_FLT; @@ -1898,7 +1904,7 @@ static void qeth_bridgeport_an_set_cb(void *priv, l2entry = (struct qdio_brinfo_entry_l2 *)entry; code = IPA_ADDR_CHANGE_CODE_MACADDR; - if (l2entry->addr_lnid.lnid) + if (l2entry->addr_lnid.lnid < VLAN_N_VID) code |= IPA_ADDR_CHANGE_CODE_VLANID; qeth_bridge_emit_host_event(card, anev_reg_unreg, code, (struct net_if_token *)&l2entry->nit, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ada258c01a08e084b11d02d73e553404bfce82cb..52e0ae4dc7241225ff87ebc5e1292e1cd015a070 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -279,9 +279,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) QETH_CARD_TEXT(card, 4, "clearip"); - if (recover && card->options.sniffer) - return; - spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { @@ -664,6 +661,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int rc = 0; 
int cnt = 3; + if (card->options.sniffer) + return 0; if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "setaddr4"); @@ -698,6 +697,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, { int rc = 0; + if (card->options.sniffer) + return 0; + if (addr->proto == QETH_PROT_IPV4) { QETH_CARD_TEXT(card, 2, "deladdr4"); QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); @@ -2412,7 +2414,10 @@ static int __qeth_l3_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; @@ -2512,7 +2517,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) { int rc; - if (card->dev->netdev_ops) + if (qeth_netdev_is_registered(card->dev)) return 0; if (card->info.type == QETH_CARD_TYPE_OSD || @@ -2584,12 +2589,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc; + hash_init(card->ip_htable); + if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l3_create_device_attributes(&gdev->dev); if (rc) return rc; } - hash_init(card->ip_htable); + hash_init(card->ip_mc_htable); card->options.layer2 = 0; card->info.hwtrap = 0; @@ -2609,7 +2616,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - unregister_netdev(card->dev); + cancel_work_sync(&card->close_dev_work); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 94f4d8fe85e0e5bc595470d8d0e161ab664ae735..d1b531fe9ada1d72403a8181ccaae2270b866a07 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) */ int zfcp_status_read_refill(struct zfcp_adapter *adapter) { - while (atomic_read(&adapter->stat_miss) > 0) + while (atomic_add_unless(&adapter->stat_miss, -1, 0)) if (zfcp_fsf_status_read(adapter->qdio)) { + atomic_inc(&adapter->stat_miss); /* undo add -1 */ if (atomic_read(&adapter->stat_miss) >= adapter->stat_read_buf_num) { zfcp_erp_adapter_reopen(adapter, 0, "axsref1"); return 1; } break; - } else - atomic_dec(&adapter->stat_miss); + } return 0; } diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 3b368fcf13f403c0b1c186b6e6a137e1a95352e6..946380f0d7199830751fe38b2cba98a5de295bfa 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -94,11 +94,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); - if (req->fsf_command != FSF_QTCB_FCP_CMND) { - rec->pl_len = q_head->log_length; - zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, - rec->pl_len, "fsf_res", req->req_id); - } + rec->pl_len = q_head->log_length; + zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, + rec->pl_len, "fsf_res", req->req_id); debug_event(dbf->hba, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e7e6b63905e20f04e0628883100da8972056e6b4..f602b42b8343d3fcc44d199c8864a4c11cffc97d 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ 
-11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include +#include #include "zfcp_ext.h" #include "zfcp_reqlist.h" @@ -171,9 +172,6 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter, adapter, ZFCP_STATUS_COMMON_ERP_FAILED); } break; - default: - need = 0; - break; } return need; @@ -238,6 +236,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, struct zfcp_erp_action *erp_action; struct zfcp_scsi_dev *zfcp_sdev; + if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN && + need != ZFCP_ERP_ACTION_REOPEN_PORT && + need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED && + need != ZFCP_ERP_ACTION_REOPEN_ADAPTER)) + return NULL; + switch (need) { case ZFCP_ERP_ACTION_REOPEN_LUN: zfcp_sdev = sdev_to_zfcp(sdev); @@ -643,6 +647,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) add_timer(&erp_action->timer); } +void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, + int clear, char *dbftag) +{ + unsigned long flags; + struct zfcp_port *port; + + write_lock_irqsave(&adapter->erp_lock, flags); + read_lock(&adapter->port_list_lock); + list_for_each_entry(port, &adapter->port_list, list) + _zfcp_erp_port_forced_reopen(port, clear, dbftag); + read_unlock(&adapter->port_list_lock); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear, char *id) { @@ -1297,6 +1315,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); int lun_status; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL) + continue; if (zsdev->port != port) continue; /* LUN under port of interest */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index bd0c5a9f04cb34a4b0cff73224ee99e3c58c1a08..3d971ffbd4bc2dead005f9b7b532f06ae6540862 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -69,6 +69,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id); extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); +extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter, + int clear, char *dbftag); extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); @@ -162,6 +164,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[]; extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; +bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port); /* zfcp_unit.c */ extern int zfcp_unit_add(struct zfcp_port *, u64); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index f6c415d6ef4875c8e856a11720c975b6575d63a7..5eb7aabe2d8b25790b7fdd968c4b04c55f192f79 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, list_for_each_entry(port, &adapter->port_list, list) { if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) zfcp_fc_test_link(port); - if (!port->d_id) - zfcp_erp_port_reopen(port, - ZFCP_STATUS_COMMON_ERP_FAILED, - "fcrscn1"); } 
read_unlock_irqrestore(&adapter->port_list_lock, flags); } @@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) { struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; + struct zfcp_adapter *adapter = fsf_req->adapter; struct fc_els_rscn *head; struct fc_els_rscn_page *page; u16 i; @@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) no_entries = be16_to_cpu(head->rscn_plen) / sizeof(struct fc_els_rscn_page); + if (no_entries > 1) { + /* handle failed ports */ + unsigned long flags; + struct zfcp_port *port; + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { + if (port->d_id) + continue; + zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + "fcrscn1"); + } + read_unlock_irqrestore(&adapter->port_list_lock, flags); + } + for (i = 1; i < no_entries; i++) { /* skip head and start with 1st element */ page++; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 3c86e27f094deb3fefb423a2d046a7b08d69eb46..91aa4bfcf8d612041ce5ffad18681f02fc01c62f 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -21,6 +21,11 @@ struct kmem_cache *zfcp_fsf_qtcb_cache; +static bool ber_stop = true; +module_param(ber_stop, bool, 0600); +MODULE_PARM_DESC(ber_stop, + "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); + static void zfcp_fsf_request_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); @@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) case FSF_STATUS_READ_SENSE_DATA_AVAIL: break; case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - dev_warn(&adapter->ccw_device->dev, - "The error threshold for checksum statistics " - "has been exceeded\n"); zfcp_dbf_hba_bit_err("fssrh_3", req); + if (ber_stop) { + dev_warn(&adapter->ccw_device->dev, + "All paths over this FCP device are disused because of excessive bit errors\n"); + zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); + } else { + dev_warn(&adapter->ccw_device->dev, + "The error threshold for checksum statistics has been exceeded\n"); + } break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); @@ -1594,6 +1604,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1616,6 +1627,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); req->data = wka_port; + req_id = req->req_id; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1623,7 +1636,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); return retval; } @@ -1649,6 +1662,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1671,6 +1685,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) req->data = wka_port; 
req->qtcb->header.port_handle = wka_port->handle; + req_id = req->req_id; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1678,7 +1694,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); return retval; } @@ -2088,11 +2104,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req, break; case FSF_CMND_LENGTH_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, - "Incorrect CDB length %d, LUN 0x%016Lx on " - "port 0x%016Lx closed\n", - req->qtcb->bottom.io.fcp_cmnd_length, - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn); + "Incorrect FCP_CMND length %d, FCP device closed\n", + req->qtcb->bottom.io.fcp_cmnd_length); zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index a8efcb330bc1d10c4b41898a48f1d830301ddd75..588bf5ac6fb96fb57286fd27b3698e96f475c959 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -125,6 +125,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) zfcp_sdev->erp_action.port = port; + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (zfcp_sysfs_port_is_removing(port)) { + /* port is already gone */ + mutex_unlock(&zfcp_sysfs_port_units_mutex); + put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */ + return -ENXIO; + } + mutex_unlock(&zfcp_sysfs_port_units_mutex); + unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); if (unit) put_device(&unit->dev); @@ -362,6 +371,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; int ret = SUCCESS, fc_ret; + if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { + zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p"); + zfcp_erp_wait(adapter); + } zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); fc_ret = fc_block_scsi_eh(scpnt); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index b277be6f7611ccc9925bc8148416637f5fdb6b58..af197e2b3e69d180ff60c9101efbe8f44b34cebe 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -235,6 +235,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); +static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port) +{ + lockdep_assert_held(&zfcp_sysfs_port_units_mutex); + atomic_set(&port->units, -1); +} + +bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port) +{ + lockdep_assert_held(&zfcp_sysfs_port_units_mutex); + return atomic_read(&port->units) == -1; +} + +static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port) +{ + struct zfcp_adapter *const adapter = port->adapter; + unsigned long flags; + struct scsi_device *sdev; + bool in_use = true; + + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) > 0) + goto unlock_port_units_mutex; /* zfcp_unit(s) under port */ + + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { + const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); + + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL) + continue; + if (zsdev->port != port) + continue; + /* alive scsi_device 
under port of interest */ + goto unlock_host_lock; + } + + /* port is about to be removed, so no more unit_add or slave_alloc */ + zfcp_sysfs_port_set_removing(port); + in_use = false; + +unlock_host_lock: + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); +unlock_port_units_mutex: + mutex_unlock(&zfcp_sysfs_port_units_mutex); + return in_use; +} + static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -257,15 +304,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, else retval = 0; - mutex_lock(&zfcp_sysfs_port_units_mutex); - if (atomic_read(&port->units) > 0) { + if (zfcp_sysfs_port_in_use(port)) { retval = -EBUSY; - mutex_unlock(&zfcp_sysfs_port_units_mutex); + put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */ goto out; } - /* port is about to be removed, so no more unit_add */ - atomic_set(&port->units, -1); - mutex_unlock(&zfcp_sysfs_port_units_mutex); write_lock_irq(&adapter->port_list_lock); list_del(&port->list); diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 1bf0a0984a098f083b8ff5e53fffe3710cbe6884..e67bf7388cae82d5b533e9e6ff12f271754fdf32 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -124,7 +124,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) int retval = 0; mutex_lock(&zfcp_sysfs_port_units_mutex); - if (atomic_read(&port->units) == -1) { + if (zfcp_sysfs_port_is_removing(port)) { /* port is already gone */ retval = -ENODEV; goto out; @@ -168,8 +168,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); + /* + * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex + * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc() + */ + mutex_unlock(&zfcp_sysfs_port_units_mutex); zfcp_unit_scsi_scan(unit); + return retval; out: mutex_unlock(&zfcp_sysfs_port_units_mutex); diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 8f5c1d7f751aee594b763100c2bf6265df9f3a7d..67efdf25657f33e83e15aafaafe5b2aafc3e5ede 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -56,6 +56,7 @@ struct virtio_ccw_device { unsigned int revision; /* Transport revision */ wait_queue_head_t wait_q; spinlock_t lock; + struct mutex io_lock; /* Serializes I/O requests */ struct list_head virtqueues; unsigned long indicators; unsigned long indicators2; @@ -131,6 +132,7 @@ struct airq_info { struct airq_iv *aiv; }; static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; +static DEFINE_MUTEX(airq_areas_lock); #define CCW_CMD_SET_VQ 0x13 #define CCW_CMD_VDEV_RESET 0x33 @@ -243,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, unsigned long bit, flags; for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { + mutex_lock(&airq_areas_lock); if (!airq_areas[i]) airq_areas[i] = new_airq_info(); info = airq_areas[i]; + mutex_unlock(&airq_areas_lock); if (!info) return 0; write_lock_irqsave(&info->lock, flags); @@ -271,6 +275,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev) { struct virtio_ccw_vq_info *info; + if (!vcdev->airq_info) + return; list_for_each_entry(info, &vcdev->virtqueues, node) drop_airq_indicator(info->vq, vcdev->airq_info); } @@ -296,6 +302,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, unsigned long flags; int flag = intparm & 
VIRTIO_CCW_INTPARM_MASK; + mutex_lock(&vcdev->io_lock); do { spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); @@ -308,7 +315,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev, cpu_relax(); } while (ret == -EBUSY); wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); - return ret ? ret : vcdev->err; + ret = ret ? ret : vcdev->err; + mutex_unlock(&vcdev->io_lock); + return ret; } static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, @@ -409,7 +418,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); if (ret) return ret; - return vcdev->config_block->num; + return vcdev->config_block->num ?: -ENOENT; } static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) @@ -828,6 +837,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, int ret; struct ccw1 *ccw; void *config_area; + unsigned long flags; ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); if (!ccw) @@ -846,11 +856,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, if (ret) goto out_free; + spin_lock_irqsave(&vcdev->lock, flags); memcpy(vcdev->config, config_area, offset + len); - if (buf) - memcpy(buf, &vcdev->config[offset], len); if (vcdev->config_ready < offset + len) vcdev->config_ready = offset + len; + spin_unlock_irqrestore(&vcdev->lock, flags); + if (buf) + memcpy(buf, config_area + offset, len); out_free: kfree(config_area); @@ -864,6 +876,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, struct virtio_ccw_device *vcdev = to_vc_device(vdev); struct ccw1 *ccw; void *config_area; + unsigned long flags; ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); if (!ccw) @@ -876,9 +889,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, /* Make sure we don't overwrite fields. */ if (vcdev->config_ready < offset) virtio_ccw_get_config(vdev, 0, NULL, offset); + spin_lock_irqsave(&vcdev->lock, flags); memcpy(&vcdev->config[offset], buf, len); /* Write the config area to the host. */ memcpy(config_area, vcdev->config, sizeof(vcdev->config)); + spin_unlock_irqrestore(&vcdev->lock, flags); ccw->cmd_code = CCW_CMD_WRITE_CONF; ccw->flags = 0; ccw->count = offset + len; @@ -1247,6 +1262,7 @@ static int virtio_ccw_online(struct ccw_device *cdev) init_waitqueue_head(&vcdev->wait_q); INIT_LIST_HEAD(&vcdev->virtqueues); spin_lock_init(&vcdev->lock); + mutex_init(&vcdev->io_lock); spin_lock_irqsave(get_ccwdev_lock(cdev), flags); dev_set_drvdata(&cdev->dev, vcdev); diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 5c8ed7350a04a4f65be2eefa103637f2943e5a37..a36e4cf1841d9da7fd22cb5f6491d134b9b7f96f 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op) dev_set_drvdata(&op->dev, p); d7s_device = p; err = 0; + of_node_put(opts); out: return err; diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 56e962a014939e31c7ad74687263ecd69558ef7b..b8481927bfe4048b4147e01c77c0f7f695ab90bb 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp, for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { pchild->mon_type[len] = ENVCTRL_NOMON; } + of_node_put(root_node); return; } + of_node_put(root_node); } /* Get the monitor channels. 
*/ diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 6be77b3aa8a5f53210916c428190bf95d02b18be..ac79f2088b3167d1024de7205f6bcff6c96e636e 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, if(tpnt->sdev_attrs == NULL) tpnt->sdev_attrs = NCR_700_dev_attrs; - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if(memory == NULL) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 0d4ffe0ae3065b8129567110e42ea564d4b01a5a..9840f4b06d7813cf4858b359822b27280b831cb4 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -3605,7 +3605,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, if (buf[0] != '\n' || len > 1) printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); } else - printk("%s", buf); + pr_cont("%s", buf); } else { if (begin) { if (adapter != NULL && adapter->adapter_initd) @@ -3613,7 +3613,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, else printk("%s%s", blogic_msglevelmap[msglevel], buf); } else - printk("%s", buf); + pr_cont("%s", buf); } begin = (buf[len - 1] == '\n'); } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 7c097006c54db679c40ebd51ca5efcc8d8dc297d..991eca431917079522b624dfacf3cc76280094a2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -188,7 +188,7 @@ config CHR_DEV_SCH changers are listed as "Type: Medium Changer" in /proc/scsi/scsi. If you have such hardware and want to use it with linux, say Y here. Check for details. - + If you want to compile this as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read and @@ -480,17 +480,6 @@ config SCSI_MVUMI To compile this driver as a module, choose M here: the module will be called mvumi. -config SCSI_DPT_I2O - tristate "Adaptec I2O RAID support " - depends on SCSI && PCI && VIRT_TO_BUS - help - This driver supports all of Adaptec's I2O based RAID controllers as - well as the DPT SmartRaid V cards. This is an Adaptec maintained - driver by Deanna Bonds. See . - - To compile this driver as a module, choose M here: the - module will be called dpt_i2o. - config SCSI_ADVANSYS tristate "AdvanSys SCSI support" depends on SCSI @@ -519,9 +508,12 @@ config SCSI_ARCMSR source "drivers/scsi/esas2r/Kconfig" source "drivers/scsi/megaraid/Kconfig.megaraid" +source "drivers/scsi/sssraid/Kconfig" source "drivers/scsi/mpt3sas/Kconfig" source "drivers/scsi/smartpqi/Kconfig" +source "drivers/scsi/hisi_raid/Kconfig" source "drivers/scsi/ufs/Kconfig" +source "drivers/scsi/spraid/Kconfig" config SCSI_HPTIOP tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" @@ -647,7 +639,7 @@ config SCSI_GDTH ---help--- Formerly called GDT SCSI Disk Array Controller Support. - This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI) + This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI) manufactured by Intel Corporation/ICP vortex GmbH. It is documented in the kernel source in and . 
@@ -862,7 +854,7 @@ config SCSI_SNI_53C710 config 53C700_LE_ON_BE bool - depends on SCSI_LASI700 + depends on SCSI_LASI700 || SCSI_SNI_53C710 default y config SCSI_STEX @@ -1122,6 +1114,8 @@ source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" source "drivers/scsi/qedf/Kconfig" +source "drivers/scsi/huawei/Kconfig" + config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI @@ -1174,8 +1168,6 @@ config SCSI_AM53C974 PCscsi/PCnet (Am53/79C974) solutions. This is a new implementation base on the generic esp_scsi driver. - Documentation can be found in . - Note that this driver does NOT support Tekram DC390W/U/F, which are based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 6d71b2a9592b0a7d50a5291ce8bf2f7192c740f5..435f4a565207ff4509b1f8259815c8921edc12c9 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -64,7 +64,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o -obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/ obj-$(CONFIG_SCSI_AHA152X) += aha152x.o obj-$(CONFIG_SCSI_AHA1542) += aha1542.o @@ -79,24 +78,28 @@ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o -obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o +obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ +obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/ obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o obj-$(CONFIG_SCSI_HPSA) += hpsa.o obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/ +obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid/ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o obj-$(CONFIG_SCSI_DC395x) += dc395x.o obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o obj-$(CONFIG_CXLFLASH) += cxlflash/ +obj-$(CONFIG_RAMAXEL_SPRAID) += spraid/ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/ +obj-$(CONFIG_SCSI_HISI_RAID) += hisi_raid/ obj-$(CONFIG_SCSI_UFSHCD) += ufs/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 90ea0f5d9bdbbfc78da0b474a1490b184f3bde25..95a3e3bf2b431831bcc2bd1e0a843121ee71de56 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -129,8 +129,12 @@ #define NCR5380_release_dma_irq(x) #endif +static unsigned int disconnect_mask = ~0; +module_param(disconnect_mask, int, 0444); + static int do_abort(struct Scsi_Host *); static void do_reset(struct Scsi_Host *); +static void bus_reset_cleanup(struct Scsi_Host *); /** * initialize_SCp - init the scsi pointer field @@ -513,16 +517,15 @@ static void complete_cmd(struct Scsi_Host *instance, if (hostdata->sensing == cmd) { /* Autosense processing ends here */ - if ((cmd->result & 0xff) != SAM_STAT_GOOD) { + if (status_byte(cmd->result) != GOOD) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); - set_host_byte(cmd, DID_ERROR); - } else + } else { scsi_eh_restore_cmnd(cmd, &hostdata->ses); + 
set_driver_byte(cmd, DRIVER_SENSE); + } hostdata->sensing = NULL; } - hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); - cmd->scsi_done(cmd); } @@ -710,6 +713,8 @@ static void NCR5380_main(struct work_struct *work) NCR5380_information_transfer(instance); done = 0; } + if (!hostdata->connected) + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); spin_unlock_irq(&hostdata->lock); if (!done) cond_resched(); @@ -884,7 +889,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) /* Probably Bus Reset */ NCR5380_read(RESET_PARITY_INTERRUPT_REG); - dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); + if (sr & SR_RST) { + /* Certainly Bus Reset */ + shost_printk(KERN_WARNING, instance, + "bus reset interrupt\n"); + bus_reset_cleanup(instance); + } else { + dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); + } #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif @@ -902,20 +914,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) return IRQ_RETVAL(handled); } -/* - * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd) - * - * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. +/** + * NCR5380_select - attempt arbitration and selection for a given command + * @instance: the Scsi_Host instance + * @cmd: the scsi_cmnd to execute * - * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute. + * This routine establishes an I_T_L nexus for a SCSI command. This involves + * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message. * - * Returns cmd if selection failed but should be retried, - * NULL if selection failed and should not be retried, or - * NULL if selection succeeded (hostdata->connected == cmd). + * Returns true if the operation should be retried. + * Returns false if it should not be retried. * * Side effects : * If bus busy, arbitration failed, etc, NCR5380_select() will exit @@ -923,16 +931,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) * SELECT_ENABLE will be set appropriately, the NCR5380 * will cease to drive any SCSI bus signals. * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. + * If successful : the I_T_L nexus will be established, and + * hostdata->connected will be set to cmd. * SELECT interrupt will be disabled. * * If failed (no target) : cmd->scsi_done() will be called, and the * cmd->result host byte set to DID_BAD_TARGET. */ -static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, - struct scsi_cmnd *cmd) +static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) __releases(&hostdata->lock) __acquires(&hostdata->lock) { struct NCR5380_hostdata *hostdata = shost_priv(instance); @@ -940,6 +947,10 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, unsigned char *data; int len; int err; + bool ret = true; + bool can_disconnect = instance->irq != NO_IRQ && + cmd->cmnd[0] != REQUEST_SENSE && + (disconnect_mask & BIT(scmd_id(cmd))); NCR5380_dprint(NDEBUG_ARBITRATION, instance); dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", @@ -948,7 +959,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, /* * Arbitration and selection phases are slow and involve dropping the * lock, so we have to watch out for EH. 
An exception handler may - * change 'selecting' to NULL. This function will then return NULL + * change 'selecting' to NULL. This function will then return false * so that the caller will forget about 'cmd'. (During information * transfer phases, EH may change 'connected' to NULL.) */ @@ -984,7 +995,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, if (!hostdata->selecting) { /* Command was aborted */ NCR5380_write(MODE_REG, MR_BASE); - goto out; + return false; } if (err < 0) { NCR5380_write(MODE_REG, MR_BASE); @@ -1033,7 +1044,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, if (!hostdata->selecting) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - goto out; + return false; } dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); @@ -1106,8 +1117,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, spin_lock_irq(&hostdata->lock); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_reselect(instance); - if (!hostdata->connected) - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); goto out; } @@ -1115,14 +1124,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, if (err < 0) { spin_lock_irq(&hostdata->lock); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + /* Can't touch cmd if it has been reclaimed by the scsi ML */ - if (hostdata->selecting) { - cmd->result = DID_BAD_TARGET << 16; - complete_cmd(instance, cmd); - dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); - cmd = NULL; - } + if (!hostdata->selecting) + return false; + + cmd->result = DID_BAD_TARGET << 16; + complete_cmd(instance, cmd); + dsprintk(NDEBUG_SELECTION, instance, + "target did not respond within 250ms\n"); + ret = false; goto out; } @@ -1150,17 +1161,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, if (err < 0) { shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); goto out; } if (!hostdata->selecting) { do_abort(instance); - goto out; + return false; } dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", scmd_id(cmd)); - tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 
0 : 1), cmd->device->lun); + tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun); len = 1; data = tmp; @@ -1171,7 +1181,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n"); - cmd = NULL; + ret = false; goto out; } @@ -1186,13 +1196,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, initialize_SCp(cmd); - cmd = NULL; + ret = false; out: if (!hostdata->selecting) return NULL; hostdata->selecting = NULL; - return cmd; + return ret; } /* @@ -1711,6 +1721,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); return; #endif case PHASE_DATAIN: @@ -1793,6 +1804,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd, scmd_id(cmd), cmd->device->lun); hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); cmd->result &= ~0xffff; cmd->result |= cmd->SCp.Status; @@ -1817,9 +1829,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ NCR5380_write(TARGET_COMMAND_REG, 0); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - maybe_release_dma_irq(instance); return; case MESSAGE_REJECT: @@ -1851,8 +1860,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ NCR5380_write(TARGET_COMMAND_REG, 0); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif @@ -1951,10 +1958,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); maybe_release_dma_irq(instance); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; } msgout = NOP; @@ -2014,8 +2021,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance) NCR5380_write(MODE_REG, MR_BASE); target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - - dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); + if (!target_mask || target_mask & (target_mask - 1)) { + shost_printk(KERN_WARNING, instance, + "reselect: bad target_mask 0x%02x\n", target_mask); + return; + } /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2029,6 +2039,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { + shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n"); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return; } @@ -2040,6 +2051,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance) if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { + if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0) + /* BUS FREE phase */ + return; + shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n"); do_abort(instance); return; } @@ -2101,13 +2116,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance) dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, "reselect: removed %p from disconnected queue\n", tmp); } else { + int target = ffs(target_mask) 
- 1; + shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", target_mask, lun); /* * Since we have an established nexus that we can't do anything * with, we must abort it. */ - do_abort(instance); + if (do_abort(instance) == 0) + hostdata->busy[target] &= ~(1 << lun); return; } @@ -2272,15 +2290,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (list_del_cmd(&hostdata->autosense, cmd)) { dsprintk(NDEBUG_ABORT, instance, "abort: removed %p from sense queue\n", cmd); - set_host_byte(cmd, DID_ERROR); complete_cmd(instance, cmd); } out: if (result == FAILED) dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); - else + else { + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); + } queue_work(hostdata->work_q, &hostdata->main_task); maybe_release_dma_irq(instance); @@ -2290,31 +2309,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) } -/** - * NCR5380_host_reset - reset the SCSI host - * @cmd: SCSI command undergoing EH - * - * Returns SUCCESS - */ - -static int NCR5380_host_reset(struct scsi_cmnd *cmd) +static void bus_reset_cleanup(struct Scsi_Host *instance) { - struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; - unsigned long flags; struct NCR5380_cmd *ncmd; - spin_lock_irqsave(&hostdata->lock, flags); - -#if (NDEBUG & NDEBUG_ANY) - scmd_printk(KERN_INFO, cmd, __func__); -#endif - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - do_reset(instance); - /* reset NCR registers */ NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(TARGET_COMMAND_REG, 0); @@ -2326,11 +2326,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd) * commands! 
*/ - if (list_del_cmd(&hostdata->unissued, cmd)) { - cmd->result = DID_RESET << 16; - cmd->scsi_done(cmd); - } - if (hostdata->selecting) { hostdata->selecting->result = DID_RESET << 16; complete_cmd(instance, hostdata->selecting); @@ -2348,7 +2343,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd) list_for_each_entry(ncmd, &hostdata->autosense, list) { struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - set_host_byte(cmd, DID_RESET); cmd->scsi_done(cmd); } INIT_LIST_HEAD(&hostdata->autosense); @@ -2365,6 +2359,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd) queue_work(hostdata->work_q, &hostdata->main_task); maybe_release_dma_irq(instance); +} + +/** + * NCR5380_host_reset - reset the SCSI host + * @cmd: SCSI command undergoing EH + * + * Returns SUCCESS + */ + +static int NCR5380_host_reset(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned long flags; + struct NCR5380_cmd *ncmd; + + spin_lock_irqsave(&hostdata->lock, flags); + +#if (NDEBUG & NDEBUG_ANY) + shost_printk(KERN_INFO, instance, __func__); +#endif + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); + + list_for_each_entry(ncmd, &hostdata->unissued, list) { + struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd); + + scmd->result = DID_RESET << 16; + scmd->scsi_done(scmd); + } + INIT_LIST_HEAD(&hostdata->unissued); + + do_reset(instance); + bus_reset_cleanup(instance); + spin_unlock_irqrestore(&hostdata->lock, flags); return SUCCESS; diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 31096a0b0fdd30202068b0376396eaf6b783bf29..5935fd6d1a0581178c6e4676d25c740415899cb0 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -235,7 +235,7 @@ struct NCR5380_cmd { #define NCR5380_PIO_CHUNK_SIZE 256 /* Time limit (ms) to poll registers when IRQs are disabled, e.g. 
during PDMA */ -#define NCR5380_REG_POLL_TIME 15 +#define NCR5380_REG_POLL_TIME 10 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr) { @@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id); static void NCR5380_main(struct work_struct *work); static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); -static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); +static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); static int NCR5380_poll_politely2(struct NCR5380_hostdata *, diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 6e356325d8d98a120aa93ceb5c12d5898880ba14..14938416c3dc1f1f64cc4ace9eb1da6f36849f6c 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -3452,7 +3452,7 @@ static int delete_disk(struct aac_dev *dev, void __user *arg) } } -int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg) +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { switch (cmd) { case FSACTL_QUERY_DISK: diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 39eb415987fc9fd901ba9ee157dedfcf76910829..f59b4f6eda0a87da6ae6b2757e5b658b3c43d2e9 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2639,9 +2639,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) return capacity; } +static inline int aac_pci_offline(struct aac_dev *dev) +{ + return pci_channel_offline(dev->pdev) || dev->handle_pci_error; +} + static inline int aac_adapter_check_health(struct aac_dev *dev) { - if (unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -1; return (dev)->a_ops.adapter_check_health(dev); @@ -2705,12 +2710,12 @@ void aac_set_intx_mode(struct aac_dev *dev); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); -int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); #ifndef shost_to_class #define shost_to_class(shost) &shost->shost_dev #endif ssize_t aac_get_serial_number(struct device *dev, char *buf); -int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); int aac_rx_init(struct aac_dev *dev); int aac_rkt_init(struct aac_dev *dev); int aac_nark_init(struct aac_dev *dev); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 25f6600d6c090c45e9dfb6c7aabaadc7ffa72662..fd8a10871598a74b181a3a6fcde909491e5bda5c 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -1061,7 +1061,7 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg) return retval; } -int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { int status; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 0dc7b5a4fea25b98d42342886af30242e5fe1dcf..0378fd3eb0392b06bca3f4ae9a566be758c9f191 100644 --- 
a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -652,6 +652,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if (aac_comm_init(dev)<0){ kfree(dev->queues); + dev->queues = NULL; return NULL; } /* @@ -659,6 +660,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) */ if (aac_fib_setup(dev) < 0) { kfree(dev->queues); + dev->queues = NULL; return NULL; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 6e1b022a823d59d5e2c9ab664fa9a82f76526f3c..b7588de4484e58ea0001dd983564efc28e8cc93e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -673,7 +673,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, return -ETIMEDOUT; } - if (unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -EFAULT; if ((blink = aac_adapter_check_health(dev)) > 0) { @@ -773,7 +773,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, spin_unlock_irqrestore(&fibptr->event_lock, flags); - if (unlikely(pci_channel_offline(dev->pdev))) + if (unlikely(aac_pci_offline(dev))) return -EFAULT; fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; @@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ADD : DELETE; break; } - case AifBuManagerEvent: - aac_handle_aif_bu(dev, aifcmd); + break; + case AifBuManagerEvent: + aac_handle_aif_bu(dev, aifcmd); break; } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 04443577d48b371f37afb057ec5bceb9f2e000f7..e25960e119dedc488b112621946a6df52b7ecd3a 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev) if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { devtype = aac->hba_map[chn][tid].devtype; - if (devtype == AAC_DEVTYPE_NATIVE_RAW) + if (devtype == AAC_DEVTYPE_NATIVE_RAW) { depth = aac->hba_map[chn][tid].qd_limit; - else if (devtype == AAC_DEVTYPE_ARC_RAW) + set_timeout = 1; + goto common_config; + } + if (devtype == AAC_DEVTYPE_ARC_RAW) { set_qd_dev_type = true; - - set_timeout = 1; - goto common_config; + set_timeout = 1; + goto common_config; + } } if (aac->jbod && (sdev->type == TYPE_DISK)) @@ -616,7 +619,8 @@ static struct device_attribute *aac_dev_attrs[] = { NULL, }; -static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg) +static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; if (!capable(CAP_SYS_RAWIO)) @@ -1205,7 +1209,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long return ret; } -static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; if (!capable(CAP_SYS_RAWIO)) diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 713f69033f201bf7ff4d9fcf29eadbd355db1e2d..2856b0ce7ab9a7ff0f0a664f93d07309852004b4 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -3370,8 +3370,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) shost->host_no); seq_printf(m, - " iop_base 0x%lx, cable_detect: %X, err_code %u\n", - (unsigned long)v->iop_base, + " iop_base 0x%p, cable_detect: %X, err_code %u\n", + v->iop_base, 
AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, v->err_code); diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 3d401d02c01955bc02304fe4f761993e73eaad46..bdd177e3d76229bae1fb67003a9045a3f3e26c87 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c @@ -91,6 +91,7 @@ aic7770_probe(struct device *dev) ahc = ahc_alloc(&aic7xxx_driver_template, name); if (ahc == NULL) return (ENOMEM); + ahc->dev = dev; error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, eisaBase); if (error != 0) { diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 4ce4e903a759e9eab6c84a6ee2dbf0c29c97f4b1..7f6e83296dfa49ec764f592e2c31079ca22719a5 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -949,6 +949,7 @@ struct ahc_softc { * Platform specific device information. */ ahc_dev_softc_t dev_softc; + struct device *dev; /* * Bus specific device information. diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index c6be3aeb302b578e5c09051641945ce25af9ea16..306d0bf33478ce03ed917ed9cec0352c0199c92e 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -861,8 +861,8 @@ int ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { - *vaddr = pci_alloc_consistent(ahc->dev_softc, - dmat->maxsize, mapp); + /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ + *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); if (*vaddr == NULL) return ENOMEM; return 0; @@ -872,8 +872,7 @@ void ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map) { - pci_free_consistent(ahc->dev_softc, dmat->maxsize, - vaddr, map); + dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); } int @@ -1124,8 +1123,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa host->transportt = ahc_linux_transport_template; - retval = scsi_add_host(host, - (ahc->dev_softc ? 
&ahc->dev_softc->dev : NULL)); + retval = scsi_add_host(host, ahc->dev); if (retval) { printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); scsi_host_put(host); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 0fc14dac7070ce6bab629c08a2998638fc26553e..717d8d1082ce18ae9899870e43238c1443fdb36f 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } ahc->dev_softc = pci; + ahc->dev = &pci->dev; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 1391e5f3591809dcfb1bef83a4ee52f4732f7375..ad8a65ab489cf8b5ed627f616ecf4f19222919b1 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -71,6 +71,7 @@ static struct scsi_host_template aic94xx_sht = { .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .track_queue_depth = 1, @@ -281,7 +282,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", asd_dev_rev[asd_ha->revision_id]); } -static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); +static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); static ssize_t asd_show_dev_bios_build(struct device *dev, struct device_attribute *attr,char *buf) @@ -478,7 +479,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) { int err; - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); if (err) return err; @@ -500,13 +501,13 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) err_biosb: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); err_rev: - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); return err; } static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) { - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 12316ef4c89318536f88e6b84f7f92d7d887d8f2..c75d4695f9828d0379a2e3218c7473a73f3f8bbc 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -4135,9 +4135,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) pci_read_config_byte(acb->pdev, i, &value[i]); } /* hardware reset signal */ - if ((acb->dev_id == 0x1680)) { + if (acb->dev_id == 0x1680) { writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); - } else if ((acb->dev_id == 0x1880)) { + } else if (acb->dev_id == 0x1880) { do { count++; writel(0xF, &pmuC->write_sequence); @@ -4161,7 +4161,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) } while (((readl(&pmuE->host_diagnostic_3xxx) & ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); - } else if ((acb->dev_id 
== 0x1214)) { + } else if (acb->dev_id == 0x1214) { writel(0x20, pmuD->reset_request); } else { pci_write_config_byte(acb->pdev, 0x84, 0x20); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 89f5154c40b6ec1a61fb259ecd7f20c7441107c3..764c46d7333e6f8ac299292da2a017396b993d2a 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) atari_scsi_template.sg_tablesize = SG_ALL; } else { atari_scsi_template.can_queue = 1; - atari_scsi_template.sg_tablesize = SG_NONE; + atari_scsi_template.sg_tablesize = 1; } if (setup_can_queue > 0) @@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev) if (setup_cmd_per_lun > 0) atari_scsi_template.cmd_per_lun = setup_cmd_per_lun; - /* Leave sg_tablesize at 0 on a Falcon! */ - if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0) + /* Don't increase sg_tablesize on Falcon! */ + if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0) atari_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) { diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 3660059784f74bdbf9620aa787c5f3ab0c823036..a5b807c676fccb50300eff41b6536c6162e80a0c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -423,7 +423,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); return NULL; } - shost->max_id = BE2_MAX_SESSIONS; + shost->max_id = BE2_MAX_SESSIONS - 1; shost->max_channel = 0; shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; shost->max_lun = BEISCSI_NUM_MAX_LUN; @@ -5336,7 +5336,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba) /* Re-enable UER. If different TPE occurs then it is recoverable. 
*/ beiscsi_set_uer_feature(phba); - phba->shost->max_id = phba->params.cxns_per_ctrl; + phba->shost->max_id = phba->params.cxns_per_ctrl - 1; phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); if (ret < 0) { diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 3d0c96a5c8735d608c0fd15b94b788f9f99758ce..c19c26e0e405e467eb8426427c8577cf44f2d806 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -1453,7 +1453,7 @@ union bfa_aen_data_u { struct bfa_aen_entry_s { struct list_head qe; enum bfa_aen_category aen_category; - u32 aen_type; + int aen_type; union bfa_aen_data_u aen_data; u64 aen_tv_sec; u64 aen_tv_usec; diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index bd7e6a6fc1f184e0b0065408864e7199d0adb46b..7a2a9b05ed09196a2b7ebfbfe025fb6a707566f1 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -1711,9 +1711,8 @@ bfad_init(void) error = bfad_im_module_init(); if (error) { - error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); - goto ext; + return -ENOMEM; } if (strcmp(FCPI_NAME, " fcpim") == 0) diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 26b0fa4e90b58b5340bf18bb20e1a22fd8d5c677..3b84290cf0a7e8f1bd9d0e0c2b3297a4ab682466 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -283,8 +283,10 @@ bfad_im_get_stats(struct Scsi_Host *shost) rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); - if (rc != BFA_STATUS_OK) + if (rc != BFA_STATUS_OK) { + kfree(fcstats); return NULL; + } wait_for_completion(&fcomp.comp); diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 349cfe7d055eb3fb615841b5ab36f1ba37e106bc..30c344322d5317721a957a52afafa5ccd849a313 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -258,7 +258,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); @@ -325,7 +325,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index e61ed8dad0b4f01c7ec374592bf6f46bfd834e8e..bd4ac187fd8e7830bcee23d1ee1e975582830aad 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -143,7 +143,7 @@ struct bfad_im_s { static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry, struct bfad_s *drv, int cnt, enum bfa_aen_category cat, - enum bfa_ioc_aen_event evt) + int evt) { struct timespec64 ts; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index f000458133789935fbb2b15550d91106ec5c65ca..ea2c601da8e15c87baf0f658f84a01d3ba85ae50 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); -static void bnx2fc_destroy_work(struct work_struct *work); +static void bnx2fc_port_destroy(struct fcoe_port 
*port); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device @@ -515,7 +515,8 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u32 fr_len; + u64 crc_err; + u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -549,6 +550,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + put_cpu(); + fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -631,16 +637,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - stats = per_cpu_ptr(lport->stats, smp_processor_id()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + fr_crc = le32_to_cpu(fr_crc(fp)); - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + stats = per_cpu_ptr(lport->stats, get_cpu()); + crc_err = (stats->InvalidCRCCount++); + put_cpu(); + if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); - stats->InvalidCRCCount++; kfree_skb(skb); return; } @@ -911,9 +916,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); - - /* Ensure ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); return; default: @@ -1220,8 +1222,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport) mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@ -1445,7 +1447,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) static struct bnx2fc_interface * bnx2fc_interface_create(struct bnx2fc_hba *hba, struct net_device *netdev, - enum fip_state fip_mode) + enum fip_mode fip_mode) { struct fcoe_ctlr_device *ctlr_dev; struct bnx2fc_interface *interface; @@ -1530,7 +1532,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; - INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); @@ -1658,8 +1659,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface) bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); } /** @@ -1700,15 +1701,12 @@ static int bnx2fc_destroy(struct net_device *netdev) return rc; } -static void bnx2fc_destroy_work(struct work_struct *work) +static void bnx2fc_port_destroy(struct fcoe_port *port) { - struct fcoe_port *port; struct fc_lport *lport; - port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; - - BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); bnx2fc_if_destroy(lport); } @@ -2371,7 +2369,7 @@ static int _bnx2fc_create(struct net_device *netdev, if (!interface) { printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); rc = 
-ENOMEM; - goto ifput_err; + goto netdev_err; } if (is_vlan_dev(netdev)) { @@ -2562,9 +2560,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); - /* Ensure ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); - bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e8ae4d671d233b8a532cf1cb1fc56faaa3a16979..097305949a955c87ad67eceb096992ffb50565ec 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -830,7 +830,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) ((u64)err_entry->data.err_warn_bitmap_hi << 32) | (u64)err_entry->data.err_warn_bitmap_lo; for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { - if (err_warn_bit_map & (u64) (1 << i)) { + if (err_warn_bit_map & ((u64)1 << i)) { err_warn = i; break; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 350257c13a5bac433f1fdfbfcdc12dc6698587cb..bc9f2a2365f4d4514aa744c591a7f5f84139ff78 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) return NULL; } + cmgr->hba = hba; cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), GFP_KERNEL); if (!cmgr->free_list) { @@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) goto mem_err; } - cmgr->hba = hba; cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); for (i = 0; i < arr_sz; i++) { @@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ mem_size = num_ios * sizeof(struct io_bdt *); - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); if (!cmgr->io_bdt_pool) { printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); goto mem_err; diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index e3d1c7c440c8c0fd6edc85e0fef3fe5050cafef4..c7d6842b293da9286ca3262162d741996108a5ea 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -834,7 +834,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); - spin_lock_bh(&tgt->cq_lock); ctx_base_ptr = tgt->ctx_base; tgt->ctx_base = NULL; @@ -890,7 +889,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, tgt->sq, tgt->sq_dma); tgt->sq = NULL; } - spin_unlock_bh(&tgt->cq_lock); if (ctx_base_ptr) iounmap(ctx_base_ptr); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index de0a507577ef56bc90330c8c0b532086823f1e85..f2f41502177077a0b842054841579b45e71a112a 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -793,7 +793,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) return NULL; shost->dma_boundary = cnic->pcidev->dma_mask; shost->transportt = bnx2i_scsi_xport_template; - shost->max_id = ISCSI_MAX_CONNS_PER_HBA; + shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1; shost->max_channel = 0; shost->max_lun = 512; shost->max_cmd_len = 16; @@ -1173,10 +1173,8 @@ static void bnx2i_cleanup_task(struct iscsi_task *task) bnx2i_send_cmd_cleanup_req(hba, task->dd_data); spin_unlock_bh(&conn->session->back_lock); - 
spin_unlock_bh(&conn->session->frwd_lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); - spin_lock_bh(&conn->session->frwd_lock); spin_lock_bh(&conn->session->back_lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 1c5051b1c1253901ed984d6264ffb40bbd8607d2..9e287927b7f988c8c13e06914670b30b42bc2a70 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file) scsi_changer *ch = file->private_data; scsi_device_put(ch->device); - ch->device = NULL; file->private_data = NULL; kref_put(&ch->ref, ch_destroy); return 0; diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c index 8a004036e3d72a666d3f49e22ee83539f165ba75..9bd2bd8dc2be24692c8ad72b13a1243721a5284c 100644 --- a/drivers/scsi/csiostor/csio_attr.c +++ b/drivers/scsi/csiostor/csio_attr.c @@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable) } fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + ln->fc_vport = fc_vport; if (csio_fcoe_alloc_vnp(hw, ln)) goto error; *(struct csio_lnode **)fc_vport->dd_data = ln; - ln->fc_vport = fc_vport; if (!fc_vport->node_name) fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); if (!fc_vport->port_name) diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index ed2dae657964be4997934c1efe191fb8b556363f..1793981337dd98763ca595ef62f597d1bf3ef929 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -649,7 +649,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev, if (csio_lnode_init(ln, hw, pln)) goto err_shost_put; - if (scsi_add_host(shost, dev)) + if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev)) goto err_lnode_exit; return ln; diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index cc5611efc7a9a62dbb1762ce950e4688188bbeca..a8e29e3d3572626212cd599e1d1651e664752cd7 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_port_name *port_name; uint8_t buf[64]; uint8_t *fc4_type; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", @@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len = (uint32_t)(pld - (uint8_t *)cmd); /* Submit FDMI RPA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_rpl *reg_pl; struct fs_fdmi_attrs *attrib_blk; uint8_t buf[64]; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", @@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) attrib_blk->numattrs = htonl(numattrs); /* Submit FDMI RHBA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, 
n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) void *cmd; struct fc_fdmi_port_name *port_name; uint32_t len; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", @@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len += sizeof(*port_name); /* Submit FDMI request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /** diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index dab0d3f9bee139934eba2d551ece7db13d8486fa..e09c7f360dbde90d0b8eb54c8509956c8d93012a 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) } out: - if (req->nsge > 0) + if (req->nsge > 0) { scsi_dma_unmap(cmnd); + if (req->dcopy && (host_status == DID_OK)) + host_status = csio_scsi_copy_to_sgl(hw, req); + } cmnd->result = (((host_status) << 16) | scsi_status); cmnd->scsi_done(cmnd); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bf07735275a49d7720e98b4af5c4ef7c7693f72b..0fc382cb977bf9c33609e6c4547bdd73b16dd2d4 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, - unsigned int tid, int pg_idx, bool reply) + unsigned int tid, int pg_idx) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0xF0000000); @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, * @tid: connection id * @hcrc: header digest enabled * @dcrc: data digest enabled - * @reply: request reply from h/w * set up the iscsi digest settings for a connection identified by tid */ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 
0 : 1); + req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(31); req->mask = cpu_to_be64(0x0F000000); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 211da1d5a8699d2b6732e8505fba918c57b61688..689d6c813a50d9472c03c83feb67310aedf78d05 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) struct cxgbi_sock *csk; csk = lookup_tid(t, tid); - if (!csk) + if (!csk) { pr_err("can't find conn. for tid %u.\n", tid); + return; + } log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,%lx,%u, status 0x%x.\n", csk, csk->state, csk->flags, csk->tid, rpl->status); - if (rpl->status != CPL_ERR_NONE) + if (rpl->status != CPL_ERR_NONE) { pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", csk, tid, rpl->status); + csk->err = -EINVAL; + } + + complete(&csk->cmpl); __kfree_skb(skb); } @@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, - int pg_idx, bool reply) + int pg_idx) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 8); req->val = cpu_to_be64(pg_idx << 8); @@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, - int hcrc, int dcrc, int reply) + int hcrc, int dcrc) { struct sk_buff *skb; struct cpl_set_tcb_field *req; @@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); req->word_cookie = htons(0); req->mask = cpu_to_be64(0x3 << 4); req->val = cpu_to_be64(((hcrc ? 
ULP_CRC_HEADER : 0) | @@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); + reinit_completion(&csk->cmpl); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); - return 0; + wait_for_completion(&csk->cmpl); + + return csk->err; } static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 3f3af5e74a07dd9b23b64a929d1e6e3789b3b614..dcb148a09b06abda9ab12ed37511e656363c7e0a 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -338,7 +338,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev) EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, - unsigned int max_id, struct scsi_host_template *sht, + unsigned int max_conns, struct scsi_host_template *sht, struct scsi_transport_template *stt) { struct cxgbi_hba *chba; @@ -358,7 +358,7 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, shost->transportt = stt; shost->max_lun = max_lun; - shost->max_id = max_id; + shost->max_id = max_conns - 1; shost->max_channel = 0; shost->max_cmd_len = 16; @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) skb_queue_head_init(&csk->receive_queue); skb_queue_head_init(&csk->write_queue); timer_setup(&csk->retry_timer, NULL, 0); + init_completion(&csk->cmpl); rwlock_init(&csk->callback_lock); csk->cdev = cdev; csk->flags = 0; @@ -640,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex) if (ndev->flags & IFF_LOOPBACK) { ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); + if (!ndev) { + err = -ENETUNREACH; + goto rel_neigh; + } mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", n->dev->name, ndev->name, mtu); @@ -2252,14 +2257,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, if (!err && conn->hdrdgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_DATADGST_EN: err = iscsi_set_param(cls_conn, param, buf, buflen); if (!err && conn->datadgst_en) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, conn->hdrdgst_en, - conn->datadgst_en, 0); + conn->datadgst_en); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); @@ -2385,7 +2390,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, ppm = csk->cdev->cdev2ppm(csk->cdev); err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, - ppm->tformat.pgsz_idx_dflt, 0); + ppm->tformat.pgsz_idx_dflt); if (err < 0) return err; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index dcb190e7534349b42f2a82568a601d14e8a152ac..3bf7414a75e5eb964461d6db97b6c82ea853a8a9 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -146,6 +146,7 @@ struct cxgbi_sock { struct sk_buff_head receive_queue; struct sk_buff_head write_queue; struct timer_list retry_timer; + struct completion cmpl; int err; rwlock_t callback_lock; void *user_data; @@ -487,9 +488,9 @@ struct cxgbi_device { struct cxgbi_ppm *, struct cxgbi_task_tag_info *); int (*csk_ddp_setup_digest)(struct cxgbi_sock *, - unsigned int, int, int, int); + unsigned int, int, int); int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, - unsigned int, int, bool); + unsigned int, int); void (*csk_release_offload_resources)(struct cxgbi_sock *); int 
(*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 8908a20065c83c73eca48ecb294a0a4ee23bcb5b..4d90106fcb374c0df70640166511726e9f71ebf9 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -334,7 +334,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); void cxlflash_list_init(void); void cxlflash_term_global_luns(void); void cxlflash_free_errpage(void); -int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg); +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg); void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg); int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg); void cxlflash_term_local_luns(struct cxlflash_cfg *cfg); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 6637116529aa3de7077e1d12bcb1f6ebe5509969..8b8d522ede600a402c970c3cbbb2f05dac844b83 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3289,7 +3289,7 @@ static int cxlflash_chr_open(struct inode *inode, struct file *file) * * Return: A string identifying the decoded host ioctl. */ -static char *decode_hioctl(int cmd) +static char *decode_hioctl(unsigned int cmd) { switch (cmd) { case HT_CXLFLASH_LUN_PROVISION: @@ -3694,6 +3694,7 @@ static int cxlflash_probe(struct pci_dev *pdev, host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; cfg = shost_priv(host); + cfg->state = STATE_PROBING; cfg->host = host; rc = alloc_mem(cfg); if (rc) { @@ -3782,6 +3783,7 @@ static int cxlflash_probe(struct pci_dev *pdev, return rc; out_remove: + cfg->state = STATE_PROBED; cxlflash_remove(pdev); goto out; } diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index acac6152f50b401020361890dd93952e98df5eb2..1a94a469051ea91f64dee409dafb4d525164c357 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -1924,7 +1924,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev, * * Return: A string identifying the decoded ioctl. 
*/ -static char *decode_ioctl(int cmd) +static char *decode_ioctl(unsigned int cmd) { switch (cmd) { case DK_CXLFLASH_ATTACH: @@ -2051,7 +2051,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) * * Return: 0 on success, -errno on failure */ -static int ioctl_common(struct scsi_device *sdev, int cmd) +static int ioctl_common(struct scsi_device *sdev, unsigned int cmd) { struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; @@ -2096,7 +2096,7 @@ static int ioctl_common(struct scsi_device *sdev, int cmd) * * Return: 0 on success, -errno on failure */ -int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) { typedef int (*sioctl) (struct scsi_device *, void *); @@ -2179,8 +2179,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) } if (unlikely(copy_from_user(&buf, arg, size))) { - dev_err(dev, "%s: copy_from_user() fail " - "size=%lu cmd=%d (%s) arg=%p\n", + dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; goto cxlflash_ioctl_exit; @@ -2203,8 +2202,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) rc = do_ioctl(sdev, (void *)&buf); if (likely(!rc)) if (unlikely(copy_to_user(arg, &buf, size))) { - dev_err(dev, "%s: copy_to_user() fail " - "size=%lu cmd=%d (%s) arg=%p\n", + dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; } diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 1ed2cd82129d2d840d89ab91b58add55b718913d..3943347ec3c7ca02f1555d2ee371ede711588321 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -1969,6 +1969,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) xferred -= psge->length; } else { /* Partial SG entry done */ + pci_dma_sync_single_for_cpu(srb->dcb-> + acb->dev, + srb->sg_bus_addr, + SEGMENTX_LEN, + PCI_DMA_TODEVICE); psge->length -= xferred; psge->address += xferred; srb->sg_index = idx; @@ -3447,14 +3452,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, } } - if (dir != PCI_DMA_NONE && scsi_sg_count(cmd)) - pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd), - scsi_sg_count(cmd), dir); - ckc_only = 0; /* Check Error Conditions */ ckc_e: + pci_unmap_srb(acb, srb); + if (cmd->cmnd[0] == INQUIRY) { unsigned char *base = NULL; struct ScsiInqData *ptr; @@ -3507,7 +3510,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, cmd, cmd->result); srb_free_insert(acb, srb); } - pci_unmap_srb(acb, srb); cmd->scsi_done(cmd); waiting_process_next(acb); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 12dc7100bb4c240e1161e60043413d64611a3bd3..4f9475b8342356b6f5709418536e52f3d47d34ba 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -54,6 +54,7 @@ #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 #define ALUA_RTPG_DELAY_MSECS 5 +#define ALUA_RTPG_RETRY_DELAY 2 /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 @@ -521,10 +522,12 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) struct alua_port_group *tmp_pg; int len, k, off, bufflen = ALUA_RTPG_SIZE; unsigned char *desc, *buff; - unsigned err, retval; + unsigned err; + int retval; unsigned int 
tpg_desc_tbl_off; unsigned char orig_transition_tmo; unsigned long flags; + bool transitioning_sense = false; if (!pg->expiry) { unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; @@ -560,12 +563,12 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) kfree(buff); return SCSI_DH_OK; } - if (!scsi_sense_valid(&sense_hdr)) { + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { sdev_printk(KERN_INFO, sdev, "%s: rtpg failed, result %d\n", ALUA_DH_NAME, retval); kfree(buff); - if (driver_byte(retval) == DRIVER_ERROR) + if (retval < 0) return SCSI_DH_DEV_TEMP_BUSY; return SCSI_DH_IO; } @@ -585,13 +588,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) goto retry; } /* - * Retry on ALUA state transition or if any - * UNIT ATTENTION occurred. + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. */ if (sense_hdr.sense_key == NOT_READY && - sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) - err = SCSI_DH_RETRY; - else if (sense_hdr.sense_key == UNIT_ATTENTION) + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry on any other UNIT ATTENTION occurred. + */ + if (sense_hdr.sense_key == UNIT_ATTENTION) err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY && pg->expiry != 0 && time_before(jiffies, pg->expiry)) { @@ -679,7 +688,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) off = 8 + (desc[7] * 4); } + skip_rtpg: spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), @@ -696,7 +709,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) case SCSI_ACCESS_STATE_TRANSITIONING: if (time_before(jiffies, pg->expiry)) { /* State transition, retry */ - pg->interval = 2; + pg->interval = ALUA_RTPG_RETRY_DELAY; err = SCSI_DH_RETRY; } else { struct alua_dh_data *h; @@ -775,11 +788,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) retval = submit_stpg(sdev, pg->group_id, &sense_hdr); if (retval) { - if (!scsi_sense_valid(&sense_hdr)) { + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { sdev_printk(KERN_INFO, sdev, "%s: stpg failed, result %d", ALUA_DH_NAME, retval); - if (driver_byte(retval) == DRIVER_ERROR) + if (retval < 0) return SCSI_DH_DEV_TEMP_BUSY; } else { sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n", @@ -821,6 +834,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); pg->flags &= ~ALUA_PG_RUNNING; pg->flags |= ALUA_PG_RUN_RTPG; + if (!pg->interval) + pg->interval = ALUA_RTPG_RETRY_DELAY; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); @@ -832,6 +847,8 @@ static void alua_rtpg_work(struct work_struct *work) spin_lock_irqsave(&pg->lock, flags); if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { pg->flags &= ~ALUA_PG_RUNNING; + if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) + pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); queue_delayed_work(kaluad_wq, &pg->rtpg_work, @@ -1031,10 +1048,12 @@ static int alua_activate(struct scsi_device *sdev, rcu_read_unlock(); mutex_unlock(&h->init_mutex); - if 
(alua_rtpg_queue(pg, sdev, qdata, true)) + if (alua_rtpg_queue(pg, sdev, qdata, true)) { fn = NULL; - else + } else { + kfree(qdata); err = SCSI_DH_DEV_OFFLINED; + } kref_put(&pg->kref, release_port_group); out: if (fn) @@ -1173,10 +1192,8 @@ static int __init alua_init(void) int r; kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); - if (!kaluad_wq) { - /* Temporary failure, bypass */ - return SCSI_DH_DEV_TEMP_BUSY; - } + if (!kaluad_wq) + return -ENOMEM; r = scsi_register_device_handler(&alua_dh); if (r != 0) { diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index d27fabae8ddd9c2e045203ada3d3673799441ce8..b3c23edd4b6cbcce305b30b825a53f4d6d78cab5 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev, if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; else { - list_add_rcu(&h->node, &h->ctlr->dh_list); h->sdev = sdev; + list_add_rcu(&h->node, &h->ctlr->dh_list); } spin_unlock(&list_lock); err = SCSI_DH_OK; @@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work) spin_unlock(&ctlr->ms_lock); retry: + memset(cdb, 0, sizeof(cdb)); + data_size = rdac_failover_get(ctlr, &list, cdb); RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " @@ -777,11 +779,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) spin_lock(&list_lock); if (h->ctlr) { list_del_rcu(&h->node); - h->sdev = NULL; kref_put(&h->ctlr->kref, release_controller); } spin_unlock(&list_lock); sdev->handler_data = NULL; + synchronize_rcu(); kfree(h); } diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h deleted file mode 100644 index 16fc380b551259bb6113aed69c00842d3f62cb4d..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/dpti_i2o.h +++ /dev/null @@ -1,446 +0,0 @@ -#ifndef _SCSI_I2O_H -#define _SCSI_I2O_H - -/* I2O kernel space accessible structures/APIs - * - * (c) Copyright 1999, 2000 Red Hat Software - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - ************************************************************************* - * - * This header file defined the I2O APIs/structures for use by - * the I2O kernel modules. 
- * - */ - -#ifdef __KERNEL__ /* This file to be included by kernel only */ - -#include - -#include -#include - - -/* - * Tunable parameters first - */ - -/* How many different OSM's are we allowing */ -#define MAX_I2O_MODULES 64 - -#define I2O_EVT_CAPABILITY_OTHER 0x01 -#define I2O_EVT_CAPABILITY_CHANGED 0x02 - -#define I2O_EVT_SENSOR_STATE_CHANGED 0x01 - -//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */ - -#define I2O_MAX_MANAGERS 4 - -/* - * I2O Interface Objects - */ - -#include -typedef wait_queue_head_t adpt_wait_queue_head_t; -#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait) -typedef wait_queue_entry_t adpt_wait_queue_entry_t; - -/* - * message structures - */ - -struct i2o_message -{ - u8 version_offset; - u8 flags; - u16 size; - u32 target_tid:12; - u32 init_tid:12; - u32 function:8; - u32 initiator_context; - /* List follows */ -}; - -struct adpt_device; -struct _adpt_hba; -struct i2o_device -{ - struct i2o_device *next; /* Chain */ - struct i2o_device *prev; - - char dev_name[8]; /* linux /dev name if available */ - i2o_lct_entry lct_data;/* Device LCT information */ - u32 flags; - struct proc_dir_entry* proc_entry; /* /proc dir */ - struct adpt_device *owner; - struct _adpt_hba *controller; /* Controlling IOP */ -}; - -/* - * Each I2O controller has one of these objects - */ - -struct i2o_controller -{ - char name[16]; - int unit; - int type; - int enabled; - - struct notifier_block *event_notifer; /* Events */ - atomic_t users; - struct i2o_device *devices; /* I2O device chain */ - struct i2o_controller *next; /* Controller chain */ - -}; - -/* - * I2O System table entry - */ -struct i2o_sys_tbl_entry -{ - u16 org_id; - u16 reserved1; - u32 iop_id:12; - u32 reserved2:20; - u16 seg_num:12; - u16 i2o_version:4; - u8 iop_state; - u8 msg_type; - u16 frame_size; - u16 reserved3; - u32 last_changed; - u32 iop_capabilities; - u32 inbound_low; - u32 inbound_high; -}; - -struct i2o_sys_tbl -{ - u8 num_entries; - u8 version; - u16 reserved1; - u32 change_ind; - u32 reserved2; - u32 reserved3; - struct i2o_sys_tbl_entry iops[0]; -}; - -/* - * I2O classes / subclasses - */ - -/* Class ID and Code Assignments - * (LCT.ClassID.Version field) - */ -#define I2O_CLASS_VERSION_10 0x00 -#define I2O_CLASS_VERSION_11 0x01 - -/* Class code names - * (from v1.5 Table 6-1 Class Code Assignments.) 
- */ - -#define I2O_CLASS_EXECUTIVE 0x000 -#define I2O_CLASS_DDM 0x001 -#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010 -#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011 -#define I2O_CLASS_LAN 0x020 -#define I2O_CLASS_WAN 0x030 -#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040 -#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041 -#define I2O_CLASS_SCSI_PERIPHERAL 0x051 -#define I2O_CLASS_ATE_PORT 0x060 -#define I2O_CLASS_ATE_PERIPHERAL 0x061 -#define I2O_CLASS_FLOPPY_CONTROLLER 0x070 -#define I2O_CLASS_FLOPPY_DEVICE 0x071 -#define I2O_CLASS_BUS_ADAPTER_PORT 0x080 -#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 -#define I2O_CLASS_PEER_TRANSPORT 0x091 - -/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes - */ - -#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff - -/* Subclasses - */ - -#define I2O_SUBCLASS_i960 0x001 -#define I2O_SUBCLASS_HDM 0x020 -#define I2O_SUBCLASS_ISM 0x021 - -/* Operation functions */ - -#define I2O_PARAMS_FIELD_GET 0x0001 -#define I2O_PARAMS_LIST_GET 0x0002 -#define I2O_PARAMS_MORE_GET 0x0003 -#define I2O_PARAMS_SIZE_GET 0x0004 -#define I2O_PARAMS_TABLE_GET 0x0005 -#define I2O_PARAMS_FIELD_SET 0x0006 -#define I2O_PARAMS_LIST_SET 0x0007 -#define I2O_PARAMS_ROW_ADD 0x0008 -#define I2O_PARAMS_ROW_DELETE 0x0009 -#define I2O_PARAMS_TABLE_CLEAR 0x000A - -/* - * I2O serial number conventions / formats - * (circa v1.5) - */ - -#define I2O_SNFORMAT_UNKNOWN 0 -#define I2O_SNFORMAT_BINARY 1 -#define I2O_SNFORMAT_ASCII 2 -#define I2O_SNFORMAT_UNICODE 3 -#define I2O_SNFORMAT_LAN48_MAC 4 -#define I2O_SNFORMAT_WAN 5 - -/* Plus new in v2.0 (Yellowstone pdf doc) - */ - -#define I2O_SNFORMAT_LAN64_MAC 6 -#define I2O_SNFORMAT_DDM 7 -#define I2O_SNFORMAT_IEEE_REG64 8 -#define I2O_SNFORMAT_IEEE_REG128 9 -#define I2O_SNFORMAT_UNKNOWN2 0xff - -/* Transaction Reply Lists (TRL) Control Word structure */ - -#define TRL_SINGLE_FIXED_LENGTH 0x00 -#define TRL_SINGLE_VARIABLE_LENGTH 0x40 -#define TRL_MULTIPLE_FIXED_LENGTH 0x80 - -/* - * Messaging API values - */ - -#define I2O_CMD_ADAPTER_ASSIGN 0xB3 -#define I2O_CMD_ADAPTER_READ 0xB2 -#define I2O_CMD_ADAPTER_RELEASE 0xB5 -#define I2O_CMD_BIOS_INFO_SET 0xA5 -#define I2O_CMD_BOOT_DEVICE_SET 0xA7 -#define I2O_CMD_CONFIG_VALIDATE 0xBB -#define I2O_CMD_CONN_SETUP 0xCA -#define I2O_CMD_DDM_DESTROY 0xB1 -#define I2O_CMD_DDM_ENABLE 0xD5 -#define I2O_CMD_DDM_QUIESCE 0xC7 -#define I2O_CMD_DDM_RESET 0xD9 -#define I2O_CMD_DDM_SUSPEND 0xAF -#define I2O_CMD_DEVICE_ASSIGN 0xB7 -#define I2O_CMD_DEVICE_RELEASE 0xB9 -#define I2O_CMD_HRT_GET 0xA8 -#define I2O_CMD_ADAPTER_CLEAR 0xBE -#define I2O_CMD_ADAPTER_CONNECT 0xC9 -#define I2O_CMD_ADAPTER_RESET 0xBD -#define I2O_CMD_LCT_NOTIFY 0xA2 -#define I2O_CMD_OUTBOUND_INIT 0xA1 -#define I2O_CMD_PATH_ENABLE 0xD3 -#define I2O_CMD_PATH_QUIESCE 0xC5 -#define I2O_CMD_PATH_RESET 0xD7 -#define I2O_CMD_STATIC_MF_CREATE 0xDD -#define I2O_CMD_STATIC_MF_RELEASE 0xDF -#define I2O_CMD_STATUS_GET 0xA0 -#define I2O_CMD_SW_DOWNLOAD 0xA9 -#define I2O_CMD_SW_UPLOAD 0xAB -#define I2O_CMD_SW_REMOVE 0xAD -#define I2O_CMD_SYS_ENABLE 0xD1 -#define I2O_CMD_SYS_MODIFY 0xC1 -#define I2O_CMD_SYS_QUIESCE 0xC3 -#define I2O_CMD_SYS_TAB_SET 0xA3 - -#define I2O_CMD_UTIL_NOP 0x00 -#define I2O_CMD_UTIL_ABORT 0x01 -#define I2O_CMD_UTIL_CLAIM 0x09 -#define I2O_CMD_UTIL_RELEASE 0x0B -#define I2O_CMD_UTIL_PARAMS_GET 0x06 -#define I2O_CMD_UTIL_PARAMS_SET 0x05 -#define I2O_CMD_UTIL_EVT_REGISTER 0x13 -#define I2O_CMD_UTIL_EVT_ACK 0x14 -#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10 -#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D -#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F -#define 
I2O_CMD_UTIL_LOCK 0x17 -#define I2O_CMD_UTIL_LOCK_RELEASE 0x19 -#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15 - -#define I2O_CMD_SCSI_EXEC 0x81 -#define I2O_CMD_SCSI_ABORT 0x83 -#define I2O_CMD_SCSI_BUSRESET 0x27 - -#define I2O_CMD_BLOCK_READ 0x30 -#define I2O_CMD_BLOCK_WRITE 0x31 -#define I2O_CMD_BLOCK_CFLUSH 0x37 -#define I2O_CMD_BLOCK_MLOCK 0x49 -#define I2O_CMD_BLOCK_MUNLOCK 0x4B -#define I2O_CMD_BLOCK_MMOUNT 0x41 -#define I2O_CMD_BLOCK_MEJECT 0x43 - -#define I2O_PRIVATE_MSG 0xFF - -/* - * Init Outbound Q status - */ - -#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01 -#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02 -#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03 -#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04 - -/* - * I2O Get Status State values - */ - -#define ADAPTER_STATE_INITIALIZING 0x01 -#define ADAPTER_STATE_RESET 0x02 -#define ADAPTER_STATE_HOLD 0x04 -#define ADAPTER_STATE_READY 0x05 -#define ADAPTER_STATE_OPERATIONAL 0x08 -#define ADAPTER_STATE_FAILED 0x10 -#define ADAPTER_STATE_FAULTED 0x11 - -/* I2O API function return values */ - -#define I2O_RTN_NO_ERROR 0 -#define I2O_RTN_NOT_INIT 1 -#define I2O_RTN_FREE_Q_EMPTY 2 -#define I2O_RTN_TCB_ERROR 3 -#define I2O_RTN_TRANSACTION_ERROR 4 -#define I2O_RTN_ADAPTER_ALREADY_INIT 5 -#define I2O_RTN_MALLOC_ERROR 6 -#define I2O_RTN_ADPTR_NOT_REGISTERED 7 -#define I2O_RTN_MSG_REPLY_TIMEOUT 8 -#define I2O_RTN_NO_STATUS 9 -#define I2O_RTN_NO_FIRM_VER 10 -#define I2O_RTN_NO_LINK_SPEED 11 - -/* Reply message status defines for all messages */ - -#define I2O_REPLY_STATUS_SUCCESS 0x00 -#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 -#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 -#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 -#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 -#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 -#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06 -#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08 -#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09 -#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A -#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B -#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80 - -/* Status codes and Error Information for Parameter functions */ - -#define I2O_PARAMS_STATUS_SUCCESS 0x00 -#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 -#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 -#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 -#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 -#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 -#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06 -#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07 -#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08 -#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09 -#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A -#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B -#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C -#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D -#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E -#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F -#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10 - -/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error - * messages: Table 3-2 Detailed Status Codes.*/ - -#define I2O_DSC_SUCCESS 0x0000 -#define I2O_DSC_BAD_KEY 0x0002 -#define I2O_DSC_TCL_ERROR 0x0003 -#define I2O_DSC_REPLY_BUFFER_FULL 0x0004 -#define I2O_DSC_NO_SUCH_PAGE 0x0005 -#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006 -#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007 -#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009 -#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A -#define I2O_DSC_DEVICE_LOCKED 0x000B -#define 
I2O_DSC_DEVICE_RESET 0x000C -#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D -#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E -#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F -#define I2O_DSC_INVALID_OFFSET 0x0010 -#define I2O_DSC_INVALID_PARAMETER 0x0011 -#define I2O_DSC_INVALID_REQUEST 0x0012 -#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013 -#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014 -#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015 -#define I2O_DSC_MISSING_PARAMETER 0x0016 -#define I2O_DSC_TIMEOUT 0x0017 -#define I2O_DSC_UNKNOWN_ERROR 0x0018 -#define I2O_DSC_UNKNOWN_FUNCTION 0x0019 -#define I2O_DSC_UNSUPPORTED_VERSION 0x001A -#define I2O_DSC_DEVICE_BUSY 0x001B -#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C - -/* Device Claim Types */ -#define I2O_CLAIM_PRIMARY 0x01000000 -#define I2O_CLAIM_MANAGEMENT 0x02000000 -#define I2O_CLAIM_AUTHORIZED 0x03000000 -#define I2O_CLAIM_SECONDARY 0x04000000 - -/* Message header defines for VersionOffset */ -#define I2OVER15 0x0001 -#define I2OVER20 0x0002 -/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */ -#define I2OVERSION I2OVER15 -#define SGL_OFFSET_0 I2OVERSION -#define SGL_OFFSET_4 (0x0040 | I2OVERSION) -#define SGL_OFFSET_5 (0x0050 | I2OVERSION) -#define SGL_OFFSET_6 (0x0060 | I2OVERSION) -#define SGL_OFFSET_7 (0x0070 | I2OVERSION) -#define SGL_OFFSET_8 (0x0080 | I2OVERSION) -#define SGL_OFFSET_9 (0x0090 | I2OVERSION) -#define SGL_OFFSET_10 (0x00A0 | I2OVERSION) -#define SGL_OFFSET_12 (0x00C0 | I2OVERSION) - -#define TRL_OFFSET_5 (0x0050 | I2OVERSION) -#define TRL_OFFSET_6 (0x0060 | I2OVERSION) - - /* msg header defines for MsgFlags */ -#define MSG_STATIC 0x0100 -#define MSG_64BIT_CNTXT 0x0200 -#define MSG_MULTI_TRANS 0x1000 -#define MSG_FAIL 0x2000 -#define MSG_LAST 0x4000 -#define MSG_REPLY 0x8000 - - /* minimum size msg */ -#define THREE_WORD_MSG_SIZE 0x00030000 -#define FOUR_WORD_MSG_SIZE 0x00040000 -#define FIVE_WORD_MSG_SIZE 0x00050000 -#define SIX_WORD_MSG_SIZE 0x00060000 -#define SEVEN_WORD_MSG_SIZE 0x00070000 -#define EIGHT_WORD_MSG_SIZE 0x00080000 -#define NINE_WORD_MSG_SIZE 0x00090000 -#define TEN_WORD_MSG_SIZE 0x000A0000 -#define I2O_MESSAGE_SIZE(x) ((x)<<16) - - -/* Special TID Assignments */ - -#define ADAPTER_TID 0 -#define HOST_TID 1 - -#define MSG_FRAME_SIZE 128 -#define NMBR_MSG_FRAMES 128 - -#define MSG_POOL_SIZE 16384 - -#define I2O_POST_WAIT_OK 0 -#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT - - -#endif /* __KERNEL__ */ - -#endif /* _SCSI_I2O_H */ diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h deleted file mode 100644 index f60236721e0ddef2074fdd3b08538a3ba1acc67f..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/dpti_ioctl.h +++ /dev/null @@ -1,139 +0,0 @@ -/*************************************************************************** - dpti_ioctl.h - description - ------------------- - begin : Thu Sep 7 2000 - copyright : (C) 2001 by Adaptec - - See Documentation/scsi/dpti.txt for history, notes, license info - and credits - ***************************************************************************/ - -/*************************************************************************** - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License as published by * - * the Free Software Foundation; either version 2 of the License, or * - * (at your option) any later version. 
* - * * - ***************************************************************************/ - -/*************************************************************************** - * This file is generated from osd_unix.h * - * *************************************************************************/ - -#ifndef _dpti_ioctl_h -#define _dpti_ioctl_h - -// IOCTL interface commands - -#ifndef _IOWR -# define _IOWR(x,y,z) (((x)<<8)|y) -#endif -#ifndef _IOW -# define _IOW(x,y,z) (((x)<<8)|y) -#endif -#ifndef _IOR -# define _IOR(x,y,z) (((x)<<8)|y) -#endif -#ifndef _IO -# define _IO(x,y) (((x)<<8)|y) -#endif -/* EATA PassThrough Command */ -#define EATAUSRCMD _IOWR('D',65,EATA_CP) -/* Set Debug Level If Enabled */ -#define DPT_DEBUG _IOW('D',66,int) -/* Get Signature Structure */ -#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S) -#if defined __bsdi__ -#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed) -#endif -/* Get Number Of DPT Adapters */ -#define DPT_NUMCTRLS _IOR('D',68,int) -/* Get Adapter Info Structure */ -#define DPT_CTRLINFO _IOR('D',69,CtrlInfo) -/* Get Statistics If Enabled */ -#define DPT_STATINFO _IO('D',70) -/* Clear Stats If Enabled */ -#define DPT_CLRSTAT _IO('D',71) -/* Get System Info Structure */ -#define DPT_SYSINFO _IOR('D',72,sysInfo_S) -/* Set Timeout Value */ -#define DPT_TIMEOUT _IO('D',73) -/* Get config Data */ -#define DPT_CONFIG _IO('D',74) -/* Get Blink LED Code */ -#define DPT_BLINKLED _IOR('D',75,int) -/* Get Statistical information (if available) */ -#define DPT_STATS_INFO _IOR('D',80,STATS_DATA) -/* Clear the statistical information */ -#define DPT_STATS_CLEAR _IO('D',81) -/* Get Performance metrics */ -#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t) -/* Send an I2O command */ -#define I2OUSRCMD _IO('D',76) -/* Inform driver to re-acquire LCT information */ -#define I2ORESCANCMD _IO('D',77) -/* Inform driver to reset adapter */ -#define I2ORESETCMD _IO('D',78) -/* See if the target is mounted */ -#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T) - - - /* Structure Returned From Get Controller Info */ - -typedef struct { - uCHAR state; /* Operational state */ - uCHAR id; /* Host adapter SCSI id */ - int vect; /* Interrupt vector number */ - int base; /* Base I/O address */ - int njobs; /* # of jobs sent to HA */ - int qdepth; /* Controller queue depth. */ - int wakebase; /* mpx wakeup base index. */ - uINT SGsize; /* Scatter/Gather list size. */ - unsigned heads; /* heads for drives on cntlr. */ - unsigned sectors; /* sectors for drives on cntlr. */ - uCHAR do_drive32; /* Flag for Above 16 MB Ability */ - uCHAR BusQuiet; /* SCSI Bus Quiet Flag */ - char idPAL[4]; /* 4 Bytes Of The ID Pal */ - uCHAR primary; /* 1 For Primary, 0 For Secondary */ - uCHAR eataVersion; /* EATA Version */ - uINT cpLength; /* EATA Command Packet Length */ - uINT spLength; /* EATA Status Packet Length */ - uCHAR drqNum; /* DRQ Index (0,5,6,7) */ - uCHAR flag1; /* EATA Flags 1 (Byte 9) */ - uCHAR flag2; /* EATA Flags 2 (Byte 30) */ -} CtrlInfo; - -typedef struct { - uSHORT length; // Remaining length of this - uSHORT drvrHBAnum; // Relative HBA # used by the driver - uINT baseAddr; // Base I/O address - uSHORT blinkState; // Blink LED state (0=Not in blink LED) - uCHAR pciBusNum; // PCI Bus # (Optional) - uCHAR pciDeviceNum; // PCI Device # (Optional) - uSHORT hbaFlags; // Miscellaneous HBA flags - uSHORT Interrupt; // Interrupt set for this device. 
-# if (defined(_DPT_ARC)) - uINT baseLength; - ADAPTER_OBJECT *AdapterObject; - LARGE_INTEGER DmaLogicalAddress; - PVOID DmaVirtualAddress; - LARGE_INTEGER ReplyLogicalAddress; - PVOID ReplyVirtualAddress; -# else - uINT reserved1; // Reserved for future expansion - uINT reserved2; // Reserved for future expansion - uINT reserved3; // Reserved for future expansion -# endif -} drvrHBAinfo_S; - -typedef struct TARGET_BUSY -{ - uLONG channel; - uLONG id; - uLONG lun; - uLONG isBusy; -} TARGET_BUSY_T; - -#endif - diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h deleted file mode 100644 index a6644b332b538e510d4a32d86d6cf9a5eda8701c..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/dptsig.h +++ /dev/null @@ -1,336 +0,0 @@ -/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */ - -/* - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation - * All rights reserved. - * - * Redistribution and use in source form, with or without modification, are - * permitted provided that redistributions of source code must retain the - * above copyright notice, this list of conditions and the following disclaimer. - * - * This software is provided `as is' by Distributed Processing Technology and - * any express or implied warranties, including, but not limited to, the - * implied warranties of merchantability and fitness for a particular purpose, - * are disclaimed. In no event shall Distributed Processing Technology be - * liable for any direct, indirect, incidental, special, exemplary or - * consequential damages (including, but not limited to, procurement of - * substitute goods or services; loss of use, data, or profits; or business - * interruptions) however caused and on any theory of liability, whether in - * contract, strict liability, or tort (including negligence or otherwise) - * arising in any way out of the use of this driver software, even if advised - * of the possibility of such damage. - * - */ - -#ifndef __DPTSIG_H_ -#define __DPTSIG_H_ -#ifdef _SINIX_ADDON -#include "dpt.h" -#endif -/* DPT SIGNATURE SPEC AND HEADER FILE */ -/* Signature Version 1 (sorry no 'A') */ - -/* to make sure we are talking the same size under all OS's */ -typedef unsigned char sigBYTE; -typedef unsigned short sigWORD; -typedef unsigned int sigINT; - -/* - * use sigWORDLittleEndian for: - * dsCapabilities - * dsDeviceSupp - * dsAdapterSupp - * dsApplication - * use sigLONGLittleEndian for: - * dsOS - * so that the sig can be standardised to Little Endian - */ -#if (defined(_DPT_BIG_ENDIAN)) -# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF)) -# define sigLONGLittleEndian(x) \ - ((((x)&0xFF)<<24) | \ - (((x)&0xFF00)<<8) | \ - (((x)&0xFF0000L)>>8) | \ - (((x)&0xFF000000L)>>24)) -#else -# define sigWORDLittleEndian(x) (x) -# define sigLONGLittleEndian(x) (x) -#endif - -/* must make sure the structure is not word or double-word aligned */ -/* --------------------------------------------------------------- */ -/* Borland will ignore the following pragma: */ -/* Word alignment is OFF by default. If in the, IDE make */ -/* sure that Options | Compiler | Code Generation | Word Alignment */ -/* is not checked. If using BCC, do not use the -a option. 
*/ - -#ifndef NO_PACK -#if defined (_DPT_AIX) -#pragma options align=packed -#else -#pragma pack(1) -#endif /* aix */ -#endif -/* For the Macintosh */ -#ifdef STRUCTALIGNMENTSUPPORTED -#pragma options align=mac68k -#endif - - -/* Current Signature Version - sigBYTE dsSigVersion; */ -/* ------------------------------------------------------------------ */ -#define SIG_VERSION 1 - -/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */ -/* ------------------------------------------------------------------ */ -/* What type of processor the file is meant to run on. */ -/* This will let us know whether to read sigWORDs as high/low or low/high. */ -#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */ -#define PROC_MOTOROLA 0x01 /* Motorola 68K */ -#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */ -#define PROC_ALPHA 0x03 /* DEC Alpha */ -#define PROC_POWERPC 0x04 /* IBM Power PC */ -#define PROC_i960 0x05 /* Intel i960 */ -#define PROC_ULTRASPARC 0x06 /* SPARC processor */ - -/* Specific Minimim Processor - sigBYTE dsProcessor; FLAG BITS */ -/* ------------------------------------------------------------------ */ -/* Different bit definitions dependent on processor_family */ - -/* PROC_INTEL: */ -#define PROC_8086 0x01 /* Intel 8086 */ -#define PROC_286 0x02 /* Intel 80286 */ -#define PROC_386 0x04 /* Intel 80386 */ -#define PROC_486 0x08 /* Intel 80486 */ -#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */ -#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */ -#define PROC_IA64 0x40 /* Intel IA64 processor */ - -/* PROC_i960: */ -#define PROC_960RX 0x01 /* Intel 80960RC/RD */ -#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */ - -/* PROC_MOTOROLA: */ -#define PROC_68000 0x01 /* Motorola 68000 */ -#define PROC_68010 0x02 /* Motorola 68010 */ -#define PROC_68020 0x04 /* Motorola 68020 */ -#define PROC_68030 0x08 /* Motorola 68030 */ -#define PROC_68040 0x10 /* Motorola 68040 */ - -/* PROC_POWERPC */ -#define PROC_PPC601 0x01 /* PowerPC 601 */ -#define PROC_PPC603 0x02 /* PowerPC 603 */ -#define PROC_PPC604 0x04 /* PowerPC 604 */ - -/* PROC_MIPS4000: */ -#define PROC_R4000 0x01 /* MIPS R4000 */ - -/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */ -/* ------------------------------------------------------------------ */ -#define FT_EXECUTABLE 0 /* Executable Program */ -#define FT_SCRIPT 1 /* Script/Batch File??? 
*/ -#define FT_HBADRVR 2 /* HBA Driver */ -#define FT_OTHERDRVR 3 /* Other Driver */ -#define FT_IFS 4 /* Installable Filesystem Driver */ -#define FT_ENGINE 5 /* DPT Engine */ -#define FT_COMPDRVR 6 /* Compressed Driver Disk */ -#define FT_LANGUAGE 7 /* Foreign Language file */ -#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */ -#define FT_COMMMODL 9 /* Communications Module */ -#define FT_INT13 10 /* INT 13 style HBA Driver */ -#define FT_HELPFILE 11 /* Help file */ -#define FT_LOGGER 12 /* Event Logger */ -#define FT_INSTALL 13 /* An Install Program */ -#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */ -#define FT_RESOURCE 15 /* Storage Manager Resource File */ -#define FT_MODEM_DB 16 /* Storage Manager Modem Database */ - -/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define FTF_DLL 0x01 /* Dynamic Link Library */ -#define FTF_NLM 0x02 /* Netware Loadable Module */ -#define FTF_OVERLAYS 0x04 /* Uses overlays */ -#define FTF_DEBUG 0x08 /* Debug version */ -#define FTF_TSR 0x10 /* TSR */ -#define FTF_SYS 0x20 /* DOS Loadable driver */ -#define FTF_PROTECTED 0x40 /* Runs in protected mode */ -#define FTF_APP_SPEC 0x80 /* Application Specific */ -#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */ - -/* OEM - sigBYTE dsOEM; DISTINCT VALUES */ -/* ------------------------------------------------------------------ */ -#define OEM_DPT 0 /* DPT */ -#define OEM_ATT 1 /* ATT */ -#define OEM_NEC 2 /* NEC */ -#define OEM_ALPHA 3 /* Alphatronix */ -#define OEM_AST 4 /* AST */ -#define OEM_OLIVETTI 5 /* Olivetti */ -#define OEM_SNI 6 /* Siemens/Nixdorf */ -#define OEM_SUN 7 /* SUN Microsystems */ - -/* Operating System - sigLONG dsOS; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define OS_DOS 0x00000001 /* PC/MS-DOS */ -#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */ -#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */ -#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */ -#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */ -#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */ -#define OS_NW286 0x00000040 /* Novell NetWare 286 */ -#define OS_NW386 0x00000080 /* Novell NetWare 386 */ -#define OS_GEN_UNIX 0x00000100 /* Generic Unix */ -#define OS_SCO_UNIX 0x00000200 /* SCO Unix */ -#define OS_ATT_UNIX 0x00000400 /* ATT Unix */ -#define OS_UNIXWARE 0x00000800 /* USL Unix */ -#define OS_INT_UNIX 0x00001000 /* Interactive Unix */ -#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */ -#define OS_QNX 0x00004000 /* QNX for Tom Moch */ -#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */ -#define OS_BANYAN 0x00010000 /* Banyan Vines */ -#define OS_OLIVETTI_UNIX 0x00020000/* Olivetti Unix */ -#define OS_MAC_OS 0x00040000 /* Mac OS */ -#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */ -#define OS_NW4x 0x00100000 /* Novell Netware 4.x */ -#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */ -#define OS_AIX_UNIX 0x00400000 /* AIX Unix */ -#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */ -#define OS_LINUX 0x01000000 /* Linux */ -#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */ -#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */ -#define OS_PLAN9 0x08000000 /* ATT Plan 9 */ -#define OS_TSX 0x10000000 /* SNH TSX-32 */ - -#define OS_OTHER 0x80000000 /* Other */ - -/* Capabilities - sigWORD dsCapabilities; FLAG BITS */ -/* 
------------------------------------------------------------------ */ -#define CAP_RAID0 0x0001 /* RAID-0 */ -#define CAP_RAID1 0x0002 /* RAID-1 */ -#define CAP_RAID3 0x0004 /* RAID-3 */ -#define CAP_RAID5 0x0008 /* RAID-5 */ -#define CAP_SPAN 0x0010 /* Spanning */ -#define CAP_PASS 0x0020 /* Provides passthrough */ -#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */ -#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */ -#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */ -#define CAP_EXTEND 0x8000 /* Extended info appears after description */ -#ifdef SNI_MIPS -#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */ -#endif - -/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define DEV_DASD 0x0001 /* DASD (hard drives) */ -#define DEV_TAPE 0x0002 /* Tape drives */ -#define DEV_PRINTER 0x0004 /* Printers */ -#define DEV_PROC 0x0008 /* Processors */ -#define DEV_WORM 0x0010 /* WORM drives */ -#define DEV_CDROM 0x0020 /* CD-ROM drives */ -#define DEV_SCANNER 0x0040 /* Scanners */ -#define DEV_OPTICAL 0x0080 /* Optical Drives */ -#define DEV_JUKEBOX 0x0100 /* Jukebox */ -#define DEV_COMM 0x0200 /* Communications Devices */ -#define DEV_OTHER 0x0400 /* Other Devices */ -#define DEV_ALL 0xFFFF /* All SCSI Devices */ - -/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define ADF_2001 0x0001 /* PM2001 */ -#define ADF_2012A 0x0002 /* PM2012A */ -#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */ -#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */ -#define ADF_SC3_ISA 0x0010 /* PM2021 */ -#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */ -#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */ -#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */ -#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */ -#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */ -#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */ -/* - * Combinations of products - */ -#define ADF_ALL_2000 (ADF_2001|ADF_2012A) -#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA) -#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI) -#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI) -#define ADF_ALL_SC5 (ADF_SC5_PCI) -/* All EATA Cacheing Products */ -#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4) -/* All EATA Bus Mastering Products */ -#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE) -/* All EATA Adapter Products */ -#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER) -#define ADF_ALL ADF_ALL_EATA - -/* Application - sigWORD dsApplication; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define APP_DPTMGR 0x0001 /* DPT Storage Manager */ -#define APP_ENGINE 0x0002 /* DPT Engine */ -#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */ -#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */ -#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */ -#define APP_NOVABACK 0x0020 /* NovaStor Novaback */ -#define APP_AIM 0x0040 /* Archive Information Manager */ - -/* Requirements - sigBYTE dsRequirements; FLAG BITS */ -/* ------------------------------------------------------------------ */ -#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */ -#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */ -#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */ -#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI 
Transport Modules */ -#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */ -#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */ - -/* - * You may adjust dsDescription_size with an override to a value less than - * 50 so that the structure allocates less real space. - */ -#if (!defined(dsDescription_size)) -# define dsDescription_size 50 -#endif - -typedef struct dpt_sig { - char dsSignature[6]; /* ALWAYS "dPtSiG" */ - sigBYTE dsSigVersion; /* signature version (currently 1) */ - sigBYTE dsProcessorFamily; /* what type of processor */ - sigBYTE dsProcessor; /* precise processor */ - sigBYTE dsFiletype; /* type of file */ - sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */ - sigBYTE dsOEM; /* OEM file was created for */ - sigINT dsOS; /* which Operating systems */ - sigWORD dsCapabilities; /* RAID levels, etc. */ - sigWORD dsDeviceSupp; /* Types of SCSI devices supported */ - sigWORD dsAdapterSupp; /* DPT adapter families supported */ - sigWORD dsApplication; /* applications file is for */ - sigBYTE dsRequirements; /* Other driver dependencies */ - sigBYTE dsVersion; /* 1 */ - sigBYTE dsRevision; /* 'J' */ - sigBYTE dsSubRevision; /* '9' ' ' if N/A */ - sigBYTE dsMonth; /* creation month */ - sigBYTE dsDay; /* creation day */ - sigBYTE dsYear; /* creation year since 1980 (1993=13) */ - /* description (NULL terminated) */ - char dsDescription[dsDescription_size]; -} dpt_sig_S; -/* 32 bytes minimum - with no description. Put NULL at description[0] */ -/* 81 bytes maximum - with 49 character description plus NULL. */ - -/* This line added at Roycroft's request */ -/* Microsoft's NT compiler gets confused if you do a pack and don't */ -/* restore it. */ - -#ifndef NO_UNPACK -#if defined (_DPT_AIX) -#pragma options align=reset -#elif defined (UNPACK_FOUR) -#pragma pack(4) -#else -#pragma pack() -#endif /* aix */ -#endif -/* For the Macintosh */ -#ifdef STRUCTALIGNMENTSUPPORTED -#pragma options align=reset -#endif - -#endif diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h deleted file mode 100644 index de3ae5722982f7e5664e1ac21becdb03355aa8f5..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/osd_defs.h +++ /dev/null @@ -1,79 +0,0 @@ -/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */ -/* - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation - * All rights reserved. - * - * Redistribution and use in source form, with or without modification, are - * permitted provided that redistributions of source code must retain the - * above copyright notice, this list of conditions and the following disclaimer. - * - * This software is provided `as is' by Distributed Processing Technology and - * any express or implied warranties, including, but not limited to, the - * implied warranties of merchantability and fitness for a particular purpose, - * are disclaimed. In no event shall Distributed Processing Technology be - * liable for any direct, indirect, incidental, special, exemplary or - * consequential damages (including, but not limited to, procurement of - * substitute goods or services; loss of use, data, or profits; or business - * interruptions) however caused and on any theory of liability, whether in - * contract, strict liability, or tort (including negligence or otherwise) - * arising in any way out of the use of this driver software, even if advised - * of the possibility of such damage. 
- * - */ - -#ifndef _OSD_DEFS_H -#define _OSD_DEFS_H - -/*File - OSD_DEFS.H - **************************************************************************** - * - *Description: - * - * This file contains the OS dependent defines. This file is included - *in osd_util.h and provides the OS specific defines for that file. - * - *Copyright Distributed Processing Technology, Corp. - * 140 Candace Dr. - * Maitland, Fl. 32751 USA - * Phone: (407) 830-5522 Fax: (407) 260-5366 - * All Rights Reserved - * - *Author: Doug Anderson - *Date: 1/31/94 - * - *Editors: - * - *Remarks: - * - * - *****************************************************************************/ - - -/*Definitions - Defines & Constants ----------------------------------------- */ - - /* Define the operating system */ -#if (defined(__linux__)) -# define _DPT_LINUX -#elif (defined(__bsdi__)) -# define _DPT_BSDI -#elif (defined(__FreeBSD__)) -# define _DPT_FREE_BSD -#else -# define _DPT_SCO -#endif - -#if defined (ZIL_CURSES) -#define _DPT_CURSES -#else -#define _DPT_MOTIF -#endif - - /* Redefine 'far' to nothing - no far pointer type required in UNIX */ -#define far - - /* Define the mutually exclusive semaphore type */ -#define SEMAPHORE_T unsigned int * - /* Define a handle to a DLL */ -#define DLL_HANDLE_T unsigned int * - -#endif diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h deleted file mode 100644 index b2613c2eaac7df353d671f4f2961ac738d948808..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/osd_util.h +++ /dev/null @@ -1,358 +0,0 @@ -/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */ - -/* - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation - * All rights reserved. - * - * Redistribution and use in source form, with or without modification, are - * permitted provided that redistributions of source code must retain the - * above copyright notice, this list of conditions and the following disclaimer. - * - * This software is provided `as is' by Distributed Processing Technology and - * any express or implied warranties, including, but not limited to, the - * implied warranties of merchantability and fitness for a particular purpose, - * are disclaimed. In no event shall Distributed Processing Technology be - * liable for any direct, indirect, incidental, special, exemplary or - * consequential damages (including, but not limited to, procurement of - * substitute goods or services; loss of use, data, or profits; or business - * interruptions) however caused and on any theory of liability, whether in - * contract, strict liability, or tort (including negligence or otherwise) - * arising in any way out of the use of this driver software, even if advised - * of the possibility of such damage. - * - */ - -#ifndef __OSD_UTIL_H -#define __OSD_UTIL_H - -/*File - OSD_UTIL.H - **************************************************************************** - * - *Description: - * - * This file contains defines and function prototypes that are - *operating system dependent. The resources defined in this file - *are not specific to any particular application. - * - *Copyright Distributed Processing Technology, Corp. - * 140 Candace Dr. - * Maitland, Fl. 
32751 USA - * Phone: (407) 830-5522 Fax: (407) 260-5366 - * All Rights Reserved - * - *Author: Doug Anderson - *Date: 1/7/94 - * - *Editors: - * - *Remarks: - * - * - *****************************************************************************/ - - -/*Definitions - Defines & Constants ----------------------------------------- */ - -/*----------------------------- */ -/* Operating system selections: */ -/*----------------------------- */ - -/*#define _DPT_MSDOS */ -/*#define _DPT_WIN_3X */ -/*#define _DPT_WIN_4X */ -/*#define _DPT_WIN_NT */ -/*#define _DPT_NETWARE */ -/*#define _DPT_OS2 */ -/*#define _DPT_SCO */ -/*#define _DPT_UNIXWARE */ -/*#define _DPT_SOLARIS */ -/*#define _DPT_NEXTSTEP */ -/*#define _DPT_BANYAN */ - -/*-------------------------------- */ -/* Include the OS specific defines */ -/*-------------------------------- */ - -/*#define OS_SELECTION From Above List */ -/*#define SEMAPHORE_T ??? */ -/*#define DLL_HANDLE_T ??? */ - -#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__))) -# include "i386/isa/dpt_osd_defs.h" -#else -# include "osd_defs.h" -#endif - -#ifndef DPT_UNALIGNED - #define DPT_UNALIGNED -#endif - -#ifndef DPT_EXPORT - #define DPT_EXPORT -#endif - -#ifndef DPT_IMPORT - #define DPT_IMPORT -#endif - -#ifndef DPT_RUNTIME_IMPORT - #define DPT_RUNTIME_IMPORT DPT_IMPORT -#endif - -/*--------------------- */ -/* OS dependent defines */ -/*--------------------- */ - -#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X) - #define _DPT_16_BIT -#else - #define _DPT_32_BIT -#endif - -#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX) - #define _DPT_UNIX -#endif - -#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \ - || defined (_DPT_OS2) - #define _DPT_DLL_SUPPORT -#endif - -#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE) - #define _DPT_PREEMPTIVE -#endif - -#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) - #define _DPT_MULTI_THREADED -#endif - -#if !defined (_DPT_MSDOS) - #define _DPT_MULTI_TASKING -#endif - - /* These exist for platforms that */ - /* chunk when accessing mis-aligned */ - /* data */ -#if defined (SNI_MIPS) || defined (_DPT_SOLARIS) - #if defined (_DPT_BIG_ENDIAN) - #if !defined (_DPT_STRICT_ALIGN) - #define _DPT_STRICT_ALIGN - #endif - #endif -#endif - - /* Determine if in C or C++ mode */ -#ifdef __cplusplus - #define _DPT_CPP -#else - #define _DPT_C -#endif - -/*-------------------------------------------------------------------*/ -/* Under Solaris the compiler refuses to accept code like: */ -/* { {"DPT"}, 0, NULL .... }, */ -/* and complains about the {"DPT"} part by saying "cannot use { } */ -/* to initialize char*". */ -/* */ -/* By defining these ugly macros we can get around this and also */ -/* not have to copy and #ifdef large sections of code. I know that */ -/* these macros are *really* ugly, but they should help reduce */ -/* maintenance in the long run. 
*/ -/* */ -/*-------------------------------------------------------------------*/ -#if !defined (DPTSQO) - #if defined (_DPT_SOLARIS) - #define DPTSQO - #define DPTSQC - #else - #define DPTSQO { - #define DPTSQC } - #endif /* solaris */ -#endif /* DPTSQO */ - - -/*---------------------- */ -/* OS dependent typedefs */ -/*---------------------- */ - -#if defined (_DPT_MSDOS) || defined (_DPT_SCO) - #define BYTE unsigned char - #define WORD unsigned short -#endif - -#ifndef _DPT_TYPEDEFS - #define _DPT_TYPEDEFS - typedef unsigned char uCHAR; - typedef unsigned short uSHORT; - typedef unsigned int uINT; - typedef unsigned long uLONG; - - typedef union { - uCHAR u8[4]; - uSHORT u16[2]; - uLONG u32; - } access_U; -#endif - -#if !defined (NULL) - #define NULL 0 -#endif - - -/*Prototypes - function ----------------------------------------------------- */ - -#ifdef __cplusplus - extern "C" { /* Declare all these functions as "C" functions */ -#endif - -/*------------------------ */ -/* Byte reversal functions */ -/*------------------------ */ - - /* Reverses the byte ordering of a 2 byte variable */ -#if (!defined(osdSwap2)) - uSHORT osdSwap2(DPT_UNALIGNED uSHORT *); -#endif // !osdSwap2 - - /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */ -#if (!defined(osdSwap3)) - uLONG osdSwap3(DPT_UNALIGNED uLONG *); -#endif // !osdSwap3 - - -#ifdef _DPT_NETWARE - #include "novpass.h" /* For DPT_Bswapl() prototype */ - /* Inline the byte swap */ - #ifdef __cplusplus - inline uLONG osdSwap4(uLONG *inLong) { - return *inLong = DPT_Bswapl(*inLong); - } - #else - #define osdSwap4(inLong) DPT_Bswapl(inLong) - #endif // cplusplus -#else - /* Reverses the byte ordering of a 4 byte variable */ -# if (!defined(osdSwap4)) - uLONG osdSwap4(DPT_UNALIGNED uLONG *); -# endif // !osdSwap4 - - /* The following functions ALWAYS swap regardless of the * - * presence of DPT_BIG_ENDIAN */ - - uSHORT trueSwap2(DPT_UNALIGNED uSHORT *); - uLONG trueSwap4(DPT_UNALIGNED uLONG *); - -#endif // netware - - -/*-------------------------------------* - * Network order swap functions * - * * - * These functions/macros will be used * - * by the structure insert()/extract() * - * functions. * - * - * We will enclose all structure * - * portability modifications inside * - * #ifdefs. When we are ready, we * - * will #define DPT_PORTABLE to begin * - * using the modifications. 
* - *-------------------------------------*/ -uLONG netSwap4(uLONG val); - -#if defined (_DPT_BIG_ENDIAN) - -// for big-endian we need to swap - -#ifndef NET_SWAP_2 -#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8)) -#endif // NET_SWAP_2 - -#ifndef NET_SWAP_4 -#define NET_SWAP_4(x) netSwap4((x)) -#endif // NET_SWAP_4 - -#else - -// for little-endian we don't need to do anything - -#ifndef NET_SWAP_2 -#define NET_SWAP_2(x) (x) -#endif // NET_SWAP_2 - -#ifndef NET_SWAP_4 -#define NET_SWAP_4(x) (x) -#endif // NET_SWAP_4 - -#endif // big endian - - - -/*----------------------------------- */ -/* Run-time loadable module functions */ -/*----------------------------------- */ - - /* Loads the specified run-time loadable DLL */ -DLL_HANDLE_T osdLoadModule(uCHAR *); - /* Unloads the specified run-time loadable DLL */ -uSHORT osdUnloadModule(DLL_HANDLE_T); - /* Returns a pointer to a function inside a run-time loadable DLL */ -void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *); - -/*--------------------------------------- */ -/* Mutually exclusive semaphore functions */ -/*--------------------------------------- */ - - /* Create a named semaphore */ -SEMAPHORE_T osdCreateNamedSemaphore(char *); - /* Create a mutually exlusive semaphore */ -SEMAPHORE_T osdCreateSemaphore(void); - /* create an event semaphore */ -SEMAPHORE_T osdCreateEventSemaphore(void); - /* create a named event semaphore */ -SEMAPHORE_T osdCreateNamedEventSemaphore(char *); - - /* Destroy the specified mutually exclusive semaphore object */ -uSHORT osdDestroySemaphore(SEMAPHORE_T); - /* Request access to the specified mutually exclusive semaphore */ -uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG); - /* Release access to the specified mutually exclusive semaphore */ -uSHORT osdReleaseSemaphore(SEMAPHORE_T); - /* wait for a event to happen */ -uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG); - /* signal an event */ -uLONG osdSignalEventSemaphore(SEMAPHORE_T); - /* reset the event */ -uLONG osdResetEventSemaphore(SEMAPHORE_T); - -/*----------------- */ -/* Thread functions */ -/*----------------- */ - - /* Releases control to the task switcher in non-preemptive */ - /* multitasking operating systems. */ -void osdSwitchThreads(void); - - /* Starts a thread function */ -uLONG osdStartThread(void *,void *); - -/* what is my thread id */ -uLONG osdGetThreadID(void); - -/* wakes up the specifed thread */ -void osdWakeThread(uLONG); - -/* osd sleep for x milliseconds */ -void osdSleep(uLONG); - -#define DPT_THREAD_PRIORITY_LOWEST 0x00 -#define DPT_THREAD_PRIORITY_NORMAL 0x01 -#define DPT_THREAD_PRIORITY_HIGHEST 0x02 - -uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority); - -#ifdef __cplusplus - } /* end the xtern "C" declaration */ -#endif - -#endif /* osd_util_h */ diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h deleted file mode 100644 index a4aa1c31ff72ebbdd7faed93b79dcb72fc199e0d..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt/sys_info.h +++ /dev/null @@ -1,417 +0,0 @@ -/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */ - -/* - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation - * All rights reserved. - * - * Redistribution and use in source form, with or without modification, are - * permitted provided that redistributions of source code must retain the - * above copyright notice, this list of conditions and the following disclaimer. 
- * - * This software is provided `as is' by Distributed Processing Technology and - * any express or implied warranties, including, but not limited to, the - * implied warranties of merchantability and fitness for a particular purpose, - * are disclaimed. In no event shall Distributed Processing Technology be - * liable for any direct, indirect, incidental, special, exemplary or - * consequential damages (including, but not limited to, procurement of - * substitute goods or services; loss of use, data, or profits; or business - * interruptions) however caused and on any theory of liability, whether in - * contract, strict liability, or tort (including negligence or otherwise) - * arising in any way out of the use of this driver software, even if advised - * of the possibility of such damage. - * - */ - -#ifndef __SYS_INFO_H -#define __SYS_INFO_H - -/*File - SYS_INFO.H - **************************************************************************** - * - *Description: - * - * This file contains structure definitions for the OS dependent - *layer system information buffers. - * - *Copyright Distributed Processing Technology, Corp. - * 140 Candace Dr. - * Maitland, Fl. 32751 USA - * Phone: (407) 830-5522 Fax: (407) 260-5366 - * All Rights Reserved - * - *Author: Don Kemper - *Date: 5/10/94 - * - *Editors: - * - *Remarks: - * - * - *****************************************************************************/ - - -/*Include Files ------------------------------------------------------------- */ - -#include "osd_util.h" - -#ifndef NO_PACK -#if defined (_DPT_AIX) -#pragma options align=packed -#else -#pragma pack(1) -#endif /* aix */ -#endif // no unpack - - -/*struct - driveParam_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the drive parameters seen during - *booting. - * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct driveParam_S { -#else - typedef struct { -#endif - - uSHORT cylinders; /* Up to 1024 */ - uCHAR heads; /* Up to 255 */ - uCHAR sectors; /* Up to 63 */ - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } driveParam_S; -#endif -/*driveParam_S - end */ - - -/*struct - sysInfo_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the command system information that - *should be returned by every OS dependent layer. 
- * - *---------------------------------------------------------------------------*/ - -/*flags - bit definitions */ -#define SI_CMOS_Valid 0x0001 -#define SI_NumDrivesValid 0x0002 -#define SI_ProcessorValid 0x0004 -#define SI_MemorySizeValid 0x0008 -#define SI_DriveParamsValid 0x0010 -#define SI_SmartROMverValid 0x0020 -#define SI_OSversionValid 0x0040 -#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */ -#define SI_BusTypeValid 0x0100 - -#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */ -#define SI_NO_SmartROM 0x8000 - -/*busType - definitions */ -#define SI_ISA_BUS 0x00 -#define SI_MCA_BUS 0x01 -#define SI_EISA_BUS 0x02 -#define SI_PCI_BUS 0x04 - -#ifdef __cplusplus - struct sysInfo_S { -#else - typedef struct { -#endif - - uCHAR drive0CMOS; /* CMOS Drive 0 Type */ - uCHAR drive1CMOS; /* CMOS Drive 1 Type */ - uCHAR numDrives; /* 0040:0075 contents */ - uCHAR processorFamily; /* Same as DPTSIG's definition */ - uCHAR processorType; /* Same as DPTSIG's definition */ - uCHAR smartROMMajorVersion; - uCHAR smartROMMinorVersion; /* SmartROM version */ - uCHAR smartROMRevision; - uSHORT flags; /* See bit definitions above */ - uSHORT conventionalMemSize; /* in KB */ - uINT extendedMemSize; /* in KB */ - uINT osType; /* Same as DPTSIG's definition */ - uCHAR osMajorVersion; - uCHAR osMinorVersion; /* The OS version */ - uCHAR osRevision; -#ifdef _SINIX_ADDON - uCHAR busType; /* See defininitions above */ - uSHORT osSubRevision; - uCHAR pad[2]; /* For alignment */ -#else - uCHAR osSubRevision; - uCHAR busType; /* See defininitions above */ - uCHAR pad[3]; /* For alignment */ -#endif - driveParam_S drives[16]; /* SmartROM Logical Drives */ - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } sysInfo_S; -#endif -/*sysInfo_S - end */ - - -/*struct - DOS_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to a - *DOS workstation. - * - *---------------------------------------------------------------------------*/ - -/*flags - bit definitions */ -#define DI_DOS_HIGH 0x01 /* DOS is loaded high */ -#define DI_DPMI_VALID 0x02 /* DPMI version is valid */ - -#ifdef __cplusplus - struct DOS_Info_S { -#else - typedef struct { -#endif - - uCHAR flags; /* See bit definitions above */ - uSHORT driverLocation; /* SmartROM BIOS address */ - uSHORT DOS_version; - uSHORT DPMI_version; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } DOS_Info_S; -#endif -/*DOS_Info_S - end */ - - -/*struct - Netware_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to a - *Netware machine. 
- * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct Netware_Info_S { -#else - typedef struct { -#endif - - uCHAR driverName[13]; /* ie PM12NW31.DSK */ - uCHAR serverName[48]; - uCHAR netwareVersion; /* The Netware OS version */ - uCHAR netwareSubVersion; - uCHAR netwareRevision; - uSHORT maxConnections; /* Probably 250 or 1000 */ - uSHORT connectionsInUse; - uSHORT maxVolumes; - uCHAR unused; - uCHAR SFTlevel; - uCHAR TTSlevel; - - uCHAR clibMajorVersion; /* The CLIB.NLM version */ - uCHAR clibMinorVersion; - uCHAR clibRevision; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } Netware_Info_S; -#endif -/*Netware_Info_S - end */ - - -/*struct - OS2_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to an - *OS/2 machine. - * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct OS2_Info_S { -#else - typedef struct { -#endif - - uCHAR something; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } OS2_Info_S; -#endif -/*OS2_Info_S - end */ - - -/*struct - WinNT_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to a - *Windows NT machine. - * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct WinNT_Info_S { -#else - typedef struct { -#endif - - uCHAR something; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } WinNT_Info_S; -#endif -/*WinNT_Info_S - end */ - - -/*struct - SCO_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to an - *SCO UNIX machine. - * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct SCO_Info_S { -#else - typedef struct { -#endif - - uCHAR something; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } SCO_Info_S; -#endif -/*SCO_Info_S - end */ - - -/*struct - USL_Info_S - start - *=========================================================================== - * - *Description: - * - * This structure defines the system information specific to a - *USL UNIX machine. 
- * - *---------------------------------------------------------------------------*/ - -#ifdef __cplusplus - struct USL_Info_S { -#else - typedef struct { -#endif - - uCHAR something; - -#ifdef __cplusplus - -//---------- Portability Additions ----------- in sp_sinfo.cpp -#ifdef DPT_PORTABLE - uSHORT netInsert(dptBuffer_S *buffer); - uSHORT netExtract(dptBuffer_S *buffer); -#endif // DPT PORTABLE -//-------------------------------------------- - - }; -#else - } USL_Info_S; -#endif -/*USL_Info_S - end */ - - - /* Restore default structure packing */ -#ifndef NO_UNPACK -#if defined (_DPT_AIX) -#pragma options align=reset -#elif defined (UNPACK_FOUR) -#pragma pack(4) -#else -#pragma pack() -#endif /* aix */ -#endif // no unpack - -#endif // __SYS_INFO_H - diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c deleted file mode 100644 index 37de8fb186d7b61dbbd7db97a18ca6bcfa26f977..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpt_i2o.c +++ /dev/null @@ -1,3616 +0,0 @@ -/*************************************************************************** - dpti.c - description - ------------------- - begin : Thu Sep 7 2000 - copyright : (C) 2000 by Adaptec - - July 30, 2001 First version being submitted - for inclusion in the kernel. V2.4 - - See Documentation/scsi/dpti.txt for history, notes, license info - and credits - ***************************************************************************/ - -/*************************************************************************** - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License as published by * - * the Free Software Foundation; either version 2 of the License, or * - * (at your option) any later version. * - * * - ***************************************************************************/ -/*************************************************************************** - * Sat Dec 20 2003 Go Taniguchi - - Support 2.6 kernel and DMA-mapping - - ioctl fix for raid tools - - use schedule_timeout in long long loop - **************************************************************************/ - -/*#define DEBUG 1 */ -/*#define UARTDELAY 1 */ - -#include - -MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn"); -MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); - -//////////////////////////////////////////////////////////////// - -#include /* For SCSI-Passthrough */ -#include - -#include -#include /* for kmalloc() */ -#include /* for PCI support */ -#include -#include -#include /* for udelay */ -#include -#include /* for printk */ -#include -#include -#include -#include - -#include -#include -#include -#include - -#include /* for boot_cpu_data */ -#include -#include /* for virt_to_bus, etc. 
*/ - -#include -#include -#include -#include -#include - -#include "dpt/dptsig.h" -#include "dpti.h" - -/*============================================================================ - * Create a binary signature - this is read by dptsig - * Needed for our management apps - *============================================================================ - */ -static DEFINE_MUTEX(adpt_mutex); -static dpt_sig_S DPTI_sig = { - {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, -#ifdef __i386__ - PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, -#elif defined(__ia64__) - PROC_INTEL, PROC_IA64, -#elif defined(__sparc__) - PROC_ULTRASPARC, PROC_ULTRASPARC, -#elif defined(__alpha__) - PROC_ALPHA, PROC_ALPHA, -#else - (-1),(-1), -#endif - FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL, - ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION, - DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver" -}; - - - - -/*============================================================================ - * Globals - *============================================================================ - */ - -static DEFINE_MUTEX(adpt_configuration_lock); - -static struct i2o_sys_tbl *sys_tbl; -static dma_addr_t sys_tbl_pa; -static int sys_tbl_ind; -static int sys_tbl_len; - -static adpt_hba* hba_chain = NULL; -static int hba_count = 0; - -static struct class *adpt_sysfs_class; - -static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long); -#ifdef CONFIG_COMPAT -static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long); -#endif - -static const struct file_operations adpt_fops = { - .unlocked_ioctl = adpt_unlocked_ioctl, - .open = adpt_open, - .release = adpt_close, -#ifdef CONFIG_COMPAT - .compat_ioctl = compat_adpt_ioctl, -#endif - .llseek = noop_llseek, -}; - -/* Structures and definitions for synchronous message posting. 
- * See adpt_i2o_post_wait() for description - * */ -struct adpt_i2o_post_wait_data -{ - int status; - u32 id; - adpt_wait_queue_head_t *wq; - struct adpt_i2o_post_wait_data *next; -}; - -static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL; -static u32 adpt_post_wait_id = 0; -static DEFINE_SPINLOCK(adpt_post_wait_lock); - - -/*============================================================================ - * Functions - *============================================================================ - */ - -static inline int dpt_dma64(adpt_hba *pHba) -{ - return (sizeof(dma_addr_t) > 4 && (pHba)->dma64); -} - -static inline u32 dma_high(dma_addr_t addr) -{ - return upper_32_bits(addr); -} - -static inline u32 dma_low(dma_addr_t addr) -{ - return (u32)addr; -} - -static u8 adpt_read_blink_led(adpt_hba* host) -{ - if (host->FwDebugBLEDflag_P) { - if( readb(host->FwDebugBLEDflag_P) == 0xbc ){ - return readb(host->FwDebugBLEDvalue_P); - } - } - return 0; -} - -/*============================================================================ - * Scsi host template interface functions - *============================================================================ - */ - -#ifdef MODULE -static struct pci_device_id dptids[] = { - { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - { 0, } -}; -#endif - -MODULE_DEVICE_TABLE(pci,dptids); - -static int adpt_detect(struct scsi_host_template* sht) -{ - struct pci_dev *pDev = NULL; - adpt_hba *pHba; - adpt_hba *next; - - PINFO("Detecting Adaptec I2O RAID controllers...\n"); - - /* search for all Adatpec I2O RAID cards */ - while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { - if(pDev->device == PCI_DPT_DEVICE_ID || - pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ - if(adpt_install_hba(sht, pDev) ){ - PERROR("Could not Init an I2O RAID device\n"); - PERROR("Will not try to detect others.\n"); - return hba_count-1; - } - pci_dev_get(pDev); - } - } - - /* In INIT state, Activate IOPs */ - for (pHba = hba_chain; pHba; pHba = next) { - next = pHba->next; - // Activate does get status , init outbound, and get hrt - if (adpt_i2o_activate_hba(pHba) < 0) { - adpt_i2o_delete_hba(pHba); - } - } - - - /* Active IOPs in HOLD state */ - -rebuild_sys_tab: - if (hba_chain == NULL) - return 0; - - /* - * If build_sys_table fails, we kill everything and bail - * as we can't init the IOPs w/o a system table - */ - if (adpt_i2o_build_sys_table() < 0) { - adpt_i2o_sys_shutdown(); - return 0; - } - - PDEBUG("HBA's in HOLD state\n"); - - /* If IOP don't get online, we need to rebuild the System table */ - for (pHba = hba_chain; pHba; pHba = pHba->next) { - if (adpt_i2o_online_hba(pHba) < 0) { - adpt_i2o_delete_hba(pHba); - goto rebuild_sys_tab; - } - } - - /* Active IOPs now in OPERATIONAL state */ - PDEBUG("HBA's in OPERATIONAL state\n"); - - printk("dpti: If you have a lot of devices this could take a few minutes.\n"); - for (pHba = hba_chain; pHba; pHba = next) { - next = pHba->next; - printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); - if (adpt_i2o_lct_get(pHba) < 0){ - adpt_i2o_delete_hba(pHba); - continue; - } - - if (adpt_i2o_parse_lct(pHba) < 0){ - adpt_i2o_delete_hba(pHba); - continue; - } - adpt_inquiry(pHba); - } - - adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o"); - if (IS_ERR(adpt_sysfs_class)) { - printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n"); - adpt_sysfs_class = NULL; - } - - for (pHba = hba_chain; 
pHba; pHba = next) { - next = pHba->next; - if (adpt_scsi_host_alloc(pHba, sht) < 0){ - adpt_i2o_delete_hba(pHba); - continue; - } - pHba->initialized = TRUE; - pHba->state &= ~DPTI_STATE_RESET; - if (adpt_sysfs_class) { - struct device *dev = device_create(adpt_sysfs_class, - NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL, - "dpti%d", pHba->unit); - if (IS_ERR(dev)) { - printk(KERN_WARNING"dpti%d: unable to " - "create device in dpt_i2o class\n", - pHba->unit); - } - } - } - - // Register our control device node - // nodes will need to be created in /dev to access this - // the nodes can not be created from within the driver - if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) { - adpt_i2o_sys_shutdown(); - return 0; - } - return hba_count; -} - - -static void adpt_release(adpt_hba *pHba) -{ - struct Scsi_Host *shost = pHba->host; - - scsi_remove_host(shost); -// adpt_i2o_quiesce_hba(pHba); - adpt_i2o_delete_hba(pHba); - scsi_host_put(shost); -} - - -static void adpt_inquiry(adpt_hba* pHba) -{ - u32 msg[17]; - u32 *mptr; - u32 *lenptr; - int direction; - int scsidir; - u32 len; - u32 reqlen; - u8* buf; - dma_addr_t addr; - u8 scb[16]; - s32 rcode; - - memset(msg, 0, sizeof(msg)); - buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL); - if(!buf){ - printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); - return; - } - memset((void*)buf, 0, 36); - - len = 36; - direction = 0x00000000; - scsidir =0x40000000; // DATA IN (iop<--dev) - - if (dpt_dma64(pHba)) - reqlen = 17; // SINGLE SGE, 64 bit - else - reqlen = 14; // SINGLE SGE, 32 bit - /* Stick the headers on */ - msg[0] = reqlen<<16 | SGL_OFFSET_12; - msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID); - msg[2] = 0; - msg[3] = 0; - // Adaptec/DPT Private stuff - msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16; - msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/; - /* Direction, disconnect ok | sense data | simple queue , CDBLen */ - // I2O_SCB_FLAG_ENABLE_DISCONNECT | - // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | - // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; - msg[6] = scsidir|0x20a00000| 6 /* cmd len*/; - - mptr=msg+7; - - memset(scb, 0, sizeof(scb)); - // Write SCSI command into the message - always 16 byte block - scb[0] = INQUIRY; - scb[1] = 0; - scb[2] = 0; - scb[3] = 0; - scb[4] = 36; - scb[5] = 0; - // Don't care about the rest of scb - - memcpy(mptr, scb, sizeof(scb)); - mptr+=4; - lenptr=mptr++; /* Remember me - fill in when we know */ - - /* Now fill in the SGList and command */ - *lenptr = len; - if (dpt_dma64(pHba)) { - *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */ - *mptr++ = 1 << PAGE_SHIFT; - *mptr++ = 0xD0000000|direction|len; - *mptr++ = dma_low(addr); - *mptr++ = dma_high(addr); - } else { - *mptr++ = 0xD0000000|direction|len; - *mptr++ = addr; - } - - // Send it on it's way - rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120); - if (rcode != 0) { - sprintf(pHba->detail, "Adaptec I2O RAID"); - printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode); - if (rcode != -ETIME && rcode != -EINTR) - dma_free_coherent(&pHba->pDev->dev, 80, buf, addr); - } else { - memset(pHba->detail, 0, sizeof(pHba->detail)); - memcpy(&(pHba->detail), "Vendor: Adaptec ", 16); - memcpy(&(pHba->detail[16]), " Model: ", 8); - memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16); - memcpy(&(pHba->detail[40]), " FW: ", 4); - memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4); - pHba->detail[48] = '\0'; /* precautionary */ - dma_free_coherent(&pHba->pDev->dev, 80, buf, addr); - } - adpt_i2o_status_get(pHba); - return ; -} 
- - -static int adpt_slave_configure(struct scsi_device * device) -{ - struct Scsi_Host *host = device->host; - adpt_hba* pHba; - - pHba = (adpt_hba *) host->hostdata[0]; - - if (host->can_queue && device->tagged_supported) { - scsi_change_queue_depth(device, - host->can_queue - 1); - } - return 0; -} - -static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) -{ - adpt_hba* pHba = NULL; - struct adpt_device* pDev = NULL; /* dpt per device information */ - - cmd->scsi_done = done; - /* - * SCSI REQUEST_SENSE commands will be executed automatically by the - * Host Adapter for any errors, so they should not be executed - * explicitly unless the Sense Data is zero indicating that no error - * occurred. - */ - - if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) { - cmd->result = (DID_OK << 16); - cmd->scsi_done(cmd); - return 0; - } - - pHba = (adpt_hba*)cmd->device->host->hostdata[0]; - if (!pHba) { - return FAILED; - } - - rmb(); - if ((pHba->state) & DPTI_STATE_RESET) - return SCSI_MLQUEUE_HOST_BUSY; - - // TODO if the cmd->device if offline then I may need to issue a bus rescan - // followed by a get_lct to see if the device is there anymore - if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) { - /* - * First command request for this device. Set up a pointer - * to the device structure. This should be a TEST_UNIT_READY - * command from scan_scsis_single. - */ - if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) { - // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response - // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. - cmd->result = (DID_NO_CONNECT << 16); - cmd->scsi_done(cmd); - return 0; - } - cmd->device->hostdata = pDev; - } - pDev->pScsi_dev = cmd->device; - - /* - * If we are being called from when the device is being reset, - * delay processing of the command until later. 
- */ - if (pDev->state & DPTI_DEV_RESET ) { - return FAILED; - } - return adpt_scsi_to_i2o(pHba, cmd, pDev); -} - -static DEF_SCSI_QCMD(adpt_queue) - -static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, - sector_t capacity, int geom[]) -{ - int heads=-1; - int sectors=-1; - int cylinders=-1; - - // *** First lets set the default geometry **** - - // If the capacity is less than ox2000 - if (capacity < 0x2000 ) { // floppy - heads = 18; - sectors = 2; - } - // else if between 0x2000 and 0x20000 - else if (capacity < 0x20000) { - heads = 64; - sectors = 32; - } - // else if between 0x20000 and 0x40000 - else if (capacity < 0x40000) { - heads = 65; - sectors = 63; - } - // else if between 0x4000 and 0x80000 - else if (capacity < 0x80000) { - heads = 128; - sectors = 63; - } - // else if greater than 0x80000 - else { - heads = 255; - sectors = 63; - } - cylinders = sector_div(capacity, heads * sectors); - - // Special case if CDROM - if(sdev->type == 5) { // CDROM - heads = 252; - sectors = 63; - cylinders = 1111; - } - - geom[0] = heads; - geom[1] = sectors; - geom[2] = cylinders; - - PDEBUG("adpt_bios_param: exit\n"); - return 0; -} - - -static const char *adpt_info(struct Scsi_Host *host) -{ - adpt_hba* pHba; - - pHba = (adpt_hba *) host->hostdata[0]; - return (char *) (pHba->detail); -} - -static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) -{ - struct adpt_device* d; - int id; - int chan; - adpt_hba* pHba; - int unit; - - // Find HBA (host bus adapter) we are looking for - mutex_lock(&adpt_configuration_lock); - for (pHba = hba_chain; pHba; pHba = pHba->next) { - if (pHba->host == host) { - break; /* found adapter */ - } - } - mutex_unlock(&adpt_configuration_lock); - if (pHba == NULL) { - return 0; - } - host = pHba->host; - - seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION); - seq_printf(m, "%s\n", pHba->detail); - seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n", - pHba->host->host_no, pHba->name, host->irq); - seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", - host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); - - seq_puts(m, "Devices:\n"); - for(chan = 0; chan < MAX_CHANNEL; chan++) { - for(id = 0; id < MAX_ID; id++) { - d = pHba->channel[chan].device[id]; - while(d) { - seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor); - seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev); - - unit = d->pI2o_dev->lct_data.tid; - seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n", - unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun, - scsi_device_online(d->pScsi_dev)? "online":"offline"); - d = d->next_lun; - } - } - } - return 0; -} - -/* - * Turn a struct scsi_cmnd * into a unique 32 bit 'context'. - */ -static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd) -{ - return (u32)cmd->serial_number; -} - -/* - * Go from a u32 'context' to a struct scsi_cmnd * . - * This could probably be made more efficient. 
- */ -static struct scsi_cmnd * - adpt_cmd_from_context(adpt_hba * pHba, u32 context) -{ - struct scsi_cmnd * cmd; - struct scsi_device * d; - - if (context == 0) - return NULL; - - spin_unlock(pHba->host->host_lock); - shost_for_each_device(d, pHba->host) { - unsigned long flags; - spin_lock_irqsave(&d->list_lock, flags); - list_for_each_entry(cmd, &d->cmd_list, list) { - if (((u32)cmd->serial_number == context)) { - spin_unlock_irqrestore(&d->list_lock, flags); - scsi_device_put(d); - spin_lock(pHba->host->host_lock); - return cmd; - } - } - spin_unlock_irqrestore(&d->list_lock, flags); - } - spin_lock(pHba->host->host_lock); - - return NULL; -} - -/* - * Turn a pointer to ioctl reply data into an u32 'context' - */ -static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply) -{ -#if BITS_PER_LONG == 32 - return (u32)(unsigned long)reply; -#else - ulong flags = 0; - u32 nr, i; - - spin_lock_irqsave(pHba->host->host_lock, flags); - nr = ARRAY_SIZE(pHba->ioctl_reply_context); - for (i = 0; i < nr; i++) { - if (pHba->ioctl_reply_context[i] == NULL) { - pHba->ioctl_reply_context[i] = reply; - break; - } - } - spin_unlock_irqrestore(pHba->host->host_lock, flags); - if (i >= nr) { - printk(KERN_WARNING"%s: Too many outstanding " - "ioctl commands\n", pHba->name); - return (u32)-1; - } - - return i; -#endif -} - -/* - * Go from an u32 'context' to a pointer to ioctl reply data. - */ -static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context) -{ -#if BITS_PER_LONG == 32 - return (void *)(unsigned long)context; -#else - void *p = pHba->ioctl_reply_context[context]; - pHba->ioctl_reply_context[context] = NULL; - - return p; -#endif -} - -/*=========================================================================== - * Error Handling routines - *=========================================================================== - */ - -static int adpt_abort(struct scsi_cmnd * cmd) -{ - adpt_hba* pHba = NULL; /* host bus adapter structure */ - struct adpt_device* dptdevice; /* dpt per device information */ - u32 msg[5]; - int rcode; - - if(cmd->serial_number == 0){ - return FAILED; - } - pHba = (adpt_hba*) cmd->device->host->hostdata[0]; - printk(KERN_INFO"%s: Trying to Abort\n",pHba->name); - if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { - printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); - return FAILED; - } - - memset(msg, 0, sizeof(msg)); - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; - msg[2] = 0; - msg[3]= 0; - msg[4] = adpt_cmd_to_context(cmd); - if (pHba->host) - spin_lock_irq(pHba->host->host_lock); - rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); - if (pHba->host) - spin_unlock_irq(pHba->host->host_lock); - if (rcode != 0) { - if(rcode == -EOPNOTSUPP ){ - printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); - return FAILED; - } - printk(KERN_INFO"%s: Abort failed.\n",pHba->name); - return FAILED; - } - printk(KERN_INFO"%s: Abort complete.\n",pHba->name); - return SUCCESS; -} - - -#define I2O_DEVICE_RESET 0x27 -// This is the same for BLK and SCSI devices -// NOTE this is wrong in the i2o.h definitions -// This is not currently supported by our adapter but we issue it anyway -static int adpt_device_reset(struct scsi_cmnd* cmd) -{ - adpt_hba* pHba; - u32 msg[4]; - u32 rcode; - int old_state; - struct adpt_device* d = cmd->device->hostdata; - - pHba = (void*) cmd->device->host->hostdata[0]; - printk(KERN_INFO"%s: Trying to reset device\n",pHba->name); - if (!d) { - 
printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name); - return FAILED; - } - memset(msg, 0, sizeof(msg)); - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid); - msg[2] = 0; - msg[3] = 0; - - if (pHba->host) - spin_lock_irq(pHba->host->host_lock); - old_state = d->state; - d->state |= DPTI_DEV_RESET; - rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); - d->state = old_state; - if (pHba->host) - spin_unlock_irq(pHba->host->host_lock); - if (rcode != 0) { - if(rcode == -EOPNOTSUPP ){ - printk(KERN_INFO"%s: Device reset not supported\n",pHba->name); - return FAILED; - } - printk(KERN_INFO"%s: Device reset failed\n",pHba->name); - return FAILED; - } else { - printk(KERN_INFO"%s: Device reset successful\n",pHba->name); - return SUCCESS; - } -} - - -#define I2O_HBA_BUS_RESET 0x87 -// This version of bus reset is called by the eh_error handler -static int adpt_bus_reset(struct scsi_cmnd* cmd) -{ - adpt_hba* pHba; - u32 msg[4]; - u32 rcode; - - pHba = (adpt_hba*)cmd->device->host->hostdata[0]; - memset(msg, 0, sizeof(msg)); - printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid ); - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid); - msg[2] = 0; - msg[3] = 0; - if (pHba->host) - spin_lock_irq(pHba->host->host_lock); - rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); - if (pHba->host) - spin_unlock_irq(pHba->host->host_lock); - if (rcode != 0) { - printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name); - return FAILED; - } else { - printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name); - return SUCCESS; - } -} - -// This version of reset is called by the eh_error_handler -static int __adpt_reset(struct scsi_cmnd* cmd) -{ - adpt_hba* pHba; - int rcode; - char name[32]; - - pHba = (adpt_hba*)cmd->device->host->hostdata[0]; - strncpy(name, pHba->name, sizeof(name)); - printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid); - rcode = adpt_hba_reset(pHba); - if(rcode == 0){ - printk(KERN_WARNING"%s: HBA reset complete\n", name); - return SUCCESS; - } else { - printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode); - return FAILED; - } -} - -static int adpt_reset(struct scsi_cmnd* cmd) -{ - int rc; - - spin_lock_irq(cmd->device->host->host_lock); - rc = __adpt_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); - - return rc; -} - -// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset -static int adpt_hba_reset(adpt_hba* pHba) -{ - int rcode; - - pHba->state |= DPTI_STATE_RESET; - - // Activate does get status , init outbound, and get hrt - if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) { - printk(KERN_ERR "%s: Could not activate\n", pHba->name); - adpt_i2o_delete_hba(pHba); - return rcode; - } - - if ((rcode=adpt_i2o_build_sys_table()) < 0) { - adpt_i2o_delete_hba(pHba); - return rcode; - } - PDEBUG("%s: in HOLD state\n",pHba->name); - - if ((rcode=adpt_i2o_online_hba(pHba)) < 0) { - adpt_i2o_delete_hba(pHba); - return rcode; - } - PDEBUG("%s: in OPERATIONAL state\n",pHba->name); - - if ((rcode=adpt_i2o_lct_get(pHba)) < 0){ - adpt_i2o_delete_hba(pHba); - return rcode; - } - - if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){ - adpt_i2o_delete_hba(pHba); - return rcode; - } - pHba->state &= ~DPTI_STATE_RESET; - - 
adpt_fail_posted_scbs(pHba); - return 0; /* return success */ -} - -/*=========================================================================== - * - *=========================================================================== - */ - - -static void adpt_i2o_sys_shutdown(void) -{ - adpt_hba *pHba, *pNext; - struct adpt_i2o_post_wait_data *p1, *old; - - printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n"); - printk(KERN_INFO" This could take a few minutes if there are many devices attached\n"); - /* Delete all IOPs from the controller chain */ - /* They should have already been released by the - * scsi-core - */ - for (pHba = hba_chain; pHba; pHba = pNext) { - pNext = pHba->next; - adpt_i2o_delete_hba(pHba); - } - - /* Remove any timedout entries from the wait queue. */ -// spin_lock_irqsave(&adpt_post_wait_lock, flags); - /* Nothing should be outstanding at this point so just - * free them - */ - for(p1 = adpt_post_wait_queue; p1;) { - old = p1; - p1 = p1->next; - kfree(old); - } -// spin_unlock_irqrestore(&adpt_post_wait_lock, flags); - adpt_post_wait_queue = NULL; - - printk(KERN_INFO "Adaptec I2O controllers down.\n"); -} - -static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) -{ - - adpt_hba* pHba = NULL; - adpt_hba* p = NULL; - ulong base_addr0_phys = 0; - ulong base_addr1_phys = 0; - u32 hba_map0_area_size = 0; - u32 hba_map1_area_size = 0; - void __iomem *base_addr_virt = NULL; - void __iomem *msg_addr_virt = NULL; - int dma64 = 0; - - int raptorFlag = FALSE; - - if(pci_enable_device(pDev)) { - return -EINVAL; - } - - if (pci_request_regions(pDev, "dpt_i2o")) { - PERROR("dpti: adpt_config_hba: pci request region failed\n"); - return -EINVAL; - } - - pci_set_master(pDev); - - /* - * See if we should enable dma64 mode. - */ - if (sizeof(dma_addr_t) > 4 && - pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) { - if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32)) - dma64 = 1; - } - if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0) - return -EINVAL; - - /* adapter only supports message blocks below 4GB */ - pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32)); - - base_addr0_phys = pci_resource_start(pDev,0); - hba_map0_area_size = pci_resource_len(pDev,0); - - // Check if standard PCI card or single BAR Raptor - if(pDev->device == PCI_DPT_DEVICE_ID){ - if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){ - // Raptor card with this device id needs 4M - hba_map0_area_size = 0x400000; - } else { // Not Raptor - it is a PCI card - if(hba_map0_area_size > 0x100000 ){ - hba_map0_area_size = 0x100000; - } - } - } else {// Raptor split BAR config - // Use BAR1 in this configuration - base_addr1_phys = pci_resource_start(pDev,1); - hba_map1_area_size = pci_resource_len(pDev,1); - raptorFlag = TRUE; - } - -#if BITS_PER_LONG == 64 - /* - * The original Adaptec 64 bit driver has this comment here: - * "x86_64 machines need more optimal mappings" - * - * I assume some HBAs report ridiculously large mappings - * and we need to limit them on platforms with IOMMUs. 
- */ - if (raptorFlag == TRUE) { - if (hba_map0_area_size > 128) - hba_map0_area_size = 128; - if (hba_map1_area_size > 524288) - hba_map1_area_size = 524288; - } else { - if (hba_map0_area_size > 524288) - hba_map0_area_size = 524288; - } -#endif - - base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size); - if (!base_addr_virt) { - pci_release_regions(pDev); - PERROR("dpti: adpt_config_hba: io remap failed\n"); - return -EINVAL; - } - - if(raptorFlag == TRUE) { - msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size ); - if (!msg_addr_virt) { - PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n"); - iounmap(base_addr_virt); - pci_release_regions(pDev); - return -EINVAL; - } - } else { - msg_addr_virt = base_addr_virt; - } - - // Allocate and zero the data structure - pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL); - if (!pHba) { - if (msg_addr_virt != base_addr_virt) - iounmap(msg_addr_virt); - iounmap(base_addr_virt); - pci_release_regions(pDev); - return -ENOMEM; - } - - mutex_lock(&adpt_configuration_lock); - - if(hba_chain != NULL){ - for(p = hba_chain; p->next; p = p->next); - p->next = pHba; - } else { - hba_chain = pHba; - } - pHba->next = NULL; - pHba->unit = hba_count; - sprintf(pHba->name, "dpti%d", hba_count); - hba_count++; - - mutex_unlock(&adpt_configuration_lock); - - pHba->pDev = pDev; - pHba->base_addr_phys = base_addr0_phys; - - // Set up the Virtual Base Address of the I2O Device - pHba->base_addr_virt = base_addr_virt; - pHba->msg_addr_virt = msg_addr_virt; - pHba->irq_mask = base_addr_virt+0x30; - pHba->post_port = base_addr_virt+0x40; - pHba->reply_port = base_addr_virt+0x44; - - pHba->hrt = NULL; - pHba->lct = NULL; - pHba->lct_size = 0; - pHba->status_block = NULL; - pHba->post_count = 0; - pHba->state = DPTI_STATE_RESET; - pHba->pDev = pDev; - pHba->devices = NULL; - pHba->dma64 = dma64; - - // Initializing the spinlocks - spin_lock_init(&pHba->state_lock); - spin_lock_init(&adpt_post_wait_lock); - - if(raptorFlag == 0){ - printk(KERN_INFO "Adaptec I2O RAID controller" - " %d at %p size=%x irq=%d%s\n", - hba_count-1, base_addr_virt, - hba_map0_area_size, pDev->irq, - dma64 ? " (64-bit DMA)" : ""); - } else { - printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n", - hba_count-1, pDev->irq, - dma64 ? 
" (64-bit DMA)" : ""); - printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size); - printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size); - } - - if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) { - printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq); - adpt_i2o_delete_hba(pHba); - return -EINVAL; - } - - return 0; -} - - -static void adpt_i2o_delete_hba(adpt_hba* pHba) -{ - adpt_hba* p1; - adpt_hba* p2; - struct i2o_device* d; - struct i2o_device* next; - int i; - int j; - struct adpt_device* pDev; - struct adpt_device* pNext; - - - mutex_lock(&adpt_configuration_lock); - if(pHba->host){ - free_irq(pHba->host->irq, pHba); - } - p2 = NULL; - for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){ - if(p1 == pHba) { - if(p2) { - p2->next = p1->next; - } else { - hba_chain = p1->next; - } - break; - } - } - - hba_count--; - mutex_unlock(&adpt_configuration_lock); - - iounmap(pHba->base_addr_virt); - pci_release_regions(pHba->pDev); - if(pHba->msg_addr_virt != pHba->base_addr_virt){ - iounmap(pHba->msg_addr_virt); - } - if(pHba->FwDebugBuffer_P) - iounmap(pHba->FwDebugBuffer_P); - if(pHba->hrt) { - dma_free_coherent(&pHba->pDev->dev, - pHba->hrt->num_entries * pHba->hrt->entry_len << 2, - pHba->hrt, pHba->hrt_pa); - } - if(pHba->lct) { - dma_free_coherent(&pHba->pDev->dev, pHba->lct_size, - pHba->lct, pHba->lct_pa); - } - if(pHba->status_block) { - dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block), - pHba->status_block, pHba->status_block_pa); - } - if(pHba->reply_pool) { - dma_free_coherent(&pHba->pDev->dev, - pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, - pHba->reply_pool, pHba->reply_pool_pa); - } - - for(d = pHba->devices; d ; d = next){ - next = d->next; - kfree(d); - } - for(i = 0 ; i < pHba->top_scsi_channel ; i++){ - for(j = 0; j < MAX_ID; j++){ - if(pHba->channel[i].device[j] != NULL){ - for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){ - pNext = pDev->next_lun; - kfree(pDev); - } - } - } - } - pci_dev_put(pHba->pDev); - if (adpt_sysfs_class) - device_destroy(adpt_sysfs_class, - MKDEV(DPTI_I2O_MAJOR, pHba->unit)); - kfree(pHba); - - if(hba_count <= 0){ - unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); - if (adpt_sysfs_class) { - class_destroy(adpt_sysfs_class); - adpt_sysfs_class = NULL; - } - } -} - -static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun) -{ - struct adpt_device* d; - - if(chan < 0 || chan >= MAX_CHANNEL) - return NULL; - - d = pHba->channel[chan].device[id]; - if(!d || d->tid == 0) { - return NULL; - } - - /* If it is the only lun at that address then this should match*/ - if(d->scsi_lun == lun){ - return d; - } - - /* else we need to look through all the luns */ - for(d=d->next_lun ; d ; d = d->next_lun){ - if(d->scsi_lun == lun){ - return d; - } - } - return NULL; -} - - -static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout) -{ - // I used my own version of the WAIT_QUEUE_HEAD - // to handle some version differences - // When embedded in the kernel this could go back to the vanilla one - ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post); - int status = 0; - ulong flags = 0; - struct adpt_i2o_post_wait_data *p1, *p2; - struct adpt_i2o_post_wait_data *wait_data = - kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC); - DECLARE_WAITQUEUE(wait, current); - - if (!wait_data) - return -ENOMEM; - - /* - * The spin locking is needed to keep anyone from playing - * with the queue pointers and id while we do the same - */ 
- spin_lock_irqsave(&adpt_post_wait_lock, flags); - // TODO we need a MORE unique way of getting ids - // to support async LCT get - wait_data->next = adpt_post_wait_queue; - adpt_post_wait_queue = wait_data; - adpt_post_wait_id++; - adpt_post_wait_id &= 0x7fff; - wait_data->id = adpt_post_wait_id; - spin_unlock_irqrestore(&adpt_post_wait_lock, flags); - - wait_data->wq = &adpt_wq_i2o_post; - wait_data->status = -ETIMEDOUT; - - add_wait_queue(&adpt_wq_i2o_post, &wait); - - msg[2] |= 0x80000000 | ((u32)wait_data->id); - timeout *= HZ; - if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){ - set_current_state(TASK_INTERRUPTIBLE); - if(pHba->host) - spin_unlock_irq(pHba->host->host_lock); - if (!timeout) - schedule(); - else{ - timeout = schedule_timeout(timeout); - if (timeout == 0) { - // I/O issued, but cannot get result in - // specified time. Freeing resorces is - // dangerous. - status = -ETIME; - } - } - if(pHba->host) - spin_lock_irq(pHba->host->host_lock); - } - remove_wait_queue(&adpt_wq_i2o_post, &wait); - - if(status == -ETIMEDOUT){ - printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit); - // We will have to free the wait_data memory during shutdown - return status; - } - - /* Remove the entry from the queue. */ - p2 = NULL; - spin_lock_irqsave(&adpt_post_wait_lock, flags); - for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) { - if(p1 == wait_data) { - if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) { - status = -EOPNOTSUPP; - } - if(p2) { - p2->next = p1->next; - } else { - adpt_post_wait_queue = p1->next; - } - break; - } - } - spin_unlock_irqrestore(&adpt_post_wait_lock, flags); - - kfree(wait_data); - - return status; -} - - -static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len) -{ - - u32 m = EMPTY_QUEUE; - u32 __iomem *msg; - ulong timeout = jiffies + 30*HZ; - do { - rmb(); - m = readl(pHba->post_port); - if (m != EMPTY_QUEUE) { - break; - } - if(time_after(jiffies,timeout)){ - printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit); - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while(m == EMPTY_QUEUE); - - msg = pHba->msg_addr_virt + m; - memcpy_toio(msg, data, len); - wmb(); - - //post message - writel(m, pHba->post_port); - wmb(); - - return 0; -} - - -static void adpt_i2o_post_wait_complete(u32 context, int status) -{ - struct adpt_i2o_post_wait_data *p1 = NULL; - /* - * We need to search through the adpt_post_wait - * queue to see if the given message is still - * outstanding. If not, it means that the IOP - * took longer to respond to the message than we - * had allowed and timer has already expired. - * Not much we can do about that except log - * it for debug purposes, increase timeout, and recompile - * - * Lock needed to keep anyone from moving queue pointers - * around while we're looking through them. 
- */ - - context &= 0x7fff; - - spin_lock(&adpt_post_wait_lock); - for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { - if(p1->id == context) { - p1->status = status; - spin_unlock(&adpt_post_wait_lock); - wake_up_interruptible(p1->wq); - return; - } - } - spin_unlock(&adpt_post_wait_lock); - // If this happens we lose commands that probably really completed - printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context); - printk(KERN_DEBUG" Tasks in wait queue:\n"); - for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { - printk(KERN_DEBUG" %d\n",p1->id); - } - return; -} - -static s32 adpt_i2o_reset_hba(adpt_hba* pHba) -{ - u32 msg[8]; - u8* status; - dma_addr_t addr; - u32 m = EMPTY_QUEUE ; - ulong timeout = jiffies + (TMOUT_IOPRESET*HZ); - - if(pHba->initialized == FALSE) { // First time reset should be quick - timeout = jiffies + (25*HZ); - } else { - adpt_i2o_quiesce_hba(pHba); - } - - do { - rmb(); - m = readl(pHba->post_port); - if (m != EMPTY_QUEUE) { - break; - } - if(time_after(jiffies,timeout)){ - printk(KERN_WARNING"Timeout waiting for message!\n"); - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while (m == EMPTY_QUEUE); - - status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL); - if(status == NULL) { - adpt_send_nop(pHba, m); - printk(KERN_ERR"IOP reset failed - no free memory.\n"); - return -ENOMEM; - } - memset(status,0,4); - - msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; - msg[2]=0; - msg[3]=0; - msg[4]=0; - msg[5]=0; - msg[6]=dma_low(addr); - msg[7]=dma_high(addr); - - memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); - wmb(); - writel(m, pHba->post_port); - wmb(); - - while(*status == 0){ - if(time_after(jiffies,timeout)){ - printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name); - /* We lose 4 bytes of "status" here, but we cannot - free these because controller may awake and corrupt - those bytes at any time */ - /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */ - return -ETIMEDOUT; - } - rmb(); - schedule_timeout_uninterruptible(1); - } - - if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) { - PDEBUG("%s: Reset in progress...\n", pHba->name); - // Here we wait for message frame to become available - // indicated that reset has finished - do { - rmb(); - m = readl(pHba->post_port); - if (m != EMPTY_QUEUE) { - break; - } - if(time_after(jiffies,timeout)){ - printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name); - /* We lose 4 bytes of "status" here, but we - cannot free these because controller may - awake and corrupt those bytes at any time */ - /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */ - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while (m == EMPTY_QUEUE); - // Flush the offset - adpt_send_nop(pHba, m); - } - adpt_i2o_status_get(pHba); - if(*status == 0x02 || - pHba->status_block->iop_state != ADAPTER_STATE_RESET) { - printk(KERN_WARNING"%s: Reset reject, trying to clear\n", - pHba->name); - } else { - PDEBUG("%s: Reset completed.\n", pHba->name); - } - - dma_free_coherent(&pHba->pDev->dev, 4, status, addr); -#ifdef UARTDELAY - // This delay is to allow someone attached to the card through the debug UART to - // set up the dump levels that they want before the rest of the initialization sequence - adpt_delay(20000); -#endif - return 0; -} - - -static int adpt_i2o_parse_lct(adpt_hba* pHba) -{ - int i; - int max; - int tid; - struct i2o_device *d; - i2o_lct *lct = pHba->lct; - u8 bus_no = 0; - s16 
scsi_id; - u64 scsi_lun; - u32 buf[10]; // larger than 7, or 8 ... - struct adpt_device* pDev; - - if (lct == NULL) { - printk(KERN_ERR "%s: LCT is empty???\n",pHba->name); - return -1; - } - - max = lct->table_size; - max -= 3; - max /= 9; - - for(i=0;i<max;i++) { - if( lct->lct_entry[i].user_tid != 0xfff){ - /* - * If we have hidden devices, we need to inform the upper layers about - * the possible maximum id reference to handle device access when - * an array is disassembled. This code has no other purpose but to - * allow us future access to devices that are currently hidden - * behind arrays, hotspares or have not been configured (JBOD mode). - */ - if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE && - lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL && - lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ - continue; - } - tid = lct->lct_entry[i].tid; - // I2O_DPT_DEVICE_INFO_GROUP_NO; - if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) { - continue; - } - bus_no = buf[0]>>16; - scsi_id = buf[1]; - scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); - if(bus_no >= MAX_CHANNEL) { // Something wrong skip it - printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); - continue; - } - if (scsi_id >= MAX_ID){ - printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no); - continue; - } - if(bus_no > pHba->top_scsi_channel){ - pHba->top_scsi_channel = bus_no; - } - if(scsi_id > pHba->top_scsi_id){ - pHba->top_scsi_id = scsi_id; - } - if(scsi_lun > pHba->top_scsi_lun){ - pHba->top_scsi_lun = scsi_lun; - } - continue; - } - d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL); - if(d==NULL) - { - printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name); - return -ENOMEM; - } - - d->controller = pHba; - d->next = NULL; - - memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); - - d->flags = 0; - tid = d->lct_data.tid; - adpt_i2o_report_hba_unit(pHba, d); - adpt_i2o_install_device(pHba, d); - } - bus_no = 0; - for(d = pHba->devices; d ; d = d->next) { - if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT || - d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){ - tid = d->lct_data.tid; - // TODO get the bus_no from hrt-but for now they are in order - //bus_no = - if(bus_no > pHba->top_scsi_channel){ - pHba->top_scsi_channel = bus_no; - } - pHba->channel[bus_no].type = d->lct_data.class_id; - pHba->channel[bus_no].tid = tid; - if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0) - { - pHba->channel[bus_no].scsi_id = buf[1]; - PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]); - } - // TODO remove - this is just until we get from hrt - bus_no++; - if(bus_no >= MAX_CHANNEL) { // Something wrong skip it - printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no); - break; - } - } - } - - // Setup adpt_device table - for(d = pHba->devices; d ; d = d->next) { - if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE || - d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL || - d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ - - tid = d->lct_data.tid; - scsi_id = -1; - // I2O_DPT_DEVICE_INFO_GROUP_NO; - if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) { - bus_no = buf[0]>>16; - scsi_id = buf[1]; - scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); - if(bus_no >= MAX_CHANNEL) { // Something wrong skip it - continue; - } - if (scsi_id >= MAX_ID) { - continue; - } - if( pHba->channel[bus_no].device[scsi_id] == NULL){ - pDev = kzalloc(sizeof(struct
adpt_device),GFP_KERNEL); - if(pDev == NULL) { - return -ENOMEM; - } - pHba->channel[bus_no].device[scsi_id] = pDev; - } else { - for( pDev = pHba->channel[bus_no].device[scsi_id]; - pDev->next_lun; pDev = pDev->next_lun){ - } - pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); - if(pDev->next_lun == NULL) { - return -ENOMEM; - } - pDev = pDev->next_lun; - } - pDev->tid = tid; - pDev->scsi_channel = bus_no; - pDev->scsi_id = scsi_id; - pDev->scsi_lun = scsi_lun; - pDev->pI2o_dev = d; - d->owner = pDev; - pDev->type = (buf[0])&0xff; - pDev->flags = (buf[0]>>8)&0xff; - if(scsi_id > pHba->top_scsi_id){ - pHba->top_scsi_id = scsi_id; - } - if(scsi_lun > pHba->top_scsi_lun){ - pHba->top_scsi_lun = scsi_lun; - } - } - if(scsi_id == -1){ - printk(KERN_WARNING"Could not find SCSI ID for %s\n", - d->lct_data.identity_tag); - } - } - } - return 0; -} - - -/* - * Each I2O controller has a chain of devices on it - these match - * the useful parts of the LCT of the board. - */ - -static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d) -{ - mutex_lock(&adpt_configuration_lock); - d->controller=pHba; - d->owner=NULL; - d->next=pHba->devices; - d->prev=NULL; - if (pHba->devices != NULL){ - pHba->devices->prev=d; - } - pHba->devices=d; - *d->dev_name = 0; - - mutex_unlock(&adpt_configuration_lock); - return 0; -} - -static int adpt_open(struct inode *inode, struct file *file) -{ - int minor; - adpt_hba* pHba; - - mutex_lock(&adpt_mutex); - //TODO check for root access - // - minor = iminor(inode); - if (minor >= hba_count) { - mutex_unlock(&adpt_mutex); - return -ENXIO; - } - mutex_lock(&adpt_configuration_lock); - for (pHba = hba_chain; pHba; pHba = pHba->next) { - if (pHba->unit == minor) { - break; /* found adapter */ - } - } - if (pHba == NULL) { - mutex_unlock(&adpt_configuration_lock); - mutex_unlock(&adpt_mutex); - return -ENXIO; - } - -// if(pHba->in_use){ - // mutex_unlock(&adpt_configuration_lock); -// return -EBUSY; -// } - - pHba->in_use = 1; - mutex_unlock(&adpt_configuration_lock); - mutex_unlock(&adpt_mutex); - - return 0; -} - -static int adpt_close(struct inode *inode, struct file *file) -{ - int minor; - adpt_hba* pHba; - - minor = iminor(inode); - if (minor >= hba_count) { - return -ENXIO; - } - mutex_lock(&adpt_configuration_lock); - for (pHba = hba_chain; pHba; pHba = pHba->next) { - if (pHba->unit == minor) { - break; /* found adapter */ - } - } - mutex_unlock(&adpt_configuration_lock); - if (pHba == NULL) { - return -ENXIO; - } - - pHba->in_use = 0; - - return 0; -} - - -static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) -{ - u32 msg[MAX_MESSAGE_SIZE]; - u32* reply = NULL; - u32 size = 0; - u32 reply_size = 0; - u32 __user *user_msg = arg; - u32 __user * user_reply = NULL; - void **sg_list = NULL; - u32 sg_offset = 0; - u32 sg_count = 0; - int sg_index = 0; - u32 i = 0; - u32 rcode = 0; - void *p = NULL; - dma_addr_t addr; - ulong flags = 0; - - memset(&msg, 0, MAX_MESSAGE_SIZE*4); - // get user msg size in u32s - if(get_user(size, &user_msg[0])){ - return -EFAULT; - } - size = size>>16; - - user_reply = &user_msg[size]; - if(size > MAX_MESSAGE_SIZE){ - return -EFAULT; - } - size *= 4; // Convert to bytes - - /* Copy in the user's I2O command */ - if(copy_from_user(msg, user_msg, size)) { - return -EFAULT; - } - get_user(reply_size, &user_reply[0]); - reply_size = reply_size>>16; - if(reply_size > REPLY_FRAME_SIZE){ - reply_size = REPLY_FRAME_SIZE; - } - reply_size *= 4; - reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); - if(reply == NULL) 
{ - printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); - return -ENOMEM; - } - sg_offset = (msg[0]>>4)&0xf; - msg[2] = 0x40000000; // IOCTL context - msg[3] = adpt_ioctl_to_context(pHba, reply); - if (msg[3] == (u32)-1) { - rcode = -EBUSY; - goto free; - } - - sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL); - if (!sg_list) { - rcode = -ENOMEM; - goto free; - } - if(sg_offset) { - // TODO add 64 bit API - struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - if (sg_count > pHba->sg_tablesize){ - printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count); - rcode = -EINVAL; - goto free; - } - - for(i = 0; i < sg_count; i++) { - int sg_size; - - if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { - printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count); - rcode = -EINVAL; - goto cleanup; - } - sg_size = sg[i].flag_count & 0xffffff; - /* Allocate memory for the transfer */ - p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL); - if(!p) { - printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - pHba->name,sg_size,i,sg_count); - rcode = -ENOMEM; - goto cleanup; - } - sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. - /* Copy in the user's SG buffer if necessary */ - if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { - // sg_simple_element API is 32 bit - if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) { - printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); - rcode = -EFAULT; - goto cleanup; - } - } - /* sg_simple_element API is 32 bit, but addr < 4GB */ - sg[i].addr_bus = addr; - } - } - - do { - /* - * Stop any new commands from enterring the - * controller while processing the ioctl - */ - if (pHba->host) { - scsi_block_requests(pHba->host); - spin_lock_irqsave(pHba->host->host_lock, flags); - } - rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); - if (rcode != 0) - printk("adpt_i2o_passthru: post wait failed %d %p\n", - rcode, reply); - if (pHba->host) { - spin_unlock_irqrestore(pHba->host->host_lock, flags); - scsi_unblock_requests(pHba->host); - } - } while (rcode == -ETIMEDOUT); - - if(rcode){ - goto cleanup; - } - - if(sg_offset) { - /* Copy back the Scatter Gather buffers back to user space */ - u32 j; - // TODO add 64 bit API - struct sg_simple_element* sg; - int sg_size; - - // re-acquire the original message to handle correctly the sg copy operation - memset(&msg, 0, MAX_MESSAGE_SIZE*4); - // get user msg size in u32s - if(get_user(size, &user_msg[0])){ - rcode = -EFAULT; - goto cleanup; - } - size = size>>16; - size *= 4; - if (size > MAX_MESSAGE_SIZE) { - rcode = -EINVAL; - goto cleanup; - } - /* Copy in the user's I2O command */ - if (copy_from_user (msg, user_msg, size)) { - rcode = -EFAULT; - goto cleanup; - } - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - - // TODO add 64 bit API - sg = (struct sg_simple_element*)(msg + sg_offset); - for (j = 0; j < sg_count; j++) { - /* Copy out the SG list to user's buffer if necessary */ - if(! 
(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { - sg_size = sg[j].flag_count & 0xffffff; - // sg_simple_element API is 32 bit - if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) { - printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); - rcode = -EFAULT; - goto cleanup; - } - } - } - } - - /* Copy back the reply to user space */ - if (reply_size) { - // we wrote our own values for context - now restore the user supplied ones - if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { - printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name); - rcode = -EFAULT; - } - if(copy_to_user(user_reply, reply, reply_size)) { - printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name); - rcode = -EFAULT; - } - } - - -cleanup: - if (rcode != -ETIME && rcode != -EINTR) { - struct sg_simple_element *sg = - (struct sg_simple_element*) (msg +sg_offset); - while(sg_index) { - if(sg_list[--sg_index]) { - dma_free_coherent(&pHba->pDev->dev, - sg[sg_index].flag_count & 0xffffff, - sg_list[sg_index], - sg[sg_index].addr_bus); - } - } - } - -free: - kfree(sg_list); - kfree(reply); - return rcode; -} - -#if defined __ia64__ -static void adpt_ia64_info(sysInfo_S* si) -{ - // This is all the info we need for now - // We will add more info as our new - // managmenent utility requires it - si->processorType = PROC_IA64; -} -#endif - -#if defined __sparc__ -static void adpt_sparc_info(sysInfo_S* si) -{ - // This is all the info we need for now - // We will add more info as our new - // managmenent utility requires it - si->processorType = PROC_ULTRASPARC; -} -#endif -#if defined __alpha__ -static void adpt_alpha_info(sysInfo_S* si) -{ - // This is all the info we need for now - // We will add more info as our new - // managmenent utility requires it - si->processorType = PROC_ALPHA; -} -#endif - -#if defined __i386__ - -#include - -static void adpt_i386_info(sysInfo_S* si) -{ - // This is all the info we need for now - // We will add more info as our new - // managmenent utility requires it - switch (boot_cpu_data.x86) { - case CPU_386: - si->processorType = PROC_386; - break; - case CPU_486: - si->processorType = PROC_486; - break; - case CPU_586: - si->processorType = PROC_PENTIUM; - break; - default: // Just in case - si->processorType = PROC_PENTIUM; - break; - } -} -#endif - -/* - * This routine returns information about the system. This does not effect - * any logic and if the info is wrong - it doesn't matter. 
- */ - -/* Get all the info we can not get from kernel services */ -static int adpt_system_info(void __user *buffer) -{ - sysInfo_S si; - - memset(&si, 0, sizeof(si)); - - si.osType = OS_LINUX; - si.osMajorVersion = 0; - si.osMinorVersion = 0; - si.osRevision = 0; - si.busType = SI_PCI_BUS; - si.processorFamily = DPTI_sig.dsProcessorFamily; - -#if defined __i386__ - adpt_i386_info(&si); -#elif defined (__ia64__) - adpt_ia64_info(&si); -#elif defined(__sparc__) - adpt_sparc_info(&si); -#elif defined (__alpha__) - adpt_alpha_info(&si); -#else - si.processorType = 0xff ; -#endif - if (copy_to_user(buffer, &si, sizeof(si))){ - printk(KERN_WARNING"dpti: Could not copy buffer TO user\n"); - return -EFAULT; - } - - return 0; -} - -static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) -{ - int minor; - int error = 0; - adpt_hba* pHba; - ulong flags = 0; - void __user *argp = (void __user *)arg; - - minor = iminor(inode); - if (minor >= DPTI_MAX_HBA){ - return -ENXIO; - } - mutex_lock(&adpt_configuration_lock); - for (pHba = hba_chain; pHba; pHba = pHba->next) { - if (pHba->unit == minor) { - break; /* found adapter */ - } - } - mutex_unlock(&adpt_configuration_lock); - if(pHba == NULL){ - return -ENXIO; - } - - while((volatile u32) pHba->state & DPTI_STATE_RESET ) - schedule_timeout_uninterruptible(2); - - switch (cmd) { - // TODO: handle 3 cases - case DPT_SIGNATURE: - if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) { - return -EFAULT; - } - break; - case I2OUSRCMD: - return adpt_i2o_passthru(pHba, argp); - - case DPT_CTRLINFO:{ - drvrHBAinfo_S HbaInfo; - -#define FLG_OSD_PCI_VALID 0x0001 -#define FLG_OSD_DMA 0x0002 -#define FLG_OSD_I2O 0x0004 - memset(&HbaInfo, 0, sizeof(HbaInfo)); - HbaInfo.drvrHBAnum = pHba->unit; - HbaInfo.baseAddr = (ulong) pHba->base_addr_phys; - HbaInfo.blinkState = adpt_read_blink_led(pHba); - HbaInfo.pciBusNum = pHba->pDev->bus->number; - HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); - HbaInfo.Interrupt = pHba->pDev->irq; - HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O; - if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){ - printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name); - return -EFAULT; - } - break; - } - case DPT_SYSINFO: - return adpt_system_info(argp); - case DPT_BLINKLED:{ - u32 value; - value = (u32)adpt_read_blink_led(pHba); - if (copy_to_user(argp, &value, sizeof(value))) { - return -EFAULT; - } - break; - } - case I2ORESETCMD: { - struct Scsi_Host *shost = pHba->host; - - if (shost) - spin_lock_irqsave(shost->host_lock, flags); - adpt_hba_reset(pHba); - if (shost) - spin_unlock_irqrestore(shost->host_lock, flags); - break; - } - case I2ORESCANCMD: - adpt_rescan(pHba); - break; - default: - return -EINVAL; - } - - return error; -} - -static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg) -{ - struct inode *inode; - long ret; - - inode = file_inode(file); - - mutex_lock(&adpt_mutex); - ret = adpt_ioctl(inode, file, cmd, arg); - mutex_unlock(&adpt_mutex); - - return ret; -} - -#ifdef CONFIG_COMPAT -static long compat_adpt_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct inode *inode; - long ret; - - inode = file_inode(file); - - mutex_lock(&adpt_mutex); - - switch(cmd) { - case DPT_SIGNATURE: - case I2OUSRCMD: - case DPT_CTRLINFO: - case DPT_SYSINFO: - case DPT_BLINKLED: - case I2ORESETCMD: - case I2ORESCANCMD: - case (DPT_TARGET_BUSY & 0xFFFF): - case DPT_TARGET_BUSY: - ret = adpt_ioctl(inode, file, cmd, arg); - break; - default: - ret = 
-ENOIOCTLCMD; - } - - mutex_unlock(&adpt_mutex); - - return ret; -} -#endif - -static irqreturn_t adpt_isr(int irq, void *dev_id) -{ - struct scsi_cmnd* cmd; - adpt_hba* pHba = dev_id; - u32 m; - void __iomem *reply; - u32 status=0; - u32 context; - ulong flags = 0; - int handled = 0; - - if (pHba == NULL){ - printk(KERN_WARNING"adpt_isr: NULL dev_id\n"); - return IRQ_NONE; - } - if(pHba->host) - spin_lock_irqsave(pHba->host->host_lock, flags); - - while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) { - m = readl(pHba->reply_port); - if(m == EMPTY_QUEUE){ - // Try twice then give up - rmb(); - m = readl(pHba->reply_port); - if(m == EMPTY_QUEUE){ - // This really should not happen - printk(KERN_ERR"dpti: Could not get reply frame\n"); - goto out; - } - } - if (pHba->reply_pool_pa <= m && - m < pHba->reply_pool_pa + - (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) { - reply = (u8 *)pHba->reply_pool + - (m - pHba->reply_pool_pa); - } else { - /* Ick, we should *never* be here */ - printk(KERN_ERR "dpti: reply frame not from pool\n"); - reply = (u8 *)bus_to_virt(m); - } - - if (readl(reply) & MSG_FAIL) { - u32 old_m = readl(reply+28); - void __iomem *msg; - u32 old_context; - PDEBUG("%s: Failed message\n",pHba->name); - if(old_m >= 0x100000){ - printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m); - writel(m,pHba->reply_port); - continue; - } - // Transaction context is 0 in failed reply frame - msg = pHba->msg_addr_virt + old_m; - old_context = readl(msg+12); - writel(old_context, reply+12); - adpt_send_nop(pHba, old_m); - } - context = readl(reply+8); - if(context & 0x40000000){ // IOCTL - void *p = adpt_ioctl_from_context(pHba, readl(reply+12)); - if( p != NULL) { - memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); - } - // All IOCTLs will also be post wait - } - if(context & 0x80000000){ // Post wait message - status = readl(reply+16); - if(status >> 24){ - status &= 0xffff; /* Get detail status */ - } else { - status = I2O_POST_WAIT_OK; - } - if(!(context & 0x40000000)) { - cmd = adpt_cmd_from_context(pHba, - readl(reply+12)); - if(cmd != NULL) { - printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); - } - } - adpt_i2o_post_wait_complete(context, status); - } else { // SCSI message - cmd = adpt_cmd_from_context (pHba, readl(reply+12)); - if(cmd != NULL){ - scsi_dma_unmap(cmd); - if(cmd->serial_number != 0) { // If not timedout - adpt_i2o_to_scsi(reply, cmd); - } - } - } - writel(m, pHba->reply_port); - wmb(); - rmb(); - } - handled = 1; -out: if(pHba->host) - spin_unlock_irqrestore(pHba->host->host_lock, flags); - return IRQ_RETVAL(handled); -} - -static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d) -{ - int i; - u32 msg[MAX_MESSAGE_SIZE]; - u32* mptr; - u32* lptr; - u32 *lenptr; - int direction; - int scsidir; - int nseg; - u32 len; - u32 reqlen; - s32 rcode; - dma_addr_t addr; - - memset(msg, 0 , sizeof(msg)); - len = scsi_bufflen(cmd); - direction = 0x00000000; - - scsidir = 0x00000000; // DATA NO XFER - if(len) { - /* - * Set SCBFlags to indicate if data is being transferred - * in or out, or no data transfer - * Note: Do not have to verify index is less than 0 since - * cmd->cmnd[0] is an unsigned char - */ - switch(cmd->sc_data_direction){ - case DMA_FROM_DEVICE: - scsidir =0x40000000; // DATA IN (iop<--dev) - break; - case DMA_TO_DEVICE: - direction=0x04000000; // SGL OUT - scsidir =0x80000000; // DATA OUT (iop-->dev) - break; - case DMA_NONE: - break; - 
case DMA_BIDIRECTIONAL: - scsidir =0x40000000; // DATA IN (iop<--dev) - // Assume In - and continue; - break; - default: - printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n", - pHba->name, cmd->cmnd[0]); - cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8); - cmd->scsi_done(cmd); - return 0; - } - } - // msg[0] is set later - // I2O_CMD_SCSI_EXEC - msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); - msg[2] = 0; - msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */ - // Our cards use the transaction context as the tag for queueing - // Adaptec/DPT Private stuff - msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); - msg[5] = d->tid; - /* Direction, disconnect ok | sense data | simple queue , CDBLen */ - // I2O_SCB_FLAG_ENABLE_DISCONNECT | - // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | - // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; - msg[6] = scsidir|0x20a00000|cmd->cmd_len; - - mptr=msg+7; - - // Write SCSI command into the message - always 16 byte block - memset(mptr, 0, 16); - memcpy(mptr, cmd->cmnd, cmd->cmd_len); - mptr+=4; - lenptr=mptr++; /* Remember me - fill in when we know */ - if (dpt_dma64(pHba)) { - reqlen = 16; // SINGLE SGE - *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */ - *mptr++ = 1 << PAGE_SHIFT; - } else { - reqlen = 14; // SINGLE SGE - } - /* Now fill in the SGList and command */ - - nseg = scsi_dma_map(cmd); - BUG_ON(nseg < 0); - if (nseg) { - struct scatterlist *sg; - - len = 0; - scsi_for_each_sg(cmd, sg, nseg, i) { - lptr = mptr; - *mptr++ = direction|0x10000000|sg_dma_len(sg); - len+=sg_dma_len(sg); - addr = sg_dma_address(sg); - *mptr++ = dma_low(addr); - if (dpt_dma64(pHba)) - *mptr++ = dma_high(addr); - /* Make this an end of list */ - if (i == nseg - 1) - *lptr = direction|0xD0000000|sg_dma_len(sg); - } - reqlen = mptr - msg; - *lenptr = len; - - if(cmd->underflow && len != cmd->underflow){ - printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n", - len, cmd->underflow); - } - } else { - *lenptr = len = 0; - reqlen = 12; - } - - /* Stick the headers on */ - msg[0] = reqlen<<16 | ((reqlen > 12) ? 
SGL_OFFSET_12 : SGL_OFFSET_0); - - // Send it on it's way - rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2); - if (rcode == 0) { - return 0; - } - return rcode; -} - - -static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht) -{ - struct Scsi_Host *host; - - host = scsi_host_alloc(sht, sizeof(adpt_hba*)); - if (host == NULL) { - printk("%s: scsi_host_alloc returned NULL\n", pHba->name); - return -1; - } - host->hostdata[0] = (unsigned long)pHba; - pHba->host = host; - - host->irq = pHba->pDev->irq; - /* no IO ports, so don't have to set host->io_port and - * host->n_io_port - */ - host->io_port = 0; - host->n_io_port = 0; - /* see comments in scsi_host.h */ - host->max_id = 16; - host->max_lun = 256; - host->max_channel = pHba->top_scsi_channel + 1; - host->cmd_per_lun = 1; - host->unique_id = (u32)sys_tbl_pa + pHba->unit; - host->sg_tablesize = pHba->sg_tablesize; - host->can_queue = pHba->post_fifo_size; - host->use_cmd_list = 1; - - return 0; -} - - -static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) -{ - adpt_hba* pHba; - u32 hba_status; - u32 dev_status; - u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits - // I know this would look cleaner if I just read bytes - // but the model I have been using for all the rest of the - // io is in 4 byte words - so I keep that model - u16 detailed_status = readl(reply+16) &0xffff; - dev_status = (detailed_status & 0xff); - hba_status = detailed_status >> 8; - - // calculate resid for sg - scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20)); - - pHba = (adpt_hba*) cmd->device->host->hostdata[0]; - - cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false - - if(!(reply_flags & MSG_FAIL)) { - switch(detailed_status & I2O_SCSI_DSC_MASK) { - case I2O_SCSI_DSC_SUCCESS: - cmd->result = (DID_OK << 16); - // handle underflow - if (readl(reply+20) < cmd->underflow) { - cmd->result = (DID_ERROR <<16); - printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name); - } - break; - case I2O_SCSI_DSC_REQUEST_ABORTED: - cmd->result = (DID_ABORT << 16); - break; - case I2O_SCSI_DSC_PATH_INVALID: - case I2O_SCSI_DSC_DEVICE_NOT_PRESENT: - case I2O_SCSI_DSC_SELECTION_TIMEOUT: - case I2O_SCSI_DSC_COMMAND_TIMEOUT: - case I2O_SCSI_DSC_NO_ADAPTER: - case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE: - printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n", - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); - cmd->result = (DID_TIME_OUT << 16); - break; - case I2O_SCSI_DSC_ADAPTER_BUSY: - case I2O_SCSI_DSC_BUS_BUSY: - cmd->result = (DID_BUS_BUSY << 16); - break; - case I2O_SCSI_DSC_SCSI_BUS_RESET: - case I2O_SCSI_DSC_BDR_MESSAGE_SENT: - cmd->result = (DID_RESET << 16); - break; - case I2O_SCSI_DSC_PARITY_ERROR_FAILURE: - printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name); - cmd->result = (DID_PARITY << 16); - break; - case I2O_SCSI_DSC_UNABLE_TO_ABORT: - case I2O_SCSI_DSC_COMPLETE_WITH_ERROR: - case I2O_SCSI_DSC_UNABLE_TO_TERMINATE: - case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED: - case I2O_SCSI_DSC_AUTOSENSE_FAILED: - case I2O_SCSI_DSC_DATA_OVERRUN: - case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE: - case I2O_SCSI_DSC_SEQUENCE_FAILURE: - case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR: - case I2O_SCSI_DSC_PROVIDE_FAILURE: - case I2O_SCSI_DSC_REQUEST_TERMINATED: - case I2O_SCSI_DSC_IDE_MESSAGE_SENT: - case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT: - case I2O_SCSI_DSC_MESSAGE_RECEIVED: - case 
I2O_SCSI_DSC_INVALID_CDB: - case I2O_SCSI_DSC_LUN_INVALID: - case I2O_SCSI_DSC_SCSI_TID_INVALID: - case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE: - case I2O_SCSI_DSC_NO_NEXUS: - case I2O_SCSI_DSC_CDB_RECEIVED: - case I2O_SCSI_DSC_LUN_ALREADY_ENABLED: - case I2O_SCSI_DSC_QUEUE_FROZEN: - case I2O_SCSI_DSC_REQUEST_INVALID: - default: - printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", - pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, - hba_status, dev_status, cmd->cmnd[0]); - cmd->result = (DID_ERROR << 16); - break; - } - - // copy over the request sense data if it was a check - // condition status - if (dev_status == SAM_STAT_CHECK_CONDITION) { - u32 len = min(SCSI_SENSE_BUFFERSIZE, 40); - // Copy over the sense data - memcpy_fromio(cmd->sense_buffer, (reply+28) , len); - if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && - cmd->sense_buffer[2] == DATA_PROTECT ){ - /* This is to handle an array failed */ - cmd->result = (DID_TIME_OUT << 16); - printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, - hba_status, dev_status, cmd->cmnd[0]); - - } - } - } else { - /* In this condtion we could not talk to the tid - * the card rejected it. We should signal a retry - * for a limitted number of retries. - */ - cmd->result = (DID_TIME_OUT << 16); - printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n", - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, - ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]); - } - - cmd->result |= (dev_status); - - if(cmd->scsi_done != NULL){ - cmd->scsi_done(cmd); - } - return cmd->result; -} - - -static s32 adpt_rescan(adpt_hba* pHba) -{ - s32 rcode; - ulong flags = 0; - - if(pHba->host) - spin_lock_irqsave(pHba->host->host_lock, flags); - if ((rcode=adpt_i2o_lct_get(pHba)) < 0) - goto out; - if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0) - goto out; - rcode = 0; -out: if(pHba->host) - spin_unlock_irqrestore(pHba->host->host_lock, flags); - return rcode; -} - - -static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) -{ - int i; - int max; - int tid; - struct i2o_device *d; - i2o_lct *lct = pHba->lct; - u8 bus_no = 0; - s16 scsi_id; - u64 scsi_lun; - u32 buf[10]; // at least 8 u32's - struct adpt_device* pDev = NULL; - struct i2o_device* pI2o_dev = NULL; - - if (lct == NULL) { - printk(KERN_ERR "%s: LCT is empty???\n",pHba->name); - return -1; - } - - max = lct->table_size; - max -= 3; - max /= 9; - - // Mark each drive as unscanned - for (d = pHba->devices; d; d = d->next) { - pDev =(struct adpt_device*) d->owner; - if(!pDev){ - continue; - } - pDev->state |= DPTI_DEV_UNSCANNED; - } - - printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max); - - for(i=0;i<max;i++) { - if( lct->lct_entry[i].user_tid != 0xfff){ - continue; - } - - if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE || - lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL || - lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ - tid = lct->lct_entry[i].tid; - if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) { - printk(KERN_ERR"%s: Could not query device\n",pHba->name); - continue; - } - bus_no = buf[0]>>16; - if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */ - printk(KERN_WARNING - "%s: Channel number %d out of range\n", - pHba->name, bus_no); - continue; - } -
- scsi_id = buf[1]; - scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); - pDev = pHba->channel[bus_no].device[scsi_id]; - /* da lun */ - while(pDev) { - if(pDev->scsi_lun == scsi_lun) { - break; - } - pDev = pDev->next_lun; - } - if(!pDev ) { // Something new add it - d = kmalloc(sizeof(struct i2o_device), - GFP_ATOMIC); - if(d==NULL) - { - printk(KERN_CRIT "Out of memory for I2O device data.\n"); - return -ENOMEM; - } - - d->controller = pHba; - d->next = NULL; - - memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); - - d->flags = 0; - adpt_i2o_report_hba_unit(pHba, d); - adpt_i2o_install_device(pHba, d); - - pDev = pHba->channel[bus_no].device[scsi_id]; - if( pDev == NULL){ - pDev = - kzalloc(sizeof(struct adpt_device), - GFP_ATOMIC); - if(pDev == NULL) { - return -ENOMEM; - } - pHba->channel[bus_no].device[scsi_id] = pDev; - } else { - while (pDev->next_lun) { - pDev = pDev->next_lun; - } - pDev = pDev->next_lun = - kzalloc(sizeof(struct adpt_device), - GFP_ATOMIC); - if(pDev == NULL) { - return -ENOMEM; - } - } - pDev->tid = d->lct_data.tid; - pDev->scsi_channel = bus_no; - pDev->scsi_id = scsi_id; - pDev->scsi_lun = scsi_lun; - pDev->pI2o_dev = d; - d->owner = pDev; - pDev->type = (buf[0])&0xff; - pDev->flags = (buf[0]>>8)&0xff; - // Too late, SCSI system has made up it's mind, but what the hey ... - if(scsi_id > pHba->top_scsi_id){ - pHba->top_scsi_id = scsi_id; - } - if(scsi_lun > pHba->top_scsi_lun){ - pHba->top_scsi_lun = scsi_lun; - } - continue; - } // end of new i2o device - - // We found an old device - check it - while(pDev) { - if(pDev->scsi_lun == scsi_lun) { - if(!scsi_device_online(pDev->pScsi_dev)) { - printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n", - pHba->name,bus_no,scsi_id,scsi_lun); - if (pDev->pScsi_dev) { - scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING); - } - } - d = pDev->pI2o_dev; - if(d->lct_data.tid != tid) { // something changed - pDev->tid = tid; - memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); - if (pDev->pScsi_dev) { - pDev->pScsi_dev->changed = TRUE; - pDev->pScsi_dev->removable = TRUE; - } - } - // Found it - mark it scanned - pDev->state = DPTI_DEV_ONLINE; - break; - } - pDev = pDev->next_lun; - } - } - } - for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) { - pDev =(struct adpt_device*) pI2o_dev->owner; - if(!pDev){ - continue; - } - // Drive offline drives that previously existed but could not be found - // in the LCT table - if (pDev->state & DPTI_DEV_UNSCANNED){ - pDev->state = DPTI_DEV_OFFLINE; - printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun); - if (pDev->pScsi_dev) { - scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE); - } - } - } - return 0; -} - -static void adpt_fail_posted_scbs(adpt_hba* pHba) -{ - struct scsi_cmnd* cmd = NULL; - struct scsi_device* d = NULL; - - shost_for_each_device(d, pHba->host) { - unsigned long flags; - spin_lock_irqsave(&d->list_lock, flags); - list_for_each_entry(cmd, &d->cmd_list, list) { - if(cmd->serial_number == 0){ - continue; - } - cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1); - cmd->scsi_done(cmd); - } - spin_unlock_irqrestore(&d->list_lock, flags); - } -} - - -/*============================================================================ - * Routines from i2o subsystem - *============================================================================ - */ - - - -/* - * Bring an I2O controller into HOLD state. See the spec. 
- */ -static int adpt_i2o_activate_hba(adpt_hba* pHba) -{ - int rcode; - - if(pHba->initialized ) { - if (adpt_i2o_status_get(pHba) < 0) { - if((rcode = adpt_i2o_reset_hba(pHba)) != 0){ - printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name); - return rcode; - } - if (adpt_i2o_status_get(pHba) < 0) { - printk(KERN_INFO "HBA not responding.\n"); - return -1; - } - } - - if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) { - printk(KERN_CRIT "%s: hardware fault\n", pHba->name); - return -1; - } - - if (pHba->status_block->iop_state == ADAPTER_STATE_READY || - pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL || - pHba->status_block->iop_state == ADAPTER_STATE_HOLD || - pHba->status_block->iop_state == ADAPTER_STATE_FAILED) { - adpt_i2o_reset_hba(pHba); - if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) { - printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name); - return -1; - } - } - } else { - if((rcode = adpt_i2o_reset_hba(pHba)) != 0){ - printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name); - return rcode; - } - - } - - if (adpt_i2o_init_outbound_q(pHba) < 0) { - return -1; - } - - /* In HOLD state */ - - if (adpt_i2o_hrt_get(pHba) < 0) { - return -1; - } - - return 0; -} - -/* - * Bring a controller online into OPERATIONAL state. - */ - -static int adpt_i2o_online_hba(adpt_hba* pHba) -{ - if (adpt_i2o_systab_send(pHba) < 0) - return -1; - /* In READY state */ - - if (adpt_i2o_enable_hba(pHba) < 0) - return -1; - - /* In OPERATIONAL state */ - return 0; -} - -static s32 adpt_send_nop(adpt_hba*pHba,u32 m) -{ - u32 __iomem *msg; - ulong timeout = jiffies + 5*HZ; - - while(m == EMPTY_QUEUE){ - rmb(); - m = readl(pHba->post_port); - if(m != EMPTY_QUEUE){ - break; - } - if(time_after(jiffies,timeout)){ - printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name); - return 2; - } - schedule_timeout_uninterruptible(1); - } - msg = (u32 __iomem *)(pHba->msg_addr_virt + m); - writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]); - writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]); - writel( 0,&msg[2]); - wmb(); - - writel(m, pHba->post_port); - wmb(); - return 0; -} - -static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba) -{ - u8 *status; - dma_addr_t addr; - u32 __iomem *msg = NULL; - int i; - ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ; - u32 m; - - do { - rmb(); - m = readl(pHba->post_port); - if (m != EMPTY_QUEUE) { - break; - } - - if(time_after(jiffies,timeout)){ - printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name); - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while(m == EMPTY_QUEUE); - - msg=(u32 __iomem *)(pHba->msg_addr_virt+m); - - status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL); - if (!status) { - adpt_send_nop(pHba, m); - printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n", - pHba->name); - return -ENOMEM; - } - memset(status, 0, 4); - - writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); - writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); - writel(0, &msg[2]); - writel(0x0106, &msg[3]); /* Transaction context */ - writel(4096, &msg[4]); /* Host page frame size */ - writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ - writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ - writel((u32)addr, &msg[7]); - - writel(m, pHba->post_port); - wmb(); - - // Wait for the reply status to come back - do { - if (*status) { - if (*status != 0x01 
/*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) { - break; - } - } - rmb(); - if(time_after(jiffies,timeout)){ - printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name); - /* We lose 4 bytes of "status" here, but we - cannot free these because controller may - awake and corrupt those bytes at any time */ - /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */ - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while (1); - - // If the command was successful, fill the fifo with our reply - // message packets - if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) { - dma_free_coherent(&pHba->pDev->dev, 4, status, addr); - return -2; - } - dma_free_coherent(&pHba->pDev->dev, 4, status, addr); - - if(pHba->reply_pool != NULL) { - dma_free_coherent(&pHba->pDev->dev, - pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, - pHba->reply_pool, pHba->reply_pool_pa); - } - - pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev, - pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, - &pHba->reply_pool_pa, GFP_KERNEL); - if (!pHba->reply_pool) { - printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name); - return -ENOMEM; - } - memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4); - - for(i = 0; i < pHba->reply_fifo_size; i++) { - writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4), - pHba->reply_port); - wmb(); - } - adpt_i2o_status_get(pHba); - return 0; -} - - -/* - * I2O System Table. Contains information about - * all the IOPs in the system. Used to inform IOPs - * about each other's existence. - * - * sys_tbl_ver is the CurrentChangeIndicator that is - * used by IOPs to track changes. - */ - - - -static s32 adpt_i2o_status_get(adpt_hba* pHba) -{ - ulong timeout; - u32 m; - u32 __iomem *msg; - u8 *status_block=NULL; - - if(pHba->status_block == NULL) { - pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev, - sizeof(i2o_status_block), - &pHba->status_block_pa, GFP_KERNEL); - if(pHba->status_block == NULL) { - printk(KERN_ERR - "dpti%d: Get Status Block failed; Out of memory. 
\n", - pHba->unit); - return -ENOMEM; - } - } - memset(pHba->status_block, 0, sizeof(i2o_status_block)); - status_block = (u8*)(pHba->status_block); - timeout = jiffies+TMOUT_GETSTATUS*HZ; - do { - rmb(); - m = readl(pHba->post_port); - if (m != EMPTY_QUEUE) { - break; - } - if(time_after(jiffies,timeout)){ - printk(KERN_ERR "%s: Timeout waiting for message !\n", - pHba->name); - return -ETIMEDOUT; - } - schedule_timeout_uninterruptible(1); - } while(m==EMPTY_QUEUE); - - - msg=(u32 __iomem *)(pHba->msg_addr_virt+m); - - writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]); - writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]); - writel(1, &msg[2]); - writel(0, &msg[3]); - writel(0, &msg[4]); - writel(0, &msg[5]); - writel( dma_low(pHba->status_block_pa), &msg[6]); - writel( dma_high(pHba->status_block_pa), &msg[7]); - writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes - - //post message - writel(m, pHba->post_port); - wmb(); - - while(status_block[87]!=0xff){ - if(time_after(jiffies,timeout)){ - printk(KERN_ERR"dpti%d: Get status timeout.\n", - pHba->unit); - return -ETIMEDOUT; - } - rmb(); - schedule_timeout_uninterruptible(1); - } - - // Set up our number of outbound and inbound messages - pHba->post_fifo_size = pHba->status_block->max_inbound_frames; - if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) { - pHba->post_fifo_size = MAX_TO_IOP_MESSAGES; - } - - pHba->reply_fifo_size = pHba->status_block->max_outbound_frames; - if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) { - pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES; - } - - // Calculate the Scatter Gather list size - if (dpt_dma64(pHba)) { - pHba->sg_tablesize - = ((pHba->status_block->inbound_frame_size * 4 - - 14 * sizeof(u32)) - / (sizeof(struct sg_simple_element) + sizeof(u32))); - } else { - pHba->sg_tablesize - = ((pHba->status_block->inbound_frame_size * 4 - - 12 * sizeof(u32)) - / sizeof(struct sg_simple_element)); - } - if (pHba->sg_tablesize > SG_LIST_ELEMENTS) { - pHba->sg_tablesize = SG_LIST_ELEMENTS; - } - - -#ifdef DEBUG - printk("dpti%d: State = ",pHba->unit); - switch(pHba->status_block->iop_state) { - case 0x01: - printk("INIT\n"); - break; - case 0x02: - printk("RESET\n"); - break; - case 0x04: - printk("HOLD\n"); - break; - case 0x05: - printk("READY\n"); - break; - case 0x08: - printk("OPERATIONAL\n"); - break; - case 0x10: - printk("FAILED\n"); - break; - case 0x11: - printk("FAULTED\n"); - break; - default: - printk("%x (unknown!!)\n",pHba->status_block->iop_state); - } -#endif - return 0; -} - -/* - * Get the IOP's Logical Configuration Table - */ -static int adpt_i2o_lct_get(adpt_hba* pHba) -{ - u32 msg[8]; - int ret; - u32 buf[16]; - - if ((pHba->lct_size == 0) || (pHba->lct == NULL)){ - pHba->lct_size = pHba->status_block->expected_lct_size; - } - do { - if (pHba->lct == NULL) { - pHba->lct = dma_alloc_coherent(&pHba->pDev->dev, - pHba->lct_size, &pHba->lct_pa, - GFP_ATOMIC); - if(pHba->lct == NULL) { - printk(KERN_CRIT "%s: Lct Get failed. 
Out of memory.\n", - pHba->name); - return -ENOMEM; - } - } - memset(pHba->lct, 0, pHba->lct_size); - - msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6; - msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2] = 0; - msg[3] = 0; - msg[4] = 0xFFFFFFFF; /* All devices */ - msg[5] = 0x00000000; /* Report now */ - msg[6] = 0xD0000000|pHba->lct_size; - msg[7] = (u32)pHba->lct_pa; - - if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) { - printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", - pHba->name, ret); - printk(KERN_ERR"Adaptec: Error Reading Hardware.\n"); - return ret; - } - - if ((pHba->lct->table_size << 2) > pHba->lct_size) { - pHba->lct_size = pHba->lct->table_size << 2; - dma_free_coherent(&pHba->pDev->dev, pHba->lct_size, - pHba->lct, pHba->lct_pa); - pHba->lct = NULL; - } - } while (pHba->lct == NULL); - - PDEBUG("%s: Hardware resource table read.\n", pHba->name); - - - // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO; - if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) { - pHba->FwDebugBufferSize = buf[1]; - pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0], - pHba->FwDebugBufferSize); - if (pHba->FwDebugBuffer_P) { - pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + - FW_DEBUG_FLAGS_OFFSET; - pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + - FW_DEBUG_BLED_OFFSET; - pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1; - pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + - FW_DEBUG_STR_LENGTH_OFFSET; - pHba->FwDebugBuffer_P += buf[2]; - pHba->FwDebugFlags = 0; - } - } - - return 0; -} - -static int adpt_i2o_build_sys_table(void) -{ - adpt_hba* pHba = hba_chain; - int count = 0; - - if (sys_tbl) - dma_free_coherent(&pHba->pDev->dev, sys_tbl_len, - sys_tbl, sys_tbl_pa); - - sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs - (hba_count) * sizeof(struct i2o_sys_tbl_entry); - - sys_tbl = dma_alloc_coherent(&pHba->pDev->dev, - sys_tbl_len, &sys_tbl_pa, GFP_KERNEL); - if (!sys_tbl) { - printk(KERN_WARNING "SysTab Set failed. Out of memory.\n"); - return -ENOMEM; - } - memset(sys_tbl, 0, sys_tbl_len); - - sys_tbl->num_entries = hba_count; - sys_tbl->version = I2OVERSION; - sys_tbl->change_ind = sys_tbl_ind++; - - for(pHba = hba_chain; pHba; pHba = pHba->next) { - u64 addr; - // Get updated Status Block so we have the latest information - if (adpt_i2o_status_get(pHba)) { - sys_tbl->num_entries--; - continue; // try next one - } - - sys_tbl->iops[count].org_id = pHba->status_block->org_id; - sys_tbl->iops[count].iop_id = pHba->unit + 2; - sys_tbl->iops[count].seg_num = 0; - sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version; - sys_tbl->iops[count].iop_state = pHba->status_block->iop_state; - sys_tbl->iops[count].msg_type = pHba->status_block->msg_type; - sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size; - sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ?? 
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities; - addr = pHba->base_addr_phys + 0x40; - sys_tbl->iops[count].inbound_low = dma_low(addr); - sys_tbl->iops[count].inbound_high = dma_high(addr); - - count++; - } - -#ifdef DEBUG -{ - u32 *table = (u32*)sys_tbl; - printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2)); - for(count = 0; count < (sys_tbl_len >>2); count++) { - printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", - count, table[count]); - } -} -#endif - - return 0; -} - - -/* - * Dump the information block associated with a given unit (TID) - */ - -static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d) -{ - char buf[64]; - int unit = d->lct_data.tid; - - printk(KERN_INFO "TID %3.3d ", unit); - - if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0) - { - buf[16]=0; - printk(" Vendor: %-12.12s", buf); - } - if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0) - { - buf[16]=0; - printk(" Device: %-12.12s", buf); - } - if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0) - { - buf[8]=0; - printk(" Rev: %-12.12s\n", buf); - } -#ifdef DEBUG - printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id)); - printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class); - printk(KERN_INFO "\tFlags: "); - - if(d->lct_data.device_flags&(1<<0)) - printk("C"); // ConfigDialog requested - if(d->lct_data.device_flags&(1<<1)) - printk("U"); // Multi-user capable - if(!(d->lct_data.device_flags&(1<<4))) - printk("P"); // Peer service enabled! - if(!(d->lct_data.device_flags&(1<<5))) - printk("M"); // Mgmt service enabled! - printk("\n"); -#endif -} - -#ifdef DEBUG -/* - * Do i2o class name lookup - */ -static const char *adpt_i2o_get_class_name(int class) -{ - int idx = 16; - static char *i2o_class_name[] = { - "Executive", - "Device Driver Module", - "Block Device", - "Tape Device", - "LAN Interface", - "WAN Interface", - "Fibre Channel Port", - "Fibre Channel Device", - "SCSI Device", - "ATE Port", - "ATE Device", - "Floppy Controller", - "Floppy Device", - "Secondary Bus Port", - "Peer Transport Agent", - "Peer Transport", - "Unknown" - }; - - switch(class&0xFFF) { - case I2O_CLASS_EXECUTIVE: - idx = 0; break; - case I2O_CLASS_DDM: - idx = 1; break; - case I2O_CLASS_RANDOM_BLOCK_STORAGE: - idx = 2; break; - case I2O_CLASS_SEQUENTIAL_STORAGE: - idx = 3; break; - case I2O_CLASS_LAN: - idx = 4; break; - case I2O_CLASS_WAN: - idx = 5; break; - case I2O_CLASS_FIBRE_CHANNEL_PORT: - idx = 6; break; - case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: - idx = 7; break; - case I2O_CLASS_SCSI_PERIPHERAL: - idx = 8; break; - case I2O_CLASS_ATE_PORT: - idx = 9; break; - case I2O_CLASS_ATE_PERIPHERAL: - idx = 10; break; - case I2O_CLASS_FLOPPY_CONTROLLER: - idx = 11; break; - case I2O_CLASS_FLOPPY_DEVICE: - idx = 12; break; - case I2O_CLASS_BUS_ADAPTER_PORT: - idx = 13; break; - case I2O_CLASS_PEER_TRANSPORT_AGENT: - idx = 14; break; - case I2O_CLASS_PEER_TRANSPORT: - idx = 15; break; - } - return i2o_class_name[idx]; -} -#endif - - -static s32 adpt_i2o_hrt_get(adpt_hba* pHba) -{ - u32 msg[6]; - int ret, size = sizeof(i2o_hrt); - - do { - if (pHba->hrt == NULL) { - pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev, - size, &pHba->hrt_pa, GFP_KERNEL); - if (pHba->hrt == NULL) { - printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name); - return -ENOMEM; - } - } - - msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4; - msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2]= 0; - msg[3]= 0; - msg[4]= 
(0xD0000000 | size); /* Simple transaction */ - msg[5]= (u32)pHba->hrt_pa; /* Dump it here */ - - if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) { - printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret); - return ret; - } - - if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) { - int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2; - dma_free_coherent(&pHba->pDev->dev, size, - pHba->hrt, pHba->hrt_pa); - size = newsize; - pHba->hrt = NULL; - } - } while(pHba->hrt == NULL); - return 0; -} - -/* - * Query one scalar group value or a whole scalar group. - */ -static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, - int group, int field, void *buf, int buflen) -{ - u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; - u8 *opblk_va; - dma_addr_t opblk_pa; - u8 *resblk_va; - dma_addr_t resblk_pa; - - int size; - - /* 8 bytes for header */ - resblk_va = dma_alloc_coherent(&pHba->pDev->dev, - sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL); - if (resblk_va == NULL) { - printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name); - return -ENOMEM; - } - - opblk_va = dma_alloc_coherent(&pHba->pDev->dev, - sizeof(opblk), &opblk_pa, GFP_KERNEL); - if (opblk_va == NULL) { - dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen), - resblk_va, resblk_pa); - printk(KERN_CRIT "%s: query operation failed; Out of memory.\n", - pHba->name); - return -ENOMEM; - } - if (field == -1) /* whole group */ - opblk[4] = -1; - - memcpy(opblk_va, opblk, sizeof(opblk)); - size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, - opblk_va, opblk_pa, sizeof(opblk), - resblk_va, resblk_pa, sizeof(u8)*(8+buflen)); - dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa); - if (size == -ETIME) { - dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen), - resblk_va, resblk_pa); - printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name); - return -ETIME; - } else if (size == -EINTR) { - dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen), - resblk_va, resblk_pa); - printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name); - return -EINTR; - } - - memcpy(buf, resblk_va+8, buflen); /* cut off header */ - - dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen), - resblk_va, resblk_pa); - if (size < 0) - return size; - - return buflen; -} - - -/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET - * - * This function can be used for all UtilParamsGet/Set operations. - * The OperationBlock is given in opblk-buffer, - * and results are returned in resblk-buffer. - * Note that the minimum sized resblk is 8 bytes and contains - * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 
- */ -static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, - void *opblk_va, dma_addr_t opblk_pa, int oplen, - void *resblk_va, dma_addr_t resblk_pa, int reslen) -{ - u32 msg[9]; - u32 *res = (u32 *)resblk_va; - int wait_status; - - msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; - msg[1] = cmd << 24 | HOST_TID << 12 | tid; - msg[2] = 0; - msg[3] = 0; - msg[4] = 0; - msg[5] = 0x54000000 | oplen; /* OperationBlock */ - msg[6] = (u32)opblk_pa; - msg[7] = 0xD0000000 | reslen; /* ResultBlock */ - msg[8] = (u32)resblk_pa; - - if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) { - printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va); - return wait_status; /* -DetailedStatus */ - } - - if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */ - printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, " - "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", - pHba->name, - (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" - : "PARAMS_GET", - res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF); - return -((res[1] >> 16) & 0xFF); /* -BlockStatus */ - } - - return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ -} - - -static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba) -{ - u32 msg[4]; - int ret; - - adpt_i2o_status_get(pHba); - - /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ - - if((pHba->status_block->iop_state != ADAPTER_STATE_READY) && - (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){ - return 0; - } - - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID; - msg[2] = 0; - msg[3] = 0; - - if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) { - printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n", - pHba->unit, -ret); - } else { - printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit); - } - - adpt_i2o_status_get(pHba); - return ret; -} - - -/* - * Enable IOP. Allows the IOP to resume external operations. 
- */ -static int adpt_i2o_enable_hba(adpt_hba* pHba) -{ - u32 msg[4]; - int ret; - - adpt_i2o_status_get(pHba); - if(!pHba->status_block){ - return -ENOMEM; - } - /* Enable only allowed on READY state */ - if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL) - return 0; - - if(pHba->status_block->iop_state != ADAPTER_STATE_READY) - return -EINVAL; - - msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID; - msg[2]= 0; - msg[3]= 0; - - if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) { - printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", - pHba->name, ret); - } else { - PDEBUG("%s: Enabled.\n", pHba->name); - } - - adpt_i2o_status_get(pHba); - return ret; -} - - -static int adpt_i2o_systab_send(adpt_hba* pHba) -{ - u32 msg[12]; - int ret; - - msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6; - msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2] = 0; - msg[3] = 0; - msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */ - msg[5] = 0; /* Segment 0 */ - - /* - * Provide three SGL-elements: - * System table (SysTab), Private memory space declaration and - * Private i/o space declaration - */ - msg[6] = 0x54000000 | sys_tbl_len; - msg[7] = (u32)sys_tbl_pa; - msg[8] = 0x54000000 | 0; - msg[9] = 0; - msg[10] = 0xD4000000 | 0; - msg[11] = 0; - - if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) { - printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", - pHba->name, ret); - } -#ifdef DEBUG - else { - PINFO("%s: SysTab set.\n", pHba->name); - } -#endif - - return ret; -} - - -/*============================================================================ - * - *============================================================================ - */ - - -#ifdef UARTDELAY - -static static void adpt_delay(int millisec) -{ - int i; - for (i = 0; i < millisec; i++) { - udelay(1000); /* delay for one millisecond */ - } -} - -#endif - -static struct scsi_host_template driver_template = { - .module = THIS_MODULE, - .name = "dpt_i2o", - .proc_name = "dpt_i2o", - .show_info = adpt_show_info, - .info = adpt_info, - .queuecommand = adpt_queue, - .eh_abort_handler = adpt_abort, - .eh_device_reset_handler = adpt_device_reset, - .eh_bus_reset_handler = adpt_bus_reset, - .eh_host_reset_handler = adpt_reset, - .bios_param = adpt_bios_param, - .slave_configure = adpt_slave_configure, - .can_queue = MAX_TO_IOP_MESSAGES, - .this_id = 7, - .use_clustering = ENABLE_CLUSTERING, -}; - -static int __init adpt_init(void) -{ - int error; - adpt_hba *pHba, *next; - - printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n"); - - error = adpt_detect(&driver_template); - if (error < 0) - return error; - if (hba_chain == NULL) - return -ENODEV; - - for (pHba = hba_chain; pHba; pHba = pHba->next) { - error = scsi_add_host(pHba->host, &pHba->pDev->dev); - if (error) - goto fail; - scsi_scan_host(pHba->host); - } - return 0; -fail: - for (pHba = hba_chain; pHba; pHba = next) { - next = pHba->next; - scsi_remove_host(pHba->host); - } - return error; -} - -static void __exit adpt_exit(void) -{ - adpt_hba *pHba, *next; - - for (pHba = hba_chain; pHba; pHba = next) { - next = pHba->next; - adpt_release(pHba); - } -} - -module_init(adpt_init); -module_exit(adpt_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h deleted file mode 100644 index dfc8d2eaa09e4b2ac3f178aeb634bc91884856af..0000000000000000000000000000000000000000 --- a/drivers/scsi/dpti.h +++ /dev/null @@ -1,335 +0,0 
@@ -/*************************************************************************** - dpti.h - description - ------------------- - begin : Thu Sep 7 2000 - copyright : (C) 2001 by Adaptec - - See Documentation/scsi/dpti.txt for history, notes, license info - and credits - ***************************************************************************/ - -/*************************************************************************** - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License as published by * - * the Free Software Foundation; either version 2 of the License, or * - * (at your option) any later version. * - * * - ***************************************************************************/ - -#ifndef _DPT_H -#define _DPT_H - -#define MAX_TO_IOP_MESSAGES (255) -#define MAX_FROM_IOP_MESSAGES (255) - - -/* - * SCSI interface function Prototypes - */ - -static int adpt_detect(struct scsi_host_template * sht); -static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd); -static int adpt_abort(struct scsi_cmnd * cmd); -static int adpt_reset(struct scsi_cmnd* cmd); -static int adpt_slave_configure(struct scsi_device *); - -static const char *adpt_info(struct Scsi_Host *pSHost); -static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev, - sector_t, int geom[]); - -static int adpt_bus_reset(struct scsi_cmnd* cmd); -static int adpt_device_reset(struct scsi_cmnd* cmd); - - -/* - * struct scsi_host_template (see scsi/scsi_host.h) - */ - -#define DPT_DRIVER_NAME "Adaptec I2O RAID" - -#ifndef HOSTS_C - -#include "dpt/sys_info.h" -#include -#include "dpt/dpti_i2o.h" -#include "dpt/dpti_ioctl.h" - -#define DPT_I2O_VERSION "2.4 Build 5go" -#define DPT_VERSION 2 -#define DPT_REVISION '4' -#define DPT_SUBREVISION '5' -#define DPT_BETA "" -#define DPT_MONTH 8 -#define DPT_DAY 7 -#define DPT_YEAR (2001-1980) - -#define DPT_DRIVER "dpt_i2o" -#define DPTI_I2O_MAJOR (151) -#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */ -#define DPTI_MAX_HBA (16) -#define MAX_CHANNEL (5) // Maximum Channel # Supported -#define MAX_ID (128) // Maximum Target ID Supported - -/* Sizes in 4 byte words */ -#define REPLY_FRAME_SIZE (17) -#define MAX_MESSAGE_SIZE (128) -#define SG_LIST_ELEMENTS (56) - -#define EMPTY_QUEUE 0xffffffff -#define I2O_INTERRUPT_PENDING_B (0x08) - -#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID -#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID -#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511) - -/* Debugging macro from Linux Device Drivers - Rubini */ -#undef PDEBUG -#ifdef DEBUG -//TODO add debug level switch -# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args) -# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args) -#else -# define PDEBUG(fmt, args...) /* not debugging: nothing */ -# define PDEBUGV(fmt, args...) /* not debugging: nothing */ -#endif - -#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args) -#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args) -#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args) -#define PCRIT(fmt, args...) 
printk(KERN_CRIT fmt, ##args) - -#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM)) - -// Command timeouts -#define FOREVER (0) -#define TMOUT_INQUIRY (20) -#define TMOUT_FLUSH (360/45) -#define TMOUT_ABORT (30) -#define TMOUT_SCSI (300) -#define TMOUT_IOPRESET (360) -#define TMOUT_GETSTATUS (15) -#define TMOUT_INITOUTBOUND (15) -#define TMOUT_LCT (360) - - -#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF - -#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A - -#define I2O_SCSI_DSC_MASK 0xFF00 -#define I2O_SCSI_DSC_SUCCESS 0x0000 -#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200 -#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300 -#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400 -#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500 -#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600 -#define I2O_SCSI_DSC_PATH_INVALID 0x0700 -#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800 -#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900 -#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00 -#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00 -#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00 -#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00 -#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00 -#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000 -#define I2O_SCSI_DSC_NO_ADAPTER 0x1100 -#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200 -#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300 -#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400 -#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500 -#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600 -#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700 -#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800 -#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300 -#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400 -#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500 -#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600 -#define I2O_SCSI_DSC_INVALID_CDB 0x3700 -#define I2O_SCSI_DSC_LUN_INVALID 0x3800 -#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900 -#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00 -#define I2O_SCSI_DSC_NO_NEXUS 0x3B00 -#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00 -#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00 -#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00 -#define I2O_SCSI_DSC_BUS_BUSY 0x3F00 -#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000 - - -#ifndef TRUE -#define TRUE 1 -#define FALSE 0 -#endif - -#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed -#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State -#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */ -#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */ - - -// Device state flags -#define DPTI_DEV_ONLINE 0x00 -#define DPTI_DEV_UNSCANNED 0x01 -#define DPTI_DEV_RESET 0x02 -#define DPTI_DEV_OFFLINE 0x04 - - -struct adpt_device { - struct adpt_device* next_lun; - u32 flags; - u32 type; - u32 capacity; - u32 block_size; - u8 scsi_channel; - u8 scsi_id; - u64 scsi_lun; - u8 state; - u16 tid; - struct i2o_device* pI2o_dev; - struct scsi_device *pScsi_dev; -}; - -struct adpt_channel { - struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */ - u8 scsi_id; - u8 type; - u16 tid; - u32 state; - struct i2o_device* pI2o_dev; -}; - -// HBA state flags -#define DPTI_STATE_RESET (0x01) - -typedef struct _adpt_hba { - struct _adpt_hba *next; - struct pci_dev *pDev; - struct Scsi_Host *host; - u32 state; - spinlock_t state_lock; - int unit; - int host_no; /* SCSI host number */ - u8 initialized; - u8 in_use; /* is the management node open*/ - - char name[32]; - char detail[55]; - - void __iomem *base_addr_virt; - void __iomem *msg_addr_virt; - ulong base_addr_phys; - void 
__iomem *post_port; - void __iomem *reply_port; - void __iomem *irq_mask; - u16 post_count; - u32 post_fifo_size; - u32 reply_fifo_size; - u32* reply_pool; - dma_addr_t reply_pool_pa; - u32 sg_tablesize; // Scatter/Gather List Size. - u8 top_scsi_channel; - u8 top_scsi_id; - u64 top_scsi_lun; - u8 dma64; - - i2o_status_block* status_block; - dma_addr_t status_block_pa; - i2o_hrt* hrt; - dma_addr_t hrt_pa; - i2o_lct* lct; - dma_addr_t lct_pa; - uint lct_size; - struct i2o_device* devices; - struct adpt_channel channel[MAX_CHANNEL]; - struct proc_dir_entry* proc_entry; /* /proc dir */ - - void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer - u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes - void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len - void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags - void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED - void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED - u32 FwDebugFlags; - u32 *ioctl_reply_context[4]; -} adpt_hba; - -struct sg_simple_element { - u32 flag_count; - u32 addr_bus; -}; - -/* - * Function Prototypes - */ - -static void adpt_i2o_sys_shutdown(void); -static int adpt_init(void); -static int adpt_i2o_build_sys_table(void); -static irqreturn_t adpt_isr(int irq, void *dev_id); - -static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d); -static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, - int group, int field, void *buf, int buflen); -#ifdef DEBUG -static const char *adpt_i2o_get_class_name(int class); -#endif -static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, - void *opblk, dma_addr_t opblk_pa, int oplen, - void *resblk, dma_addr_t resblk_pa, int reslen); -static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout); -static int adpt_i2o_lct_get(adpt_hba* pHba); -static int adpt_i2o_parse_lct(adpt_hba* pHba); -static int adpt_i2o_activate_hba(adpt_hba* pHba); -static int adpt_i2o_enable_hba(adpt_hba* pHba); -static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d); -static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len); -static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba); -static s32 adpt_i2o_status_get(adpt_hba* pHba); -static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba); -static s32 adpt_i2o_hrt_get(adpt_hba* pHba); -static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice); -static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd); -static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht); -static s32 adpt_hba_reset(adpt_hba* pHba); -static s32 adpt_i2o_reset_hba(adpt_hba* pHba); -static s32 adpt_rescan(adpt_hba* pHba); -static s32 adpt_i2o_reparse_lct(adpt_hba* pHba); -static s32 adpt_send_nop(adpt_hba*pHba,u32 m); -static void adpt_i2o_delete_hba(adpt_hba* pHba); -static void adpt_inquiry(adpt_hba* pHba); -static void adpt_fail_posted_scbs(adpt_hba* pHba); -static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun); -static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ; -static int adpt_i2o_online_hba(adpt_hba* pHba); -static void adpt_i2o_post_wait_complete(u32, int); -static int adpt_i2o_systab_send(adpt_hba* pHba); - -static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg); -static int adpt_open(struct inode *inode, struct file *file); -static int adpt_close(struct inode *inode, struct file *file); - 
- -#ifdef UARTDELAY -static void adpt_delay(int millisec); -#endif - -#define PRINT_BUFFER_SIZE 512 - -#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags -#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print -#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print -#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point -#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit -#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions -#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints -#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info -#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan - -#define FW_DEBUG_STR_LENGTH_OFFSET 0 -#define FW_DEBUG_FLAGS_OFFSET 4 -#define FW_DEBUG_BLED_OFFSET 8 - -#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01 -#endif /* !HOSTS_C */ -#endif /* _DPT_H */ diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 858c3b33db78bc37fb929473d61506288b08afff..7f43b95f4e945f04f168179f41210617c5ad52fc 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -965,8 +965,8 @@ struct esas2r_adapter { const char *esas2r_info(struct Scsi_Host *); int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *data); -int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg); -int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg); +int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); u8 handle_hba_ioctl(struct esas2r_adapter *a, struct atto_ioctl *ioctl_hba); int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 34bcc8c04ff4d823259ed8abb2a403d7bcb0405b..3d130523c2889a7208054f53535737d36aa14f5d 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1274,7 +1274,7 @@ int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, /* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ -int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg) { struct atto_express_ioctl *ioctl = NULL; struct esas2r_adapter *a; @@ -1292,9 +1292,8 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl)); if (IS_ERR(ioctl)) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler access_ok failed for cmd %d, " - "address %p", cmd, - arg); + "ioctl_handler access_ok failed for cmd %u, address %p", + cmd, arg); return PTR_ERR(ioctl); } @@ -1493,7 +1492,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl_done: if (err < 0) { - esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err, + esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err, cmd); switch (err) { @@ -1518,9 +1517,8 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl)); if (err != 0) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler copy_to_user didn't copy " - "everything (err %d, cmd %d)", err, - cmd); + "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)", + err, cmd); kfree(ioctl); return -EFAULT; @@ -1531,7 +1529,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void 
__user *arg) return 0; } -int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg) +int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg) { return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg); } diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index c07118617d89647ecb777ab4e61bca1811fd3531..25dc8d44ed8e6d44d5dfe984d50b44a8770f52fd 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -624,7 +624,7 @@ static int esas2r_proc_major; long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { return esas2r_ioctl_handler(esas2r_proc_host->hostdata, - (int)cmd, (void __user *)arg); + cmd, (void __user *)arg); } static void __exit esas2r_exit(void) diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index c3fc34b9964df75a610dd70a74bd1ca23ee147b8..9e5d3f7d29ae7ec47c8a70bc984de1bea6c92adb 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -1338,6 +1338,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, bytes_sent = esp->data_dma_len; bytes_sent -= ecount; + bytes_sent -= esp->send_cmd_residual; /* * The am53c974 has a DMA 'pecularity'. The doc states: diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 8163dca2071bf659da9073576c3824edd9d9c7bb..a77772777a3092793891df99f4b9d33cccb7221c 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -540,6 +540,8 @@ struct esp { void *dma; int dmarev; + + u32 send_cmd_residual; }; /* A front-end driver for the ESP chip should do the following in diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index f46b312d04bc9c27c9d404c373222a8f3ff51cd5..6768b2e8148a2db0100ff92d0b4067dc58be770a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -390,7 +390,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, * Returns: pointer to a struct fcoe_interface or NULL on error */ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, - enum fip_state fip_mode) + enum fip_mode fip_mode) { struct fcoe_ctlr_device *ctlr_dev; struct fcoe_ctlr *ctlr; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 54da3166da8d71e207620186d064e1ad6e674886..658c0726581f930b6153a5b6edbd732864af480c 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) * fcoe_ctlr_init() - Initialize the FCoE Controller instance * @fip: The FCoE controller to initialize */ -void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; @@ -267,9 +267,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** @@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { - fcoe_ctlr_set_state(fip, fip->mode); + if (fip->mode == FIP_MODE_NON_FIP) + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + else + fcoe_ctlr_set_state(fip, FIP_ST_AUTO); switch (fip->mode) { default: LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); @@ -2014,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); */ static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv 
*rdata) { - return (struct fcoe_rport *)(rdata + 1); + return container_of(rdata, struct fcoe_rport, rdata); } /** @@ -2278,7 +2281,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) */ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; @@ -2286,16 +2289,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, struct fip_wwn_desc *wwn = NULL; struct fip_vn_desc *vn = NULL; struct fip_size_desc *size = NULL; - struct fcoe_rport *frport; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = (struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2358,15 +2357,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; case FIP_DT_VN_ID: if (dlen != sizeof(struct fip_vn_desc)) goto len_err; vn = (struct fip_vn_desc *)desc; memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); - rdata->ids.port_id = ntoh24(vn->fd_fc_id); - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id); + frport->rdata.ids.port_name = + get_unaligned_be64(&vn->fd_wwpn); break; case FIP_DT_FC4F: if (dlen != sizeof(struct fip_fc4_feat)) @@ -2747,10 +2748,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vn2vn_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc, vlan_id = 0; fiph = (struct fip_header *)skb->data; @@ -2766,7 +2764,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; } - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vn_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); goto drop; @@ -2775,19 +2773,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) mutex_lock(&fip->ctlr_mutex); switch (sub) { case FIP_SC_VN_PROBE_REQ: - fcoe_ctlr_vn_probe_req(fip, &buf.rdata); + fcoe_ctlr_vn_probe_req(fip, &frport.rdata); break; case FIP_SC_VN_PROBE_REP: - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); + fcoe_ctlr_vn_probe_reply(fip, &frport.rdata); break; case FIP_SC_VN_CLAIM_NOTIFY: - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); + fcoe_ctlr_vn_claim_notify(fip, &frport.rdata); break; case FIP_SC_VN_CLAIM_REP: - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); + fcoe_ctlr_vn_claim_resp(fip, &frport.rdata); break; case FIP_SC_VN_BEACON: - fcoe_ctlr_vn_beacon(fip, &buf.rdata); + fcoe_ctlr_vn_beacon(fip, &frport.rdata); break; default: LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); @@ -2811,22 +2809,18 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) */ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, struct sk_buff *skb, - struct fc_rport_priv *rdata) + struct fcoe_rport *frport) { struct fip_header *fiph; struct fip_desc *desc = NULL; struct fip_mac_desc *macd = NULL; struct fip_wwn_desc *wwn = NULL; - struct fcoe_rport *frport; size_t rlen; size_t dlen; u32 desc_mask = 0; u32 dtype; u8 sub; - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); - frport = fcoe_ctlr_rport(rdata); - fiph = 
(struct fip_header *)skb->data; frport->flags = ntohs(fiph->fip_flags); @@ -2880,7 +2874,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, if (dlen != sizeof(struct fip_wwn_desc)) goto len_err; wwn = (struct fip_wwn_desc *)desc; - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + frport->rdata.ids.node_name = + get_unaligned_be64(&wwn->fd_wwn); break; default: LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " @@ -2991,22 +2986,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vlan_subcode sub; - struct { - struct fc_rport_priv rdata; - struct fcoe_rport frport; - } buf; + struct fcoe_rport frport = { }; int rc; fiph = (struct fip_header *)skb->data; sub = fiph->fip_subcode; - rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport); if (rc) { LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); goto drop; } mutex_lock(&fip->ctlr_mutex); if (sub == FIP_SC_VL_REQ) - fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata); mutex_unlock(&fip->ctlr_mutex); drop: diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index f4909cd206d3e69afd042cff5865fc32a94c056c..f15d5e1d56b1935cb00b4f5efe06b5e5f013a6e3 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -873,7 +873,7 @@ static int fcoe_transport_create(const char *buffer, int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; - enum fip_state fip_mode = (enum fip_state)(long)kp->arg; + enum fip_mode fip_mode = (enum fip_mode)kp->arg; mutex_lock(&ft_mutex); diff --git a/drivers/scsi/hisi_raid/Kconfig b/drivers/scsi/hisi_raid/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..d402dc45a7c15643dbf94da6a5719610180e47b8 --- /dev/null +++ b/drivers/scsi/hisi_raid/Kconfig @@ -0,0 +1,14 @@ +# +# Kernel configuration file for the hisi_raid +# + +config SCSI_HISI_RAID + tristate "Huawei Hisi_Raid Adapter" + depends on PCI && SCSI + select BLK_DEV_BSGLIB + depends on ARM64 || X86_64 + help + This driver supports hisi_raid SPxxx serial RAID controller, which has + PCI Express Gen4 interface with host and supports SAS/SATA HDD/SSD. + To compile this driver as a module, choose M here: the module will + be called hisi_raid. diff --git a/drivers/scsi/hisi_raid/Makefile b/drivers/scsi/hisi_raid/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b71a675f4190495f19fb63751ba8e0606a43a245 --- /dev/null +++ b/drivers/scsi/hisi_raid/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the hisi_raid drivers. 
+# + +obj-$(CONFIG_SCSI_HISI_RAID) += hiraid.o + +hiraid-objs := hiraid_main.o diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h new file mode 100644 index 0000000000000000000000000000000000000000..1ebc3dd3f2ec89e5923f38c3c14ea3acc0a17ef3 --- /dev/null +++ b/drivers/scsi/hisi_raid/hiraid.h @@ -0,0 +1,760 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 Huawei Technologies Co., Ltd */ + +#ifndef __HIRAID_H_ +#define __HIRAID_H_ + +#define HIRAID_HDD_PD_QD 64 +#define HIRAID_HDD_VD_QD 256 +#define HIRAID_SSD_PD_QD 64 +#define HIRAID_SSD_VD_QD 256 + +#define BGTASK_TYPE_REBUILD 4 +#define USR_CMD_READ 0xc2 +#define USR_CMD_RDLEN 0x1000 +#define USR_CMD_VDINFO 0x704 +#define USR_CMD_BGTASK 0x504 +#define VDINFO_PARAM_LEN 0x04 + +#define HIRAID_DEFAULT_MAX_CHANNEL 4 +#define HIRAID_DEFAULT_MAX_ID 240 +#define HIRAID_DEFAULT_MAX_LUN_PER_HOST 8 + +#define FUA_MASK 0x08 + +#define HIRAID_IO_SQES 7 +#define HIRAID_IO_CQES 4 +#define PRP_ENTRY_SIZE 8 + +#define EXTRA_POOL_SIZE 256 +#define MAX_EXTRA_POOL_NUM 16 +#define MAX_CMD_PER_DEV 64 +#define MAX_CDB_LEN 16 + +#define HIRAID_AQ_DEPTH 128 +#define HIRAID_ASYN_COMMANDS 16 +#define HIRAID_AQ_BLK_MQ_DEPTH (HIRAID_AQ_DEPTH - HIRAID_ASYN_COMMANDS) +#define HIRAID_AQ_MQ_TAG_DEPTH (HIRAID_AQ_BLK_MQ_DEPTH - 1) + +#define HIRAID_ADMIN_QUEUE_NUM 1 +#define HIRAID_PTHRU_CMDS_PERQ 1 +#define HIRAID_TOTAL_PTCMDS(qn) (HIRAID_PTHRU_CMDS_PERQ * (qn)) + +#define HIRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01) +#define HIRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0) +#define HIRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02) +#define HIRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20) +#define HIRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e) + +#define HIRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01) +#define HIRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02) + +#define HIRAID_CAP_MQES(cap) ((cap) & 0xffff) +#define HIRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) +#define HIRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define HIRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) +#define HIRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) +#define HIRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff) + +#define IO_SQE_SIZE sizeof(struct hiraid_scsi_io_cmd) +#define ADMIN_SQE_SIZE sizeof(struct hiraid_admin_command) +#define SQE_SIZE(qid) (((qid) > 0) ? 
IO_SQE_SIZE : ADMIN_SQE_SIZE) +#define CQ_SIZE(depth) ((depth) * sizeof(struct hiraid_completion)) +#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid)) + +#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE) + +#define IO_6_DEFAULT_TX_LEN 256 + +#define MAX_DEV_ENTRY_PER_PAGE_4K 340 + +#define MAX_REALTIME_BGTASK_NUM 32 + +#define PCI_VENDOR_ID_HUAWEI_LOGIC 0x19E5 +#define HIRAID_SERVER_DEVICE_HBA_DID 0x3858 +#define HIRAID_SERVER_DEVICE_RAID_DID 0x3758 + +enum { + HIRAID_SC_SUCCESS = 0x0, + HIRAID_SC_INVALID_OPCODE = 0x1, + HIRAID_SC_INVALID_FIELD = 0x2, + + HIRAID_SC_ABORT_LIMIT = 0x103, + HIRAID_SC_ABORT_MISSING = 0x104, + HIRAID_SC_ASYNC_LIMIT = 0x105, + + HIRAID_SC_DNR = 0x4000, +}; + +enum { + HIRAID_REG_CAP = 0x0000, + HIRAID_REG_CC = 0x0014, + HIRAID_REG_CSTS = 0x001c, + HIRAID_REG_AQA = 0x0024, + HIRAID_REG_ASQ = 0x0028, + HIRAID_REG_ACQ = 0x0030, + HIRAID_REG_DBS = 0x1000, +}; + +enum { + HIRAID_CC_ENABLE = 1 << 0, + HIRAID_CC_CSS_NVM = 0 << 4, + HIRAID_CC_MPS_SHIFT = 7, + HIRAID_CC_AMS_SHIFT = 11, + HIRAID_CC_SHN_SHIFT = 14, + HIRAID_CC_IOSQES_SHIFT = 16, + HIRAID_CC_IOCQES_SHIFT = 20, + HIRAID_CC_AMS_RR = 0 << HIRAID_CC_AMS_SHIFT, + HIRAID_CC_SHN_NONE = 0 << HIRAID_CC_SHN_SHIFT, + HIRAID_CC_IOSQES = HIRAID_IO_SQES << HIRAID_CC_IOSQES_SHIFT, + HIRAID_CC_IOCQES = HIRAID_IO_CQES << HIRAID_CC_IOCQES_SHIFT, + HIRAID_CC_SHN_NORMAL = 1 << HIRAID_CC_SHN_SHIFT, + HIRAID_CC_SHN_MASK = 3 << HIRAID_CC_SHN_SHIFT, + HIRAID_CSTS_CFS_SHIFT = 1, + HIRAID_CSTS_SHST_SHIFT = 2, + HIRAID_CSTS_PP_SHIFT = 5, + HIRAID_CSTS_RDY = 1 << 0, + HIRAID_CSTS_SHST_CMPLT = 2 << 2, + HIRAID_CSTS_SHST_MASK = 3 << 2, + HIRAID_CSTS_CFS_MASK = 1 << HIRAID_CSTS_CFS_SHIFT, + HIRAID_CSTS_PP_MASK = 1 << HIRAID_CSTS_PP_SHIFT, +}; + +enum { + HIRAID_ADMIN_DELETE_SQ = 0x00, + HIRAID_ADMIN_CREATE_SQ = 0x01, + HIRAID_ADMIN_DELETE_CQ = 0x04, + HIRAID_ADMIN_CREATE_CQ = 0x05, + HIRAID_ADMIN_ABORT_CMD = 0x08, + HIRAID_ADMIN_SET_FEATURES = 0x09, + HIRAID_ADMIN_ASYNC_EVENT = 0x0c, + HIRAID_ADMIN_GET_INFO = 0xc6, + HIRAID_ADMIN_RESET = 0xc8, +}; + +enum { + HIRAID_GET_CTRL_INFO = 0, + HIRAID_GET_DEVLIST_INFO = 1, +}; + +enum hiraid_rst_type { + HIRAID_RESET_TARGET = 0, + HIRAID_RESET_BUS = 1, +}; + +enum { + HIRAID_ASYN_EVENT_ERROR = 0, + HIRAID_ASYN_EVENT_NOTICE = 2, + HIRAID_ASYN_EVENT_VS = 7, +}; + +enum { + HIRAID_ASYN_DEV_CHANGED = 0x00, + HIRAID_ASYN_FW_ACT_START = 0x01, + HIRAID_ASYN_HOST_PROBING = 0x10, +}; + +enum { + HIRAID_ASYN_TIMESYN = 0x00, + HIRAID_ASYN_FW_ACT_FINISH = 0x02, + HIRAID_ASYN_EVENT_MIN = 0x80, + HIRAID_ASYN_EVENT_MAX = 0xff, +}; + +enum { + HIRAID_CMD_WRITE = 0x01, + HIRAID_CMD_READ = 0x02, + + HIRAID_CMD_NONRW_NONE = 0x80, + HIRAID_CMD_NONRW_TODEV = 0x81, + HIRAID_CMD_NONRW_FROMDEV = 0x82, +}; + +enum { + HIRAID_QUEUE_PHYS_CONTIG = (1 << 0), + HIRAID_CQ_IRQ_ENABLED = (1 << 1), + + HIRAID_FEATURE_NUM_QUEUES = 0x07, + HIRAID_FEATURE_ASYNC_EVENT = 0x0b, + HIRAID_FEATURE_TIMESTAMP = 0x0e, +}; + +enum hiraid_dev_state { + DEV_NEW, + DEV_LIVE, + DEV_RESETTING, + DEV_DELETING, + DEV_DEAD, +}; + +enum { + HIRAID_CARD_HBA, + HIRAID_CARD_RAID, +}; + +enum hiraid_cmd_type { + HIRAID_CMD_ADMIN, + HIRAID_CMD_PTHRU, +}; + +enum { + SQE_FLAG_SGL_METABUF = (1 << 6), + SQE_FLAG_SGL_METASEG = (1 << 7), + SQE_FLAG_SGL_ALL = SQE_FLAG_SGL_METABUF | SQE_FLAG_SGL_METASEG, +}; + +enum hiraid_cmd_state { + CMD_IDLE = 0, + CMD_FLIGHT = 1, + CMD_COMPLETE = 2, + CMD_TIMEOUT = 3, + CMD_TMO_COMPLETE = 4, +}; + +enum { + HIRAID_BSG_ADMIN, + HIRAID_BSG_IOPTHRU, +}; + +enum { + HIRAID_SAS_HDD_VD = 0x04, + HIRAID_SATA_HDD_VD = 
0x08, + HIRAID_SAS_SSD_VD = 0x0c, + HIRAID_SATA_SSD_VD = 0x10, + HIRAID_NVME_SSD_VD = 0x14, + HIRAID_SAS_HDD_PD = 0x06, + HIRAID_SATA_HDD_PD = 0x0a, + HIRAID_SAS_SSD_PD = 0x0e, + HIRAID_SATA_SSD_PD = 0x12, + HIRAID_NVME_SSD_PD = 0x16, +}; + +enum { + DISPATCH_BY_CPU, + DISPATCH_BY_DISK, +}; + +struct hiraid_completion { + __le32 result; + union { + struct { + __u8 sense_len; + __u8 resv[3]; + }; + __le32 result1; + }; + __le16 sq_head; + __le16 sq_id; + __le16 cmd_id; + __le16 status; +}; + +struct hiraid_ctrl_info { + __le32 nd; + __le16 max_cmds; + __le16 max_channel; + __le32 max_tgt_id; + __le16 max_lun; + __le16 max_num_sge; + __le16 lun_num_boot; + __u8 mdts; + __u8 acl; + __u8 asynevent; + __u8 card_type; + __u8 pt_use_sgl; + __u8 rsvd; + __le32 rtd3e; + __u8 sn[32]; + __u8 fw_version[16]; + __u8 rsvd1[4020]; +}; + +struct hiraid_dev { + struct pci_dev *pdev; + struct device *dev; + struct Scsi_Host *shost; + struct hiraid_queue *queues; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_extra_pool[MAX_EXTRA_POOL_NUM]; + void __iomem *bar; + u32 max_qid; + u32 num_vecs; + u32 queue_count; + u32 ioq_depth; + u32 db_stride; + u32 __iomem *dbs; + struct rw_semaphore dev_rwsem; + int numa_node; + u32 page_size; + u32 ctrl_config; + u32 online_queues; + u64 cap; + u32 scsi_qd; + u32 instance; + struct hiraid_ctrl_info *ctrl_info; + struct hiraid_dev_info *dev_info; + + struct hiraid_cmd *adm_cmds; + struct list_head adm_cmd_list; + spinlock_t adm_cmd_lock; + + struct hiraid_cmd *io_ptcmds; + struct list_head io_pt_list; + spinlock_t io_pt_lock; + + struct work_struct scan_work; + struct work_struct timesyn_work; + struct work_struct reset_work; + struct work_struct fwact_work; + + enum hiraid_dev_state state; + spinlock_t state_lock; + + void *sense_buffer_virt; + dma_addr_t sense_buffer_phy; + u32 last_qcnt; + u8 hdd_dispatch; + + struct request_queue *bsg_queue; +}; + +struct hiraid_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +union hiraid_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct hiraid_sgl_desc sgl; +}; + +struct hiraid_admin_com_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __le32 cdw2[4]; + union hiraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +struct hiraid_features { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __u64 rsvd2[2]; + union hiraid_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct hiraid_create_cq { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct hiraid_create_sq { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct hiraid_delete_queue { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct hiraid_get_info { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __u32 rsvd2[4]; + union hiraid_data_ptr dptr; + __u8 type; + __u8 rsvd10[3]; + __le32 cdw11; + __u32 rsvd12[4]; +}; + +struct hiraid_usr_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + union { + struct { + __le16 subopcode; + __le16 rsvd1; + } info_0; + __le32 cdw2; + 
}; + union { + struct { + __le16 data_len; + __le16 param_len; + } info_1; + __le32 cdw3; + }; + __u64 metadata; + union hiraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +struct hiraid_abort_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __u64 rsvd2[4]; + __le16 sqid; + __le16 cid; + __u32 rsvd11[5]; +}; + +struct hiraid_reset_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __u64 rsvd2[4]; + __u8 type; + __u8 rsvd10[3]; + __u32 rsvd11[5]; +}; + +struct hiraid_admin_command { + union { + struct hiraid_admin_com_cmd common; + struct hiraid_features features; + struct hiraid_create_cq create_cq; + struct hiraid_create_sq create_sq; + struct hiraid_delete_queue delete_queue; + struct hiraid_get_info get_info; + struct hiraid_abort_cmd abort; + struct hiraid_reset_cmd reset; + struct hiraid_usr_cmd usr_cmd; + }; +}; + +struct hiraid_scsi_io_com_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __le32 cdw3[3]; + union hiraid_data_ptr dptr; + __le32 cdw10[6]; + __u8 cdb[32]; + __le64 sense_addr; + __le32 cdw26[6]; +}; + +struct hiraid_scsi_rw_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __u32 rsvd3[3]; + union hiraid_data_ptr dptr; + __le64 slba; + __le16 nlb; + __le16 control; + __u32 rsvd13[3]; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +struct hiraid_scsi_nonrw_cmd { + __u8 opcode; + __u8 flags; + __le16 cmd_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_length; + __u8 rsvd2; + __u32 rsvd3[3]; + union hiraid_data_ptr dptr; + __u32 rsvd10[5]; + __le32 buf_len; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +struct hiraid_scsi_io_cmd { + union { + struct hiraid_scsi_io_com_cmd common; + struct hiraid_scsi_rw_cmd rw; + struct hiraid_scsi_nonrw_cmd nonrw; + }; +}; + +struct hiraid_passthru_common_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 data_len; + __u16 param_len; + } info_1; + __u32 cdw3; + }; + __u64 metadata; + + __u64 addr; + __u64 prp2; + + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct hiraid_passthru_io_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 res_sense_len; + __u8 cdb_len; + __u8 rsvd0; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_1; + __u32 cdw3; + }; + union { + struct { + __u16 rsvd; + __u16 param_len; + } info_2; + __u32 cdw4; + }; + __u32 cdw5; + __u64 addr; + __u64 prp2; + union { + struct { + __u16 eid; + __u16 sid; + } info_3; + __u32 cdw10; + }; + union { + struct { + __u16 did; + __u8 did_flag; + __u8 rsvd2; + } info_4; + __u32 cdw11; + }; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 data_len; + __u32 cdw16; + __u32 cdw17; + __u32 cdw18; + __u32 cdw19; + __u32 cdw20; + __u32 cdw21; + __u32 cdw22; + __u32 cdw23; + __u64 sense_addr; + __u32 cdw26[4]; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct hiraid_bsg_request { + u32 msgcode; + u32 control; + union { + struct hiraid_passthru_common_cmd admcmd; + struct hiraid_passthru_io_cmd pthrucmd; + }; +}; + +struct hiraid_cmd { + u16 qid; + u16 cid; + u32 result0; + u32 
result1; + u16 status; + void *priv; + enum hiraid_cmd_state state; + struct completion cmd_done; + struct list_head list; +}; + +struct hiraid_queue { + struct hiraid_dev *hdev; + spinlock_t sq_lock; + + spinlock_t cq_lock ____cacheline_aligned_in_smp; + + void *sq_cmds; + + struct hiraid_completion *cqes; + + dma_addr_t sq_buffer_phy; + dma_addr_t cq_buffer_phy; + u32 __iomem *q_db; + u8 cq_phase; + u8 sqes; + u16 qid; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 q_depth; + s16 cq_vector; + atomic_t inflight; + void *sense_buffer_virt; + dma_addr_t sense_buffer_phy; + struct dma_pool *prp_small_pool; +}; + +struct hiraid_mapmange { + struct hiraid_queue *hiraidq; + enum hiraid_cmd_state state; + u16 cid; + int page_cnt; + u32 sge_cnt; + u32 len; + bool use_sgl; + dma_addr_t first_dma; + void *sense_buffer_virt; + dma_addr_t sense_buffer_phy; + struct scatterlist *sgl; + void *list[0]; +}; + +struct hiraid_vd_info { + __u8 name[32]; + __le16 id; + __u8 rg_id; + __u8 rg_level; + __u8 sg_num; + __u8 sg_disk_num; + __u8 vd_status; + __u8 vd_type; + __u8 rsvd1[4056]; +}; + +struct bgtask_info { + __u8 type; + __u8 progress; + __u8 rate; + __u8 rsvd0; + __le16 vd_id; + __le16 time_left; + __u8 rsvd1[4]; +}; + +struct hiraid_bgtask { + __u8 sw; + __u8 task_num; + __u8 rsvd[6]; + struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM]; +}; + +struct hiraid_dev_info { + __le32 hdid; + __le16 target; + __u8 channel; + __u8 lun; + __u8 attr; + __u8 flag; + __le16 max_io_kb; +}; + +struct hiraid_dev_list { + __le32 dev_num; + __u32 rsvd0[3]; + struct hiraid_dev_info devinfo[MAX_DEV_ENTRY_PER_PAGE_4K]; +}; + +struct hiraid_sdev_hostdata { + u32 hdid; + u16 max_io_kb; + u8 attr; + u8 flag; + u8 rg_id; + u8 hwq; + u16 pend_count; +}; + +#endif + diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c new file mode 100644 index 0000000000000000000000000000000000000000..f84182fa9580490a84e2abe4cb5201687ecd4113 --- /dev/null +++ b/drivers/scsi/hisi_raid/hiraid_main.c @@ -0,0 +1,4160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 Huawei Technologies Co., Ltd */ + +/* Huawei Raid Series Linux Driver */ + +#define pr_fmt(fmt) "hiraid: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "hiraid.h" + +static u32 admin_tmout = 60; +module_param(admin_tmout, uint, 0644); +MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)"); + +static u32 scmd_tmout_rawdisk = 180; +module_param(scmd_tmout_rawdisk, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk(seconds)"); + +static u32 scmd_tmout_vd = 180; +module_param(scmd_tmout_vd, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)"); + +static bool max_io_force; +module_param(max_io_force, bool, 0644); +MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false(performance first)"); + +static bool work_mode; +module_param(work_mode, bool, 0444); +MODULE_PARM_DESC(work_mode, "work mode switch, default false for multi hw queues"); + +#define MAX_IO_QUEUES 128 +#define MIN_IO_QUEUES 1 + +static int ioq_num_set(const char *val, const struct kernel_param *kp) +{ + int n = 0; + int ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < MIN_IO_QUEUES || n > 
MAX_IO_QUEUES) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops max_hwq_num_ops = { + .set = ioq_num_set, + .get = param_get_uint, +}; + +static u32 max_hwq_num = 128; +module_param_cb(max_hwq_num, &max_hwq_num_ops, &max_hwq_num, 0444); +MODULE_PARM_DESC(max_hwq_num, "max num of hw io queues, should >= 1, default 128"); + +static int io_queue_depth_set(const char *val, const struct kernel_param *kp) +{ + int n = 0; + int ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < 2) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops io_queue_depth_ops = { + .set = io_queue_depth_set, + .get = param_get_uint, +}; + +static u32 io_queue_depth = 1024; +module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); +MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); + +static u32 log_debug_switch; +module_param(log_debug_switch, uint, 0644); +MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off"); + +static int extra_pool_num_set(const char *val, const struct kernel_param *kp) +{ + u8 n = 0; + int ret; + + ret = kstrtou8(val, 10, &n); + if (ret != 0) + return -EINVAL; + if (n > MAX_EXTRA_POOL_NUM) + n = MAX_EXTRA_POOL_NUM; + if (n < 1) + n = 1; + *((u8 *)kp->arg) = n; + + return 0; +} + +static const struct kernel_param_ops small_pool_num_ops = { + .set = extra_pool_num_set, + .get = param_get_byte, +}; + +/* It was found that the spinlock of a single pool is contended + * heavily by multiple CPUs. So multiple pools are introduced + * to reduce the contention. + */ +static unsigned char extra_pool_num = 4; +module_param_cb(extra_pool_num, &small_pool_num_ops, &extra_pool_num, 0644); +MODULE_PARM_DESC(extra_pool_num, "set prp extra pool num, default 4, MAX 16"); + +static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result); +static void hiraid_handle_async_vs(struct hiraid_dev *hdev, + u32 result, u32 result1); + +static struct class *hiraid_class; + +#define HIRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2) + +static struct workqueue_struct *work_queue; + +#define dev_log_dbg(dev, fmt, ...) 
do { \ + if (unlikely(log_debug_switch)) \ + dev_info(dev, "[%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#define HIRAID_DRV_VERSION "1.1.0.0" + +#define ADMIN_TIMEOUT (admin_tmout * HZ) +#define USRCMD_TIMEOUT (180 * HZ) +#define CTL_RST_TIME (600 * HZ) + +#define HIRAID_WAIT_ABNL_CMD_TIMEOUT 6 +#define HIRAID_WAIT_RST_IO_TIMEOUT 10 + +#define HIRAID_DMA_MSK_BIT_MAX 64 + +#define IOQ_PT_DATA_LEN 4096 +#define IOQ_PT_SGL_DATA_LEN (1024 * 1024) + +#define MAX_CAN_QUEUE (4096 - 1) +#define MIN_CAN_QUEUE (1024 - 1) + +enum SENSE_STATE_CODE { + SENSE_STATE_OK = 0, + SENSE_STATE_NEED_CHECK, + SENSE_STATE_ERROR, + SENSE_STATE_EP_PCIE_ERROR, + SENSE_STATE_NAC_DMA_ERROR, + SENSE_STATE_ABORTED, + SENSE_STATE_NEED_RETRY +}; + +enum { + FW_EH_OK = 0, + FW_EH_DEV_NONE = 0x701 +}; + +static const char * const raid_levels[] = {"0", "1", "5", "6", + "10", "50", "60", "NA"}; + +static const char * const raid_states[] = { + "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", + "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL", "DELETING", + "DELETE_FAIL", "WRITE_PROTECT" +}; + +static int hiraid_remap_bar(struct hiraid_dev *hdev, u32 size) +{ + struct pci_dev *pdev = hdev->pdev; + + if (size > pci_resource_len(pdev, 0)) { + dev_err(hdev->dev, "input size[%u] exceed bar0 length[%llu]\n", + size, pci_resource_len(pdev, 0)); + return -ENOMEM; + } + + if (hdev->bar) + iounmap(hdev->bar); + + hdev->bar = ioremap(pci_resource_start(pdev, 0), size); + if (!hdev->bar) { + dev_err(hdev->dev, "ioremap for bar0 failed\n"); + return -ENOMEM; + } + hdev->dbs = hdev->bar + HIRAID_REG_DBS; + + return 0; +} + +static int hiraid_dev_map(struct hiraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + + ret = pci_request_mem_regions(pdev, "hiraid"); + if (ret) { + dev_err(hdev->dev, "fail to request memory regions\n"); + return ret; + } + + ret = hiraid_remap_bar(hdev, HIRAID_REG_DBS + 4096); + if (ret) { + pci_release_mem_regions(pdev); + return ret; + } + + return 0; +} + +static void hiraid_dev_unmap(struct hiraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->bar) { + iounmap(hdev->bar); + hdev->bar = NULL; + } + pci_release_mem_regions(pdev); +} + +static int hiraid_pci_enable(struct hiraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = -ENOMEM; + u64 maskbit = HIRAID_DMA_MSK_BIT_MAX; + + if (pci_enable_device_mem(pdev)) { + dev_err(hdev->dev, "enable pci device memory resources failed\n"); + return ret; + } + pci_set_master(pdev); + + if (readl(hdev->bar + HIRAID_REG_CSTS) == U32_MAX) { + ret = -ENODEV; + dev_err(hdev->dev, "read CSTS register failed\n"); + goto disable; + } + + hdev->cap = lo_hi_readq(hdev->bar + HIRAID_REG_CAP); + hdev->ioq_depth = min_t(u32, HIRAID_CAP_MQES(hdev->cap) + 1, + io_queue_depth); + hdev->db_stride = 1 << HIRAID_CAP_STRIDE(hdev->cap); + + maskbit = HIRAID_CAP_DMAMASK(hdev->cap); + if (maskbit < 32 || maskbit > HIRAID_DMA_MSK_BIT_MAX) { + dev_err(hdev->dev, "dma mask invalid[%llu], set to default\n", + maskbit); + maskbit = HIRAID_DMA_MSK_BIT_MAX; + } + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) { + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(hdev->dev, "set dma mask[32] and coherent failed\n"); + goto disable; + } + dev_info(hdev->dev, "set dma mask[32] success\n"); + } else { + dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit); + } + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_err(hdev->dev, "allocate one IRQ 
for setup admin queue failed\n"); + goto disable; + } + + pci_enable_pcie_error_reporting(pdev); + pci_save_state(pdev); + + return 0; + +disable: + pci_disable_device(pdev); + return ret; +} + + +/* + * It is fact that first prp and last prp may be not full page. + * The size to count total nprps for the io equal to size + page_size, + * it may be a slightly overestimate. + * + * 8B per prp address. It may be there is one prp_list address per page, + * prp_list address does not count in io data prps. So divisor equal to + * PAGE_SIZE - 8, it may be a slightly overestimate. + */ +static int hiraid_prp_pagenum(struct hiraid_dev *hdev) +{ + u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12; + u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size); + + return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, + hdev->page_size - PRP_ENTRY_SIZE); +} + +/* + * Calculates the number of pages needed for the SGL segments. For example a 4k + * page can accommodate 256 SGL descriptors. + */ +static int hiraid_sgl_pagenum(struct hiraid_dev *hdev) +{ + u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge); + + return DIV_ROUND_UP(nsge * sizeof(struct hiraid_sgl_desc), + hdev->page_size); +} + +static inline void **hiraid_mapbuf_list(struct hiraid_mapmange *mapbuf) +{ + return mapbuf->list; +} + +static u32 hiraid_get_max_cmd_size(struct hiraid_dev *hdev) +{ + u32 alloc_size = sizeof(__le64 *) * max(hiraid_prp_pagenum(hdev), + hiraid_sgl_pagenum(hdev)); + + dev_info(hdev->dev, "mapbuf size[%lu], alloc_size[%u]\n", + sizeof(struct hiraid_mapmange), alloc_size); + + return sizeof(struct hiraid_mapmange) + alloc_size; +} + +static int hiraid_build_passthru_prp(struct hiraid_dev *hdev, + struct hiraid_mapmange *mapbuf) +{ + struct scatterlist *sg = mapbuf->sgl; + __le64 *phy_regpage, *prior_list; + u64 buf_addr = sg_dma_address(sg); + int buf_length = sg_dma_len(sg); + u32 page_size = hdev->page_size; + int offset = buf_addr & (page_size - 1); + void **list = hiraid_mapbuf_list(mapbuf); + int maplen = mapbuf->len; + struct dma_pool *pool; + dma_addr_t buffer_phy; + int i; + + maplen -= (page_size - offset); + if (maplen <= 0) { + mapbuf->first_dma = 0; + return 0; + } + + buf_length -= (page_size - offset); + if (buf_length) { + buf_addr += (page_size - offset); + } else { + sg = sg_next(sg); + buf_addr = sg_dma_address(sg); + buf_length = sg_dma_len(sg); + } + + if (maplen <= page_size) { + mapbuf->first_dma = buf_addr; + return 0; + } + + pool = hdev->prp_page_pool; + mapbuf->page_cnt = 1; + + phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!phy_regpage) { + dev_err_ratelimited(hdev->dev, "allocate first admin prp_list memory failed\n"); + mapbuf->first_dma = buf_addr; + mapbuf->page_cnt = -1; + return -ENOMEM; + } + list[0] = phy_regpage; + mapbuf->first_dma = buffer_phy; + i = 0; + for (;;) { + if (i == page_size / PRP_ENTRY_SIZE) { + prior_list = phy_regpage; + + phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, + &buffer_phy); + if (!phy_regpage) { + dev_err_ratelimited(hdev->dev, "allocate [%d]th admin prp list memory failed\n", + mapbuf->page_cnt + 1); + return -ENOMEM; + } + list[mapbuf->page_cnt++] = phy_regpage; + phy_regpage[0] = prior_list[i - 1]; + prior_list[i - 1] = cpu_to_le64(buffer_phy); + i = 1; + } + phy_regpage[i++] = cpu_to_le64(buf_addr); + buf_addr += page_size; + buf_length -= page_size; + maplen -= page_size; + if (maplen <= 0) + break; + if (buf_length > 0) + continue; + if (unlikely(buf_length < 0)) + goto bad_admin_sgl; + sg = sg_next(sg); + buf_addr = 
sg_dma_address(sg); + buf_length = sg_dma_len(sg); + } + + return 0; + +bad_admin_sgl: + dev_err(hdev->dev, "setup prps, invalid admin SGL for payload[%d] nents[%d]\n", + mapbuf->len, mapbuf->sge_cnt); + return -EIO; +} + +static int hiraid_build_prp(struct hiraid_dev *hdev, + struct hiraid_mapmange *mapbuf) +{ + struct scatterlist *sg = mapbuf->sgl; + __le64 *phy_regpage, *prior_list; + u64 buf_addr = sg_dma_address(sg); + int buf_length = sg_dma_len(sg); + u32 page_size = hdev->page_size; + int offset = buf_addr & (page_size - 1); + void **list = hiraid_mapbuf_list(mapbuf); + int maplen = mapbuf->len; + struct dma_pool *pool; + dma_addr_t buffer_phy; + int nprps, i; + + maplen -= (page_size - offset); + if (maplen <= 0) { + mapbuf->first_dma = 0; + return 0; + } + + buf_length -= (page_size - offset); + if (buf_length) { + buf_addr += (page_size - offset); + } else { + sg = sg_next(sg); + buf_addr = sg_dma_address(sg); + buf_length = sg_dma_len(sg); + } + + if (maplen <= page_size) { + mapbuf->first_dma = buf_addr; + return 0; + } + + nprps = DIV_ROUND_UP(maplen, page_size); + if (nprps <= (EXTRA_POOL_SIZE / PRP_ENTRY_SIZE)) { + pool = mapbuf->hiraidq->prp_small_pool; + mapbuf->page_cnt = 0; + } else { + pool = hdev->prp_page_pool; + mapbuf->page_cnt = 1; + } + + phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!phy_regpage) { + dev_err_ratelimited(hdev->dev, "allocate first prp_list memory failed\n"); + mapbuf->first_dma = buf_addr; + mapbuf->page_cnt = -1; + return -ENOMEM; + } + list[0] = phy_regpage; + mapbuf->first_dma = buffer_phy; + i = 0; + for (;;) { + if (i == page_size / PRP_ENTRY_SIZE) { + prior_list = phy_regpage; + + phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, + &buffer_phy); + if (!phy_regpage) { + dev_err_ratelimited(hdev->dev, "allocate [%d]th prp list memory failed\n", + mapbuf->page_cnt + 1); + return -ENOMEM; + } + list[mapbuf->page_cnt++] = phy_regpage; + phy_regpage[0] = prior_list[i - 1]; + prior_list[i - 1] = cpu_to_le64(buffer_phy); + i = 1; + } + phy_regpage[i++] = cpu_to_le64(buf_addr); + buf_addr += page_size; + buf_length -= page_size; + maplen -= page_size; + if (maplen <= 0) + break; + if (buf_length > 0) + continue; + if (unlikely(buf_length < 0)) + goto bad_sgl; + sg = sg_next(sg); + buf_addr = sg_dma_address(sg); + buf_length = sg_dma_len(sg); + } + + return 0; + +bad_sgl: + dev_err(hdev->dev, "setup prps, invalid SGL for payload[%d] nents[%d]\n", + mapbuf->len, mapbuf->sge_cnt); + return -EIO; +} + +#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct hiraid_sgl_desc)) + +static void hiraid_submit_cmd(struct hiraid_queue *hiraidq, const void *cmd) +{ + u32 sqes = SQE_SIZE(hiraidq->qid); + unsigned long flags; + struct hiraid_admin_com_cmd *acd = (struct hiraid_admin_com_cmd *)cmd; + + spin_lock_irqsave(&hiraidq->sq_lock, flags); + memcpy((hiraidq->sq_cmds + sqes * hiraidq->sq_tail), cmd, sqes); + if (++hiraidq->sq_tail == hiraidq->q_depth) + hiraidq->sq_tail = 0; + + writel(hiraidq->sq_tail, hiraidq->q_db); + spin_unlock_irqrestore(&hiraidq->sq_lock, flags); + + dev_log_dbg(hiraidq->hdev->dev, "cid[%d] qid[%d] opcode[0x%x] flags[0x%x] hdid[%u]\n", + le16_to_cpu(acd->cmd_id), hiraidq->qid, acd->opcode, + acd->flags, le32_to_cpu(acd->hdid)); +} + +static inline bool hiraid_is_rw_scmd(struct scsi_cmnd *scmd) +{ + switch (scmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + return true; + default: + return false; + } +} + +/* + * checks if prps 
can be built for the IO cmd + */ +static bool hiraid_is_prp(struct hiraid_dev *hdev, + struct scatterlist *sgl, u32 nsge) +{ + struct scatterlist *sg = sgl; + u32 page_mask = hdev->page_size - 1; + bool is_prp = true; + u32 i = 0; + + for_each_sg(sgl, sg, nsge, i) { + /* + * Data length of the middle sge multiple of page_size, + * address page_size aligned. + */ + if (i != 0 && i != nsge - 1) { + if ((sg_dma_len(sg) & page_mask) || + (sg_dma_address(sg) & page_mask)) { + is_prp = false; + break; + } + } + + /* + * The first sge addr plus the data length meets + * the page_size alignment. + */ + if (nsge > 1 && i == 0) { + if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) { + is_prp = false; + break; + } + } + + /* The last sge addr meets the page_size alignment. */ + if (nsge > 1 && i == (nsge - 1)) { + if (sg_dma_address(sg) & page_mask) { + is_prp = false; + break; + } + } + } + + return is_prp; +} + +enum { + HIRAID_SGL_FMT_DATA_DESC = 0x00, + HIRAID_SGL_FMT_SEG_DESC = 0x02, + HIRAID_SGL_FMT_LAST_SEG_DESC = 0x03, + HIRAID_KEY_SGL_FMT_DATA_DESC = 0x04, + HIRAID_TRANSPORT_SGL_DATA_DESC = 0x05 +}; + +static void hiraid_sgl_set_data(struct hiraid_sgl_desc *sge, + struct scatterlist *sg) +{ + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->type = HIRAID_SGL_FMT_DATA_DESC << 4; +} + +static void hiraid_sgl_set_seg(struct hiraid_sgl_desc *sge, + dma_addr_t buffer_phy, int entries) +{ + sge->addr = cpu_to_le64(buffer_phy); + if (entries <= SGES_PER_PAGE) { + sge->length = cpu_to_le32(entries * sizeof(*sge)); + sge->type = HIRAID_SGL_FMT_LAST_SEG_DESC << 4; + } else { + sge->length = cpu_to_le32(PAGE_SIZE); + sge->type = HIRAID_SGL_FMT_SEG_DESC << 4; + } +} + +static int hiraid_build_passthru_sgl(struct hiraid_dev *hdev, + struct hiraid_admin_command *admin_cmd, + struct hiraid_mapmange *mapbuf) +{ + struct hiraid_sgl_desc *sg_list, *link, *old_sg_list; + struct scatterlist *sg = mapbuf->sgl; + void **list = hiraid_mapbuf_list(mapbuf); + struct dma_pool *pool; + int nsge = mapbuf->sge_cnt; + dma_addr_t buffer_phy; + int i = 0; + + admin_cmd->common.flags |= SQE_FLAG_SGL_METABUF; + + if (nsge == 1) { + hiraid_sgl_set_data(&admin_cmd->common.dptr.sgl, sg); + return 0; + } + + pool = hdev->prp_page_pool; + mapbuf->page_cnt = 1; + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, "allocate first admin sgl_list failed\n"); + mapbuf->page_cnt = -1; + return -ENOMEM; + } + + list[0] = sg_list; + mapbuf->first_dma = buffer_phy; + hiraid_sgl_set_seg(&admin_cmd->common.dptr.sgl, buffer_phy, nsge); + do { + if (i == SGES_PER_PAGE) { + old_sg_list = sg_list; + link = &old_sg_list[SGES_PER_PAGE - 1]; + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, "allocate [%d]th admin sgl_list failed\n", + mapbuf->page_cnt + 1); + return -ENOMEM; + } + list[mapbuf->page_cnt++] = sg_list; + + i = 0; + memcpy(&sg_list[i++], link, sizeof(*link)); + hiraid_sgl_set_seg(link, buffer_phy, nsge); + } + + hiraid_sgl_set_data(&sg_list[i++], sg); + sg = sg_next(sg); + } while (--nsge > 0); + + return 0; +} + + +static int hiraid_build_sgl(struct hiraid_dev *hdev, + struct hiraid_scsi_io_cmd *io_cmd, + struct hiraid_mapmange *mapbuf) +{ + struct hiraid_sgl_desc *sg_list, *link, *old_sg_list; + struct scatterlist *sg = mapbuf->sgl; + void **list = hiraid_mapbuf_list(mapbuf); + struct dma_pool *pool; + int nsge = mapbuf->sge_cnt; + dma_addr_t buffer_phy; + int i 
= 0; + + io_cmd->common.flags |= SQE_FLAG_SGL_METABUF; + + if (nsge == 1) { + hiraid_sgl_set_data(&io_cmd->common.dptr.sgl, sg); + return 0; + } + + if (nsge <= (EXTRA_POOL_SIZE / sizeof(struct hiraid_sgl_desc))) { + pool = mapbuf->hiraidq->prp_small_pool; + mapbuf->page_cnt = 0; + } else { + pool = hdev->prp_page_pool; + mapbuf->page_cnt = 1; + } + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, "allocate first sgl_list failed\n"); + mapbuf->page_cnt = -1; + return -ENOMEM; + } + + list[0] = sg_list; + mapbuf->first_dma = buffer_phy; + hiraid_sgl_set_seg(&io_cmd->common.dptr.sgl, buffer_phy, nsge); + do { + if (i == SGES_PER_PAGE) { + old_sg_list = sg_list; + link = &old_sg_list[SGES_PER_PAGE - 1]; + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, "allocate [%d]th sgl_list failed\n", + mapbuf->page_cnt + 1); + return -ENOMEM; + } + list[mapbuf->page_cnt++] = sg_list; + + i = 0; + memcpy(&sg_list[i++], link, sizeof(*link)); + hiraid_sgl_set_seg(link, buffer_phy, nsge); + } + + hiraid_sgl_set_data(&sg_list[i++], sg); + sg = sg_next(sg); + } while (--nsge > 0); + + return 0; +} + +#define HIRAID_RW_FUA BIT(14) + +static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev, + struct hiraid_scsi_rw_cmd *io_cmd, + struct scsi_cmnd *scmd) +{ + u32 start_lba_lo, start_lba_hi; + u32 datalength = 0; + u16 control = 0; + + start_lba_lo = 0; + start_lba_hi = 0; + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + io_cmd->opcode = HIRAID_CMD_WRITE; + } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) { + io_cmd->opcode = HIRAID_CMD_READ; + } else { + dev_err(hdev->dev, "invalid RW_IO for unsupported data direction[%d]\n", + scmd->sc_data_direction); + WARN_ON(1); + return -EINVAL; + } + + /* 6-byte READ(0x08) or WRITE(0x0A) cdb */ + if (scmd->cmd_len == 6) { + datalength = (u32)(scmd->cmnd[4] == 0 ? 
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]); + start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]); + + start_lba_lo &= 0x1FFFFF; + } + + /* 10-byte READ(0x28) or WRITE(0x2A) cdb */ + else if (scmd->cmd_len == 10) { + datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= HIRAID_RW_FUA; + } + + /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */ + else if (scmd->cmd_len == 12) { + datalength = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= HIRAID_RW_FUA; + } + /* 16-byte READ(0x88) or WRITE(0x8A) cdb */ + else if (scmd->cmd_len == 16) { + datalength = get_unaligned_be32(&scmd->cmnd[10]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= HIRAID_RW_FUA; + } + + if (unlikely(datalength > U16_MAX || datalength == 0)) { + dev_err(hdev->dev, "invalid IO for err trans data length[%u]\n", + datalength); + WARN_ON(1); + return -EINVAL; + } + + io_cmd->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo); + /* 0base for nlb */ + io_cmd->nlb = cpu_to_le16((u16)(datalength - 1)); + io_cmd->control = cpu_to_le16(control); + + return 0; +} + +static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev, + struct hiraid_scsi_nonrw_cmd *io_cmd, + struct scsi_cmnd *scmd) +{ + io_cmd->buf_len = cpu_to_le32(scsi_bufflen(scmd)); + + switch (scmd->sc_data_direction) { + case DMA_NONE: + io_cmd->opcode = HIRAID_CMD_NONRW_NONE; + break; + case DMA_TO_DEVICE: + io_cmd->opcode = HIRAID_CMD_NONRW_TODEV; + break; + case DMA_FROM_DEVICE: + io_cmd->opcode = HIRAID_CMD_NONRW_FROMDEV; + break; + default: + dev_err(hdev->dev, "invalid NON_IO for unsupported data direction[%d]\n", + scmd->sc_data_direction); + WARN_ON(1); + return -EINVAL; + } + + return 0; +} + +static int hiraid_setup_io_cmd(struct hiraid_dev *hdev, + struct hiraid_scsi_io_cmd *io_cmd, + struct scsi_cmnd *scmd) +{ + memcpy(io_cmd->common.cdb, scmd->cmnd, scmd->cmd_len); + io_cmd->common.cdb_len = scmd->cmd_len; + + if (hiraid_is_rw_scmd(scmd)) + return hiraid_setup_rw_cmd(hdev, &io_cmd->rw, scmd); + else + return hiraid_setup_nonrw_cmd(hdev, &io_cmd->nonrw, scmd); +} + +static inline void hiraid_init_mapbuff(struct hiraid_mapmange *mapbuf) +{ + mapbuf->sge_cnt = 0; + mapbuf->page_cnt = -1; + mapbuf->use_sgl = false; + WRITE_ONCE(mapbuf->state, CMD_IDLE); +} + +static void hiraid_free_mapbuf(struct hiraid_dev *hdev, + struct hiraid_mapmange *mapbuf) +{ + const int last_prp = hdev->page_size / sizeof(__le64) - 1; + dma_addr_t buffer_phy, next_buffer_phy; + struct hiraid_sgl_desc *sg_list; + __le64 *prp_list; + void *addr; + int i; + + buffer_phy = mapbuf->first_dma; + if (mapbuf->page_cnt == 0) + dma_pool_free(mapbuf->hiraidq->prp_small_pool, + hiraid_mapbuf_list(mapbuf)[0], buffer_phy); + + for (i = 0; i < mapbuf->page_cnt; i++) { + addr = hiraid_mapbuf_list(mapbuf)[i]; + + if (mapbuf->use_sgl) { + sg_list = addr; + next_buffer_phy = + le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); + } else { + prp_list = addr; + next_buffer_phy = le64_to_cpu(prp_list[last_prp]); + } + + dma_pool_free(hdev->prp_page_pool, addr, buffer_phy); + buffer_phy = next_buffer_phy; + } + + mapbuf->sense_buffer_virt = NULL; + mapbuf->page_cnt = -1; +} + +static int hiraid_io_map_data(struct hiraid_dev *hdev, + struct hiraid_mapmange *mapbuf, + struct scsi_cmnd *scmd, + struct hiraid_scsi_io_cmd 
*io_cmd) +{ + int ret; + + ret = scsi_dma_map(scmd); + if (unlikely(ret < 0)) + return ret; + mapbuf->sge_cnt = ret; + + /* No data to DMA, it may be scsi no-rw command */ + if (unlikely(mapbuf->sge_cnt == 0)) + return 0; + + mapbuf->len = scsi_bufflen(scmd); + mapbuf->sgl = scsi_sglist(scmd); + mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt); + + if (mapbuf->use_sgl) { + ret = hiraid_build_sgl(hdev, io_cmd, mapbuf); + } else { + ret = hiraid_build_prp(hdev, mapbuf); + io_cmd->common.dptr.prp1 = + cpu_to_le64(sg_dma_address(mapbuf->sgl)); + io_cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma); + } + + if (ret) + scsi_dma_unmap(scmd); + + return ret; +} + +static void hiraid_check_status(struct hiraid_mapmange *mapbuf, + struct scsi_cmnd *scmd, + struct hiraid_completion *cqe) +{ + scsi_set_resid(scmd, 0); + + switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) { + case SENSE_STATE_OK: + set_host_byte(scmd, DID_OK); + break; + case SENSE_STATE_NEED_CHECK: + set_host_byte(scmd, DID_OK); + scmd->result |= le16_to_cpu(cqe->status) >> 8; + if (scmd->result & SAM_STAT_CHECK_CONDITION) { + memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + memcpy(scmd->sense_buffer, + mapbuf->sense_buffer_virt, + SCSI_SENSE_BUFFERSIZE); + scmd->result = (scmd->result & 0x00ffffff) | + (DRIVER_SENSE << 24); + } + break; + case SENSE_STATE_ABORTED: + set_host_byte(scmd, DID_ABORT); + break; + case SENSE_STATE_NEED_RETRY: + set_host_byte(scmd, DID_REQUEUE); + break; + default: + set_host_byte(scmd, DID_BAD_TARGET); + dev_warn_ratelimited(mapbuf->hiraidq->hdev->dev, "cid[%d] qid[%d] sdev[%d:%d] opcode[%.2x] bad status[0x%x]\n", + le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id), + scmd->device->channel, scmd->device->id, + scmd->cmnd[0], le16_to_cpu(cqe->status)); + break; + } +} + +static inline void hiraid_query_scmd_tag(struct scsi_cmnd *scmd, u16 *qid, + u16 *cid, struct hiraid_dev *hdev, + struct hiraid_sdev_hostdata *hostdata) +{ + u32 tag = blk_mq_unique_tag(blk_mq_rq_from_pdu((void *)scmd)); + + if (work_mode) { + if ((hdev->hdd_dispatch == DISPATCH_BY_DISK) && + (hostdata->hwq != 0)) + *qid = hostdata->hwq; + else + *qid = raw_smp_processor_id() % + (hdev->online_queues - 1) + 1; + } else { + *qid = blk_mq_unique_tag_to_hwq(tag) + 1; + } + *cid = blk_mq_unique_tag_to_tag(tag); +} + +static int hiraid_queue_command(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd); + struct hiraid_dev *hdev = shost_priv(shost); + struct scsi_device *sdev = scmd->device; + struct hiraid_sdev_hostdata *hostdata; + struct hiraid_scsi_io_cmd io_cmd; + struct hiraid_queue *ioq; + u16 hwq, cid; + int ret; + + if (unlikely(hdev->state == DEV_RESETTING)) + return SCSI_MLQUEUE_HOST_BUSY; + + if (unlikely(hdev->state != DEV_LIVE)) { + set_host_byte(scmd, DID_NO_CONNECT); + scmd->scsi_done(scmd); + return 0; + } + + if (log_debug_switch) + scsi_print_command(scmd); + + hostdata = sdev->hostdata; + hiraid_query_scmd_tag(scmd, &hwq, &cid, hdev, hostdata); + ioq = &hdev->queues[hwq]; + + if (unlikely(atomic_inc_return(&ioq->inflight) > + (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ))) { + atomic_dec(&ioq->inflight); + return SCSI_MLQUEUE_HOST_BUSY; + } + + memset(&io_cmd, 0, sizeof(io_cmd)); + io_cmd.rw.hdid = cpu_to_le32(hostdata->hdid); + io_cmd.rw.cmd_id = cpu_to_le16(cid); + + ret = hiraid_setup_io_cmd(hdev, &io_cmd, scmd); + if (unlikely(ret)) { + set_host_byte(scmd, DID_ERROR); + scmd->scsi_done(scmd); + atomic_dec(&ioq->inflight); + return 0; + } + + 
ret = cid * SCSI_SENSE_BUFFERSIZE; + if (work_mode) { + mapbuf->sense_buffer_virt = hdev->sense_buffer_virt + ret; + mapbuf->sense_buffer_phy = hdev->sense_buffer_phy + ret; + } else { + mapbuf->sense_buffer_virt = ioq->sense_buffer_virt + ret; + mapbuf->sense_buffer_phy = ioq->sense_buffer_phy + ret; + } + io_cmd.common.sense_addr = cpu_to_le64(mapbuf->sense_buffer_phy); + io_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE); + + hiraid_init_mapbuff(mapbuf); + + mapbuf->hiraidq = ioq; + mapbuf->cid = cid; + ret = hiraid_io_map_data(hdev, mapbuf, scmd, &io_cmd); + if (unlikely(ret)) { + dev_err(hdev->dev, "io map data err\n"); + set_host_byte(scmd, DID_ERROR); + scmd->scsi_done(scmd); + ret = 0; + goto deinit_iobuf; + } + + WRITE_ONCE(mapbuf->state, CMD_FLIGHT); + hiraid_submit_cmd(ioq, &io_cmd); + + return 0; + +deinit_iobuf: + atomic_dec(&ioq->inflight); + hiraid_free_mapbuf(hdev, mapbuf); + return ret; +} + +static int hiraid_match_dev(struct hiraid_dev *hdev, u16 idx, + struct scsi_device *sdev) +{ + if (HIRAID_DEV_INFO_FLAG_VALID(hdev->dev_info[idx].flag)) { + if (sdev->channel == hdev->dev_info[idx].channel && + sdev->id == le16_to_cpu(hdev->dev_info[idx].target) && + sdev->lun < hdev->dev_info[idx].lun) { + dev_info(hdev->dev, "match device success, channel:target:lun[%d:%d:%d]\n", + hdev->dev_info[idx].channel, + hdev->dev_info[idx].target, + hdev->dev_info[idx].lun); + return 1; + } + } + + return 0; +} + +static int hiraid_disk_qd(u8 attr) +{ + switch (HIRAID_DEV_DISK_TYPE(attr)) { + case HIRAID_SAS_HDD_VD: + case HIRAID_SATA_HDD_VD: + return HIRAID_HDD_VD_QD; + case HIRAID_SAS_SSD_VD: + case HIRAID_SATA_SSD_VD: + case HIRAID_NVME_SSD_VD: + return HIRAID_SSD_VD_QD; + case HIRAID_SAS_HDD_PD: + case HIRAID_SATA_HDD_PD: + return HIRAID_HDD_PD_QD; + case HIRAID_SAS_SSD_PD: + case HIRAID_SATA_SSD_PD: + case HIRAID_NVME_SSD_PD: + return HIRAID_SSD_PD_QD; + default: + return MAX_CMD_PER_DEV; + } +} + +static bool hiraid_disk_is_hdd(u8 attr) +{ + switch (HIRAID_DEV_DISK_TYPE(attr)) { + case HIRAID_SAS_HDD_VD: + case HIRAID_SATA_HDD_VD: + case HIRAID_SAS_HDD_PD: + case HIRAID_SATA_HDD_PD: + return true; + default: + return false; + } +} + +static int hiraid_slave_alloc(struct scsi_device *sdev) +{ + struct hiraid_sdev_hostdata *hostdata; + struct hiraid_dev *hdev; + u16 idx; + + hdev = shost_priv(sdev->host); + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) { + dev_err(hdev->dev, "alloc scsi host data memory failed\n"); + return -ENOMEM; + } + + down_read(&hdev->dev_rwsem); + for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) { + if (hiraid_match_dev(hdev, idx, sdev)) + goto scan_host; + } + up_read(&hdev->dev_rwsem); + + kfree(hostdata); + return -ENXIO; + +scan_host: + hostdata->hdid = le32_to_cpu(hdev->dev_info[idx].hdid); + hostdata->max_io_kb = le16_to_cpu(hdev->dev_info[idx].max_io_kb); + hostdata->attr = hdev->dev_info[idx].attr; + hostdata->flag = hdev->dev_info[idx].flag; + hostdata->rg_id = 0xff; + sdev->hostdata = hostdata; + up_read(&hdev->dev_rwsem); + return 0; +} + +static void hiraid_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +static int hiraid_slave_configure(struct scsi_device *sdev) +{ + unsigned int timeout = scmd_tmout_rawdisk * HZ; + struct hiraid_dev *hdev = shost_priv(sdev->host); + struct hiraid_sdev_hostdata *hostdata = sdev->hostdata; + u32 max_sec = sdev->host->max_sectors; + int qd = MAX_CMD_PER_DEV; + + if (hostdata) { + if (HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + 
timeout = scmd_tmout_vd * HZ; + else if (HIRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr)) + timeout = scmd_tmout_rawdisk * HZ; + max_sec = hostdata->max_io_kb << 1; + qd = hiraid_disk_qd(hostdata->attr); + + if (hiraid_disk_is_hdd(hostdata->attr)) + hostdata->hwq = hostdata->hdid % + (hdev->online_queues - 1) + 1; + else + hostdata->hwq = 0; + } else { + dev_err(hdev->dev, "err, sdev->hostdata is null\n"); + } + + blk_queue_rq_timeout(sdev->request_queue, timeout); + sdev->eh_timeout = timeout; + scsi_change_queue_depth(sdev, qd); + + if ((max_sec == 0) || (max_sec > sdev->host->max_sectors)) + max_sec = sdev->host->max_sectors; + + if (!max_io_force) + blk_queue_max_hw_sectors(sdev->request_queue, max_sec); + + dev_info(hdev->dev, "sdev->channel:id:lun[%d:%d:%lld] scmd_timeout[%d]s maxsec[%d]\n", + sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec); + + return 0; +} + +static void hiraid_shost_init(struct hiraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + u8 domain, bus; + u32 dev_func; + + domain = pci_domain_nr(pdev->bus); + bus = pdev->bus->number; + dev_func = pdev->devfn; + + hdev->shost->nr_hw_queues = work_mode ? 1 : hdev->online_queues - 1; + hdev->shost->can_queue = hdev->scsi_qd; + + hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge); + /* 512B per sector */ + hdev->shost->max_sectors = + (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512; + hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV; + hdev->shost->max_channel = + le16_to_cpu(hdev->ctrl_info->max_channel) - 1; + hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id); + hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun); + + hdev->shost->this_id = -1; + hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func; + hdev->shost->max_cmd_len = MAX_CDB_LEN; + hdev->shost->hostt->cmd_size = hiraid_get_max_cmd_size(hdev); +} + +static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth) +{ + struct hiraid_queue *hiraidq = &hdev->queues[qid]; + int ret = 0; + + if (hdev->queue_count > qid) { + dev_info(hdev->dev, "warn: queue[%d] already exists\n", qid); + return 0; + } + + hiraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth), + &hiraidq->cq_buffer_phy, + GFP_KERNEL | __GFP_ZERO); + if (!hiraidq->cqes) + return -ENOMEM; + + hiraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth), + &hiraidq->sq_buffer_phy, GFP_KERNEL); + if (!hiraidq->sq_cmds) { + ret = -ENOMEM; + goto free_cqes; + } + + /* + * In single hw queue mode there is no need to allocate a sense buffer + * for every queue; one shared buffer is used for all of them (see + * hiraid_setup_io_queues). 
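 + * The shared buffer is sized for scsi_qd plus + * max_hwq_num * HIRAID_PTHRU_CMDS_PERQ sense slots: regular commands index + * it by cid (see hiraid_queue_command) and pass-through commands by + * (qid - 1) * HIRAID_PTHRU_CMDS_PERQ + cid (see hiraid_put_io_sync_request). 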
+ */ + if (work_mode) + goto initq; + + /* alloc sense buffer */ + hiraidq->sense_buffer_virt = dma_alloc_coherent(hdev->dev, + SENSE_SIZE(depth), + &hiraidq->sense_buffer_phy, + GFP_KERNEL | __GFP_ZERO); + if (!hiraidq->sense_buffer_virt) { + ret = -ENOMEM; + goto free_sq_cmds; + } + +initq: + spin_lock_init(&hiraidq->sq_lock); + spin_lock_init(&hiraidq->cq_lock); + hiraidq->hdev = hdev; + hiraidq->q_depth = depth; + hiraidq->qid = qid; + hiraidq->cq_vector = -1; + hdev->queue_count++; + + return 0; + +free_sq_cmds: + dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth), + (void *)hiraidq->sq_cmds, + hiraidq->sq_buffer_phy); +free_cqes: + dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)hiraidq->cqes, + hiraidq->cq_buffer_phy); + return ret; +} + +static int hiraid_wait_control_ready(struct hiraid_dev *hdev, + u64 cap, bool enabled) +{ + unsigned long timeout = + ((HIRAID_CAP_TIMEOUT(cap) + 1) * HIRAID_CAP_TIMEOUT_UNIT_MS) + jiffies; + u32 bit = enabled ? HIRAID_CSTS_RDY : 0; + + while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY) != bit) { + usleep_range(1000, 2000); + if (fatal_signal_pending(current)) + return -EINTR; + + if (time_after(jiffies, timeout)) { + dev_err(hdev->dev, "device not ready; aborting %s\n", + enabled ? "initialisation" : "reset"); + return -ENODEV; + } + } + return 0; +} + +static int hiraid_shutdown_control(struct hiraid_dev *hdev) +{ + unsigned long timeout = + le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies; + + hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK; + hdev->ctrl_config |= HIRAID_CC_SHN_NORMAL; + writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC); + + while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK) != + HIRAID_CSTS_SHST_CMPLT) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(hdev->dev, "device shutdown incomplete, abort shutdown\n"); + return -ENODEV; + } + } + return 0; +} + +static int hiraid_disable_control(struct hiraid_dev *hdev) +{ + hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK; + hdev->ctrl_config &= ~HIRAID_CC_ENABLE; + writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC); + + return hiraid_wait_control_ready(hdev, hdev->cap, false); +} + +static int hiraid_enable_control(struct hiraid_dev *hdev) +{ + u64 cap = hdev->cap; + u32 dev_page_min = HIRAID_CAP_MPSMIN(cap) + 12; + u32 page_shift = PAGE_SHIFT; + + if (page_shift < dev_page_min) { + dev_err(hdev->dev, "minimum device page size[%u], too large for host[%u]\n", + 1U << dev_page_min, 1U << page_shift); + return -ENODEV; + } + + page_shift = + min_t(unsigned int, HIRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT); + hdev->page_size = 1U << page_shift; + + hdev->ctrl_config = HIRAID_CC_CSS_NVM; + hdev->ctrl_config |= (page_shift - 12) << HIRAID_CC_MPS_SHIFT; + hdev->ctrl_config |= HIRAID_CC_AMS_RR | HIRAID_CC_SHN_NONE; + hdev->ctrl_config |= HIRAID_CC_IOSQES | HIRAID_CC_IOCQES; + hdev->ctrl_config |= HIRAID_CC_ENABLE; + writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC); + + return hiraid_wait_control_ready(hdev, cap, true); +} + +static void hiraid_init_queue(struct hiraid_queue *hiraidq, u16 qid) +{ + struct hiraid_dev *hdev = hiraidq->hdev; + + memset((void *)hiraidq->cqes, 0, CQ_SIZE(hiraidq->q_depth)); + + hiraidq->sq_tail = 0; + hiraidq->cq_head = 0; + hiraidq->cq_phase = 1; + hiraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride]; + hiraidq->prp_small_pool = hdev->prp_extra_pool[qid % extra_pool_num]; + hdev->online_queues++; + atomic_set(&hiraidq->inflight, 0); +} + +static inline bool 
hiraid_cqe_pending(struct hiraid_queue *hiraidq) +{ + return (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].status) & 1) == + hiraidq->cq_phase; +} + +static void hiraid_complete_io_cmnd(struct hiraid_queue *ioq, + struct hiraid_completion *cqe) +{ + struct hiraid_dev *hdev = ioq->hdev; + struct blk_mq_tags *tags; + struct scsi_cmnd *scmd; + struct hiraid_mapmange *mapbuf; + struct request *req; + unsigned long elapsed; + + atomic_dec(&ioq->inflight); + + if (work_mode) + tags = hdev->shost->tag_set.tags[0]; + else + tags = hdev->shost->tag_set.tags[ioq->qid - 1]; + req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id)); + if (unlikely(!req || !blk_mq_request_started(req))) { + dev_warn(hdev->dev, "invalid id[%d] completed on queue[%d]\n", + le16_to_cpu(cqe->cmd_id), ioq->qid); + return; + } + + scmd = blk_mq_rq_to_pdu(req); + mapbuf = scsi_cmd_priv(scmd); + + elapsed = jiffies - scmd->jiffies_at_alloc; + dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n", + le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ); + + if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) { + dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n", + le16_to_cpu(cqe->cmd_id), ioq->qid, + elapsed / HZ, elapsed % HZ); + WRITE_ONCE(mapbuf->state, CMD_TMO_COMPLETE); + + if (mapbuf->sge_cnt) { + mapbuf->sge_cnt = 0; + scsi_dma_unmap(scmd); + } + hiraid_free_mapbuf(hdev, mapbuf); + + return; + } + + hiraid_check_status(mapbuf, scmd, cqe); + if (mapbuf->sge_cnt) { + mapbuf->sge_cnt = 0; + scsi_dma_unmap(scmd); + } + hiraid_free_mapbuf(hdev, mapbuf); + scmd->scsi_done(scmd); +} + +static void hiraid_complete_admin_cmnd(struct hiraid_queue *adminq, + struct hiraid_completion *cqe) +{ + struct hiraid_dev *hdev = adminq->hdev; + struct hiraid_cmd *adm_cmd; + + adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id); + if (unlikely(adm_cmd->state == CMD_IDLE)) { + dev_warn(adminq->hdev->dev, "invalid id[%d] completed on queue[%d]\n", + le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id)); + return; + } + + adm_cmd->status = le16_to_cpu(cqe->status) >> 1; + adm_cmd->result0 = le32_to_cpu(cqe->result); + adm_cmd->result1 = le32_to_cpu(cqe->result1); + + complete(&adm_cmd->cmd_done); +} + +static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid); + +static void hiraid_complete_async_event(struct hiraid_queue *hiraidq, + struct hiraid_completion *cqe) +{ + struct hiraid_dev *hdev = hiraidq->hdev; + u32 result = le32_to_cpu(cqe->result); + + dev_info(hdev->dev, "recv async event, cid[%d] status[0x%x] result[0x%x]\n", + le16_to_cpu(cqe->cmd_id), + le16_to_cpu(cqe->status) >> 1, + result); + + hiraid_send_async_event(hdev, le16_to_cpu(cqe->cmd_id)); + + if ((le16_to_cpu(cqe->status) >> 1) != HIRAID_SC_SUCCESS) + return; + switch (result & 0x7) { + case HIRAID_ASYN_EVENT_NOTICE: + hiraid_handle_async_notice(hdev, result); + break; + case HIRAID_ASYN_EVENT_VS: + hiraid_handle_async_vs(hdev, result, le32_to_cpu(cqe->result1)); + break; + default: + dev_warn(hdev->dev, "unsupported async event type[%u]\n", + result & 0x7); + break; + } +} + +static void hiraid_complete_pthru_cmnd(struct hiraid_queue *ioq, + struct hiraid_completion *cqe) +{ + struct hiraid_dev *hdev = ioq->hdev; + struct hiraid_cmd *ptcmd; + + ptcmd = hdev->io_ptcmds + (ioq->qid - 1) * HIRAID_PTHRU_CMDS_PERQ + + le16_to_cpu(cqe->cmd_id) - hdev->scsi_qd; + + ptcmd->status = le16_to_cpu(cqe->status) >> 1; + ptcmd->result0 = le32_to_cpu(cqe->result); + ptcmd->result1 = 
le32_to_cpu(cqe->result1); + + complete(&ptcmd->cmd_done); +} + +static inline void hiraid_handle_cqe(struct hiraid_queue *hiraidq, u16 idx) +{ + struct hiraid_completion *cqe = &hiraidq->cqes[idx]; + struct hiraid_dev *hdev = hiraidq->hdev; + u16 cid = le16_to_cpu(cqe->cmd_id); + + if (unlikely(!work_mode && (cid >= hiraidq->q_depth))) { + dev_err(hdev->dev, "invalid command id[%d] completed on queue[%d]\n", + cid, cqe->sq_id); + return; + } + + dev_log_dbg(hdev->dev, "cid[%d] qid[%d] result[0x%x] sqid[%d] status[0x%x]\n", + cid, hiraidq->qid, le32_to_cpu(cqe->result), + le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status)); + + if (unlikely(hiraidq->qid == 0 && cid >= HIRAID_AQ_BLK_MQ_DEPTH)) { + hiraid_complete_async_event(hiraidq, cqe); + return; + } + + if (unlikely(hiraidq->qid && cid >= hdev->scsi_qd)) { + hiraid_complete_pthru_cmnd(hiraidq, cqe); + return; + } + + if (hiraidq->qid) + hiraid_complete_io_cmnd(hiraidq, cqe); + else + hiraid_complete_admin_cmnd(hiraidq, cqe); +} + +static void hiraid_complete_cqes(struct hiraid_queue *hiraidq, + u16 start, u16 end) +{ + while (start != end) { + hiraid_handle_cqe(hiraidq, start); + if (++start == hiraidq->q_depth) + start = 0; + } +} + +static inline void hiraid_update_cq_head(struct hiraid_queue *hiraidq) +{ + if (++hiraidq->cq_head == hiraidq->q_depth) { + hiraidq->cq_head = 0; + hiraidq->cq_phase = !hiraidq->cq_phase; + } +} + +static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, + u16 *start, u16 *end, int tag) +{ + bool found = false; + + *start = hiraidq->cq_head; + while (!found && hiraid_cqe_pending(hiraidq)) { + if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag) + found = true; + hiraid_update_cq_head(hiraidq); + } + *end = hiraidq->cq_head; + + if (*start != *end) + writel(hiraidq->cq_head, + hiraidq->q_db + hiraidq->hdev->db_stride); + + return found; +} + +static bool hiraid_poll_cq(struct hiraid_queue *hiraidq, int cid) +{ + u16 start, end; + bool found; + + if (!hiraid_cqe_pending(hiraidq)) + return 0; + + spin_lock_irq(&hiraidq->cq_lock); + found = hiraid_process_cq(hiraidq, &start, &end, cid); + spin_unlock_irq(&hiraidq->cq_lock); + + hiraid_complete_cqes(hiraidq, start, end); + return found; +} + +static irqreturn_t hiraid_handle_irq(int irq, void *data) +{ + struct hiraid_queue *hiraidq = data; + irqreturn_t ret = IRQ_NONE; + u16 start, end; + + spin_lock(&hiraidq->cq_lock); + if (hiraidq->cq_head != hiraidq->last_cq_head) + ret = IRQ_HANDLED; + + hiraid_process_cq(hiraidq, &start, &end, -1); + hiraidq->last_cq_head = hiraidq->cq_head; + spin_unlock(&hiraidq->cq_lock); + + if (start != end) { + hiraid_complete_cqes(hiraidq, start, end); + ret = IRQ_HANDLED; + } + return ret; +} + +static int hiraid_setup_admin_queue(struct hiraid_dev *hdev) +{ + struct hiraid_queue *adminq = &hdev->queues[0]; + u32 aqa; + int ret; + + dev_info(hdev->dev, "start disable controller\n"); + + ret = hiraid_disable_control(hdev); + if (ret) + return ret; + + ret = hiraid_alloc_queue(hdev, 0, HIRAID_AQ_DEPTH); + if (ret) + return ret; + + aqa = adminq->q_depth - 1; + aqa |= aqa << 16; + writel(aqa, hdev->bar + HIRAID_REG_AQA); + lo_hi_writeq(adminq->sq_buffer_phy, hdev->bar + HIRAID_REG_ASQ); + lo_hi_writeq(adminq->cq_buffer_phy, hdev->bar + HIRAID_REG_ACQ); + + dev_info(hdev->dev, "start enable controller\n"); + + ret = hiraid_enable_control(hdev); + if (ret) { + ret = -ENODEV; + return ret; + } + + adminq->cq_vector = 0; + ret = pci_request_irq(hdev->pdev, adminq->cq_vector, hiraid_handle_irq, + NULL, adminq, 
"hiraid%d_q%d", hdev->instance, adminq->qid); + if (ret) { + adminq->cq_vector = -1; + return ret; + } + + hiraid_init_queue(adminq, 0); + + dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n", + hdev->queue_count, hdev->online_queues, hdev->page_size); + + return 0; +} + +static u32 hiraid_get_bar_size(struct hiraid_dev *hdev, u32 nr_ioqs) +{ + return (HIRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride)); +} + +static int hiraid_create_admin_cmds(struct hiraid_dev *hdev) +{ + u16 i; + + INIT_LIST_HEAD(&hdev->adm_cmd_list); + spin_lock_init(&hdev->adm_cmd_lock); + + hdev->adm_cmds = kcalloc_node(HIRAID_AQ_BLK_MQ_DEPTH, + sizeof(struct hiraid_cmd), GFP_KERNEL, hdev->numa_node); + + if (!hdev->adm_cmds) { + dev_err(hdev->dev, "alloc admin cmds failed\n"); + return -ENOMEM; + } + + for (i = 0; i < HIRAID_AQ_BLK_MQ_DEPTH; i++) { + hdev->adm_cmds[i].qid = 0; + hdev->adm_cmds[i].cid = i; + list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list); + } + + dev_info(hdev->dev, "alloc admin cmds success, num[%d]\n", + HIRAID_AQ_BLK_MQ_DEPTH); + + return 0; +} + +static void hiraid_free_admin_cmds(struct hiraid_dev *hdev) +{ + kfree(hdev->adm_cmds); + hdev->adm_cmds = NULL; + INIT_LIST_HEAD(&hdev->adm_cmd_list); +} + +static struct hiraid_cmd *hiraid_get_cmd(struct hiraid_dev *hdev, + enum hiraid_cmd_type type) +{ + struct hiraid_cmd *cmd = NULL; + unsigned long flags; + struct list_head *head = &hdev->adm_cmd_list; + spinlock_t *slock = &hdev->adm_cmd_lock; + + if (type == HIRAID_CMD_PTHRU) { + head = &hdev->io_pt_list; + slock = &hdev->io_pt_lock; + } + + spin_lock_irqsave(slock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(slock, flags); + dev_err(hdev->dev, "err, cmd[%d] list empty\n", type); + return NULL; + } + cmd = list_entry(head->next, struct hiraid_cmd, list); + list_del_init(&cmd->list); + spin_unlock_irqrestore(slock, flags); + + WRITE_ONCE(cmd->state, CMD_FLIGHT); + + return cmd; +} + +static void hiraid_put_cmd(struct hiraid_dev *hdev, struct hiraid_cmd *cmd, + enum hiraid_cmd_type type) +{ + unsigned long flags; + struct list_head *head = &hdev->adm_cmd_list; + spinlock_t *slock = &hdev->adm_cmd_lock; + + if (type == HIRAID_CMD_PTHRU) { + head = &hdev->io_pt_list; + slock = &hdev->io_pt_lock; + } + + spin_lock_irqsave(slock, flags); + WRITE_ONCE(cmd->state, CMD_IDLE); + list_add_tail(&cmd->list, head); + spin_unlock_irqrestore(slock, flags); +} + +static bool hiraid_admin_need_reset(struct hiraid_admin_command *cmd) +{ + switch (cmd->common.opcode) { + case HIRAID_ADMIN_DELETE_SQ: + case HIRAID_ADMIN_CREATE_SQ: + case HIRAID_ADMIN_DELETE_CQ: + case HIRAID_ADMIN_CREATE_CQ: + case HIRAID_ADMIN_SET_FEATURES: + return false; + default: + return true; + } +} + +static int hiraid_reset_work_sync(struct hiraid_dev *hdev); +static inline void hiraid_admin_timeout(struct hiraid_dev *hdev, + struct hiraid_cmd *cmd) +{ + /* command may be returned because controller reset */ + if (READ_ONCE(cmd->state) == CMD_COMPLETE) + return; + if (hiraid_reset_work_sync(hdev) == -EBUSY) + flush_work(&hdev->reset_work); +} + +static int hiraid_put_admin_sync_request(struct hiraid_dev *hdev, + struct hiraid_admin_command *cmd, + u32 *result0, u32 *result1, u32 timeout) +{ + struct hiraid_cmd *adm_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_ADMIN); + + if (!adm_cmd) { + dev_err(hdev->dev, "err, get admin cmd failed\n"); + return -EFAULT; + } + + timeout = timeout ? 
timeout : ADMIN_TIMEOUT; + + init_completion(&adm_cmd->cmd_done); + + cmd->common.cmd_id = cpu_to_le16(adm_cmd->cid); + hiraid_submit_cmd(&hdev->queues[0], cmd); + + if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) { + dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n", + adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode, + cmd->usr_cmd.info_0.subopcode); + + /* reset controller if admin timeout */ + if (hiraid_admin_need_reset(cmd)) + hiraid_admin_timeout(hdev, adm_cmd); + + hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN); + return -ETIME; + } + + if (result0) + *result0 = adm_cmd->result0; + if (result1) + *result1 = adm_cmd->result1; + + hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN); + + return adm_cmd->status; +} + +/** + * hiraid_create_cq - send cmd to controller for create controller cq + */ +static int hiraid_create_complete_queue(struct hiraid_dev *hdev, + u16 qid, struct hiraid_queue *hiraidq, u16 cq_vector) +{ + struct hiraid_admin_command admin_cmd; + int flags = HIRAID_QUEUE_PHYS_CONTIG | HIRAID_CQ_IRQ_ENABLED; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_cq.opcode = HIRAID_ADMIN_CREATE_CQ; + admin_cmd.create_cq.prp1 = cpu_to_le64(hiraidq->cq_buffer_phy); + admin_cmd.create_cq.cqid = cpu_to_le16(qid); + admin_cmd.create_cq.qsize = cpu_to_le16(hiraidq->q_depth - 1); + admin_cmd.create_cq.cq_flags = cpu_to_le16(flags); + admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector); + + return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0); +} + +/** + * hiraid_create_sq - send cmd to controller for create controller sq + */ +static int hiraid_create_send_queue(struct hiraid_dev *hdev, u16 qid, + struct hiraid_queue *hiraidq) +{ + struct hiraid_admin_command admin_cmd; + int flags = HIRAID_QUEUE_PHYS_CONTIG; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_sq.opcode = HIRAID_ADMIN_CREATE_SQ; + admin_cmd.create_sq.prp1 = cpu_to_le64(hiraidq->sq_buffer_phy); + admin_cmd.create_sq.sqid = cpu_to_le16(qid); + admin_cmd.create_sq.qsize = cpu_to_le16(hiraidq->q_depth - 1); + admin_cmd.create_sq.sq_flags = cpu_to_le16(flags); + admin_cmd.create_sq.cqid = cpu_to_le16(qid); + + return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0); +} + +static void hiraid_free_all_queues(struct hiraid_dev *hdev) +{ + int i; + struct hiraid_queue *hq; + + for (i = 0; i < hdev->queue_count; i++) { + hq = &hdev->queues[i]; + dma_free_coherent(hdev->dev, CQ_SIZE(hq->q_depth), + (void *)hq->cqes, hq->cq_buffer_phy); + dma_free_coherent(hdev->dev, SQ_SIZE(hq->qid, hq->q_depth), + hq->sq_cmds, hq->sq_buffer_phy); + if (!work_mode) + dma_free_coherent(hdev->dev, SENSE_SIZE(hq->q_depth), + hq->sense_buffer_virt, + hq->sense_buffer_phy); + } + + hdev->queue_count = 0; +} + +static void hiraid_free_sense_buffer(struct hiraid_dev *hdev) +{ + if (hdev->sense_buffer_virt) { + dma_free_coherent(hdev->dev, + SENSE_SIZE(hdev->scsi_qd + + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ), + hdev->sense_buffer_virt, hdev->sense_buffer_phy); + hdev->sense_buffer_virt = NULL; + } +} + +static int hiraid_delete_queue(struct hiraid_dev *hdev, u8 opcode, u16 qid) +{ + struct hiraid_admin_command admin_cmd; + int ret; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.delete_queue.opcode = opcode; + admin_cmd.delete_queue.qid = cpu_to_le16(qid); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0); + + if (ret) + dev_err(hdev->dev, "delete %s:[%d] failed\n", + (opcode == HIRAID_ADMIN_DELETE_CQ) ? 
"cq" : "sq", qid); + + return ret; +} + +static int hiraid_delete_complete_queue(struct hiraid_dev *hdev, u16 cqid) +{ + return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_CQ, cqid); +} + +static int hiraid_delete_send_queue(struct hiraid_dev *hdev, u16 sqid) +{ + return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_SQ, sqid); +} + +static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid) +{ + struct hiraid_dev *hdev = hiraidq->hdev; + u16 cq_vector; + int ret; + + cq_vector = (hdev->num_vecs == 1) ? 0 : qid; + ret = hiraid_create_complete_queue(hdev, qid, hiraidq, cq_vector); + if (ret) + return ret; + + ret = hiraid_create_send_queue(hdev, qid, hiraidq); + if (ret) + goto delete_cq; + + hiraidq->cq_vector = cq_vector; + ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL, + hiraidq, "hiraid%d_q%d", hdev->instance, qid); + if (ret) { + hiraidq->cq_vector = -1; + dev_err(hdev->dev, "request queue[%d] irq failed\n", qid); + goto delete_sq; + } + + hiraid_init_queue(hiraidq, qid); + + return 0; + +delete_sq: + hiraid_delete_send_queue(hdev, qid); +delete_cq: + hiraid_delete_complete_queue(hdev, qid); + + return ret; +} + +static int hiraid_create_io_queues(struct hiraid_dev *hdev) +{ + u32 i, max; + int ret = 0; + + max = min(hdev->max_qid, hdev->queue_count - 1); + for (i = hdev->online_queues; i <= max; i++) { + ret = hiraid_create_queue(&hdev->queues[i], i); + if (ret) { + dev_err(hdev->dev, "create queue[%d] failed\n", i); + break; + } + } + + if (!hdev->last_qcnt) + hdev->last_qcnt = hdev->online_queues; + + dev_info(hdev->dev, "queue_count[%d] online_queue[%d] last_online[%d]", + hdev->queue_count, hdev->online_queues, hdev->last_qcnt); + + return ret >= 0 ? 0 : ret; +} + +static int hiraid_set_features(struct hiraid_dev *hdev, u32 fid, + u32 dword11, void *buffer, + size_t buflen, u32 *result) +{ + struct hiraid_admin_command admin_cmd; + int ret; + u8 *data_ptr = NULL; + dma_addr_t buffer_phy = 0; + + if (buffer && buflen) { + data_ptr = dma_alloc_coherent(hdev->dev, buflen, + &buffer_phy, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memcpy(data_ptr, buffer, buflen); + } + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.features.opcode = HIRAID_ADMIN_SET_FEATURES; + admin_cmd.features.fid = cpu_to_le32(fid); + admin_cmd.features.dword11 = cpu_to_le32(dword11); + admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, result, NULL, 0); + + if (data_ptr) + dma_free_coherent(hdev->dev, buflen, data_ptr, buffer_phy); + + return ret; +} + +static int hiraid_configure_timestamp(struct hiraid_dev *hdev) +{ + __le64 timestamp; + int ret; + + timestamp = cpu_to_le64(ktime_to_ms(ktime_get_real())); + ret = hiraid_set_features(hdev, HIRAID_FEATURE_TIMESTAMP, 0, + ×tamp, sizeof(timestamp), NULL); + + if (ret) + dev_err(hdev->dev, "set timestamp failed[%d]\n", ret); + return ret; +} + +static int hiraid_get_queue_cnt(struct hiraid_dev *hdev, u32 *cnt) +{ + u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16); + u32 nr_ioqs, result; + int status; + + status = hiraid_set_features(hdev, HIRAID_FEATURE_NUM_QUEUES, + q_cnt, NULL, 0, &result); + if (status) { + dev_err(hdev->dev, "set queue count failed, status[%d]\n", + status); + return -EIO; + } + + nr_ioqs = min(result & 0xffff, result >> 16) + 1; + *cnt = min(*cnt, nr_ioqs); + if (*cnt == 0) { + dev_err(hdev->dev, "illegal qcnt: zero, nr_ioqs[%d], cnt[%d]\n", + nr_ioqs, *cnt); + return -EIO; + } + return 0; +} + +static int hiraid_setup_io_queues(struct 
hiraid_dev *hdev) +{ + struct hiraid_queue *adminq = &hdev->queues[0]; + struct pci_dev *pdev = hdev->pdev; + u32 i, size, nr_ioqs; + int ret; + + struct irq_affinity affd = { + .pre_vectors = 1 + }; + + /* alloc IO sense buffer for single hw queue mode */ + if (work_mode && !hdev->sense_buffer_virt) { + hdev->sense_buffer_virt = dma_alloc_coherent( + hdev->dev, + SENSE_SIZE(hdev->scsi_qd + + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ), + &hdev->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO); + if (!hdev->sense_buffer_virt) + return -ENOMEM; + } + + nr_ioqs = min(num_online_cpus(), max_hwq_num); + ret = hiraid_get_queue_cnt(hdev, &nr_ioqs); + if (ret < 0) + return ret; + + size = hiraid_get_bar_size(hdev, nr_ioqs); + ret = hiraid_remap_bar(hdev, size); + if (ret) + return -ENOMEM; + + adminq->q_db = hdev->dbs; + + pci_free_irq(pdev, 0, adminq); + pci_free_irq_vectors(pdev); + hdev->online_queues--; + + ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1), + PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, + &affd); + if (ret <= 0) + return -EIO; + + hdev->num_vecs = ret; + hdev->max_qid = max(ret - 1, 1); + + ret = pci_request_irq(pdev, adminq->cq_vector, hiraid_handle_irq, NULL, + adminq, "hiraid%d_q%d", hdev->instance, adminq->qid); + if (ret) { + dev_err(hdev->dev, "request admin irq failed\n"); + adminq->cq_vector = -1; + return ret; + } + + hdev->online_queues++; + + for (i = hdev->queue_count; i <= hdev->max_qid; i++) { + ret = hiraid_alloc_queue(hdev, i, hdev->ioq_depth); + if (ret) + break; + } + dev_info(hdev->dev, "max_qid[%d] queuecount[%d] onlinequeue[%d] ioqdepth[%d]\n", + hdev->max_qid, hdev->queue_count, + hdev->online_queues, hdev->ioq_depth); + + return hiraid_create_io_queues(hdev); +} + +static void hiraid_delete_io_queues(struct hiraid_dev *hdev) +{ + u16 queues = hdev->online_queues - 1; + u8 opcode = HIRAID_ADMIN_DELETE_SQ; + u16 i, pass; + + if (!pci_device_is_present(hdev->pdev)) { + dev_err(hdev->dev, "pci_device is not present, skip disable io queues\n"); + return; + } + + if (hdev->online_queues < 2) { + dev_err(hdev->dev, "err, io queue has been delete\n"); + return; + } + + for (pass = 0; pass < 2; pass++) { + for (i = queues; i > 0; i--) + if (hiraid_delete_queue(hdev, opcode, i)) + break; + + opcode = HIRAID_ADMIN_DELETE_CQ; + } +} + +static void hiraid_pci_disable(struct hiraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + u32 i; + + for (i = 0; i < hdev->online_queues; i++) + pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]); + pci_free_irq_vectors(pdev); + if (pci_is_enabled(pdev)) { + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } + hdev->online_queues = 0; +} + +static void hiraid_disable_admin_queue(struct hiraid_dev *hdev, + bool shutdown) +{ + struct hiraid_queue *adminq = &hdev->queues[0]; + u16 start, end; + + if (pci_device_is_present(hdev->pdev)) { + if (shutdown) + hiraid_shutdown_control(hdev); + else + hiraid_disable_control(hdev); + } + + if (hdev->queue_count == 0) { + dev_err(hdev->dev, "err, admin queue has been delete\n"); + return; + } + + spin_lock_irq(&adminq->cq_lock); + hiraid_process_cq(adminq, &start, &end, -1); + spin_unlock_irq(&adminq->cq_lock); + hiraid_complete_cqes(adminq, start, end); +} + +static int hiraid_create_prp_pools(struct hiraid_dev *hdev) +{ + int i; + char poolname[20] = { 0 }; + + hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev, + PAGE_SIZE, PAGE_SIZE, 0); + + if (!hdev->prp_page_pool) { + dev_err(hdev->dev, "create prp_page_pool failed\n"); + return -ENOMEM; + } + 
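+ /* + * The per-queue "small" pools created below are EXTRA_POOL_SIZE bytes + * each: hiraid_build_prp() and hiraid_build_sgl() fall back to them when + * a PRP or SGL list fits within EXTRA_POOL_SIZE, and hiraid_init_queue() + * hands them out round-robin via prp_extra_pool[qid % extra_pool_num]. + */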
+ for (i = 0; i < extra_pool_num; i++) { + sprintf(poolname, "prp_list_256_%d", i); + hdev->prp_extra_pool[i] = dma_pool_create(poolname, hdev->dev, + EXTRA_POOL_SIZE, + EXTRA_POOL_SIZE, 0); + + if (!hdev->prp_extra_pool[i]) { + dev_err(hdev->dev, "create extra pool[%d] fail\n", i); + goto destroy_prp_extra_pool; + } + } + + return 0; + +destroy_prp_extra_pool: + while (i > 0) + dma_pool_destroy(hdev->prp_extra_pool[--i]); + dma_pool_destroy(hdev->prp_page_pool); + + return -ENOMEM; +} + +static void hiraid_free_prp_pools(struct hiraid_dev *hdev) +{ + int i; + + for (i = 0; i < extra_pool_num; i++) + dma_pool_destroy(hdev->prp_extra_pool[i]); + dma_pool_destroy(hdev->prp_page_pool); +} + +static int hiraid_request_devices(struct hiraid_dev *hdev, + struct hiraid_dev_info *dev) +{ + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + struct hiraid_admin_command admin_cmd; + struct hiraid_dev_list *list_buf; + dma_addr_t buffer_phy = 0; + u32 i, idx, hdid, ndev; + int ret = 0; + + list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &buffer_phy, GFP_KERNEL); + if (!list_buf) + return -ENOMEM; + + for (idx = 0; idx < nd;) { + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO; + admin_cmd.get_info.type = HIRAID_GET_DEVLIST_INFO; + admin_cmd.get_info.cdw11 = cpu_to_le32(idx); + admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, + NULL, NULL, 0); + + if (ret) { + dev_err(hdev->dev, "get device list failed, nd[%u] idx[%u] ret[%d]\n", + nd, idx, ret); + goto out; + } + ndev = le32_to_cpu(list_buf->dev_num); + + dev_info(hdev->dev, "get dev list ndev num[%u]\n", ndev); + + for (i = 0; i < ndev; i++) { + hdid = le32_to_cpu(list_buf->devinfo[i].hdid); + dev_info(hdev->dev, "devices[%d], hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n", + i, hdid, + le16_to_cpu(list_buf->devinfo[i].target), + list_buf->devinfo[i].channel, + list_buf->devinfo[i].lun, + list_buf->devinfo[i].attr); + if (hdid > nd || hdid == 0) { + dev_err(hdev->dev, "err, hdid[%d] invalid\n", + hdid); + continue; + } + memcpy(&dev[hdid - 1], &list_buf->devinfo[i], + sizeof(struct hiraid_dev_info)); + } + idx += ndev; + + if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K) + break; + } + +out: + dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, buffer_phy); + return ret; +} + +static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid) +{ + struct hiraid_queue *adminq = &hdev->queues[0]; + struct hiraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.common.opcode = HIRAID_ADMIN_ASYNC_EVENT; + admin_cmd.common.cmd_id = cpu_to_le16(cid); + + hiraid_submit_cmd(adminq, &admin_cmd); + dev_info(hdev->dev, "send async event to controller, cid[%d]\n", cid); +} + +static inline void hiraid_init_async_event(struct hiraid_dev *hdev) +{ + u16 i; + + for (i = 0; i < hdev->ctrl_info->asynevent; i++) + hiraid_send_async_event(hdev, i + HIRAID_AQ_BLK_MQ_DEPTH); +} + +static int hiraid_add_device(struct hiraid_dev *hdev, + struct hiraid_dev_info *devinfo) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "add device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n", + le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target), + devinfo->channel, devinfo->lun, devinfo->attr); + + sdev = scsi_device_lookup(shost, devinfo->channel, + le16_to_cpu(devinfo->target), 0); + if (sdev) { + dev_warn(hdev->dev, "device is already exist, channel[%d] targetid[%d] lun[%d]\n", + 
devinfo->channel, le16_to_cpu(devinfo->target), 0); + scsi_device_put(sdev); + return -EEXIST; + } + scsi_add_device(shost, devinfo->channel, + le16_to_cpu(devinfo->target), 0); + return 0; +} + +static int hiraid_rescan_device(struct hiraid_dev *hdev, + struct hiraid_dev_info *devinfo) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "rescan device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n", + le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target), + devinfo->channel, devinfo->lun, devinfo->attr); + + sdev = scsi_device_lookup(shost, devinfo->channel, + le16_to_cpu(devinfo->target), 0); + if (!sdev) { + dev_warn(hdev->dev, "device does not exist, cannot rescan it, channel[%d] target_id[%d] lun[%d]\n", + devinfo->channel, le16_to_cpu(devinfo->target), 0); + return -ENODEV; + } + + scsi_rescan_device(&sdev->sdev_gendev); + scsi_device_put(sdev); + return 0; +} + +static int hiraid_delete_device(struct hiraid_dev *hdev, + struct hiraid_dev_info *devinfo) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "remove device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n", + le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target), + devinfo->channel, devinfo->lun, devinfo->attr); + + sdev = scsi_device_lookup(shost, devinfo->channel, + le16_to_cpu(devinfo->target), 0); + if (!sdev) { + dev_warn(hdev->dev, "device does not exist, cannot remove it, channel[%d] target_id[%d] lun[%d]\n", + devinfo->channel, le16_to_cpu(devinfo->target), 0); + return -ENODEV; + } + + scsi_remove_device(sdev); + scsi_device_put(sdev); + return 0; +} + +static int hiraid_dev_list_init(struct hiraid_dev *hdev) +{ + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + + hdev->dev_info = kzalloc_node(nd * sizeof(struct hiraid_dev_info), + GFP_KERNEL, hdev->numa_node); + if (!hdev->dev_info) + return -ENOMEM; + + return 0; +} + +static int hiraid_luntarget_sort(const void *l, const void *r) +{ + const struct hiraid_dev_info *ln = l; + const struct hiraid_dev_info *rn = r; + int l_attr = HIRAID_DEV_INFO_ATTR_BOOT(ln->attr); + int r_attr = HIRAID_DEV_INFO_ATTR_BOOT(rn->attr); + + /* boot first */ + if (l_attr != r_attr) + return (r_attr - l_attr); + + if (ln->channel == rn->channel) + return le16_to_cpu(ln->target) - le16_to_cpu(rn->target); + + return ln->channel - rn->channel; +} + +static void hiraid_scan_work(struct work_struct *work) +{ + struct hiraid_dev *hdev = + container_of(work, struct hiraid_dev, scan_work); + struct hiraid_dev_info *dev, *old_dev, *new_dev; + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + u8 flag, org_flag; + int i, ret; + int count = 0; + + dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL); + if (!dev) + return; + + new_dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL); + if (!new_dev) + goto free_list; + + ret = hiraid_request_devices(hdev, dev); + if (ret) + goto free_all; + old_dev = hdev->dev_info; + for (i = 0; i < nd; i++) { + org_flag = old_dev[i].flag; + flag = dev[i].flag; + + dev_log_dbg(hdev->dev, "i[%d] org_flag[0x%x] flag[0x%x]\n", + i, org_flag, flag); + + if (HIRAID_DEV_INFO_FLAG_VALID(flag)) { + if (!HIRAID_DEV_INFO_FLAG_VALID(org_flag)) { + down_write(&hdev->dev_rwsem); + memcpy(&old_dev[i], &dev[i], + sizeof(struct hiraid_dev_info)); + memcpy(&new_dev[count++], &dev[i], + sizeof(struct hiraid_dev_info)); + up_write(&hdev->dev_rwsem); + } else if (HIRAID_DEV_INFO_FLAG_CHANGE(flag)) { + hiraid_rescan_device(hdev, &dev[i]); + } + } else { + if 
(HIRAID_DEV_INFO_FLAG_VALID(org_flag)) { + down_write(&hdev->dev_rwsem); + old_dev[i].flag &= 0xfe; + up_write(&hdev->dev_rwsem); + hiraid_delete_device(hdev, &old_dev[i]); + } + } + } + + dev_info(hdev->dev, "scan work add device num[%d]\n", count); + + sort(new_dev, count, sizeof(new_dev[0]), hiraid_luntarget_sort, NULL); + + for (i = 0; i < count; i++) + hiraid_add_device(hdev, &new_dev[i]); + +free_all: + kfree(new_dev); +free_list: + kfree(dev); +} + +static void hiraid_timesyn_work(struct work_struct *work) +{ + struct hiraid_dev *hdev = + container_of(work, struct hiraid_dev, timesyn_work); + + hiraid_configure_timestamp(hdev); +} + +static int hiraid_init_control_info(struct hiraid_dev *hdev); +static void hiraid_fwactive_work(struct work_struct *work) +{ + struct hiraid_dev *hdev = + container_of(work, struct hiraid_dev, fwact_work); + + if (hiraid_init_control_info(hdev)) + dev_err(hdev->dev, "get controller info failed after fw activation\n"); +} + +static void hiraid_queue_scan(struct hiraid_dev *hdev) +{ + queue_work(work_queue, &hdev->scan_work); +} + +static void hiraid_handle_async_notice(struct hiraid_dev *hdev, + u32 result) +{ + switch ((result & 0xff00) >> 8) { + case HIRAID_ASYN_DEV_CHANGED: + hiraid_queue_scan(hdev); + break; + case HIRAID_ASYN_FW_ACT_START: + dev_info(hdev->dev, "fw activation starting\n"); + break; + case HIRAID_ASYN_HOST_PROBING: + break; + default: + dev_warn(hdev->dev, "async event result[%08x]\n", result); + } +} + +static void hiraid_handle_async_vs(struct hiraid_dev *hdev, + u32 result, u32 result1) +{ + switch ((result & 0xff00) >> 8) { + case HIRAID_ASYN_TIMESYN: + queue_work(work_queue, &hdev->timesyn_work); + break; + case HIRAID_ASYN_FW_ACT_FINISH: + dev_info(hdev->dev, "fw activation finish\n"); + queue_work(work_queue, &hdev->fwact_work); + break; + case HIRAID_ASYN_EVENT_MIN ... HIRAID_ASYN_EVENT_MAX: + dev_info(hdev->dev, "recv card event[%d] param1[0x%x] param2[0x%x]\n", + (result & 0xff00) >> 8, result, result1); + break; + default: + dev_warn(hdev->dev, "async event result[0x%x]\n", result); + } +} + +static int hiraid_alloc_resources(struct hiraid_dev *hdev) +{ + int ret, nqueue; + + hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, + hdev->numa_node); + if (!hdev->ctrl_info) + return -ENOMEM; + + ret = hiraid_create_prp_pools(hdev); + if (ret) + goto free_ctrl_info; + nqueue = min(num_possible_cpus(), max_hwq_num) + 1; + hdev->queues = kcalloc_node(nqueue, sizeof(struct hiraid_queue), + GFP_KERNEL, hdev->numa_node); + if (!hdev->queues) { + ret = -ENOMEM; + goto destroy_dma_pools; + } + + ret = hiraid_create_admin_cmds(hdev); + if (ret) + goto free_queues; + + dev_info(hdev->dev, "total queues num[%d]\n", nqueue); + + return 0; + +free_queues: + kfree(hdev->queues); +destroy_dma_pools: + hiraid_free_prp_pools(hdev); +free_ctrl_info: + kfree(hdev->ctrl_info); + + return ret; +} + +static void hiraid_free_resources(struct hiraid_dev *hdev) +{ + hiraid_free_admin_cmds(hdev); + kfree(hdev->queues); + hiraid_free_prp_pools(hdev); + kfree(hdev->ctrl_info); +} + +static void hiraid_bsg_buf_unmap(struct hiraid_dev *hdev, struct bsg_job *job) +{ + struct request *rq = blk_mq_rq_from_pdu(job); + struct hiraid_mapmange *mapbuf = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + + if (mapbuf->sge_cnt) + dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir); + + hiraid_free_mapbuf(hdev, mapbuf); +} + +static int hiraid_bsg_buf_map(struct hiraid_dev *hdev, struct bsg_job *job, + struct hiraid_admin_command *cmd) +{ + struct hiraid_bsg_request *bsg_req = job->request; + struct request *rq = blk_mq_rq_from_pdu(job); + struct hiraid_mapmange *mapbuf = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + int ret = 0; + + /* No data to DMA, it may be scsi no-rw command */ + mapbuf->sge_cnt = job->request_payload.sg_cnt; + mapbuf->sgl = job->request_payload.sg_list; + mapbuf->len = job->request_payload.payload_len; + mapbuf->page_cnt = -1; + if (unlikely(mapbuf->sge_cnt == 0)) + goto out; + + mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt); + + ret = dma_map_sg_attrs(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, + dma_dir, DMA_ATTR_NO_WARN); + if (!ret) + goto out; + + if ((mapbuf->use_sgl == (bool)true) && + (bsg_req->msgcode == HIRAID_BSG_IOPTHRU) && + (hdev->ctrl_info->pt_use_sgl != (bool)false)) { + ret = hiraid_build_passthru_sgl(hdev, cmd, mapbuf); + } else { + mapbuf->use_sgl = false; + + ret = hiraid_build_passthru_prp(hdev, mapbuf); + cmd->common.dptr.prp1 = + cpu_to_le64(sg_dma_address(mapbuf->sgl)); + cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma); + } + + if (ret) + goto unmap; + + return 0; + +unmap: + dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir); +out: + return ret; +} + +static int hiraid_get_control_info(struct hiraid_dev *hdev, + struct hiraid_ctrl_info *ctrl_info) +{ + struct hiraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t buffer_phy = 0; + int ret; + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &buffer_phy, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO; + admin_cmd.get_info.type = HIRAID_GET_CTRL_INFO; + admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(ctrl_info, data_ptr, sizeof(struct hiraid_ctrl_info)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy); + + return ret; +} + +static int hiraid_init_control_info(struct hiraid_dev *hdev) +{ + int ret; + + hdev->ctrl_info->nd = cpu_to_le32(240); + hdev->ctrl_info->mdts = 8; + hdev->ctrl_info->max_cmds = cpu_to_le16(4096); + hdev->ctrl_info->max_num_sge = cpu_to_le16(128); + hdev->ctrl_info->max_channel = cpu_to_le16(4); + hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239); + hdev->ctrl_info->max_lun = cpu_to_le16(2); + + ret = hiraid_get_control_info(hdev, hdev->ctrl_info); + if (ret) + dev_err(hdev->dev, "get controller info failed[%d]\n", ret); + + dev_info(hdev->dev, "device_num = %d\n", hdev->ctrl_info->nd); + dev_info(hdev->dev, "max_cmd = %d\n", hdev->ctrl_info->max_cmds); + dev_info(hdev->dev, "max_channel = %d\n", hdev->ctrl_info->max_channel); + dev_info(hdev->dev, "max_tgt_id = %d\n", hdev->ctrl_info->max_tgt_id); + dev_info(hdev->dev, "max_lun = %d\n", hdev->ctrl_info->max_lun); + dev_info(hdev->dev, "max_num_sge = %d\n", hdev->ctrl_info->max_num_sge); + dev_info(hdev->dev, "lunumboot = %d\n", hdev->ctrl_info->lun_num_boot); + dev_info(hdev->dev, "maxdata_transsize = %d\n", hdev->ctrl_info->mdts); + dev_info(hdev->dev, "abort_cmd_limit = %d\n", hdev->ctrl_info->acl); + dev_info(hdev->dev, "asynevent_num = %d\n", 
		hdev->ctrl_info->asynevent);
+	dev_info(hdev->dev, "card_type = %d\n", hdev->ctrl_info->card_type);
+	dev_info(hdev->dev, "pt_use_sgl = %d\n", hdev->ctrl_info->pt_use_sgl);
+	dev_info(hdev->dev, "rtd3e = %d\n", hdev->ctrl_info->rtd3e);
+	dev_info(hdev->dev, "serial_num = %s\n", hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "fw_version = %s\n", hdev->ctrl_info->fw_version);
+
+	if (!hdev->ctrl_info->asynevent)
+		hdev->ctrl_info->asynevent = 1;
+	if (hdev->ctrl_info->asynevent > HIRAID_ASYN_COMMANDS)
+		hdev->ctrl_info->asynevent = HIRAID_ASYN_COMMANDS;
+
+	hdev->scsi_qd = work_mode ?
+		le16_to_cpu(hdev->ctrl_info->max_cmds) :
+		(hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ);
+
+	return 0;
+}
+
+static int hiraid_user_send_admcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+	struct hiraid_bsg_request *bsg_req = job->request;
+	struct hiraid_passthru_common_cmd *ptcmd = &(bsg_req->admcmd);
+	struct hiraid_admin_command admin_cmd;
+	u32 timeout = msecs_to_jiffies(ptcmd->timeout_ms);
+	u32 result[2] = {0};
+	int status;
+
+	if (hdev->state >= DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host state[%d] is not right\n",
+			hdev->state);
+		return -EBUSY;
+	}
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.common.opcode = ptcmd->opcode;
+	admin_cmd.common.flags = ptcmd->flags;
+	admin_cmd.common.hdid = cpu_to_le32(ptcmd->nsid);
+	admin_cmd.common.cdw2[0] = cpu_to_le32(ptcmd->cdw2);
+	admin_cmd.common.cdw2[1] = cpu_to_le32(ptcmd->cdw3);
+	admin_cmd.common.cdw10 = cpu_to_le32(ptcmd->cdw10);
+	admin_cmd.common.cdw11 = cpu_to_le32(ptcmd->cdw11);
+	admin_cmd.common.cdw12 = cpu_to_le32(ptcmd->cdw12);
+	admin_cmd.common.cdw13 = cpu_to_le32(ptcmd->cdw13);
+	admin_cmd.common.cdw14 = cpu_to_le32(ptcmd->cdw14);
+	admin_cmd.common.cdw15 = cpu_to_le32(ptcmd->cdw15);
+
+	status = hiraid_bsg_buf_map(hdev, job, &admin_cmd);
+	if (status) {
+		dev_err(hdev->dev, "err, map data failed\n");
+		return status;
+	}
+
+	status = hiraid_put_admin_sync_request(hdev, &admin_cmd, &result[0],
+			&result[1], timeout);
+	if (status >= 0) {
+		job->reply_len = sizeof(result);
+		memcpy(job->reply, result, sizeof(result));
+	}
+	if (status)
+		dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] result0[0x%x];"
+			"result1[0x%x]\n", ptcmd->opcode,
+			ptcmd->info_0.subopcode, status,
+			result[0], result[1]);
+
+	hiraid_bsg_buf_unmap(hdev, job);
+
+	return status;
+}
+
+static int hiraid_alloc_io_ptcmds(struct hiraid_dev *hdev)
+{
+	u32 i;
+	u32 ptnum = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+
+	INIT_LIST_HEAD(&hdev->io_pt_list);
+	spin_lock_init(&hdev->io_pt_lock);
+
+	hdev->io_ptcmds = kcalloc_node(ptnum, sizeof(struct hiraid_cmd),
+			GFP_KERNEL, hdev->numa_node);
+
+	if (!hdev->io_ptcmds) {
+		dev_err(hdev->dev, "alloc io pthrunum failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ptnum; i++) {
+		hdev->io_ptcmds[i].qid = i / HIRAID_PTHRU_CMDS_PERQ + 1;
+		hdev->io_ptcmds[i].cid =
+			i % HIRAID_PTHRU_CMDS_PERQ + hdev->scsi_qd;
+		list_add_tail(&(hdev->io_ptcmds[i].list), &hdev->io_pt_list);
+	}
+
+	dev_info(hdev->dev, "alloc io pthru cmd suc, pthrunum[%d]\n", ptnum);
+
+	return 0;
+}
+
+static void hiraid_free_io_ptcmds(struct hiraid_dev *hdev)
+{
+	kfree(hdev->io_ptcmds);
+	hdev->io_ptcmds = NULL;
+
+	INIT_LIST_HEAD(&hdev->io_pt_list);
+}
+
+static int hiraid_put_io_sync_request(struct hiraid_dev *hdev,
+		struct hiraid_scsi_io_cmd *io_cmd,
+		u32 *result, u32 *reslen, u32 timeout)
+{
+	int ret;
+	dma_addr_t buffer_phy;
+	struct hiraid_queue *ioq;
+	void *sense_addr = NULL;
+	struct hiraid_cmd *pt_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_PTHRU);
+
+	if (!pt_cmd) {
+		dev_err(hdev->dev, "err, get ioq cmd failed\n");
+		return -EFAULT;
+	}
+
+	timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+	init_completion(&pt_cmd->cmd_done);
+
+	ioq = &hdev->queues[pt_cmd->qid];
+	if (work_mode) {
+		ret = ((pt_cmd->qid - 1) * HIRAID_PTHRU_CMDS_PERQ +
+			pt_cmd->cid) * SCSI_SENSE_BUFFERSIZE;
+		sense_addr = hdev->sense_buffer_virt + ret;
+		buffer_phy = hdev->sense_buffer_phy + ret;
+	} else {
+		ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+		sense_addr = ioq->sense_buffer_virt + ret;
+		buffer_phy = ioq->sense_buffer_phy + ret;
+	}
+
+	io_cmd->common.sense_addr = cpu_to_le64(buffer_phy);
+	io_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+	io_cmd->common.cmd_id = cpu_to_le16(pt_cmd->cid);
+
+	hiraid_submit_cmd(ioq, io_cmd);
+
+	if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+			pt_cmd->cid, pt_cmd->qid, io_cmd->common.opcode,
+			(le32_to_cpu(io_cmd->common.cdw3[0]) & 0xffff));
+
+		hiraid_admin_timeout(hdev, pt_cmd);
+
+		hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+		return -ETIME;
+	}
+
+	if (result && reslen) {
+		if ((pt_cmd->status & 0x17f) == 0x101) {
+			memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+			*reslen = SCSI_SENSE_BUFFERSIZE;
+		}
+	}
+
+	hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+
+	return pt_cmd->status;
+}
+
+static int hiraid_user_send_ptcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+	struct hiraid_bsg_request *bsg_req =
+		(struct hiraid_bsg_request *)(job->request);
+	struct hiraid_passthru_io_cmd *cmd = &(bsg_req->pthrucmd);
+	struct hiraid_scsi_io_cmd pthru_cmd;
+	int status = 0;
+	u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+	// data len was 4k before SGL support, now up to 1M with SGL
+	u32 io_pt_data_len = (hdev->ctrl_info->pt_use_sgl == (bool)true) ?
+		IOQ_PT_SGL_DATA_LEN : IOQ_PT_DATA_LEN;
+
+	if (cmd->data_len > io_pt_data_len) {
+		dev_err(hdev->dev, "data len bigger than %d\n", io_pt_data_len);
+		return -EFAULT;
+	}
+
+	if (hdev->state != DEV_LIVE) {
+		dev_err(hdev->dev, "err, host state[%d] is not live\n",
+			hdev->state);
+		return -EBUSY;
+	}
+
+	memset(&pthru_cmd, 0, sizeof(pthru_cmd));
+	pthru_cmd.common.opcode = cmd->opcode;
+	pthru_cmd.common.flags = cmd->flags;
+	pthru_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+	pthru_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+	pthru_cmd.common.cdb_len = cmd->info_0.cdb_len;
+	pthru_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+	pthru_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+	pthru_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+	pthru_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+	pthru_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+	pthru_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+	pthru_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+	pthru_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+	pthru_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+	pthru_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+	memcpy(pthru_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+	pthru_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+	pthru_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+	pthru_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+	pthru_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+	status = hiraid_bsg_buf_map(hdev, job,
+			(struct hiraid_admin_command *)&pthru_cmd);
+	if (status) {
+		dev_err(hdev->dev, "err, map data failed\n");
+		return status;
+	}
+
+	status = hiraid_put_io_sync_request(hdev, &pthru_cmd, job->reply,
+			&job->reply_len, timeout);
+
+	if (status)
+		dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] replylen[%d]\n",
+			cmd->opcode, cmd->info_1.subopcode,
+			status, job->reply_len);
+
+	hiraid_bsg_buf_unmap(hdev, job);
+
+	return status;
+}
+
+static bool hiraid_check_scmd_finished(struct scsi_cmnd *scmd)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	struct hiraid_queue *hiraidq;
+
+	hiraidq = mapbuf->hiraidq;
+	if (!hiraidq)
+		return false;
+	if (READ_ONCE(mapbuf->state) == CMD_COMPLETE ||
+		hiraid_poll_cq(hiraidq, mapbuf->cid)) {
+		dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+			mapbuf->cid, hiraidq->qid);
+		return true;
+	}
+	return false;
+}
+
+static enum blk_eh_timer_return hiraid_timed_out(struct scsi_cmnd *scmd)
+{
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+	if (hiraid_check_scmd_finished(scmd))
+		goto out;
+
+	if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+		if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_TIMEOUT) ==
+			CMD_FLIGHT)
+			return BLK_EH_DONE;
+	}
+out:
+	return BLK_EH_RESET_TIMER;
+}
+
+/* temporarily send abort commands via the admin queue */
+static int hiraid_send_abort_cmd(struct hiraid_dev *hdev, u32 hdid,
+		u16 qid, u16 cid)
+{
+	struct hiraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.abort.opcode = HIRAID_ADMIN_ABORT_CMD;
+	admin_cmd.abort.hdid = cpu_to_le32(hdid);
+	admin_cmd.abort.sqid = cpu_to_le16(qid);
+	admin_cmd.abort.cid = cpu_to_le16(cid);
+
+	return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* temporarily send reset commands via the admin queue */
+static int hiraid_send_reset_cmd(struct hiraid_dev *hdev, u8 type, u32 hdid)
+{
+	struct hiraid_admin_command
admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.reset.opcode = HIRAID_ADMIN_RESET; + admin_cmd.reset.hdid = cpu_to_le32(hdid); + admin_cmd.reset.type = type; + + return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0); +} + +static bool hiraid_dev_state_trans(struct hiraid_dev *hdev, + enum hiraid_dev_state new_state) +{ + unsigned long flags; + enum hiraid_dev_state old_state; + bool change = false; + + spin_lock_irqsave(&hdev->state_lock, flags); + + old_state = hdev->state; + switch (new_state) { + case DEV_LIVE: + switch (old_state) { + case DEV_NEW: + case DEV_RESETTING: + change = true; + break; + default: + break; + } + break; + case DEV_RESETTING: + switch (old_state) { + case DEV_LIVE: + change = true; + break; + default: + break; + } + break; + case DEV_DELETING: + if (old_state != DEV_DELETING) + change = true; + break; + case DEV_DEAD: + switch (old_state) { + case DEV_NEW: + case DEV_LIVE: + case DEV_RESETTING: + change = true; + break; + default: + break; + } + break; + default: + break; + } + if (change) + hdev->state = new_state; + spin_unlock_irqrestore(&hdev->state_lock, flags); + + dev_info(hdev->dev, "oldstate[%d]->newstate[%d], change[%d]\n", + old_state, new_state, change); + + return change; +} + +static void hiraid_drain_pending_ios(struct hiraid_dev *hdev); + +static void hiraid_flush_running_cmds(struct hiraid_dev *hdev) +{ + int i, j; + + scsi_block_requests(hdev->shost); + hiraid_drain_pending_ios(hdev); + scsi_unblock_requests(hdev->shost); + + j = HIRAID_AQ_BLK_MQ_DEPTH; + for (i = 0; i < j; i++) { + if (READ_ONCE(hdev->adm_cmds[i].state) == CMD_FLIGHT) { + dev_info(hdev->dev, "flush admin, cid[%d]\n", i); + hdev->adm_cmds[i].status = 0xFFFF; + WRITE_ONCE(hdev->adm_cmds[i].state, CMD_COMPLETE); + complete(&(hdev->adm_cmds[i].cmd_done)); + } + } + + j = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1); + for (i = 0; i < j; i++) { + if (READ_ONCE(hdev->io_ptcmds[i].state) == CMD_FLIGHT) { + hdev->io_ptcmds[i].status = 0xFFFF; + WRITE_ONCE(hdev->io_ptcmds[i].state, CMD_COMPLETE); + complete(&(hdev->io_ptcmds[i].cmd_done)); + } + } +} + +static int hiraid_dev_disable(struct hiraid_dev *hdev, bool shutdown) +{ + int ret = -ENODEV; + struct hiraid_queue *adminq = &hdev->queues[0]; + u16 start, end; + + if (pci_device_is_present(hdev->pdev)) { + if (shutdown) + hiraid_shutdown_control(hdev); + else + ret = hiraid_disable_control(hdev); + } + + if (hdev->queue_count == 0) { + dev_err(hdev->dev, "warn: queue has been delete\n"); + return ret; + } + + spin_lock_irq(&adminq->cq_lock); + hiraid_process_cq(adminq, &start, &end, -1); + spin_unlock_irq(&adminq->cq_lock); + hiraid_complete_cqes(adminq, start, end); + + hiraid_pci_disable(hdev); + + hiraid_flush_running_cmds(hdev); + + return ret; +} + +static void hiraid_reset_work(struct work_struct *work) +{ + int ret = 0; + struct hiraid_dev *hdev = + container_of(work, struct hiraid_dev, reset_work); + + if (hdev->state != DEV_RESETTING) { + dev_err(hdev->dev, "err, host is not reset state\n"); + return; + } + + dev_info(hdev->dev, "enter host reset\n"); + + if (hdev->ctrl_config & HIRAID_CC_ENABLE) { + dev_info(hdev->dev, "start dev_disable\n"); + ret = hiraid_dev_disable(hdev, false); + } + + if (ret) + goto out; + + ret = hiraid_pci_enable(hdev); + if (ret) + goto out; + + ret = hiraid_setup_admin_queue(hdev); + if (ret) + goto pci_disable; + + ret = hiraid_setup_io_queues(hdev); + if (ret || hdev->online_queues != hdev->last_qcnt) + goto pci_disable; + + hiraid_dev_state_trans(hdev, 
DEV_LIVE); + + hiraid_init_async_event(hdev); + + hiraid_queue_scan(hdev); + + return; + +pci_disable: + hiraid_pci_disable(hdev); +out: + hiraid_dev_state_trans(hdev, DEV_DEAD); + dev_err(hdev->dev, "err, host reset failed\n"); +} + +static int hiraid_reset_work_sync(struct hiraid_dev *hdev) +{ + if (!hiraid_dev_state_trans(hdev, DEV_RESETTING)) { + dev_info(hdev->dev, "can't change to reset state\n"); + return -EBUSY; + } + + if (!queue_work(work_queue, &hdev->reset_work)) { + dev_err(hdev->dev, "err, host is already in reset state\n"); + return -EBUSY; + } + + flush_work(&hdev->reset_work); + if (hdev->state != DEV_LIVE) + return -ENODEV; + + return 0; +} + +static int hiraid_wait_io_completion(struct hiraid_mapmange *mapbuf) +{ + u16 times = 0; + + do { + if (READ_ONCE(mapbuf->state) == CMD_TMO_COMPLETE) + break; + msleep(500); + times++; + } while (times <= HIRAID_WAIT_ABNL_CMD_TIMEOUT); + + /* wait command completion timeout after abort/reset success */ + if (times >= HIRAID_WAIT_ABNL_CMD_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static void hiraid_tgt_rst_pending_io_count(struct request *rq, + void *data, bool reserved) +{ + unsigned int id = *(unsigned int *)data; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); + struct hiraid_mapmange *mapbuf; + struct hiraid_sdev_hostdata *hostdata; + + if (scmd) { + mapbuf = scsi_cmd_priv(scmd); + if ((mapbuf->state == CMD_FLIGHT) || + (mapbuf->state == CMD_TIMEOUT)) { + if ((scmd->device) && (scmd->device->id == id)) { + hostdata = scmd->device->hostdata; + hostdata->pend_count++; + } + } + } +} +static void hiraid_clean_pending_io(struct request *rq, + void *data, bool reserved) +{ + struct hiraid_dev *hdev = data; + struct scsi_cmnd *scmd; + struct hiraid_mapmange *mapbuf; + + if (unlikely(!rq || !blk_mq_request_started(rq))) + return; + + scmd = blk_mq_rq_to_pdu(rq); + mapbuf = scsi_cmd_priv(scmd); + + if ((cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) && + (cmpxchg(&mapbuf->state, CMD_TIMEOUT, CMD_COMPLETE) != CMD_TIMEOUT)) + return; + + set_host_byte(scmd, DID_NO_CONNECT); + if (mapbuf->sge_cnt) + scsi_dma_unmap(scmd); + hiraid_free_mapbuf(hdev, mapbuf); + dev_warn_ratelimited(hdev->dev, "back unfinished CQE, cid[%d] qid[%d]\n", + mapbuf->cid, mapbuf->hiraidq->qid); + scmd->scsi_done(scmd); +} + +static void hiraid_drain_pending_ios(struct hiraid_dev *hdev) +{ + blk_mq_tagset_busy_iter(&hdev->shost->tag_set, + hiraid_clean_pending_io, (void *)(hdev)); +} + +static int wait_tgt_reset_io_done(struct scsi_cmnd *scmd) +{ + u16 timeout = 0; + struct hiraid_sdev_hostdata *hostdata; + struct hiraid_dev *hdev = shost_priv(scmd->device->host); + + hostdata = scmd->device->hostdata; + + do { + hostdata->pend_count = 0; + blk_mq_tagset_busy_iter(&hdev->shost->tag_set, + hiraid_tgt_rst_pending_io_count, + (void *)(&scmd->device->id)); + + if (!hostdata->pend_count) + return 0; + + msleep(500); + timeout++; + } while (timeout <= HIRAID_WAIT_RST_IO_TIMEOUT); + + return -ETIMEDOUT; +} + +static int hiraid_abort(struct scsi_cmnd *scmd) +{ + struct hiraid_dev *hdev = shost_priv(scmd->device->host); + struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd); + struct hiraid_sdev_hostdata *hostdata; + u16 hwq, cid; + int ret; + + scsi_print_command(scmd); + + if (hdev->state != DEV_LIVE || !hiraid_wait_io_completion(mapbuf) || + hiraid_check_scmd_finished(scmd)) + return SUCCESS; + + hostdata = scmd->device->hostdata; + cid = mapbuf->cid; + hwq = mapbuf->hiraidq->qid; + + dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", 
		cid, hwq);
+	ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+	if (ret != -ETIME) {
+		ret = hiraid_wait_io_completion(mapbuf);
+		if (ret) {
+			dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, not found\n",
+				cid, hwq);
+			return FAILED;
+		}
+		dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+		return SUCCESS;
+	}
+	dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+		cid, hwq);
+	return FAILED;
+}
+
+static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+	struct hiraid_sdev_hostdata *hostdata;
+	int ret;
+
+	if (hdev->state != DEV_LIVE)
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+
+	dev_warn(hdev->dev, "sdev[%d:%d] send %s reset\n",
+		scmd->device->channel, scmd->device->id,
+		rst ? "bus" : "target");
+	ret = hiraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+	if ((ret == 0) ||
+		(ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
+		if (rst == HIRAID_RESET_TARGET) {
+			ret = wait_tgt_reset_io_done(scmd);
+			if (ret) {
+				dev_warn(hdev->dev, "sdev[%d:%d] target has %d pending cmd, target reset failed\n",
+					scmd->device->channel, scmd->device->id,
+					hostdata->pend_count);
+				return FAILED;
+			}
+		}
+		dev_warn(hdev->dev, "sdev[%d:%d] %s reset success\n",
+			scmd->device->channel, scmd->device->id,
+			rst ? "bus" : "target");
+		return SUCCESS;
+	}
+
+	dev_warn(hdev->dev, "sdev[%d:%d] %s reset failed\n",
+		scmd->device->channel, scmd->device->id,
+		rst ? "bus" : "target");
+	return FAILED;
+}
+
+static int hiraid_target_reset(struct scsi_cmnd *scmd)
+{
+	return hiraid_scsi_reset(scmd, HIRAID_RESET_TARGET);
+}
+
+static int hiraid_bus_reset(struct scsi_cmnd *scmd)
+{
+	return hiraid_scsi_reset(scmd, HIRAID_RESET_BUS);
+}
+
+static int hiraid_host_reset(struct scsi_cmnd *scmd)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+	if (hdev->state != DEV_LIVE)
+		return SUCCESS;
+
+	dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
+		scmd->device->channel, scmd->device->id);
+	if (hiraid_reset_work_sync(hdev) == -EBUSY)
+		flush_work(&hdev->reset_work);
+
+	if (hdev->state != DEV_LIVE) {
+		dev_warn(hdev->dev, "sdev[%d:%d] host reset failed\n",
+			scmd->device->channel, scmd->device->id);
+		return FAILED;
+	}
+
+	dev_warn(hdev->dev, "sdev[%d:%d] host reset success\n",
+		scmd->device->channel, scmd->device->id);
+
+	return SUCCESS;
+}
+
+static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
+		pci_channel_state_t state)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "pci error detected, state[%d]\n", state);
+
+	switch (state) {
+	case pci_channel_io_normal:
+		dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+		scsi_block_requests(hdev->shost);
+
+		hiraid_dev_state_trans(hdev, DEV_RESETTING);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_warn(hdev->dev, "channel io failure, disconnect\n");
+
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hiraid_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "restart after slot reset\n");
+
+	pci_restore_state(pdev);
+
+	if (!queue_work(work_queue, &hdev->reset_work)) {
+		dev_err(hdev->dev, "err, the device is resetting state\n");
+		return PCI_ERS_RESULT_NONE;
+	}
+
+
flush_work(&hdev->reset_work); + + scsi_unblock_requests(hdev->shost); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void hiraid_reset_pci_finish(struct pci_dev *pdev) +{ + struct hiraid_dev *hdev = pci_get_drvdata(pdev); + + dev_info(hdev->dev, "enter hiraid reset finish\n"); +} + +static ssize_t csts_pp_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + HIRAID_REG_CSTS) & + HIRAID_CSTS_PP_MASK); + ret >>= HIRAID_CSTS_PP_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_shst_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + HIRAID_REG_CSTS) & + HIRAID_CSTS_SHST_MASK); + ret >>= HIRAID_CSTS_SHST_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_cfs_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + HIRAID_REG_CSTS) & + HIRAID_CSTS_CFS_MASK); + ret >>= HIRAID_CSTS_CFS_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_rdy_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) + ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fw_version); +} + +static ssize_t driver_canqueue_store(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + struct Scsi_Host *shost = class_to_shost(cdev); + + // if single hw queue, this parameter is not needed. 
+ if (work_mode) + return -EINVAL; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + if (val > MAX_CAN_QUEUE || val < MIN_CAN_QUEUE) + return -EINVAL; + shost->can_queue = val; + + return strlen(buf); +} + +static ssize_t driver_canqueue_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", shost->can_queue); +} + +static ssize_t hdd_dispatch_store(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + if (val < DISPATCH_BY_CPU || val > DISPATCH_BY_DISK) + return -EINVAL; + hdev->hdd_dispatch = val; + + return strlen(buf); +} +static ssize_t hdd_dispatch_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct hiraid_dev *hdev = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", hdev->hdd_dispatch); +} + +static DEVICE_ATTR_RO(csts_pp); +static DEVICE_ATTR_RO(csts_shst); +static DEVICE_ATTR_RO(csts_cfs); +static DEVICE_ATTR_RO(csts_rdy); +static DEVICE_ATTR_RO(fw_version); +static DEVICE_ATTR_RW(hdd_dispatch); +static DEVICE_ATTR_RW(driver_canqueue); + +static struct device_attribute *hiraid_host_attrs[] = { + &dev_attr_csts_rdy, + &dev_attr_csts_pp, + &dev_attr_csts_cfs, + &dev_attr_fw_version, + &dev_attr_csts_shst, + &dev_attr_driver_canqueue, + &dev_attr_hdd_dispatch, + NULL, +}; + +static int hiraid_get_vd_info(struct hiraid_dev *hdev, + struct hiraid_vd_info *vd_info, u16 vid) +{ + struct hiraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t buffer_phy = 0; + int ret; + + if (hdev->state >= DEV_RESETTING) { + dev_err(hdev->dev, "err, host state[%d] is not right\n", + hdev->state); + return -EBUSY; + } + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &buffer_phy, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN); + admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid); + admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, + NULL, NULL, USRCMD_TIMEOUT); + if (!ret) + memcpy(vd_info, data_ptr, sizeof(struct hiraid_vd_info)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy); + + return ret; +} + +static int hiraid_get_bgtask(struct hiraid_dev *hdev, + struct hiraid_bgtask *bgtask) +{ + struct hiraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t buffer_phy = 0; + int ret; + + if (hdev->state >= DEV_RESETTING) { + dev_err(hdev->dev, "err, host state[%d] is not right\n", + hdev->state); + return -EBUSY; + } + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &buffer_phy, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy); + + ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, + NULL, NULL, USRCMD_TIMEOUT); + if (!ret) 
+ memcpy(bgtask, data_ptr, sizeof(struct hiraid_bgtask)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy); + + return ret; +} + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct hiraid_dev *hdev; + struct hiraid_vd_info *vd_info; + struct hiraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = hiraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) + vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1; + + ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ? + vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1); + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]); +} + +static ssize_t raid_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct hiraid_dev *hdev; + struct hiraid_vd_info *vd_info; + struct hiraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = hiraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) { + vd_info->vd_status = 0; + vd_info->rg_id = 0xff; + } + + ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? + vd_info->vd_status : 0; + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]); +} + +static ssize_t raid_resync_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct hiraid_dev *hdev; + struct hiraid_vd_info *vd_info; + struct hiraid_bgtask *bgtask; + struct hiraid_sdev_hostdata *hostdata; + u8 rg_id, i, progress = 0; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = hiraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) + goto out; + + rg_id = vd_info->rg_id; + + bgtask = (struct hiraid_bgtask *)vd_info; + ret = hiraid_get_bgtask(hdev, bgtask); + if (ret) + goto out; + for (i = 0; i < bgtask->task_num; i++) { + if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) && + (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id)) + progress = bgtask->bgtask[i].progress; + } + +out: + kfree(vd_info); + return snprintf(buf, PAGE_SIZE, "%d\n", progress); +} + +static ssize_t dispatch_hwq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hiraid_sdev_hostdata *hostdata; + + hostdata = to_scsi_device(dev)->hostdata; + return snprintf(buf, PAGE_SIZE, "%d\n", hostdata->hwq); +} + +static ssize_t dispatch_hwq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + struct hiraid_dev *hdev; + struct scsi_device *sdev; + struct hiraid_sdev_hostdata *hostdata; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + if (val <= 0 || val >= hdev->online_queues) + return -EINVAL; + if (!hiraid_disk_is_hdd(hostdata->attr)) + return -EINVAL; + + 
hostdata->hwq = val; + return strlen(buf); +} + +static DEVICE_ATTR_RO(raid_level); +static DEVICE_ATTR_RO(raid_state); +static DEVICE_ATTR_RO(raid_resync); +static DEVICE_ATTR_RW(dispatch_hwq); + +static struct device_attribute *hiraid_dev_attrs[] = { + &dev_attr_raid_state, + &dev_attr_raid_level, + &dev_attr_raid_resync, + &dev_attr_dispatch_hwq, + NULL, +}; + +static struct pci_error_handlers hiraid_err_handler = { + .error_detected = hiraid_pci_error_detected, + .slot_reset = hiraid_pci_slot_reset, + .reset_done = hiraid_reset_pci_finish, +}; + +static int hiraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type) +{ + int ret; + struct hiraid_dev *hdev = shost_priv(shost); + + dev_info(hdev->dev, "start sysfs host reset cmd\n"); + ret = hiraid_reset_work_sync(hdev); + dev_info(hdev->dev, "stop sysfs host reset cmd[%d]\n", ret); + + return ret; +} + +static int hiraid_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct hiraid_dev *hdev = shost_priv(shost); + + hiraid_scan_work(&hdev->scan_work); + + return 1; +} + +static struct scsi_host_template hiraid_driver_template = { + .module = THIS_MODULE, + .name = "hiraid", + .proc_name = "hiraid", + .queuecommand = hiraid_queue_command, + .slave_alloc = hiraid_slave_alloc, + .slave_destroy = hiraid_slave_destroy, + .slave_configure = hiraid_slave_configure, + .scan_finished = hiraid_scan_finished, + .eh_timed_out = hiraid_timed_out, + .eh_abort_handler = hiraid_abort, + .eh_target_reset_handler = hiraid_target_reset, + .eh_bus_reset_handler = hiraid_bus_reset, + .eh_host_reset_handler = hiraid_host_reset, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .use_clustering = ENABLE_CLUSTERING, + .force_blk_mq = 1, + .unchecked_isa_dma = 0, + .shost_attrs = hiraid_host_attrs, + .sdev_attrs = hiraid_dev_attrs, + .host_reset = hiraid_sysfs_host_reset, +}; + +static void hiraid_shutdown(struct pci_dev *pdev) +{ + struct hiraid_dev *hdev = pci_get_drvdata(pdev); + + hiraid_delete_io_queues(hdev); + hiraid_disable_admin_queue(hdev, true); +} + +static bool hiraid_bsg_is_valid(struct bsg_job *job) +{ + u64 timeout = 0; + struct request *rq = blk_mq_rq_from_pdu(job); + struct hiraid_bsg_request *bsg_req = job->request; + struct hiraid_dev *hdev = shost_priv(dev_to_shost(job->dev)); + + if (bsg_req == NULL || job->request_len != + sizeof(struct hiraid_bsg_request)) + return false; + + switch (bsg_req->msgcode) { + case HIRAID_BSG_ADMIN: + timeout = msecs_to_jiffies(bsg_req->admcmd.timeout_ms); + break; + case HIRAID_BSG_IOPTHRU: + timeout = msecs_to_jiffies(bsg_req->pthrucmd.timeout_ms); + break; + default: + dev_info(hdev->dev, "bsg unsupport msgcode[%d]\n", + bsg_req->msgcode); + return false; + } + + if ((timeout + CTL_RST_TIME) > rq->timeout) { + dev_err(hdev->dev, "bsg invalid time\n"); + return false; + } + + return true; +} + +/* bsg dispatch user command */ +static int hiraid_bsg_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = dev_to_shost(job->dev); + struct hiraid_dev *hdev = shost_priv(shost); + struct request *rq = blk_mq_rq_from_pdu(job); + struct hiraid_bsg_request *bsg_req = job->request; + int ret = -ENOMSG; + + job->reply_len = 0; + + if (!hiraid_bsg_is_valid(job)) { + bsg_job_done(job, ret, 0); + return 0; + } + + dev_log_dbg(hdev->dev, "bsg msgcode[%d] msglen[%d] timeout[%d];" + "reqnsge[%d], reqlen[%d]\n", + bsg_req->msgcode, job->request_len, rq->timeout, + job->request_payload.sg_cnt, job->request_payload.payload_len); + + switch (bsg_req->msgcode) { + case HIRAID_BSG_ADMIN: 
+ ret = hiraid_user_send_admcmd(hdev, job); + break; + case HIRAID_BSG_IOPTHRU: + ret = hiraid_user_send_ptcmd(hdev, job); + break; + default: + break; + } + + if (ret > 0) + ret = ret | (ret << 8); + + bsg_job_done(job, ret, 0); + return 0; +} + +static inline void hiraid_unregist_bsg(struct hiraid_dev *hdev) +{ + if (hdev->bsg_queue) { + bsg_unregister_queue(hdev->bsg_queue); + blk_cleanup_queue(hdev->bsg_queue); + } +} +static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hiraid_dev *hdev; + struct Scsi_Host *shost; + int node, ret; + char bsg_name[15]; + + shost = scsi_host_alloc(&hiraid_driver_template, sizeof(*hdev)); + if (!shost) { + dev_err(&pdev->dev, "failed to allocate scsi host\n"); + return -ENOMEM; + } + hdev = shost_priv(shost); + hdev->pdev = pdev; + hdev->dev = get_device(&pdev->dev); + + node = dev_to_node(hdev->dev); + if (node == NUMA_NO_NODE) { + node = first_memory_node; + set_dev_node(hdev->dev, node); + } + hdev->numa_node = node; + hdev->shost = shost; + hdev->instance = shost->host_no; + pci_set_drvdata(pdev, hdev); + + ret = hiraid_dev_map(hdev); + if (ret) + goto put_dev; + + init_rwsem(&hdev->dev_rwsem); + INIT_WORK(&hdev->scan_work, hiraid_scan_work); + INIT_WORK(&hdev->timesyn_work, hiraid_timesyn_work); + INIT_WORK(&hdev->reset_work, hiraid_reset_work); + INIT_WORK(&hdev->fwact_work, hiraid_fwactive_work); + spin_lock_init(&hdev->state_lock); + + ret = hiraid_alloc_resources(hdev); + if (ret) + goto dev_unmap; + + ret = hiraid_pci_enable(hdev); + if (ret) + goto resources_free; + + ret = hiraid_setup_admin_queue(hdev); + if (ret) + goto pci_disable; + + ret = hiraid_init_control_info(hdev); + if (ret) + goto disable_admin_q; + + ret = hiraid_setup_io_queues(hdev); + if (ret) + goto disable_admin_q; + + hiraid_shost_init(hdev); + + ret = scsi_add_host(hdev->shost, hdev->dev); + if (ret) { + dev_err(hdev->dev, "add shost to system fail, ret[%d]\n", ret); + goto remove_io_queues; + } + + snprintf(bsg_name, sizeof(bsg_name), "hiraid%d", shost->host_no); + hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, + hiraid_bsg_dispatch, + hiraid_get_max_cmd_size(hdev)); + if (IS_ERR(hdev->bsg_queue)) { + dev_err(hdev->dev, "err, setup bsg failed\n"); + hdev->bsg_queue = NULL; + goto remove_io_queues; + } + + if (hdev->online_queues == HIRAID_ADMIN_QUEUE_NUM) { + dev_warn(hdev->dev, "warn: only admin queue can be used\n"); + return 0; + } + + hdev->state = DEV_LIVE; + + hiraid_init_async_event(hdev); + + ret = hiraid_dev_list_init(hdev); + if (ret) + goto unregist_bsg; + + ret = hiraid_configure_timestamp(hdev); + if (ret) + dev_warn(hdev->dev, "time synchronization failed\n"); + + ret = hiraid_alloc_io_ptcmds(hdev); + if (ret) + goto unregist_bsg; + + scsi_scan_host(hdev->shost); + + return 0; + +unregist_bsg: + hiraid_unregist_bsg(hdev); +remove_io_queues: + hiraid_delete_io_queues(hdev); +disable_admin_q: + hiraid_free_sense_buffer(hdev); + hiraid_disable_admin_queue(hdev, false); +pci_disable: + hiraid_free_all_queues(hdev); + hiraid_pci_disable(hdev); +resources_free: + hiraid_free_resources(hdev); +dev_unmap: + hiraid_dev_unmap(hdev); +put_dev: + put_device(hdev->dev); + scsi_host_put(shost); + + return -ENODEV; +} + +static void hiraid_remove(struct pci_dev *pdev) +{ + struct hiraid_dev *hdev = pci_get_drvdata(pdev); + struct Scsi_Host *shost = hdev->shost; + + dev_info(hdev->dev, "enter hiraid remove\n"); + + hiraid_dev_state_trans(hdev, DEV_DELETING); + flush_work(&hdev->reset_work); + + if 
(!pci_device_is_present(pdev)) + hiraid_flush_running_cmds(hdev); + + hiraid_unregist_bsg(hdev); + scsi_remove_host(shost); + hiraid_free_io_ptcmds(hdev); + kfree(hdev->dev_info); + hiraid_delete_io_queues(hdev); + hiraid_free_sense_buffer(hdev); + hiraid_disable_admin_queue(hdev, false); + hiraid_free_all_queues(hdev); + hiraid_pci_disable(hdev); + hiraid_free_resources(hdev); + hiraid_dev_unmap(hdev); + put_device(hdev->dev); + scsi_host_put(shost); + + dev_info(hdev->dev, "exit hiraid remove\n"); +} + +static const struct pci_device_id hiraid_hw_card_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, + HIRAID_SERVER_DEVICE_HBA_DID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, + HIRAID_SERVER_DEVICE_RAID_DID) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hiraid_hw_card_ids); + +static struct pci_driver hiraid_driver = { + .name = "hiraid", + .id_table = hiraid_hw_card_ids, + .probe = hiraid_probe, + .remove = hiraid_remove, + .shutdown = hiraid_shutdown, + .err_handler = &hiraid_err_handler, +}; + +static int __init hiraid_init(void) +{ + int ret; + + work_queue = alloc_workqueue("hiraid-wq", WQ_UNBOUND | + WQ_MEM_RECLAIM | WQ_SYSFS, 0); + if (!work_queue) + return -ENOMEM; + + hiraid_class = class_create(THIS_MODULE, "hiraid"); + if (IS_ERR(hiraid_class)) { + ret = PTR_ERR(hiraid_class); + goto destroy_wq; + } + + ret = pci_register_driver(&hiraid_driver); + if (ret < 0) + goto destroy_class; + + return 0; + +destroy_class: + class_destroy(hiraid_class); +destroy_wq: + destroy_workqueue(work_queue); + + return ret; +} + +static void __exit hiraid_exit(void) +{ + pci_unregister_driver(&hiraid_driver); + class_destroy(hiraid_class); + destroy_workqueue(work_queue); +} + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION("Huawei RAID driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(HIRAID_DRV_VERSION); +module_init(hiraid_init); +module_exit(hiraid_exit); diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 6c7d2e201abed7ec29e96bea97852ad8f3812a23..2880e4e22a73ea45571c8409e1710ffb73e40555 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -14,9 +14,11 @@ #include #include +#include #include #include #include +#include #include #include #include @@ -24,16 +26,18 @@ #include #include #include +#include #include #include #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 -#define HISI_SAS_QUEUE_SLOTS 512 +#define HISI_SAS_QUEUE_SLOTS 4096 #define HISI_SAS_MAX_ITCT_ENTRIES 1024 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES #define HISI_SAS_RESET_BIT 0 #define HISI_SAS_REJECT_CMD_BIT 1 +#define HISI_SAS_RESERVED_IPTT_CNT 96 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) @@ -54,6 +58,12 @@ #define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf) #define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma) +#define hisi_sas_sge_dif_addr(buf) \ + (buf + offsetof(struct hisi_sas_slot_dif_buf_table, \ + sge_dif_page)) +#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr(slot->buf) +#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr(slot->buf_dma) + #define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024) #define HISI_SAS_MAX_SMP_RESP_SZ 1028 #define HISI_SAS_MAX_STP_RESP_SZ 28 @@ -68,6 +78,19 @@ #define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8 #define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10 +#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \ + 
SHOST_DIF_TYPE2_PROTECTION | \ + SHOST_DIF_TYPE3_PROTECTION) + +#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \ + SHOST_DIX_TYPE2_PROTECTION | \ + SHOST_DIX_TYPE3_PROTECTION) + +#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK) + +#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ) +#define CLEAR_ITCT_TIMEOUT 20 + struct hisi_hba; enum { @@ -76,8 +99,9 @@ enum { }; enum dev_status { + HISI_SAS_DEV_INIT, HISI_SAS_DEV_NORMAL, - HISI_SAS_DEV_EH, + HISI_SAS_DEV_NCQ_ERR, }; enum { @@ -91,6 +115,12 @@ enum hisi_sas_dev_type { HISI_SAS_DEV_TYPE_SATA, }; +enum datapres_field { + NO_DATA = 0, + RESPONSE_DATA = 1, + SENSE_DATA = 2, +}; + struct hisi_sas_hw_error { u32 irq_msk; u32 msk; @@ -138,16 +168,20 @@ struct hisi_sas_phy { struct asd_sas_phy sas_phy; struct sas_identify identify; struct completion *reset_completion; + struct timer_list timer; spinlock_t lock; u64 port_id; /* from hw */ u64 frame_rcvd_size; u8 frame_rcvd[32]; u8 phy_attached; u8 in_reset; - u8 reserved[2]; + u8 need_notify; + u8 reserved; u32 phy_type; enum sas_linkrate minimum_linkrate; enum sas_linkrate maximum_linkrate; + u32 code_error_count; + int enable; }; struct hisi_sas_port { @@ -178,9 +212,10 @@ struct hisi_sas_device { struct hisi_sas_dq *dq; struct list_head list; enum sas_device_type dev_type; - int device_id; + unsigned int device_id; int sata_idx; - u8 dev_status; + spinlock_t lock; + enum dev_status dev_status; }; struct hisi_sas_tmf_task { @@ -196,12 +231,14 @@ struct hisi_sas_slot { struct sas_task *task; struct hisi_sas_port *port; u64 n_elem; + u64 n_elem_dif; int dlvry_queue; int dlvry_queue_slot; int cmplt_queue; int cmplt_queue_slot; int abort; int ready; + int device_id; void *cmd_hdr; dma_addr_t cmd_hdr_dma; struct timer_list internal_abort_timer; @@ -213,15 +250,54 @@ struct hisi_sas_slot { int idx; }; +#define HISI_SAS_DEBUGFS_REG(x) {#x, x} + +struct hisi_sas_debugfs_reg_lu { + char *name; + int off; +}; + +struct hisi_sas_debugfs_reg { + const struct hisi_sas_debugfs_reg_lu *lu; + int count; + int base_off; + union { + u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off); + u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port, + u32 off); + }; +}; + +enum { + HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0, + HISI_SAS_BIST_LOOPBACK_MODE_SERDES, + HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, +}; + +enum { + HISI_SAS_BIST_CODE_MODE_PRBS7 = 0, + HISI_SAS_BIST_CODE_MODE_PRBS23, + HISI_SAS_BIST_CODE_MODE_PRBS31, + HISI_SAS_BIST_CODE_MODE_JTPAT, + HISI_SAS_BIST_CODE_MODE_CJTPAT, + HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, + HISI_SAS_BIST_CODE_MODE_TRAIN, + HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, + HISI_SAS_BIST_CODE_MODE_HFTP, + HISI_SAS_BIST_CODE_MODE_MFTP, + HISI_SAS_BIST_CODE_MODE_LFTP, + HISI_SAS_BIST_CODE_MODE_FIXED_DATA, +}; + + struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); - int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx, - struct domain_device *device); + int (*slot_index_alloc)(struct hisi_hba *hisi_hba, + struct domain_device *device); struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); - void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); - int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); + void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no); void (*start_delivery)(struct hisi_sas_dq *dq); void (*prep_ssp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); @@ -230,8 +306,9 @@ struct hisi_sas_hw { void 
(*prep_stp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); void (*prep_abort)(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, - int device_id, int abort_flag, int tag_to_abort); + struct hisi_sas_slot *slot, + unsigned int device_id, + int abort_flag, int tag_to_abort); int (*slot_complete)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); void (*phys_init)(struct hisi_hba *hisi_hba); @@ -242,7 +319,7 @@ struct hisi_sas_hw { void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *linkrates); enum sas_linkrate (*phy_get_max_linkrate)(void); - void (*clear_itct)(struct hisi_hba *hisi_hba, + int (*clear_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *dev); void (*free_device)(struct hisi_sas_device *sas_dev); int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); @@ -252,11 +329,20 @@ struct hisi_sas_hw { u32 (*get_phys_state)(struct hisi_hba *hisi_hba); int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data); - void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, + int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, int delay_ms, int timeout_ms); + void (*snapshot_prepare)(struct hisi_hba *hisi_hba); + void (*snapshot_restore)(struct hisi_hba *hisi_hba); + const struct cpumask *(*get_managed_irq_aff)(struct hisi_hba + *hisi_hba, int queue); + void (*debugfs_work_handler)(struct work_struct *work); int max_command_entries; int complete_hdr_size; struct scsi_host_template *sht; + + const struct hisi_sas_debugfs_reg *debugfs_reg_global; + const struct hisi_sas_debugfs_reg *debugfs_reg_port; + int (*set_bist)(struct hisi_hba *hisi_hba, bool enable); }; struct hisi_hba { @@ -320,7 +406,37 @@ struct hisi_hba { const struct hisi_sas_hw *hw; /* Low level hw interface */ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; struct work_struct rst_work; + struct work_struct debugfs_work; + struct work_struct notify_work; u32 phy_state; + u32 intr_coal_ticks; /* time of interrupt coalesce, unit:1us */ + u32 intr_coal_count; /* count of interrupt coalesce */ + /* bist */ + int bist_loopback_linkrate; + int bist_loopback_code_mode; + int bist_loopback_phy_id; + int bist_loopback_mode; + u32 bist_loopback_cnt; + int bist_loopback_enable; + + int enable_dix_dif; + + /* debugfs memories */ + void *debugfs_global_reg; + void *debugfs_port_reg[HISI_SAS_MAX_PHYS]; + void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_iost *debugfs_iost; + struct hisi_sas_itct *debugfs_itct; + + struct dentry *debugfs_dir; + struct dentry *debugfs_dump_dentry; + struct dentry *debugfs_bist_dentry; + + bool user_ctl_irq; + unsigned int dq_idx[NR_CPUS]; + int nvecs; + unsigned int dq_num_per_node; }; /* Generic HW DMA host memory structures */ @@ -414,11 +530,16 @@ struct hisi_sas_command_table_stp { u8 atapi_cdb[ATAPI_CDB_LEN]; }; -#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE +#define HISI_SAS_SGE_PAGE_CNT (124) struct hisi_sas_sge_page { struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; } __aligned(16); +#define HISI_SAS_SGE_DIF_PAGE_CNT HISI_SAS_SGE_PAGE_CNT +struct hisi_sas_sge_dif_page { + struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT]; +} __aligned(16); + struct hisi_sas_command_table_ssp { struct ssp_frame_hdr hdr; union { @@ -449,16 +570,23 @@ struct hisi_sas_slot_buf_table { struct hisi_sas_sge_page sge_page; }; +struct hisi_sas_slot_dif_buf_table { + struct hisi_sas_slot_buf_table 
slot_buf; + struct hisi_sas_sge_dif_page sge_dif_page; +}; + +extern bool hisi_sas_debugfs_enable; +extern struct dentry *hisi_sas_debugfs_dir; +extern int skip_bus_flag; extern struct scsi_transport_template *hisi_sas_stt; extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); -extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); +extern int hisi_sas_alloc(struct hisi_hba *hisi_hba); extern void hisi_sas_free(struct hisi_hba *hisi_hba); extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction); extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); -extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag); extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); @@ -467,12 +595,14 @@ extern int hisi_sas_remove(struct platform_device *pdev); extern int hisi_sas_slave_configure(struct scsi_device *sdev); extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); extern void hisi_sas_scan_start(struct Scsi_Host *shost); -extern struct device_attribute *host_attrs[]; extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type); +extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, + int enable); extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy); extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, - struct hisi_sas_slot *slot); + struct hisi_sas_slot *slot, + bool need_lock); extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); extern void hisi_sas_rst_work_handler(struct work_struct *work); extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); @@ -483,4 +613,8 @@ extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba); extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba); +extern void hisi_sas_snapshot_regs(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_work_handler(struct work_struct *work); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index a4e2e6aa9a6b8c9ea1ecda64dfde666f6e3efb6b..6bccd36556f2024ee293f5200c927089aae36495 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -96,12 +96,11 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) return HISI_SAS_SATA_PROTOCOL_NONDATA; } - default: - { + default: { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; return HISI_SAS_SATA_PROTOCOL_PIO; - } + } } } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); @@ -123,32 +122,22 @@ void hisi_sas_sata_done(struct sas_task *task, } EXPORT_SYMBOL_GPL(hisi_sas_sata_done); -int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); - /* * This function assumes linkrate mask fits in 8 bits, which it * does for all HW versions supported. 
*/ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) { - u16 rate = 0; + u8 rate = 0; int i; + enum sas_linkrate max_linkrate = max; - max -= SAS_LINK_RATE_1_5_GBPS; - for (i = 0; i <= max; i++) + /* + * One bit represent one kind of link rate, + * So we shift one rate to other by multiply 2. + */ + max_linkrate -= SAS_LINK_RATE_1_5_GBPS; + for (i = 0; i <= max_linkrate; i++) rate |= 1 << (i * 2); return rate; } @@ -170,7 +159,7 @@ void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) int phy_no; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) - hisi_hba->hw->phy_disable(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); } EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); @@ -183,7 +172,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) { - hisi_sas_slot_index_clear(hisi_hba, slot_idx); + unsigned long flags; + + if (hisi_hba->hw->slot_index_alloc || (slot_idx >= + hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_slot_index_clear(hisi_hba, slot_idx); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } } static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) @@ -193,24 +189,35 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) set_bit(slot_idx, bitmap); } -static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx) +static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, + struct scsi_cmnd *scsi_cmnd) { - unsigned int index; + int index; void *bitmap = hisi_hba->slot_index_tags; + unsigned long flags; + + if (scsi_cmnd) { + return scsi_cmnd->request->tag; + } + spin_lock_irqsave(&hisi_hba->lock, flags); index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, hisi_hba->last_slot_index + 1); if (index >= hisi_hba->slot_index_count) { - index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, - 0); - if (index >= hisi_hba->slot_index_count) + index = find_next_zero_bit(bitmap, + hisi_hba->slot_index_count, + hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT); + if (index >= hisi_hba->slot_index_count) { + spin_unlock_irqrestore(&hisi_hba->lock, flags); return -SAS_QUEUE_FULL; + } } hisi_sas_slot_index_set(hisi_hba, index); - *slot_idx = index; hisi_hba->last_slot_index = index; + spin_unlock_irqrestore(&hisi_hba->lock, flags); - return 0; + return index; } static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) @@ -222,10 +229,11 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) } void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, - struct hisi_sas_slot *slot) + struct hisi_sas_slot *slot, bool need_lock) { - struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue]; unsigned long flags; + int device_id = slot->device_id; + struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; if (task) { struct device *dev = hisi_hba->dev; @@ -235,23 +243,34 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, task->lldd_task = NULL; - if (!sas_protocol_ata(task->task_proto)) + if (!sas_protocol_ata(task->task_proto)) { if (slot->n_elem) dma_unmap_sg(dev, task->scatter, task->num_scatter, task->data_dir); - } + if (slot->n_elem_dif) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + dma_unmap_sg(dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + 
task->data_dir); + } + } + } - spin_lock_irqsave(&dq->lock, flags); - list_del_init(&slot->entry); - spin_unlock_irqrestore(&dq->lock, flags); + if (need_lock) { + spin_lock_irqsave(&sas_dev->lock, flags); + list_del_init(&slot->entry); + spin_unlock_irqrestore(&sas_dev->lock, flags); + } else { + list_del_init(&slot->entry); + } memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); - spin_lock_irqsave(&hisi_hba->lock, flags); hisi_sas_slot_index_free(hisi_hba, slot->idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); } EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); @@ -275,82 +294,56 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - int device_id, int abort_flag, int tag_to_abort) + unsigned int device_id, int abort_flag, int tag_to_abort) { hisi_hba->hw->prep_abort(hisi_hba, slot, device_id, abort_flag, tag_to_abort); } -static int hisi_sas_task_prep(struct sas_task *task, - struct hisi_sas_dq **dq_pointer, - bool is_tmf, struct hisi_sas_tmf_task *tmf, - int *pass) +static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, + struct sas_task *task, int n_elem, + int n_elem_req, int n_elem_resp) { - struct domain_device *device = task->dev; - struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); - struct hisi_sas_device *sas_dev = device->lldd_dev; - struct hisi_sas_port *port; - struct hisi_sas_slot *slot; - struct hisi_sas_cmd_hdr *cmd_hdr_base; - struct asd_sas_port *sas_port = device->port; struct device *dev = hisi_hba->dev; - int dlvry_queue_slot, dlvry_queue, rc, slot_idx; - int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; - struct hisi_sas_dq *dq; - unsigned long flags; - int wr_q_index; - - if (!sas_port) { - struct task_status_struct *ts = &task->task_status; - - ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - /* - * libsas will use dev->port, should - * not call task_done for sata - */ - if (device->dev_type != SAS_SATA_DEV) - task->task_done(task); - return -ECOMM; - } - - if (DEV_IS_GONE(sas_dev)) { - if (sas_dev) - dev_info(dev, "task prep: device %d not ready\n", - sas_dev->device_id); - else - dev_info(dev, "task prep: device %016llx not ready\n", - SAS_ADDR(device->sas_addr)); - return -ECOMM; + if (!sas_protocol_ata(task->task_proto)) { + if (task->num_scatter) { + if (n_elem) + dma_unmap_sg(dev, task->scatter, + task->num_scatter, + task->data_dir); + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + if (n_elem_req) + dma_unmap_sg(dev, &task->smp_task.smp_req, + 1, DMA_TO_DEVICE); + if (n_elem_resp) + dma_unmap_sg(dev, &task->smp_task.smp_resp, + 1, DMA_FROM_DEVICE); + } } +} - *dq_pointer = dq = sas_dev->dq; - - port = to_hisi_sas_port(sas_port); - if (port && !port->port_attached) { - dev_info(dev, "task prep: %s port%d not attach device\n", - (dev_is_sata(device)) ? 
- "SATA/STP" : "SAS", - device->port->id); - - return -ECOMM; - } +static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, + struct sas_task *task, int *n_elem, + int *n_elem_req, int *n_elem_resp) +{ + struct device *dev = hisi_hba->dev; + int rc; if (!sas_protocol_ata(task->task_proto)) { unsigned int req_len, resp_len; if (task->num_scatter) { - n_elem = dma_map_sg(dev, task->scatter, + *n_elem = dma_map_sg(dev, task->scatter, task->num_scatter, task->data_dir); - if (!n_elem) { + if (!*n_elem) { rc = -ENOMEM; goto prep_out; } } else if (task->task_proto & SAS_PROTOCOL_SMP) { - n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, + *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); - if (!n_elem_req) { + if (!*n_elem_req) { rc = -ENOMEM; goto prep_out; } @@ -359,9 +352,9 @@ static int hisi_sas_task_prep(struct sas_task *task, rc = -EINVAL; goto err_out_dma_unmap; } - n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, + *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, 1, DMA_FROM_DEVICE); - if (!n_elem_resp) { + if (!*n_elem_resp) { rc = -ENOMEM; goto err_out_dma_unmap; } @@ -372,43 +365,174 @@ static int hisi_sas_task_prep(struct sas_task *task, } } } else - n_elem = task->num_scatter; + *n_elem = task->num_scatter; - if (n_elem > HISI_SAS_SGE_PAGE_CNT) { + if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", - n_elem); + *n_elem); rc = -EINVAL; goto err_out_dma_unmap; } + return 0; - spin_lock_irqsave(&hisi_hba->lock, flags); - if (hisi_hba->hw->slot_index_alloc) - rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, - device); +err_out_dma_unmap: + hisi_sas_dma_unmap(hisi_hba, task, *n_elem, + *n_elem_req, *n_elem_resp); +prep_out: + return rc; +} + +static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, + struct sas_task *task, int n_elem_dif) +{ + struct device *dev = hisi_hba->dev; + + if (!sas_protocol_ata(task->task_proto) && + task->num_scatter && n_elem_dif) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + } +} + +static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, + int *n_elem_dif, bool is_tmf, struct sas_task *task) +{ + struct device *dev = hisi_hba->dev; + struct sas_ssp_task *ssp_task; + struct scsi_cmnd *scsi_cmnd; + int rc; + + if (sas_protocol_ata(task->task_proto)) + return 0; + + if (task->num_scatter && !is_tmf) { + ssp_task = &task->ssp_task; + scsi_cmnd = ssp_task->cmd; + + if (scsi_prot_sg_count(scsi_cmnd)) { + *n_elem_dif = dma_map_sg(dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + if (!*n_elem_dif) + return -ENOMEM; + } + } + + if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { + dev_err(dev, + "task prep: n_elem_dif(%d) > HISI_SAS_SGE_DIF_PAGE_CNT", + *n_elem_dif); + rc = -EINVAL; + goto err_out_dif_dma_unmap; + } + return 0; + +err_out_dif_dma_unmap: + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), task->data_dir); + return rc; +} + +static int hisi_sas_task_prep(struct sas_task *task, + bool is_tmf, struct hisi_sas_tmf_task *tmf, + int *pass, struct hisi_sas_dq **dq_pointer) +{ + struct domain_device *device = task->dev; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_port *port; + struct hisi_sas_slot *slot; + struct hisi_sas_cmd_hdr *cmd_hdr_base; + struct 
asd_sas_port *sas_port = device->port; + struct device *dev = hisi_hba->dev; + int dlvry_queue_slot, dlvry_queue, rc, slot_idx; + int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0; + struct hisi_sas_dq *dq; + unsigned long flags; + int wr_q_index; + unsigned int curr_node_id = numa_node_id(); + unsigned int dq_index = + (hisi_hba->dq_idx[curr_node_id] % hisi_hba->dq_num_per_node) + + (hisi_hba->dq_num_per_node * curr_node_id); + + if (DEV_IS_GONE(sas_dev)) { + if (sas_dev) + dev_info(dev, "task prep: device %d not ready\n", + sas_dev->device_id); + else + dev_info(dev, "task prep: device %016llx not ready\n", + SAS_ADDR(device->sas_addr)); + + return -ECOMM; + } + + if (hisi_hba->user_ctl_irq) + *dq_pointer = dq = sas_dev->dq; else - rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - if (rc) + *dq_pointer = dq = &hisi_hba->dq[dq_index]; + + port = to_hisi_sas_port(sas_port); + if (port && !port->port_attached) { + dev_info(dev, "task prep: %s port%d not attach device\n", + (dev_is_sata(device)) ? + "SATA/STP" : "SAS", + device->port->id); + + return -ECOMM; + } + + rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, + &n_elem_req, &n_elem_resp); + if (rc < 0) + goto prep_out; + + rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, is_tmf, task); + if (rc < 0) goto err_out_dma_unmap; - slot = &hisi_hba->slot_info[slot_idx]; + if (hisi_hba->hw->slot_index_alloc) + rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); + else { + struct scsi_cmnd *scsi_cmnd = NULL; - spin_lock_irqsave(&dq->lock, flags); - wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); - if (wr_q_index < 0) { - spin_unlock_irqrestore(&dq->lock, flags); - rc = -EAGAIN; - goto err_out_tag; + if (task->uldd_task) { + struct ata_queued_cmd *qc; + + if (dev_is_sata(device)) { + qc = task->uldd_task; + scsi_cmnd = qc->scsicmd; + } else { + scsi_cmnd = task->uldd_task; + } + } + rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd); } + if (rc < 0) + goto err_out_dif_dma_unmap; + + slot_idx = rc; + slot = &hisi_hba->slot_info[slot_idx]; + spin_lock_irqsave(&dq->lock, flags); + wr_q_index = dq->wr_point; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); - list_add_tail(&slot->entry, &sas_dev->list); spin_unlock_irqrestore(&dq->lock, flags); + spin_lock_irqsave(&sas_dev->lock, flags); + list_add_tail(&slot->entry, &sas_dev->list); + spin_unlock_irqrestore(&sas_dev->lock, flags); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; + slot->device_id = sas_dev->device_id; slot->n_elem = n_elem; + slot->n_elem_dif = n_elem_dif; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; @@ -421,7 +545,8 @@ static int hisi_sas_task_prep(struct sas_task *task, memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); - memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, + sizeof(struct hisi_sas_err_record)); switch (task->task_proto) { case SAS_PROTOCOL_SMP: @@ -446,28 +571,16 @@ static int hisi_sas_task_prep(struct sas_task *task, spin_unlock_irqrestore(&task->task_state_lock, flags); ++(*pass); + ++hisi_hba->dq_idx[curr_node_id]; WRITE_ONCE(slot->ready, 1); return 0; -err_out_tag: - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_index_free(hisi_hba, slot_idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); 
+err_out_dif_dma_unmap: + hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); err_out_dma_unmap: - if (!sas_protocol_ata(task->task_proto)) { - if (task->num_scatter) { - dma_unmap_sg(dev, task->scatter, task->num_scatter, - task->data_dir); - } else if (task->task_proto & SAS_PROTOCOL_SMP) { - if (n_elem_req) - dma_unmap_sg(dev, &task->smp_task.smp_req, - 1, DMA_TO_DEVICE); - if (n_elem_resp) - dma_unmap_sg(dev, &task->smp_task.smp_resp, - 1, DMA_FROM_DEVICE); - } - } + hisi_sas_dma_unmap(hisi_hba, task, n_elem, + n_elem_req, n_elem_resp); prep_out: dev_err(dev, "task prep: failed[%d]!\n", rc); return rc; @@ -479,20 +592,35 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, u32 rc; u32 pass = 0; unsigned long flags; - struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); - struct device *dev = hisi_hba->dev; + struct hisi_hba *hisi_hba; + struct device *dev; + struct domain_device *device = task->dev; + struct asd_sas_port *sas_port = device->port; struct hisi_sas_dq *dq = NULL; - if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { - if (in_softirq()) - return -EINVAL; + if (!sas_port) { + struct task_status_struct *ts = &task->task_status; - down(&hisi_hba->sem); - up(&hisi_hba->sem); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + /* + * libsas will use dev->port, should + * not call task_done for sata + */ + if (device->dev_type != SAS_SATA_DEV) + task->task_done(task); + return -ECOMM; } + hisi_hba = dev_to_hisi_hba(device); + dev = hisi_hba->dev; + + if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) + return -EINVAL; + + /* protect task_prep and start_delivery sequence */ - rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); + rc = hisi_sas_task_prep(task, is_tmf, tmf, &pass, &dq); if (rc) dev_err(dev, "task exec: failed[%d]!\n", rc); @@ -539,7 +667,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; } else if (phy->phy_type & PORT_TYPE_SATA) { - /*Nothing*/ + /* Nothing */ } sas_phy->frame_rcvd_size = phy->frame_rcvd_size; @@ -551,57 +679,91 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = NULL; unsigned long flags; - int last = hisi_hba->last_dev_id; int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; + int dev_id; int i; spin_lock_irqsave(&hisi_hba->lock, flags); - for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { - if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { - int queue = i % hisi_hba->queue_count; + for (i = first; i < first + HISI_SAS_MAX_DEVICES; i++) { + dev_id = i % HISI_SAS_MAX_DEVICES; + if (hisi_hba->devices[dev_id].dev_type == SAS_PHY_UNUSED) { + int queue = dev_id % hisi_hba->queue_count; struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; - hisi_hba->devices[i].device_id = i; - sas_dev = &hisi_hba->devices[i]; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + hisi_hba->devices[dev_id].device_id = dev_id; + sas_dev = &hisi_hba->devices[dev_id]; + sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->dq = dq; - INIT_LIST_HEAD(&hisi_hba->devices[i].list); + spin_lock_init(&sas_dev->lock); + INIT_LIST_HEAD(&hisi_hba->devices[dev_id].list); break; } - i++; } - hisi_hba->last_dev_id = i; + if (sas_dev) + hisi_hba->last_dev_id = i; 
spin_unlock_irqrestore(&hisi_hba->lock, flags); return sas_dev; } -#define HISI_SAS_SRST_ATA_DISK_CNT 3 +#define HISI_SAS_DISK_RECOVER_CNT 3 static int hisi_sas_init_device(struct domain_device *device) { int rc = TMF_RESP_FUNC_COMPLETE; struct scsi_lun lun; struct hisi_sas_tmf_task tmf_task; - int retry = HISI_SAS_SRST_ATA_DISK_CNT; + int retry = HISI_SAS_DISK_RECOVER_CNT; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; + struct sas_phy *local_phy; switch (device->dev_type) { case SAS_END_DEVICE: int_to_scsilun(0, &lun); tmf_task.tmf = TMF_CLEAR_TASK_SET; - rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, + while (retry-- > 0) { + rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, &tmf_task); - if (rc == TMF_RESP_FUNC_COMPLETE) - hisi_sas_release_task(hisi_hba, device); + if (rc == TMF_RESP_FUNC_COMPLETE) { + hisi_sas_release_task(hisi_hba, device); + break; + } + } break; case SAS_SATA_DEV: case SAS_SATA_PM: case SAS_SATA_PM_PORT: case SAS_SATA_PENDING: + /* + * send HARD RESET to clear previous affiliation of + * STP target port + */ + local_phy = sas_get_local_phy(device); + if (!scsi_is_sas_phy_local(local_phy) && + !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { + unsigned long deadline = ata_deadline(jiffies, 20000); + struct sata_device *sata_dev = &device->sata_dev; + struct ata_host *ata_host = sata_dev->ata_host; + struct ata_port_operations *ops = ata_host->ops; + struct ata_port *ap = sata_dev->ap; + struct ata_link *link; + unsigned int classes; + + ata_for_each_link(link, ap, EDGE) + rc = ops->hardreset(link, &classes, + deadline); + } + sas_put_local_phy(local_phy); + if (rc) { + dev_warn(dev, "SATA disk hardreset fail: %d\n", + rc); + return rc; + } + while (retry-- > 0) { rc = hisi_sas_softreset_ata_disk(device); if (!rc) @@ -664,6 +826,7 @@ static int hisi_sas_dev_found(struct domain_device *device) rc = hisi_sas_init_device(device); if (rc) goto err_out; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; return 0; err_out: @@ -678,6 +841,12 @@ int hisi_sas_slave_configure(struct scsi_device *sdev) if (ret) return ret; + + /* + * The queue depth for the sdev should be + * set as 64 to avoid SAS_QUEUE_FULL error + * in high-datarate aging tests + */ if (!dev_is_sata(dev)) sas_change_queue_depth(sdev, 64); @@ -715,7 +884,8 @@ static void hisi_sas_phyup_work(struct work_struct *work) struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; - hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ + if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) + hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); hisi_sas_bytes_dmaed(hisi_hba, phy_no); } @@ -773,7 +943,33 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); spin_lock_init(&phy->lock); + + timer_setup(&phy->timer, NULL, 0); +} + +/* Wrapper to ensure we track hisi_sas_phy.enable properly */ +void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *aphy = &phy->sas_phy; + struct sas_phy *sphy = aphy->phy; + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); + + if (enable) { + /* We may have been enabled already; if so, don't touch */ + if (!phy->enable) + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + hisi_hba->hw->phy_start(hisi_hba, phy_no); + } else { + sphy->negotiated_linkrate = SAS_PHY_DISABLED; + hisi_hba->hw->phy_disable(hisi_hba, phy_no); + } + 
phy->enable = enable; + spin_unlock_irqrestore(&phy->lock, flags); } +EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) { @@ -781,12 +977,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct hisi_sas_port *port; unsigned long flags; if (!sas_port) return; + port = to_hisi_sas_port(sas_port); spin_lock_irqsave(&hisi_hba->lock, flags); port->port_attached = 1; port->id = phy->port_id; @@ -796,7 +993,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) } static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, - struct hisi_sas_slot *slot) + struct hisi_sas_slot *slot, bool need_lock) { if (task) { unsigned long flags; @@ -809,11 +1006,16 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; + if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) + task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (slot->is_internal || (task->task_proto == SAS_PROTOCOL_SMP)) + if (task->task_done) + task->task_done(task); } - hisi_sas_slot_task_free(hisi_hba, task, slot); + hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock); } static void hisi_sas_release_task(struct hisi_hba *hisi_hba, @@ -821,9 +1023,13 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba, { struct hisi_sas_slot *slot, *slot2; struct hisi_sas_device *sas_dev = device->lldd_dev; + unsigned long flags; + spin_lock_irqsave(&sas_dev->lock, flags); list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) - hisi_sas_do_release_task(hisi_hba, slot->task, slot); + hisi_sas_do_release_task(hisi_hba, slot->task, slot, false); + + spin_unlock_irqrestore(&sas_dev->lock, flags); } void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) @@ -857,25 +1063,40 @@ static void hisi_sas_dev_gone(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; + int rc0 = 0; + int rc1 = 0; dev_info(dev, "dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); + down(&hisi_hba->sem); if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { - hisi_sas_internal_task_abort(hisi_hba, device, + rc0 = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); hisi_sas_dereg_device(hisi_hba, device); - - down(&hisi_hba->sem); - hisi_hba->hw->clear_itct(hisi_hba, sas_dev); - up(&hisi_hba->sem); + if (!list_empty(&sas_dev->list)) { + hisi_sas_release_task(hisi_hba, device); + dev_info(dev, "dev gone: release remain resources anyway.\n"); + } + + rc1 = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); device->lldd_dev = NULL; } if (hisi_hba->hw->free_device) hisi_hba->hw->free_device(sas_dev); - sas_dev->dev_type = SAS_PHY_UNUSED; + + /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ + if (!rc1) + sas_dev->dev_type = SAS_PHY_UNUSED; + sas_dev->sas_device = NULL; + up(&hisi_hba->sem); + + if (rc0 == -EIO) { + dev_err(dev, "internal abort timeout for dev gone.\n"); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } } static 
int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) @@ -883,7 +1104,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) return hisi_sas_task_exec(task, gfp_flags, 0, NULL); } -static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, +static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { struct sas_phy_linkrates _r; @@ -892,6 +1113,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, struct asd_sas_phy *sas_phy = &phy->sas_phy; enum sas_linkrate min, max; + if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) + return -EINVAL; + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { max = sas_phy->phy->maximum_linkrate; min = r->minimum_linkrate; @@ -899,15 +1123,21 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, max = r->maximum_linkrate; min = sas_phy->phy->minimum_linkrate; } else - return; + return -EINVAL; _r.maximum_linkrate = max; _r.minimum_linkrate = min; - hisi_hba->hw->phy_disable(hisi_hba, phy_no); + /* Feed back modified linkrate value pass from upper layer */ + sas_phy->phy->maximum_linkrate = max; + sas_phy->phy->minimum_linkrate = min; + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + /* Sleep 100ms after disable phy to meet hw need */ msleep(100); hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); - hisi_hba->hw->phy_start(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); + + return 0; } static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, @@ -923,18 +1153,18 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_LINK_RESET: - hisi_hba->hw->phy_disable(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); + /* Sleep 100ms after disable phy to meet hw need */ msleep(100); - hisi_hba->hw->phy_start(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); break; case PHY_FUNC_DISABLE: - hisi_hba->hw->phy_disable(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); break; case PHY_FUNC_SET_LINK_RATE: - hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); - break; + return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); case PHY_FUNC_GET_EVENTS: if (hisi_hba->hw->get_events) { hisi_hba->hw->get_events(hisi_hba, phy_no); @@ -950,8 +1180,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, static void hisi_sas_task_done(struct sas_task *task) { - if (!del_timer(&task->slow_task->timer)) - return; + del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); } @@ -960,13 +1189,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t) struct sas_task_slow *slow = from_timer(slow, t, timer); struct sas_task *task = slow->task; unsigned long flags; + bool is_completed = true; spin_lock_irqsave(&task->task_state_lock, flags); - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task->task_state_flags |= SAS_TASK_STATE_ABORTED; + is_completed = false; + } spin_unlock_irqrestore(&task->task_state_lock, flags); - complete(&task->slow_task->completion); + if (!is_completed) + complete(&task->slow_task->completion); } #define TASK_TIMEOUT 20 @@ -999,7 +1232,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, task->task_done = hisi_sas_task_done; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; + 
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ; add_timer(&task->slow_task->timer); res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); @@ -1019,8 +1252,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, struct hisi_sas_slot *slot = task->lldd_task; dev_err(dev, "abort tmf: TMF task timeout and not done\n"); - if (slot) + if (slot) { + struct hisi_sas_cq *cq = + &hisi_hba->cq[slot->dlvry_queue]; + /* + * flush tasklet to avoid free'ing task + * before using task in IO completion + */ + tasklet_kill(&cq->tasklet); slot->task = NULL; + } goto ex_err; } else @@ -1044,8 +1285,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, /* no error, but return the number of bytes of * underrun */ - dev_warn(dev, "abort tmf: task to dev %016llx " - "resp: 0x%x sts 0x%x underrun\n", + dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n", SAS_ADDR(device->sas_addr), task->task_status.resp, task->task_status.stat); @@ -1060,10 +1300,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, break; } - dev_warn(dev, "abort tmf: task to dev " - "%016llx resp: 0x%x status 0x%x\n", - SAS_ADDR(device->sas_addr), task->task_status.resp, - task->task_status.stat); + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_OPEN_REJECT) { + dev_warn(dev, "abort tmf: open reject failed\n"); + res = -EIO; + } else { + dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n", + SAS_ADDR(device->sas_addr), + task->task_status.resp, + task->task_status.stat); + } sas_free_task(task); task = NULL; } @@ -1090,7 +1336,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, static int hisi_sas_softreset_ata_disk(struct domain_device *device) { - u8 fis[20] = {0}; + struct host_to_dev_fis fis = {}; struct ata_port *ap = device->sata_dev.ap; struct ata_link *link; int rc = TMF_RESP_FUNC_FAILED; @@ -1101,8 +1347,8 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); - hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); - rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); + hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, (u8 *)&fis); + rc = hisi_sas_exec_internal_tmf_task(device, &fis, s, NULL); if (rc != TMF_RESP_FUNC_COMPLETE) break; } @@ -1111,8 +1357,9 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); - hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); - rc = hisi_sas_exec_internal_tmf_task(device, fis, + hisi_sas_fill_ata_reset_cmd(link->device, 0, + pmp, (u8 *)&fis); + rc = hisi_sas_exec_internal_tmf_task(device, &fis, s, NULL); if (rc != TMF_RESP_FUNC_COMPLETE) dev_err(dev, "ata disk de-reset failed\n"); @@ -1135,6 +1382,7 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, if (!(device->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; + /* LUN is define as 8 bytes array at upper layer */ memcpy(ssp_task.LUN, lun, 8); return hisi_sas_exec_internal_tmf_task(device, &ssp_task, @@ -1207,17 +1455,20 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); } - } else if (old_state & (1 << phy_no)) - /* PHY down but was up before */ + } else { hisi_sas_phy_down(hisi_hba, phy_no, 0); + } } } static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) { 
- struct hisi_sas_device *sas_dev; + void *bitmap = hisi_hba->slot_index_tags; + struct device *dev = hisi_hba->dev; + struct hisi_sas_device *sas_dev; struct domain_device *device; + int slot_num = 0; int i; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { @@ -1229,6 +1480,14 @@ static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) hisi_sas_init_device(device); } + + /* Check whether there are IPTT remains. */ + for (i = 0; i < hisi_hba->slot_index_count; i++) + if (test_bit(i, bitmap)) + slot_num++; + + if (slot_num) + dev_warn(dev, "%d IPTT remains\n", slot_num); } static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, @@ -1239,10 +1498,10 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct ata_port *ap = device->sata_dev.ap; struct device *dev = hisi_hba->dev; int s = sizeof(struct host_to_dev_fis); + struct host_to_dev_fis fis = {}; int rc = TMF_RESP_FUNC_FAILED; struct asd_sas_phy *sas_phy; struct ata_link *link; - u8 fis[20] = {0}; u32 state; state = hisi_hba->hw->get_phys_state(hisi_hba); @@ -1254,8 +1513,9 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, int pmp = sata_srst_pmp(link); tmf_task.phy_id = sas_phy->id; - hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); - rc = hisi_sas_exec_internal_tmf_task(device, fis, s, + hisi_sas_fill_ata_reset_cmd(link->device, 1, + pmp, (u8 *)&fis); + rc = hisi_sas_exec_internal_tmf_task(device, &fis, s, &tmf_task); if (rc != TMF_RESP_FUNC_COMPLETE) { dev_err(dev, "phy%d ata reset failed rc=%d\n", @@ -1314,6 +1574,8 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); + + /* Delay: 100ms timeout: 5s */ hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); if (timer_pending(&hisi_hba->timer)) @@ -1330,14 +1592,15 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) /* Init and wait for PHYs to come up and all libsas event finished. 
*/ hisi_hba->hw->phys_init(hisi_hba); + /* Sleep 1s to wait for phy up */ msleep(1000); hisi_sas_refresh_port_id(hisi_hba); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); - up(&hisi_hba->sem); if (hisi_hba->reject_stp_links_msk) hisi_sas_terminate_stp_reject(hisi_hba); hisi_sas_reset_init_all_devices(hisi_hba); + up(&hisi_hba->sem); scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); @@ -1353,10 +1616,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) int rc; if (!hisi_hba->hw->soft_reset) - return -1; + return -EINVAL; if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) - return -1; + return -EPERM; + + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct && + !hisi_hba->debugfs_dump_dentry) + hisi_hba->hw->debugfs_work_handler(&hisi_hba->debugfs_work); dev_info(dev, "controller resetting...\n"); hisi_sas_controller_reset_prepare(hisi_hba); @@ -1383,6 +1650,7 @@ static int hisi_sas_abort_task(struct sas_task *task) struct hisi_sas_tmf_task tmf_task; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_slot *slot = task->lldd_task; struct hisi_hba *hisi_hba; struct device *dev; int rc = TMF_RESP_FUNC_FAILED; @@ -1396,6 +1664,16 @@ static int hisi_sas_abort_task(struct sas_task *task) spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { + struct hisi_sas_cq *cq; + + if (slot) { + /* + * flush tasklet to avoid free'ing task + * before using task in IO completion + */ + cq = &hisi_hba->cq[slot->dlvry_queue]; + tasklet_kill(&cq->tasklet); + } spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; @@ -1403,10 +1681,8 @@ static int hisi_sas_abort_task(struct sas_task *task) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); - sas_dev->dev_status = HISI_SAS_DEV_EH; - if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { + if (slot && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; - struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; int rc2; @@ -1433,11 +1709,13 @@ static int hisi_sas_abort_task(struct sas_task *task) */ if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { if (task->lldd_task) - hisi_sas_do_release_task(hisi_hba, task, slot); + hisi_sas_do_release_task(hisi_hba, task, slot, true); } } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { + struct ata_queued_cmd *qc = task->uldd_task; + rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); if (rc < 0) { @@ -1445,18 +1723,35 @@ static int hisi_sas_abort_task(struct sas_task *task) goto out; } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_softreset_ata_disk(device); + + /* + * If an ATA internal command times out in ATA EH, it + * need to execute soft reset, so check the scsicmd + */ + if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && + qc && qc->scsicmd) { + hisi_sas_do_release_task(hisi_hba, task, slot, true); + rc = TMF_RESP_FUNC_COMPLETE; + } else { + rc = hisi_sas_softreset_ata_disk(device); + } } - } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { + } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ - struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; + struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; rc = 
hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_CMD, tag); if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && - task->lldd_task) - hisi_sas_do_release_task(hisi_hba, task, slot); + task->lldd_task) { + /* + * flush tasklet to avoid free'ing task + * before using task in IO completion + */ + tasklet_kill(&cq->tasklet); + slot->task = NULL; + } } out: @@ -1470,7 +1765,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; struct hisi_sas_tmf_task tmf_task; - int rc = TMF_RESP_FUNC_FAILED; + int rc; rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); @@ -1491,7 +1786,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; + int rc; struct hisi_sas_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_ACA; @@ -1503,16 +1798,24 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) { struct sas_phy *local_phy = sas_get_local_phy(device); - int rc, reset_type = (device->dev_type == SAS_SATA_DEV || - (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; + struct hisi_sas_device *sas_dev = device->lldd_dev; + int rc, reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || + !dev_is_sata(device)) ? true : false; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = hisi_hba->dev; struct sas_ha_struct *sas_ha = &hisi_hba->sha; - struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; - struct hisi_sas_phy *phy = container_of(sas_phy, - struct hisi_sas_phy, sas_phy); DECLARE_COMPLETION_ONSTACK(phyreset); + if (!local_phy->enabled) { + sas_put_local_phy(local_phy); + return -ENODEV; + } + if (scsi_is_sas_phy_local(local_phy)) { + struct asd_sas_phy *sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); phy->in_reset = 1; phy->reset_completion = &phyreset; } @@ -1521,7 +1824,13 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) sas_put_local_phy(local_phy); if (scsi_is_sas_phy_local(local_phy)) { - int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); + struct asd_sas_phy *sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); + /* Wait for I_T reset complete, time out after 2s */ + int ret = wait_for_completion_timeout(&phyreset, + HISI_SAS_WAIT_PHYUP_TIMEOUT); unsigned long flags; spin_lock_irqsave(&phy->lock, flags); @@ -1530,24 +1839,27 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) spin_unlock_irqrestore(&phy->lock, flags); /* report PHY down if timed out */ - if (!ret) + if (!ret) { + dev_warn(dev, "phy%d reset timeout\n", sas_phy->id); hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); - } else - msleep(2000); + } + } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) + /* Sleep 2s, wait for I_T reset at expander env except fail */ + if (!rc) + msleep(2000); return rc; } static int hisi_sas_I_T_nexus_reset(struct domain_device *device) { - struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; - int rc = TMF_RESP_FUNC_FAILED; + struct hisi_sas_device *sas_dev = device->lldd_dev; + int rc; - if (sas_dev->dev_status != 
HISI_SAS_DEV_EH) - return TMF_RESP_FUNC_FAILED; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; rc = hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); @@ -1557,6 +1869,13 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); + if (dev_is_sata(device)) { + rc = hisi_sas_softreset_ata_disk(device); + if (rc) + dev_err(dev, "I_T nexus reset: softreset failed (%d)\n", + rc); + } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) @@ -1572,22 +1891,21 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; - sas_dev->dev_status = HISI_SAS_DEV_EH; + /* Clear internal IO and then lu reset */ + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "lu_reset: internal abort failed\n"); + goto out; + } + hisi_sas_dereg_device(hisi_hba, device); + if (dev_is_sata(device)) { struct sas_phy *phy; - /* Clear internal IO and then hardreset */ - rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); - if (rc < 0) { - dev_err(dev, "lu_reset: internal abort failed\n"); - goto out; - } - hisi_sas_dereg_device(hisi_hba, device); - phy = sas_get_local_phy(device); - rc = sas_phy_reset(phy, 1); + rc = sas_phy_reset(phy, true); if (rc == 0) hisi_sas_release_task(hisi_hba, device); @@ -1595,14 +1913,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) } else { struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; - rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); - if (rc < 0) { - dev_err(dev, "lu_reset: internal abort failed\n"); - goto out; - } - hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); @@ -1680,9 +1990,10 @@ static int hisi_sas_query_task(struct sas_task *task) } static int -hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, +hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, + unsigned int device_id, struct sas_task *task, int abort_flag, - int task_tag) + int task_tag, struct hisi_sas_dq *dq) { struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -1691,7 +2002,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct hisi_sas_slot *slot; struct asd_sas_port *sas_port = device->port; struct hisi_sas_cmd_hdr *cmd_hdr_base; - struct hisi_sas_dq *dq = sas_dev->dq; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; unsigned long flags, flags_dq = 0; int wr_q_index; @@ -1700,34 +2010,31 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, return -EINVAL; if (!device->port) - return -1; + return -EPERM; port = to_hisi_sas_port(sas_port); /* simply get a slot and send abort command */ - spin_lock_irqsave(&hisi_hba->lock, flags); - rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); - if (rc) { - spin_unlock_irqrestore(&hisi_hba->lock, flags); + rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); + if (rc < 0) goto err_out; - } - spin_unlock_irqrestore(&hisi_hba->lock, flags); + slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; spin_lock_irqsave(&dq->lock, flags_dq); - wr_q_index = 
hisi_hba->hw->get_free_slot(hisi_hba, dq); - if (wr_q_index < 0) { - spin_unlock_irqrestore(&dq->lock, flags_dq); - rc = -EAGAIN; - goto err_out_tag; - } + wr_q_index = dq->wr_point; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); spin_unlock_irqrestore(&dq->lock, flags_dq); + spin_lock_irqsave(&sas_dev->lock, flags); + list_add_tail(&slot->entry, &sas_dev->list); + spin_unlock_irqrestore(&sas_dev->lock, flags); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; + slot->device_id = sas_dev->device_id; slot->n_elem = n_elem; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; @@ -1740,7 +2047,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); - memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, + sizeof(struct hisi_sas_err_record)); hisi_sas_task_prep_abort(hisi_hba, slot, device_id, abort_flag, task_tag); @@ -1748,20 +2056,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - WRITE_ONCE(slot->ready, 1); /* send abort command to the chip */ spin_lock_irqsave(&dq->lock, flags); - list_add_tail(&slot->entry, &sas_dev->list); hisi_hba->hw->start_delivery(dq); spin_unlock_irqrestore(&dq->lock, flags); return 0; -err_out_tag: - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_slot_index_free(hisi_hba, slot_idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); err_out: dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); @@ -1769,18 +2071,19 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, } /** - * hisi_sas_internal_task_abort -- execute an internal + * _hisi_sas_internal_task_abort -- execute an internal * abort command for single IO command or a device * @hisi_hba: host controller struct * @device: domain device * @abort_flag: mode of operation, device or single IO * @tag: tag of IO to be aborted (only relevant to single * IO mode) + * @dq: delivery queue for this internal abort command */ static int -hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, - struct domain_device *device, - int abort_flag, int tag) +_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, + struct domain_device *device, + int abort_flag, int tag, struct hisi_sas_dq *dq) { struct sas_task *task; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -1804,11 +2107,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->task_proto = device->tproto; task->task_done = hisi_sas_task_done; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ; + task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ; add_timer(&task->slow_task->timer); res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, - task, abort_flag, tag); + task, abort_flag, tag, dq); if (res) { del_timer(&task->slow_task->timer); dev_err(dev, "internal task abort: executing internal task failed: %d\n", @@ -1823,9 +2126,18 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { struct hisi_sas_slot *slot = task->lldd_task; - if (slot) + if (slot) { + struct 
hisi_sas_cq *cq = + &hisi_hba->cq[slot->dlvry_queue]; + /* + * flush tasklet to avoid free'ing task + * before using task in IO completion + */ + tasklet_kill(&cq->tasklet); slot->task = NULL; + } dev_err(dev, "internal task abort: timeout and not done.\n"); + res = -EIO; goto exit; } else @@ -1845,10 +2157,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, } exit: - dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " - "resp: 0x%x sts 0x%x\n", - SAS_ADDR(device->sas_addr), - task, + dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n", + SAS_ADDR(device->sas_addr), task, task->task_status.resp, /* 0 is complete, -1 is undelivered */ task->task_status.stat); sas_free_task(task); @@ -1856,13 +2166,59 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, return res; } -static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) +static int +hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, + struct domain_device *device, + int abort_flag, int tag) { - hisi_sas_port_notify_formed(sas_phy); + struct hisi_sas_slot *slot; + struct device *dev = hisi_hba->dev; + struct hisi_sas_dq *dq; + int rc, i; + + switch (abort_flag) { + case HISI_SAS_INT_ABT_CMD: + slot = &hisi_hba->slot_info[tag]; + dq = &hisi_hba->dq[slot->dlvry_queue]; + return _hisi_sas_internal_task_abort(hisi_hba, device, + abort_flag, tag, dq); + case HISI_SAS_INT_ABT_DEV: + for (i = 0; i < hisi_hba->nvecs; i++) { + const struct cpumask *mask = NULL; + + if (hisi_hba->hw->get_managed_irq_aff) + mask = hisi_hba->hw->get_managed_irq_aff( + hisi_hba, i); + /* + * The kernel will not permit unmanaged (MSI are + * managed) IRQ affinity to offline CPUs, so + * always issue internal abort on all queues + * in this case. + * For MSI interrupts, affinity may be set to + * offline CPUs, so ensure that there's an online + * CPU to handle the CQ interrupt. 
+ */ + if (mask && !cpumask_intersects(cpu_online_mask, mask)) + continue; + dq = &hisi_hba->dq[i]; + rc = _hisi_sas_internal_task_abort(hisi_hba, device, + abort_flag, tag, dq); + if (rc) + return rc; + } + break; + default: + dev_err(dev, "Unrecognised internal abort flag (%d)\n", + abort_flag); + return -EINVAL; + } + + return 0; } -static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy) +static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) { + hisi_sas_port_notify_formed(sas_phy); } static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, @@ -1879,9 +2235,20 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) { + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + unsigned long flags; + phy->phy_attached = 0; phy->phy_type = 0; phy->port = NULL; + + spin_lock_irqsave(&phy->lock, flags); + if (phy->enable) + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + else + sphy->negotiated_linkrate = SAS_PHY_DISABLED; + spin_unlock_irqrestore(&phy->lock, flags); } void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) @@ -1926,7 +2293,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) { int i; - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; tasklet_kill(&cq->tasklet); @@ -1934,15 +2301,22 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) } EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); +int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct hisi_hba *hisi_hba = shost_priv(shost); + + if (reset_type == SCSI_ADAPTER_RESET) + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + else + return -EOPNOTSUPP; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_host_reset); + struct scsi_transport_template *hisi_sas_stt; EXPORT_SYMBOL_GPL(hisi_sas_stt); -struct device_attribute *host_attrs[] = { - &dev_attr_phy_event_threshold, - NULL, -}; -EXPORT_SYMBOL_GPL(host_attrs); - static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_dev_found = hisi_sas_dev_found, .lldd_dev_gone = hisi_sas_dev_gone, @@ -1954,10 +2328,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, .lldd_lu_reset = hisi_sas_lu_reset, .lldd_query_task = hisi_sas_query_task, - .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, + .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, .lldd_port_formed = hisi_sas_port_formed, - .lldd_port_deformed = hisi_sas_port_deformed, - .lldd_write_gpio = hisi_sas_write_gpio, + .lldd_write_gpio = hisi_sas_write_gpio, }; void hisi_sas_init_mem(struct hisi_hba *hisi_hba) @@ -1991,7 +2364,7 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba) } EXPORT_SYMBOL_GPL(hisi_sas_init_mem); -int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) +int hisi_sas_alloc(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; @@ -2000,6 +2373,10 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) sema_init(&hisi_hba->sem, 1); spin_lock_init(&hisi_hba->lock); + + for (i = 0; i < NR_CPUS; i++) + hisi_hba->dq_idx[i] = 0; + for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_init(hisi_hba, i); hisi_hba->port[i].port_attached = 0; @@ -2009,7 +2386,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) for (i = 0; i < 
HISI_SAS_MAX_DEVICES; i++) { hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; hisi_hba->devices[i].device_id = i; - hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; + hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; } for (i = 0; i < hisi_hba->queue_count; i++) { @@ -2048,7 +2425,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) GFP_KERNEL); if (!hisi_hba->itct) goto err_out; - memset(hisi_hba->itct, 0, s); hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, sizeof(struct hisi_sas_slot), @@ -2056,21 +2432,28 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) if (!hisi_hba->slot_info) goto err_out; - /* roundup to avoid overly large block size */ + /* + * roundup to avoid overly large block size + * 64 is a better setting after several repeated attempts + */ max_command_entries_ru = roundup(max_command_entries, 64); - sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64); - s = lcm(max_command_entries_ru, sz_slot_buf_ru); + if (!hisi_hba->enable_dix_dif) + sz_slot_buf_ru = roundup(sizeof( + struct hisi_sas_slot_buf_table), 64); + else + sz_slot_buf_ru = roundup(sizeof( + struct hisi_sas_slot_dif_buf_table), 64); + s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; for (i = 0; i < blk_cnt; i++) { - struct hisi_sas_slot_buf_table *buf; + void *buf; dma_addr_t buf_dma; int slot_index = i * slots_per_blk; buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL); if (!buf) goto err_out; - memset(buf, 0, s); for (j = 0; j < slots_per_blk; j++, slot_index++) { struct hisi_sas_slot *slot; @@ -2080,8 +2463,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) slot->buf_dma = buf_dma; slot->idx = slot_index; - buf++; - buf_dma += sizeof(*buf); + buf += sz_slot_buf_ru; + buf_dma += sz_slot_buf_ru; } } @@ -2117,9 +2500,10 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) GFP_KERNEL); if (!hisi_hba->sata_breakpoint) goto err_out; - hisi_sas_init_mem(hisi_hba); hisi_sas_slot_index_init(hisi_hba); + hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); if (!hisi_hba->wq) { @@ -2135,6 +2519,14 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc); void hisi_sas_free(struct hisi_hba *hisi_hba) { + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[i]; + + del_timer_sync(&phy->timer); + } + if (hisi_hba->wq) destroy_workqueue(hisi_hba->wq); } @@ -2235,6 +2627,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; + int error; shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { @@ -2255,8 +2648,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (error) { dev_err(dev, "No usable DMA addressing method\n"); goto err_out; } @@ -2273,7 +2669,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, goto err_out; } - if (hisi_sas_alloc(hisi_hba, shost)) { + if 
(hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } @@ -2310,7 +2706,7 @@ int hisi_sas_probe(struct platform_device *pdev, arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); if (!arr_phy || !arr_port) { rc = -ENOMEM; - goto err_out_ha; + goto err_out; } sha->sas_phy = arr_phy; @@ -2321,10 +2717,17 @@ int hisi_sas_probe(struct platform_device *pdev, shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; + /* shost support 16 bytes cmd len base on hw */ shost->max_cmd_len = 16; - shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); - shost->can_queue = hisi_hba->hw->max_command_entries; - shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + if (hisi_hba->hw->slot_index_alloc) { + shost->can_queue = hisi_hba->hw->max_command_entries; + shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + } else { + shost->can_queue = hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT; + shost->cmd_per_lun = hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT; + } sha->sas_ha_name = DRV_NAME; sha->dev = hisi_hba->dev; @@ -2340,7 +2743,7 @@ int hisi_sas_probe(struct platform_device *pdev, rc = scsi_add_host(shost, &pdev->dev); if (rc) - goto err_out_ha; + goto err_out; rc = sas_register_ha(sha); if (rc) @@ -2356,43 +2759,1040 @@ int hisi_sas_probe(struct platform_device *pdev, err_out_register_ha: scsi_remove_host(shost); -err_out_ha: +err_out: hisi_sas_free(hisi_hba); scsi_host_put(shost); return rc; } EXPORT_SYMBOL_GPL(hisi_sas_probe); -int hisi_sas_remove(struct platform_device *pdev) +struct dentry *hisi_sas_debugfs_dir; + +static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) { - struct sas_ha_struct *sha = platform_get_drvdata(pdev); - struct hisi_hba *hisi_hba = sha->lldd_ha; - struct Scsi_Host *shost = sha->core.shost; + int queue_entry_size = hisi_hba->hw->complete_hdr_size; + int i; - if (timer_pending(&hisi_hba->timer)) - del_timer(&hisi_hba->timer); + for (i = 0; i < hisi_hba->queue_count; i++) + memcpy(hisi_hba->debugfs_complete_hdr[i], + hisi_hba->complete_hdr[i], + HISI_SAS_QUEUE_SLOTS * queue_entry_size); +} - sas_unregister_ha(sha); - sas_remove_host(sha->core.shost); +static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba) +{ + int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + memcpy(hisi_hba->debugfs_cmd_hdr[i], + hisi_hba->cmd_hdr[i], + HISI_SAS_QUEUE_SLOTS * queue_entry_size); +} + +static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) +{ + const struct hisi_sas_debugfs_reg *port = + hisi_hba->hw->debugfs_reg_port; + int i, phy_cnt; + u32 offset; + u32 *databuf; + + for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { + databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt]; + for (i = 0; i < port->count; i++, databuf++) { + offset = port->base_off + 4 * i; + *databuf = port->read_port_reg(hisi_hba, phy_cnt, + offset); + } + } +} + +static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba) +{ + u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg; + const struct hisi_sas_debugfs_reg *global = + hisi_hba->hw->debugfs_reg_global; + int i; + + for (i = 0; i < global->count; i++, databuf++) + *databuf = global->read_global_reg(hisi_hba, 4 * i); +} + +static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) +{ + void *databuf = hisi_hba->debugfs_itct; + struct hisi_sas_itct *itct; + int i; + + itct = hisi_hba->itct; + + for (i = 0; i 
< HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { + memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); + databuf += sizeof(struct hisi_sas_itct); + } +} + +static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba) +{ + int max_command_entries = hisi_hba->hw->max_command_entries; + void *databuf = hisi_hba->debugfs_iost; + struct hisi_sas_iost *iost; + int i; + + iost = hisi_hba->iost; + + for (i = 0; i < max_command_entries; i++, iost++) { + memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); + databuf += sizeof(struct hisi_sas_iost); + } +} + +const char * +hisi_sas_debugfs_to_reg_name(int off, int base_off, + const struct hisi_sas_debugfs_reg_lu *lu) +{ + for (; lu->name; lu++) { + if (off == lu->off - base_off) + return lu->name; + } + + return NULL; +} + +static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr, + struct seq_file *s) +{ + const struct hisi_sas_debugfs_reg *reg = ptr; + int i; + + for (i = 0; i < reg->count; i++) { + int off = i * 4; + const char *name; + + name = hisi_sas_debugfs_to_reg_name(off, reg->base_off, + reg->lu); + + if (name) + seq_printf(s, "0x%08x 0x%08x %s\n", off, + le32_to_cpu(regs_val[i]), name); + else + seq_printf(s, "0x%08x 0x%08x\n", off, + le32_to_cpu(regs_val[i])); + } +} + +static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global; + + hisi_sas_debugfs_print_reg((u32 *)hisi_hba->debugfs_global_reg, + reg_global, s); - hisi_sas_free(hisi_hba); - scsi_host_put(shost); return 0; } -EXPORT_SYMBOL_GPL(hisi_sas_remove); -static __init int hisi_sas_init(void) +static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp) { - hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); - if (!hisi_sas_stt) - return -ENOMEM; + return single_open(filp, hisi_sas_debugfs_global_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_global_fops = { + .open = hisi_sas_debugfs_global_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct { + int value; + char *name; +} hisi_sas_debugfs_loop_linkrate[] = { + { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, + { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, + { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, + { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit"}, +}; + +static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) { + int match = (hisi_hba->bist_loopback_linkrate == + hisi_sas_debugfs_loop_linkrate[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_linkrate[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); return 0; } -static __exit void hisi_sas_exit(void) +ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) { - sas_release_transport(hisi_sas_stt); + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->bist_loopback_enable) + return -EINVAL; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) { + if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name, + pkbuf, 16)) { + hisi_hba->bist_loopback_linkrate = + hisi_sas_debugfs_loop_linkrate[i].value; + found = true; + break; + } + } + + if (!found) { + dev_err(hisi_hba->dev, "unknown mode\n"); + return -EINVAL; + } + + return count; +} + +static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_linkrate_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = { + .open = hisi_sas_debugfs_bist_linkrate_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_linkrate_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct { + int value; + char *name; +} hisi_sas_debugfs_loop_code_mode[] = { + { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" }, + { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" }, + { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" }, + { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" }, + { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" }, + { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" }, + { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, + { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, + { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, + { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, +}; + +static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { + int match = (hisi_hba->bist_loopback_code_mode == + hisi_sas_debugfs_loop_code_mode[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_code_mode[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->bist_loopback_enable) + return -EINVAL; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { + if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name, + pkbuf, 16)) { + hisi_hba->bist_loopback_code_mode = + hisi_sas_debugfs_loop_code_mode[i].value; + found = true; + break; + } + } + + if (!found) { + dev_err(hisi_hba->dev, "unknown mode\n"); + return -EINVAL; + } + + return count; +} + +static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_code_mode_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = { + .open = hisi_sas_debugfs_bist_code_mode_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_code_mode_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + unsigned int phy; + int val; + + if (hisi_hba->bist_loopback_enable) + return -EINVAL; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + val = kstrtouint(pkbuf, 0, &phy); + if (val < 0) + return val; + + if (phy >= hisi_hba->n_phy) { + dev_err(hisi_hba->dev, "phy index %d exceeds limit\n", phy); + return -EINVAL; + } + + hisi_hba->bist_loopback_phy_id = phy; + + return count; +} + +static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->bist_loopback_phy_id); + + return 0; +} + +static int hisi_sas_debugfs_bist_phy_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_phy_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_phy_ops = { + .open = hisi_sas_debugfs_bist_phy_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_phy_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct { + int value; + char *name; +} hisi_sas_debugfs_loop_modes[] = { + { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" }, + { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, + { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, +}; + +static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { + int match = (hisi_hba->bist_loopback_mode == + hisi_sas_debugfs_loop_modes[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_modes[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->bist_loopback_enable) + return -EINVAL; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { + if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) { + hisi_hba->bist_loopback_mode = + hisi_sas_debugfs_loop_modes[i].value; + found = true; + break; + } + } + + if (!found) { + dev_err(hisi_hba->dev, "unknown mode\n"); + return -EINVAL; + } + + return count; +} + +static int hisi_sas_debugfs_bist_mode_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_mode_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_mode_ops = { + .open = hisi_sas_debugfs_bist_mode_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_mode_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + int val; + unsigned int enable; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + val = kstrtoint(pkbuf, 0, &enable); + if (val < 0) + return val; + + if (enable > 1) { + dev_err(hisi_hba->dev, "must be 0 or 1\n"); + return -EINVAL; + } + + if (enable == hisi_hba->bist_loopback_enable) + return count; + + if (!hisi_hba->hw->set_bist) + return -EPERM; + + val = hisi_hba->hw->set_bist(hisi_hba, (bool)enable); + if (val < 0) + return val; + + hisi_hba->bist_loopback_enable = enable; + + return count; +} + +static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->bist_loopback_enable); + + return 0; +} + +static int hisi_sas_debugfs_bist_enable_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_enable_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_enable_ops = { + .open = hisi_sas_debugfs_bist_enable_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_enable_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + + +static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p) +{ + struct hisi_sas_phy *phy = s->private; + struct hisi_hba *hisi_hba = phy->hisi_hba; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port; + u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id]; + + hisi_sas_debugfs_print_reg(databuf, reg_port, s); + + return 0; +} + +static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_port_fops = { + .open = hisi_sas_debugfs_port_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = 
THIS_MODULE, +}; + +static int hisi_sas_show_row_64(struct seq_file *s, int index, + int sz, u64 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + /* Convert unit of sz to 8 bytes before compare */ + for (i = 1; i <= sz / 8; i++, ptr++) { + seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); + if (!(i % 2)) + seq_puts(s, "\n\t"); + } + + seq_puts(s, "\n"); + + return 0; +} + +static int hisi_sas_show_row_32(struct seq_file *s, int index, + int sz, u32 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + /* Convert unit of sz to 4 bytes before compare */ + for (i = 1; i <= sz / 4; i++, ptr++) { + seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); + if (!(i % 4)) + seq_puts(s, "\n\t"); + } + seq_puts(s, "\n"); + + return 0; +} + +static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr) +{ + struct hisi_sas_cq *cq = cq_ptr; + struct hisi_hba *hisi_hba = cq->hisi_hba; + void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id]; + u64 offset = hisi_hba->hw->complete_hdr_size * slot; + void *complete_hdr = complete_queue + offset; + + return hisi_sas_show_row_32(s, slot, + hisi_hba->hw->complete_hdr_size, + complete_hdr); +} + +static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) +{ + struct hisi_sas_cq *cq = s->private; + int slot, ret; + + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { + ret = hisi_sas_cq_show_slot(s, slot, cq); + if (ret) + return ret; + } + return 0; +} + +static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_cq_fops = { + .open = hisi_sas_debugfs_cq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) +{ + struct hisi_sas_dq *dq = dq_ptr; + struct hisi_hba *hisi_hba = dq->hisi_hba; + void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id]; + u64 offset = sizeof(struct hisi_sas_cmd_hdr) * slot; + void *cmd_hdr = cmd_queue + offset; + + return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), + cmd_hdr); +} + +static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) +{ + int slot, ret; + + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { + ret = hisi_sas_dq_show_slot(s, slot, s->private); + if (ret) + return ret; + } + return 0; +} + +static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_dq_fops = { + .open = hisi_sas_debugfs_dq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; + int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; + + for (i = 0; i < max_command_entries; i++, debugfs_iost++) { + ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), + (u64 *)debugfs_iost); + if (ret) + return ret; + } + + return 0; +} + +static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private); +} + +static const struct 
file_operations hisi_sas_debugfs_iost_fops = { + .open = hisi_sas_debugfs_iost_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) +{ + int i, ret; + struct hisi_hba *hisi_hba = s->private; + struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; + + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { + ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), + (u64 *)debugfs_itct); + if (ret) + return ret; + } + + return 0; +} + +static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_itct_fops = { + .open = hisi_sas_debugfs_itct_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) +{ + struct dentry *dump_dentry; + struct dentry *dentry; + char name[256]; + int p; + int c; + int d; + + /* Create dump dir inside device dir */ + dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); + if (!dump_dentry) + goto fail; + + hisi_hba->debugfs_dump_dentry = dump_dentry; + + if (!debugfs_create_file("global", 0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_global_fops)) + goto fail; + + /* Create port dir and files */ + dentry = debugfs_create_dir("port", dump_dentry); + if (!dentry) + goto fail; + + for (p = 0; p < hisi_hba->n_phy; p++) { + snprintf(name, sizeof(name), "%d", p); + if (!debugfs_create_file(name, 0400, dentry, + &hisi_hba->phy[p], + &hisi_sas_debugfs_port_fops)) + goto fail; + } + + /* Create CQ dir and files */ + dentry = debugfs_create_dir("cq", dump_dentry); + if (!dentry) + goto fail; + + for (c = 0; c < hisi_hba->queue_count; c++) { + snprintf(name, sizeof(name), "%d", c); + + if (!debugfs_create_file(name, 0400, dentry, + &hisi_hba->cq[c], + &hisi_sas_debugfs_cq_fops)) + goto fail; + } + + /* Create DQ dir and files */ + dentry = debugfs_create_dir("dq", dump_dentry); + if (!dentry) + goto fail; + + for (d = 0; d < hisi_hba->queue_count; d++) { + snprintf(name, sizeof(name), "%d", d); + + if (!debugfs_create_file(name, 0400, dentry, + &hisi_hba->dq[d], + &hisi_sas_debugfs_dq_fops)) + goto fail; + } + + if (!debugfs_create_file("iost", 0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_iost_fops)) + goto fail; + + if (!debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_itct_fops)) + goto fail; + + return; +fail: + hisi_sas_debugfs_exit(hisi_hba); +} + +static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba) +{ + hisi_hba->hw->snapshot_prepare(hisi_hba); + + hisi_sas_debugfs_snapshot_global_reg(hisi_hba); + hisi_sas_debugfs_snapshot_port_reg(hisi_hba); + hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); + hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); + hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); + hisi_sas_debugfs_snapshot_iost_reg(hisi_hba); + + /* Avoid re-create files here */ + if (!hisi_hba->debugfs_dump_dentry) + hisi_sas_debugfs_create_files(hisi_hba); + + hisi_hba->hw->snapshot_restore(hisi_hba); +} + +static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct hisi_hba *hisi_hba = file->f_inode->i_private; + u8 buf[8]; + + /* + * The code, which used for upstream, check + * the value of debugfs_snapshot here. 
+ * If not 0, will return -EFAULT. + * Keep manual dump as one time only + */ + + /* Not allow to input more than 8 char */ + if (count > sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (buf[0] == '1') + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + else + return -EFAULT; + + return count; +} + +static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = { + .write = &hisi_sas_debugfs_trigger_dump_write, + .owner = THIS_MODULE, +}; + +void hisi_sas_debugfs_work_handler(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, debugfs_work); + + hisi_sas_debugfs_snapshot_regs(hisi_hba); +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); + +void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) +{ + int max_command_entries = hisi_hba->hw->max_command_entries; + struct device *dev = hisi_hba->dev; + int p, i, c, d; + size_t sz; + + hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), + hisi_sas_debugfs_dir); + + if (!hisi_hba->debugfs_dir) + return; + + debugfs_create_file("trigger_dump", 0600, + hisi_hba->debugfs_dir, + hisi_hba, + &hisi_sas_debugfs_trigger_dump_fops); + /* create bist structures */ + hisi_hba->debugfs_bist_dentry = debugfs_create_dir("bist", + hisi_hba->debugfs_dir); + if (!hisi_hba->debugfs_bist_dentry) + goto fail_global; + + if (!debugfs_create_file("link_rate", 0644, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &hisi_sas_debugfs_bist_linkrate_ops)) + goto fail_global; + + if (!debugfs_create_file("code_mode", 0644, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &hisi_sas_debugfs_bist_code_mode_ops)) + goto fail_global; + + if (!debugfs_create_file("phy_id", 0644, hisi_hba->debugfs_bist_dentry, + hisi_hba, &hisi_sas_debugfs_bist_phy_ops)) + goto fail_global; + + if (!debugfs_create_u32("cnt", 0644, hisi_hba->debugfs_bist_dentry, + &hisi_hba->bist_loopback_cnt)) + goto fail_global; + + if (!debugfs_create_file("loopback_mode", 0400, + hisi_hba->debugfs_bist_dentry, + hisi_hba, &hisi_sas_debugfs_bist_mode_ops)) + goto fail_global; + + if (!debugfs_create_file("enable", 0644, hisi_hba->debugfs_bist_dentry, + hisi_hba, &hisi_sas_debugfs_bist_enable_ops)) + goto fail_global; + + /* Alloc buffer for global */ + sz = hisi_hba->hw->debugfs_reg_global->count * 4; + hisi_hba->debugfs_global_reg = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_global_reg) + goto fail_global; + + /* Alloc buffer for port */ + sz = hisi_hba->hw->debugfs_reg_port->count * 4; + for (p = 0; p < hisi_hba->n_phy; p++) { + hisi_hba->debugfs_port_reg[p] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_port_reg[p]) + goto fail_port; + } + + /* Alloc buffer for cq */ + sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + for (c = 0; c < hisi_hba->queue_count; c++) { + hisi_hba->debugfs_complete_hdr[c] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_complete_hdr[c]) + goto fail_cq; + } + + /* Alloc buffer for dq */ + sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + for (d = 0; d < hisi_hba->queue_count; d++) { + hisi_hba->debugfs_cmd_hdr[d] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_cmd_hdr[d]) + goto fail_iost_dq; + } + + /* Alloc buffer for iost */ + sz = max_command_entries * sizeof(struct hisi_sas_iost); + + hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost) + goto fail_iost_dq; + + /* Alloc buffer for itct */ + /* New memory allocation must be locate 
before itct */ + sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + + hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_itct) + goto fail_itct; + + return; +fail_itct: + devm_kfree(dev, hisi_hba->debugfs_iost); +fail_iost_dq: + for (i = 0; i < d; i++) + devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); +fail_cq: + for (i = 0; i < c; i++) + devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); +fail_port: + for (i = 0; i < p; i++) + devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); + devm_kfree(dev, hisi_hba->debugfs_global_reg); +fail_global: + hisi_sas_debugfs_exit(hisi_hba); + dev_info(dev, "failed to init debugfs!\n"); +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); + +void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba) +{ + debugfs_remove_recursive(hisi_hba->debugfs_dir); + hisi_hba->debugfs_dir = NULL; +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit); + +int hisi_sas_remove(struct platform_device *pdev) +{ + struct sas_ha_struct *sha = platform_get_drvdata(pdev); + struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = sha->core.shost; + + if (timer_pending(&hisi_hba->timer)) + del_timer(&hisi_hba->timer); + + sas_unregister_ha(sha); + sas_remove_host(sha->core.shost); + + hisi_sas_free(hisi_hba); + scsi_host_put(shost); + return 0; +} +EXPORT_SYMBOL_GPL(hisi_sas_remove); + +bool hisi_sas_debugfs_enable = true; +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); +module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); +MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); + +static __init int hisi_sas_init(void) +{ + hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); + if (!hisi_sas_stt) + return -ENOMEM; + + if (hisi_sas_debugfs_enable) + hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); + + return 0; +} + +static __exit void hisi_sas_exit(void) +{ + sas_release_transport(hisi_sas_stt); + + debugfs_remove(hisi_sas_debugfs_dir); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 8f60f0e0459960616faf1849739632ad2e7c01b2..6485e2b6456c0abd1478bc1fdb813145fa72f86f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -406,7 +406,7 @@ enum { TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ }; -#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192 +#define HISI_SAS_COMMAND_ENTRIES_V1_HW 4096 #define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) #define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) @@ -423,13 +423,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) return readl(regs); } -static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) -{ - void __iomem *regs = hisi_hba->regs + off; - - return readl_relaxed(regs); -} - static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { @@ -544,7 +537,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); } -static void clear_itct_v1_hw(struct hisi_hba *hisi_hba, +static int clear_itct_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { u64 dev_id = sas_dev->device_id; @@ -564,6 +557,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba, qw0 = cpu_to_le64(itct->qw0); qw0 &= ~ITCT_HDR_VALID_MSK; itct->qw0 = cpu_to_le64(qw0); + + return 0; } static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) @@ -797,16 +792,11 @@ static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) 
enable_phy_v1_hw(hisi_hba, phy_no); } -static void stop_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no) -{ - disable_phy_v1_hw(hisi_hba, phy_no); -} - static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { - stop_phy_v1_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); msleep(100); - start_phy_v1_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); } static void start_phys_v1_hw(struct timer_list *t) @@ -816,7 +806,7 @@ static void start_phys_v1_hw(struct timer_list *t) for (i = 0; i < hisi_hba->n_phy; i++) { hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a); - start_phy_v1_hw(hisi_hba, i); + hisi_sas_phy_enable(hisi_hba, i, 1); } } @@ -834,7 +824,7 @@ static void phys_init_v1_hw(struct hisi_hba *hisi_hba) mod_timer(timer, jiffies + HZ); } -static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -875,40 +865,14 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/* - * The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. - */ -static int -get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "could not find free slot\n"); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - /* DQ lock must be taken here */ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; @@ -930,9 +894,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, - int n_elem) + u64 n_elem, + struct hisi_sas_cmd_hdr *hdr) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; @@ -1001,9 +965,9 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba, struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct hisi_sas_tmf_task *tmf = slot->tmf; - int has_data = 0, priority = !!tmf; + int has_data = 0; u8 *buf_cmd, fburst = 0; - u32 dw1, dw2; + u32 dw1, dw2, priority = !!tmf; /* create header */ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | @@ -1051,8 +1015,8 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); if (has_data) - prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + prep_prd_sge_v1_hw(hisi_hba, slot, task->scatter, + slot->n_elem, hdr); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -1301,8 +1265,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { slot_err_v1_hw(hisi_hba, task, slot); - if (unlikely(slot->abort)) + if (unlikely(slot->abort)) { + sas_task_abort(task); return ts->stat; + } goto out; } @@ -1354,7 +1320,7 @@ static int 
slot_complete_v1_hw(struct hisi_hba *hisi_hba, } out: - hisi_sas_slot_task_free(hisi_hba, task, slot); + hisi_sas_slot_task_free(hisi_hba, task, slot, true); sts = ts->stat; if (task->task_done) @@ -1799,6 +1765,11 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) return 0; } +static struct device_attribute *host_attrs_v1_hw[] = { + &dev_attr_phy_event_threshold, + NULL +}; + static struct scsi_host_template sht_v1_hw = { .name = DRV_NAME, .module = THIS_MODULE, @@ -1809,26 +1780,26 @@ static struct scsi_host_template sht_v1_hw = { .scan_start = hisi_sas_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, - .can_queue = 1, .this_id = -1, - .sg_tablesize = SG_ALL, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, - .shost_attrs = host_attrs, + .shost_attrs = host_attrs_v1_hw, + .host_reset = hisi_sas_host_reset, }; static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, .setup_itct = setup_itct_v1_hw, - .sl_notify = sl_notify_v1_hw, + .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, .prep_smp = prep_smp_v1_hw, .prep_ssp = prep_ssp_v1_hw, - .get_free_slot = get_free_slot_v1_hw, .start_delivery = start_delivery_v1_hw, .slot_complete = slot_complete_v1_hw, .phys_init = phys_init_v1_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 9c5c5a601332ed0e57adc36a7282c3c63fcf3d7f..f9867176fa14f288b0a3215114cf3ab50edddaf7 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -427,70 +427,70 @@ static const struct hisi_sas_hw_error one_bit_ecc_errors[] = { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), .msk = HGC_DQE_ECC_1B_ADDR_MSK, .shift = HGC_DQE_ECC_1B_ADDR_OFF, - .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n", + .msg = "hgc_dqe_ecc1b_intr", .reg = HGC_DQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF), .msk = HGC_IOST_ECC_1B_ADDR_MSK, .shift = HGC_IOST_ECC_1B_ADDR_OFF, - .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n", + .msg = "hgc_iost_ecc1b_intr", .reg = HGC_IOST_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF), .msk = HGC_ITCT_ECC_1B_ADDR_MSK, .shift = HGC_ITCT_ECC_1B_ADDR_OFF, - .msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n", + .msg = "hgc_itct_ecc1b_intr", .reg = HGC_ITCT_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF), .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, - .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n", + .msg = "hgc_iostl_ecc1b_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF), .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, - .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n", + .msg = "hgc_itctl_ecc1b_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF), .msk = HGC_CQE_ECC_1B_ADDR_MSK, .shift = HGC_CQE_ECC_1B_ADDR_OFF, - .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n", + .msg = "hgc_cqe_ecc1b_intr", .reg = HGC_CQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, - .msg = 
"rxm_mem0_acc1b_intr found: memory address is 0x%08X\n", + .msg = "rxm_mem0_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, - .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n", + .msg = "rxm_mem1_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, - .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n", + .msg = "rxm_mem2_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF), .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, - .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n", + .msg = "rxm_mem3_ecc1b_intr", .reg = HGC_RXM_DFX_STATUS15, }, }; @@ -500,70 +500,70 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), .msk = HGC_DQE_ECC_MB_ADDR_MSK, .shift = HGC_DQE_ECC_MB_ADDR_OFF, - .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n", + .msg = "hgc_dqe_eccbad_intr", .reg = HGC_DQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), .msk = HGC_IOST_ECC_MB_ADDR_MSK, .shift = HGC_IOST_ECC_MB_ADDR_OFF, - .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n", + .msg = "hgc_iost_eccbad_intr", .reg = HGC_IOST_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), .msk = HGC_ITCT_ECC_MB_ADDR_MSK, .shift = HGC_ITCT_ECC_MB_ADDR_OFF, - .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n", + .msg = "hgc_itct_eccbad_intr", .reg = HGC_ITCT_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, - .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = "hgc_iostl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, - .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = "hgc_itctl_eccbad_intr", .reg = HGC_LM_DFX_STATUS2, }, { .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), .msk = HGC_CQE_ECC_MB_ADDR_MSK, .shift = HGC_CQE_ECC_MB_ADDR_OFF, - .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n", + .msg = "hgc_cqe_eccbad_intr", .reg = HGC_CQE_ECC_ADDR, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, - .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = "rxm_mem0_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, - .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = "rxm_mem1_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, - .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = "rxm_mem2_eccbad_intr", .reg = HGC_RXM_DFX_STATUS14, }, { .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, - .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n", + .msg = 
"rxm_mem3_eccbad_intr", .reg = HGC_RXM_DFX_STATUS15, }, }; @@ -770,14 +770,15 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, /* This function needs to be protected from pre-emption. */ static int -slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, - struct domain_device *device) +slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, + struct domain_device *device) { int sata_dev = dev_is_sata(device); void *bitmap = hisi_hba->slot_index_tags; struct hisi_sas_device *sas_dev = device->lldd_dev; int sata_idx = sas_dev->sata_idx; int start, end; + unsigned long flags; if (!sata_dev) { /* @@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, end = 64 * (sata_idx + 2); } + spin_lock_irqsave(&hisi_hba->lock, flags); while (1) { start = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, start); - if (start >= end) + if (start >= end) { + spin_unlock_irqrestore(&hisi_hba->lock, flags); return -SAS_QUEUE_FULL; + } /* * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. */ @@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, } set_bit(start, bitmap); - *slot_idx = start; - return 0; + spin_unlock_irqrestore(&hisi_hba->lock, flags); + return start; } static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) @@ -864,12 +868,13 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + sas_dev->dev_status = HISI_SAS_DEV_INIT; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->sata_idx = sata_idx; sas_dev->dq = dq; + spin_lock_init(&sas_dev->lock); INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } @@ -973,13 +978,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void clear_itct_v2_hw(struct hisi_hba *hisi_hba, +static int clear_itct_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; int i; sas_dev->completion = &completion; @@ -989,13 +995,20 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba, hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); + /* need to set register twice to clear ITCT for v2 hw */ for (i = 0; i < 2; i++) { reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); - wait_for_completion(sas_dev->completion); + if (!wait_for_completion_timeout(sas_dev->completion, + CLEAR_ITCT_TIMEOUT * HZ)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } memset(itct, 0, sizeof(struct hisi_sas_itct)); } + + return 0; } static void free_device_v2_hw(struct hisi_sas_device *sas_dev) @@ -1540,14 +1553,14 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; - disable_phy_v2_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); if (phy->identify.device_type == SAS_END_DEVICE) { txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } msleep(100); - start_phy_v2_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); } 
static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -1580,11 +1593,11 @@ static void phys_init_v2_hw(struct hisi_hba *hisi_hba) if (!sas_phy->phy->enabled) continue; - start_phy_v2_hw(hisi_hba, i); + hisi_sas_phy_enable(hisi_hba, i, 1); } } -static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -1636,41 +1649,14 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/* - * The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. - */ -static int -get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n", - queue, r, w); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - /* DQ lock must be taken here */ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; @@ -1692,9 +1678,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, - int n_elem) + u64 n_elem, + struct hisi_sas_cmd_hdr *hdr) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; @@ -1764,9 +1750,9 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba, struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct hisi_sas_tmf_task *tmf = slot->tmf; - int has_data = 0, priority = !!tmf; + int has_data = 0; u8 *buf_cmd; - u32 dw1 = 0, dw2 = 0; + u32 dw1 = 0, dw2 = 0, priority = !!tmf; hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | (2 << CMD_HDR_TLR_CTRL_OFF) | @@ -1807,8 +1793,8 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) - prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + prep_prd_sge_v2_hw(hisi_hba, slot, task->scatter, + slot->n_elem, hdr); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -2047,6 +2033,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type); u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); + struct hisi_sas_complete_v2_hdr *complete_queue = + hisi_hba->complete_hdr[slot->cmplt_queue]; + struct hisi_sas_complete_v2_hdr *complete_hdr = + &complete_queue[slot->cmplt_queue_slot]; + u32 dw0 = le32_to_cpu(complete_hdr->dw0); int error = -1; if (err_phase == 1) { @@ -2332,7 +2323,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, break; } } - hisi_sas_sata_done(task, slot); + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); } break; default: @@ -2356,6 +2348,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) 
&complete_queue[slot->cmplt_queue_slot]; unsigned long flags; bool is_internal = slot->is_internal; + u32 dw0; if (unlikely(!task || !task->lldd_task || !task->dev)) return -EINVAL; @@ -2380,7 +2373,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } /* Use SAS+TMF status codes */ - switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) + dw0 = le32_to_cpu(complete_hdr->dw0); + switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { case STAT_IO_ABORTED: /* this io has been aborted by abort command */ @@ -2406,9 +2400,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) break; } - if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && - (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { - u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK) + if ((dw0 & CMPLT_HDR_ERX_MSK) && + (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { + u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK) >> CMPLT_HDR_ERR_PHASE_OFF; u32 *error_info = hisi_sas_status_buf_addr_mem(slot); @@ -2419,17 +2413,19 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) slot_err_v2_hw(hisi_hba, task, slot, 2); if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d " + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d " "CQ hdr: 0x%x 0x%x 0x%x 0x%x " "Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, - complete_hdr->dw0, complete_hdr->dw1, + dw0, complete_hdr->dw1, complete_hdr->act, complete_hdr->dw3, error_info[0], error_info[1], error_info[2], error_info[3]); - if (unlikely(slot->abort)) + if (unlikely(slot->abort)) { + sas_task_abort(task); return ts->stat; + } goto out; } @@ -2468,7 +2464,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { ts->stat = SAM_STAT_GOOD; - hisi_sas_sata_done(task, slot); + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); break; } default: @@ -2483,22 +2480,22 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } out: - hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%p) aborted\n", task); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); return SAS_ABORTED_TASK; } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); + hisi_sas_slot_task_free(hisi_hba, task, slot, true); if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%p) ignored\n ", + dev_info(dev, "slot complete: task(%pK) ignored\n ", task); return sts; } @@ -2523,16 +2520,22 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct hisi_sas_tmf_task *tmf = slot->tmf; u8 *buf_cmd; - int has_data = 0, hdr_tag = 0; - u32 dw1 = 0, dw2 = 0; + int has_data = 0; + u32 dw1 = 0, dw2 = 0, hdr_tag = 0; /* create header */ /* dw0 */ hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else + 
} else { + int phy_id = device->phy->identify.phy_identifier; + + hdr->dw0 |= cpu_to_le32((1 << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); + } if (tmf && tmf->force_phy) { hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; @@ -2565,7 +2568,10 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ - if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } @@ -2578,8 +2584,8 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) - prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + prep_prd_sge_v2_hw(hisi_hba, slot, task->scatter, + slot->n_elem, hdr); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -2629,7 +2635,7 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t) static void prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - int device_id, int abort_flag, int tag_to_abort) + unsigned int device_id, int abort_flag, int tag_to_abort) { struct sas_task *task = slot->task; struct domain_device *dev = task->dev; @@ -2673,6 +2679,8 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) if (is_sata_phy_v2_hw(hisi_hba, phy_no)) goto end; + del_timer(&phy->timer); + if (phy_no == 8) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); @@ -2752,6 +2760,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_port *port = phy->port; struct device *dev = hisi_hba->dev; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -2880,6 +2889,30 @@ static const struct hisi_sas_hw_error port_ecc_axi_error[] = { }, }; +static void wait_phyup_timedout_v2_hw(struct timer_list *t) +{ + struct hisi_sas_phy *phy = from_timer(phy, t, timer); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + int phy_no = phy->sas_phy.id; + + dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); +} + +static void phy_oob_ready_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct device *dev = hisi_hba->dev; + + if (!timer_pending(&phy->timer)) { + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + phy->timer.function = wait_phyup_timedout_v2_hw; + phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT; + add_timer(&phy->timer); + } +} + static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; @@ -2940,6 +2973,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) phy_bcast_v2_hw(phy_no, hisi_hba); + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) + phy_oob_ready_v2_hw(hisi_hba, phy_no); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value0 & (~CHL_INT0_HOTPLUG_TOUT_MSK) @@ -2969,7 +3005,8 @@ one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) val = hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; - dev_warn(dev, ecc_error->msg, val); + dev_warn(dev, "%s found: mem addr is 
0x%08X\n", + ecc_error->msg, val); } } } @@ -2988,7 +3025,8 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, val = hisi_sas_read32(hisi_hba, ecc_error->reg); val &= ecc_error->msk; val >>= ecc_error->shift; - dev_err(dev, ecc_error->msg, irq_value, val); + dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", + ecc_error->msg, irq_value, val); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } @@ -3217,6 +3255,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) unsigned long flags; int phy_no, offset; + del_timer(&phy->timer); + phy_no = sas_phy->id; initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; @@ -3383,6 +3423,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } + hisi_hba->nvecs = hisi_hba->queue_count; + hisi_hba->dq_num_per_node = hisi_hba->queue_count / num_online_nodes(); return 0; free_cq_int_irqs: @@ -3532,7 +3574,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, +static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; @@ -3547,9 +3589,19 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } + if (time >= timeout_ms) + return -ETIMEDOUT; + dev_dbg(dev, "wait commands complete %dms\n", time); + + return 0; } +struct device_attribute *host_attrs_v2_hw[] = { + &dev_attr_phy_event_threshold, + NULL +}; + static struct scsi_host_template sht_v2_hw = { .name = DRV_NAME, .module = THIS_MODULE, @@ -3560,16 +3612,17 @@ static struct scsi_host_template sht_v2_hw = { .scan_start = hisi_sas_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, - .can_queue = 1, .this_id = -1, - .sg_tablesize = SG_ALL, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, - .shost_attrs = host_attrs, + .shost_attrs = host_attrs_v2_hw, + .host_reset = hisi_sas_host_reset, }; static const struct hisi_sas_hw hisi_sas_v2_hw = { @@ -3577,7 +3630,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, .alloc_dev = alloc_dev_quirk_v2_hw, - .sl_notify = sl_notify_v2_hw, + .sl_notify_ssp = sl_notify_ssp_v2_hw, .get_wideport_bitmap = get_wideport_bitmap_v2_hw, .clear_itct = clear_itct_v2_hw, .free_device = free_device_v2_hw, @@ -3585,7 +3638,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .prep_ssp = prep_ssp_v2_hw, .prep_stp = prep_ata_v2_hw, .prep_abort = prep_abort_v2_hw, - .get_free_slot = get_free_slot_v2_hw, .start_delivery = start_delivery_v2_hw, .slot_complete = slot_complete_v2_hw, .phys_init = phys_init_v2_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 08b503e274b818948546e724085c277d27ef3061..35076b9ce265cd00adeb73d875e857476d35946f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -11,7 +11,7 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" -/* global registers need init*/ +/* global registers need init */ #define DLVRY_QUEUE_ENABLE 0x0 #define 
IOST_BASE_ADDR_LO 0x8 #define IOST_BASE_ADDR_HI 0xc @@ -28,11 +28,14 @@ #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) #define ITCT_DEV_OFF 0 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) +#define SAS_AXI_USER3 0x50 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 #define CFG_MAX_TAG 0x68 +#define SAS_DMAC_OUTSTAND 0x6c +#define TRANS_LOCK_ICT_TIME 0X70 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 #define HGC_GET_ITV_TIME 0x90 @@ -42,8 +45,11 @@ #define MAX_CON_TIME_LIMIT_TIME 0xa4 #define BUS_INACTIVE_LIMIT_TIME 0xa8 #define REJECT_TO_OPEN_LIMIT_TIME 0xac +#define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 +#define CFG_ICT_TIMER_STEP_TRSH 0xc8 +#define CFG_1US_TIMER_TRSH 0xcc #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) @@ -51,7 +57,36 @@ #define CFG_ABT_SET_IPTT_DONE 0xd8 #define CFG_ABT_SET_IPTT_DONE_OFF 0 #define HGC_IOMB_PROC1_STATUS 0x104 +#define HGC_LM_DFX_STATUS2 0x128 +#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 +#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ + HGC_LM_DFX_STATUS2_IOSTLIST_OFF) +#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 +#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ + HGC_LM_DFX_STATUS2_ITCTLIST_OFF) +#define HGC_CQE_ECC_ADDR 0x13c +#define HGC_CQE_ECC_1B_ADDR_OFF 0 +#define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) +#define HGC_CQE_ECC_MB_ADDR_OFF 8 +#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) +#define HGC_IOST_ECC_ADDR 0x140 +#define HGC_IOST_ECC_1B_ADDR_OFF 0 +#define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) +#define HGC_IOST_ECC_MB_ADDR_OFF 16 +#define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) +#define HGC_DQE_ECC_ADDR 0x144 +#define HGC_DQE_ECC_1B_ADDR_OFF 0 +#define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) +#define HGC_DQE_ECC_MB_ADDR_OFF 16 +#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) #define CHNL_INT_STATUS 0x148 +#define HGC_ITCT_ECC_ADDR 0x150 +#define HGC_ITCT_ECC_1B_ADDR_OFF 0 +#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_1B_ADDR_OFF) +#define HGC_ITCT_ECC_MB_ADDR_OFF 16 +#define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ + HGC_ITCT_ECC_MB_ADDR_OFF) #define HGC_AXI_FIFO_ERR_INFO 0x154 #define AXI_ERR_INFO_OFF 0 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) @@ -80,6 +115,10 @@ #define ENT_INT_SRC3_ITC_INT_OFF 15 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) #define ENT_INT_SRC3_ABT_OFF 16 +#define ENT_INT_SRC3_DQE_POISON_OFF 18 +#define ENT_INT_SRC3_IOST_POISON_OFF 19 +#define ENT_INT_SRC3_ITCT_POISON_OFF 20 +#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21 #define ENT_INT_SRC_MSK1 0x1c4 #define ENT_INT_SRC_MSK2 0x1c8 #define ENT_INT_SRC_MSK3 0x1cc @@ -89,6 +128,28 @@ #define HGC_COM_INT_MSK 0x1d8 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) #define SAS_ECC_INTR 0x1e8 +#define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 +#define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 +#define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 +#define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 +#define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4 +#define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5 +#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6 +#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7 +#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8 +#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9 +#define 
SAS_ECC_INTR_CQE_ECC_1B_OFF 10 +#define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12 +#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14 +#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16 +#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18 +#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19 +#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20 +#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21 #define SAS_ECC_INTR_MSK 0x1ec #define HGC_ERR_STAT_EN 0x238 #define CQE_SEND_CNT 0x248 @@ -104,6 +165,20 @@ #define COMPL_Q_0_DEPTH 0x4e8 #define COMPL_Q_0_WR_PTR 0x4ec #define COMPL_Q_0_RD_PTR 0x4f0 +#define HGC_RXM_DFX_STATUS14 0xae8 +#define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 +#define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM0_OFF) +#define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 +#define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM1_OFF) +#define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 +#define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS14_MEM2_OFF) +#define HGC_RXM_DFX_STATUS15 0xaec +#define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 +#define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ + HGC_RXM_DFX_STATUS15_MEM3_OFF) #define AWQOS_AWCACHE_CFG 0xc84 #define ARQOS_ARCACHE_CFG 0xc88 #define HILINK_ERR_DFX 0xe04 @@ -123,10 +198,32 @@ #define PHY_CFG_PHY_RST_OFF 3 #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF) #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define CFG_PROG_PHY_LINK_RATE_OFF 8 +#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) +#define CMD_HDR_PIR_OFF 8 +#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) +#define SERDES_CFG (PORT_BASE + 0x1c) +#define CFG_ALOS_CHK_DISABLE_OFF 9 +#define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF) +#define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c) +#define CFG_BIST_MODE_SEL_OFF 0 +#define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF) +#define CFG_LOOP_TEST_MODE_OFF 14 +#define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF) +#define SAS_PHY_BIST_CODE (PORT_BASE + 0x30) +#define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34) +#define CFG_RX_BIST_EN_OFF 16 +#define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF) +#define CFG_TX_BIST_EN_OFF 17 +#define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF) +#define CFG_BIST_TEST_OFF 18 +#define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF) +#define SAS_BIST_ERR_CNT (PORT_BASE + 0x38) #define SL_CFG (PORT_BASE + 0x84) +#define AIP_LIMIT (PORT_BASE + 0x90) #define SL_CONTROL (PORT_BASE + 0x94) #define SL_CONTROL_NOTIFY_EN_OFF 0 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) @@ -149,6 +246,7 @@ #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) +#define DONE_RECEVIED_TIME (PORT_BASE + 0x11c) #define STP_LINK_TIMER (PORT_BASE + 0x120) #define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124) #define CON_CFG_DRIVER (PORT_BASE + 0x130) @@ -167,21 +265,28 @@ #define CHL_INT0_PHY_RDY_OFF 5 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) #define CHL_INT1 (PORT_BASE + 0x1b8) -#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 -#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) -#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 -#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << 
CHL_INT1_DMAC_RX_ECC_ERR_OFF) +#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15 +#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16 +#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17 +#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 +#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23 +#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24 +#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26 +#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27 #define CHL_INT2 (PORT_BASE + 0x1bc) #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 +#define CHL_INT2_RX_DISP_ERR_OFF 28 +#define CHL_INT2_RX_CODE_ERR_OFF 29 #define CHL_INT2_RX_INVLD_DW_OFF 30 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) @@ -201,6 +306,7 @@ #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) +#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ @@ -218,10 +324,8 @@ #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) #define AXI_CFG (0x5100) #define AM_ROB_ECC_ERR_ADDR (0x510c) -#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0 -#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF) -#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8 -#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF) +#define AM_ROB_ECC_ERR_ADDR_OFF 0 +#define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff /* RAS registers need init */ #define RAS_BASE (0x6000) @@ -244,6 +348,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -291,8 +399,12 @@ #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) #define CMPLT_HDR_ERROR_PHASE_OFF 2 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) +/* bit[9:2] Error Phase */ +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE BIT(8) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) +#define CMPLT_HDR_RSPNS_GOOD_OFF 11 +#define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF) #define CMPLT_HDR_ERX_OFF 12 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) #define CMPLT_HDR_ABORT_STAT_OFF 13 @@ -308,8 +420,16 @@ #define CMPLT_HDR_DEV_ID_OFF 16 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) /* dw3 */ +/* ERR_CODE */ +#define SATA_DISK_IN_ERROR_STATUS BIT(8) +#define COMLT_HDR_SATA_DISK_ERR_OFF 16 +#define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << COMLT_HDR_SATA_DISK_ERR_OFF) #define CMPLT_HDR_IO_IN_TARGET_OFF 17 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) +/* bit[23:18] ERR_FIS_ATA_STATUS */ +#define FIS_ATA_STATUS_ERR BIT(18) +/* bit[31:24] ERR_FIS_TYPE */ +#define FIS_TYPE_SDB BIT(31) /* ITCT header */ /* qw0 */ @@ -331,6 
+451,16 @@ #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) +struct hisi_sas_protect_iu_v3_hw { + u32 dw0; + u32 lbrtcv; + u32 lbrtgv; + u32 dw3; + u32 dw4; + u32 dw5; + u32 rsv; +}; + struct hisi_sas_complete_v3_hdr { __le32 dw0; __le32 dw1; @@ -356,6 +486,9 @@ struct hisi_sas_err_record_v3 { #define RX_DATA_LEN_UNDERFLOW_OFF 6 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) +#define RX_FIS_STATUS_ERR_OFF 0 +#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF) + #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 #define HISI_SAS_MSI_COUNT_V3_HW 32 @@ -370,18 +503,65 @@ struct hisi_sas_err_record_v3 { ((fis.command == ATA_CMD_DEV_RESET) && \ ((fis.control & ATA_SRST) != 0))) -static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) -{ - void __iomem *regs = hisi_hba->regs + off; +#define T10_INSRT_EN_OFF 0 +#define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF) +#define T10_RMV_EN_OFF 1 +#define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF) +#define T10_RPLC_EN_OFF 2 +#define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF) +#define T10_CHK_EN_OFF 3 +#define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF) +#define INCR_LBRT_OFF 5 +#define INCR_LBRT_MSK (1 << INCR_LBRT_OFF) +#define USR_DATA_BLOCK_SZ_OFF 20 +#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF) +#define T10_CHK_MSK_OFF 16 + +#define HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW 16 +#define HISI_SAS_MIN_VECTORS_V3_HW 17 + +#define NEXT_DQCQ_REG_OFF 0x14 + +#define HISI_SAS_BAR_TO_IOMAP 5 + +#define PCIDEV_REVISION_1620_ES 0x20 +#define PCIDEV_REVISION_1620_CS 0x21 + +#define PCI_IRQ_PHY 1 +#define PCI_IRQ_CHANNEL 2 +#define PCI_IRQ_AXI_FATAL 11 +#define PCI_IRQ_CQ_BASE HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW + +#define HISI_SAS_IS_RW_CMD(op) \ + ((op == READ_6) || (op == WRITE_6) || \ + (op == READ_10) || (op == WRITE_10) || \ + (op == READ_12) || (op == WRITE_12) || \ + (op == READ_16) || (op == WRITE_16) || \ + (op == READ_32) || (op == WRITE_32)) - return readl(regs); -} +enum { + DSM_FUNC_ERR_HANDLE_MSI = 0, +}; + +static bool hisi_sas_intr_conv; +MODULE_PARM_DESC(intr_conv, "interrupt converge on or off:0 or 1(def=0)"); + +static int enable_dix_dif; +module_param(enable_dix_dif, int, 0444); +MODULE_PARM_DESC(enable_dix_dif, + " Enable DIX/DIF:\n" + " 0 -- No DIF support.\n"); -static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) +static bool user_ctl_irq; +module_param(user_ctl_irq, bool, 0444); +MODULE_PARM_DESC(user_ctl_irq, "Enable user control irq affinity:\n" + "default is auto-control irq affinity"); + +static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; - return readl_relaxed(regs); + return readl(regs); } static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) @@ -394,6 +574,7 @@ static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, u32 off, u32 val) { + /* Each reg of phy cost 0x400 bytes memory to save */ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; writel(val, regs); @@ -402,6 +583,7 @@ static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, int phy_no, u32 off) { + /* Each reg of phy cost 0x400 bytes memory to save */ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; return readl(regs); @@ -423,31 +605,34 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, static void init_reg_v3_hw(struct 
hisi_hba *hisi_hba) { - struct pci_dev *pdev = hisi_hba->pci_dev; int i; /* Global registers init */ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); + hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); + /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */ + hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); + hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); + hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, + hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); - if (pdev->revision >= 0x21) - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff); - else - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff); hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); - hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555); hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); for (i = 0; i < hisi_hba->queue_count; i++) @@ -455,6 +640,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + if (skip_bus_flag) { + hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0x19); + hisi_sas_write32(hisi_hba, SAS_DMAC_OUTSTAND, 0x48); + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE, 0x10000); + } + for (i = 0; i < hisi_hba->n_phy; i++) { struct hisi_sas_phy *phy = &hisi_hba->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -470,53 +661,82 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_get_prog_phy_linkrate_mask(max) | 0x800; } - hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, - prog_phy_link_rate); - hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); + + if (skip_bus_flag) { + hisi_sas_phy_write32(hisi_hba, i, + PROG_PHY_LINK_RATE, 0x801); + } else { + hisi_sas_phy_write32(hisi_hba, i, + PROG_PHY_LINK_RATE, + prog_phy_link_rate); + hisi_sas_phy_write32(hisi_hba, i, + SAS_RX_TRAIN_TIMER, 0x13e80); + } + + hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); - if (pdev->revision >= 0x21) - hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, - 0xffffffff); - else - hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, - 0xff87ffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, 
PHYCTRL_PHY_ENA_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); - hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); + + if (skip_bus_flag) { + hisi_sas_phy_write32(hisi_hba, i, + PHYCTRL_OOB_RESTART_MSK, 0x0); + hisi_sas_phy_write32(hisi_hba, i, + SAS_SSP_CON_TIMER_CFG, 0xa03e8); + hisi_sas_phy_write32(hisi_hba, i, + SAS_STP_CON_TIMER_CFG, 0xa03e8); + hisi_sas_phy_write32(hisi_hba, i, + DONE_RECEVIED_TIME, 0x100); + } else { + hisi_sas_phy_write32(hisi_hba, i, + PHYCTRL_OOB_RESTART_MSK, 0x1); + } hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); + hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, + 0x30f4240); + hisi_sas_phy_write32(hisi_hba, i, + SAS_SSP_CON_TIMER_CFG, 0x32); /* used for 12G negotiate */ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); + hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); } for (i = 0; i < hisi_hba->queue_count; i++) { /* Delivery queue */ hisi_sas_write32(hisi_hba, - DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), + DLVRY_Q_0_BASE_ADDR_HI + + (i * NEXT_DQCQ_REG_OFF), upper_32_bits(hisi_hba->cmd_hdr_dma[i])); - hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), + hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + + (i * NEXT_DQCQ_REG_OFF), lower_32_bits(hisi_hba->cmd_hdr_dma[i])); - hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), + hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + + (i * NEXT_DQCQ_REG_OFF), HISI_SAS_QUEUE_SLOTS); /* Completion queue */ - hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + + (i * NEXT_DQCQ_REG_OFF), upper_32_bits(hisi_hba->complete_hdr_dma[i])); - hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), + hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + + (i * NEXT_DQCQ_REG_OFF), lower_32_bits(hisi_hba->complete_hdr_dma[i])); - hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), + hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + + (i * NEXT_DQCQ_REG_OFF), HISI_SAS_QUEUE_SLOTS); } @@ -556,7 +776,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) upper_32_bits(hisi_hba->initial_fis_dma)); /* RAS registers init */ - hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); + hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0xf80000); hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0); hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0); @@ -660,13 +880,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, +static int clear_itct_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; sas_dev->completion = &completion; @@ -675,12 +896,18 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); - /* clear the itct table*/ + /* clear the itct table */ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); - wait_for_completion(sas_dev->completion); + if (!wait_for_completion_timeout(sas_dev->completion, + CLEAR_ITCT_TIMEOUT * HZ)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } + memset(itct, 0, 
sizeof(struct hisi_sas_itct)); + return 0; } static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, @@ -689,9 +916,11 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, *slot2; struct hisi_sas_device *sas_dev = device->lldd_dev; u32 cfg_abt_set_query_iptt; + unsigned long flags; cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, CFG_ABT_SET_QUERY_IPTT); + spin_lock_irqsave(&sas_dev->lock, flags); list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | @@ -699,6 +928,7 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, cfg_abt_set_query_iptt); } + spin_unlock_irqrestore(&sas_dev->lock, flags); cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, cfg_abt_set_query_iptt); @@ -714,11 +944,14 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); - /* Disable all of the PHYs */ + /* + * Disable all of the PHYs, + * 50us delay is need for hw + */ hisi_sas_stop_phys(hisi_hba); udelay(50); - /* Ensure axi bus idle */ + /* Ensure axi bus idle, with 20ms delay and 1s time out */ ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, 20000, 1000000); if (ret) { @@ -745,6 +978,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) static int hw_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; + union acpi_object *obj; + guid_t guid; int rc; rc = reset_hw_v3_hw(hisi_hba); @@ -753,9 +988,25 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba) return rc; } + /* Delay 100ms to reset hw */ msleep(100); init_reg_v3_hw(hisi_hba); + if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { + dev_err(dev, "Parse GUID failed\n"); + return -EINVAL; + } + + /* + * Switch over to MSI handling due to non-standard PCI implementation. 
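+	 * The _DSM evaluated below (function DSM_FUNC_ERR_HANDLE_MSI under the
+	 * controller-specific GUID) signals firmware to deliver error handling
+	 * through MSI; if the method is absent the driver only warns and carries on.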
+ */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, + DSM_FUNC_ERR_HANDLE_MSI, NULL); + if (!obj) + dev_warn(dev, "Switch over to MSI handling failed\n"); + else + ACPI_FREE(obj); + return 0; } @@ -771,11 +1022,18 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); + static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | + BIT(CHL_INT2_RX_CODE_ERR_OFF) | + BIT(CHL_INT2_RX_INVLD_DW_OFF); u32 state; + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk); + cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); + /* Delay 50ms after disable phy to meet hw need */ mdelay(50); state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -783,6 +1041,20 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) cfg |= PHY_CFG_PHY_RST_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } + + /* + * Wait 1us till all misleading error code to have occured + */ + udelay(1); + + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); + hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk); + + irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, (~msk) & irq_msk); } static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -797,19 +1069,25 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 txid_auto; - disable_phy_v3_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 0); if (phy->identify.device_type == SAS_END_DEVICE) { txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } + + /* Delay 100ms after phy hard reset to meet hw need */ msleep(100); - start_phy_v3_hw(hisi_hba, phy_no); + hisi_sas_phy_enable(hisi_hba, phy_no, 1); } static enum sas_linkrate phy_get_max_linkrate_v3_hw(void) { - return SAS_LINK_RATE_12_0_GBPS; + + if (skip_bus_flag) + return SAS_LINK_RATE_1_5_GBPS; + else + return SAS_LINK_RATE_12_0_GBPS; } static void phys_init_v3_hw(struct hisi_hba *hisi_hba) @@ -823,11 +1101,11 @@ static void phys_init_v3_hw(struct hisi_hba *hisi_hba) if (!sas_phy->phy->enabled) continue; - start_phy_v3_hw(hisi_hba, i); + hisi_sas_phy_enable(hisi_hba, i, 1); } } -static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -840,6 +1118,21 @@ static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); } +void ssp_notify_work_handler(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, notify_work); + int i; + + for (i = 0; i < HISI_SAS_MAX_PHYS; i++) { + if (hisi_hba->phy[i].need_notify) { + sl_notify_ssp_v3_hw(hisi_hba, i); + hisi_hba->phy[i].need_notify = 0; + } + } +} + + static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) { int i, bitmap = 0; @@ -848,46 +1141,20 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) for (i = 0; i < hisi_hba->n_phy; i++) if (phy_state & BIT(i)) + /* Each num ma of port cost 4 
bit to save */ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) bitmap |= BIT(i); return bitmap; } -/** - * The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. - */ -static int -get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n", - queue, r, w); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - static void start_delivery_v3_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; struct hisi_sas_slot *s, *s1, *s2 = NULL; - struct list_head *dq_list; int dlvry_queue = dq->id; int wp; - dq_list = &dq->list; list_for_each_entry_safe(s, s1, &dq->list, delivery) { if (!s->ready) break; @@ -904,14 +1171,16 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq) smp_rmb(); wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; - hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); + hisi_sas_write32(hisi_hba, + DLVRY_Q_0_WR_PTR + + (dlvry_queue * NEXT_DQCQ_REG_OFF), wp); } static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - struct hisi_sas_cmd_hdr *hdr, struct scatterlist *scatter, - int n_elem) + u64 n_elem, + struct hisi_sas_cmd_hdr *hdr) { struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); struct scatterlist *sg; @@ -928,7 +1197,112 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); - hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); +} + +static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_dif_page *sge_dif_page = + hisi_sas_sge_dif_addr_mem(slot); + struct scatterlist *sg; + int i; + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_dif_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->dif_prd_table_addr = cpu_to_le64( + hisi_sas_sge_dif_addr_dma(slot)); + + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF); +} + +static void hisi_sas_fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, + struct hisi_sas_protect_iu_v3_hw *prot) +{ + u8 prot_type = scsi_get_prot_type(scsi_cmnd); + u8 prot_op = scsi_get_prot_op(scsi_cmnd); + u32 prot_interval = scsi_prot_interval(scsi_cmnd); + u32 lbrt_chk_val; + + /* + * if interval of sector size equal to 4096, + * lbrt_chk_val should divided by 8 for it + * was base on 512. 
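+	 * For example, with a 4096-byte interval a 512-based LBA of 0x800
+	 * becomes a reference tag value of 0x100 (0x800 >> 3).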
+ */ + if (scsi_prot_interval(scsi_cmnd) == 4096) + lbrt_chk_val = (u32)(scsi_get_lba(scsi_cmnd) >> 3); + else + lbrt_chk_val = (u32)scsi_get_lba(scsi_cmnd); + + switch (prot_op) { + case SCSI_PROT_READ_INSERT: + prot->dw0 |= T10_INSRT_EN_MSK; + prot->lbrtgv = lbrt_chk_val; + break; + case SCSI_PROT_READ_STRIP: + prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); + prot->lbrtcv = lbrt_chk_val; + if (prot_type == SCSI_PROT_DIF_TYPE1) + prot->dw4 |= (0xc << 16); + else if (prot_type == SCSI_PROT_DIF_TYPE3) + prot->dw4 |= (0xfc << 16); + break; + case SCSI_PROT_READ_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + if (prot_type == SCSI_PROT_DIF_TYPE1) + prot->dw4 |= (0xc << 16); + else if (prot_type == SCSI_PROT_DIF_TYPE3) + prot->dw4 |= (0xfc << 16); + break; + case SCSI_PROT_WRITE_INSERT: + prot->dw0 |= T10_INSRT_EN_MSK; + prot->lbrtgv = lbrt_chk_val; + break; + case SCSI_PROT_WRITE_STRIP: + prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); + prot->lbrtcv = lbrt_chk_val; + break; + case SCSI_PROT_WRITE_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + if (prot_type == SCSI_PROT_DIF_TYPE1) + prot->dw4 |= (0xc << 16); + else if (prot_type == SCSI_PROT_DIF_TYPE3) + prot->dw4 |= (0xfc << 16); + break; + default: + WARN(1, "prot_op(0x%x) is not valid\n", prot_op); + break; + } + + /* Different Config base on interval of sector size */ + switch (prot_interval) { + case 512: + break; + case 4096: + prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF); + break; + case 520: + prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF); + break; + default: + WARN(1, "prot_interval(0x%x) is not valid\n", + prot_interval); + break; + } + + prot->dw0 |= INCR_LBRT_MSK; } static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, @@ -942,30 +1316,34 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct hisi_sas_tmf_task *tmf = slot->tmf; - int has_data = 0, priority = !!tmf; + int has_data = 0; u8 *buf_cmd; - u32 dw1 = 0, dw2 = 0; + u32 dw1 = 0, dw2 = 0, len = 0, priority = !!tmf; hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | - (2 << CMD_HDR_TLR_CTRL_OFF) | + (2 << CMD_HDR_TLR_CTRL_OFF) | /* no retransmit */ (port->id << CMD_HDR_PORT_OFF) | (priority << CMD_HDR_PRIORITY_OFF) | (1 << CMD_HDR_CMD_OFF)); /* ssp */ dw1 = 1 << CMD_HDR_VDTL_OFF; if (tmf) { - dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; /* task frame */ dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { + if (!HISI_SAS_IS_RW_CMD(scsi_cmnd->cmnd[0])) + dw1 = 0 << CMD_HDR_VDTL_OFF; + else + dw1 = 1 << CMD_HDR_VDTL_OFF; dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; switch (scsi_cmnd->sc_data_direction) { case DMA_TO_DEVICE: - has_data = 1; dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; + has_data = 1; break; case DMA_FROM_DEVICE: - has_data = 1; dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; + has_data = 1; break; default: dw1 &= ~CMD_HDR_DIR_MSK; @@ -976,6 +1354,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; hdr->dw1 = cpu_to_le32(dw1); + /* + * The unit of CMD Frame LEN is Dwords + * Add extra 3 bytes to avoid Numerical loss after divided by 4 + */ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + 3) / 4) << CMD_HDR_CFL_OFF) | ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | @@ -983,11 +1365,16 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) 
- prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + if (has_data) { + prep_prd_sge_v3_hw(hisi_hba, slot, task->scatter, + slot->n_elem, hdr); + if (scsi_prot_sg_count(scsi_cmnd)) { + prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr, + scsi_prot_sglist(scsi_cmnd), + slot->n_elem_dif); + } + } - hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); @@ -996,9 +1383,17 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, memcpy(buf_cmd, &task->ssp_task.LUN, 8); if (!tmf) { + /* + * Command frame + * Reference: SAS Protocol Layer,see chapter: Transport layer + */ buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3); memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); } else { + /* + * Task frame + * Reference: SAS Protocol Layer,see chapter: Transport layer + */ buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: @@ -1012,6 +1407,31 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, break; } } + + if (!tmf && !scsi_prot_op_normal(scsi_cmnd)) { + u8 *buf_cmd_prot; + struct hisi_sas_protect_iu_v3_hw prot; + int prot_op = scsi_get_prot_op(scsi_cmnd); + + hdr->dw7 |= 1 << CMD_HDR_ADDR_MODE_SEL_OFF; + hdr->dw1 |= CMD_HDR_PIR_MSK; + buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) + + sizeof(struct ssp_frame_hdr) + + sizeof(struct ssp_command_iu); + memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw)); + hisi_sas_fill_prot_v3_hw(scsi_cmnd, &prot); + memcpy(buf_cmd_prot, &prot, + sizeof(struct hisi_sas_protect_iu_v3_hw)); + if ((prot_op == SCSI_PROT_READ_INSERT) || + (prot_op == SCSI_PROT_WRITE_INSERT) || + (prot_op == SCSI_PROT_WRITE_PASS) || + (prot_op == SCSI_PROT_READ_PASS)) + len = (task->total_xfer_len >> + ilog2(scsi_prot_interval(scsi_cmnd))) * 8; + + } + + hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len); } static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, @@ -1065,14 +1485,20 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u8 *buf_cmd; - int has_data = 0, hdr_tag = 0; - u32 dw1 = 0, dw2 = 0; + int has_data = 0; + u32 dw1 = 0, dw2 = 0, hdr_tag = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) - hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else - hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { + hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); /* STP */ + } else { + int phy_id = device->phy->identify.phy_identifier; + + hdr->dw0 |= cpu_to_le32((1 << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; + hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); /* SATA */ + } switch (task->data_dir) { case DMA_TO_DEVICE: @@ -1102,21 +1528,24 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ - if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | - 2 << CMD_HDR_SG_MOD_OFF; + 2 << CMD_HDR_SG_MOD_OFF; /* SGE mode */ hdr->dw2 = cpu_to_le32(dw2); /* dw3 */ hdr->transfer_tags = cpu_to_le32(slot->idx); if (has_data) - 
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + prep_prd_sge_v3_hw(hisi_hba, slot, task->scatter, + slot->n_elem, hdr); hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); @@ -1132,7 +1561,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - int device_id, int abort_flag, int tag_to_abort) + unsigned int device_id, int abort_flag, int tag_to_abort) { struct sas_task *task = slot->task; struct domain_device *dev = task->dev; @@ -1140,7 +1569,7 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = slot->port; /* dw0 */ - hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ + hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/ (port->id << CMD_HDR_PORT_OFF) | (dev_is_sata(dev) << CMD_HDR_ABORT_DEVICE_TYPE_OFF) | @@ -1159,15 +1588,18 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba, static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { - int i, res; + int i; + irqreturn_t res; u32 context, port_id, link_rate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; unsigned long flags; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); + /* Port id store in 4 bits */ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); port_id = (port_id >> (4 * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); @@ -1178,6 +1610,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) res = IRQ_NONE; goto end; } + sas_phy->linkrate = link_rate; phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); @@ -1187,6 +1620,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_initial_fis *initial_fis; struct dev_to_host_fis *fis; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; + struct Scsi_Host *shost = hisi_hba->shost; dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); initial_fis = &hisi_hba->initial_fis[phy_no]; @@ -1202,7 +1636,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) } sas_phy->oob_mode = SATA_OOB_MODE; - attached_sas_addr[0] = 0x50; + attached_sas_addr[0] = 0x50; /* sas address for SATA */ + attached_sas_addr[6] = shost->host_no; attached_sas_addr[7] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, @@ -1219,6 +1654,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) (struct sas_identify_frame *)frame_rcvd; dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); + /* IDENTIFY Frame length: 7 Dwords, last Dwords reserved */ for (i = 0; i < 6; i++) { u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, RX_IDAF_DWORD0 + (i * 4)); @@ -1259,9 +1695,11 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 phy_state, sl_ctrl, txid_auto; struct device *dev = hisi_hba->dev; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1336,6 +1774,7 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) res = IRQ_HANDLED; } } + /* Irq mask of each phy store in 4 bits */ irq_msk >>= 4; phy_no++; } @@ -1344,6 +1783,14 @@ static 
irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) } static const struct hisi_sas_hw_error port_axi_error[] = { + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF), + .msg = "dmac_tx_ecc_bad_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF), + .msg = "dmac_rx_ecc_bad_err", + }, { .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF), .msg = "dma_tx_axi_wr_err", @@ -1360,6 +1807,22 @@ static const struct hisi_sas_hw_error port_axi_error[] = { .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF), .msg = "dma_rx_axi_rd_err", }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF), + .msg = "dma_tx_fifo_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF), + .msg = "dma_rx_fifo_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF), + .msg = "dma_tx_axi_ruser_err", + }, + { + .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF), + .msg = "dma_rx_axi_ruser_err", + }, }; static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -1394,6 +1857,9 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct pci_dev *pci_dev = hisi_hba->pci_dev; struct device *dev = hisi_hba->dev; + const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | + BIT(CHL_INT2_RX_CODE_ERR_OFF) | + BIT(CHL_INT2_RX_INVLD_DW_OFF); irq_value &= ~irq_msk; if (!irq_value) @@ -1410,15 +1876,37 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) dev_warn(dev, "phy%d stp link timeout (0x%x)\n", phy_no, reg_value); + /* if time out is happened at state: L_RCVCHKRDY */ if (reg_value & BIT(4)) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } + if (pci_dev->revision > PCIDEV_REVISION_1620_ES && + (irq_value & msk)) { + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_phy *sphy = sas_phy->phy; + + hisi_hba->hw->get_events(hisi_hba, phy_no); + + if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) + dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no, + sphy->invalid_dword_count); + + if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF)) + dev_info(dev, "phy%d code violation cnt: %u\n", phy_no, + phy->code_error_count); + + if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF)) + dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no, + sphy->running_disparity_error_count); + } + if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && - (pci_dev->revision == 0x20)) { + (pci_dev->revision == PCIDEV_REVISION_1620_ES)) { u32 reg_value; int rc; + /* Delay: 1ms timeout: 10ms */ rc = hisi_sas_read32_poll_timeout_atomic( HILINK_ERR_DFX, reg_value, !((reg_value >> 8) & BIT(phy_no)), @@ -1430,6 +1918,44 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); } +static void wait_phyup_timedout_v3_hw(struct timer_list *t) +{ + struct hisi_sas_phy *phy = from_timer(phy, t, timer); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + int phy_no = phy->sas_phy.id; + + dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); +} + +static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + struct device *dev = hisi_hba->dev; + + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + if (phy->phy_attached) + goto out; + + if (!timer_pending(&phy->timer)) { + 
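+			/*
+			 * OOB is ready but phyup has not arrived yet: arm a
+			 * one-shot timer so wait_phyup_timedout_v3_hw() can
+			 * issue a link reset if the phy does not come up
+			 * within HISI_SAS_WAIT_PHYUP_TIMEOUT.
+			 */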
phy->timer.function = wait_phyup_timedout_v3_hw; + phy->timer.expires = jiffies + + HISI_SAS_WAIT_PHYUP_TIMEOUT; + add_timer(&phy->timer); + } + } + +out: + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); +} + static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; @@ -1438,10 +1964,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & 0xeeeeeeee; - + /* + * These three irqs mask save at high 3 bit, the last bit + * is reserved. So we use 0xe to clear those after handle. + */ while (irq_msk) { - u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT0); + if (irq_msk & (2 << (phy_no * 4))) + handle_chl_int0_v3_hw(hisi_hba, phy_no); if (irq_msk & (4 << (phy_no * 4))) handle_chl_int1_v3_hw(hisi_hba, phy_no); @@ -1449,13 +1978,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) if (irq_msk & (8 << (phy_no * 4))) handle_chl_int2_v3_hw(hisi_hba, phy_no); - if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { - hisi_sas_phy_write32(hisi_hba, phy_no, - CHL_INT0, irq_value0 - & (~CHL_INT0_SL_RX_BCST_ACK_MSK) - & (~CHL_INT0_SL_PHY_ENABLE_MSK) - & (~CHL_INT0_NOT_RDY_MSK)); - } irq_msk &= ~(0xe << (phy_no * 4)); phy_no++; } @@ -1463,6 +1985,122 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { + { + .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), + .msk = HGC_DQE_ECC_MB_ADDR_MSK, + .shift = HGC_DQE_ECC_MB_ADDR_OFF, + .msg = "hgc_dqe_eccbad_intr", + .reg = HGC_DQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), + .msk = HGC_IOST_ECC_MB_ADDR_MSK, + .shift = HGC_IOST_ECC_MB_ADDR_OFF, + .msg = "hgc_iost_eccbad_intr", + .reg = HGC_IOST_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), + .msk = HGC_ITCT_ECC_MB_ADDR_MSK, + .shift = HGC_ITCT_ECC_MB_ADDR_OFF, + .msg = "hgc_itct_eccbad_intr", + .reg = HGC_ITCT_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, + .msg = "hgc_iostl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), + .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, + .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, + .msg = "hgc_itctl_eccbad_intr", + .reg = HGC_LM_DFX_STATUS2, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), + .msk = HGC_CQE_ECC_MB_ADDR_MSK, + .shift = HGC_CQE_ECC_MB_ADDR_OFF, + .msg = "hgc_cqe_eccbad_intr", + .reg = HGC_CQE_ECC_ADDR, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, + .msg = "rxm_mem0_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, + .msg = "rxm_mem1_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, + .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, + .msg = "rxm_mem2_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS14, + }, + { + .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), + .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, + .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, + .msg = "rxm_mem3_eccbad_intr", + .reg = HGC_RXM_DFX_STATUS15, + }, + { + .irq_msk = 
BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF), + .msk = AM_ROB_ECC_ERR_ADDR_MSK, + .shift = AM_ROB_ECC_ERR_ADDR_OFF, + .msg = "ooo_ram_eccbad_intr", + .reg = AM_ROB_ECC_ERR_ADDR, + }, +}; + +static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba, + u32 irq_value) +{ + struct device *dev = hisi_hba->dev; + const struct hisi_sas_hw_error *ecc_error; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { + ecc_error = &multi_bit_ecc_errors[i]; + if (irq_value & ecc_error->irq_msk) { + val = hisi_sas_read32(hisi_hba, ecc_error->reg); + val &= ecc_error->msk; + val >>= ecc_error->shift; + dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", + ecc_error->msg, irq_value, val); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } + } +} + +static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 irq_value, irq_msk; + + irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); + if (irq_value) + multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value); + + hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); +} + static const struct hisi_sas_hw_error axi_error[] = { { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" }, { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" }, @@ -1515,6 +2153,23 @@ static const struct hisi_sas_hw_error fatal_axi_error[] = { .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), .msg = "SAS_HGC_ABT fetch LM list", }, + { + .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF), + .msg = "read dqe poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF), + .msg = "read iost poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF), + .msg = "read itct poison", + }, + { + .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF), + .msg = "read itct ncq poison", + }, + }; static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) @@ -1522,6 +2177,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) u32 irq_value, irq_msk; struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; + struct pci_dev *pdev = hisi_hba->pci_dev; int i; irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); @@ -1553,8 +2209,21 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } + + if (pdev->revision < PCIDEV_REVISION_1620_CS) { + u32 reg_val; + + reg_val = hisi_sas_read32(hisi_hba, + AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); + } } + fatal_ecc_int_v3_hw(hisi_hba); + if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); u32 dev_id = reg_val & ITCT_DEV_MSK; @@ -1572,6 +2241,69 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static void hisi_sas_set_sense_data(struct sas_task *task, + struct hisi_sas_slot *slot) +{ + struct ssp_response_iu *iu = + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record); + if ((iu->status == SAM_STAT_CHECK_CONDITION) && + (iu->datapres == SENSE_DATA)) { + struct task_status_struct *ts = &task->task_status; + + ts->buf_valid_size = + min_t(int, SAS_STATUS_BUF_SIZE, + be32_to_cpu(iu->sense_data_len)); + memcpy(ts->buf, iu->sense_data, ts->buf_valid_size); + ts->stat = SAM_STAT_CHECK_CONDITION; + } +} + +static bool is_ncq_err(struct hisi_sas_complete_v3_hdr *complete_hdr) +{ + u32 
dw0, dw3; + + dw0 = le32_to_cpu(complete_hdr->dw0); + dw3 = le32_to_cpu(complete_hdr->dw3); + + return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE) && + (dw3 & FIS_TYPE_SDB) && + (dw3 & FIS_ATA_STATUS_ERR); +} + +static void hisi_sas_ata_device_link_abort(struct domain_device *device) +{ + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link = &ap->link; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ + + link->eh_info.err_mask |= AC_ERR_DEV; + link->eh_info.action |= ATA_EH_RESET; + ata_link_abort(link); + spin_unlock_irqrestore(ap->lock, flags); +} + +static void set_aborted_iptt(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot) +{ + u32 cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, + CFG_ABT_SET_QUERY_IPTT); + cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; + cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | + (slot->idx << CFG_SET_ABORTED_IPTT_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, + cfg_abt_set_query_iptt); + hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, + 1 << CFG_ABT_SET_IPTT_DONE_OFF); +} + static void slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) @@ -1585,13 +2317,20 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, hisi_sas_status_buf_addr_mem(slot); u32 dma_rx_err_type = record->dma_rx_err_type; u32 trans_tx_fail_type = record->trans_tx_fail_type; + u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type); + u32 dw0 = le32_to_cpu(complete_hdr->dw0); + u32 dw3 = le32_to_cpu(complete_hdr->dw3); switch (task->task_proto) { case SAS_PROTOCOL_SSP: if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; - } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) && + (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) { + hisi_sas_set_sense_data(task, slot); + } + } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { ts->stat = SAS_QUEUE_FULL; slot->abort = 1; } else { @@ -1602,17 +2341,30 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { - ts->residual = trans_tx_fail_type; - ts->stat = SAS_DATA_UNDERRUN; - } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && + (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) { + if (task->ata_task.use_ncq) { + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = + device->lldd_dev; + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + slot->abort = 1; + } else { + ts->stat = SAS_PROTO_RESPONSE; + } + } else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) || + (dw3 & SATA_DISK_IN_ERROR_STATUS)) { ts->stat = SAS_PHY_DOWN; slot->abort = 1; + } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { + ts->residual = trans_tx_fail_type; + ts->stat = SAS_DATA_UNDERRUN; } else { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; } - hisi_sas_sata_done(task, slot); + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); break; case SAS_PROTOCOL_SMP: ts->stat = SAM_STAT_CHECK_CONDITION; @@ -1622,6 +2374,29 @@ slot_err_v3_hw(struct 
hisi_hba *hisi_hba, struct sas_task *task, } } +static int ssp_need_spin_up(struct hisi_sas_slot *slot) +{ + bool rc; + int sb_len; + u8 *sense_buffer; + struct scsi_sense_hdr sshdr; + struct ssp_response_iu *iu = + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record); + sb_len = iu->sense_data_len; + sense_buffer = iu->sense_data; + rc = scsi_normalize_sense(sense_buffer, sb_len, &sshdr); + + /* + * if the SAS disk response with ASC=04h, + * ASCQ=11h, host should send NOTIFY primitive. + */ + if (rc && sshdr.asc == 0x4 && sshdr.ascq == 0x11) + return 1; + + return 0; +} + static int slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { @@ -1638,6 +2413,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) &complete_queue[slot->cmplt_queue_slot]; unsigned long flags; bool is_internal = slot->is_internal; + u32 dw0, dw1, dw3; if (unlikely(!task || !task->lldd_task || !task->dev)) return -EINVAL; @@ -1661,10 +2437,14 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) goto out; } + dw0 = le32_to_cpu(complete_hdr->dw0); + dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); + /* * Use SAS+TMF status codes */ - switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) + switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { case STAT_IO_ABORTED: /* this IO has been aborted by abort command */ @@ -1687,22 +2467,69 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) break; } - /* check for erroneous completion */ - if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { + /* check for erroneous completion, 0x3 means abnormal */ + if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { u32 *error_info = hisi_sas_status_buf_addr_mem(slot); + u32 device_id = (dw1 & 0xffff0000) >> 16; + struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + set_aborted_iptt(hisi_hba, slot); slot_err_v3_hw(hisi_hba, task, slot); - if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d " - "CQ hdr: 0x%x 0x%x 0x%x 0x%x " - "Error info: 0x%x 0x%x 0x%x 0x%x\n", - slot->idx, task, sas_dev->device_id, - complete_hdr->dw0, complete_hdr->dw1, - complete_hdr->act, complete_hdr->dw3, - error_info[0], error_info[1], - error_info[2], error_info[3]); - if (unlikely(slot->abort)) + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d sas_addr=0x%llx " + "CQ hdr: 0x%x 0x%x 0x%x 0x%x " + "Error info: 0x%x 0x%x 0x%x 0x%x\n", + slot->idx, task, sas_dev->device_id, + itct->sas_addr, + dw0, dw1, + complete_hdr->act, dw3, + error_info[0], error_info[1], + error_info[2], error_info[3]); + + if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && + (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP)) { + struct hisi_sas_status_buffer *status_buf = + hisi_sas_status_buf_addr_mem(slot); + u8 *iu = &status_buf->iu[0]; + struct dev_to_host_fis *d2h = + (struct dev_to_host_fis *)iu; + + dev_info(dev, "sata d2h status 0x%02x, error 0x%02x\n", + d2h->status, d2h->error); + } + + if ((error_info[3] & RX_DATA_LEN_UNDERFLOW_MSK) && + (task->task_proto == SAS_PROTOCOL_SSP)) { + /*print detail sense info when data underflow happened*/ + bool rc; + int sb_len; + u8 *sense_buffer; + struct scsi_sense_hdr sshdr; + struct ssp_response_iu *iu = + hisi_sas_status_buf_addr_mem(slot) + + sizeof(struct hisi_sas_err_record); + + sb_len = iu->sense_data_len; + sense_buffer = iu->sense_data; + rc = scsi_normalize_sense(sense_buffer, sb_len, &sshdr); 
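+			/*
+			 * scsi_normalize_sense() decodes the sense buffer into
+			 * sshdr and returns false when no valid sense data is
+			 * present, in which case only the raw response code is
+			 * printed below.
+			 */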
+ if (rc) + dev_info(dev, "data underflow, rsp_code:0x%x, sensekey:0x%x, ASC:0x%x, ASCQ:0x%x.\n", + sshdr.response_code, + sshdr.sense_key, + sshdr.asc, + sshdr.ascq); + else + dev_info(dev, "data underflow without sense, rsp_code:0x%02x.\n", + iu->resp_data[0]); + } + if (unlikely(slot->abort)) { + if (dev_is_sata(device) && task->ata_task.use_ncq) + hisi_sas_ata_device_link_abort(device); + else + sas_task_abort(task); + return ts->stat; + } goto out; } @@ -1713,6 +2540,17 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) sizeof(struct hisi_sas_err_record); sas_ssp_task_response(dev, task, iu); + if ((!(device->parent && + DEV_IS_EXPANDER(device->parent->dev_type))) && + ssp_need_spin_up(slot)) { + int phy_no; + + dev_info(dev, "disk not ready ,need send NOTIFY primitive, phy_id:%d\n", + device->phy->identify.phy_identifier); + phy_no = device->phy->identify.phy_identifier; + hisi_hba->phy[phy_no].need_notify = 1; + queue_work(hisi_hba->wq, &hisi_hba->notify_work); + } break; } case SAS_PROTOCOL_SMP: { @@ -1737,7 +2575,8 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: ts->stat = SAM_STAT_GOOD; - hisi_sas_sata_done(task, slot); + if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) + hisi_sas_sata_done(task, slot); break; default: ts->stat = SAM_STAT_CHECK_CONDITION; @@ -1751,22 +2590,22 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } out: - hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%p) aborted\n", task); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); return SAS_ABORTED_TASK; } task->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&task->task_state_lock, flags); + hisi_sas_slot_task_free(hisi_hba, task, slot, true); if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%p) ignored\n ", + dev_info(dev, "slot complete: task(%pK) ignored\n ", task); return sts; } @@ -1779,6 +2618,31 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) return sts; } +static void hisi_sas_disk_err_handler(struct hisi_hba *hisi_hba, + struct hisi_sas_complete_v3_hdr *complete_hdr) +{ + u32 dw0 = le32_to_cpu(complete_hdr->dw0); + u32 dw1 = le32_to_cpu(complete_hdr->dw1); + u32 dw3 = le32_to_cpu(complete_hdr->dw3); + int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + struct hisi_sas_itct *itct = + &hisi_hba->itct[device_id]; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[device_id]; + struct domain_device *device = sas_dev->sas_device; + struct device *dev = hisi_hba->dev; + + dev_err(dev, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n", + device_id, itct->sas_addr, dw0, dw1, + complete_hdr->act, dw3); + + if (is_ncq_err(complete_hdr)) + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + + hisi_sas_ata_device_link_abort(device); +} + static void cq_tasklet_v3_hw(unsigned long val) { struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; @@ -1791,17 +2655,24 @@ static void cq_tasklet_v3_hw(unsigned long val) complete_queue = hisi_hba->complete_hdr[queue]; wr_point 
= hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + - (0x14 * queue)); + (NEXT_DQCQ_REG_OFF * queue)); while (rd_point != wr_point) { struct hisi_sas_complete_v3_hdr *complete_hdr; struct device *dev = hisi_hba->dev; + u32 dw0, dw1, dw3; int iptt; complete_hdr = &complete_queue[rd_point]; - - iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; - if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { + dw0 = le32_to_cpu(complete_hdr->dw0); + dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); + iptt = dw1 & CMPLT_HDR_IPTT_MSK; + + if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) && + (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) { + hisi_sas_disk_err_handler(hisi_hba, complete_hdr); + } else if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; @@ -1815,7 +2686,9 @@ static void cq_tasklet_v3_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; - hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + hisi_sas_write32(hisi_hba, + COMPL_Q_0_RD_PTR + (NEXT_DQCQ_REG_OFF * queue), + rd_point); } static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) @@ -1838,15 +2711,39 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) int vectors, rc; int i, k; int max_msi = HISI_SAS_MSI_COUNT_V3_HW; + int max_dq_num, online_numa_num; + struct irq_affinity desc = { + .pre_vectors = HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW, + }; + + if (user_ctl_irq) { + vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, + max_msi, PCI_IRQ_MSI); + } else { + vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, + HISI_SAS_MIN_VECTORS_V3_HW, + max_msi, + PCI_IRQ_MSI | + PCI_IRQ_AFFINITY, + &desc); + } - vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, - max_msi, PCI_IRQ_MSI); - if (vectors < max_msi) { - dev_err(dev, "could not allocate all msi (%d)\n", vectors); + if (vectors < HISI_SAS_MIN_VECTORS_V3_HW) { + dev_err(dev, "allocate msi (%d) not enough\n", vectors); return -ENOENT; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), + hisi_hba->nvecs = vectors - HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW; + max_dq_num = (hisi_hba->nvecs < hisi_hba->queue_count) ? 
+ hisi_hba->nvecs : hisi_hba->queue_count; + online_numa_num = num_online_nodes(); + dev_info(dev, "vectors nvecs:%d, online_numa:%d\n", + hisi_hba->nvecs, online_numa_num); + if (max_dq_num > online_numa_num) + hisi_hba->dq_num_per_node = max_dq_num / online_numa_num; + else + hisi_hba->dq_num_per_node = 1; + rc = devm_request_irq(dev, pci_irq_vector(pdev, PCI_IRQ_PHY), int_phy_up_down_bcast_v3_hw, 0, DRV_NAME " phy", hisi_hba); if (rc) { @@ -1855,7 +2752,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) goto free_irq_vectors; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), + rc = devm_request_irq(dev, pci_irq_vector(pdev, PCI_IRQ_CHANNEL), int_chnl_int_v3_hw, 0, DRV_NAME " channel", hisi_hba); if (rc) { @@ -1864,7 +2761,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) goto free_phy_irq; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), + rc = devm_request_irq(dev, pci_irq_vector(pdev, PCI_IRQ_AXI_FATAL), fatal_axi_int_v3_hw, 0, DRV_NAME " fatal", hisi_hba); if (rc) { @@ -1874,12 +2771,15 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) } /* Init tasklets for cq only */ - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct tasklet_struct *t = &cq->tasklet; + int nr = hisi_sas_intr_conv ? PCI_IRQ_CQ_BASE : + PCI_IRQ_CQ_BASE + i; + unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0; - rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16), - cq_interrupt_v3_hw, 0, + rc = devm_request_irq(dev, pci_irq_vector(pdev, nr), + cq_interrupt_v3_hw, irqflags, DRV_NAME " cq", cq); if (rc) { dev_err(dev, @@ -1889,7 +2789,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) goto free_cq_irqs; } - tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq); + tasklet_init(t, cq_tasklet_v3_hw, (uintptr_t)cq); } return 0; @@ -1897,14 +2797,16 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) free_cq_irqs: for (k = 0; k < i; k++) { struct hisi_sas_cq *cq = &hisi_hba->cq[k]; + int nr = hisi_sas_intr_conv ? 
PCI_IRQ_CQ_BASE : + PCI_IRQ_CQ_BASE + k; - free_irq(pci_irq_vector(pdev, k+16), cq); + free_irq(pci_irq_vector(pdev, nr), cq); } - free_irq(pci_irq_vector(pdev, 11), hisi_hba); + free_irq(pci_irq_vector(pdev, PCI_IRQ_AXI_FATAL), hisi_hba); free_chnl_interrupt: - free_irq(pci_irq_vector(pdev, 2), hisi_hba); + free_irq(pci_irq_vector(pdev, PCI_IRQ_CHANNEL), hisi_hba); free_phy_irq: - free_irq(pci_irq_vector(pdev, 1), hisi_hba); + free_irq(pci_irq_vector(pdev, PCI_IRQ_PHY), hisi_hba); free_irq_vectors: pci_free_irq_vectors(pdev); return rc; @@ -1929,6 +2831,7 @@ static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { enum sas_linkrate max = r->maximum_linkrate; + /* init OOB link rate as 1.5 Gbits */ u32 prog_phy_link_rate = 0x800; prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); @@ -1941,12 +2844,12 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) struct pci_dev *pdev = hisi_hba->pci_dev; int i; - synchronize_irq(pci_irq_vector(pdev, 1)); - synchronize_irq(pci_irq_vector(pdev, 2)); - synchronize_irq(pci_irq_vector(pdev, 11)); - for (i = 0; i < hisi_hba->queue_count; i++) { + synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_PHY)); + synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_CHANNEL)); + synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_AXI_FATAL)); + for (i = 0; i < hisi_hba->nvecs; i++) { hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); - synchronize_irq(pci_irq_vector(pdev, i + 16)); + synchronize_irq(pci_irq_vector(pdev, i + PCI_IRQ_CQ_BASE)); } hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); @@ -1973,8 +2876,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_phy *sphy = sas_phy->phy; + unsigned long flags; u32 reg_value; + spin_lock_irqsave(&phy->lock, flags); + /* loss dword sync */ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); sphy->loss_of_dword_sync_count += reg_value; @@ -1991,6 +2897,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); sphy->running_disparity_error_count += reg_value; + /* code violation error */ + reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); + phy->code_error_count += reg_value; + + spin_unlock_irqrestore(&phy->lock, flags); } static int disable_host_v3_hw(struct hisi_hba *hisi_hba) @@ -2005,6 +2916,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_stop_phys(hisi_hba); + /* Delay 10ms after stop phys to meet hw need */ mdelay(10); reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + @@ -2013,7 +2925,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, reg_val); - /* wait until bus idle */ + /* wait until bus idle, delay:10us, timeout:100us */ rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); @@ -2050,6 +2962,10 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, switch (reg_type) { case SAS_GPIO_REG_TX: + /* + * One register of GPIO can control 4 phys + * Add extra 3 bytes to avoid Numerical loss after divided by 4 + */ if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) { dev_err(dev, "write gpio: invalid reg range[%d, %d]\n", reg_index, reg_index + reg_count - 1); @@ -2070,7 +2986,7 @@ static int 
write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, +static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; @@ -2085,9 +3001,403 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } + if (time >= timeout_ms) + return -ETIMEDOUT; + dev_dbg(dev, "wait commands complete %dms\n", time); + + return 0; +} + +static ssize_t intr_conv_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv); +} +static DEVICE_ATTR_RO(intr_conv); + +static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) +{ + /* config those registers between enable and disable PHYs */ + hisi_sas_stop_phys(hisi_hba); + if (hisi_hba->intr_coal_ticks == 0 || + hisi_hba->intr_coal_count == 0) { + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + } else { /* intr coal */ + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, + hisi_hba->intr_coal_ticks); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, + hisi_hba->intr_coal_count); + } + phys_init_v3_hw(hisi_hba); +} + +static ssize_t intr_coal_ticks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + hisi_hba->intr_coal_ticks); +} + +static ssize_t intr_coal_ticks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 intr_coal_ticks; + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + int ret; + + ret = kstrtou32(buf, 10, &intr_coal_ticks); + if (ret) { + dev_err(dev, "Input data of interrupt coalesce unmatch\n"); + return -EINVAL; + } + + if (intr_coal_ticks >= BIT(24)) { + dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); + return -EINVAL; + } + + hisi_hba->intr_coal_ticks = intr_coal_ticks; + + config_intr_coal_v3_hw(hisi_hba); + + return count; +} +static DEVICE_ATTR_RW(intr_coal_ticks); + +static ssize_t intr_coal_count_show(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + hisi_hba->intr_coal_count); +} + +static ssize_t intr_coal_count_store(struct device *dev, + struct device_attribute + *attr, const char *buf, size_t count) +{ + u32 intr_coal_count; + struct Scsi_Host *shost = class_to_shost(dev); + struct hisi_hba *hisi_hba = shost_priv(shost); + int ret; + + ret = kstrtou32(buf, 10, &intr_coal_count); + if (ret) { + dev_err(dev, "Input data of interrupt coalesce unmatch\n"); + return -EINVAL; + } + + if (intr_coal_count >= BIT(8)) { + dev_err(dev, "intr_coal_count must be less than 2^8!\n"); + return -EINVAL; + } + + hisi_hba->intr_coal_count = intr_coal_count; + + config_intr_coal_v3_hw(hisi_hba); + + return count; +} +static DEVICE_ATTR_RW(intr_coal_count); + +static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(PHY_CFG), + HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE), + HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE), + HISI_SAS_DEBUGFS_REG(PHY_CTRL), + HISI_SAS_DEBUGFS_REG(SL_CFG), + 
HISI_SAS_DEBUGFS_REG(AIP_LIMIT), + HISI_SAS_DEBUGFS_REG(SL_CONTROL), + HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6), + HISI_SAS_DEBUGFS_REG(TXID_AUTO), + HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0), + HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE), + HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER), + HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(CHL_INT0), + HISI_SAS_DEBUGFS_REG(CHL_INT1), + HISI_SAS_DEBUGFS_REG(CHL_INT2), + HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK), + HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER), + HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK), + HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK), + HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS), + HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS), + HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME), + HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST), + HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB), + HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW), + HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR), + HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_port_reg = { + .lu = debugfs_port_reg_lu, + .count = 0x100, /* number of port regs */ + .base_off = PORT_BASE, + .read_port_reg = hisi_sas_phy_read32, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE), + HISI_SAS_DEBUGFS_REG(IOST_BASE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(IOST_BASE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(ITCT_BASE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(ITCT_BASE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(IO_BROKEN_MSG_ADDR_LO), + HISI_SAS_DEBUGFS_REG(IO_BROKEN_MSG_ADDR_HI), + HISI_SAS_DEBUGFS_REG(PHY_CONTEXT), + HISI_SAS_DEBUGFS_REG(PHY_STATE), + HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA), + HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE), + HISI_SAS_DEBUGFS_REG(ITCT_CLR), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), + HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME), + HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), + HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), + HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), + HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE), + HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME), + HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME), + HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN), + HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME), + HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE), + HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS), + HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS), + 
HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO), + HISI_SAS_DEBUGFS_REG(INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3), + HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK), + HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK), + HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK), + HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN), + HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_BASE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_BASE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG), + HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_BASE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_BASE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG), + HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG), + HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1), + HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_global_reg = { + .lu = debugfs_global_reg_lu, + .count = 0x800, /* number of global regs */ + .read_global_reg = hisi_sas_read32, +}; + +static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + /* delay:100ms, timeout:5s */ + if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT) + dev_dbg(dev, "Wait commands complete timeout!\n"); + + hisi_sas_kill_tasklets(hisi_hba); +} + +static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); } +static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_id = hisi_hba->bist_loopback_phy_id; + + /* disable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_id, 0); + + /* disable ALOS */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG); + reg_val |= CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val); +} + +static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_id = hisi_hba->bist_loopback_phy_id; + + /* disable loopback */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val); + + /* enable ALOS*/ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG); + reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val); + + /*restore the linkrate*/ + reg_val = 
hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE); + /* init OOB link rate as 1.5 Gbits */ + reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK; + reg_val |= (0x800 << CFG_PROG_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_id, + PROG_PHY_LINK_RATE, reg_val); + + /* enable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_id, 1); +} + +static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) +{ + u32 reg_val, mode_tmp; + u32 linkrate = hisi_hba->bist_loopback_linkrate; + u32 phy_id = hisi_hba->bist_loopback_phy_id; + u32 code_mode = hisi_hba->bist_loopback_code_mode; + u32 path_mode = hisi_hba->bist_loopback_mode; + + pr_err("linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n", linkrate, + phy_id, code_mode, path_mode); + mode_tmp = path_mode ? 2 : 1; + if (enable) { + /* some preparations before bist test */ + hisi_sas_bist_test_prep_v3_hw(hisi_hba); + + /* set linkrate of bit test*/ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, + PROG_PHY_LINK_RATE); + reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK; + reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_id, + PROG_PHY_LINK_RATE, reg_val); + + /* set code mode of bit test */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_BIST_MODE_SEL_MSK | + CFG_LOOP_TEST_MODE_MSK | + CFG_RX_BIST_EN_MSK | + CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) | + (mode_tmp << CFG_LOOP_TEST_MODE_OFF) | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL, reg_val); + + /*set the bist init data*/ + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CODE, 0x1); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CODE1, 0x80); + + mdelay(100); + reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL, reg_val); + + /* clear error bit */ + mdelay(100); + hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT); + } else { + /* disable bist test and recover it */ + hisi_hba->bist_loopback_cnt += hisi_sas_phy_read32(hisi_hba, + phy_id, SAS_BIST_ERR_CNT); + hisi_sas_bist_test_restore_v3_hw(hisi_hba); + } + + return 0; +} + +const struct cpumask * +get_managed_irq_aff_v3_hw(struct hisi_hba *hisi_hba, int queue) +{ + if (user_ctl_irq) + return NULL; + + return pci_irq_get_affinity(hisi_hba->pci_dev, queue + + HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW); +} + +struct device_attribute *host_attrs_v3_hw[] = { + &dev_attr_phy_event_threshold, + &dev_attr_intr_conv, + &dev_attr_intr_coal_ticks, + &dev_attr_intr_coal_count, + NULL +}; + static struct scsi_host_template sht_v3_hw = { .name = DRV_NAME, .module = THIS_MODULE, @@ -2098,16 +3408,19 @@ static struct scsi_host_template sht_v3_hw = { .scan_start = hisi_sas_scan_start, .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, - .can_queue = 1, .this_id = -1, - .sg_tablesize = SG_ALL, + .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, + .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, - .shost_attrs = host_attrs, + .shost_attrs = host_attrs_v3_hw, + .host_reset = hisi_sas_host_reset, + .tag_alloc_policy = BLK_TAG_ALLOC_RR, }; static const struct hisi_sas_hw hisi_sas_v3_hw = { @@ -2117,12 +3430,11 @@ static const 
struct hisi_sas_hw hisi_sas_v3_hw = { .get_wideport_bitmap = get_wideport_bitmap_v3_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .clear_itct = clear_itct_v3_hw, - .sl_notify = sl_notify_v3_hw, + .sl_notify_ssp = sl_notify_ssp_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, .prep_stp = prep_ata_v3_hw, .prep_abort = prep_abort_v3_hw, - .get_free_slot = get_free_slot_v3_hw, .start_delivery = start_delivery_v3_hw, .slot_complete = slot_complete_v3_hw, .phys_init = phys_init_v3_hw, @@ -2137,6 +3449,13 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .get_events = phy_get_events_v3_hw, .write_gpio = write_gpio_v3_hw, .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, + .debugfs_reg_global = &debugfs_global_reg, + .debugfs_reg_port = &debugfs_port_reg, + .snapshot_prepare = debugfs_snapshot_prepare_v3_hw, + .snapshot_restore = debugfs_snapshot_restore_v3_hw, + .set_bist = debugfs_set_bist_v3_hw, + .get_managed_irq_aff = get_managed_irq_aff_v3_hw, + .debugfs_work_handler = hisi_sas_debugfs_work_handler, }; static struct Scsi_Host * @@ -2154,18 +3473,27 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) hisi_hba = shost_priv(shost); INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); + INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler); + INIT_WORK(&hisi_hba->notify_work, ssp_notify_work_handler); hisi_hba->hw = &hisi_sas_v3_hw; hisi_hba->pci_dev = pdev; hisi_hba->dev = dev; hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; + hisi_hba->user_ctl_irq = user_ctl_irq; + + if (enable_dix_dif & ~HISI_SAS_PROT_MASK) + dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", + enable_dix_dif); + else + hisi_hba->enable_dix_dif = enable_dix_dif; timer_setup(&hisi_hba->timer, NULL, 0); if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (hisi_sas_alloc(hisi_hba, shost)) { + if (hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } @@ -2198,14 +3526,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_disable_device; - if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) || - (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) { - if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || - (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { - dev_err(dev, "No usable DMA addressing method\n"); - rc = -EIO; - goto err_out_regions; - } + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, "No usable DMA addressing method\n"); + rc = -ENODEV; + goto err_out_regions; } shost = hisi_sas_shost_alloc_pci(pdev); @@ -2218,13 +3545,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_hba = shost_priv(shost); dev_set_drvdata(dev, sha); - hisi_hba->regs = pcim_iomap(pdev, 5, 0); + hisi_hba->regs = pcim_iomap(pdev, HISI_SAS_BAR_TO_IOMAP, 0); if (!hisi_hba->regs) { dev_err(dev, "cannot map register.\n"); rc = -ENOMEM; goto err_out_ha; } + hisi_hba->last_dev_id = -1; phy_nr = port_nr = hisi_hba->n_phy; arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); @@ -2243,10 +3571,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; + /* shost support 16 bytes cmd len base on hw */ shost->max_cmd_len = 16; - shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); - shost->can_queue = 
hisi_hba->hw->max_command_entries; - shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + shost->can_queue = hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT; + shost->cmd_per_lun = hisi_hba->hw->max_command_entries - + HISI_SAS_RESERVED_IPTT_CNT; sha->sas_ha_name = DRV_NAME; sha->dev = dev; @@ -2260,6 +3590,16 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sha->sas_port[i] = &hisi_hba->port[i].sas_port; } + if (hisi_sas_debugfs_enable) + hisi_sas_debugfs_init(hisi_hba); + + if (hisi_hba->enable_dix_dif) { + scsi_host_set_prot(hisi_hba->shost, + enable_dix_dif); + scsi_host_set_guard(hisi_hba->shost, + SHOST_DIX_GUARD_CRC); + } + rc = scsi_add_host(shost, dev); if (rc) goto err_out_ha; @@ -2293,13 +3633,15 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) { int i; - free_irq(pci_irq_vector(pdev, 1), hisi_hba); - free_irq(pci_irq_vector(pdev, 2), hisi_hba); - free_irq(pci_irq_vector(pdev, 11), hisi_hba); - for (i = 0; i < hisi_hba->queue_count; i++) { + free_irq(pci_irq_vector(pdev, PCI_IRQ_PHY), hisi_hba); + free_irq(pci_irq_vector(pdev, PCI_IRQ_CHANNEL), hisi_hba); + free_irq(pci_irq_vector(pdev, PCI_IRQ_AXI_FATAL), hisi_hba); + for (i = 0; i < hisi_hba->nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + int nr = hisi_sas_intr_conv ? PCI_IRQ_CQ_BASE : + PCI_IRQ_CQ_BASE + i; - free_irq(pci_irq_vector(pdev, i+16), cq); + free_irq(pci_irq_vector(pdev, nr), cq); } pci_free_irq_vectors(pdev); } @@ -2315,6 +3657,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) del_timer(&hisi_hba->timer); sas_unregister_ha(sha); + flush_workqueue(hisi_hba->wq); sas_remove_host(sha->core.shost); hisi_sas_v3_destroy_irqs(pdev, hisi_hba); @@ -2322,164 +3665,10 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); hisi_sas_free(hisi_hba); + hisi_sas_debugfs_exit(hisi_hba); scsi_host_put(shost); } -static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = { - { .irq_msk = BIT(19), .msg = "HILINK_INT" }, - { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" }, - { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" }, - { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" }, - { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" }, - { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" }, - { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" }, - { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" }, - { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" }, - { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" }, - { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" }, - { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" }, - { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" }, -}; - -static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = { - { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" }, - { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" }, - { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" }, - { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" }, - { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" }, - { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" }, - { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" }, - { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" }, - { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" }, - { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" }, - { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(13), .msg 
= "DMAC3_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" }, - { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" }, - { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" }, - { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" }, - { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" }, - { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" }, - { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" }, - { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" }, - { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" }, - { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" }, - { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" }, - { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" }, - { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" }, -}; - -static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = { - { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" }, - { .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" }, - { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" }, - { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" }, - { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" }, - { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" }, - { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" }, - { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" }, - { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" }, - { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" }, - { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" }, - { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" }, - { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" }, -}; - -static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba) -{ - struct device *dev = hisi_hba->dev; - const struct hisi_sas_hw_error *ras_error; - bool need_reset = false; - u32 irq_value; - int i; - - irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0); - for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) { - ras_error = &sas_ras_intr0_nfe[i]; - if (ras_error->irq_msk & irq_value) { - dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n", - ras_error->msg, irq_value); - need_reset = true; - } - } - hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value); - - irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1); - for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) { - ras_error = &sas_ras_intr1_nfe[i]; - if (ras_error->irq_msk & irq_value) { - dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n", - ras_error->msg, irq_value); - need_reset = true; - } - } - hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value); - - irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2); - for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) { - ras_error = &sas_ras_intr2_nfe[i]; - if (ras_error->irq_msk & irq_value) { - dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n", - ras_error->msg, irq_value); - need_reset = true; - } - } - hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value); - - return need_reset; -} - -static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev, - 
pci_channel_state_t state) -{ - struct sas_ha_struct *sha = pci_get_drvdata(pdev); - struct hisi_hba *hisi_hba = sha->lldd_ha; - struct device *dev = hisi_hba->dev; - - dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state); - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (process_non_fatal_error_v3_hw(hisi_hba)) - return PCI_ERS_RESULT_NEED_RESET; - - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev) -{ - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev) -{ - struct sas_ha_struct *sha = pci_get_drvdata(pdev); - struct hisi_hba *hisi_hba = sha->lldd_ha; - struct device *dev = hisi_hba->dev; - HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); - - dev_info(dev, "PCI error: slot reset callback!!\n"); - queue_work(hisi_hba->wq, &r.work); - wait_for_completion(r.completion); - if (r.done) - return PCI_ERS_RESULT_RECOVERED; - - return PCI_ERS_RESULT_DISCONNECT; -} - static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); @@ -2535,7 +3724,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state) } if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) - return -1; + return -EPERM; scsi_block_requests(shost); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); @@ -2580,15 +3769,21 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); rc = pci_enable_device(pdev); - if (rc) + if (rc) { dev_err(dev, "enable device failed during resume (%d)\n", rc); + return rc; + } pci_set_master(pdev); scsi_unblock_requests(shost); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); sas_prep_resume_ha(sha); - init_reg_v3_hw(hisi_hba); + rc = hw_init_v3_hw(hisi_hba); + if (rc) { + scsi_remove_host(shost); + pci_disable_device(pdev); + } hisi_hba->hw->phys_init(hisi_hba); sas_resume_ha(sha); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); @@ -2603,9 +3798,6 @@ static const struct pci_device_id sas_v3_pci_table[] = { MODULE_DEVICE_TABLE(pci, sas_v3_pci_table); static const struct pci_error_handlers hisi_sas_err_handler = { - .error_detected = hisi_sas_error_detected_v3_hw, - .mmio_enabled = hisi_sas_mmio_enabled_v3_hw, - .slot_reset = hisi_sas_slot_reset_v3_hw, .reset_prepare = hisi_sas_reset_prepare_v3_hw, .reset_done = hisi_sas_reset_done_v3_hw, }; @@ -2621,6 +3813,7 @@ static struct pci_driver sas_v3_pci_driver = { }; module_pci_driver(sas_v3_pci_driver); +module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Garry "); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index ea4b0bb0c1cd4e6b12748256741afae339311591..5299fa75215a96bbe35f32c5ff4f5302d35eca48 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -261,12 +261,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, device_enable_async_suspend(&shost->shost_dev); + get_device(&shost->shost_gendev); error = device_add(&shost->shost_dev); if (error) goto out_del_gendev; - get_device(&shost->shost_gendev); - if (shost->transportt->host_size) { shost->shost_data = kzalloc(shost->transportt->host_size, GFP_KERNEL); @@ -283,34 +282,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, shost->work_q_name); if (!shost->work_q) { error = -EINVAL; - goto out_free_shost_data; + goto out_del_dev; } } error = scsi_sysfs_add_host(shost); if 
(error) - goto out_destroy_host; + goto out_del_dev; scsi_proc_host_add(shost); scsi_autopm_put_host(shost); return error; - out_destroy_host: - if (shost->work_q) - destroy_workqueue(shost->work_q); - out_free_shost_data: - kfree(shost->shost_data); + /* + * Any host allocation in this function will be freed in + * scsi_host_dev_release(). + */ out_del_dev: device_del(&shost->shost_dev); out_del_gendev: + /* + * Host state is SHOST_RUNNING so we have to explicitly release + * ->shost_dev. + */ + put_device(&shost->shost_dev); device_del(&shost->shost_gendev); out_disable_runtime_pm: device_disable_async_suspend(&shost->shost_gendev); pm_runtime_disable(&shost->shost_gendev); pm_runtime_set_suspended(&shost->shost_gendev); pm_runtime_put_noidle(&shost->shost_gendev); - if (shost_use_blk_mq(shost)) - scsi_mq_destroy_tags(shost); fail: return error; } @@ -356,7 +357,7 @@ static void scsi_host_dev_release(struct device *dev) ida_simple_remove(&host_index_ida, shost->host_no); - if (parent) + if (shost->shost_state != SHOST_CREATED) put_device(parent); kfree(shost); } @@ -403,8 +404,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) mutex_init(&shost->scan_mutex); index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); - if (index < 0) - goto fail_kfree; + if (index < 0) { + kfree(shost); + return NULL; + } shost->host_no = index; shost->dma_channel = 0xff; @@ -474,6 +477,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq; + if (!sht->module) + shost->is_builtin = true; + device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); shost->shost_gendev.bus = &scsi_bus_type; @@ -491,7 +497,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); - goto fail_index_remove; + shost->ehandler = NULL; + goto fail; } shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", @@ -500,17 +507,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) if (!shost->tmf_work_q) { shost_printk(KERN_WARNING, shost, "failed to create tmf workq\n"); - goto fail_kthread; + goto fail; } scsi_proc_hostdir_add(shost->hostt); return shost; + fail: + /* + * Host state is still SHOST_CREATED and that is enough to release + * ->shost_gendev. scsi_host_dev_release() will free + * dev_name(&shost->shost_dev). 
+ */ + put_device(&shost->shost_gendev); - fail_kthread: - kthread_stop(shost->ehandler); - fail_index_remove: - ida_simple_remove(&host_index_ida, shost->host_no); - fail_kfree: - kfree(shost); return NULL; } EXPORT_SYMBOL(scsi_host_alloc); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c120929d4ffe52f3f24664226b7120b9057a2a1d..dac636880b4d5ec65978b96a6928edf7952c5265 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -251,10 +251,11 @@ static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg); #ifdef CONFIG_COMPAT -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif @@ -2320,6 +2321,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; @@ -2479,8 +2482,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) + c2->error_data.status == 0)) { + cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); + } /* * Any RAID offload error results in retry which will use @@ -4923,7 +4928,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, curr_sg->reserved[0] = 0; curr_sg->reserved[1] = 0; curr_sg->reserved[2] = 0; - curr_sg->chain_indicator = 0x80; + curr_sg->chain_indicator = IOACCEL2_CHAIN; curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; } @@ -4940,6 +4945,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, curr_sg++; } + /* + * Set the last s/g element bit + */ + (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; + switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: cp->direction &= ~IOACCEL2_DIRECTION_MASK; @@ -5612,6 +5622,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) } c = cmd_tagged_alloc(h, cmd); + /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; + /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. 
@@ -6122,7 +6138,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) #ifdef CONFIG_COMPAT -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, +static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { IOCTL32_Command_struct __user *arg32 = @@ -6159,7 +6175,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, - int cmd, void __user *arg) + unsigned int cmd, void __user *arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; @@ -6196,7 +6212,8 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, return err; } -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: @@ -6520,7 +6537,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, /* * ioctl */ -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 21a726e2eec6ecbd3fa66221a4635adc3060c747..f6afca4b231915cd93121ea77f906f2e14c91d02 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -517,6 +517,7 @@ struct ioaccel2_sg_element { u8 reserved[3]; u8 chain_indicator; #define IOACCEL2_CHAIN 0x80 +#define IOACCEL2_LAST_SG 0x40 }; /* diff --git a/drivers/scsi/huawei/Kconfig b/drivers/scsi/huawei/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a9fbdef9b4b38576c08467650ae1ffce9dfc60c2 --- /dev/null +++ b/drivers/scsi/huawei/Kconfig @@ -0,0 +1,21 @@ +# +# Huawei driver configuration +# + +config SCSI_HUAWEI_FC + tristate "Huawei devices" + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + default m + ---help--- + If you have a Fibre Channel PCI card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Huawei cards. If you say Y, you will be asked + for your specific card in the following questions. + +if SCSI_HUAWEI_FC + +source "drivers/scsi/huawei/hifc/Kconfig" + +endif # SCSI_HUAWEI_FC diff --git a/drivers/scsi/huawei/Makefile b/drivers/scsi/huawei/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..fa48694cc16685175a90d9960132a0f50819de7f --- /dev/null +++ b/drivers/scsi/huawei/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Huawei device drivers. +# + +obj-$(CONFIG_SCSI_FC_HIFC) += hifc/ diff --git a/drivers/scsi/huawei/hifc/Kconfig b/drivers/scsi/huawei/hifc/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..79c7954a073501266419f32ee7e30d873b897859 --- /dev/null +++ b/drivers/scsi/huawei/hifc/Kconfig @@ -0,0 +1,11 @@ +# +# Huawei driver configuration +# +config SCSI_FC_HIFC + tristate "Huawei hifc Fibre Channel Support" + default m + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + ---help--- + This driver supports Huawei Fibre Channel PCI and + PCIE host adapters. 
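The Kconfig additions above nest: SCSI_HUAWEI_FC opens the Huawei vendor section, the sourced drivers/scsi/huawei/hifc/Kconfig adds SCSI_FC_HIFC for the hifc module itself, and the vendor-level Makefile descends into hifc/ only when that child symbol is set. As a rough sketch, assuming no dependencies beyond the PCI, SCSI and SCSI_FC_ATTRS ones declared in these hunks, a .config fragment that builds the new driver as a module would look like:

CONFIG_PCI=y
CONFIG_SCSI=y
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_HUAWEI_FC=m
CONFIG_SCSI_FC_HIFC=m
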
diff --git a/drivers/scsi/huawei/hifc/Makefile b/drivers/scsi/huawei/hifc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0128086c75d951d64e671dcb1fdbc403daad503e --- /dev/null +++ b/drivers/scsi/huawei/hifc/Makefile @@ -0,0 +1,42 @@ +obj-$(CONFIG_SCSI_FC_HIFC) += hifc.o + +hifc-objs += hifc_utils.o +hifc-objs += hifc_hba.o +hifc-objs += hifc_portmng.o + +hifc-objs += hifc_module.o +hifc-objs += hifc_chipitf.o +hifc-objs += hifc_io.o +hifc-objs += hifc_queue.o +hifc-objs += hifc_service.o +hifc-objs += hifc_wqe.o +hifc-objs += hifc_cfg.o +hifc-objs += hifc_lld.o + +hifc-objs += unf_io.o +hifc-objs += unf_io_abnormal.o +hifc-objs += unf_scsi.o +hifc-objs += unf_init.o +hifc-objs += unf_event.o +hifc-objs += unf_exchg.o +hifc-objs += unf_lport.o +hifc-objs += unf_disc.o +hifc-objs += unf_rport.o +hifc-objs += unf_service.o +hifc-objs += unf_portman.o +hifc-objs += unf_npiv.o +hifc-objs += hifc_sml.o +hifc-objs += hifc_tool.o +hifc-objs += hifc_tool_hw.o +hifc-objs += hifc_dbgtool_knl.o + +hifc-objs += hifc_hwif.o +hifc-objs += hifc_eqs.o +hifc-objs += hifc_api_cmd.o +hifc-objs += hifc_mgmt.o +hifc-objs += hifc_wq.o +hifc-objs += hifc_cmdq.o +hifc-objs += hifc_hwdev.o +hifc-objs += hifc_cqm_main.o +hifc-objs += hifc_cqm_object.o + diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.c b/drivers/scsi/huawei/hifc/hifc_api_cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..22632f77958205a078fbb683bc400e07f819706b --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.c @@ -0,0 +1,1155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 100000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 64ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hifc_api_cmd_chain *chain) +{ + enum hifc_api_cmd_chain_type chain_type = chain->chain_type; + struct 
hifc_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + hifc_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hifc_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + + return HIFC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HIFC_API_CMD_STATUS_GET(val, CPLD_ERR), + HIFC_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HIFC_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HIFC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + * Return: 0 - success, negative - failure + **/ +static int chain_busy(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct hifc_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HIFC_API_CMD_MULTI_READ: + case HIFC_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %d, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + * @cmd_size: the command size + * Return: cell_data_size + **/ +static u16 get_cell_data_size(enum hifc_api_cmd_chain_type type, u16 cmd_size) +{ + u16 cell_data_size = 0; + + switch (type) { + case HIFC_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 
chksum; + + ctrl = HIFC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HIFC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HIFC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HIFC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell *cell, + enum hifc_node_id dest, + const void *cmd, u16 cmd_size) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | + HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HIFC_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HIFC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + default: + sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", + chain->chain_type); + return; + } + + cell->desc |= HIFC_API_CMD_DESC_SET(dest, DEST) | + HIFC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= HIFC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current producer cell + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_cell(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hifc_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; + + cell_size = get_cell_data_size(chain->chain_type, cmd_size); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hifc_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +static void issue_api_cmd(struct hifc_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + +/** + * api_cmd_status_update - update the status of the chain + * @chain: chain to update + **/ +static void api_cmd_status_update(struct hifc_api_cmd_chain *chain) +{ + 
struct hifc_api_cmd_status *wb_status; + enum hifc_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; + + wb_status = chain->wb_status; + + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (HIFC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_status->header); + chain_type = HIFC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HIFC_API_CMD_MAX) + return; + + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = HIFC_API_CMD_STATUS_GET(buf_desc, CONS_IDX); +} + +/** + * wait_for_status_poll - wait for a write-to-mgmt-cpu command to complete + * @chain: the chain of the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_status_poll(struct hifc_api_cmd_chain *chain) +{ + int err = -ETIMEDOUT; + u32 cnt = 0; + + while (cnt < API_CMD_STATUS_TIMEOUT && + chain->hwdev->chip_present_flag) { + api_cmd_status_update(chain); + + /* a sync API CMD should start only after the previous one finishes */ + if (chain->cons_idx == chain->prod_idx) { + err = 0; + break; + } + + usleep_range(50, 100); + cnt++; + } + + return err; +} + +static void copy_resp_data(struct hifc_api_cmd_cell_ctxt *ctxt, void *ack, + u16 ack_size) +{ + struct hifc_api_cmd_resp_fmt *resp = ctxt->resp; + + memcpy(ack, &resp->resp_data, ack_size); + ctxt->status = 0; +} + +/** + * wait_for_resp_polling - poll for the response data of a read API command + * @ctxt: cell context of the command being polled + * + * Return: 0 - success, negative - failure + **/ +static int wait_for_resp_polling(struct hifc_api_cmd_cell_ctxt *ctxt) +{ + u64 resp_header; + int ret = -ETIMEDOUT; + u32 cnt = 0; + + while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) { + resp_header = be64_to_cpu(ctxt->resp->header); + + rmb(); /* read the latest header */ + + if (HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) { + ret = 0; + break; + } + usleep_range(100, 1000); + cnt++; + } + + if (ret) + pr_err("Wait for api chain response timeout\n"); + + return ret; +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_api_cmd_completion(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) +{ + void *dev = chain->hwdev->dev_hdl; + int err = 0; + + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + err = wait_for_resp_polling(ctxt); + if (!err) + copy_resp_data(ctxt, ack, ack_size); + break; + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err) { + sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", + chain->chain_type); + break; + } + break; + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* No need to wait */ + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + if (err) + dump_api_chain_reg(chain); + + return err; +} + +static inline void update_api_cmd_ctxt(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; + if (ctxt->resp) { + ctxt->resp->header = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +/** + * api_cmd - issue an API CMD command and wait for its completion + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * @ack: buffer for the read response data + * @ack_size: size of the response buffer + * Return: 0 - success, negative - failure + **/ +static int api_cmd(struct 
hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct hifc_api_cmd_cell_ctxt *ctxt; + + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock(&chain->async_lock); + else + down(&chain->sem); + ctxt = &chain->cell_ctxt[chain->prod_idx]; + if (chain_busy(chain)) { + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + return -EBUSY; + } + update_api_cmd_ctxt(chain, ctxt); + + prepare_cell(chain, dest, cmd, cmd_size); + + cmd_chain_prod_idx_inc(chain); + + wmb(); /* issue the command */ + + issue_api_cmd(chain); + + /* incremented prod idx, update ctxt */ + + chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); +} + +/** + * hifc_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size) +{ + /* Verify the chain type */ + return api_cmd(chain, dest, cmd, size, NULL, 0); +} + +int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 size, void *ack, u16 ack_size) +{ + return api_cmd(chain, dest, cmd, size, ack, ack_size); +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + **/ +static int api_cmd_hw_restart(struct hifc_api_cmd_chain *cmd_chain) +{ + struct hifc_hwif *hwif = cmd_chain->hwdev->hwif; + u32 reg_addr, val; + int err; + u32 cnt = 0; + + /* Read Modify Write */ + reg_addr = HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = hifc_hwif_read_reg(hwif, reg_addr); + + val = HIFC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HIFC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hifc_hwif_write_reg(hwif, reg_addr, val); + + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hifc_hwif_read_reg(hwif, reg_addr); + + if (!HIFC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; + + /* Read Modify Write */ + reg_addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = hifc_hwif_read_reg(hwif, reg_addr); + + ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HIFC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HIFC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); + + hifc_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + **/ +static void api_cmd_set_status_addr(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = 
HIFC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hifc_hwif_write_reg(hwif, addr, val); + + addr = HIFC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + **/ +static void api_cmd_set_num_cells(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + **/ +static void api_cmd_head_init(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hifc_hwif_write_reg(hwif, addr, val); + + addr = HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + **/ +static int wait_for_ready_chain(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + u32 hw_cons_idx; + u32 cnt = 0; + int err; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hifc_hwif_read_reg(hwif, addr); + hw_cons_idx = HIFC_API_CMD_STATUS_GET(val, CONS_IDX); + + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static void api_cmd_chain_hw_clean(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hifc_hwif_read_reg(hwif, addr); + ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hifc_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + **/ +static int api_cmd_chain_hw_init(struct hifc_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + **/ +static int alloc_cmd_buf(struct hifc_api_cmd_chain *chain, + 
struct hifc_api_cmd_cell *cell, u32 cell_idx) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; + int err = 0; + + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = buf_vaddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + return err; +} + +static void alloc_resp_buf(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell *cell, u32 cell_idx) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int hifc_alloc_api_cmd_cell_buf(struct hifc_api_cmd_chain *chain, + u32 cell_idx, + struct hifc_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == HIFC_API_CMD_MULTI_READ || + chain->chain_type == HIFC_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); + + switch (chain->chain_type) { + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_POLL_READ: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; + } + break; + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. 
+ */ + case HIFC_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; + break; + default: + sdk_err(dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + return 0; + +alloc_cmd_buf_err: + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cell(struct hifc_api_cmd_chain *chain, u32 cell_idx, + struct hifc_api_cmd_cell *pre_node, + struct hifc_api_cmd_cell **node_vaddr) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + struct hifc_api_cmd_cell *node; + void *cell_vaddr; + u64 cell_paddr; + int err; + + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; + node = cell_ctxt->cell_vaddr; + + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); + } + + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = hifc_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; + + return 0; +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cells(struct hifc_api_cmd_chain *chain) +{ + struct hifc_api_cmd_cell *node = NULL, *pre_node = NULL; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; + } + + pre_node = node; + } + + if (!node) + return -EFAULT; + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_chain_init(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev->dev_hdl; + size_t cell_ctxt_size; + size_t cells_buf_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + err = -EINVAL; + goto alloc_cell_ctxt_err; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell 
contexts for a chain\n"); + err = -ENOMEM; + goto alloc_cell_ctxt_err; + } + + chain->wb_status = dma_zalloc_coherent(dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = hifc_dma_zalloc_coherent_align(dev, cells_buf_size, + API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, + &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; + } + + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + +alloc_cell_ctxt_err: + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + + hifc_dma_free_coherent_align(dev, &chain->cells_addr); + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_chain(struct hifc_api_cmd_chain **cmd_chain, + struct hifc_api_cmd_chain_attr *attr) +{ + struct hifc_hwdev *hwdev = attr->hwdev; + struct hifc_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); + goto chain_hw_init_err; + } + + *cmd_chain = chain; + return 0; + +chain_hw_init_err: +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct hifc_api_cmd_chain *chain) +{ + api_chain_free(chain); + 
kfree(chain); +} + +/** + * hifc_api_cmd_init - Initialize all the API CMD chains + * @hwdev: the pointer to hw device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + **/ +int hifc_api_cmd_init(struct hifc_hwdev *hwdev, + struct hifc_api_cmd_chain **chain) +{ + void *dev = hwdev->dev_hdl; + struct hifc_api_cmd_chain_attr attr; + enum hifc_api_cmd_chain_type chain_type, i; + int err; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + for (; chain_type < HIFC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * hifc_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + **/ +void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain) +{ + enum hifc_api_cmd_chain_type chain_type; + + chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + + for (; chain_type < HIFC_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.h b/drivers/scsi/huawei/hifc/hifc_api_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..bd14db34a11983779f5dc5389919b8af48c9a513 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_API_CMD_H_ +#define HIFC_API_CMD_H_ + +#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HIFC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HIFC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HIFC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HIFC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HIFC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HIFC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HIFC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HIFC_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HIFC_API_CMD_DESC_DEST_SHIFT 32 +#define HIFC_API_CMD_DESC_SIZE_SHIFT 40 +#define HIFC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HIFC_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HIFC_API_CMD_DESC_RD_WR_MASK 0x1U +#define HIFC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HIFC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HIFC_API_CMD_DESC_DEST_MASK 0x1FU +#define HIFC_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HIFC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU +#define HIFC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU + +#define HIFC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HIFC_API_CMD_DESC_##member##_MASK) << \ + HIFC_API_CMD_DESC_##member##_SHIFT) +#define HIFC_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HIFC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define 
HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU +#define HIFC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HIFC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HIFC_API_CMD_STATUS_HEADER_##member##_MASK) +#define HIFC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HIFC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U +#define HIFC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HIFC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HIFC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HIFC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HIFC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HIFC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HIFC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HIFC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HIFC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HIFC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HIFC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HIFC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HIFC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HIFC_API_CMD_RESP_HEAD_VALID_MASK) == \ + HIFC_API_CMD_RESP_HEAD_VALID_CODE) +#define HIFC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HIFC_API_CMD_STATUS_CONS_IDX_SHIFT 0 +#define HIFC_API_CMD_STATUS_FSM_MASK 0xFU +#define HIFC_API_CMD_STATUS_FSM_SHIFT 24 +#define HIFC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HIFC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 +#define HIFC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HIFC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HIFC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HIFC_API_CMD_STATUS_##member##_SHIFT) & \ + HIFC_API_CMD_STATUS_##member##_MASK) + +/* API CMD registers */ +#define HIFC_CSR_API_CMD_BASE 0xF000 + +#define HIFC_CSR_API_CMD_STRIDE 0x100 + +#define HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x0 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x4 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x8 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0xC + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x10 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x14 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x1C + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x20 + (idx) * 
HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x30 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +enum hifc_api_cmd_chain_type { + /* write command with completion notification */ + HIFC_API_CMD_WRITE = 0, + /* read command with completion notification */ + HIFC_API_CMD_READ = 1, + /* write to mgmt cpu command with completion */ + HIFC_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HIFC_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HIFC_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HIFC_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HIFC_API_CMD_MAX, +}; + +struct hifc_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hifc_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hifc_api_cmd_resp_fmt { + u64 header; + u64 rsvd[3]; + u64 resp_data; +}; + +struct hifc_api_cmd_cell_ctxt { + struct hifc_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hifc_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; +}; + +struct hifc_api_cmd_chain_attr { + struct hifc_hwdev *hwdev; + enum hifc_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hifc_api_cmd_chain { + struct hifc_hwdev *hwdev; + enum hifc_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hifc_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hifc_api_cmd_cell *head_node; + + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + struct hifc_api_cmd_cell *curr_node; + + struct hifc_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; +}; + +int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size); + +int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size, + void *ack, u16 ack_size); + +int hifc_api_cmd_init(struct hifc_hwdev *hwdev, + struct hifc_api_cmd_chain **chain); + +void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.c b/drivers/scsi/huawei/hifc/hifc_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..794b887fafcf99ed446896e2173c17b5af359f78 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cfg.c @@ -0,0 +1,823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_cqm_main.h" 
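+/* The local headers below bring in the API command chain, management + * channel and capability/configuration definitions used by this file. + */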
+#include "hifc_api_cmd.h" +#include "hifc_hw.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" + +static uint intr_mode; + +int hifc_sync_time(void *hwdev, u64 time) +{ + struct hifc_sync_time_info time_info = {0}; + u16 out_size = sizeof(time_info); + int err; + + time_info.mstime = time; + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size, + 0); + if (err || time_info.status || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.status, out_size); + } + + return err; +} + +static void parse_pub_res_cap(struct service_cap *cap, + struct hifc_dev_cap *dev_cap, + enum func_type type) +{ + cap->port_id = dev_cap->port_id; + cap->force_up = dev_cap->force_up; + + pr_info("Get public resource capbility, force_up: 0x%x\n", + cap->force_up); + /* FC need max queue number, but max queue number info is in + * l2nic cap, we also put max queue num info in public cap, so + * FC can get correct max queue number info. + */ + cap->max_sqs = dev_cap->nic_max_sq + 1; + cap->max_rqs = dev_cap->nic_max_rq + 1; + + cap->host_total_function = dev_cap->host_total_func; + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + + pr_info("Get public resource capbility, svc_cap_en: 0x%x\n", + dev_cap->svc_cap_en); + pr_info("port_id=0x%x\n", cap->port_id); + pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val); +} + +static void parse_fc_res_cap(struct service_cap *cap, + struct hifc_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + pr_info("Get fc resource capbility\n"); + pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num); + pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_dev_cap(struct hifc_hwdev *dev, + struct hifc_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(cap, dev_cap, type); + + /* PPF managed dynamic resource */ + + parse_fc_res_cap(cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hifc_hwdev *dev, enum func_type type) +{ + struct hifc_dev_cap dev_cap = {0}; + u16 out_len = sizeof(dev_cap); + int err; + + dev_cap.version = HIFC_CMD_VER_FUNC_ID; + err = hifc_global_func_id_get(dev, &dev_cap.func_id); + if (err) + return err; + + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n", + dev_cap.func_id); + + err = hifc_msg_to_mgmt_sync(dev, HIFC_MOD_CFGM, HIFC_CFG_NIC_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static void fc_param_fix(struct 
hifc_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void cfg_get_eq_num(struct hifc_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +static int cfg_init_interrupt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); + return -EFAULT; + } + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_num; + + cfg_mgmt->svc_cap.interrupt_type = intr_mode; + + mutex_init(&irq_info->irq_mutex); + + return 0; +} + +static int cfg_enable_interrupt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info; + struct msix_entry *entry; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n"); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Request %d msix vector success.\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32 kernel uses to write allocated vector */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int 
hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hifc_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + u16 free_num_irq; + int i, j; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, + "no free irq resource in cfg mgmt.\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n", + free_num_irq); + num = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = + alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} + +void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id) +{ + struct hifc_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Find target,but over range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %d don`t need to free\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} + +static int init_cfg_mgmt(struct hifc_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +} + +static void free_cfg_mgmt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = 
dev->cfg_mgmt; + + /* check that all allocated resources have been recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irqs and event queues, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + pci_disable_msix(dev->pcidev_hdl); + break; + + case INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + + kfree(cfg_mgmt); +} + +static int init_capability(struct hifc_hwdev *dev) +{ + int err; + enum func_type type = HIFC_FUNC_TYPE(dev); + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + cfg_mgmt->svc_cap.timer_en = 1; + cfg_mgmt->svc_cap.test_xid_alloc_mode = 1; + cfg_mgmt->svc_cap.test_gpa_check_enable = 1; + + err = get_cap_from_fw(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + + fc_param_fix(dev); + + if (dev->cfg_mgmt->svc_cap.force_up) + dev->feature_cap |= HIFC_FUNC_FORCE_LINK_UP; + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +static void free_capability(struct hifc_hwdev *dev) +{ + sdk_info(dev->dev_hdl, "Free capability success\n"); +} + +bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} + +u8 hifc_host_oq_id_mask(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} + +u16 hifc_func_max_qnum(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_sqs; +} + +/* Caller should ensure atomicity when calling this function */ +int hifc_stateful_init(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (dev->statufull_ref_cnt++) + return 0; + + err = cqm_init(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err); + goto init_cqm_err; + } + + sdk_info(dev->dev_hdl, "Initialize stateful resource success\n"); + + return 0; + +init_cqm_err: + dev->statufull_ref_cnt--; + + return err; +} + +/* Caller should ensure atomicity when calling this function */ +void hifc_stateful_deinit(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev || !dev->statufull_ref_cnt) + return; + + if (--dev->statufull_ref_cnt) + return; + + cqm_uninit(hwdev); + + sdk_info(dev->dev_hdl, "Clear stateful resource success\n"); +} + +bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev || state >= HIFC_HWDEV_MAX_INVAL_INITED) + return false; + + return !!test_bit(state, &dev->func_state); +} + +static int hifc_os_dep_init(struct hifc_hwdev *hwdev) +{ + hwdev->workq = create_singlethread_workqueue(HIFC_HW_WQ_NAME); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n"); + return -EFAULT; + } + + sema_init(&hwdev->fault_list_sem, 1); + + return 0; +} + +static void hifc_os_dep_deinit(struct hifc_hwdev 
*hwdev) +{ + destroy_workqueue(hwdev->workq); +} + +static int __hilink_phy_init(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_phy_init_status_judge(hwdev); + if (err) { + sdk_info(hwdev->dev_hdl, "Phy init failed\n"); + return err; + } + + return 0; +} + +static int init_hwdev_and_hwif(struct hifc_init_para *para) +{ + struct hifc_hwdev *hwdev; + int err; + + if (!(*para->hwdev)) { + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + + hwdev->chip_fault_stats = vzalloc(HIFC_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + err = hifc_init_hwif(hwdev, para->cfg_reg_base, + para->intr_reg_base, + para->db_base_phy, para->db_base, + para->dwqe_mapping); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + } + + return 0; + +init_hwif_err: + vfree(hwdev->chip_fault_stats); + +alloc_chip_fault_stats_err: + + *para->hwdev = NULL; + + return -EFAULT; +} + +static void deinit_hwdev_and_hwif(struct hifc_hwdev *hwdev) +{ + hifc_free_hwif(hwdev); + + vfree(hwdev->chip_fault_stats); + + kfree(hwdev); +} + +static int init_hw_cfg(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + return err; + } + + err = __hilink_phy_init(hwdev); + if (err) + goto hilink_phy_init_err; + + return 0; + +hilink_phy_init_err: + free_capability(hwdev); + + return err; +} + +/* Return: + * 0: all success + * >0: partitial success + * <0: all failed + */ +int hifc_init_hwdev(struct hifc_init_para *para) +{ + struct hifc_hwdev *hwdev; + int err; + + err = init_hwdev_and_hwif(para); + if (err) + return err; + + hwdev = *para->hwdev; + + /* detect slave host according to BAR reg */ + hwdev->feature_cap = HIFC_FUNC_MGMT | HIFC_FUNC_PORT | + HIFC_FUNC_SUPP_RATE_LIMIT | HIFC_FUNC_SUPP_DFX_REG | + HIFC_FUNC_SUPP_RX_MODE | HIFC_FUNC_SUPP_SET_VF_MAC_VLAN | + HIFC_FUNC_SUPP_CHANGE_MAC; + + err = hifc_os_dep_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n"); + goto os_dep_init_err; + } + + hifc_set_chip_present(hwdev); + hifc_init_heartbeat(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hifc_init_comm_ch(hwdev); + if (err) { + if (!(hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK)) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } else { + sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n"); + return hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK; + } + } + + err = init_hw_cfg(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hardware config\n"); + goto init_hw_cfg_err; + } + + set_bit(HIFC_HWDEV_ALL_INITED, &hwdev->func_state); + + sdk_info(hwdev->dev_hdl, "Init hwdev success\n"); + + return 0; + +init_hw_cfg_err: + return (hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hifc_destroy_heartbeat(hwdev); + hifc_os_dep_deinit(hwdev); + +os_dep_init_err: + deinit_hwdev_and_hwif(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +void hifc_free_hwdev(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + enum hifc_hwdev_init_state state = HIFC_HWDEV_ALL_INITED; + 
int flag = 0; + + if (!hwdev) + return; + + if (test_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state)) { + clear_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state); + + /* BM slave function not need to exec rx_tx_flush */ + + hifc_func_rx_tx_flush(hwdev); + + free_capability(dev); + } + while (state > HIFC_HWDEV_NONE_INITED) { + if (test_bit(state, &dev->func_state)) { + flag = 1; + break; + } + state--; + } + if (flag) { + hifc_uninit_comm_ch(dev); + free_cfg_mgmt(dev); + hifc_destroy_heartbeat(dev); + hifc_os_dep_deinit(dev); + } + clear_bit(HIFC_HWDEV_NONE_INITED, &dev->func_state); + + deinit_hwdev_and_hwif(dev); +} + +u64 hifc_get_func_feature_cap(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function feature capability\n"); + return 0; + } + + return dev->feature_cap; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.h b/drivers/scsi/huawei/hifc/hifc_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..b8a9dd35b1fd433f09f09a5c2e69d84a20546a70 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cfg.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CFG_MGT_H__ +#define __CFG_MGT_H__ + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* device capability */ +struct service_cap { + /* Host global resources */ + u16 host_total_function; + u8 host_oq_id_mask_val; + + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + u8 intr_chip_en; + + u8 port_id; /* PF/VF's physical port */ + u8 force_up; + + u8 timer_en; /* 0:disable, 1:enable */ + + u16 max_sqs; + u16 max_rqs; + + /* For test */ + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + + struct nic_service_cap nic_cap; /* NIC capability */ + struct fc_service_cap fc_cap; /* FC capability */ +}; + +struct hifc_sync_time_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 mstime; +}; + +struct cfg_eq { + enum hifc_service_type type; + int eqn; + int free; /* 1 - alocated, 0- freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + u8 num_ceq; + u8 num_ceq_remain; + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hifc_service_type type; + int free; /* 1 - alocated, 0- freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocate EQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hifc_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +enum cfg_sub_cmd { + /* PPF(PF) <-> FW */ + HIFC_CFG_NIC_CAP = 0, + CFG_FW_VERSION, + CFG_UCODE_VERSION, + HIFC_CFG_FUNC_CAP, + HIFC_CFG_MBOX_CAP = 6, +}; + +struct hifc_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + /* Public resource */ + u8 sf_svc_attr; + u8 host_id; + u8 sf_en_pf; + u8 sf_en_vf; + + u8 ep_id; + u8 intr_type; + u8 max_cos_id; + u8 
er_id; + u8 port_id; + u8 max_vf; + u16 svc_cap_en; + u16 host_total_func; + u8 host_oq_id_mask_val; + u8 max_vf_cos_id; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + u8 cfg_file_ver; + u8 net_port_mode; + u8 valid_cos_bitmap; /* every bit indicate cos is valid */ + u8 force_up; + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + /* shared resource */ + u32 host_pctx_num; + u8 host_sf_en; + u8 rsvd2[3]; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + /* l2nic */ + u16 nic_max_sq; + u16 nic_max_rq; + u32 rsvd[46]; + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u16 func_id; +}; +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.c b/drivers/scsi/huawei/hifc/hifc_chipitf.c new file mode 100644 index 0000000000000000000000000000000000000000..74e3df2fe8605196b843f9b7adb8ef4acf330a2a --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.c @@ -0,0 +1,2095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "unf_common.h" +#include "hifc_chipitf.h" + +#define HIFC_MBOX_TIME_SEC_MAX 60 + +#define HIFC_LINK_UP_COUNT 1 +#define HIFC_LINK_DOWN_COUNT 2 +#define HIFC_FC_DELETE_CMND_COUNT 3 + +#define HIFC_MBX_MAX_TIMEOUT 10000 + +static unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, + void *v_buf_in); + +static struct hifc_up_2_drv_msg_handle_s up_msg_handle[] = { + { HIFC_MBOX_RECV_FC_LINKUP, hifc_recv_fc_link_up }, + { HIFC_MBOX_RECV_FC_LINKDOWN, hifc_recv_fc_link_down }, + { HIFC_MBOX_RECV_FC_DELCMD, hifc_recv_fc_del_cmd }, + { HIFC_MBOX_RECV_FC_ERROR, hifc_recv_fc_error } +}; + +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, void *v_pri_handle, + unsigned char v_cmd, void *v_buf_in, + unsigned short v_in_size, void *v_buf_out, + unsigned short *v_out_size) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_mbox_header_s *mbx_header = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_hwdev_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_pri_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_in, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_out, return); + HIFC_CHECK(INVALID_VALUE32, v_out_size, return); + + hba = (struct hifc_hba_s *)v_pri_handle; + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Hba is null"); + + return; + } + + mbx_header = (struct hifc_mbox_header_s *)v_buf_in; + if (mbx_header->cmnd_type != v_cmd) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)", + hba->port_cfg.port_id, v_cmd, + mbx_header->cmnd_type); + return; + } + + while (index < (sizeof(up_msg_handle) / + sizeof(struct hifc_up_2_drv_msg_handle_s))) { + if ((v_cmd == up_msg_handle[index].cmd) && + (up_msg_handle[index].pfn_hifc_msg_up2drv_handler)) { + ret = + up_msg_handle[index].pfn_hifc_msg_up2drv_handler( + hba, + v_buf_in); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, + UNF_ERR, + "[warn]Port(0x%x) process 
up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + } + + /* Process Done & return */ + return; + } + index++; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) process up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + + PRINT_OUTBOUND_IOB(UNF_MAJOR, v_buf_in, ((unsigned int)v_in_size)); +} + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac) +{ + struct hifc_hba_s *hba = NULL; + struct unf_get_chip_info_argout *wwn = NULL; + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_mac, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + wwn = (struct unf_get_chip_info_argout *)v_mac; + + memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_chip_info, + sizeof(get_chip_info), chip_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + get_chip_info.header.cmnd_type); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + wwn->board_type = chip_info_sts->get_chip_info_sts.board_type; + hba->card_info.card_type = chip_info_sts->get_chip_info_sts.board_type; + wwn->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + wwn->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + wwn->sys_mac = chip_info_sts->get_chip_info_sts.sys_mac; + + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info) +{ + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned short out_size = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hw_dev_handle, return UNF_RETURN_ERROR); + + memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + out_size = sizeof(union hifc_outmbox_generic_u); + + 
if (hifc_msg_to_mgmt_sync(hw_dev_handle, HIFC_MOD_FC, + HIFC_MBOX_GET_CHIP_INFO, + (void *)&get_chip_info.header, + sizeof(struct hifc_inbox_get_chip_info_s), + (union hifc_outmbox_generic_u *)chip_info_sts, + &out_size, + (HIFC_MBX_MAX_TIMEOUT)) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + HIFC_MBOX_GET_CHIP_INFO); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port mailbox status incorrect status(0x%x) .", + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port receive mailbox type incorrect type: 0x%x.", + chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + v_chip_info->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + v_chip_info->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + v_chip_info->tape_support = (unsigned char) + chip_info_sts->get_chip_info_sts.tape_support; + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +void hifc_get_red_info_by_rw_type(struct unf_rw_reg_param_s *param, + struct hifc_inmbox_get_reg_info_s *v_reg_info) +{ + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_READ_64)) { + v_reg_info->op_code = 0; + } else if ((param->rw_type == UNF_WRITE) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->op_code = 1; + } + + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_WRITE)) { + v_reg_info->reg_len = 32; + } else if ((param->rw_type == UNF_READ_64) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->reg_len = 64; + } +} + +unsigned int hifc_rw_reg(void *v_hba, void *v_params) +{ + struct hifc_hba_s *hba = NULL; + struct unf_rw_reg_param_s *param = NULL; + struct hifc_inmbox_get_reg_info_s reg_info; + union hifc_outmbox_generic_u *reg_info_sts = NULL; + unsigned int para_value_out_l = 0; + unsigned int para_value_out_h = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_params, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + param = (struct unf_rw_reg_param_s *)v_params; + + memset(®_info, 0, sizeof(struct hifc_inmbox_get_reg_info_s)); + reg_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!reg_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(reg_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + hifc_get_red_info_by_rw_type(param, ®_info); + + reg_info.reg_addr = param->offset; + reg_info.reg_value_l32 = (param->value) & VALUEMASK_L; + reg_info.reg_value_h32 = ((param->value) & VALUEMASK_H) >> 32; + + reg_info.header.cmnd_type = HIFC_MBOX_REG_RW_MODE; + reg_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_reg_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, ®_info, + sizeof(reg_info), + reg_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + reg_info.header.cmnd_type); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + 
reg_info_sts->get_reg_info_sts.status); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.header.cmnd_type != + HIFC_MBOX_REG_RW_MODE_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + reg_info_sts->get_reg_info_sts.header.cmnd_type); + + goto exit; + } + + para_value_out_l = reg_info_sts->get_reg_info_sts.reg_value_l32; + para_value_out_h = reg_info_sts->get_reg_info_sts.reg_value_h32; + param->value = (unsigned long long)para_value_out_l | + ((unsigned long long)para_value_out_h << 32); + + ret = RETURN_OK; +exit: + kfree(reg_info_sts); + return ret; +} + +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba) +{ + struct hifc_inbox_config_api_s config_api; + union hifc_outmbox_generic_u *out_mbox = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&config_api, 0, sizeof(config_api)); + out_mbox = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!out_mbox) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(out_mbox, 0, sizeof(union hifc_outmbox_generic_u)); + + config_api.header.cmnd_type = HIFC_MBOX_CONFIG_API; + config_api.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_config_api_s)); + + config_api.op_code = UNDEFINEOPCODE; + + /* change switching top cmd of CM to the cmd that up recognize */ + /* if the cmd equals UNF_TOP_P2P_MASK sending in CM means that it + * should be changed into P2P top, LL using HIFC_TOP_NON_LOOP_MASK + */ + if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_P2P_MASK) { + config_api.topy_mode = 0x2; + /* if the cmd equals UNF_TOP_LOOP_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_LOOP_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_LOOP_MASK) { + config_api.topy_mode = 0x1; + + /* if the cmd equals UNF_TOP_AUTO_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_AUTO_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_AUTO_MASK) { + config_api.topy_mode = 0x0; + } else { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) topo cmd is error, command type: 0x%x", + v_hba->port_cfg.port_id, + (unsigned char)v_hba->port_topo_cfg); + + goto exit; + } + + /* About speed */ + config_api.sfp_speed = (unsigned char)(v_hba->port_speed_cfg); + config_api.max_speed = (unsigned char)(v_hba->max_support_speed); + + config_api.rx_bbcredit_32g = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + config_api.rx_bbcredit_16g = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + config_api.rx_bbcredit_842g = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + config_api.rdy_cnt_bf_fst_frm = HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT; + config_api.esch_value_32g = HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE; + config_api.esch_value_16g = HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE; + config_api.esch_value_8g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_4g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_2g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_bust_size = HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE; + + /* default value:0xFF */ + config_api.hard_alpa = 0xFF; + memcpy(config_api.port_name, v_hba->sys_port_name, UNF_WWN_LEN); + + /* if only for slave, the value is 1; if participate master choosing, + * the value is 0 + */ + config_api.slave 
= v_hba->port_loop_role; + + /* 1:auto negotiate, 0:fixed mode negotiate */ + if (config_api.sfp_speed == 0) + config_api.auto_sneg = 0x1; + else + config_api.auto_sneg = 0x0; + + /* send & wait */ + if (hifc_mb_send_and_wait_mbox(v_hba, &config_api, + sizeof(config_api), + out_mbox) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, + config_api.header.cmnd_type); + + goto exit; + } + + /* mailbox status check */ + if (out_mbox->config_api_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) with status(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type, + out_mbox->config_api_sts.status); + + goto exit; + } + + /* RSP type check */ + if (out_mbox->config_api_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type); + + goto exit; + } + + ret = RETURN_OK; +exit: + kfree(out_mbox); + return ret; +} + +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on) +{ + struct hifc_inbox_port_switch_s port_switch; + union hifc_outmbox_generic_u *port_switch_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&port_switch, 0, sizeof(port_switch)); + + port_switch_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_switch_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_switch_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + port_switch.header.cmnd_type = HIFC_MBOX_PORT_SWITCH; + port_switch.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_port_switch_s)); + port_switch.op_code = (unsigned char)turn_on; + port_switch.port_type = (unsigned char)v_hba->port_type; + + /* set the value is 0 first, vn2vf mode, vlan discovery automatically */ + port_switch.host_id = 0; + port_switch.pf_id = + (unsigned char)(hifc_global_func_id(v_hba->hw_dev_handle)); + port_switch.fcoe_mode = HIFC_FIP_MODE_VN2VF; + port_switch.conf_vlan = 0xffff; + port_switch.sys_node_name = *(unsigned long long *)v_hba->sys_node_name; + port_switch.sys_port_wwn = *(unsigned long long *)v_hba->sys_port_name; + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox(v_hba, &port_switch, sizeof(port_switch), + port_switch_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)", + v_hba->port_cfg.port_id, + port_switch.header.cmnd_type, port_switch.op_code); + + goto exit; + } + + /* check mailbox rsp status */ + if (port_switch_sts->port_switch_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type, + port_switch_sts->port_switch_sts.status); + + goto exit; + } + + /* check mailbox rsp type */ + if (port_switch_sts->port_switch_sts.header.cmnd_type != + HIFC_MBOX_PORT_SWITCH_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + 
v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) switch succeed, turns to %s", + v_hba->port_cfg.port_id, + (turn_on) ? "on" : "off"); + + ret = RETURN_OK; +exit: + kfree(port_switch_sts); + return ret; +} + +unsigned int hifc_config_login_api(struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_para) +{ +#define HIFC_LOOP_RDYNUM 8 + int async_ret = RETURN_OK; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_inmbox_config_login_s cfg_login; + union hifc_outmbox_generic_u *cfg_login_sts = NULL; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_login, 0, sizeof(cfg_login)); + cfg_login_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!cfg_login_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(cfg_login_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + cfg_login.header.cmnd_type = HIFC_MBOX_CONFIG_LOGIN_API; + cfg_login.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_login_s)); + cfg_login.header.port_id = v_hba->port_index; + + cfg_login.op_code = UNDEFINEOPCODE; + + cfg_login.tx_bb_credit = v_hba->remote_bbcredit; + + cfg_login.etov = v_hba->compared_edtov_val; + cfg_login.rtov = v_hba->compared_ratov_val; + + cfg_login.rt_tov_tag = v_hba->remote_rttov_tag; + cfg_login.ed_tov_tag = v_hba->remote_edtov_tag; + cfg_login.bb_credit = v_hba->remote_bbcredit; + cfg_login.bbscn = HIFC_LSB(v_hba->compared_bbscn); + + if (cfg_login.bbscn) { + cfg_login.lr_flag = + (v_login_para->els_cmnd_code == ELS_PLOGI) ? 0 : 1; + ret = hifc_mb_send_and_wait_mbox(v_hba, &cfg_login, + sizeof(cfg_login), + cfg_login_sts); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_LOGIN_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type, + cfg_login_sts->config_login_sts.status); + + goto exit; + } + } else { + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_CONFIG_LOGIN_API, + &cfg_login, + sizeof(cfg_login)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, + HIFC_SEND_CONFIG_LOGINAPI_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "Port(0x%x) hifc can't send config login cmd to up,ret:%d.", + v_hba->port_cfg.port_id, async_ret); + + goto exit; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CONFIG_LOGINAPI); + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).", + v_hba->port_cfg.port_id, v_hba->active_topo, + cfg_login.tx_bb_credit, cfg_login.bbscn); + + ret = RETURN_OK; +exit: + kfree(cfg_login_sts); + return ret; +} + +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u *out_mbox) +{ + void *handle = NULL; + unsigned short out_size = 0; + unsigned long time_out = 0; + int ret = 0; + struct hifc_mbox_header_s *header; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_in_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, out_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, + return UNF_RETURN_ERROR); + + header = (struct hifc_mbox_header_s *)v_in_mbox; + out_size = sizeof(union hifc_outmbox_generic_u); + handle = v_hba->hw_dev_handle; + + /* Wait for las mailbox completion: */ + time_out = wait_for_completion_timeout( + &v_hba->mbox_complete, + (unsigned long)msecs_to_jiffies(HIFC_MBOX_TIME_SEC_MAX * 1000)); + if (time_out == UNF_ZERO) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec", + v_hba->port_cfg.port_id, header->cmnd_type, + HIFC_MBOX_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(handle, HIFC_MOD_FC, header->cmnd_type, + (void *)v_in_mbox, in_size, + (union hifc_outmbox_generic_u *)out_mbox, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d", + v_hba->port_cfg.port_id, header->cmnd_type, ret); + + complete(&v_hba->mbox_complete); + return UNF_RETURN_ERROR; + } + + complete(&v_hba->mbox_complete); + return RETURN_OK; +} + +unsigned short hifc_get_global_base_qpn(void *v_handle) +{ +#define NIC_UP_CMD_GET_GLOBAL_QPN 102 + + int ret = 0; + unsigned short out_size = 0; + struct hifc_get_global_base_qpn_s qpn_base = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return INVALID_VALUE16); + qpn_base.func_id = hifc_global_func_id(v_handle); + out_size = (u16)sizeof(struct hifc_get_global_base_qpn_s); + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(v_handle, + HIFC_MOD_L2NIC, + NIC_UP_CMD_GET_GLOBAL_QPN, + &qpn_base, + (u16)sizeof(qpn_base), + &qpn_base, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + + if (ret || (!out_size) || qpn_base.status) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_get_global_base_qpn failed, ret %d, out_size %u, qpn_info.ret%u", + ret, out_size, qpn_base.status); + + return 0xFFFF; + } + + return 
(u16)(qpn_base.base_qpn); +} + +void hifc_initial_dynamic_info(struct hifc_hba_s *v_fc_port) +{ + struct hifc_hba_s *hba = v_fc_port; + unsigned long flag = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + hba->loop_map_valid = LOOP_MAP_INVALID; + hba->delay_info.srq_delay_flag = 0; + hba->delay_info.root_rq_rcvd_flag = 0; + spin_unlock_irqrestore(&hba->hba_lock, flag); +} + +unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define HIFC_LOOP_MASK 0x1 +#define HIFC_LOOPMAP_COUNT 128 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + v_hba->phy_link = UNF_PORT_LINK_UP; + v_hba->active_port_speed = buf_in->speed; + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + if ((buf_in->top_type == HIFC_LOOP_MASK) && + ((buf_in->loop_map_info[1] == UNF_FL_PORT_LOOP_ADDR) || + (buf_in->loop_map_info[2] == UNF_FL_PORT_LOOP_ADDR))) { + v_hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else if (buf_in->top_type == HIFC_LOOP_MASK) { + v_hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP;/* Private Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else { + v_hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */ + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, + buf_in->speed, buf_in->top_type, v_hba->active_topo); + + /* Set clear & flush state */ + hifc_set_hba_flush_state(v_hba, UNF_FALSE); + hifc_set_root_sq_flush_state(v_hba, UNF_FALSE); + hifc_set_rport_flush_state(v_hba, UNF_FALSE); + + /* Report link up event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_LINK_UP, &v_hba->active_port_speed); + + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_UP_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + /* 1. Led state setting */ + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, buf_in->reason); + + hifc_initial_dynamic_info(v_hba); + + /* 2. set HBA flush state */ + hifc_set_hba_flush_state(v_hba, UNF_TRUE); + + /* 3. set Root SQ flush state */ + hifc_set_root_sq_flush_state(v_hba, UNF_TRUE); + + /* 4. 
set R_Port (parent SQ) flush state */ + hifc_set_rport_flush_state(v_hba, UNF_TRUE); + + /* 5. Report link down event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_LINK_DOWN, 0); + + /* DFX setting */ + HIFC_LINK_REASON_STAT(v_hba, buf_in->reason); + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_DOWN_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) receive delete cmd event(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event); + + /* Send buffer clear cmnd */ + ret = hifc_clear_fetched_sq_wqe(v_hba); + + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_SCANNING; + HIFC_LINK_EVENT_STAT(v_hba, HIFC_FC_DELETE_CMND_COUNT); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + return ret; +} + +unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define FC_ERR_LEVEL_DEAD 0 +#define FC_ERR_LEVEL_HIGH 1 +#define FC_ERR_LEVEL_LOW 2 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_up_error_event_s *buf_in = NULL; + + buf_in = (struct hifc_up_error_event_s *)v_buf_in; + if (buf_in->error_type >= HIFC_UP_ERR_BUTT || + buf_in->error_value >= HIFC_ERR_VALUE_BUTT) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Type(0x%x) Value(0x%x).", + v_hba->port_cfg.port_id, + buf_in->error_type, + buf_in->error_value); + return ret; + } + + switch (buf_in->error_level) { + case FC_ERR_LEVEL_DEAD: + /* todo: chip reset */ + ret = RETURN_OK; + break; + + case FC_ERR_LEVEL_HIGH: + /* port reset */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_ABNORMAL_RESET, NULL); + break; + + case FC_ERR_LEVEL_LOW: + ret = RETURN_OK; + break; + + default: + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Level(0x%x), Can not Process.", + v_hba->port_cfg.port_id, buf_in->error_level); + return ret; + } + if (buf_in->error_value < HIFC_ERR_VALUE_BUTT) + HIFC_UP_ERR_EVENT_STAT(v_hba, buf_in->error_value); + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.", + v_hba->port_cfg.port_id, buf_in->error_level, + buf_in->error_type, buf_in->error_value, + (ret == UNF_RETURN_ERROR) ? 
"ERROR" : "OK"); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + + return ret; +} + +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *topo_cfg = v_topo_cfg; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_topo_cfg, return UNF_RETURN_ERROR); + + *topo_cfg = hba->port_topo_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get topology config: 0x%x.", + *topo_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_topo_act(void *v_hba, void *topo_act) +{ + struct hifc_hba_s *hba = v_hba; + enum unf_act_topo_e *ret_topo_act = topo_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, topo_act, return UNF_RETURN_ERROR); + + /* Get topo from low_level */ + *ret_topo_act = hba->active_topo; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Get active topology: 0x%x", + *ret_topo_act); + + return RETURN_OK; +} + +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa) +{ + unsigned long flags = 0; + struct hifc_hba_s *hba = v_hba; + unsigned char *alpa = v_alpa; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_alpa, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&hba->hba_lock, flags); + *alpa = hba->active_al_pa; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get active AL_PA(0x%x)", *alpa); + + return RETURN_OK; +} + +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct hifc_led_state_s *led_state = v_led_state; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_led_state, return UNF_RETURN_ERROR); + + led_state->green_speed_led = hba->led_states.green_speed_led; + led_state->yellow_speed_led = hba->led_states.yellow_speed_led; + led_state->ac_led = hba->led_states.ac_led; + + return ret; +} + +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *hard_ware_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + hard_ware_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, hard_ware_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + hard_ware_ver[UNF_HW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info) +{ + struct unf_lport_sfp_info *sfp_info = + (struct unf_lport_sfp_info *)v_sfp_info; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_fc_port; + struct hifc_inmbox_get_sfp_info_s get_sfp_info; + union hifc_outmbox_generic_u *get_sfp_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, sfp_info, return UNF_RETURN_ERROR); + + memset(&get_sfp_info, 0, sizeof(get_sfp_info)); + + get_sfp_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!get_sfp_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(get_sfp_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_sfp_info.header.cmnd_type = HIFC_MBOX_GET_SFP_INFO; + 
get_sfp_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_sfp_info_s)); + get_sfp_info.header.port_id = (hba->port_index); + + /* send mailbox and handle the return sts */ + if (hifc_mb_send_and_wait_mbox(hba, &get_sfp_info, sizeof(get_sfp_info), + get_sfp_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info.header.cmnd_type); + + goto exit; + } + + sfp_info->status = get_sfp_info_sts->get_sfp_info_sts.status; + if (get_sfp_info_sts->get_sfp_info_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type, + get_sfp_info_sts->get_sfp_info_sts.status); + + goto exit; + } + + if (get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type != + HIFC_MBOX_GET_SFP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type); + + goto exit; + } + + /* the real sfpinfo is beyond the header of sts */ + memcpy(&sfp_info->sfp_eeprom_info, + ((unsigned char *)get_sfp_info_sts + + sizeof(get_sfp_info_sts->get_sfp_info_sts)), + sizeof(union unf_sfp_eeprome_info)); + + ret = RETURN_OK; +exit: + kfree(get_sfp_info_sts); + return ret; +} + +unsigned int hifc_get_port_info(void *v_hba) +{ + unsigned long flags = 0; + struct hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + unsigned int ret = UNF_RETURN_ERROR; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), port_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) send and wait mailbox type(0x%x) failed.", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->active_bb_scn = port_info_sts->get_port_info_sts.bbscn; + hba->active_rx_bb_credit = + port_info_sts->get_port_info_sts.non_loop_rx_credit; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +unsigned int hifc_get_port_current_info(void *v_hba, void *port_info) +{ + struct hifc_hba_s *hba = NULL; + struct 
hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct unf_get_port_info_argout *current_port_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, port_info, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + current_port_info = (struct unf_get_port_info_argout *)port_info; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), + port_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send and wait mailbox type(0x%x) failed", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + current_port_info->sfp_speed = + (unsigned char)port_info_sts->get_port_info_sts.sfp_speed; + current_port_info->present = + (unsigned char)port_info_sts->get_port_info_sts.present; + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +static void hifc_get_fabric_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_topo = v_param_addr->en_act_topo; + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) BB_SC_N(0x%x)", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +static void hifc_get_port_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + 
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + hifc_get_fabric_login_params(hba, login_coparms); + + if ((hba->active_topo == UNF_ACT_TOP_P2P_FABRIC) || + (hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + if (hba->work_mode == HIFC_SMARTIO_WORK_MODE_FC) + ret = hifc_config_login_api(hba, login_coparms); + } + + return ret; +} + +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = + (struct unf_port_login_parms_s *)v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + if ((hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP) || + (hba->active_topo == UNF_ACT_TOP_P2P_DIRECT)) { + hifc_get_port_login_params(hba, login_coparms); + ret = hifc_config_login_api(hba, login_coparms); + } + + hifc_save_login_para_in_sq_info(hba, login_coparms); + + return ret; +} + +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code) +{ + return RETURN_OK; +} + +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_err_code_s get_err_code; + union hifc_outmbox_generic_u *err_code_sts = NULL; + struct unf_err_code_s *unf_err_code = + (struct unf_err_code_s *)v_err_code; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, unf_err_code, return UNF_RETURN_ERROR); + + memset(&get_err_code, 0, sizeof(get_err_code)); + + err_code_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!err_code_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(err_code_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_err_code.header.cmnd_type = HIFC_MBOX_GET_ERR_CODE; + get_err_code.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_err_code_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_err_code, sizeof(get_err_code), + err_code_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_err_code.header.cmnd_type); + + goto exit; + } + + if (err_code_sts->get_err_code_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect, status: 0x%x.", + hba->port_cfg.port_id, + err_code_sts->get_err_code_sts.header.cmnd_type, + err_code_sts->get_err_code_sts.status); + + goto exit; + } + + unf_err_code->link_fail_count = + err_code_sts->get_err_code_sts.err_code[0]; + unf_err_code->loss_of_sync_count = + err_code_sts->get_err_code_sts.err_code[1]; + 
unf_err_code->loss_of_signal_count = + err_code_sts->get_err_code_sts.err_code[2]; + unf_err_code->proto_error_count = + err_code_sts->get_err_code_sts.err_code[3]; + unf_err_code->bad_rx_char_count = + err_code_sts->get_err_code_sts.err_code[4]; + unf_err_code->bad_crc_count = + err_code_sts->get_err_code_sts.err_code[5]; + unf_err_code->rx_eo_fa_count = + err_code_sts->get_err_code_sts.err_code[6]; + unf_err_code->dis_frame_count = + err_code_sts->get_err_code_sts.err_code[7]; + + ret = RETURN_OK; +exit: + kfree(err_code_sts); + return ret; +} + +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bbcredit) +{ + unsigned int *bb_credit = (unsigned int *)v_bbcredit; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbcredit, return UNF_RETURN_ERROR); + + if (hba->active_port_speed == UNF_PORT_SPEED_32_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + else if (hba->active_port_speed == UNF_PORT_SPEED_16_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + else + *bb_credit = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + + return RETURN_OK; +} + +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn) +{ + unsigned int *bbscn = (unsigned int *)v_bbscn; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbscn, return UNF_RETURN_ERROR); + + *bbscn = hba->port_bbscn_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, "Return BBSCN(0x%x) to CM", + *bbscn); + + return RETURN_OK; +} + +unsigned int hifc_get_software_version(void *v_hba, void *v_version) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_fw_version_s fw_ver; + union hifc_outmbox_generic_u *fw_ver_sts = NULL; + unsigned char *ver = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_version, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!fw_ver_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(fw_ver_sts, 0, sizeof(union hifc_outmbox_generic_u)); + ver = (unsigned char *)&fw_ver_sts->get_fw_ver_sts; + + fw_ver.header.cmnd_type = HIFC_MBOX_GET_FW_VERSION; + fw_ver.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_fw_version_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &fw_ver, sizeof(fw_ver), + fw_ver_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + fw_ver.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.header.cmnd_type != + HIFC_MBOX_GET_FW_VERSION_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type, + fw_ver_sts->get_fw_ver_sts.status); + + goto exit; + } + + memcpy(v_version, ver + HIFC_VER_ADDR_OFFSET, + sizeof(struct 
hifc_outmbox_get_fw_version_sts_s) - + HIFC_VER_ADDR_OFFSET); + + ret = RETURN_OK; +exit: + kfree(fw_ver_sts); + return ret; +} + +unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *fw_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + fw_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, fw_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + fw_ver[UNF_FW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf) +{ + unsigned long flags = 0; + struct unf_buf_s *buf = (struct unf_buf_s *)v_buf; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->cbuf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->buf_len, return UNF_RETURN_ERROR); + + if (buf->buf_len > UNF_LOOPMAP_COUNT) + return UNF_RETURN_ERROR; + + spin_lock_irqsave(&hba->hba_lock, flags); + if (hba->loop_map_valid != LOOP_MAP_VALID) { + spin_unlock_irqrestore(&hba->hba_lock, flags); + return UNF_RETURN_ERROR; + } + memcpy(buf->cbuf, hba->loop_map, buf->buf_len); /* do memcpy */ + spin_unlock_irqrestore(&hba->hba_lock, flags); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_cfg = v_speed_cfg; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_cfg, return UNF_RETURN_ERROR); + + *speed_cfg = hba->port_speed_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Get config link rate: 0x%x.", + *speed_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_act = v_speed_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_act, return UNF_RETURN_ERROR); + + *speed_act = hba->active_port_speed; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get config link rate: 0x%x.", + *speed_act); + return RETURN_OK; +} + +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out) +{ + struct hifc_hba_s *hba = v_hba; + int *fec = v_para_out; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fec, return UNF_RETURN_ERROR); + + *fec = (hba->fec_status) ? 
UNF_TRUE : UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get Port fec: 0x%x.", + (hba->fec_status)); + return RETURN_OK; +} + +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in) +{ + struct hifc_inmbox_save_hba_info_s *hba_info = NULL; + struct hifc_outmbox_save_hba_info_sts_s *hba_info_sts = NULL; + void *hba_info_addr = v_para_in; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + hba_info = vmalloc(sizeof(struct hifc_inmbox_save_hba_info_s)); + + if (!hba_info) + return UNF_RETURN_ERROR; + + hba_info_sts = vmalloc(sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + if (!hba_info_sts) { + vfree(hba_info); + return UNF_RETURN_ERROR; + } + + memset(hba_info, 0, sizeof(struct hifc_inmbox_save_hba_info_s)); + memset(hba_info_sts, 0, + sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + hba_info->header.cmnd_type = HIFC_MBOX_SAVE_HBA_INFO; + hba_info->header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_save_hba_info_s)); + + /* fill mailbox payload */ + memcpy(&hba_info->hba_save_info[0], hba_info_addr, SAVE_PORT_INFO_LEN); + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox( + hba, hba_info, + sizeof(*hba_info), + (union hifc_outmbox_generic_u *)hba_info_sts) + != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x)", + hba->port_cfg.port_id, + hba_info->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp status */ + if (hba_info_sts->status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type, + hba_info_sts->status); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp type */ + if (hba_info_sts->header.cmnd_type != HIFC_MBOX_SAVE_HBA_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + memcpy(hba_info_addr, &hba_info_sts->save_hba_info[0], + SAVE_PORT_INFO_LEN - 8); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) save hba info succeed", + hba->port_cfg.port_id); + + vfree(hba_info); + vfree(hba_info_sts); + + return RETURN_OK; +} + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type) +{ + struct hifc_inmbox_port_reset_s port_reset; + union hifc_outmbox_generic_u *port_reset_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&port_reset, 0, sizeof(port_reset)); + + port_reset_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_reset_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_reset_sts, 0, sizeof(union hifc_outmbox_generic_u)); + port_reset.header.cmnd_type = HIFC_MBOX_PORT_RESET; + port_reset.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_port_reset_s)); + port_reset.op_code = v_sub_type; + + if (hifc_mb_send_and_wait_mbox(v_hba, &port_reset, 
sizeof(port_reset), + port_reset_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, + port_reset.header.cmnd_type); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type, + port_reset_sts->port_reset_sts.status); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.header.cmnd_type != + HIFC_MBOX_PORT_RESET_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[info]Port(0x%x) reset chip mailbox success", + v_hba->port_cfg.port_id); + + ret = RETURN_OK; +exit: + kfree(port_reset_sts); + return ret; +} + +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba) +{ + int async_ret = RETURN_OK; + struct hifc_inmbx_clear_node_s clear_done; + + clear_done.header.cmnd_type = HIFC_MBOX_BUFFER_CLEAR_DONE; + clear_done.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbx_clear_node_s)); + clear_done.header.port_id = v_hba->port_index; + + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_BUFFER_CLEAR_DONE, + &clear_done, sizeof(clear_done)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC Port(0x%x) can't send clear done cmd to up, ret:%d", + v_hba->port_cfg.port_id, async_ret); + + return UNF_RETURN_ERROR; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE); + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + v_hba->next_clearing_sq = 0; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)", + v_hba->port_cfg.port_id, + clear_done.header.cmnd_type, v_hba->q_set_stage); + + return RETURN_OK; +} + +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state) +{ + struct hifc_inmbox_get_clear_state_s clr_state; + union hifc_outmbox_generic_u *port_clr_state_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_clear_state, return UNF_RETURN_ERROR); + + memset(&clr_state, 0, sizeof(clr_state)); + + port_clr_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_clr_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_clr_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + clr_state.header.cmnd_type = HIFC_MBOX_GET_CLEAR_STATE; + clr_state.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_clear_state_s)); + + if (hifc_mb_send_and_wait_mbox(v_hba, &clr_state, sizeof(clr_state), + port_clr_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x", + clr_state.header.cmnd_type); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.status != RETURN_OK) { + HIFC_TRACE( + 
UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type, + port_clr_state_sts->get_clr_state_sts.status, + port_clr_state_sts->get_clr_state_sts.state); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.header.cmnd_type != + HIFC_MBOX_GET_CLEAR_STATE_STS) { + HIFC_TRACE( + UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) get port clear state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.state); + + *v_clear_state = port_clr_state_sts->get_clr_state_sts.state; + + ret = RETURN_OK; +exit: + kfree(port_clr_state_sts); + return ret; +} + +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode) +{ + struct hifc_inmbox_config_fec_s cfg_fec; + union hifc_outmbox_generic_u *port_fec_state_sts = NULL; + unsigned char op_code = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_fec, 0, sizeof(cfg_fec)); + + port_fec_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_fec_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_fec_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + op_code = (unsigned char)v_fec_opcode; + + cfg_fec.header.cmnd_type = HIFC_MBOX_CONFIG_FEC; + cfg_fec.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(HIFC_MBOX_CONFIG_FEC)); + cfg_fec.fec_op_code = op_code; + + if (hifc_mb_send_and_wait_mbox(v_hba, &cfg_fec, sizeof(cfg_fec), + port_fec_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) hifc can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, cfg_fec.header.cmnd_type); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type, + port_fec_state_sts->config_fec_sts.status); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_FEC_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type); + + goto exit; + } + + v_hba->fec_status = v_fec_opcode; + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) set FEC Status is %u.", + v_hba->port_cfg.port_id, op_code); + + ret = RETURN_OK; +exit: + kfree(port_fec_state_sts); + return ret; +} + +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, int op_code, + unsigned int user_data) +{ + struct hifc_inmbox_config_timer_s time_cfg; + union hifc_outmbox_generic_u *time_cfg_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&time_cfg, 0, sizeof(time_cfg)); + + time_cfg_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!time_cfg_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(time_cfg_sts, 0, sizeof(union hifc_outmbox_generic_u)); + time_cfg.header.cmnd_type = HIFC_MBOX_CONFIG_TIMER; + time_cfg.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_timer_s)); + time_cfg.op_code = (unsigned short)op_code; + time_cfg.fun_id = hifc_global_func_id(v_hba->hw_dev_handle); + time_cfg.user_data = user_data; + + if (hifc_mb_send_and_wait_mbox(v_hba, &time_cfg, sizeof(time_cfg), + time_cfg_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) hifc can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, time_cfg.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_TIMER_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type, + time_cfg_sts->timer_config_sts.status); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) notify uP to %s timer success", + v_hba->port_cfg.port_id, op_code ? 
"open" : "close"); + + ret = RETURN_OK; +exit: + kfree(time_cfg_sts); + return ret; +} + +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(struct unf_flash_data_mgmt_sts_s), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_mgmt, 0, sizeof(struct unf_mbox_flash_data_mgmt_s)); + memset(flash_data_sts, 0, sizeof(struct unf_flash_data_mgmt_sts_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 0; /* read config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_mgmt->mbox_head.cmnd_type); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto exit; + } + + memcpy((unsigned char *)v_flash_data, + (unsigned char *)&flash_data_sts->flash_data_sts.flash_data, + sizeof(struct unf_flash_data_s)); + ret = RETURN_OK; +exit: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} + +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_sts, 0, sizeof(union hifc_outmbox_generic_u)); + memset(flash_data_mgmt, 0, sizeof(struct 
unf_mbox_flash_data_mgmt_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 2; /* flash config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 1; /* write config */ + memcpy(&flash_data_mgmt->flash_data, + (unsigned char *)v_flash_data, sizeof(struct unf_flash_data_s)); + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + ret = RETURN_OK; +END: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.h b/drivers/scsi/huawei/hifc/hifc_chipitf.h new file mode 100644 index 0000000000000000000000000000000000000000..8b4915d2a99078bdb126f8ed1c0e893bd08fc9e3 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.h @@ -0,0 +1,643 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_CHIPITF_H__ +#define __HIFC_CHIPITF_H__ + +#include "unf_log.h" +#include "hifc_utils.h" +#include "hifc_module.h" +#include "hifc_service.h" + +/* CONF_API_CMND */ +#define HIFC_MBOX_CONFIG_API 0x00 +#define HIFC_MBOX_CONFIG_API_STS 0xA0 + +/* GET_CHIP_INFO_API_CMD */ +#define HIFC_MBOX_GET_CHIP_INFO 0x01 +#define HIFC_MBOX_GET_CHIP_INFO_STS 0xA1 + +/* PORT_RESET */ +#define HIFC_MBOX_PORT_RESET 0x02 +#define HIFC_MBOX_PORT_RESET_STS 0xA2 + +/* SFP_SWITCH_API_CMND */ +#define HIFC_MBOX_PORT_SWITCH 0x03 +#define HIFC_MBOX_PORT_SWITCH_STS 0xA3 + +/* GET_SFP_INFO */ +#define HIFC_MBOX_GET_SFP_INFO 0x04 +#define 
HIFC_MBOX_GET_SFP_INFO_STS 0xA4 + +/* CONF_AF_LOGIN_API_CMND */ +#define HIFC_MBOX_CONFIG_LOGIN_API 0x06 +#define HIFC_MBOX_CONFIG_LOGIN_API_STS 0xA6 + +/* BUFFER_CLEAR_DONE_CMND */ +#define HIFC_MBOX_BUFFER_CLEAR_DONE 0x07 +#define HIFC_MBOX_BUFFER_CLEAR_DONE_STS 0xA7 + +#define HIFC_MBOX_GET_ERR_CODE 0x08 +#define HIFC_MBOX_GET_ERR_CODE_STS 0xA8 + +#define HIFC_MBOX_GET_UP_STATE 0x09 +#define HIFC_MBOX_GET_UP_STATE_STS 0xA9 + +/* LOOPBACK MODE */ +#define HIFC_MBOX_LOOPBACK_MODE 0x0A +#define HIFC_MBOX_LOOPBACK_MODE_STS 0xAA + +/* REG RW MODE */ +#define HIFC_MBOX_REG_RW_MODE 0x0B +#define HIFC_MBOX_REG_RW_MODE_STS 0xAB + +/* GET CLEAR DONE STATE */ +#define HIFC_MBOX_GET_CLEAR_STATE 0x0E +#define HIFC_MBOX_GET_CLEAR_STATE_STS 0xAE + +/* GET UP & UCODE VER */ +#define HIFC_MBOX_GET_FW_VERSION 0x0F +#define HIFC_MBOX_GET_FW_VERSION_STS 0xAF + +/* CONFIG TIMER */ +#define HIFC_MBOX_CONFIG_TIMER 0x10 +#define HIFC_MBOX_CONFIG_TIMER_STS 0xB0 + +/* CONFIG SRQC */ +#define HIFC_MBOX_CONFIG_SRQC 0x11 +#define HIFC_MBOX_CONFIG_SRQC_STS 0xB1 + +/* Led Test */ +#define HIFC_MBOX_LED_TEST 0x12 +#define HIFC_MBOX_LED_TEST_STS 0xB2 + +/* set esch */ +#define HIFC_MBOX_SET_ESCH 0x13 +#define HIFC_MBOX_SET_ESCH_STS 0xB3 + +/* set get tx serdes */ +#define HIFC_MBOX_SET_GET_SERDES_TX 0x14 +#define HIFC_MBOX_SET_GET_SERDES_TX_STS 0xB4 + +/* get rx serdes */ +#define HIFC_MBOX_GET_SERDES_RX 0x15 +#define HIFC_MBOX_GET_SERDES_RX_STS 0xB5 + +/* i2c read write */ +#define HIFC_MBOX_I2C_WR_RD 0x16 +#define HIFC_MBOX_I2C_WR_RD_STS 0xB6 + +/* Set FEC Enable */ +#define HIFC_MBOX_CONFIG_FEC 0x17 +#define HIFC_MBOX_CONFIG_FEC_STS 0xB7 + +/* GET UCODE STATS CMD */ +#define HIFC_MBOX_GET_UCODE_STAT 0x18 +#define HIFC_MBOX_GET_UCODE_STAT_STS 0xB8 + +/* gpio read write */ +#define HIFC_MBOX_GPIO_WR_RD 0x19 +#define HIFC_MBOX_GPIO_WR_RD_STS 0xB9 + +/* GET PORT INFO CMD */ +#define HIFC_MBOX_GET_PORT_INFO 0x20 +#define HIFC_MBOX_GET_PORT_INFO_STS 0xC0 + +/* save hba info CMD */ +#define HIFC_MBOX_SAVE_HBA_INFO 0x24 +#define HIFC_MBOX_SAVE_HBA_INFO_STS 0xc4 + +#define HIFC_MBOX_FLASH_DATA_MGMT 0x25 +#define HIFC_MBOX_FLASH_DATA_MGMT_STS 0xc5 + +/* FCOE: DRV->UP */ +#define HIFC_MBOX_SEND_ELS_CMD 0x2A +#define HIFC_MBOX_SEND_VPORT_INFO 0x2B + +/* FC: UP->DRV */ +#define HIFC_MBOX_RECV_FC_LINKUP 0x40 +#define HIFC_MBOX_RECV_FC_LINKDOWN 0x41 +#define HIFC_MBOX_RECV_FC_DELCMD 0x42 +#define HIFC_MBOX_RECV_FC_ERROR 0x43 + +#define LOOP_MAP_VALID 1 +#define LOOP_MAP_INVALID 0 + +#define HIFC_MBOX_SIZE 1024 +#define HIFC_MBOX_HEADER_SIZE 4 + +#define ATUOSPEED 1 +#define FIXEDSPEED 0 +#define UNDEFINEOPCODE 0 + +#define VALUEMASK_L 0x00000000FFFFFFFF +#define VALUEMASK_H 0xFFFFFFFF00000000 + +#define STATUS_OK 0 +#define STATUS_FAIL 1 + +enum hifc_drv_2_up_unblock_msg_cmd_code_e { + HIFC_SEND_ELS_CMD, + HIFC_SEND_ELS_CMD_FAIL, + HIFC_RCV_ELS_CMD_RSP, + HIFC_SEND_CONFIG_LOGINAPI, + HIFC_SEND_CONFIG_LOGINAPI_FAIL, + HIFC_RCV_CONFIG_LOGIN_API_RSP, + HIFC_SEND_CLEAR_DONE, + HIFC_SEND_CLEAR_DONE_FAIL, + HIFC_RCV_CLEAR_DONE_RSP, + HIFC_SEND_VPORT_INFO_DONE, + HIFC_SEND_VPORT_INFO_FAIL, + HIFC_SEND_VPORT_INFO_RSP, + HIFC_MBOX_CMD_BUTT + +}; + +/* up to driver handle templete */ +struct hifc_up_2_drv_msg_handle_s { + unsigned char cmd; + unsigned int (*pfn_hifc_msg_up2drv_handler)(struct hifc_hba_s *v_hba, + void *v_buf_in); +}; + +/* Mbox Common Header */ +struct hifc_mbox_header_s { + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char reserved; + +}; + +/* open or close the sfp */ +struct 
hifc_inbox_port_switch_s { + struct hifc_mbox_header_s header; + + unsigned char op_code; + unsigned char port_type; + unsigned short reserved; + + unsigned char host_id; + unsigned char pf_id; + unsigned char fcoe_mode; + unsigned char reserved2; + + unsigned short conf_vlan; + unsigned short reserved3; + + unsigned long long sys_port_wwn; + unsigned long long sys_node_name; +}; + +struct hifc_outbox_port_switch_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* config API */ +struct hifc_inbox_config_api_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned char topy_mode; + unsigned char sfp_speed; + unsigned char max_speed; + unsigned char hard_alpa; + + unsigned char port_name[UNF_WWN_LEN]; + + unsigned int slave : 1; + unsigned int auto_sneg : 1; + unsigned int reserved2 : 30; + + unsigned int rx_bbcredit_32g : 16; /* 160 */ + unsigned int rx_bbcredit_16g : 16; /* 80 */ + unsigned int rx_bbcredit_842g : 16; /* 50 */ + unsigned int rdy_cnt_bf_fst_frm : 16; /* 8 */ + + unsigned int esch_value_32g; + unsigned int esch_value_16g; + unsigned int esch_value_8g; + unsigned int esch_value_4g; + unsigned int esch_value_2g; + unsigned int esch_bust_size; +}; + +struct hifc_outbox_config_api_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* Get chip info */ +struct hifc_inbox_get_chip_info_s { + struct hifc_mbox_header_s header; + +}; + +struct hifc_outbox_get_chip_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char board_type; + unsigned char rvsd; + unsigned char tape_support : 1; + unsigned char reserved : 7; + + unsigned long long wwpn; + unsigned long long wwnn; + unsigned long long sys_mac; + +}; + +/* Get reg info */ +struct hifc_inmbox_get_reg_info_s { + struct hifc_mbox_header_s header; + unsigned int op_code : 1; + unsigned int reg_len : 8; + unsigned int rsvd : 23; + unsigned int reg_addr; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[27]; +}; + +/* Get reg info sts */ +struct hifc_outmbox_get_reg_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rvsd0; + unsigned char rvsd1; + unsigned char status; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[28]; +}; + +/* Config login API */ +struct hifc_inmbox_config_login_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned short tx_bb_credit; + unsigned short reserved2; + + unsigned int rtov; + unsigned int etov; + + unsigned int rt_tov_tag : 1; + unsigned int ed_tov_tag : 1; + unsigned int bb_credit : 6; + unsigned int bbscn : 8; + unsigned int lr_flag : 16; +}; + +struct hifc_outmbox_config_login_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* port reset */ +#define HIFC_MBOX_SUBTYPE_LIGHT_RESET 0x0 +#define HIFC_MBOX_SUBTYPE_HEAVY_RESET 0x1 + +struct hifc_inmbox_port_reset_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; +}; + +struct hifc_outmbox_port_reset_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +struct hifc_inmbox_get_sfp_info_s { + struct hifc_mbox_header_s header; +}; + +struct 
hifc_outmbox_get_sfp_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int rcvd : 8; + unsigned int length : 16; + unsigned int status : 8; +}; + +/* get and clear error code */ +struct hifc_inmbox_get_err_code_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_err_code_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rsvd; + unsigned char rsvd2; + unsigned char status; + + unsigned int err_code[8]; +}; + +/* uP-->Driver asyn event API */ +struct hifc_link_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char reason; + unsigned char speed; + unsigned char top_type; + + unsigned char alpa_value; + unsigned char reserved1; + unsigned short paticpate : 1; + unsigned short acled : 1; + unsigned short yellow_speed_led : 1; + unsigned short green_speed_led : 1; + unsigned short reserved : 12; + + unsigned char loop_map_info[128]; +}; + +enum hifc_up_err_type_e { + HIFC_UP_ERR_DRV_PARA = 0, + HIFC_UP_ERR_SFP = 1, + HIFC_UP_ERR_32G_PUB = 2, + HIFC_UP_ERR_32G_UA = 3, + HIFC_UP_ERR_32G_MAC = 4, + HIFC_UP_ERR_NON32G_DFX = 5, + HIFC_UP_ERR_NON32G_MAC = 6, + HIFC_UP_ERR_BUTT +}; + +enum hifc_up_err_value_e { + /* ERR type 0 */ + HIFC_DRV_2_UP_PARA_ERR = 0, + + /* ERR type 1 */ + HIFC_SFP_SPEED_ERR, + + /* ERR type 2 */ + HIFC_32GPUB_UA_RXESCH_FIFO_OF, + HIFC_32GPUB_UA_RXESCH_FIFO_UCERR, + + /* ERR type 3 */ + HIFC_32G_UA_UATX_LEN_ABN, + HIFC_32G_UA_RXAFIFO_OF, + HIFC_32G_UA_TXAFIFO_OF, + HIFC_32G_UA_RXAFIFO_UCERR, + HIFC_32G_UA_TXAFIFO_UCERR, + + /* ERR type 4 */ + HIFC_32G_MAC_RX_BBC_FATAL, + HIFC_32G_MAC_TX_BBC_FATAL, + HIFC_32G_MAC_TXFIFO_UF, + HIFC_32G_MAC_PCS_TXFIFO_UF, + HIFC_32G_MAC_RXBBC_CRDT_TO, + HIFC_32G_MAC_PCS_RXAFIFO_OF, + HIFC_32G_MAC_PCS_TXFIFO_OF, + HIFC_32G_MAC_FC2P_RXFIFO_OF, + HIFC_32G_MAC_FC2P_TXFIFO_OF, + HIFC_32G_MAC_FC2P_CAFIFO_OF, + HIFC_32G_MAC_PCS_RXRSFECM_UCEER, + HIFC_32G_MAC_PCS_RXAFIFO_UCEER, + HIFC_32G_MAC_PCS_TXFIFO_UCEER, + HIFC_32G_MAC_FC2P_RXFIFO_UCEER, + HIFC_32G_MAC_FC2P_TXFIFO_UCEER, + + /* ERR type 5 */ + HIFC_NON32G_DFX_FC1_DFX_BF_FIFO, + HIFC_NON32G_DFX_FC1_DFX_BP_FIFO, + HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO, + HIFC_NON32G_DFX_FC1_ERR_R_RDY, + + /* ERR type 6 */ + HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR, + + HIFC_ERR_VALUE_BUTT +}; + +struct hifc_up_error_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char error_level; + unsigned char error_type; + unsigned char error_value; +}; + +struct hifc_inmbx_clear_node_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_inmbox_get_clear_state_s { + struct hifc_mbox_header_s header; + unsigned int resvd[31]; +}; + +struct hifc_outmbox_get_clear_state_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd; + unsigned char state; /* 1--clear doing. 0---clear done. 
*/ + unsigned char status; /* 0--ok,!0---fail */ + unsigned int resvd[30]; +}; + +#define HIFC_FIP_MODE_VN2VF 0 +#define HIFC_FIP_MODE_VN2VN 1 + +/* get port state */ +struct hifc_inmbox_get_port_info_s { + struct hifc_mbox_header_s header; +}; + +/* save hba info */ +struct hifc_inmbox_save_hba_info_s { + struct hifc_mbox_header_s header; + + unsigned int hba_save_info[254]; + +}; + +struct hifc_outmbox_get_port_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int status : 8; + unsigned int fec_vis_tts_16g : 8; + unsigned int bbscn : 8; + unsigned int loop_credit : 8; + + unsigned int non_loop_rx_credit : 8; + unsigned int non_loop_tx_credit : 8; + unsigned int sfp_speed : 8; + unsigned int present : 8; + +}; + +struct hifc_outmbox_save_hba_info_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd1; + unsigned char rsvd2; + unsigned char status; + unsigned int rsvd3; + unsigned int save_hba_info[252]; +}; + +#define HIFC_VER_ADDR_OFFSET (8) +struct hifc_inmbox_get_fw_version_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_fw_version_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; + + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* Set Fec Enable */ +struct hifc_inmbox_config_fec_s { + struct hifc_mbox_header_s header; + + unsigned char fec_op_code; + unsigned char rsv0; + unsigned short rsv1; +}; + +struct hifc_outmbox_config_fec_sts_s { + struct hifc_mbox_header_s header; + + unsigned short usrsv0; + unsigned char ucrsv1; + unsigned char status; +}; + +struct hifc_inmbox_config_timer_s { + struct hifc_mbox_header_s header; + + unsigned short op_code; + unsigned short fun_id; + unsigned int user_data; +}; + +struct hifc_outmbox_config_timer_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; +}; + +union hifc_outmbox_generic_u { + struct { + struct hifc_mbox_header_s header; + unsigned int rsvd[(HIFC_MBOX_SIZE - HIFC_MBOX_HEADER_SIZE) / + sizeof(unsigned int)]; + } generic; + + struct hifc_outbox_port_switch_sts_s port_switch_sts; + struct hifc_outbox_config_api_sts_s config_api_sts; + struct hifc_outbox_get_chip_info_sts_s get_chip_info_sts; + struct hifc_outmbox_get_reg_info_sts_s get_reg_info_sts; + struct hifc_outmbox_config_login_sts_s config_login_sts; + struct hifc_outmbox_port_reset_sts_s port_reset_sts; + struct hifc_outmbox_get_sfp_info_sts_s get_sfp_info_sts; + struct hifc_outmbox_get_err_code_sts_s get_err_code_sts; + struct hifc_outmbox_get_clear_state_sts_s get_clr_state_sts; + struct hifc_outmbox_get_fw_version_sts_s get_fw_ver_sts; + struct hifc_outmbox_config_fec_sts_s config_fec_sts; + struct hifc_outmbox_config_timer_sts_s timer_config_sts; + struct hifc_outmbox_get_port_info_sts_s get_port_info_sts; + struct unf_flash_data_mgmt_sts_s flash_data_sts; +}; + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac); +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba); +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on); +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act); +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg); +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf); +unsigned int hifc_get_firmware_version(void 
*v_fc_port, void *v_ver); +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bb_credit); +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn); +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_port_current_info(void *v_hba, void *v_port_info); +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out); +unsigned int hifc_get_software_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_port_info(void *v_hba); +unsigned int hifc_rw_reg(void *v_hba, void *v_params); +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info); +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state); +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa); +unsigned int hifc_get_topo_act(void *v_hba, void *v_topo_act); +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg); +unsigned int hifc_config_login_api( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_parms); +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u + *v_out_mbox); +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, + void *v_pri_handle, + unsigned char v_cmd, + void *v_buf_in, + unsigned short v_in_size, + void *v_buf_out, + unsigned short *v_out_size); + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type); +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba); +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in); +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in); +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state); +unsigned short hifc_get_global_base_qpn(void *v_handle); +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode); +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, + int v_opcode, + unsigned int v_user_data); +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in); +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info); +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data); +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.c b/drivers/scsi/huawei/hifc/hifc_cmdq.c new file mode 100644 index 0000000000000000000000000000000000000000..03531017c4128d924b6d5899b69734c4903e8b90 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cmdq.c @@ -0,0 +1,1507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_wq.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" +#include "hifc_cmdq.h" + +#define CMDQ_CMD_TIMEOUT 1000 /* millisecond */ +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define 
CMDQ_DB_INFO_SRC_TYPE_SHIFT 27 +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU + +#define CMDQ_DB_INFO_SET(val, member) \ + (((val) & CMDQ_DB_INFO_##member##_MASK) << \ + CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + (((val) & CMDQ_CTRL_##member##_MASK) \ + << CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) \ + & CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + (((val) & CMDQ_WQE_HEADER_##member##_MASK) \ + << CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \ + & CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 56 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0x1F +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) \ + << SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK \ + << SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 20 +#define WQE_ERRCODE_VAL_MASK 0xF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & 
CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hifc_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base) + HIFC_DB_OFF) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 +#define COMPLETE_LEN 3 +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 +#define CMDQ_WQ_PAGE_SIZE 4096 + +#define WQE_NUM_WQEBBS(wqe_size, wq) \ + ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size)) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hifc_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 + +#define HIFC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \ + atomic_read(&(cmdq_wq)->delta) + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +bool hifc_cmdq_idle(struct hifc_cmdq *cmdq) +{ + struct hifc_wq *wq = cmdq->wq; + + return (atomic_read(&wq->delta) == wq->q_depth ? true : false); +} + +struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmd_buf *cmd_buf; + void *dev; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} + +void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *cmd_buf) +{ + struct hifc_cmdqs *cmdqs; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf\n"); + return; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} + +static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type) +{ + int wqe_size = 0; + + switch (wqe_type) { + case WQE_LCMD_TYPE: + wqe_size = WQE_LCMD_SIZE; + break; + case WQE_SCMD_TYPE: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static int cmdq_get_wqe_size(enum bufdesc_len len) +{ + int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static void cmdq_set_completion(struct hifc_cmdq_completion *complete, + struct hifc_cmd_buf *buf_out) +{ + struct hifc_sge_resp *sge_resp = &complete->sge_resp; + + hifc_set_sge(&sge_resp->sge, buf_out->dma_addr, + HIFC_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hifc_cmdq_wqe_lcmd *wqe, + struct hifc_cmd_buf *buf_in) +{ + hifc_set_sge(&wqe->buf_desc.sge, 
buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_inline_wqe_data(struct hifc_cmdq_inline_wqe *wqe, + const void *buf_in, u32 in_size) +{ + struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_fill_db(struct hifc_cmdq_db *db, + enum hifc_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + CMDQ_DB_INFO_SET(HIFC_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_INFO_SET(HIFC_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hifc_cmdq *cmdq, + enum hifc_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hifc_cmdq_db db; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = cpu_to_be32(db.db_info); + + wmb(); /* write all before the doorbell */ + writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hifc_cmdq_wqe *wqe, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct hifc_ctrl *ctrl; + enum ctrl_sect_len ctrl_len; + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_cmdq_wqe_scmd *wqe_scmd; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HIFC_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hifc_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + case ASYNC_CMD: + 
complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_inline_wqe(struct hifc_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hifc_cmd_buf *buf_out, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_scmd->completion, buf_out); + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); +} + +static void cmdq_update_cmd_status(struct hifc_cmdq *cmdq, u16 prod_idx, + struct hifc_cmdq_wqe *wqe) +{ + struct hifc_cmdq_cmd_info *cmd_info; + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = be32_to_cpu(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp && + cmd_info->cmd_type == HIFC_CMD_TYPE_DIRECT_RESP) + *cmd_info->direct_resp = + cpu_to_be64(wqe_lcmd->completion.direct_resp); +} + +static int hifc_cmdq_sync_timeout_check(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 pi, + enum hifc_mod_type mod, u8 cmd) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + if (!WQE_COMPLETED(ctrl_info)) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EFAULT; + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed, mod: %u, cmd: 0x%x\n", + mod, cmd); + return 0; +} + +static void __clear_cmd_info(struct hifc_cmdq_cmd_info *cmd_info, + struct hifc_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int +cmdq_sync_cmd_timeout_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_cmd_info *cmd_info, + struct hifc_cmdq_cmd_info *saved_cmd_info, + struct hifc_cmdq_wqe *curr_wqe, + enum hifc_mod_type mod, u8 cmd, + u16 curr_prod_idx, u64 curr_msg_id) +{ + int err; + + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command (mod: %u, cmd: 0x%x)has been completed\n", + mod, cmd); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hifc_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx, + mod, cmd); + if (err) + 
cmd_info->cmd_type = HIFC_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = HIFC_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, + "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n", + mod, cmd); + } + + __clear_cmd_info(cmd_info, saved_cmd_info); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return err; +} + +static int cmdq_sync_cmd_direct_resp(struct hifc_cmdq *cmdq, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + u64 *out_param, u32 timeout) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HIFC_CMD_TYPE_DIRECT_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? 
timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo)) { + err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info, + &saved_cmd_info, + curr_wqe, mod, cmd, + curr_prod_idx, curr_msg_id); + + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n", + curr_prod_idx); + return -ETIMEDOUT; + } + +timeout_check_ok: + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_sync_cmd_detail_resp(struct hifc_cmdq *cmdq, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, + u32 timeout) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->cmpt_code = &cmpt_code; + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + hifc_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HIFC_CMD_TYPE_SGE_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? 
timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo)) { + err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info, + &saved_cmd_info, + curr_wqe, mod, cmd, + curr_prod_idx, curr_msg_id); + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n", + curr_prod_idx); + return -ETIMEDOUT; + } + +timeout_check_ok: + + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_async_cmd(struct hifc_cmdq *cmdq, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in) +{ + struct hifc_wq *wq = cmdq->wq; + int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + struct hifc_cmdq_wqe *curr_wqe, wqe; + int wrapped; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= cmdq->wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= cmdq->wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_ASYNC; + + cmdq_set_db(cmdq, HIFC_CMDQ_ASYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_set_arm_bit(struct hifc_cmdq *cmdq, void *buf_in, u16 in_size) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE); + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. 
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb setting arm\n"); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, + wrapped, HIFC_ACK_TYPE_CMDQ, HIFC_MOD_COMM, + CMDQ_SET_ARM_CMD, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_SET_ARM; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_params_valid(void *hwdev, struct hifc_cmd_buf *buf_in) +{ + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr\n"); + return -EINVAL; + } + + if (!buf_in->size || buf_in->size > HIFC_CMDQ_MAX_DATA_SIZE) { + pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 + +static int wait_cmdqs_enable(struct hifc_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & HIFC_CMDQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && + !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, u64 *out_param, + u32 timeout) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) { + pr_err("Invalid CMDQ parameters\n"); + return err; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, out_param, timeout); + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} + +int hifc_cmdq_detail_resp(void *hwdev, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, + u32 timeout) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, buf_out, timeout); + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} + +int hifc_cmdq_async(void *hwdev, enum 
hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + return cmdq_async_cmd(&cmdqs->cmdq[HIFC_CMDQ_ASYNC], ack_type, mod, + cmd, buf_in); +} + +int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmdq *cmdq; + struct hifc_cmdq_arm_bit arm_bit; + enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC; + u16 in_size; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(cmdqs->status & HIFC_CMDQ_ENABLE)) + return -EBUSY; + + if (q_type == HIFC_SET_ARM_CMDQ) { + if (q_id >= HIFC_MAX_CMDQ_TYPES) + return -EFAULT; + + cmdq_type = q_id; + } + /* sq is using interrupt now, so we only need to set arm bit for cmdq, + * remove comment below if need to set sq arm bit + * else + * cmdq_type = HIFC_CMDQ_SYNC; + */ + + cmdq = &cmdqs->cmdq[cmdq_type]; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + in_size = sizeof(arm_bit); + + err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, + "Failed to set arm for q_type: %d, qid %d\n", + q_type, q_id); + return err; + } + + return 0; +} + +static void clear_wqe_complete_bit(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_cmdq_inline_wqe *inline_wqe; + struct hifc_cmdq_wqe_scmd *wqe_scmd; + struct hifc_ctrl *ctrl; + u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info); + int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + int wqe_size = cmdq_get_wqe_size(buf_len); + u16 num_wqebbs; + + if (wqe_size == WQE_LCMD_SIZE) { + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + } else { + inline_wqe = &wqe->inline_wqe; + wqe_scmd = &inline_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = HIFC_CMD_TYPE_NONE; + + wmb(); /* verify wqe is clear */ + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq); + hifc_put_wqe(cmdq->wq, num_wqebbs); +} + +static void cmdq_sync_cmd_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 cons_idx) +{ + u16 prod_idx = cons_idx; + + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, prod_idx, wqe); + + if (cmdq->cmd_infos[prod_idx].cmpt_code) { + *cmdq->cmd_infos[prod_idx].cmpt_code = + CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[prod_idx].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[prod_idx].done) { + complete(cmdq->cmd_infos[prod_idx].done); + cmdq->cmd_infos[prod_idx].done = NULL; + } + + spin_unlock(&cmdq->cmdq_lock); + + clear_wqe_complete_bit(cmdq, wqe, cons_idx); +} + +static void cmdq_async_cmd_handler(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + int addr_sz = sizeof(u64); + + hifc_be32_to_cpu((void *)&buf, addr_sz); + if (buf) + 
hifc_free_cmd_buf(hwdev, (struct hifc_cmd_buf *)buf); + + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static int cmdq_arm_ceq_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + struct hifc_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe; + struct hifc_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd; + struct hifc_ctrl *ctrl = &wqe_scmd->ctrl; + u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe, ci); + + return 0; +} + +#define HIFC_CMDQ_WQE_HEAD_LEN 32 +static void hifc_dump_cmdq_wqe_head(struct hifc_hwdev *hwdev, + struct hifc_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)wqe; + + for (i = 0; i < (HIFC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + data[i], data[i + 1], data[i + 2], + data[i + 3]);/*lint !e679*/ + } +} + +#define CMDQ_CMD_TYPE_TIMEOUT(cmd_type) \ + ((cmd_type) == HIFC_CMD_TYPE_TIMEOUT || \ + (cmd_type) == HIFC_CMD_TYPE_FAKE_TIMEOUT) + +static inline void cmdq_response_handle(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, + enum hifc_cmdq_type cmdq_type, u16 ci) +{ + if (cmdq_type == HIFC_CMDQ_ASYNC) + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + else + cmdq_sync_cmd_handler(cmdq, wqe, ci); +} + +static inline void set_arm_bit(struct hifc_hwdev *hwdev, int set_arm, + enum hifc_cmdq_type cmdq_type) +{ + if (set_arm) + hifc_set_arm_bit(hwdev, HIFC_SET_ARM_CMDQ, cmdq_type); +} + +void hifc_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct hifc_cmdqs *cmdqs = ((struct hifc_hwdev *)handle)->cmdqs; + enum hifc_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); + struct hifc_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hifc_hwdev *hwdev = cmdqs->hwdev; + struct hifc_cmdq_wqe *wqe; + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_ctrl *ctrl; + struct hifc_cmdq_cmd_info *cmd_info; + u32 ctrl_info; + u16 ci; + int set_arm = 1; + + while ((wqe = hifc_read_wqe(cmdq->wq, 1, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + + if (cmd_info->cmd_type == HIFC_CMD_TYPE_NONE) { + set_arm = 1; + break; + } else if (CMDQ_CMD_TYPE_TIMEOUT(cmd_info->cmd_type)) { + if (cmd_info->cmd_type == HIFC_CMD_TYPE_TIMEOUT) { + sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + hifc_dump_cmdq_wqe_head(hwdev, wqe); + } + + set_arm = 1; + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HIFC_CMD_TYPE_SET_ARM) { + /* arm_bit was set until here */ + set_arm = 0; + + if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + break; + } else { + set_arm = 1; + + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the cmdq wqe until we have + * verified the command has been processed and + * written back. 
+ */ + dma_rmb(); + + cmdq_response_handle(hwdev, cmdq, wqe, cmdq_type, ci); + } + } + + set_arm_bit(hwdev, set_arm, cmdq_type); +} + +static void cmdq_init_queue_ctxt(struct hifc_cmdq *cmdq, + struct hifc_cmdq_pages *cmdq_pages, + struct hifc_cmdq_ctxt *cmdq_ctxt) +{ + struct hifc_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + struct hifc_hwdev *hwdev = cmdqs->hwdev; + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); + + pfn = CMDQ_PFN(wq_first_page_paddr); + + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(HIFC_CEQ_ID_CMDQ, EQ_ID) | + CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + /* If only use one page, use 0-level CLA */ + if (cmdq->wq->num_q_pages != 1) { + cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); + + cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev); + cmdq_ctxt->ppf_idx = HIFC_HWIF_PPF_IDX(hwdev->hwif); + cmdq_ctxt->cmdq_id = cmdq->cmdq_type; +} + +static int init_cmdq(struct hifc_cmdq *cmdq, struct hifc_hwdev *hwdev, + struct hifc_wq *wq, enum hifc_cmdq_type q_type) +{ + void __iomem *db_base; + int err = 0; + + cmdq->wq = wq; + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + err = -ENOMEM; + goto cmd_infos_err; + } + + err = hifc_alloc_db_addr(hwdev, &db_base, NULL); + if (err) + goto alloc_db_err; + + cmdq->db_base = (u8 *)db_base; + return 0; + +alloc_db_err: + kfree(cmdq->cmd_infos); + +cmd_infos_err: + + return err; +} + +static void free_cmdq(struct hifc_hwdev *hwdev, struct hifc_cmdq *cmdq) +{ + hifc_free_db_addr(hwdev, cmdq->db_base, NULL); + kfree(cmdq->cmd_infos); +} + +int hifc_set_cmdq_ctxts(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + struct hifc_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0}; + enum hifc_cmdq_type cmdq_type; + u16 in_size; + u16 out_size = sizeof(*cmdq_ctxt); + int err; + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev); + in_size = sizeof(*cmdq_ctxt); + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_CMDQ_CTXT_SET, + cmdq_ctxt, in_size, + &cmdq_ctxt_out, &out_size, 0); + if (err || !out_size || cmdq_ctxt_out.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt_out.status, out_size); + return -EFAULT; + } + } + + cmdqs->status |= HIFC_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq) +{ + struct hifc_cmdq_wqe *wqe; + struct hifc_cmdq_cmd_info *cmdq_info; + u16 ci, wqe_left, i; + u64 buf; + + spin_lock_bh(&cmdq->cmdq_lock); + wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta); + ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx); + for (i = 0; i < wqe_left; i++, ci++) { + ci = 
MASKED_WQE_IDX(cmdq->wq, ci); + cmdq_info = &cmdq->cmd_infos[ci]; + + if (cmdq_info->cmd_type == HIFC_CMD_TYPE_SET_ARM) + continue; + + if (cmdq->cmdq_type == HIFC_CMDQ_ASYNC) { + wqe = hifc_get_wqebb_addr(cmdq->wq, ci); + buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + wqe->wqe_lcmd.buf_desc.saved_async_buf = 0; + + hifc_be32_to_cpu((void *)&buf, sizeof(u64)); + if (buf) + hifc_free_cmd_buf(hwdev, + (struct hifc_cmd_buf *)buf); + } else { + if (cmdq_info->done) { + complete(cmdq_info->done); + cmdq_info->done = NULL; + cmdq_info->cmpt_code = NULL; + cmdq_info->direct_resp = NULL; + cmdq_info->errcode = NULL; + } + } + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + enum hifc_cmdq_type cmdq_type; + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hifc_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq); + } + + return hifc_set_cmdq_ctxts(hwdev); +} + +int hifc_cmdqs_init(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmdq_ctxt *cmdq_ctxt; + enum hifc_cmdq_type type, cmdq_type; + size_t saved_wqs_size; + u32 max_wqe_size; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + saved_wqs_size = HIFC_MAX_CMDQ_TYPES * sizeof(struct hifc_wq); + cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n"); + err = -ENOMEM; + goto alloc_wqs_err; + } + + cmdqs->cmd_buf_pool = dma_pool_create("hifc_cmdq", hwdev->dev_hdl, + HIFC_CMDQ_BUF_SIZE, + HIFC_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + err = -ENOMEM; + goto pool_create_err; + } + + max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE); + err = hifc_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + hwdev->dev_hdl, HIFC_MAX_CMDQ_TYPES, + hwdev->wq_page_size, CMDQ_WQEBB_SIZE, + HIFC_CMDQ_DEPTH, max_wqe_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n"); + goto cmdq_alloc_err; + } + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, + &cmdqs->saved_wqs[cmdq_type], cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", + cmdq_type); + goto init_cmdq_err; + } + + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages, cmdq_ctxt); + } + + err = hifc_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = HIFC_CMDQ_SYNC; + for (; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + hifc_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HIFC_MAX_CMDQ_TYPES); + +cmdq_alloc_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs->saved_wqs); + +alloc_wqs_err: + kfree(cmdqs); + + return err; +} + +void hifc_cmdqs_free(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC; + + cmdqs->status &= ~HIFC_CMDQ_ENABLE; + + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + } + + hifc_cmdq_free(&cmdqs->cmdq_pages, 
cmdqs->saved_wqs, + HIFC_MAX_CMDQ_TYPES); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs->saved_wqs); + + kfree(cmdqs); +} diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.h b/drivers/scsi/huawei/hifc/hifc_cmdq.h new file mode 100644 index 0000000000000000000000000000000000000000..cb2ac81c5edce4e460313061bd48c50ce1df7515 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cmdq.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_CMDQ_H_ +#define HIFC_CMDQ_H_ + +#define HIFC_DB_OFF 0x00000800 + +#define HIFC_SCMD_DATA_LEN 16 + +#define HIFC_CMDQ_DEPTH 4096 + +#define HIFC_CMDQ_BUF_SIZE 2048U +#define HIFC_CMDQ_BUF_HW_RSVD 8 +#define HIFC_CMDQ_MAX_DATA_SIZE \ + (HIFC_CMDQ_BUF_SIZE - HIFC_CMDQ_BUF_HW_RSVD) +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +enum hifc_cmdq_type { + HIFC_CMDQ_SYNC, + HIFC_CMDQ_ASYNC, + HIFC_MAX_CMDQ_TYPES, +}; + +enum hifc_db_src_type { + HIFC_DB_SRC_CMDQ_TYPE, + HIFC_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hifc_cmdq_db_type { + HIFC_DB_SQ_RQ_TYPE, + HIFC_DB_CMDQ_TYPE, +}; + +/* CMDQ WQE CTRLS */ +struct hifc_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hifc_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HIFC_SCMD_DATA_LEN]; +}; + +struct hifc_lcmd_bufdesc { + struct hifc_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hifc_cmdq_db { + u32 db_info; + u32 rsvd; +}; + +struct hifc_status { + u32 status_info; +}; + +struct hifc_ctrl { + u32 ctrl_info; +}; + +struct hifc_sge_resp { + struct hifc_sge sge; + u32 rsvd; +}; + +struct hifc_cmdq_completion { + /* HW Format */ + union { + struct hifc_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hifc_cmdq_wqe_scmd { + struct hifc_cmdq_header header; + struct hifc_cmdq_db db; + struct hifc_status status; + struct hifc_ctrl ctrl; + struct hifc_cmdq_completion completion; + struct hifc_scmd_bufdesc buf_desc; +}; + +struct hifc_cmdq_wqe_lcmd { + struct hifc_cmdq_header header; + struct hifc_status status; + struct hifc_ctrl ctrl; + struct hifc_cmdq_completion completion; + struct hifc_lcmd_bufdesc buf_desc; +}; + +struct hifc_cmdq_inline_wqe { + struct hifc_cmdq_wqe_scmd wqe_scmd; +}; + +struct hifc_cmdq_wqe { + /* HW Format */ + union { + struct hifc_cmdq_inline_wqe inline_wqe; + struct hifc_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hifc_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +struct hifc_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hifc_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_id; + u8 ppf_idx; + + u8 rsvd1[4]; + + struct hifc_cmdq_ctxt_info ctxt_info; +}; + +enum hifc_cmdq_status { + HIFC_CMDQ_ENABLE = BIT(0), +}; + +enum hifc_cmdq_cmd_type { + HIFC_CMD_TYPE_NONE, + HIFC_CMD_TYPE_SET_ARM, + HIFC_CMD_TYPE_DIRECT_RESP, + HIFC_CMD_TYPE_SGE_RESP, + HIFC_CMD_TYPE_ASYNC, + HIFC_CMD_TYPE_TIMEOUT, + HIFC_CMD_TYPE_FAKE_TIMEOUT, +}; + +struct hifc_cmdq_cmd_info { + enum hifc_cmdq_cmd_type cmd_type; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; +}; + +struct hifc_cmdq { + struct hifc_wq *wq; + + enum hifc_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + /* doorbell area */ + u8 __iomem *db_base; + + struct 
hifc_cmdq_ctxt cmdq_ctxt; + + struct hifc_cmdq_cmd_info *cmd_infos; + + struct hifc_hwdev *hwdev; +}; + +struct hifc_cmdqs { + struct hifc_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + + struct hifc_wq *saved_wqs; + + struct hifc_cmdq_pages cmdq_pages; + struct hifc_cmdq cmdq[HIFC_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; +}; + +void hifc_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); + +int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev); + +bool hifc_cmdq_idle(struct hifc_cmdq *cmdq); + +int hifc_cmdqs_init(struct hifc_hwdev *hwdev); + +void hifc_cmdqs_free(struct hifc_hwdev *hwdev); + +void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.c b/drivers/scsi/huawei/hifc/hifc_cqm_main.c new file mode 100644 index 0000000000000000000000000000000000000000..4cd048f1e66266e40e8491cc289f1f1e412a7113 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.c @@ -0,0 +1,694 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" + +#define GET_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define GET_MIN(a, b) (((a) < (b)) ? (a) : (b)) + +static void cqm_capability_init_check_ppf(void *ex_handle, + u32 *total_function_num) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + (handle->cqm_hdl); + + if (cqm_handle->func_attribute.func_type == CQM_PPF) { + *total_function_num = service_capability->host_total_function; + cqm_handle->func_capability.timer_enable = + service_capability->timer_en; + + cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", + *total_function_num); + cqm_info(handle->dev_hdl, "Cap init: timer_enable %d (1: enable; 0: disable)\n", + cqm_handle->func_capability.timer_enable); + } +} + +void cqm_test_mode_init(struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + cqm_handle->func_capability.xid_alloc_mode = + service_capability->test_xid_alloc_mode; + cqm_handle->func_capability.gpa_check_enable = + service_capability->test_gpa_check_enable; +} + +static s32 cqm_service_capability_init_for_each( + struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); + cqm_handle->func_capability.hash_number += + service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num; + cqm_handle->func_capability.hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + cqm_handle->func_capability.qpc_number += + service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num; + cqm_handle->func_capability.qpc_basic_size = + GET_MAX(service_capability->fc_cap.parent_qpc_size, + cqm_handle->func_capability.qpc_basic_size); + cqm_handle->func_capability.qpc_alloc_static = true; + cqm_handle->func_capability.scqc_number += + service_capability->fc_cap.dev_fc_cap.scq_num; + cqm_handle->func_capability.scqc_basic_size = + GET_MAX(service_capability->fc_cap.scqc_size, + cqm_handle->func_capability.scqc_basic_size); + 
cqm_handle->func_capability.srqc_number += + service_capability->fc_cap.dev_fc_cap.srq_num; + cqm_handle->func_capability.srqc_basic_size = + GET_MAX(service_capability->fc_cap.srqc_size, + cqm_handle->func_capability.srqc_basic_size); + cqm_handle->func_capability.lun_number = CQM_LUN_FC_NUM; + cqm_handle->func_capability.lun_basic_size = CQM_LUN_SIZE_8; + cqm_handle->func_capability.taskmap_number = CQM_TASKMAP_FC_NUM; + cqm_handle->func_capability.taskmap_basic_size = PAGE_SIZE; + cqm_handle->func_capability.childc_number += + service_capability->fc_cap.dev_fc_cap.max_child_qpc_num; + cqm_handle->func_capability.childc_basic_size = + GET_MAX(service_capability->fc_cap.child_qpc_size, + cqm_handle->func_capability.childc_basic_size); + cqm_handle->func_capability.pagesize_reorder = CQM_FC_PAGESIZE_ORDER; + + return CQM_SUCCESS; +} + +s32 cqm_service_capability_init(struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + cqm_handle->service.has_register = false; + cqm_handle->service.buf_order = 0; + + if (cqm_service_capability_init_for_each( + cqm_handle, + service_capability) == CQM_FAIL) + return CQM_FAIL; + + return CQM_SUCCESS; +} + +/** + * cqm_capability_init - Initialize capability of cqm function and service, + * need to read information from the configuration management module + * @ex_handle: handle of hwdev + */ +s32 cqm_capability_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + (handle->cqm_hdl); + u32 total_function_num = 0; + int err = 0; + + cqm_capability_init_check_ppf(ex_handle, &total_function_num); + + cqm_handle->func_capability.flow_table_based_conn_number = + service_capability->max_connect_num; + cqm_handle->func_capability.flow_table_based_conn_cache_number = + service_capability->max_stick2cache_num; + cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", + cqm_handle->func_capability.flow_table_based_conn_number, + cqm_handle->func_capability.flow_table_based_conn_cache_number); + + cqm_handle->func_capability.qpc_reserved = 0; + cqm_handle->func_capability.mpt_reserved = 0; + cqm_handle->func_capability.qpc_alloc_static = false; + cqm_handle->func_capability.scqc_alloc_static = false; + + cqm_handle->func_capability.l3i_number = CQM_L3I_COMM_NUM; + cqm_handle->func_capability.l3i_basic_size = CQM_L3I_SIZE_8; + + cqm_handle->func_capability.timer_number = CQM_TIMER_ALIGN_SCALE_NUM * + total_function_num; + cqm_handle->func_capability.timer_basic_size = CQM_TIMER_SIZE_32; + + if (cqm_service_capability_init(cqm_handle, service_capability) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_service_capability_init)); + err = CQM_FAIL; + goto out; + } + + cqm_test_mode_init(cqm_handle, service_capability); + + cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %d\n", + cqm_handle->func_capability.pagesize_reorder); + cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n", + cqm_handle->func_capability.xid_alloc_mode, + cqm_handle->func_capability.gpa_check_enable); + cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n", + cqm_handle->func_capability.qpc_alloc_static, + cqm_handle->func_capability.scqc_alloc_static); + cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n", + cqm_handle->func_capability.hash_number); + cqm_info(handle->dev_hdl, "Cap init: qpc_number 
0x%x, qpc_reserved 0x%x\n", + cqm_handle->func_capability.qpc_number, + cqm_handle->func_capability.qpc_reserved); + cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x\n", + cqm_handle->func_capability.scqc_number, + cqm_handle->func_capability.scq_reserved); + cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x\n", + cqm_handle->func_capability.srqc_number); + cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n", + cqm_handle->func_capability.mpt_number, + cqm_handle->func_capability.mpt_reserved); + cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n", + cqm_handle->func_capability.gid_number, + cqm_handle->func_capability.lun_number); + cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n", + cqm_handle->func_capability.taskmap_number, + cqm_handle->func_capability.l3i_number); + cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x\n", + cqm_handle->func_capability.timer_number); + cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n", + cqm_handle->func_capability.xid2cid_number, + cqm_handle->func_capability.reorder_number); + + return CQM_SUCCESS; + +out: + if (cqm_handle->func_attribute.func_type == CQM_PPF) + cqm_handle->func_capability.timer_enable = 0; + + return err; +} + +/** + * cqm_init - Initialize cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + cqm_handle = (struct cqm_handle_s *)kmalloc(sizeof(struct cqm_handle_s), + GFP_KERNEL | __GFP_ZERO); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_ALLOC_FAIL(cqm_handle)); + /* Clear memory to prevent other systems' memory from being cleared */ + memset(cqm_handle, 0, sizeof(struct cqm_handle_s)); + + cqm_handle->ex_handle = handle; + cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl); + + handle->cqm_hdl = (void *)cqm_handle; + + /* Clear statistics */ + memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct hifc_cqm_stats)); + + /* Read information of vf or pf */ + cqm_handle->func_attribute = handle->hwif->attr; + cqm_info(handle->dev_hdl, "Func init: function type %d\n", + cqm_handle->func_attribute.func_type); + + /* Read ability from configuration management module */ + ret = cqm_capability_init(ex_handle); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_capability_init)); + goto err1; + } + + /* Initialize entries of memory table such as BAT/CLA/bitmap */ + if (cqm_mem_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); + goto err1; + } + + /* Initialize event callback */ + if (cqm_event_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init)); + goto err2; + } + + /* Initialize doorbell */ + if (cqm_db_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init)); + goto err3; + } + + /* The timer bitmap is set directly from the beginning through CQM, + * no longer set/clear the bitmap through ifconfig up/down + */ + if (hifc_func_tmr_bitmap_set(ex_handle, 1) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n"); + goto err5; + } + + return CQM_SUCCESS; + +err5: + cqm_db_uninit(ex_handle); +err3: + cqm_event_uninit(ex_handle); +err2: + cqm_mem_uninit(ex_handle); +err1: + handle->cqm_hdl = NULL; 
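+ /* The hwdev no longer references the cqm handle at this point, so it can be freed safely on the error path. */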
+ kfree(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_uninit - Deinitialize the cqm, and is called once removing a function + * @ex_handle: handle of hwdev + */ +void cqm_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return); + + /* The timer bitmap is set directly from the beginning through CQM, + * no longer set/clear the bitmap through ifconfig up/down + */ + cqm_info(handle->dev_hdl, "Timer stop: disable timer\n"); + if (hifc_func_tmr_bitmap_set(ex_handle, 0) != CQM_SUCCESS) + cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n"); + + /* Stopping timer, release the resource + * after a delay of one or two milliseconds + */ + if ((cqm_handle->func_attribute.func_type == CQM_PPF) && + (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) { + cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop\n"); + ret = hifc_ppf_tmr_stop(handle); + + if (ret != CQM_SUCCESS) { + cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop, ret=%d\n", + ret); + /* The timer fails to stop + * and does not affect resource release + */ + } + usleep_range(900, 1000); + } + + /* Release hardware doorbell */ + cqm_db_uninit(ex_handle); + + /* Cancel the callback of chipif */ + cqm_event_uninit(ex_handle); + + /* Release all table items + * and require the service to release all objects + */ + cqm_mem_uninit(ex_handle); + + /* Release cqm_handle */ + handle->cqm_hdl = NULL; + kfree(cqm_handle); +} + +/** + * cqm_mem_init - Initialize related memory of cqm, + * including all levels of entries + * @ex_handle: handle of hwdev + */ +s32 cqm_mem_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init)); + return CQM_FAIL; + } + + if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init)); + goto err1; + } + + if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init)); + goto err2; + } + + if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_init)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + cqm_bitmap_uninit(cqm_handle); +err2: + cqm_cla_uninit(cqm_handle); +err1: + cqm_bat_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_mem_uninit - Deinitialize related memory of cqm, + * including all levels of entries + * @ex_handle: handle of hwdev + */ +void cqm_mem_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + cqm_object_table_uninit(cqm_handle); + cqm_bitmap_uninit(cqm_handle); + cqm_cla_uninit(cqm_handle); + cqm_bat_uninit(cqm_handle); +} + +/** + * cqm_event_init - Initialize the event callback of cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_event_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + /* Register ceq and aeq callbacks with chipif */ + if (hifc_aeq_register_swe_cb(ex_handle, + 
HIFC_STATEFULL_EVENT, + cqm_aeq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_event_uninit - Deinitialize the event callback of cqm + * @ex_handle: handle of hwdev + */ +void cqm_event_uninit(void *ex_handle) +{ + (void)hifc_aeq_unregister_swe_cb(ex_handle, HIFC_STATEFULL_EVENT); +} + +/** + * cqm_db_addr_alloc - Apply for a page of hardware doorbell and dwqe, + * with the same index, all obtained are physical addresses + * each function has up to 1K + * @ex_handle: handle of hwdev + * @db_addr: the address of doorbell + * @dwqe_addr: the address of dwqe + */ +s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + CQM_PTR_CHECK_RET(db_addr, return CQM_FAIL, CQM_PTR_NULL(db_addr)); + CQM_PTR_CHECK_RET(dwqe_addr, return CQM_FAIL, CQM_PTR_NULL(dwqe_addr)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt); + + return hifc_alloc_db_addr(ex_handle, db_addr, dwqe_addr); +} + +/** + * cqm_db_init - Initialize doorbell of cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_db_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + /* Assign hardware doorbell for service */ + service = &cqm_handle->service; + + if (cqm_db_addr_alloc(ex_handle, + &service->hardware_db_vaddr, + &service->dwqe_vaddr) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_addr_alloc)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_db_addr_free - Release a page of hardware doorbell and dwqe + * @ex_handle: handle of hwdev + * @db_addr: the address of doorbell + * @dwqe_addr: the address of dwqe + */ +void cqm_db_addr_free(void *ex_handle, void __iomem *db_addr, + void __iomem *dwqe_addr) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt); + + hifc_free_db_addr(ex_handle, db_addr, dwqe_addr); +} + +/** + * cqm_db_uninit - Deinitialize doorbell of cqm + * @ex_handle: handle of hwdev + */ +void cqm_db_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + /* Release hardware doorbell */ + service = &cqm_handle->service; + + cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); +} + +/** + * cqm_aeq_callback - cqm module callback processing of aeq + * @ex_handle: handle of hwdev + * @event: the input type of event + * @data: the input data + */ +u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data) +{ +#define CQM_AEQ_BASE_T_FC 48 +#define CQM_AEQ_BASE_T_FCOE 56 + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + struct service_register_template_s *service_template = NULL; + u8 event_level = FAULT_LEVEL_MAX; + + CQM_PTR_CHECK_RET(ex_handle, return event_level, + CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]); + + 
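+ /* Only events in [CQM_AEQ_BASE_T_FC, CQM_AEQ_BASE_T_FCOE) are dispatched to the registered service aeq_callback below; any other event is logged as a wrong value and CQM_FAIL is returned. */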
cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return event_level, + CQM_PTR_NULL(cqm_handle)); + + if (event >= (u8)CQM_AEQ_BASE_T_FC && + (event < (u8)CQM_AEQ_BASE_T_FCOE)) { + service = &cqm_handle->service; + service_template = &service->service_template; + + if (!service_template->aeq_callback) { + cqm_err(handle->dev_hdl, "Event: service aeq_callback unregistered\n"); + } else { + service_template->aeq_callback( + service_template->service_handle, event, data); + } + + return event_level; + } + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event)); + return CQM_FAIL; +} + +/** + * cqm_service_register - Service driver registers callback template with cqm + * @ex_handle: handle of hwdev + * @service_template: the template of service registration + */ +s32 cqm_service_register(void *ex_handle, + struct service_register_template_s *service_template) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_PTR_NULL(cqm_handle)); + CQM_PTR_CHECK_RET(service_template, return CQM_FAIL, + CQM_PTR_NULL(service_template)); + + service = &cqm_handle->service; + + if (service->has_register == true) { + cqm_err(handle->dev_hdl, "Service register: service has already registered\n"); + return CQM_FAIL; + } + + service->has_register = true; + (void)memcpy((void *)(&service->service_template), + (void *)service_template, + sizeof(struct service_register_template_s)); + + return CQM_SUCCESS; +} + +/** + * cqm_service_unregister - Service driver unregisters its callback template from cqm + * @ex_handle: handle of hwdev + */ +void cqm_service_unregister(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return); + + service = &cqm_handle->service; + + service->has_register = false; + memset(&service->service_template, 0, + sizeof(struct service_register_template_s)); +} + +/** + * cqm_cmd_alloc - Apply for a cmd buffer; the buffer size is fixed at 2K and + * the content is not cleared, so the service must clear it before use + * @ex_handle: handle of hwdev + */ +struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt); + + return (struct cqm_cmd_buf_s *)hifc_alloc_cmd_buf(ex_handle); +} + +/** + * cqm_cmd_free - Free a cmd buffer + * @ex_handle: handle of hwdev + * @cmd_buf: the cmd buffer which needs freeing memory for + */ +void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf), return); + CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf), return); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt); + + hifc_free_cmd_buf(ex_handle, (struct hifc_cmd_buf *)cmd_buf); +} + +/** + *
cqm_send_cmd_box - Send a cmd in box mode; + * the call waits for the command to complete and may sleep + * @ex_handle: handle of hwdev + * @ack_type: the type of ack + * @mod: the mode of cqm send + * @cmd: the input cmd + * @buf_in: the input buffer of cqm_cmd + * @buf_out: the output buffer of cqm_cmd + * @timeout: maximum time to wait for completion + */ +s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd, + struct cqm_cmd_buf_s *buf_in, + struct cqm_cmd_buf_s *buf_out, u32 timeout) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_PTR_NULL(buf_in)); + CQM_PTR_CHECK_RET(buf_in->buf, return CQM_FAIL, CQM_PTR_NULL(buf)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); + + return hifc_cmdq_detail_resp(ex_handle, ack_type, mod, cmd, + (struct hifc_cmd_buf *)buf_in, + (struct hifc_cmd_buf *)buf_out, timeout); +} + +/** + * cqm_ring_hardware_db - Ring the hardware doorbell + * @ex_handle: handle of hwdev + * @service_type: each kernel-mode service is allocated one page of hardware doorbell space + * @db_count: the PI bits beyond 64b, carried in doorbell[7:0] + * @db: doorbell content, laid out by the service; + * any little-endian conversion must already have been done by the service + */ +s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db) +{ + struct hifc_hwdev *handle; + struct cqm_handle_s *cqm_handle; + struct cqm_service_s *service; + + handle = (struct hifc_hwdev *)ex_handle; + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + service = &cqm_handle->service; + + /* Make sure all prior writes are visible before ringing the doorbell */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; + + return CQM_SUCCESS; +} diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.h b/drivers/scsi/huawei/hifc/hifc_cqm_main.h new file mode 100644 index 0000000000000000000000000000000000000000..70b0c9ae060976d06c15b9abf5001fe7f0702340 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __CQM_MAIN_H__ +#define __CQM_MAIN_H__ + +#define CHIPIF_SUCCESS 0 +#define CQM_TIMER_ENABLE 1 + +enum cqm_object_type_e { + CQM_OBJECT_ROOT_CTX = 0, + CQM_OBJECT_SERVICE_CTX, + CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, + CQM_OBJECT_NONRDMA_EMBEDDED_SQ, + CQM_OBJECT_NONRDMA_SRQ, + CQM_OBJECT_NONRDMA_EMBEDDED_CQ, + CQM_OBJECT_NONRDMA_SCQ, +}; + +struct service_register_template_s { + u32 service_type; + u32 srq_ctx_size; /* srq,scq context_size config */ + u32 scq_ctx_size; + void *service_handle; /* ceq/aeq callback function */ + + void (*aeq_callback)(void *service_handle, u8 event_type, u64 val); +}; + +struct cqm_service_s { + bool has_register; + void __iomem *hardware_db_vaddr; + void __iomem *dwqe_vaddr; + u32 buf_order; /* each buf is 2^buf_order pages */ + struct service_register_template_s service_template; +}; + +struct cqm_func_capability_s { + bool qpc_alloc_static; /* Allocate qpc memory dynamically/statically */ + bool scqc_alloc_static; + u8 timer_enable; /* whether the timer is enabled */ + + u32 flow_table_based_conn_number; + u32 flow_table_based_conn_cache_number; /* Maximum number in cache */ + u32 bloomfilter_length; /* Bloomfilter table size, aligned by 64B */ + /* The starting position of the bloomfilter table in the cache */ + u32 bloomfilter_addr; + u32 qpc_reserved; /* Reserved
bits in bitmap */ + u32 mpt_reserved; /* There are also reserved bits in ROCE/IWARP mpt */ + /* All basic_size must be 2^n aligned */ + u32 hash_number; + /* Number of hash buckets, BAT table fill size is + * aligned with 64 buckets, at least 64 + */ + u32 hash_basic_size; + /* Hash bucket size is 64B, including 5 valid + * entries and 1 nxt_entry + */ + u32 qpc_number; + u32 qpc_basic_size; + + /* Note: for cqm specail test */ + u32 pagesize_reorder; + bool xid_alloc_mode; + bool gpa_check_enable; + u32 scq_reserved; + + u32 mpt_number; + u32 mpt_basic_size; + u32 scqc_number; + u32 scqc_basic_size; + u32 srqc_number; + u32 srqc_basic_size; + + u32 gid_number; + u32 gid_basic_size; + u32 lun_number; + u32 lun_basic_size; + u32 taskmap_number; + u32 taskmap_basic_size; + u32 l3i_number; + u32 l3i_basic_size; + u32 childc_number; + u32 childc_basic_size; + u32 child_qpc_id_start; /* Child ctx of FC is global addressing */ + /* The maximum number of child ctx in + * chip is 8096 + */ + u32 childc_number_all_function; + + u32 timer_number; + u32 timer_basic_size; + u32 xid2cid_number; + u32 xid2cid_basic_size; + u32 reorder_number; + u32 reorder_basic_size; +}; + +#define CQM_PF TYPE_PF +#define CQM_PPF TYPE_PPF +#define CQM_BAT_ENTRY_MAX (16) +#define CQM_BAT_ENTRY_SIZE (16) + +struct cqm_buf_list_s { + void *va; + dma_addr_t pa; + u32 refcount; +}; + +struct cqm_buf_s { + struct cqm_buf_list_s *buf_list; + struct cqm_buf_list_s direct; + u32 page_number; /* page_number=2^n buf_number */ + u32 buf_number; /* buf_list node count */ + u32 buf_size; /* buf_size=2^n PAGE_SIZE */ +}; + +struct cqm_bitmap_s { + ulong *table; + u32 max_num; + u32 last; + /* The index that cannot be allocated is reserved in the front */ + u32 reserved_top; + /* Lock for bitmap allocation */ + spinlock_t lock; +}; + +struct completion; +struct cqm_object_s { + u32 service_type; + u32 object_type; /* context,queue,mpt,mtt etc */ + u32 object_size; + /* for queue, ctx, MPT Byte */ + atomic_t refcount; + struct completion free; + void *cqm_handle; +}; + +struct cqm_object_table_s { + struct cqm_object_s **table; + u32 max_num; + rwlock_t lock; +}; + +struct cqm_cla_table_s { + u32 type; + u32 max_buffer_size; + u32 obj_num; + bool alloc_static; /* Whether the buffer is statically allocated */ + u32 cla_lvl; + /* The value of x calculated by the cacheline, used for chip */ + u32 cacheline_x; + /* The value of y calculated by the cacheline, used for chip */ + u32 cacheline_y; + /* The value of z calculated by the cacheline, used for chip */ + u32 cacheline_z; + /* The value of x calculated by the obj_size, used for software */ + u32 x; + /* The value of y calculated by the obj_size, used for software */ + u32 y; + /* The value of z calculated by the obj_size, used for software */ + u32 z; + struct cqm_buf_s cla_x_buf; + struct cqm_buf_s cla_y_buf; + struct cqm_buf_s cla_z_buf; + u32 trunk_order;/* A continuous physical page contains 2^order pages */ + u32 obj_size; + /* Lock for cla buffer allocation and free */ + struct mutex lock; + struct cqm_bitmap_s bitmap; + /* The association mapping table of index and object */ + struct cqm_object_table_s obj_table; +}; + +typedef void (*init_handler)(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap); + +struct cqm_cla_entry_init_s { + u32 type; + init_handler cqm_cla_init_handler; +}; + +struct cqm_bat_table_s { + u32 bat_entry_type[CQM_BAT_ENTRY_MAX]; + u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE]; + struct cqm_cla_table_s entry[CQM_BAT_ENTRY_MAX]; + u32 
bat_size; +}; + +struct cqm_handle_s { + struct hifc_hwdev *ex_handle; + struct pci_dev *dev; + struct hifc_func_attr func_attribute; /* vf or pf */ + struct cqm_func_capability_s func_capability; + struct cqm_service_s service; + struct cqm_bat_table_s bat_table; + + struct list_head node; +}; + +struct cqm_cmd_buf_s { + void *buf; + dma_addr_t dma; + u16 size; +}; + +struct cqm_queue_header_s { + u64 doorbell_record; + u64 ci_record; + u64 rsv1; /* the shared area between driver and ucode */ + u64 rsv2; /* the shared area between driver and ucode */ +}; + +struct cqm_queue_s { + struct cqm_object_s object; + u32 index; /* an embedded queue QP has no index; SRQ and SCQ do */ + void *priv; /* service driver private info */ + u32 current_q_doorbell; + u32 current_q_room; + /* nonrdma: only select q_room_buf_1 for q_room_buf */ + struct cqm_buf_s q_room_buf_1; + struct cqm_buf_s q_room_buf_2; + struct cqm_queue_header_s *q_header_vaddr; + dma_addr_t q_header_paddr; + u8 *q_ctx_vaddr; /* SRQ and SCQ ctx space */ + dma_addr_t q_ctx_paddr; + u32 valid_wqe_num; + /* added for srq */ + u8 *tail_container; + u8 *head_container; + u8 queue_link_mode; /* link, ring */ +}; + +struct cqm_nonrdma_qinfo_s { + struct cqm_queue_s common; + u32 wqe_size; + /* The number of wqe contained in each buf (excluding link wqe); + * for srq, it is the number of wqe contained in 1 container + */ + u32 wqe_per_buf; + u32 q_ctx_size; + /* When different services use different sizes of ctx, a large ctx will + * occupy multiple consecutive indexes of the bitmap + */ + u32 index_count; + u32 container_size; +}; + +/* service context, QPC, mpt */ +struct cqm_qpc_mpt_s { + struct cqm_object_s object; + u32 xid; + dma_addr_t paddr; + void *priv; /* service driver private info */ + u8 *vaddr; +}; + +struct cqm_qpc_mpt_info_s { + struct cqm_qpc_mpt_s common; + /* When different services use different sizes of QPC, large QPC/mpt + * will occupy multiple consecutive indexes of the bitmap + */ + u32 index_count; +}; + +#define CQM_ADDR_COMBINE(high_addr, low_addr) \ + ((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr))) +#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) +#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) +#define CQM_HASH_BUCKET_SIZE_64 (64) +#define CQM_LUN_SIZE_8 (8) +#define CQM_L3I_SIZE_8 (8) +#define CQM_TIMER_SIZE_32 (32) +#define CQM_LUN_FC_NUM (64) +#define CQM_TASKMAP_FC_NUM (4) +#define CQM_L3I_COMM_NUM (64) +#define CQM_TIMER_SCALE_NUM (2*1024) +#define CQM_TIMER_ALIGN_WHEEL_NUM (8) +#define CQM_TIMER_ALIGN_SCALE_NUM \ + (CQM_TIMER_SCALE_NUM*CQM_TIMER_ALIGN_WHEEL_NUM) +#define CQM_FC_PAGESIZE_ORDER (0) +#define CQM_QHEAD_ALIGN_ORDER (6) + +s32 cqm_mem_init(void *ex_handle); +void cqm_mem_uninit(void *ex_handle); +s32 cqm_event_init(void *ex_handle); +void cqm_event_uninit(void *ex_handle); +s32 cqm_db_init(void *ex_handle); +void cqm_db_uninit(void *ex_handle); +s32 cqm_init(void *ex_handle); +void cqm_uninit(void *ex_handle); +s32 cqm_service_register(void *ex_handle, + struct service_register_template_s *service_template); +void cqm_service_unregister(void *ex_handle); +s32 cqm_ring_hardware_db(void *ex_handle, + u32 service_type, + u8 db_count, u64 db); +s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd, + struct cqm_cmd_buf_s *buf_in, + struct cqm_cmd_buf_s *buf_out, + u32 timeout); +u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data); +void cqm_object_delete(struct cqm_object_s *object); +struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle); +void
cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf); +struct cqm_queue_s *cqm_object_fc_srq_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, + u32 wqe_size, + void *object_priv); +struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 object_size, + void *object_priv, + u32 index); +struct cqm_queue_s *cqm_object_nonrdma_queue_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, + u32 wqe_size, + void *object_priv); + +#define CQM_PTR_NULL(x) "%s: "#x" is null\n", __func__ +#define CQM_ALLOC_FAIL(x) "%s: "#x" alloc fail\n", __func__ +#define CQM_MAP_FAIL(x) "%s: "#x" map fail\n", __func__ +#define CQM_FUNCTION_FAIL(x) "%s: "#x" return failure\n", __func__ +#define CQM_WRONG_VALUE(x) "%s: "#x" %u is wrong\n", __func__, (u32)x + +#define cqm_err(dev, format, ...) \ + dev_err(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_warn(dev, format, ...) \ + dev_warn(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_notice(dev, format, ...) \ + dev_notice(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_info(dev, format, ...) \ + dev_info(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_dbg(format, ...) + +#define CQM_PTR_CHECK_RET(ptr, ret, desc) \ + do {\ + if (unlikely(NULL == (ptr))) {\ + pr_err("[CQM]"desc);\ + ret; \ + } \ + } while (0) + +#define CQM_PTR_CHECK_NO_RET(ptr, desc, ret) \ + do {\ + if (unlikely((ptr) == NULL)) {\ + pr_err("[CQM]"desc);\ + ret; \ + } \ + } while (0) +#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \ + do {\ + if (unlikely((expect) != (actual))) {\ + cqm_err(dev_hdl, desc);\ + ret; \ + } \ + } while (0) + +#endif /* __CQM_MAIN_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.c b/drivers/scsi/huawei/hifc/hifc_cqm_object.c new file mode 100644 index 0000000000000000000000000000000000000000..406b13f92e64f4fa4a9bdf81481ca06190e1cf8e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.c @@ -0,0 +1,3599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" +#define common_section + +#define CQM_MOD_CQM 8 +#define CQM_HARDWARE_DOORBELL 1 +/** + * cqm_swab64 - Convert a memory block to another endian by 8 byte basis + * @addr: start address of the memory block + * @cnt: the number of 8 byte basis in the memory block + */ +void cqm_swab64(u8 *addr, u32 cnt) +{ + u32 i = 0; + u64 *temp = (u64 *)addr; + u64 value = 0; + + for (i = 0; i < cnt; i++) { + value = __swab64(*temp); + *temp = value; + temp++; + } +} + +/** + * cqm_swab32 - Convert a memory block to another endian by 4 byte basis + * @addr: start address of the memory block + * @cnt: the number of 4 byte basis in the memory block + */ +void cqm_swab32(u8 *addr, u32 cnt) +{ + u32 i = 0; + u32 *temp = (u32 *)addr; + u32 value = 0; + + for (i = 0; i < cnt; i++) { + value = __swab32(*temp); + *temp = value; + temp++; + } +} + +/** + * cqm_shift - Find the base logarithm of two + * @data: the input data + */ +s32 cqm_shift(u32 data) +{ + s32 shift = -1; + + do { + data >>= 1; + shift++; + } while (data); + + return shift; +} + +/** + * cqm_check_align - Check whether the 
data is aligned as the base of 2^n + * @data: the input data + */ +bool cqm_check_align(u32 data) +{ + if (data == 0) + return false; + + do { + /* If data can be divided exactly by 2, + * it right shifts one bit + */ + if ((data & 0x1) == 0) { + data >>= 1; + } else { + /* If data can not be divided exactly by 2 + * it is not the base of 2^n,return false + */ + return false; + } + } while (data != 1); + + return true; +} + +/** + * cqm_kmalloc_align - Alloc memory whose start address is aligned as the basis + * of 2^n + * @size: the size of memory allocated + * @flags: the type of memory allocated + * @align_order: the basis for aligning + */ +static void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) +{ + void *orig_addr = NULL; + void *align_addr = NULL; + void *index_addr = NULL; + + orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), + flags); + if (!orig_addr) + return NULL; + + index_addr = (void *)((char *)orig_addr + sizeof(void *)); + align_addr = (void *)((((u64)index_addr + + ((u64)1 << align_order) - 1) >> align_order) << align_order); + + /* Record the original memory address for memory release. */ + index_addr = (void *)((char *)align_addr - sizeof(void *)); + *(void **)index_addr = orig_addr; + + cqm_dbg("allocate %lu bytes aligned address: %p, original address: %p\n", + size, align_addr, orig_addr); + + return align_addr; +} + +/** + * cqm_kfree_align - Free memory whose start address is aligned as the basis of + * 2^n + * @addr: aligned address which would be free + */ +static void cqm_kfree_align(void *addr) +{ + void *index_addr = NULL; + + /* Release original memory address */ + index_addr = (void *)((char *)addr - sizeof(void *)); + + cqm_dbg("free aligned address: %p, original address: %p\n", + addr, *(void **)index_addr); + + kfree(*(void **)index_addr); +} + +/** + * cqm_buf_alloc_page - Alloc total pages memory for buffers + * @cqm_handle: handle of cqm + * @buf: the buffer which needs allocating memory for + */ +s32 cqm_buf_alloc_page(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + u32 order = 0; + void *va = NULL; + s32 i = 0; + + order = get_order(buf->buf_size); + + /*Here to allocate for every buffer's page for non-ovs*/ + for (i = 0; i < (s32)buf->buf_number; i++) { + va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!va) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page)); + break; + } + /* Pages should be initialized to 0 after applied + * especially related to the hash table + */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + i--; + for (; i >= 0; i--) { + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc_map - Buffer pci mapping + * @cqm_handle: handle of cqm + * @buf: the buffer which needs map + */ +s32 cqm_buf_alloc_map(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + s32 i = 0; + void *va = NULL; + + for (i = 0; i < (s32)buf->buf_number; i++) { + va = buf->buf_list[i].va; + buf->buf_list[i].pa = + pci_map_single(dev, va, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list)); + break; + } + } + + if (i != buf->buf_number) { + i--; + for (; i >= 0; i--) { + pci_unmap_single(dev, 
buf->buf_list[i].pa, + buf->buf_size, PCI_DMA_BIDIRECTIONAL); + } + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc_direct - Buffer pci direct remapping + * @cqm_handle: handle of cqm + * @buf: the buffer which needs remap + */ +s32 cqm_buf_alloc_direct(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, bool direct) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct page **pages = NULL; + u32 order = 0; + u32 i = 0; + u32 j = 0; + + order = get_order(buf->buf_size); + + if (direct == true) { + pages = (struct page **) + vmalloc(sizeof(struct page *) * buf->page_number); + if (!pages) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages)); + return CQM_FAIL; + } + + for (i = 0; i < buf->buf_number; i++) { + for (j = 0; j < ((u32)1 << order); j++) { + pages[(i << order) + j] = (struct page *) + (void *)virt_to_page( + (u8 *)(buf->buf_list[i].va) + + (PAGE_SIZE * j)); + } + } + + /*lint -save -e648 + *Shield alarm for kernel functions' vmapping + */ + buf->direct.va = vmap(pages, buf->page_number, + VM_MAP, PAGE_KERNEL); + /*lint -restore*/ + vfree(pages); + if (!buf->direct.va) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va)); + return CQM_FAIL; + } + } else { + buf->direct.va = NULL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc - Allocate for buffer and dma for the struct cqm_buf_s + * @cqm_handle: handle of cqm + * @buf: the buffer which needs allocating memory for and dma + */ +s32 cqm_buf_alloc(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, bool direct) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + /* Allocate for the descriptor space of buffer lists */ + buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + CQM_PTR_CHECK_RET(buf->buf_list, return CQM_FAIL, + CQM_ALLOC_FAIL(buf_list)); + memset(buf->buf_list, 0, + buf->buf_number * sizeof(struct cqm_buf_list_s)); + + /* Allocate for every buffer's page */ + if (cqm_buf_alloc_page(cqm_handle, buf) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_page)); + goto err1; + } + + /* Buffer pci remapping */ + if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_map)); + goto err2; + } + + /* Buffer pci mapping */ + if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + for (i = 0; i < (s32)buf->buf_number; i++) { + pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + } +err2: + for (i = 0; i < (s32)buf->buf_number; i++) { + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } +err1: + vfree(buf->buf_list); + buf->buf_list = NULL; + return CQM_FAIL; +} + +/** + * cqm_cla_cache_invalid - Set the chip logical address cache invalid + * @cqm_handle: handle of cqm + * @gpa: global physical address + * @cache_size: chip cache size + */ +s32 cqm_cla_cache_invalid(struct cqm_handle_s *cqm_handle, dma_addr_t gpa, + u32 cache_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_cmd_buf_s *buf_in = NULL; + struct cqm_cla_cache_invalid_cmd_s *cmd = NULL; + s32 ret = CQM_FAIL; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, + CQM_ALLOC_FAIL(buf_in)); + 
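+ /* Build the invalidate command in the 2K cmd buffer, convert it to big endian and send it over the cmdq; the buffer is freed at the end regardless of the result. */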
buf_in->size = sizeof(struct cqm_cla_cache_invalid_cmd_s); + + /* Fill command format, and turn into big endian */ + cmd = (struct cqm_cla_cache_invalid_cmd_s *)(buf_in->buf); + cmd->cache_size = cache_size; + cmd->gpa_h = CQM_ADDR_HI(gpa); + cmd->gpa_l = CQM_ADDR_LW(gpa); + + cqm_swab32((u8 *)cmd, + (sizeof(struct cqm_cla_cache_invalid_cmd_s) >> 2)); + + /* cmdq send a cmd */ + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_CMD_ACK_TYPE_CMDQ, + CQM_MOD_CQM, CQM_CMD_T_CLA_CACHE_INVALID, + buf_in, NULL, CQM_CMD_TIMEOUT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Cla cache invalid: cqm_send_cmd_box_ret=%d\n", + ret); + cqm_err(handle->dev_hdl, "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->cache_size); + } + + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return ret; +} + +/** + * cqm_buf_free - Free buffer space and dma for the struct cqm_buf_s + * @buf: the buffer which needs freeing memory for + * @dev: specific pci device + */ +void cqm_buf_free(struct cqm_buf_s *buf, struct pci_dev *dev) +{ + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + if (buf->buf_list) { + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (buf->buf_list[i].va) { + pci_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + } + + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +/** + * __free_cache_inv - Free cache and make buffer list invalid + * @cqm_handle: handle of cqm + * @buf: the buffer which needs freeing memory for + * @inv_flag: invalid or not + * @order:the basis for aligning + * @buf_idx:buffer index + */ +static void __free_cache_inv(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, s32 *inv_flag, + u32 order, s32 buf_idx) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (handle->chip_present_flag) { + *inv_flag = cqm_cla_cache_invalid(cqm_handle, + buf->buf_list[buf_idx].pa, + PAGE_SIZE << order); + if (*inv_flag != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", + *inv_flag); + } + } + + pci_unmap_single(cqm_handle->dev, buf->buf_list[buf_idx].pa, + buf->buf_size, PCI_DMA_BIDIRECTIONAL); + + free_pages((unsigned long)(buf->buf_list[buf_idx].va), order); + + buf->buf_list[buf_idx].va = NULL; +} + +/** + * cqm_buf_free_cache_inv - Free cache and make buffer list invalid + * @cqm_handle: handle of cqm + * @buf: the buffer which needs freeing memory for + * @inv_flag: invalid or not + */ +void cqm_buf_free_cache_inv(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, s32 *inv_flag) +{ + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + if (buf->buf_list) { + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (buf->buf_list[i].va) { + __free_cache_inv(cqm_handle, buf, + inv_flag, order, i); + } + } + + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +#define bat_cla_section + +/** + * cqm_bat_update - Send cmds to the tile to update the BAT table through cmdq + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bat_update(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + 
struct cqm_cmd_buf_s *buf_in = NULL; + s32 ret = CQM_FAIL; + struct cqm_bat_update_cmd_s *bat_update_cmd = NULL; + + /* Allocate a cmd and fill */ + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); + buf_in->size = sizeof(struct cqm_bat_update_cmd_s); + + bat_update_cmd = (struct cqm_bat_update_cmd_s *)(buf_in->buf); + bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size; + bat_update_cmd->offset = 0; + memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat, + bat_update_cmd->byte_len); + + /*Big endian conversion*/ + cqm_swab32((u8 *)bat_update_cmd, + sizeof(struct cqm_bat_update_cmd_s) >> 2); + + /* send a cmd */ + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_CMD_ACK_TYPE_CMDQ, CQM_MOD_CQM, + CQM_CMD_T_BAT_UPDATE, buf_in, + NULL, CQM_CMD_TIMEOUT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Bat update: send_cmd_box ret=%d\n", + ret); + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_FAIL; + } + + /* Free a cmd */ + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + + return CQM_SUCCESS; +} + +s32 cqm_bat_init_ft(struct cqm_handle_s *cqm_handle, + struct cqm_bat_table_s *bat_table, + enum func_type function_type) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (function_type == CQM_PF || function_type == CQM_PPF) { + bat_table->bat_entry_type[0] = CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[1] = CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[2] = CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[3] = CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[4] = CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[5] = CQM_BAT_ENTRY_T_TASKMAP; + bat_table->bat_entry_type[6] = CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[7] = CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[8] = CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[9] = CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[10] = CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = CQM_BAT_SIZE_FT_PF; + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_bat_init - Initialize the BAT table, only select the items to be + * initialized and arrange the entry order, the content of the BAT table entry + * needs to be filled after the CLA allocation + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bat_init(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + u32 i = 0; + + memset(bat_table, 0, sizeof(struct cqm_bat_table_s)); + + /* Initialize the type of each bat entry */ + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + if (cqm_bat_init_ft(cqm_handle, bat_table, + cqm_handle->func_attribute.func_type) == CQM_FAIL) { + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_bat_uninit - Deinitialize BAT table + * @cqm_handle: cqm handle + */ +void cqm_bat_uninit(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); + + /* Notify the chip to refresh the BAT table */ + if (cqm_bat_update(cqm_handle) != CQM_SUCCESS) + 
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); +} + +static void cqm_bat_config_entry_size( + struct cqm_cla_table_s *cla_table, + struct cqm_bat_entry_standerd_s *bat_entry_standerd) +{ + /* Except for QPC of 256/512/1024, the others are all cacheline 256B, + * and the conversion will be done inside the chip + */ + if (cla_table->obj_size > CQM_CHIP_CACHELINE) { + if (cla_table->obj_size == 512) { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; + } else { + bat_entry_standerd->entry_size = + CQM_BAT_ENTRY_SIZE_1024; + } + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cla_table->obj_size; + } else { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / CQM_CHIP_CACHELINE; + } +} + +void cqm_bat_fill_cla_std_entry(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u8 *entry_base_addr, u32 entry_type, + u8 gpa_check_enable) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_entry_standerd_s *bat_entry_standerd = NULL; + dma_addr_t pa = 0; + + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n", + cla_table->type); + return; + } + + bat_entry_standerd = (struct cqm_bat_entry_standerd_s *)entry_base_addr; + cqm_bat_config_entry_size(cla_table, bat_entry_standerd); + bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; + + bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE; + bat_entry_standerd->z = cla_table->cacheline_z; + bat_entry_standerd->y = cla_table->cacheline_y; + bat_entry_standerd->x = cla_table->cacheline_x; + bat_entry_standerd->cla_level = cla_table->cla_lvl; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) + pa = cla_table->cla_z_buf.buf_list[0].pa; + else if (cla_table->cla_lvl == CQM_CLA_LVL_1) + pa = cla_table->cla_y_buf.buf_list[0].pa; + else + pa = cla_table->cla_x_buf.buf_list[0].pa; + + bat_entry_standerd->cla_gpa_h = CQM_ADDR_HI(pa); + if (entry_type == CQM_BAT_ENTRY_T_REORDER) { + /* Reorder does not support GPA validity check */ + bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa); + } else { + /* GPA is valid when gpa[0]=1 */ + bat_entry_standerd->cla_gpa_l = + CQM_ADDR_LW(pa) | gpa_check_enable; + } +} + +static void cqm_bat_fill_cla_cfg(struct cqm_handle_s *cqm_handle, + u8 *entry_base_addr) +{ + struct cqm_bat_entry_cfg_s *bat_entry_cfg = + (struct cqm_bat_entry_cfg_s *)entry_base_addr; + + bat_entry_cfg->cur_conn_cache = 0; + bat_entry_cfg->max_conn_cache = + cqm_handle->func_capability.flow_table_based_conn_cache_number; + bat_entry_cfg->cur_conn_num_h_4 = 0; + bat_entry_cfg->cur_conn_num_l_16 = 0; + bat_entry_cfg->max_conn_num = + cqm_handle->func_capability.flow_table_based_conn_number; + /* Align by 64 buckets, shift right 6 bits */ + if ((cqm_handle->func_capability.hash_number >> 6) != 0) { + /* After shift right 6 bits, the value should - 1 for the hash + * value + */ + bat_entry_cfg->bucket_num = + ((cqm_handle->func_capability.hash_number >> 6) - 1); + } + if (cqm_handle->func_capability.bloomfilter_length != 0) { + bat_entry_cfg->bloom_filter_len = + cqm_handle->func_capability.bloomfilter_length - 1; + bat_entry_cfg->bloom_filter_addr = + cqm_handle->func_capability.bloomfilter_addr; + } +} + +static void cqm_bat_fill_cla_taskmap(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u8 *entry_base_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_entry_taskmap_s 
*bat_entry_taskmap = + (struct cqm_bat_entry_taskmap_s *)entry_base_addr; + if (cqm_handle->func_capability.taskmap_number != 0) { + bat_entry_taskmap->gpa0_h = + (u32)(cla_table->cla_z_buf.buf_list[0].pa >> 32); + bat_entry_taskmap->gpa0_l = + (u32)(cla_table->cla_z_buf.buf_list[0].pa & 0xffffffff); + + bat_entry_taskmap->gpa1_h = + (u32)(cla_table->cla_z_buf.buf_list[1].pa >> 32); + bat_entry_taskmap->gpa1_l = + (u32)(cla_table->cla_z_buf.buf_list[1].pa & 0xffffffff); + + bat_entry_taskmap->gpa2_h = + (u32)(cla_table->cla_z_buf.buf_list[2].pa >> 32); + bat_entry_taskmap->gpa2_l = + (u32)(cla_table->cla_z_buf.buf_list[2].pa & 0xffffffff); + + bat_entry_taskmap->gpa3_h = + (u32)(cla_table->cla_z_buf.buf_list[3].pa >> 32); + bat_entry_taskmap->gpa3_l = + (u32)(cla_table->cla_z_buf.buf_list[3].pa & 0xffffffff); + + cqm_info(handle->dev_hdl, "Cla alloc: taskmap bat entry: 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x\n", + bat_entry_taskmap->gpa0_h, bat_entry_taskmap->gpa0_l, + bat_entry_taskmap->gpa1_h, bat_entry_taskmap->gpa1_l, + bat_entry_taskmap->gpa2_h, bat_entry_taskmap->gpa2_l, + bat_entry_taskmap->gpa3_h, bat_entry_taskmap->gpa3_l); + } +} + +/** + * cqm_bat_fill_cla - Fill the base address of the cla table into the bat table + * @cqm_handle: cqm handle + */ +void cqm_bat_fill_cla(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + u32 entry_type = CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = NULL; + u32 i = 0; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + /* Fill each item according to the arranged BAT table */ + entry_base_addr = bat_table->bat; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type == CQM_BAT_ENTRY_T_CFG) { + cqm_bat_fill_cla_cfg(cqm_handle, entry_base_addr); + entry_base_addr += sizeof(struct cqm_bat_entry_cfg_s); + } else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) { + cqm_bat_fill_cla_taskmap(cqm_handle, + &bat_table->entry[i], + entry_base_addr); + entry_base_addr += + sizeof(struct cqm_bat_entry_taskmap_s); + } else if ((entry_type == CQM_BAT_ENTRY_T_INVALID) || + ((entry_type == CQM_BAT_ENTRY_T_TIMER) && + (cqm_handle->func_attribute.func_type != CQM_PPF))) { + /* When entry_type is invalid, or the timer entry under + * PF does not need to apply for memory and bat filling + */ + entry_base_addr += CQM_BAT_ENTRY_SIZE; + } else { + cla_table = &bat_table->entry[i]; + cqm_bat_fill_cla_std_entry(cqm_handle, cla_table, + entry_base_addr, entry_type, + gpa_check_enable); + entry_base_addr += + sizeof(struct cqm_bat_entry_standerd_s); + } + /* Checks if entry_base_addr is out of bounds */ + if (entry_base_addr >= + (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) + break; + } +} + +static void cqm_cla_xyz_cacheline_lvl1(struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + s32 shift = 0; + + if (cla_table->obj_size >= CQM_CHIP_CACHELINE) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE); + cla_table->cacheline_z = shift ? 
(shift - 1) : (shift); + cla_table->cacheline_y = CQM_MAX_INDEX_BIT; + cla_table->cacheline_x = 0; + } +} + +s32 cqm_cla_xyz_lvl1(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = NULL; + struct cqm_buf_s *cla_z_buf = NULL; + dma_addr_t *base = NULL; + s32 shift = 0; + u32 i = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + cla_table->cla_lvl = CQM_CLA_LVL_1; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = shift ? (shift - 1) : (shift); + cla_table->y = CQM_MAX_INDEX_BIT; + cla_table->x = 0; + cqm_cla_xyz_cacheline_lvl1(cla_table, trunk_size); + + /* Allocate y buf space */ + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = 1; + cla_y_buf->page_number = cla_y_buf->buf_number << + cla_table->trunk_order; + ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false); + + CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL, + CQM_ALLOC_FAIL(lvl_1_y_buf)); + + /* Allocate z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) / + trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << + cla_table->trunk_order; + /* Requires static allocation of all buffer space */ + if (cla_table->alloc_static == true) { + if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); + cqm_buf_free(cla_y_buf, cqm_handle->dev); + return CQM_FAIL; + } + + /* Fill gpa of z buf list into y buf */ + base = (dma_addr_t *)(cla_y_buf->buf_list->va); + for (i = 0; i < cla_z_buf->buf_number; i++) { + /*gpa[0]=1 means this GPA is valid*/ + *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian conversion */ + cqm_swab64((u8 *)(cla_y_buf->buf_list->va), + cla_z_buf->buf_number); + } else { + /* Only initialize and allocate buf list space, buffer spaces + * are dynamically allocated in service + */ + cla_z_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_z_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); + cqm_buf_free(cla_y_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s)); + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_yz_lvl2_static(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *cla_y_buf, + struct cqm_buf_s *cla_z_buf, + u8 gpa_check_enable) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + dma_addr_t *base = NULL; + u32 i = 0; + + if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); + return CQM_FAIL; + } + + /* The virtual address of y buf is remapped for software access */ + if (cqm_buf_alloc(cqm_handle, cla_y_buf, true) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); + cqm_buf_free(cla_z_buf, cqm_handle->dev); + return CQM_FAIL; + } + + /* Fill gpa of z buf list into y buf */ + base = (dma_addr_t *)(cla_y_buf->direct.va); + for (i = 0; i < cla_z_buf->buf_number; i++) { + /*gpa[0]=1 means this GPA is valid*/ + *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian 
conversion */ + cqm_swab64((u8 *)(cla_y_buf->direct.va), cla_z_buf->buf_number); + + return CQM_SUCCESS; +} + +static void cqm_cla_yz_lvl2_init_cacheline(struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + s32 shift = 0; + + if (cla_table->obj_size >= CQM_CHIP_CACHELINE) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE); + cla_table->cacheline_z = shift ? (shift - 1) : (shift); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->cacheline_y = cla_table->cacheline_z + shift; + cla_table->cacheline_x = CQM_MAX_INDEX_BIT; + } +} + +s32 cqm_cla_xyz_lvl2(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = NULL; + struct cqm_buf_s *cla_y_buf = NULL; + struct cqm_buf_s *cla_z_buf = NULL; + dma_addr_t *base = NULL; + s32 shift = 0; + u32 i = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + cla_table->cla_lvl = CQM_CLA_LVL_2; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = shift ? (shift - 1) : (shift); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->y = cla_table->z + shift; + cla_table->x = CQM_MAX_INDEX_BIT; + + cqm_cla_yz_lvl2_init_cacheline(cla_table, trunk_size); + + /* Allocate x buf space */ + cla_x_buf = &cla_table->cla_x_buf; + cla_x_buf->buf_size = trunk_size; + cla_x_buf->buf_number = 1; + cla_x_buf->page_number = cla_x_buf->buf_number << + cla_table->trunk_order; + + ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); + CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL, + CQM_ALLOC_FAIL(lvl_2_x_buf)); + + /* Allocate y buf and z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) / + trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << + cla_table->trunk_order; + + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = + (ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), + trunk_size)) / trunk_size; + + cla_y_buf->page_number = cla_y_buf->buf_number << + cla_table->trunk_order; + + /* Requires static allocation of all buffer space */ + if (cla_table->alloc_static == true) { + if (cqm_cla_yz_lvl2_static(cqm_handle, + cla_y_buf, + cla_z_buf, + gpa_check_enable) == CQM_FAIL) { + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + /* Fill gpa of y buf list into x buf */ + base = (dma_addr_t *)(cla_x_buf->buf_list->va); + for (i = 0; i < cla_y_buf->buf_number; i++) { + /* gpa[0]=1 means this GPA is valid */ + *base = (cla_y_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian conversion */ + cqm_swab64((u8 *)(cla_x_buf->buf_list->va), + cla_y_buf->buf_number); + } else { + /* Only initialize and allocate buf list space, buffer spaces + * are allocated in service + */ + cla_z_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_z_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s)); + + 
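+ /* In the non-static case only the y/z buffer descriptor lists are allocated here; the actual pages are allocated later by the service when objects are created. */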
cla_y_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_y_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + if (!cla_y_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); + cqm_buf_free(cla_z_buf, cqm_handle->dev); + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_y_buf->buf_list, 0, + cla_y_buf->buf_number * sizeof(struct cqm_buf_list_s)); + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_xyz_check(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (cla_table->obj_num == 0) { + /* If the capability is set to 0, the CLA does not need to be + * initialized and exits directly + */ + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n", + cla_table->type); + return CQM_SUCCESS; + } + + /* Check whether obj_size is aligned with 2^n, and error is reported in + * case of 0 and 1 + */ + if (cqm_check_align(cla_table->obj_size) == false) { + cqm_err(handle->dev_hdl, "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_cla_xyz - Calculate how many levels of cla tables and allocate space + * for each level of cla tables + * @cqm_handle: cqm handle + * @cla_table: cla table + * Return: 0 - success, negative - failure + */ +s32 cqm_cla_xyz(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_z_buf = NULL; + u32 trunk_size = 0; + s32 ret = CQM_FAIL; + + if (cqm_cla_xyz_check(cqm_handle, cla_table) == CQM_FAIL) + return CQM_FAIL; + + trunk_size = PAGE_SIZE << cla_table->trunk_order; + + if (trunk_size < cla_table->obj_size) { + cqm_err(handle->dev_hdl, "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + /* Level 0 CLA: The buffer occupies little space, and can be assigned to + * cla_z_buf during initialization + */ + if (cla_table->max_buffer_size <= trunk_size) { + cla_table->cla_lvl = CQM_CLA_LVL_0; + + cla_table->z = CQM_MAX_INDEX_BIT; + cla_table->y = 0; + cla_table->x = 0; + + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + + /* Allocate z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = 1; + cla_z_buf->page_number = + cla_z_buf->buf_number << cla_table->trunk_order; + ret = cqm_buf_alloc(cqm_handle, cla_z_buf, false); + CQM_CHECK_EQUAL_RET( + handle->dev_hdl, ret, CQM_SUCCESS, + return CQM_FAIL, CQM_ALLOC_FAIL(lvl_0_z_buf)); + + } else if (cla_table->max_buffer_size <= + (trunk_size * (trunk_size / sizeof(dma_addr_t)))) { + /* Level 1 CLA: Cla_y_buf is allocated during initialization, + * and cla_z_buf can be allocated dynamically + */ + if (cqm_cla_xyz_lvl1(cqm_handle, + cla_table, trunk_size) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1)); + return CQM_FAIL; + } + } else if (cla_table->max_buffer_size <= + (trunk_size * (trunk_size / sizeof(dma_addr_t)) * + (trunk_size / sizeof(dma_addr_t)))) { + /* Level 2 CLA: Cla_x_buf is allocated during initialization, + * and cla_y_buf and cla_z_buf can be dynamically allocated + */ + if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2)); + 
return CQM_FAIL; + } + } else { + cqm_err(handle->dev_hdl, "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n", + cla_table->max_buffer_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static void cqm_bat_entry_hash_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->hash_number * + capability->hash_basic_size; + cla_table->obj_size = capability->hash_basic_size; + cla_table->obj_num = capability->hash_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_qpc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle; + struct hifc_hwdev *hwdev_handle = handle->ex_handle; + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->qpc_number * + capability->qpc_basic_size; + cla_table->obj_size = capability->qpc_basic_size; + cla_table->obj_num = capability->qpc_number; + cla_table->alloc_static = capability->qpc_alloc_static; + cqm_info(hwdev_handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n", + cla_table->alloc_static); +} + +static void cqm_bat_entry_mpt_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->mpt_number * + capability->mpt_basic_size; + cla_table->obj_size = capability->mpt_basic_size; + cla_table->obj_num = capability->mpt_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_scqc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle; + struct hifc_hwdev *hwdev_handle = handle->ex_handle; + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->scqc_number * + capability->scqc_basic_size; + cla_table->obj_size = capability->scqc_basic_size; + cla_table->obj_num = capability->scqc_number; + cla_table->alloc_static = capability->scqc_alloc_static; + cqm_info(hwdev_handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n", + cla_table->alloc_static); +} + +static void cqm_bat_entry_srqc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->srqc_number * + capability->srqc_basic_size; + cla_table->obj_size = capability->srqc_basic_size; + cla_table->obj_num = capability->srqc_number; + cla_table->alloc_static = false; +} + +static void cqm_bat_entry_gid_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->max_buffer_size = capability->gid_number * + capability->gid_basic_size; + cla_table->trunk_order = (u32)cqm_shift( + ALIGN( + cla_table->max_buffer_size, + PAGE_SIZE) / PAGE_SIZE); + cla_table->obj_size = capability->gid_basic_size; + cla_table->obj_num = 
capability->gid_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_lun_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->lun_number * + capability->lun_basic_size; + cla_table->obj_size = capability->lun_basic_size; + cla_table->obj_num = capability->lun_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_taskmap_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->taskmap_number * + capability->taskmap_basic_size; + cla_table->obj_size = capability->taskmap_basic_size; + cla_table->obj_num = capability->taskmap_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_l3i_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->l3i_number * + capability->l3i_basic_size; + cla_table->obj_size = capability->l3i_basic_size; + cla_table->obj_num = capability->l3i_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_childc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->childc_number * + capability->childc_basic_size; + cla_table->obj_size = capability->childc_basic_size; + cla_table->obj_num = capability->childc_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_timer_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->timer_number * + capability->timer_basic_size; + cla_table->obj_size = capability->timer_basic_size; + cla_table->obj_num = capability->timer_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_xid2cid_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->xid2cid_number * + capability->xid2cid_basic_size; + cla_table->obj_size = capability->xid2cid_basic_size; + cla_table->obj_num = capability->xid2cid_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_reoder_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->reorder_number * + capability->reorder_basic_size; + cla_table->obj_size = capability->reorder_basic_size; + cla_table->obj_num = capability->reorder_number; + cla_table->alloc_static = true; +} + +struct cqm_cla_entry_init_s cqm_cla_entry_init_tbl[] = { + {CQM_BAT_ENTRY_T_HASH, cqm_bat_entry_hash_init}, + 
{CQM_BAT_ENTRY_T_QPC, cqm_bat_entry_qpc_init}, + {CQM_BAT_ENTRY_T_MPT, cqm_bat_entry_mpt_init}, + {CQM_BAT_ENTRY_T_SCQC, cqm_bat_entry_scqc_init}, + {CQM_BAT_ENTRY_T_SRQC, cqm_bat_entry_srqc_init}, + {CQM_BAT_ENTRY_T_GID, cqm_bat_entry_gid_init}, + {CQM_BAT_ENTRY_T_LUN, cqm_bat_entry_lun_init}, + {CQM_BAT_ENTRY_T_TASKMAP, cqm_bat_entry_taskmap_init}, + {CQM_BAT_ENTRY_T_L3I, cqm_bat_entry_l3i_init}, + {CQM_BAT_ENTRY_T_CHILDC, cqm_bat_entry_childc_init}, + {CQM_BAT_ENTRY_T_TIMER, cqm_bat_entry_timer_init}, + {CQM_BAT_ENTRY_T_XID2CID, cqm_bat_entry_xid2cid_init}, + {CQM_BAT_ENTRY_T_REORDER, cqm_bat_entry_reoder_init}, +}; + +static struct cqm_cla_entry_init_s *cqm_get_cla_init_entry( + struct cqm_handle_s *cqm_handle, + u32 type) +{ + int i; + struct cqm_cla_entry_init_s *entry = NULL; + + for (i = 0; + i < (sizeof(cqm_cla_entry_init_tbl) / + sizeof(struct cqm_cla_entry_init_s)); i++) { + entry = &cqm_cla_entry_init_tbl[i]; + if (entry->type == type) + return entry; + } + + return NULL; +} + +void cqm_cla_init_entry(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_func_capability_s *capability) +{ + struct cqm_cla_entry_init_s *entry; + + entry = cqm_get_cla_init_entry(cqm_handle, cla_table->type); + if (entry && entry->cqm_cla_init_handler) + entry->cqm_cla_init_handler(cqm_handle, cla_table, capability); +} + +static s32 cqm_cla_fill_entry(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + s32 ret = CQM_FAIL; + + /* After the allocation of CLA entry, fill in the BAT table */ + cqm_bat_fill_cla(cqm_handle); + + /* Notify the chip to refresh the BAT table */ + ret = cqm_bat_update(cqm_handle); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); + return CQM_FAIL; + } + + cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n", + cqm_handle->func_attribute.func_type, + cqm_handle->func_capability.timer_enable); + + if ((cqm_handle->func_attribute.func_type == CQM_PPF) && + (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) { + /* After the timer resource is allocated, + * the timer needs to be enabled + */ + cqm_info(handle->dev_hdl, "Timer start: hifc ppf timer start\n"); + ret = hifc_ppf_tmr_start((void *)(cqm_handle->ex_handle)); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Timer start: hifc ppf timer start, ret=%d\n", + ret); + return CQM_FAIL; + } + } + return CQM_SUCCESS; +} + +s32 cqm_cla_init(struct cqm_handle_s *cqm_handle) +{ + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + s32 inv_flag = 0; + u32 i = 0; + u32 j = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + cla_table->type = bat_table->bat_entry_type[i]; + + cqm_cla_init_entry(cqm_handle, cla_table, capability); + + /* Allocate CLA entry space of all levels */ + if ((cla_table->type >= CQM_BAT_ENTRY_T_HASH) && + (cla_table->type <= CQM_BAT_ENTRY_T_REORDER)) { + /* Only needs to allocate timer resources for PPF, + * 8 wheels * 2k scales * 32B * func_num, for PF, there + * is no need to allocate resources for the timer, nor + * to fill in the structure of the timer entry in the + * BAT table. 
+		 */
+			if (!((cla_table->type == CQM_BAT_ENTRY_T_TIMER) &&
+			      (cqm_handle->func_attribute.func_type
+			       != CQM_PPF))) {
+				if (cqm_cla_xyz(cqm_handle, cla_table) ==
+				    CQM_FAIL)
+					goto err;
+			}
+		}
+		mutex_init(&cla_table->lock);
+	}
+	if (cqm_cla_fill_entry(cqm_handle) == CQM_FAIL)
+		goto err;
+
+	return CQM_SUCCESS;
+
+err:
+	for (j = 0; j < i; j++) {
+		cla_table = &bat_table->entry[j];
+		if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_x_buf,
+					       &inv_flag);
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_y_buf,
+					       &inv_flag);
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_z_buf,
+					       &inv_flag);
+		}
+	}
+
+	return CQM_FAIL;
+}
+
+void cqm_cla_uninit(struct cqm_handle_s *cqm_handle)
+{
+	struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+	struct cqm_cla_table_s *cla_table = NULL;
+	s32 inv_flag = 0;
+	u32 i = 0;
+
+	for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+		cla_table = &bat_table->entry[i];
+		if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_x_buf,
+					       &inv_flag);
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_y_buf,
+					       &inv_flag);
+			cqm_buf_free_cache_inv(cqm_handle,
+					       &cla_table->cla_z_buf,
+					       &inv_flag);
+		}
+	}
+}
+
+s32 cqm_cla_update(struct cqm_handle_s *cqm_handle,
+		   struct cqm_buf_list_s *buf_node_parent,
+		   struct cqm_buf_list_s *buf_node_child,
+		   u32 child_index, u8 cla_update_mode)
+{
+	struct hifc_hwdev *handle = cqm_handle->ex_handle;
+	struct cqm_cmd_buf_s *buf_in = NULL;
+	struct cqm_cla_update_cmd_s *cmd = NULL;
+	dma_addr_t pa = 0;
+	s32 ret = CQM_FAIL;
+	u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable;
+
+	buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle));
+	CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in));
+	buf_in->size = sizeof(struct cqm_cla_update_cmd_s);
+
+	/* Fill the command format and convert to big endian */
+	cmd = (struct cqm_cla_update_cmd_s *)(buf_in->buf);
+
+	pa = buf_node_parent->pa + (child_index * sizeof(dma_addr_t));
+	cmd->gpa_h = CQM_ADDR_HI(pa);
+	cmd->gpa_l = CQM_ADDR_LW(pa);
+
+	pa = buf_node_child->pa;
+	cmd->value_h = CQM_ADDR_HI(pa);
+	cmd->value_l = CQM_ADDR_LW(pa);
+
+	cqm_dbg("Cla alloc: cqm_cla_update, gpa=0x%x 0x%x, value=0x%x 0x%x, cla_update_mode=0x%x\n",
+		cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l,
+		cla_update_mode);
+
+	/* CLA GPA check */
+	if (gpa_check_enable) {
+		switch (cla_update_mode) {
+		/* gpa[0]=1 means this GPA is valid */
+		case CQM_CLA_RECORD_NEW_GPA:
+			cmd->value_l |= 1;
+			break;
+		/* gpa[0]=0 means this GPA is invalid */
+		case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID:
+		case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID:
+			cmd->value_l &= (~1);
+			break;
+		default:
+			cqm_err(handle->dev_hdl,
+				"Cla alloc: cqm_cla_update, wrong cla_update_mode=%u\n",
+				cla_update_mode);
+			break;
+		}
+	}
+
+	cqm_swab32((u8 *)cmd, (sizeof(struct cqm_cla_update_cmd_s) >> 2));
+
+	ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle),
+			       CQM_CMD_ACK_TYPE_CMDQ,
+			       CQM_MOD_CQM, CQM_CMD_T_CLA_UPDATE,
+			       buf_in, NULL, CQM_CMD_TIMEOUT);
+	if (ret != CQM_SUCCESS) {
+		cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box));
+		cqm_err(handle->dev_hdl,
+			"Cla alloc: cqm_cla_update, cqm_send_cmd_box_ret=%d\n",
+			ret);
+		cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n",
+			cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l);
+		cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in);
+		return CQM_FAIL;
+	}
+
+
cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_SUCCESS; +} + +/** + * cqm_cla_alloc - Allocate a CLA trunk page + * @cqm_handle: cqm handle + * @cla_table: cla handle + * @buf_node_parent: the parent node whose content is to be updated + * @buf_node_child: the child node whose content is to be allocated + * @child_index: child index + * Return: 0 - success, negative - failure + */ +s32 cqm_cla_alloc(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_buf_list_s *buf_node_parent, + struct cqm_buf_list_s *buf_node_child, u32 child_index) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + s32 ret = CQM_FAIL; + + /* Allocate trunk page */ + buf_node_child->va = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + cla_table->trunk_order); + CQM_PTR_CHECK_RET(buf_node_child->va, return CQM_FAIL, + CQM_ALLOC_FAIL(va)); + + /* pci mapping */ + buf_node_child->pa = + pci_map_single(cqm_handle->dev, buf_node_child->va, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa)); + goto err1; + } + + /* Notify the chip of trunk_pa and + * let it fill in the cla table entry + */ + ret = cqm_cla_update(cqm_handle, buf_node_parent, + buf_node_child, child_index, + CQM_CLA_RECORD_NEW_GPA); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + goto err2; + } + + return CQM_SUCCESS; + +err2: + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); +err1: + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; + return CQM_FAIL; +} + +void cqm_cla_free(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_buf_list_s *buf_node_parent, + struct cqm_buf_list_s *buf_node_child, + u32 child_index, u8 cla_update_mode) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + cqm_dbg("Cla free: cla_update_mode=%u\n", cla_update_mode); + + if (cqm_cla_update(cqm_handle, buf_node_parent, + buf_node_child, child_index, + cla_update_mode) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + return; + } + + if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) { + if (cqm_cla_cache_invalid( + cqm_handle, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_cache_invalid)); + return; + } + } + + /* Unblock the pci mapping of the trunk page */ + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + + /* Free trunk page */ + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; +} + +/** + * cqm_static_qpc_cla_get - When QPC is a static allocation, allocate the count + * of buffer from the index position in the cla table without lock + * @cqm_handle: cqm handle + * @cla_table: cla handle + * @index: the index of table + * @count: the count of buffer + * @pa: the physical address + * Return: the virtual address + */ +u8 *cqm_static_qpc_cla_get(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_z 
= NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 z_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + u8 *ret_addr = NULL; + u32 offset = 0; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) { + offset = index * cla_table->obj_size; + ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; + *pa = cla_z_buf->buf_list->pa + offset; + } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Static qpc cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return NULL; + } + buf_node_z = &cla_z_buf->buf_list[y_index]; + if (!buf_node_z->va) { + cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf[%u].va=NULL\n", + y_index); + return NULL; + } + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + } else { + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Static qpc cla get: index exceeds buf_number,x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n ", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + return NULL; + } + + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + if (!buf_node_z->va) { + cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf.va=NULL\n"); + return NULL; + } + + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + } + + return ret_addr; +} + +static s32 cqm_cla_get_level_two(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, + dma_addr_t *pa, u8 **ret_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_x = NULL; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 z_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + u32 offset = 0; + + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + return CQM_FAIL; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + + /* Y buf node does not exist, allocates y node page */ + if (!buf_node_y->va) { + if (cqm_cla_alloc( + cqm_handle, cla_table, + buf_node_x, buf_node_y, x_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, 
+ CQM_FUNCTION_FAIL(cqm_cla_alloc)); + return CQM_FAIL; + } + } + + /* Z buf node does not exist, allocates z node page */ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, + cla_table, + buf_node_y, + buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + if (buf_node_y->refcount == 0) { + /* Free y node, needs cache_invalid */ + cqm_cla_free( + cqm_handle, cla_table, + buf_node_x, buf_node_y, x_index, + CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + } + return CQM_FAIL; + } + + cqm_dbg("Cla get: 2L: y_refcount=0x%x\n", buf_node_y->refcount); + /* Y buf node's reference count should be +1 */ + buf_node_y->refcount++; + } + + cqm_dbg("Cla get: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + *ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return CQM_SUCCESS; +} + +static s32 cqm_cla_get_level_one(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, dma_addr_t *pa, + u8 **ret_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 y_index = 0; + u32 z_index = 0; + u32 offset = 0; + + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return CQM_FAIL; + } + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* Z buf node does not exist, first allocate the page */ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, + cla_table, + buf_node_y, + buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + cqm_err(handle->dev_hdl, + "Cla get: cla_table->type=%u\n", + cla_table->type); + return CQM_FAIL; + } + } + + cqm_dbg("Cla get: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + *ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return CQM_SUCCESS; +} + +/** + * cqm_cla_get - Allocate the count of buffer from the index position in the + * cla table + * @cqm_handle: cqm handle + * @cla_table: cla table + * @index: the index of table + * @count: the count of buffer + * @pa: the physical address + * Return: the virtual address + */ +u8 *cqm_cla_get(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, u32 index, + u32 count, dma_addr_t *pa) +{ + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + u8 *ret_addr = NULL; + u32 offset = 0; + + mutex_lock(&cla_table->lock); + if (cla_table->cla_lvl == CQM_CLA_LVL_0) { + /* Level 0 CLA pages are statically allocated */ + offset = index * cla_table->obj_size; + ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; + *pa = cla_z_buf->buf_list->pa + offset; + } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + if (cqm_cla_get_level_one(cqm_handle, cla_table, + index, count, + pa, &ret_addr) == CQM_FAIL) { + mutex_unlock(&cla_table->lock); + return NULL; + } + } else { + if (cqm_cla_get_level_two(cqm_handle, + cla_table, + index, + count, + pa, + &ret_addr) == CQM_FAIL) { + 
mutex_unlock(&cla_table->lock); + return NULL; + } + } + + mutex_unlock(&cla_table->lock); + return ret_addr; +} + +/** + * cqm_cla_put -Decrease the reference count of the trunk page, if it is reduced + * to 0, release the trunk page + * @cqm_handle: cqm handle + * @cla_table: cla table + * @index: the index of table + * @count: the count of buffer + */ +void cqm_cla_put(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_x = NULL; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + + /* Buffer is statically allocated, + * no need to control the reference count + */ + if (cla_table->alloc_static == true) + return; + + mutex_lock(&cla_table->lock); + + if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + cqm_err(handle->dev_hdl, + "Cla put: cla_table->type=%u\n", + cla_table->type); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* When the z node page reference count is 0, + * release the z node page + */ + cqm_dbg("Cla put: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + /* Z node does not need cache invalid */ + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + } + } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) { + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + cqm_dbg("Cla put: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + + /* When the z node page reference count is 0, + * release the z node page + */ + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + + /* When the y node page reference count is 0, + * release the y node page + */ + cqm_dbg("Cla put: 2L: y_refcount=0x%x\n", + buf_node_y->refcount); + buf_node_y->refcount--; + if (buf_node_y->refcount == 0) { + /* Y node needs cache invalid */ + cqm_cla_free( + cqm_handle, cla_table, buf_node_x, + buf_node_y, x_index, + CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + } + } + } + + mutex_unlock(&cla_table->lock); +} + +/** + * cqm_cla_table_get - Find the CLA table structure corresponding to a 
BAT entry + * @bat_table: bat table + * @entry_type: entry type + * @count: the count of buffer + * Return: the CLA table + */ +struct cqm_cla_table_s *cqm_cla_table_get(struct cqm_bat_table_s *bat_table, + u32 entry_type) +{ + struct cqm_cla_table_s *cla_table = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (entry_type == cla_table->type) + return cla_table; + } + + return NULL; +} + +#define bitmap_section + +/** + * __cqm_bitmap_init - Initialize a bitmap + * @bitmap: cqm bitmap table + * Return: 0 - success, negative - failure + */ +s32 __cqm_bitmap_init(struct cqm_bitmap_s *bitmap) +{ + spin_lock_init(&bitmap->lock); + + /* The max_num of bitmap is aligned by 8, and then shifted right by + * 3bits to get how many Bytes are needed + */ + bitmap->table = + (ulong *)vmalloc((ALIGN(bitmap->max_num, 8) >> 3)); + CQM_PTR_CHECK_RET(bitmap->table, return CQM_FAIL, + CQM_ALLOC_FAIL(bitmap->table)); + memset(bitmap->table, 0, (ALIGN(bitmap->max_num, 8) >> 3)); + + return CQM_SUCCESS; +} + +static s32 cqm_bitmap_init_by_type(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_bitmap_s *bitmap) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + s32 ret = CQM_SUCCESS; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + bitmap->max_num = capability->qpc_number; + bitmap->reserved_top = capability->qpc_reserved; + bitmap->last = capability->qpc_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_MPT: + bitmap->max_num = capability->mpt_number; + bitmap->reserved_top = capability->mpt_reserved; + bitmap->last = capability->mpt_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SCQC: + bitmap->max_num = capability->scqc_number; + bitmap->reserved_top = capability->scq_reserved; + bitmap->last = capability->scq_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SRQC: + bitmap->max_num = capability->srqc_number; + bitmap->reserved_top = 0; + bitmap->last = 0; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + default: + break; + } + + return ret; +} + +/** + * cqm_bitmap_init - Initialize a bitmap + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + u32 i = 0; + s32 ret = CQM_SUCCESS; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", + cla_table->type); + continue; + } + + bitmap = &cla_table->bitmap; + ret = cqm_bitmap_init_by_type(cqm_handle, cla_table, bitmap); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Bitmap init: failed to init cla_table_type=%u, 
obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_bitmap_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_bitmap_uninit - Uninitialize a bitmap + * @cqm_handle: cqm handle + */ +void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + bitmap = &cla_table->bitmap; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + if (bitmap->table) { + vfree(bitmap->table); + bitmap->table = NULL; + } + } + } +} + +/** + * cqm_bitmap_check_range - Starting from begin, check whether count bits are + * free in the table, required: 1. This set of bits cannot cross step, 2. This + * group of bits must be 0 + * @table: bitmap table + * @step: steps + * @max_num: max num + * @begin: begin position + * @count: the count of bit to check + * Return: If check valid return begin position + */ +u32 cqm_bitmap_check_range(const ulong *table, u32 step, + u32 max_num, u32 begin, u32 count) +{ + u32 i = 0; + u32 end = (begin + (count - 1)); + + /* Single bit is not checked */ + if (count == 1) + return begin; + + /* End is out of bounds */ + if (end >= max_num) + return max_num; + + /* Bit check, if there is a bit other than 0, return next bit */ + for (i = (begin + 1); i <= end; i++) { + if (test_bit((s32)i, table)) + return i + 1; + } + + /* Check if it is in a different step */ + if ((begin & (~(step - 1))) != (end & (~(step - 1)))) + return (end & (~(step - 1))); + + /* If check pass, return begin position */ + return begin; +} + +static void cqm_bitmap_set_bit(struct cqm_bitmap_s *bitmap, u32 index, + u32 max_num, u32 count, bool update_last, + ulong *table) +{ + u32 i; + + /* Set 1 to the found bit and reset last */ + if (index < max_num) { + for (i = index; i < (index + count); i++) + set_bit(i, table); + + if (update_last) { + bitmap->last = (index + count); + if (bitmap->last >= bitmap->max_num) + bitmap->last = bitmap->reserved_top; + } + } +} + +/** + * cqm_bitmap_alloc - Allocate a bitmap index, 0 and 1 should not be used, Scan + * back from the place where you last applied, and needs to support the + * application of a series of consecutive indexes, and should not to cross trunk + * @table: bitmap table + * @step: steps + * @count: the count of bit to check + * @update_last: update last + * Return: Success - return the index, failure - return the max + */ +u32 cqm_bitmap_alloc(struct cqm_bitmap_s *bitmap, u32 step, u32 count, + bool update_last) +{ + u32 index = 0; + u32 max_num = bitmap->max_num; + u32 last = bitmap->last; + ulong *table = bitmap->table; + + spin_lock(&bitmap->lock); + + /* Search for a free bit from the last position */ + do { + index = find_next_zero_bit(table, max_num, last); + if (index < max_num) { + last = cqm_bitmap_check_range(table, step, + max_num, index, count); + } else { + break; + } + } while (last != index); + + /* The above search failed, search for a free bit from the beginning */ + if (index >= max_num) { + last = bitmap->reserved_top; + do { + index = find_next_zero_bit(table, max_num, last); + if (index < max_num) { + last = cqm_bitmap_check_range(table, step, + max_num, + index, count); + } else { + break; + } + } while (last != index); + } + cqm_bitmap_set_bit(bitmap, index, max_num, count, update_last, table); + spin_unlock(&bitmap->lock); + return index; 
+} + +/** + * cqm_bitmap_alloc_reserved - Allocate the reserve bit according to index + * @bitmap: bitmap table + * @count: count + * @index: the index of bitmap + * Return: Success - return the index, failure - return the max + */ +u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap_s *bitmap, u32 count, u32 index) +{ + ulong *table = bitmap->table; + u32 ret_index = CQM_INDEX_INVALID; + + if ((index >= bitmap->reserved_top) || (index >= bitmap->max_num) || + (count != 1)) { + return CQM_INDEX_INVALID; + } + + spin_lock(&bitmap->lock); + + if (test_bit(index, table)) { + ret_index = CQM_INDEX_INVALID; + } else { + set_bit(index, table); + ret_index = index; + } + + spin_unlock(&bitmap->lock); + return ret_index; +} + +/** + * cqm_bitmap_free - Release a bitmap index + * @bitmap: bitmap table + * @index: the index of bitmap + * @count: count + */ +void cqm_bitmap_free(struct cqm_bitmap_s *bitmap, u32 index, u32 count) +{ + ulong i = 0; + + spin_lock(&bitmap->lock); + + for (i = index; i < (index + count); i++) + clear_bit((s32)i, bitmap->table); + + spin_unlock(&bitmap->lock); +} + +#define obj_table_section + +/** + * _cqm_object_table_init - Initialize a table of object and index + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 __cqm_object_table_init(struct cqm_object_table_s *obj_table) +{ + rwlock_init(&obj_table->lock); + + obj_table->table = (struct cqm_object_s **)vmalloc(obj_table->max_num * + sizeof(void *)); + CQM_PTR_CHECK_RET(obj_table->table, return CQM_FAIL, + CQM_ALLOC_FAIL(table)); + memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); + return CQM_SUCCESS; +} + +/** + * cqm_object_table_init - Initialize the association table of object and index + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_object_table_s *obj_table = NULL; + s32 ret = CQM_SUCCESS; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", + cla_table->type); + continue; + } + + obj_table = &cla_table->obj_table; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + obj_table->max_num = capability->qpc_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_MPT: + obj_table->max_num = capability->mpt_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SCQC: + obj_table->max_num = capability->scqc_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SRQC: + obj_table->max_num = capability->srqc_number; + ret = __cqm_object_table_init(obj_table); + break; + default: + break; + } + + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_object_table_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_object_table_uninit - Deinitialize the association table of object and + * index + * @cqm_handle: cqm handle + */ +void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle) +{ + struct 
cqm_bat_table_s *bat_table = &cqm_handle->bat_table;
+	struct cqm_cla_table_s *cla_table = NULL;
+	struct cqm_object_table_s *obj_table = NULL;
+	u32 i = 0;
+
+	for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) {
+		cla_table = &bat_table->entry[i];
+		obj_table = &cla_table->obj_table;
+		if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) {
+			if (obj_table->table) {
+				vfree(obj_table->table);
+				obj_table->table = NULL;
+			}
+		}
+	}
+}
+
+/**
+ * cqm_object_table_insert - Insert an object, turn off the soft interrupt
+ * @cqm_handle: cqm handle
+ * @object_table: object table
+ * @index: the index of table
+ * @obj: the object to insert
+ * Return: 0 - success, negative - failure
+ */
+s32 cqm_object_table_insert(struct cqm_handle_s *cqm_handle,
+			    struct cqm_object_table_s *object_table, u32 index,
+			    struct cqm_object_s *obj)
+{
+	struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+	if (index >= object_table->max_num) {
+		cqm_err(handle->dev_hdl, "Obj table insert: index 0x%x exceeds max_num 0x%x\n",
+			index, object_table->max_num);
+		return CQM_FAIL;
+	}
+
+	write_lock(&object_table->lock);
+
+	if (!object_table->table[index]) {
+		object_table->table[index] = obj;
+		write_unlock(&object_table->lock);
+		return CQM_SUCCESS;
+	}
+	write_unlock(&object_table->lock);
+	cqm_err(handle->dev_hdl, "Obj table insert: object_table->table[0x%x] has been inserted\n",
+		index);
+	return CQM_FAIL;
+}
+
+/**
+ * cqm_object_table_remove - Remove an object
+ * @cqm_handle: cqm handle
+ * @object_table: object table
+ * @index: the index of table
+ * @obj: the object to remove
+ */
+void cqm_object_table_remove(struct cqm_handle_s *cqm_handle,
+			     struct cqm_object_table_s *object_table,
+			     u32 index, const struct cqm_object_s *obj)
+{
+	struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+	if (index >= object_table->max_num) {
+		cqm_err(handle->dev_hdl, "Obj table remove: index 0x%x exceeds max_num 0x%x\n",
+			index, object_table->max_num);
+		return;
+	}
+
+	write_lock(&object_table->lock);
+
+	if ((object_table->table[index]) &&
+	    (object_table->table[index] == obj)) {
+		object_table->table[index] = NULL;
+	} else {
+		cqm_err(handle->dev_hdl, "Obj table remove: object_table->table[0x%x] has been removed\n",
+			index);
+	}
+
+	write_unlock(&object_table->lock);
+}
+
+/**
+ * cqm_srq_used_rq_delete - Delete the RQ in TOE SRQ mode
+ * @object: cqm object
+ */
+void cqm_srq_used_rq_delete(struct cqm_object_s *object)
+{
+	struct cqm_queue_s *common = container_of(object, struct cqm_queue_s,
+						  object);
+	struct cqm_nonrdma_qinfo_s *qinfo = container_of(
+						common,
+						struct cqm_nonrdma_qinfo_s,
+						common);
+	u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size;
+	struct cqm_srq_linkwqe_s *srq_link_wqe = NULL;
+	dma_addr_t addr;
+	struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *)
+					  (common->object.cqm_handle);
+	struct hifc_hwdev *handle = cqm_handle->ex_handle;
+
+	/* The current SRQ solution does not support the case where an RQ is
+	 * initialized without a container, which may cause errors when RQ
+	 * resources are released. So the RQ is initialized with only one
+	 * container, and only one container is released when resources are
+	 * released.
+ */ + CQM_PTR_CHECK_NO_RET( + common->head_container, "Rq del: rq has no contianer to release\n", + return); + + /* Get current container pa from link wqe, and ummap it */ + srq_link_wqe = (struct cqm_srq_linkwqe_s *)(common->head_container + + link_wqe_offset); + /* Convert only the big endian of the wqe part of the link */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct cqm_linkwqe_s) >> 2); + + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer physical addr is null\n"); + return; + } + pci_unmap_single(cqm_handle->dev, addr, qinfo->container_size, + PCI_DMA_BIDIRECTIONAL); + + /* Get current container va from link wqe, and free it */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer virtual addr is null\n"); + return; + } + kfree((void *)addr); +} + +#define obj_intern_if_section + +/** + * cqm_qpc_mpt_bitmap_alloc - Apply index from bitmap when creating qpc or mpt + * @object: cqm object + * @cla_table: cla table + * Return: 0 - success, negative - failure + */ +s32 cqm_qpc_mpt_bitmap_alloc(struct cqm_object_s *object, + struct cqm_cla_table_s *cla_table) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = container_of(object, + struct cqm_qpc_mpt_s, + object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = + container_of( + common, + struct cqm_qpc_mpt_info_s, + common); + struct cqm_bitmap_s *bitmap = &cla_table->bitmap; + u32 index = 0; + u32 count = 0; + + count = (ALIGN(object->object_size, cla_table->obj_size)) / + cla_table->obj_size; + qpc_mpt_info->index_count = count; + + if (qpc_mpt_info->common.xid == CQM_INDEX_INVALID) { + /* Allocate index normally */ + index = cqm_bitmap_alloc( + bitmap, + 1 << (cla_table->z + 1), + count, + cqm_handle->func_capability.xid_alloc_mode); + if (index < bitmap->max_num) { + qpc_mpt_info->common.xid = index; + } else { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); + return CQM_FAIL; + } + } else { + /* Allocate reserved index */ + index = cqm_bitmap_alloc_reserved( + bitmap, count, + qpc_mpt_info->common.xid); + if (index != qpc_mpt_info->common.xid) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); + return CQM_FAIL; + } + } + + return CQM_SUCCESS; +} + +static struct cqm_cla_table_s *cqm_qpc_mpt_prepare_cla_table( + struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + + struct cqm_cla_table_s *cla_table = NULL; + + /* Get the corresponding cla table */ + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return NULL; + } + + CQM_PTR_CHECK_RET(cla_table, return NULL, + CQM_FUNCTION_FAIL(cqm_cla_table_get)); + + /* Allocate index for bitmap */ + if (cqm_qpc_mpt_bitmap_alloc(object, cla_table) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc)); + return NULL; + } + + return cla_table; +} + +/** + * cqm_qpc_mpt_create - Create qpc or mpt + * @object: cqm object + * Return: 0 - success, negative - failure 
+ */ +s32 cqm_qpc_mpt_create(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = + container_of(object, struct cqm_qpc_mpt_s, object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = + container_of(common, struct cqm_qpc_mpt_info_s, common); + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = 0; + u32 count = 0; + + cla_table = cqm_qpc_mpt_prepare_cla_table(object); + CQM_PTR_CHECK_RET(cla_table, return CQM_FAIL, + CQM_FUNCTION_FAIL(cqm_qpc_mpt_prepare_cla_table)); + + bitmap = &cla_table->bitmap; + index = qpc_mpt_info->common.xid; + count = qpc_mpt_info->index_count; + + /* Find the trunk page from BAT/CLA and allocate the buffer, the + * business needs to ensure that the released buffer has been cleared + */ + if (cla_table->alloc_static == true) { + qpc_mpt_info->common.vaddr = + cqm_static_qpc_cla_get(cqm_handle, cla_table, + index, count, &common->paddr); + } else { + qpc_mpt_info->common.vaddr = + cqm_cla_get(cqm_handle, cla_table, + index, count, &common->paddr); + } + if (!qpc_mpt_info->common.vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get)); + cqm_err(handle->dev_hdl, + "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n", + cla_table->alloc_static); + goto err1; + } + + /* Associate index with object, FC executes in interrupt context */ + object_table = &cla_table->obj_table; + + if (cqm_object_table_insert(cqm_handle, object_table, index, object) != + CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + goto err2; + } + + return CQM_SUCCESS; + +err2: + cqm_cla_put(cqm_handle, cla_table, index, count); +err1: + cqm_bitmap_free(bitmap, index, count); + return CQM_FAIL; +} + +/** + * cqm_qpc_mpt_delete - Delete qpc or mpt + * @object: cqm object + */ +void cqm_qpc_mpt_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = container_of(object, + struct cqm_qpc_mpt_s, + object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = container_of( + common, + struct cqm_qpc_mpt_info_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = qpc_mpt_info->common.xid; + u32 count = qpc_mpt_info->index_count; + + /* Find the response cla table */ + atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt); + + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return; + } + + CQM_PTR_CHECK_NO_RET( + cla_table, CQM_FUNCTION_FAIL(cqm_cla_table_get), return); + + /* Disassociate index with object */ + object_table = &cla_table->obj_table; + + cqm_object_table_remove(cqm_handle, object_table, index, object); + + /* Wait for the completion and ensure that all references to the QPC + * are completed + */ + if (atomic_dec_and_test(&object->refcount)) { + complete(&object->free); + } else { + cqm_err(handle->dev_hdl, + "Qpc mpt del: object is referred by others, has to wait for completion\n"); + } + + /* The QPC static allocation 
needs to be non-blocking, and the service + * guarantees that the QPC is completed when the QPC is deleted + */ + if (cla_table->alloc_static == false) + wait_for_completion(&object->free); + /* Free qpc buffer */ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* Free index into bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); +} + +/** + * cqm_linkwqe_fill - Fill link wqe for non RDMA queue buffer + * @buf: cqm buffer + * @wqe_per_buf: not include link wqe + * @wqe_size: wqe size + * @wqe_number: not include link wqe + * @tail: true linkwqe must be at the tail, false linkwqe may not be at the tail + * @link_mode: link wqe mode + */ +void cqm_linkwqe_fill(struct cqm_buf_s *buf, + u32 wqe_per_buf, + u32 wqe_size, + u32 wqe_number, + bool tail, + u8 link_mode) +{ + struct cqm_linkwqe_s *wqe = NULL; + struct cqm_linkwqe_128b_s *linkwqe = NULL; + u8 *va = NULL; + u32 i = 0; + dma_addr_t addr; + + /* Except for the last buffer, the linkwqe of other buffers is directly + * filled to the tail + */ + for (i = 0; i < buf->buf_number; i++) { + va = (u8 *)(buf->buf_list[i].va); + + if (i != (buf->buf_number - 1)) { + wqe = (struct cqm_linkwqe_s *)(va + (u32)(wqe_size * + wqe_per_buf)); + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + wqe->lp = CQM_LINK_WQE_LP_INVALID; + /* The Obit of valid link wqe needs to be set to 1, and + * each service needs to confirm that o-bit=1 means + * valid, o-bit=0 means invalid + */ + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[(u32)(i + 1)].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } else { + /* The last buffer of Linkwqe must fill specially */ + if (tail == true) { + /* Must be filled at the end of the page */ + wqe = (struct cqm_linkwqe_s *)(va + + (u32)(wqe_size * wqe_per_buf)); + } else { + /* The last linkwqe is filled immediately after + * the last wqe + */ + wqe = (struct cqm_linkwqe_s *) + (va + (u32)(wqe_size * + (wqe_number - wqe_per_buf * + (buf->buf_number - 1)))); + } + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + + /* In link mode, the last link wqe is invalid, In ring + * mode, the last link wqe is valid, pointing to the + * home page, and lp is set + */ + if (link_mode == CQM_QUEUE_LINK_MODE) { + wqe->o = CQM_LINK_WQE_OWNER_INVALID; + } else { + /* The lp field of the last link_wqe is filled + * with 1,indicating that the o-bit is flipped + * from here + */ + wqe->lp = CQM_LINK_WQE_LP_VALID; + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[0].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } + } + + if (wqe_size == 128) { + /* The 128B wqe before and after 64B have obit need to be + * assigned, For IFOE, 63th penultimate bit of last 64B is + * obit, for TOE, 157th penultimate bit of last 64B is obit + */ + linkwqe = (struct cqm_linkwqe_128b_s *)wqe; + linkwqe->second_64b.third_16B.bs.toe_o = + CQM_LINK_WQE_OWNER_VALID; + linkwqe->second_64b.forth_16B.bs.ifoe_o = + CQM_LINK_WQE_OWNER_VALID; + + /* big endian conversion */ + cqm_swab32((u8 *)wqe, + sizeof(struct cqm_linkwqe_128b_s) >> 2); + } else { + /* big endian conversion */ + cqm_swab32((u8 *)wqe, + sizeof(struct cqm_linkwqe_s) >> 2); + } + } +} + +static s32 cqm_nonrdma_queue_ctx_create_srq(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = 
container_of(object, + struct cqm_queue_s, object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + s32 shift = 0; + + shift = cqm_shift(qinfo->q_ctx_size); + common->q_ctx_vaddr = (u8 *)cqm_kmalloc_align( + qinfo->q_ctx_size, + GFP_KERNEL | __GFP_ZERO, + (u16)shift); + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); + return CQM_FAIL; + } + + common->q_ctx_paddr = + pci_map_single(cqm_handle->dev, common->q_ctx_vaddr, + qinfo->q_ctx_size, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_ctx_paddr)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr)); + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_nonrdma_queue_ctx_create_scq(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + + /* Find the corresponding cla table */ + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + if (!cla_table) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get)); + return CQM_FAIL; + } + + /* Allocate index for bitmap */ + bitmap = &cla_table->bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / + cla_table->obj_size; + qinfo->common.index = cqm_bitmap_alloc(bitmap, 1 << (cla_table->z + 1), + qinfo->index_count, cqm_handle->func_capability.xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); + return CQM_FAIL; + } + + /* Find the trunk page from BAT/CLA and allocate buffer */ + common->q_ctx_vaddr = cqm_cla_get(cqm_handle, cla_table, + qinfo->common.index, + qinfo->index_count, + &common->q_ctx_paddr); + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get)); + cqm_bitmap_free(bitmap, qinfo->common.index, + qinfo->index_count); + return CQM_FAIL; + } + + /* Associate index with object */ + object_table = &cla_table->obj_table; + + if (cqm_object_table_insert( + cqm_handle, object_table, + qinfo->common.index, object) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + cqm_cla_put(cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count); + cqm_bitmap_free(bitmap, qinfo->common.index, + qinfo->index_count); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +s32 cqm_nonrdma_queue_ctx_create(struct cqm_object_s *object) +{ + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) + return cqm_nonrdma_queue_ctx_create_srq(object); + else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) + return cqm_nonrdma_queue_ctx_create_scq(object); + + return CQM_SUCCESS; +} + +#define CQM_NORDMA_CHECK_WEQ_NUMBER(number) \ + (((number) < CQM_CQ_DEPTH_MIN) || ((number) > CQM_CQ_DEPTH_MAX)) + +/** + * cqm_nonrdma_queue_create - Create queue for non RDMA service + * @buf: cqm object + * Return: 0 - success, negative - failure + */ +s32 cqm_nonrdma_queue_create(struct cqm_object_s *object) +{ + struct cqm_handle_s 
*cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_service_s *service = &cqm_handle->service; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_buf_s *q_room_buf = &common->q_room_buf_1; + u32 wqe_number = qinfo->common.object.object_size; + u32 wqe_size = qinfo->wqe_size; + u32 order = service->buf_order; + u32 buf_number = 0; + u32 buf_size = 0; + bool tail = false; /* Whether linkwqe is at the end of the page */ + + /* When creating CQ/SCQ queue, the page size is 4k, linkwqe must be at + * the end of the page + */ + if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* Depth must be 2^n alignment, depth range is 256~32K */ + if (CQM_NORDMA_CHECK_WEQ_NUMBER(wqe_number)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number)); + return CQM_FAIL; + } + if (cqm_check_align(wqe_number) == false) { + cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); + return CQM_FAIL; + } + + order = CQM_4K_PAGE_ORDER; /* wqe page is 4k */ + tail = true; /* linkwqe must be at the end of the page */ + buf_size = CQM_4K_PAGE_SIZE; + } else { + buf_size = PAGE_SIZE << order; + } + + /* Calculate how many buffers are required, -1 is to deduct link wqe in + * a buf + */ + qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; + /* The depth from service includes the number of linkwqe */ + buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; + /* Allocate cqm buffer */ + q_room_buf->buf_number = buf_number; + q_room_buf->buf_size = buf_size; + q_room_buf->page_number = (buf_number << order); + if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + /* Fill link wqe, (wqe_number - buf_number) is the number of wqe without + * linkwqe + */ + cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, + wqe_number - buf_number, tail, + common->queue_link_mode); + + /* Create queue header */ + qinfo->common.q_header_vaddr = + (struct cqm_queue_header_s *)cqm_kmalloc_align( + sizeof(struct cqm_queue_header_s), + GFP_KERNEL | __GFP_ZERO, CQM_QHEAD_ALIGN_ORDER); + if (!qinfo->common.q_header_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr)); + goto err1; + } + + common->q_header_paddr = + pci_map_single(cqm_handle->dev, + qinfo->common.q_header_vaddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); + goto err2; + } + + /* Create queue ctx */ + if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); +err2: + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; +err1: + cqm_buf_free(q_room_buf, cqm_handle->dev); + return CQM_FAIL; +} + +static void cqm_nonrdma_queue_free_scq_srq(struct cqm_object_s *object, + struct cqm_cla_table_s *cla_table) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct 
cqm_nonrdma_qinfo_s *qinfo = + container_of(common, struct cqm_nonrdma_qinfo_s, common); + struct cqm_buf_s *q_room_buf = &common->q_room_buf_1; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + struct cqm_bitmap_s *bitmap = NULL; + + /* If it is in TOE SRQ mode, delete the RQ */ + if (common->queue_link_mode == CQM_QUEUE_TOE_SRQ_LINK_MODE) { + cqm_dbg("Nonrdma queue del: delete srq used rq\n"); + cqm_srq_used_rq_delete(&common->object); + } else { + /* Free it if exists q room */ + cqm_buf_free(q_room_buf, cqm_handle->dev); + } + /* Free SRQ or SCQ ctx */ + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { + /* ctx of nonrdma's SRQ is applied independently */ + if (common->q_ctx_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr, + qinfo->q_ctx_size, + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + } + } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* The ctx of SCQ of nonrdma is managed by BAT/CLA */ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* Release index into bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); + } +} + +/** + * cqm_nonrdma_queue_delete - Free queue for non RDMA service + * @buf: cqm object + */ +void cqm_nonrdma_queue_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = qinfo->common.index; + + atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt); + + /* SCQ has independent SCQN association */ + if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + CQM_PTR_CHECK_NO_RET( + cla_table, + CQM_FUNCTION_FAIL(cqm_cla_table_get), + return); + + /* index and object disassociate */ + object_table = &cla_table->obj_table; + + cqm_object_table_remove(cqm_handle, object_table, + index, object); + } + + /* Wait for the completion and ensure that all references to the QPC + * are completed + */ + if (atomic_dec_and_test(&object->refcount)) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); + wait_for_completion(&object->free); + + /* Free it if exists q header */ + if (qinfo->common.q_header_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + cqm_nonrdma_queue_free_scq_srq(object, cla_table); +} + +#define obj_extern_if_section + +/** + * cqm_object_qpc_mpt_create - Create QPC and MPT + * @ex_handle: hw dev handle + * @service_type: service type + * @object_type: must be mpt and ctx + * @object_size: the unit is byte + * @object_priv: the private structure for service, can be NULL + * @index: get the reserved qpn based on this value, if wants to automatically + * allocate it, the value should be CQM_INDEX_INVALID + * Return: service ctx + */ +struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create( + void *ex_handle, 
+ enum cqm_object_type_e object_type, + u32 object_size, void *object_priv, + u32 index) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_qpc_mpt_info_s *qpc_mpt_info = NULL; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle)); + + /* If service does not register, returns NULL */ + if (cqm_handle->service.has_register == false) { + cqm_err(handle->dev_hdl, "service is not register"); + return NULL; + } + + if (object_type != CQM_OBJECT_SERVICE_CTX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + qpc_mpt_info = (struct cqm_qpc_mpt_info_s *) + kmalloc(sizeof(struct cqm_qpc_mpt_info_s), + GFP_ATOMIC | __GFP_ZERO); + CQM_PTR_CHECK_RET(qpc_mpt_info, return NULL, + CQM_ALLOC_FAIL(qpc_mpt_info)); + + qpc_mpt_info->common.object.object_type = object_type; + qpc_mpt_info->common.object.object_size = object_size; + atomic_set(&qpc_mpt_info->common.object.refcount, 1); + init_completion(&qpc_mpt_info->common.object.free); + qpc_mpt_info->common.object.cqm_handle = cqm_handle; + qpc_mpt_info->common.xid = index; + qpc_mpt_info->common.priv = object_priv; + + ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object); + if (ret == CQM_SUCCESS) + return &qpc_mpt_info->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create)); + kfree(qpc_mpt_info); + return NULL; +} + +/** + * cqm_object_fc_srq_create - Create RQ for FC, the number of valid wqe in the + * queue must be meet the incoming wqe number. Because linkwqe can only be + * filled at the end of the page, the actual effective number exceeds demand, + * need to inform the number of business creation. + * @ex_handle: hw dev handle + * @service_type: service type + * @object_type: must be CQM_OBJECT_NONRDMA_SRQ + * @wqe_number: valid wqe number + * @wqe_size: wqe size + * @object_priv: the private structure for service + * Return: srq structure + */ +struct cqm_queue_s *cqm_object_fc_srq_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL; + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + u32 valid_wqe_per_buffer = 0; + u32 wqe_sum = 0; /* includes linkwqe, normal wqe */ + u32 buf_size = 0; + u32 buf_num = 0; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle)); + + /* service_type must be FC */ + if (cqm_handle->service.has_register == false) { + cqm_err(handle->dev_hdl, "service is not register\n"); + return NULL; + } + + /* wqe_size can not exceed PAGE_SIZE and should not be 0, and must be + * 2^n aligned. 
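+	 * For example, a wqe_size of 64 or 128 bytes passes this check; 100 bytes
+	 * (not a power of two) or PAGE_SIZE itself is rejected.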
+ */ + if ((wqe_size >= PAGE_SIZE) || (cqm_check_align(wqe_size) == false)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return NULL; + } + + /* FC's RQ is SRQ (unlike TOE's SRQ, fc is that all packets received by + * the stream will be put on the same rq, and TOE's srq is similar to + * rq's resource pool) + */ + if (object_type != CQM_OBJECT_NONRDMA_SRQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + service = &cqm_handle->service; + buf_size = PAGE_SIZE << (service->buf_order); + valid_wqe_per_buffer = buf_size / wqe_size - 1; /* Minus 1 link wqe */ + buf_num = wqe_number / valid_wqe_per_buffer; + if (wqe_number % valid_wqe_per_buffer != 0) + buf_num++; + + /* Calculate the total number of all wqe */ + wqe_sum = buf_num * (valid_wqe_per_buffer + 1); + nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *) + kmalloc(sizeof(struct cqm_nonrdma_qinfo_s), + GFP_KERNEL | __GFP_ZERO); + + CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL, + CQM_ALLOC_FAIL(nonrdma_qinfo)); + + /* Initialize object members */ + nonrdma_qinfo->common.object.object_type = object_type; + /* The total number of all wqe */ + nonrdma_qinfo->common.object.object_size = wqe_sum; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue, default is the + * hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + /* Initialize external public members */ + nonrdma_qinfo->common.priv = object_priv; + nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + /* The SRQ for FC, which needs to create ctx */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} + +static int cqm_object_nonrdma_queue_create_check( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_size) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_PTR_NULL(cqm_handle)); + + /* If service does not register, returns NULL */ + if (cqm_handle->service.has_register == false) { + cqm_err(handle->dev_hdl, "service is not register\n"); + return CQM_FAIL; + } + /* Wqe size cannot exceed PAGE_SIZE, cannot be 0, and must be 2^n + * aligned. 
cqm_check_align check excludes 0, 1, non 2^n alignment + */ + if ((wqe_size >= PAGE_SIZE) || (cqm_check_align(wqe_size) == false)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return CQM_FAIL; + } + + /* Supported Nonrdma queue: RQ, SQ, SRQ, CQ, SCQ */ + if ((object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ) || + (object_type > CQM_OBJECT_NONRDMA_SCQ)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_object_nonrdma_queue_create - Create queues for non-RDMA services + * @ex_handle: hw dev handle + * @service_type: service type + * @object_type: can create embedded RQ/SQ/CQ and SRQ/SCQ + * @wqe_number: wqe number, including link wqe + * @wqe_size: wqe size, nust be 2^n + * @object_priv: the private structure for service, can be NULL + * Return: srq structure + */ +struct cqm_queue_s *cqm_object_nonrdma_queue_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL; + struct cqm_service_s *service = NULL; + s32 ret = CQM_FAIL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + if (cqm_object_nonrdma_queue_create_check(ex_handle, + object_type, + wqe_size) == CQM_FAIL) { + return NULL; + } + + nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *) + kmalloc(sizeof(struct cqm_nonrdma_qinfo_s), + GFP_KERNEL | __GFP_ZERO); + CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL, + CQM_ALLOC_FAIL(nonrdma_qinfo)); + + /* Initialize object members */ + nonrdma_qinfo->common.object.object_type = object_type; + nonrdma_qinfo->common.object.object_size = wqe_number; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue, default is the + * hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + /* Initialize external public members */ + nonrdma_qinfo->common.priv = object_priv; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + service = &cqm_handle->service; + switch (object_type) { + case CQM_OBJECT_NONRDMA_SCQ: + nonrdma_qinfo->q_ctx_size = + service->service_template.scq_ctx_size; + break; + case CQM_OBJECT_NONRDMA_SRQ: + /* The creation for SRQ uses a dedicated interface */ + nonrdma_qinfo->q_ctx_size = + service->service_template.srq_ctx_size; + break; + default: + break; + } + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} + +s32 cqm_qpc_mpt_delete_ret(struct cqm_object_s *object) +{ + u32 object_type = 0; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_SERVICE_CTX: + cqm_qpc_mpt_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +s32 cqm_nonrdma_queue_delete_ret(struct cqm_object_s *object) +{ + u32 object_type = 0; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_NONRDMA_SCQ: + case CQM_OBJECT_NONRDMA_SRQ: + cqm_nonrdma_queue_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +/** + * 
cqm_object_nonrdma_queue_create - Delete the created object, the function + * will sleep and wait for all operations on the object to complete before + * returning + * @object: cqm object + */ +void cqm_object_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = NULL; + struct hifc_hwdev *handle = NULL; + + CQM_PTR_CHECK_NO_RET(object, CQM_PTR_NULL(object), return); + if (!object->cqm_handle) { + pr_err("[CQM]Obj del: cqm_handle is null, refcount %d\n", + (int)object->refcount.counter); + kfree(object); + return; + } + cqm_handle = (struct cqm_handle_s *)object->cqm_handle; + + if (!cqm_handle->ex_handle) { + pr_err("[CQM]Obj del: ex_handle is null, refcount %d\n", + (int)object->refcount.counter); + kfree(object); + return; + } + handle = cqm_handle->ex_handle; + + if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + kfree(object); +} diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.h b/drivers/scsi/huawei/hifc/hifc_cqm_object.h new file mode 100644 index 0000000000000000000000000000000000000000..308166ddd534dc83062ba9cc47ecf24eff4c994a --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CQM_OBJECT_H__ +#define __CQM_OBJECT_H__ + +#define CLA_TABLE_PAGE_ORDER (0) +#define CQM_4K_PAGE_ORDER (0) + +#define CQM_CQ_DEPTH_MAX (32768) +#define CQM_CQ_DEPTH_MIN (256) +#define CQM_BAT_SIZE_FT_PF (192) + +#define CQM_WQE_WF_LINK 1 +#define CQM_WQE_WF_NORMAL 0 +#define CQM_QUEUE_LINK_MODE 0 +#define CQM_QUEUE_RING_MODE 1 +#define CQM_4K_PAGE_SIZE 4096 + +#define CQM_SUCCESS 0 +#define CQM_FAIL -1 +#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2 +#define CQM_CMD_TIMEOUT 10000 /*ms*/ + +#define CQM_INDEX_INVALID ~(0U) +#define CQM_INDEX_RESERVED (0xfffff) /* reserved by cqm alloc */ + +enum cqm_bat_entry_type_e { + CQM_BAT_ENTRY_T_CFG = 0, + CQM_BAT_ENTRY_T_HASH, + CQM_BAT_ENTRY_T_QPC, + CQM_BAT_ENTRY_T_SCQC, + CQM_BAT_ENTRY_T_SRQC, + CQM_BAT_ENTRY_T_MPT, + CQM_BAT_ENTRY_T_GID, + CQM_BAT_ENTRY_T_LUN, + CQM_BAT_ENTRY_T_TASKMAP, + CQM_BAT_ENTRY_T_L3I, + CQM_BAT_ENTRY_T_CHILDC, + CQM_BAT_ENTRY_T_TIMER, + CQM_BAT_ENTRY_T_XID2CID, + CQM_BAT_ENTRY_T_REORDER, + + CQM_BAT_ENTRY_T_INVALID = 0xff, +}; + +enum cqm_cmd_type_e { + CQM_CMD_T_INVALID = 0, + CQM_CMD_T_BAT_UPDATE, + CQM_CMD_T_CLA_UPDATE, + CQM_CMD_T_BLOOMFILTER_SET, + CQM_CMD_T_BLOOMFILTER_CLEAR, + CQM_CMD_T_COMPACT_SRQ_UPDATE, + CQM_CMD_T_CLA_CACHE_INVALID, + CQM_CMD_T_BLOOMFILTER_INIT, + QM_CMD_T_MAX +}; + +/*linkwqe*/ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 +#define CQM_LINK_WQE_OWNER_VALID 1 +#define CQM_LINK_WQE_OWNER_INVALID 0 + +/*CLA update mode*/ +#define CQM_CLA_RECORD_NEW_GPA 0 +#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 +#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 + +#define CQM_CLA_LVL_0 0 +#define CQM_CLA_LVL_1 1 +#define CQM_CLA_LVL_2 2 + +#define CQM_MAX_INDEX_BIT 19 +#define CQM_CHIP_CACHELINE 256 +enum cqm_cmd_ack_type_e { + CQM_CMD_ACK_TYPE_CMDQ = 0, /* ack: write back to cmdq */ + CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* ack report scq by root ctx ctx */ + CQM_CMD_ACK_TYPE_APP_CQN = 2 /* ack report scq by parent ctx */ +}; + +struct cqm_bat_entry_cfg_s { + u32 cur_conn_num_h_4 
:4; + u32 rsv1 :4; + u32 max_conn_num :20; + u32 rsv2 :4; + + u32 max_conn_cache :10; + u32 rsv3 :6; + u32 cur_conn_num_l_16 :16; + + u32 bloom_filter_addr :16; + u32 cur_conn_cache :10; + u32 rsv4 :6; + + u32 bucket_num :16; + u32 bloom_filter_len :16; +}; + +#define CQM_BAT_NO_BYPASS_CACHE 0 +#define CQM_BAT_ENTRY_SIZE_256 0 +#define CQM_BAT_ENTRY_SIZE_512 1 +#define CQM_BAT_ENTRY_SIZE_1024 2 + +struct cqm_bat_entry_standerd_s { + u32 entry_size :2; + u32 rsv1 :6; + u32 max_number :20; + u32 rsv2 :4; + + u32 cla_gpa_h :32; + + u32 cla_gpa_l :32; + + u32 rsv3 :8; + u32 z :5; + u32 y :5; + u32 x :5; + u32 rsv24 :1; + u32 bypass :1; + u32 cla_level :2; + u32 rsv5 :5; +}; + +struct cqm_bat_entry_taskmap_s { + u32 gpa0_h; + u32 gpa0_l; + + u32 gpa1_h; + u32 gpa1_l; + + u32 gpa2_h; + u32 gpa2_l; + + u32 gpa3_h; + u32 gpa3_l; +}; + +struct cqm_cla_cache_invalid_cmd_s { + u32 gpa_h; + u32 gpa_l; + u32 cache_size;/* CLA cache size=4096B */ +}; + +struct cqm_cla_update_cmd_s { + /* need to update gpa addr */ + u32 gpa_h; + u32 gpa_l; + + /* update value */ + u32 value_h; + u32 value_l; +}; + +struct cqm_bat_update_cmd_s { +#define CQM_BAT_MAX_SIZE 256 + u32 offset; /* byte offset,16Byte aligned */ + u32 byte_len; /* max size: 256byte */ + u8 data[CQM_BAT_MAX_SIZE]; +}; + +struct cqm_handle_s; + +struct cqm_linkwqe_s { + u32 rsv1 :14; + u32 wf :1; + u32 rsv2 :14; + u32 ctrlsl :2; + u32 o :1; + + u32 rsv3 :31; + u32 lp :1; + + u32 next_page_gpa_h; + u32 next_page_gpa_l; + + u32 next_buffer_addr_h; + u32 next_buffer_addr_l; +}; + +struct cqm_srq_linkwqe_s { + struct cqm_linkwqe_s linkwqe; + /*add by wss for srq*/ + u32 current_buffer_gpa_h; + u32 current_buffer_gpa_l; + u32 current_buffer_addr_h; + u32 current_buffer_addr_l; + + u32 fast_link_page_addr_h; + u32 fast_link_page_addr_l; + + u32 fixed_next_buffer_addr_h; + u32 fixed_next_buffer_addr_l; +}; + +union cqm_linkwqe_first_64b_s { + struct cqm_linkwqe_s basic_linkwqe; + u32 value[16]; +}; + +struct cqm_linkwqe_second_64b_s { + u32 rsvd0[4]; + u32 rsvd1[4]; + union { + struct { + u32 rsvd0[3]; + u32 rsvd1 :29; + u32 toe_o :1; + u32 resvd2 :2; + } bs; + u32 value[4]; + } third_16B; + + union { + struct { + u32 rsvd0[2]; + u32 rsvd1 :31; + u32 ifoe_o :1; + u32 rsvd2; + } bs; + u32 value[4]; + } forth_16B; + +}; + +struct cqm_linkwqe_128b_s { + union cqm_linkwqe_first_64b_s first_64b; + struct cqm_linkwqe_second_64b_s second_64b; +}; + +s32 cqm_bat_init(struct cqm_handle_s *cqm_handle); +void cqm_bat_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_cla_init(struct cqm_handle_s *cqm_handle); +void cqm_cla_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle); +void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle); +void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle); + +#endif /* __CQM_OBJECT_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c new file mode 100644 index 0000000000000000000000000000000000000000..adf70506c1b823d68297d946b93f55e6b4695e04 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c @@ -0,0 +1,953 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" 
+#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_tool.h" +#include "hifc_dbgtool_knl.h" + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +#define DBGTOOL_MSG_MAX_SIZE 2048ULL +#define HIFC_SELF_CMD_UP2PF_FFM 0x26 + +void *g_card_node_array[MAX_CARD_NUM] = {0}; +void *g_hifc_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_hifc_card_phy_addr[MAX_CARD_NUM] = {0}; +/* lock for g_hifc_card_vir_addr */ +struct mutex g_hifc_addr_lock; +int g_hifc_card_id; + +/* dbgtool character device name, class name, dev path */ +#define CHR_DEV_DBGTOOL "hifc_dbgtool_chr_dev" +#define CLASS_DBGTOOL "hifc_dbgtool_class" +#define DBGTOOL_DEV_PATH "/dev/hifc_dbgtool_chr_dev" + +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; + struct ffm_record_info *ffm; +}; + +static dev_t dbgtool_dev_id; /* device id */ +static struct cdev dbgtool_chr_dev; /* struct of char device */ + +/*lint -save -e104 -e808*/ +static struct class *dbgtool_d_class; /* struct of char class */ +/*lint -restore*/ + +static int g_dbgtool_init_flag; +static int g_dbgtool_ref_cnt; + +static int dbgtool_knl_open(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static int dbgtool_knl_release(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static ssize_t dbgtool_knl_read(struct file *pfile, + char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static ssize_t dbgtool_knl_write(struct file *pfile, + const char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static bool is_valid_phy_addr(u64 offset) +{ + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + if (offset == g_hifc_card_phy_addr[i]) + return true; + } + + return false; +} + +int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %lu is bigger than alloc\n", vmsize); + return -EAGAIN; + } + + if (offset && !is_valid_phy_addr((u64)offset) && + !hifc_is_valid_bar_addr((u64)offset)) { + pr_err("offset is invalid"); + return -EAGAIN; + } + + /* old version of tool set vma->vm_pgoff to 0 */ + phy_addr = offset ? 
offset : g_hifc_card_phy_addr[g_hifc_card_id]; + if (!phy_addr) { + pr_err("Card_id = %d physical address is 0\n", g_hifc_card_id); + return -EAGAIN; + } + + if (remap_pfn_range(vma, vma->vm_start, + (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * dbgtool_knl_api_cmd_read - used for read operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_api_cmd_read(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_rd.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining pf_id chipif pointer */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id); + return -EFAULT; + } + + /* alloc cmd and ack memory */ + size = para->param.api_rd.size; + if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Read cmd size invalid or more than 2K\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc read cmd mem fail\n"); + return -ENOMEM; + } + + ack_size = para->param.api_rd.ack_size; + if (para->param.api_rd.ack_size == 0 || + ack_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Read cmd ack size is 0\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL); + if (!ack) { + pr_err("Alloc read ack mem fail\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + /* Invoke the api cmd interface read content*/ + ret = hifc_api_cmd_read_ack(hwdev, para->param.api_rd.dest, + cmd, size, ack, ack_size); + if (ret) { + pr_err("Api send single cmd ack fail!\n"); + goto api_rd_fail; + } + + /* Copy the contents of the ack to the user state */ + if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) { + pr_err("Copy ack to user fail\n"); + ret = -EFAULT; + } +api_rd_fail: +copy_user_cmd_fail: + kfree(ack); +alloc_ack_mem_fail: + kfree(cmd); + return ret; +} + +/** + * dbgtool_knl_api_cmd_write - used for write operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_api_cmd_write(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_wr.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining chipif pointer according to pf_id */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null\n", pf_id); + return -EFAULT; + } + + /* alloc cmd memory */ + size = para->param.api_wr.size; + if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Write cmd size invalid or more than 2K\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc write cmd mem fail\n"); + return -ENOMEM; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user 
fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + + /* api cmd interface is invoked to write the content */ + ret = hifc_api_cmd_write_nack(hwdev, para->param.api_wr.dest, + cmd, size); + if (ret) + pr_err("Api send single cmd nack fail\n"); + +copy_user_cmd_fail: + kfree(cmd); + return ret; +} + +void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx, + void **g_func_handle_array) +{ + u32 func_idx; + struct hifc_hwdev *hwdev; + + if (!dev_info) { + pr_err("Params error!\n"); + return; + } + + /* pf at most 16 */ + for (func_idx = 0; func_idx < 16; func_idx++) { + hwdev = (struct hifc_hwdev *)g_func_handle_array[func_idx]; + + dev_info[func_idx].phy_addr = g_hifc_card_phy_addr[card_idx]; + + if (!hwdev) { + dev_info[func_idx].bar0_size = 0; + dev_info[func_idx].bus = 0; + dev_info[func_idx].slot = 0; + dev_info[func_idx].func = 0; + } else { + dev_info[func_idx].bar0_size = + pci_resource_len + (((struct pci_dev *)hwdev->pcidev_hdl), 0); + dev_info[func_idx].bus = + ((struct pci_dev *) + hwdev->pcidev_hdl)->bus->number; + dev_info[func_idx].slot = + PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + dev_info[func_idx].func = + PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + } + } +} + +/** + * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para, + void **g_func_handle_array) +{ + struct pf_dev_info dev_info[16] = { {0} }; + unsigned char *tmp; + int i; + + mutex_lock(&g_hifc_addr_lock); + if (!g_hifc_card_vir_addr[g_hifc_card_id]) { + g_hifc_card_vir_addr[g_hifc_card_id] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_hifc_card_vir_addr[g_hifc_card_id]) { + pr_err("Alloc dbgtool api chain fail!\n"); + mutex_unlock(&g_hifc_addr_lock); + return -EFAULT; + } + + memset(g_hifc_card_vir_addr[g_hifc_card_id], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_hifc_card_phy_addr[g_hifc_card_id] = + virt_to_phys(g_hifc_card_vir_addr[g_hifc_card_id]); + if (!g_hifc_card_phy_addr[g_hifc_card_id]) { + pr_err("phy addr for card %d is 0\n", g_hifc_card_id); + free_pages((unsigned long)g_hifc_card_vir_addr + [g_hifc_card_id], DBGTOOL_PAGE_ORDER); + g_hifc_card_vir_addr[g_hifc_card_id] = NULL; + mutex_unlock(&g_hifc_addr_lock); + return -EFAULT; + } + + tmp = g_hifc_card_vir_addr[g_hifc_card_id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_hifc_addr_lock); + + chipif_get_all_pf_dev_info(dev_info, g_hifc_card_id, + g_func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(para->param.dev_info, dev_info, + (unsigned int)sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info))) { + pr_err("Copy ffm_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_clr - Clear FFM information + * @para: unused + * 
@dbgtool_info: the dbgtool info + */ +static void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + dbgtool_info->ffm->ffm_num = 0; +} + +/** + * dbgtool_knl_msg_to_up - After receiving dbgtool command sends a message to uP + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_msg_to_up(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + void *buf_in; + void *buf_out; + u16 out_size; + u8 pf_id; + + if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("User data(%d) more than 2KB\n", + para->param.msg2up.in_size); + return -EFAULT; + } + + pf_id = para->param.msg2up.pf_id; + /* pf at most 16 */ + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id); + return -EFAULT; + } + + if (!g_func_handle_array[pf_id]) { + pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id); + return -EFAULT; + } + + /* alloc buf_in and buf_out memory, apply for 2K */ + buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL); + if (!buf_in) { + pr_err("Alloc buf_in mem fail\n"); + return -ENOMEM; + } + + buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, 0); + if (!buf_out) { + pr_err("Alloc buf_out mem fail\n"); + ret = -ENOMEM; + goto alloc_buf_out_mem_fail; + } + + /* copy buf_in from the user state */ + if (copy_from_user(buf_in, para->param.msg2up.buf_in, + (unsigned long)para->param.msg2up.in_size)) { + pr_err("Copy buf_in from user fail\n"); + ret = -EFAULT; + goto copy_user_buf_in_fail; + } + + out_size = DBGTOOL_MSG_MAX_SIZE; + /* Invoke the pf2up communication interface */ + ret = hifc_msg_to_mgmt_sync(g_func_handle_array[pf_id], + para->param.msg2up.mod, + para->param.msg2up.cmd, + buf_in, + para->param.msg2up.in_size, + buf_out, + &out_size, + 0); + if (ret) + goto msg_2_up_fail; + + /* Copy the out_size and buf_out content to user mode */ + if (copy_to_user(para->param.msg2up.out_size, &out_size, + (unsigned int)sizeof(out_size))) { + pr_err("Copy out_size to user fail\n"); + ret = -EFAULT; + goto copy_out_size_fail; + } + + if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) { + pr_err("Copy buf_out to user fail\n"); + ret = -EFAULT; + } + +copy_out_size_fail: +msg_2_up_fail: +copy_user_buf_in_fail: + kfree(buf_out); +alloc_buf_out_mem_fail: + kfree(buf_in); + return ret; +} + +long hifc_dbgtool_knl_free_mem(int id) +{ + unsigned char *tmp; + int i; + + mutex_lock(&g_hifc_addr_lock); + + if (!g_hifc_card_vir_addr[id]) { + mutex_unlock(&g_hifc_addr_lock); + return 0; + } + + tmp = g_hifc_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)g_hifc_card_vir_addr[id], DBGTOOL_PAGE_ORDER); + g_hifc_card_vir_addr[id] = NULL; + g_hifc_card_phy_addr[id] = 0; + + mutex_unlock(&g_hifc_addr_lock); + + return 0; +} + +static int get_card_id_by_name(char *chip_name) +{ + struct card_node *card_info = NULL; + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(chip_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM) { + pr_err("Can't find this card %s\n", chip_name); + return -EFAULT; + } + + return i; +} + +/*lint -save -e771 -e794*/ + +static long process_dbgtool_cmd(struct dbgtool_param *param, unsigned int cmd, + int idx) +{ + struct dbgtool_k_glb_info 
*dbgtool_info; + struct card_node *card_info = NULL; + unsigned int real_cmd; + long ret = 0; + + g_hifc_card_id = idx; + card_info = (struct card_node *)g_card_node_array[idx]; + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + real_cmd = _IOC_NR(cmd); + + switch (real_cmd) { + case DBGTOOL_CMD_API_RD: + ret = dbgtool_knl_api_cmd_read(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_API_WR: + ret = dbgtool_knl_api_cmd_write(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(param, dbgtool_info); + break; + case DBGTOOL_CMD_FFM_CLR: + dbgtool_knl_ffm_info_clr(param, dbgtool_info); + break; + case DBGTOOL_CMD_PF_DEV_INFO_GET: + ret = dbgtool_knl_pf_dev_info_get(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_MSG_2_UP: + ret = dbgtool_knl_msg_to_up(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FREE_MEM: + ret = hifc_dbgtool_knl_free_mem(idx); + break; + default: + pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + + return ret; +} + +/** + * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry + * @pfile: the pointer to file + * @cmd: the command type + * @arg: user space + * Return: 0 - success, negative - failure + */ +static long dbgtool_knl_unlocked_ioctl(struct file *pfile, + unsigned int cmd, + unsigned long arg) +{ + struct dbgtool_param param; + int idx; + + (void)memset(¶m, 0, sizeof(param)); + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + pr_err("Copy param from user fail\n"); + return -EFAULT; + } + + param.chip_name[IFNAMSIZ - 1] = '\0'; + idx = get_card_id_by_name(param.chip_name); + if (idx < 0) + return -EFAULT; + + return process_dbgtool_cmd(¶m, cmd, idx); +} + +static struct card_node *get_card_node_by_hwdev(const void *handle) +{ + struct card_node *card_info = NULL; + bool flag = false; + int i, j; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + + for (j = 0; j < MAX_FUNCTION_NUM; j++) { + if (handle == card_info->func_handle_array[j]) { + flag = true; + break; + } + } + + if (flag) + break; + } + + if (i == MAX_CARD_NUM) { + pr_err("Id(%d) cant find this card\n", i); + return NULL; + } + + return card_info; +} + +/** + * ffm_intr_msg_record - FFM interruption records sent up + * @handle: the function handle + * @buf_in: the pointer to input buffer + * @in_size: input buffer size + * @buf_out: the pointer to outputput buffer + * @out_size: output buffer size + */ +static void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct ffm_intr_info *intr; + u32 ffm_idx; + struct timex txc; + struct rtc_time rctm; + struct card_node *card_info = NULL; + + card_info = get_card_node_by_hwdev(handle); + if (!card_info) + return; + + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + if (!dbgtool_info) { + pr_err("Dbgtool info is null\n"); + return; + } + + intr = (struct ffm_intr_info *)buf_in; + + if (!dbgtool_info->ffm) + return; + + ffm_idx = dbgtool_info->ffm->ffm_num; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].err_type = 
intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr = + intr->err_csr_addr; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_value = + intr->err_csr_value; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc.time); + + /* Calculate the time in date value to tm */ + rtc_time_to_tm((unsigned long)txc.time.tv_sec + + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = + (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday; + dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +/*lint -restore*/ + +/*lint -save -e785 -e438*/ +static const struct file_operations dbgtool_file_operations = { + .owner = THIS_MODULE, + .open = dbgtool_knl_open, + .release = dbgtool_knl_release, + .read = dbgtool_knl_read, + .write = dbgtool_knl_write, + .unlocked_ioctl = dbgtool_knl_unlocked_ioctl, + .mmap = hifc_mem_mmap, +}; + +static int dbgtool_create_cdev(void) +{ + struct device *pdevice; + int ret = 0; + + /* alloc device id */ + ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL); + if (ret) { + pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret); + return ret; + } + + cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations); + + ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1); + if (ret) { + pr_err("Add dgbtool dev fail, ret=0x%x\n", ret); + goto cdev_add_fail; + } + + /*lint -save -e160*/ + dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL); + /*lint -restore*/ + if (IS_ERR(dbgtool_d_class)) { + pr_err("Create dgbtool class fail\n"); + ret = -EFAULT; + goto cls_create_fail; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(dbgtool_d_class, NULL, + dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL); + if (IS_ERR(pdevice)) { + pr_err("Create dgbtool device fail\n"); + ret = -EFAULT; + goto dev_create_fail; + } + + return 0; + +dev_create_fail: + class_destroy(dbgtool_d_class); +cls_create_fail: + cdev_del(&(dbgtool_chr_dev)); +cdev_add_fail: + unregister_chrdev_region(dbgtool_dev_id, 1); + + return ret; +} + +/** + * hifc_dbgtool_knl_init - dbgtool character device init + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + * Return: 0 - success, negative - failure + */ +int hifc_dbgtool_knl_init(void *vhwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + struct dbgtool_k_glb_info *dbgtool_info; + struct hifc_hwdev *hwdev = vhwdev; + int ret = 0; + int id; + + if (hifc_func_type(hwdev) == TYPE_VF) + return 0; + + ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + if (ret) { + pr_err("Failed to sysfs create file\n"); + return ret; + } + + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = hwdev; + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, HIFC_SELF_CMD_UP2PF_FFM, + ffm_intr_msg_record); + + if (chip_info->dbgtool_info) { + chip_info->func_num++; + return 0; + } + + dbgtool_info = (struct dbgtool_k_glb_info *) + kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL); + if (!dbgtool_info) { + pr_err("Failed to allocate dbgtool_info\n"); + ret = -EFAULT; + goto dbgtool_info_fail; + } + 
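+	/* Record the newly allocated per-card debug context; its FFM buffer and
+	 * semaphore are initialized below.
+	 */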
chip_info->dbgtool_info = dbgtool_info; + + /* FFM init */ + dbgtool_info->ffm = (struct ffm_record_info *) + kzalloc(sizeof(struct ffm_record_info), + GFP_KERNEL); + if (!dbgtool_info->ffm) { + pr_err("Failed to allocate cell contexts for a chain\n"); + ret = -EFAULT; + goto dbgtool_info_ffm_fail; + } + + sema_init(&dbgtool_info->dbgtool_sem, 1); + + ret = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id); + if (ret < 0) { + pr_err("Failed to get hifc id\n"); + goto sscanf_chdev_fail; + } + + g_card_node_array[id] = chip_info; + chip_info->func_num++; + + if (g_dbgtool_init_flag) { + g_dbgtool_ref_cnt++; + /* already initialized */ + return 0; + } + + ret = dbgtool_create_cdev(); + if (ret) + goto alloc_chdev_fail; + + g_dbgtool_init_flag = 1; + g_dbgtool_ref_cnt = 1; + mutex_init(&g_hifc_addr_lock); + + return 0; + +alloc_chdev_fail: + g_card_node_array[id] = NULL; +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + dbgtool_info = NULL; + chip_info->dbgtool_info = NULL; +dbgtool_info_fail: + hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM); + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL; + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + return ret; +} + +/** + * hifc_dbgtool_knl_deinit - dbgtool character device deinit + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + */ +void hifc_dbgtool_knl_deinit(void *vhwdev, void *chip_node) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *chip_info = (struct card_node *)chip_node; + int id; + int err; + struct hifc_hwdev *hwdev = vhwdev; + + if (hifc_func_type(hwdev) == TYPE_VF) + return; + + hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM); + + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL; + + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + + chip_info->func_num--; + if (chip_info->func_num) + return; + + err = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) + pr_err("Failed to get hifc id\n"); + + g_card_node_array[id] = NULL; + + dbgtool_info = chip_info->dbgtool_info; + /* FFM deinit */ + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + + (void)hifc_dbgtool_knl_free_mem(id); + + if (g_dbgtool_init_flag) { + if ((--g_dbgtool_ref_cnt)) + return; + } + + if (!dbgtool_d_class) + return; + + device_destroy(dbgtool_d_class, dbgtool_dev_id); + class_destroy(dbgtool_d_class); + dbgtool_d_class = NULL; + + cdev_del(&(dbgtool_chr_dev)); + unregister_chrdev_region(dbgtool_dev_id, 1); + + g_dbgtool_init_flag = 0; +} + +/*lint -restore*/ diff --git a/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h new file mode 100644 index 0000000000000000000000000000000000000000..ef3372f1dc8d17fe08017f0de6f4f8016c6a7c5e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __DBGTOOL_KNL_H__ +#define __DBGTOOL_KNL_H__ + +enum dbg_tool_cmd { + DBGTOOL_CMD_API_RD = 0, + DBGTOOL_CMD_API_WR, + + DBGTOOL_CMD_FFM_RD, + DBGTOOL_CMD_FFM_CLR, + + DBGTOOL_CMD_PF_DEV_INFO_GET, + + DBGTOOL_CMD_MSG_2_UP, + + DBGTOOL_CMD_FREE_MEM, + DBGTOOL_CMD_NUM +}; + +struct api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + 
u16 size; + void *ack; + u16 ack_size; +}; + +struct api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +/* Interrupt at most records, interrupt will be recorded in the FFM */ +#define FFM_RECORD_NUM_MAX 64 + +struct ffm_intr_tm_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + + u8 sec; /* second*/ + u8 min; /* minute */ + u8 hour; /* hour */ + u8 mday; /* day */ + u8 mon; /* month */ + u16 year; /* year */ +}; + +struct ffm_record_info { + u32 ffm_num; + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; +}; + +struct msg_2_up { + u8 pf_id; /* which pf sends messages to the up */ + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; + struct api_cmd_wr api_wr; + struct pf_dev_info *dev_info; + struct ffm_record_info *ffm_rd; + struct msg_2_up msg2up; + } param; + char chip_name[16]; +}; + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM 64 +#endif +#define DBGTOOL_PAGE_ORDER 10 + +int hifc_dbgtool_knl_init(void *vhwdev, void *chip_node); +void hifc_dbgtool_knl_deinit(void *vhwdev, void *chip_node); +int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma); +void hifc_chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id, + void **g_func_handle_array); +long hifc_dbgtool_knl_free_mem(int id); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.c b/drivers/scsi/huawei/hifc/hifc_eqs.c new file mode 100644 index 0000000000000000000000000000000000000000..803866e1fbf95cab151b052c165a8038f44a76ae --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_eqs.c @@ -0,0 +1,1347 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_hwdev.h" +#include "hifc_eqs.h" + +#define HIFC_EQS_WQ_NAME "hifc_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_QPS_NUM_SHIFT 22 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_GET(val, member) \ + (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \ + AEQ_CTRL_0_##member##_MASK) + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK \ + << AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_GET(val, member) \ + (((val) 
>> AEQ_CTRL_1_##member##_SHIFT) & \ + AEQ_CTRL_1_##member##_MASK) + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK \ + << AEQ_CTRL_1_##member##_SHIFT))) + +#define HIFC_EQ_PROD_IDX_MASK 0xFFFFF +#define HIFC_TASK_PROCESS_EQE_LIMIT 1024 +#define HIFC_EQ_UPDATE_CI_STEP 64 + +static uint g_aeq_len = HIFC_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HIFC_MIN_AEQ_LEN) + " - " __stringify(HIFC_MAX_AEQ_LEN)); + +static uint g_ceq_len = HIFC_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HIFC_MIN_CEQ_LEN) + " - " __stringify(HIFC_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 +#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK \ + << EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? \ + HIFC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HIFC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? 
\ + HIFC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HIFC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hifc_aeq_elem *)\ + GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_LMT_KICK_DEFAULT 0 +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 +#define EQ_WRAPPED_SHIFT 20 +#define EQ_VALID_SHIFT 31 +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) +#define EQ_MIN_PAGE_SIZE 0x1000U +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hifc_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hifc_ceqs, ceq[0]) + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hifc_eq *aeq = (struct hifc_eq *)data; + struct hifc_hwdev *hwdev = aeq->hwdev; + + struct hifc_aeqs *aeqs = aeq_to_aeqs(aeq); + struct workqueue_struct *workq = aeqs->workq; + struct hifc_eq_work *aeq_work; + + /* clear resend timer cnt register */ + hifc_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + queue_work(workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hifc_eq *ceq = (struct hifc_eq *)data; + struct hifc_ceq_tasklet_data *ceq_tasklet_data; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + hifc_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + ceq_tasklet_data = &ceq->ceq_tasklet_data; + ceq_tasklet_data->data = data; + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static u8 eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + u8 idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return checksum & 0xF; +} + +/** + * hifc_aeq_register_hw_cb - register aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @hw_cb: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event, + hifc_aeq_hwe_cb hwe_cb) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || !hwe_cb || event >= HIFC_MAX_AEQ_EVENTS) + return -EINVAL; + + aeqs = ((struct hifc_hwdev 
*)hwdev)->aeqs; + + aeqs->aeq_hwe_cb[event] = hwe_cb; + + set_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + return 0; +} + +/** + * hifc_aeq_unregister_hw_cb - unregister the aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + **/ +void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || event >= HIFC_MAX_AEQ_EVENTS) + return; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + clear_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + while (test_bit(HIFC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_hwe_cb[event] = NULL; +} + +/** + * hifc_aeq_register_sw_cb - register aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + * @sw_cb: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event, + hifc_aeq_swe_cb aeq_swe_cb) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || !aeq_swe_cb || event >= HIFC_MAX_AEQ_SW_EVENTS) + return -EINVAL; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + aeqs->aeq_swe_cb[event] = aeq_swe_cb; + + set_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + return 0; +} + +/** + * hifc_aeq_unregister_sw_cb - unregister the aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + **/ +void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || event >= HIFC_MAX_AEQ_SW_EVENTS) + return; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + clear_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + while (test_bit(HIFC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_swe_cb[event] = NULL; +} + +/** + * hifc_ceq_register_sw_cb - register ceq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @callback: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event, + hifc_ceq_event_cb callback) +{ + struct hifc_ceqs *ceqs; + + if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS) + return -EINVAL; + + ceqs = ((struct hifc_hwdev *)hwdev)->ceqs; + + ceqs->ceq_cb[event] = callback; + + set_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + return 0; +} + +/** + * hifc_ceq_unregister_cb - unregister ceq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + **/ +void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event) +{ + struct hifc_ceqs *ceqs; + + if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS) + return; + + ceqs = ((struct hifc_hwdev *)hwdev)->ceqs; + + clear_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + while (test_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event])) + usleep_range(900, 1000); + + ceqs->ceq_cb[event] = NULL; +} + +/** + * set_eq_cons_idx - write the cons idx to the hw + * @eq: The event queue to update the cons idx for + * @arm_state: arm state value + **/ +static void set_eq_cons_idx(struct hifc_eq *eq, u32 arm_state) +{ + u32 eq_wrap_ci, val; + u32 addr = EQ_CONS_IDX_REG_ADDR(eq); + + eq_wrap_ci = EQ_CONS_IDX(eq); + + /* other filed is resverd, set to 0 */ + val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) | + EQ_CONS_IDX_SET(arm_state, INT_ARMED); + + val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); + + 
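+	/* bits 27:24 of the register carry the XOR of all nibbles of val
+	 * (computed by eq_cons_idx_checksum_set above)
+	 */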
hifc_hwif_write_reg(eq->hwdev->hwif, addr, val); +} + +/** + * ceq_event_handler - handler for the ceq events + * @ceqs: ceqs part of the chip + * @ceqe: ceq element of the event + **/ +static void ceq_event_handler(struct hifc_ceqs *ceqs, u32 ceqe) +{ + struct hifc_hwdev *hwdev = ceqs->hwdev; + enum hifc_ceq_event event = CEQE_TYPE(ceqe); + u32 ceqe_data = CEQE_DATA(ceqe); + + if (event >= HIFC_MAX_CEQ_EVENTS) { + sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe data: 0x%x\n", + event, ceqe_data); + return; + } + + set_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); + + if (ceqs->ceq_cb[event] && + test_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event])) + ceqs->ceq_cb[event](hwdev, ceqe_data); + + clear_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); +} + +static void aeq_swe_handler(struct hifc_aeqs *aeqs, + struct hifc_aeq_elem *aeqe_pos, + enum hifc_aeq_type event) +{ + enum hifc_ucode_event_type ucode_event; + enum hifc_aeq_sw_type sw_event; + u64 aeqe_data; + u8 lev; + + ucode_event = event; + /* SW event uses only the first 8B */ + sw_event = ucode_event >= HIFC_NIC_FATAL_ERROR_MAX ? + HIFC_STATEFULL_EVENT : + HIFC_STATELESS_EVENT; + aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data)); + set_bit(HIFC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + if (aeqs->aeq_swe_cb[sw_event] && + test_bit(HIFC_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_event])) { + lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev, + ucode_event, + aeqe_data); + hifc_swe_fault_handler(aeqs->hwdev, lev, + ucode_event, aeqe_data); + } + clear_bit(HIFC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); +} + +static void aeq_hwe_handler(struct hifc_aeqs *aeqs, + struct hifc_aeq_elem *aeqe_pos, + enum hifc_aeq_type event, u32 aeqe_desc) +{ + u8 size; + + if (event < HIFC_MAX_AEQ_EVENTS) { + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(HIFC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(HIFC_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->hwdev, + aeqe_pos->aeqe_data, size); + clear_bit(HIFC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + + return; + } + + sdk_warn(aeqs->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); +} + +/** + * aeq_irq_handler - handler for the aeq event + * @eq: the async event queue of the event + * Return: true - success, false - failure + **/ +static bool aeq_irq_handler(struct hifc_eq *eq) +{ + struct hifc_aeqs *aeqs = aeq_to_aeqs(eq); + struct hifc_aeq_elem *aeqe_pos; + enum hifc_aeq_type event; + u32 aeqe_desc; + u32 i, eqe_cnt = 0; + + for (i = 0; i < HIFC_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + return false; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the aeq element until we have + * verified the descriptor has been processed and + * written back. 
+ */ + dma_rmb(); + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) + aeq_swe_handler(aeqs, aeqe_pos, event); + else + aeq_hwe_handler(aeqs, aeqe_pos, event, aeqe_desc); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + * Return: true - success, false - failure + **/ +static bool ceq_irq_handler(struct hifc_eq *eq) +{ + struct hifc_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + * Return: true - success, false - failure + **/ +static bool eq_irq_handler(void *data) +{ + struct hifc_eq *eq = (struct hifc_eq *)data; + bool uncompleted; + + if (eq->type == HIFC_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? HIFC_EQ_NOT_ARMED : HIFC_EQ_ARMED); + + return uncompleted; +} + +static void reschedule_eq_handler(struct hifc_eq *eq) +{ + if (eq->type == HIFC_AEQ) { + struct hifc_aeqs *aeqs = aeq_to_aeqs(eq); + struct workqueue_struct *workq = aeqs->workq; + struct hifc_eq_work *aeq_work = &eq->aeq_work; + + queue_work(workq, &aeq_work->work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + **/ + +static void ceq_tasklet(ulong ceq_data) +{ + struct hifc_ceq_tasklet_data *ceq_tasklet_data = + (struct hifc_ceq_tasklet_data *)ceq_data; + struct hifc_eq *eq = (struct hifc_eq *)ceq_tasklet_data->data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(ceq_tasklet_data->data)) + reschedule_eq_handler(ceq_tasklet_data->data); +} + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct hifc_eq_work *aeq_work = + container_of(work, struct hifc_eq_work, work); + + if (eq_irq_handler(aeq_work->data)) + reschedule_eq_handler(aeq_work->data); +} + +struct hifc_ceq_ctrl_reg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; +}; + +static int set_ceq_ctrl_reg(struct hifc_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct hifc_ceq_ctrl_reg ceq_ctrl = {0}; + u16 in_size = sizeof(ceq_ctrl); + u16 out_size = sizeof(ceq_ctrl); + int err; + + err = hifc_global_func_id_get(hwdev, &ceq_ctrl.func_id); + if (err) + return err; + + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, + &ceq_ctrl, in_size, + &ceq_ctrl, &out_size, 0); + if (err || !out_size || ceq_ctrl.status) { + 
sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + * Return: 0 - success, negative - failure + **/ +static int set_eq_ctrls(struct hifc_eq *eq) +{ + enum hifc_eq_type type = eq->type; + struct hifc_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HIFC_PCI_INTF_IDX(hwif); + int err; + + if (type == HIFC_AEQ) { + /* set ctrl0 */ + addr = HIFC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hifc_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + + AEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + hifc_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hifc_hwif_write_reg(hwif, addr, ctrl1); + + } else { + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) | + CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + /* set ceq ctrl reg through mgmt cpu */ + err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void ceq_elements_init(struct hifc_eq *eq, u32 init_val) +{ + u32 i; + u32 *ceqe; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void aeq_elements_init(struct hifc_eq *eq, u32 init_val) +{ + struct hifc_aeq_elem *aeqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +static void free_eq_pages_desc(struct hifc_eq *eq) +{ + kfree(eq->virt_addr_for_free); + kfree(eq->dma_addr_for_free); + kfree(eq->virt_addr); + kfree(eq->dma_addr); +} + +static int alloc_eq_pages_desc(struct hifc_eq *eq) +{ + u64 dma_addr_size, virt_addr_size; + int err; + + dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr); + virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr); + + eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr) + return -ENOMEM; + + eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto virt_addr_alloc_err; + } + + eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL); + if 
(!eq->dma_addr_for_free) { + err = -ENOMEM; + goto dma_addr_free_alloc_err; + } + + eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr_for_free) { + err = -ENOMEM; + goto virt_addr_free_alloc_err; + } + + return 0; + +virt_addr_free_alloc_err: + kfree(eq->dma_addr_for_free); +dma_addr_free_alloc_err: + kfree(eq->virt_addr); +virt_addr_alloc_err: + kfree(eq->dma_addr); + return err; +} + +#define IS_ALIGN(x, a) (((x) & ((a) - 1)) == 0) + +static int init_eq_elements(struct hifc_eq *eq) +{ + u32 init_val; + + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (!IS_ALIGN(eq->num_elem_in_pg, eq->num_elem_in_pg)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + return -EINVAL; + } + + init_val = EQ_WRAPPED(eq); + + if (eq->type == HIFC_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); + + return 0; +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + * Return: 0 - success, negative - failure + **/ +static int alloc_eq_pages(struct hifc_eq *eq) +{ + struct hifc_hwif *hwif = eq->hwdev->hwif; + u16 pg_num, i; + u32 reg; + int err; + u8 flag = 0; + + err = alloc_eq_pages_desc(eq); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); + return err; + } + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) { + eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent + (eq->hwdev->dev_hdl, eq->page_size, + &eq->dma_addr_for_free[pg_num], GFP_KERNEL); + if (!eq->virt_addr_for_free[pg_num]) { + err = -ENOMEM; + goto dma_alloc_err; + } + + eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num]; + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num]; + if (!IS_ALIGN(eq->dma_addr_for_free[pg_num], + eq->page_size)) { + sdk_info(eq->hwdev->dev_hdl, + "Address is not aligned to %u-bytes as hardware required\n", + eq->page_size); + sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n", + ((eq->page_size) >> 1)); + eq->dma_addr[pg_num] = ALIGN + (eq->dma_addr_for_free[pg_num], + (u64)((eq->page_size) >> 1)); + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] + + ((u64)eq->dma_addr[pg_num] + - (u64)eq->dma_addr_for_free[pg_num]); + flag = 1; + } + reg = HIFC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hifc_hwif_write_reg(hwif, reg, + upper_32_bits(eq->dma_addr[pg_num])); + + reg = HIFC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hifc_hwif_write_reg(hwif, reg, + lower_32_bits(eq->dma_addr[pg_num])); + } + + if (flag) { + eq->page_size = eq->page_size >> 1; + eq->eq_len = eq->eq_len >> 1; + } + + err = init_eq_elements(eq); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to init eq elements\n"); + goto dma_alloc_err; + } + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_num; i++) + dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size, + eq->virt_addr_for_free[i], + eq->dma_addr_for_free[i]); + free_eq_pages_desc(eq); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + **/ +static void free_eq_pages(struct hifc_eq *eq) +{ + struct hifc_hwdev *hwdev = eq->hwdev; + u16 pg_num; + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) + dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size, + eq->virt_addr_for_free[pg_num], + eq->dma_addr_for_free[pg_num]); + + free_eq_pages_desc(eq); +} + +static inline u32 get_page_size(struct hifc_eq *eq) +{ + u32 total_size; + u16 count, n = 0; + + total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE); + + if (total_size <= 
(HIFC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE)) + return EQ_MIN_PAGE_SIZE; + + count = (u16)(ALIGN((total_size / HIFC_EQ_MAX_PAGES), + EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE); + + if (!(count & (count - 1))) + return EQ_MIN_PAGE_SIZE * count; + + while (count) { + count >>= 1; + n++; + } + + return EQ_MIN_PAGE_SIZE << n; +} + +static int request_eq_irq(struct hifc_eq *eq, enum hifc_eq_type type, + struct irq_info *entry) +{ + int err = 0; + + if (type == HIFC_AEQ) { + struct hifc_eq_work *aeq_work = &eq->aeq_work; + + INIT_WORK(&aeq_work->work, eq_irq_work); + } else { + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, + (ulong)(&eq->ceq_tasklet_data)); + } + + if (type == HIFC_AEQ) { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hifc_aeq%d@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, aeq_interrupt, 0UL, + eq->irq_name, eq); + } else { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hifc_ceq%d@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, ceq_interrupt, 0UL, + eq->irq_name, eq); + } + + return err; +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to hw device + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + **/ +static int init_eq(struct hifc_eq *eq, struct hifc_hwdev *hwdev, u16 q_id, + u32 q_len, enum hifc_eq_type type, struct irq_info *entry) +{ + int err = 0; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == HIFC_AEQ) + hifc_hwif_write_reg(eq->hwdev->hwif, + HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + else + set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = (type == HIFC_AEQ) ? 
+ HIFC_AEQE_SIZE : HIFC_CEQE_SIZE; + + eq->page_size = get_page_size(eq); + eq->orig_page_size = eq->page_size; + eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size); + if (eq->num_pages > HIFC_EQ_MAX_PAGES) { + sdk_err(hwdev->dev_hdl, "Number of pages: %d is too many for eq\n", + eq->num_pages); + return -EINVAL; + } + + err = alloc_eq_pages(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n"); + return err; + } + + eq->eq_irq.msix_entry_idx = entry->msix_entry_idx; + eq->eq_irq.irq_id = entry->irq_id; + + err = set_eq_ctrls(eq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set eq ctrls\n"); + goto init_eq_ctrls_err; + } + + hifc_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); + set_eq_cons_idx(eq, HIFC_EQ_ARMED); + + err = request_eq_irq(eq, type, entry); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n", + err); + goto req_irq_err; + } + + hifc_set_msix_state(hwdev, entry->msix_entry_idx, HIFC_MSIX_ENABLE); + + return 0; + +init_eq_ctrls_err: +req_irq_err: + free_eq_pages(eq); + return err; +} + +/** + * remove_eq - remove eq + * @eq: the event queue + **/ +static void remove_eq(struct hifc_eq *eq) +{ + struct irq_info *entry = &eq->eq_irq; + + hifc_set_msix_state(eq->hwdev, entry->msix_entry_idx, + HIFC_MSIX_DISABLE); + synchronize_irq(entry->irq_id); + + free_irq(entry->irq_id, eq); + + if (eq->type == HIFC_AEQ) { + struct hifc_eq_work *aeq_work = &eq->aeq_work; + + cancel_work_sync(&aeq_work->work); + + /* clear eq_len to avoid hw access host memory */ + hifc_hwif_write_reg(eq->hwdev->hwif, + HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + } else { + tasklet_kill(&eq->ceq_tasklet); + + set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + } + + /* update cons_idx to avoid invalid interrupt */ + eq->cons_idx = hifc_hwif_read_reg(eq->hwdev->hwif, + EQ_PROD_IDX_REG_ADDR(eq)); + set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED); + + free_eq_pages(eq); +} + +/** + * hifc_aeqs_init - init all the aeqs + * @hwdev: the pointer to hw device + * @num_aeqs: number of AEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries) +{ + struct hifc_aeqs *aeqs; + int err; + u16 i, q_id; + + aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); + if (!aeqs) + return -ENOMEM; + + hwdev->aeqs = aeqs; + aeqs->hwdev = hwdev; + aeqs->num_aeqs = num_aeqs; + + aeqs->workq = create_singlethread_workqueue(HIFC_EQS_WQ_NAME); + if (!aeqs->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); + err = -ENOMEM; + goto create_work_err; + } + + if (g_aeq_len < HIFC_MIN_AEQ_LEN || g_aeq_len > HIFC_MAX_AEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n", + g_aeq_len, HIFC_DEFAULT_AEQ_LEN); + g_aeq_len = HIFC_DEFAULT_AEQ_LEN; + } + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len, + HIFC_AEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n", + q_id); + goto init_aeq_err; + } + } + + return 0; + +init_aeq_err: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + +create_work_err: + kfree(aeqs); + + return err; +} + +/** + * hifc_aeqs_free - free all the aeqs + * @hwdev: the pointer to hw device + **/ +void hifc_aeqs_free(struct hifc_hwdev *hwdev) +{ + struct hifc_aeqs *aeqs = hwdev->aeqs; + enum hifc_aeq_type 
aeq_event = HIFC_HW_INTER_INT; + enum hifc_aeq_sw_type sw_aeq_event = HIFC_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < HIFC_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + hifc_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < HIFC_MAX_AEQ_EVENTS; aeq_event++) + hifc_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * hifc_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries) +{ + struct hifc_ceqs *ceqs; + int err; + u16 i, q_id; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < HIFC_MIN_CEQ_LEN || g_ceq_len > HIFC_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n", + g_ceq_len, HIFC_DEFAULT_CEQ_LEN); + g_ceq_len = HIFC_DEFAULT_CEQ_LEN; + } + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + HIFC_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT; + } + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, + HIFC_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n", + q_id); + goto init_ceq_err; + } + } + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * hifc_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + **/ +void hifc_ceqs_free(struct hifc_hwdev *hwdev) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + enum hifc_ceq_event ceq_event = HIFC_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < HIFC_MAX_CEQ_EVENTS; ceq_event++) + hifc_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + +void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hifc_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = aeqs->num_aeqs; +} + +void hifc_dump_aeq_info(struct hifc_hwdev *hwdev) +{ + struct hifc_aeq_elem *aeqe_pos; + struct hifc_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hifc_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hifc_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", + q_id, ci, pi, 
work_busy(&eq->aeq_work.work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} + diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.h b/drivers/scsi/huawei/hifc/hifc_eqs.h new file mode 100644 index 0000000000000000000000000000000000000000..2dcfc432c8f29e8b3ae5d42e004548f000eaf289 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_eqs.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HIFC_EQS_H +#define HIFC_EQS_H + +#define HIFC_MAX_AEQS 3 +#define HIFC_MAX_CEQS 32 + +#define HIFC_EQ_MAX_PAGES 8 + +#define HIFC_AEQE_SIZE 64 +#define HIFC_CEQE_SIZE 4 + +#define HIFC_AEQE_DESC_SIZE 4 +#define HIFC_AEQE_DATA_SIZE \ + (HIFC_AEQE_SIZE - HIFC_AEQE_DESC_SIZE) + +#define HIFC_DEFAULT_AEQ_LEN 4096 +#define HIFC_DEFAULT_CEQ_LEN 8192 + +#define HIFC_MIN_AEQ_LEN 64 +#define HIFC_MAX_AEQ_LEN (512 * 1024) +#define HIFC_MIN_CEQ_LEN 64 +#define HIFC_MAX_CEQ_LEN (1024 * 1024) + +#define HIFC_CEQ_ID_CMDQ 0 +#define EQ_IRQ_NAME_LEN 64 + +/* EQ registers */ +#define HIFC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HIFC_CEQ_MTT_OFF_BASE_ADDR 0x400 + +#define HIFC_EQ_MTT_OFF_STRIDE 0x40 + +#define HIFC_CSR_AEQ_MTT_OFF(id) \ + (HIFC_AEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE) + +#define HIFC_CSR_CEQ_MTT_OFF(id) \ + (HIFC_CEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE) + +#define HIFC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HIFC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HIFC_AEQ) ? \ + HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \ + HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + +#define HIFC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HIFC_AEQ) ? 
\ + HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \ + HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + +#define HIFC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HIFC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HIFC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08 +#define HIFC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C + +#define HIFC_EQ_OFF_STRIDE 0x80 + +#define HIFC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HIFC_AEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HIFC_AEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HIFC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HIFC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HIFC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HIFC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008 +#define HIFC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C + +#define HIFC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HIFC_CEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HIFC_CEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HIFC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HIFC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +enum hifc_eq_type { + HIFC_AEQ, + HIFC_CEQ +}; + +enum hifc_eq_intr_mode { + HIFC_INTR_MODE_ARMED, + HIFC_INTR_MODE_ALWAYS, +}; + +enum hifc_eq_ci_arm_state { + HIFC_EQ_NOT_ARMED, + HIFC_EQ_ARMED, +}; + +struct hifc_eq_work { + struct work_struct work; + void *data; +}; + +struct hifc_ceq_tasklet_data { + void *data; +}; + +struct hifc_eq { + struct hifc_hwdev *hwdev; + u16 q_id; + enum hifc_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + dma_addr_t *dma_addr; + u8 **virt_addr; + dma_addr_t *dma_addr_for_free; + u8 **virt_addr_for_free; + + struct hifc_eq_work aeq_work; + struct tasklet_struct ceq_tasklet; + struct hifc_ceq_tasklet_data ceq_tasklet_data; + + u64 hard_intr_jif; + u64 soft_intr_jif; +}; + +struct hifc_aeq_elem { + u8 aeqe_data[HIFC_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hifc_aeq_cb_state { + HIFC_AEQ_HW_CB_REG = 0, + HIFC_AEQ_HW_CB_RUNNING, + HIFC_AEQ_SW_CB_REG, + HIFC_AEQ_SW_CB_RUNNING, +}; + +struct hifc_aeqs { + struct hifc_hwdev *hwdev; + + hifc_aeq_hwe_cb aeq_hwe_cb[HIFC_MAX_AEQ_EVENTS]; + hifc_aeq_swe_cb aeq_swe_cb[HIFC_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[HIFC_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[HIFC_MAX_AEQ_SW_EVENTS]; + + struct hifc_eq aeq[HIFC_MAX_AEQS]; + u16 num_aeqs; + + struct workqueue_struct *workq; +}; + +enum hifc_ceq_cb_state { + HIFC_CEQ_CB_REG = 0, + HIFC_CEQ_CB_RUNNING, +}; + +struct hifc_ceqs { + struct hifc_hwdev *hwdev; + + hifc_ceq_event_cb ceq_cb[HIFC_MAX_CEQ_EVENTS]; + void *ceq_data[HIFC_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[HIFC_MAX_CEQ_EVENTS]; + + struct hifc_eq ceq[HIFC_MAX_CEQS]; + u16 num_ceqs; +}; + +int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hifc_aeqs_free(struct hifc_hwdev *hwdev); + +int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hifc_ceqs_free(struct hifc_hwdev *hwdev); + +void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct 
irq_info *irqs, + u16 *num_irqs); + +void hifc_dump_aeq_info(struct hifc_hwdev *hwdev); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hba.c b/drivers/scsi/huawei/hifc/hifc_hba.c new file mode 100644 index 0000000000000000000000000000000000000000..bab735916b8e869c683adc761cd3528d2f78265a --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.c @@ -0,0 +1,1627 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_module.h" +#include "hifc_chipitf.h" +#include "hifc_io.h" +#include "hifc_portmng.h" +#include "hifc_lld.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" +#include "hifc_mgmt.h" +#include "hifc_hba.h" + +struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / HIFC_PORT_NUM_PER_TABLE]; +static unsigned long card_num_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; +static struct hifc_card_num_manage_s card_num_manage[HIFC_MAX_CARD_NUM]; +/* probe global lock */ +spinlock_t probe_spin_lock; +unsigned int max_parent_qpc_num; + +static unsigned int hifc_port_config_set(void *v_hba, + enum unf_port_config_set_op_e op_code, + void *v_var_in); +static unsigned int hifc_port_config_get(void *v_hba, + enum unf_port_config_get_op_e op_code, + void *param_out); +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in); +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state); + +struct service_register_template_s service_cqm_temp = { + .scq_ctx_size = HIFC_SCQ_CNTX_SIZE, + /* srq, scq context_size configuration */ + .srq_ctx_size = HIFC_SRQ_CNTX_SIZE, + /* the API of asynchronous event from TILE to driver */ + .aeq_callback = hifc_process_aeqe, +}; + +/* default configuration: auto speed, auto topology, INI+TGT */ +static struct unf_cfg_item_s hifc_port_cfg_parm[] = { + { "port_id", 0, 0x110000, 0xffffff}, + /* port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ + { "port_mode", 0, 0x20, 0xff}, + /* port topology, 0x3: loop, 0xc:p2p, 0xf:auto ,0x10:vn2vn */ + { "port_topology", 0, 0xf, 0x20}, + /* alpa address of port */ + { "port_alpa", 0, 0xdead, 0xffff}, + /* queue depth of originator registered to SCSI midlayer */ + { "max_queue_depth", 0, 512, 512}, + { "sest_num", 0, 4096, 4096}, + { "max_login", 0, 2048, 2048}, + /* nodename from 32 bit to 64 bit */ + { "node_name_high", 0, 0x1000286e, 0xffffffff}, + /* nodename from 0 bit to 31 bit */ + { "node_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* portname from 32 bit to 64 bit */ + { "port_name_high", 0, 0x2000286e, 0xffffffff}, + /* portname from 0 bit to 31 bit */ + { "port_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* port speed 0:auto 1:1Gbps 2:2Gbps 3:4Gbps 4:8Gbps 5:16Gbps */ + { "port_speed", 0, 0, 32}, + /* unit: us */ + { "interrupt_delay", 0, 0, 100}, + { "tape_support", 0, 0, 1}, /* tape support */ + { "End", 0, 0, 0} +}; + +struct unf_low_level_function_op_s hifc_fun_op = { + .low_level_type = UNF_HIFC_FC, + .name = "HIFC", + /* XID allocated from CM level */ + .xchg_mgr_type = UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE, + .abts_xchg = UNF_NO_EXTRA_ABTS_XCHG, + .pass_through_flag = UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN, + .support_max_npiv_num = UNF_HIFC_MAXNPIV_NUM, + .chip_id = 0, + .support_max_speed = UNF_PORT_SPEED_32_G, + .support_max_rport = UNF_HIFC_MAXRPORT_NUM, + .sfp_type = UNF_PORT_TYPE_FC_SFP, + .rport_release_type = UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC, + .sirt_page_mode = UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG, + + /* Link 
service */ + .service_op = { + .pfn_unf_els_send = hifc_send_els_cmnd, + .pfn_unf_bls_send = hifc_send_bls_cmnd, + .pfn_unf_gs_send = hifc_send_gs_cmnd, + .pfn_unf_cmnd_send = hifc_send_scsi_cmnd, + .pfn_unf_release_rport_res = hifc_free_parent_resource, + .pfn_unf_flush_ini_resp_que = hifc_flush_ini_resp_queue, + .pfn_unf_alloc_rport_res = hifc_alloc_parent_resource, + .pfn_unf_rport_session_rst = hifc_rport_session_rst, + }, + + /* Port Mgr */ + .port_mgr_op = { + .pfn_ll_port_config_set = hifc_port_config_set, + .pfn_ll_port_config_get = hifc_port_config_get, + .pfn_ll_port_diagnose = hifc_port_diagnose, + } +}; + +struct hifc_port_config_op_s { + enum unf_port_config_set_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para_in); +}; + +struct hifc_port_config_op_s hifc_config_set_op[] = { + { UNF_PORT_CFG_SET_SPEED, hifc_set_port_speed }, + { UNF_PORT_CFG_SET_TOPO, hifc_set_port_topo }, + { UNF_PORT_CFG_SET_BBSCN, hifc_set_port_bbscn }, + { UNF_PORT_CFG_SET_SFP_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_STATE, hifc_set_port_state }, + { UNF_PORT_CFG_UPDATE_WWN, NULL }, + { UNF_PORT_CFG_SET_FCP_CONF, hifc_set_port_fcp_conf }, + { UNF_PORT_CFG_SET_LOOP_ROLE, hifc_set_loop_role }, + { UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, hifc_set_max_support_speed }, + { UNF_PORT_CFG_UPDATE_FABRIC_PARAM, hifc_update_fabric_param }, + { UNF_PORT_CFG_UPDATE_PLOGI_PARAM, hifc_update_port_param }, + { UNF_PORT_CFG_UPDATE_FDISC_PARAM, NULL }, + { UNF_PORT_CFG_SAVE_HBA_INFO, hifc_save_hba_info }, + { UNF_PORT_CFG_SET_HBA_BASE_INFO, hifc_set_hba_base_info }, + { UNF_PORT_CFG_SET_FLASH_DATA_INFO, hifc_set_flash_data }, + { UNF_PORT_CFG_SET_BUTT, NULL } +}; + +struct hifc_port_cfg_get_op_s { + enum unf_port_config_get_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *param_out); +}; + +struct hifc_port_cfg_get_op_s hifc_config_get_op[] = { + { UNF_PORT_CFG_GET_SPEED_CFG, hifc_get_speed_cfg }, + { UNF_PORT_CFG_GET_SPEED_ACT, hifc_get_speed_act }, + { UNF_PORT_CFG_GET_TOPO_CFG, hifc_get_topo_cfg }, + { UNF_PORT_CFG_GET_TOPO_ACT, hifc_get_topo_act }, + { UNF_PORT_CFG_GET_LOOP_MAP, hifc_get_loop_map }, + { UNF_PORT_CFG_GET_SFP_PRESENT, NULL }, + { UNF_PORT_CFG_GET_SFP_INFO, hifc_get_sfp_info }, + { UNF_PORT_CFG_GET_FW_VER, hifc_get_firmware_version }, + { UNF_PORT_CFG_GET_HW_VER, hifc_get_hardware_version }, + { UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, hifc_get_work_bale_bbcredit }, + { UNF_PORT_CFG_GET_WORKBALE_BBSCN, hifc_get_work_bale_bbscn }, + { UNF_PORT_CFG_GET_LOOP_ALPA, hifc_get_loop_alpa }, + { UNF_PORT_CFG_GET_MAC_ADDR, hifc_get_chip_msg }, + { UNF_PORT_CFG_CLR_LESB, hifc_clear_port_error_code }, + { UNF_PORT_CFG_GET_LESB_THEN_CLR, hifc_get_and_clear_port_error_code}, + { UNF_PORT_CFG_GET_PORT_INFO, hifc_get_port_current_info }, + { UNF_PORT_CFG_GET_LED_STATE, hifc_get_lport_led }, + { UNF_PORT_CFG_GET_FEC, hifc_get_port_fec }, + { UNF_PORT_CFG_GET_PCIE_LINK_STATE, hifc_get_hba_pcie_link_state }, + { UNF_PORT_CFG_GET_FLASH_DATA_INFO, hifc_get_flash_data }, + { UNF_PORT_CFG_GET_BUTT, NULL } +}; + +static unsigned int hifc_port_config_set(void *v_phba, + enum unf_port_config_set_op_e op_code, + void *v_var_in) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_set_op) / + sizeof(struct hifc_port_config_op_s); + op_idx++) { + if (op_code == hifc_config_set_op[op_idx].op_code) { + if 
(!hifc_config_set_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_set_op[op_idx].pfn_hifc_operation(v_phba, v_var_in); + } + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation code for configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_port_config_get(void *v_phba, + enum unf_port_config_get_op_e op_code, + void *v_para_out) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_get_op) / + sizeof(struct hifc_port_cfg_get_op_s); + op_idx++) { + if (op_code == hifc_config_get_op[op_idx].op_code) { + if (!hifc_config_get_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_get_op[op_idx].pfn_hifc_operation(v_phba, v_para_out); + } + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation to get configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_check_port_cfg( + const struct hifc_port_cfg_s *v_port_cfg) +{ + int topo_condition, speed_condition; + /* About Work Topology */ + topo_condition = ((v_port_cfg->port_topology != UNF_TOP_LOOP_MASK) && + (v_port_cfg->port_topology != UNF_TOP_P2P_MASK) && + (v_port_cfg->port_topology != UNF_TOP_AUTO_MASK)); + if (topo_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port topology(0x%x) is incorrect", + v_port_cfg->port_topology); + return UNF_RETURN_ERROR; + } + + /* About Work Mode */ + if (v_port_cfg->port_mode != UNF_PORT_MODE_INI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port mode(0x%x) is incorrect", + v_port_cfg->port_mode); + + return UNF_RETURN_ERROR; + } + + /* About Work Speed */ + speed_condition = ((v_port_cfg->port_speed != UNF_PORT_SPEED_AUTO) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_2_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_4_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_8_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_16_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_32_G)); + if (speed_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port speed(0x%x) is incorrect", + v_port_cfg->port_speed); + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Check port configuration OK"); + + return RETURN_OK; +} + +static unsigned int hifc_get_port_cfg(struct hifc_hba_s *v_hba, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ +#define UNF_CONFIG_ITEM_LEN 15 + + /* + * Maximum length of a configuration item name, including the end + * character + */ +#define UNF_MAX_ITEM_NAME_LEN (32 + 1) + + /* Get and check parameters */ + char cfg_item[UNF_MAX_ITEM_NAME_LEN]; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = v_hba; + int iret = RETURN_ERROR_S32; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + memset((void *)cfg_item, 0, sizeof(cfg_item)); + + hba->card_info.func_num = + 
(hifc_global_func_id(v_hba->hw_dev_handle)) & UNF_FUN_ID_MASK; + hba->card_info.card_num = v_card_num; + + /* The range of PF of FC server is from PF1 to PF2 */ + iret = snprintf(cfg_item, UNF_CONFIG_ITEM_LEN, "hifc_cfg_%1u", + (hba->card_info.func_num)); + UNF_FUNCTION_RETURN_CHECK(iret, UNF_CONFIG_ITEM_LEN); + cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get port configuration: %s", cfg_item); + + /* Get configuration parameters from file */ + UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &hifc_port_cfg_parm[0], + (unsigned int *)(void *)&hba->port_cfg, + sizeof(hifc_port_cfg_parm) / + sizeof(struct unf_cfg_item_s)); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't get configuration", + hba->port_cfg.port_id); + + return ret; + } + + if (max_parent_qpc_num <= 2048) { + hba->port_cfg.sest_num = 2048; + hba->port_cfg.max_login = 2048; + } + + hba->port_cfg.port_id &= 0xff0000; + hba->port_cfg.port_id |= hba->card_info.card_num << 8; + hba->port_cfg.port_id |= hba->card_info.func_num; + hba->port_cfg.tape_support = (unsigned int)v_chip_info->tape_support; + + /* Parameters check */ + ret = hifc_check_port_cfg(&hba->port_cfg); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) check configuration incorrect", + hba->port_cfg.port_id); + + return ret; + } + + /* Set configuration which is got from file */ + hba->port_speed_cfg = hba->port_cfg.port_speed; + hba->port_topo_cfg = hba->port_cfg.port_topology; + + return ret; +} + +void hifc_flush_root_ctx(struct hifc_hba_s *v_hba) +{ + int ret = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + ret = hifc_func_rx_tx_flush(v_hba->hw_dev_handle); + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_rx_tx_flush failed with return value(0x%x)", + ret); + } +} + +static unsigned int hifc_delete_srqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned long long sqrc_gpa) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SRQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_srqc_s del_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc Cmnd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf allocate failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd)); + del_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SRQC; + del_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(sqrc_gpa); + del_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(sqrc_gpa); + hifc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd)); + cmdq_in_buf->size = sizeof(del_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SRQC_TIMEOUT); + + /* Free Cmnd Buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del srqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + + return RETURN_OK; +} + +void hifc_flush_srq_ctx(struct hifc_hba_s *v_hba) +{ + struct hifc_srq_info_s 
*srq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy ELS SRQC"); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Check state to avoid to flush SRQC again */ + srq_info = &v_hba->els_srq_info; + if (srq_info->srq_type == HIFC_SRQ_ELS && + srq_info->enable == UNF_TRUE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]HBA(0x%x) flush ELS SRQC", + v_hba->port_index); + + (void)hifc_delete_srqc_via_cmdq_sync( + v_hba, + srq_info->cqm_srq_info->q_ctx_paddr); + } +} + +static unsigned int hifc_create_queues(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + ret = hifc_create_root_queues(v_hba); + if (ret != RETURN_OK) + goto out_creat_root_queue_fail; + + /* Initialize shared resources of SCQ and SRQ in parent queue */ + ret = hifc_create_common_share_queues(v_hba); + if (ret != RETURN_OK) + goto out_create_common_queue_fail; + + /* Initialize parent queue manager resources */ + ret = hifc_alloc_parent_queue_mgr(v_hba); + if (ret != RETURN_OK) + goto out_free_share_queue_resource; + + /* Initialize shared WQE page pool in parent SQ */ + ret = hifc_alloc_parent_sq_wqe_page_pool(v_hba); + if (ret != RETURN_OK) + goto out_free_parent_queue_resource; + + /* + * Notice: the configuration of SQ and QID(default_sq_id) + * must be the same in FC + */ + v_hba->next_clearing_sq = 0; + v_hba->default_sq_id = HIFC_QID_SQ; + + return RETURN_OK; + +out_free_parent_queue_resource: + hifc_free_parent_queue_mgr(v_hba); + +out_free_share_queue_resource: + hifc_flush_scq_ctx(v_hba); + hifc_flush_srq_ctx(v_hba); + hifc_destroy_common_share_queues(v_hba); + +out_create_common_queue_fail: + hifc_destroy_root_queues(v_hba); + +out_creat_root_queue_fail: + hifc_flush_root_ctx(v_hba); + + return ret; +} + +static void hifc_destroy_queues(struct hifc_hba_s *v_hba) +{ + /* Free parent queue resource */ + hifc_free_parent_queues(v_hba); + + /* Free queue manager resource */ + hifc_free_parent_queue_mgr(v_hba); + + /* Free linked List SQ and WQE page pool resource */ + hifc_free_parent_sq_wqe_page_pool(v_hba); + + /* Free shared SRQ and SCQ queue resource */ + hifc_destroy_common_share_queues(v_hba); + + /* Free root queue resource */ + hifc_destroy_root_queues(v_hba); +} + +static unsigned int hifc_notify_up_open_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_TRUE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? + (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) open timer, cmdscq bitmap:0x%x", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_notify_up_close_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_FALSE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? 
+ (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) close timer with cmd_scq bitmap(0x%x)", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_initial_chip_access(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + + /* 1. + * Initialize cqm access related with scq, emb cq, aeq(ucode-->driver) + */ + service_cqm_temp.service_handle = v_hba; + ret = cqm_service_register(v_hba->hw_dev_handle, &service_cqm_temp); + if (ret != CQM_SUCCESS) + return UNF_RETURN_ERROR; + + /* 2. Initialize mailbox(driver-->up), aeq(up--->driver) access */ + ret = hifc_register_mgmt_msg_cb(v_hba->hw_dev_handle, + HIFC_MOD_FC, v_hba, + hifc_up_msg_2_driver_proc); + if (ret != CQM_SUCCESS) + goto out_unreg_cqm; + + return RETURN_OK; + +out_unreg_cqm: + cqm_service_unregister(v_hba->hw_dev_handle); + + return UNF_RETURN_ERROR; +} + +static void hifc_release_chip_access(struct hifc_hba_s *v_hba) +{ + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, return); + + hifc_unregister_mgmt_msg_cb(v_hba->hw_dev_handle, HIFC_MOD_FC); + + cqm_service_unregister(v_hba->hw_dev_handle); +} + +static void hifc_get_chip_info(struct hifc_hba_s *v_hba) +{ + unsigned int exi_base = 0; + unsigned int fun_index = 0; + + v_hba->vpid_start = v_hba->fc_service_cap.dev_fc_cap.vp_id_start; + v_hba->vpid_end = v_hba->fc_service_cap.dev_fc_cap.vp_id_end; + fun_index = hifc_global_func_id(v_hba->hw_dev_handle); + exi_base = 0; + + exi_base += (fun_index * HIFC_EXIT_STRIDE); + v_hba->exit_base = HIFC_LSW(exi_base); + v_hba->exit_count = HIFC_EXIT_STRIDE; + v_hba->image_count = UNF_HIFC_MAXRPORT_NUM; + v_hba->max_support_speed = max_speed; + v_hba->port_index = HIFC_LSB(fun_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) base information: PortIndex=0x%x, ImgCount=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x", + v_hba->port_cfg.port_id, v_hba->port_index, + v_hba->image_count, v_hba->exit_base, + v_hba->exit_count, v_hba->vpid_start, + v_hba->vpid_end, v_hba->max_support_speed, + v_hba->port_speed_cfg, v_hba->port_topo_cfg); +} + +static unsigned int hifc_init_host_res(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + /* Initialize spin lock */ + spin_lock_init(&hba->hba_lock); + spin_lock_init(&hba->flush_state_lock); + spin_lock_init(&hba->delay_info.srq_lock); + /* Initialize init_completion */ + init_completion(&hba->hba_init_complete); + init_completion(&hba->mbox_complete); + + /* Step-1: initialize the communication channel between driver and uP */ + ret = hifc_initial_chip_access(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize chip access", + hba->port_cfg.port_id); + + goto out_unmap_memory; + } + /* Step-2: get chip configuration information before creating + * queue resources + */ + hifc_get_chip_info(hba); + + /* Step-3: create queue resources */ + ret = hifc_create_queues(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create queues", + hba->port_cfg.port_id); + + goto out_release_chip_access; + } + + /* Initialize status parameters */ + hba->active_port_speed = 
UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->sfp_on = UNF_FALSE; + hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + + /* Initialize parameters referring to the lowlevel */ + hba->remote_rttov_tag = 0; + hba->port_bbscn_cfg = HIFC_LOWLEVEL_DEFAULT_BB_SCN; + + /* Initialize timer, and the unit of E_D_TOV is ms */ + hba->remote_edtov_tag = 0; + hba->remote_bbcredit = 0; + hba->compared_bbscn = 0; + hba->compared_edtov_val = UNF_DEFAULT_EDTOV; + hba->compared_ratov_val = UNF_DEFAULT_RATOV; + hba->removing = UNF_FALSE; + hba->dev_present = UNF_TRUE; + + /* Initialize parameters about cos */ + hba->cos_bit_map = cos_bit_map; + memset(hba->cos_rport_cnt, 0, HIFC_MAX_COS_NUM * sizeof(atomic_t)); + + /* Mailbox access completion */ + complete(&hba->mbox_complete); + + /* Notify uP to open timer after creating scq */ + ret = hifc_notify_up_open_timer(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't open timer", + hba->port_cfg.port_id); + + goto out_destroy_queues; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HIFC port(0x%x) initialize host resources succeeded", + hba->port_cfg.port_id); + + return ret; + +out_destroy_queues: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_destroy_queues(hba); + +out_release_chip_access: + hifc_release_chip_access(hba); + +out_unmap_memory: + return ret; +} + +static void hifc_update_lport_config( + struct hifc_hba_s *v_hba, + struct unf_low_level_function_op_s *v_low_level_fun) +{ +#define HIFC_MULTI_CONF_NONSUPPORT 0 + + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_low_level_fun->lport_cfg_items; + + if (v_hba->port_cfg.max_login < v_low_level_fun->support_max_rport) + lport_cfg_items->max_login = v_hba->port_cfg.max_login; + else + lport_cfg_items->max_login = v_low_level_fun->support_max_rport; + + if ((v_hba->port_cfg.sest_num / 2) < UNF_RESERVE_SFS_XCHG) + lport_cfg_items->max_io = v_hba->port_cfg.sest_num; + else + lport_cfg_items->max_io = v_hba->port_cfg.sest_num - + UNF_RESERVE_SFS_XCHG; + + lport_cfg_items->max_sfs_xchg = UNF_MAX_SFS_XCHG; + lport_cfg_items->port_id = v_hba->port_cfg.port_id; + lport_cfg_items->port_mode = v_hba->port_cfg.port_mode; + lport_cfg_items->port_topology = v_hba->port_cfg.port_topology; + lport_cfg_items->max_queue_depth = v_hba->port_cfg.max_queue_depth; + + lport_cfg_items->port_speed = v_hba->port_cfg.port_speed; + lport_cfg_items->tape_support = v_hba->port_cfg.tape_support; + lport_cfg_items->res_mgmt_enabled = UNF_FALSE; + + v_low_level_fun->sys_port_name = + *(unsigned long long *)v_hba->sys_port_name; + v_low_level_fun->sys_node_name = + *(unsigned long long *)v_hba->sys_node_name; + + /* Update chip information */ + v_low_level_fun->dev = v_hba->pci_dev; + v_low_level_fun->chip_info.chip_work_mode = v_hba->work_mode; + v_low_level_fun->chip_info.chip_type = v_hba->chip_type; + v_low_level_fun->chip_info.disable_err_flag = 0; + v_low_level_fun->support_max_speed = v_hba->max_support_speed; + + v_low_level_fun->chip_id = 0; + + v_low_level_fun->sfp_type = UNF_PORT_TYPE_FC_SFP; + + v_low_level_fun->multi_conf_support = HIFC_MULTI_CONF_NONSUPPORT; + v_low_level_fun->support_max_xid_range = v_hba->port_cfg.sest_num; + v_low_level_fun->update_fw_reset_active = + UNF_PORT_UNGRADE_FW_RESET_INACTIVE; + v_low_level_fun->port_type 
= DRV_PORT_ENTITY_TYPE_PHYSICAL; + + if ((lport_cfg_items->port_id & UNF_FIRST_LPORT_ID_MASK) == + lport_cfg_items->port_id) { + v_low_level_fun->support_upgrade_report = + UNF_PORT_SUPPORT_UPGRADE_REPORT; + } else { + v_low_level_fun->support_upgrade_report = + UNF_PORT_UNSUPPORT_UPGRADE_REPORT; + } + + v_low_level_fun->low_level_type |= UNF_FC_PROTOCOL_TYPE; +} + +static unsigned int hifc_create_lport(struct hifc_hba_s *v_hba) +{ + void *lport = NULL; + struct unf_low_level_function_op_s low_level_fun; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + hifc_fun_op.dev = v_hba->pci_dev; + memcpy(&low_level_fun, &hifc_fun_op, + sizeof(struct unf_low_level_function_op_s)); + + /* Update port configuration table */ + hifc_update_lport_config(v_hba, &low_level_fun); + + /* Apply for lport resources */ + UNF_LOWLEVEL_ALLOC_LPORT(lport, v_hba, &low_level_fun); + if (!lport) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't allocate Lport", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + v_hba->lport = lport; + + return RETURN_OK; +} + +void hifc_release_probe_index(unsigned int probe_index) +{ + if (probe_index >= HIFC_MAX_PROBE_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is invalid", probe_index); + + return; + } + + spin_lock(&probe_spin_lock); + if (!test_bit((int)probe_index, (const unsigned long *)probe_bit_map)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is not probed", + probe_index); + + spin_unlock(&probe_spin_lock); + + return; + } + + clear_bit((int)probe_index, probe_bit_map); + spin_unlock(&probe_spin_lock); +} + +static void hifc_release_host_res(struct hifc_hba_s *v_hba) +{ + hifc_destroy_queues(v_hba); + hifc_release_chip_access(v_hba); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) release low level resource done", + v_hba->port_cfg.port_id); +} + +static struct hifc_hba_s *hifc_init_hba(struct pci_dev *v_dev, + void *v_hwdev_handle, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = NULL; + + /* Allocate HBA */ + hba = kmalloc(sizeof(*hba), GFP_ATOMIC); + HIFC_CHECK(INVALID_VALUE32, hba, return NULL); + memset(hba, 0, sizeof(struct hifc_hba_s)); + + /* Heartbeat default */ + hba->heart_status = 1; + + /* Private data in pciDev */ + hba->pci_dev = v_dev; /* PCI device */ + hba->hw_dev_handle = v_hwdev_handle; + + /* Work mode */ + hba->work_mode = v_chip_info->work_mode; + /* Create work queue */ + hba->work_queue = create_singlethread_workqueue("hifc"); + if (!hba->work_queue) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Hifc creat workqueue failed"); + + goto out_free_hba; + } + /* Init delay work */ + INIT_DELAYED_WORK(&hba->delay_info.del_work, + hifc_rcvd_els_from_srq_time_out); + + /* Notice: Only use FC features */ + (void)hifc_support_fc(v_hwdev_handle, &hba->fc_service_cap); + /* Check parent context available */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context is not allocated in this function"); + + goto out_destroy_workqueue; + } + max_parent_qpc_num = hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num; + + /* Get port configuration */ + ret = hifc_get_port_cfg(hba, v_chip_info, v_card_num); + if (ret != RETURN_OK) { + 
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Can't get port configuration"); + + goto out_destroy_workqueue; + } + /* Get WWN */ + *(unsigned long long *)hba->sys_node_name = v_chip_info->wwnn; + *(unsigned long long *)hba->sys_port_name = v_chip_info->wwpn; + + /* Initialize host resources */ + ret = hifc_init_host_res(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize host resource", + hba->port_cfg.port_id); + + goto out_destroy_workqueue; + } + + /* Local Port create */ + ret = hifc_create_lport(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create lport", + hba->port_cfg.port_id); + goto out_release_host_res; + } + complete(&hba->hba_init_complete); + + /* Print reference count */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port(0x%x) probe succeeded.", + hba->port_cfg.port_id); + + return hba; + +out_release_host_res: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_release_host_res(hba); + +out_destroy_workqueue: + flush_workqueue(hba->work_queue); + destroy_workqueue(hba->work_queue); + hba->work_queue = NULL; + +out_free_hba: + kfree(hba); + + return NULL; +} + +void hifc_get_total_probed_num(unsigned int *v_probe_cnt) +{ + unsigned int i = 0; + unsigned int count = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)probe_bit_map)) + count++; + } + + *v_probe_cnt = count; + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Probed port total number is 0x%x", count); +} + +static unsigned int hifc_assign_card_num(struct hifc_lld_dev *lld_dev, + struct hifc_chip_info_s *v_chip_info, + unsigned char *v_card_num) +{ + unsigned char i = 0; + unsigned long long card_index = 0; + + card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ? 
+ lld_dev->pdev->bus->parent->number : + lld_dev->pdev->bus->number; + + spin_lock(&probe_spin_lock); + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)card_num_bit_map)) { + if ((card_num_manage[i].card_number == card_index) && + (card_num_manage[i].is_removing == UNF_FALSE)) { + card_num_manage[i].port_count++; + *v_card_num = i; + spin_unlock(&probe_spin_lock); + return RETURN_OK; + } + } + } + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (!test_bit((int)i, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[i].card_number = card_index; + card_num_manage[i].port_count = 1; + card_num_manage[i].is_removing = UNF_FALSE; + *v_card_num = i; + set_bit(i, card_num_bit_map); + spin_unlock(&probe_spin_lock); + + return RETURN_OK; + } + } + + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +static void hifc_dec_and_free_card_num(unsigned char v_card_num) +{ + /* 2 ports per card */ + if (v_card_num >= HIFC_MAX_CARD_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Card number(0x%x) is invalid", v_card_num); + + return; + } + + spin_lock(&probe_spin_lock); + + if (test_bit((int)v_card_num, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[v_card_num].port_count--; + card_num_manage[v_card_num].is_removing = UNF_TRUE; + + if (card_num_manage[v_card_num].port_count == 0) { + card_num_manage[v_card_num].card_number = 0; + card_num_manage[v_card_num].is_removing = UNF_FALSE; + clear_bit((int)v_card_num, card_num_bit_map); + } + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can not find card number(0x%x)", v_card_num); + } + + spin_unlock(&probe_spin_lock); +} + +unsigned int hifc_assign_probe_index(unsigned int *v_probe_index) +{ + unsigned int i = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (!test_bit((int)i, (const unsigned long *)probe_bit_map)) { + *v_probe_index = i; + set_bit(i, probe_bit_map); + spin_unlock(&probe_spin_lock); + + return RETURN_OK; + } + } + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +int hifc_probe(struct hifc_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int probe_index = 0; + unsigned int probe_total_num = 0; + unsigned char card_num = INVALID_VALUE8; + struct hifc_chip_info_s chip_info; + + HIFC_CHECK(INVALID_VALUE32, lld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->hwdev, + return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->pdev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev_name, return UNF_RETURN_ERROR_S32); + + dev = lld_dev->pdev; /* pcie device */ + + memset(&chip_info, 0, sizeof(struct hifc_chip_info_s)); + /* 1. Get & check Total_Probed_number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num >= HIFC_MAX_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Total probe num (0x%x) is larger than allowed number(64)", + probe_total_num); + + return UNF_RETURN_ERROR_S32; + } + /* 2. 
Check device work mode */ + if (hifc_support_fc(lld_dev->hwdev, NULL)) { + chip_info.work_mode = HIFC_SMARTIO_WORK_MODE_FC; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port work mode is not FC"); + return UNF_RETURN_ERROR_S32; + } + + /* 4. Assign & Get new Probe index */ + ret = hifc_assign_probe_index(&probe_index); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]AssignProbeIndex fail"); + + return UNF_RETURN_ERROR_S32; + } + + ret = hifc_get_chip_capability((void *)lld_dev->hwdev, &chip_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]GetChipCapability fail"); + return UNF_RETURN_ERROR_S32; + } + + /* Assign & Get new Card number */ + ret = hifc_assign_card_num(lld_dev, &chip_info, &card_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_assign_card_num fail"); + hifc_release_probe_index(probe_index); + + return UNF_RETURN_ERROR_S32; + } + + /* Init HBA resource */ + hba = hifc_init_hba(dev, lld_dev->hwdev, &chip_info, card_num); + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe HBA(0x%x) failed.", probe_index); + + hifc_release_probe_index(probe_index); + hifc_dec_and_free_card_num(card_num); + + return UNF_RETURN_ERROR_S32; + } + + /* Name by the order of probe */ + *uld_dev = hba; + + snprintf(uld_dev_name, HIFC_PORT_NAME_STR_LEN, "%s%02x%02x", + HIFC_PORT_NAME_LABEL, + hba->card_info.card_num, hba->card_info.func_num); + memcpy(hba->port_name, uld_dev_name, HIFC_PORT_NAME_STR_LEN); + + hba->probe_index = probe_index; + hifc_hba[probe_index] = hba; + + return RETURN_OK; +} + +static unsigned int hifc_port_check_fw_ready(struct hifc_hba_s *v_hba) +{ +#define HIFC_PORT_CLEAR_DONE 0 +#define HIFC_PORT_CLEAR_DOING 1 + unsigned int clear_state = HIFC_PORT_CLEAR_DOING; + unsigned int ret = RETURN_OK; + unsigned int wait_time_out = 0; + + do { + msleep(1000); + wait_time_out += 1000; + ret = hifc_mbx_get_fw_clear_stat(v_hba, &clear_state); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + /* Total time more than 30s, retry more than 3 times, failed */ + if ((wait_time_out > 30000) && + (clear_state != HIFC_PORT_CLEAR_DONE)) + return UNF_RETURN_ERROR; + + } while (clear_state != HIFC_PORT_CLEAR_DONE); + + return RETURN_OK; +} + +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + int turn_on = UNF_FALSE; + unsigned int ret = RETURN_OK; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + /* Redundancy check */ + turn_on = *((int *)v_para_in); + if (turn_on == hba->sfp_on) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) FC physical port is already %s", + hba->port_cfg.port_id, (turn_on) ? 
"on" : "off"); + + return ret; + } + if (turn_on == UNF_TRUE) { + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Get port(0x%x) clear state failed, turn on fail", + hba->port_cfg.port_id); + return ret; + } + /* At first, configure port table info if necessary */ + ret = hifc_config_port_table(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) can't configurate port table", + hba->port_cfg.port_id); + + return ret; + } + } + + /* Switch physical port */ + ret = hifc_port_switch(hba, turn_on); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) switch failed", + hba->port_cfg.port_id); + return ret; + } + + /* Update HBA's sfp state */ + hba->sfp_on = turn_on; + + return ret; +} + +static unsigned int hifc_destroy_lport(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, v_hba->lport); + v_hba->lport = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) destroy L_Port done", + v_hba->port_cfg.port_id); + + return ret; +} + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + unsigned long time_out = 0; + int sfp_before_reset = UNF_FALSE; + int off_para_in = UNF_FALSE; + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + dev = hba->pci_dev; + HIFC_CHECK(INVALID_VALUE32, dev, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) reset HBA begin", + hba->port_cfg.port_id); + + /* Wait for last init/reset completion */ + time_out = wait_for_completion_timeout( + &hba->hba_init_complete, + (unsigned long)HIFC_PORT_INIT_TIME_SEC_MAX * HZ); + + if (time_out == UNF_ZERO) { + UNF_TRACE(INVALID_VALUE32, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Last HBA initialize/reset timeout: %d second", + HIFC_PORT_INIT_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Save current port state */ + sfp_before_reset = hba->sfp_on; + + /* Inform the reset event to CM level before beginning */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_START, NULL); + hba->reset_time = jiffies; + + /* Close SFP */ + ret = hifc_sfp_switch(hba, &off_para_in); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't close SFP", + hba->port_cfg.port_id); + hba->sfp_on = sfp_before_reset; + + complete(&hba->hba_init_complete); + + return ret; + } + + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Get port(0x%x) clear state failed, hang port and report chip error", + hba->port_cfg.port_id); + + complete(&hba->hba_init_complete); + return ret; + } + + hifc_queue_pre_process(hba, UNF_FALSE); + + ret = hifc_mbox_reset_chip(hba, HIFC_MBOX_SUBTYPE_LIGHT_RESET); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset chip mailbox", + hba->port_cfg.port_id); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_GET_FWLOG, NULL); + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_DEBUG_DUMP, NULL); + } + + /* Inform the success to CM level */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_END, NULL); + + /* Queue open */ + 
hifc_enable_queues_dispatch(hba); + + /* Open SFP */ + (void)hifc_sfp_switch(hba, &sfp_before_reset); + + complete(&hba->hba_init_complete); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) reset HBA done", + hba->port_cfg.port_id); + + return ret; +#undef HIFC_WAIT_LINKDOWN_EVENT_MS +} + +static unsigned int hifc_delete_scqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned int scqn) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SCQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_scqc_s del_scqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc cmd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd)); + del_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SCQC; + del_scqc_cmd.wd1.scqn = HIFC_LSW(scqn); + hifc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd)); + cmdq_in_buf->size = sizeof(del_scqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SCQC_TIMEOUT); + + /* Free cmnd buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del scqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + + return RETURN_OK; +} + +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba) +{ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total 0x%x SCQC", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + (void)hifc_delete_scqc_via_cmdq_sync(v_hba, 0); +} + +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + v_hba->in_flushing = in_flush; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); +} + +static int hifc_hba_is_present(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + int present = UNF_FALSE; + unsigned int vendor_id = 0; + + ret = pci_read_config_dword(v_hba->pci_dev, 0, &vendor_id); + vendor_id &= HIFC_PCI_VENDOR_ID_MASK; + if ((ret == RETURN_OK) && (vendor_id == HIFC_PCI_VENDOR_ID)) { + present = UNF_TRUE; + } else { + present = UNF_FALSE; + v_hba->dev_present = UNF_FALSE; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port %s remove: vender_id=0x%x, ret=0x%x", + present ? "normal" : "surprise", vendor_id, ret); + + return present; +} + +static void hifc_exit(struct pci_dev *v_dev, struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int sfp_switch = UNF_FALSE; + int present = UNF_TRUE; + + v_hba->removing = UNF_TRUE; + + /* 1. Check HBA present or not */ + present = hifc_hba_is_present(v_hba); + if (present == UNF_TRUE) { + if (v_hba->phy_link == UNF_PORT_LINK_DOWN) + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + + /* At first, close sfp */ + sfp_switch = UNF_FALSE; + (void)hifc_sfp_switch((void *)v_hba, (void *)&sfp_switch); + } + + /* 2. 
Report COM with HBA removing: delete route timer delay work */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_BEGIN_REMOVE, NULL); + + /* 3. Report COM with HBA Nop, COM releases I/O(s) & R_Port(s) forcibly */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_NOP, NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]PCI device(%p) remove port(0x%x) failed", + v_dev, v_hba->port_index); + } + + if (present == UNF_TRUE) { + /* 4.1 Wait for all SQs to empty, free SRQ buffer & SRQC */ + hifc_queue_pre_process(v_hba, UNF_TRUE); + } + + /* 5. Destroy L_Port */ + (void)hifc_destroy_lport(v_hba); + + /* 6. If the HBA is present */ + if (present == UNF_TRUE) { + /* Enable Queues dispatch */ + hifc_enable_queues_dispatch(v_hba); + /* Reset port if necessary */ + (void)hifc_mbox_reset_chip(v_hba, + HIFC_MBOX_SUBTYPE_HEAVY_RESET); + + /* Flush SCQ context */ + hifc_flush_scq_ctx(v_hba); + + /* Flush SRQ context */ + hifc_flush_srq_ctx(v_hba); + + /* Flush Root context in order to prevent DMA */ + hifc_flush_root_ctx(v_hba); + + /* + * NOTE: while flushing txrx, hash bucket will be cached out in + * UP. Wait to clear resources completely + */ + msleep(1000); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) flush scq & srq & root context done", + v_hba->port_cfg.port_id); + } + + /* 7. Notify uP to close timer before deleting SCQ */ + ret = hifc_notify_up_close_timer(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]HIFC port(0x%x) can't close timer", + v_hba->port_cfg.port_id); + } + + /* 8. Release host resources */ + hifc_release_host_res(v_hba); + + /* 9. Destroy FC work queue */ + if (v_hba->work_queue) { + flush_workqueue(v_hba->work_queue); + destroy_workqueue(v_hba->work_queue); + v_hba->work_queue = NULL; + } + + /* 10. Release Probe index & Decrease card number */ + hifc_release_probe_index(v_hba->probe_index); + hifc_dec_and_free_card_num((unsigned char)v_hba->card_info.card_num); + + /* 11. Free HBA memory */ + kfree(v_hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]PCI device(%p) remove succeeded", v_dev); +} + +void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)uld_dev; + unsigned int probe_total_num = 0; + unsigned int probe_index = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + + dev = hba->pci_dev; + + /* Get total probed port number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num < 1) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port manager is empty and no need to remove"); + return; + } + + /* check pci vendor id */ + if (dev->vendor != HIFC_PCI_VENDOR_ID_HUAWEI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Wrong vendor id(0x%x) and exit", dev->vendor); + return; + } + + /* Check function ability */ + + if (!(hifc_support_fc(lld_dev->hwdev, NULL))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC is not enabled in this function"); + return; + } + + /* Get probe index */ + probe_index = hba->probe_index; + + /* Parent context allocation check */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context is not allocated in this function"); + return; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HBA(0x%x) start removing...", hba->port_index); + + /* HBA removing... */ + hifc_exit(dev, hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)", + probe_index, dev->vendor, dev->device); + + /* Probe index check */ + if (probe_index < HIFC_HBA_PORT_MAX_NUM) { + hifc_hba[probe_index] = NULL; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe index(0x%x) is invalid and remove failed", + probe_index); + } + + hifc_get_total_probed_num(&probe_total_num); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Removed index=%u, RemainNum=%u", + probe_index, probe_total_num); +} + +void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev, + struct hifc_event_info *event) +{ + struct hifc_hba_s *hba = uld_dev; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + HIFC_CHECK(INVALID_VALUE32, NULL != event, return); + + switch (event->type) { + case HIFC_EVENT_HEART_LOST: + hba->heart_status = 0; + HIFC_COM_UP_ERR_EVENT_STAT(hba, HIFC_EVENT_HEART_LOST); + break; + default: + break; + } +} + +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state) +{ + int *link_state = v_link_state; + int present = UNF_TRUE; + struct hifc_hba_s *hba = v_hba; + int ret; + int last_dev_state = UNF_TRUE; + int cur_dev_state = UNF_TRUE; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_link_state, return UNF_RETURN_ERROR); + last_dev_state = hba->dev_present; + ret = hifc_get_card_present_state(hba->hw_dev_handle, (bool *)&present); + if (ret || present !=
UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]port(0x%x) is not present,ret:%d, present:%d", + hba->port_cfg.port_id, ret, present); + cur_dev_state = UNF_FALSE; + } else { + cur_dev_state = UNF_TRUE; + } + + hba->dev_present = cur_dev_state; + + /* the heartbeat is considered lost only when the PCIE link is down for + * two times. + */ + if ((last_dev_state == UNF_FALSE) && (cur_dev_state == UNF_FALSE)) + hba->heart_status = UNF_FALSE; + *link_state = hba->dev_present; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO, + "Port:0x%x,get dev present:%d", hba->port_cfg.port_id, + *link_state); + return RETURN_OK; +} diff --git a/drivers/scsi/huawei/hifc/hifc_hba.h b/drivers/scsi/huawei/hifc/hifc_hba.h new file mode 100644 index 0000000000000000000000000000000000000000..0ffa7c3402b073a3c14fe3ba78befe922d5f8fbc --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_HBA_H__ +#define __HIFC_HBA_H__ + +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" + +#define HIFC_PCI_VENDOR_ID_MASK (0xffff) + +#define HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT 8 +#define HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_BB_SCN 0 + +#define HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081 +#define HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100 +#define HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE 7000 +#define HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE 0x2000 + +#define HIFC_SMARTIO_WORK_MODE_FC 0x1 +#define UNF_FUN_ID_MASK 0x07 +#define UNF_HIFC_FC 0x01 +#define UNF_HIFC_MAXNPIV_NUM 64 +#define HIFC_MAX_COS_NUM 8 +#define HIFC_PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HIFC_SCQ_CNTX_SIZE 32 +#define HIFC_SRQ_CNTX_SIZE 64 +#define HIFC_PORT_INIT_TIME_SEC_MAX 1 + +#define HIFC_PORT_NAME_LABEL "hifc" +#define HIFC_PORT_NAME_STR_LEN 16 +#define HIFC_MAX_PROBE_PORT_NUM 64 +#define HIFC_PORT_NUM_PER_TABLE 64 +#define HIFC_MAX_CARD_NUM 32 +#define HIFC_HBA_PORT_MAX_NUM HIFC_MAX_PROBE_PORT_NUM +/* Heart Lost Flag */ +#define HIFC_EVENT_HEART_LOST 0 + +#define HIFC_GET_HBA_PORT_ID(__hba) ((__hba)->port_index) +#define HIFC_HBA_NOT_PRESENT(__hba) ((__hba)->dev_present == UNF_FALSE) + +struct hifc_port_cfg_s { + unsigned int port_id; /* Port ID */ + unsigned int port_mode; /* Port mode:INI(0x20) TGT(0x10) BOTH(0x30) */ + unsigned int port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */ + unsigned int port_alpa; /* Port ALPA */ + unsigned int max_queue_depth;/* Max Queue depth Registration to SCSI */ + unsigned int sest_num; /* IO burst num:512-4096 */ + unsigned int max_login; /* Max Login Session. 
*/ + unsigned int node_name_hi; /* nodename high 32 bits */ + unsigned int node_name_lo; /* nodename low 32 bits */ + unsigned int port_name_hi; /* portname high 32 bits */ + unsigned int port_name_lo; /* portname low 32 bits */ + /* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */ + unsigned int port_speed; + unsigned int interrupt_delay; /* Delay times(ms) in interrupt */ + unsigned int tape_support; /* tape support */ +}; + +#define HIFC_VER_INFO_SIZE 128 +struct hifc_drv_version_s { + char ver[HIFC_VER_INFO_SIZE]; +}; + +struct hifc_card_info_s { + unsigned int card_num : 8; + unsigned int func_num : 8; + unsigned int base_func : 8; + /* + * Card type:UNF_FC_SERVER_BOARD_32_G(6) 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)16G mode + */ + unsigned int card_type : 8; +}; + +struct hifc_card_num_manage_s { + int is_removing; + unsigned int port_count; + unsigned long long card_number; +}; + +struct hifc_led_state_s { + unsigned char green_speed_led; + unsigned char yellow_speed_led; + unsigned char ac_led; + unsigned char reserved; +}; + +enum hifc_queue_set_stage_e { + HIFC_QUEUE_SET_STAGE_INIT = 0, + HIFC_QUEUE_SET_STAGE_SCANNING, + HIFC_QUEUE_SET_STAGE_FLUSHING, + HIFC_QUEUE_SET_STAGE_FLUSHDONE, + HIFC_QUEUE_SET_STAGE_BUTT +}; + +struct hifc_srq_delay_info_s { + unsigned char srq_delay_flag; /* Check whether need to delay */ + unsigned char root_rq_rcvd_flag; + unsigned short rsd; + spinlock_t srq_lock; + struct unf_frame_pkg_s pkg; + struct delayed_work del_work; +}; + +struct hifc_fw_ver_detail_s { + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* get wwpn and wwnn */ +struct hifc_chip_info_s { + unsigned char work_mode; + unsigned char tape_support; + unsigned long long wwpn; + unsigned long long wwnn; +}; + +struct hifc_hba_s { + struct pci_dev *pci_dev; + void *hw_dev_handle; + struct fc_service_cap fc_service_cap; + struct hifc_scq_info_s scq_info[HIFC_TOTAL_SCQ_NUM]; + struct hifc_srq_info_s els_srq_info; + /* PCI IO Memory */ + void __iomem *bar0; + unsigned int bar0_len; + + struct hifc_root_info_s root_info; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + /* Link list Sq WqePage Pool */ + struct hifc_sq_wqe_page_pool_s sq_wpg_pool; + + enum hifc_queue_set_stage_e q_set_stage; + unsigned int next_clearing_sq; + unsigned int default_sq_id; + /* Port parameters, Obtained through firmware */ + unsigned short q_s_max_count; + unsigned char port_type; /* FC Port */ + unsigned char port_index; /* Phy Port */ + unsigned int default_scqn; + + unsigned char chip_type; /* chiptype:Smart or fc */ + unsigned char work_mode; + struct hifc_card_info_s card_info; + char port_name[HIFC_PORT_NAME_STR_LEN]; + unsigned int probe_index; + + unsigned short exit_base; + unsigned short exit_count; + unsigned short image_count; + unsigned char vpid_start; + unsigned char vpid_end; + + spinlock_t flush_state_lock; + int in_flushing; + + struct hifc_port_cfg_s port_cfg; /* Obtained through Config */ + + void *lport; /* Used in UNF level */ + + unsigned char sys_node_name[UNF_WWN_LEN]; + unsigned char sys_port_name[UNF_WWN_LEN]; + + struct completion hba_init_complete; + struct completion mbox_complete; + + unsigned short removing; + int sfp_on; + int dev_present; + int heart_status; + spinlock_t hba_lock; + unsigned int port_topo_cfg; + unsigned int 
port_bbscn_cfg; + unsigned int port_loop_role; + unsigned int port_speed_cfg; + unsigned int max_support_speed; + + unsigned char remote_rttov_tag; + unsigned char remote_edtov_tag; + unsigned short compared_bbscn; + unsigned short remote_bbcredit; + unsigned int compared_edtov_val; + unsigned int compared_ratov_val; + enum unf_act_topo_e active_topo; + unsigned int active_port_speed; + unsigned int active_rx_bb_credit; + unsigned int active_bb_scn; + unsigned int phy_link; + unsigned int fcp_conf_cfg; + /* loop */ + unsigned char active_al_pa; + unsigned char loop_map_valid; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + unsigned int cos_bit_map; + atomic_t cos_rport_cnt[HIFC_MAX_COS_NUM]; + struct hifc_led_state_s led_states; + unsigned int fec_status; + struct workqueue_struct *work_queue; + unsigned long long reset_time; + struct hifc_srq_delay_info_s delay_info; +}; + +enum drv_port_entity_type_e { + DRV_PORT_ENTITY_TYPE_PHYSICAL = 0, + DRV_PORT_ENTITY_TYPE_VIRTUAL = 1, + DRV_PORT_ENTITY_TYPE_BUTT +}; + +extern struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +extern spinlock_t probe_spin_lock; +extern unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba); +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba); +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush); +void hifc_get_total_probed_num(unsigned int *v_probe_cnt); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hw.h b/drivers/scsi/huawei/hifc/hifc_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..49b2edd2bac624f2e0bb91521dc9e9a7c775be8b --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hw.h @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HW_H_ +#define HIFC_HW_H_ + +#ifndef __BIG_ENDIAN__ +#define __BIG_ENDIAN__ 0x4321 +#endif + +#ifndef __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN__ 0x1234 +#endif + +enum hifc_mod_type { + HIFC_MOD_COMM = 0, /* HW communication module */ + HIFC_MOD_L2NIC = 1, /* L2NIC module*/ + HIFC_MOD_FCOE = 6, + HIFC_MOD_CFGM = 7, /* Configuration module */ + HIFC_MOD_FC = 10, + HIFC_MOD_HILINK = 14, + HIFC_MOD_HW_MAX = 16, /* hardware max module id */ + + /* Software module id, for PF/VF and multi-host */ + HIFC_MOD_MAX, +}; + +struct hifc_cmd_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; +}; + +enum hifc_ack_type { + HIFC_ACK_TYPE_CMDQ, + HIFC_ACK_TYPE_SHARE_CQN, + HIFC_ACK_TYPE_APP_CQN, + HIFC_MOD_ACK_MAX = 15, +}; + +int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +int hifc_api_cmd_write_nack(void *hwdev, u8 dest, + void *cmd, u16 size); + +int hifc_api_cmd_read_ack(void *hwdev, u8 dest, + void *cmd, u16 size, void *ack, u16 ack_size); +/* PF/VF send cmd to ucode by cmdq, and return if success. + * timeout=0, use default timeout. + */ +int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + u64 *out_param, u32 timeout); +/* 1. whether need the timeout parameter + * 2. 
out_param indicates the status of the microcode processing command + */ + +/* PF/VF send cmd to ucode by cmdq, and return detailed result. + * timeout=0, use default timeout. + */ +int hifc_cmdq_detail_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, u32 timeout); + +/* PF/VF send cmd to ucode by cmdq, and return immediately + */ +int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in); + +int hifc_ppf_tmr_start(void *hwdev); +int hifc_ppf_tmr_stop(void *hwdev); + +enum hifc_ceq_event { + HIFC_CMDQ = 3, + HIFC_MAX_CEQ_EVENTS = 6, +}; + +typedef void (*hifc_ceq_event_cb)(void *handle, u32 ceqe_data); +int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event, + hifc_ceq_event_cb callback); +void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event); + +enum hifc_aeq_type { + HIFC_HW_INTER_INT = 0, + HIFC_MBX_FROM_FUNC = 1, + HIFC_MSG_FROM_MGMT_CPU = 2, + HIFC_API_RSP = 3, + HIFC_API_CHAIN_STS = 4, + HIFC_MBX_SEND_RSLT = 5, + HIFC_MAX_AEQ_EVENTS +}; + +enum hifc_aeq_sw_type { + HIFC_STATELESS_EVENT = 0, + HIFC_STATEFULL_EVENT = 1, + HIFC_MAX_AEQ_SW_EVENTS +}; + +typedef void (*hifc_aeq_hwe_cb)(void *handle, u8 *data, u8 size); +int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event, + hifc_aeq_hwe_cb hwe_cb); +void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event); + +typedef u8 (*hifc_aeq_swe_cb)(void *handle, u8 event, u64 data); +int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event, + hifc_aeq_swe_cb aeq_swe_cb); +void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event); + +typedef void (*hifc_mgmt_msg_cb)(void *hwdev, void *pri_handle, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +int hifc_register_mgmt_msg_cb(void *hwdev, + enum hifc_mod_type mod, void *pri_handle, + hifc_mgmt_msg_cb callback); +void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod); + +struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev); +void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *buf); + +int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); +void hifc_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base); + +struct nic_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +int hifc_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info); + +int hifc_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info); + +/* The driver code implementation interface*/ +void hifc_misx_intr_clear_resend_bit(void *hwdev, + u16 msix_idx, u8 clear_resend_en); + +struct hifc_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr); + +int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz); +int hifc_clean_root_ctxt(void *hwdev); +void hifc_record_pcie_error(void *hwdev); + +int hifc_func_rx_tx_flush(void *hwdev); + +int hifc_func_tmr_bitmap_set(void *hwdev, bool enable); + +struct hifc_init_para { + /* Record hifc_pcidev or NDIS_Adapter pointer address*/ + void *adapter_hdl; + /* Record pcidev or Handler pointer address + * for example: 
ioremap interface input parameter + */ + void *pcidev_hdl; + /* Record pcidev->dev or Handler pointer address which used to + * dma address application or dev_err print the parameter + */ + void *dev_hdl; + + void *cfg_reg_base; /* Configure virtual address, bar0/1*/ + /* interrupt configuration register address, bar2/3 */ + void *intr_reg_base; + u64 db_base_phy; + void *db_base; /* the doorbell address, bar4/5 higher 4M space*/ + void *dwqe_mapping;/* direct wqe 4M, follow the doorbell address space*/ + void **hwdev; + void *chip_node; + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. + */ + void *ppf_hwdev; +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +#define MAX_FUNCTION_NUM 512 +#define HIFC_MAX_PF_NUM 16 +#define HIFC_MAX_COS 8 +#define INIT_FAILED 0 +#define INIT_SUCCESS 1 +#define MAX_DRV_BUF_SIZE 4096 + +struct hifc_cmd_get_light_module_abs { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define SFP_INFO_MAX_SIZE 512 +struct hifc_cmd_get_sfp_qsfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 out_len; + u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE]; +}; + +#define HIFC_MAX_PORT_ID 4 + +struct hifc_port_routine_cmd { + bool up_send_sfp_info; + bool up_send_sfp_abs; + + struct hifc_cmd_get_sfp_qsfp_info sfp_info; + struct hifc_cmd_get_light_module_abs abs; +}; + +struct card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + void *log_info; + void *dbgtool_info; + void *func_handle_array[MAX_FUNCTION_NUM]; + unsigned char dp_bus_num; + u8 func_num; + struct attribute dbgtool_attr_file; + + bool cos_up_setted; + u8 cos_up[HIFC_MAX_COS]; + bool ppf_state; + u8 pf_bus_num[HIFC_MAX_PF_NUM]; + + struct hifc_port_routine_cmd rt_cmd[HIFC_MAX_PORT_ID]; + + /* mutex used for copy sfp info */ + struct mutex sfp_mutex; +}; + +enum hifc_hwdev_init_state { + HIFC_HWDEV_NONE_INITED = 0, + HIFC_HWDEV_CLP_INITED, + HIFC_HWDEV_AEQ_INITED, + HIFC_HWDEV_MGMT_INITED, + HIFC_HWDEV_MBOX_INITED, + HIFC_HWDEV_CMDQ_INITED, + HIFC_HWDEV_COMM_CH_INITED, + HIFC_HWDEV_ALL_INITED, + HIFC_HWDEV_MAX_INVAL_INITED +}; + +enum hifc_func_cap { + /* send message to mgmt cpu directly */ + HIFC_FUNC_MGMT = 1 << 0, + /* setting port attribute, pause/speed etc. 
*/ + HIFC_FUNC_PORT = 1 << 1, + /* Enable SR-IOV in default */ + HIFC_FUNC_SRIOV_EN_DFLT = 1 << 2, + /* Can't change VF num */ + HIFC_FUNC_SRIOV_NUM_FIX = 1 << 3, + /* Fcorce pf/vf link up */ + HIFC_FUNC_FORCE_LINK_UP = 1 << 4, + /* Support rate limit */ + HIFC_FUNC_SUPP_RATE_LIMIT = 1 << 5, + HIFC_FUNC_SUPP_DFX_REG = 1 << 6, + /* Support promisc/multicast/all-multi */ + HIFC_FUNC_SUPP_RX_MODE = 1 << 7, + /* Set vf mac and vlan by ip link */ + HIFC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8, + /* Support set mac by ifconfig */ + HIFC_FUNC_SUPP_CHANGE_MAC = 1 << 9, + /* OVS don't support SCTP_CRC/HW_VLAN/LRO */ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10, +}; + +#define FUNC_SUPPORT_MGMT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_MGMT)) +#define FUNC_SUPPORT_PORT_SETTING(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_PORT)) +#define FUNC_SUPPORT_DCB(hwdev) \ + (FUNC_SUPPORT_PORT_SETTING(hwdev)) +#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SRIOV_EN_DFLT)) +#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SRIOV_NUM_FIX)) +#define FUNC_SUPPORT_RX_MODE(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_RX_MODE)) +#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_RATE_LIMIT)) +#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_SET_VF_MAC_VLAN)) +#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_CHANGE_MAC)) +#define FUNC_FORCE_LINK_UP(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_FORCE_LINK_UP)) +#define FUNC_SUPPORT_SCTP_CRC(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_HW_VLAN(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_LRO(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) + +int hifc_init_hwdev(struct hifc_init_para *para); +void hifc_free_hwdev(void *hwdev); +int hifc_stateful_init(void *hwdev); +void hifc_stateful_deinit(void *hwdev); +bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state); +u64 hifc_get_func_feature_cap(void *hwdev); +int hifc_slq_init(void *dev, int num_wqs); +void hifc_slq_uninit(void *dev); +int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, + u16 page_size, u64 *cla_addr, void **handle); +void hifc_slq_free(void *dev, void *handle); +u64 hifc_slq_get_addr(void *handle, u16 index); +u64 hifc_slq_get_first_pageaddr(void *handle); + +typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size); +void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc); +void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd); + +/* defined by chip */ +enum hifc_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +/* defined by chip */ +enum hifc_fault_err_level { + /* default err_level=FAULT_LEVEL_FATAL if + * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT || + * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT || + * FAULT_TYPE_UCODE + * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP + */ + FAULT_LEVEL_FATAL, + 
FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +enum hifc_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + HIFC_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as FAULT_TYPE_UCODE */ + HIFC_FAULT_SRC_HW_MGMT_UCODE, + /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + HIFC_FAULT_SRC_SW_MGMT_UCODE, + HIFC_FAULT_SRC_MGMT_WATCHDOG, + HIFC_FAULT_SRC_MGMT_RESET = 8, + HIFC_FAULT_SRC_HW_PHY_FAULT, + HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20, + HIFC_FAULT_SRC_TYPE_MAX, +}; + +struct hifc_fault_sw_mgmt { + u8 event_id; + u64 event_data; +}; + +union hifc_fault_hw_mgmt { + u32 val[4]; + /* valid only type==FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum hifc_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR + */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only type==FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct hifc_fault_event { + /* enum hifc_fault_type */ + u8 type; + u8 rsvd0[3]; + union hifc_fault_hw_mgmt event; +}; + +struct hifc_fault_recover_info { + u8 fault_src; /* enum hifc_fault_source_type */ + u8 fault_lev; /* enum hifc_fault_err_level */ + u8 rsvd0[2]; + union { + union hifc_fault_hw_mgmt hw_mgmt; + struct hifc_fault_sw_mgmt sw_mgmt; + u32 mgmt_rsvd[4]; + u32 host_rsvd[4]; + } fault_data; +}; + +struct hifc_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +enum port_module_event_type { + HIFC_PORT_MODULE_CABLE_PLUGGED, + HIFC_PORT_MODULE_CABLE_UNPLUGGED, + HIFC_PORT_MODULE_LINK_ERR, + HIFC_PORT_MODULE_MAX_EVENT, +}; + +struct hifc_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +struct hifc_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +struct hifc_mctp_host_info { + u8 major_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 data_len; + void *data; +}; + +enum hifc_event_type { + HIFC_EVENT_LINK_DOWN = 0, + HIFC_EVENT_LINK_UP = 1, + HIFC_EVENT_HEART_LOST = 2, + HIFC_EVENT_FAULT = 3, + HIFC_EVENT_NOTIFY_VF_DCB_STATE = 4, + HIFC_EVENT_DCB_STATE_CHANGE = 5, + HIFC_EVENT_FMW_ACT_NTC = 6, + HIFC_EVENT_PORT_MODULE_EVENT = 7, + HIFC_EVENT_MCTP_GET_HOST_INFO, + HIFC_EVENT_MULTI_HOST_MGMT, + HIFC_EVENT_INIT_MIGRATE_PF, +}; + +struct hifc_event_info { + enum hifc_event_type type; + union { + struct hifc_event_link_info link_info; + struct hifc_fault_event info; + struct hifc_dcb_state dcb_state; + struct 
hifc_port_module_event module_event; + u8 vf_default_cos; + struct hifc_mctp_host_info mctp_info; + }; +}; + +enum hifc_ucode_event_type { + HIFC_INTERNAL_TSO_FATAL_ERROR = 0x0, + HIFC_INTERNAL_LRO_FATAL_ERROR = 0x1, + HIFC_INTERNAL_TX_FATAL_ERROR = 0x2, + HIFC_INTERNAL_RX_FATAL_ERROR = 0x3, + HIFC_INTERNAL_OTHER_FATAL_ERROR = 0x4, + HIFC_NIC_FATAL_ERROR_MAX = 0x8, +}; + +typedef void (*hifc_event_handler)(void *handle, + struct hifc_event_info *event); +/* only register once */ +void hifc_event_register(void *dev, void *pri_handle, + hifc_event_handler callback); +void hifc_event_unregister(void *dev); + +void hifc_detect_hw_present(void *hwdev); + +void hifc_set_chip_absent(void *hwdev); + +int hifc_get_chip_present_flag(void *hwdev); + +void hifc_set_pcie_order_cfg(void *handle); + +int hifc_get_mgmt_channel_status(void *handle); + +struct hifc_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; + u32 board_id; +}; + +int hifc_get_board_info(void *hwdev, struct hifc_board_info *info); + +int hifc_get_card_present_state(void *hwdev, bool *card_present_state); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.c b/drivers/scsi/huawei/hifc/hifc_hwdev.c new file mode 100644 index 0000000000000000000000000000000000000000..765fc1ba90c751b84f28d76cd644a26bf644f3ef --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwdev.c @@ -0,0 +1,3675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" +#include "hifc_wq.h" +#include "hifc_cmdq.h" +#include "hifc_hwif.h" + +#define HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 +#define HIFC_FLR_TIMEOUT 1000 +#define HIFC_HT_GPA_PAGE_SIZE 4096UL +#define HIFC_PPF_HT_GPA_SET_RETRY_TIMES 10 +#define HIFC_GET_SFP_INFO_REAL_TIME 0x1 +#define HIFC_GLB_SO_RO_CFG_SHIFT 0x0 +#define HIFC_GLB_SO_RO_CFG_MASK 0x1 +#define HIFC_DISABLE_ORDER 0 +#define HIFC_GLB_DMA_SO_RO_GET(val, member) \ + (((val) >> HIFC_GLB_##member##_SHIFT) & HIFC_GLB_##member##_MASK) + +#define HIFC_GLB_DMA_SO_R0_CLEAR(val, member) \ + ((val) & (~(HIFC_GLB_##member##_MASK << HIFC_GLB_##member##_SHIFT))) + +#define HIFC_GLB_DMA_SO_R0_SET(val, member) \ + (((val) & HIFC_GLB_##member##_MASK) << HIFC_GLB_##member##_SHIFT) + +#define HIFC_MGMT_CHANNEL_STATUS_SHIFT 0x0 +#define HIFC_MGMT_CHANNEL_STATUS_MASK 0x1 +#define HIFC_ACTIVE_STATUS_MASK 0x80000000 +#define HIFC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF + +#define HIFC_GET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) >> HIFC_##member##_SHIFT) & HIFC_##member##_MASK) + +#define HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \ + ((val) & (~(HIFC_##member##_MASK << HIFC_##member##_SHIFT))) + +#define HIFC_SET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) & HIFC_##member##_MASK) << HIFC_##member##_SHIFT) + +#define HIFC_BOARD_IS_PHY(hwdev) \ + ((hwdev)->board_info.board_type == 4 && \ + (hwdev)->board_info.board_id == 24) + +struct comm_info_ht_gpa_set { + u8 
status; + u8 version; + u8 rsvd0[6]; + u32 rsvd1; + u32 rsvd2; + u64 page_pa0; + u64 page_pa1; +}; + +struct hifc_cons_idx_attr { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 sq_id; + u64 ci_addr; +}; + +struct hifc_clear_doorbell { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hifc_clear_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hifc_msix_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesct_timer_cnt; + u8 lli_tmier_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +struct hifc_func_tmr_bitmap_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 op_id; /* 0:start; 1:stop */ + u8 ppf_idx; + u32 rsvd1; +}; + +struct hifc_ppf_tmr_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 ppf_idx; + u8 op_id; /* 0: stop timer; 1:start timer */ + u8 rsvd1[2]; + u32 rsvd2; +}; + +struct hifc_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +int hifc_hw_rx_buf_size[] = { + HIFC_RX_BUF_SIZE_32B, + HIFC_RX_BUF_SIZE_64B, + HIFC_RX_BUF_SIZE_96B, + HIFC_RX_BUF_SIZE_128B, + HIFC_RX_BUF_SIZE_192B, + HIFC_RX_BUF_SIZE_256B, + HIFC_RX_BUF_SIZE_384B, + HIFC_RX_BUF_SIZE_512B, + HIFC_RX_BUF_SIZE_768B, + HIFC_RX_BUF_SIZE_1K, + HIFC_RX_BUF_SIZE_1_5K, + HIFC_RX_BUF_SIZE_2K, + HIFC_RX_BUF_SIZE_3K, + HIFC_RX_BUF_SIZE_4K, + HIFC_RX_BUF_SIZE_8K, + HIFC_RX_BUF_SIZE_16K, +}; + +struct hifc_comm_board_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hifc_board_info info; + + u32 rsvd1[4]; +}; + +#define PHY_DOING_INIT_TIMEOUT (15 * 1000) + +struct hifc_phy_init_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 init_status; + u8 rsvd1[3]; +}; + +enum phy_init_status_type { + PHY_INIT_DOING = 0, + PHY_INIT_SUCCESS = 1, + PHY_INIT_FAIL = 2, + PHY_NONSUPPORT = 3, +}; + +struct hifc_update_active { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 update_flag; + u32 update_status; +}; + +struct hifc_mgmt_watchdog_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + +struct hifc_fmw_act_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1[5]; +}; + +#define HIFC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +struct hifc_wq_page_size { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +#define MAX_PCIE_DFX_BUF_SIZE (1024) + +struct hifc_pcie_dfx_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + int len; + u32 rsvd; +}; + +struct hifc_pcie_dfx_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 host_id; + u8 last; + u8 rsvd[2]; + u32 offset; + + u8 data[MAX_PCIE_DFX_BUF_SIZE]; +}; + +struct hifc_reg_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 reg_addr; + u32 val_length; + + u32 data[2]; +}; + +#define HIFC_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HIFC_DMA_ATTR_ENTRY_AT_SHIFT 8 
+#define HIFC_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HIFC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HIFC_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HIFC_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HIFC_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HIFC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HIFC_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HIFC_DMA_ATTR_ENTRY_##member##_MASK) << \ + HIFC_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HIFC_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HIFC_DMA_ATTR_ENTRY_##member##_MASK \ + << HIFC_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HIFC_PCIE_ST_DISABLE 0 +#define HIFC_PCIE_AT_DISABLE 0 +#define HIFC_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +#define HIFC_CHIP_PRESENT 1 +#define HIFC_CHIP_ABSENT 0 + +struct hifc_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hifc_fault_event event; +}; + +#define HEARTBEAT_DRV_MAGIC_ACK 0x5A5A5A5A + +struct hifc_heartbeat_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 mgmt_init_state; + u8 rsvd1[3]; + u32 heart; /* increased every event */ + u32 drv_heart; +}; + +static void hifc_set_mgmt_channel_status(void *handle, bool state) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev || hifc_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return; + + val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR); + val = HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); + val |= HIFC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS); + + hifc_hwif_write_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR, val); +} + +static void hifc_enable_mgmt_channel(void *hwdev, void *buf_out) +{ + struct hifc_hwdev *dev = hwdev; + struct hifc_update_active *active_info = buf_out; + + if (!active_info || hifc_func_type(hwdev) == TYPE_VF || + !(dev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return; + + if ((!active_info->status) && + (active_info->update_status & HIFC_ACTIVE_STATUS_MASK)) { + active_info->update_status &= HIFC_ACTIVE_STATUS_CLEAR; + return; + } + + hifc_set_mgmt_channel_status(hwdev, false); +} + +int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx, + u32 page_size); + +#define HIFC_QUEUE_MIN_DEPTH 6 +#define HIFC_QUEUE_MAX_DEPTH 12 +#define HIFC_MAX_RX_BUFFER_SIZE 15 + +#define ROOT_CTX_QPS_VALID(root_ctxt) \ + ((root_ctxt)->rq_depth >= HIFC_QUEUE_MIN_DEPTH && \ + (root_ctxt)->rq_depth <= HIFC_QUEUE_MAX_DEPTH && \ + (root_ctxt)->sq_depth >= HIFC_QUEUE_MIN_DEPTH && \ + (root_ctxt)->sq_depth <= HIFC_QUEUE_MAX_DEPTH && \ + (root_ctxt)->rx_buf_sz <= HIFC_MAX_RX_BUFFER_SIZE) + +struct hifc_mgmt_status_log { + u8 status; + const char *log; +}; + +static struct hifc_mgmt_status_log mgmt_status_log[] = { + {HIFC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"}, + {HIFC_MGMT_STATUS_ERR_FAILED, "Operation failed"}, + {HIFC_MGMT_STATUS_ERR_PORT, "Invalid port"}, + {HIFC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"}, + {HIFC_MGMT_STATUS_ERR_NOMATCH, "Version not match"}, + {HIFC_MGMT_STATUS_ERR_EXIST, "Entry exists"}, + {HIFC_MGMT_STATUS_ERR_NOMEM, "Out of memory"}, + {HIFC_MGMT_STATUS_ERR_INIT, "Feature not initialized"}, + {HIFC_MGMT_STATUS_ERR_FAULT, "Invalid address"}, + {HIFC_MGMT_STATUS_ERR_PERM, "Operation not permitted"}, + {HIFC_MGMT_STATUS_ERR_EMPTY, "Table empty"}, + {HIFC_MGMT_STATUS_ERR_FULL, "Table full"}, + {HIFC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"}, + {HIFC_MGMT_STATUS_ERR_BUSY, "Device or resource busy "}, + 
{HIFC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation "}, + {HIFC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"}, + {HIFC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"}, + {HIFC_MGMT_STATUS_ERR_CRC, "CRC check failed"}, + {HIFC_MGMT_STATUS_ERR_NXIO, "No such device or address"}, + {HIFC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback fail"}, + {HIFC_MGMT_STATUS_ERR_LEN, "Length too short or too long"}, + {HIFC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"}, +}; + +static void __print_status_info(struct hifc_hwdev *dev, + enum hifc_mod_type mod, u8 cmd, int index) +{ + if (mod == HIFC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } else if (mod == HIFC_MOD_L2NIC || + mod == HIFC_MOD_HILINK) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } +} + +static bool hifc_status_need_special_handle(enum hifc_mod_type mod, + u8 cmd, u8 status) +{ + if (mod == HIFC_MOD_L2NIC) { + /* optical module isn't plugged in */ + if (((cmd == HIFC_PORT_CMD_GET_STD_SFP_INFO) || + (cmd == HIFC_PORT_CMD_GET_SFP_INFO)) && + (status == HIFC_MGMT_STATUS_ERR_NXIO)) + return true; + + if ((cmd == HIFC_PORT_CMD_SET_MAC || + cmd == HIFC_PORT_CMD_UPDATE_MAC) && + status == HIFC_MGMT_STATUS_ERR_EXIST) + return true; + } + + return false; +} + +static bool print_status_info_valid(enum hifc_mod_type mod, + const void *buf_out) +{ + if (!buf_out) + return false; + + if (mod != HIFC_MOD_COMM && mod != HIFC_MOD_L2NIC && + mod != HIFC_MOD_HILINK) + return false; + + return true; +} + +static void hifc_print_status_info(void *hwdev, enum hifc_mod_type mod, + u8 cmd, const void *buf_out) +{ + struct hifc_hwdev *dev = hwdev; + int i, size; + u8 status; + + if (!print_status_info_valid(mod, buf_out)) + return; + + status = *(u8 *)buf_out; + + if (!status) + return; + + if (hifc_status_need_special_handle(mod, cmd, status)) + return; + + size = sizeof(mgmt_status_log) / sizeof(mgmt_status_log[0]); + for (i = 0; i < size; i++) { + if (status == mgmt_status_log[i].status) { + __print_status_info(dev, mod, cmd, i); + return; + } + } + + if (mod == HIFC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } else if (mod == HIFC_MOD_L2NIC || mod == HIFC_MOD_HILINK) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } +} + +void hifc_set_chip_present(void *hwdev) +{ + ((struct hifc_hwdev *)hwdev)->chip_present_flag = HIFC_CHIP_PRESENT; +} + +void hifc_set_chip_absent(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Card not present\n"); + dev->chip_present_flag = HIFC_CHIP_ABSENT; +} + +int hifc_get_chip_present_flag(void *hwdev) +{ + int flag; + + if (!hwdev) + return -EINVAL; + flag = ((struct hifc_hwdev *)hwdev)->chip_present_flag; + return flag; +} + +void hifc_force_complete_all(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + struct hifc_recv_msg *recv_resp_msg; + + set_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state); + + if (hifc_func_type(dev) != TYPE_VF && + hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_MGMT_INITED)) { + recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt; + if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_resp_msg->recv_done); + dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + } + + /* only flush sync cmdq to avoid blocking remove */ + if 
(hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_CMDQ_INITED)) + hifc_cmdq_flush_cmd(hwdev, + &dev->cmdqs->cmdq[HIFC_CMDQ_SYNC]); + + clear_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state); +} + +void hifc_detect_hw_present(void *hwdev) +{ + u32 addr, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + hifc_set_chip_absent(hwdev); + hifc_force_complete_all(hwdev); + } +} + +void hifc_record_pcie_error(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +static inline void __set_heartbeat_ehd_detect_delay(struct hifc_hwdev *hwdev, + u32 delay_ms) +{ + hwdev->heartbeat_ehd.start_detect_jiffies = + jiffies + msecs_to_jiffies(delay_ms); +} + +static int __pf_to_mgmt_pre_handle(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd) +{ + if (hifc_get_mgmt_channel_status(hwdev)) { + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC) + return HIFC_DEV_BUSY_ACTIVE_FW; + else + return -EBUSY; + } + + /* Set channel invalid, don't allowed to send other cmd */ + if (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_ACTIVATE_FW) { + hifc_set_mgmt_channel_status(hwdev, true); + /* stop heartbeat enhanced detection temporary, and will + * restart in firmware active event when mgmt is resetted + */ + __set_heartbeat_ehd_detect_delay(hwdev, + HIFC_DEV_ACTIVE_FW_TIMEOUT); + } + + return 0; +} + +static void __pf_to_mgmt_after_handle(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + int sw_status, void *mgmt_status) +{ + /* if activate fw is failed, set channel valid */ + if (mod == HIFC_MOD_COMM && + cmd == HIFC_MGMT_CMD_ACTIVATE_FW) { + if (sw_status) + hifc_set_mgmt_channel_status(hwdev, false); + else + hifc_enable_mgmt_channel(hwdev, mgmt_status); + } +} + +int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!((struct hifc_hwdev *)hwdev)->chip_present_flag) + return -EPERM; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) + return -EPERM; + + if (in_size > HIFC_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd); + if (err) + return err; + + err = hifc_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out); + + return err; +} + +static bool is_sfp_info_cmd_cached(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_sfp_qsfp_info *sfp_info; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + sfp_info = buf_in; + if (!chip_node->rt_cmd || sfp_info->port_id >= HIFC_MAX_PORT_ID || + *out_size < sizeof(*sfp_info)) + return false; + + if (sfp_info->version == HIFC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool is_sfp_abs_cmd_cached(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_light_module_abs *abs; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node 
*chip_node = hwdev->chip_node; + + abs = buf_in; + if (!chip_node->rt_cmd || abs->port_id >= HIFC_MAX_PORT_ID || + *out_size < sizeof(*abs)) + return false; + + if (abs->version == HIFC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->abs, sizeof(*abs)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool driver_processed_cmd(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct card_node *chip_node = hwdev->chip_node; + + if (mod == HIFC_MOD_L2NIC) { + if (cmd == HIFC_PORT_CMD_GET_SFP_INFO && + chip_node->rt_cmd->up_send_sfp_info) { + return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } else if (cmd == HIFC_PORT_CMD_GET_SFP_ABS && + chip_node->rt_cmd->up_send_sfp_abs) { + return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } + } + + return false; +} + +static int send_sync_mgmt_msg(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(HIFC_DEV_ACTIVE_FW_TIMEOUT); + do { + if (!hifc_get_mgmt_channel_status(hwdev) || + !hifc_get_chip_present_flag(hwdev)) + break; + + msleep(1000); + } while (time_before(jiffies, end)); + + if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size, buf_out, + out_size)) + return 0; + + return hifc_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = send_sync_mgmt_msg(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + + hifc_print_status_info(hwdev, mod, cmd, buf_out); + + return err; +} + +/* PF/VF sends msg to uP by api cmd and returns immediately */ +int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + if (hifc_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Mailbox doesn't support async cmd\n"); + } else { + err = hifc_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} + +int hifc_msg_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = hifc_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size); + + return err; +} + +/** + * hifc_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be a multiple of 4 bytes + **/ +void hifc_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/** + * hifc_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to
convert + * @len: length of data to convert + **/ +void hifc_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +/** + * hifc_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + **/ +void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr) +{ + struct hifc_cons_idx_attr cons_idx_attr = {0}; + u16 out_size = sizeof(cons_idx_attr); + int err; + + if (!hwdev || !attr) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &cons_idx_attr.func_idx); + if (err) + return err; + + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.sq_id = q_id; + + cons_idx_attr.ci_addr = attr->ci_dma_base; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + &cons_idx_attr, &out_size, 0); + if (err || !out_size || cons_idx_attr.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cons_idx_attr.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int hifc_set_cmdq_depth(struct hifc_hwdev *hwdev, u16 cmdq_depth) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +static u16 get_hw_rx_buf_size(int rx_buf_sz) +{ +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + u16 num_hw_types = + sizeof(hifc_hw_rx_buf_size) / + sizeof(hifc_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (hifc_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; +} + +int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, 
sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hifc_clean_root_ctxt(void *hwdev) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int wait_for_flr_finish(struct hifc_hwif *hwif) +{ + u32 cnt = 0; + enum hifc_pf_status status; + + while (cnt < HIFC_FLR_TIMEOUT) { + status = hifc_get_pf_status(hwif); + if (status == HIFC_PF_STATUS_FLR_FINISH_FLAG) { + hifc_set_pf_status(hwif, HIFC_PF_STATUS_ACTIVE_FLAG); + return 0; + } + + usleep_range(9900, 10000); + cnt++; + } + + return -EFAULT; +} + +#define HIFC_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static int wait_cmdq_stop(struct hifc_hwdev *hwdev) +{ + enum hifc_cmdq_type cmdq_type; + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + u32 cnt = 0; + int err = 0; + + if (!(cmdqs->status & HIFC_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HIFC_CMDQ_ENABLE; + + while (cnt < HIFC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) { + err = 0; + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type])) { + err = -EBUSY; + break; + } + } + + if (!err) + return 0; + + usleep_range(500, 1000); + cnt++; + } + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type); + } + + cmdqs->status |= HIFC_CMDQ_ENABLE; + + return err; +} + +static int hifc_pf_rx_tx_flush(struct hifc_hwdev *hwdev) +{ + struct hifc_hwif *hwif = hwdev->hwif; + struct hifc_clear_doorbell clear_db = {0}; + struct hifc_clear_resource clr_res = {0}; + u16 out_size, func_id; + int err; + int ret = 0; + + /* wait ucode stop I/O */ + msleep(100); + + err = wait_cmdq_stop(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); + ret = err; + } + + hifc_disable_doorbell(hwif); + + out_size = sizeof(clear_db); + func_id = hifc_global_func_id_hw(hwdev); + clear_db.func_idx = func_id; + clear_db.ppf_idx = HIFC_HWIF_PPF_IDX(hwif); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_FLUSH_DOORBELL, &clear_db, + sizeof(clear_db), &clear_db, &out_size, 0); + if (err || !out_size || clear_db.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n", + err, clear_db.status, out_size); + if (err) + ret = err; + else + ret = -EFAULT; + } + + hifc_set_pf_status(hwif, HIFC_PF_STATUS_FLR_START_FLAG); + + clr_res.func_idx = func_id; + clr_res.ppf_idx = HIFC_HWIF_PPF_IDX(hwif); + + err = hifc_msg_to_mgmt_no_ack(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res)); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to notice flush 
message\n"); + ret = err; + } + + err = wait_for_flr_finish(hwif); + if (err) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + + hifc_enable_doorbell(hwif); + + err = hifc_reinit_cmdq_ctxts(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +int hifc_func_rx_tx_flush(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev) + return -EINVAL; + + if (!dev->chip_present_flag) + return 0; + + return hifc_pf_rx_tx_flush(dev); +} + +int hifc_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info) +{ + struct hifc_hwdev *nic_hwdev = hwdev; + struct hifc_msix_config msix_cfg = {0}; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !interrupt_info) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = interrupt_info->msix_index; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EINVAL; + } + + interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt; + interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt; + interrupt_info->pending_limt = msix_cfg.pending_cnt; + interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt; + interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} + +int hifc_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info) +{ + struct hifc_hwdev *nic_hwdev = hwdev; + struct hifc_msix_config msix_cfg = {0}; + struct nic_interrupt_info temp_info; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = interrupt_info.msix_index; + + err = hifc_get_interrupt_cfg(hwdev, &temp_info); + if (err) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = (u16)interrupt_info.msix_index; + msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg; + msix_cfg.pending_cnt = temp_info.pending_limt; + msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg; + + if (interrupt_info.lli_set) { + msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg; + } + + if (interrupt_info.interrupt_coalesc_set) { + msix_cfg.pending_cnt = interrupt_info.pending_limt; + msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg; + } + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +#define HIFC_MSIX_CNT_RESEND_TIMER_SHIFT 29 +#define HIFC_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HIFC_MSIX_CNT_SET(val, member) \ + (((val) & HIFC_MSIX_CNT_##member##_MASK) << \ + HIFC_MSIX_CNT_##member##_SHIFT) + +void hifc_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + 
struct hifc_hwif *hwif; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + msix_ctrl = HIFC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER); + + addr = HIFC_CSR_MSIX_CNT_ADDR(msix_idx); + + hifc_hwif_write_reg(hwif, addr, msix_ctrl); +} + +static int init_aeqs_msix_attr(struct hifc_hwdev *hwdev) +{ + struct hifc_aeqs *aeqs = hwdev->aeqs; + struct nic_interrupt_info info = {0}; + struct hifc_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hifc_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct hifc_hwdev *hwdev) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + struct nic_interrupt_info info = {0}; + struct hifc_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hifc_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwdev: the pointer to hw device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + **/ +static void set_pf_dma_attr_entry(struct hifc_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hifc_pcie_nosnoop no_snooping, + enum hifc_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HIFC_CSR_DMA_ATTR_TBL_ADDR(entry_idx); + + val = hifc_hwif_read_reg(hwdev->hwif, addr); + val = HIFC_DMA_ATTR_ENTRY_CLEAR(val, ST) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, AT) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, PH) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = HIFC_DMA_ATTR_ENTRY_SET(st, ST) | + HIFC_DMA_ATTR_ENTRY_SET(at, AT) | + HIFC_DMA_ATTR_ENTRY_SET(ph, PH) | + HIFC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) | + HIFC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hifc_hwif_write_reg(hwdev->hwif, addr, val); +} + +/** + * dma_attr_table_init - initialize the the default dma attributes + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +static int dma_attr_table_init(struct hifc_hwdev *hwdev) +{ + int err = 0; + + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HIFC_PCIE_ST_DISABLE, + HIFC_PCIE_AT_DISABLE, + HIFC_PCIE_PH_DISABLE, + HIFC_PCIE_SNOOP, + HIFC_PCIE_TPH_DISABLE); + + return err; +} + +static int resources_state_set(struct hifc_hwdev *hwdev, + enum hifc_res_state state) +{ + struct hifc_cmd_set_res_state res_state = {0}; + u16 out_size = sizeof(res_state); + int err; + + err = 
hifc_global_func_id_get(hwdev, &res_state.func_idx); + if (err) + return err; + + res_state.state = state; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), + &res_state, &out_size, 0); + if (err || !out_size || res_state.status) { + sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n", + err, res_state.status, out_size); + return -EFAULT; + } + + return 0; +} + +static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt = pri_handle; + u8 cmd_idx; + u32 *mem; + u16 i; + + for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + if (!pf_to_mgmt->proc.info[cmd_idx].proc) { + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, + "PF recv up comm msg handle null, cmd(0x%x)\n", + cmd); + } else { + pf_to_mgmt->proc.info[cmd_idx].proc(hwdev, + buf_in, in_size, buf_out, out_size); + } + + return; + } + } + + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n", + cmd); + + mem = buf_in; + for (i = 0; i < (in_size / sizeof(u32)); i++) { + pr_info("0x%x\n", *mem); + mem++; + } + + *out_size = 0; +} + +static int hifc_comm_aeqs_init(struct hifc_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} }; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = HIFC_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > HIFC_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + HIFC_MAX_AEQS); + num_aeqs = HIFC_MAX_AEQS; + } + err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hifc_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + set_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state); + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void hifc_comm_aeqs_free(struct hifc_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} }; + u16 num_irqs, i; + + clear_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state); + + hifc_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + hifc_aeqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int hifc_comm_ceqs_init(struct hifc_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} }; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = HIFC_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs > HIFC_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + HIFC_MAX_CEQS); + num_ceqs = HIFC_MAX_CEQS; + } + + err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hifc_ceqs_init(hwdev, num_ceqs, ceq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + 
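+	/* Rollback path: release the CEQ interrupt vectors requested above. */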
+ceqs_init_err: + for (i = 0; i < num_ceqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void hifc_comm_ceqs_free(struct hifc_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} }; + u16 num_irqs; + int i; + + hifc_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + hifc_ceqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +static int hifc_comm_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + int err; + + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; /* VF do not support send msg to mgmt directly */ + + err = hifc_pf_to_mgmt_init(hwdev); + if (err) + return err; + + hifc_aeq_register_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU, + hifc_mgmt_msg_aeqe_handler); + + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_COMM, + hwdev->pf_to_mgmt, comm_mgmt_msg_handler); + + set_bit(HIFC_HWDEV_MGMT_INITED, &hwdev->func_state); + + return 0; +} + +static void hifc_comm_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; /* VF do not support send msg to mgmt directly */ + + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_COMM); + + hifc_aeq_unregister_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU); + + hifc_pf_to_mgmt_free(hwdev); +} + +static int hifc_comm_clp_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + int err; + + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; + + err = hifc_clp_pf_to_mgmt_init(hwdev); + if (err) + return err; + + set_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state); + + return 0; +} + +static void hifc_comm_clp_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; + + clear_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state); + hifc_clp_pf_to_mgmt_free(hwdev); +} + +static int hifc_comm_cmdqs_init(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + hifc_ceq_register_cb(hwdev, HIFC_CMDQ, hifc_cmdq_ceq_handler); + + err = hifc_set_cmdq_depth(hwdev, HIFC_CMDQ_DEPTH); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + return 0; + +set_cmdq_depth_err: + hifc_cmdqs_free(hwdev); + + return err; +} + +static void hifc_comm_cmdqs_free(struct hifc_hwdev *hwdev) +{ + hifc_ceq_unregister_cb(hwdev, HIFC_CMDQ); + hifc_cmdqs_free(hwdev); +} + +static int hifc_sync_mgmt_func_state(struct hifc_hwdev *hwdev) +{ + int err; + + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_ACTIVE_FLAG); + + err = resources_state_set(hwdev, HIFC_RES_ACTIVE); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to set function resources state\n"); + goto resources_state_set_err; + } + + hwdev->heartbeat_ehd.en = false; + if (HIFC_FUNC_TYPE(hwdev) == TYPE_PPF) { + /* heartbeat synchronize must be after set pf active status */ + hifc_comm_recv_mgmt_self_cmd_reg( + hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT, + mgmt_heartbeat_event_handler); + } + + return 0; + +resources_state_set_err: + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + return err; +} + +static void hifc_unsync_mgmt_func_state(struct hifc_hwdev *hwdev) +{ + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + hwdev->heartbeat_ehd.en = false; + if (HIFC_FUNC_TYPE(hwdev) == TYPE_PPF) { + hifc_comm_recv_up_self_cmd_unreg( + hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT); + } + + resources_state_set(hwdev, 
HIFC_RES_CLEAN); +} + +int hifc_set_vport_enable(void *hwdev, bool enable) +{ + struct hifc_hwdev *nic_hwdev = (struct hifc_hwdev *)hwdev; + struct hifc_vport_state en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.state = enable ? 1 : 0; + + err = l2nic_msg_to_mgmt_sync(hwdev, HIFC_PORT_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size); + if (err || !out_size || en_state.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag) +{ + struct hifc_l2nic_reset l2nic_reset = {0}; + u16 out_size = sizeof(l2nic_reset); + int err = 0; + + err = hifc_set_vport_enable(hwdev, false); + if (err) + return err; + + msleep(100); + + sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag); + + err = hifc_global_func_id_get(hwdev, &l2nic_reset.func_id); + if (err) + return err; + + l2nic_reset.reset_flag = reset_flag; + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_L2NIC_RESET, &l2nic_reset, + sizeof(l2nic_reset), &l2nic_reset, + &out_size, 0); + if (err || !out_size || l2nic_reset.status) { + sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n", + err, l2nic_reset.status, out_size); + return -EIO; + } + + return 0; +} + +static int hifc_l2nic_reset(struct hifc_hwdev *hwdev) +{ + return hifc_l2nic_reset_base(hwdev, 0); +} + +static int __get_func_misc_info(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_get_board_info(hwdev, &hwdev->board_info); + if (err) { + sdk_err(hwdev->dev_hdl, "Get board info failed\n"); + return err; + } + + return 0; +} + +static int init_func_mode(struct hifc_hwdev *hwdev) +{ + int err; + + err = __get_func_misc_info(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get function misc information\n"); + return err; + } + + err = hifc_l2nic_reset(hwdev); + if (err) + return err; + + return 0; +} + +static int __init_eqs_msix_attr(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_aeqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + return err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + return err; + } + + return 0; +} + +static int init_cmdqs_channel(struct hifc_hwdev *hwdev) +{ + u16 func_id; + int err; + + dma_attr_table_init(hwdev); + + err = hifc_comm_ceqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + return err; + } + + err = __init_eqs_msix_attr(hwdev); + if (err) + goto init_eqs_msix_err; + + /* set default wq page_size */ + hwdev->wq_page_size = HIFC_DEFAULT_WQ_PAGE_SIZE; + + err = hifc_global_func_id_get(hwdev, &func_id); + if (err) + goto get_func_id_err; + + err = hifc_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = hifc_comm_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + set_bit(HIFC_HWDEV_CMDQ_INITED, &hwdev->func_state); + + return 0; + +cmdq_init_err: + if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF) + hifc_set_wq_page_size(hwdev, func_id,
HIFC_HW_WQ_PAGE_SIZE); +init_wq_pg_size_err: +get_func_id_err: +init_eqs_msix_err: + hifc_comm_ceqs_free(hwdev); + + return err; +} + +static int init_mgmt_channel(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_comm_clp_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = hifc_comm_aeqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + goto aeqs_init_err; + } + + err = hifc_comm_pf_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init msg\n"); + goto msg_init_err; + } + + return err; + +msg_init_err: + hifc_comm_aeqs_free(hwdev); + +aeqs_init_err: + hifc_comm_clp_to_mgmt_free(hwdev); + + return err; +} + +/* initialize communication channel */ +int hifc_init_comm_ch(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_mgmt_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n"); + return err; + } + + err = init_func_mode(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); + goto func_mode_err; + } + + err = init_cmdqs_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); + goto init_cmdqs_channel_err; + } + + err = hifc_sync_mgmt_func_state(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n"); + goto sync_mgmt_func_err; + } + + err = hifc_aeq_register_swe_cb(hwdev, HIFC_STATELESS_EVENT, + hifc_nic_sw_aeqe_handler); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to register ucode aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + set_bit(HIFC_HWDEV_COMM_CH_INITED, &hwdev->func_state); + + return 0; + +register_ucode_aeqe_err: + hifc_unsync_mgmt_func_state(hwdev); +sync_mgmt_func_err: + return err; + +init_cmdqs_channel_err: + +func_mode_err: + return err; +} + +static void __uninit_comm_module(struct hifc_hwdev *hwdev, + enum hifc_hwdev_init_state init_state) +{ + u16 func_id; + + switch (init_state) { + case HIFC_HWDEV_COMM_CH_INITED: + hifc_aeq_unregister_swe_cb(hwdev, + HIFC_STATELESS_EVENT); + hifc_unsync_mgmt_func_state(hwdev); + break; + case HIFC_HWDEV_CMDQ_INITED: + hifc_comm_cmdqs_free(hwdev); + /* VF can set page size of 256K only, any other value + * will return error in pf, pf will set all vf's page + * size to 4K when disable sriov + */ + if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF) { + func_id = hifc_global_func_id_hw(hwdev); + hifc_set_wq_page_size(hwdev, func_id, + HIFC_HW_WQ_PAGE_SIZE); + } + + hifc_comm_ceqs_free(hwdev); + + break; + case HIFC_HWDEV_MBOX_INITED: + break; + case HIFC_HWDEV_MGMT_INITED: + hifc_comm_pf_to_mgmt_free(hwdev); + break; + case HIFC_HWDEV_AEQ_INITED: + hifc_comm_aeqs_free(hwdev); + break; + case HIFC_HWDEV_CLP_INITED: + hifc_comm_clp_to_mgmt_free(hwdev); + break; + default: + break; + } +} + +#define HIFC_FUNC_STATE_BUSY_TIMEOUT 300 +void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev) +{ + enum hifc_hwdev_init_state init_state = HIFC_HWDEV_COMM_CH_INITED; + int cnt; + + while (init_state > HIFC_HWDEV_NONE_INITED) { + if (!test_bit(init_state, &hwdev->func_state)) { + init_state--; + continue; + } + clear_bit(init_state, &hwdev->func_state); + + cnt = 0; + while (test_bit(HIFC_HWDEV_STATE_BUSY, &hwdev->func_state) && + cnt++ <= HIFC_FUNC_STATE_BUSY_TIMEOUT) + usleep_range(900, 1000); + + __uninit_comm_module(hwdev, init_state); + + init_state--; + } +} + +int hifc_slq_init(void *dev, int num_wqs) +{ + struct hifc_hwdev *hwdev = dev; + int err; + + if (!dev) + return -EINVAL; + + 
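+	/* Allocate the wqs container first; hifc_wqs_alloc() then sets up num_wqs work queues inside it, and the container is freed again below if that fails. */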
hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL); + if (!hwdev->wqs) + return -ENOMEM; + + err = hifc_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n"); + kfree(hwdev->wqs); + hwdev->wqs = NULL; + } + + return err; +} + +void hifc_slq_uninit(void *dev) +{ + struct hifc_hwdev *hwdev = dev; + + if (!hwdev) + return; + + hifc_wqs_free(hwdev->wqs); + + kfree(hwdev->wqs); +} + +int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size, + u64 *cla_addr, void **handle) +{ + struct hifc_hwdev *hwdev = dev; + struct hifc_wq *wq; + int err; + + if (!dev || !cla_addr || !handle) + return -EINVAL; + + wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (!wq) + return -ENOMEM; + + err = hifc_wq_allocate(hwdev->wqs, wq, wqebb_size, hwdev->wq_page_size, + q_depth, 0); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n"); + kfree(wq); + return -EFAULT; + } + + *cla_addr = wq->block_paddr; + *handle = wq; + + return 0; +} + +void hifc_slq_free(void *dev, void *handle) +{ + struct hifc_hwdev *hwdev = dev; + + if (!hwdev || !handle) + return; + + hifc_wq_free(hwdev->wqs, handle); + kfree(handle); +} + +u64 hifc_slq_get_addr(void *handle, u16 index) +{ + if (!handle) + return 0; /* NULL of wqe addr */ + + return (u64)hifc_get_wqebb_addr(handle, index); +} + +u64 hifc_slq_get_first_pageaddr(void *handle) +{ + struct hifc_wq *wq = handle; + + if (!handle) + return 0; /* NULL of wqe addr */ + + return hifc_get_first_wqe_page_addr(wq); +} + +int hifc_func_tmr_bitmap_set(void *hwdev, bool en) +{ + struct hifc_func_tmr_bitmap_op bitmap_op = {0}; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &bitmap_op.func_idx); + if (err) + return err; + + bitmap_op.ppf_idx = hifc_ppf_idx(hwdev); + if (en) + bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE; + else + bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size, 0); + if (err || !out_size || bitmap_op.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int ppf_ht_gpa_set(struct hifc_hwdev *hwdev, struct hifc_page_addr *pg0, + struct hifc_page_addr *pg1) +{ + struct comm_info_ht_gpa_set ht_gpa_set = {0}; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PPF_HT_GPA_SET, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size, 0); + if (ret || !out_size || ht_gpa_set.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.status, out_size); + return -EFAULT; + } 
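+	/* Firmware accepted both pages; record them so hifc_ppf_ht_gpa_deinit() can free them later. */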
+ + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hifc_ppf_ht_gpa_init(struct hifc_hwdev *hwdev) +{ + int ret; + int i; + int j; + int size; + + struct hifc_page_addr page_addr0[HIFC_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hifc_page_addr page_addr1[HIFC_PPF_HT_GPA_SET_RETRY_TIMES]; + + size = HIFC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < HIFC_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (!ret) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= HIFC_PPF_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +void hifc_ppf_ht_gpa_deinit(struct hifc_hwdev *hwdev) +{ + if (hwdev->page_pa0.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + hwdev->page_pa0.phys_addr); + hwdev->page_pa0.virt_addr = NULL; + } + + if (hwdev->page_pa1.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + } +} + +static int set_ppf_tmr_status(struct hifc_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct hifc_ppf_tmr_op op = {0}; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + if (hifc_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + if (status == HIFC_PPF_TMR_FLAG_START) { + err = hifc_ppf_ht_gpa_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); + return -EFAULT; + } + } else { + hifc_ppf_ht_gpa_deinit(hwdev); + } + + op.op_id = status; + op.ppf_idx = hifc_ppf_idx(hwdev); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PPF_TMR_SET, &op, + sizeof(op), &op, &out_size, 0); + if (err || !out_size || op.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hifc_ppf_tmr_start(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_START); +} + +int hifc_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_STOP); +} + +int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx, + u32 page_size) +{ + struct hifc_wq_page_size page_size_info = {0}; + u16 out_size = sizeof(page_size_info); + int err; + + page_size_info.func_idx = func_idx; + page_size_info.ppf_idx = hifc_ppf_idx(hwdev); + page_size_info.page_size = HIFC_PAGE_SIZE_HW(page_size); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PAGESIZE_SET, + &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, 0); + if (err || !out_size || 
page_size_info.status) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n", + err, page_size_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd) +{ + if ((mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_GET_HOST_INFO) || + (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_HEARTBEAT_EVENT)) + return false; + + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC || + mod == HIFC_MOD_HILINK) + return true; + + return false; +} + +#define FAULT_SHOW_STR_LEN 16 + +static void chip_fault_show(struct hifc_hwdev *hwdev, + struct hifc_fault_event *event) +{ + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1]; + struct hifc_fault_event_stats *fault; + u8 node_id, level; + u32 pos, base; + + fault = &hwdev->hw_stats.fault_event_stats; + + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) { + sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n", + level, level_str, event->event.chip.func_id); + atomic_inc(&fault->fault_type_stat[event->type]); + } + sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); + + node_id = event->event.chip.node_id; + atomic_inc(&fault->chip_fault_stats[node_id][level]); + + base = event->event.chip.node_id * FAULT_LEVEL_MAX * + HIFC_CHIP_ERROR_TYPE_MAX; + pos = base + HIFC_CHIP_ERROR_TYPE_MAX * level + + event->event.chip.err_type; + if (pos < HIFC_CHIP_FAULT_SIZE) + hwdev->chip_fault_stats[pos]++; +} + +static void fault_report_show(struct hifc_hwdev *hwdev, + struct hifc_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault"}; + char type_str[FAULT_SHOW_STR_LEN + 1]; + struct hifc_fault_event_stats *fault; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d.\n", + hifc_global_func_id(hwdev)); + + memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN); + else + strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN); + + sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0], event->event.val[1], event->event.val[2], + event->event.val[3]); + + fault = &hwdev->hw_stats.fault_event_stats; + + switch (event->type) { + case FAULT_TYPE_CHIP: + chip_fault_show(hwdev, event); + break; + case FAULT_TYPE_UCODE: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, 
ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, + event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n", + event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void hifc_refresh_history_fault(struct hifc_hwdev *hwdev, + struct hifc_fault_recover_info *info) +{ + if (!hwdev->history_fault_flag) { + hwdev->history_fault_flag = true; + memcpy(&hwdev->history_fault, info, + sizeof(struct hifc_fault_recover_info)); + } else { + if (hwdev->history_fault.fault_lev >= info->fault_lev) + memcpy(&hwdev->history_fault, info, + sizeof(struct hifc_fault_recover_info)); + } +} + +static void fault_event_handler(struct hifc_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hifc_cmd_fault_event *fault_event; + struct hifc_event_info event_info; + struct hifc_fault_info_node *fault_node; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld.\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (hwdev->event_callback) { + event_info.type = HIFC_EVENT_FAULT; + memcpy(&event_info.info, &fault_event->event, + sizeof(event_info.info)); + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_node->info.fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_node->info.fault_src = HIFC_FAULT_SRC_HW_PHY_FAULT; + + if (fault_node->info.fault_src == HIFC_FAULT_SRC_HW_MGMT_CHIP) + fault_node->info.fault_lev = + fault_event->event.event.chip.err_level; + else + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + + memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event, + sizeof(union hifc_fault_hw_mgmt)); + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void heartbeat_lost_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_fault_info_node *fault_node; + struct hifc_event_info event_info = {0}; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + hifc_global_func_id(hwdev)); + + if (hwdev->event_callback) { + event_info.type = HIFC_EVENT_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + 
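+	/* Record the lost heartbeat as a fatal entry in the driver's history fault information. */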
fault_node->info.fault_src = HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void sw_watchdog_timeout_info_show(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_mgmt_watchdog_info *watchdog_info; + u32 *dump_addr, *reg, stack_len, i, j; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld.\n", + in_size, sizeof(*watchdog_info)); + return; + } + + watchdog_info = buf_in; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, + watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n", + watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + for (i = 0; i < 3; i++) { + reg = watchdog_info->reg + (u64)(u32)(4 * i); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *(reg), *(reg + 1), *(reg + 2), *(reg + 3)); + } + + sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]); + + if (watchdog_info->stack_actlen <= 1024) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = 1024; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / 16); i++) { + dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16))); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 1), *(dump_addr + 2), + *(dump_addr + 3)); + } + + for (j = 0; j < ((stack_len % 16) / 4); j++) { + dump_addr = (u32 *)(watchdog_info->data + + ((u64)(u32)(i * 16 + j * 4))); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_fault_info_node *fault_node; + + sw_watchdog_timeout_info_show(hwdev, buf_in, in_size, + buf_out, out_size); + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HIFC_FAULT_SRC_MGMT_WATCHDOG; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void mgmt_reset_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + sdk_info(hwdev->dev_hdl, "Mgmt is reset\n"); + + /* mgmt reset only occurred when hot update or Mgmt deadloop, + * if Mgmt deadloop, mgmt will report an event with + * mod=0, cmd=0x56, and will reported fault to os, + * so mgmt reset event don't need to report fault + */ +} + +static void 
hifc_fmw_act_ntc_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_event_info event_info = {0}; + struct hifc_fmw_act_ntc *notice_info; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n", + in_size, sizeof(*notice_info)); + return; + } + + /* mgmt is active now, restart heartbeat enhanced detection */ + __set_heartbeat_ehd_detect_delay(hwdev, 0); + + if (!hwdev->event_callback) + return; + + event_info.type = HIFC_EVENT_FMW_ACT_NTC; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*notice_info); + notice_info = buf_out; + notice_info->status = 0; +} + +static void hifc_pcie_dfx_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_pcie_dfx_ntc *notice_info = buf_in; + struct hifc_pcie_dfx_info *dfx_info; + u16 size = 0; + u16 cnt = 0; + u32 num = 0; + u32 i, j; + int err; + u32 *reg; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n", + in_size, sizeof(*notice_info)); + return; + } + + dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL); + if (!dfx_info) { + sdk_err(hwdev->dev_hdl, "Malloc dfx_info memory failed\n"); + return; + } + + ((struct hifc_pcie_dfx_ntc *)buf_out)->status = 0; + *out_size = sizeof(*notice_info); + num = (u32)(notice_info->len / 1024); + sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len); + sdk_info(hwdev->dev_hdl, "PCIE DFX:\n"); + dfx_info->host_id = 0; + for (i = 0; i < num; i++) { + dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE; + if (i == (num - 1)) + dfx_info->last = 1; + size = sizeof(*dfx_info); + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PCIE_DFX_GET, + dfx_info, sizeof(*dfx_info), + dfx_info, &size, 0); + if (err || dfx_info->status || !size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n", + err, dfx_info->status, size); + kfree(dfx_info); + return; + } + + reg = (u32 *)dfx_info->data; + for (j = 0; j < 256; j = j + 8) { + /*lint -save -e661 -e662*/ + sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + cnt, reg[j], reg[(u32)(j + 1)], + reg[(u32)(j + 2)], reg[(u32)(j + 3)], + reg[(u32)(j + 4)], reg[(u32)(j + 5)], + reg[(u32)(j + 6)], reg[(u32)(j + 7)]); + /*lint -restore*/ + cnt = cnt + 32; + } + memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE); + } + kfree(dfx_info); +} + +struct hifc_mctp_get_host_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 huawei_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 actual_len; + + u8 data[1024]; +}; + +static void hifc_mctp_get_host_info_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_event_info event_info = {0}; + struct hifc_mctp_get_host_info *mctp_out, *mctp_in; + struct hifc_mctp_host_info *host_info; + + if (in_size != sizeof(*mctp_in)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n", + in_size, sizeof(*mctp_in)); + return; + } + + *out_size = sizeof(*mctp_out); + mctp_out = buf_out; + mctp_out->status = 0; + + if (!hwdev->event_callback) { + mctp_out->status = HIFC_MGMT_STATUS_ERR_INIT; + return; + } + + mctp_in = buf_in; + host_info = &event_info.mctp_info; + host_info->major_cmd = mctp_in->huawei_cmd; + host_info->sub_cmd = 
mctp_in->sub_cmd; + host_info->data = mctp_out->data; + + event_info.type = HIFC_EVENT_MCTP_GET_HOST_INFO; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + mctp_out->actual_len = host_info->data_len; +} + +char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"}; + +char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = { + "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC", + "Back plane", "BaseT" +}; + +static void __get_port_type(struct hifc_hwdev *hwdev, + struct hifc_link_info *info, char **port_type) +{ + if (info->cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return; + } + + if (info->port_type < LINK_PORT_MAX_TYPE) + *port_type = __hw_to_char_port_type[info->port_type]; + else + sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n", + info->port_type); + if (info->port_type == LINK_PORT_FIBRE) { + if (info->port_sub_type == FIBRE_SUBTYPE_SR) + *port_type = "Fibre-SR"; + else if (info->port_sub_type == FIBRE_SUBTYPE_LR) + *port_type = "Fibre-LR"; + } +} + +static void __print_cable_info(struct hifc_hwdev *hwdev, + struct hifc_link_info *info) +{ + char tmp_str[512] = {0}; + char tmp_vendor[17] = {0}; + char *port_type = "Unknown port type"; + int i; + + __get_port_type(hwdev, info, &port_type); + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, + sizeof(info->vendor_name)); + snprintf(tmp_str, sizeof(tmp_str) - 1, + "Vendor: %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, port_type, info->cable_length, + info->cable_max_speed); + if (info->port_type == LINK_PORT_FIBRE || + info->port_type == LINK_PORT_AOC) { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, %s, Temperature: %u", tmp_str, + info->sfp_type ? "SFP" : "QSFP", info->cable_temp); + if (info->sfp_type) { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, rx power: %uuW, tx power: %uuW", + tmp_str, info->power[0], info->power[1]); + } else { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, rx power: %uuw %uuW %uuW %uuW", + tmp_str, info->power[0], info->power[1], + info->power[2], info->power[3]); + } + } + + sdk_info(hwdev->dev_hdl, "Cable information: %s\n", + tmp_str); +} + +static void __hi30_lane_info(struct hifc_hwdev *hwdev, + struct hilink_lane *lane) +{ + struct hi30_ffe_data *ffe_data; + struct hi30_ctle_data *ctle_data; + + ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe; + ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle; + + sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n", + (ffe_data->PRE1 & 0x10) ? "-" : "", + (int)(ffe_data->PRE1 & 0xf), + (ffe_data->PRE2 & 0x10) ? "-" : "", + (int)(ffe_data->PRE2 & 0xf), + (int)ffe_data->MAIN, + (ffe_data->POST1 & 0x10) ? "-" : "", + (int)(ffe_data->POST1 & 0xf), + (ffe_data->POST2 & 0x10) ? 
"-" : "", + (int)(ffe_data->POST2 & 0xf)); + sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n", + ctle_data->ctlebst[0], ctle_data->ctlebst[1], + ctle_data->ctlebst[2], ctle_data->ctlecmband[0], + ctle_data->ctlecmband[1], ctle_data->ctlecmband[2], + ctle_data->ctlermband[0], ctle_data->ctlermband[1], + ctle_data->ctlermband[2], ctle_data->ctleza[0], + ctle_data->ctleza[1], ctle_data->ctleza[2]); +} + +static void __print_hi30_status(struct hifc_hwdev *hwdev, + struct hifc_link_info *info) +{ + struct hilink_lane *lane; + int lane_used_num = 0, i; + + for (i = 0; i < HILINK_MAX_LANE; i++) { + lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane)); + if (!lane->lane_used) + continue; + + __hi30_lane_info(hwdev, lane); + lane_used_num++; + } + + /* in new firmware, all lane info setted in lane2 */ + if (lane_used_num) + return; + + /* compatible old firmware */ + __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1); +} + +static void __print_link_info(struct hifc_hwdev *hwdev, + struct hifc_link_info *info, + enum hilink_info_print_event type) +{ + char *fec = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = __hw_to_char_fec[info->fec]; + else + sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n", + info->fec); + + if (type == HILINK_EVENT_LINK_UP || !info->an_state) { + sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n", + info->speed, fec, info->an_state ? "on" : "off"); + } else { + sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n", + info->an_state ? "on" : "off"); + } +} + +static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = { + "", "link up", "link down", "cable plugged" +}; + +void print_hilink_info(struct hifc_hwdev *hwdev, + enum hilink_info_print_event type, + struct hifc_link_info *info) +{ + __print_cable_info(hwdev, info); + + __print_link_info(hwdev, info, type); + + __print_hi30_status(hwdev, info); + + if (type == HILINK_EVENT_LINK_UP) + return; + + if (type == HILINK_EVENT_CABLE_PLUGGED) { + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n", + info->alos, info->rx_los); + return; + } + + sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + info->pma_status == 1 ? "off" : "on", + info->mac_tx_en ? "enable" : "disable", + info->mac_rx_en ? 
"enable" : "disable", info->pma_dbg_info_reg, + info->pma_signal_ok_reg, info->rf_lf_status_reg); + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + info->alos, info->rx_los, info->pcs_err_blk_cnt_reg, + info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt); +} + +static void hifc_print_hilink_info(struct hifc_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hifc_hilink_link_info *hilink_info = buf_in; + struct hifc_link_info *info; + enum hilink_info_print_event type; + + if (in_size != sizeof(*hilink_info)) { + sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n", + in_size, sizeof(*hilink_info)); + return; + } + + ((struct hifc_hilink_link_info *)buf_out)->status = 0; + *out_size = sizeof(*hilink_info); + + info = &hilink_info->info; + type = hilink_info->info_type; + + if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) { + sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return; + } + + sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n", + hilink_info_report_type[type]); + + print_hilink_info(hwdev, type, info); +} + +static void __port_sfp_info_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_sfp_qsfp_info *sfp_info = buf_in; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_info)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_info)); + return; + } + + if (sfp_info->port_id >= HIFC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_info->port_id, HIFC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info)); + rt_cmd->up_send_sfp_info = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void __port_sfp_abs_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_light_module_abs *sfp_abs = buf_in; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_abs)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_abs)); + return; + } + + if (sfp_abs->port_id >= HIFC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_abs->port_id, HIFC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs)); + rt_cmd->up_send_sfp_abs = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void mgmt_heartbeat_enhanced_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_heartbeat_event *hb_event = buf_in; + struct hifc_heartbeat_event *hb_event_out = buf_out; + struct hifc_hwdev *dev = hwdev; + + if (in_size != sizeof(*hb_event)) { + sdk_err(dev->dev_hdl, "Invalid data size from mgmt for heartbeat event: %d\n", + in_size); + return; + } + + if (dev->heartbeat_ehd.last_heartbeat != hb_event->heart) { + dev->heartbeat_ehd.last_update_jiffies = jiffies; + 
dev->heartbeat_ehd.last_heartbeat = hb_event->heart; + } + + hb_event_out->drv_heart = HEARTBEAT_DRV_MAGIC_ACK; + + hb_event_out->status = 0; + *out_size = sizeof(*hb_event_out); +} + +struct dev_event_handler { + u8 mod; + u8 cmd; + void (*handler)(struct hifc_hwdev *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +struct dev_event_handler dev_cmd_handler[] = { + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_GET_SFP_INFO, + .handler = __port_sfp_info_event, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_GET_SFP_ABS, + .handler = __port_sfp_abs_event, + }, + + { + .mod = HIFC_MOD_HILINK, + .cmd = HIFC_HILINK_CMD_GET_LINK_INFO, + .handler = hifc_print_hilink_info, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_FAULT_REPORT, + .handler = fault_event_handler, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_MGMT_CMD_HEART_LOST_REPORT, + .handler = heartbeat_lost_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_WATCHDOG_INFO, + .handler = mgmt_watchdog_timeout_event_handler, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_MGMT_RESET, + .handler = mgmt_reset_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_FMW_ACT_NTC, + .handler = hifc_fmw_act_ntc_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_PCIE_DFX_NTC, + .handler = hifc_pcie_dfx_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_GET_HOST_INFO, + .handler = hifc_mctp_get_host_info_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_HEARTBEAT_EVENT, + .handler = mgmt_heartbeat_enhanced_event, + }, +}; + +/* public process for this event: + * pf link change event + * pf heart lost event ,TBD + * pf fault report event + * vf link change event + * vf heart lost event, TBD + * vf fault report event, TBD + */ +static void _event_handler(struct hifc_hwdev *hwdev, enum hifc_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + u32 i, size = sizeof(dev_cmd_handler) / sizeof(dev_cmd_handler[0]); + + if (!hwdev) + return; + + *out_size = 0; + + for (i = 0; i < size; i++) { + if (cmd == dev_cmd_handler[i].cmd && + mod == dev_cmd_handler[i].mod) { + dev_cmd_handler[i].handler(hwdev, buf_in, in_size, + buf_out, out_size); + break; + } + } + + /* can't find this event cmd */ + if (i == size) + sdk_warn(hwdev->dev_hdl, "Unsupported mod(%d) event cmd(%d) to process\n", + mod, cmd); +} + +/* pf link change event */ +static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_L2NIC, cmd, buf_in, in_size, + buf_out, out_size); +} + +static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_HILINK, cmd, buf_in, in_size, + buf_out, out_size); +} + +/* pf fault report event */ +static void pf_fault_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_FAULT_REPORT, + buf_in, in_size, buf_out, out_size); +} + +static void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_WATCHDOG_INFO, + buf_in, in_size, buf_out, out_size); +} + +static void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + 
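+ /* forward the firmware activation notice to the common event dispatcher */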
_event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_FMW_ACT_NTC, + buf_in, in_size, buf_out, out_size); +} + +static void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_PCIE_DFX_NTC, + buf_in, in_size, buf_out, out_size); +} + +static void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_GET_HOST_INFO, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_HEARTBEAT_EVENT, + buf_in, in_size, buf_out, out_size); +} + +static void pf_event_register(struct hifc_hwdev *hwdev) +{ + if (hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) { + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC, + hwdev, pf_nic_event_handler); + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK, + hwdev, + pf_hilink_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_FAULT_REPORT, + pf_fault_event_handler); + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_WATCHDOG_INFO, + mgmt_watchdog_event_handler); + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_FMW_ACT_NTC, + mgmt_fmw_act_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_PCIE_DFX_NTC, + mgmt_pcie_dfx_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_GET_HOST_INFO, + mgmt_get_mctp_event_handler); + } +} + +void hifc_event_register(void *dev, void *pri_handle, + hifc_event_handler callback) +{ + struct hifc_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; + + pf_event_register(hwdev); +} + +void hifc_event_unregister(void *dev) +{ + struct hifc_hwdev *hwdev = dev; + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; + + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC); + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_FAULT_REPORT); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_WATCHDOG_INFO); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_FMW_ACT_NTC); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_PCIE_DFX_NTC); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_GET_HOST_INFO); +} + +/* 0 - heartbeat lost, 1 - normal */ +static u8 hifc_get_heartbeat_status(struct hifc_hwdev *hwdev) +{ + struct hifc_hwif *hwif = hwdev->hwif; + u32 attr1; + + /* suprise remove should be set 1 */ + if (!hifc_get_chip_present_flag(hwdev)) + return 1; + + attr1 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + hifc_set_chip_absent(hwdev); + hifc_force_complete_all(hwdev); + /* should notify chiperr to pangea + * when detecting pcie link down + */ + return 1; + } + + return HIFC_AF1_GET(attr1, MGMT_INIT_STATUS); +} + +static void hifc_heartbeat_event_handler(struct work_struct *work) +{ + struct hifc_hwdev *hwdev = + container_of(work, struct hifc_hwdev, timer_work); + u16 out = 0; + + _event_handler(hwdev, HIFC_MOD_L2NIC, HIFC_MGMT_CMD_HEART_LOST_REPORT, + NULL, 0, &out, &out); +} + +static bool __detect_heartbeat_ehd_lost(struct hifc_hwdev *hwdev) +{ + struct 
hifc_heartbeat_enhanced *hb_ehd = &hwdev->heartbeat_ehd; + u64 update_time; + bool hb_ehd_lost = false; + + if (!hb_ehd->en) + return false; + + if (time_after(jiffies, hb_ehd->start_detect_jiffies)) { + update_time = jiffies_to_msecs(jiffies - + hb_ehd->last_update_jiffies); + if (update_time > HIFC_HEARBEAT_ENHANCED_LOST) { + sdk_warn(hwdev->dev_hdl, "Heartbeat enhanced lost for %d millisecond\n", + (u32)update_time); + hb_ehd_lost = true; + } + } else { + /* mgmt may not report heartbeart enhanced event and won't + * update last_update_jiffies + */ + hb_ehd->last_update_jiffies = jiffies; + } + + return hb_ehd_lost; +} + +static void hifc_heartbeat_timer_handler(struct timer_list *t) +{ + struct hifc_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); + + if (__detect_heartbeat_ehd_lost(hwdev) || + !hifc_get_heartbeat_status(hwdev)) { + hwdev->heartbeat_lost = 1; + queue_work(hwdev->workq, &hwdev->timer_work); + } else { + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_PERIOD)); + } +} + +void add_to_timer(struct timer_list *timer, long period) +{ + if (!timer) + return; + + add_timer(timer); +} + +void delete_timer(struct timer_list *timer) +{ + if (!timer) + return; + + del_timer_sync(timer); +} + +void hifc_init_heartbeat(struct hifc_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat_timer, hifc_heartbeat_timer_handler, 0); + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_START_EXPIRE); + + add_to_timer(&hwdev->heartbeat_timer, HIFC_HEARTBEAT_PERIOD); + + INIT_WORK(&hwdev->timer_work, hifc_heartbeat_event_handler); +} + +void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev) +{ + delete_timer(&hwdev->heartbeat_timer); +} + +u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data) +{ + struct hifc_hwdev *hwdev = (struct hifc_hwdev *)handle; + u8 event_level = FAULT_LEVEL_MAX; + + switch (event) { + case HIFC_INTERNAL_TSO_FATAL_ERROR: + case HIFC_INTERNAL_LRO_FATAL_ERROR: + case HIFC_INTERNAL_TX_FATAL_ERROR: + case HIFC_INTERNAL_RX_FATAL_ERROR: + case HIFC_INTERNAL_OTHER_FATAL_ERROR: + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]); + sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n", + event, data); + event_level = FAULT_LEVEL_FATAL; + break; + default: + sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process.\n", + event); + } + + return event_level; +} + +void hifc_set_pcie_order_cfg(void *handle) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return; + + val = hifc_hwif_read_reg(hwdev->hwif, + HIFC_GLB_DMA_SO_RO_REPLACE_ADDR); + + if (HIFC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) { + val = HIFC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG); + val |= HIFC_GLB_DMA_SO_R0_SET(HIFC_DISABLE_ORDER, SO_RO_CFG); + hifc_hwif_write_reg(hwdev->hwif, + HIFC_GLB_DMA_SO_RO_REPLACE_ADDR, val); + } +} + +int hifc_get_board_info(void *hwdev, struct hifc_board_info *info) +{ + struct hifc_comm_board_info board_info = {0}; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, 0); + if (err || board_info.status || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n", + err, board_info.status, out_size); + return -EFAULT; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} + +int hifc_get_phy_init_status(void *hwdev, + 
enum phy_init_status_type *init_status) +{ + struct hifc_phy_init_status phy_info = {0}; + u16 out_size = sizeof(phy_info); + int err; + + if (!hwdev || !init_status) + return -EINVAL; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_GET_PHY_INIT_STATUS, + &phy_info, sizeof(phy_info), + &phy_info, &out_size, 0); + if ((phy_info.status != HIFC_MGMT_CMD_UNSUPPORTED && + phy_info.status) || err || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n", + err, phy_info.status, out_size); + return -EFAULT; + } + + *init_status = phy_info.init_status; + + return phy_info.status; +} + +int hifc_phy_init_status_judge(void *hwdev) +{ + enum phy_init_status_type init_status; + int ret; + unsigned long end; + + /* It's not a phy, so don't judge phy status */ + if (!HIFC_BOARD_IS_PHY((struct hifc_hwdev *)hwdev)) + return 0; + + end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT); + do { + ret = hifc_get_phy_init_status(hwdev, &init_status); + if (ret == HIFC_MGMT_CMD_UNSUPPORTED) + return 0; + else if (ret) + return -EFAULT; + + switch (init_status) { + case PHY_INIT_SUCCESS: + sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is success\n"); + return 0; + case PHY_NONSUPPORT: + sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is nonsupport\n"); + return 0; + case PHY_INIT_FAIL: + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is failed\n"); + return -EIO; + case PHY_INIT_DOING: + msleep(250); + break; + default: + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is invalid, init_status: %d\n", + init_status); + return -EINVAL; + } + } while (time_before(jiffies, end)); + + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is timeout\n"); + + return -ETIMEDOUT; +} + +int hifc_get_mgmt_channel_status(void *handle) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return true; + + if (hifc_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return false; + + val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR); + + return HIFC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); +} + +#define HIFC_RED_REG_TIME_OUT 3000 + +int hifc_read_reg(void *hwdev, u32 reg_addr, u32 *val) +{ + struct hifc_reg_info reg_info = {0}; + u16 out_size = sizeof(reg_info); + int err; + + if (!hwdev || !val) + return -EINVAL; + + reg_info.reg_addr = reg_addr; + reg_info.val_length = sizeof(u32); + + err = hifc_pf_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_REG_READ, + ®_info, sizeof(reg_info), + ®_info, &out_size, + HIFC_RED_REG_TIME_OUT); + if (reg_info.status || err || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to read reg, err: %d, status: 0x%x, out size: 0x%x\n", + err, reg_info.status, out_size); + return -EFAULT; + } + + *val = reg_info.data[0]; + + return 0; +} + +void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level, + u8 event, u64 val) +{ + struct hifc_fault_info_node *fault_node; + + if (level < FAULT_LEVEL_MAX) { + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HIFC_FAULT_SRC_SW_MGMT_UCODE; + fault_node->info.fault_lev = level; + fault_node->info.fault_data.sw_mgmt.event_id = event; + fault_node->info.fault_data.sw_mgmt.event_data = val; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + 
down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); + } +} + +void hifc_set_func_deinit_flag(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + set_bit(HIFC_HWDEV_FUNC_DEINIT, &dev->func_state); +} + +int hifc_get_card_present_state(void *hwdev, bool *card_present_state) +{ + u32 addr, attr1; + + if (!hwdev || !card_present_state) + return -EINVAL; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_warn(((struct hifc_hwdev *)hwdev)->dev_hdl, "Card is not present\n"); + *card_present_state = (bool)0; + } else { + *card_present_state = (bool)1; + } + + return 0; +} + +void hifc_disable_mgmt_msg_report(void *hwdev) +{ + struct hifc_hwdev *hw_dev = (struct hifc_hwdev *)hwdev; + + hifc_set_pf_status(hw_dev->hwif, HIFC_PF_STATUS_INIT); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.h b/drivers/scsi/huawei/hifc/hifc_hwdev.h new file mode 100644 index 0000000000000000000000000000000000000000..6ebf59b31fb8406871d066aff78f2f979fd96630 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwdev.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HWDEV_H_ +#define HIFC_HWDEV_H_ + +/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */ +#define HIFC_DEFAULT_WQ_PAGE_SIZE 0x40000 +#define HIFC_HW_WQ_PAGE_SIZE 0x1000 + +#define HIFC_MSG_TO_MGMT_MAX_LEN 2016 + +#define HIFC_MGMT_STATUS_ERR_OK 0 /* Ok */ +#define HIFC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */ +#define HIFC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */ +#define HIFC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */ +#define HIFC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */ +#define HIFC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */ +#define HIFC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */ +#define HIFC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */ +#define HIFC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */ +#define HIFC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */ +#define HIFC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */ +#define HIFC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */ +#define HIFC_MGMT_STATUS_ERR_FULL 12 /* Table full */ +#define HIFC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */ +#define HIFC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */ +#define HIFC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */ +#define HIFC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */ +#define HIFC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */ +#define HIFC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */ +#define HIFC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */ +#define HIFC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */ +#define HIFC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */ +#define HIFC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported*/ +/* Qe buffer relates define */ + +enum hifc_rx_buf_size { + HIFC_RX_BUF_SIZE_32B = 0x20, + HIFC_RX_BUF_SIZE_64B = 0x40, + HIFC_RX_BUF_SIZE_96B = 0x60, + HIFC_RX_BUF_SIZE_128B = 0x80, + HIFC_RX_BUF_SIZE_192B = 0xC0, + HIFC_RX_BUF_SIZE_256B = 0x100, + HIFC_RX_BUF_SIZE_384B = 0x180, + HIFC_RX_BUF_SIZE_512B = 0x200, + HIFC_RX_BUF_SIZE_768B = 0x300, + HIFC_RX_BUF_SIZE_1K = 0x400, + HIFC_RX_BUF_SIZE_1_5K = 0x600, + HIFC_RX_BUF_SIZE_2K = 0x800, + HIFC_RX_BUF_SIZE_3K = 0xC00, + HIFC_RX_BUF_SIZE_4K = 0x1000, + HIFC_RX_BUF_SIZE_8K = 0x2000, + 
HIFC_RX_BUF_SIZE_16K = 0x4000, +}; + +enum hifc_res_state { + HIFC_RES_CLEAN = 0, + HIFC_RES_ACTIVE = 1, +}; + +enum ppf_tmr_status { + HIFC_PPF_TMR_FLAG_STOP, + HIFC_PPF_TMR_FLAG_START, +}; + +struct cfg_mgmt_info; +struct hifc_hwif; +struct hifc_wqs; +struct hifc_aeqs; +struct hifc_ceqs; +struct hifc_msg_pf_to_mgmt; +struct hifc_cmdqs; + +struct hifc_root_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hifc_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +#define HIFC_PCIE_LINK_DOWN 0xFFFFFFFF + +#define HIFC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000) +#define HIFC_DEV_BUSY_ACTIVE_FW 0xFE + +#define HIFC_HW_WQ_NAME "hifc_hardware" +#define HIFC_HEARTBEAT_PERIOD 1000 +#define HIFC_HEARTBEAT_START_EXPIRE 5000 + +#define HIFC_CHIP_ERROR_TYPE_MAX 1024 +#define HIFC_CHIP_FAULT_SIZE \ + (HIFC_NODE_ID_MAX * FAULT_LEVEL_MAX * HIFC_CHIP_ERROR_TYPE_MAX) + +#define HIFC_CSR_DMA_ATTR_TBL_BASE 0xC80 +#define HIFC_CSR_DMA_ATTR_TBL_STRIDE 0x4 +#define HIFC_CSR_DMA_ATTR_TBL_ADDR(idx) \ + (HIFC_CSR_DMA_ATTR_TBL_BASE \ + + (idx) * HIFC_CSR_DMA_ATTR_TBL_STRIDE) + +/* MSI-X registers */ +#define HIFC_CSR_MSIX_CNT_BASE 0x2004 +#define HIFC_CSR_MSIX_STRIDE 0x8 + +#define HIFC_CSR_MSIX_CNT_ADDR(idx) \ + (HIFC_CSR_MSIX_CNT_BASE + (idx) * HIFC_CSR_MSIX_STRIDE) + +enum hifc_node_id { + HIFC_NODE_ID_IPSU = 4, + HIFC_NODE_ID_MGMT_HOST = 21, /*Host CPU send API to uP */ + HIFC_NODE_ID_MAX = 22 +}; + +#define HIFC_HWDEV_INIT_MODES_MASK ((1UL << HIFC_HWDEV_ALL_INITED) - 1) + +enum hifc_hwdev_func_state { + HIFC_HWDEV_FUNC_INITED = HIFC_HWDEV_ALL_INITED, + HIFC_HWDEV_FUNC_DEINIT, + HIFC_HWDEV_STATE_BUSY = 31, +}; + +struct hifc_cqm_stats { + atomic_t cqm_cmd_alloc_cnt; + atomic_t cqm_cmd_free_cnt; + atomic_t cqm_send_cmd_box_cnt; + atomic_t cqm_db_addr_alloc_cnt; + atomic_t cqm_db_addr_free_cnt; + atomic_t cqm_fc_srq_create_cnt; + atomic_t cqm_qpc_mpt_create_cnt; + atomic_t cqm_nonrdma_queue_create_cnt; + atomic_t cqm_qpc_mpt_delete_cnt; + atomic_t cqm_nonrdma_queue_delete_cnt; + atomic_t cqm_aeq_callback_cnt[112]; +}; + +struct hifc_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct hifc_fault_event_stats { + atomic_t chip_fault_stats[HIFC_NODE_ID_MAX][FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct hifc_hw_stats { + atomic_t heart_lost_stats; + atomic_t nic_ucode_event_stats[HIFC_NIC_FATAL_ERROR_MAX]; + struct hifc_cqm_stats cqm_stats; + struct hifc_link_event_stats link_event_stats; + struct hifc_fault_event_stats fault_event_stats; +}; + +struct hifc_fault_info_node { + struct list_head list; + struct hifc_hwdev *hwdev; + struct hifc_fault_recover_info info; +}; + +enum heartbeat_support_state { + HEARTBEAT_NOT_SUPPORT = 0, + HEARTBEAT_SUPPORT, +}; + +/* 25s for max 5 heartbeat event lost */ +#define HIFC_HEARBEAT_ENHANCED_LOST 25000 +struct hifc_heartbeat_enhanced { + bool en; /* enable enhanced heartbeat or not */ + + unsigned long last_update_jiffies; + u32 last_heartbeat; + + unsigned long start_detect_jiffies; +}; + +#define HIFC_CMD_VER_FUNC_ID 2 +#define HIFC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C +#define HIFC_ICPL_RESERVD_ADDR 0x9204 + +#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\ + hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_L2NIC, cmd, \ + buf_in, in_size, \ + buf_out, out_size, 0) + +struct hifc_hwdev { + void 
*adapter_hdl; /* pointer to hifc_pcidev or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + u32 wq_page_size; + + void *cqm_hdl; + void *chip_node; + + struct hifc_hwif *hwif; /* include void __iomem *bar */ + struct cfg_mgmt_info *cfg_mgmt; + struct hifc_wqs *wqs; /* for FC slq */ + + struct hifc_aeqs *aeqs; + struct hifc_ceqs *ceqs; + + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct hifc_cmdqs *cmdqs; + + struct hifc_page_addr page_pa0; + struct hifc_page_addr page_pa1; + + hifc_event_handler event_callback; + void *event_pri_handle; + bool history_fault_flag; + struct hifc_fault_recover_info history_fault; + struct semaphore fault_list_sem; + + struct work_struct timer_work; + struct workqueue_struct *workq; + struct timer_list heartbeat_timer; + /* true represent heartbeat lost, false represent heartbeat restore */ + u32 heartbeat_lost; + int chip_present_flag; + struct hifc_heartbeat_enhanced heartbeat_ehd; + struct hifc_hw_stats hw_stats; + u8 *chip_fault_stats; + + u32 statufull_ref_cnt; + ulong func_state; + + u64 feature_cap; /* enum hifc_func_cap */ + + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. + */ + + struct hifc_board_info board_info; +}; + +int hifc_init_comm_ch(struct hifc_hwdev *hwdev); +void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev); + +enum hifc_set_arm_type { + HIFC_SET_ARM_CMDQ, + HIFC_SET_ARM_SQ, + HIFC_SET_ARM_TYPE_NUM, +}; + +/* up to driver event */ +#define HIFC_PORT_CMD_MGMT_RESET 0x0 +struct hifc_vport_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hifc_l2nic_reset { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 reset_flag; +}; + +/* HILINK module interface */ + +/* cmd of mgmt CPU message for HILINK module */ +enum hifc_hilink_cmd { + HIFC_HILINK_CMD_GET_LINK_INFO = 0x3, + HIFC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +enum hifc_link_port_type { + LINK_PORT_FIBRE = 1, + LINK_PORT_ELECTRIC, + LINK_PORT_COPPER, + LINK_PORT_AOC, + LINK_PORT_BACKPLANE, + LINK_PORT_BASET, + LINK_PORT_MAX_TYPE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_MAX_TYPE, +}; + +/* cmd of mgmt CPU message */ +enum hifc_port_cmd { + HIFC_PORT_CMD_SET_MAC = 0x9, + HIFC_PORT_CMD_GET_AUTONEG_CAP = 0xf, + HIFC_PORT_CMD_SET_VPORT_ENABLE = 0x5d, + HIFC_PORT_CMD_UPDATE_MAC = 0xa4, + HIFC_PORT_CMD_GET_SFP_INFO = 0xad, + HIFC_PORT_CMD_GET_STD_SFP_INFO = 0xF0, + HIFC_PORT_CMD_GET_SFP_ABS = 0xFB, +}; + +struct hi30_ffe_data { + u8 PRE2; + u8 PRE1; + u8 POST1; + u8 POST2; + u8 MAIN; +}; + +struct hi30_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; +}; + +#define HILINK_MAX_LANE 4 + +struct hilink_lane { + u8 lane_used; + u8 hi30_ffe[5]; + u8 hi30_ctle[19]; + u8 hi30_dfe[14]; + u8 rsvd4; +}; + +struct hifc_link_info { + u8 vendor_name[16]; + /* port type: + * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane; + * 6 - baseT; 0xffff - unknown + * + * port subtype: + 
* Only when port_type is fiber: + * 1 - SR; 2 - LR + */ + u32 port_type; + u32 port_sub_type; + u32 cable_length; + u8 cable_temp; + u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */ + u8 sfp_type; /* 0 - qsfp; 1 - sfp */ + u8 rsvd0; + u32 power[4]; /* uW; if is sfp, only power[2] is valid */ + + u8 an_state; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u16 speed; /* 1(G)/10(G)/25(G)... */ + + u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */ + u8 alos; /* 0 - yes; 1 - no */ + u8 rx_los; /* 0 - yes; 1 - no */ + u8 pma_status; + u32 pma_dbg_info_reg; /* pma debug info: */ + u32 pma_signal_ok_reg; /* signal ok: */ + + u32 pcs_err_blk_cnt_reg; /* error block counter: */ + u32 rf_lf_status_reg; /* RF/LF status: */ + u8 pcs_link_reg; /* pcs link: */ + u8 mac_link_reg; /* mac link: */ + u8 mac_tx_en; + u8 mac_rx_en; + u32 pcs_err_cnt; + + /* struct hifc_hilink_lane: 40 bytes */ + u8 lane1[40]; /* 25GE lane in old firmware */ + + u8 rsvd1[266]; /* hilink machine state */ + + u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */ + + u8 rsvd2[2]; +}; + +struct hifc_hilink_link_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 port_id; + u8 info_type; /* 1: link up 2: link down 3 cable plugged */ + u8 rsvd1; + + struct hifc_link_info info; + + u8 rsvd2[352]; +}; + +int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id); +void hifc_set_chip_present(void *hwdev); +void hifc_force_complete_all(void *hwdev); +void hifc_init_heartbeat(struct hifc_hwdev *hwdev); +void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev); +u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data); +int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag); +int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); +void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level, + u8 event, u64 val); +bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd); +int hifc_phy_init_status_judge(void *hwdev); +int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); +int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); +void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +struct hifc_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +void hifc_cpu_to_be32(void *data, int len); +void hifc_be32_to_cpu(void *data, int len); +void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len); +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.c b/drivers/scsi/huawei/hifc/hifc_hwif.c new file mode 100644 index 0000000000000000000000000000000000000000..ec84c9bc2f2f77438adb43b98e55613c490042c2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwif.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" + +#define WAIT_HWIF_READY_TIMEOUT 10000 +#define HIFC_SELFTEST_RESULT 0x883C + +u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_base + reg)); +} + +void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_base + reg); 
+} + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +static int hwif_ready(struct hifc_hwdev *hwdev) +{ + u32 addr, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HIFC_PCIE_LINK_DOWN) + return -EBUSY; + + if (!HIFC_AF1_GET(attr1, MGMT_INIT_STATUS)) + return -EBUSY; + + return 0; +} + +static int wait_hwif_ready(struct hifc_hwdev *hwdev) +{ + ulong timeout = 0; + + do { + if (!hwif_ready(hwdev)) + return 0; + + usleep_range(999, 1000); + timeout++; + } while (timeout <= WAIT_HWIF_READY_TIMEOUT); + + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + return -EBUSY; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + **/ +static void set_hwif_attr(struct hifc_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2) +{ + hwif->attr.func_global_idx = HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HIFC_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HIFC_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HIFC_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HIFC_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HIFC_AF1_GET(attr1, PPF_IDX); + + hwif->attr.num_aeqs = BIT(HIFC_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HIFC_AF1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HIFC_AF1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HIFC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_hwif_attr(struct hifc_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2; + + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(hwif, addr); + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwif, addr); + + addr = HIFC_CSR_FUNC_ATTR2_ADDR; + attr2 = hifc_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1, attr2); +} + +void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status) +{ + u32 attr5 = HIFC_AF5_SET(status, PF_STATUS); + u32 addr = HIFC_CSR_FUNC_ATTR5_ADDR; + + hifc_hwif_write_reg(hwif, addr, attr5); +} + +enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif) +{ + u32 attr5 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR5_ADDR); + + return HIFC_AF5_GET(attr5, PF_STATUS); +} + +enum hifc_doorbell_ctrl hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif) +{ + u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR); + + return HIFC_AF4_GET(attr4, DOORBELL_CTRL); +} + +enum hifc_outbound_ctrl hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif) +{ + u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR); + + return HIFC_AF4_GET(attr4, OUTBOUND_CTRL); +} + +void hifc_enable_doorbell(struct hifc_hwif *hwif) +{ + u32 addr, attr4; + + addr = HIFC_CSR_FUNC_ATTR4_ADDR; + attr4 = hifc_hwif_read_reg(hwif, addr); + + attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HIFC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hifc_hwif_write_reg(hwif, addr, attr4); +} + +void hifc_disable_doorbell(struct hifc_hwif *hwif) +{ + u32 addr, attr4; + + addr = HIFC_CSR_FUNC_ATTR4_ADDR; + attr4 = hifc_hwif_read_reg(hwif, addr); + + attr4 = 
HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HIFC_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); + + hifc_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + **/ +static void set_ppf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HIFC_CSR_PPF_ELECTION_ADDR; + + val = hifc_hwif_read_reg(hwif, addr); + val = HIFC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HIFC_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hifc_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hifc_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HIFC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_mpf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hifc_hwif_read_reg(hwif, addr); + attr->mpf_idx = HIFC_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void set_mpf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hifc_hwif_read_reg(hwif, addr); + + val = HIFC_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HIFC_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + hifc_hwif_write_reg(hwif, addr, val); +} + +static void init_db_area_idx(struct hifc_free_db_area *free_db_area) +{ + u32 i; + + for (i = 0; i < HIFC_DB_MAX_AREAS; i++) + free_db_area->db_idx[i] = i; + + free_db_area->num_free = HIFC_DB_MAX_AREAS; + + spin_lock_init(&free_db_area->idx_lock); +} + +static int get_db_idx(struct hifc_hwif *hwif, u32 *idx) +{ + struct hifc_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + +retry: + if (free_db_area->num_free == 0) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + + free_db_area->num_free--; + + pos = free_db_area->alloc_pos++; + pos &= HIFC_DB_MAX_AREAS - 1; + + pg_idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = 0xFFFFFFFF; + + /* pg_idx out of range */ + if (pg_idx >= HIFC_DB_MAX_AREAS) + goto retry; + + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hifc_hwif *hwif, u32 idx) +{ + struct hifc_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + + if (idx >= HIFC_DB_MAX_AREAS) + return; + + spin_lock(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos &= HIFC_DB_MAX_AREAS - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + spin_unlock(&free_db_area->idx_lock); +} + +void hifc_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base) +{ + struct hifc_hwif *hwif; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + +#if defined(__aarch64__) + /* No need to unmap */ +#else + if (dwqe_base) + io_mapping_unmap(dwqe_base); +#endif + + free_db_idx(hwif, idx); +} + +int 
hifc_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hifc_hwif *hwif; + u64 offset; + u32 idx; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HIFC_DB_PAGE_SIZE; + + if (!dwqe_base) + return 0; + + offset = ((u64)idx) << PAGE_SHIFT; + +#if defined(__aarch64__) + *dwqe_base = hwif->dwqe_mapping + offset; +#else + *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset, + HIFC_DB_PAGE_SIZE); +#endif + + if (!(*dwqe_base)) { + hifc_free_db_addr(hwdev, *db_base, NULL); + return -EFAULT; + } + + return 0; +} + +void hifc_set_msix_state(void *hwdev, u16 msix_idx, enum hifc_msix_state flag) +{ + struct hifc_hwif *hwif; + u32 offset = msix_idx * HIFC_PCI_MSIX_ENTRY_SIZE + + HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + if (!hwdev) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= ~HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} + +static void disable_all_msix(struct hifc_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hifc_set_msix_state(hwdev, i, HIFC_MSIX_DISABLE); +} + +static int wait_until_doorbell_and_outbound_enabled(struct hifc_hwif *hwif) +{ + enum hifc_doorbell_ctrl db_ctrl; + enum hifc_outbound_ctrl outbound_ctrl; + u32 cnt = 0; + + while (cnt < HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hifc_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hifc_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && + db_ctrl == ENABLE_DOORBELL) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} + +static void __print_selftest_reg(struct hifc_hwdev *hwdev) +{ + u32 addr, attr0, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "PCIE is link down\n"); + return; + } + + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(hwdev->hwif, addr); + if (HIFC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF && + !HIFC_AF0_GET(attr0, PCI_INTF_IDX)) + sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n", + hifc_hwif_read_reg(hwdev->hwif, + HIFC_SELFTEST_RESULT)); +} + +/** + * hifc_init_hwif - initialize the hw interface + * @hwdev: the pointer to hw device + * @cfg_reg_base: configuration base address + * Return: 0 - success, negative - failure + **/ +int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping) +{ + struct hifc_hwif *hwif; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + hwif->cfg_regs_base = cfg_reg_base; + hwif->intr_regs_base = intr_reg_base; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->dwqe_mapping = dwqe_mapping; + init_db_area_idx(&hwif->free_db_area); + + err = wait_hwif_ready(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); + __print_selftest_reg(hwdev); + goto hwif_ready_err; + } + + get_hwif_attr(hwif); + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n"); + goto 
hwif_ready_err; + } + + set_ppf(hwif); + + if (HIFC_IS_PPF(hwdev)) + set_mpf(hwif); + + get_mpf(hwif); + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n", + hwif->attr.func_global_idx, hwif->attr.func_type, + hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, + hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + kfree(hwif); + + return err; +} + +/** + * hifc_free_hwif - free the hw interface + * @hwdev: the pointer to hw device + **/ +void hifc_free_hwif(struct hifc_hwdev *hwdev) +{ + kfree(hwdev->hwif); +} + +int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned flag, + struct hifc_dma_addr_align *mem_align) +{ + void *vaddr, *align_vaddr; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hifc_dma_free_coherent_align(void *dev_hdl, + struct hifc_dma_addr_align *mem_align) +{ + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} + +u16 hifc_global_func_id(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} + +/** + * get function id from register,used by sriov hot migration process + * @hwdev: the pointer to hw device + **/ +u16 hifc_global_func_id_hw(void *hwdev) +{ + u32 addr, attr0; + struct hifc_hwdev *dev; + + dev = (struct hifc_hwdev *)hwdev; + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(dev->hwif, addr); + + return HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX); +} + +/** + * get function id, used by sriov hot migratition process. 
+ * @hwdev: the pointer to hw device + * @func_id: function id + **/ +int hifc_global_func_id_get(void *hwdev, u16 *func_id) +{ + *func_id = hifc_global_func_id(hwdev); + return 0; +} + +u8 hifc_pcie_itf_id(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hifc_pcie_itf_id); + +enum func_type hifc_func_type(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} + +u8 hifc_ppf_idx(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.h b/drivers/scsi/huawei/hifc/hifc_hwif.h new file mode 100644 index 0000000000000000000000000000000000000000..da72253dcf5f717796c167bc7b8e239572d517ef --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwif.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HWIF_H +#define HIFC_HWIF_H + +#include "hifc_hwdev.h" + +#define HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 +#define HIFC_CSR_GLOBAL_BASE_ADDR 0x4000 +/* HW interface registers */ +#define HIFC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HIFC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HIFC_CSR_FUNC_ATTR2_ADDR 0x8 +#define HIFC_CSR_FUNC_ATTR4_ADDR 0x10 + +#define HIFC_CSR_FUNC_ATTR5_ADDR 0x14 +#define HIFC_PCI_MSIX_ENTRY_SIZE 16 +#define HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128*/ +#define HIFC_DB_DWQE_SIZE 0x00080000 +/* db/dwqe page size: 4K */ +#define HIFC_DB_PAGE_SIZE 0x00001000ULL +#define HIFC_DB_MAX_AREAS (HIFC_DB_DWQE_SIZE / HIFC_DB_PAGE_SIZE) + +#define HIFC_ELECTION_BASE 0x200 +#define HIFC_PPF_ELECTION_STRIDE 0x4 +#define HIFC_CSR_MAX_PORTS 4 +#define HIFC_CSR_PPF_ELECTION_ADDR \ + (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE) + +#define HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE + \ + HIFC_CSR_MAX_PORTS * HIFC_PPF_ELECTION_STRIDE) +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HIFC_DB_PAGE_SIZE)) + +#define HIFC_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HIFC_AF0_P2P_IDX_SHIFT 10 +#define HIFC_AF0_PCI_INTF_IDX_SHIFT 14 +#define HIFC_AF0_VF_IN_PF_SHIFT 16 +#define HIFC_AF0_FUNC_TYPE_SHIFT 24 +#define HIFC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF +#define HIFC_AF0_P2P_IDX_MASK 0xF +#define HIFC_AF0_PCI_INTF_IDX_MASK 0x3 +#define HIFC_AF0_VF_IN_PF_MASK 0xFF +#define HIFC_AF0_FUNC_TYPE_MASK 0x1 + +#define HIFC_AF0_GET(val, member) \ + (((val) >> HIFC_AF0_##member##_SHIFT) & HIFC_AF0_##member##_MASK) + +#define HIFC_AF1_PPF_IDX_SHIFT 0 +#define HIFC_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HIFC_AF1_CEQS_PER_FUNC_SHIFT 12 +#define HIFC_AF1_IRQS_PER_FUNC_SHIFT 20 +#define HIFC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24 +#define HIFC_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HIFC_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HIFC_AF1_PPF_IDX_MASK 0x1F +#define HIFC_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HIFC_AF1_CEQS_PER_FUNC_MASK 0x7 +#define HIFC_AF1_IRQS_PER_FUNC_MASK 0xF +#define HIFC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HIFC_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HIFC_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HIFC_AF1_GET(val, member) \ + (((val) >> HIFC_AF1_##member##_SHIFT) & 
HIFC_AF1_##member##_MASK) + +#define HIFC_AF4_OUTBOUND_CTRL_SHIFT 0 +#define HIFC_AF4_DOORBELL_CTRL_SHIFT 1 +#define HIFC_AF4_OUTBOUND_CTRL_MASK 0x1 +#define HIFC_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HIFC_AF4_GET(val, member) \ + (((val) >> HIFC_AF4_##member##_SHIFT) & HIFC_AF4_##member##_MASK) + +#define HIFC_AF4_SET(val, member) \ + (((val) & HIFC_AF4_##member##_MASK) << HIFC_AF4_##member##_SHIFT) + +#define HIFC_AF4_CLEAR(val, member) \ + ((val) & (~(HIFC_AF4_##member##_MASK << \ + HIFC_AF4_##member##_SHIFT))) + +#define HIFC_AF5_PF_STATUS_SHIFT 0 +#define HIFC_AF5_PF_STATUS_MASK 0xFFFF + +#define HIFC_AF5_SET(val, member) \ + (((val) & HIFC_AF5_##member##_MASK) << HIFC_AF5_##member##_SHIFT) + +#define HIFC_AF5_GET(val, member) \ + (((val) >> HIFC_AF5_##member##_SHIFT) & HIFC_AF5_##member##_MASK) + +#define HIFC_PPF_ELECTION_IDX_SHIFT 0 +#define HIFC_PPF_ELECTION_IDX_MASK 0x1F + +#define HIFC_PPF_ELECTION_SET(val, member) \ + (((val) & HIFC_PPF_ELECTION_##member##_MASK) << \ + HIFC_PPF_ELECTION_##member##_SHIFT) + +#define HIFC_PPF_ELECTION_GET(val, member) \ + (((val) >> HIFC_PPF_ELECTION_##member##_SHIFT) & \ + HIFC_PPF_ELECTION_##member##_MASK) + +#define HIFC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HIFC_PPF_ELECTION_##member##_MASK \ + << HIFC_PPF_ELECTION_##member##_SHIFT))) + +#define HIFC_MPF_ELECTION_IDX_SHIFT 0 +#define HIFC_MPF_ELECTION_IDX_MASK 0x1F + +#define HIFC_MPF_ELECTION_SET(val, member) \ + (((val) & HIFC_MPF_ELECTION_##member##_MASK) << \ + HIFC_MPF_ELECTION_##member##_SHIFT) + +#define HIFC_MPF_ELECTION_GET(val, member) \ + (((val) >> HIFC_MPF_ELECTION_##member##_SHIFT) & \ + HIFC_MPF_ELECTION_##member##_MASK) + +#define HIFC_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HIFC_MPF_ELECTION_##member##_MASK \ + << HIFC_MPF_ELECTION_##member##_SHIFT))) + +#define HIFC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HIFC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HIFC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HIFC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HIFC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HIFC_IS_PPF(dev) (HIFC_FUNC_TYPE(dev) == TYPE_PPF) + +enum hifc_pcie_nosnoop { + HIFC_PCIE_SNOOP = 0, + HIFC_PCIE_NO_SNOOP = 1, +}; + +enum hifc_pcie_tph { + HIFC_PCIE_TPH_DISABLE = 0, + HIFC_PCIE_TPH_ENABLE = 1, +}; + +enum hifc_pf_status { + HIFC_PF_STATUS_INIT = 0X0, + HIFC_PF_STATUS_ACTIVE_FLAG = 0x11, + HIFC_PF_STATUS_FLR_START_FLAG = 0x12, + HIFC_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +enum hifc_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hifc_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +struct hifc_free_db_area { + u32 db_idx[HIFC_DB_MAX_AREAS]; + u32 num_free; + u32 alloc_pos; + u32 return_pos; + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, +}; + +struct hifc_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u8 num_dma_attr; /* max: 2 ^ 6 */ +}; + +struct hifc_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u64 db_base_phy; + u8 __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + struct hifc_free_db_area free_db_area; + struct hifc_func_attr attr; + 
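+ /* backing pci device handle, set from hwdev->pcidev_hdl in hifc_init_hwif() */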
void *pdev; +}; + +struct hifc_dma_addr_align { + u32 real_size; + void *ori_vaddr; + dma_addr_t ori_paddr; + void *align_vaddr; + dma_addr_t align_paddr; +}; + +u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg); +void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val); +void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status); +enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif); +enum hifc_doorbell_ctrl + hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif); +enum hifc_outbound_ctrl + hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif); +void hifc_enable_doorbell(struct hifc_hwif *hwif); +void hifc_disable_doorbell(struct hifc_hwif *hwif); +int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping); +void hifc_free_hwif(struct hifc_hwdev *hwdev); +int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned flag, + struct hifc_dma_addr_align *mem_align); +void hifc_dma_free_coherent_align(void *dev_hdl, + struct hifc_dma_addr_align *mem_align); +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_io.c b/drivers/scsi/huawei/hifc/hifc_io.c new file mode 100644 index 0000000000000000000000000000000000000000..cd1c47fa7df258a99729372125c972197a2d5c52 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_io.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" +#include "hifc_hba.h" +#include "hifc_service.h" +#include "hifc_io.h" + +/* Whether to enable the payload printing + * function depends on the content of exchange + */ +#ifdef HIFC_PRINT_PAYLOADINFO_ENABLE +#include "unf_exchg.h" +#endif + +/* Set this parameter based on EDTOV 2S */ +#define HIFC_IMMIDATA_ABORT_TIME 2000 +#define hifc_fill_pkg_status(com_err_code, control, scsi_status) \ + (((unsigned int)(com_err_code) << 16) |\ + ((unsigned int)(control) << 8) |\ + (unsigned int)(scsi_status)) + +unsigned int dif_protect_op_code = INVALID_VALUE32; +unsigned int dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; +unsigned int dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; +unsigned int dif_sect_size; +unsigned int no_dif_sect_size; +unsigned int dix_flag; +unsigned int grd_ctrl; +unsigned int grd_agm_ctrl = HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16; +unsigned int cmp_app_tag_mask = 0xffff; +unsigned int ref_tag_mod = INVALID_VALUE32; +unsigned int rep_ref_tag; +unsigned short cmp_app_tag; +unsigned short rep_app_tag; + +static void hifc_dif_err_count(struct hifc_hba_s *v_hba, + unsigned char v_dif_info) +{ + unsigned char dif_info = v_dif_info; + + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_ALL); + + if (dif_info & HIFC_DIF_ERROR_CODE_CRC) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_CRC); + + if (dif_info & HIFC_DIF_ERROR_CODE_APP) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_APP); + + if (dif_info & HIFC_DIF_ERROR_CODE_REF) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_REF); +} + +static void hifc_build_no_dif_control(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info) +{ + struct hifcoe_fc_dif_info_s *dif_info = v_dif_info; + + /* dif enable or disable */ + dif_info->wd0.difx_en = HIFC_DIF_DISABLE; + + dif_info->wd1.vpid = v_pkg->qos_level; + dif_info->wd1.lun_qos_en = 0; +} + +void hifc_dif_action_forward(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + 
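+ /* translate the UNF verify/replace protect_opcode flags into guard, ref tag and app tag control bits (verify vs. skip, replace vs. forward) */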
v_dif_info_l1->wd0.grd_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd0.grd_ctrl |= (v_dif_ctrl_u1->protect_opcode & + UNF_REPLACE_CRC_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : + HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; + + v_dif_info_l1->wd1.app_tag_ctrl |= (v_dif_ctrl_u1->protect_opcode & + UNF_VERIFY_APP_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd1.app_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_APP_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; +} + +void hifc_dif_action_delete(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + v_dif_info_l1->wd0.grd_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd0.grd_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd0.ref_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; + + v_dif_info_l1->wd1.app_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd1.app_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; +} + + +static void hifc_convert_dif_action( + struct unf_dif_control_info_s *v_dif_ctrl_u1, + struct hifcoe_fc_dif_info_s *v_dif_info_l1) +{ + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + struct unf_dif_control_info_s *dif_ctrl_u1 = NULL; + + dif_info_l1 = v_dif_info_l1; + dif_ctrl_u1 = v_dif_ctrl_u1; + + switch (UNF_DIF_ACTION_MASK & dif_ctrl_u1->protect_opcode) { + case UNF_DIF_ACTION_VERIFY_AND_REPLACE: + case UNF_DIF_ACTION_VERIFY_AND_FORWARD: + hifc_dif_action_forward(dif_info_l1, dif_ctrl_u1); + break; + + case UNF_DIF_ACTION_INSERT: + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + dif_info_l1->wd0.ref_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd0.ref_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + dif_info_l1->wd1.app_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd1.app_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + break; + + case UNF_DIF_ACTION_VERIFY_AND_DELETE: + hifc_dif_action_delete(dif_info_l1, dif_ctrl_u1); + break; + + default: + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "Unknown dif protect opcode 0x%x", + dif_ctrl_u1->protect_opcode); + break; + } +} + +void hifc_get_dif_info_l1(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + v_dif_info_l1->wd1.cmp_app_tag_msk = cmp_app_tag_mask; + + v_dif_info_l1->rep_app_tag = v_dif_ctrl_u1->app_tag; + v_dif_info_l1->rep_ref_tag = v_dif_ctrl_u1->start_lba; + + v_dif_info_l1->cmp_app_tag = v_dif_ctrl_u1->app_tag; + v_dif_info_l1->cmp_ref_tag = 
v_dif_ctrl_u1->start_lba; + + if (cmp_app_tag != 0) + v_dif_info_l1->cmp_app_tag = cmp_app_tag; + + if (rep_app_tag != 0) + v_dif_info_l1->rep_app_tag = rep_app_tag; + + if (rep_ref_tag != 0) + v_dif_info_l1->rep_ref_tag = rep_ref_tag; +} + +static void hifc_build_dif_control(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info_l1) +{ + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + struct unf_dif_control_info_s *dif_ctrl_u1 = NULL; + + dif_info_l1 = v_dif_info_l1; + dif_ctrl_u1 = &v_pkg->dif_control; + + /* dif enable or disable */ + dif_info_l1->wd0.difx_en = HIFC_DIF_ENABLE; + + dif_info_l1->wd1.vpid = v_pkg->qos_level; + dif_info_l1->wd1.lun_qos_en = 0; + + /* 512B + 8 size mode */ + dif_info_l1->wd0.sct_size = + (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? + HIFC_DIF_SECTOR_4KB_MODE : HIFC_DIF_SECTOR_512B_MODE; + + no_dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? + HIFC_SECT_SIZE_4096 : HIFC_SECT_SIZE_512; + + dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? + HIFC_SECT_SIZE_4096_8 : HIFC_SECT_SIZE_512_8; + + /* The length is adjusted when the burst len is adjusted. + * The length is initialized to 0 + */ + dif_info_l1->wd0.difx_len = 0; + + /* dif type 1 */ + dif_info_l1->wd0.dif_verify_type = dif_type; + dif_info_l1->wd0.dif_ins_rep_type = dif_type; + + /* Check whether the 0xffff app or ref domain is isolated + * If all ff messages are displayed in type1 app, checkcheck sector + * v_dif_info_l1->wd0.difx_app_esc = HIFC_DIF_APP_REF_ESC_CHECK + */ + + dif_info_l1->wd0.difx_app_esc = dif_app_esc_check; + + /* type1 ref tag If all ff is displayed, check sector is required */ + dif_info_l1->wd0.difx_ref_esc = dif_ref_esc_check; + + /* Currently, only t10 crc is supported */ + dif_info_l1->wd0.grd_agm_ctrl = 0; + + /* Set this parameter based on the values of bit zero and bit one. + * The initial value is 0, and the value is UNF_DEFAULT_CRC_GUARD_SEED + */ + dif_info_l1->wd0.grd_agm_ini_ctrl = + HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1; + dif_info_l1->wd1.app_tag_ctrl = 0; + dif_info_l1->wd0.grd_ctrl = 0; + dif_info_l1->wd0.ref_tag_ctrl = 0; + + /* Convert the verify operation, replace, forward, insert, + * and delete operations based on the actual operation code of + * the upper layer + */ + if (dif_protect_op_code != INVALID_VALUE32) { + dif_ctrl_u1->protect_opcode = dif_protect_op_code | + (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_MASK); + } + + hifc_convert_dif_action(dif_ctrl_u1, dif_info_l1); + + /* Address self-increase mode */ + dif_info_l1->wd0.ref_tag_mode = (dif_ctrl_u1->protect_opcode & + UNF_DIF_ACTION_NO_INCREASE_REFTAG) ? (BOTH_NONE) : (BOTH_INCREASE); + + if (ref_tag_mod != INVALID_VALUE32) + dif_info_l1->wd0.ref_tag_mode = ref_tag_mod; + + /* This parameter is used only when type 3 is set to 0xffff. 
*/ + + hifc_get_dif_info_l1(dif_info_l1, dif_ctrl_u1); +} + +static unsigned int hifc_fill_external_sgl_page( + struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct unf_esgl_page_s *v_esgl_page, + unsigned int sge_num, + int v_direct, + unsigned int context_id, + unsigned int dif_flag) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + unsigned int sge_num_per_page = 0; + unsigned int buffer_addr = 0; + unsigned int buf_len = 0; + char *buf = NULL; + unsigned long phys = 0; + struct unf_esgl_page_s *esgl_page = NULL; + struct hifcoe_variable_sge_s *sge = NULL; + + esgl_page = v_esgl_page; + while (sge_num > 0) { + /* Obtains the initial address of the sge page */ + sge = (struct hifcoe_variable_sge_s *)esgl_page->page_address; + + /* Calculate the number of sge on each page */ + sge_num_per_page = (esgl_page->page_size) / + sizeof(struct hifcoe_variable_sge_s); + + /* Fill in sgl page. The last sge of each page is link sge + * by default + */ + for (index = 0; index < (sge_num_per_page - 1); index++) { + UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, + &buf_len, dif_flag); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + phys = (unsigned long)buf; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + sge[index].wd0.buf_len = buf_len; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + sge_num--; + if (sge_num == 0) + break; + } + + /* sge Set the end flag on the last sge of the page if all the + * pages have been filled. + */ + if (sge_num == 0) { + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + /* parity bit */ + buffer_addr = be32_to_cpu(sge[index].buf_addr_lo); + sge[index].wd1.buf_addr_gpa = (buffer_addr >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index].wd1, HIFC_DWORD_BYTE); + } + /* If only one sge is left empty, the sge reserved on the page + * is used for filling. 
+ */ + else if (sge_num == 1) { + UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, + &buf_len, dif_flag); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + phys = (unsigned long)buf; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + sge[index].wd0.buf_len = buf_len; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + sge_num--; + } else { + /* Apply for a new sgl page and fill in link sge */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (!esgl_page) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, "Get free esgl page failed."); + return UNF_RETURN_ERROR; + } + phys = esgl_page->esgl_phyaddr; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + + /* For the cascaded wqe, you only need to enter the + * cascading buffer address and extension flag, and do + * not need to fill in other fields + */ + sge[index].wd0.buf_len = 0; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_INFO, + "Port(0x%x) SID(0x%x) DID(0x%x) RXID(0x%x) build esgl left sge num: %u.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, + v_pkg->frame_head.oxid_rxid, + sge_num); + } + + return RETURN_OK; +} + +static unsigned int hifc_build_local_dif_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + unsigned int buf_len = 0; + unsigned long phys = 0; + unsigned int dif_sge_place = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* DIF SGE must be followed by BD SGE */ + dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ? + v_bd_sge_num : v_pkg->entry_count); + + /* The entry_count= 0 needs to be specially processed and does not + * need to be mounted. As long as len is set to zero, Last-bit is set + * to one, and E-bit is set to 0. 
+ */ + if (v_pkg->dif_control.dif_sge_count == 0) { + v_sqe->sge[dif_sge_place].buf_addr_hi = 0; + v_sqe->sge[dif_sge_place].buf_addr_lo = 0; + v_sqe->sge[dif_sge_place].wd0.buf_len = 0; + } else { + UNF_CM_GET_DIF_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get Dif Buf Fail."); + return UNF_RETURN_ERROR; + } + + phys = (unsigned long)buf; + v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[dif_sge_place].wd0.buf_len = buf_len; + } + + /* rdma flag. If the fc is not used, enter 0. */ + v_sqe->sge[dif_sge_place].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa = + (v_sqe->sge[dif_sge_place].buf_addr_lo >> 16); + v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, the value + * of this field is always 0. + */ + v_sqe->sge[dif_sge_place].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place], + sizeof(struct hifcoe_variable_sge_s)); + + return RETURN_OK; +} + +static unsigned int hifc_build_external_dif_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_esgl_page_s *esgl_page = NULL; + unsigned long phys = 0; + unsigned int left_sge_num = 0; + unsigned int dif_sge_place = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* DIF SGE must be followed by BD SGE */ + dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ? 
+ v_bd_sge_num : v_pkg->entry_count); + + /* Allocate the first page first */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (!esgl_page) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get External Page Fail."); + return UNF_RETURN_ERROR; + } + + phys = esgl_page->esgl_phyaddr; + + /* Configuring the Address of the Cascading Page */ + v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); + + /* Configuring Control Information About the Cascading Page */ + v_sqe->sge[dif_sge_place].wd0.buf_len = 0; + v_sqe->sge[dif_sge_place].wd0.r_flag = 0; + v_sqe->sge[dif_sge_place].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG; + v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa = + (v_sqe->sge[dif_sge_place].buf_addr_lo >> 16); + v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place], + sizeof(struct hifcoe_variable_sge_s)); + + /* Fill in the sge information on the cascading page */ + left_sge_num = v_pkg->dif_control.dif_sge_count; + ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page, left_sge_num, + v_direct, parent_sq->context_id, + UNF_TRUE); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +static unsigned int hifc_build_local_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + unsigned int buf_len = 0; + unsigned int index = 0; + unsigned long phys = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[fail]Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < v_pkg->entry_count; index++) { + UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + phys = (unsigned long)buf; + + v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[index].wd0.buf_len = buf_len; + + /* rdma flag. If the fc is not used, enter 0. */ + v_sqe->sge[index].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, the + * value of this field is always 0. + */ + v_sqe->sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + if (index == (v_pkg->entry_count - 1)) { + /* Sets the last WQE end flag 1 */ + v_sqe->sge[index].wd1.last_flag = + HIFC_WQE_SGE_LAST_FLAG; + } + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + /* Adjust the length of the BDSL field in the CTRL domain. */ + HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM((v_pkg->entry_count * + sizeof(struct hifcoe_variable_sge_s)))); + + /* The entry_count= 0 needs to be specially processed and does not + * need to be mounted. 
As long as len is set to zero, Last-bit is set + * to one, and E-bit is set to 0. + */ + if (v_pkg->entry_count == 0) { + v_sqe->sge[0].buf_addr_hi = 0; + v_sqe->sge[0].buf_addr_lo = 0; + v_sqe->sge[0].wd0.buf_len = 0; + + /* rdma flag. This field is not used in fc. Set it to 0. */ + v_sqe->sge[0].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[0].wd1.buf_addr_gpa = + (v_sqe->sge[0].buf_addr_lo >> 16); + v_sqe->sge[0].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, + * the value of this field is always 0. + */ + v_sqe->sge[0].wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[0].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + hifc_cpu_to_big32(&v_sqe->sge[0], + sizeof(struct hifcoe_variable_sge_s)); + + /* Adjust the length of the BDSL field in the CTRL domain. */ + HIFC_ADJUST_DATA( + v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifcoe_variable_sge_s))); + } + + return RETURN_OK; +} + +static unsigned int hifc_build_external_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + struct unf_esgl_page_s *esgl_page = NULL; + unsigned long phys = 0; + unsigned int buf_len = 0; + unsigned int index = 0; + unsigned int left_sge_num = 0; + unsigned int local_sge_num = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* Ensure that the value of v_bd_sge_num is greater than or equal to one + */ + local_sge_num = v_bd_sge_num - 1; + + for (index = 0; index < local_sge_num; index++) { + UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + if (unlikely(ret != RETURN_OK)) + return UNF_RETURN_ERROR; + phys = (unsigned long)buf; + + v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[index].wd0.buf_len = buf_len; + + /* RDMA flag, which is not used by FC. */ + v_sqe->sge[index].wd0.r_flag = 0; + v_sqe->sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + /* Allocating the first cascading page */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (unlikely(!esgl_page)) + return UNF_RETURN_ERROR; + + phys = esgl_page->esgl_phyaddr; + /* Adjust the length of the BDSL field in the CTRL domain. 
*/ + HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM((v_bd_sge_num * + sizeof(struct hifcoe_variable_sge_s)))); + + /* Configuring the Address of the Cascading Page */ + v_sqe->sge[index].buf_addr_hi = (u32)UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = (u32)UNF_DMA_LO32(phys); + + /* Configuring Control Information About the Cascading Page */ + v_sqe->sge[index].wd0.buf_len = 0; + v_sqe->sge[index].wd0.r_flag = 0; + v_sqe->sge[index].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + /* Calculate the number of remaining sge. */ + left_sge_num = v_pkg->entry_count - local_sge_num; + + /* Fill in the sge information on the cascading page. */ + ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page, + left_sge_num, v_direct, + parent_sq->context_id, + UNF_FALSE); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +unsigned int hifc_build_sql_by_local_sge_num(struct unf_frame_pkg_s *v_pkg, + struct hifc_hba_s *v_hba, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int bd_sge_num) +{ + unsigned int ret = RETURN_OK; + + if (v_pkg->entry_count <= bd_sge_num) { + ret = hifc_build_local_sgl(v_hba, v_pkg, v_sqe, v_direct); + } else { + ret = hifc_build_external_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } + return ret; +} + +unsigned int hifc_conf_dual_sgl_info(struct unf_frame_pkg_s *v_pkg, + struct hifc_hba_s *v_hba, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int bd_sge_num, + int double_sgl) +{ + unsigned int ret = RETURN_OK; + + if (double_sgl == UNF_TRUE) { + /* Adjust the length of the DIF_SL field in the CTRL domain */ + HIFC_ADJUST_DATA( + v_sqe->ctrl_sl.ch.wd0.dif_sl, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifcoe_variable_sge_s))); + + if (v_pkg->dif_control.dif_sge_count <= + HIFC_WQE_SGE_DIF_ENTRY_NUM) { + ret = hifc_build_local_dif_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } else { + ret = hifc_build_external_dif_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } + } + + return ret; +} + +static unsigned int hifc_build_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int dif_flag) +{ + unsigned int ret = RETURN_OK; + unsigned int bd_sge_num = HIFC_WQE_SGE_ENTRY_NUM; + int double_sgl = UNF_FALSE; + + if ((dif_flag != 0) && + (v_pkg->dif_control.flags & UNF_DIF_DOUBLE_SGL)) { + bd_sge_num = + HIFC_WQE_SGE_ENTRY_NUM - HIFC_WQE_SGE_DIF_ENTRY_NUM; + double_sgl = UNF_TRUE; + } + + /* Only one wqe local sge can be loaded. 
If more than one wqe local sge + * is used, use the esgl + */ + ret = hifc_build_sql_by_local_sge_num(v_pkg, v_hba, v_sqe, + v_direct, bd_sge_num); + + if (unlikely(ret != RETURN_OK)) + return ret; + + /* Configuring Dual SGL Information for DIF */ + ret = hifc_conf_dual_sgl_info(v_pkg, v_hba, v_sqe, v_direct, + bd_sge_num, double_sgl); + + return ret; +} + +static void hifc_adjust_dix(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info_l1, + unsigned char v_task_type) +{ + unsigned char task_type = v_task_type; + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + + dif_info_l1 = v_dif_info_l1; + + if (dix_flag == 1) { + if ((task_type == HIFC_SQE_FCP_IWRITE) || + (task_type == HIFC_SQE_FCP_TRD)) { + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_FORWARD) { + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE; + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; + } + + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_DELETE) { + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; + } + } + + if ((task_type == HIFC_SQE_FCP_IREAD) || + (task_type == HIFC_SQE_FCP_TWR)) { + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_FORWARD) { + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE; + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; + } + + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_INSERT) { + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; + } + } + } + + if (grd_agm_ctrl != 0) + dif_info_l1->wd0.grd_agm_ctrl = grd_agm_ctrl; + + if (grd_ctrl != 0) + dif_info_l1->wd0.grd_ctrl = grd_ctrl; +} + +void hifc_get_dma_direction_by_fcp_cmnd(const struct unf_fcp_cmnd_s *v_fcp_cmnd, + int *v_pi_dma_direction, + unsigned char *v_task_type) +{ + if (UNF_FCP_WR_DATA & v_fcp_cmnd->control) { + *v_task_type = HIFC_SQE_FCP_IWRITE; + *v_pi_dma_direction = DMA_TO_DEVICE; + } else if (UNF_GET_TASK_MGMT_FLAGS(v_fcp_cmnd->control) != 0) { + *v_task_type = HIFC_SQE_FCP_ITMF; + *v_pi_dma_direction = DMA_FROM_DEVICE; + } else { + *v_task_type = HIFC_SQE_FCP_IREAD; + *v_pi_dma_direction = DMA_FROM_DEVICE; + } +} + +static void hifc_adjust_icmnd_burst_len(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts, + int direction) +{ + struct hifcoe_sqe_icmnd_s *icmnd = &v_sqe_ts->cont.icmnd; + + icmnd->info.dif_info.wd0.difx_len = 0; +} + +static inline unsigned int hifc_build_cmnd_wqe(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sge) +{ + unsigned int ret = RETURN_OK; + int direction = 0; + unsigned char task_type = 0; + struct unf_fcp_cmnd_s *fcp_cmnd = NULL; + struct hifcoe_sqe_s *sqe = v_sge; + unsigned int dif_flag = 0; + + fcp_cmnd = v_pkg->fcp_cmnd; + if (unlikely(!fcp_cmnd)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Package's FCP commond pointer is NULL."); + + return UNF_RETURN_ERROR; + } + + hifc_get_dma_direction_by_fcp_cmnd(fcp_cmnd, &direction, &task_type); + + hifc_build_icmnd_wqe_ts_header(v_pkg, sqe, task_type, + v_hba->exit_base, v_hba->port_index); + + hifc_build_trd_twr_wqe_ctrls(v_pkg, sqe); + + hifc_build_icmnd_wqe_ts(v_hba, v_pkg, &sqe->ts_sl); + + if (task_type != HIFC_SQE_FCP_ITMF) { + if (v_pkg->dif_control.protect_opcode == UNF_DIF_ACTION_NONE) { + 
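+ /* No DIF protection is requested for this command: keep dif_flag
+  * clear and build the SQE DIF word in disabled mode.
+  */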
dif_flag = 0; + hifc_build_no_dif_control( + v_pkg, + &sqe->ts_sl.cont.icmnd.info.dif_info); + } else { + dif_flag = 1; + hifc_build_dif_control( + v_hba, v_pkg, + &sqe->ts_sl.cont.icmnd.info.dif_info); + hifc_adjust_dix( + v_pkg, &sqe->ts_sl.cont.icmnd.info.dif_info, + task_type); + hifc_adjust_icmnd_burst_len(v_pkg, &sqe->ts_sl, + direction); + } + } + + ret = hifc_build_sgl(v_hba, v_pkg, sqe, direction, dif_flag); + + return ret; +} + +unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_sq_info_s *parent_sq = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct hifcoe_sqe_s sqe; + + /* input param check */ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pkg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (UNF_GET_OXID(v_pkg) != INVALID_VALUE16), return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + hba = v_hba; + + /* 1. find parent sq for scsi_cmnd(pkg) */ + parent_sq = hifc_find_parent_sq_by_pkg(hba, v_pkg); + if (unlikely(!parent_sq)) + /* Do not need to print info */ + return UNF_RETURN_ERROR; + + v_pkg->qos_level += hba->vpid_start; + + /* 2. build cmnd wqe (to sqe) for scsi_cmnd(pkg) */ + ret = hifc_build_cmnd_wqe(hba, v_pkg, &sqe); + if (unlikely(ret != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_ERR, + "[fail]Port(0x%x) Build WQE failed, SID(0x%x) DID(0x%x) OXID(0x%x) pkg type(0x%x) hot pool tag(0x%x).", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, UNF_GET_OXID(v_pkg), + v_pkg->type, UNF_GET_XCHG_TAG(v_pkg)); + + return ret; + } + + /* 3. 
En-Queue Parent SQ for scsi_cmnd(pkg) sqe */ + ret = hifc_parent_sq_enqueue(parent_sq, &sqe); + + return ret; +} + +static void hifc_ini_status_default_handler(struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char control = 0; + unsigned short com_err_code = 0; + + control = v_iresp->wd2.fcp_flag & HIFC_CTRL_MASK; + + if (v_iresp->fcp_resid != 0) { + com_err_code = UNF_IO_FAILED; + v_pkg->residus_len = v_iresp->fcp_resid; + } else { + com_err_code = UNF_IO_SUCCESS; + v_pkg->residus_len = 0; + } + + v_pkg->status = hifc_fill_pkg_status(com_err_code, control, + v_iresp->wd2.scsi_status); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Fill package with status: 0x%x, residus len: 0x%x", + v_pkg->status, v_pkg->residus_len); +} + +void hifc_check_fcp_rsp_iu(struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char scsi_status = 0; + unsigned char control = 0; + + control = (unsigned char)v_iresp->wd2.fcp_flag; + scsi_status = (unsigned char)v_iresp->wd2.scsi_status; + + /* FcpRspIU with Little End from IOB/WQE, to COM's pstPkg also */ + if (control & FCP_RESID_UNDER_MASK) { + /* under flow: usually occurs in inquiry */ + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]I_STS IOB posts under flow with residus len: %u, FCP residue: %u.", + v_pkg->residus_len, v_iresp->fcp_resid); + + if (v_pkg->residus_len != v_iresp->fcp_resid) { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED, + control, + scsi_status); + } else { + v_pkg->status = + hifc_fill_pkg_status(UNF_IO_UNDER_FLOW, + control, scsi_status); + } + } + + if (control & FCP_RESID_OVER_MASK) { + /* over flow: error happened */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]I_STS IOB posts over flow with residus len: %u, FCP residue: %u.", + v_pkg->residus_len, v_iresp->fcp_resid); + + if (v_pkg->residus_len != v_iresp->fcp_resid) { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED, + control, + scsi_status); + } else { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_OVER_FLOW, + control, + scsi_status); + } + } + + v_pkg->unf_rsp_pload_bl.length = 0; + v_pkg->unf_sense_pload_bl.length = 0; + + if (control & FCP_RSP_LEN_VALID_MASK) { + /* dma by chip */ + v_pkg->unf_rsp_pload_bl.buffer_ptr = NULL; + + v_pkg->unf_rsp_pload_bl.length = v_iresp->fcp_rsp_len; + v_pkg->byte_orders |= UNF_BIT_3; + } + + if (control & FCP_SNS_LEN_VALID_MASK) { + /* dma by chip */ + v_pkg->unf_sense_pload_bl.buffer_ptr = NULL; + + v_pkg->unf_sense_pload_bl.length = v_iresp->fcp_sns_len; + v_pkg->byte_orders |= UNF_BIT_4; + } +} + +unsigned short hifc_get_com_err_code(struct unf_frame_pkg_s *v_pkg) +{ + unsigned short com_err_code = UNF_IO_FAILED; + + if (v_pkg->status_sub_code == DRV_DIF_CRC_ERR) + com_err_code = UNF_IO_DIF_ERROR; + else if (v_pkg->status_sub_code == DRV_DIF_LBA_ERR) + com_err_code = UNF_IO_DIF_REF_ERROR; + else + com_err_code = UNF_IO_DIF_GEN_ERROR; + return com_err_code; +} + +void hifc_process_ini_fail_io(struct hifc_hba_s *v_hba, + struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned short com_err_code = UNF_IO_FAILED; + unsigned char dif_info = 0; + + /* 1. 
error stats process */ + if (HIFC_GET_SCQE_STATUS((union hifcoe_scqe_u *)(void *)v_iresp) != 0) { + switch (HIFC_GET_SCQE_STATUS( + (union hifcoe_scqe_u *)(void *)v_iresp)) { + /* DIF error process */ + case HIFC_COMPLETION_STATUS_DIF_ERROR: + dif_info = (unsigned char)v_iresp->wd1.dif_info; + v_pkg->status_sub_code = + (dif_info & HIFC_DIF_ERROR_CODE_CRC) ? + DRV_DIF_CRC_ERR : ((dif_info & + HIFC_DIF_ERROR_CODE_REF) ? DRV_DIF_LBA_ERR : + ((dif_info & HIFC_DIF_ERROR_CODE_APP) ? + DRV_DIF_APP_ERR : 0)); + + com_err_code = hifc_get_com_err_code(v_pkg); + + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_MAJOR, + "[err]Port(0x%x) INI io oxid(0x%x), rxid(0x%x) status with dif err(0x%x)", + v_hba->port_cfg.port_id, v_iresp->wd0.ox_id, + v_iresp->wd0.rx_id, dif_info); + + hifc_dif_err_count(v_hba, dif_info); + break; + + /* I/O not complete: 1.session reset; 2.clear buffer */ + case FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED: + case FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED: + case FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED: + case FCOE_CQE_WQE_FLUSH_IO_COMPLETED: + com_err_code = UNF_IO_CLEAN_UP; + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[warn]Port(0x%x) INI IO not complete, OX_ID(0x%x) RX_ID(0x%x) status(0x%x)", + v_hba->port_cfg.port_id, v_iresp->wd0.ox_id, + v_iresp->wd0.rx_id, com_err_code); + break; + + /* any other: I/O failed --->>> DID error */ + default: + com_err_code = UNF_IO_FAILED; + break; + } + + /* fill pkg status & return directly */ + v_pkg->status = + hifc_fill_pkg_status(com_err_code, v_iresp->wd2.fcp_flag, + v_iresp->wd2.scsi_status); + return; + } + + /* 2. default stats process */ + hifc_ini_status_default_handler(v_iresp, v_pkg); + + /* 3. FCP RSP IU check */ + hifc_check_fcp_rsp_iu(v_iresp, v_pkg); +} + +unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_wqe) +{ + struct hifcoe_scqe_iresp_s *iresp = NULL; + struct unf_frame_pkg_s pkg; + unsigned int ret = RETURN_OK; + + iresp = (struct hifcoe_scqe_iresp_s *)(void *)v_wqe; + + /* 1. Constraints: I_STS remain cnt must be zero */ + if (unlikely(HIFC_GET_SCQE_REMAIN_CNT(v_wqe) != 0)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ini_wqe(OX_ID:0x%x RX_ID:0x%x) remain_cnt(0x%x) abnormal, status(0x%x)", + v_hba->port_cfg.port_id, + iresp->wd0.ox_id, + iresp->wd0.rx_id, + HIFC_GET_SCQE_REMAIN_CNT(v_wqe), + HIFC_GET_SCQE_STATUS(v_wqe)); + + UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id, v_wqe, + sizeof(union hifcoe_scqe_u)); + + /* return directly */ + return UNF_RETURN_ERROR; + } + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num; + + /* 2. OX_ID validity check */ + if (likely(((unsigned short)iresp->wd0.ox_id >= v_hba->exit_base) && + ((unsigned short)iresp->wd0.ox_id < + v_hba->exit_base + v_hba->exit_count))) { + pkg.status = UNF_IO_SUCCESS; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = + iresp->wd0.ox_id - v_hba->exit_base; + } else { + /* OX_ID error: return by COM */ + pkg.status = UNF_IO_FAILED; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE16; + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ini_cmnd_wqe(OX_ID:0x%x RX_ID:0x%x) ox_id invalid, status(0x%x)", + v_hba->port_cfg.port_id, + iresp->wd0.ox_id, + iresp->wd0.rx_id, + HIFC_GET_SCQE_STATUS(v_wqe)); + + UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id, + v_wqe, sizeof(union hifcoe_scqe_u)); + } + + /* 3. 
status check */ + if (unlikely(HIFC_GET_SCQE_STATUS(v_wqe) || + (iresp->wd2.scsi_status != 0) || + (iresp->fcp_resid != 0) || + ((iresp->wd2.fcp_flag & HIFC_CTRL_MASK) != 0))) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[warn]Port(0x%x) scq_status(0x%x) scsi_status(0x%x) fcp_resid(0x%x) fcp_flag(0x%x)", + v_hba->port_cfg.port_id, HIFC_GET_SCQE_STATUS(v_wqe), + iresp->wd2.scsi_status, iresp->fcp_resid, + iresp->wd2.fcp_flag); + + /* set pkg status & check fcp_rsp IU */ + hifc_process_ini_fail_io(v_hba, iresp, &pkg); + } + + /* 4. LL_Driver ---to--->>> COM_Driver */ + UNF_LOWLEVEL_SCSI_COMPLETED(ret, v_hba->lport, &pkg); + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_io.h b/drivers/scsi/huawei/hifc/hifc_io.h new file mode 100644 index 0000000000000000000000000000000000000000..e7e7e1bd38a47c5d6fdcc0a7b52d1d325bbd0359 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_io.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_IO_H__ +#define __HIFC_IO_H__ + +enum dif_mode_e { + DIF_MODE_NONE = 0x0, + DIF_MODE_INSERT = 0x1, + DIF_MODE_REMOVE = 0x2, + DIF_MODE_FORWARD_OR_REPLACE = 0x3 +}; + +enum ref_tag_mode_e { + BOTH_NONE = 0x0, + RECEIVE_INCREASE = 0x1, + REPLACE_INCREASE = 0x2, + BOTH_INCREASE = 0x3 +}; + +#define HIFC_DIF_DISABLE 0 +#define HIFC_DIF_ENABLE 1 +#define HIFC_DIF_SECTOR_512B_MODE 0 +#define HIFC_DIF_SECTOR_4KB_MODE 1 +#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0 +#define HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1 +#define HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2 +#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3 +#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0 +#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4 + +#define HIFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4 +#define HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0 +#define HIFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0 +#define HIFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1 +#define HIFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2 +#define HIFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3 + +#define HIFC_DIF_ERROR_CODE_MASK 0xe +#define HIFC_DIF_ERROR_CODE_CRC 0x2 +#define HIFC_DIF_ERROR_CODE_REF 0x4 +#define HIFC_DIF_ERROR_CODE_APP 0x8 + +#define HIFC_DIF_SEND_DIFERR_PAYLOAD 0 +#define HIFC_DIF_SEND_DIFERR_CRC 1 +#define HIFC_DIF_SEND_DIFERR_APP 2 +#define HIFC_DIF_SEND_DIFERR_REF 3 +#define HIFC_DIF_RECV_DIFERR_ALL 4 +#define HIFC_DIF_RECV_DIFERR_CRC 5 +#define HIFC_DIF_RECV_DIFERR_APP 6 +#define HIFC_DIF_RECV_DIFERR_REF 7 + +#define HIFC_SECT_SIZE_512 512 +#define HIFC_SECT_SIZE_4096 4096 +#define HIFC_SECT_SIZE_512_8 520 +#define HIFC_SECT_SIZE_4096_8 4104 +#define HIFC_CTRL_MASK 0x1f + +unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); +unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_wqe); + +#endif /* __HIFC_IO_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_knl_adp.h b/drivers/scsi/huawei/hifc/hifc_knl_adp.h new file mode 100644 index 0000000000000000000000000000000000000000..5b05ff6eb9bd5816eed04d44d061d9b82315211c --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_knl_adp.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_KNL_ADP_H_ +#define HIFC_KNL_ADP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define __TIME_STR__ "[compiled with the kernel]" + +#define sdk_err(dev, format, ...) \ + dev_err(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) \ + dev_warn(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) \ + dev_notice(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) \ + dev_info(dev, "[COMM]"format, ##__VA_ARGS__) + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_lld.c b/drivers/scsi/huawei/hifc/hifc_lld.c new file mode 100644 index 0000000000000000000000000000000000000000..80972dbbf71120e6b64f4cea7b2c5917e1828637 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_lld.c @@ -0,0 +1,896 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_dbgtool_knl.h" +#include "hifc_tool.h" + +#define HIFC_PCI_CFG_REG_BAR 0 +#define HIFC_PCI_INTR_REG_BAR 2 +#define HIFC_PCI_DB_BAR 4 +#define HIFC_SECOND_BASE 1000 +#define HIFC_SYNC_YEAR_OFFSET 1900 +#define HIFC_SYNC_MONTH_OFFSET 1 + +#define HIFC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver" +#define HIFCVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HIFC_DRV_DESC); +MODULE_VERSION(HIFC_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#define HIFC_EVENT_PROCESS_TIMEOUT 10000 + +#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0) +#define SET_BIT(num, n) ((num) | (1UL << (n))) +#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n)))) + +#define MAX_CARD_ID 64 +static u64 card_bit_map; +LIST_HEAD(g_hifc_chip_list); + +enum hifc_lld_status { + HIFC_NODE_CHANGE = BIT(0), +}; + +struct hifc_lld_lock { + /* lock for chip list */ + struct mutex lld_mutex; + unsigned long status; + atomic_t dev_ref_cnt; +}; + +static struct hifc_lld_lock g_lld_lock; + +#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ + +/* node in chip_node will changed, tools or driver can't get node + * during this situation + */ +static void lld_lock_chip_node(void) +{ + u32 loop_cnt; + + mutex_lock(&g_lld_lock.lld_mutex); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) { + if (!test_and_set_bit(HIFC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED) + pr_warn("Wait for lld node change complete timeout when try to get lld lock\n"); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) { + if (!atomic_read(&g_lld_lock.dev_ref_cnt)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld dev unused for %us, reference count: %d\n", + loop_cnt / 1000, + atomic_read(&g_lld_lock.dev_ref_cnt)); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY) + pr_warn("Wait for lld dev unused timeout\n"); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +static void lld_unlock_chip_node(void) +{ + clear_bit(HIFC_NODE_CHANGE, &g_lld_lock.status); +} + +/* When tools or other drivers want to get node of chip_node, use this function + * to prevent node be freed + */ +void lld_dev_hold(void) +{ + u32 loop_cnt = 0; + + /* ensure there have not any chip node in changing */ + mutex_lock(&g_lld_lock.lld_mutex); + + while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) { + if (!test_bit(HIFC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT) + pr_warn("Wait lld node change complete timeout when try to hode lld dev\n"); + + atomic_inc(&g_lld_lock.dev_ref_cnt); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_dev_put(void) +{ + atomic_dec(&g_lld_lock.dev_ref_cnt); +} + +static void hifc_lld_lock_init(void) +{ + mutex_init(&g_lld_lock.lld_mutex); + atomic_set(&g_lld_lock.dev_ref_cnt, 0); +} + +extern int hifc_probe(struct hifc_lld_dev *lld_dev, + void **uld_dev, char *uld_dev_name); + +static int attach_uld(struct hifc_pcidev *dev) +{ + void *uld_dev = NULL; + + int err; + + mutex_lock(&dev->pdev_mutex); + + if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) { + sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n"); + err = -EFAULT; + goto out_unlock; + } + + err = hifc_stateful_init(dev->hwdev); + if (err) + goto out_unlock; + + err = hifc_probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name); + if (err || !uld_dev) { + sdk_err(&dev->pcidev->dev, + "Failed to add object for driver to pcie device\n"); + goto probe_failed; + } + + dev->uld_dev = uld_dev; + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, + "Attach driver to pcie device succeed\n"); + 
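+ /* From this point the FC upper-layer driver owns uld_dev until
+  * detach_uld() calls hifc_remove().
+  */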
return 0; + +probe_failed: + hifc_stateful_deinit(dev->hwdev); +out_unlock: + mutex_unlock(&dev->pdev_mutex); + + return err; +} + +extern void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev); + +static void detach_uld(struct hifc_pcidev *dev) +{ + u32 cnt = 0; + + mutex_lock(&dev->pdev_mutex); + + while (cnt < HIFC_EVENT_PROCESS_TIMEOUT) { + if (!test_and_set_bit(SERVICE_T_FC, &dev->state)) + break; + usleep_range(900, 1000); + cnt++; + } + + hifc_remove(&dev->lld_dev, dev->uld_dev); + dev->uld_dev = NULL; + hifc_stateful_deinit(dev->hwdev); + if (cnt < HIFC_EVENT_PROCESS_TIMEOUT) + clear_bit(SERVICE_T_FC, &dev->state); + + sdk_info(&dev->pcidev->dev, + "Detach driver from pcie device succeed\n"); + mutex_unlock(&dev->pdev_mutex); +} + +static void hifc_sync_time_to_fmw(struct hifc_pcidev *pdev_pri) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = tv.tv_sec * HIFC_SECOND_BASE + + tv.tv_usec / HIFC_SECOND_BASE; + err = hifc_sync_time(pdev_pri->hwdev, tv_msec); + if (err) { + sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm(tv.tv_sec, &rt_time); + sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + rt_time.tm_year + HIFC_SYNC_YEAR_OFFSET, + rt_time.tm_mon + HIFC_SYNC_MONTH_OFFSET, + rt_time.tm_mday, rt_time.tm_hour, + rt_time.tm_min, rt_time.tm_sec); + } +} + +#define MAX_VER_FIELD_LEN 4 +#define MAX_VER_SPLIT_NUM 4 + +struct mctp_hdr { + u16 resp_code; + u16 reason_code; + u32 manufacture_id; + + u8 cmd_rsvd; + u8 major_cmd; + u8 sub_cmd; + u8 spc_field; +}; + +struct mctp_bdf_info { + struct mctp_hdr hdr; /* spc_field: pf index */ + u8 rsvd; + u8 bus; + u8 device; + u8 function; +}; + +static void __mctp_set_hdr(struct mctp_hdr *hdr, + struct hifc_mctp_host_info *mctp_info) +{ + u32 manufacture_id = 0x07DB; + + hdr->cmd_rsvd = 0; + hdr->major_cmd = mctp_info->major_cmd; + hdr->sub_cmd = mctp_info->sub_cmd; + hdr->manufacture_id = cpu_to_be32(manufacture_id); + hdr->resp_code = cpu_to_be16(hdr->resp_code); + hdr->reason_code = cpu_to_be16(hdr->reason_code); +} + +static void __mctp_get_bdf(struct hifc_pcidev *pci_adapter, + struct hifc_mctp_host_info *mctp_info) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + struct mctp_bdf_info *bdf_info = mctp_info->data; + + bdf_info->bus = pdev->bus->number; + bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */ + bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */ + + memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr)); + __mctp_set_hdr(&bdf_info->hdr, mctp_info); + bdf_info->hdr.spc_field = + (u8)hifc_global_func_id_hw(pci_adapter->hwdev); + + mctp_info->data_len = sizeof(*bdf_info); +} + +#define MCTP_PUBLIC_SUB_CMD_BDF 0x1 + +static void __mctp_get_host_info(struct hifc_pcidev *dev, + struct hifc_mctp_host_info *mctp_info) +{ +#define COMMAND_UNSUPPORTED 3 + struct mctp_hdr *hdr; + + if (((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) == + MCTP_PUBLIC_SUB_CMD_BDF) { + __mctp_get_bdf(dev, mctp_info); + } else { + hdr = mctp_info->data; + hdr->reason_code = COMMAND_UNSUPPORTED; + __mctp_set_hdr(hdr, mctp_info); + mctp_info->data_len = sizeof(*hdr); + } +} + +void *hifc_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter; + struct card_node *chip_node; + struct hifc_pcidev *dev; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if 
(!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev && hifc_func_type(dev->hwdev) == TYPE_PPF) { + lld_dev_put(); + return dev->hwdev; + } + } + lld_dev_put(); + + return NULL; +} + +void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev, + struct hifc_event_info *event); + +void hifc_event_process(void *adapter, struct hifc_event_info *event) +{ + struct hifc_pcidev *dev = adapter; + + if (event->type == HIFC_EVENT_FMW_ACT_NTC) + return hifc_sync_time_to_fmw(dev); + else if (event->type == HIFC_EVENT_MCTP_GET_HOST_INFO) + return __mctp_get_host_info(dev, &event->mctp_info); + + if (test_and_set_bit(SERVICE_T_FC, &dev->state)) { + sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler is in detach\n", + event->type); + return; + } + + hifc_event(&dev->lld_dev, dev->uld_dev, event); + clear_bit(SERVICE_T_FC, &dev->state); +} + +static int mapping_bar(struct pci_dev *pdev, struct hifc_pcidev *pci_adapter) +{ + u64 dwqe_addr; + + pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, HIFC_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HIFC_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map interrupt regs\n"); + goto map_intr_bar_err; + } + + pci_adapter->db_base_phy = pci_resource_start(pdev, HIFC_PCI_DB_BAR); + pci_adapter->db_base = ioremap(pci_adapter->db_base_phy, + HIFC_DB_DWQE_SIZE); + if (!pci_adapter->db_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map doorbell regs\n"); + goto map_db_err; + } + + dwqe_addr = pci_adapter->db_base_phy + HIFC_DB_DWQE_SIZE; + +#if defined(__aarch64__) + /* arm do not support call ioremap_wc() */ + pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HIFC_DB_DWQE_SIZE, + __pgprot(PROT_DEVICE_nGnRnE)); +#else + pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr, + HIFC_DB_DWQE_SIZE); + +#endif /* end of "defined(__aarch64__)" */ + if (!pci_adapter->dwqe_mapping) { + sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n"); + goto mapping_dwqe_err; + } + + return 0; + +mapping_dwqe_err: + iounmap(pci_adapter->db_base); + +map_db_err: + iounmap(pci_adapter->intr_reg_base); + +map_intr_bar_err: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void unmapping_bar(struct hifc_pcidev *pci_adapter) +{ +#if defined(__aarch64__) + iounmap(pci_adapter->dwqe_mapping); +#else + io_mapping_free(pci_adapter->dwqe_mapping); +#endif /* end of "defined(__aarch64__)" */ + + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int alloc_chip_node(struct hifc_pcidev *pci_adapter) +{ + struct card_node *chip_node; + unsigned char i; + unsigned char parent_bus_number = 0; + + if (!pci_is_root_bus(pci_adapter->pcidev->bus)) + parent_bus_number = pci_adapter->pcidev->bus->parent->number; + + if (parent_bus_number != 0) { + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } + + for (i = 0; i < MAX_CARD_ID; i++) { + if (!FIND_BIT(card_bit_map, i)) { + card_bit_map = (u64)SET_BIT(card_bit_map, i); + break; + } + } + + if (i == MAX_CARD_ID) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc card id\n"); + return 
-EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + card_bit_map = CLEAR_BIT(card_bit_map, i); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + return -ENOMEM; + } + + chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL); + if (!(chip_node->dbgtool_attr_file.name)) { + kfree(chip_node); + card_bit_map = CLEAR_BIT(card_bit_map, i); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc dbgtool attr file name\n"); + return -ENOMEM; + } + + /* parent bus number */ + chip_node->dp_bus_num = parent_bus_number; + + snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i); + snprintf((char *)chip_node->dbgtool_attr_file.name, + IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i); + sdk_info(&pci_adapter->pcidev->dev, + "Add new chip %s to global list succeed\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, &g_hifc_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + mutex_init(&chip_node->sfp_mutex); + + return 0; +} + +static void free_chip_node(struct hifc_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + u32 id; + int err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, + "Delete chip %s from global list succeed\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%u", &id); + if (err < 0) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get hifc id\n"); + + card_bit_map = CLEAR_BIT(card_bit_map, id); + + kfree(chip_node->dbgtool_attr_file.name); + kfree(chip_node); + } +} + +static int config_pci_dma_mask(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, "Failed to set DMA mask\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, + "Couldn't set 64-bit coherent DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, + "Failed to set coherent DMA mask\n"); + return err; + } + } + + return 0; +} + +static int hifc_pci_init(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = NULL; + int err; + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) { + sdk_err(&pdev->dev, + "Failed to alloc pci device adapter\n"); + return -ENOMEM; + } + pci_adapter->pcidev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + + err = pci_enable_device(pdev); + if (err) { + sdk_err(&pdev->dev, "Failed to enable PCI device\n"); + goto pci_enable_err; + } + + err = pci_request_regions(pdev, HIFC_DRV_NAME); + if (err) { + sdk_err(&pdev->dev, "Failed to request regions\n"); + goto pci_regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = config_pci_dma_mask(pdev); + if (err) + goto dma_mask_err; + + return 0; + +dma_mask_err: + pci_clear_master(pdev); + pci_release_regions(pdev); + +pci_regions_err: + pci_disable_device(pdev); + +pci_enable_err: + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); + + return err; +} + +static void hifc_pci_deinit(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + 
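+ /* Reverse of hifc_pci_init(): disable the device and free the
+  * per-function adapter allocated at probe time.
+  */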
pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); +} + +static int hifc_func_init(struct pci_dev *pdev, + struct hifc_pcidev *pci_adapter) +{ + struct hifc_init_para init_para; + + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.dwqe_mapping = pci_adapter->dwqe_mapping; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + init_para.ppf_hwdev = hifc_get_ppf_hwdev_by_pdev(pdev); + err = hifc_init_hwdev(&init_para); + if (err) { + pci_adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } + + pci_adapter->init_state = HIFC_INIT_STATE_HWDEV_INITED; + + pci_adapter->lld_dev.pdev = pdev; + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + + hifc_event_register(pci_adapter->hwdev, pci_adapter, + hifc_event_process); + + hifc_sync_time_to_fmw(pci_adapter); + + lld_lock_chip_node(); + err = hifc_dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + hifc_event_unregister(pci_adapter->hwdev); + return err; + } + lld_unlock_chip_node(); + pci_adapter->init_state = HIFC_INIT_STATE_DBGTOOL_INITED; + + attach_uld(pci_adapter); + + sdk_info(&pdev->dev, "Pcie device probed\n"); + pci_adapter->init_state = HIFC_INIT_STATE_ALL_INITED; + + return 0; +} + +static void hifc_func_deinit(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. 
+ */ + if (pci_adapter->init_state >= HIFC_INIT_STATE_ALL_INITED) + detach_uld(pci_adapter); + + hifc_disable_mgmt_msg_report(pci_adapter->hwdev); + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_PART_INITED) + hifc_flush_mgmt_workq(pci_adapter->hwdev); + + hifc_set_func_deinit_flag(pci_adapter->hwdev); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_DBGTOOL_INITED) { + lld_lock_chip_node(); + hifc_dbgtool_knl_deinit(pci_adapter->hwdev, + pci_adapter->chip_node); + lld_unlock_chip_node(); + hifc_event_unregister(pci_adapter->hwdev); + } + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) { + /*Remove the current node from node-list first, + * then it's safe to free hwdev + */ + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + hifc_free_hwdev(pci_adapter->hwdev); + } +} + +static void remove_func(struct hifc_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + + switch (pci_adapter->init_state) { + case HIFC_INIT_STATE_ALL_INITED: + /*lint -fallthrough*/ + + case HIFC_INIT_STATE_DBGTOOL_INITED: + case HIFC_INIT_STATE_HWDEV_INITED: + case HIFC_INIT_STATE_HW_PART_INITED: + case HIFC_INIT_STATE_HW_IF_INITED: + case HIFC_INIT_STATE_PCI_INITED: + set_bit(HIFC_FUNC_IN_REMOVE, &pci_adapter->flag); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) + hifc_func_deinit(pdev); + + lld_lock_chip_node(); + if (pci_adapter->init_state < HIFC_INIT_STATE_HW_IF_INITED) + list_del(&pci_adapter->node); + hifc_tool_k_uninit(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + hifc_pci_deinit(pdev); + + /*lint -fallthrough*/ + break; + + default: + break; + } +} + +static void hifc_hwdev_remove(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if (!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) + hifc_detect_hw_present(pci_adapter->hwdev); + + remove_func(pci_adapter); + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static int hifc_hwdev_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct hifc_pcidev *pci_adapter; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + + err = hifc_pci_init(pdev); + if (err) + return err; + + pci_adapter = pci_get_drvdata(pdev); + clear_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag); + clear_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag); + err = mapping_bar(pdev, pci_adapter); + if (err) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + pci_adapter->id = *id; + + /* if chip information of pcie function exist, + * add the function into chip + */ + lld_lock_chip_node(); + err = alloc_chip_node(pci_adapter); + if (err) { + sdk_err(&pdev->dev, + "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + err = hifc_tool_k_init(); + if (err) { + sdk_warn(&pdev->dev, "Failed to init nictool"); + goto init_nictool_err; + } + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + + lld_unlock_chip_node(); + + pci_adapter->init_state = HIFC_INIT_STATE_PCI_INITED; + + err = hifc_func_init(pdev, pci_adapter); + if (err) + goto func_init_err; + + return 0; + +func_init_err: + if (!test_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag)) + set_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag); + return 0; +init_nictool_err: + free_chip_node(pci_adapter); +alloc_chip_node_fail: + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + 
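+ /* Past this point only the PCI-level setup from hifc_pci_init() is left
+  * to undo.
+  */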
+map_bar_failed: + hifc_pci_deinit(pdev); + + sdk_err(&pdev->dev, "Pcie device probe failed\n"); + return err; +} + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HIFC_DEV_ID_1822_8G 0x0212 +#define HIFC_DEV_ID_1822_16G 0x0203 +#define HIFC_DEV_ID_1822_32G 0x0202 + +/*lint -save -e133 -e10*/ +static const struct pci_device_id hifc_pci_table[] = { + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_8G), 0}, + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_16G), 0}, + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_32G), 0}, + {0, 0} +}; + +/*lint -restore*/ +MODULE_DEVICE_TABLE(pci, hifc_pci_table); + +static void hifc_shutdown(struct pci_dev *pdev) +{ + sdk_err(&pdev->dev, "Shutdown device\n"); + + pci_disable_device(pdev); +} + +static struct pci_driver hifc_driver = { + .name = HIFC_DRV_NAME, + .id_table = hifc_pci_table, + .probe = hifc_hwdev_probe, + .remove = hifc_hwdev_remove, + .shutdown = hifc_shutdown, +}; + +extern int hifc_init_module(void); +extern void hifc_exit_module(void); + +static int __init hifc_lld_init(void) +{ + pr_info("%s - version %s\n", HIFC_DRV_DESC, HIFC_DRV_VERSION); + + hifc_lld_lock_init(); + + hifc_init_module(); + + return pci_register_driver(&hifc_driver); +} + +static void __exit hifc_lld_exit(void) +{ + pci_unregister_driver(&hifc_driver); + hifc_exit_module(); +} + +module_init(hifc_lld_init); +module_exit(hifc_lld_exit); diff --git a/drivers/scsi/huawei/hifc/hifc_lld.h b/drivers/scsi/huawei/hifc/hifc_lld.h new file mode 100644 index 0000000000000000000000000000000000000000..34fa5e6ea30ed89bc334fcc9731e2b3eb0b85f82 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_lld.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_LLD_H_ +#define HIFC_LLD_H_ +#include "unf_common.h" +#define HIFC_PCI_VENDOR_ID (0x19e5) +#define HIFC_DRV_NAME "hifc_sdk" +#define HIFC_CHIP_NAME "hifc" +#define HIFC_DRV_VERSION UNF_FC_VERSION + +struct hifc_lld_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +extern struct list_head g_hifc_chip_list; + +/* Structure pcidev private*/ +struct hifc_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct hifc_lld_dev lld_dev; + /* Record the service object address, + * such as hifc_dev and toe_dev, fc_dev + */ + void *uld_dev; + /* Record the service object name */ + char uld_dev_name[IFNAMSIZ]; + /* It is a the global variable for driver to manage + * all function device linked list + */ + struct list_head node; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + u64 db_base_phy; + void __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + + u32 init_state; + /* setted when uld driver processing event */ + unsigned long state; + struct pci_device_id id; + + unsigned long flag; +}; + +enum { + HIFC_FUNC_IN_REMOVE = BIT(0), + HIFC_FUNC_PRB_ERR = BIT(1), + HIFC_FUNC_PRB_DELAY = BIT(2), +}; + +enum hifc_init_state { + HIFC_INIT_STATE_NONE, + HIFC_INIT_STATE_PCI_INITED, + HIFC_INIT_STATE_HW_IF_INITED, + HIFC_INIT_STATE_HW_PART_INITED, + HIFC_INIT_STATE_HWDEV_INITED, + HIFC_INIT_STATE_DBGTOOL_INITED, + HIFC_INIT_STATE_ALL_INITED, +}; + +void lld_dev_put(void); +void lld_dev_hold(void); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.c b/drivers/scsi/huawei/hifc/hifc_mgmt.c new file mode 100644 index 
0000000000000000000000000000000000000000..3f4818898e8d1c19a8b7e2b840e06c19ce84d839 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_mgmt.c @@ -0,0 +1,1426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" + +#define BUF_OUT_DEFAULT_SIZE 1 +#define SEGMENT_LEN 48 +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HIFC_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 +#define MSG_NO_RESP 0xFFFF +#define MAX_MSG_SZ 2016 + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * hifc_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: pri handle function + * @callback: the handler for a sync message that will handle messages + * Return: 0 - success, negative - failure + **/ +int hifc_register_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod, + void *pri_handle, hifc_mgmt_msg_cb callback) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + + if (mod >= HIFC_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} + +/** + * hifc_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + **/ +void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + + if (!hwdev || mod >= HIFC_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} + +void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if 
(!hwdev || !proc) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Register recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + pf_to_mgmt->proc.info[cmd_idx].cmd = cmd; + pf_to_mgmt->proc.info[cmd_idx].proc = proc; + + pf_to_mgmt->proc.cmd_num++; +} + +void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Unregister recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + for (cmd_idx = 0; cmd_idx < HIFC_COMM_SELF_CMD_MAX; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + pf_to_mgmt->proc.info[cmd_idx].cmd = 0; + pf_to_mgmt->proc.info[cmd_idx].proc = NULL; + pf_to_mgmt->proc.cmd_num--; + } + } +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: message ack type + * @direction: the direction of the original message + * @cmd: vmd type + * @msg_id: message id + **/ +static void prepare_header(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, enum hifc_mod_type mod, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + enum hifc_mgmt_cmd cmd, u32 msg_id) +{ + struct hifc_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HIFC_MSG_HEADER_SET(mod, MODULE) | + HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HIFC_MSG_HEADER_SET(ack_type, NO_ACK) | + HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HIFC_MSG_HEADER_SET(0, SEQID) | + HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HIFC_MSG_HEADER_SET(direction, DIRECTION) | + HIFC_MSG_HEADER_SET(cmd, CMD) | + HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HIFC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hifc_hwdev *hwdev, + u64 *header, u16 msg_len, enum hifc_mod_type mod, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + enum hifc_mgmt_cmd cmd, u32 msg_id) +{ + struct hifc_hwif *hwif = hwdev->hwif; + + *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HIFC_MSG_HEADER_SET(mod, MODULE) | + HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HIFC_MSG_HEADER_SET(ack_type, NO_ACK) | + HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HIFC_MSG_HEADER_SET(0, SEQID) | + HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HIFC_MSG_HEADER_SET(direction, DIRECTION) | + HIFC_MSG_HEADER_SET(cmd, CMD) | + HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) 
| + HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HIFC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, + int msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hifc_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hifc_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (direction == HIFC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + int err; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + ASYNC_MSG_ID_INC(pf_to_mgmt); + + err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_DIRECT_SEND, MSG_NO_RESP); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +/** + * send_msg_to_mgmt_sync - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_sync(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hifc_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (direction == HIFC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, 
msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + + if (ack_type == HIFC_MSG_ACK) + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_TO_MGMT_CPU]; + + return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +static inline void msg_to_mgmt_pre(enum hifc_mod_type mod, void *buf_in) +{ + struct hifc_msg_head *msg_head; + + /* set aeq fix num to 3, need to ensure response aeq id < 3*/ + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC) { + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= HIFC_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } +} + +int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + struct hifc_recv_msg *recv_msg; + struct completion *recv_done; + ulong timeo; + int err; + ulong ret; + + msg_to_mgmt_pre(mod, buf_in); + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_ACK, HIFC_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %d\n", + pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (!ret) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n", + pf_to_mgmt->sync_msg_id); + hifc_dump_aeq_info((struct hifc_hwdev *)hwdev); + err = -ETIMEDOUT; + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) { + up(&pf_to_mgmt->sync_msg_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt, should less than: %d\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +static int __get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + struct hifc_hwdev *dev = hwdev; + u32 offset; + + offset = HIFC_CLP_REG_GAP * hifc_pcie_itf_id(dev); + + switch (reg_type) { + case HIFC_CLP_BA_HOST: + *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ? + HIFC_CLP_REG(REQ_SRAM_BA) : + HIFC_CLP_REG(RSP_SRAM_BA); + break; + + case HIFC_CLP_SIZE_HOST: + *reg_addr = HIFC_CLP_REG(SRAM_SIZE); + break; + + case HIFC_CLP_LEN_HOST: + *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ? 
+ HIFC_CLP_REG(REQ) : HIFC_CLP_REG(RSP); + break; + + case HIFC_CLP_START_REQ_HOST: + *reg_addr = HIFC_CLP_REG(REQ); + break; + + case HIFC_CLP_READY_RSP_HOST: + *reg_addr = HIFC_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + *reg_addr += offset; + + return 0; +} + +static inline int clp_param_valid(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HIFC_CLP_REQ_HOST && + reg_type == HIFC_CLP_READY_RSP_HOST) + return -EINVAL; + + if (data_type == HIFC_CLP_RSP_HOST && + reg_type == HIFC_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static u32 get_clp_reg_value(struct hifc_hwdev *hwdev, + enum clp_reg_type reg_type, u32 reg_addr) +{ + u32 reg_value; + + reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HIFC_CLP_BA_HOST: + reg_value = ((reg_value >> + HIFC_CLP_OFFSET(SRAM_BASE)) & + HIFC_CLP_MASK(SRAM_BASE)); + break; + + case HIFC_CLP_SIZE_HOST: + reg_value = ((reg_value >> + HIFC_CLP_OFFSET(SRAM_SIZE)) & + HIFC_CLP_MASK(SRAM_SIZE)); + break; + + case HIFC_CLP_LEN_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(LEN)) & + HIFC_CLP_MASK(LEN)); + break; + + case HIFC_CLP_START_REQ_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(START)) & + HIFC_CLP_MASK(START)); + break; + + case HIFC_CLP_READY_RSP_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(READY)) & + HIFC_CLP_MASK(READY)); + break; + + default: + break; + } + + return reg_value; +} + +static int hifc_read_clp_reg(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int err; + + err = clp_param_valid(hwdev, data_type, reg_type); + if (err) + return err; + + err = __get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (err) + return err; + + *read_value = get_clp_reg_value(hwdev, reg_type, reg_addr); + + return 0; +} + +static int __check_data_type(enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HIFC_CLP_REQ_HOST && + reg_type == HIFC_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == HIFC_CLP_RSP_HOST && + reg_type == HIFC_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static int __check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == HIFC_CLP_BA_HOST && + value > HIFC_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == HIFC_CLP_SIZE_HOST && + value > HIFC_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == HIFC_CLP_LEN_HOST && + value > HIFC_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == HIFC_CLP_START_REQ_HOST || + reg_type == HIFC_CLP_READY_RSP_HOST) && + value > HIFC_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static void hifc_write_clp_reg(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (__check_data_type(data_type, reg_type)) + return; + + if (__check_reg_value(reg_type, value)) + return; + + if (__get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HIFC_CLP_LEN_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(LEN) << HIFC_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << HIFC_CLP_OFFSET(LEN)); + break; + + case HIFC_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(START) << + HIFC_CLP_OFFSET(START))); + reg_value = reg_value | (value << 
HIFC_CLP_OFFSET(START)); + break; + + case HIFC_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(READY) << + HIFC_CLP_OFFSET(READY))); + reg_value = reg_value | (value << HIFC_CLP_OFFSET(READY)); + break; + + default: + return; + } + + hifc_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int hifc_read_clp_data(struct hifc_hwdev *hwdev, + void *buf_out, u16 *out_size) +{ + int err; + u32 reg = HIFC_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); + delay_cnt++; + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, &ready); + if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "timeout with delay_cnt:%d\n", + delay_cnt); + return -EINVAL; + } + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_LEN_HOST, &temp_out_size); + if (err) + return err; + + if (temp_out_size > HIFC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + sdk_err(hwdev->dev_hdl, "invalid temp_out_size:%d\n", + temp_out_size); + return -EINVAL; + } + + *out_size = (u16)(temp_out_size & 0xffff); + for (; temp_out_size > 0; temp_out_size--) { + *ptr = hifc_hwif_read_reg(hwdev->hwif, reg); + ptr++; + reg = reg + 4; + } + + hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, (u32)0x0); + hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_LEN_HOST, (u32)0x0); + + return 0; +} + +static int hifc_write_clp_data(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size) +{ + int err; + u32 reg = HIFC_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_START_REQ_HOST, &start); + if (err) + return err; + + while (start == 1) { + usleep_range(9000, 10000); + delay_cnt++; + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_START_REQ_HOST, &start); + if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_LEN_HOST, in_size); + hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_START_REQ_HOST, (u32)0x1); + + for (; in_size > 0; in_size--) { + hifc_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + 4; + } + + return 0; +} + +static int hifc_check_clp_init_status(struct hifc_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value:0x%x\n", reg_value); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value:0x%x\n", reg_value); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void hifc_clear_clp_data(struct hifc_hwdev *hwdev, + enum clp_data_type data_type) +{ + u32 reg = (data_type == HIFC_CLP_REQ_HOST) ? 
+ HIFC_CLP_DATA(REQ) : HIFC_CLP_DATA(RSP); + u32 count = HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + hifc_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + 4; + } +} + +int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hifc_hwdev *dev = hwdev; + u64 header; + u16 real_size; + u8 *clp_msg_buf; + int err; + + clp_pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->clp_pf_to_mgmt; + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /*4 bytes alignment*/ + if (in_size % HIFC_CLP_DATA_UNIT_HOST) + real_size = (in_size + (u16)sizeof(header) + + HIFC_CLP_DATA_UNIT_HOST); + else + real_size = in_size + (u16)sizeof(header); + real_size = real_size / HIFC_CLP_DATA_UNIT_HOST; + + if (real_size > + (HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size:%d\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = hifc_check_clp_init_status(dev); + if (err) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; + } + + hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST); + hifc_write_clp_reg(dev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, 0x0); + + /*Send request*/ + memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0); + + memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + hifc_clear_clp_data(dev, HIFC_CLP_REQ_HOST); + err = hifc_write_clp_data(hwdev, + clp_pf_to_mgmt->clp_msg_buf, real_size); + if (err) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + /*Get response*/ + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST); + err = hifc_read_clp_data(hwdev, clp_msg_buf, &real_size); + hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST); + if (err) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + real_size = (u16)((real_size * HIFC_CLP_DATA_UNIT_HOST) & 0xffff); + if ((real_size <= sizeof(header)) || + (real_size > HIFC_CLP_INPUT_BUFFER_LEN_HOST)) { + sdk_err(dev->dev_hdl, "Invalid response size:%d", real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size:%d, out_size:%d\n", + real_size, *out_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + + return 0; +} + +/* This function is only used by txrx flush */ +int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + int err = -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) { + sdk_err(dev, "Mgmt module not initialized\n"); + return -EINVAL; + } + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + if (!MSG_SZ_IS_VALID(in_size)) { + sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n", + in_size); + return -EINVAL; + } + + if (!(((struct hifc_hwdev 
*)hwdev)->chip_present_flag)) + return -EPERM; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_NO_ACK, HIFC_MSG_DIRECT_SEND, + MSG_NO_RESP); + + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +/** + * api cmd write or read bypass defaut use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + **/ +int hifc_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_api_cmd_chain *chain; + + if (!hwdev || !size || !cmd) + return -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_WRITE]; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hifc_api_cmd_write(chain, dest, cmd, size); +} + +int hifc_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack, + u16 ack_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_api_cmd_chain *chain; + + if (!hwdev || !cmd || (ack_size && !ack)) + return -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_READ]; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hifc_api_cmd_read(chain, dest, cmd, size, ack, ack_size); +} + +static void __send_mgmt_ack(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, u16 msg_id) +{ + u16 buf_size; + + if (!in_size) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + /* MGMT sent sync msg, send the response */ + send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, + buf_in, buf_size, HIFC_MSG_RESPONSE, + msg_id); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, u16 msg_id, int need_resp) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + void *buf_out = pf_to_mgmt->mgmt_ack_buf; + enum hifc_mod_type tmp_mod = mod; + bool ack_first = false; + u16 out_size = 0; + + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mod >= HIFC_MOD_HW_MAX) { + sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", + mod); + goto resp; + } + + set_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || + !test_bit(HIFC_MGMT_MSG_CB_REG, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { + sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n", + mod); + clear_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + goto resp; + } + + ack_first = hifc_mgmt_event_ack_first(mod, cmd); + if (ack_first && need_resp) { + /* send ack to mgmt first to avoid command timeout in + * mgmt(100ms in mgmt); + * mgmt to host command don't need any response data from host, + * just need ack from host + */ + __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id); + } + + 
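+ /* Dispatch to the handler registered through hifc_register_mgmt_msg_cb();
+  * whatever it writes to buf_out/out_size is sent back as the ack payload
+  * below when the ack was not already sent first.
+  */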
pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev, + pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], + cmd, buf_in, in_size, + buf_out, &out_size); + + clear_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + +resp: + if (!ack_first && need_resp) + __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, + msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + struct hifc_recv_msg *recv_msg) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + /* delete async msg */ + if (recv_msg->msg_id & ASYNC_MSG_FLAG) + return; + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && + pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_msg->recv_done); + } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { + sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } else { + sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static void recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct hifc_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hifc_mgmt_msg_handle_work, work); + + mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, + mgmt_work->cmd, mgmt_work->msg, + mgmt_work->msg_len, mgmt_work->msg_id, + !mgmt_work->async_mgmt_to_pf); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +static bool check_mgmt_seq_id_and_seg_len(struct hifc_recv_msg *recv_msg, + u8 seq_id, u8 seg_len) +{ + if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN) + return false; + + if (seq_id == 0) { + recv_msg->seq_id = seq_id; + } else { + if (seq_id != recv_msg->seq_id + 1) + return false; + recv_msg->seq_id = seq_id; + } + + return true; +} + +/** + * recv_mgmt_msg_handler - handler a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct hifc_recv_msg *recv_msg) +{ + struct hifc_mgmt_msg_handle_work *mgmt_work; + u64 mbox_header = *((u64 *)header); + void *msg_body = header + sizeof(mbox_header); + u8 seq_id, seq_len; + u32 offset; + u64 dir; + + /* Don't need to get anything from hw when cmd is async */ + dir = HIFC_MSG_HEADER_GET(mbox_header, DIRECTION); + if (dir == HIFC_MSG_RESPONSE && + HIFC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) + return; + + seq_len = HIFC_MSG_HEADER_GET(mbox_header, SEG_LEN); + seq_id = HIFC_MSG_HEADER_GET(mbox_header, SEQID); + + if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n", + recv_msg->seq_id, seq_id, seq_len); + /* set seq_id to invalid seq_id */ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + return; + } + + offset = seq_id * SEGMENT_LEN; + memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); + + if (!HIFC_MSG_HEADER_GET(mbox_header, LAST)) + return; + + recv_msg->cmd = HIFC_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->mod = HIFC_MSG_HEADER_GET(mbox_header, MODULE); 
+ recv_msg->async_mgmt_to_pf = HIFC_MSG_HEADER_GET(mbox_header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HIFC_MSG_HEADER_GET(mbox_header, MSG_LEN); + recv_msg->msg_id = HIFC_MSG_HEADER_GET(mbox_header, MSG_ID); + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + if (HIFC_MSG_HEADER_GET(mbox_header, DIRECTION) == + HIFC_MSG_RESPONSE) { + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + return; + } + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work(pf_to_mgmt->workq, &mgmt_work->work); +} + +/** + * hifc_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to hw device + * @header: the header of the message + * @size: unused + **/ +void hifc_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_recv_msg *recv_msg; + bool is_send_dir = false; + + pf_to_mgmt = dev->pf_to_mgmt; + + is_send_dir = (HIFC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HIFC_MSG_DIRECT_SEND) ? true : false; + + recv_msg = is_send_dir ? 
&pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct hifc_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct hifc_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + **/ +static void free_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * hifc_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(HIFC_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg buffers\n"); + goto alloc_msg_buf_err; + } + + err = hifc_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + sdk_err(dev, "Failed to init the api cmd chains\n"); + 
goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * hifc_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + **/ +void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + hifc_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + kfree(pf_to_mgmt); +} + +void hifc_flush_mgmt_workq(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (hifc_func_type(dev) != TYPE_VF && + hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = kzalloc(HIFC_CLP_INPUT_BUFFER_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.h b/drivers/scsi/huawei/hifc/hifc_mgmt.h new file mode 100644 index 0000000000000000000000000000000000000000..2adcfe2968c15f941ff395b0477d116f9feee79b --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_mgmt.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_MGMT_H_ +#define HIFC_MGMT_H_ + +#define HIFC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HIFC_MSG_HEADER_MODULE_SHIFT 11 +#define HIFC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HIFC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HIFC_MSG_HEADER_SEQID_SHIFT 24 +#define HIFC_MSG_HEADER_LAST_SHIFT 30 +#define HIFC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HIFC_MSG_HEADER_CMD_SHIFT 32 +#define HIFC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HIFC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HIFC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HIFC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HIFC_MSG_HEADER_MODULE_MASK 0x1F +#define HIFC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HIFC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HIFC_MSG_HEADER_SEQID_MASK 0x3F +#define HIFC_MSG_HEADER_LAST_MASK 0x1 +#define HIFC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HIFC_MSG_HEADER_CMD_MASK 0xFF +#define HIFC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HIFC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HIFC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HIFC_MSG_HEADER_GET(val, member) \ + (((val) >> HIFC_MSG_HEADER_##member##_SHIFT) & \ + HIFC_MSG_HEADER_##member##_MASK) + +#define HIFC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HIFC_MSG_HEADER_##member##_MASK) << \ + HIFC_MSG_HEADER_##member##_SHIFT) + +#define HIFC_MGMT_WQ_NAME "hifc_mgmt" + +/*CLP*/ +enum clp_data_type { + HIFC_CLP_REQ_HOST = 0, + HIFC_CLP_RSP_HOST = 1 +}; + 
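+/* Register classes that __get_clp_reg() translates into CLP register
+ * addresses, each offset per PCIe interface, e.g. (illustrative):
+ *
+ *	addr = HIFC_CLP_REG(SRAM_SIZE) + HIFC_CLP_REG_GAP * hifc_pcie_itf_id(dev);
+ */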
+enum clp_reg_type { + HIFC_CLP_BA_HOST = 0, + HIFC_CLP_SIZE_HOST = 1, + HIFC_CLP_LEN_HOST = 2, + HIFC_CLP_START_REQ_HOST = 3, + HIFC_CLP_READY_RSP_HOST = 4 +}; + +/* cmd of mgmt CPU message for HW module */ +enum hifc_mgmt_cmd { + HIFC_MGMT_CMD_RESET_MGMT = 0x0, + HIFC_MGMT_CMD_START_FLR = 0x1, + HIFC_MGMT_CMD_FLUSH_DOORBELL = 0x2, + HIFC_MGMT_CMD_CMDQ_CTXT_SET = 0x10, + HIFC_MGMT_CMD_VAT_SET = 0x12, + HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14, + HIFC_MGMT_CMD_PPF_TMR_SET = 0x22, + HIFC_MGMT_CMD_PPF_HT_GPA_SET = 0x23, + HIFC_MGMT_CMD_RES_STATE_SET = 0x24, + HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32, + HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + HIFC_MGMT_CMD_FAULT_REPORT = 0x37, + HIFC_MGMT_CMD_HEART_LOST_REPORT = 0x38, + HIFC_MGMT_CMD_SYNC_TIME = 0x46, + HIFC_MGMT_CMD_REG_READ = 0x48, + HIFC_MGMT_CMD_L2NIC_RESET = 0x4b, + HIFC_MGMT_CMD_ACTIVATE_FW = 0x4F, + HIFC_MGMT_CMD_PAGESIZE_SET = 0x50, + HIFC_MGMT_CMD_GET_BOARD_INFO = 0x52, + HIFC_MGMT_CMD_WATCHDOG_INFO = 0x56, + HIFC_MGMT_CMD_FMW_ACT_NTC = 0x57, + HIFC_MGMT_CMD_PCIE_DFX_NTC = 0x65, + HIFC_MGMT_CMD_PCIE_DFX_GET = 0x66, + HIFC_MGMT_CMD_GET_HOST_INFO = 0x67, + HIFC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A, + HIFC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C, +}; + +#define HIFC_CLP_REG_GAP 0x20 +#define HIFC_CLP_INPUT_BUFFER_LEN_HOST 2048UL +#define HIFC_CLP_OUTPUT_BUFFER_LEN_HOST 2048UL +#define HIFC_CLP_DATA_UNIT_HOST 4UL +#define HIFC_BAR01_GLOABAL_CTL_OFFSET 0x4000 +#define HIFC_BAR01_CLP_OFFSET 0x5000 + +#define HIFC_CLP_SRAM_SIZE_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x220) +#define HIFC_CLP_REQ_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x224) +#define HIFC_CLP_RSP_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x228) +#define HIFC_CLP_REQ_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x22c) +#define HIFC_CLP_RSP_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x230) +#define HIFC_CLP_REG(member) (HIFC_CLP_##member##_REG) + +#define HIFC_CLP_REQ_DATA (HIFC_BAR01_CLP_OFFSET) +#define HIFC_CLP_RSP_DATA (HIFC_BAR01_CLP_OFFSET + 0x1000) +#define HIFC_CLP_DATA(member) (HIFC_CLP_##member##_DATA) + +#define HIFC_CLP_SRAM_SIZE_OFFSET 16 +#define HIFC_CLP_SRAM_BASE_OFFSET 0 +#define HIFC_CLP_LEN_OFFSET 0 +#define HIFC_CLP_START_OFFSET 31 +#define HIFC_CLP_READY_OFFSET 31 +#define HIFC_CLP_OFFSET(member) (HIFC_CLP_##member##_OFFSET) + +#define HIFC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL +#define HIFC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL +#define HIFC_CLP_LEN_BIT_LEN 0x7ffUL +#define HIFC_CLP_START_BIT_LEN 0x1UL +#define HIFC_CLP_READY_BIT_LEN 0x1UL +#define HIFC_CLP_MASK(member) (HIFC_CLP_##member##_BIT_LEN) + +#define HIFC_CLP_DELAY_CNT_MAX 200UL +#define HIFC_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HIFC_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HIFC_CLP_LEN_REG_MAX 0x3ff +#define HIFC_CLP_START_OR_READY_REG_MAX 0x1 +#define HIFC_MGMT_CMD_UNSUPPORTED 0xFF + +enum hifc_msg_direction_type { + HIFC_MSG_DIRECT_SEND = 0, + HIFC_MSG_RESPONSE = 1 +}; + +enum hifc_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hifc_mgmt_msg_type { + ASYNC_MGMT_MSG = 0, + SYNC_MGMT_MSG = 1, +}; + +enum hifc_msg_ack_type { + HIFC_MSG_ACK = 0, + HIFC_MSG_NO_ACK = 1, +}; + +struct hifc_recv_msg { + void *msg; + + struct completion recv_done; + + u16 msg_len; + enum hifc_mod_type mod; + u8 cmd; + u8 seq_id; + u16 msg_id; + int async_mgmt_to_pf; +}; + +struct hifc_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +#define HIFC_COMM_SELF_CMD_MAX 8 + +struct comm_up_self_msg_sub_info { + 
u8 cmd; + comm_up_self_msg_proc proc; +}; + +struct comm_up_self_msg_info { + u8 cmd_num; + struct comm_up_self_msg_sub_info info[HIFC_COMM_SELF_CMD_MAX]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hifc_mgmt_msg_cb_state { + HIFC_MGMT_MSG_CB_REG = 0, + HIFC_MGMT_MSG_CB_RUNNING, +}; + +struct hifc_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hifc_msg_pf_to_mgmt { + struct hifc_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hifc_recv_msg recv_msg_from_mgmt; + struct hifc_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hifc_api_cmd_chain *cmd_chain[HIFC_API_CMD_MAX]; + + hifc_mgmt_msg_cb recv_mgmt_msg_cb[HIFC_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HIFC_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[HIFC_MOD_HW_MAX]; + + struct comm_up_self_msg_info proc; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; +}; + +struct hifc_mgmt_msg_handle_work { + struct work_struct work; + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *msg; + u16 msg_len; + enum hifc_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; +}; + +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hifc_service_type { + SERVICE_T_NIC = 0, + + SERVICE_T_FC = 5, + + SERVICE_T_MAX, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), +}; + +/* NIC service capability + * 1, The chip supports NIC RQ is 1K + * 2, PF/VF RQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * enable the RSS: + * disable VMDq: each PF at most 64 RQ, VF at most 32 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * + * 3, The chip supports NIC SQ is 1K + * 4, PF/VF SQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + * enable the RSS: + * disable VMDq: each PF at most 64 SQ, VF at most 32 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + */ +struct nic_service_cap { + /* PF resources*/ + u16 max_sqs; + u16 max_rqs; + + /* VF resources, vf obtain through the MailBox mechanism from + * according PF + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + bool lro_en; /* LRO feature enable bit*/ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ + + u16 max_queue_allowed; +}; + +/* PF FC service resource structure defined*/ +struct dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048*/ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048*/ + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ*/ + u32 srq_num; /* Number of SRQ is 2*/ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services*/ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode)*/ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B*/ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* 
Size of SRQ Context (64B)*/ + u32 srqe_size; /* 32B */ +}; + +bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap); + +/* Service interface for obtaining service_cap public fields*/ +/* Obtain service_cap.host_oq_id_mask_val*/ +u8 hifc_host_oq_id_mask(void *hwdev); + +/* Obtain service_cap.dev_cap.max_sqs*/ +u16 hifc_func_max_qnum(void *hwdev); + +/* The following information is obtained from the bar space + * which is recorded by SDK layer. + * Here provide parameter query interface for service + */ +/* func_attr.glb_func_idx, global function index */ +u16 hifc_global_func_id(void *hwdev); +/* func_attr.intr_num, MSI-X table entry in function*/ +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, +}; + +u8 hifc_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */ + +/* func_attr.func_type, 0-PF 1-VF 2-PPF */ +enum func_type hifc_func_type(void *hwdev); + +u8 hifc_ppf_idx(void *hwdev); + +enum hifc_msix_state { + HIFC_MSIX_ENABLE, + HIFC_MSIX_DISABLE, +}; + +void hifc_set_msix_state(void *hwdev, u16 msix_idx, + enum hifc_msix_state flag); + +/* Defines the IRQ information structure*/ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 req_num, + struct irq_info *irq_info_array, u16 *resp_num); +void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id); + +int hifc_sync_time(void *hwdev, u64 time); +void hifc_disable_mgmt_msg_report(void *hwdev); +void hifc_set_func_deinit_flag(void *hwdev); +void hifc_flush_mgmt_workq(void *hwdev); +int hifc_global_func_id_get(void *hwdev, u16 *func_id); +u16 hifc_global_func_id_hw(void *hwdev); +int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); +void hifc_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); +int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev); +void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev); +int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); +int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); +int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev); +void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_module.c b/drivers/scsi/huawei/hifc/hifc_module.c new file mode 100644 index 0000000000000000000000000000000000000000..127e8f9de9ea15f926ca2e98dc477d8e8b3eb747 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_module.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" + +struct unf_cm_handle_op_s hifc_cm_handle = { 0 }; +unsigned int dif_sgl_mode; +unsigned int max_speed = HIFC_SPEED_32G; +unsigned int accum_db_num = 1; +unsigned int dif_type = 0x1; +unsigned int wqe_page_size = 4096; +unsigned int wqe_pre_load = 6; +unsigned int combo_length_kb = 8; +unsigned int cos_bit_map = 0x1f; +unsigned int hifc_dif_type; +unsigned int hifc_dif_enable; +unsigned char hifc_guard; + +/* dfx counter */ +atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t 
rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT]; +unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT]; +unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT]; +atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; + +static void hifc_realease_cmo_op_handle(void) +{ + memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s)); +} + +static void hifc_check_module_para(void) +{ + if (dif_sgl_mode != 0) + dif_sgl_mode = 1; +} + +int hifc_init_module(void) +{ + int ret = RETURN_OK; + + ret = unf_common_init(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]unf_common_init failed"); + + return RETURN_ERROR_S32; + } + + memset(rx_tx_stat, 0, sizeof(rx_tx_stat)); + memset(rx_tx_err, 0, sizeof(rx_tx_err)); + memset(scq_err_stat, 0, sizeof(scq_err_stat)); + memset(aeq_err_stat, 0, sizeof(aeq_err_stat)); + memset(dif_err_stat, 0, sizeof(dif_err_stat)); + memset(link_event_stat, 0, sizeof(link_event_stat)); + memset(link_reason_stat, 0, sizeof(link_reason_stat)); + memset(hba_stat, 0, sizeof(hba_stat)); + memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s)); + memset(up_err_event_stat, 0, sizeof(up_err_event_stat)); + memset(mail_box_stat, 0, sizeof(mail_box_stat)); + memset(hifc_hba, 0, sizeof(hifc_hba)); + + spin_lock_init(&probe_spin_lock); + + /* 2. Module parameters check */ + hifc_check_module_para(); + + /* 4. Get COM Handlers used for low_level */ + if (unf_get_cm_handle_op(&hifc_cm_handle) != RETURN_OK) { + hifc_realease_cmo_op_handle(); + return RETURN_ERROR_S32; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Init HIFC module succeed"); + + return ret; +} + +void hifc_exit_module(void) +{ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]HIFC module removing..."); + + hifc_realease_cmo_op_handle(); + + /* 2. Unregister FC COM module(level) */ + unf_common_exit(); +} + +module_param(dif_sgl_mode, uint, 0444); +module_param(max_speed, uint, 0444); +module_param(wqe_page_size, uint, 0444); +module_param(combo_length_kb, uint, 0444); +module_param(cos_bit_map, uint, 0444); + diff --git a/drivers/scsi/huawei/hifc/hifc_module.h b/drivers/scsi/huawei/hifc/hifc_module.h new file mode 100644 index 0000000000000000000000000000000000000000..5ae9a4962495ca04d50f51f9b9235042cb060fb5 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_module.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_MODULE_H__ +#define __HIFC_MODULE_H__ +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_utils.h" +#include "hifc_hba.h" + +#define HIFC_SPEED_16G 0x10 +#define HIFC_SPEED_32G 0x20 +#define HIFC_MAX_PORT_NUM HIFC_MAX_PROBE_PORT_NUM +#define HIFC_TASK_TYPE_STAT_NUM 128 +#define HIFC_MAX_LINK_EVENT_CNT 4 +#define HIFC_MAX_LINK_REASON_CNT 256 + +/* Declare the global function. 
*/ +extern struct unf_cm_handle_op_s hifc_cm_handle; +extern unsigned int max_speed; +extern unsigned int accum_db_num; +extern unsigned int wqe_page_size; +extern unsigned int dif_type; +extern unsigned int wqe_pre_load; +extern unsigned int combo_length_kb; +extern unsigned int cos_bit_map; + +extern atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT]; +extern unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT]; +extern atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT]; + +#define HIFC_LINK_EVENT_STAT(v_hba, link_ent) \ + (link_event_stat[(v_hba)->probe_index][link_ent]++) +#define HIFC_LINK_REASON_STAT(v_hba, link_rsn) \ + (link_reason_stat[(v_hba)->probe_index][link_rsn]++) +#define HIFC_HBA_STAT(v_hba, hba_stat_type) \ + (hba_stat[(v_hba)->probe_index][hba_stat_type]++) + +#define HIFC_UP_ERR_EVENT_STAT(v_hba, err_type) \ + (atomic64_inc(&up_err_event_stat[(v_hba)->probe_index][err_type])) +#define HIFC_UP_ERR_EVENT_STAT_READ(probe_index, io_type) \ + (atomic64_read(&up_err_event_stat[probe_index][io_type])) +#define HIFC_DIF_ERR_STAT(v_hba, dif_err) \ + (atomic64_inc(&dif_err_stat[(v_hba)->probe_index][dif_err])) +#define HIFC_DIF_ERR_STAT_READ(probe_index, dif_err) \ + (atomic64_read(&dif_err_stat[probe_index][dif_err])) + +#define HIFC_IO_STAT(v_hba, io_type) \ + (atomic64_inc(&rx_tx_stat[(v_hba)->probe_index][io_type])) +#define HIFC_IO_STAT_READ(probe_index, io_type) \ + (atomic64_read(&rx_tx_stat[probe_index][io_type])) + +#define HIFC_ERR_IO_STAT(v_hba, io_type) \ + (atomic64_inc(&rx_tx_err[(v_hba)->probe_index][io_type])) +#define HIFC_ERR_IO_STAT_READ(probe_index, io_type) \ + (atomic64_read(&rx_tx_err[probe_index][io_type])) + +#define HIFC_SCQ_ERR_TYPE_STAT(v_hba, err_type) \ + (atomic64_inc(&scq_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ + (atomic64_read(&scq_err_stat[probe_index][io_type])) +#define HIFC_AEQ_ERR_TYPE_STAT(v_hba, err_type) \ + (atomic64_inc(&aeq_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ + (atomic64_read(&aeq_err_stat[probe_index][io_type])) + +#define HIFC_MAILBOX_STAT(v_hba, io_type) \ + (atomic64_inc(&mail_box_stat[(v_hba)->probe_index][io_type])) + +#define HIFC_COM_UP_ERR_EVENT_STAT(v_hba, err_type) \ + (atomic64_inc(&com_up_event_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, err_type) \ + (atomic64_read(&com_up_event_err_stat[probe_index][err_type])) + +/* + *----------------------------------------------* + * Define function * + *---------------------------------------------- + */ + +#define UNF_LOWLEVEL_ALLOC_LPORT(v_lport, fc_port, stLowLevel)\ + do {\ + if (hifc_cm_handle.pfn_unf_alloc_local_port) { \ + v_lport = \ + hifc_cm_handle.pfn_unf_alloc_local_port((fc_port), \ + (stLowLevel));\ + } 
else { \ + v_lport = NULL; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_ELS_PKG(v_ret, fc_port, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_els_pkg) {\ + v_ret =\ + hifc_cm_handle.pfn_unf_receive_els_pkg(\ + (fc_port), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SEND_ELS_DONE(v_ret, fc_port, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_send_els_done) {\ + v_ret = hifc_cm_handle.pfn_unf_send_els_done((fc_port),\ + (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_GS_PKG(v_ret, fc_port, pkg)\ + do { \ + if (hifc_cm_handle.pfn_unf_receive_gs_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_gs_pkg(\ + (fc_port),\ + (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_GET_CFG_PARMS(v_ret, \ + v_section_name, \ + v_cfg_parm, \ + v_cfg_value, \ + v_item_num) \ + do { \ + if (hifc_cm_handle.pfn_unf_get_cfg_parms) { \ + v_ret = (unsigned int)\ + hifc_cm_handle.pfn_unf_get_cfg_parms(\ + (v_section_name), \ + (v_cfg_parm), \ + (v_cfg_value), \ + (v_item_num)); \ + } else { \ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,\ + UNF_WARN,\ + "Get config parameter function is NULL.");\ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RELEASE_LOCAL_PORT(v_ret, lport) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_release_local_port)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret =\ + hifc_cm_handle.pfn_unf_release_local_port(\ + (lport));\ + } \ + } while (0) + +#define UNF_LOWLEVEL_TO_CM_HINICADM(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_ioctl_to_com_handler)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_ioctl_to_com_handler(\ + lport, pkg); \ + } \ + } while (0) + +#define UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_sgl_entry)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_cm_get_sgl_entry(\ + pkg, v_buf, v_buf_len);\ + } \ + } while (0) + +#define UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len)\ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry(\ + pkg,\ + v_buf,\ + v_buf_len);\ + } \ + } while (0) + +#define UNF_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len, v_dif_flag) \ + do { \ + if (v_dif_flag) { \ + UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\ + } else { \ + UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\ + } \ + } while (0) + +#define UNF_GET_FREE_ESGL_PAGE(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_get_one_free_esgl_page)) {\ + v_ret = NULL; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_get_one_free_esgl_page(\ + lport, pkg); \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SCSI_COMPLETED(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_receive_ini_rsponse)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_receive_ini_rsponse(\ + lport, pkg);\ + } \ + } while (0) + +#define UNF_LOWLEVEL_PORT_EVENT(v_ret, lport, v_events, v_input)\ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_fc_port_link_event)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_fc_port_link_event(\ + lport, v_events, v_input);\ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_FC4LS_PKG(v_ret, fc_port, pkg)\ + do { \ + 
if (hifc_cm_handle.pfn_unf_receive_fc4_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_fc4_pkg(\ + (fc_port), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SEND_FC4LS_DONE(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_send_fc4_done) {\ + v_ret = hifc_cm_handle.pfn_unf_send_fc4_done(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_BLS_PKG(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_bls_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_bls_pkg(\ + (lport), (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_MARKER_STS(v_ret, lport, pkg)\ + do { \ + if (hifc_cm_handle.pfn_unf_receive_marker_status) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_marker_status(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_abts_marker_status) {\ + v_ret =\ + hifc_cm_handle.pfn_unf_receive_abts_marker_status(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_portmng.c b/drivers/scsi/huawei/hifc/hifc_portmng.c new file mode 100644 index 0000000000000000000000000000000000000000..34bc6755226542a7ae28b2669a9d60781d0d39e0 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_portmng.c @@ -0,0 +1,1273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_module.h" +#include "hifc_utils.h" +#include "hifc_hba.h" +#include "hifc_chipitf.h" +#include "hifc_portmng.h" + +struct hifc_port_diag_op_s hifc_diag_op[] = { + { UNF_PORT_DIAG_PORT_DETAIL, hifc_show_fc_port_detail }, + { UNF_PORT_DIAG_RD_WR_REG, hifc_rw_reg }, + { UNF_PORT_DIAG_BUTT, NULL } +}; + +char *wqe_type[HIFC_MAX_COUNTER_TYPE] = { + "TASK_TYPE_EMPTY", + "HIFC_SEND_IWRITE", + "HIFC_SEND_IREAD", + "HIFC_RECV_IRESP", + /* obsoleted */ + "HIFC_RECV_TCMND", + /* FCP Read IO Control Command. */ + "HIFC_SEND_TREAD", + /* FCP Write IO Control Command (XFER_RDY). */ + "HIFC_SEND_TWRITE", + /* Target Mode send FCP_RESP of Read/Write */ + "HIFC_SEND_TRESP", + /* Status for FCP_TREAD/FCP_TWRITE/FCP_TRESP */ + "HIFC_RECV_TSTS", + "HIFC_SEND_ABTS", + "HIFC_SEND_IELS", + "HIFC_SEND_ITMF", + "HIFC_SEND_CLEAN_UP", + "HIFC_SEND_CLEAN_UP_ALL", + /* Receive unsolicited data */ + "HIFC_RECV_UNSOLICITED", + "HIFC_RECV_ERR_WARN", + "HIFC_RECV_SESS_EN", + "HIFC_SEND_SESS_DIS", + "HIFC_SEND_SESS_DEL", + "HIFC_SEND_CQE_AVAILABLE", + /* Receive FCP_CMND From Remote Port and Transfer to driver. 20 */ + "HIFC_RECV_TCMND", + /* Receive ELS From Remote Port and Transfer to driver. */ + "HIFC_RECV_ELS_CMD", + /* Receive ELS From Remote Port and Transfer to driver. */ + "HIFC_RECV_ABTS_CMD", + /* Receive immidiate data. */ + "HIFC_RECV_IMMIDIATE", + /* + * ESL response. PLOGI_ACC, PRLI_ACC will carry the parent context + * parameter indication. + */ + "HIFC_SEND_ELS_RSP", + /* Status for ELS. */ + "HIFC_RECV_ELS_RSP_STS", + /* ABTS response with abort flag. */ + "HIFC_SEND_ABTS_RSP", + /* Status for ABTS. */ + "HIFC_RECV_ABTS_RSP_STS", + /* Abort Command */ + "HIFC_SEND_ABORT", + /* Status for ABORT. */ + "HIFC_RECV_ABORT_STS", + + "HIFC_SEND_ELS", + "HIFC_RECV_ELS_RSP", + /* GS request Command */ + "HIFC_SEND_GS", + /* GS response. 
*/ + "HIFC_RECV_GS_RSP", + /* Status for offload req. */ + "HIFC_RECV_SESS_EN_STS", + /* Status for session disable. */ + "HIFC_RECV_SESS_DIS_STS", + /* Status for session delete. */ + "HIFC_RECV_SESS_DEL_STS", + /* Status for ABORT. */ + "HIFC_RECV_ABTS_RSP", + /* Buffer Clear */ + "HIFC_SEND_BUFFER_CLEAR", + /* Status for Buffer Clear */ + "HIFC_RECV_BUFFER_CLEAR_STS", + /* Flush Sq 40 */ + "HIFC_SEND_FLUSH_SQ", + /* Status for FLUSH_SQ */ + "HIFC_RECV_FLUSH_SQ_STS", + /* Reset session SQE type */ + "HIFC_SEND_SESS_RESET", + /* Reset session SCQE type */ + "HIFC_RECV_SESS_RESET_STS", + "HIFC_RECV_CQE_AVAILABLE_STS", + "HIFC_SEND_DUMP_EXCH", + "HIFC_SEND_INIT_SRQC", + "HIFC_SEND_CLEAR_SRQ", + "HIFC_RECV_CLEAR_SRQ_STS", + "HIFC_SEND_INIT_SCQC", + "HIFC_SEND_DEL_SCQC", + "HIFC_SEND_TMF_RESP", + "HIFC_SEND_DEL_SRQC", + "HIFC_RECV_IMMI_CONTINUE", + "HIFC_RECV_ITMF_RESP", + "HIFC_RECV_MARKER_STS", + "HIFC_SEND_TACK", + "HIFC_SEND_AEQERR", + "HIFC_RECV_ABTS_MARKER_STS" +}; + +char *scq_err_type[HIFC_MAX_COUNTER_TYPE] = { + "HIFC_CQE_COMPLETED", + "HIFC_SESS_HT_INSERT_FAIL", + "HIFC_SESS_HT_INSERT_DUPLICATE", + "HIFC_SESS_HT_BIT_SET_FAIL", + "HIFC_SESS_HT_DELETE_FAIL", + + "HIFC_CQE_BUFFER_CLEAR_IO_COMPLETED", + "HIFC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED", + "HIFC_CQE_SESSION_RST_CLEAR_IO_COMPLETED", + "HIFC_CQE_TMF_RSP_IO_COMPLETED", + "HIFC_CQE_TMF_IO_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED", + "HIFC_CQE_WQE_FLUSH_IO_COMPLETED", + + "HIFC_ERROR_CODE_DATA_DIFX_FAILED", + "HIFC_ERROR_CODE_DATA_TASK_TYPE_INCORRECT", + "HIFC_ERROR_CODE_DATA_OOO_RO", + "HIFC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS", + + "HIFC_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD", + "HIFC_ERROR_CODE_FCP_CONF_NOT_SUPPORTED", + "HIFC_ERROR_CODE_FCP_RSP_OPENED_SEQ", + + "HIFC_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE", + "HIFC_ERROR_CODE_XFER_PEND_XFER_SET", + "HIFC_ERROR_CODE_XFER_OOO_RO", + "HIFC_ERROR_CODE_XFER_NULL_BURST_LEN", + + "HIFC_ERROR_CODE_REC_TIMER_EXPIRE", + "HIFC_ERROR_CODE_E_D_TIMER_EXPIRE", + "HIFC_ERROR_CODE_ABORT_TIMER_EXPIRE", + "HIFC_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH", + "HIFC_IMMI_CMDPKT_SETUP_FAIL", + "HIFC_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL", + + "HIFC_ELS_GS_RSP_EXCH_CHECK_FAIL", + "HIFC_CQE_ELS_GS_SRQE_GET_FAIL", + "HIFC_CQE_DATA_DMA_REQ_FAIL", + "HIFC_CQE_SESSION_CLOSED", + "HIFC_SCQ_IS_FULL", + "HIFC_SRQ_IS_FULL", + "HIFC_DUCHILDCTX_SETUP_FAIL", + "HIFC_ERROR_INVALID_TXMFS", + "HIFC_OFFLOAD_LACKOF_SCQE_FAIL", + "HIFC_INVALID_TASK_ID", + "HIFC_INVALID_PKT_LEN", + "HIFC_CQE_ELS_GS_REQ_CLR_IO_COMPLETED", + "HIFC_CQE_ELS_RSP_CLR_IO_COMPLETED", + "HIFC_CODE_RESID_UNDER_ERR" +}; + +char *com_up_err_event_type[HIFC_MAX_COUNTER_TYPE] = { + "HIFC_EVENT_HEART_LOST", +}; + +char *aeq_err_type[HIFC_MAX_COUNTER_TYPE] = { + /* que_err_code */ + "HIFC_SCQ_IS_FULL_ERR", + "HIFC_SRQ_IS_FULL_ERR", + /* wqe_fatal_err */ + "HIFC_SQE_CHILD_SETUP_WQE_MSN_ERR", + "HIFC_SQE_CHILD_SETUP_WQE_GPA_ERR", + "HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1", + "HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2", + "HIFC_CLEAEQ_WQE_ERR", + "HIFC_WQEFETCH_WQE_MSN_ERR", + "HIFC_WQEFETCH_QUINFO_ERR", + + /* ctx_fatal_err */ + "HIFC_SCQE_ERR_BIT_ERR", + "HIFC_UPDMA_ADDR_REQ_SRQ_ERR", + "HIFC_SOLICHILDDMA_ADDR_REQ_ERR", + "HIFC_UNSOLICHILDDMA_ADDR_REQ_ERR", + "HIFC_SQE_CHILD_SETUP_QINFO_ERR_1", + "HIFC_SQE_CHILD_SETUP_QINFO_ERR_2", + "HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_1", + "HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_2", + "HIFC_CMDPKT_CHILD_SETUP_PMSN_ERR", + 
"HIFC_CLEAEQ_CTX_ERR", + "HIFC_WQEFETCH_CTX_ERR", + "HIFC_FLUSH_QPC_ERR_LQP", + "HIFC_FLUSH_QPC_ERR_SMF", + "HIFC_PREFETCH_QPC_ERR_1", + "HIFC_PREFETCH_QPC_ERR_2", + "HIFC_PREFETCH_QPC_ERR_3", + "HIFC_PREFETCH_QPC_ERR_4", + "HIFC_PREFETCH_QPC_ERR_5", + "HIFC_PREFETCH_QPC_ERR_6", + "HIFC_PREFETCH_QPC_ERR_7", + "HIFC_PREFETCH_QPC_ERR_8", + "HIFC_PREFETCH_QPC_ERR_9", + "HIFC_PREFETCH_QPC_ERR_10", + "HIFC_PREFETCH_QPC_ERR_11", + "HIFC_PREFETCH_QPC_ERR_DEFAULT", + "HIFC_CHILDHASH_INSERT_SW_ERR", + "HIFC_CHILDHASH_LOOKUP_SW_ERR", + "HIFC_CHILDHASH_DEL_SW_ERR", + "HIFC_FLOWHASH_INSERT_SW_ERR", + "HIFC_FLOWHASH_LOOKUP_SW_ERR", + "HIFC_FLOWHASH_DEL_SW_ERR", +}; + +char *err_event_type[HIFC_MAX_COUNTER_TYPE] = { + /* ERR type 0 Err value */ + "HIFC_DRV_2_UP_PARA_ERR", + /* ERR type 1 Err value */ + "HIFC_SFP_SPEED_ERR", + /* ERR type 2 Err value */ + "HIFC_32GPUB_UA_RXESCH_FIFO_OF", + "HIFC_32GPUB_UA_RXESCH_FIFO_UCERR", + + /* ERR type 3 Err value */ + "HIFC_32G_UA_UATX_LEN_ABN", + "HIFC_32G_UA_RXAFIFO_OF", + "HIFC_32G_UA_TXAFIFO_OF", + "HIFC_32G_UA_RXAFIFO_UCERR", + "HIFC_32G_UA_TXAFIFO_UCERR", + + /* ERR type 4 Err value */ + "HIFC_32G_MAC_RX_BBC_FATAL", + "HIFC_32G_MAC_TX_BBC_FATAL", + "HIFC_32G_MAC_TXFIFO_UF", + "HIFC_32G_MAC_PCS_TXFIFO_UF", + "HIFC_32G_MAC_RXBBC_CRDT_TO", + "HIFC_32G_MAC_PCS_RXAFIFO_OF", + "HIFC_32G_MAC_PCS_TXFIFO_OF", + "HIFC_32G_MAC_FC2P_RXFIFO_OF", + "HIFC_32G_MAC_FC2P_TXFIFO_OF", + "HIFC_32G_MAC_FC2P_CAFIFO_OF", + "HIFC_32G_MAC_PCS_RXRSFECM_UCEER", + "HIFC_32G_MAC_PCS_RXAFIFO_UCEER", + "HIFC_32G_MAC_PCS_TXFIFO_UCEER", + "HIFC_32G_MAC_FC2P_RXFIFO_UCEER", + "HIFC_32G_MAC_FC2P_TXFIFO_UCEER", + + /* ERR type 5 Err value */ + "HIFC_NON32G_DFX_FC1_DFX_BF_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_BP_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR", + "HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO", + "HIFC_NON32G_DFX_FC1_ERR_R_RDY", + + /* ERR type 6 Err value */ + "HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR", +}; + +unsigned int hifc_set_port_state(void *v_hba, void *v_para_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_START; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + + port_state = *((enum unf_port_config_state_e *)v_para_in); + switch (port_state) { + case UNF_PORT_CONFIG_STATE_RESET: + ret = (unsigned int)hifc_port_reset(v_hba); + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Cannot set port_state(0x%x)", port_state); + break; + } + + return ret; +} + +unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in) +{ + unsigned long flags = 0; + unsigned int port_speed = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + port_speed = *((unsigned int *)v_para_in); + + if (port_speed > hba->max_support_speed) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Speed set(0x%x) exceed max speed(0x%x)", + port_speed, + hba->max_support_speed); + return UNF_RETURN_ERROR; + } + + if ((port_speed >= HIFC_SPEED_16G) && + (hba->port_topo_cfg == UNF_TOP_LOOP_MASK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Cannot set speed(0x%x) in LOOP 
mode, check it", + port_speed); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_speed_cfg = port_speed; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set port speed finished, configured speed: 0x%x", + hba->port_cfg.port_id, port_speed); + + return RETURN_OK; +} + +unsigned int hifc_set_max_support_speed(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned char max_support_speed = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + max_support_speed = *((unsigned char *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->max_support_speed = max_support_speed; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) speed finished, configured max support speed: 0x%x", + hba->port_cfg.port_id, max_support_speed); + + return RETURN_OK; +} + +unsigned int hifc_set_loop_role(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int loop_role = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + + loop_role = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_loop_role = loop_role; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set loop role finished, configured loop role: 0x%x", + hba->port_cfg.port_id, loop_role); + + return RETURN_OK; +} + +unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in) +{ + unsigned long flags = 0; + unsigned int top = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + + top = *((unsigned int *)v_para_in); + if ((top == UNF_TOP_LOOP_MASK) && + (hba->port_speed_cfg >= HIFC_SPEED_16G)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot set to loop mode at speed(0x%x), check it", + hba->port_speed_cfg); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_topo_cfg = top; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set port topology finished, configured topology: 0x%x", + hba->port_cfg.port_id, top); + + return RETURN_OK; +} + 
+unsigned int hifc_set_port_fcp_conf(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int fcp_conf = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, para_in, return UNF_RETURN_ERROR); + + fcp_conf = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->fcp_conf_cfg = fcp_conf; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) FCP confirm finished, configured value: 0x%x", + hba->port_cfg.port_id, fcp_conf); + + return RETURN_OK; +} + +unsigned int hifc_set_port_bbscn(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int bbscn = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + + bbscn = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_bbscn_cfg = bbscn; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) BBSCN finished, configured value: 0x%x", + hba->port_cfg.port_id, bbscn); + + return RETURN_OK; +} + +unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para) +{ + struct hifc_fw_ver_detail_s version; + void *ver_buf = NULL; + struct unf_fw_version_s *fw_version = (struct unf_fw_version_s *)v_para; + + memset(&version, 0, sizeof(struct hifc_fw_ver_detail_s)); + ver_buf = (void *)(&version); + + /* Obtain UP, ucode and boot version */ + if (hifc_get_software_version(v_hba, ver_buf) != RETURN_OK) + return UNF_RETURN_ERROR; + + if (fw_version->message_type == UNF_DEBUG_TYPE_MESSAGE) + memcpy(fw_version->fw_version, version.up_ver, HIFC_VER_LEN); + + return RETURN_OK; +} + +unsigned int hifc_port_diagnose(void *v_hba, + enum unf_port_diag_op_e op_code, + void *v_para) +{ + unsigned int op_idx = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]port diagnose succeed, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para, return UNF_RETURN_ERROR); + + for (op_idx = 0; op_idx < sizeof(hifc_diag_op) / + sizeof(struct hifc_port_diag_op_s); + op_idx++) { + if (op_code == hifc_diag_op[op_idx].op_code) { + if (!hifc_diag_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "[err]Null operation for diagnose, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + + return UNF_RETURN_ERROR; + } else { + return hifc_diag_op[op_idx].pfn_hifc_operation(v_hba, v_para); + } + } + } + + return RETURN_OK; +} + +int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out) +{ + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, 
UNF_ERR, + "[err]malloc memory failed"); + + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + total = sizeof(wqe_type) / sizeof(char *); + + for (index = 0; index < total; index++) { + if (wqe_type[index]) + counter_info[index] = HIFC_IO_STAT_READ(probe_index, + index); + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_rxtx_error_state(void *v_hba, void *v_buff_out) +{ + char *hba_err_type[HIFC_HBA_STAT_BUTT]; + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int counter = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + total = sizeof(wqe_type) / sizeof(char *); + + for (index = 0; index < total; index++) { + if (wqe_type[index]) { + counter_info[counter] = + HIFC_ERR_IO_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(hba_err_type) / sizeof(char *); + for (index = 0; index < total; index++) { + counter_info[counter] = hba_stat[probe_index][index]; + counter++; + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out) +{ + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int counter = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + + total = sizeof(scq_err_type) / sizeof(char *); + for (index = 1; index < total; index++) { + if (scq_err_type[index]) { + counter_info[counter] = + HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(aeq_err_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (aeq_err_type[index]) { + counter_info[counter] = + HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(err_event_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (err_event_type[index]) { + counter_info[counter] = + HIFC_UP_ERR_EVENT_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(com_up_err_event_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (com_up_err_event_type[index]) { + counter_info[counter] = + HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, + index); + counter++; + } + } + + 
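+	/* The SCQ, AEQ, uP error-event and common uP event counters collected
+	 * above are packed back to back into counter_info; the flat array is
+	 * then returned to hifcadm through buff_out->unresult.result.
+	 */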
memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out) +{ +#define HIFC_LINK_UNKNOW 0 +#define HIFC_LINK_UP 1 +#define HIFC_LINK_DOWN 2 +#define HIFC_FC_DELETE_CMND 3 +#define HIFC_LINK_DOWN_REASON 4 + + int ret = RETURN_OK; + unsigned int index; + unsigned int counter_index; + unsigned int *counter_info = NULL; + unsigned int probe_index = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + counter_info = vmalloc(sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + probe_index = hba->probe_index; + + counter_info[HIFC_LINK_UP] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_UP]; + counter_info[HIFC_LINK_DOWN] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_DOWN]; + counter_info[HIFC_FC_DELETE_CMND] = + (unsigned int)link_event_stat[probe_index][HIFC_FC_DELETE_CMND]; + counter_info[HIFC_LINK_UNKNOW] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_UNKNOW]; + + for (index = 0; index < HIFC_MAX_LINK_REASON_CNT; index++) { + if (link_reason_stat[probe_index][index]) { + counter_index = HIFC_LINK_DOWN_REASON + index; + counter_info[counter_index] = + (unsigned int) + link_reason_stat[probe_index][index]; + } + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + vfree(counter_info); + return ret; +} + +int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear) +{ +#define HIFC_MAX_DIF_ERROR_COUNTER 8 + + int ret = RETURN_OK; + unsigned int index = 0; + unsigned int total = 0; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + + probe_index = hba->probe_index; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]The clear flag of DIF error counter is %u", v_clear); + + if (!v_clear) { + total = HIFC_MAX_DIF_ERROR_COUNTER; + + for (index = 1; index < total; index++) + counter_info[index - 1] = + HIFC_DIF_ERR_STAT_READ(probe_index, index); + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + } else { + memset(dif_err_stat[probe_index], 0, sizeof(dif_err_stat[0])); + } + vfree(counter_info); + return ret; +} + +int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int mode; + struct hifc_adm_dfx_cmd_s *buff_in = NULL; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + 
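+	/* Both directions of this hifcadm exchange use struct
+	 * hifc_adm_dfx_cmd_s: the buffers are null/size checked here, then
+	 * cmd[0] selects the counter group to dump (see enum
+	 * hifc_adm_dfx_mod_e).
+	 */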
HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + mode = buff_in->cmd[0]; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter DFX mode(%u)", mode); + + switch (mode) { + /* HBA WQE and SCQE statistic */ + case HIFC_TX_RX_STATE_COUNTER: + ret = hifc_dfx_get_rxtx_state(v_hba, (void *)buff_out); + break; + + /* TX and RX error counter, HBA counter */ + case HIFC_TX_RX_ERROR_STATE_COUNTER: + ret = hifc_dfx_get_rxtx_error_state(v_hba, (void *)buff_out); + break; + + /* SCQ, AEQ, uP, common uP error counter */ + case HIFC_ERROR_STATE_COUNTER: + ret = hifc_dfx_get_error_state(v_hba, (void *)buff_out); + break; + + case HIFC_LINK_STATE_COUNTER: + ret = hifc_dfx_get_link_state(v_hba, (void *)buff_out); + break; + + case HIFC_HOST_COUNTER: + case HIFC_SESSION_COUNTER: + UNF_LOWLEVEL_TO_CM_HINICADM(ret, + ((struct hifc_hba_s *)v_hba)->lport, + v_input); + break; + + case HIFC_DIF_ERROR_COUNTER: + ret = hifc_dfx_dif_error(v_hba, (void *)buff_out, + buff_in->cmd[1]); + break; + + default: + break; + } + + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Get DFX information failed, mode:0x%0x", mode); + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_dfx_cmd_s); + *v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s); + + return ret; +} + +unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int fec_mode = 0; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, NULL != buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + fec_mode = buff_in->cmd[0]; + + if (fec_mode < HIFC_QUERY_FEC_MODE) { + ret = hifc_mbx_set_fec((struct hifc_hba_s *)v_hba, fec_mode); + hba->fec_status = fec_mode; + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Set fec mode(0x%x) failed", fec_mode); + + return ret; + } + } else if (fec_mode == HIFC_QUERY_FEC_MODE) { + buff_out->cmd[0] = hba->fec_status; + ret = RETURN_OK; + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s); + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return ret; +} + +unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in) +{ +#define HIFC_MML_CLOSE_FEC 0 +#define HIFC_MML_OPEN_FEC_VIA_TTS 1 +#define HIFC_MML_OPEN_FEC_ONLY 2 + + struct unf_port_info_entry_s *port_info = 0; + struct hifc_hba_s *hba = v_hba; + unsigned long flags = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL 
!= v_para_in, return UNF_RETURN_ERROR); + port_info = (struct unf_port_info_entry_s *)v_para_in; + + if (port_info->speed > hba->max_support_speed) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) Speed set(0x%x) exceed max speed(0x%x)", + hba->port_cfg.port_id, port_info->speed, + hba->max_support_speed); + + return UNF_RETURN_ERROR; + } + + if ((port_info->speed >= HIFC_SPEED_16G) && + (port_info->topo == UNF_TOP_LOOP_MASK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) Cannot set speed(0x%x) in LOOP mode, check it", + hba->port_cfg.port_id, port_info->speed); + + return UNF_RETURN_ERROR; + } + + if ((port_info->fec != HIFC_MML_CLOSE_FEC) && + (port_info->fec != HIFC_MML_OPEN_FEC_VIA_TTS) && + (port_info->fec != HIFC_MML_OPEN_FEC_ONLY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) parameter error! please input 0,1 or 2!", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (hifc_mbx_set_fec(hba, port_info->fec) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) set FEC %u failed.\n", + hba->port_cfg.port_id, + port_info->fec); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_speed_cfg = port_info->speed; + hba->port_topo_cfg = port_info->topo; + hba->port_bbscn_cfg = port_info->bb_scn; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + return RETURN_OK; +} + +unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bbscn_mode; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + bbscn_mode = buff_in->cmd[0]; + + if (bbscn_mode == HIFC_SET_BBSCN_VALUE) { + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input); + } else if (bbscn_mode == HIFC_QUERY_BBSCN_VALUE) { + ret = hifc_get_port_info((void *)hba); + if (hba->phy_link == UNF_PORT_LINK_UP) { + buff_out->cmd[0] = hba->active_bb_scn; + buff_out->cmd[1] = hba->port_bbscn_cfg; + } else { + buff_out->cmd[0] = UNF_FALSE; + buff_out->cmd[1] = hba->port_bbscn_cfg; + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s) + + sizeof(unsigned int); + } + + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Execute BBSCN mode(0x%x) failed", bbscn_mode); + + return ret; + } + + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return ret; +} + +unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + struct hifc_adm_lsq_info_s *buff_in = NULL; + struct hifc_adm_lsq_info_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + unsigned int rport_start = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + unsigned int index = 0; + unsigned int 
queue_state[HIFC_QUEUE_STATE_BUTT] = { 0 }; + struct hifc_parent_sq_info_s *sq = NULL; + int out_standing_cnt = 0; + unsigned int in_sq_cnt = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_lsq_info_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_lsq_info_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_lsq_info_s), + return UNF_RETURN_ERROR); + + rport_start = buff_in->cmd[0]; + + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port 0x%x Parent Queue Manager is Empty", + hba->port_cfg.port_id); + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (parent_queue_mgr->parent_queues[index].offload_state < + HIFC_QUEUE_STATE_BUTT) + queue_state[parent_queue_mgr->parent_queues[index].offload_state]++; + } + + buff_out->port_state.port_id = hba->port_cfg.port_id; + buff_out->port_state.rport_num = + (UNF_HIFC_MAXRPORT_NUM - queue_state[HIFC_QUEUE_STATE_FREE]); + buff_out->port_state.init = queue_state[HIFC_QUEUE_STATE_INITIALIZED]; + buff_out->port_state.offloading = + queue_state[HIFC_QUEUE_STATE_OFFLOADING]; + buff_out->port_state.offloaded = + queue_state[HIFC_QUEUE_STATE_OFFLOADED]; + buff_out->port_state.destroying = + queue_state[HIFC_QUEUE_STATE_DESTROYING]; + + index = rport_start; + + if ((index < UNF_HIFC_MAXRPORT_NUM) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE)) { + sq = &parent_queue_mgr->parent_queues[index].parent_sq_info; + + buff_out->sq.sq_id = index; + buff_out->sq.rport_index = sq->rport_index; + buff_out->sq.xid = sq->context_id; + buff_out->sq.cid = sq->cache_id; + buff_out->sq.sid = sq->local_port_id; + buff_out->sq.did = sq->remote_port_id; + buff_out->sq.vpid = parent_queue_mgr->parent_queues[index].parent_sq_info.vport_id; + buff_out->sq.cmd_local_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.local_queue_id; + buff_out->sq.cmd_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id; + buff_out->sq.sts_local_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.local_queue_id; + buff_out->sq.sts_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id; + buff_out->sq.cos = + parent_queue_mgr->parent_queues[index].queue_data_cos; + buff_out->sq.off_load = + parent_queue_mgr->parent_queues[index].offload_state; + + out_standing_cnt = atomic_read(&sq->sqe_minus_cqe_cnt); + /* read memory barrier */ + rmb(); + in_sq_cnt = HIFC_QUEUE_MSN_OFFSET(HIFC_GET_QUEUE_CMSN(sq), + sq->last_pmsn); + /* read memory barrier */ + rmb(); + + buff_out->sq.cmsn = HIFC_GET_QUEUE_CMSN(sq); + buff_out->sq.pmsn = sq->last_pmsn; + buff_out->sq.db_cnt = atomic_read(&sq->sq_dbl_cnt); + buff_out->sq.sqe_cnt = atomic_read(&sq->sq_wqe_cnt); + buff_out->sq.cqe_cnt = atomic_read(&sq->sq_cqe_cnt); + buff_out->sq.in_sq_cnt = in_sq_cnt; + buff_out->sq.in_chip_cnt = out_standing_cnt - (int)in_sq_cnt; + + buff_out->mark = UNF_TRUE; + + } else { + buff_out->mark = UNF_FALSE; + } + + return RETURN_OK; +} + +unsigned int hifc_port_info(struct 
unf_hinicam_pkg *v_input) +{ +#define HIFC_INQUIRE_PORT_NUM_MODE 1 + + unsigned int ret = UNF_RETURN_ERROR; + unsigned int inquire_type; + unsigned int probe_total_num = 0; + unsigned int probe_index = 0; + unsigned int count = 0; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + hifc_get_total_probed_num(&probe_total_num); + + /* First bit is used to obtain total probe number */ + inquire_type = buff_in->cmd[0]; + if (inquire_type == HIFC_INQUIRE_PORT_NUM_MODE) { + buff_out->cmd[0] = probe_total_num; + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return RETURN_OK; + } + + spin_lock(&probe_spin_lock); + for (probe_index = 0; probe_index < HIFC_MAX_PROBE_PORT_NUM; + probe_index++) { + /* Second bit is used to determine to obtain which port */ + if (buff_in->cmd[1] == count) + break; + + if (test_bit((int)probe_index, + (const unsigned long *)probe_bit_map)) + count++; + } + spin_unlock(&probe_spin_lock); + + if (probe_index == HIFC_MAX_PROBE_PORT_NUM) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can't find port(0x%x) total port(0x%x)", + buff_in->cmd[1], probe_total_num); + + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + + return ret; + } + + hba = hifc_hba[probe_index]; + /* Obtain buffer length applied from user */ + v_input->in_size = buff_in->cmd[2]; + if (!hba) + return UNF_RETURN_ERROR; + + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input); + + return ret; +} + +int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin, + unsigned int in_size, void *buff_out, unsigned int *out_size) +{ + int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct unf_hinicam_pkg adm_pkg = { 0 }; + struct hifc_drv_version_s *ver_info; + char ver_str[HIFC_VER_INFO_SIZE] = { 0 }; + + HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != buffin, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != out_size, return UNF_RETURN_ERROR); + + adm_pkg.msg_format = msg_formate; + adm_pkg.buff_in = buffin; + adm_pkg.buff_out = buff_out; + adm_pkg.in_size = in_size; + adm_pkg.out_size = out_size; + + if (msg_formate == HIFC_GET_DRIVER_VERSION) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Enter HIFC_GET_DRIVER_VERSION"); + + snprintf(ver_str, sizeof(ver_str), "%s %s", UNF_FC_VERSION, + __TIME_STR__); + + ver_info = (struct hifc_drv_version_s *)buff_out; + HIFC_CHECK(INVALID_VALUE32, + *out_size >= sizeof(struct hifc_drv_version_s), + return UNF_RETURN_ERROR); + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + *(unsigned int *)out_size = sizeof(struct hifc_drv_version_s); + + return RETURN_OK; + } + + if (msg_formate == HIFC_COMPAT_TEST) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Enter driver compatibility test"); + /* UNF_TRUE: driver is compatible with hifcadm */ + *(unsigned char *)buff_out = 
UNF_TRUE; + *(unsigned int *)out_size = sizeof(unsigned char); + + return RETURN_OK; + } + + HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)uld_dev; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter hifc_adm, msg_formate(0x%x)", msg_formate); + + switch (msg_formate) { + case HIFC_DFX: + ret = hifc_set_dfx_mode((void *)hba, &adm_pkg); + break; + case HIFC_FEC_SET: + ret = (int)hifc_fec_mode((void *)hba, &adm_pkg); + break; + case HIFC_BBSCN: + ret = (int)hifc_bbscn_mode((void *)hba, &adm_pkg); + break; + + case HIFC_PORTSTAT: + ret = (int)hifc_port_stat((void *)hba, &adm_pkg); + break; + + case HIFC_ALL_INFO_OP: + ret = (int)hifc_port_info(&adm_pkg); + break; + + default: + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, &adm_pkg); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter hifc_adm 0x%x", *adm_pkg.out_size); + + return ret; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_portmng.h b/drivers/scsi/huawei/hifc/hifc_portmng.h new file mode 100644 index 0000000000000000000000000000000000000000..76e0884125dcc5ae8ecb1fe4509820236d338384 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_portmng.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_PORTMNG_H__ +#define __HIFC_PORTMNG_H__ + +#include "unf_common.h" +#include "hifc_module.h" +#include "hifc_hba.h" + +#define HIFC_PORT_INFO_SIZE 10 +#define HIFC_DFX_BACK_INFO_SIZE 406 +#define HIFC_DFX_BACK_INFO_SIZE64 203 +#define HIFC_GET_DRIVER_VERSION 16 +#define HIFC_SET_BBSCN_VALUE 0 +#define HIFC_QUERY_BBSCN_VALUE 1 +#define HIFC_QUERY_FEC_MODE 2 + +#define FC_DFX_SEND_INFO_SIZE 5 +#define FC_DFX_BACK_INFO_64 203 +#define FC_DFX_BACK_INFO_32 406 +#define FC_DFX_MAX_IO_RETURN_VALUE 0x12 +#define FC_DFX_MAX_SCSI_CMD 0xFF +#define FC_DFX_SCSI_CMD_FIRST_GET 100 + +struct unf_adm_dfx_session_state { + unsigned char session1 : 4; + unsigned char session2 : 4; +}; + +struct session_counter_s { + u64 target_busy; + u64 host_busy; + u64 remote_port_wwpn; + u64 local_port_wwpn; + u32 device_alloc; + u32 device_destroy; + u32 scsi_state; + u32 remote_port_nportid; + u32 remote_port_state; + u32 remote_port_scsiid; + u32 remote_port_index; + u32 local_port_nportid; + u32 local_port_ini_state; + u32 local_port_state; + u32 port_id; + u32 host_id; + u32 target_id; + u32 abort_io; + u32 device_reset; + u32 target_reset; + u32 bus_reset; + u32 virtual_reset; + u32 abort_io_result; + u32 device_reset_result; + u32 target_reset_result; + u32 bus_reset_result; + u32 virtual_reset_result; +}; + +enum hifc_adm_msg_status_e { + HIFC_ADM_MSG_DONE = 0, + HIFC_ADM_MSG_INCOMPLETE, + HIFC_ADM_MSG_FAILED, + HIFC_ADM_MSG_BUTT +}; + +struct hifc_port_diag_op_s { + enum unf_port_diag_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para); +}; + +enum hifc_adm_dfx_mod_e { + /* HBA WQE and SCQE statistic */ + HIFC_TX_RX_STATE_COUNTER = 0, + /* TX and RX error counter, HBA counter */ + HIFC_TX_RX_ERROR_STATE_COUNTER, + /* SCQ, AEQ, uP, common uP error counter */ + HIFC_ERROR_STATE_COUNTER, + /* Link state counter */ + HIFC_LINK_STATE_COUNTER, + /* Host counter */ + HIFC_HOST_COUNTER, + /* session counter */ + HIFC_SESSION_COUNTER, + /* DIF error counter */ + HIFC_DIF_ERROR_COUNTER, + HIFC_ALL_DFX_TYPE = 50, +}; + +enum hifc_msg_format_e { + HIFC_DFX = 7, + HIFC_FEC_SET, + HIFC_BBSCN, + HIFC_PORTSTAT = 
24, + HIFC_ALL_INFO_OP = 25, + HIFC_COMPAT_TEST = 0xFF +}; + +struct hifc_adm_msg_head_s { + unsigned int size; + unsigned short status; + unsigned short rsvd; +}; + +/* port state for fc_portstat */ +struct hifc_adm_port_state { + unsigned int port_id; + unsigned int rport_num; + unsigned int init; + unsigned int offloading; + unsigned int offloaded; + unsigned int destroying; +}; + +/* SQ & IoStat for fc_portstat */ +struct hifc_adm_sq { + unsigned int sq_id; + unsigned int rport_index; + unsigned int xid; + unsigned int cid; + unsigned int sid; + unsigned int did; + unsigned int vpid; + unsigned int cmd_local_queue_id; + unsigned int cmd_cqm_queue_id; + unsigned int sts_local_queue_id; + unsigned int sts_cqm_queue_id; + unsigned int cos; + unsigned int off_load; + unsigned int cmsn; + unsigned int pmsn; + unsigned int db_cnt; + unsigned int sqe_cnt; + unsigned int cqe_cnt; + unsigned int in_sq_cnt; + unsigned int in_chip_cnt; +}; + +/* hifcadm fc_portstat struct,that is used to show ListSqinfo from mml */ +struct hifc_adm_lsq_info_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; + struct hifc_adm_port_state port_state; + struct hifc_adm_sq sq; + unsigned int mark; +}; + +struct unf_adm_dfx_host_counter_s { + unsigned int host_num; + unsigned int port_id; + unsigned int scsi_session_add_success; + unsigned int scsi_session_add_failed; + unsigned int scsi_session_del_success; + unsigned int scsi_session_del_failed; + unsigned int device_alloc; + unsigned int device_destroy; + unsigned int session_loss_tmo; + unsigned int alloc_scsi_id; + unsigned int reuse_scsi_id; + unsigned int resume_scsi_id; + unsigned int add_start_work_failed; + unsigned int add_closing_work_failed; + unsigned int abort_io; + unsigned int device_reset; + unsigned int target_reset; + unsigned int bus_reset; + unsigned int virtual_reset; + unsigned int abort_io_result; + unsigned int device_reset_result; + unsigned int target_reset_result; + unsigned int bus_reset_result; + unsigned int virtual_reset_result; + struct unf_adm_dfx_session_state session_state[1024]; +}; + +/* hifcadm fc_port struct */ +struct hifc_adm_cmd_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; +}; + +/* hifcadm fc_dfx struct */ +struct hifc_adm_dfx_cmd_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; + union { + unsigned long long result[HIFC_DFX_BACK_INFO_SIZE64]; + struct unf_adm_dfx_host_counter_s host_cnt; + struct session_counter_s session_cnt; + unsigned long long scsi_cmd_in; + unsigned long long scsi_cmd_done; + unsigned long long target_busy; + unsigned long long host_busy; + } unresult; +}; + +unsigned int hifc_port_diagnose(void *v_hba, enum unf_port_diag_op_e op_code, + void *v_para); +unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_bbscn(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_state(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_fcp_conf(void *v_hba, void *v_para_in); +unsigned int hifc_set_loop_role(void *v_hba, void *v_para_in); +unsigned int hifc_set_max_support_speed(void *v_hba, void *v_para_in); +unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para); +int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin, + unsigned int in_size, void *buff_out, unsigned int *out_size); +unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +int 
hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out); +int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out); +int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out); +unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input); +int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear); +unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in); + +#endif /* __HIFC_PORTMNG_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_queue.c b/drivers/scsi/huawei/hifc/hifc_queue.c new file mode 100644 index 0000000000000000000000000000000000000000..2c932d26bf901378b534c6fdf75b54d01ba03f14 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.c @@ -0,0 +1,7020 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_module.h" +#include "hifc_wqe.h" +#include "hifc_service.h" +#include "hifc_chipitf.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" + +#define HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT 0 + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_DONE_MASK (0x00000001) +#else +#define HIFC_DONE_MASK (0x01000000) +#endif +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_OWNER_MASK (0x80000000) +#else +#define HIFC_OWNER_MASK (0x00000080) +#endif +#define HIFC_SQ_LINK_PRE (1 << 2) + +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE (64) +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE - 1) + +#define HIFC_ADDR_64_ALIGN(addr)\ + (((addr) + (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) &\ + ~(HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) + +static unsigned int hifc_get_parity_value(unsigned long long *v_src_data, + unsigned int v_row, + unsigned int v_column) +{ + unsigned int i = 0; + unsigned int j = 0; + unsigned int offset = 0; + unsigned int group = 0; + unsigned int bit_offset = 0; + unsigned int bit_val = 0; + unsigned int tmp_val = 0; + unsigned int dest_data = 0; + + for (i = 0; i < v_row; i++) { + for (j = 0; j < v_column; j++) { + offset = (v_row * j + i); + group = offset / (sizeof(v_src_data[0]) * 8); + bit_offset = offset % (sizeof(v_src_data[0]) * 8); + tmp_val = (v_src_data[group] >> bit_offset) & 0x1; + + if (j == 0) { + bit_val = tmp_val; + continue; + } + + bit_val ^= tmp_val; + } + + bit_val = (~bit_val) & 0x1; + + dest_data |= (bit_val << i); + } + + return dest_data; +} + +/** + * hifc_update_producer_info - update producer pi and obit value + * @q_depth: queue max depth + * @v_pi: pi vaue after updated queue + * @v_owner: owner vaue after updated queue + */ +static void hifc_update_producer_info(unsigned short q_depth, + unsigned short *v_pi, + unsigned short *v_owner) +{ + unsigned short cur_pi = 0; + unsigned short next_pi = 0; + unsigned short owner = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pi, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_owner, return); + + cur_pi = *v_pi; + next_pi = cur_pi + 1; + + if (next_pi < q_depth) { + *v_pi = next_pi; + } else { + /* PI reversal */ + *v_pi = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +/** + * hifc_update_consumer_info - update consumer ci and obit value + * @q_depth: queue max deppth + * @v_ci: ci vaue after updated queue + * @v_owner: owner vaue after updated queue + */ +static void 
hifc_update_consumer_info(unsigned short q_depth, + unsigned short *v_ci, + unsigned short *v_owner) +{ + unsigned short cur_ci = 0; + unsigned short next_ci = 0; + unsigned short owner = 0; + + cur_ci = *v_ci; + next_ci = cur_ci + 1; + + if (next_ci < q_depth) { + *v_ci = next_ci; + } else { + /* CI reversal */ + *v_ci = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +static inline void hifc_update_cq_header(struct hifc_ci_record_s *v_ci_record, + unsigned short ci, + unsigned short owner) +{ + unsigned int size = 0; + struct hifc_ci_record_s ci_record = { 0 }; + + size = sizeof(struct hifc_ci_record_s); + memcpy(&ci_record, v_ci_record, size); + hifc_big_to_cpu64(&ci_record, size); + + ci_record.cmsn = ci + + (unsigned short)(owner << HIFC_CQ_HEADER_OWNER_SHIFT); + ci_record.dump_cmsn = ci_record.cmsn; + hifc_cpu_to_big64(&ci_record, size); + + wmb(); + memcpy(v_ci_record, &ci_record, size); +} + +static void hifc_update_srq_header(struct hifc_db_record *v_pmsn_record, + unsigned short pmsn) +{ + unsigned int size = 0; + struct hifc_db_record pmsn_record = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pmsn_record, return); + + size = sizeof(struct hifc_db_record); + memcpy(&pmsn_record, v_pmsn_record, size); + hifc_big_to_cpu64(&pmsn_record, size); + + pmsn_record.pmsn = pmsn; + pmsn_record.dump_pmsn = pmsn_record.pmsn; + hifc_cpu_to_big64(&pmsn_record, sizeof(struct hifc_db_record)); + + wmb(); + memcpy(v_pmsn_record, &pmsn_record, size); +} + +static unsigned int hifc_alloc_root_sq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int sq_info_size = 0; + struct hifc_root_sq_info_s *root_sq_info = NULL; + + sq_info_size = (unsigned int) + (sizeof(struct hifc_root_sq_info_s) * v_root_info->sq_num); + root_sq_info = (struct hifc_root_sq_info_s *)kmalloc(sq_info_size, + GFP_ATOMIC); + if (!root_sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ(s) failed"); + + return UNF_RETURN_ERROR; + } + + memset(root_sq_info, 0, sq_info_size); + v_root_info->sq_info = root_sq_info; + + return RETURN_OK; +} + +static void hifc_free_root_sq_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index; + struct hifc_root_sq_info_s *sq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + UNF_REFERNCE_VAR(sq_info); + } + kfree(v_root_info->sq_info); + v_root_info->sq_info = NULL; +} + +static void hifc_init_root_sq_base_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short global_base_qpn = 0; + unsigned short max_sq_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_sq_num = hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->qid = (unsigned short)q_index; + sq_info->max_qnum = max_sq_num; + spin_lock_init(&sq_info->root_sq_spin_lock); + sq_info->q_depth = HIFC_ROOT_SQ_DEPTH; + sq_info->wqe_bb_size = HIFC_ROOT_SQ_WQEBB; + sq_info->root_info = v_root_info; + sq_info->global_qpn = global_base_qpn + q_index; + sq_info->owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_info->in_flush = UNF_FALSE; + } +} 
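+/*
+ * Root SQ CI table layout: a single DMA-coherent buffer holds one
+ * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE slot per SQ. The slot's DMA address is
+ * synced to hardware via the CI attribute table, hardware updates the
+ * consumer index in it, and the driver reads it through sq_info->ci_addr
+ * (converting from big endian with be16_to_cpu()).
+ */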
+ +static unsigned int hifc_alloc_root_sq_ci_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int ci_addr_size = 0; + unsigned int ci_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + /* Alignment with 4 Bytes */ + ci_addr_size = HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE * v_root_info->sq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_sq_ci_table_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + ci_addr_size, + &v_root_info->sq_ci_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_sq_ci_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ CI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_sq_ci_table_buff, 0, ci_addr_size); + v_root_info->sq_ci_table_size = ci_addr_size; + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + ci_addr_offset = q_index * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE; + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->ci_addr = (unsigned short *) + ((void *) + (((unsigned char *)v_root_info->virt_sq_ci_table_buff) + + ci_addr_offset)); + sq_info->ci_dma_addr = v_root_info->sq_ci_table_dma + + ci_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_sq_ci_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->sq_ci_table_size, + root_info->virt_sq_ci_table_buff, + root_info->sq_ci_table_dma); + root_info->virt_sq_ci_table_buff = NULL; + root_info->sq_ci_table_dma = 0; + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->ci_addr = NULL; + sq_info->ci_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_sq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + /* Wqe_Base_Size:64; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, sq_info->wqe_bb_size, + sq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&sq_info->cla_addr, + &sq_info->sq_handle); + if ((ret != 0) || (!sq_info->sq_handle) || + (sq_info->cla_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Port(0x%x) slq_allocate Root SQ WQE buffer failed, SQ index = %u, return %u", + hba->port_cfg.port_id, q_index, ret); + + goto free_sq_wqe_buff; + } + } + + return RETURN_OK; + +free_sq_wqe_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + 
struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } +} + +irqreturn_t hifc_root_sq_irq(int v_irq, void *v_sq_info) +{ + struct hifc_root_sq_info_s *sq_info = NULL; + unsigned short cur_ci = 0; + static unsigned int enter_num; + + enter_num++; + sq_info = (struct hifc_root_sq_info_s *)v_sq_info; + + cur_ci = *sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[event]Root SQ Irq Enter Num is %u, Root SQ Ci is %u", + enter_num, cur_ci); + HIFC_REFERNCE_VAR(enter_num, INVALID_VALUE32, IRQ_HANDLED) + HIFC_REFERNCE_VAR(cur_ci, INVALID_VALUE16, IRQ_HANDLED) + + return IRQ_HANDLED; +} + +/* + * hifc_alloc_root_sq_int - Allocate interrupt resources in Root SQ, and + * register callback function. + * @v_root_info: root sq struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || + (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]cfg_alloc_irqs Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]cfg_alloc_irqs Root SQ irq id exceed 1024, msix_entry_idx 0x%x", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->irq_id = (unsigned int)(irq_info.irq_id); + sq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(sq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root SQ 0x%x", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + ret = request_irq(sq_info->irq_id, hifc_root_sq_irq, 0UL, + sq_info->irq_name, sq_info); + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]UNF_OS_REQUEST_IRQ Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + +free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + free_irq(sq_info->irq_id, sq_info); + 
hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(sq_info->irq_id, sq_info); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } +} + +/* + * hifc_cfg_root_sq_ci_tbl - Configure CI address in SQ and interrupt number. + * @v_root_info: root queue info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_cfg_root_sq_ci_tbl( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int queue_index = 0; + dma_addr_t ci_dma_addr = 0; + struct hifc_sq_attr sq_ci_attr; + struct hifc_root_sq_info_s *sq_info = NULL; + void *handle = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + handle = ((struct hifc_hba_s *)v_root_info->phba)->hw_dev_handle; + + for (queue_index = 0; queue_index < v_root_info->sq_num; + queue_index++) { + /* Sync CI addr to hw, cfg attribute table format */ + memset(&sq_ci_attr, 0, sizeof(struct hifc_sq_attr)); + sq_info = (struct hifc_root_sq_info_s *)v_root_info->sq_info + + queue_index; + + sq_ci_attr.dma_attr_off = 0; + sq_ci_attr.pending_limit = 0; + sq_ci_attr.coalescing_time = 0; + sq_ci_attr.intr_en = HIFC_INT_ENABLE; + sq_ci_attr.intr_idx = sq_info->msix_entry_idx; + sq_ci_attr.l2nic_sqn = queue_index; + ci_dma_addr = HIFC_GET_ROOT_SQ_CI_ADDR(sq_info->ci_dma_addr, + queue_index); + sq_ci_attr.ci_dma_base = ci_dma_addr >> + HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT; + + /* Little endian used in UP */ + ret = hifc_set_ci_table(handle, sq_info->qid, &sq_ci_attr); + if (ret != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_set_ci_table failed, return %d", + ret); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +/** + * hifc_alloc_root_sq_db - Allocate Doorbell buffer in root SQ + * @v_root_info: root queue struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + ret = hifc_alloc_db_addr(hba->hw_dev_handle, + &sq_info->normal_db.virt_map_addr, + NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Allocate Root SQ DB address failed, SQ Index = %u, return %d", + q_index, ret); + + goto free_buff; + } 
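+		/* the mapped DB page is written later by hifc_ring_root_sq_db(), so verify it here */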
+ + if (!sq_info->normal_db.virt_map_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]virt_map_addr is invalid, SQ Index = %u", + q_index); + + goto free_buff; + } + } + + return RETURN_OK; + +free_buff: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + sq_info->normal_db.phy_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_afree_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + + sq_info->normal_db.phy_addr = 0; + } +} + +static void hifc_assemble_root_sq_ctx(unsigned int cmd_sq_num, + struct hifc_root_sq_info_s *v_sq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_sq_ctxt *sq_ctx = NULL; + struct hifc_sq_ctxt_block *sq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + sq_info = v_sq_info; + sq_ctx_block = (struct hifc_sq_ctxt_block *)v_buf; + cmdq_header = &sq_ctx_block->cmdq_hdr; + + /* CMD header initialization */ + cmdq_header->num_queues = (unsigned short)cmd_sq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_SQ; + cmdq_header->addr_offset = HIFC_ROOT_SQ_CTX_OFFSET(sq_info->max_qnum, + sq_info->qid); + + /* CMD Header convert to big endian */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_sq_num; q_index++) { + sq_info = v_sq_info + q_index; + sq_ctx = &sq_ctx_block->sq_ctx[q_index]; + memset(sq_ctx, 0, sizeof(struct hifc_sq_ctxt)); + + sq_ctx->sq_ctx_dw0.global_sq_id = sq_info->global_qpn; + sq_ctx->sq_ctx_dw0.ceq_num = 0; + sq_ctx->sq_ctx_dw1.owner = HIFC_ROOT_SQ_LOOP_OWNER; + + ci_init_addr = hifc_slq_get_first_pageaddr(sq_info->sq_handle); + + sq_ctx->sq_ctx_dw2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw4.prefetch_min = + HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + sq_ctx->sq_ctx_dw4.prefetch_max = + HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + sq_ctx->sq_ctx_dw4.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + sq_ctx->sq_ctx_dw5.prefetch_owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_ctx->sq_ctx_dw6.prefetch_ci_wqe_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->prefetch_ci_wqe_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw10.cla_addr_hi = + HIFC_CLA_HIGH_ADDR(sq_info->cla_addr); + sq_ctx->cla_addr_lo = HIFC_CLA_LOW_ADDR(sq_info->cla_addr); + + /* big-little endian convert */ + hifc_cpu_to_big32(sq_ctx, sizeof(struct hifc_sq_ctxt)); + } +} + +static unsigned int 
hifc_cfg_root_sq_ctx(unsigned int cmd_sq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_sq_ctxt) * cmd_sq_num); + v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_cmd_to_ucode_imm failed, uiret %d, ullUcRet %llu", + ret, uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_calc_cmd_sq_num(unsigned int remain_sq_num) +{ + unsigned int sq_num = 0; + + if (remain_sq_num < HIFC_ROOT_CFG_SQ_NUM_MAX) + sq_num = remain_sq_num; + else + sq_num = HIFC_ROOT_CFG_SQ_NUM_MAX; + + return sq_num; +} + +static unsigned int hifc_init_root_sq_ctx(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_sq_num = 0; + unsigned int remain_sq_num = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed."); + + return ENOMEM; + } + + remain_sq_num = root_info->sq_num; + while (remain_sq_num > 0) { + cmd_sq_num = hifc_calc_cmd_sq_num(remain_sq_num); + remain_sq_num -= cmd_sq_num; + + /* Assemble root SQ context */ + hifc_assemble_root_sq_ctx(cmd_sq_num, sq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_sq_ctx(cmd_sq_num, hba->hw_dev_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_sq_ctx failed, return %u", + ret); + break; + } + + sq_info = sq_info + cmd_sq_num; + } + + /* Release cmd buffer */ + hifc_free_cmd_buf(hba->hw_dev_handle, chipif_cmd_buf); + return ret; +} + +static unsigned int hifc_create_root_sqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + /* 1. Allocate sqinfo */ + ret = hifc_alloc_root_sq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_info failed, return %u", + ret); + + return ret; + } + + /* 2. Initialize sqinfo */ + hifc_init_root_sq_base_info(v_root_info); + + /* 3. Apply SQ CI address */ + ret = hifc_alloc_root_sq_ci_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_ci_addr failed, return %u", + ret); + + goto free_sq_info; + } + + /* 4. 
Allocate SQ buffer */ + ret = hifc_alloc_root_sq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_buff failed, return %u", + ret); + + goto free_sq_ci_addr; + } + + /* 5. Register SQ(s) interrupt */ + ret = hifc_alloc_root_sq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_int failed, return %u", + ret); + + goto free_root_sq_buff; + } + + /* 6. Configure CI address in SQ and interrupt number */ + ret = hifc_cfg_root_sq_ci_tbl(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_cfg_root_sq_ci_tbl failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 7. Allocate Doorbell buffer */ + ret = hifc_alloc_root_sq_db(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_db failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 8. Initialize SQ context */ + ret = hifc_init_root_sq_ctx(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_init_root_sq_ctx failed, return %u", + ret); + + goto free_db; + } + + return RETURN_OK; + +free_db: + hifc_afree_root_sq_db(v_root_info); + +free_root_sq_int: + hifc_free_root_sq_int(v_root_info); + +free_root_sq_buff: + hifc_free_root_sq_buff(v_root_info); + +free_sq_ci_addr: + hifc_free_root_sq_ci_addr(v_root_info); + +free_sq_info: + hifc_free_root_sq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_sqs(struct hifc_root_info_s *v_root_info) +{ + /* Free DB resources */ + hifc_afree_root_sq_db(v_root_info); + + /* Free interrupt resources */ + hifc_free_root_sq_int(v_root_info); + + /* Free WQE buffers */ + hifc_free_root_sq_buff(v_root_info); + + /* Free CI address */ + hifc_free_root_sq_ci_addr(v_root_info); + + /* Free Root SQ struct */ + hifc_free_root_sq_info(v_root_info); +} + +static unsigned int hifc_alloc_root_rq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int rq_info_size = 0; + struct hifc_root_rq_info_s *root_rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + rq_info_size = (unsigned int) + (sizeof(struct hifc_root_rq_info_s) * + v_root_info->rq_num); + root_rq_info = (struct hifc_root_rq_info_s *)kmalloc(rq_info_size, + GFP_ATOMIC); + if (!root_rq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ(s) failed"); + + return UNF_RETURN_ERROR; + } + memset(root_rq_info, 0, rq_info_size); + + v_root_info->rq_info = root_rq_info; + + return RETURN_OK; +} + +static void hifc_free_root_rq_info(struct hifc_root_info_s *v_root_info) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + kfree(root_info->rq_info); + root_info->rq_info = NULL; +} + +static void hifc_init_root_rq_basic_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short global_base_qpn = 0; + unsigned short max_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_q_num = 
hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->max_qnum = max_q_num; + rq_info->qid = (unsigned short)q_index; + rq_info->q_depth = HIFC_ROOT_RQ_DEPTH; + rq_info->wqe_bb_size = HIFC_ROOT_RQ_WQEBB; + rq_info->root_info = v_root_info; + rq_info->global_qpn = global_base_qpn + q_index; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_alloc_root_rq_pi_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int pi_addr_size = 0; + unsigned int pi_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + pi_addr_size = HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE * v_root_info->rq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_rq_pi_table_buff = + dma_alloc_coherent(&hba->pci_dev->dev, pi_addr_size, + &v_root_info->rq_pi_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_rq_pi_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ PI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_rq_pi_table_buff, 0, pi_addr_size); + v_root_info->rq_pi_table_size = pi_addr_size; + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + pi_addr_offset = q_index * HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE; + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = + (unsigned short *) + ((unsigned long long)v_root_info->virt_rq_pi_table_buff + + pi_addr_offset); + rq_info->pi_dma_addr = v_root_info->rq_pi_table_dma + + pi_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_rq_pi_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->rq_pi_table_size, + root_info->virt_rq_pi_table_buff, + root_info->rq_pi_table_dma); + root_info->virt_rq_pi_table_buff = NULL; + root_info->rq_pi_table_dma = 0; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = NULL; + rq_info->pi_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_rq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + /* Wqe_Base_Size:32; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, rq_info->wqe_bb_size, + rq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&rq_info->ci_cla_tbl_addr, + &rq_info->rq_handle); + if ((ret != 0) || (!rq_info->rq_handle) || + (rq_info->ci_cla_tbl_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]slq_allocate Root 
RQ Buffer failed, RQ Index = %u, return %u", + q_index, ret); + + goto free_rq_buff; + } + } + + return RETURN_OK; + +free_rq_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } +} + +irqreturn_t hifc_root_rq_irq(int v_irq, void *v_rq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_rq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_root_rq_info_s *)v_rq_info)->tasklet); + + return IRQ_HANDLED; +} + +static unsigned int hifc_alloc_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]cfg_alloc_irqs Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[warn]cfg_alloc_irqs Root RQ irq id exceed 1024, msix_entry_idx %u", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->irq_id = (unsigned int)(irq_info.irq_id); + rq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(rq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root RQ %u", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + + tasklet_init(&rq_info->tasklet, hifc_process_root_rqe, + (unsigned long)rq_info); + + ret = request_irq(rq_info->irq_id, hifc_root_rq_irq, 0UL, + rq_info->irq_name, rq_info); + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]UNF_OS_REQUEST_IRQ Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + memset(rq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + 
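+/* error unwind: free the IRQs and kill the tasklets already set up for the first cfg_num RQs */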
+free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } +} + +static unsigned int hifc_alloc_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rqc_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* 2048 * Size */ + rqc_buff_size = rq_info->q_depth * + sizeof(struct hifc_root_rq_complet_info_s); + rq_info->rq_completion_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + rqc_buff_size, + &rq_info->rq_completion_dma, + GFP_KERNEL); + if (!rq_info->rq_completion_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ completion buffer failed, RQ Index = %u.", + q_index); + + goto free_buff; + } + memset(rq_info->rq_completion_buff, 0, rqc_buff_size); + rq_info->rqc_buff_size = rqc_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + 
rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } +} + +static unsigned int hifc_alloc_root_rq_rcv_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rq_rcv_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* Depth(2048) * Buff_Size(2048) */ + rq_rcv_buff_size = rq_info->q_depth * + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + rq_info->rq_rcv_buff = dma_alloc_coherent(&hba->pci_dev->dev, + rq_rcv_buff_size, + &rq_info->rq_rcv_dma, + GFP_KERNEL); + if (!rq_info->rq_rcv_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ receive buffer failed, RQ index = %u", + q_index); + + goto free_buff; + } + memset(rq_info->rq_rcv_buff, 0, rq_rcv_buff_size); + rq_info->rq_rcv_buff_size = rq_rcv_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_rcv_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } +} + +static void hifc_init_root_rq_wqe(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short wqe_index = 0; + unsigned int dma_offset = 0; + dma_addr_t rq_completion_dma = 0; + dma_addr_t rq_rcv_dma = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct nic_wqe_ctrl_sec *wqe_ctrl = NULL; + struct nic_rq_sge_sec *buff_sge = NULL; + struct nic_rq_bd_sec *rq_buff_bd = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + for (wqe_index = 0; wqe_index < rq_info->q_depth; wqe_index++) { + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, + wqe_index); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, "[err]Get Rq Wqe failed"); + + return; + } + memset(rq_wqe, 0, sizeof(struct nic_rq_wqe)); + + /* Initialize ctrl section */ + wqe_ctrl = &rq_wqe->rq_wqe_ctrl_sec; + wqe_ctrl->bs.owner 
= HIFC_ROOT_RQ_LOOP_OWNER; + /* control section = 8 bytes */ + wqe_ctrl->bs.ctrl_sec_len = 1; + /* complete section = 16B for SGE */ + wqe_ctrl->bs.completion_sec_len = 2; + /* bd section = 8B */ + wqe_ctrl->bs.buf_desc_sec_len = 1; + wqe_ctrl->bs.cf = 1; /* use SGE */ + + /* Fill wqe receive information section */ + buff_sge = &rq_wqe->rx_sge; + dma_offset = sizeof(struct hifc_root_rq_complet_info_s) + * wqe_index; + rq_completion_dma = rq_info->rq_completion_dma + + dma_offset; + buff_sge->wb_addr_low = + HIFC_LOW_32_BITS(rq_completion_dma); + buff_sge->wb_addr_high = + HIFC_HIGH_32_BITS(rq_completion_dma); + buff_sge->bs0.length = + sizeof(struct hifc_root_rq_complet_info_s); + + /* Fill db */ + rq_buff_bd = &rq_wqe->pkt_buf_addr; + dma_offset = HIFC_ROOT_RQ_RECV_BUFF_SIZE * wqe_index; + rq_rcv_dma = rq_info->rq_rcv_dma + dma_offset; + rq_buff_bd->pkt_buf_addr_high = + HIFC_HIGH_32_BITS(rq_rcv_dma); + rq_buff_bd->pkt_buf_addr_low = + HIFC_LOW_32_BITS(rq_rcv_dma); + + /* big-little endian convert */ + hifc_cpu_to_big32((void *)rq_wqe, + sizeof(struct nic_rq_wqe)); + } + + rq_info->pi = rq_info->q_depth - 1; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_calc_cmd_rq_num(unsigned int remain_rq_num) +{ + unsigned int ret = 0; + + if (remain_rq_num < HIFC_ROOT_CFG_RQ_NUM_MAX) + ret = remain_rq_num; + else + ret = HIFC_ROOT_CFG_RQ_NUM_MAX; + + return ret; +} + +static void hifc_assemble_root_rq_ctx(unsigned int cmd_rq_num, + struct hifc_root_rq_info_s *v_rq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_rq_ctxt *rq_ctx = NULL; + struct hifc_rq_ctxt_block *rq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + rq_info = v_rq_info; + rq_ctx_block = (struct hifc_rq_ctxt_block *)v_buf; + cmdq_header = &rq_ctx_block->cmdq_hdr; + + /* cmdheader initialization */ + cmdq_header->num_queues = (unsigned short)cmd_rq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_RQ; + cmdq_header->addr_offset = HIFC_ROOT_RQ_CTX_OFFSET(rq_info->max_qnum, + rq_info->qid); + + /* big-little endian convert */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_rq_num; q_index++) { + rq_info = v_rq_info + q_index; + rq_ctx = &rq_ctx_block->rq_ctx[q_index]; + memset(rq_ctx, 0, sizeof(struct hifc_rq_ctxt)); + + rq_ctx->pi_gpa_hi = HIFC_HIGH_32_BITS(rq_info->pi_dma_addr); + rq_ctx->pi_gpa_lo = HIFC_LOW_32_BITS(rq_info->pi_dma_addr); + rq_ctx->bs2.ci = 0; + rq_ctx->bs0.pi = 0; + + rq_ctx->bs6.ci_cla_tbl_addr_hi = + HIFC_CLA_HIGH_ADDR(rq_info->ci_cla_tbl_addr); + rq_ctx->ci_cla_tbl_addr_lo = + HIFC_CLA_LOW_ADDR(rq_info->ci_cla_tbl_addr); + + ci_init_addr = hifc_slq_get_first_pageaddr(rq_info->rq_handle); + rq_ctx->bs2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + rq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + + rq_ctx->bs.ceq_en = 0; + rq_ctx->bs.owner = HIFC_ROOT_RQ_LOOP_OWNER; + rq_ctx->bs0.int_num = rq_info->msix_entry_idx; + + rq_ctx->bs3.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + rq_ctx->bs3.prefetch_max = HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + rq_ctx->bs3.prefetch_min = HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + rq_ctx->bs5.prefetch_ci_wqe_page_addr_hi = + rq_ctx->bs2.ci_wqe_page_addr_hi; + 
rq_ctx->prefetch_ci_wqe_page_addr_lo = + rq_ctx->ci_wqe_page_addr_lo; + + /* big-little endian convert */ + hifc_cpu_to_big32(rq_ctx, sizeof(struct hifc_rq_ctxt)); + } +} + +static unsigned int hifc_cfg_root_rq_ctx(unsigned int cmd_rq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_rq_ctxt) * cmd_rq_num); + v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cmdq_direct_resp failed, uiret %d, ullUcRet %llu", + ret, uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_init_root_rq_ctx( + void *v_handle, + struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_rq_num = 0; + unsigned int remain_rq_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(v_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed"); + + return ENOMEM; + } + + remain_rq_num = root_info->rq_num; + while (remain_rq_num > 0) { + cmd_rq_num = hifc_calc_cmd_rq_num(remain_rq_num); + remain_rq_num -= cmd_rq_num; + + /* Assemble cmd buffer context */ + hifc_assemble_root_rq_ctx(cmd_rq_num, rq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_rq_ctx(cmd_rq_num, v_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_rq_ctx failed, return %u", + ret); + break; + } + + rq_info = rq_info + cmd_rq_num; + } + + /* Free cmd buffer */ + hifc_free_cmd_buf(v_handle, chipif_cmd_buf); + + return ret; +} + +static void hifc_update_root_rq_pi(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + wmb(); + *rq_info->pi_vir_addr = cpu_to_be16(rq_info->pi); + } +} + +static unsigned int hifc_create_root_rqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_root_info->phba; + + /* Allocate RQ struct */ + ret = 
hifc_alloc_root_rq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_info failed"); + + return ret; + } + + /* Initialize RQ basic information */ + hifc_init_root_rq_basic_info(v_root_info); + + /* Apply RQ(s) PI GPA */ + ret = hifc_alloc_root_rq_pi_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_pi_addr failed, return %u", + ret); + + goto free_root_rq_info; + } + + /* Apply RQ's buffer */ + ret = hifc_alloc_root_rq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_buff failed, return %u", + ret); + + goto free_rq_pi_addr; + } + + /* Apply completion buffer */ + ret = hifc_alloc_root_rq_completion_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_completion_buff failed, return %u", + ret); + + goto free_root_rq_buff; + } + + /* Allocate root RQ receiving buffer */ + ret = hifc_alloc_root_rq_rcv_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_rcv_buff failed, return %u", + ret); + + goto free_root_rq_completion_buff; + } + + /* Initialize RQ WQE struct */ + hifc_init_root_rq_wqe(v_root_info); + + /* Apply RQ's interrupt resources */ + ret = hifc_alloc_root_rq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_int failed, return %u", + ret); + + goto free_root_rq_receive_buff; + } + + /* Initialize RQ context */ + ret = hifc_init_root_rq_ctx(hba->hw_dev_handle, v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_init_root_rq_ctx Failed, return %u", ret); + + goto free_root_rq_int; + } + + /* Update SQ PI */ + hifc_update_root_rq_pi(v_root_info); + return RETURN_OK; + +free_root_rq_int: + hifc_free_root_rq_int(v_root_info); + +free_root_rq_receive_buff: + hifc_free_root_rq_rcv_buff(v_root_info); + +free_root_rq_completion_buff: + hifc_free_root_rq_completion_buff(v_root_info); + +free_root_rq_buff: + hifc_free_root_rq_buff(v_root_info); + +free_rq_pi_addr: + hifc_free_root_rq_pi_addr(v_root_info); + +free_root_rq_info: + hifc_free_root_rq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_rqs(struct hifc_root_info_s *v_root_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + hifc_free_root_rq_rcv_buff(v_root_info); + + hifc_free_root_rq_completion_buff(v_root_info); + + hifc_free_root_rq_int(v_root_info); + + hifc_free_root_rq_buff(v_root_info); + + hifc_free_root_rq_pi_addr(v_root_info); + + hifc_free_root_rq_info(v_root_info); +} + +static unsigned int hifc_cfg_root_ctx(struct hifc_root_info_s *v_root_info) +{ + int ret; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + + ret = hifc_set_root_ctxt(hba->hw_dev_handle, HIFC_ROOT_RQ_DEPTH, + HIFC_ROOT_SQ_DEPTH, + HIFC_ROOT_RQ_RECV_BUFF_SIZE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_vat_info_set failed, return [%d]", + ret); + + return UNF_RETURN_ERROR; + } + + 
return RETURN_OK; +} + +static void hifc_init_root_basic_info(struct hifc_hba_s *v_hba) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + root_info = &v_hba->root_info; + memset(root_info, 0, sizeof(struct hifc_root_info_s)); + + root_info->phba = (void *)v_hba; + + root_info->rq_num = HIFC_ROOT_RQ_NUM; + root_info->sq_num = HIFC_ROOT_SQ_NUM; +} + +unsigned int hifc_create_root_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int slq_ret = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Initialize basic root information */ + hba = (struct hifc_hba_s *)v_hba; + hifc_init_root_basic_info(hba); + + root_info = &hba->root_info; + + /* slq Init */ + slq_ret = hifc_slq_init(hba->hw_dev_handle, + (int)(root_info->sq_num + root_info->rq_num)); + if (slq_ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_slq_init init failed, ret:0x%x", slq_ret); + + return UNF_RETURN_ERROR; + } + + /* Create SQ, and send cmdq to ucode for initialization of SQ context */ + ret = hifc_create_root_sqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_sqs failed, return [%u]", + ret); + + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Create RQ */ + ret = hifc_create_root_rqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_rqs failed, return [%u]", + ret); + + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Configure root context */ + ret = hifc_cfg_root_ctx(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cfg_root_ctx failed, return [%u]", ret); + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + return RETURN_OK; +} + +void hifc_destroy_root_queues(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + + hifc_slq_uninit(hba->hw_dev_handle); +} + +static void hifc_ring_root_sq_db(struct hifc_hba_s *v_hba, + struct hifc_root_sq_info_s *v_sq_info) +{ + struct nic_tx_doorbell db; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_sq_info, return); + + memset(&db, 0, sizeof(struct nic_tx_doorbell)); + + db.bs0.srv_type = HIFC_DOORBELL_SQ_TYPE; + db.bs0.queue_id = v_sq_info->qid; + db.bs0.pi_high = v_sq_info->pi >> HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT; + db.bs0.cos = 0; + + db.dw0 = cpu_to_be32(db.dw0); + wmb(); + + *((unsigned long long *)(v_sq_info->normal_db.virt_map_addr) + + (v_sq_info->pi & HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK)) = + *(unsigned long long *)&db; +} + +static int hifc_root_sq_is_empty(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + if (cur_pi == cur_ci) + return UNF_TRUE; + + return UNF_FALSE; +} + 
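+/*
+ * hifc_root_sq_is_full - check whether only the reserved WQE slot is left
+ * @v_sq_info: root SQ info
+ *
+ * One WQE is kept unused, so the queue is full when PI - CI (modulo
+ * q_depth) equals q_depth - 1. CI is read from the hardware-updated CI
+ * table and converted from big endian first.
+ * @Return: UNF_TRUE - full, UNF_FALSE - not full
+ */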
+static int hifc_root_sq_is_full(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + unsigned short valid_wqe_num = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + valid_wqe_num = v_sq_info->q_depth - 1; + + if ((valid_wqe_num == cur_pi - cur_ci) || + (valid_wqe_num == v_sq_info->q_depth + cur_pi - cur_ci)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Root SQ[%u] is full, PI %u, CI %u", + v_sq_info->global_qpn, cur_pi, cur_ci); + return UNF_TRUE; + } + + return UNF_FALSE; +} + +static void hifc_build_root_wqe_qsf(void *v_qsf) +{ + struct hifc_root_qsf_s *root_qsf = NULL; + + root_qsf = (struct hifc_root_qsf_s *)v_qsf; + + /* route to ucode */ + /* MSS range 0x50~0x3E00 */ + root_qsf->route_to_ucode = 1; + root_qsf->mss = 0x3E00; +} + +unsigned int hifc_root_sq_enqueue(void *v_hba, struct hifc_root_sqe_s *v_sqe) +{ + unsigned char task_type = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sqe_s *sqe = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sqe, + return UNF_RETURN_ERROR); + + /* Root use one sq by default */ + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + task_type = (unsigned char)v_sqe->task_section.fc_dw0.task_type; + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flag); + + /* Check flush state */ + if (sq_info->in_flush == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_IO_FLUSHED); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is flushing"); + return UNF_RETURN_ERROR; + } + + /* Check root SQ whether is full */ + if (hifc_root_sq_is_full(sq_info) == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_SQ_FULL); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is full"); + return UNF_RETURN_ERROR; + } + + if (unlikely(!hba->heart_status)) { + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + return UNF_RETURN_ERROR; + } + /* Get available wqe */ + sqe = (struct hifc_root_sqe_s *)hifc_slq_get_addr(sq_info->sq_handle, + sq_info->pi); + if (!sqe) { + HIFC_ERR_IO_STAT(hba, task_type); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Get root SQ Sqe failed, PI %u", sq_info->pi); + + return UNF_RETURN_ERROR; + } + + hifc_build_root_wqe_qsf((void *)(&v_sqe->ctrl_section.qsf)); + HIFC_IO_STAT(hba, task_type); + hifc_convert_root_wqe_to_big_endian(v_sqe); + memcpy(sqe, v_sqe, sizeof(struct hifc_root_sqe_s)); + + /* Update PI and Obit */ + hifc_update_producer_info(sq_info->q_depth, &sq_info->pi, + &sq_info->owner); + + /* doorbell */ + hifc_ring_root_sq_db(hba, sq_info); + + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + UNF_REFERNCE_VAR(task_type); + + return RETURN_OK; +} + +static int hifc_root_rqe_done( 
+ struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->done != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +static void hifc_clear_root_rqe_done( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_completion_info, return); + + v_completion_info->done = 0; +} + +static int hifc_check_root_rqe_type( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->fc_pkt != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +void hifc_update_root_rq_info(struct hifc_root_rq_info_s *v_rq_info, + unsigned short v_rcv_buf_num) +{ + unsigned short loop = 0; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + + for (loop = 0; loop < v_rcv_buf_num; loop++) { + /* Obtain CompletionInfo */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (v_rq_info->rq_completion_buff) + v_rq_info->ci; + + /* big-little endian convert */ + memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + /* Clear done bit */ + hifc_clear_root_rqe_done(&completion_info); + + /* Write back done bit */ + hifc_cpu_to_big32(&completion_info, sizeof(completion_info)); + memcpy(complet_info, &completion_info, sizeof(completion_info)); + + /* Update Obit and PI in RQE */ + hifc_update_producer_info(v_rq_info->q_depth, &v_rq_info->pi, + &v_rq_info->owner); + + v_rq_info->ci = ((v_rq_info->ci + 1) < v_rq_info->q_depth) ? + (v_rq_info->ci + 1) : 0; + + wmb(); + *v_rq_info->pi_vir_addr = cpu_to_be16(v_rq_info->pi); + } +} + +void hifc_root_rqe_analysis( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *v_rq_info, + struct hifc_root_rq_complet_info_s *v_completion_info, + unsigned short v_rcv_buf_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_completion_info->sts_only) { + /* case1: receive ElsRsp Status */ + if (v_completion_info->status == RETURN_OK) + ret = hifc_rq_rcv_els_rsp_sts(v_hba, v_completion_info); + else + ret = hifc_rq_rcv_srv_err(v_hba, v_completion_info); + } else { + ret = hifc_rcv_service_frame_from_rq(v_hba, v_rq_info, + v_completion_info, + v_rcv_buf_num); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[warn]Up Layer Process RQE Frame or Status abnormal(0x%x)", + ret); + } +} + +void hifc_process_root_rqe(unsigned long v_rq_info) +{ + int rqe_done = UNF_FALSE; + int rqe_valid = UNF_FALSE; + unsigned short rcv_buf_num = 0; + unsigned int index = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + + struct hifc_root_rq_info_s *rq_info = + (struct hifc_root_rq_info_s *)v_rq_info; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, rq_info, return); + + root_info = (struct hifc_root_info_s *)(rq_info->root_info); + hba = (struct hifc_hba_s *)(root_info->phba); + + for (index = 0; index < HIFC_RQE_MAX_PROCESS_NUM_PER_INTR; index++) { + /* Obtain RQE */ + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, rq_info->ci); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, "[err]Get Rqe failed"); + break; + } + + /* Check whether to process RQE */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (rq_info->rq_completion_buff) + rq_info->ci; + + 
memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + rqe_done = hifc_root_rqe_done(&completion_info); + if (rqe_done != UNF_TRUE) { + atomic_set(&rq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + break; + } + + rmb(); + + rcv_buf_num = (completion_info.buf_length + + HIFC_ROOT_RQ_RECV_BUFF_SIZE - 1) / + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + if (rcv_buf_num == 0) + rcv_buf_num = 1; + + rqe_valid = hifc_check_root_rqe_type(&completion_info); + if (rqe_valid == UNF_TRUE) { + hifc_root_rqe_analysis(hba, rq_info, &completion_info, + rcv_buf_num); + } else { + /* Receive illegal frames and record */ + HIFC_IO_STAT(hba, HIFCOE_TASK_T_BUTT); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) Receive an unsupported frame, drop it", + hba->port_cfg.port_id); + } + + hifc_update_root_rq_info(rq_info, rcv_buf_num); + } + + if (index == HIFC_RQE_MAX_PROCESS_NUM_PER_INTR) + tasklet_schedule(&rq_info->tasklet); +} + +static inline int hifc_is_scq_link_wqe(struct hifc_scq_info_s *v_scq_info) +{ + unsigned short custom_scqe_num = 0; + + custom_scqe_num = v_scq_info->ci + 1; + + if ((custom_scqe_num % v_scq_info->wqe_num_per_buf == 0) || + (v_scq_info->valid_wqe_num == custom_scqe_num)) + return UNF_TRUE; + else + return UNF_FALSE; +} + +static inline struct hifcoe_scqe_type_s *hifc_get_scq_entry( + struct hifc_scq_info_s *v_scq_info) +{ + unsigned int buf_id = 0; + unsigned short buf_offset = 0; + unsigned short ci = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return NULL); + + ci = v_scq_info->ci; + buf_id = ci / v_scq_info->wqe_num_per_buf; + buf = &v_scq_info->cqm_scq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = (unsigned short)(ci % v_scq_info->wqe_num_per_buf); + + return (struct hifcoe_scqe_type_s *)(buf->va) + buf_offset; +} + +static inline int hifc_is_cqe_done(unsigned int *v_done, unsigned int *v_owner, + unsigned short v_driver_owner) +{ + return ((((unsigned short)(!!(*v_done & HIFC_DONE_MASK)) == + v_driver_owner) && ((unsigned short) + (!!(*v_owner & HIFC_OWNER_MASK)) == v_driver_owner)) ? 
+ UNF_TRUE : UNF_FALSE); +} + +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_wq_header_s *queue_header = NULL; + struct hifcoe_scqe_type_s *scqe = NULL; + struct hifcoe_scqe_type_s tmp_scqe; + + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scq_info, return ret); + + queue_header = (struct hifc_wq_header_s *) + (void *)(scq_info->cqm_scq_info->q_header_vaddr); + + for (index = 0; index < proc_cnt;) { + /* If linked wqe, then update CI */ + if (hifc_is_scq_link_wqe(scq_info) == UNF_TRUE) { + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, + &scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, + scq_info->ci, + scq_info->ci_owner); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Current wqe is a linked wqe"); + continue; + } + + /* Get SCQE and then check obit & donebit whether been set */ + scqe = hifc_get_scq_entry(scq_info); + if (unlikely(!scqe)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[warn]Scqe is NULL"); + break; + } + + if (hifc_is_cqe_done((unsigned int *)(void *)(&scqe->wd0), + (unsigned int *)(void *)(&scqe->ch.wd0), + scq_info->ci_owner) != UNF_TRUE) { + atomic_set(&scq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Now has no valid scqe"); + break; + } + + /* rmb & do memory copy */ + rmb(); + memcpy(&tmp_scqe, scqe, sizeof(struct hifcoe_scqe_type_s)); + + hifc_big_to_cpu32(&tmp_scqe, sizeof(struct hifcoe_scqe_type_s)); + + /* process SCQ entry */ + ret = hifc_rcv_scqe_entry_from_scq(scq_info->phba, + (void *)&tmp_scqe, + scq_info->queue_id); + if (unlikely(ret != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]QueueId(0x%x) scqn(0x%x) scqe process error at CI(0x%x)", + scq_info->queue_id, scq_info->scqn, + scq_info->ci); + } + + /* Update Driver's CI & Obit */ + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, &scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, scq_info->ci, + scq_info->ci_owner); + index++; + } + /* Re-schedule again if necessary */ + if (proc_cnt == index) + tasklet_schedule(&scq_info->tasklet); + + return index; +} + +void hifc_set_scq_irq_cfg(struct hifc_hba_s *hba, unsigned int mode, + unsigned short msix_index) +{ + unsigned char pending_limt = 0; + unsigned char coalesc_timer_cfg = 0; + + struct nic_interrupt_info info = { 0 }; + + if (mode != HIFC_SCQ_INTR_LOW_LATENCY_MODE) { + pending_limt = 5; + coalesc_timer_cfg = 10; + } + + memset(&info, 0, sizeof(info)); + info.interrupt_coalesc_set = 1; + info.lli_set = 0; + info.pending_limt = pending_limt; + info.coalesc_timer_cfg = coalesc_timer_cfg; + info.resend_timer_cfg = 0; + info.msix_index = msix_index; + hifc_set_interrupt_cfg(hba->hw_dev_handle, info); +} + +void hifc_process_scq_cqe(unsigned long v_scq_info) +{ + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + HIFC_CHECK(INVALID_VALUE32, scq_info, return); + + hifc_process_scq_cqe_entity(v_scq_info, + HIFC_CQE_MAX_PROCESS_NUM_PER_INTR); +} + +irqreturn_t hifc_scq_irq(int v_irq, void *v_scq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_scq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_scq_info_s *)v_scq_info)->tasklet); + + return IRQ_HANDLED; +} + +static 
unsigned int hifc_alloc_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, + return UNF_RETURN_ERROR); + + /* 1. Alloc & check SCQ IRQ */ + hba = (struct hifc_hba_s *)(v_scq_info->phba); + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate scq irq failed, return %d", ret); + + return UNF_RETURN_ERROR; + } + + if (irq_info.msix_entry_idx >= HIFC_SCQ_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]SCQ irq id exceed %d, msix_entry_idx %d", + HIFC_SCQ_INT_ID_MAX, irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + + return UNF_RETURN_ERROR; + } + + v_scq_info->irq_id = (unsigned int)(irq_info.irq_id); + v_scq_info->msix_entry_idx = (unsigned short)(irq_info.msix_entry_idx); + + ret = snprintf(v_scq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "fc_scq%u_%x_msix%u", v_scq_info->queue_id, + hba->port_cfg.port_id, v_scq_info->msix_entry_idx); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + /* 2. SCQ IRQ tasklet init */ + tasklet_init(&v_scq_info->tasklet, hifc_process_scq_cqe, + (unsigned long)v_scq_info); + + /* 3. Request IRQ for SCQ */ + ret = request_irq(v_scq_info->irq_id, hifc_scq_irq, 0UL, + v_scq_info->irq_name, v_scq_info); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Request SCQ irq failed, SCQ Index = %u, return %d", + v_scq_info->queue_id, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; + return UNF_RETURN_ERROR; + } + return RETURN_OK; +} + +static void hifc_free_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + hba = (struct hifc_hba_s *)(v_scq_info->phba); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(v_scq_info->irq_id, v_scq_info); + tasklet_kill(&v_scq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; +} + +static void hifc_init_scq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_scq, + unsigned int queue_id, + struct hifc_scq_info_s **v_ppscq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_scq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_ppscq_info, return); + + *v_ppscq_info = &v_hba->scq_info[queue_id]; + (*v_ppscq_info)->queue_id = queue_id; + (*v_ppscq_info)->scqn = v_cqm_scq->index; + (*v_ppscq_info)->phba = (void *)v_hba; + + (*v_ppscq_info)->cqm_scq_info = v_cqm_scq; + (*v_ppscq_info)->wqe_num_per_buf = v_cqm_scq->q_room_buf_1.buf_size / + HIFC_SCQE_SIZE; + (*v_ppscq_info)->wqe_size = HIFC_SCQE_SIZE; + + (*v_ppscq_info)->valid_wqe_num = (HIFC_SCQ_IS_STS(queue_id) ? 
+ HIFC_STS_SCQ_DEPTH : HIFC_CMD_SCQ_DEPTH); + (*v_ppscq_info)->scqc_cq_depth = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQC_CQ_DEPTH : HIFC_CMD_SCQC_CQ_DEPTH); + (*v_ppscq_info)->scqc_ci_type = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQ_CI_TYPE : HIFC_CMD_SCQ_CI_TYPE); + + (*v_ppscq_info)->ci = 0; + (*v_ppscq_info)->ci_owner = 1; +} + +static void hifc_init_scq_header(struct hifc_wq_header_s *v_queue_header) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); + + /* Obit default is 1 */ + v_queue_header->db_record.pmsn = 1 << 15; + v_queue_header->db_record.dump_pmsn = + v_queue_header->db_record.pmsn; + v_queue_header->ci_record.cmsn = 1 << 15; + v_queue_header->ci_record.dump_cmsn = + v_queue_header->ci_record.cmsn; + + /* Big endian convert */ + hifc_cpu_to_big64((void *)v_queue_header, + sizeof(struct hifc_wq_header_s)); +} + +static void hifc_cfg_scq_ctx(struct hifc_scq_info_s *v_scq_info, + struct hifcoe_cq_qinfo_s *v_scq_ctx) +{ + struct cqm_queue_s *cqm_scq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + unsigned long long parity = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + cqm_scq_info = v_scq_info->cqm_scq_info; + + v_scq_ctx->pcie_template_hi = 0; + v_scq_ctx->cur_cqe_gpa = + cqm_scq_info->q_room_buf_1.buf_list->pa >> HIFC_CQE_GPA_SHIFT; + v_scq_ctx->pi = 0; + v_scq_ctx->pi_o = 1; + v_scq_ctx->ci = v_scq_info->ci; + v_scq_ctx->ci_o = v_scq_info->ci_owner; + v_scq_ctx->c_eqn_msi_x = v_scq_info->msix_entry_idx; + v_scq_ctx->ci_type = v_scq_info->scqc_ci_type; + v_scq_ctx->cq_depth = v_scq_info->scqc_cq_depth; + v_scq_ctx->armq = HIFC_ARMQ_IDLE; + v_scq_ctx->cur_cqe_cnt = 0; + v_scq_ctx->cqe_max_cnt = 0; + v_scq_ctx->cqe_dmaattr_idx = 0; + v_scq_ctx->cq_so_ro = 0; + v_scq_ctx->init_mode = HIFC_CQ_INT_MODE; + v_scq_ctx->next_o = 1; + v_scq_ctx->loop_o = 1; + v_scq_ctx->next_cq_wqe_page_gpa = + cqm_scq_info->q_room_buf_1.buf_list[1].pa >> + HIFC_NEXT_CQE_GPA_SHIFT; + v_scq_ctx->pcie_template_lo = 0; + + v_scq_ctx->ci_gpa = (cqm_scq_info->q_header_paddr + + offsetof(struct hifc_wq_header_s, ci_record)) >> + HIFC_CQE_GPA_SHIFT; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= + ((unsigned long long)(v_scq_info->scqn & 0xfffff)); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->pcie_template_lo)) << 20); + /* bits 28 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->ci_gpa & 0xfffffff)) << 23); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cqe_dmaattr_idx)) << 51); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cq_so_ro)) << 57); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->init_mode)) << 59); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->c_eqn_msi_x & 0x7)) << 61); + /* bits 7 */ + queue_bus.bus[1] |= + ((unsigned long long)(v_scq_ctx->c_eqn_msi_x >> 3)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->ci_type)) << 7); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cq_depth)) << 8); + /* bits 8 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cqe_max_cnt)) << 11); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->pcie_template_hi)) << 19); + + parity = hifc_get_parity_value(queue_bus.bus, HIFC_SCQC_BUS_ROW, + HIFC_SCQC_BUS_COL); + v_scq_ctx->parity_0 = parity & 0x1; + 
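/* distribute the computed parity value across the three SCQC parity bits */ +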
v_scq_ctx->parity_1 = (parity >> 0x1) & 0x1;
+ v_scq_ctx->parity_2 = (parity >> 0x2) & 0x1;
+
+ hifc_cpu_to_big64((void *)v_scq_ctx, sizeof(struct hifcoe_cq_qinfo_s));
+}
+
+static unsigned int hifc_create_scqc_via_cmdq_sync(
+ struct hifc_hba_s *v_hba,
+ struct hifcoe_cq_qinfo_s *v_scqc,
+ unsigned int scqn)
+{
+#define HIFC_INIT_SCQC_TIMEOUT 3000
+
+ int ret;
+ unsigned int cvt_size;
+ struct hifcoe_cmdqe_creat_scqc_s init_scqc_cmd;
+ struct hifc_cmd_buf *cmdq_in_buf;
+
+ cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle);
+ if (!cmdq_in_buf) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+ "[err]cmdq in_cmd_buf alloc failed");
+
+ HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC);
+ return UNF_RETURN_ERROR;
+ }
+
+ memset(&init_scqc_cmd, 0, sizeof(init_scqc_cmd));
+ init_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SCQC;
+ init_scqc_cmd.wd1.scqn = HIFC_LSW(scqn);
+ cvt_size = sizeof(init_scqc_cmd) - sizeof(init_scqc_cmd.scqc);
+ hifc_cpu_to_big32(&init_scqc_cmd, cvt_size);
+
+ /* v_scqc is already big endian */
+ memcpy(init_scqc_cmd.scqc, v_scqc, sizeof(*v_scqc));
+ memcpy(cmdq_in_buf->buf, &init_scqc_cmd, sizeof(init_scqc_cmd));
+ cmdq_in_buf->size = sizeof(init_scqc_cmd);
+
+ ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ,
+ HIFC_MOD_FCOE, 0,
+ cmdq_in_buf, NULL, HIFC_INIT_SCQC_TIMEOUT);
+ hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf);
+ if (ret) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+ "[err]Send create scqc via cmdq failed, ret=%d", ret);
+
+ HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC);
+ return UNF_RETURN_ERROR;
+ }
+
+ HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC);
+
+ return RETURN_OK;
+}
+
+static unsigned int hifc_create_scq(struct hifc_hba_s *v_hba)
+{
+ unsigned int ret = UNF_RETURN_ERROR;
+ unsigned int scq_index = 0;
+ unsigned int scq_cfg_num = 0;
+ struct cqm_queue_s *cqm_scq = NULL;
+ void *handle = NULL;
+ struct hifc_scq_info_s *scq_info = NULL;
+ struct hifcoe_cq_qinfo_s scq_ctx_info;
+
+ UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba,
+ return UNF_RETURN_ERROR);
+
+ handle = v_hba->hw_dev_handle;
+
+ /* Create SCQ by CQM interface */
+ for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) {
+ /*
+ * 1. Create/Allocate SCQ
+ *
+ * Notice: SCQ[0, 2, 4 ...]--->CMD SCQ,
+ * SCQ[1, 3, 5 ...]--->STS SCQ, SCQ[HIFC_TOTAL_SCQ_NUM-1]
+ * --->Default SCQ
+ */
+ cqm_scq = cqm_object_nonrdma_queue_create(
+ handle,
+ CQM_OBJECT_NONRDMA_SCQ,
+ HIFC_SCQ_IS_STS(scq_index) ?
+ HIFC_STS_SCQ_DEPTH :
+ HIFC_CMD_SCQ_DEPTH,
+ HIFC_SCQE_SIZE,
+ v_hba);
+ if (!cqm_scq) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
+ UNF_WARN, "[err]Create scq failed");
+
+ goto free_scq;
+ }
+
+ /* 2. Initialize SCQ (info) */
+ hifc_init_scq_info(v_hba, cqm_scq, scq_index, &scq_info);
+
+ /* 3. Allocate & Initialize SCQ interrupt */
+ ret = hifc_alloc_scq_int(scq_info);
+ if (ret != RETURN_OK) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
+ UNF_WARN, "[err]Allocate scq interrupt failed");
+
+ cqm_object_delete(&cqm_scq->object);
+ memset(scq_info, 0, sizeof(struct hifc_scq_info_s));
+ goto free_scq;
+ }
+
+ /* 4. Initialize SCQ queue header */
+ hifc_init_scq_header(
+ (struct hifc_wq_header_s *)
+ (void *)cqm_scq->q_header_vaddr);
+
+ /* 5. 
Initialize & Create SCQ CTX */ + memset(&scq_ctx_info, 0, sizeof(scq_ctx_info)); + hifc_cfg_scq_ctx(scq_info, &scq_ctx_info); + ret = hifc_create_scqc_via_cmdq_sync(v_hba, + &scq_ctx_info, + scq_info->scqn); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Create scq context failed"); + + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + goto free_scq; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Create SCQ[%u] Scqn=%u WqeNum=%u WqeSize=%u WqePerBuf=%u CqDepth=%u CiType=%u irq=%u msix=%u", + scq_info->queue_id, scq_info->scqn, + scq_info->valid_wqe_num, scq_info->wqe_size, + scq_info->wqe_num_per_buf, scq_info->scqc_cq_depth, + scq_info->scqc_ci_type, scq_info->irq_id, + scq_info->msix_entry_idx); + } + + /* + * Last SCQ is used to handle SCQE delivery access when clearing buffer + */ + v_hba->default_scqn = scq_info->scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Default Scqn=%d CqmScqIndex=%u", + v_hba->default_scqn, cqm_scq->index); + + return RETURN_OK; + +free_scq: + hifc_flush_scq_ctx(v_hba); + + scq_cfg_num = scq_index; + for (scq_index = 0; scq_index < scq_cfg_num; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } + + return UNF_RETURN_ERROR; +} + +static void hifc_destroy_scq(struct hifc_hba_s *v_hba) +{ + unsigned int scq_index = 0; + struct cqm_queue_s *cqm_scq = NULL; + struct hifc_scq_info_s *scq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total %d SCQ", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Use CQM to delete SCQ */ + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ALL, + "[info]Destroy SCQ%u, Scqn=%u, Irq=%u, msix=%u, name=%s", + scq_index, scq_info->scqn, scq_info->irq_id, + scq_info->msix_entry_idx, scq_info->irq_name); + + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } +} + +static void hifc_init_srq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_srq, + struct hifc_srq_info_s *v_srq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_srq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + v_srq_info->phba = (void *)v_hba; + + v_srq_info->cqm_srq_info = v_cqm_srq; + v_srq_info->wqe_num_per_buf = v_cqm_srq->q_room_buf_1.buf_size / + HIFC_SRQE_SIZE - 1; + v_srq_info->wqe_size = HIFC_SRQE_SIZE; + v_srq_info->valid_wqe_num = v_cqm_srq->valid_wqe_num; + v_srq_info->pi = 0; + v_srq_info->pi_owner = HIFC_SRQ_INIT_LOOP_O; + v_srq_info->pmsn = 0; + v_srq_info->srqn = v_cqm_srq->index; + v_srq_info->first_rqe_rcv_dma = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Init srq info(srq index 0x%x) valid wqe num 0x%x, buffer size 0x%x, wqe num per buf 0x%x", + v_cqm_srq->index, v_srq_info->valid_wqe_num, + v_cqm_srq->q_room_buf_1.buf_size, + v_srq_info->wqe_num_per_buf); +} + +static void hifc_init_srq_header(struct hifc_wq_header_s *v_queue_header) +{ + 
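/* the SRQ queue header starts out zeroed; its PMSN is advanced later as RQEs are posted */ +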
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); +} + +static struct hifcoe_rqe_s *hifc_get_srq_entry( + struct hifc_srq_info_s *v_srq_info, + struct hifcoe_rqe_s **v_linked_rqe, + unsigned short position) +{ + unsigned int buf_id = 0; + unsigned int wqe_num_per_buf = 0; + unsigned short buf_offset = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return NULL); + + wqe_num_per_buf = v_srq_info->wqe_num_per_buf; + + buf_id = position / wqe_num_per_buf; + buf = &v_srq_info->cqm_srq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = position % ((unsigned short)wqe_num_per_buf); + + if (buf_offset + 1 == wqe_num_per_buf) + *v_linked_rqe = (struct hifcoe_rqe_s *)(buf->va) + + wqe_num_per_buf; + else + *v_linked_rqe = NULL; + + return (struct hifcoe_rqe_s *)(buf->va) + buf_offset; +} + +/** + * hifc_set_srq_wqe_owner_be - Assign a value to Owner Bit of WQE in the + * big-endian format of Wqe Page. + * @v_sqe_ctrl_in_wp: sqe ctrl wqe struct info for communicate with uncode + * @owner: owner value which need to set + */ +static void hifc_set_srq_wqe_owner_be( + struct hifcoe_wqe_ctrl_s *v_sqe_ctrl_in_wp, + unsigned int owner) +{ + struct hifcoe_wqe_ctrl_ch_s wqe_ctrl_ch; + + mb(); + + wqe_ctrl_ch.ctrl_ch_val = be32_to_cpu(v_sqe_ctrl_in_wp->ch.ctrl_ch_val); + wqe_ctrl_ch.wd0.owner = owner; + v_sqe_ctrl_in_wp->ch.ctrl_ch_val = cpu_to_be32(wqe_ctrl_ch.ctrl_ch_val); + + mb(); +} + +static void hifc_set_srq_link_wqe_owner_be(struct hifc_link_wqe_s *v_link_wqe, + unsigned int owner, + unsigned short pmsn) +{ + struct hifc_link_wqe_s local_lw; + + mb(); + local_lw.val_wd1 = be32_to_cpu(v_link_wqe->val_wd1); + local_lw.wd1.msn = pmsn; + local_lw.wd1.dump_msn = (local_lw.wd1.msn & 0x7fff); + v_link_wqe->val_wd1 = cpu_to_be32(local_lw.val_wd1); + + local_lw.val_wd0 = be32_to_cpu(v_link_wqe->val_wd0); + local_lw.wd0.o = owner; + v_link_wqe->val_wd0 = cpu_to_be32(local_lw.val_wd0); + mb(); +} + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buff_id) +{ + struct hifcoe_rqe_s *rqe = NULL; + struct hifcoe_rqe_s tmp_rqe; + struct hifcoe_rqe_s *linked_rqe = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + buff_id < v_srq_info->valid_wqe_num, return); + + buff_entry = v_srq_info->els_buff_entry_head + buff_id; + + spin_lock(&v_srq_info->srq_spin_lock); + + /* Obtain RQE, not include link wqe */ + rqe = hifc_get_srq_entry(v_srq_info, &linked_rqe, v_srq_info->pi); + if (!rqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]post els srq,get srqe failed, valid wqe num 0x%x, pi 0x%x, pmsn 0x%x", + v_srq_info->valid_wqe_num, v_srq_info->pi, + v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); + + return; + } + + /* Initialize RQE */ + /* cs section is not used */ + memset(&tmp_rqe, 0, sizeof(struct hifcoe_rqe_s)); + + /* default Obit is invalid, and set valid finally */ + hifc_build_srq_wqe_ctrls(&tmp_rqe, !v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + + tmp_rqe.bds_sl.buf_addr_hi = HIFC_HIGH_32_BITS(buff_entry->buff_dma); + tmp_rqe.bds_sl.buf_addr_lo = HIFC_LOW_32_BITS(buff_entry->buff_dma); + tmp_rqe.drv_sl.wd0.user_id = buff_id; + + /* convert to big endian */ + hifc_cpu_to_big32(&tmp_rqe, sizeof(struct hifcoe_rqe_s)); + + memcpy(rqe, &tmp_rqe, 
sizeof(struct hifcoe_rqe_s)); + + /* reset Obit */ + hifc_set_srq_wqe_owner_be( + (struct hifcoe_wqe_ctrl_s *)(void *)&rqe->ctrl_sl, + v_srq_info->pi_owner); + + if (linked_rqe) { + /* Update Obit in linked WQE */ + hifc_set_srq_link_wqe_owner_be( + (struct hifc_link_wqe_s *)(void *)linked_rqe, + v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + } + + /* Update PI and PMSN */ + hifc_update_producer_info((unsigned short)(v_srq_info->valid_wqe_num), + &v_srq_info->pi, + &v_srq_info->pi_owner); + + /* + * pmsn is 16bit. The value is added to the maximum value and is + * automatically reversed + */ + v_srq_info->pmsn++; + + /* Update pmsn in queue header */ + wq_header = (struct hifc_wq_header_s *) + (void *)v_srq_info->cqm_srq_info->q_header_vaddr; + hifc_update_srq_header(&wq_header->db_record, v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); +} + +static void hifc_cfg_srq_ctx(struct hifc_srq_info_s *v_srq_info, + struct hifc_srq_ctx_s *v_srq_ctx, + unsigned int v_sge_size, + unsigned long long v_rqe_gpa) +{ + struct hifc_srq_ctx_s *srq_ctx = NULL; + struct cqm_queue_s *cqm_srq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_ctx, return); + + cqm_srq_info = v_srq_info->cqm_srq_info; + srq_ctx = v_srq_ctx; + + srq_ctx->last_rq_pmsn = 0; + srq_ctx->cur_rqe_msn = 0; + srq_ctx->pcie_template = 0; + /* The value of CTX needs to be updated when RQE is configured */ + srq_ctx->cur_rqe_gpa = v_rqe_gpa; + srq_ctx->cur_sge_v = 0; + srq_ctx->cur_sge_l = 0; + /* The information received by the SRQ is reported through the SCQ. + * The interrupt and ArmCQ are disabled. + */ + srq_ctx->ceqn_msix = 0; + srq_ctx->int_mode = 0; + srq_ctx->cur_sge_remain_len = 0; + srq_ctx->cur_sge_id = 0; + srq_ctx->consant_sge_len = v_sge_size; + srq_ctx->cur_wqe = 0; + srq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + srq_ctx->bdsl = 0; + srq_ctx->cr = 0; + srq_ctx->csl = 0; + srq_ctx->cf = 0; + srq_ctx->ctrl_sl = 0; + srq_ctx->cur_sge_gpa = 0; + srq_ctx->cur_pmsn_gpa = cqm_srq_info->q_header_paddr; + srq_ctx->pre_fetch_max_msn = 0; + srq_ctx->cqe_max_cnt = 0; + srq_ctx->cur_cqe_cnt = 0; + srq_ctx->arm_q = 0; + srq_ctx->cq_so_ro = 0; + srq_ctx->cqe_dma_attr_idx = 0; + srq_ctx->rq_so_ro = 0; + srq_ctx->rqe_dma_attr_idx = 0; + srq_ctx->loop_o = HIFC_SRQ_INIT_LOOP_O; + srq_ctx->ring = HIFC_QUEUE_RING; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 60 */ + queue_bus.bus[0] |= + ((unsigned long long)(cqm_srq_info->q_ctx_paddr >> 4)); + /* bits 4 */ + queue_bus.bus[0] |= + (((unsigned long long)(srq_ctx->rqe_dma_attr_idx & 0xf)) << 60); + /* bits 2 */ + queue_bus.bus[1] |= + ((unsigned long long)(srq_ctx->rqe_dma_attr_idx >> 4)); + /* bits 2 */ + queue_bus.bus[1] |= (((unsigned long long)(srq_ctx->rq_so_ro)) << 2); + /* bits 60 */ + queue_bus.bus[1] |= + (((unsigned long long)(srq_ctx->cur_pmsn_gpa >> 4)) << 4); + /* bits 17 */ + queue_bus.bus[2] |= ((unsigned long long)(srq_ctx->consant_sge_len)); + /* bits 6 */ + queue_bus.bus[2] |= + (((unsigned long long)(srq_ctx->pcie_template)) << 17); + + srq_ctx->parity = hifc_get_parity_value((void *)queue_bus.bus, + HIFC_SRQC_BUS_ROW, + HIFC_SRQC_BUS_COL); + + hifc_cpu_to_big64((void *)srq_ctx, sizeof(struct hifc_srq_ctx_s)); +} + +static unsigned int hifc_create_srqc_via_cmdq_sync( + struct hifc_hba_s *v_hba, + struct hifc_srq_ctx_s *v_srqc, + unsigned long long v_ctx_gpa) +{ +#define HIFC_INIT_SRQC_TIMEOUT 3000 + + 
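/* build the INIT_SRQC command in a cmdq buffer and wait synchronously for the response */ +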
int ret; + unsigned int cvt_size; + struct hifcoe_cmdqe_creat_srqc_s init_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + memset(&init_srqc_cmd, 0, sizeof(init_srqc_cmd)); + init_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SRQC; + init_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(v_ctx_gpa); + init_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(v_ctx_gpa); + cvt_size = sizeof(init_srqc_cmd) - sizeof(init_srqc_cmd.srqc); + hifc_cpu_to_big32(&init_srqc_cmd, cvt_size); + + /* v_srqc is already big-endian */ + memcpy(init_srqc_cmd.srqc, v_srqc, sizeof(*v_srqc)); + memcpy(cmdq_in_buf->buf, &init_srqc_cmd, sizeof(init_srqc_cmd)); + cmdq_in_buf->size = sizeof(init_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_in_buf, + NULL, HIFC_INIT_SRQC_TIMEOUT); + + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send creat srqc via cmdq failed, ret=%d", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + + return RETURN_OK; +} + +static void hifc_init_els_srq_wqe(struct hifc_srq_info_s *v_srq_info) +{ + unsigned int rqe_index = 0; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + for (rqe_index = 0; rqe_index < v_srq_info->valid_wqe_num - 1; + rqe_index++) { + buff_entry = v_srq_info->els_buff_entry_head + rqe_index; + + hifc_post_els_srq_wqe(v_srq_info, buff_entry->buff_id); + } +} + +static void hifc_free_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + srq_info = &v_hba->els_srq_info; + + if (!srq_info->els_buff_entry_head) + return; + + for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { + buff_entry = &srq_info->els_buff_entry_head[buff_index]; + buff_entry->buff_addr = NULL; + } + + if (srq_info->buff_list.buflist) { + for (buff_index = 0; buff_index < srq_info->buff_list.buf_num; + buff_index++) { + if (srq_info->buff_list.buflist[buff_index].paddr) { + pci_unmap_single( + v_hba->pci_dev, + srq_info->buff_list.buflist[buff_index].paddr, + srq_info->buff_list.buf_size, + DMA_FROM_DEVICE); + srq_info->buff_list.buflist[buff_index].paddr = 0; + } + if (srq_info->buff_list.buflist[buff_index].vaddr) { + kfree(srq_info->buff_list.buflist[buff_index].vaddr); + srq_info->buff_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(srq_info->buff_list.buflist); + srq_info->buff_list.buflist = NULL; + } + + if (srq_info->els_buff_entry_head) { + kfree(srq_info->els_buff_entry_head); + srq_info->els_buff_entry_head = NULL; + } +} + +static unsigned int hifc_alloc_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int req_buff_size = 0; + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset 
= 0;
+ unsigned int buf_cnt_perhugebuf;
+
+ srq_info = &v_hba->els_srq_info;
+
+ /* Apply for entry buffer */
+ req_buff_size = (unsigned int)(srq_valid_wqe *
+ sizeof(struct hifc_srq_buff_entry_s));
+ srq_info->els_buff_entry_head =
+ (struct hifc_srq_buff_entry_s *)kmalloc(req_buff_size,
+ GFP_KERNEL);
+ if (!srq_info->els_buff_entry_head) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
+ "[err]Allocate ELS Srq receive buffer entries failed");
+
+ return UNF_RETURN_ERROR;
+ }
+ memset(srq_info->els_buff_entry_head, 0, req_buff_size);
+
+ buf_total_size = HIFC_SRQ_ELS_SGE_LEN * srq_valid_wqe;
+
+ srq_info->buff_list.buf_size =
+ buf_total_size > BUF_LIST_PAGE_SIZE ?
+ BUF_LIST_PAGE_SIZE : buf_total_size;
+ buf_cnt_perhugebuf =
+ srq_info->buff_list.buf_size / HIFC_SRQ_ELS_SGE_LEN;
+ buf_num = srq_valid_wqe % buf_cnt_perhugebuf ? srq_valid_wqe /
+ buf_cnt_perhugebuf + 1 : srq_valid_wqe /
+ buf_cnt_perhugebuf;
+ srq_info->buff_list.buflist = (struct buff_list_s *)
+ kmalloc(buf_num * sizeof(struct buff_list_s),
+ GFP_KERNEL);
+ srq_info->buff_list.buf_num = buf_num;
+
+ if (!srq_info->buff_list.buflist) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
+ "[err]Allocate ELS buf list failed, out of memory");
+ goto free_buff;
+ }
+ memset(srq_info->buff_list.buflist, 0,
+ buf_num * sizeof(struct buff_list_s));
+
+ for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) {
+ srq_info->buff_list.buflist[alloc_idx].vaddr =
+ kmalloc(srq_info->buff_list.buf_size, GFP_KERNEL);
+ if (!srq_info->buff_list.buflist[alloc_idx].vaddr)
+ goto free_buff;
+ memset(srq_info->buff_list.buflist[alloc_idx].vaddr, 0,
+ srq_info->buff_list.buf_size);
+
+ srq_info->buff_list.buflist[alloc_idx].paddr =
+ pci_map_single(
+ v_hba->pci_dev,
+ srq_info->buff_list.buflist[alloc_idx].vaddr,
+ srq_info->buff_list.buf_size, DMA_FROM_DEVICE);
+ if (pci_dma_mapping_error(
+ v_hba->pci_dev,
+ srq_info->buff_list.buflist[alloc_idx].paddr)) {
+ srq_info->buff_list.buflist[alloc_idx].paddr = 0;
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
+ UNF_WARN, "[err]Map els srq buffer failed");
+
+ goto free_buff;
+ }
+ }
+
+ /* Apply for receiving buffer and attach it to the free linked list */
+ for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) {
+ buff_entry = &srq_info->els_buff_entry_head[buff_index];
+
+ cur_buf_idx = buff_index / buf_cnt_perhugebuf;
+
+ cur_buf_offset = HIFC_SRQ_ELS_SGE_LEN *
+ (buff_index % buf_cnt_perhugebuf);
+ buff_entry->buff_addr =
+ srq_info->buff_list.buflist[cur_buf_idx].vaddr +
+ cur_buf_offset;
+
+ buff_entry->buff_dma =
+ srq_info->buff_list.buflist[cur_buf_idx].paddr +
+ cur_buf_offset;
+
+ buff_entry->buff_id = (unsigned short)buff_index;
+ }
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
+ "[EVENT]Allocate bufnum:%u,buf_total_size:%u",
+ buf_num, buf_total_size);
+
+ return RETURN_OK;
+
+free_buff:
+ hifc_free_els_srq_buff(v_hba, srq_valid_wqe);
+ return UNF_RETURN_ERROR;
+}
+
+/**
+ * hifc_root_cmdq_enqueue - Send commands to the chip via ROOT CMDQ. 
+ * @v_hba: hba handler to send cmd + * @v_cmdqe: cmdqe buff + * @cmd_len: cmdqe buff len + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_root_cmdq_enqueue(void *v_hba, union hifc_cmdqe_u *v_cmdqe, + unsigned short cmd_len) +{ + unsigned char wqe_type = 0; + int cmdq_ret = 0; + struct hifc_cmd_buf *cmdq_buf = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wqe_type = (unsigned char)v_cmdqe->common.wd0.task_type; + HIFC_IO_STAT(hba, wqe_type); + + cmdq_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!cmdq_buf) { + HIFC_ERR_IO_STAT(hba, wqe_type); + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) allocate cmdq buffer failed", + hba->port_cfg.port_id, hba->hw_dev_handle); + + return UNF_RETURN_ERROR; + } + + memcpy(cmdq_buf->buf, v_cmdqe, cmd_len); + hifc_cpu_to_big32(cmdq_buf->buf, cmd_len); + cmdq_buf->size = cmd_len; + + cmdq_ret = hifc_cmdq_async(hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_buf); + + if (cmdq_ret != RETURN_OK) { + hifc_free_cmd_buf(hba->hw_dev_handle, cmdq_buf); + HIFC_ERR_IO_STAT(hba, wqe_type); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) send buff clear cmnd failed(0x%x)", + hba->port_cfg.port_id, hba->hw_dev_handle, cmdq_ret); + + return UNF_RETURN_ERROR; + } + UNF_REFERNCE_VAR(wqe_type); + return RETURN_OK; +} + +static void hifc_send_clear_srq_cmd(struct hifc_hba_s *v_hba, + struct hifc_srq_info_s *v_srq_info) +{ + union hifc_cmdqe_u cmdqe; + struct cqm_queue_s *cqm_fcp_srq = NULL; + unsigned long flag = 0; + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + + spin_lock_irqsave(&v_srq_info->srq_spin_lock, flag); + + cqm_fcp_srq = v_srq_info->cqm_srq_info; + if (!cqm_fcp_srq) { + v_srq_info->state = HIFC_CLEAN_DONE; + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + return; + } + + cmdqe.clear_srq.wd0.task_type = HIFCOE_TASK_T_CLEAR_SRQ; + cmdqe.clear_srq.wd1.scqn = HIFC_LSW(v_hba->default_scqn); + cmdqe.clear_srq.wd1.srq_type = v_srq_info->srq_type; + cmdqe.clear_srq.srqc_gpa_h = HIFC_HIGH_32_BITS( + cqm_fcp_srq->q_ctx_paddr); + cmdqe.clear_srq.srqc_gpa_l = HIFC_LOW_32_BITS(cqm_fcp_srq->q_ctx_paddr); + + (void)queue_delayed_work(v_hba->work_queue, + &v_srq_info->del_work, + (unsigned long)msecs_to_jiffies(( + unsigned int)HIFC_SRQ_DEL_STAGE_TIMEOUT_MS)); + + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port 0x%x begin to clear srq 0x%x(0x%x,0x%llx)", + v_hba->port_cfg.port_id, v_srq_info->srq_type, + HIFC_LSW(v_hba->default_scqn), + (unsigned long long)cqm_fcp_srq->q_ctx_paddr); + + /* Run the ROOT CMDQ command to issue the clear srq command. + * If the command fails to be delivered, retry upon timeout. 
+ */ + (void)hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.clear_srq)); +} + +static void hifc_srq_clr_time_out(struct work_struct *work) +{ + struct hifc_srq_info_s *srq = NULL; + struct hifc_hba_s *hba = NULL; + struct cqm_queue_s *cqm_fcp_imm_srq = NULL; + unsigned long flag = 0; + + srq = container_of(work, struct hifc_srq_info_s, del_work.work); + + spin_lock_irqsave(&srq->srq_spin_lock, flag); + hba = srq->phba; + cqm_fcp_imm_srq = srq->cqm_srq_info; + spin_unlock_irqrestore(&srq->srq_spin_lock, flag); + + if (hba && cqm_fcp_imm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port 0x%x clear srq 0x%x stat 0x%x timeout", + hba->port_cfg.port_id, srq->srq_type, srq->state); + + /* + * If the delivery fails or the execution times out after the + * delivery, try again once + */ + srq->del_retry_time++; + + if (srq->del_retry_time < 2) + hifc_send_clear_srq_cmd(hba, srq); + else + srq->del_retry_time = 0; + } +} + +static unsigned int hifc_create_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct cqm_queue_s *cqm_srq = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_ctx_s srq_ctx = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + cqm_srq = cqm_object_fc_srq_create(v_hba->hw_dev_handle, + CQM_OBJECT_NONRDMA_SRQ, + HIFC_SRQ_ELS_DATA_DEPTH, + HIFC_SRQE_SIZE, + v_hba); + if (!cqm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + + return UNF_RETURN_ERROR; + } + + /* Initialize SRQ */ + srq_info = &v_hba->els_srq_info; + hifc_init_srq_info(v_hba, cqm_srq, srq_info); + srq_info->srq_type = HIFC_SRQ_ELS; + srq_info->enable = UNF_TRUE; + srq_info->state = HIFC_CLEAN_DONE; + srq_info->del_retry_time = 0; + + /* The srq lock is initialized and can be created repeatedly */ + spin_lock_init(&srq_info->srq_spin_lock); + srq_info->spin_lock_init = UNF_TRUE; + + /* Initialize queue header */ + wq_header = (struct hifc_wq_header_s *)(void *)cqm_srq->q_header_vaddr; + hifc_init_srq_header(wq_header); + + INIT_DELAYED_WORK(&srq_info->del_work, hifc_srq_clr_time_out); + + /* Apply for RQ buffer */ + ret = hifc_alloc_els_srq_buff(v_hba, srq_info->valid_wqe_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Els Srq buffer failed"); + + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + return UNF_RETURN_ERROR; + } + + /* Fill RQE, update queue header */ + hifc_init_els_srq_wqe(srq_info); + + /* Fill SRQ CTX */ + memset(&srq_ctx, 0, sizeof(srq_ctx)); + hifc_cfg_srq_ctx(srq_info, &srq_ctx, HIFC_SRQ_ELS_SGE_LEN, + srq_info->cqm_srq_info->q_room_buf_1.buf_list->pa); + + ret = hifc_create_srqc_via_cmdq_sync( + v_hba, &srq_ctx, + srq_info->cqm_srq_info->q_ctx_paddr); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Creat Els Srqc failed"); + + hifc_free_els_srq_buff(v_hba, srq_info->valid_wqe_num); + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_srq(void *v_hba) +{ + /* + * Receive clear els srq sts + * ---then--->>> destroy els srq + */ + struct hifc_hba_s *hba = NULL; + struct hifc_srq_info_s *srq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hba = (struct hifc_hba_s 
*)v_hba; + srq_info = &hba->els_srq_info; + + /* release receive buffer */ + hifc_free_els_srq_buff(hba, srq_info->valid_wqe_num); + + /* release srq info */ + if (srq_info->cqm_srq_info) { + cqm_object_delete(&srq_info->cqm_srq_info->object); + srq_info->cqm_srq_info = NULL; + } + if (srq_info->spin_lock_init) + srq_info->spin_lock_init = UNF_FALSE; + srq_info->phba = NULL; + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DONE; +} + +/** + * hifc_create_srq - Create SRQ, which contains four SRQ for receiving + * instant data and a SRQ for receiving ELS data. + * @v_hba: hba handler + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_create_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Create ELS SRQ */ + ret = hifc_create_els_srq(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +unsigned int hifc_create_common_share_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + /* Create & Init 8 pairs SCQ */ + ret = hifc_create_scq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create scq failed"); + + return UNF_RETURN_ERROR; + } + + /* Alloc SRQ resource for SIRT & ELS */ + ret = hifc_create_srq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create srq failed"); + + hifc_flush_scq_ctx(hba); + hifc_destroy_scq(hba); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_common_share_queues(void *v_hba) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hifc_destroy_scq((struct hifc_hba_s *)v_hba); + hifc_destroy_srq((struct hifc_hba_s *)v_hba); +} + +static unsigned char hifc_map_fcp_data_cos(struct hifc_hba_s *v_hba) +{ + unsigned char i = 0; + unsigned char min_cnt_index = HIFC_PACKET_COS_FC_DATA; + int get_init_index = UNF_FALSE; + + for (i = 0; i < HIFC_MAX_COS_NUM; i++) { + /* + * Check whether the CoS is valid for the FC and cannot be + * occupied by the CMD + */ + if ((!(v_hba->cos_bit_map & (1 << i))) || + (i == HIFC_PACKET_COS_FC_CMD)) { + continue; + } + + if (get_init_index == UNF_FALSE) { + min_cnt_index = i; + get_init_index = UNF_TRUE; + continue; + } + + if (atomic_read(&v_hba->cos_rport_cnt[i]) < + atomic_read(&v_hba->cos_rport_cnt[min_cnt_index])) { + min_cnt_index = i; + } + } + + atomic_inc(&v_hba->cos_rport_cnt[min_cnt_index]); + + return min_cnt_index; +} + +static void hifc_update_cos_rport_cnt(struct hifc_hba_s *v_hba, + unsigned char v_cos_index) +{ + if ((v_cos_index >= HIFC_MAX_COS_NUM) || + (v_cos_index == HIFC_PACKET_COS_FC_CMD) || + (!(v_hba->cos_bit_map & (1 << v_cos_index))) || + (atomic_read(&v_hba->cos_rport_cnt[v_cos_index]) == 0)) { + return; + } + + atomic_dec(&v_hba->cos_rport_cnt[v_cos_index]); +} + +void hifc_invalid_parent_sq(struct hifc_parent_sq_info_s *sq_info) +{ + sq_info->rport_index = INVALID_VALUE32; + sq_info->context_id = INVALID_VALUE32; + sq_info->sq_queue_id = INVALID_VALUE32; + sq_info->cache_id = INVALID_VALUE32; + sq_info->max_sqe_num = INVALID_VALUE32; + sq_info->wqe_num_per_buf = INVALID_VALUE32; + sq_info->wqe_size = HIFC_SCQE_SIZE; 
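+ /* reset the remaining per-session fields to their invalid defaults */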
+ sq_info->wqe_offset = INVALID_VALUE32; + sq_info->head_start_cmsn = HIFC_MAX_MSN; + sq_info->head_end_cmsn = HIFC_MAX_MSN; + sq_info->last_pmsn = INVALID_VALUE16; + sq_info->last_pi_owner = INVALID_VALUE16; + sq_info->local_port_id = INVALID_VALUE32; + sq_info->remote_port_id = INVALID_VALUE32; + sq_info->phba = NULL; + sq_info->del_start_jiff = INVALID_VALUE64; + sq_info->port_in_flush = UNF_FALSE; + sq_info->sq_in_sess_rst = UNF_FALSE; + sq_info->oqid_rd = INVALID_VALUE16; + sq_info->oqid_wr = INVALID_VALUE16; + sq_info->srq_ctx_addr = 0; + atomic_set(&sq_info->sq_cashed, UNF_FALSE); + sq_info->vport_id = 0; + sq_info->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + atomic_set(&sq_info->sq_valid, UNF_FALSE); + atomic_set(&sq_info->fush_done_wait_cnt, 0); + + memset(&sq_info->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + memset(sq_info->io_stat, 0, sizeof(sq_info->io_stat)); +} + +static void hifc_free_link_list_wpg(struct hifc_parent_sq_info_s *v_sq) +{ + unsigned long flag = 0; + struct hifc_hba_s *hba = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct list_head *entry_head_wqe_page = NULL; + struct hifc_sq_wqe_page_s *sq_wpg = NULL; + + hba = (struct hifc_hba_s *)v_sq->phba; + + list_for_each_safe(node, next_node, &v_sq->list_linked_list_sq) { + sq_wpg = list_entry(node, struct hifc_sq_wqe_page_s, entry_wpg); + memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, + hba->sq_wpg_pool.wpg_size); + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + entry_head_wqe_page = &sq_wpg->entry_wpg; + list_del(entry_head_wqe_page); + list_add_tail(entry_head_wqe_page, + &hba->sq_wpg_pool.list_free_wpg_pool); + + /* WqePage Pool counter */ + atomic_dec(&v_sq->wqe_page_cnt); + atomic_dec(&hba->sq_wpg_pool.wpg_in_use); + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + } + + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) Sq(0x%x) link list destroyed, Sq.WqePageCnt=0x%x, SqWpgPool.wpg_in_use=0x%x", + hba->port_cfg.port_id, v_sq->rport_index, v_sq->context_id, + atomic_read(&v_sq->wqe_page_cnt), + atomic_read(&hba->sq_wpg_pool.wpg_in_use)); +} + +static void hifc_free_parent_sq(struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info) +{ + unsigned int ctx_flush_done = 0; + unsigned int *ctx_dw = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + unsigned int delay_cnt = 0; + + sq_info = &v_parentq_info->parent_sq_info; + + /* Free data cos */ + hifc_update_cos_rport_cnt(v_hba, v_parentq_info->queue_data_cos); + + hifc_free_link_list_wpg(sq_info); + + if (sq_info->queue_header_original) { + pci_unmap_single(v_hba->pci_dev, + sq_info->queue_hdr_phy_addr_original, + sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE, + DMA_BIDIRECTIONAL); + kfree(sq_info->queue_header_original); + sq_info->queue_header_original = NULL; + } + + if (v_parentq_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)( + v_parentq_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if ((v_parentq_info->offload_state == + HIFC_QUEUE_STATE_DESTROYING) && (ctx_flush_done == 0)) { + do { + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if (ctx_flush_done != 0) + break; + delay_cnt++; + } while (delay_cnt < 100); + + if (ctx_flush_done == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, 
UNF_WARN, + "[warn]Port(0x%x) Rport(0x%x) flush done is not set", + v_hba->port_cfg.port_id, + sq_info->rport_index); + } + } + + cqm_object_delete( + &v_parentq_info->parent_ctx.cqm_parent_ctx_obj->object); + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + } + + hifc_invalid_parent_sq(sq_info); +} + +static inline struct hifcoe_sqe_s *hifc_get_wqe_page_entry( + struct hifc_sq_wqe_page_s *v_wpg, + unsigned int wqe_offset) +{ + struct hifcoe_sqe_s *wpg = NULL; + + wpg = (struct hifcoe_sqe_s *)(v_wpg->wpg_addr); + wpg += wqe_offset; + + return wpg; +} + +static struct hifc_sq_wqe_page_s *hifc_add_tail_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_sq_wqe_page_s *esgl = NULL; + struct list_head *free_list_head = NULL; + unsigned long flag = 0; + + hba = (struct hifc_hba_s *)v_sq->phba; + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + /* Get a WqePage from hba->sq_wpg_pool.list_free_wpg_pool, and add + * to v_sq.list_SqTailWqePage + */ + if (!list_empty(&hba->sq_wpg_pool.list_free_wpg_pool)) { + free_list_head = (&hba->sq_wpg_pool.list_free_wpg_pool)->next; + list_del(free_list_head); + list_add_tail(free_list_head, &v_sq->list_linked_list_sq); + esgl = list_entry(free_list_head, struct hifc_sq_wqe_page_s, + entry_wpg); + + /* WqePage Pool counter */ + atomic_inc(&hba->sq_wpg_pool.wpg_in_use); + } else { + esgl = NULL; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]SQ pool is empty when SQ(0x%x) try to get wqe page", + v_sq->rport_index); + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_POOL_EMPTY); + } + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + return esgl; +} + +static struct hifc_sq_wqe_page_s *hifc_add_one_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int wqe_idx = 0; + struct hifc_sq_wqe_page_s *wqe_page = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe_in_wpg = NULL; + struct hifc_link_wqe_s link_wqe; + + /* Add a new Wqe Page */ + wqe_page = hifc_add_tail_wqe_page(v_sq); + + if (!wqe_page) + return NULL; + + for (wqe_idx = 0; wqe_idx <= v_sq->wqe_num_per_buf; wqe_idx++) { + sqe_in_wp = hifc_get_wqe_page_entry(wqe_page, wqe_idx); + sqe_in_wp->ctrl_sl.ch.ctrl_ch_val = 0; + } + + /* Set last WqePage as linkwqe */ + link_wqe_in_wpg = (struct hifc_link_wqe_s *) + hifc_get_wqe_page_entry(wqe_page, v_sq->wqe_num_per_buf); + link_wqe.val_wd0 = 0; + link_wqe.val_wd1 = 0; + link_wqe.next_page_addr_hi = 0; + link_wqe.next_page_addr_lo = 0; + link_wqe.wd0.wf = CQM_WQE_WF_LINK; + link_wqe.wd0.ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + link_wqe.wd0.o = !(v_sq->last_pi_owner); + link_wqe.wd1.lp = CQM_LINK_WQE_LP_INVALID; + hifc_cpu_to_big32(&link_wqe, sizeof(struct hifc_link_wqe_s)); + memcpy(link_wqe_in_wpg, &link_wqe, sizeof(struct hifc_link_wqe_s)); + + return wqe_page; +} + +static void hifc_alloc_sq_oqid(struct hifc_hba_s *v_hba, + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned short read_oqid = INVALID_VALUE16; + unsigned short write_oqid = INVALID_VALUE16; + unsigned short vf_id = INVALID_VALUE16; + unsigned short mask_value = hifc_host_oq_id_mask(v_hba->hw_dev_handle); + unsigned int cqm_xid = v_sq->context_id; + + vf_id = hifc_global_func_id(v_hba->hw_dev_handle); + + HIFC_OQID_RD((unsigned short)cqm_xid, vf_id, mask_value, read_oqid); + HIFC_OQID_WR((unsigned short)cqm_xid, vf_id, mask_value, write_oqid); + + v_sq->oqid_rd = read_oqid; + v_sq->oqid_wr = write_oqid; +} + +static void hifc_parent_sq_operate_time_out(struct work_struct 
*work) +{ + int free_sq = UNF_FALSE; + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + del_work.work); + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + if (parent_queue->offload_state == HIFC_QUEUE_STATE_DESTROYING) { + free_sq = UNF_TRUE; + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) sq rport index(0x%x) local nportid(0x%x),remote nportid(0x%x) reset timeout.", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_sq->local_port_id, + parent_sq->remote_port_id); + } + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + /* In the server scenario, if the connection deletion times out, you + * can only wait or perform the FLR operation on the port. If the FLR + * command is run, the fault diffusion mode will be used. + */ + if ((parent_queue->parent_sq_info.del_start_jiff > hba->reset_time) && + (parent_queue->parent_sq_info.del_start_jiff != INVALID_VALUE64) && + (hba->removing == UNF_FALSE)) { + /* There is nothing to do if session reset timeout */ + ; + } + + if (free_sq == UNF_TRUE) { + /* There is nothing to do if session reset timeout */ + ; + } +} + +static void hifc_parent_sq_wait_flush_done_time_out(struct work_struct *work) +{ + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ctx_flush_done; + unsigned int *ctx_dw = NULL; + int ret; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + flush_done_tmo_work.work); + + HIFC_CHECK(INVALID_VALUE32, parent_sq, return); + + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + HIFC_CHECK(INVALID_VALUE32, parent_queue, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) sq rport index(0x%x) is not destroying status,offloadsts is %d", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_queue->offload_state); + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + return; + } + + if (parent_queue->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *) + (parent_queue->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + if (atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt) < HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) sq rport index(0x%x) wait flush done timeout %d times", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + 
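/* flush-done still not set: count this attempt and re-arm the delayed work */ +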
atomic_inc(&parent_queue->parent_sq_info.fush_done_wait_cnt); + + /* Delay Free Sq info */ + ret = queue_delayed_work(hba->work_queue, + &parent_queue->parent_sq_info.flush_done_tmo_work, + (unsigned long)msecs_to_jiffies((unsigned int)HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + hba->port_cfg.port_id, + parent_sq->rport_index, + ret); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + } + + return; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) sq rport index(0x%x) has wait flush done %d times,do not free sq", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + HIFC_HBA_STAT(hba, HIFC_STAT_CTXT_FLUSH_DONE); + + return; + } + } + } + + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) sq rport index(0x%x) flush done bit is ok,free sq now", + hba->port_cfg.port_id, + parent_sq->rport_index); + + hifc_free_parent_queue_info(hba, parent_queue); +} + +unsigned int hifc_alloc_parent_sq( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_parent_sq_info_s *sq_ctrl = NULL; + struct hifc_sq_wqe_page_s *head_wpg = NULL; + struct cqm_qpc_mpt_s *prnt_ctx = NULL; + unsigned int queue_header_alloc_size = 0; + unsigned long flag = 0; + + /* Craete parent context via CQM */ + prnt_ctx = cqm_object_qpc_mpt_create(v_hba->hw_dev_handle, + CQM_OBJECT_SERVICE_CTX, + HIFC_CNTX_SIZE_256B, + v_parentq_info, + CQM_INDEX_INVALID); + if (!prnt_ctx) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create parent context failed, CQM_INDEX is 0x%x", + CQM_INDEX_INVALID); + goto parent_create_fail; + } + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; + + /* Initialize struct hifc_parent_sq_info_s */ + sq_ctrl = &v_parentq_info->parent_sq_info; + sq_ctrl->phba = (void *)v_hba; + sq_ctrl->rport_index = v_rport_info->rport_index; + sq_ctrl->context_id = prnt_ctx->xid; + sq_ctrl->sq_queue_id = HIFC_QID_SQ; + sq_ctrl->cache_id = INVALID_VALUE32; + sq_ctrl->max_sqe_num = v_hba->exit_count; + /* Reduce one Link Wqe */ + sq_ctrl->wqe_num_per_buf = v_hba->sq_wpg_pool.wqe_per_wpg - 1; + sq_ctrl->wqe_size = HIFC_SQE_SIZE; + sq_ctrl->wqe_offset = 0; + sq_ctrl->head_start_cmsn = 0; + sq_ctrl->head_end_cmsn = HIFC_GET_WP_END_CMSN(0, + sq_ctrl->wqe_num_per_buf); + sq_ctrl->last_pmsn = 0; + /* Linked List SQ Owner Bit 1 valid, 0 invalid */ + sq_ctrl->last_pi_owner = 1; + sq_ctrl->local_port_id = INVALID_VALUE32; + sq_ctrl->remote_port_id = INVALID_VALUE32; + sq_ctrl->sq_in_sess_rst = UNF_FALSE; + atomic_set(&sq_ctrl->sq_valid, UNF_TRUE); + sq_ctrl->del_start_jiff = INVALID_VALUE64; + sq_ctrl->service_type = HIFC_GET_SERVICE_TYPE(v_hba); + sq_ctrl->vport_id = 0; + sq_ctrl->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + hifc_alloc_sq_oqid(v_hba, sq_ctrl); + atomic_set(&sq_ctrl->fush_done_wait_cnt, 0); + + /* Check whether the HBA is in the Linkdown state. Note that + * offload_state must be in the non-FREE state. 
+ */ + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + sq_ctrl->port_in_flush = v_hba->in_flushing; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); + + INIT_LIST_HEAD(&sq_ctrl->list_linked_list_sq); + atomic_set(&sq_ctrl->wqe_page_cnt, 0); + atomic_set(&sq_ctrl->sq_dbl_cnt, 0); + atomic_set(&sq_ctrl->sqe_minus_cqe_cnt, 1); + atomic_set(&sq_ctrl->sq_wqe_cnt, 0); + atomic_set(&sq_ctrl->sq_cqe_cnt, 0); + memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); + + INIT_DELAYED_WORK(&sq_ctrl->del_work, hifc_parent_sq_operate_time_out); + INIT_DELAYED_WORK(&sq_ctrl->flush_done_tmo_work, + hifc_parent_sq_wait_flush_done_time_out); + + memset(&sq_ctrl->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + + /* Allocate and initialize the Queue Header space. 64B alignment is + * required. Additional 64B is applied for alignment + */ + queue_header_alloc_size = sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE; + sq_ctrl->queue_header_original = kmalloc(queue_header_alloc_size, + GFP_ATOMIC); + if (!sq_ctrl->queue_header_original) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) create SQ queue header failed", + v_rport_info->rport_index); + goto qheader_create_fail; + } + + memset((unsigned char *)sq_ctrl->queue_header_original, 0, + queue_header_alloc_size); + + sq_ctrl->queue_hdr_phy_addr_original = pci_map_single( + v_hba->pci_dev, + sq_ctrl->queue_header_original, + queue_header_alloc_size, + DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(v_hba->pci_dev, + sq_ctrl->queue_hdr_phy_addr_original)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) SQ queue header DMA mapping failed", + v_rport_info->rport_index); + goto qheader_dma_map_fail; + } + + /* Obtains the 64B alignment address */ + sq_ctrl->queue_header = (struct hifc_queue_header_s *) + HIFC_ADDR_64_ALIGN( + (unsigned long long) + (sq_ctrl->queue_header_original)); + sq_ctrl->queue_hdr_phy_addr = + HIFC_ADDR_64_ALIGN(sq_ctrl->queue_hdr_phy_addr_original); + + /* Each SQ is allocated with a Wqe Page by default. 
The WqePageCnt is + * incremented by one + */ + head_wpg = hifc_add_one_wqe_page(sq_ctrl); + if (!head_wpg) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]RPort(0x%x) create SQ first wqe page failed", + v_rport_info->rport_index); + goto headwpg_create_fail; + } + + atomic_inc(&sq_ctrl->wqe_page_cnt); + + return RETURN_OK; + +headwpg_create_fail: + pci_unmap_single(v_hba->pci_dev, sq_ctrl->queue_hdr_phy_addr_original, + queue_header_alloc_size, DMA_BIDIRECTIONAL); + +qheader_dma_map_fail: + kfree(sq_ctrl->queue_header_original); + sq_ctrl->queue_header_original = NULL; + +qheader_create_fail: + cqm_object_delete(&prnt_ctx->object); + +parent_create_fail: + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + + return UNF_RETURN_ERROR; +} + +static void hifc_init_prnt_ctx_sq_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_sq_wqe_page_s *head_wqe_page = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sq_qinfo_s *parent_sq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the Parent Context address */ + sq = &v_parent_qinfo->parent_sq_info; + ctx = (struct hifcoe_parent_context_s *)(void *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + head_wqe_page = HIFC_GET_SQ_HEAD(sq); + + parent_sq_ctx = &ctx->sq_qinfo; + + /* The PMSN is updated by the host driver */ + parent_sq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + + /* Indicates the value of O of the valid SQE in the current round of SQ. + * The value of Linked List SQ is always one, and the value of 0 is + * invalid. + */ + /* current valid o-bit */ + parent_sq_ctx->loop_o = HIFC_OWNER_DRIVER_PRODUCT; + + /* should be opposite from loop_o */ + parent_sq_ctx->cur_wqe_o = ~(parent_sq_ctx->loop_o); + + /* the first sqe's gpa */ + parent_sq_ctx->cur_sqe_gpa = head_wqe_page->wpg_phy_addr; + + /* Indicates the GPA of the Queue header that is initialized to the SQ + * in the Host memory. The value must be 16-byte aligned. + */ + parent_sq_ctx->pmsn_gpa = sq->queue_hdr_phy_addr; + if (wqe_pre_load != 0) + parent_sq_ctx->pmsn_gpa |= HIFC_SQ_LINK_PRE; + + /* + * This field is used to fill in the dmaattr_idx field of the ComboDMA. 
+ * The default value is 0 + */ + parent_sq_ctx->sqe_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + /* + * This field is filled using the value of RO_SO in the SGL0 of + * the ComboDMA + */ + parent_sq_ctx->sq_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + parent_sq_ctx->ring = HIFC_QUEUE_LINK_STYLE; + + /* This field is used to set the SGL0 field of the Child solicDMA */ + parent_sq_ctx->zerocopy_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + parent_sq_ctx->zerocopy_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + /* PCIe attribute information */ + parent_sq_ctx->pcie_template = HIFC_PCIE_TEMPLATE; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= ((unsigned long long)(sq->context_id & 0xfffff)); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sqe_dmaattr_idx)) << 20); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sq_so_ro)) << 26); + /* bits 1 */ + queue_bus.bus[0] |= (((unsigned long long)(parent_sq_ctx->ring)) << 28); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_dmaattr_idx)) + << 29); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_so_ro)) << 35); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pcie_template)) << 37); + /* bits 21 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 4)) << 43); + /* bits 39 */ + queue_bus.bus[1] |= + ((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 25)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(parent_sq_ctx->pmsn_type)) << 39); + + parent_sq_ctx->parity = + hifc_get_parity_value(queue_bus.bus, HIFC_SQC_BUS_ROW, + HIFC_SQC_BUS_COL); + + hifc_cpu_to_big64(parent_sq_ctx, sizeof(struct hifcoe_sq_qinfo_s)); +} + +static void hifc_init_parent_ctx_sqc_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + unsigned int resp_scqn = 0; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_scq_qinfo_s *resp_parent_scq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* + * Obtains the queue id of the scq returned by the CQM when the SCQ + * is created + */ + resp_scqn = v_parent_qinfo->parent_sts_scq_info.cqm_queue_id; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + resp_parent_scq_ctx = &ctx->resp_scq_qinfo; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.scq_n = + (unsigned long long)resp_scqn; + resp_parent_scq_ctx->hw_scqc_config.info.parity = 0; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = resp_parent_scq_ctx->hw_scqc_config.pctxt_val1; + resp_parent_scq_ctx->hw_scqc_config.info.parity = + hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SCQC_BUS_ROW, + HIFC_HW_SCQC_BUS_COL); + + 
hifc_cpu_to_big64(resp_parent_scq_ctx, + sizeof(struct hifcoe_scq_qinfo_s)); +} + +static void hifc_init_parent_ctx_srq_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_hba_s *hba = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct cqm_queue_s *cqm_els_srq = NULL; + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the SQ address */ + sq = &v_parent_qinfo->parent_sq_info; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + hba = (struct hifc_hba_s *)v_hba; + cqm_els_srq = hba->els_srq_info.cqm_srq_info; + + /* Initialize the Parent SRQ INFO used when the ELS is received */ + ctx->els_srq_info.srqc_gpa = cqm_els_srq->q_ctx_paddr >> 4; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = ctx->els_srq_info.srqc_gpa; + ctx->els_srq_info.parity = hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SRQC_BUS_ROW, + HIFC_HW_SRQC_BUS_COL); + + hifc_cpu_to_big64(&ctx->els_srq_info, + sizeof(struct hifcoe_srq_qinfo_s)); + + ctx->imm_srq_info.srqc_gpa = 0; + sq->srq_ctx_addr = 0; +} + +static void hifc_init_parent_rsvd_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_hw_rsvd_queue_s *hw_rsvd_qinfo = NULL; + unsigned short max_seq = 0; + unsigned int each = 0, seq_index = 0; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + hw_rsvd_qinfo = (struct hifcoe_hw_rsvd_queue_s *)&ctx->hw_rsvdq; + memset(hw_rsvd_qinfo->seq_id_bitmap, 0, + sizeof(hw_rsvd_qinfo->seq_id_bitmap)); + + max_seq = HIFC_HRQI_SEQ_ID_MAX; + + /* special set for sequence id 0, which is always kept by ucode for + * sending fcp-cmd + */ + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = 1; + seq_index = HIFC_HRQI_SEQ_SEPCIAL_ID - + (max_seq >> HIFC_HRQI_SEQ_INDEX_SHIFT); + + /* Set the unavailable mask to start from max + 1 */ + for (each = (max_seq % HIFC_HRQI_SEQ_INDEX_MAX) + 1; + each < HIFC_HRQI_SEQ_INDEX_MAX; each++) { + hw_rsvd_qinfo->seq_id_bitmap[seq_index] |= 0x1 << each; + } + + hw_rsvd_qinfo->seq_id_bitmap[seq_index] = + cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[seq_index]); + + /* sepcial set for sequence id 0 */ + if (seq_index != HIFC_HRQI_SEQ_SEPCIAL_ID) { + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = + cpu_to_be64( + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID]); + } + + for (each = 0; each < seq_index; each++) + hw_rsvd_qinfo->seq_id_bitmap[each] = HIFC_HRQI_SEQ_INVALID_ID; + + /* no matter what the range of seq id, last_req_seq_id is fixed + * value 0xff + */ + hw_rsvd_qinfo->wd0.last_req_seq_id = HIFC_HRQI_SEQ_ID_MAX; + hw_rsvd_qinfo->wd0.xid = v_parent_qinfo->parent_sq_info.context_id; + + *(unsigned long long *)&hw_rsvd_qinfo->wd0 = + cpu_to_be64(*(unsigned long long *)&hw_rsvd_qinfo->wd0); +} + +static void hifc_init_oqid_in_ctx( + struct hifcoe_parent_context_s *v_parent_ctx, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + v_parent_ctx->sw_section.oqid_rd = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_rd); + v_parent_ctx->sw_section.oqid_wr = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_wr); +} + +static void hifc_init_parent_sw_section_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ +#define HIFC_VLAN_ENABLE (1) + + unsigned short rport_index; + struct hifc_hba_s *hba 
= NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sw_section_s *sw_section = NULL; + + /* Obtains the Parent Context address */ + hba = (struct hifc_hba_s *)v_hba; + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + sw_section = &ctx->sw_section; + + /* xid+vPortId */ + sw_section->sw_ctxt_vport_xid.xid = + v_parent_qinfo->parent_sq_info.context_id; + sw_section->sw_ctxt_vport_xid.vport = + v_parent_qinfo->parent_sq_info.vport_id; + sw_section->sw_ctxt_vport_xid.csctrl = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_vport_xid, + sizeof(sw_section->sw_ctxt_vport_xid)); + + /* conn_id */ + rport_index = HIFC_LSW(v_parent_qinfo->parent_sq_info.rport_index); + sw_section->conn_id = cpu_to_be16(rport_index); + + /* Immediate parameters */ + sw_section->immi_rq_page_size = 0; + + /* Parent SCQ INFO used for sending packets to the Cmnd */ + sw_section->scq_num_rcv_cmd = + cpu_to_be32(v_parent_qinfo->parent_cmd_scq_info.cqm_queue_id); + + /* sw_ctxt_misc */ + sw_section->sw_ctxt_misc.dw.srv_type = + v_parent_qinfo->parent_sq_info.service_type; + sw_section->sw_ctxt_misc.dw.port_id = hba->port_index; + + /* only the VN2VF mode is supported */ + sw_section->sw_ctxt_misc.dw.vlan_id = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_misc.pctxt_val0, + sizeof(sw_section->sw_ctxt_misc.pctxt_val0)); + + /* oqid_rd, oqid_wr */ + hifc_init_oqid_in_ctx(ctx, v_parent_qinfo); + + /* Configuring the combo length */ + sw_section->per_xmit_data_size = cpu_to_be32(combo_length_kb * 1024); + + /* sw_ctxt_config */ + sw_section->sw_ctxt_config.dw.work_mode = HIFC_PORT_MODE_INI; + + sw_section->sw_ctxt_config.dw.status = FCOE_PARENT_STATUS_INVALID; + sw_section->sw_ctxt_config.dw.cos = hba->port_index; + sw_section->sw_ctxt_config.dw.oq_cos_cmd = HIFC_PACKET_COS_FC_CMD; + sw_section->sw_ctxt_config.dw.oq_cos_data = + v_parent_qinfo->queue_data_cos; + sw_section->sw_ctxt_config.dw.priority = 0; + sw_section->sw_ctxt_config.dw.vlan_enable = HIFC_VLAN_ENABLE; + sw_section->sw_ctxt_config.dw.sgl_num = dif_sgl_mode; + hifc_cpu_to_big32(&sw_section->sw_ctxt_config.pctxt_val1, + sizeof(sw_section->sw_ctxt_config.pctxt_val1)); + + hifc_cpu_to_big32(&sw_section->immi_dif_info, + sizeof(sw_section->immi_dif_info)); + + sw_section->cmd_scq_gpa_h = + HIFC_HIGH_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + sw_section->cmd_scq_gpa_l = + HIFC_LOW_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) CmdLocalScqn(0x%x) QheaderGpaH(0x%x) QheaderGpaL(0x%x)", + hba->port_cfg.port_id, + v_parent_qinfo->parent_sq_info.rport_index, + v_parent_qinfo->parent_cmd_scq_info.local_queue_id, + sw_section->cmd_scq_gpa_h, + sw_section->cmd_scq_gpa_l); + + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_h, + sizeof(sw_section->cmd_scq_gpa_h)); + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_l, + sizeof(sw_section->cmd_scq_gpa_l)); +} + +void hifc_init_parent_ctx(void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + /* Initialize Parent Context */ + memset(ctx, 0, HIFC_CNTX_SIZE_256B); + + /* Initialize the Queue Info hardware area */ + hifc_init_prnt_ctx_sq_qinfo(v_parent_qinfo); + hifc_init_parent_ctx_sqc_qinfo(v_hba, v_parent_qinfo); + 
hifc_init_parent_ctx_srq_qinfo(v_hba, v_parent_qinfo); + hifc_init_parent_rsvd_qinfo(v_parent_qinfo); + + /* Initialize Software Section */ + hifc_init_parent_sw_section_info(v_hba, v_parent_qinfo); +} + +unsigned int hifc_get_rport_maped_cmd_scqn(void *phba, unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + + return hba->scq_info[cmd_scqn_local].scqn; +} + +/** + * hifc_get_rport_maped_sts_scqn - Obtains the SCQ channel of RPort that is used + * to send STS. + * @v_hba: hba handle + * @rport_index: rport index + * @Return: related scqn value with rport index + */ +unsigned int hifc_get_rport_maped_sts_scqn(void *phba, unsigned int rport_index) +{ + unsigned int sts_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + + return hba->scq_info[sts_scqn_local].scqn; +} + +void hifc_map_shared_queue_qid( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + unsigned int sts_scqn_local = 0; + + /* The SCQ is used for each connection based on the balanced + * distribution of commands and responses + */ + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + v_parent_queue_info->parent_cmd_scq_info.local_queue_id = + cmd_scqn_local; + v_parent_queue_info->parent_sts_scq_info.local_queue_id = + sts_scqn_local; + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = + v_hba->scq_info[cmd_scqn_local].scqn; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = + v_hba->scq_info[sts_scqn_local].scqn; + + /* Each session share with immediate SRQ and ElsSRQ */ + v_parent_queue_info->parent_els_srq_info.local_queue_id = 0; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = + v_hba->els_srq_info.srqn; + + /* Allocate fcp data cos value */ + v_parent_queue_info->queue_data_cos = hifc_map_fcp_data_cos(v_hba); + + /* Allocate Parent SQ vPort */ + v_parent_queue_info->parent_sq_info.vport_id += + v_parent_queue_info->queue_vport_id; +} + +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + if (v_parent_queue_info->offload_state != HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x), offload state(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->offload_state); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return UNF_RETURN_ERROR; + } + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + /* Create Parent Context and Link List SQ */ + ret = hifc_alloc_parent_sq(hba, v_parent_queue_info, v_rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) alloc sq resoure failed.rport index(0x%x),rport nportid(0x%x).", + hba->port_cfg.port_id, v_rport_info->rport_index, + v_rport_info->nport_id); + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + hifc_invalid_parent_sq(&v_parent_queue_info->parent_sq_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; + } + + /* Allocate the corresponding queue xid to each parent */ + hifc_map_shared_queue_qid(hba, v_parent_queue_info, + v_rport_info->rport_index); + + /* Initialize Parent Context, including hardware area and ucode area */ + hifc_init_parent_ctx(v_hba, v_parent_queue_info); + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) allocate parent sq success,rport index(0x%x),rport nportid(0x%x),context id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.context_id); + + return ret; +} + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + unsigned long rst_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + enum hifc_session_reset_mode_e mode = + HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + /* get parent queue info (by rport index) */ + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent resource failed, invlaid rport_index(%u) rport_nport_id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + v_parent_queue_info = &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + /* 1. 
for has been offload */ + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) { + v_parent_queue_info->offload_state = + HIFC_QUEUE_STATE_DESTROYING; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + /* set reset state, in order to prevent I/O in_SQ */ + spin_lock_irqsave( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + v_parent_queue_info->parent_sq_info.sq_in_sess_rst = UNF_TRUE; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + + /* check pcie device state */ + if (HIFC_HBA_NOT_PRESENT(hba)) { + HIFC_TRACE( + UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) hba is not present, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + hifc_free_parent_queue_info(hba, v_parent_queue_info); + return RETURN_OK; + } + + v_parent_queue_info->parent_sq_info.del_start_jiff = jiffies; + (void)queue_delayed_work( + hba->work_queue, + &v_parent_queue_info->parent_sq_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_DEL_STAGE_TIMEOUT_MS)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to reset parent session, rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + /* Forcibly set both mode */ + mode = HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + ret = hifc_send_session_rst_cmd(v_hba, v_parent_queue_info, + mode); + + return ret; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_INITIALIZED) { + /* 2. for resource has been alloc, but not offload */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is not offloaded, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + hifc_free_parent_queue_info(hba, v_parent_queue_info); + + return RETURN_OK; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) { + /* 3. for driver has offloading CMND to uCode */ + hifc_push_destroy_parent_queue_sqe(v_hba, + v_parent_queue_info, + v_rport_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is offloading, push to delay free. 
rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + return RETURN_OK; + } else { + /* other state */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) parent sq is not created, do not need free state(0x%x) rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->offload_state, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return RETURN_OK; + } +} + +void hifc_free_parent_queue_mgr(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) + return; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx) + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = NULL; + } + + if (parent_queue_mgr->parent_sq_buf_list.buflist) { + for (index = 0; + index < parent_queue_mgr->parent_sq_buf_list.buf_num; + index++) { + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr != 0) { + pci_unmap_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr = 0; + } + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr) { + kfree(parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr); + parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr = NULL; + } + } + + kfree(parent_queue_mgr->parent_sq_buf_list.buflist); + parent_queue_mgr->parent_sq_buf_list.buflist = NULL; + } + + vfree(parent_queue_mgr); + hba->parent_queue_mgr = NULL; +} + +void hifc_free_parent_queues(void *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.del_work); + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.flush_done_tmo_work); + + /* free parent queue */ + hifc_free_parent_queue_info( + hba, + &parent_queue_mgr->parent_queues[index]); + continue; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } +} + +unsigned int hifc_alloc_parent_queue_mgr(void 
*v_hba) +{ + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset = 0; + unsigned int uiprtctxsize = sizeof(struct hifcoe_parent_context_s); + unsigned int buf_cnt_perhugebuf; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + parent_queue_mgr = (struct hifc_parent_queue_mgr_s *)vmalloc( + sizeof(struct hifc_parent_queue_mgr_s)); + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate queue manager", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + hba->parent_queue_mgr = parent_queue_mgr; + memset(parent_queue_mgr, 0, sizeof(struct hifc_parent_queue_mgr_s)); + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock); + parent_queue_mgr->parent_queues[index].offload_state = + HIFC_QUEUE_STATE_FREE; + parent_queue_mgr->parent_queues[index].parent_sq_info.queue_header_original = NULL; + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock); + parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sq_info.del_start_jiff = INVALID_VALUE64; + parent_queue_mgr->parent_queues[index].queue_vport_id = + hba->vpid_start; + } + + buf_total_size = uiprtctxsize * UNF_HIFC_MAXRPORT_NUM; + parent_queue_mgr->parent_sq_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : + buf_total_size; + buf_cnt_perhugebuf = + parent_queue_mgr->parent_sq_buf_list.buf_size / uiprtctxsize; + buf_num = + UNF_HIFC_MAXRPORT_NUM % buf_cnt_perhugebuf ? 
+ UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf + 1 : + UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf; + parent_queue_mgr->parent_sq_buf_list.buflist = (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + parent_queue_mgr->parent_sq_buf_list.buf_num = buf_num; + + if (!parent_queue_mgr->parent_sq_buf_list.buflist) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate QueuMgr buf list failed out of memory"); + goto free_parent_queue; + } + memset(parent_queue_mgr->parent_sq_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr = + kmalloc(parent_queue_mgr->parent_sq_buf_list.buf_size, + GFP_KERNEL); + if (!parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr) + goto free_parent_queue; + memset( + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + 0, parent_queue_mgr->parent_sq_buf_list.buf_size); + + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = + pci_map_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr)) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = 0; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Map QueuMgr address failed"); + + goto free_parent_queue; + } + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + cur_buf_idx = index / buf_cnt_perhugebuf; + cur_buf_offset = uiprtctxsize * (index % buf_cnt_perhugebuf); + + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].vaddr + cur_buf_offset; + parent_queue_mgr->parent_queues[index].parent_ctx.parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].paddr + cur_buf_offset; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, + buf_total_size); + + return RETURN_OK; + +free_parent_queue: + hifc_free_parent_queue_mgr(hba); + return UNF_RETURN_ERROR; +} + +static void hifc_release_all_wqe_pages(struct hifc_hba_s *v_hba) +{ + unsigned int index; + struct hifc_sq_wqe_page_s *wpg = NULL; + + UNF_CHECK_VALID(0x2218, UNF_TRUE, v_hba, return); + + wpg = v_hba->sq_wpg_pool.wpg_pool_addr; + + for (index = 0; index < v_hba->sq_wpg_pool.wpg_cnt; index++) { + if (wpg->wpg_addr) { + dma_pool_free(v_hba->sq_wpg_pool.wpg_dma_pool, + wpg->wpg_addr, wpg->wpg_phy_addr); + wpg->wpg_addr = NULL; + wpg->wpg_phy_addr = 0; + } + + wpg++; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port[%u] free total %u wqepages", v_hba->port_index, + index); +} + +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba) +{ + unsigned int index = 0; + struct hifc_sq_wqe_page_pool_s *wpg_pool = NULL; + struct hifc_sq_wqe_page_s *wpg = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wpg_pool = &hba->sq_wpg_pool; + + INIT_LIST_HEAD(&wpg_pool->list_free_wpg_pool); + spin_lock_init(&wpg_pool->wpg_pool_lock); + atomic_set(&wpg_pool->wpg_in_use, 0); + + /* Calculate the number of Wqe Page required in the pool */ + wpg_pool->wpg_size = wqe_page_size; + wpg_pool->wpg_cnt = (HIFC_MIN_WP_NUM * hba->image_count + + ((hba->exit_count * HIFC_SQE_SIZE) / + 
wpg_pool->wpg_size)); + + wpg_pool->wqe_per_wpg = wpg_pool->wpg_size / HIFC_SQE_SIZE; + + /* Craete DMA POOL */ + wpg_pool->wpg_dma_pool = dma_pool_create("hifc_wpg_pool", + &hba->pci_dev->dev, + wpg_pool->wpg_size, + HIFC_SQE_SIZE, 0); + if (!wpg_pool->wpg_dma_pool) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot allocate SQ WqePage DMA pool"); + + goto out_create_dma_pool_err; + } + + /* Allocate arrays to record all WqePage addresses */ + wpg_pool->wpg_pool_addr = + (struct hifc_sq_wqe_page_s *) + vmalloc(wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s)); + if (!wpg_pool->wpg_pool_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Allocate SQ WqePageAddr array failed"); + + goto out_alloc_wpg_array_err; + } + wpg = wpg_pool->wpg_pool_addr; + memset(wpg, 0, wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s)); + + for (index = 0; index < wpg_pool->wpg_cnt; index++) { + /* Apply for WqePage from DMA POOL */ + wpg->wpg_addr = dma_pool_alloc(wpg_pool->wpg_dma_pool, + GFP_KERNEL, + (u64 *)&wpg->wpg_phy_addr); + if (!wpg->wpg_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, "[err]Dma pool allocated failed"); + + break; + } + + /* To ensure security, clear the memory */ + memset(wpg->wpg_addr, 0, wpg_pool->wpg_size); + + /* Add to the idle linked list */ + INIT_LIST_HEAD(&wpg->entry_wpg); + list_add_tail(&wpg->entry_wpg, + &wpg_pool->list_free_wpg_pool); + + wpg++; + } + /* ALL allocated successfully */ + if (index == wpg_pool->wpg_cnt) + return RETURN_OK; + + hifc_release_all_wqe_pages(hba); + vfree(wpg_pool->wpg_pool_addr); + wpg_pool->wpg_pool_addr = NULL; + +out_alloc_wpg_array_err: + dma_pool_destroy(wpg_pool->wpg_dma_pool); + wpg_pool->wpg_dma_pool = NULL; + +out_create_dma_pool_err: + return UNF_RETURN_ERROR; +} + +void hifc_free_parent_sq_wqe_page_pool(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x2220, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + + hifc_release_all_wqe_pages(hba); + hba->sq_wpg_pool.wpg_cnt = 0; + + if (hba->sq_wpg_pool.wpg_pool_addr) { + vfree(hba->sq_wpg_pool.wpg_pool_addr); + hba->sq_wpg_pool.wpg_pool_addr = NULL; + } + + if (hba->sq_wpg_pool.wpg_dma_pool) { + dma_pool_destroy(hba->sq_wpg_pool.wpg_dma_pool); + hba->sq_wpg_pool.wpg_dma_pool = NULL; + } +} + +static inline void hifc_set_sq_wqe_owner_be(void *v_sqe) +{ + unsigned int *sqe_dw = (unsigned int *)v_sqe; + + /* Ensure that the write of WQE is complete */ + mb(); + sqe_dw[HIFC_SQE_SECOND_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE; + + /* Ensure that the write of Second Obit is complete */ + mb(); + sqe_dw[HIFC_SQE_FIRST_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE; +} + +static void hifc_free_head_wqe_page(struct hifc_parent_sq_info_s *v_sq) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_sq_wqe_page_s *sq_wpg = NULL; + struct list_head *entry_head_wqe_page = NULL; + unsigned long flag = 0; + + atomic_dec(&v_sq->wqe_page_cnt); + + hba = (struct hifc_hba_s *)v_sq->phba; + sq_wpg = HIFC_GET_SQ_HEAD(v_sq); + memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, + hba->sq_wpg_pool.wpg_size); + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + entry_head_wqe_page = &sq_wpg->entry_wpg; + list_del(entry_head_wqe_page); + list_add_tail(entry_head_wqe_page, + &hba->sq_wpg_pool.list_free_wpg_pool); + + /* WqePage Pool counter */ + atomic_dec(&hba->sq_wpg_pool.wpg_in_use); + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); +} + +static unsigned int 
hifc_parent_sq_ring_door_bell( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int ret = RETURN_OK; + int ravl; + unsigned short pmsn; + unsigned char pmsn_lo; + unsigned char pmsn_hi; + unsigned long long db_val_qw; + struct hifc_hba_s *hba; + struct hifc_parent_sq_db_s door_bell; + + hba = (struct hifc_hba_s *)v_sq->phba; + pmsn = v_sq->last_pmsn; + /* Obtain the low 8 Bit of PMSN */ + pmsn_lo = (unsigned char)(pmsn & 0xFF); + /* Obtain the high 8 Bit of PMSN */ + pmsn_hi = (unsigned char)((pmsn >> 8) & 0xFF); + door_bell.wd0.service_type = HIFC_LSW(v_sq->service_type); + door_bell.wd0.cos = hba->port_index; + door_bell.wd0.c = 0; + door_bell.wd0.arm = HIFC_DB_ARM_DISABLE; + door_bell.wd0.cntx_size = HIFC_CNTX_SIZE_T_256B; + door_bell.wd0.vport = v_sq->vport_id; + door_bell.wd0.xid = v_sq->context_id; + door_bell.wd1.sm_data = v_sq->cache_id; + door_bell.wd1.qid = v_sq->sq_queue_id; + door_bell.wd1.pi_hi = (unsigned int)pmsn_hi; + + if (unlikely(v_sq->cache_id == INVALID_VALUE32)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) SQ(0x%x) send DB error invalid cachedid", + hba->port_cfg.port_id, v_sq->context_id); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID); + return UNF_RETURN_ERROR; + } + + /* Fill Doorbell Record */ + db_val_qw = v_sq->queue_header->doorbell_record; + db_val_qw &= (unsigned long long)(~(0xFFFFFFFF)); + db_val_qw |= (unsigned long long)((unsigned long long)pmsn << 16 | + pmsn); + v_sq->queue_header->doorbell_record = cpu_to_be64(db_val_qw); + + /* ring doorbell */ + db_val_qw = *(unsigned long long *)&door_bell; + hifc_cpu_to_big32(&db_val_qw, sizeof(db_val_qw)); + + ravl = cqm_ring_hardware_db(hba->hw_dev_handle, SERVICE_T_FC, pmsn_lo, + db_val_qw); + if (unlikely(ravl != CQM_SUCCESS)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ(0x%x) send DB(0x%llx) failed", + v_sq->context_id, db_val_qw); + + ret = UNF_RETURN_ERROR; + } + + /* Doorbell success counter */ + atomic_inc(&v_sq->sq_dbl_cnt); + + return ret; +} + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_io_sqe) +{ + unsigned char wqe_type = 0; + unsigned int ret = RETURN_OK; + unsigned int addr_wd = INVALID_VALUE32; + unsigned int msn_wd = INVALID_VALUE32; + unsigned short link_wqe_msn = 0; + unsigned long flag = 0; + struct hifc_sq_wqe_page_s *new_wqe_page = NULL; + struct hifc_sq_wqe_page_s *tail_wpg = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_sq->phba; + + wqe_type = (unsigned char)HIFC_GET_WQE_TYPE(v_io_sqe); + + /* Serial enqueue */ + spin_lock_irqsave(&v_sq->parent_sq_enqueue_lock, flag); + + /* If the SQ is invalid, the wqe is discarded */ + if (unlikely(!atomic_read(&v_sq->sq_valid))) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ is invalid, reject wqe(0x%x)", wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * The heartbeat detection status is 0, which allows control sessions + * enqueuing + */ + if (unlikely((!hba->heart_status) && HIFC_WQE_IS_IO(v_io_sqe))) { + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + + return UNF_RETURN_ERROR; + } + + /* Ensure to be offloaded */ + if (unlikely(atomic_read(&v_sq->sq_cashed) != UNF_TRUE)) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, 
wqe_type);
+ HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba,
+ HIFC_STAT_PARENT_SQ_NOT_OFFLOADED);
+
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR,
+ "[err]RPort(0x%x) Sq(0x%x) is not offloaded, reject wqe(0x%x)",
+ v_sq->rport_index, v_sq->context_id, wqe_type);
+
+ spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ /*
+ * Whether the SQ is in the flush state. Temporarily allow the control
+ * sessions to enqueue.
+ */
+ if (unlikely(v_sq->port_in_flush && HIFC_WQE_IS_IO(v_io_sqe))) {
+ HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type);
+ HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba,
+ HIFC_STAT_PARENT_IO_FLUSHED);
+
+ HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+ "[warn]SQ(0x%x) in flush, cmsn(0x%x)-pmsn(0x%x), reject wqe(0x%x)",
+ v_sq->context_id,
+ HIFC_GET_QUEUE_CMSN(v_sq),
+ v_sq->last_pmsn, wqe_type);
+
+ spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ /*
+ * If the SQ is in the Session deletion state and the WQE belongs to
+ * the I/O path, return an I/O failure directly
+ */
+ if (unlikely(v_sq->sq_in_sess_rst && HIFC_WQE_IS_IO(v_io_sqe))) {
+ HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type);
+ HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba,
+ HIFC_STAT_PARENT_IO_FLUSHED);
+
+ HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+ "[err]SQ(0x%x) in session reset, reject wqe(0x%x)",
+ v_sq->context_id, wqe_type);
+
+ spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ /*
+ * If the slot at the current PMSN where the SQE would be placed is the
+ * Link WQE, apply to the CQM for a new WQE page
+ */
+ tail_wpg = HIFC_GET_SQ_TAIL(v_sq);
+
+ if (v_sq->wqe_offset == v_sq->wqe_num_per_buf) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_INFO,
+ "[info]RPort(0x%x) Sq(0x%x) add wqepage at pmsn(0x%x), WpgCnt(0x%x)",
+ v_sq->rport_index, v_sq->context_id, v_sq->last_pmsn,
+ atomic_read(&v_sq->wqe_page_cnt));
+
+ /* Add a new Wqe Page */
+ new_wqe_page = hifc_add_one_wqe_page(v_sq);
+ if (unlikely(!new_wqe_page)) {
+ HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba,
+ wqe_type);
+ spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock,
+ flag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ /*
+ * Set the next address of LinkWqe to the newly applied WqePage
+ */
+ link_wqe = (struct hifc_link_wqe_s *)
+ hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset);
+ addr_wd = HIFC_MSD(new_wqe_page->wpg_phy_addr);
+ link_wqe->next_page_addr_hi = cpu_to_be32(addr_wd);
+ addr_wd = HIFC_LSD(new_wqe_page->wpg_phy_addr);
+ link_wqe->next_page_addr_lo = cpu_to_be32(addr_wd);
+
+ /* Fill LinkWqe msn */
+ link_wqe_msn = HIFC_MSN_DEC(v_sq->last_pmsn);
+ msn_wd = be32_to_cpu(link_wqe->val_wd1);
+ msn_wd |= ((unsigned int)(link_wqe_msn & 0xffff));
+ msn_wd |= (((unsigned int)(link_wqe_msn & 0x7fff)) << 16);
+ link_wqe->val_wd1 = cpu_to_be32(msn_wd);
+
+ /* Set LinkWqe's Owner Bit valid */
+ hifc_set_sq_wqe_owner_be(link_wqe);
+
+ /* The newly added WqePage starts from 0 */
+ v_sq->wqe_offset = 0;
+
+ /* Point to the tail, Link Wqe */
+ tail_wpg = HIFC_GET_SQ_TAIL(v_sq);
+
+ /* Update counter */
+ atomic_inc(&v_sq->wqe_page_cnt);
+ }
+
+ /* Set pmsn of WQE Control Section, and set Owner-Bit invalid */
+ hifc_build_wqe_owner_pmsn(&v_io_sqe->ctrl_sl, !v_sq->last_pi_owner,
+ v_sq->last_pmsn);
+
+ /* Port WQE send counter */
+ HIFC_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type);
+
+ /*
+ * Set Done Bit of WQE, convert Control and Task 
Section to big endian + */ + hifc_convert_parent_wqe_to_big_endian(v_io_sqe); + + /* + * Find the position of the pointer that the SQE is placed in the + * WQEPAGE + */ + sqe_in_wp = (struct hifcoe_sqe_s *) + hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset); + + /* Copy sqe from the local memory to WqePage */ + memcpy(sqe_in_wp, v_io_sqe, sizeof(struct hifcoe_sqe_s)); + + hifc_set_sq_wqe_owner_be(sqe_in_wp); + + /* ring DoorBell */ + ret = hifc_parent_sq_ring_door_bell(v_sq); + if (unlikely(ret != RETURN_OK)) + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + + /* Update the count of the next SQE enqueuing */ + v_sq->wqe_offset += 1; + v_sq->last_pmsn = HIFC_MSN_INC(v_sq->last_pmsn); + + /* sq_wqe_cnt is updated for SQ statistics */ + atomic_inc(&v_sq->sq_wqe_cnt); + atomic_inc(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, wqe_type); + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return ret; +} + +static int hifc_msn_in_wqe_page(unsigned int start_msn, unsigned int end_msn, + unsigned int cur_msn) +{ + int ret = UNF_TRUE; + + if (end_msn >= start_msn) { + if ((cur_msn < start_msn) || (cur_msn > end_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + + } else { + if ((cur_msn > end_msn) && (cur_msn < start_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + } + + return ret; +} + +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_msn) +{ + unsigned short wpg_start_cmsn = 0; + unsigned short wpg_end_cmsn = 0; + int wqe_page_in_use; + + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&v_sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) + return; + + /* + * Check whether the current MSN is within the MSN range covered + * by the WqePage + */ + wpg_start_cmsn = v_sq->head_start_cmsn; + wpg_end_cmsn = v_sq->head_end_cmsn; + wqe_page_in_use = hifc_msn_in_wqe_page(wpg_start_cmsn, + wpg_end_cmsn, cur_msn); + + /* + * If the value of CMSN is within the current Wqe Page, no release is + * required + */ + if (wqe_page_in_use == UNF_TRUE) + return; + /* Free WqePage */ + hifc_free_head_wqe_page(v_sq); + + /* Obtain the start MSN of the next WqePage */ + wpg_start_cmsn = HIFC_MSN_INC(wpg_end_cmsn); + + /* obtain the end MSN of the next WqePage */ + wpg_end_cmsn = HIFC_GET_WP_END_CMSN(wpg_start_cmsn, + v_sq->wqe_num_per_buf); + + /* Set new MSN range */ + v_sq->head_start_cmsn = wpg_start_cmsn; + v_sq->head_end_cmsn = wpg_end_cmsn; +} + +static void hifc_update_sq_wqe_completion_stat( + struct hifc_parent_sq_info_s *v_sq, + union hifcoe_scqe_u *v_scqe) +{ + struct hifcoe_scqe_rcv_els_gs_rsp_s *els_gs_rsp = NULL; + + els_gs_rsp = (struct hifcoe_scqe_rcv_els_gs_rsp_s *)v_scqe; + + /* + * For the ELS/GS RSP intermediate frame and the CQE that is more + * than the ELS_GS_RSP_EXCH_CHECK_FAIL, no statistics are required + */ + if (unlikely(HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) || + (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP)) { + if (!els_gs_rsp->wd3.end_rsp || !HIFC_SCQE_ERR_TO_CM(v_scqe)) + return; + } + + /* + * When the SQ statistics are updated, the PlogiAcc or PlogiAccSts + * that is implicitly unloaded will enter here, and one more CQE count + * is added + */ + atomic_inc(&v_sq->sq_cqe_cnt); + atomic_dec(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, HIFC_GET_SCQE_TYPE(v_scqe)); +} + +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe) +{ + unsigned int cur_msn = 0; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct 
hifc_parent_sq_info_s *sq = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long state_lock_flag = 0; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = HIFC_GET_SCQE_CONN_ID(v_scqe); + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) do not have rport index: 0x%x", + hba->port_cfg.port_id, rport_index); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + sq = &v_parent_queue_info->parent_sq_info; + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) { + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + return RETURN_OK; + } else { + spin_lock_irqsave( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) already released, no need to reclaim sq wqepage", + hba->port_cfg.port_id, rport_index); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } + + cur_msn = HIFC_GET_QUEUE_CMSN(sq); + hifc_free_sq_wqe_page(sq, cur_msn); + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int rport_index = 0; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + if (unlikely(rport_index >= UNF_HIFC_MAXRPORT_NUM)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send pkg sid_did(0x%x_0x%x), but uplevel allocate invalid rport index: 0x%x", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, rport_index); + + return NULL; + } + + /* parent -->> session */ + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + return v_parent_queue_info; +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_id( + struct hifc_hba_s *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + /* rport_number -->> parent_number -->> session_number */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + /* local_id & remote_id & offload */ + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_OFFLOADED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + 
spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_hba_s *hba = v_hba; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (index == v_rport_index) + continue; + + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_INITIALIZED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (unlikely(!v_parent_queue_info)) { + v_parent_queue_info = hifc_find_parent_queue_info_by_id( + hba, + v_pkg->frame_head.csctl_sid & + UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + } + + cqm_parent_ctx_obj = v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj; + if (unlikely(!cqm_parent_ctx_obj)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x) with this rport has not alloc parent sq information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_sq_info; +} + +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.virt_parent_ctx)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, 
UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but cqm have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_ctx; +} + +unsigned int hifc_check_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) get a null parent queue mgr", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + return UNF_RETURN_ERROR; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return RETURN_OK; +} + +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x),but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + return v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; +} + +static void hifc_flush_specific_scq(struct hifc_hba_s *v_hba, + unsigned int index) +{ + /* + * The software interrupt is scheduled and processed during the second + * timeout period + */ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int flush_done_time = 0; + + scq_info = &v_hba->scq_info[index]; + atomic_set(&scq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + tasklet_schedule(&scq_info->tasklet); + + /* + * Wait for a maximum of 2 seconds. 
If the SCQ soft interrupt is not + * scheduled within 2 seconds, only timeout is returned + */ + while ((atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&scq_info->tasklet); + } + + if (atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) special scq(0x%x) flush timeout", + v_hba->port_cfg.port_id, index); + } +} + +static void hifc_flush_cmd_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + for (index = HIFC_CMD_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_sts_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + /* for each STS SCQ */ + for (index = HIFC_STS_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_all_scq(struct hifc_hba_s *v_hba) +{ + hifc_flush_cmd_scq(v_hba); + hifc_flush_sts_scq(v_hba); + /* Flush Default SCQ */ + hifc_flush_specific_scq(v_hba, HIFC_SESSION_SCQ_NUM); +} + +static void hifc_wait_root_rq_empty(struct hifc_hba_s *v_hba) +{ + unsigned int q_index; + struct hifc_root_info_s *root_info; + struct hifc_root_rq_info_s *rq_info; + unsigned int flush_done_time = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + root_info = &v_hba->root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + atomic_set(&rq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + flush_done_time = 0; + + while ((atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&rq_info->tasklet); + } + + if (atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, + "[warn]Port(0x%x) RootRq(0x%x) flush timeout", + v_hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_root_sq_empty(void *v_hba) +{ +#define HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS (50) + + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int start_wait_time = 0; + int time_out = UNF_FALSE; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * Traverse all root sq (just one) in the HBA and change the status to + * in_flush + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + start_wait_time = 0; + time_out = UNF_TRUE; + + /* Wait 1 second to check whether the Root Sq is empty */ + do { + if (hifc_root_sq_is_empty(sq_info)) { + time_out = UNF_FALSE; + break; + } + msleep(20); + start_wait_time++; + } while (start_wait_time < HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS); + + if (time_out) { + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_WAIT_EMPTY); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + 
"[err]Port(0x%x) waiting for root sq(0x%x) empty timeout", + hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_all_queues_empty(struct hifc_hba_s *v_hba) +{ + hifc_wait_root_rq_empty(v_hba); + hifc_flush_all_scq(v_hba); +} + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush) +{ + unsigned int q_index = 0; + unsigned long flags = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * for each root sq (so far, just one), + * set root sq state with been flushing or flush done + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flags); + sq_info->in_flush = in_flush; + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flags); + } +} + +void hifc_set_rport_flush_state(void *v_hba, int in_flush) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) parent queue manager is empty", + hba->port_cfg.port_id); + return; + } + + /* + * for each HBA's R_Port(SQ), + * set state with been flushing or flush done + */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + parent_queue_mgr->parent_queues[index].parent_sq_info.port_in_flush = in_flush; + } + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + } +} + +/** + * hifc_clear_fetched_sq_wqe - Inform the chip to clear the WQE that is being + * processed by the chip. + * @v_hba : hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + union hifc_cmdqe_u cmdqe; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x4909, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)v_hba; + + /* + * The ROOT SQ cannot control the WQE in the empty queue of the ROOT SQ. + * Therefore, the ROOT SQ does not enqueue the WQE after the hardware + * obtains the. Link down after the wait mode is used. Therefore, + * the WQE of the hardware driver needs to enter the WQE of the queue + * after the Link down of the Link down is reported. 
+ */ + hifc_wait_root_sq_empty(v_hba); + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + hifc_build_cmdqe_common(&cmdqe, HIFCOE_TASK_T_BUFFER_CLEAR, 0); + cmdqe.buffer_clear.wd1.rx_id_start = hba->exit_base; + cmdqe.buffer_clear.wd1.rx_id_end = + hba->exit_base + hba->exit_count - 1; + cmdqe.buffer_clear.scqn = hba->default_scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) start clear all fetched wqe in start(0x%x) - end(0x%x) scqn(0x%x) stage(0x%x)", + hba->port_cfg.port_id, + cmdqe.buffer_clear.wd1.rx_id_start, + cmdqe.buffer_clear.wd1.rx_id_end, + cmdqe.buffer_clear.scqn, + hba->q_set_stage); + + /* Send BUFFER_CLEAR command via ROOT CMDQ */ + ret = hifc_root_cmdq_enqueue(hba, &cmdqe, + sizeof(cmdqe.buffer_clear)); + + return ret; +} + +/** + * hifc_clear_pending_sq_wqe -Inform the chip to clear the Pending Sq WQE that + * is being processed by the chip. + * @v_hba: hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_pending_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = 0; + unsigned int entry_cnt = 0; + unsigned int entry_cnt_max = 0; + unsigned int next_clr_sq = 0; + unsigned int cmdqe_len = 0; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *parent_qinfo; + struct hifcoe_cmdqe_flush_sq_info_s *entry = NULL; + union hifc_cmdqe_u *cmdqe = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + cmdqe = (union hifc_cmdqe_u *)kmalloc(HIFC_CMDQE_BUFF_LEN_MAX, + GFP_ATOMIC); + if (!cmdqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) malloc flush sq information buffer cmnd failed, stage(0x%x)", + hba->port_cfg.port_id, hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + memset(cmdqe, 0, HIFC_CMDQE_BUFF_LEN_MAX); + hifc_build_cmdqe_common(cmdqe, HIFCOE_TASK_T_FLUSH_SQ, 0); + cmdqe->flush_sq.wd0.wqe_type = HIFCOE_TASK_T_FLUSH_SQ; + cmdqe->flush_sq.wd0.sq_qid = HIFC_LSW(hba->default_sq_id); + cmdqe->flush_sq.wd1.scqn = HIFC_LSW(hba->default_scqn); + cmdqe->flush_sq.wd1.port_id = hba->port_index; + + /* + * The CMDQE can contain a maximum of Clear 253 SQ information at a time + */ + entry_cnt = 0; + entry_cnt_max = (HIFC_CMDQE_BUFF_LEN_MAX - sizeof(cmdqe->flush_sq)) / + sizeof(*entry); + entry = cmdqe->flush_sq.sq_info_entry; + next_clr_sq = hba->next_clearing_sq; + + for (rport_index = next_clr_sq; rport_index < UNF_HIFC_MAXRPORT_NUM; + rport_index++) { + parent_qinfo = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + spin_lock_irqsave(&parent_qinfo->parent_queue_state_lock, flag); + if (HIFC_RPORT_FLUSH_NOT_NEEDED(parent_qinfo)) { + spin_unlock_irqrestore( + &parent_qinfo->parent_queue_state_lock, flag); + next_clr_sq++; + continue; + } + entry->xid = parent_qinfo->parent_sq_info.context_id; + entry->cid = parent_qinfo->parent_sq_info.cache_id; + spin_unlock_irqrestore(&parent_qinfo->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) RPort[0x%x] flush pending SQ Entry: xid=0x%x, cid=0x%x", + hba->port_cfg.port_id, rport_index, + entry->xid, entry->cid); + + entry_cnt++; + entry++; + next_clr_sq++; + + if (entry_cnt >= entry_cnt_max) + break; + } + + if (entry_cnt == 0) { + /* If no SQ needs to be flushed, the Clear Done command is + * directly sent to the uP + */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) non SQ need flush wqe, clear done directly, stage (0x%x)", + 
hba->port_cfg.port_id, hba->q_set_stage); + + /* Sends the Clear Done command to the chip */ + ret = hifc_clear_sq_wqe_done(hba); + goto free_flush_sq_cmdqe; + } + + hba->next_clearing_sq = next_clr_sq; + cmdqe->flush_sq.wd0.entry_count = entry_cnt; + + if (rport_index == UNF_HIFC_MAXRPORT_NUM) + cmdqe->flush_sq.wd1.last_wqe = 1; + else + cmdqe->flush_sq.wd1.last_wqe = 0; + + /* Clear pending Queue */ + cmdqe_len = (unsigned int)(sizeof(cmdqe->flush_sq) + + entry_cnt * sizeof(*entry)); + ret = hifc_root_cmdq_enqueue(hba, cmdqe, (unsigned short)cmdqe_len); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) clear total 0x%x SQ in this CMDQE(last=%u), stage (0x%x)", + hba->port_cfg.port_id, entry_cnt, + cmdqe->flush_sq.wd1.last_wqe, hba->q_set_stage); + +free_flush_sq_cmdqe: + kfree(cmdqe); + + return ret; +} + +unsigned int hifc_wait_queue_set_flush_done(struct hifc_hba_s *v_hba) +{ + unsigned int flush_done_time = 0; + unsigned int ret = RETURN_OK; + + while ((v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + } + + if (v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) queue sets flush timeout with stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->q_set_stage); + + ret = UNF_RETURN_ERROR; + } + + return ret; +} + +static void hifc_disable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_disable(&scq_info->tasklet); + } +} + +static void hifc_disable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_disable(&rq_info->tasklet); + } +} + +void hifc_disable_queues_dispatch(struct hifc_hba_s *v_hba) +{ + hifc_disable_root_rq_schedule(v_hba); + hifc_disable_all_scq_schedule(v_hba); +} + +static void hifc_enable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_enable(&rq_info->tasklet); + } +} + +static void hifc_enable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_enable(&scq_info->tasklet); + } +} + +void hifc_enable_queues_dispatch(void *v_hba) +{ + hifc_enable_root_rq_schedule((struct hifc_hba_s *)v_hba); + hifc_enable_all_scq_schedule((struct hifc_hba_s *)v_hba); +} + +void hifc_clear_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_srq_info_s *srq_info = NULL; + + srq_info = &v_hba->els_srq_info; + + spin_lock_irqsave(&srq_info->srq_spin_lock, flag); + if ((srq_info->enable == UNF_FALSE) || + (srq_info->state == HIFC_CLEAN_DOING)) { + 
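The "253" ceiling quoted in hifc_clear_pending_sq_wqe() above follows directly from the CMDQE buffer arithmetic: HIFC_CMDQE_BUFF_LEN_MAX is 2040 bytes (defined later in hifc_queue.h), and assuming a 16-byte fixed flush-SQ part plus 8-byte xid/cid entries (those struct sizes are not visible in this hunk), (2040 - 16) / 8 = 253 entries fit in one command. A small sketch of that capacity calculation under those assumed sizes:

/* Assumed sizes, consistent with the "253 entries per CMDQE" comment. */
#define DEMO_CMDQE_BUFF_LEN_MAX		2040	/* = HIFC_CMDQE_BUFF_LEN_MAX */
#define DEMO_FLUSH_SQ_FIXED_LEN		16	/* assumed fixed header part */
#define DEMO_FLUSH_SQ_ENTRY_LEN		8	/* assumed xid/cid entry     */

/* (2040 - 16) / 8 == 253 */
static inline unsigned int demo_flush_sq_max_entries(void)
{
	return (DEMO_CMDQE_BUFF_LEN_MAX - DEMO_FLUSH_SQ_FIXED_LEN) /
	       DEMO_FLUSH_SQ_ENTRY_LEN;
}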
spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + return; + } + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DOING; + spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + hifc_send_clear_srq_cmd(v_hba, &v_hba->els_srq_info); + + /* wait for uCode to clear SRQ context, the timer is 30S */ + while ((srq_info->state != HIFC_CLEAN_DONE) && (index < 60)) { + msleep(500); + index++; + } + + if (srq_info->state != HIFC_CLEAN_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]HIFC Port(0x%x) clear els srq timeout", + v_hba->port_cfg.port_id); + } +} + +unsigned int hifc_wait_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + do { + ret = hifc_check_all_parent_queue_free(v_hba); + if (ret == RETURN_OK) + break; + + index++; + msleep(20); + } while (index < 1500); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) wait all parent queue state free timeout", + v_hba->port_cfg.port_id); + } + + return ret; +} + +void hifc_queue_pre_process(void *v_hba, int v_clean) +{ +#define HIFC_WAIT_LINKDOWN_EVENT_MS 500 + + /* From port reset & port remove */ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + /* 1. Wait for 2s and wait for QUEUE to be FLUSH Done. */ + if (hifc_wait_queue_set_flush_done(hba) != RETURN_OK) { + /* + * During the process of removing the card, if the port is + * disabled and the flush done is not available, the chip is + * powered off or the pcie link is disconnected. In this case, + * you can proceed with the next step. + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]HIFC Port(0x%x) clean queue sets timeout", + hba->port_cfg.port_id); + } + + /* + * 2. Port remove: + * 2.1 free parent queue + * 2.2 clear & destroy ELS/SIRT SRQ + */ + if (v_clean == UNF_TRUE) { + if (hifc_wait_all_parent_queue_free(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]HIFC Port(0x%x) free all parent queue timeout", + hba->port_cfg.port_id); + } + + /* clear & than destroy ELS SRQ */ + hifc_clear_els_srq(hba); + } + + msleep(HIFC_WAIT_LINKDOWN_EVENT_MS); + + /* + * 3. The internal resources of the port chip are flush done. However, + * there may be residual scqe or rq in the queue. The scheduling is + * forcibly refreshed once. + */ + hifc_wait_all_queues_empty(hba); + + /* + * 4. 
Disable tasklet scheduling for upstream queues on the software + * layer + */ + hifc_disable_queues_dispatch(hba); +} + +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + if ((v_offload_parent_queue->offload_state != + HIFC_QUEUE_STATE_INITIALIZED) && + (v_offload_parent_queue->offload_state != HIFC_QUEUE_STATE_FREE)) { + memcpy(&v_offload_parent_queue->parent_sq_info.delay_sqe.sqe, + v_sqe, sizeof(struct hifc_root_sqe_s)); + v_offload_parent_queue->parent_sq_info.delay_sqe.start_jiff = + jiffies; + v_offload_parent_queue->parent_sq_info.delay_sqe.time_out = + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER]; + v_offload_parent_queue->parent_sq_info.delay_sqe.valid = + UNF_TRUE; + v_offload_parent_queue->parent_sq_info.delay_sqe.rport_index = + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + v_offload_parent_queue->parent_sq_info.delay_sqe.sid = + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK; + v_offload_parent_queue->parent_sq_info.delay_sqe.did = + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK; + + spin_unlock_irqrestore( + &v_offload_parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) delay send ELS, OXID(0x%x), RXID(0x%x)", + ((struct hifc_hba_s *)v_hba)->port_cfg.port_id, + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX], + UNF_GET_OXID(v_pkg), UNF_GET_RXID(v_pkg)); + + return RETURN_OK; + } + + spin_unlock_irqrestore(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; +} + +void hifc_pop_delay_sqe(struct hifc_hba_s *v_hba, + struct hifc_delay_sqe_ctrl_info_s *v_sqe_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + + /* + * According to the sequence, the rport index id is reported and then + * the sqe of the new link setup request is delivered. + */ + if (v_sqe_info->valid != UNF_TRUE) + return; + if (jiffies_to_msecs(jiffies - v_sqe_info->start_jiff) >= + v_sqe_info->time_out) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out); + } + + delay_rport_index = v_sqe_info->rport_index; + if (delay_rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, rport index(0x%x) is invalid", + v_hba->port_cfg.port_id, + delay_rport_index); + + return; + } + + parent_queue = + &v_hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before the root sq is delivered, check the status again to + * ensure that the initialization status is not uninstalled. Other + * states are not processed and are discarded directly. 
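hifc_push_delay_sqe() above parks the root SQE together with the current jiffies and the exchange timer taken from the package, and hifc_pop_delay_sqe() later measures how long the SQE has been parked against that timer (in the driver the expiry only produces a warning before the replay is attempted). The age test is ordinary jiffies arithmetic; a standalone sketch with illustrative names:

#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_delay_sqe {
	bool valid;
	unsigned long start_jiff;	/* jiffies when the SQE was parked */
	unsigned int timeout_ms;	/* exchange timer from the package */
};

/* true if the parked SQE has waited longer than its exchange timer */
static bool demo_delay_sqe_aged_out(const struct demo_delay_sqe *sqe)
{
	if (!sqe->valid)
		return false;

	return jiffies_to_msecs(jiffies - sqe->start_jiff) >= sqe->timeout_ms;
}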
+ */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + offload_state = parent_queue->offload_state; + + /* Before re-enqueuing the rootsq, check whether the offload status and + * connection information is consistent to prevent the old request from + * being sent after the connection status is changed. + */ + if ((offload_state == HIFC_QUEUE_STATE_INITIALIZED) && + (parent_queue->parent_sq_info.local_port_id == v_sqe_info->sid) && + (parent_queue->parent_sq_info.remote_port_id == v_sqe_info->did) && + HIFC_CHECK_XID_MATCHED( + parent_queue->parent_sq_info.context_id, + v_sqe_info->sqe.task_section.fc_dw4.parent_xid)) { + parent_queue->offload_state = HIFC_QUEUE_STATE_OFFLOADING; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay sqe to root sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + + ret = hifc_root_sq_enqueue(v_hba, &v_sqe_info->sqe); + if (ret != RETURN_OK) { + spin_lock_irqsave( + &parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) + parent_queue->offload_state = offload_state; + + if (parent_queue->parent_sq_info.destroy_sqe.valid == + UNF_TRUE) { + memcpy( + &destroy_sqe_info, + &parent_queue->parent_sq_info.destroy_sqe, + sizeof(struct hifc_destroy_ctrl_info_s)); + + parent_queue->parent_sq_info.destroy_sqe.valid = + UNF_FALSE; + } + + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, + &destroy_sqe_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) pop up delay sqe to root sq fail, recover offload state 0x%x", + v_hba->port_cfg.port_id, + parent_queue->offload_state); + } + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + } +} + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo, + struct unf_rport_info_s *v_rport_info) +{ + v_parent_qinfo->parent_sq_info.destroy_sqe.valid = UNF_TRUE; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.time_out = + HIFC_SQ_DEL_STAGE_TIMEOUT_MS; + v_parent_qinfo->parent_sq_info.destroy_sqe.start_jiff = jiffies; + + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id = + v_rport_info->nport_id; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name = + v_rport_info->port_name; +} + +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info) +{ + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum 
hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + + hba = (struct hifc_hba_s *)v_hba; + + if (v_destroy_sqe_info->valid != UNF_TRUE) + return; + + if (jiffies_to_msecs(jiffies - v_destroy_sqe_info->start_jiff) < + v_destroy_sqe_info->time_out) { + delay_rport_index = v_destroy_sqe_info->rport_index; + parent_queue = + &hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before delivery, check the status again to ensure that the + * initialization status is not uninstalled. Other states are + * not processed and are discarded directly. + */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + offload_state = parent_queue->offload_state; + if ((offload_state == HIFC_QUEUE_STATE_OFFLOADED) || + (offload_state == HIFC_QUEUE_STATE_INITIALIZED)) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port 0x%x pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + offload_state); + ret = hifc_free_parent_resource( + hba, + &v_destroy_sqe_info->rport_info); + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + } + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay destroy parent sq failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, rport nport id 0x%x,offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + v_destroy_sqe_info->rport_info.nport_id, + offload_state); + } +} + +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct hifc_delay_sqe_ctrl_info_s sqe_info; + + memset(&sqe_info, 0, sizeof(struct hifc_delay_sqe_ctrl_info_s)); + hba = (struct hifc_hba_s *)v_hba; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) begin to free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[info]Port(0x%x) duplicate free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return; + } + + if (v_parent_queue_info->parent_sq_info.delay_sqe.valid == UNF_TRUE) { + memcpy(&sqe_info, + &v_parent_queue_info->parent_sq_info.delay_sqe, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + } + + rport_index = v_parent_queue_info->parent_sq_info.rport_index; + + /* The Parent Contexe and SQ information is released. 
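hifc_pop_delay_sqe() and hifc_free_parent_queue_info() both follow the same locking discipline: inspect the parent queue and copy any pending delay/destroy SQE into a local snapshot while parent_queue_state_lock is held, then drop the lock and act on the private copy, because the follow-up work may sleep or take the lock again. A condensed sketch of that copy-then-act pattern (names are illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pending {
	bool valid;
	u32 rport_index;
};

struct demo_parent_queue {
	spinlock_t state_lock;
	struct demo_pending pending;
};

static void demo_consume_pending(struct demo_parent_queue *pq)
{
	struct demo_pending snapshot = { 0 };
	unsigned long flags;

	spin_lock_irqsave(&pq->state_lock, flags);
	if (pq->pending.valid) {
		snapshot = pq->pending;		/* copy out under the lock */
		pq->pending.valid = false;	/* consume it exactly once */
	}
	spin_unlock_irqrestore(&pq->state_lock, flags);

	if (!snapshot.valid)
		return;
	/* act on the snapshot here, with the lock released */
}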
After + * initialization, the Parent Contexe and SQ information is associated + * with the sq in the queue of the parent + */ + hifc_free_parent_sq(hba, v_parent_queue_info); + + /* The initialization of all queue id is invalid */ + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RELEASE_RPORT_INDEX, + (void *)&rport_index); + hifc_pop_delay_sqe(hba, &sqe_info); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent sq with rport_index(0x%x) failed", + hba->port_cfg.port_id, rport_index); + } +} + +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn) +{ + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + /* + * The reset session command does not occupy xid. Therefore, + * 0xffff can be used to align with the microcode. + */ + v_sqe->ts_sl.task_type = HIFC_SQE_SESS_RST; + v_sqe->ts_sl.local_xid = 0xffff; + v_sqe->ts_sl.wd0.conn_id = (unsigned short)(v_sq->rport_index); + v_sqe->ts_sl.wd0.remote_xid = 0xffff; + + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_start = hba->exit_base; + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + v_sqe->ts_sl.cont.reset_session.wd1.reset_did = v_sq->remote_port_id; + v_sqe->ts_sl.cont.reset_session.wd1.mode = v_mode; + v_sqe->ts_sl.cont.reset_session.wd2.reset_sid = v_sq->local_port_id; + v_sqe->ts_sl.cont.reset_session.wd3.scqn = scqn; + + hifc_build_common_wqe_ctrls(&v_sqe->ctrl_sl, + sizeof(struct hifcoe_sqe_ts_s) / + HIFC_WQE_SECTION_CHUNK_SIZE); +} + +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + enum hifc_session_reset_mode_e v_mode) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifcoe_sqe_s rst_sess_sqe; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int sts_scqn = 0; + + memset(&rst_sess_sqe, 0, sizeof(struct hifcoe_sqe_s)); + sq = &v_parent_queue_info->parent_sq_info; + sts_scqn = ((struct hifc_hba_s *)v_hba)->default_scqn; + hifc_build_session_rst_wqe(v_hba, sq, &rst_sess_sqe, v_mode, sts_scqn); + + /* Run the sq command to issue the reset session command to the + * microcode, that is, the last command. + */ + ret = hifc_parent_sq_enqueue(sq, &rst_sess_sqe); + + return ret; +} + +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work) +{ + struct hifc_hba_s *hba = NULL; + + hba = container_of(work, struct hifc_hba_s, delay_info.del_work.work); + + /* + * If the frame is not processed, the frame is pushed to the CM layer: + * The frame may have been processed when the root rq receives data. 
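hifc_rcvd_els_from_srq_time_out() is wired up as a delayed-work handler and recovers its hba with container_of() on the embedded delayed_work (delay_info.del_work). The initialisation and arming of that work item are not part of this hunk, so the following is a self-contained illustration of the pattern rather than the driver's exact setup:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_hba {
	struct delayed_work del_work;
	/* ... other per-port state ... */
};

static void demo_del_work_fn(struct work_struct *work)
{
	struct demo_hba *hba =
		container_of(work, struct demo_hba, del_work.work);

	(void)hba;	/* handle the timeout for this hba here */
}

static void demo_arm_del_work(struct demo_hba *hba,
			      struct workqueue_struct *wq,
			      unsigned int timeout_ms)
{
	INIT_DELAYED_WORK(&hba->del_work, demo_del_work_fn);
	queue_delayed_work(wq, &hba->del_work, msecs_to_jiffies(timeout_ms));
}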
+ */ + if (hba->delay_info.srq_delay_flag) { + hifc_rcv_els_cmnd( + hba, &hba->delay_info.pkg, + hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr, + 0, UNF_FALSE); + hba->delay_info.srq_delay_flag = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) srq delay work timeout, send saved plogi to CM", + hba->port_cfg.port_id); + } +} + +unsigned int hifc_rport_session_rst(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + /* NOT USED NOW */ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_rport_info, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x free parent resource failed, invalid rport index %u, Rport NPortId 0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port 0x%x parent sq reset session, rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x,ctx id 0x%x, cid 0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id, + v_parent_queue_info->parent_sq_info.context_id, + v_parent_queue_info->parent_sq_info.cache_id); + + /* this scenario does not exist */ + (void)queue_delayed_work( + hba->work_queue, + &v_parent_queue_info->parent_sq_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_DEL_STAGE_TIMEOUT_MS)); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, flag); + + /* + * The current session reset is in clear I/O mode, and the + * connection resources are not deleted + */ + ret = hifc_send_session_rst_cmd(hba, + v_parent_queue_info, + HIFC_SESS_RST_DELETE_IO_ONLY); + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port 0x%x parent sq is not offloaded, no need to reset session, rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, flag); + + ret = RETURN_OK; + } + + return ret; +} + +/** + * hifc_flush_ini_resp_queue - Flush the responses that have already been + * fetched into the status SCQs, without waiting for + * any subsequent processing. This is the main + * difference between this function and a full SCQ flush.
+ * @v_hba: hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_flush_ini_resp_queue(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)v_hba; + + /* + * Although this function is called, the original HIFC_FlushScq is based + * on the scenario where the port is disabled. That is, the function is + * executed and the SCQ is empty. However, because the port is not + * disabled in the current scenario, it can only indicate that a batch + * of processing is completed. + */ + hifc_flush_sts_scq(hba); + + return RETURN_OK; +} + +/* + * Function Name : hifc_handle_aeq_queue_error + * Function Description: Process the queue error event sent by the chip + * through AEQ. + * Input Parameters : *v_hba, + * : *v_aeq_msg + * Output Parameters : N/A + * Return Type : void + */ +static void hifc_handle_aeq_queue_error(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeq_msg) +{ + unsigned int sts_scqn_local = 0; + unsigned int full_ci = INVALID_VALUE32; + unsigned int full_ci_owner = INVALID_VALUE32; + struct hifc_scq_info_s *scq_info = NULL; + struct hifcoe_aqe_data_s *aeq_msg = NULL; + + aeq_msg = v_aeq_msg; + + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(aeq_msg->wd0.conn_id); + scq_info = &v_hba->scq_info[sts_scqn_local]; + full_ci = scq_info->ci; + full_ci_owner = scq_info->ci_owner; + + /* + * Currently, Flush is forcibly set to StsScq. No matter whether scq is + * processed, AEQE is returned + */ + tasklet_schedule(&scq_info->tasklet); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) LocalScqn(0x%x) CqmScqn(0x%x) is full, force flush CI from (%d|0x%x) to (%d|0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.conn_id, + sts_scqn_local, scq_info->scqn, + full_ci_owner, full_ci, scq_info->ci_owner, scq_info->ci); +} + +void hifc_process_aeqe(void *v_srv_handle, + unsigned char event_type, + u64 event_val) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_srv_handle; + struct hifcoe_aqe_data_s aeq_msg; + unsigned long long aeq_info = 0; + unsigned char event_code = INVALID_VALUE8; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, hba, return); + + aeq_info = cpu_to_be64(event_val); + memcpy(&aeq_msg, (struct hifcoe_aqe_data_s *)&aeq_info, + sizeof(struct hifcoe_aqe_data_s)); + hifc_big_to_cpu32(&aeq_msg, sizeof(struct hifcoe_aqe_data_s)); + event_code = (unsigned char)aeq_msg.wd0.evt_code; + + switch (event_type) { + case FC_AEQ_EVENT_QUEUE_ERROR: + hifc_handle_aeq_queue_error(hba, &aeq_msg); + break; + + case FC_AEQ_EVENT_WQE_FATAL_ERROR: + UNF_LOWLEVEL_PORT_EVENT(ret, + hba->lport, + UNF_PORT_ABNORMAL_RESET, + NULL); + break; + + case FC_AEQ_EVENT_CTX_FATAL_ERROR: + break; + + case FC_AEQ_EVENT_OFFLOAD_ERROR: + ret = hifc_handle_aeq_offload_err(hba, &aeq_msg); + break; + + default: + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) receive a unsupported AEQ EventType(0x%x) EventVal(0x%llx).", + hba->port_cfg.port_id, event_type, + (unsigned long long)event_val); + return; + } + + if (event_code < FC_AEQ_EVT_ERR_CODE_BUTT) + HIFC_AEQ_ERR_TYPE_STAT(hba, aeq_msg.wd0.evt_code); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port(0x%x) receive AEQ EventType(0x%x) EventVal(0x%llx) EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x) %s", + hba->port_cfg.port_id, event_type, + (unsigned long long)event_val, event_code, + aeq_msg.wd0.conn_id, 
aeq_msg.wd1.xid, + (ret == UNF_RETURN_ERROR) ? "ERROR" : "OK"); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_queue.h b/drivers/scsi/huawei/hifc/hifc_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..cc3e753be7a6c036fb4f0cebcbebbb0cf9fd83f5 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.h @@ -0,0 +1,1363 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_QUEUE_H__ +#define __HIFC_QUEUE_H__ + +#include "hifc_wqe.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_cqm_main.h" + +#define WQE_MARKER_0 0x0 +#define WQE_MARKER_6B 0x6b + +#define HIFC_SQE_SIZE 128 +#define HIFC_MIN_WP_NUM 2 + +/* Counter */ +#define HIFC_STAT_SESSION_IO + +/*************** PARENT SQ&Context defines *******************************/ +#define HIFC_MAX_MSN (65535) +#define HIFC_MSN_MASK (0xffff000000000000LL) +#define HIFC_SQE_TS_SIZE (72) +#define HIFC_SQE_FIRST_OBIT_DW_POS (0) +#define HIFC_SQE_SECOND_OBIT_DW_POS (30) +#define HIFC_SQE_OBIT_SET_MASK_BE (0x80) +#define HIFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f) +#define HIFC_MAX_SQ_TASK_TYPE_CNT (128) + +/* + * Note: if the location of flush done bit changes, the definition must be + * modifyed again + */ +#define HIFC_CTXT_FLUSH_DONE_DW_POS (58) +#define HIFC_CTXT_FLUSH_DONE_MASK_BE (0x4000) + +#define HIFC_GET_SQ_HEAD(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->next,\ + struct hifc_sq_wqe_page_s, entry_wpg) +#define HIFC_GET_SQ_TAIL(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->prev, \ + struct hifc_sq_wqe_page_s, entry_wpg) +#ifdef HIFC_STAT_SESSION_IO +#define HIFC_SQ_IO_STAT(v_sq, io_type) \ + (atomic_inc(&(v_sq)->io_stat[io_type])) +#define HIFC_SQ_IO_STAT_READ(v_sq, io_type) \ + (atomic_read(&(v_sq)->io_stat[io_type])) +#endif +#define HIFC_GET_QUEUE_CMSN(v_sq)\ + ((unsigned int)(be64_to_cpu(((((v_sq)->queue_header)->ci_record) \ + & HIFC_MSN_MASK)))) +#define HIFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \ + (unsigned short)(((unsigned int)(head_start_cmsn) +\ + (unsigned int)(wqe_num_per_buf) - 1) % (HIFC_MAX_MSN + 1)) +#define HIFC_MSN_INC(msn) (((HIFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1)) +#define HIFC_MSN_DEC(msn) ((0 == (msn)) ? 
(HIFC_MAX_MSN) : ((msn) - 1)) +#define HIFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \ + (unsigned int)((((unsigned int)(end_cmsn) + (HIFC_MAX_MSN)) - \ + (unsigned int)(start_cmsn)) % (HIFC_MAX_MSN + 1)) + +/******************* ROOT SQ&RQ defines ***********************************/ +#define HIFC_ROOT_Q_CTX_SIZE (48) +#define HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT (44) +#define HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT (12) +#define HIFC_ROOT_Q_CTX_CLA_HI_SHIFT (41) +#define HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT (9) +#define HIFC_ROOT_TSO_LRO_SPACE (0) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MAX (3) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MIN (1) +#define HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD (2) +#define HIFC_CI_WQE_PAGE_HIGH_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT) & 0xffffffff) +#define HIFC_CI_WQE_PAGE_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT) & 0xffffffff) +#define HIFC_CLA_HIGH_ADDR(x)\ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_HI_SHIFT) & 0xffffffff) +#define HIFC_CLA_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT) & 0xffffffff) + +/*********************** ROOT SQ defines ***********************************/ +#define HIFC_ROOT_SQ_NUM (1) +#define HIFC_ROOT_SQ_DEPTH (2048) +#define HIFC_ROOT_SQ_WQEBB (64) +#define HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_SQ_LOOP_OWNER (1) +#define HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT (2) +#define HIFC_DOORBELL_SQ_TYPE (1) +#define HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT (8) +#define HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK (0xFF) +#define HIFC_INT_NUM_PER_QUEUE (1) +#define HIFC_INT_ENABLE (1) +#define HIFC_ROOT_CFG_SQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_SQ (0) +#define HIFC_GET_ROOT_SQ_CI_ADDR(addr, index) \ + ((addr) + (unsigned int)((index) * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE)) +#define HIFC_ROOT_SQ_CTX_OFFSET(q_num, q_id) \ + ((HIFC_ROOT_TSO_LRO_SPACE * 2 * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/********************** ROOT RQ defines ***********************************/ +#define HIFC_ROOT_RQ_NUM (1) +#define HIFC_ROOT_RQ_DEPTH (1024) +#define HIFC_ROOT_RQ_WQEBB (32) +#define HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_RQ_LOOP_OWNER (1) +#define HIFC_ROOT_RQ_RECV_BUFF_SIZE (1024) +#define HIFC_ROOT_Q_INT_ID_MAX (1024) /* 10bit */ +#define HIFC_ROOT_CFG_RQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_RQ (1) +#define HIFC_RQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_ROOT_RQ_CTX_OFFSET(q_num, q_id)\ + (((HIFC_ROOT_TSO_LRO_SPACE * 2 + HIFC_ROOT_Q_CTX_SIZE) * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/************************** SCQ defines ***********************************/ +#define HIFC_SCQ_INT_ID_MAX (2048) /* 11BIT */ +#define HIFC_SCQE_SIZE (64) +#define HIFC_CQE_GPA_SHIFT (4) +#define HIFC_NEXT_CQE_GPA_SHIFT (12) +/* 1-Update Ci by Tile, 0-Update Ci by Hardware */ +#define HIFC_PMSN_CI_TYPE_FROM_HOST (0) +#define HIFC_PMSN_CI_TYPE_FROM_UCODE (1) +#define HIFC_ARMQ_IDLE (0) +#define HIFC_CQ_INT_MODE (2) +#define HIFC_CQ_HEADER_OWNER_SHIFT (15) + +/* + * SCQC_CQ_DEPTH: 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k. 
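The HIFC_CI_WQE_PAGE_HIGH_ADDR()/HIFC_CI_WQE_PAGE_LOW_ADDR() macros above split a 64-bit WQE-page DMA address for the root queue context: bits 63..44 land in the 20-bit ci_wqe_page_addr_hi field of the context structures further down, bits 43..12 in the 32-bit ci_wqe_page_addr_lo word, and the low 12 bits are dropped, i.e. the page is assumed to be 4 KiB aligned. A round-trip sketch of that split (demo names, not the driver's):

#include <linux/types.h>

/* Split a 4 KiB-aligned WQE page address into the 20-bit high part and
 * the 32-bit low part used by the root queue context, and join it back.
 */
static inline void demo_split_wqe_page_addr(u64 addr, u32 *hi20, u32 *lo32)
{
	*hi20 = (u32)((addr >> 44) & 0xfffff);
	*lo32 = (u32)((addr >> 12) & 0xffffffff);
}

static inline u64 demo_join_wqe_page_addr(u32 hi20, u32 lo32)
{
	return ((u64)hi20 << 44) | ((u64)lo32 << 12);
}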
+ * include LinkWqe + */ +#define HIFC_CMD_SCQ_DEPTH (4096) +#define HIFC_STS_SCQ_DEPTH (8192) + +#define HIFC_CMD_SCQC_CQ_DEPTH (hifc_log2n(HIFC_CMD_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQC_CQ_DEPTH (hifc_log2n(HIFC_STS_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_HOST + +#define HIFC_CMD_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_UCODE +#define HIFC_SCQ_INTR_LOW_LATENCY_MODE 0 +#define HIFC_SCQ_INTR_POLLING_MODE 1 + +#define HIFC_CQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_SESSION_SCQ_NUM (16) + +/* + * SCQ[0, 2, 4 ...]CMD SCQ,SCQ[1, 3, 5 ...]STS SCQ,SCQ[HIFC_TOTAL_SCQ_NUM-1] + * Defaul SCQ + */ +#define HIFC_CMD_SCQN_START (0) +#define HIFC_STS_SCQN_START (1) +#define HIFC_SCQS_PER_SESSION (2) + +#define HIFC_TOTAL_SCQ_NUM (HIFC_SESSION_SCQ_NUM + 1) + +#define HIFC_SCQ_IS_STS(scq_index) \ + (((scq_index) % HIFC_SCQS_PER_SESSION) || \ + ((scq_index) == HIFC_SESSION_SCQ_NUM)) +#define HIFC_SCQ_IS_CMD(scq_index)\ + (!HIFC_SCQ_IS_STS(scq_index)) +#define HIFC_RPORTID_TO_CMD_SCQN(rport_index) \ + (((rport_index) * HIFC_SCQS_PER_SESSION) % HIFC_SESSION_SCQ_NUM) +#define HIFC_RPORTID_TO_STS_SCQN(rport_index) \ + ((((rport_index) * HIFC_SCQS_PER_SESSION) + 1) % HIFC_SESSION_SCQ_NUM) + +/************************** SRQ defines ***********************************/ +#define HIFC_SRQE_SIZE (32) +#define HIFC_SRQ_INIT_LOOP_O (1) +#define HIFC_QUEUE_RING (1) +#define HIFC_SRQ_ELS_DATA_NUM (1) +#define HIFC_SRQ_ELS_SGE_LEN (256) +#define HIFC_SRQ_ELS_DATA_DEPTH (4096) + +#define HIFC_IRQ_NAME_MAX (30) + +/* Support 2048 sessions(xid) */ +#define HIFC_CQM_XID_MASK (0x7ff) + +#define HIFC_QUEUE_FLUSH_DOING (0) +#define HIFC_QUEUE_FLUSH_DONE (1) +#define HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000) +#define HIFC_QUEUE_FLUSH_WAIT_MS (2) + +/************************* RPort defines ***********************************/ +#define HIFC_EXIT_STRIDE (4096) +#define UNF_HIFC_MAXRPORT_NUM (2048) +#define HIFC_RPORT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state != HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo)\ + (((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_INITIALIZED) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADING) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_FREE)) +#define HIFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \ + (((sq_xid) & HIFC_CQM_XID_MASK) == ((sqe_xid) & HIFC_CQM_XID_MASK)) +#define HIFC_PORT_MODE_TGT (0) /* Port mode */ +#define HIFC_PORT_MODE_INI (1) +#define HIFC_PORT_MODE_BOTH (2) + +/********** Hardware Reserved Queue Info defines ***************************/ +#define HIFC_HRQI_SEQ_ID_MAX (255) +#define HIFC_HRQI_SEQ_INDEX_MAX (64) +#define HIFC_HRQI_SEQ_INDEX_SHIFT (6) +#define HIFC_HRQI_SEQ_SEPCIAL_ID (3) +#define HIFC_HRQI_SEQ_INVALID_ID (~0LL) + +/************************* OQID defines ***********************************/ + +#define HIFC_OQID_HOST_XID_OFFSET (5) +#define HIFC_OQID_HOST_RW_OFFSET (4) +#define HIFC_OQID_HOST_ST_OFFSET (2) +#define HIFC_OQID_HOST_OQID_LEN (11) +#define HIFC_OQID_HOST_READ_FROM_HOST (0UL) +#define HIFC_OQID_HOST_WRITE_TO_HOST (1) +#define HIFC_CPI_CHNL_ID_XOE_READ (1UL) +#define HIFC_CPI_CHNL_ID_XOE_WRITE (3UL) +#define HIFC_SERVICE_TYPE_FC_FCOE (2) +/********************* sdk config defines ***********************************/ +#define HIFC_CNTX_SIZE_256B 256 +#define HIFC_QUEUE_LINK_STYLE 0 +#define HIFC_PACKET_COS_FC_CMD 0 +#define HIFC_PACKET_COS_FC_DATA 1 +#define 
HIFC_DB_ARM_DISABLE 0 +#define HIFC_DMA_ATTR_OFST 0 +#define HIFC_PCIE_TEMPLATE 0 +#define HIFC_PCIE_RELAXED_ORDERING 1 +#define HIFC_OWNER_DRIVER_PRODUCT 1 +#define HIFC_CMDQE_BUFF_LEN_MAX 2040 +#define HIFC_CNTX_SIZE_T_256B 0 + +#define HIFC_OQID_IO_HOST_SET(xid, rw, cidx, vf_id, m, oqid) \ + { \ + oqid = (unsigned short)(((unsigned short)\ + ((xid) << HIFC_OQID_HOST_XID_OFFSET)) \ + | ((unsigned short)((rw) << HIFC_OQID_HOST_RW_OFFSET)) \ + | ((unsigned short)(HIFC_SERVICE_TYPE_FC_FCOE << \ + HIFC_OQID_HOST_ST_OFFSET)) | (cidx)); \ + oqid = (unsigned short)\ + (((unsigned short)(oqid & (0x7ff >> (m))))\ + | ((unsigned short)((vf_id) << \ + (HIFC_OQID_HOST_OQID_LEN - (m))))); \ + } + +#define HIFC_OQID_RD(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_READ_FROM_HOST,\ + HIFC_CPI_CHNL_ID_XOE_READ, vf_id, m, oq_id) + +#define HIFC_OQID_WR(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_WRITE_TO_HOST,\ + HIFC_CPI_CHNL_ID_XOE_WRITE, vf_id, m, oq_id) + +enum hifc_session_reset_mode_e { + HIFC_SESS_RST_DELETE_IO_ONLY = 1, + HIFC_SESS_RST_DELETE_CONN_ONLY = 2, + HIFC_SESS_RST_DELETE_IO_CONN_BOTH = 3, + HIFC_SESS_RST_MODE_BUTT +}; + +/* linkwqe */ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 + +/****************** ROOT SQ&RQ&CTX defines ****************************/ +struct nic_tx_doorbell { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 srv_type : 5; + u32 cos : 3; + u32 c_flag : 1; + u32 rsvd0 : 5; + u32 queue_id : 10; + u32 pi_high : 8; +#else + u32 pi_high : 8; + u32 queue_id : 10; + u32 rsvd0 : 5; + u32 c_flag : 1; + u32 cos : 3; + u32 srv_type : 5; +#endif + } bs0; + u32 dw0; + }; + + u32 rsvd1; +}; + +struct hifc_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +/* + * nic_sq_ctx_1822 table define + */ +struct hifc_sq_ctxt { + union { + struct sq_ctx_dw0 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* whether generate CEQ */ + u32 ceq_arm : 1; + u32 rsvd1 : 7; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 global_sq_id : 10; + u32 ceq_num : 5; + u32 pkt_template : 6; + u32 rsvd2 : 2; +#else + u32 rsvd2 : 2; + u32 pkt_template : 6; + u32 ceq_num : 5; + u32 global_sq_id : 10; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 rsvd1 : 7; + /* whether generate CEQ */ + u32 ceq_arm : 1; +#endif + } sq_ctx_dw0; + u32 dw0; + }; + + union { + struct sq_ctx_dw1 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_template : 6; + u32 rsvd3 : 2; + u32 owner : 1; + /* customer index */ + u32 ci : 12; + u32 tso_doing : 1; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + /* number of sge processing */ + u32 processing_sge : 3; + u32 rsvd4 : 1; +#else + u32 rsvd4 : 1; + /* number of sge processing */ + u32 processing_sge : 3; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + u32 tso_doing : 1; + /* customer index */ + u32 ci : 12; + u32 owner : 1; + u32 rsvd3 : 2; + u32 wqe_template : 6; +#endif + } sq_ctx_dw1; + u32 dw1; + }; + + union { + struct sq_ctx_dw2 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd5 : 12; + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; + u32 rsvd5 : 12; +#endif + } sq_ctx_dw2; + u32 dw2; + }; + + u32 ci_wqe_page_addr_lo; + + union { + struct sq_ctx_dw4 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * The minimum prefetch WQE cacheline 
number of this SQ + */ + u32 prefetch_min : 7; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + /* + * The minimum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_min : 7; +#endif + } sq_ctx_dw4; + u32 dw4; + }; + + union { + struct sq_ctx_dw5 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd6 : 31; + u32 prefetch_owner : 1; +#else + u32 prefetch_owner : 1; + u32 rsvd6 : 31; +#endif + } sq_ctx_dw5; + u32 dw5; + }; + + union { + struct sq_ctx_dw6 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + u32 prefetch_ci_wqe_addr_hi : 20; +#else + u32 prefetch_ci_wqe_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } sq_ctx_dw6; + u32 dw6; + }; + + u32 prefetch_ci_wqe_addr_lo; + + union { + struct sq_ctx_dw8 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* processed length of current seg */ + u32 processed_seg_len : 16; + u32 rsvd7 : 16; +#else + u32 rsvd7 : 16; + /* processed length of current seg */ + u32 processed_seg_len : 16; +#endif + } sq_ctx_dw8; + u32 dw8; + }; + + u32 qsf; + + union { + struct sq_ctx_dw10 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd8 : 9; + /* CI CLA table address */ + u32 cla_addr_hi : 23; +#else + /* CI CLA table address */ + u32 cla_addr_hi : 23; + u32 rsvd8 : 9; +#endif + } sq_ctx_dw10; + u32 dw10; + }; + + u32 cla_addr_lo; +}; + +struct hifc_sq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_sq_ctxt sq_ctx[HIFC_ROOT_CFG_SQ_NUM_MAX]; +}; + +/* + * nic_rq_ctx_1822 table define + */ +struct hifc_rq_ctxt { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 max_count : 10; + u32 cqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 wqe_tmpl : 6; + u32 psge_valid : 1; + u32 rsvd1 : 1; + u32 owner : 1; + u32 ceq_en : 1; +#else + u32 ceq_en : 1; + u32 owner : 1; + u32 rsvd1 : 1; + u32 psge_valid : 1; + u32 wqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 cqe_tmpl : 6; + u32 max_count : 10; +#endif + } bs; + u32 dw0; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; +#endif + } bs0; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. + */ + u32 ceq_arm : 1; + u32 eq_id : 5; + u32 rsvd2 : 4; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + u32 rsvd2 : 4; + u32 eq_id : 5; + /* CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. 
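All of the context structures in this header declare their bitfields twice, once per host byte order, with the field order reversed: on the usual ABIs the compiler allocates bitfields starting from the most significant bit on big-endian targets and from the least significant bit on little-endian ones, so mirroring the declaration keeps each field on the same bits of the 32-bit dword, and the unioned dwN member gives raw access to that dword for copying or byte-swapping. A reduced example of the idiom:

#include <linux/types.h>

struct demo_ctx_dw {
	union {
		struct {
/* same byte-order test as used throughout this header */
#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
			u32 owner : 1;
			u32 rsvd  : 19;
			u32 ci    : 12;
#else
			u32 ci    : 12;
			u32 rsvd  : 19;
			u32 owner : 1;
#endif
		} bs;
		u32 dw0;	/* raw view of the same 32 bits */
	};
};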
+ */ + u32 ceq_arm : 1; +#endif + } bs1; + u32 dw1; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* consumer index */ + u32 ci : 12; + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; + /* consumer index */ + u32 ci : 12; +#endif + } bs2; + u32 dw2; + }; + + /* WQE page address of current CI point to, low part */ + u32 ci_wqe_page_addr_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_min : 7; + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + u32 prefetch_max : 11; + u32 prefetch_min : 7; +#endif + } bs3; + u32 dw3; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 : 31; + /* ownership of WQE */ + u32 prefetch_owner : 1; +#else + /* ownership of WQE */ + u32 prefetch_owner : 1; + u32 rsvd3 : 31; +#endif + } bs4; + u32 dw4; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; +#else + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } bs5; + u32 dw5; + }; + + /* low part */ + u32 prefetch_ci_wqe_page_addr_lo; + /* host mem GPA, high part */ + u32 pi_gpa_hi; + /* host mem GPA, low part */ + u32 pi_gpa_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd4 : 9; + u32 ci_cla_tbl_addr_hi : 23; +#else + u32 ci_cla_tbl_addr_hi : 23; + u32 rsvd4 : 9; +#endif + } bs6; + u32 dw6; + }; + + u32 ci_cla_tbl_addr_lo; +}; + +struct hifc_rq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_rq_ctxt rq_ctx[HIFC_ROOT_CFG_RQ_NUM_MAX]; +}; + +struct hifc_root_qsf_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* mss */ + u32 mss : 14; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* payload offset. it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* reserved */ + u32 route_to_ucode : 2; +#else + /* reserved */ + u32 route_to_ucode : 2; + /* + * payload offset. 
it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* + * for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* mss */ + u32 mss : 14; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; +#endif +}; + +struct hifc_root_db_addr_s { + unsigned long long phy_addr; + void __iomem *virt_map_addr; +}; + +/* send queue management structure */ +struct hifc_root_sq_info_s { + spinlock_t root_sq_spin_lock; + + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; /* ring buffer Pi */ + unsigned short ci; /* ring buffer Ci */ + unsigned short owner; + unsigned short hardware_write_back_value; + unsigned short q_depth; + unsigned short wqe_bb_size; /* WQE Basic size */ + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *ci_addr; + dma_addr_t ci_dma_addr; + + unsigned long long cla_addr; + void *sq_handle; + struct hifc_root_db_addr_s direct_db; + struct hifc_root_db_addr_s normal_db; + unsigned int db_idx; + unsigned int global_qpn; + int in_flush; + void *root_info; +}; + +struct hifc_root_rq_info_s { + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; + unsigned short ci; + unsigned short owner; + + unsigned short q_depth; + unsigned short q_mask; + unsigned short wqe_bb_size; + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *pi_vir_addr; + dma_addr_t pi_dma_addr; + + /* Root RQ Receive Buffer size and completion buff */ + unsigned int rqc_buff_size; + void *rq_completion_buff; + dma_addr_t rq_completion_dma; + unsigned int rq_rcv_buff_size; + void *rq_rcv_buff; + dma_addr_t rq_rcv_dma; + void *rq_handle; + + /* for queue context init */ + unsigned long long ci_cla_tbl_addr; + + unsigned int global_qpn; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *root_info; +}; + +struct hifc_root_info_s { + void *phba; + unsigned int sq_num; + unsigned int sq_ci_table_size; + void *virt_sq_ci_table_buff; + dma_addr_t sq_ci_table_dma; + void *sq_info; + + unsigned int rq_num; + unsigned int rq_pi_table_size; + void *virt_rq_pi_table_buff; + dma_addr_t rq_pi_table_dma; + void *rq_info; +}; + +/**************************** SCQ defines ********************************/ +struct hifc_scq_info_s { + struct cqm_queue_s *cqm_scq_info; + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */ + unsigned int scqc_cq_depth; + unsigned short scqc_ci_type; + unsigned short valid_wqe_num; /* ScQ depth include link wqe */ + unsigned short ci; + unsigned short ci_owner; + + unsigned int queue_id; + unsigned int scqn; + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned short msix_entry_idx; + unsigned int irq_id; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *phba; + unsigned int reserved; + struct task_struct *delay_task; + int task_exit; + unsigned int intrmode; +}; + +/************************* SRQ depth ***********************************/ +struct hifc_srq_ctx_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + 
/* DW0 */ + unsigned long long last_rq_pmsn : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long cur_rqe_user_id : 16; + unsigned long long parity : 8; + unsigned long long rsvd0 : 2; + unsigned long long pcie_template : 6; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long cur_sge_v : 1; + unsigned long long cur_sge_l : 1; + unsigned long long int_mode : 2; + unsigned long long ceqn_msix : 11; + unsigned long long cur_sge_remain_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long consant_sge_len : 17; + unsigned long long cur_wqe : 1; + unsigned long long pmsn_type : 1; + unsigned long long bdsl : 4; + unsigned long long cr : 1; + unsigned long long csl : 2; + unsigned long long cf : 1; + unsigned long long ctrl_sl : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long pre_fetch_max_msn : 16; + unsigned long long cqe_max_cnt : 8; + unsigned long long cur_cqe_cnt : 8; + unsigned long long arm_q : 1; + unsigned long long rsvd1 : 7; + unsigned long long cq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rsvd2 : 1; + unsigned long long loop_o : 1; + unsigned long long ring : 1; + unsigned long long rsvd3 : 5; + +#else + /* DW0 */ + unsigned long long pcie_template : 6; + unsigned long long rsvd0 : 2; + unsigned long long parity : 8; + unsigned long long cur_rqe_user_id : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long last_rq_pmsn : 16; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long ctrl_sl : 1; + unsigned long long cf : 1; + unsigned long long csl : 2; + unsigned long long cr : 1; + unsigned long long bdsl : 4; + unsigned long long pmsn_type : 1; + unsigned long long cur_wqe : 1; + unsigned long long consant_sge_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long cur_sge_remain_len : 17; + unsigned long long ceqn_msix : 11; + unsigned long long int_mode : 2; + unsigned long long cur_sge_l : 1; + unsigned long long cur_sge_v : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long rsvd3 : 5; + unsigned long long ring : 1; + unsigned long long loop_o : 1; + unsigned long long rsvd2 : 1; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long cq_so_ro : 2; + unsigned long long rsvd1 : 7; + unsigned long long arm_q : 1; + unsigned long long cur_cqe_cnt : 8; + unsigned long long cqe_max_cnt : 8; + unsigned long long pre_fetch_max_msn : 16; + +#endif + + /* DW6~DW7 */ + unsigned long long rsvd4; + unsigned long long rsvd5; + +}; + +struct hifc_srq_buff_entry_s { + unsigned short buff_id; + void *buff_addr; + dma_addr_t buff_dma; +}; + +enum hifc_clean_state_e { + HIFC_CLEAN_DONE, + HIFC_CLEAN_DOING, + HIFC_CLEAN_BUTT +}; + +enum hifc_srq_type_e { + HIFC_SRQ_ELS = 1, + HIFC_SRQ_BUTT +}; + +struct hifc_srq_info_s { + enum hifc_srq_type_e srq_type; + + struct cqm_queue_s *cqm_srq_info; + /* Wqe number per buf, dont't inlcude link wqe */ + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* valid wqe number, dont't include link wqe */ + unsigned int valid_wqe_num; + unsigned short pi; + unsigned short pi_owner; + unsigned short pmsn; + unsigned short ci; + unsigned short cmsn; + unsigned int srqn; + + dma_addr_t 
first_rqe_rcv_dma;
+
+ struct hifc_srq_buff_entry_s *els_buff_entry_head;
+ struct buf_describe_s buff_list;
+ spinlock_t srq_spin_lock;
+ int spin_lock_init;
+ int enable;
+ enum hifc_clean_state_e state;
+ struct delayed_work del_work;
+ unsigned int del_retry_time;
+ void *phba;
+};
+
+/*
+ * The doorbell record keeps the PI of the WQE that will be produced next.
+ * The PI is 15 bits wide, with 1 o-bit.
+ */
+struct hifc_db_record {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u64 rsvd0 : 32;
+ unsigned long long dump_pmsn : 16;
+ unsigned long long pmsn : 16;
+#else
+ unsigned long long pmsn : 16;
+ unsigned long long dump_pmsn : 16;
+ u64 rsvd0 : 32;
+#endif
+};
+
+/*
+ * The ci record keeps the CI of the WQE that will be consumed next.
+ * The CI is 15 bits wide, with 1 o-bit.
+ */
+struct hifc_ci_record_s {
+#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
+ u64 rsvd0 : 32;
+ unsigned long long dump_cmsn : 16;
+ unsigned long long cmsn : 16;
+#else
+ unsigned long long cmsn : 16;
+ unsigned long long dump_cmsn : 16;
+ u64 rsvd0 : 32;
+#endif
+};
+
+/* The accumulate data in WQ header */
+struct hifc_accumulate {
+ u64 data_2_uc;
+ u64 data_2_drv;
+};
+
+/* The WQ header structure */
+struct hifc_wq_header_s {
+ struct hifc_db_record db_record;
+ struct hifc_ci_record_s ci_record;
+ struct hifc_accumulate soft_data;
+
+};
+
+/* Link list Sq WqePage Pool */
+/* queue header struct */
+struct hifc_queue_header_s {
+ unsigned long long doorbell_record;
+ unsigned long long ci_record;
+ unsigned long long ulrsv1;
+ unsigned long long ulrsv2;
+};
+
+/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */
+struct hifc_sq_wqe_page_s {
+ struct list_head entry_wpg;
+ /* Wqe Page virtual addr */
+ void *wpg_addr;
+ /* Wqe Page physical addr */
+ unsigned long long wpg_phy_addr;
+};
+
+struct hifc_sq_wqe_page_pool_s {
+ unsigned int wpg_cnt;
+ unsigned int wpg_size;
+ unsigned int wqe_per_wpg;
+
+ /* PCI DMA Pool */
+ struct dma_pool *wpg_dma_pool;
+ struct hifc_sq_wqe_page_s *wpg_pool_addr;
+ struct list_head list_free_wpg_pool;
+ spinlock_t wpg_pool_lock;
+ atomic_t wpg_in_use;
+};
+
+#define HIFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000)
+#define HIFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000)
+#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10)
+#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3)
+
+#define HIFC_SRQ_PROCESS_DELAY_MS (20)
+
+/* PLOGI parameters */
+struct hifc_plogi_coparams_s {
+ unsigned int seq_cnt : 1;
+ unsigned int ed_tov : 1;
+ unsigned int reserved : 14;
+ unsigned int tx_mfs : 16;
+ unsigned int ed_tov_timer_val;
+};
+
+struct hifc_delay_sqe_ctrl_info_s {
+ int valid;
+ unsigned int rport_index;
+ unsigned int time_out;
+ unsigned long long start_jiff;
+ unsigned int sid;
+ unsigned int did;
+ struct hifc_root_sqe_s sqe;
+};
+
+struct hifc_destroy_ctrl_info_s {
+ int valid;
+ unsigned int rport_index;
+ unsigned int time_out;
+ unsigned long long start_jiff;
+ struct unf_rport_info_s rport_info;
+};
+
+/* PARENT SQ Info */
+struct hifc_parent_sq_info_s {
+ void *phba;
+
+ spinlock_t parent_sq_enqueue_lock;
+ atomic_t wqe_page_cnt;
+ unsigned int rport_index;
+
+ unsigned int context_id;
+
+ /* Fixed value, used for Doorbell */
+ unsigned int sq_queue_id;
+
+ /* When a session is offloaded, tile will return the CacheId to the
+ * driver, which is used for Doorbell
+ */
+ unsigned int cache_id;
+
+ /* service type, fc */
+ unsigned int service_type;
+
+ /* OQID */
+ unsigned short oqid_rd;
+ unsigned short oqid_wr;
+
+ unsigned int max_sqe_num; /* SQ depth */
+ unsigned int wqe_num_per_buf;
+ unsigned int wqe_size;
+
+ unsigned
int wqe_offset; + unsigned short head_start_cmsn; + unsigned short head_end_cmsn; + unsigned short last_pmsn; + unsigned short last_pi_owner; + + unsigned int local_port_id; + unsigned int remote_port_id; + int port_in_flush; + int sq_in_sess_rst; + atomic_t sq_valid; + + void *queue_header_original; + struct hifc_queue_header_s *queue_header; + dma_addr_t queue_hdr_phy_addr_original; + dma_addr_t queue_hdr_phy_addr; + + /* Linked List SQ */ + struct list_head list_linked_list_sq; + + unsigned char vport_id; + struct delayed_work del_work; + struct delayed_work flush_done_tmo_work; + unsigned long long del_start_jiff; + dma_addr_t srq_ctx_addr; + atomic_t sq_cashed; + atomic_t fush_done_wait_cnt; + + struct hifc_plogi_coparams_s plogi_coparams; + + /* dif control info for immi */ + struct unf_dif_control_info_s sirt_dif_control; + + atomic_t sq_dbl_cnt; + atomic_t sq_wqe_cnt; + atomic_t sq_cqe_cnt; + atomic_t sqe_minus_cqe_cnt; + + struct hifc_delay_sqe_ctrl_info_s delay_sqe; + struct hifc_destroy_ctrl_info_s destroy_sqe; + atomic_t io_stat[HIFC_MAX_SQ_TASK_TYPE_CNT]; + +}; + +/* parent context doorbell */ +struct hifc_parent_sq_db_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 service_type : 5; + u32 cos : 3; + u32 c : 1; + u32 arm : 1; + u32 cntx_size : 2; + u32 vport : 7; + u32 xid : 13; +#else + u32 xid : 13; + u32 vport : 7; + u32 cntx_size : 2; + u32 arm : 1; + u32 c : 1; + u32 cos : 3; + u32 service_type : 5; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 qid : 4; + u32 sm_data : 20; + u32 pi_hi : 8; +#else + u32 pi_hi : 8; + u32 sm_data : 20; + u32 qid : 4; +#endif + } wd1; + +}; + +struct hifc_parent_cmd_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_st_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_els_srq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +enum hifc_parent_queue_state_e { + HIFC_QUEUE_STATE_INITIALIZED = 0, + HIFC_QUEUE_STATE_OFFLOADING = 1, + HIFC_QUEUE_STATE_OFFLOADED = 2, + HIFC_QUEUE_STATE_DESTROYING = 3, + HIFC_QUEUE_STATE_FREE = 4, + HIFC_QUEUE_STATE_BUTT +}; + +struct hifc_parent_ctx_s { + dma_addr_t parent_ctx; + /* Allocated by driver, Driver filled it when a session offload */ + void *virt_parent_ctx; + /* Allocated by CQM,used by Hardware */ + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj; +}; + +struct hifc_parent_queue_info_s { + spinlock_t parent_queue_state_lock; + struct hifc_parent_ctx_s parent_ctx; + enum hifc_parent_queue_state_e offload_state; + struct hifc_parent_sq_info_s parent_sq_info; + /* Cmd Scq info which is assocaiated with parent queue */ + struct hifc_parent_cmd_scq_info_s parent_cmd_scq_info; + /* Sts Scq info which is assocaiated with parent queue */ + struct hifc_parent_st_scq_info_s parent_sts_scq_info; + /* ELS Srq info which is assocaiated with parent queue */ + unsigned char queue_vport_id; + struct hifc_parent_els_srq_info_s parent_els_srq_info; + unsigned char queue_data_cos; +}; + +struct hifc_parent_queue_mgr_s { + struct hifc_parent_queue_info_s parent_queues[UNF_HIFC_MAXRPORT_NUM]; + struct buf_describe_s parent_sq_buf_list; +}; + +struct hifc_get_global_base_qpn_s { + /* for new version interface */ + unsigned char status; + unsigned char version; + unsigned char rsvd0[6]; + + unsigned short func_id; + unsigned short base_qpn; +}; + +#define HIFC_SRQC_BUS_ROW 8 +#define HIFC_SRQC_BUS_COL 19 +#define HIFC_SQC_BUS_ROW 8 +#define HIFC_SQC_BUS_COL 13 +#define 
HIFC_HW_SCQC_BUS_ROW 6 +#define HIFC_HW_SCQC_BUS_COL 10 +#define HIFC_HW_SRQC_BUS_ROW 4 +#define HIFC_HW_SRQC_BUS_COL 15 +#define HIFC_SCQC_BUS_ROW 3 +#define HIFC_SCQC_BUS_COL 29 + +#define HIFC_QUEUE_INFO_BUS_NUM 4 +struct hifc_queue_info_bus_s { + unsigned long long bus[HIFC_QUEUE_INFO_BUS_NUM]; +}; + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_create_root_queues(void *v_hba); +void hifc_destroy_root_queues(void *v_hba); +unsigned int hifc_alloc_parent_queue_mgr(void *v_hba); +void hifc_free_parent_queue_mgr(void *v_hba); +unsigned int hifc_create_common_share_queues(void *v_hba); +void hifc_destroy_common_share_queues(void *v_hba); +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba); +void hifc_free_parent_sq_wqe_page_pool(void *v_hba); + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg); + +unsigned int hifc_root_sq_enqueue(void *v_hba, + struct hifc_root_sqe_s *v_sqe); +void hifc_process_root_rqe(unsigned long v_rq_info); + +unsigned int hifc_root_cmdq_enqueue(void *v_hba, + union hifc_cmdqe_u *v_cmd_qe, + unsigned short v_cmd_len); + +void hifc_process_scq_cqe(unsigned long scq_info); +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt); + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buf_id); +void hifc_process_aeqe(void *v_srv_handle, unsigned char evt_type, u64 evt_val); + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe); +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_cmsn); +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe); + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush); +void hifc_set_rport_flush_state(void *v_hba, int in_flush); +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba); +unsigned int hifc_clear_pending_sq_wqe(void *v_hba); + +void hifc_free_parent_queues(void *v_hba); +void hifc_enable_queues_dispatch(void *v_hba); +void hifc_queue_pre_process(void *v_hba, int v_clean); +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info); +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int v_mode); +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn); + +unsigned int hifc_rport_session_rst(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_get_rport_maped_cmd_scqn(void *v_hba, + unsigned int rport_index); +unsigned int hifc_get_rport_maped_sts_scqn(void *v_hba, + unsigned int rport_index); + +void hifc_destroy_srq(void *v_hba); +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg); + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, 
+ struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct unf_rport_info_s *v_rport_info); +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info); +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index); + +unsigned int hifc_flush_ini_resp_queue(void *v_hba); +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work); +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_service.c b/drivers/scsi/huawei/hifc/hifc_service.c new file mode 100644 index 0000000000000000000000000000000000000000..52c9ad7670eec11e1aa273ca4ecadb7346b1db60 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_service.c @@ -0,0 +1,3076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_module.h" +#include "hifc_service.h" +#include "hifc_io.h" +#include "hifc_chipitf.h" + +#define HIFC_RQ_ERROR_FRAME 0x100 +#define HIFC_ELS_SRQ_BUF_NUM 0x9 + +/* Parent SCQ Receive the ELS processing function */ +static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the GS RSP processing function */ +static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the BLS RSP processing function */ +static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the offload completion processing function */ +static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the flush sq completion processing function */ +static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the bufferclear completion processing function */ +static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +typedef unsigned int (*pfn_scqe_handler)(struct hifc_hba_s *, + union hifcoe_scqe_u *); + +struct unf_scqe_handler_table_s { + unsigned int scqe_type; /* ELS type */ + int reclaim_sq_wpg; + pfn_scqe_handler pfn_scqe_handle_fun; +}; + +struct unf_scqe_handler_table_s scqe_handler_table[] = { + { /* INI rcvd ELS_CMND */ + HIFC_SCQE_ELS_CMND, + UNF_FALSE, + hifc_scq_rcv_els_cmd + }, + { /* INI rcvd ELS_RSP */ + HIFC_SCQE_ELS_RSP, + UNF_TRUE, + hifc_scq_rcv_els_rsp + }, + { /* INI rcvd GS_RSP */ + HIFC_SCQE_GS_RSP, + UNF_TRUE, + hifc_scq_rcv_gs_rsp + }, + { /* INI rcvd BLS_RSP */ + HIFC_SCQE_ABTS_RSP, + UNF_TRUE, + hifc_scq_rcv_abts_rsp + }, + { /* INI rcvd FCP RSP */ + HIFC_SCQE_FCP_IRSP, + UNF_TRUE, + hifc_scq_recv_iresp + }, + { /* INI rcvd ELS_RSP 
STS(Done) */ + HIFC_SCQE_ELS_RSP_STS, + UNF_TRUE, + hifc_scq_rcv_els_rsp_sts + }, + { /* INI rcvd Session enable STS */ + HIFC_SCQE_SESS_EN_STS, + UNF_FALSE, + hifc_scq_rcv_offload_sts + }, + { /* INI rcvd flush (pending) SQ STS */ + HIFC_SCQE_FLUSH_SQ_STS, + UNF_FALSE, + hifc_scq_rcv_flush_sq_sts + }, + { /* INI rcvd Buffer clear STS */ + HIFC_SCQE_BUF_CLEAR_STS, + UNF_FALSE, + hifc_scq_rcv_buf_clear_sts + }, + { /* INI rcvd session reset STS */ + HIFC_SCQE_SESS_RST_STS, + UNF_FALSE, + hifc_scq_rcv_sess_rst_sts + }, + { /* ELS SRQ */ + HIFC_SCQE_CLEAR_SRQ_STS, + UNF_FALSE, + hifc_scq_rcv_clear_srq_sts + }, + { /* INI rcvd TMF RSP */ + HIFC_SCQE_FCP_ITMF_RSP, + UNF_TRUE, + hifc_scq_recv_iresp + }, + { /* INI rcvd TMF Marker STS */ + HIFC_SCQE_ITMF_MARKER_STS, + UNF_FALSE, + hifc_scq_rcv_marker_sts + }, + { /* INI rcvd ABTS Marker STS */ + HIFC_SCQE_ABTS_MARKER_STS, + UNF_FALSE, + hifc_scq_rcv_abts_marker_sts + } +}; + +static unsigned int hifc_get_els_rps_pld_len(unsigned short type, + unsigned short cmnd, + unsigned int *v_els_acc_pld_len) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_els_acc_pld_len, + return UNF_RETURN_ERROR); + + /* RJT */ + if (type == ELS_RJT) { + *v_els_acc_pld_len = UNF_ELS_ACC_RJT_LEN; + return RETURN_OK; + } + + /* ACC */ + switch (cmnd) { + /* uses the same PAYLOAD length as PLOGI. */ + case ELS_FLOGI: + case ELS_PDISC: + case ELS_PLOGI: + *v_els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN; + break; + + case ELS_PRLI: + /* The PRLI ACC payload extends 12 bytes */ + *v_els_acc_pld_len = UNF_PRLI_ACC_PAYLOAD_LEN - + UNF_PRLI_SIRT_EXTRA_SIZE; + break; + + case ELS_LOGO: + *v_els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN; + break; + + case ELS_PRLO: + *v_els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN; + break; + + case ELS_RSCN: + *v_els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN; + break; + + case ELS_ADISC: + *v_els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN; + break; + + case ELS_RRQ: + *v_els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN; + break; + + case ELS_SCR: + *v_els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN; + break; + + case ELS_ECHO: + *v_els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN; + break; + case ELS_RLS: + *v_els_acc_pld_len = UNF_RLS_ACC_PAYLOAD_LEN; + break; + case ELS_REC: + *v_els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN; + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Unknown ELS command(0x%x)", cmnd); + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +struct hifc_els_cmd_payload_table_s { + unsigned short cmnd; /* ELS type */ + unsigned int req_pld_len; + unsigned int rsp_pld_len; +}; + +struct hifc_els_cmd_payload_table_s els_pld_table_map[] = { + { ELS_FDISC, + UNF_FDISC_PAYLOAD_LEN, + UNF_FDISC_ACC_PAYLOAD_LEN + }, + { ELS_FLOGI, + UNF_FLOGI_PAYLOAD_LEN, + UNF_FLOGI_ACC_PAYLOAD_LEN + }, + { ELS_PLOGI, + UNF_PLOGI_PAYLOAD_LEN, + UNF_PLOGI_ACC_PAYLOAD_LEN + }, + { ELS_SCR, + UNF_SCR_PAYLOAD_LEN, + UNF_SCR_RSP_PAYLOAD_LEN + }, + { ELS_PDISC, + UNF_PDISC_PAYLOAD_LEN, + UNF_PDISC_ACC_PAYLOAD_LEN + }, + { ELS_LOGO, + UNF_LOGO_PAYLOAD_LEN, + UNF_LOGO_ACC_PAYLOAD_LEN + }, + { ELS_PRLO, + UNF_PRLO_PAYLOAD_LEN, + UNF_PRLO_ACC_PAYLOAD_LEN + }, + { ELS_ADISC, + UNF_ADISC_PAYLOAD_LEN, + UNF_ADISC_ACC_PAYLOAD_LEN + }, + { ELS_RRQ, + UNF_RRQ_PAYLOAD_LEN, + UNF_RRQ_ACC_PAYLOAD_LEN + }, + { ELS_RSCN, + 0, + UNF_RSCN_ACC_PAYLOAD_LEN + }, + { ELS_ECHO, + UNF_ECHO_PAYLOAD_LEN, + UNF_ECHO_ACC_PAYLOAD_LEN + }, + { ELS_RLS, + UNF_RLS_PAYLOAD_LEN, + UNF_RLS_ACC_PAYLOAD_LEN + }, + { ELS_REC, + UNF_REC_PAYLOAD_LEN, 
+ UNF_REC_ACC_PAYLOAD_LEN + } +}; + +static unsigned int hifc_get_els_req_and_acc_pld_len(unsigned short cmnd, + unsigned int *req_pld_len, + unsigned int *rsp_pld_len) +{ + unsigned int ret = RETURN_OK; + unsigned int i; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, req_pld_len, return UNF_RETURN_ERROR); + + for (i = 0; i < (sizeof(els_pld_table_map) / + sizeof(struct hifc_els_cmd_payload_table_s)); i++) { + if (els_pld_table_map[i].cmnd == cmnd) { + *req_pld_len = els_pld_table_map[i].req_pld_len; + *rsp_pld_len = els_pld_table_map[i].rsp_pld_len; + return ret; + } + } + + switch (cmnd) { + case ELS_PRLI: + /* If sirt is enabled, The PRLI ACC payload extends + * 12 bytes + */ + *req_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN; + *rsp_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN; + break; + + default: + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]Unknown ELS_CMD(0x%x)", cmnd); + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +/* + * Function Name : hifc_get_els_frame_len + * Function Description: Get ELS Frame length + * Input Parameters : type, + * : cmnd + * Output Parameters : v_frame_len + * Return Type : unsigned int + */ +static unsigned int hifc_get_els_frame_len(unsigned short type, + unsigned short cmnd, + unsigned int *v_frame_len) +{ + unsigned int ret = RETURN_OK; + unsigned int hdr_len = sizeof(struct unf_fchead_s); + unsigned int req_len = 0; + unsigned int rsp_len = 0; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR); + + if (type == ELS_RJT) + rsp_len = UNF_ELS_ACC_RJT_LEN; + else + ret = hifc_get_els_req_and_acc_pld_len(cmnd, &req_len, + &rsp_len); + + if (ret == RETURN_OK) + *v_frame_len = hdr_len + ((type == ELS_ACC || type == ELS_RJT) ? + rsp_len : req_len); + + return ret; +} + +static void hifc_build_els_frame_header(unsigned short v_xid_base, + unsigned short v_cmnd_type, + unsigned short els_code, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int fctl = 0; + unsigned int rctl = 0; + unsigned int type = 0; + struct unf_fchead_s *cm_fc_hdr_buf = NULL; + struct unf_fchead_s *pkg_fc_hdr_info = NULL; + + pkg_fc_hdr_info = &v_pkg->frame_head; + cm_fc_hdr_buf = HIFC_GET_CMND_FC_HEADER(v_pkg); + + if (v_cmnd_type == ELS_CMND) { + rctl = HIFC_FC_RCTL_ELS_REQ; + fctl = HIFC_FCTL_REQ; + + /* If the ELS_CMD frame is sent, Adjusting the oxid */ + cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid + + ((unsigned int)v_xid_base << 16); + } else { + rctl = HIFC_FC_RCTL_ELS_RSP; + fctl = HIFC_FCTL_RESP; + + /* If the ELS_RSP frame is sent, Adjusting the rxid */ + cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid + + v_xid_base; + } + + type = HIFC_FC_TYPE_ELS; + + /* Get SID, DID, OXID, RXID from CM layer */ + cm_fc_hdr_buf->rctl_did = pkg_fc_hdr_info->rctl_did; + cm_fc_hdr_buf->csctl_sid = pkg_fc_hdr_info->csctl_sid; + cm_fc_hdr_buf->parameter = 0; + + /* R_CTL, CS_CTL, TYPE, F_CTL, SEQ_ID, DF_CTL, SEQ_CNT, LL filled */ + UNF_SET_FC_HEADER_RCTL(cm_fc_hdr_buf, rctl); + UNF_SET_FC_HEADER_CS_CTL(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_TYPE(cm_fc_hdr_buf, type); + UNF_SET_FC_HEADER_FCTL(cm_fc_hdr_buf, fctl); + UNF_SET_FC_HEADER_SEQ_CNT(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_DF_CTL(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_SEQ_ID(cm_fc_hdr_buf, 0); + + UNF_PRINT_SFS(UNF_INFO, 0, cm_fc_hdr_buf, sizeof(struct unf_fchead_s)); +} + +void hifc_save_login_para_in_sq_info( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_co_parms) +{ + struct hifc_hba_s *hba = NULL; + unsigned int rport_index = v_login_co_parms->rport_index; + 
struct hifc_parent_sq_info_s *sq_info = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) save login parms,but uplevel alloc invalid rport index: 0x%x", + hba->port_cfg.port_id, rport_index); + + return; + } + + sq_info = + &hba->parent_queue_mgr->parent_queues[rport_index].parent_sq_info; + + sq_info->plogi_coparams.seq_cnt = v_login_co_parms->seq_cnt; + sq_info->plogi_coparams.ed_tov = v_login_co_parms->ed_tov; + sq_info->plogi_coparams.tx_mfs = (v_login_co_parms->tx_mfs < + HIFC_DEFAULT_TX_MAX_FREAM_SIZE) ? HIFC_DEFAULT_TX_MAX_FREAM_SIZE : + v_login_co_parms->tx_mfs; + + sq_info->plogi_coparams.ed_tov_timer_val = + v_login_co_parms->ed_tov_timer_val; +} + +static void hifc_save_default_plogi_param_in_ctx( + struct hifc_hba_s *v_hba, + struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int tx_mfs = HIFC_DEFAULT_TX_MAX_FREAM_SIZE; + unsigned int did = 0; + + did = UNF_GET_DID(v_pkg); + + if (did == UNF_FC_FID_DIR_SERV) + tx_mfs = 2048; + + v_ctx->sw_section.tx_mfs = cpu_to_be16((unsigned short)(tx_mfs)); +} + +static void hifc_save_plogi_acc_param_in_ctx( + struct hifc_hba_s *v_hba, + struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ +#define HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH ((8 * 1024)) + + struct unf_lgn_port_coparms_s *port_co_param = NULL; + struct unf_plogi_payload_s *plogi_acc_pld = NULL; + + plogi_acc_pld = UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg); + port_co_param = &plogi_acc_pld->parms.co_parms; + + /* e_d_tov and seq_cnt */ + hifc_big_to_cpu32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1, + sizeof(unsigned int)); + + v_ctx->sw_section.sw_ctxt_config.dw.e_d_tov = + port_co_param->e_d_tov_resolution; + + v_ctx->sw_section.sw_ctxt_config.dw.seq_cnt = + port_co_param->seq_cnt; + + hifc_cpu_to_big32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1, + sizeof(unsigned int)); + + v_ctx->sw_section.tx_mfs = + (unsigned short)(v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]) < + HIFC_DEFAULT_TX_MAX_FREAM_SIZE ? + cpu_to_be16((unsigned short)HIFC_DEFAULT_TX_MAX_FREAM_SIZE) : + cpu_to_be16 ((unsigned short) + (v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE])); + + v_ctx->sw_section.e_d_tov_timer_val = + cpu_to_be32(port_co_param->e_d_tov); + + v_ctx->sw_section.mfs_unaligned_bytes = + cpu_to_be16(HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH % + port_co_param->bb_receive_data_field_size); +} + +static void hifc_recover_offloading_state( + struct hifc_parent_queue_info_s *v_prntq_info, + enum hifc_parent_queue_state_e offload_state) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_prntq_info->parent_queue_state_lock, flag); + + if (v_prntq_info->offload_state == HIFC_QUEUE_STATE_OFFLOADING) + v_prntq_info->offload_state = offload_state; + + spin_unlock_irqrestore(&v_prntq_info->parent_queue_state_lock, flag); +} + +static void hifc_save_magic_num_in_ctx(struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ + /* The CID itself is initialized by the microcode. + * The driver multiplexes the CID as magicnum and then updates + * the CID by the microcode. 
+ */ + v_ctx->sw_section.cid = cpu_to_be32(UNF_GETXCHGALLOCTIME(v_pkg)); +} + +static void hifc_save_magic_num_in_nurmal_root_ts( + struct hifc_root_sqe_s *v_rt_sqe, + struct unf_frame_pkg_s *v_pkg) +{ + v_rt_sqe->task_section.fc_dw1.magic_num = UNF_GETXCHGALLOCTIME(v_pkg); +} + +static int hifc_check_need_delay_offload( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rport_idx, + struct hifc_parent_queue_info_s *v_cur_parent_queue, + struct hifc_parent_queue_info_s **v_offload_parnt_queue) +{ + unsigned long flag = 0; + struct hifc_parent_queue_info_s *offload_parnt_queue = NULL; + + spin_lock_irqsave(&v_cur_parent_queue->parent_queue_state_lock, flag); + + if (v_cur_parent_queue->offload_state == HIFC_QUEUE_STATE_OFFLOADING) { + spin_unlock_irqrestore( + &v_cur_parent_queue->parent_queue_state_lock, flag); + + offload_parnt_queue = hifc_find_offload_parent_queue( + v_hba, + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + rport_idx); + if (offload_parnt_queue) { + *v_offload_parnt_queue = offload_parnt_queue; + + return UNF_TRUE; + } + } else { + spin_unlock_irqrestore( + &v_cur_parent_queue->parent_queue_state_lock, flag); + } + + return UNF_FALSE; +} + +static unsigned int hifc_build_service_wqe_root_offload( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifc_parent_queue_info_s *v_parnt_qinfo, + struct hifc_root_sqe_s *v_sqe) +{ + unsigned int cqm_xid = 0; + unsigned short els_cmnd_type = UNF_ZERO; + struct hifc_parent_ctx_s *parnt_ctx = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifcoe_parent_context_s *v_ctx = NULL; + + els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd); + cqm_xid = hifc_get_parent_ctx_xid_by_pkg(v_hba, v_pkg); + + /* An offload request is initiated only when the parent queue is in the + * initialized state + */ + if (v_parnt_qinfo->offload_state == HIFC_QUEUE_STATE_INITIALIZED) { + /* Obtain Parent Context and set WQE to off_load, GPA_Addr */ + parnt_ctx = hifc_get_parnt_ctx_virt_addr_by_pkg(v_hba, v_pkg); + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (unlikely((!parnt_ctx) || (!sq_info) || + (cqm_xid == INVALID_VALUE32))) { + return UNF_RETURN_ERROR; + } + + /* Fill in ROOT SQE with offload request */ + hifc_build_els_wqe_root_offload( + v_sqe, + parnt_ctx->cqm_parent_ctx_obj->paddr, + cqm_xid); + + /* If the value is PlogiAcc, parse the FlogiAcc negotiation + * parameter and fill in Context + */ + v_ctx = (struct hifcoe_parent_context_s *) + parnt_ctx->virt_parent_ctx; + + if (els_cmnd_type == ELS_ACC) + hifc_save_plogi_acc_param_in_ctx( + (struct hifc_hba_s *)v_hba, v_ctx, v_pkg); + else + hifc_save_default_plogi_param_in_ctx( + (struct hifc_hba_s *)v_hba, v_ctx, v_pkg); + + /* The SID DID parameter is updated to Parent SQ Qinfo */ + sq_info->local_port_id = UNF_GET_SID(v_pkg); + sq_info->remote_port_id = UNF_GET_DID(v_pkg); + + /* Transfers the key value to the ucode for offload */ + hifc_big_to_cpu32(v_ctx->key, sizeof(v_ctx->key)); + memcpy(v_ctx->key, &sq_info->local_port_id, + sizeof(sq_info->local_port_id)); + memcpy((unsigned char *)v_ctx->key + + sizeof(sq_info->local_port_id), + &sq_info->remote_port_id, + sizeof(sq_info->remote_port_id)); + + hifc_cpu_to_big32(v_ctx->key, sizeof(v_ctx->key)); + + /* Update magic num to parent_ctx */ + hifc_save_magic_num_in_ctx(v_ctx, v_pkg); + + hifc_build_service_wqe_ctx_sge( + v_sqe, parnt_ctx->parent_ctx, + sizeof(struct hifcoe_parent_context_s)); + + v_parnt_qinfo->offload_state = 
HIFC_QUEUE_STATE_OFFLOADING; + } else { + /* If the connection is being uninstalled and the plogi is + * delivered through the root channel, the plogi must be carried + * to the ucode. + */ + v_sqe->task_section.fc_dw4.parent_xid = cqm_xid; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send PLOGI with no offload while parent queue is not initialized status", + ((struct hifc_hba_s *)v_hba)->port_cfg.port_id); + } + + return RETURN_OK; +} + +static unsigned int hifc_send_els_via_root(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short els_cmd_code = UNF_ZERO; + unsigned short els_cmnd_type = UNF_ZERO; + unsigned int frame_len = 0; + unsigned int exch_id = 0; + unsigned int scq_num = 0; + unsigned int rport_idx = 0; + int sqe_delay = UNF_FALSE; + void *frame_addr = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifc_parent_queue_info_s *offload_parnt_queue = NULL; + struct hifc_root_sqe_s *sqe = NULL; + struct hifc_root_sqe_s local_rt_sqe; + unsigned long flag = 0; + enum hifc_parent_queue_state_e last_offload_state = + HIFC_QUEUE_STATE_INITIALIZED; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + unsigned long long frame_phy_addr; + + /* The ROOT SQE is assembled in local variables and then copied to the + * queue memory + */ + sqe = &local_rt_sqe; + hba = (struct hifc_hba_s *)v_hba; + + memset(sqe, 0, sizeof(local_rt_sqe)); + + /* Determine the ELS type in the pstPkg */ + els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd); + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + exch_id = UNF_GET_RXID(v_pkg); + sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_RSP; + } else { + els_cmd_code = els_cmnd_type; + els_cmnd_type = ELS_CMND; + exch_id = UNF_GET_OXID(v_pkg); + sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_CMND; + } + if ((els_cmd_code == ELS_ECHO) && (els_cmnd_type != ELS_RJT)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%x) send ELS ECHO can't send via root Type(0x%x)", + hba->port_cfg.port_id, rport_idx, els_cmnd_type); + + return UNF_RETURN_NOT_SUPPORT; + } + exch_id += hba->exit_base; + + ret = hifc_get_els_frame_len(els_cmnd_type, els_cmd_code, &frame_len); + if (ret != RETURN_OK) { + dump_stack(); + return ret; + } + + /* Obtains the frame start address */ + frame_addr = HIFC_GET_CMND_HEADER_ADDR(v_pkg); + frame_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr; + + /* Assemble the frame header and adjust the Paylaod based on the ELS */ + hifc_build_els_frame_header(hba->exit_base, els_cmnd_type, + els_cmd_code, v_pkg); + + /* Assembling the Control Section */ + hifc_build_service_wqe_ctrl_section( + &sqe->ctrl_section, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifc_root_sqe_task_section_s)), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s))); + + /* Fill in Normal Root SQE TS */ + rport_idx = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + scq_num = hifc_get_rport_maped_cmd_scqn(v_hba, rport_idx); + hifc_build_service_wqe_root_ts(v_hba, sqe, exch_id, rport_idx, scq_num); + + /* Upsate magic number into sqe */ + hifc_save_magic_num_in_nurmal_root_ts(sqe, v_pkg); + + /* Fill in the special part of Normal Root SQE TS and initiate implicit + * uninstallation + */ + if ((els_cmd_code == ELS_PLOGI) && (els_cmnd_type != ELS_RJT)) { + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + 
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) find parent queue fail", + hba->port_cfg.port_id, rport_idx, + els_cmnd_type); + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + last_offload_state = prnt_qinfo->offload_state; + + /* Fill in the special part of Normal Root SQE TS */ + ret = hifc_build_service_wqe_root_offload((void *)hba, + v_pkg, prnt_qinfo, + sqe); + if (ret != RETURN_OK) { + spin_unlock_irqrestore( + &prnt_qinfo->parent_queue_state_lock, flag); + + return ret; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + /* Before the offload, check whether there is a risk of + * repeated offload + */ + sqe_delay = hifc_check_need_delay_offload((void *)hba, + v_pkg, rport_idx, + prnt_qinfo, + &offload_parnt_queue); + } + + /* Fill in Normal Root SQE SGE */ + hifc_build_service_wqe_root_sge(sqe, frame_addr, frame_phy_addr, + frame_len, v_hba); + + if (sqe_delay == UNF_TRUE) { + ret = hifc_push_delay_sqe((void *)hba, offload_parnt_queue, + sqe, v_pkg); + if (ret == RETURN_OK) { + hifc_recover_offloading_state(prnt_qinfo, + last_offload_state); + + return ret; + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)", + hba->port_cfg.port_id, rport_idx, els_cmnd_type, + els_cmd_code, exch_id); + + ret = hifc_root_sq_enqueue(hba, sqe); + if ((ret != RETURN_OK) && (prnt_qinfo)) { + hifc_recover_offloading_state(prnt_qinfo, last_offload_state); + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == + UNF_TRUE) { + memcpy(&destroy_sqe_info, + &prnt_qinfo->parent_sq_info.destroy_sqe, + sizeof(struct hifc_destroy_ctrl_info_s)); + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = + UNF_FALSE; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, + &destroy_sqe_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x) fail, recover offloadstatus(%u)", + hba->port_cfg.port_id, + rport_idx, + els_cmnd_type, + els_cmd_code, + exch_id, + prnt_qinfo->offload_state); + } + + return ret; +} + +static void *hifc_get_els_frame_addr(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned short els_cmd_code, + unsigned short els_cmnd_type, + unsigned long long *v_phyaddr) +{ + void *frame_pld_addr; + dma_addr_t els_frame_addr = 0; + + if (els_cmd_code == ELS_ECHO) { + frame_pld_addr = (void *)UNF_GET_ECHO_PAYLOAD(v_pkg); + els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg); + } else if (els_cmd_code == ELS_RSCN) { + if (els_cmnd_type == ELS_CMND) { + /* Not Support */ + frame_pld_addr = NULL; + els_frame_addr = 0; + } else { + frame_pld_addr = + (void *)UNF_GET_RSCN_ACC_PAYLOAD(v_pkg); + els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + } + } else { + frame_pld_addr = (void *)HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg); + els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + } + *v_phyaddr = els_frame_addr; + return frame_pld_addr; +} + +static unsigned int hifc_send_els_via_parent( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifc_parent_queue_info_s *v_prntq_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short els_cmd_code = 
UNF_ZERO; + unsigned short els_cmnd_type = UNF_ZERO; + unsigned short remote_xid = 0; + unsigned short local_xid = 0; + struct hifc_hba_s *hba; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifcoe_sqe_s sqe; + void *frame_pld_addr; + unsigned int frame_pld_len = 0; + unsigned int acc_pld_len = 0; + unsigned long long fram_phy_addr = 0; + + hba = (struct hifc_hba_s *)v_hba; + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + sq_info = &v_prntq_info->parent_sq_info; + + /* Determine the ELS type in pstPkg */ + els_cmnd_type = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd); + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + remote_xid = UNF_GET_OXID(v_pkg); + local_xid = UNF_GET_RXID(v_pkg) + hba->exit_base; + } else { + els_cmd_code = els_cmnd_type; + els_cmnd_type = ELS_CMND; + local_xid = UNF_GET_OXID(v_pkg) + hba->exit_base; + remote_xid = UNF_GET_RXID(v_pkg); + } + + frame_pld_addr = hifc_get_els_frame_addr(v_hba, v_pkg, els_cmd_code, + els_cmnd_type, &fram_phy_addr); + + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + ret = hifc_get_els_rps_pld_len(els_cmnd_type, els_cmd_code, + &frame_pld_len); + if (ret != RETURN_OK) + return ret; + + hifc_build_els_wqe_ts_rsp( + &sqe, sq_info, frame_pld_addr, + els_cmnd_type, els_cmd_code, + v_prntq_info->parent_sts_scq_info.cqm_queue_id); + } else { + /* Fill in HIFCOE_TASK_T_ELS */ + ret = hifc_get_els_req_and_acc_pld_len(els_cmd_code, + &frame_pld_len, + &acc_pld_len); + if (ret != RETURN_OK) + return ret; + + hifc_build_els_wqe_ts_req( + &sqe, sq_info, els_cmd_code, + v_prntq_info->parent_sts_scq_info.cqm_queue_id, + frame_pld_addr); + } + + /* Assemble the magicnum field of the els */ + hifc_build_els_wqe_ts_magic_num(&sqe, els_cmnd_type, + UNF_GETXCHGALLOCTIME(v_pkg)); + + /* Assemble the SQE Control Section part */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s))); + + /* Assemble the SQE Task Section Els Common part */ + hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index, + local_xid, remote_xid, + HIFC_LSW(frame_pld_len)); + + /* Build SGE */ + hifc_build_els_gs_wqe_sge(&sqe, frame_pld_addr, fram_phy_addr, + frame_pld_len, sq_info->context_id, v_hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)", + hba->port_cfg.port_id, sq_info->rport_index, els_cmnd_type, + els_cmd_code, local_xid); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_els_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + unsigned short els_cmd_code = UNF_ZERO; + unsigned short els_rsp_code = UNF_ZERO; + union unf_sfs_u *fc_entry = NULL; + struct unf_rrq_s *rrq_pld = NULL; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + /* Check Parameters */ + UNF_CHECK_VALID(0x5014, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5016, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5017, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg), + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + + hba = (struct hifc_hba_s *)v_hba; + els_cmd_code = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd); + els_rsp_code 
= HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + + /* If RRQ Req, Special processing */ + if (els_cmd_code == ELS_RRQ) { + fc_entry = UNF_GET_SFS_ENTRY(v_pkg); + rrq_pld = &fc_entry->rrq; + ox_id = (unsigned short)(rrq_pld->oxid_rxid >> 16); + rx_id = (unsigned short)(rrq_pld->oxid_rxid & 0xFFFF); + ox_id += hba->exit_base; + rrq_pld->oxid_rxid = ox_id << 16 | rx_id; + } + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) send ELS SID(0x%x) DID(0x%x) get a null parent queue info, send via root", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + /* If the Rport cannot be found, Send Pkg by Root SQ */ + ret = hifc_send_els_via_root(v_hba, v_pkg); + return ret; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + /* After offload, Send Pkg by Parent SQ */ + if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_els_via_parent(v_hba, v_pkg, prnt_qinfo); + } else { + /* Before offload, Send Pkg by Root SQ */ + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_els_via_root(v_hba, v_pkg); + } + + return ret; +} + +unsigned int hifc_rq_rcv_els_rsp_sts( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_cs_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rx_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + + rx_id = (unsigned int)v_cs_info->exch_id - v_hba->exit_base; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = v_cs_info->magic_num; + + ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id); + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_ELS_RSP_STS); + + return ret; +} + +static unsigned int hifc_recv_els_rsp_payload(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id, + unsigned char *v_els_pld_buf, + unsigned int pld_len) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_ELS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Payload Buffer in ROOT SQ Buffer */ + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf; + v_pkg->unf_cmnd_pload_bl.length = pld_len; + v_pkg->byte_orders |= HIFC_BIT_2; + + /* Mark as a non-last block */ + v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +static unsigned int hifc_rq_rcv_els_frame(struct hifc_hba_s *v_hba, + unsigned char *v_frame, + unsigned int frame_len, + unsigned short pkg_flag, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int pld_len = 0; + unsigned char *plg_buf = NULL; + unsigned long flags = 0; + + plg_buf = v_frame; + pld_len = frame_len; + + v_pkg->status = UNF_IO_SUCCESS; + + if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == + HIFC_FC_RCTL_ELS_RSP) { + ox_id = v_pkg->frame_head.oxid_rxid >> 16; + + if (!(HIFC_XID_IS_VALID(ox_id, (unsigned int)v_hba->exit_base, + (unsigned int)v_hba->exit_count))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Port(0x%x) ExchId(0x%x) isn't in 0x%x~0x%x", + v_hba->port_cfg.port_id, ox_id, + v_hba->exit_base, + v_hba->exit_base + v_hba->exit_count - 1); + + goto rq_recv_error_els_frame; + } + + ox_id -= v_hba->exit_base; + + ret = hifc_recv_els_rsp_payload(v_hba, v_pkg, ox_id, plg_buf, + pld_len); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) receive ESL RSP payload error, OXID(0x%x) RXID(0x%x) PldLen(0x%x)", + v_hba->port_cfg.port_id, UNF_GET_OXID(v_pkg), + UNF_GET_RXID(v_pkg), pld_len); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP); + } + + if (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) { + ret = hifc_rcv_els_rsp(v_hba, v_pkg, ox_id); + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP); + } + } else if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == + HIFC_FC_RCTL_ELS_REQ) { + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_CMD); + + if (HIFC_CHECK_IF_FIRST_PKG(pkg_flag)) + v_pkg->xchg_contex = NULL; + + v_pkg->last_pkg_flag = (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) ? + UNF_PKG_LAST_REQUEST : UNF_PKG_NOT_LAST_REQUEST; + + ret = hifc_rcv_els_cmnd(v_hba, v_pkg, plg_buf, pld_len, + HIFC_CHECK_IF_FIRST_PKG(pkg_flag)); + + spin_lock_irqsave(&v_hba->delay_info.srq_lock, flags); + if (v_hba->delay_info.srq_delay_flag) { + v_hba->delay_info.srq_delay_flag = 0; + + if (!cancel_delayed_work(&v_hba->delay_info.del_work)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rcvd plogi from srq process delay timer maybe timeout", + v_hba->port_cfg.port_id); + } + spin_unlock_irqrestore(&v_hba->delay_info.srq_lock, + flags); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[info]Port(0x%x) received els from root rq and send delay plogi to CM", + v_hba->port_cfg.port_id); + + hifc_rcv_els_cmnd( + v_hba, &v_hba->delay_info.pkg, + v_hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr, + 0, UNF_FALSE); + } else { + spin_unlock_irqrestore(&v_hba->delay_info.srq_lock, + flags); + } + + } else { + goto rq_recv_error_els_frame; + } + + return ret; + +rq_recv_error_els_frame: + return HIFC_RQ_ERROR_FRAME; +} + +static unsigned int hifc_rq_rcv_bls_frame(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = RETURN_OK; + unsigned int ox_id = INVALID_VALUE32; + + v_pkg->status = UNF_IO_SUCCESS; + + if ((UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_ACC) || + (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_RJT)) { + /* INI Mode */ + ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head); + if ((ox_id < (unsigned int)v_hba->exit_base) || + (ox_id >= (unsigned int)(v_hba->exit_base + + v_hba->exit_count))) { + goto rq_recv_error_bls_frame; + } + ox_id -= v_hba->exit_base; + + ret = hifc_rcv_bls_rsp(v_hba, v_pkg, ox_id); + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ABTS_RSP); + } else { + goto rq_recv_error_bls_frame; + } + + return ret; + +rq_recv_error_bls_frame: + return HIFC_RQ_ERROR_FRAME; +} + +static unsigned int hifc_rq_rcv_service_frame(struct hifc_hba_s *v_hba, + unsigned char *v_frame, + unsigned int frame_len, + unsigned short pkg_flag, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned char fc_frame_type = 0; + + fc_frame_type = UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head); + + if (fc_frame_type == HIFC_FC_TYPE_ELS) { + v_hba->delay_info.root_rq_rcvd_flag = 1; + ret = hifc_rq_rcv_els_frame(v_hba, v_frame, frame_len, + pkg_flag, v_pkg); + } else if (fc_frame_type == HIFC_FC_TYPE_BLS) { + ret = hifc_rq_rcv_bls_frame(v_hba, v_pkg); + } else { + ret = HIFC_RQ_ERROR_FRAME; + } + + if (ret == HIFC_RQ_ERROR_FRAME) { + /* Error statistics are collected when an invalid frame + * is received + */ + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_BUTT); + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[info]Port(0x%x) Receive an unsupported frame, Rctl(0x%x), Type(0x%x), Fctl(0x%x), 
Sid_Did(0x%x_0x%x),OxId_RxId(0x%x_0x%x), FrameLen(0x%x), drop it", + v_hba->port_cfg.port_id, + UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head), + UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head), + UNF_GET_FC_HEADER_FCTL(&v_pkg->frame_head), + UNF_GET_FC_HEADER_SID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_DID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head), + frame_len); + } + + return ret; +} + +unsigned int hifc_rcv_service_frame_from_rq(struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s + *v_rq_info, + struct hifc_root_rq_complet_info_s + *v_complet_info, + unsigned short v_rcv_buf_num) +{ + unsigned short remain_len = 0; + unsigned short rcv_len = 0; + unsigned short pkg_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short pkt_len = 0; + void *root_rq_rcv_buf = NULL; + unsigned short ci = 0; + unsigned int loop = 0; + struct unf_frame_pkg_s pkg = { 0 }; + struct unf_fchead_s *els_frame = NULL; + unsigned char *pld_buf = NULL; + unsigned int pld_len = 0; + + ci = v_rq_info->ci; + pkt_len = v_complet_info->buf_length; + memset(&pkg, 0, sizeof(pkg)); + + for (loop = 0; loop < v_rcv_buf_num; loop++) { + /* Obtain rcv buffer */ + root_rq_rcv_buf = + (void *)((unsigned long long)v_rq_info->rq_rcv_buff + + HIFC_ROOT_RQ_RECV_BUFF_SIZE * ci); + + /* Calculate the frame data address and length */ + els_frame = (struct unf_fchead_s *)root_rq_rcv_buf; + rcv_len = HIFC_ROOT_RQ_RECV_BUFF_SIZE; + pkg_flag = 0; + + if (loop == (v_rcv_buf_num - 1)) { + pkg_flag |= HIFC_LAST_PKG_FLAG; + remain_len = pkt_len % HIFC_ROOT_RQ_RECV_BUFF_SIZE; + rcv_len = (remain_len > 0) ? (remain_len) : + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + } + + /* Calculate the frame data address and length */ + if (loop == 0) { + pkg_flag |= HIFC_FIRST_PKG_FLAG; + + memcpy(&pkg.frame_head, els_frame, + sizeof(pkg.frame_head)); + hifc_big_to_cpu32(&pkg.frame_head, + sizeof(pkg.frame_head)); + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_complet_info->magic_num; + + pld_buf = (unsigned char *)(els_frame + 1); + pld_len = rcv_len - sizeof(pkg.frame_head); + } else { + pld_buf = (unsigned char *)els_frame; + pld_len = rcv_len; + } + + /* Processing the rqe sent by the FC ucode */ + ret = hifc_rq_rcv_service_frame(v_hba, pld_buf, pld_len, + pkg_flag, &pkg); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[err]Up layer Process RQE frame or status abnormal(0x%x)", + ret); + + return UNF_RETURN_ERROR; + } + + ci = ((ci + 1) < v_rq_info->q_depth) ? 
(ci + 1) : 0; + } + + return RETURN_OK; +} + +static unsigned int hifc_rcv_gs_rsp_payload(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id, + unsigned char *v_els_pld_buf, + unsigned int pld_len) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_GS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Convert to small endian */ + hifc_big_to_cpu32(v_els_pld_buf, pld_len); + + /* Payload Buffer in ROOT SQ Buffer */ + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf; + v_pkg->unf_cmnd_pload_bl.length = pld_len; + + /* Mark as a non-last block */ + v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* Default path, which is sent from SCQ to the driver */ + unsigned char status = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_abts_rsp_s *abts_rsp = NULL; + + abts_rsp = &v_scqe->rcv_abts_rsp; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num; + + ox_id = (unsigned int)(abts_rsp->wd0.ox_id); + + if (unlikely((ox_id < (unsigned int)v_hba->exit_base) || + (ox_id >= + (unsigned int)(v_hba->exit_base + v_hba->exit_count)))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) has bad OX_ID(0x%x) for bls_rsp", + v_hba->port_cfg.port_id, ox_id); + + return UNF_RETURN_ERROR; + } + + ox_id -= v_hba->exit_base; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_STATUS(v_scqe), + (unsigned int)(abts_rsp->wd0.ox_id)); + + status = UNF_IO_FAILED; + } else { + pkg.frame_head.rctl_did = abts_rsp->wd3.did; + pkg.frame_head.csctl_sid = abts_rsp->wd4.sid; + pkg.frame_head.oxid_rxid = (unsigned int)(abts_rsp->wd0.rx_id) + | ox_id << 16; + + /* BLS_ACC/BLS_RJT: IO_succeed */ + if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_ACC) { + status = UNF_IO_SUCCESS; + } else if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_RJT) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x", + v_hba->port_cfg.port_id, + abts_rsp->payload[0], + abts_rsp->payload[1], abts_rsp->payload[2]); + + status = UNF_IO_SUCCESS; + } else { + /* 3. BA_RSP type is err: IO_failed */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) BLS response RCTL is error", + v_hba->port_cfg.port_id); + + HIFC_ERR_IO_STAT(v_hba, HIFC_SCQE_ABTS_RSP); + + status = UNF_IO_FAILED; + } + } + + /* Set PKG/exchange status & Process BLS_RSP */ + pkg.status = status; + ret = hifc_rcv_bls_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) SID(0x%x) DID(0x%x) %s", + v_hba->port_cfg.port_id, + ox_id, + abts_rsp->wd0.rx_id, + abts_rsp->wd4.sid, + abts_rsp->wd3.did, + (ret == RETURN_OK) ? 
"OK" : "ERROR"); + + return ret; +} + +unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_cs_info) +{ + UNF_REFERNCE_VAR(v_hba); + UNF_REFERNCE_VAR(v_cs_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]hifc_rq_rcv_srv_err not implemented yet"); + + if (!v_hba) + return UNF_RETURN_ERROR; + + if (!v_cs_info) + return UNF_RETURN_ERROR; + + return UNF_RETURN_ERROR; +} + +unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned char *v_pld, + unsigned int pld_len, + int first_frame) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Convert Payload to small endian */ + hifc_big_to_cpu32(v_pld, pld_len); + + v_pkg->type = UNF_PKG_ELS_REQ; + + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_pld; + + /* Payload length */ + v_pkg->unf_cmnd_pload_bl.length = pld_len; + + /* Obtain the Cmnd type from the Paylaod. The Cmnd is in small endian */ + if (first_frame == UNF_TRUE) { + v_pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND( + v_pkg->unf_cmnd_pload_bl.buffer_ptr); + } + + /* Errors have been processed in HIFC_RecvElsError */ + v_pkg->status = UNF_IO_SUCCESS; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Receive CmndReqSts */ + v_pkg->type = UNF_PKG_ELS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + v_pkg->byte_orders |= HIFC_BIT_2; + + /* Mark the last block */ + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rx_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_ELS_REPLY_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id; + + UNF_LOWLEVEL_SEND_ELS_DONE(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Receive CmndReqSts */ + v_pkg->type = UNF_PKG_GS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Mark the last block */ + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + /* + * 1. SCQ (normal) + * 2. 
from Root RQ (parent no existence) + ** + * single frame, single sequence + */ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_BLS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_tmf_marker_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Send PKG info to COM */ + UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_abts_marker_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, v_hba->lport, v_pkg); + + return ret; +} + +void hifc_scqe_error_pre_process(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* Currently, only printing and statistics collection are performed */ + HIFC_ERR_IO_STAT(v_hba, HIFC_GET_SCQE_TYPE(v_scqe)); + HIFC_SCQ_ERR_TYPE_STAT(v_hba, HIFC_GET_SCQE_STATUS(v_scqe)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]Port(0x%x)-Task_type(%u) SCQE contain error code(%u), additional info(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->common.ch.wd0.task_type, + v_scqe->common.ch.wd0.err_code, + v_scqe->common.conn_id); +} + +unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe, + unsigned int scq_idx) +{ + unsigned int ret = UNF_RETURN_ERROR; + int do_reclaim = UNF_FALSE; + unsigned int index = 0; + unsigned int total_index = 0; + struct hifc_hba_s *hba = NULL; + union hifcoe_scqe_u *scqe = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scqe, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, HIFC_TOTAL_SCQ_NUM > scq_idx, + return UNF_RETURN_ERROR); + + scqe = (union hifcoe_scqe_u *)v_scqe; + hba = (struct hifc_hba_s *)v_hba; + + HIFC_IO_STAT(hba, HIFC_GET_SCQE_TYPE(scqe)); + + /* 1. error code cheking */ + if (unlikely(HIFC_SCQE_HAS_ERRCODE(scqe))) { + /* So far, just print & counter */ + hifc_scqe_error_pre_process(hba, scqe); + } + + /* 2. Process SCQE by corresponding processer */ + total_index = sizeof(scqe_handler_table) / + sizeof(struct unf_scqe_handler_table_s); + while (index < total_index) { + if (HIFC_GET_SCQE_TYPE(scqe) == + scqe_handler_table[index].scqe_type) { + ret = scqe_handler_table[index].pfn_scqe_handle_fun( + hba, scqe); + do_reclaim = scqe_handler_table[index].reclaim_sq_wpg; + + break; + } + + index++; + } + + /* 3. SCQE type check */ + if (unlikely(index == total_index)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Unknown SCQE type %d", + HIFC_GET_SCQE_TYPE(scqe)); + + UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe, + sizeof(union hifcoe_scqe_u)); + } + + /* 4. 
If SCQE is for SQ-WQE then recovery Link List SQ free page */ + if (do_reclaim == UNF_TRUE) { + if (HIFC_SCQE_CONN_ID_VALID(scqe)) { + ret = hifc_reclaim_sq_wqe_page(v_hba, scqe); + } else { + /* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF, + * count with HBA + */ + HIFC_HBA_STAT( + (struct hifc_hba_s *)v_hba, + HIFC_STAT_SQ_IO_BUFFER_CLEARED); + } + } + + return ret; +} + +static void *hifc_get_els_buf_by_userid(struct hifc_hba_s *v_hba, + unsigned short user_id) +{ + struct hifc_srq_buff_entry_s *buf_entry = NULL; + struct hifc_srq_info_s *srq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return NULL); + + srq_info = &v_hba->els_srq_info; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + user_id < srq_info->valid_wqe_num, return NULL); + + buf_entry = &srq_info->els_buff_entry_head[user_id]; + + return buf_entry->buff_addr; +} + +static unsigned int hifc_check_srq_buf_valid(struct hifc_hba_s *v_hba, + unsigned int *v_buf_id, + unsigned int v_buf_num) +{ + unsigned int index = 0; + unsigned int buf_id = 0; + void *srq_buf = NULL; + + for (index = 0; index < v_buf_num; index++) { + buf_id = v_buf_id[index]; + + if (buf_id < v_hba->els_srq_info.valid_wqe_num) { + srq_buf = hifc_get_els_buf_by_userid( + v_hba, + (unsigned short)buf_id); + } else { + srq_buf = NULL; + } + + if (!srq_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get srq buffer user id(0x%x) is null", + v_hba->port_cfg.port_id, buf_id); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static void hifc_reclaim_srq_buff(struct hifc_hba_s *v_hba, + unsigned int *v_buf_id, + unsigned int v_buf_num) +{ + unsigned int index = 0; + unsigned int buf_id = 0; + void *srq_buf = NULL; + + for (index = 0; index < v_buf_num; index++) { + buf_id = v_buf_id[index]; + if (buf_id < v_hba->els_srq_info.valid_wqe_num) { + srq_buf = hifc_get_els_buf_by_userid( + v_hba, + (unsigned short)buf_id); + } else { + srq_buf = NULL; + } + + /* If the value of buffer is NULL, it indicates that the value + * of buffer is invalid. In this case, exit directly. 
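+ * Buffers seen before the first invalid entry are still returned
+ * to the ELS SRQ below so that the ucode can reuse them.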
+ */ + if (!srq_buf) + break; + + hifc_post_els_srq_wqe(&v_hba->els_srq_info, + (unsigned short)buf_id); + } +} + +static unsigned int hifc_check_els_gs_valid(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe, + struct unf_frame_pkg_s *v_pkg, + unsigned int *v_buf_id, + unsigned int buf_num, + unsigned int frame_len) +{ + unsigned int ox_id = INVALID_VALUE32; + + ox_id = v_pkg->frame_head.oxid_rxid >> 16; + + /* The ELS CMD returns an error code and discards it directly */ + if ((sizeof(struct hifc_fc_frame_header) > frame_len) || + (HIFC_SCQE_HAS_ERRCODE(v_scqe)) || + (buf_num > HIFC_ELS_SRQ_BUF_NUM)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) get scqe type(0x%x) payload len(0x%x),scq status(0x%x),user id num(0x%x) abnormal", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_TYPE(v_scqe), + frame_len, + HIFC_GET_SCQE_STATUS(v_scqe), + buf_num); + + /* ELS RSP Special Processing */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) { + if (HIFC_SCQE_ERR_TO_CM(v_scqe)) { + v_pkg->status = UNF_IO_FAILED; + (void)hifc_rcv_els_rsp(v_hba, v_pkg, ox_id); + } else { + HIFC_HBA_STAT(v_hba, + HIFC_STAT_ELS_RSP_EXCH_REUSE); + } + } + + /* GS RSP Special Processing */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP) { + if (HIFC_SCQE_ERR_TO_CM(v_scqe)) { + v_pkg->status = UNF_IO_FAILED; + (void)hifc_rcv_gs_rsp(v_hba, v_pkg, ox_id); + } else { + HIFC_HBA_STAT(v_hba, + HIFC_STAT_GS_RSP_EXCH_REUSE); + } + } + + /* Reclaim srq */ + if (buf_num <= HIFC_ELS_SRQ_BUF_NUM) + hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num); + + return UNF_RETURN_ERROR; + } + + /* ELS CMD Check the validity of the buffer sent by the ucode */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_CMND) { + if (hifc_check_srq_buf_valid(v_hba, v_buf_id, buf_num) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null", + v_hba->port_cfg.port_id, buf_num); + + hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned char *pld = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_cmd_s *els_cmd = NULL; + struct hifc_fc_frame_header *els_frame = NULL; + struct hifc_fc_frame_header local_fc_frame = { 0 }; + void *els_buf = NULL; + int first_frame = UNF_FALSE; + unsigned long flags = 0; + unsigned char srq_delay_flag = 0; + + els_cmd = &v_scqe->rcv_els_cmd; + frame_len = els_cmd->wd3.data_len; + max_buf_num = els_cmd->wd3.user_id_num; + + pkg.xchg_contex = NULL; + pkg.status = UNF_IO_SUCCESS; + + /* Check the validity of error codes and buff. 
If an exception occurs, + * discard the error code + */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, els_cmd->user_id, + max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Exception record, which is not processed currently */ + if (rcv_data_len >= frame_len) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els cmd date len(0x%x) is bigger than fream len(0x%x)", + v_hba->port_cfg.port_id, + rcv_data_len, frame_len); + } + + buf_id = (unsigned short)els_cmd->user_id[index]; + els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + /* Obtain playload address */ + pld = (unsigned char *)(els_buf); + hdr_len = 0; + first_frame = UNF_FALSE; + if (index == 0) { + els_frame = (struct hifc_fc_frame_header *)els_buf; + pld = (unsigned char *)(els_frame + 1); + + hdr_len = sizeof(struct hifc_fc_frame_header); + first_frame = UNF_TRUE; + + memcpy(&local_fc_frame, els_frame, + sizeof(struct hifc_fc_frame_header)); + hifc_big_to_cpu32(&local_fc_frame, + sizeof(struct hifc_fc_frame_header)); + memcpy(&pkg.frame_head, &local_fc_frame, + sizeof(pkg.frame_head)); + } + + /* Calculate the playload length */ + pkg.last_pkg_flag = 0; + pld_len = HIFC_SRQ_ELS_SGE_LEN; + + if ((rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= frame_len) { + pkg.last_pkg_flag = 1; + pld_len = frame_len - rcv_data_len; + + if (unlikely( + (v_hba->active_topo == UNF_TOP_P2P_MASK) && + (v_hba->delay_info.root_rq_rcvd_flag == 0))) { + /* Only data is pushed for the first time, but + * the last packet flag is not set + */ + pkg.last_pkg_flag = 0; + srq_delay_flag = 1; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) revd els from srq, and need delay processed, topo(0x%x)", + v_hba->port_cfg.port_id, + v_hba->active_topo); + } + } + + /* Push data to COM */ + if (ret == RETURN_OK) { + ret = hifc_rcv_els_cmnd(v_hba, &pkg, pld, + (pld_len - hdr_len), + first_frame); + + /* If the plogi arrives before the flogi, the pkg is + * saved, and the last packet is pushed + * when the root rq contains content. 
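+ * The copy kept in delay_info below has last_pkg_flag forced to 1 so
+ * that the exchange can still be completed later.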
+ */ + if (unlikely(srq_delay_flag == 1)) { + spin_lock_irqsave(&v_hba->delay_info.srq_lock, + flags); + memcpy(&v_hba->delay_info.pkg, &pkg, + sizeof(pkg)); + v_hba->delay_info.srq_delay_flag = 1; + v_hba->delay_info.pkg.last_pkg_flag = 1; + + /* Add a 20-ms timer to prevent the root rq + * from processing data + */ + (void)queue_delayed_work( + v_hba->work_queue, + &v_hba->delay_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SRQ_PROCESS_DELAY_MS)); + + spin_unlock_irqrestore( + &v_hba->delay_info.srq_lock, flags); + } + } + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u", + v_hba->port_cfg.port_id, + pkg.type, + pkg.cmnd, + els_cmd->wd2.ox_id, + els_cmd->wd2.rx_id, + els_cmd->wd1.sid, + els_cmd->wd0.did, + ret); + + return ret; +} + +static unsigned int hifc_get_els_gs_pld_len(struct hifc_hba_s *v_hba, + unsigned int v_rcv_data_len, + unsigned int v_frame_len) +{ + unsigned int pld_len; + + /* Exception record, which is not processed currently */ + if (v_rcv_data_len >= v_frame_len) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els rsp date len(0x%x) is bigger than fream len(0x%x)", + v_hba->port_cfg.port_id, + v_rcv_data_len, v_frame_len); + } + + pld_len = HIFC_SRQ_ELS_SGE_LEN; + if ((v_rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= v_frame_len) + pld_len = v_frame_len - v_rcv_data_len; + + return pld_len; +} + +static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned int ox_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_gs_rsp_s *els_rsp; + struct hifc_fc_frame_header *els_frame = NULL; + void *els_buf = NULL; + unsigned char *pld = NULL; + + els_rsp = &v_scqe->rcv_els_gs_rsp; + frame_len = els_rsp->wd2.data_len; + max_buf_num = els_rsp->wd4.user_id_num; + + ox_id = (unsigned int)(els_rsp->wd1.ox_id) - v_hba->exit_base; + pkg.frame_head.oxid_rxid = (unsigned int)(els_rsp->wd1.rx_id) | + ox_id << 16; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp->magic_num; + pkg.frame_head.csctl_sid = els_rsp->wd4.sid; + pkg.frame_head.rctl_did = els_rsp->wd3.did; + pkg.status = UNF_IO_SUCCESS; + + /* Handle the exception first. The ELS RSP returns the error code. + * Only the OXID can submit the error code to the CM layer. 
+ */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, + els_rsp->user_id, max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* if this is echo rsp */ + if (els_rsp->wd3.echo_rsp == UNF_TRUE) { + /* echo time stamp fill in the Els rsp user_id last 4dword */ + pkg.private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = + els_rsp->user_id[5]; + pkg.private[PKG_PRIVATE_ECHO_RSP_SND_TIME] = + els_rsp->user_id[6]; + pkg.private[PKG_PRIVATE_ECHO_CMD_SND_TIME] = + els_rsp->user_id[7]; + pkg.private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = + els_rsp->user_id[8]; + } + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Obtain buffer address */ + els_buf = NULL; + buf_id = (unsigned short)els_rsp->user_id[index]; + + els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + /* If the value of buffer is NULL, the buff id is abnormal and + * exits directly + */ + if (unlikely(!els_buf)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal", + v_hba->port_cfg.port_id, ox_id, + els_rsp->wd1.rx_id, els_rsp->wd4.sid, + els_rsp->wd3.did, index, buf_id); + + if (index == 0) { + pkg.status = UNF_IO_FAILED; + ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id); + } + + return ret; + } + + hdr_len = 0; + pld = (unsigned char *)(els_buf); + if (index == 0) { + hdr_len = sizeof(struct hifc_fc_frame_header); + + els_frame = (struct hifc_fc_frame_header *)els_buf; + pld = (unsigned char *)(els_frame + 1); + } + + /* Calculate the playload length */ + pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len, + frame_len); + + /* Push data to COM */ + if (ret == RETURN_OK) { + ret = hifc_recv_els_rsp_payload(v_hba, &pkg, ox_id, pld, + (pld_len - hdr_len)); + } + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + if ((els_rsp->wd3.end_rsp) && (ret == RETURN_OK)) + ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) receive ELS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", + v_hba->port_cfg.port_id, + ox_id, + els_rsp->wd1.rx_id, + els_rsp->wd4.sid, + els_rsp->wd3.did, + els_rsp->wd3.end_rsp, + els_rsp->wd4.user_id_num); + + return ret; +} + +static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned int ox_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_gs_rsp_s *gs_rsp = NULL; + struct hifc_fc_frame_header *gs_frame = NULL; + void *gs_buf = NULL; + unsigned char *pld = NULL; + + gs_rsp = &v_scqe->rcv_els_gs_rsp; + frame_len = gs_rsp->wd2.data_len; + max_buf_num = gs_rsp->wd4.user_id_num; + + ox_id = (unsigned int)(gs_rsp->wd1.ox_id) - v_hba->exit_base; + pkg.frame_head.oxid_rxid = (unsigned int)(gs_rsp->wd1.rx_id) | + ox_id << 16; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = gs_rsp->magic_num; + pkg.frame_head.csctl_sid = gs_rsp->wd4.sid; + pkg.frame_head.rctl_did = gs_rsp->wd3.did; + pkg.status = UNF_IO_SUCCESS; + + if (gs_rsp->wd3.end_rsp) + HIFC_HBA_STAT(v_hba, HIFC_STAT_LAST_GS_SCQE); + + /* Exception handling: The GS RSP returns an error code. 
Only the OXID + * can submit the error code to the CM layer + */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, gs_rsp->user_id, + max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Obtain buffer address */ + gs_buf = NULL; + buf_id = (unsigned short)gs_rsp->user_id[index]; + + gs_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + if (unlikely(!gs_buf)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get gs rsp scqe user id(0x%x) abnormal", + v_hba->port_cfg.port_id, ox_id, + gs_rsp->wd1.rx_id, gs_rsp->wd4.sid, + gs_rsp->wd3.did, index, buf_id); + + if (index == 0) { + pkg.status = UNF_IO_FAILED; + ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id); + } + + return ret; + } + + /* Obtain playload address */ + hdr_len = 0; + pld = (unsigned char *)(gs_buf); + if (index == 0) { + hdr_len = sizeof(struct hifc_fc_frame_header); + + gs_frame = (struct hifc_fc_frame_header *)gs_buf; + pld = (unsigned char *)(gs_frame + 1); + } + + /* Calculate the playload length */ + pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len, + frame_len); + + /* Push data to COM */ + if (ret == RETURN_OK) + ret = hifc_rcv_gs_rsp_payload(v_hba, &pkg, ox_id, pld, + (pld_len - hdr_len)); + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + if ((gs_rsp->wd3.end_rsp) && (ret == RETURN_OK)) + ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv GS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", + v_hba->port_cfg.port_id, + ox_id, + gs_rsp->wd1.rx_id, + gs_rsp->wd4.sid, + gs_rsp->wd3.did, + gs_rsp->wd3.end_rsp, + gs_rsp->wd4.user_id_num); + + return ret; +} + +static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_comm_rsp_sts_s *els_rsp_sts = NULL; + + els_rsp_sts = &v_scqe->comm_sts; + rx_id = (unsigned int)els_rsp_sts->wd0.rx_id; + rx_id = rx_id - v_hba->exit_base; + + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp_sts->magic_num; + pkg.frame_head.oxid_rxid = rx_id | + (unsigned int)(els_rsp_sts->wd0.ox_id) << 16; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id); + + return ret; +} + +static unsigned int hifc_check_rport_is_valid( + const struct hifc_parent_queue_info_s *v_prntq_info, + unsigned int scqe_xid) +{ + if (v_prntq_info->parent_ctx.cqm_parent_ctx_obj) { + if ((v_prntq_info->parent_sq_info.context_id & + HIFC_CQM_XID_MASK) == (scqe_xid & HIFC_CQM_XID_MASK)) + return RETURN_OK; + } + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int rport_valid = UNF_RETURN_ERROR; + unsigned int rport_index = 0; + unsigned int cache_id = 0; + unsigned int local_ctx_id = 0; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifcoe_scqe_sess_sts_s *offload_sts = NULL; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + + offload_sts = &v_scqe->sess_sts; + rport_index = offload_sts->wd1.conn_id; + 
cache_id = offload_sts->wd2.cid; + local_ctx_id = offload_sts->wd0.xid_qpn; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, cache id(0x%x)", + v_hba->port_cfg.port_id, rport_index, cache_id); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + + rport_valid = hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id); + if (rport_valid != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + /* off_load failed */ + if (HIFC_GET_SCQE_STATUS(v_scqe) != HIFC_COMPLETION_STATUS_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed", + v_hba->port_cfg.port_id, rport_index, + local_ctx_id, cache_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + prnt_qinfo->parent_sq_info.cache_id = cache_id; + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADED; + atomic_set(&prnt_qinfo->parent_sq_info.sq_cashed, UNF_TRUE); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, &destroy_sqe_info); + + return RETURN_OK; +} + +unsigned int hifc_get_gs_req_and_rsp_pld_len(unsigned short cmd_code, + unsigned int *v_gs_pld_len, + unsigned int *v_gs_rsp_pld_len) +{ + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_pld_len, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_rsp_pld_len, + return UNF_RETURN_ERROR); + + switch (cmd_code) { + case NS_GPN_ID: + *v_gs_pld_len = UNF_GPNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN; + break; + + case NS_GNN_ID: + *v_gs_pld_len = UNF_GNNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN; + break; + + case NS_GFF_ID: + *v_gs_pld_len = UNF_GFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN; + break; + + case NS_GID_FT: + case NS_GID_PT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_RFT_ID: + *v_gs_pld_len = UNF_RFTID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; + break; + + case NS_RFF_ID: + *v_gs_pld_len = UNF_RFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN; + break; + case NS_GA_NXT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_GIEL: + *v_gs_pld_len = 
UNF_RFTID_RSP_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Unknown GS commond type(0x%x)", cmd_code); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_send_gs_via_parent(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned short ox_id, rx_id; + unsigned short cmd_code = UNF_ZERO; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gs_pld_len = UNF_ZERO; + unsigned int gs_rsp_pld_len = UNF_ZERO; + void *gs_pld_addr = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_sq_info_s *sq_info; + struct hifcoe_sqe_s sqe; + unsigned long long fram_phy_addr; + + hba = (struct hifc_hba_s *)v_hba; + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + sq_info = hifc_find_parent_sq_by_pkg(hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Get NULL parent SQ information"); + + return ret; + } + + cmd_code = HIFC_GET_GS_CMND_CODE(v_pkg->cmnd); + + ret = hifc_get_gs_req_and_rsp_pld_len(cmd_code, &gs_pld_len, + &gs_rsp_pld_len); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + gs_pld_addr = (void *)(HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg)); + fram_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + + if (cmd_code == NS_GID_FT || cmd_code == NS_GID_PT) + gs_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(v_pkg)); + + /* Assemble the SQE Control Section part */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s))); + /* Assemble the SQE Task Section part */ + ox_id = UNF_GET_OXID(v_pkg) + hba->exit_base; + rx_id = UNF_GET_RXID(v_pkg); + hifc_build_service_wqe_ts_common(&sqe.ts_sl, + sq_info->rport_index, ox_id, + rx_id, HIFC_LSW(gs_pld_len)); + hifc_build_gs_wqe_ts_req(&sqe, UNF_GETXCHGALLOCTIME(v_pkg)); + + hifc_build_els_gs_wqe_sge(&sqe, gs_pld_addr, fram_phy_addr, gs_pld_len, + sq_info->context_id, v_hba); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4915, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4916, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg), + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + + hba = (struct hifc_hba_s *)v_hba; + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + if (HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + 
v_pkg->frame_head.rctl_did); + + return ret; + } + + ret = hifc_send_gs_via_parent(v_hba, v_pkg); + + return ret; +} + +static unsigned int hifc_get_bls_pld_len(struct unf_frame_pkg_s *v_pkg, + unsigned int *v_frame_len) +{ + unsigned int ret = RETURN_OK; + unsigned int rctl = 0; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR); + + rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head); + if (rctl == HIFC_RCTL_BLS_ACC) { + /* BA_ACC */ + *v_frame_len = sizeof(struct unf_ba_acc_s); + } else if (rctl == HIFC_RCTL_BLS_RJT) { + /* BA_RJT */ + *v_frame_len = sizeof(struct unf_ba_rjt_s); + } else { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[warn]PKG Rclt(0x%x) not BLS ACC or RJT", rctl); + + *v_frame_len = 0; + ret = UNF_RETURN_ERROR; + } + + return ret; +} + +static unsigned int hifc_send_bls_via_cmdq(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rctl = 0; + unsigned int bls_pld_len = 0; + unsigned short rx_id = INVALID_VALUE16; + unsigned short ox_id = INVALID_VALUE16; + unsigned short exch_id = INVALID_VALUE16; + unsigned char *bls_pld_addr = NULL; + union hifc_cmdqe_u cmdqe; + struct hifc_parent_sq_info_s *sq_info = NULL; + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* Determine whether the value is ACC or RTJ and obtain the payload + * length of the ABTS_RSP + */ + ret = hifc_get_bls_pld_len(v_pkg, &bls_pld_len); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cmdq send BLS PKG DID(0x%x) failed", + v_hba->port_index, v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head); + exch_id = (v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) & 0xffff; + if ((exch_id == INVALID_VALUE16) && (rctl == HIFC_RCTL_BLS_ACC)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cmdq send BA_ACC with error RXID(0xffff)", + v_hba->port_index); + + return UNF_RETURN_ERROR; + } + + /* + * FC-FS-3 15.3.3.1 Description: + * The OX_ID and RX_ID shall be set to match the Exchange in which + * the ABTS frame was transmitted. + */ + rx_id = UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head); + ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head); + + if (exch_id != INVALID_VALUE16) { + exch_id = exch_id + v_hba->exit_base; + } else { + /* If the number is not an immediate number and the rxid is not + * allocated to the CM, the CM may correspond to the rjt. + */ + } + + memset(&cmdqe, 0, sizeof(cmdqe)); + hifc_build_cmdqe_common(&cmdqe, HIFC_CMDQE_ABTS_RSP, exch_id); + cmdqe.snd_abts_rsp.wd1.ox_id = ox_id; + cmdqe.snd_abts_rsp.wd1.port_id = v_hba->port_index; + cmdqe.snd_abts_rsp.wd1.payload_len = bls_pld_len; + cmdqe.snd_abts_rsp.wd1.rsp_type = ((rctl == HIFC_RCTL_BLS_ACC) ? 
0 : 1); + cmdqe.snd_abts_rsp.wd2.conn_id = sq_info->rport_index; + cmdqe.snd_abts_rsp.wd2.scqn = hifc_get_rport_maped_sts_scqn(v_hba, + sq_info->rport_index); + cmdqe.snd_abts_rsp.wd3.xid = sq_info->context_id; + cmdqe.snd_abts_rsp.wd4.cid = sq_info->cache_id; + cmdqe.snd_abts_rsp.wd5.req_rx_id = rx_id; + bls_pld_addr = HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg); + memcpy(cmdqe.snd_abts_rsp.payload, bls_pld_addr, bls_pld_len); + + /* Send the ABTS_RSP command via ROOT CMDQ. */ + ret = hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.snd_abts_rsp)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) send ABTS_RSP OXID(0x%x) RXID(0x%x) EXCHID(0x%x)", + v_hba->port_cfg.port_id, sq_info->rport_index, ox_id, + rx_id, exch_id); + + return ret; +} + +static unsigned int hifc_send_bls_via_parent(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = INVALID_VALUE16; + unsigned short rx_id = INVALID_VALUE16; + struct hifcoe_sqe_s sqe; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x5015, UNF_TRUE, (v_pkg->type == UNF_PKG_BLS_REQ), + return UNF_RETURN_ERROR); + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(v_hba, v_pkg); + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + rx_id = UNF_GET_RXID(v_pkg); + ox_id = UNF_GET_OXID(v_pkg) + v_hba->exit_base; + + /* Assemble the SQE Control Section part. + * The ABTS does not have Payload. bdsl=0 + */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), 0); + + /* Assemble the SQE Task Section BLS Common part. 
The value of DW2 + * of BLS WQE is Rsvd, and the value of DW2 is 0 + */ + hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index, + ox_id, rx_id, 0); + + /* Assemble the special part of the ABTS */ + hifc_build_bls_wqe_ts_req(&sqe, v_pkg->frame_head.parameter, + UNF_GETXCHGALLOCTIME(v_pkg)); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4913, UNF_TRUE, UNF_PKG_BLS_REQ == v_pkg->type, + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + hba = (struct hifc_hba_s *)v_hba; + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + /* INI: send ABTS_REQ via parent SQ */ + ret = hifc_send_bls_via_parent(hba, v_pkg); + + } else { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_bls_via_cmdq(hba, v_pkg); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * RCVD sq flush sts + * --->>> continue flush or clear done + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->flush_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + if (v_scqe->flush_sts.wd0.last_flush) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + /* If the Flush STS is last one, send cmd done */ + ret = hifc_clear_sq_wqe_done(v_hba); + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + ret = hifc_clear_pending_sq_wqe(v_hba); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear: fetched sq wqe + * ---to--->>> pending sq wqe + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->clear_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) cleared all fetched wqe, start clear sq pending 
wqe, stage (0x%x)", + v_hba->port_cfg.port_id, v_hba->q_set_stage); + + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHING; + ret = hifc_clear_pending_sq_wqe(v_hba); + + return ret; +} + +static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int rport_index = INVALID_VALUE32; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *parent_queue_info = NULL; + struct hifcoe_scqe_sess_sts_s *sess_sts = + (struct hifcoe_scqe_sess_sts_s *)(void *)v_scqe; + unsigned int ctx_flush_done; + unsigned int *ctx_dw = NULL; + int ret; + + rport_index = sess_sts->wd1.conn_id; + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive reset session cmd sts failed, invlaid rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)", + v_hba->port_cfg.port_id, + rport_index, + sess_sts->ch.wd0.err_code, + sess_sts->ch.wd0.cqe_remain_cnt); + + return UNF_RETURN_ERROR; + } + + parent_queue_info = + &v_hba->parent_queue_mgr->parent_queues[rport_index]; + + /* + * If only session reset is used, the offload status of sq remains + * unchanged. If a link is deleted, the offload status is set to + * destroying and is irreversible. + */ + spin_lock_irqsave(&parent_queue_info->parent_queue_state_lock, flag); + + /* + * According to the fault tolerance principle, even if the connection + * deletion times out and the sts returns to delete the connection, one + * indicates thatthe cancel timer is successful, and 0 indicates that + * the timer is being processed. + */ + if (!cancel_delayed_work( + &parent_queue_info->parent_sq_info.del_work)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout", + v_hba->port_cfg.port_id, + rport_index); + } + + /* + * If the SessRstSts is returned too late and the Parent Queue Info + * resource is released, OK is returned. 
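+ * The offload_state check below is what detects that late completion.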
+ */ + if (parent_queue_info->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)", + v_hba->port_cfg.port_id, + rport_index, + sess_sts->ch.wd0.err_code, + sess_sts->ch.wd0.cqe_remain_cnt); + + return RETURN_OK; + } + + if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)(parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + /* memory barr */ + mb(); + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session", + v_hba->port_cfg.port_id, rport_index); + + /* If flushdone bit is not set,delay free Sq info */ + ret = queue_delayed_work( + v_hba->work_queue, + &parent_queue_info->parent_sq_info.flush_done_tmo_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_HBA_STAT( + v_hba, + HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + v_hba->port_cfg.port_id, + rport_index, ret); + } + + return RETURN_OK; + } + } + + spin_unlock_irqrestore(&parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to free parent session with rport_index(0x%x)", + v_hba->port_cfg.port_id, + rport_index); + + hifc_free_parent_queue_info(v_hba, parent_queue_info); + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear ELS/Immi SRQ + * ---then--->>> Destroy SRQ + */ + + struct hifc_hba_s *hba = v_hba; + struct hifc_srq_info_s *srq_info = NULL; + + if (HIFC_GET_SCQE_STATUS(v_scqe) != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) clear srq failed, status(0x%x)", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_STATUS(v_scqe)); + + return RETURN_OK; + } + + srq_info = &hba->els_srq_info; + + /* + * 1: cancel timer succeed + * 0: the timer is being processed, the SQ is released when the timer + * times out + */ + if (cancel_delayed_work(&srq_info->del_work)) { + /* + * not free srq resource, it will be freed on hba remove + */ + srq_info->state = HIFC_CLEAN_DONE; + } + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_itmf_marker_sts_s *marker_sts = NULL; + + marker_sts = &v_scqe->itmf_marker_sts; + ox_id = (unsigned int)marker_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)marker_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + + pkg.frame_head.csctl_sid = marker_sts->wd3.sid; + pkg.frame_head.rctl_did = marker_sts->wd2.did; + + /* 1. 
set pkg status */ + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + /* 2 .process rcvd marker STS: set exchange state */ + ret = hifc_rcv_tmf_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + + struct hifcoe_scqe_abts_marker_sts_s *abts_sts = NULL; + + abts_sts = &v_scqe->abts_marker_sts; + if (!abts_sts) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]ABTS marker STS is NULL"); + return ret; + } + + ox_id = (unsigned int)abts_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)abts_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + pkg.frame_head.csctl_sid = abts_sts->wd3.sid; + pkg.frame_head.rctl_did = abts_sts->wd2.did; + /* abts marker abts_maker_status as ucode stat */ + pkg.abts_maker_status = (unsigned int)abts_sts->wd3.io_state; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + ret = hifc_rcv_abts_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeq_msg) +{ + unsigned int ret = RETURN_OK; + struct hifcoe_aqe_data_s *aeq_msg; + unsigned int rport_index = 0; + unsigned int local_ctx_id = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + unsigned long flag = 0; + + aeq_msg = v_aeq_msg; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive off_load Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id, aeq_msg->wd1.xid); + + /* Currently, only the offload failure caused by insufficient scqe is + * processed. Other errors are not processed temporarily. 
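+ * Any other event code is only logged and then rejected with
+ * UNF_RETURN_ERROR below.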
+ */ + if (unlikely(aeq_msg->wd0.evt_code != + FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive an unsupported error code of AEQ Event, EvtCode(0x%x) Conn_id(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id); + + return UNF_RETURN_ERROR; + } + HIFC_SCQ_ERR_TYPE_STAT(v_hba, FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL); + + rport_index = aeq_msg->wd0.conn_id; + local_ctx_id = aeq_msg->wd1.xid; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, Xid(0x%x)", + v_hba->port_cfg.port_id, rport_index, + aeq_msg->wd1.xid); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + if (hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + /* The offload status is restored only + * when the offload status is offloading + */ + if (prnt_qinfo->offload_state == HIFC_QUEUE_STATE_OFFLOADING) + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.start_jiff, + destroy_sqe_info.time_out, + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index, + HIFC_QUEUE_STATE_INITIALIZED); + + ret = hifc_free_parent_resource(v_hba, + &destroy_sqe_info.rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay destroy parent sq failed, rport index 0x%x, rport nport id 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.rport_info.rport_index, + destroy_sqe_info.rport_info.nport_id); + } + } + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_service.h b/drivers/scsi/huawei/hifc/hifc_service.h new file mode 100644 index 0000000000000000000000000000000000000000..c810cc7e64cb0e83c05ed285fa73017a0727c28a --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_service.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef 
__HIFC_SERVICE_H__ +#define __HIFC_SERVICE_H__ + +/* Send ElsCmnd or ElsRsp */ +unsigned int hifc_send_els_cmnd(void *phba, struct unf_frame_pkg_s *v_pkg); + +/* Send GsCmnd */ +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Send BlsCmnd */ +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Receive Frame from Root RQ */ +unsigned int hifc_rcv_service_frame_from_rq( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *rq_info, + struct hifc_root_rq_complet_info_s *v_complet_info, + unsigned short v_rcv_buf_num); + +unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +unsigned int hifc_rq_rcv_els_rsp_sts( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +/* Receive Frame from SCQ */ +unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe, + unsigned int scq_idx); + +/* FC txmfs */ +#define HIFC_DEFAULT_TX_MAX_FREAM_SIZE 256 + +#define HIFC_FIRST_PKG_FLAG (1 << 0) +#define HIFC_LAST_PKG_FLAG (1 << 1) + +#define HIFC_CHECK_IF_FIRST_PKG(pkg_flag) ((pkg_flag) & HIFC_FIRST_PKG_FLAG) +#define HIFC_CHECK_IF_LAST_PKG(pkg_flag) ((pkg_flag) & HIFC_LAST_PKG_FLAG) + +#define HIFC_GET_SERVICE_TYPE(v_hba) 12 +#define HIFC_GET_PACKET_TYPE(v_service_type) 1 +#define HIFC_GET_PACKET_COS(v_service_type) 1 +#define HIFC_GET_PRLI_PAYLOAD_LEN \ + (UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE) +/* Start addr of the header/payloed of the cmnd buffer in the pkg */ +#define HIFC_FC_HEAD_LEN (sizeof(struct unf_fchead_s)) +#define HIFC_PAYLOAD_OFFSET (sizeof(struct unf_fchead_s)) +#define HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg) \ + UNF_GET_FLOGI_PAYLOAD(v_pkg) +#define HIFC_GET_CMND_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_cmnd_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr + HIFC_PAYLOAD_OFFSET) +#define HIFC_GET_CMND_FC_HEADER(v_pkg) \ + (&(UNF_GET_SFS_ENTRY(v_pkg)->sfs_common.frame_head)) +#define HIFC_PKG_IS_ELS_RSP(els_cmnd_type) \ + (((els_cmnd_type) == ELS_ACC) || ((els_cmnd_type) == ELS_RJT)) +#define HIFC_XID_IS_VALID(xid, exi_base, exi_count) \ + (((xid) >= (exi_base)) && ((xid) < ((exi_base) + (exi_count)))) + +#define UNF_FC_PAYLOAD_ELS_MASK 0xFF000000 +#define UNF_FC_PAYLOAD_ELS_SHIFT 24 +#define UNF_FC_PAYLOAD_ELS_DWORD 0 + +/* Note: this pfcpayload is little endian */ +#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ + UNF_GET_SHIFTMASK(((unsigned int *)(void *)pfcpayload)\ + [UNF_FC_PAYLOAD_ELS_DWORD], \ + UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) + +#define HIFC_ELS_CMND_MASK 0xffff +#define HIFC_ELS_CMND__RELEVANT_SHIFT 16UL +#define HIFC_GET_ELS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_TYPE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_CODE(__cmnd) \ + ((unsigned short)((__cmnd) >> HIFC_ELS_CMND__RELEVANT_SHIFT & \ + HIFC_ELS_CMND_MASK)) +#define HIFC_GET_GS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) + +/* ELS CMND Request */ +#define ELS_CMND 0 + +/* fh_f_ctl - Frame control flags. 
*/ +#define HIFC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */ +#define HIFC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */ +#define HIFC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */ +#define HIFC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */ +#define HIFC_FC_END_SEQ (1 << 19) /* last frame of sequence */ +#define HIFC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */ +#define HIFC_FC_RES_B17 (1 << 17) /* reserved */ +#define HIFC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */ +#define HIFC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */ +#define HIFC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */ +#define HIFC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */ +#define HIFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */ +#define HIFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */ +#define HIFC_FC_RES_B11 (1 << 11) /* reserved */ +#define HIFC_FC_RES_B10 (1 << 10) /* reserved */ +#define HIFC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */ +#define HIFC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */ +#define HIFC_FC_CONT_SEQ(i) ((i) << 6) +#define HIFC_FC_ABT_SEQ(i) ((i) << 4) +#define HIFC_FC_REL_OFF (1 << 3) /* parameter is relative offset */ +#define HIFC_FC_RES2 (1 << 2) /* reserved */ +#define HIFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */ + +#define HIFC_FCTL_REQ (HIFC_FC_FIRST_SEQ | HIFC_FC_END_SEQ |\ + HIFC_FC_SEQ_INIT) +#define HIFC_FCTL_RESP (HIFC_FC_EX_CTX | HIFC_FC_LAST_SEQ | \ + HIFC_FC_END_SEQ | HIFC_FC_SEQ_INIT) +#define HIFC_RCTL_BLS_REQ 0x81 +#define HIFC_RCTL_BLS_ACC 0x84 +#define HIFC_RCTL_BLS_RJT 0x85 + +#define UNF_IO_STATE_NEW 0 +#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) +#define TGT_IO_STATE_RSP (1 << 5) +#define TGT_IO_STATE_ABORT (1 << 7) + +enum HIFC_FC_FH_TYPE_E { + HIFC_FC_TYPE_BLS = 0x00, /* basic link service */ + HIFC_FC_TYPE_ELS = 0x01, /* extended link service */ + HIFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */ + HIFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */ + HIFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */ + HIFC_FC_TYPE_ILS = 0x22 /* internal link service */ +}; + +enum HIFC_FC_FH_RCTL_E { + HIFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */ + HIFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */ + HIFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */ + HIFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */ + HIFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */ + HIFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */ + HIFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */ + HIFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */ + +#define HIFC_FC_RCTL_ILS_REQ HIFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */ +#define HIFC_FC_RCTL_ILS_REP HIFC_FC_RCTL_DD_SOL_CTL /* ILS reply */ + + /* + * Extended Link_Data + */ + HIFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */ + HIFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */ + HIFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ + HIFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */ + /* + * Optional Extended Headers + */ + HIFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ + HIFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ + HIFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */ + /* + * Basic Link Services fh_r_ctl values. 
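+ * BA_ACC/BA_RJT correspond to the HIFC_RCTL_BLS_ACC/HIFC_RCTL_BLS_RJT
+ * values used by the ABTS response handling.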
+ */ + HIFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ + HIFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */ + HIFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */ + HIFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */ + HIFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */ + HIFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */ + /* + * Link Control Information. + */ + HIFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */ + HIFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */ + HIFC_FC_RCTL_P_RJT = 0xc2, /* port reject */ + HIFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */ + HIFC_FC_RCTL_P_BSY = 0xc4, /* port busy */ + HIFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */ + HIFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */ + HIFC_FC_RCTL_LCR = 0xc7, /* link credit reset */ + HIFC_FC_RCTL_END = 0xc9 /* end */ +}; + +struct hifc_fc_frame_header { + unsigned char rctl; /* routing control */ + unsigned char did[3]; /* Destination ID */ + + unsigned char cs_ctl; /* class of service control / pri */ + unsigned char sid[3]; /* Source ID */ + + unsigned char type; /* see enum fc_fh_type below */ + unsigned char frame_ctl[3]; /* frame control */ + + unsigned char seq_id; /* sequence ID */ + unsigned char df_ctl; /* data field control */ + unsigned short seq_cnt; /* sequence count */ + + unsigned short ox_id; /* originator exchange ID */ + unsigned short rx_id; /* responder exchange ID */ + unsigned int parm_offset; /* parameter or relative offset */ +}; + +unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned char *v_pld, + unsigned int pld_len, + int first_frame); +unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rx_id); +unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); + +void hifc_save_login_para_in_sq_info( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_coparms); +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeg_msg); + +#define HIFC_CHECK_PKG_ALLOCTIME(v_pkg) \ + do { \ + if (unlikely(UNF_GETXCHGALLOCTIME(v_pkg) == 0)) { \ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, \ + UNF_WARN, \ + "[warn]Invalid MagicNum,S_ID(0x%x) D_ID(0x%x) OXID(0x%x) RX_ID(0x%x) pkg type(0x%x) hot pooltag(0x%x)", \ + UNF_GET_SID(v_pkg), \ + UNF_GET_DID(v_pkg), \ + UNF_GET_OXID(v_pkg), \ + UNF_GET_RXID(v_pkg), \ + ((struct unf_frame_pkg_s *)v_pkg)->type, \ + UNF_GET_XCHG_TAG(v_pkg)); \ + } \ + } while (0) + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_sml.c b/drivers/scsi/huawei/hifc/hifc_sml.c new file mode 100644 index 0000000000000000000000000000000000000000..2d04ff6ed5ff2657db6d47e7229c4498f6aa8c87 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_sml.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_sml.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 
8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static void sml_ctr_htonl_n(u32 *node, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) { + *node = HTONL(*node); + node++; + } +} + +static void hifc_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->initial = init_val; +} + +static void hifc_sml_ctr_write_build_req(struct chipif_sml_ctr_wr_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, + u64 val1, u64 val2) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->value1_h = val1 >> 32; + msg->value1_l = val1 & 0xFFFFFFFF; + + msg->value2_h = val2 >> 32; + msg->value2_l = val2 & 0xFFFFFFFF; +} + +/** + * hifc_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hifc_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + */ +int hifc_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hifc_sm_ctr_wr32 - small single 32 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + 
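/*
 * At this step the no-ack write request is being built: the 32-bit value
 * is widened to 64 bits and carried in value1_h/value1_l while value2
 * stays zero.  The rsp buffer handed to hifc_api_cmd_read_ack() below
 * only satisfies the read-ack transport; its contents are not inspected
 * on the write path.
 */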
CHIPIF_NOACK, ctr_id, (u64)value, 0ULL); + + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hifc_sm_ctr_rd64 - big counter 64 read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +/** + * hifc_sm_ctr_wr64 - big single 64 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value, 0ULL); + + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hifc_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) { + pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n", + hwdev, value1, value2, ctr_id); + return -EFAULT; + } + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hifc_sm_ctr_wr64_pair - big pair 128 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @ctr_id: counter id + * @instance: instance value + * @value1: write counter value + * @value2: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 value1, u64 value2) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + /* pair pattern ctr_id must be 
even number */ + if (!hwdev || (0 != (ctr_id & 0x1))) { + pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value1, value2); + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hifc_csr_request_api_data api_data = {0}; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HIFC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hifc_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 4); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hifc_csr_request_api_data api_data; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data)); + api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HIFC_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hifc_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr fail! 
dest %d addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_sml.h b/drivers/scsi/huawei/hifc/hifc_sml.h new file mode 100644 index 0000000000000000000000000000000000000000..9fe2088f48a19c19a10ee26f49ad09d8e60cdb3e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_sml.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CHIPIF_SML_COUNTER_H__ +#define __CHIPIF_SML_COUNTER_H__ + +#define CHIPIF_FUNC_PF 0 +#define CHIPIF_FUNC_VF 1 +#define CHIPIF_FUNC_PPF 2 + +#define CHIPIF_ACK 1 +#define CHIPIF_NOACK 0 + +#define CHIPIF_SM_CTR_OP_READ 0x2 +#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6 +#define CHIPIF_SM_CTR_OP_WRITE 0x3 + +#define SMALL_CNT_READ_RSP_SIZE 16 + +/* request head */ +union chipif_sml_ctr_req_head_u { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct chipif_sml_ctr_rd_req_s { + u32 extra; + union chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +/* counter read response union */ +union ctr_rd_rsp_u { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; + +}; + +/* resopnse head */ +union sml_ctr_rsp_head_u { + struct { + u32 pad:30; /* reserve */ + u32 code:2; /* error code */ + } bs; + + u32 value; +}; + +/* counter write request struct */ +struct chipif_sml_ctr_wr_req_s { + u32 extra; + union chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 rsv1; + u32 rsv2; + u32 value1_h; + u32 value1_l; + u32 value2_h; + u32 value2_l; +}; + +/* counter write response struct */ +struct chipif_sml_ctr_wr_rsp_s { + union sml_ctr_rsp_head_u head; + u32 pad[3]; +}; + +enum HIFC_CSR_API_DATA_OPERATION_ID { + HIFC_CSR_OPERATION_WRITE_CSR = 0x1E, + HIFC_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HIFC_CSR_API_DATA_NEED_RESPONSE_DATA { + HIFC_CSR_NO_RESP_DATA = 0, + HIFC_CSR_NEED_RESP_DATA = 1 +}; + +enum HIFC_CSR_API_DATA_DATA_SIZE { + HIFC_CSR_DATA_SZ_32 = 0, + HIFC_CSR_DATA_SZ_64 = 1 +}; + +struct hifc_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. 
*/ + u32 csr_write_data_l; +}; + +int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value); +int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value); +int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2); + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_tool.c b/drivers/scsi/huawei/hifc/hifc_tool.c new file mode 100644 index 0000000000000000000000000000000000000000..db3586103ae5caee27f90227c68acc88638db79f --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool.c @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_dbgtool_knl.h" +#include "hifc_tool.h" +#include "hifc_portmng.h" + +#define HIADM_DEV_PATH "/dev/hifc_dev" +#define HIADM_DEV_CLASS "hifc_class" +#define HIADM_DEV_NAME "hifc_dev" + +#define MAJOR_DEV_NUM 921 +#define HIFC_CMDQ_BUF_MAX_SIZE 2048U +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) + +static dev_t g_dev_id = {0}; +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static int g_nictool_init_flag; +static int g_nictool_ref_cnt; + +static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_UCODE) + hifc_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf; + + if (!in_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE) { + struct hifc_cmd_buf *cmd_buf; + + if (in_size > HIFC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hifc_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void *)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +static void free_buff_out(void *hwdev, struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + hifc_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) { + struct hifc_cmd_buf *cmd_buf; + + if (out_size > HIFC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hifc_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) more than 
2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int copy_buf_out_to_user(struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + msg_out = ((struct hifc_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +static int __get_card_usr_api_chain_mem(int card_idx) +{ +#define DBGTOOL_PAGE_ORDER 10 + + unsigned char *tmp; + int i; + + mutex_lock(&g_hifc_addr_lock); + g_hifc_card_id = card_idx; + if (!g_hifc_card_vir_addr[card_idx]) { + g_hifc_card_vir_addr[card_idx] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_hifc_card_vir_addr[card_idx]) { + pr_err("Alloc api chain memory fail for card %d.\n", + card_idx); + mutex_unlock(&g_hifc_addr_lock); + return -EFAULT; + } + + memset(g_hifc_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_hifc_card_phy_addr[card_idx] = + virt_to_phys(g_hifc_card_vir_addr[card_idx]); + if (!g_hifc_card_phy_addr[card_idx]) { + pr_err("phy addr for card %d is 0.\n", card_idx); + free_pages((unsigned long)g_hifc_card_vir_addr[ + card_idx], DBGTOOL_PAGE_ORDER); + g_hifc_card_vir_addr[card_idx] = NULL; + mutex_unlock(&g_hifc_addr_lock); + return -EFAULT; + } + + tmp = g_hifc_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_hifc_addr_lock); + + return 0; +} + +static int get_card_func_info(char *dev_name, struct msg_module *nt_msg) +{ + struct hifc_card_func_info card_func_info = {0}; + int id, err; + + if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) || + nt_msg->len_info.in_buff_len != sizeof(card_func_info)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->len_info.out_buff_len, + nt_msg->len_info.in_buff_len, + sizeof(card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HIFC_CHIP_NAME, strlen(HIFC_CHIP_NAME)); + if (err) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get hifc id\n"); + return err; + } + + if (id >= MAX_CARD_NUM) { + pr_err("chip id %d exceed limit[0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + hifc_get_card_func_info_by_card_name(dev_name, &card_func_info); + + if (!card_func_info.num_pf) { + pr_err("None function found for %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Faile to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + card_func_info.usr_api_phy_addr = g_hifc_card_phy_addr[id]; + + /* Copy the dev_info to user mode */ + if (copy_to_user(nt_msg->out_buf, &card_func_info, + sizeof(card_func_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +static bool is_mgmt_cmd_support(void *hwdev, unsigned int mod, u32 up_api_type) +{ + if (FUNC_SUPPORT_MGMT(hwdev)) { + if (up_api_type == API_CLP) { + if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_CLP_INITED)) { + pr_err("CLP have not initialized\n"); + return false; + } + } else if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_MGMT_INITED)) { + pr_err("MGMT have not 
initialized\n"); + return false; + } + } else if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_MBOX_INITED)) { + pr_err("MBOX have not initialized\n"); + return false; + } + + return true; +} + +static bool is_hwdev_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + void *hwdev; + + hwdev = hifc_get_hwdev_by_ifname(ifname); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", ifname); + return false; + } + + switch (mod) { + case SEND_TO_UP: + case SEND_TO_SM: + return is_mgmt_cmd_support(hwdev, mod, up_api_type); + case SEND_TO_UCODE: + if (!hifc_is_hwdev_mod_inited(hwdev, + HIFC_HWDEV_CMDQ_INITED)) { + pr_err("CMDQ have not initialized\n"); + return false; + } + break; + + default: + return false; + } + + return true; +} + +static bool nictool_k_is_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + enum hifc_init_state init_state = + hifc_get_init_state_by_ifname(ifname); + + if (init_state == HIFC_INIT_STATE_NONE) + return false; + + if (mod == HIFCADM_FC_DRIVER) { + if (init_state < HIFC_INIT_STATE_ALL_INITED) { + pr_err("HIFC driver have not initialized\n"); + return false; + } + + return true; + } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) { + return is_hwdev_cmd_support(mod, ifname, up_api_type); + } else if (mod == SEND_TO_HW_DRIVER) { + if (init_state < HIFC_INIT_STATE_HWDEV_INITED) { + pr_err("Hwdev have not initialized\n"); + return false; + } + + return true; + } + + return false; +} + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +} + +static int get_all_chip_id_cmd(struct msg_module *nt_msg) +{ + struct nic_card_id card_id; + + hifc_get_all_chip_id((void *)&card_id); + + if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) { + pr_err("Copy chip id to user failed\n"); + return -EFAULT; + } + + return 0; +} + +static bool __is_pcidev_match_dev_name(const char *ifname, + struct hifc_pcidev *dev) +{ + if (!strncmp(dev->uld_dev_name, ifname, IFNAMSIZ)) + return true; + + if ((dev->uld_dev) && (strlen(ifname) == 0)) + return true; + + return false; +} + +struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_dev_name(ifname, dev)) { + lld_dev_put(); + return dev; + } + } + } + lld_dev_put(); + + return NULL; +} + +static void *get_support_uld_dev(struct msg_module *nt_msg) +{ + struct hifc_pcidev *dev; + + dev = hifc_get_pcidev_by_dev_name(nt_msg->device_name); + + if (dev) + return dev->uld_dev; + + return NULL; +} + +static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + enum hifc_service_type type; + int 
ret = 0; + + type = nt_msg->module - SEND_TO_SM; + if (type != SERVICE_T_FC) { + pr_err("err cmd type: %d\n", type); + return ret; + } + *out_size = sizeof(struct drv_version_info); + + ret = hifc_adm(NULL, nt_msg->msg_formate, buf_in, in_size, + buf_out, out_size); + if (ret) + return ret; + + if (copy_to_user(nt_msg->out_buf, buf_out, *out_size)) + return -EFAULT; + + return ret; +} + +static int send_to_service_driver(struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hifc_service_type type; + void *uld_dev; + int ret = -EINVAL; + + type = nt_msg->module - SEND_TO_SM; + + if (type == SERVICE_T_FC) { + uld_dev = get_support_uld_dev(nt_msg); + if (!uld_dev) + return -EINVAL; + ret = hifc_adm(uld_dev, + nt_msg->msg_formate, + buf_in, in_size, buf_out, + out_size); + } else { + pr_err("Ioctl input module id: %d is incorrectly\n", + nt_msg->module); + } + + return ret; +} + +static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UP: + ret = send_to_up(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UCODE: + ret = send_to_ucode(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out, + out_size); + break; + } + + return ret; +} + +static bool hifc_is_special_handling_cmd(struct msg_module *nt_msg, int *ret) +{ + bool handled = true; + + if (nt_msg->module != SEND_TO_HW_DRIVER) + return false; + + switch (nt_msg->msg_formate) { + case GET_CHIP_ID: + *ret = get_all_chip_id_cmd(nt_msg); + break; + case GET_CHIP_INFO: + *ret = get_card_func_info(nt_msg->device_name, nt_msg); + break; + default: + handled = false; + break; + } + + return handled; +} + +static int do_nictool_ioctl_cmd(void *hwdev, struct msg_module *nt_msg) +{ + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect; + u32 out_size, in_size; + int ret = 0; + + out_size_expect = nt_msg->len_info.out_buff_len; + in_size = nt_msg->len_info.in_buff_len; + + ret = alloc_tmp_buf(hwdev, nt_msg, in_size, + &buf_in, out_size_expect, &buf_out); + if (ret) { + pr_err("Alloc tmp buff failed\n"); + return ret; + } + + out_size = out_size_expect; + + if ((nt_msg->msg_formate == GET_DRV_VERSION) && + (nt_msg->module == HIFCADM_FC_DRIVER)) { + ret = get_service_drv_version(hwdev, nt_msg, buf_in, + in_size, buf_out, &out_size); + goto out_free_buf; + } + + ret = nictool_exec_cmd(hwdev, nt_msg, buf_in, + in_size, buf_out, &out_size); + if (ret) { + pr_err("nictool_exec_cmd failed, mod:%d msg_formate:%d\n", + nt_msg->module, nt_msg->msg_formate); + goto out_free_buf; + } + + if (out_size_expect && buf_out) { + ret = copy_buf_out_to_user(nt_msg, out_size_expect, buf_out); + if (ret) + pr_err("Copy information to user failed\n"); + } +out_free_buf: + free_tmp_buf(hwdev, nt_msg, buf_in, buf_out); + + return ret; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + void *hwdev; + struct msg_module nt_msg; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + + if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + 
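/*
 * From here the handler NUL-terminates the user-supplied device name,
 * takes a tool reference, short-circuits GET_CHIP_ID/GET_CHIP_INFO
 * (which need no hwdev), optionally rewrites the name for the FC
 * driver's GET_CHIP_ID, checks that the addressed module is initialized
 * far enough for the command, resolves the hwdev by name and finally
 * dispatches through do_nictool_ioctl_cmd().
 */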
+ /* end with '\0' */ + nt_msg.device_name[IFNAMSIZ - 1] = '\0'; + + hifc_tool_cnt_inc(); + if (hifc_is_special_handling_cmd(&nt_msg, &ret)) + goto out_free_lock; + + if (nt_msg.module == HIFCADM_FC_DRIVER && + nt_msg.msg_formate == GET_CHIP_ID) + hifc_get_fc_devname(nt_msg.device_name); + + if (!nictool_k_is_cmd_support(nt_msg.module, nt_msg.device_name, + nt_msg.up_cmd.up_db.up_api_type)) { + ret = -EFAULT; + goto out_free_lock; + } + + /* get the netdevice */ + hwdev = hifc_get_hwdev_by_ifname(nt_msg.device_name); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", + nt_msg.device_name); + ret = -ENODEV; + goto out_free_lock; + } + + ret = do_nictool_ioctl_cmd(hwdev, &nt_msg); + +out_free_lock: + hifc_tool_cnt_dec(); + + return (long)ret; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hifc_mem_mmap, +}; + +static int if_nictool_exist(void) +{ + struct file *fp = NULL; + int exist = 0; + + fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0); + if (IS_ERR(fp)) { + exist = 0; + } else { + (void)filp_close(fp, NULL); + exist = 1; + } + + return exist; +} + +/** + * hifc_tool_k_init - initialize the hw interface + */ +int hifc_tool_k_init(void) +{ + int ret; + struct device *pdevice; + + if (g_nictool_init_flag) { + g_nictool_ref_cnt++; + /* already initialized */ + return 0; + } + + if (if_nictool_exist()) { + pr_err("Nictool device exists\n"); + return 0; + } + + /* Device ID: primary device ID (12bit) | + * secondary device number (20bit) + */ + g_dev_id = MKDEV(MAJOR_DEV_NUM, 0); + + /* Static device registration number */ + ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME); + if (ret < 0) { + ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); + if (ret < 0) { + pr_err("Register nictool_dev fail(0x%x)\n", ret); + return ret; + } + } + + /* Create equipment */ + /*lint -save -e160*/ + g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS); + /*lint -restore*/ + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + ret = -EFAULT; + goto class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret); + goto cdev_add_err; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + ret = -EFAULT; + goto device_create_err; + } + + g_nictool_init_flag = 1; + g_nictool_ref_cnt = 1; + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +void hifc_tool_k_uninit(void) +{ + if 
(g_nictool_init_flag) { + if ((--g_nictool_ref_cnt)) + return; + } + + g_nictool_init_flag = 0; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) + return; + + cdev_del(&g_nictool_cdev); + device_destroy(g_nictool_class, g_dev_id); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} diff --git a/drivers/scsi/huawei/hifc/hifc_tool.h b/drivers/scsi/huawei/hifc/hifc_tool.h new file mode 100644 index 0000000000000000000000000000000000000000..8de74dbcc25d4ff10c79a7ab311ad633a09a7ff8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_NICTOOL_H_ +#define HIFC_NICTOOL_H_ +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +/* completion timeout interval, unit is jiffies*/ +#define UP_COMP_TIME_OUT_VAL 10000U + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +struct ipsurx_stats_info { + u32 addr; + u32 rd_cnt; +}; + +struct ucode_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 ucode_cmd_type:4; + u32 cmdq_ack_type:3; + u32 ucode_imm:1; + u32 len:16; + } ucode_db; + u32 value; + }; +}; + +struct up_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 chipif_cmd:8; + u32 up_api_type:16; + } up_db; + u32 value; + }; +}; + +struct _dcb_data { + u8 wr_flag; + u8 dcb_en; + u8 err; + u8 rsvd; +}; + +union _dcb_ctl { + struct _dcb_data dcb_data; + u32 data; +}; + +struct _pfc_data { + u8 pfc_en; + u8 pfc_priority; + u8 num_of_tc; + u8 err; +}; + +union _pfc { + struct _pfc_data pfc_data; + u32 data; +}; + +union _flag_com { + struct _ets_flag { + u8 flag_ets_enable:1; + u8 flag_ets_percent:1; + u8 flag_ets_cos:1; + u8 flag_ets_strict:1; + u8 rev:4; + } ets_flag; + u8 data; +}; + +struct _ets { + u8 ets_en; + u8 err; + u8 strict; + u8 tc[8]; + u8 ets_percent[8]; + union _flag_com flag_com; +}; + +#define API_CMD 0x1 +#define API_CHAIN 0x2 +#define API_CLP 0x3 + +struct msg_module { + char device_name[IFNAMSIZ]; + unsigned int module; + union { + u32 msg_formate; + struct ucode_cmd_st ucode_cmd; + struct up_cmd_st up_cmd; + }; + + struct { + u32 in_buff_len; + u32 out_buff_len; + } len_info; + u32 res; + void *in_buff; + void *out_buf; +}; + +#define MAX_VER_INFO_LEN 128 +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; +}; + +struct chip_fault_stats { + int offset; + u8 chip_faults[MAX_DRV_BUF_SIZE]; +}; + +struct hifc_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct hifc_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct hifc_dbg_sq_info { + u16 q_id; + u16 pi; + u16 ci;/* sw_ci */ + u16 fi;/* hw_ci */ + + u32 q_depth; + u16 pi_reverse; + u16 weqbb_size; + + u8 priority; + u16 *ci_addr; + u64 cla_addr; + + void *slq_handle; + + struct hifc_tx_hw_page direct_wqe; + struct hifc_tx_hw_page db_addr; + u32 pg_idx; + + u32 glb_sq_id; +}; + +struct hifc_dbg_rq_info { + u16 q_id; + u16 glb_rq_id; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u16 msix_idx; + u32 msix_vector; +}; + +#ifndef BUSINFO_LEN +#define BUSINFO_LEN (32) +#endif +struct pf_info { + char 
name[IFNAMSIZ]; + char bus_info[BUSINFO_LEN]; + u32 pf_type; +}; + +#ifndef MAX_SIZE +#define MAX_SIZE (16) +#endif +struct card_info { + struct pf_info pf[MAX_SIZE]; + u32 pf_num; +}; + +struct nic_card_id { + u32 id[MAX_SIZE]; + u32 num; +}; + +struct func_pdev_info { + u64 bar0_phy_addr; + u64 bar0_size; + u64 rsvd1[4]; +}; + +struct hifc_card_func_info { + u32 num_pf; + u32 rsvd0; + u64 usr_api_phy_addr; + struct func_pdev_info pdev_info[MAX_SIZE]; +}; + +#ifndef NIC_UP_CMD_UPDATE_FW +#define NIC_UP_CMD_UPDATE_FW (114) +#endif + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM (64) +#endif +extern void *g_hifc_card_node_array[MAX_CARD_NUM]; +extern void *g_hifc_card_vir_addr[MAX_CARD_NUM]; +extern u64 g_hifc_card_phy_addr[MAX_CARD_NUM]; +extern struct mutex g_hifc_addr_lock; +extern int g_hifc_card_id; + +struct hifc_nic_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +struct hifc_nic_poll_weight { + int poll_weight; +}; + +enum hifc_homologues_state { + HIFC_HOMOLOGUES_OFF = 0, + HIFC_HOMOLOGUES_ON = 1, +}; + +struct hifc_homologues { + enum hifc_homologues_state homo_state; +}; + +struct hifc_pf_info { + u32 isvalid; + u32 pf_id; +}; + +enum module_name { + SEND_TO_NIC_DRIVER = 1, + SEND_TO_HW_DRIVER, + SEND_TO_UCODE, + SEND_TO_UP, + SEND_TO_SM, + HIFCADM_FC_DRIVER = 10, +}; + +enum driver_cmd_type { + FUNC_TYPE = 12, + GET_FUNC_IDX, + GET_DRV_VERSION = 16, + GET_HW_STATS = 18, + CLEAR_HW_STATS, + GET_CHIP_FAULT_STATS = 21, + GET_CHIP_ID = 25, + GET_SINGLE_CARD_INFO, + GET_FIRMWARE_ACTIVE_STATUS, + GET_DEVICE_ID = 29, + IS_DRV_IN_VM = 44, + GET_CHIP_INFO = 48, + GET_PF_ID = 52, + PORT_ID = 0x42 +}; + +enum api_chain_cmd_type { + API_CSR_READ, + API_CSR_WRITE +}; + +enum sm_cmd_type { + SM_CTR_RD32 = 1, + SM_CTR_RD64_PAIR, + SM_CTR_RD64 +}; + +int hifc_tool_k_init(void); +void hifc_tool_k_uninit(void); + +int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +void hifc_get_fc_devname(char *devname); +void *hifc_get_hwdev_by_ifname(char *ifname); +enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname); +void hifc_get_all_chip_id(void *id_info); +void hifc_tool_cnt_dec(void); +void hifc_tool_cnt_inc(void); +int hifc_get_device_id(void *hwdev, u16 *dev_id); +int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid); +bool hifc_is_valid_bar_addr(u64 offset); +void hifc_get_card_info(void *hwdev, void *bufin); +struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname); +void hifc_get_card_func_info_by_card_name( + const char *chip_name, struct hifc_card_func_info *card_func); + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_tool_hw.c b/drivers/scsi/huawei/hifc/hifc_tool_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..f4224f6d70d5b7b17903e0a7fcde1e08abcdd322 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool_hw.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" 
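/*
 * hifc_tool_hw.c implements the hardware-facing side of the hifc_tool
 * character device: SEND_TO_HW_DRIVER commands are dispatched through
 * hw_driv_module_cmd_handle[], SEND_TO_SM counter reads through
 * sm_module_cmd_handle[], SEND_TO_UP requests go to the management CPU
 * (API_CMD/API_CLP) or to the CSR helpers (API_CHAIN), and SEND_TO_UCODE
 * requests are issued over the command queue.
 *
 * A purely illustrative user-space sketch (the device node path comes
 * from HIADM_DEV_PATH in hifc_tool.c; everything else below is an
 * assumption for illustration, not part of this patch):
 *
 *   struct drv_version_info ver = {0};
 *   struct msg_module msg = {0};
 *
 *   strncpy(msg.device_name, "<chip or FC device name>", IFNAMSIZ - 1);
 *   msg.module = SEND_TO_HW_DRIVER;
 *   msg.msg_formate = GET_DRV_VERSION;
 *   msg.len_info.in_buff_len = sizeof(ver);
 *   msg.len_info.out_buff_len = sizeof(ver);
 *   msg.in_buff = &ver;
 *   msg.out_buf = &ver;
 *   ioctl(open("/dev/hifc_dev", O_RDWR), 0, &msg);
 */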
+#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_lld.h" +#include "hifc_sml.h" +#include "hifc_tool.h" + +static atomic_t tool_used_cnt; + +typedef int (*hw_driv_module)(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +struct hw_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + hw_driv_module driv_func; +}; + +u8 hifc_physical_port_id(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} + +int hifc_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!dev->chip_present_flag) + return -EPERM; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CLP_INITED)) + return -EPERM; + + err = hifc_pf_clp_to_mgmt(dev, mod, cmd, buf_in, + in_size, buf_out, out_size); + + return err; +} + +static int get_func_type(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_typ; + + func_typ = hifc_func_type(hwdev); + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = func_typ; + return 0; +} + +static int get_func_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_id; + + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hifc_global_func_id_hw(hwdev); + *(u16 *)buf_out = func_id; + *out_size = sizeof(u16); + return 0; +} + +static int get_drv_version(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info; + char ver_str[MAX_VER_INFO_LEN] = {0}; + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EFAULT; + } + snprintf(ver_str, sizeof(ver_str), "%s %s", + HIFC_DRV_VERSION, __TIME_STR__); + ver_info = (struct drv_version_info *)buf_out; + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + return 0; +} + +static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static void hifc_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset) +{ + int copy_len = offset + MAX_DRV_BUF_SIZE - HIFC_CHIP_FAULT_SIZE; + + if (offset < 0 || offset > HIFC_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %d\n", + offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HIFC_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset, + MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset, + copy_len); +} + +static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int offset = 0; + struct chip_fault_stats *fault_info; + + if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + 
*out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct chip_fault_stats *)buf_in; + offset = fault_info->offset; + fault_info = (struct chip_fault_stats *)buf_out; + hifc_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset); + + return 0; +} + +static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_in || !buf_out || in_size != sizeof(struct card_info) || + *out_size != sizeof(struct card_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct card_info)); + return -EFAULT; + } + + hifc_get_card_info(hwdev, buf_out); + *out_size = in_size; + return 0; +} + +#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30 +static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 loop_cnt = 0; + + while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) { + if (!hifc_get_mgmt_channel_status(hwdev)) + return 0; + + msleep(1000); + loop_cnt++; + } + if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int get_device_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 dev_id; + int err; + + if (!buf_out || !buf_in || *out_size != sizeof(u16) || + in_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + err = hifc_get_device_id(hwdev, &dev_id); + if (err) + return err; + + *((u32 *)buf_out) = dev_id; + *out_size = in_size; + + return 0; +} + +bool hifc_is_in_host(void) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state > HIFC_INIT_STATE_PCI_INITED) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host; + + if (!buf_out || (*out_size != sizeof(u8))) + return -EINVAL; + + in_host = hifc_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_pf_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hifc_pf_info *pf_info; + u32 port_id = 0; + int err; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || + !buf_in || in_size != sizeof(u32)) + return -EINVAL; + + port_id = *((u32 *)buf_in); + pf_info = (struct hifc_pf_info *)buf_out; + err = hifc_get_pf_id(hwdev, port_id, &pf_info->pf_id, + &pf_info->isvalid); + if (err) + return err; + + *out_size = sizeof(*pf_info); + + return 0; +} + +static struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_DRV_VERSION, get_drv_version}, + {GET_HW_STATS, get_hw_stats}, + {CLEAR_HW_STATS, clear_hw_stats}, + {GET_CHIP_FAULT_STATS, get_chip_faults_stats}, + {GET_CHIP_ID, get_chip_id_test}, + {GET_SINGLE_CARD_INFO, get_single_card_info}, + {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status}, + {GET_DEVICE_ID, get_device_id}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_PF_ID, get_pf_id}, +}; + +int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void 
*buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type; + int err = 0; + + if (!nt_msg) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + cmd_type = (enum driver_cmd_type)(nt_msg->msg_formate); + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + hw_driv_module_cmd_handle[index].driv_cmd_name) { + err = hw_driv_module_cmd_handle[index].driv_func + (hwdev, buf_in, + in_size, buf_out, out_size); + break; + } + } + + if (index == num_cmds) + return -EINVAL; + + return err; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hifc_sm_ctr_rd32(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (32 bits)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hifc_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret) { + pr_err("Get sm ctr information (64 bits pair)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hifc_sm_ctr_rd64(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (64 bits)failed!\n"); + val1 = 0xffffffff; + } + buf_out->val1 = val1; + + return ret; +} + +struct sm_module_handle { + enum sm_cmd_type sm_cmd_name; + sm_module sm_func; +}; + +static struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64} +}; + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate; + int index, num_cmds = sizeof(sm_module_cmd_handle) / + sizeof(sm_module_cmd_handle[0]); + int ret = 0; + + if ((!nt_msg) || (!buf_in) || (!buf_out) || + (in_size != sizeof(*sm_in)) || + (*out_size != sizeof(*sm_out))) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + msg_formate = nt_msg->msg_formate; + for (index = 0; index < num_cmds; index++) { + if (msg_formate == sm_module_cmd_handle[index].sm_cmd_name) + ret = sm_module_cmd_handle[index].sm_func(hwdev, + (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + } + + if (ret) + pr_err("Get sm information fail!\n"); + + *out_size = sizeof(struct sm_out_st); + + return ret; +} + +static u32 get_up_timeout_val(enum hifc_mod_type mod, u8 cmd) +{ +#define UP_UPDATEFW_TIME_OUT_VAL 20000U + if (mod == HIFC_MOD_L2NIC && cmd == NIC_UP_CMD_UPDATE_FW) + return UP_UPDATEFW_TIME_OUT_VAL; + else + return UP_COMP_TIME_OUT_VAL; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data; + + if (!buf_in || in_size != sizeof(*csr_write_msg)) + return -EINVAL; + + rd_len = csr_write_msg->rd_len; + rd_addr = 
csr_write_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + if (rd_len % 4) { + pr_err("Csr length must be a multiple of 4\n"); + return -EFAULT; + } + + rd_cnt = rd_len / 4; + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) { + pr_err("No more memory\n"); + return -EFAULT; + } + if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + ret = hifc_api_csr_wr32(hwdev, node_id, + rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + pr_err("Csr wr fail, ret: %d, node_id: %d, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += 4; + } + + *out_size = 0; + kfree(data); + return ret; +} + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || + *out_size != up_log_msg->rd_len) + return -EINVAL; + + rd_len = up_log_msg->rd_len; + rd_addr = up_log_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + rd_cnt = rd_len / 4; + + if (rd_len % 4) + rd_cnt++; + + for (i = 0; i < rd_cnt; i++) { + ret = hifc_api_csr_rd32(hwdev, node_id, + rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += 4; + } + *out_size = rd_len; + + return ret; +} + +int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if ((!nt_msg) || (!hwdev) || (!buf_in) || (!buf_out)) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + if ((nt_msg->up_cmd.up_db.up_api_type == API_CMD) || + (nt_msg->up_cmd.up_db.up_api_type == API_CLP)) { + enum hifc_mod_type mod; + u8 cmd; + u32 timeout; + + mod = (enum hifc_mod_type)nt_msg->up_cmd.up_db.comm_mod_type; + cmd = nt_msg->up_cmd.up_db.chipif_cmd; + + timeout = get_up_timeout_val(mod, cmd); + + if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) + ret = hifc_msg_to_mgmt_sync(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size, + timeout); + else + ret = hifc_clp_to_mgmt(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + if (ret) { + pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n", + mod, cmd); + return ret; + } + + } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) { + if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) { + ret = api_csr_write(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + return ret; + } + + ret = api_csr_read(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + } + + return ret; +} + +int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if ((!nt_msg) || (!hwdev) || (!buf_in)) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + if (nt_msg->ucode_cmd.ucode_db.ucode_imm) { + ret = hifc_cmdq_direct_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send direct cmdq err: %d!\n", ret); + } else { + ret = hifc_cmdq_detail_resp + 
(hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send detail cmdq err: %d!\n", ret); + } + + return ret; +} + +void hifc_tool_cnt_inc(void) +{ + atomic_inc(&tool_used_cnt); +} + +void hifc_tool_cnt_dec(void) +{ + atomic_dec(&tool_used_cnt); +} + +static bool __is_pcidev_match_chip_name(const char *ifname, + struct hifc_pcidev *dev, + struct card_node *chip_node, + enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (type == TYPE_UNKNOWN) { + if (dev->init_state < HIFC_INIT_STATE_HW_PART_INITED) + return false; + } else { + if (dev->init_state >= + HIFC_INIT_STATE_HW_PART_INITED && + hifc_func_type(dev->hwdev) != type) + return false; + } + + return true; + } + + return false; +} + +static struct hifc_pcidev *_get_pcidev_by_chip_name(char *ifname, + enum func_type type) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_chip_name(ifname, dev, chip_node, + type)) { + lld_dev_put(); + return dev; + } + } + } + + lld_dev_put(); + + return NULL; +} + +static struct hifc_pcidev *hifc_get_pcidev_by_chip_name(char *ifname) +{ + struct hifc_pcidev *dev, *dev_hw_init; + + /* find hw init device first */ + dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN); + if (dev_hw_init) { + if (hifc_func_type(dev_hw_init->hwdev) == TYPE_PPF) + return dev_hw_init; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + return NULL; +} + +static struct hifc_pcidev *hifc_get_pcidev_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + /* support search hwdev by chip name, net device name, + * or fc device name + */ + /* Find pcidev by chip_name first */ + dev = hifc_get_pcidev_by_chip_name(ifname); + if (dev) + return dev; + + /* If ifname not a chip name, + * find pcidev by FC name or netdevice name + */ + return hifc_get_pcidev_by_dev_name(ifname); +} + +void *hifc_get_hwdev_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + if (!ifname) { + pr_err("Input param invalid!\n"); + return NULL; + } + + dev = hifc_get_pcidev_by_ifname(ifname); + if (dev) + return dev->hwdev; + + return NULL; +} + +enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + if (!ifname) { + pr_err("Input param invalid!\n"); + return HIFC_INIT_STATE_NONE; + } + dev = hifc_get_pcidev_by_ifname(ifname); + if (dev) + return dev->init_state; + + pr_err("Can not get device %s\n", ifname); + + return HIFC_INIT_STATE_NONE; +} + +void hifc_get_fc_devname(char *devname) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + if (!devname) { + pr_err("Input param invalid!\n"); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state < HIFC_INIT_STATE_ALL_INITED) + continue; + + if (dev->uld_dev) { + 
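/*
 * The first function that is fully initialized and already has an
 * upper-layer (FC) device bound wins; the list walk stops at that match.
 */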
strlcpy(devname, (char *)dev->uld_dev_name, + IFNAMSIZ); + lld_dev_put(); + return; + } + } + } + lld_dev_put(); +} + +void hifc_get_all_chip_id(void *id_info) +{ + struct nic_card_id *card_id = (struct nic_card_id *)id_info; + struct card_node *chip_node; + int i = 0; + int id, err; + + if (!card_id) { + pr_err("Input param invalid!\n"); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) + pr_err("Failed to get hifc id\n"); + + card_id->id[i] = id; + i++; + } + lld_dev_put(); + card_id->num = i; +} + +static struct card_node *hifc_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hifc_pcidev *dev; + + if (!hwdev) + return NULL; + + lld_dev_hold(); + list_for_each_entry(node_tmp, &g_hifc_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_dev_put(); + + return chip_node; +} + +int hifc_get_device_id(void *hwdev, u16 *dev_id) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + u16 vendor_id = 0; + u16 device_id = 0; + + if ((!dev_id) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return -ENODEV; + } + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pci_read_config_word(dev->pcidev, 0, &vendor_id); + if (vendor_id == HIFC_PCI_VENDOR_ID) { + pci_read_config_word(dev->pcidev, 2, &device_id); + break; + } + } + lld_dev_put(); + *dev_id = device_id; + + return 0; +} + +int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + + if ((!isvalid) || (!pf_id) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return -ENODEV; + } + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hifc_physical_port_id(dev->hwdev) == port_id) { + *pf_id = hifc_global_func_id(dev->hwdev); + *isvalid = 1; + break; + } + } + lld_dev_put(); + + return 0; +} + +bool hifc_is_valid_bar_addr(u64 offset) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (offset == pci_resource_start(dev->pcidev, 0)) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +void hifc_get_card_func_info_by_card_name( + const char *chip_name, struct hifc_card_func_info *card_func) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + struct func_pdev_info *pdev_info; + + if ((!card_func) || (!chip_name)) { + pr_err("Input param invalid!\n"); + return; + } + card_func->num_pf = 0; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hifc_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hifc_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + 
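/* functions that are in the middle of removal are skipped as well */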
continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar0_size = pci_resource_len(dev->pcidev, 0); + pdev_info->bar0_phy_addr = + pci_resource_start(dev->pcidev, 0); + + card_func->num_pf++; + if (card_func->num_pf >= MAX_SIZE) + break; + } + } + + lld_dev_put(); +} + +static bool __is_func_valid(struct hifc_pcidev *dev) +{ + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + return false; + + if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) + return false; + + return true; +} + +void hifc_get_card_info(void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct hifc_pcidev *dev; + u32 idx = 0; + + if ((!bufin) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return; + } + info->pf_num = 0; + + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!__is_func_valid(dev)) + continue; + + strlcpy(info->pf[idx].name, dev->uld_dev_name, IFNAMSIZ); + info->pf[idx].pf_type = (u32)BIT(SERVICE_T_FC); + strlcpy(info->pf[idx].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[idx].bus_info)); + info->pf_num++; + idx++; + } + lld_dev_put(); +} diff --git a/drivers/scsi/huawei/hifc/hifc_utils.c b/drivers/scsi/huawei/hifc/hifc_utils.c new file mode 100644 index 0000000000000000000000000000000000000000..c2c6ef1fe120d2e92e11a34a8d0c62a94ca814d1 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_utils.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_utils.h" +#include "unf_log.h" +#include "unf_common.h" + +void hifc_cpu_to_big64(void *v_addr, unsigned int size) +{ + unsigned int index = 0; + unsigned int cnt = 0; + unsigned long long *temp = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_addr, dump_stack(); return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (size % HIFC_QWORD_BYTE) == 0, dump_stack(); return); + + temp = (unsigned long long *)v_addr; + cnt = HIFC_SHIFT_TO_U64(size); + + for (index = 0; index < cnt; index++) { + *temp = cpu_to_be64(*temp); + temp++; + } +} + +void hifc_big_to_cpu64(void *v_addr, unsigned int size) +{ + unsigned int index = 0; + unsigned int cnt = 0; + unsigned long long *tmp = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_addr, dump_stack(); return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (size % HIFC_QWORD_BYTE) == 0, dump_stack(); return); + + tmp = (unsigned long long *)v_addr; + cnt = HIFC_SHIFT_TO_U64(size); + + for (index = 0; index < cnt; index++) { + *tmp = be64_to_cpu(*tmp); + tmp++; + } +} + +void hifc_cpu_to_big32(void *v_addr, unsigned int size) +{ + unf_cpu_to_big_end(v_addr, size); +} + +void hifc_big_to_cpu32(void *v_addr, unsigned int size) +{ + if (size % UNF_BYTES_OF_DWORD) + dump_stack(); + unf_big_end_to_cpu(v_addr, size); +} + +unsigned int hifc_log2n(unsigned int val) +{ + unsigned int result = 0; + unsigned int logn = (val >> 1); + + while (logn) { + logn >>= 1; + result++; + } + return result; +} diff --git a/drivers/scsi/huawei/hifc/hifc_utils.h b/drivers/scsi/huawei/hifc/hifc_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..548e5c9bb95d758c31a25304cb610472241a7fb5 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_utils.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies 
Co., Ltd + * + */ + +#ifndef __HIFC_UTILS_H__ +#define __HIFC_UTILS_H__ + +#define UNF_ZERO 0 +#define HIFC_BIT(n) (0x1UL << (n)) +#define HIFC_BIT_0 HIFC_BIT(0) +#define HIFC_BIT_1 HIFC_BIT(1) +#define HIFC_BIT_2 HIFC_BIT(2) +#define HIFC_BIT_3 HIFC_BIT(3) +#define HIFC_BIT_4 HIFC_BIT(4) +#define HIFC_BIT_5 HIFC_BIT(5) +#define HIFC_BIT_6 HIFC_BIT(6) +#define HIFC_BIT_7 HIFC_BIT(7) +#define HIFC_BIT_8 HIFC_BIT(8) +#define HIFC_BIT_9 HIFC_BIT(9) +#define HIFC_BIT_10 HIFC_BIT(10) +#define HIFC_BIT_11 HIFC_BIT(11) +#define HIFC_BIT_12 HIFC_BIT(12) +#define HIFC_BIT_13 HIFC_BIT(13) +#define HIFC_BIT_14 HIFC_BIT(14) +#define HIFC_BIT_15 HIFC_BIT(15) +#define HIFC_BIT_16 HIFC_BIT(16) +#define HIFC_BIT_17 HIFC_BIT(17) +#define HIFC_BIT_18 HIFC_BIT(18) +#define HIFC_BIT_19 HIFC_BIT(19) +#define HIFC_BIT_20 HIFC_BIT(20) +#define HIFC_BIT_21 HIFC_BIT(21) +#define HIFC_BIT_22 HIFC_BIT(22) +#define HIFC_BIT_23 HIFC_BIT(23) +#define HIFC_BIT_24 HIFC_BIT(24) +#define HIFC_BIT_25 HIFC_BIT(25) +#define HIFC_BIT_26 HIFC_BIT(26) +#define HIFC_BIT_27 HIFC_BIT(27) +#define HIFC_BIT_28 HIFC_BIT(28) +#define HIFC_BIT_29 HIFC_BIT(29) +#define HIFC_BIT_30 HIFC_BIT(30) +#define HIFC_BIT_31 HIFC_BIT(31) + +#define HIFC_GET_BITS(data, mask) ((data) & (mask)) /* Obtains the bit */ +#define HIFC_SET_BITS(data, mask) ((data) |= (mask)) /* set the bit */ +#define HIFC_CLR_BITS(data, mask) ((data) &= ~(mask)) /* clear the bit */ + +/* Byte alignment */ +#define HIFC_ALIGN_N(n) __attribute__((__packed, __aligned(n))) +#define HIFC_ALIGN_1 HIFC_ALIGN_N(1) +#define HIFC_ALIGN_2 HIFC_ALIGN_N(2) +#define HIFC_ALIGN_4 HIFC_ALIGN_N(4) +#define HIFC_ALIGN_8 HIFC_ALIGN_N(8) + +#define HIFC_ADJUST_ALIGN_4(n) ((n) - (n) % 4) + +#define HIFC_LSB(x) ((unsigned char)(x)) +#define HIFC_MSB(x) ((unsigned char)((unsigned short)(x) >> 8)) + +#define HIFC_LSW(x) ((unsigned short)(x)) +#define HIFC_MSW(x) ((unsigned short)((unsigned int)(x) >> 16)) + +#define HIFC_LSD(x) ((unsigned int)((unsigned long long)(x))) +#define HIFC_MSD(x) ((unsigned int)((((unsigned long long)(x)) >> 16) >> 16)) + +#define HIFC_BYTES_TO_QW_NUM(x) ((x) >> 3) +#define HIFC_BYTES_TO_DW_NUM(x) ((x) >> 2) + +#define UNF_GET_SHIFTMASK(__src, __shift, __mask) \ + (((__src) & (__mask)) >> (__shift)) +#define UNF_FC_SET_SHIFTMASK(__des, __val, __shift, __mask)\ + ((__des) = \ + (((__des) & ~(__mask)) | (((__val) << (__shift)) & (__mask)))) + +/* D_ID */ +#define UNF_FC_HEADER_DID_MASK 0x00FFFFFF +#define UNF_FC_HEADER_DID_SHIFT 0 +#define UNF_FC_HEADER_DID_DWORD 0 +#define UNF_GET_FC_HEADER_DID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\ + UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK) + +#define UNF_SET_FC_HEADER_DID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\ + __val, UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK) + +/* R_CTL */ +#define UNF_FC_HEADER_RCTL_MASK 0xFF000000 +#define UNF_FC_HEADER_RCTL_SHIFT 24 +#define UNF_FC_HEADER_RCTL_DWORD 0 +#define UNF_GET_FC_HEADER_RCTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\ + UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) + +#define UNF_SET_FC_HEADER_RCTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\ + __val, UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) + +/* S_ID */ +#define UNF_FC_HEADER_SID_MASK 0x00FFFFFF +#define UNF_FC_HEADER_SID_SHIFT 0 +#define 
UNF_FC_HEADER_SID_DWORD 1 +#define UNF_GET_FC_HEADER_SID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\ + UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK) +#define UNF_SET_FC_HEADER_SID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\ + __val, UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK) + +/* CS_CTL */ +#define UNF_FC_HEADER_CS_CTL_MASK 0xFF000000 +#define UNF_FC_HEADER_CS_CTL_SHIFT 24 +#define UNF_FC_HEADER_CS_CTL_DWORD 1 +#define UNF_GET_FC_HEADER_CS_CTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\ + UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK) + +#define UNF_SET_FC_HEADER_CS_CTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\ + __val, UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK) + +/* F_CTL */ +#define UNF_FC_HEADER_FCTL_MASK 0x00FFFFFF +#define UNF_FC_HEADER_FCTL_SHIFT 0 +#define UNF_FC_HEADER_FCTL_DWORD 2 +#define UNF_GET_FC_HEADER_FCTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\ + UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK) +#define UNF_SET_FC_HEADER_FCTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\ + __val, UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK) + +/* TYPE */ +#define UNF_FC_HEADER_TYPE_MASK 0xFF000000 +#define UNF_FC_HEADER_TYPE_SHIFT 24 +#define UNF_FC_HEADER_TYPE_DWORD 2 +#define UNF_GET_FC_HEADER_TYPE(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\ + UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK) + +#define UNF_SET_FC_HEADER_TYPE(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\ + __val, UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK) + +/* SEQ_CNT */ +#define UNF_FC_HEADER_SEQ_CNT_MASK 0x0000FFFF +#define UNF_FC_HEADER_SEQ_CNT_SHIFT 0 +#define UNF_FC_HEADER_SEQ_CNT_DWORD 3 +#define UNF_GET_FC_HEADER_SEQ_CNT(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\ + UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK) + +#define UNF_SET_FC_HEADER_SEQ_CNT(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\ + __val, UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK) + +/* DF_CTL */ +#define UNF_FC_HEADER_DF_CTL_MASK 0x00FF0000 +#define UNF_FC_HEADER_DF_CTL_SHIFT 16 +#define UNF_FC_HEADER_DF_CTL_DWORD 3 +#define UNF_GET_FC_HEADER_DF_CTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\ + UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK) +#define UNF_SET_FC_HEADER_DF_CTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\ + __val, UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK) + +/* SEQ_ID */ +#define UNF_FC_HEADER_SEQ_ID_MASK 0xFF000000 +#define UNF_FC_HEADER_SEQ_ID_SHIFT 24 +#define UNF_FC_HEADER_SEQ_ID_DWORD 3 +#define UNF_GET_FC_HEADER_SEQ_ID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\ + UNF_FC_HEADER_SEQ_ID_SHIFT, UNF_FC_HEADER_SEQ_ID_MASK) +#define UNF_SET_FC_HEADER_SEQ_ID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + 
((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\ + __val, UNF_FC_HEADER_SEQ_ID_SHIFT, UNF_FC_HEADER_SEQ_ID_MASK) + +/* RX_ID */ +#define UNF_FC_HEADER_RXID_MASK 0x0000FFFF +#define UNF_FC_HEADER_RXID_SHIFT 0 +#define UNF_FC_HEADER_RXID_DWORD 4 +#define UNF_GET_FC_HEADER_RXID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\ + UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK) +#define UNF_SET_FC_HEADER_RXID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\ + __val, UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK) + +/* OX_ID */ +#define UNF_FC_HEADER_OXID_MASK 0xFFFF0000 +#define UNF_FC_HEADER_OXID_SHIFT 16 +#define UNF_FC_HEADER_OXID_DWORD 4 +#define UNF_GET_FC_HEADER_OXID(__pfcheader)\ + ((unsigned short)UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\ + UNF_FC_HEADER_OXID_SHIFT\ + , UNF_FC_HEADER_OXID_MASK)) + +#define UNF_SET_FC_HEADER_OXID(__pfcheader, __val)\ + (UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\ + __val, UNF_FC_HEADER_OXID_SHIFT, UNF_FC_HEADER_OXID_MASK)) + +/* PRLI PARAM 3 */ +#define HIFC_PRLI_PARAM_WXFER_ENABLE_MASK 0x00000001 +#define HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT 0 +#define HIFC_PRLI_PARAM_WXFER_DWORD 3 +#define HIFC_GET_PRLI_PARAM_WXFER(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_WXFER_DWORD],\ + HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT, HIFC_PRLI_PARAM_WXFER_ENABLE_MASK) + +#define HIFC_PRLI_PARAM_CONF_ENABLE_MASK 0x00000080 +#define HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT 7 +#define HIFC_PRLI_PARAM_CONF_DWORD 3 +#define HIFC_GET_PRLI_PARAM_CONF(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_DWORD],\ + HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT, HIFC_PRLI_PARAM_CONF_ENABLE_MASK) + +#define HIFC_PRLI_PARAM_REC_ENABLE_MASK 0x00000400 +#define HIFC_PRLI_PARAM_REC_ENABLE_SHIFT 10 +#define HIFC_PRLI_PARAM_CONF_REC 3 +#define HIFC_GET_PRLI_PARAM_REC(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_REC],\ + HIFC_PRLI_PARAM_REC_ENABLE_SHIFT, HIFC_PRLI_PARAM_REC_ENABLE_MASK) + +#define HIFC_WQE_TYPE_MASK 0x000000FF +#define HIFC_WQE_TYPE_SHIFT 0 +#define HIFC_WQE_TYPE_DWORD 0 +#define HIFC_GET_WQE_TYPE_BE(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[HIFC_WQE_TYPE_DWORD],\ + HIFC_WQE_TYPE_SHIFT, HIFC_WQE_TYPE_MASK) + +#define HIFC_MAKE_64BIT_ADDR(__high32, __low32) \ + (unsigned long long)(((unsigned long long)(__high32) << 32) |\ + (unsigned long long)(__low32)) + +#define HIFC_TRACE(log_id, log_att, log_level, fmt, ...) 
\ + UNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__) + +/* Valid check */ +#define HIFC_CHECK(log_id, condition, fail_do) \ + do { \ + if (unlikely(!(condition))) { \ + HIFC_TRACE((log_id), UNF_LOG_IO_ATT, UNF_ERR, \ + "[err]Function:%s parameter check[%s] invalid",\ + __func__, #condition); \ + fail_do; \ + } \ + } while (0) + +#define PRINT_IN_MBOX(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC send inbound mailbox: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ", \ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) +#define PRINT_OUT_MBOX(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC receive outbound mailbox: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) + +#define PRINT_INBOUND_IOB(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC send inbound iob: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) + +#define PRINT_OUTBOUND_IOB(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC receive outbound iob: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) +#define HIFC_REFERNCE_VAR(ref, cmp, ret) + +#define RETURN_ERROR_S32 (-1) +#define UNF_RETURN_ERROR_S32 (-1) + +enum HIFC_HBA_ERR_STAT_E { + HIFC_STAT_CTXT_FLUSH_DONE = 0, + HIFC_STAT_SQ_WAIT_EMPTY, + HIFC_STAT_LAST_GS_SCQE, + HIFC_STAT_SQ_POOL_EMPTY, + HIFC_STAT_PARENT_IO_FLUSHED, + HIFC_STAT_ROOT_IO_FLUSHED, /* 5 */ + HIFC_STAT_ROOT_SQ_FULL, + HIFC_STAT_ELS_RSP_EXCH_REUSE, + HIFC_STAT_GS_RSP_EXCH_REUSE, + HIFC_STAT_SQ_IO_BUFFER_CLEARED, + HIFC_STAT_PARENT_SQ_NOT_OFFLOADED, /* 10 */ + HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK, + HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID, + HIFC_HBA_STAT_BUTT +}; + +#define HIFC_DWORD_BYTE 4 +#define HIFC_QWORD_BYTE 8 +#define HIFC_SHIFT_TO_U64(x) ((x) >> 3) +#define HIFC_SHIFT_TO_U32(x) ((x) >> 2) + +void hifc_cpu_to_big64(void *v_addr, unsigned int size); +void hifc_big_to_cpu64(void *v_addr, unsigned int size); +void hifc_cpu_to_big32(void *v_addr, unsigned int size); +void hifc_big_to_cpu32(void *v_addr, unsigned int size); +unsigned int hifc_log2n(unsigned int val); + +#endif /* __HIFC_UTILS_H__ */ + diff --git a/drivers/scsi/huawei/hifc/hifc_wq.c b/drivers/scsi/huawei/hifc/hifc_wq.c new file mode 100644 index 0000000000000000000000000000000000000000..4e926d140b2cee2035949b498764e2b9257c1cd7 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wq.c @@ -0,0 +1,624 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_wq.h" + +#define WQS_MAX_NUM_BLOCKS 128 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +static void wqs_return_block(struct hifc_wqs *wqs, u32 page_idx, u32 
block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + wqs->num_free_blks++; + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + spin_unlock(&wqs->alloc_blocks_lock); +} + +static int wqs_next_block(struct hifc_wqs *wqs, u32 *page_idx, + u32 *block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + if (wqs->num_free_blks <= 0) { + spin_unlock(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + wqs->num_free_blks--; + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = 0xFFFFFFFF; + wqs->free_blocks[pos].block_idx = 0xFFFFFFFF; + + spin_unlock(&wqs->alloc_blocks_lock); + + return 0; +} + +static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr, + u64 **shadow_vaddr, u64 page_sz) +{ + dma_addr_t dma_addr = 0; + + *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + sdk_err(handle, "Failed to allocate dma to wqs page\n"); + return -ENOMEM; + } + + if (!ADDR_4K_ALIGNED(dma_addr)) { + sdk_err(handle, "Cla is not 4k aligned!\n"); + goto shadow_vaddr_err; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem, shadow_vaddr only used at initialization */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) { + sdk_err(handle, "Failed to allocate shadow page vaddr\n"); + goto shadow_vaddr_err; + } + + return 0; + +shadow_vaddr_err: + dma_free_coherent(handle, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +static int wqs_allocate_page(struct hifc_wqs *wqs, u32 page_idx) +{ + return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); +} + +static void wqs_free_page(struct hifc_wqs *wqs, u32 page_idx) +{ + dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +static int cmdq_allocate_page(struct hifc_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->dev_hdl, + &cmdq_pages->cmdq_page_vaddr, + &cmdq_pages->cmdq_page_paddr, + &cmdq_pages->cmdq_shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +static void cmdq_free_page(struct hifc_cmdq_pages *cmdq_pages) +{ + dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE, + cmdq_pages->cmdq_page_vaddr, + (dma_addr_t)cmdq_pages->cmdq_page_paddr); + vfree(cmdq_pages->cmdq_shadow_page_vaddr); +} + +static int alloc_wqes_shadow(struct hifc_wq *wq) +{ + u64 size; + + /* if wq->max_wqe_size == 0, we don't need to alloc shadow */ + if (wq->max_wqe_size <= wq->wqebb_size) + return 0; + + size = (u64)wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_wqe) { + pr_err("Failed to allocate shadow wqe\n"); + return -ENOMEM; + } + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_idx) { + pr_err("Failed to allocate shadow index\n"); + goto shadow_idx_err; + } + + return 0; + +shadow_idx_err: + kfree(wq->shadow_wqe); + return -ENOMEM; +} + +static void free_wqes_shadow(struct hifc_wq *wq) +{ + if (wq->max_wqe_size <= wq->wqebb_size) + return; + + kfree(wq->shadow_idx); + kfree(wq->shadow_wqe); +} + +static void free_wq_pages(void *handle, struct hifc_wq *wq, + u32 num_q_pages) +{ + u32 i; + + for (i = 0; i < 
num_q_pages; i++) + hifc_dma_free_coherent_align(handle, &wq->mem_align[i]); + + free_wqes_shadow(wq); + + wq->block_vaddr = NULL; + wq->shadow_block_vaddr = NULL; + + kfree(wq->mem_align); +} + +static int alloc_wq_pages(void *dev_hdl, struct hifc_wq *wq) +{ + struct hifc_dma_addr_align *mem_align; + u64 *vaddr, *paddr; + u32 i, num_q_pages; + int err; + + vaddr = wq->shadow_block_vaddr; + paddr = wq->block_vaddr; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > WQ_MAX_PAGES) { + sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n", + num_q_pages); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n", + num_q_pages); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wqe shadow\n"); + return err; + } + + wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align), + GFP_KERNEL); + if (!wq->mem_align) { + sdk_err(dev_hdl, "Failed to allocate mem_align\n"); + free_wqes_shadow(wq); + return -ENOMEM; + } + + for (i = 0; i < num_q_pages; i++) { + mem_align = &wq->mem_align[i]; + err = hifc_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size, + wq->wq_page_size, + GFP_KERNEL, mem_align); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wq page\n"); + goto alloc_wq_pages_err; + } + + *paddr = cpu_to_be64(mem_align->align_paddr); + *vaddr = (u64)mem_align->align_vaddr; + + paddr++; + vaddr++; + } + + return 0; + +alloc_wq_pages_err: + free_wq_pages(dev_hdl, wq, i); + + return -ENOMEM; +} + +int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size) +{ + u32 num_wqebbs_per_page; + int err; + + if (wqebb_size == 0) { + sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + if (wq_page_size & (wq_page_size - 1)) { + sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n", + wq_page_size); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n", + num_wqebbs_per_page); + return -EINVAL; + } + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; + wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page); + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wqs->dev_hdl, wq); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n"); + goto alloc_wq_pages_err; + } + + atomic_set(&wq->delta, q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + wq->mask = q_depth - 1; + + return 0; + +alloc_wq_pages_err: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq) +{ + free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} + +static void init_wqs_blocks_arr(struct hifc_wqs 
*wqs) +{ + u32 page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = 0; + wqs->num_free_blks = WQS_MAX_NUM_BLOCKS; + spin_lock_init(&wqs->alloc_blocks_lock); +} + +void hifc_wq_wqe_pg_clear(struct hifc_wq *wq) +{ + u64 *block_vaddr; + u32 pg_idx; + + block_vaddr = wq->shadow_block_vaddr; + + atomic_set(&wq->delta, wq->q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++) + memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size); +} + +int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size) +{ + int i, j, err = -ENOMEM; + + if (q_depth & (q_depth - 1)) { + sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + cmdq_pages->dev_hdl = dev_hdl; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + sdk_err(dev_hdl, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].page_idx = 0; + wq[i].block_idx = (u32)i; + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = + ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + wq[i].wqebbs_per_page_shift = + (u32)ilog2(wq[i].num_wqebbs_per_page); + + wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]); + if (err) { + sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n"); + goto cmdq_block_err; + } + + atomic_set(&wq[i].delta, q_depth); + wq[i].cons_idx = 0; + wq[i].prod_idx = 0; + wq[i].mask = q_depth - 1; + } + + return 0; + +cmdq_block_err: + for (j = 0; j < i; j++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} + +static int alloc_page_addr(struct hifc_wqs *wqs) +{ + u64 size = wqs->num_pages * sizeof(*wqs->page_paddr); + + wqs->page_paddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto page_vaddr_err; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto page_shadow_vaddr_err; + + return 0; + +page_shadow_vaddr_err: + kfree(wqs->page_vaddr); + +page_vaddr_err: + kfree(wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_addr(struct hifc_wqs *wqs) +{ + kfree(wqs->shadow_page_vaddr); + kfree(wqs->page_vaddr); + kfree(wqs->page_paddr); +} + +int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl) +{ + u32 i, page_idx; + int err; + + wqs->dev_hdl = dev_hdl; + wqs->num_pages = WQ_NUM_PAGES(num_wqs); + + if (alloc_page_addr(wqs)) { + sdk_err(dev_hdl, "Failed to allocate mem 
for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + sdk_err(dev_hdl, "Failed wq page allocation\n"); + goto wq_allocate_page_err; + } + } + + wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto alloc_blocks_err; + } + + init_wqs_blocks_arr(wqs); + return 0; + +alloc_blocks_err: +wq_allocate_page_err: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_addr(wqs); + return err; +} + +void hifc_wqs_free(struct hifc_wqs *wqs) +{ + u32 page_idx; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_addr(wqs); + kfree(wqs->free_blocks); +} + +static void copy_wqe_to_shadow(struct hifc_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size); + } +} + +void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index) +{ + return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index); +} + +u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq) +{ + return be64_to_cpu(*wq->block_vaddr); +} + +void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx) +{ + u32 curr_pg, end_pg; + u16 curr_prod_idx, end_prod_idx; + + if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) { + atomic_add(num_wqebbs, &wq->delta); + return NULL; + } + + /* use original cur_pi and end_pi, no need queue depth mask as + * WQE_PAGE_NUM will do num_queue_pages mask + */ + curr_prod_idx = (u16)wq->prod_idx; + wq->prod_idx += num_wqebbs; + + /* end prod index should points to the last wqebb of wqe, + * therefore minus 1 + */ + end_prod_idx = (u16)wq->prod_idx - 1; + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* If we only have one page, still need to get shadown wqe when + * wqe rolling-over page + */ + if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs) +{ + atomic_add(num_wqebbs, &wq->delta); + wq->cons_idx += num_wqebbs; +} + +void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx) +{ + u32 curr_pg, end_pg; + u16 curr_cons_idx, end_cons_idx; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return NULL; + + curr_cons_idx = (u16)wq->cons_idx; + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + 
WQE_PAGE_OFF(wq, *cons_idx); +} diff --git a/drivers/scsi/huawei/hifc/hifc_wq.h b/drivers/scsi/huawei/hifc/hifc_wq.h new file mode 100644 index 0000000000000000000000000000000000000000..207d54191afa402748ea5881bd1d0347f2cda86c --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wq.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_WQ_H +#define HIFC_WQ_H + +#define WQS_BLOCKS_PER_PAGE 4 +#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \ + ((wq)->num_q_pages - 1)) + +#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \ + ((idx) & ((wq)->num_wqebbs_per_page - 1))) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_PAGE_ADDR_SIZE_SHIFT 3 +#define WQ_PAGE_ADDR(wq, idx) \ + (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \ + (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))) + +#define WQ_BLOCK_SIZE 4096UL +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT) + +#define CMDQ_BLOCKS_PER_PAGE 8 +#define CMDQ_BLOCK_SIZE 512UL +#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \ + CMDQ_BLOCK_SIZE), PAGE_SIZE) + +#define ADDR_4K_ALIGNED(addr) (((addr) & 0xfff) == 0) + +#define WQ_BASE_VADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \ + + (u64)(wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + (((u64)((cmdq_pages)->cmdq_page_paddr)) \ + + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQ_NUM_PAGES(num_wqs) \ + (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE) + +#define MAX_WQE_SIZE(max_sge, wqebb_size) \ + ((max_sge <= 2) ? 
(wqebb_size) : \ + ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size))) + +struct hifc_free_block { + u32 page_idx; + u32 block_idx; +}; + +struct hifc_wq { + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + u64 *shadow_block_vaddr; + u64 *block_vaddr; + + u32 wqebb_size; + u32 wq_page_size; + u16 q_depth; + u32 max_wqe_size; + u32 num_wqebbs_per_page; + + /* performance: replace mul/div as shift; + * num_wqebbs_per_page must be power of 2 + */ + u32 wqebbs_per_page_shift; + u32 page_idx; + u32 block_idx; + + u32 num_q_pages; + + struct hifc_dma_addr_align *mem_align; + + int cons_idx; + int prod_idx; + + atomic_t delta; + u16 mask; + + u8 *shadow_wqe; + u16 *shadow_idx; +}; + +struct hifc_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 cmdq_page_paddr; + u64 *cmdq_page_vaddr; + u64 *cmdq_shadow_page_vaddr; + + void *dev_hdl; +}; + +struct hifc_wqs { + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + u64 **shadow_page_vaddr; + + struct hifc_free_block *free_blocks; + u32 alloc_blk_pos; + u32 return_blk_pos; + int num_free_blks; + + /* for allocate blocks */ + spinlock_t alloc_blocks_lock; + + u32 num_pages; + + void *dev_hdl; +}; + +void hifc_wq_wqe_pg_clear(struct hifc_wq *wq); + +int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size); + +void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, int cmdq_blocks); + +int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl); + +void hifc_wqs_free(struct hifc_wqs *wqs); + +int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size); + +void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq); + +void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index); + +u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq); + +void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx); + +void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs); + +void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx); + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_wqe.c b/drivers/scsi/huawei/hifc/hifc_wqe.c new file mode 100644 index 0000000000000000000000000000000000000000..d1339d4bf9170142c07a8954b7e27ebaa5aa0d6d --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wqe.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" +#include "hifc_service.h" + +void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl, + unsigned char v_task_len) +{ + /* "BDSL" field of CtrlS - defines the size of BDS, + * which varies from 0 to 2040 bytes (8 bits of 8 bytes' chunk) + */ + v_ctrl_sl->ch.wd0.bdsl = 0; + + /* + * "DrvSL" field of CtrlS - defines the size of DrvS, which varies from + * 0 to 24 bytes + */ + v_ctrl_sl->ch.wd0.drv_sl = 0; + + /* a. + * b1 - linking WQE, which will be only used in linked page architecture + * instead of ring, it's a special control WQE which does not contain + * any buffer or inline data information, and will only be consumed by + * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either + * normal SEG WQE or inline data WQE + */ + v_ctrl_sl->ch.wd0.wf = 0; + + /* + * "CF" field of CtrlS - Completion Format - defines the format of CS. 
+ * a.b0 - Status information is embedded inside of Completion Section + * b.b1 - Completion Section keeps SGL, where Status information + * should be written. (For the definition of SGLs see ?4.1* .) + */ + v_ctrl_sl->ch.wd0.cf = 0; + + /* + * "TSL" field of CtrlS - defines the size of TS, which varies from 0 + * to 248 bytes + */ + v_ctrl_sl->ch.wd0.tsl = v_task_len; + + /* + * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE + * format is of two types, which are defined by "VA " field of CtrlS. + * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's + * pointer and 31-bits Length, each SGE can only support up to 2G-1B, + * it can guarantee each single SGE length can not exceed 2GB by nature, + * A byte count value of zero means a 0byte data transfer.o b1. + * SGE comprises 64-bits buffer's pointer, 31-bits Length and 30-bits + * Key of the Translation table ,each SGE can only support up to 2G-1B, + * it can guarantee each single SGE length can notexceed 2GB by nature, + * A byte count value of zero means a 0byte data transfer + */ + v_ctrl_sl->ch.wd0.va = 0; + + /* + * "DF" field of CtrlS - Data Format - defines the format of BDS + * a. b0 - BDS carries the list of SGEs (SGL) + * b. b1 - BDS carries the inline data + */ + v_ctrl_sl->ch.wd0.df = 0; + + /* + * "CR" - Completion is Required - marks CQE generation request per WQE + */ + v_ctrl_sl->ch.wd0.cr = 1; + + /* + * "DIFSL" field of CtrlS - defines the size of DIFS, which varies from + * 0 to 56 bytes + */ + v_ctrl_sl->ch.wd0.dif_sl = 0; + + /* + * "CSL" field of CtrlS - defines the size of CS, which varies from 0 to + * 24 bytes + */ + v_ctrl_sl->ch.wd0.csl = 0; + + /* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks. + *The value Zero is not valid + */ + v_ctrl_sl->ch.wd0.ctrl_sl = 1; + + /* "O" - Owner - marks ownership of WQE */ + v_ctrl_sl->ch.wd0.owner = 0; +} + +void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe) +{ + /* "BDSL" field of CtrlS - defines the size of BDS, which varies from + * 0 to 2040 bytes (8 bits of 8 bytes' chunk) + */ + /* TrdWqe carry 2 SGE defaultly, 4DW per SGE, the value is 4 because + * unit is 2DW, in double SGL mode, bdsl is 2 + */ + v_sqe->ctrl_sl.ch.wd0.bdsl = HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE; + + /* + * "DrvSL" field of CtrlS - defines the size of DrvS, which varies from + * 0 to 24 bytes DrvSL config for 0 + */ + v_sqe->ctrl_sl.ch.wd0.drv_sl = 0; + + /* a. b1 - linking WQE, which will be only used in linked page + * architecture instead of ring, it's a special control WQE which does + * not contain any buffer or inline data information, and will only be + * consumed by hardware. The size is aligned to WQEBB/WQE b0 - normal + * WQE, either normal SEG WQE or inline data WQE + */ + /* normal wqe */ + v_sqe->ctrl_sl.ch.wd0.wf = 0; + + /* + * "CF" field of CtrlS - Completion Format - defines the format of CS. + * a.b0 - Status information is embedded inside of Completion Section + * b.b1 - Completion Section keeps SGL, where Status information + * should be written. (For the definition of SGLs see ?4.1.) + */ + /* by SCQE mode, the value is ignored */ + v_sqe->ctrl_sl.ch.wd0.cf = 0; + + /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to + * 248 bytes + */ + /* TSL is configured by 56 bytes */ + v_sqe->ctrl_sl.ch.wd0.tsl = sizeof(struct hifcoe_sqe_ts_s) / + HIFC_WQE_SECTION_CHUNK_SIZE; + + /* + * Variable length SGE (vSGE). The size of SGE is 16 bytes. 
The vSGE + * format is of two types, which are defined by "VA" field of CtrlS. + * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's + * pointer and 31-bits Length, each SGE can only support up to 2G-1B, it + * can guarantee each single SGE length can not exceed 2GB by nature, A + * byte count value of zero means a 0byte data transfer. o b1. SGE + * comprises 64-bits buffer's pointer, 31-bits Length and 30-bits Key of + * the Translation table , each SGE can only support up to 2G-1B, it can + * guarantee each single SGE length can not exceed 2GB by nature, A byte + * count value of zero means a 0byte data transfer + */ + v_sqe->ctrl_sl.ch.wd0.va = 0; + + /* + * "DF" field of CtrlS - Data Format - defines the format of BDS + * a. b0 - BDS carries the list of SGEs (SGL) + * b. b1 - BDS carries the inline data + */ + v_sqe->ctrl_sl.ch.wd0.df = 0; + + /* "CR" - Completion is Required marks CQE generation request per WQE */ + /* by SCQE mode, this value is ignored */ + v_sqe->ctrl_sl.ch.wd0.cr = 1; + + /* + * "DIFSL" field of CtrlS - defines the size of DIFS, which varies from + * 0 to 56 bytes. + */ + v_sqe->ctrl_sl.ch.wd0.dif_sl = 0; + + /* + * "CSL" field of CtrlS - defines the size of CS, which varies from 0 to + * 24 bytes + */ + v_sqe->ctrl_sl.ch.wd0.csl = 0; + + /* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks. + * The value Zero is not valid. + */ + v_sqe->ctrl_sl.ch.wd0.ctrl_sl = HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE; + + /* "O" - Owner - marks ownership of WQE */ + v_sqe->ctrl_sl.ch.wd0.owner = 0; +} + +void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts, + unsigned int rport_index, + unsigned short local_xid, + unsigned short remote_xid, + unsigned short data_len) +{ + v_sqe_ts->local_xid = local_xid; + + v_sqe_ts->wd0.conn_id = (unsigned short)rport_index; + v_sqe_ts->wd0.remote_xid = remote_xid; + + v_sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len; +} + +void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr, + unsigned long long v_phy_addr, + unsigned int buf_len, + unsigned int xid, void *v_hba) +{ + unsigned long long els_rsp_phy_addr; + struct hifcoe_variable_sge_s *psge = NULL; + + /* Fill in SGE and convert it to big-endian. */ + psge = &v_sqe->sge[0]; + els_rsp_phy_addr = v_phy_addr; + psge->buf_addr_hi = HIFC_HIGH_32_BITS(els_rsp_phy_addr); + psge->buf_addr_lo = HIFC_LOW_32_BITS(els_rsp_phy_addr); + psge->wd0.buf_len = buf_len; + psge->wd0.r_flag = 0; + psge->wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG; + psge->wd1.buf_addr_gpa = (psge->buf_addr_lo >> 16); + psge->wd1.xid = (xid & 0x3fff); + psge->wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + hifc_cpu_to_big32(psge, sizeof(*psge)); + + /* Converts the payload of an FC frame into a big end. */ + hifc_cpu_to_big32(v_buf_addr, buf_len); +} + +void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + void *v_frame_pld, unsigned short type, + unsigned short cmnd, unsigned int v_scqn) +{ + struct unf_pril_payload_s *pri_acc_pld = NULL; + struct hifcoe_sqe_els_rsp_s *els_rsp = NULL; + struct hifcoe_sqe_ts_s *sqe_ts = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sqe, return); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_frame_pld, return); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sq_info, return); + + sqe_ts = &v_sqe->ts_sl; + els_rsp = &sqe_ts->cont.els_rsp; + sqe_ts->task_type = HIFC_SQE_ELS_RSP; + + /* The default chip does not need to update parameters. 
*/ + els_rsp->wd1.para_update = 0x0; + + sq_info = (struct hifc_parent_sq_info_s *)v_sq_info; + hba = (struct hifc_hba_s *)sq_info->phba; + /* When the PLOGI request is sent, the microcode needs to be instructed + * to clear the I/O related to the link to avoid data inconsistency + * caused by the disorder of the IO. + */ + if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) { + els_rsp->wd1.clr_io = 1; + els_rsp->wd6.reset_exch_start = hba->exit_base; + els_rsp->wd6.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + els_rsp->wd7.scqn = v_scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", + sq_info->local_port_id, + cmnd, + sq_info->remote_port_id, + sq_info->rport_index, + els_rsp->wd6.reset_exch_start, + els_rsp->wd6.reset_exch_end, + v_scqn); + + return; + } + + if (type == ELS_RJT) + return; + + /* + * Enter WQE in the PrliAcc negotiation parameter, and fill in the + * Update flag in WQE. + */ + if (cmnd == ELS_PRLI) { + /* The chip updates the PLOGI ACC negotiation parameters. */ + els_rsp->wd2.seq_cnt = sq_info->plogi_coparams.seq_cnt; + els_rsp->wd2.e_d_tov = sq_info->plogi_coparams.ed_tov; + els_rsp->wd2.tx_mfs = sq_info->plogi_coparams.tx_mfs; + els_rsp->e_d_tov_timer_val = + sq_info->plogi_coparams.ed_tov_timer_val; + + /* The chip updates the PRLI ACC parameter. */ + pri_acc_pld = (struct unf_pril_payload_s *)v_frame_pld; + els_rsp->wd4.xfer_dis = HIFC_GET_PRLI_PARAM_WXFER( + pri_acc_pld->parms); + els_rsp->wd4.conf = HIFC_GET_PRLI_PARAM_CONF( + pri_acc_pld->parms); + els_rsp->wd4.rec = HIFC_GET_PRLI_PARAM_REC(pri_acc_pld->parms); + + els_rsp->wd1.para_update = 0x03; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x,xfer_dis:0x%x, conf:0x%x,rec:0x%x.", + sq_info->local_port_id, + sq_info->rport_index, els_rsp->wd2.seq_cnt, + els_rsp->wd2.e_d_tov, els_rsp->wd2.tx_mfs, + els_rsp->e_d_tov_timer_val, els_rsp->wd4.xfer_dis, + els_rsp->wd4.conf, els_rsp->wd4.rec); + } +} + +void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + unsigned short cmnd, unsigned int v_scqn, + void *v_frame_pld) +{ + struct hifcoe_sqe_ts_s *v_sqe_ts = NULL; + struct hifcoe_sqe_t_els_gs_s *els_req = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + struct unf_rec_pld_s *rec_pld = NULL; + + v_sqe_ts = &v_sqe->ts_sl; + v_sqe_ts->task_type = HIFC_SQE_ELS_CMND; + els_req = &v_sqe_ts->cont.t_els_gs; + + sq_info = (struct hifc_parent_sq_info_s *)v_sq_info; + hba = (struct hifc_hba_s *)sq_info->phba; + + /* + * When the PLOGI request is sent, the microcode needs to be instructed + * to clear the I/O related to the link to avoid data inconsistency + * caused by the disorder of the IO. + */ + if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) { + els_req->wd4.clr_io = 1; + els_req->wd6.reset_exch_start = hba->exit_base; + els_req->wd6.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + els_req->wd7.scqn = v_scqn; + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", + hba->port_cfg.port_id, sq_info->rport_index, + sq_info->local_port_id, + (cmnd == ELS_PLOGI) ? 
"PLOGI" : "LOGO", + sq_info->remote_port_id, + els_req->wd6.reset_exch_start, + els_req->wd6.reset_exch_end, + v_scqn); + + return; + } + + /* The chip updates the PLOGI ACC negotiation parameters. */ + if (cmnd == ELS_PRLI) { + els_req->wd5.seq_cnt = sq_info->plogi_coparams.seq_cnt; + els_req->wd5.e_d_tov = sq_info->plogi_coparams.ed_tov; + els_req->wd5.tx_mfs = sq_info->plogi_coparams.tx_mfs; + els_req->e_d_tov_timer_val = + sq_info->plogi_coparams.ed_tov_timer_val; + + els_req->wd4.rec_support = hba->port_cfg.tape_support ? 1 : 0; + els_req->wd4.para_update = 0x01; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x, e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.", + sq_info->local_port_id, sq_info->rport_index, + els_req->wd5.seq_cnt, els_req->wd5.e_d_tov, + els_req->wd5.tx_mfs, + els_req->e_d_tov_timer_val); + } + + if (cmnd == ELS_ECHO) + els_req->echo_flag = UNF_TRUE; + if (cmnd == ELS_REC) { + rec_pld = (struct unf_rec_pld_s *)v_frame_pld; + els_req->wd4.rec_flag = 1; + rec_pld->ox_id += hba->exit_base; + els_req->wd4.orign_oxid = rec_pld->ox_id; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_oxid 0x%x", + hba->port_cfg.port_id, sq_info->rport_index, + sq_info->local_port_id, + sq_info->remote_port_id, + els_req->wd4.orign_oxid); + } +} + +void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe, + unsigned short els_cmnd_type, + unsigned int v_magic_num) +{ + struct hifcoe_sqe_t_els_gs_s *els_req; + struct hifcoe_sqe_els_rsp_s *els_rsp; + + if (els_cmnd_type == ELS_ACC || els_cmnd_type == ELS_RJT) { + els_rsp = &v_sqe->ts_sl.cont.els_rsp; + els_rsp->magic_num = v_magic_num; + } else { + els_req = &v_sqe->ts_sl.cont.t_els_gs; + els_req->magic_num = v_magic_num; + } +} + +void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int magic_num) +{ + struct hifcoe_sqe_ts_s *v_sqe_ts = NULL; + struct hifcoe_sqe_t_els_gs_s *gs_req = NULL; + + v_sqe_ts = &v_sqe->ts_sl; + v_sqe_ts->task_type = HIFC_SQE_GS_CMND; + + gs_req = &v_sqe_ts->cont.t_els_gs; + gs_req->magic_num = magic_num; +} + +void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int abts_param, + unsigned int magic_num) +{ + struct hifcoe_sqe_abts_s *abts_ts; + + v_sqe->ts_sl.task_type = HIFC_SQE_BLS_CMND; + abts_ts = &v_sqe->ts_sl.cont.abts; + abts_ts->fh_parm_abts = abts_param; + abts_ts->magic_num = magic_num; +} + +void hifc_build_service_wqe_root_ts(void *v_hba, + struct hifc_root_sqe_s *v_rt_sqe, + unsigned int rx_id, unsigned int rport_id, + unsigned int scq_num) +{ + unsigned char data_cos = 0; + unsigned int port_id = 0; + unsigned int service_type = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *parent_queue_info = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + port_id = HIFC_GET_HBA_PORT_ID(hba); + service_type = HIFC_GET_SERVICE_TYPE(hba); + + if (rport_id >= UNF_HIFC_MAXRPORT_NUM) { + data_cos = HIFC_GET_PACKET_COS(service_type); + } else { + parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_id]; + data_cos = parent_queue_info->queue_data_cos; + } + + v_rt_sqe->task_section.fc_dw0.exch_id = rx_id; + v_rt_sqe->task_section.fc_dw0.host_id = 0; + v_rt_sqe->task_section.fc_dw0.port_id = port_id; + v_rt_sqe->task_section.fc_dw0.off_load = HIFC_NO_OFFLOAD; + v_rt_sqe->task_section.fc_dw3.rport_index = HIFC_LSW(rport_id); + v_rt_sqe->task_section.fc_dw3.scq_num = HIFC_LSW(scq_num); + 
v_rt_sqe->task_section.fc_dw4.service_type = UNF_GET_SHIFTMASK( + service_type, 0, 0x1f); + v_rt_sqe->task_section.fc_dw4.pkt_type = HIFC_GET_PACKET_TYPE( + service_type); + v_rt_sqe->task_section.fc_dw4.pkt_cos = data_cos; +} + +void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe, + void *v_buf_addr, + unsigned long long v_phy_addr, + unsigned int buf_len, + void *v_hba) +{ + unsigned long long frame_phy_addr; + + /* Enter the SGE and convert it to the big-endian mode. */ + frame_phy_addr = v_phy_addr; + v_rt_sqe->sge.buf_addr_hi = HIFC_HIGH_32_BITS(frame_phy_addr); + v_rt_sqe->sge.buf_addr_lo = HIFC_LOW_32_BITS(frame_phy_addr); + v_rt_sqe->sge.wd0.buf_len = buf_len; + v_rt_sqe->sge.wd0.ext_flag = 0; + v_rt_sqe->sge.wd1.rsvd = 0; + hifc_cpu_to_big32(&v_rt_sqe->sge, sizeof(v_rt_sqe->sge)); + + /* Converting FC Frames into big Ends */ + hifc_cpu_to_big32(v_buf_addr, buf_len); +} + +void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe, + unsigned long long v_ctxt_addr, + unsigned int buf_len) +{ + /* The SGE is filled in and converted to the big-endian mode. */ + v_rt_sqe->ctx_sge.buf_addr_hi = HIFC_HIGH_32_BITS(v_ctxt_addr); + v_rt_sqe->ctx_sge.buf_addr_lo = HIFC_LOW_32_BITS(v_ctxt_addr); + v_rt_sqe->ctx_sge.wd0.buf_len = buf_len; + v_rt_sqe->ctx_sge.wd0.ext_flag = 0; + v_rt_sqe->ctx_sge.wd1.rsvd = 0; + + hifc_cpu_to_big32(&v_rt_sqe->ctx_sge, sizeof(v_rt_sqe->ctx_sge)); +} + +void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe, + dma_addr_t ctxt_addr, + unsigned int xid) +{ + /* update Task Section DW0.OFFLOAD */ + v_rt_sqe->task_section.fc_dw0.off_load = HIFC_HAVE_OFFLOAD; + + /* update Context GPA DW1~2 */ + v_rt_sqe->task_section.fc_dw1.context_gpa_hi = + HIFC_HIGH_32_BITS(ctxt_addr); + v_rt_sqe->task_section.fc_dw2.context_gpa_lo = + HIFC_LOW_32_BITS(ctxt_addr); + + /* fill Context DW4 */ + v_rt_sqe->task_section.fc_dw4.parent_xid = xid; + v_rt_sqe->task_section.fc_dw4.csize = HIFC_CNTX_SIZE_T_256B; + + /* The sqe of the offload request has two sge. The first is the packet, + * and the second is the ctx. + */ + v_rt_sqe->ctrl_section.ch.wd0.bdsl = + 2 * HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s)); +} + +void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned int ts_size, + unsigned int bdsi) +{ + v_wqe_cs->ch.wd0.bdsl = bdsi; + v_wqe_cs->ch.wd0.drv_sl = 0; + v_wqe_cs->ch.wd0.rsvd0 = 0; + v_wqe_cs->ch.wd0.wf = 0; + v_wqe_cs->ch.wd0.cf = 0; + v_wqe_cs->ch.wd0.tsl = ts_size; + v_wqe_cs->ch.wd0.va = 0; + v_wqe_cs->ch.wd0.df = 0; + v_wqe_cs->ch.wd0.cr = 1; + v_wqe_cs->ch.wd0.dif_sl = 0; + v_wqe_cs->ch.wd0.csl = 0; + /* divided by 8 */ + v_wqe_cs->ch.wd0.ctrl_sl = HIFC_BYTES_TO_QW_NUM(sizeof(*v_wqe_cs)); + v_wqe_cs->ch.wd0.owner = 0; +} + +void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned short owner, + unsigned short pmsn) +{ + v_wqe_cs->qsf.wqe_sn = pmsn; + v_wqe_cs->qsf.dump_wqe_sn = v_wqe_cs->qsf.wqe_sn; + v_wqe_cs->ch.wd0.owner = (unsigned int)owner; +} + +void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe) +{ + if (likely((v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TRESP) && + (v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TMF_RESP))) { + /* + * Convert Control Secton and Task Section to big-endian. Before + * the SGE enters the queue, the upper-layer driver converts the + * SGE and Task Section to the big-endian mode. 
+ */ + hifc_cpu_to_big32(&v_sqe->ctrl_sl, sizeof(v_sqe->ctrl_sl)); + hifc_cpu_to_big32(&v_sqe->ts_sl, sizeof(v_sqe->ts_sl)); + } else { + /* + * The HIFCOE_TASK_T_TRESP may use the SGE as the Task Section + * to convert the entire SQE into a large end. + */ + hifc_cpu_to_big32(v_sqe, sizeof(struct hifcoe_sqe_tresp_s)); + } +} + +void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe) +{ + hifc_cpu_to_big32(&v_sqe->ctrl_section, sizeof(v_sqe->ctrl_section)); + hifc_cpu_to_big32(&v_sqe->task_section, sizeof(v_sqe->task_section)); +} + +void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe, + enum hifcoe_task_type_e task_type, + unsigned short rx_id) +{ + cmdqe->common.wd0.task_type = task_type; + cmdqe->common.wd0.rx_id = rx_id; + cmdqe->common.wd0.rsvd0 = 0; +} + +#define HIFC_STANDARD_SIRT_ENABLE 1 +#define HIFC_STANDARD_SIRT_DISABLE 0 +#define HIFC_UNKNOWN_ID 0xFFFF + +void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + unsigned char task_type, + unsigned short exit_base, + unsigned char v_port_idx) +{ + v_sqe->ts_sl.local_xid = UNF_GET_OXID(v_pkg) + exit_base; + v_sqe->ts_sl.task_type = task_type; + v_sqe->ts_sl.wd0.conn_id = + (unsigned short)(v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]); + + v_sqe->ts_sl.wd0.remote_xid = HIFC_UNKNOWN_ID; +} + +void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts) +{ + struct hifcoe_sqe_icmnd_s *icmd = &v_sqe_ts->cont.icmnd; + void *phy_add = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + v_sqe_ts->cdb_type = 0; + memcpy(icmd->fcp_cmnd_iu, v_pkg->fcp_cmnd, + sizeof(struct unf_fcp_cmnd_s)); + + icmd->magic_num = UNF_GETXCHGALLOCTIME(v_pkg); + + if (v_pkg->unf_rsp_pload_bl.buffer_ptr) { + phy_add = (void *)v_pkg->unf_rsp_pload_bl.buf_dma_addr; + icmd->rsp_gpa_hi = HIFC_HIGH_32_BITS(phy_add); + icmd->rsp_gpa_lo = HIFC_LOW_32_BITS(phy_add); + } else { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]INI Build WQE sense buffer should not be null,sid_did (0x%x_0x%x) oxid(0x%x) pkg type(0x%x) hot pool tag(0x%x).", + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, + UNF_GET_OXID(v_pkg), + v_pkg->type, UNF_GET_XCHG_TAG(v_pkg)); + } + + if (v_sqe_ts->task_type != HIFC_SQE_FCP_ITMF) { + icmd->info.tmf.w0.bs.reset_exch_start = hba->exit_base; + icmd->info.tmf.w0.bs.reset_exch_end = hba->exit_base + + hba->exit_count - 1; + + icmd->info.tmf.w1.bs.reset_did = UNF_GET_DID(v_pkg); + /* delivers the marker status flag to the microcode. 
*/ + icmd->info.tmf.w1.bs.marker_sts = 1; + HIFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS( + v_pkg->fcp_cmnd->control), + icmd->info.tmf.w1.bs.reset_type); + + icmd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(v_pkg); + + memcpy(icmd->info.tmf.reset_lun, v_pkg->fcp_cmnd->lun, + sizeof(icmd->info.tmf.reset_lun)); + } +} + +void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, + unsigned short owner, + unsigned short pmsn) +{ + struct hifcoe_wqe_ctrl_ch_s *wqe_ctrls = NULL; + + wqe_ctrls = &v_rqe->ctrl_sl.ch; + wqe_ctrls->wd0.owner = owner; + wqe_ctrls->wd0.ctrl_sl = sizeof(struct hifcoe_wqe_ctrl_s) >> 3; + wqe_ctrls->wd0.csl = 1; + wqe_ctrls->wd0.dif_sl = 0; + wqe_ctrls->wd0.cr = 1; + wqe_ctrls->wd0.df = 0; + wqe_ctrls->wd0.va = 0; + wqe_ctrls->wd0.tsl = 0; + wqe_ctrls->wd0.cf = 0; + wqe_ctrls->wd0.wf = 0; + wqe_ctrls->wd0.drv_sl = sizeof(struct hifcoe_rqe_drv_s) >> 3; + wqe_ctrls->wd0.bdsl = sizeof(struct hifcoe_constant_sge_s) >> 3; + + v_rqe->ctrl_sl.wd0.wqe_msn = pmsn; + v_rqe->ctrl_sl.wd0.dump_wqe_msn = v_rqe->ctrl_sl.wd0.wqe_msn; +} diff --git a/drivers/scsi/huawei/hifc/hifc_wqe.h b/drivers/scsi/huawei/hifc/hifc_wqe.h new file mode 100644 index 0000000000000000000000000000000000000000..991b3b8ce00eaba0f8cbdd54e8bc6160d330b961 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wqe.h @@ -0,0 +1,486 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_WQE_H__ +#define __HIFC_WQE_H__ + +#include "hifcoe_wqe.h" +#include "hifcoe_parent_context.h" + +/* TGT WQE type */ +/* DRV->uCode via Root or Parent SQ */ +#define HIFC_SQE_FCP_TRD HIFCOE_TASK_T_TREAD +#define HIFC_SQE_FCP_TWR HIFCOE_TASK_T_TWRITE +#define HIFC_SQE_FCP_TRSP HIFCOE_TASK_T_TRESP +#define HIFC_SQE_FCP_TACK HIFCOE_TASK_T_TACK +#define HIFC_SQE_ELS_CMND HIFCOE_TASK_T_ELS +#define HIFC_SQE_ELS_RSP HIFCOE_TASK_T_ELS_RSP +#define HIFC_SQE_GS_CMND HIFCOE_TASK_T_GS +#define HIFC_SQE_BLS_CMND HIFCOE_TASK_T_ABTS +#define HIFC_SQE_FCP_IREAD HIFCOE_TASK_T_IREAD +#define HIFC_SQE_FCP_IWRITE HIFCOE_TASK_T_IWRITE +#define HIFC_SQE_FCP_ITMF HIFCOE_TASK_T_ITMF +#define HIFC_SQE_SESS_RST HIFCOE_TASK_T_SESS_RESET +#define HIFC_SQE_FCP_TMF_TRSP HIFCOE_TASK_T_TMF_RESP + +/* DRV->uCode Via CMDQ */ +#define HIFC_CMDQE_ABTS_RSP HIFCOE_TASK_T_ABTS_RSP +#define HIFC_CMDQE_ABORT HIFCOE_TASK_T_ABORT +#define HIFC_CMDQE_SESS_DIS HIFCOE_TASK_T_SESS_DIS +#define HIFC_CMDQE_SESS_DEL HIFCOE_TASK_T_SESS_DEL + +/* uCode->Drv Via CMD SCQ */ +#define HIFC_SCQE_FCP_TCMND HIFCOE_TASK_T_RCV_TCMND +#define HIFC_SCQE_ELS_CMND HIFCOE_TASK_T_RCV_ELS_CMD +#define HIFC_SCQE_ABTS_CMD HIFCOE_TASK_T_RCV_ABTS_CMD +#define HIFC_SCQE_FCP_IRSP HIFCOE_TASK_T_IRESP +#define HIFC_SCQE_FCP_ITMF_RSP HIFCOE_TASK_T_ITMF_RESP + +/* uCode->Drv Via STS SCQ */ +#define HIFC_SCQE_FCP_TSTS HIFCOE_TASK_T_TSTS +#define HIFC_SCQE_GS_RSP HIFCOE_TASK_T_RCV_GS_RSP +#define HIFC_SCQE_ELS_RSP HIFCOE_TASK_T_RCV_ELS_RSP +#define HIFC_SCQE_ABTS_RSP HIFCOE_TASK_T_RCV_ABTS_RSP +#define HIFC_SCQE_ELS_RSP_STS HIFCOE_TASK_T_ELS_RSP_STS +#define HIFC_SCQE_ABTS_RSP_STS HIFCOE_TASK_T_ABTS_RSP_STS +#define HIFC_SCQE_ABORT_STS HIFCOE_TASK_T_ABORT_STS +#define HIFC_SCQE_SESS_EN_STS HIFCOE_TASK_T_SESS_EN_STS +#define HIFC_SCQE_SESS_DIS_STS HIFCOE_TASK_T_SESS_DIS_STS +#define HIFC_SCQE_SESS_DEL_STS HIFCOE_TASK_T_SESS_DEL_STS +#define HIFC_SCQE_SESS_RST_STS HIFCOE_TASK_T_SESS_RESET_STS +#define HIFC_SCQE_ITMF_MARKER_STS HIFCOE_TASK_T_ITMF_MARKER_STS +#define HIFC_SCQE_ABTS_MARKER_STS 
HIFCOE_TASK_T_ABTS_MARKER_STS +#define HIFC_SCQE_FLUSH_SQ_STS HIFCOE_TASK_T_FLUSH_SQ_STS +#define HIFC_SCQE_BUF_CLEAR_STS HIFCOE_TASK_T_BUFFER_CLEAR_STS +#define HIFC_SCQE_CLEAR_SRQ_STS HIFCOE_TASK_T_CLEAR_SRQ_STS + +#define HIFC_LOW_32_BITS(__addr) \ + ((unsigned int)((unsigned long long)(__addr) & 0xffffffff)) +#define HIFC_HIGH_32_BITS(__addr)\ + ((unsigned int)(((unsigned long long)(__addr) >> 32) & 0xffffffff)) + +/* Error Code from SCQ */ +#define HIFC_COMPLETION_STATUS_SUCCESS FCOE_CQE_COMPLETED +#define HIFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FCOE_IMMI_CMDPKT_SETUP_FAIL + +#define HIFC_COMPLETION_STATUS_TIMEOUT FCOE_ERROR_CODE_E_D_TIMER_EXPIRE +#define HIFC_COMPLETION_STATUS_DIF_ERROR FCOE_ERROR_CODE_DATA_DIFX_FAILED +#define HIFC_COMPLETION_STATUS_DATA_OOO FCOE_ERROR_CODE_DATA_OOO_RO +#define HIFC_COMPLETION_STATUS_DATA_OVERFLOW \ + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS + +#define HIFC_SCQE_INVALID_CONN_ID 0xffff +#define HIFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type) +#define HIFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code) +#define HIFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt) +#define HIFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id) +#define HIFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type) + +#define HIFC_WQE_IS_IO(wqe) \ + (HIFC_GET_WQE_TYPE(wqe) != HIFC_SQE_SESS_RST) + +#define HIFC_SCQE_HAS_ERRCODE(scqe) \ + (HIFC_GET_SCQE_STATUS(scqe) != HIFC_COMPLETION_STATUS_SUCCESS) + +#define HIFC_SCQE_ERR_TO_CM(scqe)\ + (HIFC_GET_SCQE_STATUS(scqe) != FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL) +#define HIFC_SCQE_CONN_ID_VALID(scqe) \ + (HIFC_GET_SCQE_CONN_ID(scqe) != HIFC_SCQE_INVALID_CONN_ID) + +#define HIFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */ +#define HIFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */ + +#define HIFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */ +#define HIFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */ +#define HIFC_WQE_SGE_LAST_FLAG 1 +#define HIFC_WQE_SGE_NOT_LAST_FLAG 0 +#define HIFC_WQE_SGE_EXTEND_FLAG 1 +#define HIFC_WQE_SGE_NOT_EXTEND_FLAG 0 + +#define HIFC_FCP_TMF_PORT_RESET 0 +#define HIFC_FCP_TMF_LUN_RESET 1 +#define HIFC_FCP_TMF_TGT_RESET 2 +#define HIFC_FCP_TMF_RSVD 3 +#define HIFC_NO_OFFLOAD 0 +#define HIFC_HAVE_OFFLOAD 1 +#define HIFC_QID_SQ 0 + +#define HIFC_ADJUST_DATA(old_val, new_val) ((old_val) = (new_val)) + +#define HIFC_GET_RESET_TYPE(tmf_flag, reset_flag) \ + do { \ + switch (tmf_flag) { \ + case UNF_FCP_TM_ABORT_TASK_SET: \ + case UNF_FCP_TM_LOGICAL_UNIT_RESET: \ + reset_flag = HIFC_FCP_TMF_LUN_RESET; \ + break; \ + case UNF_FCP_TM_TARGET_RESET: \ + reset_flag = HIFC_FCP_TMF_TGT_RESET; \ + break; \ + case UNF_FCP_TM_CLEAR_TASK_SET: \ + reset_flag = HIFC_FCP_TMF_PORT_RESET; \ + break; \ + default: \ + reset_flag = HIFC_FCP_TMF_RSVD; \ + } \ + } while (0) + +/* + * nic_wqe_ctrl_sec table define + */ +struct nic_wqe_ctrl_sec { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* marks ownership of WQE */ + u32 owner : 1; + /* Control Section Length */ + u32 ctrl_sec_len : 2; + /* Completion Section Length */ + u32 completion_sec_len : 2; + /* DIF Section Length */ + u32 dif_sec_len : 3; + /* + * Completion is Required - marks CQE generation request + * per WQE + */ + u32 cr : 1; + /* Data Format - format of BDS */ + u32 df : 1; + /* Virtual Address */ + u32 va : 1; + /* Task Section Length 
*/ + u32 task_sec_len : 5; + /* Completion Format */ + u32 cf : 1; + u32 wf : 1; + /* reserved */ + u32 rsvd : 4; + /* Driver Section Length */ + u32 drv_sec_len : 2; + /* Buffer Descriptors Section Length */ + u32 buf_desc_sec_len : 8; +#else + /* Buffer Descriptors Section Length */ + u32 buf_desc_sec_len : 8; + /* Driver Section Length */ + u32 drv_sec_len : 2; + /* reserved */ + u32 rsvd : 4; + u32 wf : 1; + /* Completion Format */ + u32 cf : 1; + /* Task Section Length */ + u32 task_sec_len : 5; + /* Virtual Address */ + u32 va : 1; + /* Data Format - format of BDS */ + u32 df : 1; + /* + * Completion is Required - marks CQE generation request + * per WQE + */ + u32 cr : 1; + /* DIF Section Length */ + u32 dif_sec_len : 3; + /* Completion Section Length */ + u32 completion_sec_len : 2; + /* Control Section Length */ + u32 ctrl_sec_len : 2; + /* marks ownership of WQE */ + u32 owner : 1; +#endif + } bs; + + u32 dw; + }; +}; + +/* + * nic_rq_sge_sec table define + */ +struct nic_rq_sge_sec { + /* packet buffer address high */ + u32 wb_addr_high; + /* packet buffer address low */ + u32 wb_addr_low; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd : 1; + /* SGE length */ + u32 length : 31; +#else + /* SGE length */ + u32 length : 31; + u32 rsvd : 1; +#endif + } bs0; + u32 dw0; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* 0:list,1:last */ + u32 list : 1; + /* 0:normal,1:pointer to next SGE */ + u32 extension : 1; + /* key or unsed */ + u32 key : 30; +#else + /* key or unsed */ + u32 key : 30; + /* 0:normal,1:pointer to next SGE */ + u32 extension : 1; + /* 0:list,1:last */ + u32 list : 1; +#endif + } bs1; + u32 dw1; + }; +}; + +/* + * nic_rq_bd_sec table define + */ +struct nic_rq_bd_sec { + /* packet buffer address high */ + u32 pkt_buf_addr_high; + /* packet buffer address low */ + u32 pkt_buf_addr_low; +}; + +/* + * nic_rq_wqe table define + */ +struct nic_rq_wqe { + struct nic_wqe_ctrl_sec rq_wqe_ctrl_sec; + u32 rsvd; + struct nic_rq_sge_sec rx_sge; + struct nic_rq_bd_sec pkt_buf_addr; +}; + +/* Link WQE structure */ +struct hifc_link_wqe_s { + union { + struct { + unsigned int rsv1 : 14; + unsigned int wf : 1; + unsigned int rsv2 : 14; + unsigned int ctrlsl : 2; + unsigned int o : 1; + } wd0; + u32 val_wd0; + }; + + union { + struct { + unsigned int msn : 16; + unsigned int dump_msn : 15; + /* lp means whether O bit is overturn */ + unsigned int lp : 1; + } wd1; + unsigned int val_wd1; + }; + + unsigned int next_page_addr_hi; + unsigned int next_page_addr_lo; +}; + +struct hifc_root_rq_complet_info_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int done : 1; /* done bit,ucode will set to 1 */ + unsigned int rsvd1 : 6; + unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */ + unsigned int rsvd2 : 24; +#else + unsigned int rsvd2 : 24; + unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */ + unsigned int rsvd1 : 6; + unsigned int done : 1; /* done bit,ucode will set to 1 */ +#endif + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short buf_length; + unsigned short exch_id; +#else + unsigned short exch_id; + unsigned short buf_length; +#endif + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short sts_only; /* If only CMPL SECTION */ + unsigned short status; /* 0:no err;!0:others */ +#else + unsigned short status; /* 0:no err;!0:others */ + unsigned short sts_only; /* If only CMPL SECTION */ +#endif + unsigned int magic_num; + unsigned int rsvd[4]; +}; + +/* Parent SQ WQE */ +struct 
hifc_root_sge_s { + unsigned int buf_addr_hi; + unsigned int buf_addr_lo; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int ext_flag : 1; + unsigned int buf_len : 31; +#else + unsigned int buf_len : 31; + unsigned int ext_flag : 1; +#endif + } wd0; + struct { + unsigned int rsvd; + } wd1; +}; + +/* Root SQ WQE Task Section structure for FC */ +struct hifc_root_sqe_task_section_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int task_type : 8; + /* 1:offload enable,0:offload disable. */ + unsigned int off_load : 1; + unsigned int port_id : 4; + unsigned int host_id : 2; + unsigned int rsvd1 : 1; + unsigned int exch_id : 16; +#else + unsigned int exch_id : 16; + unsigned int rsvd1 : 1; + unsigned int host_id : 2; + unsigned int port_id : 4; + unsigned int off_load : 1; + unsigned int task_type : 8; +#endif + } fc_dw0; + + union { + unsigned int context_gpa_hi; + unsigned int magic_num; + } fc_dw1; + + struct { + unsigned int context_gpa_lo; + } fc_dw2; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short scq_num; /* SCQ num */ + unsigned short rport_index; /* RPort */ +#else + unsigned short rport_index; /* RPort */ + unsigned short scq_num; /* SCQ num */ +#endif + } fc_dw3; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */ + unsigned int pkt_cos : 3; + unsigned int rsvd2 : 1; + unsigned int csize : 2; + unsigned int service_type : 5; + unsigned int parent_xid : 20; +#else + unsigned int parent_xid : 20; + unsigned int service_type : 5; + unsigned int csize : 2; + unsigned int rsvd2 : 1; + unsigned int pkt_cos : 3; /* pkt cos,4:ETH, 0:FC */ + unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */ +#endif + } fc_dw4; + + struct { + unsigned int rsvd; + } fc_dw5; + +}; + +/* Root SQ WQE */ +struct hifc_root_sqe_s { + /* Control Section */ + struct hifcoe_wqe_ctrl_s ctrl_section; + struct hifc_root_sqe_task_section_s task_section; + struct hifc_root_sge_s sge; + struct hifc_root_sge_s ctx_sge; +}; + +/* Parent SQ WQE and Root SQ WQE Related function */ +void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned int ts_size, + unsigned int bdsl); +void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts, + unsigned int rport_index, + unsigned short local_xid, + unsigned short remote_xid, + unsigned short data_len); +void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr, + unsigned long long v_phyaddr, + unsigned int buf_len, + unsigned int xid, void *v_hba); +void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + void *v_sq_info, unsigned short cmnd, + unsigned int v_scqn, void *v_frame_pld); + +void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + void *v_frame_pld, unsigned short type, + unsigned short cmnd, unsigned int v_scqn); +void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe, + unsigned short els_cmnd_type, + unsigned int v_magic_num); +void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int magic_num); +void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int abts_param, + unsigned int magic_num); +void hifc_build_service_wqe_root_ts(void *v_hba, + struct hifc_root_sqe_s *v_rt_sqe, + unsigned int rx_id, unsigned int rport_id, + unsigned int scq_num); +void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe, + void *v_buf_addr, + unsigned long long v_phyaddr, + unsigned int buf_len, + void *v_hba); +void 
hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe, + dma_addr_t ctx_addr, + unsigned int xid); +void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned short owner, + unsigned short pmsn); +void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe); +void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe); +void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts); +void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + unsigned char v_task_type, + unsigned short v_exi_base, + unsigned char v_port_idx); +void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe, + enum hifcoe_task_type_e task_type, + unsigned short rx_id); +void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, unsigned short owner, + unsigned short pmsn); +void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl, + unsigned char v_task_len); +void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe, + unsigned long long v_ctx_addr, + unsigned int buf_len); +void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifcoe_parent_context.h b/drivers/scsi/huawei/hifc/hifcoe_parent_context.h new file mode 100644 index 0000000000000000000000000000000000000000..91673338dd4a8bfea8a82e902696ccaf41bd0882 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifcoe_parent_context.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __HIFCOE_PARENT_CONTEXT_H__ +#define __HIFCOE_PARENT_CONTEXT_H__ + +enum fc_parent_status_e { + FCOE_PARENT_STATUS_INVALID = 0, + FCOE_PARENT_STATUS_NORMAL, + FCOE_PARENT_STATUS_CLOSING +}; + +#define HIFCOE_DOUBLE_SGL (1) +#define HIFCOE_SINGLE_SGL (0) + +#define HIFCOE_DIX_ALGORITHM_IP (1) +#define HIFCOE_DIX_ALGORITHM_CRC (0) + +#define HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE (48) + +#define HIFCOE_PARENT_CONTEXT_SRQ_QINFO_SIZE (8) +#define HIFCOE_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */ +#define HIFCOE_RQ_FILLED_OFFSET \ + ((u8)(u32)& \ + (((struct hifcoe_sw_section_s *)0x0)->occupy_by_rqe_filled_flag)) +#define HIFCOE_RW_LOCK_AREA_OFFSET \ + ((u8)(u32)&\ + (((struct hifcoe_sw_section_s *)0x0)->occupy_by_rw_lock_area)) + +/* "fqg_level_eventiq_info_s" should be care if MAX_EVENTIQ_LEVEL is larger + * than 4 + */ +#define MAX_EVENTIQ_LEVEL 4 +#define MAX_EVENTIQ_LEVEL_SHIFT 2 + +#define SP_FEATRUE_EDTR 0x1 +#define SP_FEATRUE_SEQ_CNT 0x2 + +#define MAX_PKT_SIZE_PER_DISPATCH (FC_PARENT_P->per_xmit_data_size) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF_4K \ + (MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 12) << 3)) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF_512B \ + (MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 9) << 3)) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF(shift) \ + (MAX_PKT_SIZE_PER_DISPATCH +\ + ((u32)((MAX_PKT_SIZE_PER_DISPATCH >> 9) >> (shift)) << 3)) + +/* immidiate data DIF info definition in parent context */ +struct immi_dif_info_s { + union { + u32 value; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 pdu_difx_cnt :8; + u32 sct_size :1;/* Sector size, 1: 4K; 0: 512 */ + u32 dif_verify_type :2; /* verify type */ + u32 dif_ins_rep_type:2; /* ins&rep type */ + u32 io_1st_pdu :1; + /* Check blocks whose application tag contains + * 0xFFFF flag + */ + u32 difx_app_esc :1; + u32 
difx_ref_esc :1; + /* + * Check blocks whose reference tag contains 0xFFFF flag + */ + u32 grd_ctrl :3; /* The DIF/DIX Guard control */ + /* Bit 0: DIF/DIX guard verify algorithm control */ + u32 grd_agm_ctrl :2; + /* + * Bit 1: DIF/DIX guard replace or insert algorithm + * control + */ + u32 grd_agm_ini_ctrl :3; + /* The DIF/DIX Reference tag control */ + u32 ref_tag_ctrl :3; + /* Bit 0: scenario of the reference tag verify mode */ + u32 ref_tag_mode :2; + /* + * Bit 1: scenario of the reference tag insert/replace + * mode + */ + /* 0: fixed; 1: increasement;*/ + u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */ + +#else + u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */ + /* Bit 0: scenario of the reference tag verify mode */ + u32 ref_tag_mode :2; + /* + * Bit 1: scenario of the reference tag insert/replace + * mode + */ + /* 0: fixed; 1: increasement;*/ + /* The DIF/DIX Reference tag control */ + u32 ref_tag_ctrl :3; + u32 grd_agm_ini_ctrl :3; + /* Bit 0: DIF/DIX guard verify algorithm control */ + u32 grd_agm_ctrl :2; + /* + * Bit 1: DIF/DIX guard replace or insert algorithm + * control + */ + u32 grd_ctrl :3; /* The DIF/DIX Guard control */ + /* + * Check blocks whose reference tag contains 0xFFFF flag + */ + u32 difx_ref_esc :1; + /* + * Check blocks whose application tag contains 0xFFFF + * flag + */ + u32 difx_app_esc :1; + u32 io_1st_pdu :1; + u32 dif_ins_rep_type:2; /* ins&rep type */ + u32 dif_verify_type :2; /* verify type */ + u32 sct_size :1; /* Sector size, 1: 4K; 0: 512 */ + u32 pdu_difx_cnt :8; + +#endif + } info; + } dif_dw3; + + union { + u32 value; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 difx_len :11; /* DIF/DIFX total length */ + u32 difx_en :1; /* DIF/DIFX enable flag */ + u32 rsv0 :4; + u32 dif_cnt :16; +#else + u32 dif_cnt :16; + u32 rsv0 :4; + u32 difx_en :1; /* DIF/DIFX enable flag */ + u32 difx_len :11; /* DIF/DIFX total length */ +#endif + } info; + } dif_other; + + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rep_app_tag :16; + u32 cmp_app_tag :16; + #else + u32 cmp_app_tag :16; + u32 rep_app_tag :16; + #endif + /* + * The ref tag value for verify compare, do not support replace or + * insert ref tag + */ + u32 cmp_ref_tag; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 cmp_app_tag_msk :16; + u32 rsv1 :16; +#else + u32 rsv1 :16; + u32 cmp_app_tag_msk :16; +#endif +}; + +/* parent context SW section definition: SW(80B) */ +struct hifcoe_sw_section_s { + /* RO fields */ + u32 scq_num_rcv_cmd; /* scq number used for cmd receive */ + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 xid; /* driver init */ +#else + struct { + u32 xid :13; + u32 vport :7; + u32 csctrl :8; + u32 rsvd0 :4; + } sw_ctxt_vport_xid; +#endif + u32 cid; /* ucode init */ + + u16 conn_id; + u16 immi_rq_page_size; + + u16 immi_taskid_min; + u16 immi_taskid_max; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 vlan_id : 16; /* Vlan ID */ + /* phycial port to receive and transmit packet. */ + u32 port_id : 4; + /* + * new srq offset. Ucode use new srq to receive els/gs with big payload. 
+ */ + u32 rsvd1 : 5; + u32 srr_support : 2; /* sequence retransmition support flag */ + u32 srv_type : 5; +#else + union { + u32 pctxt_val0; + struct { + u32 srv_type : 5; /* driver init */ + /* sequence retransmition support flag */ + u32 srr_support : 2; + u32 rsvd1 : 5; + u32 port_id : 4; /* driver init */ + u32 vlan_id : 16; /* driver init */ + } dw; + } sw_ctxt_misc; +#endif + + u16 oqid_rd; + u16 oqid_wr; + u32 per_xmit_data_size; + + /* RW fields */ + u32 cmd_scq_gpa_h; + u32 cmd_scq_gpa_l; + /* E_D_TOV timer value: value should be set on ms by driver */ + u32 e_d_tov_timer_val; + /* + * mfs unalined bytes of per 64KB dispatch; equal to + * "MAX_PKT_SIZE_PER_DISPATCH%info->parent->tx_mfs" + */ + u16 mfs_unaligned_bytes; + u16 tx_mfs; /* remote port max receive fc payload length */ + /* max data len allowed in xfer_rdy dis scenario*/ + u32 xfer_rdy_dis_max_len_remote; + u32 xfer_rdy_dis_max_len_local; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Double or single SGL, 1: double; 0: single */ + u32 sgl_num :1; + u32 write_xfer_rdy :1; /* WRITE Xfer_Rdy disable or enable */ + u32 rec_support :1; /* REC support flag */ + u32 conf_support :1; /* Response confirm support flag */ + u32 vlan_enable :1; /* Vlan enable flag */ + u32 e_d_tov :1; /* E_D_TOV Resolution, 0: ms, 1: us*/ + /* seq_cnt, 1: increament support, 0: increament not support */ + u32 seq_cnt :1; + /* 0:Target, 1:Initiator, 2:Target&Initiator */ + u32 work_mode :2; + /* used for parent context cache Consistency judgment,1: done*/ + u32 flush_done :1; + u32 oq_cos_cmd :3; /* esch oq cos for cmd/xferrdy/rsp */ + u32 oq_cos_data :3; /* esch oq cos for data */ + u32 cos :3; /* doorbell cos value */ + u32 status :8; /* status of flow*/ + u32 rsvd4 :2; + u32 priority :3; /* vlan priority */ +#else + union { + struct { + u32 priority : 3; /* vlan priority */ + u32 rsvd4 : 2; + u32 status : 8; /* status of flow*/ + u32 cos : 3; /* doorbell cos value */ + u32 oq_cos_data : 3; /* esch oq cos for data */ + /* esch oq cos for cmd/xferrdy/rsp */ + u32 oq_cos_cmd : 3; + /* + * used for parent context cache Consistency judgment, + * 1: done + */ + u32 flush_done : 1; + /* 0:Target, 1:Initiator, 2:Target&Initiator */ + u32 work_mode : 2; + u32 seq_cnt : 1; /* seq_cnt */ + u32 e_d_tov : 1; /* E_D_TOV resolution */ + u32 vlan_enable : 1; /* Vlan enable flag */ + /* Response confirm support flag */ + u32 conf_support : 1; + u32 rec_support : 1; /* REC support flag */ + /* WRITE Xfer_Rdy disable or enable */ + u32 write_xfer_rdy : 1; + /* Double or single SGL, 1: double; 0: single */ + u32 sgl_num : 1; + } dw; + u32 pctxt_val1; + } sw_ctxt_config; +#endif + /* immidiate data dif control info(20B) */ + struct immi_dif_info_s immi_dif_info; +}; + +struct hifcoe_hw_rsvd_queue_s { + /* bitmap[0]:255-192 */ + /* bitmap[1]:191-128 */ + /* bitmap[2]:127-64 */ + /* bitmap[3]:63-0 */ + u64 seq_id_bitmap[4]; + struct { + u64 last_req_seq_id : 8; + u64 xid : 20; + u64 rsvd0 : 36; + } wd0; +}; + +struct hifcoe_sq_qinfo_s { + u64 rsvd_0 : 10; + /* 0: get pmsn from queue header; 1: get pmsn from ucode */ + u64 pmsn_type : 1; + u64 rsvd_1 : 4; + u64 cur_wqe_o : 1; /* should be opposite from loop_o */ + u64 rsvd_2 : 48; + + u64 cur_sqe_gpa; + u64 pmsn_gpa; /* sq's queue header gpa */ + + u64 sqe_dmaattr_idx : 6; + u64 sq_so_ro : 2; + u64 rsvd_3 : 2; + u64 ring : 1; /* 0: link; 1: ring */ + u64 loop_o : 1; /* init to be the first round o-bit */ + u64 rsvd_4 : 4; + u64 zerocopy_dmaattr_idx : 6; + u64 zerocopy_so_ro : 2; + u64 parity : 8; + u64 rsvd_5 : 26; + 
u64 pcie_template : 6; +}; + +struct hifcoe_cq_qinfo_s { + u64 pcie_template_hi : 3; + u64 parity_2 : 1; + u64 cur_cqe_gpa : 60; + + u64 pi : 15; + u64 pi_o : 1; + u64 ci : 15; + u64 ci_o : 1; + /* if init_mode = 2, is msi/msi-x; other the low-5-bit means c_eqn */ + u64 c_eqn_msi_x : 10; + u64 parity_1 : 1; + /* 0: get ci from queue header; 1: get ci from ucode */ + u64 ci_type : 1; + u64 cq_depth : 3; /* valid when ring = 1 */ + u64 armq : 1; /* 0: IDLE state; 1: NEXT state */ + u64 cur_cqe_cnt : 8; + u64 cqe_max_cnt : 8; + + u64 cqe_dmaattr_idx : 6; + u64 cq_so_ro : 2; + u64 init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */ + u64 next_o : 1; /* next pate valid o-bit */ + u64 loop_o : 1; /* init to be the first round o-bit */ + u64 next_cq_wqe_page_gpa : 52; + + u64 pcie_template_lo : 3; + u64 parity_0 : 1; + u64 ci_gpa : 60; /* cq's queue header gpa */ +}; + +struct hifcoe_scq_qinfo_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + union { + struct { + u64 parity : 6; + u64 rq_th2_preld_cache_num : 5; + u64 rq_th1_preld_cache_num : 5; + u64 rq_th0_preld_cache_num : 5; + u64 rq_min_preld_cache_num : 4; + u64 sq_th2_preld_cache_num : 5; + u64 sq_th1_preld_cache_num : 5; + u64 sq_th0_preld_cache_num : 5; + u64 sq_min_preld_cache_num : 4; + u64 scq_n : 20; /* scq number */ + } info; + + u64 pctxt_val1; + } hw_scqc_config; +#else + union { + struct { + u64 scq_n : 20; /* scq number */ + u64 sq_min_preld_cache_num : 4; + u64 sq_th0_preld_cache_num : 5; + u64 sq_th1_preld_cache_num : 5; + u64 sq_th2_preld_cache_num : 5; + u64 rq_min_preld_cache_num : 4; + u64 rq_th0_preld_cache_num : 5; + u64 rq_th1_preld_cache_num : 5; + u64 rq_th2_preld_cache_num : 5; + u64 parity : 6; + } info; + + u64 pctxt_val1; + } hw_scqc_config; +#endif +}; + +struct hifcoe_srq_qinfo_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 srqc_gpa : 60; + u64 parity : 4; +#else + u64 parity : 4; + u64 srqc_gpa : 60; +#endif +}; + +/* here is the layout of service type 12/13 */ +struct hifcoe_parent_context_s { + u8 key[HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE]; + struct hifcoe_scq_qinfo_s resp_scq_qinfo; + struct hifcoe_srq_qinfo_s imm_srq_info; + struct hifcoe_sq_qinfo_s sq_qinfo; + u8 timer_section[HIFCOE_PARENT_CONTEXT_TIMER_SIZE]; + struct hifcoe_hw_rsvd_queue_s hw_rsvdq; + struct hifcoe_srq_qinfo_s els_srq_info; + struct hifcoe_sw_section_s sw_section; +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/hifcoe_wqe.h b/drivers/scsi/huawei/hifc/hifcoe_wqe.h new file mode 100644 index 0000000000000000000000000000000000000000..442a52be4580e14c127ebb2c8f1aaa83e789421b --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifcoe_wqe.h @@ -0,0 +1,1698 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __HIFCOE_WQE_H__ +#define __HIFCOE_WQE_H__ + +/* + * TASK TYPE: in order to compatible wiht EDA, please add new type before BUTT. 
+ */ +enum hifcoe_task_type_e { + HIFCOE_TASK_T_EMPTY = 0,/* SCQE TYPE: means task type not initialize */ + + HIFCOE_TASK_T_IWRITE = 1, /* SQE TYPE: ini send FCP Write Command */ + HIFCOE_TASK_T_IREAD = 2,/* SQE TYPE: ini send FCP Read Command */ + /* SCQE TYPE: ini recv fcp rsp for IREAD/IWRITE/ITMF*/ + HIFCOE_TASK_T_IRESP = 3, + HIFCOE_TASK_T_TCMND = 4,/* NA */ + HIFCOE_TASK_T_TREAD = 5,/* SQE TYPE: tgt send FCP Read Command */ + /* SQE TYPE: tgt send FCP Write Command (XFER_RDY) */ + HIFCOE_TASK_T_TWRITE = 6, + HIFCOE_TASK_T_TRESP = 7,/* SQE TYPE: tgt send fcp rsp of Read/Write*/ + HIFCOE_TASK_T_TSTS = 8, /* SCQE TYPE: tgt sts for TREAD/TWRITE/TRESP*/ + HIFCOE_TASK_T_ABTS = 9, /* SQE TYPE: ini send abts request Command */ + HIFCOE_TASK_T_IELS = 10,/* NA */ + HIFCOE_TASK_T_ITMF = 11,/* SQE TYPE: ini send tmf request Command */ + HIFCOE_TASK_T_CLEAN_UP = 12,/* NA */ + HIFCOE_TASK_T_CLEAN_UP_ALL = 13,/* NA */ + HIFCOE_TASK_T_UNSOLICITED = 14, /* NA */ + HIFCOE_TASK_T_ERR_WARN = 15,/* NA */ + HIFCOE_TASK_T_SESS_EN = 16, /* CMDQ TYPE: enable session */ + HIFCOE_TASK_T_SESS_DIS = 17,/* NA */ + HIFCOE_TASK_T_SESS_DEL = 18,/* NA */ + HIFCOE_TASK_T_RQE_REPLENISH = 19, /* NA */ + + HIFCOE_TASK_T_RCV_TCMND = 20, /* SCQE TYPE: tgt recv fcp cmd */ + HIFCOE_TASK_T_RCV_ELS_CMD = 21, /* SCQE TYPE: tgt recv els cmd */ + HIFCOE_TASK_T_RCV_ABTS_CMD = 22,/* SCQE TYPE: tgt recv abts cmd */ + /* SCQE TYPE: tgt recv immidiate data */ + HIFCOE_TASK_T_RCV_IMMIDIATE = 23, + /* + * SQE TYPE: send ESL rsp. PLOGI_ACC, PRLI_ACC will carry the parent + * context parameter indication. + */ + + HIFCOE_TASK_T_ELS_RSP = 24, + HIFCOE_TASK_T_ELS_RSP_STS = 25, /* SCQE TYPE: ELS rsp sts */ + + HIFCOE_TASK_T_ABTS_RSP = 26,/* CMDQ TYPE: tgt send abts rsp */ + HIFCOE_TASK_T_ABTS_RSP_STS = 27,/* SCQE TYPE: tgt abts rsp sts*/ + + HIFCOE_TASK_T_ABORT = 28, /* CMDQ TYPE: tgt send Abort Command */ + HIFCOE_TASK_T_ABORT_STS = 29, /* SCQE TYPE: Abort sts */ + + HIFCOE_TASK_T_ELS = 30, /* SQE TYPE: send ELS request Command */ + HIFCOE_TASK_T_RCV_ELS_RSP = 31, /* SCQE TYPE: recv ELS response */ + + HIFCOE_TASK_T_GS = 32, /* SQE TYPE: send GS request Command */ + HIFCOE_TASK_T_RCV_GS_RSP = 33, /* SCQE TYPE: recv GS response */ + + HIFCOE_TASK_T_SESS_EN_STS = 34, /* SCQE TYPE: enable session sts */ + HIFCOE_TASK_T_SESS_DIS_STS = 35,/* NA */ + HIFCOE_TASK_T_SESS_DEL_STS = 36,/* NA */ + + HIFCOE_TASK_T_RCV_ABTS_RSP = 37,/* SCQE TYPE: ini recv abts rsp */ + + HIFCOE_TASK_T_BUFFER_CLEAR = 38,/* CMDQ TYPE: Buffer Clear */ + HIFCOE_TASK_T_BUFFER_CLEAR_STS = 39,/* SCQE TYPE: Buffer Clear sts */ + HIFCOE_TASK_T_FLUSH_SQ = 40,/* CMDQ TYPE: flush sq */ + HIFCOE_TASK_T_FLUSH_SQ_STS = 41,/* SCQE TYPE: flush sq sts */ + + HIFCOE_TASK_T_SESS_RESET = 42, /* SQE TYPE: Reset session */ + HIFCOE_TASK_T_SESS_RESET_STS = 43, /* SCQE TYPE: Reset session sts */ + HIFCOE_TASK_T_RQE_REPLENISH_STS = 44, /* NA */ + HIFCOE_TASK_T_DUMP_EXCH = 45, /* CMDQ TYPE: dump exch */ + HIFCOE_TASK_T_INIT_SRQC = 46, /* CMDQ TYPE: init SRQC */ + HIFCOE_TASK_T_CLEAR_SRQ = 47, /* CMDQ TYPE: clear SRQ */ + HIFCOE_TASK_T_CLEAR_SRQ_STS = 48, /* SCQE TYPE: clear SRQ sts */ + HIFCOE_TASK_T_INIT_SCQC = 49, /* CMDQ TYPE: init SCQC */ + HIFCOE_TASK_T_DEL_SCQC = 50,/* CMDQ TYPE: delete SCQC */ + HIFCOE_TASK_T_TMF_RESP = 51,/* SQE TYPE: tgt send tmf rsp */ + HIFCOE_TASK_T_DEL_SRQC = 52,/* CMDQ TYPE: delete SRQC */ + /* SCQE TYPE: tgt recv continue immidiate data */ + HIFCOE_TASK_T_RCV_IMMI_CONTINUE = 53, + HIFCOE_TASK_T_ITMF_RESP = 54, /* SCQE TYPE: ini recv tmf rsp */ + 
HIFCOE_TASK_T_ITMF_MARKER_STS = 55,/* SCQE TYPE: tmf marker sts */ + HIFCOE_TASK_T_TACK = 56, + HIFCOE_TASK_T_SEND_AEQERR = 57, + HIFCOE_TASK_T_ABTS_MARKER_STS = 58,/* SCQE TYPE: abts marker sts */ + HIFCOE_TASK_T_FLR_CLEAR_IO = 59,/* FLR clear io type*/ + HIFCOE_TASK_T_BUTT +}; + +/* + * error code for error report + */ +enum hifcoe_err_code_e { + FCOE_CQE_COMPLETED = 0, /* Successful */ + FCOE_SESS_HT_INSERT_FAIL = 1,/* Offload fail: hash insert fail */ + FCOE_SESS_HT_INSERT_DUPLICATE = 2, /* Offload fail: duplicate offload */ + FCOE_SESS_HT_BIT_SET_FAIL = 3, /* Offload fail: bloom filter set fail */ + /* Offload fail: hash delete fail(duplicate delete) */ + FCOE_SESS_HT_DELETE_FAIL = 4, + FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED = 5, /* IO done in buffer clear */ + /* IO done in session rst mode=1 */ + FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED = 6, + /* IO done in session rst mode=3 */ + FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED = 7, + FCOE_CQE_TMF_RSP_IO_COMPLETED = 8, /* IO done in tgt tmf rsp */ + FCOE_CQE_TMF_IO_COMPLETED = 9, /* IO done in ini tmf */ + FCOE_CQE_DRV_ABORT_IO_COMPLETED = 10,/* IO done in tgt abort */ + /* IO done in fcp rsp process. Used for the sceanrio: + * 1.abort before cmd + * 2.send fcp rsp directly after recv cmd + */ + FCOE_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED = 11, + /* IO done in fcp cmd process. Used for the sceanrio: + * 1.abort before cmd + * 2.child setup fail + */ + FCOE_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED = 12, + FCOE_CQE_WQE_FLUSH_IO_COMPLETED = 13,/* IO done in FLUSH SQ */ + /* fcp data format check: DIFX check error */ + FCOE_ERROR_CODE_DATA_DIFX_FAILED = 14, + /* fcp data format check: task_type is not read */ + FCOE_ERROR_CODE_DATA_TASK_TYPE_INCORRECT = 15, + /* fcp data format check: data offset is not continuous */ + FCOE_ERROR_CODE_DATA_OOO_RO = 16, + /* fcp data format check: data is over run */ + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS = 17, + /* fcp rsp format check: payload is too short */ + FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD = 18, + /* fcp rsp format check: fcp_conf need, but exch don't hold seq + * initiative + */ + FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET = 19, + /* fcp rsp format check: fcp_conf is required, but it's the last seq */ + FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ = 20, + /* xfer rdy format check: payload is too short */ + FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE = 21, + /* xfer rdy format check: last data out havn't finished */ + FCOE_ERROR_CODE_XFER_PEND_XFER_SET = 22, + /* xfer rdy format check: data offset is not continuous */ + FCOE_ERROR_CODE_XFER_OOO_RO = 23, + /* xfer rdy format check: burst len is 0 */ + FCOE_ERROR_CODE_XFER_NULL_BURST_LEN = 24, + FCOE_ERROR_CODE_REC_TIMER_EXPIRE = 25, /* Timer expire: REC_TIMER */ + FCOE_ERROR_CODE_E_D_TIMER_EXPIRE = 26, /* Timer expire: E_D_TIMER */ + FCOE_ERROR_CODE_ABORT_TIMER_EXPIRE = 27,/* Timer expire: Abort timer */ + /* Abort IO magic number mismatch */ + FCOE_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH = 28, + /* RX immidiate data cmd pkt child setup fail */ + FCOE_IMMI_CMDPKT_SETUP_FAIL = 29, + /* RX fcp data sequence id not equal */ + FCOE_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL = 30, + FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL = 31,/* ELS/GS exch info check fail */ + FCOE_CQE_ELS_GS_SRQE_GET_FAIL = 32, /* ELS/GS process get SRQE fail */ + FCOE_CQE_DATA_DMA_REQ_FAIL = 33, /* SMF soli-childdma rsp error */ + FCOE_CQE_SESSION_CLOSED = 34,/* Session is closed */ + FCOE_SCQ_IS_FULL = 35, /* SCQ is full */ + FCOE_SRQ_IS_FULL = 36, /* SRQ is full */ + FCOE_ERROR_DUCHILDCTX_SETUP_FAIL = 37, /* dpchild 
ctx setup fail */ + FCOE_ERROR_INVALID_TXMFS = 38, /* invalid txmfs */ + /* offload fail,lack of SCQE,through AEQ */ + FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL = 39, + FCOE_ERROR_INVALID_TASK_ID = 40, /* tx invlaid task id */ + FCOE_ERROR_INVALID_PKT_LEN = 41, /* tx els gs pakcet len check */ + FCOE_CQE_ELS_GS_REQ_CLR_IO_COMPLETED = 42, /* IO done in els gs tx */ + FCOE_CQE_ELS_RSP_CLR_IO_COMPLETED = 43, /* IO done in els rsp tx */ + FCOE_ERROR_CODE_RESID_UNDER_ERR = 44 /* FCP RSP RESID ERROR */ +}; + +/* AEQ EVENT TYPE */ +enum hifcoe_aeq_evt_type_e { + /* + * SCQ and SRQ not enough, HOST will initiate a operation to associated + * SCQ/SRQ + */ + FC_AEQ_EVENT_QUEUE_ERROR = 48, + /* WQE MSN check error,HOST will reset port */ + FC_AEQ_EVENT_WQE_FATAL_ERROR = 49, + /* serious chip error, HOST will reset chip */ + FC_AEQ_EVENT_CTX_FATAL_ERROR = 50, + FC_AEQ_EVENT_OFFLOAD_ERROR = 51, + + FC_FC_AEQ_EVENT_TYPE_LAST +}; + +enum hifcoe_aeq_evt_err_code_e { + /* detail type of resource lack */ + FC_SCQ_IS_FULL_ERR = 0, + FC_SRQ_IS_FULL_ERR, + + /* detail type of FC_AEQ_EVENT_WQE_FATAL_ERROR */ + FC_SQE_CHILD_SETUP_WQE_MSN_ERR = 2, + FC_SQE_CHILD_SETUP_WQE_GPA_ERR, + FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1, + FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2, + FC_CLEAEQ_WQE_ERR, + FC_WQEFETCH_WQE_MSN_ERR, + FC_WQEFETCH_QUINFO_ERR, + + /* detail type of FC_AEQ_EVENT_CTX_FATAL_ERROR */ + FC_SCQE_ERR_BIT_ERR = 9, + FC_UPDMA_ADDR_REQ_SRQ_ERR, + FC_SOLICHILDDMA_ADDR_REQ_ERR, + FC_UNSOLICHILDDMA_ADDR_REQ_ERR, + FC_SQE_CHILD_SETUP_QINFO_ERR_1, + FC_SQE_CHILD_SETUP_QINFO_ERR_2, + FC_CMDPKT_CHILD_SETUP_QINFO_ERR_1, + FC_CMDPKT_CHILD_SETUP_QINFO_ERR_2, + FC_CMDPKT_CHILD_SETUP_PMSN_ERR, + FC_CLEAEQ_CTX_ERR, + FC_WQEFETCH_CTX_ERR, + FC_FLUSH_QPC_ERR_LQP, + FC_FLUSH_QPC_ERR_SMF, + FC_PREFETCH_QPC_ERR_1, + FC_PREFETCH_QPC_ERR_2, + FC_PREFETCH_QPC_ERR_3, + FC_PREFETCH_QPC_ERR_4, + FC_PREFETCH_QPC_ERR_5, + FC_PREFETCH_QPC_ERR_6, + FC_PREFETCH_QPC_ERR_7, + FC_PREFETCH_QPC_ERR_8, + FC_PREFETCH_QPC_ERR_9, + FC_PREFETCH_QPC_ERR_10, + FC_PREFETCH_QPC_ERR_11, + FC_PREFETCH_QPC_ERR_DEFAULT, + FC_CHILDHASH_INSERT_SW_ERR, + FC_CHILDHASH_LOOKUP_SW_ERR, + FC_CHILDHASH_DEL_SW_ERR, + FC_FLOWHASH_INSERT_SW_ERR, + FC_FLOWHASH_LOOKUP_SW_ERR, + FC_FLOWHASH_DEL_SW_ERR, + FC_FLUSH_QPC_ERR_USED, + FC_FLUSH_QPC_ERR_OUTER_LOCK, + + FC_AEQ_EVT_ERR_CODE_BUTT + +}; + +/* AEQ data structure */ +struct hifcoe_aqe_data_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 evt_code: 8; + u32 rsvd: 8; + u32 conn_id : 16; /* conn_id */ + #else + u32 conn_id : 16; + u32 rsvd: 8; + u32 evt_code: 8; + #endif + } wd0; + + u32 data0; + }; + + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 xid : 20; /* xid */ + #else + u32 xid : 20; /* xid */ + u32 rsvd: 12; + #endif + } wd1; + + u32 data1; + }; +}; + +/* Control Section: Common Header */ +struct hifcoe_wqe_ctrl_ch_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 owner : 1; + u32 ctrl_sl : 2; + u32 csl : 2; + u32 dif_sl: 3; + u32 cr: 1; + u32 df: 1; + u32 va: 1; + u32 tsl : 5; + u32 cf: 1; + u32 wf: 1; + u32 rsvd0 : 4; + u32 drv_sl: 2; + u32 bdsl : 8; + #else + u32 bdsl : 8; + u32 drv_sl: 2; + u32 rsvd0 : 4; + u32 wf: 1; + u32 cf: 1; + u32 tsl : 5; + u32 va: 1; + u32 df: 1; + u32 cr: 1; + u32 dif_sl: 3; + u32 csl : 2; + u32 ctrl_sl: 2; + u32 owner : 1; + #endif + } wd0; + + u32 ctrl_ch_val; + }; + +}; + +/* Control Section: Queue Specific Field */ +struct hifcoe_wqe_ctrl_qsf_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 dump_wqe_sn : 
16; + u32 wqe_sn:16; + #else + u32 wqe_sn:16; + u32 dump_wqe_sn : 16; + #endif +}; + +/* DIF info definition in WQE */ +struct hifcoe_fc_dif_info_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* difx enable flag:1'b0: disable;1'b1: enable */ + u32 difx_en : 1; + /* + * sector size:1'b0: sector size is 512B.1'b1: sector size is + * 4KB. + */ + u32 sct_size : 1; + u32 difx_len : 11; + /* + * The DIFX verify type: 2'b00: Type0, 2'b01: Type 1, 2'b10: + * Type 2, 2'b11: Type 3 + */ + u32 dif_verify_type : 2; + /* + * The DIFX insert and replace type: 2'b00: Type0, 2'b01: Type 1 + * , 2'b10: Type 2, 2'b11: Type 3 + */ + u32 dif_ins_rep_type : 2; + u32 difx_app_esc : 1; + u32 difx_ref_esc : 1; + u32 grd_ctrl : 3; + u32 grd_agm_ctrl : 2; + u32 grd_agm_ini_ctrl : 3; + u32 ref_tag_ctrl : 3; + u32 ref_tag_mode : 2; + #else + u32 ref_tag_mode : 2; + u32 ref_tag_ctrl : 3; + u32 grd_agm_ini_ctrl : 3; + u32 grd_agm_ctrl : 2; + u32 grd_ctrl : 3; + u32 difx_ref_esc : 1; + u32 difx_app_esc : 1; + u32 dif_ins_rep_type : 2; + u32 dif_verify_type : 2; + u32 difx_len : 11; + u32 sct_size : 1; + u32 difx_en : 1; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 app_tag_ctrl : 3; + u32 vpid : 7; + u32 lun_qos_en : 2; + u32 rsvd : 4; + u32 cmp_app_tag_msk : 16; + #else + u32 cmp_app_tag_msk : 16; + u32 rsvd : 4; + u32 lun_qos_en : 2; + u32 vpid : 7; + u32 app_tag_ctrl : 3; + #endif + } wd1; + + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 rep_app_tag; + u16 cmp_app_tag; + #else + u16 cmp_app_tag; + u16 rep_app_tag; + #endif + + u32 cmp_ref_tag; + u32 rep_ref_tag; + +}; + +/* Task Section: TMF SQE for INI */ +struct hifcoe_tmf_info_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end :16; + #else + u32 reset_exch_end :16; + u32 reset_exch_start :16; + #endif + } bs; + u32 value; + } w0; + + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :5; + u32 marker_sts :1; + u32 reset_type :2; + u32 reset_did :24; + #else + u32 reset_did :24; + u32 reset_type :2; + u32 marker_sts :1; + u32 rsvd0 :5; + #endif + } bs; + u32 value; + } w1; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :8; + u32 reset_sid :24; +#else + u32 reset_sid :24; + u32 rsvd0 :8; +#endif + } bs; + u32 value; + } w2; + + u8 reset_lun[8]; +}; + +/* Task Section: CMND SQE for INI */ +struct hifcoe_sqe_icmnd_s { + u8 fcp_cmnd_iu[48]; + union { + struct hifcoe_fc_dif_info_s dif_info; + struct hifcoe_tmf_info_s tmf; + } info; + + u32 magic_num; + u32 rsp_gpa_hi; + u32 rsp_gpa_lo; +}; + +/* Task Section: ABTS SQE */ +struct hifcoe_sqe_abts_s { + u32 fh_parm_abts; + u32 magic_num; +}; + +struct hifcoe_keys_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsv : 16; + u32 smac0 : 8; + u32 smac1 : 8; +#else + u32 smac1 : 8; + u32 smac0 : 8; + u32 rsv : 16; +#endif + } wd0; + + u8 smac[4]; + + u8 dmac[6]; + u8 sid[3]; + u8 did[3]; + + u32 svlan; + u32 cvlan; +}; + +/* BDSL: Session Enable WQE */ +/* keys field only use 26 bytes room */ +struct hifcoe_cmdqe_sess_en_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 : 12; + u32 cid : 20; + #else + u32 cid : 20; + u32 rsvd1 : 12; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 scqn :16; + u32 conn_id :16; + #else + u32 
conn_id :16; + u32 scqn :16; + #endif + } wd2; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 :12; + u32 xid_p :20; +#else + u32 xid_p :20; + u32 rsvd3 :12; +#endif + } wd3; + + u32 context_gpa_hi; + u32 context_gpa_lo; + struct hifcoe_keys_s keys; +}; + +/* Control Section */ +struct hifcoe_wqe_ctrl_s { + struct hifcoe_wqe_ctrl_ch_s ch; + struct hifcoe_wqe_ctrl_qsf_s qsf; +}; + +struct hifcoe_sqe_els_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * ELS RSP packet payload. ELS RSP payload GPA is store in BDSL, ucode + * use child setup to send data(do not include fc_hdr/eth_hdr) + */ + u32 data_len:16; + u32 echo_flag :16; + #else + u32 echo_flag :16; + u32 data_len:16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Para Update:drv indicate Parent Context para need to be + * update or not. + * 00---no update + * 01---send PLOGI_ACC, need to updata Port para + * 10---send PRLI_ACC, need to updata process para + * 11---Reserved + */ + u32 para_update :2; + u32 clr_io :1; + u32 lp_bflag:1; /* use for loopback */ + u32 rsvd1 :28; + #else + u32 rsvd1 :28; + u32 lp_bflag:1; + u32 clr_io :1; + u32 para_update :2; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 tx_mfs :16; + u32 rsvd2 :14; + u32 e_d_tov :1; + u32 seq_cnt :1; + #else + u32 seq_cnt :1; + u32 e_d_tov :1; + u32 rsvd2 :14; + u32 tx_mfs :16; + #endif + } wd2; + + u32 e_d_tov_timer_val; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 immi_taskid_start:16; + u32 immi_taskid_cnt :13; + u32 xfer_dis:1; + u32 rec :1; + u32 conf:1; + #else + u32 conf:1; + u32 rec :1; + u32 xfer_dis:1; + u32 immi_taskid_cnt :13; + u32 immi_taskid_start:16; + #endif + } wd4; + + u32 first_burst_len; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd6; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd7; + + u32 magic_num; + u32 magic_local; + u32 magic_remote; + u32 ts_rcv_echo_req; +}; + +struct hifcoe_sqe_reset_session_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:6; + /* + * 1: clean io; + * 2: delete session; + * 3: clean io&delete session + */ + u32 mode:2; + u32 reset_did :24; + #else + u32 reset_did :24; + u32 mode:2; + u32 rsvd:6; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:8; + u32 reset_sid :24; + #else + u32 reset_sid :24; + u32 rsvd:8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd3; +}; + +struct hifcoe_sqe_t_els_gs_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * ELS/GS req packet payload. ELS/GS payload GPA is store in BDSL, + * ucode use child setup to send data(do not include fc_hdr/eth_hdr) + */ + u16 data_len; + u16 echo_flag; /* echo flag */ + #else + u16 echo_flag; + u16 data_len; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Para Update: drv indicate Parent Context para need to be + * update or not. 
+ * 00---no update + * 01---send PRLI Req, need to updata Port para + * 10---Reserved + * 11---Reserved + */ + u32 para_update :2; + u32 clr_io :1; + u32 lp_bflag:1; /* use for loopback */ + u32 rec_support :1; + u32 rec_flag:1; + u32 orign_oxid :16; + u32 rsvd1 :10; + #else + u32 rsvd1 :10; + u32 orign_oxid :16; + u32 rec_flag:1; + u32 rec_support :1; + u32 lp_bflag:1; + u32 clr_io :1; + u32 para_update :2; + #endif + } wd4; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 tx_mfs :16; + u32 rsvd2 :14; + u32 e_d_tov :1; + u32 seq_cnt :1; + #else + u32 seq_cnt :1; + u32 e_d_tov :1; + u32 rsvd2 :14; + u32 tx_mfs :16; + #endif + } wd5; + + u32 e_d_tov_timer_val; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd6; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd7; + + u32 magic_num; +}; + +struct hifcoe_sqe_els_gs_elsrsp_comm_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 data_len; + u16 rsvd; + #else + u16 rsvd; + u16 data_len; + #endif +}; + +/* SQE Task Section's Contents except Common Header */ +union hifcoe_sqe_ts_cont_u { + struct hifcoe_sqe_icmnd_s icmnd; + struct hifcoe_sqe_abts_s abts; + struct hifcoe_sqe_els_rsp_s els_rsp; + struct hifcoe_sqe_t_els_gs_s t_els_gs; + struct hifcoe_sqe_els_gs_elsrsp_comm_s els_gs_elsrsp_comm; + struct hifcoe_sqe_reset_session_s reset_session; + u32 value[16]; +}; + +struct hifcoe_sqe_ts_s { + /* SQE Task Section's Common Header */ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type :8; + u32 rsvd:5; /* used for loopback saving bdsl's num */ + /* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */ + u32 cdb_type:1; + /* standard immidiate data flag, use with local-xid for initiator */ + u32 immi_std:1; + /* + * CRC err inject flag: drv set, and ucode use for send first packet of + * WQE + */ + u32 crc_inj :1; + u32 local_xid :16; /* local exch_id */ + #else + u32 local_xid :16; + u32 crc_inj :1; + u32 immi_std:1; + /* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */ + u32 cdb_type:1; + u32 rsvd:5; /* used for loopback saving bdsl's num */ + u32 task_type :8; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 remote_xid; /* remote exch_id */ + u16 conn_id; + #else + u16 conn_id; + u16 remote_xid; + #endif + } wd0; + + union hifcoe_sqe_ts_cont_u cont; +}; + +struct hifcoe_constant_sge_s { + u32 buf_addr_hi; + u32 buf_addr_lo; +}; + +struct hifcoe_variable_sge_s { + u32 buf_addr_hi; + u32 buf_addr_lo; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 r_flag :1; + u32 buf_len :31; + #else + u32 buf_len :31; + u32 r_flag :1; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_flag :1; + u32 extension_flag :1; + u32 xid : 14; + u32 buf_addr_gpa: 16; + #else + u32 buf_addr_gpa: 16; + u32 xid : 14; + u32 extension_flag :1; + u32 last_flag :1; + #endif + } wd1; +}; + +/* SQE, should not be over 128B */ +struct hifcoe_sqe_s { + struct hifcoe_wqe_ctrl_s ctrl_sl; + struct hifcoe_sqe_ts_s ts_sl; + struct hifcoe_variable_sge_s sge[2]; +}; + +struct hifcoe_rqe_ctrl_s { + struct hifcoe_wqe_ctrl_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 dump_wqe_msn; + u16 wqe_msn; + #else + u16 wqe_msn; + u16 dump_wqe_msn; + #endif + } wd0; +}; + +struct hifcoe_rqe_drv_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * User ID[15:0], 
15 bits valid and User ID[15] is fix to 0 + */ + u32 user_id :16; + u32 rsvd0 :16; + #else + u32 rsvd0 :16; + u32 user_id :16; + #endif + } wd0; + + u32 rsvd1; +}; + +/* RQE,should not be over 32B */ +struct hifcoe_rqe_s { + struct hifcoe_rqe_ctrl_s ctrl_sl; + u32 cqe_gpa_h; + u32 cqe_gpa_l; + struct hifcoe_constant_sge_s bds_sl; + struct hifcoe_rqe_drv_s drv_sl; +}; + +struct hifcoe_cmdqe_abts_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsp_type:1; /* 0:BA_ACC, 1:BA_RJT */ + u32 payload_len :7; + u32 port_id :4; + u32 rsvd1 :4; + u32 ox_id :16; + #else + u32 ox_id :16; + u32 rsvd1 :4; + u32 port_id :4; + u32 payload_len :7; + u32 rsp_type:1; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 scqn: 16; + u32 conn_id : 16; + #else + u32 conn_id : 16; + u32 scqn: 16; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 xid : 20; + #else + u32 xid : 20; + u32 rsvd: 12; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 cid : 20; + #else + u32 cid : 20; + u32 rsvd: 12; + #endif + } wd4; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 16; + u32 req_rx_id : 16; + #else + u32 req_rx_id : 16; + u32 rsvd: 16; + #endif + } wd5; + + /* payload length is according to rsp_type:1DWORD or 3DWORD */ + u32 payload[3]; +}; + +struct hifcoe_cmdqe_buffer_clear_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_type:8; + u32 rsvd0 :8; + u32 rsvd1 :16; + #else + u32 rsvd1 :16; + u32 rsvd0 :8; + u32 wqe_type:8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rx_id_start :16; + u32 rx_id_end :16; + #else + u32 rx_id_end :16; + u32 rx_id_start :16; + #endif + } wd1; + + u32 scqn; + u32 wd3; +}; + +struct hifcoe_cmdqe_flush_sq_info_s { + u32 cid; + u32 xid; +}; + +struct hifcoe_cmdqe_flush_sq_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_type :8; + u32 sq_qid :8; + u32 entry_count :16; + #else + u32 entry_count :16; + u32 sq_qid :8; + u32 wqe_type :8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_wqe:1; + u32 pos :11; + u32 port_id:4; + u32 scqn:16; + #else + u32 scqn:16; + u32 port_id :4; + u32 pos :11; + u32 last_wqe:1; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 pkt_ptr :16; + u32 rsvd:16; + #else + u32 rsvd:16; + u32 pkt_ptr :16; + #endif + } wd2; + + struct hifcoe_cmdqe_flush_sq_info_s sq_info_entry[0]; +}; + +struct hifcoe_cmdqe_creat_srqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; + + u32 srqc[16];/* srqc_size=64B */ + +}; + +struct hifcoe_cmdqe_delete_srqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; +}; + +struct hifcoe_cmdqe_clr_srq_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) 
+ /* + * 0: SRQ for recv ELS; + * 1: SRQ for recv immidiate data + */ + u32 srq_type: 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 srq_type: 16; +#endif + } wd1; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; +}; + +struct hifcoe_cmdqe_creat_scqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 : 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 rsvd2 : 16; +#endif + } wd1; + + u32 scqc[16];/* scqc_size=64B */ + +}; + +struct hifcoe_cmdqe_delete_scqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 : 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 rsvd2 : 16; +#endif + } wd1; +}; + +struct hifcoe_sqe_t_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 fill:2; /* 2bit of F_CTL[1:0] */ + u32 conf:1; /* Wait INI confirm, 0: disable, 1:enable */ + /* + * 0: payload area store payload, + * 1: payload area store payload GPA + */ + u32 mode:1; + u32 immi:1; + u32 rsvd0 :3; + u32 fcp_rsp_len :8; /* FCP_RESP payload(24~96B)*/ + u32 rsvd1 :16; + #else + u32 rsvd1 :16; + u32 fcp_rsp_len :8; + u32 rsvd0 :3; + u32 immi:1; + u32 mode:1; + u32 conf:1; + u32 fill:2; + #endif + } wd0; + + u32 magic_num; + u32 hotpooltag; + + union { + struct { + u32 addr_h; + u32 addr_l; + } gpa; + + struct { + u32 data[25]; /* FCP_RESP payload buf, 100B rsvd */ + } buf; + + } payload; + +}; + +struct hifcoe_sqe_tresp_ts_s { + /* SQE Task Section's Common Header */ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u8 task_type; + u8 rsvd0; + u16 local_xid; + #else + u16 local_xid; + u8 rsvd0; + u8 task_type; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 remote_xid; + u16 conn_id; + #else + u16 conn_id; + u16 remote_xid; + #endif + } wd0; + + struct hifcoe_sqe_t_rsp_s t_rsp; +}; + +/* SQE for fcp response, max TSL is 120B*/ +struct hifcoe_sqe_tresp_s { + struct hifcoe_wqe_ctrl_s ctrl_sl; + struct hifcoe_sqe_tresp_ts_s ts_sl; +}; + +/* SCQE Common Header */ +struct hifcoe_scqe_ch_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 owner : 1; + u32 err_code: 7; + u32 cqe_remain_cnt : 3; + u32 rsvd0 : 13; + u32 task_type : 8; + #else + u32 task_type : 8; + u32 rsvd0 : 13; + u32 cqe_remain_cnt : 3; + u32 err_code: 7; + u32 owner : 1; + #endif + } wd0; +}; + +struct hifcoe_scqe_type_s { + struct hifcoe_scqe_ch_s ch; + + u32 rsvd0; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 rsvd4; + u16 conn_id; +#else + u16 conn_id; + u16 rsvd4; +#endif + + u32 rsvd1[12]; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :31; + u32 done:1; + #else + u32 done:1; + u32 rsvd3 :31; + #endif + } wd0; +}; + +struct hifcoe_scqe_sess_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :12; + u32 xid_qpn :20; + #else + u32 xid_qpn :20; + u32 rsvd1 :12; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd3 :16; + #endif + } wd1; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :12; + u32 cid :20; +#else + u32 cid :20; + u32 rsvd2 :12; +#endif + } wd2; + + u64 bloomfilter_id; /* valid only in session offload */ + +}; + +struct 
hifcoe_scqe_comm_rsp_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :16; + #endif + } wd1; + + u32 magic_num; +}; + +struct hifcoe_scqe_iresp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 dif_info:5; + u32 rsvd0 :11; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :11; + u32 dif_info:5; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 fcp_flag:8; + u32 scsi_status :8; + #else + u32 scsi_status :8; + u32 fcp_flag:8; + u32 rsvd0 :16; + #endif + } wd2; + + u32 fcp_resid; + u32 fcp_sns_len; + u32 fcp_rsp_len; + u32 magic_num; +}; + +struct hifcoe_scqe_rcv_abts_rsp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0:24; + u32 fh_rctrl :8; + #else + u32 fh_rctrl :8; + u32 rsvd0:24; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :8; + u32 did :24; + #else + u32 did :24; + u32 rsvd1 :8; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd2 :8; + #endif + } wd4; + + /* payload length is according to fh_rctrl:1DWORD or 3DWORD */ + u32 payload[3]; + u32 magic_num; + +}; + +struct hifcoe_scqe_rcv_els_cmd_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :8; + u32 did :24; + #else + u32 did :24; + u32 rsvd0 :8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd1 :8; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd2; + + struct{ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 data_len :16;/* ELS cmd Payload length */ + u32 user_id_num :16;/* current used user_id num */ + #else + u32 user_id_num :16; + u32 data_len :16; + #endif + } wd3; + + u32 user_id[9]; /* User ID of SRQ SGE, used for drvier buffer release */ + u32 ts; +}; + +struct hifcoe_scqe_rcv_els_gs_rsp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 data_len:16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 data_len:16; /* ELS/GS RSP Payload length */ + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :1; + u32 echo_rsp:1; + u32 rsvd:6; + u32 did :24; + #else + u32 did :24; + u32 rsvd:6; + u32 echo_rsp:1; + u32 end_rsp :1; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 user_id_num :8; + u32 sid :24; + #else + u32 sid :24; + u32 user_id_num :8; + #endif + } wd4; + + 
u32 magic_num; + u32 user_id[9]; +}; + +struct hifcoe_scqe_rcv_flush_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_flush : 8; + u32 port_id: 8; + u32 rsvd0 : 16; +#else + u32 rsvd0 : 16; + u32 port_id: 8; + u32 last_flush : 8; +#endif + } wd0; +}; + +struct hifcoe_scqe_rcv_clear_buf_sts_s { + struct hifcoe_scqe_ch_s ch; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 port_id: 8; + u32 rsvd0 : 24; +#else + u32 rsvd0 : 24; + u32 port_id: 8; +#endif + } wd0; +}; + +struct hifcoe_scqe_itmf_marker_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :8; + u32 did :24; + #else + u32 did :24; + u32 end_rsp :8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1:8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd1:8; + #endif + } wd3; +}; + +struct hifcoe_scqe_abts_marker_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :8; + u32 did :24; + #else + u32 did :24; + u32 end_rsp :8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 io_state :8; + u32 sid :24; + #else + u32 sid :24; + u32 io_state :8; + #endif + } wd3; +}; + +/* SCQE, should not be over 64B */ +union hifcoe_scqe_u { + struct hifcoe_scqe_type_s common; + /* session enable/disable/delete sts */ + struct hifcoe_scqe_sess_sts_s sess_sts; + /* aborts/abts_rsp/els rsp sts */ + struct hifcoe_scqe_comm_rsp_sts_s comm_sts; + struct hifcoe_scqe_rcv_clear_buf_sts_s clear_sts;/* clear buffer sts */ + struct hifcoe_scqe_rcv_flush_sts_s flush_sts; /* flush sq sts */ + struct hifcoe_scqe_iresp_s iresp; + struct hifcoe_scqe_rcv_abts_rsp_s rcv_abts_rsp; /* recv abts rsp*/ + struct hifcoe_scqe_rcv_els_cmd_s rcv_els_cmd;/* recv els cmd */ + struct hifcoe_scqe_rcv_els_gs_rsp_s rcv_els_gs_rsp;/* recv els/gs rsp */ + struct hifcoe_scqe_itmf_marker_sts_s itmf_marker_sts;/* tmf marker */ + struct hifcoe_scqe_abts_marker_sts_s abts_marker_sts;/* abts marker */ +}; + +struct hifcoe_cmdqe_type_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; +}; + +/* CMDQE, variable length */ +union hifc_cmdqe_u { + struct hifcoe_cmdqe_type_s common; + struct hifcoe_cmdqe_sess_en_s session_enable; + struct hifcoe_cmdqe_abts_rsp_s snd_abts_rsp; + struct hifcoe_cmdqe_buffer_clear_s buffer_clear; + struct hifcoe_cmdqe_flush_sq_s flush_sq; + struct hifcoe_cmdqe_creat_srqc_s create_srqc; + struct hifcoe_cmdqe_delete_srqc_s delete_srqc; + struct hifcoe_cmdqe_clr_srq_s clear_srq; + struct hifcoe_cmdqe_creat_scqc_s create_scqc; + struct hifcoe_cmdqe_delete_scqc_s delete_scqc; +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_common.h b/drivers/scsi/huawei/hifc/unf_common.h new file mode 100644 index 0000000000000000000000000000000000000000..9253cc3eada531a85cd8a8c704ebcf0bcce52079 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_common.h @@ -0,0 +1,1893 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef 
__UNF_COMMON_H +#define __UNF_COMMON_H + +#include "unf_scsi_common.h" + +/* V/C version number */ +#define UNF_MAJOR_VERSION "3" +/* B version, B0XX Corresponding x.x */ +#define UNF_B_VERSION "5.0" +/* Indicates the minor version number of the driver */ +#define UNF_DRIVER_VERSION "12" +/* version num */ +#define UNF_FC_VERSION UNF_MAJOR_VERSION "." UNF_B_VERSION "." UNF_DRIVER_VERSION +extern unsigned int unf_dbg_level; +extern unsigned int hifc_dif_type; +extern unsigned int hifc_dif_enable; +extern unsigned char hifc_guard; + +#define RETURN_ERROR_S32 (-1) +#define UNF_RETURN_ERROR_S32 (-1) + +#define UNF_IO_SUCCESS 0x00000000 +/* the host system aborted the command */ +#define UNF_IO_ABORTED 0x00000001 +#define UNF_IO_FAILED 0x00000002 +#define UNF_IO_ABORT_ABTS 0x00000003 +#define UNF_IO_ABORT_LOGIN 0x00000004 /* abort login */ +/* reset event aborted the transport */ +#define UNF_IO_ABORT_REET 0x00000005 +#define UNF_IO_ABORT_FAILED 0x00000006 /* abort failed */ +/* data out of order ,data reassembly error */ +#define UNF_IO_OUTOF_ORDER 0x00000007 +#define UNF_IO_FTO 0x00000008 /* frame time out */ +#define UNF_IO_LINK_FAILURE 0x00000009 +#define UNF_IO_OVER_FLOW 0x0000000a /* data over run */ +#define UNF_IO_RSP_OVER 0x0000000b +#define UNF_IO_LOST_FRAME 0x0000000c +#define UNF_IO_UNDER_FLOW 0x0000000d /* data under run */ +#define UNF_IO_HOST_PROG_ERROR 0x0000000e +#define UNF_IO_SEST_PROG_ERROR 0x0000000f +#define UNF_IO_INVALID_ENTRY 0x00000010 +#define UNF_IO_ABORT_SEQ_NOT 0x00000011 +#define UNF_IO_REJECT 0x00000012 +#define UNF_IO_RS_INFO 0x00000013 +#define UNF_IO_EDC_IN_ERROR 0x00000014 +#define UNF_IO_EDC_OUT_ERROR 0x00000015 +#define UNF_IO_UNINIT_KEK_ERR 0x00000016 +#define UNF_IO_DEK_OUTOF_RANGE 0x00000017 +#define UNF_IO_KEY_UNWRAP_ERR 0x00000018 +#define UNF_IO_KEY_TAG_ERR 0x00000019 +#define UNF_IO_KEY_ECC_ERR 0x0000001a +#define UNF_IO_BLOCK_SIZE_ERROR 0x0000001b +#define UNF_IO_ILLEGAL_CIPHER_MODE 0x0000001c +#define UNF_IO_CLEAN_UP 0x0000001d +#define UNF_SRR_RECEIVE 0x0000001e /* receive srr */ +/* The target device sent an ABTS to abort the I/O. 
*/ +#define UNF_IO_ABORTED_BY_TARGET 0x0000001f +#define UNF_IO_TRANSPORT_ERROR 0x00000020 +#define UNF_IO_LINK_FLASH 0x00000021 +#define UNF_IO_TIMEOUT 0x00000022 +#define UNF_IO_PORT_UNAVAILABLE 0x00000023 +#define UNF_IO_PORT_LOGOUT 0x00000024 +#define UNF_IO_PORT_CFG_CHG 0x00000025 +#define UNF_IO_FIRMWARE_RES_UNAVAILABLE 0x00000026 +#define UNF_IO_TASK_MGT_OVERRUN 0x00000027 +#define UNF_IO_DMA_ERROR 0x00000028 +#define UNF_IO_DIF_ERROR 0x00000029 +#define UNF_IO_NO_LPORT 0x0000002a +#define UNF_IO_NO_XCHG 0x0000002b +#define UNF_IO_SOFT_ERR 0x0000002c +#define UNF_IO_XCHG_ADD_ERROR 0x0000002d +#define UNF_IO_NO_LOGIN 0x0000002e +#define UNF_IO_NO_BUFFER 0x0000002f +#define UNF_IO_DID_ERROR 0x00000030 +#define UNF_IO_UNSUPPORT 0x00000031 +#define UNF_IO_NOREADY 0x00000032 +#define UNF_IO_NPORTID_REUSED 0x00000033 +#define UNF_IO_NPORT_HANDLE_REUSED 0x00000034 +#define UNF_IO_NO_NPORT_HANDLE 0x00000035 +#define UNF_IO_ABORT_BY_FW 0x00000036 +#define UNF_IO_ABORT_PORT_REMOVING 0x00000037 +#define UNF_IO_INCOMPLETE 0x00000038 +#define UNF_IO_DIF_REF_ERROR 0x00000039 +#define UNF_IO_DIF_GEN_ERROR 0x0000003a + +#define UNF_IO_ERREND 0xFFFFFFFF + +/* define bits */ +#define UNF_BIT(n) (0x1UL << (n)) +#define UNF_BIT_0 UNF_BIT(0) +#define UNF_BIT_1 UNF_BIT(1) +#define UNF_BIT_2 UNF_BIT(2) +#define UNF_BIT_3 UNF_BIT(3) +#define UNF_BIT_4 UNF_BIT(4) +#define UNF_BIT_5 UNF_BIT(5) + +struct buff_list_s { + u8 *vaddr; + dma_addr_t paddr; +}; + +struct buf_describe_s { + struct buff_list_s *buflist; + u32 buf_size; + u32 buf_num; +}; + +#define BUF_LIST_PAGE_SIZE (PAGE_SIZE << 8) + +/* Echo macro define */ +#define ECHO_MG_VERSION_LOCAL 1 +#define ECHO_MG_VERSION_REMOTE 2 + +/* save hba info macro define */ +#define SAVE_PORT_INFO_LEN 1016 + +#define UNF_GET_NAME_HIGH_WORD(v_name) \ + (((v_name) >> 32) & 0xffffffff) +#define UNF_GET_NAME_LOW_WORD(v_name) \ + ((v_name) & 0xffffffff) + +#define UNF_FIRST_LPORT_ID_MASK 0xffffff00 +#define HIFC_MAX_COUNTER_TYPE 128 + +#define UNF_EVENT_ASYN 0 +#define UNF_EVENT_SYN 1 +#define UNF_GLOBAL_EVENT_ASYN 2 +#define UNF_GLOBAL_EVENT_SYN 3 + +/* define sfp err */ +#define UNF_SFP_PRESENT_FAIL 0x1 +#define UNF_SFP_POWER_FAIL 0x2 +#define UNF_9545_FAIL 0x3 + +/* obtain the values of board type and ID */ +#define UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(port_id) \ + (((port_id) & 0x00FF00) >> 8) + +#define UNF_FC_SERVER_BOARD_8_G 13 /* 8G mode */ +#define UNF_FC_SERVER_BOARD_16_G 7 /* 16G mode */ +#define UNF_FC_SERVER_BOARD_32_G 6 /* 32G mode */ + +#define UNF_PORT_TYPE_FC_QSFP 1 +#define UNF_PORT_TYPE_FC_SFP 0 +#define UNF_PORT_UNGRADE_FW_RESET_ACTIVE 0 +#define UNF_PORT_UNGRADE_FW_RESET_INACTIVE 1 + +#ifndef __BIG_ENDIAN__ +#define __BIG_ENDIAN__ 0x4321 +#endif + +#ifndef __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN__ 0x1234 +#endif + +#ifdef __BYTE_ORDER__ +#undef __BYTE_ORDER__ +#endif +#define __BYTE_ORDER__ __LITTLE_ENDIAN__ + +#ifndef INVALID_VALUE64 +#define INVALID_VALUE64 0xFFFFFFFFFFFFFFFFULL +#endif /* INVALID_VALUE64 */ + +#ifndef INVALID_VALUE32 +#define INVALID_VALUE32 0xFFFFFFFF +#endif /* INVALID_VALUE32 */ + +#ifndef INVALID_VALUE16 +#define INVALID_VALUE16 0xFFFF +#endif /* INVALID_VALUE16 */ + +#ifndef INVALID_VALUE8 +#define INVALID_VALUE8 0xFF +#endif /* INVALID_VALUE8 */ + +#ifndef RETURN_OK +#define RETURN_OK 0 +#endif + +#ifndef RETURN_ERROR +#define RETURN_ERROR (~0) +#endif +#define UNF_RETURN_ERROR (~0) + +#ifndef UNF_RETURN_NOT_SUPPORT +#define UNF_RETURN_NOT_SUPPORT (2) +#endif + +enum int_e { + UNF_FALSE = 0, + UNF_TRUE = 1 +}; + 
+#define DRV_DIF_CRC_ERR 0x1001 +#define DRV_DIF_LBA_ERR 0x1002 +#define DRV_DIF_APP_ERR 0x1003 + +#define UNF_SCSI_SENSE_DATA_LEN SCSI_SENSE_DATA_LEN + +/* RPort Management information related to Rport, + * only used at the boundary between common and lowlevel + */ +struct unf_rport_info_s { + unsigned int local_nport_id; + unsigned int nport_id; + unsigned int rport_index; + unsigned long long port_name; + unsigned char rsvd0[3]; +}; + +struct unf_cfg_item_s { + char *name; + unsigned int min_value; + unsigned int default_value; + unsigned int max_value; +}; + +struct unf_port_params_s { + unsigned int ra_tov; + unsigned int ed_tov; +}; + +/* get wwpn adn wwnn */ +struct unf_get_chip_info_argout { + unsigned char board_type; + unsigned long long wwpn; + unsigned long long wwnn; + unsigned long long sys_mac; +}; + +/* get sfp info: present and speed */ +struct unf_get_port_info_argout { + unsigned char sfp_speed; + unsigned char present; + unsigned char rsvd[2]; +}; + +/* SFF-8436(QSFP+) Rev 4.7 */ +struct sfp_plus_field_a0_s { + unsigned char identifier; + /* offset 1~2 */ + struct { + unsigned char reserved; + unsigned char status; + } status_indicator; + /* offset 3~21 */ + struct { + unsigned char rx_tx_los; + unsigned char tx_fault; + unsigned char all_resv; + + unsigned char ini_complete : 1; + unsigned char bit_resv : 3; + unsigned char temp_low_warn : 1; + unsigned char temp_high_warn : 1; + unsigned char temp_low_alarm : 1; + unsigned char temp_high_alarm : 1; + + unsigned char resv : 4; + unsigned char vcc_low_warn : 1; + unsigned char vcc_high_warn : 1; + unsigned char vcc_low_alarm : 1; + unsigned char vcc_high_alarm : 1; + + unsigned char resv8; + unsigned char rx_pow[2]; + unsigned char tx_bias[2]; + unsigned char reserved[6]; + unsigned char vendor_specifics[3]; + } interrupt_flag; + /* offset 22~33 */ + struct { + unsigned char temp[2]; + unsigned char reserved[2]; + unsigned char supply_vol[2]; + unsigned char reserveds[2]; + unsigned char vendor_specific[4]; + } module_monitors; + /* offset 34~81 */ + struct { + unsigned char rx_pow[8]; + unsigned char tx_bias[8]; + unsigned char reserved[16]; + unsigned char vendor_specific[16]; + } channel_monitor_val; + + /* offset 82~85 */ + unsigned char reserved[4]; + + /* offset 86~97 */ + struct { + /* 86~88 */ + unsigned char tx_disable; + unsigned char rx_rate_select; + unsigned char tx_rate_select; + + /* 89~92 */ + unsigned char rx_4_app_select; + unsigned char rx_3_app_select; + unsigned char rx_2_app_select; + unsigned char rx_1_app_select; + /* 93 */ + unsigned char power_override : 1; + unsigned char power_set : 1; + unsigned char reserved : 6; + + /* 94~97 */ + unsigned char tx_4_app_select; + unsigned char tx_3_app_select; + unsigned char tx_2_app_select; + unsigned char tx_1_app_select; + /* 98~99 */ + unsigned char auc_reserved[2]; + } control; + /* 100~106 */ + struct { + /* 100 */ + unsigned char mrx_1_os : 1; + unsigned char mrx_2_los : 1; + unsigned char mrx_3_los : 1; + unsigned char mrx_4_los : 1; + unsigned char mtx_1_los : 1; + unsigned char mtx_2_los : 1; + unsigned char mtx_3_los : 1; + unsigned char mtx_4_los : 1; + /* 101 */ + unsigned char mtx_1_fault : 1; + unsigned char mtx_2_fault : 1; + unsigned char mtx_3_fault : 1; + unsigned char mtx_4_fault : 1; + unsigned char reserved : 4; + /* 102 */ + unsigned char uc_reserved; + /* 103 */ + unsigned char mini_cmp_flag : 1; + unsigned char rsv : 3; + unsigned char mtemp_low_warn : 1; + unsigned char mtemp_high_warn : 1; + unsigned char mtemp_low_alarm : 1; + 
unsigned char mtemp_high_alarm : 1; + /* 104 */ + unsigned char rsv1 : 4; + unsigned char mvcc_low_warn : 1; + unsigned char mvcc_high_warn : 1; + unsigned char mvcc_low_alarm : 1; + unsigned char mvcc_high_alarm : 1; + /* 105~106 */ + unsigned char vendor_specific[2]; + } module_channel_mask_bit; + /* 107~118 */ + unsigned char auc_resv[12]; + /* 119~126 */ + unsigned char auc_reserved[8]; + /* 127 */ + unsigned char page_select; +}; + +/* page 00 */ +struct sfp_plus_field_00_s { + /* 128~191 */ + struct { + unsigned char id; + unsigned char id_ext; + unsigned char connector; + unsigned char speci_com[6]; + unsigned char mode; + unsigned char speed; + unsigned char encoding; + unsigned char br_nominal; + unsigned char ext_rate_select_com; + unsigned char length_smf; + unsigned char length_om3; + unsigned char length_om2; + unsigned char length_om1; + unsigned char length_copper; + unsigned char device_tech; + unsigned char vendor_name[16]; + unsigned char ex_module; + unsigned char vendor_oui[3]; + unsigned char vendor_pn[16]; + unsigned char vendor_rev[2]; + /* Wave length or Copper cable Attenuation */ + unsigned char wave_or_copper_attenuation[2]; + unsigned char wave_length_toler[2]; /* Wavelength tolerance */ + unsigned char max_temp; + unsigned char cc_base; + } base_id_fields; + /* 192~223 */ + struct { + unsigned char options[4]; + unsigned char vendor_sn[16]; + unsigned char date_code[8]; + unsigned char diagn_monit_type; + unsigned char enhance_opt; + unsigned char uc_reserved; + unsigned char ccext; + } ext_id_fields; + /* 224~255 */ + unsigned char vendor_spec_eeprom[32]; +}; + +/* page 01 */ +struct sfp_field_01_s { + unsigned char optiona_l01[128]; +}; + +/* page 02 */ +struct sfp_field_02_s { + unsigned char optiona_l02[128]; +}; + +/* page 03 */ +struct sfp_field_03_s { + unsigned char temp_high_alarm[2]; + unsigned char temp_low_alarm[2]; + unsigned char temp_high_warn[2]; + unsigned char temp_low_warn[2]; + + unsigned char reserved1[8]; + + unsigned char vcc_high_alarm[2]; + unsigned char vcc_low_alarm[2]; + unsigned char vcc_high_warn[2]; + unsigned char vcc_low_warn[2]; + + unsigned char reserved2[8]; + unsigned char vendor_specific1[16]; + + unsigned char pow_high_alarm[2]; + unsigned char pow_low_alarm[2]; + unsigned char pow_high_warn[2]; + unsigned char pow_low_warn[2]; + + unsigned char bias_high_alarm[2]; + unsigned char bias_low_alarm[2]; + unsigned char bias_high_warn[2]; + unsigned char bias_low_warn[2]; + + unsigned char tx_power_high_alarm[2]; + unsigned char tx_power_low_alarm[2]; + unsigned char reserved3[4]; + + unsigned char reserved4[8]; + + unsigned char vendor_specific2[16]; + unsigned char reserved5[2]; + unsigned char vendor_specific3[12]; + unsigned char rx_ampl[2]; + unsigned char rx_tx_sq_disable; + unsigned char rx_output_disable; + unsigned char chan_monit_mask[12]; + unsigned char reserved6[2]; + +}; + +struct sfp_plus_info_s { + struct sfp_plus_field_a0_s sfp_plus_info_a0; + struct sfp_plus_field_00_s sfp_plus_info_00; + struct sfp_field_01_s sfp_plus_info_01; + struct sfp_field_02_s sfp_plus_info_02; + struct sfp_field_03_s sfp_plus_info_03; +}; + +/* SFF-8472 Rev 10.4 */ +struct unf_sfp_data_field_a0_s { + /* Offset 0~63 */ + struct { + unsigned char id; + unsigned char id_ext; + unsigned char connector; + unsigned char atransceiver[8]; + unsigned char encoding; + /* Nominal signalling rate, units of 100MBd. 
*/ + unsigned char br_nominal; + /* Type of rate select functionality */ + unsigned char rate_identifier; + /* Link length supported for single mode fiber, units of km */ + unsigned char length_smf_km; + /* Link length supported for single mode fiber, + * units of 100 m + */ + unsigned char length_smf; + /* Link length supported for 50 um OM2 fiber, units of 10 m */ + unsigned char length_smf_om2; + /* Link length supported for 62.5 um OM1 fiber, units of 10 m */ + unsigned char length_smf_om1; + /* Link length supported for copper or direct attach cable, + * units of m + */ + unsigned char length_cable; + /* Link length supported for 50 um OM3 fiber, units of 10 m */ + unsigned char length_om3; + unsigned char vendor_name[16]; /* ASCII */ + /* Code for electronic or optical compatibility */ + unsigned char transceiver; + unsigned char vendor_oui[3]; /* SFP vendor IEEE company ID */ + /* Part number provided by SFP vendor (ASCII) */ + unsigned char vendor_pn[16]; + /* Revision level for part number provided by vendor (ASCII) */ + unsigned char vendor_rev[4]; + /* Laser wavelength (Passive/Active Cable + * Specification Compliance) + */ + unsigned char wave_length[2]; + unsigned char unallocated; + /* Check code for Base ID Fields (addresses 0 to 62) */ + unsigned char cc_base; + } base_id_fields; + + /* Offset 64~95 */ + struct { + unsigned char options[2]; + unsigned char br_max; + unsigned char br_min; + unsigned char vendor_sn[16]; + unsigned char date_code[8]; + unsigned char diag_monitoring_type; + unsigned char enhanced_options; + unsigned char sff8472_compliance; + unsigned char cc_ext; + } ext_id_fields; + + /* Offset 96~255 */ + struct { + unsigned char vendor_spec_eeprom[32]; + unsigned char rsvd[128]; + } vendor_spec_id_fields; +}; + +struct unf_sfp_data_field_a2_s { + /* Offset 0~119 */ + struct { + /* 0~39 */ + struct { + unsigned char temp_alarm_high[2]; + unsigned char temp_alarm_low[2]; + unsigned char temp_warning_high[2]; + unsigned char temp_warning_low[2]; + + unsigned char vcc_alarm_high[2]; + unsigned char vcc_alarm_low[2]; + unsigned char vcc_warning_high[2]; + unsigned char vcc_warning_low[2]; + + unsigned char bias_alarm_high[2]; + unsigned char bias_alarm_low[2]; + unsigned char bias_warning_high[2]; + unsigned char bias_warning_low[2]; + + unsigned char tx_alarm_high[2]; + unsigned char tx_alarm_low[2]; + unsigned char tx_warning_high[2]; + unsigned char tx_warning_low[2]; + + unsigned char rx_alarm_high[2]; + unsigned char rx_alarm_low[2]; + unsigned char rx_warning_high[2]; + unsigned char rx_warning_low[2]; + } alarm_warn_th; + + unsigned char unallocated0[16]; + unsigned char ext_cal_constants[36]; + unsigned char unallocated1[3]; + unsigned char cc_dmi; + + /* 96~105 */ + struct { + unsigned char temp[2]; + unsigned char vcc[2]; + unsigned char tx_bias[2]; + unsigned char tx_power[2]; + unsigned char rx_power[2]; + } diag; + + unsigned char unallocated2[4]; + + struct { + unsigned char data_rdy_bar_state : 1; + unsigned char rx_los : 1; + unsigned char tx_fault_state : 1; + unsigned char soft_rate_select_state : 1; + unsigned char rate_select_state : 1; + unsigned char rs_state : 1; + unsigned char soft_tx_disable_select : 1; + unsigned char tx_disable_state : 1; + } status_ctrl; + unsigned char rsvd; + + /* 112~113 */ + struct { + /* 112 */ + unsigned char tx_alarm_low : 1; + unsigned char tx_alarm_high : 1; + unsigned char tx_bias_alarm_low : 1; + unsigned char tx_bias_alarm_high : 1; + unsigned char vcc_alarm_low : 1; + unsigned char vcc_alarm_high : 1; + 
unsigned char temp_alarm_low : 1; + unsigned char temp_alarm_high : 1; + + /* 113 */ + unsigned char rsvd : 6; + unsigned char rx_alarm_low : 1; + unsigned char rx_alarm_high : 1; + } alarm; + + unsigned char unallocated3[2]; + + /* 116~117 */ + struct { + /* 116 */ + unsigned char tx_warn_lo : 1; + unsigned char tx_warn_hi : 1; + unsigned char bias_warn_lo : 1; + unsigned char bias_warn_hi : 1; + unsigned char vcc_warn_lo : 1; + unsigned char vcc_warn_hi : 1; + unsigned char temp_warn_lo : 1; + unsigned char temp_warn_hi : 1; + + /* 117 */ + unsigned char rsvd : 6; + unsigned char rx_warn_lo : 1; + unsigned char rx_warn_hi : 1; + } warning; + + unsigned char ext_status_and_ctrl[2]; + } diag; + + /* Offset 120~255 */ + struct { + unsigned char vendor_spec[8]; + unsigned char user_eeprom[120]; + unsigned char vendor_ctrl[8]; + } general_use_fields; +}; + +struct unf_sfp_info_s { + struct unf_sfp_data_field_a0_s sfp_info_a0; + struct unf_sfp_data_field_a2_s sfp_info_a2; +}; + +union unf_sfp_eeprome_info { + struct unf_sfp_info_s sfp_info; + struct sfp_plus_info_s sfp_plus_info; +}; + +/* sfp info end */ +struct unf_lport_sfp_info { + unsigned int status; + union unf_sfp_eeprome_info sfp_eeprom_info; +}; + +struct unf_err_code_s { + unsigned int loss_of_signal_count; + unsigned int bad_rx_char_count; + unsigned int loss_of_sync_count; + unsigned int link_fail_count; + unsigned int rx_eo_fa_count; + unsigned int dis_frame_count; + unsigned int bad_crc_count; + unsigned int proto_error_count; +}; + +/* config file */ +enum unf_scsi_mode_e { + UNF_PORT_MODE_UNKNOWN = 0x00, + UNF_PORT_MODE_TGT = 0x10, + UNF_PORT_MODE_INI = 0x20, + UNF_PORT_MODE_BOTH = 0x30 +}; + +enum unf_port_upgrade_e { + UNF_PORT_UNSUPPORT_UPGRADE_REPORT = 0x00, + UNF_PORT_SUPPORT_UPGRADE_REPORT = 0x01, + UNF_PORT_UPGRADE_BUTT +}; + +#define UNF_BYTES_OF_DWORD 0x4 +static inline void __attribute__((unused)) unf_big_end_to_cpu( + unsigned char *v_buffer, unsigned int v_size) +{ + unsigned int *buffer = NULL; + unsigned int word_sum = 0; + unsigned int i = 0; + + if (!v_buffer) + return; + + buffer = (unsigned int *)v_buffer; + + /* byte to word */ + if (v_size % UNF_BYTES_OF_DWORD == 0) + word_sum = v_size / UNF_BYTES_OF_DWORD; + else + return; + + /* word to byte */ + while (i < word_sum) { + *buffer = be32_to_cpu(*buffer); + buffer++; + i++; + } +} + +static inline void __attribute__((unused)) unf_cpu_to_big_end( + void *v_buffer, unsigned int v_size) +{ +#define DWORD_BIT 32 +#define BYTE_BIT 8 + unsigned int *buffer = NULL; + unsigned int word_sum = 0; + unsigned int i = 0; + unsigned int tmp = 0; + + if (!v_buffer) + return; + + buffer = (unsigned int *)v_buffer; + + /* byte to dword */ + word_sum = v_size / 4; + + /* dword to byte */ + while (i < word_sum) { + *buffer = cpu_to_be32(*buffer); + buffer++; + i++; + } + + if (v_size % 4) { + tmp = cpu_to_be32(*buffer); + tmp = tmp >> (DWORD_BIT - (v_size % 4) * BYTE_BIT); + memcpy(buffer, &tmp, (v_size % 4)); + } +} + +#define UNF_FUNCTION_RETURN_CHECK(ret, dstlen) \ + do { \ + if (((ret) <= 0) || ((ret) >= (dstlen))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_REG_ATT, UNF_ERR, \ + "function return (%d) check invalid, dst len(%d).", \ + (ret), (dstlen)); \ + } \ + } while (0) + +#define UNF_TOP_AUTO_MASK 0x0f + +#define UNF_NORMAL_MODE 0 +#define UNF_SET_NOMAL_MODE(mode) (mode = UNF_NORMAL_MODE) + +/* + * SCSI status + */ +#define SCSI_CHECK_CONDITION 0x02 + +enum unf_act_topo_e { + UNF_ACT_TOP_PUBLIC_LOOP = 0x1, + UNF_ACT_TOP_PRIVATE_LOOP = 0x2, + 
UNF_ACT_TOP_P2P_DIRECT = 0x4, + UNF_ACT_TOP_P2P_FABRIC = 0x8, + UNF_TOP_LOOP_MASK = 0x03, + UNF_TOP_P2P_MASK = 0x0c, + UNF_TOP_FCOE_MASK = 0x30, + UNF_ACT_TOP_UNKNOWN +}; + +#define UNF_FL_PORT_LOOP_ADDR 0x00 + +#define UNF_FC_PROTOCOL_TYPE 0x100 + +#define UNF_LOOP_ROLE_MASTER_OR_SLAVE 0x0 + +#define UNF_TOU16_CHECK(dest, src, over_action) \ + do { \ + if (unlikely((src) > 0xFFFF)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, \ + UNF_ERR, "ToU16 error, src 0x%x ", (src)); \ + over_action; \ + } \ + ((dest) = (unsigned short)(src)); \ + } while (0) + +#define UNF_PORT_SPEED_AUTO 0 +#define UNF_PORT_SPEED_2_G 2 +#define UNF_PORT_SPEED_4_G 4 +#define UNF_PORT_SPEED_8_G 8 +#define UNF_PORT_SPEED_10_G 10 +#define UNF_PORT_SPEED_16_G 16 +#define UNF_PORT_SPEED_32_G 32 + +#define UNF_PORT_SPEED_UNKNOWN (~0) +#define UNF_PORT_SFP_SPEED_ERR 0xFF + +#define UNF_FW_VERSION_LEN 32 +#define UNF_HW_VERSION_LEN 32 + +/* max frame size */ +#define UNF_MAX_FRAME_SIZE 2112 + +/* default */ +#define UNF_DEFAULT_FRAME_SIZE 2048 +#define UNF_DEFAULT_EDTOV 2000 +#define UNF_DEFAULT_RATOV 10000 +#define UNF_DEFAULT_FABRIC_RATOV 10000 +#define UNF_MAX_RETRY_COUNT 3 +#define UNF_DEFAULT_RRTOV (10000 + 500) /* FCP-4 10.4.10 */ +#define UNF_RRQ_MIN_TIMEOUT_INTERVAL 30000 +#define UNF_LOGO_TIMEOUT_INTERVAL 3000 +#define UNF_WRITE_RRQ_SENDERR_INTERVAL 3000 +#define UNF_REC_TOV 3000 + +#define UNF_WAIT_SEM_TIMEOUT (5000UL) +#define UNF_WAIT_ABTS_RSP_TIMEOUT (20000UL) + +#define UNF_INI_RRQ_REDUNDANT_TIME 500 +#define UNF_INI_ELS_REDUNDANT_TIME 2000 + +/* ELS command values */ +#define UNF_ELS_CMND_HIGH_MASK 0xff000000 +#define UNF_ELS_CMND_RJT 0x01000000 +#define UNF_ELS_CMND_ACC 0x02000000 +#define UNF_ELS_CMND_PLOGI 0x03000000 +#define UNF_ELS_CMND_FLOGI 0x04000000 +#define UNF_ELS_CMND_LOGO 0x05000000 +#define UNF_ELS_CMND_RLS 0x0F000000 +#define UNF_ELS_CMND_ECHO 0x10000000 +#define UNF_ELS_CMND_REC 0x13000000 +#define UNF_ELS_CMND_RRQ 0x12000000 +#define UNF_ELS_CMND_PRLI 0x20000000 +#define UNF_ELS_CMND_PRLO 0x21000000 +#define UNF_ELS_CMND_PDISC 0x50000000 +#define UNF_ELS_CMND_FDISC 0x51000000 +#define UNF_ELS_CMND_ADISC 0x52000000 +#define UNF_ELS_CMND_FAN 0x60000000 +#define UNF_ELS_CMND_RSCN 0x61000000 +#define UNF_FCP_CMND_SRR 0x14000000 +#define UNF_GS_CMND_SCR 0x62000000 + +#define UNF_PLOGI_VERSION_UPPER 0x20 +#define UNF_PLOGI_VERSION_LOWER 0x20 +#define UNF_PLOGI_CONCURRENT_SEQ 0x00FF +#define UNF_PLOGI_RO_CATEGORY 0x00FE +#define UNF_PLOGI_SEQ_PER_XCHG 0x0001 + +/* CT_IU pream defines */ +#define UNF_REV_NPORTID_INIT 0x01000000 +#define UNF_FSTYPE_OPT_INIT 0xfc020000 +#define UNF_FSTYPE_RFT_ID 0x02170000 +#define UNF_FSTYPE_GID_PT 0x01A10000 +#define UNF_FSTYPE_GID_FT 0x01710000 +#define UNF_FSTYPE_RFF_ID 0x021F0000 +#define UNF_FSTYPE_GFF_ID 0x011F0000 +#define UNF_FSTYPE_GNN_ID 0x01130000 +#define UNF_FSTYPE_GPN_ID 0x01120000 + +#define UNF_CT_IU_RSP_MASK 0xffff0000 +#define UNF_CT_IU_REASON_MASK 0x00ff0000 +#define UNF_CT_IU_EXPLAN_MASK 0x0000ff00 +#define UNF_CT_IU_REJECT 0x80010000 +#define UNF_CT_IU_ACCEPT 0x80020000 + +#define UNF_FABRIC_FULL_REG 0x00000003 + +#define UNF_FC4_SCSI_BIT8 0x00000100 +#define UNF_FC4_FCP_TYPE 0x00000008 +#define UNF_FRAG_REASON_VENDOR 0 + +/* GID_PT, GID_FT */ +#define UNF_GID_PT_TYPE 0x7F000000 +#define UNF_GID_FT_TYPE 0x00000008 + +/* + * FC4 defines + */ +#define UNF_FC4_FRAME_PAGE_SIZE 0x10 +#define UNF_FC4_FRAME_PAGE_SIZE_SHIFT 16 + +#define UNF_FC4_FRAME_PARM_0_FCP 0x08000000 +#define UNF_FC4_FRAME_PARM_0_I_PAIR 0x00002000 +#define 
UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE 0x00000100 + +#define UNF_FC4_FRAME_PARM_3_INI 0x00000020 +#define UNF_FC4_FRAME_PARM_3_TGT 0x00000010 +#define UNF_FC4_FRAME_PARM_3_R_XFER_DIS 0x00000002 +#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ +#define UNF_FC4_FRAME_PARM_3_REC_SUPPORT 0x00000400 /* bit 10 */ +#define UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT 0x00000200 /* bit 9 */ +#define UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT 0x00000100 /* bit 8 */ +#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ + + +#define UNF_GFF_ACC_MASK 0xFF000000 + +/* Reject CT_IU Reason Codes */ +#define UNF_CTIU_RJT_MASK 0xffff0000 +#define UNF_CTIU_RJT_INVALID_COMMAND 0x00010000 +#define UNF_CTIU_RJT_INVALID_VERSION 0x00020000 +#define UNF_CTIU_RJT_LOGIC_ERR 0x00030000 +#define UNF_CTIU_RJT_INVALID_SIZE 0x00040000 +#define UNF_CTIU_RJT_LOGIC_BUSY 0x00050000 +#define UNF_CTIU_RJT_PROTOCOL_ERR 0x00070000 +#define UNF_CTIU_RJT_UNABLE_PERFORM 0x00090000 +#define UNF_CTIU_RJT_NOT_SUPPORTED 0x000B0000 + +/* FS_RJT Reason code explanations, FC-GS-2 6.5 */ +#define UNF_CTIU_RJT_EXP_MASK 0x0000FF00 +#define UNF_CTIU_RJT_EXP_NO_ADDTION 0x00000000 +#define UNF_CTIU_RJT_EXP_PORTID_NO_REG 0x00000100 +#define UNF_CTIU_RJT_EXP_PORTNAME_NO_REG 0x00000200 +#define UNF_CTIU_RJT_EXP_NODENAME_NO_REG 0x00000300 +#define UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG 0x00000700 +#define UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG 0x00000A00 + +/* + * LS_RJT defines + */ +#define UNF_FC_LS_RJT_REASON_MASK 0x00ff0000 + +/* + * LS_RJT reason code defines + */ +#define UNF_LS_OK 0x00000000 +#define UNF_LS_RJT_INVALID_COMMAND 0x00010000 +#define UNF_LS_RJT_LOGICAL_ERROR 0x00030000 +#define UNF_LS_RJT_BUSY 0x00050000 +#define UNF_LS_RJT_PROTOCOL_ERROR 0x00070000 +#define UNF_LS_RJT_REQUEST_DENIED 0x00090000 +#define UNF_LS_RJT_NOT_SUPPORTED 0x000b0000 +#define UNF_LS_RJT_CLASS_ERROR 0x000c0000 + +/* + * LS_RJT code explanation + */ +#define UNF_LS_RJT_NO_ADDITIONAL_INFO 0x00000000 +#define UNF_LS_RJT_INV_DATA_FIELD_SIZE 0x00000700 +#define UNF_LS_RJT_INV_COMMON_SERV_PARAM 0x00000F00 +#define UNF_LS_RJT_INVALID_OXID_RXID 0x00001700 +#define UNF_LS_RJT_COMMAND_IN_PROGRESS 0x00001900 +#define UNF_LS_RJT_INSUFFICIENT_RESOURCES 0x00002900 +#define UNF_LS_RJT_COMMAND_NOT_SUPPORTED 0x00002C00 +#define UNF_LS_RJT_UNABLE_TO_SUPLY_REQ_DATA 0x00002A00 +#define UNF_LS_RJT_INVALID_PAYLOAD_LENGTH 0x00002D00 + +#define UNF_P2P_LOCAL_NPORT_ID 0x000000EF +#define UNF_P2P_REMOTE_NPORT_ID 0x000000D6 + +#define UNF_BBCREDIT_MANAGE_NFPORT 0 +#define UNF_BBCREDIT_MANAGE_LPORT 1 +#define UNF_BBCREDIT_LPORT 0 +#define UNF_CONTIN_INCREASE_SUPPORT 1 +#define UNF_CLASS_VALID 1 +#define UNF_CLASS_INVALID 0 +#define UNF_NOT_MEANINGFUL 0 +#define UNF_NO_SERVICE_PARAMS 0 +#define UNF_CLEAN_ADDRESS_DEFAULT 0 +#define UNF_PRIORITY_ENABLE 1 +#define UNF_PRIORITY_DISABLE 0 +#define UNF_SEQUEN_DELIVERY_REQ 1 /* Sequential delivery requested */ + +/* RSCN */ +#define UNF_RSCN_PORT_ADDR 0x0 +#define UNF_RSCN_AREA_ADDR_GROUP 0x1 +#define UNF_RSCN_DOMAIN_ADDR_GROUP 0x2 +#define UNF_RSCN_FABRIC_ADDR_GROUP 0x3 + +#define UNF_GET_RSCN_PLD_LEN(v_cmnd) ((v_cmnd)&0x0000ffff) +#define UNF_RSCN_PAGE_LEN 0x4 + +#define UNF_PORT_LINK_UP 0x0000 +#define UNF_PORT_LINK_DOWN 0x0001 +#define UNF_PORT_RESET_START 0x0002 +#define UNF_PORT_RESET_END 0x0003 +#define UNF_PORT_LINK_UNKNOWN 0x0004 +#define UNF_PORT_NOP 0x0005 +#define UNF_PORT_CORE_FATAL_ERROR 0x0006 +#define UNF_PORT_CORE_UNRECOVERABLE_ERROR 0x0007 +#define UNF_PORT_CORE_RECOVERABLE_ERROR 0x0008 +#define UNF_PORT_UPDATE_PROCESS 0x000b 
+#define UNF_PORT_DEBUG_DUMP 0x000c +#define UNF_PORT_GET_FWLOG 0x000d +#define UNF_PORT_CLEAN_DONE 0x000e +#define UNF_PORT_BEGIN_REMOVE 0x000f +#define UNF_PORT_RELEASE_RPORT_INDEX 0x0010 +#define UNF_PORT_ABNORMAL_RESET 0x0012 + +#define UNF_READ 0 +#define UNF_WRITE 1 +#define UNF_READ_64 2 +#define UNF_WRITE_64 3 +/* + *SCSI begin + */ +#define SCSIOPC_TEST_UNIT_READY 0x00 +#define SCSIOPC_INQUIRY 0x12 +#define SCSIOPC_MODE_SENSE_6 0x1A +#define SCSIOPC_MODE_SENSE_10 0x5A +#define SCSIOPC_MODE_SELECT_6 0x15 +#define SCSIOPC_RESERVE 0x16 +#define SCSIOPC_RELEASE 0x17 +#define SCSIOPC_START_STOP_UNIT 0x1B +#define SCSIOPC_READ_CAPACITY_10 0x25 +#define SCSIOPC_READ_CAPACITY_16 0x9E +#define SCSIOPC_READ_6 0x08 +#define SCSIOPC_READ_10 0x28 +#define SCSIOPC_READ_12 0xA8 +#define SCSIOPC_READ_16 0x88 +#define SCSIOPC_WRITE_6 0x0A +#define SCSIOPC_WRITE_10 0x2A +#define SCSIOPC_WRITE_12 0xAA +#define SCSIOPC_WRITE_16 0x8A +#define SCSIOPC_WRITE_VERIFY 0x2E +#define SCSIOPC_VERIFY_10 0x2F +#define SCSIOPC_VERIFY_12 0xAF +#define SCSIOPC_VERIFY_16 0x8F +#define SCSIOPC_REQUEST_SENSE 0x03 +#define SCSIOPC_REPORT_LUN 0xA0 +#define SCSIOPC_FORMAT_UNIT 0x04 +#define SCSIOPC_SEND_DIAGNOSTIC 0x1D +#define SCSIOPC_WRITE_SAME_10 0x41 +#define SCSIOPC_WRITE_SAME_16 0x93 +#define SCSIOPC_READ_BUFFER 0x3C +#define SCSIOPC_WRITE_BUFFER 0x3B + +#define SCSIOPC_LOG_SENSE 0x4D +#define SCSIOPC_MODE_SELECT_10 0x55 +#define SCSIOPC_SYNCHRONIZE_CACHE_10 0x35 +#define SCSIOPC_SYNCHRONIZE_CACHE_16 0x91 +#define SCSIOPC_WRITE_AND_VERIFY_10 0x2E +#define SCSIOPC_WRITE_AND_VERIFY_12 0xAE +#define SCSIOPC_WRITE_AND_VERIFY_16 0x8E +#define SCSIOPC_READ_MEDIA_SERIAL_NUMBER 0xAB +#define SCSIOPC_REASSIGN_BLOCKS 0x07 +#define SCSIOPC_ATA_PASSTHROUGH_16 0x85 +#define SCSIOPC_ATA_PASSTHROUGH_12 0xa1 + +/* + * SCSI end + */ +#define IS_READ_COMMAND(opcode) ((opcode) == SCSIOPC_READ_6 || \ + (opcode) == SCSIOPC_READ_10 || \ + (opcode) == SCSIOPC_READ_12 || \ + (opcode) == SCSIOPC_READ_16) +#define IS_WRITE_COMMAND(opcode) ((opcode) == SCSIOPC_WRITE_6 || \ + (opcode) == SCSIOPC_WRITE_10 || \ + (opcode) == SCSIOPC_WRITE_12 || \ + (opcode) == SCSIOPC_WRITE_16) + +#define FCP_RSP_LEN_VALID_MASK 0x1 +#define FCP_SNS_LEN_VALID_MASK 0x2 +#define FCP_RESID_OVER_MASK 0x4 +#define FCP_RESID_UNDER_MASK 0x8 +#define FCP_CONF_REQ_MASK 0x10 +#define FCP_SCSI_STATUS_GOOD 0x0 + +#define UNF_DELAYED_WORK_SYNC(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (!cancel_delayed_work_sync(v_work)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, \ + UNF_INFO, \ + "[info]LPort or RPort(0x%x) %s worker can't destroy, or no worker", \ + v_pord_id, v_work_symb); \ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = RETURN_OK; \ + } \ + } while (0) + +#define UNF_DELAYED_WORK(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (!cancel_delayed_work(v_work)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, \ + UNF_MAJOR, \ + "LPort or RPort(0x%x) %s worker can't destroy, or no worker.", \ + v_pord_id, v_work_symb); \ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = RETURN_OK; \ + } \ + } while (0) + +#define UNF_DELAYED_WORK_CONFUSED(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (in_interrupt()) { \ + UNF_DELAYED_WORK(v_ret, v_pord_id, v_work, \ + v_work_symb) \ + } else { \ + UNF_DELAYED_WORK_SYNC(v_ret, v_pord_id, v_work, \ + v_work_symb) \ + } \ + } while (0) + +#define UNF_GET_IO_XCHG_TAG(v_pkg) \ + ((unsigned short)((v_pkg)->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX])) + +#define UNF_GET_SFS_ENTRY(v_pkg) 
((union unf_sfs_u *)(void *) \ + (((struct unf_frame_pkg_s *)v_pkg)->unf_cmnd_pload_bl.buffer_ptr)) + +/* FLOGI */ +#define UNF_GET_FLOGI_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + (UNF_GET_SFS_ENTRY(v_pkg)))->flogi.flogi_payload)) +#define UNF_FLOGI_PAYLOAD_LEN sizeof(struct unf_flogi_payload_s) + +/* FLOGI ACC */ +#define UNF_GET_FLOGI_ACC_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + (UNF_GET_SFS_ENTRY(v_pkg)))->flogi_acc.flogi_payload)) +#define UNF_FLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_flogi_payload_s) + +/* FDISC */ +#define UNF_FDISC_PAYLOAD_LEN UNF_FLOGI_PAYLOAD_LEN +#define UNF_FDISC_ACC_PAYLOAD_LEN UNF_FLOGI_ACC_PAYLOAD_LEN + +/* PLOGI */ +#define UNF_GET_PLOGI_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->plogi.payload)) +#define UNF_PLOGI_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* PLOGI ACC */ +#define UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->plogi_acc.payload)) +#define UNF_PLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* LOGO */ +#define UNF_LOGO_PAYLOAD_LEN sizeof(struct unf_logo_payload_s) + +/* ECHO */ +#define UNF_GET_ECHO_PAYLOAD(v_pkg) \ + (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->echo.echo_pld) + +/* ECHO PHYADDR */ +#define UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg) \ + (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->echo.phy_echo_addr) + +#define UNF_ECHO_PAYLOAD_LEN sizeof(struct unf_echo_payload_s) + +/* RLS */ +#define UNF_RLS_PAYLOAD_LEN sizeof(struct unf_rls_payload_s) + +/* ECHO ACC */ +#define UNF_ECHO_ACC_PAYLOAD_LEN sizeof(struct unf_echo_payload_s) +/* REC */ +#define UNF_REC_PAYLOAD_LEN sizeof(struct unf_rec_pld_s) + +/* REC ACC */ +#define UNF_GET_REC_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->els_acc.cmnd)) + +#define UNF_REC_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RRQ */ +#define UNF_RRQ_PAYLOAD_LEN (sizeof(struct unf_rrq_s) - \ + sizeof(struct unf_fchead_s)) + +/* PRLI */ +#define UNF_PRLI_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PRLI ACC */ +#define UNF_PRLI_ACC_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PRLO */ +#define UNF_PRLO_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +#define UNF_PRLO_ACC_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PDISC */ +#define UNF_PDISC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* PDISC ACC */ +#define UNF_PDISC_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* ADISC */ +#define UNF_ADISC_PAYLOAD_LEN sizeof(struct unf_adisc_payload_s) + +/* ADISC ACC */ +#define UNF_ADISC_ACC_PAYLOAD_LEN sizeof(struct unf_adisc_payload_s) + +/* RSCN ACC */ +#define UNF_GET_RSCN_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->els_acc.cmnd)) +#define UNF_RSCN_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* LOGO ACC */ +#define UNF_LOGO_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RRQ ACC */ +#define UNF_RRQ_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RLS ACC */ +#define UNF_RLS_ACC_PAYLOAD_LEN (sizeof(struct unf_rls_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* GPN_ID */ +#define UNF_GPNID_PAYLOAD_LEN (sizeof(struct unf_gpnid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_GPNID_RSP_PAYLOAD_LEN (sizeof(struct unf_gpnid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GNN_ID */ +#define UNF_GNNID_PAYLOAD_LEN (sizeof(struct unf_gnnid_s) - \ + sizeof(struct 
unf_fchead_s)) + +#define UNF_GNNID_RSP_PAYLOAD_LEN (sizeof(struct unf_gnnid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GFF_ID */ +#define UNF_GFFID_PAYLOAD_LEN (sizeof(struct unf_gffid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_GFFID_RSP_PAYLOAD_LEN (sizeof(struct unf_gffid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GID_FT/GID_PT */ +#define UNF_GET_GID_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + UNF_GET_SFS_ENTRY(v_pkg))->get_id.gid_req.ctiu_pream)) + +#define UNF_GID_PAYLOAD_LEN (sizeof(struct unf_ctiu_prem_s) + \ + sizeof(unsigned int)) + +#define UNF_GID_ACC_PAYLOAD_LEN sizeof(struct unf_gif_acc_pld_s) + +/* RFT_ID */ +#define UNF_RFTID_PAYLOAD_LEN (sizeof(struct unf_rftid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_RFTID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem_s) + +/* RFF_ID */ +#define UNF_RFFID_PAYLOAD_LEN (sizeof(struct unf_rffid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_RFFID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem_s) + +/* SRR */ +#define UNF_SRR_PAYLOAD_LEN \ + sizeof(struct unf_srr_payload_s) + +/* ACC&RJT */ +#define UNF_ELS_ACC_RJT_LEN (sizeof(struct unf_els_rjt_s) - \ + sizeof(struct unf_fchead_s)) + +/* SCR */ +#define UNF_SCR_PAYLOAD_LEN (sizeof(struct unf_scr_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_SCR_RSP_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/**********************************************************/ +#define UNF_GET_XCHG_TAG(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) + +#define UNF_GET_SID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.csctl_sid & UNF_NPORTID_MASK) +#define UNF_GET_DID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.rctl_did & UNF_NPORTID_MASK) +#define UNF_GET_OXID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.oxid_rxid >> 16) +#define UNF_GET_RXID(v_pkg) ((unsigned short)((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.oxid_rxid) +#define UNF_GET_XFER_LEN(v_pkg) (((struct unf_frame_pkg_s *)v_pkg)->transfer_len) + +/* ioc abort */ +#define UNF_GETXCHGALLOCTIME(v_pkg) \ + (((struct unf_frame_pkg_s *)v_pkg)->private[PKG_PRIVATE_XCHG_ALLOC_TIME]) +#define UNF_SET_XCHG_ALLOC_TIME(pkg, xchg) \ + (((struct unf_frame_pkg_s *)(pkg))->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = \ + (((struct unf_xchg_s *)(xchg))->private[PKG_PRIVATE_XCHG_ALLOC_TIME])) +#define UNF_SET_ABORT_INFO_IOTYPE(pkg, xchg) \ + (((struct unf_frame_pkg_s *)(pkg))->private[PKG_PRIVATE_XCHG_ABORT_INFO] |= \ + (((unsigned char)(((struct unf_xchg_s *)(xchg))->data_direction & 0x7))\ + << 2)) + +#define UNF_CHECK_NPORT_FPORT_BIT(els_payload) \ + (((struct unf_flogi_payload_s *)els_payload)->fabric_parms.co_parms.n_port) + +#define UNF_N_PORT 0 +#define UNF_F_PORT 1 + +#define UNF_GET_RA_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.r_a_tov) +#define UNF_GET_RT_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.r_t_tov) +#define UNF_GET_E_D_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.e_d_tov) +#define UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.e_d_tov_resolution) +#define UNF_GET_BB_SC_N_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.bb_scn) +#define UNF_GET_BB_CREDIT_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.bb_credit) +enum unf_pcie_error_code_e { + UNF_PCIE_ERROR_NONE = 0, + UNF_PCIE_DATAPARITYDETECTED = 1, + 
UNF_PCIE_SIGNALTARGETABORT, + UNF_PCIE_RECEIVEDTARGETABORT, + UNF_PCIE_RECEIVEDMASTERABORT, + UNF_PCIE_SIGNALEDSYSTEMERROR, + UNF_PCIE_DETECTEDPARITYERROR, + UNF_PCIE_CORRECTABLEERRORDETECTED, + UNF_PCIE_NONFATALERRORDETECTED, + UNF_PCIE_FATALERRORDETECTED, + UNF_PCIE_UNSUPPORTEDREQUESTDETECTED, + UNF_PCIE_AUXILIARYPOWERDETECTED, + UNF_PCIE_TRANSACTIONSPENDING, + + UNF_PCIE_UNCORRECTINTERERRSTATUS, + UNF_PCIE_UNSUPPORTREQERRSTATUS, + UNF_PCIE_ECRCERRORSTATUS, + UNF_PCIE_MALFORMEDTLPSTATUS, + UNF_PCIE_RECEIVEROVERFLOWSTATUS, + UNF_PCIE_UNEXPECTCOMPLETESTATUS, + UNF_PCIE_COMPLETERABORTSTATUS, + UNF_PCIE_COMPLETIONTIMEOUTSTATUS, + UNF_PCIE_FLOWCTRLPROTOCOLERRSTATUS, + UNF_PCIE_POISONEDTLPSTATUS, + UNF_PCIE_SURPRISEDOWNERRORSTATUS, + UNF_PCIE_DATALINKPROTOCOLERRSTATUS, + UNF_PCIE_ADVISORYNONFATALERRSTATUS, + UNF_PCIE_REPLAYTIMERTIMEOUTSTATUS, + UNF_PCIE_REPLAYNUMROLLOVERSTATUS, + UNF_PCIE_BADDLLPSTATUS, + UNF_PCIE_BADTLPSTATUS, + UNF_PCIE_RECEIVERERRORSTATUS, + + UNF_PCIE_BUTT +}; + +#define UNF_DMA_HI32(a) (((a) >> 32) & 0xffffffff) +#define UNF_DMA_LO32(a) ((a) & 0xffffffff) + +#define UNF_WWN_LEN 8 +#define UNF_MAC_LEN 6 + +/* send BLS/ELS/BLS REPLY/ELS REPLY/GS/ */ +/* rcvd BLS/ELS/REQ DONE/REPLY DONE */ +#define UNF_PKG_BLS_REQ 0x0100 +#define UNF_PKG_BLS_REQ_DONE 0x0101 + +#define UNF_PKG_ELS_REQ 0x0200 + +#define UNF_PKG_ELS_REQ_DONE 0x0201 + +#define UNF_PKG_ELS_REPLY 0x0202 + +#define UNF_PKG_ELS_REPLY_DONE 0x0203 + +#define UNF_PKG_GS_REQ 0x0300 + +#define UNF_PKG_GS_REQ_DONE 0x0301 + +#define UNF_PKG_INI_IO 0x0500 +#define UNF_PKG_INI_RCV_TGT_RSP 0x0507 + +/* external sgl struct start */ +struct unf_esgl_page_s { + unsigned long long page_address; + dma_addr_t esgl_phyaddr; + unsigned int page_size; +}; + +struct unf_esgl_s { + struct list_head entry_esgl; + struct unf_esgl_page_s page; +}; + +/* external sgl struct end */ +struct unf_frame_payld_s { + unsigned char *buffer_ptr; + dma_addr_t buf_dma_addr; + unsigned int length; +}; + +enum pkg_private_index_e { + PKG_PRIVATE_LOWLEVEL_XCHG_ADD = 0, + PKG_PRIVATE_XCHG_HOT_POOL_INDEX = 1, /* Hot Pool Index */ + PKG_PRIVATE_XCHG_RPORT_INDEX = 2, /* RPort index */ + PKG_PRIVATE_XCHG_VP_INDEX = 3, /* VPort index */ + PKG_PRIVATE_RPORT_RX_SIZE, + PKG_PRIVATE_XCHG_TIMEER, + PKG_PRIVATE_XCHG_ALLOC_TIME, + PKG_PRIVATE_XCHG_ABORT_INFO, + PKG_PRIVATE_ECHO_CMD_SND_TIME, /* local send echo cmd time stamp */ + PKG_PRIVATE_ECHO_ACC_RCV_TIME, /* local receive echo acc time stamp */ + PKG_PRIVATE_ECHO_CMD_RCV_TIME, /* remote receive echo cmd time stamp */ + PKG_PRIVATE_ECHO_RSP_SND_TIME, /* remote send echo rsp time stamp */ + PKG_MAX_PRIVATE_DATA_SIZE +}; + +extern unsigned int dix_flag; +extern unsigned int dif_sgl_mode; +extern unsigned int dif_app_esc_check; +extern unsigned int dif_ref_esc_check; + +#define UNF_DIF_ACTION_NONE 0 + +enum unf_adm_dif_mode_e { + UNF_SWITCH_DIF_DIX = 0, + UNF_APP_REF_ESCAPE, + ALL_DIF_MODE = 20, +}; + +#define UNF_VERIFY_CRC_MASK (1 << 1) +#define UNF_VERIFY_APP_MASK (1 << 2) +#define UNF_VERIFY_LBA_MASK (1 << 3) + +#define UNF_REPLACE_CRC_MASK (1 << 8) +#define UNF_REPLACE_APP_MASK (1 << 9) +#define UNF_REPLACE_LBA_MASK (1 << 10) + +#define UNF_DIF_ACTION_MASK (0xff << 16) +#define UNF_DIF_ACTION_INSERT (0x1 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_DELETE (0x2 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_FORWARD (0x3 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_REPLACE (0x4 << 16) + +#define UNF_DIF_ACTION_NO_INCREASE_REFTAG (0x1 << 24) + +#define UNF_DEFAULT_CRC_GUARD_SEED (0) +#define UNF_CAL_BLOCK_CNT(data_len, 
sector_size) ((data_len) / (sector_size)) + +#define UNF_DIF_DOUBLE_SGL (1 << 1) +#define UNF_DIF_SECTSIZE_4KB (1 << 2) +#define UNF_DIF_LBA_NONE_INCREASE (1 << 3) +#define UNF_DIF_TYPE3 (1 << 4) + +#define HIFC_DIF_APP_REF_ESC_NOT_CHECK 1 +#define HIFC_DIF_APP_REF_ESC_CHECK 0 + +enum unf_io_state_e { + UNF_INI_IO = 0, + UNF_TGT_XFER = 1, + UNF_TGT_RSP = 2 +}; + +#define UNF_PKG_LAST_RESPONSE 0 +#define UNF_PKG_NOT_LAST_RESPONSE 1 + +#define UNF_PKG_LAST_REQUEST 1 +#define UNF_PKG_NOT_LAST_REQUEST 0 + +struct unf_frame_pkg_s { + /* pkt type:BLS/ELS/FC4LS/CMND/XFER/RSP */ + unsigned int type; + unsigned int last_pkg_flag; + +#define UNF_FCP_RESPONSE_VALID 0x01 +#define UNF_FCP_SENSE_VALID 0x02 + /* resp and sense vailed flag */ + unsigned int response_and_sense_valid_flag; + unsigned int cmnd; + struct unf_fchead_s frame_head; + unsigned int entry_count; + void *xchg_contex; + unsigned int transfer_len; + unsigned int residus_len; + unsigned int status; + unsigned int status_sub_code; + enum unf_io_state_e io_state; + unsigned int qos_level; + + unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE]; + + unsigned char byte_orders; + + struct unf_fcp_cmnd_s *fcp_cmnd; + struct unf_dif_control_info_s dif_control; + struct unf_frame_payld_s unf_cmnd_pload_bl; + struct unf_frame_payld_s unf_rsp_pload_bl; + struct unf_frame_payld_s unf_sense_pload_bl; + void *upper_cmd; + unsigned int abts_maker_status; + +}; + +#define UNF_MAX_SFS_XCHG 2048 +#define UNF_RESERVE_SFS_XCHG 128 /* times on exchange mgr num */ + +struct unf_lport_cfg_item_s { + unsigned int port_id; + unsigned int port_mode; /* INI(0x20) TGT(0x10) BOTH(0x30) */ + unsigned int port_topology; /* 0x3:loop , 0xc:p2p ,0xf:auto */ + unsigned int max_queue_depth; + unsigned int max_io; /* Recommended Value 512-4096 */ + unsigned int max_login; + unsigned int max_sfs_xchg; + /* 0:auto 1:1Gbps 2:2Gbps 4:4Gbps 8:8Gbps 16:16Gbps */ + unsigned int port_speed; + unsigned int tape_support; /* tape support */ + unsigned int fcp_conf; /* fcp confirm support */ + unsigned int bb_scn; + unsigned int sum_resource; + enum int_e res_mgmt_enabled; +}; + +struct unf_port_dynamic_info_s { + unsigned int sfp_posion; + unsigned int sfp_valid; + unsigned int phy_link; + unsigned int firmware_state; + unsigned int cur_speed; + unsigned int mailbox_timeout_cnt; +}; + +struct unf_hinicam_pkg { + unsigned int msg_format; + void *buff_in; + void *buff_out; + unsigned int in_size; + unsigned int *out_size; +}; + +struct unf_version_str_s { + char *buf; + unsigned int buf_len; +}; + +struct unf_buf_s { + unsigned char *cbuf; + unsigned int buf_len; +}; + +struct unf_rw_reg_param_s { + unsigned int rw_type; + unsigned int offset; + unsigned long long value; +}; + +/* get ucode & up ver */ +#define HIFC_VER_LEN (16) +#define HIFC_COMPILE_TIME_LEN (20) +struct unf_fw_version_s { + unsigned int message_type; + unsigned char fw_version[HIFC_VER_LEN]; +}; + +enum unf_port_config_set_op_e { + UNF_PORT_CFG_SET_SPEED, + UNF_PORT_CFG_SET_TOPO, + UNF_PORT_CFG_SET_BBSCN, + UNF_PORT_CFG_SET_MODE, + UNF_PORT_CFG_SET_SFP_SWITCH, + UNF_PORT_CFG_SET_PORT_SWITCH, + UNF_PORT_CFG_SET_POWER_STATE, + UNF_PORT_CFG_SET_PORT_STATE, + UNF_PORT_CFG_SET_INTR_COALSEC, + UNF_PORT_CFG_UPDATE_PORT, + UNF_PORT_CFG_UPDATE_WWN, + UNF_PORT_CFG_TEST_FLASH, + UNF_PORT_CFG_SET_FCP_CONF, + UNF_PORT_CFG_SET_LOOP_ROLE, + UNF_PORT_CFG_SET_INIT_REQ, + UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, + UNF_PORT_CFG_SET_MAC_ADDR, + UNF_PORT_CFG_SET_SFP_USEDTIME, + UNF_PORT_CFG_SET_PORT_TRANSFER_PARAMETER, + 
UNF_PORT_CFG_SET_SFP_REG_WRITE, + UNF_PORT_CFG_UPDATE_SFP, + UNF_PORT_CFG_UPDATE_FABRIC_PARAM, + UNF_PORT_CFG_UPDATE_PLOGI_PARAM, + UNF_PORT_CFG_UPDATE_FDISC_PARAM, + UNF_PORT_CFG_SAVE_HBA_INFO, + UNF_PORT_CFG_SET_HBA_BASE_INFO, + UNF_PORT_CFG_SET_FLASH_DATA_INFO, + UNF_PORT_CFG_SET_BUTT +}; + +enum unf_port_config_get_op_e { + UNF_PORT_CFG_GET_SPEED_CFG, + UNF_PORT_CFG_GET_SPEED_ACT, + UNF_PORT_CFG_GET_TOPO_CFG, + UNF_PORT_CFG_GET_TOPO_ACT, + UNF_PORT_CFG_GET_MODE, + UNF_PORT_CFG_GET_LOOP_MAP, + UNF_PORT_CFG_GET_TOV, + UNF_PORT_CFG_GET_SFP_PRESENT, + UNF_PORT_CFG_GET_SFP_INFO, + UNF_PORT_CFG_GET_FW_VER, + UNF_PORT_CFG_GET_HW_VER, + UNF_PORT_CFG_GET_LESB_THEN_CLR, /* Link Error Status Block, LESB */ + UNF_PORT_CFG_GET_DYNAMIC_INFO, + UNF_PORT_CFG_GET_VITAL_REGS, + UNF_PORT_CFG_CLR_LESB, + UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, + UNF_PORT_CFG_GET_WORKBALE_BBSCN, + UNF_PORT_CFG_GET_FC_SERDES, + UNF_PORT_CFG_GET_LOOP_ALPA, + UNF_PORT_CFG_GET_SFP_DYNAMIC_INFO, + UNF_PORT_CFG_GET_MAC_ADDR, + UNF_PORT_CFG_GET_SFP_USEDTIME, + UNF_PORT_CFG_GET_PORT_INFO, + UNF_PORT_CFG_DDT_TEST, + UNF_PORT_CFG_GET_LED_STATE, + UNF_PORT_CFG_GET_VLAN, + UNF_PORT_CFG_GET_SFP_REG_READ, + UNF_PORT_CFG_GET_SFP_VER, + UNF_PORT_CFG_GET_SFP_SUPPORT_UPDATE, + UNF_PORT_CFG_GET_SFP_LOG, + UNF_PORT_CFG_GET_FEC, + UNF_PORT_CFG_GET_PCIE_LINK_STATE, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, + UNF_PORT_CFG_GET_BUTT +}; + +enum unf_port_diag_op_e { + UNF_PORT_DIAG_PORT_DETAIL, + UNF_PORT_DIAG_RD_WR_REG, + UNF_PORT_DIAG_BUTT +}; + +enum unf_port_config_state_e { + UNF_PORT_CONFIG_STATE_START, + UNF_PORT_CONFIG_STATE_STOP, + UNF_PORT_CONFIG_STATE_RESET, + UNF_PORT_CONFIG_STATE_STOP_INTR, + UNF_PORT_CONFIG_STATE_BUTT +}; + +struct unf_port_login_parms_s { + enum unf_act_topo_e en_act_topo; + + unsigned int rport_index; + unsigned int seq_cnt : 1; + unsigned int ed_tov : 1; + unsigned int reserved : 14; + unsigned int tx_mfs : 16; + unsigned int ed_tov_timer_val; + + unsigned char remote_rttov_tag; + unsigned char remote_edtov_tag; + unsigned short remote_bbcredit; + unsigned short compared_bbscn; + unsigned int compared_edtov_val; + unsigned int compared_ratov_val; + unsigned int els_cmnd_code; +}; + +#define HIFC_FLASH_MAX_LEN 1024 // bytes + +struct unf_mbox_head_info_s { + /* mbox header */ + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char pad0; + + /* operation */ + unsigned int op_code : 4; + unsigned int pad1 : 28; +}; + +#define HIFC_FLASH_MBOX_HEAD_MAX_LEN 8 // bytes +struct unf_mbox_head_sts_s { + /* mbox header */ + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char pad0; + + /* operation */ + unsigned short pad1; + unsigned char pad2; + unsigned char status; +}; + +#define HIFC_FLASH_UEFI_MAX_LEN 16 // bytes +struct unf_flash_uefi_switch_s { + unsigned char writeflag; + unsigned char sanbooten; + unsigned char reserved[14]; +}; + +#define HIFC_MGMT_UEFI_MAGIC_NUM 0xAF +#define HIFC_MGMT_TMO_MAGIC_NUM 0xAE + +#define HIFC_FLASH_LINK_TMO_MAX_LEN 16 // bytes +struct unf_flash_link_tmo_s { + unsigned char writeflag; + unsigned char link_tmo0; + unsigned char link_tmo1; + unsigned char link_tmo2; + unsigned char link_tmo3; + unsigned char reserved[11]; +}; + +#define HIFC_FLASH_DATA_MAX_LEN (HIFC_FLASH_MAX_LEN - \ + HIFC_FLASH_MBOX_HEAD_MAX_LEN) // bytes +struct unf_flash_data_s { + struct unf_flash_uefi_switch_s uefi_switch; // 16 bytes + struct unf_flash_link_tmo_s link_tmo; // 16 bytes + /* once the related struct change, the reserved size needs modify */ 
+ unsigned char reserved[HIFC_FLASH_DATA_MAX_LEN - 32]; +}; + +/* size of hifc_flash_data_mgmt not more than 1024 bytes */ +struct unf_mbox_flash_data_mgmt_s { + struct unf_mbox_head_info_s mbox_head; // 8 bytes + struct unf_flash_data_s flash_data; +}; + +struct unf_flash_data_mgmt_sts_s { + struct unf_mbox_head_sts_s mbox_head; // 8 bytes + struct unf_flash_data_s flash_data; +}; + +struct unf_low_level_service_op_s { + unsigned int (*pfn_unf_els_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_bls_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_gs_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_fc_4_ls_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_cmnd_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_ll_relese_xchg_res)(void *, + struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_release_rport_res)(void *, struct + unf_rport_info_s *); + unsigned int (*pfn_unf_get_consumed_res)(void *, + struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_flush_ini_resp_que)(void *); + unsigned int (*pfn_unf_alloc_rport_res)(void *, + struct unf_rport_info_s *); + unsigned int (*pfn_unf_rport_session_rst)(void *, + struct unf_rport_info_s *); +}; + +struct unf_low_level_port_mgr_op_s { + /* fcport/opcode/input parameter */ + unsigned int (*pfn_ll_port_config_set) + (void *v_fc_port, + enum unf_port_config_set_op_e v_op_code, + void *v_para_in); + /* fcport/opcode/output parameter */ + unsigned int (*pfn_ll_port_config_get) + (void *v_fc_port, + enum unf_port_config_get_op_e v_op_code, + void *v_para_out); + /* fcport/opcode/input parameter/output parameter */ + unsigned int (*pfn_ll_port_diagnose) + (void *v_fc_port, + enum unf_port_diag_op_e v_op_code, + void *v_para); + +}; + +struct unf_chip_info_s { + unsigned char chip_type; + unsigned char chip_work_mode; + unsigned char disable_err_flag; +}; + +struct unf_low_level_function_op_s { + struct unf_chip_info_s chip_info; + /* low level type */ + unsigned int low_level_type; + /* low level name, fc etc. */ + const char *name; + struct pci_dev *dev; + unsigned long long sys_node_name; + unsigned long long sys_port_name; + + struct unf_lport_cfg_item_s lport_cfg_items; + + /* low level Xchg mgr type, + * active --alloc oxid and rxid + * passtive -- not alloc oxid and rxid + */ +#define UNF_LOW_LEVEL_MGR_TYPE_ACTIVE 0 +#define UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE 1 + const unsigned int xchg_mgr_type; + +#define UNF_NO_EXTRA_ABTS_XCHG 0x0 +#define UNF_LL_IOC_ABTS_XCHG 0x1 + const unsigned int abts_xchg; +#define UNF_CM_RPORT_SET_QUALIFIER 0x0 +#define UNF_CM_RPORT_SET_QUALIFIER_REUSE 0x1 +#define UNF_CM_RPORT_SET_QUALIFIER_HIFC 0x2 + /* low level pass-through flag. 
*/ +#define UNF_LOW_LEVEL_PASS_THROUGH_FIP 0x0 +#define UNF_LOW_LEVEL_PASS_THROUGH_FABRIC_LOGIN 0x1 +#define UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN 0x2 + unsigned int pass_through_flag; + /* low level parameter */ + unsigned int support_max_npiv_num; + unsigned int support_max_speed; + unsigned int fc_ser_max_speed; + unsigned int support_max_rport; + unsigned int support_max_xid_range; + unsigned int sfp_type; + unsigned int update_fw_reset_active; + unsigned int support_upgrade_report; + unsigned int multi_conf_support; + unsigned int port_type; +#define UNF_LOW_LEVEL_RELEASE_RPORT_SYNC 0x0 +#define UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC 0x1 + unsigned char rport_release_type; +#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_FIXED 0x0 +#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG 0x1 + unsigned char sirt_page_mode; + unsigned char sfp_speed; + /* IO reference */ + struct unf_low_level_service_op_s service_op; + /* Port Mgr reference */ + struct unf_low_level_port_mgr_op_s port_mgr_op; + unsigned char chip_id; +}; + +struct unf_cm_handle_op_s { + /* return:L_Port */ + void *(*pfn_unf_alloc_local_port)(void *, + struct unf_low_level_function_op_s *); + /* input para:L_Port */ + unsigned int (*pfn_unf_release_local_port)(void *); + /* input para:lport vn2vnid,output para:ok/err */ + unsigned int (*pfn_unf_set_vn2vn_id)(void *, unsigned int); + unsigned char (*pfn_unf_get_loop_id)(unsigned int v_port_id); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_els_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_gs_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_bls_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_fc4_ls_pkg)( + void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_send_els_done)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + unsigned int (*pfn_unf_send_fc4_ls_done)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_marker_status)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + unsigned int (*pfn_unf_receive_abts_marker_status)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_ini_rsponse)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + int (*pfn_unf_get_cfg_parms)(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num); + unsigned int (*pfn_unf_cm_get_sgl_entry)(void *v_pkg, + char **v_buf, + unsigned int *v_buf_len); + unsigned int (*pfn_unf_cm_get_dif_sgl_entry)(void *v_pkg, + char **v_buf, + unsigned int *v_buf_len); + struct unf_esgl_page_s *(*pfn_unf_get_one_free_esgl_page)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, EVENT */ + unsigned int (*pfn_unf_fc_port_link_event)(void *v_lport, + unsigned int v_events, + void *v_input); + unsigned int (*pfn_unf_fcoe_update_fcf_name)(void *v_lport, + void *v_input); + int (*pfn_unf_ioctl_to_com_handler)(void *v_lport, + struct unf_hinicam_pkg *v_pkg); +}; + +unsigned int unf_get_cm_handle_op(struct unf_cm_handle_op_s *v_cm_handle); +int unf_common_init(void); +void unf_common_exit(void); + +struct unf_port_info_entry_s { + unsigned int bb_scn; + unsigned int speed; + unsigned int topo; + unsigned int fec; +}; 
+ +enum drv_cable_connector_type_e { + DRV_CABLE_CONNECTOR_NONE, + DRV_CABLE_CONNECTOR_OPTICAL, + DRV_CABLE_CONNECTOR_COPPER, + DRV_CABLE_CONNECTOR_INVALID, + DRV_CABLE_CONNECTOR_BUTT +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_disc.c b/drivers/scsi/huawei/hifc/unf_disc.c new file mode 100644 index 0000000000000000000000000000000000000000..12d8514af9598bcce88600b1188672e53431298b --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" + +#define UNF_LIST_RSCN_PAGE_CNT 2560 +#define UNF_MAX_PORTS_PRI_LOOP 2 +#define UNF_MAX_GS_SEND_NUM 8 +#define UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000) + +static void unf_set_disc_state(struct unf_disc_s *v_disc, + enum unf_disc_state_e v_en_states) +{ + UNF_CHECK_VALID(0x651, UNF_TRUE, v_disc, return); + + if (v_en_states != v_disc->en_states) { + /* Reset disc retry count */ + v_disc->retry_count = 0; + } + + v_disc->en_states = v_en_states; +} + +static inline unsigned int unf_get_loop_map(struct unf_lport_s *v_lport, + unsigned char v_loop_map[], + unsigned int loop_map_size) +{ + struct unf_buf_s buf = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID( + 0x652, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + buf.cbuf = v_loop_map; + buf.buf_len = loop_map_size; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LOOP_MAP, + (void *)&buf); + return ret; +} + +static int unf_discover_private_loop(void *v_arg_in, void *v_arg_out) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_arg_in; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned char loop_id = 0; + unsigned int alpa_index = 0; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x653, UNF_TRUE, lport, return UNF_RETURN_ERROR); + memset(loop_map, 0x0, UNF_LOOPMAP_COUNT); + + /* Get Port Loop Map */ + ret = unf_get_loop_map(lport, loop_map, UNF_LOOPMAP_COUNT); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) get loop map failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check Loop Map Ports Count */ + if (loop_map[0] > UNF_MAX_PORTS_PRI_LOOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has more than %d ports(%u) in private loop", + lport->port_id, UNF_MAX_PORTS_PRI_LOOP, + loop_map[0]); + + return UNF_RETURN_ERROR; + } + + /* AL_PA = 0 means Public Loop */ + if ((loop_map[1] == UNF_FL_PORT_LOOP_ADDR) || + (loop_map[2] == UNF_FL_PORT_LOOP_ADDR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Discovery Private Loop Ports */ + for (i = 0; i < loop_map[0]; i++) { + alpa_index = i + 1; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) start to disc(0x%x) with count(0x%x)", + lport->port_id, loop_map[alpa_index], i); + + /* Check whether need delay to send PLOGI or not */ + loop_id = loop_map[alpa_index]; + 
unf_login_with_loop_node(lport, (unsigned int)loop_id); + } + + return RETURN_OK; +} + +static unsigned int unf_disc_start(void *v_lport) +{ + /* + * Call by: + * 1. Enter Private Loop Login + * 2. Analysis RSCN payload + * 3. SCR callback + ** + * Doing: + * Fabric/Public Loop: Send GID_PT + * Private Loop: (delay to) send PLOGI or send LOGO immediately + * P2P: do nothing + */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_cm_event_report *event = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + act_topo = lport->en_act_topo; + disc = &lport->disc; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery", + lport->port_id, act_topo); + + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + /* 1. Fabric or Public Loop Topology: for directory server */ + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)", + lport->port_id); + + rport = unf_rport_get_free_and_init( + lport, + UNF_PORT_TYPE_FC, + UNF_FC_FID_DIR_SERV); + if (!rport) + return UNF_RETURN_ERROR; + rport->nport_id = UNF_FC_FID_DIR_SERV; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_NORMAL_ENTER); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* + * NOTE: Send GID_PT + * The Name Server shall, when it receives a GID_PT request, + * return all Port Identifiers having registered support for + * the specified Port Type. + * One or more Port Identifiers, having registered as + * the specified Port Type, are returned. 
+ */ + ret = unf_send_gid_pt(lport, rport); + if (ret != RETURN_OK) + unf_disc_error_recovery(lport); + } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private Loop: to thread process */ + event = unf_get_one_event_node(lport); + UNF_CHECK_VALID(0x655, UNF_TRUE, NULL != event, + return UNF_RETURN_ERROR); + + event->lport = lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = unf_discover_private_loop; + event->para_in = (void *)lport; + + unf_post_one_event_node(lport, event); + } else { + /* P2P toplogy mode: Do nothing */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with topo(0x%x) need do nothing", + lport->port_id, act_topo); + } + + return ret; +} + +static unsigned int unf_disc_stop(void *v_lport) +{ + /* Call by GID_ACC processer */ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_rport_s *sns_port = NULL; + struct unf_disc_rport_s *disc_rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_disc_s *root_disc = NULL; + struct list_head *node = NULL; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x656, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + root_lport = (struct unf_lport_s *)lport->root_lport; + root_disc = &root_lport->disc; + + /* Get R_Port for Directory server */ + /* 0xfffffc */ + sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric RPort(0xfffffc) failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* for R_Port from disc pool busy list */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (list_empty(&disc->disc_rport_mgr.list_disc_rport_busy)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Empty and return directly */ + return RETURN_OK; + } + + node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next; + do { + /* Delete from Disc busy list */ + disc_rport = list_entry(node, struct unf_disc_rport_s, + entry_rport); + nport_id = disc_rport->nport_id; + list_del_init(node); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Add back to (free) Disc R_Port pool (list) */ + spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag); + list_add_tail(node, + &root_disc->disc_rport_mgr.list_disc_rports_pool); + spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag); + + /* Send GNN_ID to Name Server */ + ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, + UNF_DISC_GET_NODE_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->nport_id, UNF_DISC_GET_NODE_NAME, + nport_id); + + /* NOTE: go to next stage */ + unf_rcv_gnn_id_rsp_unknown(lport, sns_port, + nport_id); + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next; + + } while (node != &disc->disc_rport_mgr.list_disc_rport_busy); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return ret; +} + +static void unf_disc_callback(void *v_lport, unsigned int v_result) +{ + /* Do nothing */ + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_result); +} + +/* + * Function Name : unf_init_rport_pool + * Function Description: Init R_Port (free) Pool + * Input Parameters : struct 
unf_lport_s *v_lport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_init_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned int bit_map_cnt = 0; + unsigned long flag = 0; + unsigned int max_login = 0; + + UNF_CHECK_VALID(0x657, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Init RPort Pool info */ + rport_pool = &v_lport->rport_pool; + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + rport_pool->rport_pool_completion = NULL; + rport_pool->rport_pool_count = max_login; + spin_lock_init(&rport_pool->rport_free_pool_lock); + INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */ + + /* 1. Alloc RPort Pool buffer/resource (memory) */ + rport_pool->rport_pool_add = + vmalloc((size_t)(max_login * sizeof(struct unf_rport_s))); + if (!rport_pool->rport_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort(s) resource failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rport_pool->rport_pool_add, 0, + (max_login * sizeof(struct unf_rport_s))); + + /* 2. Alloc R_Port Pool bitmap */ + bit_map_cnt = (v_lport->low_level_func.support_max_rport) / + BITS_PER_LONG + 1; + rport_pool->pul_rpi_bitmap = vmalloc((size_t)(bit_map_cnt * + sizeof(unsigned long))); + if (!rport_pool->pul_rpi_bitmap) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort Bitmap failed", + v_lport->port_id); + + vfree(rport_pool->rport_pool_add); + rport_pool->rport_pool_add = NULL; + return UNF_RETURN_ERROR; + } + memset(rport_pool->pul_rpi_bitmap, 0, + (bit_map_cnt * sizeof(unsigned long))); + + /* 3. 
Rport resource Management: Add Rports (buffer) + * to Rport Pool List + */ + rport = (struct unf_rport_s *)(rport_pool->rport_pool_add); + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + for (i = 0; i < rport_pool->rport_pool_count; i++) { + spin_lock_init(&rport->rport_state_lock); + list_add_tail(&rport->entry_rport, + &rport_pool->list_rports_pool); + sema_init(&rport->task_sema, 0); + rport++; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + return ret; +} + +static void unf_free_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_rport_pool_s *rport_pool = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int remain = 0; + unsigned long long time_out = 0; + unsigned int max_login = 0; + unsigned int i; + struct unf_rport_s *rport; + + struct completion rport_pool_completion = + COMPLETION_INITIALIZER(rport_pool_completion); + + UNF_CHECK_VALID(0x671, UNF_TRUE, v_lport, return); + UNF_REFERNCE_VAR(remain); + + rport_pool = &v_lport->rport_pool; + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (max_login != rport_pool->rport_pool_count) { + rport_pool->rport_pool_completion = &rport_pool_completion; + remain = max_login - rport_pool->rport_pool_count; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for RPort pool completion(%ld), remain(0x%x)", + v_lport->port_id, jiffies, remain); + + time_out = wait_for_completion_timeout( + rport_pool->rport_pool_completion, + msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); + if (time_out == 0) + unf_cmmark_dirty_mem( + v_lport, + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for RPort pool completion end(%ld)", + v_lport->port_id, jiffies); + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + rport_pool->rport_pool_completion = NULL; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + } + + rport = (struct unf_rport_s *)(rport_pool->rport_pool_add); + for (i = 0; i < rport_pool->rport_pool_count; i++) { + if (!rport) + break; + rport++; + } + + if ((v_lport->dirty_flag & + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) { + vfree(rport_pool->rport_pool_add); + rport_pool->rport_pool_add = NULL; /* R_Port pool */ + vfree(rport_pool->pul_rpi_bitmap); /* R_Port bitmap */ + rport_pool->pul_rpi_bitmap = NULL; + } + UNF_REFERNCE_VAR(remain); +} + +static void unf_init_rscn_node(struct unf_port_id_page_s *v_port_id_page) +{ + UNF_CHECK_VALID(0x658, UNF_TRUE, v_port_id_page, return); + + v_port_id_page->uc_addr_format = 0; + v_port_id_page->uc_event_qualifier = 0; + v_port_id_page->uc_reserved = 0; + v_port_id_page->port_id_area = 0; + v_port_id_page->port_id_domain = 0; + v_port_id_page->port_id_port = 0; +} + +struct unf_port_id_page_s *unf_get_free_rscn_node(void *v_rscn_mg) +{ + /* Call by Save RSCN Port_ID */ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_node = NULL; + struct list_head *list_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x659, UNF_TRUE, v_rscn_mg, return NULL); + rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg; + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + if (list_empty(&rscn_mgr->list_free_rscn_page)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, + 
UNF_WARN, + "[warn]No RSCN node anymore"); + + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + return NULL; + } + + /* Get from list_free_RSCN_page */ + list_node = (&rscn_mgr->list_free_rscn_page)->next; + list_del(list_node); + rscn_mgr->free_rscn_count--; + port_id_node = list_entry(list_node, struct unf_port_id_page_s, + list_node_rscn); + unf_init_rscn_node(port_id_node); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return port_id_node; +} + +static void unf_release_rscn_node(void *v_rscn_mg, + void *v_port_id_node) +{ + /* Call by RSCN GID_ACC */ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x660, UNF_TRUE, v_rscn_mg, return); + UNF_CHECK_VALID(0x661, UNF_TRUE, v_port_id_node, return); + rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg; + port_id_node = (struct unf_port_id_page_s *)v_port_id_node; + + /* Back to list_free_RSCN_page */ + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + rscn_mgr->free_rscn_count++; + unf_init_rscn_node(port_id_node); + list_add_tail(&port_id_node->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); +} + +static unsigned int unf_init_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_page = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* Get RSCN Pool buffer */ + rscn_mgr->rscn_pool_add = + vmalloc(UNF_LIST_RSCN_PAGE_CNT * + sizeof(struct unf_port_id_page_s)); + if (!rscn_mgr->rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RSCN pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rscn_mgr->rscn_pool_add, 0, + sizeof(struct unf_port_id_page_s) * UNF_LIST_RSCN_PAGE_CNT); + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + port_id_page = (struct unf_port_id_page_s *)(rscn_mgr->rscn_pool_add); + for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) { + /* Add tail to list_free_RSCN_page */ + list_add_tail(&port_id_page->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + + rscn_mgr->free_rscn_count++; + port_id_page++; + } + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return ret; +} + +static void unf_free_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->rscn_mgr.rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free RSCN pool", + v_lport->nport_id); + + vfree(disc->rscn_mgr.rscn_pool_add); + disc->rscn_mgr.rscn_pool_add = NULL; + } +} + +static unsigned int unf_init_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x664, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* free RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page); + /* busy RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page); + spin_lock_init(&rscn_mgr->rscn_id_list_lock); + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = unf_get_free_rscn_node; + rscn_mgr->pfn_unf_release_rscn_node = unf_release_rscn_node; + + ret = 
unf_init_rscn_pool(v_lport); + return ret; +} + +static void unf_destroy_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + + UNF_CHECK_VALID(0x665, UNF_TRUE, v_lport, return); + rscn_mgr = &v_lport->disc.rscn_mgr; + + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = NULL; + rscn_mgr->pfn_unf_release_rscn_node = NULL; + + unf_free_rscn_pool(v_lport); +} + +static unsigned int unf_init_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_rport_mg_s *disc_mgr = NULL; + struct unf_disc_rport_s *disc_rport = NULL; + unsigned int i = 0; + unsigned int max_login = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + disc_mgr = &v_lport->disc.disc_rport_mgr; + + /* Alloc R_Port Disc Pool buffer (address) */ + disc_mgr->disc_pool_add = vmalloc(max_login * + sizeof(struct unf_disc_rport_s)); + if (!disc_mgr->disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate disc RPort pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(disc_mgr->disc_pool_add, 0, + (max_login * sizeof(struct unf_disc_rport_s))); + + /* Add R_Port to (free) DISC R_Port Pool */ + spin_lock_irqsave(&v_lport->disc.rport_busy_pool_lock, flag); + disc_rport = (struct unf_disc_rport_s *)(disc_mgr->disc_pool_add); + for (i = 0; i < max_login; i++) { + /* Add tail to list_disc_Rport_pool */ + list_add_tail(&disc_rport->entry_rport, + &disc_mgr->list_disc_rports_pool); + + disc_rport++; + } + spin_unlock_irqrestore(&v_lport->disc.rport_busy_pool_lock, flag); + + return RETURN_OK; +} + +static void unf_free_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->disc_rport_mgr.disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free disc RPort pool", + v_lport->port_id); + + vfree(disc->disc_rport_mgr.disc_pool_add); + disc->disc_rport_mgr.disc_pool_add = NULL; + } +} + +static int unf_discover_port_info(void *v_arg_in) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR); + + gs_info = (struct unf_disc_gs_event_info *)v_arg_in; + lport = (struct unf_lport_s *)gs_info->lport; + rport = (struct unf_rport_s *)gs_info->rport; + + switch (gs_info->entype) { + case UNF_DISC_GET_PORT_NAME: + ret = unf_send_gpn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)", + lport->nport_id, gs_info->rport_id); + unf_rcv_gpn_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_FEATURE: + ret = unf_send_gff_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature", + lport->port_id, gs_info->rport_id); + + unf_rcv_gff_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_NODE_NAME: + ret = unf_send_gnn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + 
"[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)", + lport->port_id, gs_info->rport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(lport, rport, + gs_info->rport_id); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Send GS packet type(0x%x) is unknown", + gs_info->entype); + } + + kfree(gs_info); + + return (int)ret; +} + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_manage_info_s *disc_info = NULL; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x654, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if (lport->link_up == UNF_PORT_LINK_DOWN) + return RETURN_OK; + + root_lport = lport->root_lport; + disc_info = &root_lport->disc.disc_thread_info; + + if (disc_info->b_thread_exit == UNF_TRUE) + return RETURN_OK; + + gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC); + if (!gs_info) + return UNF_RETURN_ERROR; + + gs_info->entype = v_en_type; + gs_info->lport = v_lport; + gs_info->rport = v_sns_port; + gs_info->rport_id = v_nport_id; + + INIT_LIST_HEAD(&gs_info->list_entry); + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_add_tail(&gs_info->list_entry, &disc_info->list_head); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); + wake_up_process(disc_info->data_thread); + return RETURN_OK; +} + +static int unf_disc_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flags = 0; + struct unf_disc_s *disc = (struct unf_disc_s *)v_arg; + struct unf_disc_manage_info_s *disc_info = &disc->disc_thread_info; + + UNF_REFERNCE_VAR(v_arg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "Port(0x%x) enter discovery thread.", + disc->lport->port_id); + + while (!kthread_should_stop()) { + if (disc_info->b_thread_exit == UNF_TRUE) + break; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flags); + if ((list_empty(&disc_info->list_head) == UNF_TRUE) || + (atomic_read(&disc_info->disc_contrl_size) == 0)) { + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&disc_info->list_head)->next; + list_del_init(node); + gs_info = list_entry(node, + struct unf_disc_gs_event_info, + list_entry); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + unf_discover_port_info(gs_info); + } + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, + UNF_MAJOR, + "Port(0x%x) discovery thread over.", disc->lport->port_id); + + return RETURN_OK; +} + +void unf_flush_disc_event(void *v_disc, void *v_vport) +{ + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + struct unf_disc_manage_info_s *disc_info = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, v_disc, return); + + disc_info = &disc->disc_thread_info; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_for_each_safe(list, list_tmp, &disc_info->list_head) { + gs_info = list_entry(list, struct 
unf_disc_gs_event_info, + list_entry); + + if (!v_vport || gs_info->lport == v_vport) { + list_del_init(&gs_info->list_entry); + kfree(gs_info); + } + } + + if (!v_vport) + atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); + + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); +} + +void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + lport = lport->root_lport; + UNF_CHECK_VALID(0x2249, UNF_TRUE, lport, return); + + if (atomic_read(&lport->disc.disc_thread_info.disc_contrl_size) == + UNF_MAX_GS_SEND_NUM) + return; + + if (v_cmnd == NS_GPN_ID || v_cmnd == NS_GNN_ID || v_cmnd == NS_GFF_ID) + atomic_inc(&lport->disc.disc_thread_info.disc_contrl_size); +} + +static void unf_destroy_disc_thread(void *v_disc) +{ + struct unf_disc_manage_info_s *disc_info = NULL; + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, disc, return); + + disc_info = &disc->disc_thread_info; + + disc_info->b_thread_exit = UNF_TRUE; + unf_flush_disc_event(disc, NULL); + + wake_up_process(disc_info->data_thread); + kthread_stop(disc_info->data_thread); + disc_info->data_thread = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) destroy discovery thread succeed.", + disc->lport->port_id); +} + +static unsigned int unf_create_disc_thread(void *v_disc) +{ + struct unf_disc_manage_info_s *disc_info = NULL; + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, disc, return UNF_RETURN_ERROR); + + /* If the thread cannot be found, apply for a new thread. */ + disc_info = &disc->disc_thread_info; + + memset(disc_info, 0, sizeof(struct unf_disc_manage_info_s)); + + INIT_LIST_HEAD(&disc_info->list_head); + spin_lock_init(&disc_info->disc_event_list_lock); + atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); + + disc_info->b_thread_exit = UNF_FALSE; + disc_info->data_thread = + kthread_create(unf_disc_event_process, disc, + "%x_DiscT", disc->lport->port_id); + + if (IS_ERR(disc_info->data_thread) || !disc_info->data_thread) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) creat discovery thread(0x%p) unsuccessful.", + disc->lport->port_id, disc_info->data_thread); + + return UNF_RETURN_ERROR; + } + + wake_up_process(disc_info->data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) creat discovery thread succeed.", + disc->lport->port_id); + + return RETURN_OK; +} + +static void unf_disc_ref_cnt_dec(struct unf_disc_s *v_disc) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x669, UNF_TRUE, v_disc, return); + + spin_lock_irqsave(&v_disc->rport_busy_pool_lock, flags); + if (atomic_dec_and_test(&v_disc->disc_ref_cnt)) { + if (v_disc->disc_completion) + complete(v_disc->disc_completion); + } + spin_unlock_irqrestore(&v_disc->rport_busy_pool_lock, flags); +} + +static void unf_lport_disc_timeout(struct work_struct *v_work) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + enum unf_disc_state_e en_state = UNF_DISC_ST_END; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x675, UNF_TRUE, v_work, return); + + disc = container_of(v_work, struct unf_disc_s, disc_work.work); + if (!disc) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Get discover pointer failed"); + + return; + } + + lport 
= disc->lport; + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Find Port by discovery work failed"); + + unf_disc_ref_cnt_dec(disc); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + en_state = disc->en_states; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric RPort failed", + lport->port_id); + + unf_disc_ref_cnt_dec(disc); + return; + } + + switch (en_state) { + case UNF_DISC_ST_START: + break; + + case UNF_DISC_ST_GIDPT_WAIT: + (void)unf_send_gid_pt(lport, rport); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + (void)unf_send_gid_ft(lport, rport); + break; + + case UNF_DISC_ST_END: + break; + + default: + break; + } + + unf_disc_ref_cnt_dec(disc); +} + +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x666, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + disc = &v_lport->disc; + disc->max_retry_count = UNF_DISC_RETRY_TIMES; + disc->retry_count = 0; + disc->disc_flag = UNF_DISC_NONE; + INIT_LIST_HEAD(&disc->list_busy_rports); /* busy RPort pool list */ + /* delete RPort pool list */ + INIT_LIST_HEAD(&disc->list_delete_rports); + /* destroy RPort pool list */ + INIT_LIST_HEAD(&disc->list_destroy_rports); + spin_lock_init(&disc->rport_busy_pool_lock); + + disc->disc_rport_mgr.disc_pool_add = NULL; + /* free disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool); + /* busy disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rport_busy); + + disc->disc_completion = NULL; + disc->lport = v_lport; + INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout); + disc->unf_disc_temp.pfn_unf_disc_start = unf_disc_start; + disc->unf_disc_temp.pfn_unf_disc_stop = unf_disc_stop; + disc->unf_disc_temp.pfn_unf_disc_callback = unf_disc_callback; + atomic_set(&disc->disc_ref_cnt, 0); + + /* Init RSCN Manager */ + ret = unf_init_rscn_mgr(v_lport); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + if (v_lport != v_lport->root_lport) + return ret; + + ret = unf_create_disc_thread(disc); + if (ret != RETURN_OK) { + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free Pool */ + ret = unf_init_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free disc Pool */ + ret = unf_init_disc_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_free_rport_pool(v_lport); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_wait_disc_complete(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long long time_out = 0; + + struct completion disc_completion = + COMPLETION_INITIALIZER(disc_completion); + + disc = &v_lport->disc; + + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, &disc->disc_work, + "Disc_work"); + if (ret == RETURN_OK) + unf_disc_ref_cnt_dec(disc); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (atomic_read(&disc->disc_ref_cnt) != 0) { + disc->disc_completion = &disc_completion; + wait = UNF_TRUE; + } + 
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for discover completion(0x%lx)", + v_lport->port_id, jiffies); + + time_out = wait_for_completion_timeout( + disc->disc_completion, + msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); + if (time_out == 0) + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for discover completion end(0x%lx)", + v_lport->port_id, jiffies); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + disc->disc_completion = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } +} + +void unf_disc_mgr_destroy(void *v_lport) +{ + struct unf_disc_s *disc = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x672, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + disc = &lport->disc; + disc->retry_count = 0; + disc->unf_disc_temp.pfn_unf_disc_start = NULL; + disc->unf_disc_temp.pfn_unf_disc_stop = NULL; + disc->unf_disc_temp.pfn_unf_disc_callback = NULL; + + unf_free_disc_rport_pool(lport); + unf_destroy_rscn_mgr(lport); + unf_wait_disc_complete(lport); + + if (lport != lport->root_lport) + return; + + unf_destroy_disc_thread(disc); + unf_free_rport_pool(lport); + lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR; +} + +void unf_disc_error_recovery(void *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x673, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort failed", + lport->port_id); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + + /* Delay work is pending */ + if (delayed_work_pending(&disc->disc_work)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) disc_work is running and do nothing", + lport->port_id); + return; + } + + /* Continue to retry */ + if (disc->retry_count < disc->max_retry_count) { + disc->retry_count++; + delay = (unsigned long)lport->ed_tov; + + if (queue_delayed_work(unf_work_queue, &disc->disc_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + atomic_inc(&disc->disc_ref_cnt); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } else { + /* Go to next stage */ + if (disc->en_states == UNF_DISC_ST_GIDPT_WAIT) { + /* GID_PT_WAIT --->>> Send GID_FT */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + while ((ret != RETURN_OK) && + (disc->retry_count < disc->max_retry_count)) { + ret = unf_send_gid_ft(lport, rport); + disc->retry_count++; + } + } else if (disc->en_states == UNF_DISC_ST_GIDFT_WAIT) { + /* GID_FT_WAIT --->>> Send LOGO */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } +} + +enum unf_disc_state_e unf_disc_stat_start(enum unf_disc_state_e v_old_state, 
+ enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_NORMAL_ENTER) + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + else + en_next_state = v_old_state; + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_pt_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_ft_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_end(enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_LINKDOWN) + en_next_state = UNF_DISC_ST_START; + else + en_next_state = v_old_state; + + return en_next_state; +} + +void unf_disc_state_ma(struct unf_lport_s *v_lport, + enum unf_disc_event_e v_en_event) +{ + struct unf_disc_s *disc = NULL; + enum unf_disc_state_e en_old_state = UNF_DISC_ST_START; + enum unf_disc_state_e en_next_state = UNF_DISC_ST_START; + + UNF_CHECK_VALID(0x674, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + en_old_state = disc->en_states; + + switch (disc->en_states) { + case UNF_DISC_ST_START: + en_next_state = unf_disc_stat_start(en_old_state, v_en_event); + break; + + case UNF_DISC_ST_GIDPT_WAIT: + en_next_state = unf_disc_stat_gid_pt_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + en_next_state = unf_disc_stat_gid_ft_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_END: + en_next_state = unf_disc_stat_end(en_old_state, v_en_event); + break; + + default: + en_next_state = en_old_state; + break; + } + + unf_set_disc_state(disc, en_next_state); +} diff --git a/drivers/scsi/huawei/hifc/unf_disc.h b/drivers/scsi/huawei/hifc/unf_disc.h new file mode 100644 index 0000000000000000000000000000000000000000..45fc3e01197496e89843205fab51a700e325f84d --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_DISC_H__ +#define __UNF_DISC_H__ + +#define UNF_DISC_RETRY_TIMES 3 +#define UNF_DISC_NONE 0 +#define UNF_DISC_FABRIC 1 +#define UNF_DISC_LOOP 2 + +enum unf_disc_state_e { + UNF_DISC_ST_START = 0x3000, + UNF_DISC_ST_GIDPT_WAIT, + UNF_DISC_ST_GIDFT_WAIT, + UNF_DISC_ST_END +}; + +enum unf_disc_event_e { + UNF_EVENT_DISC_NORMAL_ENTER = 0x8000, + UNF_EVENT_DISC_FAILED = 0x8001, + 
UNF_EVENT_DISC_SUCCESS = 0x8002, + UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003, + UNF_EVENT_DISC_LINKDOWN = 0x8004 +}; + +enum unf_disc_type_e { + UNF_DISC_GET_PORT_NAME = 0, + UNF_DISC_GET_NODE_NAME, + UNF_DISC_GET_FEATURE +}; + +struct unf_disc_gs_event_info { + void *lport; + void *rport; + unsigned int rport_id; + enum unf_disc_type_e entype; + struct list_head list_entry; +}; + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type); + +void unf_flush_disc_event(void *v_disc, void *v_vport); +void unf_disc_error_recovery(void *v_lport); +void unf_disc_mgr_destroy(void *v_lport); +void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_event.c b/drivers/scsi/huawei/hifc/unf_event.c new file mode 100644 index 0000000000000000000000000000000000000000..205824b633a41d65b0b147cb5a0d2c49a94a91f2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" + +struct unf_event_list fc_event_list; +struct unf_global_event_queue global_event_queue; + +/* Max global event node */ +#define UNF_MAX_GLOBAL_ENENT_NODE 24 + +unsigned int unf_init_event_msg(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int i; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x770, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + event_mgr = &v_lport->event_mgr; + + /* Get and Initial Event Node resource */ + event_mgr->pmem_add = + vmalloc((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report)); + if (!event_mgr->pmem_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate event manager failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(event_mgr->pmem_add, 0, + ((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report))); + + event_node = (struct unf_cm_event_report *)(event_mgr->pmem_add); + + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + for (i = 0; i < event_mgr->free_event_count; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + event_node++; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + return RETURN_OK; +} + +static void unf_del_eventcenter(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + + UNF_CHECK_VALID(0x771, UNF_TRUE, v_lport, return); + + event_mgr = &v_lport->event_mgr; + event_mgr->pfn_unf_get_free_event = NULL; + event_mgr->pfn_unf_release_event = NULL; + event_mgr->pfn_unf_post_event = NULL; +} + +void unf_init_event_node(struct unf_cm_event_report *v_event_node) +{ + UNF_CHECK_VALID(0x776, UNF_TRUE, v_event_node, return); + + v_event_node->event = UNF_EVENT_TYPE_REQUIRE; + v_event_node->event_asy_flag = UNF_EVENT_ASYN; + v_event_node->delay_times = 0; + v_event_node->para_in = NULL; + v_event_node->para_out = NULL; + v_event_node->result = 0; + v_event_node->lport = NULL; + v_event_node->pfn_unf_event_task = NULL; + v_event_node->pfn_unf_event_recovery_strategy = NULL; + v_event_node->pfn_unf_event_alarm_strategy = NULL; +} + +struct unf_cm_event_report *unf_get_free_event_node(void *v_lport) +{ + struct 
unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + struct list_head *list_node = NULL; + struct unf_lport_s *root_lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x777, UNF_TRUE, v_lport, return NULL); + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + + if (unlikely(atomic_read(&root_lport->port_no_operater_flag) == + UNF_LPORT_NOP)) + return NULL; + + /* Get EventMgr from Lport */ + event_mgr = &root_lport->event_mgr; + + /* Get free node free pool */ + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + if (list_empty(&event_mgr->list_free_event)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) have no event node anymore", + root_lport->port_id); + + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + return NULL; + } + + list_node = (&event_mgr->list_free_event)->next; + list_del(list_node); + event_mgr->free_event_count--; + event_node = list_entry(list_node, struct unf_cm_event_report, + list_entry); + + /* Initial event node */ + unf_init_event_node(event_node); + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + return event_node; +} + +void unf_check_event_mgr_status(struct unf_event_mgr *v_event_mgr) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x773, UNF_TRUE, v_event_mgr, return); + + spin_lock_irqsave(&v_event_mgr->port_event_lock, flag); + if ((v_event_mgr->emg_completion) && + (v_event_mgr->free_event_count == UNF_MAX_EVENT_NODE)) { + complete(v_event_mgr->emg_completion); + } + spin_unlock_irqrestore(&v_event_mgr->port_event_lock, flag); +} + +void unf_release_event(void *v_lport, void *v_event_node) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x778, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x779, UNF_TRUE, v_event_node, return); + + event_node = (struct unf_cm_event_report *)v_event_node; + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + event_mgr = &root_lport->event_mgr; + + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + event_mgr->free_event_count++; + unf_init_event_node(event_node); + list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + unf_check_event_mgr_status(event_mgr); +} + +void unf_post_event(void *v_lport, void *v_event_node) +{ + struct unf_cm_event_report *event_node = NULL; + struct unf_chip_manage_info_s *card_thread_info = NULL; + struct unf_lport_s *root_lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x780, UNF_TRUE, v_event_node, return); + event_node = (struct unf_cm_event_report *)v_event_node; + UNF_REFERNCE_VAR(v_lport); + + /* If null, post to global event center */ + if (!v_lport) { + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags); + fc_event_list.list_num++; + list_add_tail(&event_node->list_entry, + &fc_event_list.list_head); + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + + wake_up_process(event_thread); + } else { + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + card_thread_info = root_lport->chip_info; + + /* Post to global event center */ + if (!card_thread_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_WARN, + "[warn]Port(0x%x) has strange event with type(0x%x)", + root_lport->nport_id, event_node->event); + + 
spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, + flags); + fc_event_list.list_num++; + list_add_tail(&event_node->list_entry, + &fc_event_list.list_head); + spin_unlock_irqrestore( + &fc_event_list.fc_eventlist_lock, + flags); + + wake_up_process(event_thread); + } else { + spin_lock_irqsave( + &card_thread_info->chip_event_list_lock, + flags); + card_thread_info->list_num++; + list_add_tail(&event_node->list_entry, + &card_thread_info->list_head); + spin_unlock_irqrestore( + &card_thread_info->chip_event_list_lock, + flags); + + wake_up_process(card_thread_info->data_thread); + } + } +} + +unsigned int unf_init_event_center(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x772, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Initial Disc manager */ + event_mgr = &lport->event_mgr; + event_mgr->free_event_count = UNF_MAX_EVENT_NODE; + event_mgr->pfn_unf_get_free_event = unf_get_free_event_node; + event_mgr->pfn_unf_release_event = unf_release_event; + event_mgr->pfn_unf_post_event = unf_post_event; + + INIT_LIST_HEAD(&event_mgr->list_free_event); + spin_lock_init(&event_mgr->port_event_lock); + event_mgr->emg_completion = NULL; + + ret = unf_init_event_msg(lport); + return ret; +} + +void unf_wait_event_mgr_complete(struct unf_event_mgr *v_event_mgr) +{ + struct unf_event_mgr *event_mgr = NULL; + int wait = UNF_FALSE; + unsigned long mg_flag = 0; + + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + UNF_CHECK_VALID(0x774, UNF_TRUE, v_event_mgr, return); + event_mgr = v_event_mgr; + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + if (event_mgr->free_event_count != UNF_MAX_EVENT_NODE) { + event_mgr->emg_completion = &fc_event_completion; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); + + if (wait == UNF_TRUE) + wait_for_completion(event_mgr->emg_completion); + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + event_mgr->emg_completion = NULL; + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); +} + +unsigned int unf_event_center_destroy(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + unsigned long list_lock_flag = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x775, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + event_mgr = &lport->event_mgr; + + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, list_lock_flag); + if (!list_empty(&fc_event_list.list_head)) { + list_for_each_safe(list, list_tmp, &fc_event_list.list_head) { + event_node = list_entry(list, + struct unf_cm_event_report, + list_entry); + if (lport == event_node->lport) { + list_del_init(&event_node->list_entry); + if (event_node->event_asy_flag == + UNF_EVENT_SYN) { + event_node->result = UNF_RETURN_ERROR; + complete(&event_node->event_comp); + } + + spin_lock_irqsave(&event_mgr->port_event_lock, + flag); + event_mgr->free_event_count++; + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + spin_unlock_irqrestore( + &event_mgr->port_event_lock, flag); + } + } + } + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + list_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, 
+ "[info]Port(0x%x) begin to wait event", lport->port_id); + unf_wait_event_mgr_complete(event_mgr); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait event process end", lport->port_id); + + unf_del_eventcenter(lport); + vfree(event_mgr->pmem_add); + event_mgr->pmem_add = NULL; + lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER; + + return ret; +} + +static void unf_procee_asyn_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = (struct unf_lport_s *)v_event_node->lport; + + UNF_CHECK_VALID(0x782, UNF_TRUE, lport, return); + if (v_event_node->pfn_unf_event_task) + ret = (unsigned int) + v_event_node->pfn_unf_event_task(v_event_node->para_in, + v_event_node->para_out); + + if (lport->event_mgr.pfn_unf_release_event) + lport->event_mgr.pfn_unf_release_event(lport, v_event_node); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Port(0x%x) handle event(0x%x) failed", + lport->port_id, v_event_node->event); + } + + UNF_REFERNCE_VAR(ret); +} + +void unf_release_global_event(void *v_event_node) +{ + unsigned long flag = 0; + struct unf_cm_event_report *event_node = NULL; + + UNF_CHECK_VALID(0x784, UNF_TRUE, v_event_node, return); + event_node = (struct unf_cm_event_report *)v_event_node; + unf_init_event_node(event_node); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + global_event_queue.list_number++; + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); +} + +void unf_handle_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int event = 0; + unsigned int event_asy_flag = UNF_EVENT_ASYN; + + UNF_CHECK_VALID(0x781, UNF_TRUE, v_event_node, return); + UNF_REFERNCE_VAR(ret); + UNF_REFERNCE_VAR(event); + + event = v_event_node->event; + event_asy_flag = v_event_node->event_asy_flag; + + switch (event_asy_flag) { + case UNF_EVENT_SYN: /* synchronous event node */ + case UNF_GLOBAL_EVENT_SYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + v_event_node->result = ret; + complete(&v_event_node->event_comp); + break; + case UNF_EVENT_ASYN: /* asynchronous event node */ + unf_procee_asyn_event(v_event_node); + break; + case UNF_GLOBAL_EVENT_ASYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + unf_release_global_event(v_event_node); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_EVENT, UNF_WARN, + "[warn]handle global event(0x%x) failed", + event); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Unknown event(0x%x)", event); + break; + } +} + +unsigned int unf_init_global_event_msg(void) +{ + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + INIT_LIST_HEAD(&global_event_queue.global_eventlist); + spin_lock_init(&global_event_queue.global_eventlist_lock); + global_event_queue.list_number = 0; + + global_event_queue.global_event_add = + vmalloc(UNF_MAX_GLOBAL_ENENT_NODE * + sizeof(struct unf_cm_event_report)); + if (!global_event_queue.global_event_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can't allocate global event queue"); + + return UNF_RETURN_ERROR; + } + memset(global_event_queue.global_event_add, 0, + (sizeof(struct unf_cm_event_report) * + UNF_MAX_GLOBAL_ENENT_NODE)); + event_node = (struct unf_cm_event_report *) + (global_event_queue.global_event_add); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + for (i = 0; i < UNF_MAX_GLOBAL_ENENT_NODE; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + global_event_queue.list_number++; + event_node++; + } + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + return ret; +} + +void unf_destroy_global_event_msg(void) +{ + if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_CRITICAL, + "[warn]Global event release not complete with remain nodes(0x%x)", + global_event_queue.list_number); + } + + vfree(global_event_queue.global_event_add); +} + +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)) +{ + struct list_head *list_node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x783, UNF_TRUE, pfn_unf_event_task, + return UNF_RETURN_ERROR); + + if ((v_event_asy_flag != UNF_GLOBAL_EVENT_ASYN) && + (v_event_asy_flag != UNF_GLOBAL_EVENT_SYN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Event async flag(0x%x) abnormity", + v_event_asy_flag); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + if (list_empty(&global_event_queue.global_eventlist)) { + spin_unlock_irqrestore( + &global_event_queue.global_eventlist_lock, flag); + + return UNF_RETURN_ERROR; + } + + list_node = (&global_event_queue.global_eventlist)->next; + list_del_init(list_node); + global_event_queue.list_number--; + event_node = list_entry(list_node, struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + /* Initial global event */ + unf_init_event_node(event_node); + init_completion(&event_node->event_comp); + event_node->event_asy_flag = v_event_asy_flag; + event_node->pfn_unf_event_task = pfn_unf_event_task; + event_node->para_in = (void *)v_para; + event_node->para_out = NULL; + + unf_post_event(NULL, event_node); + + if (v_event_asy_flag == UNF_GLOBAL_EVENT_SYN) { + /* must wait for complete */ + wait_for_completion(&event_node->event_comp); + ret = event_node->result; + unf_release_global_event(event_node); + } else { + ret = RETURN_OK; + } + + return ret; +} + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x785, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x786, UNF_TRUE, + lport->event_mgr.pfn_unf_get_free_event, + return NULL); + + return lport->event_mgr.pfn_unf_get_free_event((void *)lport); +} + +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x787, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x788, UNF_TRUE, v_event, return); + + UNF_CHECK_VALID(0x789, UNF_TRUE, lport->event_mgr.pfn_unf_post_event, + return); + UNF_CHECK_VALID(0x790, UNF_TRUE, v_event, 
return); + + lport->event_mgr.pfn_unf_post_event((void *)lport, v_event); +} + diff --git a/drivers/scsi/huawei/hifc/unf_event.h b/drivers/scsi/huawei/hifc/unf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..4f78d1c538b8771a914fe3cda04ca83d6e6b86bf --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_EVENT_H__ +#define __UNF_EVENT_H__ +#include "hifc_knl_adp.h" + +enum unf_poll_flag { + UNF_POLL_CHIPERROR_FLAG = 0, /* CHIP ERROR POLL */ + UNF_POLL_ERROR_CODE, /* CODE ERROR POLL */ + UNF_POLL_SFP_FLAG, /* SFP POLL */ + UNF_POLL_BUTT +}; + +#define UNF_MAX_EVENT_NODE 256 + +enum unf_event_type { + UNF_EVENT_TYPE_ALARM = 0, /* Alarm */ + UNF_EVENT_TYPE_REQUIRE, /* Require */ + UNF_EVENT_TYPE_RECOVERY, /* Recovery */ + UNF_EVENT_TYPE_BUTT +}; + +struct unf_cm_event_report { + /* event type */ + unsigned int event; + + /* ASY flag */ + unsigned int event_asy_flag; + + /* Delay times,must be async event */ + unsigned int delay_times; + + struct list_head list_entry; + + void *lport; + + /* parameter */ + void *para_in; + void *para_out; + unsigned int result; + + /* recovery strategy */ + int (*pfn_unf_event_task)(void *v_argin, void *v_argout); + + /* recovery strategy */ + int (*pfn_unf_event_recovery_strategy)(void *); + + /* alarm strategy */ + int (*pfn_unf_event_alarm_strategy)(void *); + + struct completion event_comp; +}; + +struct unf_event_mgr { + spinlock_t port_event_lock; + unsigned int free_event_count; + + struct list_head list_free_event; + + struct completion *emg_completion; + + void *pmem_add; + struct unf_cm_event_report *(*pfn_unf_get_free_event)(void *v_lport); + void (*pfn_unf_release_event)(void *v_lport, void *v_event_node); + void (*pfn_unf_post_event)(void *v_lport, void *v_event_node); +}; + +struct unf_global_event_queue { + void *global_event_add; + unsigned int list_number; + struct list_head global_eventlist; + spinlock_t global_eventlist_lock; +}; + +struct unf_event_list { + struct list_head list_head; + spinlock_t fc_eventlist_lock; + unsigned int list_num; /* list node number */ +}; + +void unf_handle_event(struct unf_cm_event_report *v_event_node); +unsigned int unf_init_global_event_msg(void); +void unf_destroy_global_event_msg(void); +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)); + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport); +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event); +unsigned int unf_event_center_destroy(void *v_lport); +unsigned int unf_init_event_center(void *v_lport); + +extern struct task_struct *event_thread; +extern struct unf_global_event_queue global_event_queue; +extern struct unf_event_list fc_event_list; +#endif diff --git a/drivers/scsi/huawei/hifc/unf_exchg.c b/drivers/scsi/huawei/hifc/unf_exchg.c new file mode 100644 index 0000000000000000000000000000000000000000..f3234a9edc2261decdde14e73e9531f175abdc08 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.c @@ -0,0 +1,3632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_service.h" +#include "unf_io.h" + +#define 
UNF_DEL_XCHG_TIMER_SAFE(v_xchg) \ + do { \ + if (cancel_delayed_work(&((v_xchg)->timeout_work))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, \ + UNF_MAJOR, \ + "Exchange(0x%p) is free, but timer is pending.", \ + v_xchg); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, \ + UNF_CRITICAL, \ + "Exchange(0x%p) is free, but timer is running.", \ + v_xchg); \ + } \ + } while (0) + +#define UNF_XCHG_IS_ELS_REPLY(v_xchg) \ + ((((v_xchg)->cmnd_code & 0x0ffff) == ELS_ACC) || \ + (((v_xchg)->cmnd_code & 0x0ffff) == ELS_RJT)) + +static struct unf_ioflow_id_s io_stage[] = { + { "XCHG_ALLOC" }, + { "TGT_RECEIVE_ABTS" }, + { "TGT_ABTS_DONE" }, + { "TGT_IO_SRR" }, + { "SFS_RESPONSE" }, + { "SFS_TIMEOUT" }, + { "INI_SEND_CMND" }, + { "INI_RESPONSE_DONE" }, + { "INI_EH_ABORT" }, + { "INI_EH_DEVICE_RESET" }, + { "INI_EH_BLS_DONE" }, + { "INI_IO_TIMEOUT" }, + { "INI_REQ_TIMEOUT" }, + { "XCHG_CANCEL_TIMER" }, + { "XCHG_FREE_XCHG" }, + { "SEND_ELS" }, + { "IO_XCHG_WAIT" }, +}; + +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport) +{ + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned int i = 0; + + UNF_CHECK_VALID(0x850, UNF_TRUE, v_lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Can't find LPort(0x%x) MgrIdx %u exchange manager.", + v_lport->port_id, i); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + list_for_each_safe(node, next_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); + if (INI_IO_STATE_UPTASK & xchg->io_state && + (atomic_read(&xchg->ref_cnt) > 0)) { + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + up(&xchg->task_sema); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).", + xchg, xchg->hot_pool_tag); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + } + + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } +} + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + UNF_CHECK_VALID(0x852, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in + * different phases. 
Therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, + v_rport, + v_sid, v_did, + v_extra_io_state); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, v_rport, + v_did, v_sid, + v_extra_io_state); + } +} + +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + UNF_CHECK_VALID(0x990, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in different + * phases, therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_sid, + v_did); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_did, + v_sid); + } +} + +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + void (*unf_xchg_abort_by_lun)(void*, void*, unsigned long long, + void*, int) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + unf_xchg_abort_by_lun = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun; + if (unf_xchg_abort_by_lun) { + unf_xchg_abort_by_lun((void *)v_lport, (void *)v_rport, + v_lun_id, v_tm_xchg, + v_abort_all_lun_flag); + } +} + +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + void (*pfn_unf_xchg_abort_by_session)(void*, void*) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + pfn_unf_xchg_abort_by_session = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session; + if (pfn_unf_xchg_abort_by_session) { + pfn_unf_xchg_abort_by_session((void *)v_lport, + (void *)v_rport); + } +} + +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x855, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template. 
*/ + UNF_CHECK_VALID(0x856, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_get_free_and_init), + return NULL); + + return xch_mgr_temp->pfn_unf_xchg_get_free_and_init(lport, v_xchg_type, + INVALID_VALUE16); +} + +void unf_cm_free_xchg(void *v_lport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x857, UNF_TRUE, unlikely(v_lport), return); + UNF_CHECK_VALID(0x858, UNF_TRUE, unlikely(v_xchg), return); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + UNF_CHECK_VALID(0x859, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_release), + return); + + /* + * unf_cm_free_xchg --->>> unf_free_xchg + * --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg + * --->>> unf_done_ini_xchg + */ + xch_mgr_temp->pfn_unf_xchg_release(v_lport, v_xchg); +} + +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x860, UNF_TRUE, unlikely(v_lport), return NULL); + + /* Find the corresponding Lport Xchg management template */ + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID(0x861, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_tag), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_tag(v_lport, + v_hot_pool_tag); +} + +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_ox_id, + unsigned int v_oid) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x862, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template */ + UNF_CHECK_VALID(0x863, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_id), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_id(v_lport, v_ox_id, + v_oid); +} + +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x864, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID( + 0x865, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn), + return NULL); + + xchg = + (struct unf_xchg_s *)xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn( + lport, v_command_sn, + v_world_id); + + return xchg; +} + +static void unf_free_all_rsp_pages(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int buff_index; + + UNF_CHECK_VALID(0x868, UNF_TRUE, v_xchg_mgr, return); + + if (v_xchg_mgr->rsp_buf_list.buflist) { + for (buff_index = 0; buff_index < + v_xchg_mgr->rsp_buf_list.buf_num; + buff_index++) { + if (v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr) { + dma_free_coherent( + &v_xchg_mgr->hot_pool->lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].paddr); + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(v_xchg_mgr->rsp_buf_list.buflist); + v_xchg_mgr->rsp_buf_list.buflist = NULL; + } +} + +static unsigned int unf_init_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s 
*v_xchg_mgr, + unsigned int v_xchg_sum, + unsigned int v_sfs_sum) +{ + struct unf_xchg_s *xchg_mem = NULL; + union unf_sfs_u *sfs_mm_start = NULL; + dma_addr_t sfs_dma_addr; + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + unsigned int rsp_iu_nums_per_page = 0; + unsigned int rsp_iu_size = 0; + unsigned long flags = 0; + unsigned int xchg_sum = 0; + unsigned int i = 0; + unsigned int rsp_iu_loop = 0; + unsigned int buf_num; + unsigned int buf_size; + unsigned int curbuf_idx = 0; + void *page_addr; + dma_addr_t phy_addr; + + UNF_CHECK_VALID(0x871, UNF_TRUE, v_sfs_sum <= v_xchg_sum, + return UNF_RETURN_ERROR); + + free_pool = &v_xchg_mgr->free_pool; + xchg_sum = v_xchg_sum; + xchg_mem = v_xchg_mgr->fcp_mm_start; + xchg = xchg_mem; + + sfs_mm_start = (union unf_sfs_u *)v_xchg_mgr->sfs_mm_start; + sfs_dma_addr = v_xchg_mgr->sfs_phy_addr; + /* 1. Allocate the SFS UNION memory to each SFS XCHG + * and mount the SFS XCHG to the corresponding FREE linked list + */ + free_pool->total_sfs_xchg = 0; + free_pool->sfs_xchg_sum = v_sfs_sum; + for (i = 0; i < v_sfs_sum; i++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = + sizeof(*sfs_mm_start); + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_sfs_xchg_list); + free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + sfs_mm_start++; + sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u); + xchg++; + } + + /* + * 2. Allocate RSP IU memory for each IO XCHG and mount IO + * XCHG to the corresponding FREE linked list + * The memory size of each RSP IU is rsp_iu_size. + */ + rsp_iu_size = (UNF_FCPRSP_CTL_LEN + UNF_MAX_RSP_INFO_LEN + + UNF_SCSI_SENSE_DATA_LEN); + + buf_size = BUF_LIST_PAGE_SIZE; + if ((xchg_sum - v_sfs_sum) * rsp_iu_size < BUF_LIST_PAGE_SIZE) + buf_size = (xchg_sum - v_sfs_sum) * rsp_iu_size; + + rsp_iu_nums_per_page = buf_size / rsp_iu_size; + buf_num = (xchg_sum - v_sfs_sum) % rsp_iu_nums_per_page ? 
+ (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page + 1 : + (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page; + + v_xchg_mgr->rsp_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->rsp_buf_list.buf_num = buf_num; + v_xchg_mgr->rsp_buf_list.buf_size = buf_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) buff num 0x%x buff size 0x%x", + v_lport->port_id, buf_num, + v_xchg_mgr->rsp_buf_list.buf_size); + + if (!v_xchg_mgr->rsp_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->rsp_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + free_pool->total_fcp_xchg = 0; + for (i = 0, curbuf_idx = 0; curbuf_idx < buf_num; curbuf_idx++) { + page_addr = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + &phy_addr, GFP_KERNEL); + if (!page_addr) + goto free_buff; + + memset(page_addr, 0, v_xchg_mgr->rsp_buf_list.buf_size); + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].vaddr = page_addr; + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].paddr = phy_addr; + + for (rsp_iu_loop = 0; + (rsp_iu_loop < rsp_iu_nums_per_page && + i < xchg_sum - v_sfs_sum); rsp_iu_loop++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + /* alloc dma buffer for fcp_rsp_iu */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, + flags); + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = + (struct unf_fcprsp_iu_s *)page_addr; + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr = + phy_addr; + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_free_xchg_list); + free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, + flags); + + page_addr += rsp_iu_size; + phy_addr += rsp_iu_size; + i++; + xchg++; + } + } + + free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg; + + return RETURN_OK; +free_buff: + unf_free_all_rsp_pages(v_xchg_mgr); + return UNF_RETURN_ERROR; +} + +static unsigned int unf_get_xchg_config_sum(struct unf_lport_s *v_lport, + unsigned int *v_xchg_sum) +{ + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_lport->low_level_func.lport_cfg_items; + + /* It has been checked at the bottom layer. + * Don't need to check it again. 
+ */ + *v_xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io; + if ((*v_xchg_sum / UNF_EXCHG_MGR_NUM) == 0 || + lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).", + v_lport->port_id, *v_xchg_sum, + lport_cfg_items->max_sfs_xchg, + UNF_EXCHG_MGR_NUM); + return UNF_RETURN_ERROR; + } + + if (*v_xchg_sum > (INVALID_VALUE16 - 1)) { + /* If the format of ox_id/rx_id is exceeded, + * this function is not supported + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Exchange num(0x%x) is Too Big.", + v_lport->port_id, *v_xchg_sum); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static void unf_xchg_cancel_timer(void *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + int need_dec_xchg_ref = UNF_FALSE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x874, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (cancel_delayed_work(&xchg->timeout_work)) + need_dec_xchg_ref = UNF_TRUE; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (need_dec_xchg_ref == UNF_TRUE) + unf_xchg_ref_dec(v_xchg, XCHG_CANCEL_TIMER); +} + +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x879, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x880, UNF_TRUE, v_xchg_mgr, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(xchg); + + xchg_mgr = v_xchg_mgr; + lport = v_lport; + + /* hot Xchg */ + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "INI busy :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, "SFS :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + xchg->cmnd_code, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "Destroy list."); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->list_destroy_xchg) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, 
UNF_WARN, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_REFERNCE_VAR(xchg); + UNF_REFERNCE_VAR(lport); +} + +static void unf_delay_work_del_syn(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x884, UNF_TRUE, v_xchg, return); + + xchg = v_xchg; + + /* synchronous release timer */ + if (!cancel_delayed_work_sync(&xchg->timeout_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.", + xchg, xchg->io_state); + } else { + /* The reference count cannot be directly subtracted. + * This prevents the XCHG from being moved to the + * Free linked list when the card is unloaded. + */ + unf_cm_free_xchg(xchg->lport, xchg); + } +} + +static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag) +{ + struct list_head *list = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + + UNF_REFERNCE_VAR(v_done_ini_flag); + UNF_CHECK_VALID(0x887, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(0x888, UNF_TRUE, v_xchg_mgr->hot_pool, return); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->sfs_busylist)) { + list = (&v_xchg_mgr->hot_pool->sfs_busylist)->next; + list_del_init(list); + + /* Prevent the xchg of the sfs from being accessed repeatedly. + * The xchg is first mounted to the destroy linked list. + */ + list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg); + + xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry); + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + unf_delay_work_del_syn(xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).", + xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + unf_cm_free_xchg(xchg->lport, xchg); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr_s *v_xchg_mgr) +{ +#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000 +#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000) + + struct unf_xchg_s *xchg = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr->hot_pool, + return); + + /* In this case, the timer on the destroy linked list is deleted. + * You only need to check whether the timer is released + * at the end of the tgt. 
+ */ + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->list_destroy_xchg)) { + next_xchg_node = + (&v_xchg_mgr->hot_pool->list_destroy_xchg)->next; + xchg = list_entry(next_xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)", + xchg, xchg->xchg_type, xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + + /* This interface can be invoked to ensure that + * the timer is successfully canceled + * or wait until the timer execution is complete + */ + unf_delay_work_del_syn(xchg); + + /* + * If the timer is canceled successfully, delete Xchg + * If the timer has burst, the Xchg may have been released, + * In this case, deleting the Xchg will be failed + */ + unf_cm_free_xchg(xchg->lport, xchg); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + }; + + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static unsigned int unf_free_lport_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ +#define UNF_OS_WAITIO_TIMEOUT (10 * 1000) + + unsigned long free_pool_lock_flags = 0; + int wait = UNF_FALSE; + unsigned int total_xchg = 0; + unsigned int total_xchg_sum = 0; + unsigned int ret = RETURN_OK; + unsigned long long timeout = 0; + + struct completion xchg_mgr_completion = + COMPLETION_INITIALIZER(xchg_mgr_completion); + + UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x882, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x883, UNF_TRUE, v_xchg_mgr->hot_pool, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_lport); + + unf_free_lport_sfs_xchg(v_xchg_mgr, UNF_FALSE); + + /* free INI Mode exchanges belong to L_Port */ + unf_free_lport_ini_xchg(v_xchg_mgr, UNF_FALSE); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg + + v_xchg_mgr->free_pool.total_sfs_xchg; + total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum + + v_xchg_mgr->free_pool.sfs_xchg_sum; + if (total_xchg != total_xchg_sum) { + v_xchg_mgr->free_pool.xchg_mgr_completion = + &xchg_mgr_completion; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for exchange manager completion(%ld) (0x%x:0x%x)", + v_lport->port_id, jiffies, total_xchg, + total_xchg_sum); + + unf_show_all_xchg(v_lport, v_xchg_mgr); + + timeout = wait_for_completion_timeout( + v_xchg_mgr->free_pool.xchg_mgr_completion, + msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT)); + if (timeout == 0) + unf_free_lport_destroy_xchg(v_xchg_mgr); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for exchange manager completion end", + v_lport->port_id); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + v_xchg_mgr->free_pool.xchg_mgr_completion = NULL; + spin_unlock_irqrestore( + 
&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + } + + return ret; +} + +void unf_free_lport_all_xchg(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr; + unsigned int i; + + UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return); + UNF_REFERNCE_VAR(v_lport); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + v_lport->port_id); + + continue; + } + unf_free_lport_sfs_xchg(xchg_mgr, UNF_FALSE); + + /* free INI Mode exchanges belong to L_Port */ + unf_free_lport_ini_xchg(xchg_mgr, UNF_FALSE); + + unf_free_lport_destroy_xchg(xchg_mgr); + } +} + +void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag) +{ + /* + * 1. L_Port destroy + * 2. AC power down + */ + struct list_head *list = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned int up_status = 0; + + UNF_REFERNCE_VAR(v_done_ini_flag); + UNF_CHECK_VALID(0x889, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(0x890, UNF_TRUE, v_xchg_mgr->hot_pool, return); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->ini_busylist)) { + /* for each INI busy_list (exchange) node */ + list = (&v_xchg_mgr->hot_pool->ini_busylist)->next; + + /* Put exchange node to destroy_list, prevent done repeatly */ + list_del_init(list); + list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg); + xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry); + if (atomic_read(&xchg->ref_cnt) <= 0) + continue; + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + unf_delay_work_del_syn(xchg); + + /* In the case of INI done, the command should be set to fail + * to prevent data inconsistency caused by the return of OK + */ + up_status = unf_get_uplevel_cmnd_errcode( + xchg->scsi_cmnd_info.err_code_table, + xchg->scsi_cmnd_info.err_code_table_cout, + UNF_IO_PORT_LOGOUT); + + if (xchg->io_state & INI_IO_STATE_UPABORT) { + /* + * About L_Port destroy or AC power down: + * UP_ABORT ---to--->>> ABORT_Port_Removing + */ + up_status = UNF_IO_ABORT_PORT_REMOVING; + } + + xchg->scsi_cmnd_info.result = up_status; + up(&xchg->task_sema); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)", + xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + unf_cm_free_xchg(xchg->lport, xchg); + + /* go to next INI busy_list (exchange) node */ + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static void unf_free_all_big_sfs(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = v_xchg_mgr; + struct unf_big_sfs_s *big_sfs = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + unsigned int buff_index; + + UNF_CHECK_VALID(0x891, UNF_TRUE, xchg_mgr, return); + + /* Release the free resources in the busy state */ + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + list_for_each_safe(node, next_node, + &xchg_mgr->st_big_sfs_pool.list_busy_pool) { + list_del(node); + list_add_tail(node, &xchg_mgr->st_big_sfs_pool.list_free_pool); + } + + 
list_for_each_safe(node, next_node, + &xchg_mgr->st_big_sfs_pool.list_free_pool) { + list_del(node); + big_sfs = list_entry(node, struct unf_big_sfs_s, + entry_big_sfs); + if (big_sfs->vaddr) + big_sfs->vaddr = NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + if (xchg_mgr->big_sfs_buf_list.buflist) { + for (buff_index = 0; + buff_index < xchg_mgr->big_sfs_buf_list.buf_num; + buff_index++) { + if (xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr) { + kfree(xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr); + xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(xchg_mgr->big_sfs_buf_list.buflist); + xchg_mgr->big_sfs_buf_list.buflist = NULL; + } +} + +static void unf_free_big_sfs_pool(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + UNF_CHECK_VALID(0x892, UNF_TRUE, v_xchg_mgr, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Free Big SFS Pool, Count(0x%x).", + v_xchg_mgr->st_big_sfs_pool.free_count); + + unf_free_all_big_sfs(v_xchg_mgr); + v_xchg_mgr->st_big_sfs_pool.free_count = 0; + + if (v_xchg_mgr->st_big_sfs_pool.big_sfs_pool) { + vfree(v_xchg_mgr->st_big_sfs_pool.big_sfs_pool); + v_xchg_mgr->st_big_sfs_pool.big_sfs_pool = NULL; + } +} + +static void unf_free_xchg_mgr_mem(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int i = 0; + unsigned int xchg_sum = 0; + struct unf_xchg_free_pool_s *free_pool = NULL; + + UNF_CHECK_VALID(0x893, UNF_TRUE, v_xchg_mgr, return); + + xchg_mgr = v_xchg_mgr; + + /* Release the reserved Rsp IU Page */ + unf_free_all_rsp_pages(xchg_mgr); + + unf_free_big_sfs_pool(xchg_mgr); + + /* The sfs is released first, and the XchgMgr is allocated + * by the get free page. + * Therefore, the XchgMgr is compared with the '0' + */ + if (xchg_mgr->sfs_mm_start != 0) { + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + /* Release Xchg first */ + if (xchg_mgr->fcp_mm_start) { + unf_get_xchg_config_sum(v_lport, &xchg_sum); + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + + xchg = xchg_mgr->fcp_mm_start; + for (i = 0; i < xchg_sum; i++) { + if (!xchg) + break; + xchg++; + } + + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + /* release the hot pool */ + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + free_pool = &xchg_mgr->free_pool; + + vfree(xchg_mgr); + + UNF_REFERNCE_VAR(xchg_mgr); + UNF_REFERNCE_VAR(free_pool); +} + +static void unf_free_xchg_mgr(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x894, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x895, UNF_TRUE, v_xchg_mgr, return); + + /* 1. At first, free exchanges for this Exch_Mgr */ + ret = unf_free_lport_xchg(v_lport, v_xchg_mgr); + + /* 2. Delete this Exch_Mgr entry */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_del_init(&v_xchg_mgr->xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* 3. 
free Exch_Mgr memory if necessary */ + if (ret == RETURN_OK) { + /* free memory directly */ + unf_free_xchg_mgr_mem(v_lport, v_xchg_mgr); + } else { + /* Add it to Dirty list */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_add_tail(&v_xchg_mgr->xchg_mgr_entry, + &v_lport->list_dirty_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* Mark dirty flag */ + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY); + } +} + +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x896, UNF_TRUE, v_lport, return); + + /* for each L_Port->Exch_Mgr_List */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + while (!list_empty(&v_lport->list_xchg_mgr_head)) { + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + unf_free_xchg_mgr(v_lport, xchg_mgr); + if (i < UNF_EXCHG_MGR_NUM) + v_lport->p_xchg_mgr[i] = NULL; + + i++; + /* go to next */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR; +} + +static unsigned int unf_init_xchg_mgr(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x897, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr_s)); + + INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry); + xchg_mgr->mgr_type = UNF_XCHG_MGR_FC; + xchg_mgr->min_xid = UNF_XCHG_MIN_XID; + xchg_mgr->max_xid = UNF_XCHG_MAX_XID; + xchg_mgr->fcp_mm_start = NULL; + xchg_mgr->mem_size = sizeof(struct unf_xchg_mgr_s); + return RETURN_OK; +} + +static unsigned int unf_init_xchg_mgr_free_pool( + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x898, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + + free_pool = &xchg_mgr->free_pool; + INIT_LIST_HEAD(&free_pool->list_free_xchg_list); + INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list); + spin_lock_init(&free_pool->xchg_free_pool_lock); + free_pool->fcp_xchg_sum = 0; + free_pool->xchg_mgr_completion = NULL; + + return RETURN_OK; +} + +static unsigned int unf_init_xchg_hot_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned int v_xchg_sum) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x899, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR); + hot_pool = v_hot_pool; + + INIT_LIST_HEAD(&hot_pool->sfs_busylist); + INIT_LIST_HEAD(&hot_pool->ini_busylist); + spin_lock_init(&hot_pool->xchg_hot_pool_lock); + INIT_LIST_HEAD(&hot_pool->list_destroy_xchg); + hot_pool->total_xchges = 0; + hot_pool->total_res_cnt = 0; + hot_pool->wait_state = UNF_FALSE; + hot_pool->lport = v_lport; + + /* Slab Pool Index */ + hot_pool->slab_next_index = 0; + UNF_TOU16_CHECK(hot_pool->slab_total_sum, v_xchg_sum, + return UNF_RETURN_ERROR); + + return RETURN_OK; +} + +static unsigned int unf_alloc_and_init_big_sfs_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int i = 0; + unsigned int size = 0; + unsigned int align_size = 0; + unsigned int npiv_cnt = 0; + struct unf_big_sfs_pool_s *big_sfs_pool = NULL; + struct unf_big_sfs_s *big_sfs_buf = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + 
unsigned int buf_cnt_perhugebuf; + unsigned int alloc_idx; + unsigned int curbuf_idx = 0; + unsigned int curbuf_offset = 0; + + UNF_CHECK_VALID(0x900, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x901, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + big_sfs_pool = &v_xchg_mgr->st_big_sfs_pool; + + INIT_LIST_HEAD(&big_sfs_pool->list_free_pool); + INIT_LIST_HEAD(&big_sfs_pool->list_busy_pool); + spin_lock_init(&big_sfs_pool->big_sfs_pool_lock); + npiv_cnt = v_lport->low_level_func.support_max_npiv_num; + + /* + * The value*6 indicates GID_PT/GID_FT, RSCN, and ECHO + * Another command is received when a command is being responded + * A maximum of 20 resources are reserved for the RSCN. + * During the test, multiple rscn are found. As a result, + * the resources are insufficient and the disc fails. + */ + big_sfs_pool->free_count = (npiv_cnt + 1) * 6 + 20; + big_sfs_buf = (struct unf_big_sfs_s *)vmalloc( + big_sfs_pool->free_count + * sizeof(struct unf_big_sfs_s)); + if (!big_sfs_buf) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Allocate Big SFS buf fail."); + + return UNF_RETURN_ERROR; + } + memset(big_sfs_buf, 0, big_sfs_pool->free_count * + sizeof(struct unf_big_sfs_s)); + v_xchg_mgr->mem_size += + (unsigned int) + (big_sfs_pool->free_count * sizeof(struct unf_big_sfs_s)); + big_sfs_pool->big_sfs_pool = (void *)big_sfs_buf; + + /* + * Use the larger value of sizeof (struct unf_gif_acc_pld_s) and + * sizeof (struct unf_rscn_pld_s) to avoid the icp error.Therefore, + * the value is directly assigned instead of being compared. + */ + size = sizeof(struct unf_gif_acc_pld_s); + align_size = ALIGN(size, PAGE_SIZE); + + buf_total_size = align_size * big_sfs_pool->free_count; + + v_xchg_mgr->big_sfs_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? + BUF_LIST_PAGE_SIZE : buf_total_size; + buf_cnt_perhugebuf = + v_xchg_mgr->big_sfs_buf_list.buf_size / align_size; + buf_num = + big_sfs_pool->free_count % buf_cnt_perhugebuf ? 
+ big_sfs_pool->free_count / buf_cnt_perhugebuf + 1 : + big_sfs_pool->free_count / buf_cnt_perhugebuf; + + v_xchg_mgr->big_sfs_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->big_sfs_buf_list.buf_num = buf_num; + + if (!v_xchg_mgr->big_sfs_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num * + sizeof(struct buff_list_s)); + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr = + kmalloc(v_xchg_mgr->big_sfs_buf_list.buf_size, + GFP_ATOMIC); + if (!v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr) + goto free_buff; + + memset(v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr, + 0, v_xchg_mgr->big_sfs_buf_list.buf_size); + } + + for (i = 0; i < big_sfs_pool->free_count; i++) { + if ((i != 0) && !(i % buf_cnt_perhugebuf)) + curbuf_idx++; + + curbuf_offset = align_size * (i % buf_cnt_perhugebuf); + big_sfs_buf->vaddr = + v_xchg_mgr->big_sfs_buf_list.buflist[curbuf_idx].vaddr + + curbuf_offset; + big_sfs_buf->size = size; + v_xchg_mgr->mem_size += size; + list_add_tail(&big_sfs_buf->entry_big_sfs, + &big_sfs_pool->list_free_pool); + big_sfs_buf++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[EVENT]Allocate BigSfs pool size:%d,uiAlignSize:%d,buf_num:%d,buf_size:%d", + size, align_size, v_xchg_mgr->big_sfs_buf_list.buf_num, + v_xchg_mgr->big_sfs_buf_list.buf_size); + return RETURN_OK; +free_buff: + unf_free_all_big_sfs(v_xchg_mgr); + vfree(big_sfs_buf); + big_sfs_pool->big_sfs_pool = NULL; + return UNF_RETURN_ERROR; +} + +/* + * Function Name : unf_free_one_big_sfs + * Function Description: Put the big sfs memory in xchg back to bigsfspool + * Input Parameters : struct unf_xchg_s * v_xchg + * Output Parameters : N/A + * Return Type : static void + */ +static void unf_free_one_big_sfs(struct unf_xchg_s *v_xchg) +{ + unsigned long flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x902, UNF_TRUE, v_xchg, return); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x903, UNF_TRUE, xchg_mgr, return); + if (!v_xchg->big_sfs_buf) + return; + + if ((v_xchg->cmnd_code != NS_GID_PT) && + (v_xchg->cmnd_code != NS_GID_FT) && + (v_xchg->cmnd_code != ELS_ECHO) && + (UNF_SET_ELS_ACC_TYPE(ELS_ECHO) != v_xchg->cmnd_code) && + (v_xchg->cmnd_code != ELS_RSCN) && + (UNF_SET_ELS_ACC_TYPE(ELS_RSCN) != v_xchg->cmnd_code)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.", + v_xchg, v_xchg->cmnd_code); + } + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + list_del(&v_xchg->big_sfs_buf->entry_big_sfs); + list_add_tail(&v_xchg->big_sfs_buf->entry_big_sfs, + &xchg_mgr->st_big_sfs_pool.list_free_pool); + xchg_mgr->st_big_sfs_pool.free_count++; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).", + v_xchg->big_sfs_buf->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); +} + +static void unf_free_exchg_mgr_info(struct unf_lport_s *v_lport) +{ + unsigned int i; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct 
unf_xchg_mgr_s *xchg_mgr = NULL; + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, &v_lport->list_xchg_mgr_head) { + list_del(node); + xchg_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = v_lport->p_xchg_mgr[i]; + + if (xchg_mgr) { + unf_free_big_sfs_pool(xchg_mgr); + unf_free_all_rsp_pages(xchg_mgr); + + if (xchg_mgr->sfs_mm_start) { + dma_free_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + if (xchg_mgr->fcp_mm_start) { + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + vfree(xchg_mgr); + v_lport->p_xchg_mgr[i] = NULL; + } + } +} + +static unsigned int unf_alloc_and_init_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg_mem = NULL; + void *sfs_mm_start = 0; + dma_addr_t sfs_phy_addr = 0; + unsigned int xchg_sum = 0; + unsigned int sfs_xchg_sum = 0; + unsigned long flags = 0; + unsigned int order = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int slab_num = 0; + unsigned int i = 0; + + UNF_REFERNCE_VAR(order); + /* SFS_EXCH + I/O_EXCH */ + ret = unf_get_xchg_config_sum(v_lport, &xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) can't get Exchange.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* SFS Exchange Sum */ + sfs_xchg_sum = v_lport->low_level_func.lport_cfg_items.max_sfs_xchg / + UNF_EXCHG_MGR_NUM; + + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + slab_num = v_lport->low_level_func.support_max_xid_range / + UNF_EXCHG_MGR_NUM; + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + /* Alloc Exchange Manager */ + xchg_mgr = (struct unf_xchg_mgr_s *) + vmalloc(sizeof(struct unf_xchg_mgr_s)); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Manager Memory Fail.", + v_lport->port_id); + + goto exit; + } + + /* Init Exchange Manager */ + ret = unf_init_xchg_mgr(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Initialize the Exchange Free Pool resource */ + ret = unf_init_xchg_mgr_free_pool(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Allocate memory for Hot Pool and Xchg slab */ + hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Hot Pool Memory Fail.", + v_lport->port_id); + goto free_xchg_mgr; + } + + memset(hot_pool, 0, + sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + + /* Initialize the Exchange Hot Pool resource */ + ret = unf_init_xchg_hot_pool(v_lport, hot_pool, slab_num); + if 
(ret != RETURN_OK) + goto free_hot_pool; + + hot_pool->base += (unsigned short)(i * slab_num); + /* Allocate the memory of all Xchg (IO/SFS) */ + xchg_mem = vmalloc(sizeof(struct unf_xchg_s) * xchg_sum); + if (!xchg_mem) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Memory Fail.", + v_lport->port_id); + goto free_hot_pool; + } + memset(xchg_mem, 0, sizeof(struct unf_xchg_s) * xchg_sum); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_s) * xchg_sum); + + xchg_mgr->hot_pool = hot_pool; + xchg_mgr->fcp_mm_start = xchg_mem; + + /* Allocate the memory used by the SFS Xchg + * to carry the ELS/BLS/GS command and response + */ + xchg_mgr->sfs_mem_size = + (unsigned int)(sizeof(union unf_sfs_u) * sfs_xchg_sum); + + /* Apply for the DMA space for sending sfs frames. + * If the value of DMA32 is less than 4 GB, + * cross-4G problems will not occur + */ + order = (unsigned int)get_order(xchg_mgr->sfs_mem_size); + + sfs_mm_start = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + &sfs_phy_addr, GFP_KERNEL); + if (!sfs_mm_start) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Get Free Pagers Fail, Order(%u).", + v_lport->port_id, order); + goto free_xchg_mem; + } + memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum); + xchg_mgr->mem_size += xchg_mgr->sfs_mem_size; + xchg_mgr->sfs_mm_start = sfs_mm_start; + xchg_mgr->sfs_phy_addr = sfs_phy_addr; + + /* The Xchg is initialized and mounted to the Free Pool */ + ret = unf_init_xchg(v_lport, xchg_mgr, xchg_sum, sfs_xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%u), SFS Exchange number(%u).", + v_lport->port_id, xchg_sum, sfs_xchg_sum); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + /* Apply for the memory used by GID_PT, GID_FT, and RSCN */ + ret = unf_alloc_and_init_big_sfs_pool(v_lport, xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate big SFS fail", + v_lport->port_id); + + unf_free_all_rsp_pages(xchg_mgr); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + v_lport->p_xchg_mgr[i] = (void *)xchg_mgr; + list_add_tail(&xchg_mgr->xchg_mgr_entry, + &v_lport->list_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).", + v_lport->port_id, v_lport->p_xchg_mgr[i], + hot_pool->base); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) allocate Exchange Manager size(0x%x).", + v_lport->port_id, xchg_mgr->mem_size); + + return RETURN_OK; + +free_xchg_mem: + vfree(xchg_mem); +free_hot_pool: + vfree(hot_pool); +free_xchg_mgr: + vfree(xchg_mgr); +exit: + unf_free_exchg_mgr_info(v_lport); + return UNF_RETURN_ERROR; +} + +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x905, UNF_TRUE, v_lport, return); + + unf_free_all_xchg_mgr(v_lport); +} + +unsigned int unf_alloc_xchg_resource(struct unf_lport_s 
*v_lport) +{ + UNF_CHECK_VALID(0x906, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + INIT_LIST_HEAD(&v_lport->list_dirty_xchg_mgr_head); + INIT_LIST_HEAD(&v_lport->list_xchg_mgr_head); + spin_lock_init(&v_lport->xchg_mgr_lock); + + /* LPort Xchg Management Unit Allocation */ + if (unf_alloc_and_init_xchg_mgr(v_lport) != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only) +{ + unsigned int dirty_xchg = 0; + struct unf_xchg_mgr_s *exch_mgr = NULL; + unsigned long flags = 0; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + + UNF_CHECK_VALID(0x908, UNF_TRUE, v_lport, return); + + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, + &v_lport->list_dirty_xchg_mgr_head) { + exch_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + if (exch_mgr) { + dirty_xchg = + (exch_mgr->free_pool.total_fcp_xchg + + exch_mgr->free_pool.total_sfs_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has %u dirty exchange(s)", + v_lport->port_id, dirty_xchg); + + unf_show_all_xchg(v_lport, exch_mgr); + + if (v_show_only == UNF_FALSE) { + /* Delete Dirty Exchange Mgr entry */ + spin_lock_irqsave( + &v_lport->xchg_mgr_lock, + flags); + list_del_init( + &exch_mgr->xchg_mgr_entry); + spin_unlock_irqrestore( + &v_lport->xchg_mgr_lock, + flags); + + /* Free Dirty Exchange Mgr memory */ + unf_free_xchg_mgr_mem(v_lport, + exch_mgr); + } + } + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + } + + UNF_REFERNCE_VAR(dirty_xchg); +} + +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int v_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x909, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x910, UNF_TRUE, v_idx < UNF_EXCHG_MGR_NUM, + return NULL); + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + xchg_mgr = v_lport->p_xchg_mgr[v_idx]; + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + return xchg_mgr; +} + +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, + unsigned int v_mgr_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x910, UNF_TRUE, (v_lport), return NULL); + + lport = (struct unf_lport_s *)(v_lport->root_lport); + + UNF_CHECK_VALID(0x910, UNF_TRUE, (lport), return NULL); + + /* Get Xchg Manager */ + xchg_mgr = unf_get_xchg_mgr_by_lport(lport, v_mgr_idx); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Exchange Manager is NULL.", + lport->port_id); + + return NULL; + } + + /* Get Xchg Manager Hot Pool */ + return xchg_mgr->hot_pool; +} + +static inline void unf_hot_pool_slab_set( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x911, UNF_TRUE, v_hot_pool, return); + + v_hot_pool->xchg_slab[v_slab_index] = v_xchg; +} + +static inline struct unf_xchg_s *unf_get_xchg_by_xchg_tag( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x912, UNF_TRUE, v_hot_pool, return NULL); + + return v_hot_pool->xchg_slab[v_slab_index]; +} + +static void *unf_lookup_xchg_by_tag(void 
*v_lport, + unsigned short v_hot_pool_tag) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int exchg_mgr_idx = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x913, UNF_TRUE, v_lport, return NULL); + + /* In the case of NPIV, v_pstLport is the Vport pointer, + * the share uses the ExchMgr of RootLport + */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x914, UNF_TRUE, lport, return NULL); + + exchg_mgr_idx = (v_hot_pool_tag * UNF_EXCHG_MGR_NUM) / + lport->low_level_func.support_max_xid_range; + if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) Get ExchgMgr %u err", + lport->port_id, exchg_mgr_idx); + + return NULL; + } + + xchg_mgr = lport->p_xchg_mgr[exchg_mgr_idx]; + + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ExchgMgr %u is null", + lport->port_id, exchg_mgr_idx); + + return NULL; + } + + hot_pool = xchg_mgr->hot_pool; + + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", lport->port_id); + + return NULL; + } + + if (unlikely(v_hot_pool_tag >= + (hot_pool->slab_total_sum + hot_pool->base))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).", + lport->port_id, v_hot_pool_tag, + (hot_pool->slab_total_sum + hot_pool->base)); + + return NULL; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + xchg = unf_get_xchg_by_xchg_tag(hot_pool, + v_hot_pool_tag - hot_pool->base); + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + return (void *)xchg; +} + +static void *unf_find_xchg_by_oxid(void *v_lport, unsigned short v_oxid, + unsigned int v_oid) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + unsigned long xchg_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x915, UNF_TRUE, (v_lport), return NULL); + + /* In the case of NPIV, the v_lport is the Vport pointer, + * and the share uses the ExchMgr of the RootLport + */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x916, UNF_TRUE, (lport), return NULL); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) MgrIdex %u Hot Pool is NULL.", + lport->port_id, i); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. Traverse sfs_busy list */ + list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); + if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) { + atomic_inc(&xchg->ref_cnt); + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + return xchg; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + } + + /* 2. 
Traverse INI_Busy List */ + list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); + if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) { + atomic_inc(&xchg->ref_cnt); + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + return xchg; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } + + return NULL; +} + +static inline int unf_check_xchg_matched(struct unf_xchg_s *xchg, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + int matched = 0; + + matched = (v_command_sn == xchg->cmnd_sn); + if (matched && (atomic_read(&xchg->ref_cnt) > 0)) + return UNF_TRUE; + else + return UNF_FALSE; +} + +static void *unf_lookup_xchg_by_cmnd_sn(void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x919, UNF_TRUE, v_lport, return NULL); + + /* In NPIV, v_lport is a Vport pointer, and idle resources are + * shared by ExchMgr of RootLport. + * However, busy resources are mounted on each vport. + * Therefore, vport needs to be used. + */ + lport = (struct unf_lport_s *)v_lport; + UNF_CHECK_VALID(0x920, UNF_TRUE, lport, return NULL); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + + continue; + } + + /* from busy_list */ + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (unf_check_xchg_matched(xchg, v_command_sn, + v_world_id)) { + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + + return xchg; + } + } + + /* vport: from destroy_list */ + if (lport != lport->root_lport) { + list_for_each_safe(node, next_node, + &hot_pool->list_destroy_xchg) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (unf_check_xchg_matched(xchg, v_command_sn, + v_world_id)) { + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) lookup exchange from destroy list", + lport->port_id); + + return xchg; + } + } + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } + + return NULL; +} + +static inline unsigned int unf_alloc_hot_pool_slab( + struct unf_xchg_hot_pool_s *v_hot_pool, + struct unf_xchg_s *v_xchg, + unsigned short v_rx_id) +{ + unsigned short slab_index = 0; + + UNF_CHECK_VALID(0x921, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x922, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + /* Check whether the hotpool tag is in the specified range sirt. + * If yes, set up the management relationship. If no, + * handle the problem according to the normal IO. + * If the sirt digitmap is used but the tag is occupied, + * it indicates that the I/O is discarded. 
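+	 * Otherwise the xchg_slab array is scanned round-robin starting at
+	 * slab_next_index; if the scan wraps back to its starting slot, no
+	 * free slab exists and the allocation fails.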
+ */ + + v_hot_pool->slab_next_index = + (unsigned short)v_hot_pool->slab_next_index; + + slab_index = v_hot_pool->slab_next_index; + while (unf_get_xchg_by_xchg_tag(v_hot_pool, slab_index)) { + slab_index++; + slab_index = slab_index % v_hot_pool->slab_total_sum; + + /* Rewind occurs */ + if (slab_index == v_hot_pool->slab_next_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "There is No Slab At Hot Pool(0x%p) for xchg(0x%p).", + v_hot_pool, v_xchg); + + return UNF_RETURN_ERROR; + } + } + + unf_hot_pool_slab_set(v_hot_pool, slab_index, v_xchg); + v_xchg->hot_pool_tag = slab_index + v_hot_pool->base; + slab_index++; + v_hot_pool->slab_next_index = + slab_index % v_hot_pool->slab_total_sum; + return RETURN_OK; +} + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_esgl_s *esgl = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *list_head = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x923, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x924, UNF_TRUE, v_xchg, return NULL); + + lport = v_lport; + xchg = v_xchg; + + /* Obtain a new Esgl from the EsglPool and + * add it to the list_esgls of the Xchg + */ + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&lport->esgl_pool.list_esgl_pool)) { + list_head = (&lport->esgl_pool.list_esgl_pool)->next; + list_del(list_head); + lport->esgl_pool.esgl_pool_count--; + list_add_tail(list_head, &xchg->list_esgls); + + esgl = list_entry(list_head, struct unf_esgl_s, entry_esgl); + atomic_inc(&xchg->esgl_cnt); + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) esgl pool is empty", + lport->nport_id); + + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + return NULL; + } + + return &esgl->page; +} + +void unf_release_esgls(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x925, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x926, UNF_TRUE, v_xchg->lport, return); + + if (atomic_read(&v_xchg->esgl_cnt) <= 0) + return; + + /* In the case of NPIV, the Vport pointer is saved in v_pstExch, + * and the EsglPool of RootLport is shared. 
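+	 * Each esgl on the exchange's list is moved back to the root port's
+	 * esgl pool under the pool lock and esgl_cnt is decremented.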
+ */ + lport = (v_xchg->lport)->root_lport; + UNF_CHECK_VALID(0x927, UNF_TRUE, (lport), return); + + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&v_xchg->list_esgls)) { + list_for_each_safe(list, list_tmp, &v_xchg->list_esgls) { + list_del(list); + list_add_tail(list, &lport->esgl_pool.list_esgl_pool); + lport->esgl_pool.esgl_pool_count++; + atomic_dec(&v_xchg->esgl_cnt); + } + } + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); +} + +static void unf_init_xchg_attribute(struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x973, UNF_TRUE, (v_xchg), return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->xchg_mgr = NULL; + v_xchg->free_pool = NULL; + v_xchg->hot_pool = NULL; + v_xchg->lport = NULL; + v_xchg->rport = NULL; + v_xchg->disc_rport = NULL; + v_xchg->io_state = UNF_IO_STATE_NEW; + v_xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + v_xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + v_xchg->io_send_abort = UNF_FALSE; + v_xchg->io_abort_result = UNF_FALSE; + v_xchg->abts_state = 0; + v_xchg->ox_id = INVALID_VALUE16; + v_xchg->abort_oxid = INVALID_VALUE16; + v_xchg->rx_id = INVALID_VALUE16; + v_xchg->sid = INVALID_VALUE32; + v_xchg->did = INVALID_VALUE32; + v_xchg->oid = INVALID_VALUE32; + v_xchg->disc_port_id = INVALID_VALUE32; + v_xchg->seq_id = INVALID_VALUE8; + v_xchg->cmnd_code = INVALID_VALUE32; + v_xchg->cmnd_sn = INVALID_VALUE64; + v_xchg->data_len = 0; + v_xchg->resid_len = 0; + v_xchg->data_direction = DMA_NONE; + v_xchg->hot_pool_tag = INVALID_VALUE16; + v_xchg->big_sfs_buf = NULL; + v_xchg->may_consume_res_cnt = 0; + v_xchg->fact_consume_res_cnt = 0; + v_xchg->io_front_jif = INVALID_VALUE64; + v_xchg->ob_callback_sts = UNF_IO_SUCCESS; + v_xchg->start_jif = 0; + v_xchg->rport_bind_jifs = INVALID_VALUE64; + v_xchg->scsi_id = INVALID_VALUE32; + v_xchg->world_id = INVALID_VALUE32; + + memset(&v_xchg->seq, 0, sizeof(struct unf_seq_s)); + memset(&v_xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd_s)); + memset(&v_xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info_s)); + memset(&v_xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + memset(&v_xchg->dif_info, 0, sizeof(struct dif_info_s)); + memset(v_xchg->private, 0, + (PKG_MAX_PRIVATE_DATA_SIZE * sizeof(unsigned int))); + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + v_xchg->echo_info.response_time = 0; + + if (v_xchg->xchg_type == UNF_XCHG_TYPE_INI) { + if (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu) + memset(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + 0, sizeof(struct unf_fcprsp_iu_s)); + } else if (v_xchg->xchg_type == UNF_XCHG_TYPE_SFS) { + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + memset(v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + 0, sizeof(union unf_sfs_u)); + v_xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Exchange Type(0x%x) SFS Union uninited.", + v_xchg->xchg_type); + } + v_xchg->xchg_type = UNF_XCHG_TYPE_INVALID; + v_xchg->pfn_ob_callback = NULL; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_free_xchg = NULL; + + atomic_set(&v_xchg->ref_cnt, 0); + atomic_set(&v_xchg->esgl_cnt, 0); + atomic_set(&v_xchg->delay_flag, 0); + + if (delayed_work_pending(&v_xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(v_xchg); + + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); +} + +static void unf_add_back_to_fcp_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long 
flags = 0; + + UNF_CHECK_VALID(0x928, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x929, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + /* The released I/O resources are added to + * the queue tail to facilitate fault locating + */ + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_free_xchg_list); + v_free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_check_xchg_mgr_status(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int total_xchg = 0; + unsigned int total_xchg_sum = 0; + + UNF_CHECK_VALID(0x930, UNF_TRUE, v_xchg_mgr, return); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, flags); + + total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg + + v_xchg_mgr->free_pool.total_sfs_xchg; + total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum + + v_xchg_mgr->free_pool.sfs_xchg_sum; + + if ((v_xchg_mgr->free_pool.xchg_mgr_completion) && + (total_xchg == total_xchg_sum)) { + complete(v_xchg_mgr->free_pool.xchg_mgr_completion); + } + spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + flags); +} + +static void unf_free_fcp_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x932, UNF_TRUE, v_xchg, return); + + /* Releasing a Specified INI I/O and Invoking the scsi_done Process */ + unf_done_ini_xchg(v_xchg); + free_pool = v_xchg->free_pool; + xchg_mgr = v_xchg->xchg_mgr; + lport = v_xchg->lport; + rport = v_xchg->rport; + + atomic_dec(&rport->pending_io_cnt); + /* Release the Esgls in the Xchg structure and + * return it to the EsglPool of the Lport + */ + unf_release_esgls(v_xchg); + + /* Mount I/O resources to the FCP Free linked list */ + unf_add_back_to_fcp_list(free_pool, v_xchg); + + /* The Xchg is released synchronously and then forcibly released to + * prevent the Xchg from accessing the Xchg in the normal I/O process + */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_abort_timeout_cmnd(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_xchg_s *xchg = v_xchg; + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + unsigned long flag = 0; + unsigned int timeout_value = 2000; + unsigned int return_value = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + UNF_CHECK_VALID(0x936, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x937, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if (v_xchg->io_state & INI_IO_STATE_UPABORT) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + return; + } + v_xchg->io_state |= INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_NORMAL, UNF_KEVENT, + "LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + 
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + sema_init(&v_xchg->task_sema, 0); + + scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id; + scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; + scsi_cmnd.pfn_done = xchg->scsi_cmnd_info.pfn_done; + scsi_image_table = &lport->rport_scsi_table; + + if (unf_send_abts(lport, v_xchg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "LPort(0x%x) send ABTS, Send ABTS unsuccessful. Exchange OX_ID(0x%x), RX_ID(0x%x).", + lport->port_id, v_xchg->ox_id, + v_xchg->rx_id); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + /* The message fails to be sent. + * It is released internally and does not + * need to be released externally. + */ + return; + } + + if (down_timeout(&v_xchg->task_sema, + (long long)msecs_to_jiffies(timeout_value))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + /* Cnacel the flag of INI_IO_STATE_UPABORT + * and process the io in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return; + } + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) || + (v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id, + v_xchg->ucode_abts_state); + return_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id, + return_value); + unf_complete_cmnd(&scsi_cmnd, DID_BUS_BUSY << 16); + return; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS failed. 
Exch(0x%p) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)", + lport->port_id, v_xchg, v_xchg->hot_pool_tag, + v_xchg->scsi_cmnd_info.result, v_xchg->io_state); +} + +static void unf_fc_ini_send_abts_timeout(struct unf_lport_s *lport, + struct unf_rport_s *rport, + struct unf_xchg_s *xchg) +{ + if (xchg->rport_bind_jifs == rport->rport_alloc_jifs && + xchg->rport_bind_jifs != INVALID_VALUE64) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, xchg->io_state); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + (unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + if (unf_send_abts(lport, xchg) != RETURN_OK) { + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)xchg); + + unf_abts_timeout_recovery_default(rport, xchg); + + unf_cm_free_xchg(lport, xchg); + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->rport_bind_jifs, rport->rport_alloc_jifs, + xchg->ox_id, xchg->rx_id, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } +} + +static void unf_fc_ini_io_rec_wait_timeout(struct unf_lport_s *lport, + struct unf_rport_s *rport, + struct unf_xchg_s *xchg) +{ + unsigned long io_time_out = 0; + + if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) { + unf_send_rec(lport, rport, xchg); + if (xchg->scsi_cmnd_info.abort_timeout > 0) { + io_time_out = + (xchg->scsi_cmnd_info.abort_timeout > + UNF_REC_TOV) ? + (xchg->scsi_cmnd_info.abort_timeout - + UNF_REC_TOV) : 0; + + if (io_time_out > 0) { + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + io_time_out, + UNF_TIMER_TYPE_REQ_IO); + } else { + unf_fc_abort_timeout_cmnd(lport, xchg); + } + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x), bindjifs(0x%llx)no eqal Rport alloc jifs(0x%llx)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, + xchg->io_state, xchg->rport_bind_jifs, + rport->rport_alloc_jifs); + } +} + +static void unf_fc_ini_io_xchg_timeout(struct work_struct *v_work) +{ + struct unf_xchg_s *xchg = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int port_valid_flag = 0; + + UNF_REFERNCE_VAR(ret); + + xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work); + UNF_CHECK_VALID(0x939, UNF_TRUE, xchg, return); + + ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT); + UNF_CHECK_VALID(0x940, UNF_TRUE, ret == RETURN_OK, return); + + lport = xchg->lport; + rport = xchg->rport; + + port_valid_flag = !lport || !rport; + if (port_valid_flag) { + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + + return; + } + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + + /* 1. 
for Send RRQ failed Timer timeout */ + if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } + /* Second ABTS timeout and enter LOGO process */ + else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) && + (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, + xchg->io_state); + + unf_abts_timeout_recovery_default(rport, xchg); + + unf_cm_free_xchg(lport, xchg); + } + /* First time to send ABTS, timeout and retry to send ABTS again */ + else if ((xchg->io_state & INI_IO_STATE_UPABORT) && + (!(xchg->abts_state & ABTS_RESPONSE_RECEIVED))) { + xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_fc_ini_send_abts_timeout(lport, rport, xchg); + } + /* 3. IO_DONE */ + else if ((xchg->io_state & INI_IO_STATE_DONE) && + (xchg->abts_state & ABTS_RESPONSE_RECEIVED)) { + /* + * for IO_DONE: + * 1. INI ABTS first timer time out + * 2. INI RCVD ABTS Response + * 3. Normal case for I/O Done + */ + /* Send ABTS & RCVD RSP & no timeout */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Send RRQ */ + if (unf_send_rrq(lport, rport, xchg) == RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->ox_id, xchg->rx_id, xchg->io_state); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->ox_id, xchg->rx_id, xchg->io_state); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_RRQSEND_ERR; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + (unsigned long)UNF_WRITE_RRQ_SENDERR_INTERVAL, + UNF_TIMER_TYPE_INI_IO); + } + } else if (xchg->io_state & INI_IO_STATE_REC_TIMEOUT_WAIT) { + xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_fc_ini_io_rec_wait_timeout(lport, rport, xchg); + } else { + /* 4. 
I/O Timer Timeout */ + /* vmware */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_fc_abort_timeout_cmnd(lport, xchg); + } + + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + + UNF_REFERNCE_VAR(ret); +} + +static inline struct unf_xchg_s *unf_alloc_io_xchg( + struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr, + unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_xchg_s *xchg = NULL; + struct list_head *list_node = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + static atomic64_t s_exhg_id; + + void (*unf_fc_io_xchg_timeout)(struct work_struct *v_work) = NULL; + + UNF_CHECK_VALID(0x941, UNF_TRUE, v_xchg_mgr, return NULL); + UNF_CHECK_VALID(0x942, UNF_TRUE, v_lport, return NULL); + + free_pool = &v_xchg_mgr->free_pool; + hot_pool = v_xchg_mgr->hot_pool; + UNF_CHECK_VALID(0x943, UNF_TRUE, free_pool, return NULL); + UNF_CHECK_VALID(0x944, UNF_TRUE, hot_pool, return NULL); + + /* 1. Free Pool */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + if (unlikely(list_empty(&free_pool->list_free_xchg_list))) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "Port(0x%x) have no Exchange anymore.", + v_lport->port_id); + + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + return NULL; + } + + /* Select an idle node from free pool */ + list_node = (&free_pool->list_free_xchg_list)->next; + list_del(list_node); + free_pool->total_fcp_xchg--; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry); + + /* + * Hot Pool: + * When xchg is mounted to Hot Pool, the mount mode and release mode + * of Xchg must be specified and stored in the sfs linked list. + */ + flags = 0; + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) { + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + unf_add_back_to_fcp_list(free_pool, xchg); + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(v_xchg_mgr); + + return NULL; + } + + list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist); + unf_fc_io_xchg_timeout = unf_fc_ini_io_xchg_timeout; + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + /* 3. 
Exchange State */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->start_jif = atomic64_inc_return(&s_exhg_id); + xchg->xchg_mgr = v_xchg_mgr; + xchg->free_pool = free_pool; + xchg->hot_pool = hot_pool; + xchg->lport = v_lport; + xchg->xchg_type = v_xchg_type; + xchg->pfn_free_xchg = unf_free_fcp_xchg; + xchg->io_state = UNF_IO_STATE_NEW; + xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + xchg->io_send_abort = UNF_FALSE; + xchg->io_abort_result = UNF_FALSE; + xchg->ox_id = INVALID_VALUE16; + xchg->abort_oxid = INVALID_VALUE16; + xchg->rx_id = INVALID_VALUE16; + xchg->sid = INVALID_VALUE32; + xchg->did = INVALID_VALUE32; + xchg->oid = INVALID_VALUE32; + xchg->seq_id = INVALID_VALUE8; + xchg->cmnd_code = INVALID_VALUE32; + xchg->data_len = 0; + xchg->resid_len = 0; + xchg->data_direction = DMA_NONE; + xchg->may_consume_res_cnt = 0; + xchg->fact_consume_res_cnt = 0; + xchg->io_front_jif = 0; + xchg->tmf_state = 0; + xchg->ucode_abts_state = INVALID_VALUE32; + xchg->abts_state = 0; + xchg->rport_bind_jifs = INVALID_VALUE64; + xchg->scsi_id = INVALID_VALUE32; + xchg->world_id = INVALID_VALUE32; + + memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info_s)); + memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + xchg->scsi_cmnd_info.result = 0; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + + atomic_set(&xchg->ref_cnt, 0); + atomic_set(&xchg->delay_flag, 0); + + if (delayed_work_pending(&xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(xchg); + + INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_io_xchg_timeout); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static void unf_add_back_to_sfs_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x945, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x946, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_sfs_xchg_list); + v_free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_free_sfs_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x947, UNF_TRUE, v_xchg, return); + + free_pool = v_xchg->free_pool; + lport = v_xchg->lport; + xchg_mgr = v_xchg->xchg_mgr; + + /* The memory is applied for when the GID_PT/GID_FT is sent. + * If no response is received, the GID_PT/GID_FT + * needs to be forcibly released. 
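+	 * unf_free_one_big_sfs() returns that buffer before the exchange is
+	 * put back on the SFS free list.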
+ */ + + unf_free_one_big_sfs(v_xchg); + + unf_add_back_to_sfs_list(free_pool, v_xchg); + + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_xchg_add_timer(void *v_xchg, + unsigned long v_time_ms, + enum unf_timer_type_e v_en_time_type) +{ + unsigned long flag = 0; + struct unf_xchg_s *xchg = NULL; + unsigned long time_ms = v_time_ms; + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x948, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + lport = xchg->lport; + UNF_CHECK_VALID(0x948, UNF_TRUE, lport, return); + + /* update timeout */ + switch (v_en_time_type) { + case UNF_TIMER_TYPE_INI_RRQ: + time_ms = time_ms - UNF_INI_RRQ_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI RRQ Timer set."); + break; + + case UNF_TIMER_TYPE_SFS: + time_ms = time_ms + UNF_INI_ELS_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI ELS Timer set."); + break; + default: + break; + } + + /* The xchg of the timer must be valid. + * If the reference count of xchg is 0, + * the timer must not be added + */ + if (atomic_read(&xchg->ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.", + xchg, atomic_read(&xchg->ref_cnt)); + return; + } + + /* Delay Work: Hold for timer */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (queue_delayed_work(lport->xchg_wq, + &xchg->timeout_work, + (unsigned long) + msecs_to_jiffies((unsigned int)time_ms))) { + /* hold for timer */ + atomic_inc(&xchg->ref_cnt); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); +} + +static void unf_sfs_xchg_timeout(struct work_struct *v_work) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x949, UNF_TRUE, v_work, return); + xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work); + UNF_CHECK_VALID(0x950, UNF_TRUE, xchg, return); + + ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT); + UNF_REFERNCE_VAR(ret); + UNF_CHECK_VALID(0x951, UNF_TRUE, ret == RETURN_OK, return); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + lport = xchg->lport; + rport = xchg->rport; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.", + xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid, + xchg->did, xchg->hot_pool_tag, xchg->io_state); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->io_state & TGT_IO_STATE_ABORT) && + (xchg->cmnd_code != ELS_RRQ) && + (xchg->cmnd_code != ELS_LOGO)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.", + xchg, xchg->cmnd_code, xchg->hot_pool_tag); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + return; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* The sfs times out. If the sfs is ELS reply, + * go to unf_rport_error_recovery/unf_lport_error_recovery. + * Otherwise, go to the corresponding obCallback. 
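+	 * Remote ports with nport_id >= UNF_FC_FID_DOM_MGR are recovered at
+	 * L_Port level, all other remote ports at R_Port level.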
+ */ + if (UNF_XCHG_IS_ELS_REPLY(xchg) && (rport)) { + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) + unf_lport_error_recovery(lport); + else + unf_rport_error_recovery(rport); + } else if (xchg->pfn_ob_callback) { + xchg->pfn_ob_callback(xchg); + } else { + /* Do nothing */ + } + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); +} + +static struct unf_xchg_s *unf_alloc_sfs_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr, + unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_xchg_s *xchg = NULL; + struct list_head *list_node = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x952, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x953, UNF_TRUE, v_xchg_mgr, return NULL); + free_pool = &v_xchg_mgr->free_pool; + hot_pool = v_xchg_mgr->hot_pool; + UNF_CHECK_VALID(0x954, UNF_TRUE, free_pool, return NULL); + UNF_CHECK_VALID(0x955, UNF_TRUE, hot_pool, return NULL); + + /* Select an idle node from free pool */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + if (list_empty(&free_pool->list_sfs_xchg_list)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) have no Exchange anymore.", + v_lport->port_id); + + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + return NULL; + } + + list_node = (&free_pool->list_sfs_xchg_list)->next; + list_del(list_node); + free_pool->total_sfs_xchg--; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry); + + /* + * The xchg is mounted to the Hot Pool. + * The mount mode and release mode of the xchg must be specified + * and stored in the sfs linked list. 
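+	 * If no hot-pool slab is available the exchange is returned to the
+	 * SFS free list immediately.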
+ */ + flags = 0; + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) { + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + unf_add_back_to_sfs_list(free_pool, xchg); + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(v_xchg_mgr); + + return NULL; + } + + list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist); + hot_pool->total_xchges++; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->free_pool = free_pool; + xchg->hot_pool = hot_pool; + xchg->lport = v_lport; + xchg->xchg_mgr = v_xchg_mgr; + xchg->pfn_free_xchg = unf_free_sfs_xchg; + xchg->xchg_type = v_xchg_type; + xchg->io_state = UNF_IO_STATE_NEW; + xchg->scsi_cmnd_info.result = 0; + xchg->ob_callback_sts = UNF_IO_SUCCESS; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int) + atomic64_inc_return(&v_lport->exchg_index); + + if (delayed_work_pending(&xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(xchg); + + INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_timeout); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static void *unf_get_new_xchg(void *v_lport, unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int xchg_type = 0; + unsigned short xchg_mgr_type; + unsigned int rtry_cnt = 0; + unsigned int last_exchg_mgr_idx; + + xchg_mgr_type = (v_xchg_type >> 16); + xchg_type = v_xchg_type & 0xFFFF; + UNF_CHECK_VALID(0x956, UNF_TRUE, v_lport, return NULL); + + /* In the case of NPIV, the v_lport is the Vport pointer, + * and the share uses the ExchMgr of the RootLport. 
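+	 * No exchange is allocated while either the root port or the vport
+	 * is flagged UNF_LPORT_NOP.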
+ */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x957, UNF_TRUE, (lport), return NULL); + + if (unlikely((atomic_read(&lport->port_no_operater_flag) == + UNF_LPORT_NOP) || + (atomic_read(&((struct unf_lport_s *)v_lport)->port_no_operater_flag) == + UNF_LPORT_NOP))) + return NULL; + + last_exchg_mgr_idx = + (unsigned int)atomic64_inc_return(&lport->last_exchg_mgr_idx); +try_next_mgr: + rtry_cnt++; + if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM)) + return NULL; + + /* If Fixed mode,only use XchgMgr 0 */ + if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED)) + xchg_mgr = (struct unf_xchg_mgr_s *)lport->p_xchg_mgr[0]; + else + xchg_mgr = + (struct unf_xchg_mgr_s *) + lport->p_xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM]; + + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) get exchangemgr %u is null.", + lport->port_id, + last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM); + return NULL; + } + + last_exchg_mgr_idx++; + + /* Allocate entries based on the Exchange type */ + switch (xchg_type) { + case UNF_XCHG_TYPE_SFS: + xchg = unf_alloc_sfs_xchg(v_lport, xchg_mgr, xchg_type, + INVALID_VALUE16); + break; + + case UNF_XCHG_TYPE_INI: + xchg = unf_alloc_io_xchg(v_lport, xchg_mgr, xchg_type, + INVALID_VALUE16); + break; + + default: + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) unwonted, Exchange type(0x%x).", + lport->port_id, xchg_type); + break; + } + + if (likely(xchg)) { + xchg->ox_id = INVALID_VALUE16; + xchg->abort_oxid = INVALID_VALUE16; + xchg->rx_id = INVALID_VALUE16; + xchg->debug_hook = UNF_FALSE; + xchg->alloc_jif = jiffies; + + atomic_set(&xchg->ref_cnt, 1); + atomic_set(&xchg->esgl_cnt, 0); + } else { + goto try_next_mgr; + } + + return xchg; +} + +static void unf_free_xchg(void *v_lport, void *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + + UNF_REFERNCE_VAR(v_lport); + UNF_CHECK_VALID(0x958, UNF_TRUE, (v_xchg), return); + + xchg = (struct unf_xchg_s *)v_xchg; + unf_xchg_ref_dec(xchg, XCHG_FREE_XCHG); +} + +void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x960, UNF_TRUE, v_lport, return); + + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) has dirty exchange, Don't release exchange manager template.", + v_lport->port_id); + + return; + } + + memset(&v_lport->xchg_mgr_temp, 0, + sizeof(struct unf_cm_xchg_mgr_template_s)); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP; +} + +static void unf_xchg_abort_all_sfs_xchg(struct unf_lport_s *v_lport, + int v_clean) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x961, UNF_TRUE, v_lport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", + v_lport->port_id); + + continue; + } + + if (v_clean == UNF_FALSE) { + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* Clearing the SFS_Busy_list Exchange Resource */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + 
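+			/* only exchanges still referenced are flagged TGT_IO_STATE_ABORT */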
spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + continue; + } + } +} + +static void unf_xchg_abort_ini_io_xchg(struct unf_lport_s *v_lport, + int v_clean) +{ + /* Clean L_Port/V_Port Link Down I/O: Abort */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int io_state = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x962, UNF_TRUE, (v_lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + v_lport->port_id); + + continue; + } + + if (v_clean == UNF_FALSE) { + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Abort INI_Busy_List IO */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= + INI_IO_STATE_DRABORT | io_state; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + /* Do nothing, just return */ + continue; + } + } +} + +static void unf_xchg_abort_all_xchg(void *v_lport, + unsigned int v_xchg_type, + int v_clean) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x964, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + switch (v_xchg_type) { + case UNF_XCHG_TYPE_SFS: + unf_xchg_abort_all_sfs_xchg(lport, v_clean); + break; + + /* Clean L_Port/V_Port Link Down I/O: Abort */ + case UNF_XCHG_TYPE_INI: + unf_xchg_abort_ini_io_xchg(lport, v_clean); + break; + + default: + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) unknown exch type(0x%x)", + lport->port_id, v_xchg_type); + break; + } +} + +static void unf_xchg_abort_ini_send_tm_cmd(void *v_lport, + void *v_rport, + unsigned long long v_lun_id) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + unsigned long long raw_lunid = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + raw_lunid = *(unsigned long long *) + (xchg->fcp_cmnd.lun) >> 16 & + 0x000000000000ffff; + if ((v_lun_id == raw_lunid) && + (rport == xchg->rport)) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_lun(void *v_lport, + void *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* ABORT: set UP_ABORT tag for target LUN I/O */ + struct unf_xchg_s *tm_xchg = (struct unf_xchg_s *)v_tm_xchg; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)", + ((struct unf_lport_s *)v_lport)->port_id, + v_lun_id, v_tm_xchg, v_abort_all_lun_flag); + + /* for INI Mode */ + if (!tm_xchg) { + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_xchg_abort_ini_send_tm_cmd(v_lport, v_rport, v_lun_id); + + return; + } +} + +static void unf_xchg_abort_ini_tmf_target_reset(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy_list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (rport == xchg->rport) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, + lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_session(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) Rport(0x%x) start session reset with TMF", + ((struct unf_lport_s *)v_lport)->port_id, + ((struct unf_rport_s *)v_rport)->nport_id); + + unf_xchg_abort_ini_tmf_target_reset(v_lport, v_rport); +} + +static void unf_ini_busy_io_xchg_abort(void *v_hot_pool, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: Set (DRV) ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_rport_s *rport = NULL; + unsigned long xchg_lock_flags = 0; + + rport = (struct unf_rport_s *)v_rport; + hot_pool = (struct unf_xchg_hot_pool_s *)v_hot_pool; + + /* ABORT INI IO: INI_BUSY_LIST */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->scsi_cmnd_info.result = + UNF_SCSI_HOST(DID_IMM_RETRY); + xchg->io_state |= INI_IO_STATE_DRABORT; + xchg->io_state |= v_extra_io_state; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Abort INI:0x%p, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +static void unf_xchg_mgr_io_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. 
RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_lport_s *lport = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x983, UNF_TRUE, v_lport, return); + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x984, UNF_TRUE, lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + lport->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Clear INI (session) IO: INI Mode */ + unf_ini_busy_io_xchg_abort(hot_pool, v_rport, v_sid, + v_did, v_extra_io_state); + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +static void unf_xchg_mgr_sfs_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x991, UNF_TRUE, (v_lport), return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x992, UNF_TRUE, (lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", + lport->port_id); + + continue; + } + + rport = (struct unf_rport_s *)v_rport; + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* Clear the SFS exchange of the corresponding connection */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->io_state |= TGT_IO_STATE_ABORT; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x959, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init = + unf_get_new_xchg; + v_lport->xchg_mgr_temp.pfn_unf_xchg_release = unf_free_xchg; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag = + unf_lookup_xchg_by_tag; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_id = + unf_find_xchg_by_oxid; + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer = + unf_fc_xchg_add_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer = + unf_xchg_cancel_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io = + unf_xchg_abort_all_xchg; + 
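+	/* the command-SN lookup below also walks each vport's destroy list (see unf_lookup_xchg_by_cmnd_sn) */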
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_cmnd_sn = + unf_lookup_xchg_by_cmnd_sn; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun = + unf_xchg_abort_by_lun; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session = + unf_xchg_abort_by_session; + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort = + unf_xchg_mgr_io_xchg_abort; + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort = + unf_xchg_mgr_sfs_xchg_abort; + + return RETURN_OK; +} + +void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport, + enum int_e v_wait_state) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x965, UNF_TRUE, v_lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + v_lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + hot_pool->wait_state = v_wait_state; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x967, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(v_xchg->debug_hook == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)", + v_xchg, v_xchg->io_state, v_xchg->sid, + v_xchg->did, v_xchg->ox_id, v_xchg->rx_id, + v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt), + io_stage[v_io_stage].stage); + } + + hot_pool = v_xchg->hot_pool; + UNF_CHECK_VALID(0x968, UNF_TRUE, hot_pool, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_io_stage); + + /* Exchange -> Hot Pool Tag check */ + if (unlikely((v_xchg->hot_pool_tag >= + (hot_pool->slab_total_sum + hot_pool->base)) || + (v_xchg->hot_pool_tag < hot_pool->base))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)", + v_xchg, v_xchg->sid, v_xchg->did, + v_xchg->hot_pool_tag, + hot_pool->slab_total_sum + hot_pool->base, + hot_pool->base); + + return UNF_RETURN_ERROR; + } + + /* atomic read & inc */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + if (unlikely(atomic_read(&v_xchg->ref_cnt) <= 0)) { + ret = UNF_RETURN_ERROR; + } else { + if (unf_get_xchg_by_xchg_tag(hot_pool, + v_xchg->hot_pool_tag - + hot_pool->base) == + v_xchg) { + atomic_inc(&v_xchg->ref_cnt); + ret = RETURN_OK; + } else { + ret = UNF_RETURN_ERROR; + } + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return ret; +} + +void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage) +{ + /* Atomic dec ref_cnt & test, free exchange + * if necessary (ref_cnt==0) + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + void (*pfn_free_xchg)(struct unf_xchg_s *) = NULL; + unsigned long flags = 0; + unsigned long xchg_lock_flags = 0; + + UNF_CHECK_VALID(0x969, UNF_TRUE, (v_xchg), return); + + if (v_xchg->debug_hook == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Statge %s", + v_xchg, 
v_xchg->io_state, v_xchg->sid, + v_xchg->did, v_xchg->ox_id, v_xchg->rx_id, + v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt), + io_stage[v_io_stage].stage); + } + + hot_pool = v_xchg->hot_pool; + UNF_CHECK_VALID(0x970, UNF_TRUE, hot_pool, return); + UNF_CHECK_VALID(0x970, UNF_TRUE, + v_xchg->hot_pool_tag >= hot_pool->base, return); + UNF_REFERNCE_VAR(v_io_stage); + + /* + * 1. Atomic dec & test + * 2. Free exchange if necessary (ref_cnt == 0) + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, xchg_lock_flags); + if (atomic_dec_and_test(&v_xchg->ref_cnt)) { + pfn_free_xchg = v_xchg->pfn_free_xchg; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + unf_hot_pool_slab_set(hot_pool, + v_xchg->hot_pool_tag - hot_pool->base, + NULL); + /* Delete exchange list entry */ + list_del_init(&v_xchg->list_xchg_entry); + hot_pool->total_xchges--; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + // unf_free_fcp_xchg --->>> unf_done_ini_xchg + if (pfn_free_xchg) + pfn_free_xchg(v_xchg); + } else { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +bool unf_busy_io_completed(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x5841, UNF_TRUE, v_lport, return UNF_TRUE); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Exchange Manager is NULL", + v_lport->port_id); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "[info]Port(0x%x) ini busylist is not empty.", + v_lport->port_id); + + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + return UNF_FALSE; + } + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } + return UNF_TRUE; +} diff --git a/drivers/scsi/huawei/hifc/unf_exchg.h b/drivers/scsi/huawei/hifc/unf_exchg.h new file mode 100644 index 0000000000000000000000000000000000000000..fa24cd986654b5415f79947a7a059c17f6a4d2d6 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.h @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_FCEXCH_H__ +#define __UNF_FCEXCH_H__ + +#include "unf_scsi_common.h" +#include "unf_lport.h" + +#define DRV_VERIFY_CRC_MASK (1 << 1) +#define DRV_VERIFY_APP_MASK (1 << 2) +#define DRV_VERIFY_LBA_MASK (1 << 3) + +#define DRV_DIF_CRC_POS 0 +#define DRV_DIF_CRC_LEN 2 +#define DRV_DIF_APP_POS 2 +#define DRV_DIF_APP_LEN 2 +#define DRV_DIF_LBA_POS 4 +#define DRV_DIF_LBA_LEN 4 + +enum unf_ioflow_id_e { + XCHG_ALLOC = 0, + TGT_RECEIVE_ABTS, + TGT_ABTS_DONE, + TGT_IO_SRR, + SFS_RESPONSE, + SFS_TIMEOUT, + INI_SEND_CMND, + INI_RESPONSE_DONE, + INI_EH_ABORT, + INI_EH_DEVICE_RESET, + INI_EH_BLS_DONE, + INI_IO_TIMEOUT, + INI_REQ_TIMEOUT, + XCHG_CANCEL_TIMER, + XCHG_FREE_XCHG, + SEND_ELS, + IO_XCHG_WAIT, + XCHG_BUTT +}; + +enum unf_xchg_type_e { + UNF_XCHG_TYPE_INI = 0, /* INI IO */ + UNF_XCHG_TYPE_SFS = 1, /* SFS IO */ + UNF_XCHG_TYPE_INVALID +}; + +enum unf_xchg_mgr_type_e { + UNF_XCHG_MGR_TYPE_RANDOM = 0, + UNF_XCHG_MGR_TYPE_FIXED = 1, + 
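+	/* FIXED restricts allocation to exchange manager 0; RANDOM round-robins across managers (see unf_get_new_xchg) */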
UNF_XCHG_MGR_TYPE_INVALID +}; + +enum tgt_io_xchg_send_stage_e { + TGT_IO_SEND_STAGE_NONE = 0, + TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */ + TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */ + TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */ + TGT_IO_SEND_STAGE_INVALID +}; + +enum tgt_io_send_result_e { + TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */ + TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */ + TGT_IO_SEND_RESULT_INVALID +}; + +struct unf_ioflow_id_s { + char *stage; +}; + +#define UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg) \ + ((v_oxid == xchg->ox_id) && (v_oid == xchg->oid) && \ + (atomic_read(&xchg->ref_cnt) > 0)) + +#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \ + xchg_alloc_time) \ + do { \ + if (unlikely((pkg_alloc_time != 0) && \ + (pkg_alloc_time != xchg_alloc_time))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, \ + UNF_ERR, \ + "Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not equal,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \ + lport->port_id, lport->nport_id, \ + xchg_tag, exchg, \ + pkg_alloc_time, xchg_alloc_time); \ + return UNF_RETURN_ERROR; \ + }; \ + if (unlikely(pkg_alloc_time == 0)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, \ + UNF_MAJOR, \ + "Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \ + lport->port_id, lport->nport_id, \ + xchg_tag, exchg, \ + pkg_alloc_time, xchg_alloc_time); \ + }; \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + do { \ + if (DRV_VERIFY_CRC_MASK & \ + v_xchg->dif_control.protect_opcode) { \ + if (memcmp(&dif_control->actual_dif[DRV_DIF_CRC_POS], \ + &dif_control->expected_dif[DRV_DIF_CRC_POS], \ + DRV_DIF_CRC_LEN) != 0) { \ + tgt_err_code = default_err_code; \ + } \ + } \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + do { \ + if ((check_err_code == tgt_err_code) && \ + (DRV_VERIFY_LBA_MASK & v_xchg->dif_control.protect_opcode)) { \ + if (memcmp(&dif_control->actual_dif[DRV_DIF_LBA_POS], \ + &dif_control->expected_dif[DRV_DIF_LBA_POS], \ + DRV_DIF_LBA_LEN) != 0) { \ + tgt_err_code = default_err_code; \ + } \ + } \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) + +#define UNF_SET_SCSI_CMND_RESULT(v_xchg, v_result) \ + ((v_xchg)->scsi_cmnd_info.result = (v_result)) + +#define UNF_GET_GS_SFS_XCHG_TIMER(v_lport) (3 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_GET_BLS_SFS_XCHG_TIMER(v_lport) (2 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) (2 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_XCHG_MGR_FC 0 +#define UNF_XCHG_MIN_XID 0x0000 +#define UNF_XCHG_MAX_XID 0xffff +#define UNF_ELS_ECHO_RESULT_OK 0 +#define UNF_ELS_ECHO_RESULT_FAIL 1 + +struct unf_xchg_s; +/* Xchg hot pool, busy IO lookup Xchg */ +struct unf_xchg_hot_pool_s { + /* Xchg sum, in hot pool */ + unsigned short total_xchges; + /* Total number of resources consumedcorresponding to buffer */ + unsigned int total_res_cnt; + enum int_e wait_state; + + /* pool lock */ + spinlock_t xchg_hot_pool_lock; + + /* Xchg posiontion list */ + struct list_head sfs_busylist; + struct list_head ini_busylist; + struct list_head list_destroy_xchg; + + /* Next free hot point */ + 
unsigned short slab_next_index; + unsigned short slab_total_sum; + unsigned short base; + + struct unf_lport_s *lport; + + struct unf_xchg_s *xchg_slab[0]; + +}; + +/* Free pool of Xchg */ +struct unf_xchg_free_pool_s { + spinlock_t xchg_free_pool_lock; + + unsigned int fcp_xchg_sum; + + /* IO used Xchg */ + struct list_head list_free_xchg_list; + unsigned int total_fcp_xchg; + + /* SFS used Xchg */ + struct list_head list_sfs_xchg_list; + unsigned int total_sfs_xchg; + unsigned int sfs_xchg_sum; + + struct completion *xchg_mgr_completion; +}; + +struct unf_big_sfs_s { + struct list_head entry_big_sfs; + void *vaddr; + unsigned int size; +}; + +struct unf_big_sfs_pool_s { + void *big_sfs_pool; + unsigned int free_count; + struct list_head list_free_pool; + struct list_head list_busy_pool; + spinlock_t big_sfs_pool_lock; +}; + +/* Xchg Manager for vport Xchg */ +struct unf_xchg_mgr_s { + /* Mgr type */ + unsigned int mgr_type; + + /* Mgr entry */ + struct list_head xchg_mgr_entry; + + /* Mgr attributes */ + unsigned short min_xid; + unsigned short max_xid; + unsigned int mem_size; + + /* Mgr allocated resources */ + void *fcp_mm_start; + + unsigned int sfs_mem_size; + void *sfs_mm_start; + dma_addr_t sfs_phy_addr; + + struct unf_xchg_free_pool_s free_pool; + struct unf_xchg_hot_pool_s *hot_pool; + + struct unf_big_sfs_pool_s st_big_sfs_pool; + + struct buf_describe_s big_sfs_buf_list; + struct buf_describe_s rsp_buf_list; + +}; + +struct unf_seq_s { + /* Seq ID */ + unsigned char seq_id; + + /* Seq Cnt */ + unsigned short seq_cnt; + + /* Seq state and len, maybe used for FCoE */ + unsigned short seq_stat; + unsigned int rec_data_len; +}; + +union unf_xchg_fcp_sfs_u { + struct unf_sfs_entry_s sfs_entry; + struct unf_fcp_rsp_iu_entry_s fcp_rsp_entry; +}; + +#define UNF_IO_STATE_NEW 0 +#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* XFER_RDY sent successfully */ +#define TGT_IO_STATE_RSP (1 << 5) /* chip sent rsp */ +#define TGT_IO_STATE_ABORT (1 << 7) + +/* INI Upper-layer Task Management Commands */ +#define INI_IO_STATE_UPTASK (1 << 15) +/* INI Upper-layer timeout Abort flag */ +#define INI_IO_STATE_UPABORT (1 << 16) +#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */ +#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */ +#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI waiting to send RRQ */ +#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */ +/* INI only clear firmware resource flag */ +#define INI_IO_STATE_ABORT_RESOURCE (1 << 21) +/* IOC abort: INI sends ABTS, semaphore times out after 5s, then set to 1 */ +#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22) +#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */ +/* INI busy IO session logo status */ +#define INI_IO_STATE_LOGO (1 << 24) +#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */ +#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */ +#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */ + +#define TMF_RESPONSE_RECEIVED (1 << 0) +#define MARKER_STS_RECEIVED (1 << 1) +#define ABTS_RESPONSE_RECEIVED (1 << 2) + +struct unf_scsi_cmd_info_s { + unsigned long time_out; + unsigned long abort_timeout; + void *scsi_cmnd; + void (*pfn_done)(struct unf_scsi_cmd_s *); + ini_get_sgl_entry_buf pfn_unf_get_sgl_entry_buf; + struct unf_ini_error_code_s *err_code_table; /* error code table */ + char *sense_buf; + unsigned int err_code_table_cout; /* Size of the error code table */ + unsigned int buf_len; + unsigned int entry_cnt; + unsigned int result; /* Stores command execution
results */ + unsigned int port_id; + /* Re-search for rport based on scsi_id during retry. Otherwise, + * data inconsistency will occur + */ + unsigned int scsi_id; + void *sgl; +}; + +struct unf_req_sgl_info_s { + void *sgl; + void *sgl_start; + unsigned int req_index; + unsigned int entry_index; +}; + +struct unf_els_echo_info_s { + unsigned long long response_time; + struct semaphore echo_sync_sema; + unsigned int echo_result; +}; + +struct unf_xchg_s { + /* Mgr resource related */ + /* list deleted from hot pool */ + struct unf_xchg_hot_pool_s *hot_pool; + + /* attached to free pool */ + struct unf_xchg_free_pool_s *free_pool; + struct unf_xchg_mgr_s *xchg_mgr; + struct unf_lport_s *lport; /* Local LPort/VLPort */ + struct unf_rport_s *rport; /* Remote Port */ + struct unf_rport_s *disc_rport; /* Discovery Remote Port */ + struct list_head list_xchg_entry; + struct list_head list_abort_xchg_entry; + spinlock_t xchg_state_lock; + + /* Xchg reference */ + atomic_t ref_cnt; + atomic_t esgl_cnt; + int debug_hook; + /* Xchg attributes */ + unsigned short hot_pool_tag; /* Hot pool tag */ + /* Only used for abort; ox_id for + * lun reset/logo/plogi/linkdown is set to 0xffff + */ + unsigned short abort_oxid; + unsigned int xchg_type; /* LS, TGT CMND, REQ, or SCSI Cmnd */ + unsigned short ox_id; + unsigned short rx_id; + unsigned int sid; + unsigned int did; + unsigned int oid; /* ID of the exchange initiator */ + unsigned int disc_port_id; /* Send GNN_ID/GFF_ID NPortId */ + unsigned char seq_id; + unsigned char byte_orders; /* Byte order */ + struct unf_seq_s seq; + + unsigned int cmnd_code; + unsigned int world_id; + /* Dif control */ + struct unf_dif_control_info_s dif_control; + struct dif_info_s dif_info; + + /* IO status: abort, timed out */ + unsigned int io_state; /* TGT_IO_STATE_E */ + unsigned int tmf_state; /* TMF STATE */ + unsigned int ucode_abts_state; + unsigned int abts_state; + + /* IO Enqueuing */ + enum tgt_io_xchg_send_stage_e io_send_stage; /* TGT_IO_SEND_STAGE_E */ + + /* IO Enqueuing result, success or failure */ + enum tgt_io_send_result_e io_send_result; /* TGT_IO_SEND_RESULT_E */ + + /* Whether ABORT is delivered to the chip for IO */ + unsigned char io_send_abort; + /* Result of delivering ABORT to the chip + * (success: UNF_TRUE; failure: UNF_FALSE) + */ + unsigned char io_abort_result; + + /* For INI, indicates the length of the data + * transmitted over the PCI link + */ + unsigned int data_len; + + /* Resid length: greater than 0 for underflow, less than 0 for overflow */ + int resid_len; + + /* +++++++++++++++++IO Special++++++++++++++++++++ */ + /* point to tgt cmnd/req/scsi cmnd */ + /* Fcp cmnd */ + struct unf_fcp_cmnd_s fcp_cmnd; + + struct unf_scsi_cmd_info_s scsi_cmnd_info; + + struct unf_req_sgl_info_s req_sgl_info; + + struct unf_req_sgl_info_s dif_sgl_info; + + unsigned long long cmnd_sn; + + /* timestamp */ + unsigned long long start_jif; + unsigned long long alloc_jif; + + unsigned long long io_front_jif; + + /* I/O resources to be consumed, corresponding to buffer */ + unsigned int may_consume_res_cnt; + /* Number of resources consumed by I/Os.
The value is not zero + * only when it is sent to the chip + */ + unsigned int fact_consume_res_cnt; + + /* scsi req info */ + unsigned int data_direction; + + struct unf_big_sfs_s *big_sfs_buf; + + /* scsi cmnd sense_buffer pointer */ + union unf_xchg_fcp_sfs_u fcp_sfs_union; + + /* One exchange may use several External Sgls */ + struct list_head list_esgls; + + struct unf_els_echo_info_s echo_info; + + /* +++++++++++++++++Task Special++++++++++++++++++++ */ + struct semaphore task_sema; + + /* for RRQ ,IO Xchg add to SFS Xchg */ + void *io_xchg; + + /* Xchg delay work */ + struct delayed_work timeout_work; + + /* send result callback */ + void (*pfn_ob_callback)(struct unf_xchg_s *); + + /*Response IO callback */ + void (*pfn_callback)(void *v_lport, + void *v_rport, + void *v_xchg); + + /* Xchg release function */ + void (*pfn_free_xchg)(struct unf_xchg_s *); + + /* +++++++++++++++++low level Special++++++++++++++++++++ */ + unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE]; + + /* ABTS_RSP info */ + struct unf_abts_rsps_s abts_rsps; + + unsigned long long rport_bind_jifs; + + /* sfs exchg ob callback status */ + unsigned int ob_callback_sts; + unsigned int scsi_id; + atomic_t delay_flag; + void *upper_ct; +}; + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport); +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport); +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport); +unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int); +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, unsigned int); +void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag); +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id); +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_oxid, + unsigned int v_oid); +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, int v_abort_all_lun_flag); +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did, + unsigned int extra_io_stat); +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did); +void unf_cm_free_xchg(void *v_lport, void *v_xchg); +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type); +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag); +void unf_release_esgls(struct unf_xchg_s *v_xchg); +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr); +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only); +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport); +void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport, + enum int_e v_wait_state); +void unf_free_lport_all_xchg(struct unf_lport_s 
*v_lport); +bool unf_busy_io_completed(struct unf_lport_s *v_lport); +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_init.c b/drivers/scsi/huawei/hifc/unf_init.c new file mode 100644 index 0000000000000000000000000000000000000000..c902a7f71bf57bbd5496747ed22729882217fec8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_init.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_event.h" +#include "unf_exchg.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_service.h" +#include "unf_io.h" + +#define RPORT_FEATURE_POOL_SIZE 4096 + +static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static unsigned int unf_recv_tmf_marker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static unsigned int unf_recv_abts_mrker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static int unf_get_cfg_parms(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num); + + +/* global variables */ +unsigned int event_thread_exit; +struct task_struct *event_thread; + +struct completion *fc_event_handle_thd_comp; +struct workqueue_struct *unf_work_queue; + +struct unf_global_card_thread_s card_thread_mgr; +unsigned int unf_dbg_level = UNF_MAJOR; +unsigned int log_print_level = UNF_INFO; +unsigned int log_limted_times = UNF_LOGIN_ATT_PRINT_TIMES; + +struct unf_cm_handle_op_s cm_low_levle_handle = { + .pfn_unf_alloc_local_port = unf_lport_create_and_init, + .pfn_unf_release_local_port = unf_release_local_port, + .pfn_unf_receive_els_pkg = unf_receive_els_pkg, + .pfn_unf_receive_gs_pkg = unf_receive_gs_pkg, + .pfn_unf_receive_bls_pkg = unf_receive_bls_pkg, + .pfn_unf_send_els_done = unf_send_els_done, + .pfn_unf_receive_ini_rsponse = unf_ini_scsi_completed, + .pfn_unf_get_cfg_parms = unf_get_cfg_parms, + .pfn_unf_receive_marker_status = unf_recv_tmf_marker_status, + .pfn_unf_receive_abts_marker_status = unf_recv_abts_mrker_status, + + .pfn_unf_cm_get_sgl_entry = unf_ini_get_sgl_entry, + .pfn_unf_cm_get_dif_sgl_entry = unf_ini_get_dif_sgl_entry, + .pfn_unf_get_one_free_esgl_page = unf_cm_get_one_free_esgl_page, + .pfn_unf_fc_port_link_event = unf_fc_port_link_event, + .pfn_unf_ioctl_to_com_handler = unf_cmd_adm_handler, +}; + +static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x1700, 1, v_lport, return NULL); + UNF_CHECK_VALID(0x1701, 1, v_fra_pkg, return NULL); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_fra_pkg->xchg_contex; + + return unf_get_one_free_esgl_page(lport, xchg); /* from esgl pool */ +} + +static int unf_get_cfg_parms(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num) +{ + /* Maximum length of a configuration item value, + * including the end character + */ +#define UNF_MAX_ITEM_VALUE_LEN (256) + + unsigned int *value = NULL; + struct unf_cfg_item_s *cfg_parm = NULL; + unsigned int i = 0; + + cfg_parm = v_cfg_parm; + value = v_cfg_value; + + for (i = 0; i < v_item_num; i++) { + if (!cfg_parm || !value) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "[err]Config name or value is NULL"); + + return UNF_RETURN_ERROR; + } + + if 
(strcmp("End", cfg_parm->name) == 0) + break; + + if (strcmp("fw_path", cfg_parm->name) == 0) { + cfg_parm++; + value += UNF_MAX_ITEM_VALUE_LEN / sizeof(unsigned int); + + continue; + } + + *value = cfg_parm->default_value; + cfg_parm++; + value++; + } + + return RETURN_OK; +} + +static unsigned int unf_recv_tmf_marker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Find exchange which point to marker sts */ + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) tag function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + /* + * NOTE: set exchange TMF state with MARKER_STS_RECEIVED + * + * About TMF state + * 1. STS received + * 2. Response received + * 3. Do check if necessary + */ + xchg->tmf_state |= MARKER_STS_RECEIVED; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Marker STS: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x), EXCH: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x)", + v_fra_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + v_fra_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + (unsigned short)(v_fra_pkg->frame_head.oxid_rxid >> 16), + (unsigned short)(v_fra_pkg->frame_head.oxid_rxid), + xchg->did, + xchg->sid, + xchg->ox_id, + xchg->rx_id); + + return RETURN_OK; +} + +static unsigned int unf_recv_abts_mrker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Find exchange by tag */ + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) tag function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + /* + * NOTE: set exchange ABTS state with MARKER_STS_RECEIVED + * + * About exchange ABTS state + * 1. STS received + * 2. Response received + * 3. Do check if necessary + * + * About Exchange status get from low level + * 1. Set: when RCVD ABTS Marker + * 2. Set: when RCVD ABTS Req Done + * 3. 
value: set value with pkg->status + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->ucode_abts_state = v_fra_pkg->status; + xchg->abts_state |= MARKER_STS_RECEIVED; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x) wake up SEMA for Abts marker exchange(0x%p) oxid(0x%x 0x%x) status(0x%x)", + lport->port_id, xchg, xchg->ox_id, xchg->hot_pool_tag, + v_fra_pkg->abts_maker_status); + + /* + * NOTE: Second time for ABTS marker received, or + * ABTS response have been received, no need to wake up sema + */ + if ((xchg->io_state & INI_IO_STATE_ABORT_TIMEOUT) || + (xchg->abts_state & ABTS_RESPONSE_RECEIVED)) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) no need to wake up SEMA for Abts marker ABTS_STATE(0x%x) IO_STATE(0x%x)", + lport->port_id, xchg->abts_state, + xchg->io_state); + + return RETURN_OK; + } + if (xchg->io_state & INI_IO_STATE_TMF_ABORT) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) receive Abts marker, exchange(%p) state(0x%x) free it", + lport->port_id, xchg, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + up(&xchg->task_sema); + } + + return RETURN_OK; +} + +unsigned int unf_get_cm_handle_op(struct unf_cm_handle_op_s *v_cm_handle) +{ + UNF_CHECK_VALID(0x1708, UNF_TRUE, v_cm_handle, + return UNF_RETURN_ERROR); + + memcpy(v_cm_handle, &cm_low_levle_handle, + sizeof(struct unf_cm_handle_op_s)); + + return RETURN_OK; +} + +static void unf_uninit_cm_low_level_handle(void) +{ + memset(&cm_low_levle_handle, 0, sizeof(struct unf_cm_handle_op_s)); +} + +int unf_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + + UNF_REFERNCE_VAR(v_arg); + + set_user_nice(current, 4); + recalc_sigpending(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[event]Enter event thread"); + + complete(fc_event_handle_thd_comp); + + do { + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags); + if (list_empty(&fc_event_list.list_head) == UNF_TRUE) { + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&fc_event_list.list_head)->next; + list_del_init(node); + fc_event_list.list_num--; + event_node = list_entry(node, + struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + + /* Process event node */ + unf_handle_event(event_node); + } + } while (!event_thread_exit); + + complete_and_exit(fc_event_handle_thd_comp, 0); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[event]Event thread exit"); + + return RETURN_OK; +} + +static unsigned int unf_creat_event_center(void) +{ + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + struct completion *p_fc_event_completion = &fc_event_completion; + + INIT_LIST_HEAD(&fc_event_list.list_head); + fc_event_list.list_num = 0; + spin_lock_init(&fc_event_list.fc_eventlist_lock); + fc_event_handle_thd_comp = p_fc_event_completion; + + event_thread = kthread_run(unf_event_process, NULL, "hifc_event"); + if (IS_ERR(event_thread)) { + complete_and_exit(fc_event_handle_thd_comp, 0); + 
fc_event_handle_thd_comp = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create event thread failed(0x%p)", + event_thread); + + return UNF_RETURN_ERROR; + } + wait_for_completion(fc_event_handle_thd_comp); + return RETURN_OK; +} + +static void unf_cm_event_thread_exit(void) +{ + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + struct completion *p_fc_event_completion = &fc_event_completion; + + fc_event_handle_thd_comp = p_fc_event_completion; + event_thread_exit = 1; + wake_up_process(event_thread); + wait_for_completion(fc_event_handle_thd_comp); + + fc_event_handle_thd_comp = NULL; +} + +static void unf_cm_cread_card_mgr_list(void) +{ + /* So far, do not care */ + INIT_LIST_HEAD(&card_thread_mgr.list_card_list_head); + + spin_lock_init(&card_thread_mgr.global_card_list_lock); + + card_thread_mgr.card_sum = 0; +} + +static int unf_port_feature_pool_init(void) +{ + unsigned int i = 0; + unsigned int rport_fea_pool_size = 0; + struct unf_rport_feature_recard_s *rport_fea_recard = NULL; + unsigned long flags = 0; + + rport_fea_pool_size = sizeof(struct unf_rport_feature_pool_s); + port_fea_pool = vmalloc(rport_fea_pool_size); + if (!port_fea_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]cannot allocate rport feature pool"); + + return UNF_RETURN_ERROR; + } + memset(port_fea_pool, 0, rport_fea_pool_size); + spin_lock_init(&port_fea_pool->port_fea_pool_lock); + INIT_LIST_HEAD(&port_fea_pool->list_busy_head); + INIT_LIST_HEAD(&port_fea_pool->list_free_head); + + port_fea_pool->p_port_feature_pool_addr = + vmalloc((size_t)(RPORT_FEATURE_POOL_SIZE * + sizeof(struct unf_rport_feature_recard_s))); + if (!port_fea_pool->p_port_feature_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]cannot allocate rport feature pool address"); + + vfree(port_fea_pool); + port_fea_pool = NULL; + + return UNF_RETURN_ERROR; + } + + memset(port_fea_pool->p_port_feature_pool_addr, 0, + sizeof(struct unf_rport_feature_recard_s) * + RPORT_FEATURE_POOL_SIZE); + rport_fea_recard = + (struct unf_rport_feature_recard_s *) + port_fea_pool->p_port_feature_pool_addr; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + for (i = 0; i < RPORT_FEATURE_POOL_SIZE; i++) { + list_add_tail(&rport_fea_recard->entry_feature, + &port_fea_pool->list_free_head); + rport_fea_recard++; + } + spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); + + return RETURN_OK; +} + +void unf_free_port_feature_pool(void) +{ + if (port_fea_pool->p_port_feature_pool_addr) { + vfree(port_fea_pool->p_port_feature_pool_addr); + port_fea_pool->p_port_feature_pool_addr = NULL; + } + vfree(port_fea_pool); + port_fea_pool = NULL; +} + +int unf_common_init(void) +{ + int ret = RETURN_OK; + + unf_dbg_level = UNF_MAJOR; + log_print_level = UNF_KEVENT; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "UNF Driver Version:%s.", UNF_FC_VERSION); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "UNF Compile Time: %s", __TIME_STR__); + + ret = unf_port_feature_pool_init(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port Feature Pool init failed"); + + return ret; + } + + /* 1. Init Transport */ + ret = (int)unf_register_ini_transport(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]INI interface init failed"); + unf_free_port_feature_pool(); + + return ret; + } + + /* 2. 
Init L_Port MG: Y */ + unf_port_mgmt_init(); + + /* 3. Init card MG list: N */ + unf_cm_cread_card_mgr_list(); + + /* 4. Init global event resource: N */ + ret = (int)unf_init_global_event_msg(); + if (ret != RETURN_OK) { + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create global event center failed"); + + return ret; + } + + /* 5. Create event center(one thread per pf): Y */ + ret = (int)unf_creat_event_center(); + if (ret != RETURN_OK) { + unf_destroy_global_event_msg(); + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + fc_event_handle_thd_comp = NULL; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create event center (thread) failed"); + + return ret; + } + + /* 6. Create work queue: Y */ + unf_work_queue = create_workqueue("unf_wq"); + if (!unf_work_queue) { + /* event thread exist */ + unf_cm_event_thread_exit(); + unf_destroy_global_event_msg(); + + fc_event_handle_thd_comp = NULL; + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create work queue failed"); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Init common layer succeed"); + + return ret; +} + +static void unf_destroy_dirty_port(void) +{ + unsigned int v_ditry_port_num = 0; + + unf_show_dirty_port(UNF_FALSE, &v_ditry_port_num); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Sys has %d dirty L_Port(s)", v_ditry_port_num); +} + +void unf_common_exit(void) +{ + unf_free_port_feature_pool(); + + unf_destroy_dirty_port(); + + flush_workqueue(unf_work_queue); + destroy_workqueue(unf_work_queue); + unf_work_queue = NULL; + + unf_cm_event_thread_exit(); + + unf_destroy_global_event_msg(); + + unf_uninit_cm_low_level_handle(); + + unf_port_mgmt_deinit(); + + unf_unregister_ini_transport(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]HIFC module remove succeed"); +} diff --git a/drivers/scsi/huawei/hifc/unf_io.c b/drivers/scsi/huawei/hifc/unf_io.c new file mode 100644 index 0000000000000000000000000000000000000000..f52b9927d7de4c311c17b5378b0077083761d077 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io.c @@ -0,0 +1,1338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_knl_adp.h" +#include "unf_log.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_portman.h" +#include "unf_io_abnormal.h" + +#define UNF_GET_FCP_CTL(pkg) ((((pkg)->status) >> 8) & 0xFF) +#define UNF_GET_SCSI_STATUS(pkg) (((pkg)->status) & 0xFF) + +static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); + +struct unf_ini_error_handler { + unsigned int error_code; + unsigned int (*pfn_unf_ini_error_handler)(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +}; + +struct 
unf_ini_error_handler ini_error_handler_table[] = { + { UNF_IO_SUCCESS, unf_io_success_handler }, + { UNF_IO_ABORTED, unf_ini_error_default_handler }, + { UNF_IO_FAILED, unf_ini_error_default_handler }, + { UNF_IO_ABORT_ABTS, unf_ini_error_default_handler }, + { UNF_IO_ABORT_LOGIN, unf_ini_error_default_handler }, + { UNF_IO_ABORT_REET, unf_ini_error_default_handler }, + { UNF_IO_ABORT_FAILED, unf_ini_error_default_handler }, + { UNF_IO_OUTOF_ORDER, unf_ini_error_default_handler }, + { UNF_IO_FTO, unf_ini_error_default_handler }, + { UNF_IO_LINK_FAILURE, unf_ini_error_default_handler }, + { UNF_IO_OVER_FLOW, unf_ini_error_default_handler }, + { UNF_IO_RSP_OVER, unf_ini_error_default_handler }, + { UNF_IO_LOST_FRAME, unf_ini_error_default_handler }, + { UNF_IO_UNDER_FLOW, unf_io_under_flow_handler }, + { UNF_IO_HOST_PROG_ERROR, unf_ini_error_default_handler }, + { UNF_IO_SEST_PROG_ERROR, unf_ini_error_default_handler }, + { UNF_IO_INVALID_ENTRY, unf_ini_error_default_handler }, + { UNF_IO_ABORT_SEQ_NOT, unf_ini_error_default_handler }, + { UNF_IO_REJECT, unf_ini_error_default_handler }, + { UNF_IO_EDC_IN_ERROR, unf_ini_error_default_handler }, + { UNF_IO_EDC_OUT_ERROR, unf_ini_error_default_handler }, + { UNF_IO_UNINIT_KEK_ERR, unf_ini_error_default_handler }, + { UNF_IO_DEK_OUTOF_RANGE, unf_ini_error_default_handler }, + { UNF_IO_KEY_UNWRAP_ERR, unf_ini_error_default_handler }, + { UNF_IO_KEY_TAG_ERR, unf_ini_error_default_handler }, + { UNF_IO_KEY_ECC_ERR, unf_ini_error_default_handler }, + { UNF_IO_BLOCK_SIZE_ERROR, unf_ini_error_default_handler }, + { UNF_IO_ILLEGAL_CIPHER_MODE, unf_ini_error_default_handler }, + { UNF_IO_CLEAN_UP, unf_ini_error_default_handler }, + { UNF_IO_ABORTED_BY_TARGET, unf_ini_error_default_handler }, + { UNF_IO_TRANSPORT_ERROR, unf_ini_error_default_handler }, + { UNF_IO_LINK_FLASH, unf_ini_error_default_handler }, + { UNF_IO_TIMEOUT, unf_ini_error_default_handler }, + { UNF_IO_DMA_ERROR, unf_ini_error_default_handler }, + { UNF_IO_DIF_ERROR, unf_ini_dif_error_handler }, + { UNF_IO_INCOMPLETE, unf_ini_error_default_handler }, + { UNF_IO_DIF_REF_ERROR, unf_ini_dif_error_handler }, + { UNF_IO_DIF_GEN_ERROR, unf_ini_dif_error_handler } +}; + +void unf_done_ini_xchg(struct unf_xchg_s *v_xchg) +{ + /* + * About I/O Done + * 1. normal case + * 2. Send ABTS & RCVD RSP + * 3. Send ABTS & timer timeout + */ + struct unf_scsi_cmd_s scsi_cmd = { 0 }; + unsigned long flags = 0; + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id = 0; + + UNF_CHECK_VALID(0x1301, TRUE, v_xchg, return); + + /* scsi_cmnd validity check */ + if (unlikely(!v_xchg->scsi_cmnd_info.scsi_cmnd)) + return; + + /* 1. Free RX_ID for INI SIRT: Do not care + * 2. set & check exchange state + * + * for Set UP_ABORT Tag: + * 1) L_Port destroy + * 2) AC power down + * 3) LUN reset + * 4) Target/Session reset + * 5) SCSI send Abort(ABTS) + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_DONE; + if (unlikely(v_xchg->io_state & (INI_IO_STATE_UPABORT | + INI_IO_STATE_UPSEND_ERR | + INI_IO_STATE_TMF_ABORT))) { + /* + * a. UPABORT: scsi have send ABTS + * --->>> do not call SCSI_Done, return directly + * b. 
UPSEND_ERR: error happened duiring LLDD send SCSI_CMD + * --->>> do not call SCSI_Done, scsi need retry + */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_KEVENT, + "[event]Exchange(0x%p) Cmdsn:0x%lx upCmd:%p oxid(0x%x) with state(0x%x) has been aborted or send error", + v_xchg, (unsigned long)v_xchg->cmnd_sn, + v_xchg->scsi_cmnd_info.scsi_cmnd, v_xchg->ox_id, + v_xchg->io_state); + + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + /* here, return directly */ + return; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* 3. Get scsi_cmnd info */ + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + + /* + * 4. Set: + * scsi_cmnd; + * cmnd_done_func; + * cmnd up_level_done; + * sense_buff_addr; + * resid_length; + * cmnd_result; + * dif_info + * + * UNF_SCSI_CMND <<-- UNF_SCSI_CMND_INFO + */ + UNF_SET_HOST_CMND((&scsi_cmd), scsi_cmnd_info->scsi_cmnd); + UNF_SET_CMND_DONE_FUNC((&scsi_cmd), scsi_cmnd_info->pfn_done); + scsi_cmd.drv_private = v_xchg->lport; + if (unlikely((UNF_SCSI_STATUS(v_xchg->scsi_cmnd_info.result)) & + FCP_SNS_LEN_VALID_MASK)) { + unf_save_sense_data( + scsi_cmd.upper_cmnd, + (char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + SCSI_SENSE_DATA_LEN); + } + UNF_SET_RESID((&scsi_cmd), (unsigned int)v_xchg->resid_len); + UNF_SET_CMND_RESULT((&scsi_cmd), scsi_cmnd_info->result); + memcpy(&scsi_cmd.dif_info, &v_xchg->dif_info, + sizeof(struct dif_info_s)); + + scsi_id = scsi_cmnd_info->scsi_id; + + /* 5. call scsi_cmnd_done func: unf_scsi_done */ + UNF_DONE_SCSI_CMND(&scsi_cmd); + + /* 6. Update IO result CNT */ + if (likely(v_xchg->lport)) { + scsi_image_table = &v_xchg->lport->rport_scsi_table; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + (scsi_cmnd_info->result >> 16)); + } +} + +static inline unsigned int unf_ini_get_sgl_entry_buf( + ini_get_sgl_entry_buf pfn_unf_ini_get_sgl, + void *v_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_req_index, + unsigned int *v_index, + char **v_buf, + unsigned int *v_buf_len) +{ + if (unlikely(!pfn_unf_ini_get_sgl)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Command(0x%p) Get sgl Entry func Null.", v_cmnd); + + return UNF_RETURN_ERROR; + } + + return pfn_unf_ini_get_sgl(v_cmnd, v_driver_sgl, v_upper_sgl, + v_req_index, v_index, v_buf, v_buf_len); +} + +unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len) +{ + struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR); + + xchg = (struct unf_xchg_s *)pkg->xchg_contex; + UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + /* Get SGL Entry buffer for INI Mode */ + ret = unf_ini_get_sgl_entry_buf( + xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf, + xchg->scsi_cmnd_info.scsi_cmnd, + NULL, + &xchg->req_sgl_info.sgl, + &xchg->scsi_cmnd_info.port_id, + &((xchg->req_sgl_info).entry_index), + v_buf, v_buf_len); + + return ret; +} + +unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len) +{ + struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR); + 
UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR); + + xchg = (struct unf_xchg_s *)pkg->xchg_contex; + UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + /* Get SGL Entry buffer for INI Mode */ + ret = unf_ini_get_sgl_entry_buf( + xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf, + xchg->scsi_cmnd_info.scsi_cmnd, + NULL, + &xchg->dif_sgl_info.sgl, + &xchg->scsi_cmnd_info.port_id, + &xchg->dif_sgl_info.entry_index, + v_buf, v_buf_len); + return ret; +} + +unsigned int unf_get_uplevel_cmnd_errcode( + struct unf_ini_error_code_s *v_err_table, + unsigned int v_err_table_count, + unsigned int v_drv_err_code) +{ + unsigned int i; + + /* fail return UNF_RETURN_ERROR,adjust by up level */ + if (unlikely(!v_err_table)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Error Code Table is Null, Error Code(0x%x).", + v_drv_err_code); + + return (unsigned int)UNF_SCSI_HOST(DID_ERROR); + } + + for (i = 0; i < v_err_table_count; i++) { + if (v_drv_err_code == v_err_table[i].drv_err_code) + return v_err_table[i].ap_err_code; + } + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Unsupported Ap Error code by Error Code(0x%x).", + v_drv_err_code); + + return (unsigned int)UNF_SCSI_HOST(DID_ERROR); +} + +static unsigned int unf_ini_status_handle(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int i; + unsigned int ret; + unsigned int status; + + for (i = 0; + i < sizeof(ini_error_handler_table) / + sizeof(struct unf_ini_error_handler); + i++) { + if (UNF_GET_LL_ERR(v_pkg) == + ini_error_handler_table[i].error_code) { + status = unf_get_uplevel_cmnd_errcode( + v_xchg->scsi_cmnd_info.err_code_table, + v_xchg->scsi_cmnd_info.err_code_table_cout, + UNF_GET_LL_ERR(v_pkg)); + + if (ini_error_handler_table[i].pfn_unf_ini_error_handler) { + ret = ini_error_handler_table[i].pfn_unf_ini_error_handler( + v_xchg, + v_pkg, + status); + } else { + /* set exchange->result + * ---to--->>>scsi_result + */ + ret = unf_ini_error_default_handler(v_xchg, + v_pkg, + status); + } + + return ret; + } + } + + status = unf_get_uplevel_cmnd_errcode( + v_xchg->scsi_cmnd_info.err_code_table, + v_xchg->scsi_cmnd_info.err_code_table_cout, + UNF_IO_SOFT_ERR); + + ret = unf_ini_error_default_handler(v_xchg, v_pkg, status); + + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Can not find com status, SID(0x%x) exchange(0x%p) com_status(0x%x) DID(0x%x) hot_pool_tag(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, + v_xchg->did, v_xchg->hot_pool_tag); + + return ret; +} + +static void unf_analysis_response_info(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int *v_status) +{ + unsigned char *resp_buf = NULL; + + /* LL_Driver use Little End, and copy RSP_INFO to COM_Driver */ + if (v_pkg->unf_rsp_pload_bl.buffer_ptr) { + if (v_pkg->unf_rsp_pload_bl.buffer_ptr[0] != + UNF_FCP_TM_RSP_COMPLETE) { + *v_status = UNF_SCSI_HOST(DID_BUS_BUSY); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", + v_xchg->lport, UNF_GET_SCSI_STATUS(v_pkg)); + } + } else { + resp_buf = + (unsigned char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + if ((resp_buf)) { + /* If chip use Little End, then change it to Big End */ + if ((v_pkg->byte_orders & UNF_BIT_3) == 0) + unf_cpu_to_big_end( + resp_buf, + v_pkg->unf_rsp_pload_bl.length); + + /* Chip DAM data with Big End */ + if (resp_buf[3] != UNF_FCP_TM_RSP_COMPLETE) { + *v_status = UNF_SCSI_HOST(DID_BUS_BUSY); + + 
UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", + v_xchg->lport, + UNF_GET_SCSI_STATUS(v_pkg)); + } + } + } +} + +static void unf_analysis_sense_info(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + + unsigned int length = 0; + + /* 4 bytes Align */ + length = v_pkg->unf_sense_pload_bl.length; + if (length % 4 != 0) + length = 4 * ((length / 4) + 1); + + /* + * If have sense info then copy directly + * else, the chip has been dma the data to sense buffer + */ + if (v_pkg->unf_sense_pload_bl.buffer_ptr) { + /* carry from wqe by ll_driver & ucode: do not used */ + unf_cpu_to_big_end(v_pkg->unf_sense_pload_bl.buffer_ptr, + length); + + memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + v_pkg->unf_sense_pload_bl.buffer_ptr, + (unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN, + v_pkg->unf_sense_pload_bl.length)); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]LPort(0x%p), Sense Length(%u), Scsi Status(0x%x).", + v_xchg->lport, + v_pkg->unf_sense_pload_bl.length, + UNF_GET_SCSI_STATUS(v_pkg)); + } else if ((length != 0) && + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) { + /* has been dma to exchange buffer */ + if ((v_pkg->byte_orders & UNF_BIT_4) == 0) { + unf_cpu_to_big_end(((unsigned char *) + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) + + v_pkg->unf_rsp_pload_bl.length, + v_pkg->unf_sense_pload_bl.length); + } + + memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + ((unsigned char *) + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) + + v_pkg->unf_rsp_pload_bl.length, + (unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN, + v_pkg->unf_sense_pload_bl.length)); + } +} + +static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + unsigned char scsi_status; + unsigned char control; + unsigned int status = v_status; + + UNF_CHECK_VALID(0x1311, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1312, TRUE, v_pkg, return UNF_RETURN_ERROR); + + control = UNF_GET_FCP_CTL(v_pkg); + scsi_status = UNF_GET_SCSI_STATUS(v_pkg); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Port(0x%p), Exchange(0x%p) Completed, Control(0x%x), Scsi Status(0x%x)", + v_xchg->lport, v_xchg, control, scsi_status); + + if (control & FCP_SNS_LEN_VALID_MASK) { + /* has sense info */ + if (scsi_status == FCP_SCSI_STATUS_GOOD) + scsi_status = SCSI_CHECK_CONDITION; + + unf_analysis_sense_info(v_xchg, v_pkg); + } else { + /* + * When the FCP_RSP_LEN_VALID bit is set to one, + * the content of the SCSI STATUS CODE field is not reliable + * and shall be ignored by the application client. 
+ */ + if (control & FCP_RSP_LEN_VALID_MASK) + unf_analysis_response_info(v_xchg, v_pkg, &status); + } + + v_xchg->scsi_cmnd_info.result = status | + UNF_SCSI_STATUS(scsi_status); + + return RETURN_OK; +} + +static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + /* set exchange->result ---to--->>> scsi_cmnd->result */ + UNF_CHECK_VALID(0x1313, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1314, TRUE, v_pkg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]SID(0x%x) exchange(0x%p) com_status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response_len(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, v_status, + v_xchg->did, v_xchg->hot_pool_tag, v_pkg->residus_len); + + v_xchg->scsi_cmnd_info.result = + v_status | UNF_SCSI_STATUS(UNF_GET_SCSI_STATUS(v_pkg)); + + return RETURN_OK; +} + +static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + struct unf_dif_control_info_s *dif_control = NULL; + unsigned char *sense_data = NULL; + unsigned short sense_code = 0; + + UNF_CHECK_VALID(0x1315, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1316, TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_status); + + /* + * According to the DIF scheme, the driver sets check condition (0x2) + * when a DIF error occurs and returns values based on the upper-layer + * verification result. + * Check sequence: CRC, LBA, APP; + * if a CRC error is found, the subsequent checks are not performed + */ + v_xchg->scsi_cmnd_info.result = + UNF_SCSI_STATUS(SCSI_CHECK_CONDITION); + dif_control = &v_pkg->dif_control; + + if (v_pkg->status_sub_code == 0) { + UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, 0, + sense_code, DRV_DIF_CRC_ERR); + + UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, 0, + sense_code, DRV_DIF_LBA_ERR); + + UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, 0, + sense_code, DRV_DIF_APP_ERR); + + if (sense_code == 0) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Unexpected DIF error, operation_code(0x%x) actual DIF(0x%llx) expected DIF(0x%llx)", + v_xchg->dif_control.protect_opcode, + *(unsigned long long *) + &dif_control->actual_dif[0], + *(unsigned long long *) + &dif_control->expected_dif[0]); + } + } else { + sense_code = (unsigned short)v_pkg->status_sub_code; + } + + sense_data = (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + memset(sense_data, 0, SCSI_SENSE_DATA_LEN); + sense_data[0] = 0x70; /* response code */ + sense_data[2] = ILLEGAL_REQUEST; /* sense key: 0x05 */ + sense_data[7] = 0x7; /* additional sense length */ + sense_data[12] = (unsigned char)(sense_code >> 8); + sense_data[13] = (unsigned char)sense_code; + + /* valid sense data length snscode[13] */ + return RETURN_OK; +} + +static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + /* under flow: resid_len > 0 */ + UNF_CHECK_VALID(0x1317, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1318, TRUE, v_pkg, return UNF_RETURN_ERROR); + + if ((v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_REPORT_LUN) && + (v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_INQUIRY)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]IO under flow: SID(0x%x) exchange(0x%p) com status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response SID(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, v_status, +
v_xchg->did, v_xchg->hot_pool_tag, + v_pkg->residus_len); + } + + v_xchg->resid_len = (int)v_pkg->residus_len; + (void)unf_io_success_handler(v_xchg, v_pkg, v_status); + + return RETURN_OK; +} + +void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result) +{ + /* + * Exception during process Que_CMND + * 1. L_Port == NULL; + * 2. L_Port == removing; + * 3. R_Port == NULL; + * 4. Xchg == NULL. + */ + UNF_CHECK_VALID(0x1319, TRUE, UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd), + return); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Command(0x%p), Result(0x%x).", v_scsi_cmnd, result); + + UNF_SET_CMND_RESULT(v_scsi_cmnd, result); + + /* struct unf_scsi_cmd_s->pfn_done -->> unf_scsi_done */ + UNF_DONE_SCSI_CMND(v_scsi_cmnd); +} + +static inline void unf_bind_xchg_scsi_cmd(struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + + /* UNF_SCSI_CMND_INFO <<-- UNF_SCSI_CMND */ + scsi_cmnd_info->err_code_table = + UNF_GET_ERR_CODE_TABLE(v_scsi_cmnd); + scsi_cmnd_info->err_code_table_cout = + UNF_GET_ERR_CODE_TABLE_COUNT(v_scsi_cmnd); + scsi_cmnd_info->pfn_done = UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd); + scsi_cmnd_info->scsi_cmnd = UNF_GET_HOST_CMND(v_scsi_cmnd); + scsi_cmnd_info->sense_buf = + (char *)UNF_GET_SENSE_BUF_ADDR(v_scsi_cmnd); + /* unf_get_frame_entry_buf */ + scsi_cmnd_info->pfn_unf_get_sgl_entry_buf = + UNF_GET_SGL_ENTRY_BUF_FUNC(v_scsi_cmnd); + scsi_cmnd_info->sgl = UNF_GET_CMND_SGL(v_scsi_cmnd); + scsi_cmnd_info->time_out = v_scsi_cmnd->time_out; + scsi_cmnd_info->entry_cnt = v_scsi_cmnd->entry_count; + scsi_cmnd_info->port_id = (unsigned int)v_scsi_cmnd->port_id; + scsi_cmnd_info->scsi_id = UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd); +} + +unsigned int unf_ini_scsi_completed(void *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_fcp_cmnd_s *fcp_cmnd = NULL; + unsigned int control; + unsigned short xchg_tag; + unsigned int ret; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1323, TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1324, TRUE, v_pkg, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + xchg_tag = + (unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; + + /* 1. Find Exchange Context */ + xchg = unf_cm_lookup_xchg_by_tag(v_lport, (unsigned short)xchg_tag); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can not find exchange by tag(0x%x)", + lport->port_id, lport->nport_id, xchg_tag); + + /* NOTE: return directly */ + return UNF_RETURN_ERROR; + } + + /* 2. Consistency check */ + UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 3. Increase ref_cnt for exchange protecting */ + ret = unf_xchg_ref_inc(xchg, INI_RESPONSE_DONE); /* hold */ + UNF_CHECK_VALID(0x1325, TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + fcp_cmnd = &xchg->fcp_cmnd; + control = fcp_cmnd->control; + control = UNF_GET_TASK_MGMT_FLAGS(control); + + /* 4. Cancel timer if necessary */ + if (xchg->scsi_cmnd_info.time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + /* 5. 
process scsi TMF if necessary */ + if (control != 0) { + unf_process_scsi_mgmt_result(v_pkg, xchg); + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */ + + /* NOTE: return directly */ + return RETURN_OK; + } + + /* 6. Xchg Abort state check */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (INI_IO_STATE_UPABORT & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) find exchange(%p) state(0x%x) has been aborted", + lport->port_id, xchg, xchg->io_state); + + /* NOTE: release exchange during SCSI ABORT(ABTS) */ + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */ + + return ret; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + /* + * 7. INI SCSI CMND Status process + * set exchange->result ---to--->>> scsi_result + */ + ret = unf_ini_status_handle(xchg, v_pkg); + + /* 8. NOTE: release exchange if necessary */ + unf_cm_free_xchg(lport, xchg); + + /* 9. dec exch ref_cnt */ + /* cancel hold: release resource now */ + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); + + return ret; +} + +unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_cmnd_send)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) low level send scsi function is NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + return v_lport->low_level_func.service_op.pfn_unf_cmnd_send( + v_lport->fc_port, + v_pkg); +} + +struct unf_rport_s *unf_find_rport_by_scsi_id( + struct unf_lport_s *v_lport, + struct unf_ini_error_code_s *v_err_code_table, + unsigned int v_err_code_table_cout, + unsigned int v_scsi_id, + unsigned int *v_scsi_result) +{ + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + /* scsi_table -> session_table -> image_table */ + scsi_image_table = &v_lport->rport_scsi_table; + + /* 1. Scsi_Id validity check */ + if (unlikely(v_scsi_id >= scsi_image_table->max_scsi_id)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Input scsi_id(0x%x) bigger than max_scsi_id(0x%x).", + v_scsi_id, scsi_image_table->max_scsi_id); + + *v_scsi_result = unf_get_uplevel_cmnd_errcode( + v_err_code_table, + v_err_code_table_cout, + UNF_IO_SOFT_ERR); /* did_soft_error */ + + return NULL; + } + + /* 2. 
GetR_Port_Info/R_Port: use Scsi_Id find from L_Port's + * Rport_Scsi_Table (image table) + */ + spin_lock_irqsave(&scsi_image_table->scsi_image_table_lock, flags); + wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id]; + rport = wwpn_rport_info->rport; + spin_unlock_irqrestore(&scsi_image_table->scsi_image_table_lock, flags); + + if (unlikely(!rport)) { + *v_scsi_result = unf_get_uplevel_cmnd_errcode( + v_err_code_table, + v_err_code_table_cout, + /* did_not_connect */ + UNF_IO_PORT_LOGOUT); + + return NULL; + } + + return rport; +} + +static unsigned int unf_build_xchg_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI_CMND -->> FCP_CMND */ + if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_TO_DEVICE) { + v_fcp_cmnd->control = UNF_FCP_WR_DATA; + } else if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_FROM_DEVICE) { + v_fcp_cmnd->control = UNF_FCP_RD_DATA; + } else { + /* DMA Direction None */ + v_fcp_cmnd->control = 0; + } + + memcpy(v_fcp_cmnd->cdb, &UNF_GET_FCP_CMND(v_scsi_cmnd), + v_scsi_cmnd->cmnd_len); + + if (((v_fcp_cmnd->control == UNF_FCP_WR_DATA) && + (IS_READ_COMMAND(v_fcp_cmnd->cdb[0]))) || + ((v_fcp_cmnd->control == UNF_FCP_RD_DATA) && + (IS_WRITE_COMMAND(v_fcp_cmnd->cdb[0])))) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR, + "Scsi command direction inconsistent, CDB[0](0x%x), direction(0x%x).", + v_fcp_cmnd->cdb[0], v_fcp_cmnd->control); + + return UNF_RETURN_ERROR; + } + + memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id, + sizeof(v_fcp_cmnd->lun)); + + unf_big_end_to_cpu((void *)v_fcp_cmnd->cdb, + sizeof(v_fcp_cmnd->cdb)); + v_fcp_cmnd->data_length = UNF_GET_DATA_LEN(v_scsi_cmnd); + + return RETURN_OK; +} + +static void unf_adjust_xchg_len(struct unf_xchg_s *v_xchg, + unsigned int v_scsi_cmnd) +{ + switch (v_scsi_cmnd) { + case SCSIOPC_REQUEST_SENSE: /* requires different buffer */ + v_xchg->data_len = SCSI_SENSE_DATA_LEN; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR, + "Request Sense new."); + break; + case SCSIOPC_TEST_UNIT_READY: + case SCSIOPC_RESERVE: + case SCSIOPC_RELEASE: + case SCSIOPC_START_STOP_UNIT: + v_xchg->data_len = 0; + break; + default: + break; + } +} + +static void unf_copy_dif_control(struct unf_dif_control_info_s *v_dif_control, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + v_dif_control->fcp_dl = v_scsi_cmnd->dif_control.fcp_dl; + v_dif_control->protect_opcode = + v_scsi_cmnd->dif_control.protect_opcode; + v_dif_control->start_lba = v_scsi_cmnd->dif_control.start_lba; + v_dif_control->app_tag = v_scsi_cmnd->dif_control.app_tag; + + v_dif_control->flags = v_scsi_cmnd->dif_control.flags; + v_dif_control->dif_sge_count = + v_scsi_cmnd->dif_control.dif_sge_count; + v_dif_control->dif_sgl = v_scsi_cmnd->dif_control.dif_sgl; +} + +static void unf_adjsut_dif_pci_transfer_len(struct unf_xchg_s *v_xchg, + unsigned int direction) +{ + struct unf_dif_control_info_s *dif_control = NULL; + unsigned int sector_size = 512; + + dif_control = &v_xchg->dif_control; + + if (dif_control->protect_opcode == UNF_DIF_ACTION_NONE) + return; + + switch (dif_control->protect_opcode & UNF_DIF_ACTION_MASK) { + case UNF_DIF_ACTION_INSERT: + if (direction == DMA_TO_DEVICE) { + /* write IO,insert,Indicates that data with DIF is + * transmitted over the link. + */ + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, + sector_size) * + UNF_DIF_AREA_SIZE; + } else { + /* read IO,insert,Indicates that the internal DIf is + * carried, and the link does not carry the DIf. 
+ */ + dif_control->fcp_dl = v_xchg->data_len; + } + break; + case UNF_DIF_ACTION_VERIFY_AND_DELETE: + if (direction == DMA_TO_DEVICE) { + /* write IO,Delete,Indicates that the internal DIf is + * carried, and the link does not carry the DIf. + */ + dif_control->fcp_dl = v_xchg->data_len; + } else { + /* read IO,Delete,Indicates that data with DIF is + * carried on the link and does not contain DIF + * on internal. + */ + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, + sector_size) * + UNF_DIF_AREA_SIZE; + } + break; + case UNF_DIF_ACTION_VERIFY_AND_FORWARD: + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, sector_size) * + UNF_DIF_AREA_SIZE; + break; + default: + dif_control->fcp_dl = v_xchg->data_len; + break; + } + + v_xchg->fcp_cmnd.data_length = dif_control->fcp_dl; +} + +static int unf_save_scsi_cmnd_to_xchg(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_xchg_s *xchg = v_xchg; + unsigned int result; + + v_scsi_cmnd->driver_scribble = (void *)xchg->start_jif; + xchg->rport = rport; + xchg->rport_bind_jifs = rport->rport_alloc_jifs; + + if (lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + /* Build Xchg SCSI_CMND info */ + unf_bind_xchg_scsi_cmd(xchg, v_scsi_cmnd); + + xchg->data_len = UNF_GET_DATA_LEN(v_scsi_cmnd); + xchg->data_direction = UNF_GET_DATA_DIRECTION(v_scsi_cmnd); + xchg->sid = lport->nport_id; + xchg->did = rport->nport_id; + xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; + xchg->world_id = v_scsi_cmnd->world_id; + xchg->cmnd_sn = v_scsi_cmnd->cmnd_sn; + xchg->scsi_id = v_scsi_cmnd->scsi_id; + + /* Build Xchg fcp_cmnd */ + result = unf_build_xchg_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd); + if (unlikely(result != RETURN_OK)) + return UNF_RETURN_ERROR; + + unf_adjust_xchg_len(xchg, UNF_GET_FCP_CMND(v_scsi_cmnd)); + + /* Dif (control) info */ + unf_copy_dif_control(&xchg->dif_control, v_scsi_cmnd); + memcpy(&xchg->dif_info, &v_scsi_cmnd->dif_info, + sizeof(struct dif_info_s)); + unf_adjsut_dif_pci_transfer_len(xchg, + UNF_GET_DATA_DIRECTION(v_scsi_cmnd)); + + /* single sgl info */ + if ((xchg->data_direction != DMA_NONE) && + (UNF_GET_CMND_SGL(v_scsi_cmnd))) { + xchg->req_sgl_info.sgl = UNF_GET_CMND_SGL(v_scsi_cmnd); + /* Save the sgl header for easy location and printing. 
*/ + xchg->req_sgl_info.sgl_start = xchg->req_sgl_info.sgl; + xchg->req_sgl_info.req_index = 0; + xchg->req_sgl_info.entry_index = 0; + } + + if (v_scsi_cmnd->dif_control.dif_sgl) { + xchg->dif_sgl_info.sgl = UNF_INI_GET_DIF_SGL(v_scsi_cmnd); + xchg->dif_sgl_info.entry_index = 0; + xchg->dif_sgl_info.req_index = 0; + xchg->dif_sgl_info.sgl_start = xchg->dif_sgl_info.sgl; + } + + return RETURN_OK; +} + +static int unf_send_fcp_cmnd(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int result; + unsigned long flags = 0; + + memcpy(&pkg.dif_control, &xchg->dif_control, + sizeof(struct unf_dif_control_info_s)); + pkg.dif_control.fcp_dl = xchg->dif_control.fcp_dl; + pkg.transfer_len = xchg->data_len; /* Pcie data transfer length */ + pkg.xchg_contex = xchg; + pkg.qos_level = 0; + pkg.entry_count = xchg->scsi_cmnd_info.entry_cnt; + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + if ((xchg->data_direction == DMA_NONE) || (!scsi_cmnd_info->sgl)) + pkg.entry_count = 0; + + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + pkg.private[PKG_PRIVATE_XCHG_VP_INDEX] = lport->vp_index; + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag; + + pkg.fcp_cmnd = &xchg->fcp_cmnd; + pkg.frame_head.csctl_sid = lport->nport_id; + pkg.frame_head.rctl_did = rport->nport_id; + pkg.upper_cmd = xchg->scsi_cmnd_info.scsi_cmnd; + + /* exch->fcp_rsp_id --->>> pkg->buffer_ptr */ + pkg.unf_rsp_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + pkg.unf_rsp_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr; + pkg.unf_rsp_pload_bl.length = PAGE_SIZE; + + pkg.frame_head.oxid_rxid = + ((unsigned int)xchg->ox_id << 16 | xchg->rx_id); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_INFO, + "[info]LPort (0x%p), Nport ID(0x%x) RPort ID(0x%x) direction(0x%x) magic number(0x%x) send IO to OX_ID(0x%x) entry count(0x%x) tag(0x%x)", + lport, lport->nport_id, rport->nport_id, + v_xchg->data_direction, + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME], + v_xchg->ox_id, pkg.entry_count, xchg->hot_pool_tag); + + atomic_inc(&rport->pending_io_cnt); + if ((rport->tape_support_needed == UNF_TRUE) && + (atomic_read(&rport->pending_io_cnt) <= 3)) { + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_REC_TIMEOUT_WAIT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + scsi_cmnd_info->abort_timeout = scsi_cmnd_info->time_out; + scsi_cmnd_info->time_out = UNF_REC_TOV; + } + + /* 3. add INI I/O timer if necessary */ + if (scsi_cmnd_info->time_out != 0) { + /* I/O inner timer, do not used at this time */ + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + xchg, + scsi_cmnd_info->time_out, + UNF_TIMER_TYPE_REQ_IO); + } + + /* 4. 
R_Port state check */ + if (unlikely((rport->lport_ini_state != UNF_PORT_STATE_LINKUP) || + (rport->rp_state > UNF_RPORT_ST_READY))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready", + lport->port_id, rport, rport->nport_id, + rport->lport_ini_state, rport->rp_state, + pkg.upper_cmd); + + result = unf_get_uplevel_cmnd_errcode( + scsi_cmnd_info->err_code_table, + scsi_cmnd_info->err_code_table_cout, + UNF_IO_INCOMPLETE); + scsi_cmnd_info->result = result; + if (scsi_cmnd_info->time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + unf_cm_free_xchg(lport, xchg); + /* DID_IMM_RETRY */ + return RETURN_OK; + } else if (rport->rp_state < UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready", + lport->port_id, rport, rport->nport_id, + rport->lport_ini_state, rport->rp_state, + pkg.upper_cmd); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + if (unlikely(scsi_cmnd_info->time_out != 0)) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)xchg); + + /* Host busy & need scsi retry */ + return UNF_RETURN_ERROR; + } + + /* 5. send scsi_cmnd to FC_LL Driver */ + if (unf_hardware_start_io(lport, &pkg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port (0x%x) upper_cmd(0x%p) Hardware Send IO failed.", + lport->port_id, pkg.upper_cmd); + + unf_release_esgls(xchg); + result = unf_get_uplevel_cmnd_errcode( + scsi_cmnd_info->err_code_table, + scsi_cmnd_info->err_code_table_cout, + UNF_IO_INCOMPLETE); + scsi_cmnd_info->result = result; + if (scsi_cmnd_info->time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + unf_cm_free_xchg(lport, xchg); + /* SCSI_DONE */ + return RETURN_OK; + } + + return RETURN_OK; +} + +int unf_prefer_to_send_scsi_cmnd(struct unf_xchg_s *v_xchg) +{ + /* + * About INI_IO_STATE_DRABORT: + * 1. Set ABORT tag: Clean L_Port/V_Port Link Down I/O + * with: INI_busy_list, delay_list, delay_transfer_list, wait_list + * + * 2. Set ABORT tag: for target session: + * with: INI_busy_list, delay_list, delay_transfer_list, wait_list + * a. R_Port remove + * b. Send PLOGI_ACC callback + * c. RCVD PLOGI + * d. RCVD LOGO + * + * 3. if set ABORT: prevent send scsi_cmnd to target + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + int ret; + unsigned long flags = 0; + + lport = v_xchg->lport; + rport = v_xchg->rport; + if (unlikely(!lport || !rport)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%p) or RPort(0x%p) is NULL", lport, + rport); + + /* if happened (never happen): need retry */ + return UNF_RETURN_ERROR; + } + + /* 1. 
inc ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(v_xchg, INI_SEND_CMND); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exhg(%p) exception ref(%d) ", + lport->port_id, v_xchg, + atomic_read(&v_xchg->ref_cnt)); + /* exchange exception, need retry */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ + return UNF_RETURN_ERROR; + } + + /* 2. Xchg Abort state check: Free EXCH if necessary */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + if (unlikely((v_xchg->io_state & INI_IO_STATE_UPABORT) || + (v_xchg->io_state & INI_IO_STATE_DRABORT))) { + /* Prevent to send: UP_ABORT/DRV_ABORT */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + v_xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY); + + unf_xchg_ref_dec(v_xchg, INI_SEND_CMND); + unf_cm_free_xchg(lport, v_xchg); + + /* + * Release exchange & return directly: + * 1. FC LLDD rcvd ABTS before scsi_cmnd: do nothing + * 2. INI_IO_STATE_UPABORT/INI_IO_STATE_DRABORT: + * discard this cmnd directly + */ + return RETURN_OK; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* 3. Send FCP_CMND to FC_LL Driver */ + ret = unf_send_fcp_cmnd(lport, rport, v_xchg); + if (unlikely(ret != RETURN_OK)) { + /* exchange exception, need retry */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send exhg(%p) OX_ID(0x%x) RX_ID(0x%x) to Rport(%p) NPortID(0x%x) state(0x%x) scsi_id(0x%x) failed", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, + rport, rport->nport_id, rport->rp_state, + rport->scsi_id); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ + unf_cm_free_xchg(lport, v_xchg); + } + + /* 4. dec ref_cnt */ + unf_xchg_ref_dec(v_xchg, INI_SEND_CMND); + + return ret; +} + +struct unf_lport_s *unf_find_lport_by_scsi_cmd( + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_lport_s *lport = NULL; + + /* cmd -->> L_Port */ + lport = (struct unf_lport_s *)UNF_GET_HOST_PORT_BY_CMND(v_scsi_cmnd); + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Find Port by scsi_cmnd(0x%p) failed", + v_scsi_cmnd); + + /* cmnd -->> scsi_host_id -->> L_Port */ + lport = unf_find_lport_by_scsi_host_id( + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + } + return lport; +} + +int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Command --->>> FC FCP Command */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_rport_s *rport = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int result = 0; + int ret; + unsigned long flags = 0; + unsigned int scsi_id; + unsigned int exhg_mgr_type = UNF_XCHG_MGR_TYPE_RANDOM; + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + + /* + * corresponds to the insertion or removal scenario or + * the remove card scenario. + * This method is used to search for LPort information + * based on SCSI_HOST_ID. + * The Slave alloc is not invoked when LUNs are not scanned. + * Therefore, the Lport cannot be obtained. + * You need to obtain the Lport from the Lport linked list. 
+ * + * FC After Link Up, the first SCSI command is inquiry. + * Before inquiry, SCSI delivers slave_alloc. + */ + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Find Port by scsi cmd(0x%p) failed", + v_scsi_cmnd); + + /* find from ini_error_code_table1 */ + result = unf_get_uplevel_cmnd_errcode( + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_IO_NO_LPORT); /* did_not_connect */ + + /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* Get Local SCSI_Image_table & SCSI_ID */ + scsi_image_table = &lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + /* 2. L_Port State check */ + if (unlikely((lport->b_port_removing == UNF_TRUE) || + (lport->b_pcie_linkdown == UNF_TRUE))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing(%d) or pcielinkdown(%d) and return with scsi_id(0x%x)", + lport->port_id, lport->b_port_removing, + lport->b_pcie_linkdown, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + result = unf_get_uplevel_cmnd_errcode( + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_IO_NO_LPORT); /* did_not_connect */ + + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16)); + + /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* 3. Get R_Port */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &result); + if (unlikely(!rport)) { + /* never happen: do not care */ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) find RPort by scsi_id(0x%x) failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16)); + + /* DID_NOT_CONNECT/DID_SOFT_ERROR & SCSI_DONE & + * RETURN_OK(0) & I/O error + */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* 4. Can't get exchange & retrun host busy, retry by uplevel */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg( + lport, + exhg_mgr_type << 16 | UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) get free exchange for INI IO(0x%x) failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + /* NOTE: need scsi retry */ + return UNF_RETURN_ERROR; + } + + xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_ERROR); + /* 5. Save the SCSI CMND information in advance. 
*/ + ret = unf_save_scsi_cmnd_to_xchg(lport, rport, xchg, v_scsi_cmnd); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) save scsi_cmnd info(0x%x) to exchange failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_UPSEND_ERR; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Don't Do SCSI_DONE, + * need retry I/O + */ + unf_cm_free_xchg(lport, xchg); + /* NOTE: need scsi retry */ + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Get exchange(0x%p) OX_ID(0x%x) RX_ID(0x%x) hot_pool_tag(0x%x) for Pcmd:%p,Cmdsn:0x%lx,WorldId:%u", + xchg, xchg->ox_id, xchg->rx_id, + xchg->hot_pool_tag, v_scsi_cmnd->upper_cmnd, + (unsigned long)v_scsi_cmnd->cmnd_sn, + v_scsi_cmnd->world_id); + /* 6. Send SCSI CMND */ + ret = unf_prefer_to_send_scsi_cmnd(xchg); + return ret; +} diff --git a/drivers/scsi/huawei/hifc/unf_io.h b/drivers/scsi/huawei/hifc/unf_io.h new file mode 100644 index 0000000000000000000000000000000000000000..1b8d0daa14123562e30da8ba90ae3885d13412fa --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_IO_H__ +#define __UNF_IO_H__ + +#define UNF_MAX_TARGET_NUMBER 2048 +#define UNF_DEFAULT_MAX_LUN 0xFFFF +#define UNF_MAX_DMA_SEGS 0x400 +#define UNF_MAX_SCSI_CMND_LEN 16 +#define UNF_MAX_SECTORS 0xffff +#define UNF_MAX_BUS_CHANNEL 0 +#define UNF_DMA_BOUNDARY 0xffffffffffffffff +#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */ + +#define NO_SENSE 0x00 +#define RECOVERED_ERROR 0x01 +#define NOT_READY 0x02 +#define MEDIUM_ERROR 0x03 +#define HARDWARE_ERROR 0x04 +#define ILLEGAL_REQUEST 0x05 +#define UNIT_ATTENTION 0x06 +#define DATA_PROTECT 0x07 +#define BLANK_CHECK 0x08 +#define COPY_ABORTED 0x0a +#define ABORTED_COMMAND 0x0b +#define VOLUME_OVERFLOW 0x0d +#define MISCOMPARE 0x0e + +#define UNF_GET_SCSI_HOST_ID_BY_CMND(pcmd) ((pcmd)->scsi_host_id) +#define UNF_GET_SCSI_ID_BY_CMND(pcmd) ((pcmd)->scsi_id) +#define UNF_GET_HOST_PORT_BY_CMND(pcmd) ((pcmd)->drv_private) +#define UNF_GET_FCP_CMND(pcmd) ((pcmd)->pcmnd[0]) +#define UNF_GET_DATA_LEN(pcmd) ((pcmd)->transfer_len) +#define UNF_GET_DATA_DIRECTION(pcmd) ((pcmd)->data_direction) + +#define UNF_GET_HOST_CMND(pcmd) ((pcmd)->upper_cmnd) +#define UNF_GET_CMND_DONE_FUNC(pcmd) ((pcmd)->pfn_done) +#define UNF_GET_SGL_ENTRY_BUF_FUNC(pcmd) ((pcmd)->pfn_unf_ini_get_sgl_entry) +#define UNF_GET_SENSE_BUF_ADDR(pcmd) ((pcmd)->sense_buf) +#define UNF_GET_ERR_CODE_TABLE(pcmd) ((pcmd)->err_code_table) +#define UNF_GET_ERR_CODE_TABLE_COUNT(pcmd) ((pcmd)->err_code_table_cout) + +#define UNF_SET_HOST_CMND(pcmd, host_cmd) ((pcmd)->upper_cmnd = (host_cmd)) +#define UNF_SET_CMND_DONE_FUNC(pcmd, pfn) ((pcmd)->pfn_done = (pfn)) + +#define UNF_SET_RESID(pcmd, id_len) ((pcmd)->resid = (id_len)) +#define UNF_SET_CMND_RESULT(pcmd, uiresult) ((pcmd)->result = ((int)uiresult)) + +#define UNF_DONE_SCSI_CMND(pcmd) ((pcmd)->pfn_done(pcmd)) + +#define UNF_GET_CMND_SGL(pcmd) ((pcmd)->sgl) +#define UNF_INI_GET_DIF_SGL(pcmd) ((pcmd)->dif_control.dif_sgl) + +unsigned int unf_ini_scsi_completed(void *v_lport, + struct unf_frame_pkg_s *v_pkg); +unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len); +unsigned int 
unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len); + +void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result); +void unf_done_ini_xchg(struct unf_xchg_s *v_xchg); +unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg); +void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg); +int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +struct unf_rport_s *unf_find_rport_by_scsi_id( + struct unf_lport_s *v_lport, + struct unf_ini_error_code_s *v_err_code_table, + unsigned int v_err_code_table_cout, + unsigned int v_scsi_id, + unsigned int *v_scsi_result); + +struct unf_lport_s *unf_find_lport_by_scsi_cmd( + struct unf_scsi_cmd_s *v_scsi_cmnd); +void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); +unsigned int unf_get_uplevel_cmnd_errcode( + struct unf_ini_error_code_s *v_err_table, + unsigned int v_err_table_count, + unsigned int v_drv_err_code); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_io_abnormal.c b/drivers/scsi/huawei/hifc/unf_io_abnormal.c new file mode 100644 index 0000000000000000000000000000000000000000..f96bd0755b23985bb63e368625029b7a7f487cb4 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io_abnormal.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_portman.h" +#include "unf_service.h" +#include "unf_io_abnormal.h" + +static int unf_send_abts_success(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd, + unsigned int time_out_value) +{ + int wait_marker = UNF_TRUE; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id; + unsigned int ret; + unsigned long flag = 0; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + wait_marker = (v_xchg->abts_state & MARKER_STS_RECEIVED) ? 
+ UNF_FALSE : UNF_TRUE; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + if (wait_marker) { + if (down_timeout( + &v_xchg->task_sema, + (long long)msecs_to_jiffies(time_out_value))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)", + v_lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->hot_pool_tag, + v_xchg->rx_id); + + /* Cancel abts rsp timer when sema timeout */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + /* Cnacel the flag of INI_IO_STATE_UPABORT and + * process the io in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return UNF_SCSI_ABORT_FAIL; + } + } else { + v_xchg->ucode_abts_state = UNF_IO_SUCCESS; + } + + scsi_image_table = &v_lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) || + (v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", + v_lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id, + v_xchg->ucode_abts_state); + ret = DID_RESET; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret); + unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16); + return UNF_SCSI_ABORT_SUCCESS; + } + + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + /* Cancel abts rsp timer when sema timeout */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)", + v_lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->hot_pool_tag, + v_xchg->scsi_cmnd_info.result, v_xchg->io_state); + + /* return fail and then enter TMF */ + return UNF_SCSI_ABORT_FAIL; +} + +static int unf_ini_abort_cmnd(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* + * About INI_IO_STATE_UPABORT: + * + * 1. Check: AC power down + * 2. Check: L_Port destroy + * 3. Check: I/O XCHG timeout + * 4. Set ABORT: send ABTS + * 5. Set ABORT: LUN reset + * 6. Set ABORT: Target reset + * 7. Check: Prevent to send I/O to target (UNF_PreferToSendScsiCmnd) + * 8. Check: Done INI XCHG --->>> do not call scsi_done, return directly + * 9. Check: INI SCSI Complete --->>> + * do not call scsi_done, return directly + */ +#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000) /* 2s */ + + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id; + unsigned int ret; + + unsigned int time_out_value = (unsigned int)UNF_WAIT_SEM_TIMEOUT; + + UNF_CHECK_VALID(0x1335, TRUE, v_lport, return UNF_SCSI_ABORT_FAIL); + lport = v_lport; + + /* 1. Xchg State Set: INI_IO_STATE_UPABORT */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state |= INI_IO_STATE_UPABORT; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + /* 2. 
R_Port check */ + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg->ox_id, v_xchg->rx_id); + + return UNF_SCSI_ABORT_SUCCESS; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort's state(0x%x) is not ready but send ABTS also, exchange(0x%p) tag(0x%x)", + lport->port_id, rport->rp_state, + v_xchg, v_xchg->hot_pool_tag); + + /* + * Important: Send ABTS also & update timer + * Purpose: only used for release chip (uCode) resource, + * continue + */ + time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT; + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* 3. L_Port State check */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing", lport->port_id); + + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + + return UNF_SCSI_ABORT_FAIL; + } + + scsi_image_table = &lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + /* If pcie linkdown, complete this io and flush all io */ + if (unlikely(lport->b_pcie_linkdown == UNF_TRUE)) { + ret = DID_RESET; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret); + unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16); + unf_free_lport_all_xchg(v_lport); + return UNF_SCSI_ABORT_SUCCESS; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) OxID(0x%x 0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx)", + lport->port_id, v_xchg, + (unsigned long long)jiffies_to_msecs(jiffies) - + (unsigned long long)jiffies_to_msecs(v_xchg->alloc_jif), + v_xchg->sid, v_xchg->did, rport->port_name, + v_xchg->ox_id, v_xchg->hot_pool_tag, v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id, v_scsi_cmnd->cmnd_sn); + + /* Init abts marker semaphore */ + sema_init(&v_xchg->task_sema, 0); + + if (v_xchg->scsi_cmnd_info.time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(v_xchg); + + /* Add timer for sending ABTS */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + (unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + /* 4. Send INI ABTS CMND */ + if (unf_send_abts(lport, v_xchg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Send ABTS failed. 
Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->hot_pool_tag, + v_xchg->rx_id); + + /* Cancel timer when sending ABTS failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + /* Cnacel the flag of INI_IO_STATE_UPABORT + * and process the io in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return UNF_SCSI_ABORT_FAIL; + } + + return unf_send_abts_success(lport, v_xchg, v_scsi_cmnd, + time_out_value); +} + +static void unf_flush_ini_resp_que(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1335, TRUE, v_lport, return); + + if (v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que) + (void)v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que(v_lport->fc_port); +} + +int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* + * SCSI ABORT Command --->>> FC ABTS Command + * If return ABORT_FAIL then enter TMF process + */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_rport_s *rport = NULL; + struct unf_lport_s *xchg_lport = NULL; + int ret; + unsigned long flag = 0; + + /* 1. Get L_Port: Point to Scsi_host */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi host id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + return UNF_SCSI_ABORT_FAIL; + } + + /* 2. find target Xchg for INI Abort CMND */ + xchg = unf_cm_lookup_xchg_by_cmnd_sn(lport, v_scsi_cmnd->cmnd_sn, + v_scsi_cmnd->world_id); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_ABNORMAL, + UNF_WARN, + "[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx)", + lport->port_id, + (unsigned long)v_scsi_cmnd->cmnd_sn); + + unf_flush_ini_resp_que(lport); + + return UNF_SCSI_ABORT_SUCCESS; + } + + /* 3. increase ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_ABORT); + if (unlikely(ret != RETURN_OK)) { + unf_flush_ini_resp_que(lport); + return UNF_SCSI_ABORT_SUCCESS; + } + + v_scsi_cmnd->upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; + + xchg->debug_hook = UNF_TRUE; + + /* 4. Exchang L_Port/R_Port Get & check */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + xchg_lport = xchg->lport; + rport = xchg->rport; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (unlikely(!xchg_lport || !rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)", + xchg, xchg->io_state); + + unf_xchg_ref_dec(xchg, INI_EH_ABORT); + + if (!xchg_lport) + return UNF_SCSI_ABORT_FAIL; /* for L_Port */ + return UNF_SCSI_ABORT_SUCCESS; /* for R_Port */ + } + + /* 5. Send INI Abort Cmnd */ + ret = unf_ini_abort_cmnd(xchg_lport, xchg, v_scsi_cmnd); + + /* 6. 
decrease exchange ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_ABORT); + + return ret; +} + +static unsigned int unf_tmf_timeout_recovery_default(void *v_rport, + void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + lport = xchg->lport; + UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_enter_logo(lport, rport); + return RETURN_OK; +} + +void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + lport = xchg->lport; + UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return); + + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (INI_IO_STATE_DONE & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + return; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + if (xchg->rport_bind_jifs != rport->rport_alloc_jifs) + return; + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_enter_logo(lport, rport); +} + +unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg) +{ + /* Do port reset or R_Port LOGO */ + int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + + lport = xchg->lport->root_lport; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + + /* 1. TMF response timeout & Marker STS timeout */ + if (!(xchg->tmf_state & + (MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) { + /* TMF timeout & marker timeout */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive marker status timeout and do recovery", + lport->port_id); + + /* Do port reset */ + ret = unf_cm_reset_port(lport->port_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) do reset failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; + } + + /* 2. 
default case: Do LOGO process */ + unf_tmf_timeout_recovery_default(rport, xchg); + + return RETURN_OK; +} + +void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* + * for device(lun)/target(session) reset: + * Do port reset or R_Port LOGO + */ + if (v_lport->pfn_unf_tmf_abnormal_recovery) + v_lport->pfn_unf_tmf_abnormal_recovery((void *)v_rport, + (void *)v_xchg); +} + +static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd, + enum unf_task_mgmt_cmnd_e v_task_mgmt) +{ + UNF_CHECK_VALID(0x1339, UNF_TRUE, v_fcp_cmnd, return); + UNF_CHECK_VALID(0x1340, UNF_TRUE, v_scsi_cmnd, return); + + unf_big_end_to_cpu((void *)v_scsi_cmnd->pc_lun_id, UNF_FCP_LUNID_LEN_8); + (*(unsigned long long *)(v_scsi_cmnd->pc_lun_id)) >>= 8; + memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id, + sizeof(v_fcp_cmnd->lun)); + + /* + * If the TASK MANAGEMENT FLAGS field is set to a nonzero value, + * the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field, + * the RDDATA bit, and the WRDATA bit shall be ignored and the + * FCP_BIDIRECTIONAL_READ_DL field shall not be + * included in the FCP_CMND IU payload + */ + v_fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS(v_task_mgmt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.", + v_task_mgmt, v_fcp_cmnd->control); +} + +int unf_send_scsi_mgmt_cmnd(struct unf_xchg_s *v_xchg, + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_scsi_cmd_s *v_scsi_cmnd, + enum unf_task_mgmt_cmnd_e v_task_mgnt_cmd_type) +{ + /* + * 1. Device/LUN reset + * 2. Target/Session reset + */ + struct unf_xchg_s *xchg = NULL; + int ret = SUCCESS; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1341, UNF_TRUE, v_xchg, return FAILED); + UNF_CHECK_VALID(0x1342, UNF_TRUE, v_lport, return FAILED); + UNF_CHECK_VALID(0x1343, UNF_TRUE, v_rport, return FAILED); + UNF_CHECK_VALID(0x1344, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1345, UNF_TRUE, + ((v_task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK) && + (v_task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET)), + return FAILED); + + xchg = v_xchg; + xchg->lport = v_lport; + xchg->rport = v_rport; + + /* 1. State: Up_Task */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + xchg->io_state |= INI_IO_STATE_UPTASK; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) { + xchg->ox_id = xchg->hot_pool_tag; + pkg.frame_head.oxid_rxid = + ((unsigned int)xchg->ox_id << 16) | xchg->rx_id; + } + + /* 2. 
Set TASK MANAGEMENT FLAGS of FCP_CMND to + * the corresponding task management command + */ + unf_build_task_mgmt_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd, + v_task_mgnt_cmd_type); + + pkg.xchg_contex = xchg; + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = v_rport->rport_index; + pkg.fcp_cmnd = &xchg->fcp_cmnd; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag; + pkg.frame_head.csctl_sid = v_lport->nport_id; + pkg.frame_head.rctl_did = v_rport->nport_id; + pkg.unf_rsp_pload_bl.buffer_ptr = + (unsigned char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + pkg.unf_rsp_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr; + pkg.unf_rsp_pload_bl.length = PAGE_SIZE; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + if (unlikely(v_lport->b_pcie_linkdown == UNF_TRUE)) { + unf_free_lport_all_xchg(v_lport); + return SUCCESS; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)", + v_lport->port_id, v_task_mgnt_cmd_type, + v_rport->nport_id, xchg->hot_pool_tag, + *((unsigned long long *)v_scsi_cmnd->pc_lun_id)); + + /* 3. Init exchange task semaphore */ + sema_init(&xchg->task_sema, 0); + + /* 4. Send Mgmt Task to low-level */ + if (unf_hardware_start_io(v_lport, &pkg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed", + v_lport->port_id, v_task_mgnt_cmd_type, + v_rport->nport_id); + + return FAILED; + } + + /* + * semaphore timeout + * + * Code review: The second input parameter needs to + * be converted to jiffies. + * set semaphore after the message is sent successfully. + * The semaphore is returned when the semaphore times out + * or is woken up. + * + * 5. The semaphore is cleared and counted when the Mgmt + * Task message is sent, + * and is Wake Up when the RSP message is received. + * If the semaphore is not Wake Up, the semaphore is + * triggered after timeout. + * That is, no RSP message is received within the timeout period. + */ + if (down_timeout(&xchg->task_sema, + (long long)msecs_to_jiffies((unsigned int)UNF_WAIT_SEM_TIMEOUT))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id, + v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id); + + /* semaphore timeout */ + ret = FAILED; + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_states == UNF_LPORT_ST_RESET) + ret = SUCCESS; + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + return ret; + } + + /* + * 6. 
NOTE: no timeout (has been waken up) + * Do Scsi_Cmnd(Mgmt Task) result checking + * + * FAILED: with error code or RSP is error + * SUCCESS: others + */ + if (xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id); + + ret = SUCCESS; + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id, + v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id); + + ret = FAILED; + } + + return ret; +} + +int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int cmnd_result = 0; + int ret = SUCCESS; + + UNF_CHECK_VALID(0x1349, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1350, UNF_TRUE, v_scsi_cmnd->pc_lun_id, + return FAILED); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Enter device/LUN reset handler"); + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + /* 2. L_Port State checking */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) is removing", lport); + + return FAILED; + } + + /* + * 3. Get R_Port: no rport is found or rport is not ready,return ok + * from: L_Port -->> rport_scsi_table (image table) + * -->> rport_info_table + */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &cmnd_result); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + return SUCCESS; + } + + /* + * 4. Set the I/O of the corresponding LUN to abort. + * + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_cm_xchg_abort_by_lun( + lport, rport, + *((unsigned long long *)v_scsi_cmnd->pc_lun_id), + NULL, UNF_FALSE); + + /* 5. R_Port state check */ + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready", + lport->port_id, rport->nport_id, + rport->rp_state, v_scsi_cmnd); + + return SUCCESS; + } + + /* 6. Get & inc ref_cnt free Xchg for Device reset */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport, + UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) can't get free exchange", lport); + + return FAILED; + } + + /* increase ref_cnt for protecting exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET); + UNF_CHECK_VALID(0x1351, UNF_TRUE, (ret == RETURN_OK), return FAILED); + + /* 7. 
Send Device/LUN Reset to Low level */ + ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, + v_scsi_cmnd, + UNF_FCP_TM_LOGICAL_UNIT_RESET); + if (unlikely(ret == FAILED)) { + /* + * Do port reset or R_Port LOGO: + * 1. FAILED: send failed + * 2. FAILED: semaphore timeout + * 3. SUCCESS: rcvd rsp & semaphore has been waken up + */ + unf_tmf_abnormal_recovery(lport, rport, xchg); + } + + /* + * 8. Release resource immediately if necessary + * NOTE: here, semaphore timeout or rcvd rsp + * (semaphore has been waken up) + */ + if (likely((lport->b_port_removing != UNF_TRUE) || + (lport->root_lport != lport))) + unf_cm_free_xchg(xchg->lport, xchg); + + /* decrease ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET); + + return SUCCESS; +} + +int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int cmnd_result = 0; + int ret; + + UNF_CHECK_VALID(0x1355, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1356, UNF_TRUE, v_scsi_cmnd->pc_lun_id, + return FAILED); + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + /* 2. L_Port State check */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) is removing", lport); + + return FAILED; + } + + /* + * 3. Get R_Port: no rport is found or rport is not ready,return ok + * from: L_Port -->> rport_scsi_table (image table) -->> + * rport_info_table + */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &cmnd_result); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find rport by scsi_id(0x%x)", + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + return SUCCESS; + } + + /* + * 4. set UP_ABORT on Target IO and Session IO + * + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_cm_xchg_abort_by_session(lport, rport); + + /* 5. R_Port state check */ + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)", + lport->port_id, rport->nport_id, + rport->rp_state, v_scsi_cmnd); + + return SUCCESS; + } + + /* 6. Get free Xchg for Target Reset CMND */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport, + UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) can't get free exchange", lport); + + return FAILED; + } + + /* increase ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET); + UNF_CHECK_VALID(0x1357, UNF_TRUE, (ret == RETURN_OK), return FAILED); + + /* 7. Send Target Reset Cmnd to low-level */ + ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, v_scsi_cmnd, + UNF_FCP_TM_TARGET_RESET); + if (unlikely(ret == FAILED)) { + /* + * Do port reset or R_Port LOGO: + * 1. FAILED: send failed + * 2. FAILED: semaphore timeout + * 3. 
SUCCESS: rcvd rsp & semaphore has been waken up + */ + unf_tmf_abnormal_recovery(lport, rport, xchg); + } + + /* + * 8. Release resource immediately if necessary + * NOTE: here, semaphore timeout or rcvd rsp + * (semaphore has been waken up) + */ + if (likely((lport->b_port_removing != UNF_TRUE) || + (lport->root_lport != lport))) + unf_cm_free_xchg(xchg->lport, xchg); + + /* decrease exchange ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET); + + return SUCCESS; +} + +int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI BUS Reset Command --->>> FC Port Reset Command */ + struct unf_lport_s *lport = NULL; + int cmnd_result = 0; + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[event]Do port reset with scsi_bus_reset"); + + cmnd_result = unf_cm_reset_port(lport->port_id); + if (unlikely(cmnd_result == UNF_RETURN_ERROR)) + return FAILED; + else + return SUCCESS; +} + +void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned char *rsp_info = NULL; + unsigned char rsp_code = 0; + unsigned int code_index = 0; + + /* + * LLT found that:RSP_CODE is the third byte of FCP_RSP_INFO, + * on Little endian should be byte 0, For detail FCP_4 Table 26 + * FCP_RSP_INFO field format + * + * 1. state setting + * 2. wake up semaphore + */ + UNF_CHECK_VALID(0x1321, TRUE, v_pkg, return); + UNF_CHECK_VALID(0x1322, TRUE, v_xchg, return); + + v_xchg->tmf_state |= TMF_RESPONSE_RECEIVED; + + if (UNF_GET_LL_ERR(v_pkg) != UNF_IO_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Send scsi manage command failed with error code(0x%x)", + UNF_GET_LL_ERR(v_pkg)); + + v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); + + return; + } + + rsp_info = v_pkg->unf_rsp_pload_bl.buffer_ptr; + if (!rsp_info && (v_pkg->unf_rsp_pload_bl.length != 0)) { + rsp_info = + (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + + /* change to little end if necessary */ + if (rsp_info && (v_pkg->byte_orders & UNF_BIT_3)) + unf_big_end_to_cpu( + rsp_info, + v_pkg->unf_rsp_pload_bl.length); + } + + if (!rsp_info) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]FCP response data pointer is NULL with Xchg TAG(0x%x)", + v_xchg->hot_pool_tag); + + v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); + + return; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)", + v_pkg->unf_rsp_pload_bl.length, + rsp_info[0], + rsp_info[1], + rsp_info[2], + rsp_info[3], + rsp_info[4], + rsp_info[5], + rsp_info[6], + rsp_info[7]); + + rsp_code = rsp_info[code_index]; + if ((rsp_code == UNF_FCP_TM_RSP_COMPLETE) || + (rsp_code == UNF_FCP_TM_RSP_SUCCEED)) + v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; + else + v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); +} diff --git a/drivers/scsi/huawei/hifc/unf_io_abnormal.h b/drivers/scsi/huawei/hifc/unf_io_abnormal.h new file mode 100644 index 0000000000000000000000000000000000000000..54336ac71ea8f0103ae4d5c20d561437c367a41a --- 
/dev/null +++ b/drivers/scsi/huawei/hifc/unf_io_abnormal.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_IO__ABNORMAL_H__ +#define __UNF_IO__ABNORMAL_H__ + +#define UNF_GET_LL_ERR(v_pkg) ((v_pkg->status) >> 16) + +void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg); +unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_log.h b/drivers/scsi/huawei/hifc/unf_log.h new file mode 100644 index 0000000000000000000000000000000000000000..1da7ab821b10236cc2cbcaee794c4f21203aeb79 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_log.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_LOG_H__ +#define __UNF_LOG_H__ + +#define UNF_CRITICAL 1 +#define UNF_ERR 2 +#define UNF_WARN 3 +#define UNF_KEVENT 4 +#define UNF_MAJOR 5 +#define UNF_MINOR 6 +#define UNF_INFO 7 +#define UNF_DATA 7 +#define UNF_ALL 7 + +enum unf_debug_type_e { + UNF_DEBUG_TYPE_MML = 0, + UNF_DEBUG_TYPE_DIAGNOSE = 1, + UNF_DEBUG_TYPE_MESSAGE = 2, + UNF_DEBUG_TYPE_BUTT +}; + +enum unf_log_attr_e { + UNF_LOG_LOGIN_ATT = 0x1, + UNF_LOG_IO_ATT = 0x2, + UNF_LOG_EQUIP_ATT = 0x4, + UNF_LOG_REG_ATT = 0x8, + UNF_LOG_REG_MML_TEST = 0x10, + UNF_LOG_EVENT = 0x20, + UNF_LOG_NORMAL = 0x40, + UNF_LOG_ABNORMAL = 0X80, + UNF_LOG_BUTT +}; + +enum event_log_e { + UNF_EVTLOG_DRIVER_SUC = 0, + UNF_EVTLOG_DRIVER_INFO, + UNF_EVTLOG_DRIVER_WARN, + UNF_EVTLOG_DRIVER_ERR, + UNF_EVTLOG_LINK_SUC, + UNF_EVTLOG_LINK_INFO, + UNF_EVTLOG_LINK_WARN, + UNF_EVTLOG_LINK_ERR, + UNF_EVTLOG_IO_SUC, + UNF_EVTLOG_IO_INFO, + UNF_EVTLOG_IO_WARN, + UNF_EVTLOG_IO_ERR, + UNF_EVTLOG_TOOL_SUC, + UNF_EVTLOG_TOOL_INFO, + UNF_EVTLOG_TOOL_WARN, + UNF_EVTLOG_TOOL_ERR, + UNF_EVTLOG_BUT +}; + +#define UNF_IO_ATT_PRINT_TIMES 2 +#define UNF_LOGIN_ATT_PRINT_TIMES 100 + +#define UNF_IO_ATT_PRINT_LIMIT msecs_to_jiffies(6 * 1000) + +extern unsigned int unf_dbg_level; +extern unsigned int log_print_level; +extern unsigned int log_limted_times; + +#define DRV_LOG_LIMIT(module_id, log_level, log_id, log_att, format, ...) \ + do { \ + static unsigned long pre; \ + static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ + if (time_after_eq(jiffies, pre + (UNF_IO_ATT_PRINT_LIMIT))) { \ + if (log_att == UNF_LOG_ABNORMAL) { \ + should_print = UNF_IO_ATT_PRINT_TIMES; \ + } else { \ + should_print = log_limted_times; \ + } \ + } \ + if (should_print < 0) { \ + if (log_att != UNF_LOG_ABNORMAL) { \ + pre = jiffies; \ + } \ + break; \ + } \ + if (should_print-- > 0) { \ + printk(log_level \ + "[%d][FC_UNF]" format "[%s][%-5d]\n", \ + smp_processor_id(), ##__VA_ARGS__, \ + __func__, __LINE__); \ + } \ + if (should_print == 0) { \ + printk(log_level \ + "[FC_UNF]log is limited[%s][%-5d]\n", \ + __func__, __LINE__); \ + } \ + pre = jiffies; \ + } while (0) + +#define UNF_CHECK_VALID(logid, need_check, condition, fail_do) \ + do { \ + if (unlikely(!(condition))) { \ + UNF_TRACE((logid), UNF_LOG_REG_ATT, UNF_ERR, \ + "Para check(%s) invalid", #condition); \ + fail_do; \ + } \ + } while (0) + +#define HIUNF_TRACE(log_id, log_att, log_level, format, ...) 
\ + do { \ + if (unlikely((log_level) <= log_print_level)) { \ + if (log_level == UNF_CRITICAL) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_CRIT, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_WARN) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_WARNING, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_ERR) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_ERR, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_MAJOR || \ + log_level == UNF_MINOR || \ + log_level == UNF_KEVENT) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_NOTICE, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_INFO || \ + log_level == UNF_DATA) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_INFO, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +#define UNF_TRACE(log_id, log_att, log_level, fmt, ...) \ + do { \ + HIUNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__); \ + } while (0) + +#define UNF_INIT_PRIVATE_ST(private_st) \ + do { \ + memset(&(private_st), 0, sizeof(private_st)); \ + } while (0) + +#define UNF_PRINT_SFS(dbg_level, portid, v_data, v_size) \ + do { \ + if ((dbg_level) <= log_print_level) { \ + unsigned int cnt = 0; \ + printk(KERN_INFO "[INFO]Port(0x%x) sfs:0x", \ + (portid)); \ + for (cnt = 0; cnt < (v_size) / 4; cnt++) { \ + printk(KERN_INFO "%08x ", \ + ((unsigned int *)v_data)[cnt]); \ + } \ + printk(KERN_INFO "[FC_UNF][%s]\n", __FUNCTION__); \ + } \ + } while (0) + +#define UNF_PRINT_SFS_LIMIT(dbg_level, portid, v_data, v_size) \ + do { \ + if ((dbg_level) <= log_print_level) { \ + static unsigned long pre; \ + static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ + if (time_after_eq(jiffies, pre + \ + UNF_IO_ATT_PRINT_LIMIT)) { \ + should_print = log_limted_times; \ + } \ + if (should_print < 0) { \ + pre = jiffies; \ + break; \ + } \ + if (should_print-- > 0) { \ + UNF_PRINT_SFS(dbg_level, portid, \ + v_data, v_size); \ + } \ + if (should_print == 0) { \ + printk(KERN_INFO "[FC_UNF]sfs log is limited[%s][%-5d]\n", \ + __func__, __LINE__); \ + } \ + pre = jiffies; \ + } \ + } while (0) + +#define UNF_REFERNCE_VAR(var) + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_lport.c b/drivers/scsi/huawei/hifc/unf_lport.c new file mode 100644 index 0000000000000000000000000000000000000000..09986f177fcc91bd144d018798498860c9a4447a --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_npiv.h" + +static void unf_lport_timeout(struct work_struct *work); + +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype) +{ + UNF_CHECK_VALID(0x1801, UNF_TRUE, v_lport, return); + + v_lport->dirty_flag |= v_etype; +} + +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport) +{ + int ret = 0; + + UNF_CHECK_VALID(0x1802, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Init L_Port route work */ + INIT_DELAYED_WORK(&v_lport->route_timer_work, unf_lport_route_work); + + /* Delay route work */ + ret = queue_delayed_work( + unf_work_queue, + &v_lport->route_timer_work, + (unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); + if (unlikely(ret == UNF_FALSE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + 
"[warn]Port(0x%x) schedule route work failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + return unf_lport_refinc(v_lport); +} + +void unf_destroy_lport_route(struct unf_lport_s *v_lport) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1803, UNF_TRUE, v_lport, return); + + /* Cancel (route timer) delay work */ + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, + &v_lport->route_timer_work, + "Route Timer work"); + if (ret == RETURN_OK) { + /* Corresponding to ADD operation */ + unf_lport_ref_dec(v_lport); + } + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE; +} + +static void unf_lport_config(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1816, UNF_TRUE, v_lport, return); + + INIT_DELAYED_WORK(&v_lport->retry_work, unf_lport_timeout); + + v_lport->max_retry_count = UNF_MAX_RETRY_COUNT; /* 3 */ + v_lport->retries = 0; +} + +void unf_init_portparms(struct unf_lport_s *v_lport) +{ + INIT_LIST_HEAD(&v_lport->list_vports_head); + INIT_LIST_HEAD(&v_lport->list_intergrad_vports); + INIT_LIST_HEAD(&v_lport->list_destroy_vports); + INIT_LIST_HEAD(&v_lport->entry_lport); + spin_lock_init(&v_lport->lport_state_lock); + + v_lport->max_frame_size = max_frame_size; + v_lport->ed_tov = UNF_DEFAULT_EDTOV; + v_lport->ra_tov = UNF_DEFAULT_RATOV; + v_lport->rr_tov = UNF_DEFAULT_RRTOV; + v_lport->fabric_node_name = 0; + v_lport->b_priority = UNF_PRIORITY_DISABLE; + v_lport->b_port_dir_exchange = UNF_FALSE; + /* Delay (retry) work init */ + unf_lport_config(v_lport); + + unf_set_lport_state(v_lport, UNF_LPORT_ST_ONLINE); /* online */ + + v_lport->link_up = UNF_PORT_LINK_DOWN; + v_lport->b_port_removing = UNF_FALSE; + v_lport->lport_free_completion = NULL; + v_lport->last_tx_fault_jif = 0; + v_lport->enhanced_features = 0; + v_lport->destroy_step = INVALID_VALUE32; + v_lport->dirty_flag = 0; + v_lport->b_switch_state = UNF_FALSE; + v_lport->b_bbscn_support = UNF_FALSE; + + v_lport->en_start_work_state = UNF_START_WORK_STOP; + v_lport->sfp_power_fault_count = 0; + v_lport->sfp_9545_fault_count = 0; + + atomic_set(&v_lport->port_no_operater_flag, UNF_LPORT_NORMAL); + atomic_set(&v_lport->lport_ref_cnt, 0); + atomic_set(&v_lport->scsi_session_add_success, 0); + atomic_set(&v_lport->scsi_session_add_failed, 0); + atomic_set(&v_lport->scsi_session_del_success, 0); + atomic_set(&v_lport->scsi_session_del_failed, 0); + atomic_set(&v_lport->add_start_work_failed, 0); + atomic_set(&v_lport->add_closing_work_failed, 0); + atomic_set(&v_lport->alloc_scsi_id, 0); + atomic_set(&v_lport->resume_scsi_id, 0); + atomic_set(&v_lport->reuse_scsi_id, 0); + atomic_set(&v_lport->device_alloc, 0); + atomic_set(&v_lport->device_destroy, 0); + atomic_set(&v_lport->session_loss_tmo, 0); + + atomic64_set(&v_lport->exchg_index, 1); + atomic_inc(&v_lport->lport_ref_cnt); + atomic_set(&v_lport->err_code_obtain_freq, 0); + + memset(&v_lport->link_service_info, 0, + sizeof(struct unf_link_service_collect_s)); + memset(&v_lport->err_code_sum, 0, sizeof(struct unf_err_code_s)); +} + +void unf_reset_lport_params(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x1804, UNF_TRUE, v_lport, return); + + lport->link_up = UNF_PORT_LINK_DOWN; + lport->nport_id = 0; /* Need do FLOGI again to clear N_Port_ID */ + lport->max_frame_size = max_frame_size; + lport->ed_tov = UNF_DEFAULT_EDTOV; + lport->ra_tov = UNF_DEFAULT_RATOV; + lport->rr_tov = UNF_DEFAULT_RRTOV; + lport->fabric_node_name = 0; +} + +static enum unf_lport_login_state_e unf_lport_stat_online( + enum 
unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_initial( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_linkup( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> FLOGI_WAIT */ + next_state = UNF_LPORT_ST_FLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_flogi_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_PLOGI_WAIT */ + next_state = UNF_LPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_REMOTE_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_plogi_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFT_ID_WAIT */ + next_state = UNF_LPORT_ST_RFT_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rftid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFF_ID_WAIT */ + next_state = UNF_LPORT_ST_RFF_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case 
UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rffid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_SCR_WAIT */ + next_state = UNF_LPORT_ST_SCR_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_scr_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_logo( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_OFFLINE */ + next_state = UNF_LPORT_ST_OFFLINE; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_offline( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_ONLINE: + /* EVENT_ONLINE --->>> ST_ONLINE */ + next_state = UNF_LPORT_ST_ONLINE; + break; + + case UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_reset( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_ready( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + case 
UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_OFFLINE: + /* EVENT_OFFLINE --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e old_state = UNF_LPORT_ST_ONLINE; + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + UNF_CHECK_VALID(0x1805, UNF_TRUE, v_lport, return); + + old_state = v_lport->en_states; + switch (v_lport->en_states) { + case UNF_LPORT_ST_ONLINE: + next_state = unf_lport_stat_online(old_state, event); + break; + + case UNF_LPORT_ST_INITIAL: + next_state = unf_lport_stat_initial(old_state, event); + break; + + case UNF_LPORT_ST_LINK_UP: + next_state = unf_lport_stat_linkup(old_state, event); + break; + + case UNF_LPORT_ST_FLOGI_WAIT: + next_state = unf_lport_stat_flogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + next_state = unf_lport_stat_plogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFT_ID_WAIT: + next_state = unf_lport_stat_rftid_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFF_ID_WAIT: + next_state = unf_lport_stat_rffid_wait(old_state, event); + break; + + case UNF_LPORT_ST_SCR_WAIT: + next_state = unf_lport_state_scr_wait(old_state, event); + break; + + case UNF_LPORT_ST_LOGO: + next_state = unf_lport_state_logo(old_state, event); + break; + + case UNF_LPORT_ST_OFFLINE: + next_state = unf_lport_state_offline(old_state, event); + break; + + case UNF_LPORT_ST_RESET: + next_state = unf_lport_state_reset(old_state, event); + break; + + case UNF_LPORT_ST_READY: + next_state = unf_lport_state_ready(old_state, event); + break; + + default: + next_state = old_state; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) hold state(0x%x)", + v_lport->port_id, v_lport->en_states); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)", + v_lport->port_id, old_state, event, next_state); + + unf_set_lport_state(v_lport, next_state); +} + +unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1806, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + v_lport->lport_mgr_temp.pfn_unf_vport_get_free_and_init = NULL; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index = + unf_lookup_vport_by_vp_index; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_port_id = + unf_lookup_vport_by_port_id; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did = + unf_lookup_vport_by_did; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn = + unf_lookup_vport_by_wwpn; + v_lport->lport_mgr_temp.pfn_unf_vport_remove = unf_vport_remove; + return RETURN_OK; +} + +void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1807, UNF_TRUE, v_lport, return); + + memset(&v_lport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP; +} + +unsigned int unf_lport_retry_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1808, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + rport = 
unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)", + v_lport->port_id, v_lport->en_states); + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + return RETURN_OK; + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send FLOGI or FDISC */ + if (v_lport != v_lport->root_lport) { + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FDISC failed", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } else { + ret = unf_send_flogi(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE( + UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed\n", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } + + return ret; +} + +unsigned int unf_lport_name_server_register( + struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1809, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Update R_Port & L_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + switch (states) { + /* RFT_ID */ + case UNF_LPORT_ST_RFT_ID_WAIT: + ret = unf_send_rft_id(v_lport, rport); + break; + + /* RFF_ID */ + case UNF_LPORT_ST_RFF_ID_WAIT: + ret = unf_send_rff_id(v_lport, rport); + break; + + /* SCR */ + case UNF_LPORT_ST_SCR_WAIT: + ret = unf_send_scr(v_lport, NULL); + break; + + /* PLOGI */ + case UNF_LPORT_ST_PLOGI_WAIT: + default: + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + ret = unf_send_plogi(v_lport, rport); + break; + } + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) register fabric(0xfffffc) failed", + v_lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +unsigned int unf_lport_enter_sns_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct 
unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1810, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (!v_rport) { + rport = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + } else { + rport = v_rport; + } + + if (!rport) { + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; + } + + /* Update L_Port & R_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Do R_Port LOGO state */ + unf_rport_enter_logo(v_lport, rport); + + return ret; +} + +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport) +{ + /* Fabric or Public Loop Mode: Login with Name server */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (rport) { + /* for port swap: Delete old R_Port if necessary */ + if (rport->local_nport_id != v_lport->nport_id) { + unf_rport_immediate_linkdown(v_lport, rport); + rport = NULL; + } + } + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send PLOGI to Fabric(0xfffffc) */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI to name server failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } +} + +int unf_get_port_params(void *v_argin, void *v_argout) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_argin; + struct unf_low_level_port_mgr_op_s *port_mg = NULL; + struct unf_port_params_s port_params = { 0 }; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_argout); + UNF_CHECK_VALID(0x1812, UNF_TRUE, + v_argin, return UNF_RETURN_ERROR); + + port_mg = &lport->low_level_func.port_mgr_op; + if (!port_mg->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) low level port_config_get function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "[warn]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)", + lport->port_id, UNF_DEFAULT_FABRIC_RATOV, UNF_DEFAULT_EDTOV); + + port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV; + port_params.ed_tov = UNF_DEFAULT_EDTOV; + + /* Update parameters with Fabric mode */ + if ((lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + lport->ra_tov = port_params.ra_tov; + lport->ed_tov = port_params.ed_tov; + } + + return ret; +} + +unsigned int 
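+/*
+ * Fabric login entry: reuse or allocate the well-known F_Port R_Port
+ * (0xfffffe), move the L_Port into FLOGI_WAIT, queue an asynchronous
+ * port-parameter query, then send FLOGI for the physical port or FDISC
+ * for an NPIV vport; a send failure arms L_Port error recovery.
+ */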
unf_lport_enter_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_cm_event_report *event = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x1813, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port */ + nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Updtae L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + /* LPort: LINK UP --> FLOGI WAIT */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + event = unf_get_one_event_node(v_lport); + if (event) { + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + /* NULL for timer */ + event->pfn_unf_event_task = unf_get_port_params; + event->para_in = (void *)v_lport; + unf_post_one_event_node(v_lport, event); + } + + if (v_lport != v_lport->root_lport) { + /* for NPIV */ + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } else { + /* for Physical Port */ + ret = unf_send_flogi(v_lport, rport); + if (ret != RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + UNF_CHECK_VALID(0x1814, UNF_TRUE, v_lport, return); + if (states != v_lport->en_states) { + /* Reset L_Port retry count */ + v_lport->retries = 0; + } + + v_lport->en_states = states; +} + +static void unf_lport_timeout(struct work_struct *work) +{ + struct unf_lport_s *lport = NULL; + enum unf_lport_login_state_e state = UNF_LPORT_ST_READY; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1815, UNF_TRUE, work, return); + lport = container_of(work, struct unf_lport_s, retry_work.work); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + state = lport->en_states; + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is timeout with state(0x%x)", + lport->port_id, state); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + switch (state) { + /* FLOGI retry */ + case UNF_LPORT_ST_FLOGI_WAIT: + (void)unf_lport_retry_flogi(lport); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + case UNF_LPORT_ST_RFT_ID_WAIT: + case UNF_LPORT_ST_RFF_ID_WAIT: + case UNF_LPORT_ST_SCR_WAIT: + (void)unf_lport_name_server_register(lport, state); + break; + + /* Send LOGO External */ + case UNF_LPORT_ST_LOGO: + break; + + /* Do nothing */ + case UNF_LPORT_ST_OFFLINE: + case UNF_LPORT_ST_READY: + case UNF_LPORT_ST_RESET: + case UNF_LPORT_ST_ONLINE: + case UNF_LPORT_ST_INITIAL: + case UNF_LPORT_ST_LINK_UP: + + lport->retries = 0; + break; + default: + break; + } + + unf_lport_ref_dec_to_destroy(lport); +} + +void unf_lport_error_recovery(struct unf_lport_s *v_lport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + int ret = 0; + + UNF_CHECK_VALID(0x1817, UNF_TRUE, v_lport, return); + + if 
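+	/*
+	 * Recovery policy: while retries < max_retry_count the pending
+	 * login step is retried after ed_tov milliseconds via retry_work
+	 * (taking an extra L_Port reference when the work is queued);
+	 * once retries are exhausted the state machine is fed
+	 * REMOTE_TIMEOUT and a LOGO is sent to the name server.
+	 */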
(unlikely(unf_lport_refinc(v_lport) != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + return; + } + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* Port State: removing */ + if (v_lport->b_port_removing == UNF_TRUE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Port State: offline */ + if (v_lport->en_states == UNF_LPORT_ST_OFFLINE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is offline and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Queue work state check */ + if (delayed_work_pending(&v_lport->retry_work)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Do retry operation */ + if (v_lport->retries < v_lport->max_retry_count) { + v_lport->retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) enter recovery and retry %u times", + v_lport->port_id, v_lport->nport_id, + v_lport->retries); + + delay = (unsigned long)v_lport->ed_tov; + ret = queue_delayed_work(unf_work_queue, + &v_lport->retry_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay)); + if (ret) { + atomic_inc(&v_lport->lport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) queue work success and reference count is %d", + v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + } else { + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) register operation timeout and do LOGO", + v_lport->port_id); + + /* Do L_Port LOGO */ + (void)unf_lport_enter_sns_logo(v_lport, NULL); + } + + unf_lport_ref_dec_to_destroy(v_lport); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index) +{ + UNF_CHECK_VALID(0x1819, UNF_TRUE, v_lport, return NULL); + + if (v_vp_index == 0) + return v_lport; + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by index is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index( + v_lport, v_vp_index); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + UNF_CHECK_VALID(0x1821, UNF_TRUE, v_lport, return NULL); + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by D_ID is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did(v_lport, + v_did); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + UNF_CHECK_VALID(0x1822, UNF_TRUE, v_lport, return NULL); + + if 
(!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by WWPN is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn(v_lport, + v_wwpn); +} + +void unf_cm_vport_remove(struct unf_lport_s *v_vport) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x1823, UNF_TRUE, v_vport, return); + lport = v_vport->root_lport; + UNF_CHECK_VALID(0x1824, UNF_TRUE, lport, return); + + if (!lport->lport_mgr_temp.pfn_unf_vport_remove) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do vport remove is NULL", + lport->port_id); + return; + } + + lport->lport_mgr_temp.pfn_unf_vport_remove(v_vport); +} diff --git a/drivers/scsi/huawei/hifc/unf_lport.h b/drivers/scsi/huawei/hifc/unf_lport.h new file mode 100644 index 0000000000000000000000000000000000000000..cb9105e12b51fc4e3e0e65410316c89cb2fd35de --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __UNF_LPORT_H +#define __UNF_LPORT_H +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_common.h" + +#define UNF_PORT_TYPE_FC 0 +#define UNF_PORT_TYPE_DISC 1 +#define UNF_FW_UPDATE_PATH_LEN_MAX 255 +#define UNF_EXCHG_MGR_NUM (4) + +#define UNF_MAX_IO_RETURN_VALUE 0x12 +#define UNF_MAX_SCSI_CMD 0xFF + +enum unf_scsi_error_handle_type { + UNF_SCSI_ABORT_IO_TYPE = 0, + UNF_SCSI_DEVICE_RESET_TYPE, + UNF_SCSI_TARGET_RESET_TYPE, + UNF_SCSI_BUS_RESET_TYPE, + UNF_SCSI_HOST_RESET_TYPE, + UNF_SCSI_VIRTUAL_RESET_TYPE, + UNF_SCSI_ERROR_HANDLE_BUTT +}; + +enum unf_lport_destroy_step_e { + UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0, + UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT, + UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE, + UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER, + UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR, + UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL, + UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR, + UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP, + UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE, + UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST, + UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST, + UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE, + UNF_LPORT_DESTROY_STEP_BUTT +}; + +enum unf_lport_enhanced_feature_e { + /* Enhance GFF feature connect even if fail to get GFF feature */ + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001, + /* Enhance IO balance */ + UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002, + /* Enhance IO check */ + UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004, + /* Close FW ROUTE */ + UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008, + /* lowest frequency read SFP information */ + UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010, + UNF_LPORT_ENHANCED_FEATURE_BUTT +}; + +enum unf_lport_login_state_e { + UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */ + UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */ + UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */ + UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */ + UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */ + UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */ + UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion 
*/ + UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */ + UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */ + UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */ + UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */ + UNF_LPORT_ST_READY, /* ready for use */ + UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */ + UNF_LPORT_ST_RESET, /* being reset and will restart */ + UNF_LPORT_ST_OFFLINE, /* offline */ + UNF_LPORT_ST_BUTT +}; + +enum unf_lport_event_e { + UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */ + UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort link up */ + UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */ + UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */ + UNF_EVENT_LPORT_OFFLINE = 0x8004, /* lPort bing stopped */ + UNF_EVENT_LPORT_RESET = 0x8005, + UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */ + UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */ + UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport time out */ + UNF_EVENT_LPORT_READY = 0x8009, + UNF_EVENT_LPORT_REMOTE_BUTT +}; + +struct unf_cm_disc_mg_template_s { + /* start input:L_Port,return:ok/fail */ + unsigned int (*pfn_unf_disc_start)(void *v_lport); + + /* stop input: L_Port,return:ok/fail */ + unsigned int (*pfn_unf_disc_stop)(void *v_lport); + + /* Callback after disc complete[with event:ok/fail]. */ + void (*pfn_unf_disc_callback)(void *v_lport, unsigned int v_result); +}; + +struct unf_chip_manage_info_s { + struct list_head list_chip_thread_entry; + struct list_head list_head; + spinlock_t chip_event_list_lock; + struct task_struct *data_thread; + unsigned int list_num; + unsigned int slot_id; + unsigned char chip_id; + unsigned char rsv; + unsigned char sfp_9545_fault; /* 9545 fault */ + unsigned char sfp_power_fault; /* SFP power fault */ + atomic_t ref_cnt; + unsigned int b_thread_exit; + struct unf_chip_info_s chip_info; + atomic_t card_loop_test_flag; + spinlock_t card_loop_back_state_lock; + char update_path[UNF_FW_UPDATE_PATH_LEN_MAX]; +}; + +enum unf_timer_type_e { + UNF_TIMER_TYPE_INI_IO, + UNF_TIMER_TYPE_REQ_IO, + UNF_TIMER_TYPE_INI_RRQ, + UNF_TIMER_TYPE_SFS, + UNF_TIMER_TYPE_INI_ABTS +}; + +struct unf_cm_xchg_mgr_template_s { + /* Get new Xchg */ + /* input:L_Port,ini/tgt type,return:initialized Xchg */ + void *(*pfn_unf_xchg_get_free_and_init)(void *, unsigned int, + unsigned short); + + /* OXID,SID lookup Xchg */ + /* input: L_Port,OXID,SID,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_id)(void *, unsigned short, + unsigned int); + + /* input:L_Port,tag,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_tag)(void *, unsigned short); + + /* free Xchg */ + /* input:L_Port,Xchg,return:void */ + void (*pfn_unf_xchg_release)(void *, void *); + + /* Abort IO Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_io_xchg_abort)(void *, void *, unsigned int, + unsigned int, unsigned int); + + /* Abort SFS Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_sfs_xchg_abort)(void *, void *, + unsigned int, unsigned int); + + /* Clean Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_xchg_clean)(void *, unsigned int, + unsigned int); + + /* Add Xchg timer */ + void (*pfn_unf_xchg_add_timer)(void *, unsigned long, + enum unf_timer_type_e); + + /* Cancel Xchg timer */ + void (*pfn_unf_xchg_cancel_timer)(void *); + + /* L_Port, Abort flag */ + void (*pfn_unf_xchg_abort_all_io)(void *, unsigned int, int); + + /* find Xchg by scsi Cmnd sn */ + 
void *(*pfn_unf_look_up_xchg_by_cmnd_sn)(void *, unsigned long long, + unsigned int); + /* input:L_Port,unsigned long long */ + void (*pfn_unf_xchg_abort_by_lun)(void *, void *, unsigned long long, + void *, int); + + void (*pfn_unf_xchg_abort_by_session)(void *, void *); + +}; + +struct unf_rport_pool_s { + unsigned int rport_pool_count; + void *rport_pool_add; + struct list_head list_rports_pool; + spinlock_t rport_free_pool_lock; + /* for synchronous reuse RPort POOL completion */ + struct completion *rport_pool_completion; + unsigned long *pul_rpi_bitmap; +}; + +struct unf_cm_lport_template_s { + /* Get VPort struct and init */ + /* input:pstLport,ini/tgt type,return:pstVport */ + void *(*pfn_unf_vport_get_free_and_init)(void *, unsigned int); + + /* For fast IO path */ + /* input: pstLport, VpIndex, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_vp_index)(void *, unsigned short); + + /* input: pstLport, PortId,return:pstVport */ + void *(*pfn_unf_lookup_vport_by_port_id)(void *, unsigned int); + + /* input:pstLport, wwpn, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_wwpn)(void *, unsigned long long); + + /* input:L_Port, DID, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_did)(void *, unsigned int); + + /* input:L_Port,return:void */ + void (*pfn_unf_vport_remove)(void *); + +}; + +struct unf_vport_pool_s { + unsigned short vport_pool_count; + void *vport_pool_addr; + struct list_head list_vport_pool; + spinlock_t vport_pool_lock; + struct completion *vport_pool_completion; + unsigned short slab_next_index; /* Next free vport */ + unsigned short slab_total_sum; /* Total Vport num */ + struct unf_lport_s *vport_slab[0]; +}; + +struct unf_esgl_pool_s { + unsigned int esgl_pool_count; + void *esgl_pool_addr; + struct list_head list_esgl_pool; + spinlock_t esgl_pool_lock; + struct buf_describe_s esgl_buf_list; +}; + +/* little endium */ +struct unf_port_id_page_s { + struct list_head list_node_rscn; + unsigned char port_id_port; + unsigned char port_id_area; + unsigned char port_id_domain; + + unsigned char uc_addr_format : 2; + unsigned char uc_event_qualifier : 4; + unsigned char uc_reserved : 2; +}; + +struct unf_rscn_mg_s { + spinlock_t rscn_id_list_lock; + unsigned int free_rscn_count; + + /* free RSCN page list */ + struct list_head list_free_rscn_page; + + /* using RSCN page list */ + struct list_head list_using_rscn_page; + + /* All RSCN PAGE Address */ + void *rscn_pool_add; + struct unf_port_id_page_s *(*pfn_unf_get_free_rscn_node)( + void *v_rscn_mg); + void (*pfn_unf_release_rscn_node)(void *v_rscn_mg, void *v_rscn_node); +}; + +struct unf_disc_rport_mg_s { + void *disc_pool_add; + struct list_head list_disc_rports_pool; /* discovery DISC Rport pool */ + struct list_head list_disc_rport_busy; /* Busy discovery DiscRport */ +}; + +struct unf_disc_manage_info_s { + struct list_head list_head; + spinlock_t disc_event_list_lock; + atomic_t disc_contrl_size; + + unsigned int b_thread_exit; + struct task_struct *data_thread; + +}; + +struct unf_disc_s { + unsigned int retry_count; /* current retry counter */ + unsigned int max_retry_count; /* retry counter */ + unsigned int disc_flag; /* Disc flag :Loop Disc,Fabric Disc */ + + struct completion *disc_completion; + atomic_t disc_ref_cnt; + + struct list_head list_busy_rports; /* Busy RPort list */ + struct list_head list_delete_rports; /* Delete RPort list */ + struct list_head list_destroy_rports; + + spinlock_t rport_busy_pool_lock; + + struct unf_lport_s *lport; + enum unf_disc_state_e en_states; + struct 
delayed_work disc_work; + + /* Disc operation template */ + struct unf_cm_disc_mg_template_s unf_disc_temp; + + /* UNF_INIT_DISC/UNF_RSCN_DISC */ + unsigned int disc_option; + + /* RSCN list */ + struct unf_rscn_mg_s rscn_mgr; + struct unf_disc_rport_mg_s disc_rport_mgr; + struct unf_disc_manage_info_s disc_thread_info; + + unsigned long long last_disc_jiff; +}; + +enum unf_service_item_e { + UNF_SERVICE_ITEM_FLOGI = 0, + UNF_SERVICE_ITEM_PLOGI, + UNF_SERVICE_ITEM_PRLI, + UNF_SERVICE_ITEM_RSCN, + UNF_SERVICE_ITEM_ABTS, + UNF_SERVICE_ITEM_PDISC, + UNF_SERVICE_ITEM_ADISC, + UNF_SERVICE_ITEM_LOGO, + UNF_SERVICE_ITEM_SRR, + UNF_SERVICE_ITEM_RRQ, + UNF_SERVICE_ITEM_ECHO, + UNF_SERVICE_ITEM_RLS, + UNF_SERVICE_BUTT +}; + +/* Link service counter */ +struct unf_link_service_collect_s { + unsigned long long service_cnt[UNF_SERVICE_BUTT]; +}; + +struct unf_pcie_error_count_s { + unsigned int pcie_error_count[UNF_PCIE_BUTT]; +}; + +#define INVALID_WWPN 0 + +enum unf_device_scsi_state_e { + UNF_SCSI_ST_INIT = 0, + UNF_SCSI_ST_OFFLINE, + UNF_SCSI_ST_ONLINE, + UNF_SCSI_ST_DEAD, + UNF_SCSI_ST_BUTT +}; + +struct unf_wwpn_dfx_counter_info_s { + atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE]; + atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD]; + atomic64_t target_busy; + atomic64_t host_busy; + atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t device_alloc; + atomic_t device_destroy; +}; + +#define UNF_MAX_LUN_PER_TARGET 256 +struct unf_wwpn_rport_info_s { + unsigned long long wwpn; + struct unf_rport_s *rport; /* Rport which linkup */ + void *lport; /* Lport */ + unsigned int target_id; /* target_id distribute by scsi */ + unsigned int last_en_scis_state; + atomic_t en_scsi_state; + struct unf_wwpn_dfx_counter_info_s *dfx_counter; + struct delayed_work loss_tmo_work; + int b_need_scan; + struct list_head fc_lun_list; +}; + +struct unf_rport_scsi_id_image_s { + spinlock_t scsi_image_table_lock; + /* ScsiId Wwpn table */ + struct unf_wwpn_rport_info_s *wwn_rport_info_table; + unsigned int max_scsi_id; +}; + +enum unf_lport_dirty_flag_e { + UNF_LPORT_DIRTY_FLAG_NONE = 0, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100, + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400, + UNF_LPORT_DIRTY_FLAG_BUTT +}; + +typedef struct unf_rport_s *(*pfn_unf_rport_set_qualifier)( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +typedef unsigned int (*pfn_unf_tmf_status_recovery)(void *v_rport, + void *v_xchg); + +enum unf_start_work_state_e { + UNF_START_WORK_STOP, + UNF_START_WORK_BEGIN, + UNF_START_WORK_COMPLETE +}; + +struct unf_ini_private_info_s { + unsigned int driver_type; /* Driver Type */ + void *lower; /* driver private pointer */ +}; + +struct unf_product_hosts_info_s { + void *p_tgt_host; + unf_scsi_host_s *p_scsi_host; + struct unf_ini_private_info_s drv_private_info; + unf_scsi_host_s scsi_host; + +}; + +struct unf_lport_s { + unsigned int port_type; /* Port Type: fc */ + atomic_t lport_ref_cnt; /* LPort reference counter */ + void *fc_port; /* hard adapter hba pointer */ + void *rport; /* Used for SCSI interface */ + void *vport; + + struct unf_product_hosts_info_s host_info; /* scsi host mg */ + struct unf_rport_scsi_id_image_s rport_scsi_table; + int b_port_removing; + + int b_port_dir_exchange; + + spinlock_t xchg_mgr_lock; + struct list_head list_xchg_mgr_head; + struct list_head 
list_dirty_xchg_mgr_head; + void *p_xchg_mgr[UNF_EXCHG_MGR_NUM]; + enum int_e b_priority; + struct list_head list_vports_head; /* Vport Mg */ + struct list_head list_intergrad_vports; /* Vport intergrad list */ + struct list_head list_destroy_vports; /* Vport destroy list */ + /* VPort entry, hook in list_vports_head */ + struct list_head entry_vport; + struct list_head entry_lport; /* LPort entry */ + spinlock_t lport_state_lock; /* UL Port Lock */ + struct unf_disc_s disc; /* Disc and rport Mg */ + /* rport pool,Vport share Lport pool */ + struct unf_rport_pool_s rport_pool; + struct unf_esgl_pool_s esgl_pool; /* external sgl pool */ + unsigned int port_id; /* Port Management ,0x11000 etc. */ + enum unf_lport_login_state_e en_states; + unsigned int link_up; + unsigned int speed; + + unsigned long long node_name; + unsigned long long port_name; + unsigned long long fabric_node_name; + unsigned int nport_id; + unsigned int max_frame_size; + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int rr_tov; + + unsigned int options; /* ini or tgt */ + unsigned int retries; + unsigned int max_retry_count; + + enum unf_act_topo_e en_act_topo; + enum int_e b_switch_state; /* 1---->ON,FALSE---->OFF */ + enum int_e b_bbscn_support; /* 1---->ON,FALSE---->OFF */ + + enum unf_start_work_state_e en_start_work_state; + + /* Xchg Mg operation template */ + struct unf_cm_xchg_mgr_template_s xchg_mgr_temp; + struct unf_cm_lport_template_s lport_mgr_temp; + struct unf_low_level_function_op_s low_level_func; + struct unf_event_mgr event_mgr; /* Disc and rport Mg */ + struct delayed_work retry_work; /* poll work or delay work */ + + struct workqueue_struct *link_event_wq; + struct workqueue_struct *xchg_wq; + + struct unf_err_code_s err_code_sum; /* Error code counter */ + struct unf_link_service_collect_s link_service_info; + struct unf_pcie_error_count_s pcie_error_cnt; + pfn_unf_rport_set_qualifier pfn_unf_qualify_rport; /* Qualify Rport */ + /* tmf marker recovery */ + pfn_unf_tmf_status_recovery pfn_unf_tmf_abnormal_recovery; + struct delayed_work route_timer_work; /* L_Port timer route */ + + unsigned short vp_index; /* Vport Index, Lport:0 */ + struct unf_vport_pool_s *vport_pool; /* Only for Lport */ + + void *root_lport; /* Point to physic Lport */ + struct completion *lport_free_completion; /* Free LPort Completion */ + +#define UNF_LPORT_NOP 1 +#define UNF_LPORT_NORMAL 0 + + atomic_t port_no_operater_flag; + + unsigned int enhanced_features; /* Enhanced Features */ + + unsigned int destroy_step; + unsigned int dirty_flag; + + struct unf_lport_sfp_info sfp_info; + struct unf_chip_manage_info_s *chip_info; + +#define UNF_LOOP_BACK_TESTING 1 +#define UNF_LOOP_BACK_TEST_END 0 + + unsigned char sfp_power_fault_count; + unsigned char sfp_9545_fault_count; + unsigned long long last_tx_fault_jif; /* SFP last tx fault jiffies */ + + /* Server card: UNF_FC_SERVER_BOARD_32_G(6)for 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)for 16G mode + */ + unsigned int card_type; + atomic_t scsi_session_add_success; + atomic_t scsi_session_add_failed; + atomic_t scsi_session_del_success; + atomic_t scsi_session_del_failed; + atomic_t add_start_work_failed; + atomic_t add_closing_work_failed; + atomic_t device_alloc; + atomic_t device_destroy; + atomic_t session_loss_tmo; + atomic_t alloc_scsi_id; + atomic_t resume_scsi_id; + atomic_t reuse_scsi_id; + atomic64_t last_exchg_mgr_idx; + atomic64_t exchg_index; + + unsigned int pcie_link_down_cnt; + int b_pcie_linkdown; + unsigned char fw_version[HIFC_VER_LEN]; + + atomic_t 
link_lose_tmo; + atomic_t err_code_obtain_freq; +}; + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e v_event); +void unf_lport_error_recovery(struct unf_lport_s *v_lport); +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e v_states); +void unf_init_portparms(struct unf_lport_s *v_lport); +unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport); +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport); +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_pst_lport); +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport); +void unf_destroy_lport_route(struct unf_lport_s *v_lport); +void unf_reset_lport_params(struct unf_lport_s *v_lport); +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype); + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index); +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did); +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +void unf_cm_vport_remove(struct unf_lport_s *v_vport); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.c b/drivers/scsi/huawei/hifc/unf_npiv.c new file mode 100644 index 0000000000000000000000000000000000000000..be7772cb5b741ea220769ab987f31cd82f920ac0 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.c @@ -0,0 +1,1487 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 1, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_npiv.h" + +/* Note: + * The function related with resources allocation in Vport is shared with Lport, + * and rootLport is acted as parameters in this function including : + * stEsglPool; + * event_mgr; + * stRportPool + * ExchMgr + */ + +#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000 + +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport) +{ + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned short vport_cnt = 0; + struct unf_lport_s *vport = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned int vport_pool_size = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1950, UNF_TRUE, v_lport, return RETURN_ERROR); + + UNF_TOU16_CHECK(vport_cnt, v_lport->low_level_func.support_max_npiv_num, + return RETURN_ERROR); + if (vport_cnt == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) do not support NPIV", + v_lport->port_id); + + return RETURN_OK; + } + + vport_pool_size = sizeof(struct unf_vport_pool_s) + + sizeof(struct unf_lport_s *) * vport_cnt; + v_lport->vport_pool = vmalloc(vport_pool_size); + if (!v_lport->vport_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool", + v_lport->port_id); + + return RETURN_ERROR; + } + memset(v_lport->vport_pool, 0, vport_pool_size); + vport_pool = v_lport->vport_pool; + vport_pool->vport_pool_count = vport_cnt; + vport_pool->vport_pool_completion = NULL; + spin_lock_init(&vport_pool->vport_pool_lock); + INIT_LIST_HEAD(&vport_pool->list_vport_pool); + + vport_pool->vport_pool_addr = vmalloc( + (size_t)(vport_cnt * sizeof(struct unf_lport_s))); + if (!vport_pool->vport_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool address", + v_lport->port_id); + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + return RETURN_ERROR; + } + + memset(vport_pool->vport_pool_addr, 0, vport_cnt * + sizeof(struct unf_lport_s)); + vport = (struct unf_lport_s *)vport_pool->vport_pool_addr; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + for (i = 0; i < vport_cnt; i++) { + list_add_tail(&vport->entry_vport, + &vport_pool->list_vport_pool); + vport++; + } + + vport_pool->slab_next_index = 0; + vport_pool->slab_total_sum = vport_cnt; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_free_vport_pool(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int remain = 0; + struct completion vport_pool_completion = + COMPLETION_INITIALIZER(vport_pool_completion); + + UNF_CHECK_VALID(0x1951, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x1952, UNF_TRUE, v_lport->vport_pool, return); + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + + if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) { + vport_pool->vport_pool_completion = &vport_pool_completion; + remain = vport_pool->slab_total_sum - + vport_pool->vport_pool_count; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for vport pool 
completion(%ld) remain(%d)", + v_lport->port_id, jiffies, remain); + + wait_for_completion(vport_pool->vport_pool_completion); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for vport pool completion end(%ld)", + v_lport->port_id, jiffies); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + vport_pool->vport_pool_completion = NULL; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + } + + if (v_lport->vport_pool->vport_pool_addr) { + vfree(v_lport->vport_pool->vport_pool_addr); + v_lport->vport_pool->vport_pool_addr = NULL; + } + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + UNF_REFERNCE_VAR(remain); +} + +static inline struct unf_lport_s *unf_get_vport_by_slab_index( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x1953, UNF_TRUE, v_vport_pool, return NULL); + + return v_vport_pool->vport_slab[v_slab_index]; +} + +static inline void unf_vport_pool_slab_set( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index, + struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1954, UNF_TRUE, v_vport_pool, return); + + v_vport_pool->vport_slab[v_slab_index] = v_vport; +} + +unsigned int unf_alloc_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport, + unsigned short v_vpid) +{ + unsigned short slab_index = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1955, UNF_TRUE, v_vport_pool, return RETURN_ERROR); + UNF_CHECK_VALID(0x1956, UNF_TRUE, v_vport, return RETURN_ERROR); + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + if (v_vpid == 0) { + slab_index = v_vport_pool->slab_next_index; + while (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + slab_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + if (slab_index == v_vport_pool->slab_next_index) { + spin_unlock_irqrestore( + &v_vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort pool has no slab "); + + return RETURN_ERROR; + } + } + } else { + slab_index = v_vpid - 1; + if (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]VPort Index(0x%x) is occupy", v_vpid); + + return RETURN_ERROR; + } + } + + unf_vport_pool_slab_set(v_vport_pool, slab_index, v_vport); + + v_vport_pool->slab_next_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = slab_index + 1; /* VpIndex=SlabIndex+1 */ + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); + + return RETURN_OK; +} + +void unf_free_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1957, UNF_TRUE, v_vport_pool, return); + UNF_CHECK_VALID(0x1958, UNF_TRUE, v_vport, return); + + if ((v_vport->vp_index == 0) || + (v_vport->vp_index > v_vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Input vpoot index(0x%x) is beyond the normal range, min(0x1), max(0x%x).", + v_vport->vp_index, v_vport_pool->slab_total_sum); + return; + } + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + unf_vport_pool_slab_set(v_vport_pool, v_vport->vp_index - 1, NULL); + 
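+	/*
+	 * The slab slot is released under the pool lock; the vport's own
+	 * vp_index is invalidated afterwards under its state lock.
+	 */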
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = INVALID_VALUE16; + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); +} + +struct unf_lport_s *unf_get_free_vport(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *vport = NULL; + struct list_head *list_head = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1959, 1, v_lport, return NULL); + UNF_CHECK_VALID(0x1960, UNF_TRUE, v_lport->vport_pool, return NULL); + + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + if (!list_empty(&vport_pool->list_vport_pool)) { + list_head = (&vport_pool->list_vport_pool)->next; + list_del(list_head); + vport_pool->vport_pool_count--; + list_add_tail(list_head, &v_lport->list_vports_head); + vport = list_entry(list_head, struct unf_lport_s, entry_vport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]LPort(0x%x)'s vport pool is empty", + v_lport->port_id); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return NULL; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return vport; +} + +void unf_vport_back_to_pool(void *v_vport) +{ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *list = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1961, UNF_TRUE, v_vport, return); + vport = v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + UNF_CHECK_VALID(0x1962, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x1963, UNF_TRUE, lport->vport_pool, return); + + unf_free_vp_index(lport->vport_pool, vport); + + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, flag); + + list = &vport->entry_vport; + list_del(list); + list_add_tail(list, &lport->vport_pool->list_vport_pool); + lport->vport_pool->vport_pool_count++; + + spin_unlock_irqrestore(&lport->vport_pool->vport_pool_lock, flag); +} + +void unf_init_vport_from_lport(struct unf_lport_s *v_vport, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1964, UNF_TRUE, v_vport, return); + UNF_CHECK_VALID(0x1965, UNF_TRUE, v_lport, return); + + v_vport->port_type = v_lport->port_type; + v_vport->fc_port = v_lport->fc_port; + v_vport->en_act_topo = v_lport->en_act_topo; + v_vport->root_lport = v_lport; + v_vport->pfn_unf_qualify_rport = v_lport->pfn_unf_qualify_rport; + v_vport->link_event_wq = v_lport->link_event_wq; + v_vport->xchg_wq = v_lport->xchg_wq; + + memcpy(&v_vport->xchg_mgr_temp, &v_lport->xchg_mgr_temp, + sizeof(struct unf_cm_xchg_mgr_template_s)); + + memcpy(&v_vport->event_mgr, &v_lport->event_mgr, + sizeof(struct unf_event_mgr)); + + memset(&v_vport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + + memcpy(&v_vport->low_level_func, &v_lport->low_level_func, + sizeof(struct unf_low_level_function_op_s)); +} + +void unf_check_vport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1968, UNF_TRUE, v_lport, return); + vport_pool = v_lport->vport_pool; + UNF_CHECK_VALID(0x1969, UNF_TRUE, vport_pool, return); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + + if ((vport_pool->vport_pool_completion) && + (vport_pool->slab_total_sum == vport_pool->vport_pool_count)) + complete(vport_pool->vport_pool_completion); + + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +void unf_vport_fabric_logo(struct unf_lport_s 
*v_vport) +{ + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + rport = unf_get_rport_by_nport_id(v_vport, UNF_FC_FID_FLOGI); + + UNF_CHECK_VALID(0x1970, UNF_TRUE, rport, return); + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_enter_logo(v_vport, rport); +} + +void unf_vport_deinit(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1971, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + + unf_unregister_scsi_host(vport); + + unf_disc_mgr_destroy(vport); + + unf_release_xchg_mgr_temp(vport); + + unf_release_lport_mgr_temp(vport); + + unf_destroy_scsi_id_table(vport); + + unf_lport_release_lw_fun_op(vport); + vport->fc_port = NULL; + vport->vport = NULL; + + if (vport->lport_free_completion) { + complete(vport->lport_free_completion); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) point(0x%p) completion free function is NULL", + vport->port_id, vport); + dump_stack(); + } +} + +void unf_vport_ref_dec(struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1972, UNF_TRUE, v_vport, return); + + if (atomic_dec_and_test(&v_vport->lport_ref_cnt)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport", + v_vport->port_id, v_vport); + + unf_vport_deinit(v_vport); + } +} + +unsigned int unf_vport_init(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1974, UNF_TRUE, v_vport, return RETURN_ERROR); + vport = (struct unf_lport_s *)v_vport; + + vport->options = UNF_PORT_MODE_INI; + vport->nport_id = 0; + + if (unf_init_scsi_id_table(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize SCSI ID table", + vport->port_id); + + return RETURN_ERROR; + } + + if (unf_init_disc_mgr(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize discover manager", + vport->port_id); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + if (unf_register_scsi_host(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) vport can not register SCSI host", + vport->port_id); + unf_disc_mgr_destroy(vport); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Vport(0x%x) Create succeed with wwpn(0x%llx)", + vport->port_id, vport->port_name); + + return RETURN_OK; +} + +void unf_vport_remove(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + struct unf_lport_s *lport = NULL; + struct completion vport_free_completion = + COMPLETION_INITIALIZER(vport_free_completion); + + UNF_CHECK_VALID(0x1975, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + vport->lport_free_completion = &vport_free_completion; + + unf_set_lport_removing(vport); + + unf_vport_ref_dec(vport); + + wait_for_completion(vport->lport_free_completion); + unf_vport_back_to_pool(vport); + + unf_check_vport_pool_status(lport); +} + +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + unsigned long flags = 0; + + 
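+	/*
+	 * vp_index is 1-based: 0 denotes the physical port, valid vport
+	 * indexes run from 1 to slab_total_sum and map to slab slot
+	 * vp_index - 1.
+	 */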
UNF_CHECK_VALID(0x1976, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + if ((v_vp_index == 0) || (v_vp_index > vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)", + lport->port_id, v_vp_index, + vport_pool->slab_total_sum); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + vport = unf_get_vport_by_slab_index(vport_pool, v_vp_index - 1); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return (void *)vport; +} + +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no vport ID(0x%x).", + lport->port_id, v_port_id); + return NULL; +} + +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) 
has no vport Nport ID(0x%x)", + lport->port_id, v_did); + return NULL; +} + +void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1979, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has no vport WWPN(0x%llx)", + lport->port_id, v_wwpn); + + return NULL; +} + +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *vport = NULL; + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", + lport->port_id, v_wwpn); + + return NULL; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return NULL; + } + vport->root_lport = lport; + vport->port_name = v_wwpn; + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if (unf_alloc_vp_index(lport->vport_pool, vport, 0) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return NULL; + } + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + return vport; +} + +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn) +{ +#define VPORT_WWN_MASK 0xff00ffffffffffff +#define VPORT_WWN_SHIFT 48 + + struct fc_vport_identifiers vid = { 0 }; + struct fc_vport *fc_port = NULL; + struct Scsi_Host *shost = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + unsigned short vport_id = 0; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot find LPort by (0x%x).", v_port_id); + + return RETURN_ERROR; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", + lport->port_id, v_wwpn); + + return RETURN_ERROR; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return 
RETURN_ERROR; + } + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if ((lport->port_name & VPORT_WWN_MASK) == (v_wwpn & VPORT_WWN_MASK)) { + vport_id = (v_wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; + if (vport_id == 0) { + vport_id = (lport->port_name & ~VPORT_WWN_MASK) >> + VPORT_WWN_SHIFT; + } + } + + if (unf_alloc_vp_index(lport->vport_pool, vport, vport_id) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vid.vport_type = FC_PORTTYPE_NPIV; + vid.disable = false; + vid.node_name = lport->node_name; + + if (v_wwpn != 0) { + vid.port_name = v_wwpn; + } else { + if ((lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT != + vport->vp_index) + vid.port_name = + (lport->port_name & VPORT_WWN_MASK) | + (((unsigned long long)vport->vp_index) << + VPORT_WWN_SHIFT); + else + vid.port_name = (lport->port_name & VPORT_WWN_MASK); + } + + vport->port_name = vid.port_name; + + shost = lport->host_info.p_scsi_host; + + fc_port = fc_vport_create(shost, 0, &vid); + if (!fc_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) failed to create vport wwpn=%llx", + lport->port_id, vid.port_name); + + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + return RETURN_OK; +} + +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config) +{ + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + enum unf_act_topo_e lport_topo = UNF_ACT_TOP_UNKNOWN; + enum unf_lport_login_state_e lport_state = UNF_LPORT_ST_ONLINE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_vport_config, return NULL); + + if (v_vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Only support INITIATOR port mode(0x%x)", + v_vport_config->port_mode); + + return NULL; + } + lport = v_lport; + + if (lport != lport->root_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) is not root port, return", + lport->port_id); + + return NULL; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_vport_config->port_name); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) can not find vport with wwpn(0x%llx)", + lport->port_id, v_vport_config->port_name); + + return NULL; + } + + ret = unf_vport_init(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort(0x%x) can not initialize vport", + vport->port_id); + + return NULL; + } + + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport_topo = lport->en_act_topo; + lport_state = lport->en_states; + v_vport_config->node_name = lport->node_name; + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + vport->port_name = v_vport_config->port_name; + vport->node_name = v_vport_config->node_name; + vport->nport_id = 0; + + /* only fabric topo support NPIV */ + if ((lport_topo == UNF_ACT_TOP_P2P_FABRIC) && + /* after receive flogi acc */ + (lport_state >= UNF_LPORT_ST_PLOGI_WAIT) && + (lport_state <= UNF_LPORT_ST_READY)) { + vport->link_up = lport->link_up; +
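/* root port already has its FLOGI ACC in fabric topology, so kick off this vport's fabric login (FDISC) now */ +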
(void)unf_lport_login(vport, lport_topo); + } + + return vport; +} + +unsigned int unf_drop_vport(struct unf_lport_s *v_vport) +{ + unsigned int ret = RETURN_ERROR; + struct fc_vport *vport = NULL; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, v_vport, return RETURN_ERROR); + + vport = v_vport->vport; + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) find vport in scsi is NULL", + v_vport->port_id); + + return ret; + } + + ret = fc_vport_terminate(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) terminate vport(%p) in scsi failed", + v_vport->port_id, vport); + + return ret; + } + return ret; +} + +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index) +{ + struct unf_lport_s *lport = NULL; + unsigned short vp_index = 0; + struct unf_lport_s *vport = NULL; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found by portid", + v_port_id); + + return RETURN_ERROR; + } + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) is in NOP, destroy all vports function will be called", + lport->port_id); + + return RETURN_OK; + } + + UNF_TOU16_CHECK(vp_index, v_vp_index, return RETURN_ERROR); + vport = unf_cm_lookup_vport_by_vp_index(lport, vp_index); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not lookup VPort by VPort index(0x%x)", + vp_index); + + return RETURN_ERROR; + } + + return unf_drop_vport(vport); +} + +void unf_vport_abort_all_sfs_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + if (vport == exch->lport && + (atomic_read(&exch->ref_cnt) > 0)) { + exch->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + unf_disc_ctrl_size_inc(vport, exch->cmnd_code); + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + } else { + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_ini_io_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + 
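/* move this vport's active INI exchanges from every hot pool's busy list to the destroy list with the abort flag set */ +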
UNF_CHECK_VALID(0x1986, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) MgrIdex %d hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id, i); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport == exch->lport && + atomic_read(&exch->ref_cnt) > 0) { + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + exch->io_state |= INI_IO_STATE_DRABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_all_exch(struct unf_lport_s *vport) +{ + UNF_CHECK_VALID(0x1988, UNF_TRUE, vport, return); + + unf_vport_abort_all_sfs_exch(vport); + + unf_vport_abort_ini_io_exch(vport); +} + +unsigned int unf_vport_wait_all_exch_removed(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long long cur_jif = jiffies; + unsigned int i = 0; + + UNF_CHECK_VALID(0x1989, UNF_TRUE, vport, return RETURN_ERROR); + + while (1) { + vport_uses = 0; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *) + (vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot Pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->list_destroy_xchg) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport != exch->lport) + continue; + + vport_uses++; + + if (jiffies - cur_jif >= + msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_NORMAL, UNF_ERR, + "[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x), sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)", + vport->port_id, exch, + (unsigned int)exch->xchg_type, + (unsigned int)exch->ox_id, + (unsigned int)exch->rx_id, + (unsigned int)exch->sid, + (unsigned int)exch->did, + (unsigned int)exch->seq_id, + (unsigned int)exch->io_state, + atomic_read(&exch->ref_cnt)); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + flags); + } + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all exchanges it used", + vport->port_id); + break; + } + + if (jiffies - cur_jif >= msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(1000); + } + + return RETURN_OK; +} + +unsigned int unf_vport_wait_rports_removed(struct unf_lport_s *vport) +{ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long 
long cur_jif = jiffies; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x1990, UNF_TRUE, vport, return RETURN_ERROR); + disc = &vport->disc; + + while (1) { + vport_uses = 0; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_for_each_safe(node, next_node, &disc->list_delete_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + list_for_each_safe(node, next_node, + &disc->list_destroy_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all RPorts it used", + vport->port_id); + break; + } + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) has %d RPorts not removed wait timeout(30s)", + vport->port_id, vport_uses); + + if (jiffies - cur_jif >= + msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(5000); + } + + UNF_REFERNCE_VAR(rport); + + return RETURN_OK; +} + +unsigned int unf_destroy_one_vport(struct unf_lport_s *vport) +{ + unsigned int ret = RETURN_ERROR; + struct unf_lport_s *root_port = NULL; + + UNF_CHECK_VALID(0x1992, UNF_TRUE, vport, return RETURN_ERROR); + + root_port = (struct unf_lport_s *)vport->root_lport; + + unf_vport_fabric_logo(vport); + + /* 1 set NOP */ + atomic_set(&vport->port_no_operater_flag, UNF_LPORT_NOP); + vport->b_port_removing = UNF_TRUE; + + /* 2 report linkdown to scsi and delele rpot */ + unf_link_down_one_vport(vport); + + /* 3 set abort for exchange */ + unf_vport_abort_all_exch(vport); + + /* 4 wait exch return freepool */ + if (!root_port->b_port_dir_exchange) { + ret = unf_vport_wait_all_exch_removed(vport); + if (ret != RETURN_OK) { + if ((root_port->b_port_removing) != UNF_TRUE) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, + UNF_ERR, + "[err]VPort(0x%x) can not wait Exchange return freepool", + vport->port_id); + + return RETURN_ERROR; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) is removing, there is dirty exchange, continue", + root_port->port_id); + + root_port->b_port_dir_exchange = UNF_TRUE; + } + } + + /* wait rport return rportpool */ + ret = unf_vport_wait_rports_removed(vport); + if (ret != RETURN_OK) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPort(0x%x) can not wait Rport return freepool", + vport->port_id); + + return RETURN_ERROR; + } + + unf_cm_vport_remove(vport); + + return RETURN_OK; +} + +void unf_link_down_one_vport(struct unf_lport_s *v_vport) +{ + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[info]VPort(0x%x) linkdown", v_vport->port_id); + + spin_lock_irqsave(&v_vport->lport_state_lock, flag); + v_vport->link_up = UNF_PORT_LINK_DOWN; + v_vport->nport_id = 0; /* set nportid 0 before send fdisc again */ + unf_lport_stat_ma(v_vport, UNF_EVENT_LPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_vport->lport_state_lock, flag); + + 
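/* flush discovery events queued on the root port for this vport, then clean up its link-down R_Ports */ +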
root_lport = (struct unf_lport_s *)v_vport->root_lport; + + unf_flush_disc_event(&root_lport->disc, v_vport); + + unf_clean_linkdown_rport(v_vport); +} + +void unf_linkdown_all_vports(void *v_lport) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1993, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + unf_link_down_one_vport(vport); + + unf_vport_ref_dec(vport); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +int unf_process_vports_linkup(void *v_arg_in, void *v_arg_out) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x1994, UNF_TRUE, v_arg_in, return RETURN_ERROR); + + lport = (struct unf_lport_s *)v_arg_in; + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is NOP don't continue", + lport->port_id); + + return RETURN_OK; + } + + if (lport->link_up != UNF_PORT_LINK_UP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is not linkup don't continue.", + lport->port_id); + + return RETURN_OK; + } + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL.", + lport->port_id); + + return RETURN_OK; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + 
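/* put the vport back on the normal list before dropping the pool lock to process its login state */ +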
list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + if (atomic_read(&vport->port_no_operater_flag) == + UNF_LPORT_NOP) { + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + continue; + } + + if ((lport->link_up == UNF_PORT_LINK_UP) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Vport(0x%x) begin login", + vport->port_id); + + vport->link_up = UNF_PORT_LINK_UP; + (void)unf_lport_login(vport, lport->en_act_topo); + + msleep(100); + } else { + unf_link_down_one_vport(vport); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) login failed because root port linkdown", + vport->port_id); + } + + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_linkup_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_cm_event_report *event = NULL; + + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return); + + if (unlikely((!v_lport->event_mgr.pfn_unf_get_free_event) || + (!v_lport->event_mgr.pfn_unf_post_event) || + (!v_lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) Event fun is NULL", + v_lport->port_id); + return; + } + + event = v_lport->event_mgr.pfn_unf_get_free_event((void *)v_lport); + UNF_CHECK_VALID(0x1997, UNF_TRUE, event, return); + + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = unf_process_vports_linkup; + event->para_in = (void *)v_lport; + + v_lport->event_mgr.pfn_unf_post_event(v_lport, event); +} + +void unf_destroy_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + lport = v_lport; + UNF_CHECK_VALID(0x1998, UNF_TRUE, lport, return); + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Lport(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_destroy_vports); + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_destroy_vports); + atomic_dec(&vport->lport_ref_cnt); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_destroy_vports)) { + node = (&lport->list_destroy_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]VPort(0x%x) Destroy begin", + vport->port_id); + 
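/* unf_drop_vport() tears the vport down through the SCSI FC transport (fc_vport_terminate) */ +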
unf_drop_vport(vport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]VPort(0x%x) Destroy end", + vport->port_id); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.h b/drivers/scsi/huawei/hifc/unf_npiv.h new file mode 100644 index 0000000000000000000000000000000000000000..de9572931b78d53e4bed4a849266c4ecbb3389dd --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __NPIV_H__ +#define __NPIV_H__ + +/* product VPORT configure */ +struct vport_config_s { + unsigned long long node_name; + unsigned long long port_name; + unsigned int port_mode; /* INI, TGT or both */ +}; + +/* product Vport function */ +#define PORTID_VPINDEX_MASK 0xff000000 +#define PORTID_VPINDEX_SHIT 24 +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn); +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config); +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index); + +/* Vport pool creat and release function */ +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport); +void unf_free_vport_pool(struct unf_lport_s *v_lport); + +/* Lport resigster stLPortMgTemp function */ +void unf_vport_remove(void *v_vport); +void unf_vport_ref_dec(struct unf_lport_s *v_vport); + +/* linkdown all Vport after receive linkdown event */ +void unf_linkdown_all_vports(void *v_lport); +/* Lport receive Flogi Acc linkup all Vport */ +void unf_linkup_all_vports(struct unf_lport_s *v_lport); +/* Lport remove delete all Vport */ +void unf_destroy_all_vports(struct unf_lport_s *v_lport); +void unf_vport_fabric_logo(struct unf_lport_s *v_vport); +unsigned int unf_destroy_one_vport(struct unf_lport_s *v_vport); +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +unsigned int unf_drop_vport(struct unf_lport_s *v_vport); +void unf_link_down_one_vport(struct unf_lport_s *v_vport); +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index); +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id); +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did); +void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_portman.c b/drivers/scsi/huawei/hifc/unf_portman.c new file mode 100644 index 0000000000000000000000000000000000000000..d244a5c1aaa2a9142b57d526897692360620aa83 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_portman.c @@ -0,0 +1,5565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_exchg.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_service.h" +#include "unf_rport.h" +#include "unf_npiv.h" +#include "hifc_portmng.h" + +#define UNF_LOOP_STOP_NEED_WAIT 0 +#define UNF_LOOP_STOP_NO_NEED_WAIT 1 + +#define UNF_MAX_SAVE_ENTRY_NUM 60 +#define UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(sfs_speed, cfg_speed) \ + ((sfs_speed) < (cfg_speed) || (sfs_speed) == UNF_PORT_SFP_SPEED_ERR) +#define UNF_LPORT_CHIP_ERROR(lport) \ + 
((lport)->pcie_error_cnt.pcie_error_count[UNF_PCIE_FATALERRORDETECTED]) + +struct unf_global_lport_s global_lport_mgr; + +static unsigned int unf_port_link_up(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_link_down(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put); +static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport, + void *v_in_put); +static int unf_cm_port_info_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_port_speed_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_topo_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_port_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_get_port_sfp_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_get_all_port_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_bbscn_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_get_io_dfx_statistics(struct unf_lport_s *v_pstLPort, + struct unf_hinicam_pkg *v_input); +static int unf_cm_set_vport(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input); +static int unf_cm_link_delay_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_save_data_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_set_dif(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); + +static struct unf_port_action_s lport_action[] = { + { UNF_PORT_LINK_UP, unf_port_link_up }, + { UNF_PORT_LINK_DOWN, unf_port_link_down }, + { UNF_PORT_RESET_START, unf_port_reset_start }, + { UNF_PORT_RESET_END, unf_port_reset_end }, + { UNF_PORT_NOP, unf_port_nop }, + { UNF_PORT_CLEAN_DONE, unf_port_clean_done }, + { UNF_PORT_BEGIN_REMOVE, unf_port_begin_remove }, + { UNF_PORT_RELEASE_RPORT_INDEX, unf_port_release_rport_index }, + { UNF_PORT_ABNORMAL_RESET, unf_port_abnormal_reset }, +}; + +static struct unf_hifcadm_action_s unf_hifcadm_action[] = { + { UNF_PORT_SET_OP, unf_cm_port_set }, + { UNF_TOPO_SET_OP, unf_cm_topo_set }, + { UNF_SPEED_SET_OP, unf_cm_port_speed_set }, + { UNF_INFO_GET_OP, unf_cm_port_info_get }, + { UNF_INFO_CLEAR_OP, unf_cm_clear_error_code_sum }, + { UNF_SFP_INFO_OP, unf_get_port_sfp_info }, + { UNF_ALL_INFO_OP, unf_cm_get_all_port_info }, + { UNF_BBSCN, unf_cm_bbscn_set }, + { UNF_DFX, 
unf_get_io_dfx_statistics }, + { UNF_VPORT, unf_cm_set_vport }, + { UNF_LINK_DELAY, unf_cm_link_delay_get }, + { UNF_SAVA_DATA, unf_cm_save_data_mode }, + { UNF_DIF, unf_cm_set_dif }, + { UNF_DIF_CONFIG, unf_cm_select_dif_mode }, + { UNF_SHOW_XCHG, unf_cm_adm_show_xchg }, + { FC_LINK_TMO_OPT, unf_cm_adm_link_time_out_opt }, + { FC_DRV_LOG_OPT, unf_cm_adm_log_level_opt }, +}; + +static void unf_destroy_dirty_rport(struct unf_lport_s *v_lport, + int v_show_only) +{ + unsigned int dirty_rport = 0; + + UNF_REFERNCE_VAR(dirty_rport); + + /* for whole L_Port */ + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) { + dirty_rport = v_lport->rport_pool.rport_pool_count; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has %u dirty RPort(s)", + v_lport->port_id, dirty_rport); + + /* free R_Port pool memory & bitmap */ + if (v_show_only == UNF_FALSE) { + vfree(v_lport->rport_pool.rport_pool_add); + v_lport->rport_pool.rport_pool_add = NULL; + vfree(v_lport->rport_pool.pul_rpi_bitmap); + v_lport->rport_pool.pul_rpi_bitmap = NULL; + } + } + + UNF_REFERNCE_VAR(dirty_rport); +} + +void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num) +{ + struct list_head *node = NULL; + struct list_head *node_next = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + unsigned int port_num = 0; + + UNF_CHECK_VALID(0x2200, UNF_TRUE, NULL != v_ditry_port_num, return); + + /* for each dirty L_Port from global L_Port list */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, node_next, &global_lport_mgr.list_dirty_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has dirty data(0x%x)", + lport->port_id, lport->dirty_flag); + + /* Destroy dirty L_Port's exchange(s) & R_Port(s) */ + unf_destroy_dirty_xchg(lport, v_show_only); + unf_destroy_dirty_rport(lport, v_show_only); + + /* Delete (dirty L_Port) list entry if necessary */ + if (v_show_only == UNF_FALSE) { + list_del_init(node); + vfree(lport); + } + + port_num++; + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + *v_ditry_port_num = port_num; +} + +int unf_send_event(unsigned int port_id, + unsigned int syn_flag, + void *argc_in, + void *argc_out, + int (*p_func)(void *argc_in, void *argc_out)) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_event_report *event = NULL; + int ret = 0; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + if (unf_lport_refinc(lport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) is removing, no need process.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) || + (!lport->event_mgr.pfn_unf_post_event) || + (!lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Event function is NULL."); + + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + if (lport->b_port_removing == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) is removing, no need process.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + event = 
lport->event_mgr.pfn_unf_get_free_event((void *)lport); + if (!event) { + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + init_completion(&event->event_comp); + event->lport = lport; + event->event_asy_flag = syn_flag; + event->pfn_unf_event_task = p_func; + event->para_in = argc_in; + event->para_out = argc_out; + lport->event_mgr.pfn_unf_post_event(lport, event); + + if (event->event_asy_flag) { + /* You must wait for the other party to return. Otherwise, + *the linked list may be in disorder. + */ + wait_for_completion(&event->event_comp); + ret = (int)event->result; + lport->event_mgr.pfn_unf_release_event(lport, event); + } else { + ret = RETURN_OK; + } + + unf_lport_ref_dec_to_destroy(lport); + return ret; +} + +void unf_lport_update_topo(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_enactive_topo) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2210, UNF_TRUE, NULL != v_lport, return); + + if ((v_enactive_topo > UNF_ACT_TOP_UNKNOWN) || + (v_enactive_topo < UNF_ACT_TOP_PUBLIC_LOOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) set invalid topology(0x%x) with current value(0x%x)", + v_lport->nport_id, v_enactive_topo, + v_lport->en_act_topo); + + return; + } + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->en_act_topo = v_enactive_topo; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +void unf_set_lport_removing(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2216, UNF_TRUE, (v_lport), return); + + v_lport->fc_port = NULL; + v_lport->b_port_removing = UNF_TRUE; + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_0_SET_REMOVING; +} + +unsigned int unf_release_local_port(void *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct completion local_port_free_completion = + COMPLETION_INITIALIZER(local_port_free_completion); + + UNF_CHECK_VALID(0x2217, UNF_TRUE, (lport), + return UNF_RETURN_ERROR); + + lport->lport_free_completion = &local_port_free_completion; + unf_set_lport_removing(lport); + unf_lport_ref_dec(lport); + wait_for_completion(lport->lport_free_completion); + /* for dirty case */ + if (lport->dirty_flag == 0) + vfree(lport); + + return RETURN_OK; +} + +static void unf_free_all_esgl_pages(struct unf_lport_s *v_lport) +{ + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + unsigned int alloc_idx; + + UNF_CHECK_VALID(0x2218, UNF_TRUE, (v_lport), return); + spin_lock_irqsave(&v_lport->esgl_pool.esgl_pool_lock, flag); + list_for_each_safe(node, next_node, + &v_lport->esgl_pool.list_esgl_pool) { + list_del(node); + } + + spin_unlock_irqrestore(&v_lport->esgl_pool.esgl_pool_lock, flag); + + if (v_lport->esgl_pool.esgl_buf_list.buflist) { + for (alloc_idx = 0; + alloc_idx < v_lport->esgl_pool.esgl_buf_list.buf_num; + alloc_idx++) { + if (v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr) { + dma_free_coherent(&v_lport->low_level_func.dev->dev, + v_lport->esgl_pool.esgl_buf_list.buf_size, + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr, + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr); + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr = NULL; + } + } + kfree(v_lport->esgl_pool.esgl_buf_list.buflist); + v_lport->esgl_pool.esgl_buf_list.buflist = NULL; + } +} + +static unsigned int unf_init_esgl_pool(struct unf_lport_s *v_lport) +{ + struct unf_esgl_s *esgl = NULL; + unsigned int ret = RETURN_OK; + unsigned int index = 0; + unsigned int buf_total_size; + unsigned int buf_num; + 
unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset = 0; + unsigned int buf_cnt_perhugebuf; + + UNF_CHECK_VALID(0x2219, UNF_TRUE, NULL != v_lport, + return UNF_RETURN_ERROR); + + v_lport->esgl_pool.esgl_pool_count = + v_lport->low_level_func.lport_cfg_items.max_io; + spin_lock_init(&v_lport->esgl_pool.esgl_pool_lock); + INIT_LIST_HEAD(&v_lport->esgl_pool.list_esgl_pool); + + v_lport->esgl_pool.esgl_pool_addr = + vmalloc((size_t)((v_lport->esgl_pool.esgl_pool_count) * + sizeof(struct unf_esgl_s))); + if (!v_lport->esgl_pool.esgl_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR, + "LPort(0x%x) cannot allocate ESGL Pool.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + esgl = (struct unf_esgl_s *)v_lport->esgl_pool.esgl_pool_addr; + memset(esgl, 0, ((v_lport->esgl_pool.esgl_pool_count) * + sizeof(struct unf_esgl_s))); + + buf_total_size = + (unsigned int)(PAGE_SIZE * v_lport->esgl_pool.esgl_pool_count); + + v_lport->esgl_pool.esgl_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : + buf_total_size; + buf_cnt_perhugebuf = + v_lport->esgl_pool.esgl_buf_list.buf_size / PAGE_SIZE; + buf_num = v_lport->esgl_pool.esgl_pool_count % + buf_cnt_perhugebuf ? v_lport->esgl_pool.esgl_pool_count / + buf_cnt_perhugebuf + 1 : v_lport->esgl_pool.esgl_pool_count / + buf_cnt_perhugebuf; + v_lport->esgl_pool.esgl_buf_list.buflist = + (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), GFP_KERNEL); + v_lport->esgl_pool.esgl_buf_list.buf_num = buf_num; + + if (!v_lport->esgl_pool.esgl_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Esgl pool buf list failed out of memory"); + goto free_buff; + } + memset(v_lport->esgl_pool.esgl_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr = + dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + v_lport->esgl_pool.esgl_buf_list.buf_size, + &v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr, + GFP_KERNEL); + if (!v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr) + goto free_buff; + + memset(v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr, + 0, v_lport->esgl_pool.esgl_buf_list.buf_size); + } + + /* allocates the Esgl page, and the DMA uses the */ + for (index = 0; index < v_lport->esgl_pool.esgl_pool_count; index++) { + if ((index != 0) && !(index % buf_cnt_perhugebuf)) + cur_buf_idx++; + + cur_buf_offset = + (unsigned int) + (PAGE_SIZE * (index % buf_cnt_perhugebuf)); + esgl->page.page_address = + (unsigned long long)v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].vaddr + + cur_buf_offset; + esgl->page.page_size = PAGE_SIZE; + esgl->page.esgl_phyaddr = + v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].paddr + + cur_buf_offset; + list_add_tail(&esgl->entry_esgl, + &v_lport->esgl_pool.list_esgl_pool); + esgl++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u, buf_total_size:%u", buf_num, + buf_total_size); + return ret; +free_buff: + unf_free_all_esgl_pages(v_lport); + vfree(v_lport->esgl_pool.esgl_pool_addr); + + return UNF_RETURN_ERROR; +} + +static void unf_free_esgl_pool(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2220, UNF_TRUE, (v_lport), return); + + unf_free_all_esgl_pages(v_lport); + v_lport->esgl_pool.esgl_pool_count = 0; + + if (v_lport->esgl_pool.esgl_pool_addr) { + 
vfree(v_lport->esgl_pool.esgl_pool_addr); + v_lport->esgl_pool.esgl_pool_addr = NULL; + } + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL; +} + +struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id) +{ + struct unf_lport_s *lport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + unsigned int port_id = v_port_id & (~PORTID_VPINDEX_MASK); + unsigned short vport_index = (v_port_id & PORTID_VPINDEX_MASK) >> + PORTID_VPINDEX_SHIT; + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + if ((port_id == lport->port_id) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return unf_cm_lookup_vport_by_vp_index(lport, + vport_index); + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + if ((port_id == lport->port_id) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return unf_cm_lookup_vport_by_vp_index(lport, + vport_index); + } + } + + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + return NULL; +} + +unsigned int unf_is_vport_valid(struct unf_lport_s *v_lport, + struct unf_lport_s *v_vport) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_vport, return UNF_RETURN_ERROR); + + lport = v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) vport pool is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + if (vport == v_vport && vport->b_port_removing != UNF_TRUE) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + if (vport == v_vport && vport->b_port_removing != UNF_TRUE) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return RETURN_OK; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return UNF_RETURN_ERROR; +} + +unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( 
+ &global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_destroy_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + return UNF_RETURN_ERROR; +} + +static void unf_clean_link_down_io(struct unf_lport_s *v_lport, + int v_clean_flag) +{ + /* Clean L_Port/V_Port Link Down I/O: Set Abort Tag */ + UNF_CHECK_VALID(0x2225, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x2685, UNF_TRUE, + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io, + return); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(v_lport, + UNF_XCHG_TYPE_INI, v_clean_flag); + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(v_lport, + UNF_XCHG_TYPE_SFS, v_clean_flag); +} + +unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events, + void *v_input) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + + if (unlikely(!v_lport)) + return UNF_RETURN_ERROR; + + lport = (struct unf_lport_s *)v_lport; + + ret = unf_lport_refinc(lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) is removing and do nothing", + lport->port_id); + return RETURN_OK; + } + + /* process port event */ + while (index < (sizeof(lport_action) / + sizeof(struct unf_port_action_s))) { + if (v_events == lport_action[index].action) { + ret = lport_action[index].fn_unf_action(lport, v_input); + + unf_lport_ref_dec_to_destroy(lport); + + return ret; + } + index++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive unknown event(0x%x)", + lport->port_id, v_events); + + unf_lport_ref_dec_to_destroy(lport); + + return ret; +} + +void unf_port_mgmt_init(void) +{ + memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport_s)); + + INIT_LIST_HEAD(&global_lport_mgr.list_lport_list_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_intergrad_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_destroy_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_dirty_head); + + spin_lock_init(&global_lport_mgr.global_lport_list_lock); + + UNF_SET_NOMAL_MODE(global_lport_mgr.dft_mode); + + global_lport_mgr.b_start_work = UNF_TRUE; +} + +void unf_port_mgmt_deinit(void) +{ + if (global_lport_mgr.lport_sum != 0) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]There are %u port pool memory giveaway", + global_lport_mgr.lport_sum); + + memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport_s)); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Common port 
manager exit succeed"); +} + +static void unf_port_register(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x2230, UNF_TRUE, (v_lport), return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Register LPort(0x%p), port ID(0x%x).", + v_lport, v_lport->port_id); + + /* Add to the global management linked list header */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_add_tail(&v_lport->entry_lport, + &global_lport_mgr.list_lport_list_head); + global_lport_mgr.lport_sum++; + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); +} + +static void unf_port_unregister(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x2703, UNF_TRUE, (v_lport), return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Unregister LPort(0x%p), port ID(0x%x).", + v_lport, v_lport->port_id); + + /* Remove from the global management linked list header */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); +} + +static int unf_port_switch(struct unf_lport_s *v_lport, + unsigned int v_switch_flag) +{ + struct unf_lport_s *lport = v_lport; + int ret = UNF_RETURN_ERROR; + int switch_flag = UNF_FALSE; + + UNF_CHECK_VALID(0x2261, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x)'s config(switch) function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + switch_flag = v_switch_flag ? UNF_TRUE : UNF_FALSE; + ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_PORT_SWITCH, (void *)&switch_flag); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) switch %s failed", + lport->port_id, + v_switch_flag ? 
"On" : "Off"); + + return UNF_RETURN_ERROR; + } + + lport->b_switch_state = (enum int_e)switch_flag; + + return RETURN_OK; +} + +int unf_port_start_work(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct unf_fw_version_s fw_version = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2231, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_start_work_state != UNF_START_WORK_STOP) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; + } + v_lport->en_start_work_state = UNF_START_WORK_COMPLETE; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + fw_version.message_type = UNF_DEBUG_TYPE_MESSAGE; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose( + (void *)v_lport->fc_port, + UNF_PORT_DIAG_PORT_DETAIL, &fw_version); + if (ret != RETURN_OK) + v_lport->fw_version[0] = '\0'; + else + memcpy(v_lport->fw_version, fw_version.fw_version, + HIFC_VER_LEN); + + unf_cm_get_save_info(v_lport); + /* switch sfp to start work */ + (void)unf_port_switch(v_lport, UNF_TRUE); + + return RETURN_OK; +} + +static unsigned int unf_lport_init_lw_fun_op( + struct unf_lport_s *v_lport, + struct unf_low_level_function_op_s *low_level_op) +{ + UNF_CHECK_VALID(0x2235, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2236, UNF_TRUE, (low_level_op), + return UNF_RETURN_ERROR); + + v_lport->port_id = low_level_op->lport_cfg_items.port_id; + v_lport->port_name = low_level_op->sys_port_name; + v_lport->node_name = low_level_op->sys_node_name; + v_lport->options = low_level_op->lport_cfg_items.port_mode; + v_lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + + memcpy(&v_lport->low_level_func, low_level_op, + sizeof(struct unf_low_level_function_op_s)); + + return RETURN_OK; +} + +void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2237, UNF_TRUE, v_lport, return); + + memset(&v_lport->low_level_func, 0, + sizeof(struct unf_low_level_function_op_s)); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE; +} + +struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id) +{ + struct list_head *node = NULL, *next_node = NULL; + struct list_head *vp_node = NULL, *next_vp_node = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + unsigned long flags = 0; + unsigned long vpool_flags = 0; + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID((lport->host_info.p_scsi_host))) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return lport; + } + + /* support NPIV */ + if (lport->vport_pool) { + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, + vpool_flags); + list_for_each_safe(vp_node, next_vp_node, + &lport->list_vports_head) { + vport = list_entry(vp_node, struct unf_lport_s, + entry_vport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, + vpool_flags); + spin_unlock_irqrestore( + 
&global_lport_mgr.global_lport_list_lock, + flags); + + return vport; + } + } + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, vpool_flags); + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(lport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return lport; + } + + /* support NPIV */ + if (lport->vport_pool) { + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, + vpool_flags); + list_for_each_safe(vp_node, next_vp_node, + &lport->list_vports_head) { + vport = list_entry(vp_node, struct unf_lport_s, + entry_vport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, + vpool_flags); + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return vport; + } + } + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, vpool_flags); + } + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can not find port by scsi_host_id(0x%x), may be removing", + scsi_host_id); + + return NULL; +} + +unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_id_image = NULL; + struct unf_wwpn_rport_info_s *wwpn_port_info = NULL; + unsigned int idx; + + UNF_CHECK_VALID(0x2238, UNF_TRUE, (v_lport), + return UNF_RETURN_ERROR); + + rport_scsi_id_image = &v_lport->rport_scsi_table; + rport_scsi_id_image->max_scsi_id = UNF_MAX_SCSI_ID; + + /* If the number of remote connections supported by the L_Port is 0, + * an exception occurs + */ + if (rport_scsi_id_image->max_scsi_id == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x), supported maximum login is zero.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + rport_scsi_id_image->wwn_rport_info_table = + vmalloc(rport_scsi_id_image->max_scsi_id * + sizeof(struct unf_wwpn_rport_info_s)); + if (!rport_scsi_id_image->wwn_rport_info_table) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't allocate SCSI ID Table(0x%x).", + v_lport->port_id, rport_scsi_id_image->max_scsi_id); + + return UNF_RETURN_ERROR; + } + memset(rport_scsi_id_image->wwn_rport_info_table, 0, + rport_scsi_id_image->max_scsi_id * + sizeof(struct unf_wwpn_rport_info_s)); + + wwpn_port_info = rport_scsi_id_image->wwn_rport_info_table; + + for (idx = 0; idx < rport_scsi_id_image->max_scsi_id; idx++) { + INIT_DELAYED_WORK(&wwpn_port_info->loss_tmo_work, + unf_sesion_loss_timeout); + INIT_LIST_HEAD(&wwpn_port_info->fc_lun_list); + wwpn_port_info->lport = v_lport; + wwpn_port_info->target_id = INVALID_VALUE32; + wwpn_port_info++; + } + + spin_lock_init(&rport_scsi_id_image->scsi_image_table_lock); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) supported maximum login is %d.", + v_lport->port_id, rport_scsi_id_image->max_scsi_id); + + return RETURN_OK; +} + +void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_id_image = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + unsigned int i = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2239, UNF_TRUE, (v_lport), return); + + rport_scsi_id_image = 
&v_lport->rport_scsi_table; + if (rport_scsi_id_image->wwn_rport_info_table) { + for (i = 0; i < UNF_MAX_SCSI_ID; i++) { + wwpn_rport_info = + &rport_scsi_id_image->wwn_rport_info_table[i]; + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, + &wwpn_rport_info->loss_tmo_work, + "loss tmo Timer work"); + if (wwpn_rport_info->dfx_counter) + vfree(wwpn_rport_info->dfx_counter); + } + + /* just for pc_lint */ + if (ret) + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "Port(0x%x) cancel loss tmo work success", + v_lport->port_id); + + vfree(rport_scsi_id_image->wwn_rport_info_table); + rport_scsi_id_image->wwn_rport_info_table = NULL; + } + + rport_scsi_id_image->max_scsi_id = 0; + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE; +} + +static unsigned int unf_lport_init( + struct unf_lport_s *v_lport, + void *private_data, + struct unf_low_level_function_op_s *low_level_op) +{ + unsigned int ret = RETURN_OK; + int ret_value = RETURN_ERROR_S32; + char work_queue_name[16]; + + unf_init_portparms(v_lport); + + /* Associating LPort with FCPort */ + v_lport->fc_port = private_data; + + /* VpIndx=0 is reserved for Lport, and rootLport points to its own */ + v_lport->vp_index = 0; + v_lport->root_lport = v_lport; + v_lport->chip_info = NULL; + + /* Initialize the units related to L_Port and lw func */ + ret = unf_lport_init_lw_fun_op(v_lport, low_level_op); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize lowlevel function unsuccessful.", + v_lport->port_id); + + return ret; + } + + /* Init Linkevent workqueue */ + ret_value = snprintf(work_queue_name, sizeof(work_queue_name), + "%x_lkq", (unsigned int)v_lport->port_id); + UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name)); + + v_lport->link_event_wq = create_singlethread_workqueue(work_queue_name); + if (!v_lport->link_event_wq) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) creat link event work queue failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + ret_value = snprintf(work_queue_name, sizeof(work_queue_name), + "%x_xchgwq", (unsigned int)v_lport->port_id); + UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name)); + + v_lport->xchg_wq = create_workqueue(work_queue_name); + if (!v_lport->xchg_wq) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) creat Exchg work queue failed", + v_lport->port_id); + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + return UNF_RETURN_ERROR; + } + /* scsi table (R_Port) required for initializing INI + * Initialize the scsi id Table table to manage the + * mapping between SCSI ID, WWN, and Rport. 
+ */ + ret = unf_init_scsi_id_table(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + return ret; + } + + /* Initialize the EXCH resource */ + ret = unf_alloc_xchg_resource(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) can't allocate exchange resource.", + v_lport->port_id); + + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* Initialize the ESGL resource pool used by Lport */ + ret = unf_init_esgl_pool(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + /* Initialize the disc manager under Lport */ + ret = unf_init_disc_mgr(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize discover manager unsuccessful.", + v_lport->port_id); + + return ret; + } + + /* Initialize the LPort manager */ + ret = unf_init_lport_mgr_temp(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize RPort manager unsuccessful.", + v_lport->port_id); + + goto RELEASE_LPORT; + } + + /* Initialize the EXCH manager */ + ret = unf_init_xchg_mgr_temp(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize exchange manager unsuccessful.", + v_lport->port_id); + + goto RELEASE_LPORT; + } + /* Initialize the resources required by the event processing center */ + ret = unf_init_event_center(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize event center unsuccessful.", + v_lport->port_id); + + goto RELEASE_LPORT; + } + /* Initialize the initialization status of Lport */ + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + + /* Initialize the Lport route test case */ + ret = unf_init_lport_route(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + (void)unf_event_center_destroy(v_lport); + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* Thesupports the initialization stepof the NPIV */ + ret = unf_init_vport_pool(v_lport); + if (ret != RETURN_OK) { + 
flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + unf_destroy_lport_route(v_lport); + (void)unf_event_center_destroy(v_lport); + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* qualifier rport callback */ + v_lport->pfn_unf_qualify_rport = unf_rport_set_qualifier_key_reuse; + v_lport->pfn_unf_tmf_abnormal_recovery = + unf_tmf_timeout_recovery_special; + return RETURN_OK; +RELEASE_LPORT: + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + return ret; +} + +static void unf_destroy_card_thread(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_chip_manage_info_s *chip_info = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long event_lock_flag = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, (v_lport), return); + + /* If the thread cannot be found, apply for a new thread. */ + chip_info = v_lport->chip_info; + if (!chip_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) has no event thread.", v_lport->port_id); + return; + } + event_mgr = &v_lport->event_mgr; + + spin_lock_irqsave(&chip_info->chip_event_list_lock, flag); + if (!list_empty(&chip_info->list_head)) { + list_for_each_safe(list, list_tmp, &chip_info->list_head) { + event_node = list_entry(list, + struct unf_cm_event_report, + list_entry); + + /* The LPort under the global event node is null. */ + if (v_lport == event_node->lport) { + list_del_init(&event_node->list_entry); + if (event_node->event_asy_flag == + UNF_EVENT_SYN) { + event_node->result = UNF_RETURN_ERROR; + complete(&event_node->event_comp); + } + + spin_lock_irqsave(&event_mgr->port_event_lock, + event_lock_flag); + event_mgr->free_event_count++; + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + spin_unlock_irqrestore( + &event_mgr->port_event_lock, + event_lock_flag); + } + } + } + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flag); + + /* If the number of events introduced by the event thread is 0, + * it indicates that no interface is used. 
In this case, thread + * resources need to be consumed + */ + if (atomic_dec_and_test(&chip_info->ref_cnt)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) destroy slot(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, + chip_info->chip_id); + chip_info->b_thread_exit = UNF_TRUE; + wake_up_process(chip_info->data_thread); + kthread_stop(chip_info->data_thread); + chip_info->data_thread = NULL; + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_del_init(&chip_info->list_chip_thread_entry); + card_thread_mgr.card_sum--; + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, + flag); + + vfree(chip_info); + } + + v_lport->chip_info = NULL; +} + +unsigned int unf_lport_deinit(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2246, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + /* If the card is unloaded normally, the thread is stopped once. + * The problem does not occur if you stop the thread again. + */ + unf_destroy_lport_route(v_lport); + + /* minus the reference count of the card event; + * the last port deletes the card thread + */ + unf_destroy_card_thread(v_lport); + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + /* Release Event Processing Center */ + (void)unf_event_center_destroy(v_lport); + + /* Release the Vport resource pool */ + unf_free_vport_pool(v_lport); + + /* Destroying the Xchg Manager */ + unf_xchg_mgr_destroy(v_lport); + + /* Release Esgl pool */ + unf_free_esgl_pool(v_lport); + + /* reliability review :Disc should release after Xchg. + * Destroy the disc manager + */ + unf_disc_mgr_destroy(v_lport); + + /* Release Xchg Mg template */ + unf_release_xchg_mgr_temp(v_lport); + + /* Release the Lport Mg template */ + unf_release_lport_mgr_temp(v_lport); + + /* Destroy the ScsiId Table */ + unf_destroy_scsi_id_table(v_lport); + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + /* Deregister SCSI Host */ + unf_unregister_scsi_host(v_lport); + + /* Releasing the lw Interface Template */ + unf_lport_release_lw_fun_op(v_lport); + v_lport->fc_port = NULL; + return RETURN_OK; +} + +static int unf_card_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + struct unf_chip_manage_info_s *chip_info = + (struct unf_chip_manage_info_s *)v_arg; + + UNF_REFERNCE_VAR(v_arg); + + set_user_nice(current, 4); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Slot(%u) chip(0x%x) enter event thread.", + chip_info->slot_id, chip_info->chip_id); + + while (!kthread_should_stop()) { + if (chip_info->b_thread_exit == UNF_TRUE) + break; + + spin_lock_irqsave(&chip_info->chip_event_list_lock, flags); + if (list_empty(&chip_info->list_head) == UNF_TRUE) { + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, + flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&chip_info->list_head)->next; + list_del_init(node); + chip_info->list_num--; + event_node = list_entry(node, + struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, + flags); + unf_handle_event(event_node); + } + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "Slot(%u) chip(0x%x) exit event thread.", + chip_info->slot_id, chip_info->chip_id); + + return RETURN_OK; +} + 
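The two functions above follow a common kernel idiom: one event thread per chip, shared by every port on that chip through a reference count, draining a spinlock-protected event list and sleeping briefly when the list is empty. The following is a minimal, self-contained sketch of that idiom only; the names (demo_chip, demo_event, demo_event_thread, demo_chip_put) are invented for illustration and are not part of this driver's API.

/*
 * Editor's sketch (not driver code): a per-chip worker thread that drains a
 * spinlock-protected list and is torn down by the last user dropping its
 * reference, mirroring the structure of unf_card_event_process() and
 * unf_destroy_card_thread(). All identifiers here are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

struct demo_event {
	struct list_head entry;
	void (*handle)(struct demo_event *ev);
};

struct demo_chip {
	spinlock_t lock;
	struct list_head events;
	struct task_struct *thread;
	atomic_t ref_cnt;	/* one reference per port using this chip */
	bool thread_exit;
};

static int demo_event_thread(void *arg)
{
	struct demo_chip *chip = arg;
	struct demo_event *ev;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (chip->thread_exit)
			break;

		spin_lock_irqsave(&chip->lock, flags);
		if (list_empty(&chip->events)) {
			spin_unlock_irqrestore(&chip->lock, flags);
			/* nothing queued: sleep up to 1s or until woken */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(1000));
			continue;
		}
		ev = list_first_entry(&chip->events, struct demo_event, entry);
		list_del_init(&ev->entry);
		spin_unlock_irqrestore(&chip->lock, flags);

		ev->handle(ev);	/* run the handler outside the lock */
	}
	return 0;
}

/* The last port to drop its reference stops the shared thread. */
static void demo_chip_put(struct demo_chip *chip)
{
	if (atomic_dec_and_test(&chip->ref_cnt)) {
		chip->thread_exit = true;
		wake_up_process(chip->thread);
		kthread_stop(chip->thread);
		kfree(chip);
	}
}

The sketch keeps the driver's design choice of waking the handler outside the list lock and of combining an exit flag with kthread_stop() so the thread terminates promptly whether it is sleeping or processing; the real teardown in unf_destroy_card_thread() additionally completes any synchronous events still queued for the departing port before releasing the thread.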
+static unsigned int unf_creat_chip_thread(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct unf_chip_manage_info_s *chip_info = NULL; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + /* If the thread cannot be found, apply for a new thread. */ + chip_info = (struct unf_chip_manage_info_s *)vmalloc( + sizeof(struct unf_chip_manage_info_s)); + if (!chip_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) cannot allocate thread memory.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(chip_info, 0, sizeof(struct unf_chip_manage_info_s)); + + memcpy(&chip_info->chip_info, &v_lport->low_level_func.chip_info, + sizeof(struct unf_chip_info_s)); + chip_info->slot_id = + UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id); + chip_info->chip_id = v_lport->low_level_func.chip_id; + chip_info->list_num = 0; + chip_info->sfp_9545_fault = UNF_FALSE; + chip_info->sfp_power_fault = UNF_FALSE; + atomic_set(&chip_info->ref_cnt, 1); + atomic_set(&chip_info->card_loop_test_flag, UNF_FALSE); + spin_lock_init(&chip_info->card_loop_back_state_lock); + INIT_LIST_HEAD(&chip_info->list_head); + spin_lock_init(&chip_info->chip_event_list_lock); + + chip_info->b_thread_exit = UNF_FALSE; + chip_info->data_thread = + kthread_create(unf_card_event_process, chip_info, + "%x_et", v_lport->port_id); + + if (IS_ERR(chip_info->data_thread) || + (!chip_info->data_thread)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) creat event thread(0x%p) unsuccessful.", + v_lport->port_id, chip_info->data_thread); + + vfree(chip_info); + + return UNF_RETURN_ERROR; + } + + v_lport->chip_info = chip_info; + wake_up_process(chip_info->data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) creat slot(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, chip_info->chip_id); + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_add_tail(&chip_info->list_chip_thread_entry, + &card_thread_mgr.list_card_list_head); + card_thread_mgr.card_sum++; + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); + + return RETURN_OK; +} + +static unsigned int unf_find_chip_thread(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_chip_manage_info_s *chip_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_for_each_safe(node, next_node, + &card_thread_mgr.list_card_list_head) { + chip_info = list_entry(node, struct unf_chip_manage_info_s, + list_chip_thread_entry); + + if ((chip_info->chip_id == v_lport->low_level_func.chip_id) && + (chip_info->slot_id == UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id))) { + atomic_inc(&chip_info->ref_cnt); + v_lport->chip_info = chip_info; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_MAJOR, + "Port(0x%x) find card(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, + chip_info->chip_id); + + spin_unlock_irqrestore( + &card_thread_mgr.global_card_list_lock, flag); + + return RETURN_OK; + } + } + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); + + ret = unf_creat_chip_thread(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) creat event thread unsuccessful. 
Destroy LPort.", + v_lport->port_id); + return UNF_RETURN_ERROR; + } else { + return RETURN_OK; + } +} + +static int unf_cm_get_mac_adr(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_chip_info_argout *chp_info = NULL; + + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)argc_in; + chp_info = (struct unf_get_chip_info_argout *)argc_out; + + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + " LPort is null."); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_MAC_ADDR, chp_info) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) get .", lport->port_id); + + return UNF_RETURN_ERROR; + } + return RETURN_OK; +} + +static unsigned int unf_build_lport_wwn(struct unf_lport_s *v_lport) +{ + struct unf_get_chip_info_argout v_wwn = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2403, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + ret = (unsigned int)unf_send_event(v_lport->port_id, + UNF_EVENT_SYN, + (void *)v_lport, + (void *)&v_wwn, + unf_cm_get_mac_adr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "UNF_BuildSysWwn SendEvent(UNF_PortGetMacAdr) fail."); + + return UNF_RETURN_ERROR; + } + + /* save card mode: UNF_FC_SERVER_BOARD_32_G(6):32G; + * UNF_FC_SERVER_BOARD_16_G(7):16G MODE + */ + v_lport->card_type = v_wwn.board_type; + + /* update port max speed */ + if (v_wwn.board_type == UNF_FC_SERVER_BOARD_32_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; + else if (v_wwn.board_type == UNF_FC_SERVER_BOARD_16_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_16_G; + else if (v_wwn.board_type == UNF_FC_SERVER_BOARD_8_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_8_G; + else + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; + + return RETURN_OK; +} + +void *unf_lport_create_and_init( + void *private_data, + struct unf_low_level_function_op_s *low_level_op) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + if (!private_data) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Private Data is NULL"); + + return NULL; + } + if (!low_level_op) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LowLevel port(0x%p) function is NULL", private_data); + + return NULL; + } + + /* 1. vmalloc & Memset L_Port */ + lport = vmalloc(sizeof(struct unf_lport_s)); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Alloc LPort memory failed."); + + return NULL; + } + memset(lport, 0, sizeof(struct unf_lport_s)); + + /* 2. L_Port Init */ + if (unf_lport_init(lport, private_data, low_level_op) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort initialize unsuccessful."); + + vfree(lport); + + return NULL; + } + + /* 4. 
Get or Create Chip Thread Chip_ID & Slot_ID */ + ret = unf_find_chip_thread(lport); + if (ret != RETURN_OK) { + (void)unf_lport_deinit(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) Find Chip thread unsuccessful. Destroy LPort.", + lport->port_id); + + vfree(lport); + return NULL; + } + + /* 5. Registers with in the port management global linked list */ + unf_port_register(lport); + /* update WWN */ + if (unf_build_lport_wwn(lport) != RETURN_OK) { + unf_port_unregister(lport); + (void)unf_lport_deinit(lport); + vfree(lport); + return NULL; + } + + unf_init_link_lose_tmo(lport); + + /* initialize Scsi Host */ + if (unf_register_scsi_host(lport) != RETURN_OK) { + unf_port_unregister(lport); + (void)unf_lport_deinit(lport); + vfree(lport); + return NULL; + } + + /* 7. Here, start work now */ + if (global_lport_mgr.b_start_work == UNF_TRUE) { + if (unf_port_start_work(lport) != RETURN_OK) { + unf_port_unregister(lport); + + (void)unf_lport_deinit(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Port(0x%x) start work failed", + lport->port_id); + vfree(lport); + return NULL; + } + } + + UNF_REFERNCE_VAR(lport); + return lport; +} + +static int unf_lport_destroy(void *v_lport, void *v_arg_out) +{ + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort is NULL."); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(v_arg_out); + + lport = (struct unf_lport_s *)v_lport; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "Destroy LPort(0x%p), ID(0x%x).", + lport, lport->port_id); + + /* NPIV Ensure that all Vport are deleted */ + unf_destroy_all_vports(lport); + + lport->destroy_step = UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT; + + (void)unf_lport_deinit(v_lport); + + /* The port is removed from the destroy linked list. 
+ * The next step is to release the memory + */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_del(&lport->entry_lport); + + /* If the port has dirty memory, the port is mounted to the + * linked list of dirty ports + */ + if (lport->dirty_flag) + list_add_tail(&lport->entry_lport, + &global_lport_mgr.list_dirty_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + if (lport->lport_free_completion) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Complete LPort(0x%p), port ID(0x%x)'s Free Completion.", + lport, lport->port_id); + complete(lport->lport_free_completion); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%p), port ID(0x%x)'s Free Completion is NULL.", + lport, lport->port_id); + dump_stack(); + } + + return RETURN_OK; +} + +unsigned int unf_lport_refinc(struct unf_lport_s *v_lport) +{ + unsigned long lport_flags = 0; + + UNF_CHECK_VALID(0x2208, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_read(&v_lport->lport_ref_cnt) <= 0) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, + lport_flags); + + return UNF_RETURN_ERROR; + } + + atomic_inc(&v_lport->lport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%p) port_id(0x%x) reference count is %d", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + + return RETURN_OK; +} + +void unf_lport_ref_dec(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + unsigned long lport_flags = 0; + + UNF_CHECK_VALID(0x2209, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "LPort(0x%p), port ID(0x%x), reference count is %d.", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + + /* attaches the lport to the destroy linked list for dfx */ + list_add_tail(&v_lport->entry_lport, + &global_lport_mgr.list_destroy_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + (void)unf_lport_destroy(v_lport, NULL); + } else { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + } +} + +static int unf_reset_port(void *v_arg_in, void *v_arg_out) +{ + struct unf_reset_port_argin *arg_in = + (struct unf_reset_port_argin *)v_arg_in; + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_RESET; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x2262, UNF_TRUE, arg_in, return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(arg_in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", arg_in->port_id); + + return UNF_RETURN_ERROR; + } + + /* reset port */ + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return 
UNF_RETURN_ERROR; + } + + lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + lport->speed = UNF_PORT_SPEED_UNKNOWN; + lport->fabric_node_name = 0; + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_PORT_STATE, (void *)&port_state); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Reset port(0x%x) unsuccessful.", lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_sfp_switch(unsigned int v_port_id, int v_turn_on) +{ + struct unf_lport_s *lport = NULL; + int turn_on = v_turn_on; + int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + if (global_lport_mgr.b_start_work == UNF_FALSE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) not start work, ignored command:turn %s.", + v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF"); + + return RETURN_OK; + } + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Not find LPort(0x%x).", v_port_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_start_work_state != UNF_START_WORK_COMPLETE) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]LPort(0x%x) not start work, ignored command:turn %s.", + v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF"); + + return RETURN_OK; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x)'s corresponding function is NULL.", + v_port_id); + + return UNF_RETURN_ERROR; + } + + ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_SFP_SWITCH, + (void *)&turn_on); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) switch SFP+ %s unsuccessful.", + v_port_id, v_turn_on ? 
"On" : "Off"); + + return UNF_RETURN_ERROR; + } + + lport->b_switch_state = (enum int_e)turn_on; + + return RETURN_OK; +} + +static int unf_sfp_switch_event(void *v_argc_in, void *v_argc_out) +{ + struct unf_set_sfp_argin *in = (struct unf_set_sfp_argin *)v_argc_in; + + UNF_REFERNCE_VAR(v_argc_out); + UNF_CHECK_VALID(0x2267, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + + return unf_sfp_switch(in->port_id, in->turn_on); +} + +int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on) +{ + struct unf_set_sfp_argin in = { 0 }; + + in.port_id = v_port_id; + in.turn_on = v_bturn_on; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_sfp_switch_event); +} + +static int unf_get_port_speed(void *v_argc_in, void *v_argc_out) +{ + unsigned int *speed = (unsigned int *)v_argc_out; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + struct unf_lport_s *lport = NULL; + int ret = 0; + unsigned int port_id = *(unsigned int *)v_argc_in; + + UNF_CHECK_VALID(0x2268, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2269, UNF_TRUE, v_argc_out, return UNF_RETURN_ERROR); + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot Find LPort by (0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + port_mgr = &lport->low_level_func.port_mgr_op; + + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->link_up == UNF_PORT_LINK_UP) + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_SPEED_ACT, (void *)speed); + else + *speed = UNF_PORT_SPEED_UNKNOWN; + + return ret; +} + +static int unf_cm_get_port_speed(unsigned int v_port_id, unsigned int *v_speed) +{ + UNF_CHECK_VALID(0x2270, UNF_TRUE, v_speed, return UNF_RETURN_ERROR); + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)v_speed, unf_get_port_speed); +} + +static int unf_set_port_speed(void *v_argc_in, void *v_argc_out) +{ + unsigned int ret = RETURN_OK; + struct unf_set_speed_argin *in = + (struct unf_set_speed_argin *)v_argc_in; + struct unf_lport_s *lport = NULL; + + UNF_REFERNCE_VAR(v_argc_out); + UNF_CHECK_VALID(0x2271, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + lport = unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot Find LPort by (0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + in->port_id); + + return UNF_RETURN_ERROR; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_SPEED, (void *)in->speed); + + return (int)ret; +} + +int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed) +{ + struct unf_set_speed_argin in = { 0 }; + + in.port_id = v_port_id; + in.speed = v_speed; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_set_port_speed); +} + +static int unf_get_port_topo(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_topo_argout *out = NULL; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + int ret = UNF_TRUE; + unsigned int port_id = 0; + + UNF_CHECK_VALID(0x2283, 
UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2284, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + port_id = *(unsigned int *)argc_in; + out = (struct unf_get_topo_argout *)argc_out; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + port_mgr = &lport->low_level_func.port_mgr_op; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + port_mgr->pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + if (lport->link_up == UNF_PORT_LINK_UP) { + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)out->en_act_topo); + if (ret != RETURN_OK) + return ret; + + } else { + *out->en_act_topo = UNF_ACT_TOP_UNKNOWN; + } + + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, (void *)out->topo_cfg); + + return ret; +} + +int unf_cm_get_port_topo(unsigned int v_port_id, unsigned int *v_topo_cfg, + enum unf_act_topo_e *v_en_act_topo) +{ + struct unf_get_topo_argout out = { 0 }; + + UNF_CHECK_VALID(0x2286, UNF_TRUE, v_topo_cfg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2287, UNF_TRUE, v_en_act_topo, + return UNF_RETURN_ERROR); + + out.en_act_topo = v_en_act_topo; + out.topo_cfg = v_topo_cfg; + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)&out, unf_get_port_topo); +} + +static int unf_set_port_topo(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_set_topo_argin *in = NULL; + enum int_e *b_arg_out = (enum int_e *)argc_out; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2257, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2288, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + in = (struct unf_set_topo_argin *)argc_in; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (in->topo == UNF_TOP_LOOP_MASK) || + (in->topo == UNF_TOP_P2P_MASK) || + (in->topo == UNF_TOP_AUTO_MASK), + return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_set, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_TOPO, (void *)&in->topo); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Can't set port topology."); + + return UNF_RETURN_ERROR; + } + + lport->low_level_func.lport_cfg_items.port_topology = in->topo; + *b_arg_out = lport->b_switch_state; + + return RETURN_OK; +} + +int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo) +{ + struct unf_set_topo_argin in = { 0 }; + int ret = UNF_RETURN_ERROR; + enum int_e b_switch_state = UNF_FALSE; + + in.port_id = v_port_id; + in.topo = v_topo; + + ret = unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)&b_switch_state, unf_set_port_topo); + + return ret; +} + +int unf_set_port_bbscn(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_set_bbscn_argin *in = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_REFERNCE_VAR(argc_out); + UNF_CHECK_VALID(0x2300, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + in = (struct unf_set_bbscn_argin *)argc_in; + + lport = 
unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_set, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_BBSCN, (void *)&in->bb_scn); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot set port BB_SC_N."); + + return UNF_RETURN_ERROR; + } + + /* update bbsn cfg to Lport */ + lport->low_level_func.lport_cfg_items.bb_scn = in->bb_scn; + + return RETURN_OK; +} + +int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn) +{ + struct unf_set_bbscn_argin in = { 0 }; + + in.port_id = v_port_id; + in.bb_scn = v_bbscn; + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_set_port_bbscn); +} + +unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport, + struct unf_err_code_s *v_fc_err_code) +{ + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + struct unf_lport_s *lport = v_lport; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_err_code_s fc_err_code; + + UNF_CHECK_VALID(0x2328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2329, UNF_TRUE, v_fc_err_code, + return UNF_RETURN_ERROR); + + memset(&fc_err_code, 0, sizeof(struct unf_err_code_s)); + + port_mgr = &lport->low_level_func.port_mgr_op; + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port, + UNF_PORT_CFG_GET_LESB_THEN_CLR, (void *)&fc_err_code); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + if (lport->link_up != UNF_PORT_LINK_UP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "LPort(0x%x) is not link up.", lport->port_id); + memcpy(v_fc_err_code, &lport->err_code_sum, + sizeof(struct unf_err_code_s)); + + return RETURN_OK; + } + + lport->err_code_sum.bad_rx_char_count += fc_err_code.bad_rx_char_count; + lport->err_code_sum.link_fail_count += fc_err_code.link_fail_count; + lport->err_code_sum.loss_of_signal_count += + fc_err_code.loss_of_signal_count; + lport->err_code_sum.loss_of_sync_count += + fc_err_code.loss_of_sync_count; + lport->err_code_sum.proto_error_count += fc_err_code.proto_error_count; + + lport->err_code_sum.rx_eo_fa_count = fc_err_code.rx_eo_fa_count; + lport->err_code_sum.dis_frame_count = fc_err_code.dis_frame_count; + lport->err_code_sum.bad_crc_count = fc_err_code.bad_crc_count; + + memcpy(v_fc_err_code, &lport->err_code_sum, + sizeof(struct unf_err_code_s)); + + return RETURN_OK; +} + +static int unf_clear_port_error_code_sum(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + unsigned int port_id = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2331, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(argc_out); + + port_id = *(unsigned int *)argc_in; + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, 
UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + port_id); + + return UNF_RETURN_ERROR; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + UNF_PORT_CFG_CLR_LESB, NULL); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + memset(&lport->err_code_sum, 0, sizeof(struct unf_err_code_s)); + + return RETURN_OK; +} + +int unf_cm_clear_port_error_code_sum(unsigned int v_port_id) +{ + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)NULL, unf_clear_port_error_code_sum); +} + +static int unf_update_lport_sfp_info(struct unf_lport_s *v_lport, + enum unf_port_config_get_op_e v_type) +{ + struct unf_lport_s *lport = NULL; + int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2332, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = v_lport; + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = (int)(lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + v_type, (void *)&lport->sfp_info)); + + return ret; +} + +static int unf_translate_sfp_status(struct unf_lport_s *v_lport, + struct unf_get_sfp_argout *v_out) +{ + struct unf_lport_s *lport = v_lport; + int ret = UNF_RETURN_ERROR; + + switch (lport->sfp_info.status) { + case UNF_SFP_PRESENT_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_NONE; + ret = RETURN_OK; + break; + case UNF_SFP_POWER_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_INVALID; + ret = RETURN_OK; + break; + case UNF_9545_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_INVALID; + ret = RETURN_OK; + break; + default: + *v_out->status = DRV_CABLE_CONNECTOR_BUTT; + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +static void unf_record_chip_fault(struct unf_lport_s *v_lport) +{ +#define UNF_CHIP_FAULT_MAX_CHECK_TIME 3 + + if (v_lport->sfp_info.status == UNF_9545_FAIL) { + /* If there are 9545 fault,explain that the sfp is power on, + * and reset sfp_power_fault_count + */ + v_lport->sfp_power_fault_count = 0; + + if (v_lport->sfp_9545_fault_count < + UNF_CHIP_FAULT_MAX_CHECK_TIME) { + v_lport->sfp_9545_fault_count++; + } else { + v_lport->chip_info->sfp_9545_fault = UNF_TRUE; + v_lport->sfp_9545_fault_count = 0; + } + } else if (v_lport->sfp_info.status == UNF_SFP_POWER_FAIL) { + if (v_lport->sfp_power_fault_count < + UNF_CHIP_FAULT_MAX_CHECK_TIME) { + v_lport->sfp_power_fault_count++; + } else { + v_lport->chip_info->sfp_power_fault = UNF_TRUE; + v_lport->sfp_power_fault_count = 0; + } + } +} + +int unf_check_sfp_tx_fault(struct unf_lport_s *v_lport, + struct unf_sfp_info_s *v_sfp_info) +{ + /* 24 hours ms(24*60*60*1000) */ +#define UNF_SFP_TXFALT_RECOVER_INTERVEL 86400000 + + struct unf_sfp_info_s *sfp_info = NULL; + struct unf_lport_s *lport = NULL; + + sfp_info = v_sfp_info; + lport = v_lport; + + if (sfp_info->sfp_info_a2.diag.status_ctrl.tx_fault_state == 0) + return RETURN_OK; + + /* Repair conditions: + * 1 port linkdown; + * 2 from the last repair more than 24 hours; + * 3 sfp is on + */ + if ((lport->link_up == UNF_PORT_LINK_DOWN) && + (lport->b_switch_state) && + ((lport->last_tx_fault_jif == 0) || + (jiffies_to_msecs(jiffies - lport->last_tx_fault_jif) > + UNF_SFP_TXFALT_RECOVER_INTERVEL))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "LPort(0x%x) stat(0x%x) jiff(%ld) lastjiff(%llu) Ctrl(0x%x) TxFault set 1.", + 
lport->port_id, lport->link_up, jiffies, + lport->last_tx_fault_jif, + *((unsigned char *) + &sfp_info->sfp_info_a2.diag.status_ctrl)); + + lport->last_tx_fault_jif = jiffies; + (void)unf_sfp_switch(lport->port_id, UNF_FALSE); + msleep(100); + + /* Around quickly switch port FW state error problem */ + (void)unf_sfp_switch(lport->port_id, UNF_TRUE); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_get_sfp_info(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_sfp_argout *out = NULL; + unsigned int port_id = 0; + int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2333, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2334, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + port_id = *(unsigned int *)argc_in; + out = (struct unf_get_sfp_argout *)argc_out; + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + lport->sfp_info.status = 0; + + ret = unf_update_lport_sfp_info(lport, UNF_PORT_CFG_GET_SFP_INFO); + + if (ret == RETURN_OK) { + lport->sfp_power_fault_count = 0; + lport->sfp_9545_fault_count = 0; + *out->status = DRV_CABLE_CONNECTOR_OPTICAL; + if (unf_check_sfp_tx_fault( + lport, + &lport->sfp_info.sfp_eeprom_info.sfp_info) == + UNF_RETURN_ERROR) { + return UNF_RETURN_ERROR; + } + + memcpy(out->sfp_info, &lport->sfp_info.sfp_eeprom_info, + sizeof(union unf_sfp_eeprome_info)); + ret = RETURN_OK; + } else { + ret = unf_translate_sfp_status(lport, out); + + unf_record_chip_fault(lport); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x)'s getsfpinfo fail, sfp status(0x%x).", + lport->port_id, lport->sfp_info.status); + } + + return ret; +} + +int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status, + union unf_sfp_eeprome_info *v_sfp_info, + unsigned int *sfp_type) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_sfp_argout out = { 0 }; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) + return UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2335, UNF_TRUE, v_status, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2336, UNF_TRUE, v_sfp_info, return UNF_RETURN_ERROR); + + out.status = v_status; + out.sfp_info = v_sfp_info; + + if (global_lport_mgr.b_start_work == UNF_FALSE) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) have not start work, return.", v_port_id); + return UNF_RETURN_ERROR; + } + + *sfp_type = lport->low_level_func.sfp_type; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)&out, unf_get_sfp_info); +} + +int unf_cm_reset_port(unsigned int v_port_id) +{ + int ret = UNF_RETURN_ERROR; + + ret = unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)NULL, unf_reset_port); + return ret; +} + +int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag) +{ + UNF_CHECK_VALID(0x2352, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + return unf_send_event(v_lport->port_id, v_flag, + (void *)&v_lport->port_id, + (void *)NULL, + unf_reset_port); +} + +static inline unsigned int unf_get_loop_alpa(struct unf_lport_s *v_lport, + void *v_loop_alpa) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2357, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LOOP_ALPA, 
v_loop_alpa); + return ret; +} + +static unsigned int unf_lport_enter_private_loop_login( + struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2358, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY); + /* LPort: LINK_UP --> READY */ + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_lport_update_topo(lport, UNF_ACT_TOP_PRIVATE_LOOP); + + /* NOP: check L_Port state */ + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) is NOP, do nothing", + lport->port_id); + + return RETURN_OK; + } + + /* INI: check L_Port mode */ + if ((lport->options & UNF_PORT_MODE_INI) != UNF_PORT_MODE_INI) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has no INI feature(0x%x), do nothing", + lport->port_id, lport->options); + + return RETURN_OK; + } + + if (lport->disc.unf_disc_temp.pfn_unf_disc_start) { + ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) with nportid(0x%x) start discovery failed", + lport->port_id, lport->nport_id); + } + } + + return ret; +} + +unsigned int unf_lport_login(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_en_act_topo) +{ + unsigned int loop_alpa = 0; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2359, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* 1. Update (set) L_Port topo which get from low level */ + unf_lport_update_topo(v_lport, v_en_act_topo); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* 2. Link state check */ + if (v_lport->link_up != UNF_PORT_LINK_UP) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with link_state(0x%x) port_state(0x%x) when login", + v_lport->port_id, v_lport->link_up, + v_lport->en_states); + + return UNF_RETURN_ERROR; + } + + /* 3. Update L_Port state */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_LINK_UP); + /* LPort: INITIAL --> LINK UP */ + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) start to login with topology(0x%x)", + v_lport->port_id, v_lport->en_act_topo); + + /* 4. 
Start logoin */ + if ((v_en_act_topo == UNF_TOP_P2P_MASK) || + (v_en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + /* P2P or Fabric mode */ + ret = unf_lport_enter_flogi(v_lport); + } else if (v_en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { + /* Public loop */ + (void)unf_get_loop_alpa(v_lport, &loop_alpa); + + /* Before FLOGI ALPA just low 8 bit after FLOGI ACC switch + * will assign complete addresses + */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = loop_alpa; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + ret = unf_lport_enter_flogi(v_lport); + } else if (v_en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private loop */ + (void)unf_get_loop_alpa(v_lport, &loop_alpa); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = loop_alpa; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + ret = unf_lport_enter_private_loop_login(v_lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]LOGIN: Port(0x%x) login with unknown topology(0x%x)", + v_lport->port_id, v_lport->en_act_topo); + } + + return ret; +} + +static unsigned int unf_port_link_up(struct unf_lport_s *v_lport, + void *v_in_put) +{ + struct unf_lport_s *lport = v_lport; + unsigned int ret = RETURN_OK; + enum unf_act_topo_e en_act_topo = UNF_ACT_TOP_UNKNOWN; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2361, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + + /* If NOP state, stop */ + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) is NOP and do nothing", + lport->port_id); + + return RETURN_OK; + } + + /* Update port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport->link_up = UNF_PORT_LINK_UP; + lport->speed = *((unsigned int *)v_in_put); + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + /* INITIAL state */ + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* set hot pool wait state: so far, do not care */ + unf_set_hot_pool_wait_state(lport, UNF_TRUE); + + lport->enhanced_features |= UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE; + + /* Get port active topopolgy (from low level) */ + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) get topo function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)&en_act_topo); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) get topo from low level failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Start Login process */ + ret = unf_lport_login(lport, en_act_topo); + + unf_report_io_dm_event(lport, UNF_PORT_LINK_UP, 0); + return ret; +} + +static unsigned int unf_port_link_down(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned long flag = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + lport = v_lport; + unf_report_io_dm_event(lport, UNF_PORT_LINK_DOWN, 0); + + /* To prevent repeated reporting linkdown */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport->speed = UNF_PORT_SPEED_UNKNOWN; + lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + if (lport->link_up == 
UNF_PORT_LINK_DOWN) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + return RETURN_OK; + } + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN); + unf_reset_lport_params(lport); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_set_hot_pool_wait_state(lport, UNF_FALSE); + + /* + * clear I/O: + * 1. INI do ABORT only, + * for INI: busy/delay/delay_transfer/wait + * Clean L_Port/V_Port Link Down I/O: only set ABORT tag + */ + unf_flush_disc_event(&lport->disc, NULL); + + unf_clean_link_down_io(lport, UNF_FALSE); + + /* for L_Port's R_Ports */ + unf_clean_linkdown_rport(lport); + /* for L_Port's all Vports */ + unf_linkdown_all_vports(v_lport); + return RETURN_OK; +} + +static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + lport = v_lport; + + ret = (unsigned int)unf_lport_reset_port(lport, UNF_EVENT_ASYN); + + return ret; +} + +static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2364, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_set_lport_state(v_lport, UNF_LPORT_ST_RESET); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) begin to reset.", v_lport->port_id); + + return ret; +} + +static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2365, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) reset end.", v_lport->port_id); + + /* Task management command returns success and avoid + * repair measures case offline device + */ + unf_wakeup_scsi_task_cmnd(v_lport); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; +} + +static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2366, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + lport = v_lport; + + atomic_set(&lport->port_no_operater_flag, UNF_LPORT_NOP); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN); + unf_reset_lport_params(lport); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Set Tag prevent pending I/O to wait_list when close sfp failed */ + unf_set_hot_pool_wait_state(lport, UNF_FALSE); + + unf_flush_disc_event(&lport->disc, NULL); + + /* L_Port/V_Port's I/O(s): Clean Link Down I/O: Set Abort Tag */ + unf_clean_link_down_io(lport, UNF_FALSE); + + /* L_Port/V_Port's R_Port(s): report link down event to + * scsi & clear resource + */ + unf_clean_linkdown_rport(lport); + unf_linkdown_all_vports(lport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) report NOP event done", + lport->nport_id); + + return RETURN_OK; +} + +static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport, + void *v_in_put) +{ + 
UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + /* when port reset,delte delete all Rport immediately, + * in order to remove immediately for resources + */ + unf_clean_linkdown_rport(v_lport); + + return RETURN_OK; +} + +static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport, + void *v_in_put) +{ + UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + /* Cancel route timer delay work */ + unf_destroy_lport_route(v_lport); + + return RETURN_OK; +} + +static unsigned int unf_get_pcie_link_state(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + int link_state = UNF_TRUE; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2257, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_PCIE_LINK_STATE, (void *)&link_state); + if (ret != RETURN_OK || link_state != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[err]Can't Get Pcie Link State"); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void unf_root_lport_ref_dec(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + unsigned long lport_flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2385, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%p) port_id(0x%x) reference count is %d", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + + /* Put L_Port to destroy list for debuging */ + list_add_tail(&v_lport->entry_lport, + &global_lport_mgr.list_destroy_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_schedule_global_event((void *)v_lport, + UNF_GLOBAL_EVENT_ASYN, + unf_lport_destroy); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_CRITICAL, + "[warn]Schedule global event faile. 
remain nodes(0x%x)", + global_event_queue.list_number); + } else { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + } +} + +void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport) +{ + if (v_lport->root_lport != v_lport) + unf_vport_ref_dec(v_lport); + else + unf_root_lport_ref_dec(v_lport); +} + +void unf_lport_route_work(struct work_struct *v_work) +{ +#define MAX_INTERVAL_TIMES 60 + + struct unf_lport_s *lport = NULL; + int ret = 0; + struct unf_err_code_s fc_err_code; + + UNF_CHECK_VALID(0x2388, UNF_TRUE, v_work, return); + + lport = container_of(v_work, struct unf_lport_s, route_timer_work.work); + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, + UNF_KEVENT, "[err]LPort is NULL"); + + return; + } + + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) route work is closing.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + + if (unlikely(unf_get_pcie_link_state(lport))) + lport->pcie_link_down_cnt++; + else + lport->pcie_link_down_cnt = 0; + + if (lport->pcie_link_down_cnt >= 3) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) detected pcie linkdown, closing route work", + lport->port_id); + lport->b_pcie_linkdown = UNF_TRUE; + unf_free_lport_all_xchg(lport); + unf_lport_ref_dec_to_destroy(lport); + return; + } + + if (unlikely(UNF_LPORT_CHIP_ERROR(lport))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) reported chip error, closing route work. ", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + + if (lport->enhanced_features & + UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]User close LPort(0x%x) route work. 
", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + if (atomic_read(&lport->err_code_obtain_freq) == 0) { + memset(&fc_err_code, 0, sizeof(struct unf_err_code_s)); + unf_get_error_code_sum(lport, &fc_err_code); + atomic_inc(&lport->err_code_obtain_freq); + } else if (atomic_read(&lport->err_code_obtain_freq) == + MAX_INTERVAL_TIMES) { + atomic_set(&lport->err_code_obtain_freq, 0); + } else { + atomic_inc(&lport->err_code_obtain_freq); + } + /* Scheduling 1 second */ + ret = queue_delayed_work( + unf_work_queue, &lport->route_timer_work, + (unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); + if (ret == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) schedule work unsuccessful.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + } +} + +int unf_cm_get_port_info(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_port_info_argout *port_info = NULL; + + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)argc_in; + port_info = (struct unf_get_port_info_argout *)argc_out; + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_PORT_INFO, port_info) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) get current info failed.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int unf_get_lport_current_info(struct unf_lport_s *v_lport) +{ + struct unf_get_port_info_argout port_info = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2403, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(v_lport->port_id); + + if (!lport) + return UNF_RETURN_ERROR; + + ret = (unsigned int)unf_send_event(lport->port_id, UNF_EVENT_SYN, + (void *)lport, + (void *)&port_info, + unf_cm_get_port_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "UNF_GetPortCurrentInfo SendEvent(unf_cm_get_port_info) fail."); + + return UNF_RETURN_ERROR; + } + + lport->low_level_func.sfp_speed = port_info.sfp_speed; + + return RETURN_OK; +} + +int unf_set_link_lose_tmo_to_up(struct unf_lport_s *v_lport, + struct unf_flash_link_tmo_s *v_link_tmo) +{ + int ret = UNF_RETURN_ERROR; + struct unf_flash_data_s flash_data; + + if ((!v_lport) || (!v_link_tmo) || + (sizeof(struct unf_flash_data_s) > HIFC_FLASH_DATA_MAX_LEN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo param check fail"); + return ret; + } + memset(&flash_data, 0, sizeof(struct unf_flash_data_s)); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]link tmo fun null"); + return ret; + } + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get link tmo to up fail"); + return ret; + } + + memcpy(&flash_data.link_tmo, v_link_tmo, 
HIFC_FLASH_LINK_TMO_MAX_LEN); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo fun null"); + return ret; + } + + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + v_lport->fc_port, UNF_PORT_CFG_SET_FLASH_DATA_INFO, + &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo to up fail"); + return ret; + } + ret = RETURN_OK; + + return ret; +} + +int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out) +{ + struct unf_flash_link_tmo_s flash_link_tmo; + int ret = UNF_RETURN_ERROR; + unsigned int link_tmo = (unsigned int)time_out; + + memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s)); + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_KEVENT, "[warn]set link tmo lport null"); + return ret; + } + + /* 1. update gloabl var */ + if ((int)atomic_read(&v_lport->link_lose_tmo) == time_out) + return RETURN_OK; + + atomic_set(&v_lport->link_lose_tmo, time_out); + + flash_link_tmo.writeflag = HIFC_MGMT_TMO_MAGIC_NUM; + flash_link_tmo.link_tmo0 = (unsigned char)link_tmo; + flash_link_tmo.link_tmo1 = (unsigned char)(link_tmo >> 8); + flash_link_tmo.link_tmo2 = (unsigned char)(link_tmo >> 16); + flash_link_tmo.link_tmo3 = (unsigned char)(link_tmo >> 24); + + /* 2. write to up */ + ret = unf_set_link_lose_tmo_to_up(v_lport, &flash_link_tmo); + + return ret; +} + +int unf_set_link_lose_tmo_to_all(int time_out) +{ + int ret = RETURN_OK; + struct list_head list_lport_tmp_head; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + INIT_LIST_HEAD(&list_lport_tmp_head); + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + list_del_init(&lport->entry_lport); + list_add_tail(&lport->entry_lport, &list_lport_tmp_head); + (void)unf_lport_refinc(lport); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + while (!list_empty(&list_lport_tmp_head)) { + node = (&list_lport_tmp_head)->next; + lport = list_entry(node, struct unf_lport_s, entry_lport); + if (lport) + unf_set_link_lose_tmo(lport, time_out); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + list_del_init(&lport->entry_lport); + list_add_tail(&lport->entry_lport, + &global_lport_mgr.list_lport_list_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + unf_lport_ref_dec_to_destroy(lport); + } + + return ret; +} + +static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long flags = 0; + unsigned int aborted = 0; + unsigned int ini_busy = 0; + unsigned int tgt_busy = 0; + unsigned int delay = 0; + unsigned int free = 0; + unsigned int wait = 0; + unsigned int sfs_free = 0; + unsigned int sfs_busy = 0; + unsigned int i; + struct unf_adm_xchg *buff_out = NULL; + + buff_out = (struct unf_adm_xchg *)v_input->buff_out; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_xchg), return UNF_RETURN_ERROR); + + for (i = 0; i < 
UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (!xchg_mgr) + continue; + + if (!xchg_mgr->hot_pool) + continue; + + /* hot Xchg */ + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + flags); + + UNF_TRACE(0x2659, UNF_LOG_NORMAL, UNF_INFO, "ini busy :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->ini_busylist) { + ini_busy++; + + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(0x2660, UNF_LOG_NORMAL, UNF_INFO, + "0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->seq_id, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + + UNF_TRACE(0x2665, UNF_LOG_NORMAL, UNF_INFO, "SFS Busy:"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->sfs_busylist) { + sfs_busy++; + + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(0x2666, UNF_LOG_NORMAL, UNF_INFO, + "0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->seq_id, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + flags); + + /* free Xchg */ + spin_lock_irqsave(&xchg_mgr->free_pool.xchg_free_pool_lock, + flags); + + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->free_pool.list_free_xchg_list) { + free++; + } + + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->free_pool.list_sfs_xchg_list) { + sfs_free++; + } + spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_free_pool_lock, + flags); + + ret = RETURN_OK; + } + + buff_out->aborted = aborted; + buff_out->ini_busy = ini_busy; + buff_out->tgt_busy = tgt_busy; + buff_out->delay = delay; + buff_out->free = free; + buff_out->wait = wait; + buff_out->sfs_free = sfs_free; + buff_out->sfs_busy = sfs_busy; + UNF_REFERNCE_VAR(xchg); + return ret; +} + +static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + int time_out = 0; + struct unf_link_tmo_opt_s *buff_in = NULL; + struct unf_link_tmo_opt_s *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return RETURN_ERROR); + + buff_in = (struct unf_link_tmo_opt_s *)(v_input->buff_in); + buff_out = (struct unf_link_tmo_opt_s *)(v_input->buff_out); + + msg_head.status = UNF_ADMIN_MSG_DONE; + msg_head.size = sizeof(struct unf_admin_msg_head); + if (buff_in->link_opt) { + /* set link tmo value */ + time_out = unf_get_link_lose_tmo(v_lport); + /* compatible for PI2 branch tool (not release)not + * include syncAllPort section + */ + if (v_input->in_size > 16) { + if (buff_in->sync_all_port) + /* sync to all other lport */ + unf_set_link_lose_tmo_to_all(buff_in->tmo_value); + else + unf_set_link_lose_tmo(v_lport, + buff_in->tmo_value); + + buff_out->sync_all_port = 1; + } else { + unf_set_link_lose_tmo_to_all(buff_in->tmo_value); + } + + buff_out->link_opt = 1; + + /* return orige 
value */ + buff_out->tmo_value = time_out; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_KEVENT, + "[info]set fc port(0x%0x)link tmo value(%d -> %d) success .", + v_lport->nport_id, time_out, buff_out->tmo_value); + } else { + /* get link tmo value */ + buff_out->tmo_value = unf_get_link_lose_tmo(v_lport); + buff_out->link_opt = 0; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "get fc port(0x%0x) link tmo value(%d) success .", + v_lport->nport_id, buff_out->tmo_value); + } + *v_input->out_size = v_input->in_size; + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + unsigned int log_level = 0; + unsigned int log_count = 0; + struct unf_log_level_opt_s *buff_in = NULL; + struct unf_log_level_opt_s *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_log_level_opt_s), + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= + sizeof(struct unf_log_level_opt_s), + return RETURN_ERROR); + + buff_in = (struct unf_log_level_opt_s *)(v_input->buff_in); + buff_out = (struct unf_log_level_opt_s *)(v_input->buff_out); + + msg_head.status = UNF_ADMIN_MSG_DONE; + msg_head.size = sizeof(struct unf_admin_msg_head); + if (buff_in->log_opt) { + /* set log level value */ + log_level = log_print_level; + log_count = log_limted_times; + log_print_level = buff_in->log_level; + log_limted_times = buff_in->log_fre_qunce; + buff_out->log_opt = 1; + /* return orige value */ + + buff_out->log_level = log_level; + buff_out->log_fre_qunce = log_count; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "set fc log level(%u -> %u), frenqunce(%u -> %u)in 2s success .", + log_level, log_print_level, log_count, + log_limted_times); + } else { + /* get link tmo value */ + buff_out->log_level = log_print_level; + buff_out->log_fre_qunce = log_limted_times; + buff_out->log_opt = 0; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "get fc log level(%u),frenqunce(%u) in 2s success .", + buff_out->log_level, buff_out->log_fre_qunce); + } + *v_input->out_size = sizeof(struct unf_log_level_opt_s); + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id, + unsigned int *v_link_delay) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + int ret = RETURN_OK; + unsigned int index = 0; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "fcping request failed [invalid source lport (0x%x)].\n", + v_port_id); + + return UNF_RETURN_ERROR; + } + + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if ((!rport) || (v_nport_id == UNF_FC_FID_FLOGI)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "fcping request failed [invalid destination rport(0x%x)].\n", + v_nport_id); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_ECHO_SEND_MAX_TIMES; index++) { + ret = (int)unf_send_echo(lport, rport, v_link_delay); + if (ret != RETURN_OK) { + *v_link_delay = 0xffffffff; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "fcping request failed 
[lport(0x%x)-> rport(0x%x)].\n", + v_port_id, v_nport_id); + + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "fcping request succeed within %u us [lport(0x%x)->rport(0x%x)].\n", + *(unsigned int *)v_link_delay, v_port_id, + v_nport_id); + } + + msleep(1000); + } + + return ret; +} + +static int unf_cm_link_delay_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int link_delay = 0xffffffff; + unsigned int nport_id = 0xffffff; + unsigned int port_id = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)(v_input->buff_in); + buff_out = (struct unf_adm_cmd *)(v_input->buff_out); + port_id = v_lport->port_id; + nport_id = buff_in->arg[0]; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_echo_test(port_id, nport_id, &link_delay); + if ((ret == RETURN_OK) && (link_delay != 0xffffffff)) { + buff_out->arg[0] = link_delay; + msg_head.size = sizeof(struct unf_admin_msg_head) + + sizeof(unsigned int) * 1; + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport, + void *v_input) +{ + unsigned int index = INVALID_VALUE32; + unsigned int *rport_index = NULL; + unsigned long flag = 0; + struct unf_rport_pool_s *rport_pool = NULL; + + UNF_CHECK_VALID(0x2370, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + if (v_input) { + rport_index = (unsigned int *)v_input; + index = *rport_index; + if (index < v_lport->low_level_func.support_max_rport) { + rport_pool = &((struct unf_lport_s *)v_lport->root_lport)->rport_pool; + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, + flag); + if (test_bit((int)index, rport_pool->pul_rpi_bitmap)) + clear_bit((int)index, + rport_pool->pul_rpi_bitmap); + else + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) try to release a free rport index(0x%x)", + v_lport->port_id, index); + + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, + flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[warn]Port(0x%x) try to release a not exist rport index(0x%x)", + v_lport->port_id, index); + } + } + + return RETURN_OK; +} + +void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + lport = lport->root_lport; + vport_pool = lport->vport_pool; + + if (v_nport_id == lport->nport_id) + return lport; + + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + 
+ spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_nport_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_nport_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) has no vport Nport ID(0x%x)", + lport->port_id, v_nport_id); + return NULL; +} + +static int unf_get_port_info(struct unf_lport_s *v_lport, + struct unf_lport_info *v_port_info) +{ + unsigned int act_speed = INVALID_VALUE32; + unsigned int cfg_speed = INVALID_VALUE32; + unsigned int cfg_topo = INVALID_VALUE32; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + struct unf_err_code_s fc_err_code; + unsigned int cfg_led_mode = INVALID_VALUE32; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2205, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2206, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2207, UNF_TRUE, v_lport->fc_port, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID( + 0x2208, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(cfg_speed); + UNF_REFERNCE_VAR(act_topo); + + memset(&fc_err_code, 0, sizeof(fc_err_code)); + + /* get port speed */ + cfg_speed = v_lport->low_level_func.lport_cfg_items.port_speed; + + if (v_lport->link_up == UNF_PORT_LINK_UP) + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_SPEED_ACT, (void *)&act_speed); + else + act_speed = UNF_PORT_SPEED_UNKNOWN; + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_SPEED_CFG, (void *)&cfg_speed); + + if (v_lport->link_up == UNF_PORT_LINK_UP) + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)&act_topo); + else + act_topo = UNF_ACT_TOP_UNKNOWN; + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, (void *)&cfg_topo); + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LED_STATE, (void *)&cfg_led_mode); + + v_port_info->port_id = v_lport->port_id; + v_port_info->options = v_lport->options; + v_port_info->b_start_work = global_lport_mgr.b_start_work; + v_port_info->phy_link = UNF_PORT_LINK_UP; + v_port_info->link_up = v_lport->link_up; + v_port_info->act_speed = act_speed; + v_port_info->cfg_speed = cfg_speed; + v_port_info->port_name = v_lport->port_name; + v_port_info->tape_support = + v_lport->low_level_func.lport_cfg_items.tape_support; + v_port_info->msi = 0; + v_port_info->ini_io_retry_timeout = 0; + v_port_info->support_max_npiv_num = + v_lport->low_level_func.support_max_npiv_num; + v_port_info->act_topo = act_topo; + v_port_info->port_topology = + v_lport->low_level_func.lport_cfg_items.port_topology; + v_port_info->fc_ser_max_speed = + 
v_lport->low_level_func.fc_ser_max_speed; + + if (unf_get_error_code_sum(v_lport, &fc_err_code) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get error code failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + v_port_info->loss_of_signal_count = fc_err_code.loss_of_signal_count; + v_port_info->bad_rx_char_count = fc_err_code.bad_rx_char_count; + v_port_info->loss_of_sync_count = fc_err_code.loss_of_sync_count; + v_port_info->link_fail_count = fc_err_code.link_fail_count; + v_port_info->rx_eo_fa_count = fc_err_code.rx_eo_fa_count; + v_port_info->dis_frame_count = fc_err_code.dis_frame_count; + v_port_info->bad_crc_count = fc_err_code.bad_crc_count; + v_port_info->proto_error_count = fc_err_code.proto_error_count; + v_port_info->chip_type = v_lport->low_level_func.chip_info.chip_type; + v_port_info->cfg_led_mode = cfg_led_mode; + + v_port_info->vport_num = 0; + + vport_pool = v_lport->vport_pool; + if (unlikely(!vport_pool)) + return RETURN_OK; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &v_lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + v_port_info->vport_id[v_port_info->vport_num] = vport->port_id; + + v_port_info->vport_num = v_port_info->vport_num + 1; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + return RETURN_OK; +} + +static int unf_get_vport_info(struct unf_lport_s *v_lport, + unsigned int v_vport_id, + struct unf_lport_info *v_port_info) +{ + unsigned char vport_index = INVALID_VALUE8; + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR); + + vport_index = (v_vport_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT; + if (unlikely(vport_index == 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPortId(0x%x) is not vport", v_vport_id); + + return UNF_RETURN_ERROR; + } + + vport = unf_cm_lookup_vport_by_vp_index(v_lport, vport_index); + if (unlikely(!vport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPortId(0x%x) can not be found", + v_vport_id); + + return UNF_RETURN_ERROR; + } + + v_port_info->port_id = vport->port_id; + v_port_info->port_name = vport->port_name; + v_port_info->nport_id = vport->nport_id; + v_port_info->options = 0; + + return RETURN_OK; +} + +static int unf_get_all_port_info(void *v_arg_in, void *v_arg_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_allinfo_argout *arg_in = NULL; + unsigned int current_len = 0; + struct unf_lport_info *cur_lport_info = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + int ret = UNF_RETURN_ERROR; + unsigned int out_buf_len = 0; + char *out_buf = NULL; + struct hifc_adm_cmd_s *buff_in = NULL; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_arg_out); + + arg_in = (struct unf_get_allinfo_argout *)v_arg_in; + out_buf = (char *)arg_in->out_buf; + buff_in = (struct hifc_adm_cmd_s *)arg_in->in_buf; + lport = (struct unf_lport_s *)arg_in->lport; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, out_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, buff_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, arg_in->in_size >= + sizeof(struct hifc_adm_cmd_s), return UNF_RETURN_ERROR); + + cur_lport_info = vmalloc(sizeof(struct 
unf_lport_info)); + if (!cur_lport_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) malloc memory fail", lport->port_id); + ((struct unf_admin_msg_head *)out_buf)->status = + UNF_ADMIN_MSG_FAILED; + return ret; + } + + memset(cur_lport_info, 0, sizeof(struct unf_lport_info)); + out_buf_len = arg_in->in_size; + msg_head.status = UNF_ADMIN_MSG_DONE; + *arg_in->out_size = out_buf_len; + + /* Storage info */ + current_len += sizeof(struct unf_admin_msg_head); + + if (lport->b_port_removing != UNF_TRUE) { + /* Cmd[3] is Vportid */ + if (buff_in->cmd[3] != 0) { + ret = unf_get_vport_info(lport, buff_in->cmd[3], + cur_lport_info); + } else { + ret = unf_get_port_info(lport, cur_lport_info); + } + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[err]Port(0x%x) get port information error", + lport->port_id); + + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = current_len; + memcpy(out_buf, &msg_head, + sizeof(struct unf_admin_msg_head)); + vfree(cur_lport_info); + return ret; + } + + if (out_buf_len < current_len + sizeof(struct unf_lport_info)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_ERR, + "[warn]Allocated buff size (%u < %lu) is not enough", + out_buf_len, + current_len + sizeof(struct unf_lport_info)); + + /* Compatible for vport: return Lport info + * if tools version is not support npiv + */ + memcpy(out_buf + current_len, cur_lport_info, + out_buf_len - current_len); + + current_len = out_buf_len; + + } else { + memcpy(out_buf + current_len, cur_lport_info, + sizeof(struct unf_lport_info)); + current_len += sizeof(struct unf_lport_info); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) is removing. 
Ref count 0x%x", + lport->port_id, atomic_read(&lport->lport_ref_cnt)); + + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = current_len; + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + vfree(cur_lport_info); + return ret; +} + +static int unf_cm_get_all_port_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + struct unf_get_allinfo_argout out = { 0 }; + int ret = UNF_RETURN_ERROR; + + out.out_buf = v_input->buff_out; + out.in_buf = v_input->buff_in; + out.out_size = v_input->out_size; + out.in_size = v_input->in_size; + out.lport = v_lport; + + ret = (int)unf_schedule_global_event((void *)&out, + UNF_GLOBAL_EVENT_SYN, + unf_get_all_port_info); + + return ret; +} + +static int unf_cm_port_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int mode = 0; /* 1:portreset 2:sfp on/off */ + int turn_on = 0; /* 0:sfp off 1:sfp on */ + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + mode = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if (mode == 1) { + ret = unf_cm_reset_port(port_id); + + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + } else if (mode == 2) { + turn_on = (int)buff_in->arg[1]; + + if ((turn_on == 0) || (turn_on == 1)) { + ret = unf_cm_sfp_switch(port_id, turn_on); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Switch sfp failed. Parameter(0x%x) error", + turn_on); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_topo_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int topo = 0; /* topology set */ + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + topo = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if ((topo == UNF_TOP_AUTO_MASK) || (topo == UNF_TOP_LOOP_MASK) || + (topo == UNF_TOP_P2P_MASK)) { + ret = unf_cm_set_port_topo(port_id, topo); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Set topo failed. 
Parameter(0x%x) error", topo); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_port_speed_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int port_speed = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_lport_s *lport = NULL; + int check_speed_flag = UNF_TRUE; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + lport = v_lport; + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_speed = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + /* get and check sfp speed */ + if (unf_get_lport_current_info(lport) != RETURN_OK) { + msg_head.status = UNF_ADMIN_MSG_FAILED; + lport->low_level_func.sfp_speed = UNF_PORT_SFP_SPEED_ERR; + } + if (UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(lport->low_level_func.sfp_speed, + port_speed)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Set port speed failed. Speed (0x%x) is greater than SfpSpeed (0x%x)", + port_speed, lport->low_level_func.sfp_speed); + msg_head.status = UNF_ADMIN_MSG_FAILED; + check_speed_flag = UNF_FALSE; + } else { + if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_32_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_8_G) || + (port_speed == UNF_PORT_SPEED_16_G) || + (port_speed == UNF_PORT_SPEED_32_G); + } else if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_16_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_4_G) || + (port_speed == UNF_PORT_SPEED_8_G) || + (port_speed == UNF_PORT_SPEED_16_G); + } else if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_8_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_2_G) || + (port_speed == UNF_PORT_SPEED_4_G) || + (port_speed == UNF_PORT_SPEED_8_G); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Board maxspeed is unknown"); + msg_head.status = UNF_ADMIN_MSG_FAILED; + check_speed_flag = UNF_FALSE; + } + } + + if (check_speed_flag) { + ret = unf_cm_set_port_speed(port_id, &port_speed); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_set_vport(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + unsigned int mode = 0; + unsigned int index = 0; + unsigned int high32 = 0x2000286e; + unsigned int low32 = 0; + unsigned long long port_name = 0; + unsigned int port_id = 0; + + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head 
= { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + mode = buff_in->arg[0]; + + switch (mode) { + case 1: + /* create vport with wwpn */ + low32 = buff_in->arg[1]; + port_name = ((unsigned long)high32 << 32) | low32; + + //lint -fallthrough + case 3: + /* create vport and autogeneration wwpn */ + ret = unf_npiv_conf(port_id, port_name); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + msleep(2000); + break; + + case 2: + /* delete vport by vport index */ + index = buff_in->arg[2]; + ret = unf_delete_vport(port_id, index); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + break; + + case 4: + /* delete all vport on Lport */ + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, + UNF_ERR, + "[err]Port(0x%x) can't find", port_id); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + unf_destroy_all_vports(lport); + ret = RETURN_OK; + } + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Mode is unknown"); + msg_head.status = UNF_ADMIN_MSG_FAILED; + break; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return (int)ret; +} + +static int unf_cm_port_info_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int topo_cfg = 0; + enum unf_act_topo_e topo = UNF_ACT_TOP_UNKNOWN; + unsigned int port_speed = 0; + unsigned int port_id = 0; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + lport = v_lport; + port_id = v_lport->port_id; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_get_port_topo(port_id, &topo_cfg, &topo); + if (ret == RETURN_OK) { + ret = unf_cm_get_port_speed(port_id, &port_speed); + if (ret == RETURN_OK) { + buff_out->arg[0] = lport->port_id; + buff_out->arg[1] = topo_cfg; + buff_out->arg[2] = topo; + buff_out->arg[3] = port_speed; + buff_out->arg[4] = lport->link_up; + + msg_head.size = sizeof(struct unf_admin_msg_head) + + sizeof(unsigned int) * 5; + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_get_port_sfp_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ +#define MIN_SFPINFO_LEN 512 + union unf_sfp_eeprome_info *sfp_info = NULL; + int ret = UNF_RETURN_ERROR; + unsigned int status = 0; + unsigned int sfp_type = 0; + unsigned int port_id = 0; + char *buff_out = 
NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_input, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= MIN_SFPINFO_LEN, + return UNF_RETURN_ERROR); + buff_out = v_input->buff_out; + port_id = v_lport->port_id; + + *v_input->out_size = MIN_SFPINFO_LEN; + msg_head.status = UNF_ADMIN_MSG_DONE; + + sfp_info = vmalloc(sizeof(union unf_sfp_eeprome_info)); + if (!sfp_info) + return UNF_RETURN_ERROR; + + memset(sfp_info, 0, sizeof(union unf_sfp_eeprome_info)); + + ret = unf_cm_get_sfp_info(port_id, &status, sfp_info, &sfp_type); + if (ret == UNF_RETURN_ERROR || (status != DRV_CABLE_CONNECTOR_OPTICAL)) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + msg_head.size = sizeof(struct unf_admin_msg_head); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + memcpy((buff_out + msg_head.size), + &sfp_info->sfp_info, sizeof(struct unf_sfp_info_s)); + + vfree(sfp_info); + + return ret; +} + +static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + void *out_buf = NULL; + unsigned int port_id = 0; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + port_id = v_lport->port_id; + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_clear_port_error_code_sum(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +static int unf_cm_bbscn_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int bbscn_val = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, + UNF_TRUE, v_input, return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + out_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + buff_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + bbscn_val = buff_in->arg[1]; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]BBSCN value (0x%x)", bbscn_val); + msg_head.status = UNF_ADMIN_MSG_DONE; + if (bbscn_val <= UNF_MAX_BBSCN_VALUE) { + ret = unf_cm_set_port_bbscn(port_id, bbscn_val); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]BBSCN value is invalid(0x%x)", bbscn_val); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static void unf_fc_host_counter(struct unf_lport_s *v_lport, + struct hifc_adm_dfx_cmd_s *v_buff_out) +{ + unsigned int scsi_id = 0; + 
unsigned int index = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return); + + scsi_image_table = &v_lport->rport_scsi_table; + v_buff_out->unresult.host_cnt.host_num = + v_lport->host_info.p_scsi_host->host_no; + v_buff_out->unresult.host_cnt.port_id = v_lport->port_id; + v_buff_out->unresult.host_cnt.scsi_session_add_success = + atomic_read(&v_lport->scsi_session_add_success); + v_buff_out->unresult.host_cnt.scsi_session_add_failed = + atomic_read(&v_lport->scsi_session_add_failed); + v_buff_out->unresult.host_cnt.scsi_session_del_success = + atomic_read(&v_lport->scsi_session_del_success); + v_buff_out->unresult.host_cnt.scsi_session_del_failed = + atomic_read(&v_lport->scsi_session_del_failed); + v_buff_out->unresult.host_cnt.device_alloc = + atomic_read(&v_lport->device_alloc); + v_buff_out->unresult.host_cnt.device_destroy = + atomic_read(&v_lport->device_destroy); + v_buff_out->unresult.host_cnt.session_loss_tmo = + atomic_read(&v_lport->session_loss_tmo); + v_buff_out->unresult.host_cnt.alloc_scsi_id = + atomic_read(&v_lport->alloc_scsi_id); + v_buff_out->unresult.host_cnt.reuse_scsi_id = + atomic_read(&v_lport->reuse_scsi_id); + v_buff_out->unresult.host_cnt.resume_scsi_id = + atomic_read(&v_lport->resume_scsi_id); + v_buff_out->unresult.host_cnt.add_start_work_failed = + atomic_read(&v_lport->add_start_work_failed); + v_buff_out->unresult.host_cnt.add_closing_work_failed = + atomic_read(&v_lport->add_closing_work_failed); + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID / 2; scsi_id++) { + index = scsi_id * 2; + v_buff_out->unresult.host_cnt.session_state[scsi_id].session1 = + (unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state); + + index = scsi_id * 2 + 1; + v_buff_out->unresult.host_cnt.session_state[scsi_id].session2 = + (unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state); + } + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + v_buff_out->unresult.host_cnt.abort_io += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.host_cnt.device_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.host_cnt.target_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.host_cnt.bus_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.host_cnt.virtual_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]); + v_buff_out->unresult.host_cnt.abort_io_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.host_cnt.device_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.host_cnt.target_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]); + 
v_buff_out->unresult.host_cnt.bus_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.host_cnt.virtual_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]); + } +} + +static void unf_fc_session_counter(struct unf_lport_s *v_lport, + unsigned int scsi_id, + struct hifc_adm_dfx_cmd_s *v_buff_out) +{ + struct unf_wwpn_rport_info_s *rport_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return); + + rport_info = &v_lport->rport_scsi_table.wwn_rport_info_table[scsi_id]; + v_buff_out->unresult.session_cnt.port_id = v_lport->port_id; + v_buff_out->unresult.session_cnt.host_id = + v_lport->host_info.p_scsi_host->host_no; + + if (rport_info->dfx_counter) { + v_buff_out->unresult.session_cnt.target_busy = + atomic64_read(&rport_info->dfx_counter->target_busy); + v_buff_out->unresult.session_cnt.host_busy = + atomic64_read(&rport_info->dfx_counter->host_busy); + v_buff_out->unresult.session_cnt.abort_io = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.session_cnt.device_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.session_cnt.target_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.session_cnt.bus_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.session_cnt.virtual_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]); + + v_buff_out->unresult.session_cnt.abort_io_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.session_cnt.device_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.session_cnt.target_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.session_cnt.bus_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.session_cnt.virtual_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]); + + v_buff_out->unresult.session_cnt.device_alloc = + atomic_read(&rport_info->dfx_counter->device_alloc); + v_buff_out->unresult.session_cnt.device_destroy = + atomic_read(&rport_info->dfx_counter->device_destroy); + } + + v_buff_out->unresult.session_cnt.target_id = rport_info->target_id; + + if ((rport_info->wwpn != INVALID_WWPN) && (rport_info->rport)) { + v_buff_out->unresult.session_cnt.remote_port_wwpn = + rport_info->wwpn; + v_buff_out->unresult.session_cnt.remote_port_nportid = + rport_info->rport->nport_id; + v_buff_out->unresult.session_cnt.scsi_state = + atomic_read(&rport_info->en_scsi_state); + v_buff_out->unresult.session_cnt.remote_port_state = + rport_info->rport->rp_state; + v_buff_out->unresult.session_cnt.remote_port_scsiid = + rport_info->rport->scsi_id; + v_buff_out->unresult.session_cnt.remote_port_index = + rport_info->rport->rport_index; + + if (rport_info->rport->lport) { + v_buff_out->unresult.session_cnt.local_port_wwpn = + rport_info->rport->lport->port_name; + 
v_buff_out->unresult.session_cnt.local_port_nportid = + rport_info->rport->local_nport_id; + v_buff_out->unresult.session_cnt.local_port_ini_state = + rport_info->rport->lport_ini_state; + v_buff_out->unresult.session_cnt.local_port_state = + rport_info->rport->lport->en_states; + } + } +} + +static int unf_fc_session_scsi_cmd_in( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int scsi_cmd_type = 0; + int ret = RETURN_OK; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_id = buff_in->arg[2]; + scsi_cmd_type = buff_in->arg[3]; + + if (scsi_id >= UNF_MAX_SCSI_ID || scsi_cmd_type >= UNF_MAX_SCSI_CMD) + return UNF_RETURN_ERROR; + + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + buff_out->unresult.scsi_cmd_in = + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]); + + return ret; +} + +static int unf_fc_host_scsi_cmd_in_total( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int scsi_cmd_type = 0; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_cmd_type = buff_in->arg[3]; + + if (scsi_cmd_type >= UNF_MAX_SCSI_CMD) + return UNF_RETURN_ERROR; + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + buff_out->unresult.scsi_cmd_in += + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]); + } + + return RETURN_OK; +} + +static int unf_fc_host_scsi_cmd_done_total( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int io_return_value = 0; + int ret = RETURN_OK; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + io_return_value = buff_in->arg[3]; + + if (io_return_value >= UNF_MAX_IO_RETURN_VALUE) + return UNF_RETURN_ERROR; + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + buff_out->unresult.scsi_cmd_done += + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]); + } + + return ret; +} + +static int unf_fc_session_scsi_cmd_done( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int io_return_value = 0; + int ret = RETURN_OK; + + struct 
hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_id = buff_in->arg[2]; + io_return_value = buff_in->arg[3]; + + if (scsi_id >= UNF_MAX_SCSI_ID || + io_return_value >= UNF_MAX_IO_RETURN_VALUE) + return UNF_RETURN_ERROR; + + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + buff_out->unresult.scsi_cmd_done = + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]); + + return ret; +} + +static int unf_get_io_dfx_statistics(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + unsigned int counter_mode = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *vport = NULL; + unsigned int buff_flag = 0; + + buff_flag = (!v_input) || (!v_input->buff_out) || + (!v_input->buff_in) || (!v_lport); + if (buff_flag) + return UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + + vport = unf_cm_lookup_vport_by_vp_index( + v_lport, (unsigned short)(buff_in->arg[4])); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, vport, + return UNF_RETURN_ERROR); + + scsi_image_table = &vport->rport_scsi_table; + FC_DRIVE_ACTION_CHECK((!scsi_image_table->wwn_rport_info_table), + (msg_head.status = UNF_ADMIN_MSG_FAILED), + (ret = UNF_RETURN_ERROR), + goto err); + + counter_mode = buff_in->arg[1]; + switch (counter_mode) { + case FC_HOST_COUNTER: + unf_fc_host_counter(vport, buff_out); + break; + case FC_SESSION_SCSI_CMD_IN: + ret = unf_fc_session_scsi_cmd_in(v_input, scsi_image_table); + break; + case FC_HOST_SCSI_CMD_IN_TOTAL: + ret = unf_fc_host_scsi_cmd_in_total(v_input, scsi_image_table); + break; + case FC_HOST_SCSI_CMD_DONE_TOTAL: + ret = unf_fc_host_scsi_cmd_done_total(v_input, + scsi_image_table); + break; + case FC_SESSION_SCSI_CMD_DONE: + ret = unf_fc_session_scsi_cmd_done(v_input, scsi_image_table); + break; + case FC_SESSION_COUNTER: + scsi_id = buff_in->arg[2]; + FC_DRIVE_ACTION_CHECK((scsi_id >= UNF_MAX_SCSI_ID), + (msg_head.status = UNF_ADMIN_MSG_FAILED), + (ret = UNF_RETURN_ERROR), + goto err); + unf_fc_session_counter(vport, scsi_id, buff_out); + break; + default: + msg_head.status = UNF_ADMIN_MSG_FAILED; + ret = UNF_RETURN_ERROR; + break; + } + + if (ret != RETURN_OK) + return ret; + +err: + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_switch_dif(unsigned int v_option, + unsigned int v_dix_ip_checksum) +{ +#define UNF_WAIT_IO_COMPLETE_TIME_MS 5000 +#define UNF_WAIT_ONE_TIME_MS 100 +#define UNF_LOOP_TIMES (UNF_WAIT_IO_COMPLETE_TIME_MS / UNF_WAIT_ONE_TIME_MS) + 
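+	/* Switching DIF/DIX protection re-initializes every L_Port: close the SFP,
+	 * wait (up to 5s) for outstanding I/O to drain, unregister and re-register
+	 * the SCSI host, then reopen the SFP.
+	 */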
+ int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + int enable_dif; + unsigned int index; + + dix_flag = v_dix_ip_checksum ? UNF_TRUE : UNF_FALSE; + + enable_dif = (v_option >= UNF_ENABLE_DIF_DIX_PROT && + v_option <= UNF_ENABLE_DIX_PROT); + if (enable_dif) { + dif_sgl_mode = UNF_TRUE; + hifc_dif_enable = UNF_TRUE; + } + + switch (v_option) { + case UNF_DIF_ACTION_NONE: + dif_sgl_mode = UNF_FALSE; + hifc_dif_enable = UNF_FALSE; + hifc_dif_type = 0; + hifc_guard = 0; + break; + + case UNF_ENABLE_DIF_DIX_PROT: + hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION; + break; + + case UNF_ENABLE_DIF_PROT: + hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION; + dif_sgl_mode = UNF_FALSE; + break; + + case UNF_ENABLE_DIX_PROT: + hifc_dif_type = SHOST_DIX_TYPE0_PROTECTION; + break; + + default: + return UNF_ADMIN_MSG_FAILED; + } + + /* 1. Close Lport's SFP */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_cm_sfp_switch(lport->port_id, UNF_FALSE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) close SFP failed in DIF switch", + lport->port_id); + + return UNF_ADMIN_MSG_FAILED; + } + for (index = 0; index < UNF_LOOP_TIMES; index++) { + if (unf_busy_io_completed(lport) == UNF_TRUE) + break; + msleep(UNF_WAIT_ONE_TIME_MS); + } + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + /* 2. UnRegister the SCSI host of LPort, including its Vports */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + unf_unregister_scsi_host(lport); + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + /* 3. Register the SCSI host of LPort, including its Vports */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + if (unf_register_scsi_host(lport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[warn]Port(0x%x) register scsi host failed in DIF switch", + lport->port_id); + return UNF_ADMIN_MSG_FAILED; + } + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + /* 4. 
Open Lport's SFP */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_cm_sfp_switch(lport->port_id, UNF_TRUE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) reopen SFP failed in DIF switch", + lport->port_id); + + return UNF_ADMIN_MSG_FAILED; + } + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + return UNF_ADMIN_MSG_DONE; +} + +static int unf_cm_switch_app_ref_escape(unsigned int v_option) +{ + switch (v_option) { + case UNF_APP_REF_ESC_BOTH_NOT_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + break; + + case UNF_APP_ESC_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + break; + + case UNF_REF_ESC_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + + case UNF_APP_REF_ESC_BOTH_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + + default: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + } + + return UNF_ADMIN_MSG_DONE; +} + +static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int dif_mode = 0; + unsigned int option = 0; + unsigned int dix_ip_checksum = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + dif_mode = buff_in->arg[0]; + option = buff_in->arg[1]; + dix_ip_checksum = buff_in->arg[2]; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]DIF mode(0x%x) sub option(0x%x 0x%x)", + dif_mode, option, dix_ip_checksum); + + switch (dif_mode) { + case UNF_SWITCH_DIF_DIX: + msg_head.status = + (unsigned short)unf_cm_switch_dif(option, + dix_ip_checksum); + break; + + case UNF_APP_REF_ESCAPE: + msg_head.status = + (unsigned short)unf_cm_switch_app_ref_escape(option); + break; + + default: + msg_head.status = UNF_ADMIN_MSG_FAILED; + goto end; + } + +end: + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return RETURN_OK; +} + +static int unf_cm_set_dif(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int dif_switch = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + 
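+ /* Validate the hifcadm input/output buffers and their sizes before parsing the DIF switch argument. */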
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + dif_switch = (buff_in->arg[0]) ? + UNF_ENABLE_DIF_DIX_PROT : UNF_DIF_ACTION_NONE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]DIF switch is 0x%x", dif_switch); + + if (dif_switch == UNF_ENABLE_DIF_DIX_PROT) + msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch, + UNF_ENABLE_IP_CHECKSUM); + else + msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch, + UNF_DISABLE_IP_CHECKSUM); + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return RETURN_OK; +} + +static unsigned int unf_save_port_info(struct unf_lport_s *lport, + void *save_info_addr) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2271, UNF_TRUE, save_info_addr, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2271, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return ret; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SAVE_HBA_INFO, (void *)save_info_addr); + + return ret; +} + +static unsigned int unf_save_port_base_info(struct unf_lport_s *lport, + void *v_save_info) +{ + struct unf_save_info_head_s *save_info_head = v_save_info; + struct unf_port_info_entry_s *sava_port_entry = NULL; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + unsigned int cfg_speed = 0; + unsigned int topo_cfg = 0; + int fec = UNF_FALSE; + int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head, + return UNF_RETURN_ERROR); + + save_info_head->opcode = 0; + /* write information to up */ + save_info_head->type = UNF_PORT_BASE_INFO; /* port base info */ + save_info_head->entry_num = 1; + save_info_head->next = 0xffff; + + sava_port_entry = (struct unf_port_info_entry_s *) + ((void *)(save_info_head + 1)); + + port_mgr = &lport->low_level_func.port_mgr_op; + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* get Bbscn */ + sava_port_entry->bb_scn = unf_low_level_bbscn(lport); + + /* get speed */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_SPEED_CFG, + (void *)&cfg_speed); + sava_port_entry->speed = cfg_speed; + + /* get topo */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, + (void *)&topo_cfg); + sava_port_entry->topo = topo_cfg; + + /* get fec */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_FEC, + (void 
*)&fec); + sava_port_entry->fec = fec; + + ret = (int)unf_save_port_info(lport, v_save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) send mailbox fail", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +unsigned int unf_cm_save_port_info(unsigned int v_port_id) +{ + unsigned int port_id = v_port_id; + struct unf_lport_s *lport = NULL; + struct unf_save_info_head_s *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found", port_id); + + return ret; + } + + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 clean flush */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + save_info->opcode = 2; /* notify up to clean flush */ + save_info->type = 0xf; + save_info->entry_num = 0; + save_info->next = 0xffff; + + ret = unf_save_port_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + /* 2 save port base information */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + ret = unf_save_port_base_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) save port base information failed", + lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +static void unf_handle_port_base_info(struct unf_lport_s *lport, + struct unf_port_info_entry_s *v_save_info) +{ + struct unf_port_info_entry_s *sava_port_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_save_info, return); + + sava_port_entry = v_save_info; + + UNF_CHECK_VALID(INVALID_VALUE32, + UNF_TRUE, + (sava_port_entry->topo == UNF_TOP_LOOP_MASK) || + (sava_port_entry->topo == UNF_TOP_P2P_MASK) || + (sava_port_entry->topo == UNF_TOP_AUTO_MASK), + return); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + return; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_HBA_BASE_INFO, (void *)sava_port_entry); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Cannot set port base info"); + return; + } + + /* update bbsn cfg to Lport */ + lport->low_level_func.lport_cfg_items.bb_scn = sava_port_entry->bb_scn; + + lport->low_level_func.lport_cfg_items.port_topology = + sava_port_entry->topo; +} + +static unsigned int unf_recovery_save_info(struct unf_lport_s *lport, + void *v_save_info, + unsigned char v_type) +{ + struct unf_save_info_head_s *save_info_head = v_save_info; + void *info_entry = NULL; + int ret = 0; + unsigned short next_flag = 0; + unsigned char entry_num = 0; + unsigned char index = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head, + return UNF_RETURN_ERROR); + + do { + memset(save_info_head, 0, 
SAVE_PORT_INFO_LEN); + save_info_head->opcode = 1; + /* read information from up */ + save_info_head->type = v_type; + /* vport[qos] info */ + save_info_head->entry_num = 0xff; + save_info_head->next = next_flag; + + ret = (int)unf_save_port_info(lport, save_info_head); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, + UNF_WARN, + "[warn]Port(0x%x) send mailbox fail", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + next_flag = (unsigned short)save_info_head->next; + entry_num = (unsigned char)save_info_head->entry_num; + info_entry = save_info_head + 1; + + for (index = 0; index < entry_num; index++) { + switch (v_type) { + case UNF_PORT_BASE_INFO: + unf_handle_port_base_info(lport, info_entry); + info_entry = ((struct unf_port_info_entry_s *) + info_entry) + 1; + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_EQUIP_ATT, + UNF_ERR, + "[err]Port(0x%x) handle message failed", + lport->port_id); + return UNF_RETURN_ERROR; + } + } + + } while (next_flag != 0xffff); + + return RETURN_OK; +} + +unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + void *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + + lport = v_lport; + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 get port base information */ + ret = unf_recovery_save_info(lport, save_info, UNF_PORT_BASE_INFO); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +int unf_get_link_lose_tmo(struct unf_lport_s *v_lport) +{ + unsigned int tmo_value = 0; + + if (!v_lport) + return UNF_LOSE_TMO; + + tmo_value = atomic_read(&v_lport->link_lose_tmo); + + if (!tmo_value) + tmo_value = UNF_LOSE_TMO; + + return (int)tmo_value; +} + +int unf_get_link_lose_tmo_from_up(struct unf_lport_s *v_lport, + struct unf_flash_link_tmo_s *v_link_tmo) +{ + int ret = UNF_RETURN_ERROR; + struct unf_flash_data_s flash_data; + + if (!v_lport || !v_link_tmo || (sizeof(struct unf_flash_data_s) + > HIFC_FLASH_DATA_MAX_LEN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get flash link tmo param check fail"); + return ret; + } + memset(&flash_data, 0, sizeof(struct unf_flash_data_s)); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]link tmo fun null"); + return ret; + } + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get link tmo from up fail"); + return ret; + } + ret = RETURN_OK; + memcpy(v_link_tmo, &flash_data.link_tmo, HIFC_FLASH_LINK_TMO_MAX_LEN); + + return ret; +} + +void unf_init_link_lose_tmo(struct unf_lport_s *v_lport) +{ + struct unf_flash_link_tmo_s flash_link_tmo; + unsigned int tmo; + + memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s)); + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]init link tmo param check fail"); + return; + } + if ((unf_get_link_lose_tmo_from_up(v_lport, 
&flash_link_tmo) == + RETURN_OK) && + (flash_link_tmo.writeflag == HIFC_MGMT_TMO_MAGIC_NUM)) { + tmo = (((unsigned int)flash_link_tmo.link_tmo3 << 24) | + ((unsigned int)flash_link_tmo.link_tmo2 << 16) | + ((unsigned int)flash_link_tmo.link_tmo1 << 8) | + flash_link_tmo.link_tmo0); + if (tmo > 600) + unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO); + else + atomic_set(&v_lport->link_lose_tmo, (int)tmo); + } else { + unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO); + } +} + +unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport) +{ + struct unf_host_param_s host_param = { 0 }; + unf_scsi_host_s **p_scsi_host = NULL; + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + UNF_CHECK_VALID(0x1359, TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Point to -->> L_port->Scsi_host */ + p_scsi_host = &v_lport->host_info.p_scsi_host; + + lport_cfg_items = &v_lport->low_level_func.lport_cfg_items; + host_param.can_queue = (int)lport_cfg_items->max_queue_depth; + + /* Performance optimization */ + host_param.cmnd_per_lun = UNF_MAX_CMND_PER_LUN; + + host_param.sg_table_size = UNF_MAX_DMA_SEGS; + host_param.max_id = UNF_MAX_TARGET_NUMBER; + host_param.max_lun = UNF_DEFAULT_MAX_LUN; + host_param.max_channel = UNF_MAX_BUS_CHANNEL; + host_param.max_cmnd_len = UNF_MAX_SCSI_CMND_LEN; /* CDB-16 */ + host_param.dma_boundary = UNF_DMA_BOUNDARY; + host_param.max_sectors = UNF_MAX_SECTORS; + host_param.port_id = v_lport->port_id; + host_param.lport = v_lport; + host_param.pdev = &v_lport->low_level_func.dev->dev; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) allocate scsi host: can queue(%u), command performance LUN(%u), max lun(%u)", + v_lport->port_id, host_param.can_queue, + host_param.cmnd_per_lun, host_param.max_lun); + + if (unf_alloc_scsi_host(p_scsi_host, &host_param) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) allocate scsi host failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) allocate scsi host(0x%x) succeed", + v_lport->port_id, UNF_GET_SCSI_HOST_ID(*p_scsi_host)); + + return RETURN_OK; +} + +void unf_unregister_scsi_host(struct unf_lport_s *v_lport) +{ + unf_scsi_host_s *p_scsi_host = NULL; + unsigned int host_no = 0; + + UNF_REFERNCE_VAR(p_scsi_host); + UNF_CHECK_VALID(0x1360, TRUE, v_lport, return); + + p_scsi_host = v_lport->host_info.p_scsi_host; + + if (p_scsi_host) { + host_no = UNF_GET_SCSI_HOST_ID(p_scsi_host); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) starting unregister scsi host(0x%x)", + v_lport->port_id, host_no); + + unf_free_scsi_host(p_scsi_host); + /* can`t set p_scsi_host for NULL, + * since it does`t alloc by itself + */ + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[warn]Port(0x%x) unregister scsi host, invalid ScsiHost ", + v_lport->port_id); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) unregister scsi host(0x%x) succeed", + v_lport->port_id, host_no); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST; + + UNF_REFERNCE_VAR(p_scsi_host); + UNF_REFERNCE_VAR(host_no); +} + +unsigned int unf_cm_clear_flush(unsigned int v_port_id) +{ + unsigned int port_id = v_port_id; + struct unf_lport_s *lport = NULL; + struct unf_save_info_head_s *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) 
{ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found", port_id); + + return ret; + } + + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 clean flush */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + save_info->opcode = 2; /* notify up to clean flush */ + save_info->type = 0xf; + save_info->entry_num = 0; + save_info->next = 0xffff; + + ret = unf_save_port_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +static int unf_cm_save_data_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int save_data_mode = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, out_buf, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + + save_data_mode = buff_in->arg[0]; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if (save_data_mode == UNF_SAVA_INFO_MODE) { + ret = (int)unf_cm_save_port_info(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else if (save_data_mode == UNF_CLEAN_INFO_MODE) { + ret = (int)unf_cm_clear_flush(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR, + "[err]This mode(0x%x) is unknown", save_data_mode); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input) +{ + struct unf_lport_s *lport = NULL; + int ret = UNF_RETURN_ERROR; + enum unf_msg_format_e msg_formate; + unsigned int index = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + msg_formate = v_input->msg_format; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter HIFC_Adm, msg_formate=0x%x, 0x%x", + msg_formate, *v_input->out_size); + + /* hifcadm event */ + while (index < (sizeof(unf_hifcadm_action) / + sizeof(struct unf_hifcadm_action_s))) { + if ((msg_formate == unf_hifcadm_action[index].hifc_action) && + unf_hifcadm_action[index].fn_unf_hifc_action) { + ret = unf_hifcadm_action[index].fn_unf_hifc_action(lport, v_input); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_WARN, + "[warn]Port(0x%x) process up msg(0x%x) failed", + lport->port_id, msg_formate); + } + return 
ret; + } + index++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) not support adm cmd, msg type(0x%x) ", + lport->port_id, msg_formate); + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/unf_portman.h b/drivers/scsi/huawei/hifc/unf_portman.h new file mode 100644 index 0000000000000000000000000000000000000000..6c6083889167092c92cabbf6310b7b85ab7fa41d --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_portman.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_PORT_MAN_H__ +#define __UNF_PORT_MAN_H__ + +#define UNF_LPORT_POLL_TIMER ((unsigned int)(1 * 1000)) + +#define UNF_MAX_BBSCN_VALUE 14 +#define UNF_SAVA_INFO_MODE 0 +#define UNF_CLEAN_INFO_MODE 1 + +#define FC_DRIVE_ACTION_CHECK(condition, fail_do0, fail_do1, return) \ + do { \ + if (condition) { \ + fail_do0; \ + fail_do1; \ + return; \ + } \ + } while (0) + +/* Used in hifcadm tool */ +#define UNF_ENABLE_DIF_DIX_PROT 1 +#define UNF_ENABLE_DIF_PROT 2 +#define UNF_ENABLE_DIX_PROT 3 + +#define UNF_DISABLE_IP_CHECKSUM 0 +#define UNF_ENABLE_IP_CHECKSUM 1 + +#define UNF_APP_REF_ESC_BOTH_NOT_CHECK 0 +#define UNF_APP_ESC_CHECK 1 +#define UNF_REF_ESC_CHECK 2 +#define UNF_APP_REF_ESC_BOTH_CHECK 3 + +struct unf_global_card_thread_s { + struct list_head list_card_list_head; + spinlock_t global_card_list_lock; + unsigned int card_sum; +}; + +/* Global L_Port MG,manage all L_Port */ +struct unf_global_lport_s { + struct list_head list_lport_list_head; + + /* Temporary list,used in hold list traverse */ + struct list_head list_intergrad_head; + + /* destroy list,used in card remove */ + struct list_head list_destroy_head; + + /* Dirty list,abnormal port */ + struct list_head list_dirty_head; + spinlock_t global_lport_list_lock; + unsigned int lport_sum; + unsigned char dft_mode; + int b_start_work; +}; + +struct unf_reset_port_argin { + unsigned int port_id; +}; + +struct unf_get_topo_argout { + unsigned int *topo_cfg; + enum unf_act_topo_e *en_act_topo; +}; + +struct unf_set_topo_argin { + unsigned int port_id; + unsigned int topo; +}; + +struct unf_set_bbscn_argin { + unsigned int port_id; + unsigned int bb_scn; +}; + +struct unf_set_sfp_argin { + unsigned int port_id; + int turn_on; +}; + +struct unf_set_speed_argin { + unsigned int port_id; + unsigned int *speed; +}; + +struct unf_get_sfp_argout { + unsigned int *status; + union unf_sfp_eeprome_info *sfp_info; +}; + +struct unf_get_allinfo_argout { + unsigned int *out_size; + unsigned int in_size; + void *out_buf; + void *in_buf; + void *lport; +}; + +struct unf_port_action_s { + unsigned int action; + unsigned int (*fn_unf_action)(struct unf_lport_s *v_lport, + void *v_input); +}; + +struct unf_hifcadm_action_s { + unsigned int hifc_action; + int (*fn_unf_hifc_action)(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input); +}; + +struct unf_lport_info { +#define NPIVMAX 255 + unsigned int port_id; + unsigned int options; + int b_start_work; + unsigned int phy_link; + unsigned int link_up; + unsigned int act_speed; + unsigned int cfg_speed; + unsigned int tape_support; + unsigned long long port_name; + unsigned int msi; + unsigned int ini_io_retry_timeout; + unsigned int support_max_npiv_num; + unsigned int act_topo; + unsigned int port_topology; + unsigned int fc_ser_max_speed; + unsigned int loss_of_signal_count; + unsigned int bad_rx_char_count; + unsigned int loss_of_sync_count; + unsigned int link_fail_count; + 
unsigned int rx_eo_fa_count; + unsigned int dis_frame_count; + unsigned int bad_crc_count; + unsigned int proto_error_count; + unsigned int cfg_led_mode; + unsigned char chip_type; + unsigned char vport_num; + unsigned short rsvd1; + unsigned int vport_id[NPIVMAX]; + unsigned int nport_id; +}; + +struct unf_admin_msg_head { + unsigned int size; + unsigned short status; + unsigned char success_num; + unsigned char rsvd; +}; + +#define UNF_PORT_INFO_SIZE 10 + +struct unf_adm_cmd { + struct unf_admin_msg_head msg_head; + unsigned int arg[UNF_PORT_INFO_SIZE]; +}; + +struct unf_adm_xchg { + unsigned int aborted; + unsigned int ini_busy; + unsigned int tgt_busy; + unsigned int delay; + unsigned int free; + unsigned int wait; + unsigned int sfs_free; + unsigned int sfs_busy; +}; + +enum unf_admin_msg_status_e { + UNF_ADMIN_MSG_DONE = 0, + UNF_ADMIN_MSG_INCOMPLETE, + UNF_ADMIN_MSG_FAILED, + UNF_ADMIN_MSG_BUTT +}; + +/* the structure define with fc unf driver */ +enum fc_dfx_io_count_type_e { + FC_HOST_COUNTER = 0, + FC_HOST_SCSI_CMD_IN_TOTAL, + FC_HOST_SCSI_CMD_DONE_TOTAL, + FC_SESSION_COUNTER, + FC_SESSION_SCSI_CMD_IN, + FC_SESSION_SCSI_CMD_DONE, + FC_SRB_COUNT, +}; + +enum unf_msg_format_e { + UNF_PORT_SET_OP = 1, + UNF_TOPO_SET_OP, + UNF_SPEED_SET_OP, + UNF_INFO_GET_OP, + UNF_INFO_CLEAR_OP, + UNF_SFP_INFO_OP, + UNF_DFX, + UNF_FEC_SET = 8, + UNF_BBSCN, + UNF_VPORT, + UNF_LINK_DELAY = 11, + UNF_DIF, + UNF_DIF_CONFIG = 14, + UNF_SAVA_DATA, + UNF_SHOW_XCHG = 23, + UNF_PORTSTAT = 24, + UNF_ALL_INFO_OP = 25, + FC_LINK_TMO_OPT = 26, + FC_DRV_LOG_OPT = 27, + UNF_COMPAT_TEST = 0xFF +}; + +struct unf_save_info_head_s { + unsigned int opcode : 4; + unsigned int type : 4; + unsigned int entry_num : 8; + unsigned int next : 16; +}; + +enum unf_save_info_type_e { + UNF_SESSION_QOS = 0, + UNF_PORT_BASE_INFO = 2, + UNF_SAVE_TYPE_BUTT, +}; + +struct unf_link_tmo_opt_s { + struct unf_admin_msg_head head; + unsigned int link_opt; + int tmo_value; + unsigned int sync_all_port; +}; + +struct unf_log_level_opt_s { + struct unf_admin_msg_head head; + unsigned int log_opt; + unsigned int log_level; + unsigned int log_fre_qunce; +}; + +extern struct unf_global_lport_s global_lport_mgr; +extern struct unf_global_card_thread_s card_thread_mgr; +extern struct workqueue_struct *unf_work_queue; + +struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id); +struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id); +void *unf_lport_create_and_init( + void *private_data, + struct unf_low_level_function_op_s *low_level_op); +int unf_cm_reset_port(unsigned int v_port_id); +int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on); +int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status, + union unf_sfp_eeprome_info *v_sfp_info, + unsigned int *sfp_type); +int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn); +int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo); +int unf_cm_get_port_topo(unsigned int v_port_id, + unsigned int *v_topo_cfg, + enum unf_act_topo_e *v_en_act_topo); +int unf_cm_clear_port_error_code_sum(unsigned int v_port_id); +unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events, + void *v_input); +unsigned int unf_release_local_port(void *v_lport); +void unf_lport_route_work(struct work_struct *v_work); +void unf_lport_update_topo(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_enactive_topo); +void unf_lport_ref_dec(struct unf_lport_s *v_lport); +unsigned int unf_lport_refinc(struct unf_lport_s 
*v_lport); +void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport); +int unf_send_event(unsigned int port_id, unsigned int syn_flag, + void *argc_in, void *argc_out, + int (*p_func)(void *argc_in, void *argc_out)); +void unf_port_mgmt_deinit(void); +void unf_port_mgmt_init(void); +int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id, + unsigned int *v_link_delay); +void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num); +unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport, + struct unf_err_code_s *v_fc_err_code); +int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed); +void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id); +int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input); +unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport); +unsigned int unf_cm_save_port_info(unsigned int v_port_id); +unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport); +unsigned int unf_cm_clear_flush(unsigned int v_port_id); +int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag); +unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport); +void unf_unregister_scsi_host(struct unf_lport_s *v_lport); +int unf_get_link_lose_tmo(struct unf_lport_s *v_lport); +int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out); +void unf_init_link_lose_tmo(struct unf_lport_s *v_lport); +int unf_set_link_lose_tmo_to_all(int time_out); +void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport); +unsigned int unf_lport_login(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_en_act_topo); +unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport); +void unf_set_lport_removing(struct unf_lport_s *v_lport); +void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport); +void unf_disc_state_ma(struct unf_lport_s *v_lport, + enum unf_disc_event_e v_event); +unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport); +void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_rport.c b/drivers/scsi/huawei/hifc/unf_rport.c new file mode 100644 index 0000000000000000000000000000000000000000..3b216763dd81c467638943da681d185e7b2d617d --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.c @@ -0,0 +1,2430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include +#include "unf_portman.h" + +/* rport state: */ +/* ready --->>> link_down --->>> cloing --->>> timeout --->>> delete */ + +struct unf_rport_feature_pool_s *port_fea_pool; + +/* + * Function Name : unf_sesion_loss_timeout + * Function Description: session loss timeout + * Input Parameters : struct work_struct *v_work + * Output Parameters : N/A + * Return Type : unsigned int + */ +void unf_sesion_loss_timeout(struct work_struct *v_work) +{ + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + wwpn_rport_info = container_of(v_work, struct unf_wwpn_rport_info_s, + loss_tmo_work.work); + if (unlikely(!wwpn_rport_info)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]wwpn_rport_info is NULL"); + return; + } + + atomic_set(&wwpn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, 
UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead", + ((struct unf_lport_s *)(wwpn_rport_info->lport))->port_id, + wwpn_rport_info->wwpn, + wwpn_rport_info->target_id); +} + +/* + * Function Name : unf_alloc_scsi_id + * Function Description: alloc r_port scsi id + * Input Parameters : struct unf_lport_s *v_lport + * : struct unf_rport_s *v_rport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_alloc_scsi_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport_scsi_table = &v_lport->rport_scsi_table; + UNF_REFERNCE_VAR(ret); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + /* 1. At first, existence check */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_rport->port_name == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Plug case: reuse again */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->resume_scsi_id); + goto find; + } + } + + /* 2. Alloc new SCSI ID */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (wwn_rport_info->wwpn == INVALID_WWPN) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Use the free space */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) allco new scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->alloc_scsi_id); + goto find; + } + } + + /* 3. 
Reuse space has been used */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (atomic_read(&wwn_rport_info->en_scsi_state) == + UNF_SCSI_ST_DEAD) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + if (wwn_rport_info->dfx_counter) { + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[info]port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->reuse_scsi_id); + goto find; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) there is not enough scsi_id with max_value(0x%x)", + v_lport->port_id, index); + + return INVALID_VALUE32; + +find: + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) allocate Rport(0x%x) DFX buffer", + v_lport->port_id, wwn_rport_info->rport->nport_id); + wwn_rport_info->dfx_counter = + vmalloc(sizeof(struct unf_wwpn_dfx_counter_info_s)); + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) allocate DFX buffer fail", + v_lport->port_id); + + return INVALID_VALUE32; + } + + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + + UNF_REFERNCE_VAR(ret); + return index; +} + +static unsigned int unf_get_scsi_id_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + + UNF_CHECK_VALID(0x3015, UNF_TRUE, + v_lport, return INVALID_VALUE32); + rport_scsi_table = &v_lport->rport_scsi_table; + + if (!v_wwpn) + return INVALID_VALUE32; + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_wwpn == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + return index; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + return INVALID_VALUE32; +} + +static void unf_set_device_state(struct unf_lport_s *v_lport, + unsigned int v_scsi_id, + int en_scsi_state) +{ + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) RPort scsi_id(0x%x) is max than 0x%x", + v_lport->port_id, v_scsi_id, UNF_MAX_SCSI_ID); + return; + } + + 
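+ /* scsi_id is within range: record the new SCSI state in this port's wwn_rport_info_table entry. */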
scsi_image_table = &v_lport->rport_scsi_table; + wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id]; + atomic_set(&wwpn_rport_info->en_scsi_state, en_scsi_state); +} + +static void unf_set_rport_state(struct unf_rport_s *v_rport, + enum unf_rport_login_state_e v_states) +{ + UNF_CHECK_VALID(0x3055, UNF_TRUE, v_rport, return); + + if (v_states != v_rport->rp_state) { + /* Reset R_Port retry count */ + v_rport->retries = 0; + } + + v_rport->rp_state = v_states; +} + +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. port_logout + * 2. rcvd_rscn_port_not_in_disc + * 3. each_rport_after_rscn + * 4. rcvd_gpnid_rjt + * 5. rport_after_logout(rport is fabric port) + */ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3000, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3001, UNF_TRUE, v_rport, return); + UNF_REFERNCE_VAR(v_lport); + + /* 1. Update R_Port state: Link Down Event --->>> closing state */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* 3. Port enter closing (then enter to Delete) process */ + unf_rport_enter_closing(v_rport); +} + +static struct unf_rport_s *unf_rport_is_changed(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid) +{ + if (v_rport) { + /* S_ID or D_ID has been changed */ + if ((v_rport->nport_id != v_sid) || + (v_rport->local_nport_id != v_lport->nport_id)) { + /* + * 1. Swap case: (SID or DID changed): + * Report link down & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, v_rport); + return NULL; + } + } + + return v_rport; +} + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + /* Used for HIFC Chip */ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rporta = NULL; + struct unf_rport_s *rportb = NULL; + int bwwpn_flag = 0; + + UNF_CHECK_VALID(0x3002, UNF_TRUE, v_lport, return NULL); + + /* About R_Port by N_Port_ID */ + rporta = unf_rport_is_changed(v_lport, v_rport_by_nport_id, v_sid); + /* About R_Port by WWpn */ + rportb = unf_rport_is_changed(v_lport, v_rport_by_wwpn, v_sid); + + if (!rporta && !rportb) { + return NULL; + } else if (!rporta && rportb) { + /* 3. Plug case: reuse again */ + rport = rportb; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn", + v_lport->port_id, rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by WWPN */ + } else if (rporta && !rportb) { + bwwpn_flag = ((rporta->port_name != v_wwpn) && + (rporta->port_name != 0) && + (rporta->port_name != INVALID_VALUE64)); + if (bwwpn_flag) { + /* 4. WWPN changed: Report link down + * & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, rporta); + return NULL; + } + + /* Updtae WWPN */ + rporta->port_name = v_wwpn; + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by N_Port_ID */ + } + + /* 5. 
Case for A == B && A && B */ + if (rporta == rportb) { + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) find two duplicate login. rport(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) rport(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)", + v_lport->port_id, + rporta, rporta->port_name, + rporta->nport_id, rporta->local_nport_id, + rportb, rportb->port_name, + rportb->nport_id, rportb->local_nport_id); + + /* 6. Case for A != B && A && B */ + unf_rport_immediate_linkdown(v_lport, rporta); + unf_rport_immediate_linkdown(v_lport, rportb); + + return NULL; +} + +struct unf_rport_s *unf_get_rport_by_wwn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3049, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from busy_list: compare wwpn(port name) */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->port_name == v_wwpn) { + find_rport = rport; + + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rport_by_nport_id = NULL; + struct unf_rport_s *rport_by_wwpn = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3005, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3006, UNF_TRUE, + v_lport->pfn_unf_qualify_rport, return NULL); + + /* Get R_Port by WWN & N_Port_ID */ + rport_by_nport_id = unf_get_rport_by_nport_id(v_lport, v_sid); + rport_by_wwpn = unf_get_rport_by_wwn(v_lport, v_wwpn); + + /* R_Port check: by WWPN */ + if (rport_by_wwpn) { + spin_lock_irqsave(&rport_by_wwpn->rport_state_lock, flags); + if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid", + v_lport->port_id, rport_by_wwpn, v_wwpn); + + rport_by_wwpn = NULL; + } else { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + } + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)", + v_lport->port_id, v_lport->nport_id, + rport_by_nport_id, v_sid, rport_by_wwpn, v_wwpn); + + /* R_Port validity check: get by WWPN & N_Port_ID */ + rport = v_lport->pfn_unf_qualify_rport(v_lport, rport_by_nport_id, + rport_by_wwpn, + v_wwpn, v_sid); + return rport; +} + +void unf_rport_delay_login(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3009, UNF_TRUE, v_rport, return); + + /* Do R_Port recovery: PLOGI or PRLI or LOGO */ + unf_rport_error_recovery(v_rport); +} + +unsigned int 
unf_rport_ref_inc(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3010, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + if (atomic_read(&v_rport->rport_ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Rport(0x%x) reference count is wrong %d", + v_rport->nport_id, + atomic_read(&v_rport->rport_ref_cnt)); + return UNF_RETURN_ERROR; + } + + atomic_inc(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + return RETURN_OK; +} + +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. TMF/ABTS timeout recovery :Y + * 2. L_Port error recovery --->>> larger than retry_count :Y + * 3. R_Port error recovery --->>> larger than retry_count :Y + * 4. Check PLOGI parameters --->>> parameter is error :Y + * 5. PRLI handler --->>> R_Port state is error :Y + * 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 8. PLOGI wait timeout with R_PORT is INI mode :Y + * 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y + * 10. RCVD GPNID_ACC --->>> R_Port state is error :Y + * 11. Private Loop mode with LOGO case :Y + * 12. P2P mode with LOGO case :Y + * 13. Fabric mode with LOGO case :Y + * 14. RCVD PRLI_ACC with R_Port is INI :Y + * 15. TGT RCVD BLS_REQ with session is error :Y + */ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3013, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3014, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + /* 1. Already within Closing or Delete: Do nothing */ + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + return; + } else if (v_rport->rp_state == UNF_RPORT_ST_LOGO) { + /* 2. Update R_Port state: + * Normal Enter Event --->>> closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* Send Logo if necessary */ + if (unf_send_logo(v_lport, v_rport) != RETURN_OK) + unf_rport_enter_closing(v_rport); + } else { + /* + * 3. 
Update R_Port state: Link Down Event --->>> closing state + * enter closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_enter_closing(v_rport); + } +} + +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + + UNF_CHECK_VALID(0x3016, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) is removing and do nothing", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d", + v_lport->port_id, v_lport->nport_id, + v_scsi_id, UNF_MAX_SCSI_ID); + + return UNF_RETURN_ERROR; + } + + rport_scsi_table = &v_lport->rport_scsi_table; + if (rport_scsi_table->wwn_rport_info_table) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].rport, + v_scsi_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].wwpn, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].target_id); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[v_scsi_id]; + if (wwn_rport_info->rport) { + wwn_rport_info->rport->rport = NULL; + wwn_rport_info->rport = NULL; + } + + wwn_rport_info->target_id = INVALID_VALUE32; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + /* NOTE: remain WWPN/Port_Name unchanged(un-cleared) */ + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + return RETURN_OK; + } + + return UNF_RETURN_ERROR; +} + +static void unf_report_ini_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3031, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3032, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->start_work); + + if (unlikely(!queue_work(v_lport->link_event_wq, + &v_rport->start_work))) { + atomic_inc(&v_lport->add_start_work_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + } +} + +static void unf_report_ini_linkdown_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int scsi_id = 0; + struct fc_rport *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3033, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3034, UNF_TRUE, v_rport, return); + + /* + * 1. 
set local device(rport/rport_info_table) state + * -------------------------------------------------OFF_LINE + ** + * about rport->scsi_id + * valid during rport link up to link down + */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + scsi_id = v_rport->scsi_id; + unf_set_device_state(v_lport, scsi_id, UNF_SCSI_ST_OFFLINE); + + /* 2. delete scsi's rport */ + rport = (struct fc_rport *)v_rport->rport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + if (rport) { + fc_remote_port_delete(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) delete rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, + v_rport->port_name, scsi_id); + + atomic_inc(&v_lport->scsi_session_del_success); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->scsi_session_del_failed); + } +} + +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att) +{ + /* Report R_Port Link Up/Down Event */ + unsigned long flag = 0; + enum unf_port_state_e en_lport_state = 0; + + UNF_CHECK_VALID(0x3019, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3020, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + /* 1. R_Port does not has TGT mode any more */ + if (!(rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_rport->lport_ini_state == UNF_PORT_STATE_LINKUP)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Down + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more", + v_lport->port_id, v_rport->nport_id, rport_att); + } + + /* 2. R_Port with TGT mode, L_Port with INI mode */ + if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_lport->options & UNF_FC4_FRAME_PARM_3_INI)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Up + v_rport->lport_ini_state = UNF_PORT_STATE_LINKUP; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)", + v_lport->port_id, v_rport->last_lport_ini_state, + v_rport->lport_ini_state); + } + + /* 3. 
Report L_Port INI/TGT Down/Up event to SCSI */ + if (v_rport->last_lport_ini_state == v_rport->lport_ini_state) { + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + v_lport->port_id, v_rport->nport_id, v_rport, + v_rport->lport_ini_state); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + return; + } + + en_lport_state = v_rport->lport_ini_state; + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + switch (en_lport_state) { + /* Link Down */ + case UNF_PORT_STATE_LINKDOWN: + unf_report_ini_linkdown_event(v_lport, v_rport); + break; + + /* Link Up */ + case UNF_PORT_STATE_LINKUP: + unf_report_ini_linkup_event(v_lport, v_rport); + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with unknown link status(0x%x)", + v_lport->port_id, v_rport->lport_ini_state); + break; + } +} + +static void unf_rport_call_back(void *v_rport, + void *v_lport, + unsigned int v_result) +{ + /* Report R_Port link down event */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(v_result); + + UNF_CHECK_VALID(0x3037, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3038, UNF_TRUE, v_lport, return); + rport = (struct unf_rport_s *)v_rport; + lport = (struct unf_lport_s *)v_lport; + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->last_lport_ini_state = rport->lport_ini_state; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = rport->lport_tgt_state; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + + /* Report R_Port Link Down Event to scsi */ + if (rport->last_lport_ini_state == rport->lport_ini_state) { + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + lport->port_id, rport->nport_id, rport, + rport->lport_ini_state); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + return; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_report_ini_linkdown_event(lport, rport); +} + +static void unf_rport_recovery_timeout(struct work_struct *v_work) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_rport_login_state_e en_rp_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3039, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, recovery_work.work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) Port is NULL", + rport->nport_id); + + /* for timer */ + unf_rport_ref_dec(rport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + en_rp_state = rport->rp_state; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout", + lport->port_id, lport->nport_id, + rport->nport_id, en_rp_state); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + switch (en_rp_state) { + case UNF_RPORT_ST_PLOGI_WAIT: + if (((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) && + 
(lport->port_name > rport->port_name)) || + lport->en_act_topo != UNF_ACT_TOP_P2P_DIRECT) { + /* P2P: Name is master with P2P_D or has INI Mode */ + ret = unf_send_plogi(rport->lport, rport); + } + break; + + case UNF_RPORT_ST_PRLI_WAIT: + ret = unf_send_prli(rport->lport, rport); + break; + + default: + break; + } + + if (ret != RETURN_OK) + unf_rport_error_recovery(rport); + + /* company with timer */ + unf_rport_ref_dec(rport); +} + +static unsigned int unf_get_dev_loss_tmo_by_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct fc_rport *rport = (struct fc_rport *)v_rport->rport; + + if (rport) + return rport->dev_loss_tmo; + else + return (unsigned int)unf_get_link_lose_tmo(v_lport); +} + +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned int scsi_id = 0; + unsigned int ret = 0; + unsigned int delay = 0; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + delay = unf_get_dev_loss_tmo_by_rport(v_lport, v_rport); + rport_scsi_table = &v_lport->rport_scsi_table; + scsi_id = v_rport->scsi_id; + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + /* 1. Cancel recovery_work */ + if (cancel_delayed_work(&v_rport->recovery_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + /* 2. Cancel Open_work */ + if (cancel_delayed_work(&v_rport->open_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. 
Work in-queue (switch to thread context) */ + if (!queue_work(v_lport->link_event_wq, &v_rport->closing_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->add_closing_work_failed); + + } else { + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + (void)unf_rport_ref_inc(v_rport); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->closing_work); + } + + if (v_rport->nport_id > UNF_FC_FID_DOM_MGR) + return; + + if (scsi_id >= UNF_MAX_SCSI_ID) { + scsi_id = unf_get_scsi_id_by_wwpn(v_lport, v_rport->port_name); + if (scsi_id >= UNF_MAX_SCSI_ID) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is max than(0x%x)", + v_lport->port_id, v_rport, v_rport->nport_id, + v_rport->port_name, + v_rport->options, scsi_id, + UNF_MAX_SCSI_ID); + + return; + } + } + + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + ret = queue_delayed_work( + unf_work_queue, + &wwn_rport_info->loss_tmo_work, + (unsigned long)delay * msecs_to_jiffies(1000)); + if (!ret) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info] Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed", + v_lport->port_id, v_rport, + v_rport->nport_id, scsi_id, + v_rport->port_name); + } +} + +static void unf_rport_closing_timeout(struct work_struct *v_work) +{ + /* closing --->>>(timeout)--->>> delete */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + unsigned long disc_flag = 0; + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int) = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + /* Get R_Port & L_Port & Disc */ + rport = container_of(v_work, struct unf_rport_s, closing_work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + rport->nport_id, rport); + + /* Release directly (for timer) */ + unf_rport_ref_dec(rport); + return; + } + disc = &lport->disc; + + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + /* 1. Update R_Port state: event_timeout --->>> state_delete */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT); + + /* Check R_Port state */ + if (rport->rp_state != UNF_RPORT_ST_DELETE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x) closing timeout with error state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* Dec ref_cnt for timer */ + unf_rport_ref_dec(rport); + return; + } + + pfn_unf_rport_call_back = rport->pfn_unf_rport_call_back; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* 2. 
Put R_Port to delete list */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_delete_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + /* 3. Report rport link down event to scsi */ + if (pfn_unf_rport_call_back) { /* unf_rport_call_back */ + pfn_unf_rport_call_back((void *)rport, (void *)rport->lport, + RETURN_OK); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) callback is NULL", + rport->nport_id); + } + + /* 4. Remove/delete R_Port */ + unf_rport_ref_dec(rport); + unf_rport_ref_dec(rport); +} + +static void unf_rport_linkup_to_scsi(struct work_struct *v_work) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport = NULL; + unsigned long flags = RETURN_OK; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + unsigned int scsi_id = 0; + + struct unf_lport_s *lport = NULL; + struct unf_rport_s *unf_rport = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + unf_rport = container_of(v_work, struct unf_rport_s, start_work); + if (unlikely(!unf_rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort is NULL for work(%p)", v_work); + return; + } + + lport = unf_rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + unf_rport->nport_id, unf_rport); + return; + } + + /* 1. Alloc R_Port SCSI_ID (image table) */ + unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport); + if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name, unf_rport->scsi_id); + + /* NOTE: return */ + return; + } + + /* 2. Add rport to scsi */ + scsi_id = unf_rport->scsi_id; + rport_ids.node_name = unf_rport->node_name; + rport_ids.port_name = unf_rport->port_name; + rport_ids.port_id = unf_rport->nport_id; + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + rport = fc_remote_port_add(lport->host_info.p_scsi_host, + 0, &rport_ids); + if (unlikely(!rport)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name); + + unf_free_scsi_id(lport, scsi_id); + return; + } + + /* 3. Change rport role save local SCSI_ID to scsi rport */ + *((unsigned int *)rport->dd_data) = scsi_id; + rport->supported_classes = FC_COS_CLASS3; + rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + fc_remote_port_rolechg(rport, rport_ids.roles); + + /* 4. 
Save scsi rport info to local R_Port */ + spin_lock_irqsave(&unf_rport->rport_state_lock, flags); + unf_rport->rport = rport; + spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); + + rport_scsi_table = &lport->rport_scsi_table; + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + wwn_rport_info->target_id = rport->scsi_target_id; + wwn_rport_info->rport = unf_rport; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport->port_name, + scsi_id); + + atomic_inc(&lport->scsi_session_add_success); +} + +static void unf_rport_open_timeout(struct work_struct *v_work) +{ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3041, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, open_work.work); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort is NULL"); + + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flags); + lport = rport->lport; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + /* NOTE: R_Port state check */ + if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + return; + } + + /* Report R_Port Link Down event */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_closing(rport); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + + UNF_REFERNCE_VAR(lport); +} + +static unsigned int unf_alloc_index_for_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned long pool_flag = 0; + unsigned int alloc_indx = 0; + unsigned int max_rport = 0; + struct unf_rport_pool_s *rport_pool = NULL; + + rport_pool = &v_lport->rport_pool; + max_rport = v_lport->low_level_func.lport_cfg_items.max_login; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, pool_flag); + while (alloc_indx < max_rport) { + if (!test_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap)) { + /* Case for HIFC */ + if (unlikely(atomic_read( + &v_lport->port_no_operater_flag) == + UNF_LPORT_NOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is within NOP", + v_lport->port_id); + + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, + pool_flag); + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&v_rport->rport_state_lock, + rport_flag); + /* set R_Port index */ + v_rport->rport_index = alloc_indx; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed", + v_lport->port_id, alloc_indx, + v_rport->nport_id); + + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_flag); + + /* Set (index) bit */ + set_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap); + + /* Break here */ + break; + } + alloc_indx++; + } + 
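/* either a free bit in the rpi bitmap was claimed for this R_Port,
+ * or alloc_indx reached max_rport and no index is available
+ */
+ 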
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, pool_flag); + + if (alloc_indx == max_rport) + return UNF_RETURN_ERROR; + else + return RETURN_OK; +} + +static void unf_check_rport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flags = 0; + unsigned int max_rport = 0; + + UNF_CHECK_VALID(0x3045, UNF_TRUE, v_lport, return); + rport_pool = &lport->rport_pool; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags); + max_rport = lport->low_level_func.lport_cfg_items.max_login; + if ((rport_pool->rport_pool_completion) && + (max_rport == rport_pool->rport_pool_count)) { + complete(rport_pool->rport_pool_completion); + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags); +} + +void unf_init_rport_params(struct unf_rport_s *v_rport, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3046, UNF_TRUE, rport, return); + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_set_rport_state(rport, UNF_RPORT_ST_INIT); + /* set callback function */ + rport->pfn_unf_rport_call_back = unf_rport_call_back; + rport->lport = v_lport; + rport->fcp_conf_needed = UNF_FALSE; + rport->tape_support_needed = UNF_FALSE; + rport->mas_retries = UNF_MAX_RETRY_COUNT; + rport->logo_retries = 0; + rport->retries = 0; + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->node_name = 0; + rport->port_name = INVALID_WWPN; + rport->disc_done = 0; + rport->scsi_id = INVALID_VALUE32; + rport->data_thread = NULL; + sema_init(&rport->task_sema, 0); + atomic_set(&rport->rport_ref_cnt, 0); + atomic_set(&rport->pending_io_cnt, 0); + rport->rport_alloc_jifs = jiffies; + + rport->ed_tov = UNF_DEFAULT_EDTOV + 500; + rport->ra_tov = UNF_DEFAULT_RATOV; + + INIT_WORK(&rport->closing_work, unf_rport_closing_timeout); + INIT_WORK(&rport->start_work, unf_rport_linkup_to_scsi); + INIT_DELAYED_WORK(&rport->recovery_work, unf_rport_recovery_timeout); + INIT_DELAYED_WORK(&rport->open_work, unf_rport_open_timeout); + + atomic_inc(&rport->rport_ref_cnt); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); +} + +static unsigned int unf_alloc_llrport_resource(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_nport_id) +{ + unsigned int ret = RETURN_OK; + struct unf_rport_info_s rport_info = { 0 }; + + struct unf_lport_s *lport = NULL; + + lport = v_lport->root_lport; + + if (lport->low_level_func.service_op.pfn_unf_alloc_rport_res) { + rport_info.nport_id = v_nport_id; + rport_info.rport_index = v_rport->rport_index; + rport_info.local_nport_id = v_lport->nport_id; /* sid */ + rport_info.port_name = 0; + + ret = lport->low_level_func.service_op.pfn_unf_alloc_rport_res( + lport->fc_port, + &rport_info); + } else { + ret = RETURN_OK; + } + + return ret; +} + +static void *unf_add_rport_to_busy_list(struct unf_lport_s *v_lport, + struct unf_rport_s *v_new_rport, + unsigned int v_nport_id) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *new_rport = v_new_rport; + struct unf_rport_s *old_rport = NULL; + struct list_head *node = NULL; + struct 
list_head *next_node = NULL;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL);
+ UNF_CHECK_VALID(0x3046, UNF_TRUE, v_new_rport, return NULL);
+
+ lport = v_lport->root_lport;
+ disc = &v_lport->disc;
+ UNF_CHECK_VALID(0x3046, UNF_TRUE, lport, return NULL);
+ rport_pool = &lport->rport_pool;
+
+ spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
+ list_for_each_safe(node, next_node, &disc->list_busy_rports) {
+ /* According to N_Port_ID */
+ old_rport = list_entry(node, struct unf_rport_s, entry_rport);
+ if (old_rport->nport_id == v_nport_id)
+ break; /* find by N_Port_ID */
+ old_rport = NULL;
+ }
+
+ if (old_rport) {
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ /* Use old R_Port & Add new R_Port back to R_Port Pool */
+ spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
+ clear_bit((int)new_rport->rport_index,
+ rport_pool->pul_rpi_bitmap);
+ list_add_tail(&new_rport->entry_rport,
+ &rport_pool->list_rports_pool);
+ rport_pool->rport_pool_count++;
+ spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock,
+ flag);
+
+ unf_check_rport_pool_status(lport);
+ return (void *)old_rport;
+ }
+
+ /* Reserve low-level (chip) resources for this new R_Port */
+ if (unf_alloc_llrport_resource(v_lport, new_rport,
+ v_nport_id) != RETURN_OK) {
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ /* Add new R_Port back to R_Port Pool */
+ spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
+ clear_bit((int)new_rport->rport_index,
+ rport_pool->pul_rpi_bitmap);
+ list_add_tail(&new_rport->entry_rport,
+ &rport_pool->list_rports_pool);
+ rport_pool->rport_pool_count++;
+ spin_unlock_irqrestore(
+ &rport_pool->rport_free_pool_lock, flag);
+
+ unf_check_rport_pool_status(lport);
+
+ return NULL;
+ }
+
+ /* Add new R_Port to busy list */
+ list_add_tail(&new_rport->entry_rport,
+ &disc->list_busy_rports);
+ new_rport->nport_id = v_nport_id; /* set R_Port N_Port_ID */
+ /* set L_Port N_Port_ID */
+ new_rport->local_nport_id = v_lport->nport_id;
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ unf_init_rport_params(new_rport, v_lport);
+
+ return (void *)new_rport;
+}
+
+void *unf_rport_get_free_and_init(void *v_lport,
+ unsigned int v_rport_type,
+ unsigned int v_nport_id)
+{
+ struct unf_lport_s *lport = NULL;
+ struct unf_rport_pool_s *rport_pool = NULL;
+ struct unf_disc_s *disc = NULL;
+ struct unf_disc_s *v_port_disc = NULL;
+ struct unf_rport_s *rport = NULL;
+ struct list_head *list_head = NULL;
+ unsigned long flag = 0;
+ struct unf_disc_rport_s *disc_rport = NULL;
+
+ UNF_REFERNCE_VAR(v_rport_type);
+ UNF_REFERNCE_VAR(rport);
+
+ UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL);
+ lport = ((struct unf_lport_s *)v_lport)->root_lport; /* ROOT L_Port */
+ UNF_CHECK_VALID(0x3047, UNF_TRUE, lport, return NULL);
+
+ /* Check L_Port state: NOP */
+ if (unlikely(atomic_read(&lport->port_no_operater_flag) ==
+ UNF_LPORT_NOP)) {
+ return NULL;
+ }
+
+ rport_pool = &lport->rport_pool;
+ disc = &lport->disc;
+
+ /* 1. 
UNF_PORT_TYPE_DISC: Get from disc_rport_pool */ + if (v_rport_type == UNF_PORT_TYPE_DISC) { + v_port_disc = &(((struct unf_lport_s *)v_lport)->disc); + + /* NOTE: list_disc_rports_pool used + * with list_disc_rport_busy + */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) { + /* Get & delete from Disc R_Port Pool & + * Add it to Busy list + */ + list_head = + (&disc->disc_rport_mgr.list_disc_rports_pool)->next; + list_del_init(list_head); + disc_rport = list_entry(list_head, + struct unf_disc_rport_s, + entry_rport); + /* Set R_Port N_Port_ID */ + disc_rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + /* Add to list_disc_rport_busy */ + spin_lock_irqsave(&v_port_disc->rport_busy_pool_lock, + flag); + list_add_tail( + list_head, + &v_port_disc->disc_rport_mgr.list_disc_rport_busy); + spin_unlock_irqrestore( + &v_port_disc->rport_busy_pool_lock, flag); + } else { + disc_rport = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + + /* NOTE: return */ + return disc_rport; + } + + /* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (!list_empty(&rport_pool->list_rports_pool)) { + /* Get & delete from R_Port free Pool */ + list_head = (&rport_pool->list_rports_pool)->next; + list_del_init(list_head); + rport_pool->rport_pool_count--; + rport = list_entry(list_head, struct unf_rport_s, entry_rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort pool is empty", + lport->port_id, lport->nport_id); + + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + /* NOTE: return */ + return NULL; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + /* 3. Alloc (& set bit) R_Port index */ + if (unf_alloc_index_for_rport(lport, rport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate index for new RPort failed", + lport->nport_id); + + /* Alloc failed: Add R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + list_add_tail(&rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + unf_check_rport_pool_status(lport); + return NULL; + } + + /* 4. 
Add R_Port to busy list */ + rport = unf_add_rport_to_busy_list(v_lport, rport, v_nport_id); + UNF_REFERNCE_VAR(rport); + + return (void *)rport; +} + +static void unf_reset_rport_attribute(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3070, 1, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + v_rport->pfn_unf_rport_call_back = NULL; + v_rport->lport = NULL; + v_rport->node_name = INVALID_VALUE64; + v_rport->port_name = INVALID_WWPN; + v_rport->nport_id = INVALID_VALUE32; + v_rport->local_nport_id = INVALID_VALUE32; + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; + v_rport->ed_tov = UNF_DEFAULT_EDTOV; + v_rport->ra_tov = UNF_DEFAULT_RATOV; + v_rport->rport_index = INVALID_VALUE32; + v_rport->scsi_id = INVALID_VALUE32; + v_rport->rport_alloc_jifs = INVALID_VALUE64; + + /* ini or tgt */ + v_rport->options = 0; + + /* fcp conf */ + v_rport->fcp_conf_needed = UNF_FALSE; + + /* special req retry times */ + v_rport->retries = 0; + v_rport->logo_retries = 0; + + /* special req retry times */ + v_rport->mas_retries = UNF_MAX_RETRY_COUNT; + + /* for target mode */ + v_rport->session = NULL; + v_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rp_state = UNF_RPORT_ST_INIT; + v_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + v_rport->disc_done = 0; + + /* for scsi */ + v_rport->data_thread = NULL; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); +} + +static unsigned int unf_rport_remove(void *v_rport) +{ + /* remove_old_rport/... --->>> rport_ref_dec --->>> rport_remove */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flag = 0; + unsigned int rport_index = 0; + + UNF_CHECK_VALID(0x3050, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + rport = (struct unf_rport_s *)v_rport; + lport = rport->lport; + UNF_CHECK_VALID(0x3051, UNF_TRUE, + lport, return UNF_RETURN_ERROR); + rport_pool = &((struct unf_lport_s *)lport->root_lport)->rport_pool; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)", + rport, rport->nport_id, rport->local_nport_id); + + /* 1. Terminate open exchange before rport remove: set ABORT tag */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, + rport->nport_id, lport->nport_id, 0); + + /* 2. Abort sfp exchange before rport remove */ + unf_cm_xchg_mgr_abort_sfs_by_id(lport, rport, + rport->nport_id, lport->nport_id); + + /* 3. 
Release R_Port resource: session reset/delete */ + (void)unf_release_rport_res(lport, rport); + + /* 4.1 Delete R_Port from disc destroy/delete list */ + spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); + list_del_init(&rport->entry_rport); + spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); + + rport_index = rport->rport_index; /* according to bitmap */ + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)", + lport->port_id, rport->nport_id, rport, rport->rport_index); + + unf_reset_rport_attribute(rport); + + /* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (lport->low_level_func.rport_release_type == + UNF_LOW_LEVEL_RELEASE_RPORT_SYNC) { + clear_bit((int)rport_index, rport_pool->pul_rpi_bitmap); + } + list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + unf_check_rport_pool_status((struct unf_lport_s *)lport->root_lport); + up(&rport->task_sema); + + return RETURN_OK; +} + +void unf_rport_ref_dec(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3011, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + if (atomic_dec_and_test(&v_rport->rport_ref_cnt)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + (void)unf_rport_remove(v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } +} + +static enum unf_rport_login_state_e unf_rport_stat_init( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* PLOGI --->>> PLOGI_WAIT */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_plogi_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_ENTER_PRLI: /* PRLI --->>> PRLI_WAIT */ + en_next_state = UNF_RPORT_ST_PRLI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_prli_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_READY: /* Ready --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + case 
UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_ready( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* ready --->>> plogi_wait */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_closing( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_CLS_TIMEOUT: /* timeout --->>> delete */ + en_next_state = UNF_RPORT_ST_DELETE; + break; + + case UNF_EVENT_RPORT_RELOGIN: /* relogin --->>> INIT */ + en_next_state = UNF_RPORT_ST_INIT; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_logo( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_NORMAL_ENTER: /* normal enter --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_old_state = UNF_RPORT_ST_INIT; + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3056, UNF_TRUE, v_rport, return); + + en_old_state = v_rport->rp_state; + + switch (v_rport->rp_state) { + /* State INIT */ + case UNF_RPORT_ST_INIT: + en_next_state = unf_rport_stat_init(en_old_state, v_event); + break; + + /* State PLOGI Wait */ + case UNF_RPORT_ST_PLOGI_WAIT: + en_next_state = unf_rport_stat_plogi_wait(en_old_state, + v_event); + break; + + /* State PRLI Wait */ + case UNF_RPORT_ST_PRLI_WAIT: + en_next_state = unf_rport_stat_prli_wait(en_old_state, + v_event); + break; + + /* State LOGO */ + case UNF_RPORT_ST_LOGO: + en_next_state = unf_rport_stat_logo(en_old_state, v_event); + break; + + /* State CLOSING */ + case UNF_RPORT_ST_CLOSING: + en_next_state = unf_rport_stat_closing(en_old_state, v_event); + break; + + /* State READY */ + case UNF_RPORT_ST_READY: + en_next_state = unf_rport_stat_ready(en_old_state, v_event); + break; + + /* State DELETE */ + case UNF_RPORT_ST_DELETE: + default: + en_next_state = UNF_RPORT_ST_INIT; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x) hold 
state(0x%x)", + v_rport->nport_id, v_rport->rp_state); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)", + v_rport->nport_id, en_old_state, v_event, en_next_state); + + unf_set_rport_state(v_rport, en_next_state); +} + +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport) +{ + /* for L_Port's R_Port(s) */ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long disc_lock_flag = 0; + unsigned long rport_lock_flag = 0; + + UNF_CHECK_VALID(0x3058, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + /* for each busy R_Port */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag); + /* --->>> busy_rports */ + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + /* 1. Prevent process Repeatly: Closing */ + spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); + if (rport->rp_state == UNF_RPORT_ST_CLOSING) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 2. Increase ref_cnt to protect R_Port */ + if (unf_rport_ref_inc(rport) != RETURN_OK) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 3. Update R_Port state: + * Link Down Event --->>> closing state + */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + /* 4. Put R_Port from busy to destroy list */ + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); + + lport = rport->lport; + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + + /* 5. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, rport); + + /* 6. decrease R_Port ref_cnt (company with 2) */ + unf_rport_ref_dec(rport); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag); +} + +void unf_rport_enter_closing(struct unf_rport_s *v_rport) +{ + /* + * call by + * 1. with RSCN processer + * 2. with LOGOUT processer + ** + * from + * 1. R_Port Link Down + * 2. R_Port enter LOGO + */ + unsigned long rport_lock_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3059, UNF_TRUE, v_rport, return); + + /* 1. Increase ref_cnt to protect R_Port */ + spin_lock_irqsave(&v_rport->rport_state_lock, rport_lock_flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return; + } + + /* NOTE: R_Port state has been set(with closing) */ + + lport = v_rport->lport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_lock_flag); + + /* 2. Put R_Port from busy to destroy list */ + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag); + + /* 3. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, v_rport); + + /* 4. 
dec R_Port ref_cnt */ + unf_rport_ref_dec(v_rport); +} + +void unf_rport_error_recovery(struct unf_rport_s *v_rport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3060, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + return; + } + + /* Check R_Port state */ + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) offline and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* Check repeatability with recovery work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) recovery work is running and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* NOTE: Re-login or Logout directly (recovery work) */ + if (v_rport->retries < v_rport->mas_retries) { + v_rport->retries++; + delay = (unsigned long)v_rport->ed_tov; + + if (queue_delayed_work(unf_work_queue, + &v_rport->recovery_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + /* Inc ref_cnt: corresponding to this work timer */ + (void)unf_rport_ref_inc(v_rport); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed", + v_rport->nport_id, v_rport, v_rport->rp_state); + + /* Update R_Port state: LOGO event --->>> ST_LOGO */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* Enter LOGO processer */ + unf_rport_enter_logo(v_rport->lport, v_rport); + } + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_reuse_only(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3061, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process", + v_rport->nport_id, v_rport, v_rport->rp_state); + + ret = UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_recover(struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + unsigned int ret = 
UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3062, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port state check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } + + /* Update R_Port state: recovery --->>> ready */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_RECOVERY); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_init(struct unf_rport_s *v_rport) +{ + unsigned long flage = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3063, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flage); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]RPort(0x%x)'s state is 0x%x with use_init flag", + v_rport->nport_id, v_rport->rp_state); + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } else { + /* Update R_Port state: re-enter Init state */ + unf_set_rport_state(v_rport, UNF_RPORT_ST_INIT); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3048, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from rport_busy_list: compare N_Port_ID */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->nport_id == nport_id) { + find_rport = rport; + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id) +{ + /* + * New add or plug + * + * retry_flogi --->>> reuse_only + * name_server_register --->>> reuse_only + * SNS_plogi --->>> reuse_only + * enter_flogi --->>> reuse_only + * logout --->>> reuse_only + * flogi_handler --->>> reuse_only + * plogi_handler --->>> reuse_only + * adisc_handler --->>> reuse_recovery + * logout_handler --->>> reuse_init + * prlo_handler --->>> reuse_init + * login_with_loop --->>> 
reuse_only + * gffid_callback --->>> reuse_only + * delay_plogi --->>> reuse_only + * gffid_rjt --->>> reuse_only + * gffid_rsp_unknown --->>> reuse_only + * gpnid_acc --->>> reuse_init + * fdisc_callback --->>> reuse_only + * flogi_acc --->>> reuse_only + * plogi_acc --->>> reuse_only + * logo_callback --->>> reuse_init + * rffid_callback --->>> reuse_only + */ +#define UNF_AVOID_LINK_FLASH_TIME 3000 + + struct unf_rport_s *rport = v_rport; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3075, UNF_TRUE, v_lport, return NULL); + + /* 1. Alloc New R_Port or Update R_Port Property */ + if (!rport) { + /* If NULL, get/Alloc new node + * (R_Port from R_Port pool) directly + */ + rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC, + v_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)", + v_lport->port_id, rport->nport_id, + rport->rp_state, v_reuse_flag); + + switch (v_reuse_flag) { + case UNF_RPORT_REUSE_ONLY: + ret = unf_rport_reuse_only(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_INIT: + ret = unf_rport_reuse_init(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_RECOVER: + ret = unf_rport_reuse_recover(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list, + * NOTE: do nothing + */ + rport = NULL; + } + break; + + default: + break; + } + } + + return rport; +} + +unsigned int unf_get_port_feature(unsigned long long v_wwpn) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct list_head list_temp_node; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + /* can't find wwpn */ + if (list_empty(&port_fea_pool->list_free_head)) { + /* free is empty, transport busy to free */ + list_temp_node = port_fea_pool->list_free_head; + port_fea_pool->list_free_head = port_fea_pool->list_busy_head; + port_fea_pool->list_busy_head = list_temp_node; + } + + port_fea = list_entry((&port_fea_pool->list_free_head)->prev, + struct unf_rport_feature_recard_s, + entry_feature); + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, &port_fea_pool->list_busy_head); + + port_fea->wwpn = v_wwpn; + port_fea->port_feature = UNF_PORT_MODE_UNKNOWN; + + 
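/* no record matched this WWPN: a free entry was recycled above and
+ * bound to it; report unknown until unf_update_port_feature() is called
+ */
+ 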
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); + return UNF_PORT_MODE_UNKNOWN; +} + +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, + struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); +} diff --git a/drivers/scsi/huawei/hifc/unf_rport.h b/drivers/scsi/huawei/hifc/unf_rport.h new file mode 100644 index 0000000000000000000000000000000000000000..5e1e6551b94a96a73e29b345c2faa5edde9319bc --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_RPORT_H +#define __UNF_RPORT_H + +#define UNF_MAX_SCSI_ID 2048 +#define UNF_LOSE_TMO 30 +#define UNF_RPORT_INVALID_INDEX 0xffff + +/* RSCN compare DISC list with local RPort macro */ +#define UNF_RPORT_NEED_PROCESS 0x1 +#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2 +#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3 +#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4 +#define UNF_RPORT_NOT_NEED_PROCESS 0x5 + +#define UNF_ECHO_SEND_MAX_TIMES 1 + +extern struct unf_rport_feature_pool_s *port_fea_pool; + +enum unf_rport_login_state_e { + UNF_RPORT_ST_INIT = 0x1000, /* initialized */ + UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */ + UNF_RPORT_ST_READY, /* ready for use */ + UNF_RPORT_ST_LOGO, /* port logout sent */ + UNF_RPORT_ST_CLOSING, /* being closed */ + UNF_RPORT_ST_DELETE, /* port being deleted */ + UNF_RPORT_ST_BUTT +}; + +enum unf_rport_event_e { + UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000, + UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001, + UNF_EVENT_RPORT_ENTER_PRLI = 0x9002, + UNF_EVENT_RPORT_READY = 0x9003, + UNF_EVENT_RPORT_LOGO = 0x9004, + UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005, + UNF_EVENT_RPORT_RECOVERY = 0x9006, + UNF_EVENT_RPORT_RELOGIN = 0x9007, + UNF_EVENT_RPORT_LINK_DOWN = 0x9008, + UNF_EVENT_RPORT_BUTT +}; + +/* RPort local link state */ +enum unf_port_state_e { + UNF_PORT_STATE_LINKUP = 0x1001, + UNF_PORT_STATE_LINKDOWN = 0x1002 +}; + +enum unf_rport_reuse_flag_e { + UNF_RPORT_REUSE_ONLY = 0x1001, + UNF_RPORT_REUSE_INIT = 0x1002, + UNF_RPORT_REUSE_RECOVER = 0x1003 +}; + +struct unf_disc_rport_s { + /* RPort entry */ + struct list_head entry_rport; + + unsigned int nport_id; /* Remote port NPortID */ + unsigned int disc_done; /* 1:Disc done */ +}; + +struct 
unf_rport_feature_pool_s { + struct list_head list_busy_head; + struct list_head list_free_head; + void *p_port_feature_pool_addr; + spinlock_t port_fea_pool_lock; +}; + +struct unf_rport_feature_recard_s { + struct list_head entry_feature; + unsigned long long wwpn; + unsigned int port_feature; + unsigned int reserved; +}; + +struct unf_os_thread_private_data_s { + struct list_head list; + spinlock_t spin_lock; + struct task_struct *thread; + unsigned int in_process; + unsigned int cpu_id; + atomic_t user_count; +}; + +/* Remote Port struct */ +struct unf_rport_s { + unsigned int max_frame_size; + unsigned int supported_classes; + + /* Dynamic Attributes */ + /* Remote Port loss timeout in seconds. */ + unsigned int dev_loss_tmo; + + unsigned long long node_name; + unsigned long long port_name; + unsigned int nport_id; /* Remote port NPortID */ + unsigned int local_nport_id; + + unsigned int roles; + + /* Remote port local INI state */ + enum unf_port_state_e lport_ini_state; + enum unf_port_state_e last_lport_ini_state; + + /* Remote port local TGT state */ + enum unf_port_state_e lport_tgt_state; + enum unf_port_state_e last_lport_tgt_state; + + /* Port Type:fc */ + unsigned int port_type; + + /* RPort reference counter */ + atomic_t rport_ref_cnt; + + /* Pending IO count */ + atomic_t pending_io_cnt; + + /* RPort entry */ + struct list_head entry_rport; + + /* Port State,delay reclaim when uiRpState == complete. */ + enum unf_rport_login_state_e rp_state; + unsigned int disc_done; /* 1:Disc done */ + + struct unf_lport_s *lport; + void *rport; + spinlock_t rport_state_lock; + + /* Port attribution */ + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int options; /* ini or tgt */ + unsigned int last_report_linkup_options; + unsigned int fcp_conf_needed; /* INI Rport send FCP CONF flag */ + unsigned int tape_support_needed; /* INI tape support flag */ + unsigned int retries; /* special req retry times */ + unsigned int logo_retries; /* logo error recovery retry times */ + unsigned int mas_retries; /* special req retry times */ + /* Rport alloc jiffies */ + unsigned long long rport_alloc_jifs; + + void *session; + + /* binding with SCSI */ + unsigned int scsi_id; + + /* disc list compare flag */ + unsigned int rscn_position; + + unsigned int rport_index; + + /* RPort timer,closing status */ + struct work_struct closing_work; + + /* RPort timer,rport linkup */ + struct work_struct start_work; + + /* RPort timer,recovery */ + struct delayed_work recovery_work; + + /* RPort timer,TGT mode,PRLI waiting */ + struct delayed_work open_work; + + struct semaphore task_sema; + /* Callback after rport Ready/delete.[with state:ok/fail]. 
+ * Creat/free TGT session here + * input : L_Port,R_Port,state:ready + * --creat session/delete--free session + */ + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int); + + struct unf_os_thread_private_data_s *data_thread; +}; + +#define UNF_IO_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_result) \ + do { \ + if (likely(((v_io_result) < UNF_MAX_IO_RETURN_VALUE) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->io_done_cnt[v_io_result]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] io return value(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_result, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_CMD_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_MAX_SCSI_CMD) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&((v_scsi_table->wwn_rport_info_table[v_scsi_id]).dfx_counter->scsi_cmd_cnt[v_io_type])); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle_result[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event); +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att); +void unf_rport_enter_closing(struct unf_rport_s *v_rport); +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport); +void unf_rport_error_recovery(struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id); +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport); +void unf_rport_ref_dec(struct unf_rport_s *v_rport); + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_delay_login(struct 
unf_rport_s *v_rport); +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id); +void *unf_rport_get_free_and_init(void *v_lport, + unsigned int v_port_type, + unsigned int v_nport_id); +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id); +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +void unf_sesion_loss_timeout(struct work_struct *v_work); +unsigned int unf_get_port_feature(unsigned long long v_wwpn); +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_scsi.c b/drivers/scsi/huawei/hifc/unf_scsi.c new file mode 100644 index 0000000000000000000000000000000000000000..2f5cb0e723fbb93a8764c985deb88387a2249f8d --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_scsi.c @@ -0,0 +1,1579 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_scsi_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_portman.h" +#include "unf_npiv.h" +#include "unf_exchg.h" +#include "unf_io.h" + +static int unf_scsi_queue_cmd(struct Scsi_Host *shost, + struct scsi_cmnd *v_cmnd); +static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd); +static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_slave_alloc(struct scsi_device *sdev); +static void unf_scsi_destroy_slave(struct scsi_device *sdev); +static int unf_scsi_slave_configure(struct scsi_device *sdev); +static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time); +static void unf_scsi_scan_start(struct Scsi_Host *shost); + +static struct scsi_transport_template *scsi_transport_template; +static struct scsi_transport_template *scsi_transport_template_v; + +struct unf_ini_error_code_s ini_error_code_table1[] = { + { UNF_IO_SUCCESS, UNF_SCSI_HOST(DID_OK) }, + { UNF_IO_ABORTED, UNF_SCSI_HOST(DID_ABORT) }, + { UNF_IO_FAILED, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_ABTS, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_LOGIN, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_ABORT_REET, UNF_SCSI_HOST(DID_RESET) }, + { UNF_IO_ABORT_FAILED, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_OUTOF_ORDER, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_FTO, UNF_SCSI_HOST(DID_TIME_OUT) }, + { UNF_IO_LINK_FAILURE, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_OVER_FLOW, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_RSP_OVER, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_LOST_FRAME, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_UNDER_FLOW, UNF_SCSI_HOST(DID_OK) }, + { UNF_IO_HOST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_SEST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_INVALID_ENTRY, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_SEQ_NOT, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_REJECT, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_EDC_IN_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_EDC_OUT_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_UNINIT_KEK_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_DEK_OUTOF_RANGE, 
UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_UNWRAP_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_TAG_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_ECC_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_BLOCK_SIZE_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ILLEGAL_CIPHER_MODE, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_CLEAN_UP, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORTED_BY_TARGET, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_TRANSPORT_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_LINK_FLASH, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_TIMEOUT, UNF_SCSI_HOST(DID_TIME_OUT) }, + { UNF_IO_DMA_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_NO_LPORT, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_NO_XCHG, UNF_SCSI_HOST(DID_SOFT_ERROR) }, + { UNF_IO_SOFT_ERR, UNF_SCSI_HOST(DID_SOFT_ERROR) }, + { UNF_IO_PORT_LOGOUT, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_ERREND, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_DIF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) }, + { UNF_IO_INCOMPLETE, UNF_SCSI_HOST(DID_IMM_RETRY) }, + { UNF_IO_DIF_REF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) }, + { UNF_IO_DIF_GEN_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) } +}; + +unsigned int ini_err_code_table_cnt1 = + sizeof(ini_error_code_table1) / sizeof(struct unf_ini_error_code_s); + +static void unf_set_rport_loss_tmo(struct fc_rport *rport, + unsigned int timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +static void unf_get_host_port_id(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + fc_host_port_id(shost) = lport->port_id; +} + +static void unf_get_host_speed(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + unsigned int speed = FC_PORTSPEED_UNKNOWN; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->speed) { + case UNF_PORT_SPEED_2_G: + speed = FC_PORTSPEED_2GBIT; + break; + + case UNF_PORT_SPEED_4_G: + speed = FC_PORTSPEED_4GBIT; + break; + + case UNF_PORT_SPEED_8_G: + speed = FC_PORTSPEED_8GBIT; + break; + + case UNF_PORT_SPEED_16_G: + speed = FC_PORTSPEED_16GBIT; + break; + + case UNF_PORT_SPEED_32_G: + speed = FC_PORTSPEED_32GBIT; + break; + + default: + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) with unknown speed(0x%x) for FC mode", + lport->port_id, lport->speed); + break; + } + + fc_host_speed(shost) = speed; +} + +static void unf_get_host_port_type(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + unsigned int port_type = FC_PORTTYPE_UNKNOWN; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->en_act_topo) { + case UNF_ACT_TOP_PRIVATE_LOOP: + port_type = FC_PORTTYPE_LPORT; + break; + + case UNF_ACT_TOP_PUBLIC_LOOP: + port_type = FC_PORTTYPE_NLPORT; + break; + + case UNF_ACT_TOP_P2P_DIRECT: + port_type = FC_PORTTYPE_PTP; + break; + + case UNF_ACT_TOP_P2P_FABRIC: + port_type = FC_PORTTYPE_NPORT; + break; + + default: + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) with unknown topo type(0x%x) for FC mode", + lport->port_id, lport->en_act_topo); + break; + } + + 
fc_host_port_type(shost) = port_type; +} + +static void unf_get_symbolic_name(struct Scsi_Host *shost) +{ + unsigned char *name = NULL; + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Check l_port failed"); + + return; + } + + name = fc_host_symbolic_name(shost); + if (name) { + snprintf(name, FC_SYMBOLIC_NAME_SIZE, + "HIFC_FW_RELEASE:%s HIFC_DRV_RELEASE:%s", + lport->fw_version, UNF_FC_VERSION); + } +} + +static void unf_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + fc_host_fabric_name(shost) = lport->fabric_node_name; +} + +static void unf_get_host_port_state(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + enum fc_port_state port_state; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->link_up) { + case UNF_PORT_LINK_DOWN: + port_state = FC_PORTSTATE_OFFLINE; + break; + + case UNF_PORT_LINK_UP: + port_state = FC_PORTSTATE_ONLINE; + break; + + default: + port_state = FC_PORTSTATE_UNKNOWN; + break; + } + + fc_host_port_state(shost) = port_state; +} + +static void unf_dev_loss_timeout_callbk(struct fc_rport *rport) +{ + /* + * NOTE: about rport->dd_data + * --->>> local SCSI_ID + * 1. Assignment during scsi rport link up + * 2. Released when scsi rport link down & timeout(30s) + * 3. Used during scsi do callback with slave_alloc function + */ + struct Scsi_Host *host = NULL; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + + if (unlikely(!rport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null"); + + return; + } + + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + + return; + } + + /* according to Local SCSI_ID */ + scsi_id = *(unsigned int *)(rport->dd_data); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return; + } + + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[event]Port(0x%x_0x%x) RPort scsi_id(0x%x) target_id(0x%x) loss timeout", + lport->port_id, lport->nport_id, + scsi_id, rport->scsi_target_id); + + atomic_inc(&lport->session_loss_tmo); + + /* Free SCSI ID & set table state with DEAD */ + (void)unf_free_scsi_id(lport, scsi_id); + } else { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + } + + /* reset scsi rport dd_data(local SCSI_ID) */ + *((unsigned int *)rport->dd_data) = INVALID_VALUE32; +} + +int unf_scsi_create_vport(struct fc_vport *fc_port, bool disabled) +{ + struct unf_lport_s *vport = NULL; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *shost = NULL; + struct vport_config_s vport_config = { 0 }; + + shost = vport_to_shost(fc_port); + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unf_is_lport_valid(lport) != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + + return RETURN_ERROR; + } + + vport_config.port_name = fc_port->port_name; + 
+ vport_config.port_mode = fc_port->roles; + + vport = unf_create_vport(lport, &vport_config); + if (!vport) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) Create Vport failed on lldrive", + lport->port_id); + + return RETURN_ERROR; + } + + fc_port->dd_data = vport; + + vport->vport = fc_port; + + return RETURN_OK; +} + +int unf_scsi_delete_vport(struct fc_vport *fc_port) +{ + int ret = RETURN_ERROR; + struct unf_lport_s *vport = NULL; + + vport = (struct unf_lport_s *)fc_port->dd_data; + + if (unf_is_lport_valid(vport) != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]VPort(%p) is invalid or is removing", + vport); + + fc_port->dd_data = NULL; + + return ret; + } + + ret = (int)unf_destroy_one_vport(vport); + if (ret != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]VPort(0x%x) destroy failed on drive", + vport->port_id); + + return ret; + } + + fc_port->dd_data = NULL; + + return ret; +} + +struct fc_function_template function_template = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, + + .get_host_port_id = unf_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = unf_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = unf_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = unf_get_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = NULL, + .show_host_system_hostname = 1, + .get_host_fabric_name = unf_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = unf_get_host_port_state, + .show_host_port_state = 1, + + .dd_fcrport_size = sizeof(void *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = NULL, + .show_starget_node_name = 1, + .get_starget_port_name = NULL, + .show_starget_port_name = 1, + .get_starget_port_id = NULL, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 0, + + .issue_fc_host_lip = NULL, + .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, + .terminate_rport_io = NULL, + .get_fc_host_stats = NULL, + + .vport_create = unf_scsi_create_vport, + .vport_disable = NULL, + .vport_delete = unf_scsi_delete_vport, + .bsg_request = NULL, + .bsg_timeout = NULL, +}; + +struct fc_function_template function_template_v = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, + + .get_host_port_id = unf_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = unf_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = unf_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = unf_get_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = NULL, + .show_host_system_hostname = 1, + .get_host_fabric_name = unf_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = unf_get_host_port_state, + .show_host_port_state = 1, + + .dd_fcrport_size = sizeof(void *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = NULL, + .show_starget_node_name = 1, + .get_starget_port_name = NULL, + .show_starget_port_name = 1, + .get_starget_port_id = NULL, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .issue_fc_host_lip = NULL, + .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, + .terminate_rport_io = NULL, + .get_fc_host_stats = NULL, + + .vport_create 
= NULL, + .vport_disable = NULL, + .vport_delete = NULL, + .bsg_request = NULL, + .bsg_timeout = NULL, +}; + +struct scsi_host_template scsi_host_template = { + .module = THIS_MODULE, + .name = "HIFC", + + .queuecommand = unf_scsi_queue_cmd, + .eh_abort_handler = unf_scsi_abort_scsi_cmnd, + .eh_device_reset_handler = unf_scsi_device_reset_handler, + + .eh_target_reset_handler = unf_scsi_target_reset_handler, + .eh_bus_reset_handler = unf_scsi_bus_reset_handler, + .eh_host_reset_handler = NULL, + + .slave_configure = unf_scsi_slave_configure, + .slave_alloc = unf_scsi_slave_alloc, + .slave_destroy = unf_scsi_destroy_slave, + + .scan_finished = unf_scsi_scan_finished, + .scan_start = unf_scsi_scan_start, + + .this_id = -1, + .cmd_per_lun = 3, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = NULL, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .supported_mode = MODE_INITIATOR, +}; + +static void unf_unmap_prot_sgl(struct scsi_cmnd *v_cmnd) +{ + struct device *dev; + + if ((scsi_get_prot_op(v_cmnd) != SCSI_PROT_NORMAL) && + hifc_dif_enable && (scsi_prot_sg_count(v_cmnd))) { + dev = v_cmnd->device->host->dma_dev; + dma_unmap_sg(dev, scsi_prot_sglist(v_cmnd), + (int)scsi_prot_sg_count(v_cmnd), + v_cmnd->sc_data_direction); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "scsi done cmd:%p op:%d,difsglcount:%d", + v_cmnd, scsi_get_prot_op(v_cmnd), + scsi_prot_sg_count(v_cmnd)); + } +} + +void unf_scsi_done(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct scsi_cmnd *cmnd = NULL; + + UNF_CHECK_VALID(0x509, UNF_TRUE, v_scsi_cmnd, return); + cmnd = (struct scsi_cmnd *)v_scsi_cmnd->upper_cmnd; + UNF_CHECK_VALID(0x510, UNF_TRUE, cmnd, return); + UNF_CHECK_VALID(0x511, UNF_TRUE, cmnd->scsi_done, return); + + scsi_set_resid(cmnd, (int)v_scsi_cmnd->resid); + + cmnd->result = v_scsi_cmnd->result; + scsi_dma_unmap(cmnd); + unf_unmap_prot_sgl(cmnd); + return cmnd->scsi_done(cmnd); +} + +void unf_host_init_attr_setting(unf_scsi_host_s *scsi_host) +{ + struct unf_lport_s *lport = NULL; + unsigned int speed = FC_PORTSPEED_UNKNOWN; + + lport = (struct unf_lport_s *)scsi_host->hostdata[0]; + fc_host_supported_classes(scsi_host) = FC_COS_CLASS3; /* class_3 */ + fc_host_dev_loss_tmo(scsi_host) = + (unsigned int)unf_get_link_lose_tmo(lport); /* 30s */ + fc_host_node_name(scsi_host) = lport->node_name; + fc_host_port_name(scsi_host) = lport->port_name; + + fc_host_max_npiv_vports(scsi_host) = + (unsigned short)((lport == lport->root_lport) ? 
+ lport->low_level_func.support_max_npiv_num : 0); + fc_host_npiv_vports_inuse(scsi_host) = 0; + fc_host_next_vport_number(scsi_host) = 0; + + /* About speed mode */ + if ((lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_32_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_32_G)) { + speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | + FC_PORTSPEED_8GBIT; + } else if ((lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_16_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_16_G)) { + speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT; + } else if ((lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_8_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_8_G)) { + speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT; + } + + fc_host_supported_speeds(scsi_host) = speed; +} + +int unf_alloc_scsi_host(unf_scsi_host_s **v_scsi_host, + struct unf_host_param_s *v_host_param) +{ + int ret = RETURN_ERROR; + struct Scsi_Host *scsi_host = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x512, UNF_TRUE, v_scsi_host, return RETURN_ERROR); + UNF_CHECK_VALID(0x513, UNF_TRUE, v_host_param, return RETURN_ERROR); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Alloc scsi host..."); + + /* Check L_Port validity */ + lport = (struct unf_lport_s *)(v_host_param->lport); + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is NULL and return directly"); + + return RETURN_ERROR; + } + + scsi_host_template.can_queue = v_host_param->can_queue; + scsi_host_template.cmd_per_lun = v_host_param->cmnd_per_lun; + scsi_host_template.sg_tablesize = v_host_param->sg_table_size; + scsi_host_template.max_sectors = v_host_param->max_sectors; + + /* Alloc scsi host */ + scsi_host = scsi_host_alloc(&scsi_host_template, + sizeof(unsigned long long)); + if (unlikely(!scsi_host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register scsi host failed"); + + return RETURN_ERROR; + } + + scsi_host->max_channel = v_host_param->max_channel; + scsi_host->max_lun = v_host_param->max_lun; + scsi_host->max_cmd_len = v_host_param->max_cmnd_len; + scsi_host->unchecked_isa_dma = 0; + scsi_host->hostdata[0] = (unsigned long)lport; /* save L_Port to scsi */ + scsi_host->unique_id = scsi_host->host_no; + scsi_host->max_id = v_host_param->max_id; + scsi_host->transportt = (lport == lport->root_lport) ? 
+ scsi_transport_template : scsi_transport_template_v; + + /* register DIF/DIX protection */ + if (hifc_dif_enable) { + /* Enable DIF and DIX function */ + scsi_host_set_prot(scsi_host, hifc_dif_type); + + hifc_guard = SHOST_DIX_GUARD_CRC; + /* Enable IP checksum algorithm in DIX */ + if (dix_flag) + hifc_guard |= SHOST_DIX_GUARD_IP; + scsi_host_set_guard(scsi_host, hifc_guard); + } + + /* Add scsi host */ + ret = scsi_add_host(scsi_host, v_host_param->pdev); + if (unlikely(ret)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Add scsi host failed with return value %d", + ret); + + scsi_host_put(scsi_host); + return RETURN_ERROR; + } + + /* Set scsi host attribute */ + unf_host_init_attr_setting(scsi_host); + *v_scsi_host = scsi_host; + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Alloc and add scsi host(0x%llx) succeed", + (unsigned long long)scsi_host); + + return RETURN_OK; +} + +void unf_free_scsi_host(unf_scsi_host_s *v_scsi_host) +{ + struct Scsi_Host *scsi_host = NULL; + + scsi_host = v_scsi_host; + fc_remove_host(scsi_host); + scsi_remove_host(scsi_host); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Remove scsi host(%d) succeed", scsi_host->host_no); + + scsi_host_put(scsi_host); +} + +static int unf_get_protect_mode(struct unf_lport_s *lport, + struct scsi_cmnd *v_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct scsi_cmnd *cmd = NULL; + int difsegcnt = 0; + struct unf_dif_control_info_s *dif_control_info = NULL; + + cmd = v_cmnd; + dif_control_info = &v_scsi_cmnd->dif_control; + + switch (scsi_get_prot_op(cmd)) { + /* OS-HBA: Unprotected, HBA-Target: Protected */ + case SCSI_PROT_READ_STRIP: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_DELETE; + break; + case SCSI_PROT_WRITE_INSERT: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_INSERT; + break; + + /* OS-HBA: Protected, HBA-Target: Unprotected */ + case SCSI_PROT_READ_INSERT: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_INSERT; + break; + case SCSI_PROT_WRITE_STRIP: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_DELETE; + break; + + /* OS-HBA: Protected, HBA-Target: Protected */ + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_FORWARD; + break; + + default: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_FORWARD; + break; + } + + if (dif_sgl_mode) + dif_control_info->flags |= UNF_DIF_DOUBLE_SGL; + + dif_control_info->protect_opcode |= + UNF_VERIFY_CRC_MASK | UNF_VERIFY_LBA_MASK; + dif_control_info->dif_sge_count = scsi_prot_sg_count(cmd); + dif_control_info->dif_sgl = scsi_prot_sglist(cmd); + dif_control_info->start_lba = + cpu_to_le32(((uint32_t)(0xffffffff & scsi_get_lba(cmd)))); + + if (scsi_prot_sg_count(cmd)) { + difsegcnt = dma_map_sg(&lport->low_level_func.dev->dev, + scsi_prot_sglist(cmd), + (int)scsi_prot_sg_count(cmd), + cmd->sc_data_direction); + if (unlikely(!difsegcnt)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) cmd:%p map dif sgl err", + lport->port_id, cmd); + return UNF_RETURN_ERROR; + } + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "build scsi cmd:%p op:%d,difsglcount:%d,difsegcnt:%d", + cmd, scsi_get_prot_op(cmd), scsi_prot_sg_count(cmd), + difsegcnt); + return RETURN_OK; +} + +unsigned int unf_get_frame_entry_buf(void *v_up_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_port_id, + unsigned int *v_index, + char **v_buf, + unsigned int 
*v_buf_len) +{ +#define HIFC_1822_MAX_DMA_LENGTH (0x20000 - 1) + struct scatterlist *scsi_sgl = *v_upper_sgl; + + UNF_REFERNCE_VAR(v_up_cmnd); + UNF_REFERNCE_VAR(v_driver_sgl); + UNF_REFERNCE_VAR(v_port_id); + + if (unlikely(!scsi_sgl)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Command(0x%p) can not get SGL.", v_up_cmnd); + return RETURN_ERROR; + } + *v_buf = (char *)sg_dma_address(scsi_sgl); + *v_buf_len = sg_dma_len(scsi_sgl); + *v_upper_sgl = (void *)sg_next(scsi_sgl); + if (unlikely((*v_buf_len > HIFC_1822_MAX_DMA_LENGTH) || + (*v_buf_len == 0))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Command(0x%p) dmalen:0x%x is not support.", + v_up_cmnd, *v_buf_len); + return RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_scsi_queue_cmd(struct Scsi_Host *shost, + struct scsi_cmnd *v_cmnd) +{ + struct Scsi_Host *host = NULL; + struct scsi_cmnd *cmd = NULL; + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + unsigned int scsi_id = 0; + unsigned int en_scsi_state = 0; + int ret = SCSI_MLQUEUE_HOST_BUSY; + // unsigned int uiError = 0; + struct unf_lport_s *lport = NULL; + struct fc_rport *p_rport = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int ret_value = 0; + struct unf_rport_s *rport = NULL; + unsigned int cmnd_result = 0; + unsigned int rport_state_err = 0; + unsigned int scan_device_cmd = 0; + unsigned long long raw_lun_id = 0; + int data_seg_cnt = 0; + + static atomic64_t ull_count; + host = shost; + cmd = v_cmnd; + UNF_CHECK_VALID(0x515, UNF_TRUE, host, return RETURN_ERROR); + UNF_CHECK_VALID(0x514, UNF_TRUE, cmd, return RETURN_ERROR); + + /* Get L_Port from scsi_cmnd */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Check l_port failed, cmd(%p)", cmd); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + + /* Check device/session local state by device_id */ + /* local SCSI_ID from device */ + scsi_id = (unsigned int)((unsigned long long)cmd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) scsi_id(0x%x) is max than %d", + lport->port_id, scsi_id, UNF_MAX_SCSI_ID); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + + scsi_image_table = &lport->rport_scsi_table; + UNF_SCSI_CMD_CNT(scsi_image_table, scsi_id, cmd->cmnd[0]); + + /* Get scsi r_port */ + /*lint -e666 -esym(666,*)*/ + p_rport = starget_to_rport(scsi_target(cmd->device)); + if (unlikely(!p_rport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) cmd(%p) to get scsi rport failed", + lport->port_id, cmd); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return 0; + } + + if (unlikely(!scsi_image_table->wwn_rport_info_table)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]Port(0x%x) WwnRportInfoTable NULL", lport->port_id); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return 0; + } + + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) 
target_id(0x%x) cmd(0x%p) is removing", + lport->port_id, scsi_id, p_rport, p_rport->scsi_target_id, cmd); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return 0; + } + + en_scsi_state = atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].en_scsi_state); + if (unlikely(en_scsi_state != UNF_SCSI_ST_ONLINE)) { + if (en_scsi_state == UNF_SCSI_ST_OFFLINE) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_state(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is busy", + lport->port_id, en_scsi_state, scsi_id, + p_rport, p_rport->scsi_target_id, cmd); + + scan_device_cmd = (cmd->cmnd[0] == INQUIRY) || + (cmd->cmnd[0] == REPORT_LUNS); + + /* report lun or inquiry cmd, if send failed, + * do not retry, prevent the scan_mutex in + * scsi host locked up by eachother + */ + if (scan_device_cmd) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) DID_NO_CONNECT", + lport->port_id, host->host_no, + scsi_id, + (unsigned long long)cmd->device->lun, + cmd->cmnd[0]); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + ret_value); + + return 0; + } + + if (likely(scsi_image_table->wwn_rport_info_table)) { + if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) + atomic64_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->target_busy); + } + + /* Target busy: need scsi retry */ + return SCSI_MLQUEUE_TARGET_BUSY; + } + /* timeout(DEAD): scsi_done & return 0 & I/O error */ + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is loss timeout", + lport->port_id, scsi_id, p_rport, + p_rport->scsi_target_id, cmd); + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + + return 0; + } + + raw_lun_id = ((unsigned long long)cmd->device->lun << 16) & + 0x00000000ffff0000; + if (scsi_sg_count(cmd)) { + data_seg_cnt = dma_map_sg(&lport->low_level_func.dev->dev, + scsi_sglist(cmd), + (int)scsi_sg_count(cmd), + cmd->sc_data_direction); + if (unlikely(!data_seg_cnt)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), dma map sg err", + lport->port_id, scsi_id, + p_rport, p_rport->scsi_target_id, cmd); + cmd->result = DID_BUS_BUSY << 16; + cmd->scsi_done(cmd); + ret_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + /* Construct local SCSI CMND info */ + /* save host_no to scsi_cmnd->scsi_host_id */ + scsi_cmnd.scsi_host_id = host->host_no; + scsi_cmnd.scsi_id = scsi_id; + scsi_cmnd.lun_id = raw_lun_id; + scsi_cmnd.data_direction = cmd->sc_data_direction; + scsi_cmnd.underflow = cmd->underflow; + scsi_cmnd.cmnd_len = cmd->cmd_len; + scsi_cmnd.pcmnd = cmd->cmnd; + scsi_cmnd.transfer_len = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + scsi_cmnd.sense_buf_len = SCSI_SENSE_DATA_LEN; + scsi_cmnd.sense_buf = cmd->sense_buffer; + scsi_cmnd.time_out = 0; + scsi_cmnd.upper_cmnd = cmd; + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(host)); + scsi_cmnd.entry_count = data_seg_cnt; + scsi_cmnd.sgl = scsi_sglist(cmd); + scsi_cmnd.pfn_unf_ini_get_sgl_entry = 
unf_get_frame_entry_buf; + scsi_cmnd.pfn_done = unf_scsi_done; + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; + scsi_cmnd.err_code_table_cout = ini_err_code_table_cnt1; + scsi_cmnd.err_code_table = ini_error_code_table1; + scsi_cmnd.world_id = 0xfffffffc; + scsi_cmnd.cmnd_sn = atomic64_inc_return(&ull_count); + if (unlikely(scsi_cmnd.cmnd_sn == 0)) + scsi_cmnd.cmnd_sn = atomic64_inc_return(&ull_count); + + if ((scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) && + hifc_dif_enable) { + ret = unf_get_protect_mode(lport, cmd, &scsi_cmnd); + if (ret != RETURN_OK) { + cmd->result = DID_BUS_BUSY << 16; + cmd->scsi_done(cmd); + ret_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + ret_value); + scsi_dma_unmap(cmd); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) transfer length(0x%x) cmd_len(0x%x) direction(0x%x) cmd(0x%x) under_flow(0x%x)", + lport->port_id, host->host_no, scsi_id, + (unsigned long long)cmd->device->lun, + scsi_cmnd.transfer_len, + scsi_cmnd.cmnd_len, cmd->sc_data_direction, + scsi_cmnd.pcmnd[0], scsi_cmnd.underflow); + /* Bind the Exchange address corresponding to scsi_cmnd to + * scsi_cmnd->host_scribble + */ + cmd->host_scribble = (unsigned char *)scsi_cmnd.cmnd_sn; + ret = unf_cm_queue_command(&scsi_cmnd); + if (ret != RETURN_OK) { + rport = unf_find_rport_by_scsi_id(lport, + ini_error_code_table1, + ini_err_code_table_cnt1, + scsi_id, + &cmnd_result); + rport_state_err = (!rport) || + (rport->lport_ini_state != + UNF_PORT_STATE_LINKUP) || + (rport->rp_state == UNF_RPORT_ST_CLOSING); + scan_device_cmd = (cmd->cmnd[0] == INQUIRY) || + (cmd->cmnd[0] == REPORT_LUNS); + + /* report lun or inquiry cmd if send failed, do not retry, + * to prevent the scan_mutex in the scsi host from being locked up by each other + */ + if (rport_state_err && scan_device_cmd) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) cmResult(0x%x) DID_NO_CONNECT", + lport->port_id, host->host_no, scsi_id, + (unsigned long long)cmd->device->lun, + cmd->cmnd[0], cmnd_result); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + scsi_dma_unmap(cmd); + unf_unmap_prot_sgl(cmd); + return 0; + } + + /* Host busy: scsi need to retry */ + ret = SCSI_MLQUEUE_HOST_BUSY; + if (likely(scsi_image_table->wwn_rport_info_table)) { + if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) + atomic64_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->host_busy); + } + scsi_dma_unmap(cmd); + unf_unmap_prot_sgl(cmd); + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) return(0x%x) to process INI IO failed", + lport->port_id, ret); + } + return ret; +} + +static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd) +{ + /* SCSI ABORT Command --->>> FC ABTS */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + int ret = FAILED; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x516, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; 
+ err_handle = UNF_SCSI_ABORT_IO_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[abort]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } else { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Lport(%p) is moving or null", lport); + + return UNF_SCSI_ABORT_FAIL; + } + + /* Check local SCSI_ID validity */ + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return UNF_SCSI_ABORT_FAIL; + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; // L_Port ID + scsi_cmnd.scsi_id = scsi_id; // R_Port ID (Target ID) + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; // LUN ID + scsi_cmnd.upper_cmnd = v_cmnd; // scsi_cmnd + // L_Port + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.cmnd_sn = (unsigned long long)(v_cmnd->host_scribble); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; + scsi_cmnd.pfn_done = unf_scsi_done; + scsi_cmnd.world_id = 0xfffffffc; + /* Process scsi Abort cmnd */ + ret = unf_cm_eh_abort_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_ABORT_IO_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, err_handle); + } + } + + return ret; +} + +static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* LUN reset */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_DEVICE_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[device_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } else { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is invalid"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + 
scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi device/LUN reset cmnd */ + ret = unf_cm_eh_device_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_DEVICE_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, + err_handle); + } + } + + return ret; +} + +static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* BUS Reset */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_BUS_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info][bus_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi BUS Reset cmnd */ + ret = unf_cm_bus_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_BUS_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, + err_handle); + } + } + + return ret; +} + +static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* Session reset/delete */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return RETURN_ERROR); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if 
(unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_TARGET_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, + err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[target_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi Target/Session reset/delete cmnd */ + ret = unf_cm_target_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_TARGET_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, err_handle); + } + } + + return ret; +} + +static int unf_scsi_slave_alloc(struct scsi_device *sdev) +{ + /*lint -e666 -esym(666,*)*/ + struct fc_rport *rport = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + /* About device */ + if (unlikely(!sdev)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SDev is null"); + + return -ENXIO; + } + + /* About scsi rport */ + rport = starget_to_rport(scsi_target(sdev)); + if (unlikely(!rport || fc_remote_port_chkready(rport))) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null"); + + if (rport) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is not ready(0x%x)", + fc_remote_port_chkready(rport)); + } + + return -ENXIO; + } + + /* About host */ + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + + return -ENXIO; + } + + /* About Local Port */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) != RETURN_OK) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is invalid"); + + return -ENXIO; + } + + /* About Local SCSI_ID */ + /* use local SCSI_ID to alloc slave device */ + scsi_id = *(unsigned int *)rport->dd_data; + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return -ENXIO; + } + + scsi_image_table = &lport->rport_scsi_table; + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + 
atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->device_alloc); + + atomic_inc(&lport->device_alloc); + /* save local SCSI_ID */ + sdev->hostdata = (void *)(unsigned long long)scsi_id; + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) use scsi_id(%d) to alloc device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, (unsigned int)sdev->lun); + + return 0; +} + +static void unf_scsi_destroy_slave(struct scsi_device *sdev) +{ + /* + * NOTE: about sdev->hostdata + * --->>> pointing to local SCSI_ID + * 1. Assignment during slave allocation + * 2. Released when callback for slave destroy + * 3. Used during: Queue_CMND, Abort CMND, Device Reset, + * Target Reset & Bus Reset + */ + /*lint -e666 -esym(666,*)*/ + struct fc_rport *rport = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + /* About scsi device */ + if (unlikely(!sdev)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SDev is null"); + return; + } + + /* About scsi rport */ + rport = starget_to_rport(scsi_target(sdev)); + if (unlikely(!rport)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null or remote port is not ready"); + return; + } + + /* About host */ + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + return; + } + + /* About L_Port */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + atomic_inc(&lport->device_destroy); + + scsi_id = (unsigned int)((unsigned long long)sdev->hostdata); + if ((scsi_id < UNF_MAX_SCSI_ID) && + (scsi_image_table->wwn_rport_info_table)) { + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->device_destroy); + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) with scsi_id(%d) to destroy slave device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, + (unsigned int)sdev->lun); + } else { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[err]Port(0x%x) scsi_id(%d) is invalid and destroy device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, + (unsigned int)sdev->lun); + } + } else { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + } + + sdev->hostdata = NULL; /* reset local SCSI_ID */ +} + +static int unf_scsi_slave_configure(struct scsi_device *sdev) +{ +#define UNF_SCSI_DEV_DEPTH 32 + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + scsi_change_queue_depth(sdev, UNF_SCSI_DEV_DEPTH); + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[event]Enter slave configure, set depth is %d, sdev->tagged_supported is (%d)", + UNF_SCSI_DEV_DEPTH, sdev->tagged_supported); + + return 0; +} + +static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Scan finished"); + + return 1; +} + +static void unf_scsi_scan_start(struct Scsi_Host *shost) +{ + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Start scsi scan..."); +} + +unsigned int unf_register_ini_transport(void) +{ + /* Register INI Transport */ + scsi_transport_template = fc_attach_transport(&function_template); + + if 
(!scsi_transport_template) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register FC transport to scsi failed"); + + return RETURN_ERROR; + } + + scsi_transport_template_v = fc_attach_transport(&function_template_v); + if (!scsi_transport_template_v) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register FC vport transport to scsi failed"); + + fc_release_transport(scsi_transport_template); + + return RETURN_ERROR; + } + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Register FC transport to scsi succeed"); + + return RETURN_OK; +} + +void unf_unregister_ini_transport(void) +{ + fc_release_transport(scsi_transport_template); + fc_release_transport(scsi_transport_template_v); + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Unregister FC transport succeed"); +} + +void unf_report_io_dm_event(void *v_lport, unsigned int type, + unsigned int value) +{ +} + +void unf_save_sense_data(void *scsicmd, const char *sense, int senslen) +{ + struct scsi_cmnd *cmd; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsicmd, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, sense, return); + + cmd = (struct scsi_cmnd *)scsicmd; + memcpy(cmd->sense_buffer, sense, senslen); +} diff --git a/drivers/scsi/huawei/hifc/unf_scsi_common.h b/drivers/scsi/huawei/hifc/unf_scsi_common.h new file mode 100644 index 0000000000000000000000000000000000000000..59580ad2e63e540c6a14cb11377c1a0265950455 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_scsi_common.h @@ -0,0 +1,1136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_SCSI_COMMON__ +#define __UNF_SCSI_COMMON__ + +#include "unf_log.h" +#include "hifc_knl_adp.h" + +#define DRV_ISCSI_NAME 223 + +#define SCSI_SENSE_DATA_LEN 96 + +#define DRV_SCSI_CDB_LEN 16 +#define DRV_SCSI_LUN_LEN 8 +#define DRV_PORTID_NUM 32 + +#ifndef SUCCESS +#define SUCCESS 0x2002 +#endif + +#ifndef FAILED +#define FAILED 0x2003 +#endif + +#ifndef FC_PORTSPEED_32GBIT +#define FC_PORTSPEED_32GBIT 0x40 +#endif + +/* + * FCTL defines (FrameHdr.Type_Fctl) + */ +#define FC_EXCHANGE_RESPONDER 0x00800000 +#define FC_LAST_SEQUENCE 0x00100000 +#define FC_END_SEQUENCE 0x00080000 +#define FC_SEQUENCE_INITIATIVE 0x00010000 + +/* + * FCTL common use defines + */ +#define FC_FCTL_RSP (FC_EXCHANGE_RESPONDER | FC_LAST_SEQUENCE | \ + FC_END_SEQUENCE) + +#define UNF_GID_PORT_CNT 2048 +#define UNF_RSCN_PAGE_SUM 255 + +#define UNF_CPU_ENDIAN + +#define UNF_NPORTID_MASK 0x00FFFFFF +#define UNF_DOMAIN_MASK 0x00FF0000 +#define UNF_AREA_MASK 0x0000FF00 +#define UNF_ALPA_MASK 0x000000FF + +#define UNF_NPORTID_WELLKNOWN_MASK 0x00fffff0 + +#define UNF_SCSI_ABORT_SUCCESS SUCCESS +#define UNF_SCSI_ABORT_FAIL FAILED + +#define UNF_SCSI_STATUS(byte) (byte) +#define UNF_SCSI_MSG(byte) ((byte) << 8) +#define UNF_SCSI_HOST(byte) ((byte) << 16) +#define UNF_SCSI_DRIVER(byte) ((byte) << 24) + +#define UNF_GET_SCSI_HOST_ID(scsi_host) ((scsi_host)->host_no) + +struct unf_fchead_s { + /* Routing control and Destination address of the seq */ + unsigned int rctl_did; + /* Class control and Source address of the sequence */ + unsigned int csctl_sid; + /* Data type and Initial frame control value of the seq */ + unsigned int type_fctl; + /* Seq ID, Data Field and Initial seq count */ + unsigned int seq_id_dfctl_seq_cnt; + /* Originator & Responder exchange IDs for the sequence */ + unsigned int oxid_rxid; + /* Relative offset of the first frame of the sequence */ + unsigned int 
parameter; +}; + +#define UNF_FCPRSP_CTL_LEN (24) +#define UNF_MAX_RSP_INFO_LEN (8) +#define UNF_RSP_LEN_VLD (1 << 0) +#define UNF_SENSE_LEN_VLD (1 << 1) +#define UNF_RESID_OVERRUN (1 << 2) +#define UNF_RESID_UNDERRUN (1 << 3) + +/* T10: FCP2r.07 9.4.1 Overview and format of FCP_RSP IU */ +struct unf_fcprsp_iu_s { + unsigned int ui_reserved[2]; + unsigned char uc_reserved[2]; + unsigned char control; + unsigned char fcp_status; + unsigned int fcp_residual; + unsigned int fcp_sense_len; /* Length of sense info field */ + /* Length of response info field in bytes 0,4 or 8 */ + unsigned int fcp_response_len; + /* Buffer for response info */ + unsigned char fcp_rsp_info[UNF_MAX_RSP_INFO_LEN]; + /* Buffer for sense info */ + unsigned char fcp_sense_info[SCSI_SENSE_DATA_LEN]; +} __attribute__((packed)); + +#define UNF_CMD_REF_MASK 0xFF000000 +#define UNF_TASK_ATTR_MASK 0x00070000 +#define UNF_TASK_MGMT_MASK 0x0000FF00 +#define UNF_FCP_WR_DATA 0x00000001 +#define UNF_FCP_RD_DATA 0x00000002 +#define UNF_CDB_LEN_MASK 0x0000007C +#define UNF_FCP_CDB_LEN_16 (16) +#define UNF_FCP_CDB_LEN_32 (32) +#define UNF_FCP_LUNID_LEN_8 (8) + +/* FCP-4 :Table 27 - RSP_CODE field */ +#define UNF_FCP_TM_RSP_COMPLETE (0) +#define UNF_FCP_TM_INVALID_CMND (0x2) +#define UNF_FCP_TM_RSP_REJECT (0x4) +#define UNF_FCP_TM_RSP_FAIL (0x5) +#define UNF_FCP_TM_RSP_SUCCEED (0x8) +#define UNF_FCP_TM_RSP_INCRECT_LUN (0x9) + +#define UNF_SET_TASK_MGMT_FLAGS(v_fcp_tm_code) ((v_fcp_tm_code) << 8) +#define UNF_GET_TASK_MGMT_FLAGS(v_control) \ + (((v_control) & UNF_TASK_MGMT_MASK) >> 8) + +enum unf_task_mgmt_cmnd_e { + UNF_FCP_TM_QUERY_TASK_SET = (1 << 0), + UNF_FCP_TM_ABORT_TASK_SET = (1 << 1), + UNF_FCP_TM_CLEAR_TASK_SET = (1 << 2), + UNF_FCP_TM_QUERY_UNIT_ATTENTION = (1 << 3), + UNF_FCP_TM_LOGICAL_UNIT_RESET = (1 << 4), + UNF_FCP_TM_TARGET_RESET = (1 << 5), + UNF_FCP_TM_CLEAR_ACA = (1 << 6), + UNF_FCP_TM_TERMINATE_TASK = (1 << 7) /* obsolete */ +}; + +struct unf_fcp_cmnd_s { + unsigned char lun[UNF_FCP_LUNID_LEN_8]; /* Logical unit number */ + + unsigned int control; /* Control field : + * uint8_t cmnd_ref; + * uint8_t task_attr:3; + * uint8_t reserved:5; + * uint8_t task_mgmt_flags; + * uint8_t wrdata:1; + * uint8_t rddata:1; + * uint8_t add_cdb_len:6; + */ + /* Payload data containing cdb info */ + unsigned char cdb[UNF_FCP_CDB_LEN_16]; + /* Number of bytes expected to be transferred */ + unsigned int data_length; +} __attribute__((packed)); + +struct unf_fcp_cmd_hdr_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + struct unf_fcp_cmnd_s fcp_cmnd; /* Fcp Cmnd struct */ +}; + +/* + * parameter struct + */ + +/* Common Services Parameter used for returning Fabric + * parameters. See FC-FS Rev. 1.90, FC-PH-3 Rev. 9.4 and see FC-DA 3.1. + * This is the structure that is used to enquire Fabric parameters + * after a Fabric login is successful. The fileds in this structure + * are relevant for FLOGI ACC. 
+ */ + +/* FC-LS-2 Table 140 Common Service Parameter applicability */ +struct unf_fabric_coparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_credit : 16; /* 0 [0-15] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int highest_version : 8; /* 0 [24-31] */ +#else + unsigned int highest_version : 8; /* 0 [24-31] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int bb_credit : 16; /* 0 [0-15] */ +#endif + + /* Word1 Common Features */ +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int r_t_tov : 1; /* 1 [19] */ + unsigned int reserved_co2 : 6; /* 1 [20-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int mnid_assignment : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int clean_address : 1; /* 1 [31] */ +#else + unsigned int reserved_co22 : 2; /* 1 [24-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int mnid_assignment : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int clean_address : 1; /* 1 [31] */ + + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int r_t_tov : 1; /* 1 [19] */ + unsigned int reserved_co25 : 4; /* 1 [20-23] */ + + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ +#endif + unsigned int r_a_tov; /* 2 [0-31] */ + unsigned int e_d_tov; /* 3 [0-31] */ +}; + +/* + * Common Services Parameter 16 byte structure. + * See FC-PH 4.3 Section 23.6.3, FC-PLDA Section 5.2 and + * TachLite Users Manual 3.24.1 + * the structure does not need to be packed. 
+ */ + +/* FC-LS-2 Table 140 Common Service Parameter applicability */ +/* Table 142 Common Service Parameters - PLOGI and PLOGI LS_ACC */ +struct unf_lgn_port_coparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_credit : 16; /* 0 [0-15] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int highest_version : 8; /* 0 [24-31] */ +#else + unsigned int highest_version : 8; /* 0 [24-31] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int bb_credit : 16; /* 0 [0-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int reserved_co2 : 7; /* 1 [19-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int vendor_version_level : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int continuously_increasing : 1; /* 1 [31] */ +#else + unsigned int reserved_co22 : 2; /* 1 [24-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int vendor_version_level : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int continuously_increasing : 1; /* 1 [31] */ + + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int reserved_co25 : 5; /* 1 [19-23] */ + + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int reserved_co1 : 4; /* 1 [12-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int relative_offset : 16; /* 2 [0-15] */ + unsigned int nport_total_concurrent_sequences : 16; /* 2 [16-31] */ +#else + unsigned int nport_total_concurrent_sequences : 16; /* 2 [16-31] */ + unsigned int relative_offset : 16; /* 2 [0-15] */ +#endif + + unsigned int e_d_tov; +}; + +/* + * Class services 16 byte structure. 
See FC-PH 4.3 Section 23.6.8 and + * FC-PLDA Section 5.3 + * the structure does not need to be packed + */ + +/* FC-LS-2 Table 145 Class Service Parameters Applicability */ +struct unf_lgn_port_clparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int reserved_cl1 : 6; /* 0 [0-5] */ + unsigned int ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ + unsigned int ic_data_compression_capable : 1; /* 0 [8] */ + + unsigned int ic_ack_generation_assistance : 1; /* 0 [9] */ + unsigned int ic_ack_n_capable : 1; /* 0 [10] */ + unsigned int ic_ack_o_capable : 1; /* 0 [11] */ + /* 0 [12-13] */ + unsigned int ic_initial_responder_processes_accociator : 2; + unsigned int ic_x_id_reassignment : 2; /* 0 [14-15] */ + + unsigned int reserved_cl2 : 7; /* 0 [16-22] */ + unsigned int priority : 1; /* 0 [23] */ + unsigned int buffered_class : 1; /* 0 [24] */ + unsigned int camp_on : 1; /* 0 [25] */ + unsigned int dedicated_simplex : 1; /* 0 [26] */ + unsigned int sequential_delivery : 1; /* 0 [27] */ + unsigned int stacked_connect_request : 2; /* 0 [28-29] */ + unsigned int intermix_mode : 1; /* 0 [30] */ + unsigned int valid : 1; /* 0 [31] */ +#else + unsigned int buffered_class : 1; /* 0 [24] */ + unsigned int camp_on : 1; /* 0 [25] */ + unsigned int dedicated_simplex : 1; /* 0 [26] */ + unsigned int sequential_delivery : 1; /* 0 [27] */ + unsigned int stacked_connect_request : 2; /* 0 [28-29] */ + unsigned int intermix_mode : 1; /* 0 [30] */ + unsigned int valid : 1; /* 0 [31] */ + unsigned int reserved_cl2 : 7; /* 0 [16-22] */ + unsigned int priority : 1; /* 0 [23] */ + unsigned int ic_data_compression_capable : 1; /* 0 [8] */ + unsigned int ic_ack_generation_assistance : 1; /* 0 [9] */ + unsigned int ic_ack_n_capable : 1; /* 0 [10] */ + unsigned int ic_ack_o_capable : 1; /* 0 [11] */ + /* 0 [12-13] */ + unsigned int ic_initial_responder_processes_accociator : 2; + unsigned int ic_x_id_reassignment : 2; /* 0 [14-15] */ + + unsigned int reserved_cl1 : 6; /* 0 [0-5] */ + /* 0 [6-7] */ + unsigned int ic_data_compression_history_buffer_size : 2; +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int received_data_field_size : 16; /* 1 [0-15] */ + + unsigned int reserved_cl3 : 5; /* 1 [16-20] */ + /* 1 [21-22] */ + unsigned int rc_data_compression_history_buffer_size : 2; + unsigned int rc_data_compression_capable : 1; /* 1 [23] */ + + unsigned int rc_categories_per_sequence : 2; /* 1 [24-25] */ + unsigned int reserved_cl4 : 1; /* 1 [26] */ + unsigned int rc_error_policy_supported : 2; /* 1 [27-28] */ + unsigned int rc_x_id_interlock : 1; /* 1 [29] */ + unsigned int rc_ack_n_capable : 1; /* 1 [30] */ + unsigned int rc_ack_o_capable : 1; /* 1 [31] */ +#else + unsigned int rc_categories_per_sequence : 2; /* 1 [24-25] */ + unsigned int reserved_cl4 : 1; /* 1 [26] */ + unsigned int rc_error_policy_supported : 2; /* 1 [27-28] */ + unsigned int rc_x_id_interlock : 1; /* 1 [29] */ + unsigned int rc_ack_n_capable : 1; /* 1 [30] */ + unsigned int rc_ack_o_capable : 1; /* 1 [31] */ + + unsigned int reserved_cl3 : 5; /* 1 [16-20] */ + /* 1 [21-22] */ + unsigned int rc_data_compression_history_buffer_size : 2; + unsigned int rc_data_compression_capable : 1; /* 1 [23] */ + + unsigned int received_data_field_size : 16; /* 1 [0-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int n_port_end_to_end_credit : 15; /* 2 [0-14] */ + unsigned int reserved_cl5 : 1; /* 2 [15] */ + + unsigned int concurrent_sequences : 16; /* 2 [16-31] */ +#else + unsigned int concurrent_sequences : 16; /* 2 [16-31] */ + + unsigned int 
n_port_end_to_end_credit : 15; /* 2 [0-14] */ + unsigned int reserved_cl5 : 1; /* 2 [15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int reserved_cl6 : 16; /* 3 [0-15] */ + unsigned int open_sequences_per_exchange : 16; /* 3 [16-31] */ +#else + unsigned int open_sequences_per_exchange : 16; /* 3 [16-31] */ + unsigned int reserved_cl6 : 16; /* 3 [0-15] */ +#endif +}; + +struct unf_fabric_parms_s { + struct unf_fabric_coparms_s co_parms; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + struct unf_lgn_port_clparms_s cl_parms[3]; + unsigned int reserved_1[4]; + unsigned int vendor_version_level[4]; +}; + +struct unf_lgn_parms_s { + struct unf_lgn_port_coparms_s co_parms; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + struct unf_lgn_port_clparms_s cl_parms[3]; + unsigned int reserved_1[4]; + unsigned int vendor_version_level[4]; +}; + +#define ELS_RJT 0x1 +#define ELS_ACC 0x2 +#define ELS_PLOGI 0x3 +#define ELS_FLOGI 0x4 +#define ELS_LOGO 0x5 +#define ELS_RLS 0xf +#define ELS_ECHO 0x10 +#define ELS_RRQ 0x12 +#define ELS_REC 0x13 +#define ELS_PRLI 0x20 +#define ELS_PRLO 0x21 +#define ELS_TPRLO 0x24 +#define ELS_PDISC 0x50 +#define ELS_FDISC 0x51 +#define ELS_ADISC 0x52 +#define ELS_RSCN 0x61 /* registered state change notification */ +#define ELS_SCR 0x62 /* state change registration */ + +#define NS_GIEL 0X0101 +#define NS_GA_NXT 0X0100 +#define NS_GPN_ID 0x0112 /* get port name by ID */ +#define NS_GNN_ID 0x0113 /* get node name by ID */ +#define NS_GFF_ID 0x011f /* get FC-4 features by ID */ +#define NS_GID_PN 0x0121 /* get ID for port name */ +#define NS_GID_NN 0x0131 /* get IDs for node name */ +#define NS_GID_FT 0x0171 /* get IDs by FC4 type */ +#define NS_GPN_FT 0x0172 /* get port names by FC4 type */ +#define NS_GID_PT 0x01a1 /* get IDs by port type */ +#define NS_RFT_ID 0x0217 /* reg FC4 type for ID */ +#define NS_RPN_ID 0x0212 /* reg port name for ID */ +#define NS_RNN_ID 0x0213 /* reg node name for ID */ +#define NS_RSNPN 0x0218 /* reg symbolic port name */ +#define NS_RFF_ID 0x021f /* reg FC4 Features for ID */ +#define NS_RSNN 0x0239 /* reg symbolic node name */ +#define ST_NULL 0xffff /* reg symbolic node name */ + +#define BLS_ABTS 0xA001 /* ABTS */ + +#define FCP_SRR 0x14 /* Sequence Retransmission Request */ + +#define UNF_FC_FID_DOM_MGR 0xfffc00 /* domain manager base */ +enum unf_fc_well_known_fabric_id { + UNF_FC_FID_NONE = 0x000000, /* No destination */ + UNF_FC_FID_DOM_CTRL = 0xfffc01, /* domain controller */ + UNF_FC_FID_BCAST = 0xffffff, /* broadcast */ + UNF_FC_FID_FLOGI = 0xfffffe, /* fabric login */ + UNF_FC_FID_FCTRL = 0xfffffd, /* fabric controller */ + UNF_FC_FID_DIR_SERV = 0xfffffc, /* directory server */ + UNF_FC_FID_TIME_SERV = 0xfffffb, /* time server */ + UNF_FC_FID_MGMT_SERV = 0xfffffa, /* management server */ + UNF_FC_FID_QOS = 0xfffff9, /* QoS Facilitator */ + UNF_FC_FID_ALIASES = 0xfffff8, /* alias server (FC-PH2) */ + UNF_FC_FID_SEC_KEY = 0xfffff7, /* Security key dist. 
server */ + UNF_FC_FID_CLOCK = 0xfffff6, /* clock synch server */ + UNF_FC_FID_MCAST_SERV = 0xfffff5 /* multicast server */ +}; + +#define DRV_ENTRY_PER_SGL 64 /* Size of an entry array in a hash table */ +#define DRV_DATA_PROTECTION_LEN 8 + +struct dif_result_info_s { + unsigned char actual_dif[DRV_DATA_PROTECTION_LEN]; + unsigned char expected_dif[DRV_DATA_PROTECTION_LEN]; +}; + +struct drv_sge { + char *buf; + void *page_ctrl; + unsigned int length; + unsigned int offset; +}; + +/* + * @enum drv_io_direction + * SCSI data direction + */ +enum drv_io_direction { + DRV_IO_BIDIRECTIONAL = 0, + DRV_IO_DIRECTION_WRITE = 1, + DRV_IO_DIRECTION_READ = 2, + DRV_IO_DIRECTION_NONE = 3, +}; + +/* + * Hash table data structure + */ +struct drv_sgl { + struct drv_sgl *next_sgl; /* poin to SGL,SGL list */ + unsigned short num_sges_in_chain; + unsigned short num_sges_in_sgl; + unsigned int flag; + unsigned long long serial_num; + struct drv_sge sge[DRV_ENTRY_PER_SGL]; + struct list_head node; + unsigned int cpu_id; +}; + +struct dif_info_s { + /* Indicates the result returned when the data + * protection information is inconsistent,add by pangea + */ + struct dif_result_info_s dif_result; + /* Data protection information operation code + * bit[31-24] other operation code + * bit[23-16] Data Protection Information Operation + * bit[15-8] Data protection information verification + * bit[7-0] Data protection information replace + */ + unsigned int protect_opcode; + unsigned short app_tag; + unsigned long long start_lba; /* IO start LBA */ + struct drv_sgl *protection_sgl; +}; + +typedef struct Scsi_Host unf_scsi_host_s; + +struct unf_ini_error_code_s { + unsigned int drv_err_code; /* driver error code */ + unsigned int ap_err_code; /* up level error code */ +}; + +typedef unsigned int (*ini_get_sgl_entry_buf)(void *v_upper_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_req_index, + unsigned int *v_index, + char **v_buf, + unsigned int *v_buf_len); + +struct unf_host_param_s { + int can_queue; + unsigned short sg_table_size; + short cmnd_per_lun; + unsigned int max_id; + unsigned int max_lun; + unsigned int max_channel; + unsigned short max_cmnd_len; + unsigned short max_sectors; + unsigned long long dma_boundary; + unsigned int port_id; + void *lport; + struct device *pdev; +}; + +#define UNF_DIF_AREA_SIZE 8 + +struct unf_dif_control_info_s { + unsigned short app_tag; + unsigned short flags; + unsigned int protect_opcode; + unsigned int fcp_dl; + unsigned int start_lba; + unsigned char actual_dif[UNF_DIF_AREA_SIZE]; + unsigned char expected_dif[UNF_DIF_AREA_SIZE]; + unsigned int dif_sge_count; + void *dif_sgl; +}; + +struct unf_scsi_cmd_s { + unsigned int scsi_host_id; + unsigned int scsi_id; /* cmd->dev->id */ + unsigned long long lun_id; + unsigned long long port_id; + unsigned int underflow; /* Underflow */ + unsigned int transfer_len; /* Transfer Length */ + unsigned int resid; /* Resid */ + unsigned int sense_buf_len; + int result; + unsigned int entry_count; /* IO Buffer counter */ + unsigned int abort; + unsigned int err_code_table_cout; /* error code size */ + unsigned long long cmnd_sn; + unsigned long time_out; /* EPL driver add timer */ + unsigned short cmnd_len; /* Cdb length */ + unsigned char data_direction; /* data direction */ + unsigned char *pcmnd; /* SCSI CDB */ + unsigned char *sense_buf; + void *drv_private; /* driver host pionter */ + void *driver_scribble; /* Xchg pionter */ + void *upper_cmnd; /* UpperCmnd pointer by driver */ + unsigned char *pc_lun_id; /* new 
lunid */ + unsigned int world_id; + struct unf_dif_control_info_s dif_control; /* DIF control */ + struct unf_ini_error_code_s *err_code_table; /* error code table */ + void *sgl; /* Sgl pointer */ + ini_get_sgl_entry_buf pfn_unf_ini_get_sgl_entry; + void (*pfn_done)(struct unf_scsi_cmd_s *); + struct dif_info_s dif_info; +}; + +/* + * R_CTL Basic Link Data defines + */ +#define FC_RCTL_BLS 0x80000000 +#define FC_RCTL_BLS_ACC (FC_RCTL_BLS | 0x04000000) +#define FC_RCTL_BLS_RJT (FC_RCTL_BLS | 0x05000000) + +/* + * BA_RJT reason code defines + */ +#define FCXLS_BA_OK 0x00000000 +#define FCXLS_BA_RJT_INVALID_COMMAND 0x00010000 +#define FCXLS_BA_RJT_LOGICAL_ERROR 0x00030000 + +/* + * BA_RJT code explanation + */ + +#define FCXLS_BA_RJT_INV_OXID_RXID 0x00000300 +#define FCXLS_LS_RJT_INVALID_OXID_RXID 0x00001700 + +/* + * Types (word) + */ +#define FC_TYPE_WORD_BLS 0x00000000 + +/* + * SFS structures + */ +struct unf_ba_rjt_s { + unsigned int reason_code; /* BLS reason code and Reason Explanation */ +}; + +#define FC_ABTS_ACC_SEQ_CNT 0x0000ffff +struct unf_ba_acc_s { + unsigned int seq_id; + unsigned int oxid_rxid; + unsigned int seq_cnt; +}; + +union unf_ba_pld_u { + struct unf_ba_rjt_s ba_rjt; + struct unf_ba_acc_s ba_acc; +}; + +struct unf_abts_rsps_s { + struct unf_fchead_s frame_hdr; + union unf_ba_pld_u ba_pld; +}; + +/* + * BLS RJT structure header and payload + */ +struct unf_bls_rjt_s { + struct unf_fchead_s frame_hdr; + /* BLS reason code and Reason Explanation */ + unsigned int reason_code; +}; + +/* + * ELS ACC + */ +struct unf_els_acc_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; +}; + +/* + * ELS RJT + */ +struct unf_els_rjt_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int reason_code; +}; + +/* + * FLOGI payload, + * FC-LS-2 Table 139 FLOGI, PLOGI, FDISC or LS_ACC Payload + */ +struct unf_flogi_payload_s { + unsigned int cmnd; + struct unf_fabric_parms_s fabric_parms; +}; + +/* + * Flogi and Flogi accept frames. They are the same structure + */ +struct unf_flogi_fdisc_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_flogi_payload_s flogi_payload; +}; + +/* + * Fdisc and Fdisc accept frames. They are the same structure + */ +struct unf_fdisc_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_flogi_payload_s fdisc_payload; +}; + +/* + * PLOGI payload + */ +struct unf_plogi_payload_s { + unsigned int cmnd; + struct unf_lgn_parms_s parms; +}; + +/* + * Plogi, Plogi accept, Pdisc and Pdisc accept frames. + * They are all the same structure. + */ +struct unf_plogi_pdisc_s { + struct unf_fchead_s frame_hdr; + struct unf_plogi_payload_s payload; +}; + +/* + * LOGO logout link service requests invalidation of service parameters and + * port name. 
+ * see FC-PH 4.3 Section 21.4.8 + */ + +/* FC-LS-2 Table 12 LOGO Payload */ +struct unf_logo_payload_s { + unsigned int cmnd; + unsigned int nport_id; + unsigned int high_port_name; + unsigned int low_port_name; +}; + +/* + * payload to hold LOGO command + */ +struct unf_logo_s { + struct unf_fchead_s frame_hdr; + struct unf_logo_payload_s payload; +}; + +/* + * payload for ECHO command, refer to FC-LS-2 4.2.4 + */ +struct unf_echo_payload_s { + unsigned int cmnd; +#define UNF_FC_ECHO_PAYLOAD_LENGTH 255 /* Length in words */ + unsigned int data[UNF_FC_ECHO_PAYLOAD_LENGTH]; +}; + +struct unf_echo_s { + struct unf_fchead_s frame_hdr; + struct unf_echo_payload_s *echo_pld; + dma_addr_t phy_echo_addr; +}; + +#define UNF_PRLI_SIRT_EXTRA_SIZE 12 +/* + * payload for PRLI and PRLO + */ +struct unf_pril_payload_s { + unsigned int cmnd; +#define UNF_FC_PRLI_PAYLOAD_LENGTH 7 /* Length in words */ + unsigned int parms[UNF_FC_PRLI_PAYLOAD_LENGTH]; +}; + +/* + * FCHS structure with payload + */ +struct unf_prli_prlo_s { + struct unf_fchead_s frame_hdr; + struct unf_pril_payload_s payload; +}; + +/* + * ADISC payload + */ + +/* FC-LS-2 Table 75 ADISC Request payload */ +struct unf_adisc_payload_s { + unsigned int cmnd; + unsigned int hard_address; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + unsigned int nport_id; +}; + +/* + * FCHS structure with payload + */ +struct unf_adisc_s { + /* FCHS structure */ + struct unf_fchead_s frame_hdr; + /* Payload data containing ADISC info */ + struct unf_adisc_payload_s adisc_payl; +}; + +/* + * RLS payload + */ +struct unf_rls_payload_s { + unsigned int cmnd; + unsigned int nport_id; /* in litle endian format */ +}; + +/* + * RLS + */ +struct unf_rls_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + /* payload data containing the RLS info */ + struct unf_rls_payload_s rls; +}; + +/* + * RLS accept payload + */ +struct unf_rls_acc_payload_s { + unsigned int cmnd; + unsigned int link_failure_count; + unsigned int loss_of_sync_count; + unsigned int loss_of_signal_count; + unsigned int primitive_seq_count; + unsigned int invalid_trans_word_count; + unsigned int invalid_crc_count; +}; + +/* + * RLS accept + */ +struct unf_rls_acc_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + /* payload data containing the RLS ACC info */ + struct unf_rls_acc_payload_s rls; +}; + +/* + * FCHS structure with payload + */ +struct unf_rrq_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int sid; + unsigned int oxid_rxid; +}; + +/* + * ABTA accept + */ +struct unf_abts_acc_s { + struct unf_fchead_s frame_hdr; + unsigned int seq_id; + unsigned int oxid_rxid; + unsigned int seq_cnt; +}; + +struct unf_scr_s { + struct unf_fchead_s frame_hdr; + unsigned int payload[2]; +}; + +struct unf_ctiu_prem_s { + unsigned int rev_inid; + unsigned int gstype_gssub_options; + unsigned int cmnd_rsp_size; + unsigned int frag_reason_exp_vend; +}; + +struct unf_rftid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; + unsigned int fc_4_types[8]; +}; + +struct unf_rffid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; + unsigned int fc_4_feature; +}; + +struct unf_rffid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_gffid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct 
unf_gffid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int fc_4_feature[32]; +}; + +struct unf_gnnid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct unf_gnnid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int node_name[2]; +}; + +struct unf_gpnid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct unf_gpnid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int port_name[2]; +}; + +struct unf_rft_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_srr_payload_s { + unsigned int srr_op; + unsigned short rx_id; + unsigned short ox_id; + unsigned int rel_offset; + unsigned char reserved[3]; + unsigned char rctl_for_iu; +}; + +struct unf_srr_s { + struct unf_fchead_s frame_hdr; + struct unf_srr_payload_s pld; +}; + +struct unf_srr_acc_pld_s { + unsigned int srr_op; /* 02000000h */ +}; + +struct unf_srr_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_srr_acc_pld_s pld; +}; + +struct unf_ls_rjt_pld_s { + unsigned int srr_op; /* 01000000h */ + unsigned char vandor; + unsigned char reason_exp; + unsigned char reason; + unsigned char reserved; +}; + +struct unf_ls_rjt_s { + struct unf_fchead_s frame_hdr; + struct unf_ls_rjt_pld_s pld; +}; + +struct unf_rec_pld_s { + unsigned int rec_cmnd; + unsigned int xchg_org_sid; /* bit0-bit23 */ + unsigned short rx_id; + unsigned short ox_id; +}; + +struct unf_rec_s { + struct unf_fchead_s frame_hdr; + struct unf_rec_pld_s rec_pld; +}; + +struct unf_rec_acc_pld_s { + unsigned int cmnd; + unsigned short rx_id; + unsigned short ox_id; + unsigned int org_addr_id; /* bit0-bit23 */ + unsigned int rsp_addr_id; /* bit0-bit23 */ +}; + +struct unf_rec_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_rec_acc_pld_s payload; +}; + +struct unf_gid_s { + struct unf_ctiu_prem_s ctiu_pream; + unsigned int scope_type; +}; + +struct unf_gid_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +#define UNF_LOOPMAP_COUNT 128 +struct unf_loop_init_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; +#define UNF_FC_ALPA_BIT_MAP_SIZE 4 + unsigned int alpa_bit_map[UNF_FC_ALPA_BIT_MAP_SIZE]; +}; + +struct unf_loop_map_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int loop_map[32]; +}; + +struct unf_ctiu_rjt_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_gif_acc_pld_s { + struct unf_ctiu_prem_s ctiu_pream; + + unsigned int gid_port_id[UNF_GID_PORT_CNT]; +}; + +struct unf_gid_rsp_s { + struct unf_gif_acc_pld_s *gid_acc_pld; +}; + +struct unf_gid_req_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_gid_s gid_req; + struct unf_gid_rsp_s gid_rsp; +}; + +/* Added by fangtao FC-LS-2 Table 31 RSCN Payload */ +struct unf_rscn_port_id_page_s { + unsigned char port_id_port; + unsigned char port_id_area; + unsigned char port_id_domain; + + unsigned char addr_format : 2; + unsigned char event_qualifier : 4; + unsigned char reserved : 2; +}; + +struct unf_rscn_pld_s { + unsigned int cmnd; + struct unf_rscn_port_id_page_s port_id_page[UNF_RSCN_PAGE_SUM]; +}; + +struct unf_rscn_s { + struct unf_fchead_s frame_hdr; + struct unf_rscn_pld_s *rscn_pld; +}; + +union unf_sfs_u { + struct { + struct unf_fchead_s frame_head; + unsigned char data[0]; + } sfs_common; + struct unf_abts_rsps_s abts_rsps; 
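+	/* Note: every member of this union starts with struct unf_fchead_s,
+	 * so sfs_common.frame_head can be used to read the FC frame header
+	 * from the shared SFS buffer before the exact frame type is known.
+	 */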
+ struct unf_els_acc_s els_acc; + struct unf_els_rjt_s els_rjt; + struct unf_plogi_pdisc_s plogi; + struct unf_logo_s logo; + struct unf_echo_s echo; + struct unf_echo_s echo_acc; + struct unf_prli_prlo_s prli; + struct unf_prli_prlo_s prlo; + struct unf_rls_s rls; + struct unf_rls_acc_s rls_acc; + struct unf_plogi_pdisc_s pdisc; + struct unf_adisc_s adisc; + struct unf_rrq_s rrq; + struct unf_flogi_fdisc_acc_s flogi; + struct unf_fdisc_acc_s fdisc; + struct unf_scr_s scr; + struct unf_rec_s rec; + struct unf_rec_acc_s rec_acc; + struct unf_srr_s srr; + struct unf_srr_acc_s srr_acc; + struct unf_ls_rjt_s ls_rjt; + struct unf_rscn_s rscn; + struct unf_gid_req_rsp_s get_id; + struct unf_rftid_s rft_id; + struct unf_rft_rsp_s rft_id_rsp; + struct unf_rffid_s rff_id; + struct unf_rffid_rsp_s rff_id_rsp; + struct unf_gffid_s gff_id; + struct unf_gffid_rsp_s gff_id_rsp; + struct unf_gnnid_s gnn_id; + struct unf_gnnid_rsp_s gnn_id_rsp; + struct unf_gpnid_s gpn_id; + struct unf_gpnid_rsp_s gpn_id_rsp; + struct unf_plogi_pdisc_s plogi_acc; + struct unf_plogi_pdisc_s pdisc_acc; + struct unf_adisc_s adisc_acc; + struct unf_prli_prlo_s prli_acc; + struct unf_prli_prlo_s prlo_acc; + struct unf_flogi_fdisc_acc_s flogi_acc; + struct unf_fdisc_acc_s fdisc_acc; + struct unf_loop_init_s lpi; + struct unf_loop_map_s loopmap; + struct unf_ctiu_rjt_s ctiu_rjt; +}; + +struct unf_sfs_entry_s { + /* Virtual addr of SFS buffer */ + union unf_sfs_u *fc_sfs_entry_ptr; + /* Physical addr of SFS buffer */ + unsigned long long sfs_buff_phy_addr; + /* Length of bytes in SFS buffer */ + unsigned int sfs_buff_len; + unsigned int cur_offset; +}; + +struct unf_fcp_rsp_iu_entry_s { + struct unf_fcprsp_iu_s *fcp_rsp_iu; + dma_addr_t fcp_rsp_iu_phy_addr; +}; + +struct unf_rjt_info_s { + unsigned int els_cmnd_code; + unsigned int reason_code; + unsigned int reason_explanation; +}; + +int unf_alloc_scsi_host(unf_scsi_host_s **v_scsi_host, + struct unf_host_param_s *v_host_param); +void unf_free_scsi_host(unf_scsi_host_s *v_scsi_host); +unsigned int unf_register_ini_transport(void); +void unf_unregister_ini_transport(void); +void unf_report_io_dm_event(void *v_lport, unsigned int type, + unsigned int value); +void unf_save_sense_data(void *scsicmd, const char *sense, int senslen); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_service.c b/drivers/scsi/huawei/hifc/unf_service.c new file mode 100644 index 0000000000000000000000000000000000000000..ec6e3fa96a472f7cac163bc0d23e4b693d27fae8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_service.c @@ -0,0 +1,9876 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_exchg.h" +#include "unf_log.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_npiv.h" + +static void unf_flogi_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_fdisc_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_plogi_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static unsigned int unf_rec_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static void unf_gid_ft_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_gid_pt_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_process_rport_after_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +static unsigned int unf_flogi_handler(struct 
unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_plogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_prli_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_prlo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rscn_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_logo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_echo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_pdisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_adisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rrq_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rls_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_send_els_rjt_by_rport( + struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport, + struct unf_rjt_info_s *v_rjt_info); + +unsigned int max_frame_size = UNF_DEFAULT_FRAME_SIZE; + +#define FCP_XFER_RDY_IU 0x05 +#define FCP_RSP_IU 0x07 +#define FCP_DATA_IU 0x01 + +#define UNF_GID_LAST_PORT_ID 0x80 +#define UNF_LOWLEVEL_BBCREDIT 0x6 +#define UNF_DEFAULT_BB_SC_N 0 +#define UNF_INIT_DISC 0x1 /* first time DISC */ +#define UNF_RSCN_DISC 0x2 /* RSCN Port Addr DISC */ +/* Reference from FCP-4 Table33 RR_TOV: REC_TOV + 2*R_A_TOV + 1S, + * REC_TOV = E_D_TOV + 1s + */ +#define UNF_CALC_LPORT_RRTOV(v_lport) \ + (((v_lport)->ed_tov + 1000) + (2 * (v_lport)->ra_tov + 1000)) + +#define UNF_GID_CONTROL(v_nport_id) ((v_nport_id) >> 24) + +#define UNF_ECHO_PLD_DATA 0x1234567890ABCDEF +#define UNF_ECHO_REQ_SIZE 0 + +#define UNF_GET_PORT_OPTIONS(v_fc4feature) ((v_fc4feature) >> 20) + +#define UNF_GET_DOMAIN_ID(x) (((x) & 0xFF0000) >> 16) /* domain id */ +#define UNF_GET_AREA_ID(x) (((x) & 0x00FF00) >> 8) /* area id */ + +#define UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page) \ + (((unsigned int)(v_port_id_page)->port_id_domain << 16) | \ + ((unsigned int)(v_port_id_page)->port_id_area << 8) | \ + ((unsigned int)(v_port_id_page)->port_id_port)) + +#define UNF_GNN_GFF_ID_RJT_REASON(rjt_reason) \ + ((((rjt_reason) & UNF_CTIU_RJT_MASK) == \ + UNF_CTIU_RJT_UNABLE_PERFORM) && \ + ((((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTID_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTNAME_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_NODENAME_NO_REG))) + +#define UNF_NEED_BIG_RESPONSE_BUFF(cmnd_code) \ + (((cmnd_code) == ELS_ECHO) || ((cmnd_code) == NS_GID_PT) || \ + ((cmnd_code) == NS_GID_FT)) + +#define NEED_REFRESH_NPORTID(pkg) ((((pkg)->cmnd == ELS_PLOGI) || \ + ((pkg)->cmnd == ELS_PDISC) || \ + ((pkg)->cmnd == ELS_ADISC))) + +struct unf_els_handler_table { + unsigned int cmnd; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, + unsigned int, struct unf_xchg_s *); +}; + +#define UNF_SERVICE_COLLECT(service_collect, item) \ + do { \ + if ((item) < UNF_SERVICE_BUTT) { \ + service_collect.service_cnt[(item)]++; \ + } \ 
+ } while (0) + +struct unf_els_handler_table els_handle[] = { + { ELS_PLOGI, unf_plogi_handler }, + { ELS_FLOGI, unf_flogi_handler }, + { ELS_LOGO, unf_logo_handler }, + { ELS_ECHO, unf_echo_handler }, + { ELS_RRQ, unf_rrq_handler }, + { ELS_REC, unf_rec_handler }, + { ELS_PRLI, unf_prli_handler }, + { ELS_PRLO, unf_prlo_handler }, + { ELS_PDISC, unf_pdisc_handler }, + { ELS_ADISC, unf_adisc_handler }, + { ELS_RSCN, unf_rscn_handler }, + { ELS_RLS, unf_rls_handler } +}; + +static void unf_check_rport_need_delay_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3300, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3301, UNF_TRUE, v_rport, return); + + v_port_feature &= UNF_PORT_MODE_BOTH; + + /* Used for: L_Port has INI mode & R_Port is not SW */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* + * 1. immediately: R_Port only with TGT, or + * L_Port only with INI & R_Port has TGT mode, + * send PRLI immediately + */ + if (((v_port_feature == UNF_PORT_MODE_TGT) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) || + ((v_port_feature & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Send PRLI to remote port */ + ret = unf_send_prli(v_lport, v_rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI failed", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + /* Do R_Port recovery */ + unf_rport_error_recovery(v_rport); + } + } else if (v_port_feature != UNF_PORT_MODE_INI) { + /* 2. R_Port has BOTH mode or unknown, + * Delay to send PRLI + */ + /* Prevent: PRLI done before PLOGI */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) delay to send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Delay to send PRLI to R_Port */ + unf_rport_delay_login(v_rport); + } else { + /* 3. 
R_Port only with INI mode: wait for R_Port's + * PRLI: Do not care + */ + /* Cancel recovery(timer) work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + if (cancel_delayed_work( + &v_rport->recovery_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) is pure INI", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + unf_rport_ref_dec(v_rport); + } + } + + /* Server: R_Port only support INI, + * do not care this case + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) wait for PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + } + } +} + +static unsigned int unf_low_level_bb_credit(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_credit = UNF_LOWLEVEL_BBCREDIT; + + if (unlikely(!v_lport)) + return bb_credit; + + lport = v_lport; + if (unlikely(!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get)) + return bb_credit; + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, + (void *)&bb_credit); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get BB_Credit failed, use default value(%d)", + lport->port_id, UNF_LOWLEVEL_BBCREDIT); + + bb_credit = UNF_LOWLEVEL_BBCREDIT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with BB_Credit(%u)", + lport->port_id, bb_credit); + + return bb_credit; +} + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_scn = UNF_DEFAULT_BB_SC_N; + + if (unlikely(!v_lport)) + return bb_scn; + + port_mgr = &lport->low_level_func.port_mgr_op; + + if (unlikely(!port_mgr->pfn_ll_port_config_get)) + return bb_scn; + + ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBSCN, + (void *)&bb_scn); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get bbscn failed, use default value(%d)", + lport->port_id, UNF_DEFAULT_BB_SC_N); + + bb_scn = UNF_DEFAULT_BB_SC_N; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x)'s bbscn(%d)", + lport->port_id, bb_scn); + + return bb_scn; +} + +static unsigned int unf_els_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long time_out = 0; + + UNF_CHECK_VALID(0x3302, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3303, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3304, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_els_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) ELS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add ELS command/response (Exchange) timeout timer */ + time_out = UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + if (v_xchg->cmnd_code == ELS_RRQ) { + time_out = ((unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) > + UNF_RRQ_MIN_TIMEOUT_INTERVAL) ? 
+ (unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) : + UNF_RRQ_MIN_TIMEOUT_INTERVAL; + } else if (v_xchg->cmnd_code == ELS_LOGO) { + time_out = UNF_LOGO_TIMEOUT_INTERVAL; + } + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer((void *)v_xchg, + time_out, + UNF_TIMER_TYPE_SFS); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send ELS command/response */ + ret = v_lport->low_level_func.service_op.pfn_unf_els_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) { + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + } + + return ret; +} + +static unsigned int unf_gs_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3305, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3306, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3307, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_gs_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) GS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add GS command timeout timer */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + (unsigned long)UNF_GET_GS_SFS_XCHG_TIMER(v_lport), + UNF_TIMER_TYPE_SFS); + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = (unsigned int) + UNF_GET_GS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send GS command */ + ret = v_lport->low_level_func.service_op.pfn_unf_gs_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + return ret; +} + +static unsigned int unf_bls_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3308, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3309, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3310, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_BLS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + return v_lport->low_level_func.service_op.pfn_unf_bls_send( + v_lport->fc_port, v_pkg); +} + +static void unf_fill_package(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport) +{ + /* v_rport maybe NULL */ + UNF_CHECK_VALID(0x3311, UNF_TRUE, v_pkg, return); + UNF_CHECK_VALID(0x3312, UNF_TRUE, v_xchg, return); + + v_pkg->cmnd = v_xchg->cmnd_code; + v_pkg->fcp_cmnd = &v_xchg->fcp_cmnd; + v_pkg->frame_head.csctl_sid = v_xchg->sid; + v_pkg->frame_head.rctl_did = v_xchg->did; + v_pkg->frame_head.oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); + v_pkg->xchg_contex = v_xchg; + + UNF_CHECK_VALID(0x3313, UNF_TRUE, v_xchg->lport, return); + v_pkg->private[PKG_PRIVATE_XCHG_VP_INDEX] = + v_xchg->lport->vp_index; + + if (!v_rport) { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + UNF_RPORT_INVALID_INDEX; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = INVALID_VALUE32; + } else { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + 
v_rport->rport_index; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = + v_rport->max_frame_size; + } + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + v_pkg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] = + v_xchg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD]; + v_pkg->unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + v_pkg->unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + + /* Low level need to know payload length if send ECHO response */ + v_pkg->unf_cmnd_pload_bl.length = + v_xchg->fcp_sfs_union.sfs_entry.cur_offset; +} + +static struct unf_xchg_s *unf_get_sfs_free_xchg_and_init( + struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_rport_s *v_rport, + union unf_sfs_u **v_fc_entry) +{ + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *fc_entry = NULL; + + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) + return NULL; + + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return NULL; + } + + *v_fc_entry = fc_entry; + + return xchg; +} + +static void unf_scr_callback(void *v_lport, + void *v_rport, + void *v_xchg) +{ + /* Callback function for SCR response: Send GID_PT with INI mode */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_disc_s *disc = &lport->disc; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_els_acc_s *els_acc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long lport_flag = 0; + unsigned long disc_flag = 0; + unsigned int cmnd = 0; + + UNF_CHECK_VALID(0x3694, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3695, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(ret); + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* About ELS_CMND ACC */ + spin_lock_irqsave(&lport->lport_state_lock, lport_flag); + + /* Check L_Port state: SCR_WAIT */ + if (lport->en_states != UNF_LPORT_ST_SCR_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive SCR ACC with error state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + return; + } + + /* Update L_Port state machine: Ready */ + /* LPort: SCR_WAIT --> READY */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + if (lport->en_states == UNF_LPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) enter READY state when received SCR response", + lport->port_id, lport->nport_id); + } + + /* Start to Discovery with INI mode: GID_PT */ + if ((lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { 
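+			/* INI mode: drop the L_Port state lock before starting
+			 * name server discovery (GID_PT) through the DISC
+			 * template.
+			 */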
+ spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + if (lport->disc.unf_disc_temp.pfn_unf_disc_start) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + lport->disc.disc_option = UNF_INIT_DISC; + disc->last_disc_jiff = jiffies; + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, disc_flag); + + ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) DISC %s with INI mode", + lport->port_id, + (ret != RETURN_OK) ? "failed" : + "succeed"); + } + + UNF_REFERNCE_VAR(ret); + return; + } + /* TGT mode: Do not care */ + spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag); + + /* NOTE: set state with UNF_DISC_ST_END used for RSCN process */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + lport->disc.en_states = UNF_DISC_ST_END; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) is TGT mode, no need to discovery", + lport->port_id); + return; + } + /* About ELS_CMND response: RJT */ + unf_lport_error_recovery(lport); + + UNF_REFERNCE_VAR(ret); +} + +static void unf_scr_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Callback fucnion for exception: Do L_Port error recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3692, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3693, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send SCR failed and do port recovery", + lport->port_id); + + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* after RCVD RFF_ID ACC */ + struct unf_scr_s *scr = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3314, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3315, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get free exchange for SCR */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for SCR", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_SCR; /* SCR */ + ox_id = xchg->ox_id; + /* Set callback function */ + xchg->pfn_callback = unf_scr_callback; + xchg->pfn_ob_callback = unf_scr_ob_callback; + + /* Fill command/response package */ + unf_fill_package(&pkg, xchg, v_rport); + + scr = &fc_entry->scr; + memset(scr, 0, sizeof(struct unf_scr_s)); + scr->payload[0] = (UNF_GS_CMND_SCR); /* SCR is 0x62 */ + scr->payload[1] = (UNF_FABRIC_FULL_REG); /* Full registration */ + + /* Send SCR command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: SCR send %s. Port(0x%x_0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rec_pld(struct unf_rec_pld_s *v_rec_pld, + unsigned int v_sid, + unsigned short v_oxid) +{ + UNF_CHECK_VALID(0x3339, UNF_TRUE, v_rec_pld, return); + + v_rec_pld->rec_cmnd = UNF_ELS_CMND_REC; + v_rec_pld->xchg_org_sid = v_sid; + v_rec_pld->ox_id = v_oxid; + v_rec_pld->rx_id = INVALID_VALUE16; +} + +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_io_xchg) +{ + struct unf_rec_pld_s *rec_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_io_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + return ret; + } + + xchg->cmnd_code = ELS_REC; + ox_id = xchg->ox_id; + unf_fill_package(&pkg, xchg, v_rport); + + rec_pld = &fc_entry->rec.rec_pld; + memset(rec_pld, 0, sizeof(struct unf_rec_pld_s)); + + unf_fill_rec_pld(rec_pld, v_lport->nport_id, v_io_xchg->ox_id); + + /* Start to Send REC command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: Send REC %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_flogi_pld(struct unf_flogi_payload_s *v_flogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_fabric_parms_s *fabric_parms = NULL; + + UNF_CHECK_VALID(0x3316, UNF_TRUE, v_flogi_pld, return); + UNF_CHECK_VALID(0x3317, UNF_TRUE, v_lport, return); + + fabric_parms = &v_flogi_pld->fabric_parms; + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) || + (v_lport->en_act_topo == UNF_TOP_P2P_MASK)) { + /* Fabric or P2P topology */ + fabric_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + fabric_parms->co_parms.bb_scn = unf_low_level_bbscn(v_lport); + } else { + /* Loop topology here */ + fabric_parms->co_parms.clean_address = + UNF_CLEAN_ADDRESS_DEFAULT; + fabric_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* :1 */ + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + } + + if (v_lport->low_level_func.support_max_npiv_num != 0) + fabric_parms->co_parms.clean_address = 1; /* support NPIV */ + + fabric_parms->cl_parms[2].valid = UNF_CLASS_VALID; + fabric_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + fabric_parms->cl_parms[2].sequential_delivery = + UNF_SEQUEN_DELIVERY_REQ; + fabric_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + + fabric_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + fabric_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + fabric_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + fabric_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); +} + +static void unf_flogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send FLOGI failed & Do L_Port recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3644, UNF_TRUE, v_xchg, return); + + /* Get L_port from exchange context */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + UNF_CHECK_VALID(0x3645, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed", + lport->port_id); + + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send FLOGI failed with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *flogi_pld = NULL; + union 
unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3318, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3319, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set New free Exchange Context */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_FLOGI; /* FLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd flogi acc/rjt processer */ + xchg->pfn_callback = unf_flogi_callback; + /* for send flogi failed processer */ + xchg->pfn_ob_callback = unf_flogi_ob_callback; + + /* Fill package: Exchange --to-->> Package */ + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill Flogi Payload */ + flogi_pld = &fc_entry->flogi.flogi_payload; + memset(flogi_pld, 0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(flogi_pld, v_lport); + flogi_pld->cmnd = (UNF_ELS_CMND_FLOGI); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Begin to send FLOGI. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + /* Start to send FLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]LOGIN: Send FLOGI failed. Port(0x%x)--->rport(0x%x)", + v_lport->port_id, v_rport->nport_id); + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fdisc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3638, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC send failed"); + + UNF_CHECK_VALID(0x3639, UNF_TRUE, NULL != lport, return); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *exch = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3320, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3321, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + exch = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!exch) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FDISC", + v_lport->port_id); + + return ret; + } + + exch->cmnd_code = ELS_FDISC; /* FDISC */ + + ox_id = exch->ox_id; + + /* Set callback function */ + exch->pfn_callback = unf_fdisc_callback; + exch->pfn_ob_callback = unf_fdisc_ob_callback; + + unf_fill_package(&pkg, exch, v_rport); + + /* Fill FDISC entry(payload) */ + fdisc_pld = &fc_entry->fdisc.fdisc_payload; + memset(fdisc_pld, 
0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(fdisc_pld, v_lport); + fdisc_pld->cmnd = UNF_ELS_CMND_FDISC; /* update cmnd type */ + + /* Start to send FDISC */ + ret = unf_els_cmnd_send(v_lport, &pkg, exch); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)exch); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_plogi_pld(struct unf_plogi_payload_s *v_plogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3322, UNF_TRUE, v_plogi_pld, return); + UNF_CHECK_VALID(0x3323, UNF_TRUE, v_lport, return); + + lport = v_lport->root_lport; + v_plogi_pld->cmnd = (UNF_ELS_CMND_PLOGI); + login_parms = &v_plogi_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + /* P2P or Fabric mode */ + login_parms->co_parms.bb_credit = + (unf_low_level_bb_credit(v_lport)); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? + 0 : unf_low_level_bbscn(v_lport); + } else { + /* Public loop & Private loop mode */ + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; /* 0 */ + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + login_parms->co_parms.nport_total_concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); + login_parms->co_parms.e_d_tov = UNF_DEFAULT_EDTOV; + if (lport->b_priority == UNF_PRIORITY_ENABLE) + login_parms->cl_parms[2].priority = UNF_PRIORITY_ENABLE; + else + login_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* for class_3 */ + login_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + login_parms->cl_parms[2].concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->cl_parms[2].open_sequences_per_exchange = + (UNF_PLOGI_SEQ_PER_XCHG); + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_plogi_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_plogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do L_Port or R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3656, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3657, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3734, 
UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI(0x%x_0x%x) to RPort(%p:0x%x_0x%x) failed", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport, rport->rport_index, rport->nport_id); + + /* Start to recovery */ + if (rport->nport_id > UNF_FC_FID_DOM_MGR) { + /* with Name server: R_Port is fabric --->>> + * L_Port error recovery + */ + unf_lport_error_recovery(lport); + } else { + /* R_Port is not fabric --->>> R_Port error recovery */ + unf_rport_error_recovery(rport); + } +} + +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_plogi_payload_s *plogi_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PLOGI; /* PLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd plogi acc/rjt processer */ + xchg->pfn_callback = unf_plogi_callback; + /* for send plogi failed processer */ + xchg->pfn_ob_callback = unf_plogi_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PLOGI payload */ + plogi_pld = &fc_entry->plogi.payload; + memset(plogi_pld, 0, sizeof(struct unf_plogi_payload_s)); + unf_fill_plogi_pld(plogi_pld, v_lport); + + /* Start to Send PLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PLOGI %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_logo_pld(struct unf_logo_payload_s *v_logo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3326, UNF_TRUE, v_logo_pld, return); + UNF_CHECK_VALID(0x3327, UNF_TRUE, v_lport, return); + + v_logo_pld->cmnd = UNF_ELS_CMND_LOGO; + v_logo_pld->nport_id = (v_lport->nport_id); + v_logo_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_logo_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_logo_pld, + sizeof(struct unf_logo_payload_s)); +} + +static void unf_logo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + struct unf_rport_s *rport; + struct unf_rport_s *old_rport; + struct unf_xchg_s *xchg; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, NULL, return); + xchg = v_xchg; + old_rport = xchg->rport; + logo_retry = old_rport->logo_retries; + + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + lport = xchg->lport; + if (unf_is_lport_valid(lport) != RETURN_OK) + return; + + /* Get R_Port by exchange info: Init state */ + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]LOGIN: Port(0x%x) received LOGO RSP timeout topo(0x%x) retries(%u)", + lport->port_id, lport->en_act_topo, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) { + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + } else { + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); + } +} + +static void unf_logo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD LOGO ACC/RJT: retry(LOGIN/LOGO) or link down immediately */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_rport_s *old_rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_rjt_s *els_acc_rjt = NULL; + unsigned int cmnd = 0; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + xchg = (struct unf_xchg_s *)v_xchg; + old_rport = xchg->rport; + + logo_retry = old_rport->logo_retries; + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + return; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + /* Get R_Port by exchange info: Init state */ + els_acc_rjt = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_rjt; + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_INIT, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + cmnd = be32_to_cpu(els_acc_rjt->cmnd); 
+ UNF_REFERNCE_VAR(cmnd); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) received LOGO RSP(0x%x), topo(0x%x) Port options(0x%x) RPort options(0x%x) retries(%d)", + lport->port_id, (cmnd & UNF_ELS_CMND_HIGH_MASK), + lport->en_act_topo, + lport->options, rport->options, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + else + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); +} + +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* retry or link down immediately */ + xchg->pfn_callback = unf_logo_callback; + xchg->pfn_ob_callback = unf_logo_ob_callback; /* do nothing */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + v_rport->logo_retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) OXID(0x%x) Retries(%d)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + ox_id, v_rport->logo_retries); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +unsigned int unf_send_logo_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + /* Has non R_Port */ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3329, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_did, NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, NULL); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO now */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_did, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void *unf_get_one_big_sfs_buf(struct unf_xchg_s *v_xchg) +{ + struct unf_big_sfs_s *big_sfs = NULL; + struct list_head *list_head = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3330, UNF_TRUE, v_xchg, return NULL); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x3331, UNF_TRUE, xchg_mgr, return NULL); + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + if (!list_empty(&xchg_mgr->st_big_sfs_pool.list_free_pool)) { + /* from free to busy */ + list_head = (&xchg_mgr->st_big_sfs_pool.list_free_pool)->next; + list_del(list_head); + xchg_mgr->st_big_sfs_pool.free_count--; + list_add_tail(list_head, + &xchg_mgr->st_big_sfs_pool.list_busy_pool); + big_sfs = list_entry(list_head, struct unf_big_sfs_s, + entry_big_sfs); + } else { + spin_unlock_irqrestore( + &xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Allocate big sfs buf failed, count(0x%x) exchange(0x%p) command(0x%x)", + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + + return NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + v_xchg->big_sfs_buf = big_sfs; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Allocate one address(0x%p) of big sfs buffer, remaining count(0x%x) exchange(0x%p) command(0x%x)", + big_sfs->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, + v_xchg->cmnd_code); + + return big_sfs->vaddr; +} + +static void unf_echo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + struct unf_xchg_s *xchg = NULL; + struct unf_echo_payload_s *echo_rsp_pld = NULL; + unsigned int cmnd = 0; + unsigned int mag_ver_local = 0; + unsigned int mag_ver_remote = 0; + + UNF_CHECK_VALID(0x3332, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3333, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3334, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + echo_rsp_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; + UNF_CHECK_VALID(0x3335, UNF_TRUE, NULL != echo_rsp_pld, return); + + if (xchg->byte_orders & UNF_BIT_2) { + unf_big_end_to_cpu((unsigned char *)echo_rsp_pld, + sizeof(struct unf_echo_payload_s)); + cmnd = echo_rsp_pld->cmnd; + } else { + cmnd = echo_rsp_pld->cmnd; + } + + mag_ver_local = echo_rsp_pld->data[0]; + mag_ver_remote = echo_rsp_pld->data[1]; + + /* Print info */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote == ECHO_MG_VERSION_REMOTE)) { + /* both side are 1822 */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. 
local snd echo:(0x%x), remote rcv echo:(0x%x), remote snd echo acc:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + /* the peer don't supprt smartping, only local snd + * and rcv rsp time stamp + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local != ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local and remote is not IN300", + lport->port_id, rport->nport_id); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) and received RJT", + lport->port_id, rport->nport_id); + } + + xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + xchg->echo_info.response_time = jiffies - + xchg->echo_info.response_time; + + /* wake up semaphore */ + up(&xchg->echo_info.echo_sync_sema); +} + +static void unf_echo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3336, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3337, UNF_TRUE, lport, return); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3338, UNF_TRUE, rport, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) but timeout", + lport->port_id, rport->nport_id); + + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; + + /* wake up semaphore */ + up(&v_xchg->echo_info.echo_sync_sema); +} + +unsigned int unf_send_echo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int *v_time) +{ + struct unf_echo_payload_s *echo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long delay = 0; + unsigned short ox_id = 0; + dma_addr_t phy_echo_addr; + + UNF_CHECK_VALID(0x3340, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3341, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3342, UNF_TRUE, v_time, return UNF_RETURN_ERROR); + + delay = 2 * (unsigned long)(v_lport->ra_tov); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for ECHO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_ECHO; /* ECHO */ + + xchg->fcp_sfs_union.sfs_entry.cur_offset = UNF_ECHO_REQ_SIZE; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_echo_callback; /* wake up semaphore */ + xchg->pfn_ob_callback = unf_echo_ob_callback; /* wake up semaphore */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill ECHO entry(payload) */ + echo_pld = (struct unf_echo_payload_s 
*)unf_get_one_big_sfs_buf(xchg); + if (!echo_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate buffer for ECHO", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + fc_entry->echo.echo_pld = echo_pld; + phy_echo_addr = pci_map_single(v_lport->low_level_func.dev, echo_pld, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + v_lport->low_level_func.dev, phy_echo_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + fc_entry->echo.phy_echo_addr = phy_echo_addr; + memset(echo_pld, 0, sizeof(struct unf_echo_payload_s)); + echo_pld->cmnd = (UNF_ELS_CMND_ECHO); + echo_pld->data[0] = ECHO_MG_VERSION_LOCAL; + + ret = unf_xchg_ref_inc(xchg, SEND_ELS); + UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* Start to send ECHO command */ + xchg->echo_info.response_time = jiffies; + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } else { + if (down_timeout(&xchg->echo_info.echo_sync_sema, + (long) + msecs_to_jiffies((unsigned int)delay))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]ECHO send %s. Port(0x%x)--->rport(0x%x) but response timeout with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + xchg->echo_info.echo_result = + UNF_ELS_ECHO_RESULT_FAIL; + } + + if (xchg->echo_info.echo_result == + UNF_ELS_ECHO_RESULT_FAIL) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Echo send fail or timeout"); + + ret = UNF_RETURN_ERROR; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "echo acc rsp,echo_cmd_snd(0x%xus)-->echo_cmd_rcv(0x%xus)-->echo_acc_snd(0x%xus)-->echo_acc_rcv(0x%xus).", + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + + *v_time = ( + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]) - + (xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]); + } + } + + pci_unmap_single(v_lport->low_level_func.dev, phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + fc_entry->echo.phy_echo_addr = 0; + unf_xchg_ref_dec(xchg, SEND_ELS); + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prli_pld(struct unf_pril_payload_s *v_prli_pld, + struct unf_lport_s *v_lport) +{ + unsigned int pld_len = 0; + + UNF_CHECK_VALID(0x3344, UNF_TRUE, v_prli_pld, return); + UNF_CHECK_VALID(0x3345, UNF_TRUE, v_lport, return); + + pld_len = sizeof(struct unf_pril_payload_s) - UNF_PRLI_SIRT_EXTRA_SIZE; + v_prli_pld->cmnd = (UNF_ELS_CMND_PRLI | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)pld_len)); + + v_prli_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR); + v_prli_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About Read Xfer_rdy disable */ + v_prli_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + v_lport->options); + + /* About FCP confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + v_prli_pld->parms[3] |= 
UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + /* About Tape support */ + if (v_lport->low_level_func.lport_cfg_items.tape_support) { + v_prli_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x)'s PRLI payload: options(0x%x) parameter-3(0x%x)", + v_lport->port_id, v_lport->options, v_prli_pld->parms[3]); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prli_pld, + sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD PRLI RSP: ACC or RJT --->>> SCSI Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_pril_payload_s *prli_acc_pld = NULL; + unsigned long flag = 0; + unsigned int cmnd = 0; + unsigned int options = 0; + unsigned int fcp_conf = 0; + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + unsigned int tape_support = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3679, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3680, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3681, UNF_TRUE, v_xchg, return); + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange(%p) entry is NULL", + lport->port_id, xchg); + return; + } + + /* Get PRLI ACC payload */ + prli_acc_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli_acc.payload; + if (xchg->byte_orders & UNF_BIT_2) { + /* Change to little End, About INI/TGT mode & confirm info */ + options = be32_to_cpu(prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = be32_to_cpu(prli_acc_pld->cmnd); + fcp_conf = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + } else { + options = (prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = (prli_acc_pld->cmnd); + fcp_conf = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI RSP: RPort(0x%x) parameter-3(0x%x) option(0x%x) cmd(0x%x) rec support:%u", + rport->nport_id, prli_acc_pld->parms[3], options, + cmnd, rec_support); + + /* PRLI ACC: R_Port READY & Report R_Port Link Up */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Update R_Port options(INI/TGT/BOTH) */ + rport->options = options; + + unf_update_port_feature(rport->port_name, rport->options); + + /* NOTE: R_Port only with INI mode, send LOGO */ + if (rport->options == UNF_PORT_MODE_INI) { + /* Update R_Port state: LOGO 
*/ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to Send LOGO */ + unf_rport_enter_logo(lport, rport); + return; + } + + /* About confirm */ + if (fcp_conf && + (lport->low_level_func.lport_cfg_items.fcp_conf != + UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP config is need for RPort(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id); + } + tape_support = (rec_support && task_retry_support && retry_support); + if (tape_support && + (lport->low_level_func.lport_cfg_items.tape_support != UNF_FALSE)) { + rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) Rec is enabled for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + /* Update R_Port state: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); + rport_state = rport->rp_state; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Report R_Port online (Link Up) event to SCSI */ + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event( + lport, rport, rport->options); + } + } else { + /* PRLI RJT: Do R_Port error recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x)<---LS_RJT(DID:0x%x SID:0x%x) for PRLI. RPort(0x%p) OX_ID(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport, xchg->ox_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_prli_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3676, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(lport); + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3677, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3678, UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI failed and do recovery", + lport->port_id, lport->nport_id, rport->nport_id); + + /* Start to do R_Port error recovery */ + unf_rport_error_recovery(rport); + + UNF_REFERNCE_VAR(lport); +} + +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prli_pal = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3346, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3347, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLI; // PRLI + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd prli acc/rjt processer */ + xchg->pfn_callback = 
unf_prli_callback; + /* for send PRLI failed processing */ + xchg->pfn_ob_callback = unf_prli_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLI payload */ + prli_pal = &fc_entry->prli.payload; + memset(prli_pal, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prli_pld(prli_pal, v_lport); + + /* Start to send PRLI ELS command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prlo_pld(struct unf_pril_payload_s *v_prlo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3348, UNF_TRUE, v_prlo_pld, return); + UNF_CHECK_VALID(0x3349, UNF_TRUE, v_lport, return); + + v_prlo_pld->cmnd = (UNF_ELS_CMND_PRLO); + v_prlo_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP); + v_prlo_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[2] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[3] = UNF_NO_SERVICE_PARAMS; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prlo_pld, + sizeof(struct unf_pril_payload_s)); +} + +unsigned int unf_send_prlo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prlo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3350, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3351, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get free exchange for PRLO */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLO; /* PRLO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLO entry(payload) */ + prlo_pld = &fc_entry->prlo.payload; + memset(prlo_pld, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prlo_pld(prlo_pld, v_lport); + + /* Start to send PRLO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ?
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rrq_pld(struct unf_rrq_s *v_rrq_pld, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3360, UNF_TRUE, v_rrq_pld, return); + UNF_CHECK_VALID(0x3361, UNF_TRUE, v_xchg, return); + + v_rrq_pld->cmnd = UNF_ELS_CMND_RRQ; + v_rrq_pld->sid = v_xchg->sid; + v_rrq_pld->oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); +} + +static void unf_rrq_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Release I/O */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_acc_s *els_acc = NULL; + unsigned int cmnd = 0; + struct unf_xchg_s *io_xchg = NULL; + + UNF_CHECK_VALID(0x3696, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3697, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3698, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + lport = (struct unf_lport_s *)v_lport; + UNF_REFERNCE_VAR(lport); + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) SfsEntryPtr is NULL", + lport->port_id, xchg); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) IO exchange is NULL. RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) release IO exch(0x%p) tag(0x%x). RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg->io_xchg, io_xchg->hot_pool_tag, + xchg, xchg->hot_pool_tag); + + /* NOTE: release I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); +} + +static void unf_rrq_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Release I/O */ + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_s *io_xchg = NULL; + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Exchange can't be NULL"); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]IO exchange can't be NULL with Sfs exch(0x%p) tag(0x%x)", + xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]send RRQ failed: SFS exch(0x%p) tag(0x%x) exch(0x%p) tag(0x%x) OXID_RXID(0x%x_0x%x) SID_DID(0x%x_0x%x)", + xchg, xchg->hot_pool_tag, io_xchg, io_xchg->hot_pool_tag, + io_xchg->ox_id, io_xchg->rx_id, io_xchg->sid, + io_xchg->did); + + /* NOTE: Free I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); +} + +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* after ABTS Done */ + struct unf_rrq_s *rrq_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3362, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3363, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3364, UNF_TRUE, v_xchg, 
return UNF_RETURN_ERROR); + + if (v_xchg->rport_bind_jifs != v_rport->rport_alloc_jifs || + (v_rport->nport_id == INVALID_VALUE32)) + return ret; + /* Get & Set New free Exchange for RRQ */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RRQ", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_RRQ; // RRQ + + /* Set callback function */ + xchg->pfn_callback = unf_rrq_callback; // release I/O exchange context + /* release I/O exchange context */ + xchg->pfn_ob_callback = unf_rrq_ob_callback; + xchg->io_xchg = v_xchg; // pointer to IO XCHG + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RRQ entry(payload) */ + rrq_pld = &fc_entry->rrq; + memset(rrq_pld, 0, sizeof(struct unf_rrq_s)); + unf_fill_rrq_pld(rrq_pld, v_xchg); + + /* Start to send RRQ command to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ send %s. Port(0x%x)--->rport(0x%x) free old exchange(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + v_xchg->hot_pool_tag, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gff_id_pld(struct unf_gffid_s *v_gff_id, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3365, UNF_TRUE, v_gff_id, return); + + v_gff_id->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gff_id->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gff_id->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GFF_ID); + v_gff_id->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_gff_id->nport_id = v_nport_id; +} + +static void unf_gff_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3611, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3612, UNF_TRUE, NULL != lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* Get (safe) R_Port */ + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate new RPort(0x%x)", + lport->port_id, nport_id); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send GFF_ID(0x%x_0x%x) to RPort(0x%x_0x%x) abnormal", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport->rport_index, rport->nport_id); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to send PLOGI */ + ret = unf_send_plogi(lport, 
rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send PLOGI failed, enter recovry", + lport->port_id); + + /* Do R_Port recovery */ + unf_rport_error_recovery(rport); + } +} + +static void unf_check_rport_need_delay_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + /* + * Called by: + * 1. Private loop + * 2. RCVD GFF_ID ACC + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int nport_id = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3613, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3614, UNF_TRUE, v_rport, return); + nport_id = rport->nport_id; + + /* + * Send GFF_ID means L_Port has INI attribute + ** + * When to send PLOGI: + * 1. R_Port has TGT mode (COM or TGT), send PLOGI immediately + * 2. R_Port only with INI, send LOGO immediately + * 3. R_Port with unknown attribute, delay to send PLOGI + */ + if ((v_port_feature & UNF_PORT_MODE_TGT) || + (lport->enhanced_features & + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF)) { + /* R_Port has TGT mode: send PLOGI immediately */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + UNF_CHECK_VALID(0x3615, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", + lport->port_id, lport->nport_id, nport_id); + + unf_rport_error_recovery(rport); + } + } else if (v_port_feature == UNF_PORT_MODE_INI) { + /* R_Port only with INI mode: can't send PLOGI --->>> + * LOGO/nothing + */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send LOGO to RPort(0x%x) which only with INI mode", + lport->port_id, lport->nport_id, nport_id); + + /* Enter Closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } else { + /* Unknown R_Port attribute: Delay to send PLOGI */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + nport_id); + UNF_CHECK_VALID(0x3616, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_delay_login(rport); + } +} + +static void unf_rcv_gff_id_acc(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay to LOGIN */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int fc4feature = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3617, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3618, UNF_TRUE, v_gff_id_rsp_pld, return); + + fc4feature = gff_id_rsp_pld->fc_4_feature[1]; + if ((UNF_GFF_ACC_MASK & 
fc4feature) == 0) + fc4feature = be32_to_cpu(gff_id_rsp_pld->fc_4_feature[1]); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x) received GFF_ID ACC. FC4 feature is 0x%x(1:TGT,2:INI,3:COM)", + lport->port_id, lport->nport_id, v_nport_id, fc4feature); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if ((rport) || + (UNF_GET_PORT_OPTIONS(fc4feature) != UNF_PORT_MODE_INI)) { + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + } else { + return; + } + + if ((fc4feature & UNF_GFF_ACC_MASK) != 0) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = UNF_GET_PORT_OPTIONS(fc4feature); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } else if (rport->port_name != INVALID_WWPN) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = unf_get_port_feature(rport->port_name); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + /* NOTE: Send PLOGI if necessary */ + unf_check_rport_need_delay_plogi(lport, rport, rport->options); +} + +static void unf_rcv_gff_id_rjt(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay LOGIN or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3620, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3621, UNF_TRUE, v_gff_id_rsp_pld, return); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) get RPort by N_Port_ID(0x%x) failed and alloc new", + lport->port_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3622, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rjt_reason = gff_id_rsp_pld->ctiu_pream.frag_reason_exp_vend; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but was rejected. 
Reason code(0x%x)", + lport->port_id, v_nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3623, UNF_TRUE, NULL != rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Delay to send PLOGI */ + unf_rport_delay_login(rport); + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Enter closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } +} + +static void unf_gff_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_lport_s *root_lport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gffid_rsp_s *gff_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3626, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3627, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3628, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + nport_id = xchg->disc_port_id; + + gff_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gff_id_rsp; + cmnd_rsp_size = (gff_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for GFF_ID ACC: (Delay)PLOGI */ + unf_rcv_gff_id_acc(lport, gff_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case for GFF_ID RJT: Delay PLOGI or LOGO directly */ + unf_rcv_gff_id_rjt(lport, gff_id_rsp_pld, nport_id); + } else { + /* Send PLOGI */ + unf_rcv_gff_id_rsp_unknown(lport, nport_id); + } +} + +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gffid_s *gff_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3367, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GFF_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_FEATURE); + } + + xchg->cmnd_code = NS_GFF_ID; /* GFF_ID */ + + xchg->disc_port_id = v_nport_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gff_id_ob_callback; /* send PLOGI */ + xchg->pfn_callback = unf_gff_id_callback; /* send PLOGI or LOGO */ + + ox_id = 
xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GFF_ID payload(entry) */ + gff_id = &fc_entry->gff_id; /* GFF_ID */ + memset(gff_id, 0, sizeof(struct unf_gffid_s)); + unf_fill_gff_id_pld(gff_id, v_nport_id); + + /* Send GFF_ID GS command now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GFF_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gnn_id_pld(struct unf_gnnid_s *v_gnn_id_pld, + unsigned int v_nport_id) +{ + /* Inquiry R_Port node name from SW */ + UNF_CHECK_VALID(0x3368, UNF_TRUE, v_gnn_id_pld, return); + + v_gnn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gnn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gnn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GNN_ID; + v_gnn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gnn_id_pld->nport_id = v_nport_id; +} + +/* + * Function Name : unf_gnn_id_ob_callback + * Function Description: Callback for sending GNN_ID abnormal + * Input Parameters : struct unf_xchg_s *v_xchg + * Output Parameters : N/A + * Return Type : void + */ +static void unf_gnn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3597, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3598, UNF_TRUE, lport, return); + sns_port = v_xchg->rport; + UNF_CHECK_VALID(0x3599, UNF_TRUE, sns_port, return); + nport_id = v_xchg->disc_port_id; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GNN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* NOTE: continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, nport_id); // send PLOGI + } +} + +static void unf_rcv_gnn_id_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID or Link down immediately */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + struct unf_rport_s *rport = NULL; + unsigned long long node_name = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3600, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3601, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3602, UNF_TRUE, v_gnn_id_rsp_pld, return); + + node_name = ((unsigned long long)(gnn_id_rsp_pld->node_name[0]) << + 32) | + ((unsigned long 
long)(gnn_id_rsp_pld->node_name[1])); + + if (node_name == lport->node_name) { + /* R_Port & L_Port with same Node Name */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) has the same node name(0x%llx) with RPort(0x%x), linkdown it", + lport->port_id, node_name, v_nport_id); + + /* Destroy immediately */ + unf_rport_immediate_linkdown(lport, rport); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) got RPort(0x%x) with node name(0x%llx) by GNN_ID", + lport->port_id, v_nport_id, node_name); + + /* Start to Send GFF_ID */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_rcv_gnn_id_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3603, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3604, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3605, UNF_TRUE, v_gnn_id_rsp_pld, return); + + rjt_reason = (gnn_id_rsp_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GNN_ID was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + /* Node existence: Continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_gnn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *sns_port = (struct unf_rport_s *)v_sns_port; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3608, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3609, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3610, UNF_TRUE, v_xchg, return); + + nport_id = xchg->disc_port_id; + gnn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gnn_id_rsp; + cmnd_rsp_size = (gnn_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case ACC: send GFF_ID or Link down immediately */ + unf_rcv_gnn_id_acc(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & 
UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case RJT: send GFF_ID */ + unf_rcv_gnn_id_rjt(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else { /* NOTE: continue next stage */ + /* Case unknown: send GFF_ID */ + unf_rcv_gnn_id_rsp_unknown(lport, sns_port, nport_id); + } +} + +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* from DISC stop/re-login */ + struct unf_gnnid_s *gnn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3370, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange can't be NULL for GNN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_NODE_NAME); + } + + xchg->cmnd_code = NS_GNN_ID; /* GNN_ID */ + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gnn_id_ob_callback; /* send GFF_ID */ + xchg->pfn_callback = unf_gnn_id_callback; /* send GFF_ID */ + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GNN_ID entry(payload) */ + gnn_id_pld = &fc_entry->gnn_id; /* GNNID payload */ + memset(gnn_id_pld, 0, sizeof(struct unf_gnnid_s)); + unf_fill_gnn_id_pld(gnn_id_pld, v_nport_id); + + /* Start to send GNN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GNN_ID send %s. Port(0x%x_0x%x)--->rport(0x%x) inquire Nportid(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", v_lport->port_id, + v_lport->nport_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gpn_id_pld(struct unf_gpnid_s *v_gpn_id_pld, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3371, UNF_TRUE, v_gpn_id_pld, return); + + v_gpn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gpn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gpn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GPN_ID; + v_gpn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* Inquiry WWN from SW */ + v_gpn_id_pld->nport_id = v_nport_id; +} + +unsigned int unf_rport_relogin(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send GNN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3563, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GNN_ID now to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_nport_id, + UNF_DISC_GET_NODE_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_NODE_NAME, + v_nport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(v_lport, sns_port, v_nport_id); + } + + return ret; +} + +static void unf_rcv_gpn_id_acc(struct unf_lport_s *v_lport, + unsigned int v_nport_id, + unsigned long long v_port_name) +{ + /* then PLOGI or re-login */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport = unf_find_valid_rport(lport, v_port_name, v_nport_id); + if (rport) { + /* R_Port with TGT mode & L_Port with INI mode: + * send PLOGI with INIT state + */ + if ((rport->options & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, + v_nport_id); + UNF_CHECK_VALID(0x3630, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI failed for 0x%x, enter recovry", + lport->port_id, lport->nport_id, + v_nport_id); + + unf_rport_error_recovery(rport); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if ((rport->rp_state != UNF_RPORT_ST_PLOGI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + unf_rport_state_ma(rport, + UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + + /* Do LOGO operation */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + } + } + } else { + /* Send GNN_ID */ + (void)unf_rport_relogin(lport, v_nport_id); + } +} + +static void unf_rcv_gpn_id_rjt(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct 
unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3631, UNF_TRUE, v_lport, return); + + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + unf_rport_linkdown(lport, rport); /* Do R_Port Link down */ +} + +/* + * Function Name : unf_rcv_gpn_id_rsp_unknown + * Function Description: Process unknown type of GPN_ID response + * Input Parameters : struct unf_lport_s *v_lport + * : unsigned int v_nport_id + * Output Parameters : N/A + * Return Type : void + */ +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x3632, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) wrong response of GPN_ID with RPort(0x%x)", + lport->port_id, v_nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, v_nport_id); +} + +static void unf_gpn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_gpnid_rsp_s *gpn_id_rsp_pld = NULL; + unsigned long long port_name = 0; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3635, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3636, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3637, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + nport_id = xchg->disc_port_id; + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + gpn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gpn_id_rsp; + cmnd_rsp_size = gpn_id_rsp_pld->ctiu_pream.cmnd_rsp_size; + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* GPN_ID ACC */ + port_name = ((unsigned long long) + (gpn_id_rsp_pld->port_name[0]) << 32) | + ((unsigned long long) + (gpn_id_rsp_pld->port_name[1])); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x) GPN_ID ACC with WWN(0x%llx) RPort NPort ID(0x%x)", + lport->port_id, port_name, nport_id); + + /* Send PLOGI or LOGO or GNN_ID */ + unf_rcv_gpn_id_acc(lport, nport_id, port_name); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == + UNF_CT_IU_REJECT) { + /* GPN_ID RJT: Link Down */ + unf_rcv_gpn_id_rjt(lport, nport_id); + } else { + /* GPN_ID response type unknown: Send GNN_ID */ + unf_rcv_gpn_id_rsp_unknown(lport, nport_id); + } +} + +static void unf_gpn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3633, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + UNF_CHECK_VALID(0x3634, UNF_TRUE, lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GPN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, nport_id); +} + +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct 
unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gpnid_s *gpn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3374, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) { + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + } + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GPN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_PORT_NAME); + } + + xchg->cmnd_code = NS_GPN_ID; // GPN_ID + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_gpn_id_callback; + /* re-login --->>> GNN_ID */ + xchg->pfn_ob_callback = unf_gpn_id_ob_callback; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GPN_ID entry(payload) */ + gpn_id_pld = &fc_entry->gpn_id; /* GPN_ID payload */ + memset(gpn_id_pld, 0, sizeof(struct unf_gpnid_s)); + unf_fill_gpn_id_pld(gpn_id_pld, v_nport_id); + + /* Send GPN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GPN_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", v_lport->port_id, + v_sns_port->nport_id, v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_ft_pld(struct unf_gid_s *v_gid_pld) +{ + UNF_CHECK_VALID(0x3376, UNF_TRUE, v_gid_pld, return); + + v_gid_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gid_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gid_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GID_FT; + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gid_pld->scope_type = UNF_GID_FT_TYPE; +} + +static void unf_gid_ft_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3589, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3377, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3378, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_FT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_FT; // GID_FT + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_ft_ob_callback; // do DISC recovery + xchg->pfn_callback = unf_gid_ft_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_FT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_ft_pld(gid_pld); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_FT Response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate GID_FT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_FT GS commmand now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GID_FT send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_pt_pld(struct unf_gid_s *v_gid_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3379, UNF_TRUE, v_gid_pld, return); + UNF_CHECK_VALID(0x3380, UNF_TRUE, v_lport, return); + + v_gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_PT); + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* 0x7F000000 means NX_Port */ + v_gid_pld->scope_type = UNF_GID_PT_TYPE; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_gid_pld, + sizeof(struct unf_gid_s)); +} + +static void unf_gid_pt_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3593, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* from DISC start */ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3381, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3382, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_PT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_PT; /* GID_PT */ + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_pt_ob_callback; /* do DISC recovery */ + xchg->pfn_callback = unf_gid_pt_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_PT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_pt_pld(gid_pld, v_lport); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_PT response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%0x) Allocate GID_PT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_PT GS command to SW */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + 
"[info]LOGIN: GID_PT send %s. Port(0x%x_0x%x)--->rport(0x%x) with OXID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rft_id_pld(struct unf_rftid_s *v_rft_id_pld, + struct unf_lport_s *v_lport) +{ + unsigned int i = 1; + + UNF_CHECK_VALID(0x3383, UNF_TRUE, v_rft_id_pld, return); + UNF_CHECK_VALID(0x3384, UNF_TRUE, v_lport, return); + + v_rft_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rft_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rft_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFT_ID; + v_rft_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rft_id_pld->nport_id = (v_lport->nport_id); + v_rft_id_pld->fc_4_types[0] = (UNF_FC4_SCSI_BIT8); + + for (i = 1; i < 8; i++) + v_rft_id_pld->fc_4_types[i] = 0; +} + +static void unf_rft_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3687, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3688, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFT_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery operation */ + unf_lport_error_recovery(lport); +} + +static void unf_rft_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFT_ID --->>> RFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3689, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3690, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3691, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) SFS entry is NULL with state(0x%x)", + lport->port_id, lport->en_states); + return; + } + + ctiu_prem = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rft_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RFT_ID response is (0x%x)", + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->port_id, lport->nport_id); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for RFT_ID ACC: send RFF_ID */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFT_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) receive RFT_ID ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + return; + } + + /* LPort: RFT_ID_WAIT --> RFF_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Start to send RFF_ID GS command */ + ret = unf_send_rff_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + 
UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } + } else { + /* Case for RFT_ID RJT: do recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFT_ID RJT with reason_code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* After PLOGI process */ + struct unf_rftid_s *rft_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3385, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3386, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RFT_ID", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_RFT_ID; /* RFT_ID */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_rft_id_callback; + xchg->pfn_ob_callback = unf_rft_id_ob_callback; /* Do L_Port recovery */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RFT_ID entry(payload) */ + rft_id = &fc_entry->rft_id; + memset(rft_id, 0, sizeof(struct unf_rftid_s)); + unf_fill_rft_id_pld(rft_id, v_lport); + + /* Send RFT_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RFT_ID send %s. Port(0x%x_0x%x)--->rport(0x%x). rport(0x%p) wwpn(0x%llx) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_rport->nport_id, + v_rport, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rff_id_pld(struct unf_rffid_s *v_rff_id_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3387, UNF_TRUE, v_rff_id_pld, return); + UNF_CHECK_VALID(0x3388, UNF_TRUE, v_lport, return); + + v_rff_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rff_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rff_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFF_ID; + v_rff_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rff_id_pld->nport_id = v_lport->nport_id; + v_rff_id_pld->fc_4_feature = UNF_FC4_FCP_TYPE | + (v_lport->options << 4); +} + +static void unf_rff_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFF_ID --->>> SCR(for INI mode) */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3684, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3685, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3686, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + if (unlikely(!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr)) + return; + + /* SCR send to 0xfffffd(not 0xfffffc), need to get new R_Port */ + UNF_REFERNCE_VAR(v_rport); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FCTRL); // 0xfffffd + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FCTRL); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't allocate RPort(0x%x)", + lport->port_id, UNF_FC_FID_FCTRL); + return; + } + + rport->nport_id = UNF_FC_FID_FCTRL; + ctiu_prem = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)); + + /* RSP Type check: some SW not support RFF_ID, go to next stage also */ + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + } + + /* L_Port state check */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFF_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + return; + } + + /* Update L_Port state & Send SCR to remote port */ + /* LPort: 
RFF_ID_WAIT --> SCR_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Start to send SCR command */ + ret = unf_send_scr(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send SCR failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +static void unf_rff_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3682, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3683, UNF_TRUE, NULL != lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_rff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* from RFT_ID, then Send SCR */ + struct unf_rffid_s *rff_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3389, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3390, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "%s Enter", __func__); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RFF_ID", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_RFF_ID; // RFF_ID + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_rff_id_callback; + xchg->pfn_ob_callback = unf_rff_id_ob_callback; /* Do L_Port recovery */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RFF_ID entry(payload) */ + rff_id = &fc_entry->rff_id; + memset(rff_id, 0, sizeof(struct unf_rffid_s)); + unf_fill_rff_id_pld(rff_id, v_lport); + + /* Send RFF_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RFF_ID feature 0x%x(10:TGT,20:INI,30:COM) send %s. Port(0x%x_0x%x)--->pstRPortid(0x%x) rport(0x%p) OX_ID(0x%x)", + v_lport->options, (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_login_with_rport_in_n2n(struct unf_lport_s *v_lport, + unsigned long long v_remote_port_name, + unsigned long long v_remote_nort_name) +{ + /* + * Call by (P2P): + * 1. RCVD FLOGI ACC + * 2. 
Send FLOGI ACC succeed + ** + * Compare WWN, larger is master, then send PLOGI + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long lport_flag = 0; + unsigned long rport_flag = 0; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3539, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&lport->lport_state_lock, lport_flag); + /* LPort: FLOGI_WAIT --> READY */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY); + spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag); + + port_name = v_remote_port_name; + node_name = v_remote_nort_name; + + if (lport->port_name > port_name) { + /* Master case: send PLOGI */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x)'s WWN(0x%llx) is larger than rport(0x%llx), should be master", + lport->port_id, lport->port_name, port_name); + + /* Update N_Port_ID now: 0xEF */ + lport->nport_id = UNF_P2P_LOCAL_NPORT_ID; + + rport = unf_find_valid_rport(v_lport, port_name, + UNF_P2P_REMOTE_NPORT_ID); // 0xD6 + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + UNF_P2P_REMOTE_NPORT_ID); + if (rport) { + rport->node_name = node_name; + rport->port_name = port_name; + rport->nport_id = UNF_P2P_REMOTE_NPORT_ID; // 0xD6 + rport->local_nport_id = UNF_P2P_LOCAL_NPORT_ID; // 0xEF + + spin_lock_irqsave(&rport->rport_state_lock, + rport_flag); + if ((rport->rp_state == UNF_RPORT_ST_PLOGI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x) Rport(0x%x) have sent PLOGI or PRLI with state(0x%x)", + lport->port_id, rport->nport_id, + rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_flag); + return; + } + /* Update L_Port State: PLOGI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_flag); + + /* P2P with master: Start to Send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) with WWN(0x%llx) send PLOGI to(0x%llx) failed", + lport->port_id, lport->port_name, + port_name); + + unf_rport_error_recovery(rport); + } + } else { + /* Get/Alloc R_Port failed */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) with WWN(0x%llx) allocate RPort(ID:0x%x,WWPN:0x%llx) failed", + lport->port_id, lport->port_name, + UNF_P2P_REMOTE_NPORT_ID, port_name); + } + } else { + /* Slave case: L_Port's Port Name is smaller than R_Port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with WWN(0x%llx) is smaller than rport(0x%llx), do nothing", + lport->port_id, lport->port_name, port_name); + } +} + +static void unf_flogi_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Callback for Sending FLOGI ACC succeed */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + + UNF_CHECK_VALID(0x3457, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3458, UNF_TRUE, v_xchg->lport, return); + UNF_CHECK_VALID(0x3459, UNF_TRUE, v_xchg->rport, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + lport = v_xchg->lport; + rport = v_xchg->rport; + 
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + port_name = rport->port_name; + node_name = rport->node_name; + + /* Swap case: Set WWPN & WWNN with zero */ + rport->port_name = 0; + rport->node_name = 0; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Enter PLOGI stage: after send FLOGI ACC succeed */ + unf_login_with_rport_in_n2n(lport, port_name, node_name); +} + +unsigned int unf_send_flogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_flogi_payload_s *flogi_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3393, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3394, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3395, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_FLOGI); + + v_xchg->did = 0; /* D_ID must be 0 */ + v_xchg->sid = UNF_FC_FID_FLOGI; /* S_ID must be 0xfffffe */ + v_xchg->oid = v_xchg->sid; + v_xchg->pfn_callback = NULL; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + /* call back for sending FLOGI response */ + v_xchg->pfn_ob_callback = unf_flogi_acc_ob_callback; + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + unf_fill_package(&pkg, v_xchg, v_rport); + + /* Fill FLOGI ACC payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + flogi_acc_pld = &fc_entry->flogi_acc.flogi_payload; + flogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + unf_fill_flogi_pld(flogi_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Send FLOGI ACC to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_plogi_acc_pld(struct unf_plogi_payload_s *v_plogi_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3396, UNF_TRUE, v_plogi_acc_pld, return); + UNF_CHECK_VALID(0x3397, UNF_TRUE, v_lport, return); + + v_plogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + login_parms = &v_plogi_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+ 0 : unf_low_level_bbscn(v_lport); + } else { + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + v_lport->max_frame_size; + login_parms->co_parms.nport_total_concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); + login_parms->co_parms.e_d_tov = (v_lport->ed_tov); + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* class-3 */ + login_parms->cl_parms[2].received_data_field_size = + v_lport->max_frame_size; + login_parms->cl_parms[2].concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->cl_parms[2].open_sequences_per_exchange = + UNF_PLOGI_SEQ_PER_XCHG; + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_plogi_acc_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_schedule_open_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Used for L_Port port only with TGT, or R_Port only with INI */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = 0; + unsigned int port_feature = INVALID_VALUE32; + + UNF_CHECK_VALID(0x3452, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3453, UNF_TRUE, v_rport, return); + + delay = (unsigned long)lport->ed_tov; + port_feature = rport->options & UNF_PORT_MODE_BOTH; + + if ((lport->options == UNF_PORT_MODE_TGT) || + (port_feature == UNF_PORT_MODE_INI)) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + + ret = unf_rport_ref_inc(rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) abnormal, no need open", + lport->port_id, lport->nport_id, + rport->nport_id); + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + return; + } + + /* Delay work pending check */ + if (delayed_work_pending(&rport->open_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) open work is running, no need re-open", + lport->port_id, lport->nport_id, + rport->nport_id); + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + unf_rport_ref_dec(rport); + return; + } + + /* start open work */ + if (queue_delayed_work( + unf_work_queue, + &rport->open_work, + (unsigned long) + msecs_to_jiffies((unsigned int)delay))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) start open work", + lport->port_id, lport->nport_id, + rport->nport_id); + + (void)unf_rport_ref_inc(rport); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_ref_dec(rport); + } +} + +static void unf_plogi_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3454, UNF_TRUE, v_xchg, return); + + 
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
+ lport = v_xchg->lport;
+ rport = v_xchg->rport;
+ spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
+
+ UNF_CHECK_VALID(0x3455, UNF_TRUE, lport, return);
+ UNF_CHECK_VALID(0x3456, UNF_TRUE, rport, return);
+
+ /*
+ * 1. According to FC-LS 4.2.7.1:
+ * after RCVD PLOGI or sending PLOGI ACC, need to terminate open EXCH
+ */
+ unf_cm_xchg_mgr_abort_io_by_id(lport, rport, rport->nport_id,
+ lport->nport_id, 0);
+
+ /* 2. Sending PLOGI ACC failed */
+ if (v_xchg->ob_callback_sts != UNF_IO_SUCCESS) {
+ /* Do R_Port recovery */
+ unf_rport_error_recovery(rport);
+
+ /* Do not care: Just used for L_Port only is
+ * TGT mode or R_Port only is INI mode
+ */
+ unf_schedule_open_work(lport, rport);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
+ UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC failed(0x%x) with RPort(0x%x) feature(0x%x)",
+ lport->port_id, lport->nport_id,
+ lport->options, v_xchg->ob_callback_sts,
+ rport->nport_id, rport->options);
+
+ /* NOTE: return */
+ return;
+ }
+
+ /* 3. Private Loop: check whether PRLI needs to be sent */
+ spin_lock_irqsave(&rport->rport_state_lock, flags);
+ if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) &&
+ ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) ||
+ (rport->rp_state == UNF_RPORT_ST_READY))) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+ UNF_MAJOR,
+ "[info]Port(0x%x_0x%x) RPort(0x%x) with State(0x%x) return directly",
+ lport->port_id, lport->nport_id,
+ rport->nport_id, rport->rp_state);
+
+ /* Do nothing */
+ spin_unlock_irqrestore(&rport->rport_state_lock, flags);
+ return;
+ }
+ unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); // PRLI_WAIT
+ spin_unlock_irqrestore(&rport->rport_state_lock, flags);
+
+ /* 4. Set Port Feature with BOTH: cancel */
+ if ((rport->options == UNF_PORT_MODE_UNKNOWN) &&
+ (rport->port_name != INVALID_WWPN))
+ rport->options = unf_get_port_feature(rport->port_name);
+
+ /*
+ * 5. Check whether PRLI needs to be sent after a delay
+ * Called by: RCVD PLOGI ACC or callback for sending PLOGI ACC succeed
+ */
+ unf_check_rport_need_delay_prli(lport, rport, rport->options);
+
+ /* 6.
Do not care: Just used for L_Port only is + * TGT mode or R_Port only is INI mode + */ + unf_schedule_open_work(lport, rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC succeed with RPort(0x%x) feature(0x%x)", + lport->port_id, lport->nport_id, lport->options, + rport->nport_id, rport->options); +} + +unsigned int unf_send_plogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_payload_s *plogi_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3398, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3399, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3400, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI); + + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->pfn_callback = NULL; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + /* call back for sending PLOGI ACC */ + v_xchg->pfn_ob_callback = unf_plogi_acc_ob_callback; + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Fill PLOGI ACC payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + plogi_acc_pld = &fc_entry->plogi_acc.payload; + unf_fill_plogi_acc_pld(plogi_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Start to Send PLOGI ACC now */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + /* NOTE: free exchange */ + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PLOGI ACC send %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_lport->port_name, + v_rport->nport_id, v_rport->port_name, + ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_rjt_pld(struct unf_els_rjt_s *v_els_rjt, + unsigned int v_reason_code, + unsigned int v_reason_explanation) +{ + UNF_CHECK_VALID(0x3401, UNF_TRUE, v_els_rjt, return); + + v_els_rjt->cmnd = UNF_ELS_CMND_RJT; + v_els_rjt->reason_code = (v_reason_code | v_reason_explanation); +} + +static void unf_fill_prli_acc_pld(struct unf_pril_payload_s *v_prli_acc_pld, + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int port_mode = UNF_FC4_FRAME_PARM_3_TGT; + + UNF_CHECK_VALID(0x3402, UNF_TRUE, v_prli_acc_pld, return); + UNF_CHECK_VALID(0x3403, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3404, UNF_TRUE, v_rport, return); + + v_prli_acc_pld->cmnd = ( + UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)(sizeof(struct unf_pril_payload_s) - + UNF_PRLI_SIRT_EXTRA_SIZE))); + + v_prli_acc_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); + v_prli_acc_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_acc_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About INI/TGT mode */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) + /* return INI (0x20): R_Port has TGT mode, + * L_Port has INI mode + */ + port_mode = UNF_FC4_FRAME_PARM_3_INI; + else + port_mode = v_lport->options; + + /* About Read xfer_rdy disable */ + v_prli_acc_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + port_mode); /* 0x2 */ + + /* About Tape support */ + if (v_rport->tape_support_needed) { + v_prli_acc_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "PRLI ACC tape support"); + } + + /* About confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + /* 0x80 */ + v_prli_acc_pld->parms[3] |= UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_prli_acc_pld, sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Report R_Port scsi Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3449, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3450, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3451, UNF_TRUE, rport, return); + + /* Update & Report Link Up */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); // READY + rport_state = rport->rp_state; + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]LOGIN: Port(0x%x) RPort(0x%x) state(0x%x) WWN(0x%llx) prliacc", + lport->port_id, rport->nport_id, + rport->rp_state, rport->port_name); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event(lport, rport, + rport->options); + } +} + +unsigned int unf_send_prli_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + 
struct unf_xchg_s *v_xchg)
+{
+ struct unf_pril_payload_s *prli_acc_pld = NULL;
+ union unf_sfs_u *fc_entry = NULL;
+ unsigned int ret = UNF_RETURN_ERROR;
+ struct unf_frame_pkg_s pkg = { 0 };
+ unsigned short ox_id = 0;
+ unsigned short rx_id = 0;
+
+ UNF_REFERNCE_VAR(ox_id);
+ UNF_REFERNCE_VAR(rx_id);
+ UNF_CHECK_VALID(0x3405, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3406, UNF_TRUE, v_rport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3407, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+ v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLI);
+ v_xchg->did = v_rport->nport_id;
+ v_xchg->sid = v_lport->nport_id;
+ v_xchg->oid = v_xchg->sid;
+ v_xchg->lport = v_lport;
+ v_xchg->rport = v_rport;
+
+ v_xchg->pfn_callback = NULL;
+ /* callback when sending succeeds */
+ v_xchg->pfn_ob_callback = unf_prli_acc_ob_callback;
+
+ /* Fill common package */
+ unf_fill_package(&pkg, v_xchg, v_rport);
+
+ /* Get FC entry (alloc when create exchange) */
+ fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
+ if (!fc_entry) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x) entry can't be NULL with tag(0x%x)",
+ v_lport->port_id, v_xchg->hot_pool_tag);
+
+ unf_cm_free_xchg(v_lport, v_xchg);
+ return UNF_RETURN_ERROR;
+ }
+
+ /* Fill PRLI ACC payload */
+ memset(fc_entry, 0, sizeof(union unf_sfs_u));
+ prli_acc_pld = &fc_entry->prli_acc.payload;
+ unf_fill_prli_acc_pld(prli_acc_pld, v_lport, v_rport);
+ ox_id = v_xchg->ox_id;
+ rx_id = v_xchg->rx_id;
+
+ /* Send ELS (PRLI) RSP */
+ ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg);
+ if (ret != RETURN_OK)
+ unf_cm_free_xchg((void *)v_lport, (void *)v_xchg);
+
+ if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) ||
+ (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+ "[info]LOGIN: PRLI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)",
+ (ret != RETURN_OK) ?
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rec_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* Reserved */ + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(v_xchg); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + return RETURN_OK; +} + +static void unf_rrq_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3408, UNF_TRUE, v_xchg, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]RRQ ACC Xchg(0x%p) tag(0x%x)", + v_xchg, v_xchg->hot_pool_tag); + + UNF_REFERNCE_VAR(v_xchg); +} + +static void unf_fill_els_acc_pld(struct unf_els_acc_s *v_els_acc_pld) +{ + UNF_CHECK_VALID(0x3420, UNF_TRUE, v_els_acc_pld, return); + + v_els_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_rscn_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_rscn_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rscn_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3421, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3422, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3423, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RSCN); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_rscn_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rscn_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(rscn_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RSCN ACC send %s. Port(0x%x)--->rport(0x%x) with OXID(0x%x) RXID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_logo_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_logo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *logo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3424, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3425, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3426, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_LOGO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_logo_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + logo_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(logo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rrq_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rrq_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3427, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3428, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3429, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; // do noting + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rrq_acc = &fc_entry->els_acc; + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RRQ); + v_xchg->pfn_ob_callback = unf_rrq_acc_ob_callback; // do noting + unf_fill_els_acc_pld(rrq_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + unf_fill_package(&pkg, v_xchg, v_rport); + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ ACC send %s. Port(0x%x)--->rport(0x%x) with Xchg(0x%p) OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, v_xchg, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_pdisc_acc_pld(struct unf_plogi_payload_s *v_pdisc_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3430, UNF_TRUE, v_pdisc_acc_pld, return); + UNF_CHECK_VALID(0x3431, UNF_TRUE, v_lport, return); + + v_pdisc_acc_pld->cmnd = UNF_ELS_CMND_ACC; + login_parms = &v_pdisc_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+ 0 : unf_low_level_bbscn(v_lport); + } else { + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + v_lport->max_frame_size; + login_parms->co_parms.nport_total_concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->co_parms.relative_offset = UNF_PLOGI_RO_CATEGORY; + login_parms->co_parms.e_d_tov = v_lport->ed_tov; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; // class-3 + login_parms->cl_parms[2].received_data_field_size = + v_lport->max_frame_size; + login_parms->cl_parms[2].concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->cl_parms[2].open_sequences_per_exchange = + UNF_PLOGI_SEQ_PER_XCHG; + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_pdisc_acc_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_pdisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_pdisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_payload_s *pdisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PDISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_pdisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + pdisc_acc_pld = &fc_entry->pdisc_acc.payload; + unf_fill_pdisc_acc_pld(pdisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PDISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_adisc_acc_pld(struct unf_adisc_payload_s *v_adisc_acc_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3435, UNF_TRUE, v_adisc_acc_pld, return); + UNF_CHECK_VALID(0x3436, UNF_TRUE, v_lport, return); + + v_adisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + + v_adisc_acc_pld->hard_address = (v_lport->nport_id & UNF_ALPA_MASK); + v_adisc_acc_pld->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + v_adisc_acc_pld->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + v_adisc_acc_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_adisc_acc_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + v_adisc_acc_pld->nport_id = v_lport->nport_id; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_adisc_acc_pld, + sizeof(struct unf_adisc_payload_s)); +} + +static void unf_adisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_adisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_adisc_payload_s *adisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3437, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3438, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3439, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ADISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_adisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + adisc_acc_pld = &fc_entry->adisc_acc.adisc_payl; + unf_fill_adisc_acc_pld(adisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send ADISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_prlo_acc_pld(struct unf_prli_prlo_s *v_prlo_acc, + struct unf_lport_s *v_lport) +{ + struct unf_pril_payload_s *prlo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3440, UNF_TRUE, v_prlo_acc, return); + + prlo_acc_pld = &v_prlo_acc->payload; + prlo_acc_pld->cmnd = (UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int) + sizeof(struct unf_pril_payload_s))); + prlo_acc_pld->parms[0] = UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE; + prlo_acc_pld->parms[1] = 0; + prlo_acc_pld->parms[2] = 0; + prlo_acc_pld->parms[3] = 0; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, prlo_acc_pld, + sizeof(struct unf_pril_payload_s)); +} + +static unsigned int unf_send_prlo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prlo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3441, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3442, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3443, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; // do nothing + v_xchg->pfn_ob_callback = NULL; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + prlo_acc = &fc_entry->prlo_acc; + unf_fill_prlo_acc_pld(prlo_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PRLO ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg; + + UNF_CHECK_VALID(0x3444, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3445, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3446, UNF_TRUE, rport, return UNF_RETURN_ERROR); + + /* set pkg info */ + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + pkg.type = UNF_PKG_BLS_REQ; + pkg.frame_head.csctl_sid = v_xchg->sid; + pkg.frame_head.rctl_did = v_xchg->did; + pkg.frame_head.oxid_rxid = + (unsigned int)v_xchg->ox_id << 16 | v_xchg->rx_id; + pkg.xchg_contex = v_xchg; + pkg.unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + pkg.unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + + UNF_SET_XCHG_ALLOC_TIME(&pkg, v_xchg); + UNF_SET_ABORT_INFO_IOTYPE(&pkg, v_xchg); + + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + v_xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + /* Send ABTS frame to target */ + ret = unf_bls_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) send ABTS %s. Abort exch(0x%p) Cmdsn:0x%lx, tag(0x%x) iotype(0x%x)", + v_lport->port_id, v_lport->nport_id, + (ret == UNF_RETURN_ERROR) ? "failed" : "succeed", + v_xchg, (unsigned long)v_xchg->cmnd_sn, + v_xchg->hot_pool_tag, v_xchg->data_direction); + + UNF_REFERNCE_VAR(rport); + return ret; +} + +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rport_info_s rport_info; + + UNF_CHECK_VALID(0x3447, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3448, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&rport_info, 0, sizeof(struct unf_rport_info_s)); + + rport_info.rport_index = v_rport->rport_index; + rport_info.nport_id = v_rport->nport_id; + rport_info.port_name = v_rport->port_name; + + /* 2. release R_Port(parent context/Session) resource */ + if (!v_lport->low_level_func.service_op.pfn_unf_release_rport_res) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) release rport resource function can't be NULL", + v_lport->port_id); + + return ret; + } + + ret = v_lport->low_level_func.service_op.pfn_unf_release_rport_res( + v_lport->fc_port, + &rport_info); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport_index(0x%x, %p) send release session CMND failed", + v_lport->port_id, rport_info.rport_index, v_rport); + + return ret; +} + +static inline unsigned char unf_determin_bbscn(unsigned char local_bbscn, + unsigned char remote_bbscn) +{ + if ((remote_bbscn == 0) || (local_bbscn == 0)) + local_bbscn = 0; + else + local_bbscn = local_bbscn > remote_bbscn ? 
+ local_bbscn : remote_bbscn; + + return local_bbscn; +} + +static void unf_cfg_lowlevel_fabric_params( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_login_parms) +{ + struct unf_port_login_parms_s login_co_parms = { 0 }; + unsigned int remote_edtov = 0; + unsigned int ret = 0; + unsigned char remote_edtov_resolution = 0; /* 0:ms; 1:ns */ + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) + return; + + login_co_parms.remote_rttov_tag = + (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.remote_edtov_tag = 0; + login_co_parms.remote_bbcredit = + (unsigned short) + UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms); + login_co_parms.compared_bbscn = + (unsigned int)unf_determin_bbscn( + (unsigned char) + v_lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms)); + + remote_edtov_resolution = + (unsigned char) + UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(v_login_parms); + remote_edtov = UNF_GET_E_D_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.compared_edtov_val = + remote_edtov_resolution ? + (remote_edtov / 1000000) : remote_edtov; + + login_co_parms.compared_ratov_val = + UNF_GET_RA_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.els_cmnd_code = ELS_FLOGI; + + if (v_lport->en_act_topo & UNF_TOP_P2P_MASK) { + login_co_parms.en_act_topo = + (v_login_parms->co_parms.n_port == UNF_F_PORT) ? + UNF_ACT_TOP_P2P_FABRIC : UNF_ACT_TOP_P2P_DIRECT; + } else { + login_co_parms.en_act_topo = v_lport->en_act_topo; + } + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + (void *)v_lport->fc_port, + UNF_PORT_CFG_UPDATE_FABRIC_PARAM, + (void *)&login_co_parms); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Lowlevel unsupport fabric config"); +} + +static unsigned int unf_check_flogi_params( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_fabric_parms) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3460, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3461, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3462, UNF_TRUE, v_fabric_parms, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_rport); + + if (v_fabric_parms->cl_parms[2].valid == UNF_CLASS_INVALID) { + /* Discard directly */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) NPort_ID(0x%x) FLOGI not support class3", + v_lport->port_id, v_rport->nport_id); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_save_fabric_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_fabric_parms) +{ + unsigned long long fabric_node_name = 0; + + UNF_CHECK_VALID(0x3463, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3464, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3465, UNF_TRUE, v_fabric_parms, return); + + UNF_REFERNCE_VAR(v_lport); + fabric_node_name = (unsigned long long) + (((unsigned long long) + (v_fabric_parms->high_node_name) << 32) | + ((unsigned long long) + (v_fabric_parms->low_node_name))); + + /* R_Port for 0xfffffe is used for FLOGI, not need to save WWN */ + if (v_fabric_parms->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + v_fabric_parms->co_parms.bb_receive_data_field_size; + + /* with Fabric attribute */ + if 
(v_fabric_parms->co_parms.n_port == UNF_F_PORT) { + v_rport->ed_tov = v_fabric_parms->co_parms.e_d_tov; + v_rport->ra_tov = v_fabric_parms->co_parms.r_a_tov; + v_lport->ed_tov = v_fabric_parms->co_parms.e_d_tov; + v_lport->ra_tov = v_fabric_parms->co_parms.r_a_tov; + v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport); + v_lport->fabric_node_name = fabric_node_name; + } + + /* Configure info from FLOGI to chip */ + unf_cfg_lowlevel_fabric_params(v_lport, v_rport, v_fabric_parms); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) Rport(0x%x) login parameter: E_D_TOV = %u. LPort E_D_TOV = %u. fabric nodename: 0x%x%x", + v_lport->port_id, + v_rport->nport_id, + (v_fabric_parms->co_parms.e_d_tov), + v_lport->ed_tov, + v_fabric_parms->high_node_name, + v_fabric_parms->low_node_name); +} + +static unsigned int unf_flogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_flogi_fdisc_acc_s *flogi_frame = NULL; + struct unf_fabric_parms_s *fabric_login_parms = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned long long wwpn = 0; + unsigned long long wwnn = 0; + + UNF_CHECK_VALID(0x3466, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3467, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_sid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x)<---RPort(0x%x) Receive FLOGI with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_FLOGI); + + /* Check L_Port state: Offline */ + if (v_lport->en_states >= UNF_LPORT_ST_OFFLINE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with state(0x%x) not need to handle FLOGI", + v_lport->port_id, v_lport->en_states); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + flogi_frame = + &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi; + fabric_login_parms = &flogi_frame->flogi_payload.fabric_parms; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + &flogi_frame->flogi_payload, + sizeof(struct unf_flogi_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long) + (fabric_login_parms->high_port_name) << 32) | + ((unsigned long long)fabric_login_parms->low_port_name)); + wwnn = (unsigned long long) + (((unsigned long long) + (fabric_login_parms->high_node_name) << 32) | + ((unsigned long long)fabric_login_parms->low_node_name)); + + /* Get (new) R_Port: reuse only */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no RPort. 
do nothing", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Update R_Port info */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->port_name = wwpn; + rport->node_name = wwnn; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Check RCVD FLOGI parameters: only for class-3 */ + ret = unf_check_flogi_params(v_lport, rport, fabric_login_parms); + if (ret != RETURN_OK) { + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* P2P fabric */ + unf_lport_update_topo(v_lport, UNF_ACT_TOP_P2P_DIRECT); + + /* Save fabric parameters */ + unf_save_fabric_params(v_lport, rport, fabric_login_parms); + + /* Send ACC for FLOGI */ + ret = unf_send_flogi_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI ACC failed and do recover", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +static void unf_cfg_lowlevel_port_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms, + unsigned int v_cmd_type) +{ + struct unf_port_login_parms_s login_co_parms = { 0 }; + unsigned int ret = 0; + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) + return; + + login_co_parms.rport_index = v_rport->rport_index; + login_co_parms.seq_cnt = 0; + login_co_parms.ed_tov = 0; + login_co_parms.ed_tov_timer_val = v_lport->ed_tov; + login_co_parms.tx_mfs = v_rport->max_frame_size; + + login_co_parms.remote_rttov_tag = + (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.remote_edtov_tag = 0; + login_co_parms.remote_bbcredit = + (unsigned short)UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms); + login_co_parms.els_cmnd_code = v_cmd_type; + + if (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + login_co_parms.compared_bbscn = 0; + } else { + login_co_parms.compared_bbscn = + (unsigned int)unf_determin_bbscn( + (unsigned char) + v_lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms)); + } + + login_co_parms.compared_edtov_val = v_lport->ed_tov; + login_co_parms.compared_ratov_val = v_lport->ra_tov; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + (void *)v_lport->fc_port, + UNF_PORT_CFG_UPDATE_PLOGI_PARAM, + (void *)&login_co_parms); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Lowlevel unsupport port config", + v_lport->port_id); +} + +unsigned int unf_check_plogi_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3468, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3469, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3470, UNF_TRUE, v_login_parms, + return UNF_RETURN_ERROR); + + /* Parameters check: Class-type */ + if ((v_login_parms->cl_parms[2].valid == UNF_CLASS_INVALID) || + (v_login_parms->co_parms.bb_receive_data_field_size == 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort N_Port_ID(0x%x) with PLOGI parameters invalid: class3(%u), BBReceiveDataFieldSize(0x%x), send LOGO", + v_lport->port_id, v_rport->nport_id, + v_login_parms->cl_parms[2].valid, + 
v_login_parms->co_parms.bb_receive_data_field_size); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + /* --->>> LOGO */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* Enter LOGO stage */ + unf_rport_enter_logo(v_lport, v_rport); + return UNF_RETURN_ERROR; + } + + /* 16G FC Brocade SW, Domain Controller's + * PLOGI both support CLASS-1 & CLASS-2 + */ + if ((v_login_parms->cl_parms[0].valid == UNF_CLASS_VALID) || + (v_login_parms->cl_parms[1].valid == UNF_CLASS_VALID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) get PLOGI class1(%u) class2(%u) from N_Port_ID(0x%x)", + v_lport->port_id, + v_login_parms->cl_parms[0].valid, + v_login_parms->cl_parms[1].valid, + v_rport->nport_id); + } + + return ret; +} + +static void unf_save_plogi_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms, + unsigned int v_cmd_code) +{ +#define UNF_DELAY_TIME 100 /* WWPN smaller delay to send PRLI with COM mode */ + + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + unsigned int ed_tov = 0; + unsigned int remote_edtov = 0; + + if (v_login_parms->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + v_login_parms->co_parms.bb_receive_data_field_size; + + wwnn = (unsigned long long) + (((unsigned long long) + (v_login_parms->high_node_name) << 32) | + ((unsigned long long)v_login_parms->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long) + (v_login_parms->high_port_name) << 32) | + ((unsigned long long)v_login_parms->low_port_name)); + + remote_edtov = v_login_parms->co_parms.e_d_tov; + ed_tov = v_login_parms->co_parms.e_d_tov_resolution ? + (remote_edtov / 1000000) : remote_edtov; + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + v_rport->local_nport_id = v_lport->nport_id; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) || + (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) { + /* P2P or Private Loop */ + v_lport->ed_tov = (v_lport->ed_tov > ed_tov) ? + v_lport->ed_tov : ed_tov; + v_lport->ra_tov = 2 * v_lport->ed_tov; // 2 * E_D_TOV + v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport); + + if (ed_tov != 0) + v_rport->ed_tov = ed_tov; + else + v_rport->ed_tov = UNF_DEFAULT_EDTOV; + } else { + /* SAN: E_D_TOV updated by FLOGI */ + v_rport->ed_tov = v_lport->ed_tov; + } + + /* WWPN smaller: delay to send PRLI */ + if (v_rport->port_name > v_lport->port_name) + v_rport->ed_tov += UNF_DELAY_TIME; // 100ms + + /* Configure port parameters to low level (chip) */ + unf_cfg_lowlevel_port_params(v_lport, v_rport, v_login_parms, + v_cmd_code); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) with WWPN(0x%llx) WWNN(0x%llx) login: ED_TOV(%u) Port: ED_TOV(%u)", + v_lport->port_id, + v_rport->nport_id, + v_rport->port_name, v_rport->node_name, + ed_tov, + v_lport->ed_tov); +} + +static int unf_check_bbscn_is_enabled(unsigned char local_bbscn, + unsigned char remote_bbscn) +{ + return unf_determin_bbscn(local_bbscn, remote_bbscn) ? 
+ UNF_TRUE : UNF_FALSE; +} + +static unsigned int unf_irq_process_switch_2_thread(void *v_lport, + struct unf_xchg_s *v_xchg, + unf_evt_task v_evt_task) +{ + struct unf_cm_event_report *event = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + lport = v_lport; + xchg = v_xchg; + + if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) || + (!lport->event_mgr.pfn_unf_post_event) || + (!lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(0x2065, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) event function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = unf_xchg_ref_inc(xchg, SFS_RESPONSE); + UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + event = lport->event_mgr.pfn_unf_get_free_event((void *)v_lport); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, event, + return UNF_RETURN_ERROR); + + event->lport = lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = v_evt_task; + event->para_in = v_xchg; + lport->event_mgr.pfn_unf_post_event(lport, event); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) start to switch thread process now", + lport->port_id); + + return ret; +} + +static unsigned int unf_plogi_handler_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = v_xchg; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_plogi_pdisc_s *plogi_frame = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport, + return UNF_RETURN_ERROR); + + lport = xchg->lport; + rport = xchg->rport; + plogi_frame = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; + login_parms = &plogi_frame->payload.parms; + + unf_save_plogi_params(lport, rport, login_parms, + ELS_PLOGI); + + /* Update state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = xchg->sid; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send PLOGI ACC to remote port */ + ret = unf_send_plogi_acc(lport, rport, xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI ACC failed", + lport->port_id); + + /* NOTE: exchange has been freed inner(before) */ + unf_rport_error_recovery(rport); + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) send PLOGI ACC to Port(0x%x) succeed", + lport->port_id, rport->nport_id); + + return ret; +} + +static int unf_plogi_async_handle(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + ret = unf_plogi_handler_com_process(xchg); + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return (int)ret; +} + +static unsigned int unf_send_els_rjt_by_did(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + unsigned int v_did, + struct unf_rjt_info_s *v_rjt_info) +{ + struct 
unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3503, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3504, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = NULL; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, NULL); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_plogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = v_xchg; + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_plogi_pdisc_s *plogi_frame = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned long long wwpn = INVALID_VALUE64; + unsigned int ret = UNF_RETURN_ERROR; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3474, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3475, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + /* 1. Maybe: PLOGI is sent by Name server */ + if ((v_sid < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PLOGI. Port(0x%x_0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_PLOGI); + + /* 2. State check: Offline */ + if (lport->en_states >= UNF_LPORT_ST_OFFLINE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PLOGI with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + + /* + * 3. 
According to FC-LS 4.2.7.1: + * After RCVD PLogi or send Plogi ACC, need to termitate open EXCH + */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, v_sid, lport->nport_id, 0); + + /* Get R_Port by WWpn */ + plogi_frame = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; + login_parms = &plogi_frame->payload.parms; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, + &plogi_frame->payload, + sizeof(struct unf_plogi_payload_s)); + + wwpn = (unsigned long long) + (((unsigned long long) + (login_parms->high_port_name) << 32) | + ((unsigned long long)login_parms->low_port_name)); + + /* 4. Get (new) R_Port (by wwpn) */ + rport = unf_find_rport(lport, v_sid, wwpn); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, v_sid); + if (!rport) { + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_PLOGI; + rjt_info.reason_code = UNF_LS_RJT_BUSY; + rjt_info.reason_explanation = + UNF_LS_RJT_INSUFFICIENT_RESOURCES; + + /* R_Port is NULL: Send ELS RJT for PLOGI */ + (void)unf_send_els_rjt_by_did(lport, xchg, v_sid, &rjt_info); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no RPort and send PLOGI reject", + lport->port_id); + + /* NOTE: exchange has been freed inner(before) */ + return UNF_RETURN_ERROR; + } + + /* 5. Cancel recovery timer work after RCVD PLOGI */ + if (cancel_delayed_work(&rport->recovery_work)) + atomic_dec(&rport->rport_ref_cnt); + + /* + * 6. Plogi parameters check + * Call by: (RCVD) PLOGI handler & callback function for RCVD PLOGI_ACC + */ + ret = unf_check_plogi_params(lport, rport, login_parms); + if (ret != RETURN_OK) { + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + + xchg->lport = v_lport; + xchg->rport = rport; + xchg->sid = v_sid; + + /* 7. About bbscn for context change */ + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); + if ((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) && + (bbscn_enabled == UNF_TRUE)) { + switch_2_thread = UNF_TRUE; + lport->b_bbscn_support = UNF_TRUE; + } + + /* 8. 
Process PLOGI Frame: switch to thread if necessary */ + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) + /* Wait for LR complete sync */ + ret = unf_irq_process_switch_2_thread(lport, xchg, + unf_plogi_async_handle); + else + ret = unf_plogi_handler_com_process(xchg); + + return ret; +} + +static void unf_obtain_tape_capacity(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int tape_parm) +{ + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + + rec_support = tape_parm & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = tape_parm & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = tape_parm & UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + if ((v_lport->low_level_func.lport_cfg_items.tape_support) && + rec_support && task_retry_support && retry_support) { + v_rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FC_tape is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } + + if ((tape_parm & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (v_lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + v_rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } +} + +unsigned int unf_prli_handler_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prli = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flags = 0; + unsigned int uisid = 0; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + + xchg = v_xchg; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + lport = xchg->lport; + uisid = v_xchg->sid; + + UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PRLI); + + /* 1. Get R_Port: for each R_Port from rport_busy_list */ + rport = unf_get_rport_by_nport_id(lport, uisid); + if (!rport) { + /* non session (R_Port) existence */ + (void)unf_send_logo_by_did(lport, uisid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PRLI but no RPort SID(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + v_xchg->ox_id); + + unf_cm_free_xchg(lport, v_xchg); + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Receive PRLI. Port(0x%x)<---RPort(0x%x) with S_ID(0x%x)", + lport->port_id, rport->nport_id, uisid); + + /* 2. Get PRLI info */ + prli = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli; + if ((uisid < UNF_FC_FID_DOM_MGR) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Receive PRLI. Port(0x%x_0x%x)<---RPort(0x%x) parameter-3(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + prli->payload.parms[3], v_xchg->ox_id); + } + + UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, + &prli->payload, sizeof(struct unf_pril_payload_s)); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + + /* 3. 
Increase R_Port ref_cnt */ + ret = unf_rport_ref_inc(rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x_0x%p) is removing and do nothing", + lport->port_id, rport->nport_id, rport); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_cm_free_xchg(lport, v_xchg); + return RETURN_OK; + } + + /* 4. Cancel R_Port Open work */ + if (cancel_delayed_work(&rport->open_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) cancel open work succeed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* This is not the last counter */ + atomic_dec(&rport->rport_ref_cnt); + } + + /* 5. Check R_Port state */ + if ((rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) with state(0x%x) when received PRLI, send LOGO", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); // LOGO + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* NOTE: Start to send LOGO */ + unf_rport_enter_logo(lport, rport); + + unf_cm_free_xchg(lport, v_xchg); + unf_rport_ref_dec(rport); + + return ret; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* 6. Update R_Port options(INI/TGT/BOTH) */ + rport->options = prli->payload.parms[3] & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + unf_update_port_feature(rport->port_name, rport->options); + + /* for Confirm */ + rport->fcp_conf_needed = UNF_FALSE; + + unf_obtain_tape_capacity(lport, rport, prli->payload.parms[3]); + + if ((prli->payload.parms[3] & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) RPort(0x%x) parameter-3(0x%x) options(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id, + prli->payload.parms[3], rport->options); + + /* 7. Send PRLI ACC */ + ret = unf_send_prli_acc(lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI ACC failed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* NOTE: exchange has been freed inner(before) */ + unf_rport_error_recovery(rport); + } + + /* 8. 
Decrease R_Port ref_cnt */ + unf_rport_ref_dec(rport); + + return ret; +} + +static int unf_prli_async_handle(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + ret = unf_prli_handler_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return (int)ret; +} + +static unsigned int unf_prli_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + int switch_2_thread = UNF_FALSE; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3476, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3477, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->sid = v_sid; + v_xchg->lport = v_lport; + lport = v_lport; + + if ((v_lport->b_bbscn_support) && + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) + switch_2_thread = UNF_TRUE; + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) + /* Wait for LR done sync */ + ret = unf_irq_process_switch_2_thread(v_lport, v_xchg, + unf_prli_async_handle); + else + ret = unf_prli_handler_com_process(v_xchg); + + return ret; +} + +static void unf_save_rscn_port_id( + struct unf_rscn_mg_s *v_rscn_mg, + struct unf_rscn_port_id_page_s *v_rscn_port_id) +{ + struct unf_port_id_page_s *exit_port_id_page = NULL; + struct unf_port_id_page_s *new_port_id_page = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + enum int_e repeat = UNF_FALSE; + + UNF_CHECK_VALID(0x3478, UNF_TRUE, v_rscn_mg, return); + UNF_CHECK_VALID(0x3479, UNF_TRUE, v_rscn_port_id, return); + + /* 1. check new RSCN Port_ID (RSNC_Page) + * whether within RSCN_Mgr or not + */ + spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag); + if (list_empty(&v_rscn_mg->list_using_rscn_page)) { + repeat = UNF_FALSE; + } else { + /* Check repeat: for each exist RSCN page + * form RSCN_Mgr Page list + */ + list_for_each_safe(node, next_node, + &v_rscn_mg->list_using_rscn_page) { + exit_port_id_page = + list_entry(node, struct unf_port_id_page_s, + list_node_rscn); + if ((exit_port_id_page->port_id_port == + v_rscn_port_id->port_id_port) && + (exit_port_id_page->port_id_area == + v_rscn_port_id->port_id_area) && + (exit_port_id_page->port_id_domain == + v_rscn_port_id->port_id_domain)) { + repeat = UNF_TRUE; + break; + } + } + } + spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag); + + UNF_CHECK_VALID(0x3480, UNF_TRUE, v_rscn_mg->pfn_unf_get_free_rscn_node, + return); + + /* 2. 
Get & add free RSNC Node --->>> RSCN_Mgr */ + if (repeat == UNF_FALSE) { + new_port_id_page = + v_rscn_mg->pfn_unf_get_free_rscn_node(v_rscn_mg); + if (!new_port_id_page) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Get free RSCN node failed"); + + return; + } + + new_port_id_page->uc_addr_format = v_rscn_port_id->addr_format; + new_port_id_page->uc_event_qualifier = + v_rscn_port_id->event_qualifier; + new_port_id_page->uc_reserved = v_rscn_port_id->reserved; + new_port_id_page->port_id_domain = + v_rscn_port_id->port_id_domain; + new_port_id_page->port_id_area = v_rscn_port_id->port_id_area; + new_port_id_page->port_id_port = v_rscn_port_id->port_id_port; + + /* Add entry to list: using_rscn_page */ + spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag); + list_add_tail(&new_port_id_page->list_node_rscn, + &v_rscn_mg->list_using_rscn_page); + spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has repeat RSCN node with domain(0x%x) area(0x%x)", + v_rscn_port_id->port_id_domain, + v_rscn_port_id->port_id_area, + v_rscn_port_id->port_id_port); + } +} + +static unsigned int unf_analysis_rscn_payload(struct unf_lport_s *v_lport, + struct unf_rscn_pld_s *v_rscn_pld) +{ +#define UNF_OS_DISC_REDISC_TIME 10000 + + struct unf_rscn_port_id_page_s *rscn_port_id = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rscn_mg_s *rscn_mgr = NULL; + unsigned int i = 0; + unsigned int pld_len = 0; + unsigned int port_id_page_cnt = 0; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum int_e need_disc_flag = UNF_FALSE; + + UNF_CHECK_VALID(0x3481, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3482, UNF_TRUE, v_rscn_pld, return UNF_RETURN_ERROR); + + /* This field is the length in bytes of the entire Payload, + * inclusive of the word 0 + */ + pld_len = UNF_GET_RSCN_PLD_LEN(v_rscn_pld->cmnd); + pld_len -= sizeof(v_rscn_pld->cmnd); + port_id_page_cnt = pld_len / UNF_RSCN_PAGE_LEN; + + /* Pages within payload is nor more than 255 */ + if (port_id_page_cnt > UNF_RSCN_PAGE_SUM) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) page num(0x%x) exceed 255 in RSCN", + v_lport->port_id, v_lport->nport_id, + port_id_page_cnt); + + return UNF_RETURN_ERROR; + } + + /* L_Port-->Disc-->Rscn_Mgr */ + disc = &v_lport->disc; + rscn_mgr = &disc->rscn_mgr; + + /* for each ID from RSCN_Page: check whether need to Disc or not */ + while (i < port_id_page_cnt) { + rscn_port_id = &v_rscn_pld->port_id_page[i]; + if (unf_lookup_lport_by_nport_id(v_lport, *(unsigned int *)rscn_port_id)) { + /* Prevent to create session with L_Port which have the same N_Port_ID */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) find local N_Port_ID(0x%x) within RSCN payload", + ((struct unf_lport_s *) + (v_lport->root_lport))->nport_id, + *(unsigned int *)rscn_port_id); + } else { + /* New RSCN_Page ID find, save it to RSCN_Mgr */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x_0x%x) save RSCN N_Port_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, + *(unsigned int *)rscn_port_id); + + /* 1. 
new RSCN_Page ID find, save it to RSCN_Mgr */ + unf_save_rscn_port_id(rscn_mgr, rscn_port_id); + need_disc_flag = UNF_TRUE; + unf_report_io_dm_event(v_lport, ELS_RSCN, + *(unsigned int *)rscn_port_id); + } + i++; + } + + if (need_disc_flag != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]Port(0x%x) find all N_Port_ID and do not need to disc", + ((struct unf_lport_s *)(v_lport->root_lport))->nport_id); + + return RETURN_OK; + } + + /* 2. Do/Start Disc: Check & do Disc (GID_PT) process */ + if (!disc->unf_disc_temp.pfn_unf_disc_start) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) DISC start function is NULL", + v_lport->nport_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if ((disc->en_states == UNF_DISC_ST_END) || + ((jiffies - disc->last_disc_jiff) > + msecs_to_jiffies(UNF_OS_DISC_REDISC_TIME))) { + disc->disc_option = UNF_RSCN_DISC; + disc->last_disc_jiff = jiffies; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + ret = disc->unf_disc_temp.pfn_unf_disc_start(v_lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO, + "[info]Port(0x%x_0x%x) DISC state(0x%x) with last time(%llu) and don't do DISC", + v_lport->port_id, v_lport->nport_id, + disc->en_states, disc->last_disc_jiff); + + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } + + return ret; +} + +static unsigned int unf_rscn_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + /* + * A RSCN ELS shall be sent to registered Nx_Ports + * when an event occurs that may have affected the state of + * one or more Nx_Ports, or the ULP state within the Nx_Port. + * + * The Payload of a RSCN Request includes a list + * containing the addresses of the affected Nx_Ports. + * + * Each affected Port_ID page contains the ID of the Nx_Port, + * Fabric Controller, E_Port, domain, or area for + * which the event was detected. + */ + struct unf_rscn_pld_s *rscn_pld = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int pld_len = 0; + + UNF_REFERNCE_VAR(pld_len); + UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Receive RSCN Port(0x%x_0x%x)<---RPort(0x%x) OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_RSCN); + + /* 1. Get R_Port by S_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); // rport busy_list + if (!rport) { + rport = unf_rport_get_free_and_init(v_lport, + UNF_PORT_TYPE_FC, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) received RSCN but has no RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_sid, v_xchg->ox_id); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + rport->nport_id = v_sid; + } + + rscn_pld = + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + UNF_CHECK_VALID(0x3485, UNF_TRUE, NULL != rscn_pld, + return UNF_RETURN_ERROR); + pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd); + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, rscn_pld, pld_len); + + /* 2. 
NOTE: Analysis RSCN payload(save & disc if necessary) */ + ret = unf_analysis_rscn_payload(v_lport, rscn_pld); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) analysis RSCN failed", + v_lport->port_id, v_lport->nport_id); + + /* 3. send rscn_acc after analysis payload */ + ret = unf_send_rscn_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send RSCN response failed", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(pld_len); + return ret; +} + +static void unf_analysis_pdisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_plogi_pdisc_s *v_pdisc) +{ + struct unf_lgn_parms_s *pdisc_params = NULL; + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + UNF_CHECK_VALID(0x3486, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3487, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3488, UNF_TRUE, v_pdisc, return); + UNF_REFERNCE_VAR(v_lport); + + pdisc_params = &v_pdisc->payload.parms; + if (pdisc_params->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + pdisc_params->co_parms.bb_receive_data_field_size; + + wwnn = (unsigned long long) + (((unsigned long long) + (pdisc_params->high_node_name) << 32) | + ((unsigned long long)pdisc_params->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(pdisc_params->high_port_name) << 32) | + ((unsigned long long)pdisc_params->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save PDISC parameters to Rport(0x%x) WWPN(0x%llx) WWNN(0x%llx)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name); +} + +static unsigned int unf_send_pdisc_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rjt_info_s rjt_info; + + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_PDISC; + rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; + rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; + + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, v_rport, &rjt_info); + + return ret; +} + +static unsigned int unf_pdisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_pdisc_s *pdisc = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned int ret = RETURN_OK; + unsigned long long wwpn = 0; + + UNF_CHECK_VALID(0x3489, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3490, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PDISC. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_PDISC); + pdisc = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->pdisc; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + &pdisc->payload, + sizeof(struct unf_plogi_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long) + (pdisc->payload.parms.high_port_name) << 32) | + ((unsigned long long)pdisc->payload.parms.low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive PDISC with S_Id(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + if (v_sid >= UNF_FC_FID_DOM_MGR) + return unf_send_pdisc_rjt(v_lport, rport, v_xchg); + + unf_analysis_pdisc_pld(v_lport, rport, pdisc); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving PDISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to scsi */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + } + + return ret; +} + +static void unf_analysis_adisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_adisc_payload_s *v_adisc_pld) +{ + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + 
UNF_CHECK_VALID(0x3491, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3492, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3493, UNF_TRUE, v_adisc_pld, return); + UNF_REFERNCE_VAR(v_lport); + + wwnn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_node_name) << 32) | + ((unsigned long long)v_adisc_pld->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_port_name) << 32) | + ((unsigned long long)v_adisc_pld->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save ADISC parameters to RPort(0x%x), WWPN(0x%llx) WWNN(0x%llx) NPort ID(0x%x)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name, + v_adisc_pld->nport_id); +} + +static unsigned int unf_adisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_adisc_payload_s *adisc_pld = NULL; + unsigned long flags = 0; + unsigned long long wwpn = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3494, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3495, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive ADISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_ADISC); + adisc_pld = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->adisc.adisc_payl; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, adisc_pld, + sizeof(struct unf_adisc_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long)(adisc_pld->high_port_name) << 32) | + ((unsigned long long)adisc_pld->low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive ADISC with S_ID(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + unf_analysis_adisc_pld(v_lport, rport, adisc_pld); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving ADISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Return ACC directly */ + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to SCSI */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_RECOVER, + rport->nport_id); + if (rport) { + spin_lock_irqsave(&rport->rport_state_lock, flags); + rport->nport_id = v_sid; + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort_ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + +static unsigned int unf_rec_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3496, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3497, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_sid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) receive REC", v_lport->port_id); + + /* Send rec acc */ + ret = unf_send_rec_acc(v_lport, rport, v_xchg); // discard directly + + return ret; +} + +static unsigned int unf_rrq_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_rrq_s *rrq = NULL; + struct unf_xchg_s *xchg_reused = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + unsigned int sid = 0; + unsigned long flags = 0; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x3498, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3499, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(rx_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RRQ); + rrq = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rrq; + ox_id = (unsigned short)(rrq->oxid_rxid >> 16); + rx_id = (unsigned short)(rrq->oxid_rxid); + sid = rrq->sid & UNF_NPORTID_MASK; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Receive RRQ. 
Port(0x%x)<---RPort(0x%x) sfsXchg(0x%p) OX_ID(0x%x,0x%x) RX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg, + ox_id, v_xchg->ox_id, rx_id); + + /* Get R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) receive RRQ but has no RPort(0x%x)", + v_lport->port_id, v_sid); + + /* NOTE: send LOGO */ + ret = unf_send_logo_by_did(v_lport, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + /* Get Target (Abort I/O) exchange context */ + /* UNF_FindXchgByOxId */ + xchg_reused = unf_cm_lookup_xchg_by_id(v_lport, ox_id, sid); + if (!xchg_reused) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot find exchange with OX_ID(0x%x) RX_ID(0x%x) S_ID(0x%x)", + v_lport->port_id, ox_id, rx_id, sid); + + rjt_info.els_cmnd_code = ELS_RRQ; + rjt_info.reason_code = FCXLS_BA_RJT_LOGICAL_ERROR | + FCXLS_LS_RJT_INVALID_OXID_RXID; + + /* NOTE: send ELS RJT */ + if (unf_send_els_rjt_by_rport(v_lport, v_xchg, + rport, &rjt_info) != + RETURN_OK) { + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; + } + + hot_pool = xchg_reused->hot_pool; + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) OxId(0x%x) Rxid(0x%x) Sid(0x%x) Hot Pool is NULL.", + v_lport->port_id, ox_id, rx_id, sid); + + return ret; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + xchg_reused->ox_id = INVALID_VALUE16; + xchg_reused->rx_id = INVALID_VALUE16; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + /* NOTE: release I/O exchange context */ + unf_xchg_ref_dec(xchg_reused, SFS_RESPONSE); + + /* Send RRQ ACC */ + ret = unf_send_rrq_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can not send RRQ rsp. 
Xchg(0x%p) Ioxchg(0x%p) OX_RX_ID(0x%x 0x%x) S_ID(0x%x)", + v_lport->port_id, v_xchg, + xchg_reused, ox_id, rx_id, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_els_rjt_by_rport(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport, + struct unf_rjt_info_s *v_rjt_info) +{ + struct unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3500, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3501, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3502, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_rport->nport_id; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, v_rport); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT for 0x%x %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_rjt_info->els_cmnd_code, + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + return ret; +} + +static unsigned int unf_els_cmnd_default_handler(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + unsigned int v_sid, + unsigned int v_els_cmnd_code) +{ +#define ELS_LCB 0X81 +#define ELS_RDP 0X18 + + struct unf_rport_s *rport = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3505, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3506, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if ((v_els_cmnd_code != ELS_LCB) && (v_els_cmnd_code != ELS_RDP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_KEVENT, + "[info]Receive Unknown ELS command(0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_els_cmnd_code, v_lport->port_id, v_sid, + v_xchg->ox_id); + } + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = v_els_cmnd_code; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (rport) + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, rport, + &rjt_info); + else + ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid, + &rjt_info); + + return ret; +} + +static struct unf_xchg_s *unf_alloc_xchg_for_rcv_cmnd( + struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int i = 0; + unsigned int offset = 0; + unsigned char *cmnd_pld = NULL; + unsigned int first_dword = 0; + unsigned int alloc_time = 0; + + UNF_CHECK_VALID(0x3508, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3509, UNF_TRUE, v_pkg, return NULL); + + if (!v_pkg->xchg_contex) { + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[warn]Port(0x%x) get new exchange failed", + v_lport->port_id); + + return NULL; + } + + offset = (xchg->fcp_sfs_union.sfs_entry.cur_offset); + cmnd_pld = (unsigned char *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + first_dword = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head.rctl_did; + + if ((cmnd_pld) || (first_dword != 0) || (offset != 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) abnormal, maybe data overrun, start(%llu) command(0x%x)", + v_lport->port_id, xchg, + xchg->alloc_jif, v_pkg->cmnd); + + UNF_PRINT_SFS(UNF_INFO, v_lport->port_id, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + sizeof(union unf_sfs_u)); + } + + memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, + sizeof(union unf_sfs_u)); + + v_pkg->xchg_contex = (void *)xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + alloc_time = xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + for (i = 0; i < PKG_MAX_PRIVATE_DATA_SIZE; i++) + xchg->private[i] = v_pkg->private[i]; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = alloc_time; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } else { + xchg = (struct unf_xchg_s *)v_pkg->xchg_contex; + } + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + return xchg; +} + +static unsigned char *unf_calc_big_cmnd_pld_buffer(struct unf_xchg_s *v_xchg, + unsigned int v_cmnd_code) +{ + unsigned char *cmnd_pld = NULL; + void *buf = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_RSCN) + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + else + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + + if (!cmnd_pld) { + buf = unf_get_one_big_sfs_buf(v_xchg); + if (!buf) + return NULL; + + if (v_cmnd_code == ELS_RSCN) { + memset(buf, 0, sizeof(struct unf_rscn_pld_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld = buf; + } else { + memset(buf, 0, sizeof(struct unf_echo_payload_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld = buf; + } + + dest = (unsigned char *)buf; + } else { + dest = (unsigned char *) + (cmnd_pld + 
v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + } + + return dest; +} + +static unsigned char *unf_calc_other_pld_buffer(struct unf_xchg_s *v_xchg) +{ + unsigned char *dest = NULL; + unsigned int offset = 0; + + UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL); + + offset = (sizeof(struct unf_fchead_s)) + + (v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + dest = (unsigned char *) + ((unsigned char *) + (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + + offset); + + return dest; +} + +static struct unf_xchg_s *unf_mv_data_2_xchg(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned char *dest = NULL; + unsigned int length = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3512, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3513, UNF_TRUE, v_pkg, return NULL); + + xchg = unf_alloc_xchg_for_rcv_cmnd(v_lport, v_pkg); + if (!xchg) + return NULL; + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + + memcpy(&xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head, + &v_pkg->frame_head, + sizeof(v_pkg->frame_head)); + + if ((v_pkg->cmnd == ELS_RSCN) || (v_pkg->cmnd == ELS_ECHO)) + dest = unf_calc_big_cmnd_pld_buffer(xchg, v_pkg->cmnd); + else + dest = unf_calc_other_pld_buffer(xchg); + + if (!dest) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + if (((xchg->fcp_sfs_union.sfs_entry.cur_offset + + v_pkg->unf_cmnd_pload_bl.length) > + (unsigned int)sizeof(union unf_sfs_u)) && + (v_pkg->cmnd != ELS_RSCN) && + (v_pkg->cmnd != ELS_ECHO)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) excange(0x%p) command(0x%x,0x%x) copy payload overrun(0x%x:0x%x:0x%x)", + v_lport->port_id, xchg, v_pkg->cmnd, + xchg->hot_pool_tag, + xchg->fcp_sfs_union.sfs_entry.cur_offset, + v_pkg->unf_cmnd_pload_bl.length, + (unsigned int)sizeof(union unf_sfs_u)); + + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + length = v_pkg->unf_cmnd_pload_bl.length; + if (length > 0) + memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length); + + xchg->fcp_sfs_union.sfs_entry.cur_offset += length; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static unsigned int unf_logo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *logo_rport = NULL; + struct unf_logo_s *logo = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + struct unf_rjt_info_s rjt_info = { 0 }; + + UNF_REFERNCE_VAR(logo); + UNF_CHECK_VALID(0x3514, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3515, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO); + logo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->logo; + nport_id = logo->payload.nport_id & UNF_NPORTID_MASK; + + if (v_sid < UNF_FC_FID_DOM_MGR) { + /* R_Port is not fabric port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]LOGIN: Receive LOGO. Port(0x%x)<---RPort(0x%x) NPort_ID(0x%x) OXID(0x%x)", + v_lport->port_id, v_sid, nport_id, v_xchg->ox_id); + } + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &logo->payload, + sizeof(struct unf_logo_payload_s)); + + /* + * 1. 
S_ID unequal to NPort_ID: + * link down Rport find by NPort_ID immediately + */ + if (nport_id != v_sid) { + logo_rport = unf_get_rport_by_nport_id(v_lport, nport_id); + if (logo_rport) + unf_rport_immediate_linkdown(v_lport, logo_rport); + } + + /* 2. Get R_Port by S_ID (frame header) */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_INIT, + v_sid); // INIT + if (!rport) { + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_LOGO; + rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; + rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; + ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid, + &rjt_info); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive LOGO but has no RPort(0x%x)", + v_lport->port_id, v_sid); + + return ret; + } + + /* + * 3. I/O resource release: set ABORT tag + * + * Call by: R_Port remove; RCVD LOGO; RCVD PLOGI; send PLOGI ACC + */ + unf_cm_xchg_mgr_abort_io_by_id(v_lport, rport, v_sid, v_lport->nport_id, + INI_IO_STATE_LOGO); + + /* 4. Send LOGO ACC */ + ret = unf_send_logo_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send LOGO failed", + v_lport->port_id); + + /* + * 5. Do same operations with RCVD LOGO/PRLO & Send LOGO: + * retry (LOGIN or LOGO) or link down immediately + */ + unf_process_rport_after_logo(v_lport, rport); + + return ret; +} + +static unsigned int unf_prlo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_prli_prlo_s *prlo = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_REFERNCE_VAR(prlo); + UNF_CHECK_VALID(0x3516, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PRLO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, v_sid); /* INIT */ + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive PRLO but has no RPort", + v_lport->port_id); + + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + prlo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prlo; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &prlo->payload, + sizeof(struct unf_pril_payload_s)); + + /* Send PRLO ACC to remote */ + ret = unf_send_prlo_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send PRLO ACC failed", + v_lport->port_id); + + /* Enter Enhanced action after LOGO (retry LOGIN or LOGO) */ + unf_process_rport_after_logo(v_lport, rport); + + UNF_REFERNCE_VAR(prlo); + return ret; +} + +static void unf_fill_echo_acc_pld(struct unf_echo_s *v_echo_acc) +{ + struct unf_echo_payload_s *echo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3518, UNF_TRUE, v_echo_acc, return); + + echo_acc_pld = v_echo_acc->echo_pld; + UNF_CHECK_VALID(0x3519, UNF_TRUE, echo_acc_pld, return); + + echo_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_echo_acc_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, lport, return); + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr) { + pci_unmap_single( + lport->low_level_func.dev, + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr = 0; + } +} + +static unsigned int unf_send_echo_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_s *echo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + dma_addr_t phy_echo_acc_addr; + struct unf_rjt_info_s rjt_info = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3520, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3521, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ECHO); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_echo_acc_callback; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + echo_acc = &fc_entry->echo_acc; + unf_fill_echo_acc_pld(echo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + phy_echo_acc_addr = pci_map_single(v_lport->low_level_func.dev, + echo_acc->echo_pld, + UNF_ECHO_PAYLOAD_LEN, + 
DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(v_lport->low_level_func.dev, + phy_echo_acc_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + echo_acc->phy_echo_addr = phy_echo_acc_addr; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) { + pci_unmap_single(v_lport->low_level_func.dev, + phy_echo_acc_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + echo_acc->phy_echo_addr = 0; + if (ret == UNF_RETURN_NOT_SUPPORT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) send ECHO reject to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_lport->port_id, v_did, ox_id, rx_id); + + rjt_info.els_cmnd_code = ELS_ECHO; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + unf_send_els_rjt_by_rport(v_lport, v_xchg, + v_xchg->rport, + &rjt_info); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x) failed", + v_lport->port_id, v_did, ox_id, rx_id); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + } + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_echo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_payload_s *echo_pld = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int data_len = 0; + + UNF_CHECK_VALID(0x3522, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3523, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + data_len = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Receive ECHO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x))", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_ECHO); + echo_pld = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, echo_pld, data_len); + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + v_xchg->rport = rport; + + ret = unf_send_echo_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC failed", + v_lport->port_id); + + UNF_REFERNCE_VAR(echo_pld); + UNF_REFERNCE_VAR(data_len); + return ret; +} + +static unsigned int unf_check_els_cmnd_valid(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_frame_pkg_s *ppkg = v_fra_pkg; + struct unf_xchg_s *xchg = v_xchg; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_lport_s *vport = NULL; + unsigned int sid = 0; + unsigned int did = 0; + + sid = (ppkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (ppkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + if ((ppkg->cmnd == ELS_FLOGI) && + (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) receive FLOGI in top (0x%x) and send LS_RJT", + lport->port_id, lport->en_act_topo); + + rjt_info.els_cmnd_code = ELS_FLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if ((ppkg->cmnd == ELS_PLOGI) && (did >= UNF_FC_FID_DOM_MGR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x)receive PLOGI with wellknown address(0x%x) and Send LS_RJT", + lport->port_id, did); + + rjt_info.els_cmnd_code = ELS_PLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if (((lport->nport_id == 0) || + (lport->nport_id == INVALID_VALUE32)) && + (NEED_REFRESH_NPORTID(ppkg))) { + lport->nport_id = did; + } else if ((did != lport->nport_id) && (ppkg->cmnd != ELS_FLOGI)) { + vport = unf_cm_lookup_vport_by_did(lport, did); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive ELS cmd(0x%x) with abnormal D_ID(0x%x)", + lport->nport_id, ppkg->cmnd, did); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static unsigned int unf_rcv_els_cmnd_req(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned int sid = 0; + unsigned int did = 0; + struct unf_lport_s *vport = NULL; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, unsigned int, + struct unf_xchg_s *) = NULL; + + sid = (v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (v_fra_pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + xchg = unf_mv_data_2_xchg(v_lport, v_fra_pkg); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive ElsCmnd(0x%x), exchange is NULL", + v_lport->port_id, v_fra_pkg->cmnd); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Exchange(%u) waiting for last WQE", + 
xchg->hot_pool_tag); + + return RETURN_OK; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Exchange(%u) get last WQE", xchg->hot_pool_tag); + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) { + xchg->ox_id = UNF_GET_OXID(v_fra_pkg); + xchg->abort_oxid = xchg->ox_id; + xchg->rx_id = xchg->hot_pool_tag; + } + xchg->cmnd_code = v_fra_pkg->cmnd; + + ret = unf_check_els_cmnd_valid(v_lport, v_fra_pkg, xchg); + if (ret != RETURN_OK) { + /* NOTE: exchange has been released */ + return UNF_RETURN_ERROR; + } + + if ((did != v_lport->nport_id) && (v_fra_pkg->cmnd != ELS_FLOGI)) { + vport = unf_cm_lookup_vport_by_did(v_lport, did); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) received unknown ELS command with S_ID(0x%x) D_ID(0x%x))", + v_lport->port_id, sid, did); + + return UNF_RETURN_ERROR; + } + v_lport = vport; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]VPort(0x%x) received ELS command with S_ID(0x%x) D_ID(0x%x)", + v_lport->port_id, sid, did); + } + + do { + if ((v_fra_pkg->cmnd) == els_handle[i].cmnd) { + pfn_els_cmnd_handler = + els_handle[i].pfn_els_cmnd_handler; + break; + } + + i++; + } while (i < (sizeof(els_handle) / + sizeof(struct unf_els_handler_table))); + + if (pfn_els_cmnd_handler) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) receive ELS(0x%x) from RPort(0x%x) and process it", + v_lport->port_id, v_fra_pkg->cmnd, sid); + + ret = pfn_els_cmnd_handler(v_lport, sid, xchg); + } else { + ret = unf_els_cmnd_default_handler(v_lport, xchg, sid, + v_fra_pkg->cmnd); + } + + return ret; +} + +static unsigned int unf_send_els_rsp_succ(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3529, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3530, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) lookup exchange by tag function is NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) find exhange by tag(0x%x) failed", + v_lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_ob_callback = xchg->pfn_ob_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with exchange(0x%p) tag(%u) do callback", + v_lport->port_id, xchg, hot_pool_tag); + + pfn_ob_callback(xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +static unsigned char *unf_calc_big_resp_pld_buffer(struct unf_xchg_s *v_xchg, + unsigned int v_cmnd_code) +{ + 
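+ /* Get the write position inside the big response payload already attached to this exchange: ELS_ECHO uses the echo payload buffer, name-server responses use the GID accept payload; returns NULL if that buffer was never allocated */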
unsigned char *resp_pld = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_ECHO) + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + else + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->get_id.gid_rsp.gid_acc_pld; + + if (resp_pld) + dest = (unsigned char *) + (resp_pld + v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + + return dest; +} + +static unsigned char *unf_calc_other_resp_pld_buffer(struct unf_xchg_s *v_xchg) +{ + unsigned char *dest = NULL; + unsigned int offset = 0; + + UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL); + + offset = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + dest = (unsigned char *)((unsigned char *) + (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); + + return dest; +} + +static unsigned int unf_mv_resp_2_xchg(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char *dest = NULL; + unsigned int length = 0; + unsigned int offset = 0; + unsigned int max_frame_len = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + + if (UNF_NEED_BIG_RESPONSE_BUFF(v_xchg->cmnd_code)) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = sizeof(struct unf_gif_acc_pld_s); + } else if (v_xchg->cmnd_code == NS_GA_NXT || + v_xchg->cmnd_code == NS_GIEL) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_len; + } else { + dest = unf_calc_other_resp_pld_buffer(v_xchg); + offset = sizeof(struct unf_fchead_s); + max_frame_len = sizeof(union unf_sfs_u); + } + + if (!dest) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return UNF_RETURN_ERROR; + } + + if (v_xchg->fcp_sfs_union.sfs_entry.cur_offset == 0) { + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += offset; + dest = dest + offset; + } + + length = v_pkg->unf_cmnd_pload_bl.length; + + if ((v_xchg->fcp_sfs_union.sfs_entry.cur_offset + length) > + max_frame_len) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Exchange(0x%p) command(0x%x) hotpooltag(0x%x) OX_RX_ID(0x%x) S_ID(0x%x) D_ID(0x%x) copy payload overrun(0x%x:0x%x:0x%x)", + v_xchg, v_xchg->cmnd_code, v_xchg->hot_pool_tag, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + v_xchg->fcp_sfs_union.sfs_entry.cur_offset, + v_pkg->unf_cmnd_pload_bl.length, + max_frame_len); + + length = max_frame_len - v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + } + + if (length > 0) + memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length); + + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += length; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return RETURN_OK; +} + +static unsigned int unf_send_els_cmnd_succ(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_callback)(void *, void *, void *) = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3531, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3532, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) lookup exchange by tag function can't be NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + if (((v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK) != + xchg->did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find exhange invalid, package S_ID(0x%x) exchange S_ID(0x%x) D_ID(0x%x)", + lport->port_id, v_fra_pkg->frame_head.csctl_sid, + xchg->sid, xchg->did); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { + ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg); + + return ret; + } + + xchg->byte_orders = v_fra_pkg->byte_orders; + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_callback) && + ((xchg->cmnd_code == ELS_RRQ) || + (xchg->cmnd_code == ELS_LOGO) || + (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { + pfn_callback = xchg->pfn_callback; + + if ((xchg->cmnd_code == ELS_FLOGI) || + (xchg->cmnd_code == ELS_FDISC)) + xchg->sid = v_fra_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK; + + if (xchg->cmnd_code == ELS_ECHO) { + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]; + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Do callback */ + pfn_callback(xchg->lport, xchg->rport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)lport, (void *)xchg); + return ret; +} + +static unsigned int unf_send_els_cmnd_failed(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3533, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3534, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) lookup exchange by tag function can't be NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exhange by tag(0x%x) failed", + v_lport->port_id, v_lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + 
UNF_CHECK_ALLOCTIME_VALID( + v_lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + ((xchg->cmnd_code == ELS_RRQ) || + (xchg->cmnd_code == ELS_LOGO) || + (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { + pfn_ob_callback = xchg->pfn_ob_callback; + xchg->ob_callback_sts = v_fra_pkg->status; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_ob_callback(xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) exchange(0x%p) tag(0x%x) do callback", + v_lport->port_id, xchg, hot_pool_tag); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +static unsigned int unf_rcv_els_cmnd_reply(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3535, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3536, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if ((v_fra_pkg->status == UNF_IO_SUCCESS) || + (v_fra_pkg->status == UNF_IO_UNDER_FLOW)) + ret = unf_send_els_cmnd_succ(v_lport, v_fra_pkg); + else + ret = unf_send_els_cmnd_failed(v_lport, v_fra_pkg); + + return ret; +} + +void unf_lport_enter_msn_plogi(struct unf_lport_s *v_lport) +{ + /* Fabric or Public Loop Mode: Login with Name server */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_plogi_payload_s *plogi_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg; + + UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return); + + /* Get (safe) R_Port */ + rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC, + UNF_FC_FID_MGMT_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_MGMT_SERV; // 0xfffffa + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + + return; + } + + xchg->cmnd_code = ELS_PLOGI; // PLOGI + xchg->did = rport->nport_id; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = lport; + xchg->rport = rport; + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + /* Set callback function */ + xchg->pfn_callback = NULL; // for rcvd plogi acc/rjt processer + xchg->pfn_ob_callback = NULL; // for send plogi failed processer + + unf_fill_package(&pkg, xchg, rport); + + /* Fill PLOGI payload */ + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return; + } + + plogi_pld = &fc_entry->plogi.payload; + memset(plogi_pld, 0, 
sizeof(struct unf_plogi_payload_s)); + unf_fill_plogi_pld(plogi_pld, v_lport); + + /* Start to Send PLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); +} + +static void unf_register_to_switch(struct unf_lport_s *v_lport) +{ + /* Register to Fabric, used for: FABRIC & PUBLI LOOP */ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3542, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + /* LPort: FLOGI_WAIT --> PLOGI_WAIT */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + /* Login with Name server: PLOGI */ + unf_lport_enter_sns_plogi(v_lport); + + unf_lport_enter_msn_plogi(v_lport); + + if ((v_lport->root_lport == v_lport) &&/* Physical Port */ + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + unf_linkup_all_vports(v_lport); + } +} + +void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int v_alpa) +{ + /* Only used for Private Loop LOGIN */ + struct unf_rport_s *rport = NULL; + unsigned long rport_flag = 0; + unsigned int port_feature = 0; + unsigned int ret; + + /* Check AL_PA validity */ + if (v_lport->nport_id == v_alpa) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) is the same as RPort with AL_PA(0x%x), do nothing", + v_lport->port_id, v_alpa); + return; + } + + if (v_alpa == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) is fabric, do nothing", + v_lport->port_id, v_alpa); + return; + } + + /* Get & set R_Port: reuse only */ + rport = unf_get_rport_by_nport_id(v_lport, v_alpa); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x_0x%p) login with private loop", + v_lport->port_id, v_lport->nport_id, v_alpa, rport); + + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY, + v_alpa); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) allocate new RPort(0x%x) failed", + v_lport->port_id, v_lport->nport_id, v_alpa); + return; + } + + /* Update R_Port state & N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + rport->nport_id = v_alpa; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI_WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* Private Loop: check whether need delay to send PLOGI or not */ + port_feature = rport->options; + + /* check Rport and Lport feature */ + if ((port_feature == UNF_PORT_MODE_UNKNOWN) && + (v_lport->options == UNF_PORT_MODE_INI)) { + /* Start to send PLOGI */ + ret = unf_send_plogi(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", + v_lport->port_id, v_lport->nport_id, + rport->nport_id); + + unf_rport_error_recovery(rport); + } + } else { + unf_check_rport_need_delay_plogi(v_lport, rport, port_feature); + } +} + +unsigned int unf_receive_els_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + switch (v_fra_pkg->type) { + case UNF_PKG_ELS_REQ_DONE: + ret = 
unf_rcv_els_cmnd_reply(lport, v_fra_pkg); + break; + + case UNF_PKG_ELS_REQ: + ret = unf_rcv_els_cmnd_req(lport, v_fra_pkg); + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) with exchange type(0x%x) abnormal", + lport->port_id, lport->nport_id, v_fra_pkg->type); + break; + } + + return ret; +} + +unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3545, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3546, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + if (v_pkg->type == UNF_PKG_ELS_REPLY_DONE) { + if ((v_pkg->status == UNF_IO_SUCCESS) || + (v_pkg->status == UNF_IO_UNDER_FLOW)) + ret = unf_send_els_rsp_succ(v_lport, v_pkg); + else + ret = unf_send_els_cmnd_failed(v_lport, v_pkg); + } + + return ret; +} + +static unsigned int unf_rcv_gs_cmnd_reply(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned short hot_pool_tag = 0; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + void (*pfn_callback)(void *, void *, void *) = NULL; + + UNF_CHECK_VALID(0x3553, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3554, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)lport, hot_pool_tag); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find exhange by tag(0x%x) failed", + lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { + ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg); + return ret; + } + + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_callback) && (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_callback = xchg->pfn_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_callback(xchg->lport, xchg->rport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)lport, (void *)xchg); + return ret; +} + +static unsigned int unf_send_gs_cmnd_failed(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3555, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3556, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) loopup exchange by tag function can't be NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find 
exhange by tag(0x%x)", + v_lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + v_lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_ob_callback = xchg->pfn_ob_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_ob_callback(xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3557, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3558, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if ((v_fra_pkg->type) == UNF_PKG_GS_REQ_DONE) { + if ((v_fra_pkg->status == UNF_IO_SUCCESS) || + (v_fra_pkg->status == UNF_IO_UNDER_FLOW) || + (v_fra_pkg->status == UNF_IO_OVER_FLOW)) + ret = unf_rcv_gs_cmnd_reply(lport, v_fra_pkg); + else + ret = unf_send_gs_cmnd_failed(lport, v_fra_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange type(0x%x) mismatch", + lport->port_id, v_fra_pkg->type); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_handle_init_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * from SCR ACC callback + * NOTE: inquiry disc R_Port used for NPIV + */ + struct unf_disc_rport_s *disc_rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3559, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3560, UNF_TRUE, v_lport, return); + + /* + * 1. Find & Check & Get (new) R_Port from list_disc_rports_pool + * then, Add to R_Port Disc_busy_list + */ + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + /* for each N_Port_ID from GID_ACC payload */ + if ((nport_id != v_lport->nport_id) && (nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) { + /* for New Port, not L_Port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) get nportid(0x%x) from GID_ACC", + v_lport->port_id, v_lport->nport_id, + nport_id); + + /* Get R_Port from list of RPort Disc Pool */ + disc_rport = + unf_rport_get_free_and_init(v_lport, + UNF_PORT_TYPE_DISC, + nport_id); + if (!disc_rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't allocate new rport(0x%x) from disc pool", + v_lport->port_id, + v_lport->nport_id, + nport_id); + + i++; + continue; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* + * 2. 
Do port disc stop operation: + * NOTE: Do DISC & release R_Port from + * busy_list back to list_disc_rports_pool + */ + disc = &v_lport->disc; + if (!disc->unf_disc_temp.pfn_unf_disc_stop) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) disc stop function is NULL", + v_lport->port_id, v_lport->nport_id); + + return; + } + + ret = disc->unf_disc_temp.pfn_unf_disc_stop(v_lport); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) do disc stop failed", + v_lport->port_id, v_lport->nport_id); +} + +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Swap case: Report Link Down immediately & release R_Port */ + unsigned long flags = 0; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + /* 1. Inc R_Port ref_cnt */ + if (unf_rport_ref_inc(v_rport) != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Rport(0x%p,0x%x) is removing and no need process", + v_lport->port_id, v_rport, v_rport->nport_id); + + return; + } + + /* 2. R_PORT state update: Link Down Event --->>> closing state */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. Put R_Port from busy to destroy list */ + disc = &v_lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + /* 4. 
Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(v_lport, v_rport); + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_check_wwn(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GPN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3564, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3565, UNF_TRUE, v_rport, + return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GPN_ID to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_rport->nport_id, + UNF_DISC_GET_PORT_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_PORT_NAME, + v_rport->nport_id); + + unf_rcv_gpn_id_rsp_unknown(v_lport, v_rport->nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_not_in_disc( + struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* RSCN Port_ID not in GID_ACC payload table: Link Down */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3566, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) RPort(0x%x) wwpn(0x%llx) has been removed and link down it", + v_lport->port_id, v_rscn_nport_id, + rport->port_name); + + unf_rport_linkdown(v_lport, rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) has no RPort(0x%x) and do nothing", + v_lport->nport_id, v_rscn_nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_in_disc(struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* Send GPN_ID or re-login(GNN_ID) */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3567, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + /* R_Port exist: send GPN_ID */ + ret = unf_rport_check_wwn(v_lport, rport); + } else { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + /* Re-LOGIN with INI mode: Send GNN_ID */ + ret = unf_rport_relogin(v_lport, v_rscn_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with no INI feature. Do nothing", + v_lport->nport_id); + } + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * Input parameters: + * 1. Port_ID_page: saved from RSCN payload + * 2. 
GID_ACC_payload: back from GID_ACC (GID_PT or GID_FT) + ** + * Do work: check whether RSCN Port_ID within GID_ACC payload or not + * then, re-login or link down rport + */ + unsigned int rscn_nport_id = 0; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + unsigned int ret = RETURN_OK; + enum int_e have_same_id = UNF_FALSE; + + UNF_CHECK_VALID(0x3568, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3569, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3570, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* 1. get RSCN_NPort_ID from (L_Port->Disc->RSCN_Mgr)->RSCN_Port_ID_Page */ + rscn_nport_id = UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page); + + /* + * 2. for RSCN_NPort_ID + * check whether RSCN_NPort_ID within GID_ACC_Payload or not + */ + while (i < UNF_GID_PORT_CNT) { /* 4k */ + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + if ((v_lport->nport_id != nport_id) && (nport_id != 0)) { + /* is not L_Port */ + if (rscn_nport_id == nport_id) { + /* RSCN Port_ID within GID_ACC payload */ + have_same_id = UNF_TRUE; + break; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* 3. RSCN_Port_ID not within GID_ACC payload table */ + if (have_same_id == UNF_FALSE) { + /* rport has been removed */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table failed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Link down rport */ + ret = unf_handle_rscn_port_not_in_disc(v_lport, + rscn_nport_id); + } else { /* 4. 
RSCN_Port_ID within GID_ACC payload table */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table succeed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Re-login with INI mode */ + ret = unf_handle_rscn_port_in_disc(v_lport, rscn_nport_id); + } + + return ret; +} + +static void unf_check_rport_rscn_process( + struct unf_rport_s *v_rport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = v_rport; + struct unf_port_id_page_s *port_id_page = v_port_id_page; + unsigned char format = port_id_page->uc_addr_format; + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain && + UNF_GET_AREA_ID(rport->nport_id) == + port_id_page->port_id_area) { + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + } + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain) + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + default: + break; + } +} + +static void unf_set_rport_rscn_position( + struct unf_lport_s *v_lport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3571, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) + unf_check_rport_rscn_process(rport, + v_port_id_page); + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_set_rport_rscn_position_local(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3572, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_ONLY_IN_LOCAL_PROCESS; + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_reset_rport_rscn_setting(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3573, UNF_TRUE, v_lport, 
return); + disc = &v_lport->disc; + + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_nport_id_with_rport_list( + struct unf_lport_s *v_lport, + unsigned int v_nport_id, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + unsigned long rport_flag = 0; + unsigned char format = v_port_id_page->uc_addr_format; + + UNF_CHECK_VALID(0x3574, UNF_TRUE, v_lport, return); + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if ((UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) || + (UNF_GET_AREA_ID(v_nport_id) != + v_port_id_page->port_id_area)) + return; + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) + return; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + break; + /* can't enter this branch guarantee by outer */ + default: + break; + } + + rport = unf_get_rport_by_nport_id(v_lport, v_nport_id); + + if (!rport) { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x) Find Rport(0x%x) by RSCN", + v_lport->nport_id, v_nport_id); + unf_rport_relogin(v_lport, v_nport_id); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS; + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_disc_with_local_rport( + struct unf_lport_s *v_lport, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_port_id_page_s *v_port_id_page) +{ + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3575, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3576, UNF_TRUE, v_lport, return); + + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) DISC N_Port_ID(0x%x)", + v_lport->nport_id, nport_id); + + if ((nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) + unf_compare_nport_id_with_rport_list(v_lport, nport_id, + v_port_id_page); + + if ((UNF_GID_LAST_PORT_ID & control) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + unf_set_rport_rscn_position_local(v_lport); +} + +static unsigned int unf_process_each_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3577, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3578, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3579, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_sns_port); + + spin_lock_irqsave(&v_rport->rport_state_lock, rport_flag); + + if (v_rport->rscn_position == UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), 
check wwpn", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + ret = unf_rport_check_wwn(v_lport, v_rport); + } else if (v_rport->rscn_position == + UNF_RPORT_ONLY_IN_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), linkdown it", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + unf_rport_linkdown(v_lport, v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + } + + return ret; +} + +static unsigned int unf_process_local_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3580, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3581, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + if (list_empty(&disc->list_busy_rports)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return UNF_RETURN_ERROR; + } + + node = (&disc->list_busy_rports)->next; + + do { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) { + node = node->next; + continue; + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + disc_flag); + ret = unf_process_each_rport_after_rscn(v_lport, + v_sns_port, + rport); + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + node = (&disc->list_busy_rports)->next; + } + } while (node != &disc->list_busy_rports); + + unf_reset_rport_rscn_setting(v_lport); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return ret; +} + +static unsigned int unf_handle_rscn_group_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3582, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3583, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3584, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_port_id_page); + + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric port failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + unf_set_rport_rscn_position(v_lport, v_port_id_page); + unf_compare_disc_with_local_rport(v_lport, v_gid_acc_pld, + v_port_id_page); + + ret = unf_process_local_rport_after_rscn(v_lport, sns_port); + return ret; +} + +static void unf_handle_rscn_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* for N_Port_ID table return from RSCN */ + struct unf_port_id_page_s *port_id_page = NULL; + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct list_head *list_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3585, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3586, UNF_TRUE, 
v_lport, return); + rscn_mgr = &v_lport->disc.rscn_mgr; + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + while (!list_empty(&rscn_mgr->list_using_rscn_page)) { + /* + * for each RSCN_Using_Page(NPortID) + * for each L_Port->Disc->RSCN_Mgr-> + * RSCN_Using_Page(Port_ID_Page) + * NOTE: + * check using_page_port_id whether within + * GID_ACC payload or not + */ + list_node = (&rscn_mgr->list_using_rscn_page)->next; + port_id_page = list_entry(list_node, struct unf_port_id_page_s, + list_node_rscn); + /* NOTE: here delete node (from RSCN using Page) */ + list_del(list_node); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + switch (port_id_page->uc_addr_format) { + /* each page of RSNC corresponding one of N_Port_ID */ + case UNF_RSCN_PORT_ADDR: + (void)unf_handle_rscn_port_addr(port_id_page, + v_gid_acc_pld, + v_lport); + break; + + /* each page of RSNC corresponding address group */ + case UNF_RSCN_AREA_ADDR_GROUP: + case UNF_RSCN_DOMAIN_ADDR_GROUP: + case UNF_RSCN_FABRIC_ADDR_GROUP: + (void)unf_handle_rscn_group_addr(port_id_page, + v_gid_acc_pld, + v_lport); + break; + + default: + break; + } + + /* NOTE: release this RSCN_Node */ + rscn_mgr->pfn_unf_release_rscn_node(rscn_mgr, port_id_page); + + /* go to next */ + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + } + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); +} + +static void unf_gid_acc_handle(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ +#define UNF_NONE_DISC 0X0 /* before enter DISC */ + + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3587, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3588, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + switch (disc->disc_option) { + case UNF_INIT_DISC: // from SCR callback with INI mode + disc->disc_option = UNF_NONE_DISC; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + /* R_Port from Disc_list */ + unf_handle_init_gid_acc(v_gid_acc_pld, v_lport); + break; + + case UNF_RSCN_DISC: /* from RSCN payload parse(analysis) */ + disc->disc_option = UNF_NONE_DISC; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* R_Port from busy_list */ + unf_handle_rscn_gid_acc(v_gid_acc_pld, v_lport); + break; + + default: + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x)'s disc option(0x%x) is abnormal", + v_lport->port_id, + v_lport->nport_id, + disc->disc_option); + break; + } +} + +static void unf_gid_ft_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *sfs_ptr = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3590, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3591, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3592, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + disc = &lport->disc; + + sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_FT response payload is NULL", + lport->port_id); 
+ + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Process GID_FT ACC */ + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_FT was rejected with reason code(0x%x)", + lport->port_id, rjt_reason); + + if ((rjt_reason & UNF_CTIU_RJT_EXP_MASK) == + UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); + } +} + +static void unf_gid_pt_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *sfs_ptr = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3594, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3595, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3596, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + disc = &lport->disc; + xchg = (struct unf_xchg_s *)v_xchg; + sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_PT response payload is NULL", + lport->port_id); + + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GID_PT was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG == + (rjt_reason & UNF_CTIU_RJT_EXP_MASK)) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + ret = unf_send_gid_ft(lport, rport); + if (ret != RETURN_OK) { + 
spin_lock_irqsave(&disc->rport_busy_pool_lock, + flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } +} + +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3606, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3607, UNF_TRUE, v_sns_port, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rportid(0x%x) GNN_ID response is unknown. Sending GFF_ID", + lport->port_id, lport->nport_id, v_nport_id); + + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, v_nport_id); + + /* NOTE: go to next stage */ + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); // send PLOGI + } +} + +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3624, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but response is unknown", + lport->port_id, v_nport_id); + + /* Get (Safe) R_Port & Set State */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't get RPort by NPort ID(0x%x), allocate new RPort", + lport->port_id, lport->nport_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3625, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) can not send PLOGI for RPort(0x%x), enter recovery", + lport->port_id, v_nport_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_lport_update_nport_id(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3646, UNF_TRUE, v_lport, return); + + 
spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = v_nport_id; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_lport_update_time_params( + struct unf_lport_s *v_lport, + struct unf_flogi_payload_s *v_flogi_payload) +{ + unsigned long flag = 0; + unsigned int ed_tov = 0; + unsigned int ra_tov = 0; + + UNF_CHECK_VALID(0x3647, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3648, UNF_TRUE, v_flogi_payload, return); + + ed_tov = v_flogi_payload->fabric_parms.co_parms.e_d_tov; + ra_tov = v_flogi_payload->fabric_parms.co_parms.r_a_tov; + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* FC-FS-3: 21.3.4, 21.3.5 */ + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + v_lport->ed_tov = ed_tov; + v_lport->ra_tov = ra_tov; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) with topo(0x%x) no need to save time parameters", + v_lport->port_id, v_lport->nport_id, + v_lport->en_act_topo); + } + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_fdisc_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Register to Name Server or Do recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + unsigned long flag = 0; + unsigned int cmd = 0; + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + UNF_CHECK_VALID(0x3640, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3641, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3642, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3643, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + fdisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->fdisc_acc.fdisc_payload; + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)fdisc_pld, + sizeof(struct unf_flogi_payload_s)); + + cmd = fdisc_pld->cmnd; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC response is (0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmd, lport->port_id, rport->nport_id, xchg->ox_id); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FLOGI); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no Rport", lport->port_id); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if ((cmd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for ACC */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive Flogi/Fdisc ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_lport_update_nport_id(lport, xchg->sid); + unf_lport_update_time_params(lport, fdisc_pld); + + unf_register_to_switch(lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC response is (0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for RJT: Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +static void unf_rcv_flogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_flogi_payload_s *v_flogi_pld, + unsigned int v_nport_id, + struct unf_xchg_s *v_xchg) +{ + /* PLOGI to Name server or remote port */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_flogi_payload_s *flogi_pld = v_flogi_pld; + struct unf_fabric_parms_s *fabric_params = NULL; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3649, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3650, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3651, UNF_TRUE, v_flogi_pld, return); + + /* Check L_Port state: FLOGI_WAIT */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x_0x%x) receive FLOGI ACC with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + fabric_params = &flogi_pld->fabric_parms; + node_name = (unsigned long long) + (((unsigned long long)(fabric_params->high_node_name) << 32) | + ((unsigned long long)(fabric_params->low_node_name))); + port_name = (unsigned long long) + (((unsigned long long)(fabric_params->high_port_name) << 32) | + ((unsigned long long)(fabric_params->low_port_name))); + + /* flogi acc pyload class 3 service priority value */ + lport->b_priority = UNF_PRIORITY_DISABLE; + + /* Save Flogi parameters */ + unf_save_fabric_params(lport, rport, fabric_params); + + if (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_N_PORT) { + /* P2P Mode */ + unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_DIRECT); + unf_login_with_rport_in_n2n(lport, port_name, node_name); + } else { + /* for: UNF_ACT_TOP_PUBLIC_LOOP + * /UNF_ACT_TOP_P2P_FABRIC/UNF_TOP_P2P_MASK + */ + if (lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP) + unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_FABRIC); + + unf_lport_update_nport_id(lport, v_nport_id); + unf_lport_update_time_params(lport, flogi_pld); + + /* Save process both for Public loop & Fabric */ + unf_register_to_switch(lport); + } +} + +static void unf_flogi_acc_com_process(struct unf_xchg_s *v_xchg) +{ + /* Maybe within interrupt or thread context */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_flogi_payload_s *flogi_pld = NULL; + unsigned int nport_id = 0; + unsigned int cmnd = 0; + unsigned long flags = 0; + struct unf_xchg_s *xchg = v_xchg; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return); + + lport = xchg->lport; + rport = xchg->rport; + flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; + cmnd = flogi_pld->cmnd; + + /* Get N_Port_ID & R_Port */ + /* Others: 0xFFFFFE */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); + nport_id = UNF_FC_FID_FLOGI; + + /* Get Safe R_Port: reuse only */ + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can not allocate new Rport", + lport->port_id); + + return; + 
} + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + /* Others: 0xFFFFFE */ + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Process FLOGI ACC or RJT */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for ACC */ + unf_rcv_flogi_acc(lport, rport, flogi_pld, xchg->sid, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for RJT: do L_Port error recovery */ + unf_lport_error_recovery(lport); + } +} + +static int unf_rcv_flogi_acc_async_callback(void *v_arg_in, + void *v_arg_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_arg_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_flogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + return RETURN_OK; +} + +static void unf_flogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Callback function for FLOGI ACC or RJT */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_flogi_payload_s *flogi_pld = NULL; + int bbscn_enabled = UNF_FALSE; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3652, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3653, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3654, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3655, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + xchg->lport = v_lport; + flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + if ((lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP) && + (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_F_PORT)) + /* Get Top Mode (P2P_F) --->>> used for BBSCN */ + act_topo = UNF_ACT_TOP_P2P_FABRIC; + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char) + lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(&flogi_pld->fabric_parms)); + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) && + (bbscn_enabled == UNF_TRUE)) { + /* BBSCN Enable or not --->>> used for Context change */ + lport->b_bbscn_support = UNF_TRUE; + switch_2_thread = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: for Root Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_flogi_acc_async_callback); + } else { + /* Process FLOGI response directly */ + unf_flogi_acc_com_process(xchg); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_ALL, + "[info]Port(0x%x) process FLOGI response: switch(%d) to thread done", + lport->port_id, switch_2_thread); +} + +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3658, UNF_TRUE, v_lport, return NULL); + + if (v_rport_nport_id >= UNF_FC_FID_DOM_MGR) // 
N_Port_ID <---> SID + /* R_Port is Fabric: by N_Port_ID */ + rport = unf_get_rport_by_nport_id(lport, v_rport_nport_id); + else + /* Others: by WWPN & N_Port_ID */ + rport = unf_find_valid_rport(lport, v_port_name, + v_rport_nport_id); + + return rport; +} + +static void unf_rcv_plogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + /* PLOGI ACC: PRLI(non fabric) or RFT_ID(fabric) */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_lgn_parms_s *login_parms = v_login_parms; + unsigned long long node_name = 0; + unsigned long long port_name = 0; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3659, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3660, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3661, UNF_TRUE, v_login_parms, return); + + node_name = (unsigned long long) + (((unsigned long long)(login_parms->high_node_name) << 32) | + ((unsigned long long)(login_parms->low_node_name))); + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* ACC & Case for: R_Port is fabric (RFT_ID) */ + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) { + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_PLOGI_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive PLOGI ACC with error state(0x%x)", + v_lport->port_id, lport->en_states); + + return; + } + /* PLOGI_WAIT --> RFT_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* Update R_Port WWPN & WWNN */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to Send RFT_ID */ + ret = unf_send_rft_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send RFT_ID failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } + } else { + /* ACC & Case for: R_Port is not fabric */ + if ((rport->options == UNF_PORT_MODE_UNKNOWN) && + (rport->port_name != INVALID_WWPN)) + rport->options = unf_get_port_feature(port_name); + /* Set Port Feature with BOTH: cancel */ + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x)<---LS_ACC(DID:0x%x SID:0x%x) for PLOGI ACC with RPort state(0x%x) NodeName(0x%llx) E_D_TOV(%d)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state, + rport->node_name, rport->ed_tov); + + if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) && + ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY))) { + /* Do nothing, return directly */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + return; + } + + /* PRLI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* + * 
Need Delay to Send PRLI or not + * Used for: L_Port with INI mode & R_Port is not Fabric + */ + unf_check_rport_need_delay_prli(lport, rport, + rport->options); + + /* Do not care: Just used for L_Port only is + * TGT mode or R_Port only is INI mode + */ + unf_schedule_open_work(lport, rport); + } +} + +static void unf_plogi_acc_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + unsigned long flag = 0; + unsigned long long port_name = 0; + unsigned int rport_nport_id = 0; + unsigned int cmnd = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport, return); + + lport = xchg->lport; + rport = xchg->rport; + rport_nport_id = rport->nport_id; + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + cmnd = (plogi_pld->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for PLOGI ACC: Go to next stage */ + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* Get (new) R_Port: 0xfffffc has same WWN with 0xfffcxx */ + rport = unf_find_rport(lport, rport_nport_id, port_name); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + rport_nport_id); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) alloc new RPort with wwpn(0x%llx) failed", + lport->port_id, lport->nport_id, + port_name); + return; + } + + /* PLOGI parameters check */ + ret = unf_check_plogi_params(lport, rport, login_parms); + if (ret != RETURN_OK) + return; + + /* Update R_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = rport_nport_id; + /* --->>> PLOGI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to process PLOGI ACC */ + unf_rcv_plogi_acc(lport, rport, login_parms); + } else { + /* Case for PLOGI RJT: L_Port or R_Port recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x)<---RPort(0x%p) with LS_RJT(DID:0x%x SID:0x%x) for PLOGI", + lport->port_id, rport, lport->nport_id, + rport->nport_id); + + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) + /* for Name server */ + unf_lport_error_recovery(lport); + else + unf_rport_error_recovery(rport); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PLOGI response(0x%x). 
Port(0x%x_0x%x)<---RPort(0x%x_0x%p) wwpn(0x%llx) OX_ID(0x%x)", + cmnd, lport->port_id, lport->nport_id, rport->nport_id, + rport, port_name, xchg->ox_id); +} + +static int unf_rcv_plogi_acc_async_callback(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_plogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return RETURN_OK; +} + +static void unf_plogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3662, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3663, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3664, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3665, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + xchg->lport = v_lport; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)plogi_pld, + sizeof(struct unf_plogi_payload_s)); + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); + if ((bbscn_enabled == UNF_TRUE) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + switch_2_thread = UNF_TRUE; + lport->b_bbscn_support = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: just for ROOT Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_plogi_acc_async_callback); + } else { + unf_plogi_acc_com_process(xchg); + } +} + +static void unf_process_logo_in_pri_loop(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3666, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3667, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); /* PLOGI WAIT */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Private Loop with INI mode, Avoid COM Mode problem */ + unf_rport_delay_login(rport); +} + +static void unf_process_logo_in_n2n(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3668, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3669, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if (lport->port_name > rport->port_name) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x)'s WWN(0x%llx) is larger than(0x%llx), should be master", + lport->port_id, lport->port_name, + rport->port_name); + + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI 
failed, enter recovery", + v_lport->port_id); + + unf_rport_error_recovery(rport); + } + } else { + unf_rport_enter_logo(lport, rport); + } +} + +void unf_process_logo_in_fabric(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GFF_ID or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3670, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3671, UNF_TRUE, v_rport, return); + + /* L_Port with INI Mode: Send GFF_ID */ + sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric port", + lport->port_id); + return; + } + + ret = unf_get_and_post_disc_event(v_lport, sns_port, rport->nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + rport->nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, rport->nport_id); + } +} + +static void unf_process_rport_after_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. LOGO handler + * 2. RPLO handler + * 3. LOGO_CALL_BACK (send LOGO ACC) handler + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + + UNF_CHECK_VALID(0x3672, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3673, UNF_TRUE, v_rport, return); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* R_Port is not fabric port (retry LOGIN or LOGO) */ + if (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private Loop: PLOGI or LOGO */ + unf_process_logo_in_pri_loop(lport, rport); + } else if (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) { + /* Point to Point: LOGIN or LOGO */ + unf_process_logo_in_n2n(lport, rport); + } else { + /* Fabric or Public Loop: GFF_ID or LOGO */ + unf_process_logo_in_fabric(lport, rport); + } + } else { + /* Rport is fabric port: link down now */ + unf_rport_linkdown(lport, rport); + } +} + +static unsigned int unf_rcv_bls_req_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* + * About I/O resource: + * 1. normal: Release I/O resource during RRQ processer + * 2. exception: Release I/O resource immediately + */ + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + unsigned long time_ms = 0; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3723, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3724, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + /* 1. BLS Request Response: Hot Pool Tag --->>> OX_ID */ + hot_pool_tag = + (unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; + xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)lport, hot_pool_tag); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find exchange by tag(0x%x) when receiving ABTS response", + lport->port_id, hot_pool_tag); + + /* return directly */ + return UNF_RETURN_ERROR; + } + + /* Consistency check */ + UNF_CHECK_ALLOCTIME_VALID(v_lport, hot_pool_tag, xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 2. 
Increase ref_cnt for exchange protecting */ + + ret = unf_xchg_ref_inc(xchg, TGT_ABTS_DONE); /* hold */ + UNF_CHECK_VALID(0x3725, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* 3. Exchag I/O State Set & Check: reused */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_DONE; /* I/O Done */ + xchg->abts_state |= ABTS_RESPONSE_RECEIVED; + if (!(xchg->io_state & INI_IO_STATE_UPABORT)) { + /* NOTE: I/O exchange has been released and used again */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) SID(0x%x) exch(0x%p) (0x%x:0x%x:0x%x:0x%x) state(0x%x) is abnormal with cnt(0x%x)", + lport->port_id, lport->nport_id, + xchg->sid, xchg, xchg->hot_pool_tag, + xchg->ox_id, xchg->rx_id, xchg->oid, + xchg->io_state, + atomic_read(&xchg->ref_cnt)); + + /* return directly */ + /* cancel ref & do nothing */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* 4. Exchange Timer check, cancel if necessary */ + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + /* + * 5. Exchage I/O Status check: Succ-> Add RRQ Timer + * ***** pkg->status --- to --->>> scsi_cmnd->result ***** + * + * FAILED: ERR_Code or X_ID is err, or BA_RSP type is err + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (v_pkg->status == UNF_IO_SUCCESS) { + /* Succeed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + xchg->io_state |= INI_IO_STATE_WAIT_RRQ; + xchg->rx_id = UNF_GET_RXID(v_pkg); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Add RRQ timer */ + time_ms = (unsigned long)(lport->ra_tov); + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + time_ms, + UNF_TIMER_TYPE_INI_RRQ); + } else { + /* Failed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_FAILED); + if (MARKER_STS_RECEIVED & xchg->abts_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: release I/O resource immediately */ + unf_cm_free_xchg(lport, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exch(0x%p) OX_RX(0x%x:0x%x) IOstate(0x%x) ABTSstate(0x%x) receive response abnormal ref(0x%x)", + lport->port_id, xchg, xchg->ox_id, + xchg->rx_id, + xchg->io_state, xchg->abts_state, + atomic_read(&xchg->ref_cnt)); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + } + + /* + * 6. If abts response arrived before + * marker sts received just wake up abts marker sema + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (!(xchg->abts_state & MARKER_STS_RECEIVED)) { + xchg->ucode_abts_state = v_pkg->status; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: wake up semaphore */ + up(&xchg->task_sema); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + /* 7. 
dec exch ref_cnt */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return ret; +} + +static unsigned int unf_rcv_abort_ini_io_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* INI mode: do not care */ + struct unf_xchg_s *io_xchg = NULL; + unsigned short io_pool_tag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3735, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3736, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + io_pool_tag = UNF_GET_IO_XCHG_TAG(v_pkg); + io_xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)v_lport, + io_pool_tag); + if (io_xchg) { + UNF_CHECK_ALLOCTIME_VALID( + v_lport, io_pool_tag, io_xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + io_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 1. Timer release */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)io_xchg); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) abort INI IO with status(0x%x) exchange(0x%p) tag(0x%x)", + v_lport->port_id, v_pkg->status, + io_xchg, io_pool_tag); + + /* 2. Free I/O Exchange context */ + unf_cm_free_xchg((void *)v_lport, (void *)io_xchg); + } + + return ret; +} + +unsigned int unf_receive_bls_pkg(void *v_lport, struct unf_frame_pkg_s *v_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = (struct unf_lport_s *)v_lport; + UNF_CHECK_VALID(0x3730, UNF_TRUE, lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3731, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + if (v_pkg->type == UNF_PKG_BLS_REQ_DONE) { + /* INI: RCVD BLS Req Done */ + ret = unf_rcv_bls_req_done(v_lport, v_pkg); + } else if (v_pkg->type == UNF_PKG_INI_IO) { + /* INI: Abort Done (do not care) */ + ret = unf_rcv_abort_ini_io_done(v_lport, v_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) received BLS packet type(%xh) is error", + lport->port_id, v_pkg->type); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(lport); + + return ret; +} + +static void unf_fill_rls_acc_pld(struct unf_rls_acc_s *v_rls_acc, + struct unf_lport_s *v_lport) +{ + struct unf_rls_acc_payload_s *rls_acc_pld = NULL; + + rls_acc_pld = &v_rls_acc->rls; + rls_acc_pld->cmnd = UNF_ELS_CMND_ACC; + + rls_acc_pld->link_failure_count = + v_lport->err_code_sum.link_fail_count; + rls_acc_pld->loss_of_sync_count = + v_lport->err_code_sum.loss_of_sync_count; + rls_acc_pld->loss_of_signal_count = + v_lport->err_code_sum.loss_of_signal_count; + rls_acc_pld->primitive_seq_count = 0; + rls_acc_pld->invalid_trans_word_count = 0; + rls_acc_pld->invalid_crc_count = + v_lport->err_code_sum.bad_crc_count; +} + +static unsigned int unf_send_rls_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_rls_acc_s *rls_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RLS); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + 
v_lport->port_id, v_xchg->hot_pool_tag); + return UNF_RETURN_ERROR; + } + + rls_acc = &fc_entry->rls_acc; + unf_fill_rls_acc_pld(rls_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) send Rls acc %s to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x).", + v_lport->port_id, (ret != RETURN_OK) ? "failed" : "succeed", + v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_rls_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RLS); + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn] Port(0x%x_0x%x) can`t find RPort by sid(0x%x) OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + v_xchg->rport = rport; + + ret = unf_send_rls_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send RLS ACC failed", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + diff --git a/drivers/scsi/huawei/hifc/unf_service.h b/drivers/scsi/huawei/hifc/unf_service.h new file mode 100644 index 0000000000000000000000000000000000000000..868723128575ec87de000db62353c0a01c851d88 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_service.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_SERVICE_H__ +#define __UNF_SERVICE_H__ + +extern unsigned int max_frame_size; + +#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_ACC) +#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_RJT) + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_els_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +unsigned int unf_send_rff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); 
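+
+/*
+ * The declarations above are the GS/ELS request senders used by the
+ * discovery and login paths implemented in unf_service.c (GID_FT/GID_PT,
+ * GPN_ID, GNN_ID and GFF_ID name server queries; FLOGI/FDISC/PLOGI/PRLI
+ * login commands; RFT_ID/RFF_ID registrations and LOGO). The declarations
+ * below cover link service utilities (ECHO, ABTS, SCR, RRQ) and the
+ * response/unknown-response handlers.
+ */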
+unsigned int unf_send_echo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int *v_time); +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_bls_pkg(void *v_lport, + struct unf_frame_pkg_s *v_pkg); +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name); +void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int alpa); +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport); +unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg); +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); + +typedef int (*unf_evt_task)(void *v_arg_in, void *v_arg_out); + +#endif /* __UNF_SERVICE_H__ */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index b64ca977825df32111b9dbfcc1c0b13d21f7c317..71d53bb239e25d2eb30b91b7b80d45d70d8dbcd2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_purge_requests(vhost, DID_ERROR); - ibmvfc_free_event_pool(vhost); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_free_event_pool(vhost); ibmvfc_free_mem(vhost); spin_lock(&ibmvfc_driver_lock); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 9df8a1a2299ccfce4a62efa98b5520de8a7e3f46..e60822f07653ec5e32d84db426ea8b56e762a4a0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -96,6 +96,7 @@ static int client_reserve = 1; static char partition_name[96] = "UNKNOWN"; static unsigned int partition_number = -1; static LIST_HEAD(ibmvscsi_head); +static DEFINE_SPINLOCK(ibmvscsi_driver_lock); static struct scsi_transport_template *ibmvscsi_transport_template; @@ -2271,7 +2272,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) } dev_set_drvdata(&vdev->dev, hostdata); + spin_lock(&ibmvscsi_driver_lock); list_add_tail(&hostdata->host_list, &ibmvscsi_head); + spin_unlock(&ibmvscsi_driver_lock); return 0; add_srp_port_failed: @@ -2293,15 +2296,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - list_del(&hostdata->host_list); - unmap_persist_bufs(hostdata); + unsigned long flags; + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); + + spin_lock_irqsave(hostdata->host->host_lock, flags); 
release_event_pool(&hostdata->pool, hostdata); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); kthread_stop(hostdata->work_thread); - srp_remove_host(hostdata->host); - scsi_remove_host(hostdata->host); + unmap_persist_bufs(hostdata); + + spin_lock(&ibmvscsi_driver_lock); + list_del(&hostdata->host_list); + spin_unlock(&ibmvscsi_driver_lock); + scsi_host_put(hostdata->host); return 0; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 271990bc065b924dda965bc8ee777aface7a5b47..0e6ca809c0d4cf382db1963e2879f2d2b64000b1 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6696,7 +6696,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost, * Return value: * 0 on success / other on failure **/ -static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct ipr_resource_entry *res; @@ -9958,6 +9959,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->max_devs_supported = ipr_max_devs; if (ioa_cfg->sis64) { + host->max_channel = IPR_MAX_SIS64_BUSES; host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_SIS64_DEVS) @@ -9966,6 +9968,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + ((sizeof(struct ipr_config_table_entry64) * ioa_cfg->max_devs_supported))); } else { + host->max_channel = IPR_VSET_BUS; host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) @@ -9975,7 +9978,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, * ioa_cfg->max_devs_supported))); } - host->max_channel = IPR_VSET_BUS; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; host->can_queue = ioa_cfg->max_cmds; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index f6baa23513139a53676b3f5005474cd419818098..9fbcdc283cdbbc4866e18673abf2a0d04dfaf52c 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1313,6 +1313,7 @@ struct ipr_resource_entry { #define IPR_ARRAY_VIRTUAL_BUS 0x1 #define IPR_VSET_VIRTUAL_BUS 0x2 #define IPR_IOAFP_VIRTUAL_BUS 0x3 +#define IPR_MAX_SIS64_BUSES 0x4 #define IPR_GET_RES_PHYS_LOC(res) \ (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index bd6ac6b5980a1128af2b0d4234e51f9a97c52a5b..fe587ef1741d48f7a812ded989be7800a3a2e614 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) case START_STOP: scb->scsi_cmd->result = DID_OK << 16; + break; case TEST_UNIT_READY: case INQUIRY: diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 1ee3868ade079dfd06ef8f7415830a4384e4498b..7b5deae68d33b538b33314f4866f19bd6c9b06a4 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) * the task management request. * @task_request: the handle to the task request object to start. 
*/ -enum sci_task_status sci_controller_start_task(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost, "%s: SCIC Controller starting task from invalid " "state\n", __func__); - return SCI_TASK_FAILURE_INVALID_STATE; + return SCI_FAILURE_INVALID_STATE; } status = sci_remote_device_start_task(ihost, idev, ireq); diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index b3539928073c628729af52c1301be111ae77b9dc..6bc3f022630a286012cc73fc4b223160e0461624 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io( struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_task_status sci_controller_start_task( +enum sci_status sci_controller_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 08c7b1e25fe481503ae5d4fd48324f703b40addf..07de94ea3819266294d5bd39ecdc1a7cf695f2f7 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -167,6 +167,7 @@ static struct scsi_host_template isci_sht = { .eh_abort_handler = sas_eh_abort_handler, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = isci_host_attrs, @@ -588,6 +589,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) shost->max_lun = ~0; shost->max_cmd_len = MAX_COMMAND_SIZE; + /* turn on DIF support */ + scsi_host_set_prot(shost, + SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + err = scsi_add_host(shost, &pdev->dev); if (err) goto err_shost; @@ -675,13 +683,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_host_alloc; } pci_info->hosts[i] = h; - - /* turn on DIF support */ - scsi_host_set_prot(to_shost(h), - SHOST_DIF_TYPE1_PROTECTION | - SHOST_DIF_TYPE2_PROTECTION | - SHOST_DIF_TYPE3_PROTECTION); - scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); } err = isci_setup_interrupts(pdev); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index ed197bc8e801a604029ac12e93bf24d9bdce7e34..2f151708b59ae36086de84b0a3c12e63eeea60df 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, if (status == SCI_SUCCESS) { if (ireq->stp.rsp.status & ATA_ERR) - status = SCI_IO_FAILURE_RESPONSE_VALID; + status = SCI_FAILURE_IO_RESPONSE_VALID; } else { - status = SCI_IO_FAILURE_RESPONSE_VALID; + status = SCI_FAILURE_IO_RESPONSE_VALID; } if (status != SCI_SUCCESS) { diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 6dcaed0c1fc8cf32c5e6956ff858b95818caf6d1..fb6eba331ac6eb9f51496682da04f50a33bdd063 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); - enum sci_task_status status = 
SCI_TASK_FAILURE; + enum sci_status status = SCI_FAILURE; struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; @@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, /* start the TMF io. */ status = sci_controller_start_task(ihost, idev, ireq); - if (status != SCI_TASK_SUCCESS) { + if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: start_io failed - status = 0x%x, request = %p\n", __func__, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index b025a0b7434174cea0096b23fad34aa18a22de20..2f300868a9dbe1252d879aa6860fec734decfef5 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -132,6 +132,7 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk) struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; read_descriptor_t rd_desc; + int current_cpu; read_lock_bh(&sk->sk_callback_lock); conn = sk->sk_user_data; @@ -141,6 +142,13 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk) } tcp_conn = conn->dd_data; + /* save intimate cpu when in softirq */ + if (!sock_owned_by_user_nocheck(sk)) { + current_cpu = smp_processor_id(); + if (conn->intimate_cpu != current_cpu) + conn->intimate_cpu = current_cpu; + } + /* * Use rd_desc to pass 'conn' to iscsi_tcp_recv. * We set count to 1 because we want the network layer to @@ -374,8 +382,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; unsigned int noreclaim_flag; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; int rc = 0; + if (!tcp_sw_conn->sock) { + iscsi_conn_printk(KERN_ERR, conn, + "Transport not bound to socket!\n"); + return -EINVAL; + } + noreclaim_flag = memalloc_noreclaim_save(); while (iscsi_sw_tcp_xmit_qlen(conn)) { @@ -767,7 +783,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); - struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; @@ -776,6 +792,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: + session = tcp_sw_host->session; if (!session) return -ENOTCONN; @@ -800,7 +817,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, return rc; return iscsi_conn_get_addr_param((struct sockaddr_storage *) - &addr, param, buf); + &addr, + (enum iscsi_param)param, buf); default: return iscsi_host_get_param(shost, param, buf); } @@ -835,6 +853,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, struct iscsi_session *session; struct iscsi_sw_tcp_host *tcp_sw_host; struct Scsi_Host *shost; + int rc; if (ep) { printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); @@ -852,6 +871,11 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, shost->max_channel = 0; shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max); + if (rc < 0) + goto free_host; + shost->can_queue = rc; + if (iscsi_host_add(shost, NULL)) goto free_host; @@ -863,12 +887,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, if (!cls_session) goto remove_host; session = cls_session->dd_data; - tcp_sw_host = iscsi_host_priv(shost); - tcp_sw_host->session = session; - shost->can_queue = session->scsi_cmds_max; if 
(iscsi_tcp_r2tpool_alloc(session)) goto remove_session; + + /* We are now fully setup so expose the session to sysfs. */ + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; return cls_session; remove_session: @@ -883,11 +908,22 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = cls_session->dd_data; - iscsi_tcp_r2tpool_free(cls_session->dd_data); - iscsi_session_teardown(cls_session); + if (WARN_ON_ONCE(session->leadconn)) + return; + iscsi_session_remove(cls_session); + /* + * Our get_host_param needs to access the session, so remove the + * host from sysfs before freeing the session to make sure userspace + * is no longer accessing the callout. + */ iscsi_host_remove(shost); + + iscsi_tcp_r2tpool_free(cls_session->dd_data); + + iscsi_session_free(cls_session); iscsi_host_free(shost); } @@ -971,7 +1007,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, - .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, + .can_queue = ISCSI_TOTAL_CMDS_MAX, .sg_tablesize = 4096, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 42bcf7f3a0f90bf5a8778e21cfe0b5c20f6e6a87..6ba257cbc6d946c677f984d08bf13312dd71543b 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2603,7 +2603,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) /* lport lock ? */ if (!lport || lport->state == LPORT_ST_DISABLED) { - FC_LPORT_DBG(lport, "Receiving frames for an lport that " + FC_LIBFC_DBG("Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); return; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be83590ed9559636549d90d794d74483d141ec89..faeba97f4102f971bab2da2c06bedb540e52332a 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -250,6 +250,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, } mutex_lock(&lport->disc.disc_mutex); lport->ptp_rdata = fc_rport_create(lport, remote_fid); + if (!lport->ptp_rdata) { + printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n", + lport->port_id); + mutex_unlock(&lport->disc.disc_mutex); + return; + } kref_get(&lport->ptp_rdata->kref); lport->ptp_rdata->ids.port_name = remote_wwpn; lport->ptp_rdata->ids.node_name = remote_wwnn; @@ -1726,14 +1732,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_payload_op(fp) != ELS_LS_ACC) { FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { FC_LPORT_DBG(lport, "FLOGI bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1743,7 +1749,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); - goto err; + goto out; } if (mfs <= lport->mfs) { diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 372387a450dff7d64147a4baa8cdcfaaa1d43c28..d8cf519da92c992d4bd3565753dc6b934460c706 100644 --- 
a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -140,6 +140,7 @@ EXPORT_SYMBOL(fc_rport_lookup); struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; + size_t rport_priv_size = sizeof(*rdata); lockdep_assert_held(&lport->disc.disc_mutex); @@ -147,7 +148,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) if (rdata) return rdata; - rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); + if (lport->rport_priv_size > 0) + rport_priv_size = lport->rport_priv_size; + rdata = kzalloc(rport_priv_size, GFP_KERNEL); if (!rdata) return NULL; @@ -184,7 +187,6 @@ void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - WARN_ON(!list_empty(&rdata->peers)); kfree_rcu(rdata, rcu); } EXPORT_SYMBOL(fc_rport_destroy); @@ -1164,6 +1166,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n", pp->spp.spp_flags, pp->spp.spp_type); + rdata->spp_type = pp->spp.spp_type; if (resp_code != FC_SPP_RESP_ACK) { if (resp_code == FC_SPP_RESP_CONF) @@ -1184,11 +1187,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, /* * Call prli provider if we should act as a target */ - prov = fc_passive_prov[rdata->spp_type]; - if (prov) { - memset(&temp_spp, 0, sizeof(temp_spp)); - prov->prli(rdata, pp->prli.prli_spp_len, - &pp->spp, &temp_spp); + if (rdata->spp_type < FC_FC4_PROV_SIZE) { + prov = fc_passive_prov[rdata->spp_type]; + if (prov) { + memset(&temp_spp, 0, sizeof(temp_spp)); + prov->prli(rdata, pp->prli.prli_spp_len, + &pp->spp, &temp_spp); + } } /* * Check if the image pair could be established @@ -2154,7 +2159,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); - rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, fc_rport_destroy); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 93c66ebad907ee0e9b99505e54aed32853ab4aae..82975fa7e7f0b047f62770d689208f36db0064c0 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -90,9 +90,15 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); + int intimate_cpu = conn->intimate_cpu; - if (ihost->workq) - queue_work(ihost->workq, &conn->xmitwork); + if (ihost->workq) { + /* we expect it to be excuted on the same numa of the intimate cpu */ + if ((intimate_cpu >= 0) && cpu_possible(intimate_cpu)) + queue_work_on(intimate_cpu, ihost->workq, &conn->xmitwork); + else + queue_work(ihost->workq, &conn->xmitwork); + } } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); @@ -259,11 +265,13 @@ static int iscsi_prep_bidi_ahs(struct iscsi_task *task) */ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) { - struct iscsi_conn *conn = task->conn; - struct iscsi_tm *tmf = &conn->tmhdr; + struct iscsi_session *session = task->conn->session; + struct iscsi_cls_session_wrapper *cls_session = + iscsi_cls_session_to_wrapper(session->cls_session); + struct iscsi_tm *tmf = &cls_session->tmhdr; u64 hdr_lun; - if (conn->tmf_state == TMF_INITIAL) + if (cls_session->tmf_state == TMF_INITIAL) return 0; if 
((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) @@ -283,24 +291,19 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) * Fail all SCSI cmd PDUs */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { - iscsi_conn_printk(KERN_INFO, conn, - "task [op %x itt " - "0x%x/0x%x] " - "rejected.\n", - opcode, task->itt, - task->hdr_itt); + iscsi_session_printk(KERN_INFO, session, + "task [op %x itt 0x%x/0x%x] rejected.\n", + opcode, task->itt, task->hdr_itt); return -EACCES; } /* * And also all data-out PDUs in response to R2T * if fast_abort is set. */ - if (conn->session->fast_abort) { - iscsi_conn_printk(KERN_INFO, conn, - "task [op %x itt " - "0x%x/0x%x] fast abort.\n", - opcode, task->itt, - task->hdr_itt); + if (session->fast_abort) { + iscsi_session_printk(KERN_INFO, session, + "task [op %x itt 0x%x/0x%x] fast abort.\n", + opcode, task->itt, task->hdr_itt); return -EACCES; } break; @@ -313,7 +316,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) */ if (opcode == ISCSI_OP_SCSI_DATA_OUT && task->hdr_itt == tmf->rtt) { - ISCSI_DBG_SESSION(conn->session, + ISCSI_DBG_SESSION(session, "Preventing task %x/%x from sending " "data-out due to abort task in " "progress\n", task->itt, @@ -389,7 +392,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return rc; } - if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) + if (!scsi_prot_op_normal(sc)) task->protected = true; transfer_length = scsi_transfer_length(sc); @@ -561,18 +564,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); task->state = state; - spin_lock_bh(&conn->taskqueuelock); - if (!list_empty(&task->running)) { - pr_debug_once("%s while task on list", __func__); - list_del_init(&task->running); - } - spin_unlock_bh(&conn->taskqueuelock); - - if (conn->task == task) - conn->task = NULL; - - if (conn->ping_task == task) - conn->ping_task = NULL; + if (READ_ONCE(conn->ping_task) == task) + WRITE_ONCE(conn->ping_task, NULL); /* release get from queueing */ __iscsi_put_task(task); @@ -602,11 +595,41 @@ void iscsi_complete_scsi_task(struct iscsi_task *task, } EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); +/* + * Must be called with back and frwd lock + */ +static bool cleanup_queued_task(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + bool early_complete = false; + + /* Bad target might have completed task while it was still running */ + if (task->state == ISCSI_TASK_COMPLETED) + early_complete = true; + + if (!list_empty(&task->running)) { + list_del_init(&task->running); + /* + * If it's on a list but still running, this could be from + * a bad target sending a rsp early, cleanup from a TMF, or + * session recovery. + */ + if (task->state == ISCSI_TASK_RUNNING || + task->state == ISCSI_TASK_COMPLETED) + __iscsi_put_task(task); + } + + if (conn->task == task) { + conn->task = NULL; + __iscsi_put_task(task); + } + + return early_complete; +} /* - * session back_lock must be held and if not called for a task that is - * still pending or from the xmit thread, then xmit thread must - * be suspended. 
+ * session frwd lock must be held and if not called for a task that is still + * pending or from the xmit thread, then xmit thread must be suspended */ static void fail_scsi_task(struct iscsi_task *task, int err) { @@ -614,14 +637,11 @@ static void fail_scsi_task(struct iscsi_task *task, int err) struct scsi_cmnd *sc; int state; - /* - * if a command completes and we get a successful tmf response - * we will hit this because the scsi eh abort code does not take - * a ref to the task. - */ - sc = task->sc; - if (!sc) + spin_lock_bh(&conn->session->back_lock); + if (cleanup_queued_task(task)) { + spin_unlock_bh(&conn->session->back_lock); return; + } if (task->state == ISCSI_TASK_PENDING) { /* @@ -636,6 +656,7 @@ static void fail_scsi_task(struct iscsi_task *task, int err) else state = ISCSI_TASK_ABRT_TMF; + sc = task->sc; sc->result = err << 16; if (!scsi_bidi_cmnd(sc)) scsi_set_resid(sc, scsi_bufflen(sc)); @@ -644,8 +665,6 @@ static void fail_scsi_task(struct iscsi_task *task, int err) scsi_in(sc)->resid = scsi_in(sc)->length; } - /* regular RX path uses back_lock */ - spin_lock_bh(&conn->session->back_lock); iscsi_complete_task(task, state); spin_unlock_bh(&conn->session->back_lock); } @@ -781,6 +800,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } + if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK)) + WRITE_ONCE(conn->ping_task, task); + if (!ihost->workq) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; @@ -788,9 +810,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (session->tt->xmit_task(task)) goto free_task; } else { - spin_lock_bh(&conn->taskqueuelock); list_add_tail(&task->running, &conn->mgmtqueue); - spin_unlock_bh(&conn->taskqueuelock); iscsi_conn_queue_work(conn); } @@ -828,7 +848,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); * @datalen: len of buffer * * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and - * then completes the command and task. + * then completes the command and task. called under back_lock **/ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task, char *data, @@ -931,6 +951,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * @conn: iscsi connection * @hdr: iscsi pdu * @task: scsi command task + * + * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received + * then completes the command and task. 
called under back_lock **/ static void iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, @@ -967,20 +990,22 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; + struct iscsi_cls_session_wrapper *session = + iscsi_cls_session_to_wrapper(conn->session->cls_session); conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; conn->tmfrsp_pdus_cnt++; - if (conn->tmf_state != TMF_QUEUED) + if (session->tmf_state != TMF_QUEUED) return; if (tmf->response == ISCSI_TMF_RSP_COMPLETE) - conn->tmf_state = TMF_SUCCESS; + session->tmf_state = TMF_SUCCESS; else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) - conn->tmf_state = TMF_NOT_FOUND; + session->tmf_state = TMF_NOT_FOUND; else - conn->tmf_state = TMF_FAILED; - wake_up(&conn->ehwait); + session->tmf_state = TMF_FAILED; + wake_up(&session->ehwait); } static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) @@ -988,8 +1013,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) struct iscsi_nopout hdr; struct iscsi_task *task; - if (!rhdr && conn->ping_task) - return -EINVAL; + if (!rhdr) { + if (READ_ONCE(conn->ping_task)) + return -EINVAL; + WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK); + } memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -1004,24 +1032,35 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); if (!task) { + if (!rhdr) + WRITE_ONCE(conn->ping_task, NULL); iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); return -EIO; } else if (!rhdr) { /* only track our nops */ - conn->ping_task = task; conn->last_ping = jiffies; } return 0; } +/** + * iscsi_nop_out_rsp - SCSI NOP Response processing + * @task: scsi command task + * @nop: the nop structure + * @data: where to put the data + * @datalen: length of data + * + * iscsi_nop_out_rsp handles nop response from use or + * from user space. called under back_lock + **/ static int iscsi_nop_out_rsp(struct iscsi_task *task, struct iscsi_nopin *nop, char *data, int datalen) { struct iscsi_conn *conn = task->conn; int rc = 0; - if (conn->ping_task != task) { + if (READ_ONCE(conn->ping_task) != task) { /* * If this is not in response to one of our * nops then it must be from userspace. 
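The nop-out handling above uses a reserve-then-publish scheme for conn->ping_task: the INVALID_SCSI_TASK sentinel is stored before the PDU is queued, so concurrent readers such as the timeout path already see a ping in flight, and the real task pointer (or NULL if the send failed) replaces it afterwards. A minimal sketch of the same pattern, using hypothetical names (slot, SLOT_RESERVED) rather than the driver's own structures:

/* Reserve-then-publish tracking of a single in-flight request; the
 * sentinel is never a valid pointer, so readers can test for it safely.
 */
#define SLOT_RESERVED	((void *)-1)

static void *slot;	/* always accessed with READ_ONCE()/WRITE_ONCE() */

static bool slot_reserve(void)
{
	if (READ_ONCE(slot))			/* a request is already tracked */
		return false;
	WRITE_ONCE(slot, SLOT_RESERVED);	/* claim before queueing the send */
	return true;
}

static void slot_publish(void *req)
{
	/* store the real request, or NULL if the send could not be queued */
	if (READ_ONCE(slot) == SLOT_RESERVED)
		WRITE_ONCE(slot, req);
}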
@@ -1378,7 +1417,6 @@ void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err) { struct iscsi_conn *conn; - struct device *dev; spin_lock_bh(&session->frwd_lock); conn = session->leadconn; @@ -1387,10 +1425,8 @@ void iscsi_session_failure(struct iscsi_session *session, return; } - dev = get_device(&conn->cls_conn->dev); + iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); - if (!dev) - return; /* * if the host is being removed bypass the connection * recovery initialization because we are going to kill @@ -1400,7 +1436,7 @@ void iscsi_session_failure(struct iscsi_session *session, iscsi_conn_error_event(conn->cls_conn, err); else iscsi_conn_failure(conn, err); - put_device(dev); + iscsi_put_conn(conn->cls_conn); } EXPORT_SYMBOL_GPL(iscsi_session_failure); @@ -1441,25 +1477,61 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) return 0; } -static int iscsi_xmit_task(struct iscsi_conn *conn) +static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task, + bool was_requeue) { - struct iscsi_task *task = conn->task; int rc; - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) + spin_lock_bh(&conn->session->back_lock); + + if (!conn->task) { + /* Take a ref so we can access it after xmit_task() */ + __iscsi_get_task(task); + } else { + /* Already have a ref from when we failed to send it last call */ + conn->task = NULL; + } + + /* + * If this was a requeue for a R2T we have an extra ref on the task in + * case a bad target sends a cmd rsp before we have handled the task. + */ + if (was_requeue) + __iscsi_put_task(task); + + /* + * Do this after dropping the extra ref because if this was a requeue + * it's removed from that list and cleanup_queued_task would miss it. + */ + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + /* + * Save the task and ref in case we weren't cleaning up this + * task and get woken up again. + */ + conn->task = task; + spin_unlock_bh(&conn->session->back_lock); return -ENODATA; + } + spin_unlock_bh(&conn->session->back_lock); - __iscsi_get_task(task); spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->frwd_lock); if (!rc) { /* done with this task */ task->last_xfer = jiffies; - conn->task = NULL; } /* regular RX path uses back_lock */ spin_lock(&conn->session->back_lock); + if (rc && task->state == ISCSI_TASK_RUNNING) { + /* + * get an extra ref that is released next time we access it + * as conn->task above. + */ + __iscsi_get_task(task); + conn->task = task; + } + __iscsi_put_task(task); spin_unlock(&conn->session->back_lock); return rc; @@ -1469,9 +1541,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) * iscsi_requeue_task - requeue task to run from session workqueue * @task: task to requeue * - * LLDs that need to run a task from the session workqueue should call - * this. The session frwd_lock must be held. This should only be called - * by software drivers. + * Callers must have taken a ref to the task that is going to be requeued. 
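The rewritten comment above moves the reference rule to the caller: whoever requeues a task must already hold a reference, and the requeue path drops that reference again when the task turns out to be on the list already (and therefore already owns one). A rough sketch of the convention, with a hypothetical refcounted object and release callback:

/* Caller holds a reference. If the entry is newly queued, the queue keeps
 * that reference; if it was already queued, the extra reference is dropped.
 */
static void requeue_locked(struct kref *ref, struct list_head *entry,
			   struct list_head *queue,
			   void (*release)(struct kref *))
{
	if (list_empty(entry))
		list_add_tail(entry, queue);
	else
		kref_put(ref, release);
}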
*/ void iscsi_requeue_task(struct iscsi_task *task) { @@ -1481,11 +1551,18 @@ void iscsi_requeue_task(struct iscsi_task *task) * this may be on the requeue list already if the xmit_task callout * is handling the r2ts while we are adding new ones */ - spin_lock_bh(&conn->taskqueuelock); - if (list_empty(&task->running)) + spin_lock_bh(&conn->session->frwd_lock); + if (list_empty(&task->running)) { list_add_tail(&task->running, &conn->requeue); - spin_unlock_bh(&conn->taskqueuelock); + } else { + /* + * Don't need the extra ref since it's already requeued and + * has a ref. + */ + iscsi_put_task(task); + } iscsi_conn_queue_work(conn); + spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); @@ -1511,7 +1588,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } if (conn->task) { - rc = iscsi_xmit_task(conn); + rc = iscsi_xmit_task(conn, conn->task, false); if (rc) goto done; } @@ -1521,54 +1598,41 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) * only have one nop-out as a ping from us and targets should not * overflow us with nop-ins */ - spin_lock_bh(&conn->taskqueuelock); check_mgmt: while (!list_empty(&conn->mgmtqueue)) { - conn->task = list_entry(conn->mgmtqueue.next, - struct iscsi_task, running); - list_del_init(&conn->task->running); - spin_unlock_bh(&conn->taskqueuelock); - if (iscsi_prep_mgmt_task(conn, conn->task)) { + task = list_entry(conn->mgmtqueue.next, struct iscsi_task, + running); + list_del_init(&task->running); + if (iscsi_prep_mgmt_task(conn, task)) { /* regular RX path uses back_lock */ spin_lock_bh(&conn->session->back_lock); - __iscsi_put_task(conn->task); + __iscsi_put_task(task); spin_unlock_bh(&conn->session->back_lock); - conn->task = NULL; - spin_lock_bh(&conn->taskqueuelock); continue; } - rc = iscsi_xmit_task(conn); + rc = iscsi_xmit_task(conn, task, false); if (rc) goto done; - spin_lock_bh(&conn->taskqueuelock); } /* process pending command queue */ while (!list_empty(&conn->cmdqueue)) { - conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, - running); - list_del_init(&conn->task->running); - spin_unlock_bh(&conn->taskqueuelock); + task = list_entry(conn->cmdqueue.next, struct iscsi_task, + running); + list_del_init(&task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_scsi_task(conn->task, DID_IMM_RETRY); - spin_lock_bh(&conn->taskqueuelock); + fail_scsi_task(task, DID_IMM_RETRY); continue; } - rc = iscsi_prep_scsi_cmd_pdu(conn->task); + rc = iscsi_prep_scsi_cmd_pdu(task); if (rc) { - if (rc == -ENOMEM || rc == -EACCES) { - spin_lock_bh(&conn->taskqueuelock); - list_add_tail(&conn->task->running, - &conn->cmdqueue); - conn->task = NULL; - spin_unlock_bh(&conn->taskqueuelock); - goto done; - } else - fail_scsi_task(conn->task, DID_ABORT); - spin_lock_bh(&conn->taskqueuelock); + if (rc == -ENOMEM || rc == -EACCES) + fail_scsi_task(task, DID_IMM_RETRY); + else + fail_scsi_task(task, DID_ABORT); continue; } - rc = iscsi_xmit_task(conn); + rc = iscsi_xmit_task(conn, task, false); if (rc) goto done; /* @@ -1576,7 +1640,6 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) * we need to check the mgmt queue for nops that need to * be sent to aviod starvation */ - spin_lock_bh(&conn->taskqueuelock); if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } @@ -1590,21 +1653,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) task = list_entry(conn->requeue.next, struct iscsi_task, running); + if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT)) break; - conn->task = task; - 
list_del_init(&conn->task->running); - conn->task->state = ISCSI_TASK_RUNNING; - spin_unlock_bh(&conn->taskqueuelock); - rc = iscsi_xmit_task(conn); + list_del_init(&task->running); + rc = iscsi_xmit_task(conn, task, true); if (rc) goto done; - spin_lock_bh(&conn->taskqueuelock); if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } - spin_unlock_bh(&conn->taskqueuelock); spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; @@ -1770,9 +1829,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) goto prepd_reject; } } else { - spin_lock_bh(&conn->taskqueuelock); list_add_tail(&task->running, &conn->cmdqueue); - spin_unlock_bh(&conn->taskqueuelock); iscsi_conn_queue_work(conn); } @@ -1781,7 +1838,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) return 0; prepd_reject: + spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); reject: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1789,7 +1848,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) return SCSI_MLQUEUE_TARGET_BUSY; prepd_fault: + spin_lock_bh(&session->back_lock); iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); + spin_unlock_bh(&session->back_lock); fault: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -1817,15 +1878,16 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc); static void iscsi_tmf_timedout(struct timer_list *t) { - struct iscsi_conn *conn = from_timer(conn, t, tmf_timer); - struct iscsi_session *session = conn->session; + struct iscsi_cls_session_wrapper *cls_session = + from_timer(cls_session, t, tmf_timer); + struct iscsi_session *session = cls_session->cls_sess.dd_data; spin_lock(&session->frwd_lock); - if (conn->tmf_state == TMF_QUEUED) { - conn->tmf_state = TMF_TIMEDOUT; + if (cls_session->tmf_state == TMF_QUEUED) { + cls_session->tmf_state = TMF_TIMEDOUT; ISCSI_DBG_EH(session, "tmf timedout\n"); /* unblock eh_abort() */ - wake_up(&conn->ehwait); + wake_up(&cls_session->ehwait); } spin_unlock(&session->frwd_lock); } @@ -1836,6 +1898,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, __must_hold(&session->frwd_lock) { struct iscsi_session *session = conn->session; + struct iscsi_cls_session_wrapper *cls_session = + iscsi_cls_session_to_wrapper(session->cls_session); struct iscsi_task *task; task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, @@ -1848,8 +1912,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, return -EPERM; } conn->tmfcmd_pdus_cnt++; - conn->tmf_timer.expires = timeout * HZ + jiffies; - add_timer(&conn->tmf_timer); + cls_session->tmf_timer.expires = timeout * HZ + jiffies; + add_timer(&cls_session->tmf_timer); ISCSI_DBG_EH(session, "tmf set timeout\n"); spin_unlock_bh(&session->frwd_lock); @@ -1863,12 +1927,12 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, * 3) session is terminated or restarted or userspace has * given up on recovery */ - wait_event_interruptible(conn->ehwait, age != session->age || + wait_event_interruptible(cls_session->ehwait, age != session->age || session->state != ISCSI_STATE_LOGGED_IN || - conn->tmf_state != TMF_QUEUED); + cls_session->tmf_state != TMF_QUEUED); if (signal_pending(current)) flush_signals(current); - del_timer_sync(&conn->tmf_timer); + del_timer_sync(&cls_session->tmf_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); @@ -1880,27 +1944,39 @@ 
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, } /* - * Fail commands. session lock held and recv side suspended and xmit - * thread flushed + * Fail commands. session frwd lock held and xmit thread flushed. */ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) { + struct iscsi_session *session = conn->session; struct iscsi_task *task; int i; - for (i = 0; i < conn->session->cmds_max; i++) { - task = conn->session->cmds[i]; + spin_lock_bh(&session->back_lock); + for (i = 0; i < session->cmds_max; i++) { + task = session->cmds[i]; if (!task->sc || task->state == ISCSI_TASK_FREE) continue; if (lun != -1 && lun != task->sc->device->lun) continue; - ISCSI_DBG_SESSION(conn->session, + __iscsi_get_task(task); + spin_unlock_bh(&session->back_lock); + + ISCSI_DBG_SESSION(session, "failing sc %p itt 0x%x state %d\n", task->sc, task->itt, task->state); fail_scsi_task(task, error); + + spin_unlock_bh(&session->frwd_lock); + iscsi_put_task(task); + spin_lock_bh(&session->frwd_lock); + + spin_lock_bh(&session->back_lock); } + + spin_unlock_bh(&session->back_lock); } /** @@ -1955,7 +2031,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn) */ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) { - if (conn->ping_task && + if (READ_ONCE(conn->ping_task) && time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + (conn->ping_timeout * HZ), jiffies)) return 1; @@ -1977,7 +2053,8 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->frwd_lock); + spin_lock_bh(&session->frwd_lock); + spin_lock(&session->back_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -1985,8 +2062,11 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * so let timeout code complete it now. */ rc = BLK_EH_DONE; + spin_unlock(&session->back_lock); goto done; } + __iscsi_get_task(task); + spin_unlock(&session->back_lock); if (session->state != ISCSI_STATE_LOGGED_IN) { /* @@ -1996,7 +2076,10 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Instead, handle cmd, allow completion to happen and let * upper layer to deal with the result. */ - if (unlikely(system_state != SYSTEM_RUNNING)) { + conn = session->leadconn; + if (unlikely(system_state != SYSTEM_RUNNING) || (conn && + time_before_eq(conn->last_recv + + (cls_session->recovery_tmo * HZ), jiffies))) { sc->result = DID_NO_CONNECT << 16; ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); rc = BLK_EH_DONE; @@ -2045,6 +2128,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) goto done; } + spin_lock(&session->back_lock); for (i = 0; i < conn->session->cmds_max; i++) { running_task = conn->session->cmds[i]; if (!running_task->sc || running_task == task || @@ -2077,10 +2161,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) "last xfer %lu/%lu. 
Last check %lu.\n", task->last_xfer, running_task->last_xfer, task->last_timeout); + spin_unlock(&session->back_lock); rc = BLK_EH_RESET_TIMER; goto done; } } + spin_unlock(&session->back_lock); /* Assumes nop timeout is shorter than scsi cmd timeout */ if (task->have_checked_conn) @@ -2090,7 +2176,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Checking the transport already or nop from a cmd timeout still * running */ - if (conn->ping_task) { + if (READ_ONCE(conn->ping_task)) { task->have_checked_conn = true; rc = BLK_EH_RESET_TIMER; goto done; @@ -2102,9 +2188,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) rc = BLK_EH_RESET_TIMER; done: - if (task) + spin_unlock_bh(&session->frwd_lock); + + if (task) { task->last_timeout = jiffies; - spin_unlock(&session->frwd_lock); + iscsi_put_task(task); + } ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; @@ -2170,6 +2259,7 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, int iscsi_eh_abort(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; + struct iscsi_cls_session_wrapper *session_wrapper; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_task *task; @@ -2177,6 +2267,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) int age; cls_session = starget_to_session(scsi_target(sc->device)); + session_wrapper = iscsi_cls_session_to_wrapper(cls_session); session = cls_session->dd_data; ISCSI_DBG_EH(session, "aborting sc %p\n", sc); @@ -2208,19 +2299,26 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) return FAILED; } + spin_lock(&session->back_lock); + task = (struct iscsi_task *)sc->SCp.ptr; + if (!task || !task->sc) { + /* task completed before time out */ + ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); + + spin_unlock(&session->back_lock); + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); + return SUCCESS; + } + conn = session->leadconn; + iscsi_get_conn(conn->cls_conn); conn->eh_abort_cnt++; age = session->age; - task = (struct iscsi_task *)sc->SCp.ptr; - ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", - sc, task->itt); - - /* task completed before time out */ - if (!task->sc) { - ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); - goto success; - } + ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); + __iscsi_get_task(task); + spin_unlock(&session->back_lock); if (task->state == ISCSI_TASK_PENDING) { fail_scsi_task(task, DID_ABORT); @@ -2228,17 +2326,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } /* only have one tmf outstanding at a time */ - if (conn->tmf_state != TMF_INITIAL) + if (session_wrapper->tmf_state != TMF_INITIAL) goto failed; - conn->tmf_state = TMF_QUEUED; + session_wrapper->tmf_state = TMF_QUEUED; - hdr = &conn->tmhdr; + hdr = &session_wrapper->tmhdr; iscsi_prep_abort_task_pdu(task, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) goto failed; - switch (conn->tmf_state) { + switch (session_wrapper->tmf_state) { case TMF_SUCCESS: spin_unlock_bh(&session->frwd_lock); /* @@ -2253,7 +2351,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) */ spin_lock_bh(&session->frwd_lock); fail_scsi_task(task, DID_ABORT); - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); @@ -2264,7 +2362,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed_unlocked; case TMF_NOT_FOUND: if 
(!sc->SCp.ptr) { - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); /* task completed before tmf abort response */ ISCSI_DBG_EH(session, "sc completed while abort in " @@ -2273,7 +2371,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } /* fall through */ default: - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; goto failed; } @@ -2282,6 +2380,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) success_unlocked: ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", sc, task->itt); + iscsi_put_task(task); + iscsi_put_conn(conn->cls_conn); mutex_unlock(&session->eh_mutex); return SUCCESS; @@ -2290,6 +2390,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) failed_unlocked: ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, task ? task->itt : 0); + iscsi_put_task(task); + iscsi_put_conn(conn->cls_conn); mutex_unlock(&session->eh_mutex); return FAILED; } @@ -2308,12 +2410,14 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) int iscsi_eh_device_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; + struct iscsi_cls_session_wrapper *session_wrapper; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); + session_wrapper = iscsi_cls_session_to_wrapper(cls_session); session = cls_session->dd_data; ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc, @@ -2330,11 +2434,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) conn = session->leadconn; /* only have one tmf outstanding at a time */ - if (conn->tmf_state != TMF_INITIAL) + if (session_wrapper->tmf_state != TMF_INITIAL) goto unlock; - conn->tmf_state = TMF_QUEUED; + session_wrapper->tmf_state = TMF_QUEUED; - hdr = &conn->tmhdr; + hdr = &session_wrapper->tmhdr; iscsi_prep_lun_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, @@ -2343,7 +2447,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) goto unlock; } - switch (conn->tmf_state) { + switch (session_wrapper->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: @@ -2351,7 +2455,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; goto unlock; } @@ -2363,7 +2467,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); @@ -2382,12 +2486,13 @@ EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; + struct iscsi_cls_session_wrapper *session_wrapper = + iscsi_cls_session_to_wrapper(cls_session); spin_lock_bh(&session->frwd_lock); if (session->state != ISCSI_STATE_LOGGED_IN) { session->state = ISCSI_STATE_RECOVERY_FAILED; - if (session->leadconn) - wake_up(&session->leadconn->ehwait); + wake_up(&session_wrapper->ehwait); } spin_unlock_bh(&session->frwd_lock); } @@ -2403,21 +2508,33 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); int iscsi_eh_session_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; + struct iscsi_cls_session_wrapper *session_wrapper; struct iscsi_session *session; struct iscsi_conn 
*conn; cls_session = starget_to_session(scsi_target(sc->device)); + session_wrapper = iscsi_cls_session_to_wrapper(cls_session); session = cls_session->dd_data; conn = session->leadconn; mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); - if (session->state == ISCSI_STATE_TERMINATE) { + + /* + * During shutdown, if session is prematurely disconnected, recovery + * won't happen and there will be hung cmds. To solve this case, all + * cmds would be enter scsi EH, but the EH path will wait for + * wait_event_interruptible() to complete until the state of the + * session is ISCSI_STATE_TERMINATE, ISCSI_STATE_LOGGED_IN or + * ISCSI_STATE_RECOVERY_FAILED. + */ + if (session->state == ISCSI_STATE_TERMINATE || + unlikely(system_state != SYSTEM_RUNNING)) { failed: ISCSI_DBG_EH(session, "failing session reset: Could not log back into " - "%s, %s [age %d]\n", session->targetname, - conn->persistent_address, session->age); + "%s [age %d]\n", session->targetname, + session->age); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return FAILED; @@ -2432,10 +2549,11 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc) iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); ISCSI_DBG_EH(session, "wait for relogin\n"); - wait_event_interruptible(conn->ehwait, + wait_event_interruptible_timeout(session_wrapper->ehwait, session->state == ISCSI_STATE_TERMINATE || session->state == ISCSI_STATE_LOGGED_IN || - session->state == ISCSI_STATE_RECOVERY_FAILED); + session->state == ISCSI_STATE_RECOVERY_FAILED, + cls_session->recovery_tmo * HZ); if (signal_pending(current)) flush_signals(current); @@ -2471,12 +2589,14 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) static int iscsi_eh_target_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; + struct iscsi_cls_session_wrapper *session_wrapper; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); + session_wrapper = iscsi_cls_session_to_wrapper(cls_session); session = cls_session->dd_data; ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, @@ -2493,11 +2613,11 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc) conn = session->leadconn; /* only have one tmf outstanding at a time */ - if (conn->tmf_state != TMF_INITIAL) + if (session_wrapper->tmf_state != TMF_INITIAL) goto unlock; - conn->tmf_state = TMF_QUEUED; + session_wrapper->tmf_state = TMF_QUEUED; - hdr = &conn->tmhdr; + hdr = &session_wrapper->tmhdr; iscsi_prep_tgt_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, @@ -2506,7 +2626,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc) goto unlock; } - switch (conn->tmf_state) { + switch (session_wrapper->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: @@ -2514,7 +2634,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc) iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; goto unlock; } @@ -2526,7 +2646,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc) spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, -1, DID_ERROR); - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); @@ -2616,6 +2736,56 @@ void iscsi_pool_free(struct iscsi_pool *q) } EXPORT_SYMBOL_GPL(iscsi_pool_free); +int 
iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, + uint16_t requested_cmds_max) +{ + int scsi_cmds, total_cmds = requested_cmds_max; + +check: + if (!total_cmds) + total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; + /* + * The iscsi layer needs some tasks for nop handling and tmfs, + * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX + * + 1 command for scsi IO. + */ + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { + printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n", + total_cmds, ISCSI_TOTAL_CMDS_MIN); + return -EINVAL; + } + + if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { + printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n", + requested_cmds_max, ISCSI_TOTAL_CMDS_MAX, + ISCSI_TOTAL_CMDS_MAX); + total_cmds = ISCSI_TOTAL_CMDS_MAX; + } + + if (!is_power_of_2(total_cmds)) { + total_cmds = rounddown_pow_of_two(total_cmds); + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { + printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN); + return -EINVAL; + } + + printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n", + requested_cmds_max, total_cmds); + } + + scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; + if (shost->can_queue && scsi_cmds > shost->can_queue) { + total_cmds = shost->can_queue; + + printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n", + requested_cmds_max, shost->can_queue); + goto check; + } + + return scsi_cmds; +} +EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds); + /** * iscsi_host_add - add host to system * @shost: scsi host @@ -2659,7 +2829,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, if (xmit_can_sleep) { snprintf(ihost->workq_name, sizeof(ihost->workq_name), "iscsi_q_%d", shost->host_no); - ihost->workq = create_singlethread_workqueue(ihost->workq_name); + /* this kind of workqueue only support single work */ + ihost->workq = alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | + __WQ_DYNAMIC, ihost->workq_name); if (!ihost->workq) goto free_host; } @@ -2704,8 +2876,6 @@ void iscsi_host_remove(struct Scsi_Host *shost) flush_signals(current); scsi_remove_host(shost); - if (ihost->workq) - destroy_workqueue(ihost->workq); } EXPORT_SYMBOL_GPL(iscsi_host_remove); @@ -2713,6 +2883,9 @@ void iscsi_host_free(struct Scsi_Host *shost) { struct iscsi_host *ihost = shost_priv(shost); + if (ihost->workq) + destroy_workqueue(ihost->workq); + kfree(ihost->netdev); kfree(ihost->hwaddress); kfree(ihost->initiatorname); @@ -2766,7 +2939,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, struct iscsi_host *ihost = shost_priv(shost); struct iscsi_session *session; struct iscsi_cls_session *cls_session; - int cmd_i, scsi_cmds, total_cmds = cmds_max; + struct iscsi_cls_session_wrapper *cls_sess_wrapper; + int cmd_i, scsi_cmds; unsigned long flags; spin_lock_irqsave(&ihost->lock, flags); @@ -2777,37 +2951,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, ihost->num_sessions++; spin_unlock_irqrestore(&ihost->lock, flags); - if (!total_cmds) - total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; - /* - * The iscsi layer needs some tasks for nop handling and tmfs, - * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX - * + 1 command for scsi IO. - */ - if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { - printk(KERN_ERR "iscsi: invalid can_queue of %d. 
can_queue " - "must be a power of two that is at least %d.\n", - total_cmds, ISCSI_TOTAL_CMDS_MIN); + scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max); + if (scsi_cmds < 0) goto dec_session_count; - } - - if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { - printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " - "must be a power of 2 less than or equal to %d.\n", - cmds_max, ISCSI_TOTAL_CMDS_MAX); - total_cmds = ISCSI_TOTAL_CMDS_MAX; - } - - if (!is_power_of_2(total_cmds)) { - printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " - "must be a power of 2.\n", total_cmds); - total_cmds = rounddown_pow_of_two(total_cmds); - if (total_cmds < ISCSI_TOTAL_CMDS_MIN) - return NULL; - printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n", - total_cmds); - } - scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; cls_session = iscsi_alloc_session(shost, iscsit, sizeof(struct iscsi_session) + @@ -2823,7 +2969,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, session->lu_reset_timeout = 15; session->abort_timeout = 10; session->scsi_cmds_max = scsi_cmds; - session->cmds_max = total_cmds; + session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX; session->queued_cmdsn = session->cmdsn = initial_cmdsn; session->exp_cmdsn = initial_cmdsn + 1; session->max_cmdsn = initial_cmdsn + 1; @@ -2831,7 +2977,11 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, session->tt = iscsit; session->dd_data = cls_session->dd_data + sizeof(*session); + cls_sess_wrapper = iscsi_cls_session_to_wrapper(cls_session); + cls_sess_wrapper->tmf_state = TMF_INITIAL; + timer_setup(&cls_sess_wrapper->tmf_timer, iscsi_tmf_timedout, 0); mutex_init(&session->eh_mutex); + spin_lock_init(&session->frwd_lock); spin_lock_init(&session->back_lock); @@ -2872,20 +3022,34 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, } EXPORT_SYMBOL_GPL(iscsi_session_setup); +/* + * issi_session_remove - Remove session from iSCSI class. + */ +void iscsi_session_remove(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct Scsi_Host *shost = session->host; + + iscsi_remove_session(cls_session); + /* + * host removal only has to wait for its children to be removed from + * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing + * the session, so drop the session count here. 
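The comment above is the reason teardown is split into iscsi_session_remove() and iscsi_session_free(): sysfs-visible objects must disappear before the memory behind them does, so the iscsi_tcp destroy path becomes remove session, remove host, then free both. Condensed, the ordering (as used earlier in iscsi_sw_tcp_session_destroy, error handling omitted) looks like:

	iscsi_session_remove(cls_session);	/* unhook session from sysfs */
	iscsi_host_remove(shost);		/* userspace can no longer call back in */
	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_free(cls_session);	/* now safe to free session memory */
	iscsi_host_free(shost);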
+ */ + iscsi_host_dec_session_cnt(shost); +} +EXPORT_SYMBOL_GPL(iscsi_session_remove); + /** - * iscsi_session_teardown - destroy session, host, and cls_session + * iscsi_session_free - Free iscsi session and it's resources * @cls_session: iscsi session */ -void iscsi_session_teardown(struct iscsi_cls_session *cls_session) +void iscsi_session_free(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct module *owner = cls_session->transport->owner; - struct Scsi_Host *shost = session->host; iscsi_pool_free(&session->cmdpool); - - iscsi_remove_session(cls_session); - kfree(session->password); kfree(session->password_in); kfree(session->username); @@ -2901,10 +3065,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->discovery_parent_type); iscsi_free_session(cls_session); - - iscsi_host_dec_session_cnt(shost); module_put(owner); } +EXPORT_SYMBOL_GPL(iscsi_session_free); + +/** + * iscsi_session_teardown - destroy session and cls_session + * @cls_session: iscsi session + */ +void iscsi_session_teardown(struct iscsi_cls_session *cls_session) +{ + iscsi_session_remove(cls_session); + iscsi_session_free(cls_session); +} EXPORT_SYMBOL_GPL(iscsi_session_teardown); /** @@ -2918,11 +3091,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, uint32_t conn_idx) { struct iscsi_session *session = cls_session->dd_data; + struct iscsi_cls_session_wrapper *session_wrapper = + iscsi_cls_session_to_wrapper(cls_session); struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; char *data; + int err; - cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size, + cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size, conn_idx); if (!cls_conn) return NULL; @@ -2935,14 +3111,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, conn->c_stage = ISCSI_CONN_INITIAL_STAGE; conn->id = conn_idx; conn->exp_statsn = 0; - conn->tmf_state = TMF_INITIAL; + session_wrapper->tmf_state = TMF_INITIAL; + conn->intimate_cpu = -1; timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); INIT_LIST_HEAD(&conn->mgmtqueue); INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); - spin_lock_init(&conn->taskqueuelock); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); /* allocate login_task used for the login/text sequences */ @@ -2961,16 +3137,23 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, goto login_task_data_alloc_fail; conn->login_task->data = conn->data = data; - timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0); - init_waitqueue_head(&conn->ehwait); + init_waitqueue_head(&session_wrapper->ehwait); + + err = iscsi_add_conn(cls_conn); + if (err) + goto login_task_add_dev_fail; return cls_conn; +login_task_add_dev_fail: + free_pages((unsigned long) conn->data, + get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); + login_task_data_alloc_fail: kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); login_task_alloc_fail: - iscsi_destroy_conn(cls_conn); + iscsi_put_conn(cls_conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_conn_setup); @@ -2987,17 +3170,21 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; + iscsi_remove_conn(cls_conn); + del_timer_sync(&conn->transport_timer); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; if (session->leadconn == conn) { + struct 
iscsi_cls_session_wrapper *cls_session = + iscsi_cls_session_to_wrapper(session->cls_session); /* * leading connection? then give up on recovery. */ session->state = ISCSI_STATE_TERMINATE; - wake_up(&conn->ehwait); + wake_up(&cls_session->ehwait); } spin_unlock_bh(&session->frwd_lock); @@ -3019,7 +3206,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); - iscsi_destroy_conn(cls_conn); + iscsi_put_conn(cls_conn); } EXPORT_SYMBOL_GPL(iscsi_conn_teardown); @@ -3027,6 +3214,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; + struct iscsi_cls_session_wrapper *cls_session = + iscsi_cls_session_to_wrapper(session->cls_session); if (!session) { iscsi_conn_printk(KERN_ERR, conn, @@ -3072,7 +3261,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) * commands after successful recovery */ conn->stop_stage = 0; - conn->tmf_state = TMF_INITIAL; + cls_session->tmf_state = TMF_INITIAL; session->age++; if (session->age == 16) session->age = 0; @@ -3086,7 +3275,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) spin_unlock_bh(&session->frwd_lock); iscsi_unblock_session(session->cls_session); - wake_up(&conn->ehwait); + wake_up(&cls_session->ehwait); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_start); @@ -3108,11 +3297,18 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) ISCSI_DBG_SESSION(conn->session, "failing mgmt itt 0x%x state %d\n", task->itt, task->state); + + spin_lock_bh(&session->back_lock); + if (cleanup_queued_task(task)) { + spin_unlock_bh(&session->back_lock); + continue; + } + state = ISCSI_TASK_ABRT_SESS_RECOV; if (task->state == ISCSI_TASK_PENDING) state = ISCSI_TASK_COMPLETED; iscsi_complete_task(task, state); - + spin_unlock_bh(&session->back_lock); } } @@ -3120,6 +3316,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, struct iscsi_conn *conn, int flag) { int old_stop_stage; + struct iscsi_cls_session_wrapper *cls_session = + iscsi_cls_session_to_wrapper(session->cls_session); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->frwd_lock); @@ -3172,7 +3370,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, spin_lock_bh(&session->frwd_lock); fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); - memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); + memset(&cls_session->tmhdr, 0, sizeof(cls_session->tmhdr)); spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); } @@ -3205,6 +3403,13 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, session->leadconn = conn; spin_unlock_bh(&session->frwd_lock); + /* + * The target could have reduced it's window size between logins, so + * we have to reset max/exp cmdsn so we can see the new values. + */ + spin_lock_bh(&session->back_lock); + session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1; + spin_unlock_bh(&session->back_lock); /* * Unblock xmitworker(), Login Phase will pass through. 
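The max/exp cmdsn reset just above matters because command issue is gated on the CmdSN window the target advertises; if the window shrank between logins, stale values could stall or overrun it. A sketch of the serial-number window test that the reset keeps honest (field names follow the session structure, the helper itself is hypothetical):

/* CmdSN uses serial-number arithmetic: the window is open while
 * queued_cmdsn has not passed max_cmdsn.
 */
static bool cmdsn_window_open(u32 queued_cmdsn, u32 max_cmdsn)
{
	return (s32)(max_cmdsn - queued_cmdsn) >= 0;
}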
*/ @@ -3355,125 +3560,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, switch(param) { case ISCSI_PARAM_FAST_ABORT: - len = sprintf(buf, "%d\n", session->fast_abort); + len = sysfs_emit(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: - len = sprintf(buf, "%d\n", session->abort_timeout); + len = sysfs_emit(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: - len = sprintf(buf, "%d\n", session->lu_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: - len = sprintf(buf, "%d\n", session->tgt_reset_timeout); + len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: - len = sprintf(buf, "%d\n", session->initial_r2t_en); + len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: - len = sprintf(buf, "%hu\n", session->max_r2t); + len = sysfs_emit(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: - len = sprintf(buf, "%d\n", session->imm_data_en); + len = sysfs_emit(buf, "%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: - len = sprintf(buf, "%u\n", session->first_burst); + len = sysfs_emit(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: - len = sprintf(buf, "%u\n", session->max_burst); + len = sysfs_emit(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: - len = sprintf(buf, "%d\n", session->pdu_inorder_en); + len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: - len = sprintf(buf, "%d\n", session->dataseq_inorder_en); + len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_DEF_TASKMGMT_TMO: - len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); + len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); break; case ISCSI_PARAM_ERL: - len = sprintf(buf, "%d\n", session->erl); + len = sysfs_emit(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: - len = sprintf(buf, "%s\n", session->targetname); + len = sysfs_emit(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TARGET_ALIAS: - len = sprintf(buf, "%s\n", session->targetalias); + len = sysfs_emit(buf, "%s\n", session->targetalias); break; case ISCSI_PARAM_TPGT: - len = sprintf(buf, "%d\n", session->tpgt); + len = sysfs_emit(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: - len = sprintf(buf, "%s\n", session->username); + len = sysfs_emit(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: - len = sprintf(buf, "%s\n", session->username_in); + len = sysfs_emit(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: - len = sprintf(buf, "%s\n", session->password); + len = sysfs_emit(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: - len = sprintf(buf, "%s\n", session->password_in); + len = sysfs_emit(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: - len = sprintf(buf, "%s\n", session->ifacename); + len = sysfs_emit(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", session->initiatorname); + len = sysfs_emit(buf, "%s\n", session->initiatorname); break; case ISCSI_PARAM_BOOT_ROOT: - len = sprintf(buf, "%s\n", session->boot_root); + len = sysfs_emit(buf, "%s\n", session->boot_root); break; case ISCSI_PARAM_BOOT_NIC: - len = sprintf(buf, "%s\n", session->boot_nic); + len = sysfs_emit(buf, "%s\n", session->boot_nic); break; 
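The sprintf() to sysfs_emit() conversions that start above, and continue through the connection parameters below, trade an unbounded write for one that knows it is filling a PAGE_SIZE sysfs buffer and clamps the output accordingly; any remaining show-style callback follows the same shape. A minimal sketch, with example_value standing in for a real attribute:

static unsigned int example_value;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the output to the sysfs PAGE_SIZE buffer */
	return sysfs_emit(buf, "%u\n", example_value);
}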
case ISCSI_PARAM_BOOT_TARGET: - len = sprintf(buf, "%s\n", session->boot_target); + len = sysfs_emit(buf, "%s\n", session->boot_target); break; case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: - len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); + len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); break; case ISCSI_PARAM_DISCOVERY_SESS: - len = sprintf(buf, "%u\n", session->discovery_sess); + len = sysfs_emit(buf, "%u\n", session->discovery_sess); break; case ISCSI_PARAM_PORTAL_TYPE: - len = sprintf(buf, "%s\n", session->portal_type); + len = sysfs_emit(buf, "%s\n", session->portal_type); break; case ISCSI_PARAM_CHAP_AUTH_EN: - len = sprintf(buf, "%u\n", session->chap_auth_en); + len = sysfs_emit(buf, "%u\n", session->chap_auth_en); break; case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: - len = sprintf(buf, "%u\n", session->discovery_logout_en); + len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); break; case ISCSI_PARAM_BIDI_CHAP_EN: - len = sprintf(buf, "%u\n", session->bidi_chap_en); + len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); break; case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: - len = sprintf(buf, "%u\n", session->discovery_auth_optional); + len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); break; case ISCSI_PARAM_DEF_TIME2WAIT: - len = sprintf(buf, "%d\n", session->time2wait); + len = sysfs_emit(buf, "%d\n", session->time2wait); break; case ISCSI_PARAM_DEF_TIME2RETAIN: - len = sprintf(buf, "%d\n", session->time2retain); + len = sysfs_emit(buf, "%d\n", session->time2retain); break; case ISCSI_PARAM_TSID: - len = sprintf(buf, "%u\n", session->tsid); + len = sysfs_emit(buf, "%u\n", session->tsid); break; case ISCSI_PARAM_ISID: - len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", session->isid[0], session->isid[1], session->isid[2], session->isid[3], session->isid[4], session->isid[5]); break; case ISCSI_PARAM_DISCOVERY_PARENT_IDX: - len = sprintf(buf, "%u\n", session->discovery_parent_idx); + len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); break; case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: if (session->discovery_parent_type) - len = sprintf(buf, "%s\n", + len = sysfs_emit(buf, "%s\n", session->discovery_parent_type); else - len = sprintf(buf, "\n"); + len = sysfs_emit(buf, "\n"); break; default: return -ENOSYS; @@ -3505,16 +3710,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: if (sin) - len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); + len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); else - len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); + len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: if (sin) - len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else - len = sprintf(buf, "%hu\n", + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin6->sin6_port)); break; default: @@ -3533,88 +3738,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_PING_TMO: - len = sprintf(buf, "%u\n", conn->ping_timeout); + len = sysfs_emit(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: - len = sprintf(buf, "%u\n", conn->recv_timeout); + len = sysfs_emit(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_recv_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); 
break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: - len = sprintf(buf, "%u\n", conn->max_xmit_dlength); + len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: - len = sprintf(buf, "%d\n", conn->hdrdgst_en); + len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: - len = sprintf(buf, "%d\n", conn->datadgst_en); + len = sysfs_emit(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ifmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: - len = sprintf(buf, "%d\n", conn->ofmarker_en); + len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: - len = sprintf(buf, "%u\n", conn->exp_statsn); + len = sysfs_emit(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: - len = sprintf(buf, "%d\n", conn->persistent_port); + len = sysfs_emit(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - len = sprintf(buf, "%s\n", conn->persistent_address); + len = sysfs_emit(buf, "%s\n", conn->persistent_address); break; case ISCSI_PARAM_STATSN: - len = sprintf(buf, "%u\n", conn->statsn); + len = sysfs_emit(buf, "%u\n", conn->statsn); break; case ISCSI_PARAM_MAX_SEGMENT_SIZE: - len = sprintf(buf, "%u\n", conn->max_segment_size); + len = sysfs_emit(buf, "%u\n", conn->max_segment_size); break; case ISCSI_PARAM_KEEPALIVE_TMO: - len = sprintf(buf, "%u\n", conn->keepalive_tmo); + len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); break; case ISCSI_PARAM_LOCAL_PORT: - len = sprintf(buf, "%u\n", conn->local_port); + len = sysfs_emit(buf, "%u\n", conn->local_port); break; case ISCSI_PARAM_TCP_TIMESTAMP_STAT: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); break; case ISCSI_PARAM_TCP_NAGLE_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); break; case ISCSI_PARAM_TCP_WSF_DISABLE: - len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); + len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); break; case ISCSI_PARAM_TCP_TIMER_SCALE: - len = sprintf(buf, "%u\n", conn->tcp_timer_scale); + len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); break; case ISCSI_PARAM_TCP_TIMESTAMP_EN: - len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); break; case ISCSI_PARAM_IP_FRAGMENT_DISABLE: - len = sprintf(buf, "%u\n", conn->fragment_disable); + len = sysfs_emit(buf, "%u\n", conn->fragment_disable); break; case ISCSI_PARAM_IPV4_TOS: - len = sprintf(buf, "%u\n", conn->ipv4_tos); + len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); break; case ISCSI_PARAM_IPV6_TC: - len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); + len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); break; case ISCSI_PARAM_IPV6_FLOW_LABEL: - len = sprintf(buf, "%u\n", conn->ipv6_flow_label); + len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); break; case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: - len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); + len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); break; case ISCSI_PARAM_TCP_XMIT_WSF: - len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); break; case ISCSI_PARAM_TCP_RECV_WSF: - len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); + len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); break; case ISCSI_PARAM_LOCAL_IPADDR: - len = sprintf(buf, 
"%s\n", conn->local_ipaddr); + len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); break; default: return -ENOSYS; @@ -3632,13 +3837,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", ihost->netdev); + len = sysfs_emit(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - len = sprintf(buf, "%s\n", ihost->hwaddress); + len = sysfs_emit(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", ihost->initiatorname); + len = sysfs_emit(buf, "%s\n", ihost->initiatorname); break; default: return -ENOSYS; diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 4fcb9e65be5785d1f8324e0d9b594ef8214d7673..f9437fe983a17c59db3e0a3e0181a480095d355d 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -125,12 +125,17 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv) BUG_ON(sg->length == 0); /* + * We always map for the recv path. + * * If the page count is greater than one it is ok to send * to the network layer's zero copy send path. If not we - * have to go the slow sendmsg path. We always map for the - * recv path. + * have to go the slow sendmsg path. + * + * Same goes for slab pages: skb_can_coalesce() allows + * coalescing neighboring slab objects into a single frag which + * triggers one of hardened usercopy checks. */ - if (page_count(sg_page(sg)) >= 1 && !recv) + if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg))) return; if (recv) { @@ -526,48 +531,79 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) /** * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing * @conn: iscsi connection - * @task: scsi command task + * @hdr: PDU header */ -static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) +static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { struct iscsi_session *session = conn->session; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; + struct iscsi_tcp_task *tcp_task; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_r2t_rsp *rhdr; struct iscsi_r2t_info *r2t; - int r2tsn = be32_to_cpu(rhdr->r2tsn); + struct iscsi_task *task; u32 data_length; u32 data_offset; + int r2tsn; int rc; + spin_lock(&session->back_lock); + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) { + spin_unlock(&session->back_lock); + return ISCSI_ERR_BAD_ITT; + } else if (task->sc->sc_data_direction != DMA_TO_DEVICE) { + spin_unlock(&session->back_lock); + return ISCSI_ERR_PROTO; + } + /* + * A bad target might complete the cmd before we have handled R2Ts + * so get a ref to the task that will be dropped in the xmit path. 
+ */ + if (task->state != ISCSI_TASK_RUNNING) { + spin_unlock(&session->back_lock); + /* Let the path that got the early rsp complete it */ + return 0; + } + task->last_xfer = jiffies; + __iscsi_get_task(task); + + tcp_conn = conn->dd_data; + rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; + /* fill-in new R2T associated with the task */ + iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr); + spin_unlock(&session->back_lock); + if (tcp_conn->in.datalen) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2t with datalen %d\n", tcp_conn->in.datalen); - return ISCSI_ERR_DATALEN; + rc = ISCSI_ERR_DATALEN; + goto put_task; } + tcp_task = task->dd_data; + r2tsn = be32_to_cpu(rhdr->r2tsn); if (tcp_task->exp_datasn != r2tsn){ ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", tcp_task->exp_datasn, r2tsn); - return ISCSI_ERR_R2TSN; + rc = ISCSI_ERR_R2TSN; + goto put_task; } - /* fill-in new R2T associated with the task */ - iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); - - if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) { + if (session->state != ISCSI_STATE_LOGGED_IN) { iscsi_conn_printk(KERN_INFO, conn, "dropping R2T itt %d in recovery.\n", task->itt); - return 0; + rc = 0; + goto put_task; } data_length = be32_to_cpu(rhdr->data_length); if (data_length == 0) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with zero data len\n"); - return ISCSI_ERR_DATALEN; + rc = ISCSI_ERR_DATALEN; + goto put_task; } if (data_length > session->max_burst) @@ -581,7 +617,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) "invalid R2T with data len %u at offset %u " "and total length %d\n", data_length, data_offset, scsi_out(task->sc)->length); - return ISCSI_ERR_DATALEN; + rc = ISCSI_ERR_DATALEN; + goto put_task; } spin_lock(&tcp_task->pool2queue); @@ -591,7 +628,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) "Target has sent more R2Ts than it " "negotiated for or driver has leaked.\n"); spin_unlock(&tcp_task->pool2queue); - return ISCSI_ERR_PROTO; + rc = ISCSI_ERR_PROTO; + goto put_task; } r2t->exp_statsn = rhdr->statsn; @@ -609,6 +647,10 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) iscsi_requeue_task(task); return 0; + +put_task: + iscsi_put_task(task); + return rc; } /* @@ -732,20 +774,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; case ISCSI_OP_R2T: - spin_lock(&conn->session->back_lock); - task = iscsi_itt_to_ctask(conn, hdr->itt); - spin_unlock(&conn->session->back_lock); - if (!task) - rc = ISCSI_ERR_BAD_ITT; - else if (ahslen) + if (ahslen) { rc = ISCSI_ERR_AHSLEN; - else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { - task->last_xfer = jiffies; - spin_lock(&conn->session->frwd_lock); - rc = iscsi_tcp_r2t_rsp(conn, task); - spin_unlock(&conn->session->frwd_lock); - } else - rc = ISCSI_ERR_PROTO; + break; + } + rc = iscsi_tcp_r2t_rsp(conn, hdr); break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 64a958a99f6a89896c16354502c34a397d18a1df..19a2b17876f5050c59683362d28450baa782fce7 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -215,15 +215,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; + task->data_dir = qc->dma_dir; 
+ } else if (!ata_is_data(qc->tf.protocol)) { + task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; + task->data_dir = qc->dma_dir; } - - task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; @@ -383,6 +385,129 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev, return r; } +static enum sas_linkrate sas_find_min_pathway(struct domain_device *ddev) +{ + enum sas_linkrate min_linkrate = SAS_LINK_RATE_12_0_GBPS; + struct domain_device *child; + struct expander_device *ex; + struct asd_sas_phy *phy; + int i; + + child = ddev; + ddev = ddev->parent; + + while (ddev) { + if (ddev->dev_type != SAS_EDGE_EXPANDER_DEVICE && + ddev->dev_type != SAS_FANOUT_EXPANDER_DEVICE) + break; + + ex = &ddev->ex_dev; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) + if (min_linkrate > phy->linkrate) + min_linkrate = phy->linkrate; + } + + child = ddev; + ddev = ddev->parent; + } + + /* check the direct attached phy linkrate */ + list_for_each_entry(phy, &child->port->phy_list, port_phy_el) { + if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(child->sas_addr)) + if (min_linkrate > phy->linkrate) + min_linkrate = phy->linkrate; + } + + return min_linkrate; +} + +static inline void sas_ata_set_linkrate(struct domain_device *dev, + int phy_num, + enum sas_linkrate linkrate) +{ + struct sas_phy_linkrates rates; + int ret; + + rates.minimum_linkrate = 0; + rates.maximum_linkrate = linkrate; + ret = sas_smp_phy_control(dev, phy_num, PHY_FUNC_LINK_RESET, &rates); + + SAS_DPRINTK("ex %016llx phy%02d set max linkrate to %X %s\n", + SAS_ADDR(dev->sas_addr), phy_num, linkrate, + ret ? "failed" : "succeed"); +} + +static void sas_ata_check_pathway(void *data, async_cookie_t cookie) +{ + struct domain_device *dev = data; + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy; + enum sas_linkrate linkrate; + int i; + + /* + * According to Serial Attached SCSI - 1.1 (SAS-1.1): + * If an expander phy attached to a SATA phy is using a physical link + * rate greater than the maximum connection rate supported by the + * pathway from an STP initiator port, a management application client + * should use the SMP PHY CONTROL function (see 10.4.3.10) to set the + * PROGRAMMED MAXIMUM PHYSICAL LINK RATE field of the expander phy to + * the maximum connection rate supported by the pathway from that STP + * initiator port. 
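+ *
+ * sas_find_min_pathway() below walks the expander chain toward the
+ * initiator to find that minimum pathway rate; any SATA-attached
+ * expander phy negotiated above it is then capped with SMP PHY
+ * CONTROL via sas_ata_set_linkrate().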
+ */ + + linkrate = sas_find_min_pathway(dev); + + for (i = 0; i < ex->num_phys; i++) { + ex_phy = &ex->ex_phy[i]; + + if (!ex_phy_is_sata(ex_phy)) + continue; + + if (ex_phy->linkrate > linkrate) { + sas_ata_set_linkrate(dev, i, linkrate); + ex_phy->linkrate = linkrate; + } + } + + sas_put_device(dev); +} + +void sas_ata_check_topology(struct asd_sas_port *port) +{ + ASYNC_DOMAIN_EXCLUSIVE(async); + struct domain_device *dev; + + spin_lock(&port->dev_list_lock); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE && + dev->dev_type != SAS_FANOUT_EXPANDER_DEVICE) + continue; + + /* hold a reference since we may be + * racing with final remove + */ + kref_get(&dev->kref); + + async_schedule_domain(sas_ata_check_pathway, dev, &async); + } + spin_unlock(&port->dev_list_lock); + + async_synchronize_full_domain(&async); + +} + static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, unsigned long deadline) { @@ -522,10 +647,23 @@ void sas_ata_end_eh(struct ata_port *ap) spin_unlock_irqrestore(&ha->lock, flags); } +static int sas_ata_prereset(struct ata_link *link, unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct domain_device *dev = ap->private_data; + struct sas_phy *local_phy = sas_get_local_phy(dev); + int res = 0; + + if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state)) + res = -ENOENT; + sas_put_local_phy(local_phy); + + return res; +} + static struct ata_port_operations sas_sata_ops = { - .prereset = ata_std_prereset, + .prereset = sas_ata_prereset, .hardreset = sas_ata_hard_reset, - .postreset = ata_std_postreset, .error_handler = ata_std_error_handler, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, @@ -632,6 +770,22 @@ static int sas_get_ata_command_set(struct domain_device *dev) return ata_dev_classify(&tf); } +static void sas_ata_store_id(struct domain_device *dev) +{ + struct ata_device *ata_dev = sas_to_ata_dev(dev); + unsigned char model[ATA_ID_PROD_LEN + 1]; + unsigned char serial[ATA_ID_SERNO_LEN + 1]; + + /* store the ata device's class and id */ + memcpy(dev->sata_dev.id, ata_dev->id, ATA_ID_WORDS); + dev->sata_dev.class = ata_dev->class; + + ata_id_c_string(ata_dev->id, model, ATA_ID_PROD, sizeof(model)); + ata_id_c_string(ata_dev->id, serial, ATA_ID_SERNO, sizeof(serial)); + + sas_ata_printk(KERN_INFO, dev, "model:%s serial:%s\n", model, serial); +} + void sas_probe_sata(struct asd_sas_port *port) { struct domain_device *dev, *n; @@ -654,8 +808,10 @@ void sas_probe_sata(struct asd_sas_port *port) /* if libata could not bring the link up, don't surface * the device */ - if (ata_dev_disabled(sas_to_ata_dev(dev))) + if (!ata_dev_enabled(sas_to_ata_dev(dev))) sas_fail_probe(dev, __func__, -ENODEV); + else + sas_ata_store_id(dev); } } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 0148ae62a52a941b154df5181fc80543df4a30cf..f76e4c19a677aa8c28ec19b0789a5ad03b18eed3 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -97,12 +97,18 @@ static int sas_get_port_device(struct asd_sas_port *port) else dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; - } else { + } else if (port->oob_mode == SAS_OOB_MODE) { struct sas_identify_frame *id = (struct sas_identify_frame *) dev->frame_rcvd; dev->dev_type = id->dev_type; dev->iproto = id->initiator_bits; dev->tproto = id->target_bits; + } else { + /* If the oob mode is OOB_NOT_CONNECTED, the port is + * disconnected due 
to race with PHY down. We cannot + * continue to discover this port */ + sas_put_device(dev); + return rc; } sas_init_dev(dev); @@ -260,7 +266,7 @@ static void sas_suspend_devices(struct work_struct *work) * phy_list is not being mutated */ list_for_each_entry(phy, &port->phy_list, port_phy_el) { - if (si->dft->lldd_port_formed) + if (si->dft->lldd_port_deformed) si->dft->lldd_port_deformed(phy); phy->suspended = 1; port->suspended = 1; @@ -532,6 +538,7 @@ static void sas_revalidate_domain(struct work_struct *work) sas_destruct_devices(port); sas_destruct_ports(port); sas_probe_devices(port); + sas_ata_check_topology(port); } /* ---------- Events ---------- */ diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index fadc99cb60df935b7a285f39b4ac7d0d50622c3f..911d0e1e8e3fcfebf96f42938fe039ccbf5d64ca 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t) unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->slow_task->completion); + } spin_unlock_irqrestore(&task->task_state_lock, flags); - - complete(&task->slow_task->completion); } static void smp_task_done(struct sas_task *task) { - if (!del_timer(&task->slow_task->timer)) - return; + del_timer(&task->slow_task->timer); complete(&task->slow_task->completion); } @@ -615,7 +614,14 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id, } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); - + if (res) { + pr_err("ex %016llx phy%02d PHY control failed: %d\n", + SAS_ADDR(dev->sas_addr), phy_id, res); + } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) { + pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]); + res = pc_resp[2]; + } kfree(pc_resp); kfree(pc_req); return res; @@ -818,6 +824,26 @@ static struct domain_device *sas_ex_discover_end_dev( #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { + if (child->linkrate > parent->min_linkrate) { + struct sas_phy_linkrates rates = { + .maximum_linkrate = parent->min_linkrate, + .minimum_linkrate = parent->min_linkrate, + }; + int ret; + + pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n", + SAS_ADDR(child->sas_addr), phy_id); + ret = sas_smp_phy_control(parent, phy_id, + PHY_FUNC_LINK_RESET, &rates); + if (ret) { + pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n", + SAS_ADDR(child->sas_addr), phy_id, ret); + goto out_free; + } + pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n", + SAS_ADDR(child->sas_addr), phy_id); + child->linkrate = child->min_linkrate; + } res = sas_get_ata_info(child, phy); if (res) goto out_free; @@ -829,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev( rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; + rphy->identify.phy_identifier = phy_id; child->rphy = rphy; get_device(&rphy->dev); @@ -856,6 +883,7 @@ static struct domain_device *sas_ex_discover_end_dev( child->rphy = rphy; get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); @@ -988,6 +1016,8 @@ 
static struct domain_device *sas_ex_discover_expander( list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); sas_put_device(child); + sas_port_delete(phy->port); + phy->port = NULL; return NULL; } list_add_tail(&child->siblings, &parent->ex_dev.children); @@ -1107,6 +1137,13 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } } + } else { + /* if we failed to discover this device, we have to + * reset the expander phy state and address so that we + * will not treat the phy as flutter in the next + * revalidation + */ + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); } return res; @@ -1128,7 +1165,7 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { - memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); + memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); return 1; } @@ -1140,7 +1177,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child; - u8 sub_addr[8] = {0, }; + u8 sub_addr[SAS_ADDR_SIZE] = {0, }; list_for_each_entry(child, &ex->children, siblings) { if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && @@ -1150,7 +1187,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) sas_find_sub_addr(child, sub_addr); continue; } else { - u8 s2[8]; + u8 s2[SAS_ADDR_SIZE]; if (sas_find_sub_addr(child, s2) && (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { @@ -1747,10 +1784,11 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, res = sas_get_phy_discover(dev, phy_id, disc_resp); if (res == 0) { - memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8); + memcpy(sas_addr, disc_resp->disc.attached_sas_addr, + SAS_ADDR_SIZE); *type = to_dev_type(dr); if (*type == 0) - memset(sas_addr, 0, 8); + memset(sas_addr, 0, SAS_ADDR_SIZE); } kfree(disc_resp); return res; @@ -1921,6 +1959,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, &parent->port->sas_port_del_list); phy->port = NULL; } + if (phy->phy) + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; } static int sas_discover_bfs_by_root_level(struct domain_device *root, @@ -2014,10 +2054,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; - u8 sas_addr[8]; + u8 sas_addr[SAS_ADDR_SIZE]; int res; - memset(sas_addr, 0, 8); + memset(sas_addr, 0, SAS_ADDR_SIZE); res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); switch (res) { case SMP_RESP_NO_PHY: @@ -2039,6 +2079,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); + /* + * Even though the PHY is empty, for convenience we discover + * the PHY to update the PHY info, like negotiated linkrate. 
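+ * sas_unregister_devs_sas_addr() above has already reset the
+ * transport phy's negotiated_linkrate to unknown, so the re-discover
+ * refreshes it with the expander's current view of the phy.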
+ */ + sas_ex_phy_discover(dev, phy_id); return res; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { @@ -2051,19 +2096,47 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); + + /* the phy attached address will be updated by sas_ex_phy_discover() + * and sometimes become abnormal + */ + if (SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr) || + SAS_ADDR(phy->attached_sas_addr) == 0) { + /* if attached_sas_addr become abnormal, we must set the + * original address back so that the device can be unregistered + */ + memcpy(phy->attached_sas_addr, sas_addr, SAS_ADDR_SIZE); + SAS_DPRINTK("phy address(%016llx) abnormal, origin:%016llx\n", + SAS_ADDR(phy->attached_sas_addr), + SAS_ADDR(sas_addr)); + goto unregister; + } + + + if (ata_dev) { + struct ata_device *adev = sas_to_ata_dev(ata_dev); + unsigned int class = ata_dev->sata_dev.class; + u16 *id = ata_dev->sata_dev.id; + + /* to see if the disk is replaced with another one */ + if (!ata_dev_same_device(adev, class, id)) + goto unregister; + } + return res; } - /* delete the old link */ - if (SAS_ADDR(phy->attached_sas_addr) && - SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { - SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", - SAS_ADDR(dev->sas_addr), phy_id, - SAS_ADDR(phy->attached_sas_addr)); - sas_unregister_devs_sas_addr(dev, phy_id, last); - } +unregister: + /* we always have to delete the old device when we went here */ + SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", + SAS_ADDR(dev->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr)); + sas_unregister_devs_sas_addr(dev, phy_id, last); - return sas_discover_new(dev, phy_id); + /* force the next revalidation find this phy and bring it up */ + phy->phy_change_count = -1; + + return 0; } /** @@ -2084,30 +2157,74 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; - int res = 0; int i; bool last = true; /* is this the last phy of the port */ - SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", - SAS_ADDR(dev->sas_addr), phy_id); + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; - if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { - for (i = 0; i < ex->num_phys; i++) { - struct ex_phy *phy = &ex->ex_phy[i]; + if (i == phy_id) + continue; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(changed_phy->attached_sas_addr)) { + SAS_DPRINTK("phy%d part of wide port with " + "phy%d\n", phy_id, i); + last = false; + break; + } + } + return sas_rediscover_dev(dev, phy_id, last); +} - if (i == phy_id) - continue; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(changed_phy->attached_sas_addr)) { - SAS_DPRINTK("phy%d part of wide port with " - "phy%d\n", phy_id, i); - last = false; - break; - } +static inline int sas_ex_unregister(struct domain_device *dev, + u8 *changed_phy, + int nr) +{ + struct expander_device *ex = &dev->ex_dev; + int unregistered = 0; + struct ex_phy *phy; + int res; + int i; + + for (i = 0; i < nr; i++) { + SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", + SAS_ADDR(dev->sas_addr), changed_phy[i]); + + phy = &ex->ex_phy[changed_phy[i]]; + + if (SAS_ADDR(phy->attached_sas_addr) != 0) { + res = sas_rediscover(dev, changed_phy[i]); + changed_phy[i] = 
0xff; + unregistered++; } - res = sas_rediscover_dev(dev, phy_id, last); - } else - res = sas_discover_new(dev, phy_id); + } + + return unregistered; +} + +static inline int sas_ex_register(struct domain_device *dev, + u8 *changed_phy, + int nr) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy; + int res = 0; + int i; + + for (i = 0; i < nr; i++) { + if (changed_phy[i] == 0xff) + continue; + + phy = &ex->ex_phy[changed_phy[i]]; + + WARN(SAS_ADDR(phy->attached_sas_addr) != 0, + "phy%02d impossible attached_sas_addr %016llx\n", + changed_phy[i], + SAS_ADDR(phy->attached_sas_addr)); + + res = sas_discover_new(dev, changed_phy[i]); + } + return res; } @@ -2123,23 +2240,60 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) int sas_ex_revalidate_domain(struct domain_device *port_dev) { int res; + struct expander_device *ex; struct domain_device *dev = NULL; + u8 changed_phy[MAX_EXPANDER_PHYS]; + int unregistered = 0; + int phy_id; + int nr = 0; + int i = 0; res = sas_find_bcast_dev(port_dev, &dev); - if (res == 0 && dev) { - struct expander_device *ex = &dev->ex_dev; - int i = 0, phy_id; - - do { - phy_id = -1; - res = sas_find_bcast_phy(dev, &phy_id, i, true); - if (phy_id == -1) - break; - res = sas_rediscover(dev, phy_id); - i = phy_id + 1; - } while (i < ex->num_phys); + if (res != 0 || !dev) + return res; + + memset(changed_phy, 0xff, MAX_EXPANDER_PHYS); + ex = &dev->ex_dev; + + do { + phy_id = -1; + res = sas_find_bcast_phy(dev, &phy_id, i, true); + if (phy_id == -1) + break; + changed_phy[nr++] = phy_id; + i = phy_id + 1; + } while (i < dev->ex_dev.num_phys); + + if (nr == 0) + return res; + + unregistered = sas_ex_unregister(dev, changed_phy, nr); + + /* we have unregistered some devices in this pass and need to + * go again to pick up on any new devices on a separate pass + */ + if (unregistered > 0) { + struct asd_sas_port *port = dev->port; + struct asd_sas_phy *sas_phy; + struct ex_phy *phy; + + for (i = 0; i < nr; i++) { + if (changed_phy[i] == 0xff) + continue; + phy = &ex->ex_phy[changed_phy[i]]; + phy->phy_change_count = -1; + } + ex->ex_change_count = -1; + + sas_phy = container_of(dev->port->phy_list.next, + struct asd_sas_phy, + port_phy_el); + port->ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + + return 0; } - return res; + + return sas_ex_register(dev, changed_phy, nr); } void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index ede0af78144f8c39a05fb477c4970de5e15e413b..89bdd0c1a779ed0983479bf75dfb24ebd1edd851 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -87,25 +87,27 @@ EXPORT_SYMBOL_GPL(sas_free_task); /*------------ SAS addr hash -----------*/ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) { - const u32 poly = 0x00DB2777; - u32 r = 0; - int i; - - for (i = 0; i < 8; i++) { - int b; - for (b = 7; b >= 0; b--) { - r <<= 1; - if ((1 << b) & sas_addr[i]) { - if (!(r & 0x01000000)) - r ^= poly; - } else if (r & 0x01000000) - r ^= poly; - } - } - - hashed[0] = (r >> 16) & 0xFF; - hashed[1] = (r >> 8) & 0xFF ; - hashed[2] = r & 0xFF; + const u32 poly = 0x00DB2777; + u32 r = 0; + int i; + + for (i = 0; i < SAS_ADDR_SIZE; i++) { + int b; + + for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) { + r <<= 1; + if ((1 << b) & sas_addr[i]) { + if (!(r & 0x01000000)) + r ^= poly; + } else if (r & 0x01000000) { + r ^= poly; + } + } + } + + hashed[0] = (r >> 16) & 0xFF; + hashed[1] = (r >> 8) & 0xFF; + hashed[2] 
= r & 0xFF; } int sas_register_ha(struct sas_ha_struct *sas_ha) diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index bf3e1b979ca6f6f0f9dc7d5bed1f5b7d041cf731..760252b503149f5660ce4fa918492d387ca72f08 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -35,7 +35,6 @@ static void sas_phye_loss_of_signal(struct work_struct *work) struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; - phy->in_shutdown = 0; phy->error = 0; sas_deform_port(phy, 1); } @@ -45,7 +44,6 @@ static void sas_phye_oob_done(struct work_struct *work) struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; - phy->in_shutdown = 0; phy->error = 0; } @@ -127,6 +125,7 @@ static void sas_phye_shutdown(struct work_struct *work) } else sas_printk("phy%02d is not enabled, cannot shutdown\n", phy->id); + phy->in_shutdown = 0; } /* ---------- Phy class registration ---------- */ diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index fad23dd3911433cf69f2cc6666acaa050c401c11..32991f7ec2a1031430d2cb6ed59938489af7c53d 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -41,7 +41,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy static void sas_resume_port(struct asd_sas_phy *phy) { - struct domain_device *dev; + struct domain_device *dev, *n; struct asd_sas_port *port = phy->port; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); @@ -60,7 +60,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) * 1/ presume every device came back * 2/ force the next revalidation to check all expander phys */ - list_for_each_entry(dev, &port->dev_list, dev_list_node) { + list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { int i, rc; rc = sas_notify_lldd_dev_found(dev); @@ -194,6 +194,8 @@ static void sas_form_port(struct asd_sas_phy *phy) sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN); flush_workqueue(sas_ha->disco_q); + + sas_ata_check_topology(port); } /** diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 33229348dcb6adc88fe11de596708776a4ee04e2..0ce716a534690627bc4d0782ac1b19d6b0d39702 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -805,7 +805,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) shost->host_failed, tries); } -int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) { struct domain_device *dev = sdev_to_domain_dev(sdev); @@ -844,6 +844,14 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) return found_dev; } +int sas_slave_alloc(struct scsi_device *sdev) +{ + if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun) + return -ENXIO; + + return 0; +} + int sas_target_alloc(struct scsi_target *starget) { struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); @@ -988,5 +996,6 @@ EXPORT_SYMBOL_GPL(sas_task_abort); EXPORT_SYMBOL_GPL(sas_phy_reset); EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); +EXPORT_SYMBOL_GPL(sas_slave_alloc); EXPORT_SYMBOL_GPL(sas_target_destroy); EXPORT_SYMBOL_GPL(sas_ioctl); diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index 092a971d066b1e3cb84c84611be7d420853440d7..4381b472e0b04b19b92a1dfea3a4a0c19152dba8 100644 --- a/drivers/scsi/lpfc/Makefile 
+++ b/drivers/scsi/lpfc/Makefile @@ -21,7 +21,11 @@ # *******************************************************************/ ###################################################################### +ifeq ($(CONFIG_PGO_KERNEL),y) +ccflags-$(GCOV) := -fprofile-generate -ftest-coverage +else ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage +endif ccflags-$(GCOV) += -O0 ifdef WARNINGS_BECOME_ERRORS diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 43732e8d13473f84b88945072d7156d7a2982279..706aca3f7c253ad8815aaedca83006cef3a0c076 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -490,6 +490,7 @@ struct lpfc_vport { struct nvme_fc_local_port *localport; uint8_t nvmei_support; /* driver supports NVME Initiator */ uint32_t last_fcp_wqidx; + uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */ }; struct hbq_s { @@ -965,7 +966,8 @@ struct lpfc_hba { struct list_head port_list; struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ -#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ +#define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */ +#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */ uint16_t max_vports; /* * For IOV HBAs max_vpi can change * after a reset. max_vports is max @@ -1235,6 +1237,12 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) static inline struct lpfc_sli_ring * lpfc_phba_elsring(struct lpfc_hba *phba) { + /* Return NULL if sli_rev has become invalid due to bad fw */ + if (phba->sli_rev != LPFC_SLI_REV4 && + phba->sli_rev != LPFC_SLI_REV3 && + phba->sli_rev != LPFC_SLI_REV2) + return NULL; + if (phba->sli_rev == LPFC_SLI_REV4) { if (phba->sli4_hba.els_wq) return phba->sli4_hba.els_wq->pring; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 1a6ed9b0a249397880b4fac46c36d3acd23fc613..fe084d47ed9e54b5ba405e8ed31d7e16b6450d9c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -117,7 +117,7 @@ static ssize_t lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); + return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); } /** @@ -137,9 +137,9 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; if (phba->hba_flag & HBA_FIP_SUPPORT) - return snprintf(buf, PAGE_SIZE, "1\n"); + return scnprintf(buf, PAGE_SIZE, "1\n"); else - return snprintf(buf, PAGE_SIZE, "0\n"); + return scnprintf(buf, PAGE_SIZE, "0\n"); } static ssize_t @@ -341,7 +341,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.scsi_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -357,7 +357,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; @@ -384,39 +384,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. 
*/ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -424,12 +424,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto buffer_done; + goto rcu_unlock_buf_done; } rcu_read_unlock(); @@ -491,7 +491,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); -buffer_done: + /* RCU is already unlocked. 
*/ + goto buffer_done; + + rcu_unlock_buf_done: + rcu_read_unlock(); + + buffer_done: len = strnlen(buf, PAGE_SIZE); if (unlikely(len >= (PAGE_SIZE - 1))) { @@ -517,14 +523,15 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - if (phba->cfg_enable_bg) + if (phba->cfg_enable_bg) { if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) - return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n"); + return scnprintf(buf, PAGE_SIZE, + "BlockGuard Enabled\n"); else - return snprintf(buf, PAGE_SIZE, + return scnprintf(buf, PAGE_SIZE, "BlockGuard Not Supported\n"); - else - return snprintf(buf, PAGE_SIZE, + } else + return scnprintf(buf, PAGE_SIZE, "BlockGuard Disabled\n"); } @@ -536,7 +543,7 @@ lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%llu\n", + return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_guard_err_cnt); } @@ -548,7 +555,7 @@ lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%llu\n", + return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_apptag_err_cnt); } @@ -560,7 +567,7 @@ lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%llu\n", + return scnprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)phba->bg_reftag_err_cnt); } @@ -578,7 +585,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *host = class_to_shost(dev); - return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); + return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host)); } /** @@ -597,7 +604,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber); } /** @@ -619,7 +626,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support); } /** @@ -638,7 +645,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc); } /** @@ -657,7 +664,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName); } /** @@ -676,7 +683,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) 
shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType); } /** @@ -694,7 +701,7 @@ lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", (phba->sli.sli_flag & LPFC_MENLO_MAINT)); } @@ -714,7 +721,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); + return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port); } /** @@ -742,10 +749,10 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr, sli_family = phba->sli4_hba.pc_sli4_params.sli_family; if (phba->sli_rev < LPFC_SLI_REV4) - len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", + len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev); else - len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", + len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n", fwrev, phba->sli_rev, if_type, sli_family); return len; @@ -769,7 +776,7 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) lpfc_vpd_t *vp = &phba->vpd; lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); - return snprintf(buf, PAGE_SIZE, "%s\n", hdw); + return scnprintf(buf, PAGE_SIZE, "%s\n", hdw); } /** @@ -790,10 +797,11 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr, char fwrev[FW_REV_STR_SIZE]; if (phba->sli_rev < LPFC_SLI_REV4) - return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); + return scnprintf(buf, PAGE_SIZE, "%s\n", + phba->OptionROMVersion); lpfc_decode_firmware_rev(phba, fwrev, 1); - return snprintf(buf, PAGE_SIZE, "%s\n", fwrev); + return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev); } /** @@ -824,20 +832,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: if (phba->hba_flag & LINK_DISABLED) - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); break; case LPFC_LINK_UP: case LPFC_CLEAR_LA: case LPFC_HBA_READY: - len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - "); + len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - "); switch (vport->port_state) { case LPFC_LOCAL_CFG_LINK: - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Configuring Link\n"); break; case LPFC_FDISC: @@ -847,38 +855,40 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_NS_QRY: case LPFC_BUILD_DISC_LIST: case LPFC_DISC_AUTH: - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Discovery\n"); break; case LPFC_VPORT_READY: - len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, + "Ready\n"); break; case LPFC_VPORT_FAILED: - len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, + "Failed\n"); break; case LPFC_VPORT_UNKNOWN: - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); break; } if (phba->sli.sli_flag & 
LPFC_MENLO_MAINT) - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, " Menlo Maint Mode\n"); else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { if (vport->fc_flag & FC_PUBLIC_LOOP) - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, " Public Loop\n"); else - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, " Private Loop\n"); } else { if (vport->fc_flag & FC_FABRIC) - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, " Fabric\n"); else - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, " Point-2-Point\n"); } } @@ -903,15 +913,15 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; if (phba->sli_rev < LPFC_SLI_REV4) - return snprintf(buf, PAGE_SIZE, "fc\n"); + return scnprintf(buf, PAGE_SIZE, "fc\n"); if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) { if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE) - return snprintf(buf, PAGE_SIZE, "fcoe\n"); + return scnprintf(buf, PAGE_SIZE, "fcoe\n"); if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) - return snprintf(buf, PAGE_SIZE, "fc\n"); + return scnprintf(buf, PAGE_SIZE, "fc\n"); } - return snprintf(buf, PAGE_SIZE, "unknown\n"); + return scnprintf(buf, PAGE_SIZE, "unknown\n"); } /** @@ -931,7 +941,7 @@ lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->sli4_hba.pc_sli4_params.oas_supported); } @@ -989,7 +999,7 @@ lpfc_num_discovered_ports_show(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", vport->fc_map_cnt + vport->fc_unmap_cnt); } @@ -1322,7 +1332,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) return -EACCES; if ((phba->sli_rev < LPFC_SLI_REV4) || - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2)) return -EPERM; @@ -1427,7 +1437,7 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); } /** @@ -1456,7 +1466,7 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr, else state = "online"; - return snprintf(buf, PAGE_SIZE, "%s\n", state); + return scnprintf(buf, PAGE_SIZE, "%s\n", state); } /** @@ -1622,6 +1632,9 @@ lpfc_get_hba_info(struct lpfc_hba *phba, max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 
(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; + /* Limit the max we support */ + if (max_vpi > LPFC_MAX_VPI) + max_vpi = LPFC_MAX_VPI; if (mvpi) *mvpi = max_vpi; if (avpi) @@ -1637,8 +1650,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba, *axri = pmb->un.varRdConfig.avail_xri; if (mvpi) *mvpi = pmb->un.varRdConfig.max_vpi; - if (avpi) - *avpi = pmb->un.varRdConfig.avail_vpi; + if (avpi) { + /* avail_vpi is only valid if link is up and ready */ + if (phba->link_state == LPFC_HBA_READY) + *avpi = pmb->un.varRdConfig.avail_vpi; + else + *avpi = pmb->un.varRdConfig.max_vpi; + } } mempool_free(pmboxq, phba->mbox_mem_pool); @@ -1669,8 +1687,8 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr, uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL)) - return snprintf(buf, PAGE_SIZE, "%d\n", cnt); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1697,8 +1715,8 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr, uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL)) - return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1725,8 +1743,8 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr, uint32_t cnt; if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL)) - return snprintf(buf, PAGE_SIZE, "%d\n", cnt); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1753,8 +1771,8 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr, uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL)) - return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1781,8 +1799,8 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr, uint32_t cnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL)) - return snprintf(buf, PAGE_SIZE, "%d\n", cnt); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", cnt); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1809,8 +1827,8 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr, uint32_t cnt, acnt; if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt)) - return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); - return snprintf(buf, PAGE_SIZE, "Unknown\n"); + return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt)); + return scnprintf(buf, PAGE_SIZE, "Unknown\n"); } /** @@ -1835,10 +1853,10 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; if (!(phba->max_vpi)) - return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); + return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n"); if (vport->port_type == LPFC_PHYSICAL_PORT) - return snprintf(buf, PAGE_SIZE, "NPIV Physical\n"); - return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); + return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n"); + return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi); } /** @@ -1860,7 +1878,7 @@ 
lpfc_poll_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); + return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); } /** @@ -1964,7 +1982,7 @@ lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); } /** @@ -1983,7 +2001,7 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); } /** @@ -2002,7 +2020,7 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", + return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n", (phba->cfg_enable_dss) ? "Enabled" : "Disabled", (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? "" : "Not "); @@ -2031,7 +2049,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev, uint16_t max_nr_virtfn; max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); + return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } static inline bool lpfc_rangecheck(uint val, uint min, uint max) @@ -2091,7 +2109,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ struct lpfc_hba *phba = vport->phba;\ - return snprintf(buf, PAGE_SIZE, "%d\n",\ + return scnprintf(buf, PAGE_SIZE, "%d\n",\ phba->cfg_##attr);\ } @@ -2119,7 +2137,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ struct lpfc_hba *phba = vport->phba;\ uint val = 0;\ val = phba->cfg_##attr;\ - return snprintf(buf, PAGE_SIZE, "%#x\n",\ + return scnprintf(buf, PAGE_SIZE, "%#x\n",\ phba->cfg_##attr);\ } @@ -2255,7 +2273,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ - return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ + return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\ } /** @@ -2280,7 +2298,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ { \ struct Scsi_Host *shost = class_to_shost(dev);\ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\ - return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ + return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\ } /** @@ -2551,7 +2569,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - return snprintf(buf, PAGE_SIZE, "0x%llx\n", + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwpn); } @@ -2648,7 +2666,7 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport 
*)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "0x%llx\n", + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)phba->cfg_soft_wwnn); } @@ -2714,7 +2732,7 @@ lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "0x%llx\n", + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", wwn_to_u64(phba->cfg_oas_tgt_wwpn)); } @@ -2782,7 +2800,7 @@ lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); } /** @@ -2845,7 +2863,7 @@ lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "0x%llx\n", + return scnprintf(buf, PAGE_SIZE, "0x%llx\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn)); } @@ -2916,7 +2934,7 @@ lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); } /** @@ -2980,7 +2998,7 @@ lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) return -EFAULT; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); } static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, lpfc_oas_lun_status_show, NULL); @@ -3132,7 +3150,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, if (oas_lun != NOT_OAS_ENABLED_LUN) phba->cfg_oas_flags |= OAS_LUN_VALID; - len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); return len; } @@ -3266,7 +3284,7 @@ lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf) struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; - return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); + return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max); } static DEVICE_ATTR(iocb_hw, S_IRUGO, @@ -3278,7 +3296,7 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", pring ? pring->txq_max : 0); } @@ -3292,7 +3310,7 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", pring ? 
pring->txcmplq_max : 0); } @@ -3328,7 +3346,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); + return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } /** @@ -3831,8 +3849,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, val); return -EINVAL; } - if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && - val == 4) { + if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || + phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); return -EINVAL; @@ -4254,7 +4273,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, uint32_t prev_val, if_type; if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); - if (if_type == LPFC_SLI_INTF_IF_TYPE_2 && + if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && phba->hba_flag & HBA_FORCED_LINK_SPEED) return -EPERM; @@ -4830,19 +4849,19 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, switch (phba->cfg_fcp_cpu_map) { case 0: - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: No mapping (%d)\n", phba->cfg_fcp_cpu_map); return len; case 1: - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: HBA centric mapping (%d): " "%d online CPUs\n", phba->cfg_fcp_cpu_map, phba->sli4_hba.num_online_cpu); break; case 2: - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: Driver centric mapping (%d): " "%d online CPUs\n", phba->cfg_fcp_cpu_map, @@ -4855,14 +4874,14 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, /* margin should fit in this and the truncated message */ if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "CPU %02d io_chan %02d " "physid %d coreid %d\n", phba->sli4_hba.curr_disp_cpu, cpup->channel_id, cpup->phys_id, cpup->core_id); else - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "CPU %02d io_chan %02d " "physid %d coreid %d IRQ %d\n", phba->sli4_hba.curr_disp_cpu, @@ -4875,7 +4894,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, if (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu && (len >= (PAGE_SIZE - 64))) { - len += snprintf(buf + len, PAGE_SIZE-len, "more...\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "more...\n"); break; } } @@ -6296,7 +6315,7 @@ lpfc_show_rport_##field (struct device *dev, \ { \ struct fc_rport *rport = transport_class_to_rport(dev); \ struct lpfc_rport_data *rdata = rport->hostdata; \ - return snprintf(buf, sz, format_string, \ + return scnprintf(buf, sz, format_string, \ (rdata->target) ? 
cast rdata->target->field : 0); \ } diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 90745feca80800395f6a7af4905f60a8d1eebb60..21f104c5eab6262b963475b0901e467b4289d2d6 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2221,7 +2221,7 @@ lpfc_bsg_diag_loopback_mode(struct bsg_job *job) if (phba->sli_rev < LPFC_SLI_REV4) rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); - else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); else @@ -2261,7 +2261,7 @@ lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job) if (phba->sli_rev < LPFC_SLI_REV4) return -ENODEV; - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) return -ENODEV; @@ -2353,7 +2353,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) rc = -ENODEV; goto job_error; } - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) { rc = -ENODEV; goto job_error; @@ -4419,12 +4419,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4461,6 +4455,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4509,6 +4510,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, return SLI_CONFIG_HANDLED; job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 1cbdc892ff958bc0b156595b76d12fbec5265f29..384f5cd7c3c81f3d010ceda13358db28bd778145 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) "Parse GID_FTrsp: did:x%x flg:x%x x%x", Did, ndlp->nlp_flag, vport->fc_flag); - /* Don't assume the rport is always the previous - * FC4 type. - */ - ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); - /* By default, the driver expects to support FCP FC4 */ if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; @@ -1220,7 +1215,7 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, * Name object. NPIV is not in play so this integer * value is sufficient and unique per FC-ID. 
*/ - n = snprintf(symbol, size, "%d", vport->phba->brd_no); + n = scnprintf(symbol, size, "%d", vport->phba->brd_no); return n; } @@ -1234,26 +1229,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, lpfc_decode_firmware_rev(vport->phba, fwrev, 0); - n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName); + n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName); if (size < n) return n; - n += snprintf(symbol + n, size - n, " FV%s", fwrev); + n += scnprintf(symbol + n, size - n, " FV%s", fwrev); if (size < n) return n; - n += snprintf(symbol + n, size - n, " DV%s.", + n += scnprintf(symbol + n, size - n, " DV%s.", lpfc_release_version); if (size < n) return n; - n += snprintf(symbol + n, size - n, " HN:%s.", + n += scnprintf(symbol + n, size - n, " HN:%s.", init_utsname()->nodename); if (size < n) return n; /* Note :- OS name is "Linux" */ - n += snprintf(symbol + n, size - n, " OS:%s\n", + n += scnprintf(symbol + n, size - n, " OS:%s\n", init_utsname()->sysname); return n; } @@ -1762,6 +1757,9 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 256); + /* This string MUST be consistent with other FC platforms + * supported by Broadcom. + */ strncpy(ae->un.AttrString, "Emulex Corporation", sizeof(ae->un.AttrString)); @@ -2117,10 +2115,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 32); - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ - ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */ - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ + if (vport->nvmei_support || vport->phba->nvmet_support) + ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */ + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ size = FOURBYTES + 32; ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES); @@ -2425,9 +2424,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 32); - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ + if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */ + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */ size = FOURBYTES + 32; ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index aec5b10a8c855ff9d37433afed149beddc670104..f1951c432766d1a2680e6611d0c1705c423bc6e6 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -170,7 +170,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += snprintf(buf+len, size-len, buffer, + len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { @@ -181,7 +181,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += 
snprintf(buf+len, size-len, buffer, + len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } @@ -236,7 +236,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += snprintf(buf+len, size-len, buffer, + len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } for (i = 0; i < index; i++) { @@ -247,7 +247,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) snprintf(buffer, LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n", dtp->seq_cnt, ms, dtp->fmt); - len += snprintf(buf+len, size-len, buffer, + len += scnprintf(buf+len, size-len, buffer, dtp->data1, dtp->data2, dtp->data3); } @@ -307,7 +307,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) i = lpfc_debugfs_last_hbq; - len += snprintf(buf+len, size-len, "HBQ %d Info\n", i); + len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i); hbqs = &phba->hbqs[i]; posted = 0; @@ -315,21 +315,21 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) posted++; hip = lpfc_hbq_defs[i]; - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n", hip->hbq_index, hip->profile, hip->rn, hip->buffer_count, hip->init_count, hip->add_count, posted); raw_index = phba->hbq_get[i]; getidx = le32_to_cpu(raw_index); - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n", hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx); hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt; for (j=0; jentry_count; j++) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "%03d: %08x %04x %05x ", j, le32_to_cpu(hbqe->bde.addrLow), le32_to_cpu(hbqe->bde.tus.w), @@ -341,14 +341,16 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) low = hbqs->hbqPutIdx - posted; if (low >= 0) { if ((j >= hbqs->hbqPutIdx) || (j < low)) { - len += snprintf(buf+len, size-len, "Unused\n"); + len += scnprintf(buf + len, size - len, + "Unused\n"); goto skipit; } } else { if ((j >= hbqs->hbqPutIdx) && (j < (hbqs->entry_count+low))) { - len += snprintf(buf+len, size-len, "Unused\n"); + len += scnprintf(buf + len, size - len, + "Unused\n"); goto skipit; } } @@ -358,7 +360,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); if (phys == le32_to_cpu(hbqe->bde.addrLow)) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "Buf%d: %p %06x\n", i, hbq_buf->dbuf.virt, hbq_buf->tag); found = 1; @@ -367,7 +369,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) i++; } if (!found) { - len += snprintf(buf+len, size-len, "No DMAinfo?\n"); + len += scnprintf(buf+len, size-len, "No DMAinfo?\n"); } skipit: hbqe++; @@ -413,7 +415,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) off = 0; spin_lock_irq(&phba->hbalock); - len += snprintf(buf+len, size-len, "HBA SLIM\n"); + len += scnprintf(buf+len, size-len, "HBA SLIM\n"); lpfc_memcpy_from_slim(buffer, phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024); @@ -427,7 +429,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size) i = 1024; 
while (i > 0) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -471,11 +473,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off = 0; spin_lock_irq(&phba->hbalock); - len += snprintf(buf+len, size-len, "SLIM Mailbox\n"); + len += scnprintf(buf+len, size-len, "SLIM Mailbox\n"); ptr = (uint32_t *)phba->slim2p.virt; i = sizeof(MAILBOX_t); while (i > 0) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -484,11 +486,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) off += (8 * sizeof(uint32_t)); } - len += snprintf(buf+len, size-len, "SLIM PCB\n"); + len += scnprintf(buf+len, size-len, "SLIM PCB\n"); ptr = (uint32_t *)phba->pcb; i = sizeof(PCB_t); while (i > 0) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4), *(ptr+5), *(ptr+6), *(ptr+7)); @@ -501,7 +503,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) for (i = 0; i < 4; i++) { pgpp = &phba->port_gp[i]; pring = &psli->sli3_ring[i]; - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "Ring %d: CMD GetInx:%d " "(Max:%d Next:%d " "Local:%d flg:x%x) " @@ -518,7 +520,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) word1 = readl(phba->CAregaddr); word2 = readl(phba->HSregaddr); word3 = readl(phba->HCregaddr); - len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " + len += scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " "HC:%08x\n", word0, word1, word2, word3); } spin_unlock_irq(&phba->hbalock); @@ -557,12 +559,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); outio = 0; - len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); + len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { iocnt = 0; if (!cnt) { - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "Missing Nodelist Entries\n"); break; } @@ -600,62 +602,62 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) default: statep = "UNKNOWN"; } - len += snprintf(buf+len, size-len, "%s DID:x%06x ", + len += scnprintf(buf+len, size-len, "%s DID:x%06x ", statep, ndlp->nlp_DID); - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "WWPN x%llx ", wwn_to_u64(ndlp->nlp_portname.u.wwn)); - len += snprintf(buf+len, size-len, + len += scnprintf(buf+len, size-len, "WWNN x%llx ", wwn_to_u64(ndlp->nlp_nodename.u.wwn)); if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - len += snprintf(buf+len, size-len, "RPI:%03d ", + len += scnprintf(buf+len, size-len, "RPI:%03d ", ndlp->nlp_rpi); else - len += snprintf(buf+len, size-len, "RPI:none "); - len += snprintf(buf+len, size-len, "flag:x%08x ", + len += scnprintf(buf+len, size-len, "RPI:none "); + len += scnprintf(buf+len, size-len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) - len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); + len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if 
(ndlp->nlp_type & NLP_FC_NODE) - len += snprintf(buf+len, size-len, "FC_NODE "); + len += scnprintf(buf+len, size-len, "FC_NODE "); if (ndlp->nlp_type & NLP_FABRIC) { - len += snprintf(buf+len, size-len, "FABRIC "); + len += scnprintf(buf+len, size-len, "FABRIC "); iocnt = 0; } if (ndlp->nlp_type & NLP_FCP_TARGET) - len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", + len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ", ndlp->nlp_sid); if (ndlp->nlp_type & NLP_FCP_INITIATOR) - len += snprintf(buf+len, size-len, "FCP_INITIATOR "); + len += scnprintf(buf+len, size-len, "FCP_INITIATOR "); if (ndlp->nlp_type & NLP_NVME_TARGET) - len += snprintf(buf + len, + len += scnprintf(buf + len, size - len, "NVME_TGT sid:%d ", NLP_NO_SID); if (ndlp->nlp_type & NLP_NVME_INITIATOR) - len += snprintf(buf + len, + len += scnprintf(buf + len, size - len, "NVME_INITIATOR "); - len += snprintf(buf+len, size-len, "usgmap:%x ", + len += scnprintf(buf+len, size-len, "usgmap:%x ", ndlp->nlp_usg_map); - len += snprintf(buf+len, size-len, "refcnt:%x", + len += scnprintf(buf+len, size-len, "refcnt:%x", kref_read(&ndlp->kref)); if (iocnt) { i = atomic_read(&ndlp->cmd_pending); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, " OutIO:x%x Qdepth x%x", i, ndlp->cmd_qdepth); outio += i; } - len += snprintf(buf+len, size-len, "\n"); + len += scnprintf(buf+len, size-len, "\n"); } spin_unlock_irq(shost->host_lock); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\nOutstanding IO x%x\n", outio); if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\nNVME Targetport Entry ...\n"); /* Port state is only one of two values for now. */ @@ -663,18 +665,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) statep = "REGISTERED"; else statep = "INIT"; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "TGT WWNN x%llx WWPN x%llx State %s\n", wwn_to_u64(vport->fc_nodename.u.wwn), wwn_to_u64(vport->fc_portname.u.wwn), statep); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, " Targetport DID x%06x\n", phba->targetport->port_id); goto out_exit; } - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\nNVME Lport/Rport Entries ...\n"); localport = vport->localport; @@ -689,17 +691,19 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) else statep = "UNKNOWN "; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Lport DID x%06x PortState %s\n", localport->port_id, statep); - len += snprintf(buf + len, size - len, "\tRport List:\n"); + len += scnprintf(buf + len, size - len, "\tRport List:\n"); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { /* local short-hand pointer. */ spin_lock(&phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; + else + nrport = NULL; spin_unlock(&phba->hbalock); if (!nrport) continue; @@ -718,32 +722,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) } /* Tab in to show lport ownership. 
*/ - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\t%s Port ID:x%06x ", statep, nrport->port_id); - len += snprintf(buf + len, size - len, "WWPN x%llx ", + len += scnprintf(buf + len, size - len, "WWPN x%llx ", nrport->port_name); - len += snprintf(buf + len, size - len, "WWNN x%llx ", + len += scnprintf(buf + len, size - len, "WWNN x%llx ", nrport->node_name); /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "INITIATOR "); if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "TARGET "); if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "DISCSRVC "); if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | FC_PORT_ROLE_NVME_DISCOVERY)) - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "UNKNOWN ROLE x%x", nrport->port_role); /* Terminate the string. */ - len += snprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); } spin_unlock_irq(shost->host_lock); @@ -782,35 +786,35 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) if (!phba->targetport) return len; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\nNVME Targetport Statistics\n"); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "LS: Rcv %08x Drop %08x Abort %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_drop), atomic_read(&tgtp->xmt_ls_abort)); if (atomic_read(&tgtp->rcv_ls_req_in) != atomic_read(&tgtp->rcv_ls_req_out)) { - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Rcv LS: in %08x != out %08x\n", atomic_read(&tgtp->rcv_ls_req_in), atomic_read(&tgtp->rcv_ls_req_out)); } - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "LS: Xmt %08x Drop %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_ls_rsp), atomic_read(&tgtp->xmt_ls_drop), atomic_read(&tgtp->xmt_ls_rsp_cmpl)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "LS: RSP Abort %08x xb %08x Err %08x\n", atomic_read(&tgtp->xmt_ls_rsp_aborted), atomic_read(&tgtp->xmt_ls_rsp_xb_set), atomic_read(&tgtp->xmt_ls_rsp_error)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "FCP: Rcv %08x Defer %08x Release %08x " "Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), @@ -820,13 +824,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) if (atomic_read(&tgtp->rcv_fcp_cmd_in) != atomic_read(&tgtp->rcv_fcp_cmd_out)) { - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Rcv FCP: in %08x != out %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_out)); } - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "FCP Rsp: read %08x readrsp %08x " "write %08x rsp %08x\n", atomic_read(&tgtp->xmt_fcp_read), @@ -834,31 +838,31 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&tgtp->xmt_fcp_write), atomic_read(&tgtp->xmt_fcp_rsp)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "FCP Rsp Cmpl: 
%08x err %08x drop %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_cmpl), atomic_read(&tgtp->xmt_fcp_rsp_error), atomic_read(&tgtp->xmt_fcp_rsp_drop)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_aborted), atomic_read(&tgtp->xmt_fcp_rsp_xb_set), atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "ABORT: Xmt %08x Cmpl %08x\n", atomic_read(&tgtp->xmt_fcp_abort), atomic_read(&tgtp->xmt_fcp_abort_cmpl)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", atomic_read(&tgtp->xmt_abort_sol), atomic_read(&tgtp->xmt_abort_unsol), atomic_read(&tgtp->xmt_abort_rsp), atomic_read(&tgtp->xmt_abort_rsp_error)); - len += snprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, "\n"); cnt = 0; spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); @@ -869,7 +873,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) } spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); if (cnt) { - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "ABORT: %d ctx entries\n", cnt); spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, @@ -877,7 +881,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) list) { if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) break; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Entry: oxid %x state %x " "flag %x\n", ctxp->oxid, ctxp->state, @@ -891,7 +895,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) tot += atomic_read(&tgtp->xmt_fcp_release); tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "IO_CTX: %08x WAIT: cur %08x tot %08x\n" "CTX Outstanding %08llx\n", phba->sli4_hba.nvmet_xri_cnt, @@ -909,10 +913,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) if (!lport) return len; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "\nNVME Lport Statistics\n"); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "LS: Xmt %016x Cmpl %016x\n", atomic_read(&lport->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsCmpls)); @@ -936,20 +940,20 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) if (i >= 32) continue; - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "FCP (%d): Rd %016llx Wr %016llx " "IO %016llx ", i, data1, data2, data3); - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Cmpl %016llx OutIO %016llx\n", tot, ((data1 + data2 + data3) - tot)); } - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "Total FCP Cmpl %016llx Issue %016llx " "OutIO %016llx\n", totin, totout, totout - totin); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "LS Xmt Err: Abrt %08x Err %08x " "Cmpl Err: xb %08x Err %08x\n", atomic_read(&lport->xmt_ls_abort), @@ -957,7 +961,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&lport->cmpl_ls_xb), atomic_read(&lport->cmpl_ls_err)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, 
"FCP Xmt Err: noxri %06x nondlp %06x " "qdepth %06x wqerr %06x err %06x Abrt %06x\n", atomic_read(&lport->xmt_fcp_noxri), @@ -967,7 +971,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&lport->xmt_fcp_err), atomic_read(&lport->xmt_fcp_abort)); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "FCP Cmpl Err: xb %08x Err %08x\n", atomic_read(&lport->cmpl_fcp_xb), atomic_read(&lport->cmpl_fcp_err)); @@ -999,58 +1003,58 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) if (phba->nvmet_support == 0) { /* NVME Initiator */ - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "ktime %s: Total Samples: %lld\n", (phba->ktime_on ? "Enabled" : "Disabled"), phba->ktime_data_samples); if (phba->ktime_data_samples == 0) return len; - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 1: Last NVME Cmd cmpl " "done -to- Start of next NVME cnd (in driver)\n"); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg1_total, phba->ktime_data_samples), phba->ktime_seg1_min, phba->ktime_seg1_max); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 2: Driver start of NVME cmd " "-to- Firmware WQ doorbell\n"); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg2_total, phba->ktime_data_samples), phba->ktime_seg2_min, phba->ktime_seg2_max); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 3: Firmware WQ doorbell -to- " "MSI-X ISR cmpl\n"); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg3_total, phba->ktime_data_samples), phba->ktime_seg3_min, phba->ktime_seg3_max); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "Segment 4: MSI-X ISR cmpl -to- " "NVME cmpl done\n"); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg4_total, phba->ktime_data_samples), phba->ktime_seg4_min, phba->ktime_seg4_max); - len += snprintf( + len += scnprintf( buf + len, PAGE_SIZE - len, "Total IO avg time: %08lld\n", div_u64(phba->ktime_seg1_total + @@ -1062,7 +1066,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) } /* NVME Target */ - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "ktime %s: Total Samples: %lld %lld\n", (phba->ktime_on ? 
"Enabled" : "Disabled"), phba->ktime_data_samples, @@ -1070,46 +1074,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) if (phba->ktime_data_samples == 0) return len; - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 1: MSI-X ISR Rcv cmd -to- " "cmd pass to NVME Layer\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg1_total, phba->ktime_data_samples), phba->ktime_seg1_min, phba->ktime_seg1_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 2: cmd pass to NVME Layer- " "-to- Driver rcv cmd OP (action)\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg2_total, phba->ktime_data_samples), phba->ktime_seg2_min, phba->ktime_seg2_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 3: Driver rcv cmd OP -to- " "Firmware WQ doorbell: cmd\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg3_total, phba->ktime_data_samples), phba->ktime_seg3_min, phba->ktime_seg3_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 4: Firmware WQ doorbell: cmd " "-to- MSI-X ISR for cmd cmpl\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg4_total, phba->ktime_data_samples), phba->ktime_seg4_min, phba->ktime_seg4_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 5: MSI-X ISR for cmd cmpl " "-to- NVME layer passed cmd done\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg5_total, phba->ktime_data_samples), @@ -1117,10 +1121,10 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) phba->ktime_seg5_max); if (phba->ktime_status_samples == 0) { - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Total: cmd received by MSI-X ISR " "-to- cmd completed on wire\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld " "max %08lld\n", div_u64(phba->ktime_seg10_total, @@ -1130,46 +1134,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size) return len; } - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 6: NVME layer passed cmd done " "-to- Driver rcv rsp status OP\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg6_total, phba->ktime_status_samples), phba->ktime_seg6_min, phba->ktime_seg6_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 7: Driver rcv rsp status OP " "-to- Firmware WQ doorbell: status\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg7_total, phba->ktime_status_samples), phba->ktime_seg7_min, phba->ktime_seg7_max); - len += snprintf(buf + len, PAGE_SIZE-len, + 
len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 8: Firmware WQ doorbell: status" " -to- MSI-X ISR for status cmpl\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg8_total, phba->ktime_status_samples), phba->ktime_seg8_min, phba->ktime_seg8_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Segment 9: MSI-X ISR for status cmpl " "-to- NVME layer passed status done\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg9_total, phba->ktime_status_samples), phba->ktime_seg9_min, phba->ktime_seg9_max); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "Total: cmd received by MSI-X ISR -to- " "cmd completed on wire\n"); - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "avg:%08lld min:%08lld max %08lld\n", div_u64(phba->ktime_seg10_total, phba->ktime_status_samples), @@ -1204,7 +1208,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) (phba->nvmeio_trc_size - 1); skip = phba->nvmeio_trc_output_idx; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "%s IO Trace %s: next_idx %d skip %d size %d\n", (phba->nvmet_support ? "NVME" : "NVMET"), (state ? "Enabled" : "Disabled"), @@ -1226,18 +1230,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) if (!dtp->fmt) continue; - len += snprintf(buf + len, size - len, dtp->fmt, + len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { phba->nvmeio_trc_output_idx = 0; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Trace Complete\n"); goto out; } if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Trace Continue (%d of %d)\n", phba->nvmeio_trc_output_idx, phba->nvmeio_trc_size); @@ -1255,18 +1259,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) if (!dtp->fmt) continue; - len += snprintf(buf + len, size - len, dtp->fmt, + len += scnprintf(buf + len, size - len, dtp->fmt, dtp->data1, dtp->data2, dtp->data3); if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) { phba->nvmeio_trc_output_idx = 0; - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Trace Complete\n"); goto out; } if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) { - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Trace Continue (%d of %d)\n", phba->nvmeio_trc_output_idx, phba->nvmeio_trc_size); @@ -1274,7 +1278,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size) } } - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Trace Done\n"); out: return len; @@ -1306,39 +1310,39 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) if (phba->nvmet_support == 0) { /* NVME Initiator */ - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "CPUcheck %s\n", (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? 
"Enabled" : "Disabled")); for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { if (i >= LPFC_CHECK_CPU_CNT) break; - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "%02d: xmit x%08x cmpl x%08x\n", i, phba->cpucheck_xmt_io[i], phba->cpucheck_cmpl_io[i]); tot_xmt += phba->cpucheck_xmt_io[i]; tot_cmpl += phba->cpucheck_cmpl_io[i]; } - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "tot:xmit x%08x cmpl x%08x\n", tot_xmt, tot_cmpl); return len; } /* NVME Target */ - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "CPUcheck %s ", (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? "IO Enabled - " : "IO Disabled - ")); - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? "Rcv Enabled\n" : "Rcv Disabled\n")); for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { if (i >= LPFC_CHECK_CPU_CNT) break; - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "%02d: xmit x%08x ccmpl x%08x " "cmpl x%08x rcv x%08x\n", i, phba->cpucheck_xmt_io[i], @@ -1350,7 +1354,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) tot_cmpl += phba->cpucheck_cmpl_io[i]; tot_ccmpl += phba->cpucheck_ccmpl_io[i]; } - len += snprintf(buf + len, PAGE_SIZE - len, + len += scnprintf(buf + len, PAGE_SIZE - len, "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); return len; @@ -1795,28 +1799,29 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, int cnt = 0; if (dent == phba->debug_writeGuard) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt); else if (dent == phba->debug_writeApp) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt); else if (dent == phba->debug_writeRef) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt); else if (dent == phba->debug_readGuard) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt); else if (dent == phba->debug_readApp) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt); else if (dent == phba->debug_readRef) - cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); + cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt); else if (dent == phba->debug_InjErrNPortID) - cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid); + cnt = scnprintf(cbuf, 32, "0x%06x\n", + phba->lpfc_injerr_nportid); else if (dent == phba->debug_InjErrWWPN) { memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)); tmp = cpu_to_be64(tmp); - cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp); + cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp); } else if (dent == phba->debug_InjErrLBA) { if (phba->lpfc_injerr_lba == (sector_t)(-1)) - cnt = snprintf(cbuf, 32, "off\n"); + cnt = scnprintf(cbuf, 32, "off\n"); else - cnt = snprintf(cbuf, 32, "0x%llx\n", + cnt = scnprintf(cbuf, 32, "0x%llx\n", (uint64_t) phba->lpfc_injerr_lba); } else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -2622,17 +2627,17 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, switch (count) { case SIZE_U8: /* 
byte (8 bits) */ pci_read_config_byte(pdev, where, &u8val); - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %02x\n", where, u8val); break; case SIZE_U16: /* word (16 bits) */ pci_read_config_word(pdev, where, &u16val); - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %04x\n", where, u16val); break; case SIZE_U32: /* double word (32 bits) */ pci_read_config_dword(pdev, where, &u32val); - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %08x\n", where, u32val); break; case LPFC_PCI_CFG_BROWSE: /* browse all */ @@ -2652,25 +2657,25 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, offset = offset_label; /* Read PCI config space */ - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: ", offset_label); while (index > 0) { pci_read_config_dword(pdev, offset, &u32val); - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%08x ", u32val); offset += sizeof(uint32_t); if (offset >= LPFC_PCI_CFG_SIZE) { - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n"); break; } index -= sizeof(uint32_t); if (!index) - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); - len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "\n%03x: ", offset_label); } } @@ -2941,7 +2946,7 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, if (acc_range == SINGLE_WORD) { offset_run = offset; u32val = readl(mem_mapped_bar + offset_run); - len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%05x: %08x\n", offset_run, u32val); } else goto baracc_browse; @@ -2955,35 +2960,35 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes, offset_run = offset_label; /* Read PCI bar memory mapped space */ - len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%05x: ", offset_label); index = LPFC_PCI_BAR_RD_SIZE; while (index > 0) { u32val = readl(mem_mapped_bar + offset_run); - len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "%08x ", u32val); offset_run += sizeof(uint32_t); if (acc_range == LPFC_PCI_BAR_BROWSE) { if (offset_run >= bar_size) { - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); break; } } else { if (offset_run >= offset + (acc_range * sizeof(uint32_t))) { - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); break; } } index -= sizeof(uint32_t); if (!index) - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n"); else if (!(index % (8 * sizeof(uint32_t)))) { offset_label += (8 * sizeof(uint32_t)); - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n%05x: ", offset_label); } @@ -3156,19 +3161,19 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, if (!qp) return len; - len += snprintf(pbuffer + len, 
LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\t%s WQ info: ", wqtype); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->entry_repost); - len += snprintf(pbuffer + len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3206,21 +3211,21 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, if (!qp) return len; - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t%s CQ info: ", cqtype); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x " "xabt:x%x wq:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->entry_repost); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3232,19 +3237,19 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, if (!qp || !datqp) return len; - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\t%s RQ info: ", rqtype); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " "posted:x%x rcv:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->entry_repost); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", datqp->queue_id, datqp->entry_count, @@ -3329,17 +3334,17 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, if (!qp) return len; - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n", eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4, qp->q_mode); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + 
len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, qp->hba_index, qp->entry_repost); - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; } @@ -3397,7 +3402,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, if (phba->cfg_fof == 0) phba->lpfc_idiag_last_eq = 0; - len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "EQ %d out of %d HBA EQs\n", x, phba->io_channel_irqs); @@ -3510,7 +3515,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); too_big: - len += snprintf(pbuffer + len, + len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n"); out: spin_unlock_irq(&phba->hbalock); @@ -3566,22 +3571,22 @@ lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, return 0; esize = pque->entry_size; - len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "QE-INDEX[%04d]:\n", index); offset = 0; pentry = pque->qe[index].address; while (esize > 0) { - len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "%08x ", *pentry); pentry++; offset += sizeof(uint32_t); esize -= sizeof(uint32_t); if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) - len += snprintf(pbuffer+len, + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); } - len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); return len; } @@ -3987,27 +3992,27 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, switch (drbregid) { case LPFC_DRB_EQ: - len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len, "EQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.EQDBregaddr)); break; case LPFC_DRB_CQ: - len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, + len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len, "CQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.CQDBregaddr)); break; case LPFC_DRB_MQ: - len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "MQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.MQDBregaddr)); break; case LPFC_DRB_WQ: - len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "WQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.WQDBregaddr)); break; case LPFC_DRB_RQ: - len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, "RQ-DRB-REG: 0x%08x\n", readl(phba->sli4_hba.RQDBregaddr)); break; @@ -4197,37 +4202,37 @@ lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer, switch (ctlregid) { case LPFC_CTL_PORT_SEM: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port SemReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_SEM_OFFSET)); break; case LPFC_CTL_PORT_STA: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port StaReg: 0x%08x\n", 
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_STA_OFFSET)); break; case LPFC_CTL_PORT_CTL: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_CTL_OFFSET)); break; case LPFC_CTL_PORT_ER1: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port Er1Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER1_OFFSET)); break; case LPFC_CTL_PORT_ER2: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "Port Er2Reg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PORT_ER2_OFFSET)); break; case LPFC_CTL_PDEV_CTL: - len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len, "PDev CtlReg: 0x%08x\n", readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET)); @@ -4420,13 +4425,13 @@ lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer) mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX]; mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX]; - len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_dump_map: 0x%08x\n", mbx_dump_map); - len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_dump_cnt: %04d\n", mbx_dump_cnt); - len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_word_cnt: %04d\n", mbx_word_cnt); - len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len, "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd); return len; @@ -4575,35 +4580,35 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len) { uint16_t ext_cnt, ext_size; - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nAvailable Extents Information:\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available VPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available VFI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available RPI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tPort Available XRI extents: "); lpfc_sli4_get_avail_extnt_rsrc(phba, 
LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Count %3d, Size %3d\n", ext_cnt, ext_size); return len; @@ -4627,55 +4632,55 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len) uint16_t ext_cnt, ext_size; int rc; - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nAllocated Extents Information:\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated VPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI, &ext_cnt, &ext_size); if (!rc) - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated VFI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI, &ext_cnt, &ext_size); if (!rc) - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated RPI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI, &ext_cnt, &ext_size); if (!rc) - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tHost Allocated XRI extents: "); rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI, &ext_cnt, &ext_size); if (!rc) - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "Port %d Extent %3d, Size %3d\n", phba->brd_no, ext_cnt, ext_size); else - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "N/A\n"); return len; @@ -4699,49 +4704,49 @@ lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len) struct lpfc_rsrc_blks *rsrc_blks; int index; - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\nDriver Extents Information:\n"); - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tVPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) { - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += snprintf(pbuffer+len, 
LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tVFI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list, list) { - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tRPI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list, list) { - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); index++; } - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\tXRI extents:\n"); index = 0; list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list, list) { - len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, + len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len, "\t\tBlock %3d: Start %4d, Count %4d\n", index, rsrc_blks->rsrc_start, rsrc_blks->rsrc_size); @@ -5135,11 +5140,11 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp, if (i != 0) pr_err("%s\n", line_buf); len = 0; - len += snprintf(line_buf+len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } - len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%08x ", (uint32_t)*pword); pword++; } @@ -5202,11 +5207,11 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); - len += snprintf(line_buf+len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } - len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%08x ", ((uint32_t)*pword) & 0xffffffff); pword++; @@ -5225,18 +5230,18 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox) pr_err("%s\n", line_buf); len = 0; memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ); - len += snprintf(line_buf+len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%03d: ", i); } for (j = 0; j < 4; j++) { - len += snprintf(line_buf+len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, "%02x", ((uint8_t)*pbyte) & 0xff); pbyte++; } - len += snprintf(line_buf+len, + len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len, " "); } if ((i - 1) % 8) diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 30efc7bf91bd93d97c5ccc4938622fc56bd991b7..824de3e410ca21d34ec715e01ded1272632de8f0 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -342,7 +342,7 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) pword = q->qe[idx].address; len = 0; - len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx); + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx); if (qe_word_cnt > 8) printk(KERN_ERR "%s\n", line_buf); @@ -353,11 +353,11 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) if (qe_word_cnt > 8) { len = 0; memset(line_buf, 0, LPFC_LBUF_SZ); - len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%03d: ", i); } } - len 
+= snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ", + len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ", ((uint32_t)*pword) & 0xffffffff); pword++; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4dda969e947cc33036e81de54b3f6a7c2277f4f9..a6eae7c31bf316e1d2c04cc6a2494841449f4ab1 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->ulpCommand = CMD_ELS_REQUEST64_CR; if (elscmd == ELS_CMD_FLOGI) icmd->ulpTimeout = FF_DEF_RATOV * 2; + else if (elscmd == ELS_CMD_LOGO) + icmd->ulpTimeout = phba->fc_ratov; else icmd->ulpTimeout = phba->fc_ratov * 2; } else { @@ -1055,9 +1057,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto flogifail; lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, - "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n", + "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n", irsp->ulpStatus, irsp->un.ulpWord[4], - irsp->ulpTimeout); + cmdiocb->sli4_xritag, irsp->ulpTimeout); /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); @@ -1111,7 +1113,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0101 FLOGI completes successfully, I/O tag:x%x, " - "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, + "xri x%x Data: x%x x%x x%x x%x x%x %x\n", + cmdiocb->iotag, cmdiocb->sli4_xritag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, vport->port_state, vport->fc_flag); @@ -1155,6 +1158,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } if (!rc) { @@ -1169,6 +1173,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } } @@ -1338,6 +1343,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) Fabric_DID); pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; /* * Check the txcmplq for an iocb that matches the nport the driver is @@ -1551,8 +1558,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, */ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); + /* return immediately if the WWPN matches ndlp */ if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) return ndlp; + if (phba->sli_rev == LPFC_SLI_REV4) { active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL); @@ -1561,9 +1570,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->cfg_rrq_xri_bitmap_sz); } - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", - ndlp, ndlp->nlp_DID, new_ndlp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, + "3178 PLOGI confirm: ndlp x%x x%x x%x: " + "new_ndlp x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, + (new_ndlp ? new_ndlp->nlp_DID : 0), + (new_ndlp ? new_ndlp->nlp_flag : 0), + (new_ndlp ? 
new_ndlp->nlp_fc4_type : 0)); if (!new_ndlp) { rc = memcmp(&ndlp->nlp_portname, name, @@ -1612,6 +1625,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->cfg_rrq_xri_bitmap_sz); } + /* At this point in this routine, we know new_ndlp will be + * returned. however, any previous GID_FTs that were done + * would have updated nlp_fc4_type in ndlp, so we must ensure + * new_ndlp has the right value. + */ + if (vport->fc_flag & FC_FABRIC) + new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; + lpfc_unreg_rpi(vport, new_ndlp); new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; @@ -1661,7 +1682,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (ndlp->nrport) { ndlp->nrport = NULL; lpfc_nlp_put(ndlp); - new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; } /* We shall actually free the ndlp with both nlp_DID and @@ -1735,6 +1755,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, active_rrqs_xri_bitmap) mempool_free(active_rrqs_xri_bitmap, phba->active_rrq_pool); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, + "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + new_ndlp->nlp_DID, new_ndlp->nlp_flag, + new_ndlp->nlp_fc4_type); + return new_ndlp; } @@ -2682,16 +2708,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } + /* The LOGO will not be retried on failure. A LOGO was + * issued to the remote rport and a ACC or RJT or no Answer are + * all acceptable. Note the failure and move forward with + * discovery. The PLOGI will retry. + */ if (irsp->ulpStatus) { - /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { - /* ELS command is being retried */ - skip_recovery = 1; - goto out; - } /* LOGO failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "2756 LOGO failure DID:%06X Status:x%x/x%x\n", + "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ @@ -2737,7 +2762,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * For any other port type, the rpi is unregistered as an implicit * LOGO. */ - if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) { + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && + skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irqsave(shost->host_lock, flags); ndlp->nlp_flag |= NLP_NPR_2B_DISC; @@ -2770,6 +2796,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * will be stored into the context1 field of the IOCB for the completion * callback function to the LOGO ELS command. * + * Callers of this routine are expected to unregister the RPI first + * * Return code * 0 - successfully issued logo * 1 - failed to issue logo @@ -2811,22 +2839,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "Issue LOGO: did:x%x", ndlp->nlp_DID, 0, 0); - /* - * If we are issuing a LOGO, we may try to recover the remote NPort - * by issuing a PLOGI later. Even though we issue ELS cmds by the - * VPI, if we have a valid RPI, and that RPI gets unreg'ed while - * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI - * for that ELS cmd. To avoid this situation, lets get rid of the - * RPI right now, before any ELS cmds are sent. 
- */ - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_ISSUE_LOGO; - spin_unlock_irq(shost->host_lock); - if (lpfc_unreg_rpi(vport, ndlp)) { - lpfc_els_free_iocb(phba, elsiocb); - return 0; - } - phba->fc_stat.elsXmitLOGO++; elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; spin_lock_irq(shost->host_lock); @@ -2834,7 +2846,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; spin_unlock_irq(shost->host_lock); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); - if (rc == IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; @@ -2842,6 +2853,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_free_iocb(phba, elsiocb); return 1; } + + spin_lock_irq(shost->host_lock); + ndlp->nlp_prev_state = ndlp->nlp_state; + spin_unlock_irq(shost->host_lock); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); return 0; } @@ -3157,23 +3173,23 @@ lpfc_els_retry_delay(struct timer_list *t) unsigned long flags; struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; + /* Hold a node reference for outstanding queued work */ + if (!lpfc_nlp_get(ndlp)) + return; + spin_lock_irqsave(&phba->hbalock, flags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_nlp_put(ndlp); return; } - /* We need to hold the node by incrementing the reference - * count until the queued work is done - */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); - if (evtp->evt_arg1) { - evtp->evt = LPFC_EVT_ELS_RETRY; - list_add_tail(&evtp->evt_listp, &phba->work_list); - lpfc_worker_wake_up(phba); - } + evtp->evt_arg1 = ndlp; + evtp->evt = LPFC_EVT_ELS_RETRY; + list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); - return; + + lpfc_worker_wake_up(phba); } /** @@ -4094,7 +4110,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, mempool_free(mbox, phba->mbox_mem_pool); } out: - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); spin_unlock_irq(shost->host_lock); @@ -4272,14 +4288,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - /* Xmit ELS ACC response tag */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0128 Xmit ELS ACC response tag x%x, XRI: x%x, " - "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x " - "fc_flag x%x\n", - elsiocb->iotag, elsiocb->iocb.ulpContext, - ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi, vport->fc_flag); if (ndlp->nlp_flag & NLP_LOGO_ACC) { spin_lock_irq(shost->host_lock); if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || @@ -4448,6 +4456,15 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_els_free_iocb(phba, elsiocb); return 1; } + + /* Xmit ELS ACC response tag */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "RPI: x%x, fc_flag x%x\n", + rc, elsiocb->iotag, elsiocb->sli4_xritag, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, vport->fc_flag); return 0; } @@ -5542,7 +5559,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; if (phba->sli_rev < LPFC_SLI_REV4 || - bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2) { 
rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; @@ -5701,6 +5718,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) + stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitLSRJT++; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); @@ -6455,6 +6475,11 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + + /* Acking an unsol FLOGI. Count 1 for link bounce + * work-around. + */ + vport->rcv_flogi_cnt++; spin_unlock_irq(shost->host_lock); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3311 Rcv Flogi PS x%x new PS x%x " @@ -7099,7 +7124,10 @@ int lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) { struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, - rrq->nlp_DID); + rrq->nlp_DID); + if (!ndlp) + return 1; + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) return lpfc_issue_els_rrq(rrq->vport, ndlp, rrq->nlp_DID, rrq); @@ -7849,8 +7877,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct ls_rjt stat; uint32_t *payload; uint32_t cmd, did, newnode; - uint8_t rjt_exp, rjt_err = 0; + uint8_t rjt_exp, rjt_err = 0, init_link = 0; IOCB_t *icmd = &elsiocb->iocb; + LPFC_MBOXQ_t *mbox; if (!vport || !(elsiocb->context2)) goto dropit; @@ -7999,6 +8028,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; + + /* If the driver believes fabric discovery is done and is ready, + * bounce the link. There is some descrepancy. + */ + if (vport->port_state >= LPFC_LOCAL_CFG_LINK && + vport->fc_flag & FC_PT2PT && + vport->rcv_flogi_cnt >= 1) { + rjt_err = LSRJT_LOGICAL_BSY; + rjt_exp = LSEXP_NOTHING_MORE; + init_link++; + goto lsrjt; + } + lpfc_els_rcv_flogi(vport, elsiocb, ndlp); if (newnode) lpfc_nlp_put(ndlp); @@ -8227,6 +8269,27 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_nlp_put(elsiocb->context1); elsiocb->context1 = NULL; + + /* Special case. Driver received an unsolicited command that + * unsupportable given the driver's current state. Reset the + * link and start over. 
+ */ + if (init_link) { + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return; + lpfc_linkdown(phba); + lpfc_init_link(phba, mbox, + phba->cfg_topology, + phba->cfg_link_speed); + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == + MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + } + return; dropit: @@ -9502,7 +9565,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, "rport in state 0x%x\n", ndlp->nlp_state); return; } - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, + LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " "flags 0x%x\n", @@ -9515,8 +9579,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(shost->host_lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(shost->host_lock, flags); - lpfc_issue_els_logo(vport, ndlp, 0); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); + lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index eb71877f12f8b712394d92150d7a35dc02a28702..4949f4870c8a08a0d31b02838c15167d1349a1c3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -183,7 +183,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_unlock_irq(&phba->hbalock); lpfc_worker_wake_up(phba); + return; } spin_unlock_irq(&phba->hbalock); @@ -921,7 +923,11 @@ lpfc_linkdown(struct lpfc_hba *phba) } } lpfc_destroy_vport_work_array(phba, vports); - /* Clean up any firmware default rpi's */ + + /* Clean up any SLI3 firmware default rpi's */ + if (phba->sli_rev > LPFC_SLI_REV3) + goto skip_unreg_did; + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mb) { lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); @@ -933,6 +939,7 @@ lpfc_linkdown(struct lpfc_hba *phba) } } + skip_unreg_did: /* Setup myDID for link up if we are in pt2pt mode */ if (phba->pport->fc_flag & FC_PT2PT) { mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -947,6 +954,7 @@ lpfc_linkdown(struct lpfc_hba *phba) } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); + phba->pport->rcv_flogi_cnt = 0; spin_unlock_irq(shost->host_lock); } return 0; @@ -1018,6 +1026,7 @@ lpfc_linkup(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); phba->link_state = LPFC_LINK_UP; @@ -1031,6 +1040,13 @@ lpfc_linkup(struct lpfc_hba *phba) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); + /* Clear the pport flogi counter in case the link down was + * absorbed without an ACQE. No lock here - in worker thread + * and discovery is synchronized. 
+ */ + spin_lock_irq(shost->host_lock); + phba->pport->rcv_flogi_cnt = 0; + spin_unlock_irq(shost->host_lock); return 0; } @@ -1992,6 +2008,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) "failover and change port state:x%x/x%x\n", phba->pport->port_state, LPFC_VPORT_UNKNOWN); phba->pport->port_state = LPFC_VPORT_UNKNOWN; + + if (!phba->fcf.fcf_redisc_attempted) { + lpfc_unregister_fcf(phba); + + rc = lpfc_sli4_redisc_fcf_table(phba); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3195 Rediscover FCF table\n"); + phba->fcf.fcf_redisc_attempted = 1; + lpfc_sli4_clear_fcf_rr_bmask(phba); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3196 Rediscover FCF table " + "failed. Status:x%x\n", rc); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3197 Already rediscover FCF table " + "attempted. No more retry\n"); + } goto stop_flogi_current_fcf; } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, @@ -4193,7 +4229,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_MAPPED_NODE || new_state == NLP_STE_UNMAPPED_NODE) { - if (ndlp->nlp_fc4_type & NLP_FC4_FCP || + if (ndlp->nlp_fc4_type || ndlp->nlp_DID == Fabric_DID || ndlp->nlp_DID == NameServer_DID || ndlp->nlp_DID == FDMI_DID) { @@ -4746,7 +4782,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (phba->sli_rev == LPFC_SLI_REV4 && (!(vport->load_flag & FC_UNLOADING)) && (bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf) == + &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) && (kref_read(&ndlp->kref) > 0)) { mbox->context1 = lpfc_nlp_get(ndlp); @@ -4855,6 +4891,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport) LPFC_MBOXQ_t *mbox; int rc; + /* Unreg DID is an SLI3 operation. */ + if (phba->sli_rev > LPFC_SLI_REV3) + return; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, @@ -5193,9 +5233,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) /* If we've already received a PLOGI from this NPort * we don't need to try to discover it again. 
*/ - if (ndlp->nlp_flag & NLP_RCV_PLOGI) + if (ndlp->nlp_flag & NLP_RCV_PLOGI && + !(ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) return NULL; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -6400,7 +6445,9 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) if (rc) return; /* Reset HBA FCF states after successful unregister FCF */ + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag = 0; + spin_unlock_irq(&phba->hbalock); phba->fcf.current_rec.flag = 0; /* diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f3cae733ae2dffea29e37ff562853c2260f9b681..57510a831735bc4d30069fee23f2d33930c08657 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba) sizeof(phba->wwpn)); } - phba->sli3_options = 0x0; + /* + * Clear all option bits except LPFC_SLI3_BG_ENABLED, + * which was already set in lpfc_get_cfgparam() + */ + phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; /* Setup and issue mailbox READ REV command */ lpfc_read_rev(phba, pmb); @@ -1797,7 +1801,12 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); - lpfc_sli_brdrestart(phba); + rc = lpfc_sli_brdrestart(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6309 Failed to restart board\n"); + return rc; + } /* request and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { @@ -5040,7 +5049,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, break; } /* If fast FCF failover rescan event is pending, do nothing */ - if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { + if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); break; } @@ -7757,6 +7766,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); phba->sli4_hba.max_cfg_param.max_vpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + /* Limit the max we support */ + if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) + phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; phba->sli4_hba.max_cfg_param.vpi_base = bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); phba->sli4_hba.max_cfg_param.max_rpi = diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index deb094fdbb793ecc21b09989a8144ca0f6a0322b..e6bf5e8bc76700eb00531e53ba7136f9a5633b22 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -513,9 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba, break; } - if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && - mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { - /* Failover is not tried for Lancer G6 */ + if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || + phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bd9bce9d9974aedbcc6c2f373a0b30d241cddb0f..22e336cb2284785601d71ba190846080bf93bf47 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -483,8 +483,10 @@ 
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * single discovery thread, this will cause a huge delay in * discovery. Also this will cause multiple state machines * running in parallel for this node. + * This only applies to a fabric environment. */ - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && + (vport->fc_flag & FC_FABRIC)) { /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); } @@ -599,8 +601,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Save the ELS cmd */ elsiocb->drvrTimeout = cmd; - lpfc_sli4_resume_rpi(ndlp, - lpfc_mbx_cmpl_resume_rpi, elsiocb); + if (lpfc_sli4_resume_rpi(ndlp, + lpfc_mbx_cmpl_resume_rpi, + elsiocb)) + kfree(elsiocb); goto out; } } @@ -836,22 +840,27 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); return 0; } if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && - (ndlp->nlp_type & NLP_FCP_TARGET))) { + (ndlp->nlp_type & NLP_FCP_TARGET)))) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); return 1; } } + + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -2318,6 +2327,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2395,6 +2405,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(shost->host_lock); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; @@ -2652,6 +2663,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(shost->host_lock); return ndlp->nlp_state; } @@ -2860,8 +2872,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event on NPort in state */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d Data: x%x\n", - evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); + "state %d Data: x%x x%x\n", + evt, ndlp->nlp_DID, cur_state, + ndlp->nlp_flag, ndlp->nlp_fc4_type); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM in: evt:%d ste:%d did:x%x", diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 918ae18ef8a8273f9385e2eb872947bac8a7275c..6c355d87c709d19074e850f2c6eaad0d4bc6298d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct 
nvme_fc_local_port *pnvme_lport, vport = lport->vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", + "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n", lport, qidx, handle); kfree(handle); } @@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) lport); /* release any threads waiting for the unreg to complete */ - complete(&lport->lport_unreg_done); + if (lport->vport->localport) + complete(lport->lport_unreg_cmp); } /* lpfc_nvme_remoteport_delete @@ -1855,7 +1856,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); /* word 7 */ - bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->iocb.ulpClass); @@ -1870,7 +1870,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, abts_buf->iotag); /* word 10 */ - bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx); bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); @@ -1904,6 +1903,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { + .module = THIS_MODULE, + /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, @@ -2476,6 +2477,9 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; + if (!IS_ENABLED(CONFIG_NVME_FC)) + return ret; + cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) * phba->cfg_nvme_io_channel), GFP_KERNEL); if (!cstat) @@ -2484,12 +2488,9 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. 
*/ -#if (IS_ENABLED(CONFIG_NVME_FC)) + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, &vport->phba->pcidev->dev, &localport); -#else - ret = -ENOMEM; -#endif if (!ret) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, "6005 Successfully registered local " @@ -2556,7 +2557,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) */ void lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, - struct lpfc_nvme_lport *lport) + struct lpfc_nvme_lport *lport, + struct completion *lport_unreg_cmp) { #if (IS_ENABLED(CONFIG_NVME_FC)) u32 wait_tmo; @@ -2568,8 +2570,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, */ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); while (true) { - ret = wait_for_completion_timeout(&lport->lport_unreg_done, - wait_tmo); + ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, "6176 Lport %p Localport %p wait " @@ -2603,12 +2604,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) struct lpfc_nvme_lport *lport; struct lpfc_nvme_ctrl_stat *cstat; int ret; + DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); if (vport->nvmei_support == 0) return; localport = vport->localport; - vport->localport = NULL; lport = (struct lpfc_nvme_lport *)localport->private; cstat = lport->cstat; @@ -2619,13 +2620,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) /* lport's rport list is clear. Unregister * lport and release resources. */ - init_completion(&lport->lport_unreg_done); + lport->lport_unreg_cmp = &lport_unreg_cmp; ret = nvme_fc_unregister_localport(localport); /* Wait for completion. This either blocks * indefinitely or succeeds */ - lpfc_nvme_lport_unreg_wait(vport, lport); + lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); + vport->localport = NULL; kfree(cstat); /* Regardless of the unregister upcall response, clear diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index cfd4719be25c3d3eb35ae4ae5ea7e56d357d0d21..b234d02989942ba65f0a87db0aab71c9c1b7cbb2 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat { /* Declare nvme-based local and remote port definitions. 
*/ struct lpfc_nvme_lport { struct lpfc_vport *vport; - struct completion lport_unreg_done; + struct completion *lport_unreg_cmp; /* Add stats counters here */ struct lpfc_nvme_ctrl_stat *cstat; atomic_t fc4NvmeLsRequests; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index b766afe10d3d7205936c548eec62f3ea9d02b32d..768eba8c111d9867ddcca2875529a8d46935d10d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) struct lpfc_nvmet_tgtport *tport = targetport->private; /* release any threads waiting for the unreg to complete */ - complete(&tport->tport_unreg_done); + if (tport->phba->targetport) + complete(tport->tport_unreg_cmp); } static void @@ -1339,15 +1340,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) idx = 0; } - infop = phba->sli4_hba.nvmet_ctx_info; - for (j = 0; j < phba->cfg_nvmet_mrq; j++) { - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + infop = lpfc_get_ctx_list(phba, i, j); lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, "6408 TOTAL NVMET ctx for CPU %d " "MRQ %d: cnt %d nextcpu %p\n", i, j, infop->nvmet_ctx_list_cnt, infop->nvmet_ctx_next_cpu); - infop++; } } return 0; @@ -1700,6 +1700,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) struct lpfc_nvmet_tgtport *tgtp; struct lpfc_queue *wq; uint32_t qidx; + DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); if (phba->nvmet_support == 0) return; @@ -1709,9 +1710,13 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) wq = phba->sli4_hba.nvme_wq[qidx]; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } - init_completion(&tgtp->tport_unreg_done); + tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); + if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6179 Unreg targetport %p timeout " + "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); } phba->targetport = NULL; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 1aaff63f1f419209b08cdfa0c6e7141a2018854d..3b170284a0e59b7376006a17140e712efe0a6d97 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -31,10 +31,12 @@ #define LPFC_NVMET_MRQ_AUTO 0 #define LPFC_NVMET_MRQ_MAX 16 +#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC) + /* Used for NVME Target */ struct lpfc_nvmet_tgtport { struct lpfc_hba *phba; - struct completion tport_unreg_done; + struct completion *tport_unreg_cmp; /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ atomic_t rcv_ls_req_in; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 5c7858e735c9e8236ca3d92b05fe378c784a4bc5..eea8cd5f961f8d739d7d47fbbac6ff8ed6fa03e1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2732,6 +2732,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; + struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2837,6 +2838,14 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, */ iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + /* + * For First burst, we may need to adjust the initial 
transfer + * length for DIF + */ + if (iocb_cmd->un.fcpi.fcpi_XRdy && + (fcpdl < vport->cfg_first_burst_size)) + iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; + return 0; err: if (lpfc_cmd->seg_cnt) @@ -3401,6 +3410,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; + struct lpfc_vport *vport = phba->pport; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd @@ -3516,6 +3526,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, */ iocb_cmd->un.fcpi.fcpi_parm = fcpdl; + /* + * For First burst, we may need to adjust the initial transfer + * length for DIF + */ + if (iocb_cmd->un.fcpi.fcpi_XRdy && + (fcpdl < vport->cfg_first_burst_size)) + iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; + /* * If the OAS driver feature is enabled and the lun is enabled for * OAS, set the oas iocb related flags. @@ -4087,7 +4105,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { - if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + if (!scsi_prot_op_normal(cmd)) { /* * This is a response for a BG enabled * cmd. Parse BG error @@ -4158,9 +4176,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); - spin_lock_irqsave(&phba->hbalock, flags); - lpfc_cmd->pCmd = NULL; - spin_unlock_irqrestore(&phba->hbalock, flags); + /* If pCmd was set to NULL from abort path, do not call scsi_done */ + if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "5688 FCP cmd already NULL, sid: 0x%06x, " + "did: 0x%06x, oxid: 0x%04x\n", + vport->fc_myDID, + (pnode) ? pnode->nlp_DID : 0, + phba->sli_rev == LPFC_SLI_REV4 ? + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff); + return; + } /* The sdev is not guaranteed to be valid post scsi_done upcall. 
*/ cmd->scsi_done(cmd); @@ -4560,7 +4586,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } ndlp = rdata->pnode; - if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && + if (!scsi_prot_op_normal(cmnd) && (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, @@ -4630,7 +4656,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_cmd->start_time = jiffies; cmnd->host_scribble = (unsigned char *)lpfc_cmd; - if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { + if (!scsi_prot_op_normal(cmnd)) { if (vport->phba->cfg_enable_bg) { lpfc_printf_vlog(vport, KERN_INFO, LOG_SCSI_CMD, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9830bdb6e072625cd75e46d8b4b99845e739305d..89340a74d1b50b0131af266d9363946f9e049b02 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) struct lpfc_register doorbell; doorbell.word0 = 0; - bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); - bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); - bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, - (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); - bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); + bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); } @@ -1108,9 +1104,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, empty = list_empty(&phba->active_rrq_list); list_add_tail(&rrq->list, &phba->active_rrq_list); phba->hba_flag |= HBA_RRQ_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, iflags); if (empty) lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, iflags); return 0; out: spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; unsigned long iflag; + int count = 0; spin_lock_irqsave(&phba->hbalock, iflag); phba->hba_flag &= ~HBA_SP_QUEUE_EVT; @@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, if (irspiocbq) lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + count++; break; case CQE_CODE_RECEIVE: case CQE_CODE_RECEIVE_V1: dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event); lpfc_sli4_handle_received_buffer(phba, dmabuf); + count++; break; default: break; } + + /* Limit the number of events to 64 to avoid soft lockups */ + if (count == 64) + break; } } @@ -4637,6 +4640,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; rc = lpfc_sli4_brdreset(phba); + if (rc) + return rc; spin_lock_irq(&phba->hbalock); phba->pport->stopped = 0; @@ -4962,7 +4967,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED | LPFC_SLI3_CRP_ENABLED | - LPFC_SLI3_BG_ENABLED | LPFC_SLI3_DSS_ENABLED); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -10835,10 +10839,12 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (cmdiocb->iocb_flag & LPFC_IO_FOF) abtsiocbp->iocb_flag |= LPFC_IO_FOF; - if (phba->link_state >= LPFC_LINK_UP) - iabt->ulpCommand = CMD_ABORT_XRI_CN; - else + if (phba->link_state < LPFC_LINK_UP || + (phba->sli_rev == LPFC_SLI_REV4 && + phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN)) iabt->ulpCommand = CMD_CLOSE_XRI_CN; + else + iabt->ulpCommand = 
CMD_ABORT_XRI_CN; abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; abtsiocbp->vport = vport; @@ -10985,19 +10991,12 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* Complete prepping the abort wqe and issue to the FW. */ abts_wqe = &abtsiocbp->wqe; - bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); - - /* Explicitly set reserved fields to zero.*/ - abts_wqe->abort_cmd.rsrvd4 = 0; - abts_wqe->abort_cmd.rsrvd5 = 0; - /* WQE Common - word 6. Context is XRI tag. Set 0. */ - bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); - bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); + /* Clear any stale WQE contents */ + memset(abts_wqe, 0, sizeof(union lpfc_wqe)); + bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); /* word 7 */ - bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, cmdiocb->iocb.ulpClass); @@ -11012,7 +11011,6 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iotag); /* word 10 */ - bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); @@ -12932,13 +12930,19 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* Setting active mailbox pointer need to be in sync to flag clear */ phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Wake up worker thread to post the next pending mailbox command */ lpfc_worker_wake_up(phba); + return workposted; + out_no_mqe_complete: + spin_lock_irqsave(&phba->hbalock, iflags); if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); - return workposted; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return false; } /** @@ -14215,7 +14219,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, hw_page_size))/hw_page_size; /* If needed, Adjust page count to match the max the adapter supports */ - if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) + if (phba->sli4_hba.pc_sli4_params.wqpcnt && + (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)) queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; INIT_LIST_HEAD(&queue->list); @@ -17864,6 +17869,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) static void __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { + /* + * if the rpi value indicates a prior unreg has already + * been done, skip the unreg. 
+ */ + if (rpi == LPFC_RPI_ALLOC_ERROR) + return; + if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { phba->sli4_hba.rpi_count--; phba->sli4_hba.max_cfg_param.rpi_used--; @@ -18428,15 +18440,8 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) goto initial_priority; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2844 No roundrobin failover FCF available\n"); - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) - return LPFC_FCOE_FCF_NEXT_NONE; - else { - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, - "3063 Only FCF available idx %d, flag %x\n", - next_fcf_index, - phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); - return next_fcf_index; - } + + return LPFC_FCOE_FCF_NEXT_NONE; } if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && @@ -19156,6 +19161,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) fail_msg, piocbq->iotag, piocbq->sli4_xritag); list_add_tail(&piocbq->list, &completions); + fail_msg = NULL; } spin_unlock_irqrestore(&pring->ring_lock, iflags); } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 399c0015c54655787c3e31b4f545fc909580f4d4..3dcc6615a23b20e7595156a32b9ce18d7f0913c1 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -279,6 +279,7 @@ struct lpfc_fcf { #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) + uint16_t fcf_redisc_attempted; uint32_t addr_mode; uint32_t eligible_fcf_cnt; struct lpfc_fcf_rec current_rec; diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 1ff0f7de910584e11dc8f839a0d59751908903fc..0b60ae6570304cf276b84f1322df78587b2990dc 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -690,10 +690,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) ns_ndlp_referenced = true; } - /* Remove FC host and then SCSI host with the vport */ - fc_remove_host(shost); - scsi_remove_host(shost); - ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); /* In case of driver unload, we shall not perform fabric logo as the @@ -796,6 +792,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport) skip_logo: + /* Remove FC host and then SCSI host with the vport */ + fc_remove_host(shost); + scsi_remove_host(shost); + /* * If the NameServer ndlp has been incremented to allow the DA_ID CT * command to be sent, decrement the ndlp now. 
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index eb551f3cc471ded0fe22118c35a6e922349f7f28..71879f2207e0e92fc9c686c26c89f14fab5ea701 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -427,6 +427,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, scsi_esp_cmd(esp, ESP_CMD_TI); } } + + esp->send_cmd_residual = esp_count; } static int mac_esp_irq_pending(struct esp *esp) diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index dd6057359d7c6e681544b9f7b2be5393b39c7f1a..b5050c2ede00d330a53553a2b99e7809ac2a0fef 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -3,6 +3,8 @@ * * Copyright 1998, Michael Schmitz * + * Copyright 2019 Finn Thain + * * derived in part from: */ /* @@ -11,6 +13,7 @@ * Copyright 1995, Russell King */ +#include #include #include #include @@ -52,7 +55,7 @@ static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); -static int setup_use_pdma = -1; +static int setup_use_pdma = 512; module_param(setup_use_pdma, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); @@ -89,101 +92,217 @@ static int __init mac_scsi_setup(char *str) __setup("mac5380=", mac_scsi_setup); #endif /* !MODULE */ -/* Pseudo DMA asm originally by Ove Edlund */ - -#define CP_IO_TO_MEM(s,d,n) \ -__asm__ __volatile__ \ - (" cmp.w #4,%2\n" \ - " bls 8f\n" \ - " move.w %1,%%d0\n" \ - " neg.b %%d0\n" \ - " and.w #3,%%d0\n" \ - " sub.w %%d0,%2\n" \ - " bra 2f\n" \ - " 1: move.b (%0),(%1)+\n" \ - " 2: dbf %%d0,1b\n" \ - " move.w %2,%%d0\n" \ - " lsr.w #5,%%d0\n" \ - " bra 4f\n" \ - " 3: move.l (%0),(%1)+\n" \ - "31: move.l (%0),(%1)+\n" \ - "32: move.l (%0),(%1)+\n" \ - "33: move.l (%0),(%1)+\n" \ - "34: move.l (%0),(%1)+\n" \ - "35: move.l (%0),(%1)+\n" \ - "36: move.l (%0),(%1)+\n" \ - "37: move.l (%0),(%1)+\n" \ - " 4: dbf %%d0,3b\n" \ - " move.w %2,%%d0\n" \ - " lsr.w #2,%%d0\n" \ - " and.w #7,%%d0\n" \ - " bra 6f\n" \ - " 5: move.l (%0),(%1)+\n" \ - " 6: dbf %%d0,5b\n" \ - " and.w #3,%2\n" \ - " bra 8f\n" \ - " 7: move.b (%0),(%1)+\n" \ - " 8: dbf %2,7b\n" \ - " moveq.l #0, %2\n" \ - " 9: \n" \ - ".section .fixup,\"ax\"\n" \ - " .even\n" \ - "91: moveq.l #1, %2\n" \ - " jra 9b\n" \ - "94: moveq.l #4, %2\n" \ - " jra 9b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,91b\n" \ - " .long 3b,94b\n" \ - " .long 31b,94b\n" \ - " .long 32b,94b\n" \ - " .long 33b,94b\n" \ - " .long 34b,94b\n" \ - " .long 35b,94b\n" \ - " .long 36b,94b\n" \ - " .long 37b,94b\n" \ - " .long 5b,94b\n" \ - " .long 7b,91b\n" \ - ".previous" \ - : "=a"(s), "=a"(d), "=d"(n) \ - : "0"(s), "1"(d), "2"(n) \ - : "d0") +/* + * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to + * specify the number of bytes between the delays expected from a SCSI target. + * This allows the operating system to "prevent bus errors when a target fails + * to deliver the next byte within the processor bus error timeout period." + * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets + * so bus errors are unavoidable. + * + * If a MOVE.B instruction faults, we assume that zero bytes were transferred + * and simply retry. That assumption probably depends on target behaviour but + * seems to hold up okay. The NOP provides synchronization: without it the + * fault can sometimes occur after the program counter has moved past the + * offending instruction. 
Post-increment addressing can't be used. + */ + +#define MOVE_BYTE(operands) \ + asm volatile ( \ + "1: moveb " operands " \n" \ + "11: nop \n" \ + " addq #1,%0 \n" \ + " subq #1,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #1, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 11b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +/* + * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because + * the residual byte count would be uncertain. In that situation the MOVE_WORD + * macro clears n in the fixup section to abort the transfer. + */ + +#define MOVE_WORD(operands) \ + asm volatile ( \ + "1: movew " operands " \n" \ + "11: nop \n" \ + " subq #2,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #0, %1 \n" \ + " movel #2, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 11b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +#define MOVE_16_WORDS(operands) \ + asm volatile ( \ + "1: movew " operands " \n" \ + "2: movew " operands " \n" \ + "3: movew " operands " \n" \ + "4: movew " operands " \n" \ + "5: movew " operands " \n" \ + "6: movew " operands " \n" \ + "7: movew " operands " \n" \ + "8: movew " operands " \n" \ + "9: movew " operands " \n" \ + "10: movew " operands " \n" \ + "11: movew " operands " \n" \ + "12: movew " operands " \n" \ + "13: movew " operands " \n" \ + "14: movew " operands " \n" \ + "15: movew " operands " \n" \ + "16: movew " operands " \n" \ + "17: nop \n" \ + " subl #32,%1 \n" \ + "40: \n" \ + " \n" \ + ".section .fixup,\"ax\" \n" \ + ".even \n" \ + "90: movel #0, %1 \n" \ + " movel #2, %2 \n" \ + " jra 40b \n" \ + ".previous \n" \ + " \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 4 \n" \ + ".long 1b,90b \n" \ + ".long 2b,90b \n" \ + ".long 3b,90b \n" \ + ".long 4b,90b \n" \ + ".long 5b,90b \n" \ + ".long 6b,90b \n" \ + ".long 7b,90b \n" \ + ".long 8b,90b \n" \ + ".long 9b,90b \n" \ + ".long 10b,90b \n" \ + ".long 11b,90b \n" \ + ".long 12b,90b \n" \ + ".long 13b,90b \n" \ + ".long 14b,90b \n" \ + ".long 15b,90b \n" \ + ".long 16b,90b \n" \ + ".long 17b,90b \n" \ + ".previous \n" \ + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) + +#define MAC_PDMA_DELAY 32 + +static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) +{ + unsigned char *addr = start; + int result = 0; + + if (n >= 1) { + MOVE_BYTE("%3@,%0@"); + if (result) + goto out; + } + if (n >= 1 && ((unsigned long)addr & 1)) { + MOVE_BYTE("%3@,%0@"); + if (result) + goto out; + } + while (n >= 32) + MOVE_16_WORDS("%3@,%0@+"); + while (n >= 2) + MOVE_WORD("%3@,%0@+"); + if (result) + return start - addr; /* Negated to indicate uncertain length */ + if (n == 1) + MOVE_BYTE("%3@,%0@"); +out: + return addr - start; +} + +static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) +{ + unsigned char *addr = start; + int result = 0; + + if (n >= 1) { + MOVE_BYTE("%0@,%3@"); + if (result) + goto out; + } + if (n >= 1 && ((unsigned long)addr & 1)) { + MOVE_BYTE("%0@,%3@"); + if (result) + goto out; + } + while (n >= 32) + MOVE_16_WORDS("%0@+,%3@"); + while (n >= 2) + MOVE_WORD("%0@+,%3@"); + if (result) + return start - addr; /* Negated to indicate uncertain length */ + if (n == 1) + MOVE_BYTE("%0@,%3@"); +out: + 
return addr - start; +} static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; - int n = len; - int transferred; + + hostdata->pdma_residual = len; while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { - CP_IO_TO_MEM(s, d, n); + int bytes; - transferred = d - dst - n; - hostdata->pdma_residual = len - transferred; + bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); - /* No bus error. */ - if (n == 0) + if (bytes > 0) { + d += bytes; + hostdata->pdma_residual -= bytes; + } + + if (hostdata->pdma_residual == 0) return 0; - /* Target changed phase early? */ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) - scmd_printk(KERN_ERR, hostdata->connected, + BUS_AND_STATUS_REG, BASR_ACK, + BASR_ACK, HZ / 64) < 0) + scmd_printk(KERN_DEBUG, hostdata->connected, "%s: !REQ and !ACK\n", __func__); if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) return 0; + if (bytes == 0) + udelay(MAC_PDMA_DELAY); + + if (bytes >= 0) + continue; + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, transferred, len); + "%s: bus error (%d/%d)\n", __func__, d - dst, len); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - d = dst + transferred; - n = len - transferred; + return -1; } scmd_printk(KERN_ERR, hostdata->connected, @@ -192,93 +311,27 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, return -1; } - -#define CP_MEM_TO_IO(s,d,n) \ -__asm__ __volatile__ \ - (" cmp.w #4,%2\n" \ - " bls 8f\n" \ - " move.w %0,%%d0\n" \ - " neg.b %%d0\n" \ - " and.w #3,%%d0\n" \ - " sub.w %%d0,%2\n" \ - " bra 2f\n" \ - " 1: move.b (%0)+,(%1)\n" \ - " 2: dbf %%d0,1b\n" \ - " move.w %2,%%d0\n" \ - " lsr.w #5,%%d0\n" \ - " bra 4f\n" \ - " 3: move.l (%0)+,(%1)\n" \ - "31: move.l (%0)+,(%1)\n" \ - "32: move.l (%0)+,(%1)\n" \ - "33: move.l (%0)+,(%1)\n" \ - "34: move.l (%0)+,(%1)\n" \ - "35: move.l (%0)+,(%1)\n" \ - "36: move.l (%0)+,(%1)\n" \ - "37: move.l (%0)+,(%1)\n" \ - " 4: dbf %%d0,3b\n" \ - " move.w %2,%%d0\n" \ - " lsr.w #2,%%d0\n" \ - " and.w #7,%%d0\n" \ - " bra 6f\n" \ - " 5: move.l (%0)+,(%1)\n" \ - " 6: dbf %%d0,5b\n" \ - " and.w #3,%2\n" \ - " bra 8f\n" \ - " 7: move.b (%0)+,(%1)\n" \ - " 8: dbf %2,7b\n" \ - " moveq.l #0, %2\n" \ - " 9: \n" \ - ".section .fixup,\"ax\"\n" \ - " .even\n" \ - "91: moveq.l #1, %2\n" \ - " jra 9b\n" \ - "94: moveq.l #4, %2\n" \ - " jra 9b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,91b\n" \ - " .long 3b,94b\n" \ - " .long 31b,94b\n" \ - " .long 32b,94b\n" \ - " .long 33b,94b\n" \ - " .long 34b,94b\n" \ - " .long 35b,94b\n" \ - " .long 36b,94b\n" \ - " .long 37b,94b\n" \ - " .long 5b,94b\n" \ - " .long 7b,91b\n" \ - ".previous" \ - : "=a"(s), "=a"(d), "=d"(n) \ - : "0"(s), "1"(d), "2"(n) \ - : "d0") - static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); - int n = len; - int transferred; + + hostdata->pdma_residual = len; while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_DRQ | BASR_PHASE_MATCH, BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { - CP_MEM_TO_IO(s, d, n); + int bytes; - transferred = s - src - n; - hostdata->pdma_residual = len - transferred; + bytes = 
mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); - /* Target changed phase early? */ - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) - scmd_printk(KERN_ERR, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - return 0; + if (bytes > 0) { + s += bytes; + hostdata->pdma_residual -= bytes; + } - /* No bus error. */ - if (n == 0) { + if (hostdata->pdma_residual == 0) { if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, HZ / 64) < 0) @@ -287,17 +340,29 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, return 0; } + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, + BUS_AND_STATUS_REG, BASR_ACK, + BASR_ACK, HZ / 64) < 0) + scmd_printk(KERN_DEBUG, hostdata->connected, + "%s: !REQ and !ACK\n", __func__); + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) + return 0; + + if (bytes == 0) + udelay(MAC_PDMA_DELAY); + + if (bytes >= 0) + continue; + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, transferred, len); + "%s: bus error (%d/%d)\n", __func__, s - src, len); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - s = src + transferred; - n = len - transferred; + return -1; } scmd_printk(KERN_ERR, hostdata->connected, "%s: phase mismatch or !DRQ\n", __func__); NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - return -1; } @@ -305,7 +370,7 @@ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { if (hostdata->flags & FLAG_NO_PSEUDO_DMA || - cmd->SCp.this_residual < 16) + cmd->SCp.this_residual < setup_use_pdma) return 0; return cmd->SCp.this_residual; @@ -364,7 +429,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev) mac_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) mac_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 8c7154143a4eb1f3db016cc4787b227ade95c531..a84878fbf45d23619e0747bf512acc05819747b4 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4189,11 +4189,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && pdev->subsystem_device == 0xC000) - return -ENODEV; + goto out_disable_device; /* Now check the magic signature byte */ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) - return -ENODEV; + goto out_disable_device; /* Ok it is probably a megaraid */ } diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 67d356d84717631eb7f1c85726a6eff334e2da7f..2398e60c4c5fe351686b42b7a9381f1c9f3ebc5f 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -62,6 +62,10 @@ #define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 #define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B #define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C +#define PCI_DEVICE_ID_LSI_AERO_10E1 0x10e1 +#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2 +#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5 +#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6 /* * Intel HBA SSDIDs diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c 
b/drivers/scsi/megaraid/megaraid_sas_base.c index 9aa9590c53739a1b6f8f3123b439edd8eb1ec3c2..b993b79ed38f6f5e7da48244b81a531fd14b3827 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -165,6 +165,10 @@ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)}, {} }; @@ -3025,6 +3029,7 @@ megasas_fw_crash_buffer_show(struct device *cdev, u32 size; unsigned long buff_addr; unsigned long dmachunk = CRASH_DMA_BUF_SIZE; + unsigned long chunk_left_bytes; unsigned long src_addr; unsigned long flags; u32 buff_offset; @@ -3050,6 +3055,8 @@ megasas_fw_crash_buffer_show(struct device *cdev, } size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; + chunk_left_bytes = dmachunk - (buff_offset % dmachunk); + size = (size > chunk_left_bytes) ? chunk_left_bytes : size; size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + @@ -3891,12 +3898,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) /* * The cur_state should not last for more than max_wait secs */ - for (i = 0; i < (max_wait * 1000); i++) { + for (i = 0; i < max_wait; i++) { curr_abs_state = instance->instancet-> read_fw_status_reg(instance->reg_set); if (abs_state == curr_abs_state) { - msleep(1); + msleep(1000); } else break; } @@ -4155,6 +4162,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance) if (megasas_create_frame_pool(instance)) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); megasas_free_cmds(instance); + return -ENOMEM; } return 0; @@ -5214,7 +5222,7 @@ static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; u32 max_sectors_2, tmp_sectors, msix_enable; - u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; + u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg; resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info = NULL; @@ -5222,6 +5230,7 @@ static int megasas_init_fw(struct megasas_instance *instance) int i, j, loop, fw_msix_count = 0; struct IOV_111 *iovPtr; struct fusion_context *fusion; + bool do_adp_reset = true; fusion = instance->ctrl_context; @@ -5270,19 +5279,29 @@ static int megasas_init_fw(struct megasas_instance *instance) } if (megasas_transition_to_ready(instance, 0)) { - atomic_set(&instance->fw_reset_no_pci_access, 1); - instance->instancet->adp_reset - (instance, instance->reg_set); - atomic_set(&instance->fw_reset_no_pci_access, 0); - dev_info(&instance->pdev->dev, - "FW restarted successfully from %s!\n", - __func__); + if (instance->adapter_type >= INVADER_SERIES) { + status_reg = instance->instancet->read_fw_status_reg( + instance->reg_set); + do_adp_reset = status_reg & MFI_RESET_ADAPTER; + } - /*waitting for about 30 second before retry*/ - ssleep(30); + if (do_adp_reset) { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + 
dev_info(&instance->pdev->dev, + "FW restarted successfully from %s!\n", + __func__); - if (megasas_transition_to_ready(instance, 0)) + /*waiting for about 30 second before retry*/ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } else { goto fail_ready_state; + } } megasas_init_ctrl_params(instance); @@ -5321,12 +5340,29 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = instance->msix_vectors; - } else { /* Invader series supports more than 8 MSI-x vectors*/ + } else { instance->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; - if (instance->msix_vectors > 16) - instance->msix_combined = true; + + /* + * For Invader series, > 8 MSI-x vectors + * supported by FW/HW implies combined + * reply queue mode is enabled. + * For Ventura series, > 16 MSI-x vectors + * supported by FW/HW implies combined + * reply queue mode is enabled. + */ + switch (instance->adapter_type) { + case INVADER_SERIES: + if (instance->msix_vectors > 8) + instance->msix_combined = true; + break; + case VENTURA_SERIES: + if (instance->msix_vectors > 16) + instance->msix_combined = true; + break; + } if (rdpq_enable) instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? @@ -5378,7 +5414,7 @@ static int megasas_init_fw(struct megasas_instance *instance) if (!instance->msix_vectors) { i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); if (i < 0) - goto fail_setup_irqs; + goto fail_init_adapter; } megasas_setup_reply_map(instance); @@ -5587,9 +5623,8 @@ static int megasas_init_fw(struct megasas_instance *instance) fail_get_ld_pd_list: instance->instancet->disable_intr(instance); -fail_init_adapter: megasas_destroy_irqs(instance); -fail_setup_irqs: +fail_init_adapter: if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); instance->msix_vectors = 0; @@ -5861,7 +5896,8 @@ megasas_get_target_prop(struct megasas_instance *instance, int ret; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - u16 targetId = (sdev->channel % 2) + sdev->id; + u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; cmd = megasas_get_cmd(instance); @@ -6023,13 +6059,13 @@ static int megasas_io_attach(struct megasas_instance *instance) * @instance: Adapter soft state * Description: * - * For Ventura, driver/FW will operate in 64bit DMA addresses. + * For Ventura, driver/FW will operate in 63bit DMA addresses. * * For invader- * By default, driver/FW will operate in 32bit DMA addresses * for consistent DMA mapping but if 32 bit consistent - * DMA mask fails, driver will try with 64 bit consistent - * mask provided FW is true 64bit DMA capable + * DMA mask fails, driver will try with 63 bit consistent + * mask provided FW is true 63bit DMA capable * * For older controllers(Thunderbolt and MFI based adapters)- * driver/FW will operate in 32 bit consistent DMA addresses. @@ -6043,14 +6079,14 @@ megasas_set_dma_mask(struct megasas_instance *instance) pdev = instance->pdev; consistent_mask = (instance->adapter_type == VENTURA_SERIES) ? 
- DMA_BIT_MASK(64) : DMA_BIT_MASK(32); + DMA_BIT_MASK(63) : DMA_BIT_MASK(32); if (IS_DMA64) { - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) goto fail_set_dma_mask; - if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) && + if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && (dma_set_coherent_mask(&pdev->dev, consistent_mask) && dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { /* @@ -6063,7 +6099,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) goto fail_set_dma_mask; else if (dma_set_mask_and_coherent(&pdev->dev, - DMA_BIT_MASK(64))) + DMA_BIT_MASK(63))) goto fail_set_dma_mask; } } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) @@ -6075,8 +6111,8 @@ megasas_set_dma_mask(struct megasas_instance *instance) instance->consistent_mask_64bit = true; dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", - ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"), - (instance->consistent_mask_64bit ? "64" : "32")); + ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), + (instance->consistent_mask_64bit ? "63" : "32")); return 0; @@ -6105,6 +6141,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance) instance->adapter_type = MFI_SERIES; } else { switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_AERO_10E1: + case PCI_DEVICE_ID_LSI_AERO_10E2: + case PCI_DEVICE_ID_LSI_AERO_10E5: + case PCI_DEVICE_ID_LSI_AERO_10E6: case PCI_DEVICE_ID_LSI_VENTURA: case PCI_DEVICE_ID_LSI_CRUSADER: case PCI_DEVICE_ID_LSI_HARPOON: @@ -6458,6 +6498,13 @@ static int megasas_probe_one(struct pci_dev *pdev, struct megasas_instance *instance; u16 control = 0; + switch (pdev->device) { + case PCI_DEVICE_ID_LSI_AERO_10E1: + case PCI_DEVICE_ID_LSI_AERO_10E5: + dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); + break; + } + /* Reset MSI-X in the kdump kernel */ if (reset_devices) { pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); @@ -6573,11 +6620,16 @@ static int megasas_probe_one(struct pci_dev *pdev, return 0; fail_start_aen: + instance->unload = 1; + scsi_remove_host(instance->host); fail_io_attach: megasas_mgmt_info.count--; megasas_mgmt_info.max_index--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); @@ -6585,8 +6637,13 @@ static int megasas_probe_one(struct pci_dev *pdev, megasas_release_fusion(instance); else megasas_release_mfi(instance); + if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); + instance->msix_vectors = 0; + + if (instance->fw_crash_state != UNAVAILABLE) + megasas_free_host_crash_buffer(instance); fail_init_mfi: scsi_host_put(host); fail_alloc_instance: @@ -7523,6 +7580,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) get_user(user_sense_off, &cioc->sense_off)) return -EFAULT; + if (local_sense_off != user_sense_off) + return -EINVAL; + if (local_sense_len) { void __user **sense_ioc_ptr = (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 59ecbb3b53b52acd3a37a105dc181f7845239c53..a3362855042545ab8f3feffcdf388eefc243f850 100644 --- 
a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, drv_map); - if (ld >= MAX_LOGICAL_DRIVES_EXT) { + if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index c7f95bace353aff9f9fa91268dd2dceebd9c2694..9d4941cb961b10077c9bca3fa5c658c49cf95063 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, device_id < instance->fw_supported_vd_count)) { ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - if (ld >= instance->fw_supported_vd_count) + if (ld >= instance->fw_supported_vd_count - 1) fp_possible = 0; else { raid = MR_LdRaidGet(ld, local_map_ptr); @@ -4857,6 +4857,7 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) if (!fusion->log_to_span) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); + kfree(instance->ctrl_context); return -ENOMEM; } } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 59d7844ee0222518d9a7c30a9adf57c9c399e7e2..ccb522a4ccb5523e65e9df2b85061553fdbfdd7f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2565,12 +2565,14 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; u64 consistent_dma_mask; + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 
63 : 64; if (ioc->is_mcpu_endpoint) goto try_32bit; if (ioc->dma_mask) - consistent_dma_mask = DMA_BIT_MASK(64); + consistent_dma_mask = DMA_BIT_MASK(dma_mask); else consistent_dma_mask = DMA_BIT_MASK(32); @@ -2578,11 +2580,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) const uint64_t required_mask = dma_get_required_mask(&pdev->dev); if ((required_mask > DMA_BIT_MASK(32)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) && !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); - ioc->dma_mask = 64; + ioc->dma_mask = dma_mask; goto out; } } @@ -2609,7 +2611,7 @@ static int _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) return -ENODEV; } @@ -3280,12 +3282,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) if (smid < ioc->hi_priority_smid) { struct scsiio_tracker *st; + void *request; st = _get_st_from_smid(ioc, smid); if (!st) { _base_recovery_check(ioc); return; } + + /* Clear MPI request frame */ + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + mpt3sas_base_clear_st(ioc, st); _base_recovery_check(ioc); return; @@ -3344,8 +3352,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, static inline void _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) { + wmb(); __raw_writeq(b, addr); - mmiowb(); + barrier(); } #else static inline void @@ -4108,7 +4117,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) * flag unset in NVDATA. */ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); - if (ioc->manu_pg11.EEDPTagMode == 0) { + if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { pr_err("%s: overriding NVDATA EEDPTagMode setting\n", ioc->name); ioc->manu_pg11.EEDPTagMode &= ~0x3; @@ -4538,7 +4547,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) total_sz += sz; } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); - if (ioc->dma_mask == 64) { + if (ioc->dma_mask > 32) { if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { pr_warn(MPT3SAS_FMT "no suitable consistent DMA mask for %s\n", @@ -6599,6 +6608,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pd_handles_sz++; + /* + * pd_handles_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may occur. + */ + ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); if (!ioc->pd_handles) { @@ -6616,16 +6631,27 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pend_os_device_add_sz++; + + /* + * pend_os_device_add_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory may occur. 
+ */ + ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, + sizeof(unsigned long)); ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); - if (!ioc->pend_os_device_add) + if (!ioc->pend_os_device_add) { + r = -ENOMEM; goto out_free_resources; + } ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; ioc->device_remove_in_progress = kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); - if (!ioc->device_remove_in_progress) + if (!ioc->device_remove_in_progress) { + r = -ENOMEM; goto out_free_resources; + } ioc->fwfault_debug = mpt3sas_fwfault_debug; @@ -6702,6 +6728,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_free_resources; + /* + * Copy current copy of IOCFacts in prev_fw_facts + * and it will be used during online firmware upgrade. + */ + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct mpt3sas_facts)); + ioc->non_operational_loop = 0; ioc->got_task_abort_from_ioctl = 0; return 0; @@ -6867,6 +6900,91 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); } +/** + * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts + * attributes during online firmware upgrade and update the corresponding + * IOC variables accordingly. + * + * @ioc: Pointer to MPT_ADAPTER structure + */ +static int +_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) +{ + u16 pd_handles_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + + /* + * pd_handles should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may + * occur. 
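(Illustrative sketch, not part of the patch: the comment above explains why these handle bitmaps are sized up to a whole number of unsigned longs, since set_bit()/test_bit() style helpers read and write full words. A stand-alone sizing example with hypothetical numbers, ALIGN_UP standing in for the kernel's ALIGN():)

#include <stdio.h>
#include <stdlib.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	unsigned int max_handles = 1234;   /* hypothetical MaxDevHandle */
	size_t sz = max_handles / 8;       /* bytes needed for the bitmap */

	if (max_handles % 8)
		sz++;

	/* Bit helpers touch whole unsigned longs, so the buffer must cover
	 * a whole number of words or the last word would land past the
	 * allocation. */
	sz = ALIGN_UP(sz, sizeof(unsigned long));

	unsigned long *bitmap = calloc(1, sz);
	if (!bitmap)
		return 1;

	printf("%u handles -> %zu bytes (%zu words)\n",
	       max_handles, sz, sz / sizeof(unsigned long));
	free(bitmap);
	return 0;
}

(On a 64-bit build this turns the 155 bytes needed for 1234 handles into 160 bytes, i.e. 20 full words.)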
+ */ + pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + ioc_info(ioc, + "Unable to allocate the memory for pd_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + + blocking_handles = krealloc(ioc->blocking_handles, + pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + ioc_info(ioc, + "Unable to allocate the memory for " + "blocking_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + + pend_os_device_add = krealloc(ioc->pend_os_device_add, + pd_handles_sz, GFP_KERNEL); + if (!pend_os_device_add) { + ioc_info(ioc, + "Unable to allocate the memory for pend_os_device_add of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + + device_remove_in_progress = krealloc( + ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); + if (!device_remove_in_progress) { + ioc_info(ioc, + "Unable to allocate the memory for " + "device_remove_in_progress of sz: %d\n " + , pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + } + + memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); + return 0; +} + /** * mpt3sas_base_hard_reset_handler - reset controller * @ioc: Pointer to MPT_ADAPTER structure @@ -6932,6 +7050,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, if (r) goto out; + r = _base_check_ioc_facts_changes(ioc); + if (r) { + ioc_info(ioc, + "Some of the parameters got changed in this new firmware" + " image and it requires system reboot\n"); + goto out; + } if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) panic("%s: Issue occurred with flashing controller firmware." "Please reboot the system and ensure that the correct" diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 96dc15e90bd83c1bb4a8e534797f9ed495b037b8..b0297a9c923897aed2d12a701415d63c4be527a7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -160,6 +160,15 @@ struct mpt3sas_nvme_cmd { */ #define MPT3SAS_FMT "%s: " +#define ioc_err(ioc, fmt, ...) \ + pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_notice(ioc, fmt, ...) \ + pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_warn(ioc, fmt, ...) \ + pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_info(ioc, fmt, ...) 
\ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) + /* * WarpDrive Specific Log codes */ @@ -1023,6 +1032,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @event_log: event log pointer * @event_masks: events that are masked * @facts: static facts data + * @prev_fw_facts: previous fw facts data * @pfacts: static port facts data * @manu_pg0: static manufacturing page 0 * @manu_pg10: static manufacturing page 10 @@ -1226,6 +1236,7 @@ struct MPT3SAS_ADAPTER { /* static config pages */ struct mpt3sas_facts facts; + struct mpt3sas_facts prev_fw_facts; struct mpt3sas_port_facts *pfacts; Mpi2ManufacturingPage0_t manu_pg0; struct Mpi2ManufacturingPage10_t manu_pg10; diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index d29a2dcc7d0eca93a0595e6d97297489c645f1ee..9b01c5a7aebd9df0a1e3e911449e3f688f333860 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -692,10 +692,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, r = _config_request(ioc, &mpi_request, mpi_reply, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); out: return r; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 5e8c059ce2c929156338372ddc9be5fe8bcd0291..90060af097cc945b4a51499401ba8dbef3ea36f9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -1597,7 +1597,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, " for diag buffers, requested size(%d)\n", ioc->name, __func__, request_data_sz); mpt3sas_base_free_smid(ioc, smid); - return -ENOMEM; + rc = -ENOMEM; + goto out; } ioc->diag_buffer[buffer_type] = request_data; ioc->diag_buffer_sz[buffer_type] = request_data_sz; @@ -2404,6 +2405,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, break; } + if (karg.hdr.ioc_number != ioctl_header.ioc_number) { + ret = -EINVAL; + break; + } if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { uarg = arg; ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); @@ -2960,7 +2965,7 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, if (!ioc->is_warpdrive) { pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" " warpdrive\n", ioc->name, __func__); - goto out; + return 0; } /* pci_access_mutex lock acquired by sysfs show path */ mutex_lock(&ioc->pci_access_mutex); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 53133cfd420f1d2c7c90dd3e0bdc87bf233feece..506b58772c6ec3c5dcfa96d8c7dd947d6d2bebe8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1474,11 +1474,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) { struct scsi_cmnd *scmd = NULL; struct scsiio_tracker *st; + Mpi25SCSIIORequest_t *mpi_request; if (smid > 0 && smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { u32 unique_tag = smid - 1; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* + * If SCSI IO request is outstanding at driver level then + * DevHandle filed must be non-zero. If DevHandle is zero + * then it means that this smid is free at driver level, + * so return NULL. 
+ */ + if (!mpi_request->DevHandle) + return scmd; + scmd = scsi_host_find_tag(ioc->shost, unique_tag); if (scmd) { st = scsi_cmd_priv(scmd); @@ -3203,6 +3215,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) fw_event = list_first_entry(&ioc->fw_event_list, struct fw_event_work, list); list_del_init(&fw_event->list); + fw_event_work_put(fw_event); } spin_unlock_irqrestore(&ioc->fw_event_lock, flags); @@ -3237,7 +3250,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) if (cancel_work_sync(&fw_event->work)) fw_event_work_put(fw_event); - fw_event_work_put(fw_event); } } @@ -3352,7 +3364,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -3779,6 +3791,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, return _scsih_check_for_pending_tm(ioc, smid); } +/** _scsih_allow_scmd_to_device - check whether scmd needs to + * issue to IOC or not. + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * + * Returns true if scmd can be issued to IOC otherwise returns false. + */ +inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + + if (ioc->pci_error_recovery) + return false; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (ioc->remove_host) + return false; + + return true; + } + + if (ioc->remove_host) { + + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } + } + + return true; +} /** * _scsih_sas_control_complete - completion routine @@ -4611,7 +4657,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - if (ioc->pci_error_recovery || ioc->remove_host) { + if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; @@ -9809,6 +9855,7 @@ static void scsih_remove(struct pci_dev *pdev) /* release all the volumes */ _scsih_ir_shutdown(ioc); + sas_remove_host(shost); list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { @@ -9851,7 +9898,6 @@ static void scsih_remove(struct pci_dev *pdev) ioc->sas_hba.num_phys = 0; } - sas_remove_host(shost); mpt3sas_base_detach(ioc); spin_lock(&gioc_lock); list_del(&ioc->list); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index f8cc2677c1cd605b87bd9c83be1ec402505703a8..20d36061c217ce450bef794155d4d77da0f68d7d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -834,10 +834,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, mpt3sas_port->remote_identify.sas_address, mpt3sas_phy->phy_id); mpt3sas_phy->phy_belongs_to_port = 0; - sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + if (!ioc->remove_host) + sas_port_delete_phy(mpt3sas_port->port, + mpt3sas_phy->phy); list_del(&mpt3sas_phy->port_siblings); } - sas_port_delete(mpt3sas_port->port); + if (!ioc->remove_host) + sas_port_delete(mpt3sas_port->port); kfree(mpt3sas_port); } diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 8c91637cd598d971afd11b2ae5e4aceb024a7b22..77699c08a1bd0f3473bee8c5a351591970e60939 100644 --- 
a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -49,6 +49,7 @@ static struct scsi_host_template mvs_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, + .slave_alloc = sas_slave_alloc, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = mvs_scan_finished, diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 4dd6cad330e8e2abf142bf759956ddb5f88b7894..3e814c0469fbd2e4d2bde841d941f3c0fbe587e5 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, } else { u32 producer_index; void *pi_virt = circularQ->pi_virt; + /* spurious interrupt during setup if + * kexec-ing and driver doing a doorbell access + * with the pre-kexec oq interrupt setup + */ + if (!pi_virt) + break; /* Update the producer index from SPC */ producer_index = pm8001_read_32(pi_virt); circularQ->producer_index = cpu_to_le32(producer_index); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 7a697ca68501ed072cee6b9bf384e140d859793c..5ef3c7e903f20cd0aa886fa219924910074117f1 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -74,6 +74,7 @@ static struct scsi_host_template pm8001_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, + .slave_alloc = sas_slave_alloc, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = pm8001_scan_finished, diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 947d6017d004c83b3e758392d6278524613a8621..59feda261e08888c33a4893b944cfdaf52a3e23f 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -374,6 +374,13 @@ static int pm8001_task_exec(struct sas_task *task, return 0; } pm8001_ha = pm8001_find_ha_by_dev(task->dev); + if (pm8001_ha->controller_fatal_error) { + struct task_status_struct *ts = &t->task_status; + + ts->resp = SAS_TASK_UNDELIVERED; + t->task_done(t); + return 0; + } PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); spin_lock_irqsave(&pm8001_ha->lock, flags); do { @@ -466,7 +473,7 @@ static int pm8001_task_exec(struct sas_task *task, dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); if (!sas_protocol_ata(t->task_proto)) if (n_elem) - dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, + dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter, t->data_dir); out_done: spin_unlock_irqrestore(&pm8001_ha->lock, flags); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 80b4dd6df0c251adfc12e67d8f7cf64299f73a7d..1816e351071fa4d1cd3a23ce068f99052ce4067a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -538,6 +538,7 @@ struct pm8001_hba_info { u32 logging_level; u32 fw_status; u32 smp_exp_mode; + bool controller_fatal_error; const struct firmware *fw_image; struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; u32 reset_in_progress; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 42f0405601ad1bd0a495af2d99f1702b5763ba78..327992fbb553a35fc7364b0dc7c57a0316261f51 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); 
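(Illustrative sketch, not part of the patch: the pm8001 hunks above only touch an outbound queue once its producer-index mapping exists, so a spurious interrupt taken during kexec, before the driver has set up pi_virt, is simply ignored. The stand-alone analogue below uses entirely hypothetical names to show the same guard.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical outbound-queue view: pi_shared stays NULL until the
 * producer-index mapping has been set up, mirroring pi_virt above. */
struct oq_view {
	const volatile uint32_t *pi_shared; /* producer index, written by firmware */
	uint32_t ci;                        /* consumer index, owned by the driver */
	uint32_t nentries;
};

/* Number of entries ready to consume, or 0 when the queue is not yet
 * initialised (the "if (!pi_virt) break;" case). */
static uint32_t oq_pending(const struct oq_view *q)
{
	if (!q->pi_shared)
		return 0;
	return (*q->pi_shared - q->ci) % q->nentries;
}

int main(void)
{
	uint32_t hw_pi = 5;
	struct oq_view q = { .pi_shared = NULL, .ci = 0, .nentries = 64 };

	printf("before setup: %u pending\n", (unsigned)oq_pending(&q)); /* 0, safely */
	q.pi_shared = &hw_pi;
	printf("after setup:  %u pending\n", (unsigned)oq_pending(&q)); /* 5 */
	return 0;
}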
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + /* Update Fatal error interrupt vector */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + ((pm8001_ha->number_of_intr - 1) << 8); pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, @@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) return -EBUSY; } + /* Initialize the controller fatal error flag */ + pm8001_ha->controller_fatal_error = false; + /* Initialize pci space address eg: mpi offset */ init_pci_device_addresses(pm8001_ha); init_default_table_values(pm8001_ha); @@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) u32 bootloader_state; u32 ibutton0, ibutton1; - /* Check if MPI is in ready state to reset */ - if (mpi_uninit_check(pm8001_ha) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("MPI state is not ready\n")); - return -1; + /* Process MPI table uninitialization only if FW is ready */ + if (!pm8001_ha->controller_fatal_error) { + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "MPI state is not ready scratch1 :0x%x\n", + regval)); + return -1; + } } - /* checked for reset register normal state; 0x0 */ regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); PM8001_INIT_DBG(pm8001_ha, @@ -2372,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("task 0x%p done with io_status 0x%x" " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat)); + if (t->slow_task) + complete(&t->slow_task->completion); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); @@ -3406,10 +3418,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, (struct set_ctrl_cfg_resp *)(piomb + 4); u32 status = le32_to_cpu(pPayload->status); u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + u32 tag = le32_to_cpu(pPayload->tag); PM8001_MSG_DBG(pm8001_ha, pm8001_printk( "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", status, err_qlfr_pgcd)); + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -3752,6 +3766,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } +static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha) +{ + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 
0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7))); +} + static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; @@ -3759,10 +3813,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) u8 uninitialized_var(bc); u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; + u32 regval; + if (vec == (pm8001_ha->number_of_intr - 1)) { + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + if ((regval & SCRATCH_PAD_MIPSALL_READY) != + SCRATCH_PAD_MIPSALL_READY) { + pm8001_ha->controller_fatal_error = true; + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "Firmware Fatal error! Regval:0x%x\n", regval)); + print_scratchpad_registers(pm8001_ha); + return ret; + } + } spin_lock_irqsave(&pm8001_ha->lock, flags); circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { + /* spurious interrupt during setup if kexec-ing and + * driver doing a doorbell access w/ the pre-kexec oq + * interrupt setup. + */ + if (!circularQ->pi_virt) + break; ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 889e69ce3689b18683ac142550dc6522cb97dcfd..7dd2699d0efb5da1b12483ec7af41788dd4ba78f 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -1384,6 +1384,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; #define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 #define SCRATCH_PAD_IOP0_READY 0xC00 #define SCRATCH_PAD_IOP1_READY 0x3000 +#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \ + SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_RAAE_READY) /* boot loader state */ #define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 4e86994e10e81f4c41a2617ce8660e97ee772e66..9d1384f3c8c00f0a628757c0911a26124de23da3 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3609,7 +3609,7 @@ static long pmcraid_ioctl_passthrough( u32 ioasc; int request_size; int buffer_size; - u8 access, direction; + u8 direction; int rc = 0; /* If IOA reset is in progress, wait 10 secs for reset to complete */ @@ -3658,10 +3658,8 @@ static long pmcraid_ioctl_passthrough( request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { - access = VERIFY_READ; direction = DMA_TO_DEVICE; } else { - access = VERIFY_WRITE; direction = DMA_FROM_DEVICE; } diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c index f2397ee9ba690d71a0454352bd8b9b1cb928362f..f7d170bffc826db7c3e23b0c217499ab54e6abed 100644 --- a/drivers/scsi/qedf/qedf_dbg.c +++ b/drivers/scsi/qedf/qedf_dbg.c @@ -15,10 +15,6 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); 
va_start(va, fmt); @@ -27,9 +23,9 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, if (likely(qedf) && likely(qedf->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), - nfunc, line, qedf->host_no, &vaf); + func, line, qedf->host_no, &vaf); else - pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } @@ -40,10 +36,6 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -55,9 +47,9 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, if (likely(qedf) && likely(qedf->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), - nfunc, line, qedf->host_no, &vaf); + func, line, qedf->host_no, &vaf); else - pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -69,10 +61,6 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -84,10 +72,10 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line, if (likely(qedf) && likely(qedf->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", - dev_name(&(qedf->pdev->dev)), nfunc, line, + dev_name(&(qedf->pdev->dev)), func, line, qedf->host_no, &vaf); else - pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -99,10 +87,6 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -114,9 +98,9 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, if (likely(qedf) && likely(qedf->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)), - nfunc, line, qedf->host_no, &vaf); + func, line, qedf->host_no, &vaf); else - pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index c29c162a494ff13a0631def00d6156ca039e7fa2..e7a609b5791dec436cb446f50faf2d53e1e07898 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -181,7 +181,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, if (!count || *ppos) return 0; - kern_buf = memdup_user(buffer, count); + kern_buf = memdup_user_nul(buffer, count); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 04f0c4d2e256eca78f5a1d8be3e1e8b56f638377..5178cd03666a661aca0406b82545a44c922b19aa 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -23,8 +23,6 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, int rc = 0; uint32_t did, sid; uint16_t xid; - uint32_t start_time = jiffies / HZ; - uint32_t current_time; struct fcoe_wqe *sqe; unsigned long flags; u16 sqe_idx; @@ -59,18 +57,12 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, goto els_err; } -retry_els: els_req = qedf_alloc_cmd(fcport, QEDF_ELS); if 
(!els_req) { - current_time = jiffies / HZ; - if ((current_time - start_time) > 10) { - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, - "els: Failed els 0x%x\n", op); - rc = -ENOMEM; - goto els_err; - } - mdelay(20 * USEC_PER_MSEC); - goto retry_els; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, + "Failed to alloc ELS request 0x%x\n", op); + rc = -ENOMEM; + goto els_err; } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = " diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 6bbc38b1b4654d199eae48f5baaaa7f0c8ece299..a17c13846d1eb3cceecaa95529721d73efe6d971 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); kref_put(&io_req->refcount, qedf_release_cmd); + return -EINVAL; } /* Obtain free SQE */ diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 0a5dd5595dd3c42179543d8453e9d3afc98ba3f7..30f62b65ffa9e77e617e0670494be88b7558cf0f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -477,7 +477,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf, lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; if (link->supported_caps & SUPPORTED_100000baseKR4_Full) lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; - fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; + if (lport->host && lport->host->shost_data) + fc_host_supported_speeds(lport->host) = + lport->link_supported_speeds; } static void qedf_link_update(void *dev, struct qed_link_output *link) @@ -1418,7 +1420,7 @@ static struct libfc_function_template qedf_lport_template = { static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) { - fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO); + fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); qedf->ctlr.send = qedf_fip_send; qedf->ctlr.get_src_addr = qedf_get_src_mac; diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 8fd28b056f73f389f6bb124c385f414cee28c21e..3383314a3882bcdbf1e0cae1d76bfc4d41cd713c 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); va_end(va); } @@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ 
-70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_notice("[%s]:[%s:%d]:%d: %pV", - dev_name(&qedi->pdev->dev), nfunc, line, + dev_name(&qedi->pdev->dev), func, line, qedi->host_no, &vaf); else - pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); @@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, { va_list va; struct va_format vaf; - char nfunc[32]; - - memset(nfunc, 0, sizeof(nfunc)); - memcpy(nfunc, func, sizeof(nfunc) - 1); va_start(va, fmt); @@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, if (likely(qedi) && likely(qedi->pdev)) pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), - nfunc, line, qedi->host_no, &vaf); + func, line, qedi->host_no, &vaf); else - pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); + pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf); ret: va_end(va); diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index fd914ca4149a8bfbc6de3e1e65b51824a0d947a9..6bb5f2b31b881163d6c6acab0d4edd54660a245b 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -136,15 +136,11 @@ static ssize_t qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - size_t cnt = 0; - - if (*ppos) - return 0; + char buf[64]; + int len; - cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); - cnt = min_t(int, count, cnt - *ppos); - *ppos += cnt; - return cnt; + len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover); + return simple_read_from_buffer(buffer, count, ppos, buf, len); } static int diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 25d763ae5d5a6a81a56dde06608e1c2236aa802b..d9a6f021050ecc319a990fae16acfa98db129d6a 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -1454,7 +1454,7 @@ static void qedi_tmf_work(struct work_struct *work) ldel_exit: spin_lock_bh(&qedi_conn->tmf_work_lock); - if (!qedi_cmd->list_tmf_work) { + if (qedi_cmd->list_tmf_work) { list_del_init(&list_work->list); qedi_cmd->list_tmf_work = NULL; kfree(list_work); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 2f0a4f2c5ff803fbc663093b9118479219c5a1f3..1b7049dce1699becfee72df21afb53526ea2fc31 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -810,8 +810,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, struct qedi_endpoint *qedi_ep; struct sockaddr_in *addr; struct sockaddr_in6 *addr6; - struct qed_dev *cdev = NULL; - struct qedi_uio_dev *udev = NULL; struct iscsi_path path_req; u32 msg_type = ISCSI_KEVENT_IF_DOWN; u32 iscsi_cid = QEDI_CID_RESERVED; @@ -831,8 +829,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi = iscsi_host_priv(shost); - cdev = qedi->cdev; - udev = qedi->udev; if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) || test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { @@ -954,6 +950,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) qedi_ep = ep->dd_data; if (qedi_ep->state == EP_STATE_IDLE || 
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE || qedi_ep->state == EP_STATE_OFLDCONN_FAILED) return -1; @@ -1000,6 +997,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; + if (qedi_ep->state == EP_STATE_OFLDCONN_START) + goto ep_exit_recover; + flush_work(&qedi_ep->offload_work); if (qedi_ep->conn) { @@ -1036,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) switch (qedi_ep->state) { case EP_STATE_OFLDCONN_START: + case EP_STATE_OFLDCONN_NONE: goto ep_release_conn; case EP_STATE_OFLDCONN_FAILED: break; @@ -1226,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) if (!is_valid_ether_addr(&path_data->mac_addr[0])) { QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); + qedi_ep->state = EP_STATE_OFLDCONN_NONE; ret = -EIO; goto set_path_exit; } diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212fa42cdcb6c481c728fe06be956065..892d70d545537320e6b742ca099e0e1583bcb64a 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -59,6 +59,7 @@ enum { EP_STATE_OFLDCONN_FAILED = 0x2000, EP_STATE_CONNECT_FAILED = 0x4000, EP_STATE_DISCONN_TIMEDOUT = 0x8000, + EP_STATE_OFLDCONN_NONE = 0x10000, }; struct qedi_conn; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e5bd035ebad0f7c7bc5e40ef4a6b3bbafb53df3c..04aa7953c0c4f61eeb860adfd7e27c3f32305e31 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -357,6 +357,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); @@ -629,7 +630,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev) goto exit_setup_shost; } - shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA; + shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1; shost->max_channel = 0; shost->max_lun = ~0; shost->max_cmd_len = 16; @@ -952,6 +953,12 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi, cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; + if (!iscsi_is_session_online(cls_sess)) + continue; + + if (!sess->targetname) + continue; + if (pri_ctrl_flags) { if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 4888b999e82fb3e4885dea290a921155599dd17b..b008d583dd6e1f9bf700d03cf1b03da01061304f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -345,7 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, } ha->optrom_region_start = start; - ha->optrom_region_size = start + size; + ha->optrom_region_size = size; ha->optrom_state = QLA_SREADING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); @@ -418,7 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, } ha->optrom_region_start = start; - ha->optrom_region_size = start + size; + ha->optrom_region_size = size; ha->optrom_state = QLA_SWRITING; ha->optrom_buffer = vmalloc(ha->optrom_region_size); @@ -655,7 +655,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, break; } else { /* Make sure FC side is not in reset */ - qla2x00_wait_for_hba_online(vha); + 
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != + QLA_SUCCESS); /* Issue MPI reset */ scsi_block_requests(vha->host); @@ -2161,6 +2162,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) msleep(1000); + qla_nvme_delete(vha); + qla24xx_disable_vp(vha); qla2x00_wait_for_sess_deletion(vha); @@ -2191,6 +2194,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; + vfree(vha->scan.l); if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index c11a89be292c83036a020442e3689181cb8e2809..5a56b8af8d2bb71dbf8ac094c010195bb28dbe32 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -258,7 +258,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); uint16_t nextlid = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { @@ -342,6 +342,8 @@ qla2x00_process_els(struct bsg_job *bsg_job) dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); rval = -ENOMEM; goto done_free_fcport; } @@ -349,6 +351,8 @@ qla2x00_process_els(struct bsg_job *bsg_job) rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); rval = -ENOMEM; goto done_free_fcport; } @@ -405,7 +409,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) goto done_free_fcport; done_free_fcport: - if (bsg_request->msgcode == FC_BSG_RPT_ELS) + if (bsg_request->msgcode != FC_BSG_RPT_ELS) kfree(fcport); done: return rval; @@ -433,7 +437,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); int req_sg_cnt, rsp_sg_cnt; uint16_t loop_id; struct fc_port *fcport; @@ -1775,8 +1779,8 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) uint16_t nextlid = 0; uint32_t tot_dsds; srb_t *sp = NULL; - uint32_t req_data_len = 0; - uint32_t rsp_data_len = 0; + uint32_t req_data_len; + uint32_t rsp_data_len; /* Check the type of the adapter */ if (!IS_BIDI_CAPABLE(ha)) { @@ -1881,6 +1885,9 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) goto done_unmap_sg; } + req_data_len = bsg_job->request_payload.payload_len; + rsp_data_len = bsg_job->reply_payload.payload_len; + if (req_data_len != rsp_data_len) { rval = EXT_STATUS_BUSY; ql_log(ql_log_warn, vha, 0x70aa, @@ -1888,10 +1895,6 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) goto done_unmap_sg; } - req_data_len = bsg_job->request_payload.payload_len; - rsp_data_len = bsg_job->reply_payload.payload_len; - - /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); if (!sp) { @@ -1948,7 +1951,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); struct qla_mt_iocb_rqst_fx00 
*piocb_rqst; srb_t *sp; int req_sg_cnt = 0, rsp_sg_cnt = 0; @@ -2487,7 +2490,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) vha = shost_priv(host); } - if (qla2x00_reset_active(vha)) { + if (qla2x00_chip_is_down(vha)) { ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", bsg_request->msgcode); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index a0038d879b9dfb285ac9bca02d27bc115715aef8..f621cb55ccfb27196605f849d8a56cb2a4df0b6c 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3261,6 +3261,11 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) "Async done-%s res %x, WWPN %8phC \n", sp->name, res, fcport->port_name); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + if (res == (DID_ERROR << 16)) { /* entry status error */ goto done; @@ -3272,7 +3277,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) ql_dbg(ql_dbg_disc, vha, 0x2019, "GPSC command unsupported, disabling query.\n"); ha->flags.gpsc_supported = 0; - res = QLA_SUCCESS; + goto done; } } else { switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { @@ -3305,7 +3310,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) be16_to_cpu(ct_rsp->rsp.gpsc.speeds), be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } -done: memset(&ea, 0, sizeof(ea)); ea.event = FCME_GPSC_DONE; ea.rc = res; @@ -3313,6 +3317,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) ea.sp = sp; qla2x00_fcport_event_handler(vha, &ea); +done: sp->free(sp); } @@ -3355,15 +3360,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla24xx_async_gpsc_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x205e, "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->name, fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; return rval; done_free_sp: @@ -3724,13 +3729,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_gpnid_sp_done; + ql_dbg(ql_dbg_disc, vha, 0x2067, + "Async-%s hdl=%x ID %3phC.\n", sp->name, + sp->handle, ct_req->req.port_id.port_id); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x2067, - "Async-%s hdl=%x ID %3phC.\n", sp->name, - sp->handle, ct_req->req.port_id.port_id); return rval; done_free_sp: @@ -3896,9 +3902,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) fc_port_t *fcport; u32 i, rc; bool found; - struct fab_scan_rp *rp; + struct fab_scan_rp *rp, *trp; unsigned long flags; u8 recheck = 0; + u16 dup = 0, dup_cnt = 0; ql_dbg(ql_dbg_disc, vha, 0xffff, "%s enter\n", __func__); @@ -3929,6 +3936,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) for (i = 0; i < vha->hw->max_fibre_devices; i++) { u64 wwn; + int k; rp = &vha->scan.l[i]; found = false; @@ -3937,6 +3945,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if (wwn == 0) continue; + /* Remove duplicate NPORT ID entries from switch data base */ + for (k = i + 1; k < vha->hw->max_fibre_devices; k++) { + trp = &vha->scan.l[k]; + if (rp->id.b24 == trp->id.b24) { + dup = 1; + dup_cnt++; + ql_dbg(ql_dbg_disc + ql_dbg_verbose, + vha, 0xffff, + 
"Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n", + rp->id.b24, rp->port_name, trp->port_name); + memset(trp, 0, sizeof(*trp)); + } + } + if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) continue; @@ -3976,6 +3998,12 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } } + if (dup) { + ql_log(ql_log_warn, vha, 0xffff, + "Detected %d duplicate NPORT ID(s) from switch data base\n", + dup_cnt); + } + /* * Logout all previous fabric dev marked lost, except FCP2 devices. */ @@ -4039,6 +4067,41 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } } +static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, + srb_t *sp, int cmd) +{ + struct qla_work_evt *e; + + if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) + return QLA_PARAMETER_ERROR; + + e = qla2x00_alloc_work(vha, cmd); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.iosb.sp = sp; + + return qla2x00_post_work(vha, e); +} + +static int qla2x00_post_nvme_gpnft_done_work(struct scsi_qla_host *vha, + srb_t *sp, int cmd) +{ + struct qla_work_evt *e; + + if (cmd != QLA_EVT_GPNFT) + return QLA_PARAMETER_ERROR; + + e = qla2x00_alloc_work(vha, cmd); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.gpnft.fc4_type = FC4_TYPE_NVME; + e->u.gpnft.sp = sp; + + return qla2x00_post_work(vha, e); +} + static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, struct srb *sp) { @@ -4139,22 +4202,36 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) { struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; - struct qla_work_evt *e; struct ct_sns_req *ct_req = (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; u16 cmd = be16_to_cpu(ct_req->command); u8 fc4_type = sp->gen2; unsigned long flags; + int rc; /* gen2 field is holding the fc4type */ ql_dbg(ql_dbg_disc, vha, 0xffff, "Async done-%s res %x FC4Type %x\n", sp->name, res, sp->gen2); + sp->rc = res; if (res) { unsigned long flags; + const char *name = sp->name; + + /* + * We are in an Interrupt context, queue up this + * sp for GNNFT_DONE work. This will allow all + * the resource to get freed up. + */ + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GNNFT_DONE); + if (rc) { + /* Cleanup here to prevent memory leak */ + qla24xx_sp_unmap(vha, sp); + sp->free(sp); + } - sp->free(sp); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; vha->scan.scan_retry++; @@ -4165,9 +4242,9 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { - ql_dbg(ql_dbg_disc, sp->vha, 0xffff, - "Async done-%s rescan failed on all retries\n", - sp->name); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async done-%s rescan failed on all retries.\n", + name); } return; } @@ -4182,77 +4259,31 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); - e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT); - if (!e) { - /* - * please ignore kernel warning. Otherwise, - * we have mem leak. 
- */ - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s unable to alloc work element\n", - sp->name); - sp->free(sp); + sp->rc = res; + rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT); + if (!rc) { + qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return; } - e->u.gpnft.fc4_type = FC4_TYPE_NVME; - sp->rc = res; - e->u.gpnft.sp = sp; - - qla2x00_post_work(vha, e); - return; } - if (cmd == GPN_FT_CMD) - e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE); - else - e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE); - if (!e) { - /* please ignore kernel warning. Otherwise, we have mem leak. */ - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } + if (cmd == GPN_FT_CMD) { + del_timer(&sp->u.iocb_cmd.timer); + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GPNFT_DONE); + } else { + rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, + QLA_EVT_GNNFT_DONE); + } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s unable to alloc work element\n", - sp->name); - sp->free(sp); + if (rc) { + qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return; } - - sp->rc = res; - e->u.iosb.sp = sp; - - qla2x00_post_work(vha, e); } /* @@ -4351,7 +4382,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s enter\n", __func__); - del_timer(&sp->u.iocb_cmd.timer); qla24xx_async_gnnft(vha, sp, sp->gen2); } @@ -4444,9 +4474,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - rspsz = sizeof(struct ct_sns_gpnft_rsp) + - ((vha->hw->max_fibre_devices - 1) * - sizeof(struct ct_sns_gpn_ft_data)); + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; /* CT_IU preamble */ @@ -4601,6 +4631,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b934977c5c260086a4ff9776d0b9c577b169542e..753338d5343b912b15153547ff149279f1e153e7 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t) struct srb_iocb *iocb; struct req_que *req; unsigned long flags; + struct qla_hw_data *ha = sp->vha->hw; 
- spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + WARN_ON_ONCE(irqs_disabled()); + spin_lock_irqsave(&ha->hardware_lock, flags); req = sp->qpair->req; req->outstanding_cmds[sp->handle] = NULL; iocb = &sp->u.iocb_cmd; - spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + spin_unlock_irqrestore(&ha->hardware_lock, flags); iocb->timeout(sp); } @@ -214,8 +216,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) - goto done; + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); + return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -235,15 +242,19 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_login_sp_done; - if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { + if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; - } else { + else lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; - if (fcport->fc4f_nvme) - lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; + if (fcport->fc4f_nvme) + lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; - } + ql_dbg(ql_dbg_disc, vha, 0x2072, + "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " + "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, + fcport->login_retry); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -252,11 +263,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " - "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->login_retry); return rval; done_free_sp: @@ -285,9 +291,6 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) - return rval; - fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_logout_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->port_name); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; return rval; done_free_sp: @@ -429,6 +433,7 @@ int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; + fcport->disc_state = DSC_LOGIN_PEND; return qla2x00_post_work(vha, e); } @@ -487,13 +492,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, sp->done = qla2x00_async_adisc_sp_done; if (data[1] & QLA_LOGIO_LOGIN_RETRIED) lio->u.logio.flags |= SRB_LOGIN_RETRIED; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x206f, "Async-adisc - hdl=%x 
loopid=%x portid=%06x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: @@ -641,11 +648,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, break; case DSC_LS_PORT_UNAVAIL: default: - if (fcport->loop_id != FC_NO_LOOP_ID) - qla2x00_clear_loop_id(fcport); - - fcport->loop_id = loop_id; - fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = + DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0x20e5, + "%s %d %8phC\n", __func__, __LINE__, + fcport->port_name); qla24xx_fcport_handle_login(vha, fcport); break; } @@ -787,6 +797,9 @@ qla24xx_async_gnl_sp_done(void *s, int res) sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], sp->u.iocb_cmd.u.mbx.in_mb[2]); + if (res == QLA_FUNCTION_TIMEOUT) + return; + memset(&ea, 0, sizeof(ea)); ea.sp = sp; ea.rc = res; @@ -972,6 +985,9 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + memset(&ea, 0, sizeof(ea)); ea.event = FCME_GPDB_DONE; ea.fcport = fcport; @@ -979,6 +995,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) qla2x00_fcport_event_handler(vha, &ea); +done: dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); @@ -1106,8 +1123,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); return rval; + } fcport->disc_state = DSC_GPDB; @@ -1147,14 +1169,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) sp->done = qla24xx_async_gpdb_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x20dc, "Async-%s %8phC hndl %x opt %x\n", sp->name, fcport->port_name, sp->handle, opt); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; return rval; done_free_sp: @@ -1684,15 +1705,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, tm_iocb->u.tmf.data = tag; sp->done = qla2x00_tmf_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_taskm, vha, 0x802f, "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; wait_for_completion(&tm_iocb->u.tmf.comp); rval = tm_iocb->u.tmf.data; @@ -1708,13 +1728,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, /* Issue Marker IOCB */ qla2x00_marker(vha, vha->hw->req_q_map[0], - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, + vha->hw->rsp_q_map[0], fcport->loop_id, lun, flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); } done_free_sp: sp->free(sp); - sp->fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } @@ -1747,47 +1767,45 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; - fc_port_t *fcport = cmd_sp->fcport; struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + sp = qla2xxx_get_qpair_sp(cmd_sp->qpair, cmd_sp->fcport, GFP_KERNEL); if (!sp) goto done; abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; + sp->qpair = cmd_sp->qpair; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; abt_iocb->timeout = qla24xx_abort_iocb_timeout; init_completion(&abt_iocb->u.abt.comp); - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); + /* FW can send 2 x ABTS's timeout/20s */ + qla2x00_init_timer(sp, 42); abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; - - if (vha->flags.qpairs_available && cmd_sp->qpair) - abt_iocb->u.abt.req_que_no = - cpu_to_le16(cmd_sp->qpair->req->id); - else - abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id); + abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); sp->done = qla24xx_abort_sp_done; + ql_dbg(ql_dbg_async, vha, 0x507c, + "Abort command issued - hdl=%x, type=%x\n", + cmd_sp->handle, cmd_sp->type); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; - ql_dbg(ql_dbg_async, vha, 0x507c, - "Abort command issued - hdl=%x, target_id=%x\n", - cmd_sp->handle, fcport->tgt_id); - if (wait) { wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? QLA_SUCCESS : QLA_FUNCTION_FAILED; + } else { + goto done; } done_free_sp: @@ -1887,8 +1905,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } - if (fcport->disc_state == DSC_DELETE_PEND) + if ((fcport->disc_state == DSC_DELETE_PEND) || + (fcport->disc_state == DSC_DELETED)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; + } if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. 
*/ @@ -1952,25 +1973,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) cid.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x20ec, - "%s %d %8phC LoopID 0x%x in use post gnl\n", + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", __func__, __LINE__, ea->fcport->port_name, - ea->fcport->loop_id); + ea->fcport->loop_id, cid.b24); - if (IS_SW_RESV_ADDR(cid)) { - set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); - ea->fcport->loop_id = FC_NO_LOOP_ID; - } else { - qla2x00_clear_loop_id(ea->fcport); - } + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; qla24xx_post_gnl_work(vha, ea->fcport); break; case MBS_PORT_ID_USED: - ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, - ea->fcport->d_id.b.al_pa); - lid = ea->iop[1] & 0xffff; qlt_find_sess_invalidate_other(vha, wwn_to_u64(ea->fcport->port_name), @@ -4711,6 +4722,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; + fcport->fp_speed = PORT_SPEED_UNKNOWN; fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, @@ -4725,7 +4737,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); - fcport = NULL; + return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); INIT_LIST_HEAD(&fcport->gnl_entry); @@ -4802,14 +4814,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); - } else if (ha->current_topology == ISP_CFG_N) { - clear_bit(RSCN_UPDATE, &flags); - if (qla_tgt_mode_enabled(vha)) { - /* allow the other side to start the login */ - clear_bit(LOCAL_LOOP_UPDATE, &flags); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } - } else if (ha->current_topology == ISP_CFG_NL) { + } else if (ha->current_topology == ISP_CFG_NL || + ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || @@ -4853,19 +4859,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) */ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { - if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { - spin_lock_irqsave(&ha->tgt.atio_lock, - flags); - qlt_24xx_process_atio_queue(vha, 0); - spin_unlock_irqrestore( - &ha->tgt.atio_lock, flags); - } else { - spin_lock_irqsave(&ha->hardware_lock, - flags); - qlt_24xx_process_atio_queue(vha, 1); - spin_unlock_irqrestore( - &ha->hardware_lock, flags); - } + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, + flags); } } } @@ -6494,6 +6491,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + ha->link_data_rate = PORT_SPEED_UNKNOWN; SAVE_TOPO(ha); ha->flags.rida_fmt2 = 0; ha->flags.n2n_ae = 0; @@ -6549,8 +6547,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } /* Clear all async request states across all VPs. 
*/ - list_for_each_entry(fcport, &vha->vp_fcports, list) + list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->scan_state = 0; + } spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); @@ -6682,7 +6682,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) * The next call disables the board * completely. */ - ha->isp_ops->reset_adapter(vha); + qla2x00_abort_isp_cleanup(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -7142,7 +7142,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); - vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; @@ -7153,7 +7152,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) icb->firmware_options_2 |= cpu_to_le32( (uint32_t)ha->zio_mode); icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); - vha->flags.process_response_queue = 1; } if (rval) { @@ -8718,8 +8716,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) struct qla_hw_data *ha = qpair->hw; qpair->delete_in_progress = 1; - while (atomic_read(&qpair->ref_count)) - msleep(500); ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 42ac8e097419cb6169d3e2c4e2fba18d1b152732..c64d8da49491e65f3d733af361ad8295fb3483b1 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); - - /* Manage unprocessed RIO/ZIO commands in response queue. */ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -1575,7 +1569,7 @@ qla24xx_dif_start_scsi(srb_t *sp) #define QDSS_GOT_Q_SPACE BIT_0 /* Only process protection or >16 cdb in this routine */ - if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { + if (scsi_prot_op_normal(cmd)) { if (cmd->cmd_len <= 16) return qla24xx_start_scsi(sp); } @@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); - - /* Manage unprocessed RIO/ZIO commands in response queue. */ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) /* Set chip new ring index. */ WRT_REG_DWORD(req->req_q_in, req->ring_index); - /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ - if (vha->flags.process_response_queue && - rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); - spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_SUCCESS; @@ -1935,13 +1918,13 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) } if (!qpair->difdix_supported && - scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + !scsi_prot_op_normal(cmd)) { cmd->result = DID_NO_CONNECT << 16; return QLA_INTERFACE_ERROR; } /* Only process protection or >16 cdb in this routine */ - if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { + if (scsi_prot_op_normal(cmd)) { if (cmd->cmd_len <= 16) return qla2xxx_start_scsi_mq(sp); } @@ -2554,7 +2537,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, - (uint8_t *)els_iocb, 0x70); + (uint8_t *)els_iocb, + sizeof(*els_iocb)); } else { els_iocb->tx_byte_count = sizeof(struct els_logo_payload); els_iocb->tx_address[0] = @@ -2720,7 +2704,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, - (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, + sizeof(*elsio->u.els_plogi.els_plogi_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -3314,19 +3299,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) { struct srb_iocb *aio = &sp->u.iocb_cmd; scsi_qla_host_t *vha = sp->vha; - struct req_que *req = vha->req; + struct req_que *req = sp->qpair->req; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); abt_iocb->entry_type = ABORT_IOCB_TYPE; abt_iocb->entry_count = 1; abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); - abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + if (sp->fcport) { + abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + abt_iocb->port_id[1] = sp->fcport->d_id.b.area; + abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; + } abt_iocb->handle_to_abort = cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, aio->u.abt.cmd_hndl)); - abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; - abt_iocb->port_id[1] = sp->fcport->d_id.b.area; - abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; abt_iocb->vp_index = vha->vp_idx; abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); /* Send the command to the firmware */ diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 36cbb29c84f63977063414f5082165e196fc2142..e6d162945f5db4785178230863e8d6f3822a9509 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1049,8 +1049,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); - - qlt_async_event(mb[0], vha, mb); break; } @@ -1067,8 +1065,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); - - qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ @@ -2837,6 +2833,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) case ELS_IOCB_TYPE: case ABORT_IOCB_TYPE: case MBX_IOCB_TYPE: + default: sp = 
qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(sp, res); @@ -2847,7 +2844,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: - default: return 1; } fatal: @@ -3121,6 +3117,7 @@ qla24xx_intr_handler(int irq, void *dev_id) uint16_t mb[8]; struct rsp_que *rsp; unsigned long flags; + bool process_atio = false; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -3181,22 +3178,13 @@ qla24xx_intr_handler(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE_27XX: - case INTR_ATIO_QUE_UPDATE:{ - unsigned long flags2; - spin_lock_irqsave(&ha->tgt.atio_lock, flags2); - qlt_24xx_process_atio_queue(vha, 1); - spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); + case INTR_ATIO_QUE_UPDATE: + process_atio = true; break; - } - case INTR_ATIO_RSP_QUE_UPDATE: { - unsigned long flags2; - spin_lock_irqsave(&ha->tgt.atio_lock, flags2); - qlt_24xx_process_atio_queue(vha, 1); - spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); - + case INTR_ATIO_RSP_QUE_UPDATE: + process_atio = true; qla24xx_process_response_queue(vha, rsp); break; - } default: ql_dbg(ql_dbg_async, vha, 0x504f, "Unrecognized interrupt type (%d).\n", stat * 0xff); @@ -3210,6 +3198,12 @@ qla24xx_intr_handler(int irq, void *dev_id) qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (process_atio) { + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + } + return IRQ_HANDLED; } @@ -3256,6 +3250,7 @@ qla24xx_msix_default(int irq, void *dev_id) uint32_t hccr; uint16_t mb[8]; unsigned long flags; + bool process_atio = false; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -3312,22 +3307,13 @@ qla24xx_msix_default(int irq, void *dev_id) qla24xx_process_response_queue(vha, rsp); break; case INTR_ATIO_QUE_UPDATE_27XX: - case INTR_ATIO_QUE_UPDATE:{ - unsigned long flags2; - spin_lock_irqsave(&ha->tgt.atio_lock, flags2); - qlt_24xx_process_atio_queue(vha, 1); - spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); + case INTR_ATIO_QUE_UPDATE: + process_atio = true; break; - } - case INTR_ATIO_RSP_QUE_UPDATE: { - unsigned long flags2; - spin_lock_irqsave(&ha->tgt.atio_lock, flags2); - qlt_24xx_process_atio_queue(vha, 1); - spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); - + case INTR_ATIO_RSP_QUE_UPDATE: + process_atio = true; qla24xx_process_response_queue(vha, rsp); break; - } default: ql_dbg(ql_dbg_async, vha, 0x5051, "Unrecognized interrupt type (%d).\n", stat & 0xff); @@ -3338,6 +3324,12 @@ qla24xx_msix_default(int irq, void *dev_id) qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (process_atio) { + spin_lock_irqsave(&ha->tgt.atio_lock, flags); + qlt_24xx_process_atio_queue(vha, 0); + spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); + } + return IRQ_HANDLED; } @@ -3422,10 +3414,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ha->msix_count, ret); goto msix_out; } else if (ret < ha->msix_count) { - ql_log(ql_log_warn, vha, 0x00c6, - "MSI-X: Failed to enable support " - "with %d vectors, using %d vectors.\n", - ha->msix_count, ret); + ql_log(ql_log_info, vha, 0x00c6, + "MSI-X: Using %d vectors\n", ret); ha->msix_count = ret; /* Recalculate queue values */ if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { @@ -3449,7 +3439,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que 
*rsp) ql_log(ql_log_fatal, vha, 0x00c8, "Failed to allocate memory for ha->msix_entries.\n"); ret = -ENOMEM; - goto msix_out; + goto free_irqs; } ha->flags.msix_enabled = 1; @@ -3532,6 +3522,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) msix_out: return ret; + +free_irqs: + pci_free_irq_vectors(ha->pdev); + goto msix_out; } int diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2c6c2cd5a0d0748499aa14b0d2b3e2df74914b6f..abef3b29fa101c407629f99039954846c1dc55df 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -493,7 +493,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - } else if (!abort_active) { + } else if (current == ha->dpc_thread) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x101d, "Timeout, calling abort_isp.\n"); @@ -684,6 +684,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; mcp->mb[4] = 0; + mcp->mb[11] = 0; ha->flags.using_lr_setting = 0; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { @@ -727,7 +728,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) if (ha->flags.exchoffld_enabled) mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; - mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; + mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); @@ -3762,10 +3763,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; - if (IS_CNA_CAPABLE(vha->hw)) - mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); - else - mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); + mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; @@ -3873,6 +3871,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, vha->d_id.b24 = 0; vha->d_id.b.al_pa = 1; ha->flags.n2n_bigger = 1; + ha->flags.n2n_ae = 0; id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, @@ -3883,6 +3882,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, "Format 1: Remote login - Waiting for WWPN %8phC.\n", rptid_entry->u.f1.port_name); ha->flags.n2n_bigger = 0; + ha->flags.n2n_ae = 1; } qla24xx_post_newsess_work(vha, &id, rptid_entry->u.f1.port_name, @@ -3894,7 +3894,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, /* if our portname is higher then initiate N2N login */ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); - ha->flags.n2n_ae = 1; return; break; case TOPO_FL: @@ -6133,17 +6132,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) case QLA_SUCCESS: ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", __func__, sp->name); - sp->free(sp); break; default: ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. 
%x.\n", __func__, sp->name, rval); - sp->free(sp); break; } - return rval; - done_free_sp: sp->free(sp); done: diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index d620f4bebcd0d191ac76df53e2a376c20cc593f2..7b53a6f104f54f824c77e36dce10c4c6b66ca600 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -161,7 +161,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); list_for_each_entry(fcport, &vha->vp_fcports, list) - fcport->logout_on_delete = 0; + fcport->logout_on_delete = 1; qla2x00_mark_all_devices_lost(vha, 0); @@ -931,7 +931,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) - goto done; + return rval; sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; @@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) ql_dbg(ql_dbg_async, vha, 0xffff, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); - goto done_free_sp; + goto done; } ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", @@ -962,16 +962,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) case QLA_SUCCESS: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", __func__, sp->name); - goto done_free_sp; + break; default: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n", __func__, sp->name, rval); - goto done_free_sp; + break; } done: - return rval; - -done_free_sp: sp->free(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 20d9dc39f0fbe00762e83c0b056be68cd434312b..daa412667d6e87882be0083732010950d8364e33 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -30,7 +30,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) return 0; } - if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) + if (qla_nvme_register_hba(vha)) + return 0; + + if (!vha->nvme_local_port) return 0; if (!(fcport->nvme_prli_service_param & @@ -474,21 +477,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, int rval = -ENODEV; srb_t *sp; struct qla_qpair *qpair = hw_queue_handle; - struct nvme_private *priv; + struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; - if (!fd || !qpair) { - ql_log(ql_log_warn, NULL, 0x2134, - "NO NVMe request or Queue Handle\n"); - return rval; - } - - priv = fd->private; fcport = qla_rport->fcport; - if (!fcport) { - ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n"); - return rval; - } vha = fcport->vha; @@ -517,6 +509,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, sp->name = "nvme_cmd"; sp->done = qla_nvme_sp_done; sp->qpair = qpair; + sp->vha = vha; nvme = &sp->u.iocb_cmd; nvme->u.nvme.desc = fd; @@ -564,12 +557,13 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) schedule_work(&fcport->free_work); } - fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING); + fcport->nvme_flag &= ~NVME_FLAG_DELETING; ql_log(ql_log_info, fcport->vha, 0x2110, "remoteport_delete of %p completed.\n", fcport); } static struct nvme_fc_port_template qla_nvme_fc_transport = { + .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, @@ -607,7 +601,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res) { int rval; - if (!test_bit(ABORT_ISP_ACTIVE, 
&sp->vha->dpc_flags)) { + if (ha->flags.fw_started) { rval = ha->isp_ops->abort_command(sp); if (!rval && !qla_nvme_wait_on_command(sp)) ql_log(ql_log_warn, NULL, 0x2112, @@ -660,9 +654,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha) __func__, fcport); nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); - init_completion(&fcport->nvme_del_done); - nvme_fc_unregister_remoteport(fcport->nvme_remote_port); - wait_for_completion(&fcport->nvme_del_done); } if (vha->nvme_local_port) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 42b8f0d3e580da932237826d07c2c36e49bfa7f3..8e9d386146ac6f87618de7567ba936e97eb3ba39 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -889,7 +889,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) } if (!vha->flags.difdix_supported && - scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + !scsi_prot_op_normal(cmd)) { ql_dbg(ql_dbg_io, vha, 0x3004, "DIF Cap not reg, fail DIF capable cmd's:%p.\n", cmd); @@ -1028,8 +1028,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); - if (rval == QLA_INTERFACE_ERROR) - goto qc24_fail_command; goto qc24_host_busy_free_sp; } @@ -1744,6 +1742,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) !ha->flags.eeh_busy && (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && + !qla2x00_isp_reg_stat(ha) && (sp->type == SRB_SCSI_CMD)) { /* * Don't abort commands in @@ -3186,6 +3185,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); + if (unlikely(!ha->wq)) { + ret = -ENOMEM; + goto probe_failed; + } if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, @@ -3395,6 +3398,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; probe_failed: + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; @@ -3486,6 +3495,10 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_try_to_stop_firmware(vha); } + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + /* Turn adapter off line */ vha->flags.online = 0; @@ -3523,6 +3536,8 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); + qla_nvme_delete(vha); + fc_vport_terminate(vha->fc_vport); scsi_host_put(vha->host); @@ -3624,7 +3639,7 @@ qla2x00_remove_one(struct pci_dev *pdev) if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); - + base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); @@ -3663,6 +3678,8 @@ qla2x00_remove_one(struct pci_dev *pdev) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) @@ -4602,6 +4619,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; 
scsi_remove_host(vha->host); return NULL; } @@ -4855,6 +4873,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (fcport) { fcport->id_changed = 1; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); if (pla) { @@ -6041,12 +6060,27 @@ qla2x00_do_dpc(void *data) if (test_and_clear_bit (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && !test_bit(UNLOADING, &base_vha->dpc_flags)) { + bool do_reset = true; + + switch (ql2x_ini_mode) { + case QLA2XXX_INI_MODE_ENABLED: + break; + case QLA2XXX_INI_MODE_DISABLED: + if (!qla_tgt_mode_enabled(base_vha)) + do_reset = false; + break; + case QLA2XXX_INI_MODE_DUAL: + if (!qla_dual_mode_enabled(base_vha)) + do_reset = false; + break; + default: + break; + } - ql_dbg(ql_dbg_dpc, base_vha, 0x4007, - "ISP abort scheduled.\n"); - if (!(test_and_set_bit(ABORT_ISP_ACTIVE, + if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { - + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { /* failed. retry later */ set_bit(ISP_ABORT_NEEDED, @@ -6054,10 +6088,9 @@ qla2x00_do_dpc(void *data) } clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x4008, + "ISP abort end.\n"); } - - ql_dbg(ql_dbg_dpc, base_vha, 0x4008, - "ISP abort end.\n"); } if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8c811b251d4289c73bc4d2691f1a154e3187b7a8..5236912603f0e8ac75a3402e31353d5709203c69 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -684,7 +684,6 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { fc_port_t *t; - unsigned long flags; switch (e->u.nack.type) { case SRB_NACK_PRLI: @@ -694,10 +693,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) if (t) { ql_log(ql_log_info, vha, 0xd034, "%s create sess success %p", __func__, t); - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create sess has an extra kref */ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } break; } @@ -709,9 +706,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, del_work); struct qla_hw_data *ha = fcport->vha->hw; - unsigned long flags; - - spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (fcport->se_sess) { ha->tgt.tgt_ops->shutdown_sess(fcport); @@ -719,7 +713,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work) } else { qlt_unreg_sess(fcport); } - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } /* @@ -788,8 +781,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->port_name, sess->loop_id); sess->local = 0; } - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + ha->tgt.tgt_ops->put_sess(sess); } /* @@ -981,6 +975,8 @@ void qlt_free_session_done(struct work_struct *work) sess->send_els_logo); if (!IS_SW_RESV_ADDR(sess->d_id)) { + qla2x00_mark_device_lost(vha, sess, 0, 0); + if (sess->send_els_logo) { qlt_port_logo_t logo; @@ -1027,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work) if (logout_started) { bool traced = false; + u16 cnt = 0; while (!READ_ONCE(sess->logout_completed)) { if (!traced) { @@ 
-1036,12 +1033,25 @@ void qlt_free_session_done(struct work_struct *work) traced = true; } msleep(100); + cnt++; + if (cnt > 200) + break; } ql_dbg(ql_dbg_disc, vha, 0xf087, "%s: sess %p logout completed\n", __func__, sess); } + /* check for any straggling io left behind */ + if (!(sess->flags & FCF_FCP2_DEVICE) && + qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) { + ql_log(ql_log_warn, vha, 0x3027, + "IO not return. Resetting.\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + } + if (sess->logo_ack_needed) { sess->logo_ack_needed = 0; qla24xx_async_notify_ack(vha, sess, @@ -1161,8 +1171,6 @@ void qlt_unreg_sess(struct fc_port *sess) if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - qla2x00_mark_device_lost(vha, sess, 0, 0); - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; sess->disc_state = DSC_DELETE_PEND; sess->last_rscn_gen = sess->rscn_gen; @@ -1222,7 +1230,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) sess->logout_on_delete = 0; sess->logo_ack_needed = 0; sess->fw_login_state = DSC_LS_PORT_UNAVAIL; - sess->scan_state = 0; } } @@ -1261,9 +1268,9 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, - "Scheduling sess %p for deletion\n", sess); + "Scheduling sess %p for deletion %8phC\n", + sess, sess->port_name); - INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -4134,9 +4141,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) /* * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return; out_term: @@ -4153,9 +4158,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) target_free_tag(sess->se_sess, &cmd->se_cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); } static void qlt_do_work(struct work_struct *work) @@ -4364,9 +4367,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, if (!cmd) { ql_dbg(ql_dbg_io, vha, 0x3062, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return -EBUSY; } @@ -4711,6 +4712,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha, sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, loop_id, &conflict_sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + } else { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; } if (IS_SW_RESV_ADDR(port_id)) { @@ -4782,6 +4789,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha, switch (sess->disc_state) { case DSC_DELETED: + case DSC_LOGIN_PEND: qlt_plogi_ack_unref(vha, pla); break; @@ -6064,7 +6072,6 @@ static void qlt_abort_work(struct qla_tgt *tgt, struct qla_hw_data *ha = vha->hw; struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; - uint32_t be_s_id; uint8_t s_id[3]; int rc; @@ -6077,8 +6084,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; s_id[2] = 
prm->abts.fcp_hdr_le.s_id[0]; - sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, - (unsigned char *)&be_s_id); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); @@ -6104,17 +6110,19 @@ static void qlt_abort_work(struct qla_tgt *tgt, } rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + ha->tgt.tgt_ops->put_sess(sess); + if (rc != 0) goto out_term; return; out_term2: + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); + if (sess) ha->tgt.tgt_ops->put_sess(sess); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); out_term: spin_lock_irqsave(&ha->hardware_lock, flags); @@ -6174,9 +6182,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt, scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + ha->tgt.tgt_ops->put_sess(sess); + if (rc != 0) goto out_term; return; @@ -6544,7 +6553,8 @@ qlt_enable_vha(struct scsi_qla_host *vha) } else { set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); - qla2x00_wait_for_hba_online(base_vha); + WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != + QLA_SUCCESS); } } EXPORT_SYMBOL(qlt_enable_vha); @@ -6574,7 +6584,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); - qla2x00_wait_for_hba_online(vha); + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_tgt, vha, 0xe081, + "qla2x00_wait_for_hba_online() failed\n"); } /* diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index e03d12a5f986ccbaf9eb8741f35e58f114051a51..654e1af7f542c0ebcaa4d9937c97688d95d2b003 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -350,7 +350,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess) if (!sess) return; - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); } @@ -365,8 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); target_sess_cmd_list_set_waiting(se_sess); - tcm_qla2xxx_put_sess(sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + tcm_qla2xxx_put_sess(sess); } static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) @@ -390,6 +390,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); return 0; } cmd->trc_flags |= TRC_XFR_RDY; @@ -718,10 +720,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - if (cmd->trc_flags & TRC_XMIT_STATUS) { - pr_crit("Multiple calls for status = %p.\n", cmd); - dump_stack(); - } cmd->trc_flags |= TRC_XMIT_STATUS; if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -833,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) { - assert_spin_locked(&sess->vha->hw->tgt.sess_lock); target_sess_cmd_list_set_waiting(sess->se_sess); } @@ -929,38 +926,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item, 
atomic_read(&tpg->lport_tpg_enabled)); } -static void tcm_qla2xxx_depend_tpg(struct work_struct *work) -{ - struct tcm_qla2xxx_tpg *base_tpg = container_of(work, - struct tcm_qla2xxx_tpg, tpg_base_work); - struct se_portal_group *se_tpg = &base_tpg->se_tpg; - struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; - - if (!target_depend_item(&se_tpg->tpg_group.cg_item)) { - atomic_set(&base_tpg->lport_tpg_enabled, 1); - qlt_enable_vha(base_vha); - } - complete(&base_tpg->tpg_base_comp); -} - -static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) -{ - struct tcm_qla2xxx_tpg *base_tpg = container_of(work, - struct tcm_qla2xxx_tpg, tpg_base_work); - struct se_portal_group *se_tpg = &base_tpg->se_tpg; - struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; - - if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { - atomic_set(&base_tpg->lport_tpg_enabled, 0); - target_undepend_item(&se_tpg->tpg_group.cg_item); - } - complete(&base_tpg->tpg_base_comp); -} - static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, const char *page, size_t count) { struct se_portal_group *se_tpg = to_tpg(item); + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long op; @@ -979,24 +952,16 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, if (atomic_read(&tpg->lport_tpg_enabled)) return -EEXIST; - INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg); + atomic_set(&tpg->lport_tpg_enabled, 1); + qlt_enable_vha(vha); } else { if (!atomic_read(&tpg->lport_tpg_enabled)) return count; - INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg); + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); } - init_completion(&tpg->tpg_base_comp); - schedule_work(&tpg->tpg_base_work); - wait_for_completion(&tpg->tpg_base_comp); - if (op) { - if (!atomic_read(&tpg->lport_tpg_enabled)) - return -ENODEV; - } else { - if (atomic_read(&tpg->lport_tpg_enabled)) - return -EPERM; - } return count; } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 7550ba2831c36a890f26312ef1404170c3766f78..147cf6c903666b4aa3844a51014fe54e0ef8e1d2 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -48,9 +48,6 @@ struct tcm_qla2xxx_tpg { struct tcm_qla2xxx_tpg_attrib tpg_attrib; /* Returned by tcm_qla2xxx_make_tpg() */ struct se_portal_group se_tpg; - /* Items for dealing with configfs_depend_item */ - struct completion tpg_base_comp; - struct work_struct tpg_base_work; }; struct tcm_qla2xxx_fc_loopid { diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 0e13349dce57094b3bc211093ec93a26f2797f11..f8acf101af3d9a89493b33ac05316f1bc2838d98 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3204,6 +3204,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) + return -EINVAL; conn = cls_conn->dd_data; qla_conn = conn->dd_data; qla_conn->qla_ep = ep->dd_data; @@ -4278,7 +4280,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) return QLA_SUCCESS; mem_alloc_error_exit: - qla4xxx_mem_free(ha); return QLA_ERROR; } @@ -5933,7 +5934,7 @@ static int get_fw_boot_info(struct 
scsi_qla_host *ha, uint16_t ddb_index[]) val = rd_nvram_byte(ha, sec_addr); if (val & BIT_7) ddb_index[1] = (val & 0x7f); - + goto exit_boot_info; } else if (is_qla80XX(ha)) { buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL); @@ -7237,6 +7238,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + if (rc) + goto free_sess; ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", __func__, fnode_sess->dev.kobj.name); diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index ea88906d2cc5297d27d8f202abc505b6229326d1..cd7912e34dcd6e36e22bb69b8b8a69a2d90b5b6f 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev) * emulated RAID devices, so start with SCSI */ struct raid_internal *i = ac_to_raid_internal(cont); -#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) - if (scsi_is_sdev_device(dev)) { + if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); if (i->f->cookie != sdev->host->hostt) @@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev) return i->f->is_raid(dev); } -#endif /* FIXME: look at other subsystems too */ return 0; } @@ -212,53 +210,6 @@ raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); -static void raid_component_release(struct device *dev) -{ - struct raid_component *rc = - container_of(dev, struct raid_component, dev); - dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n"); - put_device(rc->dev.parent); - kfree(rc); -} - -int raid_component_add(struct raid_template *r,struct device *raid_dev, - struct device *component_dev) -{ - struct device *cdev = - attribute_container_find_class_device(&r->raid_attrs.ac, - raid_dev); - struct raid_component *rc; - struct raid_data *rd = dev_get_drvdata(cdev); - int err; - - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - INIT_LIST_HEAD(&rc->node); - device_initialize(&rc->dev); - rc->dev.release = raid_component_release; - rc->dev.parent = get_device(component_dev); - rc->num = rd->component_count++; - - dev_set_name(&rc->dev, "component-%d", rc->num); - list_add_tail(&rc->node, &rd->component_list); - rc->dev.class = &raid_class.class; - err = device_add(&rc->dev); - if (err) - goto err_out; - - return 0; - -err_out: - list_del(&rc->node); - rd->component_count--; - put_device(component_dev); - kfree(rc); - return err; -} -EXPORT_SYMBOL(raid_component_add); - struct raid_template * raid_class_attach(struct raid_function_template *ft) { diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fc1356d101b0a27de2a1240840336e723b5dcb45..6178b476bec6ea36ee8c29d9db81da472732191e 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -351,11 +351,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, if (result) return -EIO; - /* Sanity check that we got the page back that we asked for */ + /* + * Sanity check that we got the page back that we asked for and that + * the page size is not 0. 
+ */ if (buffer[1] != page) return -EIO; - return get_unaligned_be16(&buffer[2]) + 4; + result = get_unaligned_be16(&buffer[2]); + if (!result) + return -EIO; + + return result + 4; } /** @@ -550,11 +557,15 @@ EXPORT_SYMBOL(scsi_report_opcode); */ int scsi_device_get(struct scsi_device *sdev) { + struct module *module; + if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; if (!get_device(&sdev->sdev_gendev)) goto fail; - if (!try_module_get(sdev->host->hostt->module)) + + module = sdev->host->hostt->module; + if ((!module && !sdev->host->is_builtin) || !try_module_get(module)) goto fail_put_device; return 0; @@ -575,8 +586,10 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { - module_put(sdev->host->hostt->module); + struct module *mod = sdev->host->hostt->module; + put_device(&sdev->sdev_gendev); + module_put(mod); } EXPORT_SYMBOL(scsi_device_put); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 60bcc6df97a9171babba8a9e506843411e0be254..e0279b0b5ec85eab85c5af27ddc4594680d207d0 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -62,7 +62,7 @@ /* make sure inq_product_rev string corresponds to this version */ #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ -static const char *sdebug_version_date = "20180128"; +static const char *sdebug_version_date = "20190125"; #define MY_NAME "scsi_debug" @@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } -static void *fake_store(unsigned long long lba) +static void *lba2fake_store(unsigned long long lba) { lba = do_div(lba, sdebug_store_sectors); @@ -836,7 +836,8 @@ static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); } -static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { if (sdebug_verbose) { if (0x1261 == cmd) @@ -1664,7 +1665,7 @@ static int resp_readcap16(struct scsi_cmnd *scp, { unsigned char *cmd = scp->cmnd; unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; - int alloc_len; + u32 alloc_len; alloc_len = get_unaligned_be32(cmd + 10); /* following just in case virtual_gb changed */ @@ -1693,7 +1694,7 @@ static int resp_readcap16(struct scsi_cmnd *scp, } return fill_from_dev_buffer(scp, arr, - min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); + min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ)); } #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412 @@ -1704,8 +1705,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp, unsigned char *cmd = scp->cmnd; unsigned char *arr; int host_no = devip->sdbg_host->shost->host_no; - int n, ret, alen, rlen; int port_group_a, port_group_b, port_a, port_b; + u32 alen, n, rlen; + int ret; alen = get_unaligned_be32(cmd + 6); arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); @@ -1769,7 +1771,7 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp, */ rlen = min(alen,n); ret = fill_from_dev_buffer(scp, arr, - min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); + min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); kfree(arr); return ret; } @@ -2300,11 +2302,11 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - if (md_len > 2) { + off = bd_len + (mselect6 ? 
4 : 8); + if (md_len > 2 || off >= res) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } - off = bd_len + (mselect6 ? 4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { @@ -2514,8 +2516,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, return ret; } -/* If fake_store(lba,num) compares equal to arr(num), then copy top half of - * arr into fake_store(lba,num) and return true. If comparison fails then +/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of + * arr into lba2fake_store(lba,num) and return true. If comparison fails then * return false. */ static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) { @@ -2643,7 +2645,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, if (sdt->app_tag == cpu_to_be16(0xffff)) continue; - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); if (ret) { dif_errors++; return ret; @@ -3261,10 +3263,12 @@ static int resp_write_scat(struct scsi_cmnd *scp, static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, bool unmap, bool ndob) { + int ret; unsigned long iflags; unsigned long long i; - int ret; - u64 lba_off; + u32 lb_size = sdebug_sector_size; + u64 block, lbaa; + u8 *fs1p; ret = check_device_access_params(scp, lba, num); if (ret) @@ -3276,31 +3280,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, unmap_region(lba, num); goto out; } - - lba_off = lba * sdebug_sector_size; + lbaa = lba; + block = do_div(lbaa, sdebug_store_sectors); /* if ndob then zero 1 logical block, else fetch 1 logical block */ + fs1p = fake_storep + (block * lb_size); if (ndob) { - memset(fake_storep + lba_off, 0, sdebug_sector_size); + memset(fs1p, 0, lb_size); ret = 0; } else - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, - sdebug_sector_size); + ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) + } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", - my_name, "write same", - sdebug_sector_size, ret); + my_name, "write same", lb_size, ret); /* Copy first sector to remaining blocks */ - for (i = 1 ; i < num ; i++) - memcpy(fake_storep + ((lba + i) * sdebug_sector_size), - fake_storep + lba_off, - sdebug_sector_size); - + for (i = 1 ; i < num ; i++) { + lbaa = lba + i; + block = do_div(lbaa, sdebug_store_sectors); + memmove(fake_storep + (block * lb_size), fs1p, lb_size); + } if (scsi_debug_lbp()) map_region(lba, num); out: @@ -5350,6 +5353,11 @@ static int __init scsi_debug_init(void) return -EINVAL; } + if (sdebug_num_tgts < 0) { + pr_err("num_tgts must be >= 0\n"); + return -EINVAL; + } + if (sdebug_guard > 1) { pr_err("guard must be 0 or 1\n"); return -EINVAL; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c4cbfd07b9167f0e29b635b9b24e65a6df3826d9..a08ff3bd63105141840e774fc0af3081aa78178a 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -238,6 +238,7 @@ static struct { {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, 
BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 5a58cbf3a75da9123899ce668934e002933d1416..c14006ac98f91c6bb3c7d360b7bd78720d19df84 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"NETAPP", "INF-01-00", "rdac", }, {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, + {"LENOVO", "DE_Series", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index b7a8fdfeb2f47babeb014a55c9357b8b4f87fb34..698bf495b5356b4bbb10be392db632192736860a 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -88,6 +88,13 @@ void scsi_schedule_eh(struct Scsi_Host *shost) if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 || scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) { + /* + * We have to order shost_state store above and test of + * the host_busy(scsi_eh_wakeup will test it), because + * scsi_dec_host_busy accesses these variables without + * host_lock. + */ + smp_mb__before_atomic(); shost->host_eh_scheduled++; scsi_eh_wakeup(shost); } @@ -338,9 +345,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev) online = scsi_device_online(sdev); - SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev, - "%s: rtn: %d\n", __func__, online)); - return online; } EXPORT_SYMBOL(scsi_block_when_processing_errors); @@ -970,6 +974,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->sdb = scmd->sdb; ses->next_rq = scmd->request->next_rq; ses->result = scmd->result; + ses->resid_len = scmd->req.resid_len; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; ses->eh_eflags = scmd->eh_eflags; @@ -981,6 +986,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->request->next_rq = NULL; scmd->result = 0; + scmd->req.resid_len = 0; if (sense_bytes) { scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -1034,6 +1040,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) scmd->sdb = ses->sdb; scmd->request->next_rq = ses->next_rq; scmd->result = ses->result; + scmd->req.resid_len = ses->resid_len; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; scmd->eh_eflags = ses->eh_eflags; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index cc30fccc1a2ec6a49cdcd9942e6eb15d6d7cf132..840d96fe81bc15fb67efd9bc90335b6e16aa524c 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -221,7 +221,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) switch (cmd) { case SCSI_IOCTL_GET_IDLUN: - if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun))) + if (!access_ok(arg, sizeof(struct scsi_idlun))) return -EFAULT; __put_user((sdev->id & 0xff) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index eb97d2dd36516d0a3a5c0a9db1f255aaf5a9c56f..15fd51cc33640aa452c456ca6162313f06bf676b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -71,11 +71,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) struct kmem_cache *cache; int ret = 0; + mutex_lock(&scsi_sense_cache_mutex); cache = scsi_select_sense_cache(shost->unchecked_isa_dma); if (cache) - return 0; + goto exit; - mutex_lock(&scsi_sense_cache_mutex); if (shost->unchecked_isa_dma) { 
scsi_sense_isadma_cache = kmem_cache_create("scsi_sense_cache(DMA)", @@ -91,7 +91,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) if (!scsi_sense_cache) ret = -ENOMEM; } - + exit: mutex_unlock(&scsi_sense_cache_mutex); return ret; } @@ -208,7 +208,13 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) } spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(&device->requeue_work); + /* + * need to get q_usage_counter which will be + * put in scsi_requeue_run_queue. + */ + percpu_ref_get(&q->q_usage_counter); + if (!kblockd_schedule_work(&device->requeue_work)) + percpu_ref_put(&q->q_usage_counter); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -265,6 +271,13 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, struct scsi_request *rq; int ret = DRIVER_ERROR << 24; + /* + * Zero-initialize sshdr for those callers that check the *sshdr + * contents even if no sense data is available. + */ + if (sshdr) + memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); + req = blk_get_request(sdev->request_queue, data_direction == DMA_TO_DEVICE ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT); @@ -346,6 +359,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) rcu_read_lock(); atomic_dec(&shost->host_busy); + /* + * We have to order host_busy dec above and test of the shost_state + * below outside the host_lock. + */ + smp_mb__after_atomic(); if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) @@ -548,6 +566,11 @@ void scsi_requeue_run_queue(struct work_struct *work) sdev = container_of(work, struct scsi_device, requeue_work); q = sdev->request_queue; scsi_run_queue(q); + /* + * need to put q_usage_counter which is got in + * scsi_end_request or __scsi_queue_insert. + */ + percpu_ref_put(&q->q_usage_counter); } /* @@ -662,6 +685,37 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) cmd->request->next_rq->special = NULL; } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + struct request_queue *q = sdev->request_queue; + + percpu_ref_get(&q->q_usage_counter); + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) { + if (!kblockd_schedule_work(&sdev->requeue_work)) + percpu_ref_put(&q->q_usage_counter); + } else { + /* + * smp_mb() present in sbitmap_queue_clear() or implied in + * .end_io is for ordering writing .device_busy in + * scsi_device_unbusy() and reading sdev->restarts. + */ + int old = atomic_read(&sdev->restarts); + + /* + * ->restarts has to be kept as non-zero if new budget + * contention occurs. + * + * No need to run queue when either another re-run + * queue wins in updating ->restarts or a new budget + * contention occurs. 
+ */ + if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old) + blk_mq_run_hw_queues(sdev->request_queue, true); + percpu_ref_put(&q->q_usage_counter); + } +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes, unsigned int bidi_bytes) @@ -684,9 +738,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; - destroy_rcu_head(&cmd->rcu); } + /* + * Calling rcu_barrier() is not necessary here because the + * SCSI error handler guarantees that the function called by + * call_rcu() has been called before scsi_end_request() is + * called. + */ + destroy_rcu_head(&cmd->rcu); + if (req->mq_ctx) { /* * In the MQ case the command gets freed by __blk_mq_end_request, @@ -697,13 +758,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error, */ scsi_mq_uninit_cmd(cmd); + /* + * queue is still alive, so grab the ref for preventing it + * from being cleaned up during running queue. + */ + percpu_ref_get(&q->q_usage_counter); + __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); + + percpu_ref_put(&q->q_usage_counter); } else { unsigned long flags; @@ -749,6 +814,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); return BLK_STS_TARGET; case DID_NEXUS_FAILURE: + set_host_byte(cmd, DID_OK); return BLK_STS_NEXUS; case DID_ALLOC_FAILURE: set_host_byte(cmd, DID_OK); @@ -863,6 +929,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) case 0x07: /* operation in progress */ case 0x08: /* Long write in progress */ case 0x09: /* self test in progress */ + case 0x11: /* notify (enable spinup) required */ case 0x14: /* space allocation in progress */ case 0x1a: /* start stop unit in progress */ case 0x1b: /* sanitize in progress */ @@ -1243,6 +1310,18 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } +/* + * Only called when the request isn't completed by SCSI, and not freed by + * SCSI + */ +static void scsi_cleanup_rq(struct request *rq) +{ + if (rq->rq_flags & RQF_DONTPREP) { + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + rq->rq_flags &= ~RQF_DONTPREP; + } +} + /* Add a command to the list used by the aacraid and dpt_i2o drivers */ void scsi_add_cmd_to_list(struct scsi_cmnd *cmd) { @@ -1385,8 +1464,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) * commands. The device must be brought online * before trying any recovery commands. */ - sdev_printk(KERN_ERR, sdev, - "rejecting I/O to offline device\n"); + if (!sdev->offline_already) { + sdev->offline_already = 1; + sdev_printk(KERN_ERR, sdev, + "rejecting I/O to offline device\n"); + } ret = BLKPREP_KILL; break; case SDEV_DEL: @@ -2071,7 +2153,23 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) out_put_device: put_device(&sdev->sdev_gendev); + atomic_inc(&sdev->restarts); + + /* + * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). + * .restarts must be incremented before .device_busy is read because the + * code in scsi_run_queue_async() depends on the order of these operations. 
+ */ + smp_mb__after_atomic(); out: + /* + * If all in-flight requests originated from this LUN are completed + * before reading .device_busy, sdev->device_busy will be observed as + * zero, then blk_mq_delay_run_hw_queues() will dispatch this request + * soon. Otherwise, completion of one of these requests will observe + * the .restarts flag, and the request queue will be run for handling + * this request, see scsi_end_request(). + */ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); return false; @@ -2135,18 +2233,22 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, case BLK_STS_OK: break; case BLK_STS_RESOURCE: - if (atomic_read(&sdev->device_busy) || - scsi_device_blocked(sdev)) + if (scsi_device_blocked(sdev)) ret = BLK_STS_DEV_RESOURCE; break; default: + if (unlikely(!scsi_device_online(sdev))) + scsi_req(req)->result = DID_NO_CONNECT << 16; + else + scsi_req(req)->result = DID_ERROR << 16; /* - * Make sure to release all allocated ressources when + * Make sure to release all allocated resources when * we hit an error, as we will never see this command * again. */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; @@ -2326,6 +2428,7 @@ static const struct blk_mq_ops scsi_mq_ops = { .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, .map_queues = scsi_map_queues, }; @@ -2345,7 +2448,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) { unsigned int cmd_size, sgl_size; - sgl_size = scsi_mq_sgl_size(shost); + sgl_size = max_t(unsigned int, sizeof(struct scatterlist), + scsi_mq_sgl_size(shost)); cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; if (scsi_host_get_prot(shost)) cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; @@ -2541,7 +2645,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); /** * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. * @sdev: SCSI device to be queried - * @dbd: set if mode sense will allow block descriptors to be returned + * @dbd: set to prevent mode sense from returning block descriptors * @modepage: mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. @@ -2551,9 +2655,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * @sshdr: place to put sense data (or NULL if no sense to be collected). * must be SCSI_SENSE_BUFFERSIZE big. * - * Returns zero if unsuccessful, or the header offset (either 4 - * or 8 depending on whether a six or ten byte command was - * issued) if successful. 
+ * Returns zero if successful, or a negative error number on failure */ int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, @@ -2576,18 +2678,18 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, sshdr = &my_sshdr; retry: - use_10_for_ms = sdev->use_10_for_ms; + use_10_for_ms = sdev->use_10_for_ms || len > 255; if (use_10_for_ms) { - if (len < 8) - len = 8; + if (len < 8 || len > 65535) + return -EINVAL; cmd[0] = MODE_SENSE_10; - cmd[8] = len; + put_unaligned_be16(len, &cmd[7]); header_length = 8; } else { if (len < 4) - len = 4; + return -EINVAL; cmd[0] = MODE_SENSE; cmd[4] = len; @@ -2598,58 +2700,66 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, sshdr, timeout, retries, NULL); + if (result < 0) + return result; /* This code looks awful: what it's doing is making sure an * ILLEGAL REQUEST sense return identifies the actual command * byte as the problem. MODE_SENSE commands can return * ILLEGAL REQUEST if the code page isn't supported */ - if (use_10_for_ms && !scsi_status_is_good(result) && - driver_byte(result) == DRIVER_SENSE) { - if (scsi_sense_valid(sshdr)) { + if (!scsi_status_is_good(result)) { + if (driver_byte(result) == DRIVER_SENSE && + scsi_sense_valid(sshdr)) { if ((sshdr->sense_key == ILLEGAL_REQUEST) && (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { - /* - * Invalid command operation code + /* + * Invalid command operation code: retry using + * MODE SENSE(6) if this was a MODE SENSE(10) + * request, except if the request mode page is + * too large for MODE SENSE single byte + * allocation length field. */ - sdev->use_10_for_ms = 0; + if (use_10_for_ms) { + if (len > 255) + return -EIO; + sdev->use_10_for_ms = 0; + goto retry; + } + } + if ((status_byte(result) == CHECK_CONDITION) && + sshdr->sense_key == UNIT_ATTENTION && + retry_count) { + retry_count--; goto retry; } } + return -EIO; + } + if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && + (modepage == 6 || modepage == 8))) { + /* Initio breakage? */ + header_length = 0; + data->length = 13; + data->medium_type = 0; + data->device_specific = 0; + data->longlba = 0; + data->block_descriptor_length = 0; + } else if (use_10_for_ms) { + data->length = get_unaligned_be16(&buffer[0]) + 2; + data->medium_type = buffer[2]; + data->device_specific = buffer[3]; + data->longlba = buffer[4] & 0x01; + data->block_descriptor_length = get_unaligned_be16(&buffer[6]); + } else { + data->length = buffer[0] + 1; + data->medium_type = buffer[1]; + data->device_specific = buffer[2]; + data->block_descriptor_length = buffer[3]; } + data->header_length = header_length; - if(scsi_status_is_good(result)) { - if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && - (modepage == 6 || modepage == 8))) { - /* Initio breakage? 
*/ - header_length = 0; - data->length = 13; - data->medium_type = 0; - data->device_specific = 0; - data->longlba = 0; - data->block_descriptor_length = 0; - } else if(use_10_for_ms) { - data->length = buffer[0]*256 + buffer[1] + 2; - data->medium_type = buffer[2]; - data->device_specific = buffer[3]; - data->longlba = buffer[4] & 0x01; - data->block_descriptor_length = buffer[6]*256 - + buffer[7]; - } else { - data->length = buffer[0] + 1; - data->medium_type = buffer[1]; - data->device_specific = buffer[2]; - data->block_descriptor_length = buffer[3]; - } - data->header_length = header_length; - } else if ((status_byte(result) == CHECK_CONDITION) && - scsi_sense_valid(sshdr) && - sshdr->sense_key == UNIT_ATTENTION && retry_count) { - retry_count--; - goto retry; - } - - return result; + return 0; } EXPORT_SYMBOL(scsi_mode_sense); @@ -2753,6 +2863,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_RUNNING: case SDEV_CREATED_BLOCK: + case SDEV_OFFLINE: break; default: goto illegal; @@ -2797,6 +2908,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) break; } + sdev->offline_already = 0; sdev->sdev_state = state; return 0; @@ -3046,11 +3158,14 @@ scsi_device_quiesce(struct scsi_device *sdev) */ WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); - blk_set_preempt_only(q); + if (sdev->quiesced_by == current) + return 0; + + blk_set_pm_only(q); blk_mq_freeze_queue(q); /* - * Ensure that the effect of blk_set_preempt_only() will be visible + * Ensure that the effect of blk_set_pm_only() will be visible * for percpu_ref_tryget() callers that occur after the queue * unfreeze even if the queue was already frozen before this function * was called. See also https://lwn.net/Articles/573497/. @@ -3063,7 +3178,7 @@ scsi_device_quiesce(struct scsi_device *sdev) if (err == 0) sdev->quiesced_by = current; else - blk_clear_preempt_only(q); + blk_clear_pm_only(q); mutex_unlock(&sdev->state_mutex); return err; @@ -3086,9 +3201,10 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - WARN_ON_ONCE(!sdev->quiesced_by); - sdev->quiesced_by = NULL; - blk_clear_preempt_only(sdev->request_queue); + if (sdev->quiesced_by) { + sdev->quiesced_by = NULL; + blk_clear_pm_only(sdev->request_queue); + } if (sdev->sdev_state == SDEV_QUIESCE) scsi_device_set_state(sdev, SDEV_RUNNING); mutex_unlock(&sdev->state_mutex); @@ -3253,6 +3369,7 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, break; case SDEV_CANCEL: case SDEV_OFFLINE: + case SDEV_RUNNING: break; default: return -EINVAL; @@ -3415,6 +3532,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev) } EXPORT_SYMBOL(sdev_enable_disk_events); +static unsigned char designator_prio(const unsigned char *d) +{ + if (d[1] & 0x30) + /* not associated with LUN */ + return 0; + + if (d[3] == 0) + /* invalid length */ + return 0; + + /* + * Order of preference for lun descriptor: + * - SCSI name string + * - NAA IEEE Registered Extended + * - EUI-64 based 16-byte + * - EUI-64 based 12-byte + * - NAA IEEE Registered + * - NAA IEEE Extended + * - EUI-64 based 8-byte + * - SCSI name string (truncated) + * - T10 Vendor ID + * as longer descriptors reduce the likelyhood + * of identification clashes. 
+ */ + + switch (d[1] & 0xf) { + case 8: + /* SCSI name string, variable-length UTF-8 */ + return 9; + case 3: + switch (d[4] >> 4) { + case 6: + /* NAA registered extended */ + return 8; + case 5: + /* NAA registered */ + return 5; + case 4: + /* NAA extended */ + return 4; + case 3: + /* NAA locally assigned */ + return 1; + default: + break; + } + break; + case 2: + switch (d[3]) { + case 16: + /* EUI64-based, 16 byte */ + return 7; + case 12: + /* EUI64-based, 12 byte */ + return 6; + case 8: + /* EUI64-based, 8 byte */ + return 3; + default: + break; + } + break; + case 1: + /* T10 vendor ID */ + return 1; + default: + break; + } + + return 0; +} + /** * scsi_vpd_lun_id - return a unique device identification * @sdev: SCSI device @@ -3431,7 +3620,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events); */ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) { - u8 cur_id_type = 0xff; + u8 cur_id_prio = 0; u8 cur_id_size = 0; const unsigned char *d, *cur_id_str; const struct scsi_vpd *vpd_pg83; @@ -3444,20 +3633,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) return -ENXIO; } - /* - * Look for the correct descriptor. - * Order of preference for lun descriptor: - * - SCSI name string - * - NAA IEEE Registered Extended - * - EUI-64 based 16-byte - * - EUI-64 based 12-byte - * - NAA IEEE Registered - * - NAA IEEE Extended - * - T10 Vendor ID - * as longer descriptors reduce the likelyhood - * of identification clashes. - */ - /* The id string must be at least 20 bytes + terminating NULL byte */ if (id_len < 21) { rcu_read_unlock(); @@ -3467,8 +3642,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) memset(id, 0, id_len); d = vpd_pg83->data + 4; while (d < vpd_pg83->data + vpd_pg83->len) { - /* Skip designators not referring to the LUN */ - if ((d[1] & 0x30) != 0x00) + u8 prio = designator_prio(d); + + if (prio == 0 || cur_id_prio > prio) goto next_desig; switch (d[1] & 0xf) { @@ -3476,28 +3652,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) /* T10 Vendor ID */ if (cur_id_size > d[3]) break; - /* Prefer anything */ - if (cur_id_type > 0x01 && cur_id_type != 0xff) - break; + cur_id_prio = prio; cur_id_size = d[3]; if (cur_id_size + 4 > id_len) cur_id_size = id_len - 4; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; id_size = snprintf(id, id_len, "t10.%*pE", cur_id_size, cur_id_str); break; case 0x2: /* EUI-64 */ - if (cur_id_size > d[3]) - break; - /* Prefer NAA IEEE Registered Extended */ - if (cur_id_type == 0x3 && - cur_id_size == d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3515,17 +3682,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x3: /* NAA */ - if (cur_id_size > d[3]) - break; + cur_id_prio = prio; cur_id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; switch (cur_id_size) { case 8: id_size = snprintf(id, id_len, @@ -3538,26 +3702,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) cur_id_str); break; default: - cur_id_size = 0; break; } break; case 0x8: /* SCSI name string */ - if (cur_id_size + 4 > d[3]) + if (cur_id_size > d[3]) break; /* Prefer others for truncated descriptor */ - if (cur_id_size && d[3] > id_len) - break; + if (d[3] > id_len) { + prio = 2; + if (cur_id_prio > prio) + break; + } + cur_id_prio = prio; cur_id_size = 
id_size = d[3]; cur_id_str = d + 4; - cur_id_type = d[1] & 0xf; if (cur_id_size >= id_len) cur_id_size = id_len - 1; memcpy(id, cur_id_str, cur_id_size); - /* Decrease priority for truncated descriptor */ - if (cur_id_size != id_size) - cur_id_size = 6; break; default: break; diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c index bd70339c1242eb1f2515a7a947b007472823a3c8..03d9855a6afd71ef5893b37623dc27c39db1cdbc 100644 --- a/drivers/scsi/scsi_logging.c +++ b/drivers/scsi/scsi_logging.c @@ -16,57 +16,15 @@ #include #include -#define SCSI_LOG_SPOOLSIZE 4096 - -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG -#warning SCSI logging bitmask too large -#endif - -struct scsi_log_buf { - char buffer[SCSI_LOG_SPOOLSIZE]; - unsigned long map; -}; - -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); - static char *scsi_log_reserve_buffer(size_t *len) { - struct scsi_log_buf *buf; - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; - unsigned long idx = 0; - - preempt_disable(); - buf = this_cpu_ptr(&scsi_format_log); - idx = find_first_zero_bit(&buf->map, map_bits); - if (likely(idx < map_bits)) { - while (test_and_set_bit(idx, &buf->map)) { - idx = find_next_zero_bit(&buf->map, map_bits, idx); - if (idx >= map_bits) - break; - } - } - if (WARN_ON(idx >= map_bits)) { - preempt_enable(); - return NULL; - } - *len = SCSI_LOG_BUFSIZE; - return buf->buffer + idx * SCSI_LOG_BUFSIZE; + *len = 128; + return kmalloc(*len, GFP_ATOMIC); } static void scsi_log_release_buffer(char *bufptr) { - struct scsi_log_buf *buf; - unsigned long idx; - int ret; - - buf = this_cpu_ptr(&scsi_format_log); - if (bufptr >= buf->buffer && - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; - ret = test_and_clear_bit(idx, &buf->map); - WARN_ON(!ret); - } - preempt_enable(); + kfree(bufptr); } static inline const char *scmd_name(const struct scsi_cmnd *scmd) diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index b44c1bb687a2e12557fc9acfdcd833cea89b0962..ebc193f7f7ddbc626e98e36f9fe622211a52be25 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev, if (err == 0) { pm_runtime_disable(dev); - pm_runtime_set_active(dev); + err = pm_runtime_set_active(dev); pm_runtime_enable(dev); + + /* + * Forcibly set runtime PM status of request queue to "active" + * to make sure we can again get requests from the queue + * (see also blk_pm_peek_request()). + * + * The resume hook will correct runtime PM status of the disk. + */ + if (!err && scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) + blk_set_runtime_active(sdev->request_queue); + } } return err; @@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev, else fn = NULL; - /* - * Forcibly set runtime PM status of request queue to "active" to - * make sure we can again get requests from the queue (see also - * blk_pm_peek_request()). - * - * The resume hook will correct runtime PM status of the disk. 
- */ - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) - blk_set_runtime_active(to_scsi_device(dev)->request_queue); - if (fn) { async_schedule_domain(fn, dev, &scsi_sd_pm_domain); diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 7f0ceb65c3f39dcc89ac21a10f36de6391b5bcee..99f472bb9f7e5f43eb84856a38bc9f9cd2329372 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? 
simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 78ca63dfba4ac58af8cc7263c4fddf932a959291..af9ee2feb82df1c9050260473b9a9eea20a1ce2d 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, - GFP_ATOMIC); + GFP_KERNEL); if (!sdev) goto out; @@ -462,7 +462,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, error = shost->hostt->target_alloc(starget); if(error) { - dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); + if (error != -ENXIO) + dev_err(dev, "target allocation failed, error %d\n", error); /* don't want scsi_target_reap to do the final * put because it will be under the host lock */ scsi_target_destroy(starget); @@ -796,7 +797,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, */ sdev->inquiry = kmemdup(inq_result, max_t(size_t, sdev->inquiry_len, 36), - GFP_ATOMIC); + GFP_KERNEL); if (sdev->inquiry == NULL) return SCSI_SCAN_NO_RESPONSE; @@ -844,6 +845,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, *bflags |= BLIST_NOREPORTLUN; } + if (sdev->type == TYPE_ENCLOSURE) + set_bit(QUEUE_FLAG_FORECE_QUIESCE, + &sdev->request_queue->queue_flags); + /* * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI * spec says: The device server is capable of supporting the @@ -1087,7 +1092,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, if (!sdev) goto out; - result = kmalloc(result_len, GFP_ATOMIC | + result = kmalloc(result_len, GFP_KERNEL | ((shost->unchecked_isa_dma) ? 
__GFP_DMA : 0)); if (!result) goto out_free_sdev; @@ -1722,15 +1727,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) */ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) { - struct async_scan_data *data; + struct async_scan_data *data = NULL; unsigned long flags; if (strncmp(scsi_scan_type, "sync", 4) == 0) return NULL; + mutex_lock(&shost->scan_mutex); if (shost->async_scan) { shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__); - return NULL; + goto err; } data = kmalloc(sizeof(*data), GFP_KERNEL); @@ -1741,7 +1747,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) goto err; init_completion(&data->prev_finished); - mutex_lock(&shost->scan_mutex); spin_lock_irqsave(shost->host_lock, flags); shost->async_scan = 1; spin_unlock_irqrestore(shost->host_lock, flags); @@ -1756,6 +1761,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) return data; err: + mutex_unlock(&shost->scan_mutex); kfree(data); return NULL; } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 3aee9464a7bfab94a56d2db0463e3a7c40e0692a..18299693e4ffa18adeea557db474723564f5737f 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -431,9 +431,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; unsigned long flags; + struct module *mod; sdev = container_of(work, struct scsi_device, ew.work); + mod = sdev->host->hostt->module; + scsi_dh_release_device(sdev); parent = sdev->sdev_gendev.parent; @@ -474,11 +477,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) if (parent) put_device(parent); + module_put(mod); } static void scsi_device_dev_release(struct device *dev) { struct scsi_device *sdp = to_scsi_device(dev); + + /* Set module pointer as NULL in case of module unloading */ + if (!try_module_get(sdp->host->hostt->module)) + sdp->host->hostt->module = NULL; + execute_in_process_context(scsi_device_dev_release_usercontext, &sdp->ew); } @@ -723,6 +732,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; + struct scsi_device *sdev = to_scsi_device(dev); + + /* + * We need to try to get module, avoiding the module been removed + * during delete. + */ + if (scsi_device_get(sdev)) + return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); @@ -737,9 +754,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, * state into SDEV_DEL. */ device_remove_file(dev, attr); - scsi_remove_device(to_scsi_device(dev)); + scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); + scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); @@ -765,6 +783,12 @@ store_state_field(struct device *dev, struct device_attribute *attr, mutex_lock(&sdev->state_mutex); ret = scsi_device_set_state(sdev, state); + /* If device use blk-mq, the device state changes to + * SDEV_RUNNING, we need to run hw queue to avoid io hung. + */ + if ((ret == 0) && (state == SDEV_RUNNING) && + (sdev->request_queue->mq_ops != NULL)) + blk_mq_run_hw_queues(sdev->request_queue, true); mutex_unlock(&sdev->state_mutex); return ret == 0 ? 
count : -EINVAL; @@ -1392,7 +1416,8 @@ void __scsi_remove_device(struct scsi_device *sdev) mutex_unlock(&sdev->state_mutex); blk_cleanup_queue(sdev->request_queue); - cancel_work_sync(&sdev->requeue_work); + if (cancel_work_sync(&sdev->requeue_work)) + percpu_ref_put(&sdev->request_queue->q_usage_counter); if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); @@ -1422,6 +1447,40 @@ void scsi_remove_device(struct scsi_device *sdev) } EXPORT_SYMBOL(scsi_remove_device); +/* Cancel the inflight async probe for scsi_device */ +static void __scsi_kill_devices(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct scsi_device *sdev, *to_put = NULL; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(sdev, &shost->__devices, siblings) { + if (sdev->channel != starget->channel || + sdev->id != starget->id) + continue; + + if ((sdev->sdev_state != SDEV_DEL && + sdev->sdev_state != SDEV_CANCEL) || !sdev->is_visible) + continue; + if (!kobject_get_unless_zero(&sdev->sdev_gendev.kobj)) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + + if (to_put) + put_device(&to_put->sdev_gendev); + device_lock(&sdev->sdev_gendev); + kill_device(&sdev->sdev_gendev); + device_unlock(&sdev->sdev_gendev); + to_put = sdev; + + spin_lock_irqsave(shost->host_lock, flags); + } + spin_unlock_irqrestore(shost->host_lock, flags); + if (to_put) + put_device(&to_put->sdev_gendev); +} + static void __scsi_remove_target(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); @@ -1451,6 +1510,8 @@ static void __scsi_remove_target(struct scsi_target *starget) goto restart; } spin_unlock_irqrestore(shost->host_lock, flags); + + __scsi_kill_devices(starget); } /** @@ -1475,7 +1536,16 @@ void scsi_remove_target(struct device *dev) starget->state == STARGET_CREATED_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { - kref_get(&starget->reap_ref); + /* + * If the reference count is already zero, skip + * this target. Calling kref_get_unless_zero() if + * the reference count is zero is safe because + * scsi_target_destroy() will wait until the host + * lock has been released before freeing starget. + */ + if (!kref_get_unless_zero(&starget->reap_ref)) + continue; + if (starget->state == STARGET_CREATED) starget->state = STARGET_CREATED_REMOVE; else diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 0ff083bbf5b1f00b6651fc1f256ab890d533dadb..22472d140ef7c6bce8955edbc37d1c9270eee10a 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -21,7 +21,7 @@ #include #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) -#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9]) +#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8])) static const char * scsi_trace_misc(struct trace_seq *, unsigned char *, int); @@ -30,15 +30,18 @@ static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba = 0, txlen; lba |= ((cdb[1] & 0x1F) << 16); lba |= (cdb[2] << 8); lba |= cdb[3]; - txlen = cdb[4]; + /* + * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be read (READ(6)) or written (WRITE(6)). + */ + txlen = cdb[4] ? 
cdb[4] : 256; - trace_seq_printf(p, "lba=%llu txlen=%llu", - (unsigned long long)lba, (unsigned long long)txlen); + trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen); trace_seq_putc(p, 0); return ret; @@ -48,17 +51,12 @@ static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba, txlen; - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[7] << 8); - txlen |= cdb[8]; + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be16(&cdb[7]); - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME) @@ -73,19 +71,12 @@ static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; - - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[6] << 24); - txlen |= (cdb[7] << 16); - txlen |= (cdb[8] << 8); - txlen |= cdb[9]; - - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + u32 lba, txlen; + + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); trace_seq_putc(p, 0); @@ -96,23 +87,13 @@ static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; - - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - txlen |= (cdb[10] << 24); - txlen |= (cdb[11] << 16); - txlen |= (cdb[12] << 8); - txlen |= cdb[13]; - - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + u64 lba; + u32 txlen; + + lba = get_unaligned_be64(&cdb[2]); + txlen = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME_16) @@ -127,8 +108,8 @@ static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0, txlen = 0; - u32 ei_lbrt = 0; + u64 lba; + u32 ei_lbrt, txlen; switch (SERVICE_ACTION32(cdb)) { case READ_32: @@ -148,26 +129,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[12] << 56); - lba |= ((u64)cdb[13] << 48); - lba |= ((u64)cdb[14] << 40); - lba |= ((u64)cdb[15] << 32); - lba |= (cdb[16] << 24); - lba |= (cdb[17] << 16); - lba |= (cdb[18] << 8); - lba |= cdb[19]; - ei_lbrt |= (cdb[20] << 24); - ei_lbrt |= (cdb[21] << 16); - ei_lbrt |= (cdb[22] << 8); - ei_lbrt |= cdb[23]; - txlen |= (cdb[28] << 24); - txlen |= (cdb[29] << 16); - txlen |= (cdb[30] << 8); - txlen |= cdb[31]; - - trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u", - cmd, (unsigned long long)lba, - (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt); + lba = get_unaligned_be64(&cdb[12]); + ei_lbrt = get_unaligned_be32(&cdb[20]); + txlen = get_unaligned_be32(&cdb[28]); + + trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u", + cmd, lba, txlen, cdb[10] >> 5, ei_lbrt); if 
(SERVICE_ACTION32(cdb) == WRITE_SAME_32) trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); @@ -182,7 +149,7 @@ static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - unsigned int regions = cdb[7] << 8 | cdb[8]; + unsigned int regions = get_unaligned_be16(&cdb[7]); trace_seq_printf(p, "regions=%u", (regions - 8) / 16); trace_seq_putc(p, 0); @@ -194,8 +161,8 @@ static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0; - u32 alloc_len = 0; + u64 lba; + u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case SAI_READ_CAPACITY_16: @@ -209,21 +176,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - alloc_len |= (cdb[10] << 24); - alloc_len |= (cdb[11] << 16); - alloc_len |= (cdb[12] << 8); - alloc_len |= cdb[13]; - - trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, - (unsigned long long)lba, alloc_len); + lba = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len); out: trace_seq_putc(p, 0); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 6fd2fe210fc324b2bde0f1a72bd79f2939b3bca9..a8af1c6443690b8ca491651b50563025585b8855 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -37,6 +37,8 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" +#define ISCSI_SEND_MAX_ALLOWED 10 + static int dbg_session; module_param_named(debug_session, dbg_session, int, S_IRUGO | S_IWUSR); @@ -117,7 +119,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); - return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return sysfs_emit(buf, "%llu\n", + (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); @@ -127,7 +133,7 @@ show_transport_##name(struct device *dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ - return sprintf(buf, format"\n", priv->iscsi_transport->name); \ + return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); @@ -168,7 +174,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sprintf(buf, "%llu\n", (unsigned long long) ep->id); + return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -421,40 +427,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; - int param; - int param_type; + int param = -1; - if (attr == &dev_attr_iface_enabled.attr) - param = ISCSI_NET_PARAM_IFACE_ENABLE; - else if (attr == &dev_attr_iface_vlan_id.attr) - param = 
ISCSI_NET_PARAM_VLAN_ID; - else if (attr == &dev_attr_iface_vlan_priority.attr) - param = ISCSI_NET_PARAM_VLAN_PRIORITY; - else if (attr == &dev_attr_iface_vlan_enabled.attr) - param = ISCSI_NET_PARAM_VLAN_ENABLED; - else if (attr == &dev_attr_iface_mtu.attr) - param = ISCSI_NET_PARAM_MTU; - else if (attr == &dev_attr_iface_port.attr) - param = ISCSI_NET_PARAM_PORT; - else if (attr == &dev_attr_iface_ipaddress_state.attr) - param = ISCSI_NET_PARAM_IPADDR_STATE; - else if (attr == &dev_attr_iface_delayed_ack_en.attr) - param = ISCSI_NET_PARAM_DELAYED_ACK_EN; - else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) - param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) - param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf.attr) - param = ISCSI_NET_PARAM_TCP_WSF; - else if (attr == &dev_attr_iface_tcp_timer_scale.attr) - param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; - else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) - param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; - else if (attr == &dev_attr_iface_cache_id.attr) - param = ISCSI_NET_PARAM_CACHE_ID; - else if (attr == &dev_attr_iface_redirect_en.attr) - param = ISCSI_NET_PARAM_REDIRECT_EN; - else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) + if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) param = ISCSI_IFACE_PARAM_HDRDGST_EN; @@ -490,6 +465,40 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; + + if (param != -1) + return t->attr_is_visible(ISCSI_IFACE_PARAM, param); + + if (attr == &dev_attr_iface_enabled.attr) + param = ISCSI_NET_PARAM_IFACE_ENABLE; + else if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr == &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -580,32 +589,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, return 0; } - switch (param) { - case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: - case ISCSI_IFACE_PARAM_HDRDGST_EN: - case ISCSI_IFACE_PARAM_DATADGST_EN: - case 
ISCSI_IFACE_PARAM_IMM_DATA_EN: - case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: - case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: - case ISCSI_IFACE_PARAM_PDU_INORDER_EN: - case ISCSI_IFACE_PARAM_ERL: - case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: - case ISCSI_IFACE_PARAM_FIRST_BURST: - case ISCSI_IFACE_PARAM_MAX_R2T: - case ISCSI_IFACE_PARAM_MAX_BURST: - case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: - case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: - case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: - case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: - case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: - case ISCSI_IFACE_PARAM_INITIATOR_NAME: - param_type = ISCSI_IFACE_PARAM; - break; - default: - param_type = ISCSI_NET_PARAM; - } - - return t->attr_is_visible(param_type, param); + return t->attr_is_visible(ISCSI_NET_PARAM, param); } static struct attribute *iscsi_iface_attrs[] = { @@ -1721,12 +1705,14 @@ EXPORT_SYMBOL_GPL(iscsi_is_session_online); static void iscsi_session_release(struct device *dev) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev); + struct iscsi_cls_session_wrapper *session_wrapper = + iscsi_cls_session_to_wrapper(session); struct Scsi_Host *shost; shost = iscsi_session_to_shost(session); scsi_host_put(shost); ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n"); - kfree(session); + kfree(session_wrapper); } int iscsi_is_session_dev(const struct device *dev) @@ -1908,12 +1894,12 @@ static void session_recovery_timedout(struct work_struct *work) } spin_unlock_irqrestore(&session->lock, flags); - if (session->transport->session_recovery_timedout) - session->transport->session_recovery_timedout(session); - ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); + + if (session->transport->session_recovery_timedout) + session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) @@ -2008,7 +1994,7 @@ static void __iscsi_unbind_session(struct work_struct *work) if (session->target_id == ISCSI_MAX_TARGET) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - return; + goto unbind_session_exit; } target_id = session->target_id; @@ -2020,6 +2006,8 @@ static void __iscsi_unbind_session(struct work_struct *work) ida_simple_remove(&iscsi_sess_ida, target_id); scsi_remove_target(&session->dev); + +unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); } @@ -2028,13 +2016,14 @@ struct iscsi_cls_session * iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, int dd_size) { + struct iscsi_cls_session_wrapper *session_wrapper; struct iscsi_cls_session *session; - session = kzalloc(sizeof(*session) + dd_size, - GFP_KERNEL); - if (!session) + session_wrapper = kzalloc(sizeof(*session_wrapper) + dd_size, + GFP_KERNEL); + if (!session_wrapper) return NULL; - + session = &session_wrapper->cls_sess; session->transport = transport; session->creator = -1; session->recovery_tmo = 120; @@ -2153,7 +2142,11 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) { if (!iscsi_is_conn_dev(dev)) return 0; - return iscsi_destroy_conn(iscsi_dev_to_conn(dev)); + + iscsi_remove_conn(iscsi_dev_to_conn(dev)); + iscsi_put_conn(iscsi_dev_to_conn(dev)); + + return 0; } void iscsi_remove_session(struct iscsi_cls_session *session) @@ -2185,6 +2178,8 @@ void iscsi_remove_session(struct 
iscsi_cls_session *session) scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* flush running scans then delete devices */ flush_work(&session->scan_work); + /* flush running unbind operations */ + flush_work(&session->unbind_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ @@ -2210,6 +2205,95 @@ void iscsi_free_session(struct iscsi_cls_session *session) } EXPORT_SYMBOL_GPL(iscsi_free_session); +/** + * iscsi_alloc_conn - alloc iscsi class connection + * @session: iscsi cls session + * @dd_size: private driver data size + * @cid: connection id + */ +struct iscsi_cls_conn * +iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) +{ + struct iscsi_transport *transport = session->transport; + struct iscsi_cls_conn *conn; + + conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); + if (!conn) + return NULL; + if (dd_size) + conn->dd_data = &conn[1]; + + mutex_init(&conn->ep_mutex); + INIT_LIST_HEAD(&conn->conn_list); + conn->transport = transport; + conn->cid = cid; + + /* this is released in the dev's release function */ + if (!get_device(&session->dev)) + goto free_conn; + + dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid); + device_initialize(&conn->dev); + conn->dev.parent = &session->dev; + conn->dev.release = iscsi_conn_release; + + return conn; + +free_conn: + kfree(conn); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_alloc_conn); + +/** + * iscsi_add_conn - add iscsi class connection + * @conn: iscsi cls connection + * + * This will expose iscsi_cls_conn to sysfs so make sure the related + * resources for sysfs attributes are initialized before calling this. + */ +int iscsi_add_conn(struct iscsi_cls_conn *conn) +{ + int err; + unsigned long flags; + struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent); + + err = device_add(&conn->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register connection's dev\n"); + return err; + } + transport_register_device(&conn->dev); + + spin_lock_irqsave(&connlock, flags); + list_add(&conn->conn_list, &connlist); + spin_unlock_irqrestore(&connlock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_add_conn); + +/** + * iscsi_remove_conn - remove iscsi class connection from sysfs + * @conn: iscsi cls connection + * + * Remove iscsi_cls_conn from sysfs, and wait for previous + * read/write of iscsi_cls_conn's attributes in sysfs to finish. 
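Taken together, iscsi_alloc_conn(), iscsi_add_conn(), iscsi_remove_conn() and iscsi_put_conn() split connection allocation, sysfs registration and teardown into separate steps. A minimal usage sketch, assuming a hypothetical driver-private struct my_conn (the names below are illustrative, not part of this patch):

	struct my_conn { int dummy; };	/* hypothetical per-connection data */

	static struct iscsi_cls_conn *my_create_conn(struct iscsi_cls_session *session,
						     uint32_t cid)
	{
		struct iscsi_cls_conn *cls_conn;

		cls_conn = iscsi_alloc_conn(session, sizeof(struct my_conn), cid);
		if (!cls_conn)
			return NULL;

		/* initialize everything the sysfs attributes may read ... */

		if (iscsi_add_conn(cls_conn)) {
			iscsi_put_conn(cls_conn);	/* drop the ref taken at alloc */
			return NULL;
		}
		return cls_conn;
	}

	static void my_destroy_conn(struct iscsi_cls_conn *cls_conn)
	{
		iscsi_remove_conn(cls_conn);	/* hide from sysfs, wait for readers */
		iscsi_put_conn(cls_conn);	/* final put runs the release function */
	}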
+ */ +void iscsi_remove_conn(struct iscsi_cls_conn *conn) +{ + unsigned long flags; + + spin_lock_irqsave(&connlock, flags); + list_del(&conn->conn_list); + spin_unlock_irqrestore(&connlock, flags); + + transport_unregister_device(&conn->dev); + device_del(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_remove_conn); + /** * iscsi_create_conn - create iscsi class connection * @session: iscsi cls session @@ -2296,6 +2380,18 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) } EXPORT_SYMBOL_GPL(iscsi_destroy_conn); +void iscsi_put_conn(struct iscsi_cls_conn *conn) +{ + put_device(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_put_conn); + +void iscsi_get_conn(struct iscsi_cls_conn *conn) +{ + get_device(&conn->dev); +} +EXPORT_SYMBOL_GPL(iscsi_get_conn); + /* * iscsi interface functions */ @@ -2757,11 +2853,18 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) struct iscsi_cls_session *session; int err = 0, value = 0; + if (ev->u.set_param.len > PAGE_SIZE) + return -EINVAL; + session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) return -EINVAL; + /* data will be regarded as NULL-ended string, do length check */ + if (strlen(data) > ev->u.set_param.len) + return -EINVAL; + switch (ev->u.set_param.param) { case ISCSI_PARAM_SESS_RECOVERY_TMO: sscanf(data, "%d", &value); @@ -2904,6 +3007,9 @@ iscsi_set_host_param(struct iscsi_transport *transport, if (!transport->set_host_param) return -ENOSYS; + if (ev->u.set_host_param.len > PAGE_SIZE) + return -EINVAL; + shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", @@ -2911,6 +3017,10 @@ iscsi_set_host_param(struct iscsi_transport *transport, return -ENODEV; } + /* see similar check in iscsi_if_set_param() */ + if (strlen(data) > ev->u.set_host_param.len) + return -EINVAL; + err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); scsi_host_put(shost); @@ -2941,6 +3051,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_session_has_conns(int sid) +{ + struct iscsi_cls_conn *conn; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (iscsi_conn_get_sid(conn) == sid) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&connlock, flags); + + return found; +} + static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) @@ -3148,7 +3276,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; @@ -3473,6 +3601,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; + u32 pdu_len; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3480,6 +3609,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; + if (!netlink_capable(skb, CAP_SYS_ADMIN)) + return -EPERM; + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else @@ -3518,10 +3650,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t 
*group) break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); - if (session) - transport->destroy_session(session); - else + if (!session) err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else + transport->destroy_session(session); break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -3585,6 +3719,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SEND_PDU: + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); + + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); if (conn) ev->r.retcode = transport->send_pdu(conn, @@ -3678,6 +3820,7 @@ iscsi_if_rx(struct sk_buff *skb) struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; + int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || @@ -3708,6 +3851,10 @@ iscsi_if_rx(struct sk_buff *skb) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); + if (err == -EAGAIN && --retries < 0) { + printk(KERN_WARNING "Send reply failed, error %d\n", err); + break; + } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } @@ -3986,7 +4133,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); + return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); @@ -3995,7 +4142,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->creator); + return sysfs_emit(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); @@ -4004,7 +4151,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sprintf(buf, "%d\n", session->target_id); + return sysfs_emit(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); @@ -4017,8 +4164,8 @@ show_priv_session_##field(struct device *dev, \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if (session->field == -1) \ - return sprintf(buf, "off\n"); \ - return sprintf(buf, format"\n", session->field); \ + return sysfs_emit(buf, "off\n"); \ + return sysfs_emit(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 0cd16e80b01956c8b4161635e7a5f489b1226668..824e4f053e683599544b987d303d005e8ea8bff5 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1235,16 +1235,15 @@ int sas_read_port_mode_page(struct scsi_device *sdev) char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); struct scsi_mode_data mode_data; - int res, error; + int error; if (!buffer) return 
-ENOMEM; - res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3, - &mode_data, NULL); + error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3, + &mode_data, NULL); - error = -EINVAL; - if (!scsi_status_is_good(res)) + if (error) goto out; msdata = buffer + mode_data.header_length + diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 40b85b752b794fe46c1828c71d3c25a154396a97..5390a900558440467bd5551e606da01db590c51e 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -130,12 +130,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, sshdr = &sshdr_tmp; for(i = 0; i < DV_RETRIES; i++) { + /* + * The purpose of the RQF_PM flag below is to bypass the + * SDEV_QUIESCE state. + */ result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, sshdr, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER, - 0, NULL); + RQF_PM, NULL); if (driver_byte(result) != DRIVER_SENSE || sshdr->sense_key != UNIT_ATTENTION) break; @@ -1018,23 +1022,26 @@ spi_dv_device(struct scsi_device *sdev) */ lock_system_sleep(); + if (scsi_autopm_get_device(sdev)) + goto unlock_system_sleep; + if (unlikely(spi_dv_in_progress(starget))) - goto unlock; + goto put_autopm; if (unlikely(scsi_device_get(sdev))) - goto unlock; + goto put_autopm; spi_dv_in_progress(starget) = 1; buffer = kzalloc(len, GFP_KERNEL); if (unlikely(!buffer)) - goto out_put; + goto put_sdev; /* We need to verify that the actual device will quiesce; the * later target quiesce is just a nice to have */ if (unlikely(scsi_device_quiesce(sdev))) - goto out_free; + goto free_buffer; scsi_target_quiesce(starget); @@ -1054,12 +1061,16 @@ spi_dv_device(struct scsi_device *sdev) spi_initial_dv(starget) = 1; - out_free: +free_buffer: kfree(buffer); - out_put: + +put_sdev: spi_dv_in_progress(starget) = 0; scsi_device_put(sdev); -unlock: +put_autopm: + scsi_autopm_put_device(sdev); + +unlock_system_sleep: unlock_system_sleep(); } EXPORT_SYMBOL(spi_dv_device); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4a57ffecc7e616fd2bcc4a7897996994deec1543..f618726bbe70ee1a814c1f02d3a87b3853624dde 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -132,6 +132,7 @@ static DEFINE_MUTEX(sd_ref_mutex); static struct kmem_cache *sd_cdb_cache; static mempool_t *sd_cdb_pool; +static mempool_t *sd_page_pool; static const char *sd_cache_types[] = { "write through", "none", "write back", @@ -204,6 +205,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, sp = buffer_data[0] & 0x80 ? 1 : 0; buffer_data[0] &= ~0x80; + /* + * Ensure WP, DPOFUA, and RESERVED fields are cleared in + * received mode parameter buffer before doing MODE SELECT. 
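For reference, the byte being zeroed is the device-specific parameter of the mode parameter header; a rough sketch of its layout for direct-access devices (per SBC, shown as an aid and not taken from this patch):

	/*
	 * Mode parameter header, device-specific parameter (direct-access):
	 *   bit 7: WP     - write protected (read-only, meaningless on MODE SELECT)
	 *   bit 4: DPOFUA - DPO/FUA support (reserved on MODE SELECT)
	 *   other bits: reserved
	 * Echoing these bits back unchanged in MODE SELECT can cause a target to
	 * reject the command, hence the field is cleared before scsi_mode_select().
	 */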
+ */ + data.device_specific = 0; + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, SD_MAX_RETRIES, &data, &sshdr)) { if (scsi_sense_valid(&sshdr)) @@ -758,9 +765,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) unsigned int data_len = 24; char *buf; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -791,9 +799,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); u32 data_len = sdp->sector_size; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -821,9 +830,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap) u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); u32 data_len = sdp->sector_size; - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); if (!rq->special_vec.bv_page) return BLKPREP_DEFER; + clear_highpage(rq->special_vec.bv_page); rq->special_vec.bv_offset = 0; rq->special_vec.bv_len = data_len; rq->rq_flags |= RQF_SPECIAL_PAYLOAD; @@ -1016,6 +1026,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) sector_t block = blk_rq_pos(rq); sector_t threshold; unsigned int this_count = blk_rq_sectors(rq); + unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); unsigned int dif, dix; int ret; unsigned char protect; @@ -1206,6 +1217,10 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; } else { + /* Avoid that 0 blocks gets translated into 256 blocks. */ + if (WARN_ON_ONCE(nr_blocks == 0)) + return BLK_STS_IOERR; + if (unlikely(rq->cmd_flags & REQ_FUA)) { /* * This happens only if this drive failed @@ -1287,7 +1302,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) u8 *cmnd; if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - __free_page(rq->special_vec.bv_page); + mempool_free(rq->special_vec.bv_page, sd_page_pool); if (SCpnt->cmnd != scsi_req(rq)->cmd) { cmnd = SCpnt->cmnd; @@ -1398,11 +1413,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } - /* - * XXX and what if there are packets in flight and this close() - * XXX is followed by a "rmmod sd_mod"? 
- */ - scsi_disk_put(sdkp); } @@ -1641,7 +1651,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) /* we need to evaluate the error return */ if (scsi_sense_valid(sshdr) && (sshdr->asc == 0x3a || /* medium not present */ - sshdr->asc == 0x20)) /* invalid command */ + sshdr->asc == 0x20 || /* invalid command */ + (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ /* this is no error here */ return 0; @@ -1679,20 +1690,30 @@ static void sd_rescan(struct device *dev) static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; + struct gendisk *disk = bdev->bd_disk; + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdev = sdkp->device; + void __user *p = compat_ptr(arg); int error; + error = scsi_verify_blk_ioctl(bdev, cmd); + if (error < 0) + return error; + error = scsi_ioctl_block_when_processing_errors(sdev, cmd, (mode & FMODE_NDELAY) != 0); if (error) return error; + + if (is_sed_ioctl(cmd)) + return sed_ioctl(sdkp->opal_dev, cmd, p); /* * Let the static ioctl translation table take care of it. */ if (!sdev->host->hostt->compat_ioctl) return -ENOIOCTLCMD; - return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); + return sdev->host->hostt->compat_ioctl(sdev, cmd, p); } #endif @@ -1953,9 +1974,13 @@ static int sd_done(struct scsi_cmnd *SCpnt) } break; case REQ_OP_ZONE_REPORT: + /* To avoid that the block layer performs an incorrect + * bio_advance() call and restart of the remainder of + * incomplete report zone BIOs, always indicate a full + * completion of REQ_OP_ZONE_REPORT. + */ if (!result) { - good_bytes = scsi_bufflen(SCpnt) - - scsi_get_resid(SCpnt); + good_bytes = scsi_bufflen(SCpnt); scsi_set_resid(SCpnt, 0); } else { good_bytes = 0; @@ -2189,8 +2214,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ @@ -2585,6 +2612,13 @@ sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage, unsigned char *buffer, int len, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { + /* + * If we must use MODE SENSE(10), make sure that the buffer length + * is at least 8 bytes so that the mode sense header fits. + */ + if (sdp->use_10_for_ms && len < 8) + len = 8; + return scsi_mode_sense(sdp, dbd, modepage, buffer, len, SD_TIMEOUT, SD_MAX_RETRIES, data, sshdr); @@ -2600,7 +2634,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) int res; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; - int disk_ro = get_disk_ro(sdkp->disk); int old_wp = sdkp->write_prot; set_disk_ro(sdkp->disk, 0); @@ -2625,23 +2658,23 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) * 5: Illegal Request, Sense Code 24: Invalid field in * CDB. */ - if (!scsi_status_is_good(res)) + if (res < 0) res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL); /* * Third attempt: ask 255 bytes, as we did earlier. 
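The error-handling conversions in this hunk and the following ones appear to rely on scsi_mode_sense() returning 0 on success and a negative errno on failure, instead of a SCSI status word that had to be decoded. Under that assumption the checks reduce to a simple sign test, for example:

	res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255, &data, NULL);
	if (res < 0)	/* negative errno: command or transport failure */
		sd_first_printk(KERN_WARNING, sdkp,
				"Test WP failed, assume Write Enabled\n");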
*/ - if (!scsi_status_is_good(res)) + if (res < 0) res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255, &data, NULL); } - if (!scsi_status_is_good(res)) { + if (res < 0) { sd_first_printk(KERN_WARNING, sdkp, "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); - set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); + set_disk_ro(sdkp->disk, sdkp->write_prot); if (sdkp->first_scan || old_wp != sdkp->write_prot) { sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", sdkp->write_prot ? "on" : "off"); @@ -2697,7 +2730,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len, &data, &sshdr); - if (!scsi_status_is_good(res)) + if (res < 0) goto bad_sense; if (!data.header_length) { @@ -2729,7 +2762,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); - if (scsi_status_is_good(res)) { + if (!res) { int offset = data.header_length + data.block_descriptor_length; while (offset < len) { @@ -2847,7 +2880,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, SD_MAX_RETRIES, &data, &sshdr); - if (!scsi_status_is_good(res) || !data.header_length || + if (res < 0 || !data.header_length || data.length < 6) { sd_first_printk(KERN_WARNING, sdkp, "getting Control mode page failed, assume no ATO\n"); @@ -2960,9 +2993,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } else { - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { @@ -3059,6 +3089,58 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->security = 1; } +/* + * Determine the device's preferred I/O size for reads and writes + * unless the reported value is unreasonably small, large, not a + * multiple of the physical block size, or simply garbage. 
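A worked example of the checks performed by the helper below, with hypothetical numbers for illustration:

	/*
	 * A disk with 512-byte logical and 4096-byte physical blocks that
	 * reports an optimal transfer length of 8 logical blocks gives
	 *     opt_xfer_bytes = 8 * 512 = 4096
	 *   - non-zero, below dev_max and SD_DEF_XFER_BLOCKS
	 *   - >= PAGE_SIZE (on 4 KiB-page systems)
	 *   - a multiple of the 4096-byte physical block size
	 * so io_opt becomes 4096 bytes and rw_max 8 sectors.  A report of
	 * 7 blocks (3584 bytes) would fail the last two checks, and the
	 * device/driver limits would be used instead.
	 */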
+ */ +static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, + unsigned int dev_max) +{ + struct scsi_device *sdp = sdkp->device; + unsigned int opt_xfer_bytes = + logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + + if (sdkp->opt_xfer_blocks == 0) + return false; + + if (sdkp->opt_xfer_blocks > dev_max) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> dev_max (%u logical blocks)\n", + sdkp->opt_xfer_blocks, dev_max); + return false; + } + + if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> sd driver limit (%u logical blocks)\n", + sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); + return false; + } + + if (opt_xfer_bytes < PAGE_SIZE) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes < " \ + "PAGE_SIZE (%u bytes)\n", + opt_xfer_bytes, (unsigned int)PAGE_SIZE); + return false; + } + + if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes not a " \ + "multiple of physical block size (%u bytes)\n", + opt_xfer_bytes, sdkp->physical_block_size); + return false; + } + + sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", + opt_xfer_bytes); + return true; +} + /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -3099,6 +3181,15 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); + /* + * set the default to rotational. All non-rotational devices + * support the block characteristics VPD page, which will + * cause this to be updated correctly and any device which + * doesn't support it should be treated as rotational. + */ + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); @@ -3128,20 +3219,14 @@ static int sd_revalidate_disk(struct gendisk *disk) dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); - /* - * Determine the device's preferred I/O size for reads and writes - * unless the reported value is unreasonably small, large, or - * garbage. 
- */ - if (sdkp->opt_xfer_blocks && - sdkp->opt_xfer_blocks <= dev_max && - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { + if (sd_validate_opt_xfer_size(sdkp, dev_max)) { q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else + } else { + q->limits.io_opt = 0; rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), (sector_t)BLK_DEF_MAX_SECTORS); + } /* Do not exceed controller limit */ rw_max = min(rw_max, queue_max_hw_sectors(q)); @@ -3276,6 +3361,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) blk_pm_runtime_init(sdp->request_queue, dev); device_add_disk(dev, gd); + blk_delete_region(disk_devt(sdkp->disk), SD_MINORS, sd_default_probe); if (sdkp->capacity) sd_dif_config_host(sdkp); @@ -3371,15 +3457,16 @@ static int sd_probe(struct device *dev) } device_initialize(&sdkp->dev); - sdkp->dev.parent = dev; + sdkp->dev.parent = get_device(dev); sdkp->dev.class = &sd_disk_class; dev_set_name(&sdkp->dev, "%s", dev_name(dev)); error = device_add(&sdkp->dev); - if (error) - goto out_free_index; + if (error) { + put_device(&sdkp->dev); + goto out; + } - get_device(dev); dev_set_drvdata(dev, sdkp); get_device(&sdkp->dev); /* prevent release before async_schedule */ @@ -3452,9 +3539,21 @@ static void scsi_disk_release(struct device *dev) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct gendisk *disk = sdkp->disk; - + struct request_queue *q = disk->queue; + ida_free(&sd_index_ida, sdkp->index); + /* + * Wait until all requests that are in progress have completed. + * This is necessary to avoid that e.g. scsi_end_request() crashes + * due to clearing the disk->private_data pointer. Wait from inside + * scsi_disk_release() instead of from sd_release() to avoid that + * freezing and unfreezing the request queue affects user space I/O + * in case multiple processes open a /dev/sd... node concurrently. 
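A sketch of the ordering this freeze/unfreeze pair provides (the interleaving shown is illustrative):

	/*
	 *   scsi_disk_release()             in-flight request completion
	 *   -------------------             ----------------------------
	 *   blk_mq_freeze_queue(q);  <----  waits until the last request that
	 *                                   could still dereference
	 *                                   disk->private_data has completed
	 *   blk_mq_unfreeze_queue(q);
	 *   disk->private_data = NULL;      now safe: nothing is in flight
	 *   put_disk(disk);
	 */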
+ */ + blk_mq_freeze_queue(q); + blk_mq_unfreeze_queue(q); + disk->private_data = NULL; put_disk(disk); put_device(&sdkp->device->sdev_gendev); @@ -3635,6 +3734,13 @@ static int __init init_sd(void) goto err_out_cache; } + sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0); + if (!sd_page_pool) { + printk(KERN_ERR "sd: can't init discard page pool\n"); + err = -ENOMEM; + goto err_out_ppool; + } + err = scsi_register_driver(&sd_template.gendrv); if (err) goto err_out_driver; @@ -3642,6 +3748,9 @@ static int __init init_sd(void) return 0; err_out_driver: + mempool_destroy(sd_page_pool); + +err_out_ppool: mempool_destroy(sd_cdb_pool); err_out_cache: @@ -3668,6 +3777,7 @@ static void __exit exit_sd(void) scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_cdb_pool); + mempool_destroy(sd_page_pool); kmem_cache_destroy(sd_cdb_cache); class_unregister(&sd_disk_class); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index a7d4f50b67d433080eff0f6a7df34e1910683513..2a139386b34de951c8295e3ffc320dfd681f3848 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -2,6 +2,8 @@ #ifndef _SCSI_DISK_H #define _SCSI_DISK_H +#include + /* * More than enough for everybody ;) The huge number of majors * is a leftover from 16bit dev_t days, we don't really need that @@ -117,6 +119,9 @@ struct scsi_disk { unsigned urswrz : 1; unsigned security : 1; unsigned ignore_medium_access_errors : 1; + + KABI_RESERVE(1) + KABI_RESERVE(2) }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 0fc39224ce1e4f10c81db98ab01650215e2d44c8..e3d4f5b925f6be62e0a5d4917b0f392550b1075d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -493,9 +493,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, int i; struct ses_component *scomp; - if (!edev->component[0].scratch) - return 0; - for (i = 0; i < edev->components; i++) { scomp = edev->component[i].scratch; if (scomp->addr != efd->addr) @@ -561,11 +558,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, struct enclosure_component *ecomp; if (desc_ptr) { - if (desc_ptr >= buf + page7_len) { + len = (desc_ptr[2] << 8) + desc_ptr[3]; + desc_ptr += 4; + if (desc_ptr + len > buf + page7_len) { desc_ptr = NULL; } else { - len = (desc_ptr[2] << 8) + desc_ptr[3]; - desc_ptr += 4; /* Add trailing zero - pushes into * reserved space */ desc_ptr[len] = '\0'; @@ -581,8 +578,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, components++, type_ptr[0], name); - else + else if (components < edev->components) ecomp = &edev->component[components++]; + else + ecomp = ERR_PTR(-EINVAL); if (!IS_ERR(ecomp)) { if (addl_desc_ptr) @@ -747,9 +746,11 @@ static int ses_intf_add(struct device *cdev, buf = NULL; } page2_not_supported: - scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); - if (!scomp) - goto err_free; + if (components > 0) { + scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); + if (!scomp) + goto err_free; + } edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev), components, &ses_enclosure_callbacks); @@ -829,7 +830,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(ses_dev->page2); kfree(ses_dev); - kfree(edev->component[0].scratch); + if (edev->components > 0) + kfree(edev->component[0].scratch); put_device(&edev->edev); enclosure_unregister(edev); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 
8a254bb46a9b402b83e8e99e4474327c75c52c18..7c9c63be214db310bdd4b8bd484643f0ce0b0abd 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -395,7 +395,6 @@ sg_release(struct inode *inode, struct file *filp) mutex_lock(&sdp->open_rel_lock); scsi_autopm_put_device(sdp->device); - kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exlude clearing, start many; @@ -407,6 +406,7 @@ sg_release(struct inode *inode, struct file *filp) wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); + kref_put(&sfp->f_ref, sg_remove_sfp); return 0; } @@ -434,7 +434,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); - if (!access_ok(VERIFY_WRITE, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; if (sfp->force_packid && (count >= SZ_SG_HEADER)) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); @@ -632,7 +632,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) scsi_block_when_processing_errors(sdp->device))) return -ENXIO; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; @@ -694,8 +694,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; - if (__copy_from_user(cmnd, buf, cmd_size)) + if (__copy_from_user(cmnd, buf, cmd_size)) { + sg_remove_request(sfp, srp); return -EFAULT; + } /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there @@ -729,7 +731,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, if (count < SZ_SG_IO_HDR) return -EINVAL; - if (!access_ok(VERIFY_READ, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ @@ -768,7 +770,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, sg_remove_request(sfp, srp); return -EMSGSIZE; } - if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { + if (!access_ok(hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; /* protects following copy_from_user()s + get_user()s */ } @@ -808,8 +810,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); - if (hp->dxfer_len >= SZ_256M) + if (hp->dxfer_len >= SZ_256M) { + sg_remove_request(sfp, srp); return -EINVAL; + } k = sg_start_req(srp, cmnd); if (k) { @@ -822,7 +826,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, if (atomic_read(&sdp->detaching)) { if (srp->bio) { scsi_req_free_cmd(scsi_req(srp->rq)); - blk_end_request_all(srp->rq, BLK_STS_IOERR); + blk_put_request(srp->rq); srp->rq = NULL; } @@ -922,7 +926,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; - if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) + if (!access_ok(p, SZ_SG_IO_HDR)) return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); @@ -968,7 +972,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) case SG_GET_LOW_DMA: return put_user((int) 
sdp->device->host->unchecked_isa_dma, ip); case SG_GET_SCSI_ID: - if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) + if (!access_ok(p, sizeof (sg_scsi_id_t))) return -EFAULT; else { sg_scsi_id_t __user *sg_idp = p; @@ -997,7 +1001,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) sfp->force_packid = val ? 1 : 0; return 0; case SG_GET_PACK_ID: - if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) + if (!access_ok(ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { @@ -1078,7 +1082,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: - if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) + if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) return -EFAULT; else { sg_req_info_t *rinfo; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 2112ea6723c60f65252b61f0ad9471ce327d60f7..dafad05dccdc226741bf603a116d489e3f8bc788 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -176,6 +176,11 @@ static inline void pqi_scsi_done(struct scsi_cmnd *scmd) scmd->scsi_done(scmd); } +static inline void pqi_disable_write_same(struct scsi_device *sdev) +{ + sdev->no_write_same = 1; +} + static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) { return memcmp(scsi3addr1, scsi3addr2, 8) == 0; @@ -653,6 +658,7 @@ struct bmic_host_wellness_driver_version { u8 driver_version_tag[2]; __le16 driver_version_length; char driver_version[32]; + u8 dont_write_tag[2]; u8 end_tag[2]; }; @@ -682,6 +688,8 @@ static int pqi_write_driver_version_to_host_wellness( strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, sizeof(buffer->driver_version) - 1); buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; + buffer->dont_write_tag[0] = 'D'; + buffer->dont_write_tag[1] = 'W'; buffer->end_tag[0] = 'Z'; buffer->end_tag[1] = 'Z'; @@ -1181,6 +1189,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, if (rc) goto out; + if (vpd->page_code != CISS_VPD_LV_STATUS) + goto out; + page_length = offsetof(struct ciss_vpd_logical_volume_status, volume_status) + vpd->page_length; if (page_length < sizeof(*vpd)) @@ -2720,6 +2731,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, switch (response->header.iu_type) { case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: + if (io_request->scmd) + io_request->scmd->result = 0; + /* fall through */ case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: break; case PQI_RESPONSE_IU_TASK_MANAGEMENT: @@ -3688,8 +3702,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, return -ETIMEDOUT; msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies); - if (msecs_blocked >= timeout_msecs) - return -ETIMEDOUT; + if (msecs_blocked >= timeout_msecs) { + rc = -ETIMEDOUT; + goto out; + } timeout_msecs -= msecs_blocked; } } @@ -5324,6 +5340,8 @@ static int pqi_slave_alloc(struct scsi_device *sdev) scsi_change_queue_depth(sdev, device->advertised_queue_depth); } + if (pqi_is_logical_device(device)) + pqi_disable_write_same(sdev); } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -5588,7 +5606,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) return rc; } -static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static 
int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { int rc; struct pqi_ctrl_info *ctrl_info; @@ -6369,7 +6388,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) else mask = DMA_BIT_MASK(32); - rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); + rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); if (rc) { dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); goto disable_device; @@ -6686,6 +6705,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev) * storage. */ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); + pqi_free_interrupts(ctrl_info); pqi_reset(ctrl_info); if (rc == 0) return; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 5141bd4c9f06135e5e3a5dc30277200a112dc998..ca7dfb3a520ff2c7653d7822d86b88bc442b1840 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -59,7 +59,7 @@ #define SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 -#define SIS_CTRL_READY_TIMEOUT_SECS 30 +#define SIS_CTRL_READY_TIMEOUT_SECS 180 #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 1f9a087daf69f6de5057570bf1ea9eb4726d0252..3102a75984d3b02ce39d6d59d376ccbde87b9c94 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev) base = res->start; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); - if (!hostdata) { - dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); + if (!hostdata) return -ENOMEM; - } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..bfbba3db8db03758e41ceb3e039747a7825c1ffe --- /dev/null +++ b/drivers/scsi/spraid/Kconfig @@ -0,0 +1,13 @@ +# +# Ramaxel driver configuration +# + +config RAMAXEL_SPRAID + tristate "Ramaxel spraid Adapter" + depends on PCI && SCSI + select BLK_DEV_BSGLIB + depends on ARM64 || X86_64 + help + This driver supports Ramaxel SPRxxx serial + raid controller, which has PCIE Gen4 interface + with host and supports SAS/SATA Hdd/ssd. diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..aadc2ffd37ebdcdfb52bcfc3d0746a3de3248de3 --- /dev/null +++ b/drivers/scsi/spraid/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Ramaxel device drivers. 
+# + +obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o + +spraid-objs := spraid_main.o \ No newline at end of file diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h new file mode 100644 index 0000000000000000000000000000000000000000..c1e4980e18e5d66ca86ea8549357eafefb228126 --- /dev/null +++ b/drivers/scsi/spraid/spraid.h @@ -0,0 +1,746 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +#ifndef __SPRAID_H_ +#define __SPRAID_H_ + +#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff) +#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) +#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) +#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) +#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff) + +#define SPRAID_DEFAULT_MAX_CHANNEL 4 +#define SPRAID_DEFAULT_MAX_ID 240 +#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8 +#define MAX_SECTORS 2048 + +#define IO_SQE_SIZE sizeof(struct spraid_ioq_command) +#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command) +#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE) +#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion)) +#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid)) + +#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE) + +#define SPRAID_AQ_DEPTH 128 +#define SPRAID_NR_AEN_COMMANDS 16 +#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS) +#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1) + +#define SPRAID_ADMIN_QUEUE_NUM 1 +#define SPRAID_PTCMDS_PERQ 1 +#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue) +#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues) + +#define FUA_MASK 0x08 +#define SPRAID_MINORS BIT(MINORBITS) + +#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1) + +#define SPRAID_IO_IOSQES 7 +#define SPRAID_IO_IOCQES 4 +#define PRP_ENTRY_SIZE 8 + +#define SMALL_POOL_SIZE 256 +#define MAX_SMALL_POOL_NUM 16 +#define MAX_CMD_PER_DEV 64 +#define MAX_CDB_LEN 32 + +#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03)) + +#define CQE_STATUS_SUCCESS (0x0) + +#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81 + +#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100 +#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200 + +#define IO_6_DEFAULT_TX_LEN 256 + +#define SPRAID_INT_PAGES 2 +#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size) + +enum { + SPRAID_REQ_CANCELLED = (1 << 0), + SPRAID_REQ_USERCMD = (1 << 1), +}; + +enum { + SPRAID_SC_SUCCESS = 0x0, + SPRAID_SC_INVALID_OPCODE = 0x1, + SPRAID_SC_INVALID_FIELD = 0x2, + + SPRAID_SC_ABORT_LIMIT = 0x103, + SPRAID_SC_ABORT_MISSING = 0x104, + SPRAID_SC_ASYNC_LIMIT = 0x105, + + SPRAID_SC_DNR = 0x4000, +}; + +enum { + SPRAID_REG_CAP = 0x0000, + SPRAID_REG_CC = 0x0014, + SPRAID_REG_CSTS = 0x001c, + SPRAID_REG_AQA = 0x0024, + SPRAID_REG_ASQ = 0x0028, + SPRAID_REG_ACQ = 0x0030, + SPRAID_REG_DBS = 0x1000, +}; + +enum { + SPRAID_CC_ENABLE = 1 << 0, + SPRAID_CC_CSS_NVM = 0 << 4, + SPRAID_CC_MPS_SHIFT = 7, + SPRAID_CC_AMS_SHIFT = 11, + SPRAID_CC_SHN_SHIFT = 14, + SPRAID_CC_IOSQES_SHIFT = 16, + SPRAID_CC_IOCQES_SHIFT = 20, + SPRAID_CC_AMS_RR = 0 << SPRAID_CC_AMS_SHIFT, + SPRAID_CC_SHN_NONE = 0 << SPRAID_CC_SHN_SHIFT, + SPRAID_CC_IOSQES = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT, + SPRAID_CC_IOCQES = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT, + SPRAID_CC_SHN_NORMAL = 1 << SPRAID_CC_SHN_SHIFT, + SPRAID_CC_SHN_MASK = 3 << SPRAID_CC_SHN_SHIFT, + SPRAID_CSTS_CFS_SHIFT = 1, + SPRAID_CSTS_SHST_SHIFT = 2, + 
SPRAID_CSTS_PP_SHIFT = 5, + SPRAID_CSTS_RDY = 1 << 0, + SPRAID_CSTS_SHST_CMPLT = 2 << 2, + SPRAID_CSTS_SHST_MASK = 3 << 2, + SPRAID_CSTS_CFS_MASK = 1 << SPRAID_CSTS_CFS_SHIFT, + SPRAID_CSTS_PP_MASK = 1 << SPRAID_CSTS_PP_SHIFT, +}; + +enum { + SPRAID_ADMIN_DELETE_SQ = 0x00, + SPRAID_ADMIN_CREATE_SQ = 0x01, + SPRAID_ADMIN_DELETE_CQ = 0x04, + SPRAID_ADMIN_CREATE_CQ = 0x05, + SPRAID_ADMIN_ABORT_CMD = 0x08, + SPRAID_ADMIN_SET_FEATURES = 0x09, + SPRAID_ADMIN_ASYNC_EVENT = 0x0c, + SPRAID_ADMIN_GET_INFO = 0xc6, + SPRAID_ADMIN_RESET = 0xc8, +}; + +enum { + SPRAID_GET_INFO_CTRL = 0, + SPRAID_GET_INFO_DEV_LIST = 1, +}; + +enum { + SPRAID_RESET_TARGET = 0, + SPRAID_RESET_BUS = 1, +}; + +enum { + SPRAID_AEN_ERROR = 0, + SPRAID_AEN_NOTICE = 2, + SPRAID_AEN_VS = 7, +}; + +enum { + SPRAID_AEN_DEV_CHANGED = 0x00, + SPRAID_AEN_FW_ACT_START = 0x01, + SPRAID_AEN_HOST_PROBING = 0x10, +}; + +enum { + SPRAID_AEN_TIMESYN = 0x00, + SPRAID_AEN_FW_ACT_FINISH = 0x02, + SPRAID_AEN_EVENT_MIN = 0x80, + SPRAID_AEN_EVENT_MAX = 0xff, +}; + +enum { + SPRAID_CMD_WRITE = 0x01, + SPRAID_CMD_READ = 0x02, + + SPRAID_CMD_NONIO_NONE = 0x80, + SPRAID_CMD_NONIO_TODEV = 0x81, + SPRAID_CMD_NONIO_FROMDEV = 0x82, +}; + +enum { + SPRAID_QUEUE_PHYS_CONTIG = (1 << 0), + SPRAID_CQ_IRQ_ENABLED = (1 << 1), + + SPRAID_FEAT_NUM_QUEUES = 0x07, + SPRAID_FEAT_ASYNC_EVENT = 0x0b, + SPRAID_FEAT_TIMESTAMP = 0x0e, +}; + +enum spraid_state { + SPRAID_NEW, + SPRAID_LIVE, + SPRAID_RESETTING, + SPRAID_DELETING, + SPRAID_DEAD, +}; + +enum { + SPRAID_CARD_HBA, + SPRAID_CARD_RAID, +}; + +enum spraid_cmd_type { + SPRAID_CMD_ADM, + SPRAID_CMD_IOPT, +}; + +struct spraid_completion { + __le32 result; + union { + struct { + __u8 sense_len; + __u8 resv[3]; + }; + __le32 result1; + }; + __le16 sq_head; + __le16 sq_id; + __u16 cmd_id; + __le16 status; +}; + +struct spraid_ctrl_info { + __le32 nd; + __le16 max_cmds; + __le16 max_channel; + __le32 max_tgt_id; + __le16 max_lun; + __le16 max_num_sge; + __le16 lun_num_in_boot; + __u8 mdts; + __u8 acl; + __u8 aerl; + __u8 card_type; + __u16 rsvd; + __u32 rtd3e; + __u8 sn[32]; + __u8 fr[16]; + __u8 rsvd1[4020]; +}; + +struct spraid_dev { + struct pci_dev *pdev; + struct device *dev; + struct Scsi_Host *shost; + struct spraid_queue *queues; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM]; + mempool_t *iod_mempool; + void __iomem *bar; + u32 max_qid; + u32 num_vecs; + u32 queue_count; + u32 ioq_depth; + int db_stride; + u32 __iomem *dbs; + struct rw_semaphore devices_rwsem; + int numa_node; + u32 page_size; + u32 ctrl_config; + u32 online_queues; + u64 cap; + int instance; + struct spraid_ctrl_info *ctrl_info; + struct spraid_dev_info *devices; + + struct spraid_cmd *adm_cmds; + struct list_head adm_cmd_list; + spinlock_t adm_cmd_lock; + + struct spraid_cmd *ioq_ptcmds; + struct list_head ioq_pt_list; + spinlock_t ioq_pt_lock; + + struct work_struct scan_work; + struct work_struct timesyn_work; + struct work_struct reset_work; + struct work_struct fw_act_work; + + enum spraid_state state; + spinlock_t state_lock; + + struct request_queue *bsg_queue; +}; + +struct spraid_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +union spraid_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct spraid_sgl_desc sgl; +}; + +struct spraid_admin_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __le32 cdw2[4]; + union spraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 
cdw15; +}; + +struct spraid_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __u64 rsvd2[2]; + union spraid_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct spraid_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct spraid_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct spraid_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct spraid_get_info { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __u32 rsvd2[4]; + union spraid_data_ptr dptr; + __u8 type; + __u8 rsvd10[3]; + __le32 cdw11; + __u32 rsvd12[4]; +}; + +struct spraid_usr_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + union { + struct { + __le16 subopcode; + __le16 rsvd1; + } info_0; + __le32 cdw2; + }; + union { + struct { + __le16 data_len; + __le16 param_len; + } info_1; + __le32 cdw3; + }; + __u64 metadata; + union spraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +enum { + SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6), + SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7), + SPRAID_CMD_FLAG_SGL_ALL = SPRAID_CMD_FLAG_SGL_METABUF | + SPRAID_CMD_FLAG_SGL_METASEG, +}; + +enum spraid_cmd_state { + SPRAID_CMD_IDLE = 0, + SPRAID_CMD_IN_FLIGHT = 1, + SPRAID_CMD_COMPLETE = 2, + SPRAID_CMD_TIMEOUT = 3, + SPRAID_CMD_TMO_COMPLETE = 4, +}; + +struct spraid_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __u64 rsvd2[4]; + __le16 sqid; + __le16 cid; + __u32 rsvd11[5]; +}; + +struct spraid_reset_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __u64 rsvd2[4]; + __u8 type; + __u8 rsvd10[3]; + __u32 rsvd11[5]; +}; + +struct spraid_admin_command { + union { + struct spraid_admin_common_command common; + struct spraid_features features; + struct spraid_create_cq create_cq; + struct spraid_create_sq create_sq; + struct spraid_delete_queue delete_queue; + struct spraid_get_info get_info; + struct spraid_abort_cmd abort; + struct spraid_reset_cmd reset; + struct spraid_usr_cmd usr_cmd; + }; +}; + +struct spraid_ioq_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __le32 cdw3[3]; + union spraid_data_ptr dptr; + __le32 cdw10[6]; + __u8 cdb[32]; + __le64 sense_addr; + __le32 cdw26[6]; +}; + +struct spraid_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __u32 rsvd3[3]; + union spraid_data_ptr dptr; + __le64 slba; + __le16 nlb; + __le16 control; + __u32 rsvd13[3]; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +struct spraid_scsi_nonio { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_length; + __u8 rsvd2; + __u32 rsvd3[3]; + union spraid_data_ptr dptr; + __u32 rsvd10[5]; + __le32 buffer_len; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +struct spraid_ioq_command { + union { + struct spraid_ioq_common_command common; + struct spraid_rw_command rw; + struct 
spraid_scsi_nonio scsi_nonio; + }; +}; + +struct spraid_passthru_common_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 data_len; + __u16 param_len; + } info_1; + __u32 cdw3; + }; + __u64 metadata; + + __u64 addr; + __u64 prp2; + + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct spraid_ioq_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 res_sense_len; + __u8 cdb_len; + __u8 rsvd0; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_1; + __u32 cdw3; + }; + union { + struct { + __u16 rsvd; + __u16 param_len; + } info_2; + __u32 cdw4; + }; + __u32 cdw5; + __u64 addr; + __u64 prp2; + union { + struct { + __u16 eid; + __u16 sid; + } info_3; + __u32 cdw10; + }; + union { + struct { + __u16 did; + __u8 did_flag; + __u8 rsvd2; + } info_4; + __u32 cdw11; + }; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 data_len; + __u32 cdw16; + __u32 cdw17; + __u32 cdw18; + __u32 cdw19; + __u32 cdw20; + __u32 cdw21; + __u32 cdw22; + __u32 cdw23; + __u64 sense_addr; + __u32 cdw26[4]; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct spraid_bsg_request { + u32 msgcode; + u32 control; + union { + struct spraid_passthru_common_cmd admcmd; + struct spraid_ioq_passthru_cmd ioqcmd; + }; +}; + +enum { + SPRAID_BSG_ADM, + SPRAID_BSG_IOQ, +}; + +struct spraid_cmd { + int qid; + int cid; + u32 result0; + u32 result1; + u16 status; + void *priv; + enum spraid_cmd_state state; + struct completion cmd_done; + struct list_head list; +}; + +struct spraid_queue { + struct spraid_dev *hdev; + spinlock_t sq_lock; + + spinlock_t cq_lock ____cacheline_aligned_in_smp; + + void *sq_cmds; + + struct spraid_completion *cqes; + + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u8 cq_phase; + u8 sqes; + u16 qid; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 q_depth; + s16 cq_vector; + void *sense; + dma_addr_t sense_dma_addr; + struct dma_pool *prp_small_pool; +}; + +struct spraid_iod { + struct spraid_queue *spraidq; + enum spraid_cmd_state state; + int npages; + u32 nsge; + u32 length; + bool use_sgl; + bool sg_drv_mgmt; + dma_addr_t first_dma; + void *sense; + dma_addr_t sense_dma; + struct scatterlist *sg; + struct scatterlist inline_sg[0]; +}; + +#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01) +#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0) +#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02) +#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20) + +#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01) +#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02) + +#define BGTASK_TYPE_REBUILD 4 +#define USR_CMD_READ 0xc2 +#define USR_CMD_RDLEN 0x1000 +#define USR_CMD_VDINFO 0x704 +#define USR_CMD_BGTASK 0x504 +#define VDINFO_PARAM_LEN 0x04 + +struct spraid_vd_info { + __u8 name[32]; + __le16 id; + __u8 rg_id; + __u8 rg_level; + __u8 sg_num; + __u8 sg_disk_num; + __u8 vd_status; + __u8 vd_type; + __u8 rsvd1[4056]; +}; + +#define MAX_REALTIME_BGTASK_NUM 32 + +struct bgtask_info { + __u8 type; + __u8 progress; + __u8 rate; + __u8 rsvd0; + __le16 vd_id; + __le16 time_left; + __u8 rsvd1[4]; +}; + +struct spraid_bgtask { + __u8 sw; + __u8 task_num; + __u8 rsvd[6]; + struct bgtask_info 
bgtask[MAX_REALTIME_BGTASK_NUM]; +}; + +struct spraid_dev_info { + __le32 hdid; + __le16 target; + __u8 channel; + __u8 lun; + __u8 attr; + __u8 flag; + __le16 max_io_kb; +}; + +#define MAX_DEV_ENTRY_PER_PAGE_4K 340 +struct spraid_dev_list { + __le32 dev_num; + __u32 rsvd0[3]; + struct spraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K]; +}; + +struct spraid_sdev_hostdata { + u32 hdid; + u16 max_io_kb; + u8 attr; + u8 flag; + u8 rg_id; + u8 rsvd[3]; +}; + +#endif + diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c new file mode 100644 index 0000000000000000000000000000000000000000..7069582d741ab800afddf518b0ece1aa0eb60f3a --- /dev/null +++ b/drivers/scsi/spraid/spraid_main.c @@ -0,0 +1,3869 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ + +/* Ramaxel Raid SPXXX Series Linux Driver */ + +#define pr_fmt(fmt) "spraid: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + + +#include "spraid.h" + +static u32 admin_tmout = 60; +module_param(admin_tmout, uint, 0644); +MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)"); + +static u32 scmd_tmout_rawdisk = 180; +module_param(scmd_tmout_rawdisk, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk(seconds)"); + +static u32 scmd_tmout_vd = 180; +module_param(scmd_tmout_vd, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)"); + +static bool max_io_force; +module_param(max_io_force, bool, 0644); +MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false(performance first)"); + +static int ioq_depth_set(const char *val, const struct kernel_param *kp); +static const struct kernel_param_ops ioq_depth_ops = { + .set = ioq_depth_set, + .get = param_get_uint, +}; + +static u32 io_queue_depth = 1024; +module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644); +MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); + +static int log_debug_switch_set(const char *val, const struct kernel_param *kp) +{ + u8 n = 0; + int ret; + + ret = kstrtou8(val, 10, &n); + if (ret != 0) + return -EINVAL; + + return param_set_byte(val, kp); +} + +static const struct kernel_param_ops log_debug_switch_ops = { + .set = log_debug_switch_set, + .get = param_get_byte, +}; + +static unsigned char log_debug_switch; +module_param_cb(log_debug_switch, &log_debug_switch_ops, + &log_debug_switch, 0644); +MODULE_PARM_DESC(log_debug_switch, + "set log state, default zero for switch off"); + +static int small_pool_num_set(const char *val, const struct kernel_param *kp) +{ + u8 n = 0; + int ret; + + ret = kstrtou8(val, 10, &n); + if (ret != 0) + return -EINVAL; + if (n > MAX_SMALL_POOL_NUM) + n = MAX_SMALL_POOL_NUM; + if (n < 1) + n = 1; + *((u8 *)kp->arg) = n; + + return 0; +} + +static const struct kernel_param_ops small_pool_num_ops = { + .set = small_pool_num_set, + .get = param_get_byte, +}; + +/* It was found that the spindlock of a single pool conflicts + * a lot with multiple CPUs.So multiple pools are introduced + * to reduce the conflictions. 
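A minimal sketch of how the pools might then be spread across the I/O queues, assuming a per-queue assignment keyed on the queue id (the exact scheme is up to the driver; this line is illustrative only):

	/* hypothetical assignment during queue setup */
	spraidq->prp_small_pool = hdev->prp_small_pool[spraidq->qid % small_pool_num];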
+ */ +static unsigned char small_pool_num = 4; +module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644); +MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16"); + +static void spraid_free_queue(struct spraid_queue *spraidq); +static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result); +static void spraid_handle_aen_vs(struct spraid_dev *hdev, + u32 result, u32 result1); + +static DEFINE_IDA(spraid_instance_ida); + +static struct class *spraid_class; + +#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2) + +static struct workqueue_struct *spraid_wq; + +#define dev_log_dbg(dev, fmt, ...) do { \ + if (unlikely(log_debug_switch)) \ + dev_info(dev, "[%s] [%d] " fmt, \ + __func__, __LINE__, ##__VA_ARGS__); \ +} while (0) + +#define SPRAID_DRV_VERSION "1.0.0.0" + +#define ADMIN_TIMEOUT (admin_tmout * HZ) + +#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2) + +#define SPRAID_DMA_MSK_BIT_MAX 64 + +enum FW_STAT_CODE { + FW_STAT_OK = 0, + FW_STAT_NEED_CHECK, + FW_STAT_ERROR, + FW_STAT_EP_PCIE_ERROR, + FW_STAT_NAC_DMA_ERROR, + FW_STAT_ABORTED, + FW_STAT_NEED_RETRY +}; + +static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", + "NA"}; + +static const char * const raid_states[] = { + "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", + "FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL", + "DELETING", "DELETE_FAIL", "WRITE_PROTECT" +}; + +static int ioq_depth_set(const char *val, const struct kernel_param *kp) +{ + int n = 0; + int ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < 2) + return -EINVAL; + + return param_set_int(val, kp); +} + +static int spraid_remap_bar(struct spraid_dev *hdev, u32 size) +{ + struct pci_dev *pdev = hdev->pdev; + + if (size > pci_resource_len(pdev, 0)) { + dev_err(hdev->dev, "Input size[%u] exceed bar0 length[%llu]\n", + size, pci_resource_len(pdev, 0)); + return -ENOMEM; + } + + if (hdev->bar) + iounmap(hdev->bar); + + hdev->bar = ioremap(pci_resource_start(pdev, 0), size); + if (!hdev->bar) { + dev_err(hdev->dev, "ioremap for bar0 failed\n"); + return -ENOMEM; + } + hdev->dbs = hdev->bar + SPRAID_REG_DBS; + + return 0; +} + +static int spraid_dev_map(struct spraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + + ret = pci_request_mem_regions(pdev, "spraid"); + if (ret) { + dev_err(hdev->dev, "fail to request memory regions\n"); + return ret; + } + + ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096); + if (ret) { + pci_release_mem_regions(pdev); + return ret; + } + + return 0; +} + +static void spraid_dev_unmap(struct spraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->bar) { + iounmap(hdev->bar); + hdev->bar = NULL; + } + pci_release_mem_regions(pdev); +} + +static int spraid_pci_enable(struct spraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = -ENOMEM; + u64 maskbit = SPRAID_DMA_MSK_BIT_MAX; + + if (pci_enable_device_mem(pdev)) { + dev_err(hdev->dev, + "Enable pci device memory resources failed\n"); + return ret; + } + pci_set_master(pdev); + + if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) { + ret = -ENODEV; + dev_err(hdev->dev, "Read csts register failed\n"); + goto disable; + } + + hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP); + hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1, + io_queue_depth); + hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap); + + maskbit = SPRAID_CAP_DMAMASK(hdev->cap); + if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) { + dev_err(hdev->dev, + "err, dma mask 
invalid[%llu], set to default\n", + maskbit); + maskbit = SPRAID_DMA_MSK_BIT_MAX; + } + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(hdev->dev, "set dma mask and coherent failed\n"); + goto disable; + } + + dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + + if (ret < 0) { + dev_err(hdev->dev, "Allocate one IRQ for setup admin channel failed\n"); + goto disable; + } + + pci_enable_pcie_error_reporting(pdev); + pci_save_state(pdev); + + return 0; + +disable: + pci_disable_device(pdev); + return ret; +} + +static int spraid_npages_prp(u32 size, struct spraid_dev *hdev) +{ + u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size); + + return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE); +} + +static int spraid_npages_sgl(u32 nseg) +{ + return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE); +} + +static void **spraid_iod_list(struct spraid_iod *iod) +{ + return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0)); +} + +static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge, + bool sg_drv_mgmt, bool use_sgl) +{ + size_t alloc_size, sg_size; + + if (use_sgl) + alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge); + else + alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev); + + sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0; + return sg_size + alloc_size; +} + +static u32 spraid_cmd_size(struct spraid_dev *hdev, + bool sg_drv_mgmt, bool use_sgl) +{ + u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev), + SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl); + + dev_info(hdev->dev, "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu;" + " alloc_size: %u\n", sg_drv_mgmt ? "true" : "false", + use_sgl ? 
"true" : "false", + sizeof(struct spraid_iod), alloc_size); + + return sizeof(struct spraid_iod) + alloc_size; +} + +static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod) +{ + struct scatterlist *sg = iod->sg; + u64 dma_addr = sg_dma_address(sg); + int dma_len = sg_dma_len(sg); + __le64 *prp_list, *old_prp_list; + u32 page_size = hdev->page_size; + int offset = dma_addr & (page_size - 1); + void **list = spraid_iod_list(iod); + int length = iod->length; + struct dma_pool *pool; + dma_addr_t prp_dma; + int nprps, i; + + length -= (page_size - offset); + if (length <= 0) { + iod->first_dma = 0; + return 0; + } + + dma_len -= (page_size - offset); + if (dma_len) { + dma_addr += (page_size - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= page_size) { + iod->first_dma = dma_addr; + return 0; + } + + nprps = DIV_ROUND_UP(length, page_size); + if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) { + pool = iod->spraidq->prp_small_pool; + iod->npages = 0; + } else { + pool = hdev->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + dev_err_ratelimited(hdev->dev, + "Allocate first prp_list memory failed\n"); + iod->first_dma = dma_addr; + iod->npages = -1; + return -ENOMEM; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == page_size / PRP_ENTRY_SIZE) { + old_prp_list = prp_list; + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + dev_err_ratelimited(hdev->dev, "Allocate %dth;" + " prp_list memory failed\n", + iod->npages + 1); + return -ENOMEM; + } + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= page_size; + dma_addr += page_size; + length -= page_size; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + return 0; + +bad_sgl: + dev_err(hdev->dev, + "Setup prps, invalid SGL for payload: %d nents: %d\n", + iod->length, iod->nsge); + return -EIO; +} + +#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct spraid_sgl_desc)) + +static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd) +{ + u32 sqes = SQE_SIZE(spraidq->qid); + unsigned long flags; + struct spraid_admin_common_command *acd = + (struct spraid_admin_common_command *)cmd; + + spin_lock_irqsave(&spraidq->sq_lock, flags); + memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes); + if (++spraidq->sq_tail == spraidq->q_depth) + spraidq->sq_tail = 0; + + writel(spraidq->sq_tail, spraidq->q_db); + spin_unlock_irqrestore(&spraidq->sq_lock, flags); + + dev_log_dbg(spraidq->hdev->dev, + "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n", + acd->command_id, spraidq->qid, acd->opcode, + acd->flags, le32_to_cpu(acd->hdid)); +} + +static u32 spraid_mod64(u64 dividend, u32 divisor) +{ + u64 d; + u32 remainder; + + if (!divisor) + pr_err("DIVISOR is zero, in div fn\n"); + + d = dividend; + remainder = do_div(d, divisor); + return remainder; +} + +static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd) +{ + switch (scmd->cmnd[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case READ_32: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_32: + return true; + default: + return 
false; + } +} + +static bool spraid_is_prp(struct spraid_dev *hdev, + struct scsi_cmnd *scmd, u32 nsge) +{ + struct scatterlist *sg = scsi_sglist(scmd); + u32 page_size = hdev->page_size; + bool is_prp = true; + int i = 0; + + scsi_for_each_sg(scmd, sg, nsge, i) { + if (i != 0 && i != nsge - 1) { + if (spraid_mod64(sg_dma_len(sg), page_size) || + spraid_mod64(sg_dma_address(sg), page_size)) { + is_prp = false; + break; + } + } + + if (nsge > 1 && i == 0) { + if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)), + page_size))) { + is_prp = false; + break; + } + } + + if (nsge > 1 && i == (nsge - 1)) { + if (spraid_mod64(sg_dma_address(sg), page_size)) { + is_prp = false; + break; + } + } + } + + return is_prp; +} + +enum { + SPRAID_SGL_FMT_DATA_DESC = 0x00, + SPRAID_SGL_FMT_SEG_DESC = 0x02, + SPRAID_SGL_FMT_LAST_SEG_DESC = 0x03, + SPRAID_KEY_SGL_FMT_DATA_DESC = 0x04, + SPRAID_TRANSPORT_SGL_DATA_DESC = 0x05 +}; + +static void spraid_sgl_set_data(struct spraid_sgl_desc *sge, + struct scatterlist *sg) +{ + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->type = SPRAID_SGL_FMT_DATA_DESC << 4; +} + +static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge, + dma_addr_t dma_addr, int entries) +{ + sge->addr = cpu_to_le64(dma_addr); + if (entries <= SGES_PER_PAGE) { + sge->length = cpu_to_le32(entries * sizeof(*sge)); + sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4; + } else { + sge->length = cpu_to_le32(PAGE_SIZE); + sge->type = SPRAID_SGL_FMT_SEG_DESC << 4; + } +} + +static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev, + struct scsi_cmnd *scmd, + struct spraid_ioq_command *ioq_cmd, + struct spraid_iod *iod) +{ + struct spraid_sgl_desc *sg_list, *link, *old_sg_list; + struct scatterlist *sg = scsi_sglist(scmd); + void **list = spraid_iod_list(iod); + struct dma_pool *pool; + int nsge = iod->nsge; + dma_addr_t sgl_dma; + int i = 0; + + ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF; + + if (nsge == 1) { + spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg); + return 0; + } + + if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) { + pool = iod->spraidq->prp_small_pool; + iod->npages = 0; + } else { + pool = hdev->prp_page_pool; + iod->npages = 1; + } + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, + "Allocate first sgl_list failed\n"); + iod->npages = -1; + return -ENOMEM; + } + + list[0] = sg_list; + iod->first_dma = sgl_dma; + spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge); + do { + if (i == SGES_PER_PAGE) { + old_sg_list = sg_list; + link = &old_sg_list[SGES_PER_PAGE - 1]; + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) { + dev_err_ratelimited(hdev->dev, + "Allocate %dth sgl_list;" + " failed\n", + iod->npages + 1); + return -ENOMEM; + } + list[iod->npages++] = sg_list; + + i = 0; + memcpy(&sg_list[i++], link, sizeof(*link)); + spraid_sgl_set_seg(link, sgl_dma, nsge); + } + + spraid_sgl_set_data(&sg_list[i++], sg); + sg = sg_next(sg); + } while (--nsge > 0); + + return 0; +} + +#define SPRAID_RW_FUA BIT(14) + +static void spraid_setup_rw_cmd(struct spraid_dev *hdev, + struct spraid_rw_command *rw, + struct scsi_cmnd *scmd) +{ + u32 start_lba_lo, start_lba_hi; + u32 datalength = 0; + u16 control = 0; + + start_lba_lo = 0; + start_lba_hi = 0; + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + rw->opcode = SPRAID_CMD_WRITE; + } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) { + rw->opcode = SPRAID_CMD_READ; + } 
else { + dev_err(hdev->dev, + "Invalid IO for unsupported data direction: %d\n", + scmd->sc_data_direction); + WARN_ON(1); + } + + /* 6-byte READ(0x08) or WRITE(0x0A) cdb */ + if (scmd->cmd_len == 6) { + datalength = (u32)(scmd->cmnd[4] == 0 ? + IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]); + start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]); + + start_lba_lo &= 0x1FFFFF; + } + + /* 10-byte READ(0x28) or WRITE(0x2A) cdb */ + else if (scmd->cmd_len == 10) { + datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SPRAID_RW_FUA; + } + + /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */ + else if (scmd->cmd_len == 12) { + datalength = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SPRAID_RW_FUA; + } + /* 16-byte READ(0x88) or WRITE(0x8A) cdb */ + else if (scmd->cmd_len == 16) { + datalength = get_unaligned_be32(&scmd->cmnd[10]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SPRAID_RW_FUA; + } + /* 32-byte READ(0x88) or WRITE(0x8A) cdb */ + else if (scmd->cmd_len == 32) { + datalength = get_unaligned_be32(&scmd->cmnd[28]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]); + start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]); + + if (scmd->cmnd[10] & FUA_MASK) + control |= SPRAID_RW_FUA; + } + + if (unlikely(datalength > U16_MAX || datalength == 0)) { + dev_err(hdev->dev, + "Invalid IO for illegal transfer data length: %u\n", + datalength); + WARN_ON(1); + } + + rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo); + /* 0base for nlb */ + rw->nlb = cpu_to_le16((u16)(datalength - 1)); + rw->control = cpu_to_le16(control); +} + +static void spraid_setup_nonio_cmd(struct spraid_dev *hdev, + struct spraid_scsi_nonio *scsi_nonio, + struct scsi_cmnd *scmd) +{ + scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd)); + + switch (scmd->sc_data_direction) { + case DMA_NONE: + scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE; + break; + case DMA_TO_DEVICE: + scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV; + break; + case DMA_FROM_DEVICE: + scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV; + break; + default: + dev_err(hdev->dev, + "Invalid IO for unsupported data direction: %d\n", + scmd->sc_data_direction); + WARN_ON(1); + } +} + +static void spraid_setup_ioq_cmd(struct spraid_dev *hdev, + struct spraid_ioq_command *ioq_cmd, + struct scsi_cmnd *scmd) +{ + memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len); + ioq_cmd->common.cdb_len = scmd->cmd_len; + + if (spraid_is_rw_scmd(scmd)) + spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd); + else + spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd); +} + +static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod, + struct spraid_ioq_command *ioq_cmd, + struct scsi_cmnd *scmd) +{ + if (unlikely(!iod->sense)) { + dev_err(hdev->dev, "Allocate sense data buffer failed\n"); + return -ENOMEM; + } + ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma); + ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE); + + iod->nsge = 0; + iod->npages = -1; + iod->use_sgl = 0; + iod->sg_drv_mgmt = false; + WRITE_ONCE(iod->state, SPRAID_CMD_IDLE); + + return 0; +} + +static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod) +{ + const int last_prp = hdev->page_size / sizeof(__le64) - 1; + dma_addr_t dma_addr, next_dma_addr; + struct 
spraid_sgl_desc *sg_list; + __le64 *prp_list; + void *addr; + int i; + + dma_addr = iod->first_dma; + if (iod->npages == 0) + dma_pool_free(iod->spraidq->prp_small_pool, + spraid_iod_list(iod)[0], dma_addr); + + for (i = 0; i < iod->npages; i++) { + addr = spraid_iod_list(iod)[i]; + + if (iod->use_sgl) { + sg_list = addr; + next_dma_addr = + le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); + } else { + prp_list = addr; + next_dma_addr = le64_to_cpu(prp_list[last_prp]); + } + + dma_pool_free(hdev->prp_page_pool, addr, dma_addr); + dma_addr = next_dma_addr; + } + + if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) { + iod->sg_drv_mgmt = false; + mempool_free(iod->sg, hdev->iod_mempool); + } + + iod->sense = NULL; + iod->npages = -1; +} + +static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod, + struct scsi_cmnd *scmd, + struct spraid_ioq_command *ioq_cmd) +{ + int ret; + + iod->nsge = scsi_dma_map(scmd); + + /* No data to DMA, it may be scsi no-rw command */ + if (unlikely(iod->nsge == 0)) + return 0; + + iod->length = scsi_bufflen(scmd); + iod->sg = scsi_sglist(scmd); + iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge); + + if (iod->use_sgl) { + ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod); + } else { + ret = spraid_setup_prps(hdev, iod); + ioq_cmd->common.dptr.prp1 = + cpu_to_le64(sg_dma_address(iod->sg)); + ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma); + } + + if (ret) + scsi_dma_unmap(scmd); + + return ret; +} + +static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd, + struct spraid_completion *cqe) +{ + scsi_set_resid(scmd, 0); + + switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) { + case FW_STAT_OK: + set_host_byte(scmd, DID_OK); + break; + case FW_STAT_NEED_CHECK: + set_host_byte(scmd, DID_OK); + scmd->result |= le16_to_cpu(cqe->status) >> 8; + if (scmd->result & SAM_STAT_CHECK_CONDITION) { + memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + memcpy(scmd->sense_buffer, iod->sense, + SCSI_SENSE_BUFFERSIZE); + scmd->result = + (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24); + } + break; + case FW_STAT_ABORTED: + set_host_byte(scmd, DID_ABORT); + break; + case FW_STAT_NEED_RETRY: + set_host_byte(scmd, DID_REQUEUE); + break; + default: + set_host_byte(scmd, DID_BAD_TARGET); + dev_warn(iod->spraidq->hdev->dev, "[%s] cid[%d] qid[%d];" + "bad status[0x%x]\n", __func__, cqe->cmd_id, + le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status)); + break; + } +} + +static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd, + u16 *qid, u16 *cid) +{ + u32 tag = blk_mq_unique_tag(scmd->request); + + *qid = blk_mq_unique_tag_to_hwq(tag) + 1; + *cid = blk_mq_unique_tag_to_tag(tag); +} + +static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct spraid_iod *iod = scsi_cmd_priv(scmd); + struct spraid_dev *hdev = shost_priv(shost); + struct scsi_device *sdev = scmd->device; + struct spraid_sdev_hostdata *hostdata; + struct spraid_ioq_command ioq_cmd; + struct spraid_queue *ioq; + unsigned long elapsed; + u16 hwq, cid; + int ret; + + if (unlikely(!scmd)) { + dev_err(hdev->dev, "err, scmd is null\n"); + return 0; + } + + if (unlikely(hdev->state != SPRAID_LIVE)) { + set_host_byte(scmd, DID_NO_CONNECT); + scmd->scsi_done(scmd); + return 0; + } + + if (log_debug_switch) + scsi_print_command(scmd); + + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + hostdata = sdev->hostdata; + ioq = &hdev->queues[hwq]; + memset(&ioq_cmd, 0, sizeof(ioq_cmd)); + ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid); + 
ioq_cmd.rw.command_id = cid; + + spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd); + + ret = cid * SCSI_SENSE_BUFFERSIZE; + iod->sense = ioq->sense + ret; + iod->sense_dma = ioq->sense_dma_addr + ret; + + ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd); + if (unlikely(ret)) + return SCSI_MLQUEUE_HOST_BUSY; + + iod->spraidq = ioq; + ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd); + if (unlikely(ret)) { + dev_err(hdev->dev, "spraid_io_map_data Err.\n"); + set_host_byte(scmd, DID_ERROR); + scmd->scsi_done(scmd); + ret = 0; + goto deinit_iod; + } + + WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT); + spraid_submit_cmd(ioq, &ioq_cmd); + elapsed = jiffies - scmd->jiffies_at_alloc; + dev_log_dbg(hdev->dev, + "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n", + cid, hwq, elapsed / HZ, elapsed % HZ); + return 0; + +deinit_iod: + spraid_free_iod_res(hdev, iod); + return ret; +} + +static int spraid_match_dev(struct spraid_dev *hdev, u16 idx, + struct scsi_device *sdev) +{ + if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) { + if (sdev->channel == hdev->devices[idx].channel && + sdev->id == le16_to_cpu(hdev->devices[idx].target) && + sdev->lun < hdev->devices[idx].lun) { + dev_info(hdev->dev, + "Match device success, channel;" + "target:lun[%d:%d:%d]\n", + hdev->devices[idx].channel, + hdev->devices[idx].target, + hdev->devices[idx].lun); + return 1; + } + } + + return 0; +} + +static int spraid_slave_alloc(struct scsi_device *sdev) +{ + struct spraid_sdev_hostdata *hostdata; + struct spraid_dev *hdev; + u16 idx; + + hdev = shost_priv(sdev->host); + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) { + dev_err(hdev->dev, "Alloc scsi host data memory failed\n"); + return -ENOMEM; + } + + down_read(&hdev->devices_rwsem); + for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) { + if (spraid_match_dev(hdev, idx, sdev)) + goto scan_host; + } + up_read(&hdev->devices_rwsem); + + kfree(hostdata); + return -ENXIO; + +scan_host: + hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid); + hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb); + hostdata->attr = hdev->devices[idx].attr; + hostdata->flag = hdev->devices[idx].flag; + hostdata->rg_id = 0xff; + sdev->hostdata = hostdata; + up_read(&hdev->devices_rwsem); + return 0; +} + +static void spraid_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +static int spraid_slave_configure(struct scsi_device *sdev) +{ + unsigned int timeout = scmd_tmout_rawdisk * HZ; + struct spraid_dev *hdev = shost_priv(sdev->host); + struct spraid_sdev_hostdata *hostdata = sdev->hostdata; + u32 max_sec = sdev->host->max_sectors; + + if (hostdata) { + if (SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + timeout = scmd_tmout_vd * HZ; + else if (SPRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr)) + timeout = scmd_tmout_rawdisk * HZ; + max_sec = hostdata->max_io_kb << 1; + } else { + dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n", + __func__); + } + + blk_queue_rq_timeout(sdev->request_queue, timeout); + sdev->eh_timeout = timeout; + + if ((max_sec == 0) || (max_sec > sdev->host->max_sectors)) + max_sec = sdev->host->max_sectors; + + if (!max_io_force) + blk_queue_max_hw_sectors(sdev->request_queue, max_sec); + + dev_info(hdev->dev, "[%s] sdev->channel:id:lun[%d:%d:%lld];" + "scmd_timeout[%d]s, maxsec[%d]\n", __func__, sdev->channel, + sdev->id, sdev->lun, timeout / HZ, max_sec); + + return 0; +} + +static void spraid_shost_init(struct spraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + 
u8 domain, bus; + u32 dev_func; + + domain = pci_domain_nr(pdev->bus); + bus = pdev->bus->number; + dev_func = pdev->devfn; + + hdev->shost->nr_hw_queues = hdev->online_queues - 1; + hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ); + + hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge); + /* 512B per sector */ + hdev->shost->max_sectors = + (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512; + hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV; + hdev->shost->max_channel = + le16_to_cpu(hdev->ctrl_info->max_channel) - 1; + hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id); + hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun); + + hdev->shost->this_id = -1; + hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func; + hdev->shost->max_cmd_len = MAX_CDB_LEN; + hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true), + spraid_cmd_size(hdev, false, false)); +} + +static inline void spraid_host_deinit(struct spraid_dev *hdev) +{ + ida_free(&spraid_instance_ida, hdev->instance); +} + +static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth) +{ + struct spraid_queue *spraidq = &hdev->queues[qid]; + int ret = 0; + + if (hdev->queue_count > qid) { + dev_info(hdev->dev, "[%s] warn: queue[%d] is exist\n", + __func__, qid); + return 0; + } + + spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth), + &spraidq->cq_dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (!spraidq->cqes) + return -ENOMEM; + + spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth), + &spraidq->sq_dma_addr, + GFP_KERNEL); + if (!spraidq->sq_cmds) { + ret = -ENOMEM; + goto free_cqes; + } + + spin_lock_init(&spraidq->sq_lock); + spin_lock_init(&spraidq->cq_lock); + spraidq->hdev = hdev; + spraidq->q_depth = depth; + spraidq->qid = qid; + spraidq->cq_vector = -1; + hdev->queue_count++; + + /* alloc sense buffer */ + spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth), + &spraidq->sense_dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (!spraidq->sense) { + ret = -ENOMEM; + goto free_sq_cmds; + } + + return 0; + +free_sq_cmds: + dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth), + (void *)spraidq->sq_cmds, spraidq->sq_dma_addr); +free_cqes: + dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes, + spraidq->cq_dma_addr); + return ret; +} + +static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled) +{ + unsigned long timeout = + ((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies; + u32 bit = enabled ? SPRAID_CSTS_RDY : 0; + + while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) { + usleep_range(1000, 2000); + if (fatal_signal_pending(current)) + return -EINTR; + + if (time_after(jiffies, timeout)) { + dev_err(hdev->dev, "Device not ready; aborting %s\n", + enabled ? 
"initialisation" : "reset"); + return -ENODEV; + } + } + return 0; +} + +static int spraid_shutdown_ctrl(struct spraid_dev *hdev) +{ + unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies; + + hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK; + hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL; + writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC); + + while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) != + SPRAID_CSTS_SHST_CMPLT) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(hdev->dev, + "Device shutdown incomplete; abort shutdown\n"); + return -ENODEV; + } + } + return 0; +} + +static int spraid_disable_ctrl(struct spraid_dev *hdev) +{ + hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK; + hdev->ctrl_config &= ~SPRAID_CC_ENABLE; + writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC); + + return spraid_wait_ready(hdev, hdev->cap, false); +} + +static int spraid_enable_ctrl(struct spraid_dev *hdev) +{ + u64 cap = hdev->cap; + u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12; + u32 page_shift = PAGE_SHIFT; + + if (page_shift < dev_page_min) { + dev_err(hdev->dev, + "Minimum device page size[%u], too large for host[%u]\n", + 1U << dev_page_min, 1U << page_shift); + return -ENODEV; + } + + page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12, + PAGE_SHIFT); + hdev->page_size = 1U << page_shift; + + hdev->ctrl_config = SPRAID_CC_CSS_NVM; + hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT; + hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE; + hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES; + hdev->ctrl_config |= SPRAID_CC_ENABLE; + writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC); + + return spraid_wait_ready(hdev, cap, true); +} + +static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid) +{ + struct spraid_dev *hdev = spraidq->hdev; + + memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth)); + + spraidq->sq_tail = 0; + spraidq->cq_head = 0; + spraidq->cq_phase = 1; + spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride]; + spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num]; + hdev->online_queues++; +} + +static inline bool spraid_cqe_pending(struct spraid_queue *spraidq) +{ + return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) == + spraidq->cq_phase; +} + +static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd, + struct spraid_iod *iod) +{ + int i = 0; + unsigned int bytes = 0; + struct scatterlist *sg = scsi_sglist(scmd); + + scsi_for_each_sg(scmd, sg, iod->nsge, i) { + unsigned int offset = 0; + + if (bytes == 0) { + char *hdr; + u32 list_length; + u64 max_lba, opt_lba; + u16 same; + + hdr = sg_virt(sg); + + list_length = get_unaligned_le32(&hdr[0]); + same = get_unaligned_le16(&hdr[4]); + max_lba = get_unaligned_le64(&hdr[8]); + opt_lba = get_unaligned_le64(&hdr[16]); + put_unaligned_be32(list_length, &hdr[0]); + hdr[4] = same & 0xf; + put_unaligned_be64(max_lba, &hdr[8]); + put_unaligned_be64(opt_lba, &hdr[16]); + offset += 64; + bytes += 64; + } + while (offset < sg_dma_len(sg)) { + char *rec; + u8 cond, type, non_seq, reset; + u64 size, start, wp; + + rec = sg_virt(sg) + offset; + type = rec[0] & 0xf; + cond = (rec[1] >> 4) & 0xf; + non_seq = (rec[1] & 2); + reset = (rec[1] & 1); + size = get_unaligned_le64(&rec[8]); + start = get_unaligned_le64(&rec[16]); + wp = get_unaligned_le64(&rec[24]); + rec[0] = type; + rec[1] = (cond << 4) | non_seq | reset; + put_unaligned_be64(size, &rec[8]); + put_unaligned_be64(start, 
&rec[16]); + put_unaligned_be64(wp, &rec[24]); + WARN_ON(offset + 64 > sg_dma_len(sg)); + offset += 64; + bytes += 64; + } + } +} + +static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev, + struct scsi_cmnd *scmd, + struct spraid_iod *iod) +{ + if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA) + return; + + switch (scmd->cmnd[0]) { + case ZBC_IN: + dev_info(hdev->dev, "[%s] process report zone\n", __func__); + spraid_sata_report_zone_handle(scmd, iod); + break; + default: + break; + } +} + +static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, + struct spraid_completion *cqe) +{ + struct spraid_dev *hdev = ioq->hdev; + struct blk_mq_tags *tags; + struct scsi_cmnd *scmd; + struct spraid_iod *iod; + struct request *req; + unsigned long elapsed; + + tags = hdev->shost->tag_set.tags[ioq->qid - 1]; + req = blk_mq_tag_to_rq(tags, cqe->cmd_id); + if (unlikely(!req || !blk_mq_request_started(req))) { + dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n", + cqe->cmd_id, ioq->qid); + return; + } + + scmd = blk_mq_rq_to_pdu(req); + iod = scsi_cmd_priv(scmd); + + elapsed = jiffies - scmd->jiffies_at_alloc; + dev_log_dbg(hdev->dev, + "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n", + cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ); + + if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) != + SPRAID_CMD_IN_FLIGHT) { + dev_warn(hdev->dev, + "cid[%d] qid[%d] enters abnormal handler;" + " cost %3ld.%3ld seconds\n", + cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ); + WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE); + + if (iod->nsge) { + iod->nsge = 0; + scsi_dma_unmap(scmd); + } + spraid_free_iod_res(hdev, iod); + + return; + } + + spraid_handle_ata_cmd(hdev, scmd, iod); + + spraid_map_status(iod, scmd, cqe); + if (iod->nsge) { + iod->nsge = 0; + scsi_dma_unmap(scmd); + } + spraid_free_iod_res(hdev, iod); + scmd->scsi_done(scmd); +} + +static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq, + struct spraid_completion *cqe) +{ + struct spraid_dev *hdev = adminq->hdev; + struct spraid_cmd *adm_cmd; + + adm_cmd = hdev->adm_cmds + cqe->cmd_id; + if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) { + dev_warn(adminq->hdev->dev, + "Invalid id %d completed on queue %d\n", + cqe->cmd_id, le16_to_cpu(cqe->sq_id)); + return; + } + + adm_cmd->status = le16_to_cpu(cqe->status) >> 1; + adm_cmd->result0 = le32_to_cpu(cqe->result); + adm_cmd->result1 = le32_to_cpu(cqe->result1); + + complete(&adm_cmd->cmd_done); +} + +static void spraid_send_aen(struct spraid_dev *hdev, u16 cid); + +static void spraid_complete_aen(struct spraid_queue *spraidq, + struct spraid_completion *cqe) +{ + struct spraid_dev *hdev = spraidq->hdev; + u32 result = le32_to_cpu(cqe->result); + + dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n", + cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result); + + spraid_send_aen(hdev, cqe->cmd_id); + + if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS) + return; + switch (result & 0x7) { + case SPRAID_AEN_NOTICE: + spraid_handle_aen_notice(hdev, result); + break; + case SPRAID_AEN_VS: + spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1)); + break; + default: + dev_warn(hdev->dev, "Unsupported async event type: %u\n", + result & 0x7); + break; + } +} + +static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq, + struct spraid_completion *cqe) +{ + struct spraid_dev *hdev = ioq->hdev; + struct spraid_cmd *ptcmd; + + ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ + + cqe->cmd_id - 
SPRAID_IO_BLK_MQ_DEPTH; + + ptcmd->status = le16_to_cpu(cqe->status) >> 1; + ptcmd->result0 = le32_to_cpu(cqe->result); + ptcmd->result1 = le32_to_cpu(cqe->result1); + + complete(&ptcmd->cmd_done); +} + +static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx) +{ + struct spraid_completion *cqe = &spraidq->cqes[idx]; + struct spraid_dev *hdev = spraidq->hdev; + + if (unlikely(cqe->cmd_id >= spraidq->q_depth)) { + dev_err(hdev->dev, + "Invalid command id[%d] completed on queue %d\n", + cqe->cmd_id, cqe->sq_id); + return; + } + + dev_log_dbg(hdev->dev, "cid[%d] qid[%d];" + " result[0x%x], sq_id[%d], status[0x%x]\n", + cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result), + le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status)); + + if (unlikely(spraidq->qid == 0 + && cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) { + spraid_complete_aen(spraidq, cqe); + return; + } + + if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) { + spraid_complete_ioq_sync_cmnd(spraidq, cqe); + return; + } + + if (spraidq->qid) + spraid_complete_ioq_cmnd(spraidq, cqe); + else + spraid_complete_adminq_cmnd(spraidq, cqe); +} + +static void spraid_complete_cqes(struct spraid_queue *spraidq, + u16 start, u16 end) +{ + while (start != end) { + spraid_handle_cqe(spraidq, start); + if (++start == spraidq->q_depth) + start = 0; + } +} + +static inline void spraid_update_cq_head(struct spraid_queue *spraidq) +{ + if (++spraidq->cq_head == spraidq->q_depth) { + spraidq->cq_head = 0; + spraidq->cq_phase = !spraidq->cq_phase; + } +} + +static inline bool spraid_process_cq(struct spraid_queue *spraidq, + u16 *start, u16 *end, int tag) +{ + bool found = false; + + *start = spraidq->cq_head; + while (!found && spraid_cqe_pending(spraidq)) { + if (spraidq->cqes[spraidq->cq_head].cmd_id == tag) + found = true; + spraid_update_cq_head(spraidq); + } + *end = spraidq->cq_head; + + if (*start != *end) + writel(spraidq->cq_head, + spraidq->q_db + spraidq->hdev->db_stride); + + return found; +} + +static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid) +{ + u16 start, end; + bool found; + + if (!spraid_cqe_pending(spraidq)) + return 0; + + spin_lock_irq(&spraidq->cq_lock); + found = spraid_process_cq(spraidq, &start, &end, cid); + spin_unlock_irq(&spraidq->cq_lock); + + spraid_complete_cqes(spraidq, start, end); + return found; +} + +static irqreturn_t spraid_irq(int irq, void *data) +{ + struct spraid_queue *spraidq = data; + irqreturn_t ret = IRQ_NONE; + u16 start, end; + + spin_lock(&spraidq->cq_lock); + if (spraidq->cq_head != spraidq->last_cq_head) + ret = IRQ_HANDLED; + + spraid_process_cq(spraidq, &start, &end, -1); + spraidq->last_cq_head = spraidq->cq_head; + spin_unlock(&spraidq->cq_lock); + + if (start != end) { + spraid_complete_cqes(spraidq, start, end); + ret = IRQ_HANDLED; + } + return ret; +} + +static int spraid_setup_admin_queue(struct spraid_dev *hdev) +{ + struct spraid_queue *adminq = &hdev->queues[0]; + u32 aqa; + int ret; + + dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__); + + ret = spraid_disable_ctrl(hdev); + if (ret) + return ret; + + ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH); + if (ret) + return ret; + + aqa = adminq->q_depth - 1; + aqa |= aqa << 16; + writel(aqa, hdev->bar + SPRAID_REG_AQA); + lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ); + lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ); + + dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__); + + ret = spraid_enable_ctrl(hdev); + if (ret) { + ret = -ENODEV; + goto 
free_queue; + } + + adminq->cq_vector = 0; + spraid_init_queue(adminq, 0); + ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL, + adminq, "spraid%d_q%d", + hdev->instance, adminq->qid); + + if (ret) { + adminq->cq_vector = -1; + hdev->online_queues--; + goto free_queue; + } + + dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n", + __func__, hdev->queue_count, hdev->online_queues); + + return 0; + +free_queue: + spraid_free_queue(adminq); + return ret; +} + +static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs) +{ + return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride)); +} + +static int spraid_alloc_admin_cmds(struct spraid_dev *hdev) +{ + int i; + + INIT_LIST_HEAD(&hdev->adm_cmd_list); + spin_lock_init(&hdev->adm_cmd_lock); + + hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH, + sizeof(struct spraid_cmd), + GFP_KERNEL, hdev->numa_node); + + if (!hdev->adm_cmds) { + dev_err(hdev->dev, "Alloc admin cmds failed\n"); + return -ENOMEM; + } + + for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) { + hdev->adm_cmds[i].qid = 0; + hdev->adm_cmds[i].cid = i; + list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list); + } + + dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n", + SPRAID_AQ_BLK_MQ_DEPTH); + + return 0; +} + +static void spraid_free_admin_cmds(struct spraid_dev *hdev) +{ + kfree(hdev->adm_cmds); + hdev->adm_cmds = NULL; + INIT_LIST_HEAD(&hdev->adm_cmd_list); +} + +static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev, + enum spraid_cmd_type type) +{ + struct spraid_cmd *cmd = NULL; + unsigned long flags; + struct list_head *head = &hdev->adm_cmd_list; + spinlock_t *slock = &hdev->adm_cmd_lock; + + if (type == SPRAID_CMD_IOPT) { + head = &hdev->ioq_pt_list; + slock = &hdev->ioq_pt_lock; + } + + spin_lock_irqsave(slock, flags); + if (list_empty(head)) { + spin_unlock_irqrestore(slock, flags); + dev_err(hdev->dev, "err, cmd[%d] list empty\n", type); + return NULL; + } + cmd = list_entry(head->next, struct spraid_cmd, list); + list_del_init(&cmd->list); + spin_unlock_irqrestore(slock, flags); + + WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT); + + return cmd; +} + +static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd, + enum spraid_cmd_type type) +{ + unsigned long flags; + struct list_head *head = &hdev->adm_cmd_list; + spinlock_t *slock = &hdev->adm_cmd_lock; + + if (type == SPRAID_CMD_IOPT) { + head = &hdev->ioq_pt_list; + slock = &hdev->ioq_pt_lock; + } + + spin_lock_irqsave(slock, flags); + WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE); + list_add_tail(&cmd->list, head); + spin_unlock_irqrestore(slock, flags); +} + + +static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev, + struct spraid_admin_command *cmd, + u32 *result0, u32 *result1, u32 timeout) +{ + struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM); + + if (!adm_cmd) { + dev_err(hdev->dev, "err, get admin cmd failed\n"); + return -EFAULT; + } + + timeout = timeout ? 
timeout : ADMIN_TIMEOUT; + + init_completion(&adm_cmd->cmd_done); + + cmd->common.command_id = adm_cmd->cid; + spraid_submit_cmd(&hdev->queues[0], cmd); + + if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) { + dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;" + " opcode[0x%x] subopcode[0x%x]\n", + __func__, adm_cmd->cid, adm_cmd->qid, + cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode); + WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT); + spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM); + return -ETIME; + } + + if (result0) + *result0 = adm_cmd->result0; + if (result1) + *result1 = adm_cmd->result1; + + spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM); + + return adm_cmd->status; +} + +static int spraid_create_cq(struct spraid_dev *hdev, u16 qid, + struct spraid_queue *spraidq, u16 cq_vector) +{ + struct spraid_admin_command admin_cmd; + int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ; + admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr); + admin_cmd.create_cq.cqid = cpu_to_le16(qid); + admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1); + admin_cmd.create_cq.cq_flags = cpu_to_le16(flags); + admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector); + + return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); +} + +static int spraid_create_sq(struct spraid_dev *hdev, u16 qid, + struct spraid_queue *spraidq) +{ + struct spraid_admin_command admin_cmd; + int flags = SPRAID_QUEUE_PHYS_CONTIG; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ; + admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr); + admin_cmd.create_sq.sqid = cpu_to_le16(qid); + admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1); + admin_cmd.create_sq.sq_flags = cpu_to_le16(flags); + admin_cmd.create_sq.cqid = cpu_to_le16(qid); + + return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); +} + +static void spraid_free_queue(struct spraid_queue *spraidq) +{ + struct spraid_dev *hdev = spraidq->hdev; + + hdev->queue_count--; + dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth), + (void *)spraidq->cqes, spraidq->cq_dma_addr); + dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth), + spraidq->sq_cmds, spraidq->sq_dma_addr); + dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth), + spraidq->sense, spraidq->sense_dma_addr); +} + +static void spraid_free_admin_queue(struct spraid_dev *hdev) +{ + spraid_free_queue(&hdev->queues[0]); +} + +static void spraid_free_io_queues(struct spraid_dev *hdev) +{ + int i; + + for (i = hdev->queue_count - 1; i >= 1; i--) + spraid_free_queue(&hdev->queues[i]); +} + +static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id) +{ + struct spraid_admin_command admin_cmd; + int ret; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.delete_queue.opcode = op; + admin_cmd.delete_queue.qid = cpu_to_le16(id); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); + + if (ret) + dev_err(hdev->dev, "Delete %s:[%d] failed\n", + (op == SPRAID_ADMIN_DELETE_CQ) ? 
"cq" : "sq", id); + + return ret; +} + +static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid) +{ + return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid); +} + +static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid) +{ + return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid); +} + +static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid) +{ + struct spraid_dev *hdev = spraidq->hdev; + u16 cq_vector; + int ret; + + cq_vector = (hdev->num_vecs == 1) ? 0 : qid; + ret = spraid_create_cq(hdev, qid, spraidq, cq_vector); + if (ret) + return ret; + + ret = spraid_create_sq(hdev, qid, spraidq); + if (ret) + goto delete_cq; + + spraid_init_queue(spraidq, qid); + spraidq->cq_vector = cq_vector; + + ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL, + spraidq, "spraid%d_q%d", hdev->instance, qid); + + if (ret) { + dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid); + goto delete_sq; + } + + return 0; + +delete_sq: + spraidq->cq_vector = -1; + hdev->online_queues--; + spraid_delete_sq(hdev, qid); +delete_cq: + spraid_delete_cq(hdev, qid); + + return ret; +} + +static int spraid_create_io_queues(struct spraid_dev *hdev) +{ + u32 i, max; + int ret = 0; + + max = min(hdev->max_qid, hdev->queue_count - 1); + for (i = hdev->online_queues; i <= max; i++) { + ret = spraid_create_queue(&hdev->queues[i], i); + if (ret) { + dev_err(hdev->dev, "Create queue[%d] failed\n", i); + break; + } + } + + dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]", + __func__, hdev->queue_count, hdev->online_queues); + + return ret >= 0 ? 0 : ret; +} + +static int spraid_set_features(struct spraid_dev *hdev, u32 fid, + u32 dword11, void *buffer, + size_t buflen, u32 *result) +{ + struct spraid_admin_command admin_cmd; + int ret; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + + if (buffer && buflen) { + data_ptr = dma_alloc_coherent(hdev->dev, buflen, + &data_dma, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memcpy(data_ptr, buffer, buflen); + } + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES; + admin_cmd.features.fid = cpu_to_le32(fid); + admin_cmd.features.dword11 = cpu_to_le32(dword11); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0); + + if (data_ptr) + dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma); + + return ret; +} + +static int spraid_configure_timestamp(struct spraid_dev *hdev) +{ + __le64 ts; + int ret; + + ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); + ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP, + 0, &ts, sizeof(ts), NULL); + + if (ret) + dev_err(hdev->dev, "set timestamp failed: %d\n", ret); + return ret; +} + +static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt) +{ + u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16); + u32 nr_ioqs, result; + int status; + + status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES, + q_cnt, NULL, 0, &result); + if (status) { + dev_err(hdev->dev, "Set queue count failed, status: %d\n", + status); + return -EIO; + } + + nr_ioqs = min(result & 0xffff, result >> 16) + 1; + *cnt = min(*cnt, nr_ioqs); + if (*cnt == 0) { + dev_err(hdev->dev, "Illegal queue count: zero\n"); + return -EIO; + } + return 0; +} + +static int spraid_setup_io_queues(struct spraid_dev *hdev) +{ + struct spraid_queue *adminq = &hdev->queues[0]; + struct pci_dev *pdev = hdev->pdev; + u32 nr_ioqs = num_online_cpus(); + u32 i, size; + int ret; + + struct irq_affinity 
affd = { + .pre_vectors = 1 + }; + + ret = spraid_set_queue_cnt(hdev, &nr_ioqs); + if (ret < 0) + return ret; + + size = spraid_bar_size(hdev, nr_ioqs); + ret = spraid_remap_bar(hdev, size); + if (ret) + return -ENOMEM; + + adminq->q_db = hdev->dbs; + + pci_free_irq(pdev, 0, adminq); + pci_free_irq_vectors(pdev); + + ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1), + PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); + if (ret <= 0) + return -EIO; + + hdev->num_vecs = ret; + + hdev->max_qid = max(ret - 1, 1); + + ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL, + adminq, "spraid%d_q%d", + hdev->instance, adminq->qid); + if (ret) { + dev_err(hdev->dev, "Request admin irq failed\n"); + adminq->cq_vector = -1; + return ret; + } + + for (i = hdev->queue_count; i <= hdev->max_qid; i++) { + ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth); + if (ret) + break; + } + dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;" + " online_queue: %d, ioq_depth: %d\n", + __func__, hdev->max_qid, hdev->queue_count, + hdev->online_queues, hdev->ioq_depth); + + return spraid_create_io_queues(hdev); +} + +static void spraid_delete_io_queues(struct spraid_dev *hdev) +{ + u16 queues = hdev->online_queues - 1; + u8 opcode = SPRAID_ADMIN_DELETE_SQ; + u16 i, pass; + + if (!pci_device_is_present(hdev->pdev)) { + dev_err(hdev->dev, + "pci_device is not present, skip disable io queues\n"); + return; + } + + if (hdev->online_queues < 2) { + dev_err(hdev->dev, "[%s] err, io queue has been delete\n", + __func__); + return; + } + + for (pass = 0; pass < 2; pass++) { + for (i = queues; i > 0; i--) + if (spraid_delete_queue(hdev, opcode, i)) + break; + + opcode = SPRAID_ADMIN_DELETE_CQ; + } +} + +static void spraid_remove_io_queues(struct spraid_dev *hdev) +{ + spraid_delete_io_queues(hdev); + spraid_free_io_queues(hdev); +} + +static void spraid_pci_disable(struct spraid_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + u32 i; + + for (i = 0; i < hdev->online_queues; i++) + pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]); + pci_free_irq_vectors(pdev); + if (pci_is_enabled(pdev)) { + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } + hdev->online_queues = 0; +} + +static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown) +{ + struct spraid_queue *adminq = &hdev->queues[0]; + u16 start, end; + + if (pci_device_is_present(hdev->pdev)) { + if (shutdown) + spraid_shutdown_ctrl(hdev); + else + spraid_disable_ctrl(hdev); + } + + if (hdev->queue_count == 0) { + dev_err(hdev->dev, "[%s] err, admin queue has been delete\n", + __func__); + return; + } + + spin_lock_irq(&adminq->cq_lock); + spraid_process_cq(adminq, &start, &end, -1); + spin_unlock_irq(&adminq->cq_lock); + + spraid_complete_cqes(adminq, start, end); + spraid_free_admin_queue(hdev); +} + +static int spraid_create_dma_pools(struct spraid_dev *hdev) +{ + int i; + char poolname[20] = { 0 }; + + hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev, + PAGE_SIZE, PAGE_SIZE, 0); + + if (!hdev->prp_page_pool) { + dev_err(hdev->dev, "create prp_page_pool failed\n"); + return -ENOMEM; + } + + for (i = 0; i < small_pool_num; i++) { + sprintf(poolname, "prp_list_256_%d", i); + hdev->prp_small_pool[i] = + dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE, + SMALL_POOL_SIZE, 0); + + if (!hdev->prp_small_pool[i]) { + dev_err(hdev->dev, "create prp_small_pool %d failed\n", + i); + goto destroy_prp_small_pool; + } + } + + return 0; + +destroy_prp_small_pool: + while (i > 0) + 
dma_pool_destroy(hdev->prp_small_pool[--i]); + dma_pool_destroy(hdev->prp_page_pool); + + return -ENOMEM; +} + +static void spraid_destroy_dma_pools(struct spraid_dev *hdev) +{ + int i; + + for (i = 0; i < small_pool_num; i++) + dma_pool_destroy(hdev->prp_small_pool[i]); + dma_pool_destroy(hdev->prp_page_pool); +} + +static int spraid_get_dev_list(struct spraid_dev *hdev, + struct spraid_dev_info *devices) +{ + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + struct spraid_admin_command admin_cmd; + struct spraid_dev_list *list_buf; + dma_addr_t data_dma = 0; + u32 i, idx, hdid, ndev; + int ret = 0; + + list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &data_dma, GFP_KERNEL); + if (!list_buf) + return -ENOMEM; + + for (idx = 0; idx < nd;) { + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO; + admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST; + admin_cmd.get_info.cdw11 = cpu_to_le32(idx); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, + NULL, NULL, 0); + + if (ret) { + dev_err(hdev->dev, "Get device list failed, nd: %u;" + "idx: %u, ret: %d\n", + nd, idx, ret); + goto out; + } + ndev = le32_to_cpu(list_buf->dev_num); + + dev_info(hdev->dev, "ndev numbers: %u\n", ndev); + + for (i = 0; i < ndev; i++) { + hdid = le32_to_cpu(list_buf->devices[i].hdid); + dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;" + "target: %d, channel: %d, lun: %d, attr[0x%x]\n", + i, hdid, + le16_to_cpu(list_buf->devices[i].target), + list_buf->devices[i].channel, + list_buf->devices[i].lun, + list_buf->devices[i].attr); + if (hdid > nd || hdid == 0) { + dev_err(hdev->dev, "err, hdid[%d] invalid\n", + hdid); + continue; + } + memcpy(&devices[hdid - 1], &list_buf->devices[i], + sizeof(struct spraid_dev_info)); + } + idx += ndev; + + if (idx < MAX_DEV_ENTRY_PER_PAGE_4K) + break; + } + +out: + dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma); + return ret; +} + +static void spraid_send_aen(struct spraid_dev *hdev, u16 cid) +{ + struct spraid_queue *adminq = &hdev->queues[0]; + struct spraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT; + admin_cmd.common.command_id = cid; + + spraid_submit_cmd(adminq, &admin_cmd); + dev_info(hdev->dev, "send aen, cid[%d]\n", cid); +} + +static inline void spraid_send_all_aen(struct spraid_dev *hdev) +{ + u16 i; + + for (i = 0; i < hdev->ctrl_info->aerl; i++) + spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH); +} + +static int spraid_add_device(struct spraid_dev *hdev, + struct spraid_dev_info *device) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;" + " lun: %d, attr[0x%x]\n", + le32_to_cpu(device->hdid), le16_to_cpu(device->target), + device->channel, device->lun, device->attr); + + sdev = scsi_device_lookup(shost, device->channel, + le16_to_cpu(device->target), 0); + if (sdev) { + dev_warn(hdev->dev, "Device is already exist, channel: %d;" + " target_id: %d, lun: %d\n", + device->channel, le16_to_cpu(device->target), 0); + scsi_device_put(sdev); + return -EEXIST; + } + scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0); + return 0; +} + +static int spraid_rescan_device(struct spraid_dev *hdev, + struct spraid_dev_info *device) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: 
%d;" + " lun: %d, attr[0x%x]\n", + le32_to_cpu(device->hdid), le16_to_cpu(device->target), + device->channel, device->lun, device->attr); + + sdev = scsi_device_lookup(shost, device->channel, + le16_to_cpu(device->target), 0); + if (!sdev) { + dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;" + " target_id: %d, lun: %d\n", + device->channel, le16_to_cpu(device->target), 0); + return -ENODEV; + } + + scsi_rescan_device(&sdev->sdev_gendev); + scsi_device_put(sdev); + return 0; +} + +static int spraid_remove_device(struct spraid_dev *hdev, + struct spraid_dev_info *org_device) +{ + struct Scsi_Host *shost = hdev->shost; + struct scsi_device *sdev; + + dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;" + " lun: %d, attr[0x%x]\n", + le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target), + org_device->channel, org_device->lun, org_device->attr); + + sdev = scsi_device_lookup(shost, org_device->channel, + le16_to_cpu(org_device->target), 0); + if (!sdev) { + dev_warn(hdev->dev, "device is not exit remove it, channel: %d;" + " target_id: %d, lun: %d\n", + org_device->channel, + le16_to_cpu(org_device->target), 0); + return -ENODEV; + } + + scsi_remove_device(sdev); + scsi_device_put(sdev); + return 0; +} + +static int spraid_dev_list_init(struct spraid_dev *hdev) +{ + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + int i, ret; + + hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info), + GFP_KERNEL, hdev->numa_node); + if (!hdev->devices) + return -ENOMEM; + + ret = spraid_get_dev_list(hdev, hdev->devices); + if (ret) { + dev_err(hdev->dev, + "Ignore failure of getting device list;" + " within initialization\n"); + return 0; + } + + for (i = 0; i < nd; i++) { + if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) && + SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) { + spraid_add_device(hdev, &hdev->devices[i]); + break; + } + } + return 0; +} + +static int luntarget_cmp_func(const void *l, const void *r) +{ + const struct spraid_dev_info *ln = l; + const struct spraid_dev_info *rn = r; + + if (ln->channel == rn->channel) + return le16_to_cpu(ln->target) - le16_to_cpu(rn->target); + + return ln->channel - rn->channel; +} + +static void spraid_scan_work(struct work_struct *work) +{ + struct spraid_dev *hdev = + container_of(work, struct spraid_dev, scan_work); + struct spraid_dev_info *devices, *org_devices; + struct spraid_dev_info *sortdevice; + u32 nd = le32_to_cpu(hdev->ctrl_info->nd); + u8 flag, org_flag; + int i, ret; + int count = 0; + + devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL); + if (!devices) + return; + + sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL); + if (!sortdevice) + goto free_list; + + ret = spraid_get_dev_list(hdev, devices); + if (ret) + goto free_all; + org_devices = hdev->devices; + for (i = 0; i < nd; i++) { + org_flag = org_devices[i].flag; + flag = devices[i].flag; + + dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n", + i, org_flag, flag); + + if (SPRAID_DEV_INFO_FLAG_VALID(flag)) { + if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) { + down_write(&hdev->devices_rwsem); + memcpy(&org_devices[i], &devices[i], + sizeof(struct spraid_dev_info)); + memcpy(&sortdevice[count++], &devices[i], + sizeof(struct spraid_dev_info)); + up_write(&hdev->devices_rwsem); + } else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) { + spraid_rescan_device(hdev, &devices[i]); + } + } else { + if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) { + down_write(&hdev->devices_rwsem); + org_devices[i].flag &= 0xfe; 
+ up_write(&hdev->devices_rwsem); + spraid_remove_device(hdev, &org_devices[i]); + } + } + } + + dev_info(hdev->dev, "scan work add device count = %d\n", count); + + sort(sortdevice, count, sizeof(sortdevice[0]), + luntarget_cmp_func, NULL); + + for (i = 0; i < count; i++) + spraid_add_device(hdev, &sortdevice[i]); + +free_all: + kfree(sortdevice); +free_list: + kfree(devices); +} + +static void spraid_timesyn_work(struct work_struct *work) +{ + struct spraid_dev *hdev = + container_of(work, struct spraid_dev, timesyn_work); + + spraid_configure_timestamp(hdev); +} + +static int spraid_init_ctrl_info(struct spraid_dev *hdev); +static void spraid_fw_act_work(struct work_struct *work) +{ + struct spraid_dev *hdev = + container_of(work, struct spraid_dev, fw_act_work); + + if (spraid_init_ctrl_info(hdev)) + dev_err(hdev->dev, "get ctrl info failed after fw act\n"); +} + +static void spraid_queue_scan(struct spraid_dev *hdev) +{ + queue_work(spraid_wq, &hdev->scan_work); +} + +static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result) +{ + switch ((result & 0xff00) >> 8) { + case SPRAID_AEN_DEV_CHANGED: + spraid_queue_scan(hdev); + break; + case SPRAID_AEN_FW_ACT_START: + dev_info(hdev->dev, "fw activation starting\n"); + break; + case SPRAID_AEN_HOST_PROBING: + break; + default: + dev_warn(hdev->dev, "async event result %08x\n", result); + } +} + +static void spraid_handle_aen_vs(struct spraid_dev *hdev, + u32 result, u32 result1) +{ + switch ((result & 0xff00) >> 8) { + case SPRAID_AEN_TIMESYN: + queue_work(spraid_wq, &hdev->timesyn_work); + break; + case SPRAID_AEN_FW_ACT_FINISH: + dev_info(hdev->dev, "fw activation finish\n"); + queue_work(spraid_wq, &hdev->fw_act_work); + break; + case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX: + dev_info(hdev->dev, "rcv card event[%d];" + " param1[0x%x] param2[0x%x]\n", + (result & 0xff00) >> 8, result, result1); + break; + default: + dev_warn(hdev->dev, "async event result: 0x%x\n", result); + } +} + +static int spraid_alloc_resources(struct spraid_dev *hdev) +{ + int ret, nqueue; + + ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL); + if (ret < 0) { + dev_err(hdev->dev, "Get instance id failed\n"); + return ret; + } + hdev->instance = ret; + + hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), + GFP_KERNEL, hdev->numa_node); + if (!hdev->ctrl_info) { + ret = -ENOMEM; + goto release_instance; + } + + ret = spraid_create_dma_pools(hdev); + if (ret) + goto free_ctrl_info; + nqueue = num_possible_cpus() + 1; + hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue), + GFP_KERNEL, hdev->numa_node); + if (!hdev->queues) { + ret = -ENOMEM; + goto destroy_dma_pools; + } + + ret = spraid_alloc_admin_cmds(hdev); + if (ret) + goto free_queues; + + dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue); + + return 0; + +free_queues: + kfree(hdev->queues); +destroy_dma_pools: + spraid_destroy_dma_pools(hdev); +free_ctrl_info: + kfree(hdev->ctrl_info); +release_instance: + ida_free(&spraid_instance_ida, hdev->instance); + return ret; +} + +static void spraid_free_resources(struct spraid_dev *hdev) +{ + spraid_free_admin_cmds(hdev); + kfree(hdev->queues); + spraid_destroy_dma_pools(hdev); + kfree(hdev->ctrl_info); + ida_free(&spraid_instance_ida, hdev->instance); +} + +static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job) +{ + struct request *rq = blk_mq_rq_from_pdu(job); + struct spraid_iod *iod = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + + if (iod->nsge) + dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir); + + spraid_free_iod_res(hdev, iod); +} + +static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job, + struct spraid_admin_command *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(job); + struct spraid_iod *iod = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + int ret = 0; + + iod->sg = job->request_payload.sg_list; + iod->nsge = job->request_payload.sg_cnt; + iod->length = job->request_payload.payload_len; + iod->use_sgl = false; + iod->npages = -1; + iod->sg_drv_mgmt = false; + + if (!iod->nsge) + goto out; + + ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge, + dma_dir, DMA_ATTR_NO_WARN); + if (!ret) + goto out; + + ret = spraid_setup_prps(hdev, iod); + if (ret) + goto unmap; + + cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma); + + return 0; + +unmap: + dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir); +out: + return ret; +} + +static int spraid_get_ctrl_info(struct spraid_dev *hdev, + struct spraid_ctrl_info *ctrl_info) +{ + struct spraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int ret; + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &data_dma, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO; + admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL; + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return ret; +} + +static int spraid_init_ctrl_info(struct spraid_dev *hdev) +{ + int ret; + + hdev->ctrl_info->nd = cpu_to_le32(240); + hdev->ctrl_info->mdts = 8; + hdev->ctrl_info->max_cmds = cpu_to_le16(4096); + hdev->ctrl_info->max_num_sge = cpu_to_le16(128); + hdev->ctrl_info->max_channel = cpu_to_le16(4); + hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239); + hdev->ctrl_info->max_lun = cpu_to_le16(2); + + ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info); + if (ret) + dev_err(hdev->dev, "get controller info failed: %d\n", ret); + + dev_info(hdev->dev, "[%s]nd = %d\n", __func__, hdev->ctrl_info->nd); + dev_info(hdev->dev, "[%s]max_cmd = %d\n", + __func__, hdev->ctrl_info->max_cmds); + dev_info(hdev->dev, "[%s]max_channel = %d\n", + __func__, hdev->ctrl_info->max_channel); + dev_info(hdev->dev, "[%s]max_tgt_id = %d\n", + __func__, hdev->ctrl_info->max_tgt_id); + dev_info(hdev->dev, "[%s]max_lun = %d\n", + __func__, hdev->ctrl_info->max_lun); + dev_info(hdev->dev, "[%s]max_num_sge = %d\n", + __func__, hdev->ctrl_info->max_num_sge); + dev_info(hdev->dev, "[%s]lun_num_boot = %d\n", + __func__, hdev->ctrl_info->lun_num_in_boot); + dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts); + dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl); + dev_info(hdev->dev, "[%s]aer1 = %d\n", __func__, hdev->ctrl_info->aerl); + dev_info(hdev->dev, "[%s]card_type = %d\n", + __func__, hdev->ctrl_info->card_type); + dev_info(hdev->dev, "[%s]rtd3e = %d\n", + __func__, hdev->ctrl_info->rtd3e); + dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn); + dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr); + + if (!hdev->ctrl_info->aerl) + 
hdev->ctrl_info->aerl = 1; + if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS) + hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS; + + return 0; +} + +#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE BIT(16) +static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev) +{ + u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge); + size_t alloc_size; + + alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE, + max_sge, true, false); + if (alloc_size > PAGE_SIZE) + dev_warn(hdev->dev, "It is unreasonable ;" + " sg allocation more than one page\n"); + hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc, + mempool_kfree, + (void *)alloc_size, GFP_KERNEL, + hdev->numa_node); + if (!hdev->iod_mempool) { + dev_err(hdev->dev, "Create iod extension memory pool failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev) +{ + mempool_destroy(hdev->iod_mempool); +} + +static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job) +{ + struct spraid_bsg_request *bsg_req = job->request; + struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd); + struct spraid_admin_command admin_cmd; + u32 timeout = msecs_to_jiffies(cmd->timeout_ms); + u32 result[2] = {0}; + int status; + + if (hdev->state >= SPRAID_RESETTING) { + dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n", + __func__, hdev->state); + return -EBUSY; + } + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.common.opcode = cmd->opcode; + admin_cmd.common.flags = cmd->flags; + admin_cmd.common.hdid = cpu_to_le32(cmd->nsid); + admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2); + admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3); + admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10); + admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11); + admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12); + admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13); + admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14); + admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15); + + status = spraid_bsg_map_data(hdev, job, &admin_cmd); + if (status) { + dev_err(hdev->dev, "[%s] err, map data failed\n", __func__); + return status; + } + + status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0], + &result[1], timeout); + if (status >= 0) { + job->reply_len = sizeof(result); + memcpy(job->reply, result, sizeof(result)); + } + + if (status) + dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x];" + " status[0x%x] result0[0x%x] result1[0x%x]\n", + __func__, cmd->opcode, cmd->info_0.subopcode, + status, result[0], result[1]); + + spraid_bsg_unmap_data(hdev, job); + + return status; +} + +static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev) +{ + int i; + int ptnum = SPRAID_NR_IOQ_PTCMDS; + + INIT_LIST_HEAD(&hdev->ioq_pt_list); + spin_lock_init(&hdev->ioq_pt_lock); + + hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd), + GFP_KERNEL, hdev->numa_node); + + if (!hdev->ioq_ptcmds) { + dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n"); + return -ENOMEM; + } + + for (i = 0; i < ptnum; i++) { + hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1; + hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ + + SPRAID_IO_BLK_MQ_DEPTH; + list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list); + } + + dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum); + + return 0; +} + +static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev) +{ + kfree(hdev->ioq_ptcmds); + hdev->ioq_ptcmds = NULL; + + INIT_LIST_HEAD(&hdev->ioq_pt_list); +} + +static int 
spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev, + struct spraid_ioq_command *cmd, + u32 *result, u32 *reslen, u32 timeout) +{ + int ret; + dma_addr_t sense_dma; + struct spraid_queue *ioq; + void *sense_addr = NULL; + struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT); + + if (!pt_cmd) { + dev_err(hdev->dev, "err, get ioq cmd failed\n"); + return -EFAULT; + } + + timeout = timeout ? timeout : ADMIN_TIMEOUT; + + init_completion(&pt_cmd->cmd_done); + + ioq = &hdev->queues[pt_cmd->qid]; + ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE; + sense_addr = ioq->sense + ret; + sense_dma = ioq->sense_dma_addr + ret; + + cmd->common.sense_addr = cpu_to_le64(sense_dma); + cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE); + cmd->common.command_id = pt_cmd->cid; + + spraid_submit_cmd(ioq, cmd); + + if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) { + dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;" + " opcode[0x%x] subopcode[0x%x]\n", + __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode, + (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff)); + WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT); + spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT); + return -ETIME; + } + + if (result && reslen) { + if ((pt_cmd->status & 0x17f) == 0x101) { + memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE); + *reslen = SCSI_SENSE_BUFFERSIZE; + } + } + + spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT); + + return pt_cmd->status; +} + +static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job) +{ + struct spraid_bsg_request *bsg_req = + (struct spraid_bsg_request *)(job->request); + struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd); + struct spraid_ioq_command ioq_cmd; + int status = 0; + u32 timeout = msecs_to_jiffies(cmd->timeout_ms); + + if (cmd->data_len > PAGE_SIZE) { + dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__); + return -EFAULT; + } + + if (hdev->state != SPRAID_LIVE) { + dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n", + __func__, hdev->state); + return -EBUSY; + } + + dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init;" + " datalen[%d]\n", + __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len); + + memset(&ioq_cmd, 0, sizeof(ioq_cmd)); + ioq_cmd.common.opcode = cmd->opcode; + ioq_cmd.common.flags = cmd->flags; + ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid); + ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len); + ioq_cmd.common.cdb_len = cmd->info_0.cdb_len; + ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0; + ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3); + ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4); + ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5); + + ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10); + ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11); + ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12); + ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13); + ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14); + ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len); + + memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len); + + ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]); + ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]); + ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]); + ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]); + + status = spraid_bsg_map_data(hdev, job, + (struct spraid_admin_command *)&ioq_cmd); + if (status) { + dev_err(hdev->dev, "[%s] err, map data failed\n", __func__); + return status; + } + + status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, 
job->reply, + &job->reply_len, timeout); + + dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x];" + " reply_len[%d]\n", + __func__, cmd->opcode, cmd->info_1.subopcode, + status, job->reply_len); + + spraid_bsg_unmap_data(hdev, job); + + return status; +} + +static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd) +{ + struct spraid_dev *hdev = shost_priv(scmd->device->host); + struct spraid_iod *iod = scsi_cmd_priv(scmd); + struct spraid_queue *spraidq; + u16 hwq, cid; + + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + spraidq = &hdev->queues[hwq]; + if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE + || spraid_poll_cq(spraidq, cid)) { + dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n", + cid, spraidq->qid); + return true; + } + return false; +} + +static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd) +{ + struct spraid_iod *iod = scsi_cmd_priv(scmd); + unsigned int timeout = scmd->device->request_queue->rq_timeout; + + if (spraid_check_scmd_completed(scmd)) + goto out; + + if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) { + if (cmpxchg(&iod->state, + SPRAID_CMD_IN_FLIGHT, + SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) { + return BLK_EH_DONE; + } + } +out: + return BLK_EH_RESET_TIMER; +} + +/* send abort command by admin queue temporary */ +static int spraid_send_abort_cmd(struct spraid_dev *hdev, + u32 hdid, u16 qid, u16 cid) +{ + struct spraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD; + admin_cmd.abort.hdid = cpu_to_le32(hdid); + admin_cmd.abort.sqid = cpu_to_le16(qid); + admin_cmd.abort.cid = cpu_to_le16(cid); + + return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); +} + +/* send reset command by admin quueue temporary */ +static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid) +{ + struct spraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.reset.opcode = SPRAID_ADMIN_RESET; + admin_cmd.reset.hdid = cpu_to_le32(hdid); + admin_cmd.reset.type = type; + + return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); +} + +static bool spraid_change_host_state(struct spraid_dev *hdev, + enum spraid_state newstate) +{ + unsigned long flags; + enum spraid_state oldstate; + bool change = false; + + spin_lock_irqsave(&hdev->state_lock, flags); + + oldstate = hdev->state; + switch (newstate) { + case SPRAID_LIVE: + switch (oldstate) { + case SPRAID_NEW: + case SPRAID_RESETTING: + change = true; + break; + default: + break; + } + break; + case SPRAID_RESETTING: + switch (oldstate) { + case SPRAID_LIVE: + change = true; + break; + default: + break; + } + break; + case SPRAID_DELETING: + if (oldstate != SPRAID_DELETING) + change = true; + break; + case SPRAID_DEAD: + switch (oldstate) { + case SPRAID_NEW: + case SPRAID_LIVE: + case SPRAID_RESETTING: + change = true; + break; + default: + break; + } + break; + default: + break; + } + if (change) + hdev->state = newstate; + spin_unlock_irqrestore(&hdev->state_lock, flags); + + dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n", + __func__, oldstate, newstate, change); + + return change; +} + +static void spraid_back_fault_cqe(struct spraid_queue *ioq, + struct spraid_completion *cqe) +{ + struct spraid_dev *hdev = ioq->hdev; + struct blk_mq_tags *tags; + struct scsi_cmnd *scmd; + struct spraid_iod *iod; + struct request *req; + + tags = hdev->shost->tag_set.tags[ioq->qid - 1]; + req = blk_mq_tag_to_rq(tags, cqe->cmd_id); + 
if (unlikely(!req || !blk_mq_request_started(req))) + return; + + scmd = blk_mq_rq_to_pdu(req); + iod = scsi_cmd_priv(scmd); + + set_host_byte(scmd, DID_NO_CONNECT); + if (iod->nsge) + scsi_dma_unmap(scmd); + spraid_free_iod_res(hdev, iod); + scmd->scsi_done(scmd); + dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n", + cqe->cmd_id, ioq->qid); +} + +static void spraid_back_all_io(struct spraid_dev *hdev) +{ + int i, j; + struct spraid_queue *ioq; + struct spraid_completion cqe = { 0 }; + + scsi_block_requests(hdev->shost); + + for (i = 1; i <= hdev->shost->nr_hw_queues; i++) { + ioq = &hdev->queues[i]; + for (j = 0; j < hdev->shost->can_queue; j++) { + cqe.cmd_id = j; + spraid_back_fault_cqe(ioq, &cqe); + } + } + + scsi_unblock_requests(hdev->shost); +} + +static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown) +{ + struct spraid_queue *adminq = &hdev->queues[0]; + u16 start, end; + unsigned long timeout = jiffies + 600 * HZ; + + if (pci_device_is_present(hdev->pdev)) { + if (shutdown) + spraid_shutdown_ctrl(hdev); + else + spraid_disable_ctrl(hdev); + } + + while (!time_after(jiffies, timeout)) { + if (!pci_device_is_present(hdev->pdev)) { + dev_info(hdev->dev, "[%s] pci_device not present;" + " skip wait\n", __func__); + break; + } + if (!spraid_wait_ready(hdev, hdev->cap, false)) { + dev_info(hdev->dev, + "[%s] wait ready success after reset\n", + __func__); + break; + } + dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__); + } + + if (hdev->queue_count == 0) { + dev_err(hdev->dev, "[%s] warn, queue has been delete\n", + __func__); + return; + } + + spin_lock_irq(&adminq->cq_lock); + spraid_process_cq(adminq, &start, &end, -1); + spin_unlock_irq(&adminq->cq_lock); + spraid_complete_cqes(adminq, start, end); + + spraid_pci_disable(hdev); + + spraid_back_all_io(hdev); +} + +static void spraid_reset_work(struct work_struct *work) +{ + int ret; + struct spraid_dev *hdev = + container_of(work, struct spraid_dev, reset_work); + + if (hdev->state != SPRAID_RESETTING) { + dev_err(hdev->dev, "[%s] err, host is not reset state\n", + __func__); + return; + } + + dev_info(hdev->dev, "[%s] enter host reset\n", __func__); + + if (hdev->ctrl_config & SPRAID_CC_ENABLE) { + dev_info(hdev->dev, "[%s] start dev_disable\n", __func__); + spraid_dev_disable(hdev, false); + } + + ret = spraid_pci_enable(hdev); + if (ret) + goto out; + + ret = spraid_setup_admin_queue(hdev); + if (ret) + goto pci_disable; + + ret = spraid_setup_io_queues(hdev); + if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues) + goto pci_disable; + + spraid_change_host_state(hdev, SPRAID_LIVE); + + spraid_send_all_aen(hdev); + + return; + +pci_disable: + spraid_pci_disable(hdev); +out: + spraid_change_host_state(hdev, SPRAID_DEAD); + dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__); +} + +static int spraid_reset_work_sync(struct spraid_dev *hdev) +{ + if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) { + dev_info(hdev->dev, "[%s] can't change to reset state\n", + __func__); + return -EBUSY; + } + + if (!queue_work(spraid_wq, &hdev->reset_work)) { + dev_err(hdev->dev, "[%s] err, host is already in reset state\n", + __func__); + return -EBUSY; + } + + flush_work(&hdev->reset_work); + if (hdev->state != SPRAID_LIVE) + return -ENODEV; + + return 0; +} + +static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod) +{ + u16 times = 0; + + do { + if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE) + break; + msleep(500); + times++; + } while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT); + + 
/* wait command completion timeout after abort/reset success */ + if (times >= SPRAID_WAIT_ABNL_CMD_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int spraid_abort_handler(struct scsi_cmnd *scmd) +{ + struct spraid_dev *hdev = shost_priv(scmd->device->host); + struct spraid_iod *iod = scsi_cmd_priv(scmd); + struct spraid_sdev_hostdata *hostdata; + u16 hwq, cid; + int ret; + + scsi_print_command(scmd); + + if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) || + spraid_check_scmd_completed(scmd)) + return SUCCESS; + + hostdata = scmd->device->hostdata; + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + + dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq); + ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid); + if (ret != -ETIME) { + ret = spraid_wait_abnl_cmd_done(iod); + if (ret) { + dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed;" + " not found\n", cid, hwq); + return FAILED; + } + dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq); + return SUCCESS; + } + dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n", + cid, hwq); + return FAILED; +} + +static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd) +{ + struct spraid_dev *hdev = shost_priv(scmd->device->host); + struct spraid_iod *iod = scsi_cmd_priv(scmd); + struct spraid_sdev_hostdata *hostdata; + u16 hwq, cid; + int ret; + + scsi_print_command(scmd); + + if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) || + spraid_check_scmd_completed(scmd)) + return SUCCESS; + + hostdata = scmd->device->hostdata; + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + + dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n", + cid, hwq); + ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid); + if (ret == 0) { + ret = spraid_wait_abnl_cmd_done(iod); + if (ret) { + dev_warn(hdev->dev, + "cid[%d] qid[%d]target reset failed;" + " not found\n", cid, hwq); + return FAILED; + } + + dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n", + cid, hwq); + return SUCCESS; + } + + dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n", + cid, hwq, ret); + return FAILED; +} + +static int spraid_bus_reset_handler(struct scsi_cmnd *scmd) +{ + struct spraid_dev *hdev = shost_priv(scmd->device->host); + struct spraid_iod *iod = scsi_cmd_priv(scmd); + struct spraid_sdev_hostdata *hostdata; + u16 hwq, cid; + int ret; + + scsi_print_command(scmd); + + if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) || + spraid_check_scmd_completed(scmd)) + return SUCCESS; + + hostdata = scmd->device->hostdata; + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + + dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq); + ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid); + if (ret == 0) { + ret = spraid_wait_abnl_cmd_done(iod); + if (ret) { + dev_warn(hdev->dev, + "cid[%d] qid[%d] bus reset failed;" + " not found\n", cid, hwq); + return FAILED; + } + + dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n", + cid, hwq); + return SUCCESS; + } + + dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n", + cid, hwq, ret); + return FAILED; +} + +static int spraid_shost_reset_handler(struct scsi_cmnd *scmd) +{ + u16 hwq, cid; + struct spraid_dev *hdev = shost_priv(scmd->device->host); + + scsi_print_command(scmd); + if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd)) + return SUCCESS; + + spraid_get_tag_from_scmd(scmd, &hwq, &cid); + dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq); 
+ + if (spraid_reset_work_sync(hdev)) { + dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n", + cid, hwq); + return FAILED; + } + + dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq); + + return SUCCESS; +} + +static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct spraid_dev *hdev = pci_get_drvdata(pdev); + + dev_info(hdev->dev, "enter pci error detect, state:%d\n", state); + + switch (state) { + case pci_channel_io_normal: + dev_warn(hdev->dev, "channel is normal, do nothing\n"); + + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + dev_warn(hdev->dev, + "channel io frozen, need reset controller\n"); + + scsi_block_requests(hdev->shost); + + spraid_change_host_state(hdev, SPRAID_RESETTING); + + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_warn(hdev->dev, "channel io failure, request disconnect\n"); + + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev) +{ + struct spraid_dev *hdev = pci_get_drvdata(pdev); + + dev_info(hdev->dev, "restart after slot reset\n"); + + pci_restore_state(pdev); + + if (!queue_work(spraid_wq, &hdev->reset_work)) { + dev_err(hdev->dev, "[%s] err, the device is resetting state\n", + __func__); + return PCI_ERS_RESULT_NONE; + } + + flush_work(&hdev->reset_work); + + scsi_unblock_requests(hdev->shost); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void spraid_reset_done(struct pci_dev *pdev) +{ + struct spraid_dev *hdev = pci_get_drvdata(pdev); + + dev_info(hdev->dev, "enter spraid reset done\n"); +} + +static ssize_t csts_pp_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct spraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + SPRAID_REG_CSTS) + & SPRAID_CSTS_PP_MASK); + ret >>= SPRAID_CSTS_PP_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_shst_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct spraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + SPRAID_REG_CSTS) + & SPRAID_CSTS_SHST_MASK); + ret >>= SPRAID_CSTS_SHST_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_cfs_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct spraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) { + ret = (readl(hdev->bar + SPRAID_REG_CSTS) + & SPRAID_CSTS_CFS_MASK); + ret >>= SPRAID_CSTS_CFS_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_rdy_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct spraid_dev *hdev = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(hdev->pdev)) + ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct spraid_dev *hdev = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", 
hdev->ctrl_info->fr); +} + +static DEVICE_ATTR_RO(csts_pp); +static DEVICE_ATTR_RO(csts_shst); +static DEVICE_ATTR_RO(csts_cfs); +static DEVICE_ATTR_RO(csts_rdy); +static DEVICE_ATTR_RO(fw_version); + +static struct device_attribute *spraid_host_attrs[] = { + &dev_attr_csts_pp, + &dev_attr_csts_shst, + &dev_attr_csts_cfs, + &dev_attr_csts_rdy, + &dev_attr_fw_version, + NULL, +}; + +static int spraid_get_vd_info(struct spraid_dev *hdev, + struct spraid_vd_info *vd_info, u16 vid) +{ + struct spraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int ret; + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &data_dma, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN); + admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return ret; +} + +static int spraid_get_bgtask(struct spraid_dev *hdev, + struct spraid_bgtask *bgtask) +{ + struct spraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int ret; + + data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, + &data_dma, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask)); + + dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return ret; +} + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct spraid_dev *hdev; + struct spraid_vd_info *vd_info; + struct spraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = spraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) + vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1; + + ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ? 
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1); + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]); +} + +static ssize_t raid_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct spraid_dev *hdev; + struct spraid_vd_info *vd_info; + struct spraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = spraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) { + vd_info->vd_status = 0; + vd_info->rg_id = 0xff; + } + + ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? + vd_info->vd_status : 0; + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]); +} + +static ssize_t raid_resync_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct spraid_dev *hdev; + struct spraid_vd_info *vd_info; + struct spraid_bgtask *bgtask; + struct spraid_sdev_hostdata *hostdata; + u8 rg_id, i, progress = 0; + int ret; + + sdev = to_scsi_device(dev); + hdev = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = spraid_get_vd_info(hdev, vd_info, sdev->id); + if (ret) + goto out; + + rg_id = vd_info->rg_id; + + bgtask = (struct spraid_bgtask *)vd_info; + ret = spraid_get_bgtask(hdev, bgtask); + if (ret) + goto out; + for (i = 0; i < bgtask->task_num; i++) { + if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) && + (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id)) + progress = bgtask->bgtask[i].progress; + } + +out: + kfree(vd_info); + return snprintf(buf, PAGE_SIZE, "%d\n", progress); +} + +static DEVICE_ATTR_RO(raid_level); +static DEVICE_ATTR_RO(raid_state); +static DEVICE_ATTR_RO(raid_resync); + +static struct device_attribute *spraid_dev_attrs[] = { + &dev_attr_raid_level, + &dev_attr_raid_state, + &dev_attr_raid_resync, + NULL, +}; + +static struct pci_error_handlers spraid_err_handler = { + .error_detected = spraid_pci_error_detected, + .slot_reset = spraid_pci_slot_reset, + .reset_done = spraid_reset_done, +}; + +static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type) +{ + int ret; + struct spraid_dev *hdev = shost_priv(shost); + + dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__); + ret = spraid_reset_work_sync(hdev); + dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n", + __func__, ret); + + return ret; +} + +static struct scsi_host_template spraid_driver_template = { + .module = THIS_MODULE, + .name = "Ramaxel Logic spraid driver", + .proc_name = "spraid", + .queuecommand = spraid_queue_command, + .slave_alloc = spraid_slave_alloc, + .slave_destroy = spraid_slave_destroy, + .slave_configure = spraid_slave_configure, + .eh_timed_out = spraid_scmd_timeout, + .eh_abort_handler = spraid_abort_handler, + .eh_target_reset_handler = spraid_tgt_reset_handler, + .eh_bus_reset_handler = spraid_bus_reset_handler, + .eh_host_reset_handler = spraid_shost_reset_handler, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .shost_attrs = spraid_host_attrs, + .sdev_attrs = spraid_dev_attrs, + .host_reset = spraid_sysfs_host_reset, +}; + +static void spraid_shutdown(struct pci_dev 
*pdev) +{ + struct spraid_dev *hdev = pci_get_drvdata(pdev); + + spraid_remove_io_queues(hdev); + spraid_disable_admin_queue(hdev, true); +} + +/* bsg dispatch user command */ +static int spraid_bsg_host_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = dev_to_shost(job->dev); + struct spraid_dev *hdev = shost_priv(shost); + struct request *rq = blk_mq_rq_from_pdu(job); + struct spraid_bsg_request *bsg_req = job->request; + int ret = 0; + + dev_log_dbg(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d];" + " req_nsge[%d], req_len[%d]\n", + __func__, bsg_req->msgcode, job->request_len, + rq->timeout, job->request_payload.sg_cnt, + job->request_payload.payload_len); + + job->reply_len = 0; + + switch (bsg_req->msgcode) { + case SPRAID_BSG_ADM: + ret = spraid_user_admin_cmd(hdev, job); + break; + case SPRAID_BSG_IOQ: + ret = spraid_user_ioq_cmd(hdev, job); + break; + default: + dev_info(hdev->dev, "[%s] unsupport msgcode[%d]\n", + __func__, bsg_req->msgcode); + break; + } + + if (ret > 0) + ret = ret | (ret << 8); + + bsg_job_done(job, ret, 0); + return 0; +} + +static inline void spraid_remove_bsg(struct spraid_dev *hdev) +{ + if (hdev->bsg_queue) { + bsg_unregister_queue(hdev->bsg_queue); + blk_cleanup_queue(hdev->bsg_queue); + } +} +static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct spraid_dev *hdev; + struct Scsi_Host *shost; + int node, ret; + char bsg_name[15]; + + shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev)); + if (!shost) { + dev_err(&pdev->dev, "Failed to allocate scsi host\n"); + return -ENOMEM; + } + hdev = shost_priv(shost); + hdev->pdev = pdev; + hdev->dev = get_device(&pdev->dev); + + node = dev_to_node(hdev->dev); + if (node == NUMA_NO_NODE) { + node = first_memory_node; + set_dev_node(hdev->dev, node); + } + hdev->numa_node = node; + hdev->shost = shost; + pci_set_drvdata(pdev, hdev); + + ret = spraid_dev_map(hdev); + if (ret) + goto put_dev; + + init_rwsem(&hdev->devices_rwsem); + INIT_WORK(&hdev->scan_work, spraid_scan_work); + INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work); + INIT_WORK(&hdev->reset_work, spraid_reset_work); + INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work); + spin_lock_init(&hdev->state_lock); + + ret = spraid_alloc_resources(hdev); + if (ret) + goto dev_unmap; + + ret = spraid_pci_enable(hdev); + if (ret) + goto resources_free; + + ret = spraid_setup_admin_queue(hdev); + if (ret) + goto pci_disable; + + ret = spraid_init_ctrl_info(hdev); + if (ret) + goto disable_admin_q; + + ret = spraid_alloc_iod_ext_mem_pool(hdev); + if (ret) + goto disable_admin_q; + + ret = spraid_setup_io_queues(hdev); + if (ret) + goto free_iod_mempool; + + spraid_shost_init(hdev); + + ret = scsi_add_host(hdev->shost, hdev->dev); + if (ret) { + dev_err(hdev->dev, "Add shost to system failed, ret: %d\n", + ret); + goto remove_io_queues; + } + + snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no); + hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, + spraid_bsg_host_dispatch, + spraid_cmd_size(hdev, true, false)); + if (IS_ERR(hdev->bsg_queue)) { + dev_err(hdev->dev, "err, setup bsg failed\n"); + hdev->bsg_queue = NULL; + goto remove_io_queues; + } + + if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) { + dev_warn(hdev->dev, "warn only admin queue can be used\n"); + return 0; + } + + hdev->state = SPRAID_LIVE; + + spraid_send_all_aen(hdev); + + ret = spraid_dev_list_init(hdev); + if (ret) + goto remove_bsg; + + ret = spraid_configure_timestamp(hdev); + if (ret) + 
dev_warn(hdev->dev, "init set timestamp failed\n"); + + ret = spraid_alloc_ioq_ptcmds(hdev); + if (ret) + goto remove_bsg; + + scsi_scan_host(hdev->shost); + + return 0; + +remove_bsg: + spraid_remove_bsg(hdev); +remove_io_queues: + spraid_remove_io_queues(hdev); +free_iod_mempool: + spraid_free_iod_ext_mem_pool(hdev); +disable_admin_q: + spraid_disable_admin_queue(hdev, false); +pci_disable: + spraid_pci_disable(hdev); +resources_free: + spraid_free_resources(hdev); +dev_unmap: + spraid_dev_unmap(hdev); +put_dev: + put_device(hdev->dev); + scsi_host_put(shost); + + return -ENODEV; +} + +static void spraid_remove(struct pci_dev *pdev) +{ + struct spraid_dev *hdev = pci_get_drvdata(pdev); + struct Scsi_Host *shost = hdev->shost; + + dev_info(hdev->dev, "enter spraid remove\n"); + + spraid_change_host_state(hdev, SPRAID_DELETING); + flush_work(&hdev->reset_work); + + if (!pci_device_is_present(pdev)) + spraid_back_all_io(hdev); + + spraid_remove_bsg(hdev); + scsi_remove_host(shost); + spraid_free_ioq_ptcmds(hdev); + kfree(hdev->devices); + spraid_remove_io_queues(hdev); + spraid_free_iod_ext_mem_pool(hdev); + spraid_disable_admin_queue(hdev, false); + spraid_pci_disable(hdev); + spraid_free_resources(hdev); + spraid_dev_unmap(hdev); + put_device(hdev->dev); + scsi_host_put(shost); + + dev_info(hdev->dev, "exit spraid remove\n"); +} + +static const struct pci_device_id spraid_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, + SPRAID_SERVER_DEVICE_HBA_DID) }, + { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, + SPRAID_SERVER_DEVICE_RAID_DID) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, spraid_id_table); + +static struct pci_driver spraid_driver = { + .name = "spraid", + .id_table = spraid_id_table, + .probe = spraid_probe, + .remove = spraid_remove, + .shutdown = spraid_shutdown, + .err_handler = &spraid_err_handler, +}; + +static int __init spraid_init(void) +{ + int ret; + + spraid_wq = alloc_workqueue("spraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | + WQ_SYSFS, 0); + if (!spraid_wq) + return -ENOMEM; + + spraid_class = class_create(THIS_MODULE, "spraid"); + if (IS_ERR(spraid_class)) { + ret = PTR_ERR(spraid_class); + goto destroy_wq; + } + + ret = pci_register_driver(&spraid_driver); + if (ret < 0) + goto destroy_class; + + return 0; + +destroy_class: + class_destroy(spraid_class); +destroy_wq: + destroy_workqueue(spraid_wq); + + return ret; +} + +static void __exit spraid_exit(void) +{ + pci_unregister_driver(&spraid_driver); + class_destroy(spraid_class); + destroy_workqueue(spraid_wq); + ida_destroy(&spraid_instance_ida); +} + +MODULE_AUTHOR("songyl@ramaxel.com"); +MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SPRAID_DRV_VERSION); +module_init(spraid_init); +module_exit(spraid_exit); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index d0389b20574d0f778e2bfd95b07e80458970dbd5..f464d5c050fc15ed1ede007f1b0e2d54846806d0 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -216,6 +216,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev) return DISK_EVENT_EJECT_REQUEST; else if (med->media_event_code == 2) return DISK_EVENT_MEDIA_CHANGE; + else if (med->media_event_code == 3) + return DISK_EVENT_MEDIA_CHANGE; return 0; } @@ -748,7 +750,7 @@ static int sr_probe(struct device *dev) cd->cdi.disk = disk; if (register_cdrom(&cd->cdi)) - goto fail_put; + goto fail_minor; /* * Initialize block layer runtime PM stuffs before the @@ -766,6 +768,10 @@ static int sr_probe(struct device *dev) return 0; +fail_minor: + 
spin_lock(&sr_index_lock); + clear_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); fail_put: put_disk(disk); fail_free: @@ -879,7 +885,7 @@ static void get_capabilities(struct scsi_cd *cd) /* allocate transfer buffer */ - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return; @@ -892,7 +898,7 @@ static void get_capabilities(struct scsi_cd *cd) rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); - if (!scsi_status_is_good(rc) || data.length > ms_len || + if (rc < 0 || data.length > ms_len || data.header_length + data.block_descriptor_length > data.length) { /* failed, drive doesn't have capabilities mode page */ cd->cdi.speed = 1; diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index ffcf902da3901c5708ae76535b33162d1e83adb7..85ed1c6be6ce4ed4099429f2dc097ee23d20a851 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -45,7 +45,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi, int result; unsigned char *buffer; - buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); + buffer = kzalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); if (!buffer) return -ENOMEM; @@ -59,10 +59,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi, cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; tochdr->cdth_trk0 = buffer[2]; tochdr->cdth_trk1 = buffer[3]; +err: kfree(buffer); return result; } @@ -75,7 +78,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, int result; unsigned char *buffer; - buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); + buffer = kzalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); if (!buffer) return -ENOMEM; @@ -90,6 +93,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, cgc.data_direction = DMA_FROM_DEVICE; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; tocentry->cdte_ctrl = buffer[5] & 0xf; tocentry->cdte_adr = buffer[5] >> 4; @@ -102,6 +107,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi, tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8) + buffer[10]) << 8) + buffer[11]; +err: kfree(buffer); return result; } @@ -206,6 +212,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) /* Minimal error checking. Ignore cases we know about, and report the rest. */ if (driver_byte(result) != 0) { + if (!scsi_sense_valid(sshdr)) { + err = -EIO; + goto out; + } + switch (sshdr->sense_key) { case UNIT_ATTENTION: SDev->changed = 1; @@ -384,7 +395,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; - char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); + char *buffer = kzalloc(32, GFP_KERNEL | SR_GFP_DMA(cd)); int result; if (!buffer) @@ -400,10 +411,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) cgc.data_direction = DMA_FROM_DEVICE; cgc.timeout = IOCTL_TIMEOUT; result = sr_do_ioctl(cd, &cgc); + if (result) + goto err; memcpy(mcn->medium_catalog_number, buffer + 9, 13); mcn->medium_catalog_number[13] = 0; +err: kfree(buffer); return result; } diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index e3b0ce25162baa10ff1447f5f488376720f4249e..2887be4316be92386e9cc5547d8b73637425c2c2 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -119,7 +119,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) density = (blocklength > 2048) ? 
0x81 : 0x83; #endif - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -167,7 +167,7 @@ int sr_cd_check(struct cdrom_device_info *cdi) if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/scsi/sssraid/Kconfig b/drivers/scsi/sssraid/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..769f68732bf3c609093f6554b199320f668cec04 --- /dev/null +++ b/drivers/scsi/sssraid/Kconfig @@ -0,0 +1,14 @@ +# +# Kernel configuration file for the 3SNIC +# + +config SCSI_3SNIC_SSSRAID + tristate "3SNIC sssraid Adapter" + depends on PCI && SCSI + select BLK_DEV_BSGLIB + depends on ARM64 || X86_64 + help + This driver supports 3SNIC 3S5xx serial RAID controller, which has + PCI Express Gen4 interface with host and supports SAS/SATA HDD/SSD. + To compile this driver as a module, choose M here: the module will + be called sssraid. diff --git a/drivers/scsi/sssraid/Makefile b/drivers/scsi/sssraid/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c80605866b42de1ce59e8b4a281055d47638b3a1 --- /dev/null +++ b/drivers/scsi/sssraid/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the 3SNIC sssraid drivers. +# + +obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid.o + +sssraid-objs := sssraid_os.o sssraid_fw.o diff --git a/drivers/scsi/sssraid/sssraid.h b/drivers/scsi/sssraid/sssraid.h new file mode 100644 index 0000000000000000000000000000000000000000..7f5fe6317bf90ce9fd1bbf22700b8579179a7fea --- /dev/null +++ b/drivers/scsi/sssraid/sssraid.h @@ -0,0 +1,1004 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 3SNIC Information Technology, Ltd */ + +/* 3SNIC RAID SSSXXX Series Linux Driver */ + +#ifndef __SSSRAID_H_ +#define __SSSRAID_H_ + +#define SSSRAID_DRIVER_VERSION "1.0.0.0" +#define SSSRAID_DRIVER_RELDATE "12-December-2022" + +#define SSSRAID_DRIVER_NAME "sssraid" + +#define SSSRAID_NAME_LENGTH 32 +#define BSG_NAME_SIZE 15 + +/* + * SSSRAID Vendor ID and Device IDs + */ +#define PCI_VENDOR_ID_3SNIC_LOGIC 0x1F3F + +#define SSSRAID_SERVER_DEVICE_HBA_DID 0x2100 +#define SSSRAID_SERVER_DEVICE_RAID_DID 0x2200 + +#define SSSRAID_CAP_MQES(cap) ((cap)&0xffff) +#define SSSRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) +#define SSSRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define SSSRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) +#define SSSRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) +#define SSSRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff) + +#define SSSRAID_DEFAULT_MAX_CHANNEL 4 +#define SSSRAID_DEFAULT_MAX_ID 240 +#define SSSRAID_DEFAULT_MAX_LUN_PER_HOST 8 +#define MAX_SECTORS 2048 + +/* + * Time define + */ +#define SSSRAID_WATCHDOG_INTERVAL 1000 /* in milli seconds */ +#define SSSRAID_PORTENABLE_TIMEOUT 300 + +/* + * SSSRAID queue and entry size for Admin and I/O type + */ +#define IOCMD_SQE_SIZE sizeof(struct sssraid_ioq_command) +#define ADMIN_SQE_SIZE sizeof(struct sssraid_admin_command) +#define SQE_SIZE(qid) (((qid) > 0) ? 
IOCMD_SQE_SIZE : ADMIN_SQE_SIZE) +#define CQ_SIZE(depth) ((depth) * sizeof(struct sssraid_completion)) +#define SQ_SIZE(qid, depth) ((depth)*SQE_SIZE(qid)) + +#define SENSE_SIZE(depth) ((depth)*SCSI_SENSE_BUFFERSIZE) + +#define SSSRAID_ADMQ_DEPTH 128 +#define SSSRAID_NR_AEN_CMDS 16 +#define SSSRAID_AMDQ_BLK_MQ_DEPTH (SSSRAID_ADMQ_DEPTH - SSSRAID_NR_AEN_CMDS) +#define SSSRAID_AMDQ_MQ_TAG_DEPTH (SSSRAID_AMDQ_BLK_MQ_DEPTH - 1) + +#define SSSRAID_ADM_QUEUE_NUM 1 +#define SSSRAID_PTCMDS_PERQ 1 +#define SSSRAID_IO_BLK_MQ_DEPTH (sdioc->scsi_qd) +#define SSSRAID_NR_HW_QUEUES (sdioc->init_done_queue_cnt - 1) +#define SSSRAID_NR_IOQ_PTCMDS (SSSRAID_PTCMDS_PERQ * SSSRAID_NR_HW_QUEUES) + +#define FUA_MASK 0x08 +#define SSSRAID_MINORS BIT(MINORBITS) +#define SSSRAID_RW_FUA BIT(14) + +#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1) + +#define SSSRAID_IO_IOSQES 7 +#define SSSRAID_IO_IOCQES 4 +#define PRP_ENTRY_SIZE 8 + +#define SMALL_POOL_SIZE 256 +#define MAX_SMALL_POOL_NUM 16 +#define MAX_CMD_PER_DEV 64 +#define MAX_CDB_LEN 16 + +#define SSSRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03)) + +#define CQE_STATUS_SUCCESS (0x0) + +#define IO_6_DEFAULT_TX_LEN 256 + +#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct sssraid_sgl_desc)) + +#define SSSRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2) + +extern u32 admin_tmout; +#define ADMIN_TIMEOUT (admin_tmout * HZ) + +#define SSSRAID_WAIT_ABNL_CMD_TIMEOUT 6 +#define SSSRAID_WAIT_RST_IO_TIMEOUT 10 +#define SSSRAID_DMA_MSK_BIT_MAX 64 + +enum { + SCSI_6_BYTE_CDB_LEN = 6, + SCSI_10_BYTE_CDB_LEN = 10, + SCSI_12_BYTE_CDB_LEN = 12, + SCSI_16_BYTE_CDB_LEN = 16, +}; + +enum { + SSSRAID_SGL_FMT_DATA_DESC = 0x00, + SSSRAID_SGL_FMT_SEG_DESC = 0x02, + SSSRAID_SGL_FMT_LAST_SEG_DESC = 0x03, + SSSRAID_KEY_SGL_FMT_DATA_DESC = 0x04, + SSSRAID_TRANSPORT_SGL_DATA_DESC = 0x05 +}; + +enum { + SSSRAID_REQ_CANCELLED = (1 << 0), + SSSRAID_REQ_USERCMD = (1 << 1), +}; + +enum { + SSSRAID_SC_SUCCESS = 0x0, + SSSRAID_SC_INVALID_OPCODE = 0x1, + SSSRAID_SC_INVALID_FIELD = 0x2, + + SSSRAID_SC_ABORT_LIMIT = 0x103, + SSSRAID_SC_ABORT_MISSING = 0x104, + SSSRAID_SC_ASYNC_LIMIT = 0x105, + + SSSRAID_SC_DNR = 0x4000, +}; + +enum { + SSSRAID_REG_CAP = 0x0000, + SSSRAID_REG_CC = 0x0014, + SSSRAID_REG_CSTS = 0x001c, + SSSRAID_REG_AQA = 0x0024, + SSSRAID_REG_ASQ = 0x0028, + SSSRAID_REG_ACQ = 0x0030, + SSSRAID_REG_DBS = 0x1000, +}; + +enum { + SSSRAID_CC_ENABLE = 1 << 0, + SSSRAID_CC_CSS_NVM = 0 << 4, + SSSRAID_CC_MPS_SHIFT = 7, + SSSRAID_CC_AMS_SHIFT = 11, + SSSRAID_CC_SHN_SHIFT = 14, + SSSRAID_CC_IOSQES_SHIFT = 16, + SSSRAID_CC_IOCQES_SHIFT = 20, + SSSRAID_CC_AMS_RR = 0 << SSSRAID_CC_AMS_SHIFT, + SSSRAID_CC_SHN_NONE = 0 << SSSRAID_CC_SHN_SHIFT, + SSSRAID_CC_IOSQES = SSSRAID_IO_IOSQES << SSSRAID_CC_IOSQES_SHIFT, + SSSRAID_CC_IOCQES = SSSRAID_IO_IOCQES << SSSRAID_CC_IOCQES_SHIFT, + SSSRAID_CC_SHN_NORMAL = 1 << SSSRAID_CC_SHN_SHIFT, + SSSRAID_CC_SHN_MASK = 3 << SSSRAID_CC_SHN_SHIFT, + SSSRAID_CSTS_CFS_SHIFT = 1, + SSSRAID_CSTS_SHST_SHIFT = 2, + SSSRAID_CSTS_PP_SHIFT = 5, + SSSRAID_CSTS_RDY = 1 << 0, + SSSRAID_CSTS_SHST_CMPLT = 2 << 2, + SSSRAID_CSTS_SHST_MASK = 3 << 2, + SSSRAID_CSTS_CFS_MASK = 1 << SSSRAID_CSTS_CFS_SHIFT, + SSSRAID_CSTS_PP_MASK = 1 << SSSRAID_CSTS_PP_SHIFT, +}; + +enum { + SSSRAID_ADM_DELETE_SQ = 0x00, + SSSRAID_ADM_CREATE_SQ = 0x01, + SSSRAID_ADM_DELETE_CQ = 0x04, + SSSRAID_ADM_CREATE_CQ = 0x05, + SSSRAID_ADM_ABORT_CMD = 0x08, + SSSRAID_ADM_SET_FEATURES = 0x09, + SSSRAID_ADM_ASYNC_EVENT = 0x0c, + SSSRAID_ADM_GET_INFO = 0xc6, + SSSRAID_ADM_RESET = 0xc8, +}; + +enum { + SSSRAID_GET_INFO_CTRL = 0, + 
SSSRAID_GET_INFO_DEV_LIST = 1, +}; + +enum sssraid_scsi_rst_type { + SSSRAID_RESET_TARGET = 0, + SSSRAID_RESET_BUS = 1, +}; + +enum { + SSSRAID_AEN_ERROR = 0, + SSSRAID_AEN_NOTICE = 2, + SSSRAID_AEN_VS = 7, +}; + +enum { + SSSRAID_AEN_DEV_CHANGED = 0x00, + SSSRAID_AEN_FW_ACT_START = 0x01, + SSSRAID_AEN_HOST_PROBING = 0x10, +}; + +enum { + SSSRAID_AEN_TIMESYN = 0x00, + SSSRAID_AEN_FW_ACT_FINISH = 0x02, + SSSRAID_AEN_EVENT_MIN = 0x80, + SSSRAID_AEN_EVENT_MAX = 0xff, +}; + +enum { + SSSRAID_IOCMD_WRITE = 0x01, + SSSRAID_IOCMD_READ = 0x02, + + SSSRAID_IOCMD_NONRW_NODIR = 0x80, + SSSRAID_IOCMD_NONRW_TODEV = 0x81, + SSSRAID_IOCMD_NONRW_FROMDEV = 0x82, +}; + +enum { + SSSRAID_QUEUE_PHYS_CONTIG = (1 << 0), + SSSRAID_CQ_IRQ_ENABLED = (1 << 1), + + SSSRAID_FEAT_NUM_QUEUES = 0x07, + SSSRAID_FEAT_ASYNC_EVENT = 0x0b, + SSSRAID_FEAT_TIMESTAMP = 0x0e, +}; + +enum sssraid_state { + SSSRAID_NEW, + SSSRAID_LIVE, + SSSRAID_RESETTING, + SSSRAID_DELETING, + SSSRAID_DEAD, +}; + +enum { + SSSRAID_CARD_HBA, + SSSRAID_CARD_RAID, +}; + +enum sssraid_cmd_type { + SSSRAID_CMD_ADM, + SSSRAID_CMD_IOPT, +}; + +/* + * SSSRAID completion queue entry struct + */ +struct sssraid_completion { + __le32 result; + union { + struct { + __u8 sense_len; + __u8 resv[3]; + }; + __le32 result1; + }; + __le16 sq_head; + __le16 sq_id; + __le16 cmd_id; + __le16 status; +}; + +/* + * SSSRAID firmware controller properties + */ +struct sssraid_ctrl_info { + __le32 nd; + __le16 max_cmds; + __le16 max_channel; + __le32 max_tgt_id; + __le16 max_lun; + __le16 max_num_sge; + __le16 lun_num_in_boot; + __u8 mdts; + __u8 acl; + __u8 aerl; + __u8 card_type; + __u16 rsvd; + __le32 rtd3e; + __u8 sn[32]; + __u8 fr[16]; + __u8 rsvd1[4020]; +}; + +struct sssraid_intr_info { + struct sssraid_ioc *sdioc; + u16 msix_index; + struct sssraid_cqueue *cqinfo; + char name[SSSRAID_NAME_LENGTH]; +}; + +struct sssraid_fwevt { + struct list_head list; + struct work_struct work; + struct sssraid_ioc *sdioc; + u16 event_id; + bool send_ack; + bool process_evt; + u32 evt_ctx; + struct kref ref_count; + char event_data[0] __aligned(4); +}; + +/* + * SSSRAID private device struct definition + */ +struct sssraid_ioc { + struct pci_dev *pdev; + struct Scsi_Host *shost; + struct sssraid_squeue *sqinfo; + struct sssraid_cqueue *cqinfo; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM]; + void __iomem *bar; + + u32 init_done_queue_cnt; + u32 ioq_depth; + u32 db_stride; + u32 __iomem *dbs; + struct rw_semaphore devices_rwsem; + int numa_node; + u32 page_size; + u32 ctrl_config; + u64 cap; + u32 instance; + u32 scsi_qd; + struct sssraid_ctrl_info *ctrl_info; + struct sssraid_dev_info *devices; + + int logging_level; + + char name[SSSRAID_NAME_LENGTH]; + u32 cpu_count; + /* + * before_affinity_msix_cnt is + * min("FW support IO Queue count", num_online_cpus)+1 + */ + u16 before_affinity_msix_cnt; + + struct sssraid_cmd *adm_cmds; + struct list_head adm_cmd_list; + spinlock_t adm_cmd_lock; + + struct sssraid_cmd *ioq_ptcmds; + struct list_head ioq_pt_list; + spinlock_t ioq_pt_lock; + + int reset_flag; + + enum sssraid_state state; + spinlock_t state_lock; + + struct request_queue *bsg_queue; + + u8 intr_enabled; + + struct sssraid_intr_info *intr_info; + u32 intr_info_count; + + char watchdog_work_q_name[20]; + struct workqueue_struct *watchdog_work_q; + struct delayed_work watchdog_work; + spinlock_t watchdog_lock; + + char fwevt_worker_name[SSSRAID_NAME_LENGTH]; + struct workqueue_struct *fwevt_worker_thread; + spinlock_t fwevt_lock; + 
struct list_head fwevt_list; + + struct sssraid_fwevt *current_event; + + void *senses; + dma_addr_t sense_dma_addr; + u32 last_qcnt; + u8 hdd_dispatch; +}; + +/* + * SSSRAID scatter list descriptor + */ +struct sssraid_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +union sssraid_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct sssraid_sgl_desc sgl; +}; + +/* + * SSSRAID general admin class command format struct + */ +struct sssraid_admin_common_command { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __le32 cdw2[4]; + union sssraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +struct sssraid_features { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __u64 rsvd2[2]; + union sssraid_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +/* + * SSSRAID create completion queue command struct + */ +struct sssraid_create_cq { + __u8 opcode; + __u8 flags; + __le16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +/* + * SSSRAID create submission queue command struct + */ +struct sssraid_create_sq { + __u8 opcode; + __u8 flags; + __le16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +/* + * SSSRAID delete submission queue command struct + */ +struct sssraid_delete_queue { + __u8 opcode; + __u8 flags; + __le16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +/* + * SSSRAID access to information command struct + */ +struct sssraid_get_info { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __u32 rsvd2[4]; + union sssraid_data_ptr dptr; + __u8 type; + __u8 rsvd10[3]; + __le32 cdw11; + __u32 rsvd12[4]; +}; + +/* + * User command struct + */ +struct sssraid_usr_cmd { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + union { + struct { + __le16 subopcode; + __le16 rsvd1; + } info_0; + __le32 cdw2; + }; + union { + struct { + __le16 data_len; + __le16 param_len; + } info_1; + __le32 cdw3; + }; + __u64 metadata; + union sssraid_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +enum { + SSSRAID_CMD_FLAG_SGL_METABUF = (1 << 6), + SSSRAID_CMD_FLAG_SGL_METASEG = (1 << 7), + SSSRAID_CMD_FLAG_SGL_ALL = + SSSRAID_CMD_FLAG_SGL_METABUF | SSSRAID_CMD_FLAG_SGL_METASEG, +}; + +enum sssraid_cmd_state { + SSSRAID_CMDSTAT_IDLE = 0, + SSSRAID_CMDSTAT_FLIGHT = 1, + SSSRAID_CMDSTAT_COMPLETE = 2, + SSSRAID_CMDSTAT_TIMEOUT = 3, + SSSRAID_CMDSTAT_TMO_COMPLETE = 4, +}; + +/* + * SSSRAID abort command struct + */ +struct sssraid_abort_cmd { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __u64 rsvd2[4]; + __le16 sqid; + __le16 cid; + __u32 rsvd11[5]; +}; + +/* + * SSSRAID reset command struct + */ +struct sssraid_reset_cmd { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __u64 rsvd2[4]; + __u8 type; + __u8 rsvd10[3]; + __u32 rsvd11[5]; +}; + +/* + * SSSRAID admin class command set struct + */ +struct sssraid_admin_command { + union { + struct sssraid_admin_common_command common; + struct sssraid_features features; + struct sssraid_create_cq create_cq; + struct sssraid_create_sq create_sq; + struct sssraid_delete_queue 
delete_queue; + struct sssraid_get_info get_info; + struct sssraid_abort_cmd abort; + struct sssraid_reset_cmd reset; + struct sssraid_usr_cmd usr_cmd; + }; +}; + +/* + * SSSRAID general IO class command format struct + */ +struct sssraid_ioq_common_command { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __le32 cdw3[3]; + union sssraid_data_ptr dptr; + __le32 cdw10[6]; + __u8 cdb[32]; + __le64 sense_addr; + __le32 cdw26[6]; +}; + +/* + * SSSRAID read or write command struct + */ +struct sssraid_rw_command { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_len; + __u8 rsvd2; + __u32 rsvd3[3]; + union sssraid_data_ptr dptr; + __le64 slba; + __le16 nlb; + __le16 control; + __u32 rsvd13[3]; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +struct sssraid_scsi_nonio { + __u8 opcode; + __u8 flags; + __le16 command_id; + __le32 hdid; + __le16 sense_len; + __u8 cdb_length; + __u8 rsvd2; + __u32 rsvd3[3]; + union sssraid_data_ptr dptr; + __u32 rsvd10[5]; + __le32 buffer_len; + __u8 cdb[32]; + __le64 sense_addr; + __u32 rsvd26[6]; +}; + +/* + * SSSRAID IO class command struct + */ +struct sssraid_ioq_command { + union { + struct sssraid_ioq_common_command common; + struct sssraid_rw_command rw; + struct sssraid_scsi_nonio scsi_nonio; + }; +}; + +/* + * SSSRAID passthru command struct + */ +struct sssraid_passthru_common_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 data_len; + __u16 param_len; + } info_1; + __u32 cdw3; + }; + __u64 metadata; + + __u64 addr; + __u64 prp2; + + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct sssraid_ioq_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd0; + __u32 nsid; + union { + struct { + __u16 res_sense_len; + __u8 cdb_len; + __u8 rsvd0; + } info_0; + __u32 cdw2; + }; + union { + struct { + __u16 subopcode; + __u16 rsvd1; + } info_1; + __u32 cdw3; + }; + union { + struct { + __u16 rsvd; + __u16 param_len; + } info_2; + __u32 cdw4; + }; + __u32 cdw5; + __u64 addr; + __u64 prp2; + union { + struct { + __u16 eid; + __u16 sid; + } info_3; + __u32 cdw10; + }; + union { + struct { + __u16 did; + __u8 did_flag; + __u8 rsvd2; + } info_4; + __u32 cdw11; + }; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 data_len; + __u32 cdw16; + __u32 cdw17; + __u32 cdw18; + __u32 cdw19; + __u32 cdw20; + __u32 cdw21; + __u32 cdw22; + __u32 cdw23; + __u64 sense_addr; + __u32 cdw26[4]; + __u32 timeout_ms; + __u32 result0; + __u32 result1; +}; + +struct sssraid_bsg_request { + u32 msgcode; + u32 control; + union { + struct sssraid_passthru_common_cmd admcmd; + struct sssraid_ioq_passthru_cmd ioqcmd; + }; +}; + +enum { + SSSRAID_BSG_ADM, + SSSRAID_BSG_IOQ, +}; + +/* + * define the transfer command struct + */ +struct sssraid_cmd { + u16 qid; + u16 cid; + u32 result0; + u32 result1; + u16 status; + void *priv; + enum sssraid_cmd_state state; + struct completion cmd_done; + struct list_head list; +}; + +/* + * define the SSSRAID physical queue struct + */ +struct sssraid_squeue { + struct sssraid_ioc *sdioc; + spinlock_t sq_lock; /* spinlock for lock handling */ + + void *sq_cmds; + + dma_addr_t sq_dma_addr; + u32 __iomem *q_db; + u8 cq_phase; + u8 sqes; + u16 qidx; + u16 sq_tail; + u16 last_cq_head; + u16 q_depth; + 
void *sense; + dma_addr_t sense_dma_addr; + struct dma_pool *prp_small_pool; + atomic_t inflight; +}; + +struct sssraid_cqueue { + struct sssraid_ioc *sdioc; + + spinlock_t cq_lock + ____cacheline_aligned_in_smp; /* spinlock for lock handling */ + + struct sssraid_completion *cqes; + + dma_addr_t cq_dma_addr; + u8 cq_phase; + u16 cq_head; + u16 last_cq_head; +}; + +/* + * define the SSSRAID IO queue descriptor struct + */ +struct sssraid_iod { + struct sssraid_squeue *sqinfo; + enum sssraid_cmd_state state; + int npages; + u32 nsge; + u16 cid; + u32 length; + bool use_sgl; + dma_addr_t first_dma; + void *sense; + dma_addr_t sense_dma; + struct scatterlist *sg; + void *list[0]; +}; + +/* + * define the SSSRAID scsi device attribution and information + */ +#define SSSRAID_DISK_INFO_ATTR_BOOT(attr) ((attr)&0x01) +#define SSSRAID_DISK_INFO_ATTR_VD(attr) (((attr)&0x02) == 0x0) +#define SSSRAID_DISK_INFO_ATTR_PT(attr) (((attr)&0x22) == 0x02) +#define SSSRAID_DISK_INFO_ATTR_RAW(attr) ((attr)&0x20) +#define SSSRAID_DISK_TYPE(attr) ((attr)&0x1e) + +#define SSSRAID_DISK_INFO_FLAG_VALID(flag) ((flag)&0x01) +#define SSSRAID_DISK_INFO_FLAG_CHANGE(flag) ((flag)&0x02) + +/* + * define the SSSRAID scsi device identifier + */ +enum { + SSSRAID_SAS_HDD_VD = 0x04, + SSSRAID_SATA_HDD_VD = 0x08, + SSSRAID_SAS_SSD_VD = 0x0c, + SSSRAID_SATA_SSD_VD = 0x10, + SSSRAID_NVME_SSD_VD = 0x14, + SSSRAID_SAS_HDD_PD = 0x06, + SSSRAID_SATA_HDD_PD = 0x0a, + SSSRAID_SAS_SSD_PD = 0x0e, + SSSRAID_SATA_SSD_PD = 0x12, + SSSRAID_NVME_SSD_PD = 0x16, +}; + +enum { + DISPATCH_BY_CPU, + DISPATCH_BY_DISK, +}; + +/* + * define the SSSRAID scsi device queue depth + */ +#define SSSRAID_HDD_PD_QD 64 +#define SSSRAID_HDD_VD_QD 256 +#define SSSRAID_SSD_PD_QD 64 +#define SSSRAID_SSD_VD_QD 256 + +#define BGTASK_TYPE_REBUILD 4 +#define USR_CMD_READ 0xc2 +#define USR_CMD_RDLEN 0x1000 +#define USR_CMD_VDINFO 0x704 +#define USR_CMD_BGTASK 0x504 +#define VDINFO_PARAM_LEN 0x04 + +/* + * SSSRAID virtual device information struct + */ +struct sssraid_vd_info { + __u8 name[32]; + __le16 id; + __u8 rg_id; + __u8 rg_level; + __u8 sg_num; + __u8 sg_disk_num; + __u8 vd_status; + __u8 vd_type; + __u8 rsvd1[4056]; +}; + +#define MAX_REALTIME_BGTASK_NUM 32 + +struct bgtask_info { + __u8 type; + __u8 progress; + __u8 rate; + __u8 rsvd0; + __le16 vd_id; + __le16 time_left; + __u8 rsvd1[4]; +}; + +struct sssraid_bgtask { + __u8 sw; + __u8 task_num; + __u8 rsvd[6]; + struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM]; +}; + +/* + * SSSRAID scsi device information struct + */ +struct sssraid_dev_info { + __le32 hdid; + __le16 target; + __u8 channel; + __u8 lun; + __u8 attr; + __u8 flag; + __le16 max_io_kb; +}; + +#define IOQ_PT_DATA_LEN 4096 +#define MAX_DEV_ENTRY_PER_PAGE_4K 340 +struct sssraid_dev_list { + __le32 dev_num; + __u32 rsvd0[3]; + struct sssraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K]; +}; + +/* + * SSSRAID scsi device host data struct + */ +struct sssraid_sdev_hostdata { + u32 hdid; + u16 max_io_kb; + u8 attr; + u8 flag; + u8 rg_id; + u8 hwq; + u16 pend_count; +}; + +extern unsigned char small_pool_num; +extern u32 io_queue_depth; +extern u32 max_hwq_num; +extern bool work_mode; +irqreturn_t sssraid_isr_poll(int irq, void *privdata); +bool sssraid_poll_cq(struct sssraid_ioc *sdioc, u16 qidx, int cid); +void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd); +int sssraid_get_dev_list(struct sssraid_ioc *sdioc, + struct sssraid_dev_info *devices); +int sssraid_submit_admin_sync_cmd(struct sssraid_ioc *sdioc, + struct 
sssraid_admin_command *cmd, + u32 *result0, u32 *result1, u32 timeout); +int sssraid_send_abort_cmd(struct sssraid_ioc *sdioc, u32 hdid, u16 qidx, + u16 cid); +int sssraid_send_reset_cmd(struct sssraid_ioc *sdioc, u8 type, u32 hdid); +void sssraid_adm_timeout(struct sssraid_ioc *sdioc, struct sssraid_cmd *cmd); +int sssraid_init_ioc(struct sssraid_ioc *sdioc, u8 re_init); +void sssraid_cleanup_ioc(struct sssraid_ioc *sdioc, u8 re_init); +int sssraid_soft_reset_handler(struct sssraid_ioc *sdioc); +void sssraid_free_iod_res(struct sssraid_ioc *sdioc, struct sssraid_iod *iod); +bool sssraid_change_host_state(struct sssraid_ioc *sdioc, + enum sssraid_state newstate); +int sssraid_configure_timestamp(struct sssraid_ioc *sdioc); +int sssraid_init_ctrl_info(struct sssraid_ioc *sdioc); +struct sssraid_cmd *sssraid_get_cmd(struct sssraid_ioc *sdioc, + enum sssraid_cmd_type type); +void sssraid_put_cmd(struct sssraid_ioc *sdioc, struct sssraid_cmd *cmd, + enum sssraid_cmd_type type); +int sssraid_send_event_ack(struct sssraid_ioc *sdioc, u8 event, u32 event_ctx, + u16 cid); +struct sssraid_fwevt *sssraid_alloc_fwevt(int len); +void sssraid_fwevt_add_to_list(struct sssraid_ioc *sdioc, + struct sssraid_fwevt *fwevt); +void sssraid_cleanup_fwevt_list(struct sssraid_ioc *sdioc); +void sssraid_ioc_enable_intr(struct sssraid_ioc *sdioc); +void sssraid_ioc_disable_intr(struct sssraid_ioc *sdioc); +void sssraid_cleanup_resources(struct sssraid_ioc *sdioc); +void sssraid_complete_cqes(struct sssraid_ioc *sdioc, u16 qidx, u16 start, + u16 end); +int sssraid_io_map_data(struct sssraid_ioc *sdioc, struct sssraid_iod *iod, + struct scsi_cmnd *scmd, + struct sssraid_ioq_command *ioq_cmd); +void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd, + struct sssraid_completion *cqe); +void sssraid_scan_disk(struct sssraid_ioc *sdioc); +void sssraid_complete_aen(struct sssraid_ioc *sdioc, + struct sssraid_completion *cqe); +void sssraid_back_all_io(struct sssraid_ioc *sdioc); + +static inline void **sssraid_iod_list(struct sssraid_iod *iod) +{ + return iod->list; +} + +#endif diff --git a/drivers/scsi/sssraid/sssraid_debug.h b/drivers/scsi/sssraid/sssraid_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..0bb92967840521a92d145d567fdd74853e5c87e6 --- /dev/null +++ b/drivers/scsi/sssraid/sssraid_debug.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 3SNIC Information Technology, Ltd */ + +/* 3SNIC RAID SSSXXX Series Linux Driver */ + +#ifndef SSSRAID_DEBUG_H_INCLUDED +#define SSSRAID_DEBUG_H_INCLUDED + +/* + * debug levels + */ +#define SSSRAID_DEBUG 0x00000001 + +/* + * debug macros + */ + +#define ioc_err(ioc, fmt, ...) pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_notice(ioc, fmt, ...) \ + pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_warn(ioc, fmt, ...) pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_info(ioc, fmt, ...) pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) + +#define dbgprint(IOC, FMT, ...) 
\ + do { \ + if (unlikely(IOC->logging_level & SSSRAID_DEBUG)) \ + pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \ + } while (0) + +#endif /* SSSRAID_DEBUG_H_INCLUDED */ diff --git a/drivers/scsi/sssraid/sssraid_fw.c b/drivers/scsi/sssraid/sssraid_fw.c new file mode 100644 index 0000000000000000000000000000000000000000..da785a353118a731155e816c999207c1cafaeec0 --- /dev/null +++ b/drivers/scsi/sssraid/sssraid_fw.c @@ -0,0 +1,1881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 3SNIC Information Technology, Ltd */ + +/* 3SNIC RAID SSSXXX Series Linux Driver */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "sssraid.h" +#include "sssraid_debug.h" + +static int sssraid_wait_ready(struct sssraid_ioc *sdioc, u64 cap, bool enabled) +{ + unsigned long timeout = + ((SSSRAID_CAP_TIMEOUT(cap) + 1) * SSSRAID_CAP_TIMEOUT_UNIT_MS) + + jiffies; + u32 bit = enabled ? SSSRAID_CSTS_RDY : 0; + + while ((readl(sdioc->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY) != + bit) { + usleep_range(1000, 2000); + if (fatal_signal_pending(current)) + return -EINTR; + + if (time_after(jiffies, timeout)) { + ioc_err(sdioc, "controller not ready, aborting %s\n", + enabled ? "initialization" : "reset"); + return -ENODEV; + } + } + return 0; +} + +static int sssraid_enable_ctrl(struct sssraid_ioc *sdioc) +{ + u64 cap = sdioc->cap; + u32 dev_page_min = SSSRAID_CAP_MPSMIN(cap) + 12; + u32 page_shift = PAGE_SHIFT; + + if (page_shift < dev_page_min) { + ioc_err(sdioc, + "err: minimum ioc page size[%u], too large for host[%u]\n", + 1U << dev_page_min, 1U << page_shift); + return -ENODEV; + } + + page_shift = + min_t(unsigned int, SSSRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT); + sdioc->page_size = 1U << page_shift; + + sdioc->ctrl_config = SSSRAID_CC_CSS_NVM; + sdioc->ctrl_config |= (page_shift - 12) << SSSRAID_CC_MPS_SHIFT; + sdioc->ctrl_config |= SSSRAID_CC_AMS_RR | SSSRAID_CC_SHN_NONE; + sdioc->ctrl_config |= SSSRAID_CC_IOSQES | SSSRAID_CC_IOCQES; + sdioc->ctrl_config |= SSSRAID_CC_ENABLE; + writel(sdioc->ctrl_config, sdioc->bar + SSSRAID_REG_CC); + + return sssraid_wait_ready(sdioc, cap, true); +} + +static int sssraid_disable_ctrl(struct sssraid_ioc *sdioc) +{ + sdioc->ctrl_config &= ~SSSRAID_CC_SHN_MASK; + sdioc->ctrl_config &= ~SSSRAID_CC_ENABLE; + writel(sdioc->ctrl_config, sdioc->bar + SSSRAID_REG_CC); + + return sssraid_wait_ready(sdioc, sdioc->cap, false); +} + +static int sssraid_shutdown_ctrl(struct sssraid_ioc *sdioc) +{ + unsigned long timeout = + le32_to_cpu(sdioc->ctrl_info->rtd3e) / 1000000 * HZ + jiffies; + + sdioc->ctrl_config &= ~SSSRAID_CC_SHN_MASK; + sdioc->ctrl_config |= SSSRAID_CC_SHN_NORMAL; + writel(sdioc->ctrl_config, sdioc->bar + SSSRAID_REG_CC); + + while ((readl(sdioc->bar + SSSRAID_REG_CSTS) & + SSSRAID_CSTS_SHST_MASK) != SSSRAID_CSTS_SHST_CMPLT) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + ioc_err(sdioc, + "ioc shutdown incomplete, abort shutdown\n"); + return -ENODEV; + } + } + return 0; +} + +static int sssraid_remap_bar(struct sssraid_ioc *sdioc, u32 size) +{ + struct pci_dev *pdev = sdioc->pdev; + + if (size > pci_resource_len(pdev, 0)) { + ioc_err(sdioc, "err: input size[%u] exceed bar0 length[%llu]\n", + size, pci_resource_len(pdev, 0)); + return -ENODEV; + } + + if 
(sdioc->bar) + iounmap(sdioc->bar); + + sdioc->bar = ioremap(pci_resource_start(pdev, 0), size); + if (!sdioc->bar) { + ioc_err(sdioc, "err: ioremap for bar0 failed\n"); + return -ENODEV; + } + sdioc->dbs = sdioc->bar + SSSRAID_REG_DBS; + + return 0; +} + +static int sssraid_create_dma_pools(struct sssraid_ioc *sdioc) +{ + int i; + char poolname[20] = { 0 }; + + sdioc->prp_page_pool = dma_pool_create( + "prp list page", &sdioc->pdev->dev, PAGE_SIZE, PAGE_SIZE, 0); + + if (!sdioc->prp_page_pool) { + ioc_err(sdioc, "err: create prp page pool failed\n"); + return -ENOMEM; + } + + for (i = 0; i < small_pool_num; i++) { + sprintf(poolname, "prp_list_256_%d", i); + sdioc->prp_small_pool[i] = + dma_pool_create(poolname, &sdioc->pdev->dev, + SMALL_POOL_SIZE, SMALL_POOL_SIZE, 0); + + if (!sdioc->prp_small_pool[i]) { + ioc_err(sdioc, "err: create prp small pool %d failed\n", + i); + goto destroy_prp_small_pool; + } + } + + return 0; + +destroy_prp_small_pool: + while (i > 0) + dma_pool_destroy(sdioc->prp_small_pool[--i]); + dma_pool_destroy(sdioc->prp_page_pool); + + return -ENOMEM; +} + +static void sssraid_destroy_dma_pools(struct sssraid_ioc *sdioc) +{ + int i; + + for (i = 0; i < small_pool_num; i++) + dma_pool_destroy(sdioc->prp_small_pool[i]); + dma_pool_destroy(sdioc->prp_page_pool); +} + +static int sssraid_alloc_resources(struct sssraid_ioc *sdioc) +{ + int retval, nqueue; + + sdioc->ctrl_info = kzalloc_node(sizeof(*sdioc->ctrl_info), GFP_KERNEL, + sdioc->numa_node); + if (!sdioc->ctrl_info) + return -ENOMEM; + + retval = sssraid_create_dma_pools(sdioc); + if (retval) { + ioc_err(sdioc, "err: failure at create dma pool!\n"); + goto free_ctrl_info; + } + + /* not num_online_cpus */ + nqueue = min(num_possible_cpus(), max_hwq_num) + 1; + sdioc->cqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_cqueue), + GFP_KERNEL, sdioc->numa_node); + if (!sdioc->cqinfo) { + retval = -ENOMEM; + ioc_err(sdioc, "err: failure at alloc memory for cqueue!"); + goto destroy_dma_pools; + } + + sdioc->sqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_squeue), + GFP_KERNEL, sdioc->numa_node); + if (!sdioc->sqinfo) { + retval = -ENOMEM; + ioc_err(sdioc, "err: failure at alloc memory for squeue!"); + goto free_cqueues; + } + + /* sssraid_alloc_admin_cmds moved to sssraid_init_ioc */ + + ioc_info(sdioc, "Request Queues Count: %d\n", nqueue); + + return 0; + +free_cqueues: + kfree(sdioc->cqinfo); +destroy_dma_pools: + sssraid_destroy_dma_pools(sdioc); +free_ctrl_info: + kfree(sdioc->ctrl_info); + + return retval; +} + +void sssraid_ioc_enable_intr(struct sssraid_ioc *sdioc) +{ + sdioc->intr_enabled = 1; +} + +void sssraid_ioc_disable_intr(struct sssraid_ioc *sdioc) +{ + u16 i, max_vectors; + + sdioc->intr_enabled = 0; + max_vectors = sdioc->intr_info_count; + + for (i = 0; i < max_vectors; i++) + synchronize_irq(pci_irq_vector(sdioc->pdev, i)); +} + +static int sssraid_setup_resources(struct sssraid_ioc *sdioc) +{ + struct pci_dev *pdev = sdioc->pdev; + int retval = 0; + u64 maskbit = SSSRAID_DMA_MSK_BIT_MAX; + + if (pci_enable_device_mem(pdev)) { + ioc_err(sdioc, "err: pci_enable_device_mem failed\n"); + retval = -ENODEV; + goto out_failed; + } + + retval = pci_request_mem_regions(pdev, SSSRAID_DRIVER_NAME); + if (retval) { + ioc_err(sdioc, "err: fail to request memory regions\n"); + retval = -ENODEV; + goto out_failed; + } + + /* get cap value at first, so keep + * sssraid_remap_bar(hdev, SSSRAID_REG_DBS + 4096) + * ioremap(pci_resource_start(..)) still in sssraid_remap_bar + */ + retval = sssraid_remap_bar(sdioc, 
SSSRAID_REG_DBS + 4096); + if (retval) { + ioc_err(sdioc, "Failed to re-map bar, error %d\n", retval); + goto out_failed; + } + + pci_set_master(pdev); + + if (readl(sdioc->bar + SSSRAID_REG_CSTS) == U32_MAX) { + retval = -ENODEV; + ioc_err(sdioc, "read BAR offset:csts register failed\n"); + goto out_failed; + } + + sdioc->cap = lo_hi_readq(sdioc->bar + SSSRAID_REG_CAP); + sdioc->ioq_depth = + min_t(u32, SSSRAID_CAP_MQES(sdioc->cap) + 1, io_queue_depth); + sdioc->db_stride = 1 << SSSRAID_CAP_STRIDE(sdioc->cap); + + maskbit = SSSRAID_CAP_DMAMASK(sdioc->cap); + if (maskbit < 32 || maskbit > SSSRAID_DMA_MSK_BIT_MAX) { + ioc_notice(sdioc, + "err: DMA MASK BIT invalid[%llu], set to default\n", + maskbit); + maskbit = SSSRAID_DMA_MSK_BIT_MAX; + } + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) { + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + ioc_err(sdioc, + "err: Set DMA MASK: 32 BIT and coherent failed\n"); + retval = -ENODEV; + goto out_failed; + } + ioc_info(sdioc, "Set DMA MASK: 32 BIT success\n"); + } else { + ioc_info(sdioc, "Set DMA MASK: %llu BIT success\n", maskbit); + } + + pci_set_drvdata(pdev, sdioc->shost); + + pci_enable_pcie_error_reporting(pdev); + pci_save_state(pdev); + + sssraid_ioc_disable_intr(sdioc); + + return retval; + +out_failed: + sssraid_cleanup_resources(sdioc); + return retval; +} + +static int sssraid_alloc_admin_cmds(struct sssraid_ioc *sdioc) +{ + u16 i; + + INIT_LIST_HEAD(&sdioc->adm_cmd_list); + spin_lock_init(&sdioc->adm_cmd_lock); + + sdioc->adm_cmds = kcalloc_node(SSSRAID_AMDQ_BLK_MQ_DEPTH, + sizeof(struct sssraid_cmd), GFP_KERNEL, + sdioc->numa_node); + + if (!sdioc->adm_cmds) { + ioc_err(sdioc, "Alloc admin cmds failed\n"); + return -ENOMEM; + } + + for (i = 0; i < SSSRAID_AMDQ_BLK_MQ_DEPTH; i++) { + sdioc->adm_cmds[i].qid = 0; + sdioc->adm_cmds[i].cid = i; + list_add_tail(&(sdioc->adm_cmds[i].list), &sdioc->adm_cmd_list); + } + + ioc_info(sdioc, "Alloc admin cmds success, count: %d\n", + SSSRAID_AMDQ_BLK_MQ_DEPTH); + + return 0; +} + +static int sssraid_alloc_qpair(struct sssraid_ioc *sdioc, u16 qidx, u16 depth) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + int retval = 0; + + if (sdioc->init_done_queue_cnt > qidx) { + ioc_warn(sdioc, "warn: queue: %d exists!\n", qidx); + return 0; + } + + cqinfo->cqes = dma_alloc_coherent(&sdioc->pdev->dev, CQ_SIZE(depth), + &cqinfo->cq_dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (!cqinfo->cqes) + return -ENOMEM; + + sqinfo->sq_cmds = + dma_alloc_coherent(&sdioc->pdev->dev, SQ_SIZE(qidx, depth), + &sqinfo->sq_dma_addr, GFP_KERNEL); + if (!sqinfo->sq_cmds) { + retval = -ENOMEM; + ioc_err(sdioc, "failure at alloc dma space for squeue cmds.\n"); + goto free_cqes; + } + + /* + * if single hw queue, we do not need to alloc a sense buffer + * for every queue; a single shared buffer is allocated in sssraid_setup_io_qpair. 
+ */ + if (work_mode) + goto initq; + + /* alloc sense buffer */ + sqinfo->sense = dma_alloc_coherent(&sdioc->pdev->dev, SENSE_SIZE(depth), + &sqinfo->sense_dma_addr, + GFP_KERNEL | __GFP_ZERO); + if (!sqinfo->sense) { + retval = -ENOMEM; + ioc_err(sdioc, "failure at alloc dma space for sense data.\n"); + goto free_sq_cmds; + } + +initq: + spin_lock_init(&sqinfo->sq_lock); + spin_lock_init(&cqinfo->cq_lock); + cqinfo->sdioc = sdioc; + sqinfo->sdioc = sdioc; + sqinfo->q_depth = depth; + sqinfo->qidx = qidx; + /* cq_vector replaced by msix_index */ + + /* + * online_queues: completely initialized queue count: sssraid_init_queue + * queue_count: allocated but not completely initialized queue count: + * sssraid_alloc_queue. online_queues/queue_count replaced by + * init_done_queue_cnt. + */ + sdioc->init_done_queue_cnt++; + + return 0; + +free_sq_cmds: + dma_free_coherent(&sdioc->pdev->dev, SQ_SIZE(qidx, depth), + (void *)sqinfo->sq_cmds, sqinfo->sq_dma_addr); +free_cqes: + dma_free_coherent(&sdioc->pdev->dev, CQ_SIZE(depth), + (void *)cqinfo->cqes, cqinfo->cq_dma_addr); + return retval; +} + +static void sssraid_init_queue(struct sssraid_ioc *sdioc, u16 qidx) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + + memset((void *)cqinfo->cqes, 0, CQ_SIZE(sqinfo->q_depth)); + + sqinfo->sq_tail = 0; + atomic_set(&sqinfo->inflight, 0); + + cqinfo->cq_head = 0; + cqinfo->cq_phase = 1; + sqinfo->q_db = &sdioc->dbs[qidx * 2 * sdioc->db_stride]; + sqinfo->prp_small_pool = sdioc->prp_small_pool[qidx % small_pool_num]; +} + +static int sssraid_setup_admin_qpair(struct sssraid_ioc *sdioc) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[0]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[0]; + u32 aqa; + int retval; + + ioc_info(sdioc, "Starting disable ctrl...\n"); + + retval = sssraid_disable_ctrl(sdioc); + if (retval) { + ioc_err(sdioc, "disable ctrl failed\n"); + return retval; + } + + /* this func don't alloc admin queue */ + + aqa = sqinfo->q_depth - 1; + aqa |= aqa << 16; + writel(aqa, sdioc->bar + SSSRAID_REG_AQA); + lo_hi_writeq(sqinfo->sq_dma_addr, sdioc->bar + SSSRAID_REG_ASQ); + lo_hi_writeq(cqinfo->cq_dma_addr, sdioc->bar + SSSRAID_REG_ACQ); + + ioc_info(sdioc, "Starting enable ctrl...\n"); + + retval = sssraid_enable_ctrl(sdioc); + if (retval) { + ioc_err(sdioc, "enable ctrl failed\n"); + retval = -ENODEV; + return retval; + } + + /* interrupt registry not here */ + /* cq_vector replaced by msix_index */ + + sssraid_init_queue(sdioc, 0); + + ioc_info(sdioc, "success, init done queuecount:[%d], pagesize[%d]\n", + sdioc->init_done_queue_cnt, sdioc->page_size); + + return 0; +} + +static void sssraid_cleanup_isr(struct sssraid_ioc *sdioc) +{ + u16 i; + + sssraid_ioc_disable_intr(sdioc); + + if (!sdioc->intr_info) + return; + + for (i = 0; i < sdioc->intr_info_count; i++) + free_irq(pci_irq_vector(sdioc->pdev, i), + (sdioc->intr_info + i)); + + kfree(sdioc->intr_info); + sdioc->intr_info = NULL; + sdioc->intr_info_count = 0; + pci_free_irq_vectors(sdioc->pdev); +} + +static void sssraid_complete_adminq_cmnd(struct sssraid_ioc *sdioc, u16 qidx, + struct sssraid_completion *cqe) +{ + struct sssraid_cmd *adm_cmd; + + adm_cmd = sdioc->adm_cmds + le16_to_cpu(cqe->cmd_id); + if (unlikely(adm_cmd->state == SSSRAID_CMDSTAT_IDLE)) { + ioc_warn(sdioc, + "warn: invalid cmd id %d completed on queue %d\n", + le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id)); + return; + } + + adm_cmd->status = le16_to_cpu(cqe->status) >> 1; + 
adm_cmd->result0 = le32_to_cpu(cqe->result); + adm_cmd->result1 = le32_to_cpu(cqe->result1); + + complete(&adm_cmd->cmd_done); +} + +static inline bool sssraid_cqe_pending(struct sssraid_cqueue *cqinfo) +{ + return (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].status) & 1) == + cqinfo->cq_phase; +} + +static inline void sssraid_update_cq_head(struct sssraid_ioc *sdioc, u16 qidx) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + + if (++cqinfo->cq_head == sqinfo->q_depth) { + cqinfo->cq_head = 0; + cqinfo->cq_phase = !cqinfo->cq_phase; + } +} + +static inline bool sssraid_process_cq(struct sssraid_ioc *sdioc, u16 qidx, + u16 *start, u16 *end, int tag) +{ + bool found = false; + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + + *start = cqinfo->cq_head; + while (!found && sssraid_cqe_pending(cqinfo)) { + if (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].cmd_id) == tag) + found = true; + sssraid_update_cq_head(sdioc, qidx); + } + *end = cqinfo->cq_head; + + if (*start != *end) + writel(cqinfo->cq_head, + sqinfo->q_db + sqinfo->sdioc->db_stride); + + return found; +} + +static irqreturn_t sssraid_isr(int irq, void *privdata) +{ + struct sssraid_intr_info *intr_info = privdata; + struct sssraid_ioc *sdioc = intr_info->sdioc; + irqreturn_t ret = IRQ_NONE; + struct sssraid_cqueue *cqinfo; + u16 midx, start, end; + + if (!intr_info) + return IRQ_NONE; + + if (!sdioc->intr_enabled) + return IRQ_NONE; + + midx = intr_info->msix_index; + cqinfo = &sdioc->cqinfo[midx]; + + spin_lock(&cqinfo->cq_lock); + if (cqinfo->cq_head != cqinfo->last_cq_head) + ret = IRQ_HANDLED; + + sssraid_process_cq(sdioc, midx, &start, &end, -1); + cqinfo->last_cq_head = cqinfo->cq_head; + spin_unlock(&cqinfo->cq_lock); + + if (start != end) { + sssraid_complete_cqes(sdioc, midx, start, end); + ret = IRQ_HANDLED; + } + return ret; +} + +irqreturn_t sssraid_isr_poll(int irq, void *privdata) +{ + return IRQ_NONE; +} + +bool sssraid_poll_cq(struct sssraid_ioc *sdioc, u16 qidx, int cid) +{ + u16 start, end; + bool found; + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + + if (!sssraid_cqe_pending(cqinfo)) + return 0; + + spin_lock_irq(&cqinfo->cq_lock); + found = sssraid_process_cq(sdioc, qidx, &start, &end, cid); + spin_unlock_irq(&cqinfo->cq_lock); + + sssraid_complete_cqes(sdioc, qidx, start, end); + return found; +} + +static inline int sssraid_request_irq(struct sssraid_ioc *sdioc, u16 index) +{ + struct pci_dev *pdev = sdioc->pdev; + struct sssraid_intr_info *intr_info = sdioc->intr_info + index; + int retval = 0; + + intr_info->sdioc = sdioc; + intr_info->msix_index = index; + intr_info->cqinfo = NULL; + + snprintf(intr_info->name, SSSRAID_NAME_LENGTH, "%s%d-msix%d", + SSSRAID_DRIVER_NAME, sdioc->instance, index); + + retval = request_threaded_irq(pci_irq_vector(pdev, index), sssraid_isr, + sssraid_isr_poll, IRQF_SHARED, + intr_info->name, intr_info); + + if (retval) { + ioc_err(sdioc, + "Err: %s: unable to allocate interrupt on vector %d!\n", + intr_info->name, pci_irq_vector(pdev, index)); + return retval; + } + + return retval; +} + +static int sssraid_setup_isr(struct sssraid_ioc *sdioc, u8 setup_one) +{ + unsigned int irq_flags = PCI_IRQ_MSIX; + u16 max_vectors = 0, i; + int retval = 0; + + struct irq_affinity desc = { .pre_vectors = 1 }; + + sssraid_cleanup_isr(sdioc); + + if (setup_one) + max_vectors = 1; + else { + max_vectors = sdioc->before_affinity_msix_cnt; + + ioc_info(sdioc, + 
"Before affinity, MSI-x vectors requested: %d\n", + max_vectors); + } + + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + + i = pci_alloc_irq_vectors_affinity(sdioc->pdev, 1, max_vectors, + irq_flags, &desc); + + if (i <= 0) { + ioc_err(sdioc, "Err: alloc irq vectors fail.\n"); + goto out_failed; + } + if (i != max_vectors) { + ioc_warn( + sdioc, + "Allocated vectors (%d) are less than requested (%d)\n", + i, max_vectors); + + max_vectors = i; + } + + sdioc->intr_info = kzalloc( + sizeof(struct sssraid_intr_info) * max_vectors, GFP_KERNEL); + if (!sdioc->intr_info) { + retval = -ENOMEM; + ioc_err(sdioc, "err: failed to alloc memory for intr_info!\n"); + pci_free_irq_vectors(sdioc->pdev); + goto out_failed; + } + + for (i = 0; i < max_vectors; i++) { + retval = sssraid_request_irq(sdioc, i); + if (retval) { + ioc_err(sdioc, + "err: request irq for pci device failed.\n"); + sdioc->intr_info_count = i; + goto out_failed; + } + } + + /* intr_info_count replace max_qid */ + sdioc->intr_info_count = max_vectors; + sssraid_ioc_enable_intr(sdioc); + return retval; +out_failed: + sssraid_cleanup_isr(sdioc); + + return retval; +} + +static bool sssraid_adm_need_reset(struct sssraid_admin_command *cmd) +{ + switch (cmd->common.opcode) { + case SSSRAID_ADM_DELETE_SQ: + case SSSRAID_ADM_CREATE_SQ: + case SSSRAID_ADM_DELETE_CQ: + case SSSRAID_ADM_CREATE_CQ: + case SSSRAID_ADM_SET_FEATURES: + return false; + default: + return true; + } +} + +void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd) +{ + u32 sqes = SQE_SIZE(sqinfo->qidx); + unsigned long flags; + struct sssraid_admin_common_command *acd = + (struct sssraid_admin_common_command *)cmd; + + spin_lock_irqsave(&sqinfo->sq_lock, flags); + memcpy((sqinfo->sq_cmds + sqes * sqinfo->sq_tail), cmd, sqes); + if (++sqinfo->sq_tail == sqinfo->q_depth) + sqinfo->sq_tail = 0; + + writel(sqinfo->sq_tail, sqinfo->q_db); + spin_unlock_irqrestore(&sqinfo->sq_lock, flags); + + dbgprint(sqinfo->sdioc, + "cid[%d] qidx[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n", + le16_to_cpu(acd->command_id), sqinfo->qidx, acd->opcode, + acd->flags, le32_to_cpu(acd->hdid)); +} + +int sssraid_submit_admin_sync_cmd(struct sssraid_ioc *sdioc, + struct sssraid_admin_command *cmd, + u32 *result0, u32 *result1, u32 timeout) +{ + struct sssraid_cmd *adm_cmd = sssraid_get_cmd(sdioc, SSSRAID_CMD_ADM); + + if (!adm_cmd) { + ioc_err(sdioc, "err: get admin cmd failed\n"); + return -EFAULT; + } + + timeout = timeout ? 
timeout : ADMIN_TIMEOUT; + + /* + * watch dog not as optimized as + * init_completion/complete + */ + init_completion(&adm_cmd->cmd_done); + + cmd->common.command_id = cpu_to_le16(adm_cmd->cid); + sssraid_submit_cmd(&sdioc->sqinfo[0], cmd); + + if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) { + ioc_err(sdioc, + "err: cid[%d] qidx[%d] timeout, opcode[0x%x] subopcode[0x%x]\n", + adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode, + cmd->usr_cmd.info_0.subopcode); + + /* reset controller if admin timeout */ + if (sssraid_adm_need_reset(cmd)) + sssraid_adm_timeout(sdioc, adm_cmd); + + sssraid_put_cmd(sdioc, adm_cmd, SSSRAID_CMD_ADM); + return -ETIME; + } + + if (result0) + *result0 = adm_cmd->result0; + if (result1) + *result1 = adm_cmd->result1; + + sssraid_put_cmd(sdioc, adm_cmd, SSSRAID_CMD_ADM); + + return adm_cmd->status; +} + +static int sssraid_get_ctrl_info(struct sssraid_ioc *sdioc, + struct sssraid_ctrl_info *ctrl_info) +{ + struct sssraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int retval; + + data_ptr = dma_alloc_coherent(&sdioc->pdev->dev, PAGE_SIZE, &data_dma, + GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = SSSRAID_ADM_GET_INFO; + admin_cmd.get_info.type = SSSRAID_GET_INFO_CTRL; + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + retval = + sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); + if (!retval) + memcpy(ctrl_info, data_ptr, sizeof(struct sssraid_ctrl_info)); + + dma_free_coherent(&sdioc->pdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return retval; +} + +int sssraid_init_ctrl_info(struct sssraid_ioc *sdioc) +{ + int retval; + + sdioc->ctrl_info->nd = cpu_to_le32(240); + sdioc->ctrl_info->mdts = 8; + sdioc->ctrl_info->max_cmds = cpu_to_le16(4096); + sdioc->ctrl_info->max_num_sge = cpu_to_le16(128); + sdioc->ctrl_info->max_channel = cpu_to_le16(4); + sdioc->ctrl_info->max_tgt_id = cpu_to_le32(3239); + sdioc->ctrl_info->max_lun = cpu_to_le16(2); + + retval = sssraid_get_ctrl_info(sdioc, sdioc->ctrl_info); + if (retval) + ioc_err(sdioc, "err: fetch controller info fail, ret = %d\n", + retval); + + ioc_info(sdioc, "support disk cnt = %d\n", + le32_to_cpu(sdioc->ctrl_info->nd)); + ioc_info(sdioc, "max concurrent cmd = %d\n", + le16_to_cpu(sdioc->ctrl_info->max_cmds)); + ioc_info(sdioc, "max channel = %d\n", + le16_to_cpu(sdioc->ctrl_info->max_channel)); + ioc_info(sdioc, "max target = %d\n", + le32_to_cpu(sdioc->ctrl_info->max_tgt_id)); + ioc_info(sdioc, "max lun = %d\n", + le16_to_cpu(sdioc->ctrl_info->max_lun)); + ioc_info(sdioc, "max sg entry cnt = %d\n", + le16_to_cpu(sdioc->ctrl_info->max_num_sge)); + ioc_info(sdioc, "lun boot num = %d\n", + le16_to_cpu(sdioc->ctrl_info->lun_num_in_boot)); + ioc_info(sdioc, "buf in 4K size = %d\n", sdioc->ctrl_info->mdts); + ioc_info(sdioc, "ACL = %d\n", sdioc->ctrl_info->acl); + ioc_info(sdioc, "async evt req depth= %d\n", sdioc->ctrl_info->aerl); + ioc_info(sdioc, "card type = %d\n", + sdioc->ctrl_info->card_type); + ioc_info(sdioc, "timeout in microsec= %d\n", + le32_to_cpu(sdioc->ctrl_info->rtd3e)); + ioc_info(sdioc, "serial number = %s\n", sdioc->ctrl_info->sn); + ioc_info(sdioc, "FW version = %s\n", sdioc->ctrl_info->fr); + + if (!sdioc->ctrl_info->aerl) + sdioc->ctrl_info->aerl = 1; + if (sdioc->ctrl_info->aerl > SSSRAID_NR_AEN_CMDS) + sdioc->ctrl_info->aerl = SSSRAID_NR_AEN_CMDS; + + sdioc->scsi_qd = work_mode ? 
+ le16_to_cpu(sdioc->ctrl_info->max_cmds) : + sdioc->ioq_depth - SSSRAID_PTCMDS_PERQ; + return 0; +} + +static int sssraid_set_features(struct sssraid_ioc *sdioc, u32 fid, u32 dword11, + void *buffer, size_t buflen, u32 *result) +{ + struct sssraid_admin_command admin_cmd; + int ret; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + + if (buffer && buflen) { + data_ptr = dma_alloc_coherent(&sdioc->pdev->dev, buflen, + &data_dma, GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memcpy(data_ptr, buffer, buflen); + } + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.features.opcode = SSSRAID_ADM_SET_FEATURES; + admin_cmd.features.fid = cpu_to_le32(fid); + admin_cmd.features.dword11 = cpu_to_le32(dword11); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, result, NULL, 0); + + if (data_ptr) + dma_free_coherent(&sdioc->pdev->dev, buflen, data_ptr, + data_dma); + + return ret; +} + +static int sssraid_set_queue_cnt(struct sssraid_ioc *sdioc, u32 *cnt) +{ + u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16); + u32 nr_ioqs, result; + int status; + + status = sssraid_set_features(sdioc, SSSRAID_FEAT_NUM_QUEUES, q_cnt, + NULL, 0, &result); + if (status) { + ioc_err(sdioc, "err: set queue count failed, status: %d\n", + status); + return -EIO; + } + + nr_ioqs = min(result & 0xffff, result >> 16) + 1; + *cnt = min(*cnt, nr_ioqs); + if (*cnt == 0) { + ioc_err(sdioc, "err: illegal queue count: zero\n"); + return -EIO; + } + return 0; +} + +static int sssraid_create_cq(struct sssraid_ioc *sdioc, u16 qidx) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[qidx]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + struct sssraid_admin_command admin_cmd; + int flags = SSSRAID_QUEUE_PHYS_CONTIG | SSSRAID_CQ_IRQ_ENABLED; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_cq.opcode = SSSRAID_ADM_CREATE_CQ; + admin_cmd.create_cq.prp1 = cpu_to_le64(cqinfo->cq_dma_addr); + admin_cmd.create_cq.cqid = cpu_to_le16(qidx); + admin_cmd.create_cq.qsize = cpu_to_le16(sqinfo->q_depth - 1); + admin_cmd.create_cq.cq_flags = cpu_to_le16(flags); + admin_cmd.create_cq.irq_vector = cpu_to_le16(qidx); + + return sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); +} + +static int sssraid_create_io_cq(struct sssraid_ioc *sdioc, u16 qidx) +{ + int retval; + struct sssraid_cqueue *cqinfo = sdioc->cqinfo + qidx; + u16 midx = qidx; + + retval = sssraid_create_cq(sdioc, qidx); + if (retval) + return retval; + + if (!sdioc->last_qcnt) + sdioc->last_qcnt = sdioc->init_done_queue_cnt; + /* + * cqinfo initialization at sssraid_init_queue + */ + sdioc->intr_info[midx].cqinfo = cqinfo; + + return retval; +} + +static int sssraid_create_sq(struct sssraid_ioc *sdioc, u16 qidx) +{ + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + struct sssraid_admin_command admin_cmd; + int flags = SSSRAID_QUEUE_PHYS_CONTIG; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.create_sq.opcode = SSSRAID_ADM_CREATE_SQ; + admin_cmd.create_sq.prp1 = cpu_to_le64(sqinfo->sq_dma_addr); + admin_cmd.create_sq.sqid = cpu_to_le16(qidx); + admin_cmd.create_sq.qsize = cpu_to_le16(sqinfo->q_depth - 1); + admin_cmd.create_sq.sq_flags = cpu_to_le16(flags); + admin_cmd.create_sq.cqid = cpu_to_le16(qidx); + + return sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); +} + +static int sssraid_create_io_sq(struct sssraid_ioc *sdioc, u16 qidx) +{ + return sssraid_create_sq(sdioc, qidx); +} + +int sssraid_get_dev_list(struct sssraid_ioc *sdioc, + struct 
sssraid_dev_info *devices) +{ + u32 nd = le32_to_cpu(sdioc->ctrl_info->nd); + struct sssraid_admin_command admin_cmd; + struct sssraid_dev_list *list_buf; + dma_addr_t data_dma = 0; + u32 i, idx, hdid, ndev; + int ret = 0; + + list_buf = dma_alloc_coherent(&sdioc->pdev->dev, PAGE_SIZE, &data_dma, + GFP_KERNEL); + if (!list_buf) + return -ENOMEM; + + for (idx = 0; idx < nd;) { + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.get_info.opcode = SSSRAID_ADM_GET_INFO; + admin_cmd.get_info.type = SSSRAID_GET_INFO_DEV_LIST; + admin_cmd.get_info.cdw11 = cpu_to_le32(idx); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, + NULL, 0); + + if (ret) { + ioc_err(sdioc, + "Err: Get FW disk list failed, support nd: %u, idx: %u, ret: %d\n", + nd, idx, ret); + goto out; + } + ndev = le32_to_cpu(list_buf->dev_num); + + ioc_info(sdioc, "ndev numbers: %u\n", ndev); + + for (i = 0; i < ndev; i++) { + hdid = le32_to_cpu(list_buf->devices[i].hdid); + ioc_info( + sdioc, + "Get FW disk: %u, hdid: %u, target: %d, channel: %d, lun: %d, attr[0x%x]\n", + i, hdid, + le16_to_cpu(list_buf->devices[i].target), + list_buf->devices[i].channel, + list_buf->devices[i].lun, + list_buf->devices[i].attr); + if (hdid > nd || hdid == 0) { + ioc_err(sdioc, "Err: hdid: %d invalid\n", hdid); + continue; + } + memcpy(&devices[hdid - 1], &list_buf->devices[i], + sizeof(struct sssraid_dev_info)); + } + idx += ndev; + + if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K) + break; + } + +out: + dma_free_coherent(&sdioc->pdev->dev, PAGE_SIZE, list_buf, data_dma); + return ret; +} + +/* send abort command via the admin queue temporarily */ +int sssraid_send_abort_cmd(struct sssraid_ioc *sdioc, u32 hdid, u16 qidx, + u16 cid) +{ + struct sssraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.abort.opcode = SSSRAID_ADM_ABORT_CMD; + admin_cmd.abort.hdid = cpu_to_le32(hdid); + admin_cmd.abort.sqid = cpu_to_le16(qidx); + admin_cmd.abort.cid = cpu_to_le16(cid); + + return sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); +} + +/* send reset command via the admin queue temporarily */ +int sssraid_send_reset_cmd(struct sssraid_ioc *sdioc, u8 type, u32 hdid) +{ + struct sssraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.reset.opcode = SSSRAID_ADM_RESET; + admin_cmd.reset.hdid = cpu_to_le32(hdid); + admin_cmd.reset.type = type; + + return sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); +} + +static int sssraid_delete_queue(struct sssraid_ioc *sdioc, u8 op, u16 qidx) +{ + struct sssraid_admin_command admin_cmd; + int retval; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.delete_queue.opcode = op; + admin_cmd.delete_queue.qid = cpu_to_le16(qidx); + + retval = + sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); + + if (retval) + ioc_err(sdioc, "Err: Delete %s:[%d] failed\n", + (op == SSSRAID_ADM_DELETE_CQ) ? 
"cq" : "sq", qidx); + + return retval; +} + +static int sssraid_delete_cq(struct sssraid_ioc *sdioc, u16 qidx) +{ + return sssraid_delete_queue(sdioc, SSSRAID_ADM_DELETE_CQ, qidx); +} + +void sssraid_adm_timeout(struct sssraid_ioc *sdioc, struct sssraid_cmd *cmd) +{ + /* command may be returned because controller reset */ + if (READ_ONCE(cmd->state) == SSSRAID_CMDSTAT_COMPLETE) + return; + + if (!sssraid_change_host_state(sdioc, SSSRAID_RESETTING)) { + ioc_info(sdioc, "Can't change to reset state\n"); + return; + } + sssraid_soft_reset_handler(sdioc); +} + +static int sssraid_create_io_qpair(struct sssraid_ioc *sdioc, u16 qidx) +{ + int retval; + + retval = sssraid_create_io_cq(sdioc, qidx); + if (retval) + return retval; + + retval = sssraid_create_io_sq(sdioc, qidx); + if (retval) + goto delete_cq; + + /* intr_info.msix_index substitute cq_vector */ + + /* io interrupt registry: + * not here, put above + */ + + sssraid_init_queue(sdioc, qidx); + + return 0; + +delete_cq: + sssraid_delete_cq(sdioc, qidx); + + return retval; +} + +static int sssraid_setup_io_qpair(struct sssraid_ioc *sdioc) +{ + u32 i, num_queues; + int retval = 0; + + num_queues = + min(sdioc->intr_info_count, sdioc->init_done_queue_cnt - 1); + for (i = 1; i <= num_queues; i++) { + retval = sssraid_create_io_qpair(sdioc, i); + if (retval) { + ioc_err(sdioc, "Err: Create queue[%d] failed\n", i); + break; + } + } + + if (work_mode && !sdioc->senses) { + sdioc->senses = dma_alloc_coherent( + &sdioc->pdev->dev, + SENSE_SIZE(SSSRAID_IO_BLK_MQ_DEPTH + + max_hwq_num * SSSRAID_PTCMDS_PERQ), + &sdioc->sense_dma_addr, GFP_KERNEL | __GFP_ZERO); + if (!sdioc->senses) + return -ENOMEM; + } + + ioc_info( + sdioc, + "init_done_queue_cnt[%d], intr_info_count[%d] num_queues[%d], last_online[%d]", + sdioc->init_done_queue_cnt, sdioc->intr_info_count, num_queues, + sdioc->last_qcnt); + + return retval >= 0 ? 
0 : retval; +} + +static int sssraid_alloc_ioq_ptcmds(struct sssraid_ioc *sdioc) +{ + int i; + int ptnum = SSSRAID_NR_IOQ_PTCMDS; + + INIT_LIST_HEAD(&sdioc->ioq_pt_list); + spin_lock_init(&sdioc->ioq_pt_lock); + + sdioc->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct sssraid_cmd), + GFP_KERNEL, sdioc->numa_node); + + if (!sdioc->ioq_ptcmds) { + ioc_err(sdioc, "Err: Alloc sync ioq ptcmds failed\n"); + return -ENOMEM; + } + + for (i = 0; i < ptnum; i++) { + sdioc->ioq_ptcmds[i].qid = i / SSSRAID_PTCMDS_PERQ + 1; + sdioc->ioq_ptcmds[i].cid = + i % SSSRAID_PTCMDS_PERQ + SSSRAID_IO_BLK_MQ_DEPTH; + list_add_tail(&(sdioc->ioq_ptcmds[i].list), + &sdioc->ioq_pt_list); + } + + ioc_info(sdioc, "Alloc sync ioq ptcmds success, ptnum: %d\n", ptnum); + + return 0; +} + +int sssraid_send_event_ack(struct sssraid_ioc *sdioc, u8 event, u32 event_ctx, + u16 cid) +{ + /* event,event_ctx no use at this time */ + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[0]; + struct sssraid_admin_command admin_cmd; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.common.opcode = SSSRAID_ADM_ASYNC_EVENT; + admin_cmd.common.command_id = cpu_to_le16(cid); + + sssraid_submit_cmd(sqinfo, &admin_cmd); + ioc_info(sdioc, "send async evt ack, cid[%d]\n", cid); + + return 0; +} + +static void sssraid_handle_aen_notice(struct sssraid_ioc *sdioc, u32 result) +{ + switch ((result & 0xff00) >> 8) { + case SSSRAID_AEN_DEV_CHANGED: + sssraid_scan_disk(sdioc); + break; + case SSSRAID_AEN_FW_ACT_START: + ioc_info(sdioc, "Activating FW starting\n"); + break; + case SSSRAID_AEN_HOST_PROBING: + break; + default: + ioc_warn(sdioc, "warn: async evt result %08x\n", result); + } +} + +static void sssraid_handle_aen_vs(struct sssraid_ioc *sdioc, u32 result, + u32 result1) +{ + switch ((result & 0xff00) >> 8) { + case SSSRAID_AEN_TIMESYN: + sssraid_configure_timestamp(sdioc); + break; + case SSSRAID_AEN_FW_ACT_FINISH: + ioc_info(sdioc, "Activating FW finish\n"); + if (sssraid_init_ctrl_info(sdioc)) + ioc_err(sdioc, + "Err: fetch ctrl info failed after fw act\n"); + break; + case SSSRAID_AEN_EVENT_MIN ... 
SSSRAID_AEN_EVENT_MAX: + ioc_info(sdioc, + "Rcv card async evt[%d], param1[0x%x] param2[0x%x]\n", + (result & 0xff00) >> 8, result, result1); + break; + default: + ioc_warn(sdioc, "warn: async evt result: 0x%x\n", result); + } +} + +static inline void sssraid_send_all_aen(struct sssraid_ioc *sdioc) +{ + u16 i; + + for (i = 0; i < sdioc->ctrl_info->aerl; i++) + sssraid_send_event_ack(sdioc, 0, 0, + i + SSSRAID_AMDQ_BLK_MQ_DEPTH); +} + +static int sssraid_disk_list_init(struct sssraid_ioc *sdioc) +{ + u32 nd = le32_to_cpu(sdioc->ctrl_info->nd); + + sdioc->devices = kzalloc_node(nd * sizeof(struct sssraid_dev_info), + GFP_KERNEL, sdioc->numa_node); + if (!sdioc->devices) + return -ENOMEM; + + return 0; +} + +int sssraid_configure_timestamp(struct sssraid_ioc *sdioc) +{ + __le64 ts; + int retval; + + ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); + retval = sssraid_set_features(sdioc, SSSRAID_FEAT_TIMESTAMP, 0, &ts, + sizeof(ts), NULL); + + if (retval) + ioc_err(sdioc, "Err: set timestamp fail, ret: %d\n", retval); + return retval; +} + +int sssraid_init_ioc(struct sssraid_ioc *sdioc, u8 re_init) +{ + int retval = 0; + int i; + u32 nr_ioqs, bar_size; + + if (!re_init) { + sdioc->cpu_count = num_online_cpus(); + + retval = sssraid_alloc_resources(sdioc); + if (retval) { + ioc_err(sdioc, + "Err: Failed to alloc resources, ret %d\n", + retval); + goto out_nocleanup; + } + + /* reset need re-setup */ + retval = sssraid_setup_resources(sdioc); + if (retval) { + ioc_err(sdioc, + "Err: Failed to setup resources, ret %d\n", + retval); + goto out_failed; + } + + retval = sssraid_alloc_admin_cmds(sdioc); + if (retval) { + ioc_err(sdioc, + "Err: Failed to alloc admin cmds, ret %d\n", + retval); + goto out_failed; + } + /* put here: + * alloc admin queue + */ + retval = sssraid_alloc_qpair(sdioc, 0, SSSRAID_ADMQ_DEPTH); + if (retval) { + ioc_err(sdioc, + "Err: Failed to alloc admin queue, ret %d\n", + retval); + goto out_failed; + } + } + + retval = sssraid_setup_admin_qpair(sdioc); + if (retval) + goto out_failed; + + /* 1. unregister all interrupt + * 2. admin interrupt registry + */ + retval = sssraid_setup_isr(sdioc, 1); + if (retval) { + ioc_err(sdioc, "Failed to setup ISR error %d\n", retval); + goto out_failed; + } + + if (!re_init) { + retval = sssraid_init_ctrl_info(sdioc); + if (retval) { + ioc_err(sdioc, "Failed to get ctrl info error %d\n", + retval); + goto out_failed; + } + } + + nr_ioqs = min(sdioc->cpu_count, max_hwq_num); + retval = sssraid_set_queue_cnt(sdioc, &nr_ioqs); + if (retval) { + ioc_err(sdioc, "Failed to set queue cnt error %d\n", retval); + goto out_failed; + } + + sdioc->before_affinity_msix_cnt = nr_ioqs + 1; + + /* 1. unregister all interrupt + * 2. admin interrupt re-registry + * 3. 
io interrupt registry + */ + retval = sssraid_setup_isr(sdioc, 0); + if (retval) { + ioc_err(sdioc, "Failed to re-setup ISR, error %d\n", retval); + goto out_failed; + } + + /* num_vecs no sense, abandon */ + + if (!re_init) { + /* remap */ + bar_size = SSSRAID_REG_DBS + + ((nr_ioqs + 1) * 8 * sdioc->db_stride); + retval = sssraid_remap_bar(sdioc, bar_size); + if (retval) { + ioc_err(sdioc, "Failed to re-map bar, error %d\n", + retval); + goto out_failed; + } + sdioc->sqinfo[0].q_db = sdioc->dbs; + + for (i = sdioc->init_done_queue_cnt; i < sdioc->intr_info_count; + i++) { + retval = + sssraid_alloc_qpair(sdioc, i, sdioc->ioq_depth); + if (retval) { + ioc_err(sdioc, + "Failed to alloc io queue:error %d\n", + retval); + goto out_failed; + } + } + ioc_info( + sdioc, + "intr_info_count: %d, init_done_queue_cnt: %d, ioq_depth: %d\n", + sdioc->intr_info_count, sdioc->init_done_queue_cnt, + sdioc->ioq_depth); + } + + retval = sssraid_setup_io_qpair(sdioc); + if (retval) { + ioc_err(sdioc, "Failed to setup io qpair, error %d\n", retval); + goto out_failed; + } + + if (!re_init) { + retval = sssraid_alloc_ioq_ptcmds(sdioc); + if (retval) { + ioc_err(sdioc, "Failed to alloc ioq ptcmds, error %d\n", + retval); + goto out_failed; + } + } + + sssraid_send_all_aen(sdioc); + + if (!re_init) { + retval = sssraid_disk_list_init(sdioc); + if (retval) { + ioc_err(sdioc, "Failed to init device list, error %d\n", + retval); + goto out_failed; + } + + retval = sssraid_configure_timestamp(sdioc); + if (retval) { + ioc_err(sdioc, + "Failed to configure timestamp, error %d\n", + retval); + goto out_failed; + } + } + + return retval; + +out_failed: + sssraid_cleanup_ioc(sdioc, re_init); +out_nocleanup: + return retval; +} + +void sssraid_cleanup_resources(struct sssraid_ioc *sdioc) +{ + struct pci_dev *pdev = sdioc->pdev; + + pci_set_drvdata(pdev, NULL); + sssraid_cleanup_isr(sdioc); + + if (sdioc->bar) { + iounmap(sdioc->bar); + sdioc->bar = NULL; + } + + if (pci_is_enabled(pdev)) { + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + } +} + +static void sssraid_free_disk_list(struct sssraid_ioc *sdioc) +{ + kfree(sdioc->devices); + sdioc->devices = NULL; +} + +static void sssraid_free_ioq_ptcmds(struct sssraid_ioc *sdioc) +{ + kfree(sdioc->ioq_ptcmds); + sdioc->ioq_ptcmds = NULL; + + INIT_LIST_HEAD(&sdioc->ioq_pt_list); +} + +static void sssraid_free_sense_buffer(struct sssraid_ioc *sdioc) +{ + if (sdioc->senses) { + dma_free_coherent(&sdioc->pdev->dev, + SENSE_SIZE(SSSRAID_IO_BLK_MQ_DEPTH + + max_hwq_num * SSSRAID_PTCMDS_PERQ), + sdioc->senses, sdioc->sense_dma_addr); + sdioc->senses = NULL; + } +} + +static void sssraid_delete_io_queues(struct sssraid_ioc *sdioc) +{ + u16 queues = sdioc->init_done_queue_cnt - SSSRAID_ADM_QUEUE_NUM; + u8 opcode = SSSRAID_ADM_DELETE_SQ; + u16 i, pass; + + if (!pci_device_is_present(sdioc->pdev)) { + ioc_err(sdioc, + "Err: controller is not present, skip disable io queues\n"); + return; + } + + if (sdioc->init_done_queue_cnt <= SSSRAID_ADM_QUEUE_NUM) { + ioc_err(sdioc, "Err: io queue has been delete\n"); + return; + } + + sssraid_free_sense_buffer(sdioc); + for (pass = 0; pass < 2; pass++) { + for (i = queues; i > 0; i--) + if (sssraid_delete_queue(sdioc, opcode, i)) + break; + + opcode = SSSRAID_ADM_DELETE_CQ; + } +} + +void sssraid_complete_aen(struct sssraid_ioc *sdioc, + struct sssraid_completion *cqe) +{ + u32 result = le32_to_cpu(cqe->result); + + ioc_info(sdioc, "Rcv async evt, cid[%d], status[0x%x], result[0x%x]\n", + 
le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, + result); + + /* + * The response to event moved from this func. + * sssraid_send_aen changed to name sssraid_send_event_ack + */ + + if ((le16_to_cpu(cqe->status) >> 1) != SSSRAID_SC_SUCCESS) + return; + switch (result & 0x7) { + case SSSRAID_AEN_NOTICE: + sssraid_handle_aen_notice(sdioc, result); + break; + case SSSRAID_AEN_VS: + sssraid_handle_aen_vs(sdioc, result, le32_to_cpu(cqe->result1)); + break; + default: + ioc_warn(sdioc, "warn: unsupported async event type: %u\n", + result & 0x7); + break; + } +} + +void sssraid_free_iod_res(struct sssraid_ioc *sdioc, struct sssraid_iod *iod) +{ + const int last_prp = sdioc->page_size / sizeof(__le64) - 1; + dma_addr_t dma_addr, next_dma_addr; + struct sssraid_sgl_desc *sg_list; + __le64 *prp_list; + void *addr; + int i; + + dma_addr = iod->first_dma; + if (iod->npages == 0) + dma_pool_free(iod->sqinfo->prp_small_pool, + sssraid_iod_list(iod)[0], dma_addr); + + for (i = 0; i < iod->npages; i++) { + addr = sssraid_iod_list(iod)[i]; + + if (iod->use_sgl) { + sg_list = addr; + next_dma_addr = + le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); + } else { + prp_list = addr; + next_dma_addr = le64_to_cpu(prp_list[last_prp]); + } + + dma_pool_free(sdioc->prp_page_pool, addr, dma_addr); + dma_addr = next_dma_addr; + } + + iod->sense = NULL; + iod->npages = -1; +} + +static void sssraid_complete_ioq_sync_cmnd(struct sssraid_ioc *sdioc, u16 qidx, + struct sssraid_completion *cqe) +{ + struct sssraid_cmd *ptcmd; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + + ptcmd = sdioc->ioq_ptcmds + (sqinfo->qidx - 1) * SSSRAID_PTCMDS_PERQ + + le16_to_cpu(cqe->cmd_id) - SSSRAID_IO_BLK_MQ_DEPTH; + + ptcmd->status = le16_to_cpu(cqe->status) >> 1; + ptcmd->result0 = le32_to_cpu(cqe->result); + ptcmd->result1 = le32_to_cpu(cqe->result1); + + complete(&ptcmd->cmd_done); +} + +static void sssraid_complete_ioq_cmnd(struct sssraid_ioc *sdioc, u16 qidx, + struct sssraid_completion *cqe) +{ + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[qidx]; + + struct blk_mq_tags *tags; + struct scsi_cmnd *scmd; + struct sssraid_iod *iod; + struct request *req; + unsigned long elapsed; + + atomic_dec(&sqinfo->inflight); + + if (work_mode) + tags = sdioc->shost->tag_set.tags[0]; + else + tags = sdioc->shost->tag_set.tags[sqinfo->qidx - 1]; + + req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id)); + if (unlikely(!req || !blk_mq_request_started(req))) { + ioc_warn(sdioc, + "warn: invalid cmd id %d completed on queue %d\n", + le16_to_cpu(cqe->cmd_id), sqinfo->qidx); + return; + } + + scmd = blk_mq_rq_to_pdu(req); + iod = scsi_cmd_priv(scmd); + + elapsed = jiffies - scmd->jiffies_at_alloc; + dbgprint(sdioc, "cid[%d] qidx[%d] finish IO cost %3ld.%3ld seconds\n", + le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, + elapsed % HZ); + + if (cmpxchg(&iod->state, SSSRAID_CMDSTAT_FLIGHT, + SSSRAID_CMDSTAT_COMPLETE) != SSSRAID_CMDSTAT_FLIGHT) { + ioc_warn( + sdioc, + "warn: cid[%d] qidx[%d] enters abnormal handler, cost %3ld.%3ld seconds\n", + le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, + elapsed % HZ); + WRITE_ONCE(iod->state, SSSRAID_CMDSTAT_TMO_COMPLETE); + + if (iod->nsge) { + iod->nsge = 0; + scsi_dma_unmap(scmd); + } + sssraid_free_iod_res(sdioc, iod); + + return; + } + + sssraid_map_status(iod, scmd, cqe); + if (iod->nsge) { + iod->nsge = 0; + scsi_dma_unmap(scmd); + } + sssraid_free_iod_res(sdioc, iod); + scmd->scsi_done(scmd); +} + +static void sssraid_process_admin_cq(struct sssraid_ioc *sdioc, + struct 
sssraid_squeue *sqinfo, + struct sssraid_completion *cqe) +{ + struct sssraid_fwevt *fwevt = NULL; + u16 cid = le16_to_cpu(cqe->cmd_id), sz; + + if (likely(cid < SSSRAID_AMDQ_BLK_MQ_DEPTH)) + sssraid_complete_adminq_cmnd(sdioc, sqinfo->qidx, cqe); + else { + sz = sizeof(*cqe); + fwevt = sssraid_alloc_fwevt(sz); + if (!fwevt) { + ioc_err(sdioc, "%s :failure at %s:%d/%s()!\n", __func__, + __FILE__, __LINE__, __func__); + return; + } + + memcpy(fwevt->event_data, cqe, sz); + fwevt->sdioc = sdioc; + fwevt->event_id = 0; /* evt_type:0 */ + fwevt->send_ack = 1; /* ack_req:1 */ + fwevt->process_evt = 1; /* process_evt_bh:1 */ + fwevt->evt_ctx = 0; /* 0 */ + sssraid_fwevt_add_to_list(sdioc, fwevt); + } +} + +static void sssraid_process_io_cq(struct sssraid_ioc *sdioc, + struct sssraid_squeue *sqinfo, + struct sssraid_completion *cqe) +{ + u16 cid = le16_to_cpu(cqe->cmd_id); + + if (likely(cid < SSSRAID_IO_BLK_MQ_DEPTH)) + sssraid_complete_ioq_cmnd(sdioc, sqinfo->qidx, cqe); + else /* io sync handle */ + sssraid_complete_ioq_sync_cmnd(sdioc, sqinfo->qidx, cqe); +} + +static inline void sssraid_handle_cqe(struct sssraid_ioc *sdioc, u16 mdix, + u16 didx) +{ + struct sssraid_cqueue *cqinfo = &sdioc->cqinfo[mdix]; + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[mdix]; + + struct sssraid_completion *cqe = &cqinfo->cqes[didx]; + u16 cid = le16_to_cpu(cqe->cmd_id); + + if (unlikely(!work_mode && (cid >= sqinfo->q_depth))) { + ioc_err(sdioc, + "Err: invalid command id[%d] completed on queue %d\n", + cid, cqe->sq_id); + return; + } + + dbgprint(sdioc, + "cid[%d] mdix[%d], result[0x%x], sq_id[%d], status[0x%x]\n", + cid, sqinfo->qidx, le32_to_cpu(cqe->result), + le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status)); + + if (!mdix) /* admin */ + sssraid_process_admin_cq(sdioc, sqinfo, cqe); + else /* io */ + sssraid_process_io_cq(sdioc, sqinfo, cqe); +} + +void sssraid_complete_cqes(struct sssraid_ioc *sdioc, u16 midx, u16 start, + u16 end) +{ + struct sssraid_squeue *sqinfo = &sdioc->sqinfo[midx]; + + while (start != end) { + sssraid_handle_cqe(sdioc, midx, start); + if (++start == sqinfo->q_depth) + start = 0; + } +} + +static int sssraid_disable_admin_queue(struct sssraid_ioc *sdioc, bool shutdown) +{ + struct sssraid_cqueue *adm_cqinfo = &sdioc->cqinfo[0]; + u16 start, end; + int ret = 0; + + if (pci_device_is_present(sdioc->pdev)) { + if (shutdown) + sssraid_shutdown_ctrl(sdioc); + else + ret = sssraid_disable_ctrl(sdioc); + } + + if (sdioc->init_done_queue_cnt == 0) { + ioc_err(sdioc, "err: admin queue has been delete\n"); + return -ENODEV; + } + + spin_lock_irq(&adm_cqinfo->cq_lock); + sssraid_process_cq(sdioc, 0, &start, &end, -1); + spin_unlock_irq(&adm_cqinfo->cq_lock); + sssraid_complete_cqes(sdioc, 0, start, end); + + return ret; +} + +static void sssraid_free_all_queues(struct sssraid_ioc *sdioc) +{ + int i; + struct sssraid_cqueue *cqinfo; + struct sssraid_squeue *sqinfo; + + for (i = 0; i < sdioc->init_done_queue_cnt; i++) { + cqinfo = &sdioc->cqinfo[i]; + sqinfo = &sdioc->sqinfo[i]; + dma_free_coherent(&sdioc->pdev->dev, CQ_SIZE(sqinfo->q_depth), + (void *)cqinfo->cqes, cqinfo->cq_dma_addr); + dma_free_coherent(&sdioc->pdev->dev, + SQ_SIZE(sqinfo->qidx, sqinfo->q_depth), + sqinfo->sq_cmds, sqinfo->sq_dma_addr); + if (!work_mode) + dma_free_coherent(&sdioc->pdev->dev, + SENSE_SIZE(sqinfo->q_depth), + sqinfo->sense, + sqinfo->sense_dma_addr); + } + + sdioc->init_done_queue_cnt = 0; +} + +static void sssraid_free_admin_cmds(struct sssraid_ioc *sdioc) +{ + kfree(sdioc->adm_cmds); + sdioc->adm_cmds = 
NULL; + INIT_LIST_HEAD(&sdioc->adm_cmd_list); +} + +static void sssraid_free_resources(struct sssraid_ioc *sdioc) +{ + sssraid_free_admin_cmds(sdioc); + kfree(sdioc->sqinfo); + kfree(sdioc->cqinfo); + sssraid_destroy_dma_pools(sdioc); + kfree(sdioc->ctrl_info); +} + +void sssraid_cleanup_ioc(struct sssraid_ioc *sdioc, u8 re_init) +{ + if (!re_init) { + sssraid_free_disk_list(sdioc); + sssraid_free_ioq_ptcmds(sdioc); + } + + sssraid_delete_io_queues(sdioc); + sssraid_disable_admin_queue(sdioc, !re_init); + + if (!re_init) + sssraid_free_all_queues(sdioc); + + sssraid_ioc_disable_intr(sdioc); + sssraid_cleanup_resources(sdioc); + + if (!re_init) + sssraid_free_resources(sdioc); +} + +int sssraid_soft_reset_handler(struct sssraid_ioc *sdioc) +{ + int retval = 0; + + if (sdioc->state != SSSRAID_RESETTING) { + ioc_err(sdioc, "err: host is not reset state\n"); + return retval; + } + + ioc_info(sdioc, "host reset entry\n"); + + sssraid_cleanup_fwevt_list(sdioc); + + /* + * realize sssraid_dev_disable, + * i.e. sssraid_cleanup_ioc(1) + */ + if (sdioc->ctrl_config & SSSRAID_CC_ENABLE) { + ioc_info(sdioc, "\n"); + retval = sssraid_disable_admin_queue(sdioc, 0); + } + + sssraid_cleanup_isr(sdioc); + + /* realize above here: + * sssraid_dev_disable -> sssraid_back_all_io + */ + sssraid_back_all_io(sdioc); + + if (retval) + goto host_reset_failed; + + retval = sssraid_init_ioc(sdioc, 1); + if (retval || sdioc->last_qcnt != sdioc->init_done_queue_cnt) + goto host_reset_failed; + + sssraid_scan_disk(sdioc); + sssraid_change_host_state(sdioc, SSSRAID_LIVE); + return 0; + +host_reset_failed: + sssraid_change_host_state(sdioc, SSSRAID_DEAD); + ioc_err(sdioc, "err, host reset failed\n"); + return retval; +} diff --git a/drivers/scsi/sssraid/sssraid_os.c b/drivers/scsi/sssraid/sssraid_os.c new file mode 100644 index 0000000000000000000000000000000000000000..6836bd3343a367b41a4ef1a0b36949980d383c2d --- /dev/null +++ b/drivers/scsi/sssraid/sssraid_os.c @@ -0,0 +1,2711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 3SNIC Information Technology, Ltd */ + +/* 3SNIC RAID SSSXXX Series Linux Driver */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "sssraid.h" +#include "sssraid_debug.h" + +#define MAX_IO_QUEUES 128 +#define MIN_IO_QUEUES 1 + +u32 admin_tmout = 60; +module_param(admin_tmout, uint, 0644); +MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)"); + +static u32 scmd_tmout_rawdisk = 180; +module_param(scmd_tmout_rawdisk, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_rawdisk, + "scsi commands timeout for rawdisk(seconds)"); + +static u32 scmd_tmout_vd = 180; +module_param(scmd_tmout_vd, uint, 0644); +MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)"); + +bool work_mode; +module_param(work_mode, bool, 0444); +MODULE_PARM_DESC(work_mode, + "work mode switch, default false for multi hw queues"); + +static int ioq_num_set(const char *val, const struct kernel_param *kp) +{ + int n = 0; + int ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < MIN_IO_QUEUES || n > MAX_IO_QUEUES) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops max_hwq_num_ops = { + .set = ioq_num_set, + .get = param_get_uint, +}; + +u32 max_hwq_num = 128; +module_param_cb(max_hwq_num, 
&max_hwq_num_ops, &max_hwq_num, 0444); +MODULE_PARM_DESC(max_hwq_num, + "max num of hw io queues, should >= 1, default 128"); + +static int ioq_depth_set(const char *val, const struct kernel_param *kp); +static const struct kernel_param_ops ioq_depth_ops = { + .set = ioq_depth_set, + .get = param_get_uint, +}; + +u32 io_queue_depth = 1024; +module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644); +MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); + +static int logging_level_set(const char *val, const struct kernel_param *kp) +{ + u8 n = 0; + int ret; + + ret = kstrtou8(val, 10, &n); + if (ret != 0) + return -EINVAL; + + return param_set_byte(val, kp); +} + +static const struct kernel_param_ops logging_level_ops = { + .set = logging_level_set, + .get = param_get_byte, +}; + +static unsigned char logging_level; +module_param_cb(logging_level, &logging_level_ops, &logging_level, 0644); +MODULE_PARM_DESC(logging_level, "set log level, default zero for switch off"); + +static int small_pool_num_set(const char *val, const struct kernel_param *kp) +{ + u8 n = 0; + int ret; + + ret = kstrtou8(val, 10, &n); + if (ret != 0) + return -EINVAL; + if (n > MAX_SMALL_POOL_NUM) + n = MAX_SMALL_POOL_NUM; + if (n < 1) + n = 1; + *((u8 *)kp->arg) = n; + + return 0; +} + +static const struct kernel_param_ops small_pool_num_ops = { + .set = small_pool_num_set, + .get = param_get_byte, +}; + +/* Small pools are used to save PRP for small IOs.It was + * found that the spinlock of a single pool conflicts a + * lot with multiple CPUs.So multiple pools are introduced + * to reduce the conflictions. + */ +unsigned char small_pool_num = 4; +module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644); +MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16"); + +//static struct class *sssraid_class; + +enum FW_STAT_CODE { + FW_STAT_OK = 0, + FW_STAT_NEED_CHECK, + FW_STAT_ERROR, + FW_STAT_EP_PCIE_ERROR, + FW_STAT_NAC_DMA_ERROR, + FW_STAT_ABORTED, + FW_STAT_NEED_RETRY +}; + +enum { FW_EH_OK = 0, FW_EH_DEV_NONE = 0x701 }; + +static const char *const raid_levels[] = { "0", "1", "5", "6", + "10", "50", "60", "NA" }; + +static const char *const raid_states[] = { + "NA", "NORMAL", "FAULT", + "DEGRADE", "NOT_FORMATTED", "FORMATTING", + "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL", + "DELETING", "DELETE_FAIL", "WRITE_PROTECT" +}; + +static int ioq_depth_set(const char *val, const struct kernel_param *kp) +{ + int n = 0; + int ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < 2) + return -EINVAL; + + return param_set_int(val, kp); +} + +/* + * common + */ +static struct class *sssraid_class; + +struct sssraid_fwevt *sssraid_alloc_fwevt(int len) +{ + struct sssraid_fwevt *fwevt; + + fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); + if (!fwevt) + return NULL; + + kref_init(&fwevt->ref_count); + return fwevt; +} + +static void sssraid_fwevt_free(struct kref *r) +{ + kfree(container_of(r, struct sssraid_fwevt, ref_count)); +} + +static void sssraid_fwevt_get(struct sssraid_fwevt *fwevt) +{ + kref_get(&fwevt->ref_count); +} + +static void sssraid_fwevt_put(struct sssraid_fwevt *fwevt) +{ + kref_put(&fwevt->ref_count, sssraid_fwevt_free); +} + +static void sssraid_fwevt_del_from_list(struct sssraid_ioc *sdioc, + struct sssraid_fwevt *fwevt) +{ + unsigned long flags; + + spin_lock_irqsave(&sdioc->fwevt_lock, flags); + if (!list_empty(&fwevt->list)) { + list_del_init(&fwevt->list); + /* + * Put fwevt reference count after + * removing it from 
fwevt_list + */ + sssraid_fwevt_put(fwevt); + } + spin_unlock_irqrestore(&sdioc->fwevt_lock, flags); +} + +static void sssraid_fwevt_bh(struct sssraid_ioc *sdioc, + struct sssraid_fwevt *fwevt) +{ + struct sssraid_completion *cqe; + + sdioc->current_event = fwevt; + sssraid_fwevt_del_from_list(sdioc, fwevt); + + cqe = (struct sssraid_completion *)fwevt->event_data; + + if (!fwevt->process_evt) + goto evt_ack; + + sssraid_complete_aen(sdioc, cqe); + +evt_ack: + /* event response put here: event has been handled. */ + sssraid_send_event_ack(sdioc, fwevt->event_id, fwevt->evt_ctx, + le16_to_cpu(cqe->cmd_id)); + sssraid_fwevt_put(fwevt); + sdioc->current_event = NULL; +} + +static void sssraid_fwevt_worker(struct work_struct *work) +{ + struct sssraid_fwevt *fwevt = + container_of(work, struct sssraid_fwevt, work); + sssraid_fwevt_bh(fwevt->sdioc, fwevt); + /* + * Put fwevt reference count after + * dequeuing it from worker queue + */ + sssraid_fwevt_put(fwevt); +} + +void sssraid_fwevt_add_to_list(struct sssraid_ioc *sdioc, + struct sssraid_fwevt *fwevt) +{ + unsigned long flags; + + if (!sdioc->fwevt_worker_thread) + return; + + spin_lock_irqsave(&sdioc->fwevt_lock, flags); + /* get fwevt reference count while adding it to fwevt_list */ + sssraid_fwevt_get(fwevt); + INIT_LIST_HEAD(&fwevt->list); + list_add_tail(&fwevt->list, &sdioc->fwevt_list); + INIT_WORK(&fwevt->work, sssraid_fwevt_worker); + /* get fwevt reference count while enqueueing it to worker queue */ + sssraid_fwevt_get(fwevt); + queue_work(sdioc->fwevt_worker_thread, &fwevt->work); + spin_unlock_irqrestore(&sdioc->fwevt_lock, flags); +} + +static struct sssraid_fwevt *sssraid_dequeue_fwevt(struct sssraid_ioc *sdioc) +{ + unsigned long flags; + struct sssraid_fwevt *fwevt = NULL; + + spin_lock_irqsave(&sdioc->fwevt_lock, flags); + if (!list_empty(&sdioc->fwevt_list)) { + fwevt = list_first_entry(&sdioc->fwevt_list, + struct sssraid_fwevt, list); + list_del_init(&fwevt->list); + /* + * Put fwevt reference count after + * removing it from fwevt_list + */ + sssraid_fwevt_put(fwevt); + } + spin_unlock_irqrestore(&sdioc->fwevt_lock, flags); + + return fwevt; +} + +static bool sssraid_disk_is_hdd(u8 attr) +{ + switch (SSSRAID_DISK_TYPE(attr)) { + case SSSRAID_SAS_HDD_VD: + case SSSRAID_SATA_HDD_VD: + case SSSRAID_SAS_HDD_PD: + case SSSRAID_SATA_HDD_PD: + return true; + default: + return false; + } +} + +void sssraid_cleanup_fwevt_list(struct sssraid_ioc *sdioc) +{ + struct sssraid_fwevt *fwevt = NULL; + + if ((list_empty(&sdioc->fwevt_list) && !sdioc->current_event) || + !sdioc->fwevt_worker_thread) + return; + + while ((fwevt = sssraid_dequeue_fwevt(sdioc)) || + (fwevt = sdioc->current_event)) { + /* + * Wait on the fwevt to complete. If this returns 1, then + * the event was never executed, and we need a put for the + * reference the work had on the fwevt. 
+ * + * If it did execute, we wait for it to finish, and the put will + * happen from sssraid_process_fwevt() + */ + if (cancel_work_sync(&fwevt->work)) { + /* + * Put fwevt reference count after + * dequeuing it from worker queue + */ + sssraid_fwevt_put(fwevt); + /* + * Put fwevt reference count to neutralize + * kref_init increment + */ + sssraid_fwevt_put(fwevt); + } + } +} + +/* + * common 1 + */ +static int sssraid_npages_prp(struct sssraid_ioc *sdioc) +{ + u32 size = (1U << ((sdioc->ctrl_info->mdts) * 1U)) << 12; + u32 nprps = DIV_ROUND_UP(size + sdioc->page_size, sdioc->page_size); + + return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, + sdioc->page_size - PRP_ENTRY_SIZE); +} + +static int sssraid_npages_sgl(struct sssraid_ioc *sdioc) +{ + u32 nsge = le16_to_cpu(sdioc->ctrl_info->max_num_sge); + + return DIV_ROUND_UP(nsge * sizeof(struct sssraid_sgl_desc), + sdioc->page_size); +} + +static u32 sssraid_cmd_size(struct sssraid_ioc *sdioc) +{ + u32 alloc_size = sizeof(__le64 *) * max(sssraid_npages_prp(sdioc), + sssraid_npages_sgl(sdioc)); + + ioc_info(sdioc, + "iod structure size: %lu, alloc for shost cmd_size: %u\n", + sizeof(struct sssraid_iod), alloc_size); + + return sizeof(struct sssraid_iod) + alloc_size; +} + +static int sssraid_setup_prps(struct sssraid_ioc *sdioc, + struct sssraid_iod *iod) +{ + struct scatterlist *sg = iod->sg; + u64 dma_addr = sg_dma_address(sg); + int dma_len = sg_dma_len(sg); + __le64 *prp_list, *old_prp_list; + int page_size = sdioc->page_size; + int offset = dma_addr & (page_size - 1); + void **list = sssraid_iod_list(iod); + int length = iod->length; + struct dma_pool *pool; + dma_addr_t prp_dma; + int nprps, i; + + length -= (page_size - offset); + if (length <= 0) { + iod->first_dma = 0; + return 0; + } + + dma_len -= (page_size - offset); + if (dma_len) { + dma_addr += (page_size - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= page_size) { + iod->first_dma = dma_addr; + return 0; + } + + nprps = DIV_ROUND_UP(length, page_size); + if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) { + pool = iod->sqinfo->prp_small_pool; + iod->npages = 0; + } else { + pool = sdioc->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + dev_err_ratelimited(&sdioc->pdev->dev, + "Allocate first prp_list memory failed\n"); + iod->first_dma = dma_addr; + iod->npages = -1; + return -ENOMEM; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == page_size / PRP_ENTRY_SIZE) { + old_prp_list = prp_list; + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + dev_err_ratelimited( + &sdioc->pdev->dev, + "Allocate %dth prp_list memory failed\n", + iod->npages + 1); + return -ENOMEM; + } + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= page_size; + dma_addr += page_size; + length -= page_size; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + return 0; + +bad_sgl: + ioc_err(sdioc, + "Setup prps: invalid SGL for payload len: %d sg entry count: %d\n", + iod->length, iod->nsge); + return -EIO; +} + +static inline bool sssraid_is_rw_scmd(struct scsi_cmnd *scmd) +{ + switch (scmd->cmnd[0]) { + case READ_6: + case READ_10: + 
case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + return true; + default: + return false; + } +} + +static bool sssraid_is_prp(struct sssraid_ioc *sdioc, struct scsi_cmnd *scmd, + u32 nsge) +{ + struct scatterlist *sg = scsi_sglist(scmd); + u32 page_mask = sdioc->page_size - 1; + bool is_prp = true; + int i = 0; + + scsi_for_each_sg(scmd, sg, nsge, i) { + if (i != 0 && i != nsge - 1) { + if ((sg_dma_len(sg) & page_mask) || + (sg_dma_address(sg) & page_mask)) { + is_prp = false; + break; + } + } + + if (nsge > 1 && i == 0) { + if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) { + is_prp = false; + break; + } + } + + if (nsge > 1 && i == (nsge - 1)) { + if (sg_dma_address(sg) & page_mask) { + is_prp = false; + break; + } + } + } + + return is_prp; +} + +static void sssraid_sgl_set_data(struct sssraid_sgl_desc *sge, + struct scatterlist *sg) +{ + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->type = SSSRAID_SGL_FMT_DATA_DESC << 4; +} + +static void sssraid_sgl_set_seg(struct sssraid_sgl_desc *sge, + dma_addr_t dma_addr, int entries) +{ + sge->addr = cpu_to_le64(dma_addr); + if (entries <= SGES_PER_PAGE) { + sge->length = cpu_to_le32(entries * sizeof(*sge)); + sge->type = SSSRAID_SGL_FMT_LAST_SEG_DESC << 4; + } else { + sge->length = cpu_to_le32(PAGE_SIZE); + sge->type = SSSRAID_SGL_FMT_SEG_DESC << 4; + } +} + +static int sssraid_setup_ioq_cmd_sgl(struct sssraid_ioc *sdioc, + struct scsi_cmnd *scmd, + struct sssraid_ioq_command *ioq_cmd, + struct sssraid_iod *iod) +{ + struct sssraid_sgl_desc *sg_list, *link, *old_sg_list; + struct scatterlist *sg = scsi_sglist(scmd); + void **list = sssraid_iod_list(iod); + struct dma_pool *pool; + int nsge = iod->nsge; + dma_addr_t sgl_dma; + int i = 0; + + ioq_cmd->common.flags |= SSSRAID_CMD_FLAG_SGL_METABUF; + + if (nsge == 1) { + sssraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg); + return 0; + } + + if (nsge <= (SMALL_POOL_SIZE / sizeof(struct sssraid_sgl_desc))) { + pool = iod->sqinfo->prp_small_pool; + iod->npages = 0; + } else { + pool = sdioc->prp_page_pool; + iod->npages = 1; + } + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) { + dev_err_ratelimited(&sdioc->pdev->dev, + "Allocate first sgl_list failed\n"); + iod->npages = -1; + return -ENOMEM; + } + + list[0] = sg_list; + iod->first_dma = sgl_dma; + sssraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge); + do { + if (i == SGES_PER_PAGE) { + old_sg_list = sg_list; + link = &old_sg_list[SGES_PER_PAGE - 1]; + + sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) { + dev_err_ratelimited( + &sdioc->pdev->dev, + "Allocate %dth sgl_list failed\n", + iod->npages + 1); + return -ENOMEM; + } + list[iod->npages++] = sg_list; + + i = 0; + memcpy(&sg_list[i++], link, sizeof(*link)); + sssraid_sgl_set_seg(link, sgl_dma, nsge); + } + + sssraid_sgl_set_data(&sg_list[i++], sg); + sg = sg_next(sg); + } while (--nsge > 0); + + return 0; +} + +static void sssraid_shost_init(struct sssraid_ioc *sdioc) +{ + struct pci_dev *pdev = sdioc->pdev; + u8 domain, bus; + u32 dev_func; + + domain = pci_domain_nr(pdev->bus); + bus = pdev->bus->number; + dev_func = pdev->devfn; + + sdioc->shost->nr_hw_queues = work_mode ? 
1 : SSSRAID_NR_HW_QUEUES; + sdioc->shost->can_queue = SSSRAID_IO_BLK_MQ_DEPTH; + + sdioc->shost->sg_tablesize = le16_to_cpu(sdioc->ctrl_info->max_num_sge); + /* 512B per sector */ + sdioc->shost->max_sectors = + (1U << ((sdioc->ctrl_info->mdts) * 1U) << 12) / 512; + sdioc->shost->cmd_per_lun = MAX_CMD_PER_DEV; + sdioc->shost->max_channel = + le16_to_cpu(sdioc->ctrl_info->max_channel) - 1; + sdioc->shost->max_id = le32_to_cpu(sdioc->ctrl_info->max_tgt_id); + sdioc->shost->max_lun = le16_to_cpu(sdioc->ctrl_info->max_lun); + + sdioc->shost->this_id = -1; + sdioc->shost->unique_id = (domain << 16) | (bus << 8) | dev_func; + sdioc->shost->max_cmd_len = MAX_CDB_LEN; + sdioc->shost->hostt->cmd_size = sssraid_cmd_size(sdioc); +} + +static inline void +sssraid_get_tag_from_scmd(struct scsi_cmnd *scmd, u16 *qidx, + struct sssraid_ioc *sdioc, u16 *cid, + struct sssraid_sdev_hostdata *hostdata) +{ + u32 tag = blk_mq_unique_tag(scmd->request); + + if (work_mode) { + if ((sdioc->hdd_dispatch == DISPATCH_BY_DISK) && + (hostdata->hwq != 0)) + *qidx = hostdata->hwq; + else + *qidx = raw_smp_processor_id() % + (sdioc->init_done_queue_cnt - 1) + + 1; + } else { + *qidx = blk_mq_unique_tag_to_hwq(tag) + 1; + } + + *cid = blk_mq_unique_tag_to_tag(tag); +} + +static inline uint32_t get_unaligned_be24(const uint8_t *const p) +{ + return get_unaligned_be32(p - 1) & 0xffffffU; +} + +static int sssraid_setup_rw_cmd(struct sssraid_ioc *sdioc, + struct sssraid_rw_command *rw, + struct scsi_cmnd *scmd) +{ + u32 start_lba_lo, start_lba_hi; + u32 datalength = 0; + u16 control = 0; + + start_lba_lo = 0; + start_lba_hi = 0; + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + rw->opcode = SSSRAID_IOCMD_WRITE; + } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) { + rw->opcode = SSSRAID_IOCMD_READ; + } else { + ioc_err(sdioc, + "err: unsupported data direction: %d, SCSI IO cmd invalid\n", + scmd->sc_data_direction); + WARN_ON(1); + return -EINVAL; + } + + /* 6-byte READ(0x08) or WRITE(0x0A) cdb */ + if (scmd->cmd_len == SCSI_6_BYTE_CDB_LEN) { + datalength = (u32)(scmd->cmnd[4] == 0 ? 
IO_6_DEFAULT_TX_LEN : + scmd->cmnd[4]); + start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]); + + start_lba_lo &= 0x1FFFFF; + } + + /* 10-byte READ(0x28) or WRITE(0x2A) cdb */ + else if (scmd->cmd_len == SCSI_10_BYTE_CDB_LEN) { + datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SSSRAID_RW_FUA; + } + + /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */ + else if (scmd->cmd_len == SCSI_12_BYTE_CDB_LEN) { + datalength = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SSSRAID_RW_FUA; + } + /* 16-byte READ(0x88) or WRITE(0x8A) cdb */ + else if (scmd->cmd_len == SCSI_16_BYTE_CDB_LEN) { + datalength = get_unaligned_be32(&scmd->cmnd[10]); + start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]); + start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]); + + if (scmd->cmnd[1] & FUA_MASK) + control |= SSSRAID_RW_FUA; + } + + if (unlikely(datalength > U16_MAX || datalength == 0)) { + ioc_err(sdioc, + "err: illegal transfer data length: %u, Invalid IO\n", + datalength); + WARN_ON(1); + return -EINVAL; + } + + rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo); + /* 0base for nlb */ + rw->nlb = cpu_to_le16((u16)(datalength - 1)); + rw->control = cpu_to_le16(control); + + return 0; +} + +static int sssraid_setup_nonio_cmd(struct sssraid_ioc *sdioc, + struct sssraid_scsi_nonio *scsi_nonio, + struct scsi_cmnd *scmd) +{ + scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd)); + + switch (scmd->sc_data_direction) { + case DMA_NONE: + scsi_nonio->opcode = SSSRAID_IOCMD_NONRW_NODIR; + break; + case DMA_TO_DEVICE: + scsi_nonio->opcode = SSSRAID_IOCMD_NONRW_TODEV; + break; + case DMA_FROM_DEVICE: + scsi_nonio->opcode = SSSRAID_IOCMD_NONRW_FROMDEV; + break; + default: + ioc_err(sdioc, + "err: unsupported data direction: %d, invalid SCSI NON_IO cmd\n", + scmd->sc_data_direction); + WARN_ON(1); + return -EINVAL; + } + + return 0; +} + +static int sssraid_setup_ioq_cmd(struct sssraid_ioc *sdioc, + struct sssraid_ioq_command *ioq_cmd, + struct scsi_cmnd *scmd) +{ + memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len); + ioq_cmd->common.cdb_len = scmd->cmd_len; + + if (sssraid_is_rw_scmd(scmd)) + return sssraid_setup_rw_cmd(sdioc, &ioq_cmd->rw, scmd); + else + return sssraid_setup_nonio_cmd(sdioc, &ioq_cmd->scsi_nonio, + scmd); +} + +static inline void sssraid_init_iod(struct sssraid_iod *iod) +{ + iod->nsge = 0; + iod->npages = -1; + iod->use_sgl = false; + WRITE_ONCE(iod->state, SSSRAID_CMDSTAT_IDLE); +} + +int sssraid_io_map_data(struct sssraid_ioc *sdioc, struct sssraid_iod *iod, + struct scsi_cmnd *scmd, + struct sssraid_ioq_command *ioq_cmd) +{ + int retval; + + retval = scsi_dma_map(scmd); + if (unlikely(retval < 0)) + return retval; + iod->nsge = retval; + /* No data to DMA, it may be scsi no-rw command */ + if (unlikely(iod->nsge == 0)) + return 0; + + iod->length = scsi_bufflen(scmd); + iod->sg = scsi_sglist(scmd); + iod->use_sgl = !sssraid_is_prp(sdioc, scmd, iod->nsge); + + if (iod->use_sgl) { + retval = sssraid_setup_ioq_cmd_sgl(sdioc, scmd, ioq_cmd, iod); + } else { + retval = sssraid_setup_prps(sdioc, iod); + ioq_cmd->common.dptr.prp1 = + cpu_to_le64(sg_dma_address(iod->sg)); + ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma); + } + + if (retval) + scsi_dma_unmap(scmd); + + return retval; +} + +void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd, + struct sssraid_completion *cqe) 
+{
+ struct sssraid_ioc *sdioc = iod->sqinfo->sdioc;
+
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ scmd->result = (scmd->result & 0x00ffffff) |
+ (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ ioc_warn(sdioc,
+ "warn: cid[%d] qid[%d] unsupported status[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id),
+ le16_to_cpu(cqe->status));
+ break;
+ }
+}
+
+struct sssraid_cmd *sssraid_get_cmd(struct sssraid_ioc *sdioc,
+ enum sssraid_cmd_type type)
+{
+ struct sssraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &sdioc->adm_cmd_list;
+ spinlock_t *slock = &sdioc->adm_cmd_lock;
+
+ if (type == SSSRAID_CMD_IOPT) {
+ head = &sdioc->ioq_pt_list;
+ slock = &sdioc->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ ioc_err(sdioc, "err: tool get cmd[%d] list is empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct sssraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SSSRAID_CMDSTAT_FLIGHT);
+
+ return cmd;
+}
+
+static int sssraid_add_device(struct sssraid_ioc *sdioc,
+ struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = sdioc->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(
+ sdioc,
+ "add scsi disk, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (sdev) {
+ ioc_warn(
+ sdioc,
+ "warn: scsi disk already exists, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int sssraid_rescan_device(struct sssraid_ioc *sdioc,
+ struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = sdioc->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(
+ sdioc,
+ "rescan scsi disk, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ ioc_warn(
+ sdioc,
+ "warn: rescan, scsi disk does not exist, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int sssraid_remove_device(struct sssraid_ioc *sdioc,
+ struct sssraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = sdioc->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(
+ sdioc,
+ "remove scsi disk, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+
sdev = scsi_device_lookup(shost, org_device->channel, + le16_to_cpu(org_device->target), 0); + if (!sdev) { + ioc_warn( + sdioc, + "warn: remove, scsi disk not exist, channel: %d, target_id: %d, lun: %d\n", + org_device->channel, le16_to_cpu(org_device->target), + 0); + return -ENODEV; + } + + scsi_remove_device(sdev); + scsi_device_put(sdev); + return 0; +} + +static int luntarget_cmp_func(const void *l, const void *r) +{ + const struct sssraid_dev_info *ln = l; + const struct sssraid_dev_info *rn = r; + int l_attr = SSSRAID_DISK_INFO_ATTR_BOOT(ln->attr); + int r_attr = SSSRAID_DISK_INFO_ATTR_BOOT(rn->attr); + + /* boot first */ + if (l_attr != r_attr) + return (r_attr - l_attr); + + if (ln->channel == rn->channel) + return le16_to_cpu(ln->target) - le16_to_cpu(rn->target); + + return ln->channel - rn->channel; +} + +void sssraid_scan_disk(struct sssraid_ioc *sdioc) +{ + struct sssraid_dev_info *devices, *org_devices; + struct sssraid_dev_info *sortdevice; + u32 nd = le32_to_cpu(sdioc->ctrl_info->nd); + u8 flag, org_flag; + int i, ret; + int count = 0; + + devices = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL); + if (!devices) + return; + + sortdevice = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL); + if (!sortdevice) + goto free_list; + + ret = sssraid_get_dev_list(sdioc, devices); + if (ret) + goto free_all; + org_devices = sdioc->devices; + for (i = 0; i < nd; i++) { + org_flag = org_devices[i].flag; + flag = devices[i].flag; + + dbgprint(sdioc, "i: %d, org_flag: 0x%x, flag: 0x%x\n", i, + org_flag, flag); + + if (SSSRAID_DISK_INFO_FLAG_VALID(flag)) { + if (!SSSRAID_DISK_INFO_FLAG_VALID(org_flag)) { + down_write(&sdioc->devices_rwsem); + memcpy(&org_devices[i], &devices[i], + sizeof(struct sssraid_dev_info)); + memcpy(&sortdevice[count++], &devices[i], + sizeof(struct sssraid_dev_info)); + up_write(&sdioc->devices_rwsem); + } else if (SSSRAID_DISK_INFO_FLAG_CHANGE(flag)) { + sssraid_rescan_device(sdioc, &devices[i]); + } + } else { + if (SSSRAID_DISK_INFO_FLAG_VALID(org_flag)) { + down_write(&sdioc->devices_rwsem); + org_devices[i].flag &= 0xfe; + up_write(&sdioc->devices_rwsem); + sssraid_remove_device(sdioc, &org_devices[i]); + } + } + } + + ioc_info(sdioc, "scan work add device count = %d\n", count); + + sort(sortdevice, count, sizeof(sortdevice[0]), luntarget_cmp_func, + NULL); + + for (i = 0; i < count; i++) + sssraid_add_device(sdioc, &sortdevice[i]); + +free_all: + kfree(sortdevice); +free_list: + kfree(devices); +} + +static int sssraid_wait_abnl_cmd_done(struct sssraid_iod *iod) +{ + u16 times = 0; + + do { + if (READ_ONCE(iod->state) == SSSRAID_CMDSTAT_TMO_COMPLETE) + break; + msleep(500); + times++; + } while (times <= SSSRAID_WAIT_ABNL_CMD_TIMEOUT); + + /* wait command completion timeout after abort/reset success */ + if (times >= SSSRAID_WAIT_ABNL_CMD_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static bool sssraid_check_scmd_completed(struct scsi_cmnd *scmd) +{ + struct sssraid_ioc *sdioc = shost_priv(scmd->device->host); + struct sssraid_iod *iod = scsi_cmd_priv(scmd); + struct sssraid_squeue *sqinfo; + + sqinfo = iod->sqinfo; + if (!sqinfo) + return false; + + if (READ_ONCE(iod->state) == SSSRAID_CMDSTAT_COMPLETE || + sssraid_poll_cq(sdioc, sqinfo->qidx, iod->cid)) { + ioc_warn(sdioc, "warn: cid[%d] qidx[%d] has completed\n", + iod->cid, sqinfo->qidx); + return true; + } + return false; +} + +static void sssraid_tgt_rst_pending_io_count(struct request *rq, void *data, + bool reserved) +{ + unsigned int id = *(unsigned int *)data; + struct 
scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct sssraid_iod *iod;
+ struct sssraid_sdev_hostdata *hostdata;
+
+ if (scmd) {
+ iod = scsi_cmd_priv(scmd);
+ if ((iod->state == SSSRAID_CMDSTAT_FLIGHT) ||
+ (iod->state == SSSRAID_CMDSTAT_TIMEOUT)) {
+ if ((scmd->device) && (scmd->device->id == id)) {
+ hostdata = scmd->device->hostdata;
+ hostdata->pend_count++;
+ }
+ }
+ }
+}
+
+static int wait_tgt_reset_io_done(struct scsi_cmnd *scmd)
+{
+ u16 timeout = 0;
+ struct sssraid_sdev_hostdata *hostdata;
+ struct sssraid_ioc *sdioc = shost_priv(scmd->device->host);
+
+ hostdata = scmd->device->hostdata;
+
+ do {
+ hostdata->pend_count = 0;
+ blk_mq_tagset_busy_iter(&sdioc->shost->tag_set,
+ sssraid_tgt_rst_pending_io_count,
+ (void *)(&scmd->device->id));
+ if (!hostdata->pend_count)
+ return 0;
+
+ msleep(500);
+ timeout++;
+ } while (timeout <= SSSRAID_WAIT_RST_IO_TIMEOUT);
+
+ return -ETIMEDOUT;
+}
+
+static int sssraid_scsi_reset(struct scsi_cmnd *scmd,
+ enum sssraid_scsi_rst_type rst)
+{
+ struct sssraid_ioc *sdioc = shost_priv(scmd->device->host);
+ struct sssraid_sdev_hostdata *hostdata;
+ int ret;
+
+ if (sdioc->state != SSSRAID_LIVE)
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+
+ ioc_warn(sdioc, "sdev[%d:%d] send %s reset\n", scmd->device->channel,
+ scmd->device->id, rst ? "bus" : "target");
+ ret = sssraid_send_reset_cmd(sdioc, rst, hostdata->hdid);
+ if ((ret == 0) ||
+ (ret == FW_EH_DEV_NONE && rst == SSSRAID_RESET_TARGET)) {
+ if (rst == SSSRAID_RESET_TARGET) {
+ ret = wait_tgt_reset_io_done(scmd);
+ if (ret) {
+ ioc_warn(
+ sdioc,
+ "sdev[%d:%d] target has %d pending commands, "
+ "target reset failed\n",
+ scmd->device->channel, scmd->device->id,
+ hostdata->pend_count);
+ return FAILED;
+ }
+ }
+
+ ioc_warn(sdioc, "sdev[%d:%d] %s reset success\n",
+ scmd->device->channel, scmd->device->id,
+ rst ? "bus" : "target");
+ return SUCCESS;
+ }
+
+ ioc_warn(sdioc, "sdev[%d:%d] %s reset failed\n", scmd->device->channel,
+ scmd->device->id, rst ?
"bus" : "target"); + + return FAILED; +} + +bool sssraid_change_host_state(struct sssraid_ioc *sdioc, + enum sssraid_state newstate) +{ + unsigned long flags; + enum sssraid_state oldstate; + bool change = false; + + spin_lock_irqsave(&sdioc->state_lock, flags); + + oldstate = sdioc->state; + switch (newstate) { + case SSSRAID_LIVE: + switch (oldstate) { + case SSSRAID_NEW: + case SSSRAID_RESETTING: + change = true; + break; + default: + break; + } + break; + case SSSRAID_RESETTING: + switch (oldstate) { + case SSSRAID_LIVE: + change = true; + break; + default: + break; + } + break; + case SSSRAID_DELETING: + if (oldstate != SSSRAID_DELETING) + change = true; + break; + case SSSRAID_DEAD: + switch (oldstate) { + case SSSRAID_NEW: + case SSSRAID_LIVE: + case SSSRAID_RESETTING: + change = true; + break; + default: + break; + } + break; + default: + break; + } + + if (change) + sdioc->state = newstate; + spin_unlock_irqrestore(&sdioc->state_lock, flags); + + ioc_info(sdioc, "[%d]->[%d], change[%d]\n", oldstate, newstate, change); + + return change; +} + +static int sssraid_get_qd_by_disk(u8 attr) +{ + switch (SSSRAID_DISK_TYPE(attr)) { + case SSSRAID_SAS_HDD_VD: + case SSSRAID_SATA_HDD_VD: + return SSSRAID_HDD_VD_QD; + case SSSRAID_SAS_SSD_VD: + case SSSRAID_SATA_SSD_VD: + case SSSRAID_NVME_SSD_VD: + return SSSRAID_SSD_VD_QD; + case SSSRAID_SAS_HDD_PD: + case SSSRAID_SATA_HDD_PD: + return SSSRAID_HDD_PD_QD; + case SSSRAID_SAS_SSD_PD: + case SSSRAID_SATA_SSD_PD: + case SSSRAID_NVME_SSD_PD: + return SSSRAID_SSD_PD_QD; + default: + return MAX_CMD_PER_DEV; + } +} + +static int sssraid_match_dev(struct sssraid_ioc *sdioc, u16 idx, + struct scsi_device *sdev) +{ + if (SSSRAID_DISK_INFO_FLAG_VALID(sdioc->devices[idx].flag)) { + if (sdev->channel == sdioc->devices[idx].channel && + sdev->id == le16_to_cpu(sdioc->devices[idx].target) && + sdev->lun < sdioc->devices[idx].lun) { + ioc_info( + sdioc, + "Match device success, channel:target:lun[%d:%d:%d]\n", + sdioc->devices[idx].channel, + sdioc->devices[idx].target, + sdioc->devices[idx].lun); + return 1; + } + } + + return 0; +} + +static int sssraid_bsg_map_data(struct sssraid_ioc *sdioc, struct bsg_job *job, + struct sssraid_admin_command *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(job); + struct sssraid_iod *iod = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + int ret = 0; + + iod->sg = job->request_payload.sg_list; + iod->nsge = job->request_payload.sg_cnt; + iod->length = job->request_payload.payload_len; + iod->use_sgl = false; + iod->npages = -1; + + if (!iod->nsge) + goto out; + + ret = dma_map_sg_attrs(&sdioc->pdev->dev, iod->sg, iod->nsge, dma_dir, + DMA_ATTR_NO_WARN); + if (!ret) + goto out; + + ret = sssraid_setup_prps(sdioc, iod); + if (ret) + goto unmap; + + cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma); + + return 0; + +unmap: + dma_unmap_sg(&sdioc->pdev->dev, iod->sg, iod->nsge, dma_dir); +out: + return ret; +} + +static void sssraid_bsg_unmap_data(struct sssraid_ioc *sdioc, + struct bsg_job *job) +{ + struct request *rq = blk_mq_rq_from_pdu(job); + struct sssraid_iod *iod = job->dd_data; + enum dma_data_direction dma_dir = + rq_data_dir(rq) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + + if (iod->nsge) + dma_unmap_sg(&sdioc->pdev->dev, iod->sg, iod->nsge, dma_dir); + + sssraid_free_iod_res(sdioc, iod); +} + +void sssraid_put_cmd(struct sssraid_ioc *sdioc, struct sssraid_cmd *cmd, + enum sssraid_cmd_type type) +{ + unsigned long flags; + struct list_head *head = &sdioc->adm_cmd_list; + spinlock_t *slock = &sdioc->adm_cmd_lock; + + if (type == SSSRAID_CMD_IOPT) { + head = &sdioc->ioq_pt_list; + slock = &sdioc->ioq_pt_lock; + } + + spin_lock_irqsave(slock, flags); + WRITE_ONCE(cmd->state, SSSRAID_CMDSTAT_IDLE); + list_add_tail(&cmd->list, head); + spin_unlock_irqrestore(slock, flags); +} + +static int sssraid_user_admin_cmd(struct sssraid_ioc *sdioc, + struct bsg_job *job) +{ + struct sssraid_bsg_request *bsg_req = job->request; + struct sssraid_passthru_common_cmd *cmd = &(bsg_req->admcmd); + struct sssraid_admin_command admin_cmd; + u32 timeout = msecs_to_jiffies(cmd->timeout_ms); + u32 result[2] = { 0 }; + int status; + + if (sdioc->state >= SSSRAID_RESETTING) { + ioc_err(sdioc, "err: tool adm host state:[%d] is not right\n", + sdioc->state); + return -EBUSY; + } + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.common.opcode = cmd->opcode; + admin_cmd.common.flags = cmd->flags; + admin_cmd.common.hdid = cpu_to_le32(cmd->nsid); + admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2); + admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3); + admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10); + admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11); + admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12); + admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13); + admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14); + admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15); + + status = sssraid_bsg_map_data(sdioc, job, &admin_cmd); + if (status) { + ioc_err(sdioc, "err: bsg map data failed\n"); + return status; + } + + status = sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, &result[0], + &result[1], timeout); + if (status >= 0) { + job->reply_len = sizeof(result); + memcpy(job->reply, result, sizeof(result)); + } + + if (status) + ioc_info( + sdioc, + "tool adm opcode[0x%x] subopcode[0x%x], status[0x%x] result0[0x%x] result1[0x%x]\n", + cmd->opcode, cmd->info_0.subopcode, status, result[0], + result[1]); + + sssraid_bsg_unmap_data(sdioc, job); + + return status; +} + +static int sssraid_submit_ioq_sync_cmd(struct sssraid_ioc *sdioc, + struct sssraid_ioq_command *cmd, + u32 *result, u32 *reslen, u32 timeout) +{ + int ret; + dma_addr_t sense_dma; + struct sssraid_squeue *sqinfo; + void *sense_addr = NULL; + struct sssraid_cmd *pt_cmd = sssraid_get_cmd(sdioc, SSSRAID_CMD_IOPT); + + if (!pt_cmd) { + ioc_err(sdioc, "err: sync ioq get sqinfo cmd failed\n"); + return -EFAULT; + } + + timeout = timeout ? 
timeout : ADMIN_TIMEOUT; + + init_completion(&pt_cmd->cmd_done); + + sqinfo = &sdioc->sqinfo[pt_cmd->qid]; + ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE; + + if (work_mode) { + ret = ((pt_cmd->qid - 1) * SSSRAID_PTCMDS_PERQ + pt_cmd->cid) * + SCSI_SENSE_BUFFERSIZE; + sense_addr = sdioc->senses + ret; + sense_dma = sdioc->sense_dma_addr + ret; + } else { + ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE; + sense_addr = sqinfo->sense + ret; + sense_dma = sqinfo->sense_dma_addr + ret; + } + + cmd->common.sense_addr = cpu_to_le64(sense_dma); + cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE); + cmd->common.command_id = cpu_to_le16(pt_cmd->cid); + + sssraid_submit_cmd(sqinfo, cmd); + + if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) { + ioc_err(sdioc, + "err: sync ioq cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n", + pt_cmd->cid, pt_cmd->qid, cmd->common.opcode, + (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff)); + + /* reset controller if admin timeout */ + sssraid_adm_timeout(sdioc, pt_cmd); + + sssraid_put_cmd(sdioc, pt_cmd, SSSRAID_CMD_IOPT); + return -ETIME; + } + + if (result && reslen) { + if ((pt_cmd->status & 0x17f) == 0x101) { + memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE); + *reslen = SCSI_SENSE_BUFFERSIZE; + } + } + + sssraid_put_cmd(sdioc, pt_cmd, SSSRAID_CMD_IOPT); + + return pt_cmd->status; +} + +static int sssraid_user_ioq_cmd(struct sssraid_ioc *sdioc, struct bsg_job *job) +{ + struct sssraid_bsg_request *bsg_req = + (struct sssraid_bsg_request *)(job->request); + struct sssraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd); + struct sssraid_ioq_command ioq_cmd; + int status = 0; + u32 timeout = msecs_to_jiffies(cmd->timeout_ms); + + if (cmd->data_len > IOQ_PT_DATA_LEN) { + ioc_err(sdioc, "err: tool ioq data len bigger than 4k\n"); + return -EFAULT; + } + + if (sdioc->state != SSSRAID_LIVE) { + ioc_err(sdioc, "err: tool ioq host state:[%d] is not live\n", + sdioc->state); + return -EBUSY; + } + + ioc_info(sdioc, + "tool ioq opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n", + cmd->opcode, cmd->info_1.subopcode, cmd->data_len); + + memset(&ioq_cmd, 0, sizeof(ioq_cmd)); + ioq_cmd.common.opcode = cmd->opcode; + ioq_cmd.common.flags = cmd->flags; + ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid); + ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len); + ioq_cmd.common.cdb_len = cmd->info_0.cdb_len; + ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0; + ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3); + ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4); + ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5); + + ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10); + ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11); + ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12); + ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13); + ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14); + ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len); + + memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len); + + ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]); + ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]); + ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]); + ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]); + + status = sssraid_bsg_map_data(sdioc, job, + (struct sssraid_admin_command *)&ioq_cmd); + if (status) { + ioc_err(sdioc, "err: map bsg data failed\n"); + return status; + } + + status = sssraid_submit_ioq_sync_cmd(sdioc, &ioq_cmd, job->reply, + &job->reply_len, timeout); + if (status) + ioc_info( + sdioc, + "tool ioq opcode[0x%x] subopcode[0x%x], 
status[0x%x], reply_len[%d]\n", + cmd->opcode, cmd->info_1.subopcode, status, + job->reply_len); + + sssraid_bsg_unmap_data(sdioc, job); + + return status; +} + +/* bsg dispatch user command */ +static int sssraid_bsg_host_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = dev_to_shost(job->dev); + struct sssraid_ioc *sdioc = shost_priv(shost); + struct request *rq = blk_mq_rq_from_pdu(job); + struct sssraid_bsg_request *bsg_req = job->request; + int ret = -ENOMSG; + + job->reply_len = 0; + + if (bsg_req == NULL || + job->request_len != sizeof(struct sssraid_bsg_request)) { + bsg_job_done(job, ret, 0); + return 0; + } + dbgprint(sdioc, + "bsg msgcode[%d] msglen[%d] timeout[%d];" + "reqnsge[%d], reqlen[%d]\n", + bsg_req->msgcode, job->request_len, rq->timeout, + job->request_payload.sg_cnt, job->request_payload.payload_len); + + switch (bsg_req->msgcode) { + case SSSRAID_BSG_ADM: + ret = sssraid_user_admin_cmd(sdioc, job); + break; + case SSSRAID_BSG_IOQ: + ret = sssraid_user_ioq_cmd(sdioc, job); + break; + default: + ioc_info(sdioc, "bsg unsupport msgcode[%d]\n", + bsg_req->msgcode); + break; + } + + if (ret > 0) + ret = ret | (ret << 8); + + bsg_job_done(job, ret, 0); + return 0; +} + +static void sssraid_drain_pending_ios(struct sssraid_ioc *sdioc); +void sssraid_back_all_io(struct sssraid_ioc *sdioc) +{ + int i, j; + + scsi_block_requests(sdioc->shost); + + sssraid_drain_pending_ios(sdioc); + + scsi_unblock_requests(sdioc->shost); + + j = SSSRAID_AMDQ_BLK_MQ_DEPTH; + for (i = 0; i < j; i++) { + if (READ_ONCE(sdioc->adm_cmds[i].state) == + SSSRAID_CMDSTAT_FLIGHT) { + ioc_info(sdioc, "backup adm, cid[%d]\n", i); + sdioc->adm_cmds[i].status = 0xFFFF; + WRITE_ONCE(sdioc->adm_cmds[i].state, + SSSRAID_CMDSTAT_COMPLETE); + complete(&(sdioc->adm_cmds[i].cmd_done)); + } + } + + j = SSSRAID_NR_IOQ_PTCMDS; + for (i = 0; i < j; i++) { + if (READ_ONCE(sdioc->ioq_ptcmds[i].state) == + SSSRAID_CMDSTAT_FLIGHT) { + sdioc->ioq_ptcmds[i].status = 0xFFFF; + WRITE_ONCE(sdioc->ioq_ptcmds[i].state, + SSSRAID_CMDSTAT_COMPLETE); + complete(&(sdioc->ioq_ptcmds[i].cmd_done)); + } + } +} + +/* + * static struct scsi_host_template sssraid_driver_template + */ +static int sssraid_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct sssraid_ioc *sdioc = shost_priv(shost); + + sssraid_scan_disk(sdioc); + + return 1; +} + +/* eh_target_reset_handler call back */ +static int sssraid_eh_target_reset(struct scsi_cmnd *scmd) +{ + return sssraid_scsi_reset(scmd, SSSRAID_RESET_TARGET); +} + +/* eh_bus_reset_handler call back */ +static int sssraid_bus_reset_handler(struct scsi_cmnd *scmd) +{ + return sssraid_scsi_reset(scmd, SSSRAID_RESET_BUS); +} + +/* eh_host_reset_handler call back */ +static int sssraid_eh_host_reset(struct scsi_cmnd *scmd) +{ + struct sssraid_ioc *sdioc = shost_priv(scmd->device->host); + + if (sdioc->state != SSSRAID_LIVE) + return SUCCESS; + + ioc_warn(sdioc, "sdev[%d:%d] send host reset\n", scmd->device->channel, + scmd->device->id); + + /* It's useless: + * old code sssraid_reset_work_sync + * queue_work(reset_work) at first, + * then flush_work to synchronize. 
+ */ + if (!sssraid_change_host_state(sdioc, SSSRAID_RESETTING)) { + ioc_info(sdioc, "can't change to reset state\n"); + return FAILED; + } + if (sssraid_soft_reset_handler(sdioc)) { + ioc_warn(sdioc, "warn: sdev[%d:%d] host reset failed\n", + scmd->device->channel, scmd->device->id); + return FAILED; + } + + ioc_warn(sdioc, "sdev[%d:%d] host reset success\n", + scmd->device->channel, scmd->device->id); + + return SUCCESS; +} + +/* host_reset call back */ +static int sssraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type) +{ + int ret; + struct sssraid_ioc *sdioc = shost_priv(shost); + + ioc_info(sdioc, "start sysfs host reset cmd\n"); + if (!sssraid_change_host_state(sdioc, SSSRAID_RESETTING)) { + ioc_info(sdioc, "can't change to reset state\n"); + return -EBUSY; + } + ret = sssraid_soft_reset_handler(sdioc); + ioc_info(sdioc, "stop sysfs host reset cmd[%d]\n", ret); + + return ret; +} + +static int sssraid_map_queues(struct Scsi_Host *shost) +{ + struct sssraid_ioc *sdioc = shost_priv(shost); + struct pci_dev *pdev = sdioc->pdev; + struct msi_desc *entry = NULL; + struct blk_mq_tag_set *tag_set = &shost->tag_set; + unsigned int *map = tag_set->mq_map; + const struct cpumask *node_mask = NULL; + unsigned int nr_queues = tag_set->nr_hw_queues; + unsigned int node_id, node_id_last = 0xFFFFFFFF; + int cpu, queue = 0; + u8 node_count = 0, i; + unsigned int node_id_array[100]; + + for_each_pci_msi_entry(entry, pdev) { + struct list_head *msi_list = &pdev->dev.msi_list; + + if (list_is_last(msi_list, &entry->list)) + goto get_next_numa_node; + + if (entry->irq) { + node_mask = entry->affinity; + + cpu = cpumask_first(node_mask); + node_id = cpu_to_node(cpu); + if (node_id_last == node_id) + continue; + + for (i = 0; i < node_count; i++) { + if (node_id == node_id_array[i]) + goto get_next_numa_node; + } + node_id_array[node_count++] = node_id; + node_id_last = node_id; + } +get_next_numa_node: + continue; + } + + for (i = 0; i < node_count; i++) { + node_mask = cpumask_of_node(node_id_array[i]); + dbgprint(sdioc, "NUMA_node = %d\n", node_id_array[i]); + for_each_cpu(cpu, node_mask) { + map[cpu] = (queue < nr_queues) ? 
queue++ : 0; + dbgprint(sdioc, "map[%d] = %d\n", cpu, map[cpu]); + } + } + + return 0; +} + +/* queuecommand call back */ +static int sssraid_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct sssraid_iod *iod = scsi_cmd_priv(scmd); + struct sssraid_ioc *sdioc = shost_priv(shost); + struct scsi_device *sdev = scmd->device; + struct sssraid_sdev_hostdata *hostdata; + u16 hwq, cid; + struct sssraid_squeue *sq; + struct sssraid_ioq_command ioq_cmd; + int retval; + + if (unlikely(sdioc->state == SSSRAID_RESETTING)) + return SCSI_MLQUEUE_HOST_BUSY; + + if (unlikely(sdioc->state != SSSRAID_LIVE)) { + set_host_byte(scmd, DID_NO_CONNECT); + scmd->scsi_done(scmd); + return 0; + } + + if (unlikely(sdioc->logging_level & SSSRAID_DEBUG)) + scsi_print_command(scmd); + + hostdata = sdev->hostdata; + sssraid_get_tag_from_scmd(scmd, &hwq, sdioc, &cid, hostdata); + sq = &sdioc->sqinfo[hwq]; + + if (unlikely(atomic_inc_return(&sq->inflight) > + (sdioc->ioq_depth - SSSRAID_PTCMDS_PERQ))) { + atomic_dec(&sq->inflight); + return SCSI_MLQUEUE_HOST_BUSY; + } + + memset(&ioq_cmd, 0, sizeof(ioq_cmd)); + ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid); + ioq_cmd.rw.command_id = cpu_to_le16(cid); + + retval = sssraid_setup_ioq_cmd(sdioc, &ioq_cmd, scmd); + if (unlikely(retval)) { + set_host_byte(scmd, DID_ERROR); + atomic_dec(&sq->inflight); + scmd->scsi_done(scmd); + return 0; + } + + retval = cid * SCSI_SENSE_BUFFERSIZE; + if (work_mode) { + iod->sense = sdioc->senses + retval; + iod->sense_dma = sdioc->sense_dma_addr + retval; + } else { + iod->sense = sq->sense + retval; + iod->sense_dma = sq->sense_dma_addr + retval; + } + + ioq_cmd.common.sense_addr = cpu_to_le64(iod->sense_dma); + ioq_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE); + + sssraid_init_iod(iod); + + iod->sqinfo = sq; + iod->cid = cid; + retval = sssraid_io_map_data(sdioc, iod, scmd, &ioq_cmd); + if (unlikely(retval)) { + ioc_err(sdioc, "err: io map data fail.\n"); + set_host_byte(scmd, DID_ERROR); + scmd->scsi_done(scmd); + retval = 0; + goto deinit_iod; + } + + WRITE_ONCE(iod->state, SSSRAID_CMDSTAT_FLIGHT); + sssraid_submit_cmd(sq, &ioq_cmd); + + return 0; + +deinit_iod: + atomic_dec(&sq->inflight); + sssraid_free_iod_res(sdioc, iod); + return retval; +} + +/* change_queue_depth call back: + * keep as old + */ + +/* slave_configure call back */ +static int sssraid_slave_configure(struct scsi_device *sdev) +{ + int qd = MAX_CMD_PER_DEV; + unsigned int timeout = scmd_tmout_rawdisk * HZ; + struct sssraid_ioc *sdioc = shost_priv(sdev->host); + struct sssraid_sdev_hostdata *hostdata = sdev->hostdata; + u32 max_sec = sdev->host->max_sectors; + + if (hostdata) { + if (SSSRAID_DISK_INFO_ATTR_VD(hostdata->attr)) + timeout = scmd_tmout_vd * HZ; + else if (SSSRAID_DISK_INFO_ATTR_RAW(hostdata->attr)) + timeout = scmd_tmout_rawdisk * HZ; + max_sec = hostdata->max_io_kb << 1; + qd = sssraid_get_qd_by_disk(hostdata->attr); + + if (sssraid_disk_is_hdd(hostdata->attr)) + hostdata->hwq = + hostdata->hdid % + (sdioc->init_done_queue_cnt - 1) + + 1; + else + hostdata->hwq = 0; + } else { + ioc_err(sdioc, "err: scsi dev hostdata is null\n"); + } + + blk_queue_rq_timeout(sdev->request_queue, timeout); + sdev->eh_timeout = timeout; + scsi_change_queue_depth(sdev, qd); + + if ((max_sec == 0) || (max_sec > sdev->host->max_sectors)) + max_sec = sdev->host->max_sectors; + + blk_queue_max_hw_sectors(sdev->request_queue, max_sec); + + ioc_info( + sdioc, + "scsi dev channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n", + sdev->channel, 
sdev->id, sdev->lun, timeout / HZ, max_sec); + + return 0; +} + +static void sssraid_clean_pending_io(struct request *rq, void *data, + bool reserved) +{ + struct sssraid_ioc *sdioc = data; + struct scsi_cmnd *scmd; + struct sssraid_iod *iod; + + if (unlikely(!rq || !blk_mq_request_started(rq))) + return; + + scmd = blk_mq_rq_to_pdu(rq); + iod = scsi_cmd_priv(scmd); + + if ((cmpxchg(&iod->state, SSSRAID_CMDSTAT_FLIGHT, + SSSRAID_CMDSTAT_COMPLETE) != SSSRAID_CMDSTAT_FLIGHT) && + (cmpxchg(&iod->state, SSSRAID_CMDSTAT_TIMEOUT, + SSSRAID_CMDSTAT_COMPLETE) != SSSRAID_CMDSTAT_TIMEOUT)) + return; + + set_host_byte(scmd, DID_NO_CONNECT); + if (iod->nsge) + scsi_dma_unmap(scmd); + sssraid_free_iod_res(sdioc, iod); + dev_warn_ratelimited(&sdioc->pdev->dev, + "back unfinished CQE, cid[%d] qid[%d]\n", iod->cid, + iod->sqinfo->qidx); + scmd->scsi_done(scmd); +} + +static void sssraid_drain_pending_ios(struct sssraid_ioc *sdioc) +{ + blk_mq_tagset_busy_iter(&sdioc->shost->tag_set, + sssraid_clean_pending_io, (void *)(sdioc)); +} + +/* slave_alloc call back */ +static int sssraid_slave_alloc(struct scsi_device *sdev) +{ + struct sssraid_sdev_hostdata *hostdata; + struct sssraid_ioc *sdioc; + u16 idx; + + sdioc = shost_priv(sdev->host); + hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); + if (!hostdata) { + ioc_err(sdioc, "err: alloc scsi host data failed\n"); + return -ENOMEM; + } + + down_read(&sdioc->devices_rwsem); + for (idx = 0; idx < le32_to_cpu(sdioc->ctrl_info->nd); idx++) { + if (sssraid_match_dev(sdioc, idx, sdev)) + goto scan_host; + } + up_read(&sdioc->devices_rwsem); + + kfree(hostdata); + return -ENXIO; + +scan_host: + hostdata->hdid = le32_to_cpu(sdioc->devices[idx].hdid); + hostdata->max_io_kb = le16_to_cpu(sdioc->devices[idx].max_io_kb); + hostdata->attr = sdioc->devices[idx].attr; + hostdata->flag = sdioc->devices[idx].flag; + hostdata->rg_id = 0xff; + sdev->hostdata = hostdata; + up_read(&sdioc->devices_rwsem); + return 0; +} + +/* slave_destroy call back */ +static void sssraid_slave_destroy(struct scsi_device *sdev) +{ + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/* eh_timed_out call back */ +static enum blk_eh_timer_return sssraid_scmd_timeout(struct scsi_cmnd *scmd) +{ + struct sssraid_iod *iod = scsi_cmd_priv(scmd); + unsigned int timeout = scmd->device->request_queue->rq_timeout; + + if (sssraid_check_scmd_completed(scmd)) + goto out; + + if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) { + if (cmpxchg(&iod->state, SSSRAID_CMDSTAT_FLIGHT, + SSSRAID_CMDSTAT_TIMEOUT) == + SSSRAID_CMDSTAT_FLIGHT) { + return BLK_EH_DONE; + } + } +out: + return BLK_EH_RESET_TIMER; +} + +/* eh_abort_handler call back */ +static int sssraid_abort_handler(struct scsi_cmnd *scmd) +{ + struct sssraid_ioc *sdioc = shost_priv(scmd->device->host); + struct sssraid_iod *iod = scsi_cmd_priv(scmd); + struct sssraid_sdev_hostdata *hostdata; + u16 hwq, cid; + int ret; + + scsi_print_command(scmd); + + if (sdioc->state != SSSRAID_LIVE || !sssraid_wait_abnl_cmd_done(iod) || + sssraid_check_scmd_completed(scmd)) + return SUCCESS; + + hostdata = scmd->device->hostdata; + cid = iod->cid; + hwq = iod->sqinfo->qidx; + + ioc_warn(sdioc, "warn: cid[%d] qidx[%d] timeout, aborting\n", cid, hwq); + ret = sssraid_send_abort_cmd(sdioc, hostdata->hdid, hwq, cid); + if (ret != -ETIME) { + ret = sssraid_wait_abnl_cmd_done(iod); + if (ret) { + ioc_warn(sdioc, "warn: cid[%d] qidx[%d] abort failed\n", + cid, hwq); + return FAILED; + } + ioc_warn(sdioc, "cid[%d] qidx[%d] abort success\n", cid, hwq); + return 
SUCCESS; + } + ioc_warn(sdioc, "warn: cid[%d] qidx[%d] abort failed, timeout\n", cid, + hwq); + return FAILED; +} + +static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(sdioc->pdev)) { + ret = (readl(sdioc->bar + SSSRAID_REG_CSTS) & + SSSRAID_CSTS_PP_MASK); + ret >>= SSSRAID_CSTS_PP_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_shst_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(sdioc->pdev)) { + ret = (readl(sdioc->bar + SSSRAID_REG_CSTS) & + SSSRAID_CSTS_SHST_MASK); + ret >>= SSSRAID_CSTS_SHST_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(sdioc->pdev)) { + ret = (readl(sdioc->bar + SSSRAID_REG_CSTS) & + SSSRAID_CSTS_CFS_MASK); + ret >>= SSSRAID_CSTS_CFS_SHIFT; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + int ret = -1; + + if (pci_device_is_present(sdioc->pdev)) + ret = (readl(sdioc->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", sdioc->ctrl_info->fr); +} + +static ssize_t hdd_dispatch_store(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + if (val < DISPATCH_BY_CPU || val > DISPATCH_BY_DISK) + return -EINVAL; + sdioc->hdd_dispatch = val; + + return strlen(buf); +} + +static ssize_t hdd_dispatch_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sssraid_ioc *sdioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", sdioc->hdd_dispatch); +} + +static ssize_t can_queue_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct blk_mq_tag_set *tag_set = &shost->tag_set; + + return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues + 1); +} + +static DEVICE_ATTR_RO(csts_pp); +static DEVICE_ATTR_RO(csts_shst); +static DEVICE_ATTR_RO(csts_cfs); +static DEVICE_ATTR_RO(csts_rdy); +static DEVICE_ATTR_RO(fw_version); +static DEVICE_ATTR_RW(hdd_dispatch); +static DEVICE_ATTR_RO(can_queue_count); + +static struct device_attribute *sssraid_host_attrs[] = { + &dev_attr_csts_pp, &dev_attr_csts_shst, + &dev_attr_csts_cfs, &dev_attr_csts_rdy, + &dev_attr_fw_version, &dev_attr_hdd_dispatch, + &dev_attr_can_queue_count, NULL, +}; + +static int sssraid_get_vd_info(struct sssraid_ioc 
*sdioc, + struct sssraid_vd_info *vd_info, u16 vid) +{ + struct sssraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int ret; + + if (sdioc->state >= SSSRAID_RESETTING) { + ioc_err(sdioc, "err: host state:%d invalid\n", sdioc->state); + return -EBUSY; + } + + data_ptr = dma_alloc_coherent(&sdioc->pdev->dev, PAGE_SIZE, &data_dma, + GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN); + admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(vd_info, data_ptr, sizeof(struct sssraid_vd_info)); + + dma_free_coherent(&sdioc->pdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return ret; +} + +static int sssraid_get_bgtask(struct sssraid_ioc *sdioc, + struct sssraid_bgtask *bgtask) +{ + struct sssraid_admin_command admin_cmd; + u8 *data_ptr = NULL; + dma_addr_t data_dma = 0; + int ret; + + if (sdioc->state >= SSSRAID_RESETTING) { + ioc_err(sdioc, "err: host state:%d invalid\n", sdioc->state); + return -EBUSY; + } + + data_ptr = dma_alloc_coherent(&sdioc->pdev->dev, PAGE_SIZE, &data_dma, + GFP_KERNEL); + if (!data_ptr) + return -ENOMEM; + + memset(&admin_cmd, 0, sizeof(admin_cmd)); + admin_cmd.usr_cmd.opcode = USR_CMD_READ; + admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK); + admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN); + admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma); + + ret = sssraid_submit_admin_sync_cmd(sdioc, &admin_cmd, NULL, NULL, 0); + if (!ret) + memcpy(bgtask, data_ptr, sizeof(struct sssraid_bgtask)); + + dma_free_coherent(&sdioc->pdev->dev, PAGE_SIZE, data_ptr, data_dma); + + return ret; +} + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct sssraid_ioc *sdioc; + struct sssraid_vd_info *vd_info; + struct sssraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + sdioc = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SSSRAID_DISK_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = sssraid_get_vd_info(sdioc, vd_info, sdev->id); + if (ret) + vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1; + + ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ? + vd_info->rg_level : + (ARRAY_SIZE(raid_levels) - 1); + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]); +} + +static ssize_t raid_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct sssraid_ioc *sdioc; + struct sssraid_vd_info *vd_info; + struct sssraid_sdev_hostdata *hostdata; + int ret; + + sdev = to_scsi_device(dev); + sdioc = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SSSRAID_DISK_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = sssraid_get_vd_info(sdioc, vd_info, sdev->id); + if (ret) { + vd_info->vd_status = 0; + vd_info->rg_id = 0xff; + } + + ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? 
+ vd_info->vd_status : + 0; + + kfree(vd_info); + + return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]); +} + +static ssize_t raid_resync_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + struct sssraid_ioc *sdioc; + struct sssraid_vd_info *vd_info; + struct sssraid_bgtask *bgtask; + struct sssraid_sdev_hostdata *hostdata; + u8 rg_id, i, progress = 0; + int ret; + + sdev = to_scsi_device(dev); + sdioc = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL); + if (!vd_info || !SSSRAID_DISK_INFO_ATTR_VD(hostdata->attr)) + return snprintf(buf, PAGE_SIZE, "NA\n"); + + ret = sssraid_get_vd_info(sdioc, vd_info, sdev->id); + if (ret) + goto out; + + rg_id = vd_info->rg_id; + + bgtask = (struct sssraid_bgtask *)vd_info; + ret = sssraid_get_bgtask(sdioc, bgtask); + if (ret) + goto out; + for (i = 0; i < bgtask->task_num; i++) { + if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) && + (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id)) + progress = bgtask->bgtask[i].progress; + } + +out: + kfree(vd_info); + return snprintf(buf, PAGE_SIZE, "%d\n", progress); +} + +static ssize_t dispatch_hwq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sssraid_sdev_hostdata *hostdata; + + hostdata = to_scsi_device(dev)->hostdata; + return snprintf(buf, PAGE_SIZE, "%d\n", hostdata->hwq); +} + +static ssize_t dispatch_hwq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + struct sssraid_ioc *sdioc; + struct scsi_device *sdev; + struct sssraid_sdev_hostdata *hostdata; + + sdev = to_scsi_device(dev); + sdioc = shost_priv(sdev->host); + hostdata = sdev->hostdata; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + if (val <= 0 || val >= sdioc->init_done_queue_cnt) + return -EINVAL; + if (!sssraid_disk_is_hdd(hostdata->attr)) + return -EINVAL; + + hostdata->hwq = val; + return strlen(buf); +} + +static DEVICE_ATTR_RO(raid_level); +static DEVICE_ATTR_RO(raid_state); +static DEVICE_ATTR_RO(raid_resync); +static DEVICE_ATTR_RW(dispatch_hwq); + +static struct device_attribute *sssraid_dev_attrs[] = { + &dev_attr_raid_level, + &dev_attr_raid_state, + &dev_attr_raid_resync, + &dev_attr_dispatch_hwq, + NULL, +}; + +static struct scsi_host_template sssraid_driver_template = { + .module = THIS_MODULE, + .name = "3SNIC Logic sssraid driver", + .proc_name = "sssraid", + .queuecommand = sssraid_qcmd, + .map_queues = sssraid_map_queues, + .slave_alloc = sssraid_slave_alloc, + .slave_destroy = sssraid_slave_destroy, + .slave_configure = sssraid_slave_configure, + .scan_finished = sssraid_scan_finished, + .eh_timed_out = sssraid_scmd_timeout, + .eh_abort_handler = sssraid_abort_handler, + .eh_target_reset_handler = sssraid_eh_target_reset, + .eh_bus_reset_handler = sssraid_bus_reset_handler, + .eh_host_reset_handler = sssraid_eh_host_reset, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .unchecked_isa_dma = 0, + .shost_attrs = sssraid_host_attrs, + .sdev_attrs = sssraid_dev_attrs, + .host_reset = sssraid_sysfs_host_reset, +}; + +static inline void sssraid_remove_bsg(struct sssraid_ioc *sdioc) +{ + if (sdioc->bsg_queue) { + bsg_unregister_queue(sdioc->bsg_queue); + blk_cleanup_queue(sdioc->bsg_queue); + } +} + +/** + * sssraid_probe - PCI probe callback + * @pdev: PCI device instance + * @id: PCI device ID details + * + * controller initialization routine. 
+ * Allocate per adapter instance through shost_priv and + initialize controller specific data structures, initialize + the controller hardware, add shost to the SCSI subsystem. + * + * Return: 0 on success, non-zero on failure. + */ + +static int sssraid_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct sssraid_ioc *sdioc; + struct Scsi_Host *shost; + int node; + char bsg_name[BSG_NAME_SIZE]; + int retval = 0; + + node = dev_to_node(&pdev->dev); + if (node == NUMA_NO_NODE) { + node = first_memory_node; + set_dev_node(&pdev->dev, node); + } + + shost = scsi_host_alloc(&sssraid_driver_template, sizeof(*sdioc)); + if (!shost) { + retval = -ENODEV; + dev_err(&pdev->dev, "err: failed to allocate scsi host\n"); + goto shost_failed; + } + + sdioc = shost_priv(shost); + sdioc->numa_node = node; + sdioc->instance = shost->host_no; /* for device instance */ + snprintf(sdioc->name, sizeof(sdioc->name), "%s%d", SSSRAID_DRIVER_NAME, + sdioc->instance); + + init_rwsem(&sdioc->devices_rwsem); + spin_lock_init(&sdioc->state_lock); + + spin_lock_init(&sdioc->fwevt_lock); + spin_lock_init(&sdioc->watchdog_lock); + + INIT_LIST_HEAD(&sdioc->fwevt_list); + + sdioc->logging_level = logging_level; /* according to log_debug_switch*/ + + snprintf(sdioc->fwevt_worker_name, sizeof(sdioc->fwevt_worker_name), + "%s%d_fwevt_wrkr", SSSRAID_DRIVER_NAME, sdioc->instance); + sdioc->fwevt_worker_thread = alloc_ordered_workqueue( + sdioc->fwevt_worker_name, WQ_MEM_RECLAIM); + if (!sdioc->fwevt_worker_thread) { + ioc_err(sdioc, + "err: fail to alloc workqueue for fwevt_work!\n"); + retval = -ENODEV; + goto out_fwevtthread_failed; + } + + sdioc->shost = shost; + sdioc->pdev = pdev; + + if (sssraid_init_ioc(sdioc, 0)) { + ioc_err(sdioc, "err: failure at init sssraid_ioc!\n"); + retval = -ENODEV; + goto out_iocinit_failed; + } + + sssraid_shost_init(sdioc); + + retval = scsi_add_host(shost, &pdev->dev); + if (retval) { + ioc_err(sdioc, "err: add shost to system failed!\n"); + goto addhost_failed; + } + + snprintf(bsg_name, sizeof(bsg_name), "%s%d", SSSRAID_DRIVER_NAME, + shost->host_no); + sdioc->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, + sssraid_bsg_host_dispatch, + sssraid_cmd_size(sdioc)); + if (IS_ERR(sdioc->bsg_queue)) { + ioc_err(sdioc, "err: setup bsg failed!\n"); + sdioc->bsg_queue = NULL; + goto bsg_setup_failed; + } + + if (!sssraid_change_host_state(sdioc, SSSRAID_LIVE)) { + retval = -ENODEV; + ioc_err(sdioc, "err: change host state failed!\n"); + goto sssraid_state_change_failed; + } + + scsi_scan_host(shost); + return retval; + +sssraid_state_change_failed: + sssraid_remove_bsg(sdioc); +bsg_setup_failed: + scsi_remove_host(shost); +addhost_failed: + sssraid_cleanup_ioc(sdioc, 0); +out_iocinit_failed: + destroy_workqueue(sdioc->fwevt_worker_thread); +out_fwevtthread_failed: + scsi_host_put(shost); +shost_failed: + return retval; +} + +static void sssraid_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc = NULL; + + if (!shost) { + dev_err(&pdev->dev, + "driver probe process failed, remove is not allowed.\n"); + return; + } + sdioc = shost_priv(shost); + + ioc_info(sdioc, "sssraid remove entry\n"); + sssraid_change_host_state(sdioc, SSSRAID_DELETING); + + if (!pci_device_is_present(pdev)) + sssraid_back_all_io(sdioc); + + sssraid_cleanup_fwevt_list(sdioc); + destroy_workqueue(sdioc->fwevt_worker_thread); + + sssraid_remove_bsg(sdioc); + scsi_remove_host(shost); + sssraid_cleanup_ioc(sdioc, 0); + +
scsi_host_put(shost); +} + +static void sssraid_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + + if (!shost) + return; + + sdioc = shost_priv(shost); + + sssraid_cleanup_fwevt_list(sdioc); + destroy_workqueue(sdioc->fwevt_worker_thread); + sssraid_cleanup_ioc(sdioc, 0); +} + +#ifdef CONFIG_PM +static int sssraid_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + pci_power_t device_state; + + if (!shost) + return 0; + + sdioc = shost_priv(shost); + + while (sdioc->state == SSSRAID_RESETTING) + ssleep(1); + sssraid_cleanup_fwevt_list(sdioc); + scsi_block_requests(shost); + sssraid_cleanup_ioc(sdioc, 1); + + device_state = pci_choose_state(pdev, state); + pci_save_state(pdev); + pci_set_power_state(pdev, device_state); + + return 0; +} + +static int sssraid_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + pci_power_t device_state = pdev->current_state; + + if (!shost) + return 0; + + sdioc = shost_priv(shost); + + ioc_info(sdioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + pdev, pci_name(pdev), device_state); + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + sdioc->pdev = pdev; + sdioc->cpu_count = num_online_cpus(); + + /* sssraid_setup_resources in sssraid_init_ioc */ + sssraid_init_ioc(sdioc, 1); + scsi_unblock_requests(shost); + + return 0; +} +#endif + +static pci_ers_result_t sssraid_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + + if (!shost) + return PCI_ERS_RESULT_NONE; + + sdioc = shost_priv(shost); + + ioc_info(sdioc, "pci error detect entry, state:%d\n", state); + + switch (state) { + case pci_channel_io_normal: + ioc_warn(sdioc, "pci channel is normal, do nothing\n"); + + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + ioc_warn(sdioc, + "pci channel io frozen, need reset controller\n"); + + scsi_block_requests(sdioc->shost); + + sssraid_change_host_state(sdioc, SSSRAID_RESETTING); + + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + ioc_warn(sdioc, "pci channel io failure, request disconnect\n"); + + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t sssraid_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + + if (!shost) + return PCI_ERS_RESULT_NONE; + + sdioc = shost_priv(shost); + + ioc_info(sdioc, "restart after pci slot reset\n"); + + pci_restore_state(pdev); + + sssraid_soft_reset_handler(sdioc); + + scsi_unblock_requests(sdioc->shost); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void sssraid_reset_done(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct sssraid_ioc *sdioc; + + if (!shost) + return; + + sdioc = shost_priv(shost); + + ioc_info(sdioc, "sssraid reset exit\n"); +} + +static struct pci_error_handlers sssraid_err_handler = { + .error_detected = sssraid_pci_error_detected, + .slot_reset = sssraid_pci_slot_reset, + .reset_done = sssraid_reset_done, +}; + +static const struct pci_device_id sssraid_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, + SSSRAID_SERVER_DEVICE_HBA_DID) }, + { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, + SSSRAID_SERVER_DEVICE_RAID_DID) }, + { + 0, 
+ } +}; +MODULE_DEVICE_TABLE(pci, sssraid_pci_id_table); + +static struct pci_driver sssraid_pci_driver = { + .name = SSSRAID_DRIVER_NAME, + .id_table = sssraid_pci_id_table, + .probe = sssraid_probe, + .remove = sssraid_remove, + .shutdown = sssraid_shutdown, +#ifdef CONFIG_PM + .suspend = sssraid_suspend, + .resume = sssraid_resume, +#endif + .err_handler = &sssraid_err_handler, +}; + +static int __init sssraid_init(void) +{ + int ret_val; + + pr_info("Loading %s version %s\n", SSSRAID_DRIVER_NAME, + SSSRAID_DRIVER_VERSION); + + sssraid_class = class_create(THIS_MODULE, "sssraid"); + if (IS_ERR(sssraid_class)) { + ret_val = PTR_ERR(sssraid_class); + return ret_val; + } + + ret_val = pci_register_driver(&sssraid_pci_driver); + + return ret_val; +} + +static void __exit sssraid_exit(void) +{ + pci_unregister_driver(&sssraid_pci_driver); + class_destroy(sssraid_class); + + pr_info("Unloading %s version %s\n", SSSRAID_DRIVER_NAME, + SSSRAID_DRIVER_VERSION); +} + +MODULE_AUTHOR("steven.song@3snic.com"); +MODULE_DESCRIPTION("3SNIC Information Technology SSSRAID Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SSSRAID_DRIVER_VERSION); +module_init(sssraid_init); +module_exit(sssraid_exit); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 307df2fa39a3821a378dc8f96a8796b79032c8d2..5078db7743cd08139b105cd48e46f936c36245cb 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1265,8 +1265,8 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); - scsi_tape_put(STp); DEBC_printk(STp, "Device already in use.\n"); + scsi_tape_put(STp); return (-EBUSY); } diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 9b20643ab49de59fee1ee2d90e9a433d3fb1a4f7..124a5d0ec05cad5bb5c907a01315837e6d7d6f95 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -673,16 +673,17 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { - struct st_drvver ver; + const struct st_drvver ver = { + .major = ST_VER_MAJOR, + .minor = ST_VER_MINOR, + .oem = ST_OEM, + .build = ST_BUILD_VER, + .signature[0] = PASSTHRU_SIGNATURE, + .console_id = host->max_id - 1, + .host_no = hba->host->host_no, + }; size_t cp_len = sizeof(ver); - ver.major = ST_VER_MAJOR; - ver.minor = ST_VER_MINOR; - ver.oem = ST_OEM; - ver.build = ST_BUILD_VER; - ver.signature[0] = PASSTHRU_SIGNATURE; - ver.console_id = host->max_id - 1; - ver.host_no = hba->host->host_no; cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); cmd->result = sizeof(ver) == cp_len ? DID_OK << 16 | COMMAND_COMPLETE << 8 : diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f03dc03a42c35c31eb91ec2af88f18ddf98069de..0c2ba075bc713c80e2e25b4733ba634d666222a6 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -664,13 +664,22 @@ static void handle_sc_creation(struct vmbus_channel *new_sc) static void handle_multichannel_storage(struct hv_device *device, int max_chns) { struct storvsc_device *stor_device; - int num_cpus = num_online_cpus(); int num_sc; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, t; - num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); + /* + * If the number of CPUs is artificially restricted, such as + * with maxcpus=1 on the kernel boot line, Hyper-V could offer + * sub-channels >= the number of CPUs. These sub-channels + * should not be created. 
The primary channel is already created + * and assigned to one CPU, so check against # CPUs - 1. + */ + num_sc = min((int)(num_online_cpus() - 1), max_chns); + if (!num_sc) + return; + stor_device = get_out_stor_device(device); if (!stor_device) return; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 9492638296c86183d01bae23e5e5cabbac6a2f10..af8a7ef9c858c43987007cba34db6c0cb9379c0a 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -498,7 +498,7 @@ static struct scsi_host_template sun3_scsi_template = { .eh_host_reset_handler = sun3scsi_host_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = SG_NONE, + .sg_tablesize = 1, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .cmd_size = NCR5380_CMD_SIZE, @@ -520,7 +520,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) sun3_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) sun3_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) sun3_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index bd3f6e2d68344a008ca7dbca2232409f0b668b1d..0a2a54517b151d8d1451ba85b5d7b17366d4e6d2 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym OUTB(np, HS_PRT, HS_BUSY); } +#define sym_printk(lvl, tp, cp, fmt, v...) do { \ + if (cp) \ + scmd_printk(lvl, cp->cmd, fmt, ##v); \ + else \ + starget_printk(lvl, tp->starget, fmt, ##v); \ +} while (0) + /* * chip exception handler for programmed interrupts. */ @@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np) * been selected with ATN. We do not want to handle that. */ case SIR_SEL_ATN_NO_MSG_OUT: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No MSG OUT phase after selection with ATN\n"); goto out_stuck; /* @@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np) * having reselected the initiator. */ case SIR_RESEL_NO_MSG_IN: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No MSG IN phase after reselection\n"); goto out_stuck; /* @@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np) * an IDENTIFY. 
*/ case SIR_RESEL_NO_IDENTIFY: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No IDENTIFY after reselection\n"); goto out_stuck; /* @@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np) case SIR_RESEL_ABORTED: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "message %x sent on bad reselection\n", np->lastmsg); goto out; /* diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index 2f41722a8c28dc064bd3e5523be420ecf89671db..2c6cb7f6b61a6afd9d10de570bfe3bd21c4ec3a0 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -138,7 +138,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 46df707e6f2c0404a3c1c4b5f92a76e03feee622..452e19f8fb47027ab4c264f60da67b69eafdbf49 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -20,6 +20,7 @@ #include "unipro.h" #include "ufs-hisi.h" #include "ufshci.h" +#include "ufs_quirks.h" static int ufs_hisi_check_hibern8(struct ufs_hba *hba) { @@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param) static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) { + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { + pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n"); + /* VS_DebugSaveConfigTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10); + /* sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48); + } + /* update */ ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); /* PA_TxSkip */ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 14e5bf7af0bb1c82ff8c47f3284eb48154950036..c3bcaaec0fc5c2113716a8de0667418424139031 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -195,7 +195,7 @@ enum ufs_desc_def_size { QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, QUERY_DESC_UNIT_DEF_SIZE = 0x23, QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, QUERY_DESC_POWER_DEF_SIZE = 0x62, QUERY_DESC_HEALTH_DEF_SIZE = 0x25, }; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 71f73d1d1ad1fb9b7c357c65baf0f49ec9dfa780..5d2dfdb41a6ffcc6c20a88189c070a3917d8d4e3 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -131,4 +131,10 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) +/* + * Some UFS devices require VS_DebugSaveConfigTime is 0x10, + * enabling this quirk ensure this. 
+ */ +#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9) + #endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 895a9b5ac98993ecac1c7c3ff2621548b3dd6cea..57985841a879e3763c0dc6ef2b7dd9937c589cb3 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -340,24 +340,19 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ufshcd_init_lanes_per_dir(hba); err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Initialization failed\n"); - goto out_disable_rpm; + goto dealloc_host; } - platform_set_drvdata(pdev, hba); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); return 0; -out_disable_rpm: - pm_runtime_disable(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); dealloc_host: ufshcd_dealloc_host(hba); out: diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c55f38ec391ca7d07e5ae1714168596f64acb125..2082907bea5008fded1594ecacba8d71a1c19b6e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -109,13 +109,19 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix) { - u8 *regs; + u32 *regs; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ + return -EINVAL; regs = kzalloc(len, GFP_KERNEL); if (!regs) return -ENOMEM; - memcpy_fromio(regs, hba->mmio_base + offset, len); + for (pos = 0; pos < len; pos += 4) + regs[pos / 4] = ufshcd_readl(hba, offset + pos); + ufshcd_hex_dump(prefix, regs, len); kfree(regs); @@ -230,6 +236,8 @@ static struct ufs_dev_fix ufs_fixups[] = { UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), + UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, + UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME), END_FIX }; @@ -1691,8 +1699,9 @@ static void __ufshcd_release(struct ufs_hba *hba) hba->clk_gating.state = REQ_CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - schedule_delayed_work(&hba->clk_gating.gate_work, - msecs_to_jiffies(hba->clk_gating.delay_ms)); + queue_delayed_work(hba->clk_gating.clk_gating_workq, + &hba->clk_gating.gate_work, + msecs_to_jiffies(hba->clk_gating.delay_ms)); } void ufshcd_release(struct ufs_hba *hba) @@ -1763,6 +1772,34 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev, return count; } +static void ufshcd_init_clk_scaling(struct ufs_hba *hba) +{ + char wq_name[sizeof("ufs_clkscaling_00")]; + + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + INIT_WORK(&hba->clk_scaling.suspend_work, + ufshcd_clk_scaling_suspend_work); + INIT_WORK(&hba->clk_scaling.resume_work, + ufshcd_clk_scaling_resume_work); + + snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", + hba->host->host_no); + hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); + + ufshcd_clkscaling_init_sysfs(hba); +} + +static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkscaling_supported(hba)) + return; + + destroy_workqueue(hba->clk_scaling.workq); + ufshcd_devfreq_remove(hba); +} + static void ufshcd_init_clk_gating(struct ufs_hba *hba) { char wq_name[sizeof("ufs_clk_gating_00")]; @@ -1905,7 +1942,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); /* Get the descriptor */ - if 
(lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + if (hba->dev_cmd.query.descriptor && + lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + GENERAL_UPIU_REQUEST_SIZE; u16 resp_len; @@ -2973,10 +3011,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - hba->dev_cmd.query.descriptor = NULL; *buf_len = be16_to_cpu(response->upiu_res.length); out_unlock: + hba->dev_cmd.query.descriptor = NULL; mutex_unlock(&hba->dev_cmd.lock); out: ufshcd_release(hba); @@ -3837,15 +3875,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) ktime_to_us(ktime_sub(ktime_get(), start)), ret); if (ret) { + int err; + dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", __func__, ret); /* - * If link recovery fails then return error so that caller - * don't retry the hibern8 enter again. + * If link recovery fails then return error code returned from + * ufshcd_link_recovery(). + * If link recovery succeeds then return -EAGAIN to attempt + * hibern8 enter retry again. */ - if (ufshcd_link_recovery(hba)) - ret = -ENOLINK; + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "%s: link recovery failed", __func__); + ret = err; + } else { + ret = -EAGAIN; + } } else ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, POST_CHANGE); @@ -3859,7 +3906,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { ret = __ufshcd_uic_hibern8_enter(hba); - if (!ret || ret == -ENOLINK) + if (!ret) goto out; } out: @@ -5705,19 +5752,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct Scsi_Host *host; struct ufs_hba *hba; - unsigned int tag; u32 pos; int err; - u8 resp = 0xF; - struct ufshcd_lrb *lrbp; + u8 resp = 0xF, lun; unsigned long flags; host = cmd->device->host; hba = shost_priv(host); - tag = cmd->request->tag; - lrbp = &hba->lrb[tag]; - err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); + lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); + err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { if (!err) err = resp; @@ -5726,7 +5770,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) /* clear the commands that were pending for corresponding LUN */ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lrbp->lun) { + if (hba->lrb[pos].lun == lun) { err = ufshcd_clear_cmd(hba, pos); if (err) break; @@ -6121,19 +6165,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, goto out; } - if (hba->vreg_info.vcc) + if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vcc->max_uA, POWER_DESC_MAX_ACTV_ICC_LVLS - 1, &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); - if (hba->vreg_info.vccq) + if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vccq->max_uA, icc_level, &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); - if (hba->vreg_info.vccq2) + if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA) icc_level = ufshcd_get_max_icc_level( hba->vreg_info.vccq2->max_uA, icc_level, @@ -6666,6 +6710,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) */ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { pm_runtime_put_sync(hba->dev); + ufshcd_exit_clk_scaling(hba); ufshcd_hba_exit(hba); } @@ -6758,6 +6803,15 @@ static int 
ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, if (!vreg) return 0; + /* + * "set_load" operation shall be required on those regulators + * which specifically configured current limitation. Otherwise + * zero max_uA may cause unexpected behavior when regulator is + * enabled or set as high power mode. + */ + if (!vreg->max_uA) + return 0; + ret = regulator_set_load(vreg->reg, ua); if (ret < 0) { dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", @@ -6804,12 +6858,15 @@ static int ufshcd_config_vreg(struct device *dev, name = vreg->name; if (regulator_count_voltages(reg) > 0) { - min_uV = on ? vreg->min_uV : 0; - ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); - if (ret) { - dev_err(dev, "%s: %s set voltage failed, err=%d\n", + if (vreg->min_uV && vreg->max_uV) { + min_uV = on ? vreg->min_uV : 0; + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); + if (ret) { + dev_err(dev, + "%s: %s set voltage failed, err=%d\n", __func__, name, ret); - goto out; + goto out; + } } uA_load = on ? vreg->max_uA : 0; @@ -7201,12 +7258,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); ufshcd_suspend_clkscaling(hba); - if (ufshcd_is_clkscaling_supported(hba)) { + if (ufshcd_is_clkscaling_supported(hba)) if (hba->devfreq) ufshcd_suspend_clkscaling(hba); - destroy_workqueue(hba->clk_scaling.workq); - ufshcd_devfreq_remove(hba); - } ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; @@ -7759,6 +7813,8 @@ int ufshcd_system_resume(struct ufs_hba *hba) trace_ufshcd_system_resume(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), hba->curr_dev_pwr_mode, hba->uic_link_state); + if (!ret) + hba->is_sys_suspended = false; return ret; } EXPORT_SYMBOL(ufshcd_system_resume); @@ -7850,6 +7906,9 @@ int ufshcd_shutdown(struct ufs_hba *hba) { int ret = 0; + if (!hba->is_powered) + goto out; + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; @@ -7881,6 +7940,7 @@ void ufshcd_remove(struct ufs_hba *hba) ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba, true); + ufshcd_exit_clk_scaling(hba); ufshcd_exit_clk_gating(hba); if (ufshcd_is_clkscaling_supported(hba)) device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); @@ -7972,6 +8032,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) struct Scsi_Host *host = hba->host; struct device *dev = hba->dev; + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). 
+ */ + dev_set_drvdata(dev, hba); + if (!mmio_base) { dev_err(hba->dev, "Invalid memory reference for mmio_base is NULL\n"); @@ -8052,6 +8119,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_init_clk_gating(hba); + ufshcd_init_clk_scaling(hba); + /* * In order to avoid any spurious interrupt immediately after * registering UFS controller interrupt handler, clear any pending UFS @@ -8090,21 +8159,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_remove_scsi_host; } - if (ufshcd_is_clkscaling_supported(hba)) { - char wq_name[sizeof("ufs_clkscaling_00")]; - - INIT_WORK(&hba->clk_scaling.suspend_work, - ufshcd_clk_scaling_suspend_work); - INIT_WORK(&hba->clk_scaling.resume_work, - ufshcd_clk_scaling_resume_work); - - snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d", - host->host_no); - hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); - - ufshcd_clkscaling_init_sysfs(hba); - } - /* * Set the default power management level for runtime and system PM. * Default power saving mode is to keep UFS link in Hibern8 state @@ -8142,6 +8196,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: + ufshcd_exit_clk_scaling(hba); ufshcd_exit_clk_gating(hba); out_disable: hba->is_irq_enabled = false; diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index 23129d7b2678dfea93b21f010c31a7aece9e9233..c77e365264478f3a74e1a28cdf0b6a881d5b991f 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -52,7 +52,7 @@ #define RX_HS_UNTERMINATED_ENABLE 0x00A6 #define RX_ENTER_HIBERN8 0x00A7 #define RX_BYPASS_8B10B_ENABLE 0x00A8 -#define RX_TERMINATION_FORCE_ENABLE 0x0089 +#define RX_TERMINATION_FORCE_ENABLE 0x00A9 #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F #define RX_HIBERN8TIME_CAPABILITY 0x0092 #define RX_REFCLKFREQ 0x00EB diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 1c72db94270e89990b59e9c2a410110580c50827..ba84514b662aa56f281027e74c6d1347aae3682c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -323,7 +323,12 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { case VIRTIO_SCSI_EVT_RESET_RESCAN: - scsi_add_device(shost, 0, target, lun); + if (lun == 0) { + scsi_scan_target(&shost->shost_gendev, 0, target, + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + } else { + scsi_add_device(shost, 0, target, lun); + } break; case VIRTIO_SCSI_EVT_RESET_REMOVED: sdev = scsi_device_lookup(shost, 0, target, lun); @@ -621,7 +626,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) return FAILED; memset(cmd, 0, sizeof(*cmd)); - cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = cpu_to_virtio32(vscsi->vdev, @@ -680,7 +684,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc) return FAILED; memset(cmd, 0, sizeof(*cmd)); - cmd->sc = sc; cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, @@ -855,6 +858,7 @@ static int virtscsi_probe(struct virtio_device *vdev) /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? 
: 1; + num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 0cd947f78b5bfdfa013ce4909afdc8f894b6146b..64eb8ffb2ddfa57d7874affa79a656515ced9ea7 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd struct pvscsi_adapter *adapter = shost_priv(host); struct pvscsi_ctx *ctx; unsigned long flags; + unsigned char op; spin_lock_irqsave(&adapter->hw_lock, flags); @@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd } cmd->scsi_done = done; + op = cmd->cmnd[0]; dev_dbg(&cmd->device->sdev_gendev, - "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); + "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op); spin_unlock_irqrestore(&adapter->hw_lock, flags); - pvscsi_kick_io(adapter, cmd->cmnd[0]); + pvscsi_kick_io(adapter, op); return 0; } @@ -1202,8 +1204,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) static void pvscsi_release_resources(struct pvscsi_adapter *adapter) { - pvscsi_shutdown_intr(adapter); - if (adapter->workqueue) destroy_workqueue(adapter->workqueue); @@ -1535,6 +1535,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) out_reset_adapter: ll_adapter_reset(adapter); out_release_resources: + pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); scsi_host_put(host); out_disable_device: @@ -1543,6 +1544,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return error; out_release_resources_and_disable: + pvscsi_shutdown_intr(adapter); pvscsi_release_resources(adapter); goto out_disable_device; } diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 61389bdc7926690100fc0a38fc59e8b6a73853ab..a1d822ae9fcaaedcb3bdd0437568f3382d9532b1 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c index bb70882e6b56e0bd4f1d37e367e205bbb16f19ac..6a5b547eae5902502590994af92d1db1d646c619 100644 --- a/drivers/scsi/zorro_esp.c +++ b/drivers/scsi/zorro_esp.c @@ -245,7 +245,14 @@ static int fastlane_esp_irq_pending(struct esp *esp) static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { - return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len; + return dma_len > (1U << 16) ? (1U << 16) : dma_len; +} + +static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr, + u32 dma_len) +{ + /* The old driver used 0xfffc as limit, so do that here too */ + return dma_len > 0xfffc ? 
0xfffc : dma_len; } static void zorro_esp_reset_dma(struct esp *esp) @@ -484,7 +491,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr, scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); scsi_esp_cmd(esp, cmd); } @@ -529,7 +535,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr, scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); scsi_esp_cmd(esp, cmd); } @@ -574,7 +579,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr, scsi_esp_cmd(esp, ESP_CMD_DMA); zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); scsi_esp_cmd(esp, cmd); } @@ -599,7 +603,6 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr, zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); if (write) { /* DMA receive */ @@ -649,7 +652,6 @@ static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr, zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); if (write) { /* DMA receive */ @@ -691,7 +693,6 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr, zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); - zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); if (write) { /* DMA receive */ @@ -824,7 +825,7 @@ static const struct esp_driver_ops fastlane_esp_ops = { .unmap_single = zorro_esp_unmap_single, .unmap_sg = zorro_esp_unmap_sg, .irq_pending = fastlane_esp_irq_pending, - .dma_length_limit = zorro_esp_dma_length_limit, + .dma_length_limit = fastlane_esp_dma_length_limit, .reset_dma = zorro_esp_reset_dma, .dma_drain = zorro_esp_dma_drain, .dma_invalidate = fastlane_esp_dma_invalidate, diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 46f0f322d4d8f7d4c2b4d373d74da5d58c8774d5..48fe5fab5693d9f16cc792e7c922fddefc2354e4 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -194,7 +194,6 @@ int __init register_intc_controller(struct intc_desc *desc) goto err0; INIT_LIST_HEAD(&d->list); - list_add_tail(&d->list, &intc_list); raw_spin_lock_init(&d->lock); INIT_RADIX_TREE(&d->tree, GFP_ATOMIC); @@ -380,6 +379,7 @@ int __init register_intc_controller(struct intc_desc *desc) d->skip_suspend = desc->skip_syscore_suspend; + list_add_tail(&d->list, &intc_list); nr_intc_controllers++; return 0; diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 8be4d6786c6105721ed03d55d173aaaa9925406c..9221ba7b78637b7e5a2f8b5791dff6f39a2dae00 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1234,8 +1234,17 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable) pm_runtime_resume(ctrl->dev); pm_runtime_mark_last_busy(ctrl->dev); pm_runtime_put(ctrl->dev); + + ret = slim_register_controller(&ctrl->ctrl); + if (ret) { + dev_err(ctrl->dev, "error adding slim 
controller\n"); + return ret; + } + + dev_info(ctrl->dev, "SLIM controller Registered\n"); } else { qcom_slim_qmi_exit(ctrl); + slim_unregister_controller(&ctrl->ctrl); } return 0; @@ -1317,11 +1326,12 @@ static int of_qcom_slim_ngd_register(struct device *parent, { const struct ngd_reg_offset_data *data; struct qcom_slim_ngd *ngd; + const struct of_device_id *match; struct device_node *node; u32 id; - data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data; - + match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node); + data = match->data; for_each_available_child_of_node(parent->of_node, node) { if (of_property_read_u32(node, "reg", &id)) continue; @@ -1331,6 +1341,10 @@ static int of_qcom_slim_ngd_register(struct device *parent, return -ENOMEM; ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id); + if (!ngd->pdev) { + kfree(ngd); + return -ENOMEM; + } ngd->id = id; ngd->pdev->dev.parent = parent; ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; @@ -1342,7 +1356,6 @@ static int of_qcom_slim_ngd_register(struct device *parent, ngd->base = ctrl->base + ngd->id * data->offset + (ngd->id - 1) * data->size; ctrl->ngd = ngd; - platform_driver_register(&qcom_slim_ngd_driver); return 0; } @@ -1357,11 +1370,6 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev) int ret; ctrl->ctrl.dev = dev; - ret = slim_register_controller(&ctrl->ctrl); - if (ret) { - dev_err(dev, "error adding slim controller\n"); - return ret; - } pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); @@ -1371,7 +1379,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev) ret = qcom_slim_ngd_qmi_svc_event_init(ctrl); if (ret) { dev_err(&pdev->dev, "QMI service registration failed:%d", ret); - goto err; + return ret; } INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker); @@ -1383,14 +1391,12 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev) } return 0; -err: - slim_unregister_controller(&ctrl->ctrl); wq_err: qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); if (ctrl->mwq) destroy_workqueue(ctrl->mwq); - return 0; + return ret; } static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) @@ -1441,6 +1447,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) init_completion(&ctrl->reconf); init_completion(&ctrl->qmi.qmi_comp); + platform_driver_register(&qcom_slim_ngd_driver); return of_qcom_slim_ngd_register(dev, ctrl); } @@ -1456,7 +1463,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev) struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); - slim_unregister_controller(&ctrl->ctrl); + qcom_slim_ngd_enable(ctrl, false); qcom_slim_ngd_exit_dma(ctrl); qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); if (ctrl->mwq) @@ -1467,7 +1474,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev) return 0; } -static int qcom_slim_ngd_runtime_idle(struct device *dev) +static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev) { struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); @@ -1477,8 +1484,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev) return -EAGAIN; } -#ifdef CONFIG_PM -static int qcom_slim_ngd_runtime_suspend(struct device *dev) +static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) { struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; @@ -1491,7 +1497,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev) return ret; } -#endif static const struct 
dev_pm_ops qcom_slim_ngd_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 113e884697fd8c3478d9b7900a79360fb3fad2f5..f0d46b16e08c4f518e7a9be90a948fab7c129b0f 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_ARCH_GEMINI) += gemini/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-y += mediatek/ -obj-$(CONFIG_ARCH_MESON) += amlogic/ +obj-y += amlogic/ obj-y += qcom/ obj-y += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c index 6d89ebf13b8af5ebb7e22e04bd983aba8ee34fd1..20b63bee5b09fefb3d1339799f138e5135f051f5 100644 --- a/drivers/soc/bcm/brcmstb/biuctrl.c +++ b/drivers/soc/bcm/brcmstb/biuctrl.c @@ -56,7 +56,7 @@ static inline void cbc_writel(u32 val, int reg) if (offset == -1) return; - writel_relaxed(val, cpubiuctrl_base + offset); + writel(val, cpubiuctrl_base + offset); } enum cpubiuctrl_regs { @@ -246,7 +246,9 @@ static int __init brcmstb_biuctrl_init(void) if (!np) return 0; - setup_hifcpubiuctrl_regs(np); + ret = setup_hifcpubiuctrl_regs(np); + if (ret) + return ret; ret = mcp_write_pairing_set(); if (ret) { diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c index 14185451901debbd2c6693c908317ad623070ccd..bf9123f727e8035c908d2891c575ff8c3ad10d37 100644 --- a/drivers/soc/bcm/brcmstb/common.c +++ b/drivers/soc/bcm/brcmstb/common.c @@ -31,13 +31,17 @@ static const struct of_device_id brcmstb_machine_match[] = { bool soc_is_brcmstb(void) { + const struct of_device_id *match; struct device_node *root; root = of_find_node_by_path("/"); if (!root) return false; - return of_match_node(brcmstb_machine_match, root) != NULL; + match = of_match_node(brcmstb_machine_match, root); + of_node_put(root); + + return match != NULL; } u32 brcmstb_get_family_id(void) diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index a5577dd5eb0870bccff391f38c69f47c8d708fde..fd124e0850207c2dc9d1e1d4ebac5c89da6dbd1a 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -404,7 +404,7 @@ noinline int brcmstb_pm_s3_finish(void) { struct brcmstb_s3_params *params = ctrl.s3_params; dma_addr_t params_pa = ctrl.s3_params_pa; - phys_addr_t reentry = virt_to_phys(&cpu_resume); + phys_addr_t reentry = virt_to_phys(&cpu_resume_arm); enum bsp_initiate_command cmd; u32 flags; @@ -689,13 +689,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) const struct of_device_id *of_id = NULL; struct device_node *dn; void __iomem *base; - int ret, i; + int ret, i, s; /* AON ctrl registers */ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); if (IS_ERR(base)) { pr_err("error mapping AON_CTRL\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto aon_err; } ctrl.aon_ctrl_base = base; @@ -705,8 +706,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) /* Assume standard offset */ ctrl.aon_sram = ctrl.aon_ctrl_base + AON_CTRL_SYSTEM_DATA_RAM_OFS; + s = 0; } else { ctrl.aon_sram = base; + s = 1; } writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); @@ -716,7 +719,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) (const void **)&ddr_phy_data); if (IS_ERR(base)) { pr_err("error mapping DDR PHY\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_phy_err; } ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; ctrl.pll_status_offset = 
ddr_phy_data->pll_status_offset; @@ -736,17 +740,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, ddr_shimphy_dt_ids) { i = ctrl.num_memc; if (i >= MAX_NUM_MEMC) { + of_node_put(dn); pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); break; } base = of_io_request_and_map(dn, 0, dn->full_name); if (IS_ERR(base)) { + of_node_put(dn); if (!ctrl.support_warm_boot) break; pr_err("error mapping DDR SHIMPHY %d\n", i); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_shimphy_err; } ctrl.memcs[i].ddr_shimphy_base = base; ctrl.num_memc++; @@ -757,14 +764,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, brcmstb_memc_of_match) { base = of_iomap(dn, 0); if (!base) { + of_node_put(dn); pr_err("error mapping DDR Sequencer %d\n", i); - return -ENOMEM; + ret = -ENOMEM; + goto brcmstb_memc_err; } of_id = of_match_node(brcmstb_memc_of_match, dn); if (!of_id) { iounmap(base); - return -EINVAL; + of_node_put(dn); + ret = -EINVAL; + goto brcmstb_memc_err; } ddr_seq_data = of_id->data; @@ -784,20 +795,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev) dn = of_find_matching_node(NULL, sram_dt_ids); if (!dn) { pr_err("SRAM not found\n"); - return -EINVAL; + ret = -EINVAL; + goto brcmstb_memc_err; } ret = brcmstb_init_sram(dn); + of_node_put(dn); if (ret) { pr_err("error setting up SRAM for PM\n"); - return ret; + goto brcmstb_memc_err; } ctrl.pdev = pdev; ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); - if (!ctrl.s3_params) - return -ENOMEM; + if (!ctrl.s3_params) { + ret = -ENOMEM; + goto s3_params_err; + } ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, sizeof(*ctrl.s3_params), DMA_TO_DEVICE); @@ -817,7 +832,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) out: kfree(ctrl.s3_params); - +s3_params_err: + iounmap(ctrl.boot_sram); +brcmstb_memc_err: + for (i--; i >= 0; i--) + iounmap(ctrl.memcs[i].ddr_ctrl); +ddr_shimphy_err: + for (i = 0; i < ctrl.num_memc; i++) + iounmap(ctrl.memcs[i].ddr_shimphy_base); + + iounmap(ctrl.memcs[0].ddr_phy_base); +ddr_phy_err: + iounmap(ctrl.aon_ctrl_base); + if (s) + iounmap(ctrl.aon_sram); +aon_err: pr_warn("PM: initialization failed with code %d\n", ret); return ret; diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 2f71f7df3465a0a65301cb031ab183c4b0ddee7c..f9edd28894fda4163bb69b992aa5765205acf384 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c @@ -91,7 +91,15 @@ static int bman_portal_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct bm_portal_config *pcfg; struct resource *addr_phys[2]; - int irq, cpu; + int irq, cpu, err; + + err = bman_is_probed(); + if (!err) + return -EPROBE_DEFER; + if (err < 0) { + dev_err(&pdev->dev, "failing probe due to bman probe error\n"); + return -ENODEV; + } pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); if (!pcfg) diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 8cc0151830433230e8f629c2660eec90871b7e8c..09ec2baaec16d4d4ed2a0aa5593c0a8d0d9d157e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -976,7 +976,7 @@ struct qman_portal { /* linked-list of CSCN handlers. 
*/ struct list_head cgr_cbs; /* list lock */ - spinlock_t cgr_lock; + raw_spinlock_t cgr_lock; struct work_struct congestion_work; struct work_struct mr_work; char irqname[MAX_IRQNAME]; @@ -1081,18 +1081,19 @@ static void qm_mr_process_task(struct work_struct *work); static irqreturn_t portal_isr(int irq, void *ptr) { struct qman_portal *p = ptr; - - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + u32 clear = 0; if (unlikely(!is)) return IRQ_NONE; /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) + if (is & QM_PIRQ_DQRI) { __poll_portal_fast(p, QMAN_POLL_LIMIT); + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; + } /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; qm_out(&p->p, QM_REG_ISR, clear); return IRQ_HANDLED; } @@ -1193,7 +1194,7 @@ static int qman_create_portal(struct qman_portal *portal, /* if the given mask is NULL, assume all CGRs can be seen */ qman_cgrs_fill(&portal->cgrs[0]); INIT_LIST_HEAD(&portal->cgr_cbs); - spin_lock_init(&portal->cgr_lock); + raw_spin_lock_init(&portal->cgr_lock); INIT_WORK(&portal->congestion_work, qm_congestion_task); INIT_WORK(&portal->mr_work, qm_mr_process_task); portal->bits = 0; @@ -1368,11 +1369,14 @@ static void qm_congestion_task(struct work_struct *work) union qm_mc_result *mcr; struct qman_cgr *cgr; - spin_lock(&p->cgr_lock); + /* + * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock! + */ + raw_spin_lock_irq(&p->cgr_lock); qm_mc_start(&p->p); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); if (!qm_mc_result_timeout(&p->p, &mcr)) { - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); qman_p_irqsource_add(p, QM_PIRQ_CSCI); return; @@ -1388,7 +1392,7 @@ static void qm_congestion_task(struct work_struct *work) list_for_each_entry(cgr, &p->cgr_cbs, node) if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); qman_p_irqsource_add(p, QM_PIRQ_CSCI); } @@ -2345,7 +2349,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, preempt_enable(); cgr->chan = p->config->channel; - spin_lock(&p->cgr_lock); + raw_spin_lock_irq(&p->cgr_lock); if (opts) { struct qm_mcc_initcgr local_opts = *opts; @@ -2382,7 +2386,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) cgr->cb(p, cgr, 1); out: - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); put_affine_portal(); return ret; } @@ -2407,7 +2411,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) goto put_portal; } memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); - spin_lock_irqsave(&p->cgr_lock, irqflags); + raw_spin_lock_irqsave(&p->cgr_lock, irqflags); list_del(&cgr->node); /* * If there are no other CGR objects for this CGRID in the list, @@ -2432,7 +2436,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) /* add back to the list */ list_add(&cgr->node, &p->cgr_cbs); release_lock: - spin_unlock_irqrestore(&p->cgr_lock, irqflags); + raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags); put_portal: put_affine_portal(); return ret; diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index b3da635970ea70f56d98a4db94d234178cfa9a82..d160fc2a7b7a20a39d9710c2f5ac49a2737b1dda 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -69,7 +69,7 @@ static int imx6_pm_domain_power_off(struct 
generic_pm_domain *genpd) u32 val; /* Read ISO and ISO2SW power down delays */ - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); + regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PDNSCR_OFFS, &val); iso = val & 0x3f; iso2sw = (val >> 8) & 0x3f; diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 4e931fdf4d0918aa59368390ed7d8dc8c194b26c..011a40b5fb4907de7cec67d0d5115faf41b107b9 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -1104,7 +1104,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; - u32 rdata; + u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 57af8a5373325f2895f08022629a723fb68a3d01..ee9197f5aae96e62c1cd79af0f546cb5623621b5 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -219,9 +219,9 @@ static int apr_add_device(struct device *dev, struct device_node *np, adev->domain_id = id->domain_id; adev->version = id->svc_version; if (np) - strncpy(adev->name, np->name, APR_NAME_SIZE); + strscpy(adev->name, np->name, APR_NAME_SIZE); else - strncpy(adev->name, id->name, APR_NAME_SIZE); + strscpy(adev->name, id->name, APR_NAME_SIZE); dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, id->domain_id, id->svc_id); diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index 54063a31132fa0b21e2c1ee3ed5fd85c87e759c8..3f1cac3145a4938e2c30eec7e100ebbe3bea1bce 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -283,6 +283,9 @@ int qcom_llcc_probe(struct platform_device *pdev, void __iomem *base; int ret, i; + if (!IS_ERR(drv_data)) + return -EBUSY; + drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index feed3db21c10888681a0f205cccd24c73be078c1..9ab596183ac3e896e4c81ee14b3515e37f8dd5fd 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -513,7 +513,7 @@ EXPORT_SYMBOL(geni_se_resources_on); */ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) { - unsigned long freq = 0; + long freq = 0; int i; if (se->clk_perf_tbl) { @@ -529,7 +529,8 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) { freq = clk_round_rate(se->clk, freq + 1); - if (!freq || freq == se->clk_perf_tbl[i - 1]) + if (freq <= 0 || + (i > 0 && freq == se->clk_perf_tbl[i - 1])) break; se->clk_perf_tbl[i] = freq; } @@ -544,16 +545,17 @@ EXPORT_SYMBOL(geni_se_clk_tbl_get); * @se: Pointer to the concerned serial engine. * @req_freq: Requested clock frequency. * @index: Index of the resultant frequency in the table. - * @res_freq: Resultant frequency which matches or is closer to the - * requested frequency. + * @res_freq: Resultant frequency of the source clock. * @exact: Flag to indicate exact multiple requirement of the requested * frequency. * - * This function is called by the protocol drivers to determine the matching - * or exact multiple of the requested frequency, as provided by the serial - * engine clock in order to meet the performance requirements. If there is - * no matching or exact multiple of the requested frequency found, then it - * selects the closest floor frequency, if exact flag is not set. 
+ * This function is called by the protocol drivers to determine the best match + * of the requested frequency as provided by the serial engine clock in order + * to meet the performance requirements. + * + * If we return success: + * - if @exact is true then @res_freq / &lt;an integer&gt; == @req_freq + * - if @exact is false then @res_freq / &lt;an integer&gt; <= @req_freq * * Return: 0 on success, standard Linux error codes on failure. */ @@ -564,6 +566,9 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, unsigned long *tbl; int num_clk_levels; int i; + unsigned long best_delta; + unsigned long new_delta; + unsigned int divider; num_clk_levels = geni_se_clk_tbl_get(se, &tbl); if (num_clk_levels < 0) @@ -572,18 +577,21 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, if (num_clk_levels == 0) return -EINVAL; - *res_freq = 0; + best_delta = ULONG_MAX; for (i = 0; i < num_clk_levels; i++) { - if (!(tbl[i] % req_freq)) { + divider = DIV_ROUND_UP(tbl[i], req_freq); + new_delta = req_freq - tbl[i] / divider; + if (new_delta < best_delta) { + /* We have a new best! */ *index = i; *res_freq = tbl[i]; - return 0; - } - if (!(*res_freq) || ((tbl[i] > *res_freq) && - (tbl[i] < req_freq))) { - *index = i; - *res_freq = tbl[i]; + /* If the new best is exact then we're done */ + if (new_delta == 0) + return 0; + + /* Record how close we got */ + best_delta = new_delta; } } diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 09c669e70d636861df12f9670dcdf339cf4021ee..038abc377fdb9705eda5f98a363d6d1b3d100478 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev) struct resource *res; void __iomem *base; struct gsbi_info *gsbi; - int i; + int i, ret; u32 mask, gsbi_num; const struct crci_config *config = NULL; @@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gsbi); - return of_platform_populate(node, NULL, NULL, &pdev->dev); + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + clk_disable_unprepare(gsbi->hclk); + return ret; } static int gsbi_remove(struct platform_device *pdev) diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 8a3678c2e83cf609daddfdcd7c50e9de0a671ad2..97bb5989aa21158dee92d58e5f22bc74729dbbc2 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -212,6 +212,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to parse qcom,vmid\n"); goto remove_cdev; } else if (!ret) { + if (!qcom_scm_is_available()) { + ret = -EPROBE_DEFER; + goto remove_cdev; + } + perms[0].vmid = QCOM_SCM_VMID_HLOS; perms[0].perm = QCOM_SCM_PERM_RW; perms[1].vmid = vmid; diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index ee75da66d64bf06466791a41182bae0e64e1643f..75bd9a83aef00670d474a69c86fde81aa4794e93 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -121,6 +121,7 @@ static int tcs_invalidate(struct rsc_drv *drv, int type) return -EAGAIN; } write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0); + write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0); } bitmap_zero(tcs->slots, MAX_TCS_SLOTS); spin_unlock(&tcs->lock); @@ -239,6 +240,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p) skip: /* Reclaim the TCS */ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0); + write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0); write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
spin_lock(&drv->lock); clear_bit(i, drv->tcs_in_use); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index c7beb684128916173eb4e411435c9d1be32fc260..ab8f731a3426ba929cddcb7a3abe9100dc5d5956 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request, msg); struct completion *compl = rpm_msg->completion; + bool free = rpm_msg->needs_free; rpm_msg->err = r; @@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) complete(compl); exit: - if (rpm_msg->needs_free) + if (free) kfree(rpm_msg); } @@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, { struct batch_cache_req *req; struct rpmh_request *rpm_msgs; - DECLARE_COMPLETION_ONSTACK(compl); + struct completion *compls; struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); unsigned long time_left; int count = 0; - int ret, i, j; + int ret, i; + void *ptr; if (!cmd || !n) return -EINVAL; @@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, if (!count) return -EINVAL; - req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]), + ptr = kzalloc(sizeof(*req) + + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)), GFP_ATOMIC); - if (!req) + if (!ptr) return -ENOMEM; + + req = ptr; + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs); + req->count = count; rpm_msgs = req->rpm_msgs; @@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } for (i = 0; i < count; i++) { - rpm_msgs[i].completion = &compl; + struct completion *compl = &compls[i]; + + init_completion(compl); + rpm_msgs[i].completion = compl; ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg); if (ret) { pr_err("Error(%d) sending RPMH message addr=%#x\n", ret, rpm_msgs[i].msg.cmds[0].addr); - for (j = i; j < count; j++) - rpmh_tx_done(&rpm_msgs[j].msg, ret); break; } } time_left = RPMH_TIMEOUT_MS; - for (i = 0; i < count; i++) { - time_left = wait_for_completion_timeout(&compl, time_left); + while (i--) { + time_left = wait_for_completion_timeout(&compls[i], time_left); if (!time_left) { /* * Better hope they never finish because they'll signal - * the completion on our stack and that's bad once - * we've returned from the function. + * the completion that we're going to free once + * we've returned from this function. 
*/ WARN_ON(1); ret = -ETIMEDOUT; @@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } exit: - kfree(req); + kfree(ptr); return ret; } diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index df3ccb30bc2dddba0d2d6accccdd6a6c5a7bc53c..373400dd816d6802378dcf5bc7373fb921b22343 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -281,7 +281,7 @@ struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rp struct rpmsg_channel_info chinfo; struct wcnss_ctrl *_wcnss = wcnss; - strncpy(chinfo.name, name, sizeof(chinfo.name)); + strscpy(chinfo.name, name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = RPMSG_ADDR_ANY; diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c index caf894f193edcc6446165d2d763c98fd090da540..77422baa7a56a494d7fec1d5d32cc9ac11b6e81b 100644 --- a/drivers/soc/renesas/r8a77970-sysc.c +++ b/drivers/soc/renesas/r8a77970-sysc.c @@ -27,8 +27,8 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = { { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON }, { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR }, { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR }, - { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A3IR }, - { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A3IR }, + { "a2dp", 0x400, 2, R8A77970_PD_A2DP, R8A77970_PD_A3IR }, + { "a2cn", 0x400, 3, R8A77970_PD_A2CN, R8A77970_PD_A3IR }, { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR }, { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR }, }; diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c index 9265fb525ef342281546683ff2d0dcf13e335f25..a8dbe55e8ba82d7e40d6c2cdf58fa26796834513 100644 --- a/drivers/soc/renesas/r8a77980-sysc.c +++ b/drivers/soc/renesas/r8a77980-sysc.c @@ -38,12 +38,12 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = { { "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR }, { "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR }, { "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR }, - { "a2pd0", 0x400, 11, R8A77980_PD_A2PD0, R8A77980_PD_A3IR }, - { "a2pd1", 0x400, 12, R8A77980_PD_A2PD1, R8A77980_PD_A3IR }, + { "a2dp0", 0x400, 11, R8A77980_PD_A2DP0, R8A77980_PD_A3IR }, + { "a2dp1", 0x400, 12, R8A77980_PD_A2DP1, R8A77980_PD_A3IR }, { "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR }, - { "a3vip", 0x2c0, 0, R8A77980_PD_A3VIP, R8A77980_PD_ALWAYS_ON }, - { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_A3VIP }, - { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_A3VIP }, + { "a3vip0", 0x2c0, 0, R8A77980_PD_A3VIP0, R8A77980_PD_ALWAYS_ON }, + { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_ALWAYS_ON }, + { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_ALWAYS_ON }, }; const struct rcar_sysc_info r8a77980_sysc_info __initconst = { diff --git a/drivers/soc/renesas/r8a77990-sysc.c b/drivers/soc/renesas/r8a77990-sysc.c index 15579ebc5ed2059dbace28ba6137614e0a4c81e7..664b244eb1dd9d95fabe3305334784f69213c90c 100644 --- a/drivers/soc/renesas/r8a77990-sysc.c +++ b/drivers/soc/renesas/r8a77990-sysc.c @@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a77990_areas[] __initdata = { { "3dg-b", 0x100, 1, R8A77990_PD_3DG_B, R8A77990_PD_3DG_A }, }; -static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas, - unsigned int num_areas, u8 id, - int new_parent) -{ - unsigned int i; - - for (i = 0; i < num_areas; 
i++) - if (areas[i].isr_bit == id) { - areas[i].parent = new_parent; - return; - } -} - /* Fixups for R-Car E3 ES1.0 revision */ static const struct soc_device_attribute r8a77990[] __initconst = { { .soc_id = "r8a77990", .revision = "ES1.0" }, @@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a77990[] __initconst = { static int __init r8a77990_sysc_init(void) { if (soc_device_match(r8a77990)) { - rcar_sysc_fix_parent(r8a77990_areas, - ARRAY_SIZE(r8a77990_areas), - R8A77990_PD_3DG_A, R8A77990_PD_3DG_B); - rcar_sysc_fix_parent(r8a77990_areas, - ARRAY_SIZE(r8a77990_areas), - R8A77990_PD_3DG_B, R8A77990_PD_ALWAYS_ON); + /* Fix incorrect 3DG hierarchy */ + swap(r8a77990_areas[7], r8a77990_areas[8]); + r8a77990_areas[7].parent = R8A77990_PD_ALWAYS_ON; + r8a77990_areas[8].parent = R8A77990_PD_3DG_B; } return 0; diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index d44d0e687ab8adb3bdcf97537d5ac97aade5c6c6..2a43d6e99962f856382f7f6d93cb1e856f712d2a 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -285,6 +285,9 @@ static int __init renesas_soc_init(void) /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ if ((product & 0x7fff) == 0x5210) product ^= 0x11; + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ + if ((product & 0x7fff) == 0x5211) + product ^= 0x12; if (soc->id && ((product >> 8) & 0xff) != soc->id) { pr_warn("SoC mismatch (product = 0x%x)\n", product); return -ENODEV; diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 96882ffde67ea6b7a6303fa5f69f51864e82fb94..3b81e1d75a97e75fd579fcc38117f06e94d2450d 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = { }; #define RK3288_GRF_SOC_CON0 0x244 +#define RK3288_GRF_SOC_CON2 0x24c static const struct rockchip_grf_value rk3288_defaults[] __initconst = { { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) }, + { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) }, }; static const struct rockchip_grf_info rk3288_grf __initconst = { diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig index 353b07e40176ef3f77911b7a380095723da26ba8..e84eb4e59f586dfbed04ebb8cfd89041d5f59db4 100644 --- a/drivers/soc/sunxi/Kconfig +++ b/drivers/soc/sunxi/Kconfig @@ -4,6 +4,7 @@ config SUNXI_SRAM bool default ARCH_SUNXI + select REGMAP_MMIO help Say y here to enable the SRAM controller support. 
This device is responsible on mapping the SRAM in the sunXi SoCs diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c index cd8f41351addfdd69432d4d6e7d5136be4b28e68..7bfb154d6fa5ebe3ed6a78b38dd5177430801e3a 100644 --- a/drivers/soc/tegra/common.c +++ b/drivers/soc/tegra/common.c @@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = { bool soc_is_tegra(void) { + const struct of_device_id *match; struct device_node *root; root = of_find_node_by_path("/"); if (!root) return false; - return of_match_node(tegra_machine_match, root) != NULL; + match = of_match_node(tegra_machine_match, root); + of_node_put(root); + + return match != NULL; } diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a33ee8ef8b6b99cc6f4b6e450f3dfbb59c850d36..51625703399e4426e16a506d7968ad264dcb36e1 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fuse->phys = res->start; fuse->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fuse->base)) - return PTR_ERR(fuse->base); + if (IS_ERR(fuse->base)) { + err = PTR_ERR(fuse->base); + fuse->base = base; + return err; + } fuse->clk = devm_clk_get(&pdev->dev, "fuse"); if (IS_ERR(fuse->clk)) { dev_err(&pdev->dev, "failed to get FUSE clock: %ld", PTR_ERR(fuse->clk)); + fuse->base = base; return PTR_ERR(fuse->clk); } @@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev) if (fuse->soc->probe) { err = fuse->soc->probe(fuse); - if (err < 0) + if (err < 0) { + fuse->base = base; return err; + } } if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size, diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 2d6f3fcf321104a32f18c185a87e8fb6b5ef8226..f17a678154047ed2fc935d8e96ce7ef9e31a6dac 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -65,6 +65,8 @@ #define PWRGATE_STATUS 0x38 +#define PMC_IMPL_E_33V_PWR 0x40 + #define PMC_PWR_DET 0x48 #define PMC_SCRATCH0_MODE_RECOVERY BIT(31) @@ -154,6 +156,7 @@ struct tegra_pmc_soc { bool has_tsense_reset; bool has_gpu_clamps; bool needs_mbist_war; + bool has_impl_33v_pwr; const struct tegra_io_pad_soc *io_pads; unsigned int num_io_pads; @@ -524,16 +527,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off); */ int tegra_powergate_is_powered(unsigned int id) { - int status; - if (!tegra_powergate_is_valid(id)) return -EINVAL; - mutex_lock(&pmc->powergates_lock); - status = tegra_powergate_state(id); - mutex_unlock(&pmc->powergates_lock); - - return status; + return tegra_powergate_state(id); } /** @@ -1073,20 +1070,31 @@ int tegra_io_pad_set_voltage(enum tegra_io_pad id, mutex_lock(&pmc->powergates_lock); - /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */ - value = tegra_pmc_readl(PMC_PWR_DET); - value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_PWR_DET); + if (pmc->soc->has_impl_33v_pwr) { + value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); - /* update I/O voltage */ - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + if (voltage == TEGRA_IO_PAD_1800000UV) + value &= ~BIT(pad->voltage); + else + value |= BIT(pad->voltage); - if (voltage == TEGRA_IO_PAD_1800000UV) - value &= ~BIT(pad->voltage); - else + tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR); + } else { + /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */ + value = tegra_pmc_readl(PMC_PWR_DET); value |= BIT(pad->voltage); + tegra_pmc_writel(value, PMC_PWR_DET); + + /* update 
I/O voltage */ + value = tegra_pmc_readl(PMC_PWR_DET_VALUE); - tegra_pmc_writel(value, PMC_PWR_DET_VALUE); + if (voltage == TEGRA_IO_PAD_1800000UV) + value &= ~BIT(pad->voltage); + else + value |= BIT(pad->voltage); + + tegra_pmc_writel(value, PMC_PWR_DET_VALUE); + } mutex_unlock(&pmc->powergates_lock); @@ -1108,7 +1116,10 @@ int tegra_io_pad_get_voltage(enum tegra_io_pad id) if (pad->voltage == UINT_MAX) return -ENOTSUPP; - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + if (pmc->soc->has_impl_33v_pwr) + value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); + else + value = tegra_pmc_readl(PMC_PWR_DET_VALUE); if ((value & BIT(pad->voltage)) == 0) return TEGRA_IO_PAD_1800000UV; @@ -1288,7 +1299,7 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) if (!pmc->soc->has_tsense_reset) return; - np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip"); + np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip"); if (!np) { dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled); return; @@ -1567,6 +1578,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .cpu_powergates = tegra30_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .has_impl_33v_pwr = false, .num_io_pads = 0, .io_pads = NULL, .regs = &tegra20_pmc_regs, @@ -1609,6 +1621,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .cpu_powergates = tegra114_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .has_impl_33v_pwr = false, .num_io_pads = 0, .io_pads = NULL, .regs = &tegra20_pmc_regs, @@ -1689,6 +1702,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .cpu_powergates = tegra124_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .has_impl_33v_pwr = false, .num_io_pads = ARRAY_SIZE(tegra124_io_pads), .io_pads = tegra124_io_pads, .regs = &tegra20_pmc_regs, @@ -1778,6 +1792,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .cpu_powergates = tegra210_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .has_impl_33v_pwr = false, .needs_mbist_war = true, .num_io_pads = ARRAY_SIZE(tegra210_io_pads), .io_pads = tegra210_io_pads, @@ -1806,7 +1821,7 @@ static const struct tegra_io_pad_soc tegra186_io_pads[] = { { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX }, - { .id = TEGRA_IO_PAD_SDMMC2_HV, .dpd = 34, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC2_HV, .dpd = 34, .voltage = 5 }, { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_CAM, .dpd = 38, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_DSIB, .dpd = 40, .voltage = UINT_MAX }, @@ -1818,12 +1833,13 @@ static const struct tegra_io_pad_soc tegra186_io_pads[] = { { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX }, { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX }, - { .id = TEGRA_IO_PAD_DMIC_HV, .dpd = 52, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_DMIC_HV, .dpd = 52, .voltage = 2 }, { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX }, - { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = UINT_MAX }, - { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = UINT_MAX }, + { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = 4 }, + { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = 6 }, { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX }, - { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 
UINT_MAX }, + { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 1 }, + { .id = TEGRA_IO_PAD_AO_HV, .dpd = UINT_MAX, .voltage = 0 }, }; static const struct tegra_pmc_regs tegra186_pmc_regs = { @@ -1876,6 +1892,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .has_impl_33v_pwr = true, .num_io_pads = ARRAY_SIZE(tegra186_io_pads), .io_pads = tegra186_io_pads, .regs = &tegra186_pmc_regs, diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 3efc47e82973b43ff7431bdacad428aea757bffc..bd040c29c4bf812272f0f98ed76bf157f65b827d 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -329,8 +329,8 @@ struct knav_range_ops { }; struct knav_irq_info { - int irq; - u32 cpu_map; + int irq; + struct cpumask *cpu_mask; }; struct knav_range_info { diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 316e82e46f6cbff0500ba8409529dffc3d0eddbe..2f7fb2dcc1d66d130580b57d7574ecabbbdb1546 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, { struct knav_device *kdev = range->kdev; struct knav_acc_channel *acc; - unsigned long cpu_map; + struct cpumask *cpu_mask; int ret = 0, irq; u32 old, new; if (range->flags & RANGE_MULTI_QUEUE) { acc = range->acc; irq = range->irqs[0].irq; - cpu_map = range->irqs[0].cpu_map; + cpu_mask = range->irqs[0].cpu_mask; } else { acc = range->acc + queue; irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; + cpu_mask = range->irqs[queue].cpu_mask; } old = acc->open_mask; @@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, acc->name, acc->name); ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, range); - if (!ret && cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (!ret && cpu_mask) { + ret = irq_set_affinity_hint(irq, cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 6755f2af56195d773248ae60f8a6261c2e34ae44..ef36acc0e7088e0c6be1f3cb13bfcbf781deaa33 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range, struct knav_queue_inst *inst) { unsigned queue = inst->id - range->queue_base; - unsigned long cpu_map; int ret = 0, irq; if (range->flags & RANGE_HAS_IRQ) { irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; ret = request_irq(irq, knav_queue_int_handler, 0, inst->irq_name, inst); if (ret) return ret; disable_irq(irq); - if (cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (range->irqs[queue].cpu_mask) { + ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); @@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev, range->num_irqs++; - if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) - range->irqs[i].cpu_map = - (oirq.args[2] & 0x0000ff00) >> 8; + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) { + unsigned long mask; + int bit; + + range->irqs[i].cpu_mask = devm_kzalloc(dev, + cpumask_size(), GFP_KERNEL); + if (!range->irqs[i].cpu_mask) + return -ENOMEM; + + mask = (oirq.args[2] & 0x0000ff00) >> 8; + for_each_set_bit(bit, &mask, BITS_PER_LONG) + 
cpumask_set_cpu(bit, range->irqs[i].cpu_mask); + } } range->num_irqs = min(range->num_irqs, range->num_queues); diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index 19c8efb9a5ee786b523a96ae9e41226553757963..c7708feaa62e332c8c215ebd4e356df654050ad2 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -3,8 +3,9 @@ # menuconfig SOUNDWIRE - bool "SoundWire support" - ---help--- + tristate "SoundWire support" + depends on ACPI + help SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. SoundWire is used for transporting data typically related to audio functions. SoundWire interface is @@ -16,17 +17,12 @@ if SOUNDWIRE comment "SoundWire Devices" -config SOUNDWIRE_BUS - tristate - select REGMAP_SOUNDWIRE - config SOUNDWIRE_CADENCE tristate config SOUNDWIRE_INTEL tristate "Intel SoundWire Master driver" select SOUNDWIRE_CADENCE - select SOUNDWIRE_BUS depends on X86 && ACPI && SND_SOC ---help--- SoundWire Intel Master driver. diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile index 5817beaca0e1f22db0bf753cad12c250e8200363..1e2c00163142ef0ffffc88a62b0f5d0fae1750b4 100644 --- a/drivers/soundwire/Makefile +++ b/drivers/soundwire/Makefile @@ -4,7 +4,7 @@ #Bus Objs soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o -obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o +obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o #Cadence Objs soundwire-cadence-objs := cadence_master.o diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index dcc0ff9f0c224e93d88a05aa933b293c4390d559..df172bf3925f6415747c6d4f1b35928432e95270 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -175,6 +175,7 @@ static inline int do_transfer_defer(struct sdw_bus *bus, defer->msg = msg; defer->length = msg->len; + init_completion(&defer->complete); for (i = 0; i <= retry; i++) { resp = bus->ops->xfer_msg_defer(bus, msg, defer); @@ -805,7 +806,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave, static int sdw_handle_slave_alerts(struct sdw_slave *slave) { struct sdw_slave_intr_status slave_intr; - u8 clear = 0, bit, port_status[15]; + u8 clear = 0, bit, port_status[15] = {0}; int port_num, stat, ret, count = 0; unsigned long port; bool slave_notify = false; diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index cb6a331f448ab681ed4ef0bd24f8243debf8c232..70f78eda037e805604edbc7b051ee966fc1e5bc6 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -81,8 +81,8 @@ #define CDNS_MCP_INTSET 0x4C -#define CDNS_SDW_SLAVE_STAT 0x50 -#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0) +#define CDNS_MCP_SLAVE_STAT 0x50 +#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0) #define CDNS_MCP_SLAVE_INTSTAT0 0x54 #define CDNS_MCP_SLAVE_INTSTAT1 0x58 @@ -96,8 +96,8 @@ #define CDNS_MCP_SLAVE_INTMASK0 0x5C #define CDNS_MCP_SLAVE_INTMASK1 0x60 -#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0) -#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0) +#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0) +#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0) #define CDNS_MCP_PORT_INTSTAT 0x64 #define CDNS_MCP_PDI_STAT 0x6C diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 0a8990e758f9c057156a10a573ad494244611c27..e49d3c810677ad98215d6ad89596e8673dc82948 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -282,6 +282,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) if (pcm) { count = 
intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num)); + + /* + * WORKAROUND: on all existing Intel controllers, pdi + * number 2 reports channel count as 1 even though it + * supports 8 channels. Performing hardcoding for pdi + * number 2. + */ + if (pdi_num == 2) + count = 7; + } else { count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); count = ((count & SDW_SHIM_PDMSCAP_CPSS) >> @@ -342,7 +352,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) unsigned int link_id = sdw->instance; int pdi_conf = 0; - pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + /* the Bulk and PCM streams are not contiguous */ + pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; + if (pdi->num >= 2) + pdi->intel_alh_id += 2; /* * Program stream parameters to stream SHIM register @@ -371,7 +384,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) unsigned int link_id = sdw->instance; unsigned int conf; - pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + /* the Bulk and PCM streams are not contiguous */ + pdi->intel_alh_id = (link_id * 16) + pdi->num + 3; + if (pdi->num >= 2) + pdi->intel_alh_id += 2; /* Program Stream config ALH register */ conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id)); @@ -651,8 +667,8 @@ static int intel_create_dai(struct sdw_cdns *cdns, return -ENOMEM; } - dais[i].playback.channels_min = 1; - dais[i].playback.channels_max = max_ch; + dais[i].capture.channels_min = 1; + dais[i].capture.channels_max = max_ch; dais[i].capture.rates = SNDRV_PCM_RATE_48000; dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE; } diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index d1ea6b4d0ad306e8413bc81a4e6d4bfebd839924..6bd689d6a13890cfb157b88cde974dbebeb4a89d 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -12,6 +12,7 @@ #include #include "intel.h" +#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */ #define SDW_MAX_LINKS 4 #define SDW_SHIM_LCAP 0x0 #define SDW_SHIM_BASE 0x2C000 @@ -149,14 +150,33 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, { struct sdw_intel_res *res = cdata; struct acpi_device *adev; + acpi_status status; + u64 adr; + + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr); + if (ACPI_FAILURE(status)) + return AE_OK; /* keep going */ if (acpi_bus_get_device(handle, &adev)) { - dev_err(&adev->dev, "Couldn't find ACPI handle\n"); + pr_err("%s: Couldn't find ACPI handle\n", __func__); return AE_NOT_FOUND; } + /* + * On some Intel platforms, multiple children of the HDAS + * device can be found, but only one of them is the SoundWire + * controller. 
The SNDW device is always exposed with + * Name(_ADR, 0x40000000), with bits 31..28 representing the + * SoundWire link so filter accordingly + */ + if ((adr & GENMASK(31, 28)) >> 28 != SDW_LINK_TYPE) + return AE_OK; /* keep going */ + + /* found the correct SoundWire controller */ res->handle = handle; - return AE_OK; + + /* device found, stop namespace walk */ + return AE_CTRL_TERMINATE; } /** diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index e5c7e1ef63188914bfc655b1d187da5fa44166a1..42bc701e23040b9f18b3402b9f78732cd4a189ea 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1182,8 +1182,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave, } ret = sdw_config_stream(&slave->dev, stream, stream_config, true); - if (ret) + if (ret) { + /* + * sdw_release_master_stream will release s_rt in slave_rt_list in + * stream_error case, but s_rt is only added to slave_rt_list + * when sdw_config_stream is successful, so free s_rt explicitly + * when sdw_config_stream is failed. + */ + kfree(s_rt); goto stream_error; + } list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list); @@ -1236,9 +1244,7 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, } for (i = 0; i < num_ports; i++) { - dpn_prop = &dpn_prop[i]; - - if (dpn_prop->num == port_num) + if (dpn_prop[i].num == port_num) return &dpn_prop[i]; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 671d078349cc667e32835ded589b145b3bb7d9a2..86c81ca79569b4864677a3425c9d076bfa5c53a1 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -454,6 +454,31 @@ config SPI_ORION This enables using the SPI master controller on the Orion and MVEBU chips. +config SPI_PHYTIUM + tristate + depends on ARCH_PHYTIUM || COMPILE_TEST + +config SPI_PHYTIUM_PLAT + tristate "Phytium SPI controller platform support" + select SPI_PHYTIUM + help + This selects a platform driver for Phytium SPI controller. + + If you say yes to this option, support will be included for + Phytium SoC families of SPI controller. + +config SPI_PHYTIUM_PCI + tristate "Phytium SPI controller PCI support" + depends on PCI + select SPI_PHYTIUM + help + This selects a PCI driver for Phytium SPI controller. + + If you say yes to this option, support will be included for + Phytium PCIe chipsets of SPI controller. + + If unsure, say N. 
+ config SPI_PIC32 tristate "Microchip PIC32 series SPI" depends on MACH_PIC32 || COMPILE_TEST @@ -817,4 +842,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a90d55970036536d4bedeccfe533864ddb02622e..01d74f2a1afefaaced47cf3a1585adeecf2c5922 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -35,6 +35,9 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o +obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o +obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o +obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o obj-$(CONFIG_SPI_EFM32) += spi-efm32.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 3f890d16293411ba86618d54d4f8f70887aec31d..2fb43c582559de0b9e1b8571fcf015246ca0b374 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1193,10 +1193,8 @@ static int atmel_spi_setup(struct spi_device *spi) as = spi_master_get_devdata(spi->master); /* see notes above re chipselect */ - if (!atmel_spi_is_v2(as) - && spi->chip_select == 0 - && (spi->mode & SPI_CS_HIGH)) { - dev_dbg(&spi->dev, "setup: can't be active-high\n"); + if (!as->use_cs_gpios && (spi->mode & SPI_CS_HIGH)) { + dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n"); return -EINVAL; } diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 8612525fa4e34e6281d26c5f306f3f1e36a212ce..584bcb018a62db6743eb2028dd29e15b178c734f 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -89,7 +89,7 @@ #define BSPI_BPP_MODE_SELECT_MASK BIT(8) #define BSPI_BPP_ADDR_SELECT_MASK BIT(16) -#define BSPI_READ_LENGTH 512 +#define BSPI_READ_LENGTH 256 /* MSPI register offsets */ #define MSPI_SPCR0_LSB 0x000 @@ -355,7 +355,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi, int bpc = 0, bpp = 0; u8 command = op->cmd.opcode; int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE; - int addrlen = op->addr.nbytes * 8; + int addrlen = op->addr.nbytes; int flex_mode = 1; dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n", diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index f35cc10772f6670397ea923ad30158270dd68578..eab27d41ba83f3ae713b0fccecc02aacb226834d 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -88,7 +88,7 @@ struct bcm2835_spi { u8 *rx_buf; int tx_len; int rx_len; - bool dma_pending; + unsigned int dma_pending; }; static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) @@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) /* Write as many bytes as possible to FIFO */ bcm2835_wr_fifo(bs); - /* based on flags decide if we can finish the transfer */ - if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) { + if (!bs->rx_len) { /* Transfer complete - reset SPI HW */ bcm2835_spi_reset_hw(master); /* wake up the framework */ @@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data) * is called the tx-dma must have finished - can't get to this * situation otherwise... 
*/ - dmaengine_terminate_all(master->dma_tx); - - /* mark as no longer pending */ - bs->dma_pending = 0; + if (cmpxchg(&bs->dma_pending, true, false)) { + dmaengine_terminate_all(master->dma_tx); + } /* and mark as completed */; complete(&master->xfer_completion); @@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master, if (ret) { /* need to reset on errors */ dmaengine_terminate_all(master->dma_tx); + bs->dma_pending = false; bcm2835_spi_reset_hw(master); return ret; } @@ -555,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master, bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); /* handle all the 3-wire mode */ - if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) + if (spi->mode & SPI_3WIRE && tfr->rx_buf && + tfr->rx_buf != master->dummy_rx) cs |= BCM2835_SPI_CS_REN; else cs &= ~BCM2835_SPI_CS_REN; @@ -617,10 +617,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master, struct bcm2835_spi *bs = spi_master_get_devdata(master); /* if an error occurred and we have an active dma, then terminate */ - if (bs->dma_pending) { + if (cmpxchg(&bs->dma_pending, true, false)) { dmaengine_terminate_all(master->dma_tx); dmaengine_terminate_all(master->dma_rx); - bs->dma_pending = 0; } /* and reset */ bcm2835_spi_reset_hw(master); diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 3094d818cf06d4751122611bc2eb807e71d965fd..12c1fa5b06c5b6e3da0028e6ab4f72d5b2f5447c 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs) BCM2835_AUX_SPI_CNTL0_CLEARFIFO); } -static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) +static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs) { - struct spi_master *master = dev_id; - struct bcm2835aux_spi *bs = spi_master_get_devdata(master); - irqreturn_t ret = IRQ_NONE; - - /* IRQ may be shared, so return if our interrupts are disabled */ - if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) & - (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE))) - return ret; + u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT); /* check if we have data to read */ - while (bs->rx_len && - (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & - BCM2835_AUX_SPI_STAT_RX_EMPTY))) { + for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL); + stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT)) bcm2835aux_rd_fifo(bs); - ret = IRQ_HANDLED; - } /* check if we have data to write */ while (bs->tx_len && @@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & BCM2835_AUX_SPI_STAT_TX_FULL))) { bcm2835aux_wr_fifo(bs); - ret = IRQ_HANDLED; } +} - /* and check if we have reached "done" */ - while (bs->rx_len && - (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & - BCM2835_AUX_SPI_STAT_BUSY))) { - bcm2835aux_rd_fifo(bs); - ret = IRQ_HANDLED; - } +static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct bcm2835aux_spi *bs = spi_master_get_devdata(master); + + /* IRQ may be shared, so return if our interrupts are disabled */ + if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) & + (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE))) + return IRQ_NONE; + + /* do common fifo handling */ + bcm2835aux_spi_transfer_helper(bs); if (!bs->tx_len) { /* disable tx fifo empty interrupt */ @@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) 
complete(&master->xfer_completion); } - /* and return */ - return ret; + return IRQ_HANDLED; } static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master, @@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master, { struct bcm2835aux_spi *bs = spi_master_get_devdata(master); unsigned long timeout; - u32 stat; /* configure spi */ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]); @@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master, /* loop until finished the transfer */ while (bs->rx_len) { - /* read status */ - stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT); - - /* fill in tx fifo with remaining data */ - if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) { - bcm2835aux_wr_fifo(bs); - continue; - } - /* read data from fifo for both cases */ - if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) { - bcm2835aux_rd_fifo(bs); - continue; - } - if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) { - bcm2835aux_rd_fifo(bs); - continue; - } + /* do common fifo handling */ + bcm2835aux_spi_transfer_helper(bs); /* there is still data pending to read check the timeout */ if (bs->rx_len && time_after(jiffies, timeout)) { diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index c23849f7aa7bc673bd9cba50ca8755b77742ab34..9a06ffdb73b88641e68da218e63f59ba15704ebf 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -101,6 +101,7 @@ struct bcm63xx_hsspi { struct platform_device *pdev; struct clk *clk; + struct clk *pll_clk; void __iomem *regs; u8 __iomem *fifo; @@ -332,7 +333,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) struct resource *res_mem; void __iomem *regs; struct device *dev = &pdev->dev; - struct clk *clk; + struct clk *clk, *pll_clk = NULL; int irq, ret; u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS; @@ -358,7 +359,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) rate = clk_get_rate(clk); if (!rate) { - struct clk *pll_clk = devm_clk_get(dev, "pll"); + pll_clk = devm_clk_get(dev, "pll"); if (IS_ERR(pll_clk)) { ret = PTR_ERR(pll_clk); @@ -373,19 +374,20 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) clk_disable_unprepare(pll_clk); if (!rate) { ret = -EINVAL; - goto out_disable_clk; + goto out_disable_pll_clk; } } master = spi_alloc_master(&pdev->dev, sizeof(*bs)); if (!master) { ret = -ENOMEM; - goto out_disable_clk; + goto out_disable_pll_clk; } bs = spi_master_get_devdata(master); bs->pdev = pdev; bs->clk = clk; + bs->pll_clk = pll_clk; bs->regs = regs; bs->speed_hz = rate; bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0)); @@ -440,6 +442,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) out_put_master: spi_master_put(master); +out_disable_pll_clk: + clk_disable_unprepare(pll_clk); out_disable_clk: clk_disable_unprepare(clk); return ret; @@ -453,6 +457,7 @@ static int bcm63xx_hsspi_remove(struct platform_device *pdev) /* reset the hardware and block queue progress */ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG); + clk_disable_unprepare(bs->pll_clk); clk_disable_unprepare(bs->clk); return 0; @@ -465,6 +470,7 @@ static int bcm63xx_hsspi_suspend(struct device *dev) struct bcm63xx_hsspi *bs = spi_master_get_devdata(master); spi_master_suspend(master); + clk_disable_unprepare(bs->pll_clk); clk_disable_unprepare(bs->clk); return 0; @@ -480,6 +486,12 @@ static int bcm63xx_hsspi_resume(struct device *dev) if (ret) return ret; + if (bs->pll_clk) { + ret = clk_prepare_enable(bs->pll_clk); + if 
(ret) + return ret; + } + spi_master_resume(master); return 0; diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index f29176000b8df1b0839f5030d8d046fbd07a8ef1..06cf9388e74f96b7e6251197b92a3f3b4897be8d 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -416,7 +416,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) if (ret) spi_master_put(master); - return 0; + return ret; } EXPORT_SYMBOL_GPL(spi_bitbang_start); diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index 87793770624086892d57515605f5931e6939bd94..828fbbebc3c48d861d026288920d52e842cb88c4 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, error: clk_disable_unprepare(p->clk); + pci_release_regions(pdev); spi_master_put(master); return ret; } @@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev) return; clk_disable_unprepare(p->clk); + pci_release_regions(pdev); /* Put everything in a known state. */ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 3db905f5f345c689ce4ffd965a9055a47f396c76..10f328558d556ebd0fcf7ed51a0c39f948bf6219 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, if (!xfer->tx_buf) return NULL; + memset(&txconf, 0, sizeof(txconf)); txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = 16; @@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, if (!xfer->rx_buf) return NULL; + memset(&rxconf, 0, sizeof(rxconf)); rxconf.direction = DMA_DEV_TO_MEM; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = 16; @@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) { - u16 dma_ctrl = 0; + u16 imr = 0, dma_ctrl = 0; dw_writel(dws, DW_SPI_DMARDLR, 0xf); dw_writel(dws, DW_SPI_DMATDLR, 0x10); - if (xfer->tx_buf) + if (xfer->tx_buf) { dma_ctrl |= SPI_DMA_TDMAE; - if (xfer->rx_buf) + imr |= SPI_INT_TXOI; + } + if (xfer->rx_buf) { dma_ctrl |= SPI_DMA_RDMAE; + imr |= SPI_INT_RXUI | SPI_INT_RXOI; + } dw_writel(dws, DW_SPI_DMACR, dma_ctrl); /* Set the interrupt mask */ - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); + spi_umask_intr(dws, imr); dws->transfer_handler = dma_transfer; @@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) dma_async_issue_pending(dws->txchan); } - return 0; + return 1; } static void mid_spi_dma_stop(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index e80f60ed6fdf73d4cac750898eec8ef605d30694..71f2c81f534726a3dcd43098fcf7490e856ccacc 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -217,12 +218,19 @@ static const struct of_device_id dw_spi_mmio_of_match[] = { }; MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match); +static const struct acpi_device_id dw_spi_mmio_acpi_match[] = { + {"HISI0173", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match); + static struct platform_driver dw_spi_mmio_driver = { .probe = dw_spi_mmio_probe, .remove = dw_spi_mmio_remove, .driver = { .name = DRIVER_NAME, .of_match_table = 
dw_spi_mmio_of_match, + .acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match), }, }; module_platform_driver(dw_spi_mmio_driver); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index ac2eb89ef7a5a0616fe915ec96453ac441f589be..66765a15cd931c4e7f4504cfd3be6d709b3e13a1 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -179,9 +179,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -193,13 +195,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -211,6 +216,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -283,18 +289,23 @@ static int dw_spi_transfer_one(struct spi_controller *master, { struct dw_spi *dws = spi_controller_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u32 cr0; int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); + + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); spi_enable_chip(dws, 0); @@ -372,11 +383,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_enable_chip(dws, 1); - if (dws->dma_mapped) { - ret = dws->dma_ops->dma_transfer(dws, transfer); - if (ret < 0) - return ret; - } + if (dws->dma_mapped) + return dws->dma_ops->dma_transfer(dws, transfer); if (chip->poll_mode) return poll_transfer(dws); @@ -485,6 +493,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->type = SSI_MOTO_SPI; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); spi_controller_set_devdata(master, dws); @@ -506,6 +515,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) master->handle_err = dw_spi_handle_err; master->max_speed_hz = dws->max_freq; master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; master->flags = SPI_MASTER_GPIO_SS; if (dws->set_cs) @@ -521,10 +531,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_inited = 0; } else { master->can_dma = dws->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; } } - ret = devm_spi_register_controller(dev, master); + ret = spi_register_controller(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); goto err_dma_exit; @@ -548,6 +559,8 @@ void dw_spi_remove_host(struct dw_spi *dws) { dw_spi_debugfs_remove(dws); + spi_unregister_controller(dws->master); + if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 0168b08364d5d1a83f598c870e297b452f15b952..20a09fe79ae7d029dcdf4161b766f66683f8db75 100644 --- a/drivers/spi/spi-dw.h +++ 
b/drivers/spi/spi-dw.h @@ -118,6 +118,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index f1526757aaf6da88d7be55aaa7ae70c6c3579c2d..79fc3940245a461129e333b2d1e1ff107115faa0 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master) return -EINPROGRESS; } +static enum dma_transfer_direction +ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + return DMA_MEM_TO_DEV; + case DMA_FROM_DEVICE: + return DMA_DEV_TO_MEM; + default: + return DMA_TRANS_NONE; + } +} + /** * ep93xx_spi_dma_prepare() - prepares a DMA transfer * @master: SPI master @@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master) */ static struct dma_async_tx_descriptor * ep93xx_spi_dma_prepare(struct spi_master *master, - enum dma_transfer_direction dir) + enum dma_data_direction dir) { struct ep93xx_spi *espi = spi_master_get_devdata(master); struct spi_transfer *xfer = master->cur_msg->state; @@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master, buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; memset(&conf, 0, sizeof(conf)); - conf.direction = dir; + conf.direction = ep93xx_dma_data_to_trans_dir(dir); - if (dir == DMA_DEV_TO_MEM) { + if (dir == DMA_FROM_DEVICE) { chan = espi->dma_rx; buf = xfer->rx_buf; sgt = &espi->rx_sgt; @@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master, if (!nents) return ERR_PTR(-ENOMEM); - txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK); + txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction, + DMA_CTRL_ACK); if (!txd) { dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); return ERR_PTR(-ENOMEM); @@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master, * unmapped. 
*/ static void ep93xx_spi_dma_finish(struct spi_master *master, - enum dma_transfer_direction dir) + enum dma_data_direction dir) { struct ep93xx_spi *espi = spi_master_get_devdata(master); struct dma_chan *chan; struct sg_table *sgt; - if (dir == DMA_DEV_TO_MEM) { + if (dir == DMA_FROM_DEVICE) { chan = espi->dma_rx; sgt = &espi->rx_sgt; } else { @@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param) { struct spi_master *master = callback_param; - ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV); - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); + ep93xx_spi_dma_finish(master, DMA_TO_DEVICE); + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE); spi_finalize_current_transfer(master); } @@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master) struct ep93xx_spi *espi = spi_master_get_devdata(master); struct dma_async_tx_descriptor *rxd, *txd; - rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM); + rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE); if (IS_ERR(rxd)) { dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); return PTR_ERR(rxd); } - txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV); + txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE); if (IS_ERR(txd)) { - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE); dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); return PTR_ERR(txd); } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 3082e72e4f6c62ed00651e389381ee2896336c52..7f3fcc3f9b63d4bf78a79404c0cf27680623967e 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1114,11 +1114,13 @@ static int dspi_probe(struct platform_device *pdev) ret = spi_register_master(master); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI master\n"); - goto out_clk_put; + goto out_release_dma; } return ret; +out_release_dma: + dspi_release_dma(dspi); out_clk_put: clk_disable_unprepare(dspi->clk); out_master_put: diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index e6d5cc6ab108b190e4551994d3f21f355d9e08c1..51670976faa35b668152a35b69211e802cc613d6 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -276,7 +276,7 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) fsl_lpspi_set_watermark(fsl_lpspi); - temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL; + temp = CFGR1_PCSCFG | CFGR1_MASTER; if (fsl_lpspi->config.mode & SPI_CS_HIGH) temp |= CFGR1_PCSPOL; writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1); diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 8f2e97857e8bccfc6db0563c86f21c36fa3cc554..8b79e36fab21c478d673d6f654e4df590483da66 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -832,9 +832,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) if (ret) goto err; - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - ret = -EINVAL; + irq = platform_get_irq(ofdev, 0); + if (irq < 0) { + ret = irq; goto err; } @@ -847,7 +847,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) return 0; err: - irq_dispose_mapping(irq); if (type == TYPE_FSL) of_fsl_spi_free_chipselects(dev); return ret; diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 421bfc7dda67413bd72ae96055c3767d01a33fa7..19687da23774000c01655e77be49739c30c95123 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -295,9 +295,11 @@ static int spi_gpio_request(struct device *dev, spi_gpio->miso = devm_gpiod_get_optional(dev, 
"miso", GPIOD_IN); if (IS_ERR(spi_gpio->miso)) return PTR_ERR(spi_gpio->miso); - if (!spi_gpio->miso) - /* HW configuration without MISO pin */ - *mflags |= SPI_MASTER_NO_RX; + /* + * No setting SPI_MASTER_NO_RX here - if there is only a MOSI + * pin connected the host can still do RX by changing the + * direction of the line. + */ spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); if (IS_ERR(spi_gpio->sck)) @@ -359,6 +361,11 @@ static inline int spi_gpio_probe_dt(struct platform_device *pdev) } #endif +static void spi_gpio_put(void *data) +{ + spi_master_put(data); +} + static int spi_gpio_probe(struct platform_device *pdev) { int status; @@ -384,6 +391,12 @@ static int spi_gpio_probe(struct platform_device *pdev) if (!master) return -ENOMEM; + status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master); + if (status) { + spi_master_put(master); + return status; + } + spi_gpio = spi_master_get_devdata(master); spi_gpio->cs_gpios = devm_kcalloc(&pdev->dev, @@ -408,7 +421,7 @@ static int spi_gpio_probe(struct platform_device *pdev) return status; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL; + master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH; master->flags = master_flags; master->bus_num = pdev->id; /* The master needs to think there is a chipselect even if not connected */ @@ -423,7 +436,7 @@ static int spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.chipselect = spi_gpio_chipselect; spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction; - if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + if ((master_flags & SPI_MASTER_NO_TX) == 0) { spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; @@ -435,13 +448,8 @@ static int spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; - spi_gpio->bitbang.flags = SPI_CS_HIGH; - status = spi_bitbang_start(&spi_gpio->bitbang); - if (status) - spi_master_put(master); - - return status; + return spi_bitbang_start(&spi_gpio->bitbang); } static int spi_gpio_remove(struct platform_device *pdev) @@ -455,8 +463,6 @@ static int spi_gpio_remove(struct platform_device *pdev) /* stop() unregisters child devices too */ spi_bitbang_stop(&spi_gpio->bitbang); - spi_master_put(spi_gpio->bitbang.master); - return 0; } diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index e6eb979f1b8a09f038ec012f288dba4219bfe604..e4b31d6e6e33e1f7cfdd16d0d607f0c03473f7a0 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -676,6 +676,8 @@ static int img_spfi_probe(struct platform_device *pdev) dma_release_channel(spfi->tx_ch); if (spfi->rx_ch) dma_release_channel(spfi->rx_ch); + spfi->tx_ch = NULL; + spfi->rx_ch = NULL; dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n"); } else { master->dma_tx = spfi->tx_ch; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 08dd3a31a3e5f7d49a40400300f85477ef6a464e..5b6f3655c366a6f6c09244eb54d895955a83864c 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1427,7 +1427,7 @@ static int spi_imx_transfer(struct spi_device *spi, /* flush rxfifo before transfer */ while (spi_imx->devtype_data->rx_available(spi_imx)) - spi_imx->rx(spi_imx); + 
readl(spi_imx->base + MXC_CSPIRXDATA); if (spi_imx->slave_mode) return spi_imx_pio_transfer_slave(spi, transfer); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index e43842c7a31a90388b4eeb3be6ab9a420e30d28b..eb72dba71d832a47165b824c2f859fed00a33a4e 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -346,10 +346,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name); int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) { struct spi_controller *ctlr = mem->spi->controller; + size_t len; + + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) return ctlr->mem_ops->adjust_op_size(mem, op); + if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { + if (len > spi_max_transfer_size(mem->spi)) + return -EINVAL; + + op->data.nbytes = min3((size_t)op->data.nbytes, + spi_max_transfer_size(mem->spi), + spi_max_message_size(mem->spi) - + len); + if (!op->data.nbytes) + return -EINVAL; + } + return 0; } EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size); diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c index 0e55784a3ad93604634a1241b630ada7106b10e6..99eb2cee6f37a09706cbb5946e459392c418549b 100644 --- a/drivers/spi/spi-mpc52xx.c +++ b/drivers/spi/spi-mpc52xx.c @@ -520,6 +520,7 @@ static int mpc52xx_spi_remove(struct platform_device *op) struct mpc52xx_spi *ms = spi_master_get_devdata(master); int i; + cancel_work_sync(&ms->work); free_irq(ms->irq0, ms); free_irq(ms->irq1, ms); diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 86bf45667a0402c43ab76057dc01d2527f35457c..1a1c54bf2ae13d4dc24442fe881c8780945479aa 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -98,6 +98,7 @@ struct mtk_spi { struct clk *parent_clk, *sel_clk, *spi_clk; struct spi_transfer *cur_transfer; u32 xfer_len; + u32 num_xfered; struct scatterlist *tx_sgl, *rx_sgl; u32 tx_sgl_len, rx_sgl_len; const struct mtk_spi_compatible *dev_comp; @@ -385,6 +386,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master, mdata->cur_transfer = xfer; mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len); + mdata->num_xfered = 0; mtk_spi_prepare_transfer(master, xfer); mtk_spi_setup_packet(master); @@ -415,6 +417,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master, mdata->tx_sgl_len = 0; mdata->rx_sgl_len = 0; mdata->cur_transfer = xfer; + mdata->num_xfered = 0; mtk_spi_prepare_transfer(master, xfer); @@ -482,7 +485,7 @@ static int mtk_spi_setup(struct spi_device *spi) static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) { - u32 cmd, reg_val, cnt, remainder; + u32 cmd, reg_val, cnt, remainder, len; struct spi_master *master = dev_id; struct mtk_spi *mdata = spi_master_get_devdata(master); struct spi_transfer *trans = mdata->cur_transfer; @@ -497,37 +500,41 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) if (trans->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, - trans->rx_buf, cnt); + trans->rx_buf + mdata->num_xfered, cnt); remainder = mdata->xfer_len % 4; if (remainder > 0) { reg_val = readl(mdata->base + SPI_RX_DATA_REG); - memcpy(trans->rx_buf + (cnt * 4), - ®_val, remainder); + memcpy(trans->rx_buf + + mdata->num_xfered + + (cnt * 4), + ®_val, + remainder); } } - trans->len -= mdata->xfer_len; - if (!trans->len) { + mdata->num_xfered += mdata->xfer_len; + if (mdata->num_xfered == trans->len) { spi_finalize_current_transfer(master); return IRQ_HANDLED; } - if (trans->tx_buf) - trans->tx_buf += mdata->xfer_len; - if (trans->rx_buf) - 
trans->rx_buf += mdata->xfer_len; - - mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len); + len = trans->len - mdata->num_xfered; + mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); mtk_spi_setup_packet(master); - cnt = trans->len / 4; - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, trans->tx_buf, cnt); + if (trans->tx_buf) { + cnt = mdata->xfer_len / 4; + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, + trans->tx_buf + mdata->num_xfered, cnt); - remainder = trans->len % 4; - if (remainder > 0) { - reg_val = 0; - memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder); - writel(reg_val, mdata->base + SPI_TX_DATA_REG); + remainder = mdata->xfer_len % 4; + if (remainder > 0) { + reg_val = 0; + memcpy(&reg_val, + trans->tx_buf + (cnt * 4) + mdata->num_xfered, + remainder); + writel(reg_val, mdata->base + SPI_TX_DATA_REG); + } } mtk_spi_enable_transfer(master); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 508c61c669e7d701525af13309430d7a4d1e9ce8..eb2d2de172af3da4ded93b5f1a613392ec402247 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -299,7 +299,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi *mcspi; unsigned int wcnt; - int max_fifo_depth, fifo_depth, bytes_per_word; + int max_fifo_depth, bytes_per_word; u32 chconf, xferlevel; mcspi = spi_master_get_devdata(master); @@ -315,10 +315,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, else max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; - fifo_depth = gcd(t->len, max_fifo_depth); - if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0) - goto disable_fifo; - wcnt = t->len / bytes_per_word; if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) goto disable_fifo; @@ -326,16 +322,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, xferlevel = wcnt << 16; if (t->rx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFER; - xferlevel |= (fifo_depth - 1) << 8; + xferlevel |= (bytes_per_word - 1) << 8; } + if (t->tx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFET; - xferlevel |= fifo_depth - 1; + xferlevel |= bytes_per_word - 1; } mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel); mcspi_write_chconf0(spi, chconf); - mcspi->fifo_depth = fifo_depth; + mcspi->fifo_depth = max_fifo_depth; return; } @@ -585,7 +582,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) struct dma_slave_config cfg; enum dma_slave_buswidth width; unsigned es; - u32 burst; void __iomem *chstat_reg; void __iomem *irqstat_reg; int wait_res; @@ -605,22 +601,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) } count = xfer->len; - burst = 1; - - if (mcspi->fifo_depth > 0) { - if (count > mcspi->fifo_depth) - burst = mcspi->fifo_depth / es; - else - burst = count / es; - } memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; cfg.src_addr_width = width; cfg.dst_addr_width = width; - cfg.src_maxburst = burst; - cfg.dst_maxburst = burst; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; rx = xfer->rx_buf; tx = xfer->tx_buf; @@ -1455,13 +1443,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev) /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap2_mcspi"); -#ifdef CONFIG_SUSPEND -static int omap2_mcspi_suspend_noirq(struct device *dev) +static int __maybe_unused omap2_mcspi_suspend(struct device *dev) { - return pinctrl_pm_select_sleep_state(dev); + struct spi_master *master =
dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + int error; + + error = pinctrl_pm_select_sleep_state(dev); + if (error) + dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", + __func__, error); + + error = spi_master_suspend(master); + if (error) + dev_warn(mcspi->dev, "%s: master suspend failed: %i\n", + __func__, error); + + return pm_runtime_force_suspend(dev); } -static int omap2_mcspi_resume_noirq(struct device *dev) +static int __maybe_unused omap2_mcspi_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct omap2_mcspi *mcspi = spi_master_get_devdata(master); @@ -1472,17 +1473,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev) dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", __func__, error); - return 0; -} + error = spi_master_resume(master); + if (error) + dev_warn(mcspi->dev, "%s: master resume failed: %i\n", + __func__, error); -#else -#define omap2_mcspi_suspend_noirq NULL -#define omap2_mcspi_resume_noirq NULL -#endif + return pm_runtime_force_resume(dev); +} static const struct dev_pm_ops omap2_mcspi_pm_ops = { - .suspend_noirq = omap2_mcspi_suspend_noirq, - .resume_noirq = omap2_mcspi_resume_noirq, + SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend, + omap2_mcspi_resume) .runtime_resume = omap_mcspi_runtime_resume, }; diff --git a/drivers/spi/spi-phytium-pci.c b/drivers/spi/spi-phytium-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..ad97042339a7f7b0e5622b12bffc30927d8ad521 --- /dev/null +++ b/drivers/spi/spi-phytium-pci.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller PCI driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/spi/spi-dw-pci.c + * Copyright (c) 2009, 2014 Intel Corporation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi_pci" + +static int phytium_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_spi *fts; + int pci_bar = 0; + int ret; + + fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), + GFP_KERNEL); + if (!fts) + return -ENOMEM; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "pci iomap failed?\n"); + return ret; + } + + fts->regs = pcim_iomap_table(pdev)[pci_bar]; + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = pdev->irq; + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + fts->bus_num = -1; + + fts->max_freq = 48000000; + + fts->num_cs = 4; + + fts->global_cs = 1; + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + return ret; + + pci_set_drvdata(pdev, fts); + return 0; +} + +static void phytium_spi_pci_remove(struct pci_dev *pdev) +{ + struct phytium_spi *fts = pci_get_drvdata(pdev); + + phytium_spi_remove_host(fts); +} + + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct pci_device_id phytium_device_pci_tbl[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2c) }, + {}, +}; + +static struct pci_driver phytium_spi_pci_driver = { + .name = DRIVER_NAME, + .id_table = phytium_device_pci_tbl, + .probe = phytium_spi_pci_probe, + .remove = phytium_spi_pci_remove, + .driver = { + .pm = &phytium_spi_pm_ops, + } +}; + +module_pci_driver(phytium_spi_pci_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("PCI Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c new file mode 100644 index 0000000000000000000000000000000000000000..5b94d01a5860aaf38cace1d57fa038ca489b8b20 --- /dev/null +++ b/drivers/spi/spi-phytium-plat.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller platform driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + * + * Derived from drivers/spi/spi-dw-mmio.c + * Copyright (c) 2010, Octasic semiconductor. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi" + +struct phytium_spi_clk { + struct phytium_spi fts; + struct clk *clk; +}; + +static int phytium_spi_probe(struct platform_device *pdev) +{ + struct phytium_spi_clk *ftsc; + struct phytium_spi *fts; + struct resource *mem; + int ret; + int num_cs; + int cs_gpio; + int global_cs; + int i; + + ftsc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi_clk), + GFP_KERNEL); + if (!ftsc) + return -ENOMEM; + + fts = &ftsc->fts; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + fts->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = platform_get_irq(pdev, 0); + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + if (pdev->dev.of_node) { + ftsc->clk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(ftsc->clk)) + return PTR_ERR(ftsc->clk); + ret = clk_prepare_enable(ftsc->clk); + if (ret) + return ret; + + fts->max_freq = clk_get_rate(ftsc->clk); + } else if (has_acpi_companion(&pdev->dev)) { + fts->max_freq = 48000000; + } + + fts->bus_num = pdev->id; + device_property_read_u32(&pdev->dev, + "reg-io-width", &fts->reg_io_width); + + num_cs = 4; + + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + + fts->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < fts->num_cs; i++) { + cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } else if (has_acpi_companion(&pdev->dev)) { + int n; + int *cs; + struct gpio_desc *gpiod; + + n = gpiod_count(&pdev->dev, "cs"); + + cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); + fts->cs = cs; + + for (i = 0; i < n; i++) { + gpiod = devm_gpiod_get_index_optional(&pdev->dev, + "cs", i, GPIOD_OUT_LOW); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out; + } + + cs_gpio = desc_to_gpio(gpiod); + cs[i] = cs_gpio; + } + } + + device_property_read_u32(&pdev->dev, "global-cs", &global_cs); + fts->global_cs = global_cs; + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + goto out; + + platform_set_drvdata(pdev, ftsc); + return 0; + +out: + clk_disable_unprepare(ftsc->clk); + return ret; +} + +static int phytium_spi_remove(struct platform_device *pdev) +{ + struct phytium_spi_clk *ftsc = platform_get_drvdata(pdev); + + phytium_spi_remove_host(&ftsc->fts); + clk_disable_unprepare(ftsc->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct of_device_id phytium_spi_of_match[] = { + { .compatible = 
"phytium,spi", .data = (void *)0 }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, phytium_spi_of_match); + +static const struct acpi_device_id phytium_spi_acpi_match[] = { + {"PHYT000E", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytium_spi_acpi_match); + +static struct platform_driver phytium_spi_driver = { + .probe = phytium_spi_probe, + .remove = phytium_spi_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(phytium_spi_of_match), + .acpi_match_table = ACPI_PTR(phytium_spi_acpi_match), + .pm = &phytium_spi_pm_ops, + }, +}; +module_platform_driver(phytium_spi_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..0ddddf3eb01964513531472a37f5642d0462206b --- /dev/null +++ b/drivers/spi/spi-phytium.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd.. + * + * Derived from drivers/spi/spi-dw.c + * Copyright (c) 2009, Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) +{ + return __raw_readl(fts->regs + offset); +} + +static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) +{ + return __raw_readw(fts->regs + offset); +} + +static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) +{ + __raw_writel(val, fts->regs + offset); +} + +static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) +{ + __raw_writew(val, fts->regs + offset); +} + +static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) +{ + switch (fts->reg_io_width) { + case 2: + return phytium_readw(fts, offset); + case 4: + default: + return phytium_readl(fts, offset); + } +} + +static inline void phytium_write_io_reg(struct phytium_spi *fts, + u32 offset, u32 val) +{ + switch (fts->reg_io_width) { + case 2: + phytium_writew(fts, offset, val); + break; + case 4: + default: + phytium_writel(fts, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct phytium_spi *fts, int enable) +{ + phytium_writel(fts, SSIENR, (enable ? 
1 : 0)); +} + +static inline void spi_set_clk(struct phytium_spi *fts, u16 div) +{ + phytium_writel(fts, BAUDR, div); +} + +static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) & ~mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) | mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_global_cs(struct phytium_spi *fts) +{ + u32 global_cs_en, mask, setmask; + + mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; + setmask = ~GENMASK(fts->num_cs-1, 0); + global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; + + phytium_writel(fts, GCSR, global_cs_en); +} + +static inline void spi_reset_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + if (fts->global_cs) + spi_global_cs(fts); + spi_mask_intr(fts, 0xff); + spi_enable_chip(fts, 1); +} + +static inline void spi_shutdown_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + spi_set_clk(fts, 0); +} + +struct phytium_spi_chip { + u8 poll_mode; + u8 type; + void (*cs_control)(u32 command); +}; + +struct chip_data { + u8 cs; + u8 tmode; + u8 type; + + u8 poll_mode; + + u16 clk_div; + u32 speed_hz; + void (*cs_control)(u32 command); +}; + +static void phytium_spi_set_cs(struct spi_device *spi, bool enable) +{ + struct phytium_spi *fts = spi_master_get_devdata(spi->master); + struct chip_data *chip = spi_get_ctldata(spi); + u32 origin; + + if (chip && chip->cs_control) + chip->cs_control(!enable); + + if (!enable) { + phytium_writel(fts, SER, BIT(spi->chip_select)); + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, + origin | (1 << spi->chip_select)); + } + } else { + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, + origin & ~(1 << spi->chip_select)); + } + } +} + +static inline u32 tx_max(struct phytium_spi *fts) +{ + u32 tx_left, tx_room, rxtx_gap; + + tx_left = (fts->tx_end - fts->tx) / fts->n_bytes; + tx_room = fts->fifo_len - phytium_readl(fts, TXFLR); + + rxtx_gap = ((fts->rx_end - fts->rx) - (fts->tx_end - fts->tx)) + / fts->n_bytes; + + return min3(tx_left, tx_room, (u32) (fts->fifo_len - rxtx_gap)); +} + +static inline u32 rx_max(struct phytium_spi *fts) +{ + u32 rx_left = (fts->rx_end - fts->rx) / fts->n_bytes; + + return min_t(u32, rx_left, phytium_readl(fts, RXFLR)); +} + +static void phytium_writer(struct phytium_spi *fts) +{ + u32 max = tx_max(fts); + u16 txw = 0; + + while (max--) { + if (fts->tx_end - fts->len) { + if (fts->n_bytes == 1) + txw = *(u8 *)(fts->tx); + else + txw = *(u16 *)(fts->tx); + } + phytium_write_io_reg(fts, DR, txw); + fts->tx += fts->n_bytes; + } +} + +static void phytium_reader(struct phytium_spi *fts) +{ + u32 max = rx_max(fts); + u16 rxw; + + while (max--) { + rxw = phytium_read_io_reg(fts, DR); + if (fts->rx_end - fts->len) { + if (fts->n_bytes == 1) + *(u8 *)(fts->rx) = rxw; + else + *(u16 *)(fts->rx) = rxw; + } + fts->rx += fts->n_bytes; + } +} + +static void int_error_stop(struct phytium_spi *fts, const char *msg) +{ + spi_reset_chip(fts); + + dev_err(&fts->master->dev, "%s\n", msg); + fts->master->cur_msg->status = -EIO; + spi_finalize_current_transfer(fts->master); +} + +static irqreturn_t interrupt_transfer(struct phytium_spi *fts) +{ + u16 irq_status = phytium_readl(fts, ISR); + + if (irq_status & (INT_TXOI | INT_RXOI | INT_RXUI)) { + phytium_readl(fts, ICR); + int_error_stop(fts, "irq 
transfer: fifo overrun/underrun"); + return IRQ_HANDLED; + } + + phytium_reader(fts); + if (fts->rx_end == fts->rx) { + spi_mask_intr(fts, INT_TXEI); + spi_finalize_current_transfer(fts->master); + return IRQ_HANDLED; + } + if (irq_status & INT_TXEI) { + spi_mask_intr(fts, INT_TXEI); + phytium_writer(fts); + spi_umask_intr(fts, INT_TXEI); + } + + return IRQ_HANDLED; +} + +static irqreturn_t phytium_spi_irq(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct phytium_spi *fts = spi_master_get_devdata(master); + u16 irq_status = phytium_readl(fts, ISR) & 0x3f; + + if (!irq_status) + return IRQ_NONE; + + if (!master->cur_msg) { + spi_mask_intr(fts, INT_TXEI); + return IRQ_HANDLED; + } + + if (fts->transfer_handler) + return fts->transfer_handler(fts); + else + return IRQ_HANDLED; +} + +static int poll_transfer(struct phytium_spi *fts) +{ + do { + phytium_writer(fts); + phytium_reader(fts); + cpu_relax(); + } while (fts->rx_end > fts->rx); + + return 0; +} + +static int phytium_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, struct spi_transfer *transfer) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + struct chip_data *chip = spi_get_ctldata(spi); + u8 imask = 0; + u16 txlevel = 0; + u16 clk_div; + u32 cr0; + + fts->tx = (void *)transfer->tx_buf; + fts->tx_end = fts->tx + transfer->len; + fts->rx = transfer->rx_buf; + fts->rx_end = fts->rx + transfer->len; + fts->len = transfer->len; + + spi_enable_chip(fts, 0); + + if (transfer->speed_hz != chip->speed_hz) { + clk_div = (fts->max_freq / transfer->speed_hz + 1) & 0xfffe; + + chip->speed_hz = transfer->speed_hz; + chip->clk_div = clk_div; + + spi_set_clk(fts, chip->clk_div); + } + + if (transfer->bits_per_word == 8) + fts->n_bytes = 1; + else if (transfer->bits_per_word == 16) + fts->n_bytes = 2; + else + return -EINVAL; + + cr0 = (transfer->bits_per_word - 1) + | (chip->type << FRF_OFFSET) + | (spi->mode << MODE_OFFSET) + | (chip->tmode << TMOD_OFFSET); + + if (chip->cs_control) { + if (fts->rx && fts->tx) + chip->tmode = TMOD_TR; + else if (fts->rx) + chip->tmode = TMOD_RO; + else + chip->tmode = TMOD_TO; + + cr0 &= ~TMOD_MASK; + cr0 |= (chip->tmode << TMOD_OFFSET); + } + + phytium_writel(fts, CTRL0, cr0); + + spi_mask_intr(fts, 0xff); + + if (!chip->poll_mode) { + txlevel = min_t(u16, fts->fifo_len / 2, + fts->len / fts->n_bytes); + phytium_writel(fts, TXFLTR, txlevel); + + imask |= INT_TXEI | INT_TXOI | + INT_RXUI | INT_RXOI; + spi_umask_intr(fts, imask); + + fts->transfer_handler = interrupt_transfer; + } + + spi_enable_chip(fts, 1); + + if (chip->poll_mode) + return poll_transfer(fts); + + return 1; +} + +static void phytium_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + + spi_reset_chip(fts); +} + +static int phytium_spi_setup(struct spi_device *spi) +{ + struct phytium_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct spi_master *master = spi->master; + struct phytium_spi *fts = spi_master_get_devdata(master); + int ret; + u32 cr0; + + spi_enable_chip(fts, 0); + + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + chip_info = spi->controller_data; + + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } + + chip->tmode = 0; + + cr0 = 
(spi->bits_per_word - 1) | (chip->type << FRF_OFFSET) | + (spi->mode << MODE_OFFSET) | (chip->tmode << TMOD_OFFSET); + + phytium_writel(fts, CTRL0, cr0); + + if (gpio_is_valid(spi->cs_gpio)) { + ret = gpio_direction_output(spi->cs_gpio, + !(spi->mode & SPI_CS_HIGH)); + if (ret) + return ret; + } + + spi_enable_chip(fts, 1); + + return 0; +} + +static void phytium_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +static void spi_hw_init(struct device *dev, struct phytium_spi *fts) +{ + spi_reset_chip(fts); + + if (!fts->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + phytium_writel(fts, TXFLTR, fifo); + if (fifo != phytium_readl(fts, TXFLTR)) + break; + } + phytium_writel(fts, TXFLTR, 0); + + fts->fifo_len = (fifo == 1) ? 0 : fifo; + dev_dbg(dev, "Detected FIFO size: %u bytes\n", fts->fifo_len); + } +} + +int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) +{ + struct spi_master *master; + int ret; + + WARN_ON(fts == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + fts->master = master; + snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); + + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = fts->bus_num; + master->num_chipselect = fts->num_cs; + master->setup = phytium_spi_setup; + master->cleanup = phytium_spi_cleanup; + master->set_cs = phytium_spi_set_cs; + master->transfer_one = phytium_spi_transfer_one; + master->handle_err = phytium_spi_handle_err; + master->max_speed_hz = fts->max_freq; + master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; + master->flags = SPI_MASTER_GPIO_SS; + master->cs_gpios = fts->cs; + + spi_hw_init(dev, fts); + + spi_master_set_devdata(master, fts); + + ret = request_irq(fts->irq, phytium_spi_irq, + IRQF_SHARED, fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_exit; + } + + return 0; + +err_exit: + spi_enable_chip(fts, 0); + free_irq(fts->irq, master); +err_free_master: + spi_master_put(master); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_add_host); + +void phytium_spi_remove_host(struct phytium_spi *fts) +{ + spi_shutdown_chip(fts); + + free_irq(fts->irq, fts->master); +} +EXPORT_SYMBOL_GPL(phytium_spi_remove_host); + +int phytium_spi_suspend_host(struct phytium_spi *fts) +{ + int ret; + + ret = spi_controller_suspend(fts->master); + if (ret) + return ret; + + spi_shutdown_chip(fts); + return 0; +} +EXPORT_SYMBOL_GPL(phytium_spi_suspend_host); + +int phytium_spi_resume_host(struct phytium_spi *fts) +{ + int ret; + + spi_hw_init(&fts->master->dev, fts); + ret = spi_controller_resume(fts->master); + if (ret) + dev_err(&fts->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_resume_host); + +MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h new file mode 100644 index 0000000000000000000000000000000000000000..145403051e5f60e3c50736f974ca9e05bbeb83b5 --- /dev/null +++ b/drivers/spi/spi-phytium.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium SPI 
controller driver. + * + * Copyright (c) 2019-2023, Phytium Technology Co., Ltd. + */ +#ifndef PHYTIUM_SPI_HEADER_H +#define PHYTIUM_SPI_HEADER_H + +#include +#include +#include + +#define CTRL0 0x00 +#define SSIENR 0x08 +#define SER 0x10 +#define BAUDR 0x14 +#define TXFLTR 0x18 +#define TXFLR 0x20 +#define RXFLR 0x24 +#define IMR 0x2c +#define ISR 0x30 +#define ICR 0x48 +#define DR 0x60 +#define GCSR 0x100 + +#define FRF_OFFSET 4 +#define MODE_OFFSET 6 +#define TMOD_OFFSET 8 + +#define TMOD_MASK (0x3 << TMOD_OFFSET) +#define TMOD_TR 0x0 +#define TMOD_TO 0x1 +#define TMOD_RO 0x2 + +#define INT_TXEI (1 << 0) +#define INT_TXOI (1 << 1) +#define INT_RXUI (1 << 2) +#define INT_RXOI (1 << 3) + +struct phytium_spi { + struct spi_master *master; + char name[16]; + + void __iomem *regs; + bool global_cs; + unsigned long paddr; + int irq; + u32 fifo_len; + u32 max_freq; + + u32 reg_io_width; + u16 bus_num; + u16 num_cs; + int *cs; + + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + u8 n_bytes; + irqreturn_t (*transfer_handler)(struct phytium_spi *fts); +}; + +extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts); +extern void phytium_spi_remove_host(struct phytium_spi *fts); +extern int phytium_spi_suspend_host(struct phytium_spi *fts); +extern int phytium_spi_resume_host(struct phytium_spi *fts); + +#endif /* PHYTIUM_SPI_HEADER_H */ diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index f8a45af1fa9f2434689b7946435d26859f895306..288002f6c613ee81a5a421568b86ef06561da332 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -320,7 +320,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s, desc_rx = dmaengine_prep_slave_sg(master->dma_rx, xfer->rx_sg.sgl, xfer->rx_sg.nents, - DMA_FROM_DEVICE, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { ret = -EINVAL; @@ -330,7 +330,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s, desc_tx = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl, xfer->tx_sg.nents, - DMA_TO_DEVICE, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { ret = -EINVAL; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 14f4ea59caff7a235faafc7c5cfb91776bcc68a9..525388126e260eeb1e685def9889d993b28f6a1c 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -876,10 +876,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) rate = min_t(int, ssp_clk, rate); + /* + * Calculate the divisor for the SCR (Serial Clock Rate), avoiding + * that the SSP transmission rate can be greater than the device rate + */ if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) - return (ssp_clk / (2 * rate) - 1) & 0xff; + return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff; else - return (ssp_clk / rate - 1) & 0xfff; + return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff; } static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, @@ -1412,12 +1416,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) { - struct device *dev = param; - - if (dev != chan->device->dev->parent) - return false; - - return true; + return param == chan->device->dev; } static struct pxa2xx_spi_master * @@ -1471,7 +1470,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) } ssp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ssp->clk)) + return NULL; + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) + return 
NULL; + ssp->type = type; ssp->pdev = pdev; ssp->port_id = pxa2xx_spi_get_port_id(adev); @@ -1612,6 +1617,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info->enable_dma = false; } else { master->can_dma = pxa2xx_spi_can_dma; + master->max_dma_len = MAX_DMA_LEN; } } diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index fdcf3076681b5eeffb1885b428c8d00ff2682d2f..185bbdce62b1467acbe833255f3fe79e4fad0840 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -445,6 +445,9 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs) struct dma_slave_config rxconf, txconf; struct dma_async_tx_descriptor *rxdesc, *txdesc; + memset(&rxconf, 0, sizeof(rxconf)); + memset(&txconf, 0, sizeof(txconf)); + spin_lock_irqsave(&rs->lock, flags); rs->state &= ~RXBUSY; rs->state &= ~TXBUSY; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index b37de1d991d6abe1e0a25c5fffcf04d9851aba3e..d61120822f026308269bd2a89bedd7b55bf8b7d5 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -279,7 +279,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) /* Sets parity, interrupt mask */ rspi_write8(rspi, 0x00, RSPI_SPCR2); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); @@ -323,7 +324,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, 0x00, RSPI_SSLND); rspi_write8(rspi, 0x00, RSPI_SPND); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); @@ -374,7 +376,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) /* Sets buffer to allow normal operation */ rspi_write8(rspi, 0x00, QSPI_SPBFCR); - /* Sets SPCMD */ + /* Resets sequencer */ + rspi_write8(rspi, 0, RSPI_SPSCR); rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0); /* Sets RSPI mode */ diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 101cd6aae2ea520afcac89671071cdabe2341f8f..30ea0a2068e094ab47822b63da58da4778858ee3 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1343,8 +1343,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) i = platform_get_irq(pdev, 0); if (i < 0) { - dev_err(&pdev->dev, "cannot get platform IRQ\n"); - ret = -ENOENT; + dev_err(&pdev->dev, "cannot get IRQ\n"); + ret = i; goto err1; } diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index df5960bddfe6129afde034f70c363fcdca3f3c67..f1fc2bde6ef307cd494d4babb3101d90b3fe959a 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -367,6 +367,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, val |= BIT_WDG_RUN | BIT_WDG_RST; sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val); + /* Lock the watchdog */ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY); + mdelay(1000); dev_emerg(sadi->dev, "Unable to restart system\n"); diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index a4e43fc19ece556d5b1c82b4db9416add2afb4ad..5df01ffdef468e4a29aeb9a2cda71c9f12996205 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -385,6 +385,7 @@ static int spi_st_probe(struct platform_device *pdev) return 0; clk_disable: + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(spi_st->clk); 
put_master: spi_master_put(master); @@ -396,6 +397,8 @@ static int spi_st_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct spi_st *spi_st = spi_master_get_devdata(master); + pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(spi_st->clk); pinctrl_pm_select_sleep_state(&pdev->dev); diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index a76acedd7e2f402190a3525804cc012770747fc7..a1888dc6a938af435cc78a0635358b3b32ac1db9 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev) spi_irq = platform_get_irq(pdev, 0); tspi->irq = spi_irq; - ret = request_threaded_irq(tspi->irq, tegra_spi_isr, - tegra_spi_isr_thread, IRQF_ONESHOT, - dev_name(&pdev->dev), tspi); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", - tspi->irq); - goto exit_free_master; - } tspi->clk = devm_clk_get(&pdev->dev, "spi"); if (IS_ERR(tspi->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tspi->clk); - goto exit_free_irq; + goto exit_free_master; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); if (IS_ERR(tspi->rst)) { dev_err(&pdev->dev, "can not get reset\n"); ret = PTR_ERR(tspi->rst); - goto exit_free_irq; + goto exit_free_master; } tspi->max_buf_size = SPI_FIFO_DEPTH << 2; @@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev) ret = tegra_spi_init_dma_param(tspi, true); if (ret < 0) - goto exit_free_irq; + goto exit_free_master; ret = tegra_spi_init_dma_param(tspi, false); if (ret < 0) goto exit_rx_dma_free; @@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev) dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); goto exit_pm_disable; } + + reset_control_assert(tspi->rst); + udelay(2); + reset_control_deassert(tspi->rst); tspi->def_command1_reg = SPI_M_S; tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); pm_runtime_put(&pdev->dev); + ret = request_threaded_irq(tspi->irq, tegra_spi_isr, + tegra_spi_isr_thread, IRQF_ONESHOT, + dev_name(&pdev->dev), tspi); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", + tspi->irq); + goto exit_pm_disable; + } master->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_master(&pdev->dev, master); if (ret < 0) { dev_err(&pdev->dev, "can not register to master err %d\n", ret); - goto exit_pm_disable; + goto exit_free_irq; } return ret; +exit_free_irq: + free_irq(spi_irq, tspi); exit_pm_disable: pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) @@ -1136,8 +1142,6 @@ static int tegra_spi_probe(struct platform_device *pdev) tegra_spi_deinit_dma_param(tspi, false); exit_rx_dma_free: tegra_spi_deinit_dma_param(tspi, true); -exit_free_irq: - free_irq(spi_irq, tspi); exit_free_master: spi_master_put(master); return ret; diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 1427f343b39a3dc4468e14c3612814508993b3c0..d1187317bb5d7c95a27e1ebbd4e16e7d820afcc0 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1078,7 +1078,7 @@ static int tegra_slink_probe(struct platform_device *pdev) ret = clk_enable(tspi->clk); if (ret < 0) { dev_err(&pdev->dev, "Clock enable failed %d\n", ret); - goto exit_free_master; + goto exit_clk_unprepare; } spi_irq = platform_get_irq(pdev, 0); @@ -1151,6 +1151,8 @@ static int tegra_slink_probe(struct platform_device *pdev) free_irq(spi_irq, tspi); 
exit_clk_disable: clk_disable(tspi->clk); +exit_clk_unprepare: + clk_unprepare(tspi->clk); exit_free_master: spi_master_put(master); return ret; @@ -1164,6 +1166,7 @@ static int tegra_slink_remove(struct platform_device *pdev) free_irq(tspi->irq, tspi); clk_disable(tspi->clk); + clk_unprepare(tspi->clk); if (tspi->tx_dma_chan) tegra_slink_deinit_dma_param(tspi, false); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 5f19016bbf104b20b77ed7a3ecbaa8ea4af2793d..95c28abaa027204dcb527c9f3557dbdbe46cfe43 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -69,6 +69,7 @@ struct ti_qspi { u32 dc; bool mmap_enabled; + int current_cs; }; #define QSPI_PID (0x0) @@ -490,10 +491,11 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) { regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - MEM_CS_EN(spi->chip_select), - MEM_CS_MASK); + MEM_CS_MASK, + MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; + qspi->current_cs = spi->chip_select; } static void ti_qspi_disable_memory_map(struct spi_device *spi) @@ -503,8 +505,9 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - 0, MEM_CS_MASK); + MEM_CS_MASK, 0); qspi->mmap_enabled = false; + qspi->current_cs = -1; } static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, @@ -550,7 +553,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) ti_qspi_enable_memory_map(mem->spi); ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -807,6 +810,7 @@ static int ti_qspi_probe(struct platform_device *pdev) } } qspi->mmap_enabled = false; + qspi->current_cs = -1; ret = devm_spi_register_master(&pdev->dev, master); if (!ret) diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 97d137591b18d5fe12359e5a8865fd00b93a9118..4389ab80c23e6cdbaeba15d8ccc5cfba736d7fa2 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1294,18 +1294,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat, dma->rx_buf_virt, dma->rx_buf_dma); } -static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, +static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, struct pch_spi_data *data) { struct pch_spi_dma_ctrl *dma; + int ret; dma = &data->dma; + ret = 0; /* Get Consistent memory for Tx DMA */ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL); + if (!dma->tx_buf_virt) + ret = -ENOMEM; + /* Get Consistent memory for Rx DMA */ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL); + if (!dma->rx_buf_virt) + ret = -ENOMEM; + + return ret; } static int pch_spi_pd_probe(struct platform_device *plat_dev) @@ -1382,7 +1391,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) if (use_dma) { dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); - pch_alloc_dma_buf(board_dat, data); + ret = pch_alloc_dma_buf(board_dat, data); + if (ret) + goto err_spi_register_master; } ret = spi_register_master(master); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 
9da0bc5a036cfff6af0395ace48cad20ae9852d6..1a32183b9df9152d44da23bde5c7e36093d28335 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -362,9 +362,11 @@ static int spi_drv_probe(struct device *dev) if (ret) return ret; - ret = sdrv->probe(spi); - if (ret) - dev_pm_domain_detach(dev, true); + if (sdrv->probe) { + ret = sdrv->probe(spi); + if (ret) + dev_pm_domain_detach(dev, true); + } return ret; } @@ -372,9 +374,10 @@ static int spi_drv_probe(struct device *dev) static int spi_drv_remove(struct device *dev) { const struct spi_driver *sdrv = to_spi_driver(dev->driver); - int ret; + int ret = 0; - ret = sdrv->remove(to_spi_device(dev)); + if (sdrv->remove) + ret = sdrv->remove(to_spi_device(dev)); dev_pm_domain_detach(dev, true); return ret; @@ -399,10 +402,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) { sdrv->driver.owner = owner; sdrv->driver.bus = &spi_bus_type; - if (sdrv->probe) - sdrv->driver.probe = spi_drv_probe; - if (sdrv->remove) - sdrv->driver.remove = spi_drv_remove; + sdrv->driver.probe = spi_drv_probe; + sdrv->driver.remove = spi_drv_remove; if (sdrv->shutdown) sdrv->driver.shutdown = spi_drv_shutdown; return driver_register(&sdrv->driver); @@ -510,7 +511,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -529,7 +529,7 @@ int spi_add_device(struct spi_device *spi) * chipselect **BEFORE** we call setup(), else we'll trash * its configuration. Lock against concurrent add() calls. */ - mutex_lock(&spi_add_lock); + mutex_lock(ctlr->dev.add_lock); status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); if (status) { @@ -538,6 +538,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; @@ -561,7 +568,7 @@ int spi_add_device(struct spi_device *spi) dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); done: - mutex_unlock(&spi_add_lock); + mutex_unlock(ctlr->dev.add_lock); return status; } EXPORT_SYMBOL_GPL(spi_add_device); @@ -982,6 +989,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) if (max_tx || max_rx) { list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!xfer->len) + continue; if (!xfer->tx_buf) xfer->tx_buf = ctlr->dummy_tx; if (!xfer->rx_buf) @@ -1102,8 +1111,6 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1361,6 +1368,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_messages callback the spi bus has the opportunity to + * split a transfer to smaller chunks. + * Release split transfers here since spi_map_msg is done on the + * split transfers.
+ */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { @@ -2031,16 +2045,13 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, } EXPORT_SYMBOL_GPL(__spi_alloc_controller); -#ifdef CONFIG_OF -static int of_spi_register_master(struct spi_controller *ctlr) +static int __spi_register_controller(struct spi_controller *ctlr) { int nb, i, *cs; struct device_node *np = ctlr->dev.of_node; + struct gpio_desc *desc; - if (!np) - return 0; - - nb = of_gpio_named_count(np, "cs-gpios"); + nb = gpiod_count(&ctlr->dev, "cs"); ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); /* Return error only for an incorrectly formed cs-gpios property */ @@ -2059,17 +2070,20 @@ static int of_spi_register_master(struct spi_controller *ctlr) for (i = 0; i < ctlr->num_chipselect; i++) cs[i] = -ENOENT; - for (i = 0; i < nb; i++) - cs[i] = of_get_named_gpio(np, "cs-gpios", i); - - return 0; -} -#else -static int of_spi_register_master(struct spi_controller *ctlr) -{ + if (IS_ENABLED(CONFIG_OF) && np) { + for (i = 0; i < nb; i++) + cs[i] = of_get_named_gpio(np, "cs-gpios", i); + } else if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(&ctlr->dev)) { + for (i = 0; i < nb; i++) { + desc = devm_gpiod_get_index(&ctlr->dev, "cs", + i, GPIOD_ASIS); + if (IS_ERR(desc)) + continue; + cs[i] = desc_to_gpio(desc); + } + } return 0; } -#endif static int spi_controller_check_ops(struct spi_controller *ctlr) { @@ -2133,11 +2147,15 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (!spi_controller_is_slave(ctlr)) { - status = of_spi_register_master(ctlr); + status = __spi_register_controller(ctlr); if (status) return status; } + ctlr->dev.add_lock = kmalloc(sizeof(struct mutex), GFP_KERNEL); + if (!ctlr->dev.add_lock) + return -ENOMEM; + /* even if it's just one always-selected device, there must * be at least one chipselect */ @@ -2149,8 +2167,10 @@ int spi_register_controller(struct spi_controller *ctlr) id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr->bus_num + 1, GFP_KERNEL); mutex_unlock(&board_lock); - if (WARN(id < 0, "couldn't get idr")) - return id == -ENOSPC ? -EBUSY : id; + if (WARN(id < 0, "couldn't get idr")) { + status = (id == -ENOSPC) ? -EBUSY : id; + goto done; + } ctlr->bus_num = id; } else if (ctlr->dev.of_node) { /* allocate dynamic bus number using Linux idr */ @@ -2161,8 +2181,10 @@ int spi_register_controller(struct spi_controller *ctlr) id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, ctlr->bus_num + 1, GFP_KERNEL); mutex_unlock(&board_lock); - if (WARN(id < 0, "couldn't get idr")) - return id == -ENOSPC ? -EBUSY : id; + if (WARN(id < 0, "couldn't get idr")) { + status = (id == -ENOSPC) ? 
-EBUSY : id; + goto done; + } } } if (ctlr->bus_num < 0) { @@ -2176,8 +2198,11 @@ int spi_register_controller(struct spi_controller *ctlr) id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 0, GFP_KERNEL); mutex_unlock(&board_lock); - if (WARN(id < 0, "couldn't get idr")) - return id; + if (WARN(id < 0, "couldn't get idr")) { + status = id; + goto done; + } + ctlr->bus_num = id; } INIT_LIST_HEAD(&ctlr->queue); @@ -2185,6 +2210,7 @@ int spi_register_controller(struct spi_controller *ctlr) spin_lock_init(&ctlr->bus_lock_spinlock); mutex_init(&ctlr->bus_lock_mutex); mutex_init(&ctlr->io_mutex); + mutex_init(ctlr->dev.add_lock); ctlr->bus_lock_flag = 0; init_completion(&ctlr->xfer_completion); if (!ctlr->max_dma_len) @@ -2236,7 +2262,10 @@ int spi_register_controller(struct spi_controller *ctlr) /* Register devices from the device tree and ACPI */ of_register_spi_devices(ctlr); acpi_register_spi_devices(ctlr); + return status; + done: + kfree(ctlr->dev.add_lock); return status; } EXPORT_SYMBOL_GPL(spi_register_controller); @@ -2303,7 +2332,12 @@ void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; int id = ctlr->bus_num; - int dummy; + + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(ctlr->dev.add_lock); + + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ mutex_lock(&board_lock); @@ -2317,13 +2351,17 @@ void spi_unregister_controller(struct spi_controller *ctlr) list_del(&ctlr->list); mutex_unlock(&board_lock); - dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(ctlr->dev.add_lock); + + kfree(ctlr->dev.add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index cda10719d1d1b21b32866d2b79363faa461ab8e1..77bcb92284cecc807c907f1f76eba9e4c0083110 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -634,6 +634,9 @@ static int spidev_release(struct inode *inode, struct file *filp) if (dofree) kfree(spidev); } +#ifdef CONFIG_SPI_SLAVE + spi_slave_abort(spidev->spi); +#endif mutex_unlock(&device_list_lock); return 0; @@ -664,11 +667,34 @@ static const struct file_operations spidev_fops = { static struct class *spidev_class; #ifdef CONFIG_OF + +static void spi_cpld_init(struct spi_device *spi) +{ + int ret; + unsigned int chip_select, mode; + + ret = of_property_read_u32(spi->dev.of_node, "reg", &(chip_select)); + if (ret != 0) + dev_warn(&spi->dev, "of_property_read_u8 get chip_select fail\n"); + + spi->chip_select = (u8)(chip_select); + ret = of_property_read_u32(spi->dev.of_node, "mode", &(mode)); + if (ret != 0) + dev_warn(&spi->dev, "of_property_read_u16 get num-cs fail\n"); + + spi->mode = (u16)(mode); + ret = of_property_read_u32(spi->dev.of_node, "spi-max-frequency", + &(spi->max_speed_hz)); + if (ret != 0) + dev_warn(&spi->dev, "of_property_read_u32 get spi-max-frequency fail\n"); +} + static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, { .compatible = "lineartechnology,ltc2488" }, { .compatible = "ge,achc" }, { .compatible = "semtech,sx1301" }, + { .compatible = "spi-cpld", .data = spi_cpld_init, }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); @@ -719,19 +745,23 @@ static int spidev_probe(struct 
spi_device *spi) int status; unsigned long minor; + void (*spi_init)(struct spi_device *dev); + /* * spidev should never be referenced in DT without a specific * compatible string, it is a Linux implementation thing * rather than a description of the hardware. */ - if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) { - dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n"); - WARN_ON(spi->dev.of_node && - !of_match_device(spidev_dt_ids, &spi->dev)); - } + WARN(spi->dev.of_node && + of_device_is_compatible(spi->dev.of_node, "spidev"), + "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node); spidev_probe_acpi(spi); + spi_init = of_device_get_match_data(&spi->dev); + if (spi_init) + spi_init(spi); + /* Allocate driver data */ spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); if (!spidev) diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c index f51f150307dfbcab4d6ed3ddad9077e62427e37a..ffa379efff83c7609ecf2be2516e5edd34bc250e 100644 --- a/drivers/ssb/bridge_pcmcia_80211.c +++ b/drivers/ssb/bridge_pcmcia_80211.c @@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = { .resume = ssb_host_pcmcia_resume, }; +static int pcmcia_init_failed; + /* * These are not module init/exit functions! * The module_pcmcia_driver() helper cannot be used here. */ int ssb_host_pcmcia_init(void) { - return pcmcia_register_driver(&ssb_host_pcmcia_driver); + pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver); + + return pcmcia_init_failed; } void ssb_host_pcmcia_exit(void) { - pcmcia_unregister_driver(&ssb_host_pcmcia_driver); + if (!pcmcia_init_failed) + pcmcia_unregister_driver(&ssb_host_pcmcia_driver); } diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 1abf76be2aa8c7c49f867bc09dbbf68ca374a4d3..d51fa4f4e7ca1904715277a103898d9cf7c842f6 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -126,4 +126,6 @@ source "drivers/staging/axis-fifo/Kconfig" source "drivers/staging/erofs/Kconfig" +source "drivers/staging/gmjstcm/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ab0cbe8815b1c1b6112c2dfd93c06bd508dd92e9..1562b51985d0650a6cb844b9d5d86874c16a466a 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_EROFS_FS) += erofs/ +obj-$(CONFIG_GMJS_TCM) += gmjstcm/ diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a880b5c6c6c321c8d4e10de3f0298fc90fb2e46c..e3df4bf521b50edb008a481bab2973155ea1f897 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -75,6 +75,9 @@ struct ashmem_range { /* LRU list of unpinned pages, protected by ashmem_mutex */ static LIST_HEAD(ashmem_lru_list); +static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait); + /* * long lru_count - The count of pages on our LRU list. * @@ -168,19 +171,15 @@ static inline void lru_del(struct ashmem_range *range) * @end: The ending page (inclusive) * * This function is protected by ashmem_mutex. 
- * - * Return: 0 if successful, or -ENOMEM if there is an error */ -static int range_alloc(struct ashmem_area *asma, - struct ashmem_range *prev_range, unsigned int purged, - size_t start, size_t end) +static void range_alloc(struct ashmem_area *asma, + struct ashmem_range *prev_range, unsigned int purged, + size_t start, size_t end, + struct ashmem_range **new_range) { - struct ashmem_range *range; - - range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); - if (!range) - return -ENOMEM; + struct ashmem_range *range = *new_range; + *new_range = NULL; range->asma = asma; range->pgstart = start; range->pgend = end; @@ -190,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma, if (range_on_lru(range)) lru_add(range); - - return 0; } /** @@ -353,8 +350,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot) _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); } +static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma) +{ + /* do not allow to mmap ashmem backing shmem file directly */ + return -EPERM; +} + +static unsigned long +ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) { + static struct file_operations vmfile_fops; struct ashmem_area *asma = file->private_data; int ret = 0; @@ -395,6 +407,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } vmfile->f_mode |= FMODE_LSEEK; asma->file = vmfile; + /* + * override mmap operation of the vmfile so that it can't be + * remapped which would lead to creation of a new vma with no + * asma permission checks. Have to override get_unmapped_area + * as well to prevent VM_BUG_ON check for f_ops modification. 
+ */ + if (!vmfile_fops.mmap) { + vmfile_fops = *vmfile->f_op; + vmfile_fops.mmap = ashmem_vmfile_mmap; + vmfile_fops.get_unmapped_area = + ashmem_vmfile_get_unmapped_area; + } + vmfile->f_op = &vmfile_fops; } get_file(asma->file); @@ -438,7 +463,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) static unsigned long ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - struct ashmem_range *range, *next; unsigned long freed = 0; /* We might recurse into filesystem code, so bail out if necessary */ @@ -448,21 +472,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (!mutex_trylock(&ashmem_mutex)) return -1; - list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { + while (!list_empty(&ashmem_lru_list)) { + struct ashmem_range *range = + list_first_entry(&ashmem_lru_list, typeof(*range), lru); loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; + struct file *f = range->asma->file; - range->asma->file->f_op->fallocate(range->asma->file, - FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, end - start); + get_file(f); + atomic_inc(&ashmem_shrink_inflight); range->purged = ASHMEM_WAS_PURGED; lru_del(range); freed += range_size(range); + mutex_unlock(&ashmem_mutex); + f->f_op->fallocate(f, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start, end - start); + fput(f); + if (atomic_dec_and_test(&ashmem_shrink_inflight)) + wake_up_all(&ashmem_shrink_wait); + if (!mutex_trylock(&ashmem_mutex)) + goto out; if (--sc->nr_to_scan <= 0) break; } mutex_unlock(&ashmem_mutex); +out: return freed; } @@ -582,7 +618,8 @@ static int get_name(struct ashmem_area *asma, void __user *name) * * Caller must hold ashmem_mutex. */ -static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend, + struct ashmem_range **new_range) { struct ashmem_range *range, *next; int ret = ASHMEM_NOT_PURGED; @@ -635,7 +672,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) * second half and adjust the first chunk's endpoint. */ range_alloc(asma, range, range->purged, - pgend + 1, range->pgend); + pgend + 1, range->pgend, new_range); range_shrink(range, range->pgstart, pgstart - 1); break; } @@ -649,7 +686,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) * * Caller must hold ashmem_mutex. 
*/ -static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend, + struct ashmem_range **new_range) { struct ashmem_range *range, *next; unsigned int purged = ASHMEM_NOT_PURGED; @@ -675,7 +713,8 @@ static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) } } - return range_alloc(asma, range, purged, pgstart, pgend); + range_alloc(asma, range, purged, pgstart, pgend, new_range); + return 0; } /* @@ -708,11 +747,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, struct ashmem_pin pin; size_t pgstart, pgend; int ret = -EINVAL; + struct ashmem_range *range = NULL; if (copy_from_user(&pin, p, sizeof(pin))) return -EFAULT; + if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) { + range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); + if (!range) + return -ENOMEM; + } + mutex_lock(&ashmem_mutex); + wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight)); if (!asma->file) goto out_unlock; @@ -735,10 +782,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, switch (cmd) { case ASHMEM_PIN: - ret = ashmem_pin(asma, pgstart, pgend); + ret = ashmem_pin(asma, pgstart, pgend, &range); break; case ASHMEM_UNPIN: - ret = ashmem_unpin(asma, pgstart, pgend); + ret = ashmem_unpin(asma, pgstart, pgend, &range); break; case ASHMEM_GET_PIN_STATUS: ret = ashmem_get_pin_status(asma, pgstart, pgend); @@ -747,6 +794,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, out_unlock: mutex_unlock(&ashmem_mutex); + if (range) + kmem_cache_free(ashmem_range_cachep, range); return ret; } diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 99073325b0c00ca8784cbd1ed7849d4cdb09ec8e..b062066d9841960c021d27de629697d93a651590 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -237,10 +237,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a = attachment->priv; struct ion_buffer *buffer = dmabuf->priv; - free_duped_table(a->table); mutex_lock(&buffer->lock); list_del(&a->list); mutex_unlock(&buffer->lock); + free_duped_table(a->table); kfree(a); } @@ -303,45 +303,50 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf) static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) { struct ion_buffer *buffer = dmabuf->priv; + void *vaddr; + + if (!buffer->heap->ops->map_kernel) { + pr_err("%s: map kernel is not implemented by this heap.\n", + __func__); + return ERR_PTR(-ENOTTY); + } + mutex_lock(&buffer->lock); + vaddr = ion_buffer_kmap_get(buffer); + mutex_unlock(&buffer->lock); + + if (IS_ERR(vaddr)) + return vaddr; - return buffer->vaddr + offset * PAGE_SIZE; + return vaddr + offset * PAGE_SIZE; } static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr) { + struct ion_buffer *buffer = dmabuf->priv; + + if (buffer->heap->ops->map_kernel) { + mutex_lock(&buffer->lock); + ion_buffer_kmap_put(buffer); + mutex_unlock(&buffer->lock); + } + } static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; - void *vaddr; struct ion_dma_buf_attachment *a; - int ret = 0; - - /* - * TODO: Move this elsewhere because we don't always need a vaddr - */ - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - vaddr = ion_buffer_kmap_get(buffer); - if (IS_ERR(vaddr)) { - ret 
= PTR_ERR(vaddr); - goto unlock; - } - mutex_unlock(&buffer->lock); - } mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, direction); } - -unlock: mutex_unlock(&buffer->lock); - return ret; + + return 0; } static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, @@ -350,12 +355,6 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, struct ion_buffer *buffer = dmabuf->priv; struct ion_dma_buf_attachment *a; - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - ion_buffer_kmap_put(buffer); - mutex_unlock(&buffer->lock); - } - mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 9bc56eb48d2a894c2f3d760b8b129813d18b5177..890d264ac68798a1c7d8a40243d729fa7d75a6fa 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -8,11 +8,14 @@ #include #include #include +#include #include "ion.h" static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool) { + if (fatal_signal_pending(current)) + return NULL; return alloc_pages(pool->gfp_mask, pool->order); } diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 701eb9f3b0f134f054740a98385336f6a5438821..0a1b1ff1be042abbcdb97b4b6e6d68e0ca847b55 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -247,10 +247,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) static int ion_system_heap_create_pools(struct ion_page_pool **pools) { int i; - gfp_t gfp_flags = low_order_gfp_flags; for (i = 0; i < NUM_ORDERS; i++) { struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; if (orders[i] > 4) gfp_flags = high_order_gfp_flags; diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 687537203d9cfba144fba1dc846d3cc410776b08..d9725888af6fc34045806fbb5c91ca372b7c9a46 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig @@ -3,6 +3,7 @@ # config XIL_AXIS_FIFO tristate "Xilinx AXI-Stream FIFO IP core driver" + depends on OF default n help This adds support for the Xilinx AXI-Stream diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c index fa9d239474ee97d1300051016cecf8086d220d77..36a3564ba1fb5d8ac8e1261a4a6ab5123b2fbe95 100644 --- a/drivers/staging/comedi/comedi_compat32.c +++ b/drivers/staging/comedi/comedi_compat32.c @@ -102,8 +102,8 @@ static int compat_chaninfo(struct file *file, unsigned long arg) chaninfo = compat_alloc_user_space(sizeof(*chaninfo)); /* Copy chaninfo structure. Ignore unused members. */ - if (!access_ok(VERIFY_READ, chaninfo32, sizeof(*chaninfo32)) || - !access_ok(VERIFY_WRITE, chaninfo, sizeof(*chaninfo))) + if (!access_ok(chaninfo32, sizeof(*chaninfo32)) || + !access_ok(chaninfo, sizeof(*chaninfo))) return -EFAULT; err = 0; @@ -136,8 +136,8 @@ static int compat_rangeinfo(struct file *file, unsigned long arg) rangeinfo = compat_alloc_user_space(sizeof(*rangeinfo)); /* Copy rangeinfo structure. 
*/ - if (!access_ok(VERIFY_READ, rangeinfo32, sizeof(*rangeinfo32)) || - !access_ok(VERIFY_WRITE, rangeinfo, sizeof(*rangeinfo))) + if (!access_ok(rangeinfo32, sizeof(*rangeinfo32)) || + !access_ok(rangeinfo, sizeof(*rangeinfo))) return -EFAULT; err = 0; @@ -163,8 +163,8 @@ static int get_compat_cmd(struct comedi_cmd __user *cmd, } temp; /* Copy cmd structure. */ - if (!access_ok(VERIFY_READ, cmd32, sizeof(*cmd32)) || - !access_ok(VERIFY_WRITE, cmd, sizeof(*cmd))) + if (!access_ok(cmd32, sizeof(*cmd32)) || + !access_ok(cmd, sizeof(*cmd))) return -EFAULT; err = 0; @@ -217,8 +217,8 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32, * Assume the pointer values are already valid. * (Could use ptr_to_compat() to set them.) */ - if (!access_ok(VERIFY_READ, cmd, sizeof(*cmd)) || - !access_ok(VERIFY_WRITE, cmd32, sizeof(*cmd32))) + if (!access_ok(cmd, sizeof(*cmd)) || + !access_ok(cmd32, sizeof(*cmd32))) return -EFAULT; err = 0; @@ -317,8 +317,8 @@ static int get_compat_insn(struct comedi_insn __user *insn, /* Copy insn structure. Ignore the unused members. */ err = 0; - if (!access_ok(VERIFY_READ, insn32, sizeof(*insn32)) || - !access_ok(VERIFY_WRITE, insn, sizeof(*insn))) + if (!access_ok(insn32, sizeof(*insn32)) || + !access_ok(insn, sizeof(*insn))) return -EFAULT; err |= __get_user(temp.uint, &insn32->insn); @@ -350,7 +350,7 @@ static int compat_insnlist(struct file *file, unsigned long arg) insnlist32 = compat_ptr(arg); /* Get 32-bit insnlist structure. */ - if (!access_ok(VERIFY_READ, insnlist32, sizeof(*insnlist32))) + if (!access_ok(insnlist32, sizeof(*insnlist32))) return -EFAULT; err = 0; @@ -365,7 +365,7 @@ static int compat_insnlist(struct file *file, unsigned long arg) insn[n_insns])); /* Set native insnlist structure. */ - if (!access_ok(VERIFY_WRITE, &s->insnlist, sizeof(s->insnlist))) + if (!access_ok(&s->insnlist, sizeof(s->insnlist))) return -EFAULT; err |= __put_user(n_insns, &s->insnlist.n_insns); diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index 5775a93917f4d5d035c5460fbca910d8e370481e..fbbdf4b0f6c5ddf288a3f00068052d2a797e1700 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -987,6 +987,8 @@ int comedi_dio_insn_config(struct comedi_device *dev, unsigned int mask); unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data); +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, + struct comedi_cmd *cmd); unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); unsigned int comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 57dd63d548b7c94e03bbdf615daf1889acfc986a..5329a39552143885c8c0ae933006fb10890beafd 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -381,11 +381,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, EXPORT_SYMBOL_GPL(comedi_dio_update_state); /** - * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes + * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in + * bytes * @s: COMEDI subdevice. + * @cmd: COMEDI command. * * Determines the overall scan length according to the subdevice type and the - * number of channels in the scan. + * number of channels in the scan for the specified command. 
* * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned @@ -395,9 +397,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state); * * Returns the overall scan length in bytes. */ -unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, + struct comedi_cmd *cmd) { - struct comedi_cmd *cmd = &s->async->cmd; unsigned int num_samples; unsigned int bits_per_sample; @@ -414,6 +416,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) } return comedi_samples_to_bytes(s, num_samples); } +EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); + +/** + * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes + * @s: COMEDI subdevice. + * + * Determines the overall scan length according to the subdevice type and the + * number of channels in the scan for the current command. + * + * For digital input, output or input/output subdevices, samples for + * multiple channels are assumed to be packed into one or more unsigned + * short or unsigned int values according to the subdevice's %SDF_LSAMPL + * flag. For other types of subdevice, samples are assumed to occupy a + * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. + * + * Returns the overall scan length in bytes. + */ +unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) +{ + struct comedi_cmd *cmd = &s->async->cmd; + + return comedi_bytes_per_scan_cmd(s, cmd); +} EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c index 08ffe26c5d43f0724aaab89984aef3ceacb26f12..0f16e85911f2fd416906e1b17dccd8aef1f4357f 100644 --- a/drivers/staging/comedi/drivers/amplc_pci230.c +++ b/drivers/staging/comedi/drivers/amplc_pci230.c @@ -2330,7 +2330,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d) devpriv->intr_running = false; spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags); - comedi_handle_events(dev, s_ao); + if (s_ao) + comedi_handle_events(dev, s_ao); comedi_handle_events(dev, s_ai); return IRQ_HANDLED; diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index 3be927f1d3a92694faf94dc664f3c8bb829ba804..e15e33ed94ae3c81566895633b499a8dfc4a8067 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -557,7 +557,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d) } #endif comedi_handle_events(dev, s); - comedi_handle_events(dev, s_ao); + if (s_ao) + comedi_handle_events(dev, s_ao); return IRQ_RETVAL(handled); } diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index 2edf3ee91300007c4dff503774bca1f2fe04b57b..caf4d4df4bd3044f68a6e26b0bc5145b20e150ef 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d) static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, unsigned int flags) { - int divider, base, prescale; + unsigned int divider, base, prescale; - /* This function needs improvment */ + /* This function needs improvement */ /* Don't know if divider==0 works. 
*/ for (prescale = 0; prescale < 16; prescale++) { @@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, divider = (*nanosec) / base; break; case CMDF_ROUND_UP: - divider = (*nanosec) / base; + divider = DIV_ROUND_UP(*nanosec, base); break; } if (divider < 65536) { @@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, } prescale = 15; - base = timer_base * (1 << prescale); + base = timer_base * (prescale + 1); divider = 65535; *nanosec = divider * base; return (prescale << 16) | (divider); diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index 75cc9e8e5b94b1f4b14bc86ba310579668278c22..ee0402bf6e67519b14beee4c421922b46ccc4dcc 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include "../comedi_usb.h" @@ -237,22 +238,42 @@ static int dt9812_read_info(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd cmd; + struct dt9812_usb_cmd *cmd; + size_t tbuf_size; int count, ret; + void *tbuf; - cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA); - cmd.u.flash_data_info.address = + tbuf_size = max(sizeof(*cmd), buf_size); + + tbuf = kzalloc(tbuf_size, GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + cmd = tbuf; + + cmd->cmd = cpu_to_le32(DT9812_R_FLASH_DATA); + cmd->u.flash_data_info.address = cpu_to_le16(DT9812_DIAGS_BOARD_INFO_ADDR + offset); - cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size); + cmd->u.flash_data_info.numbytes = cpu_to_le16(buf_size); /* DT9812 only responds to 32 byte writes!! */ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - &cmd, 32, &count, DT9812_USB_TIMEOUT); + cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); if (ret) - return ret; + goto out; + + ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), + tbuf, buf_size, &count, DT9812_USB_TIMEOUT); + if (!ret) { + if (count == buf_size) + memcpy(buf, tbuf, buf_size); + else + ret = -EREMOTEIO; + } +out: + kfree(tbuf); - return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), - buf, buf_size, &count, DT9812_USB_TIMEOUT); + return ret; } static int dt9812_read_multiple_registers(struct comedi_device *dev, @@ -261,22 +282,42 @@ static int dt9812_read_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd cmd; + struct dt9812_usb_cmd *cmd; int i, count, ret; + size_t buf_size; + void *buf; - cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); - cmd.u.read_multi_info.count = reg_count; + buf_size = max_t(size_t, sizeof(*cmd), reg_count); + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + cmd = buf; + + cmd->cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); + cmd->u.read_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) - cmd.u.read_multi_info.address[i] = address[i]; + cmd->u.read_multi_info.address[i] = address[i]; /* DT9812 only responds to 32 byte writes!! 
*/ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - &cmd, 32, &count, DT9812_USB_TIMEOUT); + cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); if (ret) - return ret; + goto out; + + ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), + buf, reg_count, &count, DT9812_USB_TIMEOUT); + if (!ret) { + if (count == reg_count) + memcpy(value, buf, reg_count); + else + ret = -EREMOTEIO; + } +out: + kfree(buf); - return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), - value, reg_count, &count, DT9812_USB_TIMEOUT); + return ret; } static int dt9812_write_multiple_registers(struct comedi_device *dev, @@ -285,19 +326,27 @@ static int dt9812_write_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd cmd; + struct dt9812_usb_cmd *cmd; int i, count; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; - cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); - cmd.u.read_multi_info.count = reg_count; + cmd->cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); + cmd->u.read_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) { - cmd.u.write_multi_info.write[i].address = address[i]; - cmd.u.write_multi_info.write[i].value = value[i]; + cmd->u.write_multi_info.write[i].address = address[i]; + cmd->u.write_multi_info.write[i].value = value[i]; } /* DT9812 only responds to 32 byte writes!! */ - return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - &cmd, 32, &count, DT9812_USB_TIMEOUT); + ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), + cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); + kfree(cmd); + + return ret; } static int dt9812_rmw_multiple_registers(struct comedi_device *dev, @@ -306,17 +355,25 @@ static int dt9812_rmw_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd cmd; + struct dt9812_usb_cmd *cmd; int i, count; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; - cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); - cmd.u.rmw_multi_info.count = reg_count; + cmd->cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); + cmd->u.rmw_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) - cmd.u.rmw_multi_info.rmw[i] = rmw[i]; + cmd->u.rmw_multi_info.rmw[i] = rmw[i]; /* DT9812 only responds to 32 byte writes!! 
*/ - return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - &cmd, 32, &count, DT9812_USB_TIMEOUT); + ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), + cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); + kfree(cmd); + + return ret; } static int dt9812_digital_in(struct comedi_device *dev, u8 *bits) diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 4bdf44d8287996fb9a07af330316744078cb0eb0..dc62db1ee1dde83b3dfd550bf1cb9eaae5a3ea25 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, &devpriv->dio_buffer_phys_addr[i], GFP_KERNEL); + if (!devpriv->dio_buffer[i]) { + dev_warn(dev->class_dev, + "failed to allocate DMA buffer\n"); + return -ENOMEM; + } } /* allocate dma descriptors */ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev, @@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, NUM_DMA_DESCRIPTORS, &devpriv->dma_desc_phys_addr, GFP_KERNEL); + if (!devpriv->dma_desc) { + dev_warn(dev->class_dev, + "failed to allocate DMA descriptors\n"); + return -ENOMEM; + } if (devpriv->dma_desc_phys_addr & 0xf) { dev_warn(dev->class_dev, " dma descriptors not quad-word aligned (bug)\n"); diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index e521ed9d0887043bc5e4711e02c1c1f0b0d8d29f..35bd4d2efe16696c31c7ae40c6f1b0b9cb362a6b 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -602,6 +602,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev, case NI_660X_PFI_OUTPUT_DIO: if (chan > 31) return -EINVAL; + break; default: return -EINVAL; } diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 4dee2fc37aedeb2f6fc088ecb8e89ce6a8f141a5..d799b1b55de39a6ae443a6ac28018d79cf48407f 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -3516,6 +3516,7 @@ static int ni_cdio_check_chanlist(struct comedi_device *dev, static int ni_cdio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { + unsigned int bytes_per_scan; int err = 0; int tmp; @@ -3545,9 +3546,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); - err |= comedi_check_trigger_arg_max(&cmd->stop_arg, - s->async->prealloc_bufsz / - comedi_bytes_per_scan(s)); + bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd); + if (bytes_per_scan) { + err |= comedi_check_trigger_arg_max(&cmd->stop_arg, + s->async->prealloc_bufsz / + bytes_per_scan); + } if (err) return 3; diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c index 808ed92ed66fe4bedfbbba500452d86771e8162e..75e5b57ae0d7ff6ec51e24f64494680533928996 100644 --- a/drivers/staging/comedi/drivers/ni_usb6501.c +++ b/drivers/staging/comedi/drivers/ni_usb6501.c @@ -144,6 +144,10 @@ static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}; +/* Largest supported packets */ +static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); +static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); + enum commands { 
READ_PORT, WRITE_PORT, @@ -463,10 +467,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev) size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_tx_buf) { - kfree(devpriv->usb_rx_buf); + if (!devpriv->usb_tx_buf) return -ENOMEM; - } return 0; } @@ -503,6 +505,12 @@ static int ni6501_find_endpoints(struct comedi_device *dev) if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; + if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) + return -ENODEV; + + if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) + return -ENODEV; + return 0; } @@ -518,6 +526,9 @@ static int ni6501_auto_attach(struct comedi_device *dev, if (!devpriv) return -ENOMEM; + mutex_init(&devpriv->mut); + usb_set_intfdata(intf, devpriv); + ret = ni6501_find_endpoints(dev); if (ret) return ret; @@ -526,9 +537,6 @@ static int ni6501_auto_attach(struct comedi_device *dev, if (ret) return ret; - mutex_init(&devpriv->mut); - usb_set_intfdata(intf, devpriv); - ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index e18c0723b760c80763e235489c32e1c98c4639fb..0d38589c2600729181b9386ed0d5ef937a08ee9a 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk + * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk */ /* @@ -8,7 +8,7 @@ * Description: University of Stirling USB DAQ & INCITE Technology Limited * Devices: [ITL] USB-DUX-FAST (usbduxfast) * Author: Bernd Porr - * Updated: 10 Oct 2014 + * Updated: 16 Nov 2019 * Status: stable */ @@ -22,6 +22,7 @@ * * * Revision history: + * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest * 0.9: Dropping the first data packet which seems to be from the last transfer. * Buffer overflows in the FX2 are handed over to comedi. * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. 
@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, struct comedi_cmd *cmd) { int err = 0; + int err2 = 0; unsigned int steps; unsigned int arg; @@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, */ steps = (cmd->convert_arg * 30) / 1000; if (cmd->chanlist_len != 1) - err |= comedi_check_trigger_arg_min(&steps, - MIN_SAMPLING_PERIOD); - err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); - arg = (steps * 1000) / 30; - err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + err2 |= comedi_check_trigger_arg_min(&steps, + MIN_SAMPLING_PERIOD); + else + err2 |= comedi_check_trigger_arg_min(&steps, 1); + err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); + if (err2) { + err |= err2; + arg = (steps * 1000) / 30; + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + } if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 6234b649d887ccb3abac4c73dcb38aa095768600..ff52299c1fb0a666cef784fc58790615e0380740 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -90,6 +90,8 @@ enum { #define IC3_VERSION BIT(0) #define IC6_VERSION BIT(1) +#define MIN_BUF_SIZE 64 + enum vmk80xx_model { VMK8055_MODEL, VMK8061_MODEL @@ -157,22 +159,20 @@ static void vmk80xx_do_bulk_msg(struct comedi_device *dev) __u8 rx_addr; unsigned int tx_pipe; unsigned int rx_pipe; - size_t size; + size_t tx_size; + size_t rx_size; tx_addr = devpriv->ep_tx->bEndpointAddress; rx_addr = devpriv->ep_rx->bEndpointAddress; tx_pipe = usb_sndbulkpipe(usb, tx_addr); rx_pipe = usb_rcvbulkpipe(usb, rx_addr); - - /* - * The max packet size attributes of the K8061 - * input/output endpoints are identical - */ - size = usb_endpoint_maxp(devpriv->ep_tx); + tx_size = usb_endpoint_maxp(devpriv->ep_tx); + rx_size = usb_endpoint_maxp(devpriv->ep_rx); usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, - size, NULL, devpriv->ep_tx->bInterval); - usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10); + tx_size, NULL, devpriv->ep_tx->bInterval); + + usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL, HZ * 10); } static int vmk80xx_read_packet(struct comedi_device *dev) @@ -640,33 +640,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) struct vmk80xx_private *devpriv = dev->private; struct usb_interface *intf = comedi_to_usb_interface(dev); struct usb_host_interface *iface_desc = intf->cur_altsetting; - struct usb_endpoint_descriptor *ep_desc; - int i; - - if (iface_desc->desc.bNumEndpoints != 2) - return -ENODEV; - - for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { - ep_desc = &iface_desc->endpoint[i].desc; - - if (usb_endpoint_is_int_in(ep_desc) || - usb_endpoint_is_bulk_in(ep_desc)) { - if (!devpriv->ep_rx) - devpriv->ep_rx = ep_desc; - continue; - } + struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc; + int ret; - if (usb_endpoint_is_int_out(ep_desc) || - usb_endpoint_is_bulk_out(ep_desc)) { - if (!devpriv->ep_tx) - devpriv->ep_tx = ep_desc; - continue; - } - } + if (devpriv->model == VMK8061_MODEL) + ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc, + &ep_tx_desc, NULL, NULL); + else + ret = usb_find_common_endpoints(iface_desc, NULL, NULL, + &ep_rx_desc, &ep_tx_desc); - if (!devpriv->ep_rx || !devpriv->ep_tx) + if (ret) return -ENODEV; + devpriv->ep_rx = ep_rx_desc; + devpriv->ep_tx = ep_tx_desc; + return 
0; } @@ -675,17 +664,15 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) struct vmk80xx_private *devpriv = dev->private; size_t size; - size = usb_endpoint_maxp(devpriv->ep_rx); + size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; - size = usb_endpoint_maxp(devpriv->ep_tx); + size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_tx_buf) { - kfree(devpriv->usb_rx_buf); + if (!devpriv->usb_tx_buf) return -ENOMEM; - } return 0; } @@ -800,6 +787,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, devpriv->model = board->model; + sema_init(&devpriv->limit_sem, 8); + ret = vmk80xx_find_usb_endpoints(dev); if (ret) return ret; @@ -808,8 +797,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, if (ret) return ret; - sema_init(&devpriv->limit_sem, 8); - usb_set_intfdata(intf, devpriv); if (devpriv->model == VMK8055_MODEL) diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c index ac263a180253e5aaf5375a8da9b65f0ad8b06c40..894e60ecebe202c9e282abbdeaef7d23fd47a829 100644 --- a/drivers/staging/erofs/data.c +++ b/drivers/staging/erofs/data.c @@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio) struct page *page = bvec->bv_page; /* page is already locked */ - BUG_ON(PageUptodate(page)); + DBG_BUGON(PageUptodate(page)); if (unlikely(err)) SetPageError(page); @@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode, struct erofs_map_blocks *map, int flags) { + int err = 0; erofs_blk_t nblocks, lastblk; u64 offset = map->m_la; struct erofs_vnode *vi = EROFS_V(inode); trace_erofs_map_blocks_flatmode_enter(inode, map, flags); - BUG_ON(is_inode_layout_compression(inode)); nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); lastblk = nblocks - is_inode_layout_inline(inode); @@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode, map->m_plen = inode->i_size - offset; /* inline data should locate in one meta block */ - BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE); + if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { + DBG_BUGON(1); + err = -EIO; + goto err_out; + } + map->m_flags |= EROFS_MAP_META; } else { errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", vi->nid, inode->i_size, map->m_la); - BUG(); + DBG_BUGON(1); + err = -EIO; + goto err_out; } out: map->m_llen = map->m_plen; + +err_out: trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); - return 0; + return err; } #ifdef CONFIG_EROFS_FS_ZIP @@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page( erofs_off_t current_block = (erofs_off_t)page->index; int err; - BUG_ON(!nblocks); + DBG_BUGON(!nblocks); if (PageUptodate(page)) { err = 0; @@ -233,7 +242,7 @@ static inline struct bio *erofs_read_raw_page( } /* for RAW access mode, m_plen must be equal to m_llen */ - BUG_ON(map.m_plen != map.m_llen); + DBG_BUGON(map.m_plen != map.m_llen); blknr = erofs_blknr(map.m_pa); blkoff = erofs_blkoff(map.m_pa); @@ -243,7 +252,7 @@ static inline struct bio *erofs_read_raw_page( void *vsrc, *vto; struct page *ipage; - BUG_ON(map.m_plen > PAGE_SIZE); + DBG_BUGON(map.m_plen > PAGE_SIZE); ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); @@ -270,7 +279,7 @@ static inline struct bio *erofs_read_raw_page( } /* pa must be block-aligned for raw reading */ - BUG_ON(erofs_blkoff(map.m_pa) != 0); + DBG_BUGON(erofs_blkoff(map.m_pa)); /* max # of 
continuous pages */ if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) @@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page) if (IS_ERR(bio)) return PTR_ERR(bio); - BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */ + DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ return 0; } @@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp, /* pages could still be locked */ put_page(page); } - BUG_ON(!list_empty(pages)); + DBG_BUGON(!list_empty(pages)); /* the rare case (end in gaps) */ if (unlikely(bio != NULL)) diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c index be6ae3b1bdbe1c8394dae3361d46ec48a4394f19..fe6683effd0570054a5d16813fef9ce813a90a3b 100644 --- a/drivers/staging/erofs/dir.c +++ b/drivers/staging/erofs/dir.c @@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = { [EROFS_FT_SYMLINK] = DT_LNK, }; +static void debug_one_dentry(unsigned char d_type, const char *de_name, + unsigned int de_namelen) +{ +#ifdef CONFIG_EROFS_FS_DEBUG + /* since the on-disk name could not have the trailing '\0' */ + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; + + memcpy(dbg_namebuf, de_name, de_namelen); + dbg_namebuf[de_namelen] = '\0'; + + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, + de_namelen, d_type); +#endif +} + static int erofs_fill_dentries(struct dir_context *ctx, void *dentry_blk, unsigned *ofs, unsigned nameoff, unsigned maxsize) @@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx, de = dentry_blk + *ofs; while (de < end) { const char *de_name; - int de_namelen; + unsigned int de_namelen; unsigned char d_type; -#ifdef CONFIG_EROFS_FS_DEBUG - unsigned dbg_namelen; - unsigned char dbg_namebuf[EROFS_NAME_LEN]; -#endif - if (unlikely(de->file_type < EROFS_FT_MAX)) + if (de->file_type < EROFS_FT_MAX) d_type = erofs_filetype_table[de->file_type]; else d_type = DT_UNKNOWN; @@ -48,23 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx, nameoff = le16_to_cpu(de->nameoff); de_name = (char *)dentry_blk + nameoff; - de_namelen = unlikely(de + 1 >= end) ? - /* last directory entry */ - strnlen(de_name, maxsize - nameoff) : - le16_to_cpu(de[1].nameoff) - nameoff; - - /* the corrupted directory found */ - BUG_ON(de_namelen < 0); - -#ifdef CONFIG_EROFS_FS_DEBUG - dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen); - memcpy(dbg_namebuf, de_name, dbg_namelen); - dbg_namebuf[dbg_namelen] = '\0'; + /* the last dirent in the block? 
*/ + if (de + 1 >= end) + de_namelen = strnlen(de_name, maxsize - nameoff); + else + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; - debugln("%s, found de_name %s de_len %d d_type %d", __func__, - dbg_namebuf, de_namelen, d_type); -#endif + /* a corrupted entry is found */ + if (unlikely(nameoff + de_namelen > maxsize || + de_namelen > EROFS_NAME_LEN)) { + DBG_BUGON(1); + return -EIO; + } + debug_one_dentry(d_type, de_name, de_namelen); if (!dir_emit(ctx, de_name, de_namelen, le64_to_cpu(de->nid), d_type)) /* stoped by some reason */ @@ -92,8 +100,15 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) unsigned nameoff, maxsize; dentry_page = read_mapping_page(mapping, i, NULL); - if (IS_ERR(dentry_page)) - continue; + if (dentry_page == ERR_PTR(-ENOMEM)) { + err = -ENOMEM; + break; + } else if (IS_ERR(dentry_page)) { + errln("fail to readdir of logical block %u of nid %llu", + i, EROFS_V(dir)->nid); + err = PTR_ERR(dentry_page); + break; + } lock_page(dentry_page); de = (struct erofs_dirent *)kmap(dentry_page); diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h index 2f8e2bf70941dcf32badc5fb54bced880116108b..7677da889f1255988117bae54bc10fdaedec64c4 100644 --- a/drivers/staging/erofs/erofs_fs.h +++ b/drivers/staging/erofs/erofs_fs.h @@ -17,10 +17,16 @@ #define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EROFS_SUPER_OFFSET 1024 +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. + */ +#define EROFS_ALL_REQUIREMENTS 0 + struct erofs_super_block { /* 0 */__le32 magic; /* in the little endian */ /* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; +/* 8 */__le32 features; /* (aka. feature_compat) */ /* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ /* 13 */__u8 reserved; @@ -34,9 +40,10 @@ struct erofs_super_block { /* 44 */__le32 xattr_blkaddr; /* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ /* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. 
feature_incompat) */ -/* 80 */__u8 reserved2[48]; /* 128 bytes */ -} __packed; +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ #define __EROFS_BIT(_prefix, _cur, _pre) enum { \ _prefix ## _cur ## _BIT = _prefix ## _pre ## _BIT + \ diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c index fbf6ff25cd1bde7517bcde93042c0b7ec21e09fc..7448744cc51590ec183e7a78b2e4ab378252b373 100644 --- a/drivers/staging/erofs/inode.c +++ b/drivers/staging/erofs/inode.c @@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs) return -ENOMEM; m_pofs += vi->inode_isize + vi->xattr_isize; - BUG_ON(m_pofs + inode->i_size > PAGE_SIZE); + + /* inline symlink data shouldn't across page boundary as well */ + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { + DBG_BUGON(1); + kfree(lnk); + return -EIO; + } /* get in-page inline data */ memcpy(lnk, data + m_pofs, inode->i_size); @@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir) return PTR_ERR(page); } - BUG_ON(!PageUptodate(page)); + DBG_BUGON(!PageUptodate(page)); data = page_address(page); err = read_inode(inode, data + ofs); @@ -178,16 +184,16 @@ static int fill_inode(struct inode *inode, int isdir) /* setup the new inode */ if (S_ISREG(inode->i_mode)) { #ifdef CONFIG_EROFS_FS_XATTR - if (vi->xattr_isize) - inode->i_op = &erofs_generic_xattr_iops; + inode->i_op = &erofs_generic_xattr_iops; #endif inode->i_fop = &generic_ro_fops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = #ifdef CONFIG_EROFS_FS_XATTR - vi->xattr_isize ? &erofs_dir_xattr_iops : -#endif + &erofs_dir_xattr_iops; +#else &erofs_dir_iops; +#endif inode->i_fop = &erofs_dir_fops; } else if (S_ISLNK(inode->i_mode)) { /* by default, page_get_link is used for symlink */ diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h index 367b39fe46e523ef8da89e083e147cf6f31aa63b..541326c457f08f01b3da570b2711709cc0fd96f7 100644 --- a/drivers/staging/erofs/internal.h +++ b/drivers/staging/erofs/internal.h @@ -39,7 +39,7 @@ #define debugln(x, ...) ((void)0) #define dbg_might_sleep() ((void)0) -#define DBG_BUGON(...) ((void)0) +#define DBG_BUGON(x) ((void)(x)) #endif #ifdef CONFIG_EROFS_FAULT_INJECTION @@ -111,6 +111,8 @@ struct erofs_sb_info { u8 uuid[16]; /* 128-bit uuid for volume */ u8 volume_name[16]; /* volume name */ + u32 requirements; + char *dev_name; unsigned int mount_opt; @@ -184,50 +186,41 @@ struct erofs_workgroup { #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) -static inline bool erofs_workgroup_try_to_freeze( - struct erofs_workgroup *grp, int v) +static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, + int val) { -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) - if (v != atomic_cmpxchg(&grp->refcount, - v, EROFS_LOCKED_MAGIC)) - return false; - preempt_disable(); -#else preempt_disable(); - if (atomic_read(&grp->refcount) != v) { + if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) { preempt_enable(); return false; } -#endif return true; } -static inline void erofs_workgroup_unfreeze( - struct erofs_workgroup *grp, int v) +static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, + int orig_val) { -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) - atomic_set(&grp->refcount, v); -#endif + /* + * other observers should notice all modifications + * in the freezing period. 
+ */ + smp_mb(); + atomic_set(&grp->refcount, orig_val); preempt_enable(); } +static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) +{ + return atomic_cond_read_relaxed(&grp->refcount, + VAL != EROFS_LOCKED_MAGIC); +} + static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt) { - const int locked = (int)EROFS_LOCKED_MAGIC; int o; repeat: - o = atomic_read(&grp->refcount); - - /* spin if it is temporarily locked at the reclaim path */ - if (unlikely(o == locked)) { -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) - do - cpu_relax(); - while (atomic_read(&grp->refcount) == locked); -#endif - goto repeat; - } + o = erofs_wait_on_workgroup_freezed(grp); if (unlikely(o <= 0)) return -1; @@ -240,6 +233,7 @@ static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt) } #define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) +#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) extern int erofs_workgroup_put(struct erofs_workgroup *grp); @@ -307,12 +301,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); } -#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1) -#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1) +/* atomic flag definitions */ +#define EROFS_V_EA_INITED_BIT 0 + +/* bitlock definitions (arranged in reverse order) */ +#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) struct erofs_vnode { erofs_nid_t nid; - unsigned int flags; + + /* atomic flags (including bitlocks) */ + unsigned long flags; unsigned char data_mapping_mode; /* inline size in bytes */ @@ -465,8 +464,9 @@ struct erofs_map_blocks_iter { }; -static inline struct page *erofs_get_inline_page(struct inode *inode, - erofs_blk_t blkaddr) +static inline struct page * +erofs_get_inline_page(struct inode *inode, + erofs_blk_t blkaddr) { return erofs_get_meta_page(inode->i_sb, blkaddr, S_ISDIR(inode->i_mode)); diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c index 546a47156101ab73784072c2826be383549c5c23..023f64fa2c875ee3360f6fe185845e025915dbe5 100644 --- a/drivers/staging/erofs/namei.c +++ b/drivers/staging/erofs/namei.c @@ -15,74 +15,77 @@ #include -/* based on the value of qn->len is accurate */ -static inline int dirnamecmp(struct qstr *qn, - struct qstr *qd, unsigned *matched) +struct erofs_qstr { + const unsigned char *name; + const unsigned char *end; +}; + +/* based on the end of qn is accurate and it must have the trailing '\0' */ +static inline int dirnamecmp(const struct erofs_qstr *qn, + const struct erofs_qstr *qd, + unsigned int *matched) { - unsigned i = *matched, len = min(qn->len, qd->len); -loop: - if (unlikely(i >= len)) { - *matched = i; - if (qn->len < qd->len) { - /* - * actually (qn->len == qd->len) - * when qd->name[i] == '\0' - */ - return qd->name[i] == '\0' ? 0 : -1; + unsigned int i = *matched; + + /* + * on-disk error, let's only BUG_ON in the debugging mode. + * otherwise, it will return 1 to just skip the invalid name + * and go on (in consideration of the lookup performance). + */ + DBG_BUGON(qd->name > qd->end); + + /* qd could not have trailing '\0' */ + /* However it is absolutely safe if < qd->end */ + while (qd->name + i < qd->end && qd->name[i] != '\0') { + if (qn->name[i] != qd->name[i]) { + *matched = i; + return qn->name[i] > qd->name[i] ? 
1 : -1; } - return (qn->len > qd->len); + ++i; } - - if (qn->name[i] != qd->name[i]) { - *matched = i; - return qn->name[i] > qd->name[i] ? 1 : -1; - } - - ++i; - goto loop; + *matched = i; + /* See comments in __d_alloc on the terminating NUL character */ + return qn->name[i] == '\0' ? 0 : 1; } -static struct erofs_dirent *find_target_dirent( - struct qstr *name, - u8 *data, int maxsize) +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) + +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, + u8 *data, + unsigned int dirblksize, + const int ndirents) { - unsigned ndirents, head, back; - unsigned startprfx, endprfx; + int head, back; + unsigned int startprfx, endprfx; struct erofs_dirent *const de = (struct erofs_dirent *)data; - /* make sure that maxsize is valid */ - BUG_ON(maxsize < sizeof(struct erofs_dirent)); - - ndirents = le16_to_cpu(de->nameoff) / sizeof(*de); - - /* corrupted dir (may be unnecessary...) */ - BUG_ON(!ndirents); - - head = 0; + /* since the 1st dirent has been evaluated previously */ + head = 1; back = ndirents - 1; startprfx = endprfx = 0; while (head <= back) { - unsigned mid = head + (back - head) / 2; - unsigned nameoff = le16_to_cpu(de[mid].nameoff); - unsigned matched = min(startprfx, endprfx); - - struct qstr dname = QSTR_INIT(data + nameoff, - unlikely(mid >= ndirents - 1) ? - maxsize - nameoff : - le16_to_cpu(de[mid + 1].nameoff) - nameoff); + const int mid = head + (back - head) / 2; + const int nameoff = nameoff_from_disk(de[mid].nameoff, + dirblksize); + unsigned int matched = min(startprfx, endprfx); + struct erofs_qstr dname = { + .name = data + nameoff, + .end = unlikely(mid >= ndirents - 1) ? + data + dirblksize : + data + nameoff_from_disk(de[mid + 1].nameoff, + dirblksize) + }; /* string comparison without already matched prefix */ int ret = dirnamecmp(name, &dname, &matched); - if (unlikely(!ret)) + if (unlikely(!ret)) { return de + mid; - else if (ret > 0) { + } else if (ret > 0) { head = mid + 1; startprfx = matched; - } else if (unlikely(mid < 1)) /* fix "mid" overflow */ - break; - else { + } else { back = mid - 1; endprfx = matched; } @@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent( return ERR_PTR(-ENOENT); } -static struct page *find_target_block_classic( - struct inode *dir, - struct qstr *name, int *_diff) +static struct page *find_target_block_classic(struct inode *dir, + struct erofs_qstr *name, + int *_ndirents) { - unsigned startprfx, endprfx; - unsigned head, back; + unsigned int startprfx, endprfx; + int head, back; struct address_space *const mapping = dir->i_mapping; struct page *candidate = ERR_PTR(-ENOENT); @@ -105,41 +108,43 @@ static struct page *find_target_block_classic( back = inode_datablocks(dir) - 1; while (head <= back) { - unsigned mid = head + (back - head) / 2; + const int mid = head + (back - head) / 2; struct page *page = read_mapping_page(mapping, mid, NULL); - if (IS_ERR(page)) { -exact_out: - if (!IS_ERR(candidate)) /* valid candidate */ - put_page(candidate); - return page; - } else { - int diff; - unsigned ndirents, matched; - struct qstr dname; + if (!IS_ERR(page)) { struct erofs_dirent *de = kmap_atomic(page); - unsigned nameoff = le16_to_cpu(de->nameoff); - - ndirents = nameoff / sizeof(*de); + const int nameoff = nameoff_from_disk(de->nameoff, + EROFS_BLKSIZ); + const int ndirents = nameoff / sizeof(*de); + int diff; + unsigned int matched; + struct erofs_qstr dname; - /* corrupted dir (should have one entry at least) */ - BUG_ON(!ndirents || nameoff 
> PAGE_SIZE); + if (unlikely(!ndirents)) { + DBG_BUGON(1); + kunmap_atomic(de); + put_page(page); + page = ERR_PTR(-EIO); + goto out; + } matched = min(startprfx, endprfx); dname.name = (u8 *)de + nameoff; - dname.len = ndirents == 1 ? - /* since the rest of the last page is 0 */ - EROFS_BLKSIZ - nameoff - : le16_to_cpu(de[1].nameoff) - nameoff; + if (ndirents == 1) + dname.end = (u8 *)de + EROFS_BLKSIZ; + else + dname.end = (u8 *)de + + nameoff_from_disk(de[1].nameoff, + EROFS_BLKSIZ); /* string comparison without already matched prefix */ diff = dirnamecmp(name, &dname, &matched); kunmap_atomic(de); if (unlikely(!diff)) { - *_diff = 0; - goto exact_out; + *_ndirents = 0; + goto out; } else if (diff > 0) { head = mid + 1; startprfx = matched; @@ -147,45 +152,51 @@ static struct page *find_target_block_classic( if (likely(!IS_ERR(candidate))) put_page(candidate); candidate = page; + *_ndirents = ndirents; } else { put_page(page); - if (unlikely(mid < 1)) /* fix "mid" overflow */ - break; - back = mid - 1; endprfx = matched; } + continue; } +out: /* free if the candidate is valid */ + if (!IS_ERR(candidate)) + put_page(candidate); + return page; } - *_diff = 1; return candidate; } int erofs_namei(struct inode *dir, - struct qstr *name, - erofs_nid_t *nid, unsigned *d_type) + struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type) { - int diff; + int ndirents; struct page *page; - u8 *data; + void *data; struct erofs_dirent *de; + struct erofs_qstr qn; if (unlikely(!dir->i_size)) return -ENOENT; - diff = 1; - page = find_target_block_classic(dir, name, &diff); + qn.name = name->name; + qn.end = name->name + name->len; + + ndirents = 0; + page = find_target_block_classic(dir, &qn, &ndirents); if (unlikely(IS_ERR(page))) return PTR_ERR(page); data = kmap_atomic(page); /* the target page has been mapped */ - de = likely(diff) ? 
- /* since the rest of the last page is 0 */ - find_target_dirent(name, data, EROFS_BLKSIZ) : - (struct erofs_dirent *)data; + if (ndirents) + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); + else + de = (struct erofs_dirent *)data; if (likely(!IS_ERR(de))) { *nid = le64_to_cpu(de->nid); diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index 2df9768edac96d1dc199071698558c6fb1e5da5d..b49ebdf6ebdaf0d79663dc8af351f9503b271e38 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c @@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void) static void erofs_exit_inode_cache(void) { - BUG_ON(erofs_inode_cachep == NULL); kmem_cache_destroy(erofs_inode_cachep); } @@ -76,6 +75,22 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + static int superblock_read(struct super_block *sb) { struct erofs_sb_info *sbi; @@ -109,6 +124,9 @@ static int superblock_read(struct super_block *sb) goto out; } + if (!check_layout_compatibility(sb, layout)) + goto out; + sbi->blocks = le32_to_cpu(layout->blocks); sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); #ifdef CONFIG_EROFS_FS_XATTR @@ -265,8 +283,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) int ret = 1; /* 0 - busy */ struct address_space *const mapping = page->mapping; - BUG_ON(!PageLocked(page)); - BUG_ON(mapping->a_ops != &managed_cache_aops); + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(mapping->a_ops != &managed_cache_aops); if (PagePrivate(page)) ret = erofs_try_to_free_cached_page(mapping, page); @@ -279,10 +297,10 @@ static void managed_cache_invalidatepage(struct page *page, { const unsigned int stop = length + offset; - BUG_ON(!PageLocked(page)); + DBG_BUGON(!PageLocked(page)); - /* Check for overflow */ - BUG_ON(stop > PAGE_SIZE || stop < length); + /* Check for potential overflow in debug mode */ + DBG_BUGON(stop > PAGE_SIZE || stop < length); if (offset == 0 && stop == PAGE_SIZE) while (!managed_cache_releasepage(page, GFP_NOFS)) @@ -404,12 +422,6 @@ static int erofs_read_super(struct super_block *sb, erofs_register_super(sb); - /* - * We already have a positive dentry, which was instantiated - * by d_make_root. Just need to d_rehash it. 
- */ - d_rehash(sb->s_root); - if (!silent) infoln("mounted on %s with opts: %s.", dev_name, (char *)data); @@ -625,7 +637,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) static int erofs_remount(struct super_block *sb, int *flags, char *data) { - BUG_ON(!sb_rdonly(sb)); + DBG_BUGON(!sb_rdonly(sb)); *flags |= SB_RDONLY; return 0; diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h index 0956615b86f72778c05c48a665fba0900b784744..23856ba2742d882dd76ef6864ba09f6ad4d449f4 100644 --- a/drivers/staging/erofs/unzip_pagevec.h +++ b/drivers/staging/erofs/unzip_pagevec.h @@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor, erofs_vtptr_t t; if (unlikely(ctor->index >= ctor->nr)) { - BUG_ON(ctor->next == NULL); + DBG_BUGON(!ctor->next); z_erofs_pagevec_ctor_pagedown(ctor, true); } diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 8721f0a41d157b7ac799eed40bdfea06aa30decd..0f1558c6747efdd9782226b4631c3fa0507b3230 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c @@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly; void z_erofs_exit_zip_subsystem(void) { - BUG_ON(z_erofs_workqueue == NULL); - BUG_ON(z_erofs_workgroup_cachep == NULL); - destroy_workqueue(z_erofs_workqueue); kmem_cache_destroy(z_erofs_workgroup_cachep); } @@ -60,15 +57,30 @@ enum z_erofs_vle_work_role { Z_EROFS_VLE_WORK_SECONDARY, Z_EROFS_VLE_WORK_PRIMARY, /* - * The current work has at least been linked with the following - * processed chained works, which means if the processing page - * is the tail partial page of the work, the current work can - * safely use the whole page, as illustrated below: - * +--------------+-------------------------------------------+ - * | tail page | head page (of the previous work) | - * +--------------+-------------------------------------------+ - * /\ which belongs to the current work - * [ (*) this page can be used for the current work itself. ] + * The current work was the tail of an exist chain, and the previous + * processed chained works are all decided to be hooked up to it. + * A new chain should be created for the remaining unprocessed works, + * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, + * the next work cannot reuse the whole page in the following scenario: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (belongs to the next work) | (belongs to the current work) | + * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| + */ + Z_EROFS_VLE_WORK_PRIMARY_HOOKED, + /* + * The current work has been linked with the processed chained works, + * and could be also linked with the potential remaining works, which + * means if the processing page is the tail partial page of the work, + * the current work can safely use the whole page (since the next work + * is under control) for in-place decompression, as illustrated below: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (of the current work) | (of the previous work) | + * | PRIMARY_FOLLOWED or | | + * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________| + * + * [ (*) the above page can be used for the current work itself. ] */ Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, Z_EROFS_VLE_WORK_MAX @@ -237,10 +249,10 @@ static int z_erofs_vle_work_add_page( return ret ? 
0 : -EAGAIN; } -static inline bool try_to_claim_workgroup( - struct z_erofs_vle_workgroup *grp, - z_erofs_vle_owned_workgrp_t *owned_head, - bool *hosted) +static enum z_erofs_vle_work_role +try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp, + z_erofs_vle_owned_workgrp_t *owned_head, + bool *hosted) { DBG_BUGON(*hosted == true); @@ -254,6 +266,9 @@ static inline bool try_to_claim_workgroup( *owned_head = grp; *hosted = true; + /* lucky, I am the followee :) */ + return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED; + } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) { /* * type 2, link to the end of a existing open chain, @@ -263,12 +278,11 @@ static inline bool try_to_claim_workgroup( if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL, *owned_head)) goto retry; - *owned_head = Z_EROFS_VLE_WORKGRP_TAIL; - } else - return false; /* :( better luck next time */ + return Z_EROFS_VLE_WORK_PRIMARY_HOOKED; + } - return true; /* lucky, I am the followee :) */ + return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */ } static struct z_erofs_vle_work * @@ -293,14 +307,15 @@ z_erofs_vle_work_lookup(struct super_block *sb, *grp_ret = grp = container_of(egrp, struct z_erofs_vle_workgroup, obj); -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF work = z_erofs_vle_grab_work(grp, pageofs); + /* if multiref is disabled, `primary' is always true */ primary = true; -#else - BUG(); -#endif - DBG_BUGON(work->pageofs != pageofs); + if (work->pageofs != pageofs) { + DBG_BUGON(1); + erofs_workgroup_put(egrp); + return ERR_PTR(-EIO); + } /* * lock must be taken first to avoid grp->next == NIL between @@ -343,12 +358,8 @@ z_erofs_vle_work_lookup(struct super_block *sb, *hosted = false; if (!primary) *role = Z_EROFS_VLE_WORK_SECONDARY; - /* claim the workgroup if possible */ - else if (try_to_claim_workgroup(grp, owned_head, hosted)) - *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED; - else - *role = Z_EROFS_VLE_WORK_PRIMARY; - + else /* claim the workgroup if possible */ + *role = try_to_claim_workgroup(grp, owned_head, hosted); return work; } @@ -365,12 +376,12 @@ z_erofs_vle_work_register(struct super_block *sb, struct z_erofs_vle_workgroup *grp = *grp_ret; struct z_erofs_vle_work *work; -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF - BUG_ON(grp != NULL); -#else - if (grp != NULL) - goto skip; -#endif + /* if multiref is disabled, grp should never be nullptr */ + if (unlikely(grp)) { + DBG_BUGON(1); + return ERR_PTR(-EINVAL); + } + /* no available workgroup, let's allocate one */ grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS); if (unlikely(grp == NULL)) @@ -393,13 +404,7 @@ z_erofs_vle_work_register(struct super_block *sb, *hosted = true; newgrp = true; -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF -skip: - /* currently unimplemented */ - BUG(); -#else work = z_erofs_vle_grab_primary_work(grp); -#endif work->pageofs = pageofs; mutex_init(&work->lock); @@ -431,6 +436,9 @@ static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp, } } +#define builder_is_hooked(builder) \ + ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED) + #define builder_is_followed(builder) \ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED) @@ -595,7 +603,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, struct z_erofs_vle_work_builder *const builder = &fe->builder; const loff_t offset = page_offset(page); - bool tight = builder_is_followed(builder); + bool tight = builder_is_hooked(builder); struct z_erofs_vle_work *work = builder->work; #ifdef EROFS_FS_HAS_MANAGED_CACHE @@ -606,7 +614,7 @@ 
static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, enum z_erofs_page_type page_type; unsigned cur, end, spiltted, index; - int err; + int err = 0; /* register locked file pages as online pages in pack */ z_erofs_onlinepage_init(page); @@ -618,13 +626,17 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, /* lucky, within the range of the current map_blocks */ if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) + offset + cur < map->m_la + map->m_llen) { + /* didn't get a valid unzip work previously (very rare) */ + if (!builder->work) + goto restart_now; goto hitted; + } /* go ahead the next map_blocks */ debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); - if (!z_erofs_vle_work_iter_end(builder)) + if (z_erofs_vle_work_iter_end(builder)) fe->initial = false; map->m_la = offset + cur; @@ -633,12 +645,12 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, if (unlikely(err)) goto err_out; - /* deal with hole (FIXME! broken now) */ +restart_now: if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) goto hitted; DBG_BUGON(map->m_plen != 1 << sbi->clusterbits); - BUG_ON(erofs_blkoff(map->m_pa)); + DBG_BUGON(erofs_blkoff(map->m_pa)); err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head); if (unlikely(err)) @@ -659,7 +671,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, builder->role = Z_EROFS_VLE_WORK_PRIMARY; #endif - tight &= builder_is_followed(builder); + tight &= builder_is_hooked(builder); work = builder->work; hitted: cur = end - min_t(unsigned, offset + end - map->m_la, end); @@ -674,6 +686,9 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); + if (cur) + tight &= builder_is_followed(builder); + retry: err = z_erofs_vle_work_add_page(builder, page, page_type); /* should allocate an additional staging page for pagevec */ @@ -683,7 +698,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, err = z_erofs_vle_work_add_page(builder, newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE); - if (!err) + if (likely(!err)) goto retry; } @@ -694,9 +709,10 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, /* FIXME! avoid the last relundant fixup & endio */ z_erofs_onlinepage_fixup(page, index, true); - ++spiltted; - /* also update nr_pages and increase queued_pages */ + /* bump up the number of spiltted parts of a page */ + ++spiltted; + /* also update nr_pages */ work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1); next_part: /* can be used for verification */ @@ -706,16 +722,18 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, if (end > 0) goto repeat; +out: /* FIXME! 
avoid the last relundant fixup & endio */ z_erofs_onlinepage_endio(page); debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", __func__, page, spiltted, map->m_llen); - return 0; + return err; + /* if some error occurred while processing this page */ err_out: - /* TODO: the missing error handing cases */ - return err; + SetPageError(page); + goto out; } static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) @@ -724,13 +742,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t); bool background = tagptr_unfold_tags(t); - if (atomic_add_return(bios, &io->pending_bios)) + if (!background) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); return; + } - if (background) + if (!atomic_add_return(bios, &io->pending_bios)) queue_work(z_erofs_workqueue, &io->u.work); - else - wake_up(&io->u.wait); } static inline void z_erofs_vle_read_endio(struct bio *bio) @@ -747,7 +770,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio) bool cachemngd = false; DBG_BUGON(PageUptodate(page)); - BUG_ON(page->mapping == NULL); + DBG_BUGON(!page->mapping); #ifdef EROFS_FS_HAS_MANAGED_CACHE if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) { @@ -791,10 +814,8 @@ static int z_erofs_vle_unzip(struct super_block *sb, const unsigned clusterpages = erofs_clusterpages(sbi); struct z_erofs_pagevec_ctor ctor; - unsigned nr_pages; -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF - unsigned sparsemem_pages = 0; -#endif + unsigned int nr_pages; + unsigned int sparsemem_pages = 0; struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES]; struct page **pages, **compressed_pages, *page; unsigned i, llen; @@ -806,12 +827,8 @@ static int z_erofs_vle_unzip(struct super_block *sb, int err; might_sleep(); -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF work = z_erofs_vle_grab_primary_work(grp); -#else - BUG(); -#endif - BUG_ON(!READ_ONCE(work->nr_pages)); + DBG_BUGON(!READ_ONCE(work->nr_pages)); mutex_lock(&work->lock); nr_pages = work->nr_pages; @@ -840,6 +857,7 @@ static int z_erofs_vle_unzip(struct super_block *sb, for (i = 0; i < nr_pages; ++i) pages[i] = NULL; + err = 0; z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0); @@ -860,14 +878,21 @@ static int z_erofs_vle_unzip(struct super_block *sb, else pagenr = z_erofs_onlinepage_index(page); - BUG_ON(pagenr >= nr_pages); + DBG_BUGON(pagenr >= nr_pages); -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF - BUG_ON(pages[pagenr] != NULL); - ++sparsemem_pages; -#endif + /* + * currently EROFS doesn't support multiref(dedup), + * so here erroring out one multiref page. 
+ */ + if (pages[pagenr]) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EIO; + } pages[pagenr] = page; } + sparsemem_pages = i; z_erofs_pagevec_ctor_exit(&ctor, true); @@ -883,35 +908,47 @@ static int z_erofs_vle_unzip(struct super_block *sb, DBG_BUGON(page == NULL); DBG_BUGON(page->mapping == NULL); - if (z_erofs_is_stagingpage(page)) - continue; + if (!z_erofs_is_stagingpage(page)) { #ifdef EROFS_FS_HAS_MANAGED_CACHE - else if (page->mapping == mngda) { - BUG_ON(PageLocked(page)); - BUG_ON(!PageUptodate(page)); - continue; - } + if (page->mapping == mngda) { + if (unlikely(!PageUptodate(page))) + err = -EIO; + continue; + } #endif - /* only non-head page could be reused as a compressed page */ - pagenr = z_erofs_onlinepage_index(page); + /* + * only if non-head page can be selected + * for inplace decompression + */ + pagenr = z_erofs_onlinepage_index(page); - BUG_ON(pagenr >= nr_pages); -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF - BUG_ON(pages[pagenr] != NULL); - ++sparsemem_pages; -#endif - pages[pagenr] = page; + DBG_BUGON(pagenr >= nr_pages); + if (pages[pagenr]) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EIO; + } + ++sparsemem_pages; + pages[pagenr] = page; - overlapped = true; + overlapped = true; + } + + /* PG_error needs checking for inplaced and staging pages */ + if (unlikely(PageError(page))) { + DBG_BUGON(PageUptodate(page)); + err = -EIO; + } } + if (unlikely(err)) + goto out; + llen = (nr_pages << PAGE_SHIFT) - work->pageofs; if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { - /* FIXME! this should be fixed in the future */ - BUG_ON(grp->llen != llen); - err = z_erofs_vle_plain_copy(compressed_pages, clusterpages, pages, nr_pages, work->pageofs); goto out; @@ -920,18 +957,13 @@ static int z_erofs_vle_unzip(struct super_block *sb, if (llen > grp->llen) llen = grp->llen; - err = z_erofs_vle_unzip_fast_percpu(compressed_pages, - clusterpages, pages, llen, work->pageofs, - z_erofs_onlinepage_endio); + err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages, + pages, llen, work->pageofs); if (err != -ENOTSUPP) - goto out_percpu; + goto out; -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF - if (sparsemem_pages >= nr_pages) { - BUG_ON(sparsemem_pages > nr_pages); + if (sparsemem_pages >= nr_pages) goto skip_allocpage; - } -#endif for (i = 0; i < nr_pages; ++i) { if (pages[i] != NULL) @@ -940,10 +972,12 @@ static int z_erofs_vle_unzip(struct super_block *sb, pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS); } -#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF skip_allocpage: -#endif vout = erofs_vmap(pages, nr_pages); + if (!vout) { + err = -ENOMEM; + goto out; + } err = z_erofs_vle_unzip_vmap(compressed_pages, clusterpages, vout, llen, work->pageofs, overlapped); @@ -951,8 +985,25 @@ static int z_erofs_vle_unzip(struct super_block *sb, erofs_vunmap(vout, nr_pages); out: + /* must handle all compressed pages before endding pages */ + for (i = 0; i < clusterpages; ++i) { + page = compressed_pages[i]; + +#ifdef EROFS_FS_HAS_MANAGED_CACHE + if (page->mapping == mngda) + continue; +#endif + /* recycle all individual staging pages */ + (void)z_erofs_gather_if_stagingpage(page_pool, page); + + WRITE_ONCE(compressed_pages[i], NULL); + } + for (i = 0; i < nr_pages; ++i) { page = pages[i]; + if (!page) + continue; + DBG_BUGON(page->mapping == NULL); /* recycle all individual staging pages */ @@ -965,20 +1016,6 @@ static int z_erofs_vle_unzip(struct super_block *sb, 
z_erofs_onlinepage_endio(page); } -out_percpu: - for (i = 0; i < clusterpages; ++i) { - page = compressed_pages[i]; - -#ifdef EROFS_FS_HAS_MANAGED_CACHE - if (page->mapping == mngda) - continue; -#endif - /* recycle all individual staging pages */ - (void)z_erofs_gather_if_stagingpage(page_pool, page); - - WRITE_ONCE(compressed_pages[i], NULL); - } - if (pages == z_pagemap_global) mutex_unlock(&z_pagemap_global_lock); else if (unlikely(pages != pages_onstack)) @@ -1026,7 +1063,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work) struct z_erofs_vle_unzip_io_sb, io.u.work); LIST_HEAD(page_pool); - BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED); + DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED); z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool); put_pages_list(&page_pool); @@ -1077,6 +1114,8 @@ static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp, return true; lock_page(page); + ClearPageError(page); + if (unlikely(!PagePrivate(page))) { set_page_private(page, (unsigned long)grp); SetPagePrivate(page); @@ -1314,19 +1353,18 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file, err = z_erofs_do_read_page(&f, page, &pagepool); (void)z_erofs_vle_work_iter_end(&f.builder); - if (err) { + /* if some compressed cluster ready, need submit them anyway */ + z_erofs_submit_and_unzip(&f, &pagepool, true); + + if (err) errln("%s, failed to read, err [%d]", __func__, err); - goto out; - } - z_erofs_submit_and_unzip(&f, &pagepool, true); -out: if (f.m_iter.mpage != NULL) put_page(f.m_iter.mpage); /* clean up the remaining free pages */ put_pages_list(&pagepool); - return 0; + return err; } static inline int __z_erofs_vle_normalaccess_readpages( @@ -1355,7 +1393,6 @@ static inline int __z_erofs_vle_normalaccess_readpages( continue; } - BUG_ON(PagePrivate(page)); set_page_private(page, (unsigned long)head); head = page; } @@ -1490,6 +1527,7 @@ static erofs_off_t vle_get_logical_extent_head( unsigned long long ofs; const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits; const unsigned int clustersize = 1 << clusterbits; + unsigned int delta0; if (page->index != blkaddr) { kunmap_atomic(*kaddr_iter); @@ -1504,12 +1542,13 @@ static erofs_off_t vle_get_logical_extent_head( di = *kaddr_iter + vle_extent_blkoff(inode, lcn); switch (vle_cluster_type(di)) { case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - BUG_ON(!di->di_u.delta[0]); - BUG_ON(lcn < di->di_u.delta[0]); + delta0 = le16_to_cpu(di->di_u.delta[0]); + DBG_BUGON(!delta0); + DBG_BUGON(lcn < delta0); ofs = vle_get_logical_extent_head(inode, page_iter, kaddr_iter, - lcn - di->di_u.delta[0], pcn, flags); + lcn - delta0, pcn, flags); break; case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: *flags ^= EROFS_MAP_ZIPPED; diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h index 39399850086521e95ef03c9f5a49c9e39a072ecc..684ff06fc7bf85a2627464e81e4bc0cff779481b 100644 --- a/drivers/staging/erofs/unzip_vle.h +++ b/drivers/staging/erofs/unzip_vle.h @@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool, #define Z_EROFS_VLE_INLINE_PAGEVECS 3 struct z_erofs_vle_work { - /* struct z_erofs_vle_work *left, *right; */ - -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF - struct list_head list; - - atomic_t refcount; -#endif struct mutex lock; /* I: decompression offset in page */ @@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt( grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK); } -#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF 
-#error multiref decompression is unimplemented yet -#else +/* definitions if multiref is disabled */ #define z_erofs_vle_grab_primary_work(grp) (&(grp)->work) #define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work) #define z_erofs_vle_work_workgroup(wrk, primary) \ @@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt( struct z_erofs_vle_workgroup, work) : \ ({ BUG(); (void *)NULL; })) -#endif #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup) @@ -228,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages, extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, unsigned clusterpages, struct page **pages, - unsigned outlen, unsigned short pageofs, - void (*endio)(struct page *)); + unsigned int outlen, unsigned short pageofs); extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages, unsigned clusterpages, void *vaddr, unsigned llen, diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c index f5b665f15be5294f9a293c044e4ee7425b1f4057..3a7428317f0ad07db9d0eacc168dbc4f14aedc2f 100644 --- a/drivers/staging/erofs/unzip_vle_lz4.c +++ b/drivers/staging/erofs/unzip_vle_lz4.c @@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages, if (compressed_pages[j] != page) continue; - BUG_ON(mirrored[j]); + DBG_BUGON(mirrored[j]); memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE); mirrored[j] = true; break; @@ -105,8 +105,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, unsigned clusterpages, struct page **pages, unsigned outlen, - unsigned short pageofs, - void (*endio)(struct page *)) + unsigned short pageofs) { void *vin, *vout; unsigned nr_pages, i, j; @@ -117,10 +116,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); - if (clusterpages == 1) + if (clusterpages == 1) { vin = kmap_atomic(compressed_pages[0]); - else + } else { vin = erofs_vmap(compressed_pages, clusterpages); + if (!vin) + return -ENOMEM; + } preempt_disable(); vout = erofs_pcpubuf[smp_processor_id()].data; @@ -128,31 +130,30 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, ret = z_erofs_unzip_lz4(vin, vout + pageofs, clusterpages * PAGE_SIZE, outlen); - if (ret >= 0) { - outlen = ret; - ret = 0; - } + if (ret < 0) + goto out; + ret = 0; for (i = 0; i < nr_pages; ++i) { j = min((unsigned)PAGE_SIZE - pageofs, outlen); if (pages[i] != NULL) { - if (ret < 0) - SetPageError(pages[i]); - else if (clusterpages == 1 && pages[i] == compressed_pages[0]) + if (clusterpages == 1 && + pages[i] == compressed_pages[0]) { memcpy(vin + pageofs, vout + pageofs, j); - else { + } else { void *dst = kmap_atomic(pages[i]); memcpy(dst + pageofs, vout + pageofs, j); kunmap_atomic(dst); } - endio(pages[i]); } vout += PAGE_SIZE; outlen -= j; pageofs = 0; } + +out: preempt_enable(); if (clusterpages == 1) diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c index 595cf90af9bb2f98a5695e38070c7616e84ba208..2d96820da62ecb3e3ad94d9a90cd7a1a9c997749 100644 --- a/drivers/staging/erofs/utils.c +++ b/drivers/staging/erofs/utils.c @@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp) list_del(&page->lru); } else { page = alloc_pages(gfp | __GFP_NOFAIL, 0); - - BUG_ON(page == NULL); - BUG_ON(page->mapping != NULL); } return page; } @@ -60,7 +57,7 @@ struct erofs_workgroup *erofs_find_workgroup( /* decrease refcount added by erofs_workgroup_put */ if (unlikely(oldcount == 
1)) atomic_long_dec(&erofs_global_shrink_cnt); - BUG_ON(index != grp->index); + DBG_BUGON(index != grp->index); } rcu_read_unlock(); return grp; @@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb, struct erofs_sb_info *sbi; int err; - /* grp->refcount should not < 1 */ - BUG_ON(!atomic_read(&grp->refcount)); + /* grp shouldn't be broken or used before */ + if (unlikely(atomic_read(&grp->refcount) != 1)) { + DBG_BUGON(1); + return -EINVAL; + } err = radix_tree_preload(GFP_NOFS); if (err) @@ -87,12 +87,21 @@ int erofs_register_workgroup(struct super_block *sb, grp = (void *)((unsigned long)grp | 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT); - err = radix_tree_insert(&sbi->workstn_tree, - grp->index, grp); + /* + * Bump up reference count before making this workgroup + * visible to other users in order to avoid potential UAF + * without serialized by erofs_workstn_lock. + */ + __erofs_workgroup_get(grp); - if (!err) { - __erofs_workgroup_get(grp); - } + err = radix_tree_insert(&sbi->workstn_tree, + grp->index, grp); + if (unlikely(err)) + /* + * it's safe to decrease since the workgroup isn't visible + * and refcount >= 2 (cannot be freezed). + */ + __erofs_workgroup_put(grp); erofs_workstn_unlock(sbi); radix_tree_preload_end(); @@ -101,19 +110,99 @@ int erofs_register_workgroup(struct super_block *sb, extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); +static void __erofs_workgroup_free(struct erofs_workgroup *grp) +{ + atomic_long_dec(&erofs_global_shrink_cnt); + erofs_workgroup_free_rcu(grp); +} + int erofs_workgroup_put(struct erofs_workgroup *grp) { int count = atomic_dec_return(&grp->refcount); if (count == 1) atomic_long_inc(&erofs_global_shrink_cnt); - else if (!count) { - atomic_long_dec(&erofs_global_shrink_cnt); - erofs_workgroup_free_rcu(grp); - } + else if (!count) + __erofs_workgroup_free(grp); return count; } +#ifdef EROFS_FS_HAS_MANAGED_CACHE +/* for cache-managed case, customized reclaim paths exist */ +static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) +{ + erofs_workgroup_unfreeze(grp, 0); + __erofs_workgroup_free(grp); +} + +bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) +{ + void *entry; + + /* + * for managed cache enabled, the refcount of workgroups + * themselves could be < 0 (freezed). So there is no guarantee + * that all refcount > 0 if managed cache is enabled. + */ + if (!erofs_workgroup_try_to_freeze(grp, 1)) + return false; + + /* + * note that all cached pages should be unlinked + * before delete it from the radix tree. + * Otherwise some cached pages of an orphan old workgroup + * could be still linked after the new one is available. + */ + if (erofs_try_to_free_all_cached_pages(sbi, grp)) { + erofs_workgroup_unfreeze(grp, 1); + return false; + } + + /* + * it is impossible to fail after the workgroup is freezed, + * however in order to avoid some race conditions, add a + * DBG_BUGON to observe this in advance. + */ + entry = radix_tree_delete(&sbi->workstn_tree, grp->index); + DBG_BUGON((void *)((unsigned long)entry & + ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp); + + /* + * if managed cache is enable, the last refcount + * should indicate the related workstation. 
+ */ + erofs_workgroup_unfreeze_final(grp); + return true; +} + +#else +/* for nocache case, no customized reclaim path at all */ +bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) +{ + int cnt = atomic_read(&grp->refcount); + void *entry; + + DBG_BUGON(cnt <= 0); + DBG_BUGON(cleanup && cnt != 1); + + if (cnt > 1) + return false; + + entry = radix_tree_delete(&sbi->workstn_tree, grp->index); + DBG_BUGON((void *)((unsigned long)entry & + ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp); + + /* (rarely) could be grabbed again when freeing */ + erofs_workgroup_put(grp); + return true; +} + +#endif + unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, unsigned long nr_shrink, bool cleanup) @@ -130,43 +219,15 @@ unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, batch, first_index, PAGEVEC_SIZE); for (i = 0; i < found; ++i) { - int cnt; struct erofs_workgroup *grp = (void *) ((unsigned long)batch[i] & ~RADIX_TREE_EXCEPTIONAL_ENTRY); first_index = grp->index + 1; - cnt = atomic_read(&grp->refcount); - BUG_ON(cnt <= 0); - - if (cleanup) - BUG_ON(cnt != 1); - -#ifndef EROFS_FS_HAS_MANAGED_CACHE - else if (cnt > 1) -#else - if (!erofs_workgroup_try_to_freeze(grp, 1)) -#endif - continue; - - if (radix_tree_delete(&sbi->workstn_tree, - grp->index) != grp) { -#ifdef EROFS_FS_HAS_MANAGED_CACHE -skip: - erofs_workgroup_unfreeze(grp, 1); -#endif + /* try to shrink each valid workgroup */ + if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) continue; - } - -#ifdef EROFS_FS_HAS_MANAGED_CACHE - if (erofs_try_to_free_all_cached_pages(sbi, grp)) - goto skip; - - erofs_workgroup_unfreeze(grp, 1); -#endif - /* (rarely) grabbed again when freeing */ - erofs_workgroup_put(grp); ++freed; if (unlikely(!--nr_shrink)) diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c index 0e9cfeccdf99789dbccfca79568f9e191f7cd9e4..d48687ca21990279a8bead27bf2b12d9524d2360 100644 --- a/drivers/staging/erofs/xattr.c +++ b/drivers/staging/erofs/xattr.c @@ -24,36 +24,77 @@ struct xattr_iter { static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) { - /* only init_inode_xattrs use non-atomic once */ + /* the only user of kunmap() is 'init_inode_xattrs' */ if (unlikely(!atomic)) kunmap(it->page); else kunmap_atomic(it->kaddr); + unlock_page(it->page); put_page(it->page); } -static void init_inode_xattrs(struct inode *inode) +static inline void xattr_iter_end_final(struct xattr_iter *it) +{ + if (!it->page) + return; + + xattr_iter_end(it, true); +} + +static int init_inode_xattrs(struct inode *inode) { + struct erofs_vnode *const vi = EROFS_V(inode); struct xattr_iter it; unsigned i; struct erofs_xattr_ibody_header *ih; struct erofs_sb_info *sbi; - struct erofs_vnode *vi; bool atomic_map; + int ret = 0; - if (likely(inode_has_inited_xattr(inode))) - return; + /* the most case is that xattrs of this inode are initialized. */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + return 0; + + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; - vi = EROFS_V(inode); - BUG_ON(!vi->xattr_isize); + /* someone has initialized xattrs for us? 
*/ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + goto out_unlock; + + /* + * bypass all xattr operations if ->xattr_isize is not greater than + * sizeof(struct erofs_xattr_ibody_header), in detail: + * 1) it is not enough to contain erofs_xattr_ibody_header then + * ->xattr_isize should be 0 (it means no xattr); + * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk + * undefined right now (maybe use later with some new sb feature). + */ + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { + errln("xattr_isize %d of nid %llu is not supported yet", + vi->xattr_isize, vi->nid); + ret = -ENOTSUPP; + goto out_unlock; + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { + if (unlikely(vi->xattr_isize)) { + DBG_BUGON(1); + ret = -EIO; + goto out_unlock; /* xattr ondisk layout error */ + } + ret = -ENOATTR; + goto out_unlock; + } sbi = EROFS_I_SB(inode); it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); it.page = erofs_get_inline_page(inode, it.blkaddr); - BUG_ON(IS_ERR(it.page)); + if (IS_ERR(it.page)) { + ret = PTR_ERR(it.page); + goto out_unlock; + } /* read in shared xattr array (non-atomic, see kmalloc below) */ it.kaddr = kmap(it.page); @@ -62,9 +103,13 @@ static void init_inode_xattrs(struct inode *inode) ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); vi->xattr_shared_count = ih->h_shared_count; - vi->xattr_shared_xattrs = (unsigned *)kmalloc_array( - vi->xattr_shared_count, sizeof(unsigned), - GFP_KERNEL | __GFP_NOFAIL); + vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, + sizeof(uint), GFP_KERNEL); + if (!vi->xattr_shared_xattrs) { + xattr_iter_end(&it, atomic_map); + ret = -ENOMEM; + goto out_unlock; + } /* let's skip ibody header */ it.ofs += sizeof(struct erofs_xattr_ibody_header); @@ -77,7 +122,12 @@ static void init_inode_xattrs(struct inode *inode) it.page = erofs_get_meta_page(inode->i_sb, ++it.blkaddr, S_ISDIR(inode->i_mode)); - BUG_ON(IS_ERR(it.page)); + if (IS_ERR(it.page)) { + kfree(vi->xattr_shared_xattrs); + vi->xattr_shared_xattrs = NULL; + ret = PTR_ERR(it.page); + goto out_unlock; + } it.kaddr = kmap_atomic(it.page); atomic_map = true; @@ -89,7 +139,11 @@ static void init_inode_xattrs(struct inode *inode) } xattr_iter_end(&it, atomic_map); - inode_set_inited_xattr(inode); + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); + +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); + return ret; } struct xattr_iter_handlers { @@ -99,18 +153,25 @@ struct xattr_iter_handlers { void (*value)(struct xattr_iter *, unsigned, char *, unsigned); }; -static void xattr_iter_fixup(struct xattr_iter *it) +static inline int xattr_iter_fixup(struct xattr_iter *it) { - if (unlikely(it->ofs >= EROFS_BLKSIZ)) { - xattr_iter_end(it, true); + if (it->ofs < EROFS_BLKSIZ) + return 0; - it->blkaddr += erofs_blknr(it->ofs); - it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); - BUG_ON(IS_ERR(it->page)); + xattr_iter_end(it, true); - it->kaddr = kmap_atomic(it->page); - it->ofs = erofs_blkoff(it->ofs); + it->blkaddr += erofs_blknr(it->ofs); + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); + if (IS_ERR(it->page)) { + int err = PTR_ERR(it->page); + + it->page = NULL; + return err; } + + it->kaddr = kmap_atomic(it->page); + it->ofs = erofs_blkoff(it->ofs); + return 0; } static int inline_xattr_iter_begin(struct xattr_iter *it, @@ -132,21 +193,24 @@ static int inline_xattr_iter_begin(struct xattr_iter *it, 
it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); it->page = erofs_get_inline_page(inode, it->blkaddr); - BUG_ON(IS_ERR(it->page)); - it->kaddr = kmap_atomic(it->page); + if (IS_ERR(it->page)) + return PTR_ERR(it->page); + it->kaddr = kmap_atomic(it->page); return vi->xattr_isize - xattr_header_sz; } static int xattr_foreach(struct xattr_iter *it, - struct xattr_iter_handlers *op, unsigned *tlimit) + const struct xattr_iter_handlers *op, unsigned int *tlimit) { struct erofs_xattr_entry entry; unsigned value_sz, processed, slice; int err; /* 0. fixup blkaddr, ofs, ipage */ - xattr_iter_fixup(it); + err = xattr_iter_fixup(it); + if (err) + return err; /* * 1. read xattr entry to the memory, @@ -178,7 +242,9 @@ static int xattr_foreach(struct xattr_iter *it, if (it->ofs >= EROFS_BLKSIZ) { BUG_ON(it->ofs > EROFS_BLKSIZ); - xattr_iter_fixup(it); + err = xattr_iter_fixup(it); + if (err) + goto out; it->ofs = 0; } @@ -210,7 +276,10 @@ static int xattr_foreach(struct xattr_iter *it, while (processed < value_sz) { if (it->ofs >= EROFS_BLKSIZ) { BUG_ON(it->ofs > EROFS_BLKSIZ); - xattr_iter_fixup(it); + + err = xattr_iter_fixup(it); + if (err) + goto out; it->ofs = 0; } @@ -270,7 +339,7 @@ static void xattr_copyvalue(struct xattr_iter *_it, memcpy(it->buffer + processed, buf, len); } -static struct xattr_iter_handlers find_xattr_handlers = { +static const struct xattr_iter_handlers find_xattr_handlers = { .entry = xattr_entrymatch, .name = xattr_namematch, .alloc_buffer = xattr_checkbuffer, @@ -291,8 +360,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); if (ret >= 0) break; + + if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */ + break; } - xattr_iter_end(&it->it, true); + xattr_iter_end_final(&it->it); return ret < 0 ? ret : it->buffer_size; } @@ -315,8 +387,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) xattr_iter_end(&it->it, true); it->it.page = erofs_get_meta_page(inode->i_sb, - blkaddr, false); - BUG_ON(IS_ERR(it->it.page)); + blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + it->it.kaddr = kmap_atomic(it->it.page); it->it.blkaddr = blkaddr; } @@ -324,9 +398,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); if (ret >= 0) break; + + if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */ + break; } if (vi->xattr_shared_count) - xattr_iter_end(&it->it, true); + xattr_iter_end_final(&it->it); return ret < 0 ? 
ret : it->buffer_size; } @@ -351,7 +428,9 @@ int erofs_getxattr(struct inode *inode, int index, if (unlikely(name == NULL)) return -EINVAL; - init_inode_xattrs(inode); + ret = init_inode_xattrs(inode); + if (ret) + return ret; it.index = index; @@ -374,7 +453,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { - struct erofs_vnode *const vi = EROFS_V(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode); switch (handler->flags) { @@ -392,9 +470,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, return -EINVAL; } - if (!vi->xattr_isize) - return -ENOATTR; - return erofs_getxattr(inode, handler->flags, name, buffer, size); } @@ -494,7 +569,7 @@ static int xattr_skipvalue(struct xattr_iter *_it, return 1; } -static struct xattr_iter_handlers list_xattr_handlers = { +static const struct xattr_iter_handlers list_xattr_handlers = { .entry = xattr_entrylist, .name = xattr_namelist, .alloc_buffer = xattr_skipvalue, @@ -516,7 +591,7 @@ static int inline_listxattr(struct listxattr_iter *it) if (ret < 0) break; } - xattr_iter_end(&it->it, true); + xattr_iter_end_final(&it->it); return ret < 0 ? ret : it->buffer_ofs; } @@ -538,8 +613,10 @@ static int shared_listxattr(struct listxattr_iter *it) xattr_iter_end(&it->it, true); it->it.page = erofs_get_meta_page(inode->i_sb, - blkaddr, false); - BUG_ON(IS_ERR(it->it.page)); + blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + it->it.kaddr = kmap_atomic(it->it.page); it->it.blkaddr = blkaddr; } @@ -549,7 +626,7 @@ static int shared_listxattr(struct listxattr_iter *it) break; } if (vi->xattr_shared_count) - xattr_iter_end(&it->it, true); + xattr_iter_end_final(&it->it); return ret < 0 ? 
ret : it->buffer_ofs; } @@ -560,7 +637,11 @@ ssize_t erofs_listxattr(struct dentry *dentry, int ret; struct listxattr_iter it; - init_inode_xattrs(d_inode(dentry)); + ret = init_inode_xattrs(d_inode(dentry)); + if (ret == -ENOATTR) + return 0; + if (ret) + return ret; it.dentry = dentry; it.buffer = buffer; diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index a2df02d97a8ebc31896925c3beb978bb3a25c4bd..3fe4738720eab6c48e49a4e3e71a49512bb411c8 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -771,7 +771,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, fbdefio->deferred_io = fbtft_deferred_io; fb_deferred_io_init(info); - strncpy(info->fix.id, dev->driver->name, 16); + snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.xpanstep = 0; @@ -819,7 +819,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, if (par->gamma.curves && gamma) { if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma, strlen(gamma))) - goto alloc_fail; + goto release_framebuf; } /* Transmit buffer */ @@ -836,7 +836,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, if (txbuflen > 0) { txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL); if (!txbuf) - goto alloc_fail; + goto release_framebuf; par->txbuf.buf = txbuf; par->txbuf.len = txbuflen; } @@ -872,6 +872,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, return info; +release_framebuf: + framebuffer_release(info); + alloc_fail: vfree(vmem); diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index 9329fcad95acd336535662bc32e0ed5fce39ca39..2f8f1f3f1bcd90d4fa3cb3a3464ad994d222fff8 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -2632,10 +2632,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_portal_free(priv->mc_io); - free_netdev(net_dev); - dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + free_netdev(net_dev); + return 0; } diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c index ecdd3d84f95668cd66dc1cc7b0519ad1d5dda24b..8549e809363e40dd9f00c5d40679af0ac5afd495 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c @@ -1073,6 +1073,7 @@ static int port_switchdev_event(struct notifier_block *unused, dev_hold(dev); break; default: + kfree(switchdev_work); return NOTIFY_DONE; } diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c index 0d52cb85441f051cc1d1a072591052a1e37b7502..318a33c2f7a7b090c12f94163d92d5a3e972c81a 100644 --- a/drivers/staging/fsl-dpaa2/rtc/rtc.c +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c @@ -142,7 +142,10 @@ static int rtc_probe(struct fsl_mc_device *mc_dev) err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); if (err) { - dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); goto err_exit; } diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c index c747e9ca451860309dab8440bad5705492d014a4..0cef1d6d2e2b0c9963621e8a476c6c1fcc23c0b3 100644 --- a/drivers/staging/gasket/apex_driver.c +++ b/drivers/staging/gasket/apex_driver.c @@ 
-538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr, break; case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE: ret = scnprintf(buf, PAGE_SIZE, "%u\n", - gasket_page_table_num_entries( + gasket_page_table_num_simple_entries( gasket_dev->page_table[0])); break; case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES: diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 3c2aab7a921e89157f2929adf8be1241835d558b..fc64d3fed8b4a35a25d096de9427981d1c4a5e73 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; + len = skb->len + ETH_HLEN; ret = netif_rx_ni(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/gmjstcm/Kconfig b/drivers/staging/gmjstcm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..5b5397ae183201f534994f86530fef4e304a5c47 --- /dev/null +++ b/drivers/staging/gmjstcm/Kconfig @@ -0,0 +1,21 @@ +menu "GMJS TCM support" + +config GMJS_TCM + bool + +config GMJS_TCM_CORE + tristate "GMJS TCM core support" + depends on ARM64 || MIPS + default m + select GMJS_TCM + help + GMJS TCM core support. + +config GMJS_TCM_SPI + tristate "GMJS TCM support on SPI interface" + depends on GMJS_TCM_CORE && SPI_MASTER + default m + help + GMJS TCM support on SPI interface. + +endmenu diff --git a/drivers/staging/gmjstcm/Makefile b/drivers/staging/gmjstcm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..369f01119372cd197642da9c37366f6f506084ba --- /dev/null +++ b/drivers/staging/gmjstcm/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_GMJS_TCM_CORE) += tcm_core.o +tcm_core-objs := tcm.o +obj-$(CONFIG_GMJS_TCM_SPI) += tcm_tis_spi.o diff --git a/drivers/staging/gmjstcm/tcm.c b/drivers/staging/gmjstcm/tcm.c new file mode 100644 index 0000000000000000000000000000000000000000..133be151f7101cf282a6579a3dd1c1ab422519df --- /dev/null +++ b/drivers/staging/gmjstcm/tcm.c @@ -0,0 +1,952 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009 Nationz Technologies Inc. + * + * Description: Exprot symbol for tcm_tis module + * + * Major Function: public write read register function etc. 
+ * + */ + +#include +#include +#include +#include +#include "tcm.h" + +/* + * const var + */ +enum tcm_const { + TCM_MINOR = 224, /* officially assigned */ + TCM_BUFSIZE = 2048, /* Buffer Size */ + TCM_NUM_DEVICES = 256, /* Max supporting tcm device number */ +}; + +/* + * CMD duration + */ +enum tcm_duration { + TCM_SHORT = 0, + TCM_MEDIUM = 1, + TCM_LONG = 2, + TCM_UNDEFINED, +}; + +/* Max Total of Command Number */ +#define TCM_MAX_ORDINAL 88 /*243*/ + +static LIST_HEAD(tcm_chip_list); +static DEFINE_SPINLOCK(driver_lock); /* spin lock */ +static DECLARE_BITMAP(dev_mask, TCM_NUM_DEVICES); + +typedef struct tagTCM_Command { + u8 ordinal; + u8 DURATION; +} TCM_Command; + +static const TCM_Command TCM_Command_List[TCM_MAX_ORDINAL + 1] = { + {/*TCM_ORD_ActivateIdentity, */122, 1}, + {/*TCM_ORD_CertifyKey, */50, 1}, + {/*TCM_ORD_CertifyKeyM, */51, 1}, + {/*TCM_ORD_ChangeAuth, */12, 1}, + {/*TCM_ORD_ChangeAuthOwner, */16, 0}, + {/*TCM_ORD_ContinueSelfTeSt, */83, 2}, + {/*TCM_ORD_CreateCounter, */220, 0}, + {/*TCM_ORD_CreateWrapKey, */31, 2}, + {/*TCM_ORD_DiSableForceClear, */94, 0}, + {/*TCM_ORD_DiSableOwnerClear, */92, 0}, + {/*TCM_ORD_EStabliShTranSport, */230, 0}, + {/*TCM_ORD_ExecuteTranSport, */231, 2}, + {/*TCM_ORD_Extend, */20, 0}, + {/*TCM_ORD_FieldUpgrade, */170, 2}, + {/*TCM_ORD_FluShSpecific, */186, 0}, + {/*TCM_ORD_ForceClear, */93, 0}, + {/*TCM_ORD_GetAuditDigeSt, */133, 0}, + {/*TCM_ORD_GetAuditDigeStSigned, */134, 1}, + {/*TCM_ORD_GetCapability, */101, 0}, + {/*TCM_ORD_GetPubKey, */33, 0}, + {/*TCM_ORD_GetRandoM, */70, 0}, + {/*TCM_ORD_GetTeStReSult, */84, 0}, + {/*TCM_ORD_GetTickS, */241, 0}, + {/*TCM_ORD_IncreMentCounter, */221, 0}, + {/*TCM_ORD_LoadContext, */185, 1}, + {/*TCM_ORD_MakeIdentity, */121, 2}, + {/*TCM_ORD_NV_DefineSpace, */204, 0}, + {/*TCM_ORD_NV_ReadValue, */207, 0}, + {/*TCM_ORD_NV_ReadValueAuth, */208, 0}, + {/*TCM_ORD_NV_WriteValue, */205, 0}, + {/*TCM_ORD_NV_WriteValueAuth, */206, 0}, + {/*TCM_ORD_OwnerClear, */91, 0}, + {/*TCM_ORD_OwnerReadInternalPub, */129, 0}, + {/*TCM_ORD_OwnerSetDiSable, */110, 0}, + {/*TCM_ORD_PCR_ReSet, */200, 0}, + {/*TCM_ORD_PcrRead, */21, 0}, + {/*TCM_ORD_PhySicalDiSable, */112, 0}, + {/*TCM_ORD_PhySicalEnable, */111, 0}, + {/*TCM_ORD_PhySicalSetDeactivated, */114, 0}, + {/*TCM_ORD_Quote, */22, 1}, + {/*TCM_ORD_QuoteM, */62, 1}, + {/*TCM_ORD_ReadCounter, */222, 0}, + {/*TCM_ORD_ReadPubek, */124, 0}, + {/*TCM_ORD_ReleaSeCounter, */223, 0}, + {/*TCM_ORD_ReleaSeCounterOwner, */224, 0}, + {/*TCM_ORD_ReleaSeTranSportSigned, */232, 1}, + {/*TCM_ORD_ReSetLockValue, */64, 0}, + {/*TCM_ORD_RevokeTruSt, */128, 0}, + {/*TCM_ORD_SaveContext, */184, 1}, + {/*TCM_ORD_SaveState, */152, 1}, + {/*TCM_ORD_Seal, */23, 1}, + {/*TCM_ORD_Sealx, */61, 1}, + {/*TCM_ORD_SelfTeStFull, */80, 2}, + {/*TCM_ORD_SetCapability, */63, 0}, + {/*TCM_ORD_SetOperatorAuth, */116, 0}, + {/*TCM_ORD_SetOrdinalAuditStatuS, */141, 0}, + {/*TCM_ORD_SetOwnerInStall, */113, 0}, + {/*TCM_ORD_SetTeMpDeactivated, */115, 0}, + {/*TCM_ORD_Sign, */60, 1}, + {/*TCM_ORD_Startup, */153, 0}, + {/*TCM_ORD_TakeOwnerShip, */13, 1}, + {/*TCM_ORD_TickStaMpBlob, */242, 1}, + {/*TCM_ORD_UnSeal, */24, 1}, + {/*TSC_ORD_PhySicalPreSence, */10, 0}, + {/*TSC_ORD_ReSetEStabliShMentBit, */11, 0}, + {/*TCM_ORD_WrapKey, */189, 2}, + {/*TCM_ORD_APcreate, */191, 0}, + {/*TCM_ORD_APTerMinate, */192, 0}, + {/*TCM_ORD_CreateMigratedBlob, */193, 1}, + {/*TCM_ORD_ConvertMigratedBlob, */194, 1}, + {/*TCM_ORD_AuthorizeMigrationKey, */195, 0}, + {/*TCM_ORD_SMS4Encrypt, */197, 1}, + {/*TCM_ORD_SMS4Decrypt, 
*/198, 1}, + {/*TCM_ORD_ReadEKCert, */199, 1}, + {/*TCM_ORD_WriteEKCert, */233, 1}, + {/*TCM_ORD_SCHStart, */234, 0}, + {/*TCM_ORD_SCHUpdata, */235, 0}, + {/*TCM_ORD_SCHCoMplete, */236, 0}, + {/*TCM_ORD_SCHCoMpleteExtend, */237, 0}, + {/*TCM_ORD_ECCDecrypt, */238, 1}, + {/*TCM_ORD_LoadKey, */239, 1}, + {/*TCM_ORD_CreateEndorSeMentKeyPair, */120, 2}, + {/*TCM_ORD_CreateRevocableEK, */127, 2}, + {/*TCM_ORD_ReleaSeECCExchangeSeSSion, */174, 1}, + {/*TCM_ORD_CreateECCExchangeSeSSion, */175, 1}, + {/*TCM_ORD_GetKeyECCExchangeSeSSion, */176, 1}, + {/*TCM_ORD_ActivatePEK, */217, 1}, + {/*TCM_ORD_ActivatePEKCert, */218, 1}, + {0, 0} +}; + +static void user_reader_timeout(struct timer_list *t) +{ + struct tcm_chip *chip = from_timer(chip, t, user_read_timer); + + schedule_work(&chip->work); +} + +static void timeout_work(struct work_struct *work) +{ + struct tcm_chip *chip = container_of(work, struct tcm_chip, work); + + mutex_lock(&chip->buffer_mutex); + atomic_set(&chip->data_pending, 0); + memset(chip->data_buffer, 0, TCM_BUFSIZE); + mutex_unlock(&chip->buffer_mutex); +} + +unsigned long tcm_calc_ordinal_duration(struct tcm_chip *chip, + u32 ordinal) +{ + int duration_idx = TCM_UNDEFINED; + int duration = 0; + int i = 0; + + for (i = 0; i < TCM_MAX_ORDINAL; i++) { + if (ordinal == TCM_Command_List[i].ordinal) { + duration_idx = TCM_Command_List[i].DURATION; + break; + } + } + + if (duration_idx != TCM_UNDEFINED) + duration = chip->vendor.duration[duration_idx]; + if (duration <= 0) + return 2 * 60 * HZ; + else + return duration; +} +EXPORT_SYMBOL_GPL(tcm_calc_ordinal_duration); + +/* + * Internal kernel interface to transmit TCM commands + * buff format: TAG(2 bytes) + Total Size(4 bytes ) + + * Command Ordinal(4 bytes ) + ...... + */ +static ssize_t tcm_transmit(struct tcm_chip *chip, const char *buf, + size_t bufsiz) +{ + ssize_t rc = 0; + u32 count = 0, ordinal = 0; + unsigned long stop = 0; + + count = be32_to_cpu(*((__be32 *)(buf + 2))); /* buff size */ + ordinal = be32_to_cpu(*((__be32 *)(buf + 6))); /* command ordinal */ + + if (count == 0) + return -ENODATA; + if (count > bufsiz) { /* buff size err ,invalid buff stream */ + dev_err(chip->dev, "invalid count value %x, %zx\n", + count, bufsiz); + return -E2BIG; + } + + mutex_lock(&chip->tcm_mutex); /* enter mutex */ + + rc = chip->vendor.send(chip, (u8 *)buf, count); + if (rc < 0) { + dev_err(chip->dev, "%s: tcm_send: error %zd\n", + __func__, rc); + goto out; + } + + if (chip->vendor.irq) + goto out_recv; + + stop = jiffies + tcm_calc_ordinal_duration(chip, + ordinal); /* cmd duration */ + do { + u8 status = chip->vendor.status(chip); + + if ((status & chip->vendor.req_complete_mask) == + chip->vendor.req_complete_val) + goto out_recv; + + if ((status == chip->vendor.req_canceled)) { + dev_err(chip->dev, "Operation Canceled\n"); + rc = -ECANCELED; + goto out; + } + + msleep(TCM_TIMEOUT); /* CHECK */ + rmb(); + } while (time_before(jiffies, stop)); + /* time out */ + chip->vendor.cancel(chip); + dev_err(chip->dev, "Operation Timed out\n"); + rc = -ETIME; + goto out; + +out_recv: + rc = chip->vendor.recv(chip, (u8 *)buf, bufsiz); + if (rc < 0) + dev_err(chip->dev, "%s: tcm_recv: error %zd\n", + __func__, rc); +out: + mutex_unlock(&chip->tcm_mutex); + return rc; +} + +#define TCM_DIGEST_SIZE 32 +#define TCM_ERROR_SIZE 10 +#define TCM_RET_CODE_IDX 6 +#define TCM_GET_CAP_RET_SIZE_IDX 10 +#define TCM_GET_CAP_RET_UINT32_1_IDX 14 +#define TCM_GET_CAP_RET_UINT32_2_IDX 18 +#define TCM_GET_CAP_RET_UINT32_3_IDX 22 +#define 
TCM_GET_CAP_RET_UINT32_4_IDX 26 +#define TCM_GET_CAP_PERM_DISABLE_IDX 16 +#define TCM_GET_CAP_PERM_INACTIVE_IDX 18 +#define TCM_GET_CAP_RET_BOOL_1_IDX 14 +#define TCM_GET_CAP_TEMP_INACTIVE_IDX 16 + +#define TCM_CAP_IDX 13 +#define TCM_CAP_SUBCAP_IDX 21 + +enum tcm_capabilities { + TCM_CAP_FLAG = 4, + TCM_CAP_PROP = 5, +}; + +enum tcm_sub_capabilities { + TCM_CAP_PROP_PCR = 0x1, /* tcm 0x101 */ + TCM_CAP_PROP_MANUFACTURER = 0x3, /* tcm 0x103 */ + TCM_CAP_FLAG_PERM = 0x8, /* tcm 0x108 */ + TCM_CAP_FLAG_VOL = 0x9, /* tcm 0x109 */ + TCM_CAP_PROP_OWNER = 0x11, /* tcm 0x101 */ + TCM_CAP_PROP_TIS_TIMEOUT = 0x15, /* tcm 0x115 */ + TCM_CAP_PROP_TIS_DURATION = 0x20, /* tcm 0x120 */ +}; + +/* + * This is a semi generic GetCapability command for use + * with the capability type TCM_CAP_PROP or TCM_CAP_FLAG + * and their associated sub_capabilities. + */ + +static const u8 tcm_cap[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND 0xc1*/ + 0, 0, 0, 22, /* length */ + 0, 0, 128, 101, /* TCM_ORD_GetCapability */ + 0, 0, 0, 0, /* TCM_CAP_ */ + 0, 0, 0, 4, /* TCM_CAP_SUB_ size */ + 0, 0, 1, 0 /* TCM_CAP_SUB_ */ +}; + +static ssize_t transmit_cmd(struct tcm_chip *chip, u8 *data, int len, + char *desc) +{ + int err = 0; + + len = tcm_transmit(chip, data, len); + if (len < 0) + return len; + if (len == TCM_ERROR_SIZE) { + err = be32_to_cpu(*((__be32 *)(data + TCM_RET_CODE_IDX))); + dev_dbg(chip->dev, "A TCM error (%d) occurred %s\n", err, desc); + return err; + } + return 0; +} + +/* + * Get default timeouts value form tcm by GetCapability with TCM_CAP_PROP_TIS_TIMEOUT prop + */ +void tcm_get_timeouts(struct tcm_chip *chip) +{ + u8 data[max_t(int, ARRAY_SIZE(tcm_cap), 30)]; + ssize_t rc = 0; + u32 timeout = 0; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_PROP; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_PROP_TIS_TIMEOUT; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the timeouts"); + if (rc) + goto duration; + + if (be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_SIZE_IDX))) != + 4 * sizeof(u32)) + goto duration; + + /* Don't overwrite default if value is 0 */ + timeout = be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_UINT32_1_IDX))); + if (timeout) + chip->vendor.timeout_a = msecs_to_jiffies(timeout); + timeout = be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_UINT32_2_IDX))); + if (timeout) + chip->vendor.timeout_b = msecs_to_jiffies(timeout); + timeout = be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_UINT32_3_IDX))); + if (timeout) + chip->vendor.timeout_c = msecs_to_jiffies(timeout); + timeout = be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_UINT32_4_IDX))); + if (timeout) + chip->vendor.timeout_d = msecs_to_jiffies(timeout); + +duration: + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_PROP; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_PROP_TIS_DURATION; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the durations"); + if (rc) + return; + + if (be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_SIZE_IDX))) != + 3 * sizeof(u32)) + return; + + chip->vendor.duration[TCM_SHORT] = + msecs_to_jiffies(be32_to_cpu(*((__be32 *)(data + + TCM_GET_CAP_RET_UINT32_1_IDX)))); + chip->vendor.duration[TCM_MEDIUM] = + msecs_to_jiffies(be32_to_cpu(*((__be32 *)(data + + TCM_GET_CAP_RET_UINT32_2_IDX)))); + chip->vendor.duration[TCM_LONG] = + msecs_to_jiffies(be32_to_cpu(*((__be32 *)(data + + TCM_GET_CAP_RET_UINT32_3_IDX)))); +} +EXPORT_SYMBOL_GPL(tcm_get_timeouts); + +ssize_t tcm_show_enabled(struct device *dev, struct device_attribute *attr, + 
char *buf) +{ + u8 data[max_t(int, ARRAY_SIZE(tcm_cap), 35)]; + ssize_t rc = 0; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_FLAG; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_FLAG_PERM; + + rc = transmit_cmd(chip, data, sizeof(data), + "attemtping to determine the permanent state"); + if (rc) + return 0; + if (data[TCM_GET_CAP_PERM_DISABLE_IDX]) + return sprintf(buf, "disable\n"); + else + return sprintf(buf, "enable\n"); +} +EXPORT_SYMBOL_GPL(tcm_show_enabled); + +ssize_t tcm_show_active(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 data[max_t(int, ARRAY_SIZE(tcm_cap), 35)]; + ssize_t rc = 0; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_FLAG; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_FLAG_PERM; + + rc = transmit_cmd(chip, data, sizeof(data), + "attemtping to determine the permanent state"); + if (rc) + return 0; + if (data[TCM_GET_CAP_PERM_INACTIVE_IDX]) + return sprintf(buf, "deactivated\n"); + else + return sprintf(buf, "activated\n"); +} +EXPORT_SYMBOL_GPL(tcm_show_active); + +ssize_t tcm_show_owned(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 data[sizeof(tcm_cap)]; + ssize_t rc = 0; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_PROP; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_PROP_OWNER; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the owner state"); + if (rc) + return 0; + if (data[TCM_GET_CAP_RET_BOOL_1_IDX]) + return sprintf(buf, "Owner installed\n"); + else + return sprintf(buf, "Owner have not installed\n"); +} +EXPORT_SYMBOL_GPL(tcm_show_owned); + +ssize_t tcm_show_temp_deactivated(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 data[sizeof(tcm_cap)]; + ssize_t rc = 0; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_FLAG; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_FLAG_VOL; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the temporary state"); + if (rc) + return 0; + if (data[TCM_GET_CAP_TEMP_INACTIVE_IDX]) + return sprintf(buf, "Temp deactivated\n"); + else + return sprintf(buf, "activated\n"); +} +EXPORT_SYMBOL_GPL(tcm_show_temp_deactivated); + +static const u8 pcrread[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 14, /* length */ + 0, 0, 128, 21, /* TCM_ORD_PcrRead */ + 0, 0, 0, 0 /* PCR index */ +}; + +ssize_t tcm_show_pcrs(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 data[1024]; + ssize_t rc = 0; + int i = 0, j = 0, num_pcrs = 0; + __be32 index = 0; + char *str = buf; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_PROP; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_PROP_PCR; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the number of PCRS"); + if (rc) + return 0; + + num_pcrs = be32_to_cpu(*((__be32 *)(data + 14))); + for (i = 0; i < num_pcrs; i++) { + memcpy(data, pcrread, sizeof(pcrread)); + index = cpu_to_be32(i); + memcpy(data + 10, &index, 4); + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to read a PCR"); + if (rc) + goto out; 
+ str += sprintf(str, "PCR-%02d: ", i); + for (j = 0; j < TCM_DIGEST_SIZE; j++) + str += sprintf(str, "%02X ", *(data + 10 + j)); + str += sprintf(str, "\n"); + memset(data, 0, 1024); + } +out: + return str - buf; +} +EXPORT_SYMBOL_GPL(tcm_show_pcrs); + +#define READ_PUBEK_RESULT_SIZE 128 +static const u8 readpubek[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 42, /* length */ + 0, 0, 128, 124, /* TCM_ORD_ReadPubek */ + 0, 0, 0, 0, 0, 0, 0, 0, /* NONCE */ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +ssize_t tcm_show_pubek(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 data[READ_PUBEK_RESULT_SIZE] = {0}; + ssize_t err = 0; + int i = 0, rc = 0; + char *str = buf; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, readpubek, sizeof(readpubek)); + + err = transmit_cmd(chip, data, sizeof(data), + "attempting to read the PUBEK"); + if (err) + goto out; + + str += sprintf(str, "PUBEK:"); + for (i = 0 ; i < 65 ; i++) { + if ((i) % 16 == 0) + str += sprintf(str, "\n"); + str += sprintf(str, "%02X ", data[i+10]); + } + + str += sprintf(str, "\n"); +out: + rc = str - buf; + return rc; +} +EXPORT_SYMBOL_GPL(tcm_show_pubek); + +#define CAP_VERSION_1_1 6 +#define CAP_VERSION_1_2 0x1A +#define CAP_VERSION_IDX 13 +static const u8 cap_version[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 18, /* length */ + 0, 0, 128, 101, /* TCM_ORD_GetCapability */ + 0, 0, 0, 0, + 0, 0, 0, 0 +}; + +ssize_t tcm_show_caps(struct device *dev, struct device_attribute *attr, + char *buf) +{ + u8 data[max_t(int, max(ARRAY_SIZE(tcm_cap), ARRAY_SIZE(cap_version)), 30)]; + ssize_t rc = 0; + char *str = buf; + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + memcpy(data, tcm_cap, sizeof(tcm_cap)); + data[TCM_CAP_IDX] = TCM_CAP_PROP; + data[TCM_CAP_SUBCAP_IDX] = TCM_CAP_PROP_MANUFACTURER; + + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the manufacturer"); + if (rc) + return 0; + + str += sprintf(str, "Manufacturer: 0x%x\n", + be32_to_cpu(*((__be32 *)(data + TCM_GET_CAP_RET_UINT32_1_IDX)))); + + memcpy(data, cap_version, sizeof(cap_version)); + data[CAP_VERSION_IDX] = CAP_VERSION_1_1; + rc = transmit_cmd(chip, data, sizeof(data), + "attempting to determine the 1.1 version"); + if (rc) + goto out; + + str += sprintf(str, "Firmware version: %02X.%02X.%02X.%02X\n", + (int)data[14], (int)data[15], (int)data[16], + (int)data[17]); + +out: + return str - buf; +} +EXPORT_SYMBOL_GPL(tcm_show_caps); + +ssize_t tcm_store_cancel(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return 0; + + chip->vendor.cancel(chip); + return count; +} +EXPORT_SYMBOL_GPL(tcm_store_cancel); + +/* + * Device file system interface to the TCM + * when App call file open in usr space ,this func will respone + */ +int tcm_open(struct inode *inode, struct file *file) +{ + int rc = 0, minor = iminor(inode); + struct tcm_chip *chip = NULL, *pos = NULL; + + spin_lock(&driver_lock); + + list_for_each_entry(pos, &tcm_chip_list, list) { + if (pos->vendor.miscdev.minor == minor) { + chip = pos; + break; + } + } + + if (chip == NULL) { + rc = -ENODEV; + goto err_out; + } + + if (chip->num_opens) { + dev_dbg(chip->dev, "Another process owns this TCM\n"); + rc = -EBUSY; + goto err_out; + } + + chip->num_opens++; + get_device(chip->dev); + + spin_unlock(&driver_lock); + + 
chip->data_buffer = kzalloc(TCM_BUFSIZE, GFP_KERNEL); + if (chip->data_buffer == NULL) { + chip->num_opens--; + put_device(chip->dev); + return -ENOMEM; + } + + atomic_set(&chip->data_pending, 0); + + file->private_data = chip; + return 0; + +err_out: + spin_unlock(&driver_lock); + return rc; +} +EXPORT_SYMBOL_GPL(tcm_open); + +int tcm_release(struct inode *inode, struct file *file) +{ + struct tcm_chip *chip = file->private_data; + + spin_lock(&driver_lock); + file->private_data = NULL; + chip->num_opens--; + del_singleshot_timer_sync(&chip->user_read_timer); + flush_work(&chip->work); + atomic_set(&chip->data_pending, 0); + put_device(chip->dev); + kfree(chip->data_buffer); + spin_unlock(&driver_lock); + return 0; +} +EXPORT_SYMBOL_GPL(tcm_release); + +ssize_t tcm_write(struct file *file, const char __user *buf, + size_t size, loff_t *off) +{ + struct tcm_chip *chip = file->private_data; + int in_size = size, out_size; + + /* + * cannot perform a write until the read has cleared + * either via tcm_read or a user_read_timer timeout + */ + while (atomic_read(&chip->data_pending) != 0) + msleep(TCM_TIMEOUT); + + mutex_lock(&chip->buffer_mutex); + + if (in_size > TCM_BUFSIZE) + in_size = TCM_BUFSIZE; + + if (copy_from_user(chip->data_buffer, (void __user *)buf, in_size)) { + mutex_unlock(&chip->buffer_mutex); + return -EFAULT; + } + + /* atomic tcm command send and result receive */ + out_size = tcm_transmit(chip, chip->data_buffer, TCM_BUFSIZE); + + if (out_size >= 0) { + atomic_set(&chip->data_pending, out_size); + mutex_unlock(&chip->buffer_mutex); + + /* Set a timeout by which the reader must come claim the result */ + mod_timer(&chip->user_read_timer, jiffies + (60 * HZ)); + } else + mutex_unlock(&chip->buffer_mutex); + + return in_size; +} +EXPORT_SYMBOL_GPL(tcm_write); + +ssize_t tcm_read(struct file *file, char __user *buf, + size_t size, loff_t *off) +{ + struct tcm_chip *chip = file->private_data; + int ret_size = 0; + int rc; + + del_singleshot_timer_sync(&chip->user_read_timer); + flush_work(&chip->work); + ret_size = atomic_read(&chip->data_pending); + atomic_set(&chip->data_pending, 0); + if (ret_size > 0) { /* relay data */ + if (size < ret_size) + ret_size = size; + + mutex_lock(&chip->buffer_mutex); + rc = copy_to_user(buf, chip->data_buffer, ret_size); + memset(chip->data_buffer, 0, ret_size); + if (rc) + ret_size = -EFAULT; + mutex_unlock(&chip->buffer_mutex); + } + + return ret_size; +} +EXPORT_SYMBOL_GPL(tcm_read); + +void tcm_remove_hardware(struct device *dev) +{ + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) { + dev_err(dev, "No device data found\n"); + return; + } + + spin_lock(&driver_lock); + list_del(&chip->list); + spin_unlock(&driver_lock); + + dev_set_drvdata(dev, NULL); + misc_deregister(&chip->vendor.miscdev); + kfree(chip->vendor.miscdev.name); + + sysfs_remove_group(&dev->kobj, chip->vendor.attr_group); + /* tcm_bios_log_teardown(chip->bios_dir); */ + + clear_bit(chip->dev_num, dev_mask); + kfree(chip); + put_device(dev); +} +EXPORT_SYMBOL_GPL(tcm_remove_hardware); + +static u8 savestate[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 10, /* blob length (in bytes) */ + 0, 0, 128, 152 /* TCM_ORD_SaveState */ +}; + +/* + * We are about to suspend. Save the TCM state + * so that it can be restored. 
+ */ +int tcm_pm_suspend(struct device *dev, pm_message_t pm_state) +{ + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + tcm_transmit(chip, savestate, sizeof(savestate)); + return 0; +} +EXPORT_SYMBOL_GPL(tcm_pm_suspend); + +int tcm_pm_suspend_p(struct device *dev) +{ + struct tcm_chip *chip = dev_get_drvdata(dev); + + if (chip == NULL) + return -ENODEV; + + tcm_transmit(chip, savestate, sizeof(savestate)); + return 0; +} +EXPORT_SYMBOL_GPL(tcm_pm_suspend_p); + +void tcm_startup(struct tcm_chip *chip) +{ + u8 start_up[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 12, /* blob length (in bytes) */ + 0, 0, 128, 153, /* TCM_ORD_SaveState */ + 0, 1 + }; + if (chip == NULL) + return; + tcm_transmit(chip, start_up, sizeof(start_up)); +} +EXPORT_SYMBOL_GPL(tcm_startup); + +/* + * Resume from a power safe. The BIOS already restored + * the TCM state. + */ +int tcm_pm_resume(struct device *dev) +{ + u8 start_up[] = { + 0, 193, /* TCM_TAG_RQU_COMMAND */ + 0, 0, 0, 12, /* blob length (in bytes) */ + 0, 0, 128, 153, /* TCM_ORD_SaveState */ + 0, 1 + }; + struct tcm_chip *chip = dev_get_drvdata(dev); + /* dev_info(chip->dev ,"--call tcm_pm_resume\n"); */ + if (chip == NULL) + return -ENODEV; + + tcm_transmit(chip, start_up, sizeof(start_up)); + return 0; +} +EXPORT_SYMBOL_GPL(tcm_pm_resume); + +/* + * Called from tcm_.c probe function only for devices + * the driver has determined it should claim. Prior to calling + * this function the specific probe function has called pci_enable_device + * upon errant exit from this function specific probe function should call + * pci_disable_device + */ +struct tcm_chip *tcm_register_hardware(struct device *dev, + const struct tcm_vendor_specific *entry) +{ + int rc; +#define DEVNAME_SIZE 7 + + char *devname = NULL; + struct tcm_chip *chip = NULL; + + /* Driver specific per-device data */ + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (chip == NULL) { + dev_err(dev, "chip kzalloc err\n"); + return NULL; + } + + mutex_init(&chip->buffer_mutex); + mutex_init(&chip->tcm_mutex); + INIT_LIST_HEAD(&chip->list); + + INIT_WORK(&chip->work, timeout_work); + timer_setup(&chip->user_read_timer, user_reader_timeout, 0); + + memcpy(&chip->vendor, entry, sizeof(struct tcm_vendor_specific)); + + chip->dev_num = find_first_zero_bit(dev_mask, TCM_NUM_DEVICES); + + if (chip->dev_num >= TCM_NUM_DEVICES) { + dev_err(dev, "No available tcm device numbers\n"); + kfree(chip); + return NULL; + } else if (chip->dev_num == 0) + chip->vendor.miscdev.minor = TCM_MINOR; + else + chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR; + + set_bit(chip->dev_num, dev_mask); + + devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL); + scnprintf(devname, DEVNAME_SIZE, "%s%d", "tcm", chip->dev_num); + chip->vendor.miscdev.name = devname; + + /* chip->vendor.miscdev.dev = dev; */ + + chip->dev = get_device(dev); + + if (misc_register(&chip->vendor.miscdev)) { + dev_err(chip->dev, + "unable to misc_register %s, minor %d\n", + chip->vendor.miscdev.name, + chip->vendor.miscdev.minor); + put_device(dev); + clear_bit(chip->dev_num, dev_mask); + kfree(chip); + kfree(devname); + return NULL; + } + + spin_lock(&driver_lock); + dev_set_drvdata(dev, chip); + list_add(&chip->list, &tcm_chip_list); + spin_unlock(&driver_lock); + + rc = sysfs_create_group(&dev->kobj, chip->vendor.attr_group); + /* chip->bios_dir = tcm_bios_log_setup(devname); */ + + return chip; +} +EXPORT_SYMBOL_GPL(tcm_register_hardware); + +static int __init tcm_init_module(void) +{ + return 0; +} + +static void 
__exit tcm_exit_module(void) +{ +} + +module_init(tcm_init_module); +module_exit(tcm_exit_module); + +MODULE_AUTHOR("Nationz Technologies Inc."); +MODULE_DESCRIPTION("TCM Driver"); +MODULE_VERSION("1.1.1.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/gmjstcm/tcm.h b/drivers/staging/gmjstcm/tcm.h new file mode 100644 index 0000000000000000000000000000000000000000..40cd0a879c3aa81b5e653ab90685eb9af36c9d5a --- /dev/null +++ b/drivers/staging/gmjstcm/tcm.h @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009 Nationz Technologies Inc. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct device; +struct tcm_chip; + +enum tcm_timeout { + TCM_TIMEOUT = 5, +}; + +/* TCM addresses */ +enum tcm_addr { + TCM_SUPERIO_ADDR = 0x2E, + TCM_ADDR = 0x4E, +}; + +extern ssize_t tcm_show_pubek(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_show_pcrs(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_show_caps(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_store_cancel(struct device *, struct device_attribute *attr, + const char *, size_t); +extern ssize_t tcm_show_enabled(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_show_active(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_show_owned(struct device *, struct device_attribute *attr, + char *); +extern ssize_t tcm_show_temp_deactivated(struct device *, + struct device_attribute *attr, char *); + +struct tcm_vendor_specific { + const u8 req_complete_mask; + const u8 req_complete_val; + const u8 req_canceled; + void __iomem *iobase; /* ioremapped address */ + void __iomem *iolbc; + unsigned long base; /* TCM base address */ + + int irq; + + int region_size; + int have_region; + + int (*recv)(struct tcm_chip *, u8 *, size_t); + int (*send)(struct tcm_chip *, u8 *, size_t); + void (*cancel)(struct tcm_chip *); + u8 (*status)(struct tcm_chip *); + struct miscdevice miscdev; + struct attribute_group *attr_group; + struct list_head list; + int locality; + unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ + unsigned long duration[3]; /* jiffies */ + + wait_queue_head_t read_queue; + wait_queue_head_t int_queue; +}; + +struct tcm_chip { + struct device *dev; /* Device stuff */ + + int dev_num; /* /dev/tcm# */ + int num_opens; /* only one allowed */ + int time_expired; + + /* Data passed to and from the tcm via the read/write calls */ + u8 *data_buffer; + atomic_t data_pending; + struct mutex buffer_mutex; + + struct timer_list user_read_timer; /* user needs to claim result */ + struct work_struct work; + struct mutex tcm_mutex; /* tcm is processing */ + + struct tcm_vendor_specific vendor; + + struct dentry **bios_dir; + + struct list_head list; +}; + +#define to_tcm_chip(n) container_of(n, struct tcm_chip, vendor) + +static inline int tcm_read_index(int base, int index) +{ + outb(index, base); + return inb(base+1) & 0xFF; +} + +static inline void tcm_write_index(int base, int index, int value) +{ + outb(index, base); + outb(value & 0xFF, base+1); +} +extern void tcm_startup(struct tcm_chip *); +extern void tcm_get_timeouts(struct tcm_chip *); +extern unsigned long tcm_calc_ordinal_duration(struct tcm_chip *, u32); +extern struct tcm_chip *tcm_register_hardware(struct device *, + const struct tcm_vendor_specific *); +extern int tcm_open(struct inode *, struct file *); +extern int tcm_release(struct inode *, 
struct file *); +extern ssize_t tcm_write(struct file *, const char __user *, size_t, + loff_t *); +extern ssize_t tcm_read(struct file *, char __user *, size_t, loff_t *); +extern void tcm_remove_hardware(struct device *); +extern int tcm_pm_suspend(struct device *, pm_message_t); +extern int tcm_pm_suspend_p(struct device *); +extern int tcm_pm_resume(struct device *); diff --git a/drivers/staging/gmjstcm/tcm_tis_spi.c b/drivers/staging/gmjstcm/tcm_tis_spi.c new file mode 100644 index 0000000000000000000000000000000000000000..d30430e364460578cfd4560cc2593babbec15662 --- /dev/null +++ b/drivers/staging/gmjstcm/tcm_tis_spi.c @@ -0,0 +1,847 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Kylin Tech. Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcm.h" + +static int is_ft_all(void) { + return 0; +} + +#define TCM_HEADER_SIZE 10 + +static bool tcm_debug; +module_param_named(debug, tcm_debug, bool, 0600); +MODULE_PARM_DESC(debug, "Turn TCM debugging mode on and off"); + +#define tcm_dbg(fmt, args...) \ +{ \ + if (tcm_debug) \ + pr_err(fmt, ## args); \ +} + +enum tis_access { + TCM_ACCESS_VALID = 0x80, + TCM_ACCESS_ACTIVE_LOCALITY = 0x20, + TCM_ACCESS_REQUEST_PENDING = 0x04, + TCM_ACCESS_REQUEST_USE = 0x02, +}; + +enum tis_status { + TCM_STS_VALID = 0x80, + TCM_STS_COMMAND_READY = 0x40, + TCM_STS_GO = 0x20, + TCM_STS_DATA_AVAIL = 0x10, + TCM_STS_DATA_EXPECT = 0x08, +}; + +enum tis_int_flags { + TCM_GLOBAL_INT_ENABLE = 0x80000000, + TCM_INTF_BURST_COUNT_STATIC = 0x100, + TCM_INTF_CMD_READY_INT = 0x080, + TCM_INTF_INT_EDGE_FALLING = 0x040, + TCM_INTF_INT_EDGE_RISING = 0x020, + TCM_INTF_INT_LEVEL_LOW = 0x010, + TCM_INTF_INT_LEVEL_HIGH = 0x008, + TCM_INTF_LOCALITY_CHANGE_INT = 0x004, + TCM_INTF_STS_VALID_INT = 0x002, + TCM_INTF_DATA_AVAIL_INT = 0x001, +}; + +enum tis_defaults { + TIS_SHORT_TIMEOUT = 750, /* ms */ + TIS_LONG_TIMEOUT = 2000, /* 2 sec */ +}; + +#define TCM_ACCESS(l) (0x0000 | ((l) << 12)) +#define TCM_INT_ENABLE(l) (0x0008 | ((l) << 12)) /* interperet */ +#define TCM_INT_VECTOR(l) (0x000C | ((l) << 12)) +#define TCM_INT_STATUS(l) (0x0010 | ((l) << 12)) +#define TCM_INTF_CAPS(l) (0x0014 | ((l) << 12)) +#define TCM_STS(l) (0x0018 | ((l) << 12)) +#define TCM_DATA_FIFO(l) (0x0024 | ((l) << 12)) + +#define TCM_DID_VID(l) (0x0F00 | ((l) << 12)) +#define TCM_RID(l) (0x0F04 | ((l) << 12)) + +#define TIS_MEM_BASE_huawei 0x3fed40000LL + +#define MAX_SPI_FRAMESIZE 64 + +// +#define _CPU_FT2000A4 +#define REUSE_CONF_REG_BASE 0x28180208 +#define REUSE_GPIO1_A5_BASE 0x28005000 + +static void *__iomem reuse_conf_reg; +static void *__iomem gpio1_a5; + +// +static LIST_HEAD(tis_chips); +static DEFINE_SPINLOCK(tis_lock); + +struct chip_data { + u8 cs; + u8 tmode; + u8 type; + u8 poll_mode; + u16 clk_div; + u32 speed_hz; + void (*cs_control)(u32 command); +}; + +struct tcm_tis_spi_phy { + struct spi_device *spi_device; + struct completion ready; + u8 *iobuf; +}; + +int tcm_tis_spi_transfer(struct device *dev, u32 addr, u16 len, + u8 *in, const u8 *out) +{ + struct tcm_tis_spi_phy *phy = dev_get_drvdata(dev); + int ret = 0; + struct spi_message m; + struct spi_transfer spi_xfer; + u8 transfer_len; + + tcm_dbg("TCM-dbg: %s, addr: 0x%x, len: %x, %s\n", + __func__, addr, len, (in) ? 
"in" : "out"); + + spi_bus_lock(phy->spi_device->master); + + /* set gpio1_a5 to LOW */ + if (is_ft_all() && (phy->spi_device->chip_select == 0)) { + iowrite32(0x0, gpio1_a5); + } + + while (len) { + transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); + + phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1); + phy->iobuf[1] = 0xd4; + phy->iobuf[2] = addr >> 8; + phy->iobuf[3] = addr; + + memset(&spi_xfer, 0, sizeof(spi_xfer)); + spi_xfer.tx_buf = phy->iobuf; + spi_xfer.rx_buf = phy->iobuf; + spi_xfer.len = 4; + spi_xfer.cs_change = 1; + + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + + spi_xfer.cs_change = 0; + spi_xfer.len = transfer_len; + spi_xfer.delay_usecs = 5; + + if (in) { + spi_xfer.tx_buf = NULL; + } else if (out) { + spi_xfer.rx_buf = NULL; + memcpy(phy->iobuf, out, transfer_len); + out += transfer_len; + } + + spi_message_init(&m); + spi_message_add_tail(&spi_xfer, &m); + reinit_completion(&phy->ready); + ret = spi_sync_locked(phy->spi_device, &m); + if (ret < 0) + goto exit; + + if (in) { + memcpy(in, phy->iobuf, transfer_len); + in += transfer_len; + } + + len -= transfer_len; + } + +exit: + /* set gpio1_a5 to HIGH */ + if (is_ft_all() && (phy->spi_device->chip_select == 0)) { + iowrite32(0x20, gpio1_a5); + } + + spi_bus_unlock(phy->spi_device->master); + tcm_dbg("TCM-dbg: ret: %d\n", ret); + return ret; +} + +static int tcm_tis_read8(struct device *dev, + u32 addr, u16 len, u8 *result) +{ + return tcm_tis_spi_transfer(dev, addr, len, result, NULL); +} + +static int tcm_tis_write8(struct device *dev, + u32 addr, u16 len, u8 *value) +{ + return tcm_tis_spi_transfer(dev, addr, len, NULL, value); +} + +static int tcm_tis_readb(struct device *dev, u32 addr, u8 *value) +{ + return tcm_tis_read8(dev, addr, sizeof(u8), value); +} + +static int tcm_tis_writeb(struct device *dev, u32 addr, u8 value) +{ + return tcm_tis_write8(dev, addr, sizeof(u8), &value); +} + +static int tcm_tis_readl(struct device *dev, u32 addr, u32 *result) +{ + int rc; + __le32 result_le; + + rc = tcm_tis_read8(dev, addr, sizeof(u32), (u8 *)&result_le); + tcm_dbg("TCM-dbg: result_le: 0x%x\n", result_le); + if (!rc) + *result = le32_to_cpu(result_le); + + return rc; +} + +static int tcm_tis_writel(struct device *dev, u32 addr, u32 value) +{ + int rc; + __le32 value_le; + + value_le = cpu_to_le32(value); + rc = tcm_tis_write8(dev, addr, sizeof(u32), (u8 *)&value_le); + + return rc; +} + +static int request_locality(struct tcm_chip *chip, int l); +static void release_locality(struct tcm_chip *chip, int l, int force); +static void cleanup_tis(void) +{ + int ret; + u32 inten; + struct tcm_vendor_specific *i, *j; + struct tcm_chip *chip; + + spin_lock(&tis_lock); + list_for_each_entry_safe(i, j, &tis_chips, list) { + chip = to_tcm_chip(i); + ret = tcm_tis_readl(chip->dev, + TCM_INT_ENABLE(chip->vendor.locality), &inten); + if (ret < 0) + return; + + tcm_tis_writel(chip->dev, TCM_INT_ENABLE(chip->vendor.locality), + ~TCM_GLOBAL_INT_ENABLE & inten); + release_locality(chip, chip->vendor.locality, 1); + } + spin_unlock(&tis_lock); +} + +static void tcm_tis_init(struct tcm_chip *chip) +{ + int ret; + u8 rid; + u32 vendor, intfcaps; + + ret = tcm_tis_readl(chip->dev, TCM_DID_VID(0), &vendor); + + if ((vendor & 0xffff) != 0x19f5 && (vendor & 0xffff) != 0x1B4E) + pr_info("there is no Nationz TCM on you computer\n"); + + ret = tcm_tis_readb(chip->dev, TCM_RID(0), &rid); + if (ret < 0) + return; + pr_info("kylin: 2019-09-21 1.2 TCM 
(device-id 0x%X, rev-id %d)\n", + vendor >> 16, rid); + + /* Figure out the capabilities */ + ret = tcm_tis_readl(chip->dev, + TCM_INTF_CAPS(chip->vendor.locality), &intfcaps); + if (ret < 0) + return; + + if (request_locality(chip, 0) != 0) + pr_err("tcm request_locality err\n"); + + atomic_set(&chip->data_pending, 0); +} + +static void tcm_handle_err(struct tcm_chip *chip) +{ + cleanup_tis(); + tcm_tis_init(chip); +} + +static bool check_locality(struct tcm_chip *chip, int l) +{ + int ret; + u8 access; + + ret = tcm_tis_readb(chip->dev, TCM_ACCESS(l), &access); + tcm_dbg("TCM-dbg: access: 0x%x\n", access); + if (ret < 0) + return false; + + if ((access & (TCM_ACCESS_ACTIVE_LOCALITY | TCM_ACCESS_VALID)) == + (TCM_ACCESS_ACTIVE_LOCALITY | TCM_ACCESS_VALID)) { + chip->vendor.locality = l; + return true; + } + + return false; +} + +static int request_locality(struct tcm_chip *chip, int l) +{ + unsigned long stop; + + if (check_locality(chip, l)) + return l; + + tcm_tis_writeb(chip->dev, TCM_ACCESS(l), TCM_ACCESS_REQUEST_USE); + + /* wait for burstcount */ + stop = jiffies + chip->vendor.timeout_a; + do { + if (check_locality(chip, l)) + return l; + msleep(TCM_TIMEOUT); + } while (time_before(jiffies, stop)); + + return -1; +} + +static void release_locality(struct tcm_chip *chip, int l, int force) +{ + int ret; + u8 access; + + ret = tcm_tis_readb(chip->dev, TCM_ACCESS(l), &access); + if (ret < 0) + return; + if (force || (access & (TCM_ACCESS_REQUEST_PENDING | TCM_ACCESS_VALID)) == + (TCM_ACCESS_REQUEST_PENDING | TCM_ACCESS_VALID)) + tcm_tis_writeb(chip->dev, + TCM_ACCESS(l), TCM_ACCESS_ACTIVE_LOCALITY); +} + +static u8 tcm_tis_status(struct tcm_chip *chip) +{ + int ret; + u8 status; + + ret = tcm_tis_readb(chip->dev, + TCM_STS(chip->vendor.locality), &status); + tcm_dbg("TCM-dbg: status: 0x%x\n", status); + if (ret < 0) + return 0; + + return status; +} + +static void tcm_tis_ready(struct tcm_chip *chip) +{ + /* this causes the current command to be aboreted */ + tcm_tis_writeb(chip->dev, TCM_STS(chip->vendor.locality), + TCM_STS_COMMAND_READY); +} + +static int get_burstcount(struct tcm_chip *chip) +{ + int ret; + unsigned long stop; + u8 tmp, tmp1; + int burstcnt = 0; + + /* wait for burstcount */ + /* which timeout value, spec has 2 answers (c & d) */ + stop = jiffies + chip->vendor.timeout_d; + do { + ret = tcm_tis_readb(chip->dev, + TCM_STS(chip->vendor.locality) + 1, + &tmp); + tcm_dbg("TCM-dbg: burstcnt: 0x%x\n", burstcnt); + if (ret < 0) + return -EINVAL; + ret = tcm_tis_readb(chip->dev, + (TCM_STS(chip->vendor.locality) + 2), + &tmp1); + tcm_dbg("TCM-dbg: burstcnt: 0x%x\n", burstcnt); + if (ret < 0) + return -EINVAL; + + burstcnt = tmp | (tmp1 << 8); + if (burstcnt) + return burstcnt; + msleep(TCM_TIMEOUT); + } while (time_before(jiffies, stop)); + + return -EBUSY; +} + +static int wait_for_stat(struct tcm_chip *chip, u8 mask, + unsigned long timeout, + wait_queue_head_t *queue) +{ + unsigned long stop; + u8 status; + + /* check current status */ + status = tcm_tis_status(chip); + if ((status & mask) == mask) + return 0; + + stop = jiffies + timeout; + do { + msleep(TCM_TIMEOUT); + status = tcm_tis_status(chip); + if ((status & mask) == mask) + return 0; + } while (time_before(jiffies, stop)); + + return -ETIME; +} + +static int recv_data(struct tcm_chip *chip, u8 *buf, size_t count) +{ + int ret; + int size = 0, burstcnt; + + while (size < count && wait_for_stat(chip, + TCM_STS_DATA_AVAIL | TCM_STS_VALID, + chip->vendor.timeout_c, + &chip->vendor.read_queue) == 0) { + burstcnt = 
get_burstcount(chip); + + if (burstcnt < 0) { + dev_err(chip->dev, "Unable to read burstcount\n"); + return burstcnt; + } + + for (; burstcnt > 0 && size < count; burstcnt--) { + ret = tcm_tis_readb(chip->dev, + TCM_DATA_FIFO(chip->vendor.locality), + &buf[size]); + tcm_dbg("TCM-dbg: buf[%d]: 0x%x\n", size, buf[size]); + size++; + } + } + + return size; +} + +static int tcm_tis_recv(struct tcm_chip *chip, u8 *buf, size_t count) +{ + int size = 0; + int expected, status; + unsigned long stop; + + if (count < TCM_HEADER_SIZE) { + dev_err(chip->dev, "read size is to small: %d\n", (u32)(count)); + size = -EIO; + goto out; + } + + /* read first 10 bytes, including tag, paramsize, and result */ + size = recv_data(chip, buf, TCM_HEADER_SIZE); + if (size < TCM_HEADER_SIZE) { + dev_err(chip->dev, "Unable to read header\n"); + goto out; + } + + expected = be32_to_cpu(*(__be32 *)(buf + 2)); + if (expected > count) { + dev_err(chip->dev, "Expected data count\n"); + size = -EIO; + goto out; + } + + size += recv_data(chip, &buf[TCM_HEADER_SIZE], + expected - TCM_HEADER_SIZE); + if (size < expected) { + dev_err(chip->dev, "Unable to read remainder of result\n"); + size = -ETIME; + goto out; + } + + wait_for_stat(chip, TCM_STS_VALID, chip->vendor.timeout_c, + &chip->vendor.int_queue); + + stop = jiffies + chip->vendor.timeout_c; + do { + msleep(TCM_TIMEOUT); + status = tcm_tis_status(chip); + if ((status & TCM_STS_DATA_AVAIL) == 0) + break; + + } while (time_before(jiffies, stop)); + + status = tcm_tis_status(chip); + if (status & TCM_STS_DATA_AVAIL) { /* retry? */ + dev_err(chip->dev, "Error left over data\n"); + size = -EIO; + goto out; + } + +out: + tcm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + if (size < 0) + tcm_handle_err(chip); + return size; +} + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tcm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int tcm_tis_send(struct tcm_chip *chip, u8 *buf, size_t len) +{ + int rc, status, burstcnt; + size_t count = 0; + u32 ordinal; + unsigned long stop; + int send_again = 0; + +tcm_tis_send_again: + count = 0; + if (request_locality(chip, 0) < 0) { + dev_err(chip->dev, "send, tcm is busy\n"); + return -EBUSY; + } + status = tcm_tis_status(chip); + + if ((status & TCM_STS_COMMAND_READY) == 0) { + tcm_tis_ready(chip); + if (wait_for_stat(chip, TCM_STS_COMMAND_READY, + chip->vendor.timeout_b, &chip->vendor.int_queue) < 0) { + dev_err(chip->dev, "send, tcm wait time out1\n"); + rc = -ETIME; + goto out_err; + } + } + + while (count < len - 1) { + burstcnt = get_burstcount(chip); + if (burstcnt < 0) { + dev_err(chip->dev, "Unable to read burstcount\n"); + rc = burstcnt; + goto out_err; + } + for (; burstcnt > 0 && count < len - 1; burstcnt--) { + tcm_tis_writeb(chip->dev, + TCM_DATA_FIFO(chip->vendor.locality), buf[count]); + count++; + } + + wait_for_stat(chip, TCM_STS_VALID, chip->vendor.timeout_c, + &chip->vendor.int_queue); + } + + /* write last byte */ + tcm_tis_writeb(chip->dev, + TCM_DATA_FIFO(chip->vendor.locality), buf[count]); + + wait_for_stat(chip, TCM_STS_VALID, + chip->vendor.timeout_c, &chip->vendor.int_queue); + stop = jiffies + chip->vendor.timeout_c; + do { + msleep(TCM_TIMEOUT); + status = tcm_tis_status(chip); + if ((status & TCM_STS_DATA_EXPECT) == 0) + break; + + } while (time_before(jiffies, stop)); + + if ((status & TCM_STS_DATA_EXPECT) != 0) { + dev_err(chip->dev, "send, tcm expect data\n"); + rc = -EIO; + goto out_err; + } + 
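For reference (an illustrative sketch, not driver code): the send and receive paths here rely on the same big-endian framing, a 16-bit tag, a 32-bit total length at offset 2, and a 32-bit ordinal (return code on the response side) at offset 6, which is why tcm_tis_recv() takes the expected length from buf + 2 and the code just below fetches the ordinal from buf + 6 before writing TCM_STS_GO. A minimal user-space decode of the 12-byte SaveState blob from tcm.c, assuming that layout and using hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

static unsigned get_be16(const uint8_t *p)
{
	return (unsigned)((p[0] << 8) | p[1]);
}

static unsigned get_be32(const uint8_t *p)
{
	return ((unsigned)p[0] << 24) | ((unsigned)p[1] << 16) |
	       ((unsigned)p[2] << 8) | (unsigned)p[3];
}

int main(void)
{
	/* the 12-byte blob transmitted by tcm_startup() and tcm_pm_resume() */
	const uint8_t cmd[12] = { 0, 193, 0, 0, 0, 12, 0, 0, 128, 153, 0, 1 };

	printf("tag     = 0x%04X\n", get_be16(cmd));       /* 0x00C1, TCM_TAG_RQU_COMMAND */
	printf("length  = %u bytes\n", get_be32(cmd + 2)); /* 12, the whole command       */
	printf("ordinal = 0x%08X\n", get_be32(cmd + 6));   /* 0x00008099                  */
	printf("param   = %u\n", get_be16(cmd + 10));      /* the trailing 0, 1           */
	return 0;
}
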
+ /* go and do it */ + tcm_tis_writeb(chip->dev, TCM_STS(chip->vendor.locality), TCM_STS_GO); + + ordinal = be32_to_cpu(*((__be32 *)(buf + 6))); + if (wait_for_stat(chip, TCM_STS_DATA_AVAIL | TCM_STS_VALID, + tcm_calc_ordinal_duration(chip, ordinal), + &chip->vendor.read_queue) < 0) { + dev_err(chip->dev, "send, tcm wait time out2\n"); + rc = -ETIME; + goto out_err; + } + + return len; + +out_err: + tcm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + tcm_handle_err(chip); + if (send_again++ < 3) { + goto tcm_tis_send_again; + } + + dev_err(chip->dev, "kylin send, err: %d\n", rc); + return rc; +} + +static struct file_operations tis_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tcm_open, + .read = tcm_read, + .write = tcm_write, + .release = tcm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tcm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tcm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tcm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tcm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tcm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tcm_show_temp_deactivated, + NULL); +static DEVICE_ATTR(caps, S_IRUGO, tcm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tcm_store_cancel); + +static struct attribute *tis_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, NULL, +}; + +static struct attribute_group tis_attr_grp = { + .attrs = tis_attrs +}; + +static struct tcm_vendor_specific tcm_tis = { + .status = tcm_tis_status, + .recv = tcm_tis_recv, + .send = tcm_tis_send, + .cancel = tcm_tis_ready, + .req_complete_mask = TCM_STS_DATA_AVAIL | TCM_STS_VALID, + .req_complete_val = TCM_STS_DATA_AVAIL | TCM_STS_VALID, + .req_canceled = TCM_STS_COMMAND_READY, + .attr_group = &tis_attr_grp, + .miscdev = { + .fops = &tis_ops, + }, +}; + +static struct tcm_chip *chip; +static int tcm_tis_spi_probe(struct spi_device *spi) +{ + int ret; + u8 revid; + u32 vendor, intfcaps; + struct tcm_tis_spi_phy *phy; + struct chip_data *spi_chip; + + pr_info("TCM(ky): __func__(v=%d) ..\n", + 10); + + tcm_dbg("TCM-dbg: %s/%d, enter\n", __func__, __LINE__); + phy = devm_kzalloc(&spi->dev, sizeof(struct tcm_tis_spi_phy), + GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + if (!phy->iobuf) + return -ENOMEM; + + phy->spi_device = spi; + init_completion(&phy->ready); + + tcm_dbg("TCM-dbg: %s/%d\n", __func__, __LINE__); + /* init spi dev */ + spi->chip_select = 0; /* cs0 */ + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + spi->max_speed_hz = spi->max_speed_hz ? : 24000000; + spi_setup(spi); + + spi_chip = spi_get_ctldata(spi); + if (!spi_chip) { + pr_err("There was wrong in spi master\n"); + return -ENODEV; + } + /* tcm does not support interrupt mode, we use poll mode instead. */ + spi_chip->poll_mode = 1; + + tcm_dbg("TCM-dbg: %s/%d\n", __func__, __LINE__); + /* regiter tcm hw */ + chip = tcm_register_hardware(&spi->dev, &tcm_tis); + if (!chip) { + dev_err(&spi->dev, "tcm tis register hardware err\n"); + return -ENODEV; + } + + dev_set_drvdata(chip->dev, phy); + + /** + * phytium2000a4 spi controller's clk clk level is unstable, + * so it is solved by using the low level of gpio output. 
+ **/ + if (is_ft_all() && (spi->chip_select == 0)) { + /* reuse conf reg base */ + reuse_conf_reg = ioremap(REUSE_CONF_REG_BASE, 0x10); + if (!reuse_conf_reg) { + dev_err(&spi->dev, "Failed to ioremap reuse conf reg\n"); + ret = -ENOMEM; + goto out_err; + } + + /* gpio1 a5 base addr */ + gpio1_a5 = ioremap(REUSE_GPIO1_A5_BASE, 0x10); + if (!gpio1_a5) { + dev_err(&spi->dev, "Failed to ioremap gpio1 a5\n"); + ret = -ENOMEM; + goto out_err; + } + + /* reuse cs0 to gpio1_a5 */ + iowrite32((ioread32(reuse_conf_reg) | 0xFFFF0) & 0xFFF9004F, + reuse_conf_reg); + /* set gpio1 a5 to output */ + iowrite32(0x20, gpio1_a5 + 0x4); + } + + tcm_dbg("TCM-dbg: %s/%d\n", + __func__, __LINE__); + ret = tcm_tis_readl(chip->dev, TCM_DID_VID(0), &vendor); + if (ret < 0) + goto out_err; + + tcm_dbg("TCM-dbg: %s/%d, vendor: 0x%x\n", + __func__, __LINE__, vendor); + if ((vendor & 0xffff) != 0x19f5 && (vendor & 0xffff) != 0x1B4E) { + dev_err(chip->dev, "there is no Nationz TCM on you computer\n"); + goto out_err; + } + + ret = tcm_tis_readb(chip->dev, TCM_RID(0), &revid); + tcm_dbg("TCM-dbg: %s/%d, revid: 0x%x\n", + __func__, __LINE__, revid); + if (ret < 0) + goto out_err; + dev_info(chip->dev, "kylin: 2019-09-21 1.2 TCM " + "(device-id 0x%X, rev-id %d)\n", + vendor >> 16, revid); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); + + tcm_dbg("TCM-dbg: %s/%d\n", + __func__, __LINE__); + /* Figure out the capabilities */ + ret = tcm_tis_readl(chip->dev, + TCM_INTF_CAPS(chip->vendor.locality), &intfcaps); + if (ret < 0) + goto out_err; + + tcm_dbg("TCM-dbg: %s/%d, intfcaps: 0x%x\n", + __func__, __LINE__, intfcaps); + if (request_locality(chip, 0) != 0) { + dev_err(chip->dev, "tcm request_locality err\n"); + ret = -ENODEV; + goto out_err; + } + + INIT_LIST_HEAD(&chip->vendor.list); + spin_lock(&tis_lock); + list_add(&chip->vendor.list, &tis_chips); + spin_unlock(&tis_lock); + + tcm_get_timeouts(chip); + tcm_startup(chip); + + tcm_dbg("TCM-dbg: %s/%d, exit\n", __func__, __LINE__); + return 0; + +out_err: + if (is_ft_all()) { + if (reuse_conf_reg) + iounmap(reuse_conf_reg); + if (gpio1_a5) + iounmap(gpio1_a5); + } + tcm_dbg("TCM-dbg: %s/%d, error\n", __func__, __LINE__); + dev_set_drvdata(chip->dev, chip); + tcm_remove_hardware(chip->dev); + + return ret; +} + +static int tcm_tis_spi_remove(struct spi_device *dev) +{ + if (is_ft_all()) { + if (reuse_conf_reg) + iounmap(reuse_conf_reg); + if (gpio1_a5) + iounmap(gpio1_a5); + } + + dev_info(&dev->dev, "%s\n", __func__); + dev_set_drvdata(chip->dev, chip); + tcm_remove_hardware(&dev->dev); + + return 0; +} + +static const struct acpi_device_id tcm_tis_spi_acpi_match[] = { + {"TCMS0001", 0}, + {"SMO0768", 0}, + {"ZIC0601", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, tcm_tis_spi_acpi_match); + +static const struct spi_device_id tcm_tis_spi_id_table[] = { + {"SMO0768", 0}, + {"ZIC0601", 0}, + {} +}; +MODULE_DEVICE_TABLE(spi, tcm_tis_spi_id_table); + +static struct spi_driver tcm_tis_spi_drv = { + .driver = { + .name = "tcm_tis_spi", + .acpi_match_table = ACPI_PTR(tcm_tis_spi_acpi_match), + }, + .id_table = tcm_tis_spi_id_table, + .probe = tcm_tis_spi_probe, + .remove = tcm_tis_spi_remove, +}; + +module_spi_driver(tcm_tis_spi_drv); + +MODULE_AUTHOR("xiongxin"); +MODULE_DESCRIPTION("TCM Driver Base Spi"); +MODULE_VERSION("6.1.0.2"); 
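For reference, a sketch (illustrative only, not driver code) of the wire format tcm_tis_spi_transfer() implements above: every chunk is preceded by a 4-byte header in which bit 7 of byte 0 selects read (0x80) or write, the low 7 bits hold the chunk length minus one, byte 1 is a fixed 0xD4, and bytes 2 and 3 carry the register address big-endian; payloads larger than MAX_SPI_FRAMESIZE (64 bytes) are split across several such frames, and the TCM_*() register macros place each locality in its own 4 KiB window via the << 12 shift. The helper name and example values below are assumptions used only to demonstrate the encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SPI_FRAMESIZE 64
#define TCM_STS(l)       (0x0018 | ((l) << 12))
#define TCM_DATA_FIFO(l) (0x0024 | ((l) << 12))

/* Build the 4-byte frame header sent before each chunk. */
static void build_header(uint8_t hdr[4], bool read, uint16_t addr, uint16_t chunk)
{
	hdr[0] = (uint8_t)((read ? 0x80 : 0x00) | (chunk - 1));
	hdr[1] = 0xd4;
	hdr[2] = (uint8_t)(addr >> 8);
	hdr[3] = (uint8_t)addr;
}

int main(void)
{
	uint8_t hdr[4];
	uint16_t addr = TCM_DATA_FIFO(0);
	uint16_t len = 100;	/* example payload: needs two frames */

	while (len) {
		uint16_t chunk = len > MAX_SPI_FRAMESIZE ? MAX_SPI_FRAMESIZE : len;

		build_header(hdr, true, addr, chunk);
		printf("read %2u bytes @0x%04X: %02x %02x %02x %02x\n",
		       (unsigned)chunk, (unsigned)addr,
		       hdr[0], hdr[1], hdr[2], hdr[3]);
		len -= chunk;
	}
	printf("status register for locality 1: 0x%04X\n", (unsigned)TCM_STS(1));
	return 0;
}
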
+MODULE_LICENSE("GPL"); diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 010ae1e9c7fbfabee6b26ceb4fe412e9b20b436f..0b6714dfcd9bbbd2a37f1a5e880820b707dcb1e4 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -149,6 +149,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel) channel = get_channel_from_mode(channel->light, GB_CHANNEL_MODE_TORCH); + if (!channel) + return -EINVAL; + /* For not flash we need to convert brightness to intensity */ intensity = channel->intensity_uA.min + (channel->intensity_uA.step * channel->led->brightness); @@ -552,7 +555,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light) } channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH); - WARN_ON(!channel_flash); + if (!channel_flash) { + dev_err(dev, "failed to get flash channel from mode\n"); + return -EINVAL; + } fled = &channel_flash->fled; diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c index 0529e5628c24c40c56c1785f7b9c3cf9b058f1aa..ae5c0285a9420f7325612798e13c1bc91a9fe5e9 100644 --- a/drivers/staging/greybus/power_supply.c +++ b/drivers/staging/greybus/power_supply.c @@ -520,7 +520,7 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy) op = gb_operation_create(connection, GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS, - sizeof(req), sizeof(*resp) + props_count * + sizeof(*req), sizeof(*resp) + props_count * sizeof(struct gb_power_supply_props_desc), GFP_KERNEL); if (!op) diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index 8a006323c3c11924409c83b9965361307d967550..58c6319fc45ab579c778c6d0bb2e399a47fe5812 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -801,6 +801,17 @@ static void gb_tty_port_shutdown(struct tty_port *port) gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev); } +static void gb_tty_port_destruct(struct tty_port *port) +{ + struct gb_tty *gb_tty = container_of(port, struct gb_tty, port); + + if (gb_tty->minor != GB_NUM_MINORS) + release_minor(gb_tty); + kfifo_free(&gb_tty->write_fifo); + kfree(gb_tty->buffer); + kfree(gb_tty); +} + static const struct tty_operations gb_ops = { .install = gb_tty_install, .open = gb_tty_open, @@ -824,6 +835,7 @@ static const struct tty_port_operations gb_port_ops = { .dtr_rts = gb_tty_dtr_rts, .activate = gb_tty_port_activate, .shutdown = gb_tty_port_shutdown, + .destruct = gb_tty_port_destruct, }; static int gb_uart_probe(struct gbphy_device *gbphy_dev, @@ -836,17 +848,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, int retval; int minor; - gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL); - if (!gb_tty) - return -ENOMEM; - connection = gb_connection_create(gbphy_dev->bundle, le16_to_cpu(gbphy_dev->cport_desc->id), gb_uart_request_handler); - if (IS_ERR(connection)) { - retval = PTR_ERR(connection); - goto exit_tty_free; - } + if (IS_ERR(connection)) + return PTR_ERR(connection); max_payload = gb_operation_get_payload_size_max(connection); if (max_payload < sizeof(struct gb_uart_send_data_request)) { @@ -854,13 +860,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, goto exit_connection_destroy; } + gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL); + if (!gb_tty) { + retval = -ENOMEM; + goto exit_connection_destroy; + } + + tty_port_init(&gb_tty->port); + gb_tty->port.ops = &gb_port_ops; + gb_tty->minor = GB_NUM_MINORS; + gb_tty->buffer_payload_max = max_payload - sizeof(struct 
gb_uart_send_data_request); gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL); if (!gb_tty->buffer) { retval = -ENOMEM; - goto exit_connection_destroy; + goto exit_put_port; } INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work); @@ -868,7 +884,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE, GFP_KERNEL); if (retval) - goto exit_buf_free; + goto exit_put_port; gb_tty->credits = GB_UART_FIRMWARE_CREDITS; init_completion(&gb_tty->credits_complete); @@ -882,7 +898,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, } else { retval = minor; } - goto exit_kfifo_free; + goto exit_put_port; } gb_tty->minor = minor; @@ -891,9 +907,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, init_waitqueue_head(&gb_tty->wioctl); mutex_init(&gb_tty->mutex); - tty_port_init(&gb_tty->port); - gb_tty->port.ops = &gb_port_ops; - gb_tty->connection = connection; gb_tty->gbphy_dev = gbphy_dev; gb_connection_set_data(connection, gb_tty); @@ -901,7 +914,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, retval = gb_connection_enable_tx(connection); if (retval) - goto exit_release_minor; + goto exit_put_port; send_control(gb_tty, gb_tty->ctrlout); @@ -928,16 +941,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev, exit_connection_disable: gb_connection_disable(connection); -exit_release_minor: - release_minor(gb_tty); -exit_kfifo_free: - kfifo_free(&gb_tty->write_fifo); -exit_buf_free: - kfree(gb_tty->buffer); +exit_put_port: + tty_port_put(&gb_tty->port); exit_connection_destroy: gb_connection_destroy(connection); -exit_tty_free: - kfree(gb_tty); return retval; } @@ -968,15 +975,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev) gb_connection_disable_rx(connection); tty_unregister_device(gb_tty_driver, gb_tty->minor); - /* FIXME - free transmit / receive buffers */ - gb_connection_disable(connection); - tty_port_destroy(&gb_tty->port); gb_connection_destroy(connection); - release_minor(gb_tty); - kfifo_free(&gb_tty->write_fifo); - kfree(gb_tty->buffer); - kfree(gb_tty); + + tty_port_put(&gb_tty->port); } static int gb_tty_init(void) diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index df0499fc4802fb4cb7ac8a17950dcac4266e1f1f..6857a4bf729760c3230dd762a58e715df3b7d264 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -109,10 +109,10 @@ #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ -#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */ -#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */ -#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */ -#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */ +#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */ +#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */ +#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */ +#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */ #define AD7193_CH_TEMP 0x100 /* Temp senseor */ #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index b736275c10f53532b0d27d445711dec1bda6de67..6a48ad067a8bd39cc1f2bd086dfd8078c9d418e5 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -256,7 +256,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned int 
devaddr, if (ret) return ret; - __ad7280_read32(st, &tmp); + ret = __ad7280_read32(st, &tmp); + if (ret) + return ret; if (ad7280_check_crc(st, tmp)) return -EIO; @@ -294,7 +296,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr, ad7280_delay(st); - __ad7280_read32(st, &tmp); + ret = __ad7280_read32(st, &tmp); + if (ret) + return ret; if (ad7280_check_crc(st, tmp)) return -EIO; @@ -327,7 +331,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt, ad7280_delay(st); for (i = 0; i < cnt; i++) { - __ad7280_read32(st, &tmp); + ret = __ad7280_read32(st, &tmp); + if (ret) + return ret; if (ad7280_check_crc(st, tmp)) return -EIO; @@ -370,7 +376,10 @@ static int ad7280_chain_setup(struct ad7280_state *st) return ret; for (n = 0; n <= AD7280A_MAX_CHAIN; n++) { - __ad7280_read32(st, &val); + ret = __ad7280_read32(st, &val); + if (ret) + return ret; + if (val == 0) return n - 1; diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c index 25b9fcd5e3a4bcedd8b4a5288cf6ff2e3e6605eb..ce3351832fb187ec4b6eb605107525c2646abdfb 100644 --- a/drivers/staging/iio/adc/ad7606.c +++ b/drivers/staging/iio/adc/ad7606.c @@ -26,9 +26,12 @@ #include "ad7606.h" -/* Scales are computed as 2.5/2**16 and 5/2**16 respectively */ +/* + * Scales are computed as 5000/32768 and 10000/32768 respectively, + * so that when applied to the raw values they provide mV values + */ static const unsigned int scale_avail[2][2] = { - {0, 38147}, {0, 76294} + {0, 152588}, {0, 305176} }; static int ad7606_reset(struct ad7606_state *st) diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c index 16d72072c076cbf08ec40daeccb81aa303960b81..8bcb5d5de749c6a5b804bdff562f01f795b48216 100644 --- a/drivers/staging/iio/adc/ad7780.c +++ b/drivers/staging/iio/adc/ad7780.c @@ -87,12 +87,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev, long m) { struct ad7780_state *st = iio_priv(indio_dev); + int voltage_uv; switch (m) { case IIO_CHAN_INFO_RAW: return ad_sigma_delta_single_conversion(indio_dev, chan, val); case IIO_CHAN_INFO_SCALE: - *val = st->int_vref_mv * st->gain; + voltage_uv = regulator_get_voltage(st->reg); + if (voltage_uv < 0) + return voltage_uv; + *val = (voltage_uv / 1000) * st->gain; *val2 = chan->scan_type.realbits - 1; return IIO_VAL_FRACTIONAL_LOG2; case IIO_CHAN_INFO_OFFSET: diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c index f66dd3ebbab1fe787fccc180b70441a4e598ab09..856bcfa60c6c47220984ea16fb3fae1aa62ba21f 100644 --- a/drivers/staging/iio/addac/adt7316-i2c.c +++ b/drivers/staging/iio/addac/adt7316-i2c.c @@ -35,6 +35,8 @@ static int adt7316_i2c_read(void *client, u8 reg, u8 *data) return ret; } + *data = ret; + return 0; } diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c index 3f22d10887139b58c2c554870e5ebf47cdd51c48..68866f552290641b37ed0d50f46e46225c2f1f29 100644 --- a/drivers/staging/iio/addac/adt7316.c +++ b/drivers/staging/iio/addac/adt7316.c @@ -47,6 +47,8 @@ #define ADT7516_MSB_AIN3 0xA #define ADT7516_MSB_AIN4 0xB #define ADT7316_DA_DATA_BASE 0x10 +#define ADT7316_DA_10_BIT_LSB_SHIFT 6 +#define ADT7316_DA_12_BIT_LSB_SHIFT 4 #define ADT7316_DA_MSB_DATA_REGS 4 #define ADT7316_LSB_DAC_A 0x10 #define ADT7316_MSB_DAC_A 0x11 @@ -1086,7 +1088,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev, ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK); if (data & 0x1) ldac_config |= ADT7516_DAC_AB_IN_VREF; - 
else if (data & 0x2) + if (data & 0x2) ldac_config |= ADT7516_DAC_CD_IN_VREF; } else { ret = kstrtou8(buf, 16, &data); @@ -1408,7 +1410,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644, static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip, int channel, char *buf) { - u16 data; + u16 data = 0; u8 msb, lsb, offset; int ret; @@ -1433,7 +1435,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip, if (ret) return -EIO; - data = (msb << offset) + (lsb & ((1 << offset) - 1)); + if (chip->dac_bits == 12) + data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT; + else if (chip->dac_bits == 10) + data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT; + data |= msb << offset; return sprintf(buf, "%d\n", data); } @@ -1441,7 +1447,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip, static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip, int channel, const char *buf, size_t len) { - u8 msb, lsb, offset; + u8 msb, lsb, lsb_reg, offset; u16 data; int ret; @@ -1459,9 +1465,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip, return -EINVAL; if (chip->dac_bits > 8) { - lsb = data & (1 << offset); + lsb = data & ((1 << offset) - 1); + if (chip->dac_bits == 12) + lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT; + else + lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT; ret = chip->bus.write(chip->bus.client, - ADT7316_DA_DATA_BASE + channel * 2, lsb); + ADT7316_DA_DATA_BASE + channel * 2, lsb_reg); if (ret) return -EIO; } diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index d16084d7068ce06a94e0adca5f5f517fc8c97045..a354ce6b2b7b72d193bba50ce914924b4bb36c9b 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -6,6 +6,7 @@ * Licensed under the GPL-2 or later. 
*/ +#include #include #include #include @@ -130,7 +131,7 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev, { int ret; u8 threshtype; - bool adaptive; + bool thrfixed; struct ad7150_chip_info *chip = iio_priv(indio_dev); ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG); @@ -138,21 +139,23 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev, return ret; threshtype = (ret >> 5) & 0x03; - adaptive = !!(ret & 0x80); + + /*check if threshold mode is fixed or adaptive*/ + thrfixed = FIELD_GET(AD7150_CFG_FIX, ret); switch (type) { case IIO_EV_TYPE_MAG_ADAPTIVE: if (dir == IIO_EV_DIR_RISING) - return adaptive && (threshtype == 0x1); - return adaptive && (threshtype == 0x0); + return !thrfixed && (threshtype == 0x1); + return !thrfixed && (threshtype == 0x0); case IIO_EV_TYPE_THRESH_ADAPTIVE: if (dir == IIO_EV_DIR_RISING) - return adaptive && (threshtype == 0x3); - return adaptive && (threshtype == 0x2); + return !thrfixed && (threshtype == 0x3); + return !thrfixed && (threshtype == 0x2); case IIO_EV_TYPE_THRESH: if (dir == IIO_EV_DIR_RISING) - return !adaptive && (threshtype == 0x1); - return !adaptive && (threshtype == 0x0); + return thrfixed && (threshtype == 0x1); + return thrfixed && (threshtype == 0x0); default: break; } diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c index 029c3bf42d4d942f2e58c81cfb03292fc8eae0d3..07774c000c5a68db9f7f6c1e93eae1840fac23ec 100644 --- a/drivers/staging/iio/meter/ade7854.c +++ b/drivers/staging/iio/meter/ade7854.c @@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644, static IIO_DEV_ATTR_IPEAK(0644, ade7854_read_32bit, ade7854_write_32bit, - ADE7854_VPEAK); + ADE7854_IPEAK); static IIO_DEV_ATTR_APHCAL(0644, ade7854_read_16bit, ade7854_write_16bit, diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c index 59586947a9366c15ae4f2b857d358bd83ab6b1e9..51cda91514124b4d5a733b8d74171edf553b7b19 100644 --- a/drivers/staging/iio/resolver/ad2s90.c +++ b/drivers/staging/iio/resolver/ad2s90.c @@ -85,7 +85,12 @@ static int ad2s90_probe(struct spi_device *spi) /* need 600ns between CS and the first falling edge of SCLK */ spi->max_speed_hz = 830000; spi->mode = SPI_MODE_3; - spi_setup(spi); + ret = spi_setup(spi); + + if (ret < 0) { + dev_err(&spi->dev, "spi_setup failed!\n"); + return ret; + } return 0; } diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig index aea449a8dbf8a08a2e2e6b648d2228853ce66a70..76818cc48ddcb574ed35ec226bf45abc9f0c0bae 100644 --- a/drivers/staging/media/davinci_vpfe/Kconfig +++ b/drivers/staging/media/davinci_vpfe/Kconfig @@ -1,7 +1,7 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" depends on VIDEO_V4L2 - depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST + depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1) depends on VIDEO_V4L2_SUBDEV_API depends on VIDEO_DAVINCI_VPBE_DISPLAY select VIDEOBUF2_DMA_CONTIG diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 1269a983455e57c5434b71dc145f07f1a088623c..13b890b9ef187f959c1decfae7f60169102cb31a 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -422,6 +422,9 @@ static int vpfe_open(struct file *file) /* If decoder is not initialized. 
initialize it */ if (!video->initialized && vpfe_update_pipe_state(video)) { mutex_unlock(&video->lock); + v4l2_fh_del(&handle->vfh); + v4l2_fh_exit(&handle->vfh); + kfree(handle); return -ENODEV; } /* Increment device users counter */ diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c index 28f41caba05d67221b0dc2bd0f3ec1d48b4afc4b..fb442499f806604f033ce1de5e5c586c9461ff45 100644 --- a/drivers/staging/media/imx/imx-ic-prpencvf.c +++ b/drivers/staging/media/imx/imx-ic-prpencvf.c @@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *priv) goto out_free_nfb4eof_irq; } + /* start upstream */ + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1); + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0; + if (ret) { + v4l2_err(&ic_priv->sd, + "upstream stream on failed: %d\n", ret); + goto out_free_eof_irq; + } + /* start the EOF timeout timer */ mod_timer(&priv->eof_timeout_timer, jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT)); return 0; +out_free_eof_irq: + devm_free_irq(ic_priv->dev, priv->eof_irq, priv); out_free_nfb4eof_irq: devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv); out_unsetup: @@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *priv) if (ret == 0) v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n"); + /* stop upstream */ + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0); + if (ret && ret != -ENOIOCTLCMD) + v4l2_warn(&ic_priv->sd, + "upstream stream off failed: %d\n", ret); + devm_free_irq(ic_priv->dev, priv->eof_irq, priv); devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv); @@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable) if (ret) goto out; - /* start/stop upstream */ - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable); - ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0; - if (ret) { - if (enable) - prp_stop(priv); - goto out; - } - update_count: priv->stream_count += enable ? 1 : -1; if (priv->stream_count < 0) diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index cd2c291e1e9429c7ec4944c21cd90c365e441147..0f8fdc347091b5fb6726b7909ad8ea94c2f32cd4 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep, /* * Parses the fwnode endpoint from the source pad of the entity * connected to this CSI. This will either be the entity directly - * upstream from the CSI-2 receiver, or directly upstream from the - * video mux. The endpoint is needed to determine the bus type and - * bus config coming into the CSI. + * upstream from the CSI-2 receiver, directly upstream from the + * video mux, or directly upstream from the CSI itself. The endpoint + * is needed to determine the bus type and bus config coming into + * the CSI. */ static int csi_get_upstream_endpoint(struct csi_priv *priv, struct v4l2_fwnode_endpoint *ep) @@ -165,10 +166,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv, struct v4l2_subdev *sd; struct media_pad *pad; + if (!IS_ENABLED(CONFIG_OF)) + return -ENXIO; + if (!priv->src_sd) return -EPIPE; - src = &priv->src_sd->entity; + sd = priv->src_sd; + src = &sd->entity; if (src->function == MEDIA_ENT_F_VID_MUX) { /* @@ -182,6 +187,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv, src = &sd->entity; } + /* + * If the source is neither the video mux nor the CSI-2 receiver, + * get the source pad directly upstream from CSI itself. 
+ */ + if (src->function != MEDIA_ENT_F_VID_MUX && + sd->grp_id != IMX_MEDIA_GRP_ID_CSI2) + src = &priv->sd.entity; + /* get source pad of entity directly upstream from src */ pad = imx_media_find_upstream_pad(priv->md, src, 0); if (IS_ERR(pad)) @@ -626,7 +639,7 @@ static int csi_idmac_start(struct csi_priv *priv) return ret; } -static void csi_idmac_stop(struct csi_priv *priv) +static void csi_idmac_wait_last_eof(struct csi_priv *priv) { unsigned long flags; int ret; @@ -643,7 +656,10 @@ static void csi_idmac_stop(struct csi_priv *priv) &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT)); if (ret == 0) v4l2_warn(&priv->sd, "wait last EOF timeout\n"); +} +static void csi_idmac_stop(struct csi_priv *priv) +{ devm_free_irq(priv->dev, priv->eof_irq, priv); devm_free_irq(priv->dev, priv->nfb4eof_irq, priv); @@ -719,10 +735,16 @@ static int csi_start(struct csi_priv *priv) output_fi = &priv->frame_interval[priv->active_output_pad]; + /* start upstream */ + ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1); + ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0; + if (ret) + return ret; + if (priv->dest == IPU_CSI_DEST_IDMAC) { ret = csi_idmac_start(priv); if (ret) - return ret; + goto stop_upstream; } ret = csi_setup(priv); @@ -750,11 +772,26 @@ static int csi_start(struct csi_priv *priv) idmac_stop: if (priv->dest == IPU_CSI_DEST_IDMAC) csi_idmac_stop(priv); +stop_upstream: + v4l2_subdev_call(priv->src_sd, video, s_stream, 0); return ret; } static void csi_stop(struct csi_priv *priv) { + if (priv->dest == IPU_CSI_DEST_IDMAC) + csi_idmac_wait_last_eof(priv); + + /* + * Disable the CSI asap, after syncing with the last EOF. + * Doing so after the IDMA channel is disabled has shown to + * create hard system-wide hangs. + */ + ipu_csi_disable(priv->csi); + + /* stop upstream */ + v4l2_subdev_call(priv->src_sd, video, s_stream, 0); + if (priv->dest == IPU_CSI_DEST_IDMAC) { csi_idmac_stop(priv); @@ -762,8 +799,6 @@ static void csi_stop(struct csi_priv *priv) if (priv->fim) imx_media_fim_set_stream(priv->fim, NULL, false); } - - ipu_csi_disable(priv->csi); } static const struct csi_skip_desc csi_skip[12] = { @@ -924,23 +959,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable) goto update_count; if (enable) { - /* upstream must be started first, before starting CSI */ - ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1); - ret = (ret && ret != -ENOIOCTLCMD) ? 
ret : 0; - if (ret) - goto out; - dev_dbg(priv->dev, "stream ON\n"); ret = csi_start(priv); - if (ret) { - v4l2_subdev_call(priv->src_sd, video, s_stream, 0); + if (ret) goto out; - } } else { dev_dbg(priv->dev, "stream OFF\n"); - /* CSI must be stopped first, then stop upstream */ csi_stop(priv); - v4l2_subdev_call(priv->src_sd, video, s_stream, 0); } update_count: @@ -1050,7 +1075,7 @@ static int csi_link_validate(struct v4l2_subdev *sd, struct v4l2_subdev_format *sink_fmt) { struct csi_priv *priv = v4l2_get_subdevdata(sd); - struct v4l2_fwnode_endpoint upstream_ep = {}; + struct v4l2_fwnode_endpoint upstream_ep; bool is_csi2; int ret; diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c index acde372c6795b309aa474e3238bf23e37a604750..1647da216bf9d6ac35d6d8bb9c661d4319b3c81a 100644 --- a/drivers/staging/media/imx/imx-media-of.c +++ b/drivers/staging/media/imx/imx-media-of.c @@ -233,15 +233,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd, struct v4l2_subdev *csi) { struct device_node *csi_np = csi->dev->of_node; - struct fwnode_handle *fwnode, *csi_ep; - struct v4l2_fwnode_link link; struct device_node *ep; - int ret; - - link.local_node = of_fwnode_handle(csi_np); - link.local_port = CSI_SINK_PAD; for_each_child_of_node(csi_np, ep) { + struct fwnode_handle *fwnode, *csi_ep; + struct v4l2_fwnode_link link; + int ret; + + memset(&link, 0, sizeof(link)); + + link.local_node = of_fwnode_handle(csi_np); + link.local_port = CSI_SINK_PAD; + csi_ep = of_fwnode_handle(ep); fwnode = fwnode_graph_get_remote_endpoint(csi_ep); diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c index ceeeb3069a0248d361753050fd595e889c25c58d..212fa06f7c57c0b6949297f6bf6f90db5d36048a 100644 --- a/drivers/staging/media/imx/imx6-mipi-csi2.c +++ b/drivers/staging/media/imx/imx6-mipi-csi2.c @@ -247,7 +247,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2) } /* Waits for low-power LP-11 state on data and clock lanes. */ -static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2) +static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2) { u32 mask, reg; int ret; @@ -258,11 +258,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2) ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg, (reg & mask) == mask, 0, 500000); if (ret) { - v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg); - return ret; + v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n"); + v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg); } - - return 0; } /* Wait for active clock on the clock lane. 
*/ @@ -320,9 +318,7 @@ static int csi2_start(struct csi2_dev *csi2) csi2_enable(csi2, true); /* Step 5 */ - ret = csi2_dphy_wait_stopstate(csi2); - if (ret) - goto err_assert_reset; + csi2_dphy_wait_stopstate(csi2); /* Step 6 */ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1); diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c index 4569838f27a02437d3bc353c3b629244a72fe9cd..0b48677fa958a3f8e2e60ea4e26182761ff792ad 100644 --- a/drivers/staging/most/cdev/cdev.c +++ b/drivers/staging/most/cdev/cdev.c @@ -546,7 +546,7 @@ static void __exit mod_exit(void) destroy_cdev(c); destroy_channel(c); } - unregister_chrdev_region(comp.devno, 1); + unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE); ida_destroy(&comp.minor_id); class_destroy(comp.class); } diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c index f4c464625a67b7869c2a419cb829aee2ca0468c0..25a077f4ea94d30e13622bfc5d83754b1f1c2af7 100644 --- a/drivers/staging/most/core.c +++ b/drivers/staging/most/core.c @@ -351,7 +351,7 @@ static ssize_t set_datatype_show(struct device *dev, for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) - return snprintf(buf, PAGE_SIZE, ch_data_type[i].name); + return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name); } return snprintf(buf, PAGE_SIZE, "unconfigured\n"); } @@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface) INIT_LIST_HEAD(&iface->p->channel_list); iface->p->dev_id = id; - snprintf(iface->p->name, STRING_SIZE, "mdev%d", id); + strcpy(iface->p->name, iface->description); iface->dev.init_name = iface->p->name; iface->dev.bus = &mc.bus; iface->dev.parent = &mc.dev; diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c index 30d816b7e16567f843d67d4ed0c41e1becbd39af..ff80834ef04aab316006cfcc8ffedf7c9e4323ce 100644 --- a/drivers/staging/most/net/net.c +++ b/drivers/staging/most/net/net.c @@ -81,6 +81,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo) unsigned int payload_len = skb->len - ETH_HLEN; unsigned int mdp_len = payload_len + MDP_HDR_LEN; + if (mdp_len < skb->len) { + pr_err("drop: too large packet! (%u)\n", skb->len); + return -EINVAL; + } + if (mbo->buffer_length < mdp_len) { pr_err("drop: too small buffer! (%d for %d)\n", mbo->buffer_length, mdp_len); @@ -128,6 +133,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo) u8 *buff = mbo->virt_address; unsigned int mep_len = skb->len + MEP_HDR_LEN; + if (mep_len < skb->len) { + pr_err("drop: too large packet! (%u)\n", skb->len); + return -EINVAL; + } + if (mbo->buffer_length < mep_len) { pr_err("drop: too small buffer! 
(%d for %d)\n", mbo->buffer_length, mep_len); diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c index cf342eb58e10a34d0e5a9b774c10e3492ce45c2e..ad7e28ab9a4fb79ff864cc746031023be204bb0b 100644 --- a/drivers/staging/most/video/video.c +++ b/drivers/staging/most/video/video.c @@ -530,7 +530,7 @@ static int comp_disconnect_channel(struct most_interface *iface, return 0; } -static struct core_component comp_info = { +static struct core_component comp = { .name = "video", .probe_channel = comp_probe_channel, .disconnect_channel = comp_disconnect_channel, @@ -565,7 +565,7 @@ static void __exit comp_exit(void) } spin_unlock_irq(&list_lock); - most_deregister_component(&comp_info); + most_deregister_component(&comp); BUG_ON(!list_empty(&video_devices)); } diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c index df6ebf41bdea4dc7301414c5cb9b42e5679d5472..5831f816c17b1d37c86b3c332aeff2305930c117 100644 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c +++ b/drivers/staging/mt7621-dma/mtk-hsdma.c @@ -335,6 +335,8 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma, /* tx desc */ src = sg->src_addr; for (i = 0; i < chan->desc->num_sgs; i++) { + tx_desc = &chan->tx_ring[chan->tx_idx]; + if (len > HSDMA_MAX_PLEN) tlen = HSDMA_MAX_PLEN; else @@ -344,7 +346,6 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma, tx_desc->addr1 = src; tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); } else { - tx_desc = &chan->tx_ring[chan->tx_idx]; tx_desc->addr0 = src; tx_desc->flags = HSDMA_DESC_PLEN0(tlen); diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig index d33533872a16f1c4e4e3d7207d6cc04cd6a63379..c8fa17cfa807a24f6450034c714ebc94b59507d4 100644 --- a/drivers/staging/mt7621-pci/Kconfig +++ b/drivers/staging/mt7621-pci/Kconfig @@ -1,6 +1,7 @@ config PCI_MT7621 tristate "MediaTek MT7621 PCI Controller" depends on RALINK + depends on PCI select PCI_DRIVERS_GENERIC help This selects a driver for the MediaTek MT7621 PCI Controller. 
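One detail worth calling out from the drivers/staging/most/net/net.c hunks above: the new mdp_len and mep_len checks are the usual unsigned wrap-around guard. The packet length plus the MOST header length is computed in an unsigned int, so an oversized value could wrap and then pass the buffer-size comparison with a small result. A standalone illustration of the idiom follows (a sketch only; the MEP_HDR_LEN value here is assumed for demonstration, not taken from the driver):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MEP_HDR_LEN 8u	/* assumed value, for illustration only */

/* Mirror of the driver's guard: reject the packet if the addition wrapped. */
static bool mep_len_ok(unsigned int skb_len, unsigned int *mep_len)
{
	*mep_len = skb_len + MEP_HDR_LEN;
	return *mep_len >= skb_len;
}

int main(void)
{
	unsigned int mep_len;

	printf("1500-byte frame : %s\n",
	       mep_len_ok(1500u, &mep_len) ? "ok" : "drop");
	printf("near-UINT_MAX   : %s\n",
	       mep_len_ok(UINT_MAX - 2u, &mep_len) ? "ok" : "drop");
	return 0;
}

The same reasoning applies to the mdp_len check added in skb_to_mamac().
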
diff --git a/drivers/staging/mt7621-pinctrl/Kconfig b/drivers/staging/mt7621-pinctrl/Kconfig index 37cf9c3273bebca6b157ff301563709badde200d..fc361271130729cf8c21548fee9ab048c2e47aec 100644 --- a/drivers/staging/mt7621-pinctrl/Kconfig +++ b/drivers/staging/mt7621-pinctrl/Kconfig @@ -2,3 +2,4 @@ config PINCTRL_RT2880 bool "RT2800 pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK select PINMUX + select GENERIC_PINCONF diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c index b8566ed898f15afdbbd85d61289543efbc3e1bdc..80e7067cfb797a3361148137b53f513a02fc3e0b 100644 --- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c +++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -73,48 +74,12 @@ static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, return 0; } -static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev, - struct device_node *np_config, - struct pinctrl_map **map, - unsigned int *num_maps) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - struct property *prop; - const char *function_name, *group_name; - int ret; - int ngroups; - unsigned int reserved_maps = 0; - - for_each_node_with_property(np_config, "group") - ngroups++; - - *map = NULL; - ret = pinctrl_utils_reserve_map(pctrldev, map, &reserved_maps, - num_maps, ngroups); - if (ret) { - dev_err(p->dev, "can't reserve map: %d\n", ret); - return ret; - } - - of_property_for_each_string(np_config, "group", prop, group_name) { - ret = pinctrl_utils_add_map_mux(pctrldev, map, &reserved_maps, - num_maps, group_name, - function_name); - if (ret) { - dev_err(p->dev, "can't add map: %d\n", ret); - return ret; - } - } - - return 0; -} - static const struct pinctrl_ops rt2880_pctrl_ops = { .get_groups_count = rt2880_get_group_count, .get_group_name = rt2880_get_group_name, .get_group_pins = rt2880_get_group_pins, - .dt_node_to_map = rt2880_pinctrl_dt_node_to_map, - .dt_free_map = pinctrl_utils_free_map, + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, }; static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c index d045b5568e0f83b485db7102e18d2805764bd9d7..578aa6824ad3e3dae3e3d1ffbb4388e84d5d947a 100644 --- a/drivers/staging/mt7621-spi/spi-mt7621.c +++ b/drivers/staging/mt7621-spi/spi-mt7621.c @@ -429,6 +429,7 @@ static int mt7621_spi_probe(struct platform_device *pdev) int status = 0; struct clk *clk; struct mt7621_spi_ops *ops; + int ret; match = of_match_device(mt7621_spi_match, &pdev->dev); if (!match) @@ -476,7 +477,11 @@ static int mt7621_spi_probe(struct platform_device *pdev) rs->pending_write = 0; dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq); - device_reset(&pdev->dev); + ret = device_reset(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "SPI reset failed!\n"); + return ret; + } mt7621_spi_reset(rs, 0); diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig index c91a56f77bcb2281e4b30b13c63e0e84c736a97e..192cc8d0853fdd00fda2eb92458026a3950b2bf0 100644 --- a/drivers/staging/olpc_dcon/Kconfig +++ b/drivers/staging/olpc_dcon/Kconfig @@ -2,6 +2,7 @@ config FB_OLPC_DCON tristate "One Laptop Per Child Display CONtroller support" depends on OLPC && FB depends on I2C + depends on BACKLIGHT_LCD_SUPPORT depends on (GPIO_CS5535 || GPIO_CS5535=n) select 
BACKLIGHT_CLASS_DEVICE help diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c index c85a805a1243169a6b8478240be85855a5ade4c8..a497ec1978721aef83165438a6a67d14b84537ea 100644 --- a/drivers/staging/pi433/pi433_if.c +++ b/drivers/staging/pi433/pi433_if.c @@ -1255,6 +1255,10 @@ static int pi433_probe(struct spi_device *spi) /* create cdev */ device->cdev = cdev_alloc(); + if (!device->cdev) { + dev_dbg(device->dev, "allocation of cdev failed"); + goto cdev_failed; + } device->cdev->owner = THIS_MODULE; cdev_init(device->cdev, &pi433_fops); retval = cdev_add(device->cdev, device->devt, 1); diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c index 2a48b09ea9ae36d5508e97c9338198bb49248316..470ea2c0c433acbb5a1ecc8c12f4d7110b1feef0 100644 --- a/drivers/staging/rtl8188eu/core/rtw_security.c +++ b/drivers/staging/rtl8188eu/core/rtw_security.c @@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe) pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; - crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); + crypto_ops = lib80211_get_crypto_ops("WEP"); if (!crypto_ops) return; @@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe) void *crypto_private = NULL; int status = _SUCCESS; const int keyindex = prxattrib->key_index; - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP"); char iv[4], icv[4]; if (!crypto_ops) { @@ -1292,7 +1292,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe) struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; void *crypto_private = NULL; u8 *key, *pframe = skb->data; - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP"); struct security_priv *psecuritypriv = &padapter->securitypriv; char iv[8], icv[8]; diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index 2130d78e0d9fad3da294f4cd078b71f94e10e54e..c6a5b62cb3639e9c059bf866f955dc20217d19a0 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c @@ -178,7 +178,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - rtw_alloc_hwxmits(padapter); + res = rtw_alloc_hwxmits(padapter); + if (res == _FAIL) + goto exit; rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); for (i = 0; i < 4; i++) @@ -776,7 +778,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN); memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN); - if (psta->qos_option) + if (psta && psta->qos_option) qos_option = true; } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { @@ -784,7 +786,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN); memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN); - if (psta->qos_option) + if (psta && psta->qos_option) qos_option = true; } else { RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", 
get_fwstate(pmlmepriv))); @@ -1502,7 +1504,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) return res; } -void rtw_alloc_hwxmits(struct adapter *padapter) +s32 rtw_alloc_hwxmits(struct adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; @@ -1511,6 +1513,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, sizeof(struct hw_xmit), GFP_KERNEL); + if (!pxmitpriv->hwxmits) + return _FAIL; hwxmits = pxmitpriv->hwxmits; @@ -1518,6 +1522,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; hwxmits[2] .sta_queue = &pxmitpriv->be_pending; hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; + return _SUCCESS; } void rtw_free_hwxmits(struct adapter *padapter) diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h index 788f59c74ea1e45fb7e598a8f83361b586647969..ba7e15fbde72d60ab9f5589756f024642359a82f 100644 --- a/drivers/staging/rtl8188eu/include/rtw_xmit.h +++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h @@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); -void rtw_alloc_hwxmits(struct adapter *padapter); +s32 rtw_alloc_hwxmits(struct adapter *padapter); void rtw_free_hwxmits(struct adapter *padapter); s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index bee3c3a7a7a999e00d2acb67c6340793ee003f08..72791920f8a7ba120a70e10385aef12e8f2632f1 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1158,9 +1158,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } sec_len = *(pos++); len -= 1; - if (sec_len > 0 && sec_len <= len) { + if (sec_len > 0 && + sec_len <= len && + sec_len <= 32) { ssid[ssid_index].SsidLength = sec_len; - memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); + memcpy(ssid[ssid_index].Ssid, pos, sec_len); ssid_index++; } pos += sec_len; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 28cbd6b3d26c39e09f5b8586756f22d83fd9b97a..55952dd883598798e5317a26fe61df33823725fb 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ @@ -69,7 +70,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf) phost_conf = pusbd->actconfig; pconf_desc = &phost_conf->desc; - phost_iface = &usb_intf->altsetting[0]; + phost_iface = usb_intf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces; @@ -347,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct 
dvobj_priv *dvobj, } padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); - if (!padapter->HalData) - DBG_88E("cant not alloc memory for HAL DATA\n"); + if (!padapter->HalData) { + DBG_88E("Failed to allocate memory for HAL data\n"); + goto free_adapter; + } /* step read_chip_version */ rtw_hal_read_chip_version(padapter); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index d2605158546bb54c2985ea1bf1c0dffa185ba63d..da73998bc5f703fa758c194d96341fced307a956 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -1627,14 +1627,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); skb_push(skb, priv->rtllib->tx_headroom); ret = _rtl92e_tx(dev, skb); - if (ret != 0) - kfree_skb(skb); if (queue_index != MGNT_QUEUE) { priv->rtllib->stats.tx_bytes += (skb->len - priv->rtllib->tx_headroom); priv->rtllib->stats.tx_packets++; } + + if (ret != 0) + kfree_skb(skb); } static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -2578,13 +2579,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) free_irq(dev->irq, dev); priv->irq = 0; } - free_rtllib(dev); if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } + + free_rtllib(dev); } else { priv = rtllib_priv(dev); } diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index e218b5c20642ddcd32cee0e11b997ef0d0f9289d..2066a1d9bc84e7240f508a33106678dd850b8902 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1467,7 +1467,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN); struct usb_device *udev = priv->udev; int pend; - int status; + int status, rt = -1; struct urb *tx_urb = NULL, *tx_urb_zero = NULL; unsigned int idx_pipe; @@ -1611,8 +1611,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) } if (bSend0Byte) { tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC); - if (!tx_urb_zero) - return -ENOMEM; + if (!tx_urb_zero) { + rt = -ENOMEM; + goto error; + } usb_fill_bulk_urb(tx_urb_zero, udev, usb_sndbulkpipe(udev, idx_pipe), &zero, 0, tx_zero_isr, dev); @@ -1622,7 +1624,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) "Error TX URB for zero byte %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status); - return -1; + goto error; } } netif_trans_update(dev); @@ -1633,7 +1635,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb) RT_TRACE(COMP_ERR, "Error TX URB %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status); - return -1; + +error: + dev_kfree_skb_any(skb); + usb_free_urb(tx_urb); + usb_free_urb(tx_urb_zero); + return rt; } static short rtl8192_usb_initendpoints(struct net_device *dev) diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c index baaa52f04560dc5a9361986a6a050b8fca7b77f8..52095086574f0efe87dcc5e7a8f4a04b5cdb5536 100644 --- a/drivers/staging/rtl8712/mlme_linux.c +++ b/drivers/staging/rtl8712/mlme_linux.c @@ -158,7 +158,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie) p = buff; p += sprintf(p, "ASSOCINFO(ReqIEs="); len = sec_ie[1] + 2; - len = (len < IW_CUSTOM_MAX) ? 
len : IW_CUSTOM_MAX - 1; + len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX; for (i = 0; i < len; i++) p += sprintf(p, "%02x", sec_ie[i]); p += sprintf(p, ")"); diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index b1dfe9f466191a78598488e5340c490a298b6019..fb092d4ec521992fcf88eea09f93c09d168dc471 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -129,47 +129,11 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf) kfree(pdrvcmd->pbuf); } -static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf) -{ - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); - struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - - /* invoke cmd->callback function */ - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); - return H2C_SUCCESS; -} - -static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) -{ - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); - struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - - /* invoke cmd->callback function */ - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); - return H2C_SUCCESS; -} - static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) { - u32 val; - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - if (pcmd->rsp && pcmd->rspsz > 0) - memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); + r8712_free_cmd_obj(pcmd); return H2C_SUCCESS; } @@ -233,14 +197,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter, pcmd_r = NULL; switch (pcmd->cmdcode) { - case GEN_CMD_CODE(_Read_MACREG): - read_macreg_hdl(padapter, (u8 *)pcmd); - pcmd_r = pcmd; - break; - case GEN_CMD_CODE(_Write_MACREG): - write_macreg_hdl(padapter, (u8 *)pcmd); - pcmd_r = pcmd; - break; case GEN_CMD_CODE(_Read_BBREG): read_bbreg_hdl(padapter, (u8 *)pcmd); break; diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h index 9181bb6b04c343f87d1ea0b3736a0f7a530cfce9..a101a0a50955a82e898e2af29bd305678edf9344 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.h +++ b/drivers/staging/rtl8712/rtl8712_cmd.h @@ -152,7 +152,7 @@ enum rtl8712_h2c_cmd { static struct _cmd_callback cmd_callback[] = { {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ {GEN_CMD_CODE(_Write_MACREG), NULL}, - {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, + {GEN_CMD_CODE(_Read_BBREG), NULL}, {GEN_CMD_CODE(_Write_BBREG), NULL}, {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index c3ff7c3e6681e3e328e6a6b788d5888dfc7187ef..2f490a4bf60ac8e1f294cc9bdc1b726a49e15c68 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -141,10 +141,91 @@ static inline void handle_group_key(struct ieee_param *param, } } -static noinline_for_stack char *translate_scan(struct _adapter *padapter, - struct iw_request_info *info, - struct wlan_network *pnetwork, - char *start, char *stop) +static noinline_for_stack char *translate_scan_wpa(struct 
iw_request_info *info, + struct wlan_network *pnetwork, + struct iw_event *iwe, + char *start, char *stop) +{ + /* parsing WPA/WPA2 IE */ + u8 buf[MAX_WPA_IE_LEN]; + u8 wpa_ie[255], rsn_ie[255]; + u16 wpa_len = 0, rsn_len = 0; + int n, i; + + r8712_get_sec_ie(pnetwork->network.IEs, + pnetwork->network.IELength, rsn_ie, &rsn_len, + wpa_ie, &wpa_len); + if (wpa_len > 0) { + memset(buf, 0, MAX_WPA_IE_LEN); + n = sprintf(buf, "wpa_ie="); + for (i = 0; i < wpa_len; i++) { + n += snprintf(buf + n, MAX_WPA_IE_LEN - n, + "%02x", wpa_ie[i]); + if (n >= MAX_WPA_IE_LEN) + break; + } + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVCUSTOM; + iwe->u.data.length = (u16)strlen(buf); + start = iwe_stream_add_point(info, start, stop, + iwe, buf); + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVGENIE; + iwe->u.data.length = (u16)wpa_len; + start = iwe_stream_add_point(info, start, stop, + iwe, wpa_ie); + } + if (rsn_len > 0) { + memset(buf, 0, MAX_WPA_IE_LEN); + n = sprintf(buf, "rsn_ie="); + for (i = 0; i < rsn_len; i++) { + n += snprintf(buf + n, MAX_WPA_IE_LEN - n, + "%02x", rsn_ie[i]); + if (n >= MAX_WPA_IE_LEN) + break; + } + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVCUSTOM; + iwe->u.data.length = strlen(buf); + start = iwe_stream_add_point(info, start, stop, + iwe, buf); + memset(iwe, 0, sizeof(*iwe)); + iwe->cmd = IWEVGENIE; + iwe->u.data.length = rsn_len; + start = iwe_stream_add_point(info, start, stop, iwe, + rsn_ie); + } + + return start; +} + +static noinline_for_stack char *translate_scan_wps(struct iw_request_info *info, + struct wlan_network *pnetwork, + struct iw_event *iwe, + char *start, char *stop) +{ + /* parsing WPS IE */ + u8 wps_ie[512]; + uint wps_ielen; + + if (r8712_get_wps_ie(pnetwork->network.IEs, + pnetwork->network.IELength, + wps_ie, &wps_ielen)) { + if (wps_ielen > 2) { + iwe->cmd = IWEVGENIE; + iwe->u.data.length = (u16)wps_ielen; + start = iwe_stream_add_point(info, start, stop, + iwe, wps_ie); + } + } + + return start; +} + +static char *translate_scan(struct _adapter *padapter, + struct iw_request_info *info, + struct wlan_network *pnetwork, + char *start, char *stop) { struct iw_event iwe; struct ieee80211_ht_cap *pht_capie; @@ -257,73 +338,11 @@ static noinline_for_stack char *translate_scan(struct _adapter *padapter, /* Check if we added any event */ if ((current_val - start) > iwe_stream_lcp_len(info)) start = current_val; - /* parsing WPA/WPA2 IE */ - { - u8 buf[MAX_WPA_IE_LEN]; - u8 wpa_ie[255], rsn_ie[255]; - u16 wpa_len = 0, rsn_len = 0; - int n; - - r8712_get_sec_ie(pnetwork->network.IEs, - pnetwork->network.IELength, rsn_ie, &rsn_len, - wpa_ie, &wpa_len); - if (wpa_len > 0) { - memset(buf, 0, MAX_WPA_IE_LEN); - n = sprintf(buf, "wpa_ie="); - for (i = 0; i < wpa_len; i++) { - n += snprintf(buf + n, MAX_WPA_IE_LEN - n, - "%02x", wpa_ie[i]); - if (n >= MAX_WPA_IE_LEN) - break; - } - memset(&iwe, 0, sizeof(iwe)); - iwe.cmd = IWEVCUSTOM; - iwe.u.data.length = (u16)strlen(buf); - start = iwe_stream_add_point(info, start, stop, - &iwe, buf); - memset(&iwe, 0, sizeof(iwe)); - iwe.cmd = IWEVGENIE; - iwe.u.data.length = (u16)wpa_len; - start = iwe_stream_add_point(info, start, stop, - &iwe, wpa_ie); - } - if (rsn_len > 0) { - memset(buf, 0, MAX_WPA_IE_LEN); - n = sprintf(buf, "rsn_ie="); - for (i = 0; i < rsn_len; i++) { - n += snprintf(buf + n, MAX_WPA_IE_LEN - n, - "%02x", rsn_ie[i]); - if (n >= MAX_WPA_IE_LEN) - break; - } - memset(&iwe, 0, sizeof(iwe)); - iwe.cmd = IWEVCUSTOM; - iwe.u.data.length = strlen(buf); - start = iwe_stream_add_point(info, start, stop, - 
&iwe, buf); - memset(&iwe, 0, sizeof(iwe)); - iwe.cmd = IWEVGENIE; - iwe.u.data.length = rsn_len; - start = iwe_stream_add_point(info, start, stop, &iwe, - rsn_ie); - } - } - { /* parsing WPS IE */ - u8 wps_ie[512]; - uint wps_ielen; + start = translate_scan_wpa(info, pnetwork, &iwe, start, stop); + + start = translate_scan_wps(info, pnetwork, &iwe, start, stop); - if (r8712_get_wps_ie(pnetwork->network.IEs, - pnetwork->network.IELength, - wps_ie, &wps_ielen)) { - if (wps_ielen > 2) { - iwe.cmd = IWEVGENIE; - iwe.u.data.length = (u16)wps_ielen; - start = iwe_stream_add_point(info, start, stop, - &iwe, wps_ie); - } - } - } /* Add quality statistics */ iwe.cmd = IWEVQUAL; rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi); diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index ac547ddd72d1df2d3374c6366266dbd7cebb3a26..d7e88d2a8b1bfd1806be3960fe62e393559c96ef 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -1358,7 +1358,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len) { u8 authmode = 0, match; - u8 sec_ie[255], uncst_oui[4], bkup_ie[255]; + u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255]; u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; uint ielength, cnt, remove_cnt; int iEntry; diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index 85eadddfaf06092e1525c762c9c0334418844d01..709d687180bf86cfd5cd76bf1deffbc412e8b89a 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -275,7 +275,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter) pdvobjpriv->padapter = padapter; padapter->EepromAddressSize = 6; - phost_iface = &pintf->altsetting[0]; + phost_iface = pintf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints; if (pusbd->speed == USB_SPEED_HIGH) { @@ -623,13 +623,13 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) if (pnetdev) { struct _adapter *padapter = netdev_priv(pnetdev); + unregister_netdev(pnetdev); /* will call netdev_close() */ usb_set_intfdata(pusb_intf, NULL); release_firmware(padapter->fw); /* never exit with a firmware callback pending */ wait_for_completion(&padapter->rtl8712_fw_ready); if (drvpriv.drv_registered) padapter->bSurpriseRemoved = true; - unregister_netdev(pnetdev); /* will call netdev_close() */ flush_scheduled_work(); udelay(1); /* Stop driver mlme relation timer */ diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 0952d15f6d40f66243cc8c91e7d109fb749886f2..ca6f1fa3466a15a71e5891d37d1225ca6d58600b 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -1566,7 +1566,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame) if (pstat->aid > 0) { DBG_871X(" old AID %d\n", pstat->aid); } else { - for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++) + for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++) if (pstapriv->sta_aid[pstat->aid - 1] == NULL) break; diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index edb678190b4bc9407fdc3360388bb86aed48668d..16291de5c0d99aedf26483aff158feb5cdf6ace9 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct 
adapter *padapter) } } - rtw_alloc_hwxmits(padapter); + res = rtw_alloc_hwxmits(padapter); + if (res == _FAIL) + goto exit; rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); for (i = 0; i < 4; i++) { @@ -2144,7 +2146,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) return res; } -void rtw_alloc_hwxmits(struct adapter *padapter) +s32 rtw_alloc_hwxmits(struct adapter *padapter) { struct hw_xmit *hwxmits; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; @@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); - if (pxmitpriv->hwxmits == NULL) { - DBG_871X("alloc hwxmits fail!...\n"); - return; - } + if (!pxmitpriv->hwxmits) + return _FAIL; hwxmits = pxmitpriv->hwxmits; @@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) } - + return _SUCCESS; } void rtw_free_hwxmits(struct adapter *padapter) diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c index 85077947b9b8d44b82a467a8e4cd5f86e5f5feb9..85aba8a503cd200b5fbfddc39a62932303767121 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c @@ -109,12 +109,12 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe, rx_bssid = get_hdr_bssid(wlanhdr); pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) && !pattrib->icv_err && !pattrib->crc_err && - !ether_addr_equal(rx_bssid, my_bssid)); + ether_addr_equal(rx_bssid, my_bssid)); rx_ra = get_ra(wlanhdr); my_hwaddr = myid(&padapter->eeprompriv); pkt_info.to_self = pkt_info.bssid_match && - !ether_addr_equal(rx_ra, my_hwaddr); + ether_addr_equal(rx_ra, my_hwaddr); pkt_info.is_beacon = pkt_info.bssid_match && diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h index bcc8dfa8e67287b80bd68ae6134133c5b0aac26c..9efb4dcb9d3a8f1d32544b3c8e9b5694da14a63d 100644 --- a/drivers/staging/rtl8723bs/include/ieee80211.h +++ b/drivers/staging/rtl8723bs/include/ieee80211.h @@ -850,18 +850,18 @@ enum ieee80211_state { #define IP_FMT "%pI4" #define IP_ARG(x) (x) -extern __inline int is_multicast_mac_addr(const u8 *addr) +static inline int is_multicast_mac_addr(const u8 *addr) { return ((addr[0] != 0xff) && (0x01 & addr[0])); } -extern __inline int is_broadcast_mac_addr(const u8 *addr) +static inline int is_broadcast_mac_addr(const u8 *addr) { return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); } -extern __inline int is_zero_mac_addr(const u8 *addr) +static inline int is_zero_mac_addr(const u8 *addr) { return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h index a75b668d09a61d28e2ba548aa994c3c11607ba01..021c72361fbb337dc9a5031e1280bbe66e5c53a8 100644 --- a/drivers/staging/rtl8723bs/include/rtw_xmit.h +++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h @@ -486,7 +486,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); -void rtw_alloc_hwxmits(struct adapter *padapter); +s32 rtw_alloc_hwxmits(struct adapter *padapter); void rtw_free_hwxmits(struct adapter *padapter); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 
b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index af2234798fa8e18f4fd2dcff394e6a9d9343b2e8..db553f2e4c0b835386330ec9773c219085b82c2f 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -1277,7 +1277,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); sinfo->tx_packets = psta->sta_stats.tx_pkts; - + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); } /* for Ad-Hoc/AP mode */ diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index c38298d960fffc18b3cca843ef1881de839a4629..4f120e72c7d2a5587c20162628ee1d491f9fa4ba 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev, exit: kfree(ptmp); - return 0; + return ret; } static int rtw_wx_write32(struct net_device *dev, diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index 6d02904de63f393d7eca2c6031ea24051993c880..49ea780f9f42a71adbb71abe95f1d1dbbf9f737c 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -17,18 +17,13 @@ static const struct sdio_device_id sdio_ids[] = { { SDIO_DEVICE(0x024c, 0x0523), }, + { SDIO_DEVICE(0x024c, 0x0525), }, { SDIO_DEVICE(0x024c, 0x0623), }, { SDIO_DEVICE(0x024c, 0x0626), }, { SDIO_DEVICE(0x024c, 0xb723), }, { /* end: all zeroes */ }, }; -static const struct acpi_device_id acpi_ids[] = { - {"OBDA8723", 0x0000}, - {} -}; - MODULE_DEVICE_TABLE(sdio, sdio_ids); -MODULE_DEVICE_TABLE(acpi, acpi_ids); static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id); static void rtw_dev_remove(struct sdio_func *func); diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c index 9930ed954abb2d8aa437a1784fbd3e3f83a043ca..4cc77b2016e1e45f14834820b429f7e8cda19b42 100644 --- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c +++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c @@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv, rtlpriv->phydm.internal = kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); + if (!rtlpriv->phydm.internal) + return 0; _rtl_phydm_init_com_info(rtlpriv, ic, params); diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index a403966148143cd052aff46a97be47e4c7c84436..c1ed52df05f089f3a0d06665d3379bc91397dbef 100644 --- a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c @@ -741,6 +741,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1_rsvd_page_loc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 869f40ebf1a7079dc6f0be3ce25f2ffa46b50df9..548a5e1cbe1f01a8187a79bb3d6c791b3e1d1a97 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -577,7 +577,7 @@ static u_long get_word(struct vc_data *vc) } attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr); buf[cnt++] = attr_ch; - while (tmpx < vc->vc_cols - 1) { + while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) { tmp_pos += 2; tmpx++; ch = get_char(vc, (u_short *)tmp_pos, &temp); diff --git 
a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index 947c79532e1004818eea7ef94b101ec93303e2b2..d5383974d40e82b18790edd9148f1f3e893edb91 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -208,12 +208,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, return -EINVAL; spin_lock_irqsave(&speakup_info.spinlock, flags); + synth_soft.alive = 1; while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); - if (!unicode) - synth_buffer_skip_nonlatin1(); - if (!synth_buffer_empty() || speakup_info.flushing) - break; + if (synth_current() == &synth_soft) { + if (!unicode) + synth_buffer_skip_nonlatin1(); + if (!synth_buffer_empty() || speakup_info.flushing) + break; + } spin_unlock_irqrestore(&speakup_info.spinlock, flags); if (fp->f_flags & O_NONBLOCK) { finish_wait(&speakup_event, &wait); @@ -233,6 +236,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ while (chars_sent <= count - bytes_per_ch) { + if (synth_current() != &synth_soft) + break; if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; @@ -329,7 +334,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait) poll_wait(fp, &speakup_event, wait); spin_lock_irqsave(&speakup_info.spinlock, flags); - if (!synth_buffer_empty() || speakup_info.flushing) + if (synth_current() == &synth_soft && + (!synth_buffer_empty() || speakup_info.flushing)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&speakup_info.spinlock, flags); return ret; diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index 7b3a16e1fa231e55e99e3b863ff022d19b3493bf..796ffcca43c1815c66504d9b9d76e0ebdd7b9b0a 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h @@ -72,6 +72,7 @@ int synth_request_region(unsigned long start, unsigned long n); int synth_release_region(unsigned long start, unsigned long n); int synth_add(struct spk_synth *in_synth); void synth_remove(struct spk_synth *in_synth); +struct spk_synth *synth_current(void); extern struct speakup_info_t speakup_info; diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index eac63aab81622cc447ee31d263f7ddab150e4b3b..ef9451174b710a10190e830384deed3a36b23312 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -47,9 +47,13 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) { struct spk_ldisc_data *ldisc_data; + if (tty != speakup_tty) + /* Somebody tried to use this line discipline outside speakup */ + return -ENODEV; + if (tty->ops->write == NULL) return -EOPNOTSUPP; - speakup_tty = tty; + ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL); if (!ldisc_data) @@ -57,7 +61,7 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) sema_init(&ldisc_data->sem, 0); ldisc_data->buf_free = true; - speakup_tty->disc_data = ldisc_data; + tty->disc_data = ldisc_data; return 0; } @@ -85,7 +89,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty, } if (!ldisc_data->buf_free) - /* ttyio_in will tty_schedule_flip */ + /* ttyio_in will tty_flip_buffer_push */ return 0; /* Make sure the consumer has read buf before we have seen @@ -177,9 +181,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) tty_unlock(tty); + mutex_lock(&speakup_tty_mutex); + speakup_tty = tty; ret = 
tty_set_ldisc(tty, N_SPEAKUP); if (ret) - pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + speakup_tty = NULL; + mutex_unlock(&speakup_tty_mutex); + + if (!ret) + /* Success */ + return 0; + + pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + + tty_lock(tty); + if (tty->ops->close) + tty->ops->close(tty, NULL); + tty_unlock(tty); + + tty_kclose(tty); return ret; } @@ -265,7 +285,8 @@ static void spk_ttyio_send_xchar(char ch) return; } - speakup_tty->ops->send_xchar(speakup_tty, ch); + if (speakup_tty->ops->send_xchar) + speakup_tty->ops->send_xchar(speakup_tty, ch); mutex_unlock(&speakup_tty_mutex); } @@ -277,7 +298,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) return; } - speakup_tty->ops->tiocmset(speakup_tty, set, clear); + if (speakup_tty->ops->tiocmset) + speakup_tty->ops->tiocmset(speakup_tty, set, clear); mutex_unlock(&speakup_tty_mutex); } @@ -300,7 +322,7 @@ static unsigned char ttyio_in(int timeout) mb(); ldisc_data->buf_free = true; /* Let TTY push more characters */ - tty_schedule_flip(speakup_tty->port); + tty_flip_buffer_push(speakup_tty->port); return rv; } diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 25f259ee4ffc74990e5a19c8560840ca9a59058e..3568bfb89912c3316d649b6c19223f4206936457 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth) } EXPORT_SYMBOL_GPL(synth_remove); +struct spk_synth *synth_current(void) +{ + return synth; +} +EXPORT_SYMBOL_GPL(synth_current); + short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index c04bdf070c87610c93ff8ab89962bd6908a64300..455082867246d84caeec1b5ee103f9b73574be8e 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -342,16 +342,13 @@ static void buffer_cb(struct vchiq_mmal_instance *instance, return; } else if (length == 0) { /* stream ended */ - if (buf) { - /* this should only ever happen if the port is - * disabled and there are buffers still queued + if (dev->capture.frame_count) { + /* empty buffer whilst capturing - expected to be an + * EOS, so grab another frame */ - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); - pr_debug("Empty buffer"); - } else if (dev->capture.frame_count) { - /* grab another frame */ if (is_capturing(dev)) { - pr_debug("Grab another frame"); + v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, + "Grab another frame"); vchiq_mmal_port_parameter_set( instance, dev->capture.camera_port, @@ -359,8 +356,14 @@ static void buffer_cb(struct vchiq_mmal_instance *instance, &dev->capture.frame_count, sizeof(dev->capture.frame_count)); } + if (vchiq_mmal_submit_buffer(instance, port, buf)) + v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, + "Failed to return EOS buffer"); } else { - /* signal frame completion */ + /* stopping streaming. 
+ * return buffer, and signal frame completion + */ + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); complete(&dev->capture.frame_cmplt); } } else { @@ -582,6 +585,7 @@ static void stop_streaming(struct vb2_queue *vq) int ret; unsigned long timeout; struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq); + struct vchiq_mmal_port *port = dev->capture.port; v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n", __func__, dev); @@ -605,12 +609,6 @@ static void stop_streaming(struct vb2_queue *vq) &dev->capture.frame_count, sizeof(dev->capture.frame_count)); - /* wait for last frame to complete */ - timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ); - if (timeout == 0) - v4l2_err(&dev->v4l2_dev, - "timed out waiting for frame completion\n"); - v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "disabling connection\n"); @@ -625,6 +623,21 @@ static void stop_streaming(struct vb2_queue *vq) ret); } + /* wait for all buffers to be returned */ + while (atomic_read(&port->buffers_with_vpu)) { + v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, + "%s: Waiting for buffers to be returned - %d outstanding\n", + __func__, atomic_read(&port->buffers_with_vpu)); + timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt, + HZ); + if (timeout == 0) { + v4l2_err(&dev->v4l2_dev, "%s: Timeout waiting for buffers to be returned - %d outstanding\n", + __func__, + atomic_read(&port->buffers_with_vpu)); + break; + } + } + if (disable_camera(dev) < 0) v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n"); } diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c index cff7b1e07153b951f397a5a789920cef979cb2a4..b688ebc0174059da6c1534e4b47615539a45195a 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/controls.c +++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c @@ -576,7 +576,7 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev, dev->colourfx.enable ? "true" : "false", dev->colourfx.u, dev->colourfx.v, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 0 : -EINVAL); } static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, @@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev, "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n", __func__, mmal_ctrl, ctrl->id, ctrl->val, ret, (ret == 0 ? 0 : -EINVAL)); - return (ret == 0 ? 0 : EINVAL); + return (ret == 0 ? 0 : -EINVAL); } static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev, diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c index 51e5b04ff0f58d6dffc79ded005977595c341c45..daa2b9656552900e50974773ae28125a4b2ed6de 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c @@ -162,7 +162,8 @@ struct vchiq_mmal_instance { void *bulk_scratch; struct idr context_map; - spinlock_t context_map_lock; + /* protect accesses to context_map */ + struct mutex context_map_lock; /* component to use next */ int component_idx; @@ -185,10 +186,10 @@ get_msg_context(struct vchiq_mmal_instance *instance) * that when we service the VCHI reply, we can look up what * message is being replied to. 
*/ - spin_lock(&instance->context_map_lock); + mutex_lock(&instance->context_map_lock); handle = idr_alloc(&instance->context_map, msg_context, 0, 0, GFP_KERNEL); - spin_unlock(&instance->context_map_lock); + mutex_unlock(&instance->context_map_lock); if (handle < 0) { kfree(msg_context); @@ -212,9 +213,9 @@ release_msg_context(struct mmal_msg_context *msg_context) { struct vchiq_mmal_instance *instance = msg_context->instance; - spin_lock(&instance->context_map_lock); + mutex_lock(&instance->context_map_lock); idr_remove(&instance->context_map, msg_context->handle); - spin_unlock(&instance->context_map_lock); + mutex_unlock(&instance->context_map_lock); kfree(msg_context); } @@ -240,6 +241,8 @@ static void buffer_work_cb(struct work_struct *work) struct mmal_msg_context *msg_context = container_of(work, struct mmal_msg_context, u.bulk.work); + atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu); + msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance, msg_context->u.bulk.port, msg_context->u.bulk.status, @@ -288,8 +291,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance, /* store length */ msg_context->u.bulk.buffer_used = rd_len; - msg_context->u.bulk.mmal_flags = - msg->u.buffer_from_host.buffer_header.flags; msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts; msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts; @@ -380,6 +381,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance, /* initialise work structure ready to schedule callback */ INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb); + atomic_inc(&port->buffers_with_vpu); + /* prep the buffer from host message */ memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */ @@ -448,6 +451,9 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance, return; } + msg_context->u.bulk.mmal_flags = + msg->u.buffer_from_host.buffer_header.flags; + if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) { /* message reception had an error */ pr_warn("error %d in reply\n", msg->h.status); @@ -1324,16 +1330,6 @@ static int port_enable(struct vchiq_mmal_instance *instance, if (port->enabled) return 0; - /* ensure there are enough buffers queued to cover the buffer headers */ - if (port->buffer_cb) { - hdr_count = 0; - list_for_each(buf_head, &port->buffers) { - hdr_count++; - } - if (hdr_count < port->current_buffer.num) - return -ENOSPC; - } - ret = port_action_port(instance, port, MMAL_MSG_PORT_ACTION_TYPE_ENABLE); if (ret) @@ -1854,7 +1850,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance) instance->bulk_scratch = vmalloc(PAGE_SIZE); - spin_lock_init(&instance->context_map_lock); + mutex_init(&instance->context_map_lock); idr_init_base(&instance->context_map, 1); params.callback_param = instance; diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h index 22b839ecd5f02471f3cb2918436529d05d595064..b0ee1716525b4776e51233249eeef3b317742e60 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h +++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h @@ -71,6 +71,9 @@ struct vchiq_mmal_port { struct list_head buffers; /* lock to serialise adding and removing buffers from list */ spinlock_t slock; + + /* Count of buffers the VPU has yet to return */ + atomic_t buffers_with_vpu; /* callback on buffer completion */ vchiq_mmal_buffer_cb buffer_cb; /* callback context */ diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 
b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index e767209030642dcc0ca48f0037f0b7e93d1f0e27..3bece6b86831eb148e9027a1b06a9bb8ebfb5478 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c @@ -208,6 +208,9 @@ vchiq_platform_init_state(VCHIQ_STATE_T *state) struct vchiq_2835_state *platform_state; state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL); + if (!state->platform_state) + return VCHIQ_ERROR; + platform_state = (struct vchiq_2835_state *)state->platform_state; platform_state->inited = 1; @@ -407,9 +410,18 @@ create_pagelist(char __user *buf, size_t count, unsigned short type) int dma_buffers; dma_addr_t dma_addr; + if (count >= INT_MAX - PAGE_SIZE) + return NULL; + offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1)); num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE); + if (num_pages > (SIZE_MAX - sizeof(PAGELIST_T) - + sizeof(struct vchiq_pagelist_info)) / + (sizeof(u32) + sizeof(pages[0]) + + sizeof(struct scatterlist))) + return NULL; + pagelist_size = sizeof(PAGELIST_T) + (num_pages * sizeof(u32)) + (num_pages * sizeof(pages[0]) + diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index bc05c69383b8598ad0b7a5978f89b4dd55710a93..fe431302a030b08d6a1cf4da3fa2e618db066a86 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1787,6 +1787,7 @@ vchiq_compat_ioctl_await_completion(struct file *file, struct vchiq_await_completion32 args32; struct vchiq_completion_data32 completion32; unsigned int *msgbufcount32; + unsigned int msgbufcount_native; compat_uptr_t msgbuf32; void *msgbuf; void **msgbufptr; @@ -1898,7 +1899,11 @@ vchiq_compat_ioctl_await_completion(struct file *file, sizeof(completion32))) return -EFAULT; - args32.msgbufcount--; + if (get_user(msgbufcount_native, &args->msgbufcount)) + return -EFAULT; + + if (!msgbufcount_native) + args32.msgbufcount--; msgbufcount32 = &((struct vchiq_await_completion32 __user *)arg)->msgbufcount; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 7642ced3143644c01484ca9dbd5617d7fa96ddfc..63ce567eb6b75af820f39e0cc70be242a6020ba5 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -2537,6 +2537,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero, local->debug[DEBUG_ENTRIES] = DEBUG_MAX; status = vchiq_platform_init_state(state); + if (status != VCHIQ_SUCCESS) + return VCHIQ_ERROR; /* bring up slot handler thread diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 1ab0e8562d403b39cc73ba93e9a48e07238fd86c..76f434c1c088a52226a88bfd92c9a89fe62f71ff 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1040,8 +1040,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) return; } - MACvIntDisable(priv->PortOffset); - spin_lock_irqsave(&priv->lock, flags); /* Read low level stats */ @@ -1129,8 +1127,6 @@ static void vnt_interrupt_process(struct vnt_private *priv) } spin_unlock_irqrestore(&priv->lock, flags); - - MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); } static void vnt_interrupt_work(struct work_struct *work) @@ 
-1140,14 +1136,17 @@ static void vnt_interrupt_work(struct work_struct *work) if (priv->vif) vnt_interrupt_process(priv); + + MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE); } static irqreturn_t vnt_interrupt(int irq, void *arg) { struct vnt_private *priv = arg; - if (priv->vif) - schedule_work(&priv->interrupt_work); + schedule_work(&priv->interrupt_work); + + MACvIntDisable(priv->PortOffset); return IRQ_HANDLED; } @@ -1756,8 +1755,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) priv->hw->max_signal = 100; - if (vnt_init(priv)) + if (vnt_init(priv)) { + device_free_info(priv); return -ENODEV; + } device_print_info(priv); pci_set_drvdata(pcid, priv); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index ccafcc2c87ac980da5f70e52cfa89ad41a0cdf45..70433f756d8e1ff10b8f0e113d167f058019ccbf 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv) kfree(priv->int_buf.data_buf); } -static bool vnt_alloc_bufs(struct vnt_private *priv) +static int vnt_alloc_bufs(struct vnt_private *priv) { + int ret = 0; struct vnt_usb_send_context *tx_context; struct vnt_rcb *rcb; int ii; for (ii = 0; ii < priv->num_tx_context; ii++) { tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL); - if (!tx_context) + if (!tx_context) { + ret = -ENOMEM; goto free_tx; + } priv->tx_context[ii] = tx_context; tx_context->priv = priv; @@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) /* allocate URBs */ tx_context->urb = usb_alloc_urb(0, GFP_KERNEL); - if (!tx_context->urb) + if (!tx_context->urb) { + ret = -ENOMEM; goto free_tx; + } tx_context->in_use = false; } for (ii = 0; ii < priv->num_rcb; ii++) { priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL); - if (!priv->rcb[ii]) + if (!priv->rcb[ii]) { + ret = -ENOMEM; goto free_rx_tx; + } rcb = priv->rcb[ii]; @@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) /* allocate URBs */ rcb->urb = usb_alloc_urb(0, GFP_KERNEL); - if (!rcb->urb) + if (!rcb->urb) { + ret = -ENOMEM; goto free_rx_tx; + } rcb->skb = dev_alloc_skb(priv->rx_buf_sz); - if (!rcb->skb) + if (!rcb->skb) { + ret = -ENOMEM; goto free_rx_tx; + } rcb->in_use = false; /* submit rx urb */ - if (vnt_submit_rx_urb(priv, rcb)) + ret = vnt_submit_rx_urb(priv, rcb); + if (ret) goto free_rx_tx; } priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!priv->interrupt_urb) + if (!priv->interrupt_urb) { + ret = -ENOMEM; goto free_rx_tx; + } priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); if (!priv->int_buf.data_buf) { - usb_free_urb(priv->interrupt_urb); - goto free_rx_tx; + ret = -ENOMEM; + goto free_rx_tx_urb; } - return true; + return 0; +free_rx_tx_urb: + usb_free_urb(priv->interrupt_urb); free_rx_tx: vnt_free_rx_bufs(priv); - free_tx: vnt_free_tx_bufs(priv); - - return false; + return ret; } static void vnt_tx_80211(struct ieee80211_hw *hw, diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 3b8d237decbf179b7016242583909fc46146c99c..25798119426b383505d056be2a3915c6163d6d02 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -649,17 +649,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif) goto fail_locks; } - if (wl->gpio_irq && init_irq(dev)) { - ret = -EIO; - goto fail_locks; - } - ret = wlan_initialize_threads(dev); if (ret < 0) { ret = -EIO; goto 
fail_wilc_wlan; } + if (wl->gpio_irq && init_irq(dev)) { + ret = -EIO; + goto fail_threads; + } + if (!wl->dev_irq_num && wl->hif_func->enable_interrupt && wl->hif_func->enable_interrupt(wl)) { @@ -715,7 +715,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif) fail_irq_init: if (wl->dev_irq_num) deinit_irq(dev); - +fail_threads: wlan_deinitialize_threads(dev); fail_wilc_wlan: wilc_wlan_cleanup(dev); @@ -1090,8 +1090,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, vif->wilc = *wilc; vif->ndev = ndev; wl->vif[i] = vif; - wl->vif_num = i; - vif->idx = wl->vif_num; + wl->vif_num = i + 1; + vif->idx = i; ndev->netdev_ops = &wilc_netdev_ops; diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c index b2080d8b801f872496afe36a2ec2a1ee29b7febc..e52c3bdeaf04b1e0a83a1c8a65a09ed29d284d7b 100644 --- a/drivers/staging/wilc1000/wilc_sdio.c +++ b/drivers/staging/wilc1000/wilc_sdio.c @@ -831,6 +831,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status) if (!g_sdio.irq_gpio) { int i; + cmd.read_write = 0; cmd.function = 1; cmd.address = 0x04; cmd.data = 0; diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index d4cf09b11e3324503e1323bf6eb07191b78926f1..095df245ced5ab9b3b2b30df3cf81a8ce784b489 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -476,10 +476,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, /* Set the encryption - we only support wep */ if (is_wep) { if (sme->key) { - if (sme->key_idx >= NUM_WEPKEYS) { - err = -EINVAL; - goto exit; - } + if (sme->key_idx >= NUM_WEPKEYS) + return -EINVAL; result = prism2_domibset_uint32(wlandev, DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID, diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 16f7dd266e3b18961c56a298ecbc3ff49323e3e0..767ec8184adfee1451c8a6d35bc274326f60d030 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3119,7 +3119,9 @@ static void hfa384x_usbin_callback(struct urb *urb) break; } + /* Save values from the RX URB before reposting overwrites it. */ urb_status = urb->status; + usbin = (union hfa384x_usbin *)urb->transfer_buffer; if (action != ABORT) { /* Repost the RX URB */ @@ -3136,7 +3138,6 @@ static void hfa384x_usbin_callback(struct urb *urb) /* Note: the check of the sw_support field, the type field doesn't * have bit 12 set like the docs suggest. 
*/ - usbin = (union hfa384x_usbin *)urb->transfer_buffer; type = le16_to_cpu(usbin->type); if (HFA384x_USB_ISRXFRM(type)) { if (action == HANDLE) { diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 8de16016b6de90441c2892367f1b1d9b949382c3..d46eee3698640acc640d162e7dd5f6629cc0365a 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -598,9 +598,12 @@ static void cxgbit_free_cdev_np(struct cxgbit_np *cnp) mutex_unlock(&cdev_list_lock); } +static void __cxgbit_free_conn(struct cxgbit_sock *csk); + void cxgbit_free_np(struct iscsi_np *np) { struct cxgbit_np *cnp = np->np_context; + struct cxgbit_sock *csk, *tmp; cnp->com.state = CSK_STATE_DEAD; if (cnp->com.cdev) @@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np) else cxgbit_free_all_np(cnp); + spin_lock_bh(&cnp->np_accept_lock); + list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) { + list_del_init(&csk->accept_node); + __cxgbit_free_conn(csk); + } + spin_unlock_bh(&cnp->np_accept_lock); + np->np_context = NULL; cxgbit_put_cnp(cnp); } @@ -631,8 +641,11 @@ static void cxgbit_send_halfclose(struct cxgbit_sock *csk) static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb) { + struct cxgbit_sock *csk = handle; + pr_debug("%s cxgbit_device %p\n", __func__, handle); kfree_skb(skb); + cxgbit_put_csk(csk); } static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb) @@ -705,9 +718,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk) csk->tid, 600, __func__); } -void cxgbit_free_conn(struct iscsi_conn *conn) +static void __cxgbit_free_conn(struct cxgbit_sock *csk) { - struct cxgbit_sock *csk = conn->context; + struct iscsi_conn *conn = csk->conn; bool release = false; pr_debug("%s: state %d\n", @@ -716,7 +729,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn) spin_lock_bh(&csk->lock); switch (csk->com.state) { case CSK_STATE_ESTABLISHED: - if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { + if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) { csk->com.state = CSK_STATE_CLOSING; cxgbit_send_halfclose(csk); } else { @@ -741,6 +754,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn) cxgbit_put_csk(csk); } +void cxgbit_free_conn(struct iscsi_conn *conn) +{ + __cxgbit_free_conn(conn->context); +} + static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) { csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - @@ -803,6 +821,7 @@ void _cxgbit_free_csk(struct kref *kref) spin_unlock_bh(&cdev->cskq.lock); cxgbit_free_skb(csk); + cxgbit_put_cnp(csk->cnp); cxgbit_put_cdev(cdev); kfree(csk); @@ -1190,7 +1209,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) rpl5->opt0 = cpu_to_be64(opt0); rpl5->opt2 = cpu_to_be32(opt2); set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); - t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard); + t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard); cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); } @@ -1351,6 +1370,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb) goto rel_skb; } + cxgbit_get_cnp(cnp); cxgbit_get_cdev(cdev); spin_lock(&cdev->cskq.lock); @@ -1812,7 +1832,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) while (credits) { struct sk_buff *p = cxgbit_sock_peek_wr(csk); - const u32 csum = (__force u32)p->csum; + u32 csum; if (unlikely(!p)) { pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", @@ -1821,6 +1841,7 @@ static void 
cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) break; } + csum = (__force u32)p->csum; if (unlikely(credits < csum)) { pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", csk, csk->tid, diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index f3f8856bfb68e8446ad19a0bf8156217f42cacab..c011c826fc26d70c523c82e3f6c0cbc152e15c6e 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -58,6 +58,7 @@ static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi) return ERR_PTR(-ENOMEM); kref_init(&cdev->kref); + spin_lock_init(&cdev->np_lock); cdev->lldi = *lldi; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index cc756a123fd8c33f8ccfcad75edd783efb15698d..58ccded1be857e5401eaab1926e0802de918f0b3 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -492,8 +492,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp); void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { spin_lock_bh(&conn->cmd_lock); - if (!list_empty(&cmd->i_conn_node) && - !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) + if (!list_empty(&cmd->i_conn_node)) list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); @@ -1381,14 +1380,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } @@ -4041,13 +4053,23 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_splice_init(&conn->conn_cmd_list, &tmp_list); - list_for_each_entry(cmd, &tmp_list, i_conn_node) { + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { struct se_cmd *se_cmd = &cmd->se_cmd; if (se_cmd->se_tfo != NULL) { - spin_lock(&se_cmd->t_state_lock); - se_cmd->transport_state |= CMD_T_FABRIC_STOP; - spin_unlock(&se_cmd->t_state_lock); + spin_lock_irq(&se_cmd->t_state_lock); + if (se_cmd->transport_state & CMD_T_ABORTED) { + /* + * LIO's abort path owns the cleanup for this, + * so put it back on the list and let + * aborted_task handle it. 
+ */ + list_move_tail(&cmd->i_conn_node, + &conn->conn_cmd_list); + } else { + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + } + spin_unlock_irq(&se_cmd->t_state_lock); } } spin_unlock_bh(&conn->cmd_lock); @@ -4275,30 +4297,37 @@ int iscsit_close_connection( if (!atomic_read(&sess->session_reinstatement) && atomic_read(&sess->session_fall_back_to_erl0)) { spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); iscsit_close_session(sess); return 0; } else if (atomic_read(&sess->session_logout)) { pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); sess->session_state = TARG_SESS_STATE_FREE; - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } else { pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); sess->session_state = TARG_SESS_STATE_FAILED; - if (!atomic_read(&sess->session_continuation)) { - spin_unlock_bh(&sess->conn_lock); + if (!atomic_read(&sess->session_continuation)) iscsit_start_time2retain_handler(sess); - } else - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } @@ -4404,9 +4433,9 @@ static void iscsit_logout_post_handler_closesession( complete(&conn->conn_logout_comp); iscsit_dec_conn_usage_count(conn); + atomic_set(&sess->session_close, 1); iscsit_stop_session(sess, sleep, sleep); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); } static void iscsit_logout_post_handler_samecid( @@ -4541,49 +4570,6 @@ void iscsit_fail_session(struct iscsi_session *sess) sess->session_state = TARG_SESS_STATE_FAILED; } -int iscsit_free_session(struct iscsi_session *sess) -{ - u16 conn_count = atomic_read(&sess->nconn); - struct iscsi_conn *conn, *conn_tmp = NULL; - int is_last; - - spin_lock_bh(&sess->conn_lock); - atomic_set(&sess->sleep_on_sess_wait_comp, 1); - - list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, - conn_list) { - if (conn_count == 0) - break; - - if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { - is_last = 1; - } else { - iscsit_inc_conn_usage_count(conn_tmp); - is_last = 0; - } - iscsit_inc_conn_usage_count(conn); - - spin_unlock_bh(&sess->conn_lock); - iscsit_cause_connection_reinstatement(conn, 1); - spin_lock_bh(&sess->conn_lock); - - iscsit_dec_conn_usage_count(conn); - if (is_last == 0) - iscsit_dec_conn_usage_count(conn_tmp); - - conn_count--; - } - - if (atomic_read(&sess->nconn)) { - spin_unlock_bh(&sess->conn_lock); - wait_for_completion(&sess->session_wait_comp); - } else - spin_unlock_bh(&sess->conn_lock); - - iscsit_close_session(sess); - return 0; -} - void iscsit_stop_session( struct iscsi_session *sess, int session_sleep, @@ -4594,8 +4580,6 @@ void iscsit_stop_session( int is_last; spin_lock_bh(&sess->conn_lock); - if (session_sleep) - atomic_set(&sess->sleep_on_sess_wait_comp, 1); if (connection_sleep) { list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, @@ -4653,12 +4637,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) spin_lock(&sess->conn_lock); if 
(atomic_read(&sess->session_fall_back_to_erl0) || atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); continue; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); list_move_tail(&se_sess->sess_list, &free_list); @@ -4668,7 +4655,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; - iscsit_free_session(sess); + list_del_init(&se_sess->sess_list); + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); session_count++; } diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 48bac0acf8c76e055c022bc2b74423cca50b04bf..11a481cf6eadefabc14c46607df41a513f400bc2 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *); extern int iscsit_close_connection(struct iscsi_conn *); extern int iscsit_close_session(struct iscsi_session *); extern void iscsit_fail_session(struct iscsi_session *); -extern int iscsit_free_session(struct iscsi_session *); extern void iscsit_stop_session(struct iscsi_session *, int, int); extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 4e680d753941f71ea299d87b6c0cd18fc307b50f..b6bf605fa5c157bf9abdd6a5c3ed3014820d1314 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -78,7 +78,7 @@ static int chap_check_algorithm(const char *a_str) if (!token) goto out; - if (!strncmp(token, "5", 1)) { + if (!strcmp(token, "5")) { pr_debug("Selected MD5 Algorithm\n"); kfree(orig); return CHAP_DIGEST_MD5; @@ -89,6 +89,12 @@ static int chap_check_algorithm(const char *a_str) return CHAP_DIGEST_UNKNOWN; } +static void chap_close(struct iscsi_conn *conn) +{ + kfree(conn->auth_protocol); + conn->auth_protocol = NULL; +} + static struct iscsi_chap *chap_server_open( struct iscsi_conn *conn, struct iscsi_node_auth *auth, @@ -126,7 +132,7 @@ static struct iscsi_chap *chap_server_open( case CHAP_DIGEST_UNKNOWN: default: pr_err("Unsupported CHAP_A value\n"); - kfree(conn->auth_protocol); + chap_close(conn); return NULL; } @@ -141,19 +147,13 @@ static struct iscsi_chap *chap_server_open( * Generate Challenge. 
*/ if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { - kfree(conn->auth_protocol); + chap_close(conn); return NULL; } return chap; } -static void chap_close(struct iscsi_conn *conn) -{ - kfree(conn->auth_protocol); - conn->auth_protocol = NULL; -} - static int chap_server_compute_md5( struct iscsi_conn *conn, struct iscsi_node_auth *auth, diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 95d0a22b2ad61d362600a2df0c7fc80b98269c03..d25cadc4f4f11f7f476b127c8086c53064d4adcb 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1501,20 +1501,23 @@ static void lio_tpg_close_session(struct se_session *se_sess) spin_lock(&sess->conn_lock); if (atomic_read(&sess->session_fall_back_to_erl0) || atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); spin_unlock_bh(&se_tpg->session_lock); return; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); iscsit_stop_session(sess, 1, 1); - iscsit_close_session(sess); + iscsit_dec_session_usage_count(sess); } static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bb90c80ff3889bdde58df0877a6f7c603bc3eb05..db93bd0a9b886b725ab44e69ac6db2d42997ba7f 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -164,6 +164,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) spin_lock(&sess_p->conn_lock); if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess_p->conn_lock); continue; @@ -174,6 +175,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) (sess_p->sess_ops->SessionType == sessiontype))) { atomic_set(&sess_p->session_reinstatement, 1); atomic_set(&sess_p->session_fall_back_to_erl0, 1); + atomic_set(&sess_p->session_close, 1); spin_unlock(&sess_p->conn_lock); iscsit_inc_session_usage_count(sess_p); iscsit_stop_time2retain_timer(sess_p); @@ -198,7 +200,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) if (sess->session_state == TARG_SESS_STATE_FAILED) { spin_unlock_bh(&sess->conn_lock); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } spin_unlock_bh(&sess->conn_lock); @@ -206,7 +207,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) iscsit_stop_session(sess, 1, 1); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } @@ -494,6 +494,7 @@ static int iscsi_login_non_zero_tsih_s2( sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) continue; if (!memcmp(sess_p->isid, pdu->isid, 6) && @@ -1182,7 +1183,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool 
zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1200,7 +1201,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. @@ -1440,8 +1440,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d01c6dbeb31ffb1201bb2453a822c..fc95e6150253f6ed744bb9ae955960b8bb1a3230 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8a5e8d17a942620d8f17bb519bbb4fffd369e494..5db8842a8026534942876bf372dc757d668d149e 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -554,12 +554,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 101d62105c9325d939ff86aa20091266dea15827..f3671ffdf14953530e39523244f094a5f96b44e8 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -451,6 +451,9 @@ static bool iscsit_tpg_check_network_portal( break; } spin_unlock(&tpg->tpg_np_lock); + + if (match) + break; } spin_unlock(&tiqn->tiqn_tpg_lock); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 47b5ef153135cbbe89d8a1395b75ff94054ef6f4..f990aae1d6213c3b0d3384d468295c66a798979d 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -164,7 +164,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) struct se_session *se_sess = se_cmd->se_sess; struct se_node_acl *nacl = se_sess->se_node_acl; struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - unsigned long flags; rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); @@ -195,10 +194,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); - spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, 
flags); - list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); - spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); - return 0; } EXPORT_SYMBOL(transport_lookup_tmr_lun); @@ -1128,27 +1123,6 @@ passthrough_parse_cdb(struct se_cmd *cmd, struct se_device *dev = cmd->se_dev; unsigned int size; - /* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. - */ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } - /* * For REPORT LUNS we always need to emulate the response, for everything * else, pass it up. diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index ce1321a5cb7bfe28320baf0fe64641eae1477f6a..854b2bcca7c1a46f213d9d8bd31aa4fb68028626 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -514,8 +514,8 @@ iblock_execute_write_same(struct se_cmd *cmd) } /* Always in 512 byte units for Linux/Block */ - block_lba += sg->length >> IBLOCK_LBA_SHIFT; - sectors -= 1; + block_lba += sg->length >> SECTOR_SHIFT; + sectors -= sg->length >> SECTOR_SHIFT; } iblock_submit_bios(&list); @@ -757,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } /* Always in 512 byte units for Linux/Block */ - block_lba += sg->length >> IBLOCK_LBA_SHIFT; + block_lba += sg->length >> SECTOR_SHIFT; sg_num--; } diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 9cc3843404d44cf08a60d1e6e12fc006061ca743..cefc641145b3be44f6aaa76e2627820bb9a61208 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -9,7 +9,6 @@ #define IBLOCK_VERSION "4.0" #define IBLOCK_MAX_CDBS 16 -#define IBLOCK_LBA_SHIFT 9 struct iblock_req { refcount_t pending; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 10db5656fd5dcb8e95769a922223b8e88cf23983..949879f2f1d15bc06f47b7ca35fb3a49e6fc3833 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3742,6 +3742,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) spin_unlock(&dev->t10_pr.registration_lock); put_unaligned_be32(add_len, &buf[4]); + target_set_cmd_data_length(cmd, 8 + add_len); transport_kunmap_data_sg(cmd); @@ -3760,7 +3761,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) struct t10_pr_registration *pr_reg; unsigned char *buf; u64 pr_res_key; - u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. 
*/ + u32 add_len = 0; if (cmd->data_length < 8) { pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" @@ -3778,8 +3779,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) pr_reg = dev->dev_pr_res_holder; if (pr_reg) { /* - * Set the hardcoded Additional Length + * Set the Additional Length to 16 when a reservation is held */ + add_len = 16; put_unaligned_be32(add_len, &buf[4]); if (cmd->data_length < 22) @@ -3815,6 +3817,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) (pr_reg->pr_res_type & 0x0f); } + target_set_cmd_data_length(cmd, 8 + add_len); + err: spin_unlock(&dev->dev_reservation_lock); transport_kunmap_data_sg(cmd); @@ -3833,7 +3837,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; - u16 add_len = 8; /* Hardcoded to 8. */ + u16 len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" @@ -3845,7 +3849,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - put_unaligned_be16(add_len, &buf[0]); + put_unaligned_be16(len, &buf[0]); buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ @@ -3874,6 +3878,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + target_set_cmd_data_length(cmd, len); + transport_kunmap_data_sg(cmd); return 0; @@ -4034,6 +4040,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Set ADDITIONAL_LENGTH */ put_unaligned_be32(add_len, &buf[4]); + target_set_cmd_data_length(cmd, 8 + add_len); transport_kunmap_data_sg(cmd); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 47d76c862014732b7da469dc35e74ee27cc223de..02c4e3beb2640dc56403d9aef63968b3b7b68134 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -970,6 +970,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, return 0; fail: + if (bio) + bio_put(bio); + while (req->bio) { + bio = req->bio; + req->bio = bio->bi_next; + bio_put(bio); + } + req->biotail = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index ebac2b49b9c6e503173f5178d20907dd626820cf..af9b038da3ba6aa04d594125a08033eabd5a9d36 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -38,7 +38,7 @@ #include "target_core_alua.h" static sense_reason_t -sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); static sense_reason_t @@ -292,14 +292,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) } static sense_reason_t -sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) { struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); sense_reason_t ret; - if ((flags[0] & 0x04) || (flags[0] & 0x02)) { + if ((flags & 0x04) || (flags & 0x02)) { 
pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); @@ -321,7 +321,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o } /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ - if (flags[0] & 0x10) { + if (flags & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); return TCM_INVALID_CDB_FIELD; } @@ -329,7 +329,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. */ - if (flags[0] & 0x08) { + if (flags & 0x08) { if (!ops->execute_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE; @@ -344,7 +344,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o if (!ops->execute_write_same) return TCM_UNSUPPORTED_SCSI_OPCODE; - ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); + ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); if (ret) return ret; @@ -702,10 +702,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ } static sense_reason_t -sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) { - u8 protect = cdb[1] >> 5; int sp_ops = cmd->se_sess->sup_prot_ops; int pi_prot_type = dev->dev_attrib.pi_prot_type; bool fabric_prot = false; @@ -753,7 +752,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, /* Fallthrough */ default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " - "PROTECT: 0x%02x\n", cdb[0], protect); + "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); return TCM_INVALID_CDB_FIELD; } @@ -828,7 +827,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -842,7 +841,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -856,7 +855,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret; @@ -877,7 +876,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -891,7 +890,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret; @@ -965,7 +964,7 @@ sbc_parse_cdb(struct se_cmd 
*cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[12]); - ret = sbc_setup_write_same(cmd, &cdb[10], ops); + ret = sbc_setup_write_same(cmd, cdb[10], ops); if (ret) return ret; break; @@ -1064,7 +1063,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[2]); - ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; @@ -1082,7 +1081,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) * Follow sbcr26 with WRITE_SAME (10) and check for the existence * of byte 1 bit 3 UNMAP instead of original reserved field */ - ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index cb0461a10808067f8d218d9d9c4c0f4cf02f9855..93424db5f002b5ce24922ac415b873bb979eaa87 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) buf[7] = 0x2; /* CmdQue=1 */ - memcpy(&buf[8], "LIO-ORG ", 8); - memset(&buf[16], 0x20, 16); + /* + * ASCII data fields described as being left-aligned shall have any + * unused bytes at the end of the field (i.e., highest offset) and the + * unused bytes shall be filled with ASCII space characters (20h). + */ + memset(&buf[8], 0x20, 8 + 16 + 4); + memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1); memcpy(&buf[16], dev->t10_wwn.model, - min_t(size_t, strlen(dev->t10_wwn.model), 16)); + strnlen(dev->t10_wwn.model, 16)); memcpy(&buf[32], dev->t10_wwn.revision, - min_t(size_t, strlen(dev->t10_wwn.revision), 4)); + strnlen(dev->t10_wwn.revision, 4)); buf[4] = 31; /* Set additional length to 31 */ return 0; @@ -251,7 +256,9 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) buf[off] = 0x2; /* ASCII */ buf[off+1] = 0x1; /* T10 Vendor ID */ buf[off+2] = 0x0; - memcpy(&buf[off+4], "LIO-ORG", 8); + /* left align Vendor ID and pad with spaces */ + memset(&buf[off+4], 0x20, 8); + memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1); /* Extra Byte for NULL Terminator */ id_len++; /* Identifier Length */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 86c0156e6c884a3ea4e18fb9455e2b2b26be56cb..7bd863e2c05b5a0638c58bafcf4100381ec410e4 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -224,19 +224,28 @@ void transport_subsystem_check_init(void) sub_api_initialized = 1; } +static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) +{ + struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); + + wake_up(&sess->cmd_list_wq); +} + /** * transport_init_session - initialize a session object * @se_sess: Session object pointer. * * The caller must have zero-initialized @se_sess before calling this function. 
*/ -void transport_init_session(struct se_session *se_sess) +int transport_init_session(struct se_session *se_sess) { INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list); spin_lock_init(&se_sess->sess_cmd_lock); init_waitqueue_head(&se_sess->cmd_list_wq); + return percpu_ref_init(&se_sess->cmd_count, + target_release_sess_cmd_refcnt, 0, GFP_KERNEL); } EXPORT_SYMBOL(transport_init_session); @@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session); struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; + int ret; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); if (!se_sess) { @@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } - transport_init_session(se_sess); + ret = transport_init_session(se_sess); + if (ret < 0) { + kmem_cache_free(se_sess_cache, se_sess); + return ERR_PTR(ret); + } se_sess->sup_prot_ops = sup_prot_ops; return se_sess; @@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess) sbitmap_queue_free(&se_sess->sess_tag_pool); kvfree(se_sess->sess_cmd_map); } + percpu_ref_exit(&se_sess->cmd_count); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); @@ -826,11 +841,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } EXPORT_SYMBOL(target_complete_cmd); -void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +void target_set_cmd_data_length(struct se_cmd *cmd, int length) { - if ((scsi_status == SAM_STAT_GOOD || - cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && - length < cmd->data_length) { + if (length < cmd->data_length) { if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { cmd->residual_count += cmd->data_length - length; } else { @@ -840,6 +853,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len cmd->data_length = length; } +} +EXPORT_SYMBOL(target_set_cmd_data_length); + +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD || + cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { + target_set_cmd_data_length(cmd, length); + } target_complete_cmd(cmd, scsi_status); } @@ -2724,6 +2746,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) } se_cmd->transport_state |= CMD_T_PRE_EXECUTE; list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); + percpu_ref_get(&se_sess->cmd_count); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); @@ -2754,8 +2777,6 @@ static void target_release_cmd_kref(struct kref *kref) if (se_sess) { spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_del_init(&se_cmd->se_cmd_list); - if (list_empty(&se_sess->sess_cmd_list)) - wake_up(&se_sess->cmd_list_wq); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); } @@ -2763,6 +2784,8 @@ static void target_release_cmd_kref(struct kref *kref) se_cmd->se_tfo->release_cmd(se_cmd); if (compl) complete(compl); + + percpu_ref_put(&se_sess->cmd_count); } /** @@ -2891,6 +2914,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); se_sess->sess_tearing_down = 1; spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + percpu_ref_kill(&se_sess->cmd_count); } EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); @@ -2905,17 +2930,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) 
WARN_ON_ONCE(!se_sess->sess_tearing_down); - spin_lock_irq(&se_sess->sess_cmd_lock); do { - ret = wait_event_interruptible_lock_irq_timeout( - se_sess->cmd_list_wq, - list_empty(&se_sess->sess_cmd_list), - se_sess->sess_cmd_lock, 180 * HZ); + ret = wait_event_timeout(se_sess->cmd_list_wq, + percpu_ref_is_zero(&se_sess->cmd_count), + 180 * HZ); list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) target_show_cmd("session shutdown: still waiting for ", cmd); } while (ret <= 0); - spin_unlock_irq(&se_sess->sess_cmd_lock); } EXPORT_SYMBOL(target_wait_for_sess_cmds); @@ -2960,9 +2982,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { - - assert_spin_locked(&cmd->t_state_lock); - WARN_ON_ONCE(!irqs_disabled()); + lockdep_assert_held(&cmd->t_state_lock); if (fabric_stop) cmd->transport_state |= CMD_T_FABRIC_STOP; @@ -3411,6 +3431,10 @@ int transport_generic_handle_tmr( unsigned long flags; bool aborted = false; + spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags); + list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list); + spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->transport_state & CMD_T_ABORTED) { aborted = true; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9cd404acdb82b15a0634de86f7f1da35655bfe72..de27014685ac9283431c3afc1e1ee8f7eb5eb664 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -148,7 +148,7 @@ struct tcmu_dev { size_t ring_size; struct mutex cmdr_lock; - struct list_head cmdr_queue; + struct list_head qfull_queue; uint32_t dbi_max; uint32_t dbi_thresh; @@ -159,6 +159,7 @@ struct tcmu_dev { struct timer_list cmd_timer; unsigned int cmd_time_out; + struct list_head inflight_queue; struct timer_list qfull_timer; int qfull_time_out; @@ -179,7 +180,7 @@ struct tcmu_dev { struct tcmu_cmd { struct se_cmd *se_cmd; struct tcmu_dev *tcmu_dev; - struct list_head cmdr_queue_entry; + struct list_head queue_entry; uint16_t cmd_id; @@ -192,6 +193,7 @@ struct tcmu_cmd { unsigned long deadline; #define TCMU_CMD_BIT_EXPIRED 0 +#define TCMU_CMD_BIT_INFLIGHT 1 unsigned long flags; }; /* @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) if (!tcmu_cmd) return NULL; - INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); + INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; @@ -610,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) size = round_up(size+offset, PAGE_SIZE); while (size) { - flush_dcache_page(virt_to_page(start)); + flush_dcache_page(vmalloc_to_page(start)); start += PAGE_SIZE; size -= PAGE_SIZE; } @@ -678,15 +680,17 @@ static void scatter_data_area(struct tcmu_dev *udev, void *from, *to = NULL; size_t copy_bytes, to_offset, offset; struct scatterlist *sg; - struct page *page; + struct page *page = NULL; for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; from = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } block_remaining = DATA_BLOCK_SIZE; dbi = tcmu_cmd_get_dbi(tcmu_cmd); @@ -731,7 +735,6 @@ static void scatter_data_area(struct tcmu_dev *udev, memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes); - tcmu_flush_dcache_range(to, copy_bytes); } sg_remaining -= copy_bytes; @@ -740,8 +743,10 
@@ static void scatter_data_area(struct tcmu_dev *udev, kunmap_atomic(from - sg->offset); } - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -787,13 +792,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = tcmu_cmd_get_dbi(cmd); page = tcmu_get_block_page(udev, dbi); from = kmap_atomic(page); + flush_dcache_page(page); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); if (read_len < copy_bytes) copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; - tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, copy_bytes); @@ -915,11 +920,13 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, return 0; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); - mod_timer(timer, tcmu_cmd->deadline); + if (!timer_pending(timer)) + mod_timer(timer, tcmu_cmd->deadline); + return 0; } -static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) +static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; unsigned int tmo; @@ -942,7 +949,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) if (ret) return ret; - list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); pr_debug("adding cmd %u on dev %s to ring space wait queue\n", tcmu_cmd->cmd_id, udev->name); return 0; @@ -958,7 +965,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) * 0 success * 1 internally queued to wait for ring memory to free. */ -static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) +static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; struct se_cmd *se_cmd = tcmu_cmd->se_cmd; @@ -999,7 +1006,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); - if (!list_empty(&udev->cmdr_queue)) + if (!list_empty(&udev->qfull_queue)) goto queue; mb = udev->mb_addr; @@ -1033,7 +1040,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1091,18 +1098,21 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); + set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); + /* TODO: only if FLUSH and FUA? 
*/ uio_event_notify(&udev->uio_info); return 0; queue: - if (add_to_cmdr_queue(tcmu_cmd)) { + if (add_to_qfull_queue(tcmu_cmd)) { *scsi_err = TCM_OUT_OF_RESOURCES; return -1; } @@ -1136,14 +1146,18 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; bool read_len_valid = false; - uint32_t read_len = se_cmd->data_length; + uint32_t read_len; /* * cmd has been completed already from timeout, just reclaim * data area space and free cmd */ - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON_ONCE(se_cmd); goto out; + } + + list_del_init(&cmd->queue_entry); tcmu_cmd_reset_dbi_cur(cmd); @@ -1154,6 +1168,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * goto done; } + read_len = se_cmd->data_length; if (se_cmd->data_direction == DMA_FROM_DEVICE && (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { read_len_valid = true; @@ -1194,9 +1209,29 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * tcmu_free_cmd(cmd); } -static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) +static void tcmu_set_next_deadline(struct list_head *queue, + struct timer_list *timer) +{ + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + unsigned long deadline = 0; + + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { + if (!time_after(jiffies, tcmu_cmd->deadline)) { + deadline = tcmu_cmd->deadline; + break; + } + } + + if (deadline) + mod_timer(timer, deadline); + else + del_timer(timer); +} + +static bool tcmu_handle_completions(struct tcmu_dev *udev) { struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; int handled = 0; if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { @@ -1210,9 +1245,15 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - struct tcmu_cmd *cmd; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? + ring_left : sizeof(*entry)); if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { UPDATE_HEAD(udev->cmdr_last_cleaned, @@ -1227,7 +1268,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) pr_err("cmd_id %u not found, ring is broken\n", entry->hdr.cmd_id); set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); - break; + return false; } tcmu_handle_completion(cmd, entry); @@ -1243,7 +1284,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) /* no more pending commands */ del_timer(&udev->cmd_timer); - if (list_empty(&udev->cmdr_queue)) { + if (list_empty(&udev->qfull_queue)) { /* * no more pending or waiting commands so try to * reclaim blocks if needed. 
@@ -1252,6 +1293,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) tcmu_global_max_blocks) schedule_delayed_work(&tcmu_unmap_work, 0); } + } else if (udev->cmd_time_out) { + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); } return handled; @@ -1271,7 +1314,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (!time_after(jiffies, cmd->deadline)) return 0; - is_running = list_empty(&cmd->cmdr_queue_entry); + is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); se_cmd = cmd->se_cmd; if (is_running) { @@ -1287,9 +1330,10 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) * target_complete_cmd will translate this to LUN COMM FAILURE */ scsi_status = SAM_STAT_CHECK_CONDITION; + list_del_init(&cmd->queue_entry); + cmd->se_cmd = NULL; } else { - list_del_init(&cmd->cmdr_queue_entry); - + list_del_init(&cmd->queue_entry); idr_remove(&udev->commands, id); tcmu_free_cmd(cmd); scsi_status = SAM_STAT_TASK_SET_FULL; @@ -1372,7 +1416,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); - INIT_LIST_HEAD(&udev->cmdr_queue); + INIT_LIST_HEAD(&udev->qfull_queue); + INIT_LIST_HEAD(&udev->inflight_queue); idr_init(&udev->commands); timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); @@ -1383,7 +1428,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } -static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) +static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) { struct tcmu_cmd *tcmu_cmd, *tmp_cmd; LIST_HEAD(cmds); @@ -1391,15 +1436,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) sense_reason_t scsi_ret; int ret; - if (list_empty(&udev->cmdr_queue)) + if (list_empty(&udev->qfull_queue)) return true; pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); - list_splice_init(&udev->cmdr_queue, &cmds); + list_splice_init(&udev->qfull_queue, &cmds); - list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { - list_del_init(&tcmu_cmd->cmdr_queue_entry); + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { + list_del_init(&tcmu_cmd->queue_entry); pr_debug("removing cmd %u on dev %s from queue\n", tcmu_cmd->cmd_id, udev->name); @@ -1437,14 +1482,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) * cmd was requeued, so just put all cmds back in * the queue */ - list_splice_tail(&cmds, &udev->cmdr_queue); + list_splice_tail(&cmds, &udev->qfull_queue); drained = false; - goto done; + break; } } - if (list_empty(&udev->cmdr_queue)) - del_timer(&udev->qfull_timer); -done: + + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); return drained; } @@ -1454,7 +1498,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) mutex_lock(&udev->cmdr_lock); tcmu_handle_completions(udev); - run_cmdr_queue(udev, false); + run_qfull_queue(udev, false); mutex_unlock(&udev->cmdr_lock); return 0; @@ -1982,7 +2026,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) /* complete IO that has executed successfully */ tcmu_handle_completions(udev); /* fail IO waiting to be queued */ - run_cmdr_queue(udev, true); + run_qfull_queue(udev, true); unlock: mutex_unlock(&udev->cmdr_lock); @@ -1997,7 +2041,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { - if (!list_empty(&cmd->cmdr_queue_entry)) + if 
(!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) continue; pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", @@ -2006,6 +2050,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON(!cmd->se_cmd); + list_del_init(&cmd->queue_entry); if (err_level == 1) { /* * Userspace was not able to start the @@ -2666,6 +2712,10 @@ static void check_timedout_devices(void) mutex_lock(&udev->cmdr_lock); idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + mutex_unlock(&udev->cmdr_lock); spin_lock_bh(&timed_out_udevs_lock); diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 2718a933c0c6e13146d059aeb6a44ca50e8594b1..1709b8a99bd79252016cfaabe7e4bd131b6c2d3a 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -55,60 +55,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -struct xcopy_dev_search_info { - const unsigned char *dev_wwn; - struct se_device *found_dev; -}; - +/** + * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers + * + * @se_dev: device being considered for match + * @dev_wwn: XCOPY requested NAA dev_wwn + * @return: 1 on match, 0 on no-match + */ static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, - void *data) + const unsigned char *dev_wwn) { - struct xcopy_dev_search_info *info = data; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; int rc; - if (!se_dev->dev_attrib.emulate_3pc) + if (!se_dev->dev_attrib.emulate_3pc) { + pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); return 0; + } memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); - rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); - if (rc != 0) - return 0; - - info->found_dev = se_dev; - pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); - - rc = target_depend_item(&se_dev->dev_group.cg_item); + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); if (rc != 0) { - pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n", - rc, se_dev); - return rc; + pr_debug("XCOPY: skip non-matching: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); + return 0; } + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); - pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n", - se_dev, &se_dev->dev_group); return 1; } -static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, - struct se_device **found_dev) +static int target_xcopy_locate_se_dev_e4(struct se_session *sess, + const unsigned char *dev_wwn, + struct se_device **_found_dev, + struct percpu_ref **_found_lun_ref) { - struct xcopy_dev_search_info info; - int ret; - - memset(&info, 0, sizeof(info)); - info.dev_wwn = dev_wwn; - - ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info); - if (ret == 1) { - *found_dev = info.found_dev; - return 0; - } else { - pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); - return -EINVAL; + struct se_dev_entry *deve; + struct se_node_acl *nacl; + struct se_lun *this_lun = NULL; + struct se_device *found_dev = NULL; + + /* cmd with NULL sess indicates no associated $FABRIC_MOD */ + if (!sess) + goto err_out; + + pr_debug("XCOPY 
0xe4: searching for: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); + + nacl = sess->se_node_acl; + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + struct se_device *this_dev; + int rc; + + this_lun = rcu_dereference(deve->se_lun); + this_dev = rcu_dereference_raw(this_lun->lun_se_dev); + + rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); + if (rc) { + if (percpu_ref_tryget_live(&this_lun->lun_ref)) + found_dev = this_dev; + break; + } } + rcu_read_unlock(); + if (found_dev == NULL) + goto err_out; + + pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", + found_dev, &found_dev->dev_group); + *_found_dev = found_dev; + *_found_lun_ref = &this_lun->lun_ref; + return 0; +err_out: + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, @@ -255,12 +278,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, switch (xop->op_origin) { case XCOL_SOURCE_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn, - &xop->dst_dev); + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->dst_tid_wwn, + &xop->dst_dev, + &xop->remote_lun_ref); break; case XCOL_DEST_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn, - &xop->src_dev); + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->src_tid_wwn, + &xop->src_dev, + &xop->remote_lun_ref); break; default: pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " @@ -412,18 +439,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) { - struct se_device *remote_dev; - if (xop->op_origin == XCOL_SOURCE_RECV_OP) - remote_dev = xop->dst_dev; + pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); else - remote_dev = xop->src_dev; + pr_debug("putting src lun_ref for %p\n", xop->src_dev); - pr_debug("Calling configfs_undepend_item for" - " remote_dev: %p remote_dev->dev_group: %p\n", - remote_dev, &remote_dev->dev_group.cg_item); - - target_undepend_item(&remote_dev->dev_group.cg_item); + percpu_ref_put(xop->remote_lun_ref); } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) @@ -480,6 +501,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = { int target_xcopy_setup_pt(void) { + int ret; + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); if (!xcopy_wq) { pr_err("Unable to allocate xcopy_wq\n"); @@ -497,7 +520,9 @@ int target_xcopy_setup_pt(void) INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); - transport_init_session(&xcopy_pt_sess); + ret = transport_init_session(&xcopy_pt_sess); + if (ret < 0) + return ret; xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 26ba4c3c9cffda838e9dac362ffebbbb019dce90..974bc1e19ff2bedaf6ed62c3813e31e82a5a5fb4 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -29,6 +29,7 @@ struct xcopy_op { struct se_device *dst_dev; unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct percpu_ref *remote_lun_ref; sector_t src_lba; sector_t dst_lba; diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c index 3be9519654e51018032623561a9a0b78f952ea7b..cf3fad2cb87140b810f094d390df69f51e8e3aff 100644 --- 
a/drivers/tc/tc.c +++ b/drivers/tc/tc.c @@ -2,7 +2,7 @@ * TURBOchannel bus services. * * Copyright (c) Harald Koerfgen, 1998 - * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki + * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki * Copyright (c) 2005 James Simmons * * This file is subject to the terms and conditions of the GNU @@ -10,6 +10,7 @@ * directory of this archive for more details. */ #include +#include #include #include #include @@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus) tdev->dev.bus = &tc_bus_type; tdev->slot = slot; + /* TURBOchannel has 34-bit DMA addressing (16GiB space). */ + tdev->dma_mask = DMA_BIT_MASK(34); + tdev->dev.dma_mask = &tdev->dma_mask; + tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34); + for (i = 0; i < 8; i++) { tdev->firmware[i] = readb(module + offset + TC_FIRM_VER + 4 * i); diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 3c59e19029befd7ad0070db571de0b4b8271dfdb..26d8a42cdc8baaa54393ca6d958bea308d3efb7c 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -13,3 +13,33 @@ config OPTEE_SHM_NUM_PRIV_PAGES help This sets the number of private shared memory pages to be used by OP-TEE TEE driver. + +if OPTEE + +choice + prompt "Default conduit method" + default OPTEE_DEFAULT_METHOD_NONE + help + This option sets the default conduit method for OP-TEE in case + the firmware does not provide the "method" property. If in doubt, select "none" + which depends on firmware to provide the value. + +config OPTEE_DEFAULT_METHOD_NONE + bool "none" + help + There is no default conduit method used by the driver. Require + firmware to provide the method explicitly. + +config OPTEE_DEFAULT_METHOD_HVC + bool "hvc" + help + Use "hvc" as the default conduit method. + +config OPTEE_DEFAULT_METHOD_SMC + bool "smc" + help + Use "smc" as the default conduit method.
+ +endchoice + +endif diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index e1aafe842d660bb11b1146133105f37bff24873e..51d20d234c6112aa3115347b13b72b1e8ccad054 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -529,15 +530,23 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1, arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); } -static optee_invoke_fn *get_invoke_func(struct device_node *np) +#if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC) +#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc +#elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC) +#define DEFAULT_CONDUIT_METHOD optee_smccc_smc +#else +#define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO) +#endif + +static optee_invoke_fn *get_invoke_func(struct device *dev) { const char *method; - pr_info("probing for conduit method from DT.\n"); + pr_info("probing for conduit method.\n"); - if (of_property_read_string(np, "method", &method)) { + if (device_property_read_string(dev, "method", &method)) { pr_warn("missing \"method\" property\n"); - return ERR_PTR(-ENXIO); + return DEFAULT_CONDUIT_METHOD; } if (!strcmp("hvc", method)) @@ -549,7 +558,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np) return ERR_PTR(-EINVAL); } -static struct optee *optee_probe(struct device_node *np) +static int optee_remove(struct platform_device *pdev) +{ + struct optee *optee = platform_get_drvdata(pdev); + + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices have to be unregistered before we can free the + * other resources.
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); + + return 0; +} + +static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; struct tee_shm_pool *pool; @@ -559,25 +598,25 @@ static struct optee *optee_probe(struct device_node *np) u32 sec_caps; int rc; - invoke_fn = get_invoke_func(np); + invoke_fn = get_invoke_func(&pdev->dev); if (IS_ERR(invoke_fn)) - return (void *)invoke_fn; + return PTR_ERR(invoke_fn); if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { pr_warn("api uid mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } optee_msg_get_os_revision(invoke_fn); if (!optee_msg_api_revision_is_compatible(invoke_fn)) { pr_warn("api revision mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { pr_warn("capabilities mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* @@ -589,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np) pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps); if (IS_ERR(pool)) - return (void *)pool; + return PTR_ERR(pool); optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) { @@ -631,8 +670,10 @@ static struct optee *optee_probe(struct device_node *np) optee_enable_shm_cache(optee); + platform_set_drvdata(pdev, optee); + pr_info("initialized driver\n"); - return optee; + return 0; err: if (optee) { /* @@ -648,81 +689,37 @@ static struct optee *optee_probe(struct device_node *np) tee_shm_pool_free(pool); if (memremaped_shm) memunmap(memremaped_shm); - return ERR_PTR(rc); -} - -static void optee_remove(struct optee *optee) -{ - /* - * Ask OP-TEE to free all cached shared memory objects to decrease - * reference counters and also avoid wild pointers in secure world - * into the old shared memory range. - */ - optee_disable_shm_cache(optee); - - /* - * The two devices has to be unregistered before we can free the - * other resources. 
- */ - tee_device_unregister(optee->supp_teedev); - tee_device_unregister(optee->teedev); - - tee_shm_pool_free(optee->pool); - if (optee->memremaped_shm) - memunmap(optee->memremaped_shm); - optee_wait_queue_exit(&optee->wait_queue); - optee_supp_uninit(&optee->supp); - mutex_destroy(&optee->call_queue.mutex); - - kfree(optee); + return rc; } -static const struct of_device_id optee_match[] = { +static const struct of_device_id optee_dt_match[] = { { .compatible = "linaro,optee-tz" }, {}, }; +MODULE_DEVICE_TABLE(of, optee_dt_match); -static struct optee *optee_svc; - -static int __init optee_driver_init(void) -{ - struct device_node *fw_np; - struct device_node *np; - struct optee *optee; - - /* Node is supposed to be below /firmware */ - fw_np = of_find_node_by_name(NULL, "firmware"); - if (!fw_np) - return -ENODEV; - - np = of_find_matching_node(fw_np, optee_match); - if (!np) - return -ENODEV; - - optee = optee_probe(np); - of_node_put(np); - - if (IS_ERR(optee)) - return PTR_ERR(optee); - - optee_svc = optee; - - return 0; -} -module_init(optee_driver_init); - -static void __exit optee_driver_exit(void) -{ - struct optee *optee = optee_svc; - - optee_svc = NULL; - if (optee) - optee_remove(optee); -} -module_exit(optee_driver_exit); +#ifdef CONFIG_ACPI +static const struct acpi_device_id optee_acpi_match[] = { + { "PHYT8003" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, optee_acpi_match); +#endif + +static struct platform_driver optee_driver = { + .probe = optee_probe, + .remove = optee_remove, + .driver = { + .name = "optee", + .of_match_table = optee_dt_match, + .acpi_match_table = ACPI_PTR(optee_acpi_match), + }, +}; +module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:optee"); diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index df35fc01fd3e5eec43088112ac7976f72c69164f..43626e15703a80ddf48360c0622f37b813b76809 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -19,7 +19,7 @@ struct optee_supp_req { struct list_head link; - bool busy; + bool in_queue; u32 func; u32 ret; size_t num_params; @@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp) /* Abort all request retrieved by supplicant */ idr_for_each_entry(&supp->idr, req, id) { - req->busy = false; idr_remove(&supp->idr, id); req->ret = TEEC_ERROR_COMMUNICATION; complete(&req->c); @@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp) /* Abort all queued requests */ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) { list_del(&req->link); + req->in_queue = false; req->ret = TEEC_ERROR_COMMUNICATION; complete(&req->c); } @@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, /* Insert the request in the request list */ mutex_lock(&supp->mutex); list_add_tail(&req->link, &supp->reqs); + req->in_queue = true; mutex_unlock(&supp->mutex); /* Tell an eventual waiter there's a new request */ @@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, * will serve all requests in a timely manner and * interrupting then wouldn't make sense. 
*/ - interruptable = !req->busy; - if (!req->busy) + if (req->in_queue) { list_del(&req->link); + req->in_queue = false; + } } mutex_unlock(&supp->mutex); @@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp, return ERR_PTR(-ENOMEM); list_del(&req->link); - req->busy = true; + req->in_queue = false; return req; } @@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp, if ((num_params - nm) != req->num_params) return ERR_PTR(-EINVAL); - req->busy = false; idr_remove(&supp->idr, id); supp->req_id = -1; *num_meta = nm; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index dd46b758852aa9ba2866348e6f973da3447b3623..0d252f151584b346e325bede300f8172b2558e8c 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -56,7 +56,6 @@ static int tee_open(struct inode *inode, struct file *filp) kref_init(&ctx->refcount); ctx->teedev = teedev; - INIT_LIST_HEAD(&ctx->list_shm); filp->private_data = ctx; rc = teedev->desc->ops->open(ctx); if (rc) diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 85d99d621603d7f2dd111c2549c312c01069df85..e3d62a21cd53b2c4d5c4b9552e130a41186a2f19 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -46,7 +46,8 @@ struct tee_shm_pool { * @num_users: number of active users of this device * @c_no_user: completion used when unregistering the device * @mutex: mutex protecting @num_users and @idr - * @idr: register of shared memory object allocated on this device + * @idr: register of user space shared memory objects allocated or + * registered on this device * @pool: shared memory pool */ struct tee_device { diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 0b9ab1d0dd45dd69046f921e6bf846e7af23fe88..c591e7ba4435cd3c035cd440adf4966f6d9c1c88 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, Linaro Limited + * Copyright (c) 2015-2017, 2019-2021 Linaro Limited * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -11,25 +11,17 @@ * GNU General Public License for more details. 
* */ +#include #include -#include -#include #include +#include #include #include #include #include "tee_private.h" -static void tee_shm_release(struct tee_shm *shm) +static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { - struct tee_device *teedev = shm->teedev; - - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - if (shm->ctx) - list_del(&shm->link); - mutex_unlock(&teedev->mutex); - if (shm->flags & TEE_SHM_POOL) { struct tee_shm_pool_mgr *poolm; @@ -61,51 +53,6 @@ static void tee_shm_release(struct tee_shm *shm) tee_device_put(teedev); } -static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment - *attach, enum dma_data_direction dir) -{ - return NULL; -} - -static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *table, - enum dma_data_direction dir) -{ -} - -static void tee_shm_op_release(struct dma_buf *dmabuf) -{ - struct tee_shm *shm = dmabuf->priv; - - tee_shm_release(shm); -} - -static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum) -{ - return NULL; -} - -static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct tee_shm *shm = dmabuf->priv; - size_t size = vma->vm_end - vma->vm_start; - - /* Refuse sharing shared memory provided by application */ - if (shm->flags & TEE_SHM_REGISTER) - return -EINVAL; - - return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, - size, vma->vm_page_prot); -} - -static const struct dma_buf_ops tee_shm_dma_buf_ops = { - .map_dma_buf = tee_shm_op_map_dma_buf, - .unmap_dma_buf = tee_shm_op_unmap_dma_buf, - .release = tee_shm_op_release, - .map = tee_shm_op_map, - .mmap = tee_shm_op_mmap, -}; - static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, struct tee_device *teedev, size_t size, u32 flags) @@ -146,6 +93,7 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, goto err_dev_put; } + refcount_set(&shm->refcount, 1); shm->flags = flags | TEE_SHM_POOL; shm->teedev = teedev; shm->ctx = ctx; @@ -160,41 +108,20 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, goto err_kfree; } - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err_pool_free; - } - if (flags & TEE_SHM_DMA_BUF) { - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &tee_shm_dma_buf_ops; - exp_info.size = shm->size; - exp_info.flags = O_RDWR; - exp_info.priv = shm; - - shm->dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(shm->dmabuf)) { - ret = ERR_CAST(shm->dmabuf); - goto err_rem; + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; } } - if (ctx) { + if (ctx) teedev_ctx_get(ctx); - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - } return shm; -err_rem: - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - mutex_unlock(&teedev->mutex); err_pool_free: poolm->ops->free(poolm, shm); err_kfree: @@ -259,6 +186,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, goto err; } + refcount_set(&shm->refcount, 1); shm->flags = flags | TEE_SHM_REGISTER; shm->teedev = teedev; shm->ctx = ctx; @@ -299,26 +227,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, goto err; } - if (flags & TEE_SHM_DMA_BUF) { - 
DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &tee_shm_dma_buf_ops; - exp_info.size = shm->size; - exp_info.flags = O_RDWR; - exp_info.priv = shm; - - shm->dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(shm->dmabuf)) { - ret = ERR_CAST(shm->dmabuf); - teedev->desc->ops->shm_unregister(ctx, shm); - goto err; - } - } - - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - return shm; err: if (shm) { @@ -342,6 +250,35 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } EXPORT_SYMBOL_GPL(tee_shm_register); +static int tee_shm_fop_release(struct inode *inode, struct file *filp) +{ + tee_shm_put(filp->private_data); + return 0; +} + +static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct tee_shm *shm = filp->private_data; + size_t size = vma->vm_end - vma->vm_start; + + /* Refuse sharing shared memory provided by application */ + if (shm->flags & TEE_SHM_USER_MAPPED) + return -EINVAL; + + /* check for overflowing the buffer's size */ + if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) + return -EINVAL; + + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, + size, vma->vm_page_prot); +} + +static const struct file_operations tee_shm_fops = { + .owner = THIS_MODULE, + .release = tee_shm_fop_release, + .mmap = tee_shm_fop_mmap, +}; + /** * tee_shm_get_fd() - Increase reference count and return file descriptor * @shm: Shared memory handle @@ -354,10 +291,11 @@ int tee_shm_get_fd(struct tee_shm *shm) if (!(shm->flags & TEE_SHM_DMA_BUF)) return -EINVAL; - get_dma_buf(shm->dmabuf); - fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); + /* matched by tee_shm_put() in tee_shm_op_release() */ + refcount_inc(&shm->refcount); + fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR); if (fd < 0) - dma_buf_put(shm->dmabuf); + tee_shm_put(shm); return fd; } @@ -367,17 +305,7 @@ int tee_shm_get_fd(struct tee_shm *shm) */ void tee_shm_free(struct tee_shm *shm) { - /* - * dma_buf_put() decreases the dmabuf reference counter and will - * call tee_shm_release() when the last reference is gone. - * - * In the case of driver private memory we call tee_shm_release - * directly instead as it doesn't have a reference counter. - */ - if (shm->flags & TEE_SHM_DMA_BUF) - dma_buf_put(shm->dmabuf); - else - tee_shm_release(shm); + tee_shm_put(shm); } EXPORT_SYMBOL_GPL(tee_shm_free); @@ -484,10 +412,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) teedev = ctx->teedev; mutex_lock(&teedev->mutex); shm = idr_find(&teedev->idr, id); + /* + * If the tee_shm was found in the IDR it must have a refcount + * larger than 0 due to the guarantee in tee_shm_put() below. So + * it's safe to use refcount_inc(). + */ if (!shm || shm->ctx != ctx) shm = ERR_PTR(-EINVAL); - else if (shm->flags & TEE_SHM_DMA_BUF) - get_dma_buf(shm->dmabuf); + else + refcount_inc(&shm->refcount); mutex_unlock(&teedev->mutex); return shm; } @@ -499,7 +432,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id); */ void tee_shm_put(struct tee_shm *shm) { - if (shm->flags & TEE_SHM_DMA_BUF) - dma_buf_put(shm->dmabuf); + struct tee_device *teedev = shm->ctx->teedev; + bool do_release = false; + + mutex_lock(&teedev->mutex); + if (refcount_dec_and_test(&shm->refcount)) { + /* + * refcount has reached 0, we must now remove it from the + * IDR before releasing the mutex. This will guarantee that + * the refcount_inc() in tee_shm_get_from_id() never starts + * from 0. 
+ */ + if (shm->flags & TEE_SHM_DMA_BUF) + idr_remove(&teedev->idr, shm->id); + do_release = true; + } + mutex_unlock(&teedev->mutex); + + if (do_release) + tee_shm_release(teedev, shm); } EXPORT_SYMBOL_GPL(tee_shm_put); diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c index 2c2f6d93034ec9764f3596d18cd3891d0e3565b6..1c9830b2c84da97d808a2cd099f3d9ded3175edc 100644 --- a/drivers/thermal/armada_thermal.c +++ b/drivers/thermal/armada_thermal.c @@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal, int ret; /* Valid check */ - if (armada_is_valid(priv)) { + if (!armada_is_valid(priv)) { dev_err(priv->dev, "Temperature sensor reading not valid\n"); return -EIO; @@ -526,8 +526,8 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev, /* First memory region points towards the status register */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (IS_ERR(res)) - return PTR_ERR(res); + if (!res) + return -EIO; /* * Edit the resource start address and length to map over all the diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 23ad4f9f21438e45a819da46962025eeeb922590..8646fb7425f2f5ef709eca32b24116d9c8ba8273 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -27,6 +27,8 @@ #include #include +#include "../thermal_hwmon.h" + #define BCM2835_TS_TSENSCTL 0x00 #define BCM2835_TS_TSENSSTAT 0x04 @@ -126,8 +128,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = { static void bcm2835_thermal_debugfs(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); struct debugfs_regset32 *regset; data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); @@ -273,7 +274,16 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) data->tz = tz; - platform_set_drvdata(pdev, tz); + platform_set_drvdata(pdev, data); + + /* + * Thermal_zone doesn't enable hwmon as default, + * enable it here + */ + tz->tzp->no_hwmon = false; + err = thermal_add_hwmon_sysfs(tz); + if (err) + goto err_tz; bcm2835_thermal_debugfs(pdev); @@ -288,8 +298,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) static int bcm2835_thermal_remove(struct platform_device *pdev) { - struct thermal_zone_device *tz = platform_get_drvdata(pdev); - struct bcm2835_thermal_data *data = tz->devdata; + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tz; debugfs_remove_recursive(data->debugfsdir); thermal_zone_of_sensor_unregister(&pdev->dev, tz); diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index dd8dd947b7f0737c8a1228c1a7ce89ea915e44d1..01b0cb99445778513b67506a5cd7654909f2988c 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work) THERMAL_EVENT_UNSPECIFIED); delay = msecs_to_jiffies(thermal->zone->passive_delay); - schedule_delayed_work(&thermal->work, delay); + queue_delayed_work(system_freezable_wq, &thermal->work, delay); return; } @@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data) struct da9062_thermal *thermal = data; disable_irq_nosync(thermal->irq); - schedule_delayed_work(&thermal->work, 0); + 
queue_delayed_work(system_freezable_wq, &thermal->work, 0); return IRQ_HANDLED; } diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index e26b01c05e8224cec140633bb3b47787759662db..77967da5d4061f141e2b10bade7af7dc83181697 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -22,6 +22,13 @@ enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, INT3400_THERMAL_ACTIVE, INT3400_THERMAL_CRITICAL, + INT3400_THERMAL_ADAPTIVE_PERFORMANCE, + INT3400_THERMAL_EMERGENCY_CALL_MODE, + INT3400_THERMAL_PASSIVE_2, + INT3400_THERMAL_POWER_BOSS, + INT3400_THERMAL_VIRTUAL_SENSOR, + INT3400_THERMAL_COOLING_MODE, + INT3400_THERMAL_HARDWARE_DUTY_CYCLING, INT3400_THERMAL_MAXIMUM_UUID, }; @@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "42A441D6-AE6A-462b-A84B-4A8CE79027D3", "3A95C389-E4B8-4629-A526-C52C88626BAE", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", + "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", + "5349962F-71E6-431D-9AE8-0A635B710AEE", + "9E04115A-AE87-4D1C-9500-0F3E340BFE75", + "F5A35014-C209-46A4-993A-EB56DE7530A1", + "6ED722A7-9240-48A5-B479-31EEF723D7CF", + "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531", + "BE84BABF-C4D4-403D-B495-3128FD44dAC1", }; struct int3400_thermal_priv { @@ -209,6 +223,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); break; default: /* Ignore unknown notification codes sent to INT3400 device */ @@ -302,10 +320,9 @@ static int int3400_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { - int3400_thermal_ops.get_mode = int3400_thermal_get_mode; - int3400_thermal_ops.set_mode = int3400_thermal_set_mode; - } + int3400_thermal_ops.get_mode = int3400_thermal_get_mode; + int3400_thermal_ops.set_mode = int3400_thermal_set_mode; + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, priv, &int3400_thermal_ops, &int3400_thermal_params, 0, 0); diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 284cf2c5a8fd92db5bde9705e67d2510fe984731..8e1cf4d789be10df2413e1311bba63dde3545f43 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ struct pci_dev *pci_dev; \ struct platform_device *pdev; \ struct proc_thermal_device *proc_dev; \ -\ + \ + if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \ + dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \ + return 0; \ + } \ + \ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ pdev = to_platform_device(dev); \ proc_dev = platform_get_drvdata(pdev); \ @@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev, *priv = proc_priv; ret = proc_thermal_read_ppcc(proc_priv); - if (!ret) { - ret = sysfs_create_group(&dev->kobj, - &power_limit_attribute_group); - - } if (ret) return ret; @@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev, proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); if (IS_ERR(proc_priv->int340x_zone)) { - ret = PTR_ERR(proc_priv->int340x_zone); 
- goto remove_group; + return PTR_ERR(proc_priv->int340x_zone); } else ret = 0; @@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev, remove_zone: int340x_thermal_zone_remove(proc_priv->int340x_zone); -remove_group: - sysfs_remove_group(&proc_priv->dev->kobj, - &power_limit_attribute_group); return ret; } @@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev) platform_set_drvdata(pdev, proc_priv); proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; - return 0; + dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n"); + + return sysfs_create_group(&pdev->dev.kobj, + &power_limit_attribute_group); } static int int3401_remove(struct platform_device *pdev) @@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, proc_priv->soc_dts = intel_soc_dts_iosf_init( INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); - if (proc_priv->soc_dts && pdev->irq) { + if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { ret = pci_enable_msi(pdev); if (!ret) { ret = request_threaded_irq(pdev->irq, NULL, @@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); } - return 0; + dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n"); + + return sysfs_create_group(&pdev->dev.kobj, + &power_limit_attribute_group); } static void proc_thermal_pci_remove(struct pci_dev *pdev) diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index cde891c54cde0dcbc1965fcd7fec6a8f434a9bae..8e8328347c0ed34fbd99ba53e8a96a1b910ad87a 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -101,7 +101,7 @@ struct powerclamp_worker_data { bool clamping; }; -static struct powerclamp_worker_data * __percpu worker_data; +static struct powerclamp_worker_data __percpu *worker_data; static struct thermal_cooling_device *cooling_dev; static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu * clamping kthread worker @@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu) struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); struct kthread_worker *worker; - worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu); if (IS_ERR(worker)) return; diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c index 1e47511a6bd5baa0e0db1846d858b8fd2757fb19..d748527d7a38a98737a1f3e4d70e1cbda048a921 100644 --- a/drivers/thermal/intel_soc_dts_thermal.c +++ b/drivers/thermal/intel_soc_dts_thermal.c @@ -45,7 +45,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) } static const struct x86_cpu_id soc_thermal_ids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0, BYT_SOC_DTS_APIC_IRQ}, {} }; diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4f28165592056d5ec90309c5bff737f5ba66ea0b..856dce4d72e2081ddbaad2d6d126ed9125e85f75 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -77,7 +77,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz, { struct __thermal_zone *data = tz->devdata; - if (!data->ops->get_temp) + if (!data->ops || !data->ops->get_temp) return -EINVAL; return data->ops->get_temp(data->sensor_data, temp); @@ -174,6 +174,10 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz, { struct __thermal_zone *data = 
tz->devdata; + if (!data->ops || !data->ops->set_emul_temp) + return -EINVAL; + + return data->ops->set_emul_temp(data->sensor_data, temp); } @@ -182,7 +186,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip, { struct __thermal_zone *data = tz->devdata; - if (!data->ops->get_trend) + if (!data->ops || !data->ops->get_trend) return -EINVAL; return data->ops->get_trend(data->sensor_data, trip, trend); @@ -310,7 +314,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip, if (trip >= data->ntrips || trip < 0) return -EDOM; - if (data->ops->set_trip_temp) { + if (data->ops && data->ops->set_trip_temp) { int ret; ret = data->ops->set_trip_temp(data->sensor_data, trip, temp); diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index a2c9bfae3d867e8aa5e7ef2daa146212e673ff72..b139713289a42dbe94198271d3a3b9df8979917a 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -171,7 +171,8 @@ static int tsens_probe(struct platform_device *pdev) if (tmdev->ops->calibrate) { ret = tmdev->ops->calibrate(tmdev); if (ret < 0) { - dev_err(dev, "tsens calibration failed\n"); + if (ret != -EPROBE_DEFER) + dev_err(dev, "tsens calibration failed\n"); return ret; } } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 7aed5337bdd35b3b3214372e7d5f9a01cf26789c..704c8ad045bb557977917ae513cd96286864512a 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -328,6 +328,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); static int rcar_gen3_thermal_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev); + + rcar_thermal_irq_set(priv, false); pm_runtime_put(dev); pm_runtime_disable(dev); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 78f932822d381c9dbc013aee618be56e533baaab..4dc30e7890f6c4c99eed6a8109ffd7aaf880d2a0 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -434,8 +434,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) rcar_thermal_for_each_priv(priv, common) { if (rcar_thermal_had_changed(priv, status)) { rcar_thermal_irq_disable(priv); - schedule_delayed_work(&priv->work, - msecs_to_jiffies(300)); + queue_delayed_work(system_freezable_wq, &priv->work, + msecs_to_jiffies(300)); } } @@ -453,6 +453,7 @@ static int rcar_thermal_remove(struct platform_device *pdev) rcar_thermal_for_each_priv(priv, common) { rcar_thermal_irq_disable(priv); + cancel_delayed_work_sync(&priv->work); if (priv->chip->use_of_thermal) thermal_remove_hwmon_sysfs(priv->zone); else @@ -492,7 +493,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) pm_runtime_get_sync(dev); for (i = 0; i < chip->nirqs; i++) { - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + irq = platform_get_resource(pdev, IORESOURCE_IRQ, i); if (!irq) continue; if (!common->base) { diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 48eef552cba48edb5d60f7a75b715f9536e1b5ae..fc9399d9c0820d59a4321520fd5dcf9242620867 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp) struct exynos_tmu_data *data = p; int value, ret = 0; - if (!data || !data->tmu_read || !data->enabled) + if (!data || !data->tmu_read) return -EINVAL; else if (!data->enabled) /* diff --git 
a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c index bf1c628d4a7ad3f78454946b736c4bacd7d3dcfb..e22fc60ad36dcf7e3b36321cab50c85b7e73079a 100644 --- a/drivers/thermal/thermal-generic-adc.c +++ b/drivers/thermal/thermal-generic-adc.c @@ -26,7 +26,7 @@ struct gadc_thermal_info { static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) { - int temp, adc_hi, adc_lo; + int temp, temp_hi, temp_lo, adc_hi, adc_lo; int i; for (i = 0; i < gti->nlookup_table; i++) { @@ -36,13 +36,17 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val) if (i == 0) { temp = gti->lookup_table[0]; - } else if (i >= (gti->nlookup_table - 1)) { + } else if (i >= gti->nlookup_table) { temp = gti->lookup_table[2 * (gti->nlookup_table - 1)]; } else { adc_hi = gti->lookup_table[2 * i - 1]; adc_lo = gti->lookup_table[2 * i + 1]; - temp = gti->lookup_table[2 * i]; - temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo); + + temp_hi = gti->lookup_table[2 * i - 2]; + temp_lo = gti->lookup_table[2 * i]; + + temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi, + adc_lo - adc_hi); } return temp; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 6ab982309e6a04cd3c933850d34aabaaf3dd60e4..98d9bba6f4ac37eb384deb75c2e1408f4e780ced 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -451,16 +451,20 @@ static void update_temperature(struct thermal_zone_device *tz) tz->last_temperature, tz->temperature); } -static void thermal_zone_device_reset(struct thermal_zone_device *tz) +static void thermal_zone_device_init(struct thermal_zone_device *tz) { struct thermal_instance *pos; - tz->temperature = THERMAL_TEMP_INVALID; - tz->passive = 0; list_for_each_entry(pos, &tz->thermal_instances, tz_node) pos->initialized = false; } +static void thermal_zone_device_reset(struct thermal_zone_device *tz) +{ + tz->passive = 0; + thermal_zone_device_init(tz); +} + void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { @@ -729,7 +733,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto release_ida; - sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); + snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point", + dev->id); sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; dev->attr.attr.mode = 0444; @@ -738,7 +743,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto remove_symbol_link; - sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id); + snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name), + "cdev%d_weight", dev->id); sysfs_attr_init(&dev->weight_attr.attr); dev->weight_attr.attr.name = dev->weight_attr_name; dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO; @@ -1102,8 +1108,9 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); ida_simple_remove(&thermal_cdev_ida, cdev->id); - device_unregister(&cdev->device); + device_del(&cdev->device); thermal_cooling_device_destroy_sysfs(cdev); + put_device(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); @@ -1344,7 +1351,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_unlock(&thermal_list_lock); - thermal_zone_device_set_polling(tz, 0); + cancel_delayed_work_sync(&tz->poll_queue); thermal_set_governor(tz, NULL); @@ -1501,7 +1508,7 @@ static int thermal_pm_notify(struct notifier_block *nb, case 
PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { - thermal_zone_device_reset(tz); + thermal_zone_device_init(tz); thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); } diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 40c69a533b240787c2dfa7a9328aecb05ed24bbf..dd5d8ee3792870fccbff4c5ab0b7fb251d007612 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -87,13 +87,17 @@ static struct thermal_hwmon_device * thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; + char type[THERMAL_NAME_LENGTH]; mutex_lock(&thermal_hwmon_list_lock); - list_for_each_entry(hwmon, &thermal_hwmon_list, node) - if (!strcmp(hwmon->type, tz->type)) { + list_for_each_entry(hwmon, &thermal_hwmon_list, node) { + strcpy(type, tz->type); + strreplace(type, '-', '_'); + if (!strcmp(hwmon->type, type)) { mutex_unlock(&thermal_hwmon_list_lock); return hwmon; } + } mutex_unlock(&thermal_hwmon_list_lock); return NULL; diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h index 019f6f88224e951a65bb0cfb58baf7c9a19855e4..a160b9d62dd0a1bc66147375d52bfce309a1024b 100644 --- a/drivers/thermal/thermal_hwmon.h +++ b/drivers/thermal/thermal_hwmon.h @@ -19,13 +19,13 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz); void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz); #else -static int +static inline int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { return 0; } -static void +static inline void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { } diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 2241ceae7d7f15eaf09fa89ae2423ad470dd839f..4dce4a8f71ed93d96f83e58800d7516d2bb354ef 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -712,11 +712,14 @@ cur_state_store(struct device *dev, struct device_attribute *attr, if ((long)state < 0) return -EINVAL; + mutex_lock(&cdev->lock); + result = cdev->ops->set_cur_state(cdev, state); - if (result) - return result; - thermal_cooling_device_stats_update(cdev, state); - return count; + if (!result) + thermal_cooling_device_stats_update(cdev, state); + + mutex_unlock(&cdev->lock); + return result ? 
result : count; } static struct device_attribute @@ -767,6 +770,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, { struct cooling_dev_stats *stats = cdev->stats; + if (!stats) + return; + spin_lock(&stats->lock); if (stats->state == new_state) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 28fc4ce75edb49ddfe68fc00796fa0934a8df8ff..8490a1b6b61564320768012605f4837c77cf0663 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -476,6 +476,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route, goto out; sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); + if (!sw->uuid) { + tb_sw_warn(sw, "cannot allocate memory for switch\n"); + tb_switch_put(sw); + goto out; + } sw->connection_id = connection_id; sw->connection_key = connection_key; sw->link = link; diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 5cd6bdfa068f9bc91fe2e83bedcd4e81160230c4..d436a1534fc2bb1f0be976b769b0dc7e8a6929c4 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -142,9 +142,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring) return io; } -static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) +static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) { - iowrite16(value, ring_desc_base(ring) + offset); + /* + * The other 16-bits in the register is read-only and writes to it + * are ignored by the hardware so we can save one ioread32() by + * filling the read-only bits with zeroes. + */ + iowrite32(cons, ring_desc_base(ring) + 8); +} + +static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) +{ + /* See ring_iowrite_cons() above for explanation */ + iowrite32(prod << 16, ring_desc_base(ring) + 8); } static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) @@ -196,7 +207,10 @@ static void ring_write_descriptors(struct tb_ring *ring) descriptor->sof = frame->sof; } ring->head = (ring->head + 1) % ring->size; - ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); + if (ring->is_tx) + ring_iowrite_prod(ring, ring->head); + else + ring_iowrite_cons(ring, ring->head); } } @@ -660,7 +674,7 @@ void tb_ring_stop(struct tb_ring *ring) ring_iowrite32options(ring, 0, 0); ring_iowrite64desc(ring, 0, 0); - ring_iowrite16desc(ring, 0, ring->is_tx ? 
10 : 8); + ring_iowrite32desc(ring, 0, 8); ring_iowrite32desc(ring, 0, 12); ring->head = 0; ring->tail = 0; diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c index 8fe913a95b4ad0250b642aedcdcac491581fd9c3..be3f8b592b05bb4169de0ee83d626ae6c33edcec 100644 --- a/drivers/thunderbolt/property.c +++ b/drivers/thunderbolt/property.c @@ -551,6 +551,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key, property->length = size / 4; property->value.data = kzalloc(size, GFP_KERNEL); + if (!property->value.data) { + kfree(property); + return -ENOMEM; + } + memcpy(property->value.data, buf, buflen); list_add_tail(&property->list, &parent->properties); @@ -581,7 +586,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key, return -ENOMEM; property->length = size / 4; - property->value.data = kzalloc(size, GFP_KERNEL); + property->value.text = kzalloc(size, GFP_KERNEL); + if (!property->value.text) { + kfree(property); + return -ENOMEM; + } + strcpy(property->value.text, text); list_add_tail(&property->list, &parent->properties); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 7442bc4c64335cb52307698a5ffd7c7466227a04..678bf336594707439914939ec6f7f8b868228236 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -9,15 +9,13 @@ #include #include #include +#include #include #include #include #include "tb.h" -/* Switch authorization from userspace is serialized by this lock */ -static DEFINE_MUTEX(switch_lock); - /* Switch NVM support */ #define NVM_DEVID 0x05 @@ -169,7 +167,7 @@ static int nvm_validate_and_write(struct tb_switch *sw) static int nvm_authenticate_host(struct tb_switch *sw) { - int ret; + int ret = 0; /* * Root switch NVM upgrade requires that we disconnect the @@ -177,6 +175,8 @@ static int nvm_authenticate_host(struct tb_switch *sw) * already). */ if (!sw->safe_mode) { + u32 status; + ret = tb_domain_disconnect_all_paths(sw->tb); if (ret) return ret; @@ -185,7 +185,16 @@ static int nvm_authenticate_host(struct tb_switch *sw) * everything goes well so getting timeout is expected. */ ret = dma_port_flash_update_auth(sw->dma_port); - return ret == -ETIMEDOUT ? 0 : ret; + if (!ret || ret == -ETIMEDOUT) + return 0; + + /* + * Any error from update auth operation requires power + * cycling of the host router. + */ + tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); + if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) + nvm_set_auth_status(sw, status); } /* @@ -193,7 +202,7 @@ static int nvm_authenticate_host(struct tb_switch *sw) * switch. */ dma_port_power_cycle(sw->dma_port); - return 0; + return ret; } static int nvm_authenticate_device(struct tb_switch *sw) @@ -201,8 +210,16 @@ static int nvm_authenticate_device(struct tb_switch *sw) int ret, retries = 10; ret = dma_port_flash_update_auth(sw->dma_port); - if (ret && ret != -ETIMEDOUT) + switch (ret) { + case 0: + case -ETIMEDOUT: + case -EACCES: + case -EINVAL: + /* Power cycle is required */ + break; + default: return ret; + } /* * Poll here for the authentication status. 
It takes some time @@ -253,8 +270,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, struct tb_switch *sw = priv; int ret = 0; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); /* * Since writing the NVM image might require some special steps, @@ -274,7 +291,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, memcpy(sw->nvm->buf + offset, val, bytes); unlock: - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -363,10 +380,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) } nvm->non_active = nvm_dev; - mutex_lock(&switch_lock); sw->nvm = nvm; - mutex_unlock(&switch_lock); - return 0; err_nvm_active: @@ -383,10 +397,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw) { struct tb_switch_nvm *nvm; - mutex_lock(&switch_lock); nvm = sw->nvm; sw->nvm = NULL; - mutex_unlock(&switch_lock); if (!nvm) return; @@ -717,8 +729,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) { int ret = -EINVAL; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->authorized) goto unlock; @@ -761,7 +773,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) } unlock: - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -818,15 +830,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr, struct tb_switch *sw = tb_to_switch(dev); ssize_t ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->key) ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); else ret = sprintf(buf, "\n"); - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -843,8 +855,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, else if (hex2bin(key, buf, sizeof(key))) return -EINVAL; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->authorized) { ret = -EBUSY; @@ -859,11 +871,35 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, } } - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } static DEVICE_ATTR(key, 0600, key_show, key_store); +static void nvm_authenticate_start(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + /* + * During host router NVM upgrade we should not allow root port to + * go into D3cold because some root ports cannot trigger PME + * itself. To be on the safe side keep the root port in D0 during + * the whole upgrade process. 
+ */ + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_get_noresume(&root_port->dev); +} + +static void nvm_authenticate_complete(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_put(&root_port->dev); +} + static ssize_t nvm_authenticate_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -881,8 +917,8 @@ static ssize_t nvm_authenticate_store(struct device *dev, bool val; int ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); /* If NVMem devices are not yet added */ if (!sw->nvm) { @@ -913,16 +949,22 @@ static ssize_t nvm_authenticate_store(struct device *dev, sw->nvm->authenticating = true; - if (!tb_route(sw)) + if (!tb_route(sw)) { + /* + * Keep root port from suspending as long as the + * NVM upgrade process is running. + */ + nvm_authenticate_start(sw); ret = nvm_authenticate_host(sw); - else + } else { ret = nvm_authenticate_device(sw); + } pm_runtime_mark_last_busy(&sw->dev); pm_runtime_put_autosuspend(&sw->dev); } exit_unlock: - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); if (ret) return ret; @@ -936,8 +978,8 @@ static ssize_t nvm_version_show(struct device *dev, struct tb_switch *sw = tb_to_switch(dev); int ret; - if (mutex_lock_interruptible(&switch_lock)) - return -ERESTARTSYS; + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); if (sw->safe_mode) ret = -ENODATA; @@ -946,7 +988,7 @@ static ssize_t nvm_version_show(struct device *dev, else ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); - mutex_unlock(&switch_lock); + mutex_unlock(&sw->tb->lock); return ret; } @@ -1264,13 +1306,14 @@ int tb_switch_configure(struct tb_switch *sw) return tb_plug_events_active(sw, true); } -static void tb_switch_set_uuid(struct tb_switch *sw) +static int tb_switch_set_uuid(struct tb_switch *sw) { u32 uuid[4]; - int cap; + int cap, ret; + ret = 0; if (sw->uuid) - return; + return ret; /* * The newer controllers include fused UUID as part of link @@ -1278,7 +1321,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw) */ cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); if (cap > 0) { - tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4); + ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4); + if (ret) + return ret; } else { /* * ICM generates UUID based on UID and fills the upper @@ -1293,6 +1338,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw) } sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); + if (!sw->uuid) + ret = -ENOMEM; + return ret; } static int tb_switch_add_dma_port(struct tb_switch *sw) @@ -1301,13 +1349,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) int ret; switch (sw->generation) { - case 3: - break; - case 2: /* Only root switch can be upgraded */ if (tb_route(sw)) return 0; + + /* fallthrough */ + case 3: + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; break; default: @@ -1327,6 +1378,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (!sw->dma_port) return 0; + /* + * If there is status already set then authentication failed + * when the dma_port_flash_update_auth() returned. Power cycling + * is not needed (it was done already) so only thing we do here + * is to unblock runtime PM of the root port. 
+ */ + nvm_get_auth_status(sw, &status); + if (status) { + if (!tb_route(sw)) + nvm_authenticate_complete(sw); + return 0; + } + /* * Check status of the previous flash authentication. If there * is one we need to power cycle the switch in any case to make @@ -1336,9 +1400,12 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (ret <= 0) return ret; + /* Now we can allow root port to suspend again */ + if (!tb_route(sw)) + nvm_authenticate_complete(sw); + if (status) { tb_sw_info(sw, "switch flash authentication failed\n"); - tb_switch_set_uuid(sw); nvm_set_auth_status(sw, status); } @@ -1388,7 +1455,9 @@ int tb_switch_add(struct tb_switch *sw) } tb_sw_info(sw, "uid: %#llx\n", sw->uid); - tb_switch_set_uuid(sw); + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; for (i = 0; i <= sw->config.max_port_number; i++) { if (sw->ports[i].disabled) { diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 5067d69d05018b2fcf1bd214fdb5937ddaa60083..7a0ee9836a8a76a023d50241ea012fb2b6a6f54d 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -79,8 +79,7 @@ struct tb_switch_nvm { * @depth: Depth in the chain this switch is connected (ICM only) * * When the switch is being added or removed to the domain (other - * switches) you need to have domain lock held. For switch authorization - * internal switch_lock is enough. + * switches) you need to have domain lock held. */ struct tb_switch { struct device dev; diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index db8bece6332706aae480f33e64f3169f934266e0..befe754906979d2458bbecebb54e1db8d6c240e5 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -743,6 +743,7 @@ static void enumerate_services(struct tb_xdomain *xd) struct tb_service *svc; struct tb_property *p; struct device *dev; + int id; /* * First remove all services that are not available anymore in @@ -771,7 +772,12 @@ static void enumerate_services(struct tb_xdomain *xd) break; } - svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + if (id < 0) { + kfree(svc); + break; + } + svc->id = id; svc->dev.bus = &tb_bus_type; svc->dev.type = &tb_service_type; svc->dev.parent = &xd->dev; diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 0840d27381ea79fa501bee4e26f3f928fa58031f..dcfd593d0b2b8cbc30b9cb3b61be1cb56a96ab13 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -389,6 +389,15 @@ config GOLDFISH_TTY_EARLY_CONSOLE default y if GOLDFISH_TTY=y select SERIAL_EARLYCON +config IPWIRELESS + tristate "IPWireless 3G UMTS PCMCIA card support" + depends on PCMCIA && NETDEVICES + select PPP + help + This is a driver for 3G UMTS PCMCIA card from IPWireless company. In + some countries (for example Czech Republic, T-Mobile ISP) this card + is shipped for service called UMTS 4G. + config MIPS_EJTAG_FDC_TTY bool "MIPS EJTAG Fast Debug Channel TTY" depends on MIPS_CDMM @@ -441,4 +450,28 @@ config VCC depends on SUN_LDOMS help Support for Sun logical domain consoles. + +config LDISC_AUTOLOAD + bool "Automatically load TTY Line Disciplines" + default y + help + Historically the kernel has always automatically loaded any + line discipline that is in a kernel module when a user asks + for it to be loaded with the TIOCSETD ioctl, or through other + means. 
This is not always the best thing to do on systems + where you know you will not be using some of the more + "ancient" line disciplines, so prevent the kernel from doing + this unless the request is coming from a process with the + CAP_SYS_MODULE permissions. + + Say 'Y' here if you trust your userspace users to do the right + thing, or if you have only provided the line disciplines that + you know you will be using, or if you wish to continue to use + the traditional method of on-demand loading of these modules + by any user. + + This functionality can be changed at runtime with the + dev.tty.ldisc_autoload sysctl, this configuration option will + only set the default value of this functionality. + endif # TTY diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 6d3c58051ce3362da0aed135db0c6d70022460a0..db048dbe9f785fc81c5cfbc92bfab5ff063749e9 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -556,7 +556,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, } info->idle_stats.recv_idle = jiffies; } - tty_schedule_flip(port); + tty_flip_buffer_push(port); /* end of service */ cyy_writeb(info, CyRIR, save_xir & 0x3f); @@ -996,7 +996,7 @@ static void cyz_handle_rx(struct cyclades_port *info) mod_timer(&info->rx_full_timer, jiffies + 1); #endif info->idle_stats.recv_idle = jiffies; - tty_schedule_flip(&info->port); + tty_flip_buffer_push(&info->port); /* Update rx_get */ cy_writel(&buf_ctrl->rx_get, new_rx_get); @@ -1172,7 +1172,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) if (delta_count) wake_up_interruptible(&info->port.delta_msr_wait); if (special_count) - tty_schedule_flip(&info->port); + tty_flip_buffer_push(&info->port); } } diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c8c5cdfc5e19908e3110ec6ac1532b6896aed9a2..ed0f3a7b8fe5edc9ee7571fd3afeb30edb7f140c 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -151,7 +151,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) address = (unsigned long)(void *)buf; goldfish_tty_rw(qtty, address, count, 0); - tty_schedule_flip(&qtty->port); + tty_flip_buffer_push(&qtty->port); return IRQ_HANDLED; } diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c index 75155bde2b8810306dc19f97e47f334c26a3979d..31f53fa77e4af5f1c4cd1ecd4677210d1d515cf0 100644 --- a/drivers/tty/hvc/hvc_riscv_sbi.c +++ b/drivers/tty/hvc/hvc_riscv_sbi.c @@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init); static int __init hvc_sbi_console_init(void) { hvc_instantiate(0, 0, &hvc_sbi_ops); - add_preferred_console("hvc", 0, NULL); return 0; } diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c index 59eaa620bf13a00ff28f3c3d1902b5a30e63b029..80fd06fbd712a79a7200ee63072846accd38d386 100644 --- a/drivers/tty/hvc/hvc_vio.c +++ b/drivers/tty/hvc/hvc_vio.c @@ -107,6 +107,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count) return got; } +/** + * hvterm_raw_put_chars: send characters to firmware for given vterm adapter + * @vtermno: The virtual terminal number. + * @buf: The characters to send. Because of the underlying hypercall in + * hvc_put_chars(), this buffer must be at least 16 bytes long, even if + * you are sending fewer chars. + * @count: number of chars to send. 
+ */ static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count) { struct hvterm_priv *pv = hvterm_privs[vtermno]; @@ -219,6 +227,7 @@ static const struct hv_ops hvterm_hvsi_ops = { static void udbg_hvc_putc(char c) { int count = -1; + unsigned char bounce_buffer[16]; if (!hvterm_privs[0]) return; @@ -229,7 +238,12 @@ static void udbg_hvc_putc(char c) do { switch(hvterm_privs[0]->proto) { case HV_PROTOCOL_RAW: - count = hvterm_raw_put_chars(0, &c, 1); + /* + * hvterm_raw_put_chars requires at least a 16-byte + * buffer, so go via the bounce buffer + */ + bounce_buffer[0] = c; + count = hvterm_raw_put_chars(0, bounce_buffer, 1); break; case HV_PROTOCOL_HVSI: count = hvterm_hvsi_put_chars(0, &c, 1); diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index dc43fa96c3de7883a63a4ce61de21c100ef22f2e..62df09f7cee656018cd9c7e6120c95d1b1ba7d74 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -37,6 +37,8 @@ struct xencons_info { struct xenbus_device *xbdev; struct xencons_interface *intf; unsigned int evtchn; + XENCONS_RING_IDX out_cons; + unsigned int out_cons_same; struct hvc_struct *hvc; int irq; int vtermno; @@ -131,6 +133,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) XENCONS_RING_IDX cons, prod; int recv = 0; struct xencons_info *xencons = vtermno_to_xencons(vtermno); + unsigned int eoiflag = 0; + if (xencons == NULL) return -EINVAL; intf = xencons->intf; @@ -146,7 +150,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) mb(); /* read ring before consuming */ intf->in_cons = cons; - notify_daemon(xencons); + /* + * When to mark interrupt having been spurious: + * - there was no new data to be read, and + * - the backend did not consume some output bytes, and + * - the previous round with no read data didn't see consumed bytes + * (we might have a race with an interrupt being in flight while + * updating xencons->out_cons, so account for that by allowing one + * round without any visible reason) + */ + if (intf->out_cons != xencons->out_cons) { + xencons->out_cons = intf->out_cons; + xencons->out_cons_same = 0; + } + if (recv) { + notify_daemon(xencons); + } else if (xencons->out_cons_same++ > 1) { + eoiflag = XEN_EOI_FLAG_SPURIOUS; + } + + xen_irq_lateeoi(xencons->irq, eoiflag); + return recv; } @@ -375,7 +399,7 @@ static int xencons_connect_backend(struct xenbus_device *dev, if (ret) return ret; info->evtchn = evtchn; - irq = bind_evtchn_to_irq(evtchn); + irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn); if (irq < 0) return irq; info->irq = irq; @@ -539,7 +563,7 @@ static int __init xen_hvc_init(void) return r; info = vtermno_to_xencons(HVC_COOKIE); - info->irq = bind_evtchn_to_irq(info->evtchn); + info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn); } if (info->irq < 0) info->irq = 0; /* NO_IRQ */ diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c index 3475e841ef5c1f9c1902a2991ca9aa2da26d0729..4c18bbfe1a92ee9359f5c14d5e228c43fbd9fb34 100644 --- a/drivers/tty/ipwireless/main.c +++ b/drivers/tty/ipwireless/main.c @@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ipw->common_memory = ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); + if (!ipw->common_memory) { + ret = -ENOMEM; + goto exit1; + } if (!request_mem_region(p_dev->resource[2]->start, resource_size(p_dev->resource[2]), IPWIRELESS_PCCARD_NAME)) { @@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device 
*p_dev, void *priv_data) ipw->attr_memory = ioremap(p_dev->resource[3]->start, resource_size(p_dev->resource[3])); + if (!ipw->attr_memory) { + ret = -ENOMEM; + goto exit3; + } if (!request_mem_region(p_dev->resource[3]->start, resource_size(p_dev->resource[3]), IPWIRELESS_PCCARD_NAME)) { diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 250a19f042d74ce7bf46c21a15addfb34a193d1a..181381fa5126ca3303c3fd140fc41d4e84fd2dea 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -1393,7 +1393,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle, if (inited && !tty_throttled(tty) && MoxaPortRxQueue(p) > 0) { /* RX */ MoxaPortReadData(p); - tty_schedule_flip(&p->port); + tty_flip_buffer_push(&p->port); } } else { clear_bit(EMPTYWAIT, &p->statusflags); @@ -1418,7 +1418,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle, if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */ tty_insert_flip_char(&p->port, 0, TTY_BREAK); - tty_schedule_flip(&p->port); + tty_flip_buffer_push(&p->port); } if (intr & IntrLine) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 86b7e20ffd7f1e90f1087f9212a85f201c057287..6a41ea82fe471d5eb41484589218f47b6045046f 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -313,6 +313,7 @@ static struct tty_driver *gsm_tty_driver; #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 +#define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; @@ -427,7 +428,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE - || *input == XON || *input == XOFF) { + || (*input & ISO_IEC_646_MASK) == XON + || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; @@ -1484,7 +1486,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1912,8 +1914,12 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) break; case GSM_DATA: /* Data */ gsm->buf[gsm->count++] = c; - if (gsm->count == gsm->len) + if (gsm->count >= MAX_MRU) { + gsm->bad_size++; + gsm->state = GSM_SEARCH; + } else if (gsm->count >= gsm->len) { gsm->state = GSM_FCS; + } break; case GSM_FCS: /* FCS follows the packet */ gsm->received_fcs = c; @@ -1992,7 +1998,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) gsm->state = GSM_DATA; break; case GSM_DATA: /* Data */ - if (gsm->count > gsm->mru) { /* Allow one for the FCS */ + if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */ gsm->state = GSM_OVERRUN; gsm->bad_size++; } else @@ -2022,49 +2028,37 @@ static void gsm_error(struct gsm_mux *gsm, gsm->io_error++; } -static int gsm_disconnect(struct gsm_mux *gsm) -{ - struct gsm_dlci *dlci = gsm->dlci[0]; - struct gsm_control *gc; - - if (!dlci) - return 0; - - /* In theory disconnecting DLCI 0 is sufficient but for some - modems this is apparently not the case. 
*/ - gc = gsm_control_send(gsm, CMD_CLD, NULL, 0); - if (gc) - gsm_control_wait(gsm, gc); - - del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - - gsm_dlci_begin_close(dlci); - wait_event_interruptible(gsm->event, - dlci->state == DLCI_CLOSED); - - if (signal_pending(current)) - return -EINTR; - - return 0; -} - /** * gsm_cleanup_mux - generic GSM protocol cleanup * @gsm: our mux + * @disc: disconnect link? * * Clean up the bits of the mux which are the same for all framing * protocols. Remove the mux from the mux table, stop all the timers * and then shut down each device hanging up the channels as we go. */ -static void gsm_cleanup_mux(struct gsm_mux *gsm) +static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; - struct gsm_dlci *dlci = gsm->dlci[0]; + struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; + unsigned long flags; gsm->dead = 1; + mutex_lock(&gsm->mutex); + + dlci = gsm->dlci[0]; + if (dlci) { + if (disc && dlci->state != DLCI_CLOSED) { + gsm_dlci_begin_close(dlci); + wait_event(gsm->event, dlci->state == DLCI_CLOSED); + } + dlci->dead = true; + } + + /* Finish outstanding timers, making sure they are done */ + del_timer_sync(&gsm->t2_timer); spin_lock(&gsm_mux_lock); for (i = 0; i < MAX_MUX; i++) { @@ -2078,21 +2072,18 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm) if (i == MAX_MUX) return; - del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - if (dlci) - dlci->dead = 1; - /* Free up any link layer users */ - mutex_lock(&gsm->mutex); for (i = 0; i < NUM_DLCI; i++) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ + + spin_lock_irqsave(&gsm->tx_lock, flags); list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_list); + spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** @@ -2283,7 +2274,7 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) WARN_ON(tty != gsm->tty); for (i = 1; i < NUM_DLCI; i++) tty_unregister_device(gsm_tty_driver, base + i); - gsm_cleanup_mux(gsm); + gsm_cleanup_mux(gsm, false); tty_kref_put(gsm->tty); gsm->tty = NULL; } @@ -2373,6 +2364,9 @@ static int gsmld_open(struct tty_struct *tty) struct gsm_mux *gsm; int ret; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (tty->ops->write == NULL) return -EINVAL; @@ -2389,7 +2383,7 @@ static int gsmld_open(struct tty_struct *tty) ret = gsmld_attach_gsm(tty, gsm); if (ret != 0) { - gsm_cleanup_mux(gsm); + gsm_cleanup_mux(gsm, false); mux_put(gsm); } return ret; @@ -2538,19 +2532,11 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, /* * Close down what is needed, restart and initiate the new - * configuration + * configuration. On the first time there is no DLCI[0] + * and closing or cleaning up is not necessary. 
*/ - - if (need_close || need_restart) { - int ret; - - ret = gsm_disconnect(gsm); - - if (ret) - return ret; - } - if (need_restart) - gsm_cleanup_mux(gsm); + if (need_close || need_restart) + gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index dabb391909aad63d9152e2cdd5dab35f73d1fe7e..1bf719578f7bbcf54e01f2567ca335d8398afc75 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -573,7 +573,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, return -EIO; /* verify user access to buffer */ - if (!access_ok(VERIFY_WRITE, buf, nr)) { + if (!access_ok(buf, nr)) { printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user " "buffer\n", __FILE__, __LINE__); return -EFAULT; @@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, /* too large for caller's buffer */ ret = -EOVERFLOW; } else { + __set_current_state(TASK_RUNNING); if (copy_to_user(buf, rbuf->buf, rbuf->count)) ret = -EFAULT; else @@ -612,7 +613,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, } /* no data */ - if (file->f_flags & O_NONBLOCK) { + if (tty_io_nonblock(tty, file)) { ret = -EAGAIN; break; } @@ -679,7 +680,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, if (tbuf) break; - if (file->f_flags & O_NONBLOCK) { + if (tty_io_nonblock(tty, file)) { error = -EAGAIN; break; } @@ -967,6 +968,11 @@ static int __init n_hdlc_init(void) } /* end of init_module() */ +#ifdef CONFIG_SPARC +#undef __exitdata +#define __exitdata +#endif + static const char hdlc_unregister_ok[] __exitdata = KERN_INFO "N_HDLC: line discipline unregistered\n"; static const char hdlc_unregister_fail[] __exitdata = diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index dbf1ab36758ebd6404e97a2fb0fbc4273f6c0460..a3969b773cbe42a98d2188d2a0f5e6719e88faa2 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -1078,7 +1078,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, pMsg = remove_msg(pInfo, pClient); if (pMsg == NULL) { /* no messages available. 
*/ - if (file->f_flags & O_NONBLOCK) { + if (tty_io_nonblock(tty, file)) { ret = -EAGAIN; goto unlock; } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 43174220170924e094567ad6271703bd881e2a6f..bc6345664fdb53f4c51315c04fa59beda22dd52a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -152,17 +152,28 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i) return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } +/* If we are not echoing the data, perhaps this is a secret so erase it */ +static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size) +{ + bool icanon = !!L_ICANON(tty); + bool no_echo = !L_ECHO(tty); + + if (icanon && no_echo) + memset(buffer, 0x00, size); +} + static int tty_copy_to_user(struct tty_struct *tty, void __user *to, size_t tail, size_t n) { struct n_tty_data *ldata = tty->disc_data; size_t size = N_TTY_BUF_SIZE - tail; - const void *from = read_buf_addr(ldata, tail); + void *from = read_buf_addr(ldata, tail); int uncopied; if (n > size) { tty_audit_add_data(tty, from, size); uncopied = copy_to_user(to, from, size); + zero_buffer(tty, from, size - uncopied); if (uncopied) return uncopied; to += size; @@ -171,7 +182,9 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to, } tty_audit_add_data(tty, from, n); - return copy_to_user(to, from, n); + uncopied = copy_to_user(to, from, n); + zero_buffer(tty, from, n - uncopied); + return uncopied; } /** @@ -191,8 +204,8 @@ static void n_tty_kick_worker(struct tty_struct *tty) struct n_tty_data *ldata = tty->disc_data; /* Did the input worker stop? Restart it */ - if (unlikely(ldata->no_room)) { - ldata->no_room = 0; + if (unlikely(READ_ONCE(ldata->no_room))) { + WRITE_ONCE(ldata->no_room, 0); WARN_RATELIMIT(tty->port->itty == NULL, "scheduling with invalid itty\n"); @@ -535,9 +548,9 @@ static ssize_t process_output_block(struct tty_struct *tty, mutex_lock(&ldata->output_lock); space = tty_write_room(tty); - if (!space) { + if (space <= 0) { mutex_unlock(&ldata->output_lock); - return 0; + return space; } if (nr > space) nr = space; @@ -1362,7 +1375,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return 0; } } @@ -1643,7 +1656,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } @@ -1689,7 +1702,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, down_read(&tty->termios_rwsem); - while (1) { + do { /* * When PARMRK is set, each input char may take up to 3 chars * in the read buf; reduce the buffer space avail by 3x @@ -1714,7 +1727,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, if (overflow && room < 0) ldata->read_head--; room = overflow; - ldata->no_room = flow && !room; + WRITE_ONCE(ldata->no_room, flow && !room); } else overflow = 0; @@ -1731,7 +1744,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, fp += n; count -= n; rcvd += n; - } + } while (!test_bit(TTY_LDISC_CHANGING, &tty->flags)); tty->receive_room = room; @@ -1745,6 +1758,17 @@ n_tty_receive_buf_common(struct 
tty_struct *tty, const unsigned char *cp, } else n_tty_check_throttle(tty); + if (unlikely(ldata->no_room)) { + /* + * Barrier here is to ensure to read the latest read_tail in + * chars_in_buffer() and to make sure that read_tail is not loaded + * before ldata->no_room is set. + */ + smp_mb(); + if (!chars_in_buffer(tty)) + n_tty_kick_worker(tty); + } + up_read(&tty->termios_rwsem); return rcvd; @@ -1960,11 +1984,12 @@ static int copy_from_read_buf(struct tty_struct *tty, n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); n = min(*nr, n); if (n) { - const unsigned char *from = read_buf_addr(ldata, tail); + unsigned char *from = read_buf_addr(ldata, tail); retval = copy_to_user(*b, from, n); n -= retval; is_eof = n == 1 && *from == EOF_CHAR(tty); tty_audit_add_data(tty, from, n); + zero_buffer(tty, from, n); smp_store_release(&ldata->read_tail, ldata->read_tail + n); /* Turn single EOF into zero-length read */ if (L_EXTPROC(tty) && ldata->icanon && is_eof && @@ -2197,7 +2222,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, break; if (!timeout) break; - if (file->f_flags & O_NONBLOCK) { + if (tty_io_nonblock(tty, file)) { retval = -EAGAIN; break; } @@ -2247,8 +2272,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, if (time) timeout = time; } - if (tail != ldata->read_tail) + if (tail != ldata->read_tail) { + /* + * Make sure no_room is not read in n_tty_kick_worker() + * before setting ldata->read_tail in copy_from_read_buf(). + */ + smp_mb(); n_tty_kick_worker(tty); + } up_read(&tty->termios_rwsem); remove_wait_queue(&tty->read_wait, &wait); @@ -2351,7 +2382,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, } if (!nr) break; - if (file->f_flags & O_NONBLOCK) { + if (tty_io_nonblock(tty, file)) { retval = -EAGAIN; break; } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 678406e0948b227ab0188d938450b384e4dbae82..73226e482e91990e105a7b48689cf0a98149ab8a 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -28,6 +28,7 @@ #include #include #include +#include #undef TTY_DEBUG_HANGUP #ifdef TTY_DEBUG_HANGUP @@ -110,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty) static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; - unsigned long flags; - if (tty->stopped) + if (tty->stopped || !c) return 0; - if (c > 0) { - spin_lock_irqsave(&to->port->lock, flags); - /* Stuff the data into the input queue of the other end */ - c = tty_insert_flip_string(to->port, buf, c); - /* And shovel */ - if (c) - tty_flip_buffer_push(to->port); - spin_unlock_irqrestore(&to->port->lock, flags); - } - return c; + return tty_insert_flip_string_and_push_buffer(to->port, buf, c); } /** @@ -488,6 +479,7 @@ static int pty_bsd_ioctl(struct tty_struct *tty, return -ENOIOCTLCMD; } +#ifdef CONFIG_COMPAT static long pty_bsd_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { @@ -495,8 +487,11 @@ static long pty_bsd_compat_ioctl(struct tty_struct *tty, * PTY ioctls don't require any special translation between 32-bit and * 64-bit userspace, they are already compatible. 
*/ - return pty_bsd_ioctl(tty, cmd, arg); + return pty_bsd_ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); } +#else +#define pty_bsd_compat_ioctl NULL +#endif static int legacy_count = CONFIG_LEGACY_PTY_COUNT; /* @@ -676,6 +671,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty, return -ENOIOCTLCMD; } +#ifdef CONFIG_COMPAT static long pty_unix98_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { @@ -683,8 +679,12 @@ static long pty_unix98_compat_ioctl(struct tty_struct *tty, * PTY ioctls don't require any special translation between 32-bit and * 64-bit userspace, they are already compatible. */ - return pty_unix98_ioctl(tty, cmd, arg); + return pty_unix98_ioctl(tty, cmd, + cmd == TIOCSIG ? arg : (unsigned long)compat_ptr(arg)); } +#else +#define pty_unix98_compat_ioctl NULL +#endif /** * ptm_unix98_lookup - find a pty master diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index b121d8f8f3d7d1a9d1dfc4341d61d51a5253a3f1..27aeca30eeae16845644a6edaf2f53d798ca2609 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -266,7 +266,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1 module_param_array(pc104_4, ulong, NULL, 0); MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,..."); -static int rp_init(void); +static int __init rp_init(void); static void rp_cleanup_module(void); module_init(rp_init); diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index ebfb0bd5bef59d679c36cd0a2562145a11509f1a..8c8aa3b9c29894b556eab82e9c7c6555352b609f 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -217,6 +217,13 @@ extern int serial8250_rx_dma(struct uart_8250_port *); extern void serial8250_rx_dma_flush(struct uart_8250_port *); extern int serial8250_request_dma(struct uart_8250_port *); extern void serial8250_release_dma(struct uart_8250_port *); + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + return dma && dma->tx_running; +} #else static inline int serial8250_tx_dma(struct uart_8250_port *p) { @@ -232,6 +239,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p) return -1; } static inline void serial8250_release_dma(struct uart_8250_port *p) { } + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + return false; +} #endif static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 8fe3d0ed229ed27c9bdb53ade9c356aaed1abc85..66e0d5d7171bfad3252cfeb56dcb0625a31a1035 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -528,6 +528,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -551,7 +552,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) @@ -946,6 +946,21 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * return NULL; } +static void serial_8250_overrun_backoff_work(struct work_struct *work) +{ + struct uart_8250_port *up 
= + container_of(to_delayed_work(work), struct uart_8250_port, + overrun_backoff); + struct uart_port *port = &up->port; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + up->ier |= UART_IER_RLSI | UART_IER_RDI; + up->port.read_status_mask |= UART_LSR_DR; + serial_out(up, UART_IER, up->ier); + spin_unlock_irqrestore(&port->lock, flags); +} + /** * serial8250_register_8250_port - register a serial port * @up: serial port template @@ -1048,8 +1063,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); - if (ret == 0) - ret = uart->port.line; + if (ret) + goto err; + + ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", @@ -1059,10 +1076,26 @@ int serial8250_register_8250_port(struct uart_8250_port *up) ret = 0; } + + /* Initialise interrupt backoff work if required */ + if (up->overrun_backoff_time_ms > 0) { + uart->overrun_backoff_time_ms = + up->overrun_backoff_time_ms; + INIT_DELAYED_WORK(&uart->overrun_backoff, + serial_8250_overrun_backoff_work); + } else { + uart->overrun_backoff_time_ms = 0; + } } + mutex_unlock(&serial_mutex); return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); @@ -1093,6 +1126,7 @@ void serial8250_unregister_port(int line) uart->port.type = PORT_UNKNOWN; uart->port.dev = &serial8250_isa_devs->dev; uart->capabilities = 0; + serial8250_init_port(uart); serial8250_apply_quirks(uart); uart_add_one_port(&serial8250_reg, &uart->port); } else { diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index bfa1a857f3ffab6bd039e532c68fcf85c1afb0d4..c8b17070faf4e2e5e948c6c98d5b50893d9c612b 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -48,19 +48,39 @@ static void __dma_rx_complete(void *param) struct uart_8250_dma *dma = p->dma; struct tty_port *tty_port = &p->port.state->port; struct dma_tx_state state; + enum dma_status dma_status; int count; - dma->rx_running = 0; - dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + /* + * New DMA Rx can be started during the completion handler before it + * could acquire port's lock and it might still be ongoing. Don't to + * anything in such case. 
+ */ + dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + if (dma_status == DMA_IN_PROGRESS) + return; count = dma->rx_size - state.residue; tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += count; + dma->rx_running = 0; tty_flip_buffer_push(tty_port); } +static void dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + if (dma->rx_running) + __dma_rx_complete(p); + spin_unlock_irqrestore(&p->port.lock, flags); +} + int serial8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; @@ -126,7 +146,7 @@ int serial8250_rx_dma(struct uart_8250_port *p) return -EBUSY; dma->rx_running = 1; - desc->callback = __dma_rx_complete; + desc->callback = dma_rx_complete; desc->callback_param = p; dma->rx_cookie = dmaengine_submit(desc); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index d31b975dd3fd7b7c4ac052390bca7884b2bff468..cc9d1f416db8426c889b3135df7a04722604eac5 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -140,12 +140,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value) /* Returns once the transmitter is empty or we run out of retries */ static void dw8250_tx_wait_empty(struct uart_port *p) { + struct uart_8250_port *up = up_to_u8250p(p); unsigned int tries = 20000; unsigned int delay_threshold = tries - 1000; unsigned int lsr; while (tries--) { lsr = readb (p->membase + (UART_LSR << p->regshift)); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (lsr & UART_LSR_TEMT) break; @@ -365,7 +368,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) static bool dw8250_idma_filter(struct dma_chan *chan, void *param) { - return param == chan->device->dev->parent; + return param == chan->device->dev; } /* @@ -434,7 +437,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) data->uart_16550_compatible = true; } - /* Platforms with iDMA */ + /* Platforms with iDMA 64-bit */ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) { data->dma.rx_param = p->dev->parent; @@ -769,7 +772,7 @@ static struct platform_driver dw8250_platform_driver = { .name = "dw-apb-uart", .pm = &dw8250_pm_ops, .of_match_table = dw8250_of_match, - .acpi_match_table = ACPI_PTR(dw8250_acpi_match), + .acpi_match_table = dw8250_acpi_match, }, .probe = dw8250_probe, .remove = dw8250_remove, diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c new file mode 100644 index 0000000000000000000000000000000000000000..1cf229cca59281923fd78199889ec5369dc46e9f --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Synopsys DesignWare 8250 library. 
*/ + +#include +#include +#include +#include +#include +#include + +#include "8250_dwlib.h" + +/* Offsets for the DesignWare specific registers */ +#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define DW_UART_CPR 0xf4 /* Component Parameter Register */ +#define DW_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define DW_UART_CPR_AFCE_MODE (1 << 4) +#define DW_UART_CPR_THRE_MODE (1 << 5) +#define DW_UART_CPR_SIR_MODE (1 << 6) +#define DW_UART_CPR_SIR_LP_MODE (1 << 7) +#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define DW_UART_CPR_FIFO_ACCESS (1 << 9) +#define DW_UART_CPR_FIFO_STAT (1 << 10) +#define DW_UART_CPR_SHADOW (1 << 11) +#define DW_UART_CPR_ENCODED_PARMS (1 << 12) +#define DW_UART_CPR_DMA_EXTRA (1 << 13) +#define DW_UART_CPR_FIFO_MODE (0xff << 16) + +/* Helper for FIFO size calculation */ +#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int dw8250_get_divisor(struct uart_port *p, unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct dw8250_port_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + dw8250_writel_ext(p, DW_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +void dw8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg, old_dlf; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. 
+ */ + reg = dw8250_readl_ext(p, DW_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + /* Preserve value written by firmware or bootloader */ + old_dlf = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, ~0U); + reg = dw8250_readl_ext(p, DW_UART_DLF); + dw8250_writel_ext(p, DW_UART_DLF, old_dlf); + + if (reg) { + struct dw8250_port_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = dw8250_get_divisor; + p->set_divisor = dw8250_set_divisor; + } + + reg = dw8250_readl_ext(p, DW_UART_CPR); + if (!reg) + return; + + /* Select the type based on FIFO */ + if (reg & DW_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = DW_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & DW_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & DW_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} +EXPORT_SYMBOL_GPL(dw8250_setup_port); diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h new file mode 100644 index 0000000000000000000000000000000000000000..87a4db2a8aba6499bebe9a43fe2c0a416aa31986 --- /dev/null +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Synopsys DesignWare 8250 library header file. */ + +#include + +#include "8250.h" + +struct dw8250_port_data { + /* Port properties */ + int line; + + /* DMA operations */ + struct uart_8250_dma dma; + + /* Hardware configuration */ + u8 dlf_size; +}; + +void dw8250_setup_port(struct uart_port *p); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 0089aa305ef9f931dcba31f4828f299241dea4dc..c1444b1400159d829e344651de3c00d8af11dfe1 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -535,6 +535,7 @@ static void exar_pci_remove(struct pci_dev *pcidev) for (i = 0; i < priv->nr; i++) serial8250_unregister_port(priv->line[i]); + /* Ensure that every init quirk is properly torn down */ if (priv->board->exit) priv->board->exit(pcidev); } @@ -549,10 +550,6 @@ static int __maybe_unused exar_suspend(struct device *dev) if (priv->line[i] >= 0) serial8250_suspend_port(priv->line[i]); - /* Ensure that every init quirk is properly torn down */ - if (priv->board->exit) - priv->board->exit(pcidev); - return 0; } @@ -604,6 +601,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -665,9 +680,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x), + 
EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4), diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 79a4958b3f5c8a1d6c34bd44996a199494d2a66d..440023069f4f3c99116160908e6f4e0589d65069 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -197,12 +197,12 @@ static int fintek_8250_rs485_config(struct uart_port *port, if (!pdata) return -EINVAL; - /* Hardware do not support same RTS level on send and receive */ - if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == - !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) - return -EINVAL; if (rs485->flags & SER_RS485_ENABLED) { + /* Hardware do not support same RTS level on send and receive */ + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) + return -EINVAL; memset(rs485->padding, 0, sizeof(rs485->padding)); config |= RS485_URA; } else { diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index 6640a4c7ddd1dd5e10f96049249ed97595a365c0..bb9571eed275d10cc51b7872a5f203d57a8e5f80 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -45,8 +45,29 @@ int fsl8250_handle_irq(struct uart_port *port) lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR); - if (lsr & (UART_LSR_DR | UART_LSR_BI)) + /* Process incoming characters first */ + if ((lsr & (UART_LSR_DR | UART_LSR_BI)) && + (up->ier & (UART_IER_RLSI | UART_IER_RDI))) { lsr = serial8250_rx_chars(up, lsr); + } + + /* Stop processing interrupts on input overrun */ + if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) { + unsigned long delay; + + up->ier = port->serial_in(port, UART_IER); + if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) { + port->ops->stop_rx(port); + } else { + /* Keep restarting the timer until + * the input overrun subsides. 
+ */ + cancel_delayed_work(&up->overrun_backoff); + } + + delay = msecs_to_jiffies(up->overrun_backoff_time_ms); + schedule_delayed_work(&up->overrun_backoff, delay); + } serial8250_modem_status(up); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 0809ae2aa9b141f4ef91b60518c7a566b09d8409..51cc985216ff3d96703346e440e50ce436b199c4 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c index 127017cc41d9296042b82acf2026ceb7d84fcf89..057b1eaf6d2ebfddfcf8b5bcfc7bc8c929c09dc6 100644 --- a/drivers/tty/serial/8250/8250_men_mcb.c +++ b/drivers/tty/serial/8250/8250_men_mcb.c @@ -71,8 +71,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, { struct serial_8250_men_mcb_data *data; struct resource *mem; - unsigned int num_ports; - unsigned int i; + int num_ports; + int i; void __iomem *membase; mem = mcb_get_resource(mdev, IORESOURCE_MEM); @@ -87,7 +87,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n", mdev->id, num_ports); - if (num_ports == 0 || num_ports > 4) { + if (num_ports <= 0 || num_ports > 4) { dev_err(&mdev->dev, "unexpected number of ports: %u\n", num_ports); return -ENODEV; @@ -132,7 +132,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, static void serial_8250_men_mcb_remove(struct mcb_device *mdev) { - unsigned int num_ports, i; + int num_ports, i; struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev); if (!data) diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index efa0515139f8ec052486a414f614519780ff96d3..e6c1791609ddf339427212a857eddcb1f98d8a10 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -73,6 +73,11 @@ static int pnw_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void pnw_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int tng_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -124,6 +129,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void tng_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int dnv_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -330,9 +340,9 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, mid); return 0; + err: - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); return ret; } @@ -342,8 +352,7 @@ static void mid8250_remove(struct pci_dev *pdev) serial8250_unregister_port(mid->line); - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); } static const struct mid8250_board pnw_board = { @@ -351,6 +360,7 @@ static const struct mid8250_board pnw_board = { .freq = 50000000, .base_baud = 115200, .setup = pnw_setup, + .exit = pnw_exit, }; static const struct mid8250_board tng_board = { @@ -358,6 +368,7 @@ static const struct mid8250_board tng_board = { .freq = 38400000, .base_baud = 1843200, .setup = tng_setup, + .exit = tng_exit, }; static const struct mid8250_board dnv_board = { diff --git 
a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index dd5e1cede2b5847979965aa217c29028c36f89cd..c3f933d10295eba70bc8cff4a501333109c1abeb 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) { - err = mtk8250_runtime_resume(&pdev->dev); - if (err) - return err; - } + err = mtk8250_runtime_resume(&pdev->dev); + if (err) + return err; data->line = serial8250_register_8250_port(&uart); if (data->line < 0) return data->line; + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; } @@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); serial8250_unregister_port(data->line); + mtk8250_runtime_suspend(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - mtk8250_runtime_suspend(&pdev->dev); - return 0; } diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index af8beefe9b5c30b1153ab3ac769e73faecb47ee3..dcd2105e2b60da20c8420ba13af8c9b55de37047 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -58,7 +58,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev, struct resource resource; struct device_node *np = ofdev->dev.of_node; u32 clk, spd, prop; - int ret; + int ret, irq; memset(port, 0, sizeof *port); @@ -104,8 +104,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->mapsize = resource_size(&resource); /* Check for shifted address mapping */ - if (of_property_read_u32(np, "reg-offset", &prop) == 0) + if (of_property_read_u32(np, "reg-offset", &prop) == 0) { + if (prop >= port->mapsize) { + dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n", + prop, &port->mapsize); + ret = -EINVAL; + goto err_unprepare; + } + port->mapbase += prop; + port->mapsize -= prop; + } port->iotype = UPIO_MEM; if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { @@ -130,6 +139,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->flags |= UPF_IOREMAP; } + /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. 
*/ + if (of_device_is_compatible(np, "mrvl,mmp-uart")) + port->regshift = 2; + /* Check for registers offset within the devices address range */ if (of_property_read_u32(np, "reg-shift", &prop) == 0) port->regshift = prop; @@ -143,21 +156,27 @@ static int of_platform_serial_setup(struct platform_device *ofdev, if (ret >= 0) port->line = ret; - port->irq = irq_of_parse_and_map(np, 0); - if (!port->irq) { - ret = -EPROBE_DEFER; - goto err_unprepare; + irq = of_irq_get(np, 0); + if (irq < 0) { + if (irq == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_unprepare; + } + /* IRQ support not mandatory */ + irq = 0; } + port->irq = irq; + info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); if (IS_ERR(info->rst)) { ret = PTR_ERR(info->rst); - goto err_dispose; + goto err_unprepare; } ret = reset_control_deassert(info->rst); if (ret) - goto err_dispose; + goto err_unprepare; port->type = type; port->uartclk = clk; @@ -184,8 +203,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->handle_irq = fsl8250_handle_irq; return 0; -err_dispose: - irq_dispose_mapping(port->irq); err_unprepare: clk_disable_unprepare(info->clk); err_pmruntime: @@ -236,6 +253,11 @@ static int of_platform_serial_probe(struct platform_device *ofdev) if (of_property_read_bool(ofdev->dev.of_node, "auto-flow-control")) port8250.capabilities |= UART_CAP_AFE; + if (of_property_read_u32(ofdev->dev.of_node, + "overrun-throttle-ms", + &port8250.overrun_backoff_time_ms) != 0) + port8250.overrun_backoff_time_ms = 0; + ret = serial8250_register_8250_port(&port8250); if (ret < 0) goto err_dispose; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index a019286f8bb65c5673285bdd5ad06c7b0a63aae8..cbd006fb7fbb986bde3407be6e2e02b8bb500e4f 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); count = dma->rx_size - state.residue; - + if (count < dma->rx_size) + dmaengine_terminate_async(dma->rxchan); + if (!count) + goto unlock; ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += ret; @@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) spin_unlock_irqrestore(&priv->rx_dma_lock, flags); __dma_rx_do_complete(p); - dmaengine_terminate_all(dma->rxchan); } static int omap_8250_rx_dma(struct uart_8250_port *p) @@ -1227,11 +1229,11 @@ static int omap8250_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_dma_lock); device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, -1); pm_runtime_irq_safe(&pdev->dev); - pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index f80a300b5d68f6e8ad61b7daf2544234da7e1662..e44461bc987ca3e31fbbf424e4c280a860dfaf1d 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .setup = pci_default_setup, .exit = pci_plx9050_exit, }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S, + 
.subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup, + }, /* * SBS Technologies, Inc., PMC-OCTALPRO 232 */ @@ -2557,7 +2662,7 @@ enum pci_board_num_t { pbn_panacom2, pbn_panacom4, pbn_plx_romulus, - pbn_endrun_2_4000000, + pbn_endrun_2_3906250, pbn_oxsemi, pbn_oxsemi_1_4000000, pbn_oxsemi_2_4000000, @@ -3073,10 +3178,10 @@ static struct pciserial_board pci_boards[] = { * signal now many ports are available * 2 port 952 Uart support */ - [pbn_endrun_2_4000000] = { + [pbn_endrun_2_3906250] = { .flags = FL_BASE0, .num_ports = 2, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, @@ -3420,6 +3525,11 @@ static int serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) { int num_iomem, num_port, first_port = -1, i; + int rc; + + rc = serial_pci_is_class_communication(dev); + if (rc) + return rc; /* * Should we try to make guesses for multiport serial devices later? 
@@ -3647,10 +3757,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) board = &pci_boards[ent->driver_data]; - rc = serial_pci_is_class_communication(dev); - if (rc) - return rc; - rc = serial_pci_is_blacklisted(dev); if (rc) return rc; @@ -3928,7 +4034,7 @@ static const struct pci_device_id serial_pci_tbl[] = { */ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_endrun_2_4000000 }, + pbn_endrun_2_3906250 }, /* * Quatech cards. These actually have configurable clocks but for * now we just use the default. @@ -4574,10 +4680,10 @@ static const struct pci_device_id serial_pci_tbl[] = { */ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7954 }, @@ -4586,10 +4692,10 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7954 }, @@ -4598,10 +4704,10 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7954 }, @@ -4610,13 +4716,13 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7951 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7954 }, @@ -4625,16 +4731,16 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
pbn_pericom_PI7C9X7954 }, @@ -4643,13 +4749,13 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7954 }, + pbn_pericom_PI7C9X7952 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7958 }, + pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7958 }, + pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7958 }, @@ -4658,19 +4764,19 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_pericom_PI7C9X7958 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7958 }, + pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7958 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7958 }, + pbn_pericom_PI7C9X7954 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_pericom_PI7C9X7958 }, { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_pericom_PI7C9X7958 }, + pbn_pericom_PI7C9X7954 }, /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 3f779d25ec0cdfa10575b153b62dc34c0c5b218b..f920bfa1645284d342d0233da24ba4d689c9bcb6 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -313,7 +313,11 @@ static const struct serial8250_config uart_config[] = { /* Uart divisor latch read */ static int default_serial_dl_read(struct uart_8250_port *up) { - return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8; + /* Assign these in pieces to truncate any bits above 7. */ + unsigned char dll = serial_in(up, UART_DLL); + unsigned char dlm = serial_in(up, UART_DLM); + + return dll | dlm << 8; } /* Uart divisor latch write */ @@ -1301,9 +1305,11 @@ static void autoconfig(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); - scratch = serial_in(up, UART_IIR) >> 6; - switch (scratch) { + /* Assign this as it is to truncate any bits above 7. */ + scratch = serial_in(up, UART_IIR); + + switch (scratch >> 6) { case 0: autoconfig_8250(up); break; @@ -1515,6 +1521,8 @@ static inline void __stop_tx(struct uart_8250_port *p) if (em485) { unsigned char lsr = serial_in(p, UART_LSR); + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + /* * To provide required timeing and allow FIFO transfer, * __stop_tx_rs485() must be called only when both FIFO and @@ -1587,6 +1595,18 @@ static inline void start_tx_rs485(struct uart_port *port) if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) serial8250_stop_rx(&up->port); + /* + * While serial8250_em485_handle_stop_tx() is a noop if + * em485->active_timer != &em485->stop_tx_timer, it might happen that + * the timer is still armed and triggers only after the current bunch of + * chars is send and em485->active_timer == &em485->stop_tx_timer again. + * So cancel the timer. 
There is still a theoretical race condition if + * the timer is already running and only comes around to check for + * em485->active_timer when &em485->stop_tx_timer is armed again. + */ + if (em485->active_timer == &em485->stop_tx_timer) + hrtimer_try_to_cancel(&em485->stop_tx_timer); + em485->active_timer = NULL; mcr = serial8250_in_MCR(up); @@ -1844,10 +1864,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status); static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) { switch (iir & 0x3f) { - case UART_IIR_RX_TIMEOUT: - serial8250_rx_dma_flush(up); + case UART_IIR_RDI: + if (!up->dma->rx_running) + break; /* fall-through */ case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + serial8250_rx_dma_flush(up); return true; } return up->dma->rx_dma(up); @@ -1861,6 +1884,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; unsigned long flags; struct uart_8250_port *up = up_to_u8250p(port); + bool skip_rx = false; if (iir & UART_IIR_NO_INT) return 0; @@ -1869,13 +1893,26 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI) && - iir & UART_IIR_RDI) { + /* + * If port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing, RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. + */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } serial8250_modem_status(up); - if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE)) + if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) && + (up->ier & UART_IER_THRI)) serial8250_tx_chars(up); spin_unlock_irqrestore(&port->lock, flags); @@ -1925,19 +1962,25 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port) static unsigned int serial8250_tx_empty(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); + unsigned int result = 0; unsigned long flags; unsigned int lsr; serial8250_rpm_get(up); spin_lock_irqsave(&port->lock, flags); - lsr = serial_port_in(port, UART_LSR); - up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (!serial8250_tx_dma_running(up)) { + lsr = serial_port_in(port, UART_LSR); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + + if ((lsr & BOTH_EMPTY) == BOTH_EMPTY) + result = TIOCSER_TEMT; + } spin_unlock_irqrestore(&port->lock, flags); serial8250_rpm_put(up); - return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; + return result; } unsigned int serial8250_do_get_mctrl(struct uart_port *port) @@ -2255,6 +2298,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2264,8 +2311,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible. 
*/ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2277,9 +2322,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise @@ -2624,6 +2670,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { + unsigned int tolerance = port->uartclk / 100; + /* * Ask the core to calculate the divisor for us. * Allow 1% tolerance at the upper limit so uart clks marginally @@ -2632,7 +2680,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, */ return uart_get_baud_rate(port, termios, old, port->uartclk / 16 / UART_DIV_MAX, - port->uartclk); + (port->uartclk + tolerance) / 16); } void @@ -2860,8 +2908,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_MEM32BE: case UPIO_MEM16: case UPIO_MEM: - if (!port->mapbase) + if (!port->mapbase) { + ret = -EINVAL; break; + } if (!request_mem_region(port->mapbase, size, "serial")) { ret = -EBUSY; @@ -3157,6 +3207,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port; spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; up->cur_iotype = 0xFF; @@ -3218,7 +3269,7 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_set_divisor(port, baud, quot, frac); serial_port_out(port, UART_LCR, up->lcr); - serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } /* diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c index b9bcbe20a2be173af6a8a35e0fc2eaa39ae843b2..c47188860e32577f1833d3711cfabd28227c25c4 100644 --- a/drivers/tty/serial/8250/8250_pxa.c +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -113,6 +113,10 @@ static int serial_pxa_probe(struct platform_device *pdev) if (ret) return ret; + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret >= 0) + uart.port.line = ret; + uart.port.type = PORT_XSCALE; uart.port.iotype = UPIO_MEM32; uart.port.mapbase = mmres->start; diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index f005eaf8bc57aab704adffc21258f8ee5e8cabbf..42b6ae1e71f8ce30184c7947a0abf8920299ade9 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -312,6 +312,9 @@ config SERIAL_8250_RSA If you don't have such card, or if unsure, say N. 
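Aside (not part of the patch): the SERIAL_8250_DWLIB symbol added just below builds the 8250_dwlib.c helpers introduced earlier in this series. The fractional divisor arithmetic those helpers implement can be checked in isolation; the following standalone C sketch reproduces the quot/frac computation from dw8250_get_divisor(), using hypothetical example values for the UART input clock, baud rate and DLF width.

/*
 * Illustrative sketch only: mirrors the divisor math of dw8250_get_divisor()
 * from 8250_dwlib.c. The uartclk, baud and dlf_size values are hypothetical
 * example inputs, not values taken from any particular board.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int uartclk  = 48000000;	/* example UART input clock in Hz */
	unsigned int baud     = 115200;		/* example requested baud rate */
	unsigned int dlf_size = 4;		/* example DLF width probed via DW_UART_DLF */

	unsigned int base = 16 * baud;			/* 16x oversampling */
	unsigned int quot = uartclk / base;		/* integer part -> DLL/DLM */
	unsigned int rem  = uartclk % base;
	/* fractional part, scaled to 2^dlf_size, written to DW_UART_DLF */
	unsigned int frac = DIV_ROUND_CLOSEST(rem << dlf_size, base);

	/* effective divisor is quot + frac / 2^dlf_size */
	double actual = uartclk / (16.0 * (quot + frac / (double)(1u << dlf_size)));

	printf("quot=%u frac=%u actual_baud=%.1f\n", quot, frac, actual);
	return 0;
}

With these example inputs the sketch prints quot=26 and frac=1, i.e. an effective divisor of 26 + 1/16, which yields roughly 115.1 kbaud and stays well within the usual baud rate tolerance.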
+config SERIAL_8250_DWLIB + bool + config SERIAL_8250_ACORN tristate "Acorn expansion card serial port support" depends on ARCH_ACORN && SERIAL_8250 diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 18751bc63a848ec6c806415eefd3c4088df06c3e..9b451d81588b27d61e03ae962e814fc610f01028 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o 8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o 8250_base-y := 8250_port.o 8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o +8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o 8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c index c8186a05a453c011d049ac1f51aca2ec66c0cfc4..271c0388e00d8e3e5b71430711a49e03ff059ad2 100644 --- a/drivers/tty/serial/8250/serial_cs.c +++ b/drivers/tty/serial/8250/serial_cs.c @@ -306,6 +306,7 @@ static int serial_resume(struct pcmcia_device *link) static int serial_probe(struct pcmcia_device *link) { struct serial_info *info; + int ret; dev_dbg(&link->dev, "serial_attach()\n"); @@ -320,7 +321,15 @@ static int serial_probe(struct pcmcia_device *link) if (do_sound) link->config_flags |= CONF_ENABLE_SPKR; - return serial_config(link); + ret = serial_config(link); + if (ret) + goto free_info; + + return 0; + +free_info: + kfree(info); + return ret; } static void serial_detach(struct pcmcia_device *link) diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index df8bd0c7b97db06eff11016a8dd31448d0e97754..66e69c7d10a3edc12fea1990c198572ec2565406 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -73,6 +73,24 @@ config SERIAL_AMBA_PL011_CONSOLE your boot loader (lilo or loadlin) about how to pass options to the kernel at boot time.) +if ASCEND_FEATURES + +config SERIAL_ATTACHED_MBIGEN + bool "Serial port interrupt signal lines connected to the mbigen" + depends on SERIAL_AMBA_PL011=y + default n + help + Say Y here when the interrupt signal line of the serial port is + connected to the mbigne. The mbigen device has the function of + clearing interrupts automatically. However, the interrupt processing + function of the serial port driver may process multiple interrupts + at a time. The mbigen device cannot adapt to this scenario. + As a result, interrupts are lost.Because it maybe discard interrupt. + + If unsure, say N. 
+ +endif + config SERIAL_EARLYCON_ARM_SEMIHOST bool "Early console using ARM semihosting" depends on ARM64 || ARM diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index ebd33c0232e6390e3e2301ae7850e656c83943ae..c987db50757caa3b3d1e8b344e7231e37ba3c10d 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -313,8 +313,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap, */ static int pl011_fifo_to_tty(struct uart_amba_port *uap) { - u16 status; unsigned int ch, flag, fifotaken; + int sysrq; + u16 status; for (fifotaken = 0; fifotaken != 256; fifotaken++) { status = pl011_read(uap, REG_FR); @@ -349,10 +350,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap) flag = TTY_FRAME; } - if (uart_handle_sysrq_char(&uap->port, ch & 255)) - continue; + spin_unlock(&uap->port.lock); + sysrq = uart_handle_sysrq_char(&uap->port, ch & 255); + spin_lock(&uap->port.lock); - uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + if (!sysrq) + uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); } return fifotaken; @@ -813,10 +816,8 @@ __acquires(&uap->port.lock) if (!uap->using_tx_dma) return; - /* Avoid deadlock with the DMA engine callback */ - spin_unlock(&uap->port.lock); - dmaengine_terminate_all(uap->dmatx.chan); - spin_lock(&uap->port.lock); + dmaengine_terminate_async(uap->dmatx.chan); + if (uap->dmatx.queued) { dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, DMA_TO_DEVICE); @@ -1052,6 +1053,9 @@ static void pl011_dma_rx_callback(void *data) */ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) { + if (!uap->using_rx_dma) + return; + /* FIXME. Just disable the DMA enable */ uap->dmacr &= ~UART011_RXDMAE; pl011_write(uap->dmacr, uap, REG_DMACR); @@ -1334,6 +1338,15 @@ static void pl011_stop_rx(struct uart_port *port) pl011_dma_rx_stop(uap); } +static void pl011_throttle_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + pl011_stop_rx(port); + spin_unlock_irqrestore(&port->lock, flags); +} + static void pl011_enable_ms(struct uart_port *port) { struct uart_amba_port *uap = @@ -1475,6 +1488,65 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap) dummy_read = pl011_read(uap, REG_ICR); } +#ifdef CONFIG_SERIAL_ATTACHED_MBIGEN +struct workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool pl011_enable_hisi_wkrd; +static struct workaround_oem_info pl011_wkrd_info[] = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x300, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x301, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x400, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x401, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x402, + } +}; + +static void pl011_check_hisi_workaround(void) +{ + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + int i; + + status = acpi_get_table(ACPI_SIG_MADT, 0, &tbl); + if (ACPI_FAILURE(status) || !tbl) + return; + + for (i = 0; i < ARRAY_SIZE(pl011_wkrd_info); i++) { + if (!memcmp(pl011_wkrd_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(pl011_wkrd_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + pl011_wkrd_info[i].oem_revision == tbl->oem_revision) { + pl011_enable_hisi_wkrd = true; + break; + } + } + 
+ acpi_put_table(tbl); +} + +#else +#define pl011_enable_hisi_wkrd 0 +static inline void pl011_check_hisi_workaround(void){ } +#endif + static irqreturn_t pl011_int(int irq, void *dev_id) { struct uart_amba_port *uap = dev_id; @@ -1512,6 +1584,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id) handled = 1; } + if (pl011_enable_hisi_wkrd) { + pl011_write(0, uap, REG_IMSC); + pl011_write(uap->im, uap, REG_IMSC); + } + spin_unlock_irqrestore(&uap->port.lock, flags); return IRQ_RETVAL(handled); @@ -1689,6 +1766,8 @@ static int pl011_hwinit(struct uart_port *port) if (plat->init) plat->init(); } + + pl011_check_hisi_workaround(); return 0; } @@ -1727,9 +1806,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) */ static void pl011_enable_interrupts(struct uart_amba_port *uap) { + unsigned long flags; unsigned int i; - spin_lock_irq(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); @@ -1751,7 +1831,23 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irq(&uap->port.lock); + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +static void pl011_unthrottle_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + unsigned long flags; + + spin_lock_irqsave(&uap->port.lock, flags); + + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + + pl011_write(uap->im, uap, REG_IMSC); + + spin_unlock_irqrestore(&uap->port.lock, flags); } static int pl011_startup(struct uart_port *port) @@ -2093,32 +2189,13 @@ static const char *pl011_type(struct uart_port *port) return uap->port.type == PORT_AMBA ? uap->type : NULL; } -/* - * Release the memory region(s) being used by 'port' - */ -static void pl011_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, SZ_4K); -} - -/* - * Request the memory region(s) being used by 'port' - */ -static int pl011_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") - != NULL ? 0 : -EBUSY; -} - /* * Configure/autoconfigure the port. 
*/ static void pl011_config_port(struct uart_port *port, int flags) { - if (flags & UART_CONFIG_TYPE) { + if (flags & UART_CONFIG_TYPE) port->type = PORT_AMBA; - pl011_request_port(port); - } } /* @@ -2133,6 +2210,8 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; + if (port->mapbase != (unsigned long) ser->iomem_base) + ret = -EINVAL; return ret; } @@ -2143,6 +2222,8 @@ static const struct uart_ops amba_pl011_pops = { .stop_tx = pl011_stop_tx, .start_tx = pl011_start_tx, .stop_rx = pl011_stop_rx, + .throttle = pl011_throttle_rx, + .unthrottle = pl011_unthrottle_rx, .enable_ms = pl011_enable_ms, .break_ctl = pl011_break_ctl, .startup = pl011_startup, @@ -2150,8 +2231,6 @@ static const struct uart_ops amba_pl011_pops = { .flush_buffer = pl011_dma_flush_buffer, .set_termios = pl011_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2181,8 +2260,6 @@ static const struct uart_ops sbsa_uart_pops = { .shutdown = sbsa_uart_shutdown, .set_termios = sbsa_uart_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2254,9 +2331,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2289,7 +2365,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 38400; @@ -2357,8 +2433,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; @@ -2587,6 +2663,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, uap->port.fifosize = uap->fifosize; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = index; + spin_lock_init(&uap->port.lock); amba_ports[index] = uap; @@ -2595,7 +2672,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2606,6 +2683,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; return ret; } } @@ -2769,6 +2849,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); static const struct acpi_device_id sbsa_uart_acpi_match[] = { { "ARMH0011", 0 }, + { "ARMHB000", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, 
sbsa_uart_acpi_match); @@ -2780,6 +2861,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = { .name = "sbsa-uart", .of_match_table = of_match_ptr(sbsa_uart_of_match), .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match), + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), }, }; @@ -2808,6 +2890,7 @@ static struct amba_driver pl011_driver = { .drv = { .name = "uart-pl011", .pm = &pl011_dev_pm_ops, + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011), }, .id_table = pl011_ids, .probe = pl011_probe, diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index db5df3d548188b6c440db8928ac157b5512174bb..3bdd56a1021b26d6e74ff98ad6f0e25f25f5de08 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -49,11 +49,6 @@ struct ar933x_uart_port { struct clk *clk; }; -static inline bool ar933x_uart_console_enabled(void) -{ - return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE); -} - static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, int offset) { @@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = { .verify_port = ar933x_uart_verify_port, }; +#ifdef CONFIG_SERIAL_AR933X_CONSOLE static struct ar933x_uart_port * ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; @@ -604,14 +600,7 @@ static struct console ar933x_uart_console = { .index = -1, .data = &ar933x_uart_driver, }; - -static void ar933x_uart_add_console_port(struct ar933x_uart_port *up) -{ - if (!ar933x_uart_console_enabled()) - return; - - ar933x_console_ports[up->port.line] = up; -} +#endif /* CONFIG_SERIAL_AR933X_CONSOLE */ static struct uart_driver ar933x_uart_driver = { .owner = THIS_MODULE, @@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev) baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); - ar933x_uart_add_console_port(up); +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_console_ports[up->port.line] = up; +#endif ret = uart_add_one_port(&ar933x_uart_driver, &up->port); if (ret) @@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void) { int ret; - if (ar933x_uart_console_enabled()) - ar933x_uart_driver.cons = &ar933x_uart_console; +#ifdef CONFIG_SERIAL_AR933X_CONSOLE + ar933x_uart_driver.cons = &ar933x_uart_console; +#endif ret = uart_register_driver(&ar933x_uart_driver); if (ret) diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 8e4428725848eb0e9917af79123442d6ac751cf2..f34520e9ad6e595fc1f6e17d27ea7acaef4f99ad 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -163,6 +163,8 @@ struct atmel_uart_port { unsigned int pending_status; spinlock_t lock_suspended; + bool hd_start_rx; /* can start RX during half-duplex operation */ + #ifdef CONFIG_PM struct { u32 cr; @@ -225,6 +227,12 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value) __raw_writeb(value, port->membase + ATMEL_US_THR); } +static inline int atmel_uart_is_half_duplex(struct uart_port *port) +{ + return (port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX); +} + #ifdef CONFIG_SERIAL_ATMEL_PDC static bool atmel_use_pdc_rx(struct uart_port *port) { @@ -481,9 +489,9 @@ static void atmel_stop_tx(struct uart_port *port) /* Disable interrupts */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); - if ((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) + if 
(atmel_uart_is_half_duplex(port)) atmel_start_rx(port); + } /* @@ -500,8 +508,7 @@ static void atmel_start_tx(struct uart_port *port) return; if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) - if ((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) + if (atmel_uart_is_half_duplex(port)) atmel_stop_rx(port); if (atmel_use_pdc_tx(port)) @@ -799,10 +806,14 @@ static void atmel_complete_tx_dma(void *arg) */ if (!uart_circ_empty(xmit)) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); - else if ((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { - /* DMA done, stop TX, start RX for RS485 */ - atmel_start_rx(port); + else if (atmel_uart_is_half_duplex(port)) { + /* + * DMA done, re-enable TXEMPTY and signal that we can stop + * TX and start RX for RS485 + */ + atmel_port->hd_start_rx = true; + atmel_uart_writel(port, ATMEL_US_IER, + atmel_port->tx_done_mask); } spin_unlock_irqrestore(&port->lock, flags); @@ -1156,6 +1167,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port) sg_dma_len(&atmel_port->sg_rx)/2, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(port->dev, "Preparing DMA cyclic failed\n"); + goto chan_err; + } desc->callback = atmel_complete_rx_dma; desc->callback_param = port; atmel_port->desc_rx = desc; @@ -1244,9 +1259,19 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); if (pending & atmel_port->tx_done_mask) { - /* Either PDC or interrupt transmission */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); + + /* Start RX if flag was set and FIFO is empty */ + if (atmel_port->hd_start_rx) { + if (!(atmel_uart_readl(port, ATMEL_US_CSR) + & ATMEL_US_TXEMPTY)) + dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); + + atmel_port->hd_start_rx = false; + atmel_start_rx(port); + } + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); } } @@ -1373,8 +1398,7 @@ static void atmel_tx_pdc(struct uart_port *port) atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); } else { - if ((port->rs485.flags & SER_RS485_ENABLED) && - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { + if (atmel_uart_is_half_duplex(port)) { /* DMA done, stop TX, start RX for RS485 */ atmel_start_rx(port); } @@ -2130,27 +2154,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_NORMAL; } - /* set the mode, clock divisor, parity, stop bits and data size */ - atmel_uart_writel(port, ATMEL_US_MR, mode); - - /* - * when switching the mode, set the RTS line state according to the - * new mode, otherwise keep the former state - */ - if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { - unsigned int rts_state; - - if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { - /* let the hardware control the RTS line */ - rts_state = ATMEL_US_RTSDIS; - } else { - /* force RTS line to low level */ - rts_state = ATMEL_US_RTSEN; - } - - atmel_uart_writel(port, ATMEL_US_CR, rts_state); - } - /* * Set the baud rate: * Fractional baudrate allows to setup output frequency more @@ -2176,6 +2179,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, quot = cd | fp << ATMEL_US_FP_OFFSET; atmel_uart_writel(port, ATMEL_US_BRGR, quot); + + /* set the mode, clock divisor, parity, stop bits and data size */ + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* + * when switching the mode, set the RTS line 
state according to the + * new mode, otherwise keep the former state + */ + if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { + unsigned int rts_state; + + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* let the hardware control the RTS line */ + rts_state = ATMEL_US_RTSDIS; + } else { + /* force RTS line to low level */ + rts_state = ATMEL_US_RTSEN; + } + + atmel_uart_writel(port, ATMEL_US_CR, rts_state); + } + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); atmel_port->tx_stopped = false; diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index e5389591bb4f1f83a207ee5be7e3577648972599..ad40c75bb58f84d5aceba4ca5340f673b16e0a75 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port) clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); } cpm_uart_initbd(pinfo); - cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); + if (IS_SMC(pinfo)) { + out_be32(&pinfo->smcup->smc_rstate, 0); + out_be32(&pinfo->smcup->smc_tstate, 0); + out_be16(&pinfo->smcup->smc_rbptr, + in_be16(&pinfo->smcup->smc_rbase)); + out_be16(&pinfo->smcup->smc_tbptr, + in_be16(&pinfo->smcup->smc_tbase)); + } else { + cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); + } } /* Install interrupt handler. */ retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); @@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); /* - * In case SMC1 is being relocated... + * In case SMC is being relocated... */ -#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH) out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase)); out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase)); out_be32(&up->smc_rstate, 0); out_be32(&up->smc_tstate, 0); out_be16(&up->smc_brkcr, 1); /* number of break chars */ out_be16(&up->smc_brkec, 0); -#endif /* Set up the uart parameters in the * parameter ram. @@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) out_be16(&up->smc_brkec, 0); out_be16(&up->smc_brkcr, 1); - cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); - /* Set UART mode, 8 bit, no parity, one stop. * Enable receive and transmit. 
*/ diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c index f460cca139e239c066b9aacaaf8f0c22500d7ade..13ac36e2da4f0f2ea36e48c7619f369dde3239bd 100644 --- a/drivers/tty/serial/digicolor-usart.c +++ b/drivers/tty/serial/digicolor-usart.c @@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void) if (ret) return ret; - return platform_driver_register(&digicolor_uart_platform); + ret = platform_driver_register(&digicolor_uart_platform); + if (ret) + uart_unregister_driver(&digicolor_uart); + + return ret; } module_init(digicolor_uart_init); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 3f8d1274fc85c2ee0306d93db03042e55c7e193d..c0ebcd34cadb7cec8d881be4d98bb8e6c430a135 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -376,8 +376,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport) } sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl, - sport->dma_tx_nents, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + ret, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); if (!sport->dma_tx_desc) { dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); dev_err(dev, "Cannot prepare TX slave DMA!\n"); @@ -1477,6 +1477,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, else cr1 &= ~UARTCR1_PT; } + } else { + cr1 &= ~UARTCR1_PE; } /* ask the core to calculate the divisor */ @@ -1688,10 +1690,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, else ctrl &= ~UARTCTRL_PT; } + } else { + ctrl &= ~UARTCTRL_PE; } /* ask the core to calculate the divisor */ - baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); spin_lock_irqsave(&sport->port.lock, flags); @@ -2157,7 +2161,7 @@ static int lpuart_probe(struct platform_device *pdev) return PTR_ERR(sport->port.membase); sport->port.membase += sdata->reg_off; - sport->port.mapbase = res->start; + sport->port.mapbase = res->start + sdata->reg_off; sport->port.dev = &pdev->dev; sport->port.type = PORT_LPUART; ret = platform_get_irq(pdev, 0); diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index ffefd218761e04c633171b3f1b14151e7f05b721..31033d517e828eca8058a4eb8e094871b1169eb1 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi) struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi); /* stop activity */ tasklet_kill(&ifx_dev->io_work_tasklet); + + pm_runtime_disable(&spi->dev); + /* free irq */ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev); free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 0f67197a3783ffdd62eccd3b41a813a80d66b6df..989ca7d662f3fa18fea74d3aa41ffca96362124f 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -382,6 +382,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport, } #endif +/* called with port.lock taken and irqs caller dependent */ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); @@ -390,6 +391,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) mctrl_gpio_set(sport->gpios, sport->port.mctrl); } +/* called with port.lock taken and irqs caller dependent */ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) { *ucr2 &= ~UCR2_CTSC; @@ -399,6 +401,7 @@ static void 
imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) mctrl_gpio_set(sport->gpios, sport->port.mctrl); } +/* called with port.lock taken and irqs caller dependent */ static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2) { *ucr2 |= UCR2_CTSC; @@ -1554,6 +1557,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, old_csize = CS8; } + del_timer_sync(&sport->timer); + + /* + * Ask the core to calculate the divisor for us. + */ + baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); + quot = uart_get_divisor(port, baud); + + spin_lock_irqsave(&sport->port.lock, flags); + if ((termios->c_cflag & CSIZE) == CS8) ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS; else @@ -1597,16 +1610,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, ucr2 |= UCR2_PROE; } - del_timer_sync(&sport->timer); - - /* - * Ask the core to calculate the divisor for us. - */ - baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); - quot = uart_get_divisor(port, baud); - - spin_lock_irqsave(&sport->port.lock, flags); - sport->port.read_status_mask = 0; if (termios->c_iflag & INPCK) sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR); @@ -2068,7 +2071,7 @@ imx_uart_console_setup(struct console *co, char *options) retval = clk_prepare(sport->clk_per); if (retval) - clk_disable_unprepare(sport->clk_ipg); + clk_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index b4ba2b1dab767cac45a96dd6f0e2064ad138447c..b0aa864f84a97ed61b1ffecdd79551cfdb414dca 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -128,19 +128,6 @@ static void kgdboc_unregister_kbd(void) #define kgdboc_restore_input() #endif /* ! 
CONFIG_KDB_KEYBOARD */ -static int kgdboc_option_setup(char *opt) -{ - if (strlen(opt) >= MAX_CONFIG_LEN) { - printk(KERN_ERR "kgdboc: config string too long\n"); - return -ENOSPC; - } - strcpy(config, opt); - - return 0; -} - -__setup("kgdboc=", kgdboc_option_setup); - static void cleanup_kgdboc(void) { if (kgdb_unregister_nmi_console()) @@ -154,15 +141,15 @@ static int configure_kgdboc(void) { struct tty_driver *p; int tty_line = 0; - int err; + int err = -ENODEV; char *cptr = config; struct console *cons; - err = kgdboc_option_setup(config); - if (err || !strlen(config) || isspace(config[0])) + if (!strlen(config) || isspace(config[0])) { + err = 0; goto noconfig; + } - err = -ENODEV; kgdboc_io_ops.is_console = 0; kgdb_tty_driver = NULL; @@ -245,7 +232,7 @@ static void kgdboc_put_char(u8 chr) static int param_set_kgdboc_var(const char *kmessage, const struct kernel_param *kp) { - int len = strlen(kmessage); + size_t len = strlen(kmessage); if (len >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdboc: config string too long\n"); @@ -267,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage, strcpy(config, kmessage); /* Chop out \n char as a result of echo */ - if (config[len - 1] == '\n') + if (len && config[len - 1] == '\n') config[len - 1] = '\0'; if (configured == 1) @@ -311,6 +298,25 @@ static struct kgdb_io kgdboc_io_ops = { }; #ifdef CONFIG_KGDB_SERIAL_CONSOLE +static int kgdboc_option_setup(char *opt) +{ + if (!opt) { + pr_err("config string not provided\n"); + return -EINVAL; + } + + if (strlen(opt) >= MAX_CONFIG_LEN) { + pr_err("config string too long\n"); + return -ENOSPC; + } + strcpy(config, opt); + + return 0; +} + +__setup("kgdboc=", kgdboc_option_setup); + + /* This is only available if kgdboc is a built in for early debugging */ static int __init kgdboc_early_init(char *opt) { diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c index d1d73261575b294150a24f62c335373ade79f395..9c145ed0dcf70024d2dbff31a9d2f4487db58192 100644 --- a/drivers/tty/serial/lpc32xx_hs.c +++ b/drivers/tty/serial/lpc32xx_hs.c @@ -341,7 +341,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) LPC32XX_HSUART_IIR(port->membase)); port->icount.overrun++; tty_insert_flip_char(tport, 0, TTY_OVERRUN); - tty_schedule_flip(tport); + tty_flip_buffer_push(tport); } /* Data received? 
*/ diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 371569a0fd00a8161f76ac715a734368e2419e60..c1ee88f530334a590112ad0dcfc1a246429224f8 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -213,7 +213,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) return 0; } -static int max3100_handlerx(struct max3100_port *s, u16 rx) +static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx) { unsigned int ch, flg, status = 0; int ret = 0, cts; @@ -253,6 +253,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx) return ret; } +static int max3100_handlerx(struct max3100_port *s, u16 rx) +{ + unsigned long flags; + int ret; + + uart_port_lock_irqsave(&s->port, &flags); + ret = max3100_handlerx_unlocked(s, rx); + uart_port_unlock_irqrestore(&s->port, flags); + return ret; +} + static void max3100_work(struct work_struct *w) { struct max3100_port *s = container_of(w, struct max3100_port, work); @@ -743,13 +754,14 @@ static int max3100_probe(struct spi_device *spi) mutex_lock(&max3100s_lock); if (!uart_driver_registered) { - uart_driver_registered = 1; retval = uart_register_driver(&max3100_uart_driver); if (retval) { printk(KERN_ERR "Couldn't register max3100 uart driver\n"); mutex_unlock(&max3100s_lock); return retval; } + + uart_driver_registered = 1; } for (i = 0; i < MAX_MAX3100; i++) @@ -835,6 +847,7 @@ static int max3100_remove(struct spi_device *spi) } pr_debug("removing max3100 driver\n"); uart_unregister_driver(&max3100_uart_driver); + uart_driver_registered = 0; mutex_unlock(&max3100s_lock); return 0; diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 3db48fcd6068d853f2c3376c885289222a063d10..0c35c3c5e37349953df8fe1daff97a0b94bd1394 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -491,37 +491,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg) static int max310x_set_baud(struct uart_port *port, int baud) { - unsigned int mode = 0, clk = port->uartclk, div = clk / baud; + unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0; - /* Check for minimal value for divider */ - if (div < 16) - div = 16; - - if (clk % baud && (div / 16) < 0x8000) { + /* + * Calculate the integer divisor first. Select a proper mode + * in case if the requested baud is too high for the pre-defined + * clocks frequency. 
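+	 * The rate actually programmed below works out to
+	 * 16 * uartclk / (c * (16 * div + frac)), where c is 16, 8 or 4 for
+	 * the normal, x2 and x4 modes respectively.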
+ */ + div = port->uartclk / baud; + if (div < 8) { + /* Mode x4 */ + c = 4; + mode = MAX310X_BRGCFG_4XMODE_BIT; + } else if (div < 16) { /* Mode x2 */ + c = 8; mode = MAX310X_BRGCFG_2XMODE_BIT; - clk = port->uartclk * 2; - div = clk / baud; - - if (clk % baud && (div / 16) < 0x8000) { - /* Mode x4 */ - mode = MAX310X_BRGCFG_4XMODE_BIT; - clk = port->uartclk * 4; - div = clk / baud; - } + } else { + c = 16; } - max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8); - max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16); - max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode); + /* Calculate the divisor in accordance with the fraction coefficient */ + div /= c; + F = c*baud; - return DIV_ROUND_CLOSEST(clk, div); + /* Calculate the baud rate fraction */ + if (div > 0) + frac = (16*(port->uartclk % F)) / F; + else + div = 1; + + max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8); + max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div); + max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode); + + /* Return the actual baud rate we just programmed */ + return (16*port->uartclk) / (c*(16*div + frac)); } static int max310x_update_best_err(unsigned long f, long *besterr) { /* Use baudrate 115200 for calculate error */ - long err = f % (115200 * 16); + long err = f % (460800 * 16); if ((*besterr < 0) || (*besterr > err)) { *besterr = err; @@ -576,7 +587,7 @@ static int max310x_set_ref_clk(struct device *dev, struct max310x_port *s, } /* Configure clock source */ - clksrc = xtal ? MAX310X_CLKSRC_CRYST_BIT : MAX310X_CLKSRC_EXTCLK_BIT; + clksrc = MAX310X_CLKSRC_EXTCLK_BIT | (xtal ? MAX310X_CLKSRC_CRYST_BIT : 0); /* Configure PLL */ if (pllcfg) { @@ -833,12 +844,9 @@ static void max310x_wq_proc(struct work_struct *ws) static unsigned int max310x_tx_empty(struct uart_port *port) { - unsigned int lvl, sts; - - lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG); - sts = max310x_port_read(port, MAX310X_IRQSTS_REG); + u8 lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG); - return ((sts & MAX310X_IRQ_TXEMPTY_BIT) && !lvl) ? TIOCSER_TEMT : 0; + return lvl ? 
0 : TIOCSER_TEMT; } static unsigned int max310x_get_mctrl(struct uart_port *port) @@ -1419,6 +1427,8 @@ static int max310x_spi_probe(struct spi_device *spi) if (spi->dev.of_node) { const struct of_device_id *of_id = of_match_device(max310x_dt_ids, &spi->dev); + if (!of_id) + return -ENODEV; devtype = (struct max310x_devtype *)of_id->data; } else { diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 736b74fd6623760b997a223190ddbc4355f43411..4caeca67fd1435cea7d68f3aac511545c5354000 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -383,10 +383,14 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base) static inline void msm_wait_for_xmitr(struct uart_port *port) { + unsigned int timeout = 500000; + while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) { if (msm_read(port, UART_ISR) & UART_ISR_TX_READY) break; udelay(1); + if (!timeout--) + break; } msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR); } @@ -860,6 +864,7 @@ static void msm_handle_tx(struct uart_port *port) struct circ_buf *xmit = &msm_port->uart.state->xmit; struct msm_dma *dma = &msm_port->tx_dma; unsigned int pio_count, dma_count, dma_min; + char buf[4] = { 0 }; void __iomem *tf; int err = 0; @@ -869,10 +874,12 @@ static void msm_handle_tx(struct uart_port *port) else tf = port->membase + UART_TF; + buf[0] = port->x_char; + if (msm_port->is_uartdm) msm_reset_dm_count(port, 1); - iowrite8_rep(tf, &port->x_char, 1); + iowrite32_rep(tf, buf, 1); port->icount.tx++; port->x_char = 0; return; @@ -973,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port) static void msm_reset(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + unsigned int mr; /* reset everything */ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR); @@ -980,7 +988,10 @@ static void msm_reset(struct uart_port *port) msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR); msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR); msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); - msm_write(port, UART_CR_CMD_SET_RFR, UART_CR); + msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR); + mr = msm_read(port, UART_MR1); + mr &= ~UART_MR1_RX_RDY_CTL; + msm_write(port, mr, UART_MR1); /* Disable DM modes */ if (msm_port->is_uartdm) @@ -1569,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s, int num_newlines = 0; bool replaced = false; void __iomem *tf; + int locked = 1; if (is_uartdm) tf = port->membase + UARTDM_TF; @@ -1581,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s, num_newlines++; count += num_newlines; - spin_lock(&port->lock); + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else + spin_lock(&port->lock); + if (is_uartdm) msm_reset_dm_count(port, count); @@ -1617,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s, iowrite32_rep(tf, buf, 1); i += num_chars; } - spin_unlock(&port->lock); + + if (locked) + spin_unlock(&port->lock); } static void msm_console_write(struct console *co, const char *s, diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 170e446a2f625c4af4bf29c05f87931182979a73..494e476ac8db9f9b5e7924c0e484c7363f3578c0 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -162,7 +162,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port) st = readl(port->membase + UART_STAT); spin_unlock_irqrestore(&port->lock, 
flags); - return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0; + return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0; } static unsigned int mvebu_uart_get_mctrl(struct uart_port *port) diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 76aa289652f7ae20a532e9355affc501de8fe763..55c341addb6ed72a0090ab9ef2619721c7c72520 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1128,11 +1128,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port, static irqreturn_t mxs_auart_irq_handle(int irq, void *context) { - u32 istat; + u32 istat, stat; struct mxs_auart_port *s = context; u32 mctrl_temp = s->mctrl_prev; - u32 stat = mxs_read(s, REG_STAT); + uart_port_lock(&s->port); + + stat = mxs_read(s, REG_STAT); istat = mxs_read(s, REG_INTR); /* ack irq */ @@ -1168,6 +1170,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context) istat &= ~AUART_INTR_TXIS; } + uart_port_unlock(&s->port); + return IRQ_HANDLED; } @@ -1634,8 +1638,9 @@ static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s) /* * If something went wrong, rollback. + * Be careful: i may be unsigned. */ - while (err && (--i >= 0)) + while (err && (i-- > 0)) if (irq[i] >= 0) free_irq(irq[i], s); @@ -1685,6 +1690,10 @@ static int mxs_auart_probe(struct platform_device *pdev) s->port.mapbase = r->start; s->port.membase = ioremap(r->start, resource_size(r)); + if (!s->port.membase) { + ret = -ENOMEM; + goto out_disable_clks; + } s->port.ops = &mxs_auart_ops; s->port.iotype = UPIO_MEM; s->port.fifosize = MXS_AUART_FIFO_SIZE; diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index 29a6dc6a8d23c7350fb77a71da9949d9c5b659bf..73fcc6bdb031220ff79272ddaa135d4b98ea4bb5 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -742,7 +742,7 @@ static int __init owl_uart_init(void) return ret; } -static void __init owl_uart_exit(void) +static void __exit owl_uart_exit(void) { platform_driver_unregister(&owl_uart_platform_driver); uart_unregister_driver(&owl_uart_driver); diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index fd80d999308d49772497c3230fa939907e221aa8..0bdf1687983f5a70e3f632703ecf9fba584bafd5 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = { .driver = { .name = PIC32_DEV_NAME, .of_match_table = of_match_ptr(pic32_serial_dt_ids), + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32), }, }; diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 3d21790d961e3d03ce92c360408eff5b9ff974df..2cddcf74f702a1b7820c41e9f35a858a3d0f7a78 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -220,7 +220,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap) { struct tty_port *port; unsigned char ch, r1, drop, error, flag; - int loops = 0; /* Sanity check, make sure the old bug is no longer happening */ if (uap->port.state == NULL) { @@ -303,24 +302,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap) if (r1 & Rx_OVR) tty_insert_flip_char(port, 0, TTY_OVERRUN); next_char: - /* We can get stuck in an infinite loop getting char 0 when the - * line is in a wrong HW state, we break that here. - * When that happens, I disable the receive side of the driver. - * Note that what I've been experiencing is a real irq loop where - * I'm getting flooded regardless of the actual port speed. 
- * Something strange is going on with the HW - */ - if ((++loops) > 1000) - goto flood; ch = read_zsreg(uap, R0); if (!(ch & Rx_CH_AV)) break; } - return true; - flood: - pmz_interrupt_control(uap, 0); - pmz_error("pmz: rx irq flood !\n"); return true; } diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 1515074e18fb6d60a37de13daf6102b98712841f..b3f7d1a1e97f824e4b4793ec30c677432261954a 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -113,6 +113,8 @@ struct qcom_geni_serial_port { u32 *rx_fifo; u32 loopback; bool brk; + + unsigned int tx_remaining; }; static const struct uart_ops qcom_geni_console_pops; @@ -221,7 +223,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport) unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; u32 geni_ios; - if (uart_console(uport) || !uart_cts_enabled(uport)) { + if (uart_console(uport)) { mctrl |= TIOCM_CTS; } else { geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); @@ -237,7 +239,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport, { u32 uart_manual_rfr = 0; - if (uart_console(uport) || !uart_cts_enabled(uport)) + if (uart_console(uport)) return; if (!(mctrl & TIOCM_RTS)) @@ -435,6 +437,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, struct qcom_geni_serial_port *port; bool locked = true; unsigned long flags; + u32 geni_status; WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS); @@ -448,6 +451,8 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, else spin_lock_irqsave(&uport->lock, flags); + geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); + /* Cancel the current write to log the fault */ if (!locked) { geni_se_cancel_m_cmd(&port->se); @@ -461,9 +466,19 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, } writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) { + /* + * It seems we can't interrupt existing transfers if all data + * has been sent, in which case we need to look for done first. 
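+	 * Poll for the command-done status here before starting the new
+	 * console transfer below.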
+ */ + qcom_geni_serial_poll_tx_done(uport); } __qcom_geni_serial_console_write(uport, s, count); + + if (port->tx_remaining) + qcom_geni_serial_setup_tx(uport, port->tx_remaining); + if (locked) spin_unlock_irqrestore(&uport->lock, flags); } @@ -694,40 +709,45 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop) port->handle_rx(uport, total_bytes, drop); } -static void qcom_geni_serial_handle_tx(struct uart_port *uport) +static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done, + bool active) { struct qcom_geni_serial_port *port = to_dev_port(uport, uport); struct circ_buf *xmit = &uport->state->xmit; size_t avail; size_t remaining; + size_t pending; int i; u32 status; unsigned int chunk; int tail; - u32 irq_en; - chunk = uart_circ_chars_pending(xmit); status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS); - /* Both FIFO and framework buffer are drained */ - if (!chunk && !status) { + + /* Complete the current tx command before taking newly added data */ + if (active) + pending = port->tx_remaining; + else + pending = uart_circ_chars_pending(xmit); + + /* All data has been transmitted and acknowledged as received */ + if (!pending && !status && done) { qcom_geni_serial_stop_tx(uport); goto out_write_wakeup; } - if (!uart_console(uport)) { - irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN); - irq_en &= ~(M_TX_FIFO_WATERMARK_EN); - writel_relaxed(0, uport->membase + SE_GENI_TX_WATERMARK_REG); - writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN); - } + avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + avail *= port->tx_bytes_pw; - avail = (port->tx_fifo_depth - port->tx_wm) * port->tx_bytes_pw; tail = xmit->tail; - chunk = min3((size_t)chunk, (size_t)(UART_XMIT_SIZE - tail), avail); + chunk = min3(avail, pending, (size_t)(UART_XMIT_SIZE - tail)); if (!chunk) goto out_write_wakeup; - qcom_geni_serial_setup_tx(uport, chunk); + if (!port->tx_remaining) { + qcom_geni_serial_setup_tx(uport, pending); + port->tx_remaining = pending; + } remaining = chunk; for (i = 0; i < chunk; ) { @@ -746,11 +766,10 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport) tail += tx_bytes; uport->icount.tx += tx_bytes; remaining -= tx_bytes; + port->tx_remaining -= tx_bytes; } xmit->tail = tail & (UART_XMIT_SIZE - 1); - if (uart_console(uport)) - qcom_geni_serial_poll_tx_done(uport); out_write_wakeup: if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(uport); @@ -760,6 +779,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) { unsigned int m_irq_status; unsigned int s_irq_status; + unsigned int geni_status; struct uart_port *uport = dev; unsigned long flags; unsigned int m_irq_en; @@ -773,6 +793,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) spin_lock_irqsave(&uport->lock, flags); m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS); s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS); + geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN); writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR); writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR); @@ -787,7 +808,8 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) && m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN)) - qcom_geni_serial_handle_tx(uport); + qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN, + geni_status & 
M_GENI_CMD_ACTIVE); if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) { if (s_irq_status & S_GP_IRQ_0_EN) @@ -851,6 +873,23 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) { struct qcom_geni_serial_port *port = to_dev_port(uport, uport); unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT; + u32 proto; + + if (uart_console(uport)) + port->tx_bytes_pw = 1; + else + port->tx_bytes_pw = 4; + port->rx_bytes_pw = RX_BYTES_PW; + + proto = geni_se_read_proto(&port->se); + if (proto != GENI_SE_UART) { + dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto); + return -ENXIO; + } + + qcom_geni_serial_stop_rx(uport); + + get_tx_fifo_size(port); set_rfr_wm(port); writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT); @@ -874,30 +913,19 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) return -ENOMEM; } port->setup = true; + return 0; } static int qcom_geni_serial_startup(struct uart_port *uport) { int ret; - u32 proto; struct qcom_geni_serial_port *port = to_dev_port(uport, uport); scnprintf(port->name, sizeof(port->name), "qcom_serial_%s%d", (uart_console(uport) ? "console" : "uart"), uport->line); - if (!uart_console(uport)) { - port->tx_bytes_pw = 4; - port->rx_bytes_pw = RX_BYTES_PW; - } - proto = geni_se_read_proto(&port->se); - if (proto != GENI_SE_UART) { - dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto); - return -ENXIO; - } - - get_tx_fifo_size(port); if (!port->setup) { ret = qcom_geni_serial_port_setup(uport); if (ret) @@ -1052,10 +1080,11 @@ static int __init qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; - int baud; + int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; + int ret; if (co->index >= GENI_UART_CONS_PORTS || co->index < 0) return -ENXIO; @@ -1071,21 +1100,10 @@ static int __init qcom_geni_console_setup(struct console *co, char *options) if (unlikely(!uport->membase)) return -ENXIO; - if (geni_se_resources_on(&port->se)) { - dev_err(port->se.dev, "Error turning on resources\n"); - return -ENXIO; - } - - if (unlikely(geni_se_read_proto(&port->se) != GENI_SE_UART)) { - geni_se_resources_off(&port->se); - return -ENXIO; - } - if (!port->setup) { - port->tx_bytes_pw = 1; - port->rx_bytes_pw = RX_BYTES_PW; - qcom_geni_serial_stop_rx(uport); - qcom_geni_serial_port_setup(uport); + ret = qcom_geni_serial_port_setup(uport); + if (ret) + return ret; } if (options) @@ -1203,11 +1221,12 @@ static void qcom_geni_serial_pm(struct uart_port *uport, { struct qcom_geni_serial_port *port = to_dev_port(uport, uport); + /* If we've never been called, treat it as off */ + if (old_state == UART_PM_STATE_UNDEFINED) + old_state = UART_PM_STATE_OFF; + if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) geni_se_resources_on(&port->se); - else if (!uart_console(uport) && (new_state == UART_PM_STATE_ON && - old_state == UART_PM_STATE_UNDEFINED)) - geni_se_resources_on(&port->se); else if (new_state == UART_PM_STATE_OFF && old_state == UART_PM_STATE_ON) geni_se_resources_off(&port->se); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index 5690c09cc0417f3a34d005b804b08e6808ff1d28..944a4c01057952168a7433dfe6b33076644eb1e2 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -195,7 +195,6 @@ struct rp2_card { void __iomem *bar0; void __iomem *bar1; spinlock_t card_lock; - struct completion fw_loaded; }; #define RP_ID(prod) PCI_VDEVICE(RP, (prod)) @@ -664,17 +663,10 @@ static void 
rp2_remove_ports(struct rp2_card *card) card->initialized_ports = 0; } -static void rp2_fw_cb(const struct firmware *fw, void *context) +static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) { - struct rp2_card *card = context; resource_size_t phys_base; - int i, rc = -ENOENT; - - if (!fw) { - dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n", - RP2_FW_NAME); - goto no_fw; - } + int i, rc = 0; phys_base = pci_resource_start(card->pdev, 1); @@ -720,23 +712,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context) card->initialized_ports++; } - release_firmware(fw); -no_fw: - /* - * rp2_fw_cb() is called from a workqueue long after rp2_probe() - * has already returned success. So if something failed here, - * we'll just leave the now-dormant device in place until somebody - * unbinds it. - */ - if (rc) - dev_warn(&card->pdev->dev, "driver initialization failed\n"); - - complete(&card->fw_loaded); + return rc; } static int rp2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + const struct firmware *fw; struct rp2_card *card; struct rp2_uart_port *ports; void __iomem * const *bars; @@ -747,7 +729,6 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; pci_set_drvdata(pdev, card); spin_lock_init(&card->card_lock); - init_completion(&card->fw_loaded); rc = pcim_enable_device(pdev); if (rc) @@ -780,21 +761,23 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; card->ports = ports; - rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, - IRQF_SHARED, DRV_NAME, card); - if (rc) + rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev); + if (rc < 0) { + dev_err(&pdev->dev, "cannot find '%s' firmware image\n", + RP2_FW_NAME); return rc; + } - /* - * Only catastrophic errors (e.g. ENOMEM) are reported here. - * If the FW image is missing, we'll find out in rp2_fw_cb() - * and print an error message. 
- */ - rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev, - GFP_KERNEL, card, rp2_fw_cb); + rc = rp2_load_firmware(card, fw); + + release_firmware(fw); + if (rc < 0) + return rc; + + rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, + IRQF_SHARED, DRV_NAME, card); if (rc) return rc; - dev_dbg(&pdev->dev, "waiting for firmware blob...\n"); return 0; } @@ -803,7 +786,6 @@ static void rp2_remove(struct pci_dev *pdev) { struct rp2_card *card = pci_get_drvdata(pdev); - wait_for_completion(&card->fw_loaded); rp2_remove_ports(card); } diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 2f8fa184aafaadbc9d796ec74588a19d3861caab..2a49b6d876b87b0bdf23ce1f85bfb77bd44c0e02 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1365,11 +1365,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port, wr_regl(port, S3C2410_ULCON, ulcon); wr_regl(port, S3C2410_UBRDIV, quot); + port->status &= ~UPSTAT_AUTOCTS; + umcon = rd_regl(port, S3C2410_UMCON); if (termios->c_cflag & CRTSCTS) { umcon |= S3C2410_UMCOM_AFC; /* Disable RTS when RX FIFO contains 63 bytes */ umcon &= ~S3C2412_UMCON_AFC_8; + port->status = UPSTAT_AUTOCTS; } else { umcon &= ~S3C2410_UMCOM_AFC; } @@ -1941,7 +1944,11 @@ static int s3c24xx_serial_resume(struct device *dev) if (port) { clk_prepare_enable(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port)); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); clk_disable_unprepare(ourport->clk); uart_resume_port(&s3c24xx_uart_drv, port); @@ -1964,7 +1971,11 @@ static int s3c24xx_serial_resume_noirq(struct device *dev) if (rx_enabled(port)) uintm &= ~S3C64XX_UINTM_RXD_MSK; clk_prepare_enable(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); wr_regl(port, S3C64XX_UINTM, uintm); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); clk_disable_unprepare(ourport->clk); } } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 243c9602505306c3d7581f7e58a5967b680df838..ebea4a9d8e694bddf7789a3ee751925b6c30d973 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -328,6 +328,7 @@ struct sc16is7xx_port { struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work irq_work; + struct mutex efr_lock; struct sc16is7xx_one p[0]; }; @@ -499,6 +500,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) div /= 4; } + /* In an amazing feat of design, the Enhanced Features Register shares + * the address of the Interrupt Identification Register, and is + * switched in by writing a magic value (0xbf) to the Line Control + * Register. Any interrupt firing during this time will see the EFR + * where it expects the IIR to be, leading to "Unexpected interrupt" + * messages. + * + * Prevent this possibility by claiming a mutex while accessing the + * EFR, and claiming the same mutex from within the interrupt handler. + * This is similar to disabling the interrupt, but that doesn't work + * because the bulk of the interrupt processing is run as a workqueue + * job in thread context. 
+ */ + mutex_lock(&s->efr_lock); + lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); /* Open the LCR divisors for configuration */ @@ -514,6 +530,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) /* Put LCR back to the normal mode */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + mutex_unlock(&s->efr_lock); + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_CLKSEL_BIT, prescaler); @@ -657,7 +675,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port) uart_write_wakeup(port); } -static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) +static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) { struct uart_port *port = &s->p[portno].port; @@ -666,7 +684,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG); if (iir & SC16IS7XX_IIR_NO_INT_BIT) - break; + return false; iir &= SC16IS7XX_IIR_ID_MASK; @@ -688,16 +706,27 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) port->line, iir); break; } - } while (1); + } while (0); + return true; } static void sc16is7xx_ist(struct kthread_work *ws) { struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work); - int i; - for (i = 0; i < s->devtype->nr_uart; ++i) - sc16is7xx_port_irq(s, i); + mutex_lock(&s->efr_lock); + + while (1) { + bool keep_polling = false; + int i; + + for (i = 0; i < s->devtype->nr_uart; ++i) + keep_polling |= sc16is7xx_port_irq(s, i); + if (!keep_polling) + break; + } + + mutex_unlock(&s->efr_lock); } static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) @@ -892,6 +921,9 @@ static void sc16is7xx_set_termios(struct uart_port *port, if (!(termios->c_cflag & CREAD)) port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK; + /* As above, claim the mutex while accessing the EFR. 
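Flow-control setup below switches the LCR to configuration mode B, so an IIR read from the interrupt path during this window would see the EFR instead.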
*/ + mutex_lock(&s->efr_lock); + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B); @@ -913,6 +945,8 @@ static void sc16is7xx_set_termios(struct uart_port *port, /* Update LCR register */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + mutex_unlock(&s->efr_lock); + /* Get baud rate generator configuration */ baud = uart_get_baud_rate(port, termios, old, port->uartclk / 16 / 4 / 0xffff, @@ -1178,6 +1212,7 @@ static int sc16is7xx_probe(struct device *dev, s->regmap = regmap; s->devtype = devtype; dev_set_drvdata(dev, s); + mutex_init(&s->efr_lock); kthread_init_worker(&s->kworker); kthread_init_work(&s->irq_work, sc16is7xx_ist); @@ -1474,7 +1509,7 @@ static int __init sc16is7xx_init(void) ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); if (ret < 0) { pr_err("failed to init sc16is7xx i2c --> %d\n", ret); - return ret; + goto err_i2c; } #endif @@ -1482,10 +1517,20 @@ static int __init sc16is7xx_init(void) ret = spi_register_driver(&sc16is7xx_spi_uart_driver); if (ret < 0) { pr_err("failed to init sc16is7xx spi --> %d\n", ret); - return ret; + goto err_spi; } #endif return ret; + +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI +err_spi: +#ifdef CONFIG_SERIAL_SC16IS7XX_I2C + i2c_del_driver(&sc16is7xx_i2c_uart_driver); +#endif +#endif +err_i2c: + uart_unregister_driver(&sc16is7xx_uart); + return ret; } module_init(sc16is7xx_init); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 80bb56facfb684423e415e702e334fb97be152b1..d9d7506a9f6e75abf442f316bf5a7d7be87e4425 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -159,7 +159,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND); if (raise) { - if (rs485_on && !RTS_after_send) { + if (rs485_on && RTS_after_send) { uart_set_mctrl(uport, TIOCM_DTR); uart_clear_mctrl(uport, TIOCM_RTS); } else { @@ -168,7 +168,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) } else { unsigned int clear = TIOCM_DTR; - clear |= (!rs485_on || !RTS_after_send) ? TIOCM_RTS : 0; + clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0; uart_clear_mctrl(uport, clear); } } @@ -205,10 +205,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, if (!state->xmit.buf) { state->xmit.buf = (unsigned char *) page; uart_circ_clear(&state->xmit); + uart_port_unlock(uport, flags); } else { + uart_port_unlock(uport, flags); + /* + * Do not free() the page under the port lock, see + * uart_shutdown(). + */ free_page(page); } - uart_port_unlock(uport, flags); retval = uport->ops->startup(uport); if (retval == 0) { @@ -268,6 +273,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) struct uart_port *uport = uart_port_check(state); struct tty_port *port = &state->port; unsigned long flags = 0; + char *xmit_buf = NULL; /* * Set the TTY IO error marker @@ -281,11 +287,13 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) /* * Turn off DTR and RTS early. 
*/ - if (uport && uart_console(uport) && tty) - uport->cons->cflag = tty->termios.c_cflag; + if (uport) { + if (uart_console(uport) && tty) + uport->cons->cflag = tty->termios.c_cflag; - if (!tty || C_HUPCL(tty)) - uart_port_dtr_rts(uport, 0); + if (!tty || C_HUPCL(tty)) + uart_port_dtr_rts(uport, 0); + } uart_port_shutdown(port); } @@ -298,14 +306,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) tty_port_set_suspended(port, 0); /* - * Free the transmit buffer page. + * Do not free() the transmit buffer page under the port lock since + * this can create various circular locking scenarios. For instance, + * console driver may need to allocate/free a debug object, which + * can endup in printk() recursion. */ uart_port_lock(state, flags); - if (state->xmit.buf) { - free_page((unsigned long)state->xmit.buf); - state->xmit.buf = NULL; - } + xmit_buf = state->xmit.buf; + state->xmit.buf = NULL; uart_port_unlock(uport, flags); + + if (xmit_buf) + free_page((unsigned long)xmit_buf); } /** @@ -540,10 +552,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) int ret = 0; circ = &state->xmit; - if (!circ->buf) + port = uart_port_lock(state, flags); + if (!circ->buf) { + uart_port_unlock(port, flags); return 0; + } - port = uart_port_lock(state, flags); if (port && uart_circ_chars_free(circ) != 0) { circ->buf[circ->head] = c; circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); @@ -576,11 +590,13 @@ static int uart_write(struct tty_struct *tty, return -EL3HLT; } + port = uart_port_lock(state, flags); circ = &state->xmit; - if (!circ->buf) + if (!circ->buf) { + uart_port_unlock(port, flags); return 0; + } - port = uart_port_lock(state, flags); while (port) { c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); if (count < c) @@ -836,6 +852,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, new_flags = (__force upf_t)new_info->flags; old_custom_divisor = uport->custom_divisor; + if (!(uport->flags & UPF_FIXED_PORT)) { + unsigned int uartclk = new_info->baud_base * 16; + /* check needs to be done here before other settings made */ + if (uartclk == 0) { + retval = -EINVAL; + goto exit; + } + } if (!capable(CAP_SYS_ADMIN)) { retval = -EPERM; if (change_irq || change_port || @@ -1054,8 +1078,8 @@ static int uart_tiocmget(struct tty_struct *tty) goto out; if (!tty_io_error(tty)) { - result = uport->mctrl; spin_lock_irq(&uport->lock); + result = uport->mctrl; result |= uport->ops->get_mctrl(uport); spin_unlock_irq(&uport->lock); } @@ -1098,7 +1122,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) if (!uport) goto out; - if (uport->type != PORT_UNKNOWN) + if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl) uport->ops->break_ctl(uport, break_state); ret = 0; out: @@ -1407,6 +1431,10 @@ static void uart_set_ldisc(struct tty_struct *tty) { struct uart_state *state = tty->driver_data; struct uart_port *uport; + struct tty_port *port = &state->port; + + if (!tty_port_initialized(port)) + return; mutex_lock(&state->port.mutex); uport = uart_port_check(state); @@ -1502,6 +1530,7 @@ static void uart_tty_port_shutdown(struct tty_port *port) { struct uart_state *state = container_of(port, struct uart_state, port); struct uart_port *uport = uart_port_check(state); + char *buf; /* * At this point, we stop accepting input. 
To do this, we @@ -1523,8 +1552,18 @@ static void uart_tty_port_shutdown(struct tty_port *port) */ tty_port_set_suspended(port, 0); - uart_change_pm(state, UART_PM_STATE_OFF); + /* + * Free the transmit buffer. + */ + spin_lock_irq(&uport->lock); + buf = state->xmit.buf; + state->xmit.buf = NULL; + spin_unlock_irq(&uport->lock); + + if (buf) + free_page((unsigned long)buf); + uart_change_pm(state, UART_PM_STATE_OFF); } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) @@ -1688,6 +1727,16 @@ static void uart_dtr_rts(struct tty_port *port, int raise) uart_port_deref(uport); } +static int uart_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct uart_driver *drv = driver->driver_state; + struct uart_state *state = drv->state + tty->index; + + tty->driver_data = state; + + return tty_standard_install(driver, tty); +} + /* * Calls to uart_open are serialised by the tty_lock in * drivers/tty/tty_io.c:tty_open() @@ -1700,11 +1749,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise) */ static int uart_open(struct tty_struct *tty, struct file *filp) { - struct uart_driver *drv = tty->driver->driver_state; - int retval, line = tty->index; - struct uart_state *state = drv->state + line; - - tty->driver_data = state; + struct uart_state *state = tty->driver_data; + int retval; retval = tty_port_open(&state->port, tty, filp); if (retval > 0) @@ -1717,6 +1763,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty) { struct uart_state *state = container_of(port, struct uart_state, port); struct uart_port *uport; + int ret; uport = uart_port_check(state); if (!uport || uport->flags & UPF_DEAD) @@ -1727,7 +1774,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty) /* * Start up the serial port. 
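Aside: the uart_install() addition above moves the tty->driver_data binding out of open() and into the tty_operations .install hook, which runs exactly once when the tty is instantiated. A hypothetical driver applying the same split (foo_port and foo_ports are placeholders):

#include <linux/tty.h>

struct foo_port {
	struct tty_port	port;
	/* ... driver-specific state ... */
};

static struct foo_port foo_ports[4];	/* placeholder per-line state */

static int foo_install(struct tty_driver *driver, struct tty_struct *tty)
{
	/* Bind per-line state once, at tty creation time. */
	tty->driver_data = &foo_ports[tty->index];

	return tty_standard_install(driver, tty);
}

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;	/* set by foo_install() */

	return tty_port_open(&fp->port, tty, filp);
}

static const struct tty_operations foo_ops = {
	.install	= foo_install,
	.open		= foo_open,
	/* .close, .write, ... */
};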
*/ - return uart_startup(tty, state, 0); + ret = uart_startup(tty, state, 0); + if (ret > 0) + tty_port_set_active(port, 1); + + return ret; } static const char *uart_type(struct uart_port *port) @@ -2389,6 +2440,7 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch) #endif static const struct tty_operations uart_ops = { + .install = uart_install, .open = uart_open, .close = uart_close, .write = uart_write, @@ -2759,6 +2811,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); + tty_port_link_device(port, drv->tty_driver, uport->line); uart_configure_port(drv, state, uport); port->console = uart_console(uport); diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index 1c06325beacaeb3a6e19011385fcc379ef16ce8e..af0412a784d2786ebc5adb5e336c82858258973c 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "serial_mctrl_gpio.h" @@ -59,6 +60,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set); struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, enum mctrl_gpio_idx gidx) { + if (gpios == NULL) + return NULL; + return gpios->gpio[gidx]; } EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); @@ -115,6 +119,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx) for (i = 0; i < UART_GPIO_MAX; i++) { enum gpiod_flags flags; + char *gpio_str; + bool present; + + /* Check if GPIO property exists and continue if not */ + gpio_str = kasprintf(GFP_KERNEL, "%s-gpios", + mctrl_gpios_desc[i].name); + if (!gpio_str) + continue; + + present = device_property_present(dev, gpio_str); + kfree(gpio_str); + if (!present) + continue; if (mctrl_gpios_desc[i].dir_out) flags = GPIOD_OUT_LOW; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ab3f6e91853da3c269cdb6c22c82226195778ece..9e1a6af23ca2b1257ebcaab83cb80a2c7935ded3 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (uart_circ_empty(xmit)) sci_stop_tx(port); - } else { - ctrl = serial_port_in(port, SCSCR); - if (port->type != PORT_SCI) { - serial_port_in(port, SCxSR); /* Dummy read */ - sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); - } - - ctrl |= SCSCR_TIE; - serial_port_out(port, SCSCR, ctrl); - } } /* On SH3, SCIF may read end-of-break as a space->mark char */ @@ -1331,7 +1321,7 @@ static void sci_tx_dma_release(struct sci_port *s) dma_release_channel(chan); } -static void sci_submit_rx(struct sci_port *s) +static int sci_submit_rx(struct sci_port *s, bool port_lock_held) { struct dma_chan *chan = s->chan_rx; struct uart_port *port = &s->port; @@ -1359,19 +1349,22 @@ static void sci_submit_rx(struct sci_port *s) s->active_rx = s->cookie_rx[0]; dma_async_issue_pending(chan); - return; + return 0; fail: + /* Switch to PIO */ + if (!port_lock_held) + spin_lock_irqsave(&port->lock, flags); if (i) dmaengine_terminate_async(chan); for (i = 0; i < 2; i++) s->cookie_rx[i] = -EINVAL; - s->active_rx = -EINVAL; - /* Switch to PIO */ - spin_lock_irqsave(&port->lock, flags); + s->active_rx = 0; s->chan_rx = NULL; sci_start_rx(port); - spin_unlock_irqrestore(&port->lock, flags); + if (!port_lock_held) + 
spin_unlock_irqrestore(&port->lock, flags); + return -EAGAIN; } static void work_fn_tx(struct work_struct *work) @@ -1383,6 +1376,7 @@ static void work_fn_tx(struct work_struct *work) struct circ_buf *xmit = &port->state->xmit; unsigned long flags; dma_addr_t buf; + int head, tail; /* * DMA is idle now. @@ -1392,16 +1386,23 @@ static void work_fn_tx(struct work_struct *work) * consistent xmit buffer state. */ spin_lock_irq(&port->lock); - buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1)); + head = xmit->head; + tail = xmit->tail; + buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1)); s->tx_dma_len = min_t(unsigned int, - CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), - CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); - spin_unlock_irq(&port->lock); + CIRC_CNT(head, tail, UART_XMIT_SIZE), + CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE)); + if (!s->tx_dma_len) { + /* Transmit buffer has been flushed */ + spin_unlock_irq(&port->lock); + return; + } desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { + spin_unlock_irq(&port->lock); dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n"); goto switch_to_pio; } @@ -1409,18 +1410,18 @@ static void work_fn_tx(struct work_struct *work) dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len, DMA_TO_DEVICE); - spin_lock_irq(&port->lock); desc->callback = sci_dma_tx_complete; desc->callback_param = s; - spin_unlock_irq(&port->lock); s->cookie_tx = dmaengine_submit(desc); if (dma_submit_error(s->cookie_tx)) { + spin_unlock_irq(&port->lock); dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); goto switch_to_pio; } + spin_unlock_irq(&port->lock); dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", - __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx); + __func__, xmit->buf, tail, head, s->cookie_tx); dma_async_issue_pending(chan); return; @@ -1491,7 +1492,7 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t) } if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) - sci_submit_rx(s); + sci_submit_rx(s, true); /* Direct new serial port interrupts back to CPU */ scr = serial_port_in(port, SCSCR); @@ -1549,6 +1550,13 @@ static void sci_request_dma(struct uart_port *port) dev_dbg(port->dev, "%s: port %d\n", __func__, port->line); + /* + * DMA on console may interfere with Kernel log messages which use + * plain putchar(). So, simply don't use it with a console. + */ + if (uart_console(port)) + return; + if (!port->dev->of_node) return; @@ -1614,10 +1622,10 @@ static void sci_request_dma(struct uart_port *port) hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); s->rx_timer.function = rx_timer_fn; - if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) - sci_submit_rx(s); - s->chan_rx_saved = s->chan_rx = chan; + + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) + sci_submit_rx(s, false); } } @@ -1633,11 +1641,18 @@ static void sci_free_dma(struct uart_port *port) static void sci_flush_buffer(struct uart_port *port) { + struct sci_port *s = to_sci_port(port); + /* * In uart_flush_buffer(), the xmit circular buffer has just been - * cleared, so we have to reset tx_dma_len accordingly. 
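Aside: the work_fn_tx() rework above snapshots xmit->head and xmit->tail under port->lock and gives up when the resulting length is zero, which is how a racing uart_flush_buffer() is detected. A minimal sketch of that snapshot; demo_next_dma_chunk is a hypothetical helper and UART_XMIT_SIZE is the usual power-of-two ring size:

#include <linux/circ_buf.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

/* Return the length of the next linear chunk that may be handed to DMA,
 * and the tail index it starts at.  A return of 0 means the ring was
 * emptied (e.g. flushed) and nothing must be submitted. */
static unsigned int demo_next_dma_chunk(struct uart_port *port,
					unsigned int *tail_out)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int head, tail, len;

	spin_lock_irq(&port->lock);
	head = xmit->head;
	tail = xmit->tail;
	/* DMA needs one contiguous region, so never wrap past the end. */
	len = min_t(unsigned int,
		    CIRC_CNT(head, tail, UART_XMIT_SIZE),
		    CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	*tail_out = tail;
	return len;
}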
+ * cleared, so we have to reset tx_dma_len accordingly, and stop any + * pending transfers */ - to_sci_port(port)->tx_dma_len = 0; + s->tx_dma_len = 0; + if (s->chan_tx) { + dmaengine_terminate_async(s->chan_tx); + s->cookie_tx = -EINVAL; + } } #else /* !CONFIG_SERIAL_SH_SCI_DMA */ static inline void sci_request_dma(struct uart_port *port) @@ -1666,8 +1681,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) disable_irq_nosync(irq); scr |= SCSCR_RDRQE; } else { + if (sci_submit_rx(s, false) < 0) + goto handle_pio; + scr &= ~SCSCR_RIE; - sci_submit_rx(s); } serial_port_out(port, SCSCR, scr); /* Clear current interrupt */ @@ -1679,6 +1696,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) return IRQ_HANDLED; } + +handle_pio: #endif if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) { @@ -1914,7 +1933,7 @@ static int sci_request_irq(struct sci_port *port) static void sci_free_irq(struct sci_port *port) { - int i; + int i, j; /* * Intentionally in reverse order so we iterate over the muxed @@ -1930,6 +1949,13 @@ static void sci_free_irq(struct sci_port *port) if (unlikely(irq < 0)) continue; + /* Check if already freed (irq was muxed) */ + for (j = 0; j < i; j++) + if (port->irqs[j] == irq) + j = i + 1; + if (j > i) + continue; + free_irq(port->irqs[i], port); kfree(port->irqstr[i]); @@ -2493,14 +2519,16 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, * center of the last stop bit in sampling clocks. */ int last_stop = bits * 2 - 1; - int deviation = min_err * srr * last_stop / 2 / baud; + int deviation = DIV_ROUND_CLOSEST(min_err * last_stop * + (int)(srr + 1), + 2 * (int)baud); if (abs(deviation) >= 2) { /* At least two sampling clocks off at the * last stop bit; we can increase the error * margin by shifting the sampling point. */ - int shift = min(-8, max(7, deviation / 2)); + int shift = clamp(deviation / 2, -8, 7); hssrr |= (shift << HSCIF_SRHP_SHIFT) & HSCIF_SRHP_MASK; @@ -3102,6 +3130,7 @@ static struct uart_driver sci_uart_driver = { static int sci_remove(struct platform_device *dev) { struct sci_port *port = platform_get_drvdata(dev); + unsigned int type = port->port.type; /* uart_remove_... 
clears it */ sci_ports_in_use &= ~BIT(port->port.line); uart_remove_one_port(&sci_uart_driver, &port->port); @@ -3112,8 +3141,7 @@ static int sci_remove(struct platform_device *dev) sysfs_remove_file(&dev->dev.kobj, &dev_attr_rx_fifo_trigger.attr); } - if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB || - port->port.type == PORT_HSCIF) { + if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) { sysfs_remove_file(&dev->dev.kobj, &dev_attr_rx_fifo_timeout.attr); } diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 828f1143859c8010f9916fada4958e229ce4e555..2774af86763e05c8ff217f82334e6e20548b31e0 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -232,7 +232,7 @@ static inline void sprd_rx(struct uart_port *port) if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE | SPRD_LSR_FE | SPRD_LSR_OE)) - if (handle_lsr_errors(port, &lsr, &flag)) + if (handle_lsr_errors(port, &flag, &lsr)) continue; if (uart_handle_sysrq_char(port, ch)) continue; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index e8d7a7bb4339e4ef40fc22cb433588c54dbd43c0..f761e1038c3466eeba7ffd047782070bff3d33bc 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -506,7 +506,7 @@ static void stm32_start_tx(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; - if (uart_circ_empty(xmit)) + if (uart_circ_empty(xmit) && !port->x_char) return; stm32_transmit_chars(port); diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c index 70a4ea4eaa6e72b1191c27d66111c4ddb11d46f5..990376576970ae3607a5b645407050a4dfab82af 100644 --- a/drivers/tty/serial/suncore.c +++ b/drivers/tty/serial/suncore.c @@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp) mode = of_get_property(dp, mode_prop, NULL); if (!mode) mode = "9600,8,n,1,-"; + of_node_put(dp); } cflag = CREAD | HUPCL | CLOCAL; diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 63e34d868de8f32973b163267b0f45777bdfaaf2..f8503f8fc44e20edecfb16eb74459daa16e793ac 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = { static struct uart_driver sunhv_reg = { .owner = THIS_MODULE, .driver_name = "sunhv", - .dev_name = "ttyS", + .dev_name = "ttyHV", .major = TTY_MAJOR, }; diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 6cf3e9b0728f83d0c7b7adae94bcf45fea694066..3e77475668c02d25f88feaa9e95eb7e789ef8a46 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1394,22 +1394,43 @@ static inline struct console *SUNSU_CONSOLE(void) static enum su_type su_get_type(struct device_node *dp) { struct device_node *ap = of_find_node_by_path("/aliases"); + enum su_type rc = SU_PORT_PORT; if (ap) { const char *keyb = of_get_property(ap, "keyboard", NULL); const char *ms = of_get_property(ap, "mouse", NULL); + struct device_node *match; if (keyb) { - if (dp == of_find_node_by_path(keyb)) - return SU_PORT_KBD; + match = of_find_node_by_path(keyb); + + /* + * The pointer is used as an identifier not + * as a pointer, we can drop the refcount on + * the of__node immediately after getting it. 
+ */ + of_node_put(match); + + if (dp == match) { + rc = SU_PORT_KBD; + goto out; + } } if (ms) { - if (dp == of_find_node_by_path(ms)) - return SU_PORT_MS; + match = of_find_node_by_path(ms); + + of_node_put(match); + + if (dp == match) { + rc = SU_PORT_MS; + goto out; + } } } - return SU_PORT_PORT; +out: + of_node_put(ap); + return rc; } static int su_probe(struct platform_device *op) diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 98d3eadd2fd0380e467b90f3340020ca1f3ccdd6..5d1b7455e627d45dbc3a4029828d25b6253eb7db 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -618,7 +618,7 @@ static struct uart_driver ulite_uart_driver = { * * Returns: 0 on success, <0 otherwise */ -static int ulite_assign(struct device *dev, int id, u32 base, int irq, +static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq, struct uartlite_data *pdata) { struct uart_port *port; @@ -837,7 +837,8 @@ static int __init ulite_init(void) static void __exit ulite_exit(void) { platform_driver_unregister(&ulite_platform_driver); - uart_unregister_driver(&ulite_uart_driver); + if (ulite_uart_driver.state) + uart_unregister_driver(&ulite_uart_driver); } module_init(ulite_init); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index a48f19b1b88f1d48c5c6ae43389eb1e0ef79b9b7..66d49d511885354cdb927501fd275054c4080e22 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -125,7 +125,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255"); #define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */ #define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */ #define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */ -#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */ +#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */ /* * Do not enable parity error interrupt for the following @@ -362,7 +362,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id) cdns_uart_handle_tx(dev_id); isrstatus &= ~CDNS_UART_IXR_TXEMPTY; } - if (isrstatus & CDNS_UART_IXR_MASK) + + /* + * Skip RX processing if RX is disabled as RXEMPTY will never be set + * as read bytes will not be removed from the FIFO. + */ + if (isrstatus & CDNS_UART_IXR_RXMASK && + !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS)) cdns_uart_handle_rx(dev_id, isrstatus); spin_unlock(&port->lock); @@ -1217,7 +1223,7 @@ static void cdns_uart_console_write(struct console *co, const char *s, * * Return: 0 on success, negative errno otherwise. */ -static int __init cdns_uart_console_setup(struct console *co, char *options) +static int cdns_uart_console_setup(struct console *co, char *options) { struct uart_port *port = console_port; @@ -1273,24 +1279,11 @@ static struct uart_driver cdns_uart_uart_driver = { static int cdns_uart_suspend(struct device *device) { struct uart_port *port = dev_get_drvdata(device); - struct tty_struct *tty; - struct device *tty_dev; - int may_wake = 0; - - /* Get the tty which could be NULL so don't assume it's valid */ - tty = tty_port_tty_get(&port->state->port); - if (tty) { - tty_dev = tty->dev; - may_wake = device_may_wakeup(tty_dev); - tty_kref_put(tty); - } + int may_wake; - /* - * Call the API provided in serial_core.c file which handles - * the suspend. 
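Aside: the su_get_type() hunks above illustrate the OF refcount rule the new comment spells out - of_find_node_by_path() returns a node with an elevated refcount, and when the result is compared only for identity the reference can be dropped immediately. A small standalone sketch of the same pattern (demo_node_matches_path is hypothetical):

#include <linux/of.h>

/* True if @dp is the node named by alias/path @path.  The looked-up node
 * is used purely as an identity token, so its reference is dropped right
 * after the lookup. */
static bool demo_node_matches_path(struct device_node *dp, const char *path)
{
	struct device_node *match = of_find_node_by_path(path);

	of_node_put(match);	/* of_node_put(NULL) is a no-op */

	return dp == match;
}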
- */ - uart_suspend_port(&cdns_uart_uart_driver, port); - if (!(console_suspend_enabled && !may_wake)) { + may_wake = device_may_wakeup(device); + + if (console_suspend_enabled && may_wake) { unsigned long flags = 0; spin_lock_irqsave(&port->lock, flags); @@ -1305,7 +1298,11 @@ static int cdns_uart_suspend(struct device *device) spin_unlock_irqrestore(&port->lock, flags); } - return 0; + /* + * Call the API provided in serial_core.c file which handles + * the suspend. + */ + return uart_suspend_port(&cdns_uart_uart_driver, port); } /** @@ -1319,17 +1316,9 @@ static int cdns_uart_resume(struct device *device) struct uart_port *port = dev_get_drvdata(device); unsigned long flags = 0; u32 ctrl_reg; - struct tty_struct *tty; - struct device *tty_dev; - int may_wake = 0; - - /* Get the tty which could be NULL so don't assume it's valid */ - tty = tty_port_tty_get(&port->state->port); - if (tty) { - tty_dev = tty->dev; - may_wake = device_may_wakeup(tty_dev); - tty_kref_put(tty); - } + int may_wake; + + may_wake = device_may_wakeup(device); if (console_suspend_enabled && !may_wake) { struct cdns_uart *cdns_uart = port->private_data; @@ -1608,6 +1597,7 @@ static struct platform_driver cdns_uart_platform_driver = { .name = CDNS_UART_NAME, .of_match_table = cdns_uart_of_match, .pm = &cdns_uart_dev_pm_ops, + .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART), }, }; diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index a94086597ebd682a46f537b1c62b2f3b2a8b17e9..b88ecf102764e43e532a461f148ad97c282bea78 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -1186,14 +1186,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slgt_info *info = tty->driver_data; - int rc = -ENOIOCTLCMD; + int rc; if (sanity_check(info, tty->name, "compat_ioctl")) return -ENODEV; DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd)); switch (cmd) { - case MGSL_IOCSPARAMS32: rc = set_params32(info, compat_ptr(arg)); break; @@ -1213,18 +1212,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty, case MGSL_IOCWAITGPIO: case MGSL_IOCGXSYNC: case MGSL_IOCGXCTRL: - case MGSL_IOCSTXIDLE: - case MGSL_IOCTXENABLE: - case MGSL_IOCRXENABLE: - case MGSL_IOCTXABORT: - case TIOCMIWAIT: - case MGSL_IOCSIF: - case MGSL_IOCSXSYNC: - case MGSL_IOCSXCTRL: - rc = ioctl(tty, cmd, arg); + rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); break; + default: + rc = ioctl(tty, cmd, arg); } - DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc)); return rc; } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 06ed20dd01ba6f9cc443c690f04c131f09790cba..72a8c70094aba3cb3e09def04ad844da5a52377b 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -348,7 +348,7 @@ static void send_sig_all(int sig) if (is_global_init(p)) continue; - do_send_sig_info(sig, SEND_SIG_FORCED, p, PIDTYPE_MAX); + do_send_sig_info(sig, SEND_SIG_PRIV, p, PIDTYPE_MAX); } read_unlock(&tasklist_lock); } @@ -546,7 +546,6 @@ void __handle_sysrq(int key, bool check_mask) */ orig_log_level = console_loglevel; console_loglevel = CONSOLE_LOGLEVEL_DEFAULT; - pr_info("SysRq : "); op_p = __sysrq_get_key_op(key); if (op_p) { @@ -555,14 +554,15 @@ void __handle_sysrq(int key, bool check_mask) * should not) and is the invoked operation enabled? 
*/ if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { - pr_cont("%s\n", op_p->action_msg); + pr_info("%s\n", op_p->action_msg); console_loglevel = orig_log_level; op_p->handler(key); } else { - pr_cont("This sysrq operation is disabled.\n"); + pr_info("This sysrq operation is disabled.\n"); + console_loglevel = orig_log_level; } } else { - pr_cont("HELP : "); + pr_info("HELP : "); /* Only print the help msg once per handler */ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) { if (sysrq_key_table[i]) { @@ -1091,6 +1091,9 @@ int unregister_sysrq_key(int key, struct sysrq_key_op *op_p) EXPORT_SYMBOL(unregister_sysrq_key); #ifdef CONFIG_PROC_FS + +static DEFINE_MUTEX(sysrq_mutex); + /* * writing 'C' to /proc/sysrq-trigger is like sysrq-C */ @@ -1102,7 +1105,10 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, if (get_user(c, buf)) return -EFAULT; + + mutex_lock(&sysrq_mutex); __handle_sysrq(c, false); + mutex_unlock(&sysrq_mutex); } return count; diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c index 7576ceace57151a21007847f14dcf091005f09bb..f438eaa682463bffe42c27d923fe8426adef0926 100644 --- a/drivers/tty/tty_baudrate.c +++ b/drivers/tty/tty_baudrate.c @@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios) else cbaud += 15; } - return baud_table[cbaud]; + return cbaud >= n_baud_table ? 0 : baud_table[cbaud]; } EXPORT_SYMBOL(tty_termios_baud_rate); @@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios) else cbaud += 15; } - return baud_table[cbaud]; + return cbaud >= n_baud_table ? 0 : baud_table[cbaud]; #else /* IBSHIFT */ return tty_termios_baud_rate(termios); #endif /* IBSHIFT */ diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index c996b6859c5e70c72827f28c33c46bd3d96a1cda..93f9f9dc09fc418154e2c7377325202f1a771299 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -26,7 +26,7 @@ * Byte threshold to limit memory consumption for flip buffers. * The actual memory limit is > 2x this amount. */ -#define TTYB_DEFAULT_MEM_LIMIT 65536 +#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL) /* * We default to dicing tty buffer allocations to this many characters @@ -167,7 +167,9 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) have queued and recycle that ? */ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit) return NULL; + printk_safe_enter(); p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); + printk_safe_exit(); if (p == NULL) return NULL; @@ -388,27 +390,6 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) } EXPORT_SYMBOL(__tty_insert_flip_char); -/** - * tty_schedule_flip - push characters to ldisc - * @port: tty port to push from - * - * Takes any pending buffers and transfers their ownership to the - * ldisc side of the queue. It then schedules those characters for - * processing by the line discipline. - */ - -void tty_schedule_flip(struct tty_port *port) -{ - struct tty_bufhead *buf = &port->buf; - - /* paired w/ acquire in flush_to_ldisc(); ensures - * flush_to_ldisc() sees buffer data. 
- */ - smp_store_release(&buf->tail->commit, buf->tail->used); - queue_work(system_unbound_wq, &buf->work); -} -EXPORT_SYMBOL(tty_schedule_flip); - /** * tty_prepare_flip_string - make room for characters * @port: tty port @@ -468,11 +449,15 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count) { unsigned char *p = char_buf_ptr(head, head->read); char *f = NULL; + int n; if (~head->flags & TTYB_NORMAL) f = flag_buf_ptr(head, head->read); - return port->client_ops->receive_buf(port, p, f, count); + n = port->client_ops->receive_buf(port, p, f, count); + if (n > 0) + memset(p, 0, n); + return n; } /** @@ -525,12 +510,24 @@ static void flush_to_ldisc(struct work_struct *work) if (!count) break; head->read += count; + + if (need_resched()) + cond_resched(); } mutex_unlock(&buf->lock); } +static inline void tty_flip_buffer_commit(struct tty_buffer *tail) +{ + /* + * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees + * buffer data. + */ + smp_store_release(&tail->commit, tail->used); +} + /** * tty_flip_buffer_push - terminal * @port: tty port to push @@ -544,10 +541,44 @@ static void flush_to_ldisc(struct work_struct *work) void tty_flip_buffer_push(struct tty_port *port) { - tty_schedule_flip(port); + struct tty_bufhead *buf = &port->buf; + + tty_flip_buffer_commit(buf->tail); + queue_work(system_unbound_wq, &buf->work); } EXPORT_SYMBOL(tty_flip_buffer_push); +/** + * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and + * push + * @port: tty port + * @chars: characters + * @size: size + * + * The function combines tty_insert_flip_string() and tty_flip_buffer_push() + * with the exception of properly holding the @port->lock. + * + * To be used only internally (by pty currently). + * + * Returns: the number added. 
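Aside: tty_flip_buffer_commit() above is the publish half of a release/acquire pairing - the release store of 'used' into 'commit' guarantees that flush_to_ldisc(), which reads 'commit' with an acquire load, also sees every byte written into the buffer beforehand. A reduced illustration of that pairing with a hypothetical demo_buf:

#include <asm/barrier.h>

struct demo_buf {
	int	used;		/* filled by the producer */
	int	commit;		/* published to the consumer */
	char	data[256];
};

/* Producer: all stores to data[0..used) happen before the release store,
 * so a consumer that observes the new 'commit' also observes the bytes. */
static void demo_commit(struct demo_buf *b)
{
	smp_store_release(&b->commit, b->used);
}

/* Consumer: the acquire load pairs with demo_commit(); everything up to
 * the returned count is guaranteed to be visible. */
static int demo_ready_bytes(struct demo_buf *b)
{
	return smp_load_acquire(&b->commit);
}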
+ */ +int tty_insert_flip_string_and_push_buffer(struct tty_port *port, + const unsigned char *chars, size_t size) +{ + struct tty_bufhead *buf = &port->buf; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + size = tty_insert_flip_string(port, chars, size); + if (size) + tty_flip_buffer_commit(buf->tail); + spin_unlock_irqrestore(&port->lock, flags); + + queue_work(system_unbound_wq, &buf->work); + + return size; +} + /** * tty_buffer_init - prepare a tty buffer structure * @tty: tty to initialise diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 5e5da9acaf0a9f2522b0b1659968499d91a97e4a..58c3f7b92b0d389d74be2d6111162b0075572953 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -408,7 +408,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) mutex_lock(&tty_mutex); /* Search through the tty devices to look for a match */ list_for_each_entry(p, &tty_drivers, tty_drivers) { - if (strncmp(name, p->name, len) != 0) + if (!len || strncmp(name, p->name, len) != 0) continue; stp = str; if (*stp == ',') @@ -512,6 +512,8 @@ static const struct file_operations hung_up_tty_fops = { static DEFINE_SPINLOCK(redirect_lock); static struct file *redirect; +extern void tty_sysctl_init(void); + /** * tty_wakeup - request more data * @tty: terminal @@ -873,13 +875,13 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, return i; } -static void tty_write_unlock(struct tty_struct *tty) +void tty_write_unlock(struct tty_struct *tty) { mutex_unlock(&tty->atomic_write_lock); wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); } -static int tty_write_lock(struct tty_struct *tty, int ndelay) +int tty_write_lock(struct tty_struct *tty, int ndelay) { if (!mutex_trylock(&tty->atomic_write_lock)) { if (ndelay) @@ -1153,14 +1155,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, { struct tty_struct *tty; - if (driver->ops->lookup) + if (driver->ops->lookup) { if (!file) tty = ERR_PTR(-EIO); else tty = driver->ops->lookup(driver, file, idx); - else + } else { + if (idx >= driver->num) + return ERR_PTR(-EINVAL); tty = driver->ttys[idx]; - + } if (!IS_ERR(tty)) tty_kref_get(tty); return tty; @@ -1255,7 +1259,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; - int retval; + struct tty_ldisc *ld; + int retval = 0; if (driver->type == TTY_DRIVER_TYPE_PTY && driver->subtype == PTY_TYPE_MASTER) @@ -1267,14 +1272,21 @@ static int tty_reopen(struct tty_struct *tty) if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) return -EBUSY; - tty->count++; + ld = tty_ldisc_ref_wait(tty); + if (ld) { + tty_ldisc_deref(ld); + } else { + retval = tty_ldisc_lock(tty, 5 * HZ); + if (retval) + return retval; - if (tty->ldisc) - return 0; + if (!tty->ldisc) + retval = tty_ldisc_reinit(tty, tty->termios.c_line); + tty_ldisc_unlock(tty); + } - retval = tty_ldisc_reinit(tty, tty->termios.c_line); - if (retval) - tty->count--; + if (retval == 0) + tty->count++; return retval; } @@ -1333,9 +1345,12 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) if (!tty->port) tty->port = driver->ports[idx]; - WARN_RATELIMIT(!tty->port, - "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n", - __func__, tty->driver->name); + if (WARN_RATELIMIT(!tty->port, + "%s: %s driver does not set tty->port. This would crash the kernel. 
Fix the driver!\n", + __func__, tty->driver->name)) { + retval = -EINVAL; + goto err_release_lock; + } retval = tty_ldisc_lock(tty, 5 * HZ); if (retval) @@ -1372,7 +1387,13 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) return ERR_PTR(retval); } -static void tty_free_termios(struct tty_struct *tty) +/** + * tty_save_termios() - save tty termios data in driver table + * @tty: tty whose termios data to save + * + * Locking: Caller guarantees serialisation with tty_init_termios(). + */ +void tty_save_termios(struct tty_struct *tty) { struct ktermios *tp; int idx = tty->index; @@ -1391,6 +1412,7 @@ static void tty_free_termios(struct tty_struct *tty) } *tp = tty->termios; } +EXPORT_SYMBOL_GPL(tty_save_termios); /** * tty_flush_works - flush all works of a tty/pty pair @@ -1490,12 +1512,14 @@ static void release_tty(struct tty_struct *tty, int idx) WARN_ON(!mutex_is_locked(&tty_mutex)); if (tty->ops->shutdown) tty->ops->shutdown(tty); - tty_free_termios(tty); + tty_save_termios(tty); tty_driver_remove_tty(tty->driver, tty); - tty->port->itty = NULL; + if (tty->port) + tty->port->itty = NULL; if (tty->link) tty->link->port->itty = NULL; - tty_buffer_cancel_work(tty->port); + if (tty->port) + tty_buffer_cancel_work(tty->port); if (tty->link) tty_buffer_cancel_work(tty->link->port); @@ -1819,7 +1843,7 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp) static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp, int *index) { - struct tty_driver *driver; + struct tty_driver *driver = NULL; switch (device) { #ifdef CONFIG_VT @@ -1840,6 +1864,8 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp, break; } } + if (driver) + tty_driver_kref_put(driver); return ERR_PTR(-ENODEV); } default: @@ -2156,8 +2182,6 @@ static int tty_fasync(int fd, struct file *filp, int on) * Locking: * Called functions take tty_ldiscs_lock * current->signal->tty check is safe without locks - * - * FIXME: may race normal receive processing */ static int tiocsti(struct tty_struct *tty, char __user *p) @@ -2173,7 +2197,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p) ld = tty_ldisc_ref_wait(tty); if (!ld) return -EIO; - ld->ops->receive_buf(tty, &ch, &mbz, 1); + tty_buffer_lock_exclusive(tty->port); + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, &ch, &mbz, 1); + tty_buffer_unlock_exclusive(tty->port); tty_ldisc_deref(ld); return 0; } @@ -2729,10 +2756,14 @@ void __do_SAK(struct tty_struct *tty) struct task_struct *g, *p; struct pid *session; int i; + unsigned long flags; if (!tty) return; - session = tty->session; + + spin_lock_irqsave(&tty->ctrl_lock, flags); + session = get_pid(tty->session); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty_ldisc_flush(tty); @@ -2764,6 +2795,7 @@ void __do_SAK(struct tty_struct *tty) task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + put_pid(session); #endif } @@ -3324,6 +3356,7 @@ void console_sysfs_notify(void) */ int __init tty_init(void) { + tty_sysctl_init(); cdev_init(&tty_cdev, &tty_fops); if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0) diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index d99fec44036c38156c996d840ac5c45643d2da2c..095c8780e210ba38f098ce05f09d0e9a1c73cd98 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -397,21 +397,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) tmp_termios.c_ispeed 
= tty_termios_input_baud_rate(&tmp_termios); tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios); - ld = tty_ldisc_ref(tty); + if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) { +retry_write_wait: + retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty)); + if (retval < 0) + return retval; - if (ld != NULL) { - if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer) - ld->ops->flush_buffer(tty); - tty_ldisc_deref(ld); - } + if (tty_write_lock(tty, 0) < 0) + goto retry_write_wait; - if (opt & TERMIOS_WAIT) { - tty_wait_until_sent(tty, 0); - if (signal_pending(current)) - return -ERESTARTSYS; - } + /* Racing writer? */ + if (tty_chars_in_buffer(tty)) { + tty_write_unlock(tty); + goto retry_write_wait; + } + + ld = tty_ldisc_ref(tty); + if (ld != NULL) { + if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer) + ld->ops->flush_buffer(tty); + tty_ldisc_deref(ld); + } + + if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) { + tty->ops->wait_until_sent(tty, 0); + if (signal_pending(current)) { + tty_write_unlock(tty); + return -ERESTARTSYS; + } + } + + tty_set_termios(tty, &tmp_termios); - tty_set_termios(tty, &tmp_termios); + tty_write_unlock(tty); + } else { + tty_set_termios(tty, &tmp_termios); + } /* FIXME: Arguably if tmp_termios == tty->termios AND the actual requested termios was not tmp_termios then we may diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index c4ecd66fafefda80aac843464b34501409eaf9a6..5a87f61498764f84638a6a47dc3b5fe760fda8ec 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -44,7 +44,7 @@ int __tty_check_change(struct tty_struct *tty, int sig) tty_pgrp = tty->pgrp; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - if (tty_pgrp && pgrp != tty->pgrp) { + if (tty_pgrp && pgrp != tty_pgrp) { if (is_ignored(sig)) { if (sig == SIGTTIN) ret = -EIO; @@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty) put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty->session = get_pid(task_session(current)); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); @@ -290,23 +290,31 @@ void disassociate_ctty(int on_exit) return; } - spin_lock_irq(¤t->sighand->siglock); - put_pid(current->signal->tty_old_pgrp); - current->signal->tty_old_pgrp = NULL; - - tty = tty_kref_get(current->signal->tty); + tty = get_current_tty(); if (tty) { unsigned long flags; + + tty_lock(tty); spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty_unlock(tty); tty_kref_put(tty); } + /* If tty->ctrl.pgrp is not NULL, it may be assigned to + * current->signal->tty_old_pgrp in a race condition, and + * cause pid memleak. Release current->signal->tty_old_pgrp + * after tty->ctrl.pgrp set to NULL. 
+ */ + spin_lock_irq(¤t->sighand->siglock); + put_pid(current->signal->tty_old_pgrp); + current->signal->tty_old_pgrp = NULL; spin_unlock_irq(¤t->sighand->siglock); + /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); @@ -477,14 +485,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return -ENOTTY; if (retval) return retval; - if (!current->signal->tty || - (current->signal->tty != real_tty) || - (real_tty->session != task_session(current))) - return -ENOTTY; + if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; + + spin_lock_irq(&real_tty->ctrl_lock); + if (!current->signal->tty || + (current->signal->tty != real_tty) || + (real_tty->session != task_session(current))) { + retval = -ENOTTY; + goto out_unlock_ctrl; + } rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; @@ -494,12 +507,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; - spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = get_pid(pgrp); - spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); +out_unlock_ctrl: + spin_unlock_irq(&real_tty->ctrl_lock); return retval; } @@ -511,20 +524,30 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t * * Obtain the session id of the tty. If there is no session * return an error. - * - * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { + unsigned long flags; + pid_t sid; + /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. 
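Aside: the __do_SAK and tty_jobctrl hunks above converge on one discipline for tty->session and tty->pgrp - read or swap the pointer only under ctrl_lock, and if the pid is needed after the lock is dropped, pin it with get_pid() first. A minimal sketch of the read side (demo_session_vnr is a hypothetical helper):

#include <linux/pid.h>
#include <linux/spinlock.h>
#include <linux/tty.h>

/* Snapshot the session pid under ctrl_lock, then translate it to a
 * numeric id outside the lock.  get_pid()/put_pid() tolerate NULL. */
static pid_t demo_session_vnr(struct tty_struct *tty)
{
	unsigned long flags;
	struct pid *session;
	pid_t nr;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	session = get_pid(tty->session);
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);

	nr = session ? pid_vnr(session) : 0;
	put_pid(session);

	return nr;
}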
*/ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; + + spin_lock_irqsave(&real_tty->ctrl_lock, flags); if (!real_tty->session) - return -ENOTTY; - return put_user(pid_vnr(real_tty->session), p); + goto err; + sid = pid_vnr(real_tty->session); + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + + return put_user(sid, p); + +err: + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + return -ENOTTY; } /* diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index fc4c97cae01ef4812da9b1bf61d6efe5d9cf6784..245c9a51c2de2f174a3467fa2d4c2adca3a7ed2d 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops) * takes tty_ldiscs_lock to guard against ldisc races */ +#if defined(CONFIG_LDISC_AUTOLOAD) + #define INITIAL_AUTOLOAD_STATE 1 +#else + #define INITIAL_AUTOLOAD_STATE 0 +#endif +static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE; + static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) { struct tty_ldisc *ld; @@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) */ ldops = get_ldops(disc); if (IS_ERR(ldops)) { + if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload) + return ERR_PTR(-EPERM); request_module("tty-ldisc-%d", disc); ldops = get_ldops(disc); if (IS_ERR(ldops)) @@ -327,6 +336,11 @@ int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) { int ret; + /* Kindly asking blocked readers to release the read side */ + set_bit(TTY_LDISC_CHANGING, &tty->flags); + wake_up_interruptible_all(&tty->read_wait); + wake_up_interruptible_all(&tty->write_wait); + ret = __tty_ldisc_lock(tty, timeout); if (!ret) return -EBUSY; @@ -337,6 +351,8 @@ int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) void tty_ldisc_unlock(struct tty_struct *tty) { clear_bit(TTY_LDISC_HALTED, &tty->flags); + /* Can be cleared here - ldisc_unlock will wake up writers firstly */ + clear_bit(TTY_LDISC_CHANGING, &tty->flags); __tty_ldisc_unlock(tty); } @@ -829,3 +845,41 @@ void tty_ldisc_deinit(struct tty_struct *tty) tty_ldisc_put(tty->ldisc); tty->ldisc = NULL; } + +static int zero; +static int one = 1; +static struct ctl_table tty_table[] = { + { + .procname = "ldisc_autoload", + .data = &tty_ldisc_autoload, + .maxlen = sizeof(tty_ldisc_autoload), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &zero, + .extra2 = &one, + }, + { } +}; + +static struct ctl_table tty_dir_table[] = { + { + .procname = "tty", + .mode = 0555, + .child = tty_table, + }, + { } +}; + +static struct ctl_table tty_root_table[] = { + { + .procname = "dev", + .mode = 0555, + .child = tty_dir_table, + }, + { } +}; + +void tty_sysctl_init(void) +{ + register_sysctl_table(tty_root_table); +} diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 0c98d88f795a7bf290f4b472a60024e151feccfd..2f0372976459eb771c830442efe94d24c70ba489 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c @@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem) list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { tsk = waiter->task; - smp_mb(); - waiter->task = NULL; + smp_store_release(&waiter->task, NULL); wake_up_process(tsk); put_task_struct(tsk); } @@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout) for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); - if (!waiter.task) + if (!smp_load_acquire(&waiter.task)) break; if (!timeout) break; @@ -293,6 +292,16 @@ 
down_write_failed(struct ld_semaphore *sem, long count, long timeout) if (!locked) atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count); list_del(&waiter.list); + + /* + * In case of timeout, wake up every reader who gave the right of way + * to writer. Prevent separation readers into two groups: + * one that helds semaphore and another that sleeps. + * (in case of no contention with a writer) + */ + if (!locked && list_empty(&sem->write_wait)) + __ldsem_wake_readers(sem); + raw_spin_unlock_irq(&sem->wait_lock); __set_current_state(TASK_RUNNING); diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 25d736880013b256cf692a2f5d3e6f51c4af9ece..c699d41a2a48f5ce8acffab4251e35bbd54c62c1 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -640,7 +640,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty, if (tty_port_close_start(port, tty, filp) == 0) return; tty_port_shutdown(port, tty); - set_bit(TTY_IO_ERROR, &tty->flags); + if (!port->console) + set_bit(TTY_IO_ERROR, &tty->flags); tty_port_close_end(port, tty); tty_port_tty_set(port, NULL); } diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 58b454c34560a76f098269644f6368049312a543..73dae890221627a80f610c995d066d5c54e3cfc6 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -586,18 +586,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) return -ENOMEM; name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL); + if (!name) { + rv = -ENOMEM; + goto free_port; + } rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions, ARRAY_SIZE(vcc_versions), NULL, name); if (rv) - goto free_port; + goto free_name; port->vio.debug = vcc_dbg_vio; vcc_ldc_cfg.debug = vcc_dbg_ldc; rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port); if (rv) - goto free_port; + goto free_name; spin_lock_init(&port->lock); @@ -630,6 +634,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto unreg_tty; } port->domain = kstrdup(domain, GFP_KERNEL); + if (!port->domain) { + rv = -ENOMEM; + goto unreg_tty; + } + mdesc_release(hp); @@ -659,8 +668,9 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) vcc_table_remove(port->index); free_ldc: vio_ldc_free(&port->vio); -free_port: +free_name: kfree(name); +free_port: kfree(port); return rv; diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 88312c6c92cc6d2f9e1c806571a6360ecab5e5b2..e3aac11d770fee2feda00642d7c1dab4f2519c86 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -123,10 +123,15 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals); static struct input_handler kbd_handler; static DEFINE_SPINLOCK(kbd_event_lock); static DEFINE_SPINLOCK(led_lock); +static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. 
*/ static bool dead_key_next; -static int npadch = -1; /* -1 or number assembled on pad */ + +/* Handles a number being assembled on the number pad */ +static bool npadch_active; +static unsigned int npadch_value; + static unsigned int diacr; static char rep; /* flag telling character repeat */ @@ -305,7 +310,7 @@ int kbd_rate(struct kbd_repeat *rpt) static void put_queue(struct vc_data *vc, int ch) { tty_insert_flip_char(&vc->port, ch, 0); - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void puts_queue(struct vc_data *vc, char *cp) @@ -314,7 +319,7 @@ static void puts_queue(struct vc_data *vc, char *cp) tty_insert_flip_char(&vc->port, *cp, 0); cp++; } - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void applkey(struct vc_data *vc, int key, char mode) @@ -559,7 +564,7 @@ static void fn_inc_console(struct vc_data *vc) static void fn_send_intr(struct vc_data *vc) { tty_insert_flip_char(&vc->port, 0, TTY_BREAK); - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void fn_scroll_forw(struct vc_data *vc) @@ -737,8 +742,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) return; if ((unsigned)value < ARRAY_SIZE(func_table)) { + unsigned long flags; + + spin_lock_irqsave(&func_buf_lock, flags); if (func_table[value]) puts_queue(vc, func_table[value]); + spin_unlock_irqrestore(&func_buf_lock, flags); + } else pr_err("k_fn called with value=%d\n", value); } @@ -844,12 +854,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) shift_state &= ~(1 << value); /* kludge */ - if (up_flag && shift_state != old_state && npadch != -1) { + if (up_flag && shift_state != old_state && npadch_active) { if (kbd->kbdmode == VC_UNICODE) - to_utf8(vc, npadch); + to_utf8(vc, npadch_value); else - put_queue(vc, npadch & 0xff); - npadch = -1; + put_queue(vc, npadch_value & 0xff); + npadch_active = false; } } @@ -867,7 +877,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) { - int base; + unsigned int base; if (up_flag) return; @@ -881,10 +891,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) base = 16; } - if (npadch == -1) - npadch = value; - else - npadch = npadch * base + value; + if (!npadch_active) { + npadch_value = 0; + npadch_active = true; + } + + npadch_value = npadch_value * base + value; } static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) @@ -1490,7 +1502,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type, if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev)) kbd_rawcode(value); - if (event_type == EV_KEY) + if (event_type == EV_KEY && event_code <= KEY_MAX) kbd_keycode(event_code, value, HW_RAW(handle->dev)); spin_unlock(&kbd_event_lock); @@ -1983,18 +1995,17 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, #undef s #undef v -/* FIXME: This one needs untangling and locking */ +/* FIXME: This one needs untangling */ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) { struct kbsentry *kbs; - char *p; u_char *q; - u_char __user *up; - int sz; + int sz, fnw_sz; int delta; char *first_free, *fj, *fnw; int i, j, k; int ret; + unsigned long flags; if (!capable(CAP_SYS_TTY_CONFIG)) perm = 0; @@ -2014,30 +2025,33 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) i = kbs->kb_func; switch (cmd) { - 
case KDGKBSENT: - sz = sizeof(kbs->kb_string) - 1; /* sz should have been - a struct member */ - up = user_kdgkb->kb_string; - p = func_table[i]; - if(p) - for ( ; *p && sz; p++, sz--) - if (put_user(*p, up++)) { - ret = -EFAULT; - goto reterr; - } - if (put_user('\0', up)) { - ret = -EFAULT; - goto reterr; - } - kfree(kbs); - return ((p && *p) ? -EOVERFLOW : 0); + case KDGKBSENT: { + /* size should have been a struct member */ + ssize_t len = sizeof(user_kdgkb->kb_string); + + spin_lock_irqsave(&func_buf_lock, flags); + len = strlcpy(kbs->kb_string, func_table[i] ? : "", len); + spin_unlock_irqrestore(&func_buf_lock, flags); + + ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string, + len + 1) ? -EFAULT : 0; + + goto reterr; + } case KDSKBSENT: if (!perm) { ret = -EPERM; goto reterr; } + fnw = NULL; + fnw_sz = 0; + /* race aginst other writers */ + again: + spin_lock_irqsave(&func_buf_lock, flags); q = func_table[i]; + + /* fj pointer to next entry after 'q' */ first_free = funcbufptr + (funcbufsize - funcbufleft); for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) ; @@ -2045,10 +2059,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) fj = func_table[j]; else fj = first_free; - + /* buffer usage increase by new entry */ delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string); + if (delta <= funcbufleft) { /* it fits in current buf */ if (j < MAX_NR_FUNC) { + /* make enough space for new entry at 'fj' */ memmove(fj + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) if (func_table[k]) @@ -2061,20 +2077,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) sz = 256; while (sz < funcbufsize - funcbufleft + delta) sz <<= 1; - fnw = kmalloc(sz, GFP_KERNEL); - if(!fnw) { - ret = -ENOMEM; - goto reterr; + if (fnw_sz != sz) { + spin_unlock_irqrestore(&func_buf_lock, flags); + kfree(fnw); + fnw = kmalloc(sz, GFP_KERNEL); + fnw_sz = sz; + if (!fnw) { + ret = -ENOMEM; + goto reterr; + } + goto again; } if (!q) func_table[i] = fj; + /* copy data before insertion point to new location */ if (fj > funcbufptr) memmove(fnw, funcbufptr, fj - funcbufptr); for (k = 0; k < j; k++) if (func_table[k]) func_table[k] = fnw + (func_table[k] - funcbufptr); + /* copy data after insertion point to new location */ if (first_free > fj) { memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj); for (k = j; k < MAX_NR_FUNC; k++) @@ -2087,7 +2111,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) funcbufleft = funcbufleft - delta + sz - funcbufsize; funcbufsize = sz; } + /* finally insert item itself */ strcpy(func_table[i], kbs->kb_string); + spin_unlock_irqrestore(&func_buf_lock, flags); break; } ret = 0; diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index 07496c711d7dcc1dde58fb1b3db7a29384ef336d..34a0e529cb5a34a92f465bc81aab9e9262f8d403 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -27,6 +28,8 @@ #include #include +#include + /* Don't take this from : 011-015 on the screen aren't spaces */ #define isspace(c) ((c) == ' ') @@ -41,6 +44,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */ static int sel_end; static int sel_buffer_lth; static char *sel_buffer; +static DEFINE_MUTEX(sel_lock); /* clear_selection, highlight and highlight_pointer can be called from interrupt (via scrollback/front) */ @@ -81,6 +85,11 @@ void clear_selection(void) } } +bool 
vc_is_sel(struct vc_data *vc) +{ + return vc == sel_cons; +} + /* * User settable table: what characters are to be considered alphabetic? * 128 bits. Locked by the console lock. @@ -163,7 +172,7 @@ static int store_utf8(u32 c, char *p) * The entire selection process is managed under the console_lock. It's * a lot under the lock but its hardly a performance path */ -int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty) +static int __set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty) { struct vc_data *vc = vc_cons[fg_console].d; int new_sel_start, new_sel_end, spc; @@ -171,7 +180,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t char *bp, *obp; int i, ps, pe, multiplier; u32 c; - int mode; + int mode, ret = 0; poke_blanked_console(); if (copy_from_user(&v, sel, sizeof(*sel))) @@ -320,7 +329,21 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t } } sel_buffer_lth = bp - sel_buffer; - return 0; + + return ret; +} + +int set_selection(const struct tiocl_selection __user *v, struct tty_struct *tty) +{ + int ret; + + mutex_lock(&sel_lock); + console_lock(); + ret = __set_selection(v, tty); + console_unlock(); + mutex_unlock(&sel_lock); + + return ret; } /* Insert the contents of the selection buffer into the @@ -337,6 +360,7 @@ int paste_selection(struct tty_struct *tty) unsigned int count; struct tty_ldisc *ld; DECLARE_WAITQUEUE(wait, current); + int ret = 0; console_lock(); poke_blanked_console(); @@ -348,10 +372,17 @@ int paste_selection(struct tty_struct *tty) tty_buffer_lock_exclusive(&vc->port); add_wait_queue(&vc->paste_wait, &wait); + mutex_lock(&sel_lock); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + ret = -EINTR; + break; + } if (tty_throttled(tty)) { + mutex_unlock(&sel_lock); schedule(); + mutex_lock(&sel_lock); continue; } __set_current_state(TASK_RUNNING); @@ -360,10 +391,11 @@ int paste_selection(struct tty_struct *tty) count); pasted += count; } + mutex_unlock(&sel_lock); remove_wait_queue(&vc->paste_wait, &wait); __set_current_state(TASK_RUNNING); tty_buffer_unlock_exclusive(&vc->port); tty_ldisc_deref(ld); - return 0; + return ret; } diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 2384ea85ffafed31c9450481125dc7740a4b387c..2fb509d57e88c4ee5d1e99b00086ff11ccaec361 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -437,6 +437,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) size_t ret; char *con_buf; + if (use_unicode(inode)) + return -EOPNOTSUPP; + con_buf = (char *) __get_free_page(GFP_KERNEL); if (!con_buf) return -ENOMEM; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 5f1183b0b89df3bc672ccf7299dc103dfddbf138..12ddd82f0dadbf1523731ca04682d5bdf4bacbb6 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include @@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = kmalloc(memsize, GFP_KERNEL); + p = vmalloc(memsize); if (!p) return NULL; @@ -364,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) return uniscr; } +static void vc_uniscr_free(struct uni_screen *uniscr) +{ + vfree(uniscr); +} + 
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - kfree(vc->vc_uni_screen); + vc_uniscr_free(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -399,7 +405,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) char32_t *ln = uniscr->lines[vc->vc_y]; unsigned int x = vc->vc_x, cols = vc->vc_cols; - memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); + memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); } } @@ -849,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); - scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2); + scr_memmovew(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2); scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; @@ -890,8 +896,9 @@ static void hide_softcursor(struct vc_data *vc) static void hide_cursor(struct vc_data *vc) { - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); + vc->vc_sw->con_cursor(vc, CM_ERASE); hide_softcursor(vc); } @@ -901,7 +908,7 @@ static void set_cursor(struct vc_data *vc) if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS) return; if (vc->vc_deccm) { - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); add_softcursor(vc); if ((vc->vc_cursor_type & 0x0f) != 1) @@ -935,8 +942,22 @@ static void flush_scrollback(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); - if (vc->vc_sw->con_flush_scrollback) + set_origin(vc); + if (vc->vc_sw->con_flush_scrollback) { vc->vc_sw->con_flush_scrollback(vc); + } else if (con_is_visible(vc)) { + /* + * When no con_flush_scrollback method is provided then the + * legacy way for flushing the scrollback buffer is to use + * a side effect of the con_switch method. We do it only on + * the foreground console as background consoles have no + * scrollback buffers in that case and we obviously don't + * want to switch to them. 
+ */ + hide_cursor(vc); + vc->vc_sw->con_switch(vc); + set_cursor(vc); + } } /* @@ -1056,10 +1077,37 @@ static void visual_init(struct vc_data *vc, int num, int init) vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row; } + +static void visual_deinit(struct vc_data *vc) +{ + vc->vc_sw->con_deinit(vc); + module_put(vc->vc_sw->owner); +} + +static void vc_port_destruct(struct tty_port *port) +{ + struct vc_data *vc = container_of(port, struct vc_data, port); + + kfree(vc); +} + +static const struct tty_port_operations vc_port_ops = { + .destruct = vc_port_destruct, +}; + +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1081,6 +1129,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ vc_cons[currcons].d = vc; tty_port_init(&vc->port); + vc->port.ops = &vc_port_ops; INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); visual_init(vc, currcons, 1); @@ -1088,6 +1137,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1103,9 +1157,10 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ return 0; err_free: + visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1114,20 +1169,12 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, /* Resizes the resolution of the display adapater */ int err = 0; - if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize) + if (vc->vc_sw->con_resize) err = vc->vc_sw->con_resize(vc, width, height, user); return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1152,7 +1199,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1163,7 +1210,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? 
cols : vc->vc_cols); @@ -1171,10 +1218,27 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, new_row_size = new_cols << 1; new_screen_size = new_row_size * new_rows; - if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) - return 0; + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) { + /* + * This function is being called here to cover the case + * where the userspace calls the FBIOPUT_VSCREENINFO twice, + * passing the same fb_var_screeninfo containing the fields + * yres/xres equal to a number non-multiple of vc_font.height + * and yres_virtual/xres_virtual equal to number lesser than the + * vc_font.height and yres/xres. + * In the second call, the struct fb_var_screeninfo isn't + * being modified by the underlying driver because of the + * if above, and this causes the fbcon_display->vrows to become + * negative and it eventually leads to out-of-bound + * access by the imageblit function. + * To give the correct values to the struct and to not have + * to deal with possible errors from the code below, we call + * the resize_screen here as well. + */ + return resize_screen(vc, new_cols, new_rows, user); + } - if (new_screen_size > (4 << 20)) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -1188,7 +1252,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, } } - if (vc == sel_cons) + if (vc_is_sel(vc)) clear_selection(); old_rows = vc->vc_rows; @@ -1197,7 +1261,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, err = resize_screen(vc, new_cols, new_rows, user); if (err) { kfree(newscreen); - kfree(new_uniscr); + vc_uniscr_free(new_uniscr); return err; } @@ -1250,10 +1314,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; @@ -1275,6 +1340,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (con_is_visible(vc)) update_screen(vc); vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); + notify_update(vc); return err; } @@ -1330,9 +1396,9 @@ struct vc_data *vc_deallocate(unsigned int currcons) param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m); vcs_remove_sysfs(currcons); - vc->vc_sw->con_deinit(vc); + visual_deinit(vc); + con_free_unimap(vc); put_pid(vc->vt_pid); - module_put(vc->vc_sw->owner); vc_uniscr_set(vc, NULL); kfree(vc->vc_screenbuf); vc_cons[currcons].d = NULL; @@ -1505,8 +1571,10 @@ static void csi_J(struct vc_data *vc, int vpar) count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1; start = (unsigned short *)vc->vc_origin; break; + case 3: /* include scrollback */ + flush_scrollback(vc); + /* fallthrough */ case 2: /* erase whole display */ - case 3: /* (and scrollback buffer later) */ vc_uniscr_clear_lines(vc, 0, vc->vc_rows); count = vc->vc_cols * vc->vc_rows; start = (unsigned short *)vc->vc_origin; @@ -1515,12 +1583,7 @@ static void csi_J(struct vc_data *vc, int vpar) return; } scr_memsetw(start, vc->vc_video_erase_char, 2 * count); - if (vpar == 3) { - set_origin(vc); - flush_scrollback(vc); - if (con_is_visible(vc)) - update_screen(vc); - } else if (con_should_update(vc)) + if 
(con_should_update(vc)) do_update_region(vc, (unsigned long) start, count); vc->vc_need_wrap = 0; } @@ -1551,7 +1614,7 @@ static void csi_K(struct vc_data *vc, int vpar) scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); vc->vc_need_wrap = 0; if (con_should_update(vc)) - do_update_region(vc, (unsigned long) start, count); + do_update_region(vc, (unsigned long)(start + offset), count); } static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */ @@ -1775,7 +1838,7 @@ static void respond_string(const char *p, struct tty_port *port) tty_insert_flip_char(port, *p, 0); p++; } - tty_schedule_flip(port); + tty_flip_buffer_push(port); } static void cursor_report(struct vc_data *vc, struct tty_struct *tty) @@ -2767,8 +2830,8 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co con_flush(vc, draw_from, draw_to, &draw_x); vc_uniscr_debug_check(vc); console_conditional_schedule(); - console_unlock(); notify_update(vc); + console_unlock(); return n; } @@ -2887,8 +2950,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) unsigned char c; static DEFINE_SPINLOCK(printing_lock); const ushort *start; - ushort cnt = 0; - ushort myx; + ushort start_x, cnt; int kmsg_console; /* console busy or not yet initialized */ @@ -2901,10 +2963,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) vc = vc_cons[kmsg_console - 1].d; - /* read `x' only after setting currcons properly (otherwise - the `x' macro will read the x of the foreground console). */ - myx = vc->vc_x; - if (!vc_cons_allocated(fg_console)) { /* impossible */ /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ @@ -2919,53 +2977,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) hide_cursor(vc); start = (ushort *)vc->vc_pos; - - /* Contrived structure to try to emulate original need_wrap behaviour - * Problems caused when we have need_wrap set on '\n' character */ + start_x = vc->vc_x; + cnt = 0; while (count--) { c = *b++; if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { - if (cnt > 0) { - if (con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); - vc->vc_x += cnt; - if (vc->vc_need_wrap) - vc->vc_x--; - cnt = 0; - } + if (cnt && con_is_visible(vc)) + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + cnt = 0; if (c == 8) { /* backspace */ bs(vc); start = (ushort *)vc->vc_pos; - myx = vc->vc_x; + start_x = vc->vc_x; continue; } if (c != 13) lf(vc); cr(vc); start = (ushort *)vc->vc_pos; - myx = vc->vc_x; + start_x = vc->vc_x; if (c == 10 || c == 13) continue; } + vc_uniscr_putc(vc, c); scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); notify_write(vc, c); cnt++; - if (myx == vc->vc_cols - 1) { - vc->vc_need_wrap = 1; - continue; - } - vc->vc_pos += 2; - myx++; - } - if (cnt > 0) { - if (con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); - vc->vc_x += cnt; - if (vc->vc_x == vc->vc_cols) { - vc->vc_x--; + if (vc->vc_x == vc->vc_cols - 1) { vc->vc_need_wrap = 1; + } else { + vc->vc_pos += 2; + vc->vc_x++; } } + if (cnt && con_is_visible(vc)) + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); set_cursor(vc); notify_update(vc); @@ -3020,9 +3066,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) switch (type) { case TIOCL_SETSEL: - console_lock(); ret = set_selection((struct tiocl_selection __user *)(p+1), 
tty); - console_unlock(); break; case TIOCL_PASTESEL: ret = paste_selection(tty); @@ -3228,6 +3272,7 @@ static int con_install(struct tty_driver *driver, struct tty_struct *tty) tty->driver_data = vc; vc->port.tty = tty; + tty_port_get(&vc->port); if (!tty->winsize.ws_row && !tty->winsize.ws_col) { tty->winsize.ws_row = vc_cons[currcons].d->vc_rows; @@ -3263,6 +3308,13 @@ static void con_shutdown(struct tty_struct *tty) console_unlock(); } +static void con_cleanup(struct tty_struct *tty) +{ + struct vc_data *vc = tty->driver_data; + + tty_port_put(&vc->port); +} + static int default_color = 7; /* white */ static int default_italic_color = 2; // green (ASCII) static int default_underline_color = 3; // cyan (ASCII) @@ -3344,6 +3396,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); @@ -3387,7 +3440,8 @@ static const struct tty_operations con_ops = { .throttle = con_throttle, .unthrottle = con_unthrottle, .resize = vt_resize, - .shutdown = con_shutdown + .shutdown = con_shutdown, + .cleanup = con_cleanup, }; static struct cdev vc0_cdev; @@ -4171,8 +4225,6 @@ void do_blank_screen(int entering_gfx) return; } - if (blank_state != blank_normal_wait) - return; blank_state = blank_off; /* don't blank graphics */ @@ -4420,16 +4472,8 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) if (op->data && font.charcount > op->charcount) rc = -ENOSPC; - if (!(op->flags & KD_FONT_FLAG_OLD)) { - if (font.width > op->width || font.height > op->height) - rc = -ENOSPC; - } else { - if (font.width != 8) - rc = -EIO; - else if ((op->height && font.height > op->height) || - font.height > 32) - rc = -ENOSPC; - } + if (font.width > op->width || font.height > op->height) + rc = -ENOSPC; if (rc) goto out; @@ -4457,7 +4501,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) return -EINVAL; if (op->charcount > 512) return -EINVAL; - if (op->width <= 0 || op->width > 32 || op->height > 32) + if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32) return -EINVAL; size = (op->width+7)/8 * 32 * op->charcount; if (size > max_font_size) @@ -4467,31 +4511,6 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) if (IS_ERR(font.data)) return PTR_ERR(font.data); - if (!op->height) { /* Need to guess font height [compat] */ - int h, i; - u8 *charmap = font.data; - - /* - * If from KDFONTOP ioctl, don't allow things which can be done - * in userland,so that we can get rid of this soon - */ - if (!(op->flags & KD_FONT_FLAG_OLD)) { - kfree(font.data); - return -EINVAL; - } - - for (h = 32; h > 0; h--) - for (i = 0; i < op->charcount; i++) - if (charmap[32*i+h-1]) - goto nonzero; - - kfree(font.data); - return -EINVAL; - - nonzero: - op->height = h; - } - font.charcount = op->charcount; font.width = op->width; font.height = op->height; @@ -4540,27 +4559,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) return rc; } -static int con_font_copy(struct vc_data *vc, struct console_font_op *op) -{ - int con = op->height; - int rc; - - - console_lock(); - if (vc->vc_mode != KD_TEXT) - rc = -EINVAL; - else if (!vc->vc_sw->con_font_copy) - rc = -ENOSYS; - else if (con < 0 || !vc_cons_allocated(con)) - rc = -ENOTTY; - else if (con == 
vc->vc_num) /* nothing to do */ - rc = 0; - else - rc = vc->vc_sw->con_font_copy(vc, con); - console_unlock(); - return rc; -} - int con_font_op(struct vc_data *vc, struct console_font_op *op) { switch (op->op) { @@ -4571,7 +4569,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op) case KD_FONT_OP_SET_DEFAULT: return con_font_default(vc, op); case KD_FONT_OP_COPY: - return con_font_copy(vc, op); + /* was buggy and never really used */ + return -EINVAL; } return -ENOSYS; } diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 73cdc0d633dd62c12e6ed1ca6545846b8dabaa09..9a1ede70f4034901b587977e8ddd43b927511ccb 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -39,11 +39,32 @@ #include #include -char vt_dont_switch; -extern struct tty_driver *console_driver; +bool vt_dont_switch; -#define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count) -#define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons) +static inline bool vt_in_use(unsigned int i) +{ + const struct vc_data *vc = vc_cons[i].d; + + /* + * console_lock must be held to prevent the vc from being deallocated + * while we're checking whether it's in-use. + */ + WARN_CONSOLE_UNLOCKED(); + + return vc && kref_read(&vc->port.kref) > 1; +} + +static inline bool vt_busy(int i) +{ + if (vt_in_use(i)) + return true; + if (i == fg_console) + return true; + if (vc_is_sel(vc_cons[i].d)) + return true; + + return false; +} /* * Console (vt and kd) routines, as defined by USL SVR4 manual, and by @@ -220,48 +241,6 @@ int vt_waitactive(int n) #define GPLAST 0x3df #define GPNUM (GPLAST - GPFIRST + 1) - - -static inline int -do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) -{ - struct consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - if (!perm) - return -EPERM; - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - return con_font_op(vc_cons[fg_console].d, op); - case GIO_FONTX: { - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - i = con_font_op(vc_cons[fg_console].d, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) - return -EFAULT; - return 0; - } - } - return -EINVAL; -} - static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_data *vc) { @@ -289,16 +268,14 @@ static int vt_disallocate(unsigned int vc_num) int ret = 0; console_lock(); - if (VT_BUSY(vc_num)) + if (vt_busy(vc_num)) ret = -EBUSY; else if (vc_num) vc = vc_deallocate(vc_num); console_unlock(); - if (vc && vc_num >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc->port); - kfree(vc); - } + if (vc && vc_num >= MIN_NR_CONSOLES) + tty_port_put(&vc->port); return ret; } @@ -311,17 +288,15 @@ static void vt_disallocate_all(void) console_lock(); for (i = 1; i < MAX_NR_CONSOLES; i++) - if (!VT_BUSY(i)) + if (!vt_busy(i)) vc[i] = vc_deallocate(i); else vc[i] = NULL; console_unlock(); for (i = 1; i < MAX_NR_CONSOLES; i++) { - if (vc[i] && i >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc[i]->port); - kfree(vc[i]); - } + if (vc[i] && 
i >= MIN_NR_CONSOLES) + tty_port_put(&vc[i]->port); } } @@ -335,22 +310,13 @@ int vt_ioctl(struct tty_struct *tty, { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ - unsigned int console; + unsigned int console = vc->vc_num; unsigned char ucval; unsigned int uival; void __user *up = (void __user *)arg; int i, perm; int ret = 0; - console = vc->vc_num; - - - if (!vc_cons_allocated(console)) { /* impossible? */ - ret = -ENOIOCTLCMD; - goto out; - } - - /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. @@ -476,16 +442,19 @@ int vt_ioctl(struct tty_struct *tty, ret = -EINVAL; goto out; } - /* FIXME: this needs the console lock extending */ - if (vc->vc_mode == (unsigned char) arg) + console_lock(); + if (vc->vc_mode == (unsigned char) arg) { + console_unlock(); break; + } vc->vc_mode = (unsigned char) arg; - if (console != fg_console) + if (console != fg_console) { + console_unlock(); break; + } /* * explicitly blank/unblank the screen if switching modes */ - console_lock(); if (arg == KD_TEXT) do_unblank_screen(1); else @@ -641,15 +610,16 @@ int vt_ioctl(struct tty_struct *tty, struct vt_stat __user *vtstat = up; unsigned short state, mask; - /* Review: FIXME: Console lock ? */ if (put_user(fg_console + 1, &vtstat->v_active)) ret = -EFAULT; else { state = 1; /* /dev/tty0 is always open */ + console_lock(); /* required by vt_in_use() */ for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) - if (VT_IS_IN_USE(i)) + if (vt_in_use(i)) state |= mask; + console_unlock(); ret = put_user(state, &vtstat->v_state); } break; @@ -659,10 +629,11 @@ int vt_ioctl(struct tty_struct *tty, * Returns the first available (non-opened) console. */ case VT_OPENQRY: - /* FIXME: locking ? - but then this is a stupid API */ + console_lock(); /* required by vt_in_use() */ for (i = 0; i < MAX_NR_CONSOLES; ++i) - if (! VT_IS_IN_USE(i)) + if (!vt_in_use(i)) break; + console_unlock(); uival = i < MAX_NR_CONSOLES ? 
(i+1) : -1; goto setint; @@ -702,9 +673,9 @@ int vt_ioctl(struct tty_struct *tty, if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { - vsa.console = array_index_nospec(vsa.console, - MAX_NR_CONSOLES + 1); vsa.console--; + vsa.console = array_index_nospec(vsa.console, + MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(vsa.console); if (ret == 0) { @@ -876,44 +847,35 @@ int vt_ioctl(struct tty_struct *tty, return -EINVAL; for (i = 0; i < MAX_NR_CONSOLES; i++) { + struct vc_data *vcp; + if (!vc_cons[i].d) continue; console_lock(); - if (v.v_vlin) - vc_cons[i].d->vc_scan_lines = v.v_vlin; - if (v.v_clin) - vc_cons[i].d->vc_font.height = v.v_clin; - vc_cons[i].d->vc_resize_user = 1; - vc_resize(vc_cons[i].d, v.v_cols, v.v_rows); + vcp = vc_cons[i].d; + if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + + if (v.v_vlin) + vcp->vc_scan_lines = v.v_vlin; + if (v.v_clin) + vcp->vc_font.height = v.v_clin; + vcp->vc_resize_user = 1; + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } + } console_unlock(); } break; } - case PIO_FONT: { - if (!perm) - return -EPERM; - op.op = KD_FONT_OP_SET; - op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ - op.width = 8; - op.height = 0; - op.charcount = 256; - op.data = up; - ret = con_font_op(vc_cons[fg_console].d, &op); - break; - } - - case GIO_FONT: { - op.op = KD_FONT_OP_GET; - op.flags = KD_FONT_FLAG_OLD; - op.width = 8; - op.height = 32; - op.charcount = 256; - op.data = up; - ret = con_font_op(vc_cons[fg_console].d, &op); - break; - } - case PIO_CMAP: if (!perm) ret = -EPERM; @@ -925,36 +887,6 @@ int vt_ioctl(struct tty_struct *tty, ret = con_get_cmap(up); break; - case PIO_FONTX: - case GIO_FONTX: - ret = do_fontx_ioctl(cmd, up, perm, &op); - break; - - case PIO_FONTRESET: - { - if (!perm) - return -EPERM; - -#ifdef BROKEN_GRAPHICS_PROGRAMS - /* With BROKEN_GRAPHICS_PROGRAMS defined, the default - font is not saved. 
*/ - ret = -ENOSYS; - break; -#else - { - op.op = KD_FONT_OP_SET_DEFAULT; - op.data = NULL; - ret = con_font_op(vc_cons[fg_console].d, &op); - if (ret) - break; - console_lock(); - con_set_default_unimap(vc_cons[fg_console].d); - console_unlock(); - break; - } -#endif - } - case KDFONTOP: { if (copy_from_user(&op, up, sizeof(op))) { ret = -EFAULT; @@ -1006,12 +938,12 @@ int vt_ioctl(struct tty_struct *tty, case VT_LOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; - vt_dont_switch = 1; + vt_dont_switch = true; break; case VT_UNLOCKSWITCH: if (!capable(CAP_SYS_TTY_CONFIG)) return -EPERM; - vt_dont_switch = 0; + vt_dont_switch = false; break; case VT_GETHIFONTMASK: ret = put_user(vc->vc_hi_font_mask, @@ -1068,52 +1000,6 @@ void vc_SAK(struct work_struct *work) #ifdef CONFIG_COMPAT -struct compat_consolefontdesc { - unsigned short charcount; /* characters in font (256 or 512) */ - unsigned short charheight; /* scan lines per character (1-32) */ - compat_caddr_t chardata; /* font data in expanded form */ -}; - -static inline int -compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, - int perm, struct console_font_op *op) -{ - struct compat_consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - if (!perm) - return -EPERM; - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - return con_font_op(vc_cons[fg_console].d, op); - case GIO_FONTX: - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - i = con_font_op(vc_cons[fg_console].d, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - return 0; - } - return -EINVAL; -} - struct compat_console_font_op { compat_uint_t op; /* operation code KD_FONT_OP_* */ compat_uint_t flags; /* KD_FONT_FLAG_* */ @@ -1175,18 +1061,10 @@ long vt_compat_ioctl(struct tty_struct *tty, { struct vc_data *vc = tty->driver_data; struct console_font_op op; /* used in multiple places here */ - unsigned int console; void __user *up = (void __user *)arg; int perm; int ret = 0; - console = vc->vc_num; - - if (!vc_cons_allocated(console)) { /* impossible? */ - ret = -ENOIOCTLCMD; - goto out; - } - /* * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. 
@@ -1199,11 +1077,6 @@ long vt_compat_ioctl(struct tty_struct *tty, /* * these need special handlers for incompatible data structures */ - case PIO_FONTX: - case GIO_FONTX: - ret = compat_fontx_ioctl(cmd, up, perm, &op); - break; - case KDFONTOP: ret = compat_kdfontop_ioctl(up, perm, &op, vc); break; @@ -1246,7 +1119,7 @@ long vt_compat_ioctl(struct tty_struct *tty, arg = (unsigned long)compat_ptr(arg); goto fallback; } -out: + return ret; fallback: diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 70a7981b94b3fd854b0e4ef7460cd4eebce8ec9b..facccb36f6924fa52d927166725b78c056e52990 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -274,6 +274,8 @@ static struct class uio_class = { .dev_groups = uio_groups, }; +static bool uio_class_registered; + /* * device functions */ @@ -411,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev) return retval; } -static void uio_free_minor(struct uio_device *idev) +static void uio_free_minor(unsigned long minor) { mutex_lock(&minor_lock); - idr_remove(&uio_idr, idev->minor); + idr_remove(&uio_idr, minor); mutex_unlock(&minor_lock); } @@ -462,13 +464,13 @@ static int uio_open(struct inode *inode, struct file *filep) mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); - mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; + mutex_unlock(&minor_lock); goto out; } - get_device(&idev->dev); + mutex_unlock(&minor_lock); if (!try_module_get(idev->owner)) { ret = -ENODEV; @@ -489,7 +491,7 @@ static int uio_open(struct inode *inode, struct file *filep) if (!idev->info) { mutex_unlock(&idev->info_lock); ret = -EINVAL; - goto err_alloc_listener; + goto err_infoopen; } if (idev->info && idev->info->open) @@ -567,20 +569,20 @@ static ssize_t uio_read(struct file *filep, char __user *buf, ssize_t retval = 0; s32 event_count; - mutex_lock(&idev->info_lock); - if (!idev->info || !idev->info->irq) - retval = -EIO; - mutex_unlock(&idev->info_lock); - - if (retval) - return retval; - if (count != sizeof(s32)) return -EINVAL; add_wait_queue(&idev->wait, &wait); do { + mutex_lock(&idev->info_lock); + if (!idev->info || !idev->info->irq) { + retval = -EIO; + mutex_unlock(&idev->info_lock); + break; + } + mutex_unlock(&idev->info_lock); + set_current_state(TASK_INTERRUPTIBLE); event_count = atomic_read(&idev->event); @@ -736,7 +738,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma) return -EINVAL; vma->vm_ops = &uio_physical_vm_ops; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (idev->info->mem[mi].memtype == UIO_MEM_PHYS) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* * We cannot use the vm_iomap_memory() helper here, @@ -793,18 +796,19 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) } switch (idev->info->mem[mi].memtype) { - case UIO_MEM_PHYS: - ret = uio_mmap_physical(vma); - break; - case UIO_MEM_LOGICAL: - case UIO_MEM_VIRTUAL: - ret = uio_mmap_logical(vma); - break; - default: - ret = -EINVAL; + case UIO_MEM_IOVA: + case UIO_MEM_PHYS: + ret = uio_mmap_physical(vma); + break; + case UIO_MEM_LOGICAL: + case UIO_MEM_VIRTUAL: + ret = uio_mmap_logical(vma); + break; + default: + ret = -EINVAL; } -out: + out: mutex_unlock(&idev->info_lock); return ret; } @@ -876,6 +880,9 @@ static int init_uio_class(void) printk(KERN_ERR "class_register failed for uio\n"); goto err_class_register; } + + uio_class_registered = true; + return 0; err_class_register: @@ -886,6 +893,7 @@ static int init_uio_class(void) static void release_uio_class(void) { + uio_class_registered = false; 
class_unregister(&uio_class); uio_major_cleanup(); } @@ -912,6 +920,9 @@ int __uio_register_device(struct module *owner, struct uio_device *idev; int ret = 0; + if (!uio_class_registered) + return -EPROBE_DEFER; + if (!parent || !info || !info->name || !info->version) return -EINVAL; @@ -929,9 +940,12 @@ int __uio_register_device(struct module *owner, atomic_set(&idev->event, 0); ret = uio_get_minor(idev); - if (ret) + if (ret) { + kfree(idev); return ret; + } + device_initialize(&idev->dev); idev->dev.devt = MKDEV(uio_major, idev->minor); idev->dev.class = &uio_class; idev->dev.parent = parent; @@ -942,7 +956,7 @@ int __uio_register_device(struct module *owner, if (ret) goto err_device_create; - ret = device_register(&idev->dev); + ret = device_add(&idev->dev); if (ret) goto err_device_create; @@ -950,6 +964,8 @@ int __uio_register_device(struct module *owner, if (ret) goto err_uio_dev_add_attributes; + info->uio_dev = idev; + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { /* * Note that we deliberately don't use devm_request_irq @@ -961,19 +977,21 @@ int __uio_register_device(struct module *owner, */ ret = request_irq(info->irq, uio_interrupt, info->irq_flags, info->name, idev); - if (ret) + if (ret) { + info->uio_dev = NULL; goto err_request_irq; + } } - info->uio_dev = idev; return 0; err_request_irq: uio_dev_del_attributes(idev); err_uio_dev_add_attributes: - device_unregister(&idev->dev); + device_del(&idev->dev); err_device_create: - uio_free_minor(idev); + uio_free_minor(idev->minor); + put_device(&idev->dev); return ret; } EXPORT_SYMBOL_GPL(__uio_register_device); @@ -986,13 +1004,13 @@ EXPORT_SYMBOL_GPL(__uio_register_device); void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; + unsigned long minor; if (!info || !info->uio_dev) return; idev = info->uio_dev; - - uio_free_minor(idev); + minor = idev->minor; mutex_lock(&idev->info_lock); uio_dev_del_attributes(idev); @@ -1003,6 +1021,10 @@ void uio_unregister_device(struct uio_info *info) idev->info = NULL; mutex_unlock(&idev->info_lock); + wake_up_interruptible(&idev->wait); + kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); + + uio_free_minor(minor); device_unregister(&idev->dev); return; diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index e1134a4d97f3fd0005e30dc29e9acf9da77e0e46..a00b4aee6c799cd12b8273db0101990c6437033a 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -135,11 +135,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); + spin_unlock_irqrestore(&priv->lock, flags); } else { - if (!test_and_set_bit(0, &priv->flags)) + if (!test_and_set_bit(0, &priv->flags)) { + spin_unlock_irqrestore(&priv->lock, flags); disable_irq(dev_info->irq); + } } - spin_unlock_irqrestore(&priv->lock, flags); return 0; } diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index e401be8321ab573b0d4c973fb7ac3f673de0f49b..0899e12adc1d1d5117546fcaeb80c2d10f61a94b 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context) /* * Callback from vmbus_event when channel is rescinded. + * It is meant for rescind of primary channels only. 
*/ static void hv_uio_rescind(struct vmbus_channel *channel) { - struct hv_device *hv_dev = channel->primary_channel->device_obj; + struct hv_device *hv_dev = channel->device_obj; struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); /* @@ -131,11 +132,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, = container_of(kobj, struct vmbus_channel, kobj); struct hv_device *dev = channel->primary_channel->device_obj; u16 q_idx = channel->offermsg.offer.sub_channel_index; + void *ring_buffer = page_address(channel->ringbuffer_page); dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n", q_idx, vma_pages(vma), vma->vm_pgoff); - return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages), + return vm_iomap_memory(vma, virt_to_phys(ring_buffer), channel->ringbuffer_pagecount << PAGE_SHIFT); } @@ -224,7 +226,7 @@ hv_uio_probe(struct hv_device *dev, /* mem resources */ pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings"; pdata->info.mem[TXRX_RING_MAP].addr - = (uintptr_t)dev->channel->ringbuffer_pages; + = (uintptr_t)page_address(dev->channel->ringbuffer_page); pdata->info.mem[TXRX_RING_MAP].size = dev->channel->ringbuffer_pagecount << PAGE_SHIFT; pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL; @@ -249,8 +251,10 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf, RECV_BUFFER_SIZE, &pdata->recv_gpadl); - if (ret) + if (ret) { + vfree(pdata->recv_buf); goto fail_close; + } /* put Global Physical Address Label in name */ snprintf(pdata->recv_name, sizeof(pdata->recv_name), @@ -270,8 +274,10 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf, SEND_BUFFER_SIZE, &pdata->send_gpadl); - if (ret) + if (ret) { + vfree(pdata->send_buf); goto fail_close; + } snprintf(pdata->send_name, sizeof(pdata->send_name), "send:%u", pdata->send_gpadl); diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index f598ecddc8a703a9e8cec96228d3bf526c92b517..b58a504240c4e0d776739e19f7450522d26f566b 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -148,7 +148,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) if (!uioinfo->irq) { ret = platform_get_irq(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 987fc5ba63211bb5829a8f38629e86ab6022a094..70e6c956c23cefe12e37ee0bbb8fa7617088dd8c 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -205,8 +205,4 @@ config USB_ULPI_BUS To compile this driver as a module, choose M here: the module will be called ulpi. 
-config USB_ROLE_SWITCH - tristate - select USB_COMMON - endif # USB_SUPPORT diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index e57a2be8754ab08f5d1fce04e399f253a06a85fd..7808725ecae09b8fcc5ccd56d5dfaa6b73840a67 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; + struct usb_endpoint_descriptor *in, *out; int ret; /* instance init */ @@ -1173,6 +1174,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, goto fail; } + if (usb_endpoint_xfer_int(&cmd_ep->desc)) + ret = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &in, &out); + else + ret = usb_find_common_endpoints(intf->cur_altsetting, + &in, &out, NULL, NULL); + + if (ret) { + usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); + ret = -ENODEV; + goto fail; + } + if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { usb_fill_int_urb(instance->rcv_urb, diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 2754b4ce7136c65039a5f2b65ec36e4992815404..60e292ce04f7d1a547984f5477cc2fa3561e6726 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2168,10 +2168,11 @@ static void uea_intr(struct urb *urb) /* * Start the modem : init the data and start kernel thread */ -static int uea_boot(struct uea_softc *sc) +static int uea_boot(struct uea_softc *sc, struct usb_interface *intf) { - int ret, size; struct intr_pkt *intr; + int ret = -ENOMEM; + int size; uea_enters(INS_TO_USBDEV(sc)); @@ -2196,6 +2197,11 @@ static int uea_boot(struct uea_softc *sc) if (UEA_CHIP_VERSION(sc) == ADI930) load_XILINX_firmware(sc); + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + ret = -ENODEV; + goto err0; + } + intr = kmalloc(size, GFP_KERNEL); if (!intr) goto err0; @@ -2207,8 +2213,7 @@ static int uea_boot(struct uea_softc *sc) usb_fill_int_urb(sc->urb_int, sc->usb_dev, usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), intr, size, uea_intr, sc, - sc->usb_dev->actconfig->interface[0]->altsetting[0]. 
- endpoint[0].desc.bInterval); + intf->cur_altsetting->endpoint[0].desc.bInterval); ret = usb_submit_urb(sc->urb_int, GFP_KERNEL); if (ret < 0) { @@ -2223,6 +2228,7 @@ static int uea_boot(struct uea_softc *sc) sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm"); if (IS_ERR(sc->kthread)) { uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); + ret = PTR_ERR(sc->kthread); goto err2; } @@ -2237,7 +2243,7 @@ static int uea_boot(struct uea_softc *sc) kfree(intr); err0: uea_leaves(INS_TO_USBDEV(sc)); - return -ENOMEM; + return ret; } /* @@ -2598,7 +2604,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf, if (ret < 0) goto error; - ret = uea_boot(sc); + ret = uea_boot(sc, intf); if (ret < 0) goto error_rm_grp; diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c index 772851bee99b6bfe74396cbdf89dc9665c15a459..12025358bb3c23e7d66b167084e41f5f4d685c31 100644 --- a/drivers/usb/chipidea/ci_hdrc_tegra.c +++ b/drivers/usb/chipidea/ci_hdrc_tegra.c @@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev) { struct tegra_udc *udc = platform_get_drvdata(pdev); + ci_hdrc_remove_device(udc->dev); usb_phy_set_suspend(udc->phy, 1); clk_disable_unprepare(udc->clk); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 85fc6db48e449214e092c7a36443995d7544b713..159b897c5e805820056f696c5da63b5a62f1ad18 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -935,8 +935,15 @@ static int ci_hdrc_probe(struct platform_device *pdev) } else if (ci->platdata->usb_phy) { ci->usb_phy = ci->platdata->usb_phy; } else { + ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys", + 0); ci->phy = devm_phy_get(dev->parent, "usb-phy"); - ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2); + + /* Fallback to grabbing any registered USB2 PHY */ + if (IS_ERR(ci->usb_phy) && + PTR_ERR(ci->usb_phy) != -EPROBE_DEFER) + ci->usb_phy = devm_usb_get_phy(dev->parent, + USB_PHY_TYPE_USB2); /* if both generic PHY and USB PHY layers aren't enabled */ if (PTR_ERR(ci->phy) == -ENOSYS && diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index db4ceffcf2a6118c9063fa720830cc337a6f92c3..f25d4827fd49c41179326f517b8aeaa7999f54dd 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -203,14 +203,17 @@ static void ci_otg_work(struct work_struct *work) } pm_runtime_get_sync(ci->dev); + if (ci->id_event) { ci->id_event = false; ci_handle_id_switch(ci); - } else if (ci->b_sess_valid_event) { + } + + if (ci->b_sess_valid_event) { ci->b_sess_valid_event = false; ci_handle_vbus_change(ci); - } else - dev_err(ci->dev, "unexpected event occurs at %s\n", __func__); + } + pm_runtime_put_sync(ci->dev); enable_irq(ci->irq); diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h index 7e7428e48bfa52e328b8b9bf7d772dc7a3892f49..4f8b8179ec96dc594c65d01b0513d17230730304 100644 --- a/drivers/usb/chipidea/otg.h +++ b/drivers/usb/chipidea/otg.h @@ -17,7 +17,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci); static inline void ci_otg_queue_work(struct ci_hdrc *ci) { disable_irq_nosync(ci->irq); - queue_work(ci->wq, &ci->work); + if (queue_work(ci->wq, &ci->work) == false) + enable_irq(ci->irq); } #endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */ diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 9852ec5e6e017e91049d1ecd082137c3eb53ce5f..169ccfacfc7550b145d49adcd97bf121bb093258 100644 --- a/drivers/usb/chipidea/udc.c +++ 
b/drivers/usb/chipidea/udc.c @@ -708,12 +708,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); unsigned long flags; - spin_lock_irqsave(&ci->lock, flags); - ci->gadget.speed = USB_SPEED_UNKNOWN; - ci->remote_wakeup = 0; - ci->suspended = 0; - spin_unlock_irqrestore(&ci->lock, flags); - /* flush all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_fifo_flush(ep); @@ -731,6 +725,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) ci->status = NULL; } + spin_lock_irqsave(&ci->lock, flags); + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->remote_wakeup = 0; + ci->suspended = 0; + spin_unlock_irqrestore(&ci->lock, flags); + return 0; } @@ -1302,6 +1302,10 @@ static int ep_disable(struct usb_ep *ep) return -EBUSY; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } /* only internal SW should disable ctrl endpts */ @@ -1391,6 +1395,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req, return -EINVAL; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } retval = _ep_queue(ep, req, gfp_flags); spin_unlock_irqrestore(hwep->lock, flags); return retval; @@ -1414,8 +1422,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) return -EINVAL; spin_lock_irqsave(hwep->lock, flags); - - hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN) + hw_ep_flush(hwep->ci, hwep->num, hwep->dir); list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { dma_pool_free(hwep->td_pool, node->ptr, node->dma); @@ -1486,6 +1494,10 @@ static void ep_fifo_flush(struct usb_ep *ep) } spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return; + } hw_ep_flush(hwep->ci, hwep->num, hwep->dir); @@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget) int ret = 0; spin_lock_irqsave(&ci->lock, flags); + if (ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(&ci->lock, flags); + return 0; + } if (!ci->remote_wakeup) { ret = -EOPNOTSUPP; goto out; @@ -1621,6 +1637,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) static int ci_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver); static int ci_udc_stop(struct usb_gadget *gadget); + +/* Match ISOC IN from the highest endpoint */ +static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget, + struct usb_endpoint_descriptor *desc, + struct usb_ss_ep_comp_descriptor *comp_desc) +{ + struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); + struct usb_ep *ep; + + if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) { + list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) { + if (ep->caps.dir_in && !ep->claimed) + return ep; + } + } + + return NULL; +} + /** * Device operations part of the API to the USB controller hardware, * which don't involve endpoints (or i/o) @@ -1634,6 +1669,7 @@ static const struct usb_gadget_ops usb_gadget_ops = { .vbus_draw = ci_udc_vbus_draw, .udc_start = ci_udc_start, .udc_stop = ci_udc_stop, + .match_ep = ci_udc_match_ep, }; static int init_eps(struct ci_hdrc *ci) diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 
34ad5bf8acd8d0ee58cba9c1a5efadace009b9a0..424ecb1f003feb1f8e6cdc01b12d6277f85326f7 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -343,6 +343,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data) } else if (data->oc_polarity == 1) { /* High active */ reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY); + } else { + reg &= ~(MX6_BM_OVER_CUR_DIS); } writel(reg, usbmisc->base + data->index * 4); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 9ede35cecb1267be281ca9f3733f187a79f16d23..59675cc7aa017e0bcce341a5367f2d551608ccc0 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb) struct acm *acm = rb->instance; unsigned long flags; int status = urb->status; + bool stopped = false; + bool stalled = false; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); - set_bit(rb->index, &acm->read_urbs_free); - if (!acm->dev) { dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); return; @@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb) break; case -EPIPE: set_bit(EVENT_RX_STALL, &acm->flags); - schedule_work(&acm->work); - return; + stalled = true; + break; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: dev_dbg(&acm->data->dev, "%s - urb shutting down with status: %d\n", __func__, status); - return; + stopped = true; + break; default: dev_dbg(&acm->data->dev, "%s - nonzero urb status received: %d\n", @@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb) } /* - * Unthrottle may run on another CPU which needs to see events - * in the same order. Submission has an implict barrier + * Make sure URB processing is done before marking as free to avoid + * racing with unthrottle() on another CPU. Matches the barriers + * implied by the test_and_clear_bit() in acm_submit_read_urb(). */ smp_mb__before_atomic(); + set_bit(rb->index, &acm->read_urbs_free); + /* + * Make sure URB is marked as free before checking the throttled flag + * to avoid racing with unthrottle() on another CPU. Matches the + * smp_mb() in unthrottle(). + */ + smp_mb__after_atomic(); + + if (stopped || stalled) { + if (stalled) + schedule_work(&acm->work); + return; + } /* throttle device if requested by tty */ spin_lock_irqsave(&acm->read_lock, flags); @@ -558,10 +573,8 @@ static void acm_softint(struct work_struct *work) clear_bit(EVENT_RX_STALL, &acm->flags); } - if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { + if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) tty_port_tty_wakeup(&acm->port); - clear_bit(EVENT_TTY_WAKEUP, &acm->flags); - } } /* @@ -581,6 +594,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty) if (retval) goto error_init_termios; + /* + * Suppress initial echoing for some devices which might send data + * immediately after acm driver has been installed. + */ + if (acm->quirks & DISABLE_ECHO) + tty->termios.c_lflag &= ~ECHO; + tty->driver_data = acm; return 0; @@ -837,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty) acm->throttle_req = 0; spin_unlock_irq(&acm->read_lock); + /* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). 
*/ + smp_mb(); + if (was_throttled) acm_submit_read_urbs(acm, GFP_KERNEL); } @@ -1310,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf, tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; - minor = acm_alloc_minor(acm); - if (minor < 0) - goto alloc_fail1; - ctrlsize = usb_endpoint_maxp(epctrl); readsize = usb_endpoint_maxp(epread) * (quirks == SINGLE_RX_URB ? 1 : 2); @@ -1321,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf, acm->writesize = usb_endpoint_maxp(epwrite) * 20; acm->control = control_interface; acm->data = data_interface; + + usb_get_intf(acm->control); /* undone in destruct() */ + + minor = acm_alloc_minor(acm); + if (minor < 0) + goto alloc_fail1; + acm->minor = minor; acm->dev = usb_dev; if (h.usb_cdc_acm_descriptor) @@ -1467,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf, usb_driver_claim_interface(&acm_driver, data_interface, acm); usb_set_intfdata(data_interface, acm); - usb_get_intf(control_interface); tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { @@ -1672,6 +1697,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */ + .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */ + }, { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, @@ -1711,6 +1739,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ .driver_info = QUIRK_CONTROL_LINE_STATE, }, { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ @@ -1867,6 +1898,13 @@ static const struct usb_device_id acm_ids[] = { .driver_info = IGNORE_DEVICE, }, + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ + .driver_info = SEND_ZERO_PACKET, + }, + { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */ + .driver_info = SEND_ZERO_PACKET, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index ca06b20d7af9cc9567da1f243ad99935f8bc99e7..515aad0847ee84b408e7fceb503081f5d97fc78b 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -140,3 +140,4 @@ struct acm { #define QUIRK_CONTROL_LINE_STATE BIT(6) #define CLEAR_HALT_CONDITIONS BIT(7) #define SEND_ZERO_PACKET BIT(8) +#define DISABLE_ECHO BIT(9) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index bec581fb7c6361891a81a3a0aa88d3863b686f58..45d1760c85a916295cfb850f0912ab60ef090ec4 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -249,14 +249,14 @@ static void wdm_int_callback(struct urb *urb) dev_err(&desc->intf->dev, "Stall on int endpoint\n"); goto sw; /* halt is cleared in work */ default: - dev_err(&desc->intf->dev, + dev_err_ratelimited(&desc->intf->dev, "nonzero urb status received: %d\n", status); break; } } if (urb->actual_length < sizeof(struct usb_cdc_notification)) { - 
dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", + dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n", urb->actual_length); goto exit; } @@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id) { struct wdm_device *desc = file->private_data; - wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); + wait_event(desc->wait, + /* + * needs both flags. We cannot do with one + * because resetting it would cause a race + * with write() yet we need to signal + * a disconnect + */ + !test_bit(WDM_IN_USE, &desc->flags) || + test_bit(WDM_DISCONNECTING, &desc->flags)); /* cannot dereference desc->intf if WDM_DISCONNECTING */ - if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + return -ENODEV; + if (desc->werr < 0) dev_err(&desc->intf->dev, "Error in flush path: %d\n", desc->werr); @@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf) spin_lock_irqsave(&desc->iuspin, flags); set_bit(WDM_DISCONNECTING, &desc->flags); set_bit(WDM_READ, &desc->flags); - /* to terminate pending flushes */ - clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); wake_up_all(&desc->wait); mutex_lock(&desc->rlock); diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 407a7a6198a25ba3bb3d078bf53adab23b5e3a34..4a80103675d5961f9607a0516fc2a76a4cf8c91f 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp) kfree(usblp->readbuf); kfree(usblp->device_id_string); kfree(usblp->statusbuf); + usb_put_intf(usblp->intf); kfree(usblp); } @@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file) mutex_lock(&usblp_mutex); usblp->used = 0; - if (usblp->present) { + if (usblp->present) usblp_unlink_urbs(usblp); - usb_autopm_put_interface(usblp->intf); - } else /* finish cleanup from disconnect */ + + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); return 0; @@ -1105,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf, init_waitqueue_head(&usblp->wwait); init_usb_anchor(&usblp->urbs); usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; - usblp->intf = intf; + usblp->intf = usb_get_intf(intf); /* Malloc device ID string buffer to the largest expected length, * since we can re-query it on an ioctl and a dynamic string @@ -1194,6 +1197,7 @@ static int usblp_probe(struct usb_interface *intf, kfree(usblp->readbuf); kfree(usblp->statusbuf); kfree(usblp->device_id_string); + usb_put_intf(usblp->intf); kfree(usblp); abort_ret: return retval; diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 83ffa5a14c3dbbc53e8b964a4101e6e4a19c57a5..e6a7c86b70f250ee4077948b2bdbcb2ec3d868d4 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -342,7 +342,8 @@ static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data) } -static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) +static int usbtmc_ioctl_abort_bulk_out_tag(struct usbtmc_device_data *data, + u8 tag) { struct device *dev; u8 *buffer; @@ -359,8 +360,8 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) usb_rcvctrlpipe(data->usb_dev, 0), USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT, - data->bTag_last_write, data->bulk_out, - buffer, 2, USBTMC_TIMEOUT); + 
tag, data->bulk_out, + buffer, 2, USB_CTRL_GET_TIMEOUT); if (rv < 0) { dev_err(dev, "usb_control_msg returned %d\n", rv); @@ -379,12 +380,14 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) n = 0; usbtmc_abort_bulk_out_check_status: + /* do not stress device with subsequent requests */ + msleep(50); rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0), USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT, 0, data->bulk_out, buffer, 0x08, - USBTMC_TIMEOUT); + USB_CTRL_GET_TIMEOUT); n++; if (rv < 0) { dev_err(dev, "usb_control_msg returned %d\n", rv); @@ -418,6 +421,11 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) return rv; } +static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data) +{ + return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write); +} + static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, void __user *arg) { @@ -1008,6 +1016,7 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data) do { dev_dbg(dev, "Reading from bulk in EP\n"); + actual = 0; rv = usb_bulk_msg(data->usb_dev, usb_rcvbulkpipe(data->usb_dev, data->bulk_in), diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile index fb4d5ef4165c7d2e188395be35cf313935046c1d..0a7c45e8548135f27a4a47157d5a674fb4d29dfe 100644 --- a/drivers/usb/common/Makefile +++ b/drivers/usb/common/Makefile @@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o -obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 48277bbc15e4d155fc9c1c7315fcf57d6347b51e..73c8e65917461f8f83d9233c96bdf0d2b8956b27 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) do { controller = of_find_node_with_property(controller, "phys"); + if (!of_device_is_available(controller)) + continue; index = 0; do { if (arg0 == -1) { diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 7b5cb28ffb3578c8785ee4e0ca541c88846890be..4781eebefd206e9b155ce78aad9f2d7716726775 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_INT] = 1024, }; -static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, - int asnum, struct usb_host_interface *ifp, int num_ep, - unsigned char *buffer, int size) +static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, + struct usb_endpoint_descriptor *e2) +{ + if (e1->bEndpointAddress == e2->bEndpointAddress) + return true; + + if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { + if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) + return true; + } + + return false; +} + +/* + * Check for duplicate endpoint addresses in other interfaces and in the + * altsetting currently being parsed. 
+ */ +static bool config_endpoint_is_duplicate(struct usb_host_config *config, + int inum, int asnum, struct usb_endpoint_descriptor *d) +{ + struct usb_endpoint_descriptor *epd; + struct usb_interface_cache *intfc; + struct usb_host_interface *alt; + int i, j, k; + + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intfc = config->intf_cache[i]; + + for (j = 0; j < intfc->num_altsetting; ++j) { + alt = &intfc->altsetting[j]; + + if (alt->desc.bInterfaceNumber == inum && + alt->desc.bAlternateSetting != asnum) + continue; + + for (k = 0; k < alt->desc.bNumEndpoints; ++k) { + epd = &alt->endpoint[k].desc; + + if (endpoint_is_duplicate(epd, d)) + return true; + } + } + } + + return false; +} + +static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_config *config, int inum, int asnum, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; @@ -241,20 +290,29 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; + /* Save a copy of the descriptor and use it instead of the original */ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + memcpy(&endpoint->desc, d, n); + d = &endpoint->desc; + + /* Clear the reserved bits in bEndpointAddress */ + i = d->bEndpointAddress & + (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK); + if (i != d->bEndpointAddress) { + dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n", + cfgno, inum, asnum, d->bEndpointAddress, i); + endpoint->desc.bEndpointAddress = i; + } + /* Check for duplicate endpoint addresses */ - for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { - if (ifp->endpoint[i].desc.bEndpointAddress == - d->bEndpointAddress) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", - cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; - } + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; } - endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + /* Accept this endpoint */ ++ifp->desc.bNumEndpoints; - - memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* @@ -348,6 +406,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, /* Validate the wMaxPacketSize field */ maxp = usb_endpoint_maxp(&endpoint->desc); + if (maxp == 0) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ @@ -517,8 +580,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno, if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; - retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, - num_ep, buffer, size); + retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, + alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; @@ -925,7 +988,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) struct 
usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; struct usb_ssp_cap_descriptor *ssp_cap; - unsigned char *buffer; + unsigned char *buffer, *buffer0; int length, total_len, num, i, ssac; __u8 cap_type; int ret; @@ -936,8 +999,8 @@ int usb_get_bos_descriptor(struct usb_device *dev) /* Get BOS descriptor */ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE); - if (ret < USB_DT_BOS_SIZE) { - dev_err(ddev, "unable to get BOS descriptor\n"); + if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) { + dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n"); if (ret >= 0) ret = -ENOMSG; kfree(bos); @@ -970,10 +1033,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) ret = -ENOMSG; goto err; } + + buffer0 = buffer; total_len -= length; + buffer += length; for (i = 0; i < num; i++) { - buffer += length; cap = (struct usb_dev_cap_header *)buffer; if (total_len < sizeof(*cap) || total_len < cap->bLength) { @@ -987,8 +1052,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) break; } - total_len -= length; - if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_warn(ddev, "descriptor type invalid, skip\n"); continue; @@ -1023,7 +1086,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) default: break; } + + total_len -= length; + buffer += length; } + dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); return 0; diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 3de3c750b5f6e01414d84e1579c001a36e53e478..44f28a114c2b6bb43456b2c67a03b8ec02b7a389 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -598,7 +598,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, return -EINVAL; if (nbytes <= 0) return 0; - if (!access_ok(VERIFY_WRITE, buf, nbytes)) + if (!access_ok(buf, nbytes)) return -EFAULT; mutex_lock(&usb_bus_idr_lock); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index ffccd40ea67da4c5d70a96ec057d4cc32cb04ed4..b3b88c08be8a295369ed6b834bff89ec0eddbb5c 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -739,8 +739,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; - else + else { + unsigned int old_suppress; + + /* suppress uevents while claiming interface */ + old_suppress = dev_get_uevent_suppress(&intf->dev); + dev_set_uevent_suppress(&intf->dev, 1); err = usb_driver_claim_interface(&usbfs_driver, intf, ps); + dev_set_uevent_suppress(&intf->dev, old_suppress); + } if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; @@ -760,7 +767,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum) if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { + unsigned int old_suppress; + + /* suppress uevents while releasing interface */ + old_suppress = dev_get_uevent_suppress(&intf->dev); + dev_set_uevent_suppress(&intf->dev, 1); usb_driver_release_interface(&usbfs_driver, intf); + dev_set_uevent_suppress(&intf->dev, old_suppress); err = 0; } return err; @@ -1094,7 +1107,7 @@ static int proc_control(struct usb_dev_state *ps, void __user *arg) ctrl.bRequestType, ctrl.bRequest, ctrl.wValue, ctrl.wIndex, ctrl.wLength); if (ctrl.bRequestType & 0x80) { - if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, + if (ctrl.wLength && !access_ok(ctrl.data, ctrl.wLength)) { ret = -EINVAL; goto done; @@ -1176,14 +1189,19 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg) ret = 
usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; - tbuf = kmalloc(len1, GFP_KERNEL); + + /* + * len1 can be almost arbitrarily large. Don't WARN if it's + * too big, just fail the request. + */ + tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN); if (!tbuf) { ret = -ENOMEM; goto done; } tmo = bulk.timeout; if (bulk.ep & 0x80) { - if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) { + if (len1 && !access_ok(bulk.data, len1)) { ret = -EINVAL; goto done; } @@ -1584,8 +1602,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb } if (uurb->buffer_length > 0 && - !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, - uurb->buffer, uurb->buffer_length)) { + !access_ok(uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } @@ -1618,7 +1635,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb if (num_sgs) { as->urb->sg = kmalloc_array(num_sgs, sizeof(struct scatterlist), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!as->urb->sg) { ret = -ENOMEM; goto error; @@ -1653,7 +1670,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb (uurb_start - as->usbm->vm_start); } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; @@ -1792,8 +1809,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb return 0; error: - if (as && as->usbm) - dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count); kfree(isopkt); kfree(dr); if (as) diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index a1f225f077cd23b3ebbf07951b0734183c209bf5..3255b2bb0fd56d721858ee5bae0d545be4df7b16 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); - /* Undo any residual pm_autopm_get_interface_* calls */ - for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r) - usb_autopm_put_interface_no_suspend(intf); - atomic_set(&intf->pm_usage_cnt, 0); - if (!error) usb_autosuspend_device(udev); @@ -1636,7 +1631,6 @@ void usb_autopm_put_interface(struct usb_interface *intf) int status; usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); status = pm_runtime_put_sync(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), @@ -1665,7 +1659,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf) int status; usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); status = pm_runtime_put(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), @@ -1687,7 +1680,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf) struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); - atomic_dec(&intf->pm_usage_cnt); pm_runtime_put_noidle(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend); @@ -1718,8 +1710,6 @@ int usb_autopm_get_interface(struct usb_interface *intf) status = pm_runtime_get_sync(&intf->dev); if (status < 0) pm_runtime_put_sync(&intf->dev); - else - atomic_inc(&intf->pm_usage_cnt); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); @@ -1753,8 +1743,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf) status = pm_runtime_get(&intf->dev); if (status < 0 && 
status != -EINPROGRESS) pm_runtime_put_noidle(&intf->dev); - else - atomic_inc(&intf->pm_usage_cnt); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); @@ -1778,7 +1766,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf) struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); - atomic_inc(&intf->pm_usage_cnt); pm_runtime_get_noresume(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume); @@ -1899,14 +1886,11 @@ int usb_runtime_idle(struct device *dev) return -EBUSY; } -int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) +static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); int ret = -EPERM; - if (enable && !udev->usb2_hw_lpm_allowed) - return 0; - if (hcd->driver->set_usb2_hw_lpm) { ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable); if (!ret) @@ -1916,6 +1900,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) return ret; } +int usb_enable_usb2_hardware_lpm(struct usb_device *udev) +{ + if (!udev->usb2_hw_lpm_capable || + !udev->usb2_hw_lpm_allowed || + udev->usb2_hw_lpm_enabled) + return 0; + + return usb_set_usb2_hardware_lpm(udev, 1); +} + +int usb_disable_usb2_hardware_lpm(struct usb_device *udev) +{ + if (!udev->usb2_hw_lpm_enabled) + return 0; + + return usb_set_usb2_hardware_lpm(udev, 0); +} + #endif /* CONFIG_PM */ struct bus_type usb_bus_type = { diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 65de6f73b6725200d8ec5cb2301b7a557cdf8a9c..558890ada0e5bdcf8c573af0cc5be59c52c99ef1 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf, intf->minor = minor; break; } - up_write(&minor_rwsem); - if (intf->minor < 0) + if (intf->minor < 0) { + up_write(&minor_rwsem); return -EXFULL; + } /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf, MKDEV(USB_MAJOR, minor), class_driver, "%s", kbasename(name)); if (IS_ERR(intf->usb_dev)) { - down_write(&minor_rwsem); usb_minors[minor] = NULL; intf->minor = -1; - up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } + up_write(&minor_rwsem); return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); @@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf, return; dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); + device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); down_write(&minor_rwsem); usb_minors[intf->minor] = NULL; up_write(&minor_rwsem); - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); intf->usb_dev = NULL; intf->minor = -1; destroy_usb_class(); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 03432467b05fb12810d7cac77d954a11ed0a002a..d4234f81b791e173e4aa6a7083b4bd8d105ff7eb 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -49,6 +49,7 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); + struct pci_driver *drv; /* * Iterate through other PCI functions in the same slot. 
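/*
 * Illustrative sketch, not part of the patch: the capability and state
 * checks now live inside the new usb2 hardware LPM helpers, so usbcore
 * call sites reduce to a single call. "example_suspend_path" is a
 * made-up name for illustration only.
 */
static int example_suspend_path(struct usb_device *udev)
{
	/* no-op unless usb2_hw_lpm_enabled is currently set */
	usb_disable_usb2_hardware_lpm(udev);

	/* ... port suspend / resume work ... */

	/* no-op unless the device is LPM capable, allowed and not
	 * already enabled */
	return usb_enable_usb2_hardware_lpm(udev);
}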
@@ -61,6 +62,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, PCI_SLOT(companion->devfn) != slot) continue; + drv = companion->driver; + if (!drv) + continue; + + if (strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) && + strncmp(drv->name, "ohci-pci", sizeof("ohci-pci") - 1) && + strncmp(drv->name, "ehci-pci", sizeof("ehci-pci") - 1)) + continue; + /* * Companion device should be either UHCI,OHCI or EHCI host * controller, otherwise skip. @@ -216,17 +226,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* EHCI, OHCI */ hcd->rsrc_start = pci_resource_start(dev, 0); hcd->rsrc_len = pci_resource_len(dev, 0); - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) { + if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) { dev_dbg(&dev->dev, "controller already in use\n"); retval = -EBUSY; goto put_hcd; } - hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); retval = -EFAULT; - goto release_mem_region; + goto put_hcd; } } else { @@ -240,8 +251,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) hcd->rsrc_start = pci_resource_start(dev, region); hcd->rsrc_len = pci_resource_len(dev, region); - if (request_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) + if (devm_request_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { @@ -275,20 +286,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (retval != 0) - goto unmap_registers; + goto put_hcd; device_wakeup_enable(hcd->self.controller); if (pci_dev_run_wake(dev)) pm_runtime_put_noidle(&dev->dev); return retval; -unmap_registers: - if (driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); -release_mem_region: - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else - release_region(hcd->rsrc_start, hcd->rsrc_len); put_hcd: usb_put_hcd(hcd); disable_pci: @@ -347,14 +351,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev) dev_set_drvdata(&dev->dev, NULL); up_read(&companions_rwsem); } - - if (hcd->driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else { - release_region(hcd->rsrc_start, hcd->rsrc_len); - } - usb_put_hcd(hcd); pci_disable_device(dev); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1c21955fe7c00736d4a18fc7cff33783137ef60e..acff95a9f239928b3b528090c5e5db14c1aa198a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1663,6 +1663,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) urb->hcpriv = NULL; INIT_LIST_HEAD(&urb->urb_list); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb(). + */ + smp_mb__after_atomic(); + atomic_dec(&urb->dev->urbnum); if (atomic_read(&urb->reject)) wake_up(&usb_kill_urb_queue); @@ -1772,6 +1779,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb(). 
+ */ + smp_mb__after_atomic(); + if (unlikely(atomic_read(&urb->reject))) wake_up(&usb_kill_urb_queue); usb_put_urb(urb); @@ -1784,7 +1798,6 @@ static void usb_giveback_urb_bh(unsigned long param) spin_lock_irq(&bh->lock); bh->running = true; - restart: list_replace_init(&bh->head, &local_list); spin_unlock_irq(&bh->lock); @@ -1798,10 +1811,17 @@ static void usb_giveback_urb_bh(unsigned long param) bh->completing_ep = NULL; } - /* check if there are new URBs to giveback */ + /* + * giveback new URBS next time to prevent this function + * from not exiting for a long time. + */ spin_lock_irq(&bh->lock); - if (!list_empty(&bh->head)) - goto restart; + if (!list_empty(&bh->head)) { + if (bh->high_prio) + tasklet_hi_schedule(&bh->bh); + else + tasklet_schedule(&bh->bh); + } bh->running = false; spin_unlock_irq(&bh->lock); } @@ -1826,7 +1846,7 @@ static void usb_giveback_urb_bh(unsigned long param) void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status) { struct giveback_urb_bh *bh; - bool running, high_prio_bh; + bool running; /* pass status to tasklet via unlinked */ if (likely(!urb->unlinked)) @@ -1837,13 +1857,10 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status) return; } - if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) { + if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) bh = &hcd->high_prio_bh; - high_prio_bh = true; - } else { + else bh = &hcd->low_prio_bh; - high_prio_bh = false; - } spin_lock(&bh->lock); list_add_tail(&urb->urb_list, &bh->head); @@ -1852,7 +1869,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status) if (running) ; - else if (high_prio_bh) + else if (bh->high_prio) tasklet_hi_schedule(&bh->bh); else tasklet_schedule(&bh->bh); @@ -2860,6 +2877,7 @@ int usb_add_hcd(struct usb_hcd *hcd, /* initialize tasklets */ init_giveback_urb_bh(&hcd->high_prio_bh); + hcd->high_prio_bh.high_prio = true; init_giveback_urb_bh(&hcd->low_prio_bh); /* enable irqs just before we start the controller, @@ -3017,6 +3035,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev) { struct usb_hcd *hcd = platform_get_drvdata(dev); + /* No need for pm_runtime_put(), we're shutting down */ + pm_runtime_get_sync(&dev->dev); + if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 462ce49f683a01da9b6ddd7045ccb358cb897ab4..7750a304d7b41dec9ce5c7ba9b2af9918860086c 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); +static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1, + u16 portstatus); static inline char *portspeed(struct usb_hub *hub, int portstatus) { @@ -136,6 +138,10 @@ int usb_device_supports_lpm(struct usb_device *udev) if (udev->quirks & USB_QUIRK_NO_LPM) return 0; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return 0; + /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
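/*
 * Illustrative summary, not part of the patch: both sides now write one
 * flag, issue a full barrier, then read the other flag, so at least one
 * side is guaranteed to observe the other's update and the waiter cannot
 * miss its wakeup.
 *
 *   usb_kill_urb()/usb_poison_urb()     __usb_hcd_giveback_urb()
 *   -------------------------------     ------------------------
 *   atomic_inc(&urb->reject);           atomic_dec(&urb->use_count);
 *   smp_mb__after_atomic();             smp_mb__after_atomic();
 *   wait_event(usb_kill_urb_queue,      if (atomic_read(&urb->reject))
 *       atomic_read(&urb->use_count)        wake_up(&usb_kill_urb_queue);
 *           == 0);
 */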
*/ @@ -309,6 +315,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev) if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER) return; + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + hub = usb_hub_to_struct_hub(udev->parent); /* It doesn't take time to transition the roothub into U0, since it * doesn't have an upstream link. @@ -1111,6 +1121,21 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) USB_PORT_FEAT_ENABLE); } + /* Make sure a warm-reset request is handled by port_event */ + if (type == HUB_RESUME && + hub_port_warm_reset_required(hub, port1, portstatus)) + set_bit(port1, hub->event_bits); + + /* + * Add debounce if USB3 link is in polling/link training state. + * Link will automatically transition to Enabled state after + * link training completes. + */ + if (hub_is_superspeed(hdev) && + ((portstatus & USB_PORT_STAT_LINK_STATE) == + USB_SS_PORT_LS_POLLING)) + need_debounce_delay = true; + /* Clear status-change flags; we'll debounce later */ if (portchange & USB_PORT_STAT_C_CONNECTION) { need_debounce_delay = true; @@ -2250,7 +2275,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev) /* descriptor may appear anywhere in config */ err = __usb_get_extra_descriptor(udev->rawdescriptors[0], le16_to_cpu(udev->config[0].desc.wTotalLength), - USB_DT_OTG, (void **) &desc); + USB_DT_OTG, (void **) &desc, sizeof(*desc)); if (err || !(desc->bmAttributes & USB_OTG_HNP)) return 0; @@ -2599,7 +2624,8 @@ int usb_authorize_device(struct usb_device *usb_dev) } /* - * Return 1 if port speed is SuperSpeedPlus, 0 otherwise + * Return 1 if port speed is SuperSpeedPlus, 0 otherwise or if the + * capability couldn't be checked. * check it from the link protocol field of the current speed ID attribute. * current speed ID is got from ext port status request. 
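/*
 * Illustrative sketch, not part of the patch: the new minsize argument
 * to __usb_get_extra_descriptor() makes a matching descriptor that is
 * shorter than the structure the caller wants to dereference be skipped
 * rather than returned, as in the OTG lookup above:
 *
 *	struct usb_otg_descriptor *desc;
 *
 *	if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
 *			le16_to_cpu(udev->config[0].desc.wTotalLength),
 *			USB_DT_OTG, (void **)&desc, sizeof(*desc)))
 *		return 0;	// absent or truncated: no OTG support
 *	// desc->bmAttributes is now known to be readable
 */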
Sublink speed attribute * table is returned with the hub BOS SSP device capability descriptor @@ -2609,8 +2635,12 @@ static int port_speed_is_ssp(struct usb_device *hdev, int speed_id) int ssa_count; u32 ss_attr; int i; - struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + if (!hdev->bos) + return 0; + + ssp_cap = hdev->bos->ssp_cap; if (!ssp_cap) return 0; @@ -2640,7 +2670,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) +#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 @@ -2791,6 +2821,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1, int i, status; u16 portchange, portstatus; struct usb_port *port_dev = hub->ports[port1 - 1]; + int reset_recovery_time; if (!hub_is_superspeed(hub->hdev)) { if (warm) { @@ -2846,7 +2877,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, USB_PORT_FEAT_C_BH_PORT_RESET); usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); - usb_clear_port_feature(hub->hdev, port1, + + if (udev) + usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); /* @@ -2882,11 +2915,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1, done: if (status == 0) { - /* TRSTRCY = 10 ms; plus some extra */ if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) usleep_range(10000, 12000); - else - msleep(10 + 40); + else { + /* TRSTRCY = 10 ms; plus some extra */ + reset_recovery_time = 10 + 40; + + /* Hub needs extra delay after resetting its port. */ + if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET) + reset_recovery_time += 100; + + msleep(reset_recovery_time); + } if (udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); @@ -3197,8 +3237,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) } /* disable USB2 hardware LPM */ - if (udev->usb2_hw_lpm_enabled == 1) - usb_set_usb2_hardware_lpm(udev, 0); + usb_disable_usb2_hardware_lpm(udev); if (usb_disable_ltm(udev)) { dev_err(&udev->dev, "Failed to disable LTM before suspend\n"); @@ -3236,8 +3275,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) usb_enable_ltm(udev); err_ltm: /* Try to enable USB2 hardware LPM again */ - if (udev->usb2_hw_lpm_capable == 1) - usb_set_usb2_hardware_lpm(udev, 1); + usb_enable_usb2_hardware_lpm(udev); if (udev->do_remote_wakeup) (void) usb_disable_remote_wakeup(udev); @@ -3520,8 +3558,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) hub_port_logical_disconnect(hub, port1); } else { /* Try to enable USB2 hardware LPM */ - if (udev->usb2_hw_lpm_capable == 1) - usb_set_usb2_hardware_lpm(udev, 1); + usb_enable_usb2_hardware_lpm(udev); /* Try to enable USB3 LTM */ usb_enable_ltm(udev); @@ -3558,6 +3595,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, struct usb_device *hdev; struct usb_device *udev; int connect_change = 0; + u16 link_state; int ret; hdev = hub->hdev; @@ -3567,9 +3605,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, return 0; usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND); } else { + link_state = portstatus & USB_PORT_STAT_LINK_STATE; if (!udev || udev->state != USB_STATE_SUSPENDED || - (portstatus & USB_PORT_STAT_LINK_STATE) != - USB_SS_PORT_LS_U0) + (link_state != USB_SS_PORT_LS_U0 
&& + link_state != USB_SS_PORT_LS_U1 && + link_state != USB_SS_PORT_LS_U2)) return 0; } @@ -3941,6 +3981,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev, * control transfers to set the hub timeout or enable device-initiated U1/U2 * will be successful. * + * If the control transfer to enable device-initiated U1/U2 entry fails, then + * hub-initiated U1/U2 will be disabled. + * * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI * driver know about it. If that call fails, it should be harmless, and just * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency. @@ -3949,8 +3992,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout, ret; - __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat; - __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat; + __u8 u1_mel; + __le16 u2_mel; + + /* Skip if the device BOS descriptor couldn't be read */ + if (!udev->bos) + return; + + u1_mel = udev->bos->ss_cap->bU1devExitLat; + u2_mel = udev->bos->ss_cap->bU2DevExitLat; /* If the device says it doesn't have *any* exit latency to come out of * U1 or U2, it's probably lying. Assume it doesn't implement that link @@ -3995,23 +4045,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, * host know that this link state won't be enabled. */ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); - } else { - /* Only a configured device will accept the Set Feature - * U1/U2_ENABLE - */ - if (udev->actconfig) - usb_set_device_initiated_lpm(udev, state, true); + return; + } - /* As soon as usb_set_lpm_timeout(timeout) returns 0, the - * hub-initiated LPM is enabled. Thus, LPM is enabled no - * matter the result of usb_set_device_initiated_lpm(). - * The only difference is whether device is able to initiate - * LPM. - */ + /* Only a configured device will accept the Set Feature + * U1/U2_ENABLE + */ + if (udev->actconfig && + usb_set_device_initiated_lpm(udev, state, true) == 0) { if (state == USB3_LPM_U1) udev->usb3_lpm_u1_enabled = 1; else if (state == USB3_LPM_U2) udev->usb3_lpm_u2_enabled = 1; + } else { + /* Don't request U1/U2 entry if the device + * cannot transition to U1/U2. + */ + usb_set_lpm_timeout(udev, state, 0); + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); } } @@ -4412,7 +4463,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) || connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { udev->usb2_hw_lpm_allowed = 1; - usb_set_usb2_hardware_lpm(udev, 1); + usb_enable_usb2_hardware_lpm(udev); } } @@ -5588,8 +5639,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /* Disable USB2 hardware LPM. * It will be re-enabled by the enumeration process. */ - if (udev->usb2_hw_lpm_enabled == 1) - usb_set_usb2_hardware_lpm(udev, 0); + usb_disable_usb2_hardware_lpm(udev); /* Disable LPM while we reset the device and reinstall the alt settings. * Device-initiated LPM, and system exit latency settings are cleared @@ -5692,7 +5742,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) done: /* Now that the alt settings are re-installed, enable LTM and LPM. 
*/ - usb_set_usb2_hardware_lpm(udev, 1); + usb_enable_usb2_hardware_lpm(udev); usb_unlocked_enable_lpm(udev); usb_enable_ltm(udev); usb_release_bos_descriptor(udev); @@ -5710,13 +5760,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /** * usb_reset_device - warn interface drivers and perform a USB port reset - * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) + * @udev: device to reset (not in NOTATTACHED state) * * Warns all drivers bound to registered interfaces (using their pre_reset * method), performs the port reset, and then lets the drivers know that * the reset is over (using their post_reset method). * * Return: The same as for usb_reset_and_verify_device(). + * However, if a reset is already in progress (for instance, if a + * driver doesn't have pre_reset() or post_reset() callbacks, and while + * being unbound or re-bound during the ongoing reset its disconnect() + * or probe() routine tries to perform a second, nested reset), the + * routine returns -EINPROGRESS. * * Note: * The caller must own the device lock. For example, it's safe to use @@ -5738,8 +5793,7 @@ int usb_reset_device(struct usb_device *udev) struct usb_host_config *config = udev->actconfig; struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); - if (udev->state == USB_STATE_NOTATTACHED || - udev->state == USB_STATE_SUSPENDED) { + if (udev->state == USB_STATE_NOTATTACHED) { dev_dbg(&udev->dev, "device reset not allowed in state %d\n", udev->state); return -EINVAL; @@ -5751,6 +5805,10 @@ int usb_reset_device(struct usb_device *udev) return -EISDIR; } + if (udev->reset_in_progress) + return -EINPROGRESS; + udev->reset_in_progress = 1; + port_dev = hub->ports[udev->portnum - 1]; /* @@ -5807,11 +5865,15 @@ int usb_reset_device(struct usb_device *udev) cintf->needs_binding = 1; } } - usb_unbind_and_rebind_marked_interfaces(udev); + + /* If the reset failed, hub_wq will unbind drivers later */ + if (ret == 0) + usb_unbind_and_rebind_marked_interfaces(udev); } usb_autosuspend_device(udev); memalloc_noio_restore(noio_flag); + udev->reset_in_progress = 0; return ret; } EXPORT_SYMBOL_GPL(usb_reset_device); diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 4accfb63f7dcbddf77c407bb46d84be2d33a3118..2ee899501220c80709d1cb124511b53ed4b0c586 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -138,7 +138,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev) { return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS && le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 && - hdev->bos->ssp_cap); + hdev->bos && hdev->bos->ssp_cap); } static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index dc7f7fd71684cb7d7e2d0f3bc39f49f114618def..c12ac56606c3f681fe8cebe0357a8284ec405f25 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c @@ -119,11 +119,6 @@ static const struct attribute_group ports_group = { .attrs = ports_attrs, }; -static const struct attribute_group *ports_groups[] = { - &ports_group, - NULL -}; - /*************************************** * Adding & removing ports ***************************************/ @@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, static int usbport_trig_activate(struct led_classdev *led_cdev) { struct usbport_trig_data *usbport_data; + int err; usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); if (!usbport_data) @@ -315,6 
+311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) /* List of ports */ INIT_LIST_HEAD(&usbport_data->ports); + err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group); + if (err) + goto err_free; usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); usbport_trig_update_count(usbport_data); @@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) usbport_data->nb.notifier_call = usbport_trig_notify; led_set_trigger_data(led_cdev, usbport_data); usb_register_notify(&usbport_data->nb); - return 0; + +err_free: + kfree(usbport_data); + return err; } static void usbport_trig_deactivate(struct led_classdev *led_cdev) @@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev) usbport_trig_remove_port(usbport_data, port); } + sysfs_remove_group(&led_cdev->dev->kobj, &ports_group); + usb_unregister_notify(&usbport_data->nb); kfree(usbport_data); @@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = { .name = "usbport", .activate = usbport_trig_activate, .deactivate = usbport_trig_deactivate, - .groups = ports_groups, }; static int __init usbport_trig_init(void) diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index bfa5eda0cc2606a79373651fd079f87526238f36..0be20b17e754b6340b5ff4c96fc9394fcc79e22f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -161,6 +161,140 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, } EXPORT_SYMBOL_GPL(usb_control_msg); +/** + * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion + * @dev: pointer to the usb device to send the message to + * @endpoint: endpoint to send the message to + * @request: USB message request value + * @requesttype: USB message request type value + * @value: USB message value + * @index: USB message index value + * @driver_data: pointer to the data to send + * @size: length in bytes of the data to send + * @timeout: time in msecs to wait for the message to complete before timing + * out (if 0 the wait is forever) + * @memflags: the flags for memory allocation for buffers + * + * Context: !in_interrupt () + * + * This function sends a control message to a specified endpoint that is not + * expected to fill in a response (i.e. a "send message") and waits for the + * message to complete, or timeout. + * + * Do not use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb(). If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. + * + * The data pointer can be made to a reference on the stack, or anywhere else, + * as it will not be modified at all. This does not have the restriction that + * usb_control_msg() has where the data pointer must be to dynamically allocated + * memory (i.e. memory that can be successfully DMAed to a device). + * + * Return: If successful, 0 is returned, Otherwise, a negative error number. 
+ */ +int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + const void *driver_data, __u16 size, int timeout, + gfp_t memflags) +{ + unsigned int pipe = usb_sndctrlpipe(dev, endpoint); + int ret; + u8 *data = NULL; + + if (size) { + data = kmemdup(driver_data, size, memflags); + if (!data) + return -ENOMEM; + } + + ret = usb_control_msg(dev, pipe, request, requesttype, value, index, + data, size, timeout); + kfree(data); + + if (ret < 0) + return ret; + if (ret == size) + return 0; + return -EINVAL; +} +EXPORT_SYMBOL_GPL(usb_control_msg_send); + +/** + * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion + * @dev: pointer to the usb device to send the message to + * @endpoint: endpoint to send the message to + * @request: USB message request value + * @requesttype: USB message request type value + * @value: USB message value + * @index: USB message index value + * @driver_data: pointer to the data to be filled in by the message + * @size: length in bytes of the data to be received + * @timeout: time in msecs to wait for the message to complete before timing + * out (if 0 the wait is forever) + * @memflags: the flags for memory allocation for buffers + * + * Context: !in_interrupt () + * + * This function sends a control message to a specified endpoint that is + * expected to fill in a response (i.e. a "receive message") and waits for the + * message to complete, or timeout. + * + * Do not use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb(). If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. + * + * The data pointer can be made to a reference on the stack, or anywhere else + * that can be successfully written to. This function does not have the + * restriction that usb_control_msg() has where the data pointer must be to + * dynamically allocated memory (i.e. memory that can be successfully DMAed to a + * device). + * + * The "whole" message must be properly received from the device in order for + * this function to be successful. If a device returns less than the expected + * amount of data, then the function will fail. Do not use this for messages + * where a variable amount of data might be returned. + * + * Return: If successful, 0 is returned, Otherwise, a negative error number. 
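/*
 * Illustrative sketch, not part of the patch: a driver sending a vendor
 * request with usb_control_msg_send(). The request value 0x01 and the
 * helper name are made up; the data may live on the caller's stack
 * because the helper copies it into a DMA-able buffer internally.
 */
static int example_set_mode(struct usb_device *udev, u8 mode)
{
	return usb_control_msg_send(udev, 0, 0x01,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_DEVICE,
				    0, 0, &mode, sizeof(mode),
				    USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
}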
+ */ +int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + void *driver_data, __u16 size, int timeout, + gfp_t memflags) +{ + unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); + int ret; + u8 *data; + + if (!size || !driver_data) + return -EINVAL; + + data = kmalloc(size, memflags); + if (!data) + return -ENOMEM; + + ret = usb_control_msg(dev, pipe, request, requesttype, value, index, + data, size, timeout); + + if (ret < 0) + goto exit; + + if (ret == size) { + memcpy(driver_data, data, size); + ret = 0; + } else { + ret = -EINVAL; + } + +exit: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(usb_control_msg_recv); + /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to @@ -588,12 +722,13 @@ void usb_sg_cancel(struct usb_sg_request *io) int i, retval; spin_lock_irqsave(&io->lock, flags); - if (io->status) { + if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; + io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { @@ -607,6 +742,12 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } + + spin_lock_irqsave(&io->lock, flags); + io->count--; + if (!io->count) + complete(&io->complete); + spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); @@ -820,9 +961,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; - if (size <= 0 || !buf || !index) + if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; + if (index <= 0 || index >= 256) + return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; @@ -1243,8 +1386,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev->actconfig->interface[i] = NULL; } - if (dev->usb2_hw_lpm_enabled == 1) - usb_set_usb2_hardware_lpm(dev, 0); + usb_disable_usb2_hardware_lpm(dev); usb_unlocked_disable_lpm(dev); usb_disable_ltm(dev); @@ -2210,14 +2352,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 178d6c6063c0280a06c6dc3dc6611394d8233479..6b6413073584339ba9e271e17ffa63fc4f883aa4 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) case 'n': flags |= USB_QUIRK_DELAY_CTRL_MSG; break; + case 'o': + flags |= USB_QUIRK_HUB_SLOW_RESET; + break; /* Ignore unrecognized flag characters */ } } @@ -206,6 +209,15 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Surface Dock Ethernet (RTL8153 GigE) */ + { USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM 
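/*
 * Illustrative sketch, not part of the patch: the receive variant fills
 * a caller-supplied buffer (stack storage is fine) and fails with
 * -EINVAL unless the device returns exactly the requested length. The
 * request value 0x02 and the helper name are made up.
 */
static int example_get_status(struct usb_device *udev, u8 *status)
{
	return usb_control_msg_recv(udev, 0, 0x02,
				    USB_DIR_IN | USB_TYPE_VENDOR |
				    USB_RECIP_DEVICE,
				    0, 0, status, sizeof(*status),
				    USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
}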
}, + + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */ + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech HD Webcam C270 */ + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -327,6 +339,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Midiman M-Audio Keystation 88es */ { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, + /* SanDisk Ultra Fit and Ultra Flair */ + { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -380,8 +396,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1a0a, 0x0200), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Terminus Technology Inc. Hub */ + { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, + /* Corsair K70 RGB */ - { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, /* Corsair Strafe */ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | @@ -391,6 +411,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, + /* Corsair K70 LUX RGB */ + { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -411,6 +434,11 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Raydium Touchscreen */ + { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM }, + + { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index ea18284dfa9af08a641aa30bae449c9d08ec0a6a..314f2d996c56497d8eab765a766712907fd5f07d 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev, if (!ret) { udev->usb2_hw_lpm_allowed = value; - ret = usb_set_usb2_hardware_lpm(udev, value); + if (value) + ret = usb_enable_usb2_hardware_lpm(udev); + else + ret = usb_disable_usb2_hardware_lpm(udev); } usb_unlock_device(udev); @@ -1066,14 +1069,24 @@ static ssize_t interface_authorized_store(struct device *dev, { struct usb_interface *intf = to_usb_interface(dev); bool val; + struct kernfs_node *kn; if (strtobool(buf, &val) != 0) return -EINVAL; - if (val) + if (val) { usb_authorize_interface(intf); - else - usb_deauthorize_interface(intf); + } else { + /* + * Prevent deadlock if another process is concurrently + * trying to unregister intf. 
+ */ + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + if (kn) { + usb_deauthorize_interface(intf); + sysfs_unbreak_active_protection(kn); + } + } return count; } diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index f51750bcd1528ba5aa3c8996b9dd0bc320ce8beb..c06d4b61f276a9e9cbb7620ed8afea9391b892cf 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -45,6 +45,7 @@ void usb_init_urb(struct urb *urb) if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); + INIT_LIST_HEAD(&urb->urb_list); INIT_LIST_HEAD(&urb->anchor_list); } } @@ -192,24 +193,39 @@ static const int pipetypes[4] = { }; /** - * usb_urb_ep_type_check - sanity check of endpoint in the given urb - * @urb: urb to be checked + * usb_pipe_type_check - sanity check of a specific pipe for a usb device + * @dev: struct usb_device to be checked + * @pipe: pipe to check * * This performs a light-weight sanity check for the endpoint in the - * given urb. It returns 0 if the urb contains a valid endpoint, otherwise - * a negative error code. + * given usb device. It returns 0 if the pipe is valid for the specific usb + * device, otherwise a negative error code. */ -int usb_urb_ep_type_check(const struct urb *urb) +int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe) { const struct usb_host_endpoint *ep; - ep = usb_pipe_endpoint(urb->dev, urb->pipe); + ep = usb_pipe_endpoint(dev, pipe); if (!ep) return -EINVAL; - if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) return -EINVAL; return 0; } +EXPORT_SYMBOL_GPL(usb_pipe_type_check); + +/** + * usb_urb_ep_type_check - sanity check of endpoint in the given urb + * @urb: urb to be checked + * + * This performs a light-weight sanity check for the endpoint in the + * given urb. It returns 0 if the urb contains a valid endpoint, otherwise + * a negative error code. + */ +int usb_urb_ep_type_check(const struct urb *urb) +{ + return usb_pipe_type_check(urb->dev, urb->pipe); +} EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); /** @@ -474,7 +490,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) */ /* Check that the pipe's type matches the endpoint's type */ - if (usb_urb_ep_type_check(urb)) + if (usb_pipe_type_check(urb->dev, urb->pipe)) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); @@ -691,6 +707,12 @@ void usb_kill_urb(struct urb *urb) if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). + */ + smp_mb__after_atomic(); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); @@ -732,6 +754,12 @@ void usb_poison_urb(struct urb *urb) if (!urb) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). 
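/*
 * Illustrative sketch, not part of the patch: usb_pipe_type_check() is
 * the newly exported form of the old per-URB check and can be used
 * before any URB exists. The helper name and endpoint number are made up.
 */
static int example_validate_bulk_in(struct usb_device *udev, int epnum)
{
	unsigned int pipe = usb_rcvbulkpipe(udev, epnum);

	/* returns 0 only if the endpoint exists and really is bulk */
	if (usb_pipe_type_check(udev, pipe))
		return -ENODEV;
	return 0;
}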
+ */ + smp_mb__after_atomic(); if (!urb->dev || !urb->ep) return; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 79d8bd7a612e65b5c765f16e0c5567a440cdc1da..c369920b485484a621d3bf2e4acd0f0ad41bbc3b 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -209,6 +209,82 @@ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt, } EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse); +/** + * usb_find_endpoint() - Given an endpoint address, search for the endpoint's + * usb_host_endpoint structure in an interface's current altsetting. + * @intf: the interface whose current altsetting should be searched + * @ep_addr: the endpoint address (number and direction) to find + * + * Search the altsetting's list of endpoints for one with the specified address. + * + * Return: Pointer to the usb_host_endpoint if found, %NULL otherwise. + */ +static const struct usb_host_endpoint *usb_find_endpoint( + const struct usb_interface *intf, unsigned int ep_addr) +{ + int n; + const struct usb_host_endpoint *ep; + + n = intf->cur_altsetting->desc.bNumEndpoints; + ep = intf->cur_altsetting->endpoint; + for (; n > 0; (--n, ++ep)) { + if (ep->desc.bEndpointAddress == ep_addr) + return ep; + } + return NULL; +} + +/** + * usb_check_bulk_endpoints - Check whether an interface's current altsetting + * contains a set of bulk endpoints with the given addresses. + * @intf: the interface whose current altsetting should be searched + * @ep_addrs: 0-terminated array of the endpoint addresses (number and + * direction) to look for + * + * Search for endpoints with the specified addresses and check their types. + * + * Return: %true if all the endpoints are found and are bulk, %false otherwise. + */ +bool usb_check_bulk_endpoints( + const struct usb_interface *intf, const u8 *ep_addrs) +{ + const struct usb_host_endpoint *ep; + + for (; *ep_addrs; ++ep_addrs) { + ep = usb_find_endpoint(intf, *ep_addrs); + if (!ep || !usb_endpoint_xfer_bulk(&ep->desc)) + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints); + +/** + * usb_check_int_endpoints - Check whether an interface's current altsetting + * contains a set of interrupt endpoints with the given addresses. + * @intf: the interface whose current altsetting should be searched + * @ep_addrs: 0-terminated array of the endpoint addresses (number and + * direction) to look for + * + * Search for endpoints with the specified addresses and check their types. + * + * Return: %true if all the endpoints are found and are interrupt, + * %false otherwise. + */ +bool usb_check_int_endpoints( + const struct usb_interface *intf, const u8 *ep_addrs) +{ + const struct usb_host_endpoint *ep; + + for (; *ep_addrs; ++ep_addrs) { + ep = usb_find_endpoint(intf, *ep_addrs); + if (!ep || !usb_endpoint_xfer_int(&ep->desc)) + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(usb_check_int_endpoints); + /** * usb_find_alt_setting() - Given a configuration, find the alternate setting * for the given interface. 
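/*
 * Illustrative sketch, not part of the patch: a probe() routine using
 * the new usb_check_bulk_endpoints() helper to verify its expected
 * endpoint layout up front. The addresses and names are made up; the
 * address array is 0-terminated.
 */
static const u8 example_bulk_eps[] = { 0x01, 0x81, 0 };	/* OUT 1, IN 1 */

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	if (!usb_check_bulk_endpoints(intf, example_bulk_eps))
		return -ENODEV;
	/* ... normal probe work ... */
	return 0;
}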
@@ -832,14 +908,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number); */ int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr) + unsigned char type, void **ptr, size_t minsize) { struct usb_descriptor_header *header; while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer; - if (header->bLength < 2) { + if (header->bLength < 2 || header->bLength > size) { printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name, @@ -848,7 +924,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size, return -1; } - if (header->bDescriptorType == type) { + if (header->bDescriptorType == type && header->bLength >= minsize) { *ptr = header; return 0; } diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 546a2219454b2de9bbf7e72f07dc1ae3f3602b60..d95a5358f73df21ea3b1ba4cdea97bc0ad07c7ee 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev); extern int usb_runtime_suspend(struct device *dev); extern int usb_runtime_resume(struct device *dev); extern int usb_runtime_idle(struct device *dev); -extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable); +extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev); +extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev); #else @@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev) return 0; } -static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) +static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev) +{ + return 0; +} + +static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev) { return 0; } diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 55d5ae2a7ec7bf805d66502f842f739926e7995b..633ba0123efbff959a75f88aac63f2fcfea3a0bf 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -524,14 +524,14 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait) greset |= GRSTCTL_CSFTRST; dwc2_writel(hsotg, greset, GRSTCTL); - if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) { + if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) { dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n", __func__); return -EBUSY; } /* Wait for AHB master IDLE state */ - if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 50)) { + if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) { dev_warn(hsotg->dev, "%s: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE\n", __func__); return -EBUSY; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 220c0f9b89b0ba38fec14f088a789887425283a6..3f68edde0f03a613fee93cbbcba9c15c3ab7261b 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -675,13 +675,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) unsigned int maxsize; if (is_isoc) - maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : - DEV_DMA_ISOC_RX_NBYTES_LIMIT; + maxsize = (hs_ep->dir_in ? 
DEV_DMA_ISOC_TX_NBYTES_LIMIT : + DEV_DMA_ISOC_RX_NBYTES_LIMIT) * + MAX_DMA_DESC_NUM_HS_ISOC; else - maxsize = DEV_DMA_NBYTES_LIMIT; - - /* Above size of one descriptor was chosen, multiple it */ - maxsize *= MAX_DMA_DESC_NUM_GENERIC; + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; return maxsize; } @@ -864,7 +862,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, /* Update index of last configured entry in the chain */ hs_ep->next_desc++; - if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC) + if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC) hs_ep->next_desc = 0; return 0; @@ -896,7 +894,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) } /* Initialize descriptor chain by Host Busy status */ - for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) { + for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) { desc = &hs_ep->desc_list[i]; desc->status = 0; desc->status |= (DEV_DMA_BUFF_STS_HBUSY @@ -2083,7 +2081,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); hs_ep->compl_desc++; - if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1)) + if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1)) hs_ep->compl_desc = 0; desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status; } @@ -3127,6 +3125,7 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg) hsotg->connected = 0; hsotg->test_mode = 0; + /* all endpoints should be shutdown */ for (ep = 0; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) kill_all_requests(hsotg, hsotg->eps_in[ep], @@ -3177,6 +3176,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic) GINTSTS_PTXFEMP | \ GINTSTS_RXFLVL) +static int dwc2_hsotg_ep_disable(struct usb_ep *ep); /** * dwc2_hsotg_core_init - issue softreset to the core * @hsotg: The device state @@ -3191,13 +3191,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, u32 val; u32 usbcfg; u32 dcfg = 0; + int ep; /* Kill any ep0 requests as controller will be reinitialized */ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); - if (!is_usb_reset) + if (!is_usb_reset) { if (dwc2_core_reset(hsotg, true)) return; + } else { + /* all endpoints should be shutdown */ + for (ep = 1; ep < hsotg->num_of_eps; ep++) { + if (hsotg->eps_in[ep]) + dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); + if (hsotg->eps_out[ep]) + dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); + } + } /* * we must now enable ep0 ready for host detection and then @@ -3779,6 +3789,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, unsigned int i, val, size; int ret = 0; unsigned char ep_type; + int desc_num; dev_dbg(hsotg->dev, "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", @@ -3825,11 +3836,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", __func__, epctrl, epctrl_reg); + if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC) + desc_num = MAX_DMA_DESC_NUM_HS_ISOC; + else + desc_num = MAX_DMA_DESC_NUM_GENERIC; + /* Allocate DMA descriptor chain for non-ctrl endpoints */ if (using_desc_dma(hsotg) && !hs_ep->desc_list) { hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, - MAX_DMA_DESC_NUM_GENERIC * - sizeof(struct dwc2_dma_desc), + desc_num * sizeof(struct dwc2_dma_desc), &hs_ep->desc_list_dma, GFP_ATOMIC); if (!hs_ep->desc_list) { ret = -ENOMEM; @@ -3971,7 +3986,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, error2: if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 
- dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + dmam_free_coherent(hsotg->dev, desc_num * sizeof(struct dwc2_dma_desc), hs_ep->desc_list, hs_ep->desc_list_dma); hs_ep->desc_list = NULL; @@ -3990,7 +4005,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) struct dwc2_hsotg *hsotg = hs_ep->parent; int dir_in = hs_ep->dir_in; int index = hs_ep->index; - unsigned long flags; u32 epctrl_reg; u32 ctrl; @@ -4008,8 +4022,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); - spin_lock_irqsave(&hsotg->lock, flags); - ctrl = dwc2_readl(hsotg, epctrl_reg); if (ctrl & DXEPCTL_EPENA) @@ -4032,10 +4044,22 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) hs_ep->fifo_index = 0; hs_ep->fifo_size = 0; - spin_unlock_irqrestore(&hsotg->lock, flags); return 0; } +static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep) +{ + struct dwc2_hsotg_ep *hs_ep = our_ep(ep); + struct dwc2_hsotg *hsotg = hs_ep->parent; + unsigned long flags; + int ret; + + spin_lock_irqsave(&hsotg->lock, flags); + ret = dwc2_hsotg_ep_disable(ep); + spin_unlock_irqrestore(&hsotg->lock, flags); + return ret; +} + /** * on_list - check request is on the given endpoint * @ep: The endpoint to check. @@ -4183,7 +4207,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) static const struct usb_ep_ops dwc2_hsotg_ep_ops = { .enable = dwc2_hsotg_ep_enable, - .disable = dwc2_hsotg_ep_disable, + .disable = dwc2_hsotg_ep_disable_lock, .alloc_request = dwc2_hsotg_ep_alloc_request, .free_request = dwc2_hsotg_ep_free_request, .queue = dwc2_hsotg_ep_queue_lock, @@ -4323,9 +4347,9 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) /* all endpoints should be shutdown */ for (ep = 1; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) - dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); + dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); if (hsotg->eps_out[ep]) - dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); + dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); } spin_lock_irqsave(&hsotg->lock, flags); @@ -4773,9 +4797,9 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) for (ep = 0; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) - dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); + dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); if (hsotg->eps_out[ep]) - dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); + dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep); } } diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 2bd6e6bfc241b8673b0fcd558f874381493ce6bd..8b8c113b726757273b30c8d9448189a812e64886 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2673,8 +2673,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb) return; /* Restore urb->transfer_buffer from the end of the allocated area */ - memcpy(&stored_xfer_buffer, urb->transfer_buffer + - urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); + memcpy(&stored_xfer_buffer, + PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length, + dma_get_cache_alignment()), + sizeof(urb->transfer_buffer)); if (usb_urb_dir_in(urb)) { if (usb_pipeisoc(urb->pipe)) @@ -2706,6 +2708,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) * DMA */ kmalloc_size = urb->transfer_buffer_length + + (dma_get_cache_alignment() - 1) + sizeof(urb->transfer_buffer); kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); @@ -2716,7 +2719,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) * Position value of original 
urb->transfer_buffer pointer to the end * of allocation for later referencing */ - memcpy(kmalloc_ptr + urb->transfer_buffer_length, + memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length, + dma_get_cache_alignment()), &urb->transfer_buffer, sizeof(urb->transfer_buffer)); if (usb_urb_dir_out(urb)) @@ -2801,7 +2805,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info); chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info); chan->speed = qh->dev_speed; - chan->max_packet = dwc2_max_packet(qh->maxp); + chan->max_packet = qh->maxp; chan->xfer_started = 0; chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS; @@ -2879,7 +2883,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * This value may be modified when the transfer is started * to reflect the actual transfer length */ - chan->multi_count = dwc2_hb_mult(qh->maxp); + chan->multi_count = qh->maxp_mult; if (hsotg->params.dma_desc_enable) { chan->desc_list_addr = qh->desc_list_dma; @@ -3564,6 +3568,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, u32 port_status; u32 speed; u32 pcgctl; + u32 pwr; switch (typereq) { case ClearHubFeature: @@ -3612,8 +3617,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, dev_dbg(hsotg->dev, "ClearPortFeature USB_PORT_FEAT_POWER\n"); hprt0 = dwc2_read_hprt0(hsotg); + pwr = hprt0 & HPRT0_PWR; hprt0 &= ~HPRT0_PWR; dwc2_writel(hsotg, hprt0, HPRT0); + if (pwr) + dwc2_vbus_supply_exit(hsotg); break; case USB_PORT_FEAT_INDICATOR: @@ -3823,8 +3831,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, dev_dbg(hsotg->dev, "SetPortFeature - USB_PORT_FEAT_POWER\n"); hprt0 = dwc2_read_hprt0(hsotg); + pwr = hprt0 & HPRT0_PWR; hprt0 |= HPRT0_PWR; dwc2_writel(hsotg, hprt0, HPRT0); + if (!pwr) + dwc2_vbus_supply_init(hsotg); break; case USB_PORT_FEAT_RESET: @@ -3841,6 +3852,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, dwc2_writel(hsotg, 0, PCGCTL); hprt0 = dwc2_read_hprt0(hsotg); + pwr = hprt0 & HPRT0_PWR; /* Clear suspend bit if resetting from suspend state */ hprt0 &= ~HPRT0_SUSP; @@ -3854,6 +3866,8 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, dev_dbg(hsotg->dev, "In host mode, hprt0=%08x\n", hprt0); dwc2_writel(hsotg, hprt0, HPRT0); + if (!pwr) + dwc2_vbus_supply_init(hsotg); } /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ @@ -3991,19 +4005,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg, static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, u8 dev_addr, - u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps) + u8 ep_num, u8 ep_type, u8 ep_dir, + u16 maxp, u16 maxp_mult) { if (dbg_perio() || ep_type == USB_ENDPOINT_XFER_BULK || ep_type == USB_ENDPOINT_XFER_CONTROL) dev_vdbg(hsotg->dev, - "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n", - dev_addr, ep_num, ep_dir, ep_type, mps); + "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n", + dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult); urb->pipe_info.dev_addr = dev_addr; urb->pipe_info.ep_num = ep_num; urb->pipe_info.pipe_type = ep_type; urb->pipe_info.pipe_dir = ep_dir; - urb->pipe_info.mps = mps; + urb->pipe_info.maxp = maxp; + urb->pipe_info.maxp_mult = maxp_mult; } /* @@ -4094,8 +4110,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 
"IN" : "OUT"); dev_dbg(hsotg->dev, - " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + " Max packet size: %d (%d mult)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_dbg(hsotg->dev, " transfer_buffer: %p\n", urb->buf); @@ -4393,6 +4410,8 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd) struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); struct usb_bus *bus = hcd_to_bus(hcd); unsigned long flags; + u32 hprt0; + int ret; dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); @@ -4408,6 +4427,17 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd) dwc2_hcd_reinit(hsotg); + hprt0 = dwc2_read_hprt0(hsotg); + /* Has vbus power been turned on in dwc2_core_host_init ? */ + if (hprt0 & HPRT0_PWR) { + /* Enable external vbus supply before resuming root hub */ + spin_unlock_irqrestore(&hsotg->lock, flags); + ret = dwc2_vbus_supply_init(hsotg); + if (ret) + return ret; + spin_lock_irqsave(&hsotg->lock, flags); + } + /* Initialize and connect root hub if one is not already attached */ if (bus->root_hub) { dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n"); @@ -4417,7 +4447,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd) spin_unlock_irqrestore(&hsotg->lock, flags); - return dwc2_vbus_supply_init(hsotg); + return 0; } /* @@ -4428,6 +4458,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd) { struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); unsigned long flags; + u32 hprt0; /* Turn off all host-specific interrupts */ dwc2_disable_host_interrupts(hsotg); @@ -4436,6 +4467,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd) synchronize_irq(hcd->irq); spin_lock_irqsave(&hsotg->lock, flags); + hprt0 = dwc2_read_hprt0(hsotg); /* Ensure hcd is disconnected */ dwc2_hcd_disconnect(hsotg, true); dwc2_hcd_stop(hsotg); @@ -4444,7 +4476,9 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd) clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); spin_unlock_irqrestore(&hsotg->lock, flags); - dwc2_vbus_supply_exit(hsotg); + /* keep balanced supply init/exit by checking HPRT0_PWR */ + if (hprt0 & HPRT0_PWR) + dwc2_vbus_supply_exit(hsotg); usleep_range(1000, 3000); } @@ -4482,7 +4516,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) hprt0 |= HPRT0_SUSP; hprt0 &= ~HPRT0_PWR; dwc2_writel(hsotg, hprt0, HPRT0); + spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_vbus_supply_exit(hsotg); + spin_lock_irqsave(&hsotg->lock, flags); } /* Enter partial_power_down */ @@ -4643,8 +4679,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, } dev_vdbg(hsotg->dev, " Speed: %s\n", speed); - dev_vdbg(hsotg->dev, " Max packet size: %d\n", - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); + dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n", + usb_endpoint_maxp(&urb->ep->desc), + usb_endpoint_maxp_mult(&urb->ep->desc)); + dev_vdbg(hsotg->dev, " Data buffer length: %d\n", urb->transfer_buffer_length); dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", @@ -4727,8 +4765,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ep_type, usb_pipein(urb->pipe), - usb_maxpacket(urb->dev, urb->pipe, - !(usb_pipein(urb->pipe)))); + usb_endpoint_maxp(&ep->desc), + usb_endpoint_maxp_mult(&ep->desc)); buf = urb->transfer_buffer; @@ -4806,8 +4844,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, if (qh_allocated && qh->channel && qh->channel->qh == qh) qh->channel->qh = NULL; fail2: - 
spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); kfree(qtd); qtd = NULL; fail1: @@ -5195,6 +5233,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) hcd->has_tt = 1; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + retval = -EINVAL; + goto error2; + } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 3f9bccc95add1fc777cfb2938aa46fdec77e119b..ce6445a065889bf4a4a459bec129f231fd854f07 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info { u8 ep_num; u8 pipe_type; u8 pipe_dir; - u16 mps; + u16 maxp; + u16 maxp_mult; }; struct dwc2_hcd_iso_packet_desc { @@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time { * - USB_ENDPOINT_XFER_ISOC * @ep_is_in: Endpoint direction * @maxp: Value from wMaxPacketSize field of Endpoint Descriptor + * @maxp_mult: Multiplier for maxp * @dev_speed: Device speed. One of the following values: * - USB_SPEED_LOW * - USB_SPEED_FULL @@ -340,6 +342,7 @@ struct dwc2_qh { u8 ep_type; u8 ep_is_in; u16 maxp; + u16 maxp_mult; u8 dev_speed; u8 data_toggle; u8 ping_state; @@ -366,7 +369,7 @@ struct dwc2_qh { u32 desc_list_sz; u32 *n_bytes; struct timer_list unreserve_timer; - struct timer_list wait_timer; + struct hrtimer wait_timer; struct dwc2_tt *dwc_tt; int ttport; unsigned tt_buffer_dirty:1; @@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe) return pipe->pipe_type; } -static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe) +static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe) +{ + return pipe->maxp; +} + +static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe) { - return pipe->mps; + return pipe->maxp_mult; } static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe) @@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb) static inline bool dbg_perio(void) { return false; } #endif -/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */ -#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03)) - -/* Packet size for any kind of endpoint descriptor */ -#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff) - /* * Returns true if frame1 index is greater than frame2 index. The comparison * is done modulo FRLISTEN_64_SIZE. 
This accounts for the rollover of the diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 88b5dcf3aefc57f6ee91e2200cb95e91284d2919..a052d39b4375e469a59cde0e9220a5c900a0f54c 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, " Speed: %s\n", speed); - dev_err(hsotg->dev, " Max packet size: %d\n", - dwc2_hcd_get_mps(&urb->pipe_info)); + dev_err(hsotg->dev, " Max packet size: %d (mult %d)\n", + dwc2_hcd_get_maxp(&urb->pipe_info), + dwc2_hcd_get_maxp_mult(&urb->pipe_info)); dev_err(hsotg->dev, " Data buffer length: %d\n", urb->length); dev_err(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n", urb->buf, (unsigned long)urb->dma); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 40839591d2ec173ca507cb356248a4cb8a3adba0..68bbac64b7536aec43574d543be8ff758b718fa9 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -59,7 +59,7 @@ #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) /* If we get a NAK, wait this long before retrying */ -#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1)) +#define DWC2_RETRY_WAIT_DELAY 1*1E6L /** * dwc2_periodic_channel_available() - Checks that a channel is available for a @@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg, static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { - int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp); + int bytecount = qh->maxp_mult * qh->maxp; int ls_search_slice; int err = 0; int host_interval_in_sched; @@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, u32 max_channel_xfer_size; int status = 0; - max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); + max_xfer_size = qh->maxp * qh->maxp_mult; max_channel_xfer_size = hsotg->params.max_transfer_size; if (max_xfer_size > max_channel_xfer_size) { @@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg, * qh back to the "inactive" list, then queues transactions. * * @t: Pointer to wait_timer in a qh. + * + * Return: HRTIMER_NORESTART to not automatically restart this timer. 
*/ -static void dwc2_wait_timer_fn(struct timer_list *t) +static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t) { - struct dwc2_qh *qh = from_timer(qh, t, wait_timer); + struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer); struct dwc2_hsotg *hsotg = qh->hsotg; unsigned long flags; @@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t) } spin_unlock_irqrestore(&hsotg->lock, flags); + return HRTIMER_NORESTART; } /** @@ -1514,19 +1517,22 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED && dev_speed != USB_SPEED_HIGH); - int maxp = dwc2_hcd_get_mps(&urb->pipe_info); - int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp); + int maxp = dwc2_hcd_get_maxp(&urb->pipe_info); + int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info); + int bytecount = maxp_mult * maxp; char *speed, *type; /* Initialize QH */ qh->hsotg = hsotg; timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0); - timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0); + hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + qh->wait_timer.function = &dwc2_wait_timer_fn; qh->ep_type = ep_type; qh->ep_is_in = ep_is_in; qh->data_toggle = DWC2_HC_PID_DATA0; qh->maxp = maxp; + qh->maxp_mult = maxp_mult; INIT_LIST_HEAD(&qh->qtd_list); INIT_LIST_HEAD(&qh->qh_list_entry); @@ -1690,7 +1696,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) * won't do anything anyway, but we want it to finish before we free * memory. */ - del_timer_sync(&qh->wait_timer); + hrtimer_cancel(&qh->wait_timer); dwc2_host_put_tt_info(hsotg, qh->dwc_tt); @@ -1716,6 +1722,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { int status; u32 intr_mask; + ktime_t delay; if (dbg_qh(qh)) dev_vdbg(hsotg->dev, "%s()\n", __func__); @@ -1734,8 +1741,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) list_add_tail(&qh->qh_list_entry, &hsotg->non_periodic_sched_waiting); qh->wait_timer_cancel = false; - mod_timer(&qh->wait_timer, - jiffies + DWC2_RETRY_WAIT_DELAY + 1); + delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY); + hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL); } else { list_add_tail(&qh->qh_list_entry, &hsotg->non_periodic_sched_inactive); diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index bf7052e037d6b3285d2afe8d62fdab3d24e1320a..a93415f33bf3692fe26c2d4bec80b524eb66540f 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -71,6 +71,13 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg) p->power_down = false; } +static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg) +{ + struct dwc2_core_params *p = &hsotg->params; + + p->power_down = 0; +} + static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg) { struct dwc2_core_params *p = &hsotg->params; @@ -81,6 +88,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg) p->host_perio_tx_fifo_size = 256; p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT; + p->power_down = 0; } static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg) @@ -110,6 +118,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg) p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI; p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 << GAHBCFG_HBSTLEN_SHIFT; + p->power_down = DWC2_POWER_DOWN_PARAM_NONE; } static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg) @@ -150,7 +159,8 @@ const struct of_device_id dwc2_of_match_table[] = { { 
.compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params }, { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, { .compatible = "snps,dwc2" }, - { .compatible = "samsung,s3c6400-hsotg" }, + { .compatible = "samsung,s3c6400-hsotg", + .data = dwc2_set_s3c6400_params }, { .compatible = "amlogic,meson8-usb", .data = dwc2_set_amlogic_params }, { .compatible = "amlogic,meson8b-usb", diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index d257c541e51ba4e51530e5d8c3d1048794f5f013..7afc10872f1f031370727a277180f664fe8e4c56 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci, dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO); if (!dwc2) { dev_err(dev, "couldn't allocate dwc2 device\n"); + ret = -ENOMEM; goto err; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 88c80fcc39f57bbb69dfacd92dd3b3ae691466e8..384175b3135c6dc5113c14120af4ea77a2bc1514 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -111,26 +111,31 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc->current_dr_role = mode; } +static int dwc3_core_soft_reset(struct dwc3 *dwc); + static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); unsigned long flags; int ret; + u32 reg; + + mutex_lock(&dwc->mutex); if (dwc->dr_mode != USB_DR_MODE_OTG) - return; + goto out; if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) dwc3_otg_update(dwc, 0); if (!dwc->desired_dr_role) - return; + goto out; if (dwc->desired_dr_role == dwc->current_dr_role) - return; + goto out; if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) - return; + goto out; switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_HOST: @@ -151,6 +156,29 @@ static void __dwc3_set_mode(struct work_struct *work) break; } + /* + * When current_dr_role is not set, there's no role switching. + * Only perform GCTL.CoreSoftReset when there's DRD role switching. + */ + if (dwc->current_dr_role && + dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) { + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg |= DWC3_GCTL_CORESOFTRESET; + dwc3_writel(dwc->regs, DWC3_GCTL, reg); + + /* + * Wait for internal clocks to synchronized. DWC_usb31 and + * DWC_usb32 may need at least 50ms (less for DWC_usb3). To + * keep it consistent across different IPs, let's wait up to + * 100ms before clearing GCTL.CORESOFTRESET. 
+ */ + msleep(100); + + reg = dwc3_readl(dwc->regs, DWC3_GCTL); + reg &= ~DWC3_GCTL_CORESOFTRESET; + dwc3_writel(dwc->regs, DWC3_GCTL, reg); + } + spin_lock_irqsave(&dwc->lock, flags); dwc3_set_prtcap(dwc, dwc->desired_dr_role); @@ -171,6 +199,8 @@ static void __dwc3_set_mode(struct work_struct *work) } break; case DWC3_GCTL_PRTCAP_DEVICE: + dwc3_core_soft_reset(dwc); + dwc3_event_buffers_setup(dwc); if (dwc->usb2_phy) @@ -190,6 +220,8 @@ static void __dwc3_set_mode(struct work_struct *work) break; } +out: + mutex_unlock(&dwc->mutex); } void dwc3_set_mode(struct dwc3 *dwc, u32 mode) @@ -225,19 +257,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) { u32 reg; int retries = 1000; - int ret; - - usb_phy_init(dwc->usb2_phy); - usb_phy_init(dwc->usb3_phy); - ret = phy_init(dwc->usb2_generic_phy); - if (ret < 0) - return ret; - - ret = phy_init(dwc->usb3_generic_phy); - if (ret < 0) { - phy_exit(dwc->usb2_generic_phy); - return ret; - } /* * We're resetting only the device side because, if we're in host mode, @@ -259,9 +278,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) udelay(1); } while (--retries); - phy_exit(dwc->usb3_generic_phy); - phy_exit(dwc->usb2_generic_phy); - return -ETIMEDOUT; done: @@ -299,8 +315,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); dft = reg & DWC3_GFLADJ_30MHZ_MASK; - if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj, - "request value same as default, ignoring\n")) { + if (dft != dwc->fladj) { reg &= ~DWC3_GFLADJ_30MHZ_MASK; reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); @@ -373,6 +388,13 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc) static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length) { struct dwc3_event_buffer *evt; + unsigned int hw_mode; + + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) { + dwc->ev_buf = NULL; + return 0; + } evt = dwc3_alloc_one_event_buffer(dwc, length); if (IS_ERR(evt)) { @@ -394,6 +416,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc) { struct dwc3_event_buffer *evt; + if (!dwc->ev_buf) + return 0; + evt = dwc->ev_buf; evt->lpos = 0; dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), @@ -410,6 +435,17 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc) void dwc3_event_buffers_cleanup(struct dwc3 *dwc) { struct dwc3_event_buffer *evt; + u32 reg; + + if (!dwc->ev_buf) + return; + /* + * Exynos platforms may not be able to access event buffer if the + * controller failed to halt on dwc3_core_exit(). 
+ */ + reg = dwc3_readl(dwc->regs, DWC3_DSTS); + if (!(reg & DWC3_DSTS_DEVCTRLHLT)) + return; evt = dwc->ev_buf; @@ -926,9 +962,21 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc->phys_ready = true; } + usb_phy_init(dwc->usb2_phy); + usb_phy_init(dwc->usb3_phy); + ret = phy_init(dwc->usb2_generic_phy); + if (ret < 0) + goto err0a; + + ret = phy_init(dwc->usb3_generic_phy); + if (ret < 0) { + phy_exit(dwc->usb2_generic_phy); + goto err0a; + } + ret = dwc3_core_soft_reset(dwc); if (ret) - goto err0a; + goto err1; dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); @@ -1214,7 +1262,7 @@ static void dwc3_get_properties(struct dwc3 *dwc) u8 tx_max_burst_prd; /* default to highest possible threshold */ - lpm_nyet_threshold = 0xff; + lpm_nyet_threshold = 0xf; /* default to -3.5dB de-emphasis */ tx_de_emphasis = 1; @@ -1454,6 +1502,7 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_cache_hwparams(dwc); spin_lock_init(&dwc->lock); + mutex_init(&dwc->mutex); pm_runtime_set_active(dev); pm_runtime_use_autosuspend(dev); @@ -1482,7 +1531,8 @@ static int dwc3_probe(struct platform_device *pdev) ret = dwc3_core_init(dwc); if (ret) { - dev_err(dev, "failed to initialize core\n"); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to initialize core: %d\n", ret); goto err4; } @@ -1499,6 +1549,7 @@ static int dwc3_probe(struct platform_device *pdev) err5: dwc3_event_buffers_cleanup(dwc); + dwc3_ulpi_exit(dwc); err4: dwc3_free_scratch_buffers(dwc); @@ -1590,6 +1641,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; case DWC3_GCTL_PRTCAP_HOST: @@ -1622,6 +1674,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); spin_unlock_irqrestore(&dwc->lock, flags); + synchronize_irq(dwc->irq_gadget); } dwc3_otg_exit(dwc); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 5bfb62533e0fecb7de12738e2f3cb66a6fb7154f..a5d3004ae388b315e779c8a347a6140030c7f59c 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -636,9 +637,9 @@ struct dwc3_event_buffer { /** * struct dwc3_ep - device side endpoint representation * @endpoint: usb endpoint + * @cancelled_list: list of cancelled requests for this endpoint * @pending_list: list of pending requests for this endpoint * @started_list: list of started requests on this endpoint - * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete * @lock: spinlock for endpoint request queue traversal * @regs: pointer to first endpoint register * @trb_pool: array of transaction buffers @@ -659,11 +660,10 @@ struct dwc3_event_buffer { */ struct dwc3_ep { struct usb_ep endpoint; + struct list_head cancelled_list; struct list_head pending_list; struct list_head started_list; - wait_queue_head_t wait_end_transfer; - spinlock_t lock; void __iomem *regs; @@ -847,11 +847,12 @@ struct dwc3_hwparams { * @epnum: endpoint number to which this request refers * @trb: pointer to struct dwc3_trb * @trb_dma: DMA address of @trb - * @unaligned: true for OUT endpoints with length not divisible by maxp + * @num_trbs: number of TRBs used by this request + * @needs_extra_trb: true when request needs one extra TRB (either due to ZLP + * or unaligned OUT) * @direction: IN or OUT direction flag * 
@mapped: true when request has been dma-mapped * @started: request is started - * @zero: wants a ZLP */ struct dwc3_request { struct usb_request request; @@ -867,11 +868,12 @@ struct dwc3_request { struct dwc3_trb *trb; dma_addr_t trb_dma; - unsigned unaligned:1; + unsigned num_trbs; + + unsigned needs_extra_trb:1; unsigned direction:1; unsigned mapped:1; unsigned started:1; - unsigned zero:1; }; /* @@ -895,6 +897,7 @@ struct dwc3_scratchpad_array { * @scratch_addr: dma address of scratchbuf * @ep0_in_setup: one control transfer is completed and enter setup phase * @lock: for synchronizing + * @mutex: for mode switching * @dev: pointer to our struct device * @sysdev: pointer to the DMA-capable device * @xhci: pointer to our xHCI child @@ -1016,6 +1019,9 @@ struct dwc3 { /* device lock */ spinlock_t lock; + /* mode switching lock */ + struct mutex mutex; + struct device *dev; struct device *sysdev; diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index c66d216dcc3041ddecc2e7af10301ce188fc030c..4f75ab3505b77a1871cd25e4a92907dc9cbcc075 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -116,6 +116,35 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state) } } +/** + * dwc3_gadget_hs_link_string - returns highspeed and below link name + * @link_state: link state code + */ +static inline const char * +dwc3_gadget_hs_link_string(enum dwc3_link_state link_state) +{ + switch (link_state) { + case DWC3_LINK_STATE_U0: + return "On"; + case DWC3_LINK_STATE_U2: + return "Sleep"; + case DWC3_LINK_STATE_U3: + return "Suspend"; + case DWC3_LINK_STATE_SS_DIS: + return "Disconnected"; + case DWC3_LINK_STATE_RX_DET: + return "Early Suspend"; + case DWC3_LINK_STATE_RECOV: + return "Recovery"; + case DWC3_LINK_STATE_RESET: + return "Reset"; + case DWC3_LINK_STATE_RESUME: + return "Resume"; + default: + return "UNKNOWN link state\n"; + } +} + /** * dwc3_trb_type_string - returns TRB type as a string * @type: the type of the TRB diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index df8e73ec3342eca2365d976f5dcfd00c6c8da446..04ce7c01a09e64ce39427399a7587cef0c7b0dfd 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -433,13 +433,17 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) unsigned long flags; enum dwc3_link_state state; u32 reg; + u8 speed; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DSTS); state = DWC3_DSTS_USBLNKST(reg); - spin_unlock_irqrestore(&dwc->lock, flags); + speed = reg & DWC3_DSTS_CONNECTSPD; - seq_printf(s, "%s\n", dwc3_gadget_link_string(state)); + seq_printf(s, "%s\n", (speed >= DWC3_DSTS_SUPERSPEED) ? 
+ dwc3_gadget_link_string(state) : + dwc3_gadget_hs_link_string(state)); + spin_unlock_irqrestore(&dwc->lock, flags); return 0; } @@ -457,6 +461,8 @@ static ssize_t dwc3_link_state_write(struct file *file, unsigned long flags; enum dwc3_link_state state = 0; char buf[32]; + u32 reg; + u8 speed; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -477,6 +483,15 @@ static ssize_t dwc3_link_state_write(struct file *file, return -EINVAL; spin_lock_irqsave(&dwc->lock, flags); + reg = dwc3_readl(dwc->regs, DWC3_DSTS); + speed = reg & DWC3_DSTS_CONNECTSPD; + + if (speed < DWC3_DSTS_SUPERSPEED && + state != DWC3_LINK_STATE_RECOV) { + spin_unlock_irqrestore(&dwc->lock, flags); + return -EINVAL; + } + dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 1286076a8890308a66d6ef494b85169ae92d7798..8fa39e66494057cd478815b58c4d7381198f09f6 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -29,6 +29,8 @@ #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 +#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee +#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e @@ -170,20 +172,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) * put the gpio descriptors again here because the phy driver * might want to grab them, too. */ - gpio = devm_gpiod_get_optional(&pdev->dev, "cs", - GPIOD_OUT_LOW); + gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW); if (IS_ERR(gpio)) return PTR_ERR(gpio); gpiod_set_value_cansleep(gpio, 1); + gpiod_put(gpio); - gpio = devm_gpiod_get_optional(&pdev->dev, "reset", - GPIOD_OUT_LOW); + gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpio)) return PTR_ERR(gpio); if (gpio) { gpiod_set_value_cansleep(gpio, 1); + gpiod_put(gpio); usleep_range(10000, 11000); } } @@ -255,7 +257,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) ret = platform_device_add_properties(dwc->dwc3, p); if (ret < 0) - return ret; + goto err; ret = dwc3_pci_quirks(dwc); if (ret) @@ -283,8 +285,10 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) static void dwc3_pci_remove(struct pci_dev *pci) { struct dwc3_pci *dwc = pci_get_drvdata(pci); + struct pci_dev *pdev = dwc->pci; - gpiod_remove_lookup_table(&platform_bytcr_gpios); + if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) + gpiod_remove_lookup_table(&platform_bytcr_gpios); #ifdef CONFIG_PM cancel_work_sync(&dwc->wakeup_work); #endif @@ -303,6 +307,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 16081383c401c91d47cfaa52aa52f1062f0e825b..6127505770cec1c565e61c9561632454c2ae8fb3 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -219,10 +219,8 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->regmap = regmap; res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg"); - if (!res) { - ret = -ENXIO; - goto undo_platform_dev_alloc; - } + if (!res) + return -ENXIO; dwc3_data->syscfg_reg_off = res->start; @@ -233,8 +231,7 @@ static int st_dwc3_probe(struct platform_device *pdev) devm_reset_control_get_exclusive(dev, "powerdown"); if (IS_ERR(dwc3_data->rstc_pwrdn)) { dev_err(&pdev->dev, "could not get power controller\n"); - ret = PTR_ERR(dwc3_data->rstc_pwrdn); - goto undo_platform_dev_alloc; + return PTR_ERR(dwc3_data->rstc_pwrdn); } /* Manage PowerDown */ @@ -296,8 +293,6 @@ static int st_dwc3_probe(struct platform_device *pdev) reset_control_assert(dwc3_data->rstc_rst); undo_powerdown: reset_control_assert(dwc3_data->rstc_pwrdn); -undo_platform_dev_alloc: - platform_device_put(pdev); return ret; } diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 8efde178eef4d55faeb91aa924bcc82e7b548a41..095704dcee7764131ca8ba0f75426c0bf05c7435 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le) epnum |= 1; dep = dwc->eps[epnum]; + if (dep == NULL) + return NULL; + if (dep->flags & DWC3_EP_ENABLED) return dep; @@ -1110,6 +1113,9 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { + struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; + u8 cmd; + switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: dwc3_ep0_xfer_complete(dwc, event); @@ -1122,7 +1128,12 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc, case DWC3_DEPEVT_XFERINPROGRESS: case DWC3_DEPEVT_RXTXFIFOEVT: case DWC3_DEPEVT_STREAMEVT: + break; case DWC3_DEPEVT_EPCMDCMPLT: + cmd = DEPEVT_PARAMETER_CMD(event->parameters); + + if (cmd == DWC3_DEPCMD_ENDTRANSFER) + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; break; } } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2b53194081bafd3041f0fa7278711e984dd75237..a6e682a000fc7d424cb946be31c525d5801a05ae 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, req->started = false; list_del(&req->list); req->remaining = 0; + req->needs_extra_trb = false; if (req->request.status == -EINPROGRESS) req->request.status = status; @@ -270,27 +271,36 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; u32 timeout = 1000; + u32 saved_config = 0; u32 reg; int cmd_status = 0; - int susphy = false; int ret = -EINVAL; /* - * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if - * we're issuing an endpoint command, we must check if - * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it. + * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or + * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an + * endpoint command. * - * We will also set SUSPHY bit to what it was before returning as stated - * by the same section on Synopsys databook. + * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY + * settings. Restore them after the command is completed. 
+ * + * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 */ if (dwc->gadget.speed <= USB_SPEED_HIGH) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { - susphy = true; + saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } + + if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) { + saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM; + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + } + + if (saved_config) + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { @@ -374,24 +384,14 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); - if (ret == 0) { - switch (DWC3_DEPCMD_CMD(cmd)) { - case DWC3_DEPCMD_STARTTRANSFER: - dep->flags |= DWC3_EP_TRANSFER_STARTED; - dwc3_gadget_ep_get_transfer_index(dep); - break; - case DWC3_DEPCMD_ENDTRANSFER: - dep->flags &= ~DWC3_EP_TRANSFER_STARTED; - break; - default: - /* nothing */ - break; - } + if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { + dep->flags |= DWC3_EP_TRANSFER_STARTED; + dwc3_gadget_ep_get_transfer_index(dep); } - if (unlikely(susphy)) { + if (saved_config) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - reg |= DWC3_GUSB2PHYCFG_SUSPHY; + reg |= saved_config; dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } @@ -638,8 +638,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) reg |= DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); - init_waitqueue_head(&dep->wait_end_transfer); - if (usb_endpoint_xfer_control(desc)) goto out; @@ -709,6 +707,12 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) dwc3_gadget_giveback(dep, req, -ESHUTDOWN); } + + while (!list_empty(&dep->cancelled_list)) { + req = next_request(&dep->cancelled_list); + + dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + } } /** @@ -910,8 +914,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, struct usb_gadget *gadget = &dwc->gadget; enum usb_device_speed speed = gadget->speed; - dwc3_ep_inc_enq(dep); - trb->size = DWC3_TRB_SIZE_LENGTH(length); trb->bpl = lower_32_bits(dma); trb->bph = upper_32_bits(dma); @@ -981,16 +983,20 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, usb_endpoint_type(dep->endpoint.desc)); } - /* always enable Continue on Short Packet */ + /* + * Enable Continue on Short Packet + * when endpoint is not a stream capable + */ if (usb_endpoint_dir_out(dep->endpoint.desc)) { - trb->ctrl |= DWC3_TRB_CTRL_CSP; + if (!dep->stream_capable) + trb->ctrl |= DWC3_TRB_CTRL_CSP; if (short_not_ok) trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; } if ((!no_interrupt && !chain) || - (dwc3_calc_trbs_left(dep) == 0)) + (dwc3_calc_trbs_left(dep) == 1)) trb->ctrl |= DWC3_TRB_CTRL_IOC; if (chain) @@ -1001,6 +1007,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, trb->ctrl |= DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_enq(dep); + trace_dwc3_prepare_trb(dep, trb); } @@ -1037,6 +1045,8 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->trb_dma = dwc3_trb_dma_offset(dep, trb); } + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, stream_id, short_not_ok, no_interrupt); } @@ -1064,15 +1074,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; - req->unaligned = true; + 
req->needs_extra_trb = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, - maxp - rem, false, 0, + maxp - rem, false, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); @@ -1104,19 +1115,20 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; - if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { + if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; - req->unaligned = true; + req->needs_extra_trb = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, - false, 0, req->request.stream_id, + false, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && @@ -1124,15 +1136,16 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; - req->zero = true; + req->needs_extra_trb = true; /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, true, 0); /* Now prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 0, req->request.stream_id, + false, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); } else { @@ -1332,6 +1345,42 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, return ret; } +static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req) +{ + int i; + + /* + * If request was already started, this means we had to + * stop the transfer. With that we also need to ignore + * all TRBs used by the request, however TRBs can only + * be modified after completion of END_TRANSFER + * command. So what we do here is that we wait for + * END_TRANSFER completion and only after that, we jump + * over TRBs by clearing HWO and incrementing dequeue + * pointer. + */ + for (i = 0; i < req->num_trbs; i++) { + struct dwc3_trb *trb; + + trb = req->trb + i; + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + dwc3_ep_inc_deq(dep); + } + + req->num_trbs = 0; +} + +static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep) +{ + struct dwc3_request *req; + struct dwc3_request *tmp; + + list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) { + dwc3_gadget_ep_skip_trbs(dep, req); + dwc3_gadget_giveback(dep, req, -ECONNRESET); + } +} + static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request) { @@ -1362,68 +1411,14 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, /* wait until it is processed */ dwc3_stop_active_transfer(dep, true); - /* - * If request was already started, this means we had to - * stop the transfer. With that we also need to ignore - * all TRBs used by the request, however TRBs can only - * be modified after completion of END_TRANSFER - * command. So what we do here is that we wait for - * END_TRANSFER completion and only after that, we jump - * over TRBs by clearing HWO and incrementing dequeue - * pointer. 
- * - * Note that we have 2 possible types of transfers here: - * - * i) Linear buffer request - * ii) SG-list based request - * - * SG-list based requests will have r->num_pending_sgs - * set to a valid number (> 0). Linear requests, - * normally use a single TRB. - * - * For each of these two cases, if r->unaligned flag is - * set, one extra TRB has been used to align transfer - * size to wMaxPacketSize. - * - * All of these cases need to be taken into - * consideration so we don't mess up our TRB ring - * pointers. - */ - wait_event_lock_irq(dep->wait_end_transfer, - !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), - dwc->lock); - if (!r->trb) goto out0; - if (r->num_pending_sgs) { - struct dwc3_trb *trb; - int i = 0; - - for (i = 0; i < r->num_pending_sgs; i++) { - trb = r->trb + i; - trb->ctrl &= ~DWC3_TRB_CTRL_HWO; - dwc3_ep_inc_deq(dep); - } - - if (r->unaligned || r->zero) { - trb = r->trb + r->num_pending_sgs + 1; - trb->ctrl &= ~DWC3_TRB_CTRL_HWO; - dwc3_ep_inc_deq(dep); - } - } else { - struct dwc3_trb *trb = r->trb; - - trb->ctrl &= ~DWC3_TRB_CTRL_HWO; - dwc3_ep_inc_deq(dep); - - if (r->unaligned || r->zero) { - trb = r->trb + 1; - trb->ctrl &= ~DWC3_TRB_CTRL_HWO; - dwc3_ep_inc_deq(dep); - } - } - goto out1; + dwc3_gadget_move_cancelled_request(req); + if (dep->flags & DWC3_EP_TRANSFER_STARTED) + goto out0; + else + goto out1; } dev_err(dwc->dev, "request %pK was not queued to %s\n", request, ep->name); @@ -1432,8 +1427,6 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, } out1: - /* giveback the request */ - dwc3_gadget_giveback(dep, req, -ECONNRESET); out0: @@ -1461,9 +1454,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) unsigned transfer_in_flight; unsigned started; - if (dep->flags & DWC3_EP_STALL) - return 0; - if (dep->number > 1) trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); else @@ -1485,8 +1475,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) else dep->flags |= DWC3_EP_STALL; } else { - if (!(dep->flags & DWC3_EP_STALL)) - return 0; ret = dwc3_send_clear_stall_ep_cmd(dep); if (ret) @@ -1863,6 +1851,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) /* begin to receive SETUP packets */ dwc->ep0state = EP0_SETUP_PHASE; + dwc->link_state = DWC3_LINK_STATE_SS_DIS; dwc3_ep0_out_start(dwc); dwc3_gadget_enable_irq(dwc); @@ -1930,8 +1919,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; - int epnum; - u32 tmo_eps = 0; spin_lock_irqsave(&dwc->lock, flags); @@ -1940,36 +1927,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g) __dwc3_gadget_stop(dwc); - for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { - struct dwc3_ep *dep = dwc->eps[epnum]; - int ret; - - if (!dep) - continue; - - if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) - continue; - - ret = wait_event_interruptible_lock_irq_timeout(dep->wait_end_transfer, - !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), - dwc->lock, msecs_to_jiffies(5)); - - if (ret <= 0) { - /* Timed out or interrupted! There's nothing much - * we can do so we just log here and print which - * endpoints timed out at the end. 
- */ - tmo_eps |= 1 << epnum; - dep->flags &= DWC3_EP_END_TRANSFER_PENDING; - } - } - - if (tmo_eps) { - dev_err(dwc->dev, - "end transfer timed out on endpoints 0x%x [bitmap]\n", - tmo_eps); - } - out: dwc->gadget_driver = NULL; spin_unlock_irqrestore(&dwc->lock, flags); @@ -2172,6 +2129,7 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum) INIT_LIST_HEAD(&dep->pending_list); INIT_LIST_HEAD(&dep->started_list); + INIT_LIST_HEAD(&dep->cancelled_list); return 0; } @@ -2231,6 +2189,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, dwc3_ep_inc_deq(dep); trace_dwc3_complete_trb(dep, trb); + req->num_trbs--; /* * If we're in the middle of series of chained TRBs and we @@ -2250,7 +2209,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, * with one TRB pending in the ring. We need to manually clear HWO bit * from that TRB. */ - if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { + + if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { trb->ctrl &= ~DWC3_TRB_CTRL_HWO; return 1; } @@ -2311,6 +2271,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { + /* + * For OUT direction, host may send less than the setup + * length. Return true for all OUT requests. + */ + if (!req->direction) + return true; + return req->request.actual == req->request.length; } @@ -2327,16 +2294,15 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); - if (req->unaligned || req->zero) { + if (req->needs_extra_trb) { ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); - req->unaligned = false; - req->zero = false; + req->needs_extra_trb = false; } req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) && + if (!dwc3_gadget_ep_request_completed(req) || req->num_pending_sgs) { __dwc3_gadget_kick_transfer(dep); goto out; @@ -2463,8 +2429,9 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, cmd = DEPEVT_PARAMETER_CMD(event->parameters); if (cmd == DWC3_DEPCMD_ENDTRANSFER) { - dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; - wake_up(&dep->wait_end_transfer); + dep->flags &= ~(DWC3_EP_END_TRANSFER_PENDING | + DWC3_EP_TRANSFER_STARTED); + dwc3_gadget_ep_cleanup_cancelled_requests(dep); } break; case DWC3_DEPEVT_STREAMEVT: @@ -3231,6 +3198,8 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err4; } + dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed); + return 0; err4: diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 2aacd1afd9ff0eac2b668166f3a202f649ea3400..023a473648eb77ddc36c5921a8a339a4d5c49817 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -79,6 +79,21 @@ static inline void dwc3_gadget_move_started_request(struct dwc3_request *req) list_move_tail(&req->list, &dep->started_list); } +/** + * dwc3_gadget_move_cancelled_request - move @req to the cancelled_list + * @req: the request to be moved + * + * Caller should take care of locking. This function will move @req from its + * current list to the endpoint's cancelled_list. 
+ */ +static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req) +{ + struct dwc3_ep *dep = req->dep; + + req->started = false; + list_move_tail(&req->list, &dep->cancelled_list); +} + void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status); diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index f22714cce070b28d60ebe5354ac3e64e54fa289f..f27c5cbe285c5580a779fa62d99294450ca978af 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -251,9 +251,11 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, s = "2x "; break; case 3: + default: s = "3x "; break; } + break; default: s = ""; } s; }), diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index b8a15840b4ffd574430cdc5d0e57e74a3c116242..0f2af1e1e8f4bc621f59c9e14d59711611cca0fa 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1569,6 +1569,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) struct usb_function *f = NULL; u8 endp; + if (w_length > USB_COMP_EP0_BUFSIZ) { + if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); + w_length = USB_COMP_EP0_BUFSIZ; + } else { + goto done; + } + } + /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. @@ -1853,6 +1865,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; + if (interface >= MAX_CONFIG_INTERFACES || + !os_desc_cfg->interface[interface]) + break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); @@ -1976,6 +1991,7 @@ void composite_disconnect(struct usb_gadget *gadget) * disconnect callbacks? 
*/ spin_lock_irqsave(&cdev->lock, flags); + cdev->suspended = 0; if (cdev->config) reset_config(cdev); if (cdev->driver->disconnect) @@ -2081,7 +2097,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite, if (!cdev->req) return -ENOMEM; - cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); + cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); if (!cdev->req->buf) goto fail; @@ -2155,14 +2171,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev) usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req); kfree(cdev->os_desc_req->buf); + cdev->os_desc_req->buf = NULL; usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req); + cdev->os_desc_req = NULL; } if (cdev->req) { if (cdev->setup_pending) usb_ep_dequeue(cdev->gadget->ep0, cdev->req); kfree(cdev->req->buf); + cdev->req->buf = NULL; usb_ep_free_request(cdev->gadget->ep0, cdev->req); + cdev->req = NULL; } cdev->next_string_id = 0; device_remove_file(&cdev->gadget->dev, &dev_attr_suspended); diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c index 2d115353424c2cb073d07251f67a574a4407188e..7275b58ce8cca91f0f35bcc6e844477f7deb5650 100644 --- a/drivers/usb/gadget/config.c +++ b/drivers/usb/gadget/config.c @@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f, { struct usb_gadget *g = f->config->cdev->gadget; + /* super-speed-plus descriptor falls back to super-speed one, + * if such a descriptor was provided, thus avoiding a NULL + * pointer dereference if a 5gbps capable gadget is used with + * a 10gbps capable config (device port + cable + host port) + */ + if (!ssp) + ssp = ss; + if (fs) { f->fs_descriptors = usb_copy_descriptors(fs); if (!f->fs_descriptors) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 02512994289463b43107c49871995816272db5f8..39a483b6648403272708fceeb47fd736d483f1e8 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -61,6 +61,8 @@ struct gadget_info { bool use_os_desc; char b_vendor_code; char qw_sign[OS_STRING_QW_SIGN_LEN]; + spinlock_t spinlock; + bool unbind; }; static inline struct gadget_info *to_gadget_info(struct config_item *item) @@ -112,9 +114,12 @@ static int usb_string_copy(const char *s, char **s_copy) int ret; char *str; char *copy = *s_copy; + ret = strlen(s); if (ret > 126) return -EOVERFLOW; + if (ret < 1) + return -EINVAL; str = kstrdup(s, GFP_KERNEL); if (!str) @@ -231,9 +236,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) { - char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; + struct gadget_info *gi = to_gadget_info(item); + char *udc_name; + int ret; + + mutex_lock(&gi->lock); + udc_name = gi->composite.gadget_driver.udc_name; + ret = sprintf(page, "%s\n", udc_name ?: ""); + mutex_unlock(&gi->lock); - return sprintf(page, "%s\n", udc_name ?: ""); + return ret; } static int unregister_gadget(struct gadget_info *gi) @@ -258,6 +270,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, char *name; int ret; + if (strlen(page) < len) + return -EOVERFLOW; + name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; @@ -1244,6 +1259,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget, int ret; /* the gi->lock is hold by the caller */ + gi->unbind = 0; cdev->gadget = gadget; set_gadget_data(gadget, cdev); ret = composite_dev_prepare(composite, cdev); @@ -1376,31 +1392,128 @@ static void 
configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; struct gadget_info *gi; + unsigned long flags; /* the gi->lock is hold by the caller */ cdev = get_gadget_data(gadget); gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + gi->unbind = 1; + spin_unlock_irqrestore(&gi->spinlock, flags); kfree(otg_desc[0]); otg_desc[0] = NULL; purge_configs_funcs(gi); composite_dev_cleanup(cdev); usb_ep_autoconfig_reset(cdev->gadget); + spin_lock_irqsave(&gi->spinlock, flags); cdev->gadget = NULL; set_gadget_data(gadget, NULL); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static int configfs_composite_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + int ret; + + cdev = get_gadget_data(gadget); + if (!cdev) + return 0; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return 0; + } + + ret = composite_setup(gadget, ctrl); + spin_unlock_irqrestore(&gi->spinlock, flags); + return ret; +} + +static void configfs_composite_disconnect(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_disconnect(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static void configfs_composite_suspend(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_suspend(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static void configfs_composite_resume(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_resume(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); } static const struct usb_gadget_driver configfs_driver_template = { .bind = configfs_composite_bind, .unbind = configfs_composite_unbind, - .setup = composite_setup, - .reset = composite_disconnect, - .disconnect = composite_disconnect, + .setup = configfs_composite_setup, + .reset = configfs_composite_disconnect, + .disconnect = configfs_composite_disconnect, - .suspend = composite_suspend, - .resume = composite_resume, + .suspend = configfs_composite_suspend, + .resume = configfs_composite_resume, .max_speed = USB_SPEED_SUPER, .driver = { @@ -1444,6 +1557,7 @@ static struct config_group *gadgets_make( gi->composite.resume = NULL; gi->composite.max_speed = USB_SPEED_SUPER; + spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); 
INIT_LIST_HEAD(&gi->string_list); INIT_LIST_HEAD(&gi->available_func); diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 6ce044008cf6c4197dda79df4f5f1aab74b846b9..fc4419f0bf743076aa098014fe829683ed283dd5 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f) DBG(cdev, "ecm deactivated\n"); - if (ecm->port.in_ep->enabled) + if (ecm->port.in_ep->enabled) { gether_disconnect(&ecm->port); + } else { + ecm->port.in_ep->desc = NULL; + ecm->port.out_ep->desc = NULL; + } usb_ep_disable(ecm->notify); ecm->notify->desc = NULL; @@ -784,7 +788,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) fs_ecm_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, - ecm_ss_function, NULL); + ecm_ss_function, ecm_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c index c13befa311107d9b4447a63f761e9bffb420534a..3f56e1ce4e0e255c3938e5c148db480393f24a73 100644 --- a/drivers/usb/gadget/function/f_eem.c +++ b/drivers/usb/gadget/function/f_eem.c @@ -305,7 +305,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f) eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, - eem_ss_function, NULL); + eem_ss_function, eem_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 3ada83d81bda8d2810ab04d154b65658f57e0ba2..4ec3f27a0aeacb1c30809c6c2d98f6f86855261c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -215,7 +215,6 @@ struct ffs_io_data { struct mm_struct *mm; struct work_struct work; - struct work_struct cancellation_work; struct usb_ep *ep; struct usb_request *req; @@ -757,6 +756,7 @@ static void ffs_user_copy_worker(struct work_struct *work) int ret = io_data->req->status ? io_data->req->status : io_data->req->actual; bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; + unsigned long flags; if (io_data->read && ret > 0) { mm_segment_t oldfs = get_fs(); @@ -773,7 +773,10 @@ static void ffs_user_copy_worker(struct work_struct *work) if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd) eventfd_signal(io_data->ffs->ffs_eventfd, 1); + spin_lock_irqsave(&io_data->ffs->eps_lock, flags); usb_ep_free_request(io_data->ep, io_data->req); + io_data->req = NULL; + spin_unlock_irqrestore(&io_data->ffs->eps_lock, flags); if (io_data->read) kfree(io_data->to_free); @@ -1009,6 +1012,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * condition with req->complete callback. 
*/ usb_ep_dequeue(ep->ep, req); + wait_for_completion(&done); interrupted = ep->status < 0; } @@ -1073,31 +1077,22 @@ ffs_epfile_open(struct inode *inode, struct file *file) return 0; } -static void ffs_aio_cancel_worker(struct work_struct *work) -{ - struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, - cancellation_work); - - ENTER(); - - usb_ep_dequeue(io_data->ep, io_data->req); -} - static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; - struct ffs_data *ffs = io_data->ffs; + struct ffs_epfile *epfile = kiocb->ki_filp->private_data; int value; ENTER(); - if (likely(io_data && io_data->ep && io_data->req)) { - INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); - queue_work(ffs->io_completion_wq, &io_data->cancellation_work); - value = -EINPROGRESS; - } else { + spin_lock_irq(&epfile->ffs->eps_lock); + + if (likely(io_data && io_data->ep && io_data->req)) + value = usb_ep_dequeue(io_data->ep, io_data->req); + else value = -EINVAL; - } + + spin_unlock_irq(&epfile->ffs->eps_lock); return value; } @@ -1110,11 +1105,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) ENTER(); if (!is_sync_kiocb(kiocb)) { - p = kmalloc(sizeof(io_data), GFP_KERNEL); + p = kzalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; p->aio = true; } else { + memset(p, 0, sizeof(*p)); p->aio = false; } @@ -1146,11 +1142,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) ENTER(); if (!is_sync_kiocb(kiocb)) { - p = kmalloc(sizeof(io_data), GFP_KERNEL); + p = kzalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; p->aio = true; } else { + memset(p, 0, sizeof(*p)); p->aio = false; } @@ -1643,16 +1640,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1699,17 +1704,34 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); - if (ffs->ffs_eventfd) + /* + * potential race possible between ffs_func_eps_disable + * & ffs_epfile_release therefore maintaining a local + * copy of epfile will save us from use-after-free. 
+ */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); + ffs->epfiles = NULL; + } + + if (ffs->ffs_eventfd) { eventfd_ctx_put(ffs->ffs_eventfd); + ffs->ffs_eventfd = NULL; + } kfree(ffs->raw_descs_data); kfree(ffs->raw_strings); @@ -1722,7 +1744,6 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs_data_clear(ffs); - ffs->epfiles = NULL; ffs->raw_descs_data = NULL; ffs->raw_descs = NULL; ffs->raw_strings = NULL; @@ -1847,12 +1868,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) @@ -1870,14 +1894,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 54e859dcb25c3c136795ce605ee50dac47c54d0a..0586229ac83deba3688b6d0004f2ac3fe673dfab 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -252,7 +252,7 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, if (!count) return 0; - if (!access_ok(VERIFY_WRITE, buffer, count)) + if (!access_ok(buffer, count)) return -EFAULT; spin_lock_irqsave(&hidg->read_spinlock, flags); @@ -339,7 +339,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, unsigned long flags; ssize_t status = -ENOMEM; - if (!access_ok(VERIFY_READ, buffer, count)) + if (!access_ok(buffer, count)) return -EFAULT; spin_lock_irqsave(&hidg->write_spinlock, flags); @@ -391,20 +391,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer, req->complete = f_hidg_req_complete; req->context = hidg; + spin_unlock_irqrestore(&hidg->write_spinlock, flags); + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); if (status < 0) { ERROR(hidg->func.config->cdev, "usb_ep_queue error on int endpoint %zd\n", status); - goto release_write_pending_unlocked; + goto release_write_pending; } else { status = count; } - spin_unlock_irqrestore(&hidg->write_spinlock, flags); return status; release_write_pending: spin_lock_irqsave(&hidg->write_spinlock, flags); -release_write_pending_unlocked: hidg->write_pending = 0; spin_unlock_irqrestore(&hidg->write_spinlock, flags); @@ -808,7 +808,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_fs_out_ep_desc.bEndpointAddress; status = usb_assign_descriptors(f, hidg_fs_descriptors, - hidg_hs_descriptors, hidg_ss_descriptors, NULL); + hidg_hs_descriptors, hidg_ss_descriptors, + hidg_ss_descriptors); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_loopback.c 
b/drivers/usb/gadget/function/f_loopback.c index 1803646b367805facd3b1e0f37a870cb8603ff61..90215a81c178bbae9d9c37b1b5460cbea37be712 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -207,7 +207,7 @@ static int loopback_bind(struct usb_configuration *c, struct usb_function *f) ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs, - ss_loopback_descs, NULL); + ss_loopback_descs, ss_loopback_descs); if (ret) return ret; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 1074cb82ec172d2ac464d72d9e52c9461715c868..0b7b4d09785b6e22be0d3a22d111bf9402e003ff 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -261,7 +261,7 @@ struct fsg_common; struct fsg_common { struct usb_gadget *gadget; struct usb_composite_dev *cdev; - struct fsg_dev *fsg, *new_fsg; + struct fsg_dev *fsg; wait_queue_head_t io_wait; wait_queue_head_t fsg_wait; @@ -290,6 +290,7 @@ struct fsg_common { unsigned int bulk_out_maxpacket; enum fsg_state state; /* For exception handling */ unsigned int exception_req_tag; + void *exception_arg; enum data_direction data_dir; u32 data_size; @@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) /* These routines may be called in process context or in_irq */ -static void raise_exception(struct fsg_common *common, enum fsg_state new_state) +static void __raise_exception(struct fsg_common *common, enum fsg_state new_state, + void *arg) { unsigned long flags; @@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state) if (common->state <= new_state) { common->exception_req_tag = common->ep0_req_tag; common->state = new_state; + common->exception_arg = arg; if (common->thread_task) send_sig_info(SIGUSR1, SEND_SIG_FORCED, common->thread_task); @@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state) spin_unlock_irqrestore(&common->lock, flags); } +static void raise_exception(struct fsg_common *common, enum fsg_state new_state) +{ + __raise_exception(common, new_state, NULL); +} /*-------------------------------------------------------------------------*/ @@ -2285,16 +2292,16 @@ static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct fsg_dev *fsg = fsg_from_func(f); - fsg->common->new_fsg = fsg; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg); return USB_GADGET_DELAYED_STATUS; } static void fsg_disable(struct usb_function *f) { struct fsg_dev *fsg = fsg_from_func(f); - fsg->common->new_fsg = NULL; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); } @@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common) enum fsg_state old_state; struct fsg_lun *curlun; unsigned int exception_req_tag; + struct fsg_dev *new_fsg; /* * Clear the existing signals. 
Anything but SIGUSR1 is converted @@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common) common->next_buffhd_to_fill = &common->buffhds[0]; common->next_buffhd_to_drain = &common->buffhds[0]; exception_req_tag = common->exception_req_tag; + new_fsg = common->exception_arg; old_state = common->state; common->state = FSG_STATE_NORMAL; @@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common) break; case FSG_STATE_CONFIG_CHANGE: - do_set_interface(common, common->new_fsg); - if (common->new_fsg) + do_set_interface(common, new_fsg); + if (new_fsg) usb_composite_setup_continue(common->cdev); break; @@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) DBG(fsg, "unbind\n"); if (fsg->common->fsg == fsg) { - fsg->common->new_fsg = NULL; - raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL); /* FIXME: make interruptible or killable somehow? */ wait_event(common->fsg_wait, common->fsg != fsg); } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 5780fba620ab13014369ec0304910d9ee317e1bf..6031e96cf3510d95a9dd3d95f723ad5dda9d85ed 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -85,7 +85,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { - if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + if (!g) + return 0; + else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) return 13 * 1024 * 8 * 1000 * 8; else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; @@ -885,7 +887,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (alt > 1) goto fail; - if (ncm->port.in_ep->enabled) { + if (ncm->netdev) { DBG(cdev, "reset ncm\n"); ncm->timer_stopping = true; ncm->netdev = NULL; @@ -1322,7 +1324,7 @@ static void ncm_disable(struct usb_function *f) DBG(cdev, "ncm deactivated\n"); - if (ncm->port.in_ep->enabled) { + if (ncm->netdev) { ncm->timer_stopping = true; ncm->netdev = NULL; gether_disconnect(&ncm->port); diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 9c7ed2539ff7727c64427d76a681a063c82937b7..32de1f5343d0c8816ba7c981f4c79495e80e66ca 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -64,7 +65,7 @@ struct printer_dev { struct usb_gadget *gadget; s8 interface; struct usb_ep *in_ep, *out_ep; - + struct kref kref; struct list_head rx_reqs; /* List of free RX structs */ struct list_head rx_reqs_active; /* List of Active RX xfers */ struct list_head rx_buffers; /* List of completed xfers */ @@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, /*-------------------------------------------------------------------------*/ +static void printer_dev_free(struct kref *kref) +{ + struct printer_dev *dev = container_of(kref, struct printer_dev, kref); + + kfree(dev); +} + static struct usb_request * printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) { @@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd) spin_unlock_irqrestore(&dev->lock, flags); + kref_get(&dev->kref); DBG(dev, "printer_open returned 
%x\n", ret); return ret; } @@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd) dev->printer_status &= ~PRINTER_SELECTED; spin_unlock_irqrestore(&dev->lock, flags); + kref_put(&dev->kref, printer_dev_free); DBG(dev, "printer_close\n"); return 0; @@ -1053,7 +1063,8 @@ static int printer_func_bind(struct usb_configuration *c, ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_printer_function, - hs_printer_function, ss_printer_function, NULL); + hs_printer_function, ss_printer_function, + ss_printer_function); if (ret) return ret; @@ -1350,7 +1361,8 @@ static void gprinter_free(struct usb_function *f) struct f_printer_opts *opts; opts = container_of(f->fi, struct f_printer_opts, func_inst); - kfree(dev); + + kref_put(&dev->kref, printer_dev_free); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1419,6 +1431,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) return ERR_PTR(-ENOMEM); } + kref_init(&dev->kref); ++opts->refcnt; dev->minor = opts->minor; dev->pnp_string = opts->pnp_string; diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index d48df36622b74b565b037d5ce56d95dd1b150162..8b09dfcadf89cc479e4511d33ab53766ee3a29fc 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f) gether_disconnect(&rndis->port); usb_ep_disable(rndis->notify); + rndis->notify->desc = NULL; } /*-------------------------------------------------------------------------*/ @@ -786,7 +787,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, - eth_ss_function, NULL); + eth_ss_function, eth_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c index c860f30a0ea2bb9d0266d4134b39c890c3bb135d..6db973d0c252ee40f2efa481073c756346e02bd9 100644 --- a/drivers/usb/gadget/function/f_serial.c +++ b/drivers/usb/gadget/function/f_serial.c @@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f) gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, - gser_ss_function, NULL); + gser_ss_function, gser_ss_function); if (status) goto fail; dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9cdef108fb1b3da5581c99c8251903702b04c6a8..2c65a9bb3c81bc52a5cd073b758b90deba5c714f 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -431,7 +431,8 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; ret = usb_assign_descriptors(f, fs_source_sink_descs, - hs_source_sink_descs, ss_source_sink_descs, NULL); + hs_source_sink_descs, ss_source_sink_descs, + ss_source_sink_descs); if (ret) return ret; @@ -582,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, if (is_iso) { switch (speed) { + case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: size = ss->isoc_maxpacket * (ss->isoc_mult + 1) * @@ -838,7 +840,7 @@ static struct usb_function 
*source_sink_alloc_func( ss = kzalloc(sizeof(*ss), GFP_KERNEL); if (!ss) - return NULL; + return ERR_PTR(-ENOMEM); ss_opts = container_of(fi, struct f_ss_opts, func_inst); diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c index 4d945254905d91957da12c01366d60883919699d..51c1cae162d9b0a4dc2e80c80a36db6dcc500e69 100644 --- a/drivers/usb/gadget/function/f_subset.c +++ b/drivers/usb/gadget/function/f_subset.c @@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f) fs_subset_out_desc.bEndpointAddress; status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, - ss_eth_function, NULL); + ss_eth_function, ss_eth_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 106988a6661ab404743b121e3cdf2fa42c49eed3..bc714dc48152cd4bd6165b2021782b2f08e73962 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -2070,7 +2070,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f) uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; ret = usb_assign_descriptors(f, uasp_fs_function_desc, - uasp_hs_function_desc, uasp_ss_function_desc, NULL); + uasp_hs_function_desc, uasp_ss_function_desc, + uasp_ss_function_desc); if (ret) goto ep_fail; diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index d8ce7868fe225a5a72902d50bb16c9fed65af559..847434e5d7ae645caa24bf3d8909be22c6afe679 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -219,8 +219,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_DATA; - uvc_event->data.length = req->actual; - memcpy(&uvc_event->data.data, req->buf, req->actual); + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); v4l2_event_queue(&uvc->vdev, &v4l2_event); } } diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 04c142c1307592c3826545f299325f2c393f5a7b..eaa4cf163eaf106ec3e319d80cb70b4a73893d87 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -506,6 +506,10 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID, switch (OID) { case RNDIS_OID_GEN_CURRENT_PACKET_FILTER: + if (buf_len < 2) { + pr_err("%s:Not support for buf_len < 2\n", __func__); + break; + } /* these NDIS_PACKET_TYPE_* bitflags are shared with * cdc_filter; it's not RNDIS-specific @@ -592,6 +596,7 @@ static int rndis_query_response(struct rndis_params *params, rndis_query_msg_type *buf) { rndis_query_cmplt_type *resp; + u32 BufOffset, BufLength; rndis_resp_t *r; /* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */ @@ -612,12 +617,25 @@ static int rndis_query_response(struct rndis_params *params, resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + BufLength = le32_to_cpu(buf->InformationBufferLength); + + /* + * If the address of the buf to be accessed exceeds the valid + * range of the buf, then return RNDIS_STATUS_NOT_SUPPORTED. 
+ */ + if (8 + BufOffset + BufLength >= USB_COMP_EP0_BUFSIZ) { + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + resp->MessageLength = cpu_to_le32(sizeof(*resp)); + resp->InformationBufferLength = cpu_to_le32(0); + resp->InformationBufferOffset = cpu_to_le32(0); + params->resp_avail(params->v); + return 0; + } if (gen_ndis_query_resp(params, le32_to_cpu(buf->OID), - le32_to_cpu(buf->InformationBufferOffset) - + 8 + (u8 *)buf, - le32_to_cpu(buf->InformationBufferLength), - r)) { + BufOffset + 8 + (u8 *)buf, BufLength, + r)) { /* OID not supported */ resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); resp->MessageLength = cpu_to_le32(sizeof *resp); @@ -637,14 +655,18 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) + return -EINVAL; + r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, BufOffset); @@ -660,6 +682,17 @@ static int rndis_set_response(struct rndis_params *params, resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C); resp->MessageLength = cpu_to_le32(16); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + + /* + * If the address of the buf to be accessed exceeds the valid + * range of the buf, then return RNDIS_STATUS_NOT_SUPPORTED. 
+ */ + if (8 + BufOffset + BufLength >= USB_COMP_EP0_BUFSIZ) { + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + params->resp_avail(params->v); + return 0; + } + if (gen_ndis_set_resp(params, le32_to_cpu(buf->OID), ((u8 *)buf) + 8 + BufOffset, BufLength, r)) resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); @@ -919,6 +952,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(&params->resp_queue); + spin_lock_init(&params->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1012,12 +1046,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(&params->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1027,14 +1063,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(&params->resp_lock); return r->buf; } } + spin_unlock(&params->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1051,7 +1090,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(&params->resp_lock); list_add_tail(&r->list, &params->resp_queue); + spin_unlock(&params->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index c7e3a70ce6c1fa73e48b52eb80d782a6ca1ff6bf..c996ba28bcb7718a0022aa0d3e1215b6ac2dd16e 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 1000d864929c3569162e49d83058f29eb3310706..0ef00315ec7376e33a8223159cd01a24f1ac404b 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -186,11 +186,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) out = dev->port_usb->out_ep; else out = NULL; - spin_unlock_irqrestore(&dev->lock, flags); if (!out) + { + spin_unlock_irqrestore(&dev->lock, flags); return -ENOTCONN; - + } /* Padding up to RX_EXTRA handles minor disagreements with host. 
* Normally we use the USB "terminate on short read" convention; @@ -214,6 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) if (dev->port_usb->is_fixed) size = max_t(size_t, size, dev->port_usb->fixed_out_len); + spin_unlock_irqrestore(&dev->lock, flags); skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags); if (skb == NULL) { @@ -401,12 +403,12 @@ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) { struct usb_request *req; - struct usb_request *tmp; unsigned long flags; /* fill unused rxq slots with some skb */ spin_lock_irqsave(&dev->req_lock, flags); - list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { + while (!list_empty(&dev->rx_reqs)) { + req = list_first_entry(&dev->rx_reqs, struct usb_request, list); list_del_init(&req->list); spin_unlock_irqrestore(&dev->req_lock, flags); @@ -1125,7 +1127,6 @@ void gether_disconnect(struct gether *link) { struct eth_dev *dev = link->ioport; struct usb_request *req; - struct usb_request *tmp; WARN_ON(!dev); if (!dev) @@ -1142,7 +1143,8 @@ void gether_disconnect(struct gether *link) */ usb_ep_disable(link->in_ep); spin_lock(&dev->req_lock); - list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) { + while (!list_empty(&dev->tx_reqs)) { + req = list_first_entry(&dev->tx_reqs, struct usb_request, list); list_del(&req->list); spin_unlock(&dev->req_lock); @@ -1154,7 +1156,8 @@ void gether_disconnect(struct gether *link) usb_ep_disable(link->out_ep); spin_lock(&dev->req_lock); - list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) { + while (!list_empty(&dev->rx_reqs)) { + req = list_first_entry(&dev->rx_reqs, struct usb_request, list); list_del(&req->list); spin_unlock(&dev->req_lock); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 29436f75bbe06b5e94df5a063a1a5222c3b6509d..d4d317db89df5ded4af8ee8979a1214bb68eb2ab 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1246,8 +1246,10 @@ int gserial_alloc_line(unsigned char *line_num) __func__, port_num, PTR_ERR(tty_dev)); ret = PTR_ERR(tty_dev); + mutex_lock(&ports[port_num].lock); port = ports[port_num].port; ports[port_num].port = NULL; + mutex_unlock(&ports[port_num].lock); gserial_free_port(port); goto err; } diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index b51f0d27882697504707c3d00c9f72fb0b6e87b6..2e4c0391b583652aed360dc43384d829b8ee4c57 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -9,6 +9,9 @@ * * Author: Andrzej Pietrasiewicz */ + +#include + #include "u_uvc.h" #include "uvc_configfs.h" @@ -31,6 +34,14 @@ static struct configfs_attribute prefix##attr_##cname = { \ .show = prefix##cname##_show, \ } +static int uvcg_config_compare_u32(const void *l, const void *r) +{ + u32 li = *(const u32 *)l; + u32 ri = *(const u32 *)r; + + return li < ri ? -1 : li == ri ? 
0 : 1; +} + static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item) { return container_of(to_config_group(item), struct f_uvc_opts, @@ -544,6 +555,7 @@ static int uvcg_control_class_allow_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } @@ -579,6 +591,7 @@ static void uvcg_control_class_drop_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); } @@ -764,6 +777,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src, format_ptr->fmt = target_fmt; list_add_tail(&format_ptr->entry, &src_hdr->formats); ++src_hdr->num_fmt; + ++target_fmt->linked; out: mutex_unlock(&opts->lock); @@ -801,6 +815,8 @@ static void uvcg_streaming_header_drop_link(struct config_item *src, break; } + --target_fmt->linked; + out: mutex_unlock(&opts->lock); mutex_unlock(su_mutex); @@ -1129,6 +1145,8 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item, kfree(ch->dw_frame_interval); ch->dw_frame_interval = frm_intrv; ch->frame.b_frame_interval_type = n; + sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval), + uvcg_config_compare_u32, NULL); ret = len; end: @@ -2038,6 +2056,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } @@ -2078,6 +2097,7 @@ static void uvcg_streaming_class_drop_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); } diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index d3567b90343a4ab57cd185360b30761307304e3a..2c9821ec836e771e3c22b0899da60974ff2417fb 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -125,6 +125,21 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, * Request handling */ +static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req) +{ + int ret; + + ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + if (ret < 0) { + printk(KERN_INFO "Failed to queue request (%d).\n", ret); + /* Isochronous endpoints can't be halted. */ + if (usb_endpoint_xfer_bulk(video->ep->desc)) + usb_ep_set_halt(video->ep); + } + + return ret; +} + /* * I somehow feel that synchronisation won't be easy to achieve here. 
We have * three events that control USB requests submission: @@ -189,14 +204,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req) video->encode(req, video, buf); - if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) { - printk(KERN_INFO "Failed to queue request (%d).\n", ret); - usb_ep_set_halt(ep); - spin_unlock_irqrestore(&video->queue.irqlock, flags); + ret = uvcg_video_ep_queue(video, req); + spin_unlock_irqrestore(&video->queue.irqlock, flags); + + if (ret < 0) { uvcg_queue_cancel(queue, 0); goto requeue; } - spin_unlock_irqrestore(&video->queue.irqlock, flags); return; @@ -316,15 +330,13 @@ int uvcg_video_pump(struct uvc_video *video) video->encode(req, video, buf); /* Queue the USB request */ - ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + ret = uvcg_video_ep_queue(video, req); + spin_unlock_irqrestore(&queue->irqlock, flags); + if (ret < 0) { - printk(KERN_INFO "Failed to queue request (%d)\n", ret); - usb_ep_set_halt(video->ep); - spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_queue_cancel(queue, 0); break; } - spin_unlock_irqrestore(&queue->irqlock, flags); } spin_lock_irqsave(&video->req_lock, flags); diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c index e1d566c9918ae576d55fc322f317d2992193b7f1..ffe58d44b2cb1a1d624fa27a57a6b171afa13337 100644 --- a/drivers/usb/gadget/legacy/dbgp.c +++ b/drivers/usb/gadget/legacy/dbgp.c @@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget, void *data = NULL; u16 len = 0; + if (length > DBGP_REQ_LEN) { + if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(DBGP_REQ_LEN); + length = DBGP_REQ_LEN; + } else { + return err; + } + } + + if (request == USB_REQ_GET_DESCRIPTOR) { switch (value>>8) { case USB_DT_DEVICE: diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 37ca0e669bd85401c2337d75aa888ea5f9772d0d..6528df6f3709aff47eba8855ca1de657db108cc1 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -109,6 +109,8 @@ enum ep0_state { /* enough for the whole queue: most events invalidate others */ #define N_EVENT 5 +#define RBUF_SIZE 256 + struct dev_data { spinlock_t lock; refcount_t count; @@ -143,7 +145,7 @@ struct dev_data { struct dentry *dentry; /* except this scratch i/o buffer for ep0 */ - u8 rbuf [256]; + u8 rbuf[RBUF_SIZE]; }; static inline void get_dev (struct dev_data *data) @@ -1332,6 +1334,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); + if (w_length > RBUF_SIZE) { + if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. 
*/ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(RBUF_SIZE); + w_length = RBUF_SIZE; + } else { + return value; + } + } + spin_lock (&dev->lock); dev->setup_abort = 0; if (dev->state == STATE_DEV_UNCONNECTED) { @@ -1815,8 +1829,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; if (dev->buf) { + spin_unlock_irq(&dev->lock); kfree(kbuf); - goto fail; + return value; } dev->buf = kbuf; @@ -1863,8 +1878,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = usb_gadget_probe_driver(&gadgetfs_driver); if (value != 0) { - kfree (dev->buf); - dev->buf = NULL; + spin_lock_irq(&dev->lock); + goto fail; } else { /* at this point "good" hardware has for the first time * let the USB the host see us. alternatively, if users @@ -1881,6 +1896,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return value; fail: + dev->config = NULL; + dev->hs_config = NULL; + dev->dev = NULL; spin_unlock_irq (&dev->lock); pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 17147b8c771ef04024c2f203ee3636f9620d6f73..fabbc31184e89383a37a15a504ddd1b3c2f8775b 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -88,7 +88,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf, size_t len, remaining, actual = 0; char tmpbuf[38]; - if (!access_ok(VERIFY_WRITE, buf, nbytes)) + if (!access_ok(buf, nbytes)) return -EFAULT; inode_lock(file_inode(file)); @@ -436,9 +436,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req) next_fifo_transaction(ep, req); if (req->last_transaction) { usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); - usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); } else { - usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); } } @@ -2017,6 +2019,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, udc->errata = match->data; udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc"); + if (IS_ERR(udc->pmc)) + udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc"); if (IS_ERR(udc->pmc)) udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc"); if (udc->errata && IS_ERR(udc->pmc)) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index af88b48c1cea9f5cbb1c756df6602a8bba25a815..1335a715f5dd3749e66a31e20d381575a1d913af 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -98,6 +98,15 @@ int usb_ep_enable(struct usb_ep *ep) if (ep->enabled) goto out; + /* UDC drivers can't handle endpoints with maxpacket size 0 */ + if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { + WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, + (!ep->desc) ? 
"NULL descriptor" : "maxpacket 0"); + + ret = -EINVAL; + goto out; + } + ret = ep->ops->enable(ep, ep->desc); if (ret) goto out; @@ -262,7 +271,9 @@ int usb_ep_queue(struct usb_ep *ep, { int ret = 0; - if (WARN_ON_ONCE(!ep->enabled && ep->address)) { + if (!ep->enabled && ep->address) { + pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n", + ep->address, ep->name); ret = -ESHUTDOWN; goto out; } @@ -1138,7 +1149,7 @@ static int check_pending_gadget_drivers(struct usb_udc *udc) dev_name(&udc->dev)) == 0) { ret = udc_bind_to_driver(udc, driver); if (ret != -EPROBE_DEFER) - list_del(&driver->pending); + list_del_init(&driver->pending); break; } @@ -1286,7 +1297,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_udc_stop(udc); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; } @@ -1335,7 +1345,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri driver->function); udc->driver = driver; - udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; usb_gadget_udc_set_speed(udc, driver->max_speed); @@ -1357,7 +1366,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri dev_err(&udc->dev, "failed to start %s: %d\n", udc->driver->function, ret); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; return ret; } diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index baf72f95f0f1cb38ff443bc802a258b6f04ef899..d0248c58dcb6a3d0af3d058d5aff033777e2fd7e 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -48,6 +48,7 @@ #define DRIVER_VERSION "02 May 2005" #define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ +#define POWER_BUDGET_3 900 /* in mA */ static const char driver_name[] = "dummy_hcd"; static const char driver_desc[] = "USB Host+Gadget Emulator"; @@ -979,8 +980,18 @@ static int dummy_udc_start(struct usb_gadget *g, struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); struct dummy *dum = dum_hcd->dum; - if (driver->max_speed == USB_SPEED_UNKNOWN) + switch (g->speed) { + /* All the speeds we support */ + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + break; + default: + dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n", + driver->max_speed); return -EINVAL; + } /* * SLAVE side init ... the layer above hardware, which @@ -1324,7 +1335,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req, u32 this_sg; bool next_sg; - to_host = usb_pipein(urb->pipe); + to_host = usb_urb_dir_in(urb); rbuf = req->req.buf + req->req.actual; if (!urb->num_sgs) { @@ -1412,7 +1423,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb, /* FIXME update emulated data toggle too */ - to_host = usb_pipein(urb->pipe); + to_host = usb_urb_dir_in(urb); if (unlikely(len == 0)) is_short = 1; else { @@ -1784,9 +1795,10 @@ static void dummy_timer(struct timer_list *t) /* Bus speed is 500000 bytes/ms, so use a little less */ total = 490000; break; - default: + default: /* Can't happen */ dev_err(dummy_dev(dum_hcd), "bogus device speed\n"); - return; + total = 0; + break; } /* FIXME if HZ != 1000 this will probably misbehave ... */ @@ -1828,11 +1840,11 @@ static void dummy_timer(struct timer_list *t) /* Used up this frame's bandwidth? 
*/ if (total <= 0) - break; + continue; /* find the gadget's ep for this request (if configured) */ address = usb_pipeendpoint (urb->pipe); - if (usb_pipein(urb->pipe)) + if (usb_urb_dir_in(urb)) address |= USB_DIR_IN; ep = find_endpoint(dum, address); if (!ep) { @@ -2387,7 +2399,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb) s = "?"; break; } s; }), - ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "", + ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "", ({ char *s; \ switch (usb_pipetype(urb->pipe)) { \ case PIPE_CONTROL: \ @@ -2435,7 +2447,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd) dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; INIT_LIST_HEAD(&dum_hcd->urbp_list); - dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET; + dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3; dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING; dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1; #ifdef CONFIG_USB_OTG @@ -2727,7 +2739,7 @@ static struct platform_driver dummy_hcd_driver = { }; /*-------------------------------------------------------------------------*/ -#define MAX_NUM_UDC 2 +#define MAX_NUM_UDC 32 static struct platform_device *the_udc_pdev[MAX_NUM_UDC]; static struct platform_device *the_hcd_pdev[MAX_NUM_UDC]; diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 587c5037ff079e0d74984703c91cc5dbca8428cd..bc6abaea907d88db2555afdd9549d1486a96498f 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -741,7 +741,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210, fotg210->ep0_req->length = 2; spin_unlock(&fotg210->lock); - fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL); + fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC); spin_lock(&fotg210->lock); } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index be59309e848c335fafc8301eaec211f3fec9657e..d44b26d5b2a2c5b4b26c54018a692dccee36bb17 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -2552,7 +2552,7 @@ static int fsl_udc_remove(struct platform_device *pdev) dma_pool_destroy(udc_controller->td_pool); free_irq(udc_controller->irq, udc_controller); iounmap(dr_regs); - if (pdata->operating_mode == FSL_USB2_DR_DEVICE) + if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE)) release_mem_region(res->start, resource_size(res)); /* free udc --wait for the release() finished */ diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index 263804d154a77d2d54e4809cde3a54e1387f3882..00e3f66836a982352bf3c3f31907fa3c9154df5a 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -1342,12 +1342,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = { static int fusb300_remove(struct platform_device *pdev) { struct fusb300 *fusb300 = platform_get_drvdata(pdev); + int i; usb_del_gadget_udc(&fusb300->gadget); iounmap(fusb300->reg); free_irq(platform_get_irq(pdev, 0), fusb300); fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); + for (i = 0; i < FUSB300_MAX_NUM_EP; i++) + kfree(fusb300->ep[i]); kfree(fusb300); return 0; @@ -1491,6 +1494,8 @@ static int fusb300_probe(struct platform_device *pdev) if (fusb300->ep0_req) fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); + for (i = 0; i < FUSB300_MAX_NUM_EP; i++) + kfree(fusb300->ep[i]); kfree(fusb300); } if (reg) diff 
--git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index b0781771704e66fb810e7a993f2b30a3e4e95438..21921db068f6d80ae6d0fb6fcea8cbd9a8b018e3 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -922,8 +922,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc) dma_addr_t dma; struct lpc32xx_usbd_dd_gad *dd; - dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc( - udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma); + dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma); if (dd) dd->this_dma = dma; @@ -1166,11 +1165,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) tmp = readl(USBD_RXDATA(udc->udp_baseaddr)); bl = bytes - n; - if (bl > 3) - bl = 3; + if (bl > 4) + bl = 4; for (i = 0; i < bl; i++) - data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF); + data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF); } break; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 660878a195055b32568db74ca3716b9fbccbae12..c2011cd7df8cf5fbf0c5a5db153c37a1b0f451a6 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) break; } if (&req->req != _req) { + ep->stopped = stopped; spin_unlock_irqrestore(&ep->dev->lock, flags); return -EINVAL; } @@ -2083,7 +2084,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev) #if defined(PLX_PCI_RDK2) /* see if PCI int for us by checking irqstat */ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); - if (!intcsr & (1 << NET2272_PCI_IRQ)) { + if (!(intcsr & (1 << NET2272_PCI_IRQ))) { spin_unlock(&dev->lock); return IRQ_NONE; } diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index b02ab2a8d927f6f76fb3173113b83dbd6555363a..ee872cad5270552cf283a50c52f7307c8a5b3f6d 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) (void) readl(&ep->dev->pci->pcimstctl); writel(BIT(DMA_START), &dma->dmastat); - - if (!ep->is_in) - stop_out_naking(ep); } static void start_dma(struct net2280_ep *ep, struct net2280_request *req) @@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req) writel(BIT(DMA_START), &dma->dmastat); return; } + stop_out_naking(ep); } tmp = dmactl_default; @@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) break; } if (&req->req != _req) { + ep->stopped = stopped; spin_unlock_irqrestore(&ep->dev->lock, flags); - dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", - __func__); + ep_dbg(ep->dev, "%s: Request mismatch\n", __func__); return -EINVAL; } diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c index 3a16431da3211c470db38e5e2980996142073b65..fcf13ef33b312020d744cd99898294f02c1ba72a 100644 --- a/drivers/usb/gadget/udc/omap_udc.c +++ b/drivers/usb/gadget/udc/omap_udc.c @@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void) { return machine_is_omap_innovator() || machine_is_omap_osk() + || machine_is_omap_palmte() || machine_is_sx1() /* No known omap7xx boards with vbus sense */ || cpu_is_omap7xx(); @@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void) static int omap_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver) { - int status = -ENODEV; + int status; struct omap_ep *ep; unsigned long 
flags; @@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g, goto done; } } else { + status = 0; if (can_pullup(udc)) pullup_enable(udc); else @@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type, static void omap_udc_release(struct device *dev) { - complete(udc->done); + pullup_disable(udc); + if (!IS_ERR_OR_NULL(udc->transceiver)) { + usb_put_phy(udc->transceiver); + udc->transceiver = NULL; + } + omap_writew(0, UDC_SYSCON1); + remove_proc_file(); + if (udc->dc_clk) { + if (udc->clk_requested) + omap_udc_enable_clock(0); + clk_put(udc->hhc_clk); + clk_put(udc->dc_clk); + } + if (udc->done) + complete(udc->done); kfree(udc); - udc = NULL; } static int @@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv) udc->gadget.speed = USB_SPEED_UNKNOWN; udc->gadget.max_speed = USB_SPEED_FULL; udc->gadget.name = driver_name; + udc->gadget.quirk_ep_out_aligned_size = 1; udc->transceiver = xceiv; /* ep0 is special; put it right after the SETUP buffer */ @@ -2867,8 +2883,8 @@ static int omap_udc_probe(struct platform_device *pdev) udc->clr_halt = UDC_RESET_EP; /* USB general purpose IRQ: ep0, state changes, dma, etc */ - status = request_irq(pdev->resource[1].start, omap_udc_irq, - 0, driver_name, udc); + status = devm_request_irq(&pdev->dev, pdev->resource[1].start, + omap_udc_irq, 0, driver_name, udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[1].start, status); @@ -2876,20 +2892,20 @@ static int omap_udc_probe(struct platform_device *pdev) } /* USB "non-iso" IRQ (PIO for all but ep0) */ - status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, - 0, "omap_udc pio", udc); + status = devm_request_irq(&pdev->dev, pdev->resource[2].start, + omap_udc_pio_irq, 0, "omap_udc pio", udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[2].start, status); - goto cleanup2; + goto cleanup1; } #ifdef USE_ISO - status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, - 0, "omap_udc iso", udc); + status = devm_request_irq(&pdev->dev, pdev->resource[3].start, + omap_udc_iso_irq, 0, "omap_udc iso", udc); if (status != 0) { ERR("can't get irq %d, err %d\n", (int) pdev->resource[3].start, status); - goto cleanup3; + goto cleanup1; } #endif if (cpu_is_omap16xx() || cpu_is_omap7xx()) { @@ -2900,23 +2916,8 @@ static int omap_udc_probe(struct platform_device *pdev) } create_proc_file(); - status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, - omap_udc_release); - if (status) - goto cleanup4; - - return 0; - -cleanup4: - remove_proc_file(); - -#ifdef USE_ISO -cleanup3: - free_irq(pdev->resource[2].start, udc); -#endif - -cleanup2: - free_irq(pdev->resource[1].start, udc); + return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, + omap_udc_release); cleanup1: kfree(udc); @@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev) { DECLARE_COMPLETION_ONSTACK(done); - if (!udc) - return -ENODEV; - - usb_del_gadget_udc(&udc->gadget); - if (udc->driver) - return -EBUSY; - udc->done = &done; - pullup_disable(udc); - if (!IS_ERR_OR_NULL(udc->transceiver)) { - usb_put_phy(udc->transceiver); - udc->transceiver = NULL; - } - omap_writew(0, UDC_SYSCON1); - - remove_proc_file(); - -#ifdef USE_ISO - free_irq(pdev->resource[3].start, udc); -#endif - free_irq(pdev->resource[2].start, udc); - free_irq(pdev->resource[1].start, udc); + usb_del_gadget_udc(&udc->gadget); - if (udc->dc_clk) { - if (udc->clk_requested) - omap_udc_enable_clock(0); - clk_put(udc->hhc_clk); - 
clk_put(udc->dc_clk); - } + wait_for_completion(&done); release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); - wait_for_completion(&done); - return 0; } diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index afaea11ec7719c3350080f010777a3d0f81ef925..991184b8bb41ebd7b43e83550f0f7bb8254fe390 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -1520,7 +1520,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, td = phys_to_virt(addr); addr2 = (dma_addr_t)td->next; dma_pool_free(dev->data_requests, td, addr); - td->next = 0x00; addr = addr2; } req->chain_len = 1; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index e1656f361e081d398d8ab55820a00cf9497f3562..3cbb372db43280a463b729c63e5e2e16f52ab465 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -358,6 +359,7 @@ struct renesas_usb3 { bool extcon_host; /* check id and set EXTCON_USB_HOST */ bool extcon_usb; /* check vbus and set EXTCON_USB */ bool forced_b_device; + bool start_to_connect; }; #define gadget_to_renesas_usb3(_gadget) \ @@ -476,7 +478,8 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3) static void usb3_init_epc_registers(struct renesas_usb3 *usb3) { usb3_write(usb3, ~0, USB3_USB_INT_STA_1); - usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); + if (!usb3->workaround_for_vbus) + usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); } static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3) @@ -700,8 +703,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev) usb3_set_mode_by_role_sw(usb3, host); usb3_vbus_out(usb3, a_dev); /* for A-Peripheral or forced B-device mode */ - if ((!host && a_dev) || - (usb3->workaround_for_vbus && usb3->forced_b_device)) + if ((!host && a_dev) || usb3->start_to_connect) usb3_connect(usb3); spin_unlock_irqrestore(&usb3->lock, flags); } @@ -2377,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, if (usb3->forced_b_device) return -EBUSY; - if (!strncmp(buf, "host", strlen("host"))) + if (sysfs_streq(buf, "host")) new_mode_is_host = true; - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + else if (sysfs_streq(buf, "peripheral")) new_mode_is_host = false; else return -EINVAL; @@ -2432,12 +2434,19 @@ static ssize_t renesas_usb3_b_device_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; - if (!strncmp(buf, "1", 1)) + usb3->start_to_connect = false; + if (usb3->workaround_for_vbus && usb3->forced_b_device && + !strncmp(buf, "2", 1)) + usb3->start_to_connect = true; + else if (!strncmp(buf, "1", 1)) usb3->forced_b_device = true; else usb3->forced_b_device = false; - /* Let this driver call usb3_connect() anyway */ + if (usb3->workaround_for_vbus) + usb3_disconnect(usb3); + + /* Let this driver call usb3_connect() if needed */ usb3_check_id(usb3); return count; @@ -2468,6 +2477,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) debugfs_remove_recursive(usb3->dentry); device_remove_file(&pdev->dev, &dev_attr_role); + cancel_work_sync(&usb3->role_work); usb_role_switch_unregister(usb3->role_sw); usb_del_gadget_udc(&usb3->gadget); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 
6407e433bc78de40d3292a0461a93136d4b63e0c..72f1bc6a680e37d7f29431325550f1c3fa29e7ff 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1613,6 +1613,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1680,6 +1682,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 8608ac513fb764e4c8069cf870ecd1e12e068d1d..0daa6d98f9b91567a66d5811b828b5fedf9b8bc4 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1110,6 +1110,27 @@ int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup) return -EBUSY; } + /* Clear wakeup signal locked in S0 state when device plug in */ + if (ehci->zx_wakeup_clear == 1) { + u32 __iomem *reg = &ehci->regs->port_status[4]; + u32 t1 = ehci_readl(ehci, reg); + + t1 &= (u32)~0xf0000; + t1 |= PORT_TEST_FORCE; + ehci_writel(ehci, t1, reg); + t1 = ehci_readl(ehci, reg); + usleep_range(1000, 2000); + t1 &= (u32)~0xf0000; + ehci_writel(ehci, t1, reg); + usleep_range(1000, 2000); + t1 = ehci_readl(ehci, reg); + ehci_writel(ehci, t1 | PORT_CSC, reg); + udelay(500); + t1 = ehci_readl(ehci, &ehci->regs->status); + ehci_writel(ehci, t1 & STS_PCD, &ehci->regs->status); + ehci_readl(ehci, &ehci->regs->status); + } + return 0; } EXPORT_SYMBOL_GPL(ehci_suspend); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ce0eaf7d7c12a11f1fd9fb0c76d7275613669bfb..a99c1ac5d8c8bfbdb6795d29fc6d4c22d0acadb5 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -346,8 +346,12 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) unlink_empty_async_suspended(ehci); + /* Some Synopsys controllers mistakenly leave IAA turned on */ + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + /* Any IAA cycle that started before the suspend is now invalid */ end_iaa_cycle(ehci); + ehci_handle_start_intr_unlinks(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 7e4c13346a1ee154240c9beb9cfc91836da50a0e..7d20296cbe9f9459d31742a29049c4c9c518b27e 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -159,11 +159,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) /* get the PHY device */ phy = devm_usb_get_phy_by_phandle(dev, "phys", i); if (IS_ERR(phy)) { - /* Don't bail out if PHY is not absolutely necessary */ - if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) + ret = PTR_ERR(phy); + if (ret == -ENODEV) { /* no PHY */ + phy = NULL; continue; + } - ret = PTR_ERR(phy); if (ret != -EPROBE_DEFER) dev_err(dev, "Can't get PHY for port %d: %d\n", i, ret); diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index fe9422d3bcdc4cbb3263bd9b15449785ef4626ac..357dc140e91402d93bdb000f802442bc048e5f99 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -216,6 +216,10 @@ static int ehci_pci_setup(struct 
usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_ZHAOXIN: + if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90) + ehci->zx_wakeup_clear = 1; + break; } /* optional debug port, normally in the first BAR */ @@ -291,6 +295,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) ; /* ConneXT has no sbrn register */ + else if (pdev->vendor == PCI_VENDOR_ID_HUAWEI + && pdev->device == 0xa239) + ; /* HUAWEI Kunpeng920 USB EHCI has no sbrn register */ else pci_read_config_byte(pdev, 0x60, &ehci->sbrn); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3276304056952b5d377ffb55971e62140c414760..f643603c8de6dd925c95a0986ca4cbace1522d25 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -27,6 +27,10 @@ /*-------------------------------------------------------------------------*/ +/* PID Codes that are used here, from EHCI specification, Table 3-16. */ +#define PID_CODE_IN 1 +#define PID_CODE_SETUP 2 + /* fill a qtd, returning how much of the buffer we were able to queue up */ static int @@ -190,7 +194,7 @@ static int qtd_copy_status ( int status = -EINPROGRESS; /* count IN/OUT bytes, not SETUP (even short packets) */ - if (likely (QTD_PID (token) != 2)) + if (likely(QTD_PID(token) != PID_CODE_SETUP)) urb->actual_length += length - QTD_LENGTH (token); /* don't modify error codes */ @@ -206,6 +210,13 @@ static int qtd_copy_status ( if (token & QTD_STS_BABBLE) { /* FIXME "must" disable babbling device's port too */ status = -EOVERFLOW; + /* + * When MMF is active and PID Code is IN, queue is halted. + * EHCI Specification, Table 4-13. + */ + } else if ((token & QTD_STS_MMF) && + (QTD_PID(token) == PID_CODE_IN)) { + status = -EPROTO; /* CERR nonzero + halt --> stall */ } else if (QTD_CERR(token)) { status = -EPIPE; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index c8e9a48e1d517db699120899368355d84441cecf..9625b6030757858cb029eb191c8f19f72a823080 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -218,6 +218,7 @@ struct ehci_hcd { /* one per controller */ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ unsigned need_oc_pp_cycle:1; /* MPC834X port power */ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ + unsigned zx_wakeup_clear:1; /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index e64eb47770c8bb0f2b2c89d4abf9d6e3abf85140..2d5a72c15069e4caee454080045ecae6c8afd8d4 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -1627,6 +1627,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* see what we found out */ temp = check_reset_complete(fotg210, wIndex, status_reg, fotg210_readl(fotg210, status_reg)); + + /* restart schedule */ + fotg210->command |= CMD_RUN; + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command); } if (!(temp & (PORT_RESUME|PORT_RESET))) { diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 684d6f074c3a490a291109ff9603ec4b44f84e58..6968b9f2b76b5865bd70e7366b2701f6b2c50f6f 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -159,7 +159,7 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd) return result; error_set_cluster_id: - wusb_cluster_id_put(wusbhc->cluster_id); + wusb_cluster_id_put(addr); 
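
The ehci-q change above replaces the magic PID value with named macros and halts the queue with -EPROTO when the missed-microframe status bit is set on an IN transfer. A standalone decode of the relevant qTD token bits (positions per the EHCI spec; the macro names are local stand-ins)::

    #include <stdint.h>
    #include <stdio.h>

    #define QTD_PID(tok)   (((tok) >> 8) & 0x3)  /* PID code, token bits 9:8 */
    #define QTD_STS_MMF    (1u << 2)             /* missed micro-frame */
    #define PID_CODE_IN    1

    /* Mirror of the new branch: MMF plus an IN PID means the queue halts. */
    static int classify(uint32_t token)
    {
        if ((token & QTD_STS_MMF) && QTD_PID(token) == PID_CODE_IN)
            return -71;                          /* -EPROTO */
        return 0;
    }

    int main(void)
    {
        uint32_t token = (PID_CODE_IN << 8) | QTD_STS_MMF;

        printf("%d\n", classify(token));         /* -71 */
        return 0;
    }
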
error_cluster_id_get: goto out; @@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc) top = itr + itr_size; result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), - USB_DT_SECURITY, (void **) &secd); + USB_DT_SECURITY, (void **) &secd, sizeof(*secd)); if (result == -1) { dev_warn(dev, "BUG? WUSB host has no security descriptors\n"); return 0; diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index afa321ab55fcf82bd6d473c06cc0efade3ad486b..4a33a18dc0b310b98abb92445e4754cfb74a0552 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -153,8 +153,6 @@ struct max3421_hcd { */ struct urb *curr_urb; enum scheduling_pass sched_pass; - struct usb_device *loaded_dev; /* dev that's loaded into the chip */ - int loaded_epnum; /* epnum whose toggles are loaded */ int urb_done; /* > 0 -> no errors, < 0: errno */ size_t curr_len; u8 hien; @@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) * Caller must NOT hold HCD spinlock. */ static void -max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, - int force_toggles) +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) { - struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); - int old_epnum, same_ep, rcvtog, sndtog; - struct usb_device *old_dev; + int rcvtog, sndtog; u8 hctl; - old_dev = max3421_hcd->loaded_dev; - old_epnum = max3421_hcd->loaded_epnum; - - same_ep = (dev == old_dev && epnum == old_epnum); - if (same_ep && !force_toggles) - return; - - if (old_dev && !same_ep) { - /* save the old end-points toggles: */ - u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); - - rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; - sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; - - /* no locking: HCD (i.e., we) own toggles, don't we? */ - usb_settoggle(old_dev, old_epnum, 0, rcvtog); - usb_settoggle(old_dev, old_epnum, 1, sndtog); - } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); - max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl); /* @@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. 
*/ - max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); } @@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); struct urb *urb, *curr_urb = NULL; struct max3421_ep *max3421_ep; - int epnum, force_toggles = 0; + int epnum; struct usb_host_endpoint *ep; struct list_head *pos; unsigned long flags; @@ -777,7 +752,6 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) usb_settoggle(urb->dev, epnum, 0, 1); usb_settoggle(urb->dev, epnum, 1, 1); max3421_ep->pkt_state = PKT_STATE_SETUP; - force_toggles = 1; } else max3421_ep->pkt_state = PKT_STATE_TRANSFER; } @@ -785,7 +759,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) spin_unlock_irqrestore(&max3421_hcd->lock, flags); max3421_ep->last_active = max3421_hcd->frame_number; - max3421_set_address(hcd, urb->dev, epnum, force_toggles); + max3421_set_address(hcd, urb->dev, epnum); max3421_set_speed(hcd, urb->dev); max3421_next_transfer(hcd, 0); return 1; @@ -1380,6 +1354,16 @@ max3421_urb_done(struct usb_hcd *hcd) status = 0; urb = max3421_hcd->curr_urb; if (urb) { + /* save the old end-points toggles: */ + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; + int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + int epnum = usb_endpoint_num(&urb->ep->desc); + + /* no locking: HCD (i.e., we) own toggles, don't we? */ + usb_settoggle(urb->dev, epnum, 0, rcvtog); + usb_settoggle(urb->dev, epnum, 1, sndtog); + max3421_hcd->curr_urb = NULL; spin_lock_irqsave(&max3421_hcd->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index e9867395402016591bc68b1aa20d8c45b5e4c74a..ec6739ef3129e405f594f4486e739a31f5c657a9 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -551,6 +551,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) pdata->overcurrent_pin[i] = devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc", i, GPIOD_IN); + if (!pdata->overcurrent_pin[i]) + continue; if (IS_ERR(pdata->overcurrent_pin[i])) { err = PTR_ERR(pdata->overcurrent_pin[i]); dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 210181fd98d2e9d6e850662becf6ec08f7b901c2..af11887f5f9e4b9619534b431d684f50099b2664 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -418,8 +418,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci) * other cases where the next software may expect clean state from the * "firmware". this is bus-neutral, unlike shutdown() methods. 
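
In the max3421 hunks above, the data-toggle bookkeeping moves from max3421_set_address() to max3421_urb_done(), where the toggles reported in HRSL are copied back into the USB core's per-endpoint state. A minimal model of that bit extraction (bit positions as assumed from the MAX3421E HRSL layout used by the driver)::

    #include <stdint.h>
    #include <stdio.h>

    #define HRSL_RCVTOGRD_BIT 4   /* receive toggle read back from the chip */
    #define HRSL_SNDTOGRD_BIT 5   /* send toggle read back from the chip */

    int main(void)
    {
        uint8_t hrsl = 1u << HRSL_SNDTOGRD_BIT;     /* sample register value */
        int rcvtog = (hrsl >> HRSL_RCVTOGRD_BIT) & 1;
        int sndtog = (hrsl >> HRSL_SNDTOGRD_BIT) & 1;

        /* these two values are what usb_settoggle() would record */
        printf("rcvtog=%d sndtog=%d\n", rcvtog, sndtog);
        return 0;
    }
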
*/ -static void -ohci_shutdown (struct usb_hcd *hcd) +static void _ohci_shutdown(struct usb_hcd *hcd) { struct ohci_hcd *ohci; @@ -435,6 +434,16 @@ ohci_shutdown (struct usb_hcd *hcd) ohci->rh_state = OHCI_RH_HALTED; } +static void ohci_shutdown(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + unsigned long flags; + + spin_lock_irqsave(&ohci->lock, flags); + _ohci_shutdown(hcd); + spin_unlock_irqrestore(&ohci->lock, flags); +} + /*-------------------------------------------------------------------------* * HC functions *-------------------------------------------------------------------------*/ @@ -752,7 +761,7 @@ static void io_watchdog_func(struct timer_list *t) died: usb_hc_died(ohci_to_hcd(ohci)); ohci_dump(ohci); - ohci_shutdown(ohci_to_hcd(ohci)); + _ohci_shutdown(ohci_to_hcd(ohci)); goto done; } else { /* No write back because the done queue was empty */ diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index a631dbb369d76a4bf17c0b8cf96c1f8aa6c8afeb..06ee17dddc0e8678a2d92628e052662ec49c0b40 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -196,7 +196,7 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev) if (usb_disabled()) return -ENODEV; - if (!cell) + if (!cell || !regs || !config || !sram) return -EINVAL; hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev)); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3625a5c1a41b81c88176bcc6a0b9e5c94bf4d59a..070c66f86e67d9ccce855378bdcfd5e9ef2e4204 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void) { unsigned long flags; struct amd_chipset_info info; - int ret; + int need_pll_quirk = 0; spin_lock_irqsave(&amd_lock, flags); @@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void) spin_unlock_irqrestore(&amd_lock, flags); if (!amd_chipset_sb_type_init(&info)) { - ret = 0; goto commit; } - /* Below chipset generations needn't enable AMD PLL quirk */ - if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || - info.sb_type.gen == AMD_CHIPSET_SB600 || - info.sb_type.gen == AMD_CHIPSET_YANGTZE || - (info.sb_type.gen == AMD_CHIPSET_SB700 && - info.sb_type.rev > 0x3b)) { + switch (info.sb_type.gen) { + case AMD_CHIPSET_SB700: + need_pll_quirk = info.sb_type.rev <= 0x3B; + break; + case AMD_CHIPSET_SB800: + case AMD_CHIPSET_HUDSON2: + case AMD_CHIPSET_BOLTON: + need_pll_quirk = 1; + break; + default: + need_pll_quirk = 0; + break; + } + + if (!need_pll_quirk) { if (info.smbus_dev) { pci_dev_put(info.smbus_dev); info.smbus_dev = NULL; } - ret = 0; goto commit; } @@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void) } } - ret = info.probe_result = 1; + need_pll_quirk = info.probe_result = 1; printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); commit: @@ -263,7 +270,7 @@ int usb_amd_find_chipset_info(void) /* Mark that we where here */ amd_chipset.probe_count++; - ret = amd_chipset.probe_result; + need_pll_quirk = amd_chipset.probe_result; spin_unlock_irqrestore(&amd_lock, flags); @@ -277,7 +284,7 @@ int usb_amd_find_chipset_info(void) spin_unlock_irqrestore(&amd_lock, flags); } - return ret; + return need_pll_quirk; } EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 984892dd72f550a2de7dafac8a2d64ca8a0ab215..42668aeca57c8a96a9a42fce95ceab4bd1b18bd4 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -1979,6 +1979,8 @@ 
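
The ohci-hcd change above splits shutdown into _ohci_shutdown(), which expects the caller to hold the lock, and a public ohci_shutdown() wrapper that takes it; the watchdog path, which already holds the lock, now calls the inner helper and so cannot deadlock on itself. A userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock::

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void _shutdown(void)            /* caller must hold the lock */
    {
        printf("controller halted\n");
    }

    static void shutdown_public(void)      /* external entry point */
    {
        pthread_mutex_lock(&lock);
        _shutdown();
        pthread_mutex_unlock(&lock);
    }

    static void watchdog(void)             /* runs with the lock held */
    {
        pthread_mutex_lock(&lock);
        _shutdown();                       /* calling shutdown_public() here
                                            * would deadlock on the lock */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        shutdown_public();
        watchdog();
        return 0;
    }
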
static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, static void r8a66597_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) +__acquires(r8a66597->lock) +__releases(r8a66597->lock) { struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv; @@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd, return; pipenum = pipe->info.pipenum; + spin_lock_irqsave(&r8a66597->lock, flags); if (pipenum == 0) { kfree(hep->hcpriv); hep->hcpriv = NULL; + spin_unlock_irqrestore(&r8a66597->lock, flags); return; } - spin_lock_irqsave(&r8a66597->lock, flags); pipe_stop(r8a66597, pipe); pipe_irq_disable(r8a66597, pipenum); disable_irq_empty(r8a66597, pipenum); diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 5b8a3d9530c4a89bacb3850852c72d3a19c979dd..5cac83aaeac3abe5fbc64344429a2bf39100e6a5 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -3202,6 +3202,9 @@ static int __init u132_hcd_init(void) printk(KERN_INFO "driver %s\n", hcd_name); workqueue = create_singlethread_workqueue("u132"); retval = platform_driver_register(&u132_platform_driver); + if (retval) + destroy_workqueue(workqueue); + return retval; } diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 7f9f33c8c232413d5e85a01e29b3de4ec5151b9a..f2252f46c3e686b3bde56136aa30fc1385f96d1f 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h @@ -432,6 +432,7 @@ struct uhci_hcd { unsigned int big_endian_mmio:1; /* Big endian registers */ unsigned int big_endian_desc:1; /* Big endian descriptors */ unsigned int is_aspeed:1; /* Aspeed impl. workarounds */ + unsigned int auto_suspend_delay:1; /* delay root hub autosuspend*/ /* Support for port suspend/resume/reset */ unsigned long port_c_suspend; /* Bit-arrays of ports */ diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 47106dd8ca7cce69d675f74e2d9470c1eb35e7a2..7ff2ba4deae50b0092d91eb54a60f1c9bf883dd4 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -217,7 +217,10 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* are any devices attached? 
*/ if (!any_ports_active(uhci)) { uhci->rh_state = UHCI_RH_RUNNING_NODEVS; - uhci->auto_stop_time = jiffies + HZ; + if (!uhci->auto_suspend_delay) + uhci->auto_stop_time = jiffies + HZ; + else + uhci->auto_stop_time = jiffies + 3 * HZ; } break; diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 0dd944277c99b04f9575281641d1587d60bc7310..8dfd051b84b3cd1c73e52c322944204fa31f817d 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -134,6 +134,11 @@ static int uhci_pci_init(struct usb_hcd *hcd) if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) device_set_wakeup_capable(uhci_dev(uhci), true); + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN) { + uhci->oc_low = 1; + uhci->auto_suspend_delay = 1; + } + /* Set up pointers to PCI-specific functions */ uhci->reset_hc = uhci_pci_reset_hc; uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 86cff5c28eff8a3b41512ddb42e18ffedf4e829b..ba841c569c4823a287d6f0b5e3de3062e1a60b52 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci) return -1; writel(0, &dbc->regs->control); - xhci_dbc_mem_cleanup(xhci); dbc->state = DS_DISABLED; return 0; @@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) ret = xhci_do_dbc_stop(xhci); spin_unlock_irqrestore(&dbc->lock, flags); - if (!ret) + if (!ret) { + xhci_dbc_mem_cleanup(xhci); pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); + } } static void diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index cadc01336bf80ef87d68a7161bcd6f796aa1ab08..76c3f29562d2b9d2e3263f3d3dc1b45b92728140 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s, trb = &seg->trbs[i]; dma = seg->dma + i * sizeof(*trb); seq_printf(s, "%pad: %s\n", &dma, - xhci_decode_trb(trb->generic.field[0], - trb->generic.field[1], - trb->generic.field[2], - trb->generic.field[3])); + xhci_decode_trb(le32_to_cpu(trb->generic.field[0]), + le32_to_cpu(trb->generic.field[1]), + le32_to_cpu(trb->generic.field[2]), + le32_to_cpu(trb->generic.field[3]))); } } @@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma, - xhci_decode_slot_context(slot_ctx->dev_info, - slot_ctx->dev_info2, - slot_ctx->tt_info, - slot_ctx->dev_state)); + xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info), + le32_to_cpu(slot_ctx->dev_info2), + le32_to_cpu(slot_ctx->tt_info), + le32_to_cpu(slot_ctx->dev_state))); return 0; } @@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, - xhci_decode_ep_context(ep_ctx->ep_info, - ep_ctx->ep_info2, - ep_ctx->deq, - ep_ctx->tx_info)); + xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), + le32_to_cpu(ep_ctx->ep_info2), + le64_to_cpu(ep_ctx->deq), + le32_to_cpu(ep_ctx->tx_info))); } return 0; @@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, struct xhci_ep_priv *epriv; struct xhci_slot_priv *spriv = dev->debugfs_private; + if (!spriv) + return; + if 
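
The xhci-debugfs hunks above convert TRB and context fields with le32_to_cpu()/le64_to_cpu() before handing them to the decode helpers, which expect CPU byte order. A standalone illustration of why the conversion matters, with a hand-rolled little-endian load standing in for the kernel macro::

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a CPU-order value from little-endian bytes, as le32_to_cpu()
     * would have to do on a big-endian host. */
    static uint32_t le32_load(const uint8_t b[4])
    {
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    int main(void)
    {
        uint8_t control[4] = { 0x00, 0x18, 0x00, 0x00 };  /* bytes as stored in the ring */
        uint32_t field = le32_load(control);

        /* TRB type lives in bits 15:10 of the control word: 6 = Link TRB */
        printf("trb type %u\n", (field >> 10) & 0x3f);
        return 0;
    }
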
(spriv->eps[ep_index]) return; diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 27f00160332e2186327da464d7be327d82bc3170..3c4abb5a1c3fc6bdb86e749ab62cf256381c1f75 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c @@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev) struct xhci_hcd_histb *histb = platform_get_drvdata(dev); struct usb_hcd *hcd = histb->hcd; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct usb_hcd *shared_hcd = xhci->shared_hcd; xhci->xhc_state |= XHCI_STATE_REMOVING; - usb_remove_hcd(xhci->shared_hcd); + usb_remove_hcd(shared_hcd); + xhci->shared_hcd = NULL; device_wakeup_disable(&dev->dev); usb_remove_hcd(hcd); - usb_put_hcd(xhci->shared_hcd); + usb_put_hcd(shared_hcd); xhci_histb_host_disable(histb); usb_put_hcd(hcd); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 7e2a531ba321d4d3d50b24fe1278332e128821c1..9c7b4394e0bb019d565d3c5e9a7ef969d69e8e79 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = { static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, u16 wLength) { + struct xhci_port_cap *port_cap = NULL; int i, ssa_count; u32 temp; u16 desc_size, ssp_cap_size, ssa_size = 0; @@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size; /* does xhci support USB 3.1 Enhanced SuperSpeed */ - if (xhci->usb3_rhub.min_rev >= 0x01) { + for (i = 0; i < xhci->num_port_caps; i++) { + if (xhci->port_caps[i].maj_rev == 0x03 && + xhci->port_caps[i].min_rev >= 0x01) { + usb3_1 = true; + port_cap = &xhci->port_caps[i]; + break; + } + } + + if (usb3_1) { /* does xhci provide a PSI table for SSA speed attributes? 
*/ - if (xhci->usb3_rhub.psi_count) { + if (port_cap->psi_count) { /* two SSA entries for each unique PSI ID, RX and TX */ - ssa_count = xhci->usb3_rhub.psi_uid_count * 2; + ssa_count = port_cap->psi_uid_count * 2; ssa_size = ssa_count * sizeof(u32); ssp_cap_size -= 16; /* skip copying the default SSA */ } desc_size += ssp_cap_size; - usb3_1 = true; } memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength)); @@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, } /* If PSI table exists, add the custom speed attributes from it */ - if (usb3_1 && xhci->usb3_rhub.psi_count) { + if (usb3_1 && port_cap->psi_count) { u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; int offset; @@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */ bm_attrib = (ssa_count - 1) & 0x1f; - bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5; + bm_attrib |= (port_cap->psi_uid_count - 1) << 5; put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]); if (wLength < desc_size + ssa_size) @@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, * USB 3.1 requires two SSA entries (RX and TX) for every link */ offset = desc_size; - for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { - psi = xhci->usb3_rhub.psi[i]; + for (i = 0; i < port_cap->psi_count; i++) { + psi = port_cap->psi[i]; psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; psi_exp = XHCI_EXT_PORT_PSIE(psi); psi_mant = XHCI_EXT_PORT_PSIM(psi); @@ -615,6 +624,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, continue; retval = xhci_disable_slot(xhci, i); + xhci_free_virt_device(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", i, retval); @@ -831,7 +841,7 @@ static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li) static u32 xhci_get_port_status(struct usb_hcd *hcd, struct xhci_bus_state *bus_state, u16 wIndex, u32 raw_port_status, - unsigned long flags) + unsigned long *flags) __releases(&xhci->lock) __acquires(&xhci->lock) { @@ -868,6 +878,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_BH_RESET << 16; if ((raw_port_status & PORT_CEC)) status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; + + /* USB3 remote wake resume signaling completed */ + if (bus_state->port_remote_wakeup & (1 << wIndex) && + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME && + (raw_port_status & PORT_PLS_MASK) != XDEV_RECOVERY) { + bus_state->port_remote_wakeup &= ~(1 << wIndex); + usb_hcd_end_port_resume(&hcd->self, wIndex); + } } if (hcd->speed < HCD_USB3) { @@ -876,7 +894,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_SUSPEND; } if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && - !DEV_SUPERSPEED_ANY(raw_port_status)) { + !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) { if ((raw_port_status & PORT_RESET) || !(raw_port_status & PORT_PE)) return 0xffffffff; @@ -900,6 +918,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, set_bit(wIndex, &bus_state->resuming_ports); bus_state->resume_done[wIndex] = timeout; mod_timer(&hcd->rh_timer, timeout); + usb_hcd_start_port_resume(&hcd->self, wIndex); } /* Has resume been signalled for USB_RESUME_TIME yet? 
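
The BOS-descriptor rework above builds the SuperSpeedPlus capability from the matching port capability block; its attribute dword packs the attribute count (SSAC) into bits 4:0 and the sublink-speed-ID count (SSIC) into bits 8:5, both stored minus one. A worked example of that packing::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int psi_uid_count = 2;              /* unique PSI IDs */
        unsigned int ssa_count = psi_uid_count * 2;  /* one RX + one TX entry each */
        uint32_t bm_attrib;

        bm_attrib  = (ssa_count - 1) & 0x1f;         /* SSAC, bits 4:0 */
        bm_attrib |= (psi_uid_count - 1) << 5;       /* SSIC, bits 8:5 */

        printf("bmAttributes = 0x%08x\n", bm_attrib); /* 0x00000023 */
        return 0;
    }
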
*/ } else if (time_after_eq(jiffies, @@ -916,12 +935,12 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, xhci_test_and_clear_bit(xhci, port, PORT_PLC); xhci_set_link_state(xhci, port, XDEV_U0); - spin_unlock_irqrestore(&xhci->lock, flags); + spin_unlock_irqrestore(&xhci->lock, *flags); time_left = wait_for_completion_timeout( &bus_state->rexit_done[wIndex], msecs_to_jiffies( - XHCI_MAX_REXIT_TIMEOUT)); - spin_lock_irqsave(&xhci->lock, flags); + XHCI_MAX_REXIT_TIMEOUT_MS)); + spin_lock_irqsave(&xhci->lock, *flags); if (time_left) { slot_id = xhci_find_slot_id_by_port(hcd, @@ -934,12 +953,13 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, } else { int port_status = readl(port->addr); xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", - XHCI_MAX_REXIT_TIMEOUT, + XHCI_MAX_REXIT_TIMEOUT_MS, port_status); status |= USB_PORT_STAT_SUSPEND; clear_bit(wIndex, &bus_state->rexit_ports); } + usb_hcd_end_port_resume(&hcd->self, wIndex); bus_state->port_c_suspend |= 1 << wIndex; bus_state->suspended_ports &= ~(1 << wIndex); } else { @@ -962,6 +982,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) { bus_state->resume_done[wIndex] = 0; clear_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_end_port_resume(&hcd->self, wIndex); } @@ -1073,7 +1094,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } trace_xhci_get_port_status(wIndex, temp); status = xhci_get_port_status(hcd, bus_state, wIndex, temp, - flags); + &flags); if (status == 0xffffffff) goto error; @@ -1093,7 +1114,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } port_li = readl(ports[wIndex]->addr + PORTLI); status = xhci_get_ext_port_status(temp, port_li); - put_unaligned_le32(cpu_to_le32(status), &buf[4]); + put_unaligned_le32(status, &buf[4]); } break; case SetPortFeature: @@ -1337,6 +1358,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; set_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_start_port_resume(&hcd->self, wIndex); xhci_set_link_state(xhci, ports[wIndex], XDEV_RESUME); spin_unlock_irqrestore(&xhci->lock, flags); @@ -1345,6 +1367,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_set_link_state(xhci, ports[wIndex], XDEV_U0); clear_bit(wIndex, &bus_state->resuming_ports); + usb_hcd_end_port_resume(&hcd->self, wIndex); } bus_state->port_c_suspend |= 1 << wIndex; @@ -1469,15 +1492,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd) unsigned long flags; struct xhci_hub *rhub; struct xhci_port **ports; + u32 portsc_buf[USB_MAXCHILDREN]; + bool wake_enabled; rhub = xhci_get_rhub(hcd); ports = rhub->ports; max_ports = rhub->num_ports; bus_state = &xhci->bus_state[hcd_index(hcd)]; + wake_enabled = hcd->self.root_hub->do_remote_wakeup; spin_lock_irqsave(&xhci->lock, flags); - if (hcd->self.root_hub->do_remote_wakeup) { + if (wake_enabled) { if (bus_state->resuming_ports || /* USB2 */ bus_state->port_remote_wakeup) { /* USB3 */ spin_unlock_irqrestore(&xhci->lock, flags); @@ -1485,26 +1511,42 @@ int xhci_bus_suspend(struct usb_hcd *hcd) return -EBUSY; } } - - port_index = max_ports; + /* + * Prepare ports for suspend, but don't write anything before all ports + * are checked and we know bus suspend can proceed + */ bus_state->bus_suspended = 0; + port_index = max_ports; while (port_index--) { - /* suspend the port if the port is not suspended */ u32 t1, t2; - int slot_id; - + int retries = 10; +retry: t1 = readl(ports[port_index]->addr); t2 = 
xhci_port_state_to_neutral(t1); + portsc_buf[port_index] = 0; - if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { - xhci_dbg(xhci, "port %d not suspended\n", port_index); - slot_id = xhci_find_slot_id_by_port(hcd, xhci, - port_index + 1); - if (slot_id) { + /* + * Give a USB3 port in link training time to finish, but don't + * prevent suspend as port might be stuck + */ + if ((hcd->speed >= HCD_USB3) && retries-- && + (t1 & PORT_PLS_MASK) == XDEV_POLLING) { + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(XHCI_PORT_POLLING_LFPS_TIME); + spin_lock_irqsave(&xhci->lock, flags); + xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n", + port_index); + goto retry; + } + /* suspend ports in U0, or bail out for new connect changes */ + if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { + if ((t1 & PORT_CSC) && wake_enabled) { + bus_state->bus_suspended = 0; spin_unlock_irqrestore(&xhci->lock, flags); - xhci_stop_device(xhci, slot_id, 1); - spin_lock_irqsave(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port connect change\n"); + return -EBUSY; } + xhci_dbg(xhci, "port %d not suspended\n", port_index); t2 &= ~PORT_PLS_MASK; t2 |= PORT_LINK_STROBE | XDEV_U3; set_bit(port_index, &bus_state->bus_suspended); @@ -1513,7 +1555,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM * is enabled, so also enable remote wake here. */ - if (hcd->self.root_hub->do_remote_wakeup) { + if (wake_enabled) { if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; @@ -1533,7 +1575,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t1 = xhci_port_state_to_neutral(t1); if (t1 != t2) - writel(t2, ports[port_index]->addr); + portsc_buf[port_index] = t2; + } + + /* write port settings, stopping and suspending ports if needed */ + port_index = max_ports; + while (port_index--) { + if (!portsc_buf[port_index]) + continue; + if (test_bit(port_index, &bus_state->bus_suspended)) { + int slot_id; + + slot_id = xhci_find_slot_id_by_port(hcd, xhci, + port_index + 1); + if (slot_id) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_stop_device(xhci, slot_id, 1); + spin_lock_irqsave(&xhci->lock, flags); + } + } + writel(portsc_buf[port_index], ports[port_index]->addr); } hcd->state = HC_STATE_SUSPENDED; bus_state->next_statechange = jiffies + msecs_to_jiffies(10); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b1f27aa38b1008c0edc8f425e0caf4b4a2c06959..a6101f095db81bbcee04389d57984f0dfba1aa3b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Allow 3 retries for everything but isoc, set CErr = 3 */ if (!usb_endpoint_xfer_isoc(&ep->desc)) err_count = 3; - /* Some devices get this wrong */ - if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH) - max_packet = 512; + /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */ + if (usb_endpoint_xfer_bulk(&ep->desc)) { + if (udev->speed == USB_SPEED_HIGH) + max_packet = 512; + if (udev->speed == USB_SPEED_FULL) { + max_packet = rounddown_pow_of_two(max_packet); + max_packet = clamp_val(max_packet, 8, 64); + } + } /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) avg_trb_len = 8; @@ -1913,6 +1919,10 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) kfree(xhci->hw_ports); kfree(xhci->rh_bw); kfree(xhci->ext_caps); + for (i = 0; i < 
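
The xhci-mem hunk above tightens the endpoint-context fixups: high-speed bulk endpoints are forced to a 512-byte max packet, and full-speed bulk values are rounded down to a power of two and clamped to the 8..64 range the spec allows. A standalone model of the full-speed adjustment::

    #include <stdio.h>

    static unsigned int rounddown_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    static unsigned int fs_bulk_max_packet(unsigned int max_packet)
    {
        max_packet = rounddown_pow2(max_packet);
        if (max_packet < 8)
            max_packet = 8;
        if (max_packet > 64)
            max_packet = 64;
        return max_packet;
    }

    int main(void)
    {
        printf("%u %u %u\n", fs_bulk_max_packet(9),    /* 8 */
               fs_bulk_max_packet(63),                 /* 32 */
               fs_bulk_max_packet(512));               /* 64 */
        return 0;
    }
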
xhci->num_port_caps; i++) + kfree(xhci->port_caps[i].psi); + kfree(xhci->port_caps); + xhci->num_port_caps = 0; xhci->usb2_rhub.ports = NULL; xhci->usb3_rhub.ports = NULL; @@ -2116,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, u8 major_revision, minor_revision; struct xhci_hub *rhub; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_port_cap *port_cap; temp = readl(addr); major_revision = XHCI_EXT_PORT_MAJOR(temp); @@ -2150,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, /* WTF? "Valid values are ‘1’ to MaxPorts" */ return; - rhub->psi_count = XHCI_EXT_PORT_PSIC(temp); - if (rhub->psi_count) { - rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi), - GFP_KERNEL, dev_to_node(dev)); - if (!rhub->psi) - rhub->psi_count = 0; + port_cap = &xhci->port_caps[xhci->num_port_caps++]; + if (xhci->num_port_caps > max_caps) + return; + + port_cap->maj_rev = major_revision; + port_cap->min_rev = minor_revision; + port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp); + + if (port_cap->psi_count) { + port_cap->psi = kcalloc_node(port_cap->psi_count, + sizeof(*port_cap->psi), + GFP_KERNEL, dev_to_node(dev)); + if (!port_cap->psi) + port_cap->psi_count = 0; - rhub->psi_uid_count++; - for (i = 0; i < rhub->psi_count; i++) { - rhub->psi[i] = readl(addr + 4 + i); + port_cap->psi_uid_count++; + for (i = 0; i < port_cap->psi_count; i++) { + port_cap->psi[i] = readl(addr + 4 + i); /* count unique ID values, two consecutive entries can * have the same ID if link is assymetric */ - if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) != - XHCI_EXT_PORT_PSIV(rhub->psi[i - 1]))) - rhub->psi_uid_count++; + if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) != + XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1]))) + port_cap->psi_uid_count++; xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n", - XHCI_EXT_PORT_PSIV(rhub->psi[i]), - XHCI_EXT_PORT_PSIE(rhub->psi[i]), - XHCI_EXT_PORT_PLT(rhub->psi[i]), - XHCI_EXT_PORT_PFD(rhub->psi[i]), - XHCI_EXT_PORT_LP(rhub->psi[i]), - XHCI_EXT_PORT_PSIM(rhub->psi[i])); + XHCI_EXT_PORT_PSIV(port_cap->psi[i]), + XHCI_EXT_PORT_PSIE(port_cap->psi[i]), + XHCI_EXT_PORT_PLT(port_cap->psi[i]), + XHCI_EXT_PORT_PFD(port_cap->psi[i]), + XHCI_EXT_PORT_LP(port_cap->psi[i]), + XHCI_EXT_PORT_PSIM(port_cap->psi[i])); } } /* cache usb2 port capabilities */ @@ -2221,6 +2240,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, continue; } hw_port->rhub = rhub; + hw_port->port_cap = port_cap; rhub->num_ports++; } /* FIXME: Should we disable ports not in the Extended Capabilities? */ @@ -2311,6 +2331,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->ext_caps) return -ENOMEM; + xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), + flags, dev_to_node(dev)); + if (!xhci->port_caps) + return -ENOMEM; + offset = cap_start; while (offset) { @@ -2425,8 +2450,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 
*/ - xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, - TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + /* With xHCI TRB prefetch patch:To fix cross page boundry access issue + * in IOV environment */ + if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) { + xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, + TRB_SEGMENT_SIZE*2, TRB_SEGMENT_SIZE*2, xhci->page_size*2); + } else { + xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, + TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + } /* See Table 46 and Note on Figure 55 */ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fa33d6e5b1cbd8aaab17c4995088d1b544530530..d04fdd173ed2eff9bd77740cf69e1fc847b8ac7b 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -113,7 +113,9 @@ static void setup_sch_info(struct usb_device *udev, } if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) { - if (esit_pkts <= sch_ep->esit) + if (sch_ep->esit == 1) + sch_ep->pkts = esit_pkts; + else if (esit_pkts <= sch_ep->esit) sch_ep->pkts = 1; else sch_ep->pkts = roundup_pow_of_two(esit_pkts) diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 71d0d33c3286254b0327646720161df6ba5cc1e9..60987c787e44f457d918659140a0671ed7a717e8 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); struct usb_hcd *hcd = mtk->hcd; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct usb_hcd *shared_hcd = xhci->shared_hcd; - usb_remove_hcd(xhci->shared_hcd); + usb_remove_hcd(shared_hcd); + xhci->shared_hcd = NULL; device_init_wakeup(&dev->dev, false); usb_remove_hcd(hcd); - usb_put_hcd(xhci->shared_hcd); + usb_put_hcd(shared_hcd); usb_put_hcd(hcd); xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 51dd8e00c4f8ea20a53b48e38c2b0552e5cb3acc..4c2376a4c1d6b2eb0a5e14b9f08275fd06430ea0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -41,6 +41,7 @@ #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba @@ -132,6 +133,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x43bb)) xhci->quirks |= XHCI_SUSPEND_DELAY; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == 0x15e0 || pdev->device == 0x15e1)) + xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND; + if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -175,7 +180,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -183,6 +189,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_SSIC_PORT_UNUSED; if (pdev->vendor == PCI_VENDOR_ID_INTEL && 
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) xhci->quirks |= XHCI_INTEL_USB_ROLE_SW; if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -211,12 +218,24 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) { + xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_ZHAOXIN_HOST; + } + + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) + xhci->quirks |= XHCI_SUSPEND_DELAY; /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && + (pdev->device == 0x9202 || + pdev->device == 0x9203)) + xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; @@ -231,6 +250,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && pdev->device == 0x9202) + xhci->quirks |= XHCI_RESET_ON_RESUME; + + if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM || + pdev->vendor == PCI_VENDOR_ID_CAVIUM) && + pdev->device == 0x9026) + xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); @@ -273,12 +300,29 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return 0; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(pdev); + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); /* Find any debug ports */ return xhci_pci_reinit(xhci, pdev); } +#ifdef CONFIG_ARM64 +#include +static void phytium_xhci_pci_workaround(struct pci_dev *dev) +{ + u32 midr = read_cpuid_id(); + + /* Firmware bug, DMA mask is not reported by the firmware */ + if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_FT_2000PLUS) + dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); +} +#else +static inline void phytium_xhci_pci_workaround(struct pci_dev *dev) { } +#endif + /* * We need to register our own PCI probe function (instead of the USB core's * function) in order to create a second roothub under xHCI. 
@@ -292,6 +336,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) driver = (struct hc_driver *)id->driver_data; + phytium_xhci_pci_workaround(dev); + /* Prevent runtime suspending between USB-2 and USB-3 initialization */ pm_runtime_get_noresume(&dev->dev); @@ -330,9 +376,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_acpi_rtd3_enable(dev); - /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -356,6 +399,7 @@ static void xhci_pci_remove(struct pci_dev *dev) if (xhci->shared_hcd) { usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); + xhci->shared_hcd = NULL; } /* Workaround for spurious wakeups at shutdown with HSW */ @@ -482,6 +526,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) retval = xhci_resume(xhci, hibernated); return retval; } + +static void xhci_pci_shutdown(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + xhci_shutdown(hcd); + + /* Yet another workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + pci_set_power_state(pdev, PCI_D3hot); +} #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ @@ -519,6 +575,7 @@ static int __init xhci_pci_init(void) #ifdef CONFIG_PM xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend; xhci_pci_hc_driver.pci_resume = xhci_pci_resume; + xhci_pci_hc_driver.shutdown = xhci_pci_shutdown; #endif return pci_register_driver(&xhci_pci_driver); } diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 94e939249b2b6dc0f65fb197a0b084889a7fb153..e5da8ce629141177f1f6c382d6308714c8929c76 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -359,14 +359,16 @@ static int xhci_plat_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct clk *clk = xhci->clk; struct clk *reg_clk = xhci->reg_clk; + struct usb_hcd *shared_hcd = xhci->shared_hcd; xhci->xhc_state |= XHCI_STATE_REMOVING; - usb_remove_hcd(xhci->shared_hcd); + usb_remove_hcd(shared_hcd); + xhci->shared_hcd = NULL; usb_phy_shutdown(hcd->usb_phy); usb_remove_hcd(hcd); - usb_put_hcd(xhci->shared_hcd); + usb_put_hcd(shared_hcd); clk_disable_unprepare(clk); clk_disable_unprepare(reg_clk); diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index a6e4637157799769cc0f77b6b2f815c7c4ad6490..2b0ccd150209fe3a1f55bb73d29a965fe1cd6f26 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev) return of_device_is_compatible(node, "renesas,xhci-r8a7790") || of_device_is_compatible(node, "renesas,xhci-r8a7791") || of_device_is_compatible(node, "renesas,xhci-r8a7793") || - of_device_is_compatible(node, "renensas,rcar-gen2-xhci"); + of_device_is_compatible(node, "renesas,rcar-gen2-xhci"); } static int xhci_rcar_is_gen3(struct device *dev) @@ -238,14 +238,20 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd) * pointers. So, this driver clears the AC64 bit of xhci->hcc_params * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in * xhci_gen_setup(). 
+ * + * And, since the firmware/internal CPU control the USBSTS.STS_HALT + * and the process speed is down when the roothub port enters U3, + * long delay for the handshake of STS_HALT is neeed in xhci_suspend(). */ if (xhci_rcar_is_gen2(hcd->self.controller) || - xhci_rcar_is_gen3(hcd->self.controller)) - xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + xhci_rcar_is_gen3(hcd->self.controller)) { + xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND; + } if (!xhci_rcar_wait_for_pll_active(hcd)) return -ETIMEDOUT; + xhci->quirks |= XHCI_TRUST_TX_LENGTH; return xhci_rcar_download_firmware(hcd); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f0a99aa0ac586c4cedb0724d26493b81b2a0b752..6658a352e82ffef2f8aff3330c6b9e3842285721 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -339,16 +339,29 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, /* Must be called with xhci->lock held, releases and aquires lock back */ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) { - u64 temp_64; + struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; + union xhci_trb *new_deq = xhci->cmd_ring->dequeue; + u64 crcr; int ret; xhci_dbg(xhci, "Abort command ring\n"); reinit_completion(&xhci->cmd_ring_stop_completion); - temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, - &xhci->op_regs->cmd_ring); + /* + * The control bits like command stop, abort are located in lower + * dword of the command ring control register. + * Some controllers require all 64 bits to be written to abort the ring. + * Make sure the upper dword is valid, pointing to the next command, + * avoiding corrupting the command ring pointer in case the command ring + * is stopped by the time the upper dword is written. + */ + next_trb(xhci, NULL, &new_seg, &new_deq); + if (trb_is_link(new_deq)) + next_trb(xhci, NULL, &new_seg, &new_deq); + + crcr = xhci_trb_virt_to_dma(new_seg, new_deq); + xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the * completion of the Command Abort operation. 
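
The xhci-ring comment above explains why the abort now writes all 64 bits of the command ring control register: the abort flag sits in the low dword, but some controllers latch the whole register, so the upper dword must carry a valid pointer to the next command rather than stale data. A small illustration of composing that value (the address is a made-up sample)::

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_RING_ABORT (1ull << 2)   /* CA bit in the low dword */

    int main(void)
    {
        /* hypothetical DMA address of the next command TRB */
        uint64_t next_cmd_dma = 0x00000001f1a2c000ull;
        uint64_t crcr = next_cmd_dma | CMD_RING_ABORT;

        printf("CRCR = 0x%016llx\n", (unsigned long long)crcr);
        return 0;
    }
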
If CRR is not negated in 5 @@ -656,6 +669,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct device *dev = xhci_to_hcd(xhci)->self.controller; struct xhci_segment *seg = td->bounce_seg; struct urb *urb = td->urb; + size_t len; if (!ring || !seg || !urb) return; @@ -666,11 +680,14 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, return; } - /* for in tranfers we need to copy the data from bounce to sg */ - sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf, - seg->bounce_len, seg->bounce_offs); dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); seg->bounce_len = 0; seg->bounce_offs = 0; } @@ -1181,7 +1198,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ xhci_free_device_endpoint_resources(xhci, virt_dev, true); - xhci_free_virt_device(xhci, slot_id); } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, @@ -1517,6 +1533,35 @@ static void handle_device_notification(struct xhci_hcd *xhci, usb_wakeup_notification(udev->parent, udev->portnum); } +/* + * Quirk hanlder for errata seen on Cavium ThunderX2 processor XHCI + * Controller. + * As per ThunderX2errata-129 USB 2 device may come up as USB 1 + * If a connection to a USB 1 device is followed by another connection + * to a USB 2 device. + * + * Reset the PHY after the USB device is disconnected if device speed + * is less than HCD_USB3. + * Retry the reset sequence max of 4 times checking the PLL lock status. 
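
The bounce-buffer change above unmaps the buffer before copying and checks the byte count the scatter-gather copy actually returned, warning on a short copy instead of silently trusting it. A toy sketch of that checked-copy pattern (the helper below only mimics how sg_pcopy_from_buffer() reports its result)::

    #include <stdio.h>
    #include <string.h>

    /* Copy up to dst_len bytes and report how many were actually copied. */
    static size_t bounded_copy(char *dst, size_t dst_len,
                               const char *src, size_t want)
    {
        size_t n = want < dst_len ? want : dst_len;

        memcpy(dst, src, n);
        return n;
    }

    int main(void)
    {
        char bounce[64] = "payload!";
        char sg_area[4];
        size_t len = bounded_copy(sg_area, sizeof(sg_area), bounce, 8);

        if (len != 8)
            fprintf(stderr,
                    "WARN Wrong bounce buffer read length: %zu != 8\n", len);
        return 0;
    }
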
+ * + */ +static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + u32 pll_lock_check; + u32 retry_count = 4; + + do { + /* Assert PHY reset */ + writel(0x6F, hcd->regs + 0x1048); + udelay(10); + /* De-assert the PHY reset */ + writel(0x7F, hcd->regs + 0x1048); + udelay(200); + pll_lock_check = readl(hcd->regs + 0x1070); + } while (!(pll_lock_check & 0x1) && --retry_count); +} + static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event) { @@ -1552,6 +1597,13 @@ static void handle_port_status(struct xhci_hcd *xhci, goto cleanup; } + /* We might get interrupts after shared_hcd is removed */ + if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { + xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n"); + bogus_port_status = true; + goto cleanup; + } + hcd = port->rhub->hcd; bus_state = &xhci->bus_state[hcd_index(hcd)]; hcd_portnum = port->hcd_portnum; @@ -1564,8 +1616,12 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } - if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); + if (hcd->speed >= HCD_USB3 && + (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { + slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); + if (slot_id && xhci->devs[slot_id]) + xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; + } if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port resume event for port %d\n", port_id); @@ -1585,6 +1641,7 @@ static void handle_port_status(struct xhci_hcd *xhci, bus_state->port_remote_wakeup |= 1 << hcd_portnum; xhci_test_and_clear_bit(xhci, port, PORT_PLC); xhci_set_link_state(xhci, port, XDEV_U0); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); /* Need to wait until the next link state change * indicates the device is actually in U0. */ @@ -1602,14 +1659,18 @@ static void handle_port_status(struct xhci_hcd *xhci, set_bit(HCD_FLAG_POLL_RH, &hcd->flags); mod_timer(&hcd->rh_timer, bus_state->resume_done[hcd_portnum]); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); bogus_port_status = true; } } - if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && - DEV_SUPERSPEED_ANY(portsc)) { + if ((portsc & PORT_PLC) && + DEV_SUPERSPEED_ANY(portsc) && + ((portsc & PORT_PLS_MASK) == XDEV_U0 || + (portsc & PORT_PLS_MASK) == XDEV_U1 || + (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); - /* We've just brought the device into U0 through either the + /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. If it's a device * initiated remote wake, don't pass up the link state change, @@ -1620,7 +1681,6 @@ static void handle_port_status(struct xhci_hcd *xhci, if (slot_id && xhci->devs[slot_id]) xhci_ring_device(xhci, slot_id); if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); xhci_test_and_clear_bit(xhci, port, PORT_PLC); usb_wakeup_notification(hcd->self.root_hub, hcd_portnum + 1); @@ -1634,7 +1694,7 @@ static void handle_port_status(struct xhci_hcd *xhci, * RExit to a disconnect state). If so, let the the driver know it's * out of the RExit state. 
*/ - if (!DEV_SUPERSPEED_ANY(portsc) && + if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 && test_and_clear_bit(hcd_portnum, &bus_state->rexit_ports)) { complete(&bus_state->rexit_done[hcd_portnum]); @@ -1642,8 +1702,12 @@ static void handle_port_status(struct xhci_hcd *xhci, goto cleanup; } - if (hcd->speed < HCD_USB3) + if (hcd->speed < HCD_USB3) { xhci_test_and_clear_bit(xhci, port, PORT_PLC); + if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && + (portsc & PORT_CSC) && !(portsc & PORT_CONNECT)) + xhci_cavium_reset_phy_quirk(xhci); + } cleanup: /* Update event ring dequeue pointer before dropping the lock */ @@ -1745,6 +1809,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; struct xhci_command *command; + + /* + * Avoid resetting endpoint if link is inactive. Can cause host hang. + * Device will be reset soon to recover the link so don't do anything + */ + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) + return; + command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) return; @@ -2246,6 +2318,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto cleanup; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: + case COMP_STOPPED_LENGTH_INVALID: goto cleanup; default: xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", @@ -2268,7 +2341,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_SUCCESS: if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + if (xhci->quirks & XHCI_TRUST_TX_LENGTH || + ep_ring->last_td_was_short) trb_comp_code = COMP_SHORT_PACKET; else xhci_warn_ratelimited(xhci, @@ -3059,6 +3133,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, unsigned int unalign; unsigned int max_pkt; u32 new_buff_len; + size_t len; max_pkt = usb_endpoint_maxp(&urb->ep->desc); unalign = (enqd_len + *trb_buff_len) % max_pkt; @@ -3089,8 +3164,12 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, /* create a max max_pkt sized bounce buffer pointed to by last trb */ if (usb_urb_dir_out(urb)) { - sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs, + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, new_buff_len, enqd_len); + if (len != new_buff_len) + xhci_warn(xhci, + "WARN Wrong bounce buffer write length: %zu != %d\n", + len, new_buff_len); seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 4b463e5202a421705be74610a136239dc3a9c423..fe37dacc695fcb0dfb15f47078c8847902feaeb1 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1148,6 +1148,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) tegra_xusb_ipfs_config(tegra, regs); + /* + * The XUSB Falcon microcontroller can only address 40 bits, so set + * the DMA mask accordingly. 
+ */ + err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40)); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + goto put_rpm; + } + err = tegra_xusb_load_firmware(tegra); if (err < 0) { dev_err(&pdev->dev, "failed to load firmware: %d\n", err); @@ -1240,6 +1250,7 @@ static int tegra_xusb_remove(struct platform_device *pdev) usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); + xhci->shared_hcd = NULL; usb_remove_hcd(tegra->hcd); usb_put_hcd(tegra->hcd); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0420eefa647a15cb5321dfa5fd95556a5a5432e5..22e47392f16cd8b28785abdf7f2cacfa3ec24078 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -52,7 +53,6 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) return false; } -/* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * xhci_handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read @@ -69,18 +69,16 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; + int ret; - do { - result = readl(ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX, + 1, usec); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; } /* @@ -719,8 +717,6 @@ static void xhci_stop(struct usb_hcd *hcd) /* Only halt host and free memory after both hcds are removed */ if (!usb_hcd_is_primary_hcd(hcd)) { - /* usb core will free this hcd shortly, unset pointer */ - xhci->shared_hcd = NULL; mutex_unlock(&xhci->mutex); return; } @@ -773,7 +769,7 @@ static void xhci_stop(struct usb_hcd *hcd) * * This will only ever be called with the main usb_hcd (the USB3 roothub). 
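
The xhci.c change above rewrites xhci_handshake() on top of readl_poll_timeout_atomic() while keeping the same contract: succeed when the masked status matches, report -ENODEV when the register reads back as all ones, and time out otherwise. A userspace model of that contract::

    #include <stdint.h>
    #include <stdio.h>

    static int handshake(volatile uint32_t *reg, uint32_t mask, uint32_t done,
                         int usec)
    {
        while (usec-- > 0) {
            uint32_t val = *reg;

            if (val == UINT32_MAX)
                return -19;              /* -ENODEV: controller removed */
            if ((val & mask) == done)
                return 0;
            /* the real loop delays about 1 us per iteration */
        }
        return -110;                     /* -ETIMEDOUT */
    }

    int main(void)
    {
        uint32_t status = 0;             /* e.g. STS_CNR already clear */

        printf("%d\n", handshake(&status, 1u << 11, 0, 1000));
        return 0;
    }
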
*/ -static void xhci_shutdown(struct usb_hcd *hcd) +void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -792,11 +788,8 @@ static void xhci_shutdown(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_shutdown completed - status = %x", readl(&xhci->op_regs->status)); - - /* Yet another workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot); } +EXPORT_SYMBOL_GPL(xhci_shutdown); #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) @@ -967,9 +960,10 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; - unsigned int delay = XHCI_MAX_HALT_USEC; + unsigned int delay = XHCI_MAX_HALT_USEC * 2; struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; + u32 res; if (!hcd->state) return 0; @@ -1023,11 +1017,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) command = readl(&xhci->op_regs->command); command |= CMD_CSS; writel(command, &xhci->op_regs->command); + xhci->broken_suspend = 0; if (xhci_handshake(&xhci->op_regs->status, - STS_SAVE, 0, 10 * 1000)) { - xhci_warn(xhci, "WARN: xHC save state timeout\n"); - spin_unlock_irq(&xhci->lock); - return -ETIMEDOUT; + STS_SAVE, 0, 20 * 1000)) { + /* + * AMD SNPS xHC 3.0 occasionally does not clear the + * SSS bit of USBSTS and when driver tries to poll + * to see if the xHC clears BIT(8) which never happens + * and driver assumes that controller is not responding + * and times out. To workaround this, its good to check + * if SRE and HCE bits are not set (as per xhci + * Section 5.4.2) and bypass the timeout. + */ + res = readl(&xhci->op_regs->status); + if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) && + (((res & STS_SRE) == 0) && + ((res & STS_HCE) == 0))) { + xhci->broken_suspend = 1; + } else { + xhci_warn(xhci, "WARN: xHC save state timeout\n"); + spin_unlock_irq(&xhci->lock); + return -ETIMEDOUT; + } } spin_unlock_irq(&xhci->lock); @@ -1080,10 +1091,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if (xhci->quirks & XHCI_RESET_ON_RESUME) + if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) hibernated = true; if (!hibernated) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. 
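
The suspend hunk above works around AMD SNPS 3.0 hosts that never report save-state completion: when the SSS poll times out, the controller is still treated as suspended if neither the Save/Restore Error nor the Host Controller Error bit is set. A minimal model of that decision (bit positions as defined for USBSTS in xhci.h)::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STS_SRE (1u << 10)   /* save/restore error */
    #define STS_HCE (1u << 12)   /* host controller error */

    static bool timeout_is_benign(uint32_t usbsts, bool snps_quirk)
    {
        return snps_quirk && !(usbsts & STS_SRE) && !(usbsts & STS_HCE);
    }

    int main(void)
    {
        printf("%d\n", timeout_is_benign(0, true));        /* 1: proceed anyway */
        printf("%d\n", timeout_is_benign(STS_HCE, true));  /* 0: real failure */
        return 0;
    }
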
+ */ + retval = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + if (retval) { + xhci_warn(xhci, "Controller not ready at resume %d\n", + retval); + spin_unlock_irq(&xhci->lock); + return retval; + } /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ @@ -1427,6 +1450,10 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag xhci_dbg(xhci, "urb submitted during PCI suspend\n"); return -ESHUTDOWN; } + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); + return -ENODEV; + } if (usb_endpoint_xfer_isoc(&urb->ep->desc)) num_tds = urb->number_of_packets; @@ -2721,7 +2748,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, xhci->num_active_eps); return -ENOMEM; } - if ((xhci->quirks & XHCI_SW_BW_CHECKING) && + if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) xhci_free_host_resources(xhci, ctrl_ctx); @@ -3035,6 +3062,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, unsigned int ep_index; unsigned long flags; u32 ep_flag; + int err; xhci = hcd_to_xhci(hcd); if (!host_ep->hcpriv) @@ -3084,7 +3112,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, xhci_free_command(xhci, cfg_cmd); goto cleanup; } - xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); + + err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, + ep_index, 0); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3098,8 +3136,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, ctrl_ctx, ep_flag, ep_flag); xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); - xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, + err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, udev->slot_id, false); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3710,6 +3756,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } /* If necessary, update the number of active TTs on this root port */ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + virt_dev->flags = 0; ret = 0; command_cleanup: @@ -3727,6 +3774,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; + unsigned long flags; int i, ret; #ifndef CONFIG_USB_DEFAULT_PERSIST @@ -3755,11 +3803,13 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } - xhci_debugfs_remove_slot(xhci, udev->slot_id); virt_dev->udev = NULL; - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); + + spin_lock_irqsave(&xhci->lock, flags); + xhci_free_virt_device(xhci, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + } int 
xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) @@ -3769,10 +3819,12 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) u32 state; int ret = 0; - command = xhci_alloc_command(xhci, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, true, GFP_KERNEL); if (!command) return -ENOMEM; + xhci_debugfs_remove_slot(xhci, slot_id); + spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. */ state = readl(&xhci->op_regs->status); @@ -3792,6 +3844,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + + if (command->status != COMP_SUCCESS) + xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", + slot_id, command->status); + + xhci_free_command(xhci, command); + return ret; } @@ -3902,9 +3963,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 1; disable_slot: - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); return 0; } @@ -4033,8 +4093,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, mutex_unlock(&xhci->mutex); ret = xhci_disable_slot(xhci, udev->slot_id); - if (!ret) - xhci_alloc_dev(hcd, udev); + xhci_free_virt_device(xhci, udev->slot_id); + if (!ret) { + if (xhci_alloc_dev(hcd, udev) == 1) + xhci_setup_addressable_virt_dev(xhci, udev); + } kfree(command->completion); kfree(command); return -EPROTO; @@ -4273,7 +4336,6 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, pm_addr = ports[port_num]->addr + PORTPMSC; pm_val = readl(pm_addr); hlpm_addr = ports[port_num]->addr + PORTHLPMC; - field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? "enable" : "disable", port_num + 1); @@ -4285,6 +4347,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, * default one which works with mixed HIRD and BESL * systems. 
See XHCI_DEFAULT_BESL definition in xhci.h */ + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); if ((field & USB_BESL_SUPPORT) && (field & USB_BESL_BASELINE_VALID)) hird = USB_GET_BESL_BASELINE(field); @@ -4498,7 +4561,15 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - if (xhci->quirks & XHCI_INTEL_HOST) + /* Prevent U1 if service interval is shorter than U1 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + + if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); else timeout_ns = udev->u1_params.sel; @@ -4554,7 +4625,15 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - if (xhci->quirks & XHCI_INTEL_HOST) + /* Prevent U2 if service interval is shorter than U2 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { + if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + + if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); else timeout_ns = udev->u2_params.sel; @@ -4596,12 +4675,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, desc, state, timeout); - /* If we found we can't enable hub-initiated LPM, or + /* If we found we can't enable hub-initiated LPM, and * the U1 or U2 exit latency was too high to allow - * device-initiated LPM as well, just stop searching. + * device-initiated LPM as well, then we will disable LPM + * for this device, so stop searching any further. 
 */ - if (alt_timeout == USB3_LPM_DISABLED || - alt_timeout == USB3_LPM_DEVICE_INITIATED) { + if (alt_timeout == USB3_LPM_DISABLED) { *timeout = alt_timeout; return -E2BIG; } @@ -4651,12 +4730,42 @@ static int xhci_check_intel_tier_policy(struct usb_device *udev, return -E2BIG; } +static int xhci_check_zhaoxin_tier_policy(struct usb_device *udev, + enum usb3_link_state state) +{ + struct usb_device *parent; + unsigned int num_hubs; + char *state_name; + + if (state == USB3_LPM_U1) + state_name = "U1"; + else if (state == USB3_LPM_U2) + state_name = "U2"; + else + state_name = "Unknown"; + /* Don't enable U1/U2 if the device is on an external hub */ + for (parent = udev->parent, num_hubs = 0; parent->parent; + parent = parent->parent) + num_hubs++; + + if (num_hubs < 1) + return 0; + + dev_dbg(&udev->dev, "Disabling %s link state for device" \ + " below external hub.\n", state_name); + dev_dbg(&udev->dev, "Plug device into root port " \ + "to decrease power consumption.\n"); + return -E2BIG; +} + static int xhci_check_tier_policy(struct xhci_hcd *xhci, struct usb_device *udev, enum usb3_link_state state) { if (xhci->quirks & XHCI_INTEL_HOST) return xhci_check_intel_tier_policy(udev, state); + else if (xhci->quirks & XHCI_ZHAOXIN_HOST) + return xhci_check_zhaoxin_tier_policy(udev, state); else return 0; } @@ -4712,10 +4821,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); if (driver && driver->disable_hub_initiated_lpm) { - dev_dbg(&udev->dev, "Hub-initiated %s disabled " - "at request of driver %s\n", - state_name, driver->name); - return xhci_get_timeout_no_hub_lpm(udev, state); + dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", + state_name, driver->name); + timeout = xhci_get_timeout_no_hub_lpm(udev, + state); + if (timeout == USB3_LPM_DISABLED) + return timeout; } } @@ -4970,6 +5081,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) */ struct device *dev = hcd->self.sysdev; unsigned int minor_rev; + u8 i, j; int retval; /* Accept arbitrarily long scatter-gather lists */ @@ -4999,17 +5111,52 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) hcd->has_tt = 1; } else { /* - * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol - * minor revision instead of sbrn + * Early xHCI 1.1 spec did not mention that USB 3.1 capable hosts + * should return 0x31 for sbrn, or that the minor revision + * is a two-digit BCD containing minor and sub-minor numbers. + * This was later clarified in xHCI 1.2. + * + * Some USB 3.1 capable hosts therefore have sbrn 0x30, and + * minor revision set to 0x1 instead of 0x10.
 */ - minor_rev = xhci->usb3_rhub.min_rev; - if (minor_rev) { + if (xhci->usb3_rhub.min_rev == 0x1) + minor_rev = 1; + else + minor_rev = xhci->usb3_rhub.min_rev / 0x10; + + switch (minor_rev) { + case 2: + hcd->speed = HCD_USB32; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->rx_lanes = 2; + hcd->self.root_hub->tx_lanes = 2; + break; + case 1: hcd->speed = HCD_USB31; hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + break; } - xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", + + /* USB 3.1 has Gen1 and Gen2. Some Zhaoxin xHCI controllers follow the + * USB 3.1 spec but only support Gen1 speeds. + */ + if (xhci->quirks & XHCI_ZHAOXIN_HOST) { + minor_rev = 0; + for (j = 0; j < xhci->num_port_caps; j++) { + for (i = 0; i < xhci->port_caps[j].psi_count; i++) { + if (XHCI_EXT_PORT_PSIV(xhci->port_caps[j].psi[i]) >= 5) + minor_rev = 1; + } + } + if (minor_rev != 1) { + hcd->speed = HCD_USB3; + hcd->self.root_hub->speed = USB_SPEED_SUPER; + } + } + + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", minor_rev, - minor_rev ? "Enhanced" : ""); + minor_rev ? "Enhanced " : ""); xhci->usb3_rhub.hcd = hcd; /* xHCI private pointer was set in xhci_pci_probe for the second diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6230a578324cf722108ce292620a073038a33343..485ba36b566caff7d33a82903fe00bd2df95c47c 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -452,6 +452,14 @@ struct xhci_op_regs { */ #define XHCI_DEFAULT_BESL 4 +/* + * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports + * to complete link training. Usually link training completes much faster, + * so check status 10 times with a 36ms sleep in places we need to wait for + * polling to complete. + */ +#define XHCI_PORT_POLLING_LFPS_TIME 36 + /** * struct xhci_intr_reg - Interrupt Register Set * @irq_pending: IMAN - Interrupt Management Register. Used to enable @@ -1002,6 +1010,15 @@ struct xhci_virt_device { u8 real_port; struct xhci_interval_bw_table *bw_table; struct xhci_tt_bw_info *tt_info; + /* + * flags for state tracking based on events and issued commands. + * Software cannot rely on states from output contexts because of + * latency between events and xHC updating output context values. + * See xhci 1.1 section 4.8.3 for more details + */ + unsigned long flags; +#define VDEV_PORT_ERROR BIT(0) /* Port error, link inactive */ + /* The current max exit latency for the enabled USB3 link states. */ u16 current_mel; /* Used for the debugfs interfaces. */ @@ -1678,7 +1695,7 @@ struct xhci_bus_state { * It can take up to 20 ms to transition from RExit to U0 on the * Intel Lynx Point LP xHCI host.
*/ -#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) +#define XHCI_MAX_REXIT_TIMEOUT_MS 20 static inline unsigned int hcd_index(struct usb_hcd *hcd) { @@ -1687,11 +1704,21 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd) else return 1; } + +struct xhci_port_cap { + u32 *psi; /* array of protocol speed ID entries */ + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; +}; + struct xhci_port { __le32 __iomem *addr; int hw_portnum; int hcd_portnum; struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; }; struct xhci_hub { @@ -1701,9 +1728,6 @@ struct xhci_hub { /* supported prococol extended capabiliy values */ u8 maj_rev; u8 min_rev; - u32 *psi; /* array of protocol speed ID entries */ - u8 psi_count; - u8 psi_uid_count; }; /* There is one xhci_hcd structure per controller */ @@ -1846,6 +1870,10 @@ struct xhci_hcd { #define XHCI_SUSPEND_DELAY BIT_ULL(30) #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) #define XHCI_ZERO_64B_REGS BIT_ULL(32) +#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) +#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) +#define XHCI_ZHAOXIN_HOST BIT_ULL(36) +#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(39) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1858,9 +1886,14 @@ struct xhci_hcd { unsigned sw_lpm_support:1; /* support xHCI 1.0 spec USB2 hardware LPM */ unsigned hw_lpm_support:1; + /* Broken Suspend flag for SNPS Suspend resume issue */ + unsigned broken_suspend:1; /* cached usb2 extened protocol capabilites */ u32 *ext_caps; unsigned int num_ext_caps; + /* cached extended protocol port capabilities */ + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; /* Compliance Mode Recovery Data */ struct timer_list comp_mode_recovery_timer; u32 port_status_u0; @@ -2031,6 +2064,7 @@ int xhci_start(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); +void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 9f2f563c82ed161912683091a93d2752219c714a..addbb47a50333afc4d48c45c67d645d8699d7ef9 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -721,6 +721,10 @@ static int mts_usb_probe(struct usb_interface *intf, } + if (ep_in_current != &ep_in_set[2]) { + MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n"); + return -ENODEV; + } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 68d2f2cd17dda953d2efcca2a99c5083a2213c89..2e3fc63619b7f8b93a126281aac9e59eede88adb 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -46,16 +46,6 @@ config USB_SEVSEG To compile this driver as a module, choose M here: the module will be called usbsevseg. -config USB_RIO500 - tristate "USB Diamond Rio500 support" - help - Say Y here if you want to connect a USB Rio500 mp3 player to your - computer's USB port. Please read - for more information. - - To compile this driver as a module, choose M here: the - module will be called rio500. 
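
The microtek hunk above refuses to bind unless both expected bulk-in endpoints were found, and the adutux and idmouse hunks below resolve endpoints from cur_altsetting instead of altsetting[0]. As a rough sketch of that shared pattern only (example_probe and its variables are invented for illustration, not taken from any of the drivers touched here), a probe routine along these lines might look like:

#include <linux/usb.h>

/* Illustrative only: a probe() that validates its endpoints up front. */
static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	/* Fails with a negative error if either bulk endpoint is missing. */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out, NULL, NULL);
	if (ret) {
		dev_err(&intf->dev, "required bulk endpoints not found\n");
		return -ENODEV;
	}

	dev_dbg(&intf->dev, "bulk-in 0x%02x, bulk-out 0x%02x\n",
		bulk_in->bEndpointAddress, bulk_out->bEndpointAddress);
	return 0;
}

Checking at probe time is what lets later URB setup assume the descriptors exist, which is the bug class the microtek and adutux changes close.
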
- config USB_LEGOTOWER tristate "USB Lego Infrared Tower support" help diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 109f54f5b9aa4b8ecc69c9f301e6ff3d668cfd93..0d416eb624bbe12247493afa40fe60242086e0d7 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o obj-$(CONFIG_USB_LCD) += usblcd.o obj-$(CONFIG_USB_LD) += ldusb.o obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o -obj-$(CONFIG_USB_RIO500) += rio500.o obj-$(CONFIG_USB_TEST) += usbtest.o obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 9465fb95d70af83dd2e06a42a4248b5198f6587d..b8073f36ffdc6cff2d5cf7a47771bbeec7afdcb7 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -75,6 +75,7 @@ struct adu_device { char serial_number[8]; int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char *read_buffer_primary; int read_buffer_length; @@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; - if (dev->udev == NULL) + if (dev->disconnected) return; /* shutdown transfer */ @@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev) kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree(dev); } @@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file) } dev = usb_get_intfdata(interface); - if (!dev || !dev->udev) { + if (!dev) { retval = -ENODEV; goto exit_no_device; } @@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file) } adu_release_internal(dev); - if (dev->udev == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... 
and we're the last user */ adu_delete(dev); @@ -355,7 +357,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, return -ERESTARTSYS; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -520,7 +522,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, goto exit_nolock; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -665,11 +667,11 @@ static int adu_probe(struct usb_interface *interface, mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); - res = usb_find_common_endpoints_reverse(&interface->altsetting[0], + res = usb_find_common_endpoints_reverse(interface->cur_altsetting, NULL, NULL, &dev->interrupt_in_endpoint, &dev->interrupt_out_endpoint); @@ -764,14 +766,18 @@ static void adu_disconnect(struct usb_interface *interface) dev = usb_get_intfdata(interface); - mutex_lock(&dev->mtx); /* not interruptible */ - dev->udev = NULL; /* poison */ usb_deregister_dev(interface, &adu_class); - mutex_unlock(&dev->mtx); + + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); + mutex_lock(&dev->mtx); /* not interruptible */ + dev->disconnected = 1; + mutex_unlock(&dev->mtx); + /* if the device is not opened, then we clean up right now */ if (!dev->open_count) adu_delete(dev); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index d746c26a805522e0346bd0e2abc97ea2c697aeb9..718d692b07ac8ab50de231ed59901df18631fc4f 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -50,6 +50,8 @@ static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, { APPLEDISPLAY_DEVICE(0x921d) }, + { APPLEDISPLAY_DEVICE(0x9222) }, + { APPLEDISPLAY_DEVICE(0x9226) }, { APPLEDISPLAY_DEVICE(0x9236) }, /* Terminating entry */ @@ -146,8 +148,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd) pdata->msgdata, 2, ACD_USB_TIMEOUT); mutex_unlock(&pdata->sysfslock); - - return retval; + + if (retval < 0) + return retval; + else + return 0; } static int appledisplay_bl_get_brightness(struct backlight_device *bd) @@ -165,7 +170,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) 0, pdata->msgdata, 2, ACD_USB_TIMEOUT); - brightness = pdata->msgdata[1]; + if (retval < 2) { + if (retval >= 0) + retval = -EMSGSIZE; + } else { + brightness = pdata->msgdata[1]; + } mutex_unlock(&pdata->sysfslock); if (retval < 0) @@ -300,6 +310,7 @@ static int appledisplay_probe(struct usb_interface *iface, if (pdata) { if (pdata->urb) { usb_kill_urb(pdata->urb); + cancel_delayed_work_sync(&pdata->work); if (pdata->urbdata) usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN, pdata->urbdata, pdata->urb->transfer_dma); diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index cf5828ce927a835af30de0dd739d0ce7e455a2db..87067c3d6109b966fb7a35be4e0ed6da9f15c9af 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev) usb_free_urb(dev->urb); kfree(dev->name); 
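
The adutux hunks above and the chaoskey, ldusb, legousbtower and yurex hunks that follow converge on the same lifetime scheme: take a reference on the device or interface in probe(), mark a disconnected flag (and poison the URBs) in disconnect() rather than clearing the udev/intf pointers, and drop the references only in the final delete path once the last opener is gone. A compressed sketch of that scheme, with invented names (example_dev and friends) and the minor registration, open() locking and error handling left out:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct example_dev {
	struct usb_device *udev;	/* ref taken with usb_get_dev() in probe */
	struct usb_interface *intf;	/* ref taken with usb_get_intf() in probe */
	struct urb *int_urb;
	struct mutex io_mutex;
	int open_count;
	unsigned long disconnected:1;
};

static void example_delete(struct example_dev *dev)
{
	usb_free_urb(dev->int_urb);
	usb_put_intf(dev->intf);
	usb_put_dev(dev->udev);
	kfree(dev);
}

static void example_disconnect(struct usb_interface *intf)
{
	struct example_dev *dev = usb_get_intfdata(intf);

	usb_poison_urb(dev->int_urb);	/* stop I/O; later submits fail */

	mutex_lock(&dev->io_mutex);
	dev->disconnected = 1;		/* readers test this, not udev == NULL */
	mutex_unlock(&dev->io_mutex);

	if (!dev->open_count)		/* otherwise release() does the delete */
		example_delete(dev);
}

Keeping the pointers valid until example_delete() runs is what allows an already-open file descriptor to keep calling into the driver safely after the USB device has gone away.
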
kfree(dev->buf); + usb_put_intf(dev->interface); kfree(dev); } } @@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface, if (dev == NULL) goto out; + dev->interface = usb_get_intf(interface); + dev->buf = kmalloc(size, GFP_KERNEL); if (dev->buf == NULL) @@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface, goto out; } - dev->interface = interface; - dev->in_ep = in_ep; if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) @@ -383,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev) !dev->reading, (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); - if (result < 0) + if (result < 0) { + usb_kill_urb(dev->urb); goto out; + } - if (result == 0) + if (result == 0) { result = -ETIMEDOUT; - else + usb_kill_urb(dev->urb); + } else { result = dev->valid; + } out: /* Let the device go back to sleep eventually */ usb_autopm_put_interface(dev->interface); @@ -525,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface, static int chaoskey_resume(struct usb_interface *interface) { + struct chaoskey *dev; + struct usb_device *udev = interface_to_usbdev(interface); + usb_dbg(interface, "resume"); + dev = usb_get_intfdata(interface); + + /* + * We may have lost power. + * In that case the device that needs a long time + * for the first requests needs an extended timeout + * again + */ + if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) + dev->reads_started = false; + return 0; } #else diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 20b0f91a5d9b6921e0afb1d33b1dc9bf34a6bae4..bb24527f3c7010854c9cf58a75e9c95ecf94cd5c 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -337,7 +337,7 @@ static int idmouse_probe(struct usb_interface *interface, int result; /* check if we have gotten the data or the hid interface */ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index c2991b8a65ce455b9e35c15fbb758108b1012071..2d9d9490cdd4759cbf190c5a2be8653add77b3e0 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -87,6 +87,7 @@ struct iowarrior { char chip_serial[9]; /* the serial number string of the chip connected */ int report_size; /* number of bytes in a report */ u16 product_id; + struct usb_anchor submitted; }; /*--------------*/ @@ -243,6 +244,7 @@ static inline void iowarrior_delete(struct iowarrior *dev) kfree(dev->int_in_buffer); usb_free_urb(dev->int_in_urb); kfree(dev->read_queue); + usb_put_intf(dev->interface); kfree(dev); } @@ -424,11 +426,13 @@ static ssize_t iowarrior_write(struct file *file, retval = -EFAULT; goto error; } + usb_anchor_urb(int_out_urb, &dev->submitted); retval = usb_submit_urb(int_out_urb, GFP_KERNEL); if (retval) { dev_dbg(&dev->interface->dev, "submit error %d for urb nr.%d\n", retval, atomic_read(&dev->write_busy)); + usb_unanchor_urb(int_out_urb); goto error; } /* submit was ok */ @@ -764,11 +768,13 @@ static int iowarrior_probe(struct usb_interface *interface, init_waitqueue_head(&dev->write_wait); dev->udev = udev; - dev->interface = interface; + dev->interface = usb_get_intf(interface); iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + init_usb_anchor(&dev->submitted); + res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint); if (res) { dev_err(&interface->dev, "no interrupt-in 
endpoint found\n"); @@ -868,8 +874,9 @@ static void iowarrior_disconnect(struct usb_interface *interface) usb_set_intfdata(interface, NULL); minor = dev->minor; + mutex_unlock(&iowarrior_open_disc_lock); + /* give back our minor - this will call close() locks need to be dropped at this point*/ - /* give back our minor */ usb_deregister_dev(interface, &iowarrior_class); mutex_lock(&dev->mutex); @@ -877,19 +884,19 @@ static void iowarrior_disconnect(struct usb_interface *interface) /* prevent device read, write and ioctl */ dev->present = 0; - mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_open_disc_lock); - if (dev->opened) { /* There is a process that holds a filedescriptor to the device , so we only shutdown read-/write-ops going on. Deleting the device is postponed until close() was called. */ usb_kill_urb(dev->int_in_urb); + usb_kill_anchored_urbs(&dev->submitted); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); + mutex_unlock(&dev->mutex); } else { /* no process is using the device, cleanup now */ + mutex_unlock(&dev->mutex); iowarrior_delete(dev); } diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 006762b72ff54211f05fed030170bc7f1a70448d..67c1b8f5d54d6088c973198e61d1080d29191fad 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in struct ld_usb { struct mutex mutex; /* locks this structure */ struct usb_interface *intf; /* save off the usb interface pointer */ + unsigned long disconnected:1; int open_count; /* number of times this port has been opened */ @@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; - if (dev->intf) - usb_kill_urb(dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } if (dev->interrupt_out_busy) - if (dev->intf) - usb_kill_urb(dev->interrupt_out_urb); + usb_kill_urb(dev->interrupt_out_urb); } /** @@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) */ static void ld_usb_delete(struct ld_usb *dev) { - ld_usb_abort_transfers(dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); @@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) { + if (dev->interrupt_in_running && !dev->buffer_overflow) { retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, @@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file) goto exit; } - if (mutex_lock_interruptible(&dev->mutex)) { - retval = -ERESTARTSYS; - goto exit; - } + mutex_lock(&dev->mutex); if (dev->open_count != 1) { retval = -ENODEV; goto unlock_exit; } - if (dev->intf == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ mutex_unlock(&dev->mutex); /* unlock here as ld_usb_delete frees dev */ @@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->intf) + if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if 
(dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, /* wait for data */ spin_lock_irq(&dev->rbsl); - if (dev->ring_head == dev->ring_tail) { + while (dev->ring_head == dev->ring_tail) { dev->interrupt_in_done = 0; spin_unlock_irq(&dev->rbsl); if (file->f_flags & O_NONBLOCK) { @@ -480,15 +474,20 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done); if (retval < 0) goto unlock_exit; - } else { - spin_unlock_irq(&dev->rbsl); + + spin_lock_irq(&dev->rbsl); } + spin_unlock_irq(&dev->rbsl); /* actual_buffer contains actual_length + interrupt_in_buffer */ actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size)); + if (*actual_buffer > dev->interrupt_in_endpoint_size) { + retval = -EIO; + goto unlock_exit; + } bytes_to_read = min(count, *actual_buffer); if (bytes_to_read < *actual_buffer) - dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n", + dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n", *actual_buffer-bytes_to_read); /* copy one interrupt_in_buffer from ring_buffer into userspace */ @@ -496,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = -EFAULT; goto unlock_exit; } - dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size; - retval = bytes_to_read; spin_lock_irq(&dev->rbsl); + dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size; + if (dev->buffer_overflow) { dev->buffer_overflow = 0; spin_unlock_irq(&dev->rbsl); @@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -563,8 +562,9 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, /* write the data into interrupt_out_buffer from userspace */ bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size); if (bytes_to_write < count) - dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write); - dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", + dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n", + count - bytes_to_write); + dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n", __func__, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) { @@ -581,7 +581,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, 1 << 8, 0, dev->interrupt_out_buffer, bytes_to_write, - USB_CTRL_SET_TIMEOUT * HZ); + USB_CTRL_SET_TIMEOUT); if (retval < 0) dev_err(&dev->intf->dev, "Couldn't submit HID_REQ_SET_REPORT %d\n", @@ -696,10 +696,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id * dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n"); dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); - dev->ring_buffer = - kmalloc_array(ring_buffer_size, - sizeof(size_t) + dev->interrupt_in_endpoint_size, - GFP_KERNEL); + dev->ring_buffer = kcalloc(ring_buffer_size, + sizeof(size_t) + 
dev->interrupt_in_endpoint_size, + GFP_KERNEL); if (!dev->ring_buffer) goto error; dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL); @@ -764,6 +763,9 @@ static void ld_usb_disconnect(struct usb_interface *intf) /* give back our minor */ usb_deregister_dev(intf, &ld_usb_class); + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->mutex); /* if the device is not opened, then we clean up right now */ @@ -771,7 +773,7 @@ static void ld_usb_disconnect(struct usb_interface *intf) mutex_unlock(&dev->mutex); ld_usb_delete(dev); } else { - dev->intf = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 006cf13b21999f58e512579fa62ee399907dba6b..23061f1526b4e03be818de50add926e982187ae2 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = { }; MODULE_DEVICE_TABLE (usb, tower_table); -static DEFINE_MUTEX(open_disc_mutex); #define LEGO_USB_TOWER_MINOR_BASE 160 @@ -191,6 +190,7 @@ struct lego_usb_tower { unsigned char minor; /* the starting minor number for this device */ int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char* read_buffer; size_t read_buffer_length; /* this much came in */ @@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev, */ static inline void tower_delete (struct lego_usb_tower *dev) { - tower_abort_transfers (dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree (dev->read_buffer); kfree (dev->interrupt_in_buffer); kfree (dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree (dev); } @@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file) goto exit; } - mutex_lock(&open_disc_mutex); dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&open_disc_mutex); retval = -ENODEV; goto exit; } /* lock this device */ if (mutex_lock_interruptible(&dev->lock)) { - mutex_unlock(&open_disc_mutex); retval = -ERESTARTSYS; goto exit; } @@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file) /* allow opening only once */ if (dev->open_count) { - mutex_unlock(&open_disc_mutex); retval = -EBUSY; goto unlock_exit; } - dev->open_count = 1; - mutex_unlock(&open_disc_mutex); /* reset the tower */ result = usb_control_msg (dev->udev, @@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file) dev_err(&dev->udev->dev, "Couldn't submit interrupt_in_urb %d\n", retval); dev->interrupt_in_running = 0; - dev->open_count = 0; goto unlock_exit; } /* save device in the file's private structure */ file->private_data = dev; + dev->open_count = 1; + unlock_exit: mutex_unlock(&dev->lock); @@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file) if (dev == NULL) { retval = -ENODEV; - goto exit_nolock; - } - - mutex_lock(&open_disc_mutex); - if (mutex_lock_interruptible(&dev->lock)) { - retval = -ERESTARTSYS; goto exit; } + mutex_lock(&dev->lock); + if (dev->open_count != 1) { dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n", __func__); retval = -ENODEV; goto unlock_exit; } - if (dev->udev == NULL) { + + if (dev->disconnected) { /* the device was unplugged before the file was released */ /* 
unlock here as tower_delete frees dev */ @@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file) unlock_exit: mutex_unlock(&dev->lock); - exit: - mutex_unlock(&open_disc_mutex); -exit_nolock: return retval; } @@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev) if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; mb(); - if (dev->udev) - usb_kill_urb (dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } - if (dev->interrupt_out_busy && dev->udev) + if (dev->interrupt_out_busy) usb_kill_urb(dev->interrupt_out_urb); } @@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->udev) + if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && dev->udev) { + if (dev->interrupt_in_running) { retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC); if (retval) dev_err(&dev->udev->dev, @@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device mutex_init(&dev->lock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); dev->open_count = 0; + dev->disconnected = 0; dev->read_buffer = NULL; dev->read_buffer_length = 0; @@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device get_version_reply, sizeof(*get_version_reply), 1000); - if (result < 0) { - dev_err(idev, "LEGO USB Tower get version control request failed\n"); + if (result != sizeof(*get_version_reply)) { + if (result >= 0) + result = -EIO; + dev_err(idev, "get version request failed: %d\n", result); retval = result; goto error; } @@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device if (retval) { /* something prevented us from registering this driver */ dev_err(idev, "Not able to get a minor for this device.\n"); - usb_set_intfdata (interface, NULL); goto error; } dev->minor = interface->minor; @@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface) int minor; dev = usb_get_intfdata (interface); - mutex_lock(&open_disc_mutex); - usb_set_intfdata (interface, NULL); minor = dev->minor; - /* give back our minor */ + /* give back our minor and prevent further open() */ usb_deregister_dev (interface, &tower_class); + /* stop I/O */ + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->lock); - mutex_unlock(&open_disc_mutex); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) { mutex_unlock(&dev->lock); tower_delete (dev); } else { - dev->udev = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff 
--git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c deleted file mode 100644 index 7b9adeb3e7aa1424b18b395ffa8e09c41e96062e..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/rio500.c +++ /dev/null @@ -1,536 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* -*- linux-c -*- */ - -/* - * Driver for USB Rio 500 - * - * Cesar Miquel (miquel@df.uba.ar) - * - * based on hp_scanner.c by David E. Nelson (dnelson@jump.net) - * - * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee). - * - * Changelog: - * 30/05/2003 replaced lock/unlock kernel with up/down - * Daniele Bellucci bellucda@tiscali.it - * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "rio500_usb.h" - -#define DRIVER_AUTHOR "Cesar Miquel " -#define DRIVER_DESC "USB Rio 500 driver" - -#define RIO_MINOR 64 - -/* stall/wait timeout for rio */ -#define NAK_TIMEOUT (HZ) - -#define IBUF_SIZE 0x1000 - -/* Size of the rio buffer */ -#define OBUF_SIZE 0x10000 - -struct rio_usb_data { - struct usb_device *rio_dev; /* init: probe_rio */ - unsigned int ifnum; /* Interface number of the USB device */ - int isopen; /* nz if open */ - int present; /* Device is present on the bus */ - char *obuf, *ibuf; /* transfer buffers */ - char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ - wait_queue_head_t wait_q; /* for timeouts */ - struct mutex lock; /* general race avoidance */ -}; - -static DEFINE_MUTEX(rio500_mutex); -static struct rio_usb_data rio_instance; - -static int open_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - /* against disconnect() */ - mutex_lock(&rio500_mutex); - mutex_lock(&(rio->lock)); - - if (rio->isopen || !rio->present) { - mutex_unlock(&(rio->lock)); - mutex_unlock(&rio500_mutex); - return -EBUSY; - } - rio->isopen = 1; - - init_waitqueue_head(&rio->wait_q); - - mutex_unlock(&(rio->lock)); - - dev_info(&rio->rio_dev->dev, "Rio opened.\n"); - mutex_unlock(&rio500_mutex); - - return 0; -} - -static int close_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - rio->isopen = 0; - - dev_info(&rio->rio_dev->dev, "Rio closed.\n"); - return 0; -} - -static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct RioCommand rio_cmd; - struct rio_usb_data *rio = &rio_instance; - void __user *data; - unsigned char *buffer; - int result, requesttype; - int retries; - int retval=0; - - mutex_lock(&(rio->lock)); - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - retval = -ENODEV; - goto err_out; - } - - switch (cmd) { - case RIO_RECV_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - retval = -EFAULT; - free_page((unsigned long) buffer); - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_IN | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send 
rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_rcvctrlpipe(rio-> rio_dev, 0), - rio_cmd.request, - requesttype, - rio_cmd.value, - rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d (data=%02x)\n", - result, buffer[0]); - if (copy_to_user(rio_cmd.buffer, buffer, - rio_cmd.length)) { - free_page((unsigned long) buffer); - retval = -EFAULT; - goto err_out; - } - retries = 0; - } - - /* rio_cmd.buffer contains a raw stream of single byte - data which has been returned from rio. Data is - interpreted at application level. For data that - will be cast to data types longer than 1 byte, data - will be little_endian and will potentially need to - be swapped at the app level */ - - } - free_page((unsigned long) buffer); - break; - - case RIO_SEND_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - free_page((unsigned long)buffer); - retval = -EFAULT; - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_OUT | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_sndctrlpipe(rio-> rio_dev, 0), - rio_cmd.request, - requesttype, - rio_cmd.value, - rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d\n", result); - retries = 0; - - } - - } - free_page((unsigned long) buffer); - break; - - default: - retval = -ENOTTY; - break; - } - - -err_out: - mutex_unlock(&(rio->lock)); - return retval; -} - -static ssize_t -write_rio(struct file *file, const char __user *buffer, - size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - - unsigned long copy_size; - unsigned long bytes_written = 0; - unsigned int partial; - - int result = 0; - int maxretry; - int errn = 0; - int intr; - - intr = mutex_lock_interruptible(&(rio->lock)); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - - - - do { - unsigned long thistime; - char *obuf = rio->obuf; - - thistime = copy_size = - (count >= OBUF_SIZE) ? 
OBUF_SIZE : count; - if (copy_from_user(rio->obuf, buffer, copy_size)) { - errn = -EFAULT; - goto error; - } - maxretry = 5; - while (thistime) { - if (!rio->rio_dev) { - errn = -ENODEV; - goto error; - } - if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); - return bytes_written ? bytes_written : -EINTR; - } - - result = usb_bulk_msg(rio->rio_dev, - usb_sndbulkpipe(rio->rio_dev, 2), - obuf, thistime, &partial, 5000); - - dev_dbg(&rio->rio_dev->dev, - "write stats: result:%d thistime:%lu partial:%u\n", - result, thistime, partial); - - if (result == -ETIMEDOUT) { /* NAK - so hold for a while */ - if (!maxretry--) { - errn = -ETIME; - goto error; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (!result && partial) { - obuf += partial; - thistime -= partial; - } else - break; - } - if (result) { - dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n", - result); - errn = -EIO; - goto error; - } - bytes_written += copy_size; - count -= copy_size; - buffer += copy_size; - } while (count > 0); - - mutex_unlock(&(rio->lock)); - - return bytes_written ? bytes_written : -EIO; - -error: - mutex_unlock(&(rio->lock)); - return errn; -} - -static ssize_t -read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - ssize_t read_count; - unsigned int partial; - int this_read; - int result; - int maxretry = 10; - char *ibuf; - int intr; - - intr = mutex_lock_interruptible(&(rio->lock)); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - - ibuf = rio->ibuf; - - read_count = 0; - - - while (count > 0) { - if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); - return read_count ? read_count : -EINTR; - } - if (!rio->rio_dev) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; - - result = usb_bulk_msg(rio->rio_dev, - usb_rcvbulkpipe(rio->rio_dev, 1), - ibuf, this_read, &partial, - 8000); - - dev_dbg(&rio->rio_dev->dev, - "read stats: result:%d this_read:%u partial:%u\n", - result, this_read, partial); - - if (partial) { - count = this_read = partial; - } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? 
*/ - if (!maxretry--) { - mutex_unlock(&(rio->lock)); - dev_err(&rio->rio_dev->dev, - "read_rio: maxretry timeout\n"); - return -ETIME; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (result != -EREMOTEIO) { - mutex_unlock(&(rio->lock)); - dev_err(&rio->rio_dev->dev, - "Read Whoops - result:%d partial:%u this_read:%u\n", - result, partial, this_read); - return -EIO; - } else { - mutex_unlock(&(rio->lock)); - return (0); - } - - if (this_read) { - if (copy_to_user(buffer, ibuf, this_read)) { - mutex_unlock(&(rio->lock)); - return -EFAULT; - } - count -= this_read; - read_count += this_read; - buffer += this_read; - } - } - mutex_unlock(&(rio->lock)); - return read_count; -} - -static const struct file_operations usb_rio_fops = { - .owner = THIS_MODULE, - .read = read_rio, - .write = write_rio, - .unlocked_ioctl = ioctl_rio, - .open = open_rio, - .release = close_rio, - .llseek = noop_llseek, -}; - -static struct usb_class_driver usb_rio_class = { - .name = "rio500%d", - .fops = &usb_rio_fops, - .minor_base = RIO_MINOR, -}; - -static int probe_rio(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct rio_usb_data *rio = &rio_instance; - int retval; - - dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); - - retval = usb_register_dev(intf, &usb_rio_class); - if (retval) { - dev_err(&dev->dev, - "Not able to get a minor for this device.\n"); - return -ENOMEM; - } - - rio->rio_dev = dev; - - if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the output buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - return -ENOMEM; - } - dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf); - - if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the input buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - kfree(rio->obuf); - return -ENOMEM; - } - dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); - - mutex_init(&(rio->lock)); - - usb_set_intfdata (intf, rio); - rio->present = 1; - - return 0; -} - -static void disconnect_rio(struct usb_interface *intf) -{ - struct rio_usb_data *rio = usb_get_intfdata (intf); - - usb_set_intfdata (intf, NULL); - mutex_lock(&rio500_mutex); - if (rio) { - usb_deregister_dev(intf, &usb_rio_class); - - mutex_lock(&(rio->lock)); - if (rio->isopen) { - rio->isopen = 0; - /* better let it finish - the release will do whats needed */ - rio->rio_dev = NULL; - mutex_unlock(&(rio->lock)); - mutex_unlock(&rio500_mutex); - return; - } - kfree(rio->ibuf); - kfree(rio->obuf); - - dev_info(&intf->dev, "USB Rio disconnected.\n"); - - rio->present = 0; - mutex_unlock(&(rio->lock)); - } - mutex_unlock(&rio500_mutex); -} - -static const struct usb_device_id rio_table[] = { - { USB_DEVICE(0x0841, 1) }, /* Rio 500 */ - { } /* Terminating entry */ -}; - -MODULE_DEVICE_TABLE (usb, rio_table); - -static struct usb_driver rio_driver = { - .name = "rio500", - .probe = probe_rio, - .disconnect = disconnect_rio, - .id_table = rio_table, -}; - -module_usb_driver(rio_driver); - -MODULE_AUTHOR( DRIVER_AUTHOR ); -MODULE_DESCRIPTION( DRIVER_DESC ); -MODULE_LICENSE("GPL"); - diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h deleted file mode 100644 index 6db7a58634963ff146c5913732148205eca253f5..0000000000000000000000000000000000000000 --- 
a/drivers/usb/misc/rio500_usb.h +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* ---------------------------------------------------------------------- - Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar) - ---------------------------------------------------------------------- */ - -#define RIO_SEND_COMMAND 0x1 -#define RIO_RECV_COMMAND 0x2 - -#define RIO_DIR_OUT 0x0 -#define RIO_DIR_IN 0x1 - -struct RioCommand { - short length; - int request; - int requesttype; - int value; - int index; - void __user *buffer; - int timeout; -}; diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 3198d0477cf85db84cd697f22dc6652bbcdf6e3c..c4f6ac5f035eba70005ae0e470b6c67d9c18df86 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3029,6 +3029,13 @@ static int sisusb_probe(struct usb_interface *intf, mutex_init(&(sisusb->lock)); + sisusb->sisusb_dev = dev; + sisusb->vrambase = SISUSB_PCI_MEMBASE; + sisusb->mmiobase = SISUSB_PCI_MMIOBASE; + sisusb->mmiosize = SISUSB_PCI_MMIOSIZE; + sisusb->ioportbase = SISUSB_PCI_IOPORTBASE; + /* Everything else is zero */ + /* Register device */ retval = usb_register_dev(intf, &usb_sisusb_class); if (retval) { @@ -3039,13 +3046,7 @@ static int sisusb_probe(struct usb_interface *intf, goto error_1; } - sisusb->sisusb_dev = dev; - sisusb->minor = intf->minor; - sisusb->vrambase = SISUSB_PCI_MEMBASE; - sisusb->mmiobase = SISUSB_PCI_MMIOBASE; - sisusb->mmiosize = SISUSB_PCI_MMIOSIZE; - sisusb->ioportbase = SISUSB_PCI_IOPORTBASE; - /* Everything else is zero */ + sisusb->minor = intf->minor; /* Allocate buffers */ sisusb->ibufsize = SISUSB_IBUF_SIZE; diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index a6efb9a7293912ca5647b31622a579f8f348e9fd..5f7734c729b1d9770ecfa969d0d1a867e461e323 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -601,7 +601,7 @@ static int usb251xb_probe(struct usb251xb *hub) dev); int err; - if (np) { + if (np && of_id) { err = usb251xb_get_ofdata(hub, (struct usb251xb_data *)of_id->data); if (err) { diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 9ba4a4e68d9140f450d4e58c51b96068bc716100..aa982d3ca36bebebccb9bda4e61fbd3cc85f73bd 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -57,6 +58,8 @@ struct usb_lcd { using up all RAM */ struct usb_anchor submitted; /* URBs to wait for before suspend */ + struct rw_semaphore io_rwsem; + unsigned long disconnected:1; }; #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref) @@ -142,6 +145,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, dev = file->private_data; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto out_up_io; + } + /* do a blocking bulk read to get data from the device */ retval = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, @@ -158,6 +168,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, retval = bytes_read; } +out_up_io: + up_read(&dev->io_rwsem); + return retval; } @@ -237,11 +250,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, if (r < 0) return -EINTR; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto err_up_io; + } + /* create a urb, and a buffer for it, and copy the data to the urb */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; - goto err_no_buf; 
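
The usblcd changes around this point wrap the read and write paths in a new io_rwsem, and the disconnect hunk that follows takes it for writing while flipping a disconnected flag, so in-flight I/O drains before the anchored URBs are killed. A bare-bones sketch of that arrangement (io_rwsem, submitted and disconnected mirror the fields added by the patch; example_dev, example_read and example_disconnect are invented):

#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/usb.h>

struct example_dev {
	struct rw_semaphore io_rwsem;
	struct usb_anchor submitted;
	unsigned long disconnected:1;
};

static ssize_t example_read(struct file *file, char __user *buffer,
			    size_t count, loff_t *ppos)
{
	struct example_dev *dev = file->private_data;
	ssize_t retval = 0;

	down_read(&dev->io_rwsem);	/* concurrent I/O paths may overlap */
	if (dev->disconnected) {
		retval = -ENODEV;
		goto out;
	}
	/* the real driver performs usb_bulk_msg() + copy_to_user() here */
out:
	up_read(&dev->io_rwsem);
	return retval;
}

static void example_disconnect(struct usb_interface *intf)
{
	struct example_dev *dev = usb_get_intfdata(intf);

	down_write(&dev->io_rwsem);	/* waits for readers already inside */
	dev->disconnected = 1;
	up_write(&dev->io_rwsem);

	usb_kill_anchored_urbs(&dev->submitted);
}

An rw_semaphore fits here because many I/O calls may run at once; only disconnect needs exclusive access, and only for long enough to publish the flag.
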
+ goto err_up_io; } buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, @@ -278,6 +298,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, the USB core will eventually free it entirely */ usb_free_urb(urb); + up_read(&dev->io_rwsem); exit: return count; error_unanchor: @@ -285,7 +306,8 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, error: usb_free_coherent(dev->udev, count, buf, urb->transfer_dma); usb_free_urb(urb); -err_no_buf: +err_up_io: + up_read(&dev->io_rwsem); up(&dev->limit_sem); return retval; } @@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface *interface, kref_init(&dev->kref); sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES); + init_rwsem(&dev->io_rwsem); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); @@ -422,6 +445,12 @@ static void lcd_disconnect(struct usb_interface *interface) /* give back our minor */ usb_deregister_dev(interface, &lcd_class); + down_write(&dev->io_rwsem); + dev->disconnected = 1; + up_write(&dev->io_rwsem); + + usb_kill_anchored_urbs(&dev->submitted); + /* decrement our usage count */ kref_put(&dev->kref, lcd_delete); diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index c7f82310e73ec3f933b6cff1e62dc37d367abe3f..fc3fc9d48a55317726570670c747c75b0d35a4e4 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2853,6 +2853,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index b5d6616442635b8bf4c814bfd629a9ec18e927a3..0be8efcda15d5791aafd7cf48e81440afb3744b5 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref) dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n"); usb_put_dev(priv->usbdev); + priv->usbdev = NULL; kfree(priv); } @@ -753,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (pp) { priv = pp->private_data; - priv->usbdev = NULL; priv->pp = NULL; dev_dbg(&intf->dev, "parport_remove_port\n"); parport_remove_port(pp); diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 6d9fd5f649036e8fb47c39eaeffa26f856724e99..be0505b8b5d4e5515996faf25ef7485e978c9248 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -60,6 +60,7 @@ struct usb_yurex { struct kref kref; struct mutex io_mutex; + unsigned long disconnected:1; struct fasync_struct *async_queue; wait_queue_head_t waitq; @@ -92,7 +93,6 @@ static void yurex_delete(struct kref *kref) dev_dbg(&dev->interface->dev, "%s\n", __func__); - usb_put_dev(dev->udev); if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); kfree(dev->cntl_req); @@ -108,6 +108,8 @@ static void yurex_delete(struct kref *kref) dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } + usb_put_intf(dev->interface); + usb_put_dev(dev->udev); kfree(dev); } @@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb) switch (status) { case 0: /*success*/ break; + /* The device is terminated or messed up, give up */ case -EOVERFLOW: dev_err(&dev->interface->dev, "%s - overflow with length %d, actual length is %d\n", @@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb) case -ENOENT: case -ESHUTDOWN: case -EILSEQ: - /* The device is terminated, clean up */ + case -EPROTO: + case -ETIME: return; 
default: dev_err(&dev->interface->dev, "%s - unknown status received: %d\n", __func__, status); - goto exit; + return; } /* handle received message */ @@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb) break; } -exit: retval = usb_submit_urb(dev->urb, GFP_ATOMIC); if (retval) { dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n", @@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ iface_desc = interface->cur_altsetting; @@ -314,8 +317,10 @@ static void yurex_disconnect(struct usb_interface *interface) usb_deregister_dev(interface, &yurex_class); /* prevent more I/O from starting */ + usb_poison_urb(dev->urb); + usb_poison_urb(dev->cntl_urb); mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); /* wakeup waiters */ @@ -403,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, dev = file->private_data; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); return -ENODEV; } @@ -438,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, goto error; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index ac2b4fcc265f65c10a128bcf092de3a134319dc8..094e812e9e69223d3c7a866bb1f416ef73a9aeac 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); - mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); - kfree(rp->b_vec); - rp->b_vec = vec; - rp->b_size = size; - rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; - rp->cnt_lost = 0; + if (rp->mmap_active) { + mon_free_buff(vec, size/CHUNK_SIZE); + kfree(vec); + ret = -EBUSY; + } else { + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); + kfree(rp->b_vec); + rp->b_vec = vec; + rp->b_size = size; + rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; + rp->cnt_lost = 0; + } spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } @@ -1216,13 +1222,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait) static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; + unsigned long flags; + + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active++; + spin_unlock_irqrestore(&rp->b_lock, flags); } static void mon_bin_vma_close(struct vm_area_struct *vma) { + unsigned long flags; + struct mon_reader_bin *rp = vma->vm_private_data; + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active--; + spin_unlock_irqrestore(&rp->b_lock, flags); } /* @@ -1234,16 +1248,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; - mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) { - mutex_unlock(&rp->fetch_lock); + if (offset >= rp->b_size) return VM_FAULT_SIGBUS; - } chunk_idx = offset / CHUNK_SIZE; 
pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); - mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } @@ -1258,6 +1268,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig index 40bbf1f5333785c27e6ef965db3f7a53b6d95daf..fe58904f350bdf2c781ae67d7cfd11807ec20d27 100644 --- a/drivers/usb/mtu3/Kconfig +++ b/drivers/usb/mtu3/Kconfig @@ -4,6 +4,7 @@ config USB_MTU3 tristate "MediaTek USB3 Dual Role controller" depends on USB || USB_GADGET depends on ARCH_MEDIATEK || COMPILE_TEST + depends on EXTCON || !EXTCON select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD help Say Y or M here if your system runs on MediaTek SoCs with diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index d045d8458f81c27930a090ab06ee1539cfeb420a..86069352013262d04b3ca4e884c152cfdf238906 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -185,8 +185,8 @@ static void mtu3_intr_enable(struct mtu3 *mtu) if (mtu->is_u3_ip) { /* Enable U3 LTSSM interrupts */ - value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR | - VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR; + value = HOT_RST_INTR | WARM_RST_INTR | + ENTER_U3_INTR | EXIT_U3_INTR; mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value); } @@ -578,8 +578,10 @@ static void mtu3_regs_init(struct mtu3 *mtu) if (mtu->is_u3_ip) { /* disable LGO_U1/U2 by default */ mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL, - SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE | SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE); + /* enable accept LGO_U1/U2 link command from host */ + mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL, + SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE); /* device responses to u3_exit from host automatically */ mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN); /* automatically build U2 link when U3 detect fail */ diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c index 5c60a8c5a0b5c7cef25ca2fcbf2ac396c1b5b6b5..bbcd3332471dc2217042e62527b12745c48142e6 100644 --- a/drivers/usb/mtu3/mtu3_gadget.c +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -585,6 +585,17 @@ static const struct usb_gadget_ops mtu3_gadget_ops = { .udc_stop = mtu3_gadget_stop, }; +static void mtu3_state_reset(struct mtu3 *mtu) +{ + mtu->address = 0; + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + mtu->may_wakeup = 0; + mtu->u1_enable = 0; + mtu->u2_enable = 0; + mtu->delayed_status = false; + mtu->test_mode = false; +} + static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep, u32 epnum, u32 is_in) { @@ -702,6 +713,7 @@ void mtu3_gadget_disconnect(struct mtu3 *mtu) spin_lock(&mtu->lock); } + mtu3_state_reset(mtu); usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); } @@ -712,12 +724,6 @@ void mtu3_gadget_reset(struct mtu3 *mtu) /* report disconnect, if we didn't flush EP state */ if (mtu->g.speed != USB_SPEED_UNKNOWN) mtu3_gadget_disconnect(mtu); - - mtu->address = 0; - mtu->ep0_state = MU3D_EP0_STATE_SETUP; - mtu->may_wakeup = 0; - mtu->u1_enable = 0; - mtu->u2_enable = 0; - mtu->delayed_status = false; - mtu->test_mode = false; + else + mtu3_state_reset(mtu); } diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c index 
25216e79cd6ee1ecf83efa13f23c4caf24353fd0..3c464d8ae023fa4eb6d28ef1f6a9db47f369551e 100644 --- a/drivers/usb/mtu3/mtu3_gadget_ep0.c +++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c @@ -336,9 +336,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu, lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); if (set) - lpc |= SW_U1_ACCEPT_ENABLE; + lpc |= SW_U1_REQUEST_ENABLE; else - lpc &= ~SW_U1_ACCEPT_ENABLE; + lpc &= ~SW_U1_REQUEST_ENABLE; mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); mtu->u1_enable = !!set; @@ -351,9 +351,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu, lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); if (set) - lpc |= SW_U2_ACCEPT_ENABLE; + lpc |= SW_U2_REQUEST_ENABLE; else - lpc &= ~SW_U2_ACCEPT_ENABLE; + lpc &= ~SW_U2_REQUEST_ENABLE; mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); mtu->u2_enable = !!set; diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c index ff62ba2321779c3a0ae92b517317d12be8fed0f3..326b40747128c2d5902de0050c9b75b2ac792cf0 100644 --- a/drivers/usb/mtu3/mtu3_qmu.c +++ b/drivers/usb/mtu3/mtu3_qmu.c @@ -427,7 +427,7 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum) return; } - dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq); + dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req); mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index ad08895e78f9631c0935aeba9704a0b948fd6d32..c3dae7d5cb6e7a06bad420bb6e4811113433b582 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -66,7 +66,7 @@ config USB_MUSB_SUNXI depends on NOP_USB_XCEIV depends on PHY_SUN4I_USB depends on EXTCON - depends on GENERIC_PHY + select GENERIC_PHY select SUNXI_SRAM config USB_MUSB_DAVINCI diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 23a0df79ef2129323d3b8f0548f06c25d1421916..403eb97915f8a86f15dc00f07c2077890663dc25 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -181,9 +181,11 @@ static void dsps_musb_enable(struct musb *musb) musb_writel(reg_base, wrp->epintr_set, epmask); musb_writel(reg_base, wrp->coreintr_set, coremask); - /* start polling for ID change in dual-role idle mode */ - if (musb->xceiv->otg->state == OTG_STATE_B_IDLE && - musb->port_mode == MUSB_OTG) + /* + * start polling for runtime PM active and idle, + * and for ID change in dual-role idle mode. 
+ */ + if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) dsps_mod_timer(glue, -1); } @@ -227,8 +229,13 @@ static int dsps_check_status(struct musb *musb, void *unused) switch (musb->xceiv->otg->state) { case OTG_STATE_A_WAIT_VRISE: - dsps_mod_timer_optional(glue); - break; + if (musb->port_mode == MUSB_HOST) { + musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; + dsps_mod_timer_optional(glue); + break; + } + /* fall through */ + case OTG_STATE_A_WAIT_BCON: /* keep VBUS on for host-only mode */ if (musb->port_mode == MUSB_HOST) { @@ -249,6 +256,10 @@ static int dsps_check_status(struct musb *musb, void *unused) musb->xceiv->otg->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } + + if (musb->port_mode == MUSB_PERIPHERAL) + skip_session = 1; + if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session) musb_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index eae8b1b1b45b864a545827b9c5403b88b8779f78..ffe462a657b15d8b592ac7801dc6f4be4629c86f 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum) } if (request) { - u8 is_dma = 0; - bool short_packet = false; trace_musb_req_tx(req); if (dma && (csr & MUSB_TXCSR_DMAENAB)) { - is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); @@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) */ if ((request->zero && request->length) && (request->length % musb_ep->packet_sz == 0) - && (request->actual == request->length)) - short_packet = true; + && (request->actual == request->length)) { - if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && - (is_dma && (!dma->desired_mode || - (request->actual & - (musb_ep->packet_sz - 1))))) - short_packet = true; - - if (short_packet) { /* * On DMA completion, FIFO may not be * available yet... 
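[Editorial aside, not part of the patch: the musb_g_tx() hunk above keeps only the zero-length-packet (ZLP) condition and drops the DMA short-packet special case. The kept condition matters because a bulk transfer whose length is a non-zero exact multiple of the endpoint packet size never ends with a short packet on the wire, so the host cannot detect the end of the transfer unless the gadget queues an explicit ZLP when the function driver asked for one (request->zero). A minimal stand-alone sketch of that check; the helper name and the sample values are illustrative only:

/* needs_zlp.c - illustrative only; mirrors the condition kept in musb_g_tx() */
#include <stdbool.h>
#include <stdio.h>

/* 'zero' corresponds to request->zero: the function driver requested a ZLP. */
static bool needs_zlp(bool zero, unsigned int length,
                      unsigned int actual, unsigned int packet_sz)
{
        return zero && length &&
               (length % packet_sz == 0) &&   /* ends exactly on a packet boundary */
               (actual == length);            /* and the whole request was sent */
}

int main(void)
{
        /* 1024-byte transfer on a 512-byte endpoint: ZLP required. */
        printf("%d\n", needs_zlp(true, 1024, 1024, 512));  /* prints 1 */
        /* 1000-byte transfer already ends with a short packet: no ZLP. */
        printf("%d\n", needs_zlp(true, 1000, 1000, 512));  /* prints 0 */
        return 0;
}
]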
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a688f7f87829f760c9fdee9008f6ca365e342dd3..5fc6825745f21bd6246d71b99fd98d7313396f01 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) channel->status = MUSB_DMA_STATUS_FREE; /* completed */ - if ((devctl & MUSB_DEVCTL_HM) - && (musb_channel->transmit) - && ((channel->desired_mode == 0) - || (channel->actual_len & - (musb_channel->max_packet_sz - 1))) - ) { + if (musb_channel->transmit && + (!channel->desired_mode || + (channel->actual_len % + musb_channel->max_packet_sz))) { u8 epnum = musb_channel->epnum; int offset = musb->io.ep_offset(epnum, MUSB_TXCSR); @@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) */ musb_ep_select(mbase, epnum); txcsr = musb_readw(mbase, offset); - txcsr &= ~(MUSB_TXCSR_DMAENAB + if (channel->desired_mode == 1) { + txcsr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET); - musb_writew(mbase, offset, txcsr); - /* Send out the packet */ - txcsr &= ~MUSB_TXCSR_DMAMODE; + musb_writew(mbase, offset, txcsr); + /* Send out the packet */ + txcsr &= ~MUSB_TXCSR_DMAMODE; + txcsr |= MUSB_TXCSR_DMAENAB; + } txcsr |= MUSB_TXCSR_TXPKTRDY; musb_writew(mbase, offset, txcsr); } diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 39453287b5c36813288e0beb4c2f73f603e02c4e..9629ddb943d5cfaf776303d77f6e80c734937b26 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -1102,6 +1102,11 @@ static int tusb_musb_init(struct musb *musb) /* dma address for async dma */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + pr_debug("no async dma resource?\n"); + ret = -ENODEV; + goto done; + } musb->async = mem->start; /* dma address for sync dma */ diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d7312eed60882a7c356b0d6736c5df836d2dbaf0..91ea3083e7ad97793872fbb9459ab205fd9b5161 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -21,7 +21,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' select USB_PHY help diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 27bdb72225272394d1d1001dc8d50e17c0186601..f5f0568d8533e111a6d1287d0ef39916005efbb3 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev) if (ret) return ret; - ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); - if (ret) - return ret; am_phy->usb_phy_gen.phy.init = am335x_init; am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; @@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev) device_set_wakeup_enable(dev, false); phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); - return 0; + return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); } static int am335x_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 4310df46639d25d1d4b8b96628aca3bc890869cf..b0792585d5bc205d102330ebb32be37f26e1c65d 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -457,6 +457,10 @@ static int 
usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev) * platform functions */ static const struct of_device_id usbhs_of_match[] = { + { + .compatible = "renesas,usbhs-r8a774c0", + .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL, + }, { .compatible = "renesas,usbhs-r8a7790", .data = (void *)USBHS_TYPE_RCAR_GEN2, diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h index 6137f7942c05cabc1423771c21a0855aaec3be70..63a75fd9fa0c3af651b0d68dfa1c67d6fb165d79 100644 --- a/drivers/usb/renesas_usbhs/common.h +++ b/drivers/usb/renesas_usbhs/common.h @@ -157,11 +157,12 @@ struct usbhs_priv; #define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */ #define VALID (1 << 3) /* USB Request Receive */ -#define DVSQ_MASK (0x3 << 4) /* Device State */ +#define DVSQ_MASK (0x7 << 4) /* Device State */ #define POWER_STATE (0 << 4) #define DEFAULT_STATE (1 << 4) #define ADDRESS_STATE (2 << 4) #define CONFIGURATION_STATE (3 << 4) +#define SUSPENDED_STATE (4 << 4) #define CTSQ_MASK (0x7) /* Control Transfer Stage */ #define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */ @@ -207,6 +208,7 @@ struct usbhs_priv; /* DCPCTR */ #define BSTS (1 << 15) /* Buffer Status */ #define SUREQ (1 << 14) /* Sending SETUP Token */ +#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */ #define CSSTS (1 << 12) /* CSSTS Status */ #define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */ #define SQCLR (1 << 8) /* Toggle Bit Clear */ diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 39fa2fc1b8b767e10d37b6bde54a696d00de9f0c..aeb53ec5cc6ab409dddc2cc2dbca38ecc86eb274 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt) list_del_init(&pkt->node); } -static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) { return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node); } @@ -802,9 +802,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) } static void usbhsf_dma_complete(void *arg); -static void xfer_work(struct work_struct *work) +static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) { - struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_fifo *fifo; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); @@ -812,12 +811,10 @@ static void xfer_work(struct work_struct *work) struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; - unsigned long flags; - usbhs_lock(priv, flags); fifo = usbhs_pipe_to_fifo(pipe); if (!fifo) - goto xfer_work_end; + return; chan = usbhsf_dma_chan_get(fifo, pkt); dir = usbhs_pipe_is_dir_in(pipe) ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; @@ -826,7 +823,7 @@ static void xfer_work(struct work_struct *work) pkt->trans, dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) - goto xfer_work_end; + return; desc->callback = usbhsf_dma_complete; desc->callback_param = pipe; @@ -834,7 +831,7 @@ static void xfer_work(struct work_struct *work) pkt->cookie = dmaengine_submit(desc); if (pkt->cookie < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); - goto xfer_work_end; + return; } dev_dbg(dev, " %s %d (%d/ %d)\n", @@ -845,8 +842,17 @@ static void xfer_work(struct work_struct *work) dma_async_issue_pending(chan); usbhsf_dma_start(pipe, fifo); usbhs_pipe_enable(pipe); +} + +static void xfer_work(struct work_struct *work) +{ + struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + unsigned long flags; -xfer_work_end: + usbhs_lock(priv, flags); + usbhsf_dma_xfer_preparing(pkt); usbhs_unlock(priv, flags); } @@ -899,8 +905,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) pkt->trans = len; usbhsf_tx_irq_ctrl(pipe, 0); - INIT_WORK(&pkt->work, xfer_work); - schedule_work(&pkt->work); + /* FIXME: Workaound for usb dmac that driver can be used in atomic */ + if (usbhs_get_dparam(priv, has_usb_dmac)) { + usbhsf_dma_xfer_preparing(pkt); + } else { + INIT_WORK(&pkt->work, xfer_work); + schedule_work(&pkt->work); + } return 0; @@ -1006,8 +1017,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, pkt->trans = pkt->length; - INIT_WORK(&pkt->work, xfer_work); - schedule_work(&pkt->work); + usbhsf_dma_xfer_preparing(pkt); return 0; diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index 88d1816bcda22c8d989c7665cfe1fc8478ef1ae9..c3d3cc35cee0f640784d48b5f3a471a9608ac6cf 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, void *buf, int len, int zero, int sequence); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt); void usbhs_pkt_start(struct usbhs_pipe *pipe); +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe); #endif /* RENESAS_USB_FIFO_H */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 59cac40aafcc13be4623a9f6c3158d97d6cbe1a3..f36248e9387db1972a66f0d49e91519bc7869cb4 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -456,12 +456,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv, { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); + int state = usbhs_status_get_device_state(irq_state); gpriv->gadget.speed = usbhs_bus_get_speed(priv); - dev_dbg(dev, "state = %x : speed : %d\n", - usbhs_status_get_device_state(irq_state), - gpriv->gadget.speed); + dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed); + + if (gpriv->gadget.speed != USB_SPEED_UNKNOWN && + (state & SUSPENDED_STATE)) { + if (gpriv->driver && gpriv->driver->suspend) + gpriv->driver->suspend(&gpriv->gadget); + usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED); + } return 0; } @@ -721,8 +727,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); unsigned long flags; - - 
usbhsg_pipe_disable(uep); + int ret = 0; dev_dbg(dev, "set halt %d (pipe %d)\n", halt, usbhs_pipe_number(pipe)); @@ -730,6 +735,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) /******************** spin lock ********************/ usbhs_lock(priv, flags); + /* + * According to usb_ep_set_halt()'s description, this function should + * return -EAGAIN if the IN endpoint has any queue or data. Note + * that the usbhs_pipe_is_dir_in() returns false if the pipe is an + * IN endpoint in the gadget mode. + */ + if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) || + usbhs_pipe_contains_transmittable_data(pipe))) { + ret = -EAGAIN; + goto out; + } + if (halt) usbhs_pipe_stall(pipe); else @@ -740,10 +757,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) else usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); +out: usbhs_unlock(priv, flags); /******************** spin unlock ******************/ - return 0; + return ret; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index c4922b96c93bcec16e3b60010eed556306914d6c..9e5afdde1adbf8263e9a72bc67b8b7dea034085b 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe) return -EBUSY; } +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe) +{ + u16 val; + + /* Do not support for DCP pipe */ + if (usbhs_pipe_is_dcp(pipe)) + return false; + + val = usbhsp_pipectrl_get(pipe); + if (val & INBUFM) + return true; + + return false; +} + /* * PID ctrl */ diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index 3080423e600c7ec6777a950fde20823a72bfe17b..3b130529408ba9991aa4ef5d1f83c4040323eac1 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h @@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe); void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe, int needs_bfre, int bfre_enable); int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe); +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe); void usbhs_pipe_enable(struct usbhs_pipe *pipe); void usbhs_pipe_disable(struct usbhs_pipe *pipe); void usbhs_pipe_stall(struct usbhs_pipe *pipe); diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig index f5a5e6f79f1b977d56d1363c30595d32f32c6dd5..e4194ac94510771cafedfa9ec6e6edb24539d538 100644 --- a/drivers/usb/roles/Kconfig +++ b/drivers/usb/roles/Kconfig @@ -1,3 +1,16 @@ +config USB_ROLE_SWITCH + tristate "USB Role Switch Support" + help + USB Role Switch is a device that can select the USB role - host or + device - for a USB port (connector). In most cases dual-role capable + USB controller will also represent the switch, but on some platforms + multiplexer/demultiplexer switch is used to route the data lines on + the USB connector between separate USB host and device controllers. + + Say Y here if your USB connectors support both device and host roles. + To compile the driver as module, choose M here: the module will be + called roles.ko. 
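[Editorial aside, not part of the patch: the new USB_ROLE_SWITCH help text above describes the role-switch class that this series moves from drivers/usb/common/roles.c to drivers/usb/roles/class.c. A minimal, hypothetical consumer sketch of that API follows; the function name and device pointer are illustrative, while usb_role_switch_get(), usb_role_switch_set_role() and usb_role_switch_put() are the calls exported by the class driver itself:

/* Illustrative role-switch consumer; assumes a device with a "usb-role-switch"
 * connection described in firmware. Not part of this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/role.h>

static int example_force_host(struct device *dev)
{
        struct usb_role_switch *sw;
        int ret;

        /* Look up the switch associated with this device. */
        sw = usb_role_switch_get(dev);
        if (IS_ERR_OR_NULL(sw))
                return sw ? PTR_ERR(sw) : -ENODEV;

        /* Route the connector's data lines to the host controller. */
        ret = usb_role_switch_set_role(sw, USB_ROLE_HOST);

        /* Drop the device and module references taken by _get(). */
        usb_role_switch_put(sw);
        return ret;
}
]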
+ if USB_ROLE_SWITCH config USB_ROLES_INTEL_XHCI diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile index e44b179ba2751261dfe562d22e58aa39e513f694..c02873206fc1855c6f162782193e2d26be9e88a5 100644 --- a/drivers/usb/roles/Makefile +++ b/drivers/usb/roles/Makefile @@ -1 +1,3 @@ -obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o +obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o +roles-y := class.o +obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o diff --git a/drivers/usb/common/roles.c b/drivers/usb/roles/class.c similarity index 100% rename from drivers/usb/common/roles.c rename to drivers/usb/roles/class.c index 99116af07f1d9ca1e7c275a70122f370ec0d32a3..1dd492e89719e501d6bbd621339ba047fde6ea42 100644 --- a/drivers/usb/common/roles.c +++ b/drivers/usb/roles/class.c @@ -130,8 +130,8 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get); void usb_role_switch_put(struct usb_role_switch *sw) { if (!IS_ERR_OR_NULL(sw)) { - put_device(&sw->dev); module_put(sw->dev.parent->driver->owner); + put_device(&sw->dev); } } EXPORT_SYMBOL_GPL(usb_role_switch_put); diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 17940589c647cfdcbcd81e7e3b0c5d30dec6e8b8..7d289302ff6cfd22578885acbbadb6a048a9f5af 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options) cflag |= PARENB; break; } - co->cflag = cflag; /* * no need to check the index here: if the index is wrong, console @@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options) serial->type->set_termios(tty, port, &dummy); tty_port_tty_set(&port->port, NULL); + tty_save_termios(tty); tty_kref_put(tty); } tty_port_set_initialized(&port->port, 1); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index c0777a374a88f86847b8ada0891d9849e7aa70a8..7ae121567098c9442350a9e140322754b3f6a18b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ @@ -79,6 +80,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ + { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ @@ -123,6 +125,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. 
*/ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ @@ -1353,8 +1356,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio) if (priv->partnum == CP210X_PARTNUM_CP2105) req_type = REQTYPE_INTERFACE_TO_HOST; + result = usb_autopm_get_interface(serial->interface); + if (result) + return result; + result = cp210x_read_vendor_block(serial, req_type, CP210X_READ_LATCH, &buf, sizeof(buf)); + usb_autopm_put_interface(serial->interface); if (result < 0) return result; @@ -1375,6 +1383,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) buf.mask = BIT(gpio); + result = usb_autopm_get_interface(serial->interface); + if (result) + goto out; + if (priv->partnum == CP210X_PARTNUM_CP2105) { result = cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE, @@ -1392,6 +1404,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value) NULL, 0, USB_CTRL_SET_TIMEOUT); } + usb_autopm_put_interface(serial->interface); +out: if (result < 0) { dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n", result); diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index e0035c0231202da04e88830e9b2c3e11e8416aad..2c58649fd47a4d121ebbd2aecdb0b2e98e8c3091 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -769,7 +769,7 @@ static void cypress_send(struct usb_serial_port *port) usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev, usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress), - port->interrupt_out_buffer, port->interrupt_out_size, + port->interrupt_out_buffer, actual_size, cypress_write_int_callback, port, priv->write_urb_interval); result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC); if (result) { diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 96036f87b1dedcbf68de9b2e541c841b0e370678..087e5f1656f8b62aefe5292d11e2e81d19583cde 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -556,9 +556,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port) static void f81232_close(struct usb_serial_port *port) { + struct f81232_private *port_priv = usb_get_serial_port_data(port); + f81232_port_disable(port); usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); + flush_work(&port_priv->interrupt_work); } static void f81232_dtr_rts(struct usb_serial_port *port, int on) @@ -652,6 +655,40 @@ static int f81232_port_remove(struct usb_serial_port *port) return 0; } +static int f81232_suspend(struct usb_serial *serial, pm_message_t message) +{ + struct usb_serial_port *port = serial->port[0]; + struct f81232_private *port_priv = usb_get_serial_port_data(port); + int i; + + for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) + usb_kill_urb(port->read_urbs[i]); + + usb_kill_urb(port->interrupt_in_urb); + + if (port_priv) + flush_work(&port_priv->interrupt_work); + + return 0; +} + +static int f81232_resume(struct usb_serial *serial) +{ + struct usb_serial_port *port = serial->port[0]; + int result; + + if (tty_port_initialized(&port->port)) { + result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); + if (result) { + dev_err(&port->dev, "submit interrupt urb 
failed: %d\n", + result); + return result; + } + } + + return usb_serial_generic_resume(serial); +} + static struct usb_serial_driver f81232_device = { .driver = { .owner = THIS_MODULE, @@ -675,6 +712,8 @@ static struct usb_serial_driver f81232_device = { .read_int_callback = f81232_read_int_callback, .port_probe = f81232_port_probe, .port_remove = f81232_port_remove, + .suspend = f81232_suspend, + .resume = f81232_resume, }; static struct usb_serial_driver * const serial_drivers[] = { diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index 4dfbff20bda4cbfb354aa9efcad90f5e4ce30461..db6c93c04b3c8fd52e433930b3b10b3aadabb827 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -45,14 +45,17 @@ #define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS) #define F81534_DEF_CONF_ADDRESS_START 0x3000 -#define F81534_DEF_CONF_SIZE 8 +#define F81534_DEF_CONF_SIZE 12 #define F81534_CUSTOM_ADDRESS_START 0x2f00 #define F81534_CUSTOM_DATA_SIZE 0x10 #define F81534_CUSTOM_NO_CUSTOM_DATA 0xff #define F81534_CUSTOM_VALID_TOKEN 0xf0 #define F81534_CONF_OFFSET 1 -#define F81534_CONF_GPIO_OFFSET 4 +#define F81534_CONF_INIT_GPIO_OFFSET 4 +#define F81534_CONF_WORK_GPIO_OFFSET 8 +#define F81534_CONF_GPIO_SHUTDOWN 7 +#define F81534_CONF_GPIO_RS232 1 #define F81534_MAX_DATA_BLOCK 64 #define F81534_MAX_BUS_RETRY 20 @@ -1359,8 +1362,19 @@ static int f81534_set_port_output_pin(struct usb_serial_port *port) serial_priv = usb_get_serial_data(serial); port_priv = usb_get_serial_port_data(port); - idx = F81534_CONF_GPIO_OFFSET + port_priv->phy_num; + idx = F81534_CONF_INIT_GPIO_OFFSET + port_priv->phy_num; value = serial_priv->conf_data[idx]; + if (value >= F81534_CONF_GPIO_SHUTDOWN) { + /* + * Newer IC configure will make transceiver in shutdown mode on + * initial power on. We need enable it before using UARTs. 
+ */ + idx = F81534_CONF_WORK_GPIO_OFFSET + port_priv->phy_num; + value = serial_priv->conf_data[idx]; + if (value >= F81534_CONF_GPIO_SHUTDOWN) + value = F81534_CONF_GPIO_RS232; + } + pins = &f81534_port_out_pins[port_priv->phy_num]; for (i = 0; i < ARRAY_SIZE(pins->pin); ++i) { diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b5cef322826f15a1c3980f7bb8bd99dbd9a08f11..3c0f38cd3a5a478225e26bb0f30b437d1316112b 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -599,6 +599,8 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, @@ -1015,6 +1017,15 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, + /* EZPrototypes devices */ + { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) }, + { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) }, + /* Sienna devices */ + { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, + { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, + /* U-Blox devices */ + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 975d02666c5a0cf93cd647d8667f988e589c8171..e8373528264c3634a9e474f2836a82d2b5b8b7a5 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,9 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* Sienna Serial Interface by Secyourit GmbH */ +#define FTDI_SIENNA_PID 0x8348 + /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ #define CYBER_CORTEX_AV_PID 0x8698 @@ -567,7 +570,9 @@ /* * NovaTech product ids (FTDI_VID) */ -#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ +#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ +#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ /* * Synapse Wireless product ids (FTDI_VID) @@ -686,6 +691,12 @@ #define BANDB_TTL3USB9M_PID 0xAC50 #define BANDB_ZZ_PROG1_USB_PID 0xBA02 +/* + * Echelon USB Serial Interface + */ +#define ECHELON_VID 0x0920 +#define ECHELON_U20_PID 0x7500 + /* * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI */ @@ -1308,6 +1319,12 @@ #define IONICS_VID 0x1c0c #define IONICS_PLUGCOMPUTER_PID 0x0102 +/* + * EZPrototypes (PID reseller) + */ +#define EZPROTOTYPES_VID 0x1c40 +#define HJELMSLUND_USB485_ISO_PID 0x0477 + /* * Dresden Elektronik Sensor Terminal Board */ @@ -1535,3 +1552,16 @@ #define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ + +/* + * Unjo AB + */ +#define UNJO_VID 0x22B7 +#define UNJO_ISODEBUG_V1_PID 0x150D + +/* + * U-Blox products (http://www.u-blox.com). 
+ */ +#define UBLOX_VID 0x1546 +#define UBLOX_C099F9P_ZED_PID 0x0502 +#define UBLOX_C099F9P_ODIN_PID 0x0503 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 2274d9625f63590ca3d7e211fd387a109d761b37..0fff4968ea1bd3d727cf2dc6e15550697c1760ea 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -376,6 +376,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb) struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; unsigned long flags; + bool stopped = false; int status = urb->status; int i; @@ -383,33 +384,51 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb) if (urb == port->read_urbs[i]) break; } - set_bit(i, &port->read_urbs_free); dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i, urb->actual_length); switch (status) { case 0: + usb_serial_debug_data(&port->dev, __func__, urb->actual_length, + data); + port->serial->type->process_read_urb(urb); break; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: dev_dbg(&port->dev, "%s - urb stopped: %d\n", __func__, status); - return; + stopped = true; + break; case -EPIPE: dev_err(&port->dev, "%s - urb stopped: %d\n", __func__, status); - return; + stopped = true; + break; default: dev_dbg(&port->dev, "%s - nonzero urb status: %d\n", __func__, status); - goto resubmit; + break; } - usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); - port->serial->type->process_read_urb(urb); + /* + * Make sure URB processing is done before marking as free to avoid + * racing with unthrottle() on another CPU. Matches the barriers + * implied by the test_and_clear_bit() in + * usb_serial_generic_submit_read_urb(). + */ + smp_mb__before_atomic(); + set_bit(i, &port->read_urbs_free); + /* + * Make sure URB is marked as free before checking the throttled flag + * to avoid racing with unthrottle() on another CPU. Matches the + * smp_mb() in unthrottle(). + */ + smp_mb__after_atomic(); + + if (stopped) + return; -resubmit: /* Throttle the device if requested by tty */ spin_lock_irqsave(&port->lock, flags); port->throttled = port->throttle_req; @@ -484,6 +503,12 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty) port->throttled = port->throttle_req = 0; spin_unlock_irq(&port->lock); + /* + * Matches the smp_mb__after_atomic() in + * usb_serial_generic_read_bulk_callback(). 
+ */ + smp_mb(); + if (was_throttled) usb_serial_generic_submit_read_urbs(port, GFP_KERNEL); } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 97c69d373ca6544d484f7ea246aebb13e703f1af..8090b6ec26415def01dc73f2e0edbbcfb74588b5 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -846,11 +846,12 @@ static void edge_bulk_out_data_callback(struct urb *urb) static void edge_bulk_out_cmd_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; + struct device *dev = &urb->dev->dev; int status = urb->status; atomic_dec(&CmdUrbs); - dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n", - __func__, urb, atomic_read(&CmdUrbs)); + dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb, + atomic_read(&CmdUrbs)); /* clean up the transfer buffer */ @@ -860,8 +861,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb) usb_free_urb(urb); if (status) { - dev_dbg(&urb->dev->dev, - "%s - nonzero write bulk status received: %d\n", + dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); return; } @@ -2919,16 +2919,18 @@ static int edge_startup(struct usb_serial *serial) response = 0; if (edge_serial->is_epic) { + struct usb_host_interface *alt; + + alt = serial->interface->cur_altsetting; + /* EPIC thing, set up our interrupt polling now and our read * urb, so that the device knows it really is connected. */ interrupt_in_found = bulk_in_found = bulk_out_found = false; - for (i = 0; i < serial->interface->altsetting[0] - .desc.bNumEndpoints; ++i) { + for (i = 0; i < alt->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint; int buffer_size; - endpoint = &serial->interface->altsetting[0]. - endpoint[i].desc; + endpoint = &alt->endpoint[i].desc; buffer_size = usb_endpoint_maxp(endpoint); if (!interrupt_in_found && (usb_endpoint_is_int_in(endpoint))) { diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index d34779fe4a8d046e616cdb2e32c3258cda0c218f..e66a59ef43a1cb59cfa0d41b8af8c7316ededa69 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint, ep_desc = find_ep(serial, endpoint); if (!ep_desc) { - /* leak the urb, something's wrong and the callers don't care */ - return urb; + usb_free_urb(urb); + return NULL; } if (usb_endpoint_xfer_int(ep_desc)) { ep_type_name = "INT"; diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 27109522fd8b24fee3e5efc812061b44fe3c089c..c0232b67a40f4b975d46931b05377c4dbdea9e3e 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, if (!urbtrack) return -ENOMEM; - kref_get(&mos_parport->ref_count); - urbtrack->mos_parport = mos_parport; urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urbtrack->urb) { kfree(urbtrack); @@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, usb_sndctrlpipe(usbdev, 0), (unsigned char *)urbtrack->setup, NULL, 0, async_complete, urbtrack); + kref_get(&mos_parport->ref_count); + urbtrack->mos_parport = mos_parport; kref_init(&urbtrack->ref_count); INIT_LIST_HEAD(&urbtrack->urblist_entry); @@ -1894,10 +1894,6 @@ static int mos7720_startup(struct usb_serial *serial) product = le16_to_cpu(serial->dev->descriptor.idProduct); dev = serial->dev; - /* setting configuration feature to 
one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); - if (product == MOSCHIP_DEVICE_ID_7715) { struct urb *urb = serial->port[0]->interrupt_in_urb; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index b42bad85097ae853a49740a87356ec284bde2482..1d391565dbc604970df845ea23efcfbcbc6d687a 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -118,11 +118,15 @@ /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 * ATEN UC2322 device using Moschip MCS7820 + * MOXA UPort 2210 device using Moschip MCS7820 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 #define ATENINTL_DEVICE_ID_UC2322 0x7820 +#define USB_VENDOR_ID_MOXA 0x110a +#define MOXA_DEVICE_ID_2210 0x2210 + /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 @@ -193,6 +197,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); @@ -2053,6 +2058,7 @@ static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); + u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor); u8 *buf; int device_type; @@ -2062,6 +2068,11 @@ static int mos7840_probe(struct usb_serial *serial, goto out; } + if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) { + device_type = MOSCHIP_DEVICE_ID_7820; + goto out; + } + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -2314,11 +2325,6 @@ static int mos7840_port_probe(struct usb_serial_port *port) goto error; } else dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); - - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - 0x03, 0x00, 0x01, 0x00, NULL, 0x00, - MOS_WDR_TIMEOUT); } return 0; error: @@ -2358,6 +2364,49 @@ static int mos7840_port_remove(struct usb_serial_port *port) return 0; } +static int mos7840_suspend(struct usb_serial *serial, pm_message_t message) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + usb_kill_urb(mos7840_port->read_urb); + mos7840_port->read_urb_busy = false; + } + + return 0; +} + +static int mos7840_resume(struct usb_serial *serial) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int res; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + mos7840_port->read_urb_busy = true; + res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO); + if (res) + mos7840_port->read_urb_busy = false; + } + + return 0; +} + static struct usb_serial_driver moschip7840_4port_device = { .driver = { .owner = THIS_MODULE, @@ -2386,6 +2435,8 @@ static struct usb_serial_driver moschip7840_4port_device = { .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, .read_int_callback = mos7840_interrupt_callback, + .suspend = mos7840_suspend, + 
.resume = mos7840_resume, }; static struct usb_serial_driver * const serial_drivers[] = { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e72ad9f81c73973b86bd7992c0f331039a8192bf..553adab174bf345d04180d7f25666906593a0a48 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define DELL_PRODUCT_5821E 0x81d7 +#define DELL_PRODUCT_5821E_ESIM 0x81e0 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -246,6 +247,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 +#define QUECTEL_PRODUCT_EM12 0x0512 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -418,6 +420,7 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 +#define CINTERION_PRODUCT_CLS8 0x00b0 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -967,6 +970,11 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) }, + /* Motorola devices */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, @@ -1037,6 +1045,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1066,7 +1076,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */ + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) }, /* Quectel products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), @@ -1087,6 +1098,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) | RSVD(2) 
| RSVD(3) | RSVD(4) | NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1144,10 +1158,22 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */ + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ + .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ + .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1164,6 +1190,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */ + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ + .driver_info = NCTRL(0) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1328,6 +1362,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */ + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), @@ -1531,6 +1567,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ .driver_info = RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1758,6 +1796,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), .driver_info = RSVD(5) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ + .driver_info = RSVD(7) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), @@ -1820,6 +1861,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), + .driver_info = RSVD(0) | RSVD(4) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -1931,16 +1974,37 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ .driver_info = RSVD(4) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ - { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */ + .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */ + { 
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, + { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ + .driver_info = RSVD(4) | RSVD(5) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ + .driver_info = RSVD(6) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index e41f725ac7aaeae00a451a5041d95c0f993be6dd..5d7b21ea6238344faa72f42da6ba689c3bb4fd9a 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), @@ -91,15 +92,21 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 26965cc23c170bc2bf51a5407ce078de2c1b8d60..b0175f17d1a2b67c7c53611bc002e8802cd351a8 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -8,6 +8,7 @@ #define PL2303_VENDOR_ID 0x067b #define PL2303_PRODUCT_ID 0x2303 +#define PL2303_PRODUCT_ID_TB 0x2304 #define PL2303_PRODUCT_ID_RSAQ2 0x04bb #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 @@ -20,6 +21,7 @@ #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 + #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 @@ -119,10 +121,15 @@ /* Hewlett-Packard POS Pole Displays */ #define HP_VENDOR_ID 0x03f0 +#define HP_LM920_PRODUCT_ID 0x026b +#define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 #define HP_LCM220_PRODUCT_ID 0x3139 #define 
HP_LCM960_PRODUCT_ID 0x3239 #define HP_LD220_PRODUCT_ID 0x3524 +#define HP_LD220TA_PRODUCT_ID 0x4349 +#define HP_LD960TA_PRODUCT_ID 0x4439 +#define HP_LM940_PRODUCT_ID 0x5039 /* Cressi Edy (diving computer) PC interface */ #define CRESSI_VENDOR_ID 0x04b8 @@ -148,3 +155,6 @@ #define SMART_VENDOR_ID 0x0b8c #define SMART_PRODUCT_ID 0x2303 +/* Allied Telesis VT-Kit3 */ +#define AT_VENDOR_ID 0x0caa +#define AT_VTKIT3_PRODUCT_ID 0x3001 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e3c5832337e0bdbb83df78285a697ac5f5a8af3f..c9201e0a82417d08981006db6db3481705ae6a4f 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port) struct ti_port *tport; int port_number; int status; - int do_unlock; unsigned long flags; tdev = usb_get_serial_data(port->serial); @@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port) "%s - cannot send close port command, %d\n" , __func__, status); - /* if mutex_lock is interrupted, continue anyway */ - do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock); + mutex_lock(&tdev->td_open_close_lock); --tport->tp_tdev->td_open_port_count; - if (tport->tp_tdev->td_open_port_count <= 0) { + if (tport->tp_tdev->td_open_port_count == 0) { /* last port is closed, shut down interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); - tport->tp_tdev->td_open_port_count = 0; } - if (do_unlock) - mutex_unlock(&tdev->td_open_close_lock); + mutex_unlock(&tdev->td_open_close_lock); } diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4d0273508043de920cc4868eab9722deabf4ba40..edbbb13d6de6ee39285fef25268be3d08f4e3b0e 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS); /* Motorola Tetra driver */ #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ + { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); /* Novatel Wireless GPS driver */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index f7aaa7f079e1a041f666cc0b0fefbc4da0474972..434153790982191b693716c527177b0fadaa7f52 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -311,10 +311,7 @@ static void serial_cleanup(struct tty_struct *tty) serial = port->serial; owner = serial->type->driver.owner; - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) - usb_autopm_put_interface(serial->interface); - mutex_unlock(&serial->disc_mutex); + usb_autopm_put_interface(serial->interface); usb_serial_put(serial); module_put(owner); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 1c7b46a8620cc039847bcdbbb814f76576a53e41..16b275123f10eb2798f7b7b77f9a3580572f6600 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -571,6 +571,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, command_port = port->serial->port[COMMAND_PORT]; command_info = usb_get_serial_port_data(command_port); + + if (command_port->bulk_out_size < datasize + 1) + return -EIO; + mutex_lock(&command_info->mutex); command_info->command_finished = false; @@ -644,6 +648,7 @@ static void firm_setup_port(struct tty_struct 
*tty) struct device *dev = &port->dev; struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios.c_cflag; + speed_t baud; port_settings.port = port->port_number + 1; @@ -704,11 +709,13 @@ static void firm_setup_port(struct tty_struct *tty) dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff); /* get the baud rate wanted */ - port_settings.baud = tty_get_baud_rate(tty); - dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud); + baud = tty_get_baud_rate(tty); + port_settings.baud = cpu_to_le32(baud); + dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud); /* fixme: should set validated settings */ - tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud); + tty_encode_baud_rate(tty, baud, baud); + /* handle any settings that aren't specified in the tty structure */ port_settings.lloop = 0; diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h index 72c1b0cf406314970f324f3f27a4dab4659f64eb..56a3e8323f330fcd3d691e4732965fdae94b3088 100644 --- a/drivers/usb/serial/whiteheat.h +++ b/drivers/usb/serial/whiteheat.h @@ -87,7 +87,7 @@ struct whiteheat_simple { struct whiteheat_port_settings { __u8 port; /* port number (1 to N) */ - __u32 baud; /* any value 7 - 460800, firmware calculates + __le32 baud; /* any value 7 - 460800, firmware calculates best fit; arrives little endian */ __u8 bits; /* 5, 6, 7, or 8 */ __u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */ diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 4d261e4de9ad3e5b68060628050f01d842c79d75..e6768662c5edfb07f5ad3638c435c8ebae31fda3 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -940,7 +940,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa struct ms_lib_type_extdat ExtraData; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); + PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL); if (PageBuffer == NULL) return (u32)-1; diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index f5e4500d9970306a8c6c0482e3c073860fc802d5..b7255659229c790a3b9c3fb8cda9a2d6a3856ff8 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1104,7 +1104,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id) static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; - int retStatus = ISD200_GOOD; + int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); @@ -1136,6 +1136,13 @@ static int isd200_get_inquiry_data( struct us_data *us ) isd200_fix_driveid(id); isd200_dump_driveid(us, id); + /* Prevent division by 0 in isd200_scsi_to_ata() */ + if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { + usb_stor_dbg(us, " Invalid ATA Identify data\n"); + retStatus = ISD200_ERROR; + goto Done; + } + memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ @@ -1201,6 +1208,7 @@ static int isd200_get_inquiry_data( struct us_data *us ) } } + Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); @@ -1480,22 +1488,27 @@ static int isd200_init_info(struct us_data *us) static int isd200_Initialization(struct us_data *us) { + int rc = 0; + usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ - if 
(isd200_init_info(us) == ISD200_ERROR) { + if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); + rc = -ENOMEM; } else { /* Get device specific data */ - if (isd200_get_inquiry_data(us) != ISD200_GOOD) + if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); - else + rc = -EINVAL; + } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); + } } - return 0; + return rc; } diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 31b0244419387c52ec5dcd13138b4d6a0bd063f7..1d9ce9cbc831d1035a6ad086b10f1f99e3188a63 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -38,7 +38,7 @@ MODULE_LICENSE("GPL"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); +MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; @@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t) break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: - usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); - if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) { + if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); - usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); } break; @@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) int ret; if (working_scsi(srb)) { - usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n", - atomic_read(&us->pusb_intf->pm_usage_cnt), + usb_stor_dbg(us, "working scsi, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); - if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) { + if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); usb_stor_dbg(us, "working scsi, ret=%d\n", ret); } @@ -999,12 +996,15 @@ static int init_realtek_cr(struct us_data *us) goto INIT_FAIL; } - if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || - CHECK_FW_VER(chip, 0x5901)) - SET_AUTO_DELINK(chip); - if (STATUS_LEN(chip) == 16) { - if (SUPPORT_AUTO_DELINK(chip)) + if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || + CHECK_PID(chip, 0x0159)) { + if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || + CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); + if (STATUS_LEN(chip) == 16) { + if (SUPPORT_AUTO_DELINK(chip)) + SET_AUTO_DELINK(chip); + } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index e227bb5b794fe3905965647a442bbd43254d5099..f287ee8183df7aba9b83c3c8640619c61110e8f4 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -74,20 +74,8 @@ static int slave_alloc (struct scsi_device *sdev) sdev->inquiry_len = 36; /* - * 
USB has unusual DMA-alignment requirements: Although the - * starting address of each scatter-gather element doesn't matter, - * the length of each element except the last must be divisible - * by the Bulk maxpacket value. There's currently no way to - * express this by block-layer constraints, so we'll cop out - * and simply require addresses to be aligned at 512-byte - * boundaries. This is okay since most block I/O involves - * hardware sectors that are multiples of 512 bytes in length, - * and since host controllers up through USB 2.0 have maxpacket - * values no larger than 512. - * - * But it doesn't suffice for Wireless USB, where Bulk maxpacket - * values can be as large as 2048. To make that work properly - * will require changes to the block layer. + * Some host controllers may have alignment requirements. + * We'll play it safe by requiring 512-byte alignment always. */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); @@ -235,8 +223,12 @@ static int slave_configure(struct scsi_device *sdev) if (!(us->fflags & US_FL_NEEDS_CAP16)) sdev->try_rc_10_first = 1; - /* assume SPC3 or latter devices support sense size > 18 */ - if (sdev->scsi_level > SCSI_SPC_2) + /* + * assume SPC3 or latter devices support sense size > 18 + * unless US_FL_BAD_SENSE quirk is specified. + */ + if (sdev->scsi_level > SCSI_SPC_2 && + !(us->fflags & US_FL_BAD_SENSE)) us->fflags |= US_FL_SANE_SENSE; /* diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 1f7b401c4d041d44390f3fa519dd95b46aab1b42..1c6eb3a8741ee52bae713a65495a3c23f64d2a79 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -800,20 +800,9 @@ static int uas_slave_alloc(struct scsi_device *sdev) sdev->hostdata = devinfo; /* - * USB has unusual DMA-alignment requirements: Although the - * starting address of each scatter-gather element doesn't matter, - * the length of each element except the last must be divisible - * by the Bulk maxpacket value. There's currently no way to - * express this by block-layer constraints, so we'll cop out - * and simply require addresses to be aligned at 512-byte - * boundaries. This is okay since most block I/O involves - * hardware sectors that are multiples of 512 bytes in length, - * and since host controllers up through USB 2.0 have maxpacket - * values no larger than 512. - * - * But it doesn't suffice for Wireless USB, where Bulk maxpacket - * values can be as large as 2048. To make that work properly - * will require changes to the block layer. + * The protocol has no requirements on alignment in the strict sense. + * Controllers may or may not have alignment restrictions. + * As this is not exported, we use an extremely conservative guess. */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); @@ -843,6 +832,10 @@ static int uas_slave_configure(struct scsi_device *sdev) sdev->wce_default_on = 1; } + /* Some disks cannot handle READ_CAPACITY_16 */ + if (devinfo->flags & US_FL_NO_READ_CAPACITY_16) + sdev->no_read_capacity_16 = 1; + /* * Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. 
@@ -851,6 +844,12 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; + /* + * in some cases we have to guess + */ + if (devinfo->flags & US_FL_CAPACITY_HEURISTICS) + sdev->guess_capacity = 1; + /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f7f83b21dc746ab922d7f7e7c61add76a55fc560..1cd9b6305b06042fecf75942c4f79af41747feea 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1265,6 +1265,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* + * Reported by Icenowy Zheng + * The SMI SM3350 USB-UFS bridge controller will enter a wrong state + * that do not process read/write command if a long sense is requested, + * so force to use 18-byte sense. + */ +UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff, + "SMI", + "SM3350 UFS-to-USB-Mass-Storage bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BAD_SENSE ), + /* * Reported by Paul Hartman * This card reader returns "Illegal Request, Logical Block Address @@ -2088,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, US_FL_IGNORE_RESIDUE ), /* Reported by Michael Büsch */ -UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h index d17cd95b55bbdec3874c80d0d09b449c4f1f8fa1..7e14c2d7cf734f7ac00c03ae7b5b2c87845cfe74 100644 --- a/drivers/usb/storage/unusual_realtek.h +++ b/drivers/usb/storage/unusual_realtek.h @@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999, "USB Card Reader", USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), +UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999, "Realtek", "USB Card Reader", @@ -27,4 +32,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999, "USB Card Reader", USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), +UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + +UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999, + "Realtek", + "USB Card Reader", + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0), + #endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... 
*/ diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index e61dffb27a0c64bab899c36f674f922278d92f6e..80efe0a53a557e5152521256a88f6883150c4ba7 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -462,6 +462,7 @@ static void typec_altmode_release(struct device *dev) typec_altmode_put_partner(alt); altmode_id_remove(alt->adev.dev.parent, alt->id); + put_device(alt->adev.dev.parent); kfree(alt); } @@ -509,6 +510,8 @@ typec_register_altmode(struct device *parent, alt->adev.dev.type = &typec_altmode_dev_type; dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id); + get_device(alt->adev.dev.parent); + /* Link partners and plugs with the ports */ if (is_port) BLOCKING_INIT_NOTIFIER_HEAD(&alt->nh); @@ -1500,7 +1503,7 @@ typec_port_register_altmode(struct typec_port *port, sprintf(id, "id%04xm%02x", desc->svid, desc->mode); - mux = typec_mux_get(port->dev.parent, id); + mux = typec_mux_get(&port->dev, id); if (IS_ERR(mux)) return ERR_CAST(mux); @@ -1540,18 +1543,6 @@ struct typec_port *typec_register_port(struct device *parent, return ERR_PTR(id); } - port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent); - if (IS_ERR(port->sw)) { - ret = PTR_ERR(port->sw); - goto err_switch; - } - - port->mux = typec_mux_get(parent, "typec-mux"); - if (IS_ERR(port->mux)) { - ret = PTR_ERR(port->mux); - goto err_mux; - } - switch (cap->type) { case TYPEC_PORT_SRC: port->pwr_role = TYPEC_SOURCE; @@ -1592,13 +1583,28 @@ struct typec_port *typec_register_port(struct device *parent, port->port_type = cap->type; port->prefer_role = cap->prefer_role; + device_initialize(&port->dev); port->dev.class = typec_class; port->dev.parent = parent; port->dev.fwnode = cap->fwnode; port->dev.type = &typec_port_dev_type; dev_set_name(&port->dev, "port%d", id); - ret = device_register(&port->dev); + port->sw = typec_switch_get(&port->dev); + if (IS_ERR(port->sw)) { + ret = PTR_ERR(port->sw); + put_device(&port->dev); + return ERR_PTR(ret); + } + + port->mux = typec_mux_get(&port->dev, "typec-mux"); + if (IS_ERR(port->mux)) { + ret = PTR_ERR(port->mux); + put_device(&port->dev); + return ERR_PTR(ret); + } + + ret = device_add(&port->dev); if (ret) { dev_err(parent, "failed to register port (%d)\n", ret); put_device(&port->dev); @@ -1606,15 +1612,6 @@ struct typec_port *typec_register_port(struct device *parent, } return port; - -err_mux: - typec_switch_put(port->sw); - -err_switch: - ida_simple_remove(&typec_index_ida, port->id); - kfree(port); - - return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(typec_register_port); diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 82bed9810be6c4c76190d5e14c7e98511787b83c..62a0060d39d8dc6e8b82807df2d8c698ed3b36de 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c @@ -641,6 +641,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip, return ret; chip->intr_togdone = false; } else { + /* Datasheet says vconn MUST be off when toggling */ + WARN(chip->vconn_on, "Vconn is on during toggle start"); /* unmask TOGDONE interrupt */ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA, FUSB_REG_MASKA_TOGDONE); diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index 4f1f4215f3d6e29b191a4cdc7e722b4ca36249fd..39cf19001239313f5adb8ada3c8d0362eb342454 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -37,6 +37,7 @@ S(SRC_ATTACHED), \ S(SRC_STARTUP), \ S(SRC_SEND_CAPABILITIES), \ + S(SRC_SEND_CAPABILITIES_TIMEOUT), \ S(SRC_NEGOTIATE_CAPABILITIES), 
\ S(SRC_TRANSITION_SUPPLY), \ S(SRC_READY), \ @@ -317,6 +318,9 @@ struct tcpm_port { /* Deadline in jiffies to exit src_try_wait state */ unsigned long max_wait; + /* port belongs to a self powered device */ + bool self_powered; + #ifdef CONFIG_DEBUG_FS struct dentry *dentry; struct mutex logbuffer_lock; /* log buffer access lock */ @@ -375,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port) return SNK_UNATTACHED; else if (port->try_role == TYPEC_SOURCE) return SRC_UNATTACHED; - else if (port->tcpc->config->default_role == TYPEC_SINK) + else if (port->tcpc->config && + port->tcpc->config->default_role == TYPEC_SINK) return SNK_UNATTACHED; /* Fall through to return SRC_UNATTACHED */ } else if (port->port_type == TYPEC_PORT_SNK) { @@ -582,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port) static void tcpm_debugfs_exit(struct tcpm_port *port) { + int i; + + mutex_lock(&port->logbuffer_lock); + for (i = 0; i < LOG_BUFFER_ENTRIES; i++) { + kfree(port->logbuffer[i]); + port->logbuffer[i] = NULL; + } + mutex_unlock(&port->logbuffer_lock); + debugfs_remove(port->dentry); + if (list_empty(&rootdir->d_subdirs)) { + debugfs_remove(rootdir); + rootdir = NULL; + } } #else @@ -1091,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, break; case CMD_ATTENTION: /* Attention command does not have response */ - typec_altmode_attention(adev, p[1]); + if (adev) + typec_altmode_attention(adev, p[1]); return 0; default: break; @@ -1143,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, } break; case CMD_ENTER_MODE: - typec_altmode_update_active(pdev, true); - - if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { - response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE); - response[0] |= VDO_OPOS(adev->mode); - return 1; + if (adev && pdev) { + typec_altmode_update_active(pdev, true); + + if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { + response[0] = VDO(adev->svid, 1, + CMD_EXIT_MODE); + response[0] |= VDO_OPOS(adev->mode); + return 1; + } } return 0; case CMD_EXIT_MODE: - typec_altmode_update_active(pdev, false); + if (adev && pdev) { + typec_altmode_update_active(pdev, false); - /* Back to USB Operation */ - WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, - NULL)); + /* Back to USB Operation */ + WARN_ON(typec_altmode_notify(adev, + TYPEC_STATE_USB, + NULL)); + } break; default: break; @@ -1166,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, switch (cmd) { case CMD_ENTER_MODE: /* Back to USB Operation */ - WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, - NULL)); + if (adev) + WARN_ON(typec_altmode_notify(adev, + TYPEC_STATE_USB, + NULL)); break; default: break; @@ -1178,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, } /* Informing the alternate mode drivers about everything */ - typec_altmode_vdm(adev, p[0], &p[1], cnt); + if (adev) + typec_altmode_vdm(adev, p[0], &p[1], cnt); return rlen; } @@ -1418,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, else if ((pdo_min_voltage(pdo[i]) == pdo_min_voltage(pdo[i - 1])) && (pdo_max_voltage(pdo[i]) == - pdo_min_voltage(pdo[i - 1]))) + pdo_max_voltage(pdo[i - 1]))) return PDO_ERR_DUPE_PDO; break; /* @@ -1430,8 +1458,8 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS) break; - if (pdo_pps_apdo_max_current(pdo[i]) < - pdo_pps_apdo_max_current(pdo[i 
- 1])) + if (pdo_pps_apdo_max_voltage(pdo[i]) < + pdo_pps_apdo_max_voltage(pdo[i - 1])) return PDO_ERR_PPS_APDO_NOT_SORTED; else if (pdo_pps_apdo_min_voltage(pdo[i]) == pdo_pps_apdo_min_voltage(pdo[i - 1]) && @@ -2960,10 +2988,34 @@ static void run_state_machine(struct tcpm_port *port) /* port->hard_reset_count = 0; */ port->caps_count = 0; port->pd_capable = true; - tcpm_set_state_cond(port, hard_reset_state(port), + tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT, PD_T_SEND_SOURCE_CAP); } break; + case SRC_SEND_CAPABILITIES_TIMEOUT: + /* + * Error recovery for a PD_DATA_SOURCE_CAP reply timeout. + * + * PD 2.0 sinks are supposed to accept src-capabilities with a + * 3.0 header and simply ignore any src PDOs which the sink does + * not understand such as PPS but some 2.0 sinks instead ignore + * the entire PD_DATA_SOURCE_CAP message, causing contract + * negotiation to fail. + * + * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try + * sending src-capabilities with a lower PD revision to + * make these broken sinks work. + */ + if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) { + tcpm_set_state(port, HARD_RESET_SEND, 0); + } else if (port->negotiated_rev > PD_REV20) { + port->negotiated_rev--; + port->hard_reset_count = 0; + tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); + } else { + tcpm_set_state(port, hard_reset_state(port), 0); + } + break; case SRC_NEGOTIATE_CAPABILITIES: ret = tcpm_pd_check_request(port); if (ret < 0) { @@ -3257,7 +3309,8 @@ static void run_state_machine(struct tcpm_port *port) case SRC_HARD_RESET_VBUS_OFF: tcpm_set_vconn(port, true); tcpm_set_vbus(port, false); - tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST); + tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, + TYPEC_HOST); tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); break; case SRC_HARD_RESET_VBUS_ON: @@ -3269,8 +3322,10 @@ static void run_state_machine(struct tcpm_port *port) case SNK_HARD_RESET_SINK_OFF: memset(&port->pps_data, 0, sizeof(port->pps_data)); tcpm_set_vconn(port, false); - tcpm_set_charge(port, false); - tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE); + if (port->pd_capable) + tcpm_set_charge(port, false); + tcpm_set_roles(port, port->self_powered, TYPEC_SINK, + TYPEC_DEVICE); /* * VBUS may or may not toggle, depending on the adapter. * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON @@ -3300,6 +3355,12 @@ static void run_state_machine(struct tcpm_port *port) * Similar, dual-mode ports in source mode should transition * to PE_SNK_Transition_to_default. 
*/ + if (port->pd_capable) { + tcpm_set_current_limit(port, + tcpm_get_current_limit(port), + 5000); + tcpm_set_charge(port, true); + } tcpm_set_attached_state(port, true); tcpm_set_state(port, SNK_STARTUP, 0); break; @@ -4078,7 +4139,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role) mutex_lock(&port->lock); if (tcpc->try_role) ret = tcpc->try_role(tcpc, role); - if (!ret && !tcpc->config->try_role_hw) + if (!ret && (!tcpc->config || !tcpc->config->try_role_hw)) port->try_role = role; port->try_src_count = 0; port->try_snk_count = 0; @@ -4116,6 +4177,9 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr) goto port_unlock; } + /* Round down operating current to align with PPS valid steps */ + op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP); + reinit_completion(&port->pps_complete); port->pps_data.op_curr = op_curr; port->pps_status = 0; @@ -4169,6 +4233,9 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt) goto port_unlock; } + /* Round down output voltage to align with PPS valid steps */ + out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP); + reinit_completion(&port->pps_complete); port->pps_data.out_volt = out_volt; port->pps_status = 0; @@ -4409,6 +4476,8 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, return -EINVAL; port->operating_snk_mw = mw / 1000; + port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); + return 0; } @@ -4717,6 +4786,7 @@ static int tcpm_copy_caps(struct tcpm_port *port, port->typec_caps.prefer_role = tcfg->default_role; port->typec_caps.type = tcfg->type; port->typec_caps.data = tcfg->data; + port->self_powered = tcfg->self_powered; return 0; } diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c index c84c8c189e902857e3272769c940836ff9464b17..a4dd23a8f19546ca29e9a41a3dde1b9fef02fed9 100644 --- a/drivers/usb/typec/tps6598x.c +++ b/drivers/usb/typec/tps6598x.c @@ -39,7 +39,7 @@ #define TPS_STATUS_VCONN(s) (!!((s) & BIT(7))) /* TPS_REG_SYSTEM_CONF bits */ -#define TPS_SYSCONF_PORTINFO(c) ((c) & 3) +#define TPS_SYSCONF_PORTINFO(c) ((c) & 7) enum { TPS_PORTINFO_SINK, @@ -93,7 +93,7 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len) u8 data[TPS_MAX_LEN + 1]; int ret; - if (WARN_ON(len + 1 > sizeof(data))) + if (len + 1 > sizeof(data)) return -EINVAL; if (!tps->i2c_protocol) @@ -110,6 +110,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len) return 0; } +static int tps6598x_block_write(struct tps6598x *tps, u8 reg, + const void *val, size_t len) +{ + u8 data[TPS_MAX_LEN + 1]; + + if (!tps->i2c_protocol) + return regmap_raw_write(tps->regmap, reg, val, len); + + data[0] = len; + memcpy(&data[1], val, len); + + return regmap_raw_write(tps->regmap, reg, data, sizeof(data)); +} + static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val) { return tps6598x_block_read(tps, reg, val, sizeof(u16)); @@ -127,23 +141,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val) static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val) { - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16)); + return tps6598x_block_write(tps, reg, &val, sizeof(u16)); } static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val) { - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32)); + return tps6598x_block_write(tps, reg, &val, sizeof(u32)); } static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val) { - return 
regmap_raw_write(tps->regmap, reg, &val, sizeof(u64)); + return tps6598x_block_write(tps, reg, &val, sizeof(u64)); } static inline int tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val) { - return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32)); + return tps6598x_block_write(tps, reg, val, 4); } static int tps6598x_read_partner_identity(struct tps6598x *tps) @@ -229,8 +243,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd, return -EBUSY; if (in_len) { - ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1, - in_data, in_len); + ret = tps6598x_block_write(tps, TPS_REG_DATA1, + in_data, in_len); if (ret) return ret; } diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c index 423208e19383c0c2cd414d3b627b8f4c48b6f67b..6770afd4076548eeb0021eef062160013b97b1ea 100644 --- a/drivers/usb/typec/typec_wcove.c +++ b/drivers/usb/typec/typec_wcove.c @@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev) wcove->dev = &pdev->dev; wcove->regmap = pmic->regmap; - irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, - platform_get_irq(pdev, 0)); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); + return irq; + } + + irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq); if (irq < 0) return irq; diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index f101347e3ea351cdbefa359a910167001dd6fc7d..e0cf11f798c54c0423409a91901dbac98760a7ad 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -59,6 +59,7 @@ struct usb_skel { spinlock_t err_lock; /* lock for errors */ struct kref kref; struct mutex io_mutex; /* synchronize I/O with disconnect */ + unsigned long disconnected:1; wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */ }; #define to_skel_dev(d) container_of(d, struct usb_skel, kref) @@ -71,6 +72,7 @@ static void skel_delete(struct kref *kref) struct usb_skel *dev = to_skel_dev(kref); usb_free_urb(dev->bulk_in_urb); + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev->bulk_in_buffer); kfree(dev); @@ -122,10 +124,7 @@ static int skel_release(struct inode *inode, struct file *file) return -ENODEV; /* allow the device to be autosuspended */ - mutex_lock(&dev->io_mutex); - if (dev->interface) - usb_autopm_put_interface(dev->interface); - mutex_unlock(&dev->io_mutex); + usb_autopm_put_interface(dev->interface); /* decrement the count on our device */ kref_put(&dev->kref, skel_delete); @@ -238,7 +237,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, if (rv < 0) return rv; - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ rv = -ENODEV; goto exit; } @@ -420,7 +419,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, /* this lock makes sure we don't submit URBs to gone devices */ mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; @@ -505,7 +504,7 @@ static int skel_probe(struct usb_interface *interface, init_waitqueue_head(&dev->bulk_in_wait); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ @@ -571,7 +570,7 @@ static void skel_disconnect(struct usb_interface *interface) /* 
prevent more I/O from starting */ mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); usb_kill_anchored_urbs(&dev->submitted); diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig index a20b65cb6678f99821d17990cf873b9dbc6b58f5..8276a20ecea7e44fb2f1469e49d029018c149cf3 100644 --- a/drivers/usb/usbip/Kconfig +++ b/drivers/usb/usbip/Kconfig @@ -2,6 +2,7 @@ config USBIP_CORE tristate "USB/IP support" depends on NET select USB_COMMON + select SGL_ALLOC ---help--- This enables pushing USB packets over IP to allow remote machines direct access to USB devices. It provides the diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 35618ceb279134bc02c6d8ff83671f55b25c82a8..d11270560c2477870374bfc8d694de9a294f6d31 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h @@ -52,7 +52,11 @@ struct stub_priv { unsigned long seqnum; struct list_head list; struct stub_device *sdev; - struct urb *urb; + struct urb **urbs; + struct scatterlist *sgl; + int num_urbs; + int completed_urbs; + int urb_status; int unlinking; }; @@ -86,6 +90,7 @@ extern struct usb_device_driver stub_driver; struct bus_id_priv *get_busid_priv(const char *busid); void put_busid_priv(struct bus_id_priv *bid); int del_match_busid(char *busid); +void stub_free_priv_and_urb(struct stub_priv *priv); void stub_device_cleanup_urbs(struct stub_device *sdev); /* stub_rx.c */ diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index c0d6ff1baa721754d42d1cae3b076685dcd86fe2..93891d445c307ca394c7f2688a10cfd851a5b279 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a int sockfd = 0; struct socket *socket; int rv; + struct task_struct *tcp_rx = NULL; + struct task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); @@ -72,20 +74,36 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a if (!socket) goto err; - sdev->ud.tcp_socket = socket; - sdev->ud.sockfd = sockfd; - + /* unlock and create threads and get tasks */ spin_unlock_irq(&sdev->ud.lock); + tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); + if (IS_ERR(tcp_rx)) { + sockfd_put(socket); + return -EINVAL; + } + tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); + if (IS_ERR(tcp_tx)) { + kthread_stop(tcp_rx); + sockfd_put(socket); + return -EINVAL; + } - sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, - "stub_rx"); - sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, - "stub_tx"); + /* get task structs now */ + get_task_struct(tcp_rx); + get_task_struct(tcp_tx); + /* lock and update sdev->ud state */ spin_lock_irq(&sdev->ud.lock); + sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; + sdev->ud.tcp_rx = tcp_rx; + sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); + wake_up_process(sdev->ud.tcp_rx); + wake_up_process(sdev->ud.tcp_tx); + } else { dev_info(dev, "stub down\n"); @@ -301,9 +319,17 @@ static int stub_probe(struct usb_device *udev) const char *udev_busid = dev_name(&udev->dev); struct bus_id_priv *busid_priv; int rc = 0; + char save_status; dev_dbg(&udev->dev, "Enter probe\n"); + /* Not sure if this is our device. Allocate here to avoid + * calling alloc while holding busid_table lock. 
+ */ + sdev = stub_device_alloc(udev); + if (!sdev) + return -ENOMEM; + /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) || @@ -318,6 +344,9 @@ static int stub_probe(struct usb_device *udev) * See driver_probe_device() in driver/base/dd.c */ rc = -ENODEV; + if (!busid_priv) + goto sdev_free; + goto call_put_busid_priv; } @@ -337,12 +366,6 @@ static int stub_probe(struct usb_device *udev) goto call_put_busid_priv; } - /* ok, this is my device */ - sdev = stub_device_alloc(udev); - if (!sdev) { - rc = -ENOMEM; - goto call_put_busid_priv; - } dev_info(&udev->dev, "usbip-host: register new device (bus %u dev %u)\n", @@ -352,9 +375,16 @@ static int stub_probe(struct usb_device *udev) /* set private data to usb_device */ dev_set_drvdata(&udev->dev, sdev); + busid_priv->sdev = sdev; busid_priv->udev = udev; + save_status = busid_priv->status; + busid_priv->status = STUB_BUSID_ALLOC; + + /* release the busid_lock */ + put_busid_priv(busid_priv); + /* * Claim this hub port. * It doesn't matter what value we pass as owner @@ -372,10 +402,8 @@ static int stub_probe(struct usb_device *udev) dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid); goto err_files; } - busid_priv->status = STUB_BUSID_ALLOC; - rc = 0; - goto call_put_busid_priv; + return 0; err_files: usb_hub_release_port(udev->parent, udev->portnum, @@ -384,23 +412,30 @@ static int stub_probe(struct usb_device *udev) dev_set_drvdata(&udev->dev, NULL); usb_put_dev(udev); + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); busid_priv->sdev = NULL; - stub_device_free(sdev); + busid_priv->status = save_status; + spin_unlock(&busid_priv->busid_lock); + /* lock is released - go to free */ + goto sdev_free; call_put_busid_priv: + /* release the busid_lock */ put_busid_priv(busid_priv); + +sdev_free: + stub_device_free(sdev); + return rc; } static void shutdown_busid(struct bus_id_priv *busid_priv) { - if (busid_priv->sdev && !busid_priv->shutdown_busid) { - busid_priv->shutdown_busid = 1; - usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED); + usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED); - /* wait for the stop of the event handler */ - usbip_stop_eh(&busid_priv->sdev->ud); - } + /* wait for the stop of the event handler */ + usbip_stop_eh(&busid_priv->sdev->ud); } /* @@ -427,11 +462,16 @@ static void stub_disconnect(struct usb_device *udev) /* get stub_device */ if (!sdev) { dev_err(&udev->dev, "could not get device"); - goto call_put_busid_priv; + /* release busid_lock */ + put_busid_priv(busid_priv); + return; } dev_set_drvdata(&udev->dev, NULL); + /* release busid_lock before call to remove device files */ + put_busid_priv(busid_priv); + /* * NOTE: rx/tx threads are invoked for each usb_device. 
*/ @@ -442,27 +482,36 @@ static void stub_disconnect(struct usb_device *udev) (struct usb_dev_state *) udev); if (rc) { dev_dbg(&udev->dev, "unable to release port\n"); - goto call_put_busid_priv; + return; } /* If usb reset is called from event handler */ if (usbip_in_eh(current)) - goto call_put_busid_priv; + return; + + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); + if (!busid_priv->shutdown_busid) + busid_priv->shutdown_busid = 1; + /* release busid_lock */ + spin_unlock(&busid_priv->busid_lock); /* shutdown the current connection */ shutdown_busid(busid_priv); usb_put_dev(sdev->udev); + /* we already have busid_priv, just lock busid_lock */ + spin_lock(&busid_priv->busid_lock); /* free sdev */ busid_priv->sdev = NULL; stub_device_free(sdev); if (busid_priv->status == STUB_BUSID_ALLOC) busid_priv->status = STUB_BUSID_ADDED; - -call_put_busid_priv: - put_busid_priv(busid_priv); + /* release busid_lock */ + spin_unlock(&busid_priv->busid_lock); + return; } #ifdef CONFIG_PM diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index bf8a5feb0ee937a35ccd7a478e7c3dd01770b4e1..a20bb2d04f4dedcf2d3edceb48af4dcbb2119d79 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "usbip_common.h" #include "stub.h" @@ -283,13 +284,49 @@ static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead) struct stub_priv *priv, *tmp; list_for_each_entry_safe(priv, tmp, listhead, list) { - list_del(&priv->list); + list_del_init(&priv->list); return priv; } return NULL; } +void stub_free_priv_and_urb(struct stub_priv *priv) +{ + struct urb *urb; + int i; + + for (i = 0; i < priv->num_urbs; i++) { + urb = priv->urbs[i]; + + if (!urb) + return; + + kfree(urb->setup_packet); + urb->setup_packet = NULL; + + + if (urb->transfer_buffer && !priv->sgl) { + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + } + + if (urb->num_sgs) { + sgl_free(urb->sg); + urb->sg = NULL; + urb->num_sgs = 0; + } + + usb_free_urb(urb); + } + if (!list_empty(&priv->list)) + list_del(&priv->list); + if (priv->sgl) + sgl_free(priv->sgl); + kfree(priv->urbs); + kmem_cache_free(stub_priv_cache, priv); +} + static struct stub_priv *stub_priv_pop(struct stub_device *sdev) { unsigned long flags; @@ -316,25 +353,15 @@ static struct stub_priv *stub_priv_pop(struct stub_device *sdev) void stub_device_cleanup_urbs(struct stub_device *sdev) { struct stub_priv *priv; - struct urb *urb; + int i; dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { - urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", - priv->seqnum); - usb_kill_urb(urb); - - kmem_cache_free(stub_priv_cache, priv); + for (i = 0; i < priv->num_urbs; i++) + usb_kill_urb(priv->urbs[i]); - kfree(urb->transfer_buffer); - urb->transfer_buffer = NULL; - - kfree(urb->setup_packet); - urb->setup_packet = NULL; - - usb_free_urb(urb); + stub_free_priv_and_urb(priv); } } diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 97b09a42a10cabe1080f606acba13d35d543cf91..8c55cd8330988b168e0a7cc58fa17e213ccf7542 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "usbip_common.h" #include "stub.h" @@ -201,7 +202,7 @@ static void tweak_special_requests(struct urb *urb) static int stub_recv_cmd_unlink(struct stub_device *sdev, struct usbip_header *pdu) { - 
int ret; + int ret, i; unsigned long flags; struct stub_priv *priv; @@ -246,12 +247,14 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, * so a driver in a client host will know the failure * of the unlink request ? */ - ret = usb_unlink_urb(priv->urb); - if (ret != -EINPROGRESS) - dev_err(&priv->urb->dev->dev, - "failed to unlink a urb # %lu, ret %d\n", - priv->seqnum, ret); - + for (i = priv->completed_urbs; i < priv->num_urbs; i++) { + ret = usb_unlink_urb(priv->urbs[i]); + if (ret != -EINPROGRESS) + dev_err(&priv->urbs[i]->dev->dev, + "failed to unlink %d/%d urb of seqnum %lu, ret %d\n", + i + 1, priv->num_urbs, + priv->seqnum, ret); + } return 0; } @@ -361,16 +364,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) } if (usb_endpoint_xfer_isoc(epd)) { - /* validate packet size and number of packets */ - unsigned int maxp, packets, bytes; - - maxp = usb_endpoint_maxp(epd); - maxp *= usb_endpoint_maxp_mult(epd); - bytes = pdu->u.cmd_submit.transfer_buffer_length; - packets = DIV_ROUND_UP(bytes, maxp); - + /* validate number of packets */ if (pdu->u.cmd_submit.number_of_packets < 0 || - pdu->u.cmd_submit.number_of_packets > packets) { + pdu->u.cmd_submit.number_of_packets > + USBIP_MAX_ISO_PACKETS) { dev_err(&sdev->udev->dev, "CMD_SUBMIT: isoc invalid num packets %d\n", pdu->u.cmd_submit.number_of_packets); @@ -439,92 +436,191 @@ static void masking_bogus_flags(struct urb *urb) urb->transfer_flags &= allowed; } +static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv) +{ + int ret; + int i; + + for (i = 0; i < priv->num_urbs; i++) { + ret = usbip_recv_xbuff(ud, priv->urbs[i]); + if (ret < 0) + break; + } + + return ret; +} + static void stub_recv_cmd_submit(struct stub_device *sdev, struct usbip_header *pdu) { - int ret; struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = sdev->udev; + struct scatterlist *sgl = NULL, *sg; + void *buffer = NULL; + unsigned long long buf_len; + int nents; + int num_urbs = 1; int pipe = get_pipe(sdev, pdu); + int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG; + int support_sg = 1; + int np = 0; + int ret, i; if (pipe == -1) return; + /* + * Smatch reported the error case where use_sg is true and buf_len is 0. + * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be + * released by stub event handler and connection will be shut down. + */ priv = stub_priv_alloc(sdev, pdu); if (!priv) return; - /* setup a urb */ - if (usb_pipeisoc(pipe)) - priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets, - GFP_KERNEL); - else - priv->urb = usb_alloc_urb(0, GFP_KERNEL); + buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; - if (!priv->urb) { - usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); - return; + if (use_sg && !buf_len) { + dev_err(&udev->dev, "sg buffer with zero length\n"); + goto err_malloc; } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0) { - priv->urb->transfer_buffer = - kzalloc(pdu->u.cmd_submit.transfer_buffer_length, - GFP_KERNEL); - if (!priv->urb->transfer_buffer) { + if (buf_len) { + if (use_sg) { + sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); + if (!sgl) + goto err_malloc; + + /* Check if the server's HCD supports SG */ + if (!udev->bus->sg_tablesize) { + /* + * If the server's HCD doesn't support SG, break + * a single SG request into several URBs and map + * each SG list entry to corresponding URB + * buffer. 
The previously allocated SG list is + * stored in priv->sgl (If the server's HCD + * support SG, SG list is stored only in + * urb->sg) and it is used as an indicator that + * the server split single SG request into + * several URBs. Later, priv->sgl is used by + * stub_complete() and stub_send_ret_submit() to + * reassemble the divied URBs. + */ + support_sg = 0; + num_urbs = nents; + priv->completed_urbs = 0; + pdu->u.cmd_submit.transfer_flags &= + ~URB_DMA_MAP_SG; + } + } else { + buffer = kzalloc(buf_len, GFP_KERNEL); + if (!buffer) + goto err_malloc; + } + } + + /* allocate urb array */ + priv->num_urbs = num_urbs; + priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); + if (!priv->urbs) + goto err_urbs; + + /* setup a urb */ + if (support_sg) { + if (usb_pipeisoc(pipe)) + np = pdu->u.cmd_submit.number_of_packets; + + priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL); + if (!priv->urbs[0]) + goto err_urb; + + if (buf_len) { + if (use_sg) { + priv->urbs[0]->sg = sgl; + priv->urbs[0]->num_sgs = nents; + priv->urbs[0]->transfer_buffer = NULL; + } else { + priv->urbs[0]->transfer_buffer = buffer; + } + } + + /* copy urb setup packet */ + priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, + 8, GFP_KERNEL); + if (!priv->urbs[0]->setup_packet) { usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); return; } - } - /* copy urb setup packet */ - priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8, - GFP_KERNEL); - if (!priv->urb->setup_packet) { - dev_err(&udev->dev, "allocate setup_packet\n"); - usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); - return; + usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0); + } else { + for_each_sg(sgl, sg, nents, i) { + priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); + /* The URBs which is previously allocated will be freed + * in stub_device_cleanup_urbs() if error occurs. + */ + if (!priv->urbs[i]) + goto err_urb; + + usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0); + priv->urbs[i]->transfer_buffer = sg_virt(sg); + priv->urbs[i]->transfer_buffer_length = sg->length; + } + priv->sgl = sgl; } - /* set other members from the base header of pdu */ - priv->urb->context = (void *) priv; - priv->urb->dev = udev; - priv->urb->pipe = pipe; - priv->urb->complete = stub_complete; + for (i = 0; i < num_urbs; i++) { + /* set other members from the base header of pdu */ + priv->urbs[i]->context = (void *) priv; + priv->urbs[i]->dev = udev; + priv->urbs[i]->pipe = pipe; + priv->urbs[i]->complete = stub_complete; - usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0); + /* no need to submit an intercepted request, but harmless? */ + tweak_special_requests(priv->urbs[i]); + masking_bogus_flags(priv->urbs[i]); + } - if (usbip_recv_xbuff(ud, priv->urb) < 0) + if (stub_recv_xbuff(ud, priv) < 0) return; - if (usbip_recv_iso(ud, priv->urb) < 0) + if (usbip_recv_iso(ud, priv->urbs[0]) < 0) return; - /* no need to submit an intercepted request, but harmless? */ - tweak_special_requests(priv->urb); - - masking_bogus_flags(priv->urb); /* urb is now ready to submit */ - ret = usb_submit_urb(priv->urb, GFP_KERNEL); - - if (ret == 0) - usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", - pdu->base.seqnum); - else { - dev_err(&udev->dev, "submit_urb error, %d\n", ret); - usbip_dump_header(pdu); - usbip_dump_urb(priv->urb); - - /* - * Pessimistic. - * This connection will be discarded. 
- */ - usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); + for (i = 0; i < priv->num_urbs; i++) { + ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL); + + if (ret == 0) + usbip_dbg_stub_rx("submit urb ok, seqnum %u\n", + pdu->base.seqnum); + else { + dev_err(&udev->dev, "submit_urb error, %d\n", ret); + usbip_dump_header(pdu); + usbip_dump_urb(priv->urbs[i]); + + /* + * Pessimistic. + * This connection will be discarded. + */ + usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT); + break; + } } usbip_dbg_stub_rx("Leave\n"); + return; + +err_urb: + kfree(priv->urbs); +err_urbs: + kfree(buffer); + sgl_free(sgl); +err_malloc: + usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC); } /* recv a pdu */ diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index f0ec41a50cbc16f9814ca8b2a7590dbe3c465fab..36010a82b35930f1d3b154b5eebd1eac3e5b5857 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -5,25 +5,11 @@ #include #include +#include #include "usbip_common.h" #include "stub.h" -static void stub_free_priv_and_urb(struct stub_priv *priv) -{ - struct urb *urb = priv->urb; - - kfree(urb->setup_packet); - urb->setup_packet = NULL; - - kfree(urb->transfer_buffer); - urb->transfer_buffer = NULL; - - list_del(&priv->list); - kmem_cache_free(stub_priv_cache, priv); - usb_free_urb(urb); -} - /* be in spin_lock_irqsave(&sdev->priv_lock, flags) */ void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum, __u32 status) @@ -85,6 +71,22 @@ void stub_complete(struct urb *urb) break; } + /* + * If the server breaks single SG request into the several URBs, the + * URBs must be reassembled before sending completed URB to the vhci. + * Don't wake up the tx thread until all the URBs are completed. + */ + if (priv->sgl) { + priv->completed_urbs++; + + /* Only save the first error status */ + if (urb->status && !priv->urb_status) + priv->urb_status = urb->status; + + if (priv->completed_urbs < priv->num_urbs) + return; + } + /* link a urb to the queue of tx. 
*/ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { @@ -156,18 +158,22 @@ static int stub_send_ret_submit(struct stub_device *sdev) size_t total_size = 0; while ((priv = dequeue_from_priv_tx(sdev)) != NULL) { - int ret; - struct urb *urb = priv->urb; + struct urb *urb = priv->urbs[0]; struct usbip_header pdu_header; struct usbip_iso_packet_descriptor *iso_buffer = NULL; struct kvec *iov = NULL; + struct scatterlist *sg; + u32 actual_length = 0; int iovnum = 0; + int ret; + int i; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); - if (urb->actual_length > 0 && !urb->transfer_buffer) { + if (urb->actual_length > 0 && !urb->transfer_buffer && + !urb->num_sgs) { dev_err(&sdev->udev->dev, "urb: actual_length %d transfer_buffer null\n", urb->actual_length); @@ -176,6 +182,11 @@ static int stub_send_ret_submit(struct stub_device *sdev) if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) iovnum = 2 + urb->number_of_packets; + else if (usb_pipein(urb->pipe) && urb->actual_length > 0 && + urb->num_sgs) + iovnum = 1 + urb->num_sgs; + else if (usb_pipein(urb->pipe) && priv->sgl) + iovnum = 1 + priv->num_urbs; else iovnum = 2; @@ -192,6 +203,15 @@ static int stub_send_ret_submit(struct stub_device *sdev) setup_ret_submit_pdu(&pdu_header, urb); usbip_dbg_stub_tx("setup txdata seqnum: %d\n", pdu_header.base.seqnum); + + if (priv->sgl) { + for (i = 0; i < priv->num_urbs; i++) + actual_length += priv->urbs[i]->actual_length; + + pdu_header.u.ret_submit.status = priv->urb_status; + pdu_header.u.ret_submit.actual_length = actual_length; + } + usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; @@ -200,12 +220,47 @@ static int stub_send_ret_submit(struct stub_device *sdev) txsize += sizeof(pdu_header); /* 2. setup transfer buffer */ - if (usb_pipein(urb->pipe) && + if (usb_pipein(urb->pipe) && priv->sgl) { + /* If the server split a single SG request into several + * URBs because the server's HCD doesn't support SG, + * reassemble the split URB buffers into a single + * return command. + */ + for (i = 0; i < priv->num_urbs; i++) { + iov[iovnum].iov_base = + priv->urbs[i]->transfer_buffer; + iov[iovnum].iov_len = + priv->urbs[i]->actual_length; + iovnum++; + } + txsize += actual_length; + } else if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && urb->actual_length > 0) { - iov[iovnum].iov_base = urb->transfer_buffer; - iov[iovnum].iov_len = urb->actual_length; - iovnum++; + if (urb->num_sgs) { + unsigned int copy = urb->actual_length; + int size; + + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + if (copy == 0) + break; + + if (copy < sg->length) + size = copy; + else + size = sg->length; + + iov[iovnum].iov_base = sg_virt(sg); + iov[iovnum].iov_len = size; + + iovnum++; + copy -= size; + } + } else { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = urb->actual_length; + iovnum++; + } txsize += urb->actual_length; } else if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 9756752c0681f99c2acb1aaf1213ea23e03055f9..88eaf3c45dd597bfdc28f22036951ccc2283984b 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -680,8 +680,12 @@ EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. 
*/ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { - int ret; + struct scatterlist *sg; + int ret = 0; + int recv; int size; + int copy; + int i; if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) { /* the direction of urb must be OUT. */ @@ -701,29 +705,51 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) if (!(size > 0)) return 0; - if (size > urb->transfer_buffer_length) { + if (size > urb->transfer_buffer_length) /* should not happen, probably malicious packet */ - if (ud->side == USBIP_STUB) { - usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); - return 0; - } else { - usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); - return -EPIPE; - } - } + goto error; - ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); - if (ret != size) { - dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); - if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) { - usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); - } else { - usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); - return -EPIPE; + if (urb->num_sgs) { + copy = size; + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + int recv_size; + + if (copy < sg->length) + recv_size = copy; + else + recv_size = sg->length; + + recv = usbip_recv(ud->tcp_socket, sg_virt(sg), + recv_size); + + if (recv != recv_size) + goto error; + + copy -= recv; + ret += recv; + + if (!copy) + break; } + + if (ret != size) + goto error; + } else { + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); + if (ret != size) + goto error; } return ret; + +error: + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); + if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); + else + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + + return -EPIPE; } EXPORT_SYMBOL_GPL(usbip_recv_xbuff); diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index bf8afe9b5883850325fb70fc3873bff20237040b..8be857a4fa132fc1e48d86bfb1f08f9f4fef7202 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug; #define USBIP_DIR_OUT 0x00 #define USBIP_DIR_IN 0x01 +/* + * Arbitrary limit for the maximum number of isochronous packets in an URB, + * compare for example the uhci_submit_isochronous function in + * drivers/usb/host/uhci-q.c + */ +#define USBIP_MAX_ISO_PACKETS 1024 + /** * struct usbip_header_basic - data pertinent to every request * @command: the usbip request type diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 1e592ec94ba49d19ba457af7274cc71c9779ea35..c1bc3bd2098dbb35c9126a614b5aa24a05fb5c2e 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -702,8 +702,11 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag } vdev = &vhci_hcd->vdev[portnum-1]; - /* patch to usb_sg_init() is in 2.5.60 */ - BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); + if (!urb->transfer_buffer && !urb->num_sgs && + urb->transfer_buffer_length) { + dev_dbg(dev, "Null URB transfer buffer\n"); + return -EINVAL; + } spin_lock_irqsave(&vhci->lock, flags); @@ -738,6 +741,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag * */ if (usb_pipedevice(urb->pipe) == 0) { + struct usb_device *old; __u8 type = usb_pipetype(urb->pipe); struct usb_ctrlrequest *ctrlreq = (struct usb_ctrlrequest *) urb->setup_packet; @@ -748,14 +752,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag goto 
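The receive path fills each scatterlist entry with at most the number of bytes still outstanding, so a short final segment never overruns the transfer length, and every failure funnels through one error label. A simplified userspace sketch of the same clamping loop (buffer array and lengths are made up for illustration) is::

    #include <stddef.h>
    #include <unistd.h>

    /* Read 'total' bytes from fd, spreading them over 'n' fixed buffers. */
    ssize_t recv_into_buffers(int fd, char *buf[], size_t buf_len[], int n,
                              size_t total)
    {
        size_t remaining = total, got = 0;

        for (int i = 0; i < n && remaining; i++) {
            size_t chunk = remaining < buf_len[i] ? remaining : buf_len[i];
            ssize_t r = read(fd, buf[i], chunk);

            if (r != (ssize_t)chunk)
                return -1;       /* short read: caller treats it as an error */
            remaining -= chunk;
            got += chunk;
        }
        return remaining ? -1 : (ssize_t)got;
    }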
no_need_xmit; } + old = vdev->udev; switch (ctrlreq->bRequest) { case USB_REQ_SET_ADDRESS: /* set_address may come when a device is reset */ dev_info(dev, "SetAddress Request (%d) to port %d\n", ctrlreq->wValue, vdev->rhport); - usb_put_dev(vdev->udev); vdev->udev = usb_get_dev(urb->dev); + usb_put_dev(old); spin_lock(&vdev->ud.lock); vdev->ud.status = VDEV_ST_USED; @@ -774,8 +779,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag usbip_dbg_vhci_hc( "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n"); - usb_put_dev(vdev->udev); vdev->udev = usb_get_dev(urb->dev); + usb_put_dev(old); goto out; default: @@ -1058,6 +1063,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) static void vhci_device_reset(struct usbip_device *ud) { struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); + struct usb_device *old = vdev->udev; unsigned long flags; spin_lock_irqsave(&ud->lock, flags); @@ -1065,8 +1071,8 @@ static void vhci_device_reset(struct usbip_device *ud) vdev->speed = 0; vdev->devid = 0; - usb_put_dev(vdev->udev); vdev->udev = NULL; + usb_put_dev(old); if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); @@ -1146,6 +1152,15 @@ static int vhci_setup(struct usb_hcd *hcd) hcd->speed = HCD_USB3; hcd->self.root_hub->speed = USB_SPEED_SUPER; } + + /* + * Support SG. + * sg_tablesize is an arbitrary value to alleviate memory pressure + * on the host. + */ + hcd->self.sg_tablesize = 32; + hcd->self.no_sg_constraint = 1; + return 0; } diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 44cd645189250565ab0ea651a6a1a47b4649febd..00fc98741c5d1176bed0cc399a5e1e6e39a8cab5 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -77,19 +77,27 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0); /* recv transfer buffer */ - if (usbip_recv_xbuff(ud, urb) < 0) - return; + if (usbip_recv_xbuff(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* recv iso_packet_descriptor */ - if (usbip_recv_iso(ud, urb) < 0) - return; + if (usbip_recv_iso(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* restore the padding in iso packets */ usbip_pad_iso(ud, urb); +error: if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); + if (urb->num_sgs) + urb->transfer_flags &= ~URB_DMA_MAP_SG; + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 9aed15a358b7b98b0fab9e6b02b6820cff9ff808..acac49402c2b1b4ca714188ef436c228c860f244 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -5,6 +5,7 @@ #include #include +#include #include "usbip_common.h" #include "vhci.h" @@ -50,19 +51,23 @@ static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev) static int vhci_send_cmd_submit(struct vhci_device *vdev) { + struct usbip_iso_packet_descriptor *iso_buffer = NULL; struct vhci_priv *priv = NULL; + struct scatterlist *sg; struct msghdr msg; - struct kvec iov[3]; + struct kvec *iov; size_t txsize; size_t total_size = 0; + int iovnum; + int err = -ENOMEM; + int i; while ((priv = dequeue_from_priv_tx(vdev)) != NULL) { int ret; struct urb *urb = priv->urb; struct usbip_header pdu_header; - struct usbip_iso_packet_descriptor *iso_buffer = NULL; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); @@ -72,18 +77,45 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) usbip_dbg_vhci_tx("setup 
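The SetAddress/Get_Descriptor hunks reorder the reference handling so the new device is pinned before the old reference is dropped; the stored pointer therefore never points at freed memory, even briefly. The general get-before-put pattern, shown with a hypothetical refcounted type, is::

    /* Hypothetical object with get/put helpers (not driver code). */
    struct dev { int refs; };

    void free_dev(struct dev *d);    /* assumed destructor */

    static struct dev *dev_get(struct dev *d) { if (d) d->refs++; return d; }
    static void dev_put(struct dev *d) { if (d && --d->refs == 0) free_dev(d); }

    static struct dev *current_dev;

    void swap_current(struct dev *new_dev)
    {
        struct dev *old = current_dev;

        current_dev = dev_get(new_dev);  /* take the new reference first */
        dev_put(old);                    /* only then drop the old one */
    }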
txdata urb seqnum %lu\n", priv->seqnum); + if (urb->num_sgs && usb_pipeout(urb->pipe)) + iovnum = 2 + urb->num_sgs; + else + iovnum = 3; + + iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL); + if (!iov) { + usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC); + return -ENOMEM; + } + + if (urb->num_sgs) + urb->transfer_flags |= URB_DMA_MAP_SG; + /* 1. setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); usbip_header_correct_endian(&pdu_header, 1); + iovnum = 0; - iov[0].iov_base = &pdu_header; - iov[0].iov_len = sizeof(pdu_header); + iov[iovnum].iov_base = &pdu_header; + iov[iovnum].iov_len = sizeof(pdu_header); txsize += sizeof(pdu_header); + iovnum++; /* 2. setup transfer buffer */ if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) { - iov[1].iov_base = urb->transfer_buffer; - iov[1].iov_len = urb->transfer_buffer_length; + if (urb->num_sgs && + !usb_endpoint_xfer_isoc(&urb->ep->desc)) { + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + iov[iovnum].iov_base = sg_virt(sg); + iov[iovnum].iov_len = sg->length; + iovnum++; + } + } else { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = + urb->transfer_buffer_length; + iovnum++; + } txsize += urb->transfer_buffer_length; } @@ -95,30 +127,43 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) if (!iso_buffer) { usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC); - return -1; + goto err_iso_buffer; } - iov[2].iov_base = iso_buffer; - iov[2].iov_len = len; + iov[iovnum].iov_base = iso_buffer; + iov[iovnum].iov_len = len; + iovnum++; txsize += len; } - ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, 3, txsize); + ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum, + txsize); if (ret != txsize) { pr_err("sendmsg failed!, ret=%d for %zd\n", ret, txsize); - kfree(iso_buffer); usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP); - return -1; + err = -EPIPE; + goto err_tx; } + kfree(iov); + /* This is only for isochronous case */ kfree(iso_buffer); + iso_buffer = NULL; + usbip_dbg_vhci_tx("send txdata\n"); total_size += txsize; } return total_size; + +err_tx: + kfree(iso_buffer); +err_iso_buffer: + kfree(iov); + + return err; } static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev) diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c index 3fc22037a82f9803c619434361d5d46fa191ec79..390733e6937e15f534cf28912e65ba71b0d71a70 100644 --- a/drivers/usb/usbip/vudc_main.c +++ b/drivers/usb/usbip/vudc_main.c @@ -73,6 +73,10 @@ static int __init init(void) cleanup: list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) { list_del(&udc_dev->dev_entry); + /* + * Just do platform_device_del() here, put_vudc_device() + * calls the platform_device_put() + */ platform_device_del(udc_dev->pdev); put_vudc_device(udc_dev); } @@ -89,7 +93,11 @@ static void __exit cleanup(void) list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) { list_del(&udc_dev->dev_entry); - platform_device_unregister(udc_dev->pdev); + /* + * Just do platform_device_del() here, put_vudc_device() + * calls the platform_device_put() + */ + platform_device_del(udc_dev->pdev); put_vudc_device(udc_dev); } platform_driver_unregister(&vudc_driver); diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 0212f0ee8aea7577246c01c99821e0ba12cf9373..e052f62fdea7e8151cf4fd2b3d650805161da341 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -150,10 +150,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool 
force_remove) static int mdev_device_remove_cb(struct device *dev, void *data) { - if (!dev_is_mdev(dev)) - return 0; + if (dev_is_mdev(dev)) + mdev_device_remove(dev, true); - return mdev_device_remove(dev, data ? *(bool *)data : true); + return 0; } /* @@ -182,6 +182,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) /* Check for duplicate */ parent = __find_parent_device(dev); if (parent) { + parent = NULL; ret = -EEXIST; goto add_dev_err; } @@ -240,7 +241,6 @@ EXPORT_SYMBOL(mdev_register_device); void mdev_unregister_device(struct device *dev) { struct mdev_parent *parent; - bool force_remove = true; mutex_lock(&parent_list_lock); parent = __find_parent_device(dev); @@ -254,8 +254,7 @@ void mdev_unregister_device(struct device *dev) list_del(&parent->next); class_compat_remove_link(mdev_bus_compat_class, dev, NULL); - device_for_each_child(dev, (void *)&force_remove, - mdev_device_remove_cb); + device_for_each_child(dev, NULL, mdev_device_remove_cb); parent_remove_sysfs_files(parent); diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 249472f055097eebdebc087dc8d3a6caf56d541b..c99fcc6c2eba8b4947c52a7f4f83eb3275b13658 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -108,12 +108,13 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, return ERR_PTR(-ENOMEM); type->kobj.kset = parent->mdev_types_kset; + type->parent = parent; ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL, "%s-%s", dev_driver_string(parent->dev), group->name); if (ret) { - kfree(type); + kobject_put(&type->kobj); return ERR_PTR(ret); } @@ -135,7 +136,6 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, } type->group = group; - type->parent = parent; return type; attrs_failed: @@ -280,7 +280,7 @@ int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type) void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type) { + sysfs_remove_files(&dev->kobj, mdev_device_attrs); sysfs_remove_link(&dev->kobj, "mdev_type"); sysfs_remove_link(type->devices_kobj, dev_name(dev)); - sysfs_remove_files(&dev->kobj, mdev_device_attrs); } diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 42dc1d3d71cf05a7c91c5316ee832b62b15bf75e..fcbfd0aacebcdd13fa39c3faab6edd20bf3f7194 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -1,6 +1,7 @@ config VFIO_PCI tristate "VFIO support for PCI devices" depends on VFIO && PCI && EVENTFD + depends on MMU select VFIO_VIRQFD select IRQ_BYPASS_MANAGER help diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index cddb453a1ba578408a050a18d52c7372ace92029..51b791c750f1b72c96bf741572b823666a85b2bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -117,8 +118,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) int bar; struct vfio_pci_dummy_resource *dummy_res; - INIT_LIST_HEAD(&vdev->dummy_resources_list); - for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { res = vdev->pdev->resource + bar; @@ -181,6 +180,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx 
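The mdev_sysfs fix follows the usual kobject lifetime rule: once kobject_init_and_add() has run, the object is owned by its reference count, so error paths must drop that reference (letting the ktype release callback free it) rather than calling kfree() directly. A stripped-down sketch of the same ownership rule, with an invented type, is::

    #include <stdlib.h>
    #include <string.h>

    /* Invented refcounted object; release() is the only place that frees. */
    struct node { int refs; char *name; };

    static void node_release(struct node *n) { free(n->name); free(n); }
    static void node_put(struct node *n) { if (--n->refs == 0) node_release(n); }

    struct node *node_create(const char *name)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
            return NULL;
        n->refs = 1;
        n->name = strdup(name);
        if (!n->name) {
            node_put(n);     /* not free(n): the release path owns cleanup */
            return NULL;
        }
        return n;
    }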
signaling via PCI_COMMAND @@ -373,11 +373,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); /* - * Try to reset the device. The success of this is dependent on - * being able to lock the device, which is not always possible. + * Try to get the locks ourselves to prevent a deadlock. The + * success of this is dependent on being able to lock the device, + * which is not always possible. + * We can not use the "try" reset interface here, which will + * overwrite the previously restored configuration information. */ - if (vdev->reset_works && !pci_try_reset_function(pdev)) - vdev->needs_reset = false; + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { + if (device_trylock(&pdev->dev)) { + if (!__pci_reset_function_locked(pdev)) + vdev->needs_reset = false; + device_unlock(&pdev->dev); + } + pci_cfg_access_unlock(pdev); + } pci_restore_state(pdev); out: @@ -398,6 +407,19 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); + if (vdev->err_trigger) { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); + if (vdev->req_trigger) { + eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } + mutex_unlock(&vdev->igate); } mutex_unlock(&driver_lock); @@ -434,10 +456,14 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) { if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { u8 pin; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || + vdev->nointx || vdev->pdev->is_virtfn) + return 0; + pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin) - return 1; + return pin ? 1 : 0; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { u8 pos; u16 flags; @@ -610,6 +636,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -683,6 +715,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -698,15 +731,20 @@ static long vfio_pci_ioctl(void *device_data, break; } - /* Is it really there? */ + /* + * Is it really there? Enable memory decode for + * implicit access in pci_map_rom(). + */ + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); - if (!io || !size) { + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_unmap_rom(pdev, io); + } else { info.size = 0; - break; } - pci_unmap_rom(pdev, io); + vfio_pci_memory_unlock_and_restore(vdev, cmd); - info.flags = VFIO_REGION_INFO_FLAG_READ; break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -843,8 +881,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? 
- pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -924,8 +970,9 @@ static long vfio_pci_ioctl(void *device_data, int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -977,9 +1024,9 @@ static long vfio_pci_ioctl(void *device_data, * user interface and store the group and iommu ID. This * ensures the group is held across the reset. */ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -992,8 +1039,9 @@ static long vfio_pci_ioctl(void *device_data, break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1012,13 +1060,63 @@ static long vfio_pci_ioctl(void *device_data, ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = pci_reset_bus(vdev->pdev); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1099,6 +1197,215 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. 
+ * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. + * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. + */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
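The comment above pins the order mmap_sem -> vma_lock -> memory_lock and, for paths that must lock several devices, falls back to trylocks that unwind completely on contention instead of blocking in the wrong order. The classic shape of that deadlock-avoidance loop, shown here with plain pthread mutexes and invented names, is::

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

    /* Take both locks without ever blocking on B while holding A. */
    void lock_both(void)
    {
        for (;;) {
            pthread_mutex_lock(&lock_a);
            if (pthread_mutex_trylock(&lock_b) == 0)
                return;                      /* got both, order preserved */
            pthread_mutex_unlock(&lock_a);   /* contention: back off fully */
            sched_yield();
        }
    }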
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + goto up_out; + } + + /* + * We populate the whole vma on fault, so we need to test whether + * the vma has already been mapped, such as for concurrent faults + * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if + * we ask it to fill the same range again. + */ + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) + goto up_out; + } + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + ret = VM_FAULT_SIGBUS; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + } + +up_out: + up_read(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1148,8 +1455,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1220,7 +1533,11 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&vdev->igate); spin_lock_init(&vdev->irqlock); mutex_init(&vdev->ioeventfds_lock); + INIT_LIST_HEAD(&vdev->dummy_resources_list); INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -1316,12 +1633,6 @@ static struct pci_driver vfio_pci_driver = { .err_handler = &vfio_err_handlers, }; -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1343,6 +1654,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * Attempt to do a bus/slot reset if there are devices affected by a reset for * this device that are needs_reset and all of the affected devices are unused @@ -1443,11 +1787,11 @@ static void __init vfio_pci_fill_ids(void) rc = pci_add_dynid(&vfio_pci_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) - pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", + pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", vendor, device, subvendor, subdevice, class, class_mask, rc); else - pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", + pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", vendor, device, subvendor, subdevice, class, class_mask); } diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 115a36f6f40398b75dfa75b61b50e81f98bc5a63..0f20e2d977d482732b4de0a4804589aafcba25b8 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -315,6 +315,10 @@ static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos, return count; } +static struct perm_bits direct_ro_perms = { + .readfn = vfio_direct_config_read, +}; + /* Default capability regions to read-only, no-virtualization */ static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { [0 ... 
PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } @@ -398,6 +402,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. * (backdoor = some device specific technique that we didn't catch) @@ -558,13 +576,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -581,8 +604,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -593,6 +619,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -830,8 +858,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -909,8 +940,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; @@ -1180,8 +1214,10 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) return -ENOMEM; ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); - if (ret) + if (ret) { + kfree(vdev->msi_perm); return ret; + } return len; } @@ -1462,7 +1498,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) if (ret) return ret; - if (cap <= PCI_CAP_ID_MAX) { + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. 
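Writes that clear the memory enable bit take memory_lock for writing (after zapping mappings), while accessors take it for reading and bail out when decode is off; this is a straightforward reader/writer gate around an enable flag. A minimal pthread sketch of that gate, with invented names, is::

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t gate = PTHREAD_RWLOCK_INITIALIZER;
    static bool mem_enabled;

    int gated_access(void (*do_io)(void))
    {
        pthread_rwlock_rdlock(&gate);
        if (!mem_enabled) {
            pthread_rwlock_unlock(&gate);
            return -EIO;            /* decode disabled: refuse the access */
        }
        do_io();                    /* safe while the read lock is held */
        pthread_rwlock_unlock(&gate);
        return 0;
    }

    void set_mem_enabled(bool on)
    {
        pthread_rwlock_wrlock(&gate);   /* excludes all in-flight accessors */
        mem_enabled = on;
        pthread_rwlock_unlock(&gate);
    }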
+ */ + if (cap && cap <= PCI_CAP_ID_MAX) { len = pci_cap_length[cap]; if (len == 0xFF) { /* Variable length */ len = vfio_cap_len(vdev, cap, pos); @@ -1542,7 +1583,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) if (len == 0xFF) { len = vfio_ext_cap_len(vdev, ecap, epos); if (len < 0) - return ret; + return len; } } @@ -1609,6 +1650,15 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) return 0; } +/* + * Nag about hardware bugs, hopefully to have vendors fix them, but at least + * to collect a list of dependencies for the VF INTx pin quirk below. + */ +static const struct pci_device_id known_bogus_vf_intx_pin[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, + {} +}; + /* * For each device we allocate a pci_config_map that indicates the * capability occupying each dword and thus the struct perm_bits we @@ -1674,6 +1724,33 @@ int vfio_config_init(struct vfio_pci_device *vdev) if (pdev->is_virtfn) { *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); + + /* + * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register + * does not apply to VFs and VFs must implement this register + * as read-only with value zero. Userspace is not readily able + * to identify whether a device is a VF and thus that the pin + * definition on the device is bogus should it violate this + * requirement. We already virtualize the pin register for + * other purposes, so we simply need to replace the bogus value + * and consider VFs when we determine INTx IRQ count. + */ + if (vconfig[PCI_INTERRUPT_PIN] && + !pci_match_id(known_bogus_vf_intx_pin, pdev)) + pci_warn(pdev, + "Hardware bug: VF reports bogus INTx pin %d\n", + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. + */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) @@ -1703,8 +1780,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) vdev->vconfig = NULL; kfree(vdev->pci_config_map); vdev->pci_config_map = NULL; - kfree(vdev->msi_perm); - vdev->msi_perm = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } } /* @@ -1761,9 +1841,17 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf, cap_start = *ppos; } else { if (*ppos >= PCI_CFG_SPACE_SIZE) { - WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + /* + * We can get a cap_id that exceeds PCI_EXT_CAP_ID_MAX + * if we're hiding an unknown capability at the start + * of the extended capability list. Use default, ro + * access, which will virtualize the id and next values. 
+ */ + if (cap_id > PCI_EXT_CAP_ID_MAX) + perm = &direct_ro_perms; + else + perm = &ecap_perms[cap_id]; - perm = &ecap_perms[cap_id]; cap_start = vfio_find_cap_start(vdev, *ppos); } else { WARN_ON(cap_id > PCI_CAP_ID_MAX); diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 1c46045b0e7fc6b2e8ef421853742851aa880d7e..405aaaec5950e088522df711b58a3ab66480e342 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -32,15 +32,22 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) { struct vfio_pci_device *vdev = opaque; - if (likely(is_intx(vdev) && !vdev->virq_disabled)) - eventfd_signal(vdev->ctx[0].trigger, 1); + if (likely(is_intx(vdev) && !vdev->virq_disabled)) { + struct eventfd_ctx *trigger; + + trigger = READ_ONCE(vdev->ctx[0].trigger); + if (likely(trigger)) + eventfd_signal(trigger, 1); + } } -void vfio_pci_intx_mask(struct vfio_pci_device *vdev) +static void __vfio_pci_intx_mask(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; unsigned long flags; + lockdep_assert_held(&vdev->igate); + spin_lock_irqsave(&vdev->irqlock, flags); /* @@ -68,6 +75,13 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev) spin_unlock_irqrestore(&vdev->irqlock, flags); } +void vfio_pci_intx_mask(struct vfio_pci_device *vdev) +{ + mutex_lock(&vdev->igate); + __vfio_pci_intx_mask(vdev); + mutex_unlock(&vdev->igate); +} + /* * If this is triggered by an eventfd, we can't call eventfd_signal * or else we'll deadlock on the eventfd wait queue. Return >0 when @@ -110,12 +124,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) return ret; } -void vfio_pci_intx_unmask(struct vfio_pci_device *vdev) +static void __vfio_pci_intx_unmask(struct vfio_pci_device *vdev) { + lockdep_assert_held(&vdev->igate); + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) vfio_send_intx_eventfd(vdev, NULL); } +void vfio_pci_intx_unmask(struct vfio_pci_device *vdev) +{ + mutex_lock(&vdev->igate); + __vfio_pci_intx_unmask(vdev); + mutex_unlock(&vdev->igate); +} + static irqreturn_t vfio_intx_handler(int irq, void *dev_id) { struct vfio_pci_device *vdev = dev_id; @@ -142,95 +165,106 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id) return ret; } -static int vfio_intx_enable(struct vfio_pci_device *vdev) +static int vfio_intx_enable(struct vfio_pci_device *vdev, + struct eventfd_ctx *trigger) { + struct pci_dev *pdev = vdev->pdev; + unsigned long irqflags; + char *name; + int ret; + if (!is_irq_none(vdev)) return -EINVAL; - if (!vdev->pdev->irq) + if (!pdev->irq) return -ENODEV; + name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); + if (!name) + return -ENOMEM; + vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); - if (!vdev->ctx) + if (!vdev->ctx) { + kfree(name); return -ENOMEM; + } vdev->num_ctx = 1; + vdev->ctx[0].name = name; + vdev->ctx[0].trigger = trigger; + /* - * If the virtual interrupt is masked, restore it. Devices - * supporting DisINTx can be masked at the hardware level - * here, non-PCI-2.3 devices will have to wait until the - * interrupt is enabled. + * Fill the initial masked state based on virq_disabled. After + * enable, changing the DisINTx bit in vconfig directly changes INTx + * masking. igate prevents races during setup, once running masked + * is protected via irqlock. + * + * Devices supporting DisINTx also reflect the current mask state in + * the physical DisINTx bit, which is not affected during IRQ setup. 
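The interrupt path now reads the trigger pointer exactly once and signals it only when non-NULL, while the updater swaps the pointer and releases the old one only after in-flight handlers have finished (synchronize_irq() in the driver). A C11 sketch of that publish/retire pattern, where signal_eventfd() is an assumed helper, is::

    #include <stdatomic.h>
    #include <stddef.h>

    struct eventfd;                          /* opaque for this sketch */
    void signal_eventfd(struct eventfd *e);  /* assumed helper */

    static _Atomic(struct eventfd *) trigger;

    void irq_path(void)
    {
        struct eventfd *t = atomic_load(&trigger);  /* one tear-free read */

        if (t)
            signal_eventfd(t);
    }

    /* Returns the old pointer; the caller must wait for readers to finish
     * (the driver uses synchronize_irq()) before releasing it. */
    struct eventfd *publish_trigger(struct eventfd *new_trigger)
    {
        return atomic_exchange(&trigger, new_trigger);
    }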
+ * + * Devices without DisINTx support require an exclusive interrupt. + * IRQ masking is performed at the IRQ chip. Again, igate protects + * against races during setup and IRQ handlers and irqfds are not + * yet active, therefore masked is stable and can be used to + * conditionally auto-enable the IRQ. + * + * irq_type must be stable while the IRQ handler is registered, + * therefore it must be set before request_irq(). */ vdev->ctx[0].masked = vdev->virq_disabled; - if (vdev->pci_2_3) - pci_intx(vdev->pdev, !vdev->ctx[0].masked); + if (vdev->pci_2_3) { + pci_intx(pdev, !vdev->ctx[0].masked); + irqflags = IRQF_SHARED; + } else { + irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0; + } vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, vdev->ctx[0].name, vdev); + if (ret) { + vdev->irq_type = VFIO_PCI_NUM_IRQS; + kfree(name); + vdev->num_ctx = 0; + kfree(vdev->ctx); + return ret; + } + return 0; } -static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) +static int vfio_intx_set_signal(struct vfio_pci_device *vdev, + struct eventfd_ctx *trigger) { struct pci_dev *pdev = vdev->pdev; - unsigned long irqflags = IRQF_SHARED; - struct eventfd_ctx *trigger; - unsigned long flags; - int ret; - - if (vdev->ctx[0].trigger) { - free_irq(pdev->irq, vdev); - kfree(vdev->ctx[0].name); - eventfd_ctx_put(vdev->ctx[0].trigger); - vdev->ctx[0].trigger = NULL; - } - - if (fd < 0) /* Disable only */ - return 0; - - vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", - pci_name(pdev)); - if (!vdev->ctx[0].name) - return -ENOMEM; - - trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(vdev->ctx[0].name); - return PTR_ERR(trigger); - } + struct eventfd_ctx *old; - vdev->ctx[0].trigger = trigger; + old = vdev->ctx[0].trigger; - if (!vdev->pci_2_3) - irqflags = 0; + WRITE_ONCE(vdev->ctx[0].trigger, trigger); - ret = request_irq(pdev->irq, vfio_intx_handler, - irqflags, vdev->ctx[0].name, vdev); - if (ret) { - vdev->ctx[0].trigger = NULL; - kfree(vdev->ctx[0].name); - eventfd_ctx_put(trigger); - return ret; + /* Releasing an old ctx requires synchronizing in-flight users */ + if (old) { + synchronize_irq(pdev->irq); + vfio_virqfd_flush_thread(&vdev->ctx[0].unmask); + eventfd_ctx_put(old); } - /* - * INTx disable will stick across the new irq setup, - * disable_irq won't. - */ - spin_lock_irqsave(&vdev->irqlock, flags); - if (!vdev->pci_2_3 && vdev->ctx[0].masked) - disable_irq_nosync(pdev->irq); - spin_unlock_irqrestore(&vdev->irqlock, flags); - return 0; } static void vfio_intx_disable(struct vfio_pci_device *vdev) { + struct pci_dev *pdev = vdev->pdev; + vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_virqfd_disable(&vdev->ctx[0].mask); - vfio_intx_set_signal(vdev, -1); + free_irq(pdev->irq, vdev); + if (vdev->ctx[0].trigger) + eventfd_ctx_put(vdev->ctx[0].trigger); + kfree(vdev->ctx[0].name); vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->num_ctx = 0; kfree(vdev->ctx); @@ -252,6 +286,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? 
PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -261,13 +296,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : @@ -290,6 +328,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -297,8 +336,12 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, irq = pci_irq_vector(pdev, vector); if (vdev->ctx[vector].trigger) { - free_irq(irq, vdev->ctx[vector].trigger); irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); + free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -326,6 +369,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. */ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -335,6 +379,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -344,11 +389,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, vdev->ctx[vector].producer.token = trigger; vdev->ctx[vector].producer.irq = irq; ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); - if (unlikely(ret)) + if (unlikely(ret)) { dev_info(&pdev->dev, "irq bypass producer (token %p) registration fails: %d\n", vdev->ctx[vector].producer.token, ret); + vdev->ctx[vector].producer.token = NULL; + } vdev->ctx[vector].trigger = trigger; return 0; @@ -379,6 +426,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -387,7 +435,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx @@ -412,11 +462,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t unmask = *(uint8_t *)data; if (unmask) - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t fd = *(int32_t *)data; if (fd >= 0) @@ -439,11 +489,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev, return 
-EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t mask = *(uint8_t *)data; if (mask) - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { return -ENOTTY; /* XXX implement me */ } @@ -464,19 +514,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + struct eventfd_ctx *trigger = NULL; int32_t fd = *(int32_t *)data; int ret; - if (is_intx(vdev)) - return vfio_intx_set_signal(vdev, fd); + if (fd >= 0) { + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) + return PTR_ERR(trigger); + } - ret = vfio_intx_enable(vdev); - if (ret) - return ret; + if (is_intx(vdev)) + ret = vfio_intx_set_signal(vdev, trigger); + else + ret = vfio_intx_enable(vdev, trigger); - ret = vfio_intx_set_signal(vdev, fd); - if (ret) - vfio_intx_disable(vdev); + if (ret && trigger) + eventfd_ctx_put(trigger); return ret; } diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index cde3b5d3441ad5e98a579d0050317245216ce36c..17d2bae5b013c6fa1778edb104b14fc9ca9f8b00 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource { struct list_head res_next; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -111,6 +116,9 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -149,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, unsigned int type, unsigned int subtype, const struct vfio_pci_regops *ops, size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index a6029d0a55244d2e34bd73febacfbb147117efe0..3d0ec2bbe131f046e0cffa96d39b5d640a8d3c93 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much 
faster. */ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else { int ret = vfio_pci_setup_barmap(vdev, bar); - if (ret) - return ret; + if (ret) { + done = ret; + goto out; + } io = vdev->barmap[bar]; } @@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index c0cd824be2b767bea76dbea2d8137125762836c2..bf06825d977eef883ff6e5355bd40fc10d1a8018 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -296,7 +296,7 @@ static int vfio_platform_open(void *device_data) vfio_platform_regions_cleanup(vdev); err_reg: mutex_unlock(&driver_lock); - module_put(THIS_MODULE); + module_put(vdev->parent_module); return ret; } diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c index 46d4750f43a8dccd574ea21f4fe53ecef4ec764c..ae5c65c760ce007d47b95718839a741a76772518 100644 --- a/drivers/vfio/platform/vfio_platform_irq.c +++ b/drivers/vfio/platform/vfio_platform_irq.c @@ -144,6 +144,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, return 0; } +/* + * The trigger eventfd is guaranteed valid in the interrupt path + * and protected by the igate mutex when triggered via ioctl. + */ +static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx) +{ + if (likely(irq_ctx->trigger)) + eventfd_signal(irq_ctx->trigger, 1); +} + static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; @@ -163,7 +173,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) spin_unlock_irqrestore(&irq_ctx->lock, flags); if (ret == IRQ_HANDLED) - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return ret; } @@ -172,22 +182,19 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return IRQ_HANDLED; } static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, - int fd, irq_handler_t handler) + int fd) { struct vfio_platform_irq *irq = &vdev->irqs[index]; struct eventfd_ctx *trigger; - int ret; if (irq->trigger) { - irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN); - free_irq(irq->hwirq, irq); - kfree(irq->name); + disable_irq(irq->hwirq); eventfd_ctx_put(irq->trigger); irq->trigger = NULL; } @@ -195,30 +202,20 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, if (fd < 0) /* Disable only */ return 0; - irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)", - irq->hwirq, vdev->name); - if (!irq->name) - return -ENOMEM; - trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(irq->name); + if (IS_ERR(trigger)) return PTR_ERR(trigger); - } irq->trigger = trigger; - irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN); - ret = request_irq(irq->hwirq, handler, 0, irq->name, irq); - if (ret) { - kfree(irq->name); - eventfd_ctx_put(trigger); - irq->trigger = NULL; - return ret; - } - - if (!irq->masked) - enable_irq(irq->hwirq); + /* + * irq->masked effectively provides nested disables within the overall + * enable relative to trigger. 
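Because the platform IRQ is now requested up front but left disabled (NO_AUTOEN), and only enabled once a trigger is attached, masking relies on enable_irq()/disable_irq() nesting: the line is live only when every disable has been balanced. The sketch below models that depth counter with invented names::

    #include <stdbool.h>

    /* Starts at depth 1, mirroring an IRQ requested but not auto-enabled. */
    struct line { int depth; bool live; };

    void line_disable(struct line *l)
    {
        if (l->depth++ == 0)
            l->live = false;     /* first disable actually masks */
    }

    void line_enable(struct line *l)
    {
        if (--l->depth == 0)
            l->live = true;      /* last enable actually unmasks */
    }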
Specifically request_irq() is called + * with NO_AUTOEN, therefore the IRQ is initially disabled. The user + * may only further disable the IRQ with a MASK operations because + * irq->masked is initially false. + */ + enable_irq(irq->hwirq); return 0; } @@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, handler = vfio_irq_handler; if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) - return vfio_set_trigger(vdev, index, -1, handler); + return vfio_set_trigger(vdev, index, -1); if (start != 0 || count != 1) return -EINVAL; @@ -245,7 +242,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t fd = *(int32_t *)data; - return vfio_set_trigger(vdev, index, fd, handler); + return vfio_set_trigger(vdev, index, fd); } if (flags & VFIO_IRQ_SET_DATA_NONE) { @@ -269,6 +266,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, unsigned start, unsigned count, uint32_t flags, void *data) = NULL; + /* + * For compatibility, errors from request_irq() are local to the + * SET_IRQS path and reflected in the name pointer. This allows, + * for example, polling mode fallback for an exclusive IRQ failure. + */ + if (IS_ERR(vdev->irqs[index].name)) + return PTR_ERR(vdev->irqs[index].name); + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK: func = vfio_platform_set_irq_mask; @@ -289,7 +294,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, int vfio_platform_irq_init(struct vfio_platform_device *vdev) { - int cnt = 0, i; + int cnt = 0, i, ret = 0; while (vdev->get_irq(vdev, cnt) >= 0) cnt++; @@ -300,37 +305,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev) for (i = 0; i < cnt; i++) { int hwirq = vdev->get_irq(vdev, i); + irq_handler_t handler = vfio_irq_handler; - if (hwirq < 0) + if (hwirq < 0) { + ret = -EINVAL; goto err; + } spin_lock_init(&vdev->irqs[i].lock); vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; - if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) + if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) { vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED; + handler = vfio_automasked_irq_handler; + } vdev->irqs[i].count = 1; vdev->irqs[i].hwirq = hwirq; vdev->irqs[i].masked = false; + vdev->irqs[i].name = kasprintf(GFP_KERNEL, + "vfio-irq[%d](%s)", hwirq, + vdev->name); + if (!vdev->irqs[i].name) { + ret = -ENOMEM; + goto err; + } + + ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN, + vdev->irqs[i].name, &vdev->irqs[i]); + if (ret) { + kfree(vdev->irqs[i].name); + vdev->irqs[i].name = ERR_PTR(ret); + } } vdev->num_irqs = cnt; return 0; err: + for (--i; i >= 0; i--) { + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + kfree(vdev->irqs[i].name); + } + } kfree(vdev->irqs); - return -EINVAL; + return ret; } void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) { int i; - for (i = 0; i < vdev->num_irqs; i++) - vfio_set_trigger(vdev, i, -1, NULL); + for (i = 0; i < vdev->num_irqs; i++) { + vfio_virqfd_disable(&vdev->irqs[i].mask); + vfio_virqfd_disable(&vdev->irqs[i].unmask); + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + if (vdev->irqs[i].trigger) + eventfd_ctx_put(vdev->irqs[i].trigger); + kfree(vdev->irqs[i].name); + } + } vdev->num_irqs = 0; kfree(vdev->irqs); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 
64833879f75d3cf2d0739f03e4ca4b587c639cca..7a386fb30bf1fc448e4515946bf4aaf2fbb25797 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -34,6 +34,7 @@ #include #include #include +#include #define DRIVER_VERSION "0.3" #define DRIVER_AUTHOR "Alex Williamson " @@ -904,30 +905,17 @@ void *vfio_device_data(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_device_data); -/* Given a referenced group, check if it contains the device */ -static bool vfio_dev_present(struct vfio_group *group, struct device *dev) -{ - struct vfio_device *device; - - device = vfio_group_get_device(group, dev); - if (!device) - return false; - - vfio_device_put(device); - return true; -} - /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... */ void *vfio_del_group_dev(struct device *dev) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct vfio_device *device = dev_get_drvdata(dev); struct vfio_group *group = device->group; void *device_data = device->device_data; struct vfio_unbound_dev *unbound; unsigned int i = 0; - long ret; bool interrupted = false; /* @@ -964,6 +952,8 @@ void *vfio_del_group_dev(struct device *dev) * interval with counter to allow the driver to take escalating * measures to release the device if it has the ability to do so. */ + add_wait_queue(&vfio.release_q, &wait); + do { device = vfio_group_get_device(group, dev); if (!device) @@ -975,12 +965,10 @@ void *vfio_del_group_dev(struct device *dev) vfio_device_put(device); if (interrupted) { - ret = wait_event_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); + wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); } else { - ret = wait_event_interruptible_timeout(vfio.release_q, - !vfio_dev_present(group, dev), HZ * 10); - if (ret == -ERESTARTSYS) { + wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); + if (signal_pending(current)) { interrupted = true; dev_warn(dev, "Device is currently in use, task" @@ -989,8 +977,10 @@ void *vfio_del_group_dev(struct device *dev) current->comm, task_pid_nr(current)); } } - } while (ret <= 0); + } while (1); + + remove_wait_queue(&vfio.release_q, &wait); /* * In order to support multiple devices per group, devices can be * plucked from the group while other devices in the group are still diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 96721b154454f617689e56ccc029eac9181e4b85..b08e5161615399412b8e3aebfdebc15df5095196 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -45,16 +45,16 @@ static long try_increment_locked_vm(struct mm_struct *mm, long npages) return 0; down_write(&mm->mmap_sem); - locked = mm->locked_vm + npages; + locked = atomic_long_read(&mm->locked_vm) + npages; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) ret = -ENOMEM; else - mm->locked_vm += npages; + atomic_long_add(npages, &mm->locked_vm); pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, npages << PAGE_SHIFT, - mm->locked_vm << PAGE_SHIFT, + atomic_long_read(&mm->locked_vm) << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), ret ? 
" - exceeded" : ""); @@ -69,12 +69,12 @@ static void decrement_locked_vm(struct mm_struct *mm, long npages) return; down_write(&mm->mmap_sem); - if (WARN_ON_ONCE(npages > mm->locked_vm)) - npages = mm->locked_vm; - mm->locked_vm -= npages; + if (WARN_ON_ONCE(npages > atomic_long_read(&mm->locked_vm))) + npages = atomic_long_read(&mm->locked_vm); + atomic_long_sub(npages, &mm->locked_vm); pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, npages << PAGE_SHIFT, - mm->locked_vm << PAGE_SHIFT, + atomic_long_read(&mm->locked_vm) << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK)); up_write(&mm->mmap_sem); } @@ -371,6 +371,7 @@ static void tce_iommu_release(void *iommu_data) { struct tce_container *container = iommu_data; struct tce_iommu_group *tcegrp; + struct tce_iommu_prereg *tcemem, *tmtmp; long i; while (tce_groups_attached(container)) { @@ -393,13 +394,8 @@ static void tce_iommu_release(void *iommu_data) tce_iommu_free_table(container, tbl); } - while (!list_empty(&container->prereg_list)) { - struct tce_iommu_prereg *tcemem; - - tcemem = list_first_entry(&container->prereg_list, - struct tce_iommu_prereg, next); - WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); - } + list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next) + WARN_ON(tce_iommu_prereg_free(container, tcemem)); tce_iommu_disable(container); if (container->mm) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index d9fd3188615dc66267ba5ff715388f27356164b0..5a106963dd088a7a707540fcac8fbc8983561154 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,7 @@ #include #include #include +#include #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR "Alex Williamson " @@ -58,12 +60,19 @@ module_param_named(disable_hugepages, MODULE_PARM_DESC(disable_hugepages, "Disable VFIO IOMMU support for IOMMU hugepages."); +static unsigned int dma_entry_limit __read_mostly = U16_MAX; +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); +MODULE_PARM_DESC(dma_entry_limit, + "Maximum number of user DMA mappings per container (65535)."); + struct vfio_iommu { struct list_head domain_list; + struct list_head mm_list; struct vfio_domain *external_domain; /* domain for external user */ struct mutex lock; struct rb_root dma_list; struct blocking_notifier_head notifier; + unsigned int dma_avail; bool v2; bool nesting; }; @@ -91,6 +100,14 @@ struct vfio_dma { struct vfio_group { struct iommu_group *iommu_group; struct list_head next; + bool sva_enabled; +}; + +struct vfio_mm { +#define VFIO_PASID_INVALID (-1) + int pasid; + struct mm_struct *mm; + struct list_head next; }; /* @@ -257,7 +274,8 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) { struct mm_struct *mm; - int ret; + long locked_vm; + int ret = 0; if (!npage) return 0; @@ -266,24 +284,15 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) if (!mm) return -ESRCH; /* process exited */ - ret = down_write_killable(&mm->mmap_sem); - if (!ret) { - if (npage > 0) { - if (!dma->lock_cap) { - unsigned long limit; - - limit = task_rlimit(dma->task, - RLIMIT_MEMLOCK) >> PAGE_SHIFT; + locked_vm = atomic_long_add_return(npage, &mm->locked_vm); - if (mm->locked_vm + npage > limit) - ret = -ENOMEM; - } + if (npage > 0 && !dma->lock_cap) { + unsigned long limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) 
>> + PAGE_SHIFT; + if (locked_vm > limit) { + atomic_long_sub(npage, &mm->locked_vm); + ret = -ENOMEM; } - - if (!ret) - mm->locked_vm += npage; - - up_write(&mm->mmap_sem); } if (async) @@ -337,6 +346,32 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, int prot, unsigned long *pfn) { @@ -376,12 +411,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, down_read(&mm->mmap_sem); +retry: vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(&mm->mmap_sem); @@ -395,7 +434,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - unsigned long limit) + unsigned long limit, struct mm_struct *mm) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -403,10 +442,10 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, dma_addr_t iova = vaddr - dma->vaddr + dma->iova; /* This code path is only user initiated */ - if (!current->mm) + if (!mm) return -ENODEV; - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); if (ret) return ret; @@ -418,7 +457,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, * pages are already counted against the user. 
*/ if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { + if (!dma->lock_cap && + atomic_long_read(&mm->locked_vm) + 1 > limit) { put_pfn(*pfn_base, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -433,7 +473,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn); if (ret) break; @@ -444,8 +484,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, } if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!dma->lock_cap && - current->mm->locked_vm + lock_acct + 1 > limit) { + if (!dma->lock_cap && atomic_long_read(&mm->locked_vm) + + lock_acct + 1 > limit) { put_pfn(pfn, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -592,7 +632,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, continue; } - remote_vaddr = dma->vaddr + iova - dma->iova; + remote_vaddr = dma->vaddr + (iova - dma->iova); ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], do_accounting); if (ret) @@ -600,7 +640,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]); if (ret) { - vfio_unpin_page_external(dma, iova, do_accounting); + if (put_pfn(phys_pfn[i], dma->prot) && do_accounting) + vfio_lock_acct(dma, -1, true); goto pin_unwind; } } @@ -752,15 +793,15 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain, } static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, + dma_addr_t iova, dma_addr_t end, bool do_accounting) { - dma_addr_t iova = dma->iova, end = dma->iova + dma->size; struct vfio_domain *domain, *d; LIST_HEAD(unmapped_region_list); int unmapped_region_cnt = 0; long unlocked = 0; - if (!dma->size) + if (iova == end) return 0; if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) @@ -777,7 +818,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, struct vfio_domain, next); list_for_each_entry_continue(d, &iommu->domain_list, next) { - iommu_unmap(d->domain, dma->iova, dma->size); + iommu_unmap(d->domain, iova, end - iova); cond_resched(); } @@ -818,8 +859,6 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, } } - dma->iommu_mapped = false; - if (unmapped_region_cnt) unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list); @@ -830,12 +869,20 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, return unlocked; } -static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) +static void vfio_remove_dma_finish(struct vfio_iommu *iommu, + struct vfio_dma *dma) { - vfio_unmap_unpin(iommu, dma, true); + dma->iommu_mapped = false; vfio_unlink_dma(iommu, dma); put_task_struct(dma->task); kfree(dma); + iommu->dma_avail++; +} + +static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) +{ + vfio_unmap_unpin(iommu, dma, dma->iova, dma->iova + dma->size, true); + vfio_remove_dma_finish(iommu, dma); } static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) @@ -878,7 +925,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, return -EINVAL; if (!unmap->size || unmap->size & mask) return -EINVAL; - if (unmap->iova + unmap->size < 
unmap->iova || + if (unmap->iova + unmap->size - 1 < unmap->iova || unmap->size > SIZE_MAX) return -EINVAL; @@ -1031,20 +1078,29 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, return ret; } -static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, - size_t map_size) +struct vfio_pin_args { + struct vfio_iommu *iommu; + struct vfio_dma *dma; + unsigned long limit; + struct mm_struct *mm; +}; + +static int vfio_pin_map_dma_chunk(unsigned long start_vaddr, + unsigned long end_vaddr, + struct vfio_pin_args *args) { - dma_addr_t iova = dma->iova; - unsigned long vaddr = dma->vaddr; - size_t size = map_size; + struct vfio_dma *dma = args->dma; + dma_addr_t iova = dma->iova + (start_vaddr - dma->vaddr); + unsigned long unmapped_size = end_vaddr - start_vaddr; + unsigned long pfn, mapped_size = 0; long npage; - unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret = 0; - while (size) { + while (unmapped_size) { /* Pin a contiguous chunk of memory */ - npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, limit); + npage = vfio_pin_pages_remote(dma, start_vaddr + mapped_size, + unmapped_size >> PAGE_SHIFT, + &pfn, args->limit, args->mm); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1052,22 +1108,50 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, } /* Map it! */ - ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, - dma->prot); + ret = vfio_iommu_map(args->iommu, iova + mapped_size, pfn, + npage, dma->prot); if (ret) { - vfio_unpin_pages_remote(dma, iova + dma->size, pfn, + vfio_unpin_pages_remote(dma, iova + mapped_size, pfn, npage, true); break; } - size -= npage << PAGE_SHIFT; - dma->size += npage << PAGE_SHIFT; + unmapped_size -= npage << PAGE_SHIFT; + mapped_size += npage << PAGE_SHIFT; } + return (ret == 0) ? KTASK_RETURN_SUCCESS : ret; +} + +static void vfio_pin_map_dma_undo(unsigned long start_vaddr, + unsigned long end_vaddr, + struct vfio_pin_args *args) +{ + struct vfio_dma *dma = args->dma; + dma_addr_t iova = dma->iova + (start_vaddr - dma->vaddr); + dma_addr_t end = dma->iova + (end_vaddr - dma->vaddr); + + vfio_unmap_unpin(args->iommu, args->dma, iova, end, true); +} + +static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, + size_t map_size) +{ + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + int ret = 0; + struct vfio_pin_args args = { iommu, dma, limit, current->mm }; + /* Stay on PMD boundary in case THP is being used. 
*/ + DEFINE_KTASK_CTL(ctl, vfio_pin_map_dma_chunk, &args, KTASK_MEM_CHUNK); + + ktask_ctl_set_undo_func(&ctl, vfio_pin_map_dma_undo); + ret = ktask_run((void *)dma->vaddr, map_size, &ctl); + dma->iommu_mapped = true; if (ret) - vfio_remove_dma(iommu, dma); + vfio_remove_dma_finish(iommu, dma); + else + dma->size += map_size; return ret; } @@ -1110,12 +1194,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, goto out_unlock; } + if (!iommu->dma_avail) { + ret = -ENOSPC; + goto out_unlock; + } + dma = kzalloc(sizeof(*dma), GFP_KERNEL); if (!dma) { ret = -ENOMEM; goto out_unlock; } + iommu->dma_avail--; dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; @@ -1180,13 +1270,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1204,6 +1297,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! */ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1229,11 +1327,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, npage = vfio_pin_pages_remote(dma, vaddr, n >> PAGE_SHIFT, - &pfn, limit); + &pfn, limit, + current->mm); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1242,16 +1341,227 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; +} + +static int vfio_iommu_mm_exit(struct device *dev, int pasid, void *data) +{ + struct vfio_mm *vfio_mm; + struct vfio_iommu *iommu = data; + + mutex_lock(&iommu->lock); + list_for_each_entry(vfio_mm, &iommu->mm_list, next) { + if (vfio_mm->pasid == pasid) { + list_del(&vfio_mm->next); + kfree(vfio_mm); + break; 
+ } + } + mutex_unlock(&iommu->lock); + + return 0; +} + +static int vfio_iommu_sva_init(struct device *dev, void *data) +{ + return iommu_sva_device_init(dev, IOMMU_SVA_FEAT_IOPF, 0, + vfio_iommu_mm_exit); +} + +static int vfio_iommu_sva_shutdown(struct device *dev, void *data) +{ + iommu_sva_device_shutdown(dev); + return 0; } +struct vfio_iommu_sva_bind_data { + struct vfio_mm *vfio_mm; + struct vfio_iommu *iommu; + int count; +}; + +static int vfio_iommu_sva_bind_dev(struct device *dev, void *data) +{ + struct vfio_iommu_sva_bind_data *bind_data = data; + + /* Multi-device groups aren't support for SVA */ + if (bind_data->count++) + return -EINVAL; + + return __iommu_sva_bind_device(dev, bind_data->vfio_mm->mm, + &bind_data->vfio_mm->pasid, + IOMMU_SVA_FEAT_IOPF, bind_data->iommu); +} + +static int vfio_iommu_sva_unbind_dev(struct device *dev, void *data) +{ + struct vfio_mm *vfio_mm = data; + + return __iommu_sva_unbind_device(dev, vfio_mm->pasid); +} + +static int vfio_iommu_bind_group(struct vfio_iommu *iommu, + struct vfio_group *group, + struct vfio_mm *vfio_mm) +{ + int ret; + bool enabled_sva = false; + struct vfio_iommu_sva_bind_data data = { + .vfio_mm = vfio_mm, + .iommu = iommu, + .count = 0, + }; + + if (!group->sva_enabled) { + ret = iommu_group_for_each_dev(group->iommu_group, NULL, + vfio_iommu_sva_init); + if (ret) + return ret; + + group->sva_enabled = enabled_sva = true; + } + + ret = iommu_group_for_each_dev(group->iommu_group, &data, + vfio_iommu_sva_bind_dev); + if (ret && data.count > 1) + iommu_group_for_each_dev(group->iommu_group, vfio_mm, + vfio_iommu_sva_unbind_dev); + if (ret && enabled_sva) { + iommu_group_for_each_dev(group->iommu_group, NULL, + vfio_iommu_sva_shutdown); + group->sva_enabled = false; + } + + return ret; +} + +static void vfio_iommu_unbind_group(struct vfio_group *group, + struct vfio_mm *vfio_mm) +{ + iommu_group_for_each_dev(group->iommu_group, vfio_mm, + vfio_iommu_sva_unbind_dev); +} + +static void vfio_iommu_unbind(struct vfio_iommu *iommu, + struct vfio_mm *vfio_mm) +{ + struct vfio_group *group; + struct vfio_domain *domain; + + list_for_each_entry(domain, &iommu->domain_list, next) + list_for_each_entry(group, &domain->group_list, next) + vfio_iommu_unbind_group(group, vfio_mm); +} + +static int vfio_iommu_replay_bind(struct vfio_iommu *iommu, + struct vfio_group *group) +{ + int ret = 0; + struct vfio_mm *vfio_mm; + + list_for_each_entry(vfio_mm, &iommu->mm_list, next) { + /* + * Ensure that mm doesn't exit while we're binding it to the new + * group. It may already have exited, and the mm_exit notifier + * might be waiting on the IOMMU mutex to remove this vfio_mm + * from the list. + */ + if (!mmget_not_zero(vfio_mm->mm)) + continue; + ret = vfio_iommu_bind_group(iommu, group, vfio_mm); + /* + * Use async to avoid triggering an mm_exit callback right away, + * which would block on the mutex that we're holding. + */ + mmput_async(vfio_mm->mm); + + if (ret) + goto out_unbind; + } + + return 0; + +out_unbind: + list_for_each_entry_continue_reverse(vfio_mm, &iommu->mm_list, next) + vfio_iommu_unbind_group(group, vfio_mm); + + return ret; +} + +static void vfio_iommu_free_all_mm(struct vfio_iommu *iommu) +{ + struct vfio_mm *vfio_mm, *next; + + /* + * No need for unbind() here. Since all groups are detached from this + * iommu, bonds have been removed. 
+ */ + list_for_each_entry_safe(vfio_mm, next, &iommu->mm_list, next) + kfree(vfio_mm); + INIT_LIST_HEAD(&iommu->mm_list); +} + /* * We change our unmap behavior slightly depending on whether the IOMMU * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage @@ -1327,6 +1637,44 @@ static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) return ret; } +static int vfio_iommu_try_attach_group(struct vfio_iommu *iommu, + struct vfio_group *group, + struct vfio_domain *cur_domain, + struct vfio_domain *new_domain) +{ + struct iommu_group *iommu_group = group->iommu_group; + + /* + * Try to match an existing compatible domain. We don't want to + * preclude an IOMMU driver supporting multiple bus_types and being + * able to include different bus_types in the same IOMMU domain, so + * we test whether the domains use the same iommu_ops rather than + * testing if they're on the same bus_type. + */ + if (new_domain->domain->ops != cur_domain->domain->ops || + new_domain->prot != cur_domain->prot) + return 1; + + iommu_detach_group(cur_domain->domain, iommu_group); + + if (iommu_attach_group(new_domain->domain, iommu_group)) + goto out_reattach; + + if (vfio_iommu_replay_bind(iommu, group)) + goto out_detach; + + return 0; + +out_detach: + iommu_detach_group(new_domain->domain, iommu_group); + +out_reattach: + if (iommu_attach_group(cur_domain->domain, iommu_group)) + return -EINVAL; + + return 1; +} + static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -1424,28 +1772,16 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) domain->prot |= IOMMU_CACHE; - /* - * Try to match an existing compatible domain. We don't want to - * preclude an IOMMU driver supporting multiple bus_types and being - * able to include different bus_types in the same IOMMU domain, so - * we test whether the domains use the same iommu_ops rather than - * testing if they're on the same bus_type. 
- */ list_for_each_entry(d, &iommu->domain_list, next) { - if (d->domain->ops == domain->domain->ops && - d->prot == domain->prot) { - iommu_detach_group(domain->domain, iommu_group); - if (!iommu_attach_group(d->domain, iommu_group)) { - list_add(&group->next, &d->group_list); - iommu_domain_free(domain->domain); - kfree(domain); - mutex_unlock(&iommu->lock); - return 0; - } - - ret = iommu_attach_group(domain->domain, iommu_group); - if (ret) - goto out_domain; + ret = vfio_iommu_try_attach_group(iommu, group, domain, d); + if (ret < 0) { + goto out_domain; + } else if (!ret) { + list_add(&group->next, &d->group_list); + iommu_domain_free(domain->domain); + kfree(domain); + mutex_unlock(&iommu->lock); + return 0; } } @@ -1456,6 +1792,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_detach; + ret = vfio_iommu_replay_bind(iommu, group); + if (ret) + goto out_detach; + if (resv_msi) { ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); if (ret) @@ -1497,7 +1837,9 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) long locked = 0, unlocked = 0; dma = rb_entry(n, struct vfio_dma, node); - unlocked += vfio_unmap_unpin(iommu, dma, false); + unlocked += vfio_unmap_unpin(iommu, dma, dma->iova, + dma->iova + dma->size, false); + dma->iommu_mapped = false; p = rb_first(&dma->pfn_list); for (; p; p = rb_next(p)) { struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, @@ -1561,6 +1903,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, continue; iommu_detach_group(domain->domain, iommu_group); + if (group->sva_enabled) { + iommu_group_for_each_dev(iommu_group, NULL, + vfio_iommu_sva_shutdown); + group->sva_enabled = false; + } + list_del(&group->next); kfree(group); /* @@ -1576,6 +1924,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data, vfio_iommu_unmap_unpin_all(iommu); else vfio_iommu_unmap_unpin_reaccount(iommu); + vfio_iommu_free_all_mm(iommu); } iommu_domain_free(domain->domain); list_del(&domain->next); @@ -1611,7 +1960,9 @@ static void *vfio_iommu_type1_open(unsigned long arg) } INIT_LIST_HEAD(&iommu->domain_list); + INIT_LIST_HEAD(&iommu->mm_list); iommu->dma_list = RB_ROOT; + iommu->dma_avail = dma_entry_limit; mutex_init(&iommu->lock); BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); @@ -1645,6 +1996,7 @@ static void vfio_iommu_type1_release(void *iommu_data) kfree(iommu->external_domain); } + vfio_iommu_free_all_mm(iommu); vfio_iommu_unmap_unpin_all(iommu); list_for_each_entry_safe(domain, domain_tmp, @@ -1673,6 +2025,169 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) return ret; } +static struct mm_struct *vfio_iommu_get_mm_by_vpid(pid_t vpid) +{ + struct mm_struct *mm; + struct task_struct *task; + + task = find_get_task_by_vpid(vpid); + if (!task) + return ERR_PTR(-ESRCH); + + /* Ensure that current has RW access on the mm */ + mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); + put_task_struct(task); + + if (!mm) + return ERR_PTR(-ESRCH); + + return mm; +} + +static long vfio_iommu_type1_bind_process(struct vfio_iommu *iommu, + void __user *arg, + struct vfio_iommu_type1_bind *bind) +{ + struct vfio_iommu_type1_bind_process params; + struct vfio_domain *domain; + struct vfio_group *group; + struct vfio_mm *vfio_mm; + struct mm_struct *mm; + unsigned long minsz; + int ret = 0; + + minsz = sizeof(*bind) + sizeof(params); + if (bind->argsz < minsz) + return -EINVAL; + + arg += sizeof(*bind); + if (copy_from_user(¶ms, arg, sizeof(params))) + return -EFAULT; + + if (params.flags 
& ~VFIO_IOMMU_BIND_PID) + return -EINVAL; + + if (params.flags & VFIO_IOMMU_BIND_PID) { + mm = vfio_iommu_get_mm_by_vpid(params.pid); + if (IS_ERR(mm)) + return PTR_ERR(mm); + } else { + mm = get_task_mm(current); + if (!mm) + return -EINVAL; + } + + mutex_lock(&iommu->lock); + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) { + ret = -EINVAL; + goto out_unlock; + } + + list_for_each_entry(vfio_mm, &iommu->mm_list, next) { + if (vfio_mm->mm == mm) { + params.pasid = vfio_mm->pasid; + ret = copy_to_user(arg, ¶ms, sizeof(params)) ? + -EFAULT : 0; + goto out_unlock; + } + } + + vfio_mm = kzalloc(sizeof(*vfio_mm), GFP_KERNEL); + if (!vfio_mm) { + ret = -ENOMEM; + goto out_unlock; + } + + vfio_mm->mm = mm; + vfio_mm->pasid = VFIO_PASID_INVALID; + + list_for_each_entry(domain, &iommu->domain_list, next) { + list_for_each_entry(group, &domain->group_list, next) { + ret = vfio_iommu_bind_group(iommu, group, vfio_mm); + if (ret) + goto out_unbind; + } + } + + list_add(&vfio_mm->next, &iommu->mm_list); + + params.pasid = vfio_mm->pasid; + ret = copy_to_user(arg, ¶ms, sizeof(params)) ? -EFAULT : 0; + if (ret) + goto out_delete; + + mutex_unlock(&iommu->lock); + mmput(mm); + return 0; + +out_delete: + list_del(&vfio_mm->next); + +out_unbind: + /* Undo all binds that already succeeded */ + vfio_iommu_unbind(iommu, vfio_mm); + kfree(vfio_mm); + +out_unlock: + mutex_unlock(&iommu->lock); + mmput(mm); + return ret; +} + +static long vfio_iommu_type1_unbind_process(struct vfio_iommu *iommu, + void __user *arg, + struct vfio_iommu_type1_bind *bind) +{ + int ret = -EINVAL; + unsigned long minsz; + struct mm_struct *mm; + struct vfio_mm *vfio_mm; + struct vfio_iommu_type1_bind_process params; + + minsz = sizeof(*bind) + sizeof(params); + if (bind->argsz < minsz) + return -EINVAL; + + arg += sizeof(*bind); + if (copy_from_user(¶ms, arg, sizeof(params))) + return -EFAULT; + + if (params.flags & ~VFIO_IOMMU_BIND_PID) + return -EINVAL; + + /* + * We can't simply call unbind with the PASID, because the process might + * have died and the PASID might have been reallocated to another + * process. Instead we need to fetch that process mm by PID again to + * make sure we remove the right vfio_mm. + */ + if (params.flags & VFIO_IOMMU_BIND_PID) { + mm = vfio_iommu_get_mm_by_vpid(params.pid); + if (IS_ERR(mm)) + return PTR_ERR(mm); + } else { + mm = get_task_mm(current); + if (!mm) + return -EINVAL; + } + + ret = -ESRCH; + mutex_lock(&iommu->lock); + list_for_each_entry(vfio_mm, &iommu->mm_list, next) { + if (vfio_mm->mm == mm) { + vfio_iommu_unbind(iommu, vfio_mm); + list_del(&vfio_mm->next); + kfree(vfio_mm); + ret = 0; + break; + } + } + mutex_unlock(&iommu->lock); + mmput(mm); + + return ret; +} + static long vfio_iommu_type1_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -1743,6 +2258,45 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, return copy_to_user((void __user *)arg, &unmap, minsz) ? 
-EFAULT : 0; + + } else if (cmd == VFIO_IOMMU_BIND) { + struct vfio_iommu_type1_bind bind; + + minsz = offsetofend(struct vfio_iommu_type1_bind, flags); + + if (copy_from_user(&bind, (void __user *)arg, minsz)) + return -EFAULT; + + if (bind.argsz < minsz) + return -EINVAL; + + switch (bind.flags) { + case VFIO_IOMMU_BIND_PROCESS: + return vfio_iommu_type1_bind_process(iommu, (void *)arg, + &bind); + default: + return -EINVAL; + } + + } else if (cmd == VFIO_IOMMU_UNBIND) { + struct vfio_iommu_type1_bind bind; + + minsz = offsetofend(struct vfio_iommu_type1_bind, flags); + + if (copy_from_user(&bind, (void __user *)arg, minsz)) + return -EFAULT; + + if (bind.argsz < minsz) + return -EINVAL; + + switch (bind.flags) { + case VFIO_IOMMU_BIND_PROCESS: + return vfio_iommu_type1_unbind_process(iommu, + (void *)arg, + &bind); + default: + return -EINVAL; + } } return -ENOTTY; diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c index 2a1be859ee71c924520f0181066197371dbb298c..ab2a429d36e780565bb05ddcfc274719c8245788 100644 --- a/drivers/vfio/virqfd.c +++ b/drivers/vfio/virqfd.c @@ -104,6 +104,13 @@ static void virqfd_inject(struct work_struct *work) virqfd->thread(virqfd->opaque, virqfd->data); } +static void virqfd_flush_inject(struct work_struct *work) +{ + struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject); + + flush_work(&virqfd->inject); +} + int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *), void (*thread)(void *, void *), @@ -127,6 +134,7 @@ int vfio_virqfd_enable(void *opaque, INIT_WORK(&virqfd->shutdown, virqfd_shutdown); INIT_WORK(&virqfd->inject, virqfd_inject); + INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject); irqfd = fdget(fd); if (!irqfd.file) { @@ -217,6 +225,19 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd) } EXPORT_SYMBOL_GPL(vfio_virqfd_disable); +void vfio_virqfd_flush_thread(struct virqfd **pvirqfd) +{ + unsigned long flags; + + spin_lock_irqsave(&virqfd_lock, flags); + if (*pvirqfd && (*pvirqfd)->thread) + queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject); + spin_unlock_irqrestore(&virqfd_lock, flags); + + flush_workqueue(vfio_irqfd_cleanup_wq); +} +EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread); + module_init(vfio_virqfd_init); module_exit(vfio_virqfd_exit); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 4e656f89cb225c83b42f579d1bc1f988224d49bc..4b9151474a24541babc69476103c37a33c0c1520 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -36,7 +36,7 @@ #include "vhost.h" -static int experimental_zcopytx = 1; +static int experimental_zcopytx = 0; module_param(experimental_zcopytx, int, 0444); MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" " 1 -Enable; 0 - Disable"); @@ -497,12 +497,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, return iov_iter_count(iter); } -static bool vhost_exceeds_weight(int pkts, int total_len) -{ - return total_len >= VHOST_NET_WEIGHT || - pkts >= VHOST_NET_PKT_WEIGHT; -} - static int get_tx_bufs(struct vhost_net *net, struct vhost_net_virtqueue *nvq, struct msghdr *msg, @@ -557,7 +551,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) int err; int sent_pkts = 0; - for (;;) { + do { bool busyloop_intr = false; head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, @@ -586,23 +580,20 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) else msg.msg_flags &= ~MSG_MORE; - /* TODO: Check specific error and bomb out unless ENOBUFS? 
*/ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { - vhost_discard_vq_desc(vq, 1); - vhost_net_enable_vq(net, vq); - break; - } - if (err != len) + if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { + vhost_discard_vq_desc(vq, 1); + vhost_net_enable_vq(net, vq); + break; + } + pr_debug("Fail to send packet: err %d", err); + } else if (unlikely(err != len)) pr_debug("Truncated TX packet: len %d != %zd\n", err, len); if (++nvq->done_idx >= VHOST_NET_BATCH) vhost_net_signal_used(nvq); - if (vhost_exceeds_weight(++sent_pkts, total_len)) { - vhost_poll_queue(&vq->poll); - break; - } - } + } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); vhost_net_signal_used(nvq); } @@ -623,10 +614,11 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) size_t len, total_len = 0; int err; struct vhost_net_ubuf_ref *uninitialized_var(ubufs); + struct ubuf_info *ubuf; bool zcopy_used; int sent_pkts = 0; - for (;;) { + do { bool busyloop_intr; /* Release DMAs done buffers first */ @@ -655,9 +647,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { - struct ubuf_info *ubuf; ubuf = nvq->ubuf_info + nvq->upend_idx; - vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->callback = vhost_zerocopy_callback; @@ -681,19 +671,21 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) msg.msg_flags &= ~MSG_MORE; } - /* TODO: Check specific error and bomb out unless ENOBUFS? */ err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { - vhost_net_ubuf_put(ubufs); + if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) + vhost_net_ubuf_put(ubufs); nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) % UIO_MAXIOV; } - vhost_discard_vq_desc(vq, 1); - vhost_net_enable_vq(net, vq); - break; - } - if (err != len) + if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { + vhost_discard_vq_desc(vq, 1); + vhost_net_enable_vq(net, vq); + break; + } + pr_debug("Fail to send packet: err %d", err); + } else if (unlikely(err != len)) pr_debug("Truncated TX packet: " " len %d != %zd\n", err, len); if (!zcopy_used) @@ -701,11 +693,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) else vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); - if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) { - vhost_poll_queue(&vq->poll); - break; - } - } + } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); } /* Expects to be always run from workqueue - which acts as @@ -941,8 +929,11 @@ static void handle_rx(struct vhost_net *net) vq->log : NULL; mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); - while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk, - &busyloop_intr))) { + do { + sock_len = vhost_net_rx_peek_head_len(net, sock->sk, + &busyloop_intr); + if (!sock_len) + break; sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, @@ -1024,16 +1015,14 @@ static void handle_rx(struct vhost_net *net) if (nvq->done_idx > VHOST_NET_BATCH) vhost_net_signal_used(nvq); if (unlikely(vq_log)) - vhost_log_write(vq, vq_log, log, vhost_len); + vhost_log_write(vq, vq_log, log, vhost_len, + vq->iov, in); total_len += vhost_len; - if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { - vhost_poll_queue(&vq->poll); - goto out; - } - } + 
} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len))); + if (unlikely(busyloop_intr)) vhost_poll_queue(&vq->poll); - else + else if (!sock_len) vhost_net_enable_vq(net, vq); out: vhost_net_signal_used(nvq); @@ -1113,7 +1102,9 @@ static int vhost_net_open(struct inode *inode, struct file *f) n->vqs[i].rx_ring = NULL; vhost_net_buf_init(&n->vqs[i].rxq); } - vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); + vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, + UIO_MAXIOV + VHOST_NET_BATCH, + VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); @@ -1198,10 +1189,6 @@ static int vhost_net_release(struct inode *inode, struct file *f) static struct socket *get_raw_socket(int fd) { - struct { - struct sockaddr_ll sa; - char buf[MAX_ADDR_LEN]; - } uaddr; int r; struct socket *sock = sockfd_lookup(fd, &r); @@ -1214,11 +1201,7 @@ static struct socket *get_raw_socket(int fd) goto err; } - r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0); - if (r < 0) - goto err; - - if (uaddr.sa.sll_family != AF_PACKET) { + if (sock->sk->sk_family != AF_PACKET) { r = -EPFNOSUPPORT; goto err; } @@ -1228,13 +1211,9 @@ static struct socket *get_raw_socket(int fd) return ERR_PTR(r); } -static struct ptr_ring *get_tap_ptr_ring(int fd) +static struct ptr_ring *get_tap_ptr_ring(struct file *file) { struct ptr_ring *ring; - struct file *file = fget(fd); - - if (!file) - return NULL; ring = tun_get_tx_ring(file); if (!IS_ERR(ring)) goto out; @@ -1243,7 +1222,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd) goto out; ring = NULL; out: - fput(file); return ring; } @@ -1330,8 +1308,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) r = vhost_net_enable_vq(n, vq); if (r) goto err_used; - if (index == VHOST_NET_VQ_RX) - nvq->rx_ring = get_tap_ptr_ring(fd); + if (index == VHOST_NET_VQ_RX) { + if (sock) + nvq->rx_ring = get_tap_ptr_ring(sock->file); + else + nvq->rx_ring = NULL; + } oldubufs = nvq->ubufs; nvq->ubufs = ubufs; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c24bb690680b4104d6621c5b984d869f51af2cd3..0535f4abd37d263db8332c08d4ddac3d91797747 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -57,6 +57,12 @@ #define VHOST_SCSI_PREALLOC_UPAGES 2048 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048 +/* Max number of requests before requeueing the job. + * Using this limit prevents one virtqueue from starving others with + * request. 
+ */ +#define VHOST_SCSI_WEIGHT 256 + struct vhost_scsi_inflight { /* Wait for the flush operation to finish */ struct completion comp; @@ -814,6 +820,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) int head, ret, prot_bytes; size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); size_t out_size, in_size; + int c = 0; u16 lun; u8 *target, *lunp, task_attr; bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); @@ -830,7 +837,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vhost_disable_notify(&vs->dev, vq); - for (;;) { + do { head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL); @@ -964,7 +971,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } /* - * Set prot_iter to data_iter, and advance past any + * Set prot_iter to data_iter and truncate it to + * prot_bytes, and advance data_iter past any * preceeding prot_bytes that may be present. * * Also fix up the exp_data_len to reflect only the @@ -973,6 +981,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (prot_bytes) { exp_data_len -= prot_bytes; prot_iter = data_iter; + iov_iter_truncate(&prot_iter, prot_bytes); iov_iter_advance(&data_iter, prot_bytes); } tag = vhost64_to_cpu(vq, v_req_pi.tag); @@ -1043,7 +1052,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ INIT_WORK(&cmd->work, vhost_scsi_submission_work); queue_work(vhost_scsi_workqueue, &cmd->work); - } + } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); out: mutex_unlock(&vq->mutex); } @@ -1396,7 +1405,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vqs[i] = &vs->vqs[i].vq; vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; } - vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); + vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, + VHOST_SCSI_WEIGHT, 0); vhost_scsi_init_inflight(vs, NULL); diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 40589850eb33c83c06d9211fa86aa95af655dd9e..a9be2d8e98df74c021c90bd358fa0a08fc2371c9 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -23,6 +23,12 @@ * Using this limit prevents one virtqueue from starving others. */ #define VHOST_TEST_WEIGHT 0x80000 +/* Max number of packets transferred before requeueing the job. + * Using this limit prevents one virtqueue from starving others with + * pkts. 
+ */ +#define VHOST_TEST_PKT_WEIGHT 256 + enum { VHOST_TEST_VQ = 0, VHOST_TEST_VQ_MAX = 1, @@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n) } vhost_add_used_and_signal(&n->dev, vq, head, 0); total_len += len; - if (unlikely(total_len >= VHOST_TEST_WEIGHT)) { - vhost_poll_queue(&vq->poll); + if (unlikely(vhost_exceeds_weight(vq, 0, total_len))) break; - } } mutex_unlock(&vq->mutex); @@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f) dev = &n->dev; vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; - vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); + vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, + VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT); f->private_data = n; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index b13c6b4b2c665a332a40aeada1eaa1e421d9a4aa..3ca5fe24eb4f5ea1aad76078f998048eb30598a2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "vhost.h" @@ -321,8 +322,8 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->kick = NULL; vq->call_ctx = NULL; vq->log_ctx = NULL; - vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); + vhost_reset_is_le(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; @@ -389,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) vq->indirect = kmalloc_array(UIO_MAXIOV, sizeof(*vq->indirect), GFP_KERNEL); - vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), + vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), GFP_KERNEL); - vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), + vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), GFP_KERNEL); if (!vq->indirect || !vq->log || !vq->heads) goto err_nomem; @@ -412,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev) vhost_vq_free_iovecs(dev->vqs[i]); } +bool vhost_exceeds_weight(struct vhost_virtqueue *vq, + int pkts, int total_len) +{ + struct vhost_dev *dev = vq->dev; + + if ((dev->byte_weight && total_len >= dev->byte_weight) || + pkts >= dev->weight) { + vhost_poll_queue(&vq->poll); + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(vhost_exceeds_weight); + void vhost_dev_init(struct vhost_dev *dev, - struct vhost_virtqueue **vqs, int nvqs) + struct vhost_virtqueue **vqs, int nvqs, + int iov_limit, int weight, int byte_weight) { struct vhost_virtqueue *vq; int i; @@ -426,6 +443,9 @@ void vhost_dev_init(struct vhost_dev *dev, dev->iotlb = NULL; dev->mm = NULL; dev->worker = NULL; + dev->iov_limit = iov_limit; + dev->weight = weight; + dev->byte_weight = byte_weight; init_llist_head(&dev->work_list); init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->read_list); @@ -654,7 +674,7 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz) a + (unsigned long)log_base > ULONG_MAX) return false; - return access_ok(VERIFY_WRITE, log_base + a, + return access_ok(log_base + a, (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); } @@ -680,7 +700,7 @@ static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, return false; - if (!access_ok(VERIFY_WRITE, (void __user *)a, + if (!access_ok((void __user *)a, node->size)) return false; else if (log_all && !log_access_ok(log_base, @@ -909,8 +929,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem, u64 start, u64 size, u64 end, u64 userspace_addr, int perm) { - struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC); + struct 
vhost_umem_node *tmp, *node; + if (!size) + return -EFAULT; + + node = kmalloc(sizeof(*node), GFP_ATOMIC); if (!node) return -ENOMEM; @@ -972,10 +996,10 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access) return false; if ((access & VHOST_ACCESS_RO) && - !access_ok(VERIFY_READ, (void __user *)a, size)) + !access_ok((void __user *)a, size)) return false; if ((access & VHOST_ACCESS_WO) && - !access_ok(VERIFY_WRITE, (void __user *)a, size)) + !access_ok((void __user *)a, size)) return false; return true; } @@ -1033,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, int type, ret; ret = copy_from_iter(&type, sizeof(type), from); - if (ret != sizeof(type)) + if (ret != sizeof(type)) { + ret = -EINVAL; goto done; + } switch (type) { case VHOST_IOTLB_MSG: @@ -1053,8 +1079,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, iov_iter_advance(from, offset); ret = copy_from_iter(&msg, sizeof(msg), from); - if (ret != sizeof(msg)) + if (ret != sizeof(msg)) { + ret = -EINVAL; goto done; + } if (vhost_process_iotlb_msg(dev, &msg)) { ret = -EFAULT; goto done; @@ -1184,10 +1212,10 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, { size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; - return access_ok(VERIFY_READ, desc, num * sizeof *desc) && - access_ok(VERIFY_READ, avail, + return access_ok(desc, num * sizeof *desc) && + access_ok(avail, sizeof *avail + num * sizeof *avail->ring + s) && - access_ok(VERIFY_WRITE, used, + access_ok(used, sizeof *used + num * sizeof *used->ring + s); } @@ -1397,6 +1425,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg if (idx >= d->nvqs) return -ENOBUFS; + idx = array_index_nospec(idx, d->nvqs); vq = d->vqs[idx]; mutex_lock(&vq->mutex); @@ -1731,13 +1760,87 @@ static int log_write(void __user *log_base, return r; } +static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) +{ + struct vhost_umem *umem = vq->umem; + struct vhost_umem_node *u; + u64 start, end, l, min; + int r; + bool hit = false; + + while (len) { + min = len; + /* More than one GPAs can be mapped into a single HVA. So + * iterate all possible umems here to be safe. + */ + list_for_each_entry(u, &umem->umem_list, link) { + if (u->userspace_addr > hva - 1 + len || + u->userspace_addr - 1 + u->size < hva) + continue; + start = max(u->userspace_addr, hva); + end = min(u->userspace_addr - 1 + u->size, + hva - 1 + len); + l = end - start + 1; + r = log_write(vq->log_base, + u->start + start - u->userspace_addr, + l); + if (r < 0) + return r; + hit = true; + min = min(l, min); + } + + if (!hit) + return -EFAULT; + + len -= min; + hva += min; + } + + return 0; +} + +static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) +{ + struct iovec iov[64]; + int i, ret; + + if (!vq->iotlb) + return log_write(vq->log_base, vq->log_addr + used_offset, len); + + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, + len, iov, 64, VHOST_ACCESS_WO); + if (ret < 0) + return ret; + + for (i = 0; i < ret; i++) { + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, + iov[i].iov_len); + if (ret) + return ret; + } + + return 0; +} + int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, - unsigned int log_num, u64 len) + unsigned int log_num, u64 len, struct iovec *iov, int count) { int i, r; /* Make sure data written is seen before log. 
*/ smp_wmb(); + + if (vq->iotlb) { + for (i = 0; i < count; i++) { + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, + iov[i].iov_len); + if (r < 0) + return r; + } + return 0; + } + for (i = 0; i < log_num; ++i) { u64 l = min(log[i].len, len); r = log_write(vq->log_base, log[i].addr, l); @@ -1767,9 +1870,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) smp_wmb(); /* Log used flag write. */ used = &vq->used->flags; - log_write(vq->log_base, vq->log_addr + - (used - (void __user *)vq->used), - sizeof vq->used->flags); + log_used(vq, (used - (void __user *)vq->used), + sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } @@ -1787,9 +1889,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) smp_wmb(); /* Log avail event write */ used = vhost_avail_event(vq); - log_write(vq->log_base, vq->log_addr + - (used - (void __user *)vq->used), - sizeof *vhost_avail_event(vq)); + log_used(vq, (used - (void __user *)vq->used), + sizeof *vhost_avail_event(vq)); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } @@ -1812,7 +1913,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) goto err; vq->signalled_used_valid = false; if (!vq->iotlb && - !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) { + !access_ok(&vq->used->idx, sizeof vq->used->idx)) { r = -EFAULT; goto err; } @@ -1972,7 +2073,7 @@ static int get_indirect(struct vhost_virtqueue *vq, /* If this is an input descriptor, increment that count. */ if (access == VHOST_ACCESS_WO) { *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; @@ -2115,7 +2216,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* If this is an input descriptor, * increment that count. */ *in_num += ret; - if (unlikely(log)) { + if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; @@ -2189,10 +2290,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. */ - log_write(vq->log_base, - vq->log_addr + - ((void __user *)used - (void __user *)vq->used), - count * sizeof *used); + log_used(vq, ((void __user *)used - (void __user *)vq->used), + count * sizeof *used); } old = vq->last_used_idx; new = (vq->last_used_idx += count); @@ -2231,10 +2330,11 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, return -EFAULT; } if (unlikely(vq->log_used)) { + /* Make sure used idx is seen before log. */ + smp_wmb(); /* Log used index update. */ - log_write(vq->log_base, - vq->log_addr + offsetof(struct vring_used, idx), - sizeof vq->used->idx); + log_used(vq, offsetof(struct vring_used, idx), + sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx, 1); } @@ -2384,12 +2484,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify); /* Create a new message. */ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) { - struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); + /* Make sure all padding within the structure is initialized. */ + struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return NULL; - /* Make sure all padding within the structure is initialized. 
*/ - memset(&node->msg, 0, sizeof node->msg); node->vq = vq; node->msg.type = type; return node; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 466ef75422916c020fcb26d27cf812bd99f99284..27a78a9b8cc7dc6e21626f1134d34055ca71dfe2 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -170,9 +170,14 @@ struct vhost_dev { struct list_head read_list; struct list_head pending_list; wait_queue_head_t wait; + int iov_limit; + int weight; + int byte_weight; }; -void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); +bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); +void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, + int nvqs, int iov_limit, int weight, int byte_weight); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); @@ -205,7 +210,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, - unsigned int log_num, u64 len); + unsigned int log_num, u64 len, + struct iovec *iov, int count); int vq_iotlb_prefetch(struct vhost_virtqueue *vq); struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 34bc3ab40c6da8d5637e6e5bd19e6fb27ec27fba..5f5c5de31f104a321496cdb3141e1e84fec608ea 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -15,11 +15,20 @@ #include #include #include +#include #include #include "vhost.h" #define VHOST_VSOCK_DEFAULT_HOST_CID 2 +/* Max number of bytes transferred before requeueing the job. + * Using this limit prevents one virtqueue from starving others. */ +#define VHOST_VSOCK_WEIGHT 0x80000 +/* Max number of packets transferred before requeueing the job. + * Using this limit prevents one virtqueue from starving others with + * small pkts. + */ +#define VHOST_VSOCK_PKT_WEIGHT 256 enum { VHOST_VSOCK_FEATURES = VHOST_FEATURES, @@ -27,14 +36,14 @@ enum { /* Used to track all the vhost_vsock instances on the system. */ static DEFINE_SPINLOCK(vhost_vsock_lock); -static LIST_HEAD(vhost_vsock_list); +static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8); struct vhost_vsock { struct vhost_dev dev; struct vhost_virtqueue vqs[2]; - /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */ - struct list_head list; + /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */ + struct hlist_node hash; struct vhost_work send_pkt_work; spinlock_t send_pkt_list_lock; @@ -50,11 +59,14 @@ static u32 vhost_transport_get_local_cid(void) return VHOST_VSOCK_DEFAULT_HOST_CID; } -static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid) +/* Callers that dereference the return value must hold vhost_vsock_lock or the + * RCU read lock. 
+ */ +static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) { struct vhost_vsock *vsock; - list_for_each_entry(vsock, &vhost_vsock_list, list) { + hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { u32 other_cid = vsock->guest_cid; /* Skip instances that have no CID yet */ @@ -69,22 +81,12 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid) return NULL; } -static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) -{ - struct vhost_vsock *vsock; - - spin_lock_bh(&vhost_vsock_lock); - vsock = __vhost_vsock_get(guest_cid); - spin_unlock_bh(&vhost_vsock_lock); - - return vsock; -} - static void vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct vhost_virtqueue *vq) { struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; + int pkts = 0, total_len = 0; bool added = false; bool restart_tx = false; @@ -96,12 +98,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, /* Avoid further vmexits, we're already processing the virtqueue */ vhost_disable_notify(&vsock->dev, vq); - for (;;) { + do { struct virtio_vsock_pkt *pkt; struct iov_iter iov_iter; unsigned out, in; size_t nbytes; - size_t len; + size_t iov_len, payload_len; int head; spin_lock_bh(&vsock->send_pkt_list_lock); @@ -146,8 +148,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - len = iov_length(&vq->iov[out], in); - iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len); + iov_len = iov_length(&vq->iov[out], in); + if (iov_len < sizeof(pkt->hdr)) { + virtio_transport_free_pkt(pkt); + vq_err(vq, "Buffer len [%zu] too small\n", iov_len); + break; + } + + iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); + payload_len = pkt->len - pkt->off; + + /* If the packet is greater than the space available in the + * buffer, we split it using multiple buffers. + */ + if (payload_len > iov_len - sizeof(pkt->hdr)) + payload_len = iov_len - sizeof(pkt->hdr); + + /* Set the correct length in the header */ + pkt->hdr.len = cpu_to_le32(payload_len); nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); if (nbytes != sizeof(pkt->hdr)) { @@ -156,33 +174,48 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter); - if (nbytes != pkt->len) { + nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len, + &iov_iter); + if (nbytes != payload_len) { virtio_transport_free_pkt(pkt); vq_err(vq, "Faulted on copying pkt buf\n"); break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); added = true; - if (pkt->reply) { - int val; - - val = atomic_dec_return(&vsock->queued_replies); - - /* Do we have resources to resume tx processing? */ - if (val + 1 == tx_vq->num) - restart_tx = true; - } - /* Deliver to monitoring devices all correctly transmitted * packets. */ virtio_transport_deliver_tap_pkt(pkt); - virtio_transport_free_pkt(pkt); - } + pkt->off += payload_len; + total_len += payload_len; + + /* If we didn't send all the payload we can requeue the packet + * to send it with the next available buffer. + */ + if (pkt->off < pkt->len) { + spin_lock_bh(&vsock->send_pkt_list_lock); + list_add(&pkt->list, &vsock->send_pkt_list); + spin_unlock_bh(&vsock->send_pkt_list_lock); + } else { + if (pkt->reply) { + int val; + + val = atomic_dec_return(&vsock->queued_replies); + + /* Do we have resources to resume tx + * processing? 
+ */ + if (val + 1 == tx_vq->num) + restart_tx = true; + } + + virtio_transport_free_pkt(pkt); + } + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); if (added) vhost_signal(&vsock->dev, vq); @@ -210,9 +243,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) struct vhost_vsock *vsock; int len = pkt->len; + rcu_read_lock(); + /* Find the vhost_vsock according to guest context id */ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid)); if (!vsock) { + rcu_read_unlock(); virtio_transport_free_pkt(pkt); return -ENODEV; } @@ -225,6 +261,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) spin_unlock_bh(&vsock->send_pkt_list_lock); vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); + + rcu_read_unlock(); return len; } @@ -234,12 +272,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk) struct vhost_vsock *vsock; struct virtio_vsock_pkt *pkt, *n; int cnt = 0; + int ret = -ENODEV; LIST_HEAD(freeme); + rcu_read_lock(); + /* Find the vhost_vsock according to guest context id */ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); if (!vsock) - return -ENODEV; + goto out; spin_lock_bh(&vsock->send_pkt_list_lock); list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { @@ -265,7 +306,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk) vhost_poll_queue(&tx_vq->poll); } - return 0; + ret = 0; +out: + rcu_read_unlock(); + return ret; } static struct virtio_vsock_pkt * @@ -346,7 +390,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, dev); struct virtio_vsock_pkt *pkt; - int head; + int head, pkts = 0, total_len = 0; unsigned int out, in; bool added = false; @@ -356,7 +400,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) goto out; vhost_disable_notify(&vsock->dev, vq); - for (;;) { + do { u32 len; if (!vhost_vsock_more_replies(vsock)) { @@ -392,14 +436,18 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) virtio_transport_deliver_tap_pkt(pkt); /* Only accept correctly addressed packets */ - if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && + le64_to_cpu(pkt->hdr.dst_cid) == + vhost_transport_get_local_cid()) virtio_transport_recv_pkt(pkt); else virtio_transport_free_pkt(pkt); - vhost_add_used(vq, head, sizeof(pkt->hdr) + len); + len += sizeof(pkt->hdr); + vhost_add_used(vq, head, len); + total_len += len; added = true; - } + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); no_more_replies: if (added) @@ -527,16 +575,14 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; - vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); + vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), + UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT, + VHOST_VSOCK_WEIGHT); file->private_data = vsock; spin_lock_init(&vsock->send_pkt_list_lock); INIT_LIST_HEAD(&vsock->send_pkt_list); vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); - - spin_lock_bh(&vhost_vsock_lock); - list_add_tail(&vsock->list, &vhost_vsock_list); - spin_unlock_bh(&vhost_vsock_lock); return 0; out: @@ -563,13 +609,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk) * executing. 
*/ - if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) { - sock_set_flag(sk, SOCK_DONE); - vsk->peer_shutdown = SHUTDOWN_MASK; - sk->sk_state = SS_UNCONNECTED; - sk->sk_err = ECONNRESET; - sk->sk_error_report(sk); - } + /* If the peer is still valid, no need to reset connection */ + if (vhost_vsock_get(vsk->remote_addr.svm_cid)) + return; + + /* If the close timeout is pending, let it expire. This avoids races + * with the timeout callback. + */ + if (vsk->close_work_scheduled) + return; + + sock_set_flag(sk, SOCK_DONE); + vsk->peer_shutdown = SHUTDOWN_MASK; + sk->sk_state = SS_UNCONNECTED; + sk->sk_err = ECONNRESET; + sk->sk_error_report(sk); } static int vhost_vsock_dev_release(struct inode *inode, struct file *file) @@ -577,9 +631,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) struct vhost_vsock *vsock = file->private_data; spin_lock_bh(&vhost_vsock_lock); - list_del(&vsock->list); + if (vsock->guest_cid) + hash_del_rcu(&vsock->hash); spin_unlock_bh(&vhost_vsock_lock); + /* Wait for other CPUs to finish using vsock */ + synchronize_rcu(); + /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ vsock_for_each_connected_socket(vhost_vsock_reset_orphans); @@ -620,12 +678,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) /* Refuse if CID is already in use */ spin_lock_bh(&vhost_vsock_lock); - other = __vhost_vsock_get(guest_cid); + other = vhost_vsock_get(guest_cid); if (other && other != vsock) { spin_unlock_bh(&vhost_vsock_lock); return -EADDRINUSE; } + + if (vsock->guest_cid) + hash_del_rcu(&vsock->hash); + vsock->guest_cid = guest_cid; + hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); spin_unlock_bh(&vhost_vsock_lock); return 0; diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c index cd50df5807eadb2b4bf18b6fa8efc906beda0dee..086611c7bc03cf3505bbe7e041dc6419841f30f9 100644 --- a/drivers/video/backlight/lm3639_bl.c +++ b/drivers/video/backlight/lm3639_bl.c @@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client) regmap_write(pchip->regmap, REG_ENABLE, 0x00); - if (&pchip->cdev_torch) - led_classdev_unregister(&pchip->cdev_torch); - if (&pchip->cdev_flash) - led_classdev_unregister(&pchip->cdev_flash); + led_classdev_unregister(&pchip->cdev_torch); + led_classdev_unregister(&pchip->cdev_flash); if (pchip->bled) device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode); return 0; diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index bdfcc0a71db14c2f917ef31d660845a63430bef4..7ddc0930e98c696b98f42b2b703a8594f3d1af6e 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -262,6 +262,16 @@ static int pwm_backlight_parse_dt(struct device *dev, memset(data, 0, sizeof(*data)); + /* + * These values are optional and set as 0 by default, the out values + * are modified only if a valid u32 value can be decoded. + */ + of_property_read_u32(node, "post-pwm-on-delay-ms", + &data->post_pwm_on_delay); + of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); + + data->enable_gpio = -EINVAL; + /* * Determine the number of brightness levels, if this property is not * set a default table of brightness levels will be used. 
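Stepping back to the vhost/vsock.c changes at the start of this section: CID lookup moves from a spinlock-protected list to an RCU hash table (vhost_vsock_get() is now called under rcu_read_lock()), and the send path no longer assumes a packet fits in one guest rx buffer. It copies whatever fits after the header, advances pkt->off, requeues the packet for the next buffer, and bounds each run of the loop with vhost_exceeds_weight(). Below is a minimal stand-alone sketch of just the split-and-requeue idea; struct pkt and send_into_buffer() are toy stand-ins for illustration, not the vhost data structures:

#include <stdio.h>
#include <string.h>

/* Toy packet: a payload plus an offset tracking how much was already sent. */
struct pkt {
	const char *buf;
	size_t len;
	size_t off;
};

/* Copy as much of the remaining payload as fits into one rx buffer and
 * report whether the packet still has bytes left (i.e. must be requeued).
 */
static int send_into_buffer(struct pkt *p, char *dst, size_t dst_size)
{
	size_t payload = p->len - p->off;

	if (payload > dst_size)
		payload = dst_size;

	memcpy(dst, p->buf + p->off, payload);
	p->off += payload;

	return p->off < p->len;	/* non-zero: requeue for the next buffer */
}

int main(void)
{
	struct pkt p = { "0123456789abcdefghij", 20, 0 };
	char rx[8];
	int pass = 0;
	int more;

	/* Keep handing the same packet 8-byte buffers until it is fully
	 * sent, mirroring how the reworked loop requeues a partial pkt.
	 */
	do {
		more = send_into_buffer(&p, rx, sizeof(rx));
		printf("pass %d: sent %zu/%zu bytes\n", ++pass, p.off, p.len);
	} while (more);

	return 0;
}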
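The pwm_bl.c hunks here move the optional "post-pwm-on-delay-ms" and "pwm-off-delay-ms" reads, together with the enable_gpio default of -EINVAL, to the top of pwm_backlight_parse_dt() (the following hunk removes them from their old position near the end), presumably so they still take effect on paths that return before the end of the function. The move works because of_property_read_u32() writes the output variable only when it decodes a valid value. A minimal user-space sketch of that "preset a default, override only on success" pattern; the toy node table and read_u32_optional() helper are illustrative stand-ins, not kernel APIs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A toy device-tree node: only the properties listed here "exist". */
struct toy_prop { const char *name; uint32_t value; };

static const struct toy_prop node[] = {
	{ "pwm-off-delay-ms", 50 },	/* "post-pwm-on-delay-ms" is absent */
};

/* Stand-in for of_property_read_u32(): writes *out only on success, so a
 * caller-provided default survives when the property is missing.
 */
static int read_u32_optional(const char *name, uint32_t *out)
{
	size_t i;

	for (i = 0; i < sizeof(node) / sizeof(node[0]); i++) {
		if (!strcmp(node[i].name, name)) {
			*out = node[i].value;
			return 0;
		}
	}
	return -EINVAL;		/* property missing: *out untouched */
}

int main(void)
{
	uint32_t post_pwm_on_delay = 0;	/* optional, defaults to 0 */
	uint32_t pwm_off_delay = 0;	/* optional, defaults to 0 */

	read_u32_optional("post-pwm-on-delay-ms", &post_pwm_on_delay);
	read_u32_optional("pwm-off-delay-ms", &pwm_off_delay);

	/* Prints: post_pwm_on_delay=0 pwm_off_delay=50 */
	printf("post_pwm_on_delay=%u pwm_off_delay=%u\n",
	       post_pwm_on_delay, pwm_off_delay);
	return 0;
}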
@@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev, data->max_brightness--; } - /* - * These values are optional and set as 0 by default, the out values - * are modified only if a valid u32 value can be decoded. - */ - of_property_read_u32(node, "post-pwm-on-delay-ms", - &data->post_pwm_on_delay); - of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); - - data->enable_gpio = -EINVAL; return 0; } @@ -424,7 +425,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb) */ /* if the enable GPIO is disabled, do not enable the backlight */ - if (pb->enable_gpio && gpiod_get_value(pb->enable_gpio) == 0) + if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0) return FB_BLANK_POWERDOWN; /* The regulator is disabled, do not enable the backlight */ diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 787792c3d08d66b93012c27deed878e820c06e12..40d5fea8513cff5c4308dcee210a52a3c93aa90c 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -21,52 +21,6 @@ config VGA_CONSOLE Say Y. -config VGACON_SOFT_SCROLLBACK - bool "Enable Scrollback Buffer in System RAM" - depends on VGA_CONSOLE - default n - help - The scrollback buffer of the standard VGA console is located in - the VGA RAM. The size of this RAM is fixed and is quite small. - If you require a larger scrollback buffer, this can be placed in - System RAM which is dynamically allocated during initialization. - Placing the scrollback buffer in System RAM will slightly slow - down the console. - - If you want this feature, say 'Y' here and enter the amount of - RAM to allocate for this buffer. If unsure, say 'N'. - -config VGACON_SOFT_SCROLLBACK_SIZE - int "Scrollback Buffer Size (in KB)" - depends on VGACON_SOFT_SCROLLBACK - range 1 1024 - default "64" - help - Enter the amount of System RAM to allocate for scrollback - buffers of VGA consoles. Each 64KB will give you approximately - 16 80x25 screenfuls of scrollback buffer. - -config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT - bool "Persistent Scrollback History for each console by default" - depends on VGACON_SOFT_SCROLLBACK - default n - help - Say Y here if the scrollback history should persist by default when - switching between consoles. Otherwise, the scrollback history will be - flushed each time the console is switched. This feature can also be - enabled using the boot command line parameter - 'vgacon.scrollback_persistent=1'. - - This feature might break your tool of choice to flush the scrollback - buffer, e.g. clear(1) will work fine but Debian's clear_console(1) - will be broken, which might cause security issues. - You can use the escape sequence \e[3J instead if this feature is - activated. - - Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each - created tty device. - So if you use a RAM-constrained system, say N here. 
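The Kconfig options removed above, and the vgacon.c code removed further down, implemented a software scrollback kept in a ring buffer in system RAM. The sizing in the removed help text checks out: an 80x25 text screen is 80 * 25 * 2 = 4000 bytes (character plus attribute byte), so the default 64 KB buffer holds roughly 16 screenfuls. Here is a stand-alone model of the append-with-wrap logic that vgacon_scrollback_update() used, simplified to whole rows and written for illustration only, not taken from the kernel source:

#include <stdio.h>
#include <string.h>

#define ROW_BYTES   (80 * 2)		/* 80 cells, character + attribute */
#define BUF_BYTES   (64 * 1024)		/* default scrollback size, 64 KB */
#define BUF_ROWS    (BUF_BYTES / ROW_BYTES)

static unsigned char buf[BUF_ROWS * ROW_BYTES];
static int tail;			/* byte offset of the next write */
static int cnt;				/* rows currently stored */

/* Append one row that is about to scroll off the visible screen. */
static void scrollback_append(const unsigned char *row)
{
	memcpy(buf + tail, row, ROW_BYTES);

	tail += ROW_BYTES;
	if (tail >= (int)sizeof(buf))
		tail = 0;		/* wrap: oldest rows get overwritten */

	if (cnt < BUF_ROWS)
		cnt++;
}

int main(void)
{
	unsigned char row[ROW_BYTES] = { 0 };
	int i;

	/* Push more rows than the buffer can hold to exercise the wrap. */
	for (i = 0; i < BUF_ROWS + 100; i++)
		scrollback_append(row);

	printf("capacity: %d rows (~%d 80x25 screens), stored: %d rows\n",
	       BUF_ROWS, BUF_ROWS / 25, cnt);
	return 0;
}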
- config MDA_CONSOLE depends on !M68K && !PARISC && ISA tristate "MDA text console (dual-headed)" diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 7f2526b43b3364665df9350bb80d4f1eb168a6c4..46a6e65689947bf909713d44621d4ac9adcc4b0b 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -33,12 +33,6 @@ #define FONT_DATA ((unsigned char *)font_vga_8x16.data) -/* borrowed from fbcon.c */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FONT_EXTRA_WORDS 3 - static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; @@ -519,6 +513,7 @@ static int newport_set_font(int unit, struct console_font *op) FNTSIZE(new_data) = size; FNTCHARCNT(new_data) = op->charcount; REFCOUNT(new_data) = 0; /* usage counter */ + FNTSUM(new_data) = 0; p = new_data; for (i = 0; i < op->charcount; i++) { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 09731b2f6815f98651acba84cf9cc8d679f34f7a..4509d0547087aac24e19f29251cb07c88945dc36 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -165,209 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK -/* software scrollback */ -struct vgacon_scrollback_info { - void *data; - int tail; - int size; - int rows; - int cnt; - int cur; - int save; - int restore; -}; - -static struct vgacon_scrollback_info *vgacon_scrollback_cur; -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; -static bool scrollback_persistent = \ - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); - -static void vgacon_scrollback_reset(int vc_num, size_t reset_size) -{ - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; - - if (scrollback->data && reset_size > 0) - memset(scrollback->data, 0, reset_size); - - scrollback->cnt = 0; - scrollback->tail = 0; - scrollback->cur = 0; -} - -static void vgacon_scrollback_init(int vc_num) -{ - int pitch = vga_video_num_columns * 2; - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - int rows = size / pitch; - void *data; - - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, - GFP_NOWAIT); - - vgacon_scrollbacks[vc_num].data = data; - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - - vgacon_scrollback_cur->rows = rows - 1; - vgacon_scrollback_cur->size = rows * pitch; - - vgacon_scrollback_reset(vc_num, size); -} - -static void vgacon_scrollback_switch(int vc_num) -{ - if (!scrollback_persistent) - vc_num = 0; - - if (!vgacon_scrollbacks[vc_num].data) { - vgacon_scrollback_init(vc_num); - } else { - if (scrollback_persistent) { - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - } else { - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(vc_num, size); - } - } -} - -static void vgacon_scrollback_startup(void) -{ - vgacon_scrollback_cur = &vgacon_scrollbacks[0]; - vgacon_scrollback_init(0); -} - -static void vgacon_scrollback_update(struct vc_data *c, int t, int count) -{ - void *p; - - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || - c->vc_num != fg_console) - return; - - p = (void 
*) (c->vc_origin + t * c->vc_size_row); - - while (count--) { - scr_memcpyw(vgacon_scrollback_cur->data + - vgacon_scrollback_cur->tail, - p, c->vc_size_row); - - vgacon_scrollback_cur->cnt++; - p += c->vc_size_row; - vgacon_scrollback_cur->tail += c->vc_size_row; - - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; - - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_restore_screen(struct vc_data *c) -{ - vgacon_scrollback_cur->save = 0; - - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, - c->vc_screenbuf_size > vga_vram_size ? - vga_vram_size : c->vc_screenbuf_size); - vgacon_scrollback_cur->restore = 1; - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_scrolldelta(struct vc_data *c, int lines) -{ - int start, end, count, soff; - - if (!lines) { - c->vc_visible_origin = c->vc_origin; - vga_set_mem_top(c); - return; - } - - if (!vgacon_scrollback_cur->data) - return; - - if (!vgacon_scrollback_cur->save) { - vgacon_cursor(c, CM_ERASE); - vgacon_save_screen(c); - vgacon_scrollback_cur->save = 1; - } - - vgacon_scrollback_cur->restore = 0; - start = vgacon_scrollback_cur->cur + lines; - end = start + abs(lines); - - if (start < 0) - start = 0; - - if (start > vgacon_scrollback_cur->cnt) - start = vgacon_scrollback_cur->cnt; - - if (end < 0) - end = 0; - - if (end > vgacon_scrollback_cur->cnt) - end = vgacon_scrollback_cur->cnt; - - vgacon_scrollback_cur->cur = start; - count = end - start; - soff = vgacon_scrollback_cur->tail - - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); - soff -= count * c->vc_size_row; - - if (soff < 0) - soff += vgacon_scrollback_cur->size; - - count = vgacon_scrollback_cur->cnt - start; - - if (count > c->vc_rows) - count = c->vc_rows; - - if (count) { - int copysize; - - int diff = c->vc_rows - count; - void *d = (void *) c->vc_origin; - void *s = (void *) c->vc_screenbuf; - - count *= c->vc_size_row; - /* how much memory to end of buffer left? */ - copysize = min(count, vgacon_scrollback_cur->size - soff); - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); - d += copysize; - count -= copysize; - - if (count) { - scr_memcpyw(d, vgacon_scrollback_cur->data, count); - d += count; - } - - if (diff) - scr_memcpyw(d, s, diff * c->vc_size_row); - } else - vgacon_cursor(c, CM_MOVE); -} - -static void vgacon_flush_scrollback(struct vc_data *c) -{ - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(c->vc_num, size); -} -#else -#define vgacon_scrollback_startup(...) do { } while (0) -#define vgacon_scrollback_init(...) do { } while (0) -#define vgacon_scrollback_update(...) do { } while (0) -#define vgacon_scrollback_switch(...) 
do { } while (0) - static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) @@ -381,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) vga_set_mem_top(c); } -static void vgacon_flush_scrollback(struct vc_data *c) -{ -} -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ - static const char *vgacon_startup(void) { const char *display_desc = NULL; @@ -568,10 +360,7 @@ static const char *vgacon_startup(void) vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - if (!vga_init_done) { - vgacon_scrollback_startup(); - vga_init_done = true; - } + vga_init_done = true; return display_desc; } @@ -862,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } - vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } @@ -1315,12 +1103,23 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { + if ((width << 1) * height > vga_vram_size) + return -EINVAL; + + if (user) { + /* + * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed + * the video mode! Set the new defaults then and go away. + */ + screen_info.orig_video_cols = width; + screen_info.orig_video_lines = height; + vga_default_font_height = c->vc_font.height; + return 0; + } if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) - /* let svgatextmode tinker with video timings and - return success */ - return (user) ? 0 : -EINVAL; + return -EINVAL; if (con_is_visible(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); @@ -1376,7 +1175,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { - vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), @@ -1440,7 +1238,6 @@ const struct consw vga_con = { .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, - .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 591a13a597874ec008430f00d448d9aab4a7b847..f99558d006bf43501efb0d00213d5841951ff660 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2,6 +2,18 @@ # fbdev configuration # +config FB_CMDLINE + bool + +config FB_NOTIFY + bool + +config FB_CLPS711X_OLD + tristate + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + menuconfig FB tristate "Support for frame buffer devices" select FB_CMDLINE @@ -54,12 +66,6 @@ config FIRMWARE_EDID combination with certain motherboards and monitors are known to suffer from this problem. -config FB_CMDLINE - bool - -config FB_NOTIFY - bool - config FB_DDC tristate depends on FB @@ -329,12 +335,6 @@ config FB_ACORN hardware found in Acorn RISC PCs and other ARM-based machines. If unsure, say N. 
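Back in the vgacon.c hunk above, vgacon_resize() now starts by rejecting any geometry that cannot fit in the text-mode VRAM: a screen of width columns by height rows needs width * 2 * height bytes, two per character cell, and the resize fails with -EINVAL when that exceeds vga_vram_size. A quick stand-alone version of the same arithmetic; the 32 KB limit below is only an assumed example value:

#include <stdio.h>

/* Refuse a console resize that would not fit in the text-mode VRAM:
 * each character cell is two bytes (glyph index + attribute).
 */
static int text_mode_fits(unsigned int cols, unsigned int rows,
			  unsigned int vram_size)
{
	return (cols << 1) * rows <= vram_size;
}

int main(void)
{
	unsigned int vram = 32 * 1024;	/* assumed text RAM, illustration only */

	printf(" 80x25: %s\n", text_mode_fits(80, 25, vram) ? "ok" : "too big");
	printf("132x60: %s\n", text_mode_fits(132, 60, vram) ? "ok" : "too big");
	printf("240x75: %s\n", text_mode_fits(240, 75, vram) ? "ok" : "too big");
	return 0;
}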
-config FB_CLPS711X_OLD - tristate - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - config FB_CLPS711X tristate "CLPS711X LCD support" depends on FB && (ARCH_CLPS711X || COMPILE_TEST) @@ -1456,7 +1456,6 @@ if FB_VIA config FB_VIA_DIRECT_PROCFS bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)" - depends on FB_VIA default n help Allow direct hardware access to some output registers via procfs. @@ -1466,7 +1465,6 @@ config FB_VIA_DIRECT_PROCFS config FB_VIA_X_COMPATIBILITY bool "X server compatibility" - depends on FB_VIA default n help This option reduces the functionality (power saving, ...) of the @@ -2308,10 +2306,6 @@ config FB_SIMPLE Configuration re: surface address, size, and format must be provided through device tree, or plain old platform data. -source "drivers/video/fbdev/omap/Kconfig" -source "drivers/video/fbdev/omap2/Kconfig" -source "drivers/video/fbdev/mmp/Kconfig" - config FB_SSD1307 tristate "Solomon SSD1307 framebuffer support" depends on FB && I2C @@ -2341,3 +2335,7 @@ config FB_SM712 This driver is also available as a module. The module will be called sm712fb. If you want to compile it as a module, say M here and read . + +source "drivers/video/fbdev/omap/Kconfig" +source "drivers/video/fbdev/omap2/Kconfig" +source "drivers/video/fbdev/mmp/Kconfig" diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index 0777aff211e5cddad3c216e0f58f6c9576e9ac7f..7584570266940cf05f9b9a9947b131c2c3562339 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -1855,7 +1855,7 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, var->yspot = par->crsr.spot_y; if (size > var->height * var->width) return -ENAMETOOLONG; - if (!access_ok(VERIFY_WRITE, data, size)) + if (!access_ok(data, size)) return -EFAULT; delta = 1 << par->crsr.fmode; lspr = lofsprite + (delta << 1); @@ -1935,7 +1935,7 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, return -EINVAL; if (!var->height) return -EINVAL; - if (!access_ok(VERIFY_READ, data, var->width * var->height)) + if (!access_ok(data, var->width * var->height)) return -EFAULT; delta = 1 << fmode; lofsprite = shfsprite = (u_short *)spritememory; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 076d24afbd728bb3e8b4ffbc41f8a3be9642b51d..4ed55e6bbb84047927c0da3a314b419e9312dd02 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -22,6 +22,7 @@ #include #include #include +#include